Diffstat (limited to 'drivers')
-rw-r--r--  drivers/Makefile  2
-rw-r--r--  drivers/acpi/Kconfig  1
-rw-r--r--  drivers/acpi/acpi_lpit.c  2
-rw-r--r--  drivers/acpi/acpi_lpss.c  11
-rw-r--r--  drivers/acpi/acpi_processor.c  182
-rw-r--r--  drivers/acpi/acpi_video.c  2
-rw-r--r--  drivers/acpi/acpica/acapps.h  4
-rw-r--r--  drivers/acpi/acpica/accommon.h  2
-rw-r--r--  drivers/acpi/acpica/acconvert.h  2
-rw-r--r--  drivers/acpi/acpica/acdebug.h  2
-rw-r--r--  drivers/acpi/acpica/acdispat.h  2
-rw-r--r--  drivers/acpi/acpica/acevents.h  2
-rw-r--r--  drivers/acpi/acpica/acglobal.h  2
-rw-r--r--  drivers/acpi/acpica/achware.h  2
-rw-r--r--  drivers/acpi/acpica/acinterp.h  2
-rw-r--r--  drivers/acpi/acpica/aclocal.h  2
-rw-r--r--  drivers/acpi/acpica/acmacros.h  2
-rw-r--r--  drivers/acpi/acpica/acnamesp.h  2
-rw-r--r--  drivers/acpi/acpica/acobject.h  5
-rw-r--r--  drivers/acpi/acpica/acopcode.h  2
-rw-r--r--  drivers/acpi/acpica/acparser.h  2
-rw-r--r--  drivers/acpi/acpica/acpredef.h  2
-rw-r--r--  drivers/acpi/acpica/acresrc.h  2
-rw-r--r--  drivers/acpi/acpica/acstruct.h  2
-rw-r--r--  drivers/acpi/acpica/actables.h  2
-rw-r--r--  drivers/acpi/acpica/acutils.h  2
-rw-r--r--  drivers/acpi/acpica/amlcode.h  2
-rw-r--r--  drivers/acpi/acpica/amlresrc.h  2
-rw-r--r--  drivers/acpi/acpica/dbhistry.c  2
-rw-r--r--  drivers/acpi/acpica/dbinput.c  2
-rw-r--r--  drivers/acpi/acpica/dsargs.c  2
-rw-r--r--  drivers/acpi/acpica/dscontrol.c  2
-rw-r--r--  drivers/acpi/acpica/dsdebug.c  2
-rw-r--r--  drivers/acpi/acpica/dsfield.c  4
-rw-r--r--  drivers/acpi/acpica/dsinit.c  2
-rw-r--r--  drivers/acpi/acpica/dsmethod.c  2
-rw-r--r--  drivers/acpi/acpica/dsobject.c  2
-rw-r--r--  drivers/acpi/acpica/dsopcode.c  4
-rw-r--r--  drivers/acpi/acpica/dspkginit.c  2
-rw-r--r--  drivers/acpi/acpica/dswexec.c  2
-rw-r--r--  drivers/acpi/acpica/dswload.c  23
-rw-r--r--  drivers/acpi/acpica/dswload2.c  2
-rw-r--r--  drivers/acpi/acpica/dswscope.c  2
-rw-r--r--  drivers/acpi/acpica/dswstate.c  2
-rw-r--r--  drivers/acpi/acpica/evevent.c  2
-rw-r--r--  drivers/acpi/acpica/evglock.c  2
-rw-r--r--  drivers/acpi/acpica/evgpe.c  2
-rw-r--r--  drivers/acpi/acpica/evgpeblk.c  2
-rw-r--r--  drivers/acpi/acpica/evgpeinit.c  2
-rw-r--r--  drivers/acpi/acpica/evgpeutil.c  2
-rw-r--r--  drivers/acpi/acpica/evhandler.c  2
-rw-r--r--  drivers/acpi/acpica/evmisc.c  2
-rw-r--r--  drivers/acpi/acpica/evregion.c  2
-rw-r--r--  drivers/acpi/acpica/evrgnini.c  2
-rw-r--r--  drivers/acpi/acpica/evxface.c  2
-rw-r--r--  drivers/acpi/acpica/evxfevnt.c  2
-rw-r--r--  drivers/acpi/acpica/evxfgpe.c  2
-rw-r--r--  drivers/acpi/acpica/evxfregn.c  2
-rw-r--r--  drivers/acpi/acpica/exconcat.c  2
-rw-r--r--  drivers/acpi/acpica/exconfig.c  2
-rw-r--r--  drivers/acpi/acpica/exconvrt.c  2
-rw-r--r--  drivers/acpi/acpica/excreate.c  2
-rw-r--r--  drivers/acpi/acpica/exdebug.c  2
-rw-r--r--  drivers/acpi/acpica/exdump.c  2
-rw-r--r--  drivers/acpi/acpica/exfield.c  12
-rw-r--r--  drivers/acpi/acpica/exfldio.c  2
-rw-r--r--  drivers/acpi/acpica/exmisc.c  2
-rw-r--r--  drivers/acpi/acpica/exmutex.c  2
-rw-r--r--  drivers/acpi/acpica/exnames.c  2
-rw-r--r--  drivers/acpi/acpica/exoparg1.c  2
-rw-r--r--  drivers/acpi/acpica/exoparg2.c  2
-rw-r--r--  drivers/acpi/acpica/exoparg3.c  2
-rw-r--r--  drivers/acpi/acpica/exoparg6.c  2
-rw-r--r--  drivers/acpi/acpica/exprep.c  2
-rw-r--r--  drivers/acpi/acpica/exregion.c  2
-rw-r--r--  drivers/acpi/acpica/exresnte.c  2
-rw-r--r--  drivers/acpi/acpica/exresolv.c  2
-rw-r--r--  drivers/acpi/acpica/exresop.c  2
-rw-r--r--  drivers/acpi/acpica/exserial.c  2
-rw-r--r--  drivers/acpi/acpica/exstore.c  2
-rw-r--r--  drivers/acpi/acpica/exstoren.c  2
-rw-r--r--  drivers/acpi/acpica/exstorob.c  2
-rw-r--r--  drivers/acpi/acpica/exsystem.c  2
-rw-r--r--  drivers/acpi/acpica/extrace.c  2
-rw-r--r--  drivers/acpi/acpica/exutils.c  2
-rw-r--r--  drivers/acpi/acpica/hwacpi.c  2
-rw-r--r--  drivers/acpi/acpica/hwesleep.c  2
-rw-r--r--  drivers/acpi/acpica/hwgpe.c  2
-rw-r--r--  drivers/acpi/acpica/hwsleep.c  2
-rw-r--r--  drivers/acpi/acpica/hwtimer.c  2
-rw-r--r--  drivers/acpi/acpica/hwvalid.c  2
-rw-r--r--  drivers/acpi/acpica/hwxface.c  2
-rw-r--r--  drivers/acpi/acpica/hwxfsleep.c  2
-rw-r--r--  drivers/acpi/acpica/nsarguments.c  2
-rw-r--r--  drivers/acpi/acpica/nsconvert.c  2
-rw-r--r--  drivers/acpi/acpica/nsdump.c  2
-rw-r--r--  drivers/acpi/acpica/nsdumpdv.c  2
-rw-r--r--  drivers/acpi/acpica/nsinit.c  2
-rw-r--r--  drivers/acpi/acpica/nsload.c  2
-rw-r--r--  drivers/acpi/acpica/nsparse.c  2
-rw-r--r--  drivers/acpi/acpica/nspredef.c  2
-rw-r--r--  drivers/acpi/acpica/nsprepkg.c  2
-rw-r--r--  drivers/acpi/acpica/nsrepair.c  2
-rw-r--r--  drivers/acpi/acpica/nsrepair2.c  2
-rw-r--r--  drivers/acpi/acpica/nsutils.c  2
-rw-r--r--  drivers/acpi/acpica/nswalk.c  2
-rw-r--r--  drivers/acpi/acpica/nsxfname.c  2
-rw-r--r--  drivers/acpi/acpica/psargs.c  2
-rw-r--r--  drivers/acpi/acpica/psloop.c  2
-rw-r--r--  drivers/acpi/acpica/psobject.c  2
-rw-r--r--  drivers/acpi/acpica/psopcode.c  2
-rw-r--r--  drivers/acpi/acpica/psopinfo.c  2
-rw-r--r--  drivers/acpi/acpica/psparse.c  2
-rw-r--r--  drivers/acpi/acpica/psscope.c  2
-rw-r--r--  drivers/acpi/acpica/pstree.c  2
-rw-r--r--  drivers/acpi/acpica/psutils.c  2
-rw-r--r--  drivers/acpi/acpica/pswalk.c  2
-rw-r--r--  drivers/acpi/acpica/psxface.c  2
-rw-r--r--  drivers/acpi/acpica/tbdata.c  2
-rw-r--r--  drivers/acpi/acpica/tbfadt.c  2
-rw-r--r--  drivers/acpi/acpica/tbfind.c  2
-rw-r--r--  drivers/acpi/acpica/tbinstal.c  2
-rw-r--r--  drivers/acpi/acpica/tbprint.c  2
-rw-r--r--  drivers/acpi/acpica/tbutils.c  2
-rw-r--r--  drivers/acpi/acpica/tbxface.c  2
-rw-r--r--  drivers/acpi/acpica/tbxfload.c  2
-rw-r--r--  drivers/acpi/acpica/tbxfroot.c  2
-rw-r--r--  drivers/acpi/acpica/utaddress.c  2
-rw-r--r--  drivers/acpi/acpica/utalloc.c  2
-rw-r--r--  drivers/acpi/acpica/utascii.c  2
-rw-r--r--  drivers/acpi/acpica/utbuffer.c  2
-rw-r--r--  drivers/acpi/acpica/utcache.c  2
-rw-r--r--  drivers/acpi/acpica/utcopy.c  2
-rw-r--r--  drivers/acpi/acpica/utdebug.c  2
-rw-r--r--  drivers/acpi/acpica/utdecode.c  2
-rw-r--r--  drivers/acpi/acpica/uteval.c  2
-rw-r--r--  drivers/acpi/acpica/utglobal.c  2
-rw-r--r--  drivers/acpi/acpica/uthex.c  2
-rw-r--r--  drivers/acpi/acpica/utids.c  2
-rw-r--r--  drivers/acpi/acpica/utinit.c  2
-rw-r--r--  drivers/acpi/acpica/utlock.c  2
-rw-r--r--  drivers/acpi/acpica/utobject.c  2
-rw-r--r--  drivers/acpi/acpica/utosi.c  2
-rw-r--r--  drivers/acpi/acpica/utpredef.c  2
-rw-r--r--  drivers/acpi/acpica/utprint.c  2
-rw-r--r--  drivers/acpi/acpica/uttrack.c  2
-rw-r--r--  drivers/acpi/acpica/utuuid.c  2
-rw-r--r--  drivers/acpi/acpica/utxface.c  2
-rw-r--r--  drivers/acpi/acpica/utxfinit.c  2
-rw-r--r--  drivers/acpi/apei/ghes.c  2
-rw-r--r--  drivers/acpi/arm64/iort.c  57
-rw-r--r--  drivers/acpi/battery.c  75
-rw-r--r--  drivers/acpi/button.c  11
-rw-r--r--  drivers/acpi/device_pm.c  1
-rw-r--r--  drivers/acpi/dptf/dptf_power.c  1
-rw-r--r--  drivers/acpi/dptf/int340x_thermal.c  4
-rw-r--r--  drivers/acpi/ec.c  16
-rw-r--r--  drivers/acpi/fan.c  97
-rw-r--r--  drivers/acpi/pptt.c  29
-rw-r--r--  drivers/acpi/processor_idle.c  174
-rw-r--r--  drivers/acpi/sleep.c  3
-rw-r--r--  drivers/acpi/thermal.c  34
-rw-r--r--  drivers/acpi/video_detect.c  29
-rw-r--r--  drivers/android/binder.c  43
-rw-r--r--  drivers/ata/acard-ahci.c  4
-rw-r--r--  drivers/ata/ahci_brcm.c  70
-rw-r--r--  drivers/ata/libata-scsi.c  9
-rw-r--r--  drivers/ata/pata_arasan_cf.c  2
-rw-r--r--  drivers/ata/pata_macio.c  2
-rw-r--r--  drivers/ata/pata_octeon_cf.c  6
-rw-r--r--  drivers/ata/pata_rb532_cf.c  2
-rw-r--r--  drivers/atm/eni.c  8
-rw-r--r--  drivers/atm/firestream.c  3
-rw-r--r--  drivers/atm/fore200e.c  25
-rw-r--r--  drivers/auxdisplay/cfag12864bfb.c  2
-rw-r--r--  drivers/auxdisplay/ht16k33.c  2
-rw-r--r--  drivers/base/Kconfig  2
-rw-r--r--  drivers/base/arch_topology.c  20
-rw-r--r--  drivers/base/attribute_container.c  103
-rw-r--r--  drivers/base/base.h  19
-rw-r--r--  drivers/base/bus.c  1
-rw-r--r--  drivers/base/class.c  1
-rw-r--r--  drivers/base/component.c  11
-rw-r--r--  drivers/base/dd.c  5
-rw-r--r--  drivers/base/devtmpfs.c  79
-rw-r--r--  drivers/base/driver.c  1
-rw-r--r--  drivers/base/firmware_loader/builtin/Makefile  2
-rw-r--r--  drivers/base/firmware_loader/fallback.c  11
-rw-r--r--  drivers/base/firmware_loader/firmware.h  16
-rw-r--r--  drivers/base/firmware_loader/main.c  2
-rw-r--r--  drivers/base/memory.c  25
-rw-r--r--  drivers/base/platform.c  12
-rw-r--r--  drivers/base/power/main.c  42
-rw-r--r--  drivers/base/power/qos-test.c  2
-rw-r--r--  drivers/base/power/runtime.c  13
-rw-r--r--  drivers/base/power/wakeup.c  3
-rw-r--r--  drivers/base/regmap/regmap-i2c.c  10
-rw-r--r--  drivers/base/regmap/regmap.c  17
-rw-r--r--  drivers/base/swnode.c  154
-rw-r--r--  drivers/base/test/Kconfig  3
-rw-r--r--  drivers/base/test/Makefile  2
-rw-r--r--  drivers/base/test/property-entry-test.c  475
-rw-r--r--  drivers/base/test/test_async_driver_probe.c  3
-rw-r--r--  drivers/base/transport_class.c  11
-rw-r--r--  drivers/bcma/driver_chipcommon_b.c  2
-rw-r--r--  drivers/bcma/driver_pci_host.c  6
-rw-r--r--  drivers/bcma/host_soc.c  2
-rw-r--r--  drivers/bcma/scan.c  13
-rw-r--r--  drivers/block/aoe/aoeblk.c  1
-rw-r--r--  drivers/block/floppy.c  3
-rw-r--r--  drivers/block/null_blk_zoned.c  4
-rw-r--r--  drivers/block/paride/pcd.c  3
-rw-r--r--  drivers/block/paride/pd.c  1
-rw-r--r--  drivers/block/paride/pf.c  1
-rw-r--r--  drivers/block/pktcdvd.c  26
-rw-r--r--  drivers/block/sunvdc.c  1
-rw-r--r--  drivers/block/umem.c  2
-rw-r--r--  drivers/block/virtio_blk.c  3
-rw-r--r--  drivers/block/xen-blkfront.c  1
-rw-r--r--  drivers/block/zram/zram_drv.c  10
-rw-r--r--  drivers/bluetooth/btbcm.c  48
-rw-r--r--  drivers/bluetooth/btbcm.h  16
-rw-r--r--  drivers/bluetooth/btrtl.c  20
-rw-r--r--  drivers/bluetooth/btsdio.c  19
-rw-r--r--  drivers/bluetooth/btusb.c  13
-rw-r--r--  drivers/bluetooth/hci_bcm.c  73
-rw-r--r--  drivers/bluetooth/hci_h4.c  1
-rw-r--r--  drivers/bluetooth/hci_h5.c  3
-rw-r--r--  drivers/bluetooth/hci_qca.c  418
-rw-r--r--  drivers/bluetooth/hci_uart.h  7
-rw-r--r--  drivers/bluetooth/hci_vhci.c  1
-rw-r--r--  drivers/bus/fsl-mc/fsl-mc-bus.c  6
-rw-r--r--  drivers/bus/fsl-mc/mc-io.c  4
-rw-r--r--  drivers/bus/ti-sysc.c  10
-rw-r--r--  drivers/cdrom/cdrom.c  35
-rw-r--r--  drivers/cdrom/gdrom.c  3
-rw-r--r--  drivers/char/agp/generic.c  2
-rw-r--r--  drivers/char/agp/intel-gtt.c  2
-rw-r--r--  drivers/char/applicom.c  4
-rw-r--r--  drivers/char/hpet.c  2
-rw-r--r--  drivers/char/hw_random/Kconfig  2
-rw-r--r--  drivers/char/hw_random/intel-rng.c  2
-rw-r--r--  drivers/char/hw_random/iproc-rng200.c  1
-rw-r--r--  drivers/char/hw_random/octeon-rng.c  4
-rw-r--r--  drivers/char/pcmcia/synclink_cs.c  2
-rw-r--r--  drivers/char/tpm/tpm-sysfs.c  34
-rw-r--r--  drivers/char/ttyprintk.c  15
-rw-r--r--  drivers/clk/clk.c  10
-rw-r--r--  drivers/clk/mmp/clk-of-mmp2.c  2
-rw-r--r--  drivers/clk/qcom/gcc-sdm845.c  7
-rw-r--r--  drivers/clk/renesas/clk-rz.c  4
-rw-r--r--  drivers/clk/samsung/clk-exynos5420.c  8
-rw-r--r--  drivers/clk/sunxi-ng/ccu-sun50i-h6-r.c  16
-rw-r--r--  drivers/clk/sunxi-ng/ccu-sun8i-r.c  21
-rw-r--r--  drivers/clk/sunxi-ng/ccu-sun8i-r40.c  6
-rw-r--r--  drivers/clk/sunxi-ng/ccu-sun8i-v3s.c  4
-rw-r--r--  drivers/clk/sunxi-ng/ccu-sun8i-v3s.h  2
-rw-r--r--  drivers/clk/tegra/clk.c  4
-rw-r--r--  drivers/clk/ti/clk-dra7-atl.c  1
-rw-r--r--  drivers/clocksource/Kconfig  76
-rw-r--r--  drivers/clocksource/Makefile  1
-rw-r--r--  drivers/clocksource/bcm2835_timer.c  5
-rw-r--r--  drivers/clocksource/em_sti.c  7
-rw-r--r--  drivers/clocksource/exynos_mct.c  2
-rw-r--r--  drivers/clocksource/hyperv_timer.c  84
-rw-r--r--  drivers/clocksource/sh_cmt.c  2
-rw-r--r--  drivers/clocksource/sh_mtu2.c  2
-rw-r--r--  drivers/clocksource/sh_tmu.c  2
-rw-r--r--  drivers/clocksource/timer-cadence-ttc.c  26
-rw-r--r--  drivers/clocksource/timer-microchip-pit64b.c  451
-rw-r--r--  drivers/clocksource/timer-ti-dm.c  20
-rw-r--r--  drivers/cpufreq/brcmstb-avs-cpufreq.c  2
-rw-r--r--  drivers/cpufreq/cppc_cpufreq.c  10
-rw-r--r--  drivers/cpufreq/cpufreq-dt-platdev.c  1
-rw-r--r--  drivers/cpufreq/cpufreq-nforce2.c  2
-rw-r--r--  drivers/cpufreq/cpufreq.c  147
-rw-r--r--  drivers/cpufreq/freq_table.c  4
-rw-r--r--  drivers/cpufreq/gx-suspmod.c  2
-rw-r--r--  drivers/cpufreq/imx-cpufreq-dt.c  6
-rw-r--r--  drivers/cpufreq/intel_pstate.c  40
-rw-r--r--  drivers/cpufreq/kirkwood-cpufreq.c  4
-rw-r--r--  drivers/cpufreq/longrun.c  6
-rw-r--r--  drivers/cpufreq/loongson2_cpufreq.c  8
-rw-r--r--  drivers/cpufreq/pcc-cpufreq.c  4
-rw-r--r--  drivers/cpufreq/s3c2416-cpufreq.c  12
-rw-r--r--  drivers/cpufreq/s5pv210-cpufreq.c  11
-rw-r--r--  drivers/cpufreq/sh-cpufreq.c  2
-rw-r--r--  drivers/cpufreq/tegra186-cpufreq.c  4
-rw-r--r--  drivers/cpufreq/unicore2-cpufreq.c  2
-rw-r--r--  drivers/cpuidle/Kconfig.arm  12
-rw-r--r--  drivers/cpuidle/coupled.c  9
-rw-r--r--  drivers/cpuidle/cpuidle-clps711x.c  5
-rw-r--r--  drivers/cpuidle/cpuidle-kirkwood.c  5
-rw-r--r--  drivers/cpuidle/cpuidle.c  9
-rw-r--r--  drivers/cpuidle/driver.c  46
-rw-r--r--  drivers/cpuidle/governors/teo.c  2
-rw-r--r--  drivers/cpuidle/sysfs.c  16
-rw-r--r--  drivers/crypto/Kconfig  89
-rw-r--r--  drivers/crypto/allwinner/sun4i-ss/sun4i-ss-cipher.c  1
-rw-r--r--  drivers/crypto/allwinner/sun4i-ss/sun4i-ss-core.c  24
-rw-r--r--  drivers/crypto/allwinner/sun4i-ss/sun4i-ss-hash.c  5
-rw-r--r--  drivers/crypto/allwinner/sun4i-ss/sun4i-ss.h  9
-rw-r--r--  drivers/crypto/allwinner/sun8i-ce/sun8i-ce-cipher.c  6
-rw-r--r--  drivers/crypto/allwinner/sun8i-ce/sun8i-ce-core.c  6
-rw-r--r--  drivers/crypto/allwinner/sun8i-ce/sun8i-ce.h  8
-rw-r--r--  drivers/crypto/allwinner/sun8i-ss/sun8i-ss-cipher.c  2
-rw-r--r--  drivers/crypto/allwinner/sun8i-ss/sun8i-ss-core.c  4
-rw-r--r--  drivers/crypto/amcc/crypto4xx_alg.c  31
-rw-r--r--  drivers/crypto/amcc/crypto4xx_core.c  37
-rw-r--r--  drivers/crypto/amlogic/Kconfig  1
-rw-r--r--  drivers/crypto/amlogic/amlogic-gxl-cipher.c  1
-rw-r--r--  drivers/crypto/amlogic/amlogic-gxl-core.c  4
-rw-r--r--  drivers/crypto/atmel-aes.c  359
-rw-r--r--  drivers/crypto/atmel-authenc.h  3
-rw-r--r--  drivers/crypto/atmel-sha.c  473
-rw-r--r--  drivers/crypto/atmel-tdes.c  375
-rw-r--r--  drivers/crypto/axis/artpec6_crypto.c  10
-rw-r--r--  drivers/crypto/bcm/cipher.c  17
-rw-r--r--  drivers/crypto/caam/Kconfig  14
-rw-r--r--  drivers/crypto/caam/caamalg.c  33
-rw-r--r--  drivers/crypto/caam/caamalg_qi.c  44
-rw-r--r--  drivers/crypto/caam/caamalg_qi2.c  206
-rw-r--r--  drivers/crypto/caam/caamhash.c  167
-rw-r--r--  drivers/crypto/caam/ctrl.c  15
-rw-r--r--  drivers/crypto/cavium/cpt/cptvf_algs.c  2
-rw-r--r--  drivers/crypto/cavium/nitrox/nitrox_aead.c  4
-rw-r--r--  drivers/crypto/cavium/nitrox/nitrox_skcipher.c  12
-rw-r--r--  drivers/crypto/ccp/Makefile  4
-rw-r--r--  drivers/crypto/ccp/ccp-crypto-aes-cmac.c  1
-rw-r--r--  drivers/crypto/ccp/ccp-crypto-aes-galois.c  1
-rw-r--r--  drivers/crypto/ccp/ccp-crypto-aes.c  1
-rw-r--r--  drivers/crypto/ccp/ccp-crypto-sha.c  4
-rw-r--r--  drivers/crypto/ccp/ccp-dev-v3.c  1
-rw-r--r--  drivers/crypto/ccp/psp-dev.c  1042
-rw-r--r--  drivers/crypto/ccp/psp-dev.h  51
-rw-r--r--  drivers/crypto/ccp/sev-dev.c  1077
-rw-r--r--  drivers/crypto/ccp/sev-dev.h  63
-rw-r--r--  drivers/crypto/ccp/sp-dev.h  17
-rw-r--r--  drivers/crypto/ccp/sp-pci.c  43
-rw-r--r--  drivers/crypto/ccp/tee-dev.c  375
-rw-r--r--  drivers/crypto/ccp/tee-dev.h  110
-rw-r--r--  drivers/crypto/ccree/cc_aead.c  43
-rw-r--r--  drivers/crypto/ccree/cc_cipher.c  58
-rw-r--r--  drivers/crypto/ccree/cc_driver.c  24
-rw-r--r--  drivers/crypto/ccree/cc_driver.h  6
-rw-r--r--  drivers/crypto/ccree/cc_fips.c  2
-rw-r--r--  drivers/crypto/ccree/cc_hash.c  8
-rw-r--r--  drivers/crypto/ccree/cc_pm.c  39
-rw-r--r--  drivers/crypto/ccree/cc_pm.h  17
-rw-r--r--  drivers/crypto/ccree/cc_request_mgr.c  103
-rw-r--r--  drivers/crypto/ccree/cc_request_mgr.h  8
-rw-r--r--  drivers/crypto/chelsio/Kconfig  30
-rw-r--r--  drivers/crypto/chelsio/chcr_algo.c  53
-rw-r--r--  drivers/crypto/chelsio/chcr_core.c  10
-rw-r--r--  drivers/crypto/chelsio/chtls/chtls.h  7
-rw-r--r--  drivers/crypto/chelsio/chtls/chtls_cm.c  59
-rw-r--r--  drivers/crypto/chelsio/chtls/chtls_cm.h  21
-rw-r--r--  drivers/crypto/chelsio/chtls/chtls_hw.c  65
-rw-r--r--  drivers/crypto/chelsio/chtls/chtls_main.c  28
-rw-r--r--  drivers/crypto/geode-aes.c  24
-rw-r--r--  drivers/crypto/hifn_795x.c  2
-rw-r--r--  drivers/crypto/hisilicon/Kconfig  11
-rw-r--r--  drivers/crypto/hisilicon/hpre/hpre_crypto.c  141
-rw-r--r--  drivers/crypto/hisilicon/hpre/hpre_main.c  60
-rw-r--r--  drivers/crypto/hisilicon/sec2/sec.h  53
-rw-r--r--  drivers/crypto/hisilicon/sec2/sec_crypto.c  963
-rw-r--r--  drivers/crypto/hisilicon/sec2/sec_crypto.h  22
-rw-r--r--  drivers/crypto/hisilicon/sec2/sec_main.c  19
-rw-r--r--  drivers/crypto/hisilicon/sgl.c  17
-rw-r--r--  drivers/crypto/hisilicon/zip/zip.h  4
-rw-r--r--  drivers/crypto/hisilicon/zip/zip_crypto.c  92
-rw-r--r--  drivers/crypto/img-hash.c  6
-rw-r--r--  drivers/crypto/inside-secure/safexcel.c  12
-rw-r--r--  drivers/crypto/inside-secure/safexcel.h  34
-rw-r--r--  drivers/crypto/inside-secure/safexcel_cipher.c  600
-rw-r--r--  drivers/crypto/inside-secure/safexcel_hash.c  36
-rw-r--r--  drivers/crypto/inside-secure/safexcel_ring.c  130
-rw-r--r--  drivers/crypto/ixp4xx_crypto.c  31
-rw-r--r--  drivers/crypto/marvell/cipher.c  4
-rw-r--r--  drivers/crypto/mediatek/mtk-aes.c  4
-rw-r--r--  drivers/crypto/mxs-dcp.c  12
-rw-r--r--  drivers/crypto/n2_core.c  1
-rw-r--r--  drivers/crypto/omap-aes-gcm.c  223
-rw-r--r--  drivers/crypto/omap-aes.c  142
-rw-r--r--  drivers/crypto/omap-aes.h  12
-rw-r--r--  drivers/crypto/omap-crypto.c  37
-rw-r--r--  drivers/crypto/omap-des.c  13
-rw-r--r--  drivers/crypto/omap-sham.c  191
-rw-r--r--  drivers/crypto/padlock-aes.c  9
-rw-r--r--  drivers/crypto/padlock-sha.c  26
-rw-r--r--  drivers/crypto/picoxcell_crypto.c  30
-rw-r--r--  drivers/crypto/qat/qat_common/qat_algs.c  6
-rw-r--r--  drivers/crypto/qce/Makefile  7
-rw-r--r--  drivers/crypto/qce/common.c  244
-rw-r--r--  drivers/crypto/qce/core.c  4
-rw-r--r--  drivers/crypto/qce/dma.c  6
-rw-r--r--  drivers/crypto/qce/dma.h  3
-rw-r--r--  drivers/crypto/qce/sha.c  2
-rw-r--r--  drivers/crypto/qce/skcipher.c  41
-rw-r--r--  drivers/crypto/rockchip/rk3288_crypto_skcipher.c  4
-rw-r--r--  drivers/crypto/sahara.c  9
-rw-r--r--  drivers/crypto/stm32/Kconfig  6
-rw-r--r--  drivers/crypto/stm32/stm32-crc32.c  4
-rw-r--r--  drivers/crypto/stm32/stm32-hash.c  6
-rw-r--r--  drivers/crypto/talitos.c  15
-rw-r--r--  drivers/crypto/ux500/Kconfig  16
-rw-r--r--  drivers/crypto/ux500/cryp/cryp_core.c  2
-rw-r--r--  drivers/crypto/virtio/virtio_crypto_algs.c  8
-rw-r--r--  drivers/crypto/vmx/aes_xts.c  3
-rw-r--r--  drivers/devfreq/Kconfig  21
-rw-r--r--  drivers/devfreq/Makefile  1
-rw-r--r--  drivers/devfreq/devfreq-event.c  4
-rw-r--r--  drivers/devfreq/devfreq.c  166
-rw-r--r--  drivers/devfreq/event/Kconfig  6
-rw-r--r--  drivers/devfreq/event/exynos-nocp.c  2
-rw-r--r--  drivers/devfreq/event/exynos-nocp.h  2
-rw-r--r--  drivers/devfreq/event/exynos-ppmu.c  15
-rw-r--r--  drivers/devfreq/event/exynos-ppmu.h  2
-rw-r--r--  drivers/devfreq/event/rockchip-dfi.c  5
-rw-r--r--  drivers/devfreq/exynos-bus.c  155
-rw-r--r--  drivers/devfreq/imx8m-ddrc.c  471
-rw-r--r--  drivers/devfreq/rk3399_dmc.c  19
-rw-r--r--  drivers/dma-buf/Kconfig  11
-rw-r--r--  drivers/dma-buf/Makefile  2
-rw-r--r--  drivers/dma-buf/dma-buf.c  63
-rw-r--r--  drivers/dma-buf/dma-heap.c  298
-rw-r--r--  drivers/dma-buf/dma-resv.c  32
-rw-r--r--  drivers/dma-buf/heaps/Kconfig  14
-rw-r--r--  drivers/dma-buf/heaps/Makefile  4
-rw-r--r--  drivers/dma-buf/heaps/cma_heap.c  177
-rw-r--r--  drivers/dma-buf/heaps/heap-helpers.c  271
-rw-r--r--  drivers/dma-buf/heaps/heap-helpers.h  53
-rw-r--r--  drivers/dma-buf/heaps/system_heap.c  123
-rw-r--r--  drivers/dma-buf/udmabuf.c  84
-rw-r--r--  drivers/dma/Kconfig  30
-rw-r--r--  drivers/dma/Makefile  3
-rw-r--r--  drivers/dma/altera-msgdma.c  4
-rw-r--r--  drivers/dma/bcm2835-dma.c  5
-rw-r--r--  drivers/dma/dma-axi-dmac.c  10
-rw-r--r--  drivers/dma/dma-jz4780.c  7
-rw-r--r--  drivers/dma/dmaengine.c  628
-rw-r--r--  drivers/dma/dmaengine.h  11
-rw-r--r--  drivers/dma/dw-axi-dmac/dw-axi-dmac-platform.c  8
-rw-r--r--  drivers/dma/fsl-edma-common.c  5
-rw-r--r--  drivers/dma/fsl-edma-common.h  1
-rw-r--r--  drivers/dma/fsl-edma.c  8
-rw-r--r--  drivers/dma/fsl-qdma.c  2
-rw-r--r--  drivers/dma/hisi_dma.c  611
-rw-r--r--  drivers/dma/idxd/Makefile  2
-rw-r--r--  drivers/dma/idxd/cdev.c  302
-rw-r--r--  drivers/dma/idxd/device.c  693
-rw-r--r--  drivers/dma/idxd/dma.c  217
-rw-r--r--  drivers/dma/idxd/idxd.h  316
-rw-r--r--  drivers/dma/idxd/init.c  533
-rw-r--r--  drivers/dma/idxd/irq.c  261
-rw-r--r--  drivers/dma/idxd/registers.h  336
-rw-r--r--  drivers/dma/idxd/submit.c  95
-rw-r--r--  drivers/dma/idxd/sysfs.c  1528
-rw-r--r--  drivers/dma/imx-sdma.c  37
-rw-r--r--  drivers/dma/ioat/init.c  38
-rw-r--r--  drivers/dma/mediatek/mtk-uart-apdma.c  3
-rw-r--r--  drivers/dma/of-dma.c  2
-rw-r--r--  drivers/dma/owl-dma.c  3
-rw-r--r--  drivers/dma/pl330.c  16
-rw-r--r--  drivers/dma/plx_dma.c  639
-rw-r--r--  drivers/dma/s3c24xx-dma.c  24
-rw-r--r--  drivers/dma/sf-pdma/sf-pdma.c  4
-rw-r--r--  drivers/dma/sun4i-dma.c  48
-rw-r--r--  drivers/dma/ti/Kconfig  24
-rw-r--r--  drivers/dma/ti/Makefile  3
-rw-r--r--  drivers/dma/ti/edma.c  39
-rw-r--r--  drivers/dma/ti/k3-psil-am654.c  175
-rw-r--r--  drivers/dma/ti/k3-psil-j721e.c  222
-rw-r--r--  drivers/dma/ti/k3-psil-priv.h  43
-rw-r--r--  drivers/dma/ti/k3-psil.c  90
-rw-r--r--  drivers/dma/ti/k3-udma-glue.c  1198
-rw-r--r--  drivers/dma/ti/k3-udma-private.c  133
-rw-r--r--  drivers/dma/ti/k3-udma.c  3432
-rw-r--r--  drivers/dma/ti/k3-udma.h  151
-rw-r--r--  drivers/dma/virt-dma.c  10
-rw-r--r--  drivers/dma/virt-dma.h  27
-rw-r--r--  drivers/dma/xilinx/zynqmp_dma.c  24
-rw-r--r--  drivers/edac/Kconfig  3
-rw-r--r--  drivers/edac/amd64_edac.c  65
-rw-r--r--  drivers/edac/amd64_edac.h  3
-rw-r--r--  drivers/edac/aspeed_edac.c  4
-rw-r--r--  drivers/edac/i3000_edac.c  2
-rw-r--r--  drivers/edac/i3200_edac.c  2
-rw-r--r--  drivers/edac/i5100_edac.c  7
-rw-r--r--  drivers/edac/i82975x_edac.c  2
-rw-r--r--  drivers/edac/ie31200_edac.c  2
-rw-r--r--  drivers/edac/mce_amd.c  105
-rw-r--r--  drivers/edac/sifive_edac.c  4
-rw-r--r--  drivers/edac/skx_common.c  2
-rw-r--r--  drivers/edac/x38_edac.c  2
-rw-r--r--  drivers/extcon/extcon-arizona.c  354
-rw-r--r--  drivers/extcon/extcon-sm5502.c  10
-rw-r--r--  drivers/firewire/nosy.c  2
-rw-r--r--  drivers/firmware/broadcom/bcm47xx_nvram.c  2
-rw-r--r--  drivers/firmware/efi/Kconfig  22
-rw-r--r--  drivers/firmware/efi/arm-init.c  107
-rw-r--r--  drivers/firmware/efi/capsule-loader.c  1
-rw-r--r--  drivers/firmware/efi/earlycon.c  16
-rw-r--r--  drivers/firmware/efi/efi.c  2
-rw-r--r--  drivers/firmware/efi/fake_mem.c  43
-rw-r--r--  drivers/firmware/efi/libstub/Makefile  2
-rw-r--r--  drivers/firmware/efi/libstub/arm-stub.c  110
-rw-r--r--  drivers/firmware/efi/libstub/arm32-stub.c  70
-rw-r--r--  drivers/firmware/efi/libstub/arm64-stub.c  32
-rw-r--r--  drivers/firmware/efi/libstub/efi-stub-helper.c  290
-rw-r--r--  drivers/firmware/efi/libstub/efistub.h  48
-rw-r--r--  drivers/firmware/efi/libstub/fdt.c  53
-rw-r--r--  drivers/firmware/efi/libstub/gop.c  163
-rw-r--r--  drivers/firmware/efi/libstub/pci.c  114
-rw-r--r--  drivers/firmware/efi/libstub/random.c  79
-rw-r--r--  drivers/firmware/efi/libstub/secureboot.c  11
-rw-r--r--  drivers/firmware/efi/libstub/tpm.c  48
-rw-r--r--  drivers/firmware/efi/memmap.c  95
-rw-r--r--  drivers/firmware/google/coreboot_table.c  7
-rw-r--r--  drivers/firmware/google/gsmi.c  25
-rw-r--r--  drivers/firmware/stratix10-svc.c  4
-rw-r--r--  drivers/fpga/dfl-afu-main.c  2
-rw-r--r--  drivers/fpga/dfl-fme-main.c  2
-rw-r--r--  drivers/fpga/ts73xx-fpga.c  4
-rw-r--r--  drivers/fpga/xilinx-pr-decoupler.c  3
-rw-r--r--  drivers/gpio/Kconfig  30
-rw-r--r--  drivers/gpio/Makefile  4
-rw-r--r--  drivers/gpio/TODO  46
-rw-r--r--  drivers/gpio/gpio-altera.c  2
-rw-r--r--  drivers/gpio/gpio-aspeed-sgpio.c  2
-rw-r--r--  drivers/gpio/gpio-aspeed.c  2
-rw-r--r--  drivers/gpio/gpio-bcm-kona.c  12
-rw-r--r--  drivers/gpio/gpio-creg-snps.c  4
-rw-r--r--  drivers/gpio/gpio-grgpio.c  15
-rw-r--r--  drivers/gpio/gpio-logicvc.c  170
-rw-r--r--  drivers/gpio/gpio-lynxpoint.c  471
-rw-r--r--  drivers/gpio/gpio-mockup.c  16
-rw-r--r--  drivers/gpio/gpio-mpc8xxx.c  1
-rw-r--r--  drivers/gpio/gpio-mt7621.c  3
-rw-r--r--  drivers/gpio/gpio-mvebu.c  8
-rw-r--r--  drivers/gpio/gpio-pca953x.c  5
-rw-r--r--  drivers/gpio/gpio-sama5d2-piobu.c  1
-rw-r--r--  drivers/gpio/gpio-sifive.c  252
-rw-r--r--  drivers/gpio/gpio-tb10x.c  1
-rw-r--r--  drivers/gpio/gpio-tegra.c  21
-rw-r--r--  drivers/gpio/gpio-tegra186.c  13
-rw-r--r--  drivers/gpio/gpio-thunderx.c  36
-rw-r--r--  drivers/gpio/gpio-vx855.c  2
-rw-r--r--  drivers/gpio/gpio-wcd934x.c  121
-rw-r--r--  drivers/gpio/gpio-xgs-iproc.c  3
-rw-r--r--  drivers/gpio/gpiolib-of.c  21
-rw-r--r--  drivers/gpio/gpiolib-sysfs.c  7
-rw-r--r--  drivers/gpio/gpiolib.c  218
-rw-r--r--  drivers/gpio/gpiolib.h  5
-rw-r--r--  drivers/gpu/drm/Kconfig  9
-rw-r--r--  drivers/gpu/drm/Makefile  4
-rw-r--r--  drivers/gpu/drm/amd/acp/Kconfig  10
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/Makefile  8
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu.h  55
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c  44
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h  2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_arcturus.c  91
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10.c  149
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c  41
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v8.c  41
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c  191
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.h  7
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c  16
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c  22
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.c  38
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.h  2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c  25
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c  113
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.h  3
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c  139
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_device.c  258
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_df.h  62
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_display.c  24
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c  4
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.c  211
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.h  24
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c  102
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c  2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c  14
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c  1
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c  8
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h  8
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c  42
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h  12
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c  13
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_ih.c  1
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c  17
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_job.c  4
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_job.h  1
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_jpeg.c  211
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_jpeg.h  64
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c  47
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c  1060
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_pm.h  1
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_pmu.c  20
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c  554
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h  27
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c  26
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_ras.h  11
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.c  2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.h  11
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c  48
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_sync.h  8
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c  45
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c  11
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.h  9
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_umc.c  10
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_umc.h  35
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c  18
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c  7
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c  188
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h  70
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c  51
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h  14
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c  140
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h  19
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_vm_sdma.c  13
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c  18
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.h  2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/atombios_dp.c  5
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/atombios_i2c.c  5
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/cik.c  14
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/cik.h  2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/cik_sdma.c  12
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/df_v1_7.c  9
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/df_v3_6.c  185
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c  102
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c  3
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c  3
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c  10
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c  413
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.c  81
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c  131
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c  33
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c  34
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c  164
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gmc_v9_0.h  18
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/jpeg_v1_0.c  586
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/jpeg_v1_0.h  32
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/jpeg_v2_0.c  827
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/jpeg_v2_0.h  42
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/jpeg_v2_5.c  641
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/jpeg_v2_5.h (renamed from drivers/gpu/drm/nouveau/nvkm/engine/nvdec/gp102.c)  23
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c  232
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/mmhub_v9_4.c  360
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/mmhub_v9_4.h  4
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/mmsch_v1_0.h  12
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c  82
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/mxgpu_ai.h  4
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/mxgpu_nv.c  10
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/navi10_ih.c  3
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/nbio_v7_4.c  35
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/nv.c  43
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/psp_gfx_if.h  1
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/psp_v10_0.c  65
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/psp_v11_0.c  154
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/psp_v12_0.c  84
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/psp_v3_1.c  89
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c  12
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c  12
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c  191
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c  45
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/si.c  6
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/si_dma.c  8
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/smu_v11_0_i2c.c  4
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/soc15.c  153
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/soc15.h  12
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/soc15_common.h  1
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/umc_v6_1.c  244
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/umc_v6_1.h  3
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c  589
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/vcn_v1_0.h  2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c  601
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/vcn_v2_0.h  13
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c  883
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/vega10_ih.c  22
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/vi.c  54
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/vi.h  2
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/Makefile  6
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_chardev.c  59
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_dbgdev.c  10
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_debugfs.c  2
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_device.c  24
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c  45
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h  3
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_doorbell.c  14
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_events.c  1
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_iommu.c  3
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.c  100
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.h  40
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue_v10.c  348
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_cik.c  5
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v10.c  66
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c  34
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_vi.c  9
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_packet_manager.c  36
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_packet_manager_v9.c (renamed from drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue_v9.c)  90
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_packet_manager_vi.c (renamed from drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue_vi.c)  41
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_priv.h  32
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_process.c  13
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c  18
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_topology.c  7
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_topology.h  2
-rw-r--r--  drivers/gpu/drm/amd/display/Kconfig  33
-rw-r--r--  drivers/gpu/drm/amd/display/Makefile  4
-rw-r--r--  drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c  738
-rw-r--r--  drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h  58
-rw-r--r--  drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c  1
-rw-r--r--  drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.c  67
-rw-r--r--  drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.h  9
-rw-r--r--  drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c  72
-rw-r--r--  drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.c  19
-rw-r--r--  drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c  452
-rw-r--r--  drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.h  7
-rw-r--r--  drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_pp_smu.c  6
-rw-r--r--  drivers/gpu/drm/amd/display/dc/Makefile  18
-rw-r--r--  drivers/gpu/drm/amd/display/dc/basics/Makefile  2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/basics/dc_common.c  101
-rw-r--r--  drivers/gpu/drm/amd/display/dc/basics/dc_common.h (renamed from drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr_r367.h)  36
-rw-r--r--  drivers/gpu/drm/amd/display/dc/bios/bios_parser.c  8
-rw-r--r--  drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c  78
-rw-r--r--  drivers/gpu/drm/amd/display/dc/bios/command_table2.c  85
-rw-r--r--  drivers/gpu/drm/amd/display/dc/bios/command_table_helper2.c  6
-rw-r--r--  drivers/gpu/drm/amd/display/dc/calcs/Makefile  11
-rw-r--r--  drivers/gpu/drm/amd/display/dc/calcs/dce_calcs.c  24
-rw-r--r--  drivers/gpu/drm/amd/display/dc/calcs/dcn_calcs.c  33
-rw-r--r--  drivers/gpu/drm/amd/display/dc/clk_mgr/Makefile  6
-rw-r--r--  drivers/gpu/drm/amd/display/dc/clk_mgr/clk_mgr.c  17
-rw-r--r--  drivers/gpu/drm/amd/display/dc/clk_mgr/dce112/dce112_clk_mgr.c  12
-rw-r--r--  drivers/gpu/drm/amd/display/dc/clk_mgr/dcn10/rv1_clk_mgr_vbios_smu.c  6
-rw-r--r--  drivers/gpu/drm/amd/display/dc/clk_mgr/dcn20/dcn20_clk_mgr.c  46
-rw-r--r--  drivers/gpu/drm/amd/display/dc/clk_mgr/dcn20/dcn20_clk_mgr.h  6
-rw-r--r--  drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.c  60
-rw-r--r--  drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr_vbios_smu.c  6
-rw-r--r--  drivers/gpu/drm/amd/display/dc/core/dc.c  354
-rw-r--r--  drivers/gpu/drm/amd/display/dc/core/dc_debug.c  10
-rw-r--r--  drivers/gpu/drm/amd/display/dc/core/dc_link.c  304
-rw-r--r--  drivers/gpu/drm/amd/display/dc/core/dc_link_ddc.c  28
-rw-r--r--  drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c  840
-rw-r--r--  drivers/gpu/drm/amd/display/dc/core/dc_link_hwss.c  101
-rw-r--r--  drivers/gpu/drm/amd/display/dc/core/dc_resource.c  193
-rw-r--r--  drivers/gpu/drm/amd/display/dc/core/dc_sink.c  8
-rw-r--r--  drivers/gpu/drm/amd/display/dc/core/dc_stream.c  125
-rw-r--r--  drivers/gpu/drm/amd/display/dc/core/dc_surface.c  34
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dc.h  93
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c  134
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dc_dmub_srv.h  60
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dc_dp_types.h  62
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dc_dsc.h  25
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dc_helper.c  297
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dc_hw_types.h  28
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dc_link.h  17
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dc_stream.h  29
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dc_types.h  54
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dce/dce_abm.h  4
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dce/dce_aux.c  46
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dce/dce_aux.h  4
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.c  6
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.h  10
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dce/dce_dmcu.c  90
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dce/dce_dmcu.h  13
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dce/dce_hwseq.c  2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dce/dce_hwseq.h  17
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dce/dce_i2c.c  19
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dce/dce_i2c_hw.c  6
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dce/dce_i2c_hw.h  8
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dce/dce_i2c_sw.c  43
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dce/dce_i2c_sw.h  6
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dce/dce_stream_encoder.c  21
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dce/dmub_psr.c  220
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dce/dmub_psr.h  47
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dce100/dce100_hw_sequencer.c  3
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dce100/dce100_hw_sequencer.h  1
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dce100/dce100_resource.c  10
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c  135
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.h  2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dce110/dce110_resource.c  13
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dce110/dce110_timing_generator.c  11
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dce110/dce110_timing_generator.h  3
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dce112/dce112_hw_sequencer.c  2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dce112/dce112_hw_sequencer.h  1
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dce112/dce112_resource.c  10
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dce120/dce120_hw_sequencer.c  2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dce120/dce120_hw_sequencer.h  1
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dce120/dce120_resource.c  14
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dce120/dce120_timing_generator.c  11
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dce80/dce80_hw_sequencer.c  2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dce80/dce80_hw_sequencer.h  1
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c  10
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn10/Makefile  3
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.c  6
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.h  4
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp_cm.c  30
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp_dscl.c  2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dwb.c  4
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dwb.h  2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubbub.c  3
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubbub.h  8
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.c  7
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.h  3
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c  731
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.h  182
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer_debug.h  43
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn10/dcn10_init.c  111
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn10/dcn10_init.h  33
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn10/dcn10_ipp.c  4
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn10/dcn10_ipp.h  6
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn10/dcn10_link_encoder.h  30
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn10/dcn10_mpc.c  21
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn10/dcn10_opp.c  5
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.c  24
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.h  13
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c  12
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.c  6
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.h  9
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn20/Makefile  12
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dccg.c  20
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dpp.c  26
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dpp.h  64
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dpp_cm.c  158
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dsc.c  30
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dsc.h  2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubbub.c  6
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubp.c  316
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubp.h  16
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c  579
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.h  148
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn20/dcn20_init.c  133
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn20/dcn20_init.h  33
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn20/dcn20_link_encoder.c  7
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn20/dcn20_link_encoder.h  182
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn20/dcn20_mpc.c  55
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn20/dcn20_mpc.h  22
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn20/dcn20_opp.c  16
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn20/dcn20_opp.h  1
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn20/dcn20_optc.c  45
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn20/dcn20_optc.h  5
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c  202
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.h  5
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn20/dcn20_stream_encoder.c  15
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn20/dcn20_stream_encoder.h  1
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn21/Makefile  11
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hubp.c  718
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hubp.h  1
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hwseq.c  14
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hwseq.h  16
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn21/dcn21_init.c  142
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn21/dcn21_init.h  33
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn21/dcn21_link_encoder.c  2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn21/dcn21_link_encoder.h  39
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c  152
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dm_helpers.h  2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dm_pp_smu.h  10
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dm_services.h  10
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dm_services_types.h  3
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dml/Makefile  17
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dml/dcn20/display_mode_vba_20.c  172
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dml/dcn20/display_mode_vba_20v2.c  177
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dml/dcn20/display_rq_dlg_calc_20.c  27
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dml/dcn20/display_rq_dlg_calc_20v2.c  24
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dml/dcn21/display_mode_vba_21.c  169
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dml/dcn21/display_rq_dlg_calc_21.c  30
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dml/display_mode_enums.h  20
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dml/display_mode_lib.c  12
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dml/display_mode_lib.h  8
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dml/display_mode_structs.h  14
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dml/display_mode_vba.c  32
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dml/display_mode_vba.h  132
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dml/dml_common_defs.c  2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dml/dml_inline_defs.h  2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dsc/Makefile  8
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dsc/dc_dsc.c  132
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dsc/dscc_types.h  2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dsc/qp_tables.h  2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dsc/rc_calc.c  2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dsc/rc_calc.h  2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dsc/rc_calc_dpi.c  2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/gpio/Makefile  9
-rw-r--r--  drivers/gpu/drm/amd/display/dc/gpio/dcn20/hw_factory_dcn20.c  14
-rw-r--r--  drivers/gpu/drm/amd/display/dc/gpio/dcn20/hw_factory_dcn20.h  2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/gpio/dcn20/hw_translate_dcn20.c  2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/gpio/dcn20/hw_translate_dcn20.h  2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/gpio/dcn21/hw_factory_dcn21.c  2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/gpio/dcn21/hw_factory_dcn21.h  2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/gpio/dcn21/hw_translate_dcn21.c  2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/gpio/dcn21/hw_translate_dcn21.h  2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/gpio/ddc_regs.h  12
-rw-r--r--  drivers/gpu/drm/amd/display/dc/gpio/hw_ddc.c  16
-rw-r--r--  drivers/gpu/drm/amd/display/dc/gpio/hw_factory.c  12
-rw-r--r--  drivers/gpu/drm/amd/display/dc/gpio/hw_generic.c  23
-rw-r--r--  drivers/gpu/drm/amd/display/dc/gpio/hw_hpd.c  32
-rw-r--r--  drivers/gpu/drm/amd/display/dc/gpio/hw_translate.c  12
-rw-r--r--  drivers/gpu/drm/amd/display/dc/inc/core_status.h  2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/inc/core_types.h  37
-rw-r--r--  drivers/gpu/drm/amd/display/dc/inc/dc_link_ddc.h  2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/inc/dc_link_dp.h  13
-rw-r--r--  drivers/gpu/drm/amd/display/dc/inc/dcn_calc_math.h (renamed from drivers/gpu/drm/amd/display/dc/calcs/dcn_calc_math.h)  0
-rw-r--r--  drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr.h  7
-rw-r--r--  drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr_internal.h  12
-rw-r--r--  drivers/gpu/drm/amd/display/dc/inc/hw/dchubbub.h  4
-rw-r--r--  drivers/gpu/drm/amd/display/dc/inc/hw/dmcu.h  2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/inc/hw/dpp.h  32
-rw-r--r--  drivers/gpu/drm/amd/display/dc/inc/hw/dsc.h  2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/inc/hw/dwb.h  15
-rw-r--r--  drivers/gpu/drm/amd/display/dc/inc/hw/hubp.h  34
-rw-r--r--  drivers/gpu/drm/amd/display/dc/inc/hw/hw_shared.h  17
-rw-r--r--  drivers/gpu/drm/amd/display/dc/inc/hw/link_encoder.h  9
-rw-r--r--  drivers/gpu/drm/amd/display/dc/inc/hw/mem_input.h  2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/inc/hw/mpc.h  10
-rw-r--r--  drivers/gpu/drm/amd/display/dc/inc/hw/opp.h  5
-rw-r--r--  drivers/gpu/drm/amd/display/dc/inc/hw/stream_encoder.h  11
-rw-r--r--  drivers/gpu/drm/amd/display/dc/inc/hw/timing_generator.h  11
-rw-r--r--  drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h  370
-rw-r--r--  drivers/gpu/drm/amd/display/dc/inc/hw_sequencer_private.h  156
-rw-r--r--  drivers/gpu/drm/amd/display/dc/inc/link_hwss.h  6
-rw-r--r--  drivers/gpu/drm/amd/display/dc/inc/reg_helper.h  32
-rw-r--r--  drivers/gpu/drm/amd/display/dc/inc/resource.h  6
-rw-r--r--  drivers/gpu/drm/amd/display/dc/irq/Makefile  6
-rw-r--r--  drivers/gpu/drm/amd/display/dc/irq/dce110/irq_service_dce110.c  8
-rw-r--r--  drivers/gpu/drm/amd/display/dc/irq/dce120/irq_service_dce120.c  4
-rw-r--r--  drivers/gpu/drm/amd/display/dc/irq/dce80/irq_service_dce80.c  4
-rw-r--r--  drivers/gpu/drm/amd/display/dc/irq/dcn10/irq_service_dcn10.c  4
-rw-r--r--  drivers/gpu/drm/amd/display/dc/irq/dcn20/irq_service_dcn20.c  4
-rw-r--r--  drivers/gpu/drm/amd/display/dc/irq/dcn21/irq_service_dcn21.c  4
-rw-r--r--  drivers/gpu/drm/amd/display/dc/irq/irq_service.c  2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/os_types.h  35
-rw-r--r--  drivers/gpu/drm/amd/display/dc/virtual/virtual_stream_encoder.c  9
-rw-r--r--  drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h  289
-rw-r--r--  drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd_dal.h  48
-rw-r--r--  drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd_vbios.h  41
-rw-r--r--  drivers/gpu/drm/amd/display/dmub/inc/dmub_fw_meta.h  63
-rw-r--r--  drivers/gpu/drm/amd/display/dmub/inc/dmub_rb.h  154
-rw-r--r--  drivers/gpu/drm/amd/display/dmub/inc/dmub_srv.h  506
-rw-r--r--  drivers/gpu/drm/amd/display/dmub/inc/dmub_trace_buffer.h  69
-rw-r--r--  drivers/gpu/drm/amd/display/dmub/inc/dmub_types.h  64
-rw-r--r--  drivers/gpu/drm/amd/display/dmub/src/Makefile  27
-rw-r--r--  drivers/gpu/drm/amd/display/dmub/src/dmub_dcn20.c  202
-rw-r--r--  drivers/gpu/drm/amd/display/dmub/src/dmub_dcn20.h  182
-rw-r--r--  drivers/gpu/drm/amd/display/dmub/src/dmub_dcn21.c  64
-rw-r--r--  drivers/gpu/drm/amd/display/dmub/src/dmub_dcn21.h  41
-rw-r--r--  drivers/gpu/drm/amd/display/dmub/src/dmub_reg.c  109
-rw-r--r--  drivers/gpu/drm/amd/display/dmub/src/dmub_reg.h  124
-rw-r--r--  drivers/gpu/drm/amd/display/dmub/src/dmub_srv.c  505
-rw-r--r--  drivers/gpu/drm/amd/display/include/dal_asic_id.h  32
-rw-r--r--  drivers/gpu/drm/amd/display/include/dal_types.h  4
-rw-r--r--  drivers/gpu/drm/amd/display/include/grph_object_ctrl_defs.h  3
-rw-r--r--  drivers/gpu/drm/amd/display/include/link_service_types.h  7
-rw-r--r--  drivers/gpu/drm/amd/display/include/logger_types.h  6
-rw-r--r--  drivers/gpu/drm/amd/display/modules/color/color_gamma.c  47
-rw-r--r--  drivers/gpu/drm/amd/display/modules/freesync/freesync.c  37
-rw-r--r--  drivers/gpu/drm/amd/display/modules/hdcp/Makefile  3
-rw-r--r--  drivers/gpu/drm/amd/display/modules/hdcp/hdcp.c  103
-rw-r--r--  drivers/gpu/drm/amd/display/modules/hdcp/hdcp.h  197
-rw-r--r--  drivers/gpu/drm/amd/display/modules/hdcp/hdcp1_execution.c  40
-rw-r--r--  drivers/gpu/drm/amd/display/modules/hdcp/hdcp1_transition.c  20
-rw-r--r--  drivers/gpu/drm/amd/display/modules/hdcp/hdcp2_execution.c  886
-rw-r--r--  drivers/gpu/drm/amd/display/modules/hdcp/hdcp2_transition.c  679
-rw-r--r--  drivers/gpu/drm/amd/display/modules/hdcp/hdcp_ddc.c  326
-rw-r--r--  drivers/gpu/drm/amd/display/modules/hdcp/hdcp_log.c  118
-rw-r--r--  drivers/gpu/drm/amd/display/modules/hdcp/hdcp_log.h  98
-rw-r--r--  drivers/gpu/drm/amd/display/modules/hdcp/hdcp_psp.c  510
-rw-r--r--  drivers/gpu/drm/amd/display/modules/hdcp/hdcp_psp.h  194
-rw-r--r--  drivers/gpu/drm/amd/display/modules/inc/mod_freesync.h  1
-rw-r--r--  drivers/gpu/drm/amd/display/modules/inc/mod_hdcp.h  15
-rw-r--r--  drivers/gpu/drm/amd/display/modules/inc/mod_info_packet.h  4
-rw-r--r--  drivers/gpu/drm/amd/display/modules/inc/mod_shared.h  2
-rw-r--r--  drivers/gpu/drm/amd/display/modules/info_packet/info_packet.c  46
-rw-r--r--  drivers/gpu/drm/amd/display/modules/power/power_helpers.c  7
-rw-r--r--  drivers/gpu/drm/amd/include/amd_shared.h  5
-rw-r--r--  drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_2_0_0_offset.h  4
-rw-r--r--  drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_2_0_0_sh_mask.h  9
-rw-r--r--  drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_2_1_0_offset.h  5
-rw-r--r--  drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_2_1_0_sh_mask.h  8
-rw-r--r--  drivers/gpu/drm/amd/include/asic_reg/df/df_3_6_offset.h  19
-rw-r--r--  drivers/gpu/drm/amd/include/asic_reg/df/df_3_6_sh_mask.h  8
-rw-r--r--  drivers/gpu/drm/amd/include/asic_reg/dpcs/dpcs_2_0_0_offset.h  647
-rw-r--r--  drivers/gpu/drm/amd/include/asic_reg/dpcs/dpcs_2_0_0_sh_mask.h  3912
-rw-r--r--  drivers/gpu/drm/amd/include/asic_reg/dpcs/dpcs_2_1_0_offset.h (renamed from drivers/gpu/drm/amd/include/asic_reg/dcn/dpcs_2_1_0_offset.h)  0
-rw-r--r--  drivers/gpu/drm/amd/include/asic_reg/dpcs/dpcs_2_1_0_sh_mask.h (renamed from drivers/gpu/drm/amd/include/asic_reg/dcn/dpcs_2_1_0_sh_mask.h)  0
-rw-r--r--  drivers/gpu/drm/amd/include/asic_reg/gc/gc_9_0_offset.h  8
-rw-r--r--  drivers/gpu/drm/amd/include/asic_reg/gc/gc_9_0_sh_mask.h  6
-rw-r--r--  drivers/gpu/drm/amd/include/asic_reg/mmhub/mmhub_1_0_offset.h  16
-rw-r--r--  drivers/gpu/drm/amd/include/asic_reg/mmhub/mmhub_1_0_sh_mask.h  122
-rw-r--r--  drivers/gpu/drm/amd/include/asic_reg/mmhub/mmhub_9_4_0_offset.h  53
-rw-r--r--  drivers/gpu/drm/amd/include/asic_reg/mmhub/mmhub_9_4_0_sh_mask.h  257
-rw-r--r--  drivers/gpu/drm/amd/include/asic_reg/umc/umc_6_1_1_offset.h  2
-rw-r--r--  drivers/gpu/drm/amd/include/asic_reg/umc/umc_6_1_2_offset.h  33
-rw-r--r--  drivers/gpu/drm/amd/include/asic_reg/umc/umc_6_1_2_sh_mask.h  91
-rw-r--r--  drivers/gpu/drm/amd/include/atomfirmware.h  14
-rw-r--r--  drivers/gpu/drm/amd/include/kgd_kfd_interface.h  6
-rw-r--r--  drivers/gpu/drm/amd/powerplay/amd_powerplay.c  6
-rw-r--r--  drivers/gpu/drm/amd/powerplay/amdgpu_smu.c  378
-rw-r--r--  drivers/gpu/drm/amd/powerplay/arcturus_ppt.c  161
-rw-r--r--  drivers/gpu/drm/amd/powerplay/hwmgr/hardwaremanager.c  18
-rw-r--r--  drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c  13
-rw-r--r--  drivers/gpu/drm/amd/powerplay/hwmgr/pp_psm.c  30
-rw-r--r--  drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.c  3
-rw-r--r--  drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c  10
-rw-r--r--  drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c  164
-rw-r--r--  drivers/gpu/drm/amd/powerplay/hwmgr/vega10_powertune.c  3
-rw-r--r--  drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.c  4
-rw-r--r--  drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c  2
-rw-r--r--  drivers/gpu/drm/amd/powerplay/inc/amdgpu_smu.h  24
-rw-r--r--  drivers/gpu/drm/amd/powerplay/inc/hwmgr.h  1
-rw-r--r--  drivers/gpu/drm/amd/powerplay/inc/smu11_driver_if_arcturus.h  14
-rw-r--r--  drivers/gpu/drm/amd/powerplay/inc/smu12_driver_if.h  7
-rw-r--r--  drivers/gpu/drm/amd/powerplay/inc/smu_v11_0.h  10
-rw-r--r--  drivers/gpu/drm/amd/powerplay/inc/smu_v12_0.h  15
-rw-r--r--  drivers/gpu/drm/amd/powerplay/navi10_ppt.c  126
-rw-r--r--  drivers/gpu/drm/amd/powerplay/navi10_ppt.h  14
-rw-r--r--  drivers/gpu/drm/amd/powerplay/renoir_ppt.c  228
-rw-r--r--  drivers/gpu/drm/amd/powerplay/smu_internal.h  10
-rw-r--r--  drivers/gpu/drm/amd/powerplay/smu_v11_0.c  202
-rw-r--r--  drivers/gpu/drm/amd/powerplay/smu_v12_0.c  133
-rw-r--r--  drivers/gpu/drm/amd/powerplay/smumgr/ci_smumgr.c  4
-rw-r--r--  drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.c  7
-rw-r--r--  drivers/gpu/drm/amd/powerplay/smumgr/smu10_smumgr.c  5
-rw-r--r--  drivers/gpu/drm/amd/powerplay/smumgr/smu9_smumgr.c  56
-rw-r--r--  drivers/gpu/drm/amd/powerplay/smumgr/vega10_smumgr.c  19
-rw-r--r--  drivers/gpu/drm/amd/powerplay/smumgr/vega12_smumgr.c  5
-rw-r--r--  drivers/gpu/drm/amd/powerplay/smumgr/vega20_smumgr.c  10
-rw-r--r--  drivers/gpu/drm/amd/powerplay/smumgr/vegam_smumgr.c  27
-rw-r--r--  drivers/gpu/drm/amd/powerplay/vega20_ppt.c  13
-rw-r--r--  drivers/gpu/drm/arc/arcpgu_crtc.c  36
-rw-r--r--  drivers/gpu/drm/arc/arcpgu_regs.h  2
-rw-r--r--  drivers/gpu/drm/arm/display/Kconfig  6
-rw-r--r--  drivers/gpu/drm/arm/display/include/malidp_product.h  3
-rw-r--r--  drivers/gpu/drm/arm/display/komeda/Makefile  5
-rw-r--r--  drivers/gpu/drm/arm/display/komeda/d71/d71_component.c  22
-rw-r--r--  drivers/gpu/drm/arm/display/komeda/d71/d71_dev.c  80
-rw-r--r--  drivers/gpu/drm/arm/display/komeda/d71/d71_regs.h  16
-rw-r--r--  drivers/gpu/drm/arm/display/komeda/komeda_color_mgmt.c  66
-rw-r--r--  drivers/gpu/drm/arm/display/komeda/komeda_color_mgmt.h  10
-rw-r--r--  drivers/gpu/drm/arm/display/komeda/komeda_crtc.c  5
-rw-r--r--  drivers/gpu/drm/arm/display/komeda/komeda_dev.c  129
-rw-r--r--  drivers/gpu/drm/arm/display/komeda/komeda_dev.h  47
-rw-r--r--  drivers/gpu/drm/arm/display/komeda/komeda_drv.c  52
-rw-r--r--  drivers/gpu/drm/arm/display/komeda/komeda_event.c  26
-rw-r--r--  drivers/gpu/drm/arm/display/komeda/komeda_kms.c  8
-rw-r--r--  drivers/gpu/drm/arm/display/komeda/komeda_pipeline.h  3
-rw-r--r--  drivers/gpu/drm/arm/display/komeda/komeda_pipeline_state.c  6
-rw-r--r--  drivers/gpu/drm/arm/malidp_planes.c  2
-rw-r--r--  drivers/gpu/drm/armada/armada_fbdev.c  2
-rw-r--r--  drivers/gpu/drm/armada/armada_gem.c  12
-rw-r--r--  drivers/gpu/drm/ast/ast_drv.c  67
-rw-r--r--  drivers/gpu/drm/ast/ast_drv.h  20
-rw-r--r--  drivers/gpu/drm/ast/ast_main.c  54
-rw-r--r--  drivers/gpu/drm/ast/ast_mode.c  812
-rw-r--r--  drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_crtc.c  18
-rw-r--r--  drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.c  27
-rw-r--r--  drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_plane.c  2
-rw-r--r--  drivers/gpu/drm/bochs/bochs_hw.c  2
-rw-r--r--  drivers/gpu/drm/bridge/Kconfig  26
-rw-r--r--  drivers/gpu/drm/bridge/Makefile  6
-rw-r--r--  drivers/gpu/drm/bridge/analogix-anx78xx.h  703
-rw-r--r--  drivers/gpu/drm/bridge/analogix/Kconfig  23
-rw-r--r--  drivers/gpu/drm/bridge/analogix/Makefile  4
-rw-r--r--  drivers/gpu/drm/bridge/analogix/analogix-anx6345.c  817
-rw-r--r--  drivers/gpu/drm/bridge/analogix/analogix-anx78xx.c (renamed from drivers/gpu/drm/bridge/analogix-anx78xx.c)  146
-rw-r--r--  drivers/gpu/drm/bridge/analogix/analogix-anx78xx.h  249
-rw-r--r--  drivers/gpu/drm/bridge/analogix/analogix-i2c-dptx.c  165
-rw-r--r--  drivers/gpu/drm/bridge/analogix/analogix-i2c-dptx.h  256
-rw-r--r--  drivers/gpu/drm/bridge/analogix/analogix-i2c-txcommon.h  234
-rw-r--r--  drivers/gpu/drm/bridge/analogix/analogix_dp_core.c  2
-rw-r--r--  drivers/gpu/drm/bridge/cdns-dsi.c  6
-rw-r--r--  drivers/gpu/drm/bridge/lvds-codec.c  151
-rw-r--r--  drivers/gpu/drm/bridge/lvds-encoder.c  155
-rw-r--r--  drivers/gpu/drm/bridge/panel.c  20
-rw-r--r--  drivers/gpu/drm/bridge/parade-ps8622.c  2
-rw-r--r--  drivers/gpu/drm/bridge/synopsys/dw-hdmi-ahb-audio.c  2
-rw-r--r--  drivers/gpu/drm/bridge/synopsys/dw-mipi-dsi.c  40
-rw-r--r--  drivers/gpu/drm/bridge/tc358764.c  2
-rw-r--r--  drivers/gpu/drm/bridge/tc358767.c  2
-rw-r--r--  drivers/gpu/drm/bridge/ti-sn65dsi86.c  2
-rw-r--r--  drivers/gpu/drm/drm_agpsupport.c  4
-rw-r--r--  drivers/gpu/drm/drm_atomic.c  30
-rw-r--r--  drivers/gpu/drm/drm_atomic_helper.c  149
-rw-r--r--  drivers/gpu/drm/drm_atomic_state_helper.c  78
-rw-r--r--  drivers/gpu/drm/drm_atomic_uapi.c  16
-rw-r--r--  drivers/gpu/drm/drm_bridge.c  280
-rw-r--r--  drivers/gpu/drm/drm_client.c  10
-rw-r--r--  drivers/gpu/drm/drm_client_modeset.c  72
-rw-r--r--  drivers/gpu/drm/drm_color_mgmt.c  40
-rw-r--r--  drivers/gpu/drm/drm_crtc_helper.c  2
-rw-r--r--  drivers/gpu/drm/drm_debugfs_crc.c  9
-rw-r--r--  drivers/gpu/drm/drm_dp_aux_dev.c  12
-rw-r--r--  drivers/gpu/drm/drm_dp_helper.c  45
-rw-r--r--  drivers/gpu/drm/drm_dp_mst_topology.c  647
-rw-r--r--  drivers/gpu/drm/drm_drv.c  5
-rw-r--r--  drivers/gpu/drm/drm_edid.c  279
-rw-r--r--  drivers/gpu/drm/drm_encoder.c  15
-rw-r--r--  drivers/gpu/drm/drm_fb_cma_helper.c  1
-rw-r--r--  drivers/gpu/drm/drm_fb_helper.c  206
-rw-r--r--  drivers/gpu/drm/drm_file.c  44
-rw-r--r--  drivers/gpu/drm/drm_fourcc.c  8
-rw-r--r--  drivers/gpu/drm/drm_gem.c  3
-rw-r--r--  drivers/gpu/drm/drm_gem_framebuffer_helper.c  5
-rw-r--r--  drivers/gpu/drm/drm_gem_shmem_helper.c  3
-rw-r--r--  drivers/gpu/drm/drm_gem_vram_helper.c  53
-rw-r--r--  drivers/gpu/drm/drm_internal.h  22
-rw-r--r--  drivers/gpu/drm/drm_ioctl.c  4
-rw-r--r--  drivers/gpu/drm/drm_lock.c  3
-rw-r--r--  drivers/gpu/drm/drm_mipi_dbi.c  4
-rw-r--r--  drivers/gpu/drm/drm_mipi_dsi.c  57
-rw-r--r--  drivers/gpu/drm/drm_mode_config.c  28
-rw-r--r--  drivers/gpu/drm/drm_mode_object.c  14
-rw-r--r--  drivers/gpu/drm/drm_modes.c  255
-rw-r--r--  drivers/gpu/drm/drm_of.c  116
-rw-r--r--  drivers/gpu/drm/drm_panel.c  109
-rw-r--r--  drivers/gpu/drm/drm_pci.c  17
-rw-r--r--  drivers/gpu/drm/drm_prime.c  9
-rw-r--r--  drivers/gpu/drm/drm_print.c  18
-rw-r--r--  drivers/gpu/drm/drm_probe_helper.c  4
-rw-r--r--  drivers/gpu/drm/drm_rect.c  42
-rw-r--r--  drivers/gpu/drm/etnaviv/etnaviv_drv.c  18
-rw-r--r--  drivers/gpu/drm/etnaviv/etnaviv_drv.h  11
-rw-r--r--  drivers/gpu/drm/etnaviv/etnaviv_gem.c  4
-rw-r--r--  drivers/gpu/drm/etnaviv/etnaviv_gem.h  2
-rw-r--r--  drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c  8
-rw-r--r--  drivers/gpu/drm/etnaviv/etnaviv_gpu.c  5
-rw-r--r--  drivers/gpu/drm/etnaviv/etnaviv_gpu.h  5
-rw-r--r--  drivers/gpu/drm/exynos/Kconfig  6
-rw-r--r--  drivers/gpu/drm/exynos/exynos5433_drm_decon.c  10
-rw-r--r--  drivers/gpu/drm/exynos/exynos7_drm_decon.c  10
-rw-r--r--  drivers/gpu/drm/exynos/exynos_dp.c  1
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_crtc.c  8
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_dpi.c  4
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_drv.h  8
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_dsi.c  34
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_fbdev.c  2
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_fimd.c  10
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_vidi.c  8
-rw-r--r--  drivers/gpu/drm/exynos/exynos_hdmi.c  6
-rw-r--r--  drivers/gpu/drm/exynos/exynos_mixer.c  8
-rw-r--r--  drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_rgb.c  2
-rw-r--r--  drivers/gpu/drm/gma500/accel_2d.c  19
-rw-r--r--  drivers/gpu/drm/gma500/cdv_intel_display.c  8
-rw-r--r--  drivers/gpu/drm/gma500/cdv_intel_dp.c  3
-rw-r--r--  drivers/gpu/drm/gma500/framebuffer.c  135
-rw-r--r--  drivers/gpu/drm/gma500/framebuffer.h  15
-rw-r--r--  drivers/gpu/drm/gma500/gma_display.c  48
-rw-r--r--  drivers/gpu/drm/gma500/gma_display.h  6
-rw-r--r--  drivers/gpu/drm/gma500/gtt.c  2
-rw-r--r--  drivers/gpu/drm/gma500/mdfld_intel_display.c  23
-rw-r--r--  drivers/gpu/drm/gma500/oaktrail_hdmi.c  4
-rw-r--r--  drivers/gpu/drm/gma500/oaktrail_lvds.c  1
-rw-r--r--  drivers/gpu/drm/gma500/psb_drv.c  46
-rw-r--r--  drivers/gpu/drm/gma500/psb_drv.h  8
-rw-r--r--  drivers/gpu/drm/gma500/psb_intel_display.c  1
-rw-r--r--  drivers/gpu/drm/gma500/psb_intel_drv.h  3
-rw-r--r--  drivers/gpu/drm/gma500/psb_irq.c  23
-rw-r--r--  drivers/gpu/drm/gma500/tc35876x-dsi-lvds.c  88
-rw-r--r--  drivers/gpu/drm/hisilicon/hibmc/Makefile  2
-rw-r--r--  drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_de.c  4
-rw-r--r--  drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_drv.c  8
-rw-r--r--  drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_drv.h  26
-rw-r--r--  drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_fbdev.c  240
-rw-r--r--  drivers/gpu/drm/hisilicon/hibmc/hibmc_ttm.c  116
-rw-r--r--  drivers/gpu/drm/i810/i810_dma.c  2
-rw-r--r--  drivers/gpu/drm/i810/i810_drv.c  3
-rw-r--r--  drivers/gpu/drm/i915/.gitignore  1
-rw-r--r--  drivers/gpu/drm/i915/Kconfig.debug  2
-rw-r--r--  drivers/gpu/drm/i915/Makefile  42
-rw-r--r--  drivers/gpu/drm/i915/display/Makefile  6
-rw-r--r--  drivers/gpu/drm/i915/display/icl_dsi.c  289
-rw-r--r--  drivers/gpu/drm/i915/display/intel_atomic.c  87
-rw-r--r--  drivers/gpu/drm/i915/display/intel_atomic.h  8
-rw-r--r--  drivers/gpu/drm/i915/display/intel_atomic_plane.c  138
-rw-r--r--  drivers/gpu/drm/i915/display/intel_atomic_plane.h  5
-rw-r--r--  drivers/gpu/drm/i915/display/intel_audio.c  16
-rw-r--r--  drivers/gpu/drm/i915/display/intel_bios.c  563
-rw-r--r--  drivers/gpu/drm/i915/display/intel_bios.h  5
-rw-r--r--  drivers/gpu/drm/i915/display/intel_bw.c  36
-rw-r--r--  drivers/gpu/drm/i915/display/intel_bw.h  1
-rw-r--r--  drivers/gpu/drm/i915/display/intel_cdclk.c  32
-rw-r--r--  drivers/gpu/drm/i915/display/intel_color.c  198
-rw-r--r--  drivers/gpu/drm/i915/display/intel_crt.c  58
-rw-r--r--  drivers/gpu/drm/i915/display/intel_ddi.c  578
-rw-r--r--  drivers/gpu/drm/i915/display/intel_ddi.h  2
-rw-r--r--  drivers/gpu/drm/i915/display/intel_display.c  4007
-rw-r--r--  drivers/gpu/drm/i915/display/intel_display.h  49
-rw-r--r--  drivers/gpu/drm/i915/display/intel_display_power.c  81
-rw-r--r--  drivers/gpu/drm/i915/display/intel_display_power.h  2
-rw-r--r--drivers/gpu/drm/i915/display/intel_display_types.h107
-rw-r--r--drivers/gpu/drm/i915/display/intel_dp.c263
-rw-r--r--drivers/gpu/drm/i915/display/intel_dp_aux_backlight.c15
-rw-r--r--drivers/gpu/drm/i915/display/intel_dp_mst.c254
-rw-r--r--drivers/gpu/drm/i915/display/intel_dp_mst.h5
-rw-r--r--drivers/gpu/drm/i915/display/intel_dpio_phy.c32
-rw-r--r--drivers/gpu/drm/i915/display/intel_dpll_mgr.c24
-rw-r--r--drivers/gpu/drm/i915/display/intel_dsb.c37
-rw-r--r--drivers/gpu/drm/i915/display/intel_dsb.h2
-rw-r--r--drivers/gpu/drm/i915/display/intel_dsi.h14
-rw-r--r--drivers/gpu/drm/i915/display/intel_dsi_dcs_backlight.c8
-rw-r--r--drivers/gpu/drm/i915/display/intel_dsi_vbt.c229
-rw-r--r--drivers/gpu/drm/i915/display/intel_dvo.c22
-rw-r--r--drivers/gpu/drm/i915/display/intel_fbc.c309
-rw-r--r--drivers/gpu/drm/i915/display/intel_fbc.h11
-rw-r--r--drivers/gpu/drm/i915/display/intel_fbdev.c2
-rw-r--r--drivers/gpu/drm/i915/display/intel_fifo_underrun.c24
-rw-r--r--drivers/gpu/drm/i915/display/intel_hdcp.c2
-rw-r--r--drivers/gpu/drm/i915/display/intel_hdmi.c152
-rw-r--r--drivers/gpu/drm/i915/display/intel_hdmi.h2
-rw-r--r--drivers/gpu/drm/i915/display/intel_hotplug.c4
-rw-r--r--drivers/gpu/drm/i915/display/intel_lspcon.c12
-rw-r--r--drivers/gpu/drm/i915/display/intel_lvds.c12
-rw-r--r--drivers/gpu/drm/i915/display/intel_opregion.c7
-rw-r--r--drivers/gpu/drm/i915/display/intel_overlay.c12
-rw-r--r--drivers/gpu/drm/i915/display/intel_panel.c30
-rw-r--r--drivers/gpu/drm/i915/display/intel_pipe_crc.c8
-rw-r--r--drivers/gpu/drm/i915/display/intel_psr.c177
-rw-r--r--drivers/gpu/drm/i915/display/intel_psr.h5
-rw-r--r--drivers/gpu/drm/i915/display/intel_sdvo.c51
-rw-r--r--drivers/gpu/drm/i915/display/intel_sprite.c452
-rw-r--r--drivers/gpu/drm/i915/display/intel_tv.c16
-rw-r--r--drivers/gpu/drm/i915/display/intel_vbt_defs.h62
-rw-r--r--drivers/gpu/drm/i915/display/intel_vdsc.c305
-rw-r--r--drivers/gpu/drm/i915/display/intel_vdsc.h11
-rw-r--r--drivers/gpu/drm/i915/display/vlv_dsi.c95
-rw-r--r--drivers/gpu/drm/i915/display/vlv_dsi_pll.c12
-rw-r--r--drivers/gpu/drm/i915/gem/Makefile5
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_busy.c12
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_clflush.c11
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_context.c385
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_context.h49
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_context_types.h28
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_dmabuf.c36
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_domain.c184
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c322
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_ioctls.h4
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_lmem.c43
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_lmem.h8
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_mman.c529
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_mman.h31
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_object.c47
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_object.h35
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_object_types.h29
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_pages.c91
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_phys.c2
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_pm.c24
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_region.c5
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_shmem.c2
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_shrinker.c5
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_stolen.c221
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_tiling.c1
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_userptr.c33
-rw-r--r--drivers/gpu/drm/i915/gem/selftests/huge_gem_object.c11
-rw-r--r--drivers/gpu/drm/i915/gem/selftests/huge_gem_object.h6
-rw-r--r--drivers/gpu/drm/i915/gem/selftests/huge_pages.c66
-rw-r--r--drivers/gpu/drm/i915/gem/selftests/i915_gem_client_blt.c2
-rw-r--r--drivers/gpu/drm/i915/gem/selftests/i915_gem_coherency.c17
-rw-r--r--drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c171
-rw-r--r--drivers/gpu/drm/i915/gem/selftests/i915_gem_dmabuf.c101
-rw-r--r--drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c577
-rw-r--r--drivers/gpu/drm/i915/gem/selftests/i915_gem_object_blt.c125
-rw-r--r--drivers/gpu/drm/i915/gem/selftests/mock_context.c21
-rw-r--r--drivers/gpu/drm/i915/gem/selftests/mock_context.h5
-rw-r--r--drivers/gpu/drm/i915/gem/selftests/mock_dmabuf.c16
-rw-r--r--drivers/gpu/drm/i915/gem/selftests/mock_dmabuf.h2
-rw-r--r--drivers/gpu/drm/i915/gem/selftests/mock_gem_object.h2
-rw-r--r--drivers/gpu/drm/i915/gt/Makefile5
-rw-r--r--drivers/gpu/drm/i915/gt/debugfs_engines.c36
-rw-r--r--drivers/gpu/drm/i915/gt/debugfs_engines.h14
-rw-r--r--drivers/gpu/drm/i915/gt/debugfs_gt.c42
-rw-r--r--drivers/gpu/drm/i915/gt/debugfs_gt.h39
-rw-r--r--drivers/gpu/drm/i915/gt/debugfs_gt_pm.c601
-rw-r--r--drivers/gpu/drm/i915/gt/debugfs_gt_pm.h14
-rw-r--r--drivers/gpu/drm/i915/gt/gen6_ppgtt.c483
-rw-r--r--drivers/gpu/drm/i915/gt/gen6_ppgtt.h76
-rw-r--r--drivers/gpu/drm/i915/gt/gen8_ppgtt.c724
-rw-r--r--drivers/gpu/drm/i915/gt/gen8_ppgtt.h13
-rw-r--r--drivers/gpu/drm/i915/gt/intel_breadcrumbs.c66
-rw-r--r--drivers/gpu/drm/i915/gt/intel_context.c229
-rw-r--r--drivers/gpu/drm/i915/gt/intel_context.h85
-rw-r--r--drivers/gpu/drm/i915/gt/intel_context_types.h12
-rw-r--r--drivers/gpu/drm/i915/gt/intel_engine.h25
-rw-r--r--drivers/gpu/drm/i915/gt/intel_engine_cs.c211
-rw-r--r--drivers/gpu/drm/i915/gt/intel_engine_heartbeat.c22
-rw-r--r--drivers/gpu/drm/i915/gt/intel_engine_pm.c63
-rw-r--r--drivers/gpu/drm/i915/gt/intel_engine_pm.h21
-rw-r--r--drivers/gpu/drm/i915/gt/intel_engine_types.h30
-rw-r--r--drivers/gpu/drm/i915/gt/intel_engine_user.c4
-rw-r--r--drivers/gpu/drm/i915/gt/intel_ggtt.c1486
-rw-r--r--drivers/gpu/drm/i915/gt/intel_gpu_commands.h29
-rw-r--r--drivers/gpu/drm/i915/gt/intel_gt.c280
-rw-r--r--drivers/gpu/drm/i915/gt/intel_gt.h13
-rw-r--r--drivers/gpu/drm/i915/gt/intel_gt_irq.c12
-rw-r--r--drivers/gpu/drm/i915/gt/intel_gt_pm.c80
-rw-r--r--drivers/gpu/drm/i915/gt/intel_gt_pm.h7
-rw-r--r--drivers/gpu/drm/i915/gt/intel_gt_requests.c51
-rw-r--r--drivers/gpu/drm/i915/gt/intel_gt_requests.h1
-rw-r--r--drivers/gpu/drm/i915/gt/intel_gt_types.h7
-rw-r--r--drivers/gpu/drm/i915/gt/intel_gtt.c598
-rw-r--r--drivers/gpu/drm/i915/gt/intel_gtt.h587
-rw-r--r--drivers/gpu/drm/i915/gt/intel_lrc.c645
-rw-r--r--drivers/gpu/drm/i915/gt/intel_lrc.h7
-rw-r--r--drivers/gpu/drm/i915/gt/intel_lrc_reg.h4
-rw-r--r--drivers/gpu/drm/i915/gt/intel_mocs.c179
-rw-r--r--drivers/gpu/drm/i915/gt/intel_ppgtt.c218
-rw-r--r--drivers/gpu/drm/i915/gt/intel_rc6.c149
-rw-r--r--drivers/gpu/drm/i915/gt/intel_rc6.h6
-rw-r--r--drivers/gpu/drm/i915/gt/intel_rc6_types.h4
-rw-r--r--drivers/gpu/drm/i915/gt/intel_renderstate.c97
-rw-r--r--drivers/gpu/drm/i915/gt/intel_renderstate.h17
-rw-r--r--drivers/gpu/drm/i915/gt/intel_reset.c142
-rw-r--r--drivers/gpu/drm/i915/gt/intel_ring_submission.c245
-rw-r--r--drivers/gpu/drm/i915/gt/intel_rps.c123
-rw-r--r--drivers/gpu/drm/i915/gt/intel_rps.h3
-rw-r--r--drivers/gpu/drm/i915/gt/intel_timeline.c91
-rw-r--r--drivers/gpu/drm/i915/gt/intel_timeline.h4
-rw-r--r--drivers/gpu/drm/i915/gt/intel_timeline_types.h14
-rw-r--r--drivers/gpu/drm/i915/gt/intel_workarounds.c49
-rw-r--r--drivers/gpu/drm/i915/gt/mock_engine.c57
-rw-r--r--drivers/gpu/drm/i915/gt/selftest_context.c120
-rw-r--r--drivers/gpu/drm/i915/gt/selftest_engine_cs.c360
-rw-r--r--drivers/gpu/drm/i915/gt/selftest_engine_heartbeat.c36
-rw-r--r--drivers/gpu/drm/i915/gt/selftest_gt_pm.c19
-rw-r--r--drivers/gpu/drm/i915/gt/selftest_hangcheck.c180
-rw-r--r--drivers/gpu/drm/i915/gt/selftest_lrc.c608
-rw-r--r--drivers/gpu/drm/i915/gt/selftest_mocs.c419
-rw-r--r--drivers/gpu/drm/i915/gt/selftest_rc6.c203
-rw-r--r--drivers/gpu/drm/i915/gt/selftest_rc6.h13
-rw-r--r--drivers/gpu/drm/i915/gt/selftest_timeline.c6
-rw-r--r--drivers/gpu/drm/i915/gt/selftest_workarounds.c72
-rw-r--r--drivers/gpu/drm/i915/gt/selftests/mock_timeline.c2
-rw-r--r--drivers/gpu/drm/i915/gt/selftests/mock_timeline.h2
-rw-r--r--drivers/gpu/drm/i915/gt/uc/Makefile5
-rw-r--r--drivers/gpu/drm/i915/gt/uc/intel_guc.c69
-rw-r--r--drivers/gpu/drm/i915/gt/uc/intel_guc.h46
-rw-r--r--drivers/gpu/drm/i915/gt/uc/intel_guc_ads.c24
-rw-r--r--drivers/gpu/drm/i915/gt/uc/intel_guc_ct.c309
-rw-r--r--drivers/gpu/drm/i915/gt/uc/intel_guc_ct.h52
-rw-r--r--drivers/gpu/drm/i915/gt/uc/intel_guc_fw.c2
-rw-r--r--drivers/gpu/drm/i915/gt/uc/intel_guc_fwif.h1
-rw-r--r--drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c733
-rw-r--r--drivers/gpu/drm/i915/gt/uc/intel_guc_submission.h54
-rw-r--r--drivers/gpu/drm/i915/gt/uc/intel_huc_fw.c2
-rw-r--r--drivers/gpu/drm/i915/gt/uc/intel_uc.c143
-rw-r--r--drivers/gpu/drm/i915/gt/uc/intel_uc.h36
-rw-r--r--drivers/gpu/drm/i915/gt/uc/intel_uc_fw.c58
-rw-r--r--drivers/gpu/drm/i915/gt/uc/intel_uc_fw.h5
-rw-r--r--drivers/gpu/drm/i915/gt/uc/selftest_guc.c299
-rw-r--r--drivers/gpu/drm/i915/gvt/cmd_parser.h4
-rw-r--r--drivers/gpu/drm/i915/gvt/display.h5
-rw-r--r--drivers/gpu/drm/i915/gvt/edid.h4
-rw-r--r--drivers/gpu/drm/i915/gvt/execlist.h2
-rw-r--r--drivers/gpu/drm/i915/gvt/fb_decoder.h2
-rw-r--r--drivers/gpu/drm/i915/gvt/gtt.c2
-rw-r--r--drivers/gpu/drm/i915/gvt/handlers.c8
-rw-r--r--drivers/gpu/drm/i915/gvt/hypercall.h4
-rw-r--r--drivers/gpu/drm/i915/gvt/interrupt.h3
-rw-r--r--drivers/gpu/drm/i915/gvt/mmio.h2
-rw-r--r--drivers/gpu/drm/i915/gvt/page_track.h3
-rw-r--r--drivers/gpu/drm/i915/gvt/sched_policy.h3
-rw-r--r--drivers/gpu/drm/i915/gvt/scheduler.c43
-rw-r--r--drivers/gpu/drm/i915/i915_active.c142
-rw-r--r--drivers/gpu/drm/i915/i915_active.h28
-rw-r--r--drivers/gpu/drm/i915/i915_active_types.h15
-rw-r--r--drivers/gpu/drm/i915/i915_buddy.c4
-rw-r--r--drivers/gpu/drm/i915/i915_cmd_parser.c318
-rw-r--r--drivers/gpu/drm/i915/i915_debugfs.c421
-rw-r--r--drivers/gpu/drm/i915/i915_drv.c41
-rw-r--r--drivers/gpu/drm/i915/i915_drv.h111
-rw-r--r--drivers/gpu/drm/i915/i915_gem.c383
-rw-r--r--drivers/gpu/drm/i915/i915_gem.h10
-rw-r--r--drivers/gpu/drm/i915/i915_gem_evict.c39
-rw-r--r--drivers/gpu/drm/i915/i915_gem_fence_reg.c3
-rw-r--r--drivers/gpu/drm/i915/i915_gem_gtt.c3540
-rw-r--r--drivers/gpu/drm/i915/i915_gem_gtt.h629
-rw-r--r--drivers/gpu/drm/i915/i915_getparam.c1
-rw-r--r--drivers/gpu/drm/i915/i915_globals.c53
-rw-r--r--drivers/gpu/drm/i915/i915_gpu_error.c1257
-rw-r--r--drivers/gpu/drm/i915/i915_gpu_error.h329
-rw-r--r--drivers/gpu/drm/i915/i915_irq.c84
-rw-r--r--drivers/gpu/drm/i915/i915_memcpy.c75
-rw-r--r--drivers/gpu/drm/i915/i915_memcpy.h2
-rw-r--r--drivers/gpu/drm/i915/i915_mm.c69
-rw-r--r--drivers/gpu/drm/i915/i915_pci.c247
-rw-r--r--drivers/gpu/drm/i915/i915_perf.c62
-rw-r--r--drivers/gpu/drm/i915/i915_perf.h2
-rw-r--r--drivers/gpu/drm/i915/i915_perf_types.h1
-rw-r--r--drivers/gpu/drm/i915/i915_pmu.c74
-rw-r--r--drivers/gpu/drm/i915/i915_reg.h164
-rw-r--r--drivers/gpu/drm/i915/i915_request.c156
-rw-r--r--drivers/gpu/drm/i915/i915_request.h70
-rw-r--r--drivers/gpu/drm/i915/i915_scheduler.c14
-rw-r--r--drivers/gpu/drm/i915/i915_scheduler.h1
-rw-r--r--drivers/gpu/drm/i915/i915_selftest.h4
-rw-r--r--drivers/gpu/drm/i915/i915_sw_fence.c40
-rw-r--r--drivers/gpu/drm/i915/i915_sw_fence.h5
-rw-r--r--drivers/gpu/drm/i915/i915_sw_fence_work.c15
-rw-r--r--drivers/gpu/drm/i915/i915_sysfs.c37
-rw-r--r--drivers/gpu/drm/i915/i915_trace.h6
-rw-r--r--drivers/gpu/drm/i915/i915_utils.c2
-rw-r--r--drivers/gpu/drm/i915/i915_utils.h2
-rw-r--r--drivers/gpu/drm/i915/i915_vma.c92
-rw-r--r--drivers/gpu/drm/i915/i915_vma.h147
-rw-r--r--drivers/gpu/drm/i915/i915_vma_types.h294
-rw-r--r--drivers/gpu/drm/i915/intel_device_info.c45
-rw-r--r--drivers/gpu/drm/i915/intel_device_info.h9
-rw-r--r--drivers/gpu/drm/i915/intel_memory_region.c32
-rw-r--r--drivers/gpu/drm/i915/intel_memory_region.h14
-rw-r--r--drivers/gpu/drm/i915/intel_pch.c47
-rw-r--r--drivers/gpu/drm/i915/intel_pch.h1
-rw-r--r--drivers/gpu/drm/i915/intel_pm.c721
-rw-r--r--drivers/gpu/drm/i915/intel_pm.h2
-rw-r--r--drivers/gpu/drm/i915/intel_region_lmem.c18
-rw-r--r--drivers/gpu/drm/i915/intel_sideband.c29
-rw-r--r--drivers/gpu/drm/i915/intel_uncore.c25
-rw-r--r--drivers/gpu/drm/i915/intel_wakeref.c5
-rw-r--r--drivers/gpu/drm/i915/intel_wakeref.h28
-rw-r--r--drivers/gpu/drm/i915/oa/Makefile7
-rw-r--r--drivers/gpu/drm/i915/selftests/i915_active.c43
-rw-r--r--drivers/gpu/drm/i915/selftests/i915_gem.c11
-rw-r--r--drivers/gpu/drm/i915/selftests/i915_gem_evict.c8
-rw-r--r--drivers/gpu/drm/i915/selftests/i915_gem_gtt.c109
-rw-r--r--drivers/gpu/drm/i915/selftests/i915_live_selftests.h12
-rw-r--r--drivers/gpu/drm/i915/selftests/i915_mock_selftests.h8
-rw-r--r--drivers/gpu/drm/i915/selftests/i915_perf.c2
-rw-r--r--drivers/gpu/drm/i915/selftests/i915_perf_selftests.h19
-rw-r--r--drivers/gpu/drm/i915/selftests/i915_random.h1
-rw-r--r--drivers/gpu/drm/i915/selftests/i915_request.c129
-rw-r--r--drivers/gpu/drm/i915/selftests/i915_selftest.c43
-rw-r--r--drivers/gpu/drm/i915/selftests/igt_atomic.c47
-rw-r--r--drivers/gpu/drm/i915/selftests/igt_atomic.h41
-rw-r--r--drivers/gpu/drm/i915/selftests/igt_live_test.h2
-rw-r--r--drivers/gpu/drm/i915/selftests/igt_mmap.c39
-rw-r--r--drivers/gpu/drm/i915/selftests/igt_mmap.h19
-rw-r--r--drivers/gpu/drm/i915/selftests/igt_spinner.c40
-rw-r--r--drivers/gpu/drm/i915/selftests/intel_memory_region.c43
-rw-r--r--drivers/gpu/drm/i915/selftests/mock_drm.c73
-rw-r--r--drivers/gpu/drm/i915/selftests/mock_drm.h18
-rw-r--r--drivers/gpu/drm/i915/selftests/mock_gem_device.c26
-rw-r--r--drivers/gpu/drm/i915/selftests/mock_gtt.c9
-rw-r--r--drivers/gpu/drm/i915/selftests/mock_gtt.h3
-rw-r--r--drivers/gpu/drm/i915/selftests/mock_region.h5
-rw-r--r--drivers/gpu/drm/i915/selftests/mock_uncore.h3
-rw-r--r--drivers/gpu/drm/imx/imx-ldb.c2
-rw-r--r--drivers/gpu/drm/imx/parallel-display.c2
-rw-r--r--drivers/gpu/drm/ingenic/ingenic-drm.c38
-rw-r--r--drivers/gpu/drm/lima/Kconfig2
-rw-r--r--drivers/gpu/drm/lima/lima_sched.c40
-rw-r--r--drivers/gpu/drm/lima/lima_sched.h2
-rw-r--r--drivers/gpu/drm/mcde/mcde_display.c57
-rw-r--r--drivers/gpu/drm/mcde/mcde_drm.h1
-rw-r--r--drivers/gpu/drm/mcde/mcde_drv.c18
-rw-r--r--drivers/gpu/drm/mcde/mcde_dsi.c416
-rw-r--r--drivers/gpu/drm/mcde/mcde_dsi_regs.h22
-rw-r--r--drivers/gpu/drm/mediatek/Makefile3
-rw-r--r--drivers/gpu/drm/mediatek/mtk_disp_color.c7
-rw-r--r--drivers/gpu/drm/mediatek/mtk_disp_ovl.c76
-rw-r--r--drivers/gpu/drm/mediatek/mtk_disp_rdma.c43
-rw-r--r--drivers/gpu/drm/mediatek/mtk_drm_crtc.c190
-rw-r--r--drivers/gpu/drm/mediatek/mtk_drm_crtc.h2
-rw-r--r--drivers/gpu/drm/mediatek/mtk_drm_ddp_comp.c184
-rw-r--r--drivers/gpu/drm/mediatek/mtk_drm_ddp_comp.h56
-rw-r--r--drivers/gpu/drm/mediatek/mtk_drm_drv.c92
-rw-r--r--drivers/gpu/drm/mediatek/mtk_drm_drv.h7
-rw-r--r--drivers/gpu/drm/mediatek/mtk_drm_fb.c92
-rw-r--r--drivers/gpu/drm/mediatek/mtk_drm_fb.h13
-rw-r--r--drivers/gpu/drm/mediatek/mtk_drm_plane.c50
-rw-r--r--drivers/gpu/drm/mediatek/mtk_drm_plane.h2
-rw-r--r--drivers/gpu/drm/mediatek/mtk_dsi.c2
-rw-r--r--drivers/gpu/drm/mediatek/mtk_hdmi.c15
-rw-r--r--drivers/gpu/drm/meson/Makefile1
-rw-r--r--drivers/gpu/drm/meson/meson_crtc.c81
-rw-r--r--drivers/gpu/drm/meson/meson_drv.c50
-rw-r--r--drivers/gpu/drm/meson/meson_drv.h23
-rw-r--r--drivers/gpu/drm/meson/meson_osd_afbcd.c389
-rw-r--r--drivers/gpu/drm/meson/meson_osd_afbcd.h28
-rw-r--r--drivers/gpu/drm/meson/meson_plane.c231
-rw-r--r--drivers/gpu/drm/meson/meson_rdma.c135
-rw-r--r--drivers/gpu/drm/meson/meson_rdma.h21
-rw-r--r--drivers/gpu/drm/meson/meson_registers.h110
-rw-r--r--drivers/gpu/drm/meson/meson_viu.c83
-rw-r--r--drivers/gpu/drm/meson/meson_viu.h19
-rw-r--r--drivers/gpu/drm/mga/mga_drv.h2
-rw-r--r--drivers/gpu/drm/mgag200/Kconfig8
-rw-r--r--drivers/gpu/drm/mgag200/mgag200_cursor.c5
-rw-r--r--drivers/gpu/drm/mgag200/mgag200_drv.c61
-rw-r--r--drivers/gpu/drm/mgag200/mgag200_i2c.c3
-rw-r--r--drivers/gpu/drm/mgag200/mgag200_main.c8
-rw-r--r--drivers/gpu/drm/mgag200/mgag200_mode.c2
-rw-r--r--drivers/gpu/drm/mgag200/mgag200_ttm.c2
-rw-r--r--drivers/gpu/drm/msm/adreno/a3xx_gpu.c8
-rw-r--r--drivers/gpu/drm/msm/adreno/a4xx_gpu.c8
-rw-r--r--drivers/gpu/drm/msm/adreno/a5xx_gpu.c11
-rw-r--r--drivers/gpu/drm/msm/adreno/a6xx.xml.h52
-rw-r--r--drivers/gpu/drm/msm/adreno/a6xx_gmu.c32
-rw-r--r--drivers/gpu/drm/msm/adreno/a6xx_gmu.h3
-rw-r--r--drivers/gpu/drm/msm/adreno/a6xx_gpu.c81
-rw-r--r--drivers/gpu/drm/msm/adreno/a6xx_gpu.h9
-rw-r--r--drivers/gpu/drm/msm/adreno/a6xx_gpu_state.c52
-rw-r--r--drivers/gpu/drm/msm/adreno/a6xx_gpu_state.h16
-rw-r--r--drivers/gpu/drm/msm/adreno/adreno_device.c11
-rw-r--r--drivers/gpu/drm/msm/adreno/adreno_gpu.c66
-rw-r--r--drivers/gpu/drm/msm/adreno/adreno_gpu.h17
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c15
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c186
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_cmd.c73
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_vid.c73
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_formats.c18
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.c241
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.h38
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog_format.h4
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_hw_ctl.c92
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_hw_ctl.h26
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_hw_interrupts.c22
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_hw_interrupts.h1
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_hw_intf.c36
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_hw_intf.h8
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_hw_lm.c13
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_hw_lm.h2
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_hw_pingpong.c8
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_hw_pingpong.h2
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_hw_sspp.c8
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_hw_sspp.h5
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_io_util.c27
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_kms.c1
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_plane.c34
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_rm.c6
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_vbif.c6
-rw-r--r--drivers/gpu/drm/msm/disp/mdp4/mdp4_dsi_encoder.c2
-rw-r--r--drivers/gpu/drm/msm/disp/mdp4/mdp4_lvds_connector.c2
-rw-r--r--drivers/gpu/drm/msm/disp/mdp5/mdp5_cfg.c2
-rw-r--r--drivers/gpu/drm/msm/dsi/dsi.h2
-rw-r--r--drivers/gpu/drm/msm/dsi/dsi_cfg.c24
-rw-r--r--drivers/gpu/drm/msm/dsi/dsi_cfg.h2
-rw-r--r--drivers/gpu/drm/msm/dsi/dsi_host.c46
-rw-r--r--drivers/gpu/drm/msm/dsi/dsi_manager.c64
-rw-r--r--drivers/gpu/drm/msm/edp/edp_bridge.c10
-rw-r--r--drivers/gpu/drm/msm/hdmi/hdmi_connector.c8
-rw-r--r--drivers/gpu/drm/msm/msm_drv.c6
-rw-r--r--drivers/gpu/drm/msm/msm_drv.h3
-rw-r--r--drivers/gpu/drm/msm/msm_fbdev.c2
-rw-r--r--drivers/gpu/drm/msm/msm_gem_submit.c19
-rw-r--r--drivers/gpu/drm/msm/msm_gpu.h7
-rw-r--r--drivers/gpu/drm/mxsfb/mxsfb_out.c2
-rw-r--r--drivers/gpu/drm/nouveau/Kconfig4
-rw-r--r--drivers/gpu/drm/nouveau/dispnv04/arb.c6
-rw-r--r--drivers/gpu/drm/nouveau/dispnv04/tvnv17.c13
-rw-r--r--drivers/gpu/drm/nouveau/dispnv50/base907c.c11
-rw-r--r--drivers/gpu/drm/nouveau/dispnv50/disp.c228
-rw-r--r--drivers/gpu/drm/nouveau/dispnv50/disp.h6
-rw-r--r--drivers/gpu/drm/nouveau/dispnv50/head.c43
-rw-r--r--drivers/gpu/drm/nouveau/dispnv50/head.h10
-rw-r--r--drivers/gpu/drm/nouveau/dispnv50/head507d.c9
-rw-r--r--drivers/gpu/drm/nouveau/dispnv50/head827d.c1
-rw-r--r--drivers/gpu/drm/nouveau/dispnv50/head907d.c11
-rw-r--r--drivers/gpu/drm/nouveau/dispnv50/head917d.c1
-rw-r--r--drivers/gpu/drm/nouveau/dispnv50/headc37d.c11
-rw-r--r--drivers/gpu/drm/nouveau/dispnv50/headc57d.c12
-rw-r--r--drivers/gpu/drm/nouveau/dispnv50/lut.c2
-rw-r--r--drivers/gpu/drm/nouveau/dispnv50/wndw.c17
-rw-r--r--drivers/gpu/drm/nouveau/dispnv50/wndw.h3
-rw-r--r--drivers/gpu/drm/nouveau/dispnv50/wndwc37e.c11
-rw-r--r--drivers/gpu/drm/nouveau/dispnv50/wndwc57e.c11
-rw-r--r--drivers/gpu/drm/nouveau/include/nvfw/acr.h152
-rw-r--r--drivers/gpu/drm/nouveau/include/nvfw/flcn.h97
-rw-r--r--drivers/gpu/drm/nouveau/include/nvfw/fw.h28
-rw-r--r--drivers/gpu/drm/nouveau/include/nvfw/hs.h31
-rw-r--r--drivers/gpu/drm/nouveau/include/nvfw/ls.h53
-rw-r--r--drivers/gpu/drm/nouveau/include/nvfw/pmu.h98
-rw-r--r--drivers/gpu/drm/nouveau/include/nvfw/sec2.h60
-rw-r--r--drivers/gpu/drm/nouveau/include/nvif/class.h3
-rw-r--r--drivers/gpu/drm/nouveau/include/nvif/if0008.h2
-rw-r--r--drivers/gpu/drm/nouveau/include/nvif/mmu.h4
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/core/device.h10
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/core/falcon.h77
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/core/firmware.h51
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/core/memory.h16
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/core/os.h13
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/engine/falcon.h20
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/engine/gr.h2
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/engine/nvdec.h8
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/engine/nvenc.h10
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/engine/sec2.h13
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/subdev/acr.h126
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/subdev/fault.h1
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/subdev/fb.h2
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/subdev/gsp.h5
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/subdev/ltc.h1
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/subdev/pmu.h14
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_bo.c5
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_chan.c2
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_dmem.c4
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_drm.c3
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_drv.h11
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_fbcon.c4
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_fence.c12
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_fence.h1
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_gem.c68
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_hwmon.c2
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_ttm.c4
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_vmm.c2
-rw-r--r--drivers/gpu/drm/nouveau/nvif/mmu.c1
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/Kbuild1
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/core/firmware.c67
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/core/memory.c2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/core/subdev.c2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/device/base.c108
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/device/priv.h2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/device/tegra.c24
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/channv50.c2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/dp.c2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/gv100.c23
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/Kbuild3
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf100.c27
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf100.h10
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgk20a.c6
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgm20b.c6
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgv100.c23
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxtu102.c95
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/hubgk208.fuc5.h786
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/hubgm107.fuc5.h786
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c311
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.h90
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/gf104.c9
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/gf108.c9
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/gf110.c9
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/gf117.c9
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/gf119.c9
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/gk104.c9
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/gk110.c9
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/gk110b.c9
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/gk208.c9
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/gk20a.c130
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/gm107.c9
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/gm200.c160
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/gm20b.c98
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/gp100.c23
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/gp102.c21
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/gp104.c34
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/gp107.c23
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/gp108.c97
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/gp10b.c39
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/gv100.c29
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/tu102.c177
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/nvdec/Kbuild2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/nvdec/base.c42
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/nvdec/gm107.c (renamed from drivers/gpu/drm/nouveau/include/nvkm/core/msgqueue.h)54
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/nvdec/priv.h14
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/nvenc/Kbuild3
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/nvenc/base.c63
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/nvenc/gm107.c63
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/nvenc/priv.h19
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/sec2/Kbuild1
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/sec2/base.c109
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/sec2/gp102.c312
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/sec2/gp108.c39
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/sec2/priv.h24
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/sec2/tu102.c47
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/falcon/Kbuild6
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/falcon/base.c87
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/falcon/cmdq.c214
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/falcon/msgq.c213
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/falcon/msgqueue.c577
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/falcon/msgqueue.h213
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/falcon/msgqueue_0137c63d.c436
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/falcon/msgqueue_0148cdec.c264
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/falcon/priv.h6
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/falcon/qmgr.c87
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/falcon/qmgr.h89
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/falcon/v1.c86
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/nvfw/Kbuild7
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/nvfw/acr.c165
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/nvfw/flcn.c115
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/nvfw/fw.c51
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/nvfw/hs.c62
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/nvfw/ls.c108
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/Kbuild2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/acr/Kbuild10
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/acr/base.c411
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/acr/gm200.c470
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/acr/gm20b.c134
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/acr/gp102.c281
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/acr/gp108.c111
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/acr/gp10b.c57
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/acr/hsfw.c180
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/acr/lsfw.c253
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/acr/priv.h151
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/acr/tu102.c215
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fault/Kbuild1
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fault/base.c3
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fault/gp100.c17
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fault/gp10b.c (renamed from drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue_cik.c)46
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fault/gv100.c1
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fault/priv.h10
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fault/tu102.c1
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fb/base.c38
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fb/gp102.c97
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fb/gv100.c9
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fb/priv.h10
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgf100.c2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgf108.c2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgk104.c2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgm107.c2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgm200.c2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgp100.c2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/gsp/Kbuild1
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/gsp/base.c59
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/gsp/gv100.c53
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/gsp/priv.h15
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/ltc/Kbuild1
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/ltc/gp10b.c65
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/ltc/priv.h2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/mmu/gf100.c3
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/mmu/gm200.c3
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/mmu/nv50.c3
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/mmu/priv.h8
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/mmu/tu102.c16
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/mmu/ummu.c7
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgf100.c6
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgp100.c6
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmnv50.c6
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/pmu/Kbuild1
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/pmu/base.c53
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gf100.c15
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gf119.c9
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gk104.c9
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gk110.c9
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gk208.c9
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gk20a.c21
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gm107.c9
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gm20b.c216
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gp100.c9
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gp102.c9
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gp10b.c101
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gt215.c27
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/pmu/priv.h33
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/secboot/Kbuild17
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr.c54
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr.h70
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr_r352.c1241
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr_r352.h167
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr_r361.c229
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr_r361.h71
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr_r364.c117
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr_r367.c418
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr_r370.c168
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr_r370.h50
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr_r375.c94
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/secboot/base.c213
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/secboot/gm200.c262
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/secboot/gm200.h46
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/secboot/gm20b.c148
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/secboot/gp102.c264
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/secboot/gp108.c88
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/secboot/gp10b.c95
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/secboot/hs_ucode.c97
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/secboot/hs_ucode.h81
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/secboot/ls_ucode.h161
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/secboot/ls_ucode_gr.c160
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/secboot/ls_ucode_msgqueue.c177
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/secboot/priv.h65
-rw-r--r--drivers/gpu/drm/omapdrm/displays/Kconfig6
-rw-r--r--drivers/gpu/drm/omapdrm/dss/Kconfig12
-rw-r--r--drivers/gpu/drm/omapdrm/dss/dispc.c3
-rw-r--r--drivers/gpu/drm/omapdrm/omap_connector.c3
-rw-r--r--drivers/gpu/drm/omapdrm/omap_drv.c4
-rw-r--r--drivers/gpu/drm/omapdrm/omap_encoder.c3
-rw-r--r--drivers/gpu/drm/omapdrm/omap_fbdev.c2
-rw-r--r--drivers/gpu/drm/omapdrm/omap_gem_dmabuf.c21
-rw-r--r--drivers/gpu/drm/panel/Kconfig43
-rw-r--r--drivers/gpu/drm/panel/Makefile4
-rw-r--r--drivers/gpu/drm/panel/panel-arm-versatile.c6
-rw-r--r--drivers/gpu/drm/panel/panel-boe-himax8279d.c978
-rw-r--r--drivers/gpu/drm/panel/panel-feiyang-fy07024di26a30d.c16
-rw-r--r--drivers/gpu/drm/panel/panel-ilitek-ili9322.c19
-rw-r--r--drivers/gpu/drm/panel/panel-ilitek-ili9881c.c29
-rw-r--r--drivers/gpu/drm/panel/panel-innolux-p079zca.c45
-rw-r--r--drivers/gpu/drm/panel/panel-jdi-lt070me05000.c11
-rw-r--r--drivers/gpu/drm/panel/panel-kingdisplay-kd097d04.c43
-rw-r--r--drivers/gpu/drm/panel/panel-leadtek-ltk500hd1829.c531
-rw-r--r--drivers/gpu/drm/panel/panel-lg-lb035q02.c6
-rw-r--r--drivers/gpu/drm/panel/panel-lg-lg4573.c14
-rw-r--r--drivers/gpu/drm/panel/panel-lvds.c46
-rw-r--r--drivers/gpu/drm/panel/panel-nec-nl8048hl11.c6
-rw-r--r--drivers/gpu/drm/panel/panel-novatek-nt39016.c6
-rw-r--r--drivers/gpu/drm/panel/panel-olimex-lcd-olinuxino.c29
-rw-r--r--drivers/gpu/drm/panel/panel-orisetech-otm8009a.c11
-rw-r--r--drivers/gpu/drm/panel/panel-osd-osd101t2587-53ts.c37
-rw-r--r--drivers/gpu/drm/panel/panel-panasonic-vvx10f034n00.c62
-rw-r--r--drivers/gpu/drm/panel/panel-raspberrypi-touchscreen.c11
-rw-r--r--drivers/gpu/drm/panel/panel-raydium-rm67191.c8
-rw-r--r--drivers/gpu/drm/panel/panel-raydium-rm68200.c26
-rw-r--r--drivers/gpu/drm/panel/panel-rocktech-jh057n00900.c35
-rw-r--r--drivers/gpu/drm/panel/panel-ronbo-rb070d30.c31
-rw-r--r--drivers/gpu/drm/panel/panel-samsung-ld9040.c4
-rw-r--r--drivers/gpu/drm/panel/panel-samsung-s6d16d0.c6
-rw-r--r--drivers/gpu/drm/panel/panel-samsung-s6e3ha2.c6
-rw-r--r--drivers/gpu/drm/panel/panel-samsung-s6e63j0x03.c6
-rw-r--r--drivers/gpu/drm/panel/panel-samsung-s6e63m0.c6
-rw-r--r--drivers/gpu/drm/panel/panel-samsung-s6e8aa0.c4
-rw-r--r--drivers/gpu/drm/panel/panel-seiko-43wvf1g.c54
-rw-r--r--drivers/gpu/drm/panel/panel-sharp-lq101r1sx01.c34
-rw-r--r--drivers/gpu/drm/panel/panel-sharp-ls037v7dw01.c6
-rw-r--r--drivers/gpu/drm/panel/panel-sharp-ls043t1le01.c37
-rw-r--r--drivers/gpu/drm/panel/panel-simple.c225
-rw-r--r--drivers/gpu/drm/panel/panel-sitronix-st7701.c23
-rw-r--r--drivers/gpu/drm/panel/panel-sitronix-st7789v.c49
-rw-r--r--drivers/gpu/drm/panel/panel-sony-acx424akp.c550
-rw-r--r--drivers/gpu/drm/panel/panel-sony-acx565akm.c6
-rw-r--r--drivers/gpu/drm/panel/panel-tpo-td028ttec1.c20
-rw-r--r--drivers/gpu/drm/panel/panel-tpo-td043mtea1.c6
-rw-r--r--drivers/gpu/drm/panel/panel-tpo-tpg110.c26
-rw-r--r--drivers/gpu/drm/panel/panel-truly-nt35597.c4
-rw-r--r--drivers/gpu/drm/panel/panel-xinpeng-xpp055c272.c398
-rw-r--r--drivers/gpu/drm/panfrost/panfrost_devfreq.c32
-rw-r--r--drivers/gpu/drm/panfrost/panfrost_drv.c97
-rw-r--r--drivers/gpu/drm/panfrost/panfrost_gem.c124
-rw-r--r--drivers/gpu/drm/panfrost/panfrost_gem.h41
-rw-r--r--drivers/gpu/drm/panfrost/panfrost_gem_shrinker.c3
-rw-r--r--drivers/gpu/drm/panfrost/panfrost_job.c21
-rw-r--r--drivers/gpu/drm/panfrost/panfrost_job.h1
-rw-r--r--drivers/gpu/drm/panfrost/panfrost_mmu.c61
-rw-r--r--drivers/gpu/drm/panfrost/panfrost_mmu.h6
-rw-r--r--drivers/gpu/drm/panfrost/panfrost_perfcnt.c34
-rw-r--r--drivers/gpu/drm/pl111/pl111_drv.c2
-rw-r--r--drivers/gpu/drm/qxl/qxl_kms.c2
-rw-r--r--drivers/gpu/drm/r128/Makefile2
-rw-r--r--drivers/gpu/drm/r128/ati_pcigart.c (renamed from drivers/gpu/drm/ati_pcigart.c)5
-rw-r--r--drivers/gpu/drm/r128/ati_pcigart.h31
-rw-r--r--drivers/gpu/drm/r128/r128_drv.c2
-rw-r--r--drivers/gpu/drm/r128/r128_drv.h3
-rw-r--r--drivers/gpu/drm/radeon/atom.h1
-rw-r--r--drivers/gpu/drm/radeon/atombios_crtc.c3
-rw-r--r--drivers/gpu/drm/radeon/atombios_dp.c6
-rw-r--r--drivers/gpu/drm/radeon/atombios_encoders.c11
-rw-r--r--drivers/gpu/drm/radeon/atombios_i2c.c5
-rw-r--r--drivers/gpu/drm/radeon/btc_dpm.c3
-rw-r--r--drivers/gpu/drm/radeon/ci_dpm.c3
-rw-r--r--drivers/gpu/drm/radeon/cik.c8
-rw-r--r--drivers/gpu/drm/radeon/cik_sdma.c2
-rw-r--r--drivers/gpu/drm/radeon/cypress_dpm.c2
-rw-r--r--drivers/gpu/drm/radeon/evergreen.c4
-rw-r--r--drivers/gpu/drm/radeon/kv_dpm.c3
-rw-r--r--drivers/gpu/drm/radeon/ni.c8
-rw-r--r--drivers/gpu/drm/radeon/ni_dpm.c3
-rw-r--r--drivers/gpu/drm/radeon/r100.c16
-rw-r--r--drivers/gpu/drm/radeon/r300.c2
-rw-r--r--drivers/gpu/drm/radeon/r420.c2
-rw-r--r--drivers/gpu/drm/radeon/r600.c10
-rw-r--r--drivers/gpu/drm/radeon/r600_cs.c8
-rw-r--r--drivers/gpu/drm/radeon/radeon_agp.c3
-rw-r--r--drivers/gpu/drm/radeon/radeon_asic.c2
-rw-r--r--drivers/gpu/drm/radeon/radeon_atombios.c18
-rw-r--r--drivers/gpu/drm/radeon/radeon_audio.c2
-rw-r--r--drivers/gpu/drm/radeon/radeon_bios.c16
-rw-r--r--drivers/gpu/drm/radeon/radeon_clocks.c3
-rw-r--r--drivers/gpu/drm/radeon/radeon_combios.c6
-rw-r--r--drivers/gpu/drm/radeon/radeon_connectors.c149
-rw-r--r--drivers/gpu/drm/radeon/radeon_cs.c2
-rw-r--r--drivers/gpu/drm/radeon/radeon_device.c2
-rw-r--r--drivers/gpu/drm/radeon/radeon_display.c8
-rw-r--r--drivers/gpu/drm/radeon/radeon_dp_mst.c2
-rw-r--r--drivers/gpu/drm/radeon/radeon_encoders.c3
-rw-r--r--drivers/gpu/drm/radeon/radeon_fb.c4
-rw-r--r--drivers/gpu/drm/radeon/radeon_gart.c2
-rw-r--r--drivers/gpu/drm/radeon/radeon_gem.c3
-rw-r--r--drivers/gpu/drm/radeon/radeon_i2c.c2
-rw-r--r--drivers/gpu/drm/radeon/radeon_irq_kms.c2
-rw-r--r--drivers/gpu/drm/radeon/radeon_kms.c2
-rw-r--r--drivers/gpu/drm/radeon/radeon_legacy_encoders.c6
-rw-r--r--drivers/gpu/drm/radeon/radeon_legacy_tv.c8
-rw-r--r--drivers/gpu/drm/radeon/radeon_pm.c4
-rw-r--r--drivers/gpu/drm/radeon/radeon_ttm.c33
-rw-r--r--drivers/gpu/drm/radeon/radeon_vce.c4
-rw-r--r--drivers/gpu/drm/radeon/radeon_vm.c16
-rw-r--r--drivers/gpu/drm/radeon/rs600.c2
-rw-r--r--drivers/gpu/drm/radeon/rs690.c2
-rw-r--r--drivers/gpu/drm/radeon/rs780_dpm.c3
-rw-r--r--drivers/gpu/drm/radeon/rv770.c4
-rw-r--r--drivers/gpu/drm/radeon/si.c8
-rw-r--r--drivers/gpu/drm/radeon/si_dpm.c8
-rw-r--r--drivers/gpu/drm/radeon/trinity_dpm.c3
-rw-r--r--drivers/gpu/drm/rcar-du/Kconfig8
-rw-r--r--drivers/gpu/drm/rcar-du/Makefile1
-rw-r--r--drivers/gpu/drm/rcar-du/rcar_cmm.c217
-rw-r--r--drivers/gpu/drm/rcar-du/rcar_cmm.h58
-rw-r--r--drivers/gpu/drm/rcar-du/rcar_du_crtc.c81
-rw-r--r--drivers/gpu/drm/rcar-du/rcar_du_crtc.h2
-rw-r--r--drivers/gpu/drm/rcar-du/rcar_du_drv.c6
-rw-r--r--drivers/gpu/drm/rcar-du/rcar_du_drv.h2
-rw-r--r--drivers/gpu/drm/rcar-du/rcar_du_group.c10
-rw-r--r--drivers/gpu/drm/rcar-du/rcar_du_group.h2
-rw-r--r--drivers/gpu/drm/rcar-du/rcar_du_kms.c93
-rw-r--r--drivers/gpu/drm/rcar-du/rcar_du_regs.h5
-rw-r--r--drivers/gpu/drm/rcar-du/rcar_lvds.c320
-rw-r--r--drivers/gpu/drm/rockchip/Kconfig9
-rw-r--r--drivers/gpu/drm/rockchip/cdn-dp-core.h2
-rw-r--r--drivers/gpu/drm/rockchip/dw-mipi-dsi-rockchip.c175
-rw-r--r--drivers/gpu/drm/rockchip/inno_hdmi.c6
-rw-r--r--drivers/gpu/drm/rockchip/rk3066_hdmi.c10
-rw-r--r--drivers/gpu/drm/rockchip/rockchip_drm_fb.c54
-rw-r--r--drivers/gpu/drm/rockchip/rockchip_drm_fbdev.c2
-rw-r--r--drivers/gpu/drm/rockchip/rockchip_lvds.c488
-rw-r--r--drivers/gpu/drm/rockchip/rockchip_lvds.h19
-rw-r--r--drivers/gpu/drm/savage/savage_drv.c2
-rw-r--r--drivers/gpu/drm/scheduler/sched_entity.c89
-rw-r--r--drivers/gpu/drm/scheduler/sched_main.c33
-rw-r--r--drivers/gpu/drm/selftests/Makefile3
-rw-r--r--drivers/gpu/drm/selftests/drm_cmdline_selftests.h5
-rw-r--r--drivers/gpu/drm/selftests/drm_modeset_selftests.h4
-rw-r--r--drivers/gpu/drm/selftests/test-drm_cmdline_parser.c122
-rw-r--r--drivers/gpu/drm/selftests/test-drm_dp_mst_helper.c12
-rw-r--r--drivers/gpu/drm/selftests/test-drm_modeset_common.h7
-rw-r--r--drivers/gpu/drm/selftests/test-drm_rect.c223
-rw-r--r--drivers/gpu/drm/sis/sis_drv.c2
-rw-r--r--drivers/gpu/drm/sti/sti_dvo.c4
-rw-r--r--drivers/gpu/drm/sti/sti_hda.c4
-rw-r--r--drivers/gpu/drm/sti/sti_hdmi.c2
-rw-r--r--drivers/gpu/drm/sti/sti_tvout.c2
-rw-r--r--drivers/gpu/drm/sti/sti_vtg.c2
-rw-r--r--drivers/gpu/drm/stm/dw_mipi_dsi-stm.c13
-rw-r--r--drivers/gpu/drm/stm/ltdc.c24
-rw-r--r--drivers/gpu/drm/sun4i/Kconfig16
-rw-r--r--drivers/gpu/drm/sun4i/sun4i_backend.c9
-rw-r--r--drivers/gpu/drm/sun4i/sun4i_drv.c22
-rw-r--r--drivers/gpu/drm/sun4i/sun4i_layer.c4
-rw-r--r--drivers/gpu/drm/sun4i/sun4i_lvds.c2
-rw-r--r--drivers/gpu/drm/sun4i/sun4i_rgb.c2
-rw-r--r--drivers/gpu/drm/sun4i/sun6i_drc.c8
-rw-r--r--drivers/gpu/drm/sun4i/sun6i_mipi_dsi.c49
-rw-r--r--drivers/gpu/drm/sun4i/sun8i_mixer.c8
-rw-r--r--drivers/gpu/drm/tdfx/tdfx_drv.c2
-rw-r--r--drivers/gpu/drm/tegra/dc.c147
-rw-r--r--drivers/gpu/drm/tegra/dpaux.c2
-rw-r--r--drivers/gpu/drm/tegra/drm.c4
-rw-r--r--drivers/gpu/drm/tegra/drm.h2
-rw-r--r--drivers/gpu/drm/tegra/dsi.c177
-rw-r--r--drivers/gpu/drm/tegra/fb.c2
-rw-r--r--drivers/gpu/drm/tegra/gem.c40
-rw-r--r--drivers/gpu/drm/tegra/gr2d.c4
-rw-r--r--drivers/gpu/drm/tegra/gr3d.c4
-rw-r--r--drivers/gpu/drm/tegra/hdmi.c125
-rw-r--r--drivers/gpu/drm/tegra/hub.c198
-rw-r--r--drivers/gpu/drm/tegra/hub.h2
-rw-r--r--drivers/gpu/drm/tegra/output.c18
-rw-r--r--drivers/gpu/drm/tegra/sor.c170
-rw-r--r--drivers/gpu/drm/tegra/vic.c8
-rw-r--r--drivers/gpu/drm/tilcdc/Makefile1
-rw-r--r--drivers/gpu/drm/tilcdc/tilcdc_drv.c13
-rw-r--r--drivers/gpu/drm/tilcdc/tilcdc_tfp410.c379
-rw-r--r--drivers/gpu/drm/tilcdc/tilcdc_tfp410.h15
-rw-r--r--drivers/gpu/drm/tiny/st7586.c2
-rw-r--r--drivers/gpu/drm/ttm/ttm_bo.c36
-rw-r--r--drivers/gpu/drm/ttm/ttm_bo_util.c5
-rw-r--r--drivers/gpu/drm/ttm/ttm_bo_vm.c27
-rw-r--r--drivers/gpu/drm/ttm/ttm_tt.c5
-rw-r--r--drivers/gpu/drm/tve200/tve200_drv.c2
-rw-r--r--drivers/gpu/drm/udl/Kconfig6
-rw-r--r--drivers/gpu/drm/udl/Makefile2
-rw-r--r--drivers/gpu/drm/udl/udl_connector.c21
-rw-r--r--drivers/gpu/drm/udl/udl_dmabuf.c255
-rw-r--r--drivers/gpu/drm/udl/udl_drv.c47
-rw-r--r--drivers/gpu/drm/udl/udl_drv.h85
-rw-r--r--drivers/gpu/drm/udl/udl_encoder.c70
-rw-r--r--drivers/gpu/drm/udl/udl_fb.c527
-rw-r--r--drivers/gpu/drm/udl/udl_gem.c253
-rw-r--r--drivers/gpu/drm/udl/udl_main.c9
-rw-r--r--drivers/gpu/drm/udl/udl_modeset.c378
-rw-r--r--drivers/gpu/drm/udl/udl_transfer.c12
-rw-r--r--drivers/gpu/drm/v3d/v3d_drv.c8
-rw-r--r--drivers/gpu/drm/vc4/Kconfig8
-rw-r--r--drivers/gpu/drm/vc4/vc4_dsi.c34
-rw-r--r--drivers/gpu/drm/vc4/vc4_gem.c11
-rw-r--r--drivers/gpu/drm/vc4/vc4_hdmi.c12
-rw-r--r--drivers/gpu/drm/via/via_dmablit.c8
-rw-r--r--drivers/gpu/drm/via/via_drv.c2
-rw-r--r--drivers/gpu/drm/via/via_map.c3
-rw-r--r--drivers/gpu/drm/virtio/virtgpu_display.c5
-rw-r--r--drivers/gpu/drm/virtio/virtgpu_drv.c2
-rw-r--r--drivers/gpu/drm/virtio/virtgpu_drv.h30
-rw-r--r--drivers/gpu/drm/virtio/virtgpu_fence.c5
-rw-r--r--drivers/gpu/drm/virtio/virtgpu_gem.c4
-rw-r--r--drivers/gpu/drm/virtio/virtgpu_ioctl.c22
-rw-r--r--drivers/gpu/drm/virtio/virtgpu_plane.c113
-rw-r--r--drivers/gpu/drm/virtio/virtgpu_vq.c42
-rw-r--r--drivers/gpu/drm/vkms/vkms_composer.c8
-rw-r--r--drivers/gpu/drm/vkms/vkms_drv.c8
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf_res.c4
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_drv.c76
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_drv.h6
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c21
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_fb.c2
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_msg.c90
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_prime.c33
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_surface.c16
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_ttm_glue.c4
-rw-r--r--drivers/gpu/drm/xen/xen_drm_front_kms.c9
-rw-r--r--drivers/gpu/drm/zte/zx_hdmi.c6
-rw-r--r--drivers/gpu/drm/zte/zx_vga.c6
-rw-r--r--drivers/gpu/host1x/bus.c79
-rw-r--r--drivers/gpu/host1x/dev.c4
-rw-r--r--drivers/gpu/host1x/job.c21
-rw-r--r--drivers/gpu/host1x/syncpt.c2
-rw-r--r--drivers/gpu/vga/Kconfig2
-rw-r--r--drivers/hid/hid-logitech-hidpp.c247
-rw-r--r--drivers/hid/hid-picolcd_fb.c3
-rw-r--r--drivers/hid/hidraw.c9
-rw-r--r--drivers/hv/hv_util.c8
-rw-r--r--drivers/hwmon/Kconfig37
-rw-r--r--drivers/hwmon/Makefile3
-rw-r--r--drivers/hwmon/adm1177.c288
-rw-r--r--drivers/hwmon/adt7475.c5
-rw-r--r--drivers/hwmon/drivetemp.c574
-rw-r--r--drivers/hwmon/hwmon.c85
-rw-r--r--drivers/hwmon/i5k_amb.c2
-rw-r--r--drivers/hwmon/k10temp.c489
-rw-r--r--drivers/hwmon/max31730.c440
-rw-r--r--drivers/hwmon/nct7802.c75
-rw-r--r--drivers/hwmon/pmbus/Kconfig32
-rw-r--r--drivers/hwmon/pmbus/Makefile2
-rw-r--r--drivers/hwmon/pmbus/ibm-cffps.c89
-rw-r--r--drivers/hwmon/pmbus/max20730.c372
-rw-r--r--drivers/hwmon/pmbus/max20751.c2
-rw-r--r--drivers/hwmon/pmbus/pmbus.c6
-rw-r--r--drivers/hwmon/pmbus/pmbus.h15
-rw-r--r--drivers/hwmon/pmbus/pmbus_core.c22
-rw-r--r--drivers/hwmon/pmbus/pxe1610.c44
-rw-r--r--drivers/hwmon/pmbus/tps53679.c46
-rw-r--r--drivers/hwmon/pmbus/ucd9000.c39
-rw-r--r--drivers/hwmon/pmbus/xdpe12284.c117
-rw-r--r--drivers/hwmon/pwm-fan.c15
-rw-r--r--drivers/hwmon/w83627ehf.c1969
-rw-r--r--drivers/hwtracing/coresight/coresight-etm4x.c13
-rw-r--r--drivers/i2c/busses/i2c-highlander.c2
-rw-r--r--drivers/i2c/busses/i2c-iop3xx.c12
-rw-r--r--drivers/i2c/busses/i2c-pmcmsp.c2
-rw-r--r--drivers/i2c/busses/i2c-tegra.c38
-rw-r--r--drivers/i3c/master.c4
-rw-r--r--drivers/i3c/master/dw-i3c-master.c20
-rw-r--r--drivers/i3c/master/i3c-master-cdns.c53
-rw-r--r--drivers/ide/Makefile2
-rw-r--r--drivers/ide/cmd64x.c3
-rw-r--r--drivers/ide/ht6560b.c2
-rw-r--r--drivers/ide/ide-cd.c38
-rw-r--r--drivers/ide/ide-disk.c1
-rw-r--r--drivers/ide/ide-floppy.c4
-rw-r--r--drivers/ide/ide-floppy.h2
-rw-r--r--drivers/ide/ide-floppy_ioctl.c35
-rw-r--r--drivers/ide/ide-gd.c17
-rw-r--r--drivers/ide/ide-ioctls.c47
-rw-r--r--drivers/ide/ide-iops.c1
-rw-r--r--drivers/ide/ide-proc.c2
-rw-r--r--drivers/ide/ide-tape.c11
-rw-r--r--drivers/ide/pmac.c3
-rw-r--r--drivers/ide/qd65xx.c2
-rw-r--r--drivers/ide/serverworks.c6
-rw-r--r--drivers/ide/siimage.c3
-rw-r--r--drivers/ide/tx4939ide.c2
-rw-r--r--drivers/ide/via82cxxx.c3
-rw-r--r--drivers/idle/intel_idle.c484
-rw-r--r--drivers/iio/accel/Kconfig20
-rw-r--r--drivers/iio/accel/Makefile2
-rw-r--r--drivers/iio/accel/adis16201.c8
-rw-r--r--drivers/iio/accel/adis16209.c8
-rw-r--r--drivers/iio/accel/bma180.c225
-rw-r--r--drivers/iio/accel/bma400.h99
-rw-r--r--drivers/iio/accel/bma400_core.c853
-rw-r--r--drivers/iio/accel/bma400_i2c.c61
-rw-r--r--drivers/iio/accel/kxcjk-1013.c27
-rw-r--r--drivers/iio/accel/st_accel.h2
-rw-r--r--drivers/iio/accel/st_accel_i2c.c8
-rw-r--r--drivers/iio/accel/st_accel_spi.c9
-rw-r--r--drivers/iio/adc/Kconfig17
-rw-r--r--drivers/iio/adc/Makefile4
-rw-r--r--drivers/iio/adc/ad7091r-base.c298
-rw-r--r--drivers/iio/adc/ad7091r-base.h26
-rw-r--r--drivers/iio/adc/ad7091r5.c113
-rw-r--r--drivers/iio/adc/ad7124.c14
-rw-r--r--drivers/iio/adc/ad7266.c29
-rw-r--r--drivers/iio/adc/ad7780.c1
-rw-r--r--drivers/iio/adc/ad7791.c1
-rw-r--r--drivers/iio/adc/ad7793.c1
-rw-r--r--drivers/iio/adc/ad7887.c82
-rw-r--r--drivers/iio/adc/ad7923.c64
-rw-r--r--drivers/iio/adc/ad799x.c66
-rw-r--r--drivers/iio/adc/ad_sigma_delta.c2
-rw-r--r--drivers/iio/adc/at91-sama5d2_adc.c6
-rw-r--r--drivers/iio/adc/ltc2496.c108
-rw-r--r--drivers/iio/adc/ltc2497-core.c243
-rw-r--r--drivers/iio/adc/ltc2497.c234
-rw-r--r--drivers/iio/adc/ltc2497.h18
-rw-r--r--drivers/iio/adc/max9611.c36
-rw-r--r--drivers/iio/adc/qcom-vadc-common.c6
-rw-r--r--drivers/iio/adc/qcom-vadc-common.h1
-rw-r--r--drivers/iio/adc/stm32-adc-core.c23
-rw-r--r--drivers/iio/adc/stm32-adc-core.h9
-rw-r--r--drivers/iio/adc/stm32-adc.c71
-rw-r--r--drivers/iio/adc/stm32-dfsdm-adc.c55
-rw-r--r--drivers/iio/adc/ti-ads1015.c73
-rw-r--r--drivers/iio/adc/ti-ads7950.c2
-rw-r--r--drivers/iio/buffer/industrialio-buffer-dma.c2
-rw-r--r--drivers/iio/buffer/industrialio-buffer-dmaengine.c30
-rw-r--r--drivers/iio/buffer/kfifo_buf.c5
-rw-r--r--drivers/iio/chemical/Kconfig1
-rw-r--r--drivers/iio/chemical/Makefile2
-rw-r--r--drivers/iio/chemical/atlas-sensor.c (renamed from drivers/iio/chemical/atlas-ph-sensor.c)24
-rw-r--r--drivers/iio/common/ssp_sensors/ssp.h14
-rw-r--r--drivers/iio/common/ssp_sensors/ssp_dev.c29
-rw-r--r--drivers/iio/common/ssp_sensors/ssp_spi.c8
-rw-r--r--drivers/iio/common/st_sensors/st_sensors_core.c45
-rw-r--r--drivers/iio/common/st_sensors/st_sensors_i2c.c21
-rw-r--r--drivers/iio/common/st_sensors/st_sensors_spi.c12
-rw-r--r--drivers/iio/common/st_sensors/st_sensors_trigger.c3
-rw-r--r--drivers/iio/dac/ad5592r-base.c1
-rw-r--r--drivers/iio/dac/ad7303.c25
-rw-r--r--drivers/iio/dac/stm32-dac-core.c19
-rw-r--r--drivers/iio/frequency/adf4350.c30
-rw-r--r--drivers/iio/gyro/Kconfig32
-rw-r--r--drivers/iio/gyro/adis16136.c72
-rw-r--r--drivers/iio/gyro/adis16260.c14
-rw-r--r--drivers/iio/gyro/itg3200_core.c1
-rw-r--r--drivers/iio/gyro/st_gyro.h2
-rw-r--r--drivers/iio/gyro/st_gyro_core.c75
-rw-r--r--drivers/iio/gyro/st_gyro_i2c.c9
-rw-r--r--drivers/iio/gyro/st_gyro_spi.c9
-rw-r--r--drivers/iio/humidity/dht11.c1
-rw-r--r--drivers/iio/humidity/hts221_core.c19
-rw-r--r--drivers/iio/iio_core.h8
-rw-r--r--drivers/iio/imu/adis.c139
-rw-r--r--drivers/iio/imu/adis16400.c115
-rw-r--r--drivers/iio/imu/adis16460.c7
-rw-r--r--drivers/iio/imu/adis16480.c92
-rw-r--r--drivers/iio/imu/adis_buffer.c4
-rw-r--r--drivers/iio/imu/inv_mpu6050/Kconfig9
-rw-r--r--drivers/iio/imu/inv_mpu6050/inv_mpu_core.c237
-rw-r--r--drivers/iio/imu/inv_mpu6050/inv_mpu_i2c.c2
-rw-r--r--drivers/iio/imu/inv_mpu6050/inv_mpu_iio.h22
-rw-r--r--drivers/iio/imu/inv_mpu6050/inv_mpu_magn.c80
-rw-r--r--drivers/iio/imu/inv_mpu6050/inv_mpu_ring.c11
-rw-r--r--drivers/iio/imu/inv_mpu6050/inv_mpu_spi.c1
-rw-r--r--drivers/iio/imu/inv_mpu6050/inv_mpu_trigger.c4
-rw-r--r--drivers/iio/imu/st_lsm6dsx/st_lsm6dsx.h49
-rw-r--r--drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_buffer.c27
-rw-r--r--drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_core.c124
-rw-r--r--drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_i2c.c3
-rw-r--r--drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_shub.c76
-rw-r--r--drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_spi.c3
-rw-r--r--drivers/iio/industrialio-buffer.c16
-rw-r--r--drivers/iio/industrialio-core.c25
-rw-r--r--drivers/iio/light/apds9960.c2
-rw-r--r--drivers/iio/light/lm3533-als.c2
-rw-r--r--drivers/iio/light/si1145.c1
-rw-r--r--drivers/iio/light/st_uvis25_i2c.c1
-rw-r--r--drivers/iio/light/vcnl4000.c3
-rw-r--r--drivers/iio/magnetometer/ak8975.c107
-rw-r--r--drivers/iio/magnetometer/st_magn_i2c.c9
-rw-r--r--drivers/iio/magnetometer/st_magn_spi.c9
-rw-r--r--drivers/iio/pressure/Kconfig12
-rw-r--r--drivers/iio/pressure/Makefile1
-rw-r--r--drivers/iio/pressure/bmp280-i2c.c18
-rw-r--r--drivers/iio/pressure/dlhl60d.c375
-rw-r--r--drivers/iio/pressure/st_pressure.h2
-rw-r--r--drivers/iio/pressure/st_pressure_i2c.c22
-rw-r--r--drivers/iio/pressure/st_pressure_spi.c9
-rw-r--r--drivers/iio/proximity/Kconfig15
-rw-r--r--drivers/iio/proximity/Makefile1
-rw-r--r--drivers/iio/proximity/as3935.c3
-rw-r--r--drivers/iio/proximity/ping.c335
-rw-r--r--drivers/iio/resolver/ad2s1200.c1
-rw-r--r--drivers/iio/temperature/max31856.c134
-rw-r--r--drivers/iio/temperature/maxim_thermocouple.c44
-rw-r--r--drivers/iio/trigger/stm32-timer-trigger.c3
-rw-r--r--drivers/infiniband/core/umem.c19
-rw-r--r--drivers/infiniband/core/umem_odp.c13
-rw-r--r--drivers/infiniband/hw/bnxt_re/qplib_fp.c2
-rw-r--r--drivers/infiniband/hw/bnxt_re/qplib_rcfw.c4
-rw-r--r--drivers/infiniband/hw/bnxt_re/qplib_res.c2
-rw-r--r--drivers/infiniband/hw/hfi1/pcie.c4
-rw-r--r--drivers/infiniband/hw/hfi1/trace_tid.h8
-rw-r--r--drivers/infiniband/hw/hfi1/trace_tx.h2
-rw-r--r--drivers/infiniband/hw/hfi1/user_pages.c4
-rw-r--r--drivers/infiniband/hw/mlx5/main.c12
-rw-r--r--drivers/infiniband/hw/mthca/mthca_memfree.c8
-rw-r--r--drivers/infiniband/hw/qib/qib_iba7322.c2
-rw-r--r--drivers/infiniband/hw/qib/qib_init.c4
-rw-r--r--drivers/infiniband/hw/qib/qib_pcie.c2
-rw-r--r--drivers/infiniband/hw/qib/qib_user_pages.c4
-rw-r--r--drivers/infiniband/hw/qib/qib_user_sdma.c8
-rw-r--r--drivers/infiniband/hw/usnic/usnic_uiom.c4
-rw-r--r--drivers/infiniband/sw/siw/siw_mem.c4
-rw-r--r--drivers/infiniband/ulp/ipoib/ipoib_main.c2
-rw-r--r--drivers/infiniband/ulp/isert/ib_isert.c12
-rw-r--r--drivers/input/evdev.c5
-rw-r--r--drivers/input/keyboard/pxa930_rotary.c2
-rw-r--r--drivers/input/keyboard/sh_keysc.c2
-rw-r--r--drivers/input/misc/keyspan_remote.c9
-rw-r--r--drivers/input/misc/max77650-onkey.c7
-rw-r--r--drivers/input/misc/pm8xxx-vibrator.c2
-rw-r--r--drivers/input/mouse/pxa930_trkball.c2
-rw-r--r--drivers/input/rmi4/rmi_f54.c43
-rw-r--r--drivers/input/rmi4/rmi_smbus.c2
-rw-r--r--drivers/input/serio/gscps2.c2
-rw-r--r--drivers/input/tablet/aiptek.c8
-rw-r--r--drivers/input/tablet/gtco.c13
-rw-r--r--drivers/input/tablet/pegasus_notetaker.c2
-rw-r--r--drivers/input/touchscreen/sun4i-ts.c6
-rw-r--r--drivers/input/touchscreen/sur40.c2
-rw-r--r--drivers/interconnect/Makefile1
-rw-r--r--drivers/interconnect/core.c168
-rw-r--r--drivers/interconnect/internal.h42
-rw-r--r--drivers/interconnect/qcom/Kconfig9
-rw-r--r--drivers/interconnect/qcom/Makefile2
-rw-r--r--drivers/interconnect/qcom/msm8916.c554
-rw-r--r--drivers/interconnect/qcom/msm8974.c32
-rw-r--r--drivers/interconnect/qcom/qcs404.c32
-rw-r--r--drivers/interconnect/qcom/sdm845.c16
-rw-r--r--drivers/interconnect/trace.h88
-rw-r--r--drivers/iommu/amd_iommu_init.c26
-rw-r--r--drivers/iommu/intel-iommu.c3
-rw-r--r--drivers/ipack/carriers/tpci200.c4
-rw-r--r--drivers/ipack/devices/ipoctal.c6
-rw-r--r--drivers/irqchip/Kconfig14
-rw-r--r--drivers/irqchip/Makefile5
-rw-r--r--drivers/irqchip/irq-aspeed-scu-ic.c239
-rw-r--r--drivers/irqchip/irq-gic-v3-its.c698
-rw-r--r--drivers/irqchip/irq-gic-v3.c24
-rw-r--r--drivers/irqchip/irq-imx-intmux.c309
-rw-r--r--drivers/irqchip/irq-ingenic.c6
-rw-r--r--drivers/irqchip/irq-mbigen.c1
-rw-r--r--drivers/irqchip/irq-meson-gpio.c137
-rw-r--r--drivers/irqchip/irq-mips-gic.c2
-rw-r--r--drivers/irqchip/irq-nvic.c15
-rw-r--r--drivers/irqchip/irq-renesas-intc-irqpin.c2
-rw-r--r--drivers/irqchip/irq-sifive-plic.c30
-rw-r--r--drivers/isdn/Makefile2
-rw-r--r--drivers/isdn/capi/Kconfig32
-rw-r--r--drivers/isdn/capi/Makefile18
-rw-r--r--drivers/isdn/capi/capi.c14
-rw-r--r--drivers/isdn/capi/capilib.c202
-rw-r--r--drivers/isdn/capi/capiutil.c231
-rw-r--r--drivers/isdn/capi/kcapi.c409
-rw-r--r--drivers/isdn/capi/kcapi.h149
-rw-r--r--drivers/isdn/capi/kcapi_proc.c36
-rw-r--r--drivers/leds/leds-as3645a.c3
-rw-r--r--drivers/leds/leds-gpio.c10
-rw-r--r--drivers/leds/leds-lm3532.c3
-rw-r--r--drivers/leds/leds-max77650.c7
-rw-r--r--drivers/leds/leds-rb532.c1
-rw-r--r--drivers/leds/trigger/ledtrig-pattern.c4
-rw-r--r--drivers/lightnvm/pblk-trace.h8
-rw-r--r--drivers/md/bcache/bcache.h2
-rw-r--r--drivers/md/bcache/bset.c5
-rw-r--r--drivers/md/bcache/btree.c24
-rw-r--r--drivers/md/bcache/btree.h2
-rw-r--r--drivers/md/bcache/journal.c80
-rw-r--r--drivers/md/bcache/super.c136
-rw-r--r--drivers/md/dm-bio-prison-v2.c2
-rw-r--r--drivers/md/dm-crypt.c335
-rw-r--r--drivers/md/dm-dust.c6
-rw-r--r--drivers/md/dm-mpath.c68
-rw-r--r--drivers/md/dm-raid.c43
-rw-r--r--drivers/md/dm-snap-persistent.c2
-rw-r--r--drivers/md/dm-snap.c6
-rw-r--r--drivers/md/dm-thin-metadata.c22
-rw-r--r--drivers/md/dm-thin.c36
-rw-r--r--drivers/md/dm-verity-target.c18
-rw-r--r--drivers/md/dm-writecache.c71
-rw-r--r--drivers/md/dm-zoned-metadata.c23
-rw-r--r--drivers/md/dm.c9
-rw-r--r--drivers/md/md-bitmap.c25
-rw-r--r--drivers/md/md.c254
-rw-r--r--drivers/md/md.h45
-rw-r--r--drivers/md/persistent-data/dm-space-map-common.c27
-rw-r--r--drivers/md/persistent-data/dm-space-map-common.h2
-rw-r--r--drivers/md/persistent-data/dm-space-map-disk.c6
-rw-r--r--drivers/md/persistent-data/dm-space-map-metadata.c5
-rw-r--r--drivers/md/raid0.c2
-rw-r--r--drivers/md/raid1.c111
-rw-r--r--drivers/md/raid5.c21
-rw-r--r--drivers/media/common/videobuf2/videobuf2-dma-contig.c8
-rw-r--r--drivers/media/common/videobuf2/videobuf2-dma-sg.c8
-rw-r--r--drivers/media/common/videobuf2/videobuf2-vmalloc.c10
-rw-r--r--drivers/media/pci/cx18/cx18-driver.c2
-rw-r--r--drivers/media/pci/ivtv/ivtv-driver.c6
-rw-r--r--drivers/media/pci/ivtv/ivtvfb.c5
-rw-r--r--drivers/media/platform/davinci/dm355_ccdc.c2
-rw-r--r--drivers/media/platform/davinci/dm644x_ccdc.c2
-rw-r--r--drivers/media/platform/davinci/isif.c2
-rw-r--r--drivers/media/platform/tegra-cec/tegra_cec.c2
-rw-r--r--drivers/media/platform/vivid/vivid-osd.c3
-rw-r--r--drivers/media/v4l2-core/videobuf-dma-sg.c8
-rw-r--r--drivers/message/fusion/mptctl.c213
-rw-r--r--drivers/message/fusion/mptlan.c2
-rw-r--r--drivers/mfd/Kconfig13
-rw-r--r--drivers/mfd/Makefile1
-rw-r--r--drivers/mfd/intel_soc_pmic_core.c21
-rw-r--r--drivers/mfd/ioc3.c669
-rw-r--r--drivers/misc/cardreader/alcor_pci.c8
-rw-r--r--drivers/misc/cardreader/rts5261.c11
-rw-r--r--drivers/misc/cardreader/rtsx_pcr.c2
-rw-r--r--drivers/misc/cxl/context.c2
-rw-r--r--drivers/misc/enclosure.c3
-rw-r--r--drivers/misc/fastrpc.c8
-rw-r--r--drivers/misc/genwqe/card_ddcb.c8
-rw-r--r--drivers/misc/isl29020.c1
-rw-r--r--drivers/misc/lkdtm/bugs.c12
-rw-r--r--drivers/misc/mei/bus.c10
-rw-r--r--drivers/misc/mei/hdcp/mei_hdcp.c33
-rw-r--r--drivers/misc/mei/hw-me-regs.h6
-rw-r--r--drivers/misc/mei/pci-me.c4
-rw-r--r--drivers/misc/mic/card/mic_debugfs.c3
-rw-r--r--drivers/misc/mic/cosm/cosm_debugfs.c3
-rw-r--r--drivers/misc/mic/host/mic_debugfs.c3
-rw-r--r--drivers/misc/mic/scif/scif_nodeqp.c2
-rw-r--r--drivers/misc/pti.c6
-rw-r--r--drivers/misc/pvpanic.c12
-rw-r--r--drivers/misc/sgi-xp/xpnet.c2
-rw-r--r--drivers/misc/sram-exec.c21
-rw-r--r--drivers/misc/ti-st/st_core.c4
-rw-r--r--drivers/misc/tsl2550.c12
-rw-r--r--drivers/misc/vmw_balloon.c1
-rw-r--r--drivers/misc/xilinx_sdfec.c12
-rw-r--r--drivers/mmc/core/block.c6
-rw-r--r--drivers/mmc/core/core.c10
-rw-r--r--drivers/mmc/core/host.c33
-rw-r--r--drivers/mmc/core/mmc_ops.c34
-rw-r--r--drivers/mmc/core/slot-gpio.c31
-rw-r--r--drivers/mmc/host/Kconfig6
-rw-r--r--drivers/mmc/host/atmel-mci.c2
-rw-r--r--drivers/mmc/host/au1xmmc.c7
-rw-r--r--drivers/mmc/host/bcm2835.c12
-rw-r--r--drivers/mmc/host/cavium-thunderx.c16
-rw-r--r--drivers/mmc/host/davinci_mmc.c4
-rw-r--r--drivers/mmc/host/dw_mmc.c8
-rw-r--r--drivers/mmc/host/jz4740_mmc.c2
-rw-r--r--drivers/mmc/host/meson-gx-mmc.c10
-rw-r--r--drivers/mmc/host/meson-mx-sdio.c4
-rw-r--r--drivers/mmc/host/mmc_spi.c15
-rw-r--r--drivers/mmc/host/mmci.c114
-rw-r--r--drivers/mmc/host/mmci.h10
-rw-r--r--drivers/mmc/host/mtk-sd.c3
-rw-r--r--drivers/mmc/host/mvsdio.c6
-rw-r--r--drivers/mmc/host/mxcmmc.c11
-rw-r--r--drivers/mmc/host/mxs-mmc.c6
-rw-r--r--drivers/mmc/host/omap_hsmmc.c10
-rw-r--r--drivers/mmc/host/owl-mmc.c6
-rw-r--r--drivers/mmc/host/pxamci.c26
-rw-r--r--drivers/mmc/host/renesas_sdhi.h10
-rw-r--r--drivers/mmc/host/renesas_sdhi_core.c22
-rw-r--r--drivers/mmc/host/renesas_sdhi_internal_dmac.c25
-rw-r--r--drivers/mmc/host/s3cmci.c4
-rw-r--r--drivers/mmc/host/sdhci-acpi.c4
-rw-r--r--drivers/mmc/host/sdhci-brcmstb.c270
-rw-r--r--drivers/mmc/host/sdhci-cadence.c2
-rw-r--r--drivers/mmc/host/sdhci-esdhc-imx.c18
-rw-r--r--drivers/mmc/host/sdhci-milbeaut.c8
-rw-r--r--drivers/mmc/host/sdhci-msm.c139
-rw-r--r--drivers/mmc/host/sdhci-of-at91.c112
-rw-r--r--drivers/mmc/host/sdhci-of-esdhc.c248
-rw-r--r--drivers/mmc/host/sdhci-omap.c60
-rw-r--r--drivers/mmc/host/sdhci-pci-core.c4
-rw-r--r--drivers/mmc/host/sdhci-s3c.c6
-rw-r--r--drivers/mmc/host/sdhci-sirf.c2
-rw-r--r--drivers/mmc/host/sdhci-spear.c6
-rw-r--r--drivers/mmc/host/sdhci-tegra.c2
-rw-r--r--drivers/mmc/host/sdhci.c397
-rw-r--r--drivers/mmc/host/sdhci.h13
-rw-r--r--drivers/mmc/host/sdhci_am654.c58
-rw-r--r--drivers/mmc/host/sdhci_f_sdh30.c4
-rw-r--r--drivers/mmc/host/sh_mmcif.c12
-rw-r--r--drivers/mmc/host/sunxi-mmc.c3
-rw-r--r--drivers/mmc/host/tmio_mmc_core.c6
-rw-r--r--drivers/mmc/host/uniphier-sd.c14
-rw-r--r--drivers/mmc/host/usdhi6rol0.c27
-rw-r--r--drivers/mmc/host/via-sdmmc.c2
-rw-r--r--drivers/mtd/devices/bcm47xxsflash.c2
-rw-r--r--drivers/mtd/devices/block2mtd.c8
-rw-r--r--drivers/mtd/maps/amd76xrom.c2
-rw-r--r--drivers/mtd/maps/ck804xrom.c2
-rw-r--r--drivers/mtd/maps/esb2rom.c2
-rw-r--r--drivers/mtd/maps/ichxrom.c2
-rw-r--r--drivers/mtd/maps/intel_vr_nor.c4
-rw-r--r--drivers/mtd/maps/l440gx.c2
-rw-r--r--drivers/mtd/maps/netsc520.c4
-rw-r--r--drivers/mtd/maps/nettel.c8
-rw-r--r--drivers/mtd/maps/pci.c4
-rw-r--r--drivers/mtd/maps/pcmciamtd.c7
-rw-r--r--drivers/mtd/maps/physmap-core.c20
-rw-r--r--drivers/mtd/maps/sc520cdp.c8
-rw-r--r--drivers/mtd/maps/scb2_flash.c2
-rw-r--r--drivers/mtd/maps/ts5500_flash.c4
-rw-r--r--drivers/mtd/mtdconcat.c5
-rw-r--r--drivers/mtd/nand/onenand/Kconfig14
-rw-r--r--drivers/mtd/nand/onenand/Makefile4
-rw-r--r--drivers/mtd/nand/onenand/onenand_base.c82
-rw-r--r--drivers/mtd/nand/onenand/onenand_omap2.c (renamed from drivers/mtd/nand/onenand/omap2.c)0
-rw-r--r--drivers/mtd/nand/onenand/onenand_samsung.c (renamed from drivers/mtd/nand/onenand/samsung_mtd.c)9
-rw-r--r--drivers/mtd/nand/raw/Kconfig2
-rw-r--r--drivers/mtd/nand/raw/atmel/nand-controller.c20
-rw-r--r--drivers/mtd/nand/raw/au1550nd.c2
-rw-r--r--drivers/mtd/nand/raw/brcmnand/brcmnand.c10
-rw-r--r--drivers/mtd/nand/raw/denali.c14
-rw-r--r--drivers/mtd/nand/raw/denali_dt.c56
-rw-r--r--drivers/mtd/nand/raw/denali_pci.c6
-rw-r--r--drivers/mtd/nand/raw/fsl_upm.c2
-rw-r--r--drivers/mtd/nand/raw/gpmi-nand/gpmi-nand.c11
-rw-r--r--drivers/mtd/nand/raw/mpc5121_nfc.c2
-rw-r--r--drivers/mtd/nand/raw/nand_macronix.c11
-rw-r--r--drivers/mtd/nand/spi/toshiba.c10
-rw-r--r--drivers/mtd/parsers/sharpslpart.c4
-rw-r--r--drivers/mtd/spi-nor/Kconfig4
-rw-r--r--drivers/mtd/spi-nor/aspeed-smc.c4
-rw-r--r--drivers/mtd/spi-nor/cadence-quadspi.c4
-rw-r--r--drivers/mtd/spi-nor/hisi-sfc.c6
-rw-r--r--drivers/mtd/spi-nor/intel-spi-pci.c2
-rw-r--r--drivers/mtd/spi-nor/spi-nor.c133
-rw-r--r--drivers/mtd/ubi/attach.c2
-rw-r--r--drivers/mtd/ubi/build.c31
-rw-r--r--drivers/mtd/ubi/fastmap.c23
-rw-r--r--drivers/mtd/ubi/ubi.h1
-rw-r--r--drivers/mtd/ubi/vtbl.c8
-rw-r--r--drivers/mtd/ubi/wl.c3
-rw-r--r--drivers/net/Kconfig55
-rw-r--r--drivers/net/Makefile3
-rw-r--r--drivers/net/appletalk/cops.c4
-rw-r--r--drivers/net/arcnet/arcdevice.h2
-rw-r--r--drivers/net/arcnet/arcnet.c2
-rw-r--r--drivers/net/bonding/bond_3ad.c122
-rw-r--r--drivers/net/caif/caif_serial.c4
-rw-r--r--drivers/net/can/at91_can.c2
-rw-r--r--drivers/net/can/cc770/cc770_isa.c2
-rw-r--r--drivers/net/can/sja1000/sja1000_isa.c2
-rw-r--r--drivers/net/can/sja1000/sja1000_platform.c2
-rw-r--r--drivers/net/can/slcan.c12
-rw-r--r--drivers/net/can/softing/softing_main.c2
-rw-r--r--drivers/net/dsa/Kconfig5
-rw-r--r--drivers/net/dsa/Makefile1
-rw-r--r--drivers/net/dsa/b53/b53_common.c66
-rw-r--r--drivers/net/dsa/b53/b53_priv.h4
-rw-r--r--drivers/net/dsa/bcm_sf2.c2
-rw-r--r--drivers/net/dsa/dsa_loop.c3
-rw-r--r--drivers/net/dsa/lan9303-core.c3
-rw-r--r--drivers/net/dsa/lantiq_gswip.c3
-rw-r--r--drivers/net/dsa/microchip/ksz8795.c3
-rw-r--r--drivers/net/dsa/microchip/ksz9477.c3
-rw-r--r--drivers/net/dsa/mt7530.c3
-rw-r--r--drivers/net/dsa/mv88e6060.c3
-rw-r--r--drivers/net/dsa/mv88e6xxx/chip.c32
-rw-r--r--drivers/net/dsa/mv88e6xxx/chip.h6
-rw-r--r--drivers/net/dsa/mv88e6xxx/global1_atu.c5
-rw-r--r--drivers/net/dsa/mv88e6xxx/global1_vtu.c5
-rw-r--r--drivers/net/dsa/mv88e6xxx/global2.c10
-rw-r--r--drivers/net/dsa/mv88e6xxx/serdes.c100
-rw-r--r--drivers/net/dsa/mv88e6xxx/serdes.h9
-rw-r--r--drivers/net/dsa/ocelot/Kconfig2
-rw-r--r--drivers/net/dsa/ocelot/felix.c271
-rw-r--r--drivers/net/dsa/ocelot/felix.h16
-rw-r--r--drivers/net/dsa/ocelot/felix_vsc9959.c521
-rw-r--r--drivers/net/dsa/qca/Kconfig9
-rw-r--r--drivers/net/dsa/qca/Makefile2
-rw-r--r--drivers/net/dsa/qca/ar9331.c856
-rw-r--r--drivers/net/dsa/qca8k.c3
-rw-r--r--drivers/net/dsa/rtl8366rb.c3
-rw-r--r--drivers/net/dsa/sja1105/sja1105_main.c125
-rw-r--r--drivers/net/dsa/sja1105/sja1105_ptp.c36
-rw-r--r--drivers/net/dsa/sja1105/sja1105_ptp.h1
-rw-r--r--drivers/net/dsa/vitesse-vsc73xx-core.c5
-rw-r--r--drivers/net/ethernet/3com/3c509.c4
-rw-r--r--drivers/net/ethernet/3com/3c515.c4
-rw-r--r--drivers/net/ethernet/3com/3c574_cs.c4
-rw-r--r--drivers/net/ethernet/3com/3c589_cs.c4
-rw-r--r--drivers/net/ethernet/3com/3c59x.c8
-rw-r--r--drivers/net/ethernet/3com/typhoon.c2
-rw-r--r--drivers/net/ethernet/8390/8390.c4
-rw-r--r--drivers/net/ethernet/8390/8390.h4
-rw-r--r--drivers/net/ethernet/8390/8390p.c4
-rw-r--r--drivers/net/ethernet/8390/axnet_cs.c4
-rw-r--r--drivers/net/ethernet/8390/lib8390.c2
-rw-r--r--drivers/net/ethernet/adaptec/starfire.c4
-rw-r--r--drivers/net/ethernet/agere/et131x.c13
-rw-r--r--drivers/net/ethernet/alacritech/slicoss.c2
-rw-r--r--drivers/net/ethernet/allwinner/sun4i-emac.c17
-rw-r--r--drivers/net/ethernet/alteon/acenic.c4
-rw-r--r--drivers/net/ethernet/altera/altera_tse_main.c4
-rw-r--r--drivers/net/ethernet/amazon/ena/ena_ethtool.c4
-rw-r--r--drivers/net/ethernet/amazon/ena/ena_netdev.c959
-rw-r--r--drivers/net/ethernet/amazon/ena/ena_netdev.h73
-rw-r--r--drivers/net/ethernet/amd/7990.c2
-rw-r--r--drivers/net/ethernet/amd/7990.h2
-rw-r--r--drivers/net/ethernet/amd/a2065.c13
-rw-r--r--drivers/net/ethernet/amd/am79c961a.c2
-rw-r--r--drivers/net/ethernet/amd/amd8111e.c2
-rw-r--r--drivers/net/ethernet/amd/ariadne.c2
-rw-r--r--drivers/net/ethernet/amd/atarilance.c4
-rw-r--r--drivers/net/ethernet/amd/au1000_eth.c21
-rw-r--r--drivers/net/ethernet/amd/declance.c4
-rw-r--r--drivers/net/ethernet/amd/lance.c4
-rw-r--r--drivers/net/ethernet/amd/ni65.c4
-rw-r--r--drivers/net/ethernet/amd/nmclan_cs.c4
-rw-r--r--drivers/net/ethernet/amd/pcnet32.c4
-rw-r--r--drivers/net/ethernet/amd/sunlance.c2
-rw-r--r--drivers/net/ethernet/amd/xgbe/xgbe-drv.c2
-rw-r--r--drivers/net/ethernet/amd/xgbe/xgbe-phy-v2.c2
-rw-r--r--drivers/net/ethernet/apm/xgene-v2/main.c2
-rw-r--r--drivers/net/ethernet/apm/xgene/xgene_enet_main.c4
-rw-r--r--drivers/net/ethernet/apple/macmace.c4
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/aq_pci_func.c2
-rw-r--r--drivers/net/ethernet/arc/emac_main.c14
-rw-r--r--drivers/net/ethernet/atheros/ag71xx.c15
-rw-r--r--drivers/net/ethernet/atheros/alx/main.c2
-rw-r--r--drivers/net/ethernet/atheros/atl1c/atl1c_main.c2
-rw-r--r--drivers/net/ethernet/atheros/atl1e/atl1e_main.c2
-rw-r--r--drivers/net/ethernet/atheros/atlx/atl2.c2
-rw-r--r--drivers/net/ethernet/atheros/atlx/atlx.c2
-rw-r--r--drivers/net/ethernet/aurora/nb8800.c7
-rw-r--r--drivers/net/ethernet/broadcom/b44.c2
-rw-r--r--drivers/net/ethernet/broadcom/bcmsysport.c17
-rw-r--r--drivers/net/ethernet/broadcom/bgmac.c10
-rw-r--r--drivers/net/ethernet/broadcom/bnx2.c2
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c2
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h2
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_init.h1
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c3
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt.c77
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt.h8
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c238
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.h5
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c16
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_vfr.c3
-rw-r--r--drivers/net/ethernet/broadcom/genet/bcmgenet.c133
-rw-r--r--drivers/net/ethernet/broadcom/genet/bcmgenet.h4
-rw-r--r--drivers/net/ethernet/broadcom/sb1250-mac.c6
-rw-r--r--drivers/net/ethernet/broadcom/tg3.c14
-rw-r--r--drivers/net/ethernet/brocade/bna/bfa_ioc.c3
-rw-r--r--drivers/net/ethernet/brocade/bna/bnad.c2
-rw-r--r--drivers/net/ethernet/cadence/macb.h15
-rw-r--r--drivers/net/ethernet/cadence/macb_main.c91
-rw-r--r--drivers/net/ethernet/calxeda/xgmac.c2
-rw-r--r--drivers/net/ethernet/cavium/liquidio/lio_main.c2
-rw-r--r--drivers/net/ethernet/cavium/liquidio/lio_vf_main.c2
-rw-r--r--drivers/net/ethernet/cavium/liquidio/lio_vf_rep.c4
-rw-r--r--drivers/net/ethernet/cavium/octeon/octeon_mgmt.c4
-rw-r--r--drivers/net/ethernet/cavium/thunder/nicvf_main.c2
-rw-r--r--drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c4
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4.h11
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c28
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c253
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c80
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c8
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_matchall.c71
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_mqprio.c28
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_u32.c18
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h14
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/l2t.c3
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/sched.c16
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/sched.h2
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h1
-rw-r--r--drivers/net/ethernet/cirrus/cs89x0.c2
-rw-r--r--drivers/net/ethernet/cisco/enic/enic_main.c2
-rw-r--r--drivers/net/ethernet/cortina/gemini.c2
-rw-r--r--drivers/net/ethernet/davicom/dm9000.c2
-rw-r--r--drivers/net/ethernet/dec/tulip/de2104x.c4
-rw-r--r--drivers/net/ethernet/dec/tulip/dmfe.c7
-rw-r--r--drivers/net/ethernet/dec/tulip/tulip_core.c4
-rw-r--r--drivers/net/ethernet/dec/tulip/uli526x.c4
-rw-r--r--drivers/net/ethernet/dec/tulip/winbond-840.c4
-rw-r--r--drivers/net/ethernet/dlink/dl2k.c4
-rw-r--r--drivers/net/ethernet/dlink/sundance.c4
-rw-r--r--drivers/net/ethernet/dnet.c15
-rw-r--r--drivers/net/ethernet/emulex/benet/be_main.c2
-rw-r--r--drivers/net/ethernet/ethoc.c6
-rw-r--r--drivers/net/ethernet/faraday/ftgmac100.c13
-rw-r--r--drivers/net/ethernet/fealnx.c4
-rw-r--r--drivers/net/ethernet/freescale/Makefile1
-rw-r--r--drivers/net/ethernet/freescale/dpaa/dpaa_eth.c2
-rw-r--r--drivers/net/ethernet/freescale/dpaa2/dpaa2-ptp.c20
-rw-r--r--drivers/net/ethernet/freescale/dpaa2/dprtc-cmd.h4
-rw-r--r--drivers/net/ethernet/freescale/dpaa2/dprtc.h2
-rw-r--r--drivers/net/ethernet/freescale/enetc/Kconfig1
-rw-r--r--drivers/net/ethernet/freescale/enetc/Makefile2
-rw-r--r--drivers/net/ethernet/freescale/enetc/enetc.c14
-rw-r--r--drivers/net/ethernet/freescale/enetc/enetc.h3
-rw-r--r--drivers/net/ethernet/freescale/enetc/enetc_ethtool.c1
-rw-r--r--drivers/net/ethernet/freescale/enetc/enetc_hw.h11
-rw-r--r--drivers/net/ethernet/freescale/enetc/enetc_mdio.c120
-rw-r--r--drivers/net/ethernet/freescale/enetc/enetc_mdio.h12
-rw-r--r--drivers/net/ethernet/freescale/enetc/enetc_pci_mdio.c43
-rw-r--r--drivers/net/ethernet/freescale/enetc/enetc_pf.c47
-rw-r--r--drivers/net/ethernet/freescale/enetc/enetc_pf.h4
-rw-r--r--drivers/net/ethernet/freescale/enetc/enetc_qos.c39
-rw-r--r--drivers/net/ethernet/freescale/fec_main.c2
-rw-r--r--drivers/net/ethernet/freescale/fec_mpc52xx.c14
-rw-r--r--drivers/net/ethernet/freescale/fman/fman_memac.c4
-rw-r--r--drivers/net/ethernet/freescale/fman/mac.c4
-rw-r--r--drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c12
-rw-r--r--drivers/net/ethernet/freescale/gianfar.c12
-rw-r--r--drivers/net/ethernet/freescale/ucc_geth.c2
-rw-r--r--drivers/net/ethernet/freescale/xgmac_mdio.c7
-rw-r--r--drivers/net/ethernet/fujitsu/fmvj18x_cs.c4
-rw-r--r--drivers/net/ethernet/google/gve/gve_main.c2
-rw-r--r--drivers/net/ethernet/hisilicon/hip04_eth.c2
-rw-r--r--drivers/net/ethernet/hisilicon/hisi_femac.c14
-rw-r--r--drivers/net/ethernet/hisilicon/hix5hd2_gmac.c2
-rw-r--r--drivers/net/ethernet/hisilicon/hns/hns_enet.c22
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/Makefile2
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hnae3.h9
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c10
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3_enet.c268
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3_enet.h1
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c5
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3_trace.h139
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.c16
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h2
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.c86
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.c10
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c505
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h23
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c7
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.c2
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c441
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h15
-rw-r--r--drivers/net/ethernet/huawei/hinic/hinic_main.c2
-rw-r--r--drivers/net/ethernet/i825xx/82596.c4
-rw-r--r--drivers/net/ethernet/i825xx/ether1.c4
-rw-r--r--drivers/net/ethernet/i825xx/lib82596.c4
-rw-r--r--drivers/net/ethernet/i825xx/sni_82596.c6
-rw-r--r--drivers/net/ethernet/i825xx/sun3_82586.c4
-rw-r--r--drivers/net/ethernet/ibm/ehea/ehea_main.c2
-rw-r--r--drivers/net/ethernet/ibm/emac/core.c2
-rw-r--r--drivers/net/ethernet/ibm/ibmvnic.c2
-rw-r--r--drivers/net/ethernet/intel/e100.c2
-rw-r--r--drivers/net/ethernet/intel/e1000/e1000_main.c4
-rw-r--r--drivers/net/ethernet/intel/e1000e/e1000.h5
-rw-r--r--drivers/net/ethernet/intel/e1000e/netdev.c73
-rw-r--r--drivers/net/ethernet/intel/fm10k/fm10k_netdev.c19
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_adminq.c5
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_common.c2
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_main.c43
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c22
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_xsk.c4
-rw-r--r--drivers/net/ethernet/intel/iavf/iavf.h2
-rw-r--r--drivers/net/ethernet/intel/iavf/iavf_main.c19
-rw-r--r--drivers/net/ethernet/intel/iavf/iavf_virtchnl.c3
-rw-r--r--drivers/net/ethernet/intel/ice/Makefile3
-rw-r--r--drivers/net/ethernet/intel/ice/ice.h8
-rw-r--r--drivers/net/ethernet/intel/ice/ice_adminq_cmd.h8
-rw-r--r--drivers/net/ethernet/intel/ice/ice_base.c16
-rw-r--r--drivers/net/ethernet/intel/ice/ice_common.c218
-rw-r--r--drivers/net/ethernet/intel/ice/ice_common.h8
-rw-r--r--drivers/net/ethernet/intel/ice/ice_dcb_lib.c6
-rw-r--r--drivers/net/ethernet/intel/ice/ice_devids.h18
-rw-r--r--drivers/net/ethernet/intel/ice/ice_ethtool.c300
-rw-r--r--drivers/net/ethernet/intel/ice/ice_flex_pipe.c2563
-rw-r--r--drivers/net/ethernet/intel/ice/ice_flex_pipe.h9
-rw-r--r--drivers/net/ethernet/intel/ice/ice_flex_type.h112
-rw-r--r--drivers/net/ethernet/intel/ice/ice_flow.c1275
-rw-r--r--drivers/net/ethernet/intel/ice/ice_flow.h207
-rw-r--r--drivers/net/ethernet/intel/ice/ice_hw_autogen.h9
-rw-r--r--drivers/net/ethernet/intel/ice/ice_lan_tx_rx.h8
-rw-r--r--drivers/net/ethernet/intel/ice/ice_lib.c400
-rw-r--r--drivers/net/ethernet/intel/ice/ice_lib.h8
-rw-r--r--drivers/net/ethernet/intel/ice/ice_main.c109
-rw-r--r--drivers/net/ethernet/intel/ice/ice_nvm.c12
-rw-r--r--drivers/net/ethernet/intel/ice/ice_protocol_type.h25
-rw-r--r--drivers/net/ethernet/intel/ice/ice_status.h1
-rw-r--r--drivers/net/ethernet/intel/ice/ice_switch.c36
-rw-r--r--drivers/net/ethernet/intel/ice/ice_txrx.c28
-rw-r--r--drivers/net/ethernet/intel/ice/ice_txrx.h6
-rw-r--r--drivers/net/ethernet/intel/ice/ice_type.h6
-rw-r--r--drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c485
-rw-r--r--drivers/net/ethernet/intel/ice/ice_virtchnl_pf.h4
-rw-r--r--drivers/net/ethernet/intel/ice/ice_xsk.c9
-rw-r--r--drivers/net/ethernet/intel/igb/e1000_82575.c8
-rw-r--r--drivers/net/ethernet/intel/igb/igb.h1
-rw-r--r--drivers/net/ethernet/intel/igb/igb_ethtool.c10
-rw-r--r--drivers/net/ethernet/intel/igb/igb_main.c51
-rw-r--r--drivers/net/ethernet/intel/igbvf/netdev.c2
-rw-r--r--drivers/net/ethernet/intel/igc/Makefile2
-rw-r--r--drivers/net/ethernet/intel/igc/igc.h47
-rw-r--r--drivers/net/ethernet/intel/igc/igc_base.c1
-rw-r--r--drivers/net/ethernet/intel/igc/igc_defines.h102
-rw-r--r--drivers/net/ethernet/intel/igc/igc_ethtool.c34
-rw-r--r--drivers/net/ethernet/intel/igc/igc_hw.h3
-rw-r--r--drivers/net/ethernet/intel/igc/igc_main.c2945
-rw-r--r--drivers/net/ethernet/intel/igc/igc_phy.c16
-rw-r--r--drivers/net/ethernet/intel/igc/igc_ptp.c716
-rw-r--r--drivers/net/ethernet/intel/igc/igc_regs.h37
-rw-r--r--drivers/net/ethernet/intel/ixgb/ixgb_main.c4
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_debugfs.c4
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_main.c39
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c4
-rw-r--r--drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c7
-rw-r--r--drivers/net/ethernet/jme.c2
-rw-r--r--drivers/net/ethernet/korina.c8
-rw-r--r--drivers/net/ethernet/lantiq_etop.c13
-rw-r--r--drivers/net/ethernet/marvell/mv643xx_eth.c2
-rw-r--r--drivers/net/ethernet/marvell/mvneta.c37
-rw-r--r--drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c51
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/Kconfig8
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/Makefile2
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/common.h9
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/mbox.h8
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c17
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/nic/Makefile10
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c1410
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h615
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/nic/otx2_ethtool.c662
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c1349
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/nic/otx2_reg.h147
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/nic/otx2_struct.h276
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c848
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.h162
-rw-r--r--drivers/net/ethernet/marvell/pxa168_eth.c13
-rw-r--r--drivers/net/ethernet/marvell/skge.c4
-rw-r--r--drivers/net/ethernet/marvell/sky2.c4
-rw-r--r--drivers/net/ethernet/mediatek/mtk_eth_soc.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/crdump.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_netdev.c16
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/Makefile2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/alloc.c4
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en.h9
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/fs.h1
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/xsk/rx.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c49
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c23
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_fs_ethtool.c9
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_main.c36
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_rep.c314
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_rep.h1
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_stats.c346
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_stats.h83
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_tc.c88
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/eq.c117
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/eswitch.c11
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/eswitch.h27
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c309
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads_chains.c758
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads_chains.h30
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads_termtbl.c11
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c3
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/fs_core.c96
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/fs_core.h1
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/fw.c15
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c28
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lag.c8
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lag.h1
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lag_mp.c4
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/main.c1
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/dr_action.c502
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/dr_cmd.c244
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/dr_fw.c79
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/dr_send.c3
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/dr_table.c23
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/dr_types.h72
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/fs_dr.c138
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/mlx5_ifc_dr.h16
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/mlx5dr.h27
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/wq.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/minimal.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/reg.h152
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum.c175
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum.h14
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_acl.c16
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_dcb.c30
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_ipip.c60
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_ptp.c15
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_qdisc.c587
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c874
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_router.h3
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_span.c61
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_span.h1
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_trap.c52
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/switchx2.c17
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/trap.h9
-rw-r--r--drivers/net/ethernet/micrel/ks8842.c2
-rw-r--r--drivers/net/ethernet/micrel/ksz884x.c2
-rw-r--r--drivers/net/ethernet/microchip/enc28j60.c2
-rw-r--r--drivers/net/ethernet/microchip/encx24j600.c2
-rw-r--r--drivers/net/ethernet/microchip/lan743x_ptp.c3
-rw-r--r--drivers/net/ethernet/mscc/ocelot.c7
-rw-r--r--drivers/net/ethernet/mscc/ocelot.h7
-rw-r--r--drivers/net/ethernet/mscc/ocelot_ana.h625
-rw-r--r--drivers/net/ethernet/mscc/ocelot_board.c4
-rw-r--r--drivers/net/ethernet/mscc/ocelot_dev.h275
-rw-r--r--drivers/net/ethernet/mscc/ocelot_qsys.h270
-rw-r--r--drivers/net/ethernet/myricom/myri10ge/myri10ge.c8
-rw-r--r--drivers/net/ethernet/natsemi/natsemi.c4
-rw-r--r--drivers/net/ethernet/natsemi/ns83820.c6
-rw-r--r--drivers/net/ethernet/natsemi/sonic.c382
-rw-r--r--drivers/net/ethernet/natsemi/sonic.h46
-rw-r--r--drivers/net/ethernet/neterion/s2io.c2
-rw-r--r--drivers/net/ethernet/neterion/s2io.h2
-rw-r--r--drivers/net/ethernet/neterion/vxge/vxge-main.c2
-rw-r--r--drivers/net/ethernet/netronome/Kconfig1
-rw-r--r--drivers/net/ethernet/netronome/nfp/abm/cls.c14
-rw-r--r--drivers/net/ethernet/netronome/nfp/ccm.h1
-rw-r--r--drivers/net/ethernet/netronome/nfp/crypto/crypto.h15
-rw-r--r--drivers/net/ethernet/netronome/nfp/crypto/fw.h8
-rw-r--r--drivers/net/ethernet/netronome/nfp/crypto/tls.c89
-rw-r--r--drivers/net/ethernet/netronome/nfp/flower/action.c65
-rw-r--r--drivers/net/ethernet/netronome/nfp/flower/cmsg.c11
-rw-r--r--drivers/net/ethernet/netronome/nfp/flower/cmsg.h106
-rw-r--r--drivers/net/ethernet/netronome/nfp/flower/main.h38
-rw-r--r--drivers/net/ethernet/netronome/nfp/flower/match.c260
-rw-r--r--drivers/net/ethernet/netronome/nfp/flower/offload.c144
-rw-r--r--drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c498
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_net.h6
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_net_common.c35
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_net_ctrl.c48
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_net_ctrl.h25
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c116
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_netvf_main.c8
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfpcore/nfp6000_pcie.c6
-rw-r--r--drivers/net/ethernet/nvidia/forcedeth.c2
-rw-r--r--drivers/net/ethernet/nxp/lpc_eth.c15
-rw-r--r--drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c2
-rw-r--r--drivers/net/ethernet/packetengines/hamachi.c4
-rw-r--r--drivers/net/ethernet/packetengines/yellowfin.c4
-rw-r--r--drivers/net/ethernet/pensando/ionic/ionic.h21
-rw-r--r--drivers/net/ethernet/pensando/ionic/ionic_bus_pci.c113
-rw-r--r--drivers/net/ethernet/pensando/ionic/ionic_dev.c58
-rw-r--r--drivers/net/ethernet/pensando/ionic/ionic_dev.h7
-rw-r--r--drivers/net/ethernet/pensando/ionic/ionic_if.h97
-rw-r--r--drivers/net/ethernet/pensando/ionic/ionic_lif.c249
-rw-r--r--drivers/net/ethernet/pensando/ionic/ionic_lif.h1
-rw-r--r--drivers/net/ethernet/pensando/ionic/ionic_main.c6
-rw-r--r--drivers/net/ethernet/pensando/ionic/ionic_stats.c1
-rw-r--r--drivers/net/ethernet/pensando/ionic/ionic_txrx.c23
-rw-r--r--drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c4
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed.h69
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_cxt.c358
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_cxt.h130
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_debug.c3891
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_debug.h4
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_dev.c128
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_dev_api.h24
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_fcoe.c2
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_hsi.h2564
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_hw.c67
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_init_fw_funcs.c521
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_init_ops.c47
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_init_ops.h8
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_iscsi.c36
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_iwarp.c8
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_ll2.c149
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_ll2.h14
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_main.c2
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_mcp.c10
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_reg_addr.h38
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_roce.c2
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_sp.h2
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_sp_commands.c10
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_sriov.c19
-rw-r--r--drivers/net/ethernet/qlogic/qede/qede_fp.c8
-rw-r--r--drivers/net/ethernet/qlogic/qede/qede_ptp.c1
-rw-r--r--drivers/net/ethernet/qlogic/qla3xxx.c2
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c1
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c4
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_minidump.c2
-rw-r--r--drivers/net/ethernet/qualcomm/emac/emac.c16
-rw-r--r--drivers/net/ethernet/qualcomm/qca_spi.c2
-rw-r--r--drivers/net/ethernet/qualcomm/qca_uart.c2
-rw-r--r--drivers/net/ethernet/rdc/r6040.c12
-rw-r--r--drivers/net/ethernet/realtek/8139cp.c2
-rw-r--r--drivers/net/ethernet/realtek/8139too.c4
-rw-r--r--drivers/net/ethernet/realtek/Makefile2
-rw-r--r--drivers/net/ethernet/realtek/atp.c4
-rw-r--r--drivers/net/ethernet/realtek/r8169.h78
-rw-r--r--drivers/net/ethernet/realtek/r8169_main.c1485
-rw-r--r--drivers/net/ethernet/realtek/r8169_phy_config.c1307
-rw-r--r--drivers/net/ethernet/renesas/ravb_main.c2
-rw-r--r--drivers/net/ethernet/renesas/sh_eth.c58
-rw-r--r--drivers/net/ethernet/rocker/rocker_main.c4
-rw-r--r--drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c6
-rw-r--r--drivers/net/ethernet/seeq/ether3.c4
-rw-r--r--drivers/net/ethernet/seeq/sgiseeq.c2
-rw-r--r--drivers/net/ethernet/sfc/Kconfig2
-rw-r--r--drivers/net/ethernet/sfc/Makefile9
-rw-r--r--drivers/net/ethernet/sfc/ef10.c2822
-rw-r--r--drivers/net/ethernet/sfc/ef10_sriov.c5
-rw-r--r--drivers/net/ethernet/sfc/efx.c2501
-rw-r--r--drivers/net/ethernet/sfc/efx.h65
-rw-r--r--drivers/net/ethernet/sfc/efx_channels.c1234
-rw-r--r--drivers/net/ethernet/sfc/efx_channels.h55
-rw-r--r--drivers/net/ethernet/sfc/efx_common.c1102
-rw-r--r--drivers/net/ethernet/sfc/efx_common.h73
-rw-r--r--drivers/net/ethernet/sfc/ethtool.c446
-rw-r--r--drivers/net/ethernet/sfc/ethtool_common.c457
-rw-r--r--drivers/net/ethernet/sfc/ethtool_common.h30
-rw-r--r--drivers/net/ethernet/sfc/falcon/efx.c4
-rw-r--r--drivers/net/ethernet/sfc/farch.c1
-rw-r--r--drivers/net/ethernet/sfc/mcdi.h3
-rw-r--r--drivers/net/ethernet/sfc/mcdi_filters.c2270
-rw-r--r--drivers/net/ethernet/sfc/mcdi_filters.h159
-rw-r--r--drivers/net/ethernet/sfc/mcdi_functions.c386
-rw-r--r--drivers/net/ethernet/sfc/mcdi_functions.h32
-rw-r--r--drivers/net/ethernet/sfc/mcdi_port.c558
-rw-r--r--drivers/net/ethernet/sfc/mcdi_port_common.c568
-rw-r--r--drivers/net/ethernet/sfc/mcdi_port_common.h57
-rw-r--r--drivers/net/ethernet/sfc/net_driver.h20
-rw-r--r--drivers/net/ethernet/sfc/nic.h7
-rw-r--r--drivers/net/ethernet/sfc/rx.c592
-rw-r--r--drivers/net/ethernet/sfc/rx_common.c851
-rw-r--r--drivers/net/ethernet/sfc/rx_common.h97
-rw-r--r--drivers/net/ethernet/sfc/selftest.c9
-rw-r--r--drivers/net/ethernet/sfc/selftest.h2
-rw-r--r--drivers/net/ethernet/sfc/siena.c2
-rw-r--r--drivers/net/ethernet/sfc/siena_sriov.c1
-rw-r--r--drivers/net/ethernet/sfc/tx.c398
-rw-r--r--drivers/net/ethernet/sfc/tx_common.c404
-rw-r--r--drivers/net/ethernet/sfc/tx_common.h36
-rw-r--r--drivers/net/ethernet/sgi/Kconfig5
-rw-r--r--drivers/net/ethernet/sgi/ioc3-eth.c548
-rw-r--r--drivers/net/ethernet/sgi/meth.c4
-rw-r--r--drivers/net/ethernet/silan/sc92031.c2
-rw-r--r--drivers/net/ethernet/sis/sis190.c2
-rw-r--r--drivers/net/ethernet/sis/sis900.c4
-rw-r--r--drivers/net/ethernet/smsc/epic100.c11
-rw-r--r--drivers/net/ethernet/smsc/smc911x.c4
-rw-r--r--drivers/net/ethernet/smsc/smc9194.c4
-rw-r--r--drivers/net/ethernet/smsc/smc91c92_cs.c4
-rw-r--r--drivers/net/ethernet/smsc/smc91x.c2
-rw-r--r--drivers/net/ethernet/smsc/smsc911x.c13
-rw-r--r--drivers/net/ethernet/smsc/smsc9420.c11
-rw-r--r--drivers/net/ethernet/socionext/netsec.c55
-rw-r--r--drivers/net/ethernet/socionext/sni_ave.c20
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/common.h6
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/descs.h9
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-dwc-qos-eth.c24
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-mediatek.c89
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c24
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac4.h13
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c4
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c10
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.h7
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.c25
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.h18
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac4_lib.c47
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac5.c119
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac5.h24
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac_dma.h6
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac_lib.c22
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwxgmac2.h40
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwxgmac2_core.c77
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwxgmac2_descs.c9
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwxgmac2_dma.c52
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/hwif.h32
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/mmc_core.c16
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac.h6
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_main.c316
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c7
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c6
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_selftests.c148
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c162
-rw-r--r--drivers/net/ethernet/sun/cassini.c2
-rw-r--r--drivers/net/ethernet/sun/niu.c2
-rw-r--r--drivers/net/ethernet/sun/sunbmac.c2
-rw-r--r--drivers/net/ethernet/sun/sungem.c2
-rw-r--r--drivers/net/ethernet/sun/sunhme.c2
-rw-r--r--drivers/net/ethernet/sun/sunqe.c2
-rw-r--r--drivers/net/ethernet/sun/sunvnet_common.c11
-rw-r--r--drivers/net/ethernet/sun/sunvnet_common.h2
-rw-r--r--drivers/net/ethernet/synopsys/dwc-xlgmac-net.c2
-rw-r--r--drivers/net/ethernet/ti/cpmac.c14
-rw-r--r--drivers/net/ethernet/ti/cpsw_priv.c2
-rw-r--r--drivers/net/ethernet/ti/cpsw_priv.h2
-rw-r--r--drivers/net/ethernet/ti/davinci_emac.c2
-rw-r--r--drivers/net/ethernet/ti/netcp_core.c4
-rw-r--r--drivers/net/ethernet/ti/netcp_ethss.c8
-rw-r--r--drivers/net/ethernet/ti/tlan.c6
-rw-r--r--drivers/net/ethernet/toshiba/ps3_gelic_net.c2
-rw-r--r--drivers/net/ethernet/toshiba/ps3_gelic_net.h2
-rw-r--r--drivers/net/ethernet/toshiba/spider_net.c2
-rw-r--r--drivers/net/ethernet/toshiba/tc35815.c16
-rw-r--r--drivers/net/ethernet/via/via-rhine.c4
-rw-r--r--drivers/net/ethernet/via/via-velocity.c14
-rw-r--r--drivers/net/ethernet/via/via-velocity.h1
-rw-r--r--drivers/net/ethernet/wiznet/w5100.c2
-rw-r--r--drivers/net/ethernet/wiznet/w5300.c2
-rw-r--r--drivers/net/ethernet/xilinx/ll_temac_main.c17
-rw-r--r--drivers/net/ethernet/xilinx/xilinx_emaclite.c2
-rw-r--r--drivers/net/ethernet/xircom/xirc2ps_cs.c4
-rw-r--r--drivers/net/ethernet/xscale/Kconfig14
-rw-r--r--drivers/net/ethernet/xscale/Makefile3
-rw-r--r--drivers/net/ethernet/xscale/ixp46x_ts.h68
-rw-r--r--drivers/net/ethernet/xscale/ixp4xx_eth.c213
-rw-r--r--drivers/net/ethernet/xscale/ptp_ixp46x.c (renamed from drivers/ptp/ptp_ixp46x.c)3
-rw-r--r--drivers/net/fddi/defxx.c2
-rw-r--r--drivers/net/fddi/defza.c2
-rw-r--r--drivers/net/fddi/skfp/skfddi.c16
-rw-r--r--drivers/net/fjes/fjes_hw.c2
-rw-r--r--drivers/net/fjes/fjes_main.c4
-rw-r--r--drivers/net/fjes/fjes_trace.h2
-rw-r--r--drivers/net/gtp.c19
-rw-r--r--drivers/net/hamradio/hdlcdrv.c2
-rw-r--r--drivers/net/hyperv/Makefile2
-rw-r--r--drivers/net/hyperv/hyperv_net.h21
-rw-r--r--drivers/net/hyperv/netvsc.c31
-rw-r--r--drivers/net/hyperv/netvsc_bpf.c209
-rw-r--r--drivers/net/hyperv/netvsc_drv.c183
-rw-r--r--drivers/net/hyperv/rndis_filter.c4
-rw-r--r--drivers/net/macsec.c787
-rw-r--r--drivers/net/macvlan.c9
-rw-r--r--drivers/net/netdevsim/dev.c4
-rw-r--r--drivers/net/netdevsim/fib.c674
-rw-r--r--drivers/net/phy/Kconfig19
-rw-r--r--drivers/net/phy/Makefile3
-rw-r--r--drivers/net/phy/adin.c12
-rw-r--r--drivers/net/phy/aquantia_main.c7
-rw-r--r--drivers/net/phy/bcm84881.c269
-rw-r--r--drivers/net/phy/dp83640.c217
-rw-r--r--drivers/net/phy/dp83822.c18
-rw-r--r--drivers/net/phy/dp83867.c70
-rw-r--r--drivers/net/phy/dp83869.c2
-rw-r--r--drivers/net/phy/fixed_phy.c11
-rw-r--r--drivers/net/phy/lxt.c24
-rw-r--r--drivers/net/phy/marvell.c209
-rw-r--r--drivers/net/phy/marvell10g.c13
-rw-r--r--drivers/net/phy/mdio-i2c.c28
-rw-r--r--drivers/net/phy/mdio_bus.c267
-rw-r--r--drivers/net/phy/mii_timestamper.c125
-rw-r--r--drivers/net/phy/mscc.c1139
-rw-r--r--drivers/net/phy/mscc_fc_buffer.h64
-rw-r--r--drivers/net/phy/mscc_mac.h159
-rw-r--r--drivers/net/phy/mscc_macsec.h266
-rw-r--r--drivers/net/phy/phy-core.c4
-rw-r--r--drivers/net/phy/phy.c29
-rw-r--r--drivers/net/phy/phy_device.c115
-rw-r--r--drivers/net/phy/phylink.c345
-rw-r--r--drivers/net/phy/realtek.c59
-rw-r--r--drivers/net/phy/sfp-bus.c124
-rw-r--r--drivers/net/phy/sfp.c199
-rw-r--r--drivers/net/phy/sfp.h2
-rw-r--r--drivers/net/phy/uPD60620.c7
-rw-r--r--drivers/net/ppp/ppp_async.c18
-rw-r--r--drivers/net/ppp/ppp_generic.c2
-rw-r--r--drivers/net/ppp/pptp.c5
-rw-r--r--drivers/net/slip/slip.c14
-rw-r--r--drivers/net/tap.c14
-rw-r--r--drivers/net/tun.c8
-rw-r--r--drivers/net/usb/ax88172a.c13
-rw-r--r--drivers/net/usb/catc.c2
-rw-r--r--drivers/net/usb/ch9200.c24
-rw-r--r--drivers/net/usb/hso.c2
-rw-r--r--drivers/net/usb/ipheth.c2
-rw-r--r--drivers/net/usb/kaweth.c2
-rw-r--r--drivers/net/usb/lan78xx.c28
-rw-r--r--drivers/net/usb/pegasus.c2
-rw-r--r--drivers/net/usb/qmi_wwan.c1
-rw-r--r--drivers/net/usb/r8152.c142
-rw-r--r--drivers/net/usb/rtl8150.c2
-rw-r--r--drivers/net/usb/usbnet.c2
-rw-r--r--drivers/net/veth.c8
-rw-r--r--drivers/net/virtio_net.c4
-rw-r--r--drivers/net/vmxnet3/vmxnet3_drv.c2
-rw-r--r--drivers/net/vmxnet3/vmxnet3_ethtool.c4
-rw-r--r--drivers/net/vxlan.c21
-rw-r--r--drivers/net/wan/Kconfig3
-rw-r--r--drivers/net/wan/cosa.c4
-rw-r--r--drivers/net/wan/farsync.c2
-rw-r--r--drivers/net/wan/fsl_ucc_hdlc.c18
-rw-r--r--drivers/net/wan/hdlc_cisco.c4
-rw-r--r--drivers/net/wan/hdlc_x25.c93
-rw-r--r--drivers/net/wan/ixp4xx_hss.c39
-rw-r--r--drivers/net/wan/lapbether.c2
-rw-r--r--drivers/net/wan/lmc/lmc_main.c4
-rw-r--r--drivers/net/wan/wanxl.c4
-rw-r--r--drivers/net/wan/x25_asy.c2
-rw-r--r--drivers/net/wimax/i2400m/netdev.c2
-rw-r--r--drivers/net/wireguard/Makefile18
-rw-r--r--drivers/net/wireguard/allowedips.c376
-rw-r--r--drivers/net/wireguard/allowedips.h59
-rw-r--r--drivers/net/wireguard/cookie.c236
-rw-r--r--drivers/net/wireguard/cookie.h59
-rw-r--r--drivers/net/wireguard/device.c458
-rw-r--r--drivers/net/wireguard/device.h65
-rw-r--r--drivers/net/wireguard/main.c63
-rw-r--r--drivers/net/wireguard/messages.h128
-rw-r--r--drivers/net/wireguard/netlink.c642
-rw-r--r--drivers/net/wireguard/netlink.h12
-rw-r--r--drivers/net/wireguard/noise.c828
-rw-r--r--drivers/net/wireguard/noise.h137
-rw-r--r--drivers/net/wireguard/peer.c240
-rw-r--r--drivers/net/wireguard/peer.h83
-rw-r--r--drivers/net/wireguard/peerlookup.c221
-rw-r--r--drivers/net/wireguard/peerlookup.h64
-rw-r--r--drivers/net/wireguard/queueing.c53
-rw-r--r--drivers/net/wireguard/queueing.h194
-rw-r--r--drivers/net/wireguard/ratelimiter.c223
-rw-r--r--drivers/net/wireguard/ratelimiter.h19
-rw-r--r--drivers/net/wireguard/receive.c595
-rw-r--r--drivers/net/wireguard/selftest/allowedips.c683
-rw-r--r--drivers/net/wireguard/selftest/counter.c104
-rw-r--r--drivers/net/wireguard/selftest/ratelimiter.c226
-rw-r--r--drivers/net/wireguard/send.c413
-rw-r--r--drivers/net/wireguard/socket.c438
-rw-r--r--drivers/net/wireguard/socket.h44
-rw-r--r--drivers/net/wireguard/timers.c243
-rw-r--r--drivers/net/wireguard/timers.h31
-rw-r--r--drivers/net/wireguard/version.h1
-rw-r--r--drivers/net/wireless/ath/Kconfig1
-rw-r--r--drivers/net/wireless/ath/Makefile1
-rw-r--r--drivers/net/wireless/ath/ar5523/ar5523.c4
-rw-r--r--drivers/net/wireless/ath/ath10k/ahb.c4
-rw-r--r--drivers/net/wireless/ath/ath10k/bmi.c52
-rw-r--r--drivers/net/wireless/ath/ath10k/bmi.h10
-rw-r--r--drivers/net/wireless/ath/ath10k/core.c15
-rw-r--r--drivers/net/wireless/ath/ath10k/core.h2
-rw-r--r--drivers/net/wireless/ath/ath10k/debug.c2
-rw-r--r--drivers/net/wireless/ath/ath10k/htc.c10
-rw-r--r--drivers/net/wireless/ath/ath10k/htc.h23
-rw-r--r--drivers/net/wireless/ath/ath10k/htt.h3
-rw-r--r--drivers/net/wireless/ath/ath10k/htt_rx.c65
-rw-r--r--drivers/net/wireless/ath/ath10k/hw.h5
-rw-r--r--drivers/net/wireless/ath/ath10k/mac.c6
-rw-r--r--drivers/net/wireless/ath/ath10k/pci.c21
-rw-r--r--drivers/net/wireless/ath/ath10k/qmi.c23
-rw-r--r--drivers/net/wireless/ath/ath10k/qmi.h1
-rw-r--r--drivers/net/wireless/ath/ath10k/sdio.c232
-rw-r--r--drivers/net/wireless/ath/ath10k/sdio.h21
-rw-r--r--drivers/net/wireless/ath/ath10k/snoc.c24
-rw-r--r--drivers/net/wireless/ath/ath10k/testmode.c4
-rw-r--r--drivers/net/wireless/ath/ath10k/trace.h6
-rw-r--r--drivers/net/wireless/ath/ath10k/wmi-tlv.c10
-rw-r--r--drivers/net/wireless/ath/ath10k/wmi.c20
-rw-r--r--drivers/net/wireless/ath/ath10k/wmi.h2
-rw-r--r--drivers/net/wireless/ath/ath11k/Kconfig35
-rw-r--r--drivers/net/wireless/ath/ath11k/Makefile25
-rw-r--r--drivers/net/wireless/ath/ath11k/ahb.c1003
-rw-r--r--drivers/net/wireless/ath/ath11k/ahb.h35
-rw-r--r--drivers/net/wireless/ath/ath11k/ce.c808
-rw-r--r--drivers/net/wireless/ath/ath11k/ce.h183
-rw-r--r--drivers/net/wireless/ath/ath11k/core.c795
-rw-r--r--drivers/net/wireless/ath/ath11k/core.h826
-rw-r--r--drivers/net/wireless/ath/ath11k/debug.c1075
-rw-r--r--drivers/net/wireless/ath/ath11k/debug.h279
-rw-r--r--drivers/net/wireless/ath/ath11k/debug_htt_stats.c4570
-rw-r--r--drivers/net/wireless/ath/ath11k/debug_htt_stats.h1662
-rw-r--r--drivers/net/wireless/ath/ath11k/debugfs_sta.c543
-rw-r--r--drivers/net/wireless/ath/ath11k/dp.c899
-rw-r--r--drivers/net/wireless/ath/ath11k/dp.h1535
-rw-r--r--drivers/net/wireless/ath/ath11k/dp_rx.c4195
-rw-r--r--drivers/net/wireless/ath/ath11k/dp_rx.h86
-rw-r--r--drivers/net/wireless/ath/ath11k/dp_tx.c962
-rw-r--r--drivers/net/wireless/ath/ath11k/dp_tx.h40
-rw-r--r--drivers/net/wireless/ath/ath11k/hal.c1124
-rw-r--r--drivers/net/wireless/ath/ath11k/hal.h897
-rw-r--r--drivers/net/wireless/ath/ath11k/hal_desc.h2468
-rw-r--r--drivers/net/wireless/ath/ath11k/hal_rx.c1190
-rw-r--r--drivers/net/wireless/ath/ath11k/hal_rx.h332
-rw-r--r--drivers/net/wireless/ath/ath11k/hal_tx.c154
-rw-r--r--drivers/net/wireless/ath/ath11k/hal_tx.h69
-rw-r--r--drivers/net/wireless/ath/ath11k/htc.c773
-rw-r--r--drivers/net/wireless/ath/ath11k/htc.h313
-rw-r--r--drivers/net/wireless/ath/ath11k/hw.h127
-rw-r--r--drivers/net/wireless/ath/ath11k/mac.c5907
-rw-r--r--drivers/net/wireless/ath/ath11k/mac.h147
-rw-r--r--drivers/net/wireless/ath/ath11k/peer.c236
-rw-r--r--drivers/net/wireless/ath/ath11k/peer.h35
-rw-r--r--drivers/net/wireless/ath/ath11k/qmi.c2433
-rw-r--r--drivers/net/wireless/ath/ath11k/qmi.h445
-rw-r--r--drivers/net/wireless/ath/ath11k/reg.c702
-rw-r--r--drivers/net/wireless/ath/ath11k/reg.h35
-rw-r--r--drivers/net/wireless/ath/ath11k/rx_desc.h1212
-rw-r--r--drivers/net/wireless/ath/ath11k/testmode.c199
-rw-r--r--drivers/net/wireless/ath/ath11k/testmode.h29
-rw-r--r--drivers/net/wireless/ath/ath11k/testmode_i.h50
-rw-r--r--drivers/net/wireless/ath/ath11k/trace.c9
-rw-r--r--drivers/net/wireless/ath/ath11k/trace.h113
-rw-r--r--drivers/net/wireless/ath/ath11k/wmi.c5810
-rw-r--r--drivers/net/wireless/ath/ath11k/wmi.h4764
-rw-r--r--drivers/net/wireless/ath/ath5k/ahb.c2
-rw-r--r--drivers/net/wireless/ath/ath9k/ahb.c2
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9003_aic.c2
-rw-r--r--drivers/net/wireless/ath/ath9k/hif_usb.c2
-rw-r--r--drivers/net/wireless/ath/regd.c10
-rw-r--r--drivers/net/wireless/ath/wcn36xx/main.c1
-rw-r--r--drivers/net/wireless/ath/wcn36xx/smd.c2
-rw-r--r--drivers/net/wireless/ath/wil6210/cfg80211.c34
-rw-r--r--drivers/net/wireless/ath/wil6210/ethtool.c43
-rw-r--r--drivers/net/wireless/ath/wil6210/main.c12
-rw-r--r--drivers/net/wireless/ath/wil6210/txrx.c34
-rw-r--r--drivers/net/wireless/ath/wil6210/txrx_edma.c13
-rw-r--r--drivers/net/wireless/ath/wil6210/txrx_edma.h8
-rw-r--r--drivers/net/wireless/ath/wil6210/wil6210.h6
-rw-r--r--drivers/net/wireless/ath/wil6210/wil_crash_dump.c17
-rw-r--r--drivers/net/wireless/ath/wil6210/wmi.c88
-rw-r--r--drivers/net/wireless/ath/wil6210/wmi.h33
-rw-r--r--drivers/net/wireless/atmel/at76c50x-usb.c2
-rw-r--r--drivers/net/wireless/broadcom/b43legacy/main.c5
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcmsdh.c18
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c165
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/chip.c54
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/chip.h1
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c70
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.h2
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/feature.c1
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/feature.h2
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwil.h2
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.c2
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/msgbuf.c2
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/p2p.c3
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c6
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c30
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.h1
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/usb.c5
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmsmac/main.c2
-rw-r--r--drivers/net/wireless/cisco/airo.c20
-rw-r--r--drivers/net/wireless/intel/ipw2x00/ipw2100.c11
-rw-r--r--drivers/net/wireless/intel/ipw2x00/ipw2200.c5
-rw-r--r--drivers/net/wireless/intel/iwlegacy/3945-mac.c5
-rw-r--r--drivers/net/wireless/intel/iwlegacy/4965-mac.c8
-rw-r--r--drivers/net/wireless/intel/iwlegacy/4965.c17
-rw-r--r--drivers/net/wireless/intel/iwlegacy/common.c2
-rw-r--r--drivers/net/wireless/intel/iwlegacy/common.h3
-rw-r--r--drivers/net/wireless/intel/iwlwifi/cfg/1000.c6
-rw-r--r--drivers/net/wireless/intel/iwlwifi/cfg/2000.c12
-rw-r--r--drivers/net/wireless/intel/iwlwifi/cfg/22000.c81
-rw-r--r--drivers/net/wireless/intel/iwlwifi/cfg/5000.c7
-rw-r--r--drivers/net/wireless/intel/iwlwifi/cfg/6000.c19
-rw-r--r--drivers/net/wireless/intel/iwlwifi/cfg/7000.c3
-rw-r--r--drivers/net/wireless/intel/iwlwifi/cfg/8000.c3
-rw-r--r--drivers/net/wireless/intel/iwlwifi/cfg/9000.c12
-rw-r--r--drivers/net/wireless/intel/iwlwifi/dvm/dev.h5
-rw-r--r--drivers/net/wireless/intel/iwlwifi/dvm/devices.c6
-rw-r--r--drivers/net/wireless/intel/iwlwifi/dvm/main.c4
-rw-r--r--drivers/net/wireless/intel/iwlwifi/dvm/tx.c3
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/acpi.c10
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/location.h144
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/scan.h41
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/tx.h7
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/dbg.c16
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/debugfs.c29
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/img.h2
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/runtime.h13
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-config.h68
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-context-info.h20
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-csr.h29
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-dbg-tlv.c74
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-dbg-tlv.h1
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-drv.c29
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-fh.h5
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-io.c37
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-modparams.h2
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c75
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.h9
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-prph.h10
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-trans.c10
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-trans.h48
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/constants.h1
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/d3.c6
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c2
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/ftm-initiator.c239
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/ftm-responder.c95
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/fw.c35
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c157
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/mvm.h9
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/nvm.c14
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/ops.c2
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/power.c27
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c19
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/scan.c49
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/tx.c30
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info-gen3.c2
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info.c56
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/drv.c161
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/internal.h40
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/rx.c112
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/trans-gen2.c5
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/trans.c98
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c224
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/tx.c80
-rw-r--r--drivers/net/wireless/intersil/hostap/hostap_ap.c2
-rw-r--r--drivers/net/wireless/intersil/hostap/hostap_main.c2
-rw-r--r--drivers/net/wireless/intersil/orinoco/main.c2
-rw-r--r--drivers/net/wireless/intersil/orinoco/orinoco.h2
-rw-r--r--drivers/net/wireless/intersil/orinoco/orinoco_usb.c7
-rw-r--r--drivers/net/wireless/intersil/prism54/islpci_eth.c2
-rw-r--r--drivers/net/wireless/intersil/prism54/islpci_eth.h2
-rw-r--r--drivers/net/wireless/marvell/libertas/cfg.c16
-rw-r--r--drivers/net/wireless/marvell/mwifiex/main.c2
-rw-r--r--drivers/net/wireless/marvell/mwifiex/main.h13
-rw-r--r--drivers/net/wireless/marvell/mwifiex/tdls.c75
-rw-r--r--drivers/net/wireless/mediatek/mt76/agg-rx.c4
-rw-r--r--drivers/net/wireless/mediatek/mt76/airtime.c2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mac80211.c3
-rw-r--r--drivers/net/wireless/quantenna/qtnfmac/cfg80211.c9
-rw-r--r--drivers/net/wireless/quantenna/qtnfmac/commands.c13
-rw-r--r--drivers/net/wireless/quantenna/qtnfmac/commands.h2
-rw-r--r--drivers/net/wireless/quantenna/qtnfmac/core.c73
-rw-r--r--drivers/net/wireless/quantenna/qtnfmac/core.h3
-rw-r--r--drivers/net/wireless/quantenna/qtnfmac/qlink.h52
-rw-r--r--drivers/net/wireless/ralink/rt2x00/rt2800lib.c21
-rw-r--r--drivers/net/wireless/ralink/rt2x00/rt2800pci.c1
-rw-r--r--drivers/net/wireless/ralink/rt2x00/rt2800soc.c1
-rw-r--r--drivers/net/wireless/ralink/rt2x00/rt2800usb.c1
-rw-r--r--drivers/net/wireless/ralink/rt2x00/rt2x00.h2
-rw-r--r--drivers/net/wireless/ralink/rt2x00/rt2x00dev.c11
-rw-r--r--drivers/net/wireless/ralink/rt2x00/rt2x00mac.c20
-rw-r--r--drivers/net/wireless/ralink/rt2x00/rt2x00usb.c2
-rw-r--r--drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c2
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/base.h4
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8192e2ant.c2
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtcoutsrc.c2
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtcoutsrc.h2
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/pci.c10
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/ps.c20
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8188ee/fw.c12
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8188ee/fw.h103
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8188ee/hw.c11
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8188ee/phy.c2
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8188ee/sw.c7
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8188ee/sw.h12
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192c/dm_common.c19
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192c/fw_common.h14
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192ce/hw.c11
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192ce/sw.c5
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192ce/sw.h15
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192ce/trx.c48
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192cu/mac.c49
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192cu/sw.c35
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192cu/sw.h27
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192de/fw.h61
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192de/trx.c299
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192de/trx.h853
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192ee/dm.c118
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192ee/fw.h36
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192ee/sw.c7
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192ee/sw.h11
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192se/sw.c1
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192se/sw.h13
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8723ae/dm.c112
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8723ae/fw.h14
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8723ae/sw.c7
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8723ae/sw.h13
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8723be/fw.h30
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8723be/sw.c7
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8723be/sw.h13
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8821ae/dm.c118
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8821ae/fw.h102
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8821ae/sw.c7
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8821ae/sw.h12
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/wifi.h115
-rw-r--r--drivers/net/wireless/realtek/rtw88/Makefile1
-rw-r--r--drivers/net/wireless/realtek/rtw88/debug.h1
-rw-r--r--drivers/net/wireless/realtek/rtw88/fw.c389
-rw-r--r--drivers/net/wireless/realtek/rtw88/fw.h186
-rw-r--r--drivers/net/wireless/realtek/rtw88/hci.h6
-rw-r--r--drivers/net/wireless/realtek/rtw88/mac.c12
-rw-r--r--drivers/net/wireless/realtek/rtw88/mac80211.c46
-rw-r--r--drivers/net/wireless/realtek/rtw88/main.c91
-rw-r--r--drivers/net/wireless/realtek/rtw88/main.h72
-rw-r--r--drivers/net/wireless/realtek/rtw88/pci.c60
-rw-r--r--drivers/net/wireless/realtek/rtw88/pci.h2
-rw-r--r--drivers/net/wireless/realtek/rtw88/phy.c2
-rw-r--r--drivers/net/wireless/realtek/rtw88/ps.c4
-rw-r--r--drivers/net/wireless/realtek/rtw88/reg.h29
-rw-r--r--drivers/net/wireless/realtek/rtw88/rtw8822c.c24
-rw-r--r--drivers/net/wireless/realtek/rtw88/util.h2
-rw-r--r--drivers/net/wireless/realtek/rtw88/wow.c890
-rw-r--r--drivers/net/wireless/realtek/rtw88/wow.h58
-rw-r--r--drivers/net/wireless/rsi/rsi_91x_hal.c12
-rw-r--r--drivers/net/wireless/rsi/rsi_91x_usb.c49
-rw-r--r--drivers/net/wireless/st/cw1200/txrx.c2
-rw-r--r--drivers/net/wireless/ti/wlcore/cmd.c6
-rw-r--r--drivers/net/wireless/ti/wlcore/cmd.h2
-rw-r--r--drivers/net/wireless/ti/wlcore/main.c23
-rw-r--r--drivers/net/wireless/ti/wlcore/wlcore_i.h1
-rw-r--r--drivers/net/wireless/wl3501_cs.c2
-rw-r--r--drivers/net/wireless/zydas/zd1201.c2
-rw-r--r--drivers/net/wireless/zydas/zd1211rw/zd_usb.c2
-rw-r--r--drivers/net/xen-netback/hash.c6
-rw-r--r--drivers/net/xen-netback/interface.c10
-rw-r--r--drivers/net/xen-netback/netback.c20
-rw-r--r--drivers/net/xen-netback/xenbus.c349
-rw-r--r--drivers/nfc/pn533/i2c.c1
-rw-r--r--drivers/nfc/pn533/usb.c2
-rw-r--r--drivers/nfc/pn544/pn544.c2
-rw-r--r--drivers/nfc/port100.c2
-rw-r--r--drivers/nvdimm/pmem.c6
-rw-r--r--drivers/nvme/host/Kconfig1
-rw-r--r--drivers/nvme/host/hwmon.c13
-rw-r--r--drivers/nvmem/Kconfig8
-rw-r--r--drivers/nvmem/Makefile2
-rw-r--r--drivers/nvmem/core.c8
-rw-r--r--drivers/nvmem/imx-ocotp-scu.c16
-rw-r--r--drivers/nvmem/imx-ocotp.c79
-rw-r--r--drivers/nvmem/qcom-spmi-sdam.c192
-rw-r--r--drivers/of/Kconfig4
-rw-r--r--drivers/of/address.c6
-rw-r--r--drivers/of/base.c130
-rw-r--r--drivers/of/dynamic.c2
-rw-r--r--drivers/of/of_mdio.c30
-rw-r--r--drivers/of/of_private.h6
-rw-r--r--drivers/of/overlay.c11
-rw-r--r--drivers/opp/core.c48
-rw-r--r--drivers/opp/of.c31
-rw-r--r--drivers/opp/opp.h6
-rw-r--r--drivers/opp/ti-opp-supply.c2
-rw-r--r--drivers/parisc/ccio-dma.c2
-rw-r--r--drivers/parisc/dino.c2
-rw-r--r--drivers/parisc/eisa.c4
-rw-r--r--drivers/parisc/iosapic.c2
-rw-r--r--drivers/parisc/lba_pci.c8
-rw-r--r--drivers/parisc/sba_iommu.c4
-rw-r--r--drivers/pci/controller/dwc/pci-dra7xx.c2
-rw-r--r--drivers/pci/controller/dwc/pcie-designware-ep.c2
-rw-r--r--drivers/pci/msi.c2
-rw-r--r--drivers/pci/pci.c2
-rw-r--r--drivers/pci/quirks.c23
-rw-r--r--drivers/perf/fsl_imx8_ddr_perf.c16
-rw-r--r--drivers/perf/hisilicon/hisi_uncore_pmu.c58
-rw-r--r--drivers/phy/Kconfig1
-rw-r--r--drivers/phy/Makefile1
-rw-r--r--drivers/phy/allwinner/Kconfig3
-rw-r--r--drivers/phy/broadcom/Kconfig4
-rw-r--r--drivers/phy/broadcom/Makefile2
-rw-r--r--drivers/phy/broadcom/phy-brcm-sata.c120
-rw-r--r--drivers/phy/broadcom/phy-brcm-usb-init-synopsys.c414
-rw-r--r--drivers/phy/broadcom/phy-brcm-usb-init.c226
-rw-r--r--drivers/phy/broadcom/phy-brcm-usb-init.h148
-rw-r--r--drivers/phy/broadcom/phy-brcm-usb.c269
-rw-r--r--drivers/phy/cadence/phy-cadence-sierra.c709
-rw-r--r--drivers/phy/hisilicon/Kconfig16
-rw-r--r--drivers/phy/intel/Kconfig9
-rw-r--r--drivers/phy/intel/Makefile2
-rw-r--r--drivers/phy/intel/phy-intel-emmc.c284
-rw-r--r--drivers/phy/lantiq/phy-lantiq-vrx200-pcie.c2
-rw-r--r--drivers/phy/marvell/Kconfig8
-rw-r--r--drivers/phy/marvell/phy-mvebu-cp110-comphy.c20
-rw-r--r--drivers/phy/mediatek/Kconfig25
-rw-r--r--drivers/phy/phy-core.c49
-rw-r--r--drivers/phy/qualcomm/phy-qcom-apq8064-sata.c2
-rw-r--r--drivers/phy/qualcomm/phy-qcom-qmp.c7
-rw-r--r--drivers/phy/qualcomm/phy-qcom-qmp.h2
-rw-r--r--drivers/phy/rockchip/Kconfig1
-rw-r--r--drivers/phy/rockchip/phy-rockchip-inno-dsidphy.c319
-rw-r--r--drivers/phy/samsung/Kconfig6
-rw-r--r--drivers/phy/ti/Kconfig20
-rw-r--r--drivers/phy/ti/Makefile1
-rw-r--r--drivers/phy/ti/phy-j721e-wiz.c959
-rw-r--r--drivers/phy/ti/phy-ti-pipe3.c18
-rw-r--r--drivers/pinctrl/actions/pinctrl-s700.c1
-rw-r--r--drivers/pinctrl/aspeed/pinctrl-aspeed-g4.c170
-rw-r--r--drivers/pinctrl/aspeed/pinctrl-aspeed-g5.c212
-rw-r--r--drivers/pinctrl/aspeed/pinctrl-aspeed-g6.c387
-rw-r--r--drivers/pinctrl/aspeed/pinctrl-aspeed.c50
-rw-r--r--drivers/pinctrl/aspeed/pinctrl-aspeed.h38
-rw-r--r--drivers/pinctrl/aspeed/pinmux-aspeed.h1
-rw-r--r--drivers/pinctrl/bcm/pinctrl-iproc-gpio.c10
-rw-r--r--drivers/pinctrl/bcm/pinctrl-ns2-mux.c2
-rw-r--r--drivers/pinctrl/bcm/pinctrl-nsp-mux.c2
-rw-r--r--drivers/pinctrl/cirrus/pinctrl-madera-core.c1
-rw-r--r--drivers/pinctrl/core.c74
-rw-r--r--drivers/pinctrl/core.h4
-rw-r--r--drivers/pinctrl/devicetree.c4
-rw-r--r--drivers/pinctrl/freescale/Kconfig7
-rw-r--r--drivers/pinctrl/freescale/Makefile1
-rw-r--r--drivers/pinctrl/freescale/pinctrl-imx1-core.c2
-rw-r--r--drivers/pinctrl/freescale/pinctrl-imx8mp.c345
-rw-r--r--drivers/pinctrl/intel/Kconfig13
-rw-r--r--drivers/pinctrl/intel/Makefile1
-rw-r--r--drivers/pinctrl/intel/pinctrl-baytrail.c311
-rw-r--r--drivers/pinctrl/intel/pinctrl-cherryview.c5
-rw-r--r--drivers/pinctrl/intel/pinctrl-intel.c101
-rw-r--r--drivers/pinctrl/intel/pinctrl-intel.h44
-rw-r--r--drivers/pinctrl/intel/pinctrl-lynxpoint.c975
-rw-r--r--drivers/pinctrl/intel/pinctrl-sunrisepoint.c2
-rw-r--r--drivers/pinctrl/intel/pinctrl-tigerlake.c547
-rw-r--r--drivers/pinctrl/mediatek/pinctrl-mtk-mt2712.h2
-rw-r--r--drivers/pinctrl/meson/pinctrl-meson-axg-pmx.h2
-rw-r--r--drivers/pinctrl/meson/pinctrl-meson8b.c7
-rw-r--r--drivers/pinctrl/mvebu/pinctrl-armada-37xx.c16
-rw-r--r--drivers/pinctrl/nuvoton/pinctrl-npcm7xx.c2
-rw-r--r--drivers/pinctrl/pinctrl-amd.c2
-rw-r--r--drivers/pinctrl/pinctrl-artpec6.c2
-rw-r--r--drivers/pinctrl/pinctrl-ingenic.c637
-rw-r--r--drivers/pinctrl/pinctrl-rza1.c4
-rw-r--r--drivers/pinctrl/qcom/pinctrl-msm.c5
-rw-r--r--drivers/pinctrl/qcom/pinctrl-msm8976.c2
-rw-r--r--drivers/pinctrl/qcom/pinctrl-sc7180.c78
-rw-r--r--drivers/pinctrl/qcom/pinctrl-spmi-gpio.c2
-rw-r--r--drivers/pinctrl/qcom/pinctrl-ssbi-gpio.c4
-rw-r--r--drivers/pinctrl/qcom/pinctrl-ssbi-mpp.c2
-rw-r--r--drivers/pinctrl/samsung/Kconfig16
-rw-r--r--drivers/pinctrl/sh-pfc/Kconfig12
-rw-r--r--drivers/pinctrl/sh-pfc/Makefile4
-rw-r--r--drivers/pinctrl/sh-pfc/core.c57
-rw-r--r--drivers/pinctrl/sh-pfc/gpio.c11
-rw-r--r--drivers/pinctrl/sh-pfc/pfc-r8a7778.c4
-rw-r--r--drivers/pinctrl/sh-pfc/pfc-r8a77950.c (renamed from drivers/pinctrl/sh-pfc/pfc-r8a7795-es1.c)26
-rw-r--r--drivers/pinctrl/sh-pfc/pfc-r8a77951.c (renamed from drivers/pinctrl/sh-pfc/pfc-r8a7795.c)39
-rw-r--r--drivers/pinctrl/sh-pfc/pfc-r8a77965.c6
-rw-r--r--drivers/pinctrl/sh-pfc/pfc-sh7264.c33
-rw-r--r--drivers/pinctrl/sh-pfc/pfc-sh7269.c39
-rw-r--r--drivers/pinctrl/sh-pfc/sh_pfc.h4
-rw-r--r--drivers/pinctrl/stm32/pinctrl-stm32.h2
-rw-r--r--drivers/pinctrl/sunxi/pinctrl-sun50i-h5.c13
-rw-r--r--drivers/pinctrl/tegra/pinctrl-tegra.c4
-rw-r--r--drivers/platform/chrome/wilco_ec/keyboard_leds.c28
-rw-r--r--drivers/platform/goldfish/goldfish_pipe.c35
-rw-r--r--drivers/platform/mellanox/mlxbf-tmfifo.c19
-rw-r--r--drivers/platform/mellanox/mlxreg-hotplug.c14
-rw-r--r--drivers/platform/x86/Kconfig18
-rw-r--r--drivers/platform/x86/Makefile1
-rw-r--r--drivers/platform/x86/asus-nb-wmi.c1
-rw-r--r--drivers/platform/x86/asus-wmi.c139
-rw-r--r--drivers/platform/x86/gpd-pocket-fan.c27
-rw-r--r--drivers/platform/x86/intel-hid.c1
-rw-r--r--drivers/platform/x86/intel-uncore-frequency.c437
-rw-r--r--drivers/platform/x86/intel_atomisp2_pm.c25
-rw-r--r--drivers/platform/x86/intel_cht_int33fe_typec.c81
-rw-r--r--drivers/platform/x86/intel_ips.h2
-rw-r--r--drivers/platform/x86/intel_menlow.c9
-rw-r--r--drivers/platform/x86/intel_mid_powerbtn.c5
-rw-r--r--drivers/platform/x86/intel_pmc_core.c141
-rw-r--r--drivers/platform/x86/intel_pmc_core.h6
-rw-r--r--drivers/platform/x86/intel_pmc_core_pltdrv.c2
-rw-r--r--drivers/platform/x86/intel_pmc_ipc.c114
-rw-r--r--drivers/platform/x86/intel_scu_ipc.c414
-rw-r--r--drivers/platform/x86/intel_speed_select_if/isst_if_common.c3
-rw-r--r--drivers/platform/x86/intel_telemetry_debugfs.c14
-rw-r--r--drivers/platform/x86/intel_telemetry_pltdrv.c64
-rw-r--r--drivers/platform/x86/mlx-platform.c564
-rw-r--r--drivers/platform/x86/pmc_atom.c2
-rw-r--r--drivers/platform/x86/samsung-laptop.c4
-rw-r--r--drivers/platform/x86/touchscreen_dmi.c82
-rw-r--r--drivers/pnp/isapnp/core.c25
-rw-r--r--drivers/power/avs/Kconfig16
-rw-r--r--drivers/power/avs/Makefile1
-rw-r--r--drivers/power/avs/qcom-cpr.c1793
-rw-r--r--drivers/power/reset/Kconfig16
-rw-r--r--drivers/power/reset/at91-sama5d2_shdwc.c72
-rw-r--r--drivers/power/reset/gpio-restart.c8
-rw-r--r--drivers/power/supply/Kconfig30
-rw-r--r--drivers/power/supply/ab8500_charger.c6
-rw-r--r--drivers/power/supply/ab8500_fg.c14
-rw-r--r--drivers/power/supply/abx500_chargalg.c2
-rw-r--r--drivers/power/supply/axp20x_ac_power.c131
-rw-r--r--drivers/power/supply/axp20x_usb_power.c219
-rw-r--r--drivers/power/supply/bq25890_charger.c103
-rw-r--r--drivers/power/supply/cros_usbpd-charger.c10
-rw-r--r--drivers/power/supply/ingenic-battery.c15
-rw-r--r--drivers/power/supply/ipaq_micro_battery.c6
-rw-r--r--drivers/power/supply/ltc2941-battery-gauge.c2
-rw-r--r--drivers/power/supply/max17040_battery.c122
-rw-r--r--drivers/power/supply/max17042_battery.c17
-rw-r--r--drivers/power/supply/max77650-charger.c7
-rw-r--r--drivers/power/supply/pda_power.c4
-rw-r--r--drivers/power/supply/power_supply_core.c67
-rw-r--r--drivers/power/supply/sbs-battery.c35
-rw-r--r--drivers/power/supply/sc27xx_fuel_gauge.c49
-rw-r--r--drivers/power/supply/ucs1002_power.c42
-rw-r--r--drivers/powercap/intel_rapl_common.c2
-rw-r--r--drivers/ptp/Kconfig24
-rw-r--r--drivers/ptp/Makefile4
-rw-r--r--drivers/ptp/idt8a340_reg.h2
-rw-r--r--drivers/ptp/ptp_clock.c10
-rw-r--r--drivers/ptp/ptp_clockmatrix.c79
-rw-r--r--drivers/ptp/ptp_ines.c852
-rw-r--r--drivers/ptp/ptp_qoriq.c15
-rw-r--r--drivers/regulator/Kconfig40
-rw-r--r--drivers/regulator/Makefile4
-rw-r--r--drivers/regulator/bd71828-regulator.c807
-rw-r--r--drivers/regulator/bd718x7-regulator.c34
-rw-r--r--drivers/regulator/core.c2
-rw-r--r--drivers/regulator/da9210-regulator.c5
-rw-r--r--drivers/regulator/da9211-regulator.c5
-rw-r--r--drivers/regulator/helpers.c14
-rw-r--r--drivers/regulator/isl9305.c5
-rw-r--r--drivers/regulator/lp3971.c5
-rw-r--r--drivers/regulator/ltc3676.c5
-rw-r--r--drivers/regulator/mp8859.c156
-rw-r--r--drivers/regulator/mpq7920.c330
-rw-r--r--drivers/regulator/mpq7920.h69
-rw-r--r--drivers/regulator/mt6311-regulator.c5
-rw-r--r--drivers/regulator/pv88060-regulator.c5
-rw-r--r--drivers/regulator/pv88090-regulator.c5
-rw-r--r--drivers/regulator/rk808-regulator.c2
-rw-r--r--drivers/regulator/s2mpa01.c2
-rw-r--r--drivers/regulator/s2mps11.c2
-rw-r--r--drivers/regulator/s5m8767.c2
-rw-r--r--drivers/regulator/slg51000-regulator.c5
-rw-r--r--drivers/regulator/sy8106a-regulator.c5
-rw-r--r--drivers/regulator/sy8824x.c5
-rw-r--r--drivers/regulator/ti-abb-regulator.c4
-rw-r--r--drivers/regulator/tps65132-regulator.c5
-rw-r--r--drivers/regulator/vctrl-regulator.c38
-rw-r--r--drivers/regulator/vqmmc-ipq4019-regulator.c101
-rw-r--r--drivers/rtc/rtc-m48t35.c11
-rw-r--r--drivers/rtc/rtc-sh.c2
-rw-r--r--drivers/s390/crypto/ap_bus.c2
-rw-r--r--drivers/s390/crypto/ap_bus.h2
-rw-r--r--drivers/s390/crypto/ap_queue.c5
-rw-r--r--drivers/s390/crypto/zcrypt_ccamisc.c4
-rw-r--r--drivers/s390/crypto/zcrypt_cex2a.c1
-rw-r--r--drivers/s390/crypto/zcrypt_cex2c.c2
-rw-r--r--drivers/s390/crypto/zcrypt_cex4.c1
-rw-r--r--drivers/s390/net/qeth_core.h98
-rw-r--r--drivers/s390/net/qeth_core_main.c522
-rw-r--r--drivers/s390/net/qeth_core_mpc.h21
-rw-r--r--drivers/s390/net/qeth_core_sys.c2
-rw-r--r--drivers/s390/net/qeth_l2.h1
-rw-r--r--drivers/s390/net/qeth_l2_main.c166
-rw-r--r--drivers/s390/net/qeth_l2_sys.c34
-rw-r--r--drivers/s390/net/qeth_l3.h6
-rw-r--r--drivers/s390/net/qeth_l3_main.c306
-rw-r--r--drivers/s390/net/qeth_l3_sys.c172
-rw-r--r--drivers/scsi/BusLogic.c110
-rw-r--r--drivers/scsi/Kconfig1
-rw-r--r--drivers/scsi/aic7xxx/aic79xx_osm_pci.c2
-rw-r--r--drivers/scsi/aic7xxx/aic7xxx_core.c2
-rw-r--r--drivers/scsi/aic7xxx/aic7xxx_osm_pci.c2
-rw-r--r--drivers/scsi/aic94xx/aic94xx_init.c3
-rw-r--r--drivers/scsi/arcmsr/arcmsr_hba.c2
-rw-r--r--drivers/scsi/be2iscsi/be_main.c6
-rw-r--r--drivers/scsi/bnx2fc/bnx2fc_hwi.c2
-rw-r--r--drivers/scsi/bnx2i/bnx2i_hwi.c4
-rw-r--r--drivers/scsi/ch.c9
-rw-r--r--drivers/scsi/csiostor/csio_init.c2
-rw-r--r--drivers/scsi/csiostor/csio_scsi.c2
-rw-r--r--drivers/scsi/esp_scsi.c22
-rw-r--r--drivers/scsi/esp_scsi.h41
-rw-r--r--drivers/scsi/fnic/fnic_scsi.c3
-rw-r--r--drivers/scsi/fnic/vnic_dev.c20
-rw-r--r--drivers/scsi/hisi_sas/hisi_sas.h6
-rw-r--r--drivers/scsi/hisi_sas/hisi_sas_main.c74
-rw-r--r--drivers/scsi/hisi_sas/hisi_sas_v1_hw.c3
-rw-r--r--drivers/scsi/hisi_sas/hisi_sas_v2_hw.c41
-rw-r--r--drivers/scsi/hisi_sas/hisi_sas_v3_hw.c57
-rw-r--r--drivers/scsi/hpsa.c2
-rw-r--r--drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c5
-rw-r--r--drivers/scsi/initio.c2
-rw-r--r--drivers/scsi/ipr.c3
-rw-r--r--drivers/scsi/isci/init.c3
-rw-r--r--drivers/scsi/iscsi_tcp.c4
-rw-r--r--drivers/scsi/lasi700.c2
-rw-r--r--drivers/scsi/libsas/sas_ata.c2
-rw-r--r--drivers/scsi/libsas/sas_discover.c2
-rw-r--r--drivers/scsi/libsas/sas_expander.c4
-rw-r--r--drivers/scsi/libsas/sas_internal.h2
-rw-r--r--drivers/scsi/libsas/sas_port.c2
-rw-r--r--drivers/scsi/libsas/sas_scsi_host.c8
-rw-r--r--drivers/scsi/libsas/sas_task.c2
-rw-r--r--drivers/scsi/lpfc/lpfc.h2
-rw-r--r--drivers/scsi/lpfc/lpfc_attr.c9
-rw-r--r--drivers/scsi/lpfc/lpfc_crtn.h2
-rw-r--r--drivers/scsi/lpfc/lpfc_ct.c88
-rw-r--r--drivers/scsi/lpfc/lpfc_debugfs.c11
-rw-r--r--drivers/scsi/lpfc/lpfc_hbadisc.c5
-rw-r--r--drivers/scsi/lpfc/lpfc_hw4.h3
-rw-r--r--drivers/scsi/lpfc/lpfc_init.c12
-rw-r--r--drivers/scsi/lpfc/lpfc_nportdisc.c108
-rw-r--r--drivers/scsi/lpfc/lpfc_scsi.c4
-rw-r--r--drivers/scsi/lpfc/lpfc_sli.c25
-rw-r--r--drivers/scsi/lpfc/lpfc_version.h2
-rw-r--r--drivers/scsi/megaraid/megaraid_mbox.c2
-rw-r--r--drivers/scsi/megaraid/megaraid_sas.h17
-rw-r--r--drivers/scsi/megaraid/megaraid_sas_base.c97
-rw-r--r--drivers/scsi/megaraid/megaraid_sas_fusion.c134
-rw-r--r--drivers/scsi/megaraid/megaraid_sas_fusion.h18
-rw-r--r--drivers/scsi/mpt3sas/mpi/mpi2.h6
-rw-r--r--drivers/scsi/mpt3sas/mpi/mpi2_cnfg.h19
-rw-r--r--drivers/scsi/mpt3sas/mpi/mpi2_image.h7
-rw-r--r--drivers/scsi/mpt3sas/mpi/mpi2_ioc.h8
-rw-r--r--drivers/scsi/mpt3sas/mpt3sas_base.c340
-rw-r--r--drivers/scsi/mpt3sas/mpt3sas_base.h45
-rw-r--r--drivers/scsi/mpt3sas/mpt3sas_config.c39
-rw-r--r--drivers/scsi/mpt3sas/mpt3sas_ctl.c46
-rw-r--r--drivers/scsi/mpt3sas/mpt3sas_scsih.c220
-rw-r--r--drivers/scsi/mpt3sas/mpt3sas_transport.c11
-rw-r--r--drivers/scsi/mvsas/mv_init.c3
-rw-r--r--drivers/scsi/myrb.c2
-rw-r--r--drivers/scsi/myrb.h4
-rw-r--r--drivers/scsi/myrs.c2
-rw-r--r--drivers/scsi/myrs.h4
-rw-r--r--drivers/scsi/pcmcia/nsp_cs.c2
-rw-r--r--drivers/scsi/pm8001/pm8001_init.c3
-rw-r--r--drivers/scsi/qla1280.c20
-rw-r--r--drivers/scsi/qla1280.h2
-rw-r--r--drivers/scsi/qla2xxx/qla_bsg.c11
-rw-r--r--drivers/scsi/qla2xxx/qla_dbg.c2
-rw-r--r--drivers/scsi/qla2xxx/qla_def.h22
-rw-r--r--drivers/scsi/qla2xxx/qla_fw.h50
-rw-r--r--drivers/scsi/qla2xxx/qla_gbl.h11
-rw-r--r--drivers/scsi/qla2xxx/qla_gs.c6
-rw-r--r--drivers/scsi/qla2xxx/qla_init.c175
-rw-r--r--drivers/scsi/qla2xxx/qla_inline.h24
-rw-r--r--drivers/scsi/qla2xxx/qla_iocb.c51
-rw-r--r--drivers/scsi/qla2xxx/qla_isr.c60
-rw-r--r--drivers/scsi/qla2xxx/qla_mbx.c3
-rw-r--r--drivers/scsi/qla2xxx/qla_mid.c6
-rw-r--r--drivers/scsi/qla2xxx/qla_mr.c20
-rw-r--r--drivers/scsi/qla2xxx/qla_nx.c15
-rw-r--r--drivers/scsi/qla2xxx/qla_os.c66
-rw-r--r--drivers/scsi/qla2xxx/qla_sup.c11
-rw-r--r--drivers/scsi/qla2xxx/qla_target.c47
-rw-r--r--drivers/scsi/qla2xxx/qla_target.h3
-rw-r--r--drivers/scsi/qla2xxx/qla_version.h2
-rw-r--r--drivers/scsi/qla4xxx/ql4_os.c2
-rw-r--r--drivers/scsi/scsi_ioctl.c54
-rw-r--r--drivers/scsi/scsi_lib.c2
-rw-r--r--drivers/scsi/scsi_logging.h2
-rw-r--r--drivers/scsi/scsi_transport_iscsi.c44
-rw-r--r--drivers/scsi/sd.c63
-rw-r--r--drivers/scsi/sd_zbc.c38
-rw-r--r--drivers/scsi/sg.c170
-rw-r--r--drivers/scsi/smartpqi/smartpqi_init.c2
-rw-r--r--drivers/scsi/sni_53c710.c2
-rw-r--r--drivers/scsi/sr.c53
-rw-r--r--drivers/scsi/st.c51
-rw-r--r--drivers/scsi/storvsc_drv.c4
-rw-r--r--drivers/scsi/sun3x_esp.c4
-rw-r--r--drivers/scsi/sym53c8xx_2/sym_nvram.c4
-rw-r--r--drivers/scsi/ufs/cdns-pltfrm.c107
-rw-r--r--drivers/scsi/ufs/ufs-mediatek.c206
-rw-r--r--drivers/scsi/ufs/ufs-mediatek.h32
-rw-r--r--drivers/scsi/ufs/ufs-sysfs.c22
-rw-r--r--drivers/scsi/ufs/ufs-sysfs.h4
-rw-r--r--drivers/scsi/ufs/ufs.h31
-rw-r--r--drivers/scsi/ufs/ufs_quirks.h9
-rw-r--r--drivers/scsi/ufs/ufshcd.c715
-rw-r--r--drivers/scsi/ufs/ufshcd.h34
-rw-r--r--drivers/scsi/ufs/unipro.h11
-rw-r--r--drivers/scsi/vmw_pvscsi.c20
-rw-r--r--drivers/scsi/zalon.c2
-rw-r--r--drivers/scsi/zorro_esp.c6
-rw-r--r--drivers/sh/clk/core.c2
-rw-r--r--drivers/sh/intc/core.c2
-rw-r--r--drivers/sh/intc/userimask.c2
-rw-r--r--drivers/siox/siox.h2
-rw-r--r--drivers/slimbus/qcom-ctrl.c2
-rw-r--r--drivers/slimbus/qcom-ngd-ctrl.c20
-rw-r--r--drivers/slimbus/slimbus.h2
-rw-r--r--drivers/soc/amlogic/meson-ee-pwrc.c24
-rw-r--r--drivers/soc/lantiq/fpi-bus.c4
-rw-r--r--drivers/soc/mediatek/mtk-cmdq-helper.c147
-rw-r--r--drivers/soc/tegra/flowctrl.c2
-rw-r--r--drivers/soc/tegra/fuse/fuse-tegra.c2
-rw-r--r--drivers/soc/tegra/fuse/tegra-apbmisc.c4
-rw-r--r--drivers/soc/tegra/pmc.c4
-rw-r--r--drivers/soc/ti/Kconfig11
-rw-r--r--drivers/soc/ti/Makefile1
-rw-r--r--drivers/soc/ti/k3-ringacc.c1157
-rw-r--r--drivers/soc/ti/wkup_m3_ipc.c4
-rw-r--r--drivers/soc/xilinx/xlnx_vcu.c4
-rw-r--r--drivers/soundwire/Kconfig9
-rw-r--r--drivers/soundwire/Makefile4
-rw-r--r--drivers/soundwire/bus.c55
-rw-r--r--drivers/soundwire/cadence_master.c66
-rw-r--r--drivers/soundwire/intel.c23
-rw-r--r--drivers/soundwire/intel.h13
-rw-r--r--drivers/soundwire/intel_init.c32
-rw-r--r--drivers/soundwire/qcom.c861
-rw-r--r--drivers/soundwire/stream.c8
-rw-r--r--drivers/spi/Kconfig9
-rw-r--r--drivers/spi/Makefile1
-rw-r--r--drivers/spi/spi-atmel.c29
-rw-r--r--drivers/spi/spi-bcm-qspi.c2
-rw-r--r--drivers/spi/spi-bcm2835.c47
-rw-r--r--drivers/spi/spi-bitbang.c21
-rw-r--r--drivers/spi/spi-dw-mid.c2
-rw-r--r--drivers/spi/spi-dw.c6
-rw-r--r--drivers/spi/spi-fsl-dspi.c12
-rw-r--r--drivers/spi/spi-fsl-lpspi.c36
-rw-r--r--drivers/spi/spi-fsl-qspi.c2
-rw-r--r--drivers/spi/spi-fsl-spi.c27
-rw-r--r--drivers/spi/spi-hisi-sfc-v3xx.c284
-rw-r--r--drivers/spi/spi-img-spfi.c18
-rw-r--r--drivers/spi/spi-imx.c4
-rw-r--r--drivers/spi/spi-jcore.c2
-rw-r--r--drivers/spi/spi-meson-spicc.c25
-rw-r--r--drivers/spi/spi-mxs.c6
-rw-r--r--drivers/spi/spi-npcm-fiu.c2
-rw-r--r--drivers/spi/spi-npcm-pspi.c57
-rw-r--r--drivers/spi/spi-oc-tiny.c50
-rw-r--r--drivers/spi/spi-pxa2xx.c31
-rw-r--r--drivers/spi/spi-qcom-qspi.c9
-rw-r--r--drivers/spi/spi-rspi.c23
-rw-r--r--drivers/spi/spi-sh-msiof.c471
-rw-r--r--drivers/spi/spi-sirf.c12
-rw-r--r--drivers/spi/spi-stm32-qspi.c30
-rw-r--r--drivers/spi/spi-stm32.c79
-rw-r--r--drivers/spi/spi-tegra114.c4
-rw-r--r--drivers/spi/spi-ti-qspi.c87
-rw-r--r--drivers/spi/spi-topcliff-pch.c4
-rw-r--r--drivers/spi/spi-uniphier.c227
-rw-r--r--drivers/spi/spi.c24
-rw-r--r--drivers/ssb/driver_extif.c2
-rw-r--r--drivers/ssb/driver_pcicore.c6
-rw-r--r--drivers/staging/Kconfig6
-rw-r--r--drivers/staging/Makefile5
-rw-r--r--drivers/staging/android/ashmem.c6
-rw-r--r--drivers/staging/android/ion/ion.c14
-rw-r--r--drivers/staging/axis-fifo/axis-fifo.c160
-rw-r--r--drivers/staging/comedi/drivers/das6402.c2
-rw-r--r--drivers/staging/comedi/drivers/ni_routes.c12
-rw-r--r--drivers/staging/exfat/Kconfig26
-rw-r--r--drivers/staging/exfat/Makefile2
-rw-r--r--drivers/staging/exfat/exfat.h93
-rw-r--r--drivers/staging/exfat/exfat_blkdev.c16
-rw-r--r--drivers/staging/exfat/exfat_core.c211
-rw-r--r--drivers/staging/exfat/exfat_super.c175
-rw-r--r--drivers/staging/gasket/gasket_core.c2
-rw-r--r--drivers/staging/hp/hp100.c11
-rw-r--r--drivers/staging/iio/accel/adis16203.c8
-rw-r--r--drivers/staging/iio/accel/adis16240.c15
-rw-r--r--drivers/staging/isdn/Kconfig12
-rw-r--r--drivers/staging/isdn/Makefile8
-rw-r--r--drivers/staging/isdn/TODO22
-rw-r--r--drivers/staging/isdn/avm/Kconfig65
-rw-r--r--drivers/staging/isdn/avm/Makefile12
-rw-r--r--drivers/staging/isdn/avm/avm_cs.c166
-rw-r--r--drivers/staging/isdn/avm/avmcard.h581
-rw-r--r--drivers/staging/isdn/avm/b1.c819
-rw-r--r--drivers/staging/isdn/avm/b1dma.c981
-rw-r--r--drivers/staging/isdn/avm/b1isa.c243
-rw-r--r--drivers/staging/isdn/avm/b1pci.c416
-rw-r--r--drivers/staging/isdn/avm/b1pcmcia.c224
-rw-r--r--drivers/staging/isdn/avm/c4.c1317
-rw-r--r--drivers/staging/isdn/avm/t1isa.c594
-rw-r--r--drivers/staging/isdn/avm/t1pci.c259
-rw-r--r--drivers/staging/isdn/gigaset/Kconfig62
-rw-r--r--drivers/staging/isdn/gigaset/Makefile17
-rw-r--r--drivers/staging/isdn/gigaset/asyncdata.c606
-rw-r--r--drivers/staging/isdn/gigaset/bas-gigaset.c2672
-rw-r--r--drivers/staging/isdn/gigaset/capi.c2517
-rw-r--r--drivers/staging/isdn/gigaset/common.c1153
-rw-r--r--drivers/staging/isdn/gigaset/dummyll.c74
-rw-r--r--drivers/staging/isdn/gigaset/ev-layer.c1910
-rw-r--r--drivers/staging/isdn/gigaset/gigaset.h827
-rw-r--r--drivers/staging/isdn/gigaset/interface.c613
-rw-r--r--drivers/staging/isdn/gigaset/isocdata.c1006
-rw-r--r--drivers/staging/isdn/gigaset/proc.c77
-rw-r--r--drivers/staging/isdn/gigaset/ser-gigaset.c796
-rw-r--r--drivers/staging/isdn/gigaset/usb-gigaset.c959
-rw-r--r--drivers/staging/isdn/hysdn/Kconfig15
-rw-r--r--drivers/staging/isdn/hysdn/Makefile12
-rw-r--r--drivers/staging/isdn/hysdn/boardergo.c445
-rw-r--r--drivers/staging/isdn/hysdn/boardergo.h100
-rw-r--r--drivers/staging/isdn/hysdn/hycapi.c785
-rw-r--r--drivers/staging/isdn/hysdn/hysdn_boot.c400
-rw-r--r--drivers/staging/isdn/hysdn/hysdn_defs.h282
-rw-r--r--drivers/staging/isdn/hysdn/hysdn_init.c213
-rw-r--r--drivers/staging/isdn/hysdn/hysdn_net.c330
-rw-r--r--drivers/staging/isdn/hysdn/hysdn_pof.h78
-rw-r--r--drivers/staging/isdn/hysdn/hysdn_procconf.c411
-rw-r--r--drivers/staging/isdn/hysdn/hysdn_proclog.c357
-rw-r--r--drivers/staging/isdn/hysdn/hysdn_sched.c197
-rw-r--r--drivers/staging/isdn/hysdn/ince1pc.h134
-rw-r--r--drivers/staging/kpc2000/kpc2000/core.c4
-rw-r--r--drivers/staging/kpc2000/kpc2000_i2c.c122
-rw-r--r--drivers/staging/kpc2000/kpc2000_spi.c2
-rw-r--r--drivers/staging/kpc2000/kpc_dma/fileops.c2
-rw-r--r--drivers/staging/kpc2000/kpc_dma/kpc_dma_driver.c2
-rw-r--r--drivers/staging/ks7010/ks_wlan_net.c4
-rw-r--r--drivers/staging/media/allegro-dvt/allegro-core.c4
-rw-r--r--drivers/staging/most/Makefile1
-rw-r--r--drivers/staging/most/cdev/Makefile1
-rw-r--r--drivers/staging/most/cdev/cdev.c5
-rw-r--r--drivers/staging/most/configfs.c59
-rw-r--r--drivers/staging/most/core.c204
-rw-r--r--drivers/staging/most/dim2/Makefile1
-rw-r--r--drivers/staging/most/dim2/dim2.c5
-rw-r--r--drivers/staging/most/i2c/Makefile1
-rw-r--r--drivers/staging/most/i2c/i2c.c2
-rw-r--r--drivers/staging/most/most.h (renamed from drivers/staging/most/core.h)30
-rw-r--r--drivers/staging/most/net/Makefile1
-rw-r--r--drivers/staging/most/net/net.c17
-rw-r--r--drivers/staging/most/sound/Makefile1
-rw-r--r--drivers/staging/most/sound/sound.c54
-rw-r--r--drivers/staging/most/usb/Makefile1
-rw-r--r--drivers/staging/most/usb/usb.c26
-rw-r--r--drivers/staging/most/video/Makefile1
-rw-r--r--drivers/staging/most/video/video.c6
-rw-r--r--drivers/staging/mt7621-dts/mt7621.dtsi2
-rw-r--r--drivers/staging/nvec/nvec_kbd.c2
-rw-r--r--drivers/staging/octeon-usb/Kconfig11
-rw-r--r--drivers/staging/octeon-usb/Makefile2
-rw-r--r--drivers/staging/octeon-usb/TODO8
-rw-r--r--drivers/staging/octeon-usb/octeon-hcd.c3737
-rw-r--r--drivers/staging/octeon-usb/octeon-hcd.h1847
-rw-r--r--drivers/staging/octeon/Kconfig16
-rw-r--r--drivers/staging/octeon/Makefile19
-rw-r--r--drivers/staging/octeon/TODO9
-rw-r--r--drivers/staging/octeon/ethernet-defines.h40
-rw-r--r--drivers/staging/octeon/ethernet-mdio.c178
-rw-r--r--drivers/staging/octeon/ethernet-mdio.h28
-rw-r--r--drivers/staging/octeon/ethernet-mem.c154
-rw-r--r--drivers/staging/octeon/ethernet-mem.h9
-rw-r--r--drivers/staging/octeon/ethernet-rgmii.c158
-rw-r--r--drivers/staging/octeon/ethernet-rx.c538
-rw-r--r--drivers/staging/octeon/ethernet-rx.h31
-rw-r--r--drivers/staging/octeon/ethernet-sgmii.c30
-rw-r--r--drivers/staging/octeon/ethernet-spi.c226
-rw-r--r--drivers/staging/octeon/ethernet-tx.c717
-rw-r--r--drivers/staging/octeon/ethernet-tx.h14
-rw-r--r--drivers/staging/octeon/ethernet-util.h47
-rw-r--r--drivers/staging/octeon/ethernet.c992
-rw-r--r--drivers/staging/octeon/octeon-ethernet.h107
-rw-r--r--drivers/staging/octeon/octeon-stubs.h1433
-rw-r--r--drivers/staging/qlge/qlge.h15
-rw-r--r--drivers/staging/qlge/qlge_dbg.c32
-rw-r--r--drivers/staging/qlge/qlge_ethtool.c39
-rw-r--r--drivers/staging/qlge/qlge_main.c221
-rw-r--r--drivers/staging/qlge/qlge_mpi.c26
-rw-r--r--drivers/staging/rtl8188eu/core/rtw_ap.c4
-rw-r--r--drivers/staging/rtl8188eu/core/rtw_efuse.c14
-rw-r--r--drivers/staging/rtl8188eu/core/rtw_ieee80211.c20
-rw-r--r--drivers/staging/rtl8188eu/core/rtw_mlme_ext.c200
-rw-r--r--drivers/staging/rtl8188eu/core/rtw_pwrctrl.c34
-rw-r--r--drivers/staging/rtl8188eu/core/rtw_wlan_util.c8
-rw-r--r--drivers/staging/rtl8188eu/core/rtw_xmit.c4
-rw-r--r--drivers/staging/rtl8188eu/hal/odm.c7
-rw-r--r--drivers/staging/rtl8188eu/hal/odm_rtl8188e.c82
-rw-r--r--drivers/staging/rtl8188eu/hal/phy.c41
-rw-r--r--drivers/staging/rtl8188eu/hal/rtl8188e_dm.c97
-rw-r--r--drivers/staging/rtl8188eu/include/hal8188e_phy_cfg.h5
-rw-r--r--drivers/staging/rtl8188eu/include/hal_intf.h2
-rw-r--r--drivers/staging/rtl8188eu/include/ieee80211.h2
-rw-r--r--drivers/staging/rtl8188eu/include/odm.h1
-rw-r--r--drivers/staging/rtl8188eu/include/rtl8188e_dm.h7
-rw-r--r--drivers/staging/rtl8188eu/include/rtw_rf.h16
-rw-r--r--drivers/staging/rtl8188eu/os_dep/ioctl_linux.c15
-rw-r--r--drivers/staging/rtl8192e/rtl8192e/rtl_core.c15
-rw-r--r--drivers/staging/rtl8192u/Makefile4
-rw-r--r--drivers/staging/rtl8192u/ieee80211/Makefile27
-rw-r--r--drivers/staging/rtl8192u/ieee80211/ieee80211_rx.c62
-rw-r--r--drivers/staging/rtl8192u/r8192U_core.c15
-rw-r--r--drivers/staging/rtl8192u/r8192U_wx.c2
-rw-r--r--drivers/staging/rtl8192u/r819xU_phy.c2
-rw-r--r--drivers/staging/rtl8712/rtl8712_recv.c10
-rw-r--r--drivers/staging/rtl8723bs/core/rtw_mlme_ext.c54
-rw-r--r--drivers/staging/rtl8723bs/hal/HalPhyRf.c30
-rw-r--r--drivers/staging/rtl8723bs/hal/HalPhyRf.h8
-rw-r--r--drivers/staging/rtl8723bs/hal/HalPhyRf_8723B.c23
-rw-r--r--drivers/staging/rtl8723bs/hal/rtl8723b_hal_init.c44
-rw-r--r--drivers/staging/rts5208/Makefile2
-rw-r--r--drivers/staging/rts5208/rtsx.c9
-rw-r--r--drivers/staging/sm750fb/sm750_hw.c2
-rw-r--r--drivers/staging/unisys/visornic/visornic_main.c2
-rw-r--r--drivers/staging/uwb/whc-rc.c6
-rw-r--r--drivers/staging/vc04_services/Makefile2
-rw-r--r--drivers/staging/vc04_services/bcm2835-audio/bcm2835-pcm.c19
-rw-r--r--drivers/staging/vc04_services/bcm2835-camera/bcm2835-camera.c9
-rw-r--r--drivers/staging/vc04_services/interface/vchi/vchi.h4
-rw-r--r--drivers/staging/vc04_services/interface/vchiq_arm/vchiq_shim.c2
-rw-r--r--drivers/staging/vt6655/rf.c2
-rw-r--r--drivers/staging/vt6656/baseband.c8
-rw-r--r--drivers/staging/vt6656/device.h19
-rw-r--r--drivers/staging/vt6656/dpc.c114
-rw-r--r--drivers/staging/vt6656/firmware.c14
-rw-r--r--drivers/staging/vt6656/int.c6
-rw-r--r--drivers/staging/vt6656/main_usb.c1
-rw-r--r--drivers/staging/vt6656/rxtx.c26
-rw-r--r--drivers/staging/vt6656/usbpipe.c2
-rw-r--r--drivers/staging/vt6656/usbpipe.h2
-rw-r--r--drivers/staging/wfx/TODO71
-rw-r--r--drivers/staging/wfx/bh.c3
-rw-r--r--drivers/staging/wfx/bus_spi.c9
-rw-r--r--drivers/staging/wfx/data_rx.c85
-rw-r--r--drivers/staging/wfx/data_rx.h4
-rw-r--r--drivers/staging/wfx/data_tx.c322
-rw-r--r--drivers/staging/wfx/data_tx.h27
-rw-r--r--drivers/staging/wfx/debug.c2
-rw-r--r--drivers/staging/wfx/fwio.c28
-rw-r--r--drivers/staging/wfx/hif_api_cmd.h35
-rw-r--r--drivers/staging/wfx/hif_api_mib.h35
-rw-r--r--drivers/staging/wfx/hif_rx.c115
-rw-r--r--drivers/staging/wfx/hif_tx.c164
-rw-r--r--drivers/staging/wfx/hif_tx.h28
-rw-r--r--drivers/staging/wfx/hif_tx_mib.h183
-rw-r--r--drivers/staging/wfx/hwio.h15
-rw-r--r--drivers/staging/wfx/main.c10
-rw-r--r--drivers/staging/wfx/queue.c216
-rw-r--r--drivers/staging/wfx/queue.h10
-rw-r--r--drivers/staging/wfx/scan.c321
-rw-r--r--drivers/staging/wfx/scan.h26
-rw-r--r--drivers/staging/wfx/secure_link.h8
-rw-r--r--drivers/staging/wfx/sta.c1058
-rw-r--r--drivers/staging/wfx/sta.h20
-rw-r--r--drivers/staging/wfx/traces.h14
-rw-r--r--drivers/staging/wfx/wfx.h43
-rw-r--r--drivers/staging/wilc1000/fw.h119
-rw-r--r--drivers/staging/wilc1000/hif.c90
-rw-r--r--drivers/staging/wilc1000/hif.h19
-rw-r--r--drivers/staging/wilc1000/netdev.c63
-rw-r--r--drivers/staging/wilc1000/netdev.h1
-rw-r--r--drivers/staging/wilc1000/sdio.c178
-rw-r--r--drivers/staging/wilc1000/spi.c285
-rw-r--r--drivers/staging/wilc1000/wlan.c192
-rw-r--r--drivers/staging/wilc1000/wlan.h2
-rw-r--r--drivers/staging/wilc1000/wlan_cfg.c152
-rw-r--r--drivers/staging/wilc1000/wlan_if.h1
-rw-r--r--drivers/staging/wlan-ng/p80211netdev.c4
-rw-r--r--drivers/staging/wlan-ng/prism2mgmt.c2
-rw-r--r--drivers/target/iscsi/iscsi_target.c6
-rw-r--r--drivers/target/tcm_fc/tfc_io.c1
-rw-r--r--drivers/tc/tc-driver.c5
-rw-r--r--drivers/tc/tc.c2
-rw-r--r--drivers/tee/Kconfig4
-rw-r--r--drivers/tee/Makefile1
-rw-r--r--drivers/tee/amdtee/Kconfig8
-rw-r--r--drivers/tee/amdtee/Makefile5
-rw-r--r--drivers/tee/amdtee/amdtee_if.h183
-rw-r--r--drivers/tee/amdtee/amdtee_private.h159
-rw-r--r--drivers/tee/amdtee/call.c373
-rw-r--r--drivers/tee/amdtee/core.c518
-rw-r--r--drivers/tee/amdtee/shm_pool.c93
-rw-r--r--drivers/tee/optee/Kconfig1
-rw-r--r--drivers/tee/optee/shm_pool.c15
-rw-r--r--drivers/tee/tee_shm.c6
-rw-r--r--drivers/thermal/Kconfig35
-rw-r--r--drivers/thermal/Makefile4
-rw-r--r--drivers/thermal/amlogic_thermal.c6
-rw-r--r--drivers/thermal/armada_thermal.c7
-rw-r--r--drivers/thermal/broadcom/Kconfig7
-rw-r--r--drivers/thermal/broadcom/Makefile1
-rw-r--r--drivers/thermal/broadcom/bcm2711_thermal.c123
-rw-r--r--drivers/thermal/broadcom/brcmstb_thermal.c96
-rw-r--r--drivers/thermal/clock_cooling.c2
-rw-r--r--drivers/thermal/cpufreq_cooling.c (renamed from drivers/thermal/cpu_cooling.c)7
-rw-r--r--drivers/thermal/cpuidle_cooling.c232
-rw-r--r--drivers/thermal/db8500_thermal.c4
-rw-r--r--drivers/thermal/devfreq_cooling.c3
-rw-r--r--drivers/thermal/fair_share.c4
-rw-r--r--drivers/thermal/gov_bang_bang.c4
-rw-r--r--drivers/thermal/intel/int340x_thermal/int3400_thermal.c1
-rw-r--r--drivers/thermal/intel/int340x_thermal/int3403_thermal.c1
-rw-r--r--drivers/thermal/intel/int340x_thermal/int340x_thermal_zone.c7
-rw-r--r--drivers/thermal/intel/int340x_thermal/processor_thermal_device.c4
-rw-r--r--drivers/thermal/intel/intel_pch_thermal.c13
-rw-r--r--drivers/thermal/max77620_thermal.c2
-rw-r--r--drivers/thermal/mtk_thermal.c12
-rw-r--r--drivers/thermal/of-thermal.c70
-rw-r--r--drivers/thermal/qoriq_thermal.c337
-rw-r--r--drivers/thermal/rcar_gen3_thermal.c4
-rw-r--r--drivers/thermal/rcar_thermal.c9
-rw-r--r--drivers/thermal/rockchip_thermal.c34
-rw-r--r--drivers/thermal/samsung/Kconfig2
-rw-r--r--drivers/thermal/samsung/exynos_tmu.c9
-rw-r--r--drivers/thermal/st/stm_thermal.c388
-rw-r--r--drivers/thermal/step_wise.c4
-rw-r--r--drivers/thermal/sun8i_thermal.c639
-rw-r--r--drivers/thermal/tegra/soctherm.c15
-rw-r--r--drivers/thermal/thermal-generic-adc.c20
-rw-r--r--drivers/thermal/thermal_core.h2
-rw-r--r--drivers/thermal/thermal_hwmon.c28
-rw-r--r--drivers/thermal/thermal_hwmon.h7
-rw-r--r--drivers/thermal/user_space.c4
-rw-r--r--drivers/thermal/zx2967_thermal.c1
-rw-r--r--drivers/thunderbolt/Kconfig11
-rw-r--r--drivers/thunderbolt/Makefile4
-rw-r--r--drivers/thunderbolt/cap.c11
-rw-r--r--drivers/thunderbolt/ctl.c19
-rw-r--r--drivers/thunderbolt/ctl.h3
-rw-r--r--drivers/thunderbolt/eeprom.c137
-rw-r--r--drivers/thunderbolt/nhi.c3
-rw-r--r--drivers/thunderbolt/nhi.h2
-rw-r--r--drivers/thunderbolt/switch.c441
-rw-r--r--drivers/thunderbolt/tb.c227
-rw-r--r--drivers/thunderbolt/tb.h101
-rw-r--r--drivers/thunderbolt/tb_msgs.h6
-rw-r--r--drivers/thunderbolt/tb_regs.h65
-rw-r--r--drivers/thunderbolt/tmu.c383
-rw-r--r--drivers/thunderbolt/tunnel.c169
-rw-r--r--drivers/thunderbolt/tunnel.h9
-rw-r--r--drivers/thunderbolt/usb4.c764
-rw-r--r--drivers/thunderbolt/xdomain.c6
-rw-r--r--drivers/tty/cyclades.c10
-rw-r--r--drivers/tty/mips_ejtag_fdc.c2
-rw-r--r--drivers/tty/moxa.c4
-rw-r--r--drivers/tty/n_gsm.c2
-rw-r--r--drivers/tty/n_hdlc.c11
-rw-r--r--drivers/tty/serdev/core.c14
-rw-r--r--drivers/tty/serial/21285.c55
-rw-r--r--drivers/tty/serial/8250/8250_aspeed_vuart.c5
-rw-r--r--drivers/tty/serial/8250/8250_bcm2835aux.c50
-rw-r--r--drivers/tty/serial/8250/8250_core.c1
-rw-r--r--drivers/tty/serial/8250/8250_exar.c6
-rw-r--r--drivers/tty/serial/8250/8250_fsl.c4
-rw-r--r--drivers/tty/serial/8250/8250_gsc.c2
-rw-r--r--drivers/tty/serial/8250/8250_ioc3.c98
-rw-r--r--drivers/tty/serial/8250/8250_of.c4
-rw-r--r--drivers/tty/serial/8250/8250_omap.c7
-rw-r--r--drivers/tty/serial/8250/8250_pci.c6
-rw-r--r--drivers/tty/serial/8250/8250_port.c10
-rw-r--r--drivers/tty/serial/8250/Kconfig21
-rw-r--r--drivers/tty/serial/8250/Makefile1
-rw-r--r--drivers/tty/serial/Kconfig4
-rw-r--r--drivers/tty/serial/amba-pl010.c5
-rw-r--r--drivers/tty/serial/amba-pl011.c13
-rw-r--r--drivers/tty/serial/apbuart.c5
-rw-r--r--drivers/tty/serial/arc_uart.c5
-rw-r--r--drivers/tty/serial/atmel_serial.c41
-rw-r--r--drivers/tty/serial/bcm63xx_uart.c5
-rw-r--r--drivers/tty/serial/clps711x.c5
-rw-r--r--drivers/tty/serial/cpm_uart/cpm_uart_core.c9
-rw-r--r--drivers/tty/serial/dz.c7
-rw-r--r--drivers/tty/serial/efm32-uart.c5
-rw-r--r--drivers/tty/serial/fsl_linflexuart.c8
-rw-r--r--drivers/tty/serial/fsl_lpuart.c16
-rw-r--r--drivers/tty/serial/imx.c58
-rw-r--r--drivers/tty/serial/ip22zilog.c7
-rw-r--r--drivers/tty/serial/kgdb_nmi.c4
-rw-r--r--drivers/tty/serial/lantiq.c2
-rw-r--r--drivers/tty/serial/meson_uart.c72
-rw-r--r--drivers/tty/serial/milbeaut_usio.c5
-rw-r--r--drivers/tty/serial/mpc52xx_uart.c11
-rw-r--r--drivers/tty/serial/msm_serial.c23
-rw-r--r--drivers/tty/serial/mux.c7
-rw-r--r--drivers/tty/serial/mxs-auart.c5
-rw-r--r--drivers/tty/serial/omap-serial.c12
-rw-r--r--drivers/tty/serial/owl-uart.c2
-rw-r--r--drivers/tty/serial/pch_uart.c12
-rw-r--r--drivers/tty/serial/pic32_uart.c2
-rw-r--r--drivers/tty/serial/pmac_zilog.c5
-rw-r--r--drivers/tty/serial/pnx8xxx_uart.c7
-rw-r--r--drivers/tty/serial/pxa.c5
-rw-r--r--drivers/tty/serial/qcom_geni_serial.c128
-rw-r--r--drivers/tty/serial/rda-uart.c2
-rw-r--r--drivers/tty/serial/sa1100.c7
-rw-r--r--drivers/tty/serial/samsung.h147
-rw-r--r--drivers/tty/serial/samsung_tty.c315
-rw-r--r--drivers/tty/serial/sb1250-duart.c9
-rw-r--r--drivers/tty/serial/sccnxp.c5
-rw-r--r--drivers/tty/serial/serial-tegra.c94
-rw-r--r--drivers/tty/serial/serial_core.c84
-rw-r--r--drivers/tty/serial/serial_txx9.c5
-rw-r--r--drivers/tty/serial/sh-sci.c12
-rw-r--r--drivers/tty/serial/sprd_serial.c5
-rw-r--r--drivers/tty/serial/st-asc.c17
-rw-r--r--drivers/tty/serial/stm32-usart.c5
-rw-r--r--drivers/tty/serial/sunhv.c5
-rw-r--r--drivers/tty/serial/sunsab.c5
-rw-r--r--drivers/tty/serial/sunsu.c5
-rw-r--r--drivers/tty/serial/sunzilog.c6
-rw-r--r--drivers/tty/serial/ucc_uart.c4
-rw-r--r--drivers/tty/serial/vr41xx_siu.c5
-rw-r--r--drivers/tty/serial/vt8500_serial.c5
-rw-r--r--drivers/tty/serial/xilinx_uartps.c51
-rw-r--r--drivers/tty/serial/zs.c7
-rw-r--r--drivers/tty/synclink.c6
-rw-r--r--drivers/tty/synclink_gt.c24
-rw-r--r--drivers/tty/synclinkmp.c34
-rw-r--r--drivers/tty/sysrq.c9
-rw-r--r--drivers/tty/tty_baudrate.c28
-rw-r--r--drivers/tty/tty_io.c2
-rw-r--r--drivers/tty/vt/.gitignore1
-rw-r--r--drivers/tty/vt/Makefile6
-rw-r--r--drivers/tty/vt/conmakehash.c290
-rw-r--r--drivers/tty/vt/vt.c8
-rw-r--r--drivers/uio/uio_dmem_genirq.c6
-rw-r--r--drivers/uio/uio_pdrv_genirq.c2
-rw-r--r--drivers/usb/cdns3/Kconfig10
-rw-r--r--drivers/usb/cdns3/Makefile1
-rw-r--r--drivers/usb/cdns3/cdns3-imx.c216
-rw-r--r--drivers/usb/cdns3/debug.h2
-rw-r--r--drivers/usb/cdns3/gadget.c536
-rw-r--r--drivers/usb/cdns3/gadget.h26
-rw-r--r--drivers/usb/cdns3/trace.h93
-rw-r--r--drivers/usb/chipidea/Kconfig1
-rw-r--r--drivers/usb/chipidea/ci.h10
-rw-r--r--drivers/usb/chipidea/ci_hdrc_tegra.c9
-rw-r--r--drivers/usb/chipidea/core.c4
-rw-r--r--drivers/usb/chipidea/host.h2
-rw-r--r--drivers/usb/core/devio.c4
-rw-r--r--drivers/usb/core/hcd-pci.c2
-rw-r--r--drivers/usb/core/hub.c1
-rw-r--r--drivers/usb/dwc2/core_intr.c7
-rw-r--r--drivers/usb/dwc2/debugfs.c3
-rw-r--r--drivers/usb/dwc2/gadget.c25
-rw-r--r--drivers/usb/dwc2/hcd.c2
-rw-r--r--drivers/usb/dwc3/core.c3
-rw-r--r--drivers/usb/dwc3/core.h2
-rw-r--r--drivers/usb/dwc3/dwc3-exynos.c4
-rw-r--r--drivers/usb/dwc3/dwc3-pci.c4
-rw-r--r--drivers/usb/dwc3/ep0.c4
-rw-r--r--drivers/usb/dwc3/gadget.c56
-rw-r--r--drivers/usb/dwc3/gadget.h14
-rw-r--r--drivers/usb/dwc3/host.c6
-rw-r--r--drivers/usb/early/xhci-dbc.c2
-rw-r--r--drivers/usb/gadget/Kconfig28
-rw-r--r--drivers/usb/gadget/configfs.c43
-rw-r--r--drivers/usb/gadget/function/f_ecm.c16
-rw-r--r--drivers/usb/gadget/function/f_fs.c2
-rw-r--r--drivers/usb/gadget/function/f_ncm.c17
-rw-r--r--drivers/usb/gadget/function/u_audio.c29
-rw-r--r--drivers/usb/gadget/legacy/Kconfig28
-rw-r--r--drivers/usb/gadget/legacy/cdc2.c2
-rw-r--r--drivers/usb/gadget/legacy/g_ffs.c2
-rw-r--r--drivers/usb/gadget/legacy/multi.c2
-rw-r--r--drivers/usb/gadget/legacy/ncm.c2
-rw-r--r--drivers/usb/gadget/udc/amd5536udc_pci.c2
-rw-r--r--drivers/usb/gadget/udc/atmel_usba_udc.c2
-rw-r--r--drivers/usb/gadget/udc/core.c2
-rw-r--r--drivers/usb/gadget/udc/goku_udc.c2
-rw-r--r--drivers/usb/gadget/udc/gr_udc.c16
-rw-r--r--drivers/usb/gadget/udc/net2272.c6
-rw-r--r--drivers/usb/gadget/udc/net2280.c2
-rw-r--r--drivers/usb/gadget/udc/omap_udc.c6
-rw-r--r--drivers/usb/host/Kconfig56
-rw-r--r--drivers/usb/host/ehci-exynos.c4
-rw-r--r--drivers/usb/host/ehci-mv.c21
-rw-r--r--drivers/usb/host/ehci-pmcmsp.c6
-rw-r--r--drivers/usb/host/ehci-sh.c7
-rw-r--r--drivers/usb/host/ehci-tegra.c16
-rw-r--r--drivers/usb/host/ohci-exynos.c2
-rw-r--r--drivers/usb/host/oxu210hp-hcd.c14
-rw-r--r--drivers/usb/host/pci-quirks.c2
-rw-r--r--drivers/usb/host/xhci-mtk.c5
-rw-r--r--drivers/usb/host/xhci-tegra.c440
-rw-r--r--drivers/usb/isp1760/isp1760-if.c4
-rw-r--r--drivers/usb/misc/usb3503.c94
-rw-r--r--drivers/usb/musb/Kconfig12
-rw-r--r--drivers/usb/musb/Makefile4
-rw-r--r--drivers/usb/musb/davinci.c57
-rw-r--r--drivers/usb/musb/jz4740.c75
-rw-r--r--drivers/usb/musb/mediatek.c582
-rw-r--r--drivers/usb/musb/musb_am335x.c44
-rw-r--r--drivers/usb/musb/musb_core.c188
-rw-r--r--drivers/usb/musb/musb_core.h20
-rw-r--r--drivers/usb/musb/musb_dma.h9
-rw-r--r--drivers/usb/musb/musb_host.c46
-rw-r--r--drivers/usb/musb/musb_io.h18
-rw-r--r--drivers/usb/musb/musb_trace.h33
-rw-r--r--drivers/usb/musb/musbhsdma.c56
-rw-r--r--drivers/usb/musb/omap2430.c164
-rw-r--r--drivers/usb/musb/sunxi.c6
-rw-r--r--drivers/usb/musb/tusb6010.c2
-rw-r--r--drivers/usb/musb/ux500_dma.c4
-rw-r--r--drivers/usb/phy/Kconfig4
-rw-r--r--drivers/usb/phy/phy-ab8500-usb.c26
-rw-r--r--drivers/usb/phy/phy-am335x.c2
-rw-r--r--drivers/usb/phy/phy-generic.c39
-rw-r--r--drivers/usb/phy/phy-generic.h3
-rw-r--r--drivers/usb/phy/phy-gpio-vbus-usb.c96
-rw-r--r--drivers/usb/phy/phy-keystone.c2
-rw-r--r--drivers/usb/phy/phy-tegra-usb.c896
-rw-r--r--drivers/usb/phy/phy-ulpi.c48
-rw-r--r--drivers/usb/phy/phy.c13
-rw-r--r--drivers/usb/renesas_usbhs/common.c22
-rw-r--r--drivers/usb/renesas_usbhs/rcar2.c4
-rw-r--r--drivers/usb/renesas_usbhs/rza2.c2
-rw-r--r--drivers/usb/roles/intel-xhci-usb-role-switch.c2
-rw-r--r--drivers/usb/serial/Kconfig3
-rw-r--r--drivers/usb/serial/ch341.c6
-rw-r--r--drivers/usb/serial/cyberjack.c2
-rw-r--r--drivers/usb/serial/garmin_gps.c2
-rw-r--r--drivers/usb/serial/io_edgeport.c16
-rw-r--r--drivers/usb/serial/ir-usb.c185
-rw-r--r--drivers/usb/serial/keyspan.c4
-rw-r--r--drivers/usb/serial/opticon.c63
-rw-r--r--drivers/usb/serial/option.c6
-rw-r--r--drivers/usb/serial/quatech2.c6
-rw-r--r--drivers/usb/serial/usb-serial-simple.c2
-rw-r--r--drivers/usb/serial/usb-serial.c3
-rw-r--r--drivers/usb/typec/altmodes/displayport.c5
-rw-r--r--drivers/usb/typec/bus.c42
-rw-r--r--drivers/usb/typec/class.c52
-rw-r--r--drivers/usb/typec/mux.c2
-rw-r--r--drivers/usb/typec/mux/pi3usb30532.c5
-rw-r--r--drivers/usb/typec/tcpm/fusb302.c2
-rw-r--r--drivers/usb/typec/tcpm/tcpci.c6
-rw-r--r--drivers/usb/typec/tcpm/tcpm.c6
-rw-r--r--drivers/usb/typec/tcpm/wcove.c2
-rw-r--r--drivers/usb/typec/ucsi/displayport.c2
-rw-r--r--drivers/usb/typec/ucsi/ucsi.c95
-rw-r--r--drivers/usb/typec/ucsi/ucsi.h14
-rw-r--r--drivers/usb/typec/ucsi/ucsi_acpi.c2
-rw-r--r--drivers/usb/typec/ucsi/ucsi_ccg.c191
-rw-r--r--drivers/vfio/pci/vfio_pci_rdwr.c2
-rw-r--r--drivers/vfio/platform/reset/vfio_platform_amdxgbe.c4
-rw-r--r--drivers/vfio/platform/reset/vfio_platform_bcmflexrm.c2
-rw-r--r--drivers/vfio/platform/reset/vfio_platform_calxedaxgmac.c2
-rw-r--r--drivers/vfio/platform/vfio_platform_common.c4
-rw-r--r--drivers/vfio/vfio_iommu_type1.c35
-rw-r--r--drivers/video/console/Kconfig1
-rw-r--r--drivers/video/fbdev/68328fb.c14
-rw-r--r--drivers/video/fbdev/acornfb.c2
-rw-r--r--drivers/video/fbdev/amba-clcd.c2
-rw-r--r--drivers/video/fbdev/amifb.c2
-rw-r--r--drivers/video/fbdev/arcfb.c2
-rw-r--r--drivers/video/fbdev/arkfb.c2
-rw-r--r--drivers/video/fbdev/asiliantfb.c2
-rw-r--r--drivers/video/fbdev/atmel_lcdfb.c2
-rw-r--r--drivers/video/fbdev/aty/aty128fb.c2
-rw-r--r--drivers/video/fbdev/aty/atyfb.h2
-rw-r--r--drivers/video/fbdev/aty/atyfb_base.c6
-rw-r--r--drivers/video/fbdev/aty/mach64_cursor.c4
-rw-r--r--drivers/video/fbdev/aty/radeon_base.c2
-rw-r--r--drivers/video/fbdev/au1100fb.c2
-rw-r--r--drivers/video/fbdev/au1200fb.c2
-rw-r--r--drivers/video/fbdev/broadsheetfb.c2
-rw-r--r--drivers/video/fbdev/bw2.c2
-rw-r--r--drivers/video/fbdev/carminefb.c6
-rw-r--r--drivers/video/fbdev/cg14.c2
-rw-r--r--drivers/video/fbdev/cg3.c2
-rw-r--r--drivers/video/fbdev/cg6.c2
-rw-r--r--drivers/video/fbdev/chipsfb.c2
-rw-r--r--drivers/video/fbdev/cirrusfb.c2
-rw-r--r--drivers/video/fbdev/clps711x-fb.c2
-rw-r--r--drivers/video/fbdev/cobalt_lcdfb.c2
-rw-r--r--drivers/video/fbdev/controlfb.c2
-rw-r--r--drivers/video/fbdev/core/fb_defio.c3
-rw-r--r--drivers/video/fbdev/core/fbcon.c7
-rw-r--r--drivers/video/fbdev/core/fbmem.c35
-rw-r--r--drivers/video/fbdev/cyber2000fb.c2
-rw-r--r--drivers/video/fbdev/da8xx-fb.c2
-rw-r--r--drivers/video/fbdev/dnfb.c2
-rw-r--r--drivers/video/fbdev/efifb.c2
-rw-r--r--drivers/video/fbdev/ep93xx-fb.c2
-rw-r--r--drivers/video/fbdev/fb-puv3.c2
-rw-r--r--drivers/video/fbdev/ffb.c2
-rw-r--r--drivers/video/fbdev/fm2fb.c2
-rw-r--r--drivers/video/fbdev/fsl-diu-fb.c4
-rw-r--r--drivers/video/fbdev/g364fb.c2
-rw-r--r--drivers/video/fbdev/gbefb.c2
-rw-r--r--drivers/video/fbdev/geode/gx1fb_core.c2
-rw-r--r--drivers/video/fbdev/geode/gxfb_core.c2
-rw-r--r--drivers/video/fbdev/geode/lxfb_core.c2
-rw-r--r--drivers/video/fbdev/goldfishfb.c2
-rw-r--r--drivers/video/fbdev/grvga.c2
-rw-r--r--drivers/video/fbdev/gxt4500.c2
-rw-r--r--drivers/video/fbdev/hecubafb.c2
-rw-r--r--drivers/video/fbdev/hgafb.c2
-rw-r--r--drivers/video/fbdev/hitfb.c2
-rw-r--r--drivers/video/fbdev/hpfb.c2
-rw-r--r--drivers/video/fbdev/hyperv_fb.c2
-rw-r--r--drivers/video/fbdev/i740fb.c2
-rw-r--r--drivers/video/fbdev/i810/i810_main.c2
-rw-r--r--drivers/video/fbdev/imsttfb.c2
-rw-r--r--drivers/video/fbdev/imxfb.c2
-rw-r--r--drivers/video/fbdev/intelfb/intelfb.h2
-rw-r--r--drivers/video/fbdev/intelfb/intelfbdrv.c4
-rw-r--r--drivers/video/fbdev/kyro/fbdev.c4
-rw-r--r--drivers/video/fbdev/leo.c2
-rw-r--r--drivers/video/fbdev/macfb.c2
-rw-r--r--drivers/video/fbdev/matrox/matroxfb_base.c2
-rw-r--r--drivers/video/fbdev/matrox/matroxfb_crtc2.c2
-rw-r--r--drivers/video/fbdev/matrox/matroxfb_misc.c5
-rw-r--r--drivers/video/fbdev/maxinefb.c2
-rw-r--r--drivers/video/fbdev/mb862xx/mb862xxfb.h2
-rw-r--r--drivers/video/fbdev/mb862xx/mb862xxfb_accel.c15
-rw-r--r--drivers/video/fbdev/mb862xx/mb862xxfbdrv.c4
-rw-r--r--drivers/video/fbdev/mbx/mbxfb.c6
-rw-r--r--drivers/video/fbdev/metronomefb.c2
-rw-r--r--drivers/video/fbdev/mmp/Kconfig2
-rw-r--r--drivers/video/fbdev/mmp/fb/Kconfig4
-rw-r--r--drivers/video/fbdev/mmp/fb/mmpfb.c4
-rw-r--r--drivers/video/fbdev/mmp/hw/Kconfig7
-rw-r--r--drivers/video/fbdev/mmp/hw/mmp_ctrl.c60
-rw-r--r--drivers/video/fbdev/mmp/hw/mmp_ctrl.h10
-rw-r--r--drivers/video/fbdev/mmp/hw/mmp_spi.c6
-rw-r--r--drivers/video/fbdev/mx3fb.c5
-rw-r--r--drivers/video/fbdev/neofb.c2
-rw-r--r--drivers/video/fbdev/nvidia/nvidia.c20
-rw-r--r--drivers/video/fbdev/ocfb.c11
-rw-r--r--drivers/video/fbdev/offb.c2
-rw-r--r--drivers/video/fbdev/omap/omapfb_main.c2
-rw-r--r--drivers/video/fbdev/omap2/omapfb/dss/dispc.c6
-rw-r--r--drivers/video/fbdev/omap2/omapfb/omapfb-main.c2
-rw-r--r--drivers/video/fbdev/omap2/omapfb/vrfb.c4
-rw-r--r--drivers/video/fbdev/p9100.c2
-rw-r--r--drivers/video/fbdev/platinumfb.c2
-rw-r--r--drivers/video/fbdev/pm2fb.c4
-rw-r--r--drivers/video/fbdev/pm3fb.c6
-rw-r--r--drivers/video/fbdev/pmag-aa-fb.c6
-rw-r--r--drivers/video/fbdev/pmag-ba-fb.c6
-rw-r--r--drivers/video/fbdev/pmagb-b-fb.c6
-rw-r--r--drivers/video/fbdev/ps3fb.c2
-rw-r--r--drivers/video/fbdev/pvr2fb.c6
-rw-r--r--drivers/video/fbdev/pxa168fb.c10
-rw-r--r--drivers/video/fbdev/pxafb.c14
-rw-r--r--drivers/video/fbdev/q40fb.c2
-rw-r--r--drivers/video/fbdev/riva/fbdev.c2
-rw-r--r--drivers/video/fbdev/s1d13xxxfb.c4
-rw-r--r--drivers/video/fbdev/s3c-fb.c5
-rw-r--r--drivers/video/fbdev/s3c2410fb.c2
-rw-r--r--drivers/video/fbdev/s3fb.c2
-rw-r--r--drivers/video/fbdev/sa1100fb.c6
-rw-r--r--drivers/video/fbdev/savage/savagefb_driver.c2
-rw-r--r--drivers/video/fbdev/sh7760fb.c4
-rw-r--r--drivers/video/fbdev/sh_mobile_lcdcfb.c6
-rw-r--r--drivers/video/fbdev/simplefb.c2
-rw-r--r--drivers/video/fbdev/sis/sis_main.c2
-rw-r--r--drivers/video/fbdev/skeletonfb.c2
-rw-r--r--drivers/video/fbdev/sm712fb.c2
-rw-r--r--drivers/video/fbdev/smscufx.c3
-rw-r--r--drivers/video/fbdev/ssd1307fb.c2
-rw-r--r--drivers/video/fbdev/sstfb.c6
-rw-r--r--drivers/video/fbdev/stifb.c6
-rw-r--r--drivers/video/fbdev/sunxvr1000.c2
-rw-r--r--drivers/video/fbdev/sunxvr2500.c2
-rw-r--r--drivers/video/fbdev/sunxvr500.c2
-rw-r--r--drivers/video/fbdev/tcx.c2
-rw-r--r--drivers/video/fbdev/tdfxfb.c4
-rw-r--r--drivers/video/fbdev/tgafb.c4
-rw-r--r--drivers/video/fbdev/tmiofb.c2
-rw-r--r--drivers/video/fbdev/tridentfb.c6
-rw-r--r--drivers/video/fbdev/udlfb.c1
-rw-r--r--drivers/video/fbdev/uvesafb.c4
-rw-r--r--drivers/video/fbdev/valkyriefb.c4
-rw-r--r--drivers/video/fbdev/vermilion/cr_pll.c2
-rw-r--r--drivers/video/fbdev/vermilion/vermilion.c4
-rw-r--r--drivers/video/fbdev/vesafb.c6
-rw-r--r--drivers/video/fbdev/vfb.c2
-rw-r--r--drivers/video/fbdev/vga16fb.c2
-rw-r--r--drivers/video/fbdev/via/via-core.c2
-rw-r--r--drivers/video/fbdev/vt8500lcdfb.c2
-rw-r--r--drivers/video/fbdev/vt8623fb.c2
-rw-r--r--drivers/video/fbdev/w100fb.c8
-rw-r--r--drivers/video/fbdev/wm8505fb.c2
-rw-r--r--drivers/video/fbdev/xen-fbfront.c2
-rw-r--r--drivers/video/fbdev/xilinxfb.c2
-rw-r--r--drivers/virt/vboxguest/vboxguest_core.c1
-rw-r--r--drivers/virt/vboxguest/vboxguest_utils.c1
-rw-r--r--drivers/visorbus/visorchipset.c11
-rw-r--r--drivers/vme/boards/vme_vmivme7805.c2
-rw-r--r--drivers/vme/bridges/vme_ca91cx42.c4
-rw-r--r--drivers/vme/bridges/vme_fake.c30
-rw-r--r--drivers/vme/bridges/vme_tsi148.c4
-rw-r--r--drivers/w1/masters/matrox_w1.c2
-rw-r--r--drivers/w1/masters/omap_hdq.c348
-rw-r--r--drivers/watchdog/bcm63xx_wdt.c2
-rw-r--r--drivers/watchdog/intel_scu_watchdog.c2
-rw-r--r--drivers/watchdog/rc32434_wdt.c4
-rw-r--r--drivers/xen/gntdev-dmabuf.c23
-rw-r--r--drivers/xen/preempt.c4
4429 files changed, 250485 insertions, 123949 deletions
diff --git a/drivers/Makefile b/drivers/Makefile
index aaef17cc6512..31cf17dee252 100644
--- a/drivers/Makefile
+++ b/drivers/Makefile
@@ -171,7 +171,7 @@ obj-$(CONFIG_POWERCAP) += powercap/
obj-$(CONFIG_MCB) += mcb/
obj-$(CONFIG_PERF_EVENTS) += perf/
obj-$(CONFIG_RAS) += ras/
-obj-$(CONFIG_THUNDERBOLT) += thunderbolt/
+obj-$(CONFIG_USB4) += thunderbolt/
obj-$(CONFIG_CORESIGHT) += hwtracing/coresight/
obj-y += hwtracing/intel_th/
obj-$(CONFIG_STM) += hwtracing/stm/
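
The hunk above changes the symbol that gates the drivers/thunderbolt/ build from CONFIG_THUNDERBOLT to CONFIG_USB4. As an illustration (not part of the patch), a configuration that still wants this driver built in would now need:

	# Illustrative .config fragment; after the rename,
	# CONFIG_THUNDERBOLT no longer builds this directory.
	CONFIG_USB4=y
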
diff --git a/drivers/acpi/Kconfig b/drivers/acpi/Kconfig
index 002838d23b86..cc57bab146b5 100644
--- a/drivers/acpi/Kconfig
+++ b/drivers/acpi/Kconfig
@@ -241,6 +241,7 @@ config ACPI_CPU_FREQ_PSS

config ACPI_PROCESSOR_CSTATE
	def_bool y
+	depends on ACPI_PROCESSOR
	depends on IA64 || X86

config ACPI_PROCESSOR_IDLE
diff --git a/drivers/acpi/acpi_lpit.c b/drivers/acpi/acpi_lpit.c
index 433376e819bb..953437a216f6 100644
--- a/drivers/acpi/acpi_lpit.c
+++ b/drivers/acpi/acpi_lpit.c
@@ -104,7 +104,7 @@ static void lpit_update_residency(struct lpit_residency_info *info,
info->gaddr = lpit_native->residency_counter;
if (info->gaddr.space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY) {
- info->iomem_addr = ioremap_nocache(info->gaddr.address,
+ info->iomem_addr = ioremap(info->gaddr.address,
info->gaddr.bit_width / 8);
if (!info->iomem_addr)
return;
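
This hunk is part of the tree-wide removal of ioremap_nocache(), which had become an alias of plain ioremap() (mappings are uncached by default). A minimal conversion sketch, assuming a hypothetical driver mapping a residency counter; phys_addr and size are illustrative names:

	void __iomem *base;

	/* was: base = ioremap_nocache(phys_addr, size); */
	base = ioremap(phys_addr, size);
	if (!base)
		return -ENOMEM;
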
diff --git a/drivers/acpi/acpi_lpss.c b/drivers/acpi/acpi_lpss.c
index 70f740b09684..db18df6cb330 100644
--- a/drivers/acpi/acpi_lpss.c
+++ b/drivers/acpi/acpi_lpss.c
@@ -69,10 +69,6 @@ ACPI_MODULE_NAME("acpi_lpss");
#define LPSS_SAVE_CTX BIT(4)
#define LPSS_NO_D3_DELAY BIT(5)
-/* Crystal Cove PMIC shares same ACPI ID between different platforms */
-#define BYT_CRC_HRV 2
-#define CHT_CRC_HRV 3
-
struct lpss_private_data;
struct lpss_device_desc {
@@ -158,7 +154,7 @@ static void lpss_deassert_reset(struct lpss_private_data *pdata)
*/
static struct pwm_lookup byt_pwm_lookup[] = {
PWM_LOOKUP_WITH_MODULE("80860F09:00", 0, "0000:00:02.0",
- "pwm_backlight", 0, PWM_POLARITY_NORMAL,
+ "pwm_soc_backlight", 0, PWM_POLARITY_NORMAL,
"pwm-lpss-platform"),
};
@@ -170,8 +166,7 @@ static void byt_pwm_setup(struct lpss_private_data *pdata)
if (!adev->pnp.unique_id || strcmp(adev->pnp.unique_id, "1"))
return;
- if (!acpi_dev_present("INT33FD", NULL, BYT_CRC_HRV))
- pwm_add_table(byt_pwm_lookup, ARRAY_SIZE(byt_pwm_lookup));
+ pwm_add_table(byt_pwm_lookup, ARRAY_SIZE(byt_pwm_lookup));
}
#define LPSS_I2C_ENABLE 0x6c
@@ -204,7 +199,7 @@ static void byt_i2c_setup(struct lpss_private_data *pdata)
/* BSW PWM used for backlight control by the i915 driver */
static struct pwm_lookup bsw_pwm_lookup[] = {
PWM_LOOKUP_WITH_MODULE("80862288:00", 0, "0000:00:02.0",
- "pwm_backlight", 0, PWM_POLARITY_NORMAL,
+ "pwm_soc_backlight", 0, PWM_POLARITY_NORMAL,
"pwm-lpss-platform"),
};
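
Both lookup tables above rename the PWM consumer string from "pwm_backlight" to "pwm_soc_backlight", so the display driver must request the LPSS PWM under the new connection ID. A sketch of the matching consumer call, where dev is assumed to be the "0000:00:02.0" display controller named in the lookup entries:

	struct pwm_device *pwm;

	/* con_id must match the lookup table's new consumer name */
	pwm = pwm_get(dev, "pwm_soc_backlight");
	if (IS_ERR(pwm))
		return PTR_ERR(pwm);
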
diff --git a/drivers/acpi/acpi_processor.c b/drivers/acpi/acpi_processor.c
index 2c4dda0787e8..5379bc3f275d 100644
--- a/drivers/acpi/acpi_processor.c
+++ b/drivers/acpi/acpi_processor.c
@@ -705,3 +705,185 @@ void __init acpi_processor_init(void)
acpi_scan_add_handler_with_hotplug(&processor_handler, "processor");
acpi_scan_add_handler(&processor_container_handler);
}
+
+#ifdef CONFIG_ACPI_PROCESSOR_CSTATE
+/**
+ * acpi_processor_claim_cst_control - Request _CST control from the platform.
+ */
+bool acpi_processor_claim_cst_control(void)
+{
+ static bool cst_control_claimed;
+ acpi_status status;
+
+ if (!acpi_gbl_FADT.cst_control || cst_control_claimed)
+ return true;
+
+ status = acpi_os_write_port(acpi_gbl_FADT.smi_command,
+ acpi_gbl_FADT.cst_control, 8);
+ if (ACPI_FAILURE(status)) {
+ pr_warn("ACPI: Failed to claim processor _CST control\n");
+ return false;
+ }
+
+ cst_control_claimed = true;
+ return true;
+}
+EXPORT_SYMBOL_GPL(acpi_processor_claim_cst_control);
+
+/**
+ * acpi_processor_evaluate_cst - Evaluate the processor _CST control method.
+ * @handle: ACPI handle of the processor object containing the _CST.
+ * @cpu: The numeric ID of the target CPU.
+ * @info: Object to write the C-states information into.
+ *
+ * Extract the C-state information for the given CPU from the output of the _CST
+ * control method under the corresponding ACPI processor object (or processor
+ * device object) and populate @info with it.
+ *
+ * If any ACPI_ADR_SPACE_FIXED_HARDWARE C-states are found, invoke
+ * acpi_processor_ffh_cstate_probe() to verify them and update the
+ * cpu_cstate_entry data for @cpu.
+ */
+int acpi_processor_evaluate_cst(acpi_handle handle, u32 cpu,
+ struct acpi_processor_power *info)
+{
+ struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
+ union acpi_object *cst;
+ acpi_status status;
+ u64 count;
+ int last_index = 0;
+ int i, ret = 0;
+
+ status = acpi_evaluate_object(handle, "_CST", NULL, &buffer);
+ if (ACPI_FAILURE(status)) {
+ acpi_handle_debug(handle, "No _CST\n");
+ return -ENODEV;
+ }
+
+ cst = buffer.pointer;
+
+ /* There must be at least 2 elements. */
+ if (!cst || cst->type != ACPI_TYPE_PACKAGE || cst->package.count < 2) {
+ acpi_handle_warn(handle, "Invalid _CST output\n");
+ ret = -EFAULT;
+ goto end;
+ }
+
+ count = cst->package.elements[0].integer.value;
+
+ /* Validate the number of C-states. */
+ if (count < 1 || count != cst->package.count - 1) {
+ acpi_handle_warn(handle, "Inconsistent _CST data\n");
+ ret = -EFAULT;
+ goto end;
+ }
+
+ for (i = 1; i <= count; i++) {
+ union acpi_object *element;
+ union acpi_object *obj;
+ struct acpi_power_register *reg;
+ struct acpi_processor_cx cx;
+
+ /*
+ * If there is not enough space for all C-states, skip the
+ * excess ones and log a warning.
+ */
+ if (last_index >= ACPI_PROCESSOR_MAX_POWER - 1) {
+ acpi_handle_warn(handle,
+ "No room for more idle states (limit: %d)\n",
+ ACPI_PROCESSOR_MAX_POWER - 1);
+ break;
+ }
+
+ memset(&cx, 0, sizeof(cx));
+
+ element = &cst->package.elements[i];
+ if (element->type != ACPI_TYPE_PACKAGE)
+ continue;
+
+ if (element->package.count != 4)
+ continue;
+
+ obj = &element->package.elements[0];
+
+ if (obj->type != ACPI_TYPE_BUFFER)
+ continue;
+
+ reg = (struct acpi_power_register *)obj->buffer.pointer;
+
+ obj = &element->package.elements[1];
+ if (obj->type != ACPI_TYPE_INTEGER)
+ continue;
+
+ cx.type = obj->integer.value;
+ /*
+ * There are known cases in which the _CST output does not
+ * contain C1, so if the type of the first state found is not
+ * C1, leave an empty slot for C1 to be filled in later.
+ */
+ if (i == 1 && cx.type != ACPI_STATE_C1)
+ last_index = 1;
+
+ cx.address = reg->address;
+ cx.index = last_index + 1;
+
+ if (reg->space_id == ACPI_ADR_SPACE_FIXED_HARDWARE) {
+ if (!acpi_processor_ffh_cstate_probe(cpu, &cx, reg)) {
+ /*
+ * In the majority of cases _CST describes C1 as
+ * a FIXED_HARDWARE C-state, but if the command
+ * line forbids using MWAIT, use CSTATE_HALT for
+ * C1 regardless.
+ */
+ if (cx.type == ACPI_STATE_C1 &&
+ boot_option_idle_override == IDLE_NOMWAIT) {
+ cx.entry_method = ACPI_CSTATE_HALT;
+ snprintf(cx.desc, ACPI_CX_DESC_LEN, "ACPI HLT");
+ } else {
+ cx.entry_method = ACPI_CSTATE_FFH;
+ }
+ } else if (cx.type == ACPI_STATE_C1) {
+ /*
+ * In the special case of C1, FIXED_HARDWARE can
+ * be handled by executing the HLT instruction.
+ */
+ cx.entry_method = ACPI_CSTATE_HALT;
+ snprintf(cx.desc, ACPI_CX_DESC_LEN, "ACPI HLT");
+ } else {
+ continue;
+ }
+ } else if (reg->space_id == ACPI_ADR_SPACE_SYSTEM_IO) {
+ cx.entry_method = ACPI_CSTATE_SYSTEMIO;
+ snprintf(cx.desc, ACPI_CX_DESC_LEN, "ACPI IOPORT 0x%x",
+ cx.address);
+ } else {
+ continue;
+ }
+
+ if (cx.type == ACPI_STATE_C1)
+ cx.valid = 1;
+
+ obj = &element->package.elements[2];
+ if (obj->type != ACPI_TYPE_INTEGER)
+ continue;
+
+ cx.latency = obj->integer.value;
+
+ obj = &element->package.elements[3];
+ if (obj->type != ACPI_TYPE_INTEGER)
+ continue;
+
+ memcpy(&info->states[++last_index], &cx, sizeof(cx));
+ }
+
+ acpi_handle_info(handle, "Found %d idle states\n", last_index);
+
+ info->count = last_index;
+
+ end:
+ kfree(buffer.pointer);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(acpi_processor_evaluate_cst);
+#endif /* CONFIG_ACPI_PROCESSOR_CSTATE */
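
For reference, a minimal sketch of how a caller could consume the newly exported
acpi_processor_evaluate_cst(); the wrapper name and the logging are illustrative
assumptions (not part of this patch), while the struct acpi_processor_power
layout and the states[] indexing follow the function above:

    /* Illustrative sketch: evaluate _CST for @cpu and log the C-states. */
    static void example_dump_cstates(acpi_handle handle, u32 cpu)
    {
            struct acpi_processor_power info = { };
            int i;

            if (acpi_processor_evaluate_cst(handle, cpu, &info))
                    return; /* No _CST or invalid _CST data. */

            /*
             * states[0] is never filled; valid entries end at info.count.
             * A zeroed slot may remain at index 1 if _CST lacked C1.
             */
            for (i = 1; i <= info.count; i++)
                    pr_info("C%d: type %u, latency %u us\n", i,
                            info.states[i].type, info.states[i].latency);
    }
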
diff --git a/drivers/acpi/acpi_video.c b/drivers/acpi/acpi_video.c
index 2f380e7381d6..15c5b272e698 100644
--- a/drivers/acpi/acpi_video.c
+++ b/drivers/acpi/acpi_video.c
@@ -2187,7 +2187,7 @@ int acpi_video_register(void)
if (register_count) {
/*
* if the function of acpi_video_register is already called,
- * don't register the acpi_vide_bus again and return no error.
+ * don't register the acpi_video_bus again and return no error.
*/
goto leave;
}
diff --git a/drivers/acpi/acpica/acapps.h b/drivers/acpi/acpica/acapps.h
index 863ade9add6d..173447d50acf 100644
--- a/drivers/acpi/acpica/acapps.h
+++ b/drivers/acpi/acpica/acapps.h
@@ -3,7 +3,7 @@
*
* Module Name: acapps - common include for ACPI applications/tools
*
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
*
*****************************************************************************/
@@ -17,7 +17,7 @@
/* Common info for tool signons */
#define ACPICA_NAME "Intel ACPI Component Architecture"
-#define ACPICA_COPYRIGHT "Copyright (c) 2000 - 2019 Intel Corporation"
+#define ACPICA_COPYRIGHT "Copyright (c) 2000 - 2020 Intel Corporation"
#if ACPI_MACHINE_WIDTH == 64
#define ACPI_WIDTH " (64-bit version)"
diff --git a/drivers/acpi/acpica/accommon.h b/drivers/acpi/acpica/accommon.h
index 54f81eac7ec9..89101e53324b 100644
--- a/drivers/acpi/acpica/accommon.h
+++ b/drivers/acpi/acpica/accommon.h
@@ -3,7 +3,7 @@
*
* Name: accommon.h - Common include files for generation of ACPICA source
*
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/acconvert.h b/drivers/acpi/acpica/acconvert.h
index d5478cd4a857..ede4b9cc9e85 100644
--- a/drivers/acpi/acpica/acconvert.h
+++ b/drivers/acpi/acpica/acconvert.h
@@ -3,7 +3,7 @@
*
* Module Name: acapps - common include for ACPI applications/tools
*
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/acdebug.h b/drivers/acpi/acpica/acdebug.h
index 694cf206fa9a..a676daaa2da5 100644
--- a/drivers/acpi/acpica/acdebug.h
+++ b/drivers/acpi/acpica/acdebug.h
@@ -3,7 +3,7 @@
*
* Name: acdebug.h - ACPI/AML debugger
*
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/acdispat.h b/drivers/acpi/acpica/acdispat.h
index 82f81501566b..7ba6e308f146 100644
--- a/drivers/acpi/acpica/acdispat.h
+++ b/drivers/acpi/acpica/acdispat.h
@@ -3,7 +3,7 @@
*
* Name: acdispat.h - dispatcher (parser to interpreter interface)
*
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/acevents.h b/drivers/acpi/acpica/acevents.h
index c8652f91054e..79f292687bd6 100644
--- a/drivers/acpi/acpica/acevents.h
+++ b/drivers/acpi/acpica/acevents.h
@@ -3,7 +3,7 @@
*
* Name: acevents.h - Event subcomponent prototypes and defines
*
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/acglobal.h b/drivers/acpi/acpica/acglobal.h
index fd3beea93421..38ffa2c0a496 100644
--- a/drivers/acpi/acpica/acglobal.h
+++ b/drivers/acpi/acpica/acglobal.h
@@ -3,7 +3,7 @@
*
* Name: acglobal.h - Declarations for global variables
*
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/achware.h b/drivers/acpi/acpica/achware.h
index bcf8f7501db7..67f282e9e0af 100644
--- a/drivers/acpi/acpica/achware.h
+++ b/drivers/acpi/acpica/achware.h
@@ -3,7 +3,7 @@
*
* Name: achware.h -- hardware specific interfaces
*
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/acinterp.h b/drivers/acpi/acpica/acinterp.h
index 20706adbc148..a6d896cda2a5 100644
--- a/drivers/acpi/acpica/acinterp.h
+++ b/drivers/acpi/acpica/acinterp.h
@@ -3,7 +3,7 @@
*
* Name: acinterp.h - Interpreter subcomponent prototypes and defines
*
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/aclocal.h b/drivers/acpi/acpica/aclocal.h
index 1ea52576f0a2..af58cd2dc9d3 100644
--- a/drivers/acpi/acpica/aclocal.h
+++ b/drivers/acpi/acpica/aclocal.h
@@ -3,7 +3,7 @@
*
* Name: aclocal.h - Internal data types used across the ACPI subsystem
*
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/acmacros.h b/drivers/acpi/acpica/acmacros.h
index 283614e82a20..2269e10bc21b 100644
--- a/drivers/acpi/acpica/acmacros.h
+++ b/drivers/acpi/acpica/acmacros.h
@@ -3,7 +3,7 @@
*
* Name: acmacros.h - C macros for the entire subsystem.
*
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/acnamesp.h b/drivers/acpi/acpica/acnamesp.h
index 7da1864798a0..e618ddfab2fd 100644
--- a/drivers/acpi/acpica/acnamesp.h
+++ b/drivers/acpi/acpica/acnamesp.h
@@ -3,7 +3,7 @@
*
* Name: acnamesp.h - Namespace subcomponent prototypes and defines
*
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/acobject.h b/drivers/acpi/acpica/acobject.h
index 8def0e3d690f..9f0219a8cb98 100644
--- a/drivers/acpi/acpica/acobject.h
+++ b/drivers/acpi/acpica/acobject.h
@@ -3,7 +3,7 @@
*
* Name: acobject.h - Definition of union acpi_operand_object (Internal object only)
*
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
*
*****************************************************************************/
@@ -260,7 +260,8 @@ struct acpi_object_index_field {
/* The buffer_field is different in that it is part of a Buffer, not an op_region */
struct acpi_object_buffer_field {
- ACPI_OBJECT_COMMON_HEADER ACPI_COMMON_FIELD_INFO union acpi_operand_object *buffer_obj; /* Containing Buffer object */
+ ACPI_OBJECT_COMMON_HEADER ACPI_COMMON_FIELD_INFO u8 is_create_field; /* Special case for objects created by create_field() */
+ union acpi_operand_object *buffer_obj; /* Containing Buffer object */
};
/******************************************************************************
diff --git a/drivers/acpi/acpica/acopcode.h b/drivers/acpi/acpica/acopcode.h
index 9d78134428e3..8825394be9ab 100644
--- a/drivers/acpi/acpica/acopcode.h
+++ b/drivers/acpi/acpica/acopcode.h
@@ -3,7 +3,7 @@
*
* Name: acopcode.h - AML opcode information for the AML parser and interpreter
*
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/acparser.h b/drivers/acpi/acpica/acparser.h
index 6e32c97cba6c..bc00b85c0a8f 100644
--- a/drivers/acpi/acpica/acparser.h
+++ b/drivers/acpi/acpica/acparser.h
@@ -3,7 +3,7 @@
*
* Module Name: acparser.h - AML Parser subcomponent prototypes and defines
*
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/acpredef.h b/drivers/acpi/acpica/acpredef.h
index 387163b962a7..cd0f5df0ea23 100644
--- a/drivers/acpi/acpica/acpredef.h
+++ b/drivers/acpi/acpica/acpredef.h
@@ -3,7 +3,7 @@
*
* Name: acpredef - Information table for ACPI predefined methods and objects
*
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/acresrc.h b/drivers/acpi/acpica/acresrc.h
index 422cd8f2b92e..6de8a1650d3d 100644
--- a/drivers/acpi/acpica/acresrc.h
+++ b/drivers/acpi/acpica/acresrc.h
@@ -3,7 +3,7 @@
*
* Name: acresrc.h - Resource Manager function prototypes
*
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/acstruct.h b/drivers/acpi/acpica/acstruct.h
index 2043dff370b1..4c900c108f3f 100644
--- a/drivers/acpi/acpica/acstruct.h
+++ b/drivers/acpi/acpica/acstruct.h
@@ -3,7 +3,7 @@
*
* Name: acstruct.h - Internal structs
*
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/actables.h b/drivers/acpi/acpica/actables.h
index dfbf1dbd4033..734624facda3 100644
--- a/drivers/acpi/acpica/actables.h
+++ b/drivers/acpi/acpica/actables.h
@@ -3,7 +3,7 @@
*
* Name: actables.h - ACPI table management
*
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/acutils.h b/drivers/acpi/acpica/acutils.h
index 5fb50634e08e..7c89b470ec81 100644
--- a/drivers/acpi/acpica/acutils.h
+++ b/drivers/acpi/acpica/acutils.h
@@ -3,7 +3,7 @@
*
* Name: acutils.h -- prototypes for the common (subsystem-wide) procedures
*
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/amlcode.h b/drivers/acpi/acpica/amlcode.h
index 49e412edd7c6..1d541bbac4a3 100644
--- a/drivers/acpi/acpica/amlcode.h
+++ b/drivers/acpi/acpica/amlcode.h
@@ -5,7 +5,7 @@
* Declarations and definitions contained herein are derived
* directly from the ACPI specification.
*
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/amlresrc.h b/drivers/acpi/acpica/amlresrc.h
index 7c3bd4ab60fc..e5234e001acf 100644
--- a/drivers/acpi/acpica/amlresrc.h
+++ b/drivers/acpi/acpica/amlresrc.h
@@ -3,7 +3,7 @@
*
* Module Name: amlresrc.h - AML resource descriptors
*
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/dbhistry.c b/drivers/acpi/acpica/dbhistry.c
index 47d2e5059849..bb9600b867ee 100644
--- a/drivers/acpi/acpica/dbhistry.c
+++ b/drivers/acpi/acpica/dbhistry.c
@@ -3,7 +3,7 @@
*
* Module Name: dbhistry - debugger HISTORY command
*
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/dbinput.c b/drivers/acpi/acpica/dbinput.c
index e1632b340182..aa71f65395d2 100644
--- a/drivers/acpi/acpica/dbinput.c
+++ b/drivers/acpi/acpica/dbinput.c
@@ -816,7 +816,7 @@ acpi_db_command_dispatch(char *input_buffer,
if (ACPI_FAILURE(status)
|| temp64 >= ACPI_NUM_PREDEFINED_REGIONS) {
acpi_os_printf
- ("Invalid adress space ID: must be between 0 and %u inclusive\n",
+ ("Invalid address space ID: must be between 0 and %u inclusive\n",
ACPI_NUM_PREDEFINED_REGIONS - 1);
return (AE_OK);
}
diff --git a/drivers/acpi/acpica/dsargs.c b/drivers/acpi/acpica/dsargs.c
index 85b34d02233e..ad17f62e51d9 100644
--- a/drivers/acpi/acpica/dsargs.c
+++ b/drivers/acpi/acpica/dsargs.c
@@ -4,7 +4,7 @@
* Module Name: dsargs - Support for execution of dynamic arguments for static
* objects (regions, fields, buffer fields, etc.)
*
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/dscontrol.c b/drivers/acpi/acpica/dscontrol.c
index 5034fab9cf69..4b5b6e859f62 100644
--- a/drivers/acpi/acpica/dscontrol.c
+++ b/drivers/acpi/acpica/dscontrol.c
@@ -4,7 +4,7 @@
* Module Name: dscontrol - Support for execution control opcodes -
* if/else/while/return
*
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/dsdebug.c b/drivers/acpi/acpica/dsdebug.c
index 0d3e1ced1f57..63bc5f19fb82 100644
--- a/drivers/acpi/acpica/dsdebug.c
+++ b/drivers/acpi/acpica/dsdebug.c
@@ -3,7 +3,7 @@
*
* Module Name: dsdebug - Parser/Interpreter interface - debugging
*
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/dsfield.c b/drivers/acpi/acpica/dsfield.c
index faa38a22263a..c901f5aec739 100644
--- a/drivers/acpi/acpica/dsfield.c
+++ b/drivers/acpi/acpica/dsfield.c
@@ -3,7 +3,7 @@
*
* Module Name: dsfield - Dispatcher field routines
*
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
*
*****************************************************************************/
@@ -243,7 +243,7 @@ cleanup:
* FUNCTION: acpi_ds_get_field_names
*
* PARAMETERS: info - create_field info structure
- * ` walk_state - Current method state
+ * walk_state - Current method state
* arg - First parser arg for the field name list
*
* RETURN: Status
diff --git a/drivers/acpi/acpica/dsinit.c b/drivers/acpi/acpica/dsinit.c
index a1ffed29903b..9be2a309424c 100644
--- a/drivers/acpi/acpica/dsinit.c
+++ b/drivers/acpi/acpica/dsinit.c
@@ -3,7 +3,7 @@
*
* Module Name: dsinit - Object initialization namespace walk
*
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/dsmethod.c b/drivers/acpi/acpica/dsmethod.c
index f59b4d944f7f..cf67caff878a 100644
--- a/drivers/acpi/acpica/dsmethod.c
+++ b/drivers/acpi/acpica/dsmethod.c
@@ -3,7 +3,7 @@
*
* Module Name: dsmethod - Parser/Interpreter interface - control method parsing
*
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/dsobject.c b/drivers/acpi/acpica/dsobject.c
index 179129a2deb1..c0a14a6a2c20 100644
--- a/drivers/acpi/acpica/dsobject.c
+++ b/drivers/acpi/acpica/dsobject.c
@@ -3,7 +3,7 @@
*
* Module Name: dsobject - Dispatcher object management routines
*
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/dsopcode.c b/drivers/acpi/acpica/dsopcode.c
index 10f32b62608e..d9c26e720cb7 100644
--- a/drivers/acpi/acpica/dsopcode.c
+++ b/drivers/acpi/acpica/dsopcode.c
@@ -3,7 +3,7 @@
*
* Module Name: dsopcode - Dispatcher support for regions and fields
*
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
*
*****************************************************************************/
@@ -217,6 +217,8 @@ acpi_ds_init_buffer_field(u16 aml_opcode,
}
obj_desc->buffer_field.buffer_obj = buffer_desc;
+ obj_desc->buffer_field.is_create_field =
+ aml_opcode == AML_CREATE_FIELD_OP;
/* Reference count for buffer_desc inherits obj_desc count */
diff --git a/drivers/acpi/acpica/dspkginit.c b/drivers/acpi/acpica/dspkginit.c
index 997faa10f615..d869568d55c2 100644
--- a/drivers/acpi/acpica/dspkginit.c
+++ b/drivers/acpi/acpica/dspkginit.c
@@ -3,7 +3,7 @@
*
* Module Name: dspkginit - Completion of deferred package initialization
*
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/dswexec.c b/drivers/acpi/acpica/dswexec.c
index d75aae304595..5e81a1ae44cf 100644
--- a/drivers/acpi/acpica/dswexec.c
+++ b/drivers/acpi/acpica/dswexec.c
@@ -4,7 +4,7 @@
* Module Name: dswexec - Dispatcher method execution callbacks;
* dispatch to interpreter.
*
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/dswload.c b/drivers/acpi/acpica/dswload.c
index c88fd31208a5..697974e37edf 100644
--- a/drivers/acpi/acpica/dswload.c
+++ b/drivers/acpi/acpica/dswload.c
@@ -3,7 +3,7 @@
*
* Module Name: dswload - Dispatcher first pass namespace load callbacks
*
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
*
*****************************************************************************/
@@ -410,6 +410,27 @@ acpi_status acpi_ds_load1_end_op(struct acpi_walk_state *walk_state)
ACPI_DEBUG_PRINT((ACPI_DB_DISPATCH, "Op=%p State=%p\n", op,
walk_state));
+ /*
+ * Disassembler: handle create field operators here.
+ *
+ * create_buffer_field is a deferred op that is typically processed in load
+ * pass 2. However, disassembly of control method contents walks the parse
+ * tree with ACPI_PARSE_LOAD_PASS1, and AML_CREATE operators are processed
+ * in a later walk. This is a problem when there is a control method that
+ * has the same name as the AML_CREATE object. In this case, any use of the
+ * name segment will be detected as a method call rather than a reference
+ * to a buffer field.
+ *
+ * This earlier creation during disassembly solves this issue by inserting
+ * the named object in the ACPI namespace so that references to this name
+ * are treated as name strings rather than method calls.
+ */
+ if ((walk_state->parse_flags & ACPI_PARSE_DISASSEMBLE) &&
+ (walk_state->op_info->flags & AML_CREATE)) {
+ status = acpi_ds_create_buffer_field(op, walk_state);
+ return_ACPI_STATUS(status);
+ }
+
/* We are only interested in opcodes that have an associated name */
if (!(walk_state->op_info->flags & (AML_NAMED | AML_FIELD))) {
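
To make the collision concrete, a hypothetical ASL fragment (names invented for
illustration, shown as a C comment to match the surrounding code):

    /*
     * Scope (\_SB) { Method (XYZ) { ... } }   // a control method named XYZ
     * ...
     * CreateDWordField (BUF0, 0, XYZ)         // an AML_CREATE op, also XYZ
     * Store (XYZ, Local0)                     // reference to the buffer field
     *
     * If the buffer field were only created in the later walk, the
     * disassembler would take the Store source to be a call to the method
     * XYZ. Creating the field during the ACPI_PARSE_LOAD_PASS1 walk installs
     * XYZ in the namespace first, so the reference disassembles as a name
     * string.
     */
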
diff --git a/drivers/acpi/acpica/dswload2.c b/drivers/acpi/acpica/dswload2.c
index 935a8e2623e4..b31457ca926c 100644
--- a/drivers/acpi/acpica/dswload2.c
+++ b/drivers/acpi/acpica/dswload2.c
@@ -3,7 +3,7 @@
*
* Module Name: dswload2 - Dispatcher second pass namespace load callbacks
*
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/dswscope.c b/drivers/acpi/acpica/dswscope.c
index 39acf7b286da..9c397642fed7 100644
--- a/drivers/acpi/acpica/dswscope.c
+++ b/drivers/acpi/acpica/dswscope.c
@@ -3,7 +3,7 @@
*
* Module Name: dswscope - Scope stack manipulation
*
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/dswstate.c b/drivers/acpi/acpica/dswstate.c
index de79f835a373..809a0c0536b5 100644
--- a/drivers/acpi/acpica/dswstate.c
+++ b/drivers/acpi/acpica/dswstate.c
@@ -3,7 +3,7 @@
*
* Module Name: dswstate - Dispatcher parse tree walk management routines
*
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/evevent.c b/drivers/acpi/acpica/evevent.c
index 9e2f5a05c066..8c83d8c620dc 100644
--- a/drivers/acpi/acpica/evevent.c
+++ b/drivers/acpi/acpica/evevent.c
@@ -3,7 +3,7 @@
*
* Module Name: evevent - Fixed Event handling and dispatch
*
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/evglock.c b/drivers/acpi/acpica/evglock.c
index 5c77bee5d31f..0ced84ae13e4 100644
--- a/drivers/acpi/acpica/evglock.c
+++ b/drivers/acpi/acpica/evglock.c
@@ -3,7 +3,7 @@
*
* Module Name: evglock - Global Lock support
*
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/evgpe.c b/drivers/acpi/acpica/evgpe.c
index 344feba29063..3e39907fedd9 100644
--- a/drivers/acpi/acpica/evgpe.c
+++ b/drivers/acpi/acpica/evgpe.c
@@ -3,7 +3,7 @@
*
* Module Name: evgpe - General Purpose Event handling and dispatch
*
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/evgpeblk.c b/drivers/acpi/acpica/evgpeblk.c
index 9c7adaa7b582..132adff1e131 100644
--- a/drivers/acpi/acpica/evgpeblk.c
+++ b/drivers/acpi/acpica/evgpeblk.c
@@ -3,7 +3,7 @@
*
* Module Name: evgpeblk - GPE block creation and initialization.
*
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/evgpeinit.c b/drivers/acpi/acpica/evgpeinit.c
index 70d21d5ec5f3..6effd8076dcc 100644
--- a/drivers/acpi/acpica/evgpeinit.c
+++ b/drivers/acpi/acpica/evgpeinit.c
@@ -3,7 +3,7 @@
*
* Module Name: evgpeinit - System GPE initialization and update
*
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/evgpeutil.c b/drivers/acpi/acpica/evgpeutil.c
index 917892227e09..738873e876ca 100644
--- a/drivers/acpi/acpica/evgpeutil.c
+++ b/drivers/acpi/acpica/evgpeutil.c
@@ -3,7 +3,7 @@
*
* Module Name: evgpeutil - GPE utilities
*
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/evhandler.c b/drivers/acpi/acpica/evhandler.c
index 3ef4e27995f0..5884eba047f7 100644
--- a/drivers/acpi/acpica/evhandler.c
+++ b/drivers/acpi/acpica/evhandler.c
@@ -3,7 +3,7 @@
*
* Module Name: evhandler - Support for Address Space handlers
*
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/evmisc.c b/drivers/acpi/acpica/evmisc.c
index aa98fe07cd1b..ce1eda6beb84 100644
--- a/drivers/acpi/acpica/evmisc.c
+++ b/drivers/acpi/acpica/evmisc.c
@@ -3,7 +3,7 @@
*
* Module Name: evmisc - Miscellaneous event manager support functions
*
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/evregion.c b/drivers/acpi/acpica/evregion.c
index 1ff126460007..738d4b231f34 100644
--- a/drivers/acpi/acpica/evregion.c
+++ b/drivers/acpi/acpica/evregion.c
@@ -3,7 +3,7 @@
*
* Module Name: evregion - Operation Region support
*
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/evrgnini.c b/drivers/acpi/acpica/evrgnini.c
index aee09640d710..aefc0145e583 100644
--- a/drivers/acpi/acpica/evrgnini.c
+++ b/drivers/acpi/acpica/evrgnini.c
@@ -3,7 +3,7 @@
*
* Module Name: evrgnini- ACPI address_space (op_region) init
*
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/evxface.c b/drivers/acpi/acpica/evxface.c
index 279ef0557aa3..e4e012297eee 100644
--- a/drivers/acpi/acpica/evxface.c
+++ b/drivers/acpi/acpica/evxface.c
@@ -3,7 +3,7 @@
*
* Module Name: evxface - External interfaces for ACPI events
*
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/evxfevnt.c b/drivers/acpi/acpica/evxfevnt.c
index e528fe56b755..1a15b0087379 100644
--- a/drivers/acpi/acpica/evxfevnt.c
+++ b/drivers/acpi/acpica/evxfevnt.c
@@ -3,7 +3,7 @@
*
* Module Name: evxfevnt - External Interfaces, ACPI event disable/enable
*
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/evxfgpe.c b/drivers/acpi/acpica/evxfgpe.c
index 04a40d563dd6..2c39ff2a7406 100644
--- a/drivers/acpi/acpica/evxfgpe.c
+++ b/drivers/acpi/acpica/evxfgpe.c
@@ -3,7 +3,7 @@
*
* Module Name: evxfgpe - External Interfaces for General Purpose Events (GPEs)
*
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/evxfregn.c b/drivers/acpi/acpica/evxfregn.c
index 47265b073e6f..da97fd0c6b51 100644
--- a/drivers/acpi/acpica/evxfregn.c
+++ b/drivers/acpi/acpica/evxfregn.c
@@ -4,7 +4,7 @@
* Module Name: evxfregn - External Interfaces, ACPI Operation Regions and
* Address Spaces.
*
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/exconcat.c b/drivers/acpi/acpica/exconcat.c
index c7af07566b7b..43711412722f 100644
--- a/drivers/acpi/acpica/exconcat.c
+++ b/drivers/acpi/acpica/exconcat.c
@@ -3,7 +3,7 @@
*
* Module Name: exconcat - Concatenate-type AML operators
*
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/exconfig.c b/drivers/acpi/acpica/exconfig.c
index 46a8baf28bd0..68efd704e2dc 100644
--- a/drivers/acpi/acpica/exconfig.c
+++ b/drivers/acpi/acpica/exconfig.c
@@ -3,7 +3,7 @@
*
* Module Name: exconfig - Namespace reconfiguration (Load/Unload opcodes)
*
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/exconvrt.c b/drivers/acpi/acpica/exconvrt.c
index ca2966bacb50..50c7aad2e86d 100644
--- a/drivers/acpi/acpica/exconvrt.c
+++ b/drivers/acpi/acpica/exconvrt.c
@@ -3,7 +3,7 @@
*
* Module Name: exconvrt - Object conversion routines
*
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/excreate.c b/drivers/acpi/acpica/excreate.c
index f376fc00064e..a17482428b46 100644
--- a/drivers/acpi/acpica/excreate.c
+++ b/drivers/acpi/acpica/excreate.c
@@ -3,7 +3,7 @@
*
* Module Name: excreate - Named object creation
*
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/exdebug.c b/drivers/acpi/acpica/exdebug.c
index b1aeec8cac55..a5223dcaee70 100644
--- a/drivers/acpi/acpica/exdebug.c
+++ b/drivers/acpi/acpica/exdebug.c
@@ -3,7 +3,7 @@
*
* Module Name: exdebug - Support for stores to the AML Debug Object
*
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/exdump.c b/drivers/acpi/acpica/exdump.c
index a9bc938a3b55..47a4d9a40d6b 100644
--- a/drivers/acpi/acpica/exdump.c
+++ b/drivers/acpi/acpica/exdump.c
@@ -3,7 +3,7 @@
*
* Module Name: exdump - Interpreter debug output routines
*
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/exfield.c b/drivers/acpi/acpica/exfield.c
index d3d2dbfba680..e85eb31e5075 100644
--- a/drivers/acpi/acpica/exfield.c
+++ b/drivers/acpi/acpica/exfield.c
@@ -3,7 +3,7 @@
*
* Module Name: exfield - AML execution - field_unit read/write
*
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
*
*****************************************************************************/
@@ -96,7 +96,8 @@ acpi_ex_get_protocol_buffer_length(u32 protocol_id, u32 *return_length)
* RETURN: Status
*
* DESCRIPTION: Read from a named field. Returns either an Integer or a
- * Buffer, depending on the size of the field.
+ * Buffer, depending on the size of the field and whether the
+ * field was created by the create_field() operator.
*
******************************************************************************/
@@ -154,12 +155,17 @@ acpi_ex_read_data_from_field(struct acpi_walk_state *walk_state,
* the use of arithmetic operators on the returned value if the
* field size is equal or smaller than an Integer.
*
+ * However, all buffer fields created by the create_field operator need
+ * to remain buffers, to match other AML interpreter implementations.
+ *
* Note: Field.length is in bits.
*/
buffer_length =
(acpi_size)ACPI_ROUND_BITS_UP_TO_BYTES(obj_desc->field.bit_length);
- if (buffer_length > acpi_gbl_integer_byte_width) {
+ if (buffer_length > acpi_gbl_integer_byte_width ||
+ (obj_desc->common.type == ACPI_TYPE_BUFFER_FIELD &&
+ obj_desc->buffer_field.is_create_field)) {
/* Field is too large for an Integer, create a Buffer instead */
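
A hedged illustration of the resulting behavior, using hypothetical names; note
that only the generic create_field operator sets is_create_field, so the
fixed-width create operators keep returning Integers for small fields:

    /*
     * Hypothetical ASL (not taken from this patch):
     *
     *     Name (BUF0, Buffer (8) {})
     *     CreateField (BUF0, 0, 32, FLD0)    // generic CreateField operator
     *     CreateDWordField (BUF0, 4, FLD1)   // fixed-width create operator
     *
     * Both fields are 32 bits wide, no larger than the 64-bit integer width.
     * After this change, reading FLD0 returns a Buffer (matching other AML
     * interpreter implementations), while reading FLD1 still returns an
     * Integer.
     */
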
diff --git a/drivers/acpi/acpica/exfldio.c b/drivers/acpi/acpica/exfldio.c
index 95a0dcb4f7b9..ade35ff1c7ba 100644
--- a/drivers/acpi/acpica/exfldio.c
+++ b/drivers/acpi/acpica/exfldio.c
@@ -3,7 +3,7 @@
*
* Module Name: exfldio - Aml Field I/O
*
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/exmisc.c b/drivers/acpi/acpica/exmisc.c
index 60e854965af9..717e3998fd77 100644
--- a/drivers/acpi/acpica/exmisc.c
+++ b/drivers/acpi/acpica/exmisc.c
@@ -3,7 +3,7 @@
*
* Module Name: exmisc - ACPI AML (p-code) execution - specific opcodes
*
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/exmutex.c b/drivers/acpi/acpica/exmutex.c
index 775cd62af5b3..9ff247cba571 100644
--- a/drivers/acpi/acpica/exmutex.c
+++ b/drivers/acpi/acpica/exmutex.c
@@ -3,7 +3,7 @@
*
* Module Name: exmutex - ASL Mutex Acquire/Release functions
*
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/exnames.c b/drivers/acpi/acpica/exnames.c
index 6b76be5212a4..74f8b0d0452b 100644
--- a/drivers/acpi/acpica/exnames.c
+++ b/drivers/acpi/acpica/exnames.c
@@ -3,7 +3,7 @@
*
* Module Name: exnames - interpreter/scanner name load/execute
*
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/exoparg1.c b/drivers/acpi/acpica/exoparg1.c
index 06e35ea09823..a46d685a3ffc 100644
--- a/drivers/acpi/acpica/exoparg1.c
+++ b/drivers/acpi/acpica/exoparg1.c
@@ -3,7 +3,7 @@
*
* Module Name: exoparg1 - AML execution - opcodes with 1 argument
*
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/exoparg2.c b/drivers/acpi/acpica/exoparg2.c
index 5e4a31a11df4..03241d18ac1d 100644
--- a/drivers/acpi/acpica/exoparg2.c
+++ b/drivers/acpi/acpica/exoparg2.c
@@ -3,7 +3,7 @@
*
* Module Name: exoparg2 - AML execution - opcodes with 2 arguments
*
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/exoparg3.c b/drivers/acpi/acpica/exoparg3.c
index a4ebce417930..c8d0d75fc450 100644
--- a/drivers/acpi/acpica/exoparg3.c
+++ b/drivers/acpi/acpica/exoparg3.c
@@ -3,7 +3,7 @@
*
* Module Name: exoparg3 - AML execution - opcodes with 3 arguments
*
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/exoparg6.c b/drivers/acpi/acpica/exoparg6.c
index 31385a0b2dab..55d0fa056fe7 100644
--- a/drivers/acpi/acpica/exoparg6.c
+++ b/drivers/acpi/acpica/exoparg6.c
@@ -3,7 +3,7 @@
*
* Module Name: exoparg6 - AML execution - opcodes with 6 arguments
*
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/exprep.c b/drivers/acpi/acpica/exprep.c
index 728d752f7adc..a4e306690a21 100644
--- a/drivers/acpi/acpica/exprep.c
+++ b/drivers/acpi/acpica/exprep.c
@@ -3,7 +3,7 @@
*
* Module Name: exprep - ACPI AML field prep utilities
*
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/exregion.c b/drivers/acpi/acpica/exregion.c
index c08521194b29..d15a66de26c0 100644
--- a/drivers/acpi/acpica/exregion.c
+++ b/drivers/acpi/acpica/exregion.c
@@ -3,7 +3,7 @@
*
* Module Name: exregion - ACPI default op_region (address space) handlers
*
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/exresnte.c b/drivers/acpi/acpica/exresnte.c
index b223d01e6bf8..3e4018678c09 100644
--- a/drivers/acpi/acpica/exresnte.c
+++ b/drivers/acpi/acpica/exresnte.c
@@ -3,7 +3,7 @@
*
* Module Name: exresnte - AML Interpreter object resolution
*
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/exresolv.c b/drivers/acpi/acpica/exresolv.c
index 36da5c0ef69c..912a078c60a4 100644
--- a/drivers/acpi/acpica/exresolv.c
+++ b/drivers/acpi/acpica/exresolv.c
@@ -3,7 +3,7 @@
*
* Module Name: exresolv - AML Interpreter object resolution
*
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/exresop.c b/drivers/acpi/acpica/exresop.c
index bdfe4d33b483..4d1b22971d58 100644
--- a/drivers/acpi/acpica/exresop.c
+++ b/drivers/acpi/acpica/exresop.c
@@ -3,7 +3,7 @@
*
* Module Name: exresop - AML Interpreter operand/object resolution
*
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/exserial.c b/drivers/acpi/acpica/exserial.c
index c5aa4b0deb70..760bc7cef55a 100644
--- a/drivers/acpi/acpica/exserial.c
+++ b/drivers/acpi/acpica/exserial.c
@@ -3,7 +3,7 @@
*
* Module Name: exserial - field_unit support for serial address spaces
*
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/exstore.c b/drivers/acpi/acpica/exstore.c
index 7f3c3571c292..3adc0a29d890 100644
--- a/drivers/acpi/acpica/exstore.c
+++ b/drivers/acpi/acpica/exstore.c
@@ -3,7 +3,7 @@
*
* Module Name: exstore - AML Interpreter object store support
*
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/exstoren.c b/drivers/acpi/acpica/exstoren.c
index 4e43c8277f07..8c34f4e2ab8f 100644
--- a/drivers/acpi/acpica/exstoren.c
+++ b/drivers/acpi/acpica/exstoren.c
@@ -4,7 +4,7 @@
* Module Name: exstoren - AML Interpreter object store support,
* Store to Node (namespace object)
*
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/exstorob.c b/drivers/acpi/acpica/exstorob.c
index dc9e2b1c1ad9..dc66696080a5 100644
--- a/drivers/acpi/acpica/exstorob.c
+++ b/drivers/acpi/acpica/exstorob.c
@@ -3,7 +3,7 @@
*
* Module Name: exstorob - AML object store support, store to object
*
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/exsystem.c b/drivers/acpi/acpica/exsystem.c
index a538f7799b78..f329b01672bb 100644
--- a/drivers/acpi/acpica/exsystem.c
+++ b/drivers/acpi/acpica/exsystem.c
@@ -3,7 +3,7 @@
*
* Module Name: exsystem - Interface to OS services
*
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/extrace.c b/drivers/acpi/acpica/extrace.c
index db7f93ca539f..832a47885b99 100644
--- a/drivers/acpi/acpica/extrace.c
+++ b/drivers/acpi/acpica/extrace.c
@@ -3,7 +3,7 @@
*
* Module Name: extrace - Support for interpreter execution tracing
*
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/exutils.c b/drivers/acpi/acpica/exutils.c
index 75380be1c2ef..8fefa6feac2f 100644
--- a/drivers/acpi/acpica/exutils.c
+++ b/drivers/acpi/acpica/exutils.c
@@ -3,7 +3,7 @@
*
* Module Name: exutils - interpreter/scanner utilities
*
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/hwacpi.c b/drivers/acpi/acpica/hwacpi.c
index 926f7e080f22..9b9aac27ff7e 100644
--- a/drivers/acpi/acpica/hwacpi.c
+++ b/drivers/acpi/acpica/hwacpi.c
@@ -3,7 +3,7 @@
*
* Module Name: hwacpi - ACPI Hardware Initialization/Mode Interface
*
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/hwesleep.c b/drivers/acpi/acpica/hwesleep.c
index dee3affaca49..d9be5d0545d4 100644
--- a/drivers/acpi/acpica/hwesleep.c
+++ b/drivers/acpi/acpica/hwesleep.c
@@ -4,7 +4,7 @@
* Name: hwesleep.c - ACPI Hardware Sleep/Wake Support functions for the
* extended FADT-V5 sleep registers.
*
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/hwgpe.c b/drivers/acpi/acpica/hwgpe.c
index 565bd3f29f31..1b4252bdcd0b 100644
--- a/drivers/acpi/acpica/hwgpe.c
+++ b/drivers/acpi/acpica/hwgpe.c
@@ -3,7 +3,7 @@
*
* Module Name: hwgpe - Low level GPE enable/disable/clear functions
*
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/hwsleep.c b/drivers/acpi/acpica/hwsleep.c
index b62db8ec446f..243a25add28f 100644
--- a/drivers/acpi/acpica/hwsleep.c
+++ b/drivers/acpi/acpica/hwsleep.c
@@ -4,7 +4,7 @@
* Name: hwsleep.c - ACPI Hardware Sleep/Wake Support functions for the
* original/legacy sleep/PM registers.
*
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/hwtimer.c b/drivers/acpi/acpica/hwtimer.c
index 2fb9f75d71c5..07473ddfa9a9 100644
--- a/drivers/acpi/acpica/hwtimer.c
+++ b/drivers/acpi/acpica/hwtimer.c
@@ -3,7 +3,7 @@
*
* Name: hwtimer.c - ACPI Power Management Timer Interface
*
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/hwvalid.c b/drivers/acpi/acpica/hwvalid.c
index cd576153257c..4d94861e6093 100644
--- a/drivers/acpi/acpica/hwvalid.c
+++ b/drivers/acpi/acpica/hwvalid.c
@@ -3,7 +3,7 @@
*
* Module Name: hwvalid - I/O request validation
*
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/hwxface.c b/drivers/acpi/acpica/hwxface.c
index c4fd97104024..134dbfadcd15 100644
--- a/drivers/acpi/acpica/hwxface.c
+++ b/drivers/acpi/acpica/hwxface.c
@@ -3,7 +3,7 @@
*
* Module Name: hwxface - Public ACPICA hardware interfaces
*
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/hwxfsleep.c b/drivers/acpi/acpica/hwxfsleep.c
index 2919746c9041..a4b66f4b2714 100644
--- a/drivers/acpi/acpica/hwxfsleep.c
+++ b/drivers/acpi/acpica/hwxfsleep.c
@@ -3,7 +3,7 @@
*
* Name: hwxfsleep.c - ACPI Hardware Sleep/Wake External Interfaces
*
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/nsarguments.c b/drivers/acpi/acpica/nsarguments.c
index 0e97ed38973f..d5e8405e9d8f 100644
--- a/drivers/acpi/acpica/nsarguments.c
+++ b/drivers/acpi/acpica/nsarguments.c
@@ -3,7 +3,7 @@
*
* Module Name: nsarguments - Validation of args for ACPI predefined methods
*
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/nsconvert.c b/drivers/acpi/acpica/nsconvert.c
index c86d0770ed6e..c86c82939ebb 100644
--- a/drivers/acpi/acpica/nsconvert.c
+++ b/drivers/acpi/acpica/nsconvert.c
@@ -4,7 +4,7 @@
* Module Name: nsconvert - Object conversions for objects returned by
* predefined methods
*
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/nsdump.c b/drivers/acpi/acpica/nsdump.c
index 9ad340f644a1..994f0b556c60 100644
--- a/drivers/acpi/acpica/nsdump.c
+++ b/drivers/acpi/acpica/nsdump.c
@@ -3,7 +3,7 @@
*
* Module Name: nsdump - table dumping routines for debug
*
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/nsdumpdv.c b/drivers/acpi/acpica/nsdumpdv.c
index 73e5c83c8c9f..b691fe20e384 100644
--- a/drivers/acpi/acpica/nsdumpdv.c
+++ b/drivers/acpi/acpica/nsdumpdv.c
@@ -3,7 +3,7 @@
*
* Module Name: nsdump - table dumping routines for debug
*
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/nsinit.c b/drivers/acpi/acpica/nsinit.c
index 61e9dfc9fe8c..e16f6a0c2c3f 100644
--- a/drivers/acpi/acpica/nsinit.c
+++ b/drivers/acpi/acpica/nsinit.c
@@ -3,7 +3,7 @@
*
* Module Name: nsinit - namespace initialization
*
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/nsload.c b/drivers/acpi/acpica/nsload.c
index d7c4d6e8e21e..9ba17891edb6 100644
--- a/drivers/acpi/acpica/nsload.c
+++ b/drivers/acpi/acpica/nsload.c
@@ -3,7 +3,7 @@
*
* Module Name: nsload - namespace loading/expanding/contracting procedures
*
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/nsparse.c b/drivers/acpi/acpica/nsparse.c
index f16cf5e4742c..7e74a765e785 100644
--- a/drivers/acpi/acpica/nsparse.c
+++ b/drivers/acpi/acpica/nsparse.c
@@ -3,7 +3,7 @@
*
* Module Name: nsparse - namespace interface to AML parser
*
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/nspredef.c b/drivers/acpi/acpica/nspredef.c
index 2f9d93122d0c..0cea9c363ace 100644
--- a/drivers/acpi/acpica/nspredef.c
+++ b/drivers/acpi/acpica/nspredef.c
@@ -3,7 +3,7 @@
*
* Module Name: nspredef - Validation of ACPI predefined methods and objects
*
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/nsprepkg.c b/drivers/acpi/acpica/nsprepkg.c
index 9a80e3b23496..237b3ddeb075 100644
--- a/drivers/acpi/acpica/nsprepkg.c
+++ b/drivers/acpi/acpica/nsprepkg.c
@@ -3,7 +3,7 @@
*
* Module Name: nsprepkg - Validation of package objects for predefined names
*
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/nsrepair.c b/drivers/acpi/acpica/nsrepair.c
index be86fea8e4d4..90db2d85e7f5 100644
--- a/drivers/acpi/acpica/nsrepair.c
+++ b/drivers/acpi/acpica/nsrepair.c
@@ -3,7 +3,7 @@
*
* Module Name: nsrepair - Repair for objects returned by predefined methods
*
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/nsrepair2.c b/drivers/acpi/acpica/nsrepair2.c
index 663d85e0adba..125143c41bb8 100644
--- a/drivers/acpi/acpica/nsrepair2.c
+++ b/drivers/acpi/acpica/nsrepair2.c
@@ -4,7 +4,7 @@
* Module Name: nsrepair2 - Repair for objects returned by specific
* predefined methods
*
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/nsutils.c b/drivers/acpi/acpica/nsutils.c
index b8d007c84d32..e66abdab8f31 100644
--- a/drivers/acpi/acpica/nsutils.c
+++ b/drivers/acpi/acpica/nsutils.c
@@ -4,7 +4,7 @@
* Module Name: nsutils - Utilities for accessing ACPI namespace, accessing
* parents and siblings and Scope manipulation
*
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/nswalk.c b/drivers/acpi/acpica/nswalk.c
index ceea6af79d12..b7f3e8603ad8 100644
--- a/drivers/acpi/acpica/nswalk.c
+++ b/drivers/acpi/acpica/nswalk.c
@@ -3,7 +3,7 @@
*
* Module Name: nswalk - Functions for walking the ACPI namespace
*
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/nsxfname.c b/drivers/acpi/acpica/nsxfname.c
index 161e60ddfb69..984129dcaa0c 100644
--- a/drivers/acpi/acpica/nsxfname.c
+++ b/drivers/acpi/acpica/nsxfname.c
@@ -4,7 +4,7 @@
* Module Name: nsxfname - Public interfaces to the ACPI subsystem
* ACPI Namespace oriented interfaces
*
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/psargs.c b/drivers/acpi/acpica/psargs.c
index e62c7897fdf1..3b40db4ad9f3 100644
--- a/drivers/acpi/acpica/psargs.c
+++ b/drivers/acpi/acpica/psargs.c
@@ -3,7 +3,7 @@
*
* Module Name: psargs - Parse AML opcode arguments
*
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/psloop.c b/drivers/acpi/acpica/psloop.c
index 207805047bc4..3cf0687b9915 100644
--- a/drivers/acpi/acpica/psloop.c
+++ b/drivers/acpi/acpica/psloop.c
@@ -3,7 +3,7 @@
*
* Module Name: psloop - Main AML parse loop
*
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/psobject.c b/drivers/acpi/acpica/psobject.c
index ded2779fc8ea..2480c26c5171 100644
--- a/drivers/acpi/acpica/psobject.c
+++ b/drivers/acpi/acpica/psobject.c
@@ -3,7 +3,7 @@
*
* Module Name: psobject - Support for parse objects
*
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/psopcode.c b/drivers/acpi/acpica/psopcode.c
index 43775c5ce17c..28af49263ebf 100644
--- a/drivers/acpi/acpica/psopcode.c
+++ b/drivers/acpi/acpica/psopcode.c
@@ -3,7 +3,7 @@
*
* Module Name: psopcode - Parser/Interpreter opcode information table
*
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/psopinfo.c b/drivers/acpi/acpica/psopinfo.c
index 15e7563829f1..ab9327f6a63c 100644
--- a/drivers/acpi/acpica/psopinfo.c
+++ b/drivers/acpi/acpica/psopinfo.c
@@ -3,7 +3,7 @@
*
* Module Name: psopinfo - AML opcode information functions and dispatch tables
*
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/psparse.c b/drivers/acpi/acpica/psparse.c
index 9b386530ffbe..c780046bf294 100644
--- a/drivers/acpi/acpica/psparse.c
+++ b/drivers/acpi/acpica/psparse.c
@@ -3,7 +3,7 @@
*
* Module Name: psparse - Parser top level AML parse routines
*
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/psscope.c b/drivers/acpi/acpica/psscope.c
index f153ca804740..fceb311995e9 100644
--- a/drivers/acpi/acpica/psscope.c
+++ b/drivers/acpi/acpica/psscope.c
@@ -3,7 +3,7 @@
*
* Module Name: psscope - Parser scope stack management routines
*
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/pstree.c b/drivers/acpi/acpica/pstree.c
index 22d8a2becdd0..c8aef0694864 100644
--- a/drivers/acpi/acpica/pstree.c
+++ b/drivers/acpi/acpica/pstree.c
@@ -3,7 +3,7 @@
*
* Module Name: pstree - Parser op tree manipulation/traversal/search
*
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/psutils.c b/drivers/acpi/acpica/psutils.c
index 2512f584fa3c..00efae2f95ba 100644
--- a/drivers/acpi/acpica/psutils.c
+++ b/drivers/acpi/acpica/psutils.c
@@ -3,7 +3,7 @@
*
* Module Name: psutils - Parser miscellaneous utilities (Parser only)
*
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/pswalk.c b/drivers/acpi/acpica/pswalk.c
index cf91841297c2..0fe3adf6b0e5 100644
--- a/drivers/acpi/acpica/pswalk.c
+++ b/drivers/acpi/acpica/pswalk.c
@@ -3,7 +3,7 @@
*
* Module Name: pswalk - Parser routines to walk parsed op tree(s)
*
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/psxface.c b/drivers/acpi/acpica/psxface.c
index ee2ee2c858f2..1bbfc8def388 100644
--- a/drivers/acpi/acpica/psxface.c
+++ b/drivers/acpi/acpica/psxface.c
@@ -3,7 +3,7 @@
*
* Module Name: psxface - Parser external interfaces
*
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/tbdata.c b/drivers/acpi/acpica/tbdata.c
index 2cf36451e46f..523b1e9b98d4 100644
--- a/drivers/acpi/acpica/tbdata.c
+++ b/drivers/acpi/acpica/tbdata.c
@@ -3,7 +3,7 @@
*
* Module Name: tbdata - Table manager data structure functions
*
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/tbfadt.c b/drivers/acpi/acpica/tbfadt.c
index 0041bfba9abc..907edc5edba7 100644
--- a/drivers/acpi/acpica/tbfadt.c
+++ b/drivers/acpi/acpica/tbfadt.c
@@ -3,7 +3,7 @@
*
* Module Name: tbfadt - FADT table utilities
*
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/tbfind.c b/drivers/acpi/acpica/tbfind.c
index b2abb40023a6..56d81e490a5c 100644
--- a/drivers/acpi/acpica/tbfind.c
+++ b/drivers/acpi/acpica/tbfind.c
@@ -3,7 +3,7 @@
*
* Module Name: tbfind - find table
*
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/tbinstal.c b/drivers/acpi/acpica/tbinstal.c
index ef1ffd36ab3f..0bb15add2245 100644
--- a/drivers/acpi/acpica/tbinstal.c
+++ b/drivers/acpi/acpica/tbinstal.c
@@ -3,7 +3,7 @@
*
* Module Name: tbinstal - ACPI table installation and removal
*
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/tbprint.c b/drivers/acpi/acpica/tbprint.c
index 4764f849cb78..0b3494ad9a70 100644
--- a/drivers/acpi/acpica/tbprint.c
+++ b/drivers/acpi/acpica/tbprint.c
@@ -3,7 +3,7 @@
*
* Module Name: tbprint - Table output utilities
*
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/tbutils.c b/drivers/acpi/acpica/tbutils.c
index c5f0b8ec70cc..dfe1ac3ae34a 100644
--- a/drivers/acpi/acpica/tbutils.c
+++ b/drivers/acpi/acpica/tbutils.c
@@ -3,7 +3,7 @@
*
* Module Name: tbutils - ACPI Table utilities
*
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/tbxface.c b/drivers/acpi/acpica/tbxface.c
index 1640685bf4ae..f8403d480318 100644
--- a/drivers/acpi/acpica/tbxface.c
+++ b/drivers/acpi/acpica/tbxface.c
@@ -3,7 +3,7 @@
*
* Module Name: tbxface - ACPI table-oriented external interfaces
*
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/tbxfload.c b/drivers/acpi/acpica/tbxfload.c
index 0782acf85722..bcba993d4dac 100644
--- a/drivers/acpi/acpica/tbxfload.c
+++ b/drivers/acpi/acpica/tbxfload.c
@@ -3,7 +3,7 @@
*
* Module Name: tbxfload - Table load/unload external interfaces
*
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/tbxfroot.c b/drivers/acpi/acpica/tbxfroot.c
index e2859d09ca2e..0edc6ef5d46d 100644
--- a/drivers/acpi/acpica/tbxfroot.c
+++ b/drivers/acpi/acpica/tbxfroot.c
@@ -3,7 +3,7 @@
*
* Module Name: tbxfroot - Find the root ACPI table (RSDT)
*
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/utaddress.c b/drivers/acpi/acpica/utaddress.c
index bb260376bd59..99fa48722cf6 100644
--- a/drivers/acpi/acpica/utaddress.c
+++ b/drivers/acpi/acpica/utaddress.c
@@ -3,7 +3,7 @@
*
* Module Name: utaddress - op_region address range check
*
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/utalloc.c b/drivers/acpi/acpica/utalloc.c
index d64da4d9e8d0..303ab51b4fcf 100644
--- a/drivers/acpi/acpica/utalloc.c
+++ b/drivers/acpi/acpica/utalloc.c
@@ -3,7 +3,7 @@
*
* Module Name: utalloc - local memory allocation routines
*
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/utascii.c b/drivers/acpi/acpica/utascii.c
index f6cd7d4f698b..d78656d960e8 100644
--- a/drivers/acpi/acpica/utascii.c
+++ b/drivers/acpi/acpica/utascii.c
@@ -3,7 +3,7 @@
*
* Module Name: utascii - Utility ascii functions
*
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/utbuffer.c b/drivers/acpi/acpica/utbuffer.c
index db897af1de05..f2ec427f4e29 100644
--- a/drivers/acpi/acpica/utbuffer.c
+++ b/drivers/acpi/acpica/utbuffer.c
@@ -3,7 +3,7 @@
*
* Module Name: utbuffer - Buffer dump routines
*
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/utcache.c b/drivers/acpi/acpica/utcache.c
index 8533fce7fa93..1b03a2747401 100644
--- a/drivers/acpi/acpica/utcache.c
+++ b/drivers/acpi/acpica/utcache.c
@@ -3,7 +3,7 @@
*
* Module Name: utcache - local cache allocation routines
*
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/utcopy.c b/drivers/acpi/acpica/utcopy.c
index 1fb8327f3c3b..41bdd0278dd8 100644
--- a/drivers/acpi/acpica/utcopy.c
+++ b/drivers/acpi/acpica/utcopy.c
@@ -3,7 +3,7 @@
*
* Module Name: utcopy - Internal to external object translation utilities
*
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/utdebug.c b/drivers/acpi/acpica/utdebug.c
index 5b169b5f0f1a..0c8cb0612414 100644
--- a/drivers/acpi/acpica/utdebug.c
+++ b/drivers/acpi/acpica/utdebug.c
@@ -3,7 +3,7 @@
*
* Module Name: utdebug - Debug print/trace routines
*
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/utdecode.c b/drivers/acpi/acpica/utdecode.c
index 65beaa237669..befdd13b403b 100644
--- a/drivers/acpi/acpica/utdecode.c
+++ b/drivers/acpi/acpica/utdecode.c
@@ -3,7 +3,7 @@
*
* Module Name: utdecode - Utility decoding routines (value-to-string)
*
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/uteval.c b/drivers/acpi/acpica/uteval.c
index 558a9f3b0678..8180d1a458f5 100644
--- a/drivers/acpi/acpica/uteval.c
+++ b/drivers/acpi/acpica/uteval.c
@@ -3,7 +3,7 @@
*
* Module Name: uteval - Object evaluation
*
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/utglobal.c b/drivers/acpi/acpica/utglobal.c
index b0622ec4bb85..e6dcbdc3fc6e 100644
--- a/drivers/acpi/acpica/utglobal.c
+++ b/drivers/acpi/acpica/utglobal.c
@@ -3,7 +3,7 @@
*
* Module Name: utglobal - Global variables for the ACPI subsystem
*
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/uthex.c b/drivers/acpi/acpica/uthex.c
index b6da135d5f41..0e02f12513dc 100644
--- a/drivers/acpi/acpica/uthex.c
+++ b/drivers/acpi/acpica/uthex.c
@@ -3,7 +3,7 @@
*
* Module Name: uthex -- Hex/ASCII support functions
*
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/utids.c b/drivers/acpi/acpica/utids.c
index 30198c828ab6..3bb06935a2ad 100644
--- a/drivers/acpi/acpica/utids.c
+++ b/drivers/acpi/acpica/utids.c
@@ -3,7 +3,7 @@
*
* Module Name: utids - support for device Ids - HID, UID, CID, SUB, CLS
*
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/utinit.c b/drivers/acpi/acpica/utinit.c
index 6f33e7c72327..fdbc397c038d 100644
--- a/drivers/acpi/acpica/utinit.c
+++ b/drivers/acpi/acpica/utinit.c
@@ -3,7 +3,7 @@
*
* Module Name: utinit - Common ACPI subsystem initialization
*
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/utlock.c b/drivers/acpi/acpica/utlock.c
index 8b4ff11d617a..46be549539e7 100644
--- a/drivers/acpi/acpica/utlock.c
+++ b/drivers/acpi/acpica/utlock.c
@@ -3,7 +3,7 @@
*
* Module Name: utlock - Reader/Writer lock interfaces
*
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/utobject.c b/drivers/acpi/acpica/utobject.c
index eee97a902696..3e60bdac2200 100644
--- a/drivers/acpi/acpica/utobject.c
+++ b/drivers/acpi/acpica/utobject.c
@@ -3,7 +3,7 @@
*
* Module Name: utobject - ACPI object create/delete/size/cache routines
*
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/utosi.c b/drivers/acpi/acpica/utosi.c
index ad2b218039d0..0a01c08dad8a 100644
--- a/drivers/acpi/acpica/utosi.c
+++ b/drivers/acpi/acpica/utosi.c
@@ -3,7 +3,7 @@
*
* Module Name: utosi - Support for the _OSI predefined control method
*
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/utpredef.c b/drivers/acpi/acpica/utpredef.c
index 1b0f68f5ed8c..05fe3470fb93 100644
--- a/drivers/acpi/acpica/utpredef.c
+++ b/drivers/acpi/acpica/utpredef.c
@@ -3,7 +3,7 @@
*
* Module Name: utpredef - support functions for predefined names
*
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/utprint.c b/drivers/acpi/acpica/utprint.c
index 5839f2fa7400..a874dac7db5c 100644
--- a/drivers/acpi/acpica/utprint.c
+++ b/drivers/acpi/acpica/utprint.c
@@ -3,7 +3,7 @@
*
* Module Name: utprint - Formatted printing routines
*
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/uttrack.c b/drivers/acpi/acpica/uttrack.c
index 14de4d15e618..d366be431a84 100644
--- a/drivers/acpi/acpica/uttrack.c
+++ b/drivers/acpi/acpica/uttrack.c
@@ -3,7 +3,7 @@
*
* Module Name: uttrack - Memory allocation tracking routines (debug only)
*
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/utuuid.c b/drivers/acpi/acpica/utuuid.c
index 0a7cf8007643..b8039954b0d1 100644
--- a/drivers/acpi/acpica/utuuid.c
+++ b/drivers/acpi/acpica/utuuid.c
@@ -3,7 +3,7 @@
*
* Module Name: utuuid -- UUID support functions
*
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/utxface.c b/drivers/acpi/acpica/utxface.c
index f497c4b30e65..ca7c9f0144ef 100644
--- a/drivers/acpi/acpica/utxface.c
+++ b/drivers/acpi/acpica/utxface.c
@@ -3,7 +3,7 @@
*
* Module Name: utxface - External interfaces, miscellaneous utility functions
*
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/utxfinit.c b/drivers/acpi/acpica/utxfinit.c
index cf769e94fe0f..653e3bb20036 100644
--- a/drivers/acpi/acpica/utxfinit.c
+++ b/drivers/acpi/acpica/utxfinit.c
@@ -3,7 +3,7 @@
*
* Module Name: utxfinit - External interfaces for ACPICA initialization
*
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/apei/ghes.c b/drivers/acpi/apei/ghes.c
index 8906c80175e6..103acbbfcf9a 100644
--- a/drivers/acpi/apei/ghes.c
+++ b/drivers/acpi/apei/ghes.c
@@ -1180,7 +1180,7 @@ static int ghes_probe(struct platform_device *ghes_dev)
switch (generic->notify.type) {
case ACPI_HEST_NOTIFY_POLLED:
- timer_setup(&ghes->timer, ghes_poll_func, TIMER_DEFERRABLE);
+ timer_setup(&ghes->timer, ghes_poll_func, 0);
ghes_add_timer(ghes);
break;
case ACPI_HEST_NOTIFY_EXTERNAL:
diff --git a/drivers/acpi/arm64/iort.c b/drivers/acpi/arm64/iort.c
index 33f71983e001..6078064684c6 100644
--- a/drivers/acpi/arm64/iort.c
+++ b/drivers/acpi/arm64/iort.c
@@ -298,6 +298,59 @@ out:
return status;
}
+struct iort_workaround_oem_info {
+ char oem_id[ACPI_OEM_ID_SIZE + 1];
+ char oem_table_id[ACPI_OEM_TABLE_ID_SIZE + 1];
+ u32 oem_revision;
+};
+
+static bool apply_id_count_workaround;
+
+static struct iort_workaround_oem_info wa_info[] __initdata = {
+ {
+ .oem_id = "HISI ",
+ .oem_table_id = "HIP07 ",
+ .oem_revision = 0,
+ }, {
+ .oem_id = "HISI ",
+ .oem_table_id = "HIP08 ",
+ .oem_revision = 0,
+ }
+};
+
+static void __init
+iort_check_id_count_workaround(struct acpi_table_header *tbl)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(wa_info); i++) {
+ if (!memcmp(wa_info[i].oem_id, tbl->oem_id, ACPI_OEM_ID_SIZE) &&
+ !memcmp(wa_info[i].oem_table_id, tbl->oem_table_id, ACPI_OEM_TABLE_ID_SIZE) &&
+ wa_info[i].oem_revision == tbl->oem_revision) {
+ apply_id_count_workaround = true;
+ pr_warn(FW_BUG "ID count for ID mapping entry is wrong, applying workaround\n");
+ break;
+ }
+ }
+}
+
+static inline u32 iort_get_map_max(struct acpi_iort_id_mapping *map)
+{
+ u32 map_max = map->input_base + map->id_count;
+
+ /*
+	 * The IORT specification revision D (Section 3, table 4, page 9) says
+	 * Number of IDs = The number of IDs in the range minus one, but the
+	 * IORT code ignored the "minus one" and some firmware did too, so
+	 * apply a workaround here to stay compatible with both spec-compliant
+	 * and non-spec-compliant firmware.
+ */
+ if (apply_id_count_workaround)
+ map_max--;
+
+ return map_max;
+}
+
static int iort_id_map(struct acpi_iort_id_mapping *map, u8 type, u32 rid_in,
u32 *rid_out)
{
@@ -314,8 +367,7 @@ static int iort_id_map(struct acpi_iort_id_mapping *map, u8 type, u32 rid_in,
return -ENXIO;
}
- if (rid_in < map->input_base ||
- (rid_in >= map->input_base + map->id_count))
+ if (rid_in < map->input_base || rid_in > iort_get_map_max(map))
return -ENXIO;
*rid_out = map->output_base + (rid_in - map->input_base);
@@ -1631,5 +1683,6 @@ void __init acpi_iort_init(void)
return;
}
+ iort_check_id_count_workaround(iort_table);
iort_init_platform_devices();
}
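
For reference, a minimal standalone sketch (plain C, not the kernel code; all names and values are illustrative) of the inclusive range check this hunk introduces: with the spec's encoding the last valid input ID is input_base + id_count, and the workaround shrinks the range by one for firmware that ignored the "minus one".

```c
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct id_mapping {
	uint32_t input_base;
	uint32_t id_count;	/* number of IDs in the range minus one */
	uint32_t output_base;
};

static bool apply_id_count_workaround;	/* set when quirky firmware is matched */

static uint32_t map_max(const struct id_mapping *map)
{
	uint32_t max = map->input_base + map->id_count;

	return apply_id_count_workaround ? max - 1 : max;
}

static int id_map(const struct id_mapping *map, uint32_t rid_in,
		  uint32_t *rid_out)
{
	if (rid_in < map->input_base || rid_in > map_max(map))
		return -1;	/* -ENXIO in the driver */
	*rid_out = map->output_base + (rid_in - map->input_base);
	return 0;
}

int main(void)
{
	struct id_mapping m = { .input_base = 0x100, .id_count = 3,
				.output_base = 0x800 };
	uint32_t out;

	/* Spec-compliant firmware: inputs 0x100..0x103 map to 0x800..0x803. */
	printf("0x103 -> %s\n", id_map(&m, 0x103, &out) ? "rejected" : "mapped");

	/* With the workaround the range shrinks by one, so 0x103 is rejected. */
	apply_id_count_workaround = true;
	printf("0x103 -> %s\n", id_map(&m, 0x103, &out) ? "rejected" : "mapped");
	return 0;
}
```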
diff --git a/drivers/acpi/battery.c b/drivers/acpi/battery.c
index 8f0e0c8d8c3d..15cc7d5a6185 100644
--- a/drivers/acpi/battery.c
+++ b/drivers/acpi/battery.c
@@ -38,6 +38,8 @@
#define PREFIX "ACPI: "
#define ACPI_BATTERY_VALUE_UNKNOWN 0xFFFFFFFF
+#define ACPI_BATTERY_CAPACITY_VALID(capacity) \
+ ((capacity) != 0 && (capacity) != ACPI_BATTERY_VALUE_UNKNOWN)
#define ACPI_BATTERY_DEVICE_NAME "Battery"
@@ -192,7 +194,8 @@ static int acpi_battery_is_charged(struct acpi_battery *battery)
static bool acpi_battery_is_degraded(struct acpi_battery *battery)
{
- return battery->full_charge_capacity && battery->design_capacity &&
+ return ACPI_BATTERY_CAPACITY_VALID(battery->full_charge_capacity) &&
+ ACPI_BATTERY_CAPACITY_VALID(battery->design_capacity) &&
battery->full_charge_capacity < battery->design_capacity;
}
@@ -214,7 +217,7 @@ static int acpi_battery_get_property(struct power_supply *psy,
enum power_supply_property psp,
union power_supply_propval *val)
{
- int ret = 0;
+ int full_capacity = ACPI_BATTERY_VALUE_UNKNOWN, ret = 0;
struct acpi_battery *battery = to_acpi_battery(psy);
if (acpi_battery_present(battery)) {
@@ -263,14 +266,14 @@ static int acpi_battery_get_property(struct power_supply *psy,
break;
case POWER_SUPPLY_PROP_CHARGE_FULL_DESIGN:
case POWER_SUPPLY_PROP_ENERGY_FULL_DESIGN:
- if (battery->design_capacity == ACPI_BATTERY_VALUE_UNKNOWN)
+ if (!ACPI_BATTERY_CAPACITY_VALID(battery->design_capacity))
ret = -ENODEV;
else
val->intval = battery->design_capacity * 1000;
break;
case POWER_SUPPLY_PROP_CHARGE_FULL:
case POWER_SUPPLY_PROP_ENERGY_FULL:
- if (battery->full_charge_capacity == ACPI_BATTERY_VALUE_UNKNOWN)
+ if (!ACPI_BATTERY_CAPACITY_VALID(battery->full_charge_capacity))
ret = -ENODEV;
else
val->intval = battery->full_charge_capacity * 1000;
@@ -283,11 +286,17 @@ static int acpi_battery_get_property(struct power_supply *psy,
val->intval = battery->capacity_now * 1000;
break;
case POWER_SUPPLY_PROP_CAPACITY:
- if (battery->capacity_now && battery->full_charge_capacity)
- val->intval = battery->capacity_now * 100/
- battery->full_charge_capacity;
+ if (ACPI_BATTERY_CAPACITY_VALID(battery->full_charge_capacity))
+ full_capacity = battery->full_charge_capacity;
+ else if (ACPI_BATTERY_CAPACITY_VALID(battery->design_capacity))
+ full_capacity = battery->design_capacity;
+
+ if (battery->capacity_now == ACPI_BATTERY_VALUE_UNKNOWN ||
+ full_capacity == ACPI_BATTERY_VALUE_UNKNOWN)
+ ret = -ENODEV;
else
- val->intval = 0;
+ val->intval = battery->capacity_now * 100/
+ full_capacity;
break;
case POWER_SUPPLY_PROP_CAPACITY_LEVEL:
if (battery->state & ACPI_BATTERY_STATE_CRITICAL)
@@ -333,6 +342,20 @@ static enum power_supply_property charge_battery_props[] = {
POWER_SUPPLY_PROP_SERIAL_NUMBER,
};
+static enum power_supply_property charge_battery_full_cap_broken_props[] = {
+ POWER_SUPPLY_PROP_STATUS,
+ POWER_SUPPLY_PROP_PRESENT,
+ POWER_SUPPLY_PROP_TECHNOLOGY,
+ POWER_SUPPLY_PROP_CYCLE_COUNT,
+ POWER_SUPPLY_PROP_VOLTAGE_MIN_DESIGN,
+ POWER_SUPPLY_PROP_VOLTAGE_NOW,
+ POWER_SUPPLY_PROP_CURRENT_NOW,
+ POWER_SUPPLY_PROP_CHARGE_NOW,
+ POWER_SUPPLY_PROP_MODEL_NAME,
+ POWER_SUPPLY_PROP_MANUFACTURER,
+ POWER_SUPPLY_PROP_SERIAL_NUMBER,
+};
+
static enum power_supply_property energy_battery_props[] = {
POWER_SUPPLY_PROP_STATUS,
POWER_SUPPLY_PROP_PRESENT,
@@ -794,20 +817,34 @@ static void __exit battery_hook_exit(void)
static int sysfs_add_battery(struct acpi_battery *battery)
{
struct power_supply_config psy_cfg = { .drv_data = battery, };
+ bool full_cap_broken = false;
+
+ if (!ACPI_BATTERY_CAPACITY_VALID(battery->full_charge_capacity) &&
+ !ACPI_BATTERY_CAPACITY_VALID(battery->design_capacity))
+ full_cap_broken = true;
if (battery->power_unit == ACPI_BATTERY_POWER_UNIT_MA) {
- battery->bat_desc.properties = charge_battery_props;
- battery->bat_desc.num_properties =
- ARRAY_SIZE(charge_battery_props);
- } else if (battery->full_charge_capacity == 0) {
- battery->bat_desc.properties =
- energy_battery_full_cap_broken_props;
- battery->bat_desc.num_properties =
- ARRAY_SIZE(energy_battery_full_cap_broken_props);
+ if (full_cap_broken) {
+ battery->bat_desc.properties =
+ charge_battery_full_cap_broken_props;
+ battery->bat_desc.num_properties =
+ ARRAY_SIZE(charge_battery_full_cap_broken_props);
+ } else {
+ battery->bat_desc.properties = charge_battery_props;
+ battery->bat_desc.num_properties =
+ ARRAY_SIZE(charge_battery_props);
+ }
} else {
- battery->bat_desc.properties = energy_battery_props;
- battery->bat_desc.num_properties =
- ARRAY_SIZE(energy_battery_props);
+ if (full_cap_broken) {
+ battery->bat_desc.properties =
+ energy_battery_full_cap_broken_props;
+ battery->bat_desc.num_properties =
+ ARRAY_SIZE(energy_battery_full_cap_broken_props);
+ } else {
+ battery->bat_desc.properties = energy_battery_props;
+ battery->bat_desc.num_properties =
+ ARRAY_SIZE(energy_battery_props);
+ }
}
battery->bat_desc.name = acpi_device_bid(battery->device);
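
A minimal sketch of the capacity reporting logic the battery.c hunks converge on, using illustrative values rather than real firmware data: prefer full_charge_capacity, fall back to design_capacity, and fail (the driver returns -ENODEV) when neither is usable.

```c
#include <stdint.h>
#include <stdio.h>

#define VALUE_UNKNOWN 0xFFFFFFFFu
#define CAPACITY_VALID(c) ((c) != 0 && (c) != VALUE_UNKNOWN)

/* Returns 0 and fills *pct, or -1 where the driver reports -ENODEV. */
static int capacity_percent(uint32_t now, uint32_t full, uint32_t design,
			    unsigned int *pct)
{
	uint32_t full_capacity = VALUE_UNKNOWN;

	if (CAPACITY_VALID(full))
		full_capacity = full;
	else if (CAPACITY_VALID(design))
		full_capacity = design;	/* fallback added by this patch */

	if (now == VALUE_UNKNOWN || full_capacity == VALUE_UNKNOWN)
		return -1;

	*pct = now * 100 / full_capacity;
	return 0;
}

int main(void)
{
	unsigned int pct;

	/* Broken full_charge_capacity (0) now falls back to design_capacity. */
	if (!capacity_percent(2500, 0, 5000, &pct))
		printf("capacity: %u%%\n", pct);	/* prints 50% */
	return 0;
}
```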
diff --git a/drivers/acpi/button.c b/drivers/acpi/button.c
index b758b45737f5..f6925f16c4a2 100644
--- a/drivers/acpi/button.c
+++ b/drivers/acpi/button.c
@@ -122,6 +122,17 @@ static const struct dmi_system_id dmi_lid_quirks[] = {
},
.driver_data = (void *)(long)ACPI_BUTTON_LID_INIT_OPEN,
},
+ {
+ /*
+	 * Razer Blade Stealth 13 late 2019: notification of the LID device
+	 * only happens on close, not on open, and _LID always returns closed.
+ */
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Razer"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Razer Blade Stealth 13 Late 2019"),
+ },
+ .driver_data = (void *)(long)ACPI_BUTTON_LID_INIT_OPEN,
+ },
{}
};
diff --git a/drivers/acpi/device_pm.c b/drivers/acpi/device_pm.c
index 5e4a8860a9c0..b64c62bfcea5 100644
--- a/drivers/acpi/device_pm.c
+++ b/drivers/acpi/device_pm.c
@@ -1321,6 +1321,7 @@ int acpi_dev_pm_attach(struct device *dev, bool power_on)
*/
static const struct acpi_device_id special_pm_ids[] = {
{"PNP0C0B", }, /* Generic ACPI fan */
+ {"INT1044", }, /* Fan for Tiger Lake generation */
{"INT3404", }, /* Fan */
{}
};
diff --git a/drivers/acpi/dptf/dptf_power.c b/drivers/acpi/dptf/dptf_power.c
index eb58fc475a03..387f27ef3368 100644
--- a/drivers/acpi/dptf/dptf_power.c
+++ b/drivers/acpi/dptf/dptf_power.c
@@ -97,6 +97,7 @@ static int dptf_power_remove(struct platform_device *pdev)
}
static const struct acpi_device_id int3407_device_ids[] = {
+ {"INT1047", 0},
{"INT3407", 0},
{"", 0},
};
diff --git a/drivers/acpi/dptf/int340x_thermal.c b/drivers/acpi/dptf/int340x_thermal.c
index 5c7a90186e3c..1ec7b6900662 100644
--- a/drivers/acpi/dptf/int340x_thermal.c
+++ b/drivers/acpi/dptf/int340x_thermal.c
@@ -13,6 +13,10 @@
#define INT3401_DEVICE 0X01
static const struct acpi_device_id int340x_thermal_device_ids[] = {
+ {"INT1040"},
+ {"INT1043"},
+ {"INT1044"},
+ {"INT1047"},
{"INT3400"},
{"INT3401", INT3401_DEVICE},
{"INT3402"},
diff --git a/drivers/acpi/ec.c b/drivers/acpi/ec.c
index d05be13c1022..08bc9751fe66 100644
--- a/drivers/acpi/ec.c
+++ b/drivers/acpi/ec.c
@@ -1053,28 +1053,20 @@ void acpi_ec_unblock_transactions(void)
Event Management
-------------------------------------------------------------------------- */
static struct acpi_ec_query_handler *
-acpi_ec_get_query_handler(struct acpi_ec_query_handler *handler)
-{
- if (handler)
- kref_get(&handler->kref);
- return handler;
-}
-
-static struct acpi_ec_query_handler *
acpi_ec_get_query_handler_by_value(struct acpi_ec *ec, u8 value)
{
struct acpi_ec_query_handler *handler;
- bool found = false;
mutex_lock(&ec->mutex);
list_for_each_entry(handler, &ec->list, node) {
if (value == handler->query_bit) {
- found = true;
- break;
+ kref_get(&handler->kref);
+ mutex_unlock(&ec->mutex);
+ return handler;
}
}
mutex_unlock(&ec->mutex);
- return found ? acpi_ec_get_query_handler(handler) : NULL;
+ return NULL;
}
static void acpi_ec_query_handler_release(struct kref *kref)
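
The EC change above folds the reference acquisition into the lookup so the reference is taken while the list mutex is still held. A rough userspace stand-in for that pattern, using a pthread mutex and a plain counter in place of struct kref:

```c
#include <pthread.h>
#include <stddef.h>
#include <stdio.h>

struct handler {
	unsigned char query_bit;
	int refcount;			/* stands in for struct kref */
	struct handler *next;
};

static pthread_mutex_t list_lock = PTHREAD_MUTEX_INITIALIZER;
static struct handler *handlers;

static struct handler *get_handler_by_value(unsigned char value)
{
	struct handler *h;

	pthread_mutex_lock(&list_lock);
	for (h = handlers; h; h = h->next) {
		if (h->query_bit == value) {
			h->refcount++;	/* take the reference under the lock */
			pthread_mutex_unlock(&list_lock);
			return h;
		}
	}
	pthread_mutex_unlock(&list_lock);
	return NULL;
}

int main(void)
{
	struct handler h = { .query_bit = 0x42, .refcount = 1 };

	handlers = &h;
	if (get_handler_by_value(0x42))
		printf("found, refcount now %d\n", h.refcount);	/* 2 */
	return 0;
}
```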
diff --git a/drivers/acpi/fan.c b/drivers/acpi/fan.c
index 816b0803f7fb..aaf4e8f348cf 100644
--- a/drivers/acpi/fan.c
+++ b/drivers/acpi/fan.c
@@ -25,6 +25,7 @@ static int acpi_fan_remove(struct platform_device *pdev);
static const struct acpi_device_id fan_device_ids[] = {
{"PNP0C0B", 0},
+ {"INT1044", 0},
{"INT3404", 0},
{"", 0},
};
@@ -44,12 +45,16 @@ static const struct dev_pm_ops acpi_fan_pm = {
#define FAN_PM_OPS_PTR NULL
#endif
+#define ACPI_FPS_NAME_LEN 20
+
struct acpi_fan_fps {
u64 control;
u64 trip_point;
u64 speed;
u64 noise_level;
u64 power;
+ char name[ACPI_FPS_NAME_LEN];
+ struct device_attribute dev_attr;
};
struct acpi_fan_fif {
@@ -265,6 +270,39 @@ static int acpi_fan_speed_cmp(const void *a, const void *b)
return fps1->speed - fps2->speed;
}
+static ssize_t show_state(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ struct acpi_fan_fps *fps = container_of(attr, struct acpi_fan_fps, dev_attr);
+ int count;
+
+ if (fps->control == 0xFFFFFFFF || fps->control > 100)
+ count = snprintf(buf, PAGE_SIZE, "not-defined:");
+ else
+ count = snprintf(buf, PAGE_SIZE, "%lld:", fps->control);
+
+ if (fps->trip_point == 0xFFFFFFFF || fps->trip_point > 9)
+ count += snprintf(&buf[count], PAGE_SIZE - count, "not-defined:");
+ else
+ count += snprintf(&buf[count], PAGE_SIZE - count, "%lld:", fps->trip_point);
+
+ if (fps->speed == 0xFFFFFFFF)
+ count += snprintf(&buf[count], PAGE_SIZE - count, "not-defined:");
+ else
+ count += snprintf(&buf[count], PAGE_SIZE - count, "%lld:", fps->speed);
+
+ if (fps->noise_level == 0xFFFFFFFF)
+ count += snprintf(&buf[count], PAGE_SIZE - count, "not-defined:");
+ else
+ count += snprintf(&buf[count], PAGE_SIZE - count, "%lld:", fps->noise_level * 100);
+
+ if (fps->power == 0xFFFFFFFF)
+ count += snprintf(&buf[count], PAGE_SIZE - count, "not-defined\n");
+ else
+ count += snprintf(&buf[count], PAGE_SIZE - count, "%lld\n", fps->power);
+
+ return count;
+}
+
static int acpi_fan_get_fps(struct acpi_device *device)
{
struct acpi_fan *fan = acpi_driver_data(device);
@@ -295,12 +333,13 @@ static int acpi_fan_get_fps(struct acpi_device *device)
}
for (i = 0; i < fan->fps_count; i++) {
struct acpi_buffer format = { sizeof("NNNNN"), "NNNNN" };
- struct acpi_buffer fps = { sizeof(fan->fps[i]), &fan->fps[i] };
+ struct acpi_buffer fps = { offsetof(struct acpi_fan_fps, name),
+ &fan->fps[i] };
status = acpi_extract_package(&obj->package.elements[i + 1],
&format, &fps);
if (ACPI_FAILURE(status)) {
dev_err(&device->dev, "Invalid _FPS element\n");
- break;
+ goto err;
}
}
@@ -308,6 +347,24 @@ static int acpi_fan_get_fps(struct acpi_device *device)
sort(fan->fps, fan->fps_count, sizeof(*fan->fps),
acpi_fan_speed_cmp, NULL);
+ for (i = 0; i < fan->fps_count; ++i) {
+ struct acpi_fan_fps *fps = &fan->fps[i];
+
+ snprintf(fps->name, ACPI_FPS_NAME_LEN, "state%d", i);
+ fps->dev_attr.show = show_state;
+ fps->dev_attr.store = NULL;
+ fps->dev_attr.attr.name = fps->name;
+ fps->dev_attr.attr.mode = 0444;
+ status = sysfs_create_file(&device->dev.kobj, &fps->dev_attr.attr);
+ if (status) {
+ int j;
+
+ for (j = 0; j < i; ++j)
+ sysfs_remove_file(&device->dev.kobj, &fan->fps[j].dev_attr.attr);
+ break;
+ }
+ }
+
err:
kfree(obj);
return status;
@@ -330,14 +387,20 @@ static int acpi_fan_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, fan);
if (acpi_fan_is_acpi4(device)) {
- if (acpi_fan_get_fif(device) || acpi_fan_get_fps(device))
- goto end;
+ result = acpi_fan_get_fif(device);
+ if (result)
+ return result;
+
+ result = acpi_fan_get_fps(device);
+ if (result)
+ return result;
+
fan->acpi4 = true;
} else {
result = acpi_device_update_power(device, NULL);
if (result) {
dev_err(&device->dev, "Failed to set initial power state\n");
- goto end;
+ goto err_end;
}
}
@@ -350,7 +413,7 @@ static int acpi_fan_probe(struct platform_device *pdev)
&fan_cooling_ops);
if (IS_ERR(cdev)) {
result = PTR_ERR(cdev);
- goto end;
+ goto err_end;
}
dev_dbg(&pdev->dev, "registered as cooling_device%d\n", cdev->id);
@@ -365,10 +428,21 @@ static int acpi_fan_probe(struct platform_device *pdev)
result = sysfs_create_link(&cdev->device.kobj,
&pdev->dev.kobj,
"device");
- if (result)
+ if (result) {
dev_err(&pdev->dev, "Failed to create sysfs link 'device'\n");
+ goto err_end;
+ }
+
+ return 0;
+
+err_end:
+ if (fan->acpi4) {
+ int i;
+
+ for (i = 0; i < fan->fps_count; ++i)
+ sysfs_remove_file(&device->dev.kobj, &fan->fps[i].dev_attr.attr);
+ }
-end:
return result;
}
@@ -376,6 +450,13 @@ static int acpi_fan_remove(struct platform_device *pdev)
{
struct acpi_fan *fan = platform_get_drvdata(pdev);
+ if (fan->acpi4) {
+ struct acpi_device *device = ACPI_COMPANION(&pdev->dev);
+ int i;
+
+ for (i = 0; i < fan->fps_count; ++i)
+ sysfs_remove_file(&device->dev.kobj, &fan->fps[i].dev_attr.attr);
+ }
sysfs_remove_link(&pdev->dev.kobj, "thermal_cooling");
sysfs_remove_link(&fan->cdev->device.kobj, "device");
thermal_cooling_device_unregister(fan->cdev);
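
The show_state() callback added above relies on embedding a device_attribute inside each acpi_fan_fps entry so container_of() can recover the _FPS record from the attribute pointer. A self-contained illustration of that pattern (userspace C; the offsetof-based container_of mirrors the kernel macro):

```c
#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct attr {
	const char *name;
};

struct fps {
	unsigned long long control;
	char name[20];
	struct attr dev_attr;	/* embedded, one per performance state */
};

static void show(struct attr *a)
{
	struct fps *f = container_of(a, struct fps, dev_attr);

	printf("%s: control=%llu\n", f->name, f->control);
}

int main(void)
{
	struct fps f = { .control = 75, .name = "state3" };

	f.dev_attr.name = f.name;
	show(&f.dev_attr);	/* recovers f from the embedded attribute */
	return 0;
}
```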
diff --git a/drivers/acpi/pptt.c b/drivers/acpi/pptt.c
index f31544d3656e..4ae93350b70d 100644
--- a/drivers/acpi/pptt.c
+++ b/drivers/acpi/pptt.c
@@ -98,11 +98,11 @@ static inline bool acpi_pptt_match_type(int table_type, int type)
*
* Return: The cache structure and the level we terminated with.
*/
-static int acpi_pptt_walk_cache(struct acpi_table_header *table_hdr,
- int local_level,
- struct acpi_subtable_header *res,
- struct acpi_pptt_cache **found,
- int level, int type)
+static unsigned int acpi_pptt_walk_cache(struct acpi_table_header *table_hdr,
+ unsigned int local_level,
+ struct acpi_subtable_header *res,
+ struct acpi_pptt_cache **found,
+ unsigned int level, int type)
{
struct acpi_pptt_cache *cache;
@@ -119,7 +119,7 @@ static int acpi_pptt_walk_cache(struct acpi_table_header *table_hdr,
if (*found != NULL && cache != *found)
pr_warn("Found duplicate cache level/type unable to determine uniqueness\n");
- pr_debug("Found cache @ level %d\n", level);
+ pr_debug("Found cache @ level %u\n", level);
*found = cache;
/*
* continue looking at this node's resource list
@@ -132,16 +132,17 @@ static int acpi_pptt_walk_cache(struct acpi_table_header *table_hdr,
return local_level;
}
-static struct acpi_pptt_cache *acpi_find_cache_level(struct acpi_table_header *table_hdr,
- struct acpi_pptt_processor *cpu_node,
- int *starting_level, int level,
- int type)
+static struct acpi_pptt_cache *
+acpi_find_cache_level(struct acpi_table_header *table_hdr,
+ struct acpi_pptt_processor *cpu_node,
+ unsigned int *starting_level, unsigned int level,
+ int type)
{
struct acpi_subtable_header *res;
- int number_of_levels = *starting_level;
+ unsigned int number_of_levels = *starting_level;
int resource = 0;
struct acpi_pptt_cache *ret = NULL;
- int local_level;
+ unsigned int local_level;
/* walk down from processor node */
while ((res = acpi_get_pptt_resource(table_hdr, cpu_node, resource))) {
@@ -321,12 +322,12 @@ static struct acpi_pptt_cache *acpi_find_cache_node(struct acpi_table_header *ta
unsigned int level,
struct acpi_pptt_processor **node)
{
- int total_levels = 0;
+ unsigned int total_levels = 0;
struct acpi_pptt_cache *found = NULL;
struct acpi_pptt_processor *cpu_node;
u8 acpi_type = acpi_cache_type(type);
- pr_debug("Looking for CPU %d's level %d cache type %d\n",
+ pr_debug("Looking for CPU %d's level %u cache type %d\n",
acpi_cpu_id, level, acpi_type);
cpu_node = acpi_find_processor_node(table_hdr, acpi_cpu_id);
diff --git a/drivers/acpi/processor_idle.c b/drivers/acpi/processor_idle.c
index 2ae95df2e74f..dcc289e30166 100644
--- a/drivers/acpi/processor_idle.c
+++ b/drivers/acpi/processor_idle.c
@@ -299,164 +299,24 @@ static int acpi_processor_get_power_info_default(struct acpi_processor *pr)
static int acpi_processor_get_power_info_cst(struct acpi_processor *pr)
{
- acpi_status status;
- u64 count;
- int current_count;
- int i, ret = 0;
- struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
- union acpi_object *cst;
+ int ret;
if (nocst)
return -ENODEV;
- current_count = 0;
-
- status = acpi_evaluate_object(pr->handle, "_CST", NULL, &buffer);
- if (ACPI_FAILURE(status)) {
- ACPI_DEBUG_PRINT((ACPI_DB_INFO, "No _CST, giving up\n"));
- return -ENODEV;
- }
-
- cst = buffer.pointer;
-
- /* There must be at least 2 elements */
- if (!cst || (cst->type != ACPI_TYPE_PACKAGE) || cst->package.count < 2) {
- pr_err("not enough elements in _CST\n");
- ret = -EFAULT;
- goto end;
- }
-
- count = cst->package.elements[0].integer.value;
+ ret = acpi_processor_evaluate_cst(pr->handle, pr->id, &pr->power);
+ if (ret)
+ return ret;
- /* Validate number of power states. */
- if (count < 1 || count != cst->package.count - 1) {
- pr_err("count given by _CST is not valid\n");
- ret = -EFAULT;
- goto end;
- }
+ /*
+ * It is expected that there will be at least 2 states, C1 and
+ * something else (C2 or C3), so fail if that is not the case.
+ */
+ if (pr->power.count < 2)
+ return -EFAULT;
- /* Tell driver that at least _CST is supported. */
pr->flags.has_cst = 1;
-
- for (i = 1; i <= count; i++) {
- union acpi_object *element;
- union acpi_object *obj;
- struct acpi_power_register *reg;
- struct acpi_processor_cx cx;
-
- memset(&cx, 0, sizeof(cx));
-
- element = &(cst->package.elements[i]);
- if (element->type != ACPI_TYPE_PACKAGE)
- continue;
-
- if (element->package.count != 4)
- continue;
-
- obj = &(element->package.elements[0]);
-
- if (obj->type != ACPI_TYPE_BUFFER)
- continue;
-
- reg = (struct acpi_power_register *)obj->buffer.pointer;
-
- if (reg->space_id != ACPI_ADR_SPACE_SYSTEM_IO &&
- (reg->space_id != ACPI_ADR_SPACE_FIXED_HARDWARE))
- continue;
-
- /* There should be an easy way to extract an integer... */
- obj = &(element->package.elements[1]);
- if (obj->type != ACPI_TYPE_INTEGER)
- continue;
-
- cx.type = obj->integer.value;
- /*
- * Some buggy BIOSes won't list C1 in _CST -
- * Let acpi_processor_get_power_info_default() handle them later
- */
- if (i == 1 && cx.type != ACPI_STATE_C1)
- current_count++;
-
- cx.address = reg->address;
- cx.index = current_count + 1;
-
- cx.entry_method = ACPI_CSTATE_SYSTEMIO;
- if (reg->space_id == ACPI_ADR_SPACE_FIXED_HARDWARE) {
- if (acpi_processor_ffh_cstate_probe
- (pr->id, &cx, reg) == 0) {
- cx.entry_method = ACPI_CSTATE_FFH;
- } else if (cx.type == ACPI_STATE_C1) {
- /*
- * C1 is a special case where FIXED_HARDWARE
- * can be handled in non-MWAIT way as well.
- * In that case, save this _CST entry info.
- * Otherwise, ignore this info and continue.
- */
- cx.entry_method = ACPI_CSTATE_HALT;
- snprintf(cx.desc, ACPI_CX_DESC_LEN, "ACPI HLT");
- } else {
- continue;
- }
- if (cx.type == ACPI_STATE_C1 &&
- (boot_option_idle_override == IDLE_NOMWAIT)) {
- /*
- * In most cases the C1 space_id obtained from
- * _CST object is FIXED_HARDWARE access mode.
- * But when the option of idle=halt is added,
- * the entry_method type should be changed from
- * CSTATE_FFH to CSTATE_HALT.
- * When the option of idle=nomwait is added,
- * the C1 entry_method type should be
- * CSTATE_HALT.
- */
- cx.entry_method = ACPI_CSTATE_HALT;
- snprintf(cx.desc, ACPI_CX_DESC_LEN, "ACPI HLT");
- }
- } else {
- snprintf(cx.desc, ACPI_CX_DESC_LEN, "ACPI IOPORT 0x%x",
- cx.address);
- }
-
- if (cx.type == ACPI_STATE_C1) {
- cx.valid = 1;
- }
-
- obj = &(element->package.elements[2]);
- if (obj->type != ACPI_TYPE_INTEGER)
- continue;
-
- cx.latency = obj->integer.value;
-
- obj = &(element->package.elements[3]);
- if (obj->type != ACPI_TYPE_INTEGER)
- continue;
-
- current_count++;
- memcpy(&(pr->power.states[current_count]), &cx, sizeof(cx));
-
- /*
- * We support total ACPI_PROCESSOR_MAX_POWER - 1
- * (From 1 through ACPI_PROCESSOR_MAX_POWER - 1)
- */
- if (current_count >= (ACPI_PROCESSOR_MAX_POWER - 1)) {
- pr_warn("Limiting number of power states to max (%d)\n",
- ACPI_PROCESSOR_MAX_POWER);
- pr_warn("Please increase ACPI_PROCESSOR_MAX_POWER if needed.\n");
- break;
- }
- }
-
- ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Found %d power states\n",
- current_count));
-
- /* Validate number of power states discovered */
- if (current_count < 2)
- ret = -EFAULT;
-
- end:
- kfree(buffer.pointer);
-
- return ret;
+ return 0;
}
static void acpi_processor_power_verify_c3(struct acpi_processor *pr,
@@ -909,7 +769,6 @@ static int acpi_processor_setup_cstates(struct acpi_processor *pr)
static inline void acpi_processor_cstate_first_run_checks(void)
{
- acpi_status status;
static int first_run;
if (first_run)
@@ -921,13 +780,10 @@ static inline void acpi_processor_cstate_first_run_checks(void)
max_cstate);
first_run++;
- if (acpi_gbl_FADT.cst_control && !nocst) {
- status = acpi_os_write_port(acpi_gbl_FADT.smi_command,
- acpi_gbl_FADT.cst_control, 8);
- if (ACPI_FAILURE(status))
- ACPI_EXCEPTION((AE_INFO, status,
- "Notifying BIOS of _CST ability failed"));
- }
+ if (nocst)
+ return;
+
+ acpi_processor_claim_cst_control();
}
#else
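
The removed _CST parsing moves into acpi_processor_evaluate_cst(); the invariant the old code checked locally is that element 0 of the _CST package announces the number of remaining elements. A tiny sketch of just that check (illustrative, not the centralized implementation):

```c
#include <stdio.h>

/* package = [count, state1, state2, ...]; element 0 must match the
 * number of remaining elements and there must be at least one state. */
static int validate_cst(unsigned long long count, unsigned int package_count)
{
	if (count < 1 || count != package_count - 1)
		return -1;	/* -EFAULT in the old driver code */
	return 0;
}

int main(void)
{
	printf("%d\n", validate_cst(3, 4));	/* 0: three states, four elements */
	printf("%d\n", validate_cst(3, 3));	/* -1: count mismatch */
	return 0;
}
```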
diff --git a/drivers/acpi/sleep.c b/drivers/acpi/sleep.c
index 6747a279621b..439880629839 100644
--- a/drivers/acpi/sleep.c
+++ b/drivers/acpi/sleep.c
@@ -61,8 +61,11 @@ static struct notifier_block tts_notifier = {
static int acpi_sleep_prepare(u32 acpi_state)
{
#ifdef CONFIG_ACPI_SLEEP
+ unsigned long acpi_wakeup_address;
+
/* do we have a wakeup address for S2 and S3? */
if (acpi_state == ACPI_STATE_S3) {
+ acpi_wakeup_address = acpi_get_wakeup_address();
if (!acpi_wakeup_address)
return -EFAULT;
acpi_set_waking_vector(acpi_wakeup_address);
diff --git a/drivers/acpi/thermal.c b/drivers/acpi/thermal.c
index d831a61e0010..19067a5e5293 100644
--- a/drivers/acpi/thermal.c
+++ b/drivers/acpi/thermal.c
@@ -27,6 +27,7 @@
#include <linux/acpi.h>
#include <linux/workqueue.h>
#include <linux/uaccess.h>
+#include <linux/units.h>
#define PREFIX "ACPI: "
@@ -172,7 +173,7 @@ struct acpi_thermal {
struct acpi_handle_list devices;
struct thermal_zone_device *thermal_zone;
int tz_enabled;
- int kelvin_offset;
+ int kelvin_offset; /* in millidegrees */
struct work_struct thermal_check_work;
};
@@ -297,7 +298,8 @@ static int acpi_thermal_trips_update(struct acpi_thermal *tz, int flag)
if (crt == -1) {
tz->trips.critical.flags.valid = 0;
} else if (crt > 0) {
- unsigned long crt_k = CELSIUS_TO_DECI_KELVIN(crt);
+ unsigned long crt_k = celsius_to_deci_kelvin(crt);
+
/*
* Allow override critical threshold
*/
@@ -333,7 +335,7 @@ static int acpi_thermal_trips_update(struct acpi_thermal *tz, int flag)
if (psv == -1) {
status = AE_SUPPORT;
} else if (psv > 0) {
- tmp = CELSIUS_TO_DECI_KELVIN(psv);
+ tmp = celsius_to_deci_kelvin(psv);
status = AE_OK;
} else {
status = acpi_evaluate_integer(tz->device->handle,
@@ -413,7 +415,7 @@ static int acpi_thermal_trips_update(struct acpi_thermal *tz, int flag)
break;
if (i == 1)
tz->trips.active[0].temperature =
- CELSIUS_TO_DECI_KELVIN(act);
+ celsius_to_deci_kelvin(act);
else
/*
* Don't allow override higher than
@@ -421,9 +423,9 @@ static int acpi_thermal_trips_update(struct acpi_thermal *tz, int flag)
*/
tz->trips.active[i - 1].temperature =
(tz->trips.active[i - 2].temperature <
- CELSIUS_TO_DECI_KELVIN(act) ?
+ celsius_to_deci_kelvin(act) ?
tz->trips.active[i - 2].temperature :
- CELSIUS_TO_DECI_KELVIN(act));
+ celsius_to_deci_kelvin(act));
break;
} else {
tz->trips.active[i].temperature = tmp;
@@ -519,7 +521,7 @@ static int thermal_get_temp(struct thermal_zone_device *thermal, int *temp)
if (result)
return result;
- *temp = DECI_KELVIN_TO_MILLICELSIUS_WITH_OFFSET(tz->temperature,
+ *temp = deci_kelvin_to_millicelsius_with_offset(tz->temperature,
tz->kelvin_offset);
return 0;
}
@@ -624,7 +626,7 @@ static int thermal_get_trip_temp(struct thermal_zone_device *thermal,
if (tz->trips.critical.flags.valid) {
if (!trip) {
- *temp = DECI_KELVIN_TO_MILLICELSIUS_WITH_OFFSET(
+ *temp = deci_kelvin_to_millicelsius_with_offset(
tz->trips.critical.temperature,
tz->kelvin_offset);
return 0;
@@ -634,7 +636,7 @@ static int thermal_get_trip_temp(struct thermal_zone_device *thermal,
if (tz->trips.hot.flags.valid) {
if (!trip) {
- *temp = DECI_KELVIN_TO_MILLICELSIUS_WITH_OFFSET(
+ *temp = deci_kelvin_to_millicelsius_with_offset(
tz->trips.hot.temperature,
tz->kelvin_offset);
return 0;
@@ -644,7 +646,7 @@ static int thermal_get_trip_temp(struct thermal_zone_device *thermal,
if (tz->trips.passive.flags.valid) {
if (!trip) {
- *temp = DECI_KELVIN_TO_MILLICELSIUS_WITH_OFFSET(
+ *temp = deci_kelvin_to_millicelsius_with_offset(
tz->trips.passive.temperature,
tz->kelvin_offset);
return 0;
@@ -655,7 +657,7 @@ static int thermal_get_trip_temp(struct thermal_zone_device *thermal,
for (i = 0; i < ACPI_THERMAL_MAX_ACTIVE &&
tz->trips.active[i].flags.valid; i++) {
if (!trip) {
- *temp = DECI_KELVIN_TO_MILLICELSIUS_WITH_OFFSET(
+ *temp = deci_kelvin_to_millicelsius_with_offset(
tz->trips.active[i].temperature,
tz->kelvin_offset);
return 0;
@@ -672,7 +674,7 @@ static int thermal_get_crit_temp(struct thermal_zone_device *thermal,
struct acpi_thermal *tz = thermal->devdata;
if (tz->trips.critical.flags.valid) {
- *temperature = DECI_KELVIN_TO_MILLICELSIUS_WITH_OFFSET(
+ *temperature = deci_kelvin_to_millicelsius_with_offset(
tz->trips.critical.temperature,
tz->kelvin_offset);
return 0;
@@ -692,7 +694,7 @@ static int thermal_get_trend(struct thermal_zone_device *thermal,
if (type == THERMAL_TRIP_ACTIVE) {
int trip_temp;
- int temp = DECI_KELVIN_TO_MILLICELSIUS_WITH_OFFSET(
+ int temp = deci_kelvin_to_millicelsius_with_offset(
tz->temperature, tz->kelvin_offset);
if (thermal_get_trip_temp(thermal, trip, &trip_temp))
return -EINVAL;
@@ -1043,9 +1045,9 @@ static void acpi_thermal_guess_offset(struct acpi_thermal *tz)
{
if (tz->trips.critical.flags.valid &&
(tz->trips.critical.temperature % 5) == 1)
- tz->kelvin_offset = 2731;
+ tz->kelvin_offset = 273100;
else
- tz->kelvin_offset = 2732;
+ tz->kelvin_offset = 273200;
}
static void acpi_thermal_check_fn(struct work_struct *work)
@@ -1087,7 +1089,7 @@ static int acpi_thermal_add(struct acpi_device *device)
INIT_WORK(&tz->thermal_check_work, acpi_thermal_check_fn);
pr_info(PREFIX "%s [%s] (%ld C)\n", acpi_device_name(device),
- acpi_device_bid(device), DECI_KELVIN_TO_CELSIUS(tz->temperature));
+ acpi_device_bid(device), deci_kelvin_to_celsius(tz->temperature));
goto end;
free_memory:
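
A worked example of the units change above, under the assumption (consistent with <linux/units.h>) that the helper computes t * 100 - offset; keeping kelvin_offset in millidegrees is what lets a single subtraction replace the old macro:

```c
#include <stdio.h>

/* With the offset stored in millidegrees, the constants grow from
 * 2731/2732 (deci-degrees) to 273100/273200. */
static long deci_kelvin_to_millicelsius_with_offset(long t, long offset)
{
	return t * 100 - offset;
}

int main(void)
{
	/* 3032 deci-kelvin with the 273200 mC offset is 30000 mC (30.0 C). */
	printf("%ld mC\n", deci_kelvin_to_millicelsius_with_offset(3032, 273200));
	return 0;
}
```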
diff --git a/drivers/acpi/video_detect.c b/drivers/acpi/video_detect.c
index 31014c7d3793..419f814d596a 100644
--- a/drivers/acpi/video_detect.c
+++ b/drivers/acpi/video_detect.c
@@ -303,6 +303,22 @@ static const struct dmi_system_id video_detect_dmi_table[] = {
},
},
{
+ .callback = video_detect_force_native,
+ .ident = "Lenovo E41-25",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "81FS"),
+ },
+ },
+ {
+ .callback = video_detect_force_native,
+ .ident = "Lenovo E41-45",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "82BK"),
+ },
+ },
+ {
/* https://bugzilla.redhat.com/show_bug.cgi?id=1217249 */
.callback = video_detect_force_native,
.ident = "Apple MacBook Pro 12,1",
@@ -336,6 +352,11 @@ static const struct dmi_system_id video_detect_dmi_table[] = {
DMI_MATCH(DMI_PRODUCT_NAME, "Precision 7510"),
},
},
+
+ /*
+ * Desktops which falsely report a backlight and which our heuristics
+ * for this do not catch.
+ */
{
.callback = video_detect_force_none,
.ident = "Dell OptiPlex 9020M",
@@ -344,6 +365,14 @@ static const struct dmi_system_id video_detect_dmi_table[] = {
DMI_MATCH(DMI_PRODUCT_NAME, "OptiPlex 9020M"),
},
},
+ {
+ .callback = video_detect_force_none,
+ .ident = "MSI MS-7721",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "MSI"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "MS-7721"),
+ },
+ },
{ },
};
diff --git a/drivers/android/binder.c b/drivers/android/binder.c
index b2dad43dbf82..a6b2082c24f8 100644
--- a/drivers/android/binder.c
+++ b/drivers/android/binder.c
@@ -2249,10 +2249,12 @@ static void binder_deferred_fd_close(int fd)
return;
init_task_work(&twcb->twork, binder_do_fd_close);
__close_fd_get_file(fd, &twcb->file);
- if (twcb->file)
+ if (twcb->file) {
+ filp_close(twcb->file, current->files);
task_work_add(current, &twcb->twork, true);
- else
+ } else {
kfree(twcb);
+ }
}
static void binder_transaction_buffer_release(struct binder_proc *proc,
@@ -5199,10 +5201,11 @@ err_bad_arg:
static int binder_open(struct inode *nodp, struct file *filp)
{
- struct binder_proc *proc;
+ struct binder_proc *proc, *itr;
struct binder_device *binder_dev;
struct binderfs_info *info;
struct dentry *binder_binderfs_dir_entry_proc = NULL;
+ bool existing_pid = false;
binder_debug(BINDER_DEBUG_OPEN_CLOSE, "%s: %d:%d\n", __func__,
current->group_leader->pid, current->pid);
@@ -5235,19 +5238,24 @@ static int binder_open(struct inode *nodp, struct file *filp)
filp->private_data = proc;
mutex_lock(&binder_procs_lock);
+ hlist_for_each_entry(itr, &binder_procs, proc_node) {
+ if (itr->pid == proc->pid) {
+ existing_pid = true;
+ break;
+ }
+ }
hlist_add_head(&proc->proc_node, &binder_procs);
mutex_unlock(&binder_procs_lock);
- if (binder_debugfs_dir_entry_proc) {
+ if (binder_debugfs_dir_entry_proc && !existing_pid) {
char strbuf[11];
snprintf(strbuf, sizeof(strbuf), "%u", proc->pid);
/*
- * proc debug entries are shared between contexts, so
- * this will fail if the process tries to open the driver
- * again with a different context. The priting code will
- * anyway print all contexts that a given PID has, so this
- * is not a problem.
+	 * proc debug entries are shared between contexts.
+	 * Only create them for the first PID to avoid debugfs log spamming.
+	 * The printing code will print all contexts for a given PID anyway,
+	 * so this is not a problem.
*/
proc->debugfs_entry = debugfs_create_file(strbuf, 0444,
binder_debugfs_dir_entry_proc,
@@ -5255,19 +5263,16 @@ static int binder_open(struct inode *nodp, struct file *filp)
&proc_fops);
}
- if (binder_binderfs_dir_entry_proc) {
+ if (binder_binderfs_dir_entry_proc && !existing_pid) {
char strbuf[11];
struct dentry *binderfs_entry;
snprintf(strbuf, sizeof(strbuf), "%u", proc->pid);
/*
* Similar to debugfs, the process specific log file is shared
- * between contexts. If the file has already been created for a
- * process, the following binderfs_create_file() call will
- * fail with error code EEXIST if another context of the same
- * process invoked binder_open(). This is ok since same as
- * debugfs, the log file will contain information on all
- * contexts of a given PID.
+ * between contexts. Only create for the first PID.
+ * This is ok since same as debugfs, the log file will contain
+ * information on all contexts of a given PID.
*/
binderfs_entry = binderfs_create_file(binder_binderfs_dir_entry_proc,
strbuf, &proc_fops, (void *)(unsigned long)proc->pid);
@@ -5277,10 +5282,8 @@ static int binder_open(struct inode *nodp, struct file *filp)
int error;
error = PTR_ERR(binderfs_entry);
- if (error != -EEXIST) {
- pr_warn("Unable to create file %s in binderfs (error %d)\n",
- strbuf, error);
- }
+ pr_warn("Unable to create file %s in binderfs (error %d)\n",
+ strbuf, error);
}
}
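
A compact sketch of the dedup added to binder_open(): scan the global proc list under its lock, note whether the PID is already present, and let the caller skip the per-PID debugfs/binderfs entries on repeat opens. Userspace stand-in with a pthread mutex:

```c
#include <pthread.h>
#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

struct proc {
	int pid;
	struct proc *next;
};

static pthread_mutex_t procs_lock = PTHREAD_MUTEX_INITIALIZER;
static struct proc *procs;

/* Returns true when the PID was already on the list; the caller then
 * skips creating the per-PID debug entries. */
static bool add_proc(struct proc *p)
{
	struct proc *itr;
	bool existing_pid = false;

	pthread_mutex_lock(&procs_lock);
	for (itr = procs; itr; itr = itr->next) {
		if (itr->pid == p->pid) {
			existing_pid = true;
			break;
		}
	}
	p->next = procs;
	procs = p;
	pthread_mutex_unlock(&procs_lock);

	return existing_pid;
}

int main(void)
{
	struct proc a = { .pid = 100 }, b = { .pid = 100 };

	printf("%d %d\n", add_proc(&a), add_proc(&b));	/* 0 1 */
	return 0;
}
```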
diff --git a/drivers/ata/acard-ahci.c b/drivers/ata/acard-ahci.c
index 46dc54d18f0b..2a04e8abd397 100644
--- a/drivers/ata/acard-ahci.c
+++ b/drivers/ata/acard-ahci.c
@@ -218,7 +218,6 @@ static enum ata_completion_errors acard_ahci_qc_prep(struct ata_queued_cmd *qc)
void *cmd_tbl;
u32 opts;
const u32 cmd_fis_len = 5; /* five dwords */
- unsigned int n_elem;
/*
* Fill in command table information. First, the header,
@@ -232,9 +231,8 @@ static enum ata_completion_errors acard_ahci_qc_prep(struct ata_queued_cmd *qc)
memcpy(cmd_tbl + AHCI_CMD_TBL_CDB, qc->cdb, qc->dev->cdb_len);
}
- n_elem = 0;
if (qc->flags & ATA_QCFLAG_DMAMAP)
- n_elem = acard_ahci_fill_sg(qc, cmd_tbl);
+ acard_ahci_fill_sg(qc, cmd_tbl);
/*
* Fill in command slot information.
diff --git a/drivers/ata/ahci_brcm.c b/drivers/ata/ahci_brcm.c
index 66a570d0da83..6853dbb4131d 100644
--- a/drivers/ata/ahci_brcm.c
+++ b/drivers/ata/ahci_brcm.c
@@ -73,6 +73,7 @@ enum brcm_ahci_version {
BRCM_SATA_BCM7425 = 1,
BRCM_SATA_BCM7445,
BRCM_SATA_NSP,
+ BRCM_SATA_BCM7216,
};
enum brcm_ahci_quirks {
@@ -337,24 +338,39 @@ static const struct ata_port_info ahci_brcm_port_info = {
.port_ops = &ahci_brcm_platform_ops,
};
-#ifdef CONFIG_PM_SLEEP
static int brcm_ahci_suspend(struct device *dev)
{
struct ata_host *host = dev_get_drvdata(dev);
struct ahci_host_priv *hpriv = host->private_data;
struct brcm_ahci_priv *priv = hpriv->plat_data;
+ int ret;
brcm_sata_phys_disable(priv);
- return ahci_platform_suspend(dev);
+ if (IS_ENABLED(CONFIG_PM_SLEEP))
+ ret = ahci_platform_suspend(dev);
+ else
+ ret = 0;
+
+ if (priv->version != BRCM_SATA_BCM7216)
+ reset_control_assert(priv->rcdev);
+
+ return ret;
}
-static int brcm_ahci_resume(struct device *dev)
+static int __maybe_unused brcm_ahci_resume(struct device *dev)
{
struct ata_host *host = dev_get_drvdata(dev);
struct ahci_host_priv *hpriv = host->private_data;
struct brcm_ahci_priv *priv = hpriv->plat_data;
- int ret;
+ int ret = 0;
+
+ if (priv->version == BRCM_SATA_BCM7216)
+ ret = reset_control_reset(priv->rcdev);
+ else
+ ret = reset_control_deassert(priv->rcdev);
+ if (ret)
+ return ret;
/* Make sure clocks are turned on before re-configuration */
ret = ahci_platform_enable_clks(hpriv);
@@ -393,7 +409,6 @@ out_disable_phys:
ahci_platform_disable_clks(hpriv);
return ret;
}
-#endif
static struct scsi_host_template ahci_platform_sht = {
AHCI_SHT(DRV_NAME),
@@ -404,6 +419,7 @@ static const struct of_device_id ahci_of_match[] = {
{.compatible = "brcm,bcm7445-ahci", .data = (void *)BRCM_SATA_BCM7445},
{.compatible = "brcm,bcm63138-ahci", .data = (void *)BRCM_SATA_BCM7445},
{.compatible = "brcm,bcm-nsp-ahci", .data = (void *)BRCM_SATA_NSP},
+ {.compatible = "brcm,bcm7216-ahci", .data = (void *)BRCM_SATA_BCM7216},
{},
};
MODULE_DEVICE_TABLE(of, ahci_of_match);
@@ -412,6 +428,7 @@ static int brcm_ahci_probe(struct platform_device *pdev)
{
const struct of_device_id *of_id;
struct device *dev = &pdev->dev;
+ const char *reset_name = NULL;
struct brcm_ahci_priv *priv;
struct ahci_host_priv *hpriv;
struct resource *res;
@@ -433,16 +450,19 @@ static int brcm_ahci_probe(struct platform_device *pdev)
if (IS_ERR(priv->top_ctrl))
return PTR_ERR(priv->top_ctrl);
- /* Reset is optional depending on platform */
- priv->rcdev = devm_reset_control_get(&pdev->dev, "ahci");
- if (!IS_ERR_OR_NULL(priv->rcdev))
- reset_control_deassert(priv->rcdev);
+ /* Reset is optional depending on platform and named differently */
+ if (priv->version == BRCM_SATA_BCM7216)
+ reset_name = "rescal";
+ else
+ reset_name = "ahci";
+
+ priv->rcdev = devm_reset_control_get_optional(&pdev->dev, reset_name);
+ if (IS_ERR(priv->rcdev))
+ return PTR_ERR(priv->rcdev);
hpriv = ahci_platform_get_resources(pdev, 0);
- if (IS_ERR(hpriv)) {
- ret = PTR_ERR(hpriv);
- goto out_reset;
- }
+ if (IS_ERR(hpriv))
+ return PTR_ERR(hpriv);
hpriv->plat_data = priv;
hpriv->flags = AHCI_HFLAG_WAKE_BEFORE_STOP | AHCI_HFLAG_NO_WRITE_TO_RO;
@@ -459,6 +479,13 @@ static int brcm_ahci_probe(struct platform_device *pdev)
break;
}
+ if (priv->version == BRCM_SATA_BCM7216)
+ ret = reset_control_reset(priv->rcdev);
+ else
+ ret = reset_control_deassert(priv->rcdev);
+ if (ret)
+ return ret;
+
ret = ahci_platform_enable_clks(hpriv);
if (ret)
goto out_reset;
@@ -500,7 +527,7 @@ out_disable_phys:
out_disable_clks:
ahci_platform_disable_clks(hpriv);
out_reset:
- if (!IS_ERR_OR_NULL(priv->rcdev))
+ if (priv->version != BRCM_SATA_BCM7216)
reset_control_assert(priv->rcdev);
return ret;
}
@@ -521,11 +548,26 @@ static int brcm_ahci_remove(struct platform_device *pdev)
return 0;
}
+static void brcm_ahci_shutdown(struct platform_device *pdev)
+{
+ int ret;
+
+	/* All resource releasing happens via devres, but our device, unlike a
+	 * proper remove, is not disappearing, so using brcm_ahci_suspend()
+	 * here, which does explicit power management, is appropriate.
+	 */
+ ret = brcm_ahci_suspend(&pdev->dev);
+ if (ret)
+ dev_err(&pdev->dev, "failed to shutdown\n");
+}
+
static SIMPLE_DEV_PM_OPS(ahci_brcm_pm_ops, brcm_ahci_suspend, brcm_ahci_resume);
static struct platform_driver brcm_ahci_driver = {
.probe = brcm_ahci_probe,
.remove = brcm_ahci_remove,
+ .shutdown = brcm_ahci_shutdown,
.driver = {
.name = DRV_NAME,
.of_match_table = ahci_of_match,
diff --git a/drivers/ata/libata-scsi.c b/drivers/ata/libata-scsi.c
index 58e09ffe8b9c..eb2eb599e602 100644
--- a/drivers/ata/libata-scsi.c
+++ b/drivers/ata/libata-scsi.c
@@ -17,6 +17,7 @@
* - http://www.t13.org/
*/
+#include <linux/compat.h>
#include <linux/slab.h>
#include <linux/kernel.h>
#include <linux/blkdev.h>
@@ -761,6 +762,10 @@ static int ata_ioc32(struct ata_port *ap)
return 0;
}
+/*
+ * This handles both native and compat commands, so anything added
+ * here must have a compatible argument, or check in_compat_syscall()
+ */
int ata_sas_scsi_ioctl(struct ata_port *ap, struct scsi_device *scsidev,
unsigned int cmd, void __user *arg)
{
@@ -773,6 +778,10 @@ int ata_sas_scsi_ioctl(struct ata_port *ap, struct scsi_device *scsidev,
spin_lock_irqsave(ap->lock, flags);
val = ata_ioc32(ap);
spin_unlock_irqrestore(ap->lock, flags);
+#ifdef CONFIG_COMPAT
+ if (in_compat_syscall())
+ return put_user(val, (compat_ulong_t __user *)arg);
+#endif
return put_user(val, (unsigned long __user *)arg);
case HDIO_SET_32BIT:
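
The compat branch above exists because HDIO_GET_32BIT stores a long through a user pointer, and a 32-bit caller expects 4 bytes, not 8. A hedged userspace illustration of the width selection (memcpy stands in for put_user; compat_ulong_t is the 32-bit ABI's unsigned long):

```c
#include <stdbool.h>
#include <stdint.h>
#include <string.h>

/* The kernel's put_user() writes sizeof(*ptr) bytes, so the pointer
 * type must match the caller's ABI. */
static int put_val(void *arg, unsigned long val, bool compat_caller)
{
	if (compat_caller) {		/* in_compat_syscall() in the kernel */
		uint32_t v32 = (uint32_t)val;

		memcpy(arg, &v32, sizeof(v32));
	} else {
		unsigned long v = val;

		memcpy(arg, &v, sizeof(v));
	}
	return 0;
}

int main(void)
{
	unsigned char buf[sizeof(unsigned long)] = { 0 };

	put_val(buf, 1, true);	/* 32-bit caller: 4 bytes written */
	put_val(buf, 1, false);	/* native caller: sizeof(long) bytes written */
	return 0;
}
```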
diff --git a/drivers/ata/pata_arasan_cf.c b/drivers/ata/pata_arasan_cf.c
index 135173c8d138..391dff0f25a2 100644
--- a/drivers/ata/pata_arasan_cf.c
+++ b/drivers/ata/pata_arasan_cf.c
@@ -824,7 +824,7 @@ static int arasan_cf_probe(struct platform_device *pdev)
quirk |= CF_BROKEN_MWDMA | CF_BROKEN_UDMA;
acdev->pbase = res->start;
- acdev->vbase = devm_ioremap_nocache(&pdev->dev, res->start,
+ acdev->vbase = devm_ioremap(&pdev->dev, res->start,
resource_size(res));
if (!acdev->vbase) {
dev_warn(&pdev->dev, "ioremap fail\n");
diff --git a/drivers/ata/pata_macio.c b/drivers/ata/pata_macio.c
index 1bfd0154dad5..e47a28271f5b 100644
--- a/drivers/ata/pata_macio.c
+++ b/drivers/ata/pata_macio.c
@@ -979,7 +979,7 @@ static void pata_macio_invariants(struct pata_macio_priv *priv)
priv->aapl_bus_id = bidp ? *bidp : 0;
/* Fixup missing Apple bus ID in case of media-bay */
- if (priv->mediabay && bidp == 0)
+ if (priv->mediabay && !bidp)
priv->aapl_bus_id = 1;
}
diff --git a/drivers/ata/pata_octeon_cf.c b/drivers/ata/pata_octeon_cf.c
index d3d851b014a3..bd87476ab481 100644
--- a/drivers/ata/pata_octeon_cf.c
+++ b/drivers/ata/pata_octeon_cf.c
@@ -891,7 +891,7 @@ static int octeon_cf_probe(struct platform_device *pdev)
of_node_put(dma_node);
return -EINVAL;
}
- cf_port->dma_base = (u64)devm_ioremap_nocache(&pdev->dev, res_dma->start,
+ cf_port->dma_base = (u64)devm_ioremap(&pdev->dev, res_dma->start,
resource_size(res_dma));
if (!cf_port->dma_base) {
of_node_put(dma_node);
@@ -909,7 +909,7 @@ static int octeon_cf_probe(struct platform_device *pdev)
if (!res_cs1)
return -EINVAL;
- cs1 = devm_ioremap_nocache(&pdev->dev, res_cs1->start,
+ cs1 = devm_ioremap(&pdev->dev, res_cs1->start,
resource_size(res_cs1));
if (!cs1)
return rv;
@@ -925,7 +925,7 @@ static int octeon_cf_probe(struct platform_device *pdev)
if (!res_cs0)
return -EINVAL;
- cs0 = devm_ioremap_nocache(&pdev->dev, res_cs0->start,
+ cs0 = devm_ioremap(&pdev->dev, res_cs0->start,
resource_size(res_cs0));
if (!cs0)
return rv;
diff --git a/drivers/ata/pata_rb532_cf.c b/drivers/ata/pata_rb532_cf.c
index deae466395de..479c4b29b856 100644
--- a/drivers/ata/pata_rb532_cf.c
+++ b/drivers/ata/pata_rb532_cf.c
@@ -140,7 +140,7 @@ static int rb532_pata_driver_probe(struct platform_device *pdev)
info->gpio_line = gpiod;
info->irq = irq;
- info->iobase = devm_ioremap_nocache(&pdev->dev, res->start,
+ info->iobase = devm_ioremap(&pdev->dev, res->start,
resource_size(res));
if (!info->iobase)
return -ENOMEM;
diff --git a/drivers/atm/eni.c b/drivers/atm/eni.c
index 9d0d65efcd94..17d47ad03ab7 100644
--- a/drivers/atm/eni.c
+++ b/drivers/atm/eni.c
@@ -31,12 +31,6 @@
#include "suni.h"
#include "eni.h"
-#if !defined(__i386__) && !defined(__x86_64__)
-#ifndef ioremap_nocache
-#define ioremap_nocache(X,Y) ioremap(X,Y)
-#endif
-#endif
-
/*
* TODO:
*
@@ -1725,7 +1719,7 @@ static int eni_do_init(struct atm_dev *dev)
}
printk(KERN_NOTICE DEV_LABEL "(itf %d): rev.%d,base=0x%lx,irq=%d,",
dev->number,pci_dev->revision,real_base,eni_dev->irq);
- if (!(base = ioremap_nocache(real_base,MAP_MAX_SIZE))) {
+ if (!(base = ioremap(real_base,MAP_MAX_SIZE))) {
printk("\n");
printk(KERN_ERR DEV_LABEL "(itf %d): can't set up page "
"mapping\n",dev->number);
diff --git a/drivers/atm/firestream.c b/drivers/atm/firestream.c
index aad00d2b28f5..cc87004d5e2d 100644
--- a/drivers/atm/firestream.c
+++ b/drivers/atm/firestream.c
@@ -912,6 +912,7 @@ static int fs_open(struct atm_vcc *atm_vcc)
}
if (!to) {
printk ("No more free channels for FS50..\n");
+ kfree(vcc);
return -EBUSY;
}
vcc->channo = dev->channo;
@@ -922,6 +923,7 @@ static int fs_open(struct atm_vcc *atm_vcc)
if (((DO_DIRECTION(rxtp) && dev->atm_vccs[vcc->channo])) ||
( DO_DIRECTION(txtp) && test_bit (vcc->channo, dev->tx_inuse))) {
printk ("Channel is in use for FS155.\n");
+ kfree(vcc);
return -EBUSY;
}
}
@@ -935,6 +937,7 @@ static int fs_open(struct atm_vcc *atm_vcc)
tc, sizeof (struct fs_transmit_config));
if (!tc) {
fs_dprintk (FS_DEBUG_OPEN, "fs: can't alloc transmit_config.\n");
+ kfree(vcc);
return -ENOMEM;
}
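
The three kfree(vcc) additions above plug leaks on the early error returns of fs_open(). A minimal sketch of the same error paths restructured around a single unwind label (the helpers are illustrative stubs, not the driver's):

```c
#include <stdlib.h>

struct vcc { int channo; };

static int open_channel(void)			{ return 0; }
static int alloc_tx_config(struct vcc *v)	{ (void)v; return 0; }

static int fs_open_sketch(void)
{
	struct vcc *vcc = malloc(sizeof(*vcc));
	int err;

	if (!vcc)
		return -1;

	err = open_channel();
	if (err)
		goto out_free;	/* before the fix: returned here, leaking vcc */

	err = alloc_tx_config(vcc);
	if (err)
		goto out_free;

	return 0;		/* on success the driver keeps vcc */

out_free:
	free(vcc);
	return err;
}

int main(void)
{
	return fs_open_sketch();
}
```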
diff --git a/drivers/atm/fore200e.c b/drivers/atm/fore200e.c
index f1a500205313..8fbd36eb8941 100644
--- a/drivers/atm/fore200e.c
+++ b/drivers/atm/fore200e.c
@@ -1414,12 +1414,14 @@ fore200e_open(struct atm_vcc *vcc)
static void
fore200e_close(struct atm_vcc* vcc)
{
- struct fore200e* fore200e = FORE200E_DEV(vcc->dev);
struct fore200e_vcc* fore200e_vcc;
+ struct fore200e* fore200e;
struct fore200e_vc_map* vc_map;
unsigned long flags;
ASSERT(vcc);
+ fore200e = FORE200E_DEV(vcc->dev);
+
ASSERT((vcc->vpi >= 0) && (vcc->vpi < 1<<FORE200E_VPI_BITS));
ASSERT((vcc->vci >= 0) && (vcc->vci < 1<<FORE200E_VCI_BITS));
@@ -1464,10 +1466,10 @@ fore200e_close(struct atm_vcc* vcc)
static int
fore200e_send(struct atm_vcc *vcc, struct sk_buff *skb)
{
- struct fore200e* fore200e = FORE200E_DEV(vcc->dev);
- struct fore200e_vcc* fore200e_vcc = FORE200E_VCC(vcc);
+ struct fore200e* fore200e;
+ struct fore200e_vcc* fore200e_vcc;
struct fore200e_vc_map* vc_map;
- struct host_txq* txq = &fore200e->host_txq;
+ struct host_txq* txq;
struct host_txq_entry* entry;
struct tpd* tpd;
struct tpd_haddr tpd_haddr;
@@ -1480,9 +1482,18 @@ fore200e_send(struct atm_vcc *vcc, struct sk_buff *skb)
unsigned char* data;
unsigned long flags;
- ASSERT(vcc);
- ASSERT(fore200e);
- ASSERT(fore200e_vcc);
+ if (!vcc)
+ return -EINVAL;
+
+ fore200e = FORE200E_DEV(vcc->dev);
+ fore200e_vcc = FORE200E_VCC(vcc);
+
+ if (!fore200e)
+ return -EINVAL;
+
+ txq = &fore200e->host_txq;
+ if (!fore200e_vcc)
+ return -EINVAL;
if (!test_bit(ATM_VF_READY, &vcc->flags)) {
DPRINTK(1, "VC %d.%d.%d not ready for tx\n", vcc->itf, vcc->vpi, vcc->vpi);
diff --git a/drivers/auxdisplay/cfag12864bfb.c b/drivers/auxdisplay/cfag12864bfb.c
index 4074886b7bc8..2002291ab338 100644
--- a/drivers/auxdisplay/cfag12864bfb.c
+++ b/drivers/auxdisplay/cfag12864bfb.c
@@ -57,7 +57,7 @@ static int cfag12864bfb_mmap(struct fb_info *info, struct vm_area_struct *vma)
return vm_map_pages_zero(vma, &pages, 1);
}
-static struct fb_ops cfag12864bfb_ops = {
+static const struct fb_ops cfag12864bfb_ops = {
.owner = THIS_MODULE,
.fb_read = fb_sys_read,
.fb_write = fb_sys_write,
diff --git a/drivers/auxdisplay/ht16k33.c b/drivers/auxdisplay/ht16k33.c
index a2fcde582e2a..d951d54b26f5 100644
--- a/drivers/auxdisplay/ht16k33.c
+++ b/drivers/auxdisplay/ht16k33.c
@@ -228,7 +228,7 @@ static int ht16k33_mmap(struct fb_info *info, struct vm_area_struct *vma)
return vm_map_pages_zero(vma, &pages, 1);
}
-static struct fb_ops ht16k33_fb_ops = {
+static const struct fb_ops ht16k33_fb_ops = {
.owner = THIS_MODULE,
.fb_read = fb_sys_read,
.fb_write = fb_sys_write,
diff --git a/drivers/base/Kconfig b/drivers/base/Kconfig
index c3b3b5c0b0da..5f0bc74d2409 100644
--- a/drivers/base/Kconfig
+++ b/drivers/base/Kconfig
@@ -150,7 +150,7 @@ config DEBUG_TEST_DRIVER_REMOVE
config PM_QOS_KUNIT_TEST
bool "KUnit Test for PM QoS features"
- depends on KUNIT
+ depends on KUNIT=y
config HMEM_REPORTING
bool
diff --git a/drivers/base/arch_topology.c b/drivers/base/arch_topology.c
index 1eb81f113786..6119e11a9f95 100644
--- a/drivers/base/arch_topology.c
+++ b/drivers/base/arch_topology.c
@@ -248,6 +248,16 @@ core_initcall(free_raw_capacity);
#endif
#if defined(CONFIG_ARM64) || defined(CONFIG_RISCV)
+/*
+ * This function returns the logical cpu number of the node.
+ * There are basically three kinds of return values:
+ * (1) logical cpu number which is > 0.
+ * (2) -ENODEV when the device tree (DT) node is valid and found in the DT but
+ * there is no possible logical CPU in the kernel to match. This happens
+ * when CONFIG_NR_CPUS is configured to be smaller than the number of
+ * CPU nodes in DT. We need to just ignore this case.
+ * (3) -1 if the node does not exist in the device tree
+ */
static int __init get_cpu_for_node(struct device_node *node)
{
struct device_node *cpu_node;
@@ -261,7 +271,8 @@ static int __init get_cpu_for_node(struct device_node *node)
if (cpu >= 0)
topology_parse_cpu_capacity(cpu_node, cpu);
else
- pr_crit("Unable to find CPU node for %pOF\n", cpu_node);
+ pr_info("CPU node for %pOF exists but the possible cpu range is: %*pbl\n",
+ cpu_node, cpumask_pr_args(cpu_possible_mask));
of_node_put(cpu_node);
return cpu;
@@ -286,9 +297,8 @@ static int __init parse_core(struct device_node *core, int package_id,
cpu_topology[cpu].package_id = package_id;
cpu_topology[cpu].core_id = core_id;
cpu_topology[cpu].thread_id = i;
- } else {
- pr_err("%pOF: Can't get CPU for thread\n",
- t);
+ } else if (cpu != -ENODEV) {
+ pr_err("%pOF: Can't get CPU for thread\n", t);
of_node_put(t);
return -EINVAL;
}
@@ -307,7 +317,7 @@ static int __init parse_core(struct device_node *core, int package_id,
cpu_topology[cpu].package_id = package_id;
cpu_topology[cpu].core_id = core_id;
- } else if (leaf) {
+ } else if (leaf && cpu != -ENODEV) {
pr_err("%pOF: Can't get CPU for leaf core\n", core);
return -EINVAL;
}
diff --git a/drivers/base/attribute_container.c b/drivers/base/attribute_container.c
index 20736aaa0e69..f7bd0f4db13d 100644
--- a/drivers/base/attribute_container.c
+++ b/drivers/base/attribute_container.c
@@ -236,6 +236,109 @@ attribute_container_remove_device(struct device *dev,
mutex_unlock(&attribute_container_mutex);
}
+static int
+do_attribute_container_device_trigger_safe(struct device *dev,
+ struct attribute_container *cont,
+ int (*fn)(struct attribute_container *,
+ struct device *, struct device *),
+ int (*undo)(struct attribute_container *,
+ struct device *, struct device *))
+{
+ int ret;
+ struct internal_container *ic, *failed;
+ struct klist_iter iter;
+
+ if (attribute_container_no_classdevs(cont))
+ return fn(cont, dev, NULL);
+
+ klist_for_each_entry(ic, &cont->containers, node, &iter) {
+ if (dev == ic->classdev.parent) {
+ ret = fn(cont, dev, &ic->classdev);
+ if (ret) {
+ failed = ic;
+ klist_iter_exit(&iter);
+ goto fail;
+ }
+ }
+ }
+ return 0;
+
+fail:
+ if (!undo)
+ return ret;
+
+ /* Attempt to undo the work partially done. */
+ klist_for_each_entry(ic, &cont->containers, node, &iter) {
+ if (ic == failed) {
+ klist_iter_exit(&iter);
+ break;
+ }
+ if (dev == ic->classdev.parent)
+ undo(cont, dev, &ic->classdev);
+ }
+ return ret;
+}
+
+/**
+ * attribute_container_device_trigger_safe - execute a trigger for each
+ * matching classdev or fail all of them.
+ *
+ * @dev: The generic device to run the trigger for
+ * @fn: the function to execute for each classdev.
+ * @undo: A function to undo the work previously done in case of error
+ *
+ * This function is a safe version of
+ * attribute_container_device_trigger. It stops on the first error and
+ * undoes the partial work that has been done on the previous
+ * classdevs. It is guaranteed that either they all succeed, or none
+ * of them do.
+ */
+int
+attribute_container_device_trigger_safe(struct device *dev,
+ int (*fn)(struct attribute_container *,
+ struct device *,
+ struct device *),
+ int (*undo)(struct attribute_container *,
+ struct device *,
+ struct device *))
+{
+ struct attribute_container *cont, *failed = NULL;
+ int ret = 0;
+
+ mutex_lock(&attribute_container_mutex);
+
+ list_for_each_entry(cont, &attribute_container_list, node) {
+
+ if (!cont->match(cont, dev))
+ continue;
+
+ ret = do_attribute_container_device_trigger_safe(dev, cont,
+ fn, undo);
+ if (ret) {
+ failed = cont;
+ break;
+ }
+ }
+
+ if (ret && !WARN_ON(!undo)) {
+ list_for_each_entry(cont, &attribute_container_list, node) {
+
+ if (failed == cont)
+ break;
+
+ if (!cont->match(cont, dev))
+ continue;
+
+ do_attribute_container_device_trigger_safe(dev, cont,
+ undo, NULL);
+ }
+ }
+
+ mutex_unlock(&attribute_container_mutex);
+ return ret;
+
+}
+
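do_attribute_container_device_trigger_safe() implements an all-or-nothing trigger: run fn() across the matching class devices and, on the first failure, undo only the work already completed. A standalone model of that pattern, simplified to an int array with caller-supplied fn/undo:

static int trigger_safe(const int *items, int n,
			int (*fn)(int), void (*undo)(int))
{
	int i, ret = 0;

	for (i = 0; i < n; i++) {
		ret = fn(items[i]);
		if (ret)
			break;        /* items[i] failed; stop here */
	}
	if (ret && undo)
		while (i-- > 0)       /* unwind only the completed items */
			undo(items[i]);
	return ret;
}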
/**
* attribute_container_device_trigger - execute a trigger for each matching classdev
*
diff --git a/drivers/base/base.h b/drivers/base/base.h
index 0d32544b6f91..40fb069a8a7e 100644
--- a/drivers/base/base.h
+++ b/drivers/base/base.h
@@ -1,4 +1,15 @@
/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2001-2003 Patrick Mochel <mochel@osdl.org>
+ * Copyright (c) 2004-2009 Greg Kroah-Hartman <gregkh@suse.de>
+ * Copyright (c) 2008-2012 Novell Inc.
+ * Copyright (c) 2012-2019 Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+ * Copyright (c) 2012-2019 Linux Foundation
+ *
+ * Core driver model functions and structures that should not be
+ * shared outside of the drivers/base/ directory.
+ *
+ */
#include <linux/notifier.h>
/**
@@ -175,3 +186,11 @@ extern void device_links_unbind_consumers(struct device *dev);
/* device pm support */
void device_pm_move_to_tail(struct device *dev);
+
+#ifdef CONFIG_DEVTMPFS
+int devtmpfs_create_node(struct device *dev);
+int devtmpfs_delete_node(struct device *dev);
+#else
+static inline int devtmpfs_create_node(struct device *dev) { return 0; }
+static inline int devtmpfs_delete_node(struct device *dev) { return 0; }
+#endif
diff --git a/drivers/base/bus.c b/drivers/base/bus.c
index a1d1e8256324..886e9054999a 100644
--- a/drivers/base/bus.c
+++ b/drivers/base/bus.c
@@ -9,6 +9,7 @@
*/
#include <linux/async.h>
+#include <linux/device/bus.h>
#include <linux/device.h>
#include <linux/module.h>
#include <linux/errno.h>
diff --git a/drivers/base/class.c b/drivers/base/class.c
index d8a6a5864c2e..bcd410e6d70a 100644
--- a/drivers/base/class.c
+++ b/drivers/base/class.c
@@ -8,6 +8,7 @@
* Copyright (c) 2003-2004 IBM Corp.
*/
+#include <linux/device/class.h>
#include <linux/device.h>
#include <linux/module.h>
#include <linux/init.h>
diff --git a/drivers/base/component.c b/drivers/base/component.c
index 532a3a5d8f63..c7879f5ae2fb 100644
--- a/drivers/base/component.c
+++ b/drivers/base/component.c
@@ -11,7 +11,6 @@
#include <linux/device.h>
#include <linux/kref.h>
#include <linux/list.h>
-#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/slab.h>
#include <linux/debugfs.h>
@@ -102,11 +101,11 @@ static int component_devices_show(struct seq_file *s, void *data)
seq_printf(s, "%-40s %20s\n", "device name", "status");
seq_puts(s, "-------------------------------------------------------------\n");
for (i = 0; i < match->num; i++) {
- struct device *d = (struct device *)match->compare[i].data;
+ struct component *component = match->compare[i].component;
- seq_printf(s, "%-40s %20s\n", dev_name(d),
- match->compare[i].component ?
- "registered" : "not registered");
+ seq_printf(s, "%-40s %20s\n",
+ component ? dev_name(component->dev) : "(unknown)",
+ component ? (component->bound ? "bound" : "not bound") : "not registered");
}
mutex_unlock(&component_mutex);
@@ -775,5 +774,3 @@ void component_del(struct device *dev, const struct component_ops *ops)
kfree(component);
}
EXPORT_SYMBOL_GPL(component_del);
-
-MODULE_LICENSE("GPL v2");
diff --git a/drivers/base/dd.c b/drivers/base/dd.c
index d811e60610d3..b25bcab2a26b 100644
--- a/drivers/base/dd.c
+++ b/drivers/base/dd.c
@@ -516,7 +516,10 @@ static int really_probe(struct device *dev, struct device_driver *drv)
atomic_inc(&probe_count);
pr_debug("bus: '%s': %s: probing driver %s with device %s\n",
drv->bus->name, __func__, drv->name, dev_name(dev));
- WARN_ON(!list_empty(&dev->devres_head));
+ if (!list_empty(&dev->devres_head)) {
+ dev_crit(dev, "Resources present before probing\n");
+ return -EBUSY;
+ }
re_probe:
dev->driver = drv;
diff --git a/drivers/base/devtmpfs.c b/drivers/base/devtmpfs.c
index 6cdbf1531238..5995c437cbdf 100644
--- a/drivers/base/devtmpfs.c
+++ b/drivers/base/devtmpfs.c
@@ -30,11 +30,7 @@
static struct task_struct *thread;
-#if defined CONFIG_DEVTMPFS_MOUNT
-static int mount_dev = 1;
-#else
-static int mount_dev;
-#endif
+static int __initdata mount_dev = IS_ENABLED(CONFIG_DEVTMPFS_MOUNT);
static DEFINE_SPINLOCK(req_lock);
@@ -93,6 +89,23 @@ static inline int is_blockdev(struct device *dev)
static inline int is_blockdev(struct device *dev) { return 0; }
#endif
+static int devtmpfs_submit_req(struct req *req, const char *tmp)
+{
+ init_completion(&req->done);
+
+ spin_lock(&req_lock);
+ req->next = requests;
+ requests = req;
+ spin_unlock(&req_lock);
+
+ wake_up_process(thread);
+ wait_for_completion(&req->done);
+
+ kfree(tmp);
+
+ return req->err;
+}
+
int devtmpfs_create_node(struct device *dev)
{
const char *tmp = NULL;
@@ -117,19 +130,7 @@ int devtmpfs_create_node(struct device *dev)
req.dev = dev;
- init_completion(&req.done);
-
- spin_lock(&req_lock);
- req.next = requests;
- requests = &req;
- spin_unlock(&req_lock);
-
- wake_up_process(thread);
- wait_for_completion(&req.done);
-
- kfree(tmp);
-
- return req.err;
+ return devtmpfs_submit_req(&req, tmp);
}
int devtmpfs_delete_node(struct device *dev)
@@ -147,18 +148,7 @@ int devtmpfs_delete_node(struct device *dev)
req.mode = 0;
req.dev = dev;
- init_completion(&req.done);
-
- spin_lock(&req_lock);
- req.next = requests;
- requests = &req;
- spin_unlock(&req_lock);
-
- wake_up_process(thread);
- wait_for_completion(&req.done);
-
- kfree(tmp);
- return req.err;
+ return devtmpfs_submit_req(&req, tmp);
}
static int dev_mkdir(const char *name, umode_t mode)
@@ -359,7 +349,7 @@ static int handle_remove(const char *nodename, struct device *dev)
* If configured, or requested by the commandline, devtmpfs will be
* auto-mounted after the kernel mounted the root filesystem.
*/
-int devtmpfs_mount(void)
+int __init devtmpfs_mount(void)
{
int err;
@@ -388,18 +378,30 @@ static int handle(const char *name, umode_t mode, kuid_t uid, kgid_t gid,
return handle_remove(name, dev);
}
-static int devtmpfsd(void *p)
+static int devtmpfs_setup(void *p)
{
- int *err = p;
- *err = ksys_unshare(CLONE_NEWNS);
- if (*err)
+ int err;
+
+ err = ksys_unshare(CLONE_NEWNS);
+ if (err)
goto out;
- *err = do_mount("devtmpfs", "/", "devtmpfs", MS_SILENT, NULL);
- if (*err)
+ err = do_mount("devtmpfs", "/", "devtmpfs", MS_SILENT, NULL);
+ if (err)
goto out;
ksys_chdir("/.."); /* will traverse into overmounted root */
ksys_chroot(".");
+out:
+ *(int *)p = err;
complete(&setup_done);
+ return err;
+}
+
+static int devtmpfsd(void *p)
+{
+ int err = devtmpfs_setup(p);
+
+ if (err)
+ return err;
while (1) {
spin_lock(&req_lock);
while (requests) {
@@ -420,9 +422,6 @@ static int devtmpfsd(void *p)
schedule();
}
return 0;
-out:
- complete(&setup_done);
- return *err;
}
/*
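The devtmpfs refactor above folds the duplicated submit-and-wait code from devtmpfs_create_node() and devtmpfs_delete_node() into devtmpfs_submit_req(). A userspace model of the handshake it centralizes, using pthreads in place of the kernel's completion (the lock and condvar are assumed initialized by the caller):

#include <pthread.h>

struct req {
	int err, done;                       /* filled in by the worker */
	pthread_mutex_t lock;
	pthread_cond_t cv;
};

static int submit_req(struct req *req)       /* caller side */
{
	pthread_mutex_lock(&req->lock);
	while (!req->done)                   /* wait_for_completion() */
		pthread_cond_wait(&req->cv, &req->lock);
	pthread_mutex_unlock(&req->lock);
	return req->err;                     /* one place returns status */
}

static void complete_req(struct req *req, int err)  /* worker side */
{
	pthread_mutex_lock(&req->lock);
	req->err = err;
	req->done = 1;
	pthread_cond_signal(&req->cv);       /* complete(&req.done) */
	pthread_mutex_unlock(&req->lock);
}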
diff --git a/drivers/base/driver.c b/drivers/base/driver.c
index 4e5ca632f35e..57c68769e157 100644
--- a/drivers/base/driver.c
+++ b/drivers/base/driver.c
@@ -8,6 +8,7 @@
* Copyright (c) 2007 Novell Inc.
*/
+#include <linux/device/driver.h>
#include <linux/device.h>
#include <linux/module.h>
#include <linux/errno.h>
diff --git a/drivers/base/firmware_loader/builtin/Makefile b/drivers/base/firmware_loader/builtin/Makefile
index 4a66888e7253..5fa7ce3745a0 100644
--- a/drivers/base/firmware_loader/builtin/Makefile
+++ b/drivers/base/firmware_loader/builtin/Makefile
@@ -17,7 +17,7 @@ PROGBITS = $(if $(CONFIG_ARM),%,@)progbits
filechk_fwbin = \
echo "/* Generated by $(src)/Makefile */" ;\
echo " .section .rodata" ;\
- echo " .p2align $(ASM_ALIGN)" ;\
+ echo " .p2align 4" ;\
echo "_fw_$(FWSTR)_bin:" ;\
echo " .incbin \"$(fwdir)/$(FWNAME)\"" ;\
echo "_fw_end:" ;\
diff --git a/drivers/base/firmware_loader/fallback.c b/drivers/base/firmware_loader/fallback.c
index 62ee90b4db56..8704e1bae175 100644
--- a/drivers/base/firmware_loader/fallback.c
+++ b/drivers/base/firmware_loader/fallback.c
@@ -606,7 +606,7 @@ static bool fw_run_sysfs_fallback(enum fw_opt opt_flags)
return false;
}
- if ((opt_flags & FW_OPT_NOFALLBACK))
+ if ((opt_flags & FW_OPT_NOFALLBACK_SYSFS))
return false;
/* Also permit LSMs and IMA to fail firmware sysfs fallback */
@@ -630,10 +630,11 @@ static bool fw_run_sysfs_fallback(enum fw_opt opt_flags)
* interface. Userspace is in charge of loading the firmware through the sysfs
* loading interface. This sysfs fallback mechanism may be disabled completely
* on a system by setting the proc sysctl value ignore_sysfs_fallback to true.
- * If this false we check if the internal API caller set the @FW_OPT_NOFALLBACK
- * flag, if so it would also disable the fallback mechanism. A system may want
- * to enfoce the sysfs fallback mechanism at all times, it can do this by
- * setting ignore_sysfs_fallback to false and force_sysfs_fallback to true.
+ * If this is false we check if the internal API caller set the
+ * @FW_OPT_NOFALLBACK_SYSFS flag, if so it would also disable the fallback
+ * mechanism. A system may want to enforce the sysfs fallback mechanism at all
+ * times, it can do this by setting ignore_sysfs_fallback to false and
+ * force_sysfs_fallback to true.
* Enabling force_sysfs_fallback is functionally equivalent to build a kernel
* with CONFIG_FW_LOADER_USER_HELPER_FALLBACK.
**/
diff --git a/drivers/base/firmware_loader/firmware.h b/drivers/base/firmware_loader/firmware.h
index 7ecd590e67fe..8656e5239a80 100644
--- a/drivers/base/firmware_loader/firmware.h
+++ b/drivers/base/firmware_loader/firmware.h
@@ -27,16 +27,16 @@
* firmware file lookup on storage is avoided. Used for calls where the
* file may be too big, or where the driver takes charge of its own
* firmware caching mechanism.
- * @FW_OPT_NOFALLBACK: Disable the fallback mechanism. Takes precedence over
- * &FW_OPT_UEVENT and &FW_OPT_USERHELPER.
+ * @FW_OPT_NOFALLBACK_SYSFS: Disable the sysfs fallback mechanism. Takes
+ * precedence over &FW_OPT_UEVENT and &FW_OPT_USERHELPER.
*/
enum fw_opt {
- FW_OPT_UEVENT = BIT(0),
- FW_OPT_NOWAIT = BIT(1),
- FW_OPT_USERHELPER = BIT(2),
- FW_OPT_NO_WARN = BIT(3),
- FW_OPT_NOCACHE = BIT(4),
- FW_OPT_NOFALLBACK = BIT(5),
+ FW_OPT_UEVENT = BIT(0),
+ FW_OPT_NOWAIT = BIT(1),
+ FW_OPT_USERHELPER = BIT(2),
+ FW_OPT_NO_WARN = BIT(3),
+ FW_OPT_NOCACHE = BIT(4),
+ FW_OPT_NOFALLBACK_SYSFS = BIT(5),
};
enum fw_status {
diff --git a/drivers/base/firmware_loader/main.c b/drivers/base/firmware_loader/main.c
index 249add8c5e05..57133a9dad09 100644
--- a/drivers/base/firmware_loader/main.c
+++ b/drivers/base/firmware_loader/main.c
@@ -877,7 +877,7 @@ int request_firmware_direct(const struct firmware **firmware_p,
__module_get(THIS_MODULE);
ret = _request_firmware(firmware_p, name, device, NULL, 0,
FW_OPT_UEVENT | FW_OPT_NO_WARN |
- FW_OPT_NOFALLBACK);
+ FW_OPT_NOFALLBACK_SYSFS);
module_put(THIS_MODULE);
return ret;
}
diff --git a/drivers/base/memory.c b/drivers/base/memory.c
index 799b43191dea..15659306ad69 100644
--- a/drivers/base/memory.c
+++ b/drivers/base/memory.c
@@ -70,20 +70,6 @@ void unregister_memory_notifier(struct notifier_block *nb)
}
EXPORT_SYMBOL(unregister_memory_notifier);
-static ATOMIC_NOTIFIER_HEAD(memory_isolate_chain);
-
-int register_memory_isolate_notifier(struct notifier_block *nb)
-{
- return atomic_notifier_chain_register(&memory_isolate_chain, nb);
-}
-EXPORT_SYMBOL(register_memory_isolate_notifier);
-
-void unregister_memory_isolate_notifier(struct notifier_block *nb)
-{
- atomic_notifier_chain_unregister(&memory_isolate_chain, nb);
-}
-EXPORT_SYMBOL(unregister_memory_isolate_notifier);
-
static void memory_block_release(struct device *dev)
{
struct memory_block *mem = to_memory_block(dev);
@@ -175,11 +161,6 @@ int memory_notify(unsigned long val, void *v)
return blocking_notifier_call_chain(&memory_chain, val, v);
}
-int memory_isolate_notify(unsigned long val, void *v)
-{
- return atomic_notifier_call_chain(&memory_isolate_chain, val, v);
-}
-
/*
* The probe routines leave the pages uninitialized, just as the bootmem code
* does. Make sure we do not access them, but instead use only information from
@@ -225,7 +206,7 @@ static bool pages_correctly_probed(unsigned long start_pfn)
*/
static int
memory_block_action(unsigned long start_section_nr, unsigned long action,
- int online_type)
+ int online_type, int nid)
{
unsigned long start_pfn;
unsigned long nr_pages = PAGES_PER_SECTION * sections_per_block;
@@ -238,7 +219,7 @@ memory_block_action(unsigned long start_section_nr, unsigned long action,
if (!pages_correctly_probed(start_pfn))
return -EBUSY;
- ret = online_pages(start_pfn, nr_pages, online_type);
+ ret = online_pages(start_pfn, nr_pages, online_type, nid);
break;
case MEM_OFFLINE:
ret = offline_pages(start_pfn, nr_pages);
@@ -264,7 +245,7 @@ static int memory_block_change_state(struct memory_block *mem,
mem->state = MEM_GOING_OFFLINE;
ret = memory_block_action(mem->start_section_nr, to_state,
- mem->online_type);
+ mem->online_type, mem->nid);
mem->state = ret ? from_state_req : to_state;
diff --git a/drivers/base/platform.c b/drivers/base/platform.c
index cf6b6b722e5c..7fa654f1288b 100644
--- a/drivers/base/platform.c
+++ b/drivers/base/platform.c
@@ -27,6 +27,7 @@
#include <linux/limits.h>
#include <linux/property.h>
#include <linux/kmemleak.h>
+#include <linux/types.h>
#include "base.h"
#include "power/power.h"
@@ -48,7 +49,7 @@ EXPORT_SYMBOL_GPL(platform_bus);
struct resource *platform_get_resource(struct platform_device *dev,
unsigned int type, unsigned int num)
{
- int i;
+ u32 i;
for (i = 0; i < dev->num_resources; i++) {
struct resource *r = &dev->resource[i];
@@ -255,7 +256,7 @@ struct resource *platform_get_resource_byname(struct platform_device *dev,
unsigned int type,
const char *name)
{
- int i;
+ u32 i;
for (i = 0; i < dev->num_resources; i++) {
struct resource *r = &dev->resource[i];
@@ -501,7 +502,8 @@ EXPORT_SYMBOL_GPL(platform_device_add_properties);
*/
int platform_device_add(struct platform_device *pdev)
{
- int i, ret;
+ u32 i;
+ int ret;
if (!pdev)
return -EINVAL;
@@ -569,7 +571,7 @@ int platform_device_add(struct platform_device *pdev)
pdev->id = PLATFORM_DEVID_AUTO;
}
- while (--i >= 0) {
+ while (i--) {
struct resource *r = &pdev->resource[i];
if (r->parent)
release_resource(r);
@@ -590,7 +592,7 @@ EXPORT_SYMBOL_GPL(platform_device_add);
*/
void platform_device_del(struct platform_device *pdev)
{
- int i;
+ u32 i;
if (!IS_ERR_OR_NULL(pdev)) {
device_del(&pdev->dev);
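Switching the resource index from int to u32 forces the unwind loop in platform_device_add() to change: "--i >= 0" is a tautology for unsigned types. The "while (i--)" form is the standard unsigned countdown; a runnable demonstration:

#include <stdio.h>

int main(void)
{
	unsigned int i = 3;

	while (i--)                   /* visits 2, 1, 0 and stops */
		printf("release resource %u\n", i);
	/* "while (--i >= 0)" would be always true for unsigned i
	 * and wrap past zero instead of terminating. */
	return 0;
}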
diff --git a/drivers/base/power/main.c b/drivers/base/power/main.c
index 134a8af51511..0e99a760aebd 100644
--- a/drivers/base/power/main.c
+++ b/drivers/base/power/main.c
@@ -273,10 +273,38 @@ static void dpm_wait_for_suppliers(struct device *dev, bool async)
device_links_read_unlock(idx);
}
-static void dpm_wait_for_superior(struct device *dev, bool async)
+static bool dpm_wait_for_superior(struct device *dev, bool async)
{
- dpm_wait(dev->parent, async);
+ struct device *parent;
+
+ /*
+ * If the device is resumed asynchronously and the parent's callback
+ * deletes both the device and the parent itself, the parent object may
+ * be freed while this function is running, so avoid that by reference
+ * counting the parent once more unless the device has been deleted
+ * already (in which case return right away).
+ */
+ mutex_lock(&dpm_list_mtx);
+
+ if (!device_pm_initialized(dev)) {
+ mutex_unlock(&dpm_list_mtx);
+ return false;
+ }
+
+ parent = get_device(dev->parent);
+
+ mutex_unlock(&dpm_list_mtx);
+
+ dpm_wait(parent, async);
+ put_device(parent);
+
dpm_wait_for_suppliers(dev, async);
+
+ /*
+ * If the parent's callback has deleted the device, attempting to resume
+ * it would be invalid, so avoid doing that then.
+ */
+ return device_pm_initialized(dev);
}
static void dpm_wait_for_consumers(struct device *dev, bool async)
@@ -621,7 +649,8 @@ static int device_resume_noirq(struct device *dev, pm_message_t state, bool asyn
if (!dev->power.is_noirq_suspended)
goto Out;
- dpm_wait_for_superior(dev, async);
+ if (!dpm_wait_for_superior(dev, async))
+ goto Out;
skip_resume = dev_pm_may_skip_resume(dev);
@@ -829,7 +858,8 @@ static int device_resume_early(struct device *dev, pm_message_t state, bool asyn
if (!dev->power.is_late_suspended)
goto Out;
- dpm_wait_for_superior(dev, async);
+ if (!dpm_wait_for_superior(dev, async))
+ goto Out;
callback = dpm_subsys_resume_early_cb(dev, state, &info);
@@ -944,7 +974,9 @@ static int device_resume(struct device *dev, pm_message_t state, bool async)
goto Complete;
}
- dpm_wait_for_superior(dev, async);
+ if (!dpm_wait_for_superior(dev, async))
+ goto Complete;
+
dpm_watchdog_set(&wd, dev);
device_lock(dev);
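dpm_wait_for_superior() now pins the parent with get_device() under dpm_list_mtx and rechecks device_pm_initialized(), so an async resume cannot race with a parent callback that deletes either object. A compilable toy model of the pin-wait-recheck shape (obj and its refcount are stand-ins, not the driver-core types):

struct obj { int refs; };             /* stand-in for struct device */

static struct obj *obj_get(struct obj *o) { if (o) o->refs++; return o; }
static void obj_put(struct obj *o)        { if (o) o->refs--; }

/* Returns nonzero if the device still exists and may be resumed. */
static int wait_for_superior(struct obj *parent, int *dev_registered)
{
	struct obj *p;

	/* take list lock */
	if (!*dev_registered)         /* device already deleted: bail */
		return 0;
	p = obj_get(parent);          /* pin parent across the wait */
	/* drop list lock, wait for p to finish resuming ... */
	obj_put(p);
	return *dev_registered;       /* recheck: callback may have deleted us */
}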
diff --git a/drivers/base/power/qos-test.c b/drivers/base/power/qos-test.c
index 3115db08d56b..79fc6c4418da 100644
--- a/drivers/base/power/qos-test.c
+++ b/drivers/base/power/qos-test.c
@@ -114,4 +114,4 @@ static struct kunit_suite pm_qos_test_module = {
.name = "qos-kunit-test",
.test_cases = pm_qos_test_cases,
};
-kunit_test_suite(pm_qos_test_module);
+kunit_test_suites(&pm_qos_test_module);
diff --git a/drivers/base/power/runtime.c b/drivers/base/power/runtime.c
index 48616f358854..16134a69bf6f 100644
--- a/drivers/base/power/runtime.c
+++ b/drivers/base/power/runtime.c
@@ -1006,8 +1006,10 @@ int __pm_runtime_idle(struct device *dev, int rpmflags)
int retval;
if (rpmflags & RPM_GET_PUT) {
- if (!atomic_dec_and_test(&dev->power.usage_count))
+ if (!atomic_dec_and_test(&dev->power.usage_count)) {
+ trace_rpm_usage_rcuidle(dev, rpmflags);
return 0;
+ }
}
might_sleep_if(!(rpmflags & RPM_ASYNC) && !dev->power.irq_safe);
@@ -1038,8 +1040,10 @@ int __pm_runtime_suspend(struct device *dev, int rpmflags)
int retval;
if (rpmflags & RPM_GET_PUT) {
- if (!atomic_dec_and_test(&dev->power.usage_count))
+ if (!atomic_dec_and_test(&dev->power.usage_count)) {
+ trace_rpm_usage_rcuidle(dev, rpmflags);
return 0;
+ }
}
might_sleep_if(!(rpmflags & RPM_ASYNC) && !dev->power.irq_safe);
@@ -1101,6 +1105,7 @@ int pm_runtime_get_if_in_use(struct device *dev)
retval = dev->power.disable_depth > 0 ? -EINVAL :
dev->power.runtime_status == RPM_ACTIVE
&& atomic_inc_not_zero(&dev->power.usage_count);
+ trace_rpm_usage_rcuidle(dev, 0);
spin_unlock_irqrestore(&dev->power.lock, flags);
return retval;
}
@@ -1434,6 +1439,8 @@ void pm_runtime_allow(struct device *dev)
dev->power.runtime_auto = true;
if (atomic_dec_and_test(&dev->power.usage_count))
rpm_idle(dev, RPM_AUTO | RPM_ASYNC);
+ else
+ trace_rpm_usage_rcuidle(dev, RPM_AUTO | RPM_ASYNC);
out:
spin_unlock_irq(&dev->power.lock);
@@ -1501,6 +1508,8 @@ static void update_autosuspend(struct device *dev, int old_delay, int old_use)
if (!old_use || old_delay >= 0) {
atomic_inc(&dev->power.usage_count);
rpm_resume(dev, 0);
+ } else {
+ trace_rpm_usage_rcuidle(dev, 0);
}
}
diff --git a/drivers/base/power/wakeup.c b/drivers/base/power/wakeup.c
index 70a9edb5f525..27f3e60608e5 100644
--- a/drivers/base/power/wakeup.c
+++ b/drivers/base/power/wakeup.c
@@ -1125,6 +1125,9 @@ static void *wakeup_sources_stats_seq_next(struct seq_file *m,
break;
}
+ if (!next_ws)
+ print_wakeup_source_stats(m, &deleted_ws);
+
return next_ws;
}
diff --git a/drivers/base/regmap/regmap-i2c.c b/drivers/base/regmap/regmap-i2c.c
index ac9b31c57967..008f8da69d97 100644
--- a/drivers/base/regmap/regmap-i2c.c
+++ b/drivers/base/regmap/regmap-i2c.c
@@ -43,7 +43,7 @@ static int regmap_smbus_byte_reg_write(void *context, unsigned int reg,
return i2c_smbus_write_byte_data(i2c, reg, val);
}
-static struct regmap_bus regmap_smbus_byte = {
+static const struct regmap_bus regmap_smbus_byte = {
.reg_write = regmap_smbus_byte_reg_write,
.reg_read = regmap_smbus_byte_reg_read,
};
@@ -79,7 +79,7 @@ static int regmap_smbus_word_reg_write(void *context, unsigned int reg,
return i2c_smbus_write_word_data(i2c, reg, val);
}
-static struct regmap_bus regmap_smbus_word = {
+static const struct regmap_bus regmap_smbus_word = {
.reg_write = regmap_smbus_word_reg_write,
.reg_read = regmap_smbus_word_reg_read,
};
@@ -115,7 +115,7 @@ static int regmap_smbus_word_write_swapped(void *context, unsigned int reg,
return i2c_smbus_write_word_swapped(i2c, reg, val);
}
-static struct regmap_bus regmap_smbus_word_swapped = {
+static const struct regmap_bus regmap_smbus_word_swapped = {
.reg_write = regmap_smbus_word_write_swapped,
.reg_read = regmap_smbus_word_read_swapped,
};
@@ -197,7 +197,7 @@ static int regmap_i2c_read(void *context,
return -EIO;
}
-static struct regmap_bus regmap_i2c = {
+static const struct regmap_bus regmap_i2c = {
.write = regmap_i2c_write,
.gather_write = regmap_i2c_gather_write,
.read = regmap_i2c_read,
@@ -239,7 +239,7 @@ static int regmap_i2c_smbus_i2c_read(void *context, const void *reg,
return -EIO;
}
-static struct regmap_bus regmap_i2c_smbus_i2c_block = {
+static const struct regmap_bus regmap_i2c_smbus_i2c_block = {
.write = regmap_i2c_smbus_i2c_write,
.read = regmap_i2c_smbus_i2c_read,
.max_raw_read = I2C_SMBUS_BLOCK_MAX,
diff --git a/drivers/base/regmap/regmap.c b/drivers/base/regmap/regmap.c
index 19f57ccfbe1d..59f911e57719 100644
--- a/drivers/base/regmap/regmap.c
+++ b/drivers/base/regmap/regmap.c
@@ -1488,11 +1488,18 @@ static int _regmap_raw_write_impl(struct regmap *map, unsigned int reg,
WARN_ON(!map->bus);
- /* Check for unwritable registers before we start */
- for (i = 0; i < val_len / map->format.val_bytes; i++)
- if (!regmap_writeable(map,
- reg + regmap_get_offset(map, i)))
- return -EINVAL;
+ /* Check for unwritable or noinc registers in range
+ * before we start
+ */
+ if (!regmap_writeable_noinc(map, reg)) {
+ for (i = 0; i < val_len / map->format.val_bytes; i++) {
+ unsigned int element =
+ reg + regmap_get_offset(map, i);
+ if (!regmap_writeable(map, element) ||
+ regmap_writeable_noinc(map, element))
+ return -EINVAL;
+ }
+ }
if (!map->cache_bypass && map->format.parse_val) {
unsigned int ival;
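The new check above skips the per-register scan when the base register is itself a noinc register (the whole buffer targets one address), and otherwise rejects a range containing an unwritable or noinc register. A standalone model with hypothetical writeable()/noinc() predicates:

static int check_raw_write(unsigned int reg, unsigned int count,
			   int (*writeable)(unsigned int),
			   int (*noinc)(unsigned int))
{
	unsigned int i;

	if (noinc(reg))
		return 0;                 /* whole write goes to one register */
	for (i = 0; i < count; i++) {
		unsigned int element = reg + i;

		if (!writeable(element) || noinc(element))
			return -22;       /* -EINVAL: bad register in range */
	}
	return 0;
}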
diff --git a/drivers/base/swnode.c b/drivers/base/swnode.c
index d8d0dc0ca5ac..0b081dee1e95 100644
--- a/drivers/base/swnode.c
+++ b/drivers/base/swnode.c
@@ -108,10 +108,7 @@ static const void *property_get_pointer(const struct property_entry *prop)
if (!prop->length)
return NULL;
- if (prop->is_array)
- return prop->pointer;
-
- return &prop->value;
+ return prop->is_inline ? &prop->value : prop->pointer;
}
static const void *property_entry_find(const struct property_entry *props,
@@ -201,92 +198,91 @@ static int property_entry_read_string_array(const struct property_entry *props,
static void property_entry_free_data(const struct property_entry *p)
{
- const void *pointer = property_get_pointer(p);
const char * const *src_str;
size_t i, nval;
- if (p->is_array) {
- if (p->type == DEV_PROP_STRING && p->pointer) {
- src_str = p->pointer;
- nval = p->length / sizeof(const char *);
- for (i = 0; i < nval; i++)
- kfree(src_str[i]);
- }
- kfree(pointer);
- } else if (p->type == DEV_PROP_STRING) {
- kfree(p->value.str);
+ if (p->type == DEV_PROP_STRING) {
+ src_str = property_get_pointer(p);
+ nval = p->length / sizeof(*src_str);
+ for (i = 0; i < nval; i++)
+ kfree(src_str[i]);
}
+
+ if (!p->is_inline)
+ kfree(p->pointer);
+
kfree(p->name);
}
-static const char * const *
-property_copy_string_array(const struct property_entry *src)
+static bool property_copy_string_array(const char **dst_ptr,
+ const char * const *src_ptr,
+ size_t nval)
{
- const char **d;
- const char * const *src_str = src->pointer;
- size_t nval = src->length / sizeof(*d);
int i;
- d = kcalloc(nval, sizeof(*d), GFP_KERNEL);
- if (!d)
- return NULL;
-
for (i = 0; i < nval; i++) {
- d[i] = kstrdup(src_str[i], GFP_KERNEL);
- if (!d[i] && src_str[i]) {
+ dst_ptr[i] = kstrdup(src_ptr[i], GFP_KERNEL);
+ if (!dst_ptr[i] && src_ptr[i]) {
while (--i >= 0)
- kfree(d[i]);
- kfree(d);
- return NULL;
+ kfree(dst_ptr[i]);
+ return false;
}
}
- return d;
+ return true;
}
static int property_entry_copy_data(struct property_entry *dst,
const struct property_entry *src)
{
const void *pointer = property_get_pointer(src);
- const void *new;
-
- if (src->is_array) {
- if (!src->length)
- return -ENODATA;
-
- if (src->type == DEV_PROP_STRING) {
- new = property_copy_string_array(src);
- if (!new)
- return -ENOMEM;
- } else {
- new = kmemdup(pointer, src->length, GFP_KERNEL);
- if (!new)
- return -ENOMEM;
- }
+ void *dst_ptr;
+ size_t nval;
+
+ /*
+ * Properties with no data should not be marked as stored
+ * out of line.
+ */
+ if (!src->is_inline && !src->length)
+ return -ENODATA;
+
+ /*
+ * Reference properties are never stored inline as
+ * they are too big.
+ */
+ if (src->type == DEV_PROP_REF && src->is_inline)
+ return -EINVAL;
- dst->is_array = true;
- dst->pointer = new;
- } else if (src->type == DEV_PROP_STRING) {
- new = kstrdup(src->value.str, GFP_KERNEL);
- if (!new && src->value.str)
+ if (src->length <= sizeof(dst->value)) {
+ dst_ptr = &dst->value;
+ dst->is_inline = true;
+ } else {
+ dst_ptr = kmalloc(src->length, GFP_KERNEL);
+ if (!dst_ptr)
return -ENOMEM;
+ dst->pointer = dst_ptr;
+ }
- dst->value.str = new;
+ if (src->type == DEV_PROP_STRING) {
+ nval = src->length / sizeof(const char *);
+ if (!property_copy_string_array(dst_ptr, pointer, nval)) {
+ if (!dst->is_inline)
+ kfree(dst->pointer);
+ return -ENOMEM;
+ }
} else {
- dst->value = src->value;
+ memcpy(dst_ptr, pointer, src->length);
}
dst->length = src->length;
dst->type = src->type;
dst->name = kstrdup(src->name, GFP_KERNEL);
- if (!dst->name)
- goto out_free_data;
+ if (!dst->name) {
+ property_entry_free_data(dst);
+ return -ENOMEM;
+ }
return 0;
-
-out_free_data:
- property_entry_free_data(dst);
- return -ENOMEM;
}
/**
@@ -483,31 +479,49 @@ software_node_get_reference_args(const struct fwnode_handle *fwnode,
struct fwnode_reference_args *args)
{
struct swnode *swnode = to_swnode(fwnode);
- const struct software_node_reference *ref;
+ const struct software_node_ref_args *ref_array;
+ const struct software_node_ref_args *ref;
const struct property_entry *prop;
struct fwnode_handle *refnode;
+ u32 nargs_prop_val;
+ int error;
int i;
- if (!swnode || !swnode->node->references)
+ if (!swnode)
return -ENOENT;
- for (ref = swnode->node->references; ref->name; ref++)
- if (!strcmp(ref->name, propname))
- break;
+ prop = property_entry_get(swnode->node->properties, propname);
+ if (!prop)
+ return -ENOENT;
+
+ if (prop->type != DEV_PROP_REF)
+ return -EINVAL;
- if (!ref->name || index > (ref->nrefs - 1))
+ /*
+ * We expect that references are never stored inline, even
+ * single ones, as they are too big.
+ */
+ if (prop->is_inline)
+ return -EINVAL;
+
+ if (index * sizeof(*ref) >= prop->length)
return -ENOENT;
- refnode = software_node_fwnode(ref->refs[index].node);
+ ref_array = prop->pointer;
+ ref = &ref_array[index];
+
+ refnode = software_node_fwnode(ref->node);
if (!refnode)
return -ENOENT;
if (nargs_prop) {
- prop = property_entry_get(swnode->node->properties, nargs_prop);
- if (!prop)
- return -EINVAL;
+ error = property_entry_read_int_array(swnode->node->properties,
+ nargs_prop, sizeof(u32),
+ &nargs_prop_val, 1);
+ if (error)
+ return error;
- nargs = prop->value.u32_data;
+ nargs = nargs_prop_val;
}
if (nargs > NR_FWNODE_REFERENCE_ARGS)
@@ -517,7 +531,7 @@ software_node_get_reference_args(const struct fwnode_handle *fwnode,
args->nargs = nargs;
for (i = 0; i < nargs; i++)
- args->args[i] = ref->refs[index].args[i];
+ args->args[i] = ref->args[i];
return 0;
}
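The swnode rework replaces the is_array flag with is_inline: payloads no larger than the entry's value union are stored inline, everything else goes out of line on the heap. A simplified userspace model of that small-buffer copy rule (the types here are illustrative):

#include <stdlib.h>
#include <string.h>

struct entry {
	size_t length;
	int is_inline;
	union {
		void *pointer;            /* out-of-line storage */
		unsigned char value[8];   /* inline storage */
	} u;
};

static int copy_data(struct entry *dst, const void *src, size_t len)
{
	void *dst_ptr;

	if (len <= sizeof(dst->u.value)) {
		dst_ptr = dst->u.value;   /* small payload: keep inline */
		dst->is_inline = 1;
	} else {
		dst_ptr = malloc(len);    /* big payload: heap copy */
		if (!dst_ptr)
			return -12;       /* -ENOMEM */
		dst->u.pointer = dst_ptr;
		dst->is_inline = 0;
	}
	memcpy(dst_ptr, src, len);
	dst->length = len;
	return 0;
}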
diff --git a/drivers/base/test/Kconfig b/drivers/base/test/Kconfig
index 86e85daa80bf..305c7751184a 100644
--- a/drivers/base/test/Kconfig
+++ b/drivers/base/test/Kconfig
@@ -8,3 +8,6 @@ config TEST_ASYNC_DRIVER_PROBE
The module name will be test_async_driver_probe.ko
If unsure say N.
+config KUNIT_DRIVER_PE_TEST
+ bool "KUnit Tests for property entry API"
+ depends on KUNIT=y
diff --git a/drivers/base/test/Makefile b/drivers/base/test/Makefile
index 0f1f7277a013..3ca56367c84b 100644
--- a/drivers/base/test/Makefile
+++ b/drivers/base/test/Makefile
@@ -1,2 +1,4 @@
# SPDX-License-Identifier: GPL-2.0
obj-$(CONFIG_TEST_ASYNC_DRIVER_PROBE) += test_async_driver_probe.o
+
+obj-$(CONFIG_KUNIT_DRIVER_PE_TEST) += property-entry-test.o
diff --git a/drivers/base/test/property-entry-test.c b/drivers/base/test/property-entry-test.c
new file mode 100644
index 000000000000..abe03315180f
--- /dev/null
+++ b/drivers/base/test/property-entry-test.c
@@ -0,0 +1,475 @@
+// SPDX-License-Identifier: GPL-2.0
+// Unit tests for property entries API
+//
+// Copyright 2019 Google LLC.
+
+#include <kunit/test.h>
+#include <linux/property.h>
+#include <linux/types.h>
+
+static void pe_test_uints(struct kunit *test)
+{
+ static const struct property_entry entries[] = {
+ PROPERTY_ENTRY_U8("prop-u8", 8),
+ PROPERTY_ENTRY_U16("prop-u16", 16),
+ PROPERTY_ENTRY_U32("prop-u32", 32),
+ PROPERTY_ENTRY_U64("prop-u64", 64),
+ { }
+ };
+
+ struct fwnode_handle *node;
+ u8 val_u8, array_u8[2];
+ u16 val_u16, array_u16[2];
+ u32 val_u32, array_u32[2];
+ u64 val_u64, array_u64[2];
+ int error;
+
+ node = fwnode_create_software_node(entries, NULL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, node);
+
+ error = fwnode_property_read_u8(node, "prop-u8", &val_u8);
+ KUNIT_EXPECT_EQ(test, error, 0);
+ KUNIT_EXPECT_EQ(test, (int)val_u8, 8);
+
+ error = fwnode_property_read_u8_array(node, "prop-u8", array_u8, 1);
+ KUNIT_EXPECT_EQ(test, error, 0);
+ KUNIT_EXPECT_EQ(test, (int)array_u8[0], 8);
+
+ error = fwnode_property_read_u8_array(node, "prop-u8", array_u8, 2);
+ KUNIT_EXPECT_NE(test, error, 0);
+
+ error = fwnode_property_read_u8(node, "no-prop-u8", &val_u8);
+ KUNIT_EXPECT_NE(test, error, 0);
+
+ error = fwnode_property_read_u8_array(node, "no-prop-u8", array_u8, 1);
+ KUNIT_EXPECT_NE(test, error, 0);
+
+ error = fwnode_property_read_u16(node, "prop-u16", &val_u16);
+ KUNIT_EXPECT_EQ(test, error, 0);
+ KUNIT_EXPECT_EQ(test, (int)val_u16, 16);
+
+ error = fwnode_property_read_u16_array(node, "prop-u16", array_u16, 1);
+ KUNIT_EXPECT_EQ(test, error, 0);
+ KUNIT_EXPECT_EQ(test, (int)array_u16[0], 16);
+
+ error = fwnode_property_read_u16_array(node, "prop-u16", array_u16, 2);
+ KUNIT_EXPECT_NE(test, error, 0);
+
+ error = fwnode_property_read_u16(node, "no-prop-u16", &val_u16);
+ KUNIT_EXPECT_NE(test, error, 0);
+
+ error = fwnode_property_read_u16_array(node, "no-prop-u16", array_u16, 1);
+ KUNIT_EXPECT_NE(test, error, 0);
+
+ error = fwnode_property_read_u32(node, "prop-u32", &val_u32);
+ KUNIT_EXPECT_EQ(test, error, 0);
+ KUNIT_EXPECT_EQ(test, (int)val_u32, 32);
+
+ error = fwnode_property_read_u32_array(node, "prop-u32", array_u32, 1);
+ KUNIT_EXPECT_EQ(test, error, 0);
+ KUNIT_EXPECT_EQ(test, (int)array_u32[0], 32);
+
+ error = fwnode_property_read_u32_array(node, "prop-u32", array_u32, 2);
+ KUNIT_EXPECT_NE(test, error, 0);
+
+ error = fwnode_property_read_u32(node, "no-prop-u32", &val_u32);
+ KUNIT_EXPECT_NE(test, error, 0);
+
+ error = fwnode_property_read_u32_array(node, "no-prop-u32", array_u32, 1);
+ KUNIT_EXPECT_NE(test, error, 0);
+
+ error = fwnode_property_read_u64(node, "prop-u64", &val_u64);
+ KUNIT_EXPECT_EQ(test, error, 0);
+ KUNIT_EXPECT_EQ(test, (int)val_u64, 64);
+
+ error = fwnode_property_read_u64_array(node, "prop-u64", array_u64, 1);
+ KUNIT_EXPECT_EQ(test, error, 0);
+ KUNIT_EXPECT_EQ(test, (int)array_u64[0], 64);
+
+ error = fwnode_property_read_u64_array(node, "prop-u64", array_u64, 2);
+ KUNIT_EXPECT_NE(test, error, 0);
+
+ error = fwnode_property_read_u64(node, "no-prop-u64", &val_u64);
+ KUNIT_EXPECT_NE(test, error, 0);
+
+ error = fwnode_property_read_u64_array(node, "no-prop-u64", array_u64, 1);
+ KUNIT_EXPECT_NE(test, error, 0);
+
+ fwnode_remove_software_node(node);
+}
+
+static void pe_test_uint_arrays(struct kunit *test)
+{
+ static const u8 a_u8[16] = { 8, 9 };
+ static const u16 a_u16[16] = { 16, 17 };
+ static const u32 a_u32[16] = { 32, 33 };
+ static const u64 a_u64[16] = { 64, 65 };
+ static const struct property_entry entries[] = {
+ PROPERTY_ENTRY_U8_ARRAY("prop-u8", a_u8),
+ PROPERTY_ENTRY_U16_ARRAY("prop-u16", a_u16),
+ PROPERTY_ENTRY_U32_ARRAY("prop-u32", a_u32),
+ PROPERTY_ENTRY_U64_ARRAY("prop-u64", a_u64),
+ { }
+ };
+
+ struct fwnode_handle *node;
+ u8 val_u8, array_u8[32];
+ u16 val_u16, array_u16[32];
+ u32 val_u32, array_u32[32];
+ u64 val_u64, array_u64[32];
+ int error;
+
+ node = fwnode_create_software_node(entries, NULL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, node);
+
+ error = fwnode_property_read_u8(node, "prop-u8", &val_u8);
+ KUNIT_EXPECT_EQ(test, error, 0);
+ KUNIT_EXPECT_EQ(test, (int)val_u8, 8);
+
+ error = fwnode_property_read_u8_array(node, "prop-u8", array_u8, 1);
+ KUNIT_EXPECT_EQ(test, error, 0);
+ KUNIT_EXPECT_EQ(test, (int)array_u8[0], 8);
+
+ error = fwnode_property_read_u8_array(node, "prop-u8", array_u8, 2);
+ KUNIT_EXPECT_EQ(test, error, 0);
+ KUNIT_EXPECT_EQ(test, (int)array_u8[0], 8);
+ KUNIT_EXPECT_EQ(test, (int)array_u8[1], 9);
+
+ error = fwnode_property_read_u8_array(node, "prop-u8", array_u8, 17);
+ KUNIT_EXPECT_NE(test, error, 0);
+
+ error = fwnode_property_read_u8(node, "no-prop-u8", &val_u8);
+ KUNIT_EXPECT_NE(test, error, 0);
+
+ error = fwnode_property_read_u8_array(node, "no-prop-u8", array_u8, 1);
+ KUNIT_EXPECT_NE(test, error, 0);
+
+ error = fwnode_property_read_u16(node, "prop-u16", &val_u16);
+ KUNIT_EXPECT_EQ(test, error, 0);
+ KUNIT_EXPECT_EQ(test, (int)val_u16, 16);
+
+ error = fwnode_property_read_u16_array(node, "prop-u16", array_u16, 1);
+ KUNIT_EXPECT_EQ(test, error, 0);
+ KUNIT_EXPECT_EQ(test, (int)array_u16[0], 16);
+
+ error = fwnode_property_read_u16_array(node, "prop-u16", array_u16, 2);
+ KUNIT_EXPECT_EQ(test, error, 0);
+ KUNIT_EXPECT_EQ(test, (int)array_u16[0], 16);
+ KUNIT_EXPECT_EQ(test, (int)array_u16[1], 17);
+
+ error = fwnode_property_read_u16_array(node, "prop-u16", array_u16, 17);
+ KUNIT_EXPECT_NE(test, error, 0);
+
+ error = fwnode_property_read_u16(node, "no-prop-u16", &val_u16);
+ KUNIT_EXPECT_NE(test, error, 0);
+
+ error = fwnode_property_read_u16_array(node, "no-prop-u16", array_u16, 1);
+ KUNIT_EXPECT_NE(test, error, 0);
+
+ error = fwnode_property_read_u32(node, "prop-u32", &val_u32);
+ KUNIT_EXPECT_EQ(test, error, 0);
+ KUNIT_EXPECT_EQ(test, (int)val_u32, 32);
+
+ error = fwnode_property_read_u32_array(node, "prop-u32", array_u32, 1);
+ KUNIT_EXPECT_EQ(test, error, 0);
+ KUNIT_EXPECT_EQ(test, (int)array_u32[0], 32);
+
+ error = fwnode_property_read_u32_array(node, "prop-u32", array_u32, 2);
+ KUNIT_EXPECT_EQ(test, error, 0);
+ KUNIT_EXPECT_EQ(test, (int)array_u32[0], 32);
+ KUNIT_EXPECT_EQ(test, (int)array_u32[1], 33);
+
+ error = fwnode_property_read_u32_array(node, "prop-u32", array_u32, 17);
+ KUNIT_EXPECT_NE(test, error, 0);
+
+ error = fwnode_property_read_u32(node, "no-prop-u32", &val_u32);
+ KUNIT_EXPECT_NE(test, error, 0);
+
+ error = fwnode_property_read_u32_array(node, "no-prop-u32", array_u32, 1);
+ KUNIT_EXPECT_NE(test, error, 0);
+
+ error = fwnode_property_read_u64(node, "prop-u64", &val_u64);
+ KUNIT_EXPECT_EQ(test, error, 0);
+ KUNIT_EXPECT_EQ(test, (int)val_u64, 64);
+
+ error = fwnode_property_read_u64_array(node, "prop-u64", array_u64, 1);
+ KUNIT_EXPECT_EQ(test, error, 0);
+ KUNIT_EXPECT_EQ(test, (int)array_u64[0], 64);
+
+ error = fwnode_property_read_u64_array(node, "prop-u64", array_u64, 2);
+ KUNIT_EXPECT_EQ(test, error, 0);
+ KUNIT_EXPECT_EQ(test, (int)array_u64[0], 64);
+ KUNIT_EXPECT_EQ(test, (int)array_u64[1], 65);
+
+ error = fwnode_property_read_u64_array(node, "prop-u64", array_u64, 17);
+ KUNIT_EXPECT_NE(test, error, 0);
+
+ error = fwnode_property_read_u64(node, "no-prop-u64", &val_u64);
+ KUNIT_EXPECT_NE(test, error, 0);
+
+ error = fwnode_property_read_u64_array(node, "no-prop-u64", array_u64, 1);
+ KUNIT_EXPECT_NE(test, error, 0);
+
+ fwnode_remove_software_node(node);
+}
+
+static void pe_test_strings(struct kunit *test)
+{
+ static const char *strings[] = {
+ "string-a",
+ "string-b",
+ };
+
+ static const struct property_entry entries[] = {
+ PROPERTY_ENTRY_STRING("str", "single"),
+ PROPERTY_ENTRY_STRING("empty", ""),
+ PROPERTY_ENTRY_STRING_ARRAY("strs", strings),
+ { }
+ };
+
+ struct fwnode_handle *node;
+ const char *str;
+ const char *strs[10];
+ int error;
+
+ node = fwnode_create_software_node(entries, NULL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, node);
+
+ error = fwnode_property_read_string(node, "str", &str);
+ KUNIT_EXPECT_EQ(test, error, 0);
+ KUNIT_EXPECT_STREQ(test, str, "single");
+
+ error = fwnode_property_read_string_array(node, "str", strs, 1);
+ KUNIT_EXPECT_EQ(test, error, 1);
+ KUNIT_EXPECT_STREQ(test, strs[0], "single");
+
+ /* asking for more data returns what we have */
+ error = fwnode_property_read_string_array(node, "str", strs, 2);
+ KUNIT_EXPECT_EQ(test, error, 1);
+ KUNIT_EXPECT_STREQ(test, strs[0], "single");
+
+ error = fwnode_property_read_string(node, "no-str", &str);
+ KUNIT_EXPECT_NE(test, error, 0);
+
+ error = fwnode_property_read_string_array(node, "no-str", strs, 1);
+ KUNIT_EXPECT_LT(test, error, 0);
+
+ error = fwnode_property_read_string(node, "empty", &str);
+ KUNIT_EXPECT_EQ(test, error, 0);
+ KUNIT_EXPECT_STREQ(test, str, "");
+
+ error = fwnode_property_read_string_array(node, "strs", strs, 3);
+ KUNIT_EXPECT_EQ(test, error, 2);
+ KUNIT_EXPECT_STREQ(test, strs[0], "string-a");
+ KUNIT_EXPECT_STREQ(test, strs[1], "string-b");
+
+ error = fwnode_property_read_string_array(node, "strs", strs, 1);
+ KUNIT_EXPECT_EQ(test, error, 1);
+ KUNIT_EXPECT_STREQ(test, strs[0], "string-a");
+
+ /* NULL argument -> returns size */
+ error = fwnode_property_read_string_array(node, "strs", NULL, 0);
+ KUNIT_EXPECT_EQ(test, error, 2);
+
+ /* accessing array as single value */
+ error = fwnode_property_read_string(node, "strs", &str);
+ KUNIT_EXPECT_EQ(test, error, 0);
+ KUNIT_EXPECT_STREQ(test, str, "string-a");
+
+ fwnode_remove_software_node(node);
+}
+
+static void pe_test_bool(struct kunit *test)
+{
+ static const struct property_entry entries[] = {
+ PROPERTY_ENTRY_BOOL("prop"),
+ { }
+ };
+
+ struct fwnode_handle *node;
+
+ node = fwnode_create_software_node(entries, NULL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, node);
+
+ KUNIT_EXPECT_TRUE(test, fwnode_property_read_bool(node, "prop"));
+ KUNIT_EXPECT_FALSE(test, fwnode_property_read_bool(node, "not-prop"));
+
+ fwnode_remove_software_node(node);
+}
+
+/* Verifies that small U8 array is stored inline when property is copied */
+static void pe_test_move_inline_u8(struct kunit *test)
+{
+ static const u8 u8_array_small[8] = { 1, 2, 3, 4 };
+ static const u8 u8_array_big[128] = { 5, 6, 7, 8 };
+ static const struct property_entry entries[] = {
+ PROPERTY_ENTRY_U8_ARRAY("small", u8_array_small),
+ PROPERTY_ENTRY_U8_ARRAY("big", u8_array_big),
+ { }
+ };
+
+ struct property_entry *copy;
+ const u8 *data_ptr;
+
+ copy = property_entries_dup(entries);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, copy);
+
+ KUNIT_EXPECT_TRUE(test, copy[0].is_inline);
+ data_ptr = (u8 *)&copy[0].value;
+ KUNIT_EXPECT_EQ(test, (int)data_ptr[0], 1);
+ KUNIT_EXPECT_EQ(test, (int)data_ptr[1], 2);
+
+ KUNIT_EXPECT_FALSE(test, copy[1].is_inline);
+ data_ptr = copy[1].pointer;
+ KUNIT_EXPECT_EQ(test, (int)data_ptr[0], 5);
+ KUNIT_EXPECT_EQ(test, (int)data_ptr[1], 6);
+
+ property_entries_free(copy);
+}
+
+/* Verifies that single string array is stored inline when property is copied */
+static void pe_test_move_inline_str(struct kunit *test)
+{
+ static char *str_array_small[] = { "a" };
+ static char *str_array_big[] = { "b", "c", "d", "e" };
+ static char *str_array_small_empty[] = { "" };
+ static struct property_entry entries[] = {
+ PROPERTY_ENTRY_STRING_ARRAY("small", str_array_small),
+ PROPERTY_ENTRY_STRING_ARRAY("big", str_array_big),
+ PROPERTY_ENTRY_STRING_ARRAY("small-empty", str_array_small_empty),
+ { }
+ };
+
+ struct property_entry *copy;
+ const char * const *data_ptr;
+
+ copy = property_entries_dup(entries);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, copy);
+
+ KUNIT_EXPECT_TRUE(test, copy[0].is_inline);
+ KUNIT_EXPECT_STREQ(test, copy[0].value.str[0], "a");
+
+ KUNIT_EXPECT_FALSE(test, copy[1].is_inline);
+ data_ptr = copy[1].pointer;
+ KUNIT_EXPECT_STREQ(test, data_ptr[0], "b");
+ KUNIT_EXPECT_STREQ(test, data_ptr[1], "c");
+
+ KUNIT_EXPECT_TRUE(test, copy[2].is_inline);
+ KUNIT_EXPECT_STREQ(test, copy[2].value.str[0], "");
+
+ property_entries_free(copy);
+}
+
+/* Handling of reference properties */
+static void pe_test_reference(struct kunit *test)
+{
+ static const struct software_node nodes[] = {
+ { .name = "1", },
+ { .name = "2", },
+ { }
+ };
+
+ static const struct software_node_ref_args refs[] = {
+ {
+ .node = &nodes[0],
+ .nargs = 0,
+ },
+ {
+ .node = &nodes[1],
+ .nargs = 2,
+ .args = { 3, 4 },
+ },
+ };
+
+ const struct property_entry entries[] = {
+ PROPERTY_ENTRY_REF("ref-1", &nodes[0]),
+ PROPERTY_ENTRY_REF("ref-2", &nodes[1], 1, 2),
+ PROPERTY_ENTRY_REF_ARRAY("ref-3", refs),
+ { }
+ };
+
+ struct fwnode_handle *node;
+ struct fwnode_reference_args ref;
+ int error;
+
+ error = software_node_register_nodes(nodes);
+ KUNIT_ASSERT_EQ(test, error, 0);
+
+ node = fwnode_create_software_node(entries, NULL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, node);
+
+ error = fwnode_property_get_reference_args(node, "ref-1", NULL,
+ 0, 0, &ref);
+ KUNIT_ASSERT_EQ(test, error, 0);
+ KUNIT_EXPECT_PTR_EQ(test, to_software_node(ref.fwnode), &nodes[0]);
+ KUNIT_EXPECT_EQ(test, ref.nargs, 0U);
+
+ /* wrong index */
+ error = fwnode_property_get_reference_args(node, "ref-1", NULL,
+ 0, 1, &ref);
+ KUNIT_EXPECT_NE(test, error, 0);
+
+ error = fwnode_property_get_reference_args(node, "ref-2", NULL,
+ 1, 0, &ref);
+ KUNIT_ASSERT_EQ(test, error, 0);
+ KUNIT_EXPECT_PTR_EQ(test, to_software_node(ref.fwnode), &nodes[1]);
+ KUNIT_EXPECT_EQ(test, ref.nargs, 1U);
+ KUNIT_EXPECT_EQ(test, ref.args[0], 1LLU);
+
+ /* asking for more args, padded with zero data */
+ error = fwnode_property_get_reference_args(node, "ref-2", NULL,
+ 3, 0, &ref);
+ KUNIT_ASSERT_EQ(test, error, 0);
+ KUNIT_EXPECT_PTR_EQ(test, to_software_node(ref.fwnode), &nodes[1]);
+ KUNIT_EXPECT_EQ(test, ref.nargs, 3U);
+ KUNIT_EXPECT_EQ(test, ref.args[0], 1LLU);
+ KUNIT_EXPECT_EQ(test, ref.args[1], 2LLU);
+ KUNIT_EXPECT_EQ(test, ref.args[2], 0LLU);
+
+ /* wrong index */
+ error = fwnode_property_get_reference_args(node, "ref-2", NULL,
+ 2, 1, &ref);
+ KUNIT_EXPECT_NE(test, error, 0);
+
+ /* array of references */
+ error = fwnode_property_get_reference_args(node, "ref-3", NULL,
+ 0, 0, &ref);
+ KUNIT_ASSERT_EQ(test, error, 0);
+ KUNIT_EXPECT_PTR_EQ(test, to_software_node(ref.fwnode), &nodes[0]);
+ KUNIT_EXPECT_EQ(test, ref.nargs, 0U);
+
+ /* second reference in the array */
+ error = fwnode_property_get_reference_args(node, "ref-3", NULL,
+ 2, 1, &ref);
+ KUNIT_ASSERT_EQ(test, error, 0);
+ KUNIT_EXPECT_PTR_EQ(test, to_software_node(ref.fwnode), &nodes[1]);
+ KUNIT_EXPECT_EQ(test, ref.nargs, 2U);
+ KUNIT_EXPECT_EQ(test, ref.args[0], 3LLU);
+ KUNIT_EXPECT_EQ(test, ref.args[1], 4LLU);
+
+ /* wrong index */
+ error = fwnode_property_get_reference_args(node, "ref-1", NULL,
+ 0, 2, &ref);
+ KUNIT_EXPECT_NE(test, error, 0);
+
+ fwnode_remove_software_node(node);
+ software_node_unregister_nodes(nodes);
+}
+
+static struct kunit_case property_entry_test_cases[] = {
+ KUNIT_CASE(pe_test_uints),
+ KUNIT_CASE(pe_test_uint_arrays),
+ KUNIT_CASE(pe_test_strings),
+ KUNIT_CASE(pe_test_bool),
+ KUNIT_CASE(pe_test_move_inline_u8),
+ KUNIT_CASE(pe_test_move_inline_str),
+ KUNIT_CASE(pe_test_reference),
+ { }
+};
+
+static struct kunit_suite property_entry_test_suite = {
+ .name = "property-entry",
+ .test_cases = property_entry_test_cases,
+};
+
+kunit_test_suite(property_entry_test_suite);
diff --git a/drivers/base/test/test_async_driver_probe.c b/drivers/base/test/test_async_driver_probe.c
index f4b1d8e54daf..3bb7beb127a9 100644
--- a/drivers/base/test/test_async_driver_probe.c
+++ b/drivers/base/test/test_async_driver_probe.c
@@ -44,7 +44,8 @@ static int test_probe(struct platform_device *pdev)
* performing an async init on that node.
*/
if (dev->driver->probe_type == PROBE_PREFER_ASYNCHRONOUS) {
- if (dev_to_node(dev) != numa_node_id()) {
+ if (IS_ENABLED(CONFIG_NUMA) &&
+ dev_to_node(dev) != numa_node_id()) {
dev_warn(dev, "NUMA node mismatch %d != %d\n",
dev_to_node(dev), numa_node_id());
atomic_inc(&warnings);
diff --git a/drivers/base/transport_class.c b/drivers/base/transport_class.c
index 5ed86ded6e6b..ccc86206e508 100644
--- a/drivers/base/transport_class.c
+++ b/drivers/base/transport_class.c
@@ -30,6 +30,10 @@
#include <linux/attribute_container.h>
#include <linux/transport_class.h>
+static int transport_remove_classdev(struct attribute_container *cont,
+ struct device *dev,
+ struct device *classdev);
+
/**
* transport_class_register - register an initial transport class
*
@@ -172,10 +176,11 @@ static int transport_add_class_device(struct attribute_container *cont,
* routine is simply a trigger point used to add the device to the
* system and register attributes for it.
*/
-
-void transport_add_device(struct device *dev)
+int transport_add_device(struct device *dev)
{
- attribute_container_device_trigger(dev, transport_add_class_device);
+ return attribute_container_device_trigger_safe(dev,
+ transport_add_class_device,
+ transport_remove_classdev);
}
EXPORT_SYMBOL_GPL(transport_add_device);
diff --git a/drivers/bcma/driver_chipcommon_b.c b/drivers/bcma/driver_chipcommon_b.c
index 57f10b58b47c..c153c96a6145 100644
--- a/drivers/bcma/driver_chipcommon_b.c
+++ b/drivers/bcma/driver_chipcommon_b.c
@@ -48,7 +48,7 @@ int bcma_core_chipcommon_b_init(struct bcma_drv_cc_b *ccb)
return 0;
ccb->setup_done = 1;
- ccb->mii = ioremap_nocache(ccb->core->addr_s[1], BCMA_CORE_SIZE);
+ ccb->mii = ioremap(ccb->core->addr_s[1], BCMA_CORE_SIZE);
if (!ccb->mii)
return -ENOMEM;
diff --git a/drivers/bcma/driver_pci_host.c b/drivers/bcma/driver_pci_host.c
index c42cec7c7ecc..88a93c266c19 100644
--- a/drivers/bcma/driver_pci_host.c
+++ b/drivers/bcma/driver_pci_host.c
@@ -115,7 +115,7 @@ static int bcma_extpci_read_config(struct bcma_drv_pci *pc, unsigned int dev,
if (unlikely(!addr))
goto out;
err = -ENOMEM;
- mmio = ioremap_nocache(addr, sizeof(val));
+ mmio = ioremap(addr, sizeof(val));
if (!mmio)
goto out;
@@ -180,7 +180,7 @@ static int bcma_extpci_write_config(struct bcma_drv_pci *pc, unsigned int dev,
if (unlikely(!addr))
goto out;
err = -ENOMEM;
- mmio = ioremap_nocache(addr, sizeof(val));
+ mmio = ioremap(addr, sizeof(val));
if (!mmio)
goto out;
@@ -515,7 +515,7 @@ void bcma_core_pci_hostmode_init(struct bcma_drv_pci *pc)
/* Ok, ready to run, register it to the system.
* The following needs change, if we want to port hostmode
* to non-MIPS platform. */
- io_map_base = (unsigned long)ioremap_nocache(pc_host->mem_resource.start,
+ io_map_base = (unsigned long)ioremap(pc_host->mem_resource.start,
resource_size(&pc_host->mem_resource));
pc_host->pci_controller.io_map_base = io_map_base;
set_io_port_base(pc_host->pci_controller.io_map_base);
diff --git a/drivers/bcma/host_soc.c b/drivers/bcma/host_soc.c
index c8073b509a2b..90d5bdc12e03 100644
--- a/drivers/bcma/host_soc.c
+++ b/drivers/bcma/host_soc.c
@@ -172,7 +172,7 @@ int __init bcma_host_soc_register(struct bcma_soc *soc)
/* iomap only first core. We have to read some register on this core
* to scan the bus.
*/
- bus->mmio = ioremap_nocache(BCMA_ADDR_BASE, BCMA_CORE_SIZE * 1);
+ bus->mmio = ioremap(BCMA_ADDR_BASE, BCMA_CORE_SIZE * 1);
if (!bus->mmio)
return -ENOMEM;
diff --git a/drivers/bcma/scan.c b/drivers/bcma/scan.c
index 4a2d1b235fb5..1a942f734188 100644
--- a/drivers/bcma/scan.c
+++ b/drivers/bcma/scan.c
@@ -219,7 +219,7 @@ static s32 bcma_erom_get_mst_port(struct bcma_bus *bus, u32 __iomem **eromptr)
static u32 bcma_erom_get_addr_desc(struct bcma_bus *bus, u32 __iomem **eromptr,
u32 type, u8 port)
{
- u32 addrl, addrh, sizel, sizeh = 0;
+ u32 addrl, addrh, sizeh = 0;
u32 size;
u32 ent = bcma_erom_get_ent(bus, eromptr);
@@ -239,12 +239,9 @@ static u32 bcma_erom_get_addr_desc(struct bcma_bus *bus, u32 __iomem **eromptr,
if ((ent & SCAN_ADDR_SZ) == SCAN_ADDR_SZ_SZD) {
size = bcma_erom_get_ent(bus, eromptr);
- sizel = size & SCAN_SIZE_SZ;
if (size & SCAN_SIZE_SG32)
sizeh = bcma_erom_get_ent(bus, eromptr);
- } else
- sizel = SCAN_ADDR_SZ_BASE <<
- ((ent & SCAN_ADDR_SZ) >> SCAN_ADDR_SZ_SHIFT);
+ }
return addrl;
}
@@ -425,11 +422,11 @@ static int bcma_get_next_core(struct bcma_bus *bus, u32 __iomem **eromptr,
}
}
if (bus->hosttype == BCMA_HOSTTYPE_SOC) {
- core->io_addr = ioremap_nocache(core->addr, BCMA_CORE_SIZE);
+ core->io_addr = ioremap(core->addr, BCMA_CORE_SIZE);
if (!core->io_addr)
return -ENOMEM;
if (core->wrap) {
- core->io_wrap = ioremap_nocache(core->wrap,
+ core->io_wrap = ioremap(core->wrap,
BCMA_CORE_SIZE);
if (!core->io_wrap) {
iounmap(core->io_addr);
@@ -472,7 +469,7 @@ int bcma_bus_scan(struct bcma_bus *bus)
erombase = bcma_scan_read32(bus, 0, BCMA_CC_EROM);
if (bus->hosttype == BCMA_HOSTTYPE_SOC) {
- eromptr = ioremap_nocache(erombase, BCMA_CORE_SIZE);
+ eromptr = ioremap(erombase, BCMA_CORE_SIZE);
if (!eromptr)
return -ENOMEM;
} else {
diff --git a/drivers/block/aoe/aoeblk.c b/drivers/block/aoe/aoeblk.c
index bd19f8af950b..7b32fb673375 100644
--- a/drivers/block/aoe/aoeblk.c
+++ b/drivers/block/aoe/aoeblk.c
@@ -329,6 +329,7 @@ static const struct block_device_operations aoe_bdops = {
.open = aoeblk_open,
.release = aoeblk_release,
.ioctl = aoeblk_ioctl,
+ .compat_ioctl = blkdev_compat_ptr_ioctl,
.getgeo = aoeblk_getgeo,
.owner = THIS_MODULE,
};
diff --git a/drivers/block/floppy.c b/drivers/block/floppy.c
index 485865fd0412..cd3612e4e2e1 100644
--- a/drivers/block/floppy.c
+++ b/drivers/block/floppy.c
@@ -3879,6 +3879,9 @@ static int fd_compat_ioctl(struct block_device *bdev, fmode_t mode, unsigned int
{
int drive = (long)bdev->bd_disk->private_data;
switch (cmd) {
+ case CDROMEJECT: /* CD-ROM eject */
+ case 0x6470: /* SunOS floppy eject */
+
case FDMSGON:
case FDMSGOFF:
case FDSETEMSGTRESH:
diff --git a/drivers/block/null_blk_zoned.c b/drivers/block/null_blk_zoned.c
index 5cf49d9db95e..ed34785dd64b 100644
--- a/drivers/block/null_blk_zoned.c
+++ b/drivers/block/null_blk_zoned.c
@@ -129,11 +129,13 @@ static blk_status_t null_zone_write(struct nullb_cmd *cmd, sector_t sector,
return BLK_STS_IOERR;
case BLK_ZONE_COND_EMPTY:
case BLK_ZONE_COND_IMP_OPEN:
+ case BLK_ZONE_COND_EXP_OPEN:
+ case BLK_ZONE_COND_CLOSED:
/* Writes must be at the write pointer position */
if (sector != zone->wp)
return BLK_STS_IOERR;
- if (zone->cond == BLK_ZONE_COND_EMPTY)
+ if (zone->cond != BLK_ZONE_COND_EXP_OPEN)
zone->cond = BLK_ZONE_COND_IMP_OPEN;
zone->wp += nr_sectors;
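The hunk above extends the write path so explicitly opened and closed zones accept writes too: any non-full zone requires the write to start exactly at the write pointer, and only an explicitly opened zone keeps its condition afterwards. A standalone sketch of that state rule:

enum zcond { Z_EMPTY, Z_IMP_OPEN, Z_EXP_OPEN, Z_CLOSED, Z_FULL };

struct zone { enum zcond cond; unsigned long wp; };

static int zone_write(struct zone *z, unsigned long sector,
		      unsigned long nr_sectors)
{
	if (z->cond == Z_FULL || sector != z->wp)
		return -5;                /* -EIO: full zone or not at wp */
	if (z->cond != Z_EXP_OPEN)        /* explicit open is preserved */
		z->cond = Z_IMP_OPEN;     /* otherwise implicitly opened */
	z->wp += nr_sectors;
	return 0;
}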
diff --git a/drivers/block/paride/pcd.c b/drivers/block/paride/pcd.c
index 636bfea2de6f..117cfc8cd05a 100644
--- a/drivers/block/paride/pcd.c
+++ b/drivers/block/paride/pcd.c
@@ -275,6 +275,9 @@ static const struct block_device_operations pcd_bdops = {
.open = pcd_block_open,
.release = pcd_block_release,
.ioctl = pcd_block_ioctl,
+#ifdef CONFIG_COMPAT
+ .compat_ioctl = blkdev_compat_ptr_ioctl,

+#endif
.check_events = pcd_block_check_events,
};
diff --git a/drivers/block/paride/pd.c b/drivers/block/paride/pd.c
index 6f9ad3fc716f..c0967507d085 100644
--- a/drivers/block/paride/pd.c
+++ b/drivers/block/paride/pd.c
@@ -874,6 +874,7 @@ static const struct block_device_operations pd_fops = {
.open = pd_open,
.release = pd_release,
.ioctl = pd_ioctl,
+ .compat_ioctl = pd_ioctl,
.getgeo = pd_getgeo,
.check_events = pd_check_events,
.revalidate_disk= pd_revalidate
diff --git a/drivers/block/paride/pf.c b/drivers/block/paride/pf.c
index 6b7d4cab3687..bb09f21ce21a 100644
--- a/drivers/block/paride/pf.c
+++ b/drivers/block/paride/pf.c
@@ -276,6 +276,7 @@ static const struct block_device_operations pf_fops = {
.open = pf_open,
.release = pf_release,
.ioctl = pf_ioctl,
+ .compat_ioctl = pf_ioctl,
.getgeo = pf_getgeo,
.check_events = pf_check_events,
};
diff --git a/drivers/block/pktcdvd.c b/drivers/block/pktcdvd.c
index 861fc65a1b75..5f970a7d32c0 100644
--- a/drivers/block/pktcdvd.c
+++ b/drivers/block/pktcdvd.c
@@ -2663,28 +2663,6 @@ static int pkt_ioctl(struct block_device *bdev, fmode_t mode, unsigned int cmd,
return ret;
}
-#ifdef CONFIG_COMPAT
-static int pkt_compat_ioctl(struct block_device *bdev, fmode_t mode, unsigned int cmd, unsigned long arg)
-{
- switch (cmd) {
- /* compatible */
- case CDROMEJECT:
- case CDROMMULTISESSION:
- case CDROMREADTOCENTRY:
- case SCSI_IOCTL_SEND_COMMAND:
- return pkt_ioctl(bdev, mode, cmd, (unsigned long)compat_ptr(arg));
-
-
- /* FIXME: no handler so far */
- case CDROM_LAST_WRITTEN:
- /* handled in compat_blkdev_driver_ioctl */
- case CDROM_SEND_PACKET:
- default:
- return -ENOIOCTLCMD;
- }
-}
-#endif
-
static unsigned int pkt_check_events(struct gendisk *disk,
unsigned int clearing)
{
@@ -2706,9 +2684,7 @@ static const struct block_device_operations pktcdvd_ops = {
.open = pkt_open,
.release = pkt_close,
.ioctl = pkt_ioctl,
-#ifdef CONFIG_COMPAT
- .compat_ioctl = pkt_compat_ioctl,
-#endif
+ .compat_ioctl = blkdev_compat_ptr_ioctl,
.check_events = pkt_check_events,
};
diff --git a/drivers/block/sunvdc.c b/drivers/block/sunvdc.c
index 571612e233fe..39aeebc6837d 100644
--- a/drivers/block/sunvdc.c
+++ b/drivers/block/sunvdc.c
@@ -171,6 +171,7 @@ static const struct block_device_operations vdc_fops = {
.owner = THIS_MODULE,
.getgeo = vdc_getgeo,
.ioctl = vdc_ioctl,
+ .compat_ioctl = blkdev_compat_ptr_ioctl,
};
static void vdc_blk_queue_start(struct vdc_port *port)
diff --git a/drivers/block/umem.c b/drivers/block/umem.c
index 1f3f9e0f02a8..4eaf97d7a170 100644
--- a/drivers/block/umem.c
+++ b/drivers/block/umem.c
@@ -827,7 +827,7 @@ static int mm_pci_probe(struct pci_dev *dev, const struct pci_device_id *id)
goto failed_req_csr;
}
- card->csr_remap = ioremap_nocache(csr_base, csr_len);
+ card->csr_remap = ioremap(csr_base, csr_len);
if (!card->csr_remap) {
dev_printk(KERN_ERR, &card->dev->dev,
"Unable to remap memory region\n");
diff --git a/drivers/block/virtio_blk.c b/drivers/block/virtio_blk.c
index 7ffd719d89de..fbbf18ac1d5d 100644
--- a/drivers/block/virtio_blk.c
+++ b/drivers/block/virtio_blk.c
@@ -405,6 +405,9 @@ static int virtblk_getgeo(struct block_device *bd, struct hd_geometry *geo)
static const struct block_device_operations virtblk_fops = {
.ioctl = virtblk_ioctl,
+#ifdef CONFIG_COMPAT
+ .compat_ioctl = blkdev_compat_ptr_ioctl,
+#endif
.owner = THIS_MODULE,
.getgeo = virtblk_getgeo,
};
diff --git a/drivers/block/xen-blkfront.c b/drivers/block/xen-blkfront.c
index c02be06c5299..57d50c5ba309 100644
--- a/drivers/block/xen-blkfront.c
+++ b/drivers/block/xen-blkfront.c
@@ -2632,6 +2632,7 @@ static const struct block_device_operations xlvbd_block_fops =
.release = blkif_release,
.getgeo = blkif_getgeo,
.ioctl = blkif_ioctl,
+ .compat_ioctl = blkdev_compat_ptr_ioctl,
};
diff --git a/drivers/block/zram/zram_drv.c b/drivers/block/zram/zram_drv.c
index 4285e75e52c3..1bdb5793842b 100644
--- a/drivers/block/zram/zram_drv.c
+++ b/drivers/block/zram/zram_drv.c
@@ -207,14 +207,17 @@ static inline void zram_fill_page(void *ptr, unsigned long len,
static bool page_same_filled(void *ptr, unsigned long *element)
{
- unsigned int pos;
unsigned long *page;
unsigned long val;
+ unsigned int pos, last_pos = PAGE_SIZE / sizeof(*page) - 1;
page = (unsigned long *)ptr;
val = page[0];
- for (pos = 1; pos < PAGE_SIZE / sizeof(*page); pos++) {
+ if (val != page[last_pos])
+ return false;
+
+ for (pos = 1; pos < last_pos; pos++) {
if (val != page[pos])
return false;
}
@@ -626,7 +629,7 @@ static ssize_t writeback_store(struct device *dev,
struct bio bio;
struct bio_vec bio_vec;
struct page *page;
- ssize_t ret;
+ ssize_t ret = len;
int mode;
unsigned long blk_idx = 0;
@@ -762,7 +765,6 @@ next:
if (blk_idx)
free_block_bdev(zram, blk_idx);
- ret = len;
__free_page(page);
release_init_lock:
up_read(&zram->init_lock);
diff --git a/drivers/bluetooth/btbcm.c b/drivers/bluetooth/btbcm.c
index 8e05706fe5d9..1f498f358f60 100644
--- a/drivers/bluetooth/btbcm.c
+++ b/drivers/bluetooth/btbcm.c
@@ -36,6 +36,7 @@ int btbcm_check_bdaddr(struct hci_dev *hdev)
HCI_INIT_TIMEOUT);
if (IS_ERR(skb)) {
int err = PTR_ERR(skb);
+
bt_dev_err(hdev, "BCM: Reading device address failed (%d)", err);
return err;
}
@@ -107,6 +108,52 @@ int btbcm_set_bdaddr(struct hci_dev *hdev, const bdaddr_t *bdaddr)
}
EXPORT_SYMBOL_GPL(btbcm_set_bdaddr);
+int btbcm_read_pcm_int_params(struct hci_dev *hdev,
+ struct bcm_set_pcm_int_params *params)
+{
+ struct sk_buff *skb;
+ int err = 0;
+
+ skb = __hci_cmd_sync(hdev, 0xfc1d, 0, NULL, HCI_INIT_TIMEOUT);
+ if (IS_ERR(skb)) {
+ err = PTR_ERR(skb);
+ bt_dev_err(hdev, "BCM: Read PCM int params failed (%d)", err);
+ return err;
+ }
+
+ if (skb->len != 6 || skb->data[0]) {
+ bt_dev_err(hdev, "BCM: Read PCM int params length mismatch");
+ kfree_skb(skb);
+ return -EIO;
+ }
+
+ if (params)
+ memcpy(params, skb->data + 1, 5);
+
+ kfree_skb(skb);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(btbcm_read_pcm_int_params);
+
+int btbcm_write_pcm_int_params(struct hci_dev *hdev,
+ const struct bcm_set_pcm_int_params *params)
+{
+ struct sk_buff *skb;
+ int err;
+
+ skb = __hci_cmd_sync(hdev, 0xfc1c, 5, params, HCI_INIT_TIMEOUT);
+ if (IS_ERR(skb)) {
+ err = PTR_ERR(skb);
+ bt_dev_err(hdev, "BCM: Write PCM int params failed (%d)", err);
+ return err;
+ }
+ kfree_skb(skb);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(btbcm_write_pcm_int_params);
+
int btbcm_patchram(struct hci_dev *hdev, const struct firmware *fw)
{
const struct hci_command_hdr *cmd;
@@ -177,6 +224,7 @@ static int btbcm_reset(struct hci_dev *hdev)
skb = __hci_cmd_sync(hdev, HCI_OP_RESET, 0, NULL, HCI_INIT_TIMEOUT);
if (IS_ERR(skb)) {
int err = PTR_ERR(skb);
+
bt_dev_err(hdev, "BCM: Reset failed (%d)", err);
return err;
}
diff --git a/drivers/bluetooth/btbcm.h b/drivers/bluetooth/btbcm.h
index d204be8a84bf..014ef847a486 100644
--- a/drivers/bluetooth/btbcm.h
+++ b/drivers/bluetooth/btbcm.h
@@ -54,6 +54,10 @@ struct bcm_set_pcm_format_params {
int btbcm_check_bdaddr(struct hci_dev *hdev);
int btbcm_set_bdaddr(struct hci_dev *hdev, const bdaddr_t *bdaddr);
int btbcm_patchram(struct hci_dev *hdev, const struct firmware *fw);
+int btbcm_read_pcm_int_params(struct hci_dev *hdev,
+ struct bcm_set_pcm_int_params *params);
+int btbcm_write_pcm_int_params(struct hci_dev *hdev,
+ const struct bcm_set_pcm_int_params *params);
int btbcm_setup_patchram(struct hci_dev *hdev);
int btbcm_setup_apple(struct hci_dev *hdev);
@@ -74,6 +78,18 @@ static inline int btbcm_set_bdaddr(struct hci_dev *hdev, const bdaddr_t *bdaddr)
return -EOPNOTSUPP;
}
+static inline int btbcm_read_pcm_int_params(struct hci_dev *hdev,
+ struct bcm_set_pcm_int_params *params)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline int btbcm_write_pcm_int_params(struct hci_dev *hdev,
+ const struct bcm_set_pcm_int_params *params)
+{
+ return -EOPNOTSUPP;
+}
+
static inline int btbcm_patchram(struct hci_dev *hdev, const struct firmware *fw)
{
return -EOPNOTSUPP;
diff --git a/drivers/bluetooth/btrtl.c b/drivers/bluetooth/btrtl.c
index f838537f9f89..577cfa3329db 100644
--- a/drivers/bluetooth/btrtl.c
+++ b/drivers/bluetooth/btrtl.c
@@ -370,11 +370,11 @@ static int rtlbt_parse_firmware(struct hci_dev *hdev,
* the end.
*/
len = patch_length;
- buf = kmemdup(btrtl_dev->fw_data + patch_offset, patch_length,
- GFP_KERNEL);
+ buf = kvmalloc(patch_length, GFP_KERNEL);
if (!buf)
return -ENOMEM;
+ memcpy(buf, btrtl_dev->fw_data + patch_offset, patch_length - 4);
memcpy(buf + patch_length - 4, &epatch_info->fw_version, 4);
*_buf = buf;
@@ -460,8 +460,10 @@ static int rtl_load_file(struct hci_dev *hdev, const char *name, u8 **buff)
if (ret < 0)
return ret;
ret = fw->size;
- *buff = kmemdup(fw->data, ret, GFP_KERNEL);
- if (!*buff)
+ *buff = kvmalloc(fw->size, GFP_KERNEL);
+ if (*buff)
+ memcpy(*buff, fw->data, ret);
+ else
ret = -ENOMEM;
release_firmware(fw);
@@ -499,14 +501,14 @@ static int btrtl_setup_rtl8723b(struct hci_dev *hdev,
goto out;
if (btrtl_dev->cfg_len > 0) {
- tbuff = kzalloc(ret + btrtl_dev->cfg_len, GFP_KERNEL);
+ tbuff = kvzalloc(ret + btrtl_dev->cfg_len, GFP_KERNEL);
if (!tbuff) {
ret = -ENOMEM;
goto out;
}
memcpy(tbuff, fw_data, ret);
- kfree(fw_data);
+ kvfree(fw_data);
memcpy(tbuff + ret, btrtl_dev->cfg_data, btrtl_dev->cfg_len);
ret += btrtl_dev->cfg_len;
@@ -519,14 +521,14 @@ static int btrtl_setup_rtl8723b(struct hci_dev *hdev,
ret = rtl_download_firmware(hdev, fw_data, ret);
out:
- kfree(fw_data);
+ kvfree(fw_data);
return ret;
}
void btrtl_free(struct btrtl_device_info *btrtl_dev)
{
- kfree(btrtl_dev->fw_data);
- kfree(btrtl_dev->cfg_data);
+ kvfree(btrtl_dev->fw_data);
+ kvfree(btrtl_dev->cfg_data);
kfree(btrtl_dev);
}
EXPORT_SYMBOL_GPL(btrtl_free);
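The kmemdup()/kzalloc() to kvmalloc()/kvzalloc() switch matters because firmware images can be larger than kmalloc() comfortably serves; kvmalloc() may fall back to vmalloc(), and such buffers must then be released with kvfree(), never kfree(). A minimal sketch of the pattern:

	#include <linux/mm.h>		/* kvmalloc, kvfree */
	#include <linux/string.h>	/* memcpy */

	/* Copy a firmware-sized blob; the caller releases it with kvfree(). */
	static void *copy_fw_blob(const void *src, size_t len)
	{
		void *buf = kvmalloc(len, GFP_KERNEL);

		if (!buf)
			return NULL;

		memcpy(buf, src, len);
		return buf;
	}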
diff --git a/drivers/bluetooth/btsdio.c b/drivers/bluetooth/btsdio.c
index fd9571d5fdac..199e8f7d426d 100644
--- a/drivers/bluetooth/btsdio.c
+++ b/drivers/bluetooth/btsdio.c
@@ -145,11 +145,20 @@ static int btsdio_rx_packet(struct btsdio_data *data)
data->hdev->stat.byte_rx += len;
- hci_skb_pkt_type(skb) = hdr[3];
-
- err = hci_recv_frame(data->hdev, skb);
- if (err < 0)
- return err;
+ switch (hdr[3]) {
+ case HCI_EVENT_PKT:
+ case HCI_ACLDATA_PKT:
+ case HCI_SCODATA_PKT:
+ case HCI_ISODATA_PKT:
+ hci_skb_pkt_type(skb) = hdr[3];
+ err = hci_recv_frame(data->hdev, skb);
+ if (err < 0)
+ return err;
+ break;
+ default:
+ kfree_skb(skb);
+ return -EINVAL;
+ }
sdio_writeb(data->func, 0x00, REG_PC_RRT, NULL);
diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c
index 70e385987d41..f5924f3e8b8d 100644
--- a/drivers/bluetooth/btusb.c
+++ b/drivers/bluetooth/btusb.c
@@ -266,6 +266,7 @@ static const struct usb_device_id blacklist_table[] = {
{ USB_DEVICE(0x04ca, 0x3015), .driver_info = BTUSB_QCA_ROME },
{ USB_DEVICE(0x04ca, 0x3016), .driver_info = BTUSB_QCA_ROME },
{ USB_DEVICE(0x04ca, 0x301a), .driver_info = BTUSB_QCA_ROME },
+ { USB_DEVICE(0x04ca, 0x3021), .driver_info = BTUSB_QCA_ROME },
{ USB_DEVICE(0x13d3, 0x3491), .driver_info = BTUSB_QCA_ROME },
{ USB_DEVICE(0x13d3, 0x3496), .driver_info = BTUSB_QCA_ROME },
{ USB_DEVICE(0x13d3, 0x3501), .driver_info = BTUSB_QCA_ROME },
@@ -552,9 +553,9 @@ static void btusb_rtl_cmd_timeout(struct hci_dev *hdev)
}
bt_dev_err(hdev, "Reset Realtek device via gpio");
- gpiod_set_value_cansleep(reset_gpio, 0);
- msleep(200);
gpiod_set_value_cansleep(reset_gpio, 1);
+ msleep(200);
+ gpiod_set_value_cansleep(reset_gpio, 0);
}
static inline void btusb_free_frags(struct btusb_data *data)
@@ -2602,7 +2603,7 @@ static void btusb_mtk_wmt_recv(struct urb *urb)
* and being processed the events from there then.
*/
if (test_bit(BTUSB_TX_WAIT_VND_EVT, &data->flags)) {
- data->evt_skb = skb_clone(skb, GFP_KERNEL);
+ data->evt_skb = skb_clone(skb, GFP_ATOMIC);
if (!data->evt_skb)
goto err_out;
}
@@ -2867,7 +2868,7 @@ static int btusb_mtk_setup_firmware(struct hci_dev *hdev, const char *fwname)
err = btusb_mtk_hci_wmt_sync(hdev, &wmt_params);
if (err < 0) {
bt_dev_err(hdev, "Failed to send wmt rst (%d)", err);
- return err;
+ goto err_release_fw;
}
/* Wait a few moments for firmware activation done */
@@ -3832,6 +3833,10 @@ static int btusb_probe(struct usb_interface *intf,
* (DEVICE_REMOTE_WAKEUP)
*/
set_bit(BTUSB_WAKEUP_DISABLE, &data->flags);
+
+ err = usb_autopm_get_interface(intf);
+ if (err < 0)
+ goto out_free_dev;
}
if (id->driver_info & BTUSB_AMP) {
diff --git a/drivers/bluetooth/hci_bcm.c b/drivers/bluetooth/hci_bcm.c
index d2a6a4afdbbb..b236cb11c0dc 100644
--- a/drivers/bluetooth/hci_bcm.c
+++ b/drivers/bluetooth/hci_bcm.c
@@ -13,6 +13,7 @@
#include <linux/module.h>
#include <linux/acpi.h>
#include <linux/of.h>
+#include <linux/of_irq.h>
#include <linux/property.h>
#include <linux/platform_data/x86/apple.h>
#include <linux/platform_device.h>
@@ -48,6 +49,15 @@
#define BCM_NUM_SUPPLIES 2
/**
+ * struct bcm_device_data - device specific data
+ * @no_early_set_baudrate: Disallow set baudrate before driver setup()
+ * @drive_rts_on_open: drive RTS signal on open
+ */
+struct bcm_device_data {
+ bool no_early_set_baudrate;
+ bool drive_rts_on_open;
+};
+
+/**
* struct bcm_device - device driver resources
* @serdev_hu: HCI UART controller struct
* @list: bcm_device_list node
@@ -79,6 +89,7 @@
* @hu: pointer to HCI UART controller struct,
* used to disable flow control during runtime suspend and system sleep
* @is_suspended: whether flow control is currently disabled
+ * @no_early_set_baudrate: don't set_baudrate before setup()
+ * @drive_rts_on_open: drive RTS signal on open
+ * @pcm_int_params: PCM parameters applied during setup(); first byte is
+ *	0xff when unset
*/
struct bcm_device {
/* Must be the first member, hci_serdev.c expects this. */
@@ -112,6 +123,9 @@ struct bcm_device {
struct hci_uart *hu;
bool is_suspended;
#endif
+ bool no_early_set_baudrate;
+ bool drive_rts_on_open;
+ u8 pcm_int_params[5];
};
/* generic bcm uart resources */
@@ -445,11 +459,22 @@ static int bcm_open(struct hci_uart *hu)
out:
if (bcm->dev) {
- hci_uart_set_flow_control(hu, true);
+ if (bcm->dev->drive_rts_on_open)
+ hci_uart_set_flow_control(hu, true);
+
hu->init_speed = bcm->dev->init_speed;
- hu->oper_speed = bcm->dev->oper_speed;
+
+ /* If oper_speed is set, ldisc/serdev will set the baudrate
+ * before calling setup()
+ */
+ if (!bcm->dev->no_early_set_baudrate)
+ hu->oper_speed = bcm->dev->oper_speed;
+
err = bcm_gpio_set_power(bcm->dev, true);
- hci_uart_set_flow_control(hu, false);
+
+ if (bcm->dev->drive_rts_on_open)
+ hci_uart_set_flow_control(hu, false);
+
if (err)
goto err_unset_hu;
}
@@ -565,6 +590,8 @@ static int bcm_setup(struct hci_uart *hu)
/* Operational speed if any */
if (hu->oper_speed)
speed = hu->oper_speed;
+ else if (bcm->dev && bcm->dev->oper_speed)
+ speed = bcm->dev->oper_speed;
else if (hu->proto->oper_speed)
speed = hu->proto->oper_speed;
else
@@ -576,6 +603,16 @@ static int bcm_setup(struct hci_uart *hu)
host_set_baudrate(hu, speed);
}
+ /* PCM parameters if provided */
+ if (bcm->dev && bcm->dev->pcm_int_params[0] != 0xff) {
+ struct bcm_set_pcm_int_params params;
+
+ btbcm_read_pcm_int_params(hu->hdev, &params);
+
+ memcpy(&params, bcm->dev->pcm_int_params, 5);
+ btbcm_write_pcm_int_params(hu->hdev, &params);
+ }
+
finalize:
release_firmware(fw);
@@ -1113,6 +1150,10 @@ static int bcm_acpi_probe(struct bcm_device *dev)
static int bcm_of_probe(struct bcm_device *bdev)
{
device_property_read_u32(bdev->dev, "max-speed", &bdev->oper_speed);
+ device_property_read_u8_array(bdev->dev, "brcm,bt-pcm-int-params",
+ bdev->pcm_int_params, 5);
+ bdev->irq = of_irq_get_byname(bdev->dev->of_node, "host-wakeup");
+
return 0;
}
@@ -1128,6 +1169,9 @@ static int bcm_probe(struct platform_device *pdev)
dev->dev = &pdev->dev;
dev->irq = platform_get_irq(pdev, 0);
+ /* Initialize routing field to an unused value */
+ dev->pcm_int_params[0] = 0xff;
+
if (has_acpi_companion(&pdev->dev)) {
ret = bcm_acpi_probe(dev);
if (ret)
@@ -1374,6 +1418,7 @@ static struct platform_driver bcm_driver = {
static int bcm_serdev_probe(struct serdev_device *serdev)
{
struct bcm_device *bcmdev;
+ const struct bcm_device_data *data;
int err;
bcmdev = devm_kzalloc(&serdev->dev, sizeof(*bcmdev), GFP_KERNEL);
@@ -1387,6 +1432,9 @@ static int bcm_serdev_probe(struct serdev_device *serdev)
bcmdev->serdev_hu.serdev = serdev;
serdev_device_set_drvdata(serdev, bcmdev);
+ /* Initialize routing field to an unused value */
+ bcmdev->pcm_int_params[0] = 0xff;
+
if (has_acpi_companion(&serdev->dev))
err = bcm_acpi_probe(bcmdev);
else
@@ -1408,6 +1456,12 @@ static int bcm_serdev_probe(struct serdev_device *serdev)
if (err)
dev_err(&serdev->dev, "Failed to power down\n");
+ data = device_get_match_data(bcmdev->dev);
+ if (data) {
+ bcmdev->no_early_set_baudrate = data->no_early_set_baudrate;
+ bcmdev->drive_rts_on_open = data->drive_rts_on_open;
+ }
+
return hci_uart_register_device(&bcmdev->serdev_hu, &bcm_proto);
}
@@ -1419,12 +1473,21 @@ static void bcm_serdev_remove(struct serdev_device *serdev)
}
#ifdef CONFIG_OF
+static struct bcm_device_data bcm4354_device_data = {
+ .no_early_set_baudrate = true,
+};
+
+static struct bcm_device_data bcm43438_device_data = {
+ .drive_rts_on_open = true,
+};
+
static const struct of_device_id bcm_bluetooth_of_match[] = {
{ .compatible = "brcm,bcm20702a1" },
+ { .compatible = "brcm,bcm4329-bt" },
{ .compatible = "brcm,bcm4345c5" },
{ .compatible = "brcm,bcm4330-bt" },
- { .compatible = "brcm,bcm43438-bt" },
- { .compatible = "brcm,bcm43540-bt" },
+ { .compatible = "brcm,bcm43438-bt", .data = &bcm43438_device_data },
+ { .compatible = "brcm,bcm43540-bt", .data = &bcm4354_device_data },
{ .compatible = "brcm,bcm4335a0" },
{ },
};
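For reference, the per-chip quirks above are selected purely by compatible string. A board that needs non-default PCM routing would add the new property to its Bluetooth node, for instance (illustrative values only): brcm,bt-pcm-int-params = /bits/ 8 <0x00 0x02 0x00 0x01 0x01>;, five bytes matching the bcm_set_pcm_int_params layout read in bcm_of_probe().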
diff --git a/drivers/bluetooth/hci_h4.c b/drivers/bluetooth/hci_h4.c
index 19ba52005009..6dc1fbeb564b 100644
--- a/drivers/bluetooth/hci_h4.c
+++ b/drivers/bluetooth/hci_h4.c
@@ -103,6 +103,7 @@ static const struct h4_recv_pkt h4_recv_pkts[] = {
{ H4_RECV_ACL, .recv = hci_recv_frame },
{ H4_RECV_SCO, .recv = hci_recv_frame },
{ H4_RECV_EVENT, .recv = hci_recv_frame },
+ { H4_RECV_ISO, .recv = hci_recv_frame },
};
/* Recv data */
diff --git a/drivers/bluetooth/hci_h5.c b/drivers/bluetooth/hci_h5.c
index dacf297baf59..0b14547482a7 100644
--- a/drivers/bluetooth/hci_h5.c
+++ b/drivers/bluetooth/hci_h5.c
@@ -385,6 +385,7 @@ static void h5_complete_rx_pkt(struct hci_uart *hu)
case HCI_EVENT_PKT:
case HCI_ACLDATA_PKT:
case HCI_SCODATA_PKT:
+ case HCI_ISODATA_PKT:
hci_skb_pkt_type(h5->rx_skb) = H5_HDR_PKT_TYPE(hdr);
/* Remove Three-wire header */
@@ -594,6 +595,7 @@ static int h5_enqueue(struct hci_uart *hu, struct sk_buff *skb)
break;
case HCI_SCODATA_PKT:
+ case HCI_ISODATA_PKT:
skb_queue_tail(&h5->unrel, skb);
break;
@@ -636,6 +638,7 @@ static bool valid_packet_type(u8 type)
case HCI_ACLDATA_PKT:
case HCI_COMMAND_PKT:
case HCI_SCODATA_PKT:
+ case HCI_ISODATA_PKT:
case HCI_3WIRE_LINK_PKT:
case HCI_3WIRE_ACK_PKT:
return true;
diff --git a/drivers/bluetooth/hci_qca.c b/drivers/bluetooth/hci_qca.c
index f10bdf8e1fc5..d6e0c99ee5eb 100644
--- a/drivers/bluetooth/hci_qca.c
+++ b/drivers/bluetooth/hci_qca.c
@@ -20,6 +20,7 @@
#include <linux/completion.h>
#include <linux/debugfs.h>
#include <linux/delay.h>
+#include <linux/devcoredump.h>
#include <linux/device.h>
#include <linux/gpio/consumer.h>
#include <linux/mod_devicetable.h>
@@ -46,6 +47,7 @@
#define IBS_BTSOC_TX_IDLE_TIMEOUT_MS 40
#define IBS_HOST_TX_IDLE_TIMEOUT_MS 2000
#define CMD_TRANS_TIMEOUT_MS 100
+#define MEMDUMP_TIMEOUT_MS 8000
/* susclk rate */
#define SUSCLK_RATE_32KHZ 32768
@@ -53,12 +55,24 @@
/* Controller debug log header */
#define QCA_DEBUG_HANDLE 0x2EDC
+/* max retry count when init fails */
+#define MAX_INIT_RETRIES 3
+
+/* Controller dump header */
+#define QCA_SSR_DUMP_HANDLE 0x0108
+#define QCA_DUMP_PACKET_SIZE 255
+#define QCA_LAST_SEQUENCE_NUM 0xFFFF
+#define QCA_CRASHBYTE_PACKET_LEN 1096
+#define QCA_MEMDUMP_BYTE 0xFB
+
enum qca_flags {
QCA_IBS_ENABLED,
QCA_DROP_VENDOR_EVENT,
QCA_SUSPENDING,
+ QCA_MEMDUMP_COLLECTION
};
+
/* HCI_IBS transmit side sleep protocol states */
enum tx_ibs_states {
HCI_IBS_TX_ASLEEP,
@@ -81,11 +95,40 @@ enum hci_ibs_clock_state_vote {
HCI_IBS_RX_VOTE_CLOCK_OFF,
};
+/* Controller memory dump states */
+enum qca_memdump_states {
+ QCA_MEMDUMP_IDLE,
+ QCA_MEMDUMP_COLLECTING,
+ QCA_MEMDUMP_COLLECTED,
+ QCA_MEMDUMP_TIMEOUT,
+};
+
+struct qca_memdump_data {
+ char *memdump_buf_head;
+ char *memdump_buf_tail;
+ u32 current_seq_no;
+ u32 received_dump;
+};
+
+struct qca_memdump_event_hdr {
+ __u8 evt;
+ __u8 plen;
+ __u16 opcode;
+ __u16 seq_no;
+ __u8 reserved;
+} __packed;
+
+struct qca_dump_size {
+ u32 dump_size;
+} __packed;
+
struct qca_data {
struct hci_uart *hu;
struct sk_buff *rx_skb;
struct sk_buff_head txq;
struct sk_buff_head tx_wait_q; /* HCI_IBS wait queue */
+ struct sk_buff_head rx_memdump_q; /* Memdump wait queue */
spinlock_t hci_ibs_lock; /* HCI_IBS state lock */
u8 tx_ibs_state; /* HCI_IBS transmit side power state*/
u8 rx_ibs_state; /* HCI_IBS receive side power state */
@@ -95,14 +138,18 @@ struct qca_data {
u32 tx_idle_delay;
struct timer_list wake_retrans_timer;
u32 wake_retrans;
+ struct timer_list memdump_timer;
struct workqueue_struct *workqueue;
struct work_struct ws_awake_rx;
struct work_struct ws_awake_device;
struct work_struct ws_rx_vote_off;
struct work_struct ws_tx_vote_off;
+ struct work_struct ctrl_memdump_evt;
+ struct qca_memdump_data *qca_memdump;
unsigned long flags;
struct completion drop_ev_comp;
wait_queue_head_t suspend_wait_q;
+ enum qca_memdump_states memdump_state;
/* For debugging purpose */
u64 ibs_sent_wacks;
@@ -167,6 +214,7 @@ static int qca_regulator_enable(struct qca_serdev *qcadev);
static void qca_regulator_disable(struct qca_serdev *qcadev);
static void qca_power_shutdown(struct hci_uart *hu);
static int qca_power_off(struct hci_dev *hdev);
+static void qca_controller_memdump(struct work_struct *work);
static enum qca_btsoc_type qca_soc_type(struct hci_uart *hu)
{
@@ -474,12 +522,28 @@ static void hci_ibs_wake_retrans_timeout(struct timer_list *t)
hci_uart_tx_wakeup(hu);
}
+static void hci_memdump_timeout(struct timer_list *t)
+{
+	struct qca_data *qca = from_timer(qca, t, memdump_timer);
+ struct hci_uart *hu = qca->hu;
+ struct qca_memdump_data *qca_memdump = qca->qca_memdump;
+ char *memdump_buf = qca_memdump->memdump_buf_tail;
+
+ bt_dev_err(hu->hdev, "clearing allocated memory due to memdump timeout");
+ /* Inject hw error event to reset the device and driver. */
+ hci_reset_dev(hu->hdev);
+ vfree(memdump_buf);
+ kfree(qca_memdump);
+ qca->memdump_state = QCA_MEMDUMP_TIMEOUT;
+ del_timer(&qca->memdump_timer);
+ cancel_work_sync(&qca->ctrl_memdump_evt);
+}
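A note on the callback above: from_timer() is container_of() specialized for timer callbacks, so its third argument must name the struct member that timer_setup() was called on (memdump_timer here). A self-contained sketch:

	#include <linux/timer.h>
	#include <linux/printk.h>

	struct demo {
		struct timer_list memdump_timer;
		int payload;
	};

	/* Recover the enclosing structure from the timer_list pointer;
	 * the member name must match the timer_setup() call site. */
	static void demo_timeout(struct timer_list *t)
	{
		struct demo *d = from_timer(d, t, memdump_timer);

		pr_info("payload=%d\n", d->payload);
	}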
+
/* Initialize protocol */
static int qca_open(struct hci_uart *hu)
{
struct qca_serdev *qcadev;
struct qca_data *qca;
- int ret;
BT_DBG("hu %p qca_open", hu);
@@ -492,6 +556,7 @@ static int qca_open(struct hci_uart *hu)
skb_queue_head_init(&qca->txq);
skb_queue_head_init(&qca->tx_wait_q);
+ skb_queue_head_init(&qca->rx_memdump_q);
spin_lock_init(&qca->hci_ibs_lock);
qca->workqueue = alloc_ordered_workqueue("qca_wq", 0);
if (!qca->workqueue) {
@@ -504,7 +569,7 @@ static int qca_open(struct hci_uart *hu)
INIT_WORK(&qca->ws_awake_device, qca_wq_awake_device);
INIT_WORK(&qca->ws_rx_vote_off, qca_wq_serial_rx_clock_vote_off);
INIT_WORK(&qca->ws_tx_vote_off, qca_wq_serial_tx_clock_vote_off);
-
+ INIT_WORK(&qca->ctrl_memdump_evt, qca_controller_memdump);
init_waitqueue_head(&qca->suspend_wait_q);
qca->hu = hu;
@@ -519,23 +584,10 @@ static int qca_open(struct hci_uart *hu)
hu->priv = qca;
if (hu->serdev) {
-
qcadev = serdev_device_get_drvdata(hu->serdev);
- if (!qca_is_wcn399x(qcadev->btsoc_type)) {
- gpiod_set_value_cansleep(qcadev->bt_en, 1);
- /* Controller needs time to bootup. */
- msleep(150);
- } else {
+ if (qca_is_wcn399x(qcadev->btsoc_type)) {
hu->init_speed = qcadev->init_speed;
hu->oper_speed = qcadev->oper_speed;
- ret = qca_regulator_enable(qcadev);
- if (ret) {
- destroy_workqueue(qca->workqueue);
- kfree_skb(qca->rx_skb);
- hu->priv = NULL;
- kfree(qca);
- return ret;
- }
}
}
@@ -544,6 +596,7 @@ static int qca_open(struct hci_uart *hu)
timer_setup(&qca->tx_idle_timer, hci_ibs_tx_idle_timeout, 0);
qca->tx_idle_delay = IBS_HOST_TX_IDLE_TIMEOUT_MS;
+ timer_setup(&qca->memdump_timer, hci_memdump_timeout, 0);
BT_DBG("HCI_UART_QCA open, tx_idle_delay=%u, wake_retrans=%u",
qca->tx_idle_delay, qca->wake_retrans);
@@ -613,7 +666,6 @@ static int qca_flush(struct hci_uart *hu)
/* Close protocol */
static int qca_close(struct hci_uart *hu)
{
- struct qca_serdev *qcadev;
struct qca_data *qca = hu->priv;
BT_DBG("hu %p qca close", hu);
@@ -622,19 +674,14 @@ static int qca_close(struct hci_uart *hu)
skb_queue_purge(&qca->tx_wait_q);
skb_queue_purge(&qca->txq);
+ skb_queue_purge(&qca->rx_memdump_q);
del_timer(&qca->tx_idle_timer);
del_timer(&qca->wake_retrans_timer);
+ del_timer(&qca->memdump_timer);
destroy_workqueue(qca->workqueue);
qca->hu = NULL;
- if (hu->serdev) {
- qcadev = serdev_device_get_drvdata(hu->serdev);
- if (qca_is_wcn399x(qcadev->btsoc_type))
- qca_power_shutdown(hu);
- else
- gpiod_set_value_cansleep(qcadev->bt_en, 0);
-
- }
+ qca_power_shutdown(hu);
kfree_skb(qca->rx_skb);
@@ -900,6 +947,125 @@ static int qca_recv_acl_data(struct hci_dev *hdev, struct sk_buff *skb)
return hci_recv_frame(hdev, skb);
}
+static void qca_controller_memdump(struct work_struct *work)
+{
+ struct qca_data *qca = container_of(work, struct qca_data,
+ ctrl_memdump_evt);
+ struct hci_uart *hu = qca->hu;
+ struct sk_buff *skb;
+ struct qca_memdump_event_hdr *cmd_hdr;
+ struct qca_memdump_data *qca_memdump = qca->qca_memdump;
+ struct qca_dump_size *dump;
+ char *memdump_buf;
+ char nullBuff[QCA_DUMP_PACKET_SIZE] = { 0 };
+ u16 seq_no;
+ u32 dump_size;
+
+ while ((skb = skb_dequeue(&qca->rx_memdump_q))) {
+
+ if (!qca_memdump) {
+ qca_memdump = kzalloc(sizeof(struct qca_memdump_data),
+ GFP_ATOMIC);
+ if (!qca_memdump)
+ return;
+
+ qca->qca_memdump = qca_memdump;
+ }
+
+ qca->memdump_state = QCA_MEMDUMP_COLLECTING;
+ cmd_hdr = (void *) skb->data;
+ seq_no = __le16_to_cpu(cmd_hdr->seq_no);
+ skb_pull(skb, sizeof(struct qca_memdump_event_hdr));
+
+ if (!seq_no) {
+
+			/* This is the first frame of a memdump packet from
+			 * the controller. Disable IBS so the dump is received
+			 * without interruption; the controller ideally needs
+			 * 8 seconds to send the dump, so start a timer to
+			 * handle this asynchronous activity.
+			 */
+ clear_bit(QCA_IBS_ENABLED, &qca->flags);
+ set_bit(QCA_MEMDUMP_COLLECTION, &qca->flags);
+ dump = (void *) skb->data;
+ dump_size = __le32_to_cpu(dump->dump_size);
+ if (!(dump_size)) {
+ bt_dev_err(hu->hdev, "Rx invalid memdump size");
+ kfree_skb(skb);
+ return;
+ }
+
+ bt_dev_info(hu->hdev, "QCA collecting dump of size:%u",
+ dump_size);
+ mod_timer(&qca->memdump_timer, (jiffies +
+ msecs_to_jiffies(MEMDUMP_TIMEOUT_MS)));
+
+ skb_pull(skb, sizeof(dump_size));
+ memdump_buf = vmalloc(dump_size);
+ qca_memdump->memdump_buf_head = memdump_buf;
+ qca_memdump->memdump_buf_tail = memdump_buf;
+ }
+
+ memdump_buf = qca_memdump->memdump_buf_tail;
+
+ /* If sequence no 0 is missed then there is no point in
+ * accepting the other sequences.
+ */
+ if (!memdump_buf) {
+ bt_dev_err(hu->hdev, "QCA: Discarding other packets");
+ kfree(qca_memdump);
+ kfree_skb(skb);
+ qca->qca_memdump = NULL;
+ return;
+ }
+
+		/* There is a chance of missing some packets from
+		 * the controller. In such cases, store dummy
+		 * packets in the buffer.
+		 */
+ while ((seq_no > qca_memdump->current_seq_no + 1) &&
+ seq_no != QCA_LAST_SEQUENCE_NUM) {
+ bt_dev_err(hu->hdev, "QCA controller missed packet:%d",
+ qca_memdump->current_seq_no);
+ memcpy(memdump_buf, nullBuff, QCA_DUMP_PACKET_SIZE);
+ memdump_buf = memdump_buf + QCA_DUMP_PACKET_SIZE;
+ qca_memdump->received_dump += QCA_DUMP_PACKET_SIZE;
+ qca_memdump->current_seq_no++;
+ }
+
+ memcpy(memdump_buf, (unsigned char *) skb->data, skb->len);
+ memdump_buf = memdump_buf + skb->len;
+ qca_memdump->memdump_buf_tail = memdump_buf;
+ qca_memdump->current_seq_no = seq_no + 1;
+ qca_memdump->received_dump += skb->len;
+ qca->qca_memdump = qca_memdump;
+ kfree_skb(skb);
+ if (seq_no == QCA_LAST_SEQUENCE_NUM) {
+ bt_dev_info(hu->hdev, "QCA writing crash dump of size %d bytes",
+ qca_memdump->received_dump);
+ memdump_buf = qca_memdump->memdump_buf_head;
+ dev_coredumpv(&hu->serdev->dev, memdump_buf,
+ qca_memdump->received_dump, GFP_KERNEL);
+ del_timer(&qca->memdump_timer);
+ kfree(qca->qca_memdump);
+ qca->qca_memdump = NULL;
+ qca->memdump_state = QCA_MEMDUMP_COLLECTED;
+ }
+ }
+}
+
+int qca_controller_memdump_event(struct hci_dev *hdev, struct sk_buff *skb)
+{
+ struct hci_uart *hu = hci_get_drvdata(hdev);
+ struct qca_data *qca = hu->priv;
+
+ skb_queue_tail(&qca->rx_memdump_q, skb);
+ queue_work(qca->workqueue, &qca->ctrl_memdump_evt);
+
+ return 0;
+}
+
static int qca_recv_event(struct hci_dev *hdev, struct sk_buff *skb)
{
struct hci_uart *hu = hci_get_drvdata(hdev);
@@ -925,6 +1091,14 @@ static int qca_recv_event(struct hci_dev *hdev, struct sk_buff *skb)
return 0;
}
+	/* We receive the chip memory dump as an event packet with a
+	 * dedicated handler, followed by a hardware error event. When
+	 * this event is received we store the dump in a file before
+	 * closing hci. This dump helps in triaging the issue.
+	 */
+ if ((skb->data[0] == HCI_VENDOR_PKT) &&
+ (get_unaligned_be16(skb->data + 2) == QCA_SSR_DUMP_HANDLE))
+ return qca_controller_memdump_event(hdev, skb);
return hci_recv_frame(hdev, skb);
}
@@ -1203,6 +1377,91 @@ error:
return ret;
}
+static int qca_send_crashbuffer(struct hci_uart *hu)
+{
+ struct qca_data *qca = hu->priv;
+ struct sk_buff *skb;
+
+ skb = bt_skb_alloc(QCA_CRASHBYTE_PACKET_LEN, GFP_KERNEL);
+ if (!skb) {
+ bt_dev_err(hu->hdev, "Failed to allocate memory for skb packet");
+ return -ENOMEM;
+ }
+
+	/* We forcefully crash the controller by sending the 0xfb byte
+	 * 1024 times. Since some of those bytes might be lost, to be on
+	 * the safe side we send 1096 bytes to the SoC.
+	 */
+ memset(skb_put(skb, QCA_CRASHBYTE_PACKET_LEN), QCA_MEMDUMP_BYTE,
+ QCA_CRASHBYTE_PACKET_LEN);
+ hci_skb_pkt_type(skb) = HCI_COMMAND_PKT;
+ bt_dev_info(hu->hdev, "crash the soc to collect controller dump");
+ skb_queue_tail(&qca->txq, skb);
+ hci_uart_tx_wakeup(hu);
+
+ return 0;
+}
+
+static void qca_wait_for_dump_collection(struct hci_dev *hdev)
+{
+ struct hci_uart *hu = hci_get_drvdata(hdev);
+ struct qca_data *qca = hu->priv;
+ struct qca_memdump_data *qca_memdump = qca->qca_memdump;
+ char *memdump_buf = NULL;
+
+ wait_on_bit_timeout(&qca->flags, QCA_MEMDUMP_COLLECTION,
+ TASK_UNINTERRUPTIBLE, MEMDUMP_TIMEOUT_MS);
+
+ clear_bit(QCA_MEMDUMP_COLLECTION, &qca->flags);
+ if (qca->memdump_state == QCA_MEMDUMP_IDLE) {
+ bt_dev_err(hu->hdev, "Clearing the buffers due to timeout");
+ if (qca_memdump)
+ memdump_buf = qca_memdump->memdump_buf_tail;
+ vfree(memdump_buf);
+ kfree(qca_memdump);
+ qca->memdump_state = QCA_MEMDUMP_TIMEOUT;
+ del_timer(&qca->memdump_timer);
+ cancel_work_sync(&qca->ctrl_memdump_evt);
+ }
+}
+
+static void qca_hw_error(struct hci_dev *hdev, u8 code)
+{
+ struct hci_uart *hu = hci_get_drvdata(hdev);
+ struct qca_data *qca = hu->priv;
+
+ bt_dev_info(hdev, "mem_dump_status: %d", qca->memdump_state);
+
+ if (qca->memdump_state == QCA_MEMDUMP_IDLE) {
+		/* If a hardware error event is received for something
+		 * other than a QCA SoC memory dump event, we need to
+		 * crash the SoC and wait here for 8 seconds to get the
+		 * dump packets. This blocks the main thread until the
+		 * dump is collected.
+		 */
+ set_bit(QCA_MEMDUMP_COLLECTION, &qca->flags);
+ qca_send_crashbuffer(hu);
+ qca_wait_for_dump_collection(hdev);
+ } else if (qca->memdump_state == QCA_MEMDUMP_COLLECTING) {
+ /* Let us wait here until memory dump collected or
+ * memory dump timer expired.
+ */
+ bt_dev_info(hdev, "waiting for dump to complete");
+ qca_wait_for_dump_collection(hdev);
+ }
+}
+
+static void qca_cmd_timeout(struct hci_dev *hdev)
+{
+ struct hci_uart *hu = hci_get_drvdata(hdev);
+ struct qca_data *qca = hu->priv;
+
+ if (qca->memdump_state == QCA_MEMDUMP_IDLE)
+ qca_send_crashbuffer(hu);
+ else
+ bt_dev_info(hdev, "Dump collection is in process");
+}
+
static int qca_wcn3990_init(struct hci_uart *hu)
{
struct qca_serdev *qcadev;
@@ -1253,11 +1512,37 @@ static int qca_wcn3990_init(struct hci_uart *hu)
return 0;
}
+static int qca_power_on(struct hci_dev *hdev)
+{
+ struct hci_uart *hu = hci_get_drvdata(hdev);
+ enum qca_btsoc_type soc_type = qca_soc_type(hu);
+ struct qca_serdev *qcadev;
+ int ret = 0;
+
+	/* A non-serdev device is usually powered externally and
+	 * needs no additional driver action for power on.
+	 */
+ if (!hu->serdev)
+ return 0;
+
+ if (qca_is_wcn399x(soc_type)) {
+ ret = qca_wcn3990_init(hu);
+ } else {
+ qcadev = serdev_device_get_drvdata(hu->serdev);
+ gpiod_set_value_cansleep(qcadev->bt_en, 1);
+ /* Controller needs time to bootup. */
+ msleep(150);
+ }
+
+ return ret;
+}
+
static int qca_setup(struct hci_uart *hu)
{
struct hci_dev *hdev = hu->hdev;
struct qca_data *qca = hu->priv;
unsigned int speed, qca_baudrate = QCA_BAUDRATE_115200;
+ unsigned int retries = 0;
enum qca_btsoc_type soc_type = qca_soc_type(hu);
const char *firmware_name = qca_get_firmware_name(hu);
int ret;
@@ -1275,24 +1560,21 @@ static int qca_setup(struct hci_uart *hu)
*/
set_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY, &hdev->quirks);
- if (qca_is_wcn399x(soc_type)) {
- bt_dev_info(hdev, "setting up wcn3990");
+ bt_dev_info(hdev, "setting up %s",
+ qca_is_wcn399x(soc_type) ? "wcn399x" : "ROME");
- /* Enable NON_PERSISTENT_SETUP QUIRK to ensure to execute
- * setup for every hci up.
- */
- set_bit(HCI_QUIRK_NON_PERSISTENT_SETUP, &hdev->quirks);
+retry:
+ ret = qca_power_on(hdev);
+ if (ret)
+ return ret;
+
+ if (qca_is_wcn399x(soc_type)) {
set_bit(HCI_QUIRK_USE_BDADDR_PROPERTY, &hdev->quirks);
- hu->hdev->shutdown = qca_power_off;
- ret = qca_wcn3990_init(hu);
- if (ret)
- return ret;
ret = qca_read_soc_version(hdev, &soc_ver, soc_type);
if (ret)
return ret;
} else {
- bt_dev_info(hdev, "ROME setup");
qca_set_speed(hu, QCA_INIT_SPEED);
}
@@ -1320,6 +1602,8 @@ static int qca_setup(struct hci_uart *hu)
if (!ret) {
set_bit(QCA_IBS_ENABLED, &qca->flags);
qca_debugfs_init(hdev);
+ hu->hdev->hw_error = qca_hw_error;
+ hu->hdev->cmd_timeout = qca_cmd_timeout;
} else if (ret == -ENOENT) {
/* No patch/nvm-config found, run with original fw/config */
ret = 0;
@@ -1329,6 +1613,20 @@ static int qca_setup(struct hci_uart *hu)
* patch/nvm-config is found, so run with original fw/config.
*/
ret = 0;
+ } else {
+ if (retries < MAX_INIT_RETRIES) {
+ qca_power_shutdown(hu);
+ if (hu->serdev) {
+ serdev_device_close(hu->serdev);
+ ret = serdev_device_open(hu->serdev);
+ if (ret) {
+ bt_dev_err(hdev, "failed to open port");
+ return ret;
+ }
+ }
+ retries++;
+ goto retry;
+ }
}
/* Setup bdaddr */
@@ -1393,6 +1691,7 @@ static void qca_power_shutdown(struct hci_uart *hu)
struct qca_serdev *qcadev;
struct qca_data *qca = hu->priv;
unsigned long flags;
+ enum qca_btsoc_type soc_type = qca_soc_type(hu);
qcadev = serdev_device_get_drvdata(hu->serdev);
@@ -1405,20 +1704,36 @@ static void qca_power_shutdown(struct hci_uart *hu)
qca_flush(hu);
spin_unlock_irqrestore(&qca->hci_ibs_lock, flags);
- host_set_baudrate(hu, 2400);
- qca_send_power_pulse(hu, false);
- qca_regulator_disable(qcadev);
+ hu->hdev->hw_error = NULL;
+ hu->hdev->cmd_timeout = NULL;
+
+	/* A non-serdev device is usually powered externally and
+	 * needs no additional driver action for power down.
+	 */
+ if (!hu->serdev)
+ return;
+
+ if (qca_is_wcn399x(soc_type)) {
+ host_set_baudrate(hu, 2400);
+ qca_send_power_pulse(hu, false);
+ qca_regulator_disable(qcadev);
+ } else {
+ gpiod_set_value_cansleep(qcadev->bt_en, 0);
+ }
}
static int qca_power_off(struct hci_dev *hdev)
{
struct hci_uart *hu = hci_get_drvdata(hdev);
+ struct qca_data *qca = hu->priv;
- /* Perform pre shutdown command */
- qca_send_pre_shutdown_cmd(hdev);
-
- usleep_range(8000, 10000);
+ /* Stop sending shutdown command if soc crashes. */
+ if (qca->memdump_state == QCA_MEMDUMP_IDLE) {
+ qca_send_pre_shutdown_cmd(hdev);
+ usleep_range(8000, 10000);
+ }
+ qca->memdump_state = QCA_MEMDUMP_IDLE;
qca_power_shutdown(hu);
return 0;
}
@@ -1493,6 +1808,7 @@ static int qca_init_regulators(struct qca_power *qca,
static int qca_serdev_probe(struct serdev_device *serdev)
{
struct qca_serdev *qcadev;
+ struct hci_dev *hdev;
const struct qca_vreg_data *data;
int err;
@@ -1501,7 +1817,7 @@ static int qca_serdev_probe(struct serdev_device *serdev)
return -ENOMEM;
qcadev->serdev_hu.serdev = serdev;
- data = of_device_get_match_data(&serdev->dev);
+ data = device_get_match_data(&serdev->dev);
serdev_device_set_drvdata(serdev, qcadev);
device_property_read_string(&serdev->dev, "firmware-name",
&qcadev->firmware_name);
@@ -1518,7 +1834,7 @@ static int qca_serdev_probe(struct serdev_device *serdev)
data->num_vregs);
if (err) {
BT_ERR("Failed to init regulators:%d", err);
- goto out;
+ return err;
}
qcadev->bt_power->vregs_on = false;
@@ -1531,7 +1847,7 @@ static int qca_serdev_probe(struct serdev_device *serdev)
err = hci_uart_register_device(&qcadev->serdev_hu, &qca_proto);
if (err) {
BT_ERR("wcn3990 serdev registration failed");
- goto out;
+ return err;
}
} else {
qcadev->btsoc_type = QCA_ROME;
@@ -1557,12 +1873,18 @@ static int qca_serdev_probe(struct serdev_device *serdev)
return err;
err = hci_uart_register_device(&qcadev->serdev_hu, &qca_proto);
- if (err)
+ if (err) {
+ BT_ERR("Rome serdev registration failed");
clk_disable_unprepare(qcadev->susclk);
+ return err;
+ }
}
-out: return err;
+ hdev = qcadev->serdev_hu.hdev;
+ set_bit(HCI_QUIRK_NON_PERSISTENT_SETUP, &hdev->quirks);
+ hdev->shutdown = qca_power_off;
+ return 0;
}
static void qca_serdev_remove(struct serdev_device *serdev)
diff --git a/drivers/bluetooth/hci_uart.h b/drivers/bluetooth/hci_uart.h
index 6ab631101019..4e039d7a16f8 100644
--- a/drivers/bluetooth/hci_uart.h
+++ b/drivers/bluetooth/hci_uart.h
@@ -143,6 +143,13 @@ struct h4_recv_pkt {
.lsize = 1, \
.maxlen = HCI_MAX_EVENT_SIZE
+#define H4_RECV_ISO \
+ .type = HCI_ISODATA_PKT, \
+ .hlen = HCI_ISO_HDR_SIZE, \
+ .loff = 2, \
+ .lsize = 2, \
+ .maxlen = HCI_MAX_FRAME_SIZE \
+
struct sk_buff *h4_recv_buf(struct hci_dev *hdev, struct sk_buff *skb,
const unsigned char *buffer, int count,
const struct h4_recv_pkt *pkts, int pkts_count);
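Each H4_RECV_* entry tells h4_recv_buf() where the payload length lives inside the packet header. Under the simplifying assumption of a contiguous header, the lookup for the new ISO entry (.loff = 2, .lsize = 2: a 16-bit little-endian field at byte 2 of the handle(2) + dlen(2) header) amounts to:

	#include <linux/types.h>

	/* Simplified sketch of the length extraction driven by the
	 * .loff/.lsize fields above; not the verbatim h4_recv_buf() code. */
	static unsigned int h4_payload_len(const u8 *hdr, u8 loff, u8 lsize)
	{
		if (lsize == 1)
			return hdr[loff];

		return hdr[loff] | (hdr[loff + 1] << 8);
	}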
diff --git a/drivers/bluetooth/hci_vhci.c b/drivers/bluetooth/hci_vhci.c
index 65e41c1d760f..8ab26dec5f6e 100644
--- a/drivers/bluetooth/hci_vhci.c
+++ b/drivers/bluetooth/hci_vhci.c
@@ -178,6 +178,7 @@ static inline ssize_t vhci_get_user(struct vhci_data *data,
case HCI_EVENT_PKT:
case HCI_ACLDATA_PKT:
case HCI_SCODATA_PKT:
+ case HCI_ISODATA_PKT:
if (!data->hdev) {
kfree_skb(skb);
return -ENODEV;
diff --git a/drivers/bus/fsl-mc/fsl-mc-bus.c b/drivers/bus/fsl-mc/fsl-mc-bus.c
index a07cc19becdb..c78d10ea641f 100644
--- a/drivers/bus/fsl-mc/fsl-mc-bus.c
+++ b/drivers/bus/fsl-mc/fsl-mc-bus.c
@@ -715,9 +715,9 @@ EXPORT_SYMBOL_GPL(fsl_mc_device_remove);
struct fsl_mc_device *fsl_mc_get_endpoint(struct fsl_mc_device *mc_dev)
{
struct fsl_mc_device *mc_bus_dev, *endpoint;
- struct fsl_mc_obj_desc endpoint_desc = { 0 };
- struct dprc_endpoint endpoint1 = { 0 };
- struct dprc_endpoint endpoint2 = { 0 };
+ struct fsl_mc_obj_desc endpoint_desc = {{ 0 }};
+ struct dprc_endpoint endpoint1 = {{ 0 }};
+ struct dprc_endpoint endpoint2 = {{ 0 }};
int state, err;
mc_bus_dev = to_fsl_mc_device(mc_dev->dev.parent);
diff --git a/drivers/bus/fsl-mc/mc-io.c b/drivers/bus/fsl-mc/mc-io.c
index d9629fc13a15..6ae48ad80409 100644
--- a/drivers/bus/fsl-mc/mc-io.c
+++ b/drivers/bus/fsl-mc/mc-io.c
@@ -97,12 +97,12 @@ int __must_check fsl_create_mc_io(struct device *dev,
return -EBUSY;
}
- mc_portal_virt_addr = devm_ioremap_nocache(dev,
+ mc_portal_virt_addr = devm_ioremap(dev,
mc_portal_phys_addr,
mc_portal_size);
if (!mc_portal_virt_addr) {
dev_err(dev,
- "devm_ioremap_nocache failed for MC portal %pa\n",
+ "devm_ioremap failed for MC portal %pa\n",
&mc_portal_phys_addr);
return -ENXIO;
}
diff --git a/drivers/bus/ti-sysc.c b/drivers/bus/ti-sysc.c
index f4d1597df0a2..ccb44fe790a7 100644
--- a/drivers/bus/ti-sysc.c
+++ b/drivers/bus/ti-sysc.c
@@ -343,6 +343,12 @@ static int sysc_get_clocks(struct sysc *ddata)
return -EINVAL;
}
+ /* Always add a slot for main clocks fck and ick even if unused */
+ if (!nr_fck)
+ ddata->nr_clocks++;
+ if (!nr_ick)
+ ddata->nr_clocks++;
+
ddata->clocks = devm_kcalloc(ddata->dev,
ddata->nr_clocks, sizeof(*ddata->clocks),
GFP_KERNEL);
@@ -421,7 +427,7 @@ static int sysc_enable_opt_clocks(struct sysc *ddata)
struct clk *clock;
int i, error;
- if (!ddata->clocks)
+ if (!ddata->clocks || ddata->nr_clocks < SYSC_OPTFCK0 + 1)
return 0;
for (i = SYSC_OPTFCK0; i < SYSC_MAX_CLOCKS; i++) {
@@ -455,7 +461,7 @@ static void sysc_disable_opt_clocks(struct sysc *ddata)
struct clk *clock;
int i;
- if (!ddata->clocks)
+ if (!ddata->clocks || ddata->nr_clocks < SYSC_OPTFCK0 + 1)
return;
for (i = SYSC_OPTFCK0; i < SYSC_MAX_CLOCKS; i++) {
diff --git a/drivers/cdrom/cdrom.c b/drivers/cdrom/cdrom.c
index eebdcbef0578..faca0f346fff 100644
--- a/drivers/cdrom/cdrom.c
+++ b/drivers/cdrom/cdrom.c
@@ -3017,9 +3017,31 @@ static noinline int mmc_ioctl_cdrom_read_audio(struct cdrom_device_info *cdi,
struct cdrom_read_audio ra;
int lba;
- if (copy_from_user(&ra, (struct cdrom_read_audio __user *)arg,
- sizeof(ra)))
- return -EFAULT;
+#ifdef CONFIG_COMPAT
+ if (in_compat_syscall()) {
+ struct compat_cdrom_read_audio {
+ union cdrom_addr addr;
+ u8 addr_format;
+ compat_int_t nframes;
+ compat_caddr_t buf;
+ } ra32;
+
+ if (copy_from_user(&ra32, arg, sizeof(ra32)))
+ return -EFAULT;
+
+ ra = (struct cdrom_read_audio) {
+ .addr = ra32.addr,
+ .addr_format = ra32.addr_format,
+ .nframes = ra32.nframes,
+ .buf = compat_ptr(ra32.buf),
+ };
+ } else
+#endif
+ {
+ if (copy_from_user(&ra, (struct cdrom_read_audio __user *)arg,
+ sizeof(ra)))
+ return -EFAULT;
+ }
if (ra.addr_format == CDROM_MSF)
lba = msf_to_lba(ra.addr.msf.minute,
@@ -3271,9 +3293,10 @@ static noinline int mmc_ioctl_cdrom_last_written(struct cdrom_device_info *cdi,
ret = cdrom_get_last_written(cdi, &last);
if (ret)
return ret;
- if (copy_to_user((long __user *)arg, &last, sizeof(last)))
- return -EFAULT;
- return 0;
+ if (in_compat_syscall())
+ return put_user(last, (__s32 __user *)arg);
+
+ return put_user(last, (long __user *)arg);
}
static int mmc_ioctl(struct cdrom_device_info *cdi, unsigned int cmd,
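The in_compat_syscall() branch above is needed because a 32-bit caller lays out struct cdrom_read_audio with 4-byte pointer and int members, so the native structure cannot be copied in directly. A hypothetical illustration of the mismatch (names are not from the kernel):

	/* On a 64-bit kernel: */
	struct example {
		int nframes;
		void __user *buf;	/* 8 bytes */
	};

	/* As seen by a 32-bit task: */
	struct compat_example {
		compat_int_t nframes;
		compat_caddr_t buf;	/* 4 bytes; widened via compat_ptr() */
	};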
diff --git a/drivers/cdrom/gdrom.c b/drivers/cdrom/gdrom.c
index 5b21dc421c94..886b2638c730 100644
--- a/drivers/cdrom/gdrom.c
+++ b/drivers/cdrom/gdrom.c
@@ -518,6 +518,9 @@ static const struct block_device_operations gdrom_bdops = {
.release = gdrom_bdops_release,
.check_events = gdrom_bdops_check_events,
.ioctl = gdrom_bdops_ioctl,
+#ifdef CONFIG_COMPAT
+	.compat_ioctl		= blkdev_compat_ptr_ioctl,
+#endif
};
static irqreturn_t gdrom_command_interrupt(int irq, void *dev_id)
diff --git a/drivers/char/agp/generic.c b/drivers/char/agp/generic.c
index ab154a75acf0..9e84239f88d4 100644
--- a/drivers/char/agp/generic.c
+++ b/drivers/char/agp/generic.c
@@ -941,7 +941,7 @@ int agp_generic_create_gatt_table(struct agp_bridge_data *bridge)
bridge->gatt_table = (u32 __iomem *)table;
#else
- bridge->gatt_table = ioremap_nocache(virt_to_phys(table),
+ bridge->gatt_table = ioremap(virt_to_phys(table),
(PAGE_SIZE * (1 << page_order)));
bridge->driver->cache_flush();
#endif
diff --git a/drivers/char/agp/intel-gtt.c b/drivers/char/agp/intel-gtt.c
index c6271ce250b3..66a62d17a3f5 100644
--- a/drivers/char/agp/intel-gtt.c
+++ b/drivers/char/agp/intel-gtt.c
@@ -1087,7 +1087,7 @@ static void intel_i9xx_setup_flush(void)
}
if (intel_private.ifp_resource.start)
- intel_private.i9xx_flush_page = ioremap_nocache(intel_private.ifp_resource.start, PAGE_SIZE);
+ intel_private.i9xx_flush_page = ioremap(intel_private.ifp_resource.start, PAGE_SIZE);
if (!intel_private.i9xx_flush_page)
dev_err(&intel_private.pcidev->dev,
"can't ioremap flush page - no chipset flushing\n");
diff --git a/drivers/char/applicom.c b/drivers/char/applicom.c
index eb108b3c619a..51121a4b82c7 100644
--- a/drivers/char/applicom.c
+++ b/drivers/char/applicom.c
@@ -204,7 +204,7 @@ static int __init applicom_init(void)
if (pci_enable_device(dev))
return -EIO;
- RamIO = ioremap_nocache(pci_resource_start(dev, 0), LEN_RAM_IO);
+ RamIO = ioremap(pci_resource_start(dev, 0), LEN_RAM_IO);
if (!RamIO) {
printk(KERN_INFO "ac.o: Failed to ioremap PCI memory "
@@ -259,7 +259,7 @@ static int __init applicom_init(void)
/* Now try the specified ISA cards */
for (i = 0; i < MAX_ISA_BOARD; i++) {
- RamIO = ioremap_nocache(mem + (LEN_RAM_IO * i), LEN_RAM_IO);
+ RamIO = ioremap(mem + (LEN_RAM_IO * i), LEN_RAM_IO);
if (!RamIO) {
printk(KERN_INFO "ac.o: Failed to ioremap the ISA card's memory space (slot #%d)\n", i + 1);
diff --git a/drivers/char/hpet.c b/drivers/char/hpet.c
index 9ac6671bb514..aed2c45f7968 100644
--- a/drivers/char/hpet.c
+++ b/drivers/char/hpet.c
@@ -110,7 +110,7 @@ struct hpets {
unsigned long hp_delta;
unsigned int hp_ntimer;
unsigned int hp_which;
- struct hpet_dev hp_dev[1];
+ struct hpet_dev hp_dev[];
};
static struct hpets *hpets;
diff --git a/drivers/char/hw_random/Kconfig b/drivers/char/hw_random/Kconfig
index 8486c29d8324..914e293ba62b 100644
--- a/drivers/char/hw_random/Kconfig
+++ b/drivers/char/hw_random/Kconfig
@@ -90,7 +90,7 @@ config HW_RANDOM_BCM2835
config HW_RANDOM_IPROC_RNG200
tristate "Broadcom iProc/STB RNG200 support"
- depends on ARCH_BCM_IPROC || ARCH_BRCMSTB
+ depends on ARCH_BCM_IPROC || ARCH_BCM2835 || ARCH_BRCMSTB
default HW_RANDOM
---help---
This driver provides kernel-side support for the RNG200
diff --git a/drivers/char/hw_random/intel-rng.c b/drivers/char/hw_random/intel-rng.c
index 290c880266bf..9f205bd1acc0 100644
--- a/drivers/char/hw_random/intel-rng.c
+++ b/drivers/char/hw_random/intel-rng.c
@@ -317,7 +317,7 @@ PFX "RNG, try using the 'no_fwh_detect' option.\n";
return -EBUSY;
}
- intel_rng_hw->mem = ioremap_nocache(INTEL_FWH_ADDR, INTEL_FWH_ADDR_LEN);
+ intel_rng_hw->mem = ioremap(INTEL_FWH_ADDR, INTEL_FWH_ADDR_LEN);
if (intel_rng_hw->mem == NULL)
return -EBUSY;
diff --git a/drivers/char/hw_random/iproc-rng200.c b/drivers/char/hw_random/iproc-rng200.c
index 899ff25f4f28..32d9fe61a225 100644
--- a/drivers/char/hw_random/iproc-rng200.c
+++ b/drivers/char/hw_random/iproc-rng200.c
@@ -213,6 +213,7 @@ static int iproc_rng200_probe(struct platform_device *pdev)
}
static const struct of_device_id iproc_rng200_of_match[] = {
+ { .compatible = "brcm,bcm2711-rng200", },
{ .compatible = "brcm,bcm7211-rng200", },
{ .compatible = "brcm,bcm7278-rng200", },
{ .compatible = "brcm,iproc-rng200", },
diff --git a/drivers/char/hw_random/octeon-rng.c b/drivers/char/hw_random/octeon-rng.c
index 8c78aa090492..7be8067ac4e8 100644
--- a/drivers/char/hw_random/octeon-rng.c
+++ b/drivers/char/hw_random/octeon-rng.c
@@ -81,13 +81,13 @@ static int octeon_rng_probe(struct platform_device *pdev)
return -ENOENT;
- rng->control_status = devm_ioremap_nocache(&pdev->dev,
+ rng->control_status = devm_ioremap(&pdev->dev,
res_ports->start,
sizeof(u64));
if (!rng->control_status)
return -ENOENT;
- rng->result = devm_ioremap_nocache(&pdev->dev,
+ rng->result = devm_ioremap(&pdev->dev,
res_result->start,
sizeof(u64));
if (!rng->result)
diff --git a/drivers/char/pcmcia/synclink_cs.c b/drivers/char/pcmcia/synclink_cs.c
index 82f9a6a814ae..e342daa73d1b 100644
--- a/drivers/char/pcmcia/synclink_cs.c
+++ b/drivers/char/pcmcia/synclink_cs.c
@@ -4169,7 +4169,7 @@ static int hdlcdev_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
*
* dev pointer to network device structure
*/
-static void hdlcdev_tx_timeout(struct net_device *dev)
+static void hdlcdev_tx_timeout(struct net_device *dev, unsigned int txqueue)
{
MGSLPC_INFO *info = dev_to_port(dev);
unsigned long flags;
diff --git a/drivers/char/tpm/tpm-sysfs.c b/drivers/char/tpm/tpm-sysfs.c
index 3b53b3e5ec3e..d52bf4df0bca 100644
--- a/drivers/char/tpm/tpm-sysfs.c
+++ b/drivers/char/tpm/tpm-sysfs.c
@@ -310,7 +310,17 @@ static ssize_t timeouts_show(struct device *dev, struct device_attribute *attr,
}
static DEVICE_ATTR_RO(timeouts);
-static struct attribute *tpm_dev_attrs[] = {
+static ssize_t tpm_version_major_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct tpm_chip *chip = to_tpm_chip(dev);
+
+ return sprintf(buf, "%s\n", chip->flags & TPM_CHIP_FLAG_TPM2
+ ? "2" : "1");
+}
+static DEVICE_ATTR_RO(tpm_version_major);
+
+static struct attribute *tpm1_dev_attrs[] = {
&dev_attr_pubek.attr,
&dev_attr_pcrs.attr,
&dev_attr_enabled.attr,
@@ -321,18 +331,28 @@ static struct attribute *tpm_dev_attrs[] = {
&dev_attr_cancel.attr,
&dev_attr_durations.attr,
&dev_attr_timeouts.attr,
+ &dev_attr_tpm_version_major.attr,
NULL,
};
-static const struct attribute_group tpm_dev_group = {
- .attrs = tpm_dev_attrs,
+static struct attribute *tpm2_dev_attrs[] = {
+ &dev_attr_tpm_version_major.attr,
+ NULL
+};
+
+static const struct attribute_group tpm1_dev_group = {
+ .attrs = tpm1_dev_attrs,
+};
+
+static const struct attribute_group tpm2_dev_group = {
+ .attrs = tpm2_dev_attrs,
};
void tpm_sysfs_add_device(struct tpm_chip *chip)
{
- if (chip->flags & TPM_CHIP_FLAG_TPM2)
- return;
-
WARN_ON(chip->groups_cnt != 0);
- chip->groups[chip->groups_cnt++] = &tpm_dev_group;
+ if (chip->flags & TPM_CHIP_FLAG_TPM2)
+ chip->groups[chip->groups_cnt++] = &tpm2_dev_group;
+ else
+ chip->groups[chip->groups_cnt++] = &tpm1_dev_group;
}
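With the split attribute groups in place, userspace can discover the TPM family without issuing a single TPM command: reading the new tpm_version_major attribute under the chip's sysfs directory (typically /sys/class/tpm/tpm0/) returns "1" or "2" for both TPM 1.x and TPM 2.0 chips.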
diff --git a/drivers/char/ttyprintk.c b/drivers/char/ttyprintk.c
index 4f24e46ebe7c..56db949a7b70 100644
--- a/drivers/char/ttyprintk.c
+++ b/drivers/char/ttyprintk.c
@@ -15,10 +15,11 @@
#include <linux/serial.h>
#include <linux/tty.h>
#include <linux/module.h>
+#include <linux/spinlock.h>
struct ttyprintk_port {
struct tty_port port;
- struct mutex port_write_mutex;
+ spinlock_t spinlock;
};
static struct ttyprintk_port tpk_port;
@@ -99,11 +100,12 @@ static int tpk_open(struct tty_struct *tty, struct file *filp)
static void tpk_close(struct tty_struct *tty, struct file *filp)
{
struct ttyprintk_port *tpkp = tty->driver_data;
+ unsigned long flags;
- mutex_lock(&tpkp->port_write_mutex);
+ spin_lock_irqsave(&tpkp->spinlock, flags);
/* flush tpk_printk buffer */
tpk_printk(NULL, 0);
- mutex_unlock(&tpkp->port_write_mutex);
+ spin_unlock_irqrestore(&tpkp->spinlock, flags);
tty_port_close(&tpkp->port, tty, filp);
}
@@ -115,13 +117,14 @@ static int tpk_write(struct tty_struct *tty,
const unsigned char *buf, int count)
{
struct ttyprintk_port *tpkp = tty->driver_data;
+ unsigned long flags;
int ret;
/* exclusive use of tpk_printk within this tty */
- mutex_lock(&tpkp->port_write_mutex);
+ spin_lock_irqsave(&tpkp->spinlock, flags);
ret = tpk_printk(buf, count);
- mutex_unlock(&tpkp->port_write_mutex);
+ spin_unlock_irqrestore(&tpkp->spinlock, flags);
return ret;
}
@@ -171,7 +174,7 @@ static int __init ttyprintk_init(void)
{
int ret = -ENOMEM;
- mutex_init(&tpk_port.port_write_mutex);
+ spin_lock_init(&tpk_port.spinlock);
ttyprintk_driver = tty_alloc_driver(1,
TTY_DRIVER_RESET_TERMIOS |
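The mutex-to-spinlock conversion above presumably matters because the tty write path can be entered from contexts where sleeping is not allowed; the _irqsave spinlock variants are safe anywhere. A self-contained sketch of the pattern:

	#include <linux/spinlock.h>

	static DEFINE_SPINLOCK(demo_lock);

	/* Unlike a mutex, spin_lock_irqsave() never sleeps and may be
	 * used from atomic and interrupt context. */
	static void demo_critical(void)
	{
		unsigned long flags;

		spin_lock_irqsave(&demo_lock, flags);
		/* work that must not sleep */
		spin_unlock_irqrestore(&demo_lock, flags);
	}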
diff --git a/drivers/clk/clk.c b/drivers/clk/clk.c
index 6a11239ccde3..772258de2d1f 100644
--- a/drivers/clk/clk.c
+++ b/drivers/clk/clk.c
@@ -3426,11 +3426,17 @@ static int __clk_core_init(struct clk_core *core)
if (core->flags & CLK_IS_CRITICAL) {
unsigned long flags;
- clk_core_prepare(core);
+ ret = clk_core_prepare(core);
+ if (ret)
+ goto out;
flags = clk_enable_lock();
- clk_core_enable(core);
+ ret = clk_core_enable(core);
clk_enable_unlock(flags);
+ if (ret) {
+ clk_core_unprepare(core);
+ goto out;
+ }
}
clk_core_reparent_orphans_nolock();
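The hunk above makes CLK_IS_CRITICAL initialization honor failures from prepare/enable instead of ignoring them, unwinding in reverse order. The same discipline from the consumer side, as a minimal sketch:

	#include <linux/clk.h>

	/* A failed enable must undo the earlier prepare, never the
	 * other way around. */
	static int demo_clk_on(struct clk *clk)
	{
		int ret = clk_prepare(clk);

		if (ret)
			return ret;

		ret = clk_enable(clk);
		if (ret)
			clk_unprepare(clk);

		return ret;
	}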
diff --git a/drivers/clk/mmp/clk-of-mmp2.c b/drivers/clk/mmp/clk-of-mmp2.c
index a60a1be937ad..b4a95cbbda98 100644
--- a/drivers/clk/mmp/clk-of-mmp2.c
+++ b/drivers/clk/mmp/clk-of-mmp2.c
@@ -134,7 +134,7 @@ static DEFINE_SPINLOCK(ssp3_lock);
static const char *ssp_parent_names[] = {"vctcxo_4", "vctcxo_2", "vctcxo", "pll1_16"};
static DEFINE_SPINLOCK(timer_lock);
-static const char *timer_parent_names[] = {"clk32", "vctcxo_2", "vctcxo_4", "vctcxo"};
+static const char *timer_parent_names[] = {"clk32", "vctcxo_4", "vctcxo_2", "vctcxo"};
static DEFINE_SPINLOCK(reset_lock);
diff --git a/drivers/clk/qcom/gcc-sdm845.c b/drivers/clk/qcom/gcc-sdm845.c
index f7b370f3acef..f6ce888098be 100644
--- a/drivers/clk/qcom/gcc-sdm845.c
+++ b/drivers/clk/qcom/gcc-sdm845.c
@@ -3255,6 +3255,7 @@ static struct gdsc hlos1_vote_aggre_noc_mmu_audio_tbu_gdsc = {
.name = "hlos1_vote_aggre_noc_mmu_audio_tbu_gdsc",
},
.pwrsts = PWRSTS_OFF_ON,
+ .flags = VOTABLE,
};
static struct gdsc hlos1_vote_aggre_noc_mmu_pcie_tbu_gdsc = {
@@ -3263,6 +3264,7 @@ static struct gdsc hlos1_vote_aggre_noc_mmu_pcie_tbu_gdsc = {
.name = "hlos1_vote_aggre_noc_mmu_pcie_tbu_gdsc",
},
.pwrsts = PWRSTS_OFF_ON,
+ .flags = VOTABLE,
};
static struct gdsc hlos1_vote_aggre_noc_mmu_tbu1_gdsc = {
@@ -3271,6 +3273,7 @@ static struct gdsc hlos1_vote_aggre_noc_mmu_tbu1_gdsc = {
.name = "hlos1_vote_aggre_noc_mmu_tbu1_gdsc",
},
.pwrsts = PWRSTS_OFF_ON,
+ .flags = VOTABLE,
};
static struct gdsc hlos1_vote_aggre_noc_mmu_tbu2_gdsc = {
@@ -3279,6 +3282,7 @@ static struct gdsc hlos1_vote_aggre_noc_mmu_tbu2_gdsc = {
.name = "hlos1_vote_aggre_noc_mmu_tbu2_gdsc",
},
.pwrsts = PWRSTS_OFF_ON,
+ .flags = VOTABLE,
};
static struct gdsc hlos1_vote_mmnoc_mmu_tbu_hf0_gdsc = {
@@ -3287,6 +3291,7 @@ static struct gdsc hlos1_vote_mmnoc_mmu_tbu_hf0_gdsc = {
.name = "hlos1_vote_mmnoc_mmu_tbu_hf0_gdsc",
},
.pwrsts = PWRSTS_OFF_ON,
+ .flags = VOTABLE,
};
static struct gdsc hlos1_vote_mmnoc_mmu_tbu_hf1_gdsc = {
@@ -3295,6 +3300,7 @@ static struct gdsc hlos1_vote_mmnoc_mmu_tbu_hf1_gdsc = {
.name = "hlos1_vote_mmnoc_mmu_tbu_hf1_gdsc",
},
.pwrsts = PWRSTS_OFF_ON,
+ .flags = VOTABLE,
};
static struct gdsc hlos1_vote_mmnoc_mmu_tbu_sf_gdsc = {
@@ -3303,6 +3309,7 @@ static struct gdsc hlos1_vote_mmnoc_mmu_tbu_sf_gdsc = {
.name = "hlos1_vote_mmnoc_mmu_tbu_sf_gdsc",
},
.pwrsts = PWRSTS_OFF_ON,
+ .flags = VOTABLE,
};
static struct clk_regmap *gcc_sdm845_clocks[] = {
diff --git a/drivers/clk/renesas/clk-rz.c b/drivers/clk/renesas/clk-rz.c
index fbc34beafc78..7b703f14e20b 100644
--- a/drivers/clk/renesas/clk-rz.c
+++ b/drivers/clk/renesas/clk-rz.c
@@ -37,8 +37,8 @@ static u16 __init rz_cpg_read_mode_pins(void)
void __iomem *ppr0, *pibc0;
u16 modes;
- ppr0 = ioremap_nocache(PPR0, 2);
- pibc0 = ioremap_nocache(PIBC0, 2);
+ ppr0 = ioremap(PPR0, 2);
+ pibc0 = ioremap(PIBC0, 2);
BUG_ON(!ppr0 || !pibc0);
iowrite16(4, pibc0); /* enable input buffer */
modes = ioread16(ppr0);
diff --git a/drivers/clk/samsung/clk-exynos5420.c b/drivers/clk/samsung/clk-exynos5420.c
index 3a991ca1ee36..c9e5a1fb6653 100644
--- a/drivers/clk/samsung/clk-exynos5420.c
+++ b/drivers/clk/samsung/clk-exynos5420.c
@@ -12,6 +12,7 @@
#include <linux/clk-provider.h>
#include <linux/of.h>
#include <linux/of_address.h>
+#include <linux/clk.h>
#include "clk.h"
#include "clk-cpu.h"
@@ -1646,6 +1647,13 @@ static void __init exynos5x_clk_init(struct device_node *np,
exynos5x_subcmus);
}
+ /*
+ * Keep top part of G3D clock path enabled permanently to ensure
+ * that the internal busses get their clock regardless of the
+ * main G3D clock enablement status.
+ */
+ clk_prepare_enable(__clk_lookup("mout_sw_aclk_g3d"));
+
samsung_clk_of_add_provider(np, ctx);
}
diff --git a/drivers/clk/sunxi-ng/ccu-sun50i-h6-r.c b/drivers/clk/sunxi-ng/ccu-sun50i-h6-r.c
index 45a1ed3fe674..50f8d1bc7046 100644
--- a/drivers/clk/sunxi-ng/ccu-sun50i-h6-r.c
+++ b/drivers/clk/sunxi-ng/ccu-sun50i-h6-r.c
@@ -23,9 +23,9 @@
*/
static const char * const ar100_r_apb2_parents[] = { "osc24M", "osc32k",
- "pll-periph0", "iosc" };
+ "iosc", "pll-periph0" };
static const struct ccu_mux_var_prediv ar100_r_apb2_predivs[] = {
- { .index = 2, .shift = 0, .width = 5 },
+ { .index = 3, .shift = 0, .width = 5 },
};
static struct ccu_div ar100_clk = {
@@ -51,17 +51,7 @@ static struct ccu_div ar100_clk = {
static CLK_FIXED_FACTOR_HW(r_ahb_clk, "r-ahb", &ar100_clk.common.hw, 1, 1, 0);
-static struct ccu_div r_apb1_clk = {
- .div = _SUNXI_CCU_DIV(0, 2),
-
- .common = {
- .reg = 0x00c,
- .hw.init = CLK_HW_INIT("r-apb1",
- "r-ahb",
- &ccu_div_ops,
- 0),
- },
-};
+static SUNXI_CCU_M(r_apb1_clk, "r-apb1", "r-ahb", 0x00c, 0, 2, 0);
static struct ccu_div r_apb2_clk = {
.div = _SUNXI_CCU_DIV_FLAGS(8, 2, CLK_DIVIDER_POWER_OF_TWO),
diff --git a/drivers/clk/sunxi-ng/ccu-sun8i-r.c b/drivers/clk/sunxi-ng/ccu-sun8i-r.c
index 4646fdc61053..4c8c491b87c2 100644
--- a/drivers/clk/sunxi-ng/ccu-sun8i-r.c
+++ b/drivers/clk/sunxi-ng/ccu-sun8i-r.c
@@ -51,19 +51,7 @@ static struct ccu_div ar100_clk = {
static CLK_FIXED_FACTOR_HW(ahb0_clk, "ahb0", &ar100_clk.common.hw, 1, 1, 0);
-static struct ccu_div apb0_clk = {
- .div = _SUNXI_CCU_DIV_FLAGS(0, 2, CLK_DIVIDER_POWER_OF_TWO),
-
- .common = {
- .reg = 0x0c,
- .hw.init = CLK_HW_INIT_HW("apb0",
- &ahb0_clk.hw,
- &ccu_div_ops,
- 0),
- },
-};
-
-static SUNXI_CCU_M(a83t_apb0_clk, "apb0", "ahb0", 0x0c, 0, 2, 0);
+static SUNXI_CCU_M(apb0_clk, "apb0", "ahb0", 0x0c, 0, 2, 0);
/*
* Define the parent as an array that can be reused to save space
@@ -127,7 +115,7 @@ static struct ccu_mp a83t_ir_clk = {
static struct ccu_common *sun8i_a83t_r_ccu_clks[] = {
&ar100_clk.common,
- &a83t_apb0_clk.common,
+ &apb0_clk.common,
&apb0_pio_clk.common,
&apb0_ir_clk.common,
&apb0_timer_clk.common,
@@ -167,7 +155,7 @@ static struct clk_hw_onecell_data sun8i_a83t_r_hw_clks = {
.hws = {
[CLK_AR100] = &ar100_clk.common.hw,
[CLK_AHB0] = &ahb0_clk.hw,
- [CLK_APB0] = &a83t_apb0_clk.common.hw,
+ [CLK_APB0] = &apb0_clk.common.hw,
[CLK_APB0_PIO] = &apb0_pio_clk.common.hw,
[CLK_APB0_IR] = &apb0_ir_clk.common.hw,
[CLK_APB0_TIMER] = &apb0_timer_clk.common.hw,
@@ -282,9 +270,6 @@ static void __init sunxi_r_ccu_init(struct device_node *node,
static void __init sun8i_a83t_r_ccu_setup(struct device_node *node)
{
- /* Fix apb0 bus gate parents here */
- apb0_gate_parent[0] = &a83t_apb0_clk.common.hw;
-
sunxi_r_ccu_init(node, &sun8i_a83t_r_ccu_desc);
}
CLK_OF_DECLARE(sun8i_a83t_r_ccu, "allwinner,sun8i-a83t-r-ccu",
diff --git a/drivers/clk/sunxi-ng/ccu-sun8i-r40.c b/drivers/clk/sunxi-ng/ccu-sun8i-r40.c
index 897490800102..23bfe1d12f21 100644
--- a/drivers/clk/sunxi-ng/ccu-sun8i-r40.c
+++ b/drivers/clk/sunxi-ng/ccu-sun8i-r40.c
@@ -761,7 +761,8 @@ static struct ccu_mp outa_clk = {
.reg = 0x1f0,
.features = CCU_FEATURE_FIXED_PREDIV,
.hw.init = CLK_HW_INIT_PARENTS("outa", out_parents,
- &ccu_mp_ops, 0),
+ &ccu_mp_ops,
+ CLK_SET_RATE_PARENT),
}
};
@@ -779,7 +780,8 @@ static struct ccu_mp outb_clk = {
.reg = 0x1f4,
.features = CCU_FEATURE_FIXED_PREDIV,
.hw.init = CLK_HW_INIT_PARENTS("outb", out_parents,
- &ccu_mp_ops, 0),
+ &ccu_mp_ops,
+ CLK_SET_RATE_PARENT),
}
};
diff --git a/drivers/clk/sunxi-ng/ccu-sun8i-v3s.c b/drivers/clk/sunxi-ng/ccu-sun8i-v3s.c
index 5c779eec454b..0e36ca3bf3d5 100644
--- a/drivers/clk/sunxi-ng/ccu-sun8i-v3s.c
+++ b/drivers/clk/sunxi-ng/ccu-sun8i-v3s.c
@@ -618,7 +618,7 @@ static struct clk_hw_onecell_data sun8i_v3s_hw_clks = {
[CLK_MBUS] = &mbus_clk.common.hw,
[CLK_MIPI_CSI] = &mipi_csi_clk.common.hw,
},
- .num = CLK_NUMBER,
+ .num = CLK_PLL_DDR1 + 1,
};
static struct clk_hw_onecell_data sun8i_v3_hw_clks = {
@@ -700,7 +700,7 @@ static struct clk_hw_onecell_data sun8i_v3_hw_clks = {
[CLK_MBUS] = &mbus_clk.common.hw,
[CLK_MIPI_CSI] = &mipi_csi_clk.common.hw,
},
- .num = CLK_NUMBER,
+ .num = CLK_I2S0 + 1,
};
static struct ccu_reset_map sun8i_v3s_ccu_resets[] = {
diff --git a/drivers/clk/sunxi-ng/ccu-sun8i-v3s.h b/drivers/clk/sunxi-ng/ccu-sun8i-v3s.h
index b0160d305a67..108eeeedcbf7 100644
--- a/drivers/clk/sunxi-ng/ccu-sun8i-v3s.h
+++ b/drivers/clk/sunxi-ng/ccu-sun8i-v3s.h
@@ -51,6 +51,4 @@
#define CLK_PLL_DDR1 74
-#define CLK_NUMBER (CLK_I2S0 + 1)
-
#endif /* _CCU_SUN8I_H3_H_ */
diff --git a/drivers/clk/tegra/clk.c b/drivers/clk/tegra/clk.c
index e6bd6d1ea012..f6cdce441cf7 100644
--- a/drivers/clk/tegra/clk.c
+++ b/drivers/clk/tegra/clk.c
@@ -231,8 +231,10 @@ struct clk ** __init tegra_clk_init(void __iomem *regs, int num, int banks)
periph_banks = banks;
clks = kcalloc(num, sizeof(struct clk *), GFP_KERNEL);
- if (!clks)
+ if (!clks) {
kfree(periph_clk_enb_refcnt);
+ return NULL;
+ }
clk_num = num;
diff --git a/drivers/clk/ti/clk-dra7-atl.c b/drivers/clk/ti/clk-dra7-atl.c
index f65e16c4f3c4..8d4c08b034bd 100644
--- a/drivers/clk/ti/clk-dra7-atl.c
+++ b/drivers/clk/ti/clk-dra7-atl.c
@@ -233,7 +233,6 @@ static int of_dra7_atl_clk_probe(struct platform_device *pdev)
cinfo->iobase = of_iomap(node, 0);
cinfo->dev = &pdev->dev;
pm_runtime_enable(cinfo->dev);
- pm_runtime_irq_safe(cinfo->dev);
pm_runtime_get_sync(cinfo->dev);
atl_write(cinfo, DRA7_ATL_PCLKMUX_REG(0), DRA7_ATL_PCLKMUX);
diff --git a/drivers/clocksource/Kconfig b/drivers/clocksource/Kconfig
index 5fdd76cb1768..cc909e465823 100644
--- a/drivers/clocksource/Kconfig
+++ b/drivers/clocksource/Kconfig
@@ -88,7 +88,7 @@ config ROCKCHIP_TIMER
select TIMER_OF
select CLKSRC_MMIO
help
- Enables the support for the rockchip timer driver.
+ Enables the support for the Rockchip timer driver.
config ARMADA_370_XP_TIMER
bool "Armada 370 and XP timer driver" if COMPILE_TEST
@@ -162,13 +162,13 @@ config NPCM7XX_TIMER
select CLKSRC_MMIO
help
Enable 24-bit TIMER0 and TIMER1 counters in the NPCM7xx architecture,
- While TIMER0 serves as clockevent and TIMER1 serves as clocksource.
+ where TIMER0 serves as clockevent and TIMER1 serves as clocksource.
config CADENCE_TTC_TIMER
bool "Cadence TTC timer driver" if COMPILE_TEST
depends on COMMON_CLK
help
- Enables support for the cadence ttc driver.
+ Enables support for the Cadence TTC driver.
config ASM9260_TIMER
bool "ASM9260 timer driver" if COMPILE_TEST
@@ -190,10 +190,10 @@ config CLKSRC_DBX500_PRCMU
bool "Clocksource PRCMU Timer" if COMPILE_TEST
depends on HAS_IOMEM
help
- Use the always on PRCMU Timer as clocksource
+	  Use the always-on PRCMU Timer as clocksource.
config CLPS711X_TIMER
- bool "Cirrus logic timer driver" if COMPILE_TEST
+ bool "Cirrus Logic timer driver" if COMPILE_TEST
select CLKSRC_MMIO
help
Enables support for the Cirrus Logic PS711 timer.
@@ -205,11 +205,11 @@ config ATLAS7_TIMER
Enables support for the Atlas7 timer.
config MXS_TIMER
- bool "Mxs timer driver" if COMPILE_TEST
+ bool "MXS timer driver" if COMPILE_TEST
select CLKSRC_MMIO
select STMP_DEVICE
help
- Enables support for the Mxs timer.
+ Enables support for the MXS timer.
config PRIMA2_TIMER
bool "Prima2 timer driver" if COMPILE_TEST
@@ -238,10 +238,10 @@ config KEYSTONE_TIMER
Enables support for the Keystone timer.
config INTEGRATOR_AP_TIMER
- bool "Integrator-ap timer driver" if COMPILE_TEST
+ bool "Integrator-AP timer driver" if COMPILE_TEST
select CLKSRC_MMIO
help
- Enables support for the Integrator-ap timer.
+ Enables support for the Integrator-AP timer.
config CLKSRC_EFM32
bool "Clocksource for Energy Micro's EFM32 SoCs" if !ARCH_EFM32
@@ -283,8 +283,8 @@ config CLKSRC_NPS
select TIMER_OF if OF
help
NPS400 clocksource support.
- Got 64 bit counter with update rate up to 1000MHz.
- This counter is accessed via couple of 32 bit memory mapped registers.
+	  It has a 64-bit counter with an update rate of up to 1000 MHz.
+	  This counter is accessed via a couple of 32-bit memory-mapped registers.
config CLKSRC_STM32
bool "Clocksource for STM32 SoCs" if !ARCH_STM32
@@ -305,14 +305,14 @@ config ARC_TIMERS
help
These are legacy 32-bit TIMER0 and TIMER1 counters found on all ARC cores
(ARC700 as well as ARC HS38).
- TIMER0 serves as clockevent while TIMER1 provides clocksource
+ TIMER0 serves as clockevent while TIMER1 provides clocksource.
config ARC_TIMERS_64BIT
bool "Support for 64-bit counters in ARC HS38 cores" if COMPILE_TEST
depends on ARC_TIMERS
select TIMER_OF
help
- This enables 2 different 64-bit timers: RTC (for UP) and GFRC (for SMP)
+ This enables 2 different 64-bit timers: RTC (for UP) and GFRC (for SMP).
RTC is implemented inside the core, while GFRC sits outside the core in
ARConnect IP block. Driver automatically picks one of them for clocksource
as appropriate.
@@ -390,7 +390,7 @@ config ARM_GLOBAL_TIMER
select TIMER_OF if OF
depends on ARM
help
- This options enables support for the ARM global timer unit
+ This option enables support for the ARM global timer unit.
config ARM_TIMER_SP804
bool "Support for Dual Timer SP804 module" if COMPILE_TEST
@@ -403,14 +403,14 @@ config CLKSRC_ARM_GLOBAL_TIMER_SCHED_CLOCK
depends on ARM_GLOBAL_TIMER
default y
help
- Use ARM global timer clock source as sched_clock
+ Use ARM global timer clock source as sched_clock.
config ARMV7M_SYSTICK
bool "Support for the ARMv7M system time" if COMPILE_TEST
select TIMER_OF if OF
select CLKSRC_MMIO
help
- This options enables support for the ARMv7M system timer unit
+ This option enables support for the ARMv7M system timer unit.
config ATMEL_PIT
bool "Atmel PIT support" if COMPILE_TEST
@@ -460,7 +460,7 @@ config VF_PIT_TIMER
bool
select CLKSRC_MMIO
help
- Support for Period Interrupt Timer on Freescale Vybrid Family SoCs.
+ Support for Periodic Interrupt Timer on Freescale Vybrid Family SoCs.
config OXNAS_RPS_TIMER
bool "Oxford Semiconductor OXNAS RPS Timers driver" if COMPILE_TEST
@@ -470,7 +470,7 @@ config OXNAS_RPS_TIMER
This enables support for the Oxford Semiconductor OXNAS RPS timers.
config SYS_SUPPORTS_SH_CMT
- bool
+ bool
config MTK_TIMER
bool "Mediatek timer driver" if COMPILE_TEST
@@ -490,13 +490,13 @@ config SPRD_TIMER
Enables support for the Spreadtrum timer driver.
config SYS_SUPPORTS_SH_MTU2
- bool
+ bool
config SYS_SUPPORTS_SH_TMU
- bool
+ bool
config SYS_SUPPORTS_EM_STI
- bool
+ bool
config CLKSRC_JCORE_PIT
bool "J-Core PIT timer driver" if COMPILE_TEST
@@ -523,7 +523,7 @@ config SH_TIMER_MTU2
help
This enables build of a clockevent driver for the Multi-Function
Timer Pulse Unit 2 (MTU2) hardware available on SoCs from Renesas.
- This hardware comes with 16 bit-timer registers.
+ This hardware comes with 16-bit timer registers.
config RENESAS_OSTM
bool "Renesas OSTM timer driver" if COMPILE_TEST
@@ -580,7 +580,7 @@ config CLKSRC_TANGO_XTAL
select TIMER_OF
select CLKSRC_MMIO
help
- This enables the clocksource for Tango SoC
+ This enables the clocksource for Tango SoC.
config CLKSRC_PXA
bool "Clocksource for PXA or SA-11x0 platform" if COMPILE_TEST
@@ -591,24 +591,24 @@ config CLKSRC_PXA
platforms.
config H8300_TMR8
- bool "Clockevent timer for the H8300 platform" if COMPILE_TEST
- depends on HAS_IOMEM
+ bool "Clockevent timer for the H8300 platform" if COMPILE_TEST
+ depends on HAS_IOMEM
help
This enables the 8 bits timer for the H8300 platform.
config H8300_TMR16
- bool "Clockevent timer for the H83069 platform" if COMPILE_TEST
- depends on HAS_IOMEM
+ bool "Clockevent timer for the H83069 platform" if COMPILE_TEST
+ depends on HAS_IOMEM
help
This enables the 16 bits timer for the H8300 platform with the
- H83069 cpu.
+ H83069 CPU.
config H8300_TPU
- bool "Clocksource for the H8300 platform" if COMPILE_TEST
- depends on HAS_IOMEM
+ bool "Clocksource for the H8300 platform" if COMPILE_TEST
+ depends on HAS_IOMEM
help
This enables the clocksource for the H8300 platform with the
- H8S2678 cpu.
+ H8S2678 CPU.
config CLKSRC_IMX_GPT
bool "Clocksource using i.MX GPT" if COMPILE_TEST
@@ -666,8 +666,8 @@ config CSKY_MP_TIMER
help
Say yes here to enable C-SKY SMP timer driver used for C-SKY SMP
system.
- csky,mptimer is not only used in SMP system, it also could be used
- single core system. It's not a mmio reg and it use mtcr/mfcr instruction.
+	  csky,mptimer is not only used in SMP systems, it can also be used in
+	  single-core systems. It's not an mmio reg and it uses the mtcr/mfcr instructions.
config GX6605S_TIMER
bool "Gx6605s SOC system timer driver" if COMPILE_TEST
@@ -697,4 +697,14 @@ config INGENIC_TIMER
help
Support for the timer/counter unit of the Ingenic JZ SoCs.
+config MICROCHIP_PIT64B
+ bool "Microchip PIT64B support"
+ depends on OF || COMPILE_TEST
+ select CLKSRC_MMIO
+ help
+	  This option enables the Microchip PIT64B timer for Atmel-based
+	  systems. It supports oneshot and periodic modes as well as high
+	  resolution. It is used as a clocksource and a clockevent.
+
endmenu
diff --git a/drivers/clocksource/Makefile b/drivers/clocksource/Makefile
index 4dfe4225ece7..713686faa549 100644
--- a/drivers/clocksource/Makefile
+++ b/drivers/clocksource/Makefile
@@ -88,3 +88,4 @@ obj-$(CONFIG_RISCV_TIMER) += timer-riscv.o
obj-$(CONFIG_CSKY_MP_TIMER) += timer-mp-csky.o
obj-$(CONFIG_GX6605S_TIMER) += timer-gx6605s.o
obj-$(CONFIG_HYPERV_TIMER) += hyperv_timer.o
+obj-$(CONFIG_MICROCHIP_PIT64B) += timer-microchip-pit64b.o
diff --git a/drivers/clocksource/bcm2835_timer.c b/drivers/clocksource/bcm2835_timer.c
index 2b196cbfadb6..b235f446ee50 100644
--- a/drivers/clocksource/bcm2835_timer.c
+++ b/drivers/clocksource/bcm2835_timer.c
@@ -121,7 +121,7 @@ static int __init bcm2835_timer_init(struct device_node *node)
ret = setup_irq(irq, &timer->act);
if (ret) {
pr_err("Can't set up timer IRQ\n");
- goto err_iounmap;
+ goto err_timer_free;
}
clockevents_config_and_register(&timer->evt, freq, 0xf, 0xffffffff);
@@ -130,6 +130,9 @@ static int __init bcm2835_timer_init(struct device_node *node)
return 0;
+err_timer_free:
+ kfree(timer);
+
err_iounmap:
iounmap(base);
return ret;
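The fix above restores the usual reverse-order unwind: each error label releases exactly what was acquired before the failing step, and execution falls through to the earlier labels. A self-contained sketch of the pattern, with hypothetical names (example_timer, EXAMPLE_BASE, example_setup_irq):

	static int __init example_timer_init(void)
	{
		struct example_timer *timer;	/* hypothetical type */
		void __iomem *base;
		int ret;

		base = ioremap(EXAMPLE_BASE, SZ_4K);	/* EXAMPLE_BASE is made up */
		if (!base)
			return -ENXIO;

		timer = kzalloc(sizeof(*timer), GFP_KERNEL);
		if (!timer) {
			ret = -ENOMEM;
			goto err_iounmap;
		}

		ret = example_setup_irq(timer);	/* hypothetical helper */
		if (ret)
			goto err_timer_free;

		return 0;

	err_timer_free:
		kfree(timer);
		/* fall through: the mapping predates the allocation */
	err_iounmap:
		iounmap(base);
		return ret;
	}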
diff --git a/drivers/clocksource/em_sti.c b/drivers/clocksource/em_sti.c
index 9039df4f90e2..ab190dffb1ed 100644
--- a/drivers/clocksource/em_sti.c
+++ b/drivers/clocksource/em_sti.c
@@ -279,9 +279,7 @@ static void em_sti_register_clockevent(struct em_sti_priv *p)
static int em_sti_probe(struct platform_device *pdev)
{
struct em_sti_priv *p;
- struct resource *res;
- int irq;
- int ret;
+ int irq, ret;
p = devm_kzalloc(&pdev->dev, sizeof(*p), GFP_KERNEL);
if (p == NULL)
@@ -295,8 +293,7 @@ static int em_sti_probe(struct platform_device *pdev)
return irq;
/* map memory, let base point to the STI instance */
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- p->base = devm_ioremap_resource(&pdev->dev, res);
+ p->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(p->base))
return PTR_ERR(p->base);
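devm_platform_ioremap_resource() is a convenience wrapper that performs platform_get_resource(pdev, IORESOURCE_MEM, index) followed by devm_ioremap_resource(); the same two-lines-to-one conversion recurs below in kirkwood-cpufreq, tegra186-cpufreq and timer-ti-dm. In sketch form:

	/* Before: fetch the resource, then map it. */
	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	base = devm_ioremap_resource(&pdev->dev, res);

	/* After: one call does both, with identical error semantics. */
	base = devm_platform_ioremap_resource(pdev, 0);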
diff --git a/drivers/clocksource/exynos_mct.c b/drivers/clocksource/exynos_mct.c
index 74cb299f5089..a267fe31ef13 100644
--- a/drivers/clocksource/exynos_mct.c
+++ b/drivers/clocksource/exynos_mct.c
@@ -4,7 +4,7 @@
* Copyright (c) 2011 Samsung Electronics Co., Ltd.
* http://www.samsung.com
*
- * EXYNOS4 MCT(Multi-Core Timer) support
+ * Exynos4 MCT (Multi-Core Timer) support
*/
#include <linux/interrupt.h>
diff --git a/drivers/clocksource/hyperv_timer.c b/drivers/clocksource/hyperv_timer.c
index 287d8d58c21a..9d808d595ca8 100644
--- a/drivers/clocksource/hyperv_timer.c
+++ b/drivers/clocksource/hyperv_timer.c
@@ -66,7 +66,7 @@ static int hv_ce_set_next_event(unsigned long delta,
{
u64 current_tick;
- current_tick = hyperv_cs->read(NULL);
+ current_tick = hv_read_reference_counter();
current_tick += delta;
hv_init_timer(0, current_tick);
return 0;
@@ -302,22 +302,33 @@ EXPORT_SYMBOL_GPL(hv_stimer_global_cleanup);
* the other that uses the TSC reference page feature as defined in the
* TLFS. The MSR version is for compatibility with old versions of
* Hyper-V and 32-bit x86. The TSC reference page version is preferred.
+ *
+ * The Hyper-V clocksource ratings of 250 are chosen to be below the
+ * TSC clocksource rating of 300. In configurations where Hyper-V offers
+ * an InvariantTSC, the TSC is not marked "unstable", so the TSC clocksource
+ * is available and preferred. With the higher rating, it will be the
+ * default. On older hardware and Hyper-V versions, the TSC is marked
+ * "unstable", so no TSC clocksource is created and the selected Hyper-V
+ * clocksource will be the default.
*/
-struct clocksource *hyperv_cs;
-EXPORT_SYMBOL_GPL(hyperv_cs);
+u64 (*hv_read_reference_counter)(void);
+EXPORT_SYMBOL_GPL(hv_read_reference_counter);
-static struct ms_hyperv_tsc_page tsc_pg __aligned(PAGE_SIZE);
+static union {
+ struct ms_hyperv_tsc_page page;
+ u8 reserved[PAGE_SIZE];
+} tsc_pg __aligned(PAGE_SIZE);
struct ms_hyperv_tsc_page *hv_get_tsc_page(void)
{
- return &tsc_pg;
+ return &tsc_pg.page;
}
EXPORT_SYMBOL_GPL(hv_get_tsc_page);
-static u64 notrace read_hv_clock_tsc(struct clocksource *arg)
+static u64 notrace read_hv_clock_tsc(void)
{
- u64 current_tick = hv_read_tsc_page(&tsc_pg);
+ u64 current_tick = hv_read_tsc_page(hv_get_tsc_page());
if (current_tick == U64_MAX)
hv_get_time_ref_count(current_tick);
@@ -325,20 +336,50 @@ static u64 notrace read_hv_clock_tsc(struct clocksource *arg)
return current_tick;
}
+static u64 notrace read_hv_clock_tsc_cs(struct clocksource *arg)
+{
+ return read_hv_clock_tsc();
+}
+
static u64 read_hv_sched_clock_tsc(void)
{
- return read_hv_clock_tsc(NULL) - hv_sched_clock_offset;
+ return read_hv_clock_tsc() - hv_sched_clock_offset;
+}
+
+static void suspend_hv_clock_tsc(struct clocksource *arg)
+{
+ u64 tsc_msr;
+
+ /* Disable the TSC page */
+ hv_get_reference_tsc(tsc_msr);
+ tsc_msr &= ~BIT_ULL(0);
+ hv_set_reference_tsc(tsc_msr);
+}
+
+static void resume_hv_clock_tsc(struct clocksource *arg)
+{
+ phys_addr_t phys_addr = virt_to_phys(&tsc_pg);
+ u64 tsc_msr;
+
+ /* Re-enable the TSC page */
+ hv_get_reference_tsc(tsc_msr);
+ tsc_msr &= GENMASK_ULL(11, 0);
+ tsc_msr |= BIT_ULL(0) | (u64)phys_addr;
+ hv_set_reference_tsc(tsc_msr);
}
static struct clocksource hyperv_cs_tsc = {
.name = "hyperv_clocksource_tsc_page",
- .rating = 400,
- .read = read_hv_clock_tsc,
+ .rating = 250,
+ .read = read_hv_clock_tsc_cs,
.mask = CLOCKSOURCE_MASK(64),
.flags = CLOCK_SOURCE_IS_CONTINUOUS,
+	.suspend = suspend_hv_clock_tsc,
+ .resume = resume_hv_clock_tsc,
};
-static u64 notrace read_hv_clock_msr(struct clocksource *arg)
+static u64 notrace read_hv_clock_msr(void)
{
u64 current_tick;
/*
@@ -350,15 +391,20 @@ static u64 notrace read_hv_clock_msr(struct clocksource *arg)
return current_tick;
}
+static u64 notrace read_hv_clock_msr_cs(struct clocksource *arg)
+{
+ return read_hv_clock_msr();
+}
+
static u64 read_hv_sched_clock_msr(void)
{
- return read_hv_clock_msr(NULL) - hv_sched_clock_offset;
+ return read_hv_clock_msr() - hv_sched_clock_offset;
}
static struct clocksource hyperv_cs_msr = {
.name = "hyperv_clocksource_msr",
- .rating = 400,
- .read = read_hv_clock_msr,
+ .rating = 250,
+ .read = read_hv_clock_msr_cs,
.mask = CLOCKSOURCE_MASK(64),
.flags = CLOCK_SOURCE_IS_CONTINUOUS,
};
@@ -371,8 +417,8 @@ static bool __init hv_init_tsc_clocksource(void)
if (!(ms_hyperv.features & HV_MSR_REFERENCE_TSC_AVAILABLE))
return false;
- hyperv_cs = &hyperv_cs_tsc;
- phys_addr = virt_to_phys(&tsc_pg);
+ hv_read_reference_counter = read_hv_clock_tsc;
+ phys_addr = virt_to_phys(hv_get_tsc_page());
/*
* The Hyper-V TLFS specifies to preserve the value of reserved
@@ -389,7 +435,7 @@ static bool __init hv_init_tsc_clocksource(void)
hv_set_clocksource_vdso(hyperv_cs_tsc);
clocksource_register_hz(&hyperv_cs_tsc, NSEC_PER_SEC/100);
- hv_sched_clock_offset = hyperv_cs->read(hyperv_cs);
+ hv_sched_clock_offset = hv_read_reference_counter();
hv_setup_sched_clock(read_hv_sched_clock_tsc);
return true;
@@ -411,10 +457,10 @@ void __init hv_init_clocksource(void)
if (!(ms_hyperv.features & HV_MSR_TIME_REF_COUNT_AVAILABLE))
return;
- hyperv_cs = &hyperv_cs_msr;
+ hv_read_reference_counter = read_hv_clock_msr;
clocksource_register_hz(&hyperv_cs_msr, NSEC_PER_SEC/100);
- hv_sched_clock_offset = hyperv_cs->read(hyperv_cs);
+ hv_sched_clock_offset = hv_read_reference_counter();
hv_setup_sched_clock(read_hv_sched_clock_msr);
}
EXPORT_SYMBOL_GPL(hv_init_clocksource);
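Two details in the hunks above are worth restating. The rating drop to 250 deliberately sits below the TSC clocksource's 300 so that, when an invariant TSC is available, the raw TSC wins; the Hyper-V source only becomes the default when the TSC is marked unstable. And the new suspend/resume callbacks toggle the reference-TSC MSR, whose layout makes the enable sequence safe; a worked restatement of resume_hv_clock_tsc()'s arithmetic:

	/*
	 * Reference-TSC MSR layout: bit 0 = enable, bits 11:1 reserved
	 * (preserved per the TLFS), bits 63:12 = page frame of the TSC page.
	 * phys_addr comes from virt_to_phys() on the page-aligned tsc_pg,
	 * so its low 12 bits are zero and the OR cannot clobber the
	 * preserved reserved field.
	 */
	tsc_msr &= GENMASK_ULL(11, 0);		/* keep bits 11:0 */
	tsc_msr |= BIT_ULL(0) | (u64)phys_addr;	/* set enable + page address */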
diff --git a/drivers/clocksource/sh_cmt.c b/drivers/clocksource/sh_cmt.c
index 9cde50cb3220..12ac75f7571f 100644
--- a/drivers/clocksource/sh_cmt.c
+++ b/drivers/clocksource/sh_cmt.c
@@ -905,7 +905,7 @@ static int sh_cmt_map_memory(struct sh_cmt_device *cmt)
return -ENXIO;
}
- cmt->mapbase = ioremap_nocache(mem->start, resource_size(mem));
+ cmt->mapbase = ioremap(mem->start, resource_size(mem));
if (cmt->mapbase == NULL) {
dev_err(&cmt->pdev->dev, "failed to remap I/O memory\n");
return -ENXIO;
diff --git a/drivers/clocksource/sh_mtu2.c b/drivers/clocksource/sh_mtu2.c
index 64526e50d471..bfccb31e94ad 100644
--- a/drivers/clocksource/sh_mtu2.c
+++ b/drivers/clocksource/sh_mtu2.c
@@ -377,7 +377,7 @@ static int sh_mtu2_map_memory(struct sh_mtu2_device *mtu)
return -ENXIO;
}
- mtu->mapbase = ioremap_nocache(res->start, resource_size(res));
+ mtu->mapbase = ioremap(res->start, resource_size(res));
if (mtu->mapbase == NULL)
return -ENXIO;
diff --git a/drivers/clocksource/sh_tmu.c b/drivers/clocksource/sh_tmu.c
index d49690d15536..d41df9ba3725 100644
--- a/drivers/clocksource/sh_tmu.c
+++ b/drivers/clocksource/sh_tmu.c
@@ -486,7 +486,7 @@ static int sh_tmu_map_memory(struct sh_tmu_device *tmu)
return -ENXIO;
}
- tmu->mapbase = ioremap_nocache(res->start, resource_size(res));
+ tmu->mapbase = ioremap(res->start, resource_size(res));
if (tmu->mapbase == NULL)
return -ENXIO;
diff --git a/drivers/clocksource/timer-cadence-ttc.c b/drivers/clocksource/timer-cadence-ttc.c
index 88fe2e9ba9a3..38858e141731 100644
--- a/drivers/clocksource/timer-cadence-ttc.c
+++ b/drivers/clocksource/timer-cadence-ttc.c
@@ -15,6 +15,8 @@
#include <linux/of_irq.h>
#include <linux/slab.h>
#include <linux/sched_clock.h>
+#include <linux/module.h>
+#include <linux/of_platform.h>
/*
* This driver configures the 2 16/32-bit count-up timers as follows:
@@ -464,13 +466,7 @@ static int __init ttc_setup_clockevent(struct clk *clk,
return 0;
}
-/**
- * ttc_timer_init - Initialize the timer
- *
- * Initializes the timer hardware and register the clock source and clock event
- * timers with Linux kernal timer framework
- */
-static int __init ttc_timer_init(struct device_node *timer)
+static int __init ttc_timer_probe(struct platform_device *pdev)
{
unsigned int irq;
void __iomem *timer_baseaddr;
@@ -478,6 +474,7 @@ static int __init ttc_timer_init(struct device_node *timer)
static int initialized;
int clksel, ret;
u32 timer_width = 16;
+ struct device_node *timer = pdev->dev.of_node;
if (initialized)
return 0;
@@ -532,4 +529,17 @@ static int __init ttc_timer_init(struct device_node *timer)
return 0;
}
-TIMER_OF_DECLARE(ttc, "cdns,ttc", ttc_timer_init);
+static const struct of_device_id ttc_timer_of_match[] = {
+ {.compatible = "cdns,ttc"},
+ {},
+};
+
+MODULE_DEVICE_TABLE(of, ttc_timer_of_match);
+
+static struct platform_driver ttc_timer_driver = {
+ .driver = {
+ .name = "cdns_ttc_timer",
+ .of_match_table = ttc_timer_of_match,
+ },
+};
+builtin_platform_driver_probe(ttc_timer_driver, ttc_timer_probe);
diff --git a/drivers/clocksource/timer-microchip-pit64b.c b/drivers/clocksource/timer-microchip-pit64b.c
new file mode 100644
index 000000000000..bd63d3484838
--- /dev/null
+++ b/drivers/clocksource/timer-microchip-pit64b.c
@@ -0,0 +1,451 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * 64-bit Periodic Interval Timer driver
+ *
+ * Copyright (C) 2019 Microchip Technology Inc. and its subsidiaries
+ *
+ * Author: Claudiu Beznea <claudiu.beznea@microchip.com>
+ */
+
+#include <linux/clk.h>
+#include <linux/clockchips.h>
+#include <linux/interrupt.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+#include <linux/sched_clock.h>
+#include <linux/slab.h>
+
+#define MCHP_PIT64B_CR 0x00 /* Control Register */
+#define MCHP_PIT64B_CR_START BIT(0)
+#define MCHP_PIT64B_CR_SWRST BIT(8)
+
+#define MCHP_PIT64B_MR 0x04 /* Mode Register */
+#define MCHP_PIT64B_MR_CONT BIT(0)
+#define MCHP_PIT64B_MR_ONE_SHOT (0)
+#define MCHP_PIT64B_MR_SGCLK BIT(3)
+#define MCHP_PIT64B_MR_PRES GENMASK(11, 8)
+
+#define MCHP_PIT64B_LSB_PR 0x08 /* LSB Period Register */
+
+#define MCHP_PIT64B_MSB_PR 0x0C /* MSB Period Register */
+
+#define MCHP_PIT64B_IER 0x10 /* Interrupt Enable Register */
+#define MCHP_PIT64B_IER_PERIOD BIT(0)
+
+#define MCHP_PIT64B_ISR 0x1C /* Interrupt Status Register */
+
+#define MCHP_PIT64B_TLSBR 0x20 /* Timer LSB Register */
+
+#define MCHP_PIT64B_TMSBR 0x24 /* Timer MSB Register */
+
+#define MCHP_PIT64B_PRES_MAX 0x10
+#define MCHP_PIT64B_LSBMASK GENMASK_ULL(31, 0)
+#define MCHP_PIT64B_PRES_TO_MODE(p) (MCHP_PIT64B_MR_PRES & ((p) << 8))
+#define MCHP_PIT64B_MODE_TO_PRES(m) ((MCHP_PIT64B_MR_PRES & (m)) >> 8)
+#define MCHP_PIT64B_DEF_CS_FREQ 5000000UL /* 5 MHz */
+#define MCHP_PIT64B_DEF_CE_FREQ		32768		/* 32 kHz */
+
+#define MCHP_PIT64B_NAME "pit64b"
+
+/**
+ * struct mchp_pit64b_timer - PIT64B timer data structure
+ * @base: base address of PIT64B hardware block
+ * @pclk: PIT64B's peripheral clock
+ * @gclk: PIT64B's generic clock
+ * @mode: precomputed value for mode register
+ */
+struct mchp_pit64b_timer {
+ void __iomem *base;
+ struct clk *pclk;
+ struct clk *gclk;
+ u32 mode;
+};
+
+/**
+ * struct mchp_pit64b_clkevt - PIT64B clockevent data structure
+ * @timer: PIT64B timer
+ * @clkevt: clockevent
+ */
+struct mchp_pit64b_clkevt {
+ struct mchp_pit64b_timer timer;
+ struct clock_event_device clkevt;
+};
+
+#define to_mchp_pit64b_timer(x) \
+ ((struct mchp_pit64b_timer *)container_of(x,\
+ struct mchp_pit64b_clkevt, clkevt))
+
+/* Base address for clocksource timer. */
+static void __iomem *mchp_pit64b_cs_base;
+/* Default cycles for clockevent timer. */
+static u64 mchp_pit64b_ce_cycles;
+
+static inline u64 mchp_pit64b_cnt_read(void __iomem *base)
+{
+ unsigned long flags;
+ u32 low, high;
+
+ raw_local_irq_save(flags);
+
+ /*
+	 * When using a 64-bit period, TLSB must be read first, followed by the
+	 * read of TMSB. This sequence yields an atomic read of the 64-bit timer
+	 * value regardless of the time elapsed between the accesses.
+ */
+ low = readl_relaxed(base + MCHP_PIT64B_TLSBR);
+ high = readl_relaxed(base + MCHP_PIT64B_TMSBR);
+
+ raw_local_irq_restore(flags);
+
+ return (((u64)high << 32) | low);
+}
+
+static inline void mchp_pit64b_reset(struct mchp_pit64b_timer *timer,
+ u64 cycles, u32 mode, u32 irqs)
+{
+ u32 low, high;
+
+ low = cycles & MCHP_PIT64B_LSBMASK;
+ high = cycles >> 32;
+
+ writel_relaxed(MCHP_PIT64B_CR_SWRST, timer->base + MCHP_PIT64B_CR);
+ writel_relaxed(mode | timer->mode, timer->base + MCHP_PIT64B_MR);
+ writel_relaxed(high, timer->base + MCHP_PIT64B_MSB_PR);
+ writel_relaxed(low, timer->base + MCHP_PIT64B_LSB_PR);
+ writel_relaxed(irqs, timer->base + MCHP_PIT64B_IER);
+ writel_relaxed(MCHP_PIT64B_CR_START, timer->base + MCHP_PIT64B_CR);
+}
+
+static u64 mchp_pit64b_clksrc_read(struct clocksource *cs)
+{
+ return mchp_pit64b_cnt_read(mchp_pit64b_cs_base);
+}
+
+static u64 mchp_pit64b_sched_read_clk(void)
+{
+ return mchp_pit64b_cnt_read(mchp_pit64b_cs_base);
+}
+
+static int mchp_pit64b_clkevt_shutdown(struct clock_event_device *cedev)
+{
+ struct mchp_pit64b_timer *timer = to_mchp_pit64b_timer(cedev);
+
+ writel_relaxed(MCHP_PIT64B_CR_SWRST, timer->base + MCHP_PIT64B_CR);
+
+ return 0;
+}
+
+static int mchp_pit64b_clkevt_set_periodic(struct clock_event_device *cedev)
+{
+ struct mchp_pit64b_timer *timer = to_mchp_pit64b_timer(cedev);
+
+ mchp_pit64b_reset(timer, mchp_pit64b_ce_cycles, MCHP_PIT64B_MR_CONT,
+ MCHP_PIT64B_IER_PERIOD);
+
+ return 0;
+}
+
+static int mchp_pit64b_clkevt_set_next_event(unsigned long evt,
+ struct clock_event_device *cedev)
+{
+ struct mchp_pit64b_timer *timer = to_mchp_pit64b_timer(cedev);
+
+ mchp_pit64b_reset(timer, evt, MCHP_PIT64B_MR_ONE_SHOT,
+ MCHP_PIT64B_IER_PERIOD);
+
+ return 0;
+}
+
+static void mchp_pit64b_clkevt_suspend(struct clock_event_device *cedev)
+{
+ struct mchp_pit64b_timer *timer = to_mchp_pit64b_timer(cedev);
+
+ writel_relaxed(MCHP_PIT64B_CR_SWRST, timer->base + MCHP_PIT64B_CR);
+ if (timer->mode & MCHP_PIT64B_MR_SGCLK)
+ clk_disable_unprepare(timer->gclk);
+ clk_disable_unprepare(timer->pclk);
+}
+
+static void mchp_pit64b_clkevt_resume(struct clock_event_device *cedev)
+{
+ struct mchp_pit64b_timer *timer = to_mchp_pit64b_timer(cedev);
+
+ clk_prepare_enable(timer->pclk);
+ if (timer->mode & MCHP_PIT64B_MR_SGCLK)
+ clk_prepare_enable(timer->gclk);
+}
+
+static irqreturn_t mchp_pit64b_interrupt(int irq, void *dev_id)
+{
+ struct mchp_pit64b_clkevt *irq_data = dev_id;
+
+ /* Need to clear the interrupt. */
+ readl_relaxed(irq_data->timer.base + MCHP_PIT64B_ISR);
+
+ irq_data->clkevt.event_handler(&irq_data->clkevt);
+
+ return IRQ_HANDLED;
+}
+
+static void __init mchp_pit64b_pres_compute(u32 *pres, u32 clk_rate,
+ u32 max_rate)
+{
+ u32 tmp;
+
+ for (*pres = 0; *pres < MCHP_PIT64B_PRES_MAX; (*pres)++) {
+ tmp = clk_rate / (*pres + 1);
+ if (tmp <= max_rate)
+ break;
+ }
+
+	/* Use the biggest prescaler if we didn't match one. */
+ if (*pres == MCHP_PIT64B_PRES_MAX)
+ *pres = MCHP_PIT64B_PRES_MAX - 1;
+}
+
+/**
+ * mchp_pit64b_init_mode - prepare PIT64B mode register value to be used at
+ * runtime; this includes prescaler and SGCLK bit
+ *
+ * PIT64B timer may be fed by gclk or pclk. When gclk is used its rate has to
+ * be at least 3 times lower that pclk's rate. pclk rate is fixed, gclk rate
+ * could be changed via clock APIs. The chosen clock (pclk or gclk) could be
+ * divided by the internal PIT64B's divider.
+ *
+ * This function first tries to use GCLK by requesting the desired rate from
+ * the PMC and then using the internal PIT64B prescaler, if any, to reach the
+ * requested rate. If PCLK/GCLK < 3 (a condition required by the PIT64B
+ * hardware), the function falls back on using PCLK as the clock source for
+ * the PIT64B timer, choosing the highest prescaler if none matches the
+ * requested frequency.
+ *
+ * The PIT64B block in relation to the PMC:
+ *
+ * PIT64B
+ * PMC +------------------------------------+
+ * +----+ | +-----+ |
+ * | |-->gclk -->|-->| | +---------+ +-----+ |
+ * | | | | MUX |--->| Divider |->|timer| |
+ * | |-->pclk -->|-->| | +---------+ +-----+ |
+ * +----+ | +-----+ |
+ * | ^ |
+ * | sel |
+ * +------------------------------------+
+ *
+ * Where:
+ * - gclk rate <= pclk rate/3
+ * - gclk rate could be requested from PMC
+ * - pclk rate is fixed (cannot be requested from PMC)
+ */
+static int __init mchp_pit64b_init_mode(struct mchp_pit64b_timer *timer,
+ unsigned long max_rate)
+{
+ unsigned long pclk_rate, diff = 0, best_diff = ULONG_MAX;
+ long gclk_round = 0;
+ u32 pres, best_pres = 0;
+
+ pclk_rate = clk_get_rate(timer->pclk);
+ if (!pclk_rate)
+ return -EINVAL;
+
+ timer->mode = 0;
+
+ /* Try using GCLK. */
+ gclk_round = clk_round_rate(timer->gclk, max_rate);
+ if (gclk_round < 0)
+ goto pclk;
+
+ if (pclk_rate / gclk_round < 3)
+ goto pclk;
+
+ mchp_pit64b_pres_compute(&pres, gclk_round, max_rate);
+ best_diff = abs(gclk_round / (pres + 1) - max_rate);
+ best_pres = pres;
+
+ if (!best_diff) {
+ timer->mode |= MCHP_PIT64B_MR_SGCLK;
+ goto done;
+ }
+
+pclk:
+ /* Check if requested rate could be obtained using PCLK. */
+ mchp_pit64b_pres_compute(&pres, pclk_rate, max_rate);
+ diff = abs(pclk_rate / (pres + 1) - max_rate);
+
+ if (best_diff > diff) {
+ /* Use PCLK. */
+ best_pres = pres;
+ } else {
+ /* Use GCLK. */
+ timer->mode |= MCHP_PIT64B_MR_SGCLK;
+ clk_set_rate(timer->gclk, gclk_round);
+ }
+
+done:
+ timer->mode |= MCHP_PIT64B_PRES_TO_MODE(best_pres);
+
+ pr_info("PIT64B: using clk=%s with prescaler %u, freq=%lu [Hz]\n",
+ timer->mode & MCHP_PIT64B_MR_SGCLK ? "gclk" : "pclk", best_pres,
+ timer->mode & MCHP_PIT64B_MR_SGCLK ?
+ gclk_round / (best_pres + 1) : pclk_rate / (best_pres + 1));
+
+ return 0;
+}
+
+static int __init mchp_pit64b_init_clksrc(struct mchp_pit64b_timer *timer,
+ u32 clk_rate)
+{
+ int ret;
+
+ mchp_pit64b_reset(timer, ULLONG_MAX, MCHP_PIT64B_MR_CONT, 0);
+
+ mchp_pit64b_cs_base = timer->base;
+
+ ret = clocksource_mmio_init(timer->base, MCHP_PIT64B_NAME, clk_rate,
+ 210, 64, mchp_pit64b_clksrc_read);
+ if (ret) {
+ pr_debug("clksrc: Failed to register PIT64B clocksource!\n");
+
+ /* Stop timer. */
+ writel_relaxed(MCHP_PIT64B_CR_SWRST,
+ timer->base + MCHP_PIT64B_CR);
+
+ return ret;
+ }
+
+ sched_clock_register(mchp_pit64b_sched_read_clk, 64, clk_rate);
+
+ return 0;
+}
+
+static int __init mchp_pit64b_init_clkevt(struct mchp_pit64b_timer *timer,
+ u32 clk_rate, u32 irq)
+{
+ struct mchp_pit64b_clkevt *ce;
+ int ret;
+
+ ce = kzalloc(sizeof(*ce), GFP_KERNEL);
+ if (!ce)
+ return -ENOMEM;
+
+ mchp_pit64b_ce_cycles = DIV_ROUND_CLOSEST(clk_rate, HZ);
+
+ ce->timer.base = timer->base;
+ ce->timer.pclk = timer->pclk;
+ ce->timer.gclk = timer->gclk;
+ ce->timer.mode = timer->mode;
+ ce->clkevt.name = MCHP_PIT64B_NAME;
+ ce->clkevt.features = CLOCK_EVT_FEAT_ONESHOT | CLOCK_EVT_FEAT_PERIODIC;
+ ce->clkevt.rating = 150;
+ ce->clkevt.set_state_shutdown = mchp_pit64b_clkevt_shutdown;
+ ce->clkevt.set_state_periodic = mchp_pit64b_clkevt_set_periodic;
+ ce->clkevt.set_next_event = mchp_pit64b_clkevt_set_next_event;
+ ce->clkevt.suspend = mchp_pit64b_clkevt_suspend;
+ ce->clkevt.resume = mchp_pit64b_clkevt_resume;
+ ce->clkevt.cpumask = cpumask_of(0);
+ ce->clkevt.irq = irq;
+
+ ret = request_irq(irq, mchp_pit64b_interrupt, IRQF_TIMER,
+ "pit64b_tick", ce);
+ if (ret) {
+ pr_debug("clkevt: Failed to setup PIT64B IRQ\n");
+ kfree(ce);
+ return ret;
+ }
+
+ clockevents_config_and_register(&ce->clkevt, clk_rate, 1, ULONG_MAX);
+
+ return 0;
+}
+
+static int __init mchp_pit64b_dt_init_timer(struct device_node *node,
+ bool clkevt)
+{
+ u32 freq = clkevt ? MCHP_PIT64B_DEF_CE_FREQ : MCHP_PIT64B_DEF_CS_FREQ;
+ struct mchp_pit64b_timer timer;
+ unsigned long clk_rate;
+ u32 irq = 0;
+ int ret;
+
+ /* Parse DT node. */
+ timer.pclk = of_clk_get_by_name(node, "pclk");
+ if (IS_ERR(timer.pclk))
+ return PTR_ERR(timer.pclk);
+
+ timer.gclk = of_clk_get_by_name(node, "gclk");
+ if (IS_ERR(timer.gclk))
+ return PTR_ERR(timer.gclk);
+
+ timer.base = of_iomap(node, 0);
+ if (!timer.base)
+ return -ENXIO;
+
+ if (clkevt) {
+ irq = irq_of_parse_and_map(node, 0);
+ if (!irq) {
+ ret = -ENODEV;
+ goto io_unmap;
+ }
+ }
+
+	/* Initialize mode (prescaler + SGCLK bit). To be used at runtime. */
+ ret = mchp_pit64b_init_mode(&timer, freq);
+ if (ret)
+ goto irq_unmap;
+
+ ret = clk_prepare_enable(timer.pclk);
+ if (ret)
+ goto irq_unmap;
+
+ if (timer.mode & MCHP_PIT64B_MR_SGCLK) {
+ ret = clk_prepare_enable(timer.gclk);
+ if (ret)
+ goto pclk_unprepare;
+
+ clk_rate = clk_get_rate(timer.gclk);
+ } else {
+ clk_rate = clk_get_rate(timer.pclk);
+ }
+ clk_rate = clk_rate / (MCHP_PIT64B_MODE_TO_PRES(timer.mode) + 1);
+
+ if (clkevt)
+ ret = mchp_pit64b_init_clkevt(&timer, clk_rate, irq);
+ else
+ ret = mchp_pit64b_init_clksrc(&timer, clk_rate);
+
+ if (ret)
+ goto gclk_unprepare;
+
+ return 0;
+
+gclk_unprepare:
+ if (timer.mode & MCHP_PIT64B_MR_SGCLK)
+ clk_disable_unprepare(timer.gclk);
+pclk_unprepare:
+ clk_disable_unprepare(timer.pclk);
+irq_unmap:
+ irq_dispose_mapping(irq);
+io_unmap:
+ iounmap(timer.base);
+
+ return ret;
+}
+
+static int __init mchp_pit64b_dt_init(struct device_node *node)
+{
+ static int inits;
+
+ switch (inits++) {
+ case 0:
+ /* 1st request, register clockevent. */
+ return mchp_pit64b_dt_init_timer(node, true);
+ case 1:
+ /* 2nd request, register clocksource. */
+ return mchp_pit64b_dt_init_timer(node, false);
+ }
+
+ /* The rest, don't care. */
+ return -EINVAL;
+}
+
+TIMER_OF_DECLARE(mchp_pit64b, "microchip,sam9x60-pit64b", mchp_pit64b_dt_init);
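mchp_pit64b_cnt_read() above depends on the hardware latching TMSB when TLSB is read, which is why a single LSB-then-MSB pass is atomic. On counters without such latching, the usual alternative is a high-low-high retry loop; a hedged sketch for comparison, with hypothetical register offsets (EXAMPLE_MSB/EXAMPLE_LSB):

	static u64 example_cnt_read(void __iomem *base)
	{
		u32 high, low;

		/* Re-read the high word until it is stable across the low read. */
		do {
			high = readl_relaxed(base + EXAMPLE_MSB);
			low = readl_relaxed(base + EXAMPLE_LSB);
		} while (high != readl_relaxed(base + EXAMPLE_MSB));

		return ((u64)high << 32) | low;
	}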
diff --git a/drivers/clocksource/timer-ti-dm.c b/drivers/clocksource/timer-ti-dm.c
index 5394d9dbdfbc..269a994d6a99 100644
--- a/drivers/clocksource/timer-ti-dm.c
+++ b/drivers/clocksource/timer-ti-dm.c
@@ -780,7 +780,6 @@ static int omap_dm_timer_probe(struct platform_device *pdev)
{
unsigned long flags;
struct omap_dm_timer *timer;
- struct resource *mem, *irq;
struct device *dev = &pdev->dev;
const struct dmtimer_platform_data *pdata;
int ret;
@@ -796,24 +795,16 @@ static int omap_dm_timer_probe(struct platform_device *pdev)
return -ENODEV;
}
- irq = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
- if (unlikely(!irq)) {
- dev_err(dev, "%s: no IRQ resource.\n", __func__);
- return -ENODEV;
- }
-
- mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (unlikely(!mem)) {
- dev_err(dev, "%s: no memory resource.\n", __func__);
- return -ENODEV;
- }
-
timer = devm_kzalloc(dev, sizeof(*timer), GFP_KERNEL);
if (!timer)
return -ENOMEM;
+ timer->irq = platform_get_irq(pdev, 0);
+ if (timer->irq < 0)
+ return timer->irq;
+
timer->fclk = ERR_PTR(-ENODEV);
- timer->io_base = devm_ioremap_resource(dev, mem);
+ timer->io_base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(timer->io_base))
return PTR_ERR(timer->io_base);
@@ -836,7 +827,6 @@ static int omap_dm_timer_probe(struct platform_device *pdev)
if (pdata)
timer->errata = pdata->timer_errata;
- timer->irq = irq->start;
timer->pdev = pdev;
pm_runtime_enable(dev);
diff --git a/drivers/cpufreq/brcmstb-avs-cpufreq.c b/drivers/cpufreq/brcmstb-avs-cpufreq.c
index 77b0e5d0fb13..4f86ce2db34f 100644
--- a/drivers/cpufreq/brcmstb-avs-cpufreq.c
+++ b/drivers/cpufreq/brcmstb-avs-cpufreq.c
@@ -455,6 +455,8 @@ static unsigned int brcm_avs_cpufreq_get(unsigned int cpu)
struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
struct private_data *priv = policy->driver_data;
+ cpufreq_cpu_put(policy);
+
return brcm_avs_get_frequency(priv->base);
}
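cpufreq_cpu_get() returns the policy with a reference held (and pins the driver module); the leak fixed above, and the reboot-notifier fixes in s3c2416 and s5pv210 further below, all come down to balancing it with cpufreq_cpu_put(). The safe shape of the pattern:

	struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);

	if (!policy)
		return -ENODEV;	/* no policy registered for this CPU */

	/* ... read whatever is needed from the policy ... */

	cpufreq_cpu_put(policy);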
diff --git a/drivers/cpufreq/cppc_cpufreq.c b/drivers/cpufreq/cppc_cpufreq.c
index 8d8da763adc5..bda0b2406fba 100644
--- a/drivers/cpufreq/cppc_cpufreq.c
+++ b/drivers/cpufreq/cppc_cpufreq.c
@@ -39,7 +39,7 @@
static struct cppc_cpudata **all_cpu_data;
struct cppc_workaround_oem_info {
- char oem_id[ACPI_OEM_ID_SIZE +1];
+ char oem_id[ACPI_OEM_ID_SIZE + 1];
char oem_table_id[ACPI_OEM_TABLE_ID_SIZE + 1];
u32 oem_revision;
};
@@ -93,9 +93,13 @@ static void cppc_check_hisi_workaround(void)
for (i = 0; i < ARRAY_SIZE(wa_info); i++) {
if (!memcmp(wa_info[i].oem_id, tbl->oem_id, ACPI_OEM_ID_SIZE) &&
!memcmp(wa_info[i].oem_table_id, tbl->oem_table_id, ACPI_OEM_TABLE_ID_SIZE) &&
- wa_info[i].oem_revision == tbl->oem_revision)
+ wa_info[i].oem_revision == tbl->oem_revision) {
apply_hisi_workaround = true;
+ break;
+ }
}
+
+ acpi_put_table(tbl);
}
/* Callback function used to retrieve the max frequency from DMI */
@@ -217,7 +221,7 @@ static int cppc_cpufreq_set_target(struct cpufreq_policy *policy,
return ret;
}
-static int cppc_verify_policy(struct cpufreq_policy *policy)
+static int cppc_verify_policy(struct cpufreq_policy_data *policy)
{
cpufreq_verify_within_cpu_limits(policy);
return 0;
diff --git a/drivers/cpufreq/cpufreq-dt-platdev.c b/drivers/cpufreq/cpufreq-dt-platdev.c
index aba591d57c67..f2ae9cd455c1 100644
--- a/drivers/cpufreq/cpufreq-dt-platdev.c
+++ b/drivers/cpufreq/cpufreq-dt-platdev.c
@@ -109,6 +109,7 @@ static const struct of_device_id blacklist[] __initconst = {
{ .compatible = "fsl,imx8mq", },
{ .compatible = "fsl,imx8mm", },
{ .compatible = "fsl,imx8mn", },
+ { .compatible = "fsl,imx8mp", },
{ .compatible = "marvell,armadaxp", },
diff --git a/drivers/cpufreq/cpufreq-nforce2.c b/drivers/cpufreq/cpufreq-nforce2.c
index cd53272e2fa2..f7a7bcf6f52e 100644
--- a/drivers/cpufreq/cpufreq-nforce2.c
+++ b/drivers/cpufreq/cpufreq-nforce2.c
@@ -291,7 +291,7 @@ static int nforce2_target(struct cpufreq_policy *policy,
* nforce2_verify - verifies a new CPUFreq policy
* @policy: new policy
*/
-static int nforce2_verify(struct cpufreq_policy *policy)
+static int nforce2_verify(struct cpufreq_policy_data *policy)
{
unsigned int fsb_pol_max;
diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
index 77114a3897fb..4adac3a8c265 100644
--- a/drivers/cpufreq/cpufreq.c
+++ b/drivers/cpufreq/cpufreq.c
@@ -74,6 +74,9 @@ static void cpufreq_exit_governor(struct cpufreq_policy *policy);
static int cpufreq_start_governor(struct cpufreq_policy *policy);
static void cpufreq_stop_governor(struct cpufreq_policy *policy);
static void cpufreq_governor_limits(struct cpufreq_policy *policy);
+static int cpufreq_set_policy(struct cpufreq_policy *policy,
+ struct cpufreq_governor *new_gov,
+ unsigned int new_pol);
/**
* Two notifier lists: the "policy" list is involved in the
@@ -616,25 +619,22 @@ static struct cpufreq_governor *find_governor(const char *str_governor)
return NULL;
}
-static int cpufreq_parse_policy(char *str_governor,
- struct cpufreq_policy *policy)
+static unsigned int cpufreq_parse_policy(char *str_governor)
{
- if (!strncasecmp(str_governor, "performance", CPUFREQ_NAME_LEN)) {
- policy->policy = CPUFREQ_POLICY_PERFORMANCE;
- return 0;
- }
- if (!strncasecmp(str_governor, "powersave", CPUFREQ_NAME_LEN)) {
- policy->policy = CPUFREQ_POLICY_POWERSAVE;
- return 0;
- }
- return -EINVAL;
+ if (!strncasecmp(str_governor, "performance", CPUFREQ_NAME_LEN))
+ return CPUFREQ_POLICY_PERFORMANCE;
+
+ if (!strncasecmp(str_governor, "powersave", CPUFREQ_NAME_LEN))
+ return CPUFREQ_POLICY_POWERSAVE;
+
+ return CPUFREQ_POLICY_UNKNOWN;
}
/**
* cpufreq_parse_governor - parse a governor string only for has_target()
+ * @str_governor: Governor name.
*/
-static int cpufreq_parse_governor(char *str_governor,
- struct cpufreq_policy *policy)
+static struct cpufreq_governor *cpufreq_parse_governor(char *str_governor)
{
struct cpufreq_governor *t;
@@ -648,7 +648,7 @@ static int cpufreq_parse_governor(char *str_governor,
ret = request_module("cpufreq_%s", str_governor);
if (ret)
- return -EINVAL;
+ return NULL;
mutex_lock(&cpufreq_governor_mutex);
@@ -659,12 +659,7 @@ static int cpufreq_parse_governor(char *str_governor,
mutex_unlock(&cpufreq_governor_mutex);
- if (t) {
- policy->governor = t;
- return 0;
- }
-
- return -EINVAL;
+ return t;
}
/**
@@ -765,28 +760,33 @@ static ssize_t show_scaling_governor(struct cpufreq_policy *policy, char *buf)
static ssize_t store_scaling_governor(struct cpufreq_policy *policy,
const char *buf, size_t count)
{
+ char str_governor[16];
int ret;
- char str_governor[16];
- struct cpufreq_policy new_policy;
-
- memcpy(&new_policy, policy, sizeof(*policy));
ret = sscanf(buf, "%15s", str_governor);
if (ret != 1)
return -EINVAL;
if (cpufreq_driver->setpolicy) {
- if (cpufreq_parse_policy(str_governor, &new_policy))
+ unsigned int new_pol;
+
+ new_pol = cpufreq_parse_policy(str_governor);
+ if (!new_pol)
return -EINVAL;
+
+ ret = cpufreq_set_policy(policy, NULL, new_pol);
} else {
- if (cpufreq_parse_governor(str_governor, &new_policy))
+ struct cpufreq_governor *new_gov;
+
+ new_gov = cpufreq_parse_governor(str_governor);
+ if (!new_gov)
return -EINVAL;
- }
- ret = cpufreq_set_policy(policy, &new_policy);
+ ret = cpufreq_set_policy(policy, new_gov,
+ CPUFREQ_POLICY_UNKNOWN);
- if (new_policy.governor)
- module_put(new_policy.governor->owner);
+ module_put(new_gov->owner);
+ }
return ret ? ret : count;
}
@@ -1053,40 +1053,33 @@ __weak struct cpufreq_governor *cpufreq_default_governor(void)
static int cpufreq_init_policy(struct cpufreq_policy *policy)
{
- struct cpufreq_governor *gov = NULL, *def_gov = NULL;
- struct cpufreq_policy new_policy;
-
- memcpy(&new_policy, policy, sizeof(*policy));
-
- def_gov = cpufreq_default_governor();
+ struct cpufreq_governor *def_gov = cpufreq_default_governor();
+ struct cpufreq_governor *gov = NULL;
+ unsigned int pol = CPUFREQ_POLICY_UNKNOWN;
if (has_target()) {
- /*
- * Update governor of new_policy to the governor used before
- * hotplug
- */
+ /* Update policy governor to the one used before hotplug. */
gov = find_governor(policy->last_governor);
if (gov) {
pr_debug("Restoring governor %s for cpu %d\n",
- policy->governor->name, policy->cpu);
- } else {
- if (!def_gov)
- return -ENODATA;
+ policy->governor->name, policy->cpu);
+ } else if (def_gov) {
gov = def_gov;
+ } else {
+ return -ENODATA;
}
- new_policy.governor = gov;
} else {
/* Use the default policy if there is no last_policy. */
if (policy->last_policy) {
- new_policy.policy = policy->last_policy;
+ pol = policy->last_policy;
+ } else if (def_gov) {
+ pol = cpufreq_parse_policy(def_gov->name);
} else {
- if (!def_gov)
- return -ENODATA;
- cpufreq_parse_policy(def_gov->name, &new_policy);
+ return -ENODATA;
}
}
- return cpufreq_set_policy(policy, &new_policy);
+ return cpufreq_set_policy(policy, gov, pol);
}
static int cpufreq_add_policy_cpu(struct cpufreq_policy *policy, unsigned int cpu)
@@ -1114,13 +1107,10 @@ static int cpufreq_add_policy_cpu(struct cpufreq_policy *policy, unsigned int cp
void refresh_frequency_limits(struct cpufreq_policy *policy)
{
- struct cpufreq_policy new_policy;
-
if (!policy_is_inactive(policy)) {
- new_policy = *policy;
pr_debug("updating policy for CPU %u\n", policy->cpu);
- cpufreq_set_policy(policy, &new_policy);
+ cpufreq_set_policy(policy, policy->governor, policy->policy);
}
}
EXPORT_SYMBOL(refresh_frequency_limits);
@@ -2364,46 +2354,49 @@ EXPORT_SYMBOL(cpufreq_get_policy);
/**
* cpufreq_set_policy - Modify cpufreq policy parameters.
* @policy: Policy object to modify.
- * @new_policy: New policy data.
+ * @new_gov: Policy governor pointer.
+ * @new_pol: Policy value (for drivers with built-in governors).
*
- * Pass @new_policy to the cpufreq driver's ->verify() callback. Next, copy the
- * min and max parameters of @new_policy to @policy and either invoke the
- * driver's ->setpolicy() callback (if present) or carry out a governor update
- * for @policy. That is, run the current governor's ->limits() callback (if the
- * governor field in @new_policy points to the same object as the one in
- * @policy) or replace the governor for @policy with the new one stored in
- * @new_policy.
+ * Invoke the cpufreq driver's ->verify() callback to sanity-check the frequency
+ * limits to be set for the policy, update @policy with the verified limits
+ * values and either invoke the driver's ->setpolicy() callback (if present) or
+ * carry out a governor update for @policy. That is, run the current governor's
+ * ->limits() callback (if @new_gov points to the same object as the one in
+ * @policy) or replace the governor for @policy with @new_gov.
*
* The cpuinfo part of @policy is not updated by this function.
*/
-int cpufreq_set_policy(struct cpufreq_policy *policy,
- struct cpufreq_policy *new_policy)
+static int cpufreq_set_policy(struct cpufreq_policy *policy,
+ struct cpufreq_governor *new_gov,
+ unsigned int new_pol)
{
+ struct cpufreq_policy_data new_data;
struct cpufreq_governor *old_gov;
int ret;
- pr_debug("setting new policy for CPU %u: %u - %u kHz\n",
- new_policy->cpu, new_policy->min, new_policy->max);
-
- memcpy(&new_policy->cpuinfo, &policy->cpuinfo, sizeof(policy->cpuinfo));
-
+ memcpy(&new_data.cpuinfo, &policy->cpuinfo, sizeof(policy->cpuinfo));
+ new_data.freq_table = policy->freq_table;
+ new_data.cpu = policy->cpu;
/*
* PM QoS framework collects all the requests from users and provide us
* the final aggregated value here.
*/
- new_policy->min = freq_qos_read_value(&policy->constraints, FREQ_QOS_MIN);
- new_policy->max = freq_qos_read_value(&policy->constraints, FREQ_QOS_MAX);
+ new_data.min = freq_qos_read_value(&policy->constraints, FREQ_QOS_MIN);
+ new_data.max = freq_qos_read_value(&policy->constraints, FREQ_QOS_MAX);
+
+ pr_debug("setting new policy for CPU %u: %u - %u kHz\n",
+ new_data.cpu, new_data.min, new_data.max);
/*
* Verify that the CPU speed can be set within these limits and make sure
* that min <= max.
*/
- ret = cpufreq_driver->verify(new_policy);
+ ret = cpufreq_driver->verify(&new_data);
if (ret)
return ret;
- policy->min = new_policy->min;
- policy->max = new_policy->max;
+ policy->min = new_data.min;
+ policy->max = new_data.max;
trace_cpu_frequency_limits(policy);
policy->cached_target_freq = UINT_MAX;
@@ -2412,12 +2405,12 @@ int cpufreq_set_policy(struct cpufreq_policy *policy,
policy->min, policy->max);
if (cpufreq_driver->setpolicy) {
- policy->policy = new_policy->policy;
+ policy->policy = new_pol;
pr_debug("setting range\n");
return cpufreq_driver->setpolicy(policy);
}
- if (new_policy->governor == policy->governor) {
+ if (new_gov == policy->governor) {
pr_debug("governor limits update\n");
cpufreq_governor_limits(policy);
return 0;
@@ -2434,7 +2427,7 @@ int cpufreq_set_policy(struct cpufreq_policy *policy,
}
/* start new governor */
- policy->governor = new_policy->governor;
+ policy->governor = new_gov;
ret = cpufreq_init_governor(policy);
if (!ret) {
ret = cpufreq_start_governor(policy);
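After this rework cpufreq_set_policy() takes the governor pointer and the policy value as separate arguments, and callers pass exactly one of them meaningfully. Restating the two call shapes from the store_scaling_governor() hunk above:

	if (cpufreq_driver->setpolicy) {
		/* Driver with a built-in governor: pass a policy constant. */
		ret = cpufreq_set_policy(policy, NULL, new_pol);
	} else {
		/* Generic governor: pass the governor object itself. */
		ret = cpufreq_set_policy(policy, new_gov, CPUFREQ_POLICY_UNKNOWN);
	}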
diff --git a/drivers/cpufreq/freq_table.c b/drivers/cpufreq/freq_table.c
index ded427e0a488..e117b0059123 100644
--- a/drivers/cpufreq/freq_table.c
+++ b/drivers/cpufreq/freq_table.c
@@ -60,7 +60,7 @@ int cpufreq_frequency_table_cpuinfo(struct cpufreq_policy *policy,
return 0;
}
-int cpufreq_frequency_table_verify(struct cpufreq_policy *policy,
+int cpufreq_frequency_table_verify(struct cpufreq_policy_data *policy,
struct cpufreq_frequency_table *table)
{
struct cpufreq_frequency_table *pos;
@@ -100,7 +100,7 @@ EXPORT_SYMBOL_GPL(cpufreq_frequency_table_verify);
* Generic routine to verify policy & frequency table, requires driver to set
* policy->freq_table prior to it.
*/
-int cpufreq_generic_frequency_table_verify(struct cpufreq_policy *policy)
+int cpufreq_generic_frequency_table_verify(struct cpufreq_policy_data *policy)
{
if (!policy->freq_table)
return -ENODEV;
diff --git a/drivers/cpufreq/gx-suspmod.c b/drivers/cpufreq/gx-suspmod.c
index e97b5733aa24..75b3ef7ec679 100644
--- a/drivers/cpufreq/gx-suspmod.c
+++ b/drivers/cpufreq/gx-suspmod.c
@@ -328,7 +328,7 @@ static void gx_set_cpuspeed(struct cpufreq_policy *policy, unsigned int khz)
* for the hardware supported by the driver.
*/
-static int cpufreq_gx_verify(struct cpufreq_policy *policy)
+static int cpufreq_gx_verify(struct cpufreq_policy_data *policy)
{
unsigned int tmp_freq = 0;
u8 tmp1, tmp2;
diff --git a/drivers/cpufreq/imx-cpufreq-dt.c b/drivers/cpufreq/imx-cpufreq-dt.c
index 85a6efd6b68f..6cb8193421ea 100644
--- a/drivers/cpufreq/imx-cpufreq-dt.c
+++ b/drivers/cpufreq/imx-cpufreq-dt.c
@@ -35,7 +35,8 @@ static int imx_cpufreq_dt_probe(struct platform_device *pdev)
if (ret)
return ret;
- if (of_machine_is_compatible("fsl,imx8mn"))
+ if (of_machine_is_compatible("fsl,imx8mn") ||
+ of_machine_is_compatible("fsl,imx8mp"))
speed_grade = (cell_value & IMX8MN_OCOTP_CFG3_SPEED_GRADE_MASK)
>> OCOTP_CFG3_SPEED_GRADE_SHIFT;
else
@@ -54,7 +55,8 @@ static int imx_cpufreq_dt_probe(struct platform_device *pdev)
if (of_machine_is_compatible("fsl,imx8mm") ||
of_machine_is_compatible("fsl,imx8mq"))
speed_grade = 1;
- if (of_machine_is_compatible("fsl,imx8mn"))
+ if (of_machine_is_compatible("fsl,imx8mn") ||
+ of_machine_is_compatible("fsl,imx8mp"))
speed_grade = 0xb;
}
diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c
index d2fa3e9ccd97..c81e1ff29069 100644
--- a/drivers/cpufreq/intel_pstate.c
+++ b/drivers/cpufreq/intel_pstate.c
@@ -172,7 +172,7 @@ struct vid_data {
/**
* struct global_params - Global parameters, mostly tunable via sysfs.
* @no_turbo: Whether or not to use turbo P-states.
- * @turbo_disabled: Whethet or not turbo P-states are available at all,
+ * @turbo_disabled: Whether or not turbo P-states are available at all,
* based on the MSR_IA32_MISC_ENABLE value and whether or
* not the maximum reported turbo P-state is different from
* the maximum reported non-turbo one.
@@ -2036,8 +2036,9 @@ static int intel_pstate_get_max_freq(struct cpudata *cpu)
cpu->pstate.max_freq : cpu->pstate.turbo_freq;
}
-static void intel_pstate_update_perf_limits(struct cpufreq_policy *policy,
- struct cpudata *cpu)
+static void intel_pstate_update_perf_limits(struct cpudata *cpu,
+ unsigned int policy_min,
+ unsigned int policy_max)
{
int max_freq = intel_pstate_get_max_freq(cpu);
int32_t max_policy_perf, min_policy_perf;
@@ -2056,18 +2057,17 @@ static void intel_pstate_update_perf_limits(struct cpufreq_policy *policy,
turbo_max = cpu->pstate.turbo_pstate;
}
- max_policy_perf = max_state * policy->max / max_freq;
- if (policy->max == policy->min) {
+ max_policy_perf = max_state * policy_max / max_freq;
+ if (policy_max == policy_min) {
min_policy_perf = max_policy_perf;
} else {
- min_policy_perf = max_state * policy->min / max_freq;
+ min_policy_perf = max_state * policy_min / max_freq;
min_policy_perf = clamp_t(int32_t, min_policy_perf,
0, max_policy_perf);
}
pr_debug("cpu:%d max_state %d min_policy_perf:%d max_policy_perf:%d\n",
- policy->cpu, max_state,
- min_policy_perf, max_policy_perf);
+ cpu->cpu, max_state, min_policy_perf, max_policy_perf);
/* Normalize user input to [min_perf, max_perf] */
if (per_cpu_limits) {
@@ -2081,7 +2081,7 @@ static void intel_pstate_update_perf_limits(struct cpufreq_policy *policy,
global_min = DIV_ROUND_UP(turbo_max * global.min_perf_pct, 100);
global_min = clamp_t(int32_t, global_min, 0, global_max);
- pr_debug("cpu:%d global_min:%d global_max:%d\n", policy->cpu,
+ pr_debug("cpu:%d global_min:%d global_max:%d\n", cpu->cpu,
global_min, global_max);
cpu->min_perf_ratio = max(min_policy_perf, global_min);
@@ -2094,7 +2094,7 @@ static void intel_pstate_update_perf_limits(struct cpufreq_policy *policy,
cpu->max_perf_ratio);
}
- pr_debug("cpu:%d max_perf_ratio:%d min_perf_ratio:%d\n", policy->cpu,
+ pr_debug("cpu:%d max_perf_ratio:%d min_perf_ratio:%d\n", cpu->cpu,
cpu->max_perf_ratio,
cpu->min_perf_ratio);
}
@@ -2114,7 +2114,7 @@ static int intel_pstate_set_policy(struct cpufreq_policy *policy)
mutex_lock(&intel_pstate_limits_lock);
- intel_pstate_update_perf_limits(policy, cpu);
+ intel_pstate_update_perf_limits(cpu, policy->min, policy->max);
if (cpu->policy == CPUFREQ_POLICY_PERFORMANCE) {
/*
@@ -2143,8 +2143,8 @@ static int intel_pstate_set_policy(struct cpufreq_policy *policy)
return 0;
}
-static void intel_pstate_adjust_policy_max(struct cpufreq_policy *policy,
- struct cpudata *cpu)
+static void intel_pstate_adjust_policy_max(struct cpudata *cpu,
+ struct cpufreq_policy_data *policy)
{
if (!hwp_active &&
cpu->pstate.max_pstate_physical > cpu->pstate.max_pstate &&
@@ -2155,7 +2155,7 @@ static void intel_pstate_adjust_policy_max(struct cpufreq_policy *policy,
}
}
-static int intel_pstate_verify_policy(struct cpufreq_policy *policy)
+static int intel_pstate_verify_policy(struct cpufreq_policy_data *policy)
{
struct cpudata *cpu = all_cpu_data[policy->cpu];
@@ -2163,11 +2163,7 @@ static int intel_pstate_verify_policy(struct cpufreq_policy *policy)
cpufreq_verify_within_limits(policy, policy->cpuinfo.min_freq,
intel_pstate_get_max_freq(cpu));
- if (policy->policy != CPUFREQ_POLICY_POWERSAVE &&
- policy->policy != CPUFREQ_POLICY_PERFORMANCE)
- return -EINVAL;
-
- intel_pstate_adjust_policy_max(policy, cpu);
+ intel_pstate_adjust_policy_max(cpu, policy);
return 0;
}
@@ -2268,7 +2264,7 @@ static struct cpufreq_driver intel_pstate = {
.name = "intel_pstate",
};
-static int intel_cpufreq_verify_policy(struct cpufreq_policy *policy)
+static int intel_cpufreq_verify_policy(struct cpufreq_policy_data *policy)
{
struct cpudata *cpu = all_cpu_data[policy->cpu];
@@ -2276,9 +2272,9 @@ static int intel_cpufreq_verify_policy(struct cpufreq_policy *policy)
cpufreq_verify_within_limits(policy, policy->cpuinfo.min_freq,
intel_pstate_get_max_freq(cpu));
- intel_pstate_adjust_policy_max(policy, cpu);
+ intel_pstate_adjust_policy_max(cpu, policy);
- intel_pstate_update_perf_limits(policy, cpu);
+ intel_pstate_update_perf_limits(cpu, policy->min, policy->max);
return 0;
}
diff --git a/drivers/cpufreq/kirkwood-cpufreq.c b/drivers/cpufreq/kirkwood-cpufreq.c
index cb74bdc5baaa..70ad8fe1d78b 100644
--- a/drivers/cpufreq/kirkwood-cpufreq.c
+++ b/drivers/cpufreq/kirkwood-cpufreq.c
@@ -102,13 +102,11 @@ static struct cpufreq_driver kirkwood_cpufreq_driver = {
static int kirkwood_cpufreq_probe(struct platform_device *pdev)
{
struct device_node *np;
- struct resource *res;
int err;
priv.dev = &pdev->dev;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- priv.base = devm_ioremap_resource(&pdev->dev, res);
+ priv.base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(priv.base))
return PTR_ERR(priv.base);
diff --git a/drivers/cpufreq/longrun.c b/drivers/cpufreq/longrun.c
index 64b8689f7a4a..0b08be8bff76 100644
--- a/drivers/cpufreq/longrun.c
+++ b/drivers/cpufreq/longrun.c
@@ -122,7 +122,7 @@ static int longrun_set_policy(struct cpufreq_policy *policy)
* Validates a new CPUFreq policy. This function has to be called with
* cpufreq_driver locked.
*/
-static int longrun_verify_policy(struct cpufreq_policy *policy)
+static int longrun_verify_policy(struct cpufreq_policy_data *policy)
{
if (!policy)
return -EINVAL;
@@ -130,10 +130,6 @@ static int longrun_verify_policy(struct cpufreq_policy *policy)
policy->cpu = 0;
cpufreq_verify_within_cpu_limits(policy);
- if ((policy->policy != CPUFREQ_POLICY_POWERSAVE) &&
- (policy->policy != CPUFREQ_POLICY_PERFORMANCE))
- return -EINVAL;
-
return 0;
}
diff --git a/drivers/cpufreq/loongson2_cpufreq.c b/drivers/cpufreq/loongson2_cpufreq.c
index e9caa9586982..909f40fbcde2 100644
--- a/drivers/cpufreq/loongson2_cpufreq.c
+++ b/drivers/cpufreq/loongson2_cpufreq.c
@@ -144,9 +144,11 @@ static void loongson2_cpu_wait(void)
u32 cpu_freq;
spin_lock_irqsave(&loongson2_wait_lock, flags);
- cpu_freq = LOONGSON_CHIPCFG(0);
- LOONGSON_CHIPCFG(0) &= ~0x7; /* Put CPU into wait mode */
- LOONGSON_CHIPCFG(0) = cpu_freq; /* Restore CPU state */
+ cpu_freq = readl(LOONGSON_CHIPCFG);
+ /* Put CPU into wait mode */
+ writel(readl(LOONGSON_CHIPCFG) & ~0x7, LOONGSON_CHIPCFG);
+ /* Restore CPU state */
+ writel(cpu_freq, LOONGSON_CHIPCFG);
spin_unlock_irqrestore(&loongson2_wait_lock, flags);
local_irq_enable();
}
diff --git a/drivers/cpufreq/pcc-cpufreq.c b/drivers/cpufreq/pcc-cpufreq.c
index fdc767fdbe6a..5789fe7a94bd 100644
--- a/drivers/cpufreq/pcc-cpufreq.c
+++ b/drivers/cpufreq/pcc-cpufreq.c
@@ -109,7 +109,7 @@ struct pcc_cpu {
static struct pcc_cpu __percpu *pcc_cpu_info;
-static int pcc_cpufreq_verify(struct cpufreq_policy *policy)
+static int pcc_cpufreq_verify(struct cpufreq_policy_data *policy)
{
cpufreq_verify_within_cpu_limits(policy);
return 0;
@@ -445,7 +445,7 @@ static int __init pcc_cpufreq_probe(void)
goto out_free;
}
- pcch_virt_addr = ioremap_nocache(mem_resource->minimum,
+ pcch_virt_addr = ioremap(mem_resource->minimum,
mem_resource->address_length);
if (pcch_virt_addr == NULL) {
pr_debug("probe: could not map shared mem region\n");
diff --git a/drivers/cpufreq/s3c2416-cpufreq.c b/drivers/cpufreq/s3c2416-cpufreq.c
index 106910351c41..5c221bc90210 100644
--- a/drivers/cpufreq/s3c2416-cpufreq.c
+++ b/drivers/cpufreq/s3c2416-cpufreq.c
@@ -304,6 +304,7 @@ static int s3c2416_cpufreq_reboot_notifier_evt(struct notifier_block *this,
{
struct s3c2416_data *s3c_freq = &s3c2416_cpufreq;
int ret;
+ struct cpufreq_policy *policy;
mutex_lock(&cpufreq_lock);
@@ -318,7 +319,16 @@ static int s3c2416_cpufreq_reboot_notifier_evt(struct notifier_block *this,
*/
if (s3c_freq->is_dvs) {
pr_debug("cpufreq: leave dvs on reboot\n");
- ret = cpufreq_driver_target(cpufreq_cpu_get(0), FREQ_SLEEP, 0);
+
+ policy = cpufreq_cpu_get(0);
+ if (!policy) {
+		pr_debug("cpufreq: failed to get policy for cpu0\n");
+ return NOTIFY_BAD;
+ }
+
+ ret = cpufreq_driver_target(policy, FREQ_SLEEP, 0);
+ cpufreq_cpu_put(policy);
+
if (ret < 0)
return NOTIFY_BAD;
}
diff --git a/drivers/cpufreq/s5pv210-cpufreq.c b/drivers/cpufreq/s5pv210-cpufreq.c
index 5d10030f2560..e84281e2561d 100644
--- a/drivers/cpufreq/s5pv210-cpufreq.c
+++ b/drivers/cpufreq/s5pv210-cpufreq.c
@@ -555,8 +555,17 @@ static int s5pv210_cpufreq_reboot_notifier_event(struct notifier_block *this,
unsigned long event, void *ptr)
{
int ret;
+ struct cpufreq_policy *policy;
+
+ policy = cpufreq_cpu_get(0);
+ if (!policy) {
+		pr_debug("cpufreq: failed to get policy for cpu0\n");
+ return NOTIFY_BAD;
+ }
+
+ ret = cpufreq_driver_target(policy, SLEEP_FREQ, 0);
+ cpufreq_cpu_put(policy);
- ret = cpufreq_driver_target(cpufreq_cpu_get(0), SLEEP_FREQ, 0);
if (ret < 0)
return NOTIFY_BAD;
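
The s3c2416 and s5pv210 reboot notifiers now follow the same reference-safe pattern: take a reference on cpu0's policy, check it for NULL, use it, then drop the reference. A sketch (TARGET_FREQ is a hypothetical placeholder):

	struct cpufreq_policy *policy;
	int ret;

	policy = cpufreq_cpu_get(0);	/* takes a reference; may return NULL */
	if (!policy)
		return NOTIFY_BAD;

	ret = cpufreq_driver_target(policy, TARGET_FREQ, 0);
	cpufreq_cpu_put(policy);	/* drop the reference */

	if (ret < 0)
		return NOTIFY_BAD;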
diff --git a/drivers/cpufreq/sh-cpufreq.c b/drivers/cpufreq/sh-cpufreq.c
index 5096c0ab781b..0ac265d47ef0 100644
--- a/drivers/cpufreq/sh-cpufreq.c
+++ b/drivers/cpufreq/sh-cpufreq.c
@@ -87,7 +87,7 @@ static int sh_cpufreq_target(struct cpufreq_policy *policy,
return work_on_cpu(policy->cpu, __sh_cpufreq_target, &data);
}
-static int sh_cpufreq_verify(struct cpufreq_policy *policy)
+static int sh_cpufreq_verify(struct cpufreq_policy_data *policy)
{
struct clk *cpuclk = &per_cpu(sh_cpuclk, policy->cpu);
struct cpufreq_frequency_table *freq_table;
diff --git a/drivers/cpufreq/tegra186-cpufreq.c b/drivers/cpufreq/tegra186-cpufreq.c
index bcecb068b51b..2e233ad72758 100644
--- a/drivers/cpufreq/tegra186-cpufreq.c
+++ b/drivers/cpufreq/tegra186-cpufreq.c
@@ -187,7 +187,6 @@ static int tegra186_cpufreq_probe(struct platform_device *pdev)
{
struct tegra186_cpufreq_data *data;
struct tegra_bpmp *bpmp;
- struct resource *res;
unsigned int i = 0, err;
data = devm_kzalloc(&pdev->dev, sizeof(*data), GFP_KERNEL);
@@ -205,8 +204,7 @@ static int tegra186_cpufreq_probe(struct platform_device *pdev)
if (IS_ERR(bpmp))
return PTR_ERR(bpmp);
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- data->regs = devm_ioremap_resource(&pdev->dev, res);
+ data->regs = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(data->regs)) {
err = PTR_ERR(data->regs);
goto put_bpmp;
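
Here, as in the clps711x and kirkwood cpuidle drivers further down, devm_platform_ioremap_resource() folds the platform_get_resource() + devm_ioremap_resource() pair into one call. The resulting idiom, sketched:

	void __iomem *regs;

	regs = devm_platform_ioremap_resource(pdev, 0);	/* index 0: first MEM resource */
	if (IS_ERR(regs))
		return PTR_ERR(regs);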
diff --git a/drivers/cpufreq/unicore2-cpufreq.c b/drivers/cpufreq/unicore2-cpufreq.c
index 707dbc1b7ac8..98d392196df2 100644
--- a/drivers/cpufreq/unicore2-cpufreq.c
+++ b/drivers/cpufreq/unicore2-cpufreq.c
@@ -22,7 +22,7 @@ static struct cpufreq_driver ucv2_driver;
/* make sure that only the "userspace" governor is run
* -- anything else wouldn't make sense on this platform, anyway.
*/
-static int ucv2_verify_speed(struct cpufreq_policy *policy)
+static int ucv2_verify_speed(struct cpufreq_policy_data *policy)
{
if (policy->cpu)
return -EINVAL;
diff --git a/drivers/cpuidle/Kconfig.arm b/drivers/cpuidle/Kconfig.arm
index a224d33dda7f..62272ecfa771 100644
--- a/drivers/cpuidle/Kconfig.arm
+++ b/drivers/cpuidle/Kconfig.arm
@@ -25,7 +25,7 @@ config ARM_PSCI_CPUIDLE
config ARM_BIG_LITTLE_CPUIDLE
bool "Support for ARM big.LITTLE processors"
- depends on ARCH_VEXPRESS_TC2_PM || ARCH_EXYNOS
+ depends on ARCH_VEXPRESS_TC2_PM || ARCH_EXYNOS || COMPILE_TEST
depends on MCPM && !ARM64
select ARM_CPU_SUSPEND
select CPU_IDLE_MULTIPLE_DRIVERS
@@ -51,13 +51,13 @@ config ARM_HIGHBANK_CPUIDLE
config ARM_KIRKWOOD_CPUIDLE
bool "CPU Idle Driver for Marvell Kirkwood SoCs"
- depends on MACH_KIRKWOOD && !ARM64
+ depends on (MACH_KIRKWOOD || COMPILE_TEST) && !ARM64
help
This adds the CPU Idle driver for Marvell Kirkwood SoCs.
config ARM_ZYNQ_CPUIDLE
bool "CPU Idle Driver for Xilinx Zynq processors"
- depends on ARCH_ZYNQ && !ARM64
+ depends on (ARCH_ZYNQ || COMPILE_TEST) && !ARM64
help
Select this to enable cpuidle on Xilinx Zynq processors.
@@ -70,19 +70,19 @@ config ARM_U8500_CPUIDLE
config ARM_AT91_CPUIDLE
bool "Cpu Idle Driver for the AT91 processors"
default y
- depends on ARCH_AT91 && !ARM64
+ depends on (ARCH_AT91 || COMPILE_TEST) && !ARM64
help
Select this to enable cpuidle for AT91 processors.
config ARM_EXYNOS_CPUIDLE
bool "Cpu Idle Driver for the Exynos processors"
- depends on ARCH_EXYNOS && !ARM64
+ depends on (ARCH_EXYNOS || COMPILE_TEST) && !ARM64
select ARCH_NEEDS_CPU_IDLE_COUPLED if SMP
help
Select this to enable cpuidle for Exynos processors.
config ARM_MVEBU_V7_CPUIDLE
bool "CPU Idle Driver for mvebu v7 family processors"
- depends on ARCH_MVEBU && !ARM64
+ depends on (ARCH_MVEBU || COMPILE_TEST) && !ARM64
help
Select this to enable cpuidle on Armada 370, 38x and XP processors.
diff --git a/drivers/cpuidle/coupled.c b/drivers/cpuidle/coupled.c
index b607278df25b..04003b90dc49 100644
--- a/drivers/cpuidle/coupled.c
+++ b/drivers/cpuidle/coupled.c
@@ -89,6 +89,7 @@
* @coupled_cpus: mask of cpus that are part of the coupled set
* @requested_state: array of requested states for cpus in the coupled set
* @ready_waiting_counts: combined count of cpus in ready or waiting loops
+ * @abort_barrier: synchronisation point for abort cases
* @online_count: count of cpus that are online
* @refcnt: reference count of cpuidle devices that are using this struct
* @prevent: flag to prevent coupled idle while a cpu is hotplugging
@@ -338,7 +339,7 @@ static void cpuidle_coupled_poke(int cpu)
/**
* cpuidle_coupled_poke_others - wake up all other cpus that may be waiting
- * @dev: struct cpuidle_device for this cpu
+ * @this_cpu: target cpu
* @coupled: the struct coupled that contains the current cpu
*
* Calls cpuidle_coupled_poke on all other online cpus.
@@ -355,7 +356,7 @@ static void cpuidle_coupled_poke_others(int this_cpu,
/**
* cpuidle_coupled_set_waiting - mark this cpu as in the wait loop
- * @dev: struct cpuidle_device for this cpu
+ * @cpu: target cpu
* @coupled: the struct coupled that contains the current cpu
* @next_state: the index in drv->states of the requested state for this cpu
*
@@ -376,7 +377,7 @@ static int cpuidle_coupled_set_waiting(int cpu,
/**
* cpuidle_coupled_set_not_waiting - mark this cpu as leaving the wait loop
- * @dev: struct cpuidle_device for this cpu
+ * @cpu: target cpu
* @coupled: the struct coupled that contains the current cpu
*
* Removes the requested idle state for the specified cpuidle device.
@@ -412,7 +413,7 @@ static void cpuidle_coupled_set_done(int cpu, struct cpuidle_coupled *coupled)
/**
* cpuidle_coupled_clear_pokes - spin until the poke interrupt is processed
- * @cpu - this cpu
+ * @cpu: this cpu
*
* Turns on interrupts and spins until any outstanding poke interrupts have
* been processed and the poke bit has been cleared.
diff --git a/drivers/cpuidle/cpuidle-clps711x.c b/drivers/cpuidle/cpuidle-clps711x.c
index 6e36740f5719..fc22c59b6c73 100644
--- a/drivers/cpuidle/cpuidle-clps711x.c
+++ b/drivers/cpuidle/cpuidle-clps711x.c
@@ -37,10 +37,7 @@ static struct cpuidle_driver clps711x_idle_driver = {
static int __init clps711x_cpuidle_probe(struct platform_device *pdev)
{
- struct resource *res;
-
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- clps711x_halt = devm_ioremap_resource(&pdev->dev, res);
+ clps711x_halt = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(clps711x_halt))
return PTR_ERR(clps711x_halt);
diff --git a/drivers/cpuidle/cpuidle-kirkwood.c b/drivers/cpuidle/cpuidle-kirkwood.c
index d23d8f468c12..511c4f46027a 100644
--- a/drivers/cpuidle/cpuidle-kirkwood.c
+++ b/drivers/cpuidle/cpuidle-kirkwood.c
@@ -55,10 +55,7 @@ static struct cpuidle_driver kirkwood_idle_driver = {
/* Initialize CPU idle by registering the idle states */
static int kirkwood_cpuidle_probe(struct platform_device *pdev)
{
- struct resource *res;
-
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- ddr_operation_base = devm_ioremap_resource(&pdev->dev, res);
+ ddr_operation_base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(ddr_operation_base))
return PTR_ERR(ddr_operation_base);
diff --git a/drivers/cpuidle/cpuidle.c b/drivers/cpuidle/cpuidle.c
index 33d19c8eb027..de81298051b3 100644
--- a/drivers/cpuidle/cpuidle.c
+++ b/drivers/cpuidle/cpuidle.c
@@ -121,6 +121,9 @@ void cpuidle_use_deepest_state(u64 latency_limit_ns)
* cpuidle_find_deepest_state - Find the deepest available idle state.
* @drv: cpuidle driver for the given CPU.
* @dev: cpuidle device for the given CPU.
+ * @latency_limit_ns: Idle state exit latency limit
+ *
+ * Return: the index of the deepest available idle state.
*/
int cpuidle_find_deepest_state(struct cpuidle_driver *drv,
struct cpuidle_device *dev,
@@ -572,10 +575,14 @@ static int __cpuidle_register_device(struct cpuidle_device *dev)
if (!try_module_get(drv->owner))
return -EINVAL;
- for (i = 0; i < drv->state_count; i++)
+ for (i = 0; i < drv->state_count; i++) {
if (drv->states[i].flags & CPUIDLE_FLAG_UNUSABLE)
dev->states_usage[i].disable |= CPUIDLE_STATE_DISABLED_BY_DRIVER;
+ if (drv->states[i].flags & CPUIDLE_FLAG_OFF)
+ dev->states_usage[i].disable |= CPUIDLE_STATE_DISABLED_BY_USER;
+ }
+
per_cpu(cpuidle_devices, dev->cpu) = dev;
list_add(&dev->device_list, &cpuidle_detected_devices);
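
With this hunk, a state the driver tags with CPUIDLE_FLAG_OFF starts out disabled exactly as if a user had written to its sysfs "disable" attribute, and can be re-enabled from userspace; the default_status attribute added to sysfs later in this series reports which way a state started. A hypothetical driver table marking one state off by default (sketch only, omitting the mandatory ->enter() callbacks):

	static struct cpuidle_state example_states[] = {
		{ .name = "C1", .exit_latency = 2, },
		{ .name = "C6", .exit_latency = 100, .flags = CPUIDLE_FLAG_OFF, },
	};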
diff --git a/drivers/cpuidle/driver.c b/drivers/cpuidle/driver.c
index ce6a5f80fb83..4070e573bf43 100644
--- a/drivers/cpuidle/driver.c
+++ b/drivers/cpuidle/driver.c
@@ -155,8 +155,6 @@ static void __cpuidle_driver_init(struct cpuidle_driver *drv)
{
int i;
- drv->refcnt = 0;
-
/*
* Use all possible CPUs as the default, because if the kernel boots
* with some CPUs offline and then we online one of them, the CPU
@@ -240,9 +238,6 @@ static int __cpuidle_register_driver(struct cpuidle_driver *drv)
*/
static void __cpuidle_unregister_driver(struct cpuidle_driver *drv)
{
- if (WARN_ON(drv->refcnt > 0))
- return;
-
if (drv->bctimer) {
drv->bctimer = 0;
on_each_cpu_mask(drv->cpumask, cpuidle_setup_broadcast_timer,
@@ -350,47 +345,6 @@ struct cpuidle_driver *cpuidle_get_cpu_driver(struct cpuidle_device *dev)
EXPORT_SYMBOL_GPL(cpuidle_get_cpu_driver);
/**
- * cpuidle_driver_ref - get a reference to the driver.
- *
- * Increment the reference counter of the cpuidle driver associated with
- * the current CPU.
- *
- * Returns a pointer to the driver, or NULL if the current CPU has no driver.
- */
-struct cpuidle_driver *cpuidle_driver_ref(void)
-{
- struct cpuidle_driver *drv;
-
- spin_lock(&cpuidle_driver_lock);
-
- drv = cpuidle_get_driver();
- if (drv)
- drv->refcnt++;
-
- spin_unlock(&cpuidle_driver_lock);
- return drv;
-}
-
-/**
- * cpuidle_driver_unref - puts down the refcount for the driver
- *
- * Decrement the reference counter of the cpuidle driver associated with
- * the current CPU.
- */
-void cpuidle_driver_unref(void)
-{
- struct cpuidle_driver *drv;
-
- spin_lock(&cpuidle_driver_lock);
-
- drv = cpuidle_get_driver();
- if (drv && !WARN_ON(drv->refcnt <= 0))
- drv->refcnt--;
-
- spin_unlock(&cpuidle_driver_lock);
-}
-
-/**
* cpuidle_driver_state_disabled - Disable or enable an idle state
* @drv: cpuidle driver owning the state
* @idx: State index
diff --git a/drivers/cpuidle/governors/teo.c b/drivers/cpuidle/governors/teo.c
index de7e706efd46..6deaaf5f05b5 100644
--- a/drivers/cpuidle/governors/teo.c
+++ b/drivers/cpuidle/governors/teo.c
@@ -198,7 +198,7 @@ static void teo_update(struct cpuidle_driver *drv, struct cpuidle_device *dev)
* pattern detection.
*/
cpu_data->intervals[cpu_data->interval_idx++] = measured_ns;
- if (cpu_data->interval_idx > INTERVALS)
+ if (cpu_data->interval_idx >= INTERVALS)
cpu_data->interval_idx = 0;
}
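
The teo change is the standard ring-buffer wrap fix: with a post-increment, the index must reset as soon as it reaches the array length, otherwise the next store lands one slot past the end. A minimal sketch with hypothetical names:

	#define INTERVALS 8

	static u64 intervals[INTERVALS];
	static int idx;

	static void record(u64 value)
	{
		intervals[idx++] = value;
		if (idx >= INTERVALS)	/* '>' would let idx hit INTERVALS and overflow */
			idx = 0;
	}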
diff --git a/drivers/cpuidle/sysfs.c b/drivers/cpuidle/sysfs.c
index 38ef770be90d..cdeedbf02646 100644
--- a/drivers/cpuidle/sysfs.c
+++ b/drivers/cpuidle/sysfs.c
@@ -142,6 +142,7 @@ static struct attribute_group cpuidle_attr_group = {
/**
* cpuidle_add_interface - add CPU global sysfs attributes
+ * @dev: the target device
*/
int cpuidle_add_interface(struct device *dev)
{
@@ -153,6 +154,7 @@ int cpuidle_add_interface(struct device *dev)
/**
* cpuidle_remove_interface - remove CPU global sysfs attributes
+ * @dev: the target device
*/
void cpuidle_remove_interface(struct device *dev)
{
@@ -327,6 +329,14 @@ static ssize_t store_state_disable(struct cpuidle_state *state,
return size;
}
+static ssize_t show_state_default_status(struct cpuidle_state *state,
+ struct cpuidle_state_usage *state_usage,
+ char *buf)
+{
+ return sprintf(buf, "%s\n",
+ state->flags & CPUIDLE_FLAG_OFF ? "disabled" : "enabled");
+}
+
define_one_state_ro(name, show_state_name);
define_one_state_ro(desc, show_state_desc);
define_one_state_ro(latency, show_state_exit_latency);
@@ -337,6 +347,7 @@ define_one_state_ro(time, show_state_time);
define_one_state_rw(disable, show_state_disable, store_state_disable);
define_one_state_ro(above, show_state_above);
define_one_state_ro(below, show_state_below);
+define_one_state_ro(default_status, show_state_default_status);
static struct attribute *cpuidle_state_default_attrs[] = {
&attr_name.attr,
@@ -349,6 +360,7 @@ static struct attribute *cpuidle_state_default_attrs[] = {
&attr_disable.attr,
&attr_above.attr,
&attr_below.attr,
+ &attr_default_status.attr,
NULL
};
@@ -615,7 +627,7 @@ static struct kobj_type ktype_driver_cpuidle = {
/**
* cpuidle_add_driver_sysfs - adds the driver name sysfs attribute
- * @device: the target device
+ * @dev: the target device
*/
static int cpuidle_add_driver_sysfs(struct cpuidle_device *dev)
{
@@ -646,7 +658,7 @@ static int cpuidle_add_driver_sysfs(struct cpuidle_device *dev)
/**
* cpuidle_remove_driver_sysfs - removes the driver name sysfs attribute
- * @device: the target device
+ * @dev: the target device
*/
static void cpuidle_remove_driver_sysfs(struct cpuidle_device *dev)
{
diff --git a/drivers/crypto/Kconfig b/drivers/crypto/Kconfig
index 91eb768d4221..c2767ed54dfe 100644
--- a/drivers/crypto/Kconfig
+++ b/drivers/crypto/Kconfig
@@ -248,15 +248,15 @@ config CRYPTO_DEV_MARVELL_CESA
This driver supports CPU offload through DMA transfers.
config CRYPTO_DEV_NIAGARA2
- tristate "Niagara2 Stream Processing Unit driver"
- select CRYPTO_LIB_DES
- select CRYPTO_SKCIPHER
- select CRYPTO_HASH
- select CRYPTO_MD5
- select CRYPTO_SHA1
- select CRYPTO_SHA256
- depends on SPARC64
- help
+ tristate "Niagara2 Stream Processing Unit driver"
+ select CRYPTO_LIB_DES
+ select CRYPTO_SKCIPHER
+ select CRYPTO_HASH
+ select CRYPTO_MD5
+ select CRYPTO_SHA1
+ select CRYPTO_SHA256
+ depends on SPARC64
+ help
Each core of a Niagara2 processor contains a Stream
Processing Unit, which itself contains several cryptographic
sub-units. One set provides the Modular Arithmetic Unit,
@@ -357,7 +357,7 @@ config CRYPTO_DEV_OMAP
depends on ARCH_OMAP2PLUS
help
OMAP processors have various crypto HW accelerators. Select this if
- you want to use the OMAP modules for any of the crypto algorithms.
+ you want to use the OMAP modules for any of the crypto algorithms.
if CRYPTO_DEV_OMAP
@@ -430,7 +430,7 @@ config CRYPTO_DEV_SAHARA
found in some Freescale i.MX chips.
config CRYPTO_DEV_EXYNOS_RNG
- tristate "EXYNOS HW pseudo random number generator support"
+ tristate "Exynos HW pseudo random number generator support"
depends on ARCH_EXYNOS || COMPILE_TEST
depends on HAS_IOMEM
select CRYPTO_RNG
@@ -618,6 +618,14 @@ config CRYPTO_DEV_QCE
tristate "Qualcomm crypto engine accelerator"
depends on ARCH_QCOM || COMPILE_TEST
depends on HAS_IOMEM
+ help
+ This driver supports Qualcomm crypto engine accelerator
+ hardware. To compile this driver as a module, choose M here. The
+ module will be called qcrypto.
+
+config CRYPTO_DEV_QCE_SKCIPHER
+ bool
+ depends on CRYPTO_DEV_QCE
select CRYPTO_AES
select CRYPTO_LIB_DES
select CRYPTO_ECB
@@ -625,10 +633,57 @@ config CRYPTO_DEV_QCE
select CRYPTO_XTS
select CRYPTO_CTR
select CRYPTO_SKCIPHER
- help
- This driver supports Qualcomm crypto engine accelerator
- hardware. To compile this driver as a module, choose M here. The
- module will be called qcrypto.
+
+config CRYPTO_DEV_QCE_SHA
+ bool
+ depends on CRYPTO_DEV_QCE
+
+choice
+ prompt "Algorithms enabled for QCE acceleration"
+ default CRYPTO_DEV_QCE_ENABLE_ALL
+ depends on CRYPTO_DEV_QCE
+ help
+ This option allows you to choose whether to build support for all
+ algorithms (default), hashes-only, or skciphers-only.
+
+ The QCE engine does not appear to scale as well as the CPU to handle
+ multiple crypto requests. While the ipq40xx chips have 4-core CPUs, the
+ QCE handles only 2 requests in parallel.
+
+ IPsec throughput seems to improve when disabling either family of
+ algorithms, sharing the load with the CPU. Enabling skciphers-only
+ appears to work best.
+
+ config CRYPTO_DEV_QCE_ENABLE_ALL
+ bool "All supported algorithms"
+ select CRYPTO_DEV_QCE_SKCIPHER
+ select CRYPTO_DEV_QCE_SHA
+ help
+ Enable all supported algorithms:
+ - AES (CBC, CTR, ECB, XTS)
+ - 3DES (CBC, ECB)
+ - DES (CBC, ECB)
+ - SHA1, HMAC-SHA1
+ - SHA256, HMAC-SHA256
+
+ config CRYPTO_DEV_QCE_ENABLE_SKCIPHER
+ bool "Symmetric-key ciphers only"
+ select CRYPTO_DEV_QCE_SKCIPHER
+ help
+ Enable symmetric-key ciphers only:
+ - AES (CBC, CTR, ECB, XTS)
+ - 3DES (ECB, CBC)
+ - DES (ECB, CBC)
+
+ config CRYPTO_DEV_QCE_ENABLE_SHA
+ bool "Hash/HMAC only"
+ select CRYPTO_DEV_QCE_SHA
+ help
+ Enable hashes/HMAC algorithms only:
+ - SHA1, HMAC-SHA1
+ - SHA256, HMAC-SHA256
+
+endchoice
config CRYPTO_DEV_QCOM_RNG
tristate "Qualcomm Random Number Generator Driver"
@@ -639,7 +694,7 @@ config CRYPTO_DEV_QCOM_RNG
Generator hardware found on Qualcomm SoCs.
To compile this driver as a module, choose M here. The
- module will be called qcom-rng. If unsure, say N.
+ module will be called qcom-rng. If unsure, say N.
config CRYPTO_DEV_VMX
bool "Support for VMX cryptographic acceleration instructions"
@@ -716,7 +771,7 @@ source "drivers/crypto/stm32/Kconfig"
config CRYPTO_DEV_SAFEXCEL
tristate "Inside Secure's SafeXcel cryptographic engine driver"
- depends on OF || PCI || COMPILE_TEST
+ depends on (OF || PCI || COMPILE_TEST) && HAS_IOMEM
select CRYPTO_LIB_AES
select CRYPTO_AUTHENC
select CRYPTO_SKCIPHER
diff --git a/drivers/crypto/allwinner/sun4i-ss/sun4i-ss-cipher.c b/drivers/crypto/allwinner/sun4i-ss/sun4i-ss-cipher.c
index cb2b0874f68f..7f22d305178e 100644
--- a/drivers/crypto/allwinner/sun4i-ss/sun4i-ss-cipher.c
+++ b/drivers/crypto/allwinner/sun4i-ss/sun4i-ss-cipher.c
@@ -541,7 +541,6 @@ int sun4i_ss_aes_setkey(struct crypto_skcipher *tfm, const u8 *key,
break;
default:
dev_dbg(ss->dev, "ERROR: Invalid keylen %u\n", keylen);
- crypto_skcipher_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
return -EINVAL;
}
op->keylen = keylen;
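
This is the first of many identical cleanups below: the deletions across these drivers show the CRYPTO_TFM_RES_BAD_KEY_LEN result flag being retired, so a setkey handler now simply returns -EINVAL for a bad key length. A sketch of the resulting shape (hypothetical driver):

	static int example_setkey(struct crypto_skcipher *tfm, const u8 *key,
				  unsigned int keylen)
	{
		if (keylen != AES_KEYSIZE_128 && keylen != AES_KEYSIZE_192 &&
		    keylen != AES_KEYSIZE_256)
			return -EINVAL;	/* no tfm result flags to set any more */

		/* ... copy the key into driver state ... */
		return 0;
	}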
diff --git a/drivers/crypto/allwinner/sun4i-ss/sun4i-ss-core.c b/drivers/crypto/allwinner/sun4i-ss/sun4i-ss-core.c
index 814cd12149a9..a2b67f7f8a81 100644
--- a/drivers/crypto/allwinner/sun4i-ss/sun4i-ss-core.c
+++ b/drivers/crypto/allwinner/sun4i-ss/sun4i-ss-core.c
@@ -13,6 +13,7 @@
#include <linux/io.h>
#include <linux/module.h>
#include <linux/of.h>
+#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <crypto/scatterwalk.h>
#include <linux/scatterlist.h>
@@ -22,6 +23,14 @@
#include "sun4i-ss.h"
+static const struct ss_variant ss_a10_variant = {
+ .sha1_in_be = false,
+};
+
+static const struct ss_variant ss_a33_variant = {
+ .sha1_in_be = true,
+};
+
static struct sun4i_ss_alg_template ss_algs[] = {
{ .type = CRYPTO_ALG_TYPE_AHASH,
.mode = SS_OP_MD5,
@@ -273,7 +282,7 @@ err_enable:
return err;
}
-const struct dev_pm_ops sun4i_ss_pm_ops = {
+static const struct dev_pm_ops sun4i_ss_pm_ops = {
SET_RUNTIME_PM_OPS(sun4i_ss_pm_suspend, sun4i_ss_pm_resume, NULL)
};
@@ -323,6 +332,12 @@ static int sun4i_ss_probe(struct platform_device *pdev)
return PTR_ERR(ss->base);
}
+ ss->variant = of_device_get_match_data(&pdev->dev);
+ if (!ss->variant) {
+ dev_err(&pdev->dev, "Missing Security System variant\n");
+ return -EINVAL;
+ }
+
ss->ssclk = devm_clk_get(&pdev->dev, "mod");
if (IS_ERR(ss->ssclk)) {
err = PTR_ERR(ss->ssclk);
@@ -484,7 +499,12 @@ static int sun4i_ss_remove(struct platform_device *pdev)
}
static const struct of_device_id a20ss_crypto_of_match_table[] = {
- { .compatible = "allwinner,sun4i-a10-crypto" },
+ { .compatible = "allwinner,sun4i-a10-crypto",
+ .data = &ss_a10_variant
+ },
+ { .compatible = "allwinner,sun8i-a33-crypto",
+ .data = &ss_a33_variant
+ },
{}
};
MODULE_DEVICE_TABLE(of, a20ss_crypto_of_match_table);
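
The probe path now resolves per-SoC behaviour through OF match data instead of hard-coding it. The general pattern, with hypothetical variant names:

	struct my_variant { bool quirk; };

	static const struct my_variant variant_a = { .quirk = false };
	static const struct my_variant variant_b = { .quirk = true };

	static const struct of_device_id my_of_match[] = {
		{ .compatible = "vendor,soc-a", .data = &variant_a },
		{ .compatible = "vendor,soc-b", .data = &variant_b },
		{ /* sentinel */ }
	};

	/* in probe(): */
	const struct my_variant *v = of_device_get_match_data(&pdev->dev);
	if (!v)
		return -EINVAL;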
diff --git a/drivers/crypto/allwinner/sun4i-ss/sun4i-ss-hash.c b/drivers/crypto/allwinner/sun4i-ss/sun4i-ss-hash.c
index fdc0e6cdbb85..dc35edd90034 100644
--- a/drivers/crypto/allwinner/sun4i-ss/sun4i-ss-hash.c
+++ b/drivers/crypto/allwinner/sun4i-ss/sun4i-ss-hash.c
@@ -479,7 +479,10 @@ hash_final:
/* Get the hash from the device */
if (op->mode == SS_OP_SHA1) {
for (i = 0; i < 5; i++) {
- v = cpu_to_be32(readl(ss->base + SS_MD0 + i * 4));
+ if (ss->variant->sha1_in_be)
+ v = cpu_to_le32(readl(ss->base + SS_MD0 + i * 4));
+ else
+ v = cpu_to_be32(readl(ss->base + SS_MD0 + i * 4));
memcpy(areq->result + i * 4, &v, 4);
}
} else {
diff --git a/drivers/crypto/allwinner/sun4i-ss/sun4i-ss.h b/drivers/crypto/allwinner/sun4i-ss/sun4i-ss.h
index 60425ac75d90..2b4c6333eb67 100644
--- a/drivers/crypto/allwinner/sun4i-ss/sun4i-ss.h
+++ b/drivers/crypto/allwinner/sun4i-ss/sun4i-ss.h
@@ -131,7 +131,16 @@
#define SS_SEED_LEN 192
#define SS_DATA_LEN 160
+/*
+ * struct ss_variant - Describe SS hardware variant
+ * @sha1_in_be: The SHA1 digest is given by SS in BE, and so needs to be inverted.
+ */
+struct ss_variant {
+ bool sha1_in_be;
+};
+
struct sun4i_ss_ctx {
+ const struct ss_variant *variant;
void __iomem *base;
int irq;
struct clk *busclk;
diff --git a/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-cipher.c b/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-cipher.c
index 37d0b6c386a0..a5fd8975f3d3 100644
--- a/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-cipher.c
+++ b/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-cipher.c
@@ -144,11 +144,6 @@ static int sun8i_ce_cipher(struct skcipher_request *areq)
cet->t_sym_ctl = cpu_to_le32(sym);
cet->t_asym_ctl = 0;
- chan->op_mode = ce->variant->op_mode[algt->ce_blockmode];
- chan->op_dir = rctx->op_dir;
- chan->method = ce->variant->alg_cipher[algt->ce_algo_id];
- chan->keylen = op->keylen;
-
addr_key = dma_map_single(ce->dev, op->key, op->keylen, DMA_TO_DEVICE);
cet->t_key = cpu_to_le32(addr_key);
if (dma_mapping_error(ce->dev, addr_key)) {
@@ -394,7 +389,6 @@ int sun8i_ce_aes_setkey(struct crypto_skcipher *tfm, const u8 *key,
break;
default:
dev_dbg(ce->dev, "ERROR: Invalid keylen %u\n", keylen);
- crypto_skcipher_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
return -EINVAL;
}
if (op->key) {
diff --git a/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-core.c b/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-core.c
index 73a7649f915d..f72346a44e69 100644
--- a/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-core.c
+++ b/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-core.c
@@ -555,7 +555,7 @@ static int sun8i_ce_probe(struct platform_device *pdev)
return -EINVAL;
}
- ce->base = devm_platform_ioremap_resource(pdev, 0);;
+ ce->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(ce->base))
return PTR_ERR(ce->base);
@@ -624,7 +624,7 @@ error_alg:
error_irq:
sun8i_ce_pm_exit(ce);
error_pm:
- sun8i_ce_free_chanlist(ce, MAXFLOW);
+ sun8i_ce_free_chanlist(ce, MAXFLOW - 1);
return err;
}
@@ -638,7 +638,7 @@ static int sun8i_ce_remove(struct platform_device *pdev)
debugfs_remove_recursive(ce->dbgfs_dir);
#endif
- sun8i_ce_free_chanlist(ce, MAXFLOW);
+ sun8i_ce_free_chanlist(ce, MAXFLOW - 1);
sun8i_ce_pm_exit(ce);
return 0;
diff --git a/drivers/crypto/allwinner/sun8i-ce/sun8i-ce.h b/drivers/crypto/allwinner/sun8i-ce/sun8i-ce.h
index 43db49ceafe4..8f8404c84a4d 100644
--- a/drivers/crypto/allwinner/sun8i-ce/sun8i-ce.h
+++ b/drivers/crypto/allwinner/sun8i-ce/sun8i-ce.h
@@ -131,12 +131,8 @@ struct ce_task {
* @engine: ptr to the crypto_engine for this flow
* @bounce_iv: buffer which contain the IV
* @ivlen: size of bounce_iv
- * @keylen: keylen for this flow operation
* @complete: completion for the current task on this flow
* @status: set to 1 by interrupt if task is done
- * @method: current method for flow
- * @op_dir: direction (encrypt vs decrypt) of this flow
- * @op_mode: op_mode for this flow
* @t_phy: Physical address of task
* @tl: pointer to the current ce_task for this flow
* @stat_req: number of request done by this flow
@@ -145,12 +141,8 @@ struct sun8i_ce_flow {
struct crypto_engine *engine;
void *bounce_iv;
unsigned int ivlen;
- unsigned int keylen;
struct completion complete;
int status;
- u32 method;
- u32 op_dir;
- u32 op_mode;
dma_addr_t t_phy;
int timeout;
struct ce_task *tl;
diff --git a/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-cipher.c b/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-cipher.c
index f222979a5623..84d52fc3a2da 100644
--- a/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-cipher.c
+++ b/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-cipher.c
@@ -390,7 +390,6 @@ int sun8i_ss_aes_setkey(struct crypto_skcipher *tfm, const u8 *key,
break;
default:
dev_dbg(ss->dev, "ERROR: Invalid keylen %u\n", keylen);
- crypto_skcipher_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
return -EINVAL;
}
if (op->key) {
@@ -416,7 +415,6 @@ int sun8i_ss_des3_setkey(struct crypto_skcipher *tfm, const u8 *key,
if (unlikely(keylen != 3 * DES_KEY_SIZE)) {
dev_dbg(ss->dev, "Invalid keylen %u\n", keylen);
- crypto_skcipher_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
return -EINVAL;
}
diff --git a/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-core.c b/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-core.c
index 90997cc509b8..6b301afffd11 100644
--- a/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-core.c
+++ b/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-core.c
@@ -595,7 +595,7 @@ error_alg:
error_irq:
sun8i_ss_pm_exit(ss);
error_pm:
- sun8i_ss_free_flows(ss, MAXFLOW);
+ sun8i_ss_free_flows(ss, MAXFLOW - 1);
return err;
}
@@ -609,7 +609,7 @@ static int sun8i_ss_remove(struct platform_device *pdev)
debugfs_remove_recursive(ss->dbgfs_dir);
#endif
- sun8i_ss_free_flows(ss, MAXFLOW);
+ sun8i_ss_free_flows(ss, MAXFLOW - 1);
sun8i_ss_pm_exit(ss);
diff --git a/drivers/crypto/amcc/crypto4xx_alg.c b/drivers/crypto/amcc/crypto4xx_alg.c
index a42f8619589d..f7fc0c464125 100644
--- a/drivers/crypto/amcc/crypto4xx_alg.c
+++ b/drivers/crypto/amcc/crypto4xx_alg.c
@@ -128,12 +128,9 @@ static int crypto4xx_setkey_aes(struct crypto_skcipher *cipher,
struct dynamic_sa_ctl *sa;
int rc;
- if (keylen != AES_KEYSIZE_256 &&
- keylen != AES_KEYSIZE_192 && keylen != AES_KEYSIZE_128) {
- crypto_skcipher_set_flags(cipher,
- CRYPTO_TFM_RES_BAD_KEY_LEN);
+ if (keylen != AES_KEYSIZE_256 && keylen != AES_KEYSIZE_192 &&
+ keylen != AES_KEYSIZE_128)
return -EINVAL;
- }
/* Create SA */
if (ctx->sa_in || ctx->sa_out)
@@ -292,19 +289,11 @@ static int crypto4xx_sk_setup_fallback(struct crypto4xx_ctx *ctx,
const u8 *key,
unsigned int keylen)
{
- int rc;
-
crypto_sync_skcipher_clear_flags(ctx->sw_cipher.cipher,
CRYPTO_TFM_REQ_MASK);
crypto_sync_skcipher_set_flags(ctx->sw_cipher.cipher,
crypto_skcipher_get_flags(cipher) & CRYPTO_TFM_REQ_MASK);
- rc = crypto_sync_skcipher_setkey(ctx->sw_cipher.cipher, key, keylen);
- crypto_skcipher_clear_flags(cipher, CRYPTO_TFM_RES_MASK);
- crypto_skcipher_set_flags(cipher,
- crypto_sync_skcipher_get_flags(ctx->sw_cipher.cipher) &
- CRYPTO_TFM_RES_MASK);
-
- return rc;
+ return crypto_sync_skcipher_setkey(ctx->sw_cipher.cipher, key, keylen);
}
int crypto4xx_setkey_aes_ctr(struct crypto_skcipher *cipher,
@@ -379,18 +368,10 @@ static int crypto4xx_aead_setup_fallback(struct crypto4xx_ctx *ctx,
const u8 *key,
unsigned int keylen)
{
- int rc;
-
crypto_aead_clear_flags(ctx->sw_cipher.aead, CRYPTO_TFM_REQ_MASK);
crypto_aead_set_flags(ctx->sw_cipher.aead,
crypto_aead_get_flags(cipher) & CRYPTO_TFM_REQ_MASK);
- rc = crypto_aead_setkey(ctx->sw_cipher.aead, key, keylen);
- crypto_aead_clear_flags(cipher, CRYPTO_TFM_RES_MASK);
- crypto_aead_set_flags(cipher,
- crypto_aead_get_flags(ctx->sw_cipher.aead) &
- CRYPTO_TFM_RES_MASK);
-
- return rc;
+ return crypto_aead_setkey(ctx->sw_cipher.aead, key, keylen);
}
/**
@@ -551,10 +532,8 @@ int crypto4xx_setkey_aes_gcm(struct crypto_aead *cipher,
struct dynamic_sa_ctl *sa;
int rc = 0;
- if (crypto4xx_aes_gcm_validate_keylen(keylen) != 0) {
- crypto_aead_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
+ if (crypto4xx_aes_gcm_validate_keylen(keylen) != 0)
return -EINVAL;
- }
rc = crypto4xx_aead_setup_fallback(ctx, cipher, key, keylen);
if (rc)
diff --git a/drivers/crypto/amcc/crypto4xx_core.c b/drivers/crypto/amcc/crypto4xx_core.c
index 7d6b695c4ab3..981de43ea5e2 100644
--- a/drivers/crypto/amcc/crypto4xx_core.c
+++ b/drivers/crypto/amcc/crypto4xx_core.c
@@ -169,7 +169,7 @@ static u32 crypto4xx_build_pdr(struct crypto4xx_device *dev)
int i;
dev->pdr = dma_alloc_coherent(dev->core_dev->device,
sizeof(struct ce_pd) * PPC4XX_NUM_PD,
- &dev->pdr_pa, GFP_ATOMIC);
+ &dev->pdr_pa, GFP_KERNEL);
if (!dev->pdr)
return -ENOMEM;
@@ -185,13 +185,13 @@ static u32 crypto4xx_build_pdr(struct crypto4xx_device *dev)
dev->shadow_sa_pool = dma_alloc_coherent(dev->core_dev->device,
sizeof(union shadow_sa_buf) * PPC4XX_NUM_PD,
&dev->shadow_sa_pool_pa,
- GFP_ATOMIC);
+ GFP_KERNEL);
if (!dev->shadow_sa_pool)
return -ENOMEM;
dev->shadow_sr_pool = dma_alloc_coherent(dev->core_dev->device,
sizeof(struct sa_state_record) * PPC4XX_NUM_PD,
- &dev->shadow_sr_pool_pa, GFP_ATOMIC);
+ &dev->shadow_sr_pool_pa, GFP_KERNEL);
if (!dev->shadow_sr_pool)
return -ENOMEM;
for (i = 0; i < PPC4XX_NUM_PD; i++) {
@@ -277,7 +277,7 @@ static u32 crypto4xx_build_gdr(struct crypto4xx_device *dev)
{
dev->gdr = dma_alloc_coherent(dev->core_dev->device,
sizeof(struct ce_gd) * PPC4XX_NUM_GD,
- &dev->gdr_pa, GFP_ATOMIC);
+ &dev->gdr_pa, GFP_KERNEL);
if (!dev->gdr)
return -ENOMEM;
@@ -286,7 +286,8 @@ static u32 crypto4xx_build_gdr(struct crypto4xx_device *dev)
static inline void crypto4xx_destroy_gdr(struct crypto4xx_device *dev)
{
- dma_free_coherent(dev->core_dev->device,
+ if (dev->gdr)
+ dma_free_coherent(dev->core_dev->device,
sizeof(struct ce_gd) * PPC4XX_NUM_GD,
dev->gdr, dev->gdr_pa);
}
@@ -354,20 +355,20 @@ static u32 crypto4xx_build_sdr(struct crypto4xx_device *dev)
{
int i;
- /* alloc memory for scatter descriptor ring */
- dev->sdr = dma_alloc_coherent(dev->core_dev->device,
- sizeof(struct ce_sd) * PPC4XX_NUM_SD,
- &dev->sdr_pa, GFP_ATOMIC);
- if (!dev->sdr)
- return -ENOMEM;
-
dev->scatter_buffer_va =
dma_alloc_coherent(dev->core_dev->device,
PPC4XX_SD_BUFFER_SIZE * PPC4XX_NUM_SD,
- &dev->scatter_buffer_pa, GFP_ATOMIC);
+ &dev->scatter_buffer_pa, GFP_KERNEL);
if (!dev->scatter_buffer_va)
return -ENOMEM;
+ /* alloc memory for scatter descriptor ring */
+ dev->sdr = dma_alloc_coherent(dev->core_dev->device,
+ sizeof(struct ce_sd) * PPC4XX_NUM_SD,
+ &dev->sdr_pa, GFP_KERNEL);
+ if (!dev->sdr)
+ return -ENOMEM;
+
for (i = 0; i < PPC4XX_NUM_SD; i++) {
dev->sdr[i].ptr = dev->scatter_buffer_pa +
PPC4XX_SD_BUFFER_SIZE * i;
@@ -1439,16 +1440,15 @@ static int crypto4xx_probe(struct platform_device *ofdev)
spin_lock_init(&core_dev->lock);
INIT_LIST_HEAD(&core_dev->dev->alg_list);
ratelimit_default_init(&core_dev->dev->aead_ratelimit);
+ rc = crypto4xx_build_sdr(core_dev->dev);
+ if (rc)
+ goto err_build_sdr;
rc = crypto4xx_build_pdr(core_dev->dev);
if (rc)
- goto err_build_pdr;
+ goto err_build_sdr;
rc = crypto4xx_build_gdr(core_dev->dev);
if (rc)
- goto err_build_pdr;
-
- rc = crypto4xx_build_sdr(core_dev->dev);
- if (rc)
goto err_build_sdr;
/* Init tasklet for bottom half processing */
@@ -1493,7 +1493,6 @@ err_iomap:
err_build_sdr:
crypto4xx_destroy_sdr(core_dev->dev);
crypto4xx_destroy_gdr(core_dev->dev);
-err_build_pdr:
crypto4xx_destroy_pdr(core_dev->dev);
kfree(core_dev->dev);
err_alloc_dev:
diff --git a/drivers/crypto/amlogic/Kconfig b/drivers/crypto/amlogic/Kconfig
index b90850d18965..cf9547602670 100644
--- a/drivers/crypto/amlogic/Kconfig
+++ b/drivers/crypto/amlogic/Kconfig
@@ -1,5 +1,6 @@
config CRYPTO_DEV_AMLOGIC_GXL
tristate "Support for amlogic cryptographic offloader"
+ depends on HAS_IOMEM
default y if ARCH_MESON
select CRYPTO_SKCIPHER
select CRYPTO_ENGINE
diff --git a/drivers/crypto/amlogic/amlogic-gxl-cipher.c b/drivers/crypto/amlogic/amlogic-gxl-cipher.c
index e589015aac1c..9819dd50fbad 100644
--- a/drivers/crypto/amlogic/amlogic-gxl-cipher.c
+++ b/drivers/crypto/amlogic/amlogic-gxl-cipher.c
@@ -366,7 +366,6 @@ int meson_aes_setkey(struct crypto_skcipher *tfm, const u8 *key,
break;
default:
dev_dbg(mc->dev, "ERROR: Invalid keylen %u\n", keylen);
- crypto_skcipher_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
return -EINVAL;
}
if (op->key) {
diff --git a/drivers/crypto/amlogic/amlogic-gxl-core.c b/drivers/crypto/amlogic/amlogic-gxl-core.c
index fa05fce1c0de..9d4ead2f7ebb 100644
--- a/drivers/crypto/amlogic/amlogic-gxl-core.c
+++ b/drivers/crypto/amlogic/amlogic-gxl-core.c
@@ -289,7 +289,7 @@ static int meson_crypto_probe(struct platform_device *pdev)
error_alg:
meson_unregister_algs(mc);
error_flow:
- meson_free_chanlist(mc, MAXFLOW);
+ meson_free_chanlist(mc, MAXFLOW - 1);
clk_disable_unprepare(mc->busclk);
return err;
}
@@ -304,7 +304,7 @@ static int meson_crypto_remove(struct platform_device *pdev)
meson_unregister_algs(mc);
- meson_free_chanlist(mc, MAXFLOW);
+ meson_free_chanlist(mc, MAXFLOW - 1);
clk_disable_unprepare(mc->busclk);
return 0;
diff --git a/drivers/crypto/atmel-aes.c b/drivers/crypto/atmel-aes.c
index 91092504bc96..a6e14491e080 100644
--- a/drivers/crypto/atmel-aes.c
+++ b/drivers/crypto/atmel-aes.c
@@ -21,6 +21,7 @@
#include <linux/platform_device.h>
#include <linux/device.h>
+#include <linux/dmaengine.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
@@ -37,8 +38,6 @@
#include <crypto/xts.h>
#include <crypto/internal/aead.h>
#include <crypto/internal/skcipher.h>
-#include <linux/platform_data/crypto-atmel.h>
-#include <dt-bindings/dma/at91.h>
#include "atmel-aes-regs.h"
#include "atmel-authenc.h"
@@ -89,7 +88,6 @@
struct atmel_aes_caps {
bool has_dualbuff;
bool has_cfb64;
- bool has_ctr32;
bool has_gcm;
bool has_xts;
bool has_authenc;
@@ -122,6 +120,7 @@ struct atmel_aes_ctr_ctx {
size_t offset;
struct scatterlist src[2];
struct scatterlist dst[2];
+ u32 blocks;
};
struct atmel_aes_gcm_ctx {
@@ -514,8 +513,37 @@ static void atmel_aes_set_iv_as_last_ciphertext_block(struct atmel_aes_dev *dd)
}
}
+static inline struct atmel_aes_ctr_ctx *
+atmel_aes_ctr_ctx_cast(struct atmel_aes_base_ctx *ctx)
+{
+ return container_of(ctx, struct atmel_aes_ctr_ctx, base);
+}
+
+static void atmel_aes_ctr_update_req_iv(struct atmel_aes_dev *dd)
+{
+ struct atmel_aes_ctr_ctx *ctx = atmel_aes_ctr_ctx_cast(dd->ctx);
+ struct skcipher_request *req = skcipher_request_cast(dd->areq);
+ struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
+ unsigned int ivsize = crypto_skcipher_ivsize(skcipher);
+ int i;
+
+ /*
+ * The CTR transfer works in fragments of at most 1 MByte of data,
+ * because of the 16-bit CTR counter embedded in the IP. By the time we
+ * get here, ctx->blocks holds the number of blocks of the last fragment
+ * processed, so there is no need to explicitly cast it to u16.
+ */
+ for (i = 0; i < ctx->blocks; i++)
+ crypto_inc((u8 *)ctx->iv, AES_BLOCK_SIZE);
+
+ memcpy(req->iv, ctx->iv, ivsize);
+}
+
static inline int atmel_aes_complete(struct atmel_aes_dev *dd, int err)
{
+ struct skcipher_request *req = skcipher_request_cast(dd->areq);
+ struct atmel_aes_reqctx *rctx = skcipher_request_ctx(req);
+
#if IS_ENABLED(CONFIG_CRYPTO_DEV_ATMEL_AUTHENC)
if (dd->ctx->is_aead)
atmel_aes_authenc_complete(dd, err);
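
crypto_inc() (declared in crypto/algapi.h) treats its buffer as one big-endian counter, which is why the loop above can advance the IV block by block. A tiny usage sketch:

	u8 iv[AES_BLOCK_SIZE] = { };	/* 16-byte big-endian counter, starts at 0 */

	crypto_inc(iv, AES_BLOCK_SIZE);	/* iv[15] == 1 */
	crypto_inc(iv, AES_BLOCK_SIZE);	/* iv[15] == 2; carries ripple leftwards */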
@@ -524,8 +552,13 @@ static inline int atmel_aes_complete(struct atmel_aes_dev *dd, int err)
clk_disable(dd->iclk);
dd->flags &= ~AES_FLAGS_BUSY;
- if (!dd->ctx->is_aead)
- atmel_aes_set_iv_as_last_ciphertext_block(dd);
+ if (!err && !dd->ctx->is_aead &&
+ (rctx->mode & AES_FLAGS_OPMODE_MASK) != AES_FLAGS_ECB) {
+ if ((rctx->mode & AES_FLAGS_OPMODE_MASK) != AES_FLAGS_CTR)
+ atmel_aes_set_iv_as_last_ciphertext_block(dd);
+ else
+ atmel_aes_ctr_update_req_iv(dd);
+ }
if (dd->is_async)
dd->areq->complete(dd->areq, err);
@@ -790,7 +823,6 @@ static int atmel_aes_dma_transfer_start(struct atmel_aes_dev *dd,
int err;
memset(&config, 0, sizeof(config));
- config.direction = dir;
config.src_addr_width = addr_width;
config.dst_addr_width = addr_width;
config.src_maxburst = maxburst;
@@ -830,27 +862,6 @@ static int atmel_aes_dma_transfer_start(struct atmel_aes_dev *dd,
return 0;
}
-static void atmel_aes_dma_transfer_stop(struct atmel_aes_dev *dd,
- enum dma_transfer_direction dir)
-{
- struct atmel_aes_dma *dma;
-
- switch (dir) {
- case DMA_MEM_TO_DEV:
- dma = &dd->src;
- break;
-
- case DMA_DEV_TO_MEM:
- dma = &dd->dst;
- break;
-
- default:
- return;
- }
-
- dmaengine_terminate_all(dma->chan);
-}
-
static int atmel_aes_dma_start(struct atmel_aes_dev *dd,
struct scatterlist *src,
struct scatterlist *dst,
@@ -909,25 +920,18 @@ static int atmel_aes_dma_start(struct atmel_aes_dev *dd,
return -EINPROGRESS;
output_transfer_stop:
- atmel_aes_dma_transfer_stop(dd, DMA_DEV_TO_MEM);
+ dmaengine_terminate_sync(dd->dst.chan);
unmap:
atmel_aes_unmap(dd);
exit:
return atmel_aes_complete(dd, err);
}
-static void atmel_aes_dma_stop(struct atmel_aes_dev *dd)
-{
- atmel_aes_dma_transfer_stop(dd, DMA_MEM_TO_DEV);
- atmel_aes_dma_transfer_stop(dd, DMA_DEV_TO_MEM);
- atmel_aes_unmap(dd);
-}
-
static void atmel_aes_dma_callback(void *data)
{
struct atmel_aes_dev *dd = data;
- atmel_aes_dma_stop(dd);
+ atmel_aes_unmap(dd);
dd->is_async = true;
(void)dd->resume(dd);
}
@@ -1004,19 +1008,14 @@ static int atmel_aes_start(struct atmel_aes_dev *dd)
atmel_aes_transfer_complete);
}
-static inline struct atmel_aes_ctr_ctx *
-atmel_aes_ctr_ctx_cast(struct atmel_aes_base_ctx *ctx)
-{
- return container_of(ctx, struct atmel_aes_ctr_ctx, base);
-}
-
static int atmel_aes_ctr_transfer(struct atmel_aes_dev *dd)
{
struct atmel_aes_ctr_ctx *ctx = atmel_aes_ctr_ctx_cast(dd->ctx);
struct skcipher_request *req = skcipher_request_cast(dd->areq);
struct scatterlist *src, *dst;
- u32 ctr, blocks;
size_t datalen;
+ u32 ctr;
+ u16 start, end;
bool use_dma, fragmented = false;
/* Check for transfer completion. */
@@ -1026,29 +1025,19 @@ static int atmel_aes_ctr_transfer(struct atmel_aes_dev *dd)
/* Compute data length. */
datalen = req->cryptlen - ctx->offset;
- blocks = DIV_ROUND_UP(datalen, AES_BLOCK_SIZE);
+ ctx->blocks = DIV_ROUND_UP(datalen, AES_BLOCK_SIZE);
ctr = be32_to_cpu(ctx->iv[3]);
- if (dd->caps.has_ctr32) {
- /* Check 32bit counter overflow. */
- u32 start = ctr;
- u32 end = start + blocks - 1;
-
- if (end < start) {
- ctr |= 0xffffffff;
- datalen = AES_BLOCK_SIZE * -start;
- fragmented = true;
- }
- } else {
- /* Check 16bit counter overflow. */
- u16 start = ctr & 0xffff;
- u16 end = start + (u16)blocks - 1;
-
- if (blocks >> 16 || end < start) {
- ctr |= 0xffff;
- datalen = AES_BLOCK_SIZE * (0x10000-start);
- fragmented = true;
- }
+
+ /* Check 16-bit counter overflow. */
+ start = ctr & 0xffff;
+ end = start + ctx->blocks - 1;
+
+ if (ctx->blocks >> 16 || end < start) {
+ ctr |= 0xffff;
+ datalen = AES_BLOCK_SIZE * (0x10000 - start);
+ fragmented = true;
}
+
use_dma = (datalen >= ATMEL_AES_DMA_THRESHOLD);
/* Jump to offset. */
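
As a worked example with hypothetical values: if the 16-bit counter starts at start = 0xfff0 and the request spans ctx->blocks = 0x20 blocks, then end = start + 0x20 - 1 truncates to 0x000f, which is below start. The transfer is therefore fragmented: datalen is clamped to AES_BLOCK_SIZE * (0x10000 - 0xfff0) = 16 * 16 = 256 bytes, and the remaining blocks are sent in a later fragment after the IV has been reloaded.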
@@ -1131,7 +1120,8 @@ static int atmel_aes_crypt(struct skcipher_request *req, unsigned long mode)
rctx = skcipher_request_ctx(req);
rctx->mode = mode;
- if (!(mode & AES_FLAGS_ENCRYPT) && (req->src == req->dst)) {
+ if ((mode & AES_FLAGS_OPMODE_MASK) != AES_FLAGS_ECB &&
+ !(mode & AES_FLAGS_ENCRYPT) && req->src == req->dst) {
unsigned int ivsize = crypto_skcipher_ivsize(skcipher);
if (req->cryptlen >= ivsize)
@@ -1150,10 +1140,8 @@ static int atmel_aes_setkey(struct crypto_skcipher *tfm, const u8 *key,
if (keylen != AES_KEYSIZE_128 &&
keylen != AES_KEYSIZE_192 &&
- keylen != AES_KEYSIZE_256) {
- crypto_skcipher_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
+ keylen != AES_KEYSIZE_256)
return -EINVAL;
- }
memcpy(ctx->key, key, keylen);
ctx->keylen = keylen;
@@ -1275,12 +1263,8 @@ static struct skcipher_alg aes_algs[] = {
{
.base.cra_name = "ecb(aes)",
.base.cra_driver_name = "atmel-ecb-aes",
- .base.cra_priority = ATMEL_AES_PRIORITY,
- .base.cra_flags = CRYPTO_ALG_ASYNC,
.base.cra_blocksize = AES_BLOCK_SIZE,
.base.cra_ctxsize = sizeof(struct atmel_aes_ctx),
- .base.cra_alignmask = 0xf,
- .base.cra_module = THIS_MODULE,
.init = atmel_aes_init_tfm,
.min_keysize = AES_MIN_KEY_SIZE,
@@ -1292,12 +1276,8 @@ static struct skcipher_alg aes_algs[] = {
{
.base.cra_name = "cbc(aes)",
.base.cra_driver_name = "atmel-cbc-aes",
- .base.cra_priority = ATMEL_AES_PRIORITY,
- .base.cra_flags = CRYPTO_ALG_ASYNC,
.base.cra_blocksize = AES_BLOCK_SIZE,
.base.cra_ctxsize = sizeof(struct atmel_aes_ctx),
- .base.cra_alignmask = 0xf,
- .base.cra_module = THIS_MODULE,
.init = atmel_aes_init_tfm,
.min_keysize = AES_MIN_KEY_SIZE,
@@ -1310,12 +1290,8 @@ static struct skcipher_alg aes_algs[] = {
{
.base.cra_name = "ofb(aes)",
.base.cra_driver_name = "atmel-ofb-aes",
- .base.cra_priority = ATMEL_AES_PRIORITY,
- .base.cra_flags = CRYPTO_ALG_ASYNC,
.base.cra_blocksize = AES_BLOCK_SIZE,
.base.cra_ctxsize = sizeof(struct atmel_aes_ctx),
- .base.cra_alignmask = 0xf,
- .base.cra_module = THIS_MODULE,
.init = atmel_aes_init_tfm,
.min_keysize = AES_MIN_KEY_SIZE,
@@ -1328,12 +1304,8 @@ static struct skcipher_alg aes_algs[] = {
{
.base.cra_name = "cfb(aes)",
.base.cra_driver_name = "atmel-cfb-aes",
- .base.cra_priority = ATMEL_AES_PRIORITY,
- .base.cra_flags = CRYPTO_ALG_ASYNC,
.base.cra_blocksize = AES_BLOCK_SIZE,
.base.cra_ctxsize = sizeof(struct atmel_aes_ctx),
- .base.cra_alignmask = 0xf,
- .base.cra_module = THIS_MODULE,
.init = atmel_aes_init_tfm,
.min_keysize = AES_MIN_KEY_SIZE,
@@ -1346,12 +1318,8 @@ static struct skcipher_alg aes_algs[] = {
{
.base.cra_name = "cfb32(aes)",
.base.cra_driver_name = "atmel-cfb32-aes",
- .base.cra_priority = ATMEL_AES_PRIORITY,
- .base.cra_flags = CRYPTO_ALG_ASYNC,
.base.cra_blocksize = CFB32_BLOCK_SIZE,
.base.cra_ctxsize = sizeof(struct atmel_aes_ctx),
- .base.cra_alignmask = 0xf,
- .base.cra_module = THIS_MODULE,
.init = atmel_aes_init_tfm,
.min_keysize = AES_MIN_KEY_SIZE,
@@ -1364,12 +1332,8 @@ static struct skcipher_alg aes_algs[] = {
{
.base.cra_name = "cfb16(aes)",
.base.cra_driver_name = "atmel-cfb16-aes",
- .base.cra_priority = ATMEL_AES_PRIORITY,
- .base.cra_flags = CRYPTO_ALG_ASYNC,
.base.cra_blocksize = CFB16_BLOCK_SIZE,
.base.cra_ctxsize = sizeof(struct atmel_aes_ctx),
- .base.cra_alignmask = 0xf,
- .base.cra_module = THIS_MODULE,
.init = atmel_aes_init_tfm,
.min_keysize = AES_MIN_KEY_SIZE,
@@ -1382,12 +1346,8 @@ static struct skcipher_alg aes_algs[] = {
{
.base.cra_name = "cfb8(aes)",
.base.cra_driver_name = "atmel-cfb8-aes",
- .base.cra_priority = ATMEL_AES_PRIORITY,
- .base.cra_flags = CRYPTO_ALG_ASYNC,
.base.cra_blocksize = CFB8_BLOCK_SIZE,
.base.cra_ctxsize = sizeof(struct atmel_aes_ctx),
- .base.cra_alignmask = 0xf,
- .base.cra_module = THIS_MODULE,
.init = atmel_aes_init_tfm,
.min_keysize = AES_MIN_KEY_SIZE,
@@ -1400,12 +1360,8 @@ static struct skcipher_alg aes_algs[] = {
{
.base.cra_name = "ctr(aes)",
.base.cra_driver_name = "atmel-ctr-aes",
- .base.cra_priority = ATMEL_AES_PRIORITY,
- .base.cra_flags = CRYPTO_ALG_ASYNC,
.base.cra_blocksize = 1,
.base.cra_ctxsize = sizeof(struct atmel_aes_ctr_ctx),
- .base.cra_alignmask = 0xf,
- .base.cra_module = THIS_MODULE,
.init = atmel_aes_ctr_init_tfm,
.min_keysize = AES_MIN_KEY_SIZE,
@@ -1420,12 +1376,8 @@ static struct skcipher_alg aes_algs[] = {
static struct skcipher_alg aes_cfb64_alg = {
.base.cra_name = "cfb64(aes)",
.base.cra_driver_name = "atmel-cfb64-aes",
- .base.cra_priority = ATMEL_AES_PRIORITY,
- .base.cra_flags = CRYPTO_ALG_ASYNC,
.base.cra_blocksize = CFB64_BLOCK_SIZE,
.base.cra_ctxsize = sizeof(struct atmel_aes_ctx),
- .base.cra_alignmask = 0xf,
- .base.cra_module = THIS_MODULE,
.init = atmel_aes_init_tfm,
.min_keysize = AES_MIN_KEY_SIZE,
@@ -1762,10 +1714,8 @@ static int atmel_aes_gcm_setkey(struct crypto_aead *tfm, const u8 *key,
if (keylen != AES_KEYSIZE_256 &&
keylen != AES_KEYSIZE_192 &&
- keylen != AES_KEYSIZE_128) {
- crypto_aead_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
+ keylen != AES_KEYSIZE_128)
return -EINVAL;
- }
memcpy(ctx->key, key, keylen);
ctx->keylen = keylen;
@@ -1776,21 +1726,7 @@ static int atmel_aes_gcm_setkey(struct crypto_aead *tfm, const u8 *key,
static int atmel_aes_gcm_setauthsize(struct crypto_aead *tfm,
unsigned int authsize)
{
- /* Same as crypto_gcm_authsize() from crypto/gcm.c */
- switch (authsize) {
- case 4:
- case 8:
- case 12:
- case 13:
- case 14:
- case 15:
- case 16:
- break;
- default:
- return -EINVAL;
- }
-
- return 0;
+ return crypto_gcm_check_authsize(authsize);
}
static int atmel_aes_gcm_encrypt(struct aead_request *req)
@@ -1825,12 +1761,8 @@ static struct aead_alg aes_gcm_alg = {
.base = {
.cra_name = "gcm(aes)",
.cra_driver_name = "atmel-gcm-aes",
- .cra_priority = ATMEL_AES_PRIORITY,
- .cra_flags = CRYPTO_ALG_ASYNC,
.cra_blocksize = 1,
.cra_ctxsize = sizeof(struct atmel_aes_gcm_ctx),
- .cra_alignmask = 0xf,
- .cra_module = THIS_MODULE,
},
};
@@ -1947,12 +1879,8 @@ static int atmel_aes_xts_init_tfm(struct crypto_skcipher *tfm)
static struct skcipher_alg aes_xts_alg = {
.base.cra_name = "xts(aes)",
.base.cra_driver_name = "atmel-xts-aes",
- .base.cra_priority = ATMEL_AES_PRIORITY,
- .base.cra_flags = CRYPTO_ALG_ASYNC,
.base.cra_blocksize = AES_BLOCK_SIZE,
.base.cra_ctxsize = sizeof(struct atmel_aes_xts_ctx),
- .base.cra_alignmask = 0xf,
- .base.cra_module = THIS_MODULE,
.min_keysize = 2 * AES_MIN_KEY_SIZE,
.max_keysize = 2 * AES_MAX_KEY_SIZE,
@@ -2113,7 +2041,6 @@ static int atmel_aes_authenc_setkey(struct crypto_aead *tfm, const u8 *key,
{
struct atmel_aes_authenc_ctx *ctx = crypto_aead_ctx(tfm);
struct crypto_authenc_keys keys;
- u32 flags;
int err;
if (crypto_authenc_extractkeys(&keys, key, keylen) != 0)
@@ -2123,11 +2050,9 @@ static int atmel_aes_authenc_setkey(struct crypto_aead *tfm, const u8 *key,
goto badkey;
/* Save auth key. */
- flags = crypto_aead_get_flags(tfm);
err = atmel_sha_authenc_setkey(ctx->auth,
keys.authkey, keys.authkeylen,
- &flags);
- crypto_aead_set_flags(tfm, flags & CRYPTO_TFM_RES_MASK);
+ crypto_aead_get_flags(tfm));
if (err) {
memzero_explicit(&keys, sizeof(keys));
return err;
@@ -2141,7 +2066,6 @@ static int atmel_aes_authenc_setkey(struct crypto_aead *tfm, const u8 *key,
return 0;
badkey:
- crypto_aead_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
memzero_explicit(&keys, sizeof(keys));
return -EINVAL;
}
@@ -2252,12 +2176,8 @@ static struct aead_alg aes_authenc_algs[] = {
.base = {
.cra_name = "authenc(hmac(sha1),cbc(aes))",
.cra_driver_name = "atmel-authenc-hmac-sha1-cbc-aes",
- .cra_priority = ATMEL_AES_PRIORITY,
- .cra_flags = CRYPTO_ALG_ASYNC,
.cra_blocksize = AES_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct atmel_aes_authenc_ctx),
- .cra_alignmask = 0xf,
- .cra_module = THIS_MODULE,
},
},
{
@@ -2272,12 +2192,8 @@ static struct aead_alg aes_authenc_algs[] = {
.base = {
.cra_name = "authenc(hmac(sha224),cbc(aes))",
.cra_driver_name = "atmel-authenc-hmac-sha224-cbc-aes",
- .cra_priority = ATMEL_AES_PRIORITY,
- .cra_flags = CRYPTO_ALG_ASYNC,
.cra_blocksize = AES_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct atmel_aes_authenc_ctx),
- .cra_alignmask = 0xf,
- .cra_module = THIS_MODULE,
},
},
{
@@ -2292,12 +2208,8 @@ static struct aead_alg aes_authenc_algs[] = {
.base = {
.cra_name = "authenc(hmac(sha256),cbc(aes))",
.cra_driver_name = "atmel-authenc-hmac-sha256-cbc-aes",
- .cra_priority = ATMEL_AES_PRIORITY,
- .cra_flags = CRYPTO_ALG_ASYNC,
.cra_blocksize = AES_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct atmel_aes_authenc_ctx),
- .cra_alignmask = 0xf,
- .cra_module = THIS_MODULE,
},
},
{
@@ -2312,12 +2224,8 @@ static struct aead_alg aes_authenc_algs[] = {
.base = {
.cra_name = "authenc(hmac(sha384),cbc(aes))",
.cra_driver_name = "atmel-authenc-hmac-sha384-cbc-aes",
- .cra_priority = ATMEL_AES_PRIORITY,
- .cra_flags = CRYPTO_ALG_ASYNC,
.cra_blocksize = AES_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct atmel_aes_authenc_ctx),
- .cra_alignmask = 0xf,
- .cra_module = THIS_MODULE,
},
},
{
@@ -2332,12 +2240,8 @@ static struct aead_alg aes_authenc_algs[] = {
.base = {
.cra_name = "authenc(hmac(sha512),cbc(aes))",
.cra_driver_name = "atmel-authenc-hmac-sha512-cbc-aes",
- .cra_priority = ATMEL_AES_PRIORITY,
- .cra_flags = CRYPTO_ALG_ASYNC,
.cra_blocksize = AES_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct atmel_aes_authenc_ctx),
- .cra_alignmask = 0xf,
- .cra_module = THIS_MODULE,
},
},
};
@@ -2364,47 +2268,30 @@ static void atmel_aes_buff_cleanup(struct atmel_aes_dev *dd)
free_page((unsigned long)dd->buf);
}
-static bool atmel_aes_filter(struct dma_chan *chan, void *slave)
+static int atmel_aes_dma_init(struct atmel_aes_dev *dd)
{
- struct at_dma_slave *sl = slave;
-
- if (sl && sl->dma_dev == chan->device->dev) {
- chan->private = sl;
- return true;
- } else {
- return false;
- }
-}
-
-static int atmel_aes_dma_init(struct atmel_aes_dev *dd,
- struct crypto_platform_data *pdata)
-{
- struct at_dma_slave *slave;
- dma_cap_mask_t mask;
-
- dma_cap_zero(mask);
- dma_cap_set(DMA_SLAVE, mask);
+ int ret;
/* Try to grab 2 DMA channels */
- slave = &pdata->dma_slave->rxdata;
- dd->src.chan = dma_request_slave_channel_compat(mask, atmel_aes_filter,
- slave, dd->dev, "tx");
- if (!dd->src.chan)
+ dd->src.chan = dma_request_chan(dd->dev, "tx");
+ if (IS_ERR(dd->src.chan)) {
+ ret = PTR_ERR(dd->src.chan);
goto err_dma_in;
+ }
- slave = &pdata->dma_slave->txdata;
- dd->dst.chan = dma_request_slave_channel_compat(mask, atmel_aes_filter,
- slave, dd->dev, "rx");
- if (!dd->dst.chan)
+ dd->dst.chan = dma_request_chan(dd->dev, "rx");
+ if (IS_ERR(dd->dst.chan)) {
+ ret = PTR_ERR(dd->dst.chan);
goto err_dma_out;
+ }
return 0;
err_dma_out:
dma_release_channel(dd->src.chan);
err_dma_in:
- dev_warn(dd->dev, "no DMA channel available\n");
- return -ENODEV;
+ dev_err(dd->dev, "no DMA channel available\n");
+ return ret;
}
static void atmel_aes_dma_cleanup(struct atmel_aes_dev *dd)
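
Beyond dropping the platform-data filter, dma_request_chan() returns an ERR_PTR (including -EPROBE_DEFER when the DMA controller has not probed yet), which the driver can now propagate instead of collapsing every failure into -ENODEV. A sketch:

	struct dma_chan *chan;

	chan = dma_request_chan(dev, "tx");
	if (IS_ERR(chan))
		return PTR_ERR(chan);	/* may be -EPROBE_DEFER; pass it up */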
@@ -2469,29 +2356,45 @@ static void atmel_aes_unregister_algs(struct atmel_aes_dev *dd)
crypto_unregister_skcipher(&aes_algs[i]);
}
+static void atmel_aes_crypto_alg_init(struct crypto_alg *alg)
+{
+ alg->cra_flags = CRYPTO_ALG_ASYNC;
+ alg->cra_alignmask = 0xf;
+ alg->cra_priority = ATMEL_AES_PRIORITY;
+ alg->cra_module = THIS_MODULE;
+}
+
static int atmel_aes_register_algs(struct atmel_aes_dev *dd)
{
int err, i, j;
for (i = 0; i < ARRAY_SIZE(aes_algs); i++) {
+ atmel_aes_crypto_alg_init(&aes_algs[i].base);
+
err = crypto_register_skcipher(&aes_algs[i]);
if (err)
goto err_aes_algs;
}
if (dd->caps.has_cfb64) {
+ atmel_aes_crypto_alg_init(&aes_cfb64_alg.base);
+
err = crypto_register_skcipher(&aes_cfb64_alg);
if (err)
goto err_aes_cfb64_alg;
}
if (dd->caps.has_gcm) {
+ atmel_aes_crypto_alg_init(&aes_gcm_alg.base);
+
err = crypto_register_aead(&aes_gcm_alg);
if (err)
goto err_aes_gcm_alg;
}
if (dd->caps.has_xts) {
+ atmel_aes_crypto_alg_init(&aes_xts_alg.base);
+
err = crypto_register_skcipher(&aes_xts_alg);
if (err)
goto err_aes_xts_alg;
@@ -2500,6 +2403,8 @@ static int atmel_aes_register_algs(struct atmel_aes_dev *dd)
#if IS_ENABLED(CONFIG_CRYPTO_DEV_ATMEL_AUTHENC)
if (dd->caps.has_authenc) {
for (i = 0; i < ARRAY_SIZE(aes_authenc_algs); i++) {
+ atmel_aes_crypto_alg_init(&aes_authenc_algs[i].base);
+
err = crypto_register_aead(&aes_authenc_algs[i]);
if (err)
goto err_aes_authenc_alg;
@@ -2533,7 +2438,6 @@ static void atmel_aes_get_cap(struct atmel_aes_dev *dd)
{
dd->caps.has_dualbuff = 0;
dd->caps.has_cfb64 = 0;
- dd->caps.has_ctr32 = 0;
dd->caps.has_gcm = 0;
dd->caps.has_xts = 0;
dd->caps.has_authenc = 0;
@@ -2544,7 +2448,6 @@ static void atmel_aes_get_cap(struct atmel_aes_dev *dd)
case 0x500:
dd->caps.has_dualbuff = 1;
dd->caps.has_cfb64 = 1;
- dd->caps.has_ctr32 = 1;
dd->caps.has_gcm = 1;
dd->caps.has_xts = 1;
dd->caps.has_authenc = 1;
@@ -2553,7 +2456,6 @@ static void atmel_aes_get_cap(struct atmel_aes_dev *dd)
case 0x200:
dd->caps.has_dualbuff = 1;
dd->caps.has_cfb64 = 1;
- dd->caps.has_ctr32 = 1;
dd->caps.has_gcm = 1;
dd->caps.max_burst_size = 4;
break;
@@ -2577,65 +2479,18 @@ static const struct of_device_id atmel_aes_dt_ids[] = {
{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, atmel_aes_dt_ids);
-
-static struct crypto_platform_data *atmel_aes_of_init(struct platform_device *pdev)
-{
- struct device_node *np = pdev->dev.of_node;
- struct crypto_platform_data *pdata;
-
- if (!np) {
- dev_err(&pdev->dev, "device node not found\n");
- return ERR_PTR(-EINVAL);
- }
-
- pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL);
- if (!pdata)
- return ERR_PTR(-ENOMEM);
-
- pdata->dma_slave = devm_kzalloc(&pdev->dev,
- sizeof(*(pdata->dma_slave)),
- GFP_KERNEL);
- if (!pdata->dma_slave) {
- devm_kfree(&pdev->dev, pdata);
- return ERR_PTR(-ENOMEM);
- }
-
- return pdata;
-}
-#else
-static inline struct crypto_platform_data *atmel_aes_of_init(struct platform_device *pdev)
-{
- return ERR_PTR(-EINVAL);
-}
#endif
static int atmel_aes_probe(struct platform_device *pdev)
{
struct atmel_aes_dev *aes_dd;
- struct crypto_platform_data *pdata;
struct device *dev = &pdev->dev;
struct resource *aes_res;
int err;
- pdata = pdev->dev.platform_data;
- if (!pdata) {
- pdata = atmel_aes_of_init(pdev);
- if (IS_ERR(pdata)) {
- err = PTR_ERR(pdata);
- goto aes_dd_err;
- }
- }
-
- if (!pdata->dma_slave) {
- err = -ENXIO;
- goto aes_dd_err;
- }
-
aes_dd = devm_kzalloc(&pdev->dev, sizeof(*aes_dd), GFP_KERNEL);
- if (aes_dd == NULL) {
- err = -ENOMEM;
- goto aes_dd_err;
- }
+ if (!aes_dd)
+ return -ENOMEM;
aes_dd->dev = dev;
@@ -2656,7 +2511,7 @@ static int atmel_aes_probe(struct platform_device *pdev)
if (!aes_res) {
dev_err(dev, "no MEM resource info\n");
err = -ENODEV;
- goto res_err;
+ goto err_tasklet_kill;
}
aes_dd->phys_base = aes_res->start;
@@ -2664,14 +2519,14 @@ static int atmel_aes_probe(struct platform_device *pdev)
aes_dd->irq = platform_get_irq(pdev, 0);
if (aes_dd->irq < 0) {
err = aes_dd->irq;
- goto res_err;
+ goto err_tasklet_kill;
}
err = devm_request_irq(&pdev->dev, aes_dd->irq, atmel_aes_irq,
IRQF_SHARED, "atmel-aes", aes_dd);
if (err) {
dev_err(dev, "unable to request aes irq.\n");
- goto res_err;
+ goto err_tasklet_kill;
}
/* Initializing the clock */
@@ -2679,40 +2534,40 @@ static int atmel_aes_probe(struct platform_device *pdev)
if (IS_ERR(aes_dd->iclk)) {
dev_err(dev, "clock initialization failed.\n");
err = PTR_ERR(aes_dd->iclk);
- goto res_err;
+ goto err_tasklet_kill;
}
aes_dd->io_base = devm_ioremap_resource(&pdev->dev, aes_res);
if (IS_ERR(aes_dd->io_base)) {
dev_err(dev, "can't ioremap\n");
err = PTR_ERR(aes_dd->io_base);
- goto res_err;
+ goto err_tasklet_kill;
}
err = clk_prepare(aes_dd->iclk);
if (err)
- goto res_err;
+ goto err_tasklet_kill;
err = atmel_aes_hw_version_init(aes_dd);
if (err)
- goto iclk_unprepare;
+ goto err_iclk_unprepare;
atmel_aes_get_cap(aes_dd);
#if IS_ENABLED(CONFIG_CRYPTO_DEV_ATMEL_AUTHENC)
if (aes_dd->caps.has_authenc && !atmel_sha_authenc_is_ready()) {
err = -EPROBE_DEFER;
- goto iclk_unprepare;
+ goto err_iclk_unprepare;
}
#endif
err = atmel_aes_buff_init(aes_dd);
if (err)
- goto err_aes_buff;
+ goto err_iclk_unprepare;
- err = atmel_aes_dma_init(aes_dd, pdata);
+ err = atmel_aes_dma_init(aes_dd);
if (err)
- goto err_aes_dma;
+ goto err_buff_cleanup;
spin_lock(&atmel_aes.lock);
list_add_tail(&aes_dd->list, &atmel_aes.dev_list);
@@ -2733,17 +2588,13 @@ err_algs:
list_del(&aes_dd->list);
spin_unlock(&atmel_aes.lock);
atmel_aes_dma_cleanup(aes_dd);
-err_aes_dma:
+err_buff_cleanup:
atmel_aes_buff_cleanup(aes_dd);
-err_aes_buff:
-iclk_unprepare:
+err_iclk_unprepare:
clk_unprepare(aes_dd->iclk);
-res_err:
+err_tasklet_kill:
tasklet_kill(&aes_dd->done_task);
tasklet_kill(&aes_dd->queue_task);
-aes_dd_err:
- if (err != -EPROBE_DEFER)
- dev_err(dev, "initialization failed.\n");
return err;
}
diff --git a/drivers/crypto/atmel-authenc.h b/drivers/crypto/atmel-authenc.h
index d6de810df44f..c6530a1c8c20 100644
--- a/drivers/crypto/atmel-authenc.h
+++ b/drivers/crypto/atmel-authenc.h
@@ -30,8 +30,7 @@ unsigned int atmel_sha_authenc_get_reqsize(void);
struct atmel_sha_authenc_ctx *atmel_sha_authenc_spawn(unsigned long mode);
void atmel_sha_authenc_free(struct atmel_sha_authenc_ctx *auth);
int atmel_sha_authenc_setkey(struct atmel_sha_authenc_ctx *auth,
- const u8 *key, unsigned int keylen,
- u32 *flags);
+ const u8 *key, unsigned int keylen, u32 flags);
int atmel_sha_authenc_schedule(struct ahash_request *req,
struct atmel_sha_authenc_ctx *auth,
diff --git a/drivers/crypto/atmel-sha.c b/drivers/crypto/atmel-sha.c
index 8ea0e4bcde0d..e536e2a6bbd8 100644
--- a/drivers/crypto/atmel-sha.c
+++ b/drivers/crypto/atmel-sha.c
@@ -21,6 +21,7 @@
#include <linux/platform_device.h>
#include <linux/device.h>
+#include <linux/dmaengine.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
@@ -36,10 +37,11 @@
#include <crypto/sha.h>
#include <crypto/hash.h>
#include <crypto/internal/hash.h>
-#include <linux/platform_data/crypto-atmel.h>
#include "atmel-sha-regs.h"
#include "atmel-authenc.h"
+#define ATMEL_SHA_PRIORITY 300
+
/* SHA flags */
#define SHA_FLAGS_BUSY BIT(0)
#define SHA_FLAGS_FINAL BIT(1)
@@ -134,7 +136,6 @@ struct atmel_sha_dev {
void __iomem *io_base;
spinlock_t lock;
- int err;
struct tasklet_struct done_task;
struct tasklet_struct queue_task;
@@ -851,7 +852,7 @@ static int atmel_sha_update_dma_start(struct atmel_sha_dev *dd)
0, final);
}
-static int atmel_sha_update_dma_stop(struct atmel_sha_dev *dd)
+static void atmel_sha_update_dma_stop(struct atmel_sha_dev *dd)
{
struct atmel_sha_reqctx *ctx = ahash_request_ctx(dd->req);
@@ -870,8 +871,6 @@ static int atmel_sha_update_dma_stop(struct atmel_sha_dev *dd)
dma_unmap_single(dd->dev, ctx->dma_addr, ctx->buflen +
ctx->block_size, DMA_TO_DEVICE);
}
-
- return 0;
}
static int atmel_sha_update_req(struct atmel_sha_dev *dd)
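With the dd->err field gone, atmel_sha_update_dma_stop() has nothing left to report and becomes void: errors are no longer parked in the device struct for a later check in the done path (the atmel_sha_done() hunk below deletes that check). A sketch of the plumbing change, with foo_* names hypothetical:

/*
 * Removed pattern (sketch): a callback stashed its status in dd->err and
 * the done tasklet fished it out later. New pattern: an error travels
 * with the return value of the call that produced it.
 */
err = foo_dma_start(dd);
if (err)
	return foo_finish_req(dd, err);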
@@ -1025,7 +1024,6 @@ static int atmel_sha_hw_init(struct atmel_sha_dev *dd)
if (!(SHA_FLAGS_INIT & dd->flags)) {
atmel_sha_write(dd, SHA_CR, SHA_CR_SWRST);
dd->flags |= SHA_FLAGS_INIT;
- dd->err = 0;
}
return 0;
@@ -1036,9 +1034,13 @@ static inline unsigned int atmel_sha_get_version(struct atmel_sha_dev *dd)
return atmel_sha_read(dd, SHA_HW_VERSION) & 0x00000fff;
}
-static void atmel_sha_hw_version_init(struct atmel_sha_dev *dd)
+static int atmel_sha_hw_version_init(struct atmel_sha_dev *dd)
{
- atmel_sha_hw_init(dd);
+ int err;
+
+ err = atmel_sha_hw_init(dd);
+ if (err)
+ return err;
dd->hw_version = atmel_sha_get_version(dd);
@@ -1046,6 +1048,8 @@ static void atmel_sha_hw_version_init(struct atmel_sha_dev *dd)
"version: 0x%x\n", dd->hw_version);
clk_disable(dd->iclk);
+
+ return 0;
}
static int atmel_sha_handle_queue(struct atmel_sha_dev *dd,
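atmel_sha_hw_version_init() used to ignore the return code of atmel_sha_hw_init(), silently reading a version register on a block whose clock may never have been enabled; it now propagates the error so probe can unwind. The shape of the fix, sketched with hypothetical foo_* names:

static int foo_hw_version_init(struct foo_dev *dd)
{
	int err;

	err = foo_hw_init(dd);	/* may fail, e.g. on clk_enable() */
	if (err)
		return err;

	dd->hw_version = foo_read_version(dd);
	clk_disable(dd->iclk);

	return 0;
}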
@@ -1248,130 +1252,66 @@ static int atmel_sha_cra_init(struct crypto_tfm *tfm)
return 0;
}
+static void atmel_sha_alg_init(struct ahash_alg *alg)
+{
+ alg->halg.base.cra_priority = ATMEL_SHA_PRIORITY;
+ alg->halg.base.cra_flags = CRYPTO_ALG_ASYNC;
+ alg->halg.base.cra_ctxsize = sizeof(struct atmel_sha_ctx);
+ alg->halg.base.cra_module = THIS_MODULE;
+ alg->halg.base.cra_init = atmel_sha_cra_init;
+
+ alg->halg.statesize = sizeof(struct atmel_sha_reqctx);
+
+ alg->init = atmel_sha_init;
+ alg->update = atmel_sha_update;
+ alg->final = atmel_sha_final;
+ alg->finup = atmel_sha_finup;
+ alg->digest = atmel_sha_digest;
+ alg->export = atmel_sha_export;
+ alg->import = atmel_sha_import;
+}
+
static struct ahash_alg sha_1_256_algs[] = {
{
- .init = atmel_sha_init,
- .update = atmel_sha_update,
- .final = atmel_sha_final,
- .finup = atmel_sha_finup,
- .digest = atmel_sha_digest,
- .export = atmel_sha_export,
- .import = atmel_sha_import,
- .halg = {
- .digestsize = SHA1_DIGEST_SIZE,
- .statesize = sizeof(struct atmel_sha_reqctx),
- .base = {
- .cra_name = "sha1",
- .cra_driver_name = "atmel-sha1",
- .cra_priority = 100,
- .cra_flags = CRYPTO_ALG_ASYNC,
- .cra_blocksize = SHA1_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct atmel_sha_ctx),
- .cra_alignmask = 0,
- .cra_module = THIS_MODULE,
- .cra_init = atmel_sha_cra_init,
- }
- }
+ .halg.base.cra_name = "sha1",
+ .halg.base.cra_driver_name = "atmel-sha1",
+ .halg.base.cra_blocksize = SHA1_BLOCK_SIZE,
+
+ .halg.digestsize = SHA1_DIGEST_SIZE,
},
{
- .init = atmel_sha_init,
- .update = atmel_sha_update,
- .final = atmel_sha_final,
- .finup = atmel_sha_finup,
- .digest = atmel_sha_digest,
- .export = atmel_sha_export,
- .import = atmel_sha_import,
- .halg = {
- .digestsize = SHA256_DIGEST_SIZE,
- .statesize = sizeof(struct atmel_sha_reqctx),
- .base = {
- .cra_name = "sha256",
- .cra_driver_name = "atmel-sha256",
- .cra_priority = 100,
- .cra_flags = CRYPTO_ALG_ASYNC,
- .cra_blocksize = SHA256_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct atmel_sha_ctx),
- .cra_alignmask = 0,
- .cra_module = THIS_MODULE,
- .cra_init = atmel_sha_cra_init,
- }
- }
+ .halg.base.cra_name = "sha256",
+ .halg.base.cra_driver_name = "atmel-sha256",
+ .halg.base.cra_blocksize = SHA256_BLOCK_SIZE,
+
+ .halg.digestsize = SHA256_DIGEST_SIZE,
},
};
static struct ahash_alg sha_224_alg = {
- .init = atmel_sha_init,
- .update = atmel_sha_update,
- .final = atmel_sha_final,
- .finup = atmel_sha_finup,
- .digest = atmel_sha_digest,
- .export = atmel_sha_export,
- .import = atmel_sha_import,
- .halg = {
- .digestsize = SHA224_DIGEST_SIZE,
- .statesize = sizeof(struct atmel_sha_reqctx),
- .base = {
- .cra_name = "sha224",
- .cra_driver_name = "atmel-sha224",
- .cra_priority = 100,
- .cra_flags = CRYPTO_ALG_ASYNC,
- .cra_blocksize = SHA224_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct atmel_sha_ctx),
- .cra_alignmask = 0,
- .cra_module = THIS_MODULE,
- .cra_init = atmel_sha_cra_init,
- }
- }
+ .halg.base.cra_name = "sha224",
+ .halg.base.cra_driver_name = "atmel-sha224",
+ .halg.base.cra_blocksize = SHA224_BLOCK_SIZE,
+
+ .halg.digestsize = SHA224_DIGEST_SIZE,
};
static struct ahash_alg sha_384_512_algs[] = {
{
- .init = atmel_sha_init,
- .update = atmel_sha_update,
- .final = atmel_sha_final,
- .finup = atmel_sha_finup,
- .digest = atmel_sha_digest,
- .export = atmel_sha_export,
- .import = atmel_sha_import,
- .halg = {
- .digestsize = SHA384_DIGEST_SIZE,
- .statesize = sizeof(struct atmel_sha_reqctx),
- .base = {
- .cra_name = "sha384",
- .cra_driver_name = "atmel-sha384",
- .cra_priority = 100,
- .cra_flags = CRYPTO_ALG_ASYNC,
- .cra_blocksize = SHA384_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct atmel_sha_ctx),
- .cra_alignmask = 0x3,
- .cra_module = THIS_MODULE,
- .cra_init = atmel_sha_cra_init,
- }
- }
+ .halg.base.cra_name = "sha384",
+ .halg.base.cra_driver_name = "atmel-sha384",
+ .halg.base.cra_blocksize = SHA384_BLOCK_SIZE,
+ .halg.base.cra_alignmask = 0x3,
+
+ .halg.digestsize = SHA384_DIGEST_SIZE,
},
{
- .init = atmel_sha_init,
- .update = atmel_sha_update,
- .final = atmel_sha_final,
- .finup = atmel_sha_finup,
- .digest = atmel_sha_digest,
- .export = atmel_sha_export,
- .import = atmel_sha_import,
- .halg = {
- .digestsize = SHA512_DIGEST_SIZE,
- .statesize = sizeof(struct atmel_sha_reqctx),
- .base = {
- .cra_name = "sha512",
- .cra_driver_name = "atmel-sha512",
- .cra_priority = 100,
- .cra_flags = CRYPTO_ALG_ASYNC,
- .cra_blocksize = SHA512_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct atmel_sha_ctx),
- .cra_alignmask = 0x3,
- .cra_module = THIS_MODULE,
- .cra_init = atmel_sha_cra_init,
- }
- }
+ .halg.base.cra_name = "sha512",
+ .halg.base.cra_driver_name = "atmel-sha512",
+ .halg.base.cra_blocksize = SHA512_BLOCK_SIZE,
+ .halg.base.cra_alignmask = 0x3,
+
+ .halg.digestsize = SHA512_DIGEST_SIZE,
},
};
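The algorithm tables above now carry only what actually differs per algorithm (name, block size, digest size, alignmask); everything shared is filled in by atmel_sha_alg_init() at registration time, as the atmel_sha_register_algs() hunk further down shows. A sketch of the split, with foo_* names hypothetical:

static struct ahash_alg foo_algs[] = {
	{
		.halg.base.cra_name	 = "sha1",
		.halg.base.cra_blocksize = SHA1_BLOCK_SIZE,
		.halg.digestsize	 = SHA1_DIGEST_SIZE,
	},
	/* ... */
};

static int foo_register_algs(void)
{
	int i, err;

	for (i = 0; i < ARRAY_SIZE(foo_algs); i++) {
		foo_alg_init(&foo_algs[i]);	/* shared fields, once */
		err = crypto_register_ahash(&foo_algs[i]);
		if (err)
			return err;
	}

	return 0;
}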
@@ -1395,10 +1335,6 @@ static int atmel_sha_done(struct atmel_sha_dev *dd)
if (SHA_FLAGS_DMA_ACTIVE & dd->flags) {
dd->flags &= ~SHA_FLAGS_DMA_ACTIVE;
atmel_sha_update_dma_stop(dd);
- if (dd->err) {
- err = dd->err;
- goto finish;
- }
}
if (SHA_FLAGS_OUTPUT_READY & dd->flags) {
/* hash or semi-hash ready */
@@ -1493,7 +1429,6 @@ static void atmel_sha_dma_callback2(void *data)
struct scatterlist *sg;
int nents;
- dmaengine_terminate_all(dma->chan);
dma_unmap_sg(dd->dev, dma->sg, dma->nents, DMA_TO_DEVICE);
sg = dma->sg;
@@ -1918,12 +1853,7 @@ static int atmel_sha_hmac_setkey(struct crypto_ahash *tfm, const u8 *key,
{
struct atmel_sha_hmac_ctx *hmac = crypto_ahash_ctx(tfm);
- if (atmel_sha_hmac_key_set(&hmac->hkey, key, keylen)) {
- crypto_ahash_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
- return -EINVAL;
- }
-
- return 0;
+ return atmel_sha_hmac_key_set(&hmac->hkey, key, keylen);
}
static int atmel_sha_hmac_init(struct ahash_request *req)
@@ -2084,131 +2014,61 @@ static void atmel_sha_hmac_cra_exit(struct crypto_tfm *tfm)
atmel_sha_hmac_key_release(&hmac->hkey);
}
+static void atmel_sha_hmac_alg_init(struct ahash_alg *alg)
+{
+ alg->halg.base.cra_priority = ATMEL_SHA_PRIORITY;
+ alg->halg.base.cra_flags = CRYPTO_ALG_ASYNC;
+ alg->halg.base.cra_ctxsize = sizeof(struct atmel_sha_hmac_ctx);
+ alg->halg.base.cra_module = THIS_MODULE;
+ alg->halg.base.cra_init = atmel_sha_hmac_cra_init;
+ alg->halg.base.cra_exit = atmel_sha_hmac_cra_exit;
+
+ alg->halg.statesize = sizeof(struct atmel_sha_reqctx);
+
+ alg->init = atmel_sha_hmac_init;
+ alg->update = atmel_sha_update;
+ alg->final = atmel_sha_final;
+ alg->digest = atmel_sha_hmac_digest;
+ alg->setkey = atmel_sha_hmac_setkey;
+ alg->export = atmel_sha_export;
+ alg->import = atmel_sha_import;
+}
+
static struct ahash_alg sha_hmac_algs[] = {
{
- .init = atmel_sha_hmac_init,
- .update = atmel_sha_update,
- .final = atmel_sha_final,
- .digest = atmel_sha_hmac_digest,
- .setkey = atmel_sha_hmac_setkey,
- .export = atmel_sha_export,
- .import = atmel_sha_import,
- .halg = {
- .digestsize = SHA1_DIGEST_SIZE,
- .statesize = sizeof(struct atmel_sha_reqctx),
- .base = {
- .cra_name = "hmac(sha1)",
- .cra_driver_name = "atmel-hmac-sha1",
- .cra_priority = 100,
- .cra_flags = CRYPTO_ALG_ASYNC,
- .cra_blocksize = SHA1_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct atmel_sha_hmac_ctx),
- .cra_alignmask = 0,
- .cra_module = THIS_MODULE,
- .cra_init = atmel_sha_hmac_cra_init,
- .cra_exit = atmel_sha_hmac_cra_exit,
- }
- }
+ .halg.base.cra_name = "hmac(sha1)",
+ .halg.base.cra_driver_name = "atmel-hmac-sha1",
+ .halg.base.cra_blocksize = SHA1_BLOCK_SIZE,
+
+ .halg.digestsize = SHA1_DIGEST_SIZE,
},
{
- .init = atmel_sha_hmac_init,
- .update = atmel_sha_update,
- .final = atmel_sha_final,
- .digest = atmel_sha_hmac_digest,
- .setkey = atmel_sha_hmac_setkey,
- .export = atmel_sha_export,
- .import = atmel_sha_import,
- .halg = {
- .digestsize = SHA224_DIGEST_SIZE,
- .statesize = sizeof(struct atmel_sha_reqctx),
- .base = {
- .cra_name = "hmac(sha224)",
- .cra_driver_name = "atmel-hmac-sha224",
- .cra_priority = 100,
- .cra_flags = CRYPTO_ALG_ASYNC,
- .cra_blocksize = SHA224_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct atmel_sha_hmac_ctx),
- .cra_alignmask = 0,
- .cra_module = THIS_MODULE,
- .cra_init = atmel_sha_hmac_cra_init,
- .cra_exit = atmel_sha_hmac_cra_exit,
- }
- }
+ .halg.base.cra_name = "hmac(sha224)",
+ .halg.base.cra_driver_name = "atmel-hmac-sha224",
+ .halg.base.cra_blocksize = SHA224_BLOCK_SIZE,
+
+ .halg.digestsize = SHA224_DIGEST_SIZE,
},
{
- .init = atmel_sha_hmac_init,
- .update = atmel_sha_update,
- .final = atmel_sha_final,
- .digest = atmel_sha_hmac_digest,
- .setkey = atmel_sha_hmac_setkey,
- .export = atmel_sha_export,
- .import = atmel_sha_import,
- .halg = {
- .digestsize = SHA256_DIGEST_SIZE,
- .statesize = sizeof(struct atmel_sha_reqctx),
- .base = {
- .cra_name = "hmac(sha256)",
- .cra_driver_name = "atmel-hmac-sha256",
- .cra_priority = 100,
- .cra_flags = CRYPTO_ALG_ASYNC,
- .cra_blocksize = SHA256_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct atmel_sha_hmac_ctx),
- .cra_alignmask = 0,
- .cra_module = THIS_MODULE,
- .cra_init = atmel_sha_hmac_cra_init,
- .cra_exit = atmel_sha_hmac_cra_exit,
- }
- }
+ .halg.base.cra_name = "hmac(sha256)",
+ .halg.base.cra_driver_name = "atmel-hmac-sha256",
+ .halg.base.cra_blocksize = SHA256_BLOCK_SIZE,
+
+ .halg.digestsize = SHA256_DIGEST_SIZE,
},
{
- .init = atmel_sha_hmac_init,
- .update = atmel_sha_update,
- .final = atmel_sha_final,
- .digest = atmel_sha_hmac_digest,
- .setkey = atmel_sha_hmac_setkey,
- .export = atmel_sha_export,
- .import = atmel_sha_import,
- .halg = {
- .digestsize = SHA384_DIGEST_SIZE,
- .statesize = sizeof(struct atmel_sha_reqctx),
- .base = {
- .cra_name = "hmac(sha384)",
- .cra_driver_name = "atmel-hmac-sha384",
- .cra_priority = 100,
- .cra_flags = CRYPTO_ALG_ASYNC,
- .cra_blocksize = SHA384_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct atmel_sha_hmac_ctx),
- .cra_alignmask = 0,
- .cra_module = THIS_MODULE,
- .cra_init = atmel_sha_hmac_cra_init,
- .cra_exit = atmel_sha_hmac_cra_exit,
- }
- }
+ .halg.base.cra_name = "hmac(sha384)",
+ .halg.base.cra_driver_name = "atmel-hmac-sha384",
+ .halg.base.cra_blocksize = SHA384_BLOCK_SIZE,
+
+ .halg.digestsize = SHA384_DIGEST_SIZE,
},
{
- .init = atmel_sha_hmac_init,
- .update = atmel_sha_update,
- .final = atmel_sha_final,
- .digest = atmel_sha_hmac_digest,
- .setkey = atmel_sha_hmac_setkey,
- .export = atmel_sha_export,
- .import = atmel_sha_import,
- .halg = {
- .digestsize = SHA512_DIGEST_SIZE,
- .statesize = sizeof(struct atmel_sha_reqctx),
- .base = {
- .cra_name = "hmac(sha512)",
- .cra_driver_name = "atmel-hmac-sha512",
- .cra_priority = 100,
- .cra_flags = CRYPTO_ALG_ASYNC,
- .cra_blocksize = SHA512_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct atmel_sha_hmac_ctx),
- .cra_alignmask = 0,
- .cra_module = THIS_MODULE,
- .cra_init = atmel_sha_hmac_cra_init,
- .cra_exit = atmel_sha_hmac_cra_exit,
- }
- }
+ .halg.base.cra_name = "hmac(sha512)",
+ .halg.base.cra_driver_name = "atmel-hmac-sha512",
+ .halg.base.cra_blocksize = SHA512_BLOCK_SIZE,
+
+ .halg.digestsize = SHA512_DIGEST_SIZE,
},
};
@@ -2347,18 +2207,13 @@ void atmel_sha_authenc_free(struct atmel_sha_authenc_ctx *auth)
EXPORT_SYMBOL_GPL(atmel_sha_authenc_free);
int atmel_sha_authenc_setkey(struct atmel_sha_authenc_ctx *auth,
- const u8 *key, unsigned int keylen,
- u32 *flags)
+ const u8 *key, unsigned int keylen, u32 flags)
{
struct crypto_ahash *tfm = auth->tfm;
- int err;
crypto_ahash_clear_flags(tfm, CRYPTO_TFM_REQ_MASK);
- crypto_ahash_set_flags(tfm, *flags & CRYPTO_TFM_REQ_MASK);
- err = crypto_ahash_setkey(tfm, key, keylen);
- *flags = crypto_ahash_get_flags(tfm);
-
- return err;
+ crypto_ahash_set_flags(tfm, flags & CRYPTO_TFM_REQ_MASK);
+ return crypto_ahash_setkey(tfm, key, keylen);
}
EXPORT_SYMBOL_GPL(atmel_sha_authenc_setkey);
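Since the callee no longer writes CRYPTO_TFM_RES_* bits back, the flags parameter degrades from u32 * to a plain input u32; only the CRYPTO_TFM_REQ_MASK bits matter, and the callee does that masking itself. A hedged caller-side sketch (the actual atmel-aes call site is not part of this hunk; ctx->auth, keys.* and tfm are assumed names):

/* Caller passes its raw flags; the callee keeps only the REQ bits. */
err = atmel_sha_authenc_setkey(ctx->auth, keys.authkey, keys.authkeylen,
			       crypto_aead_get_flags(tfm));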
@@ -2561,12 +2416,16 @@ static int atmel_sha_register_algs(struct atmel_sha_dev *dd)
int err, i, j;
for (i = 0; i < ARRAY_SIZE(sha_1_256_algs); i++) {
+ atmel_sha_alg_init(&sha_1_256_algs[i]);
+
err = crypto_register_ahash(&sha_1_256_algs[i]);
if (err)
goto err_sha_1_256_algs;
}
if (dd->caps.has_sha224) {
+ atmel_sha_alg_init(&sha_224_alg);
+
err = crypto_register_ahash(&sha_224_alg);
if (err)
goto err_sha_224_algs;
@@ -2574,6 +2433,8 @@ static int atmel_sha_register_algs(struct atmel_sha_dev *dd)
if (dd->caps.has_sha_384_512) {
for (i = 0; i < ARRAY_SIZE(sha_384_512_algs); i++) {
+ atmel_sha_alg_init(&sha_384_512_algs[i]);
+
err = crypto_register_ahash(&sha_384_512_algs[i]);
if (err)
goto err_sha_384_512_algs;
@@ -2582,6 +2443,8 @@ static int atmel_sha_register_algs(struct atmel_sha_dev *dd)
if (dd->caps.has_hmac) {
for (i = 0; i < ARRAY_SIZE(sha_hmac_algs); i++) {
+ atmel_sha_hmac_alg_init(&sha_hmac_algs[i]);
+
err = crypto_register_ahash(&sha_hmac_algs[i]);
if (err)
goto err_sha_hmac_algs;
@@ -2608,35 +2471,14 @@ err_sha_1_256_algs:
return err;
}
-static bool atmel_sha_filter(struct dma_chan *chan, void *slave)
+static int atmel_sha_dma_init(struct atmel_sha_dev *dd)
{
- struct at_dma_slave *sl = slave;
-
- if (sl && sl->dma_dev == chan->device->dev) {
- chan->private = sl;
- return true;
- } else {
- return false;
+ dd->dma_lch_in.chan = dma_request_chan(dd->dev, "tx");
+ if (IS_ERR(dd->dma_lch_in.chan)) {
+ dev_err(dd->dev, "DMA channel is not available\n");
+ return PTR_ERR(dd->dma_lch_in.chan);
}
-}
-static int atmel_sha_dma_init(struct atmel_sha_dev *dd,
- struct crypto_platform_data *pdata)
-{
- dma_cap_mask_t mask_in;
-
- /* Try to grab DMA channel */
- dma_cap_zero(mask_in);
- dma_cap_set(DMA_SLAVE, mask_in);
-
- dd->dma_lch_in.chan = dma_request_slave_channel_compat(mask_in,
- atmel_sha_filter, &pdata->dma_slave->rxdata, dd->dev, "tx");
- if (!dd->dma_lch_in.chan) {
- dev_warn(dd->dev, "no DMA channel available\n");
- return -ENODEV;
- }
-
- dd->dma_lch_in.dma_conf.direction = DMA_MEM_TO_DEV;
dd->dma_lch_in.dma_conf.dst_addr = dd->phys_base +
SHA_REG_DIN(0);
dd->dma_lch_in.dma_conf.src_maxburst = 1;
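dma_request_slave_channel_compat() plus a filter callback is replaced by dma_request_chan(), which resolves the channel by name from DT/ACPI and returns an ERR_PTR(), so -EPROBE_DEFER propagates instead of being flattened to NULL and -ENODEV. The removed dma_conf.direction assignment matches dmaengine's deprecation of that field: the direction is supplied per transfer via dmaengine_prep_slave_sg(). Minimal sketch, foo_* hypothetical:

static int foo_dma_init(struct foo_dev *dd)
{
	dd->chan = dma_request_chan(dd->dev, "tx");
	if (IS_ERR(dd->chan))
		return PTR_ERR(dd->chan);	/* may be -EPROBE_DEFER */

	return 0;
}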
@@ -2709,49 +2551,18 @@ static const struct of_device_id atmel_sha_dt_ids[] = {
};
MODULE_DEVICE_TABLE(of, atmel_sha_dt_ids);
-
-static struct crypto_platform_data *atmel_sha_of_init(struct platform_device *pdev)
-{
- struct device_node *np = pdev->dev.of_node;
- struct crypto_platform_data *pdata;
-
- if (!np) {
- dev_err(&pdev->dev, "device node not found\n");
- return ERR_PTR(-EINVAL);
- }
-
- pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL);
- if (!pdata)
- return ERR_PTR(-ENOMEM);
-
- pdata->dma_slave = devm_kzalloc(&pdev->dev,
- sizeof(*(pdata->dma_slave)),
- GFP_KERNEL);
- if (!pdata->dma_slave)
- return ERR_PTR(-ENOMEM);
-
- return pdata;
-}
-#else /* CONFIG_OF */
-static inline struct crypto_platform_data *atmel_sha_of_init(struct platform_device *dev)
-{
- return ERR_PTR(-EINVAL);
-}
#endif
static int atmel_sha_probe(struct platform_device *pdev)
{
struct atmel_sha_dev *sha_dd;
- struct crypto_platform_data *pdata;
struct device *dev = &pdev->dev;
struct resource *sha_res;
int err;
sha_dd = devm_kzalloc(&pdev->dev, sizeof(*sha_dd), GFP_KERNEL);
- if (sha_dd == NULL) {
- err = -ENOMEM;
- goto sha_dd_err;
- }
+ if (!sha_dd)
+ return -ENOMEM;
sha_dd->dev = dev;
@@ -2772,7 +2583,7 @@ static int atmel_sha_probe(struct platform_device *pdev)
if (!sha_res) {
dev_err(dev, "no MEM resource info\n");
err = -ENODEV;
- goto res_err;
+ goto err_tasklet_kill;
}
sha_dd->phys_base = sha_res->start;
@@ -2780,14 +2591,14 @@ static int atmel_sha_probe(struct platform_device *pdev)
sha_dd->irq = platform_get_irq(pdev, 0);
if (sha_dd->irq < 0) {
err = sha_dd->irq;
- goto res_err;
+ goto err_tasklet_kill;
}
err = devm_request_irq(&pdev->dev, sha_dd->irq, atmel_sha_irq,
IRQF_SHARED, "atmel-sha", sha_dd);
if (err) {
dev_err(dev, "unable to request sha irq.\n");
- goto res_err;
+ goto err_tasklet_kill;
}
/* Initializing the clock */
@@ -2795,41 +2606,30 @@ static int atmel_sha_probe(struct platform_device *pdev)
if (IS_ERR(sha_dd->iclk)) {
dev_err(dev, "clock initialization failed.\n");
err = PTR_ERR(sha_dd->iclk);
- goto res_err;
+ goto err_tasklet_kill;
}
sha_dd->io_base = devm_ioremap_resource(&pdev->dev, sha_res);
if (IS_ERR(sha_dd->io_base)) {
dev_err(dev, "can't ioremap\n");
err = PTR_ERR(sha_dd->io_base);
- goto res_err;
+ goto err_tasklet_kill;
}
err = clk_prepare(sha_dd->iclk);
if (err)
- goto res_err;
+ goto err_tasklet_kill;
- atmel_sha_hw_version_init(sha_dd);
+ err = atmel_sha_hw_version_init(sha_dd);
+ if (err)
+ goto err_iclk_unprepare;
atmel_sha_get_cap(sha_dd);
if (sha_dd->caps.has_dma) {
- pdata = pdev->dev.platform_data;
- if (!pdata) {
- pdata = atmel_sha_of_init(pdev);
- if (IS_ERR(pdata)) {
- dev_err(&pdev->dev, "platform data not available\n");
- err = PTR_ERR(pdata);
- goto iclk_unprepare;
- }
- }
- if (!pdata->dma_slave) {
- err = -ENXIO;
- goto iclk_unprepare;
- }
- err = atmel_sha_dma_init(sha_dd, pdata);
+ err = atmel_sha_dma_init(sha_dd);
if (err)
- goto err_sha_dma;
+ goto err_iclk_unprepare;
dev_info(dev, "using %s for DMA transfers\n",
dma_chan_name(sha_dd->dma_lch_in.chan));
@@ -2855,14 +2655,11 @@ err_algs:
spin_unlock(&atmel_sha.lock);
if (sha_dd->caps.has_dma)
atmel_sha_dma_cleanup(sha_dd);
-err_sha_dma:
-iclk_unprepare:
+err_iclk_unprepare:
clk_unprepare(sha_dd->iclk);
-res_err:
+err_tasklet_kill:
tasklet_kill(&sha_dd->queue_task);
tasklet_kill(&sha_dd->done_task);
-sha_dd_err:
- dev_err(dev, "initialization failed.\n");
return err;
}
diff --git a/drivers/crypto/atmel-tdes.c b/drivers/crypto/atmel-tdes.c
index 0c1f79b30fc1..ed40dbb98c6b 100644
--- a/drivers/crypto/atmel-tdes.c
+++ b/drivers/crypto/atmel-tdes.c
@@ -21,6 +21,7 @@
#include <linux/platform_device.h>
#include <linux/device.h>
+#include <linux/dmaengine.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
@@ -30,31 +31,32 @@
#include <linux/of_device.h>
#include <linux/delay.h>
#include <linux/crypto.h>
-#include <linux/cryptohash.h>
#include <crypto/scatterwalk.h>
#include <crypto/algapi.h>
#include <crypto/internal/des.h>
-#include <crypto/hash.h>
-#include <crypto/internal/hash.h>
#include <crypto/internal/skcipher.h>
-#include <linux/platform_data/crypto-atmel.h>
#include "atmel-tdes-regs.h"
+#define ATMEL_TDES_PRIORITY 300
+
/* TDES flags */
-#define TDES_FLAGS_MODE_MASK 0x00ff
-#define TDES_FLAGS_ENCRYPT BIT(0)
-#define TDES_FLAGS_CBC BIT(1)
-#define TDES_FLAGS_CFB BIT(2)
-#define TDES_FLAGS_CFB8 BIT(3)
-#define TDES_FLAGS_CFB16 BIT(4)
-#define TDES_FLAGS_CFB32 BIT(5)
-#define TDES_FLAGS_CFB64 BIT(6)
-#define TDES_FLAGS_OFB BIT(7)
-
-#define TDES_FLAGS_INIT BIT(16)
-#define TDES_FLAGS_FAST BIT(17)
-#define TDES_FLAGS_BUSY BIT(18)
-#define TDES_FLAGS_DMA BIT(19)
+/* Reserve bits [17:16], [13:12], [2:0] for the TDES Mode Register */
+#define TDES_FLAGS_ENCRYPT TDES_MR_CYPHER_ENC
+#define TDES_FLAGS_OPMODE_MASK (TDES_MR_OPMOD_MASK | TDES_MR_CFBS_MASK)
+#define TDES_FLAGS_ECB TDES_MR_OPMOD_ECB
+#define TDES_FLAGS_CBC TDES_MR_OPMOD_CBC
+#define TDES_FLAGS_OFB TDES_MR_OPMOD_OFB
+#define TDES_FLAGS_CFB64 (TDES_MR_OPMOD_CFB | TDES_MR_CFBS_64b)
+#define TDES_FLAGS_CFB32 (TDES_MR_OPMOD_CFB | TDES_MR_CFBS_32b)
+#define TDES_FLAGS_CFB16 (TDES_MR_OPMOD_CFB | TDES_MR_CFBS_16b)
+#define TDES_FLAGS_CFB8 (TDES_MR_OPMOD_CFB | TDES_MR_CFBS_8b)
+
+#define TDES_FLAGS_MODE_MASK (TDES_FLAGS_OPMODE_MASK | TDES_FLAGS_ENCRYPT)
+
+#define TDES_FLAGS_INIT BIT(3)
+#define TDES_FLAGS_FAST BIT(4)
+#define TDES_FLAGS_BUSY BIT(5)
+#define TDES_FLAGS_DMA BIT(6)
#define ATMEL_TDES_QUEUE_LENGTH 50
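The flag redefinition above makes each driver mode flag numerically equal to the Mode Register field it selects, so a request's mode can be OR-ed into MR with one mask instead of the old if/else translation; bookkeeping flags (INIT/FAST/BUSY/DMA) retreat into low bits the MR encoding leaves free. The idea, sketched with hypothetical FOO_* field positions:

#define FOO_MR_CYPHER_ENC	BIT(0)
#define FOO_MR_OPMOD_MASK	GENMASK(13, 12)
#define FOO_MR_OPMOD_CBC	(1 << 12)

/* Driver flags are the register fields themselves ... */
#define FOO_FLAGS_ENCRYPT	FOO_MR_CYPHER_ENC
#define FOO_FLAGS_CBC		FOO_MR_OPMOD_CBC
#define FOO_FLAGS_MODE_MASK	(FOO_MR_OPMOD_MASK | FOO_FLAGS_ENCRYPT)

/* ... so programming the mode collapses to a single mask-and-OR: */
valmr |= dd->flags & FOO_FLAGS_MODE_MASK;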
@@ -100,7 +102,6 @@ struct atmel_tdes_dev {
int irq;
unsigned long flags;
- int err;
spinlock_t lock;
struct crypto_queue queue;
@@ -189,7 +190,7 @@ static inline void atmel_tdes_write(struct atmel_tdes_dev *dd,
}
static void atmel_tdes_write_n(struct atmel_tdes_dev *dd, u32 offset,
- u32 *value, int count)
+ const u32 *value, int count)
{
for (; count--; value++, offset += 4)
atmel_tdes_write(dd, offset, *value);
@@ -226,7 +227,6 @@ static int atmel_tdes_hw_init(struct atmel_tdes_dev *dd)
if (!(dd->flags & TDES_FLAGS_INIT)) {
atmel_tdes_write(dd, TDES_CR, TDES_CR_SWRST);
dd->flags |= TDES_FLAGS_INIT;
- dd->err = 0;
}
return 0;
@@ -237,9 +237,13 @@ static inline unsigned int atmel_tdes_get_version(struct atmel_tdes_dev *dd)
return atmel_tdes_read(dd, TDES_HW_VERSION) & 0x00000fff;
}
-static void atmel_tdes_hw_version_init(struct atmel_tdes_dev *dd)
+static int atmel_tdes_hw_version_init(struct atmel_tdes_dev *dd)
{
- atmel_tdes_hw_init(dd);
+ int err;
+
+ err = atmel_tdes_hw_init(dd);
+ if (err)
+ return err;
dd->hw_version = atmel_tdes_get_version(dd);
@@ -247,6 +251,8 @@ static void atmel_tdes_hw_version_init(struct atmel_tdes_dev *dd)
"version: 0x%x\n", dd->hw_version);
clk_disable_unprepare(dd->iclk);
+
+ return 0;
}
static void atmel_tdes_dma_callback(void *data)
@@ -260,7 +266,7 @@ static void atmel_tdes_dma_callback(void *data)
static int atmel_tdes_write_ctrl(struct atmel_tdes_dev *dd)
{
int err;
- u32 valcr = 0, valmr = TDES_MR_SMOD_PDC;
+ u32 valmr = TDES_MR_SMOD_PDC;
err = atmel_tdes_hw_init(dd);
@@ -282,36 +288,15 @@ static int atmel_tdes_write_ctrl(struct atmel_tdes_dev *dd)
valmr |= TDES_MR_TDESMOD_DES;
}
- if (dd->flags & TDES_FLAGS_CBC) {
- valmr |= TDES_MR_OPMOD_CBC;
- } else if (dd->flags & TDES_FLAGS_CFB) {
- valmr |= TDES_MR_OPMOD_CFB;
-
- if (dd->flags & TDES_FLAGS_CFB8)
- valmr |= TDES_MR_CFBS_8b;
- else if (dd->flags & TDES_FLAGS_CFB16)
- valmr |= TDES_MR_CFBS_16b;
- else if (dd->flags & TDES_FLAGS_CFB32)
- valmr |= TDES_MR_CFBS_32b;
- else if (dd->flags & TDES_FLAGS_CFB64)
- valmr |= TDES_MR_CFBS_64b;
- } else if (dd->flags & TDES_FLAGS_OFB) {
- valmr |= TDES_MR_OPMOD_OFB;
- }
-
- if ((dd->flags & TDES_FLAGS_ENCRYPT) || (dd->flags & TDES_FLAGS_OFB))
- valmr |= TDES_MR_CYPHER_ENC;
+ valmr |= dd->flags & TDES_FLAGS_MODE_MASK;
- atmel_tdes_write(dd, TDES_CR, valcr);
atmel_tdes_write(dd, TDES_MR, valmr);
atmel_tdes_write_n(dd, TDES_KEY1W1R, dd->ctx->key,
dd->ctx->keylen >> 2);
- if (((dd->flags & TDES_FLAGS_CBC) || (dd->flags & TDES_FLAGS_CFB) ||
- (dd->flags & TDES_FLAGS_OFB)) && dd->req->iv) {
+ if (dd->req->iv && (valmr & TDES_MR_OPMOD_MASK) != TDES_MR_OPMOD_ECB)
atmel_tdes_write_n(dd, TDES_IV1R, (void *)dd->req->iv, 2);
- }
return 0;
}
@@ -397,11 +382,11 @@ static void atmel_tdes_buff_cleanup(struct atmel_tdes_dev *dd)
free_page((unsigned long)dd->buf_in);
}
-static int atmel_tdes_crypt_pdc(struct crypto_tfm *tfm, dma_addr_t dma_addr_in,
- dma_addr_t dma_addr_out, int length)
+static int atmel_tdes_crypt_pdc(struct atmel_tdes_dev *dd,
+ dma_addr_t dma_addr_in,
+ dma_addr_t dma_addr_out, int length)
{
- struct atmel_tdes_ctx *ctx = crypto_tfm_ctx(tfm);
- struct atmel_tdes_dev *dd = ctx->dd;
+ struct atmel_tdes_reqctx *rctx = skcipher_request_ctx(dd->req);
int len32;
dd->dma_size = length;
@@ -411,12 +396,19 @@ static int atmel_tdes_crypt_pdc(struct crypto_tfm *tfm, dma_addr_t dma_addr_in,
DMA_TO_DEVICE);
}
- if ((dd->flags & TDES_FLAGS_CFB) && (dd->flags & TDES_FLAGS_CFB8))
+ switch (rctx->mode & TDES_FLAGS_OPMODE_MASK) {
+ case TDES_FLAGS_CFB8:
len32 = DIV_ROUND_UP(length, sizeof(u8));
- else if ((dd->flags & TDES_FLAGS_CFB) && (dd->flags & TDES_FLAGS_CFB16))
+ break;
+
+ case TDES_FLAGS_CFB16:
len32 = DIV_ROUND_UP(length, sizeof(u16));
- else
+ break;
+
+ default:
len32 = DIV_ROUND_UP(length, sizeof(u32));
+ break;
+ }
atmel_tdes_write(dd, TDES_PTCR, TDES_PTCR_TXTDIS|TDES_PTCR_RXTDIS);
atmel_tdes_write(dd, TDES_TPR, dma_addr_in);
@@ -433,13 +425,14 @@ static int atmel_tdes_crypt_pdc(struct crypto_tfm *tfm, dma_addr_t dma_addr_in,
return 0;
}
-static int atmel_tdes_crypt_dma(struct crypto_tfm *tfm, dma_addr_t dma_addr_in,
- dma_addr_t dma_addr_out, int length)
+static int atmel_tdes_crypt_dma(struct atmel_tdes_dev *dd,
+ dma_addr_t dma_addr_in,
+ dma_addr_t dma_addr_out, int length)
{
- struct atmel_tdes_ctx *ctx = crypto_tfm_ctx(tfm);
- struct atmel_tdes_dev *dd = ctx->dd;
+ struct atmel_tdes_reqctx *rctx = skcipher_request_ctx(dd->req);
struct scatterlist sg[2];
struct dma_async_tx_descriptor *in_desc, *out_desc;
+ enum dma_slave_buswidth addr_width;
dd->dma_size = length;
@@ -448,23 +441,23 @@ static int atmel_tdes_crypt_dma(struct crypto_tfm *tfm, dma_addr_t dma_addr_in,
DMA_TO_DEVICE);
}
- if (dd->flags & TDES_FLAGS_CFB8) {
- dd->dma_lch_in.dma_conf.dst_addr_width =
- DMA_SLAVE_BUSWIDTH_1_BYTE;
- dd->dma_lch_out.dma_conf.src_addr_width =
- DMA_SLAVE_BUSWIDTH_1_BYTE;
- } else if (dd->flags & TDES_FLAGS_CFB16) {
- dd->dma_lch_in.dma_conf.dst_addr_width =
- DMA_SLAVE_BUSWIDTH_2_BYTES;
- dd->dma_lch_out.dma_conf.src_addr_width =
- DMA_SLAVE_BUSWIDTH_2_BYTES;
- } else {
- dd->dma_lch_in.dma_conf.dst_addr_width =
- DMA_SLAVE_BUSWIDTH_4_BYTES;
- dd->dma_lch_out.dma_conf.src_addr_width =
- DMA_SLAVE_BUSWIDTH_4_BYTES;
+ switch (rctx->mode & TDES_FLAGS_OPMODE_MASK) {
+ case TDES_FLAGS_CFB8:
+ addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE;
+ break;
+
+ case TDES_FLAGS_CFB16:
+ addr_width = DMA_SLAVE_BUSWIDTH_2_BYTES;
+ break;
+
+ default:
+ addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
+ break;
}
+ dd->dma_lch_in.dma_conf.dst_addr_width = addr_width;
+ dd->dma_lch_out.dma_conf.src_addr_width = addr_width;
+
dmaengine_slave_config(dd->dma_lch_in.chan, &dd->dma_lch_in.dma_conf);
dmaengine_slave_config(dd->dma_lch_out.chan, &dd->dma_lch_out.dma_conf);
@@ -504,8 +497,6 @@ static int atmel_tdes_crypt_dma(struct crypto_tfm *tfm, dma_addr_t dma_addr_in,
static int atmel_tdes_crypt_start(struct atmel_tdes_dev *dd)
{
- struct crypto_tfm *tfm = crypto_skcipher_tfm(
- crypto_skcipher_reqtfm(dd->req));
int err, fast = 0, in, out;
size_t count;
dma_addr_t addr_in, addr_out;
@@ -561,9 +552,9 @@ static int atmel_tdes_crypt_start(struct atmel_tdes_dev *dd)
dd->total -= count;
if (dd->caps.has_dma)
- err = atmel_tdes_crypt_dma(tfm, addr_in, addr_out, count);
+ err = atmel_tdes_crypt_dma(dd, addr_in, addr_out, count);
else
- err = atmel_tdes_crypt_pdc(tfm, addr_in, addr_out, count);
+ err = atmel_tdes_crypt_pdc(dd, addr_in, addr_out, count);
if (err && (dd->flags & TDES_FLAGS_FAST)) {
dma_unmap_sg(dd->dev, dd->in_sg, 1, DMA_TO_DEVICE);
@@ -600,12 +591,14 @@ atmel_tdes_set_iv_as_last_ciphertext_block(struct atmel_tdes_dev *dd)
static void atmel_tdes_finish_req(struct atmel_tdes_dev *dd, int err)
{
struct skcipher_request *req = dd->req;
+ struct atmel_tdes_reqctx *rctx = skcipher_request_ctx(req);
clk_disable_unprepare(dd->iclk);
dd->flags &= ~TDES_FLAGS_BUSY;
- atmel_tdes_set_iv_as_last_ciphertext_block(dd);
+ if (!err && (rctx->mode & TDES_FLAGS_OPMODE_MASK) != TDES_FLAGS_ECB)
+ atmel_tdes_set_iv_as_last_ciphertext_block(dd);
req->base.complete(&req->base, err);
}
@@ -699,35 +692,44 @@ static int atmel_tdes_crypt(struct skcipher_request *req, unsigned long mode)
struct atmel_tdes_ctx *ctx = crypto_skcipher_ctx(skcipher);
struct atmel_tdes_reqctx *rctx = skcipher_request_ctx(req);
- if (mode & TDES_FLAGS_CFB8) {
+ switch (mode & TDES_FLAGS_OPMODE_MASK) {
+ case TDES_FLAGS_CFB8:
if (!IS_ALIGNED(req->cryptlen, CFB8_BLOCK_SIZE)) {
pr_err("request size is not exact amount of CFB8 blocks\n");
return -EINVAL;
}
ctx->block_size = CFB8_BLOCK_SIZE;
- } else if (mode & TDES_FLAGS_CFB16) {
+ break;
+
+ case TDES_FLAGS_CFB16:
if (!IS_ALIGNED(req->cryptlen, CFB16_BLOCK_SIZE)) {
pr_err("request size is not exact amount of CFB16 blocks\n");
return -EINVAL;
}
ctx->block_size = CFB16_BLOCK_SIZE;
- } else if (mode & TDES_FLAGS_CFB32) {
+ break;
+
+ case TDES_FLAGS_CFB32:
if (!IS_ALIGNED(req->cryptlen, CFB32_BLOCK_SIZE)) {
pr_err("request size is not exact amount of CFB32 blocks\n");
return -EINVAL;
}
ctx->block_size = CFB32_BLOCK_SIZE;
- } else {
+ break;
+
+ default:
if (!IS_ALIGNED(req->cryptlen, DES_BLOCK_SIZE)) {
pr_err("request size is not exact amount of DES blocks\n");
return -EINVAL;
}
ctx->block_size = DES_BLOCK_SIZE;
+ break;
}
rctx->mode = mode;
- if (!(mode & TDES_FLAGS_ENCRYPT) && req->src == req->dst) {
+ if ((mode & TDES_FLAGS_OPMODE_MASK) != TDES_FLAGS_ECB &&
+ !(mode & TDES_FLAGS_ENCRYPT) && req->src == req->dst) {
unsigned int ivsize = crypto_skcipher_ivsize(skcipher);
if (req->cryptlen >= ivsize)
@@ -739,33 +741,17 @@ static int atmel_tdes_crypt(struct skcipher_request *req, unsigned long mode)
return atmel_tdes_handle_queue(ctx->dd, req);
}
-static bool atmel_tdes_filter(struct dma_chan *chan, void *slave)
+static int atmel_tdes_dma_init(struct atmel_tdes_dev *dd)
{
- struct at_dma_slave *sl = slave;
-
- if (sl && sl->dma_dev == chan->device->dev) {
- chan->private = sl;
- return true;
- } else {
- return false;
- }
-}
-
-static int atmel_tdes_dma_init(struct atmel_tdes_dev *dd,
- struct crypto_platform_data *pdata)
-{
- dma_cap_mask_t mask;
-
- dma_cap_zero(mask);
- dma_cap_set(DMA_SLAVE, mask);
+ int ret;
/* Try to grab 2 DMA channels */
- dd->dma_lch_in.chan = dma_request_slave_channel_compat(mask,
- atmel_tdes_filter, &pdata->dma_slave->rxdata, dd->dev, "tx");
- if (!dd->dma_lch_in.chan)
+ dd->dma_lch_in.chan = dma_request_chan(dd->dev, "tx");
+ if (IS_ERR(dd->dma_lch_in.chan)) {
+ ret = PTR_ERR(dd->dma_lch_in.chan);
goto err_dma_in;
+ }
- dd->dma_lch_in.dma_conf.direction = DMA_MEM_TO_DEV;
dd->dma_lch_in.dma_conf.dst_addr = dd->phys_base +
TDES_IDATA1R;
dd->dma_lch_in.dma_conf.src_maxburst = 1;
@@ -776,12 +762,12 @@ static int atmel_tdes_dma_init(struct atmel_tdes_dev *dd,
DMA_SLAVE_BUSWIDTH_4_BYTES;
dd->dma_lch_in.dma_conf.device_fc = false;
- dd->dma_lch_out.chan = dma_request_slave_channel_compat(mask,
- atmel_tdes_filter, &pdata->dma_slave->txdata, dd->dev, "rx");
- if (!dd->dma_lch_out.chan)
+ dd->dma_lch_out.chan = dma_request_chan(dd->dev, "rx");
+ if (IS_ERR(dd->dma_lch_out.chan)) {
+ ret = PTR_ERR(dd->dma_lch_out.chan);
goto err_dma_out;
+ }
- dd->dma_lch_out.dma_conf.direction = DMA_DEV_TO_MEM;
dd->dma_lch_out.dma_conf.src_addr = dd->phys_base +
TDES_ODATA1R;
dd->dma_lch_out.dma_conf.src_maxburst = 1;
@@ -797,8 +783,8 @@ static int atmel_tdes_dma_init(struct atmel_tdes_dev *dd,
err_dma_out:
dma_release_channel(dd->dma_lch_in.chan);
err_dma_in:
- dev_warn(dd->dev, "no DMA channel available\n");
- return -ENODEV;
+ dev_err(dd->dev, "no DMA channel available\n");
+ return ret;
}
static void atmel_tdes_dma_cleanup(struct atmel_tdes_dev *dd)
@@ -841,17 +827,17 @@ static int atmel_tdes_setkey(struct crypto_skcipher *tfm, const u8 *key,
static int atmel_tdes_ecb_encrypt(struct skcipher_request *req)
{
- return atmel_tdes_crypt(req, TDES_FLAGS_ENCRYPT);
+ return atmel_tdes_crypt(req, TDES_FLAGS_ECB | TDES_FLAGS_ENCRYPT);
}
static int atmel_tdes_ecb_decrypt(struct skcipher_request *req)
{
- return atmel_tdes_crypt(req, 0);
+ return atmel_tdes_crypt(req, TDES_FLAGS_ECB);
}
static int atmel_tdes_cbc_encrypt(struct skcipher_request *req)
{
- return atmel_tdes_crypt(req, TDES_FLAGS_ENCRYPT | TDES_FLAGS_CBC);
+ return atmel_tdes_crypt(req, TDES_FLAGS_CBC | TDES_FLAGS_ENCRYPT);
}
static int atmel_tdes_cbc_decrypt(struct skcipher_request *req)
@@ -860,50 +846,47 @@ static int atmel_tdes_cbc_decrypt(struct skcipher_request *req)
}
static int atmel_tdes_cfb_encrypt(struct skcipher_request *req)
{
- return atmel_tdes_crypt(req, TDES_FLAGS_ENCRYPT | TDES_FLAGS_CFB);
+ return atmel_tdes_crypt(req, TDES_FLAGS_CFB64 | TDES_FLAGS_ENCRYPT);
}
static int atmel_tdes_cfb_decrypt(struct skcipher_request *req)
{
- return atmel_tdes_crypt(req, TDES_FLAGS_CFB);
+ return atmel_tdes_crypt(req, TDES_FLAGS_CFB64);
}
static int atmel_tdes_cfb8_encrypt(struct skcipher_request *req)
{
- return atmel_tdes_crypt(req, TDES_FLAGS_ENCRYPT | TDES_FLAGS_CFB |
- TDES_FLAGS_CFB8);
+ return atmel_tdes_crypt(req, TDES_FLAGS_CFB8 | TDES_FLAGS_ENCRYPT);
}
static int atmel_tdes_cfb8_decrypt(struct skcipher_request *req)
{
- return atmel_tdes_crypt(req, TDES_FLAGS_CFB | TDES_FLAGS_CFB8);
+ return atmel_tdes_crypt(req, TDES_FLAGS_CFB8);
}
static int atmel_tdes_cfb16_encrypt(struct skcipher_request *req)
{
- return atmel_tdes_crypt(req, TDES_FLAGS_ENCRYPT | TDES_FLAGS_CFB |
- TDES_FLAGS_CFB16);
+ return atmel_tdes_crypt(req, TDES_FLAGS_CFB16 | TDES_FLAGS_ENCRYPT);
}
static int atmel_tdes_cfb16_decrypt(struct skcipher_request *req)
{
- return atmel_tdes_crypt(req, TDES_FLAGS_CFB | TDES_FLAGS_CFB16);
+ return atmel_tdes_crypt(req, TDES_FLAGS_CFB16);
}
static int atmel_tdes_cfb32_encrypt(struct skcipher_request *req)
{
- return atmel_tdes_crypt(req, TDES_FLAGS_ENCRYPT | TDES_FLAGS_CFB |
- TDES_FLAGS_CFB32);
+ return atmel_tdes_crypt(req, TDES_FLAGS_CFB32 | TDES_FLAGS_ENCRYPT);
}
static int atmel_tdes_cfb32_decrypt(struct skcipher_request *req)
{
- return atmel_tdes_crypt(req, TDES_FLAGS_CFB | TDES_FLAGS_CFB32);
+ return atmel_tdes_crypt(req, TDES_FLAGS_CFB32);
}
static int atmel_tdes_ofb_encrypt(struct skcipher_request *req)
{
- return atmel_tdes_crypt(req, TDES_FLAGS_ENCRYPT | TDES_FLAGS_OFB);
+ return atmel_tdes_crypt(req, TDES_FLAGS_OFB | TDES_FLAGS_ENCRYPT);
}
static int atmel_tdes_ofb_decrypt(struct skcipher_request *req)
@@ -925,18 +908,23 @@ static int atmel_tdes_init_tfm(struct crypto_skcipher *tfm)
return 0;
}
+static void atmel_tdes_skcipher_alg_init(struct skcipher_alg *alg)
+{
+ alg->base.cra_priority = ATMEL_TDES_PRIORITY;
+ alg->base.cra_flags = CRYPTO_ALG_ASYNC;
+ alg->base.cra_ctxsize = sizeof(struct atmel_tdes_ctx);
+ alg->base.cra_module = THIS_MODULE;
+
+ alg->init = atmel_tdes_init_tfm;
+}
+
static struct skcipher_alg tdes_algs[] = {
{
.base.cra_name = "ecb(des)",
.base.cra_driver_name = "atmel-ecb-des",
- .base.cra_priority = 100,
- .base.cra_flags = CRYPTO_ALG_ASYNC,
.base.cra_blocksize = DES_BLOCK_SIZE,
- .base.cra_ctxsize = sizeof(struct atmel_tdes_ctx),
.base.cra_alignmask = 0x7,
- .base.cra_module = THIS_MODULE,
- .init = atmel_tdes_init_tfm,
.min_keysize = DES_KEY_SIZE,
.max_keysize = DES_KEY_SIZE,
.setkey = atmel_des_setkey,
@@ -946,14 +934,9 @@ static struct skcipher_alg tdes_algs[] = {
{
.base.cra_name = "cbc(des)",
.base.cra_driver_name = "atmel-cbc-des",
- .base.cra_priority = 100,
- .base.cra_flags = CRYPTO_ALG_ASYNC,
.base.cra_blocksize = DES_BLOCK_SIZE,
- .base.cra_ctxsize = sizeof(struct atmel_tdes_ctx),
.base.cra_alignmask = 0x7,
- .base.cra_module = THIS_MODULE,
- .init = atmel_tdes_init_tfm,
.min_keysize = DES_KEY_SIZE,
.max_keysize = DES_KEY_SIZE,
.ivsize = DES_BLOCK_SIZE,
@@ -964,14 +947,9 @@ static struct skcipher_alg tdes_algs[] = {
{
.base.cra_name = "cfb(des)",
.base.cra_driver_name = "atmel-cfb-des",
- .base.cra_priority = 100,
- .base.cra_flags = CRYPTO_ALG_ASYNC,
.base.cra_blocksize = DES_BLOCK_SIZE,
- .base.cra_ctxsize = sizeof(struct atmel_tdes_ctx),
.base.cra_alignmask = 0x7,
- .base.cra_module = THIS_MODULE,
- .init = atmel_tdes_init_tfm,
.min_keysize = DES_KEY_SIZE,
.max_keysize = DES_KEY_SIZE,
.ivsize = DES_BLOCK_SIZE,
@@ -982,14 +960,9 @@ static struct skcipher_alg tdes_algs[] = {
{
.base.cra_name = "cfb8(des)",
.base.cra_driver_name = "atmel-cfb8-des",
- .base.cra_priority = 100,
- .base.cra_flags = CRYPTO_ALG_ASYNC,
.base.cra_blocksize = CFB8_BLOCK_SIZE,
- .base.cra_ctxsize = sizeof(struct atmel_tdes_ctx),
.base.cra_alignmask = 0,
- .base.cra_module = THIS_MODULE,
- .init = atmel_tdes_init_tfm,
.min_keysize = DES_KEY_SIZE,
.max_keysize = DES_KEY_SIZE,
.ivsize = DES_BLOCK_SIZE,
@@ -1000,14 +973,9 @@ static struct skcipher_alg tdes_algs[] = {
{
.base.cra_name = "cfb16(des)",
.base.cra_driver_name = "atmel-cfb16-des",
- .base.cra_priority = 100,
- .base.cra_flags = CRYPTO_ALG_ASYNC,
.base.cra_blocksize = CFB16_BLOCK_SIZE,
- .base.cra_ctxsize = sizeof(struct atmel_tdes_ctx),
.base.cra_alignmask = 0x1,
- .base.cra_module = THIS_MODULE,
- .init = atmel_tdes_init_tfm,
.min_keysize = DES_KEY_SIZE,
.max_keysize = DES_KEY_SIZE,
.ivsize = DES_BLOCK_SIZE,
@@ -1018,14 +986,9 @@ static struct skcipher_alg tdes_algs[] = {
{
.base.cra_name = "cfb32(des)",
.base.cra_driver_name = "atmel-cfb32-des",
- .base.cra_priority = 100,
- .base.cra_flags = CRYPTO_ALG_ASYNC,
.base.cra_blocksize = CFB32_BLOCK_SIZE,
- .base.cra_ctxsize = sizeof(struct atmel_tdes_ctx),
.base.cra_alignmask = 0x3,
- .base.cra_module = THIS_MODULE,
- .init = atmel_tdes_init_tfm,
.min_keysize = DES_KEY_SIZE,
.max_keysize = DES_KEY_SIZE,
.ivsize = DES_BLOCK_SIZE,
@@ -1036,14 +999,9 @@ static struct skcipher_alg tdes_algs[] = {
{
.base.cra_name = "ofb(des)",
.base.cra_driver_name = "atmel-ofb-des",
- .base.cra_priority = 100,
- .base.cra_flags = CRYPTO_ALG_ASYNC,
.base.cra_blocksize = DES_BLOCK_SIZE,
- .base.cra_ctxsize = sizeof(struct atmel_tdes_ctx),
.base.cra_alignmask = 0x7,
- .base.cra_module = THIS_MODULE,
- .init = atmel_tdes_init_tfm,
.min_keysize = DES_KEY_SIZE,
.max_keysize = DES_KEY_SIZE,
.ivsize = DES_BLOCK_SIZE,
@@ -1054,14 +1012,9 @@ static struct skcipher_alg tdes_algs[] = {
{
.base.cra_name = "ecb(des3_ede)",
.base.cra_driver_name = "atmel-ecb-tdes",
- .base.cra_priority = 100,
- .base.cra_flags = CRYPTO_ALG_ASYNC,
.base.cra_blocksize = DES_BLOCK_SIZE,
- .base.cra_ctxsize = sizeof(struct atmel_tdes_ctx),
.base.cra_alignmask = 0x7,
- .base.cra_module = THIS_MODULE,
- .init = atmel_tdes_init_tfm,
.min_keysize = DES3_EDE_KEY_SIZE,
.max_keysize = DES3_EDE_KEY_SIZE,
.setkey = atmel_tdes_setkey,
@@ -1071,14 +1024,9 @@ static struct skcipher_alg tdes_algs[] = {
{
.base.cra_name = "cbc(des3_ede)",
.base.cra_driver_name = "atmel-cbc-tdes",
- .base.cra_priority = 100,
- .base.cra_flags = CRYPTO_ALG_ASYNC,
.base.cra_blocksize = DES_BLOCK_SIZE,
- .base.cra_ctxsize = sizeof(struct atmel_tdes_ctx),
.base.cra_alignmask = 0x7,
- .base.cra_module = THIS_MODULE,
- .init = atmel_tdes_init_tfm,
.min_keysize = DES3_EDE_KEY_SIZE,
.max_keysize = DES3_EDE_KEY_SIZE,
.setkey = atmel_tdes_setkey,
@@ -1089,14 +1037,9 @@ static struct skcipher_alg tdes_algs[] = {
{
.base.cra_name = "ofb(des3_ede)",
.base.cra_driver_name = "atmel-ofb-tdes",
- .base.cra_priority = 100,
- .base.cra_flags = CRYPTO_ALG_ASYNC,
.base.cra_blocksize = DES_BLOCK_SIZE,
- .base.cra_ctxsize = sizeof(struct atmel_tdes_ctx),
.base.cra_alignmask = 0x7,
- .base.cra_module = THIS_MODULE,
- .init = atmel_tdes_init_tfm,
.min_keysize = DES3_EDE_KEY_SIZE,
.max_keysize = DES3_EDE_KEY_SIZE,
.setkey = atmel_tdes_setkey,
@@ -1123,8 +1066,6 @@ static void atmel_tdes_done_task(unsigned long data)
else
err = atmel_tdes_crypt_dma_stop(dd);
- err = dd->err ? : err;
-
if (dd->total && !err) {
if (dd->flags & TDES_FLAGS_FAST) {
dd->in_sg = sg_next(dd->in_sg);
@@ -1173,6 +1114,8 @@ static int atmel_tdes_register_algs(struct atmel_tdes_dev *dd)
int err, i, j;
for (i = 0; i < ARRAY_SIZE(tdes_algs); i++) {
+ atmel_tdes_skcipher_alg_init(&tdes_algs[i]);
+
err = crypto_register_skcipher(&tdes_algs[i]);
if (err)
goto err_tdes_algs;
@@ -1214,49 +1157,18 @@ static const struct of_device_id atmel_tdes_dt_ids[] = {
{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, atmel_tdes_dt_ids);
-
-static struct crypto_platform_data *atmel_tdes_of_init(struct platform_device *pdev)
-{
- struct device_node *np = pdev->dev.of_node;
- struct crypto_platform_data *pdata;
-
- if (!np) {
- dev_err(&pdev->dev, "device node not found\n");
- return ERR_PTR(-EINVAL);
- }
-
- pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL);
- if (!pdata)
- return ERR_PTR(-ENOMEM);
-
- pdata->dma_slave = devm_kzalloc(&pdev->dev,
- sizeof(*(pdata->dma_slave)),
- GFP_KERNEL);
- if (!pdata->dma_slave)
- return ERR_PTR(-ENOMEM);
-
- return pdata;
-}
-#else /* CONFIG_OF */
-static inline struct crypto_platform_data *atmel_tdes_of_init(struct platform_device *pdev)
-{
- return ERR_PTR(-EINVAL);
-}
#endif
static int atmel_tdes_probe(struct platform_device *pdev)
{
struct atmel_tdes_dev *tdes_dd;
- struct crypto_platform_data *pdata;
struct device *dev = &pdev->dev;
struct resource *tdes_res;
int err;
tdes_dd = devm_kmalloc(&pdev->dev, sizeof(*tdes_dd), GFP_KERNEL);
- if (tdes_dd == NULL) {
- err = -ENOMEM;
- goto tdes_dd_err;
- }
+ if (!tdes_dd)
+ return -ENOMEM;
tdes_dd->dev = dev;
@@ -1277,7 +1189,7 @@ static int atmel_tdes_probe(struct platform_device *pdev)
if (!tdes_res) {
dev_err(dev, "no MEM resource info\n");
err = -ENODEV;
- goto res_err;
+ goto err_tasklet_kill;
}
tdes_dd->phys_base = tdes_res->start;
@@ -1285,14 +1197,14 @@ static int atmel_tdes_probe(struct platform_device *pdev)
tdes_dd->irq = platform_get_irq(pdev, 0);
if (tdes_dd->irq < 0) {
err = tdes_dd->irq;
- goto res_err;
+ goto err_tasklet_kill;
}
err = devm_request_irq(&pdev->dev, tdes_dd->irq, atmel_tdes_irq,
IRQF_SHARED, "atmel-tdes", tdes_dd);
if (err) {
dev_err(dev, "unable to request tdes irq.\n");
- goto res_err;
+ goto err_tasklet_kill;
}
/* Initializing the clock */
@@ -1300,41 +1212,30 @@ static int atmel_tdes_probe(struct platform_device *pdev)
if (IS_ERR(tdes_dd->iclk)) {
dev_err(dev, "clock initialization failed.\n");
err = PTR_ERR(tdes_dd->iclk);
- goto res_err;
+ goto err_tasklet_kill;
}
tdes_dd->io_base = devm_ioremap_resource(&pdev->dev, tdes_res);
if (IS_ERR(tdes_dd->io_base)) {
dev_err(dev, "can't ioremap\n");
err = PTR_ERR(tdes_dd->io_base);
- goto res_err;
+ goto err_tasklet_kill;
}
- atmel_tdes_hw_version_init(tdes_dd);
+ err = atmel_tdes_hw_version_init(tdes_dd);
+ if (err)
+ goto err_tasklet_kill;
atmel_tdes_get_cap(tdes_dd);
err = atmel_tdes_buff_init(tdes_dd);
if (err)
- goto err_tdes_buff;
+ goto err_tasklet_kill;
if (tdes_dd->caps.has_dma) {
- pdata = pdev->dev.platform_data;
- if (!pdata) {
- pdata = atmel_tdes_of_init(pdev);
- if (IS_ERR(pdata)) {
- dev_err(&pdev->dev, "platform data not available\n");
- err = PTR_ERR(pdata);
- goto err_pdata;
- }
- }
- if (!pdata->dma_slave) {
- err = -ENXIO;
- goto err_pdata;
- }
- err = atmel_tdes_dma_init(tdes_dd, pdata);
+ err = atmel_tdes_dma_init(tdes_dd);
if (err)
- goto err_tdes_dma;
+ goto err_buff_cleanup;
dev_info(dev, "using %s, %s for DMA transfers\n",
dma_chan_name(tdes_dd->dma_lch_in.chan),
@@ -1359,15 +1260,11 @@ err_algs:
spin_unlock(&atmel_tdes.lock);
if (tdes_dd->caps.has_dma)
atmel_tdes_dma_cleanup(tdes_dd);
-err_tdes_dma:
-err_pdata:
+err_buff_cleanup:
atmel_tdes_buff_cleanup(tdes_dd);
-err_tdes_buff:
-res_err:
+err_tasklet_kill:
tasklet_kill(&tdes_dd->done_task);
tasklet_kill(&tdes_dd->queue_task);
-tdes_dd_err:
- dev_err(dev, "initialization failed.\n");
return err;
}
diff --git a/drivers/crypto/axis/artpec6_crypto.c b/drivers/crypto/axis/artpec6_crypto.c
index 4b20606983a4..fcf1effc7661 100644
--- a/drivers/crypto/axis/artpec6_crypto.c
+++ b/drivers/crypto/axis/artpec6_crypto.c
@@ -1249,10 +1249,8 @@ static int artpec6_crypto_aead_set_key(struct crypto_aead *tfm, const u8 *key,
{
struct artpec6_cryptotfm_context *ctx = crypto_tfm_ctx(&tfm->base);
- if (len != 16 && len != 24 && len != 32) {
- crypto_aead_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
- return -1;
- }
+ if (len != 16 && len != 24 && len != 32)
+ return -EINVAL;
ctx->key_length = len;
@@ -1606,8 +1604,6 @@ artpec6_crypto_cipher_set_key(struct crypto_skcipher *cipher, const u8 *key,
case 32:
break;
default:
- crypto_skcipher_set_flags(cipher,
- CRYPTO_TFM_RES_BAD_KEY_LEN);
return -EINVAL;
}
@@ -1634,8 +1630,6 @@ artpec6_crypto_xts_set_key(struct crypto_skcipher *cipher, const u8 *key,
case 64:
break;
default:
- crypto_skcipher_set_flags(cipher,
- CRYPTO_TFM_RES_BAD_KEY_LEN);
return -EINVAL;
}
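The artpec6 hunks above, like the bcm and caam ones that follow, belong to a tree-wide cleanup that removed the CRYPTO_TFM_RES_* reporting flags from the crypto API: a rejected key is now signalled by the return value alone. The artpec6 AEAD path also gains a real errno, returning -EINVAL where it previously returned the invalid code -1. Before and after, in miniature:

/* Before (removed pattern): */
if (len != 16 && len != 24 && len != 32) {
	crypto_skcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
	return -EINVAL;
}

/* After: the errno carries the whole story. */
if (len != 16 && len != 24 && len != 32)
	return -EINVAL;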
diff --git a/drivers/crypto/bcm/cipher.c b/drivers/crypto/bcm/cipher.c
index 1564a6f8c9cb..c8b9408541a9 100644
--- a/drivers/crypto/bcm/cipher.c
+++ b/drivers/crypto/bcm/cipher.c
@@ -1846,7 +1846,6 @@ static int aes_setkey(struct crypto_skcipher *cipher, const u8 *key,
ctx->cipher_type = CIPHER_TYPE_AES256;
break;
default:
- crypto_skcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
return -EINVAL;
}
WARN_ON((ctx->max_payload != SPU_MAX_PAYLOAD_INF) &&
@@ -2894,13 +2893,8 @@ static int aead_authenc_setkey(struct crypto_aead *cipher,
ctx->fallback_cipher->base.crt_flags |=
tfm->crt_flags & CRYPTO_TFM_REQ_MASK;
ret = crypto_aead_setkey(ctx->fallback_cipher, key, keylen);
- if (ret) {
+ if (ret)
flow_log(" fallback setkey() returned:%d\n", ret);
- tfm->crt_flags &= ~CRYPTO_TFM_RES_MASK;
- tfm->crt_flags |=
- (ctx->fallback_cipher->base.crt_flags &
- CRYPTO_TFM_RES_MASK);
- }
}
ctx->spu_resp_hdr_len = spu->spu_response_hdr_len(ctx->authkeylen,
@@ -2916,7 +2910,6 @@ badkey:
ctx->authkeylen = 0;
ctx->digestsize = 0;
- crypto_aead_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
return -EINVAL;
}
@@ -2967,13 +2960,8 @@ static int aead_gcm_ccm_setkey(struct crypto_aead *cipher,
tfm->crt_flags & CRYPTO_TFM_REQ_MASK;
ret = crypto_aead_setkey(ctx->fallback_cipher, key,
keylen + ctx->salt_len);
- if (ret) {
+ if (ret)
flow_log(" fallback setkey() returned:%d\n", ret);
- tfm->crt_flags &= ~CRYPTO_TFM_RES_MASK;
- tfm->crt_flags |=
- (ctx->fallback_cipher->base.crt_flags &
- CRYPTO_TFM_RES_MASK);
- }
}
ctx->spu_resp_hdr_len = spu->spu_response_hdr_len(ctx->authkeylen,
@@ -2992,7 +2980,6 @@ badkey:
ctx->authkeylen = 0;
ctx->digestsize = 0;
- crypto_aead_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
return -EINVAL;
}
diff --git a/drivers/crypto/caam/Kconfig b/drivers/crypto/caam/Kconfig
index 87053e46c788..fac5b2e26610 100644
--- a/drivers/crypto/caam/Kconfig
+++ b/drivers/crypto/caam/Kconfig
@@ -130,13 +130,13 @@ config CRYPTO_DEV_FSL_CAAM_AHASH_API
scatterlist crypto API to the SEC4 via job ring.
config CRYPTO_DEV_FSL_CAAM_PKC_API
- bool "Register public key cryptography implementations with Crypto API"
- default y
- select CRYPTO_RSA
- help
- Selecting this will allow SEC Public key support for RSA.
- Supported cryptographic primitives: encryption, decryption,
- signature and verification.
+ bool "Register public key cryptography implementations with Crypto API"
+ default y
+ select CRYPTO_RSA
+ help
+ Selecting this will allow SEC Public key support for RSA.
+ Supported cryptographic primitives: encryption, decryption,
+ signature and verification.
config CRYPTO_DEV_FSL_CAAM_RNG_API
bool "Register caam device for hwrng API"
diff --git a/drivers/crypto/caam/caamalg.c b/drivers/crypto/caam/caamalg.c
index 2912006b946b..ef1a65f4fc92 100644
--- a/drivers/crypto/caam/caamalg.c
+++ b/drivers/crypto/caam/caamalg.c
@@ -548,10 +548,8 @@ static int chachapoly_setkey(struct crypto_aead *aead, const u8 *key,
unsigned int ivsize = crypto_aead_ivsize(aead);
unsigned int saltlen = CHACHAPOLY_IV_SIZE - ivsize;
- if (keylen != CHACHA_KEY_SIZE + saltlen) {
- crypto_aead_set_flags(aead, CRYPTO_TFM_RES_BAD_KEY_LEN);
+ if (keylen != CHACHA_KEY_SIZE + saltlen)
return -EINVAL;
- }
ctx->cdata.key_virt = key;
ctx->cdata.keylen = keylen - saltlen;
@@ -619,7 +617,6 @@ skip_split_key:
memzero_explicit(&keys, sizeof(keys));
return aead_set_sh_desc(aead);
badkey:
- crypto_aead_set_flags(aead, CRYPTO_TFM_RES_BAD_KEY_LEN);
memzero_explicit(&keys, sizeof(keys));
return -EINVAL;
}
@@ -649,10 +646,8 @@ static int gcm_setkey(struct crypto_aead *aead,
int err;
err = aes_check_keylen(keylen);
- if (err) {
- crypto_aead_set_flags(aead, CRYPTO_TFM_RES_BAD_KEY_LEN);
+ if (err)
return err;
- }
print_hex_dump_debug("key in @"__stringify(__LINE__)": ",
DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
@@ -672,10 +667,8 @@ static int rfc4106_setkey(struct crypto_aead *aead,
int err;
err = aes_check_keylen(keylen - 4);
- if (err) {
- crypto_aead_set_flags(aead, CRYPTO_TFM_RES_BAD_KEY_LEN);
+ if (err)
return err;
- }
print_hex_dump_debug("key in @"__stringify(__LINE__)": ",
DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
@@ -700,10 +693,8 @@ static int rfc4543_setkey(struct crypto_aead *aead,
int err;
err = aes_check_keylen(keylen - 4);
- if (err) {
- crypto_aead_set_flags(aead, CRYPTO_TFM_RES_BAD_KEY_LEN);
+ if (err)
return err;
- }
print_hex_dump_debug("key in @"__stringify(__LINE__)": ",
DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
@@ -762,11 +753,8 @@ static int aes_skcipher_setkey(struct crypto_skcipher *skcipher,
int err;
err = aes_check_keylen(keylen);
- if (err) {
- crypto_skcipher_set_flags(skcipher,
- CRYPTO_TFM_RES_BAD_KEY_LEN);
+ if (err)
return err;
- }
return skcipher_setkey(skcipher, key, keylen, 0);
}
@@ -786,11 +774,8 @@ static int rfc3686_skcipher_setkey(struct crypto_skcipher *skcipher,
keylen -= CTR_RFC3686_NONCE_SIZE;
err = aes_check_keylen(keylen);
- if (err) {
- crypto_skcipher_set_flags(skcipher,
- CRYPTO_TFM_RES_BAD_KEY_LEN);
+ if (err)
return err;
- }
return skcipher_setkey(skcipher, key, keylen, ctx1_iv_off);
}
@@ -809,11 +794,8 @@ static int ctr_skcipher_setkey(struct crypto_skcipher *skcipher,
ctx1_iv_off = 16;
err = aes_check_keylen(keylen);
- if (err) {
- crypto_skcipher_set_flags(skcipher,
- CRYPTO_TFM_RES_BAD_KEY_LEN);
+ if (err)
return err;
- }
return skcipher_setkey(skcipher, key, keylen, ctx1_iv_off);
}
@@ -846,7 +828,6 @@ static int xts_skcipher_setkey(struct crypto_skcipher *skcipher, const u8 *key,
u32 *desc;
if (keylen != 2 * AES_MIN_KEY_SIZE && keylen != 2 * AES_MAX_KEY_SIZE) {
- crypto_skcipher_set_flags(skcipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
dev_err(jrdev, "key size mismatch\n");
return -EINVAL;
}
diff --git a/drivers/crypto/caam/caamalg_qi.c b/drivers/crypto/caam/caamalg_qi.c
index 8e3449670d2f..4a29e0ef9d63 100644
--- a/drivers/crypto/caam/caamalg_qi.c
+++ b/drivers/crypto/caam/caamalg_qi.c
@@ -268,7 +268,6 @@ skip_split_key:
memzero_explicit(&keys, sizeof(keys));
return ret;
badkey:
- crypto_aead_set_flags(aead, CRYPTO_TFM_RES_BAD_KEY_LEN);
memzero_explicit(&keys, sizeof(keys));
return -EINVAL;
}
@@ -356,10 +355,8 @@ static int gcm_setkey(struct crypto_aead *aead,
int ret;
ret = aes_check_keylen(keylen);
- if (ret) {
- crypto_aead_set_flags(aead, CRYPTO_TFM_RES_BAD_KEY_LEN);
+ if (ret)
return ret;
- }
print_hex_dump_debug("key in @" __stringify(__LINE__)": ",
DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
@@ -462,10 +459,8 @@ static int rfc4106_setkey(struct crypto_aead *aead,
int ret;
ret = aes_check_keylen(keylen - 4);
- if (ret) {
- crypto_aead_set_flags(aead, CRYPTO_TFM_RES_BAD_KEY_LEN);
+ if (ret)
return ret;
- }
print_hex_dump_debug("key in @" __stringify(__LINE__)": ",
DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
@@ -570,10 +565,8 @@ static int rfc4543_setkey(struct crypto_aead *aead,
int ret;
ret = aes_check_keylen(keylen - 4);
- if (ret) {
- crypto_aead_set_flags(aead, CRYPTO_TFM_RES_BAD_KEY_LEN);
+ if (ret)
return ret;
- }
print_hex_dump_debug("key in @" __stringify(__LINE__)": ",
DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
@@ -644,7 +637,7 @@ static int skcipher_setkey(struct crypto_skcipher *skcipher, const u8 *key,
ctx->sh_desc_enc);
if (ret) {
dev_err(jrdev, "driver enc context update failed\n");
- goto badkey;
+ return -EINVAL;
}
}
@@ -653,14 +646,11 @@ static int skcipher_setkey(struct crypto_skcipher *skcipher, const u8 *key,
ctx->sh_desc_dec);
if (ret) {
dev_err(jrdev, "driver dec context update failed\n");
- goto badkey;
+ return -EINVAL;
}
}
return ret;
-badkey:
- crypto_skcipher_set_flags(skcipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
- return -EINVAL;
}
static int aes_skcipher_setkey(struct crypto_skcipher *skcipher,
@@ -669,11 +659,8 @@ static int aes_skcipher_setkey(struct crypto_skcipher *skcipher,
int err;
err = aes_check_keylen(keylen);
- if (err) {
- crypto_skcipher_set_flags(skcipher,
- CRYPTO_TFM_RES_BAD_KEY_LEN);
+ if (err)
return err;
- }
return skcipher_setkey(skcipher, key, keylen, 0);
}
@@ -693,11 +680,8 @@ static int rfc3686_skcipher_setkey(struct crypto_skcipher *skcipher,
keylen -= CTR_RFC3686_NONCE_SIZE;
err = aes_check_keylen(keylen);
- if (err) {
- crypto_skcipher_set_flags(skcipher,
- CRYPTO_TFM_RES_BAD_KEY_LEN);
+ if (err)
return err;
- }
return skcipher_setkey(skcipher, key, keylen, ctx1_iv_off);
}
@@ -716,11 +700,8 @@ static int ctr_skcipher_setkey(struct crypto_skcipher *skcipher,
ctx1_iv_off = 16;
err = aes_check_keylen(keylen);
- if (err) {
- crypto_skcipher_set_flags(skcipher,
- CRYPTO_TFM_RES_BAD_KEY_LEN);
+ if (err)
return err;
- }
return skcipher_setkey(skcipher, key, keylen, ctx1_iv_off);
}
@@ -748,7 +729,7 @@ static int xts_skcipher_setkey(struct crypto_skcipher *skcipher, const u8 *key,
if (keylen != 2 * AES_MIN_KEY_SIZE && keylen != 2 * AES_MAX_KEY_SIZE) {
dev_err(jrdev, "key size mismatch\n");
- goto badkey;
+ return -EINVAL;
}
ctx->cdata.keylen = keylen;
@@ -765,7 +746,7 @@ static int xts_skcipher_setkey(struct crypto_skcipher *skcipher, const u8 *key,
ctx->sh_desc_enc);
if (ret) {
dev_err(jrdev, "driver enc context update failed\n");
- goto badkey;
+ return -EINVAL;
}
}
@@ -774,14 +755,11 @@ static int xts_skcipher_setkey(struct crypto_skcipher *skcipher, const u8 *key,
ctx->sh_desc_dec);
if (ret) {
dev_err(jrdev, "driver dec context update failed\n");
- goto badkey;
+ return -EINVAL;
}
}
return ret;
-badkey:
- crypto_skcipher_set_flags(skcipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
- return -EINVAL;
}
/*
diff --git a/drivers/crypto/caam/caamalg_qi2.c b/drivers/crypto/caam/caamalg_qi2.c
index 3443f6d6dd83..28669cbecf77 100644
--- a/drivers/crypto/caam/caamalg_qi2.c
+++ b/drivers/crypto/caam/caamalg_qi2.c
@@ -313,7 +313,6 @@ static int aead_setkey(struct crypto_aead *aead, const u8 *key,
memzero_explicit(&keys, sizeof(keys));
return aead_set_sh_desc(aead);
badkey:
- crypto_aead_set_flags(aead, CRYPTO_TFM_RES_BAD_KEY_LEN);
memzero_explicit(&keys, sizeof(keys));
return -EINVAL;
}
@@ -326,11 +325,11 @@ static int des3_aead_setkey(struct crypto_aead *aead, const u8 *key,
err = crypto_authenc_extractkeys(&keys, key, keylen);
if (unlikely(err))
- goto badkey;
+ goto out;
err = -EINVAL;
if (keys.enckeylen != DES3_EDE_KEY_SIZE)
- goto badkey;
+ goto out;
err = crypto_des3_ede_verify_key(crypto_aead_tfm(aead), keys.enckey) ?:
aead_setkey(aead, key, keylen);
@@ -338,10 +337,6 @@ static int des3_aead_setkey(struct crypto_aead *aead, const u8 *key,
out:
memzero_explicit(&keys, sizeof(keys));
return err;
-
-badkey:
- crypto_aead_set_flags(aead, CRYPTO_TFM_RES_BAD_KEY_LEN);
- goto out;
}
static struct aead_edesc *aead_edesc_alloc(struct aead_request *req,
@@ -634,10 +629,8 @@ static int chachapoly_setkey(struct crypto_aead *aead, const u8 *key,
unsigned int ivsize = crypto_aead_ivsize(aead);
unsigned int saltlen = CHACHAPOLY_IV_SIZE - ivsize;
- if (keylen != CHACHA_KEY_SIZE + saltlen) {
- crypto_aead_set_flags(aead, CRYPTO_TFM_RES_BAD_KEY_LEN);
+ if (keylen != CHACHA_KEY_SIZE + saltlen)
return -EINVAL;
- }
ctx->cdata.key_virt = key;
ctx->cdata.keylen = keylen - saltlen;
@@ -725,10 +718,8 @@ static int gcm_setkey(struct crypto_aead *aead,
int ret;
ret = aes_check_keylen(keylen);
- if (ret) {
- crypto_aead_set_flags(aead, CRYPTO_TFM_RES_BAD_KEY_LEN);
+ if (ret)
return ret;
- }
print_hex_dump_debug("key in @" __stringify(__LINE__)": ",
DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
@@ -822,10 +813,8 @@ static int rfc4106_setkey(struct crypto_aead *aead,
int ret;
ret = aes_check_keylen(keylen - 4);
- if (ret) {
- crypto_aead_set_flags(aead, CRYPTO_TFM_RES_BAD_KEY_LEN);
+ if (ret)
return ret;
- }
print_hex_dump_debug("key in @" __stringify(__LINE__)": ",
DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
@@ -923,10 +912,8 @@ static int rfc4543_setkey(struct crypto_aead *aead,
int ret;
ret = aes_check_keylen(keylen - 4);
- if (ret) {
- crypto_aead_set_flags(aead, CRYPTO_TFM_RES_BAD_KEY_LEN);
+ if (ret)
return ret;
- }
print_hex_dump_debug("key in @" __stringify(__LINE__)": ",
DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
@@ -992,11 +979,8 @@ static int aes_skcipher_setkey(struct crypto_skcipher *skcipher,
int err;
err = aes_check_keylen(keylen);
- if (err) {
- crypto_skcipher_set_flags(skcipher,
- CRYPTO_TFM_RES_BAD_KEY_LEN);
+ if (err)
return err;
- }
return skcipher_setkey(skcipher, key, keylen, 0);
}
@@ -1016,11 +1000,8 @@ static int rfc3686_skcipher_setkey(struct crypto_skcipher *skcipher,
keylen -= CTR_RFC3686_NONCE_SIZE;
err = aes_check_keylen(keylen);
- if (err) {
- crypto_skcipher_set_flags(skcipher,
- CRYPTO_TFM_RES_BAD_KEY_LEN);
+ if (err)
return err;
- }
return skcipher_setkey(skcipher, key, keylen, ctx1_iv_off);
}
@@ -1039,11 +1020,8 @@ static int ctr_skcipher_setkey(struct crypto_skcipher *skcipher,
ctx1_iv_off = 16;
err = aes_check_keylen(keylen);
- if (err) {
- crypto_skcipher_set_flags(skcipher,
- CRYPTO_TFM_RES_BAD_KEY_LEN);
+ if (err)
return err;
- }
return skcipher_setkey(skcipher, key, keylen, ctx1_iv_off);
}
@@ -1051,11 +1029,8 @@ static int ctr_skcipher_setkey(struct crypto_skcipher *skcipher,
static int chacha20_skcipher_setkey(struct crypto_skcipher *skcipher,
const u8 *key, unsigned int keylen)
{
- if (keylen != CHACHA_KEY_SIZE) {
- crypto_skcipher_set_flags(skcipher,
- CRYPTO_TFM_RES_BAD_KEY_LEN);
+ if (keylen != CHACHA_KEY_SIZE)
return -EINVAL;
- }
return skcipher_setkey(skcipher, key, keylen, 0);
}
@@ -1084,7 +1059,6 @@ static int xts_skcipher_setkey(struct crypto_skcipher *skcipher, const u8 *key,
if (keylen != 2 * AES_MIN_KEY_SIZE && keylen != 2 * AES_MAX_KEY_SIZE) {
dev_err(dev, "key size mismatch\n");
- crypto_skcipher_set_flags(skcipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
return -EINVAL;
}
@@ -2481,7 +2455,7 @@ static struct caam_aead_alg driver_aeads[] = {
.cra_name = "echainiv(authenc(hmac(sha256),"
"cbc(des)))",
.cra_driver_name = "echainiv-authenc-"
- "hmac-sha256-cbc-desi-"
+ "hmac-sha256-cbc-des-"
"caam-qi2",
.cra_blocksize = DES_BLOCK_SIZE,
},
@@ -2998,15 +2972,13 @@ struct caam_hash_state {
dma_addr_t buf_dma;
dma_addr_t ctx_dma;
int ctx_dma_len;
- u8 buf_0[CAAM_MAX_HASH_BLOCK_SIZE] ____cacheline_aligned;
- int buflen_0;
- u8 buf_1[CAAM_MAX_HASH_BLOCK_SIZE] ____cacheline_aligned;
- int buflen_1;
+ u8 buf[CAAM_MAX_HASH_BLOCK_SIZE] ____cacheline_aligned;
+ int buflen;
+ int next_buflen;
u8 caam_ctx[MAX_CTX_LEN] ____cacheline_aligned;
int (*update)(struct ahash_request *req);
int (*final)(struct ahash_request *req);
int (*finup)(struct ahash_request *req);
- int current_buf;
};
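
The two cacheline-aligned ping-pong buffers collapse into one because the residual-data copy now happens only after the hardware job completes (see ahash_done_bi()/ahash_done_ctx_dst() below), so a single buffer is ever live. A sketch of what the remaining fields mean, assuming the struct above (state_has_pending is an illustrative helper, not part of the patch):

/* buf:         bytes not yet submitted to the engine
 * buflen:      how many of them are valid
 * next_buflen: size of the tail the in-flight job will leave behind,
 *              copied into buf from the completion callback
 */
static bool state_has_pending(const struct caam_hash_state *state)
{
        return state->buflen || state->next_buflen;
}
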
struct caam_export_state {
@@ -3018,42 +2990,17 @@ struct caam_export_state {
int (*finup)(struct ahash_request *req);
};
-static inline void switch_buf(struct caam_hash_state *state)
-{
- state->current_buf ^= 1;
-}
-
-static inline u8 *current_buf(struct caam_hash_state *state)
-{
- return state->current_buf ? state->buf_1 : state->buf_0;
-}
-
-static inline u8 *alt_buf(struct caam_hash_state *state)
-{
- return state->current_buf ? state->buf_0 : state->buf_1;
-}
-
-static inline int *current_buflen(struct caam_hash_state *state)
-{
- return state->current_buf ? &state->buflen_1 : &state->buflen_0;
-}
-
-static inline int *alt_buflen(struct caam_hash_state *state)
-{
- return state->current_buf ? &state->buflen_0 : &state->buflen_1;
-}
-
/* Map current buffer in state (if length > 0) and put it in link table */
static inline int buf_map_to_qm_sg(struct device *dev,
struct dpaa2_sg_entry *qm_sg,
struct caam_hash_state *state)
{
- int buflen = *current_buflen(state);
+ int buflen = state->buflen;
if (!buflen)
return 0;
- state->buf_dma = dma_map_single(dev, current_buf(state), buflen,
+ state->buf_dma = dma_map_single(dev, state->buf, buflen,
DMA_TO_DEVICE);
if (dma_mapping_error(dev, state->buf_dma)) {
dev_err(dev, "unable to map buf\n");
@@ -3304,7 +3251,6 @@ static int ahash_setkey(struct crypto_ahash *ahash, const u8 *key,
return ret;
bad_free_key:
kfree(hashed_key);
- crypto_ahash_set_flags(ahash, CRYPTO_TFM_RES_BAD_KEY_LEN);
return -EINVAL;
}
@@ -3321,7 +3267,7 @@ static inline void ahash_unmap(struct device *dev, struct ahash_edesc *edesc,
DMA_TO_DEVICE);
if (state->buf_dma) {
- dma_unmap_single(dev, state->buf_dma, *current_buflen(state),
+ dma_unmap_single(dev, state->buf_dma, state->buflen,
DMA_TO_DEVICE);
state->buf_dma = 0;
}
@@ -3383,9 +3329,17 @@ static void ahash_done_bi(void *cbk_ctx, u32 status)
ecode = caam_qi2_strstatus(ctx->dev, status);
ahash_unmap_ctx(ctx->dev, edesc, req, DMA_BIDIRECTIONAL);
- switch_buf(state);
qi_cache_free(edesc);
+ scatterwalk_map_and_copy(state->buf, req->src,
+ req->nbytes - state->next_buflen,
+ state->next_buflen, 0);
+ state->buflen = state->next_buflen;
+
+ print_hex_dump_debug("buf@" __stringify(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, state->buf,
+ state->buflen, 1);
+
print_hex_dump_debug("ctx@" __stringify(__LINE__)": ",
DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx,
ctx->ctx_len, 1);
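
Condensed form of the deferred copy added above: once the job that consumed req->src finishes, the unhashed tail (the last next_buflen bytes of the request) moves into the single state buffer. carry_residual() is an illustrative name rather than a helper the patch introduces:

#include <crypto/scatterwalk.h>

static void carry_residual(struct caam_hash_state *state,
                           struct ahash_request *req)
{
        /* tail starts where the hashed portion of the request ends */
        scatterwalk_map_and_copy(state->buf, req->src,
                                 req->nbytes - state->next_buflen,
                                 state->next_buflen, 0);
        state->buflen = state->next_buflen;
}
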
@@ -3440,9 +3394,17 @@ static void ahash_done_ctx_dst(void *cbk_ctx, u32 status)
ecode = caam_qi2_strstatus(ctx->dev, status);
ahash_unmap_ctx(ctx->dev, edesc, req, DMA_FROM_DEVICE);
- switch_buf(state);
qi_cache_free(edesc);
+ scatterwalk_map_and_copy(state->buf, req->src,
+ req->nbytes - state->next_buflen,
+ state->next_buflen, 0);
+ state->buflen = state->next_buflen;
+
+ print_hex_dump_debug("buf@" __stringify(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, state->buf,
+ state->buflen, 1);
+
print_hex_dump_debug("ctx@" __stringify(__LINE__)": ",
DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx,
ctx->ctx_len, 1);
@@ -3464,16 +3426,14 @@ static int ahash_update_ctx(struct ahash_request *req)
struct dpaa2_fl_entry *out_fle = &req_ctx->fd_flt[0];
gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
GFP_KERNEL : GFP_ATOMIC;
- u8 *buf = current_buf(state);
- int *buflen = current_buflen(state);
- u8 *next_buf = alt_buf(state);
- int *next_buflen = alt_buflen(state), last_buflen;
+ u8 *buf = state->buf;
+ int *buflen = &state->buflen;
+ int *next_buflen = &state->next_buflen;
int in_len = *buflen + req->nbytes, to_hash;
int src_nents, mapped_nents, qm_sg_bytes, qm_sg_src_index;
struct ahash_edesc *edesc;
int ret = 0;
- last_buflen = *next_buflen;
*next_buflen = in_len & (crypto_tfm_alg_blocksize(&ahash->base) - 1);
to_hash = in_len - *next_buflen;
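
Worked numbers for the split computed above, with a hypothetical helper: for a 64-byte block size, 10 buffered bytes, and 100 new bytes, in_len = 110, next_buflen = 110 & 63 = 46, and to_hash = 64, so exactly one full block goes to the engine and 46 bytes are carried over for the next update.

static unsigned int split_to_hash(unsigned int buflen, unsigned int nbytes,
                                  unsigned int blocksize)
{
        unsigned int in_len = buflen + nbytes;

        /* blocksize is a power of two, so the mask isolates the tail */
        return in_len - (in_len & (blocksize - 1));
}
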
@@ -3524,10 +3484,6 @@ static int ahash_update_ctx(struct ahash_request *req)
if (mapped_nents) {
sg_to_qm_sg_last(req->src, src_len,
sg_table + qm_sg_src_index, 0);
- if (*next_buflen)
- scatterwalk_map_and_copy(next_buf, req->src,
- to_hash - *buflen,
- *next_buflen, 0);
} else {
dpaa2_sg_set_final(sg_table + qm_sg_src_index - 1,
true);
@@ -3566,14 +3522,11 @@ static int ahash_update_ctx(struct ahash_request *req)
scatterwalk_map_and_copy(buf + *buflen, req->src, 0,
req->nbytes, 0);
*buflen = *next_buflen;
- *next_buflen = last_buflen;
- }
- print_hex_dump_debug("buf@" __stringify(__LINE__)": ",
- DUMP_PREFIX_ADDRESS, 16, 4, buf, *buflen, 1);
- print_hex_dump_debug("next buf@" __stringify(__LINE__)": ",
- DUMP_PREFIX_ADDRESS, 16, 4, next_buf, *next_buflen,
- 1);
+ print_hex_dump_debug("buf@" __stringify(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, buf,
+ *buflen, 1);
+ }
return ret;
unmap_ctx:
@@ -3592,7 +3545,7 @@ static int ahash_final_ctx(struct ahash_request *req)
struct dpaa2_fl_entry *out_fle = &req_ctx->fd_flt[0];
gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
GFP_KERNEL : GFP_ATOMIC;
- int buflen = *current_buflen(state);
+ int buflen = state->buflen;
int qm_sg_bytes;
int digestsize = crypto_ahash_digestsize(ahash);
struct ahash_edesc *edesc;
@@ -3663,7 +3616,7 @@ static int ahash_finup_ctx(struct ahash_request *req)
struct dpaa2_fl_entry *out_fle = &req_ctx->fd_flt[0];
gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
GFP_KERNEL : GFP_ATOMIC;
- int buflen = *current_buflen(state);
+ int buflen = state->buflen;
int qm_sg_bytes, qm_sg_src_index;
int src_nents, mapped_nents;
int digestsize = crypto_ahash_digestsize(ahash);
@@ -3852,8 +3805,8 @@ static int ahash_final_no_ctx(struct ahash_request *req)
struct dpaa2_fl_entry *out_fle = &req_ctx->fd_flt[0];
gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
GFP_KERNEL : GFP_ATOMIC;
- u8 *buf = current_buf(state);
- int buflen = *current_buflen(state);
+ u8 *buf = state->buf;
+ int buflen = state->buflen;
int digestsize = crypto_ahash_digestsize(ahash);
struct ahash_edesc *edesc;
int ret = -ENOMEM;
@@ -3925,10 +3878,9 @@ static int ahash_update_no_ctx(struct ahash_request *req)
struct dpaa2_fl_entry *out_fle = &req_ctx->fd_flt[0];
gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
GFP_KERNEL : GFP_ATOMIC;
- u8 *buf = current_buf(state);
- int *buflen = current_buflen(state);
- u8 *next_buf = alt_buf(state);
- int *next_buflen = alt_buflen(state);
+ u8 *buf = state->buf;
+ int *buflen = &state->buflen;
+ int *next_buflen = &state->next_buflen;
int in_len = *buflen + req->nbytes, to_hash;
int qm_sg_bytes, src_nents, mapped_nents;
struct ahash_edesc *edesc;
@@ -3977,11 +3929,6 @@ static int ahash_update_no_ctx(struct ahash_request *req)
sg_to_qm_sg_last(req->src, src_len, sg_table + 1, 0);
- if (*next_buflen)
- scatterwalk_map_and_copy(next_buf, req->src,
- to_hash - *buflen,
- *next_buflen, 0);
-
edesc->qm_sg_dma = dma_map_single(ctx->dev, sg_table,
qm_sg_bytes, DMA_TO_DEVICE);
if (dma_mapping_error(ctx->dev, edesc->qm_sg_dma)) {
@@ -4029,14 +3976,11 @@ static int ahash_update_no_ctx(struct ahash_request *req)
scatterwalk_map_and_copy(buf + *buflen, req->src, 0,
req->nbytes, 0);
*buflen = *next_buflen;
- *next_buflen = 0;
- }
- print_hex_dump_debug("buf@" __stringify(__LINE__)": ",
- DUMP_PREFIX_ADDRESS, 16, 4, buf, *buflen, 1);
- print_hex_dump_debug("next buf@" __stringify(__LINE__)": ",
- DUMP_PREFIX_ADDRESS, 16, 4, next_buf, *next_buflen,
- 1);
+ print_hex_dump_debug("buf@" __stringify(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, buf,
+ *buflen, 1);
+ }
return ret;
unmap_ctx:
@@ -4055,7 +3999,7 @@ static int ahash_finup_no_ctx(struct ahash_request *req)
struct dpaa2_fl_entry *out_fle = &req_ctx->fd_flt[0];
gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
GFP_KERNEL : GFP_ATOMIC;
- int buflen = *current_buflen(state);
+ int buflen = state->buflen;
int qm_sg_bytes, src_nents, mapped_nents;
int digestsize = crypto_ahash_digestsize(ahash);
struct ahash_edesc *edesc;
@@ -4151,8 +4095,9 @@ static int ahash_update_first(struct ahash_request *req)
struct dpaa2_fl_entry *out_fle = &req_ctx->fd_flt[0];
gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
GFP_KERNEL : GFP_ATOMIC;
- u8 *next_buf = alt_buf(state);
- int *next_buflen = alt_buflen(state);
+ u8 *buf = state->buf;
+ int *buflen = &state->buflen;
+ int *next_buflen = &state->next_buflen;
int to_hash;
int src_nents, mapped_nents;
struct ahash_edesc *edesc;
@@ -4220,10 +4165,6 @@ static int ahash_update_first(struct ahash_request *req)
dpaa2_fl_set_addr(in_fle, sg_dma_address(req->src));
}
- if (*next_buflen)
- scatterwalk_map_and_copy(next_buf, req->src, to_hash,
- *next_buflen, 0);
-
state->ctx_dma_len = ctx->ctx_len;
state->ctx_dma = dma_map_single(ctx->dev, state->caam_ctx,
ctx->ctx_len, DMA_FROM_DEVICE);
@@ -4257,14 +4198,14 @@ static int ahash_update_first(struct ahash_request *req)
state->update = ahash_update_no_ctx;
state->finup = ahash_finup_no_ctx;
state->final = ahash_final_no_ctx;
- scatterwalk_map_and_copy(next_buf, req->src, 0,
+ scatterwalk_map_and_copy(buf, req->src, 0,
req->nbytes, 0);
- switch_buf(state);
- }
+ *buflen = *next_buflen;
- print_hex_dump_debug("next buf@" __stringify(__LINE__)": ",
- DUMP_PREFIX_ADDRESS, 16, 4, next_buf, *next_buflen,
- 1);
+ print_hex_dump_debug("buf@" __stringify(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, buf,
+ *buflen, 1);
+ }
return ret;
unmap_ctx:
@@ -4288,10 +4229,9 @@ static int ahash_init(struct ahash_request *req)
state->ctx_dma = 0;
state->ctx_dma_len = 0;
- state->current_buf = 0;
state->buf_dma = 0;
- state->buflen_0 = 0;
- state->buflen_1 = 0;
+ state->buflen = 0;
+ state->next_buflen = 0;
return 0;
}
@@ -4321,16 +4261,8 @@ static int ahash_export(struct ahash_request *req, void *out)
{
struct caam_hash_state *state = ahash_request_ctx(req);
struct caam_export_state *export = out;
- int len;
- u8 *buf;
-
- if (state->current_buf) {
- buf = state->buf_1;
- len = state->buflen_1;
- } else {
- buf = state->buf_0;
- len = state->buflen_0;
- }
+ u8 *buf = state->buf;
+ int len = state->buflen;
memcpy(export->buf, buf, len);
memcpy(export->caam_ctx, state->caam_ctx, sizeof(export->caam_ctx));
@@ -4348,9 +4280,9 @@ static int ahash_import(struct ahash_request *req, const void *in)
const struct caam_export_state *export = in;
memset(state, 0, sizeof(*state));
- memcpy(state->buf_0, export->buf, export->buflen);
+ memcpy(state->buf, export->buf, export->buflen);
memcpy(state->caam_ctx, export->caam_ctx, sizeof(state->caam_ctx));
- state->buflen_0 = export->buflen;
+ state->buflen = export->buflen;
state->update = export->update;
state->final = export->final;
state->finup = export->finup;
diff --git a/drivers/crypto/caam/caamhash.c b/drivers/crypto/caam/caamhash.c
index 65399cb2a770..8d9143407fc5 100644
--- a/drivers/crypto/caam/caamhash.c
+++ b/drivers/crypto/caam/caamhash.c
@@ -107,15 +107,13 @@ struct caam_hash_state {
dma_addr_t buf_dma;
dma_addr_t ctx_dma;
int ctx_dma_len;
- u8 buf_0[CAAM_MAX_HASH_BLOCK_SIZE] ____cacheline_aligned;
- int buflen_0;
- u8 buf_1[CAAM_MAX_HASH_BLOCK_SIZE] ____cacheline_aligned;
- int buflen_1;
+ u8 buf[CAAM_MAX_HASH_BLOCK_SIZE] ____cacheline_aligned;
+ int buflen;
+ int next_buflen;
u8 caam_ctx[MAX_CTX_LEN] ____cacheline_aligned;
int (*update)(struct ahash_request *req);
int (*final)(struct ahash_request *req);
int (*finup)(struct ahash_request *req);
- int current_buf;
};
struct caam_export_state {
@@ -127,31 +125,6 @@ struct caam_export_state {
int (*finup)(struct ahash_request *req);
};
-static inline void switch_buf(struct caam_hash_state *state)
-{
- state->current_buf ^= 1;
-}
-
-static inline u8 *current_buf(struct caam_hash_state *state)
-{
- return state->current_buf ? state->buf_1 : state->buf_0;
-}
-
-static inline u8 *alt_buf(struct caam_hash_state *state)
-{
- return state->current_buf ? state->buf_0 : state->buf_1;
-}
-
-static inline int *current_buflen(struct caam_hash_state *state)
-{
- return state->current_buf ? &state->buflen_1 : &state->buflen_0;
-}
-
-static inline int *alt_buflen(struct caam_hash_state *state)
-{
- return state->current_buf ? &state->buflen_0 : &state->buflen_1;
-}
-
static inline bool is_cmac_aes(u32 algtype)
{
return (algtype & (OP_ALG_ALGSEL_MASK | OP_ALG_AAI_MASK)) ==
@@ -183,12 +156,12 @@ static inline int buf_map_to_sec4_sg(struct device *jrdev,
struct sec4_sg_entry *sec4_sg,
struct caam_hash_state *state)
{
- int buflen = *current_buflen(state);
+ int buflen = state->buflen;
if (!buflen)
return 0;
- state->buf_dma = dma_map_single(jrdev, current_buf(state), buflen,
+ state->buf_dma = dma_map_single(jrdev, state->buf, buflen,
DMA_TO_DEVICE);
if (dma_mapping_error(jrdev, state->buf_dma)) {
dev_err(jrdev, "unable to map buf\n");
@@ -500,7 +473,6 @@ static int ahash_setkey(struct crypto_ahash *ahash,
return ahash_set_sh_desc(ahash);
bad_free_key:
kfree(hashed_key);
- crypto_ahash_set_flags(ahash, CRYPTO_TFM_RES_BAD_KEY_LEN);
return -EINVAL;
}
@@ -510,10 +482,8 @@ static int axcbc_setkey(struct crypto_ahash *ahash, const u8 *key,
struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
struct device *jrdev = ctx->jrdev;
- if (keylen != AES_KEYSIZE_128) {
- crypto_ahash_set_flags(ahash, CRYPTO_TFM_RES_BAD_KEY_LEN);
+ if (keylen != AES_KEYSIZE_128)
return -EINVAL;
- }
memcpy(ctx->key, key, keylen);
dma_sync_single_for_device(jrdev, ctx->adata.key_dma, keylen,
@@ -533,10 +503,8 @@ static int acmac_setkey(struct crypto_ahash *ahash, const u8 *key,
int err;
err = aes_check_keylen(keylen);
- if (err) {
- crypto_ahash_set_flags(ahash, CRYPTO_TFM_RES_BAD_KEY_LEN);
+ if (err)
return err;
- }
/* key is immediate data for all cmac shared descriptors */
ctx->adata.key_virt = key;
@@ -578,7 +546,7 @@ static inline void ahash_unmap(struct device *dev,
edesc->sec4_sg_bytes, DMA_TO_DEVICE);
if (state->buf_dma) {
- dma_unmap_single(dev, state->buf_dma, *current_buflen(state),
+ dma_unmap_single(dev, state->buf_dma, state->buflen,
DMA_TO_DEVICE);
state->buf_dma = 0;
}
@@ -643,9 +611,17 @@ static void ahash_done_bi(struct device *jrdev, u32 *desc, u32 err,
ecode = caam_jr_strstatus(jrdev, err);
ahash_unmap_ctx(jrdev, edesc, req, ctx->ctx_len, DMA_BIDIRECTIONAL);
- switch_buf(state);
kfree(edesc);
+ scatterwalk_map_and_copy(state->buf, req->src,
+ req->nbytes - state->next_buflen,
+ state->next_buflen, 0);
+ state->buflen = state->next_buflen;
+
+ print_hex_dump_debug("buf@" __stringify(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, state->buf,
+ state->buflen, 1);
+
print_hex_dump_debug("ctx@"__stringify(__LINE__)": ",
DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx,
ctx->ctx_len, 1);
@@ -703,9 +679,17 @@ static void ahash_done_ctx_dst(struct device *jrdev, u32 *desc, u32 err,
ecode = caam_jr_strstatus(jrdev, err);
ahash_unmap_ctx(jrdev, edesc, req, ctx->ctx_len, DMA_FROM_DEVICE);
- switch_buf(state);
kfree(edesc);
+ scatterwalk_map_and_copy(state->buf, req->src,
+ req->nbytes - state->next_buflen,
+ state->next_buflen, 0);
+ state->buflen = state->next_buflen;
+
+ print_hex_dump_debug("buf@" __stringify(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, state->buf,
+ state->buflen, 1);
+
print_hex_dump_debug("ctx@"__stringify(__LINE__)": ",
DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx,
ctx->ctx_len, 1);
@@ -786,18 +770,16 @@ static int ahash_update_ctx(struct ahash_request *req)
struct device *jrdev = ctx->jrdev;
gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
GFP_KERNEL : GFP_ATOMIC;
- u8 *buf = current_buf(state);
- int *buflen = current_buflen(state);
- u8 *next_buf = alt_buf(state);
+ u8 *buf = state->buf;
+ int *buflen = &state->buflen;
+ int *next_buflen = &state->next_buflen;
int blocksize = crypto_ahash_blocksize(ahash);
- int *next_buflen = alt_buflen(state), last_buflen;
int in_len = *buflen + req->nbytes, to_hash;
u32 *desc;
int src_nents, mapped_nents, sec4_sg_bytes, sec4_sg_src_index;
struct ahash_edesc *edesc;
int ret = 0;
- last_buflen = *next_buflen;
*next_buflen = in_len & (blocksize - 1);
to_hash = in_len - *next_buflen;
@@ -868,10 +850,6 @@ static int ahash_update_ctx(struct ahash_request *req)
sg_to_sec4_set_last(edesc->sec4_sg + sec4_sg_src_index -
1);
- if (*next_buflen)
- scatterwalk_map_and_copy(next_buf, req->src,
- to_hash - *buflen,
- *next_buflen, 0);
desc = edesc->hw_desc;
edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
@@ -901,14 +879,11 @@ static int ahash_update_ctx(struct ahash_request *req)
scatterwalk_map_and_copy(buf + *buflen, req->src, 0,
req->nbytes, 0);
*buflen = *next_buflen;
- *next_buflen = last_buflen;
- }
- print_hex_dump_debug("buf@"__stringify(__LINE__)": ",
- DUMP_PREFIX_ADDRESS, 16, 4, buf, *buflen, 1);
- print_hex_dump_debug("next buf@"__stringify(__LINE__)": ",
- DUMP_PREFIX_ADDRESS, 16, 4, next_buf,
- *next_buflen, 1);
+ print_hex_dump_debug("buf@" __stringify(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, buf,
+ *buflen, 1);
+ }
return ret;
unmap_ctx:
@@ -925,7 +900,7 @@ static int ahash_final_ctx(struct ahash_request *req)
struct device *jrdev = ctx->jrdev;
gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
GFP_KERNEL : GFP_ATOMIC;
- int buflen = *current_buflen(state);
+ int buflen = state->buflen;
u32 *desc;
int sec4_sg_bytes;
int digestsize = crypto_ahash_digestsize(ahash);
@@ -991,7 +966,7 @@ static int ahash_finup_ctx(struct ahash_request *req)
struct device *jrdev = ctx->jrdev;
gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
GFP_KERNEL : GFP_ATOMIC;
- int buflen = *current_buflen(state);
+ int buflen = state->buflen;
u32 *desc;
int sec4_sg_src_index;
int src_nents, mapped_nents;
@@ -1148,8 +1123,8 @@ static int ahash_final_no_ctx(struct ahash_request *req)
struct device *jrdev = ctx->jrdev;
gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
GFP_KERNEL : GFP_ATOMIC;
- u8 *buf = current_buf(state);
- int buflen = *current_buflen(state);
+ u8 *buf = state->buf;
+ int buflen = state->buflen;
u32 *desc;
int digestsize = crypto_ahash_digestsize(ahash);
struct ahash_edesc *edesc;
@@ -1207,11 +1182,10 @@ static int ahash_update_no_ctx(struct ahash_request *req)
struct device *jrdev = ctx->jrdev;
gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
GFP_KERNEL : GFP_ATOMIC;
- u8 *buf = current_buf(state);
- int *buflen = current_buflen(state);
+ u8 *buf = state->buf;
+ int *buflen = &state->buflen;
+ int *next_buflen = &state->next_buflen;
int blocksize = crypto_ahash_blocksize(ahash);
- u8 *next_buf = alt_buf(state);
- int *next_buflen = alt_buflen(state);
int in_len = *buflen + req->nbytes, to_hash;
int sec4_sg_bytes, src_nents, mapped_nents;
struct ahash_edesc *edesc;
@@ -1278,12 +1252,6 @@ static int ahash_update_no_ctx(struct ahash_request *req)
sg_to_sec4_sg_last(req->src, src_len, edesc->sec4_sg + 1, 0);
- if (*next_buflen) {
- scatterwalk_map_and_copy(next_buf, req->src,
- to_hash - *buflen,
- *next_buflen, 0);
- }
-
desc = edesc->hw_desc;
edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
@@ -1317,14 +1285,11 @@ static int ahash_update_no_ctx(struct ahash_request *req)
scatterwalk_map_and_copy(buf + *buflen, req->src, 0,
req->nbytes, 0);
*buflen = *next_buflen;
- *next_buflen = 0;
- }
- print_hex_dump_debug("buf@"__stringify(__LINE__)": ",
- DUMP_PREFIX_ADDRESS, 16, 4, buf, *buflen, 1);
- print_hex_dump_debug("next buf@"__stringify(__LINE__)": ",
- DUMP_PREFIX_ADDRESS, 16, 4, next_buf, *next_buflen,
- 1);
+ print_hex_dump_debug("buf@" __stringify(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, buf,
+ *buflen, 1);
+ }
return ret;
unmap_ctx:
@@ -1342,7 +1307,7 @@ static int ahash_finup_no_ctx(struct ahash_request *req)
struct device *jrdev = ctx->jrdev;
gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
GFP_KERNEL : GFP_ATOMIC;
- int buflen = *current_buflen(state);
+ int buflen = state->buflen;
u32 *desc;
int sec4_sg_bytes, sec4_sg_src_index, src_nents, mapped_nents;
int digestsize = crypto_ahash_digestsize(ahash);
@@ -1428,8 +1393,9 @@ static int ahash_update_first(struct ahash_request *req)
struct device *jrdev = ctx->jrdev;
gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
GFP_KERNEL : GFP_ATOMIC;
- u8 *next_buf = alt_buf(state);
- int *next_buflen = alt_buflen(state);
+ u8 *buf = state->buf;
+ int *buflen = &state->buflen;
+ int *next_buflen = &state->next_buflen;
int to_hash;
int blocksize = crypto_ahash_blocksize(ahash);
u32 *desc;
@@ -1491,10 +1457,6 @@ static int ahash_update_first(struct ahash_request *req)
if (ret)
goto unmap_ctx;
- if (*next_buflen)
- scatterwalk_map_and_copy(next_buf, req->src, to_hash,
- *next_buflen, 0);
-
desc = edesc->hw_desc;
ret = map_seq_out_ptr_ctx(desc, jrdev, state, ctx->ctx_len);
@@ -1517,14 +1479,14 @@ static int ahash_update_first(struct ahash_request *req)
state->update = ahash_update_no_ctx;
state->finup = ahash_finup_no_ctx;
state->final = ahash_final_no_ctx;
- scatterwalk_map_and_copy(next_buf, req->src, 0,
+ scatterwalk_map_and_copy(buf, req->src, 0,
req->nbytes, 0);
- switch_buf(state);
- }
+ *buflen = *next_buflen;
- print_hex_dump_debug("next buf@"__stringify(__LINE__)": ",
- DUMP_PREFIX_ADDRESS, 16, 4, next_buf, *next_buflen,
- 1);
+ print_hex_dump_debug("buf@" __stringify(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, buf,
+ *buflen, 1);
+ }
return ret;
unmap_ctx:
@@ -1548,10 +1510,9 @@ static int ahash_init(struct ahash_request *req)
state->ctx_dma = 0;
state->ctx_dma_len = 0;
- state->current_buf = 0;
state->buf_dma = 0;
- state->buflen_0 = 0;
- state->buflen_1 = 0;
+ state->buflen = 0;
+ state->next_buflen = 0;
return 0;
}
@@ -1581,16 +1542,8 @@ static int ahash_export(struct ahash_request *req, void *out)
{
struct caam_hash_state *state = ahash_request_ctx(req);
struct caam_export_state *export = out;
- int len;
- u8 *buf;
-
- if (state->current_buf) {
- buf = state->buf_1;
- len = state->buflen_1;
- } else {
- buf = state->buf_0;
- len = state->buflen_0;
- }
+ u8 *buf = state->buf;
+ int len = state->buflen;
memcpy(export->buf, buf, len);
memcpy(export->caam_ctx, state->caam_ctx, sizeof(export->caam_ctx));
@@ -1608,9 +1561,9 @@ static int ahash_import(struct ahash_request *req, const void *in)
const struct caam_export_state *export = in;
memset(state, 0, sizeof(*state));
- memcpy(state->buf_0, export->buf, export->buflen);
+ memcpy(state->buf, export->buf, export->buflen);
memcpy(state->caam_ctx, export->caam_ctx, sizeof(state->caam_ctx));
- state->buflen_0 = export->buflen;
+ state->buflen = export->buflen;
state->update = export->update;
state->final = export->final;
state->finup = export->finup;
diff --git a/drivers/crypto/caam/ctrl.c b/drivers/crypto/caam/ctrl.c
index d7c3c3805693..7139366da016 100644
--- a/drivers/crypto/caam/ctrl.c
+++ b/drivers/crypto/caam/ctrl.c
@@ -99,10 +99,13 @@ static inline int run_descriptor_deco0(struct device *ctrldev, u32 *desc,
if (ctrlpriv->virt_en == 1 ||
/*
- * Apparently on i.MX8MQ it doesn't matter if virt_en == 1
+ * Apparently on i.MX8M{Q,M,N,P} it doesn't matter if virt_en == 1
* and the following steps should be performed regardless
*/
- of_machine_is_compatible("fsl,imx8mq")) {
+ of_machine_is_compatible("fsl,imx8mq") ||
+ of_machine_is_compatible("fsl,imx8mm") ||
+ of_machine_is_compatible("fsl,imx8mn") ||
+ of_machine_is_compatible("fsl,imx8mp")) {
clrsetbits_32(&ctrl->deco_rsr, 0, DECORSR_JR0);
while (!(rd_reg32(&ctrl->deco_rsr) & DECORSR_VALID) &&
@@ -508,7 +511,7 @@ static const struct soc_device_attribute caam_imx_soc_table[] = {
{ .soc_id = "i.MX6UL", .data = &caam_imx6ul_data },
{ .soc_id = "i.MX6*", .data = &caam_imx6_data },
{ .soc_id = "i.MX7*", .data = &caam_imx7_data },
- { .soc_id = "i.MX8MQ", .data = &caam_imx7_data },
+ { .soc_id = "i.MX8M*", .data = &caam_imx7_data },
{ .family = "Freescale i.MX" },
{ /* sentinel */ }
};
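
The wildcard works because soc_device_match() glob-matches every non-NULL field of each entry, so one "i.MX8M*" row covers i.MX8MQ/MM/MN/MP alike. A sketch under that assumption (example_lookup_caam_data is hypothetical; caam_imx7_data is the table data from this file):

#include <linux/sys_soc.h>

static const void *example_lookup_caam_data(void)
{
        static const struct soc_device_attribute tbl[] = {
                { .soc_id = "i.MX8M*", .data = &caam_imx7_data },
                { /* sentinel */ }
        };
        const struct soc_device_attribute *m = soc_device_match(tbl);

        return m ? m->data : NULL;
}
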
@@ -671,11 +674,9 @@ static int caam_probe(struct platform_device *pdev)
of_node_put(np);
if (!ctrlpriv->mc_en)
- clrsetbits_32(&ctrl->mcr, MCFGR_AWCACHE_MASK | MCFGR_LONG_PTR,
+ clrsetbits_32(&ctrl->mcr, MCFGR_AWCACHE_MASK,
MCFGR_AWCACHE_CACH | MCFGR_AWCACHE_BUFF |
- MCFGR_WDENABLE | MCFGR_LARGE_BURST |
- (sizeof(dma_addr_t) == sizeof(u64) ?
- MCFGR_LONG_PTR : 0));
+ MCFGR_WDENABLE | MCFGR_LARGE_BURST);
handle_imx6_err005766(&ctrl->mcr);
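
clrsetbits_32() is a read-modify-write helper: bits in the clear mask are cleared, bits in the set mask are set, and every other bit keeps its current value, so dropping MCFGR_LONG_PTR from both masks leaves the pointer-size bit exactly as the hardware or bootloader configured it. A sketch of the assumed semantics, ignoring the endianness handling the real CAAM accessor performs:

static void example_clrsetbits_32(void __iomem *reg, u32 clear, u32 set)
{
        iowrite32((ioread32(reg) & ~clear) | set, reg);
}
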
diff --git a/drivers/crypto/cavium/cpt/cptvf_algs.c b/drivers/crypto/cavium/cpt/cptvf_algs.c
index 1ad66677d88e..1be1adffff1d 100644
--- a/drivers/crypto/cavium/cpt/cptvf_algs.c
+++ b/drivers/crypto/cavium/cpt/cptvf_algs.c
@@ -295,8 +295,6 @@ static int cvm_setkey(struct crypto_skcipher *cipher, const u8 *key,
memcpy(ctx->enc_key, key, keylen);
return 0;
} else {
- crypto_skcipher_set_flags(cipher,
- CRYPTO_TFM_RES_BAD_KEY_LEN);
return -EINVAL;
}
}
diff --git a/drivers/crypto/cavium/nitrox/nitrox_aead.c b/drivers/crypto/cavium/nitrox/nitrox_aead.c
index 6f80cc3b5c84..dce5423a5883 100644
--- a/drivers/crypto/cavium/nitrox/nitrox_aead.c
+++ b/drivers/crypto/cavium/nitrox/nitrox_aead.c
@@ -40,10 +40,8 @@ static int nitrox_aes_gcm_setkey(struct crypto_aead *aead, const u8 *key,
union fc_ctx_flags flags;
aes_keylen = flexi_aes_keylen(keylen);
- if (aes_keylen < 0) {
- crypto_aead_set_flags(aead, CRYPTO_TFM_RES_BAD_KEY_LEN);
+ if (aes_keylen < 0)
return -EINVAL;
- }
/* fill crypto context */
fctx = nctx->u.fctx;
diff --git a/drivers/crypto/cavium/nitrox/nitrox_skcipher.c b/drivers/crypto/cavium/nitrox/nitrox_skcipher.c
index 97af4d50d003..18088b0a2257 100644
--- a/drivers/crypto/cavium/nitrox/nitrox_skcipher.c
+++ b/drivers/crypto/cavium/nitrox/nitrox_skcipher.c
@@ -200,10 +200,8 @@ static int nitrox_aes_setkey(struct crypto_skcipher *cipher, const u8 *key,
int aes_keylen;
aes_keylen = flexi_aes_keylen(keylen);
- if (aes_keylen < 0) {
- crypto_skcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
+ if (aes_keylen < 0)
return -EINVAL;
- }
return nitrox_skcipher_setkey(cipher, aes_keylen, key, keylen);
}
@@ -351,10 +349,8 @@ static int nitrox_aes_xts_setkey(struct crypto_skcipher *cipher,
keylen /= 2;
aes_keylen = flexi_aes_keylen(keylen);
- if (aes_keylen < 0) {
- crypto_skcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
+ if (aes_keylen < 0)
return -EINVAL;
- }
fctx = nctx->u.fctx;
/* copy KEY2 */
@@ -382,10 +378,8 @@ static int nitrox_aes_ctr_rfc3686_setkey(struct crypto_skcipher *cipher,
keylen -= CTR_RFC3686_NONCE_SIZE;
aes_keylen = flexi_aes_keylen(keylen);
- if (aes_keylen < 0) {
- crypto_skcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
+ if (aes_keylen < 0)
return -EINVAL;
- }
return nitrox_skcipher_setkey(cipher, aes_keylen, key, keylen);
}
diff --git a/drivers/crypto/ccp/Makefile b/drivers/crypto/ccp/Makefile
index 6b86f1e6d634..db362fe472ea 100644
--- a/drivers/crypto/ccp/Makefile
+++ b/drivers/crypto/ccp/Makefile
@@ -8,7 +8,9 @@ ccp-$(CONFIG_CRYPTO_DEV_SP_CCP) += ccp-dev.o \
ccp-dmaengine.o
ccp-$(CONFIG_CRYPTO_DEV_CCP_DEBUGFS) += ccp-debugfs.o
ccp-$(CONFIG_PCI) += sp-pci.o
-ccp-$(CONFIG_CRYPTO_DEV_SP_PSP) += psp-dev.o
+ccp-$(CONFIG_CRYPTO_DEV_SP_PSP) += psp-dev.o \
+ sev-dev.o \
+ tee-dev.o
obj-$(CONFIG_CRYPTO_DEV_CCP_CRYPTO) += ccp-crypto.o
ccp-crypto-objs := ccp-crypto-main.o \
diff --git a/drivers/crypto/ccp/ccp-crypto-aes-cmac.c b/drivers/crypto/ccp/ccp-crypto-aes-cmac.c
index 32f19f402073..5eba7ee49e81 100644
--- a/drivers/crypto/ccp/ccp-crypto-aes-cmac.c
+++ b/drivers/crypto/ccp/ccp-crypto-aes-cmac.c
@@ -276,7 +276,6 @@ static int ccp_aes_cmac_setkey(struct crypto_ahash *tfm, const u8 *key,
ctx->u.aes.type = CCP_AES_TYPE_256;
break;
default:
- crypto_ahash_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
return -EINVAL;
}
ctx->u.aes.mode = alg->mode;
diff --git a/drivers/crypto/ccp/ccp-crypto-aes-galois.c b/drivers/crypto/ccp/ccp-crypto-aes-galois.c
index ff50ee80d223..9e8f07c1afac 100644
--- a/drivers/crypto/ccp/ccp-crypto-aes-galois.c
+++ b/drivers/crypto/ccp/ccp-crypto-aes-galois.c
@@ -42,7 +42,6 @@ static int ccp_aes_gcm_setkey(struct crypto_aead *tfm, const u8 *key,
ctx->u.aes.type = CCP_AES_TYPE_256;
break;
default:
- crypto_aead_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
return -EINVAL;
}
diff --git a/drivers/crypto/ccp/ccp-crypto-aes.c b/drivers/crypto/ccp/ccp-crypto-aes.c
index 33328a153225..51e12fbd1159 100644
--- a/drivers/crypto/ccp/ccp-crypto-aes.c
+++ b/drivers/crypto/ccp/ccp-crypto-aes.c
@@ -51,7 +51,6 @@ static int ccp_aes_setkey(struct crypto_skcipher *tfm, const u8 *key,
ctx->u.aes.type = CCP_AES_TYPE_256;
break;
default:
- crypto_skcipher_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
return -EINVAL;
}
ctx->u.aes.mode = alg->mode;
diff --git a/drivers/crypto/ccp/ccp-crypto-sha.c b/drivers/crypto/ccp/ccp-crypto-sha.c
index 453b9797f93f..474e6f1a6a84 100644
--- a/drivers/crypto/ccp/ccp-crypto-sha.c
+++ b/drivers/crypto/ccp/ccp-crypto-sha.c
@@ -293,10 +293,8 @@ static int ccp_sha_setkey(struct crypto_ahash *tfm, const u8 *key,
ret = crypto_shash_digest(sdesc, key, key_len,
ctx->u.sha.key);
- if (ret) {
- crypto_ahash_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
+ if (ret)
return -EINVAL;
- }
key_len = digest_size;
} else {
diff --git a/drivers/crypto/ccp/ccp-dev-v3.c b/drivers/crypto/ccp/ccp-dev-v3.c
index 0186b3df4c87..0d5576f6ad21 100644
--- a/drivers/crypto/ccp/ccp-dev-v3.c
+++ b/drivers/crypto/ccp/ccp-dev-v3.c
@@ -586,6 +586,7 @@ const struct ccp_vdata ccpv3_platform = {
.setup = NULL,
.perform = &ccp3_actions,
.offset = 0,
+ .rsamax = CCP_RSA_MAX_WIDTH,
};
const struct ccp_vdata ccpv3 = {
diff --git a/drivers/crypto/ccp/psp-dev.c b/drivers/crypto/ccp/psp-dev.c
index 7ca2d3408e7a..e95e7aa5dbf1 100644
--- a/drivers/crypto/ccp/psp-dev.c
+++ b/drivers/crypto/ccp/psp-dev.c
@@ -2,57 +2,20 @@
/*
* AMD Platform Security Processor (PSP) interface
*
- * Copyright (C) 2016,2018 Advanced Micro Devices, Inc.
+ * Copyright (C) 2016,2019 Advanced Micro Devices, Inc.
*
* Author: Brijesh Singh <brijesh.singh@amd.com>
*/
-#include <linux/module.h>
#include <linux/kernel.h>
-#include <linux/kthread.h>
-#include <linux/sched.h>
-#include <linux/interrupt.h>
-#include <linux/spinlock.h>
-#include <linux/spinlock_types.h>
-#include <linux/types.h>
-#include <linux/mutex.h>
-#include <linux/delay.h>
-#include <linux/hw_random.h>
-#include <linux/ccp.h>
-#include <linux/firmware.h>
-
-#include <asm/smp.h>
+#include <linux/irqreturn.h>
#include "sp-dev.h"
#include "psp-dev.h"
+#include "sev-dev.h"
+#include "tee-dev.h"
-#define DEVICE_NAME "sev"
-#define SEV_FW_FILE "amd/sev.fw"
-#define SEV_FW_NAME_SIZE 64
-
-static DEFINE_MUTEX(sev_cmd_mutex);
-static struct sev_misc_dev *misc_dev;
-static struct psp_device *psp_master;
-
-static int psp_cmd_timeout = 100;
-module_param(psp_cmd_timeout, int, 0644);
-MODULE_PARM_DESC(psp_cmd_timeout, " default timeout value, in seconds, for PSP commands");
-
-static int psp_probe_timeout = 5;
-module_param(psp_probe_timeout, int, 0644);
-MODULE_PARM_DESC(psp_probe_timeout, " default timeout value, in seconds, during PSP device probe");
-
-static bool psp_dead;
-static int psp_timeout;
-
-static inline bool sev_version_greater_or_equal(u8 maj, u8 min)
-{
- if (psp_master->api_major > maj)
- return true;
- if (psp_master->api_major == maj && psp_master->api_minor >= min)
- return true;
- return false;
-}
+struct psp_device *psp_master;
static struct psp_device *psp_alloc_struct(struct sp_device *sp)
{
@@ -75,902 +38,95 @@ static irqreturn_t psp_irq_handler(int irq, void *data)
{
struct psp_device *psp = data;
unsigned int status;
- int reg;
/* Read the interrupt status: */
status = ioread32(psp->io_regs + psp->vdata->intsts_reg);
- /* Check if it is command completion: */
- if (!(status & PSP_CMD_COMPLETE))
- goto done;
+ /* invoke subdevice interrupt handlers */
+ if (status) {
+ if (psp->sev_irq_handler)
+ psp->sev_irq_handler(irq, psp->sev_irq_data, status);
- /* Check if it is SEV command completion: */
- reg = ioread32(psp->io_regs + psp->vdata->cmdresp_reg);
- if (reg & PSP_CMDRESP_RESP) {
- psp->sev_int_rcvd = 1;
- wake_up(&psp->sev_int_queue);
+ if (psp->tee_irq_handler)
+ psp->tee_irq_handler(irq, psp->tee_irq_data, status);
}
-done:
/* Clear the interrupt status by writing the same value we read. */
iowrite32(status, psp->io_regs + psp->vdata->intsts_reg);
return IRQ_HANDLED;
}
-static int sev_wait_cmd_ioc(struct psp_device *psp,
- unsigned int *reg, unsigned int timeout)
-{
- int ret;
-
- ret = wait_event_timeout(psp->sev_int_queue,
- psp->sev_int_rcvd, timeout * HZ);
- if (!ret)
- return -ETIMEDOUT;
-
- *reg = ioread32(psp->io_regs + psp->vdata->cmdresp_reg);
-
- return 0;
-}
-
-static int sev_cmd_buffer_len(int cmd)
-{
- switch (cmd) {
- case SEV_CMD_INIT: return sizeof(struct sev_data_init);
- case SEV_CMD_PLATFORM_STATUS: return sizeof(struct sev_user_data_status);
- case SEV_CMD_PEK_CSR: return sizeof(struct sev_data_pek_csr);
- case SEV_CMD_PEK_CERT_IMPORT: return sizeof(struct sev_data_pek_cert_import);
- case SEV_CMD_PDH_CERT_EXPORT: return sizeof(struct sev_data_pdh_cert_export);
- case SEV_CMD_LAUNCH_START: return sizeof(struct sev_data_launch_start);
- case SEV_CMD_LAUNCH_UPDATE_DATA: return sizeof(struct sev_data_launch_update_data);
- case SEV_CMD_LAUNCH_UPDATE_VMSA: return sizeof(struct sev_data_launch_update_vmsa);
- case SEV_CMD_LAUNCH_FINISH: return sizeof(struct sev_data_launch_finish);
- case SEV_CMD_LAUNCH_MEASURE: return sizeof(struct sev_data_launch_measure);
- case SEV_CMD_ACTIVATE: return sizeof(struct sev_data_activate);
- case SEV_CMD_DEACTIVATE: return sizeof(struct sev_data_deactivate);
- case SEV_CMD_DECOMMISSION: return sizeof(struct sev_data_decommission);
- case SEV_CMD_GUEST_STATUS: return sizeof(struct sev_data_guest_status);
- case SEV_CMD_DBG_DECRYPT: return sizeof(struct sev_data_dbg);
- case SEV_CMD_DBG_ENCRYPT: return sizeof(struct sev_data_dbg);
- case SEV_CMD_SEND_START: return sizeof(struct sev_data_send_start);
- case SEV_CMD_SEND_UPDATE_DATA: return sizeof(struct sev_data_send_update_data);
- case SEV_CMD_SEND_UPDATE_VMSA: return sizeof(struct sev_data_send_update_vmsa);
- case SEV_CMD_SEND_FINISH: return sizeof(struct sev_data_send_finish);
- case SEV_CMD_RECEIVE_START: return sizeof(struct sev_data_receive_start);
- case SEV_CMD_RECEIVE_FINISH: return sizeof(struct sev_data_receive_finish);
- case SEV_CMD_RECEIVE_UPDATE_DATA: return sizeof(struct sev_data_receive_update_data);
- case SEV_CMD_RECEIVE_UPDATE_VMSA: return sizeof(struct sev_data_receive_update_vmsa);
- case SEV_CMD_LAUNCH_UPDATE_SECRET: return sizeof(struct sev_data_launch_secret);
- case SEV_CMD_DOWNLOAD_FIRMWARE: return sizeof(struct sev_data_download_firmware);
- case SEV_CMD_GET_ID: return sizeof(struct sev_data_get_id);
- default: return 0;
- }
-
- return 0;
-}
-
-static int __sev_do_cmd_locked(int cmd, void *data, int *psp_ret)
-{
- struct psp_device *psp = psp_master;
- unsigned int phys_lsb, phys_msb;
- unsigned int reg, ret = 0;
-
- if (!psp)
- return -ENODEV;
-
- if (psp_dead)
- return -EBUSY;
-
- /* Get the physical address of the command buffer */
- phys_lsb = data ? lower_32_bits(__psp_pa(data)) : 0;
- phys_msb = data ? upper_32_bits(__psp_pa(data)) : 0;
-
- dev_dbg(psp->dev, "sev command id %#x buffer 0x%08x%08x timeout %us\n",
- cmd, phys_msb, phys_lsb, psp_timeout);
-
- print_hex_dump_debug("(in): ", DUMP_PREFIX_OFFSET, 16, 2, data,
- sev_cmd_buffer_len(cmd), false);
-
- iowrite32(phys_lsb, psp->io_regs + psp->vdata->cmdbuff_addr_lo_reg);
- iowrite32(phys_msb, psp->io_regs + psp->vdata->cmdbuff_addr_hi_reg);
-
- psp->sev_int_rcvd = 0;
-
- reg = cmd;
- reg <<= PSP_CMDRESP_CMD_SHIFT;
- reg |= PSP_CMDRESP_IOC;
- iowrite32(reg, psp->io_regs + psp->vdata->cmdresp_reg);
-
- /* wait for command completion */
- ret = sev_wait_cmd_ioc(psp, &reg, psp_timeout);
- if (ret) {
- if (psp_ret)
- *psp_ret = 0;
-
- dev_err(psp->dev, "sev command %#x timed out, disabling PSP \n", cmd);
- psp_dead = true;
-
- return ret;
- }
-
- psp_timeout = psp_cmd_timeout;
-
- if (psp_ret)
- *psp_ret = reg & PSP_CMDRESP_ERR_MASK;
-
- if (reg & PSP_CMDRESP_ERR_MASK) {
- dev_dbg(psp->dev, "sev command %#x failed (%#010x)\n",
- cmd, reg & PSP_CMDRESP_ERR_MASK);
- ret = -EIO;
- }
-
- print_hex_dump_debug("(out): ", DUMP_PREFIX_OFFSET, 16, 2, data,
- sev_cmd_buffer_len(cmd), false);
-
- return ret;
-}
-
-static int sev_do_cmd(int cmd, void *data, int *psp_ret)
-{
- int rc;
-
- mutex_lock(&sev_cmd_mutex);
- rc = __sev_do_cmd_locked(cmd, data, psp_ret);
- mutex_unlock(&sev_cmd_mutex);
-
- return rc;
-}
-
-static int __sev_platform_init_locked(int *error)
-{
- struct psp_device *psp = psp_master;
- int rc = 0;
-
- if (!psp)
- return -ENODEV;
-
- if (psp->sev_state == SEV_STATE_INIT)
- return 0;
-
- rc = __sev_do_cmd_locked(SEV_CMD_INIT, &psp->init_cmd_buf, error);
- if (rc)
- return rc;
-
- psp->sev_state = SEV_STATE_INIT;
-
- /* Prepare for first SEV guest launch after INIT */
- wbinvd_on_all_cpus();
- rc = __sev_do_cmd_locked(SEV_CMD_DF_FLUSH, NULL, error);
- if (rc)
- return rc;
-
- dev_dbg(psp->dev, "SEV firmware initialized\n");
-
- return rc;
-}
-
-int sev_platform_init(int *error)
-{
- int rc;
-
- mutex_lock(&sev_cmd_mutex);
- rc = __sev_platform_init_locked(error);
- mutex_unlock(&sev_cmd_mutex);
-
- return rc;
-}
-EXPORT_SYMBOL_GPL(sev_platform_init);
-
-static int __sev_platform_shutdown_locked(int *error)
-{
- int ret;
-
- ret = __sev_do_cmd_locked(SEV_CMD_SHUTDOWN, NULL, error);
- if (ret)
- return ret;
-
- psp_master->sev_state = SEV_STATE_UNINIT;
- dev_dbg(psp_master->dev, "SEV firmware shutdown\n");
-
- return ret;
-}
-
-static int sev_platform_shutdown(int *error)
+static unsigned int psp_get_capability(struct psp_device *psp)
{
- int rc;
-
- mutex_lock(&sev_cmd_mutex);
- rc = __sev_platform_shutdown_locked(NULL);
- mutex_unlock(&sev_cmd_mutex);
-
- return rc;
-}
-
-static int sev_get_platform_state(int *state, int *error)
-{
- int rc;
-
- rc = __sev_do_cmd_locked(SEV_CMD_PLATFORM_STATUS,
- &psp_master->status_cmd_buf, error);
- if (rc)
- return rc;
-
- *state = psp_master->status_cmd_buf.state;
- return rc;
-}
-
-static int sev_ioctl_do_reset(struct sev_issue_cmd *argp)
-{
- int state, rc;
-
- if (!capable(CAP_SYS_ADMIN))
- return -EPERM;
+ unsigned int val = ioread32(psp->io_regs + psp->vdata->feature_reg);
/*
- * The SEV spec requires that FACTORY_RESET must be issued in
- * UNINIT state. Before we go further lets check if any guest is
- * active.
- *
- * If FW is in WORKING state then deny the request otherwise issue
- * SHUTDOWN command do INIT -> UNINIT before issuing the FACTORY_RESET.
- *
- */
- rc = sev_get_platform_state(&state, &argp->error);
- if (rc)
- return rc;
-
- if (state == SEV_STATE_WORKING)
- return -EBUSY;
-
- if (state == SEV_STATE_INIT) {
- rc = __sev_platform_shutdown_locked(&argp->error);
- if (rc)
- return rc;
- }
-
- return __sev_do_cmd_locked(SEV_CMD_FACTORY_RESET, NULL, &argp->error);
-}
-
-static int sev_ioctl_do_platform_status(struct sev_issue_cmd *argp)
-{
- struct sev_user_data_status *data = &psp_master->status_cmd_buf;
- int ret;
-
- ret = __sev_do_cmd_locked(SEV_CMD_PLATFORM_STATUS, data, &argp->error);
- if (ret)
- return ret;
-
- if (copy_to_user((void __user *)argp->data, data, sizeof(*data)))
- ret = -EFAULT;
-
- return ret;
-}
-
-static int sev_ioctl_do_pek_pdh_gen(int cmd, struct sev_issue_cmd *argp)
-{
- int rc;
-
- if (!capable(CAP_SYS_ADMIN))
- return -EPERM;
-
- if (psp_master->sev_state == SEV_STATE_UNINIT) {
- rc = __sev_platform_init_locked(&argp->error);
- if (rc)
- return rc;
- }
-
- return __sev_do_cmd_locked(cmd, NULL, &argp->error);
-}
-
-static int sev_ioctl_do_pek_csr(struct sev_issue_cmd *argp)
-{
- struct sev_user_data_pek_csr input;
- struct sev_data_pek_csr *data;
- void *blob = NULL;
- int ret;
-
- if (!capable(CAP_SYS_ADMIN))
- return -EPERM;
-
- if (copy_from_user(&input, (void __user *)argp->data, sizeof(input)))
- return -EFAULT;
-
- data = kzalloc(sizeof(*data), GFP_KERNEL);
- if (!data)
- return -ENOMEM;
-
- /* userspace wants to query CSR length */
- if (!input.address || !input.length)
- goto cmd;
-
- /* allocate a physically contiguous buffer to store the CSR blob */
- if (!access_ok(input.address, input.length) ||
- input.length > SEV_FW_BLOB_MAX_SIZE) {
- ret = -EFAULT;
- goto e_free;
- }
-
- blob = kmalloc(input.length, GFP_KERNEL);
- if (!blob) {
- ret = -ENOMEM;
- goto e_free;
- }
-
- data->address = __psp_pa(blob);
- data->len = input.length;
-
-cmd:
- if (psp_master->sev_state == SEV_STATE_UNINIT) {
- ret = __sev_platform_init_locked(&argp->error);
- if (ret)
- goto e_free_blob;
- }
-
- ret = __sev_do_cmd_locked(SEV_CMD_PEK_CSR, data, &argp->error);
-
- /* If we query the CSR length, FW responded with expected data. */
- input.length = data->len;
-
- if (copy_to_user((void __user *)argp->data, &input, sizeof(input))) {
- ret = -EFAULT;
- goto e_free_blob;
- }
-
- if (blob) {
- if (copy_to_user((void __user *)input.address, blob, input.length))
- ret = -EFAULT;
- }
-
-e_free_blob:
- kfree(blob);
-e_free:
- kfree(data);
- return ret;
-}
-
-void *psp_copy_user_blob(u64 __user uaddr, u32 len)
-{
- if (!uaddr || !len)
- return ERR_PTR(-EINVAL);
-
- /* verify that blob length does not exceed our limit */
- if (len > SEV_FW_BLOB_MAX_SIZE)
- return ERR_PTR(-EINVAL);
-
- return memdup_user((void __user *)(uintptr_t)uaddr, len);
-}
-EXPORT_SYMBOL_GPL(psp_copy_user_blob);
-
-static int sev_get_api_version(void)
-{
- struct sev_user_data_status *status;
- int error = 0, ret;
-
- status = &psp_master->status_cmd_buf;
- ret = sev_platform_status(status, &error);
- if (ret) {
- dev_err(psp_master->dev,
- "SEV: failed to get status. Error: %#x\n", error);
- return 1;
- }
-
- psp_master->api_major = status->api_major;
- psp_master->api_minor = status->api_minor;
- psp_master->build = status->build;
- psp_master->sev_state = status->state;
-
- return 0;
-}
-
-static int sev_get_firmware(struct device *dev,
- const struct firmware **firmware)
-{
- char fw_name_specific[SEV_FW_NAME_SIZE];
- char fw_name_subset[SEV_FW_NAME_SIZE];
-
- snprintf(fw_name_specific, sizeof(fw_name_specific),
- "amd/amd_sev_fam%.2xh_model%.2xh.sbin",
- boot_cpu_data.x86, boot_cpu_data.x86_model);
-
- snprintf(fw_name_subset, sizeof(fw_name_subset),
- "amd/amd_sev_fam%.2xh_model%.1xxh.sbin",
- boot_cpu_data.x86, (boot_cpu_data.x86_model & 0xf0) >> 4);
-
- /* Check for SEV FW for a particular model.
- * Ex. amd_sev_fam17h_model00h.sbin for Family 17h Model 00h
- *
- * or
- *
- * Check for SEV FW common to a subset of models.
- * Ex. amd_sev_fam17h_model0xh.sbin for
- * Family 17h Model 00h -- Family 17h Model 0Fh
- *
- * or
- *
- * Fall-back to using generic name: sev.fw
* Check for access to the registers. If this read returns
+ * 0xffffffff, it's likely that the system is running a broken
+ * BIOS which disallows access to the device. Stop here and
+ * fail the PSP initialization (but not the load, as the CCP
+ * could get properly initialized).
*/
- if ((firmware_request_nowarn(firmware, fw_name_specific, dev) >= 0) ||
- (firmware_request_nowarn(firmware, fw_name_subset, dev) >= 0) ||
- (firmware_request_nowarn(firmware, SEV_FW_FILE, dev) >= 0))
+ if (val == 0xffffffff) {
+ dev_notice(psp->dev, "psp: unable to access the device: you might be running a broken BIOS.\n");
return 0;
-
- return -ENOENT;
-}
-
-/* Don't fail if SEV FW couldn't be updated. Continue with existing SEV FW */
-static int sev_update_firmware(struct device *dev)
-{
- struct sev_data_download_firmware *data;
- const struct firmware *firmware;
- int ret, error, order;
- struct page *p;
- u64 data_size;
-
- if (sev_get_firmware(dev, &firmware) == -ENOENT) {
- dev_dbg(dev, "No SEV firmware file present\n");
- return -1;
- }
-
- /*
- * SEV FW expects the physical address given to it to be 32
- * byte aligned. Memory allocated has structure placed at the
- * beginning followed by the firmware being passed to the SEV
- * FW. Allocate enough memory for data structure + alignment
- * padding + SEV FW.
- */
- data_size = ALIGN(sizeof(struct sev_data_download_firmware), 32);
-
- order = get_order(firmware->size + data_size);
- p = alloc_pages(GFP_KERNEL, order);
- if (!p) {
- ret = -1;
- goto fw_err;
}
- /*
- * Copy firmware data to a kernel allocated contiguous
- * memory region.
- */
- data = page_address(p);
- memcpy(page_address(p) + data_size, firmware->data, firmware->size);
-
- data->address = __psp_pa(page_address(p) + data_size);
- data->len = firmware->size;
-
- ret = sev_do_cmd(SEV_CMD_DOWNLOAD_FIRMWARE, data, &error);
- if (ret)
- dev_dbg(dev, "Failed to update SEV firmware: %#x\n", error);
- else
- dev_info(dev, "SEV firmware update successful\n");
-
- __free_pages(p, order);
-
-fw_err:
- release_firmware(firmware);
-
- return ret;
+ return val;
}
-static int sev_ioctl_do_pek_import(struct sev_issue_cmd *argp)
+static int psp_check_sev_support(struct psp_device *psp,
+ unsigned int capability)
{
- struct sev_user_data_pek_cert_import input;
- struct sev_data_pek_cert_import *data;
- void *pek_blob, *oca_blob;
- int ret;
-
- if (!capable(CAP_SYS_ADMIN))
- return -EPERM;
-
- if (copy_from_user(&input, (void __user *)argp->data, sizeof(input)))
- return -EFAULT;
-
- data = kzalloc(sizeof(*data), GFP_KERNEL);
- if (!data)
- return -ENOMEM;
-
- /* copy PEK certificate blobs from userspace */
- pek_blob = psp_copy_user_blob(input.pek_cert_address, input.pek_cert_len);
- if (IS_ERR(pek_blob)) {
- ret = PTR_ERR(pek_blob);
- goto e_free;
- }
-
- data->pek_cert_address = __psp_pa(pek_blob);
- data->pek_cert_len = input.pek_cert_len;
-
- /* copy PEK certificate blobs from userspace */
- oca_blob = psp_copy_user_blob(input.oca_cert_address, input.oca_cert_len);
- if (IS_ERR(oca_blob)) {
- ret = PTR_ERR(oca_blob);
- goto e_free_pek;
- }
-
- data->oca_cert_address = __psp_pa(oca_blob);
- data->oca_cert_len = input.oca_cert_len;
-
- /* If platform is not in INIT state then transition it to INIT */
- if (psp_master->sev_state != SEV_STATE_INIT) {
- ret = __sev_platform_init_locked(&argp->error);
- if (ret)
- goto e_free_oca;
+ /* Check if device supports SEV feature */
+ if (!(capability & 1)) {
+ dev_dbg(psp->dev, "psp does not support SEV\n");
+ return -ENODEV;
}
- ret = __sev_do_cmd_locked(SEV_CMD_PEK_CERT_IMPORT, data, &argp->error);
-
-e_free_oca:
- kfree(oca_blob);
-e_free_pek:
- kfree(pek_blob);
-e_free:
- kfree(data);
- return ret;
+ return 0;
}
-static int sev_ioctl_do_get_id2(struct sev_issue_cmd *argp)
+static int psp_check_tee_support(struct psp_device *psp,
+ unsigned int capability)
{
- struct sev_user_data_get_id2 input;
- struct sev_data_get_id *data;
- void *id_blob = NULL;
- int ret;
-
- /* SEV GET_ID is available from SEV API v0.16 and up */
- if (!sev_version_greater_or_equal(0, 16))
- return -ENOTSUPP;
-
- if (copy_from_user(&input, (void __user *)argp->data, sizeof(input)))
- return -EFAULT;
-
- /* Check if we have write access to the userspace buffer */
- if (input.address &&
- input.length &&
- !access_ok(input.address, input.length))
- return -EFAULT;
-
- data = kzalloc(sizeof(*data), GFP_KERNEL);
- if (!data)
- return -ENOMEM;
-
- if (input.address && input.length) {
- id_blob = kmalloc(input.length, GFP_KERNEL);
- if (!id_blob) {
- kfree(data);
- return -ENOMEM;
- }
-
- data->address = __psp_pa(id_blob);
- data->len = input.length;
- }
-
- ret = __sev_do_cmd_locked(SEV_CMD_GET_ID, data, &argp->error);
-
- /*
- * Firmware will return the length of the ID value (either the minimum
- * required length or the actual length written), return it to the user.
- */
- input.length = data->len;
-
- if (copy_to_user((void __user *)argp->data, &input, sizeof(input))) {
- ret = -EFAULT;
- goto e_free;
- }
-
- if (id_blob) {
- if (copy_to_user((void __user *)input.address,
- id_blob, data->len)) {
- ret = -EFAULT;
- goto e_free;
- }
+ /* Check if device supports TEE feature */
+ if (!(capability & 2)) {
+ dev_dbg(psp->dev, "psp does not support TEE\n");
+ return -ENODEV;
}
-e_free:
- kfree(id_blob);
- kfree(data);
-
- return ret;
+ return 0;
}
-static int sev_ioctl_do_get_id(struct sev_issue_cmd *argp)
+static int psp_check_support(struct psp_device *psp,
+ unsigned int capability)
{
- struct sev_data_get_id *data;
- u64 data_size, user_size;
- void *id_blob, *mem;
- int ret;
+ int sev_support = psp_check_sev_support(psp, capability);
+ int tee_support = psp_check_tee_support(psp, capability);
- /* SEV GET_ID available from SEV API v0.16 and up */
- if (!sev_version_greater_or_equal(0, 16))
- return -ENOTSUPP;
-
- /* SEV FW expects the buffer it fills with the ID to be
- * 8-byte aligned. Memory allocated should be enough to
- * hold data structure + alignment padding + memory
- * where SEV FW writes the ID.
- */
- data_size = ALIGN(sizeof(struct sev_data_get_id), 8);
- user_size = sizeof(struct sev_user_data_get_id);
-
- mem = kzalloc(data_size + user_size, GFP_KERNEL);
- if (!mem)
- return -ENOMEM;
-
- data = mem;
- id_blob = mem + data_size;
-
- data->address = __psp_pa(id_blob);
- data->len = user_size;
-
- ret = __sev_do_cmd_locked(SEV_CMD_GET_ID, data, &argp->error);
- if (!ret) {
- if (copy_to_user((void __user *)argp->data, id_blob, data->len))
- ret = -EFAULT;
- }
-
- kfree(mem);
+ /* Return error if device neither supports SEV nor TEE */
+ if (sev_support && tee_support)
+ return -ENODEV;
- return ret;
+ return 0;
}
-static int sev_ioctl_do_pdh_export(struct sev_issue_cmd *argp)
+static int psp_init(struct psp_device *psp, unsigned int capability)
{
- struct sev_user_data_pdh_cert_export input;
- void *pdh_blob = NULL, *cert_blob = NULL;
- struct sev_data_pdh_cert_export *data;
int ret;
- /* If platform is not in INIT state then transition it to INIT. */
- if (psp_master->sev_state != SEV_STATE_INIT) {
- if (!capable(CAP_SYS_ADMIN))
- return -EPERM;
-
- ret = __sev_platform_init_locked(&argp->error);
+ if (!psp_check_sev_support(psp, capability)) {
+ ret = sev_dev_init(psp);
if (ret)
return ret;
}
- if (copy_from_user(&input, (void __user *)argp->data, sizeof(input)))
- return -EFAULT;
-
- data = kzalloc(sizeof(*data), GFP_KERNEL);
- if (!data)
- return -ENOMEM;
-
- /* Userspace wants to query the certificate length. */
- if (!input.pdh_cert_address ||
- !input.pdh_cert_len ||
- !input.cert_chain_address)
- goto cmd;
-
- /* Allocate a physically contiguous buffer to store the PDH blob. */
- if ((input.pdh_cert_len > SEV_FW_BLOB_MAX_SIZE) ||
- !access_ok(input.pdh_cert_address, input.pdh_cert_len)) {
- ret = -EFAULT;
- goto e_free;
- }
-
- /* Allocate a physically contiguous buffer to store the cert chain blob. */
- if ((input.cert_chain_len > SEV_FW_BLOB_MAX_SIZE) ||
- !access_ok(input.cert_chain_address, input.cert_chain_len)) {
- ret = -EFAULT;
- goto e_free;
- }
-
- pdh_blob = kmalloc(input.pdh_cert_len, GFP_KERNEL);
- if (!pdh_blob) {
- ret = -ENOMEM;
- goto e_free;
- }
-
- data->pdh_cert_address = __psp_pa(pdh_blob);
- data->pdh_cert_len = input.pdh_cert_len;
-
- cert_blob = kmalloc(input.cert_chain_len, GFP_KERNEL);
- if (!cert_blob) {
- ret = -ENOMEM;
- goto e_free_pdh;
- }
-
- data->cert_chain_address = __psp_pa(cert_blob);
- data->cert_chain_len = input.cert_chain_len;
-
-cmd:
- ret = __sev_do_cmd_locked(SEV_CMD_PDH_CERT_EXPORT, data, &argp->error);
-
- /* If we query the length, FW responded with expected data. */
- input.cert_chain_len = data->cert_chain_len;
- input.pdh_cert_len = data->pdh_cert_len;
-
- if (copy_to_user((void __user *)argp->data, &input, sizeof(input))) {
- ret = -EFAULT;
- goto e_free_cert;
- }
-
- if (pdh_blob) {
- if (copy_to_user((void __user *)input.pdh_cert_address,
- pdh_blob, input.pdh_cert_len)) {
- ret = -EFAULT;
- goto e_free_cert;
- }
- }
-
- if (cert_blob) {
- if (copy_to_user((void __user *)input.cert_chain_address,
- cert_blob, input.cert_chain_len))
- ret = -EFAULT;
- }
-
-e_free_cert:
- kfree(cert_blob);
-e_free_pdh:
- kfree(pdh_blob);
-e_free:
- kfree(data);
- return ret;
-}
-
-static long sev_ioctl(struct file *file, unsigned int ioctl, unsigned long arg)
-{
- void __user *argp = (void __user *)arg;
- struct sev_issue_cmd input;
- int ret = -EFAULT;
-
- if (!psp_master)
- return -ENODEV;
-
- if (ioctl != SEV_ISSUE_CMD)
- return -EINVAL;
-
- if (copy_from_user(&input, argp, sizeof(struct sev_issue_cmd)))
- return -EFAULT;
-
- if (input.cmd > SEV_MAX)
- return -EINVAL;
-
- mutex_lock(&sev_cmd_mutex);
-
- switch (input.cmd) {
-
- case SEV_FACTORY_RESET:
- ret = sev_ioctl_do_reset(&input);
- break;
- case SEV_PLATFORM_STATUS:
- ret = sev_ioctl_do_platform_status(&input);
- break;
- case SEV_PEK_GEN:
- ret = sev_ioctl_do_pek_pdh_gen(SEV_CMD_PEK_GEN, &input);
- break;
- case SEV_PDH_GEN:
- ret = sev_ioctl_do_pek_pdh_gen(SEV_CMD_PDH_GEN, &input);
- break;
- case SEV_PEK_CSR:
- ret = sev_ioctl_do_pek_csr(&input);
- break;
- case SEV_PEK_CERT_IMPORT:
- ret = sev_ioctl_do_pek_import(&input);
- break;
- case SEV_PDH_CERT_EXPORT:
- ret = sev_ioctl_do_pdh_export(&input);
- break;
- case SEV_GET_ID:
- pr_warn_once("SEV_GET_ID command is deprecated, use SEV_GET_ID2\n");
- ret = sev_ioctl_do_get_id(&input);
- break;
- case SEV_GET_ID2:
- ret = sev_ioctl_do_get_id2(&input);
- break;
- default:
- ret = -EINVAL;
- goto out;
- }
-
- if (copy_to_user(argp, &input, sizeof(struct sev_issue_cmd)))
- ret = -EFAULT;
-out:
- mutex_unlock(&sev_cmd_mutex);
-
- return ret;
-}
-
-static const struct file_operations sev_fops = {
- .owner = THIS_MODULE,
- .unlocked_ioctl = sev_ioctl,
-};
-
-int sev_platform_status(struct sev_user_data_status *data, int *error)
-{
- return sev_do_cmd(SEV_CMD_PLATFORM_STATUS, data, error);
-}
-EXPORT_SYMBOL_GPL(sev_platform_status);
-
-int sev_guest_deactivate(struct sev_data_deactivate *data, int *error)
-{
- return sev_do_cmd(SEV_CMD_DEACTIVATE, data, error);
-}
-EXPORT_SYMBOL_GPL(sev_guest_deactivate);
-
-int sev_guest_activate(struct sev_data_activate *data, int *error)
-{
- return sev_do_cmd(SEV_CMD_ACTIVATE, data, error);
-}
-EXPORT_SYMBOL_GPL(sev_guest_activate);
-
-int sev_guest_decommission(struct sev_data_decommission *data, int *error)
-{
- return sev_do_cmd(SEV_CMD_DECOMMISSION, data, error);
-}
-EXPORT_SYMBOL_GPL(sev_guest_decommission);
-
-int sev_guest_df_flush(int *error)
-{
- return sev_do_cmd(SEV_CMD_DF_FLUSH, NULL, error);
-}
-EXPORT_SYMBOL_GPL(sev_guest_df_flush);
-
-static void sev_exit(struct kref *ref)
-{
- struct sev_misc_dev *misc_dev = container_of(ref, struct sev_misc_dev, refcount);
-
- misc_deregister(&misc_dev->misc);
-}
-
-static int sev_misc_init(struct psp_device *psp)
-{
- struct device *dev = psp->dev;
- int ret;
-
- /*
- * SEV feature support can be detected on multiple devices but the SEV
- * FW commands must be issued on the master. During probe, we do not
- * know the master hence we create /dev/sev on the first device probe.
- * sev_do_cmd() finds the right master device to which to issue the
- * command to the firmware.
- */
- if (!misc_dev) {
- struct miscdevice *misc;
-
- misc_dev = devm_kzalloc(dev, sizeof(*misc_dev), GFP_KERNEL);
- if (!misc_dev)
- return -ENOMEM;
-
- misc = &misc_dev->misc;
- misc->minor = MISC_DYNAMIC_MINOR;
- misc->name = DEVICE_NAME;
- misc->fops = &sev_fops;
-
- ret = misc_register(misc);
+ if (!psp_check_tee_support(psp, capability)) {
+ ret = tee_dev_init(psp);
if (ret)
return ret;
-
- kref_init(&misc_dev->refcount);
- } else {
- kref_get(&misc_dev->refcount);
- }
-
- init_waitqueue_head(&psp->sev_int_queue);
- psp->sev_misc = misc_dev;
- dev_dbg(dev, "registered SEV device\n");
-
- return 0;
-}
-
-static int psp_check_sev_support(struct psp_device *psp)
-{
- unsigned int val = ioread32(psp->io_regs + psp->vdata->feature_reg);
-
- /*
- * Check for a access to the registers. If this read returns
- * 0xffffffff, it's likely that the system is running a broken
- * BIOS which disallows access to the device. Stop here and
- * fail the PSP initialization (but not the load, as the CCP
- * could get properly initialized).
- */
- if (val == 0xffffffff) {
- dev_notice(psp->dev, "psp: unable to access the device: you might be running a broken BIOS.\n");
- return -ENODEV;
- }
-
- if (!(val & 1)) {
- /* Device does not support the SEV feature */
- dev_dbg(psp->dev, "psp does not support SEV\n");
- return -ENODEV;
}
return 0;
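
The split assumes a simple feature-register layout: bit 0 advertises SEV and bit 1 TEE, exactly as tested by psp_check_sev_support() and psp_check_tee_support() above. A sketch with hypothetical macro names (the patch itself tests the raw bits):

#include <linux/bits.h>

#define EXAMPLE_PSP_CAP_SEV     BIT(0)  /* capability & 1 */
#define EXAMPLE_PSP_CAP_TEE     BIT(1)  /* capability & 2 */

static bool example_psp_usable(unsigned int capability)
{
        /* mirrors psp_check_support(): at least one feature required */
        return capability & (EXAMPLE_PSP_CAP_SEV | EXAMPLE_PSP_CAP_TEE);
}
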
@@ -980,6 +136,7 @@ int psp_dev_init(struct sp_device *sp)
{
struct device *dev = sp->dev;
struct psp_device *psp;
+ unsigned int capability;
int ret;
ret = -ENOMEM;
@@ -998,7 +155,11 @@ int psp_dev_init(struct sp_device *sp)
psp->io_regs = sp->io_map;
- ret = psp_check_sev_support(psp);
+ capability = psp_get_capability(psp);
+ if (!capability)
+ goto e_disable;
+
+ ret = psp_check_support(psp, capability);
if (ret)
goto e_disable;
@@ -1013,7 +174,7 @@ int psp_dev_init(struct sp_device *sp)
goto e_err;
}
- ret = sev_misc_init(psp);
+ ret = psp_init(psp, capability);
if (ret)
goto e_irq;
@@ -1049,83 +210,52 @@ void psp_dev_destroy(struct sp_device *sp)
if (!psp)
return;
- if (psp->sev_misc)
- kref_put(&misc_dev->refcount, sev_exit);
+ sev_dev_destroy(psp);
+
+ tee_dev_destroy(psp);
sp_free_psp_irq(sp, psp);
}
-int sev_issue_cmd_external_user(struct file *filep, unsigned int cmd,
- void *data, int *error)
+void psp_set_sev_irq_handler(struct psp_device *psp, psp_irq_handler_t handler,
+ void *data)
{
- if (!filep || filep->f_op != &sev_fops)
- return -EBADF;
-
- return sev_do_cmd(cmd, data, error);
+ psp->sev_irq_data = data;
+ psp->sev_irq_handler = handler;
}
-EXPORT_SYMBOL_GPL(sev_issue_cmd_external_user);
-void psp_pci_init(void)
+void psp_clear_sev_irq_handler(struct psp_device *psp)
{
- struct sp_device *sp;
- int error, rc;
-
- sp = sp_get_psp_master_device();
- if (!sp)
- return;
-
- psp_master = sp->psp_data;
-
- psp_timeout = psp_probe_timeout;
+ psp_set_sev_irq_handler(psp, NULL, NULL);
+}
- if (sev_get_api_version())
- goto err;
+void psp_set_tee_irq_handler(struct psp_device *psp, psp_irq_handler_t handler,
+ void *data)
+{
+ psp->tee_irq_data = data;
+ psp->tee_irq_handler = handler;
+}
- /*
- * If platform is not in UNINIT state then firmware upgrade and/or
- * platform INIT command will fail. These command require UNINIT state.
- *
- * In a normal boot we should never run into case where the firmware
- * is not in UNINIT state on boot. But in case of kexec boot, a reboot
- * may not go through a typical shutdown sequence and may leave the
- * firmware in INIT or WORKING state.
- */
+void psp_clear_tee_irq_handler(struct psp_device *psp)
+{
+ psp_set_tee_irq_handler(psp, NULL, NULL);
+}
- if (psp_master->sev_state != SEV_STATE_UNINIT) {
- sev_platform_shutdown(NULL);
- psp_master->sev_state = SEV_STATE_UNINIT;
- }
+struct psp_device *psp_get_master_device(void)
+{
+ struct sp_device *sp = sp_get_psp_master_device();
- if (sev_version_greater_or_equal(0, 15) &&
- sev_update_firmware(psp_master->dev) == 0)
- sev_get_api_version();
+ return sp ? sp->psp_data : NULL;
+}
- /* Initialize the platform */
- rc = sev_platform_init(&error);
- if (rc && (error == SEV_RET_SECURE_DATA_INVALID)) {
- /*
- * INIT command returned an integrity check failure
- * status code, meaning that firmware load and
- * validation of SEV related persistent data has
- * failed and persistent state has been erased.
- * Retrying INIT command here should succeed.
- */
- dev_dbg(sp->dev, "SEV: retrying INIT command");
- rc = sev_platform_init(&error);
- }
+void psp_pci_init(void)
+{
+ psp_master = psp_get_master_device();
- if (rc) {
- dev_err(sp->dev, "SEV: failed to INIT error %#x\n", error);
+ if (!psp_master)
return;
- }
-
- dev_info(sp->dev, "SEV API:%d.%d build:%d\n", psp_master->api_major,
- psp_master->api_minor, psp_master->build);
-
- return;
-err:
- psp_master = NULL;
+ sev_pci_init();
}
void psp_pci_exit(void)
@@ -1133,5 +263,5 @@ void psp_pci_exit(void)
if (!psp_master)
return;
- sev_platform_shutdown(NULL);
+ sev_pci_exit();
}
diff --git a/drivers/crypto/ccp/psp-dev.h b/drivers/crypto/ccp/psp-dev.h
index dd516b35ba86..ef38e4135d81 100644
--- a/drivers/crypto/ccp/psp-dev.h
+++ b/drivers/crypto/ccp/psp-dev.h
@@ -2,7 +2,7 @@
/*
* AMD Platform Security Processor (PSP) interface driver
*
- * Copyright (C) 2017-2018 Advanced Micro Devices, Inc.
+ * Copyright (C) 2017-2019 Advanced Micro Devices, Inc.
*
* Author: Brijesh Singh <brijesh.singh@amd.com>
*/
@@ -11,35 +11,20 @@
#define __PSP_DEV_H__
#include <linux/device.h>
-#include <linux/spinlock.h>
-#include <linux/mutex.h>
#include <linux/list.h>
-#include <linux/wait.h>
-#include <linux/dmapool.h>
-#include <linux/hw_random.h>
-#include <linux/bitops.h>
+#include <linux/bits.h>
#include <linux/interrupt.h>
-#include <linux/irqreturn.h>
-#include <linux/dmaengine.h>
-#include <linux/psp-sev.h>
-#include <linux/miscdevice.h>
-#include <linux/capability.h>
#include "sp-dev.h"
-#define PSP_CMD_COMPLETE BIT(1)
-
-#define PSP_CMDRESP_CMD_SHIFT 16
-#define PSP_CMDRESP_IOC BIT(0)
#define PSP_CMDRESP_RESP BIT(31)
#define PSP_CMDRESP_ERR_MASK 0xffff
#define MAX_PSP_NAME_LEN 16
-struct sev_misc_dev {
- struct kref refcount;
- struct miscdevice misc;
-};
+extern struct psp_device *psp_master;
+
+typedef void (*psp_irq_handler_t)(int, void *, unsigned int);
struct psp_device {
struct list_head entry;
@@ -52,16 +37,24 @@ struct psp_device {
void __iomem *io_regs;
- int sev_state;
- unsigned int sev_int_rcvd;
- wait_queue_head_t sev_int_queue;
- struct sev_misc_dev *sev_misc;
- struct sev_user_data_status status_cmd_buf;
- struct sev_data_init init_cmd_buf;
+ psp_irq_handler_t sev_irq_handler;
+ void *sev_irq_data;
+
+ psp_irq_handler_t tee_irq_handler;
+ void *tee_irq_data;
- u8 api_major;
- u8 api_minor;
- u8 build;
+ void *sev_data;
+ void *tee_data;
};
+void psp_set_sev_irq_handler(struct psp_device *psp, psp_irq_handler_t handler,
+ void *data);
+void psp_clear_sev_irq_handler(struct psp_device *psp);
+
+void psp_set_tee_irq_handler(struct psp_device *psp, psp_irq_handler_t handler,
+ void *data);
+void psp_clear_tee_irq_handler(struct psp_device *psp);
+
+struct psp_device *psp_get_master_device(void);
+
#endif /* __PSP_DEV_H */
diff --git a/drivers/crypto/ccp/sev-dev.c b/drivers/crypto/ccp/sev-dev.c
new file mode 100644
index 000000000000..e467860f797d
--- /dev/null
+++ b/drivers/crypto/ccp/sev-dev.c
@@ -0,0 +1,1077 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * AMD Secure Encrypted Virtualization (SEV) interface
+ *
+ * Copyright (C) 2016,2019 Advanced Micro Devices, Inc.
+ *
+ * Author: Brijesh Singh <brijesh.singh@amd.com>
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/kthread.h>
+#include <linux/sched.h>
+#include <linux/interrupt.h>
+#include <linux/spinlock.h>
+#include <linux/spinlock_types.h>
+#include <linux/types.h>
+#include <linux/mutex.h>
+#include <linux/delay.h>
+#include <linux/hw_random.h>
+#include <linux/ccp.h>
+#include <linux/firmware.h>
+
+#include <asm/smp.h>
+
+#include "psp-dev.h"
+#include "sev-dev.h"
+
+#define DEVICE_NAME "sev"
+#define SEV_FW_FILE "amd/sev.fw"
+#define SEV_FW_NAME_SIZE 64
+
+static DEFINE_MUTEX(sev_cmd_mutex);
+static struct sev_misc_dev *misc_dev;
+
+static int psp_cmd_timeout = 100;
+module_param(psp_cmd_timeout, int, 0644);
+MODULE_PARM_DESC(psp_cmd_timeout, " default timeout value, in seconds, for PSP commands");
+
+static int psp_probe_timeout = 5;
+module_param(psp_probe_timeout, int, 0644);
+MODULE_PARM_DESC(psp_probe_timeout, " default timeout value, in seconds, during PSP device probe");
+
+static bool psp_dead;
+static int psp_timeout;
+
+static inline bool sev_version_greater_or_equal(u8 maj, u8 min)
+{
+ struct sev_device *sev = psp_master->sev_data;
+
+ if (sev->api_major > maj)
+ return true;
+
+ if (sev->api_major == maj && sev->api_minor >= min)
+ return true;
+
+ return false;
+}
+
+static void sev_irq_handler(int irq, void *data, unsigned int status)
+{
+ struct sev_device *sev = data;
+ int reg;
+
+ /* Check if it is command completion: */
+ if (!(status & SEV_CMD_COMPLETE))
+ return;
+
+ /* Check if it is SEV command completion: */
+ reg = ioread32(sev->io_regs + sev->vdata->cmdresp_reg);
+ if (reg & PSP_CMDRESP_RESP) {
+ sev->int_rcvd = 1;
+ wake_up(&sev->int_queue);
+ }
+}
+
+static int sev_wait_cmd_ioc(struct sev_device *sev,
+ unsigned int *reg, unsigned int timeout)
+{
+ int ret;
+
+ ret = wait_event_timeout(sev->int_queue,
+ sev->int_rcvd, timeout * HZ);
+ if (!ret)
+ return -ETIMEDOUT;
+
+ *reg = ioread32(sev->io_regs + sev->vdata->cmdresp_reg);
+
+ return 0;
+}
+
+static int sev_cmd_buffer_len(int cmd)
+{
+ switch (cmd) {
+ case SEV_CMD_INIT: return sizeof(struct sev_data_init);
+ case SEV_CMD_PLATFORM_STATUS: return sizeof(struct sev_user_data_status);
+ case SEV_CMD_PEK_CSR: return sizeof(struct sev_data_pek_csr);
+ case SEV_CMD_PEK_CERT_IMPORT: return sizeof(struct sev_data_pek_cert_import);
+ case SEV_CMD_PDH_CERT_EXPORT: return sizeof(struct sev_data_pdh_cert_export);
+ case SEV_CMD_LAUNCH_START: return sizeof(struct sev_data_launch_start);
+ case SEV_CMD_LAUNCH_UPDATE_DATA: return sizeof(struct sev_data_launch_update_data);
+ case SEV_CMD_LAUNCH_UPDATE_VMSA: return sizeof(struct sev_data_launch_update_vmsa);
+ case SEV_CMD_LAUNCH_FINISH: return sizeof(struct sev_data_launch_finish);
+ case SEV_CMD_LAUNCH_MEASURE: return sizeof(struct sev_data_launch_measure);
+ case SEV_CMD_ACTIVATE: return sizeof(struct sev_data_activate);
+ case SEV_CMD_DEACTIVATE: return sizeof(struct sev_data_deactivate);
+ case SEV_CMD_DECOMMISSION: return sizeof(struct sev_data_decommission);
+ case SEV_CMD_GUEST_STATUS: return sizeof(struct sev_data_guest_status);
+ case SEV_CMD_DBG_DECRYPT: return sizeof(struct sev_data_dbg);
+ case SEV_CMD_DBG_ENCRYPT: return sizeof(struct sev_data_dbg);
+ case SEV_CMD_SEND_START: return sizeof(struct sev_data_send_start);
+ case SEV_CMD_SEND_UPDATE_DATA: return sizeof(struct sev_data_send_update_data);
+ case SEV_CMD_SEND_UPDATE_VMSA: return sizeof(struct sev_data_send_update_vmsa);
+ case SEV_CMD_SEND_FINISH: return sizeof(struct sev_data_send_finish);
+ case SEV_CMD_RECEIVE_START: return sizeof(struct sev_data_receive_start);
+ case SEV_CMD_RECEIVE_FINISH: return sizeof(struct sev_data_receive_finish);
+ case SEV_CMD_RECEIVE_UPDATE_DATA: return sizeof(struct sev_data_receive_update_data);
+ case SEV_CMD_RECEIVE_UPDATE_VMSA: return sizeof(struct sev_data_receive_update_vmsa);
+ case SEV_CMD_LAUNCH_UPDATE_SECRET: return sizeof(struct sev_data_launch_secret);
+ case SEV_CMD_DOWNLOAD_FIRMWARE: return sizeof(struct sev_data_download_firmware);
+ case SEV_CMD_GET_ID: return sizeof(struct sev_data_get_id);
+ default: return 0;
+ }
+
+ return 0;
+}
+
+static int __sev_do_cmd_locked(int cmd, void *data, int *psp_ret)
+{
+ struct psp_device *psp = psp_master;
+ struct sev_device *sev;
+ unsigned int phys_lsb, phys_msb;
+ unsigned int reg, ret = 0;
+
+ if (!psp || !psp->sev_data)
+ return -ENODEV;
+
+ if (psp_dead)
+ return -EBUSY;
+
+ sev = psp->sev_data;
+
+ /* Get the physical address of the command buffer */
+ phys_lsb = data ? lower_32_bits(__psp_pa(data)) : 0;
+ phys_msb = data ? upper_32_bits(__psp_pa(data)) : 0;
+
+ dev_dbg(sev->dev, "sev command id %#x buffer 0x%08x%08x timeout %us\n",
+ cmd, phys_msb, phys_lsb, psp_timeout);
+
+ print_hex_dump_debug("(in): ", DUMP_PREFIX_OFFSET, 16, 2, data,
+ sev_cmd_buffer_len(cmd), false);
+
+ iowrite32(phys_lsb, sev->io_regs + sev->vdata->cmdbuff_addr_lo_reg);
+ iowrite32(phys_msb, sev->io_regs + sev->vdata->cmdbuff_addr_hi_reg);
+
+ sev->int_rcvd = 0;
+
+ reg = cmd;
+ reg <<= SEV_CMDRESP_CMD_SHIFT;
+ reg |= SEV_CMDRESP_IOC;
+ iowrite32(reg, sev->io_regs + sev->vdata->cmdresp_reg);
+
+ /* wait for command completion */
+ ret = sev_wait_cmd_ioc(sev, &reg, psp_timeout);
+ if (ret) {
+ if (psp_ret)
+ *psp_ret = 0;
+
+ dev_err(sev->dev, "sev command %#x timed out, disabling PSP\n", cmd);
+ psp_dead = true;
+
+ return ret;
+ }
+
+ psp_timeout = psp_cmd_timeout;
+
+ if (psp_ret)
+ *psp_ret = reg & PSP_CMDRESP_ERR_MASK;
+
+ if (reg & PSP_CMDRESP_ERR_MASK) {
+ dev_dbg(sev->dev, "sev command %#x failed (%#010x)\n",
+ cmd, reg & PSP_CMDRESP_ERR_MASK);
+ ret = -EIO;
+ }
+
+ print_hex_dump_debug("(out): ", DUMP_PREFIX_OFFSET, 16, 2, data,
+ sev_cmd_buffer_len(cmd), false);
+
+ return ret;
+}
+
+static int sev_do_cmd(int cmd, void *data, int *psp_ret)
+{
+ int rc;
+
+ mutex_lock(&sev_cmd_mutex);
+ rc = __sev_do_cmd_locked(cmd, data, psp_ret);
+ mutex_unlock(&sev_cmd_mutex);
+
+ return rc;
+}
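
Editor's note: sev_do_cmd() is the mutex-taking wrapper around the mailbox protocol above; in-kernel consumers reach it through the exported helpers further down in this file. A minimal sketch of a hypothetical caller (function name illustrative; the command buffer is handed to firmware by physical address, so it is heap-allocated rather than placed on a possibly vmapped stack):

#include <linux/slab.h>
#include <linux/psp-sev.h>

static int example_bind_asid(u32 handle, u32 asid)
{
	struct sev_data_activate *activate;
	int error, ret;

	/* Firmware reads this buffer via __psp_pa(), so allocate it
	 * from the direct-mapped kernel heap.
	 */
	activate = kzalloc(sizeof(*activate), GFP_KERNEL);
	if (!activate)
		return -ENOMEM;

	activate->handle = handle;
	activate->asid = asid;

	ret = sev_guest_activate(activate, &error);
	if (ret)
		pr_err("SEV ACTIVATE failed, fw error %#x\n", error);

	kfree(activate);
	return ret;
}
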
+
+static int __sev_platform_init_locked(int *error)
+{
+ struct psp_device *psp = psp_master;
+ struct sev_device *sev;
+ int rc = 0;
+
+ if (!psp || !psp->sev_data)
+ return -ENODEV;
+
+ sev = psp->sev_data;
+
+ if (sev->state == SEV_STATE_INIT)
+ return 0;
+
+ rc = __sev_do_cmd_locked(SEV_CMD_INIT, &sev->init_cmd_buf, error);
+ if (rc)
+ return rc;
+
+ sev->state = SEV_STATE_INIT;
+
+ /* Prepare for first SEV guest launch after INIT */
+ wbinvd_on_all_cpus();
+ rc = __sev_do_cmd_locked(SEV_CMD_DF_FLUSH, NULL, error);
+ if (rc)
+ return rc;
+
+ dev_dbg(sev->dev, "SEV firmware initialized\n");
+
+ return rc;
+}
+
+int sev_platform_init(int *error)
+{
+ int rc;
+
+ mutex_lock(&sev_cmd_mutex);
+ rc = __sev_platform_init_locked(error);
+ mutex_unlock(&sev_cmd_mutex);
+
+ return rc;
+}
+EXPORT_SYMBOL_GPL(sev_platform_init);
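
Editor's note: a minimal usage sketch — callers pass an 'error' out-parameter that receives the SEV firmware status code when the command reaches the device but fails:

int error, rc;

rc = sev_platform_init(&error);          /* UNINIT -> INIT + DF_FLUSH */
if (rc)
	pr_err("SEV INIT failed: rc=%d fw error %#x\n", rc, error);
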
+
+static int __sev_platform_shutdown_locked(int *error)
+{
+ struct sev_device *sev = psp_master->sev_data;
+ int ret;
+
+ ret = __sev_do_cmd_locked(SEV_CMD_SHUTDOWN, NULL, error);
+ if (ret)
+ return ret;
+
+ sev->state = SEV_STATE_UNINIT;
+ dev_dbg(sev->dev, "SEV firmware shutdown\n");
+
+ return ret;
+}
+
+static int sev_platform_shutdown(int *error)
+{
+ int rc;
+
+ mutex_lock(&sev_cmd_mutex);
+ rc = __sev_platform_shutdown_locked(NULL);
+ mutex_unlock(&sev_cmd_mutex);
+
+ return rc;
+}
+
+static int sev_get_platform_state(int *state, int *error)
+{
+ struct sev_device *sev = psp_master->sev_data;
+ int rc;
+
+ rc = __sev_do_cmd_locked(SEV_CMD_PLATFORM_STATUS,
+ &sev->status_cmd_buf, error);
+ if (rc)
+ return rc;
+
+ *state = sev->status_cmd_buf.state;
+ return rc;
+}
+
+static int sev_ioctl_do_reset(struct sev_issue_cmd *argp)
+{
+ int state, rc;
+
+ if (!capable(CAP_SYS_ADMIN))
+ return -EPERM;
+
+ /*
+ * The SEV spec requires that FACTORY_RESET must be issued in
+ * UNINIT state. Before we go further, let's check if any guest is
+ * active.
+ *
+ * If FW is in WORKING state then deny the request; otherwise issue
+ * the SHUTDOWN command to transition from INIT to UNINIT before
+ * issuing FACTORY_RESET.
+ */
+ rc = sev_get_platform_state(&state, &argp->error);
+ if (rc)
+ return rc;
+
+ if (state == SEV_STATE_WORKING)
+ return -EBUSY;
+
+ if (state == SEV_STATE_INIT) {
+ rc = __sev_platform_shutdown_locked(&argp->error);
+ if (rc)
+ return rc;
+ }
+
+ return __sev_do_cmd_locked(SEV_CMD_FACTORY_RESET, NULL, &argp->error);
+}
+
+static int sev_ioctl_do_platform_status(struct sev_issue_cmd *argp)
+{
+ struct sev_device *sev = psp_master->sev_data;
+ struct sev_user_data_status *data = &sev->status_cmd_buf;
+ int ret;
+
+ ret = __sev_do_cmd_locked(SEV_CMD_PLATFORM_STATUS, data, &argp->error);
+ if (ret)
+ return ret;
+
+ if (copy_to_user((void __user *)argp->data, data, sizeof(*data)))
+ ret = -EFAULT;
+
+ return ret;
+}
+
+static int sev_ioctl_do_pek_pdh_gen(int cmd, struct sev_issue_cmd *argp)
+{
+ struct sev_device *sev = psp_master->sev_data;
+ int rc;
+
+ if (!capable(CAP_SYS_ADMIN))
+ return -EPERM;
+
+ if (sev->state == SEV_STATE_UNINIT) {
+ rc = __sev_platform_init_locked(&argp->error);
+ if (rc)
+ return rc;
+ }
+
+ return __sev_do_cmd_locked(cmd, NULL, &argp->error);
+}
+
+static int sev_ioctl_do_pek_csr(struct sev_issue_cmd *argp)
+{
+ struct sev_device *sev = psp_master->sev_data;
+ struct sev_user_data_pek_csr input;
+ struct sev_data_pek_csr *data;
+ void *blob = NULL;
+ int ret;
+
+ if (!capable(CAP_SYS_ADMIN))
+ return -EPERM;
+
+ if (copy_from_user(&input, (void __user *)argp->data, sizeof(input)))
+ return -EFAULT;
+
+ data = kzalloc(sizeof(*data), GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
+ /* userspace wants to query CSR length */
+ if (!input.address || !input.length)
+ goto cmd;
+
+ /* allocate a physically contiguous buffer to store the CSR blob */
+ if (!access_ok(input.address, input.length) ||
+ input.length > SEV_FW_BLOB_MAX_SIZE) {
+ ret = -EFAULT;
+ goto e_free;
+ }
+
+ blob = kmalloc(input.length, GFP_KERNEL);
+ if (!blob) {
+ ret = -ENOMEM;
+ goto e_free;
+ }
+
+ data->address = __psp_pa(blob);
+ data->len = input.length;
+
+cmd:
+ if (sev->state == SEV_STATE_UNINIT) {
+ ret = __sev_platform_init_locked(&argp->error);
+ if (ret)
+ goto e_free_blob;
+ }
+
+ ret = __sev_do_cmd_locked(SEV_CMD_PEK_CSR, data, &argp->error);
+
+ /* If we queried the CSR length, the FW responded with the expected data. */
+ input.length = data->len;
+
+ if (copy_to_user((void __user *)argp->data, &input, sizeof(input))) {
+ ret = -EFAULT;
+ goto e_free_blob;
+ }
+
+ if (blob) {
+ if (copy_to_user((void __user *)input.address, blob, input.length))
+ ret = -EFAULT;
+ }
+
+e_free_blob:
+ kfree(blob);
+e_free:
+ kfree(data);
+ return ret;
+}
+
+void *psp_copy_user_blob(u64 __user uaddr, u32 len)
+{
+ if (!uaddr || !len)
+ return ERR_PTR(-EINVAL);
+
+ /* verify that blob length does not exceed our limit */
+ if (len > SEV_FW_BLOB_MAX_SIZE)
+ return ERR_PTR(-EINVAL);
+
+ return memdup_user((void __user *)(uintptr_t)uaddr, len);
+}
+EXPORT_SYMBOL_GPL(psp_copy_user_blob);
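
Editor's note: a sketch of the intended call pattern, as used by the certificate-import paths in this file (the 'input' and 'data' fields here are illustrative):

void *blob;

blob = psp_copy_user_blob(input.cert_address, input.cert_len);
if (IS_ERR(blob))
	return PTR_ERR(blob);

data->address = __psp_pa(blob);          /* firmware sees the kernel copy */
data->len = input.cert_len;
/* ... issue the firmware command, then kfree(blob) ... */
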
+
+static int sev_get_api_version(void)
+{
+ struct sev_device *sev = psp_master->sev_data;
+ struct sev_user_data_status *status;
+ int error = 0, ret;
+
+ status = &sev->status_cmd_buf;
+ ret = sev_platform_status(status, &error);
+ if (ret) {
+ dev_err(sev->dev,
+ "SEV: failed to get status. Error: %#x\n", error);
+ return 1;
+ }
+
+ sev->api_major = status->api_major;
+ sev->api_minor = status->api_minor;
+ sev->build = status->build;
+ sev->state = status->state;
+
+ return 0;
+}
+
+static int sev_get_firmware(struct device *dev,
+ const struct firmware **firmware)
+{
+ char fw_name_specific[SEV_FW_NAME_SIZE];
+ char fw_name_subset[SEV_FW_NAME_SIZE];
+
+ snprintf(fw_name_specific, sizeof(fw_name_specific),
+ "amd/amd_sev_fam%.2xh_model%.2xh.sbin",
+ boot_cpu_data.x86, boot_cpu_data.x86_model);
+
+ snprintf(fw_name_subset, sizeof(fw_name_subset),
+ "amd/amd_sev_fam%.2xh_model%.1xxh.sbin",
+ boot_cpu_data.x86, (boot_cpu_data.x86_model & 0xf0) >> 4);
+
+ /* Check for SEV FW for a particular model.
+ * Ex. amd_sev_fam17h_model00h.sbin for Family 17h Model 00h
+ *
+ * or
+ *
+ * Check for SEV FW common to a subset of models.
+ * Ex. amd_sev_fam17h_model0xh.sbin for
+ * Family 17h Model 00h -- Family 17h Model 0Fh
+ *
+ * or
+ *
+ * Fall back to the generic name: sev.fw
+ */
+ if ((firmware_request_nowarn(firmware, fw_name_specific, dev) >= 0) ||
+ (firmware_request_nowarn(firmware, fw_name_subset, dev) >= 0) ||
+ (firmware_request_nowarn(firmware, SEV_FW_FILE, dev) >= 0))
+ return 0;
+
+ return -ENOENT;
+}
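
Editor's note: a worked example of the lookup order, for a hypothetical Family 17h Model 18h part, following the two format strings above:

amd/amd_sev_fam17h_model18h.sbin    exact model match
amd/amd_sev_fam17h_model1xh.sbin    models 10h-1Fh subset
amd/sev.fw                          generic fallback
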
+
+/* Don't fail if SEV FW couldn't be updated. Continue with existing SEV FW */
+static int sev_update_firmware(struct device *dev)
+{
+ struct sev_data_download_firmware *data;
+ const struct firmware *firmware;
+ int ret, error, order;
+ struct page *p;
+ u64 data_size;
+
+ if (sev_get_firmware(dev, &firmware) == -ENOENT) {
+ dev_dbg(dev, "No SEV firmware file present\n");
+ return -1;
+ }
+
+ /*
+ * SEV FW expects the physical address given to it to be 32-byte
+ * aligned. The allocated memory has the data structure placed at
+ * the beginning, followed by the firmware image being passed to
+ * the SEV FW. Allocate enough memory for the data structure +
+ * alignment padding + SEV FW.
+ */
+ data_size = ALIGN(sizeof(struct sev_data_download_firmware), 32);
+
+ order = get_order(firmware->size + data_size);
+ p = alloc_pages(GFP_KERNEL, order);
+ if (!p) {
+ ret = -1;
+ goto fw_err;
+ }
+
+ /*
+ * Copy firmware data to a kernel allocated contiguous
+ * memory region.
+ */
+ data = page_address(p);
+ memcpy(page_address(p) + data_size, firmware->data, firmware->size);
+
+ data->address = __psp_pa(page_address(p) + data_size);
+ data->len = firmware->size;
+
+ ret = sev_do_cmd(SEV_CMD_DOWNLOAD_FIRMWARE, data, &error);
+ if (ret)
+ dev_dbg(dev, "Failed to update SEV firmware: %#x\n", error);
+ else
+ dev_info(dev, "SEV firmware update successful\n");
+
+ __free_pages(p, order);
+
+fw_err:
+ release_firmware(firmware);
+
+ return ret;
+}
+
+static int sev_ioctl_do_pek_import(struct sev_issue_cmd *argp)
+{
+ struct sev_device *sev = psp_master->sev_data;
+ struct sev_user_data_pek_cert_import input;
+ struct sev_data_pek_cert_import *data;
+ void *pek_blob, *oca_blob;
+ int ret;
+
+ if (!capable(CAP_SYS_ADMIN))
+ return -EPERM;
+
+ if (copy_from_user(&input, (void __user *)argp->data, sizeof(input)))
+ return -EFAULT;
+
+ data = kzalloc(sizeof(*data), GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
+ /* copy PEK certificate blob from userspace */
+ pek_blob = psp_copy_user_blob(input.pek_cert_address, input.pek_cert_len);
+ if (IS_ERR(pek_blob)) {
+ ret = PTR_ERR(pek_blob);
+ goto e_free;
+ }
+
+ data->pek_cert_address = __psp_pa(pek_blob);
+ data->pek_cert_len = input.pek_cert_len;
+
+ /* copy OCA certificate blob from userspace */
+ oca_blob = psp_copy_user_blob(input.oca_cert_address, input.oca_cert_len);
+ if (IS_ERR(oca_blob)) {
+ ret = PTR_ERR(oca_blob);
+ goto e_free_pek;
+ }
+
+ data->oca_cert_address = __psp_pa(oca_blob);
+ data->oca_cert_len = input.oca_cert_len;
+
+ /* If platform is not in INIT state then transition it to INIT */
+ if (sev->state != SEV_STATE_INIT) {
+ ret = __sev_platform_init_locked(&argp->error);
+ if (ret)
+ goto e_free_oca;
+ }
+
+ ret = __sev_do_cmd_locked(SEV_CMD_PEK_CERT_IMPORT, data, &argp->error);
+
+e_free_oca:
+ kfree(oca_blob);
+e_free_pek:
+ kfree(pek_blob);
+e_free:
+ kfree(data);
+ return ret;
+}
+
+static int sev_ioctl_do_get_id2(struct sev_issue_cmd *argp)
+{
+ struct sev_user_data_get_id2 input;
+ struct sev_data_get_id *data;
+ void *id_blob = NULL;
+ int ret;
+
+ /* SEV GET_ID is available from SEV API v0.16 and up */
+ if (!sev_version_greater_or_equal(0, 16))
+ return -ENOTSUPP;
+
+ if (copy_from_user(&input, (void __user *)argp->data, sizeof(input)))
+ return -EFAULT;
+
+ /* Check if we have write access to the userspace buffer */
+ if (input.address &&
+ input.length &&
+ !access_ok(input.address, input.length))
+ return -EFAULT;
+
+ data = kzalloc(sizeof(*data), GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
+ if (input.address && input.length) {
+ id_blob = kmalloc(input.length, GFP_KERNEL);
+ if (!id_blob) {
+ kfree(data);
+ return -ENOMEM;
+ }
+
+ data->address = __psp_pa(id_blob);
+ data->len = input.length;
+ }
+
+ ret = __sev_do_cmd_locked(SEV_CMD_GET_ID, data, &argp->error);
+
+ /*
+ * Firmware will return the length of the ID value (either the minimum
+ * required length or the actual length written); return it to the user.
+ */
+ input.length = data->len;
+
+ if (copy_to_user((void __user *)argp->data, &input, sizeof(input))) {
+ ret = -EFAULT;
+ goto e_free;
+ }
+
+ if (id_blob) {
+ if (copy_to_user((void __user *)input.address,
+ id_blob, data->len)) {
+ ret = -EFAULT;
+ goto e_free;
+ }
+ }
+
+e_free:
+ kfree(id_blob);
+ kfree(data);
+
+ return ret;
+}
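
Editor's note: this ioctl is designed for a two-call pattern from userspace — the first call with a NULL address is expected to fail while reporting the required length, and the second call fetches the ID. A hedged userspace sketch using the uapi definitions:

#include <stdint.h>
#include <stdlib.h>
#include <sys/ioctl.h>
#include <linux/psp-sev.h>

static void *get_sev_id(int sev_fd, uint32_t *len)
{
	struct sev_user_data_get_id2 id2 = {};  /* address == 0: length query */
	struct sev_issue_cmd cmd = {
		.cmd  = SEV_GET_ID2,
		.data = (uint64_t)(uintptr_t)&id2,
	};
	void *buf;

	/* First call fails with a length error but fills id2.length. */
	ioctl(sev_fd, SEV_ISSUE_CMD, &cmd);

	buf = malloc(id2.length);
	if (!buf)
		return NULL;

	id2.address = (uint64_t)(uintptr_t)buf;
	if (ioctl(sev_fd, SEV_ISSUE_CMD, &cmd)) {
		free(buf);
		return NULL;
	}

	*len = id2.length;
	return buf;
}
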
+
+static int sev_ioctl_do_get_id(struct sev_issue_cmd *argp)
+{
+ struct sev_data_get_id *data;
+ u64 data_size, user_size;
+ void *id_blob, *mem;
+ int ret;
+
+ /* SEV GET_ID available from SEV API v0.16 and up */
+ if (!sev_version_greater_or_equal(0, 16))
+ return -ENOTSUPP;
+
+ /* SEV FW expects the buffer it fills with the ID to be
+ * 8-byte aligned. The allocation should be large enough to
+ * hold the data structure + alignment padding + the memory
+ * where SEV FW writes the ID.
+ */
+ data_size = ALIGN(sizeof(struct sev_data_get_id), 8);
+ user_size = sizeof(struct sev_user_data_get_id);
+
+ mem = kzalloc(data_size + user_size, GFP_KERNEL);
+ if (!mem)
+ return -ENOMEM;
+
+ data = mem;
+ id_blob = mem + data_size;
+
+ data->address = __psp_pa(id_blob);
+ data->len = user_size;
+
+ ret = __sev_do_cmd_locked(SEV_CMD_GET_ID, data, &argp->error);
+ if (!ret) {
+ if (copy_to_user((void __user *)argp->data, id_blob, data->len))
+ ret = -EFAULT;
+ }
+
+ kfree(mem);
+
+ return ret;
+}
+
+static int sev_ioctl_do_pdh_export(struct sev_issue_cmd *argp)
+{
+ struct sev_device *sev = psp_master->sev_data;
+ struct sev_user_data_pdh_cert_export input;
+ void *pdh_blob = NULL, *cert_blob = NULL;
+ struct sev_data_pdh_cert_export *data;
+ int ret;
+
+ /* If platform is not in INIT state then transition it to INIT. */
+ if (sev->state != SEV_STATE_INIT) {
+ if (!capable(CAP_SYS_ADMIN))
+ return -EPERM;
+
+ ret = __sev_platform_init_locked(&argp->error);
+ if (ret)
+ return ret;
+ }
+
+ if (copy_from_user(&input, (void __user *)argp->data, sizeof(input)))
+ return -EFAULT;
+
+ data = kzalloc(sizeof(*data), GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
+ /* Userspace wants to query the certificate length. */
+ if (!input.pdh_cert_address ||
+ !input.pdh_cert_len ||
+ !input.cert_chain_address)
+ goto cmd;
+
+ /* Allocate a physically contiguous buffer to store the PDH blob. */
+ if ((input.pdh_cert_len > SEV_FW_BLOB_MAX_SIZE) ||
+ !access_ok(input.pdh_cert_address, input.pdh_cert_len)) {
+ ret = -EFAULT;
+ goto e_free;
+ }
+
+ /* Allocate a physically contiguous buffer to store the cert chain blob. */
+ if ((input.cert_chain_len > SEV_FW_BLOB_MAX_SIZE) ||
+ !access_ok(input.cert_chain_address, input.cert_chain_len)) {
+ ret = -EFAULT;
+ goto e_free;
+ }
+
+ pdh_blob = kmalloc(input.pdh_cert_len, GFP_KERNEL);
+ if (!pdh_blob) {
+ ret = -ENOMEM;
+ goto e_free;
+ }
+
+ data->pdh_cert_address = __psp_pa(pdh_blob);
+ data->pdh_cert_len = input.pdh_cert_len;
+
+ cert_blob = kmalloc(input.cert_chain_len, GFP_KERNEL);
+ if (!cert_blob) {
+ ret = -ENOMEM;
+ goto e_free_pdh;
+ }
+
+ data->cert_chain_address = __psp_pa(cert_blob);
+ data->cert_chain_len = input.cert_chain_len;
+
+cmd:
+ ret = __sev_do_cmd_locked(SEV_CMD_PDH_CERT_EXPORT, data, &argp->error);
+
+ /* If we queried the lengths, the FW responded with the expected data. */
+ input.cert_chain_len = data->cert_chain_len;
+ input.pdh_cert_len = data->pdh_cert_len;
+
+ if (copy_to_user((void __user *)argp->data, &input, sizeof(input))) {
+ ret = -EFAULT;
+ goto e_free_cert;
+ }
+
+ if (pdh_blob) {
+ if (copy_to_user((void __user *)input.pdh_cert_address,
+ pdh_blob, input.pdh_cert_len)) {
+ ret = -EFAULT;
+ goto e_free_cert;
+ }
+ }
+
+ if (cert_blob) {
+ if (copy_to_user((void __user *)input.cert_chain_address,
+ cert_blob, input.cert_chain_len))
+ ret = -EFAULT;
+ }
+
+e_free_cert:
+ kfree(cert_blob);
+e_free_pdh:
+ kfree(pdh_blob);
+e_free:
+ kfree(data);
+ return ret;
+}
+
+static long sev_ioctl(struct file *file, unsigned int ioctl, unsigned long arg)
+{
+ void __user *argp = (void __user *)arg;
+ struct sev_issue_cmd input;
+ int ret = -EFAULT;
+
+ if (!psp_master || !psp_master->sev_data)
+ return -ENODEV;
+
+ if (ioctl != SEV_ISSUE_CMD)
+ return -EINVAL;
+
+ if (copy_from_user(&input, argp, sizeof(struct sev_issue_cmd)))
+ return -EFAULT;
+
+ if (input.cmd > SEV_MAX)
+ return -EINVAL;
+
+ mutex_lock(&sev_cmd_mutex);
+
+ switch (input.cmd) {
+
+ case SEV_FACTORY_RESET:
+ ret = sev_ioctl_do_reset(&input);
+ break;
+ case SEV_PLATFORM_STATUS:
+ ret = sev_ioctl_do_platform_status(&input);
+ break;
+ case SEV_PEK_GEN:
+ ret = sev_ioctl_do_pek_pdh_gen(SEV_CMD_PEK_GEN, &input);
+ break;
+ case SEV_PDH_GEN:
+ ret = sev_ioctl_do_pek_pdh_gen(SEV_CMD_PDH_GEN, &input);
+ break;
+ case SEV_PEK_CSR:
+ ret = sev_ioctl_do_pek_csr(&input);
+ break;
+ case SEV_PEK_CERT_IMPORT:
+ ret = sev_ioctl_do_pek_import(&input);
+ break;
+ case SEV_PDH_CERT_EXPORT:
+ ret = sev_ioctl_do_pdh_export(&input);
+ break;
+ case SEV_GET_ID:
+ pr_warn_once("SEV_GET_ID command is deprecated, use SEV_GET_ID2\n");
+ ret = sev_ioctl_do_get_id(&input);
+ break;
+ case SEV_GET_ID2:
+ ret = sev_ioctl_do_get_id2(&input);
+ break;
+ default:
+ ret = -EINVAL;
+ goto out;
+ }
+
+ if (copy_to_user(argp, &input, sizeof(struct sev_issue_cmd)))
+ ret = -EFAULT;
+out:
+ mutex_unlock(&sev_cmd_mutex);
+
+ return ret;
+}
+
+static const struct file_operations sev_fops = {
+ .owner = THIS_MODULE,
+ .unlocked_ioctl = sev_ioctl,
+};
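
Editor's note: for reference, a minimal userspace consumer of /dev/sev, assuming the uapi definitions from <linux/psp-sev.h>:

#include <stdio.h>
#include <stdint.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/psp-sev.h>

int main(void)
{
	struct sev_user_data_status status = {};
	struct sev_issue_cmd cmd = {
		.cmd  = SEV_PLATFORM_STATUS,
		.data = (uint64_t)(uintptr_t)&status,
	};
	int fd = open("/dev/sev", O_RDWR);

	if (fd < 0)
		return 1;
	if (ioctl(fd, SEV_ISSUE_CMD, &cmd) < 0)
		fprintf(stderr, "fw error %#x\n", cmd.error);
	else
		printf("SEV API %u.%u build %u state %u\n", status.api_major,
		       status.api_minor, status.build, status.state);
	close(fd);
	return 0;
}
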
+
+int sev_platform_status(struct sev_user_data_status *data, int *error)
+{
+ return sev_do_cmd(SEV_CMD_PLATFORM_STATUS, data, error);
+}
+EXPORT_SYMBOL_GPL(sev_platform_status);
+
+int sev_guest_deactivate(struct sev_data_deactivate *data, int *error)
+{
+ return sev_do_cmd(SEV_CMD_DEACTIVATE, data, error);
+}
+EXPORT_SYMBOL_GPL(sev_guest_deactivate);
+
+int sev_guest_activate(struct sev_data_activate *data, int *error)
+{
+ return sev_do_cmd(SEV_CMD_ACTIVATE, data, error);
+}
+EXPORT_SYMBOL_GPL(sev_guest_activate);
+
+int sev_guest_decommission(struct sev_data_decommission *data, int *error)
+{
+ return sev_do_cmd(SEV_CMD_DECOMMISSION, data, error);
+}
+EXPORT_SYMBOL_GPL(sev_guest_decommission);
+
+int sev_guest_df_flush(int *error)
+{
+ return sev_do_cmd(SEV_CMD_DF_FLUSH, NULL, error);
+}
+EXPORT_SYMBOL_GPL(sev_guest_df_flush);
+
+static void sev_exit(struct kref *ref)
+{
+ struct sev_misc_dev *misc_dev = container_of(ref, struct sev_misc_dev, refcount);
+
+ misc_deregister(&misc_dev->misc);
+}
+
+static int sev_misc_init(struct sev_device *sev)
+{
+ struct device *dev = sev->dev;
+ int ret;
+
+ /*
+ * SEV feature support can be detected on multiple devices but the SEV
+ * FW commands must be issued on the master. During probe, we do not
+ * yet know the master, so we create /dev/sev on the first device
+ * probe. sev_do_cmd() then finds the right master device to which to
+ * issue the firmware command.
+ */
+ if (!misc_dev) {
+ struct miscdevice *misc;
+
+ misc_dev = devm_kzalloc(dev, sizeof(*misc_dev), GFP_KERNEL);
+ if (!misc_dev)
+ return -ENOMEM;
+
+ misc = &misc_dev->misc;
+ misc->minor = MISC_DYNAMIC_MINOR;
+ misc->name = DEVICE_NAME;
+ misc->fops = &sev_fops;
+
+ ret = misc_register(misc);
+ if (ret)
+ return ret;
+
+ kref_init(&misc_dev->refcount);
+ } else {
+ kref_get(&misc_dev->refcount);
+ }
+
+ init_waitqueue_head(&sev->int_queue);
+ sev->misc = misc_dev;
+ dev_dbg(dev, "registered SEV device\n");
+
+ return 0;
+}
+
+int sev_dev_init(struct psp_device *psp)
+{
+ struct device *dev = psp->dev;
+ struct sev_device *sev;
+ int ret = -ENOMEM;
+
+ sev = devm_kzalloc(dev, sizeof(*sev), GFP_KERNEL);
+ if (!sev)
+ goto e_err;
+
+ psp->sev_data = sev;
+
+ sev->dev = dev;
+ sev->psp = psp;
+
+ sev->io_regs = psp->io_regs;
+
+ sev->vdata = (struct sev_vdata *)psp->vdata->sev;
+ if (!sev->vdata) {
+ ret = -ENODEV;
+ dev_err(dev, "sev: missing driver data\n");
+ goto e_err;
+ }
+
+ psp_set_sev_irq_handler(psp, sev_irq_handler, sev);
+
+ ret = sev_misc_init(sev);
+ if (ret)
+ goto e_irq;
+
+ dev_notice(dev, "sev enabled\n");
+
+ return 0;
+
+e_irq:
+ psp_clear_sev_irq_handler(psp);
+e_err:
+ psp->sev_data = NULL;
+
+ dev_notice(dev, "sev initialization failed\n");
+
+ return ret;
+}
+
+void sev_dev_destroy(struct psp_device *psp)
+{
+ struct sev_device *sev = psp->sev_data;
+
+ if (!sev)
+ return;
+
+ if (sev->misc)
+ kref_put(&misc_dev->refcount, sev_exit);
+
+ psp_clear_sev_irq_handler(psp);
+}
+
+int sev_issue_cmd_external_user(struct file *filep, unsigned int cmd,
+ void *data, int *error)
+{
+ if (!filep || filep->f_op != &sev_fops)
+ return -EBADF;
+
+ return sev_do_cmd(cmd, data, error);
+}
+EXPORT_SYMBOL_GPL(sev_issue_cmd_external_user);
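
Editor's note: the fops check lets an unrelated subsystem validate that a user-supplied fd really refers to /dev/sev before proxying commands on its behalf. A hypothetical caller (KVM's SEV ioctls follow this shape):

#include <linux/file.h>
#include <linux/psp-sev.h>

static int example_forward_cmd(int sev_fd, unsigned int cmd, void *data)
{
	struct fd f = fdget(sev_fd);
	int error, ret;

	if (!f.file)
		return -EBADF;

	/* Rejected with -EBADF unless the file is backed by sev_fops. */
	ret = sev_issue_cmd_external_user(f.file, cmd, data, &error);

	fdput(f);
	return ret;
}
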
+
+void sev_pci_init(void)
+{
+ struct sev_device *sev = psp_master->sev_data;
+ int error, rc;
+
+ if (!sev)
+ return;
+
+ psp_timeout = psp_probe_timeout;
+
+ if (sev_get_api_version())
+ goto err;
+
+ /*
+ * If the platform is not in UNINIT state then firmware upgrade and/or
+ * platform INIT command will fail. These commands require UNINIT state.
+ *
+ * In a normal boot we should never run into a case where the firmware
+ * is not in UNINIT state on boot. But in case of kexec boot, a reboot
+ * may not go through a typical shutdown sequence and may leave the
+ * firmware in INIT or WORKING state.
+ */
+
+ if (sev->state != SEV_STATE_UNINIT) {
+ sev_platform_shutdown(NULL);
+ sev->state = SEV_STATE_UNINIT;
+ }
+
+ if (sev_version_greater_or_equal(0, 15) &&
+ sev_update_firmware(sev->dev) == 0)
+ sev_get_api_version();
+
+ /* Initialize the platform */
+ rc = sev_platform_init(&error);
+ if (rc && (error == SEV_RET_SECURE_DATA_INVALID)) {
+ /*
+ * INIT command returned an integrity check failure
+ * status code, meaning that firmware load and
+ * validation of SEV related persistent data has
+ * failed and persistent state has been erased.
+ * Retrying INIT command here should succeed.
+ */
+ dev_dbg(sev->dev, "SEV: retrying INIT command");
+ rc = sev_platform_init(&error);
+ }
+
+ if (rc) {
+ dev_err(sev->dev, "SEV: failed to INIT error %#x\n", error);
+ return;
+ }
+
+ dev_info(sev->dev, "SEV API:%d.%d build:%d\n", sev->api_major,
+ sev->api_minor, sev->build);
+
+ return;
+
+err:
+ psp_master->sev_data = NULL;
+}
+
+void sev_pci_exit(void)
+{
+ if (!psp_master->sev_data)
+ return;
+
+ sev_platform_shutdown(NULL);
+}
diff --git a/drivers/crypto/ccp/sev-dev.h b/drivers/crypto/ccp/sev-dev.h
new file mode 100644
index 000000000000..dd5c4fe82914
--- /dev/null
+++ b/drivers/crypto/ccp/sev-dev.h
@@ -0,0 +1,63 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * AMD Secure Encrypted Virtualization (SEV) interface
+ *
+ * Copyright (C) 2017-2019 Advanced Micro Devices, Inc.
+ *
+ * Author: Brijesh Singh <brijesh.singh@amd.com>
+ */
+
+#ifndef __SEV_DEV_H__
+#define __SEV_DEV_H__
+
+#include <linux/device.h>
+#include <linux/spinlock.h>
+#include <linux/mutex.h>
+#include <linux/list.h>
+#include <linux/wait.h>
+#include <linux/dmapool.h>
+#include <linux/hw_random.h>
+#include <linux/bitops.h>
+#include <linux/interrupt.h>
+#include <linux/irqreturn.h>
+#include <linux/dmaengine.h>
+#include <linux/psp-sev.h>
+#include <linux/miscdevice.h>
+#include <linux/capability.h>
+
+#define SEV_CMD_COMPLETE BIT(1)
+#define SEV_CMDRESP_CMD_SHIFT 16
+#define SEV_CMDRESP_IOC BIT(0)
+
+struct sev_misc_dev {
+ struct kref refcount;
+ struct miscdevice misc;
+};
+
+struct sev_device {
+ struct device *dev;
+ struct psp_device *psp;
+
+ void __iomem *io_regs;
+
+ struct sev_vdata *vdata;
+
+ int state;
+ unsigned int int_rcvd;
+ wait_queue_head_t int_queue;
+ struct sev_misc_dev *misc;
+ struct sev_user_data_status status_cmd_buf;
+ struct sev_data_init init_cmd_buf;
+
+ u8 api_major;
+ u8 api_minor;
+ u8 build;
+};
+
+int sev_dev_init(struct psp_device *psp);
+void sev_dev_destroy(struct psp_device *psp);
+
+void sev_pci_init(void);
+void sev_pci_exit(void);
+
+#endif /* __SEV_DEV_H__ */
diff --git a/drivers/crypto/ccp/sp-dev.h b/drivers/crypto/ccp/sp-dev.h
index 53c12562d31e..423594608ad1 100644
--- a/drivers/crypto/ccp/sp-dev.h
+++ b/drivers/crypto/ccp/sp-dev.h
@@ -2,7 +2,7 @@
/*
* AMD Secure Processor driver
*
- * Copyright (C) 2017-2018 Advanced Micro Devices, Inc.
+ * Copyright (C) 2017-2019 Advanced Micro Devices, Inc.
*
* Author: Tom Lendacky <thomas.lendacky@amd.com>
* Author: Gary R Hook <gary.hook@amd.com>
@@ -39,10 +39,23 @@ struct ccp_vdata {
const unsigned int rsamax;
};
-struct psp_vdata {
+struct sev_vdata {
const unsigned int cmdresp_reg;
const unsigned int cmdbuff_addr_lo_reg;
const unsigned int cmdbuff_addr_hi_reg;
+};
+
+struct tee_vdata {
+ const unsigned int cmdresp_reg;
+ const unsigned int cmdbuff_addr_lo_reg;
+ const unsigned int cmdbuff_addr_hi_reg;
+ const unsigned int ring_wptr_reg;
+ const unsigned int ring_rptr_reg;
+};
+
+struct psp_vdata {
+ const struct sev_vdata *sev;
+ const struct tee_vdata *tee;
const unsigned int feature_reg;
const unsigned int inten_reg;
const unsigned int intsts_reg;
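
Editor's note: with this split, a NULL sub-table doubles as a per-silicon capability flag, mirroring how sev_dev_init()/tee_dev_init() earlier in this patch bail out on missing vdata. A minimal sketch of how engine code consumes it (assuming 'psp' is a probed struct psp_device and 'pa' a command-buffer physical address):

const struct sev_vdata *sv = psp->vdata->sev;

if (!sv)
	return -ENODEV;    /* this silicon exposes no SEV mailbox */

iowrite32(lower_32_bits(pa), psp->io_regs + sv->cmdbuff_addr_lo_reg);
iowrite32(upper_32_bits(pa), psp->io_regs + sv->cmdbuff_addr_hi_reg);
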
diff --git a/drivers/crypto/ccp/sp-pci.c b/drivers/crypto/ccp/sp-pci.c
index b29d2e663e10..56c1f61c0f84 100644
--- a/drivers/crypto/ccp/sp-pci.c
+++ b/drivers/crypto/ccp/sp-pci.c
@@ -2,7 +2,7 @@
/*
* AMD Secure Processor device driver
*
- * Copyright (C) 2013,2018 Advanced Micro Devices, Inc.
+ * Copyright (C) 2013,2019 Advanced Micro Devices, Inc.
*
* Author: Tom Lendacky <thomas.lendacky@amd.com>
* Author: Gary R Hook <gary.hook@amd.com>
@@ -262,19 +262,42 @@ static int sp_pci_resume(struct pci_dev *pdev)
#endif
#ifdef CONFIG_CRYPTO_DEV_SP_PSP
-static const struct psp_vdata pspv1 = {
+static const struct sev_vdata sevv1 = {
.cmdresp_reg = 0x10580,
.cmdbuff_addr_lo_reg = 0x105e0,
.cmdbuff_addr_hi_reg = 0x105e4,
+};
+
+static const struct sev_vdata sevv2 = {
+ .cmdresp_reg = 0x10980,
+ .cmdbuff_addr_lo_reg = 0x109e0,
+ .cmdbuff_addr_hi_reg = 0x109e4,
+};
+
+static const struct tee_vdata teev1 = {
+ .cmdresp_reg = 0x10544,
+ .cmdbuff_addr_lo_reg = 0x10548,
+ .cmdbuff_addr_hi_reg = 0x1054c,
+ .ring_wptr_reg = 0x10550,
+ .ring_rptr_reg = 0x10554,
+};
+
+static const struct psp_vdata pspv1 = {
+ .sev = &sevv1,
.feature_reg = 0x105fc,
.inten_reg = 0x10610,
.intsts_reg = 0x10614,
};
static const struct psp_vdata pspv2 = {
- .cmdresp_reg = 0x10980,
- .cmdbuff_addr_lo_reg = 0x109e0,
- .cmdbuff_addr_hi_reg = 0x109e4,
+ .sev = &sevv2,
+ .feature_reg = 0x109fc,
+ .inten_reg = 0x10690,
+ .intsts_reg = 0x10694,
+};
+
+static const struct psp_vdata pspv3 = {
+ .tee = &teev1,
.feature_reg = 0x109fc,
.inten_reg = 0x10690,
.intsts_reg = 0x10694,
@@ -312,12 +335,22 @@ static const struct sp_dev_vdata dev_vdata[] = {
.psp_vdata = &pspv2,
#endif
},
+ { /* 4 */
+ .bar = 2,
+#ifdef CONFIG_CRYPTO_DEV_SP_CCP
+ .ccp_vdata = &ccpv5a,
+#endif
+#ifdef CONFIG_CRYPTO_DEV_SP_PSP
+ .psp_vdata = &pspv3,
+#endif
+ },
};
static const struct pci_device_id sp_pci_table[] = {
{ PCI_VDEVICE(AMD, 0x1537), (kernel_ulong_t)&dev_vdata[0] },
{ PCI_VDEVICE(AMD, 0x1456), (kernel_ulong_t)&dev_vdata[1] },
{ PCI_VDEVICE(AMD, 0x1468), (kernel_ulong_t)&dev_vdata[2] },
{ PCI_VDEVICE(AMD, 0x1486), (kernel_ulong_t)&dev_vdata[3] },
+ { PCI_VDEVICE(AMD, 0x15DF), (kernel_ulong_t)&dev_vdata[4] },
/* Last entry must be zero */
{ 0, }
};
diff --git a/drivers/crypto/ccp/tee-dev.c b/drivers/crypto/ccp/tee-dev.c
new file mode 100644
index 000000000000..5e697a90ea7f
--- /dev/null
+++ b/drivers/crypto/ccp/tee-dev.c
@@ -0,0 +1,375 @@
+// SPDX-License-Identifier: MIT
+/*
+ * AMD Trusted Execution Environment (TEE) interface
+ *
+ * Author: Rijo Thomas <Rijo-john.Thomas@amd.com>
+ * Author: Devaraj Rangasamy <Devaraj.Rangasamy@amd.com>
+ *
+ * Copyright 2019 Advanced Micro Devices, Inc.
+ */
+
+#include <linux/types.h>
+#include <linux/mutex.h>
+#include <linux/delay.h>
+#include <linux/slab.h>
+#include <linux/gfp.h>
+#include <linux/psp-sev.h>
+#include <linux/psp-tee.h>
+
+#include "psp-dev.h"
+#include "tee-dev.h"
+
+static bool psp_dead;
+
+static int tee_alloc_ring(struct psp_tee_device *tee, int ring_size)
+{
+ struct ring_buf_manager *rb_mgr = &tee->rb_mgr;
+ void *start_addr;
+
+ if (!ring_size)
+ return -EINVAL;
+
+ /* We need the actual physical address instead of a DMA address, since
+ * the Trusted OS running on the AMD Secure Processor will map this region.
+ */
+ start_addr = (void *)__get_free_pages(GFP_KERNEL, get_order(ring_size));
+ if (!start_addr)
+ return -ENOMEM;
+
+ rb_mgr->ring_start = start_addr;
+ rb_mgr->ring_size = ring_size;
+ rb_mgr->ring_pa = __psp_pa(start_addr);
+ mutex_init(&rb_mgr->mutex);
+
+ return 0;
+}
+
+static void tee_free_ring(struct psp_tee_device *tee)
+{
+ struct ring_buf_manager *rb_mgr = &tee->rb_mgr;
+
+ if (!rb_mgr->ring_start)
+ return;
+
+ free_pages((unsigned long)rb_mgr->ring_start,
+ get_order(rb_mgr->ring_size));
+
+ rb_mgr->ring_start = NULL;
+ rb_mgr->ring_size = 0;
+ rb_mgr->ring_pa = 0;
+ mutex_destroy(&rb_mgr->mutex);
+}
+
+static int tee_wait_cmd_poll(struct psp_tee_device *tee, unsigned int timeout,
+ unsigned int *reg)
+{
+ /* ~10ms sleep per loop => nloop = timeout * 100 */
+ int nloop = timeout * 100;
+
+ while (--nloop) {
+ *reg = ioread32(tee->io_regs + tee->vdata->cmdresp_reg);
+ if (*reg & PSP_CMDRESP_RESP)
+ return 0;
+
+ usleep_range(10000, 10100);
+ }
+
+ dev_err(tee->dev, "tee: command timed out, disabling PSP\n");
+ psp_dead = true;
+
+ return -ETIMEDOUT;
+}
+
+static
+struct tee_init_ring_cmd *tee_alloc_cmd_buffer(struct psp_tee_device *tee)
+{
+ struct tee_init_ring_cmd *cmd;
+
+ cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
+ if (!cmd)
+ return NULL;
+
+ cmd->hi_addr = upper_32_bits(tee->rb_mgr.ring_pa);
+ cmd->low_addr = lower_32_bits(tee->rb_mgr.ring_pa);
+ cmd->size = tee->rb_mgr.ring_size;
+
+ dev_dbg(tee->dev, "tee: ring address: high = 0x%x low = 0x%x size = %u\n",
+ cmd->hi_addr, cmd->low_addr, cmd->size);
+
+ return cmd;
+}
+
+static inline void tee_free_cmd_buffer(struct tee_init_ring_cmd *cmd)
+{
+ kfree(cmd);
+}
+
+static int tee_init_ring(struct psp_tee_device *tee)
+{
+ int ring_size = MAX_RING_BUFFER_ENTRIES * sizeof(struct tee_ring_cmd);
+ struct tee_init_ring_cmd *cmd;
+ phys_addr_t cmd_buffer;
+ unsigned int reg;
+ int ret;
+
+ BUILD_BUG_ON(sizeof(struct tee_ring_cmd) != 1024);
+
+ ret = tee_alloc_ring(tee, ring_size);
+ if (ret) {
+ dev_err(tee->dev, "tee: ring allocation failed %d\n", ret);
+ return ret;
+ }
+
+ tee->rb_mgr.wptr = 0;
+
+ cmd = tee_alloc_cmd_buffer(tee);
+ if (!cmd) {
+ tee_free_ring(tee);
+ return -ENOMEM;
+ }
+
+ cmd_buffer = __psp_pa((void *)cmd);
+
+ /* Send the command buffer details to the Trusted OS by writing to
+ * the CPU-PSP message registers.
+ */
+
+ iowrite32(lower_32_bits(cmd_buffer),
+ tee->io_regs + tee->vdata->cmdbuff_addr_lo_reg);
+ iowrite32(upper_32_bits(cmd_buffer),
+ tee->io_regs + tee->vdata->cmdbuff_addr_hi_reg);
+ iowrite32(TEE_RING_INIT_CMD,
+ tee->io_regs + tee->vdata->cmdresp_reg);
+
+ ret = tee_wait_cmd_poll(tee, TEE_DEFAULT_TIMEOUT, &reg);
+ if (ret) {
+ dev_err(tee->dev, "tee: ring init command timed out\n");
+ tee_free_ring(tee);
+ goto free_buf;
+ }
+
+ if (reg & PSP_CMDRESP_ERR_MASK) {
+ dev_err(tee->dev, "tee: ring init command failed (%#010x)\n",
+ reg & PSP_CMDRESP_ERR_MASK);
+ tee_free_ring(tee);
+ ret = -EIO;
+ }
+
+free_buf:
+ tee_free_cmd_buffer(cmd);
+
+ return ret;
+}
+
+static void tee_destroy_ring(struct psp_tee_device *tee)
+{
+ unsigned int reg;
+ int ret;
+
+ if (!tee->rb_mgr.ring_start)
+ return;
+
+ if (psp_dead)
+ goto free_ring;
+
+ iowrite32(TEE_RING_DESTROY_CMD,
+ tee->io_regs + tee->vdata->cmdresp_reg);
+
+ ret = tee_wait_cmd_poll(tee, TEE_DEFAULT_TIMEOUT, &reg);
+ if (ret) {
+ dev_err(tee->dev, "tee: ring destroy command timed out\n");
+ } else if (reg & PSP_CMDRESP_ERR_MASK) {
+ dev_err(tee->dev, "tee: ring destroy command failed (%#010x)\n",
+ reg & PSP_CMDRESP_ERR_MASK);
+ }
+
+free_ring:
+ tee_free_ring(tee);
+}
+
+int tee_dev_init(struct psp_device *psp)
+{
+ struct device *dev = psp->dev;
+ struct psp_tee_device *tee;
+ int ret;
+
+ ret = -ENOMEM;
+ tee = devm_kzalloc(dev, sizeof(*tee), GFP_KERNEL);
+ if (!tee)
+ goto e_err;
+
+ psp->tee_data = tee;
+
+ tee->dev = dev;
+ tee->psp = psp;
+
+ tee->io_regs = psp->io_regs;
+
+ tee->vdata = (struct tee_vdata *)psp->vdata->tee;
+ if (!tee->vdata) {
+ ret = -ENODEV;
+ dev_err(dev, "tee: missing driver data\n");
+ goto e_err;
+ }
+
+ ret = tee_init_ring(tee);
+ if (ret) {
+ dev_err(dev, "tee: failed to init ring buffer\n");
+ goto e_err;
+ }
+
+ dev_notice(dev, "tee enabled\n");
+
+ return 0;
+
+e_err:
+ psp->tee_data = NULL;
+
+ dev_notice(dev, "tee initialization failed\n");
+
+ return ret;
+}
+
+void tee_dev_destroy(struct psp_device *psp)
+{
+ struct psp_tee_device *tee = psp->tee_data;
+
+ if (!tee)
+ return;
+
+ tee_destroy_ring(tee);
+}
+
+static int tee_submit_cmd(struct psp_tee_device *tee, enum tee_cmd_id cmd_id,
+ void *buf, size_t len, struct tee_ring_cmd **resp)
+{
+ struct tee_ring_cmd *cmd;
+ u32 rptr, wptr;
+ int nloop = 1000, ret = 0;
+
+ *resp = NULL;
+
+ mutex_lock(&tee->rb_mgr.mutex);
+
+ wptr = tee->rb_mgr.wptr;
+
+ /* Check if ring buffer is full */
+ do {
+ rptr = ioread32(tee->io_regs + tee->vdata->ring_rptr_reg);
+
+ if (wptr + sizeof(struct tee_ring_cmd) != rptr)
+ break;
+
+ dev_info(tee->dev, "tee: ring buffer full. rptr = %u wptr = %u\n",
+ rptr, wptr);
+
+ /* Wait if ring buffer is full */
+ mutex_unlock(&tee->rb_mgr.mutex);
+ schedule_timeout_interruptible(msecs_to_jiffies(10));
+ mutex_lock(&tee->rb_mgr.mutex);
+
+ } while (--nloop);
+
+ if (!nloop && (wptr + sizeof(struct tee_ring_cmd) == rptr)) {
+ dev_err(tee->dev, "tee: ring buffer full. rptr = %u wptr = %u\n",
+ rptr, wptr);
+ ret = -EBUSY;
+ goto unlock;
+ }
+
+ /* Pointer to empty data entry in ring buffer */
+ cmd = (struct tee_ring_cmd *)(tee->rb_mgr.ring_start + wptr);
+
+ /* Write command data into ring buffer */
+ cmd->cmd_id = cmd_id;
+ cmd->cmd_state = TEE_CMD_STATE_INIT;
+ memset(&cmd->buf[0], 0, sizeof(cmd->buf));
+ memcpy(&cmd->buf[0], buf, len);
+
+ /* Update local copy of write pointer */
+ tee->rb_mgr.wptr += sizeof(struct tee_ring_cmd);
+ if (tee->rb_mgr.wptr >= tee->rb_mgr.ring_size)
+ tee->rb_mgr.wptr = 0;
+
+ /* Trigger interrupt to Trusted OS */
+ iowrite32(tee->rb_mgr.wptr, tee->io_regs + tee->vdata->ring_wptr_reg);
+
+ /* The response is provided by the Trusted OS in the same
+ * location as the submitted data entry within the ring buffer.
+ */
+ *resp = cmd;
+
+unlock:
+ mutex_unlock(&tee->rb_mgr.mutex);
+
+ return ret;
+}
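
Editor's note: worked numbers for the index arithmetic, assuming the constants from tee-dev.h — 32 entries of 1024 bytes give a 32 KiB ring, and both pointers are byte offsets into it:

u32 wptr = 31 * sizeof(struct tee_ring_cmd);   /* last slot: 31744 */

wptr += sizeof(struct tee_ring_cmd);           /* 32768 == ring size */
if (wptr >= 32 * sizeof(struct tee_ring_cmd))
	wptr = 0;                              /* wraps to slot 0 */

/* Full when advancing would land on the reader: wptr + 1024 == rptr. */
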
+
+static int tee_wait_cmd_completion(struct psp_tee_device *tee,
+ struct tee_ring_cmd *resp,
+ unsigned int timeout)
+{
+ /* ~5ms sleep per loop => nloop = timeout * 200 */
+ int nloop = timeout * 200;
+
+ while (--nloop) {
+ if (resp->cmd_state == TEE_CMD_STATE_COMPLETED)
+ return 0;
+
+ usleep_range(5000, 5100);
+ }
+
+ dev_err(tee->dev, "tee: command 0x%x timed out, disabling PSP\n",
+ resp->cmd_id);
+
+ psp_dead = true;
+
+ return -ETIMEDOUT;
+}
+
+int psp_tee_process_cmd(enum tee_cmd_id cmd_id, void *buf, size_t len,
+ u32 *status)
+{
+ struct psp_device *psp = psp_get_master_device();
+ struct psp_tee_device *tee;
+ struct tee_ring_cmd *resp;
+ int ret;
+
+ if (!buf || !status || !len || len > sizeof(resp->buf))
+ return -EINVAL;
+
+ *status = 0;
+
+ if (!psp || !psp->tee_data)
+ return -ENODEV;
+
+ if (psp_dead)
+ return -EBUSY;
+
+ tee = psp->tee_data;
+
+ ret = tee_submit_cmd(tee, cmd_id, buf, len, &resp);
+ if (ret)
+ return ret;
+
+ ret = tee_wait_cmd_completion(tee, resp, TEE_DEFAULT_TIMEOUT);
+ if (ret)
+ return ret;
+
+ memcpy(buf, &resp->buf[0], len);
+ *status = resp->status;
+
+ return 0;
+}
+EXPORT_SYMBOL(psp_tee_process_cmd);
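
Editor's note: a hedged sketch of an in-kernel consumer; the payload layout and command id are whatever the Trusted OS defines, so both are left abstract here:

#include <linux/psp-tee.h>

static int example_tee_call(enum tee_cmd_id cmd_id, void *payload, size_t len)
{
	u32 status;
	int ret;

	ret = psp_tee_process_cmd(cmd_id, payload, len, &status);
	if (ret)
		return ret;        /* transport failure: -ENODEV, -ETIMEDOUT... */

	/* 'status' carries the Trusted OS result; zero means success. */
	return status ? -EIO : 0;
}
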
+
+int psp_check_tee_status(void)
+{
+ struct psp_device *psp = psp_get_master_device();
+
+ if (!psp || !psp->tee_data)
+ return -ENODEV;
+
+ return 0;
+}
+EXPORT_SYMBOL(psp_check_tee_status);
diff --git a/drivers/crypto/ccp/tee-dev.h b/drivers/crypto/ccp/tee-dev.h
new file mode 100644
index 000000000000..f09960112115
--- /dev/null
+++ b/drivers/crypto/ccp/tee-dev.h
@@ -0,0 +1,110 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright 2019 Advanced Micro Devices, Inc.
+ *
+ * Author: Rijo Thomas <Rijo-john.Thomas@amd.com>
+ * Author: Devaraj Rangasamy <Devaraj.Rangasamy@amd.com>
+ *
+ */
+
+/* This file describes the TEE communication interface between the host
+ * and the AMD Secure Processor.
+ */
+
+#ifndef __TEE_DEV_H__
+#define __TEE_DEV_H__
+
+#include <linux/device.h>
+#include <linux/mutex.h>
+
+#define TEE_DEFAULT_TIMEOUT 10
+#define MAX_BUFFER_SIZE 992
+
+/**
+ * enum tee_ring_cmd_id - TEE interface commands for ring buffer configuration
+ * @TEE_RING_INIT_CMD: Initialize ring buffer
+ * @TEE_RING_DESTROY_CMD: Destroy ring buffer
+ * @TEE_RING_MAX_CMD: Maximum command id
+ */
+enum tee_ring_cmd_id {
+ TEE_RING_INIT_CMD = 0x00010000,
+ TEE_RING_DESTROY_CMD = 0x00020000,
+ TEE_RING_MAX_CMD = 0x000F0000,
+};
+
+/**
+ * struct tee_init_ring_cmd - Command to init TEE ring buffer
+ * @low_addr: bits [31:0] of the physical address of ring buffer
+ * @hi_addr: bits [63:32] of the physical address of ring buffer
+ * @size: size of ring buffer in bytes
+ */
+struct tee_init_ring_cmd {
+ u32 low_addr;
+ u32 hi_addr;
+ u32 size;
+};
+
+#define MAX_RING_BUFFER_ENTRIES 32
+
+/**
+ * struct ring_buf_manager - Helper structure to manage ring buffer.
+ * @ring_start: starting address of ring buffer
+ * @ring_size: size of ring buffer in bytes
+ * @ring_pa: physical address of ring buffer
+ * @wptr: byte offset of the next entry to be written in ring buffer
+ */
+struct ring_buf_manager {
+ struct mutex mutex; /* synchronizes access to ring buffer */
+ void *ring_start;
+ u32 ring_size;
+ phys_addr_t ring_pa;
+ u32 wptr;
+};
+
+struct psp_tee_device {
+ struct device *dev;
+ struct psp_device *psp;
+ void __iomem *io_regs;
+ struct tee_vdata *vdata;
+ struct ring_buf_manager rb_mgr;
+};
+
+/**
+ * enum tee_cmd_state - TEE command states for the ring buffer interface
+ * @TEE_CMD_STATE_INIT: initial state of command when sent from host
+ * @TEE_CMD_STATE_PROCESS: command being processed by TEE environment
+ * @TEE_CMD_STATE_COMPLETED: command processing completed
+ */
+enum tee_cmd_state {
+ TEE_CMD_STATE_INIT,
+ TEE_CMD_STATE_PROCESS,
+ TEE_CMD_STATE_COMPLETED,
+};
+
+/**
+ * struct tee_ring_cmd - Structure of the command buffer in TEE ring
+ * @cmd_id: refers to &enum tee_cmd_id. Command id for the ring buffer
+ * interface
+ * @cmd_state: refers to &enum tee_cmd_state
+ * @status: status of TEE command execution
+ * @res0: reserved region
+ * @pdata: private data (currently unused)
+ * @res1: reserved region
+ * @buf: TEE command specific buffer
+ */
+struct tee_ring_cmd {
+ u32 cmd_id;
+ u32 cmd_state;
+ u32 status;
+ u32 res0[1];
+ u64 pdata;
+ u32 res1[2];
+ u8 buf[MAX_BUFFER_SIZE];
+
+ /* Total size: 1024 bytes */
+} __packed;
+
+int tee_dev_init(struct psp_device *psp);
+void tee_dev_destroy(struct psp_device *psp);
+
+#endif /* __TEE_DEV_H__ */
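
Editor's note: the fixed entry size checked by the BUILD_BUG_ON in tee_init_ring() follows from the member layout — 3x4 (cmd_id, cmd_state, status) + 4 (res0) + 8 (pdata) + 2x4 (res1) + 992 (buf) = 1024 bytes. A compile-time restatement, assuming static_assert from <linux/build_bug.h> is available:

static_assert(sizeof(struct tee_ring_cmd) ==
	      6 * sizeof(u32) + sizeof(u64) + MAX_BUFFER_SIZE);
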
diff --git a/drivers/crypto/ccree/cc_aead.c b/drivers/crypto/ccree/cc_aead.c
index 64d318dc0d47..2fc0e0da790b 100644
--- a/drivers/crypto/ccree/cc_aead.c
+++ b/drivers/crypto/ccree/cc_aead.c
@@ -237,7 +237,7 @@ static void cc_aead_complete(struct device *dev, void *cc_req, int err)
* revealed the decrypted message --> zero its memory.
*/
sg_zero_buffer(areq->dst, sg_nents(areq->dst),
- areq->cryptlen, 0);
+ areq->cryptlen, areq->assoclen);
err = -EBADMSG;
}
/*ENCRYPT*/
@@ -385,13 +385,13 @@ static int validate_keys_sizes(struct cc_aead_ctx *ctx)
return -EINVAL;
break;
default:
- dev_err(dev, "Invalid auth_mode=%d\n", ctx->auth_mode);
+ dev_dbg(dev, "Invalid auth_mode=%d\n", ctx->auth_mode);
return -EINVAL;
}
/* Check cipher key size */
if (ctx->flow_mode == S_DIN_to_DES) {
if (ctx->enc_keylen != DES3_EDE_KEY_SIZE) {
- dev_err(dev, "Invalid cipher(3DES) key size: %u\n",
+ dev_dbg(dev, "Invalid cipher(3DES) key size: %u\n",
ctx->enc_keylen);
return -EINVAL;
}
@@ -399,7 +399,7 @@ static int validate_keys_sizes(struct cc_aead_ctx *ctx)
if (ctx->enc_keylen != AES_KEYSIZE_128 &&
ctx->enc_keylen != AES_KEYSIZE_192 &&
ctx->enc_keylen != AES_KEYSIZE_256) {
- dev_err(dev, "Invalid cipher(AES) key size: %u\n",
+ dev_dbg(dev, "Invalid cipher(AES) key size: %u\n",
ctx->enc_keylen);
return -EINVAL;
}
@@ -562,7 +562,7 @@ static int cc_aead_setkey(struct crypto_aead *tfm, const u8 *key,
rc = crypto_authenc_extractkeys(&keys, key, keylen);
if (rc)
- goto badkey;
+ return rc;
enckey = keys.enckey;
authkey = keys.authkey;
ctx->enc_keylen = keys.enckeylen;
@@ -570,10 +570,9 @@ static int cc_aead_setkey(struct crypto_aead *tfm, const u8 *key,
if (ctx->cipher_mode == DRV_CIPHER_CTR) {
/* the nonce is stored in bytes at end of key */
- rc = -EINVAL;
if (ctx->enc_keylen <
(AES_MIN_KEY_SIZE + CTR_RFC3686_NONCE_SIZE))
- goto badkey;
+ return -EINVAL;
/* Copy nonce from last 4 bytes in CTR key to
* first 4 bytes in CTR IV
*/
@@ -591,7 +590,7 @@ static int cc_aead_setkey(struct crypto_aead *tfm, const u8 *key,
rc = validate_keys_sizes(ctx);
if (rc)
- goto badkey;
+ return rc;
/* STAT_PHASE_1: Copy key to ctx */
@@ -605,7 +604,7 @@ static int cc_aead_setkey(struct crypto_aead *tfm, const u8 *key,
} else if (ctx->auth_mode != DRV_HASH_NULL) { /* HMAC */
rc = cc_get_plain_hmac_key(tfm, authkey, ctx->auth_keylen);
if (rc)
- goto badkey;
+ return rc;
}
/* STAT_PHASE_2: Create sequence */
@@ -622,8 +621,7 @@ static int cc_aead_setkey(struct crypto_aead *tfm, const u8 *key,
break; /* No auth. key setup */
default:
dev_err(dev, "Unsupported authenc (%d)\n", ctx->auth_mode);
- rc = -ENOTSUPP;
- goto badkey;
+ return -ENOTSUPP;
}
/* STAT_PHASE_3: Submit sequence to HW */
@@ -632,18 +630,12 @@ static int cc_aead_setkey(struct crypto_aead *tfm, const u8 *key,
rc = cc_send_sync_request(ctx->drvdata, &cc_req, desc, seq_len);
if (rc) {
dev_err(dev, "send_request() failed (rc=%d)\n", rc);
- goto setkey_error;
+ return rc;
}
}
/* Update STAT_PHASE_3 */
return rc;
-
-badkey:
- crypto_aead_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
-
-setkey_error:
- return rc;
}
static int cc_des3_aead_setkey(struct crypto_aead *aead, const u8 *key,
@@ -1567,7 +1559,7 @@ static int config_ccm_adata(struct aead_request *req)
/* taken from crypto/ccm.c */
/* 2 <= L <= 8, so 1 <= L' <= 7. */
if (l < 2 || l > 8) {
- dev_err(dev, "illegal iv value %X\n", req->iv[0]);
+ dev_dbg(dev, "illegal iv value %X\n", req->iv[0]);
return -EINVAL;
}
memcpy(b0, req->iv, AES_BLOCK_SIZE);
@@ -1925,7 +1917,6 @@ static int cc_proc_aead(struct aead_request *req,
if (validate_data_size(ctx, direct, req)) {
dev_err(dev, "Unsupported crypt/assoc len %d/%d.\n",
req->cryptlen, areq_ctx->assoclen);
- crypto_aead_set_flags(tfm, CRYPTO_TFM_RES_BAD_BLOCK_LEN);
return -EINVAL;
}
@@ -2065,7 +2056,7 @@ static int cc_rfc4309_ccm_encrypt(struct aead_request *req)
int rc = -EINVAL;
if (!valid_assoclen(req)) {
- dev_err(dev, "invalid Assoclen:%u\n", req->assoclen);
+ dev_dbg(dev, "invalid Assoclen:%u\n", req->assoclen);
goto out;
}
@@ -2115,7 +2106,7 @@ static int cc_rfc4309_ccm_decrypt(struct aead_request *req)
int rc = -EINVAL;
if (!valid_assoclen(req)) {
- dev_err(dev, "invalid Assoclen:%u\n", req->assoclen);
+ dev_dbg(dev, "invalid Assoclen:%u\n", req->assoclen);
goto out;
}
@@ -2234,7 +2225,7 @@ static int cc_rfc4106_gcm_encrypt(struct aead_request *req)
int rc = -EINVAL;
if (!valid_assoclen(req)) {
- dev_err(dev, "invalid Assoclen:%u\n", req->assoclen);
+ dev_dbg(dev, "invalid Assoclen:%u\n", req->assoclen);
goto out;
}
@@ -2265,7 +2256,7 @@ static int cc_rfc4543_gcm_encrypt(struct aead_request *req)
int rc = -EINVAL;
if (!valid_assoclen(req)) {
- dev_err(dev, "invalid Assoclen:%u\n", req->assoclen);
+ dev_dbg(dev, "invalid Assoclen:%u\n", req->assoclen);
goto out;
}
@@ -2299,7 +2290,7 @@ static int cc_rfc4106_gcm_decrypt(struct aead_request *req)
int rc = -EINVAL;
if (!valid_assoclen(req)) {
- dev_err(dev, "invalid Assoclen:%u\n", req->assoclen);
+ dev_dbg(dev, "invalid Assoclen:%u\n", req->assoclen);
goto out;
}
@@ -2330,7 +2321,7 @@ static int cc_rfc4543_gcm_decrypt(struct aead_request *req)
int rc = -EINVAL;
if (!valid_assoclen(req)) {
- dev_err(dev, "invalid Assoclen:%u\n", req->assoclen);
+ dev_dbg(dev, "invalid Assoclen:%u\n", req->assoclen);
goto out;
}
diff --git a/drivers/crypto/ccree/cc_cipher.c b/drivers/crypto/ccree/cc_cipher.c
index 3112b58d0bb1..7d6252d892d7 100644
--- a/drivers/crypto/ccree/cc_cipher.c
+++ b/drivers/crypto/ccree/cc_cipher.c
@@ -291,7 +291,6 @@ static int cc_cipher_sethkey(struct crypto_skcipher *sktfm, const u8 *key,
/* This check the size of the protected key token */
if (keylen != sizeof(hki)) {
dev_err(dev, "Unsupported protected key size %d.\n", keylen);
- crypto_tfm_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
return -EINVAL;
}
@@ -303,8 +302,7 @@ static int cc_cipher_sethkey(struct crypto_skcipher *sktfm, const u8 *key,
keylen = hki.keylen;
if (validate_keys_sizes(ctx_p, keylen)) {
- dev_err(dev, "Unsupported key size %d.\n", keylen);
- crypto_tfm_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
+ dev_dbg(dev, "Unsupported key size %d.\n", keylen);
return -EINVAL;
}
@@ -394,8 +392,7 @@ static int cc_cipher_setkey(struct crypto_skcipher *sktfm, const u8 *key,
/* STAT_PHASE_0: Init and sanity checks */
if (validate_keys_sizes(ctx_p, keylen)) {
- dev_err(dev, "Unsupported key size %d.\n", keylen);
- crypto_tfm_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
+ dev_dbg(dev, "Unsupported key size %d.\n", keylen);
return -EINVAL;
}
@@ -523,6 +520,7 @@ static void cc_setup_readiv_desc(struct crypto_tfm *tfm,
}
}
+
static void cc_setup_state_desc(struct crypto_tfm *tfm,
struct cipher_req_ctx *req_ctx,
unsigned int ivsize, unsigned int nbytes,
@@ -534,8 +532,6 @@ static void cc_setup_state_desc(struct crypto_tfm *tfm,
int cipher_mode = ctx_p->cipher_mode;
int flow_mode = ctx_p->flow_mode;
int direction = req_ctx->gen_ctx.op_type;
- dma_addr_t key_dma_addr = ctx_p->user.key_dma_addr;
- unsigned int key_len = ctx_p->keylen;
dma_addr_t iv_dma_addr = req_ctx->gen_ctx.iv_dma_addr;
unsigned int du_size = nbytes;
@@ -571,6 +567,47 @@ static void cc_setup_state_desc(struct crypto_tfm *tfm,
case DRV_CIPHER_XTS:
case DRV_CIPHER_ESSIV:
case DRV_CIPHER_BITLOCKER:
+ break;
+ default:
+ dev_err(dev, "Unsupported cipher mode (%d)\n", cipher_mode);
+ }
+}
+
+
+static void cc_setup_xex_state_desc(struct crypto_tfm *tfm,
+ struct cipher_req_ctx *req_ctx,
+ unsigned int ivsize, unsigned int nbytes,
+ struct cc_hw_desc desc[],
+ unsigned int *seq_size)
+{
+ struct cc_cipher_ctx *ctx_p = crypto_tfm_ctx(tfm);
+ struct device *dev = drvdata_to_dev(ctx_p->drvdata);
+ int cipher_mode = ctx_p->cipher_mode;
+ int flow_mode = ctx_p->flow_mode;
+ int direction = req_ctx->gen_ctx.op_type;
+ dma_addr_t key_dma_addr = ctx_p->user.key_dma_addr;
+ unsigned int key_len = ctx_p->keylen;
+ dma_addr_t iv_dma_addr = req_ctx->gen_ctx.iv_dma_addr;
+ unsigned int du_size = nbytes;
+
+ struct cc_crypto_alg *cc_alg =
+ container_of(tfm->__crt_alg, struct cc_crypto_alg,
+ skcipher_alg.base);
+
+ if (cc_alg->data_unit)
+ du_size = cc_alg->data_unit;
+
+ switch (cipher_mode) {
+ case DRV_CIPHER_ECB:
+ break;
+ case DRV_CIPHER_CBC:
+ case DRV_CIPHER_CBC_CTS:
+ case DRV_CIPHER_CTR:
+ case DRV_CIPHER_OFB:
+ break;
+ case DRV_CIPHER_XTS:
+ case DRV_CIPHER_ESSIV:
+ case DRV_CIPHER_BITLOCKER:
/* load XEX key */
hw_desc_init(&desc[*seq_size]);
set_cipher_mode(&desc[*seq_size], cipher_mode);
@@ -836,8 +873,7 @@ static int cc_cipher_process(struct skcipher_request *req,
/* TODO: check data length according to mode */
if (validate_data_size(ctx_p, nbytes)) {
- dev_err(dev, "Unsupported data size %d.\n", nbytes);
- crypto_tfm_set_flags(tfm, CRYPTO_TFM_RES_BAD_BLOCK_LEN);
+ dev_dbg(dev, "Unsupported data size %d.\n", nbytes);
rc = -EINVAL;
goto exit_process;
}
@@ -881,12 +917,14 @@ static int cc_cipher_process(struct skcipher_request *req,
/* STAT_PHASE_2: Create sequence */
- /* Setup IV and XEX key used */
+ /* Setup state (IV) */
cc_setup_state_desc(tfm, req_ctx, ivsize, nbytes, desc, &seq_len);
/* Setup MLLI line, if needed */
cc_setup_mlli_desc(tfm, req_ctx, dst, src, nbytes, req, desc, &seq_len);
/* Setup key */
cc_setup_key_desc(tfm, req_ctx, nbytes, desc, &seq_len);
+ /* Setup state (IV and XEX key) */
+ cc_setup_xex_state_desc(tfm, req_ctx, ivsize, nbytes, desc, &seq_len);
/* Data processing */
cc_setup_flow_desc(tfm, req_ctx, dst, src, nbytes, desc, &seq_len);
/* Read next IV */
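
The cc_cipher.c rework above splits the old state setup in two: cc_setup_state_desc() now programs only the IV, and the new cc_setup_xex_state_desc() loads the XEX/XTS tweak material after cc_setup_key_desc() has run, presumably so the tweak-key load follows the cipher-key programming. A condensed sketch of the resulting build order in cc_cipher_process(), error handling omitted:

/* Descriptor sequence after the split (sketch) */
cc_setup_state_desc(tfm, req_ctx, ivsize, nbytes, desc, &seq_len);      /* IV only */
cc_setup_mlli_desc(tfm, req_ctx, dst, src, nbytes, req, desc, &seq_len);
cc_setup_key_desc(tfm, req_ctx, nbytes, desc, &seq_len);                /* cipher key */
cc_setup_xex_state_desc(tfm, req_ctx, ivsize, nbytes, desc, &seq_len);  /* XEX key + IV */
cc_setup_flow_desc(tfm, req_ctx, dst, src, nbytes, desc, &seq_len);     /* data */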
diff --git a/drivers/crypto/ccree/cc_driver.c b/drivers/crypto/ccree/cc_driver.c
index 8b8eee513c27..532bc95a8373 100644
--- a/drivers/crypto/ccree/cc_driver.c
+++ b/drivers/crypto/ccree/cc_driver.c
@@ -133,7 +133,7 @@ static irqreturn_t cc_isr(int irq, void *dev_id)
u32 imr;
/* STAT_OP_TYPE_GENERIC STAT_PHASE_0: Interrupt */
- /* if driver suspended return, probebly shared interrupt */
+ /* if driver suspended return, probably shared interrupt */
if (cc_pm_is_dev_suspended(dev))
return IRQ_NONE;
@@ -271,6 +271,7 @@ static int init_cc_resources(struct platform_device *plat_dev)
const struct cc_hw_data *hw_rev;
const struct of_device_id *dev_id;
struct clk *clk;
+ int irq;
int rc = 0;
new_drvdata = devm_kzalloc(dev, sizeof(*new_drvdata), GFP_KERNEL);
@@ -337,9 +338,9 @@ static int init_cc_resources(struct platform_device *plat_dev)
&req_mem_cc_regs->start, new_drvdata->cc_base);
/* Then IRQ */
- new_drvdata->irq = platform_get_irq(plat_dev, 0);
- if (new_drvdata->irq < 0)
- return new_drvdata->irq;
+ irq = platform_get_irq(plat_dev, 0);
+ if (irq < 0)
+ return irq;
init_completion(&new_drvdata->hw_queue_avail);
@@ -442,14 +443,13 @@ static int init_cc_resources(struct platform_device *plat_dev)
dev_info(dev, "ARM CryptoCell %s Driver: HW version 0x%08X/0x%8X, Driver version %s\n",
hw_rev->name, hw_rev_pidr, sig_cidr, DRV_MODULE_VERSION);
/* register the driver isr function */
- rc = devm_request_irq(dev, new_drvdata->irq, cc_isr,
- IRQF_SHARED, "ccree", new_drvdata);
+ rc = devm_request_irq(dev, irq, cc_isr, IRQF_SHARED, "ccree",
+ new_drvdata);
if (rc) {
- dev_err(dev, "Could not register to interrupt %d\n",
- new_drvdata->irq);
+ dev_err(dev, "Could not register to interrupt %d\n", irq);
goto post_clk_err;
}
- dev_dbg(dev, "Registered to IRQ: %d\n", new_drvdata->irq);
+ dev_dbg(dev, "Registered to IRQ: %d\n", irq);
rc = init_cc_regs(new_drvdata, true);
if (rc) {
@@ -465,7 +465,7 @@ static int init_cc_resources(struct platform_device *plat_dev)
rc = cc_fips_init(new_drvdata);
if (rc) {
- dev_err(dev, "CC_FIPS_INIT failed 0x%x\n", rc);
+ dev_err(dev, "cc_fips_init failed 0x%x\n", rc);
goto post_debugfs_err;
}
rc = cc_sram_mgr_init(new_drvdata);
@@ -490,13 +490,13 @@ static int init_cc_resources(struct platform_device *plat_dev)
rc = cc_buffer_mgr_init(new_drvdata);
if (rc) {
- dev_err(dev, "buffer_mgr_init failed\n");
+ dev_err(dev, "cc_buffer_mgr_init failed\n");
goto post_req_mgr_err;
}
rc = cc_pm_init(new_drvdata);
if (rc) {
- dev_err(dev, "ssi_power_mgr_init failed\n");
+ dev_err(dev, "cc_pm_init failed\n");
goto post_buf_mgr_err;
}
diff --git a/drivers/crypto/ccree/cc_driver.h b/drivers/crypto/ccree/cc_driver.h
index ab31d4a68c80..c227718ba992 100644
--- a/drivers/crypto/ccree/cc_driver.h
+++ b/drivers/crypto/ccree/cc_driver.h
@@ -28,7 +28,6 @@
/* Registers definitions from shared/hw/ree_include */
#include "cc_host_regs.h"
-#define CC_DEV_SHA_MAX 512
#include "cc_crypto_ctx.h"
#include "cc_hw_queue_defs.h"
#include "cc_sram_mgr.h"
@@ -133,13 +132,11 @@ struct cc_crypto_req {
/**
* struct cc_drvdata - driver private data context
* @cc_base: virt address of the CC registers
- * @irq: device IRQ number
- * @irq_mask: Interrupt mask shadow (1 for masked interrupts)
+ * @irq: bitmap indicating source of last interrupt
*/
struct cc_drvdata {
void __iomem *cc_base;
int irq;
- u32 irq_mask;
struct completion hw_queue_avail; /* wait for HW queue availability */
struct platform_device *plat_dev;
cc_sram_addr_t mlli_sram_addr;
@@ -161,6 +158,7 @@ struct cc_drvdata {
int std_bodies;
bool sec_disabled;
u32 comp_mask;
+ bool pm_on;
};
struct cc_crypto_alg {
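
The retained @irq field is re-documented as a cache of the last interrupt-source bitmap, and the new pm_on flag marks the window in which runtime-PM calls are legal (set in cc_pm_go(), cleared in cc_pm_fini(); see cc_pm.c below). A sketch of the assumed ISR-side usage, not verbatim driver code:

/* Sketch: the ISR snapshots the cause register into drvdata->irq so
 * deferred handling can consume it later.
 */
static irqreturn_t isr_sketch(int irq, void *dev_id)
{
        struct cc_drvdata *drvdata = dev_id;

        drvdata->irq = cc_ioread(drvdata, CC_REG(HOST_IRR));
        if (!drvdata->irq)
                return IRQ_NONE;        /* likely a shared-IRQ neighbour */
        /* mask and dispatch the active sources here */
        return IRQ_HANDLED;
}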
diff --git a/drivers/crypto/ccree/cc_fips.c b/drivers/crypto/ccree/cc_fips.c
index 4c8bce33abcf..702aefc21447 100644
--- a/drivers/crypto/ccree/cc_fips.c
+++ b/drivers/crypto/ccree/cc_fips.c
@@ -120,7 +120,7 @@ static void fips_dsr(unsigned long devarg)
cc_tee_handle_fips_error(drvdata);
}
- /* after verifing that there is nothing to do,
+ /* after verifying that there is nothing to do,
* unmask AXI completion interrupt.
*/
val = (CC_REG(HOST_IMR) & ~irq);
diff --git a/drivers/crypto/ccree/cc_hash.c b/drivers/crypto/ccree/cc_hash.c
index bc71bdf44a9f..912e5ce5079d 100644
--- a/drivers/crypto/ccree/cc_hash.c
+++ b/drivers/crypto/ccree/cc_hash.c
@@ -899,9 +899,6 @@ static int cc_hash_setkey(struct crypto_ahash *ahash, const u8 *key,
rc = cc_send_sync_request(ctx->drvdata, &cc_req, desc, idx);
out:
- if (rc)
- crypto_ahash_set_flags(ahash, CRYPTO_TFM_RES_BAD_KEY_LEN);
-
if (ctx->key_params.key_dma_addr) {
dma_unmap_single(dev, ctx->key_params.key_dma_addr,
ctx->key_params.keylen, DMA_TO_DEVICE);
@@ -990,9 +987,6 @@ static int cc_xcbc_setkey(struct crypto_ahash *ahash,
rc = cc_send_sync_request(ctx->drvdata, &cc_req, desc, idx);
- if (rc)
- crypto_ahash_set_flags(ahash, CRYPTO_TFM_RES_BAD_KEY_LEN);
-
dma_unmap_single(dev, ctx->key_params.key_dma_addr,
ctx->key_params.keylen, DMA_TO_DEVICE);
dev_dbg(dev, "Unmapped key-buffer: key_dma_addr=%pad keylen=%u\n",
@@ -2358,11 +2352,9 @@ cc_digest_len_addr(void *drvdata, u32 mode)
case DRV_HASH_SHA256:
case DRV_HASH_MD5:
return digest_len_addr;
-#if (CC_DEV_SHA_MAX > 256)
case DRV_HASH_SHA384:
case DRV_HASH_SHA512:
return digest_len_addr + sizeof(cc_digest_len_init);
-#endif
default:
return digest_len_addr; /*to avoid kernel crash*/
}
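
The setkey hunks above (and the chelsio and geode ones further down) are part of a tree-wide cleanup: the CRYPTO_TFM_RES_* status flags were removed from the crypto API, so a rejected key is reported through the return value alone and the flag bookkeeping becomes dead code. The pattern reduces to:

/* Sketch of the post-cleanup setkey error path */
static int setkey_sketch(struct crypto_skcipher *tfm, const u8 *key,
                         unsigned int keylen)
{
        if (keylen != AES_KEYSIZE_128 && keylen != AES_KEYSIZE_256)
                return -EINVAL; /* no crypto_skcipher_set_flags() dance */

        /* ... program the key into the context ... */
        return 0;
}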
diff --git a/drivers/crypto/ccree/cc_pm.c b/drivers/crypto/ccree/cc_pm.c
index dbc508fb719b..24c368b866f6 100644
--- a/drivers/crypto/ccree/cc_pm.c
+++ b/drivers/crypto/ccree/cc_pm.c
@@ -22,14 +22,8 @@ const struct dev_pm_ops ccree_pm = {
int cc_pm_suspend(struct device *dev)
{
struct cc_drvdata *drvdata = dev_get_drvdata(dev);
- int rc;
dev_dbg(dev, "set HOST_POWER_DOWN_EN\n");
- rc = cc_suspend_req_queue(drvdata);
- if (rc) {
- dev_err(dev, "cc_suspend_req_queue (%x)\n", rc);
- return rc;
- }
fini_cc_regs(drvdata);
cc_iowrite(drvdata, CC_REG(HOST_POWER_DOWN_EN), POWER_DOWN_ENABLE);
cc_clk_off(drvdata);
@@ -48,7 +42,7 @@ int cc_pm_resume(struct device *dev)
dev_err(dev, "failed getting clock back on. We're toast.\n");
return rc;
}
- /* wait for Crytpcell reset completion */
+ /* wait for Cryptocell reset completion */
if (!cc_wait_for_reset_completion(drvdata)) {
dev_err(dev, "Cryptocell reset not completed");
return -EBUSY;
@@ -63,13 +57,6 @@ int cc_pm_resume(struct device *dev)
/* check if tee fips error occurred during power down */
cc_tee_handle_fips_error(drvdata);
- rc = cc_resume_req_queue(drvdata);
- if (rc) {
- dev_err(dev, "cc_resume_req_queue (%x)\n", rc);
- return rc;
- }
-
- /* must be after the queue resuming as it uses the HW queue*/
cc_init_hash_sram(drvdata);
return 0;
@@ -80,28 +67,20 @@ int cc_pm_get(struct device *dev)
int rc = 0;
struct cc_drvdata *drvdata = dev_get_drvdata(dev);
- if (cc_req_queue_suspended(drvdata))
+ if (drvdata->pm_on)
rc = pm_runtime_get_sync(dev);
- else
- pm_runtime_get_noresume(dev);
- return rc;
+ return (rc == 1 ? 0 : rc);
}
-int cc_pm_put_suspend(struct device *dev)
+void cc_pm_put_suspend(struct device *dev)
{
- int rc = 0;
struct cc_drvdata *drvdata = dev_get_drvdata(dev);
- if (!cc_req_queue_suspended(drvdata)) {
+ if (drvdata->pm_on) {
pm_runtime_mark_last_busy(dev);
- rc = pm_runtime_put_autosuspend(dev);
- } else {
- /* Something wrong happens*/
- dev_err(dev, "request to suspend already suspended queue");
- rc = -EBUSY;
+ pm_runtime_put_autosuspend(dev);
}
- return rc;
}
bool cc_pm_is_dev_suspended(struct device *dev)
@@ -114,10 +93,10 @@ int cc_pm_init(struct cc_drvdata *drvdata)
{
struct device *dev = drvdata_to_dev(drvdata);
- /* must be before the enabling to avoid resdundent suspending */
+ /* must be before the enabling to avoid redundant suspending */
pm_runtime_set_autosuspend_delay(dev, CC_SUSPEND_TIMEOUT);
pm_runtime_use_autosuspend(dev);
- /* activate the PM module */
+ /* set us as active - note we won't do PM ops until cc_pm_go()! */
return pm_runtime_set_active(dev);
}
@@ -125,9 +104,11 @@ int cc_pm_init(struct cc_drvdata *drvdata)
void cc_pm_go(struct cc_drvdata *drvdata)
{
pm_runtime_enable(drvdata_to_dev(drvdata));
+ drvdata->pm_on = true;
}
void cc_pm_fini(struct cc_drvdata *drvdata)
{
pm_runtime_disable(drvdata_to_dev(drvdata));
+ drvdata->pm_on = false;
}
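
Two runtime-PM simplifications above: cc_pm_get() folds pm_runtime_get_sync()'s "already active" return value of 1 into 0 so callers only ever see real errors, and cc_pm_put_suspend() becomes void because, with the pm_on flag gating the calls, there is no failure mode left to report. Sketch of balanced caller usage under these assumptions:

/* Sketch: get/put bracketing a request */
rc = cc_pm_get(dev);
if (rc)
        return rc;              /* 0 even when the device was already on */

/* ... push the request to the HW queue ... */

cc_pm_put_suspend(dev);         /* mark last-busy, drop the reference */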
diff --git a/drivers/crypto/ccree/cc_pm.h b/drivers/crypto/ccree/cc_pm.h
index a7d98a5da2e1..80a18e11cae4 100644
--- a/drivers/crypto/ccree/cc_pm.h
+++ b/drivers/crypto/ccree/cc_pm.h
@@ -21,7 +21,7 @@ void cc_pm_fini(struct cc_drvdata *drvdata);
int cc_pm_suspend(struct device *dev);
int cc_pm_resume(struct device *dev);
int cc_pm_get(struct device *dev);
-int cc_pm_put_suspend(struct device *dev);
+void cc_pm_put_suspend(struct device *dev);
bool cc_pm_is_dev_suspended(struct device *dev);
#else
@@ -35,25 +35,12 @@ static inline void cc_pm_go(struct cc_drvdata *drvdata) {}
static inline void cc_pm_fini(struct cc_drvdata *drvdata) {}
-static inline int cc_pm_suspend(struct device *dev)
-{
- return 0;
-}
-
-static inline int cc_pm_resume(struct device *dev)
-{
- return 0;
-}
-
static inline int cc_pm_get(struct device *dev)
{
return 0;
}
-static inline int cc_pm_put_suspend(struct device *dev)
-{
- return 0;
-}
+static inline void cc_pm_put_suspend(struct device *dev) {}
static inline bool cc_pm_is_dev_suspended(struct device *dev)
{
diff --git a/drivers/crypto/ccree/cc_request_mgr.c b/drivers/crypto/ccree/cc_request_mgr.c
index a947d5a2cf35..9d61e6f12478 100644
--- a/drivers/crypto/ccree/cc_request_mgr.c
+++ b/drivers/crypto/ccree/cc_request_mgr.c
@@ -41,7 +41,6 @@ struct cc_req_mgr_handle {
#else
struct tasklet_struct comptask;
#endif
- bool is_runtime_suspended;
};
struct cc_bl_item {
@@ -230,7 +229,7 @@ static int cc_queues_status(struct cc_drvdata *drvdata,
struct device *dev = drvdata_to_dev(drvdata);
/* SW queue is checked only once as it will not
- * be chaned during the poll because the spinlock_bh
+ * be changed during the poll because the spinlock_bh
* is held by the thread
*/
if (((req_mgr_h->req_queue_head + 1) & (MAX_REQUEST_QUEUE_SIZE - 1)) ==
@@ -275,12 +274,11 @@ static int cc_queues_status(struct cc_drvdata *drvdata,
* \param len The crypto sequence length
* \param add_comp If "true": add an artificial dout DMA to mark completion
*
- * \return int Returns -EINPROGRESS or error code
*/
-static int cc_do_send_request(struct cc_drvdata *drvdata,
- struct cc_crypto_req *cc_req,
- struct cc_hw_desc *desc, unsigned int len,
- bool add_comp)
+static void cc_do_send_request(struct cc_drvdata *drvdata,
+ struct cc_crypto_req *cc_req,
+ struct cc_hw_desc *desc, unsigned int len,
+ bool add_comp)
{
struct cc_req_mgr_handle *req_mgr_h = drvdata->request_mgr_handle;
unsigned int used_sw_slots;
@@ -303,8 +301,8 @@ static int cc_do_send_request(struct cc_drvdata *drvdata,
/*
* We are about to push command to the HW via the command registers
- * that may refernece hsot memory. We need to issue a memory barrier
- * to make sure there are no outstnading memory writes
+ * that may reference host memory. We need to issue a memory barrier
+ * to make sure there are no outstanding memory writes
*/
wmb();
@@ -328,9 +326,6 @@ static int cc_do_send_request(struct cc_drvdata *drvdata,
/* Update the free slots in HW queue */
req_mgr_h->q_free_slots -= total_seq_len;
}
-
- /* Operation still in process */
- return -EINPROGRESS;
}
static void cc_enqueue_backlog(struct cc_drvdata *drvdata,
@@ -390,20 +385,15 @@ static void cc_proc_backlog(struct cc_drvdata *drvdata)
return;
}
- rc = cc_do_send_request(drvdata, &bli->creq, bli->desc,
- bli->len, false);
-
+ cc_do_send_request(drvdata, &bli->creq, bli->desc, bli->len,
+ false);
spin_unlock(&mgr->hw_lock);
- if (rc != -EINPROGRESS) {
- cc_pm_put_suspend(dev);
- creq->user_cb(dev, req, rc);
- }
-
/* Remove ourselves from the backlog list */
spin_lock(&mgr->bl_lock);
list_del(&bli->list);
--mgr->bl_len;
+ kfree(bli);
}
spin_unlock(&mgr->bl_lock);
@@ -422,7 +412,7 @@ int cc_send_request(struct cc_drvdata *drvdata, struct cc_crypto_req *cc_req,
rc = cc_pm_get(dev);
if (rc) {
- dev_err(dev, "ssi_power_mgr_runtime_get returned %x\n", rc);
+ dev_err(dev, "cc_pm_get returned %x\n", rc);
return rc;
}
@@ -451,8 +441,10 @@ int cc_send_request(struct cc_drvdata *drvdata, struct cc_crypto_req *cc_req,
return -EBUSY;
}
- if (!rc)
- rc = cc_do_send_request(drvdata, cc_req, desc, len, false);
+ if (!rc) {
+ cc_do_send_request(drvdata, cc_req, desc, len, false);
+ rc = -EINPROGRESS;
+ }
spin_unlock_bh(&mgr->hw_lock);
return rc;
@@ -472,7 +464,7 @@ int cc_send_sync_request(struct cc_drvdata *drvdata,
rc = cc_pm_get(dev);
if (rc) {
- dev_err(dev, "ssi_power_mgr_runtime_get returned %x\n", rc);
+ dev_err(dev, "cc_pm_get returned %x\n", rc);
return rc;
}
@@ -492,14 +484,8 @@ int cc_send_sync_request(struct cc_drvdata *drvdata,
reinit_completion(&drvdata->hw_queue_avail);
}
- rc = cc_do_send_request(drvdata, cc_req, desc, len, true);
+ cc_do_send_request(drvdata, cc_req, desc, len, true);
spin_unlock_bh(&mgr->hw_lock);
-
- if (rc != -EINPROGRESS) {
- cc_pm_put_suspend(dev);
- return rc;
- }
-
wait_for_completion(&cc_req->seq_compl);
return 0;
}
@@ -532,8 +518,8 @@ int send_request_init(struct cc_drvdata *drvdata, struct cc_hw_desc *desc,
/*
* We are about to push command to the HW via the command registers
- * that may refernece hsot memory. We need to issue a memory barrier
- * to make sure there are no outstnading memory writes
+ * that may reference host memory. We need to issue a memory barrier
+ * to make sure there are no outstanding memory writes
*/
wmb();
enqueue_seq(drvdata, desc, len);
@@ -668,7 +654,7 @@ static void comp_handler(unsigned long devarg)
request_mgr_handle->axi_completed += cc_axi_comp_count(drvdata);
}
- /* after verifing that there is nothing to do,
+ /* after verifying that there is nothing to do,
* unmask AXI completion interrupt
*/
cc_iowrite(drvdata, CC_REG(HOST_IMR),
@@ -677,52 +663,3 @@ static void comp_handler(unsigned long devarg)
cc_proc_backlog(drvdata);
dev_dbg(dev, "Comp. handler done.\n");
}
-
-/*
- * resume the queue configuration - no need to take the lock as this happens
- * inside the spin lock protection
- */
-#if defined(CONFIG_PM)
-int cc_resume_req_queue(struct cc_drvdata *drvdata)
-{
- struct cc_req_mgr_handle *request_mgr_handle =
- drvdata->request_mgr_handle;
-
- spin_lock_bh(&request_mgr_handle->hw_lock);
- request_mgr_handle->is_runtime_suspended = false;
- spin_unlock_bh(&request_mgr_handle->hw_lock);
-
- return 0;
-}
-
-/*
- * suspend the queue configuration. Since it is used for the runtime suspend
- * only verify that the queue can be suspended.
- */
-int cc_suspend_req_queue(struct cc_drvdata *drvdata)
-{
- struct cc_req_mgr_handle *request_mgr_handle =
- drvdata->request_mgr_handle;
-
- /* lock the send_request */
- spin_lock_bh(&request_mgr_handle->hw_lock);
- if (request_mgr_handle->req_queue_head !=
- request_mgr_handle->req_queue_tail) {
- spin_unlock_bh(&request_mgr_handle->hw_lock);
- return -EBUSY;
- }
- request_mgr_handle->is_runtime_suspended = true;
- spin_unlock_bh(&request_mgr_handle->hw_lock);
-
- return 0;
-}
-
-bool cc_req_queue_suspended(struct cc_drvdata *drvdata)
-{
- struct cc_req_mgr_handle *request_mgr_handle =
- drvdata->request_mgr_handle;
-
- return request_mgr_handle->is_runtime_suspended;
-}
-
-#endif
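
The request-manager rework relies on cc_queues_status() having already guaranteed queue space, so cc_do_send_request() cannot fail and is made void; the -EINPROGRESS convention moves to its callers. It also fixes a leak: the backlog item dequeued in cc_proc_backlog() was never freed before the kfree(bli) added here. Caller-side sketch:

/* Sketch: space is verified first, so the send itself cannot fail */
rc = cc_queues_status(drvdata, mgr, len);
if (!rc) {
        cc_do_send_request(drvdata, cc_req, desc, len, false);
        rc = -EINPROGRESS;      /* completion path invokes the callback */
}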
diff --git a/drivers/crypto/ccree/cc_request_mgr.h b/drivers/crypto/ccree/cc_request_mgr.h
index f46cf766fe4d..ff7746aaaf35 100644
--- a/drivers/crypto/ccree/cc_request_mgr.h
+++ b/drivers/crypto/ccree/cc_request_mgr.h
@@ -40,12 +40,4 @@ void complete_request(struct cc_drvdata *drvdata);
void cc_req_mgr_fini(struct cc_drvdata *drvdata);
-#if defined(CONFIG_PM)
-int cc_resume_req_queue(struct cc_drvdata *drvdata);
-
-int cc_suspend_req_queue(struct cc_drvdata *drvdata);
-
-bool cc_req_queue_suspended(struct cc_drvdata *drvdata);
-#endif
-
#endif /*__REQUEST_MGR_H__*/
diff --git a/drivers/crypto/chelsio/Kconfig b/drivers/crypto/chelsio/Kconfig
index 91e424378217..f078b2686418 100644
--- a/drivers/crypto/chelsio/Kconfig
+++ b/drivers/crypto/chelsio/Kconfig
@@ -23,22 +23,22 @@ config CRYPTO_DEV_CHELSIO
will be called chcr.
config CHELSIO_IPSEC_INLINE
- bool "Chelsio IPSec XFRM Tx crypto offload"
- depends on CHELSIO_T4
+ bool "Chelsio IPSec XFRM Tx crypto offload"
+ depends on CHELSIO_T4
depends on CRYPTO_DEV_CHELSIO
- depends on XFRM_OFFLOAD
- depends on INET_ESP_OFFLOAD || INET6_ESP_OFFLOAD
- default n
- ---help---
- Enable support for IPSec Tx Inline.
+ depends on XFRM_OFFLOAD
+ depends on INET_ESP_OFFLOAD || INET6_ESP_OFFLOAD
+ default n
+ ---help---
+ Enable support for IPSec Tx Inline.
config CRYPTO_DEV_CHELSIO_TLS
- tristate "Chelsio Crypto Inline TLS Driver"
- depends on CHELSIO_T4
- depends on TLS_TOE
- select CRYPTO_DEV_CHELSIO
- ---help---
- Support Chelsio Inline TLS with Chelsio crypto accelerator.
+ tristate "Chelsio Crypto Inline TLS Driver"
+ depends on CHELSIO_T4
+ depends on TLS_TOE
+ select CRYPTO_DEV_CHELSIO
+ ---help---
+ Support Chelsio Inline TLS with Chelsio crypto accelerator.
- To compile this driver as a module, choose M here: the module
- will be called chtls.
+ To compile this driver as a module, choose M here: the module
+ will be called chtls.
diff --git a/drivers/crypto/chelsio/chcr_algo.c b/drivers/crypto/chelsio/chcr_algo.c
index 1b4a5664e604..b4b9b22125d1 100644
--- a/drivers/crypto/chelsio/chcr_algo.c
+++ b/drivers/crypto/chelsio/chcr_algo.c
@@ -870,20 +870,13 @@ static int chcr_cipher_fallback_setkey(struct crypto_skcipher *cipher,
const u8 *key,
unsigned int keylen)
{
- struct crypto_tfm *tfm = crypto_skcipher_tfm(cipher);
struct ablk_ctx *ablkctx = ABLK_CTX(c_ctx(cipher));
- int err = 0;
crypto_sync_skcipher_clear_flags(ablkctx->sw_cipher,
CRYPTO_TFM_REQ_MASK);
crypto_sync_skcipher_set_flags(ablkctx->sw_cipher,
cipher->base.crt_flags & CRYPTO_TFM_REQ_MASK);
- err = crypto_sync_skcipher_setkey(ablkctx->sw_cipher, key, keylen);
- tfm->crt_flags &= ~CRYPTO_TFM_RES_MASK;
- tfm->crt_flags |=
- crypto_sync_skcipher_get_flags(ablkctx->sw_cipher) &
- CRYPTO_TFM_RES_MASK;
- return err;
+ return crypto_sync_skcipher_setkey(ablkctx->sw_cipher, key, keylen);
}
static int chcr_aes_cbc_setkey(struct crypto_skcipher *cipher,
@@ -912,7 +905,6 @@ static int chcr_aes_cbc_setkey(struct crypto_skcipher *cipher,
ablkctx->ciph_mode = CHCR_SCMD_CIPHER_MODE_AES_CBC;
return 0;
badkey_err:
- crypto_skcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
ablkctx->enckey_len = 0;
return err;
@@ -943,7 +935,6 @@ static int chcr_aes_ctr_setkey(struct crypto_skcipher *cipher,
return 0;
badkey_err:
- crypto_skcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
ablkctx->enckey_len = 0;
return err;
@@ -981,7 +972,6 @@ static int chcr_aes_rfc3686_setkey(struct crypto_skcipher *cipher,
return 0;
badkey_err:
- crypto_skcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
ablkctx->enckey_len = 0;
return err;
@@ -1379,7 +1369,8 @@ static int chcr_device_init(struct chcr_context *ctx)
txq_perchan = ntxq / u_ctx->lldi.nchan;
spin_lock(&ctx->dev->lock_chcr_dev);
ctx->tx_chan_id = ctx->dev->tx_channel_id;
- ctx->dev->tx_channel_id = !ctx->dev->tx_channel_id;
+ ctx->dev->tx_channel_id =
+ (ctx->dev->tx_channel_id + 1) % u_ctx->lldi.nchan;
spin_unlock(&ctx->dev->lock_chcr_dev);
rxq_idx = ctx->tx_chan_id * rxq_perchan;
rxq_idx += id % rxq_perchan;
@@ -2173,7 +2164,6 @@ static int chcr_aes_xts_setkey(struct crypto_skcipher *cipher, const u8 *key,
ablkctx->ciph_mode = CHCR_SCMD_CIPHER_MODE_AES_XTS;
return 0;
badkey_err:
- crypto_skcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
ablkctx->enckey_len = 0;
return err;
@@ -3195,9 +3185,6 @@ static int chcr_gcm_setauthsize(struct crypto_aead *tfm, unsigned int authsize)
aeadctx->mayverify = VERIFY_SW;
break;
default:
-
- crypto_tfm_set_flags((struct crypto_tfm *) tfm,
- CRYPTO_TFM_RES_BAD_KEY_LEN);
return -EINVAL;
}
return crypto_aead_setauthsize(aeadctx->sw_cipher, authsize);
@@ -3222,8 +3209,6 @@ static int chcr_4106_4309_setauthsize(struct crypto_aead *tfm,
aeadctx->mayverify = VERIFY_HW;
break;
default:
- crypto_tfm_set_flags((struct crypto_tfm *)tfm,
- CRYPTO_TFM_RES_BAD_KEY_LEN);
return -EINVAL;
}
return crypto_aead_setauthsize(aeadctx->sw_cipher, authsize);
@@ -3264,8 +3249,6 @@ static int chcr_ccm_setauthsize(struct crypto_aead *tfm,
aeadctx->mayverify = VERIFY_HW;
break;
default:
- crypto_tfm_set_flags((struct crypto_tfm *)tfm,
- CRYPTO_TFM_RES_BAD_KEY_LEN);
return -EINVAL;
}
return crypto_aead_setauthsize(aeadctx->sw_cipher, authsize);
@@ -3290,8 +3273,6 @@ static int chcr_ccm_common_setkey(struct crypto_aead *aead,
ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_256;
mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_256;
} else {
- crypto_tfm_set_flags((struct crypto_tfm *)aead,
- CRYPTO_TFM_RES_BAD_KEY_LEN);
aeadctx->enckey_len = 0;
return -EINVAL;
}
@@ -3314,9 +3295,6 @@ static int chcr_aead_ccm_setkey(struct crypto_aead *aead,
crypto_aead_set_flags(aeadctx->sw_cipher, crypto_aead_get_flags(aead) &
CRYPTO_TFM_REQ_MASK);
error = crypto_aead_setkey(aeadctx->sw_cipher, key, keylen);
- crypto_aead_clear_flags(aead, CRYPTO_TFM_RES_MASK);
- crypto_aead_set_flags(aead, crypto_aead_get_flags(aeadctx->sw_cipher) &
- CRYPTO_TFM_RES_MASK);
if (error)
return error;
return chcr_ccm_common_setkey(aead, key, keylen);
@@ -3329,8 +3307,6 @@ static int chcr_aead_rfc4309_setkey(struct crypto_aead *aead, const u8 *key,
int error;
if (keylen < 3) {
- crypto_tfm_set_flags((struct crypto_tfm *)aead,
- CRYPTO_TFM_RES_BAD_KEY_LEN);
aeadctx->enckey_len = 0;
return -EINVAL;
}
@@ -3338,9 +3314,6 @@ static int chcr_aead_rfc4309_setkey(struct crypto_aead *aead, const u8 *key,
crypto_aead_set_flags(aeadctx->sw_cipher, crypto_aead_get_flags(aead) &
CRYPTO_TFM_REQ_MASK);
error = crypto_aead_setkey(aeadctx->sw_cipher, key, keylen);
- crypto_aead_clear_flags(aead, CRYPTO_TFM_RES_MASK);
- crypto_aead_set_flags(aead, crypto_aead_get_flags(aeadctx->sw_cipher) &
- CRYPTO_TFM_RES_MASK);
if (error)
return error;
keylen -= 3;
@@ -3362,9 +3335,6 @@ static int chcr_gcm_setkey(struct crypto_aead *aead, const u8 *key,
crypto_aead_set_flags(aeadctx->sw_cipher, crypto_aead_get_flags(aead)
& CRYPTO_TFM_REQ_MASK);
ret = crypto_aead_setkey(aeadctx->sw_cipher, key, keylen);
- crypto_aead_clear_flags(aead, CRYPTO_TFM_RES_MASK);
- crypto_aead_set_flags(aead, crypto_aead_get_flags(aeadctx->sw_cipher) &
- CRYPTO_TFM_RES_MASK);
if (ret)
goto out;
@@ -3380,8 +3350,6 @@ static int chcr_gcm_setkey(struct crypto_aead *aead, const u8 *key,
} else if (keylen == AES_KEYSIZE_256) {
ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_256;
} else {
- crypto_tfm_set_flags((struct crypto_tfm *)aead,
- CRYPTO_TFM_RES_BAD_KEY_LEN);
pr_err("GCM: Invalid key length %d\n", keylen);
ret = -EINVAL;
goto out;
@@ -3432,16 +3400,11 @@ static int chcr_authenc_setkey(struct crypto_aead *authenc, const u8 *key,
crypto_aead_set_flags(aeadctx->sw_cipher, crypto_aead_get_flags(authenc)
& CRYPTO_TFM_REQ_MASK);
err = crypto_aead_setkey(aeadctx->sw_cipher, key, keylen);
- crypto_aead_clear_flags(authenc, CRYPTO_TFM_RES_MASK);
- crypto_aead_set_flags(authenc, crypto_aead_get_flags(aeadctx->sw_cipher)
- & CRYPTO_TFM_RES_MASK);
if (err)
goto out;
- if (crypto_authenc_extractkeys(&keys, key, keylen) != 0) {
- crypto_aead_set_flags(authenc, CRYPTO_TFM_RES_BAD_KEY_LEN);
+ if (crypto_authenc_extractkeys(&keys, key, keylen) != 0)
goto out;
- }
if (get_alg_config(&param, max_authsize)) {
pr_err("chcr : Unsupported digest size\n");
@@ -3562,16 +3525,12 @@ static int chcr_aead_digest_null_setkey(struct crypto_aead *authenc,
crypto_aead_set_flags(aeadctx->sw_cipher, crypto_aead_get_flags(authenc)
& CRYPTO_TFM_REQ_MASK);
err = crypto_aead_setkey(aeadctx->sw_cipher, key, keylen);
- crypto_aead_clear_flags(authenc, CRYPTO_TFM_RES_MASK);
- crypto_aead_set_flags(authenc, crypto_aead_get_flags(aeadctx->sw_cipher)
- & CRYPTO_TFM_RES_MASK);
if (err)
goto out;
- if (crypto_authenc_extractkeys(&keys, key, keylen) != 0) {
- crypto_aead_set_flags(authenc, CRYPTO_TFM_RES_BAD_KEY_LEN);
+ if (crypto_authenc_extractkeys(&keys, key, keylen) != 0)
goto out;
- }
+
subtype = get_aead_subtype(authenc);
if (subtype == CRYPTO_ALG_SUB_TYPE_CTR_SHA ||
subtype == CRYPTO_ALG_SUB_TYPE_CTR_NULL) {
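
In chcr_device_init() the old Tx channel toggle (!tx_channel_id) only ever alternated between channels 0 and 1; the modulo form round-robins across however many channels the LLD reports. Isolated sketch of the idiom:

/* Sketch: round-robin over nchan channels instead of a 0/1 toggle */
spin_lock(&dev->lock_chcr_dev);
ctx->tx_chan_id = dev->tx_channel_id;
dev->tx_channel_id = (dev->tx_channel_id + 1) % u_ctx->lldi.nchan;
spin_unlock(&dev->lock_chcr_dev);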
diff --git a/drivers/crypto/chelsio/chcr_core.c b/drivers/crypto/chelsio/chcr_core.c
index 029a7354f541..e937605670ac 100644
--- a/drivers/crypto/chelsio/chcr_core.c
+++ b/drivers/crypto/chelsio/chcr_core.c
@@ -132,8 +132,6 @@ static void chcr_dev_init(struct uld_ctx *u_ctx)
static int chcr_dev_move(struct uld_ctx *u_ctx)
{
- struct adapter *adap;
-
mutex_lock(&drv_data.drv_mutex);
if (drv_data.last_dev == u_ctx) {
if (list_is_last(&drv_data.last_dev->entry, &drv_data.act_dev))
@@ -146,8 +144,6 @@ static int chcr_dev_move(struct uld_ctx *u_ctx)
list_move(&u_ctx->entry, &drv_data.inact_dev);
if (list_empty(&drv_data.act_dev))
drv_data.last_dev = NULL;
- adap = padap(&u_ctx->dev);
- memset(&adap->chcr_stats, 0, sizeof(adap->chcr_stats));
atomic_dec(&drv_data.dev_count);
mutex_unlock(&drv_data.drv_mutex);
@@ -299,17 +295,21 @@ static int __init chcr_crypto_init(void)
static void __exit chcr_crypto_exit(void)
{
struct uld_ctx *u_ctx, *tmp;
+ struct adapter *adap;
stop_crypto();
-
cxgb4_unregister_uld(CXGB4_ULD_CRYPTO);
/* Remove all devices from list */
mutex_lock(&drv_data.drv_mutex);
list_for_each_entry_safe(u_ctx, tmp, &drv_data.act_dev, entry) {
+ adap = padap(&u_ctx->dev);
+ memset(&adap->chcr_stats, 0, sizeof(adap->chcr_stats));
list_del(&u_ctx->entry);
kfree(u_ctx);
}
list_for_each_entry_safe(u_ctx, tmp, &drv_data.inact_dev, entry) {
+ adap = padap(&u_ctx->dev);
+ memset(&adap->chcr_stats, 0, sizeof(adap->chcr_stats));
list_del(&u_ctx->entry);
kfree(u_ctx);
}
diff --git a/drivers/crypto/chelsio/chtls/chtls.h b/drivers/crypto/chelsio/chtls/chtls.h
index d2bc655ab931..459442704eb1 100644
--- a/drivers/crypto/chelsio/chtls/chtls.h
+++ b/drivers/crypto/chelsio/chtls/chtls.h
@@ -179,7 +179,10 @@ struct chtls_hws {
u32 copied_seq;
u64 tx_seq_no;
struct tls_scmd scmd;
- struct tls12_crypto_info_aes_gcm_128 crypto_info;
+ union {
+ struct tls12_crypto_info_aes_gcm_128 aes_gcm_128;
+ struct tls12_crypto_info_aes_gcm_256 aes_gcm_256;
+ } crypto_info;
};
struct chtls_sock {
@@ -482,7 +485,7 @@ int send_tx_flowc_wr(struct sock *sk, int compl,
void chtls_tcp_push(struct sock *sk, int flags);
int chtls_push_frames(struct chtls_sock *csk, int comp);
int chtls_set_tcb_tflag(struct sock *sk, unsigned int bit_pos, int val);
-int chtls_setkey(struct chtls_sock *csk, u32 keylen, u32 mode);
+int chtls_setkey(struct chtls_sock *csk, u32 keylen, u32 mode, int cipher_type);
void skb_entail(struct sock *sk, struct sk_buff *skb, int flags);
unsigned int keyid_to_addr(int start_addr, int keyid);
void free_tls_keyid(struct sock *sk);
diff --git a/drivers/crypto/chelsio/chtls/chtls_cm.c b/drivers/crypto/chelsio/chtls/chtls_cm.c
index aca75237bbcf..9b2745ad9e38 100644
--- a/drivers/crypto/chelsio/chtls/chtls_cm.c
+++ b/drivers/crypto/chelsio/chtls/chtls_cm.c
@@ -727,6 +727,14 @@ static int chtls_close_listsrv_rpl(struct chtls_dev *cdev, struct sk_buff *skb)
return 0;
}
+static void chtls_purge_wr_queue(struct sock *sk)
+{
+ struct sk_buff *skb;
+
+ while ((skb = dequeue_wr(sk)) != NULL)
+ kfree_skb(skb);
+}
+
static void chtls_release_resources(struct sock *sk)
{
struct chtls_sock *csk = rcu_dereference_sk_user_data(sk);
@@ -741,6 +749,11 @@ static void chtls_release_resources(struct sock *sk)
kfree_skb(csk->txdata_skb_cache);
csk->txdata_skb_cache = NULL;
+ if (csk->wr_credits != csk->wr_max_credits) {
+ chtls_purge_wr_queue(sk);
+ chtls_reset_wr_list(csk);
+ }
+
if (csk->l2t_entry) {
cxgb4_l2t_release(csk->l2t_entry);
csk->l2t_entry = NULL;
@@ -1273,7 +1286,7 @@ static int chtls_pass_accept_req(struct chtls_dev *cdev, struct sk_buff *skb)
ctx = (struct listen_ctx *)data;
lsk = ctx->lsk;
- if (unlikely(tid >= cdev->tids->ntids)) {
+ if (unlikely(tid_out_of_range(cdev->tids, tid))) {
pr_info("passive open TID %u too large\n", tid);
return 1;
}
@@ -1735,6 +1748,7 @@ static void chtls_peer_close(struct sock *sk, struct sk_buff *skb)
else
sk_wake_async(sk, SOCK_WAKE_WAITD, POLL_IN);
}
+ kfree_skb(skb);
}
static void chtls_close_con_rpl(struct sock *sk, struct sk_buff *skb)
@@ -1815,6 +1829,20 @@ static void send_defer_abort_rpl(struct chtls_dev *cdev, struct sk_buff *skb)
kfree_skb(skb);
}
+/*
+ * Add an skb to the deferred skb queue for processing from process context.
+ */
+static void t4_defer_reply(struct sk_buff *skb, struct chtls_dev *cdev,
+ defer_handler_t handler)
+{
+ DEFERRED_SKB_CB(skb)->handler = handler;
+ spin_lock_bh(&cdev->deferq.lock);
+ __skb_queue_tail(&cdev->deferq, skb);
+ if (skb_queue_len(&cdev->deferq) == 1)
+ schedule_work(&cdev->deferq_task);
+ spin_unlock_bh(&cdev->deferq.lock);
+}
+
static void send_abort_rpl(struct sock *sk, struct sk_buff *skb,
struct chtls_dev *cdev, int status, int queue)
{
@@ -1829,7 +1857,7 @@ static void send_abort_rpl(struct sock *sk, struct sk_buff *skb,
if (!reply_skb) {
req->status = (queue << 1);
- send_defer_abort_rpl(cdev, skb);
+ t4_defer_reply(skb, cdev, send_defer_abort_rpl);
return;
}
@@ -1848,20 +1876,6 @@ static void send_abort_rpl(struct sock *sk, struct sk_buff *skb,
cxgb4_ofld_send(cdev->lldi->ports[0], reply_skb);
}
-/*
- * Add an skb to the deferred skb queue for processing from process context.
- */
-static void t4_defer_reply(struct sk_buff *skb, struct chtls_dev *cdev,
- defer_handler_t handler)
-{
- DEFERRED_SKB_CB(skb)->handler = handler;
- spin_lock_bh(&cdev->deferq.lock);
- __skb_queue_tail(&cdev->deferq, skb);
- if (skb_queue_len(&cdev->deferq) == 1)
- schedule_work(&cdev->deferq_task);
- spin_unlock_bh(&cdev->deferq.lock);
-}
-
static void chtls_send_abort_rpl(struct sock *sk, struct sk_buff *skb,
struct chtls_dev *cdev,
int status, int queue)
@@ -2062,19 +2076,6 @@ rel_skb:
return 0;
}
-static struct sk_buff *dequeue_wr(struct sock *sk)
-{
- struct chtls_sock *csk = rcu_dereference_sk_user_data(sk);
- struct sk_buff *skb = csk->wr_skb_head;
-
- if (likely(skb)) {
- /* Don't bother clearing the tail */
- csk->wr_skb_head = WR_SKB_CB(skb)->next_wr;
- WR_SKB_CB(skb)->next_wr = NULL;
- }
- return skb;
-}
-
static void chtls_rx_ack(struct sock *sk, struct sk_buff *skb)
{
struct cpl_fw4_ack *hdr = cplhdr(skb) + RSS_HDR;
diff --git a/drivers/crypto/chelsio/chtls/chtls_cm.h b/drivers/crypto/chelsio/chtls/chtls_cm.h
index 129d7ac649a9..3fac0c74a41f 100644
--- a/drivers/crypto/chelsio/chtls/chtls_cm.h
+++ b/drivers/crypto/chelsio/chtls/chtls_cm.h
@@ -185,6 +185,12 @@ static inline void chtls_kfree_skb(struct sock *sk, struct sk_buff *skb)
kfree_skb(skb);
}
+static inline void chtls_reset_wr_list(struct chtls_sock *csk)
+{
+ csk->wr_skb_head = NULL;
+ csk->wr_skb_tail = NULL;
+}
+
static inline void enqueue_wr(struct chtls_sock *csk, struct sk_buff *skb)
{
WR_SKB_CB(skb)->next_wr = NULL;
@@ -197,4 +203,19 @@ static inline void enqueue_wr(struct chtls_sock *csk, struct sk_buff *skb)
WR_SKB_CB(csk->wr_skb_tail)->next_wr = skb;
csk->wr_skb_tail = skb;
}
+
+static inline struct sk_buff *dequeue_wr(struct sock *sk)
+{
+ struct chtls_sock *csk = rcu_dereference_sk_user_data(sk);
+ struct sk_buff *skb = NULL;
+
+ skb = csk->wr_skb_head;
+
+ if (likely(skb)) {
+ /* Don't bother clearing the tail */
+ csk->wr_skb_head = WR_SKB_CB(skb)->next_wr;
+ WR_SKB_CB(skb)->next_wr = NULL;
+ }
+ return skb;
+}
#endif
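
dequeue_wr() moves into this header so that chtls_cm.c can both pop and purge the pending write-request list. That list is intrusive: it is threaded through each skb's control buffer rather than held in a separate allocation. Assumed shape of the cb node (the real definition lives in the chtls headers):

/* Sketch of the cb-based list node used by enqueue_wr()/dequeue_wr() */
struct wr_skb_cb {
        struct l2t_skb_cb l2t;          /* must remain first */
        struct sk_buff *next_wr;        /* next pending write request */
};
#define WR_SKB_CB(skb) ((struct wr_skb_cb *)(skb)->cb)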
diff --git a/drivers/crypto/chelsio/chtls/chtls_hw.c b/drivers/crypto/chelsio/chtls/chtls_hw.c
index 2a34035d3cfb..f1820aca0d33 100644
--- a/drivers/crypto/chelsio/chtls/chtls_hw.c
+++ b/drivers/crypto/chelsio/chtls/chtls_hw.c
@@ -208,28 +208,53 @@ static void chtls_rxkey_ivauth(struct _key_ctx *kctx)
static int chtls_key_info(struct chtls_sock *csk,
struct _key_ctx *kctx,
- u32 keylen, u32 optname)
+ u32 keylen, u32 optname,
+ int cipher_type)
{
- unsigned char key[AES_KEYSIZE_128];
- struct tls12_crypto_info_aes_gcm_128 *gcm_ctx;
+ unsigned char key[AES_MAX_KEY_SIZE];
+ unsigned char *key_p, *salt;
unsigned char ghash_h[AEAD_H_SIZE];
- int ck_size, key_ctx_size;
+ int ck_size, key_ctx_size, kctx_mackey_size, salt_size;
struct crypto_aes_ctx aes;
int ret;
- gcm_ctx = (struct tls12_crypto_info_aes_gcm_128 *)
- &csk->tlshws.crypto_info;
-
key_ctx_size = sizeof(struct _key_ctx) +
roundup(keylen, 16) + AEAD_H_SIZE;
- if (keylen == AES_KEYSIZE_128) {
- ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_128;
- } else {
+ /* GCM mode of AES supports 128 and 256 bit encryption, so
+ * prepare key context based on GCM cipher type
+ */
+ switch (cipher_type) {
+ case TLS_CIPHER_AES_GCM_128: {
+ struct tls12_crypto_info_aes_gcm_128 *gcm_ctx_128 =
+ (struct tls12_crypto_info_aes_gcm_128 *)
+ &csk->tlshws.crypto_info;
+ memcpy(key, gcm_ctx_128->key, keylen);
+
+ key_p = gcm_ctx_128->key;
+ salt = gcm_ctx_128->salt;
+ ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_128;
+ salt_size = TLS_CIPHER_AES_GCM_128_SALT_SIZE;
+ kctx_mackey_size = CHCR_KEYCTX_MAC_KEY_SIZE_128;
+ break;
+ }
+ case TLS_CIPHER_AES_GCM_256: {
+ struct tls12_crypto_info_aes_gcm_256 *gcm_ctx_256 =
+ (struct tls12_crypto_info_aes_gcm_256 *)
+ &csk->tlshws.crypto_info;
+ memcpy(key, gcm_ctx_256->key, keylen);
+
+ key_p = gcm_ctx_256->key;
+ salt = gcm_ctx_256->salt;
+ ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_256;
+ salt_size = TLS_CIPHER_AES_GCM_256_SALT_SIZE;
+ kctx_mackey_size = CHCR_KEYCTX_MAC_KEY_SIZE_256;
+ break;
+ }
+ default:
pr_err("GCM: Invalid key length %d\n", keylen);
return -EINVAL;
}
- memcpy(key, gcm_ctx->key, keylen);
/* Calculate the H = CIPH(K, 0 repeated 16 times).
* It will go in key context
@@ -249,20 +274,20 @@ static int chtls_key_info(struct chtls_sock *csk,
key_ctx = ((key_ctx_size >> 4) << 3);
kctx->ctx_hdr = FILL_KEY_CRX_HDR(ck_size,
- CHCR_KEYCTX_MAC_KEY_SIZE_128,
+ kctx_mackey_size,
0, 0, key_ctx);
chtls_rxkey_ivauth(kctx);
} else {
kctx->ctx_hdr = FILL_KEY_CTX_HDR(ck_size,
- CHCR_KEYCTX_MAC_KEY_SIZE_128,
+ kctx_mackey_size,
0, 0, key_ctx_size >> 4);
}
- memcpy(kctx->salt, gcm_ctx->salt, TLS_CIPHER_AES_GCM_128_SALT_SIZE);
- memcpy(kctx->key, gcm_ctx->key, keylen);
+ memcpy(kctx->salt, salt, salt_size);
+ memcpy(kctx->key, key_p, keylen);
memcpy(kctx->key + keylen, ghash_h, AEAD_H_SIZE);
/* erase key info from driver */
- memset(gcm_ctx->key, 0, keylen);
+ memset(key_p, 0, keylen);
return 0;
}
@@ -288,7 +313,8 @@ static void chtls_set_scmd(struct chtls_sock *csk)
SCMD_TLS_FRAG_ENABLE_V(1);
}
-int chtls_setkey(struct chtls_sock *csk, u32 keylen, u32 optname)
+int chtls_setkey(struct chtls_sock *csk, u32 keylen,
+ u32 optname, int cipher_type)
{
struct tls_key_req *kwr;
struct chtls_dev *cdev;
@@ -350,9 +376,10 @@ int chtls_setkey(struct chtls_sock *csk, u32 keylen, u32 optname)
kwr->sc_imm.cmd_more = cpu_to_be32(ULPTX_CMD_V(ULP_TX_SC_IMM));
kwr->sc_imm.len = cpu_to_be32(klen);
+ lock_sock(sk);
/* key info */
kctx = (struct _key_ctx *)(kwr + 1);
- ret = chtls_key_info(csk, kctx, keylen, optname);
+ ret = chtls_key_info(csk, kctx, keylen, optname, cipher_type);
if (ret)
goto out_notcb;
@@ -388,8 +415,10 @@ int chtls_setkey(struct chtls_sock *csk, u32 keylen, u32 optname)
csk->tlshws.txkey = keyid;
}
+ release_sock(sk);
return ret;
out_notcb:
+ release_sock(sk);
free_tls_keyid(sk);
out_nokey:
kfree_skb(skb);
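
Both GCM key sizes end up in the same key-context layout: the raw key followed by the GHASH subkey H = AES-K(0^128) that chtls_key_info() computes above. A self-contained sketch of that derivation using the kernel's AES library (assumed available via crypto/aes.h):

#include <crypto/aes.h>         /* aes_expandkey(), aes_encrypt() */
#include <linux/string.h>       /* memzero_explicit() */

/* Sketch: derive the GHASH hash subkey for an AES-GCM key context */
static int derive_ghash_h(const u8 *key, unsigned int keylen,
                          u8 ghash_h[AES_BLOCK_SIZE])
{
        struct crypto_aes_ctx aes;
        int ret;

        ret = aes_expandkey(&aes, key, keylen);
        if (ret)
                return ret;

        memset(ghash_h, 0, AES_BLOCK_SIZE);
        aes_encrypt(&aes, ghash_h, ghash_h);    /* H = E_K(0^128) */
        memzero_explicit(&aes, sizeof(aes));
        return 0;
}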
diff --git a/drivers/crypto/chelsio/chtls/chtls_main.c b/drivers/crypto/chelsio/chtls/chtls_main.c
index 18996935d8ba..a038de90b2ea 100644
--- a/drivers/crypto/chelsio/chtls/chtls_main.c
+++ b/drivers/crypto/chelsio/chtls/chtls_main.c
@@ -84,7 +84,6 @@ static int listen_backlog_rcv(struct sock *sk, struct sk_buff *skb)
static int chtls_start_listen(struct chtls_dev *cdev, struct sock *sk)
{
struct chtls_listen *clisten;
- int err;
if (sk->sk_protocol != IPPROTO_TCP)
return -EPROTONOSUPPORT;
@@ -100,10 +99,10 @@ static int chtls_start_listen(struct chtls_dev *cdev, struct sock *sk)
clisten->cdev = cdev;
clisten->sk = sk;
mutex_lock(&notify_mutex);
- err = raw_notifier_call_chain(&listen_notify_list,
+ raw_notifier_call_chain(&listen_notify_list,
CHTLS_LISTEN_START, clisten);
mutex_unlock(&notify_mutex);
- return err;
+ return 0;
}
static void chtls_stop_listen(struct chtls_dev *cdev, struct sock *sk)
@@ -486,6 +485,7 @@ static int do_chtls_setsockopt(struct sock *sk, int optname,
struct tls_crypto_info *crypto_info, tmp_crypto_info;
struct chtls_sock *csk;
int keylen;
+ int cipher_type;
int rc = 0;
csk = rcu_dereference_sk_user_data(sk);
@@ -509,6 +509,9 @@ static int do_chtls_setsockopt(struct sock *sk, int optname,
crypto_info = (struct tls_crypto_info *)&csk->tlshws.crypto_info;
+ /* GCM mode of AES supports 128 and 256 bit encryption, so
+ * copy keys from user based on GCM cipher type.
+ */
switch (tmp_crypto_info.cipher_type) {
case TLS_CIPHER_AES_GCM_128: {
/* Obtain version and type from previous copy */
@@ -525,13 +528,30 @@ static int do_chtls_setsockopt(struct sock *sk, int optname,
}
keylen = TLS_CIPHER_AES_GCM_128_KEY_SIZE;
- rc = chtls_setkey(csk, keylen, optname);
+ cipher_type = TLS_CIPHER_AES_GCM_128;
+ break;
+ }
+ case TLS_CIPHER_AES_GCM_256: {
+ crypto_info[0] = tmp_crypto_info;
+ rc = copy_from_user((char *)crypto_info + sizeof(*crypto_info),
+ optval + sizeof(*crypto_info),
+ sizeof(struct tls12_crypto_info_aes_gcm_256)
+ - sizeof(*crypto_info));
+
+ if (rc) {
+ rc = -EFAULT;
+ goto out;
+ }
+
+ keylen = TLS_CIPHER_AES_GCM_256_KEY_SIZE;
+ cipher_type = TLS_CIPHER_AES_GCM_256;
break;
}
default:
rc = -EINVAL;
goto out;
}
+ rc = chtls_setkey(csk, keylen, optname, cipher_type);
out:
return rc;
}
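
do_chtls_setsockopt() copies the option in two phases: the common tls_crypto_info header first, to learn the cipher type, then the remainder of the type-specific structure; the new AES-GCM-256 arm mirrors the existing 128-bit one, and the chtls_setkey() call is hoisted out of the switch. Condensed sketch of the pattern:

/* Sketch: two-phase copy_from_user() keyed on cipher_type */
rc = copy_from_user(&tmp_crypto_info, optval, sizeof(tmp_crypto_info));
if (rc)
        return -EFAULT;

switch (tmp_crypto_info.cipher_type) {
case TLS_CIPHER_AES_GCM_256:
        crypto_info[0] = tmp_crypto_info;
        rc = copy_from_user((char *)crypto_info + sizeof(*crypto_info),
                            optval + sizeof(*crypto_info),
                            sizeof(struct tls12_crypto_info_aes_gcm_256) -
                            sizeof(*crypto_info));
        if (rc)
                return -EFAULT;
        break;
        /* TLS_CIPHER_AES_GCM_128 is handled the same way */
}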
diff --git a/drivers/crypto/geode-aes.c b/drivers/crypto/geode-aes.c
index 73a899e6f837..f4f18bfc2247 100644
--- a/drivers/crypto/geode-aes.c
+++ b/drivers/crypto/geode-aes.c
@@ -110,7 +110,6 @@ static int geode_setkey_cip(struct crypto_tfm *tfm, const u8 *key,
unsigned int len)
{
struct geode_aes_tfm_ctx *tctx = crypto_tfm_ctx(tfm);
- unsigned int ret;
tctx->keylen = len;
@@ -119,11 +118,9 @@ static int geode_setkey_cip(struct crypto_tfm *tfm, const u8 *key,
return 0;
}
- if (len != AES_KEYSIZE_192 && len != AES_KEYSIZE_256) {
+ if (len != AES_KEYSIZE_192 && len != AES_KEYSIZE_256)
/* not supported at all */
- tfm->crt_flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
return -EINVAL;
- }
/*
* The requested key size is not supported by HW, do a fallback
@@ -132,20 +129,13 @@ static int geode_setkey_cip(struct crypto_tfm *tfm, const u8 *key,
tctx->fallback.cip->base.crt_flags |=
(tfm->crt_flags & CRYPTO_TFM_REQ_MASK);
- ret = crypto_cipher_setkey(tctx->fallback.cip, key, len);
- if (ret) {
- tfm->crt_flags &= ~CRYPTO_TFM_RES_MASK;
- tfm->crt_flags |= (tctx->fallback.cip->base.crt_flags &
- CRYPTO_TFM_RES_MASK);
- }
- return ret;
+ return crypto_cipher_setkey(tctx->fallback.cip, key, len);
}
static int geode_setkey_skcipher(struct crypto_skcipher *tfm, const u8 *key,
unsigned int len)
{
struct geode_aes_tfm_ctx *tctx = crypto_skcipher_ctx(tfm);
- unsigned int ret;
tctx->keylen = len;
@@ -154,11 +144,9 @@ static int geode_setkey_skcipher(struct crypto_skcipher *tfm, const u8 *key,
return 0;
}
- if (len != AES_KEYSIZE_192 && len != AES_KEYSIZE_256) {
+ if (len != AES_KEYSIZE_192 && len != AES_KEYSIZE_256)
/* not supported at all */
- crypto_skcipher_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
return -EINVAL;
- }
/*
* The requested key size is not supported by HW, do a fallback
@@ -168,11 +156,7 @@ static int geode_setkey_skcipher(struct crypto_skcipher *tfm, const u8 *key,
crypto_skcipher_set_flags(tctx->fallback.skcipher,
crypto_skcipher_get_flags(tfm) &
CRYPTO_TFM_REQ_MASK);
- ret = crypto_skcipher_setkey(tctx->fallback.skcipher, key, len);
- crypto_skcipher_set_flags(tfm,
- crypto_skcipher_get_flags(tctx->fallback.skcipher) &
- CRYPTO_TFM_RES_MASK);
- return ret;
+ return crypto_skcipher_setkey(tctx->fallback.skcipher, key, len);
}
static void
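
With the result flags gone, geode's fallback plumbing shrinks to forwarding the request flags and returning the fallback's setkey result directly; nothing needs to be copied back into the outer tfm. Sketch of the surviving delegation:

/* Sketch: delegate unsupported key sizes to the software fallback */
crypto_skcipher_set_flags(tctx->fallback.skcipher,
                          crypto_skcipher_get_flags(tfm) &
                          CRYPTO_TFM_REQ_MASK);
return crypto_skcipher_setkey(tctx->fallback.skcipher, key, len);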
diff --git a/drivers/crypto/hifn_795x.c b/drivers/crypto/hifn_795x.c
index 4e7323884ae3..354836468c5d 100644
--- a/drivers/crypto/hifn_795x.c
+++ b/drivers/crypto/hifn_795x.c
@@ -2507,7 +2507,7 @@ static int hifn_probe(struct pci_dev *pdev, const struct pci_device_id *id)
addr = pci_resource_start(pdev, i);
size = pci_resource_len(pdev, i);
- dev->bar[i] = ioremap_nocache(addr, size);
+ dev->bar[i] = ioremap(addr, size);
if (!dev->bar[i]) {
err = -ENOMEM;
goto err_out_unmap_bars;
diff --git a/drivers/crypto/hisilicon/Kconfig b/drivers/crypto/hisilicon/Kconfig
index c0e7a85fe129..8851161f722f 100644
--- a/drivers/crypto/hisilicon/Kconfig
+++ b/drivers/crypto/hisilicon/Kconfig
@@ -16,16 +16,22 @@ config CRYPTO_DEV_HISI_SEC
config CRYPTO_DEV_HISI_SEC2
tristate "Support for HiSilicon SEC2 crypto block cipher accelerator"
- select CRYPTO_BLKCIPHER
+ select CRYPTO_SKCIPHER
select CRYPTO_ALGAPI
select CRYPTO_LIB_DES
select CRYPTO_DEV_HISI_QM
+ select CRYPTO_AEAD
+ select CRYPTO_AUTHENC
+ select CRYPTO_HMAC
+ select CRYPTO_SHA1
+ select CRYPTO_SHA256
+ select CRYPTO_SHA512
depends on PCI && PCI_MSI
depends on ARM64 || (COMPILE_TEST && 64BIT)
help
Support for HiSilicon SEC Engine of version 2 in crypto subsystem.
It provides AES, SM4, and 3DES algorithms with ECB
- CBC, and XTS cipher mode.
+ CBC, and XTS cipher mode, and AEAD algorithms.
To compile this as a module, choose M here: the module
will be called hisi_sec2.
@@ -44,7 +50,6 @@ config CRYPTO_DEV_HISI_ZIP
depends on ARM64 || (COMPILE_TEST && 64BIT)
depends on !CPU_BIG_ENDIAN || COMPILE_TEST
select CRYPTO_DEV_HISI_QM
- select SG_SPLIT
help
Support for HiSilicon ZIP Driver
diff --git a/drivers/crypto/hisilicon/hpre/hpre_crypto.c b/drivers/crypto/hisilicon/hpre/hpre_crypto.c
index 98f037e6ea3e..5d400d69e8e4 100644
--- a/drivers/crypto/hisilicon/hpre/hpre_crypto.c
+++ b/drivers/crypto/hisilicon/hpre/hpre_crypto.c
@@ -123,7 +123,7 @@ static int hpre_add_req_to_ctx(struct hpre_asym_request *hpre_req)
ctx = hpre_req->ctx;
id = hpre_alloc_req_id(ctx);
- if (id < 0)
+ if (unlikely(id < 0))
return -EINVAL;
ctx->req_list[id] = hpre_req;
@@ -174,8 +174,8 @@ static struct hisi_qp *hpre_get_qp_and_start(void)
}
static int hpre_get_data_dma_addr(struct hpre_asym_request *hpre_req,
- struct scatterlist *data, unsigned int len,
- int is_src, dma_addr_t *tmp)
+ struct scatterlist *data, unsigned int len,
+ int is_src, dma_addr_t *tmp)
{
struct hpre_ctx *ctx = hpre_req->ctx;
struct device *dev = HPRE_DEV(ctx);
@@ -190,7 +190,7 @@ static int hpre_get_data_dma_addr(struct hpre_asym_request *hpre_req,
}
*tmp = dma_map_single(dev, sg_virt(data),
len, dma_dir);
- if (dma_mapping_error(dev, *tmp)) {
+ if (unlikely(dma_mapping_error(dev, *tmp))) {
dev_err(dev, "dma map data err!\n");
return -ENOMEM;
}
@@ -199,8 +199,8 @@ static int hpre_get_data_dma_addr(struct hpre_asym_request *hpre_req,
}
static int hpre_prepare_dma_buf(struct hpre_asym_request *hpre_req,
- struct scatterlist *data, unsigned int len,
- int is_src, dma_addr_t *tmp)
+ struct scatterlist *data, unsigned int len,
+ int is_src, dma_addr_t *tmp)
{
struct hpre_ctx *ctx = hpre_req->ctx;
struct device *dev = HPRE_DEV(ctx);
@@ -208,11 +208,11 @@ static int hpre_prepare_dma_buf(struct hpre_asym_request *hpre_req,
int shift;
shift = ctx->key_sz - len;
- if (shift < 0)
+ if (unlikely(shift < 0))
return -EINVAL;
ptr = dma_alloc_coherent(dev, ctx->key_sz, tmp, GFP_KERNEL);
- if (!ptr)
+ if (unlikely(!ptr))
return -ENOMEM;
if (is_src) {
@@ -226,12 +226,12 @@ static int hpre_prepare_dma_buf(struct hpre_asym_request *hpre_req,
}
static int hpre_hw_data_init(struct hpre_asym_request *hpre_req,
- struct scatterlist *data, unsigned int len,
- int is_src, int is_dh)
+ struct scatterlist *data, unsigned int len,
+ int is_src, int is_dh)
{
struct hpre_sqe *msg = &hpre_req->req;
struct hpre_ctx *ctx = hpre_req->ctx;
- dma_addr_t tmp;
+ dma_addr_t tmp = 0;
int ret;
/* when the data is dh's source, we should format it */
@@ -241,7 +241,7 @@ static int hpre_hw_data_init(struct hpre_asym_request *hpre_req,
else
ret = hpre_prepare_dma_buf(hpre_req, data, len,
is_src, &tmp);
- if (ret)
+ if (unlikely(ret))
return ret;
if (is_src)
@@ -253,15 +253,16 @@ static int hpre_hw_data_init(struct hpre_asym_request *hpre_req,
}
static void hpre_hw_data_clr_all(struct hpre_ctx *ctx,
- struct hpre_asym_request *req,
- struct scatterlist *dst, struct scatterlist *src)
+ struct hpre_asym_request *req,
+ struct scatterlist *dst,
+ struct scatterlist *src)
{
struct device *dev = HPRE_DEV(ctx);
struct hpre_sqe *sqe = &req->req;
dma_addr_t tmp;
tmp = le64_to_cpu(sqe->in);
- if (!tmp)
+ if (unlikely(!tmp))
return;
if (src) {
@@ -274,7 +275,7 @@ static void hpre_hw_data_clr_all(struct hpre_ctx *ctx,
}
tmp = le64_to_cpu(sqe->out);
- if (!tmp)
+ if (unlikely(!tmp))
return;
if (req->dst) {
@@ -288,7 +289,7 @@ static void hpre_hw_data_clr_all(struct hpre_ctx *ctx,
}
static int hpre_alg_res_post_hf(struct hpre_ctx *ctx, struct hpre_sqe *sqe,
- void **kreq)
+ void **kreq)
{
struct hpre_asym_request *req;
int err, id, done;
@@ -308,7 +309,7 @@ static int hpre_alg_res_post_hf(struct hpre_ctx *ctx, struct hpre_sqe *sqe,
done = (le32_to_cpu(sqe->dw0) >> HPRE_SQE_DONE_SHIFT) &
HREE_SQE_DONE_MASK;
- if (err == HPRE_NO_HW_ERR && done == HPRE_HW_TASK_DONE)
+ if (likely(err == HPRE_NO_HW_ERR && done == HPRE_HW_TASK_DONE))
return 0;
return -EINVAL;
@@ -375,7 +376,7 @@ static void hpre_alg_cb(struct hisi_qp *qp, void *resp)
struct hpre_ctx *ctx = qp->qp_ctx;
struct hpre_sqe *sqe = resp;
- ctx->req_list[sqe->tag]->cb(ctx, resp);
+ ctx->req_list[le16_to_cpu(sqe->tag)]->cb(ctx, resp);
}
static int hpre_ctx_init(struct hpre_ctx *ctx)
@@ -454,33 +455,30 @@ static int hpre_dh_compute_value(struct kpp_request *req)
int ctr = 0;
int ret;
- if (!ctx)
- return -EINVAL;
-
ret = hpre_msg_request_set(ctx, req, false);
- if (ret)
+ if (unlikely(ret))
return ret;
if (req->src) {
ret = hpre_hw_data_init(hpre_req, req->src, req->src_len, 1, 1);
- if (ret)
+ if (unlikely(ret))
goto clear_all;
}
ret = hpre_hw_data_init(hpre_req, req->dst, req->dst_len, 0, 1);
- if (ret)
+ if (unlikely(ret))
goto clear_all;
if (ctx->crt_g2_mode && !req->src)
- msg->dw0 |= HPRE_ALG_DH_G2;
+ msg->dw0 = cpu_to_le32(le32_to_cpu(msg->dw0) | HPRE_ALG_DH_G2);
else
- msg->dw0 |= HPRE_ALG_DH;
+ msg->dw0 = cpu_to_le32(le32_to_cpu(msg->dw0) | HPRE_ALG_DH);
do {
ret = hisi_qp_send(ctx->qp, msg);
} while (ret == -EBUSY && ctr++ < HPRE_TRY_SEND_TIMES);
/* success */
- if (!ret)
+ if (likely(!ret))
return -EINPROGRESS;
clear_all:
@@ -520,12 +518,12 @@ static int hpre_dh_set_params(struct hpre_ctx *ctx, struct dh *params)
return -EINVAL;
if (hpre_is_dh_params_length_valid(params->p_size <<
- HPRE_BITS_2_BYTES_SHIFT))
+ HPRE_BITS_2_BYTES_SHIFT))
return -EINVAL;
sz = ctx->key_sz = params->p_size;
ctx->dh.xa_p = dma_alloc_coherent(dev, sz << 1,
- &ctx->dh.dma_xa_p, GFP_KERNEL);
+ &ctx->dh.dma_xa_p, GFP_KERNEL);
if (!ctx->dh.xa_p)
return -ENOMEM;
@@ -559,13 +557,12 @@ static void hpre_dh_clear_ctx(struct hpre_ctx *ctx, bool is_clear_all)
hisi_qm_stop_qp(ctx->qp);
if (ctx->dh.g) {
- memset(ctx->dh.g, 0, sz);
dma_free_coherent(dev, sz, ctx->dh.g, ctx->dh.dma_g);
ctx->dh.g = NULL;
}
if (ctx->dh.xa_p) {
- memset(ctx->dh.xa_p, 0, sz);
+ memzero_explicit(ctx->dh.xa_p, sz);
dma_free_coherent(dev, sz << 1, ctx->dh.xa_p,
ctx->dh.dma_xa_p);
ctx->dh.xa_p = NULL;
@@ -661,9 +658,6 @@ static int hpre_rsa_enc(struct akcipher_request *req)
int ctr = 0;
int ret;
- if (!ctx)
- return -EINVAL;
-
/* For 512 and 1536 bits key size, use soft tfm instead */
if (ctx->key_sz == HPRE_RSA_512BITS_KSZ ||
ctx->key_sz == HPRE_RSA_1536BITS_KSZ) {
@@ -673,22 +667,22 @@ static int hpre_rsa_enc(struct akcipher_request *req)
return ret;
}
- if (!ctx->rsa.pubkey)
+ if (unlikely(!ctx->rsa.pubkey))
return -EINVAL;
ret = hpre_msg_request_set(ctx, req, true);
- if (ret)
+ if (unlikely(ret))
return ret;
- msg->dw0 |= HPRE_ALG_NC_NCRT;
+ msg->dw0 |= cpu_to_le32(HPRE_ALG_NC_NCRT);
msg->key = cpu_to_le64((u64)ctx->rsa.dma_pubkey);
ret = hpre_hw_data_init(hpre_req, req->src, req->src_len, 1, 0);
- if (ret)
+ if (unlikely(ret))
goto clear_all;
ret = hpre_hw_data_init(hpre_req, req->dst, req->dst_len, 0, 0);
- if (ret)
+ if (unlikely(ret))
goto clear_all;
do {
@@ -696,7 +690,7 @@ static int hpre_rsa_enc(struct akcipher_request *req)
} while (ret == -EBUSY && ctr++ < HPRE_TRY_SEND_TIMES);
/* success */
- if (!ret)
+ if (likely(!ret))
return -EINPROGRESS;
clear_all:
@@ -716,9 +710,6 @@ static int hpre_rsa_dec(struct akcipher_request *req)
int ctr = 0;
int ret;
- if (!ctx)
- return -EINVAL;
-
/* For 512 and 1536 bits key size, use soft tfm instead */
if (ctx->key_sz == HPRE_RSA_512BITS_KSZ ||
ctx->key_sz == HPRE_RSA_1536BITS_KSZ) {
@@ -728,27 +719,29 @@ static int hpre_rsa_dec(struct akcipher_request *req)
return ret;
}
- if (!ctx->rsa.prikey)
+ if (unlikely(!ctx->rsa.prikey))
return -EINVAL;
ret = hpre_msg_request_set(ctx, req, true);
- if (ret)
+ if (unlikely(ret))
return ret;
if (ctx->crt_g2_mode) {
msg->key = cpu_to_le64((u64)ctx->rsa.dma_crt_prikey);
- msg->dw0 |= HPRE_ALG_NC_CRT;
+ msg->dw0 = cpu_to_le32(le32_to_cpu(msg->dw0) |
+ HPRE_ALG_NC_CRT);
} else {
msg->key = cpu_to_le64((u64)ctx->rsa.dma_prikey);
- msg->dw0 |= HPRE_ALG_NC_NCRT;
+ msg->dw0 = cpu_to_le32(le32_to_cpu(msg->dw0) |
+ HPRE_ALG_NC_NCRT);
}
ret = hpre_hw_data_init(hpre_req, req->src, req->src_len, 1, 0);
- if (ret)
+ if (unlikely(ret))
goto clear_all;
ret = hpre_hw_data_init(hpre_req, req->dst, req->dst_len, 0, 0);
- if (ret)
+ if (unlikely(ret))
goto clear_all;
do {
@@ -756,7 +749,7 @@ static int hpre_rsa_dec(struct akcipher_request *req)
} while (ret == -EBUSY && ctr++ < HPRE_TRY_SEND_TIMES);
/* success */
- if (!ret)
+ if (likely(!ret))
return -EINPROGRESS;
clear_all:
@@ -811,10 +804,8 @@ static int hpre_rsa_set_e(struct hpre_ctx *ctx, const char *value,
hpre_rsa_drop_leading_zeros(&ptr, &vlen);
- if (!ctx->key_sz || !vlen || vlen > ctx->key_sz) {
- ctx->rsa.pubkey = NULL;
+ if (!ctx->key_sz || !vlen || vlen > ctx->key_sz)
return -EINVAL;
- }
memcpy(ctx->rsa.pubkey + ctx->key_sz - vlen, ptr, vlen);
@@ -836,17 +827,17 @@ static int hpre_rsa_set_d(struct hpre_ctx *ctx, const char *value,
return 0;
}
-static int hpre_crt_para_get(char *para, const char *raw,
- unsigned int raw_sz, unsigned int para_size)
+static int hpre_crt_para_get(char *para, size_t para_sz,
+ const char *raw, size_t raw_sz)
{
const char *ptr = raw;
size_t len = raw_sz;
hpre_rsa_drop_leading_zeros(&ptr, &len);
- if (!len || len > para_size)
+ if (!len || len > para_sz)
return -EINVAL;
- memcpy(para + para_size - len, ptr, len);
+ memcpy(para + para_sz - len, ptr, len);
return 0;
}
@@ -864,32 +855,32 @@ static int hpre_rsa_setkey_crt(struct hpre_ctx *ctx, struct rsa_key *rsa_key)
if (!ctx->rsa.crt_prikey)
return -ENOMEM;
- ret = hpre_crt_para_get(ctx->rsa.crt_prikey, rsa_key->dq,
- rsa_key->dq_sz, hlf_ksz);
+ ret = hpre_crt_para_get(ctx->rsa.crt_prikey, hlf_ksz,
+ rsa_key->dq, rsa_key->dq_sz);
if (ret)
goto free_key;
offset = hlf_ksz;
- ret = hpre_crt_para_get(ctx->rsa.crt_prikey + offset, rsa_key->dp,
- rsa_key->dp_sz, hlf_ksz);
+ ret = hpre_crt_para_get(ctx->rsa.crt_prikey + offset, hlf_ksz,
+ rsa_key->dp, rsa_key->dp_sz);
if (ret)
goto free_key;
offset = hlf_ksz * HPRE_CRT_Q;
- ret = hpre_crt_para_get(ctx->rsa.crt_prikey + offset,
- rsa_key->q, rsa_key->q_sz, hlf_ksz);
+ ret = hpre_crt_para_get(ctx->rsa.crt_prikey + offset, hlf_ksz,
+ rsa_key->q, rsa_key->q_sz);
if (ret)
goto free_key;
offset = hlf_ksz * HPRE_CRT_P;
- ret = hpre_crt_para_get(ctx->rsa.crt_prikey + offset,
- rsa_key->p, rsa_key->p_sz, hlf_ksz);
+ ret = hpre_crt_para_get(ctx->rsa.crt_prikey + offset, hlf_ksz,
+ rsa_key->p, rsa_key->p_sz);
if (ret)
goto free_key;
offset = hlf_ksz * HPRE_CRT_INV;
- ret = hpre_crt_para_get(ctx->rsa.crt_prikey + offset,
- rsa_key->qinv, rsa_key->qinv_sz, hlf_ksz);
+ ret = hpre_crt_para_get(ctx->rsa.crt_prikey + offset, hlf_ksz,
+ rsa_key->qinv, rsa_key->qinv_sz);
if (ret)
goto free_key;
@@ -899,7 +890,7 @@ static int hpre_rsa_setkey_crt(struct hpre_ctx *ctx, struct rsa_key *rsa_key)
free_key:
offset = hlf_ksz * HPRE_CRT_PRMS;
- memset(ctx->rsa.crt_prikey, 0, offset);
+ memzero_explicit(ctx->rsa.crt_prikey, offset);
dma_free_coherent(dev, hlf_ksz * HPRE_CRT_PRMS, ctx->rsa.crt_prikey,
ctx->rsa.dma_crt_prikey);
ctx->rsa.crt_prikey = NULL;
@@ -924,14 +915,15 @@ static void hpre_rsa_clear_ctx(struct hpre_ctx *ctx, bool is_clear_all)
}
if (ctx->rsa.crt_prikey) {
- memset(ctx->rsa.crt_prikey, 0, half_key_sz * HPRE_CRT_PRMS);
+ memzero_explicit(ctx->rsa.crt_prikey,
+ half_key_sz * HPRE_CRT_PRMS);
dma_free_coherent(dev, half_key_sz * HPRE_CRT_PRMS,
ctx->rsa.crt_prikey, ctx->rsa.dma_crt_prikey);
ctx->rsa.crt_prikey = NULL;
}
if (ctx->rsa.prikey) {
- memset(ctx->rsa.prikey, 0, ctx->key_sz);
+ memzero_explicit(ctx->rsa.prikey, ctx->key_sz);
dma_free_coherent(dev, ctx->key_sz << 1, ctx->rsa.prikey,
ctx->rsa.dma_prikey);
ctx->rsa.prikey = NULL;
@@ -1043,6 +1035,7 @@ static unsigned int hpre_rsa_max_size(struct crypto_akcipher *tfm)
static int hpre_rsa_init_tfm(struct crypto_akcipher *tfm)
{
struct hpre_ctx *ctx = akcipher_tfm_ctx(tfm);
+ int ret;
ctx->rsa.soft_tfm = crypto_alloc_akcipher("rsa-generic", 0, 0);
if (IS_ERR(ctx->rsa.soft_tfm)) {
@@ -1050,7 +1043,11 @@ static int hpre_rsa_init_tfm(struct crypto_akcipher *tfm)
return PTR_ERR(ctx->rsa.soft_tfm);
}
- return hpre_ctx_init(ctx);
+ ret = hpre_ctx_init(ctx);
+ if (ret)
+ crypto_free_akcipher(ctx->rsa.soft_tfm);
+
+ return ret;
}
static void hpre_rsa_exit_tfm(struct crypto_akcipher *tfm)
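
Two recurring changes in the hpre_crypto.c hunks: hot-path branches gain likely()/unlikely() hints, and key material is wiped with memzero_explicit() instead of memset(), since a plain memset() just before freeing a buffer may be optimized away as a dead store. The wipe-then-free idiom:

#include <linux/string.h>       /* memzero_explicit() */

/* Sketch: guarantee the wipe of key material before freeing it */
memzero_explicit(ctx->rsa.prikey, ctx->key_sz);
dma_free_coherent(dev, ctx->key_sz << 1, ctx->rsa.prikey,
                  ctx->rsa.dma_prikey);
ctx->rsa.prikey = NULL;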
diff --git a/drivers/crypto/hisilicon/hpre/hpre_main.c b/drivers/crypto/hisilicon/hpre/hpre_main.c
index 34e0424410bf..401747de67a8 100644
--- a/drivers/crypto/hisilicon/hpre/hpre_main.c
+++ b/drivers/crypto/hisilicon/hpre/hpre_main.c
@@ -73,7 +73,7 @@
#define HPRE_DBGFS_VAL_MAX_LEN 20
#define HPRE_PCI_DEVICE_ID 0xa258
#define HPRE_PCI_VF_DEVICE_ID 0xa259
-#define HPRE_ADDR(qm, offset) (qm->io_base + (offset))
+#define HPRE_ADDR(qm, offset) ((qm)->io_base + (offset))
#define HPRE_QM_USR_CFG_MASK 0xfffffffe
#define HPRE_QM_AXI_CFG_MASK 0xffff
#define HPRE_QM_VFG_AX_MASK 0xff
@@ -106,18 +106,18 @@ static const char * const hpre_debug_file_name[] = {
};
static const struct hpre_hw_error hpre_hw_errors[] = {
- { .int_msk = BIT(0), .msg = "hpre_ecc_1bitt_err" },
- { .int_msk = BIT(1), .msg = "hpre_ecc_2bit_err" },
- { .int_msk = BIT(2), .msg = "hpre_data_wr_err" },
- { .int_msk = BIT(3), .msg = "hpre_data_rd_err" },
- { .int_msk = BIT(4), .msg = "hpre_bd_rd_err" },
- { .int_msk = BIT(5), .msg = "hpre_ooo_2bit_ecc_err" },
- { .int_msk = BIT(6), .msg = "hpre_cltr1_htbt_tm_out_err" },
- { .int_msk = BIT(7), .msg = "hpre_cltr2_htbt_tm_out_err" },
- { .int_msk = BIT(8), .msg = "hpre_cltr3_htbt_tm_out_err" },
- { .int_msk = BIT(9), .msg = "hpre_cltr4_htbt_tm_out_err" },
- { .int_msk = GENMASK(15, 10), .msg = "hpre_ooo_rdrsp_err" },
- { .int_msk = GENMASK(21, 16), .msg = "hpre_ooo_wrrsp_err" },
+ { .int_msk = BIT(0), .msg = "core_ecc_1bit_err_int_set" },
+ { .int_msk = BIT(1), .msg = "core_ecc_2bit_err_int_set" },
+ { .int_msk = BIT(2), .msg = "dat_wb_poison_int_set" },
+ { .int_msk = BIT(3), .msg = "dat_rd_poison_int_set" },
+ { .int_msk = BIT(4), .msg = "bd_rd_poison_int_set" },
+ { .int_msk = BIT(5), .msg = "ooo_ecc_2bit_err_int_set" },
+ { .int_msk = BIT(6), .msg = "cluster1_shb_timeout_int_set" },
+ { .int_msk = BIT(7), .msg = "cluster2_shb_timeout_int_set" },
+ { .int_msk = BIT(8), .msg = "cluster3_shb_timeout_int_set" },
+ { .int_msk = BIT(9), .msg = "cluster4_shb_timeout_int_set" },
+ { .int_msk = GENMASK(15, 10), .msg = "ooo_rdrsp_err_int_set" },
+ { .int_msk = GENMASK(21, 16), .msg = "ooo_wrrsp_err_int_set" },
{ /* sentinel */ }
};
@@ -490,7 +490,7 @@ static ssize_t hpre_ctrl_debug_read(struct file *filp, char __user *buf,
return -EINVAL;
}
spin_unlock_irq(&file->lock);
- ret = sprintf(tbuf, "%u\n", val);
+ ret = snprintf(tbuf, HPRE_DBGFS_VAL_MAX_LEN, "%u\n", val);
return simple_read_from_buffer(buf, count, pos, tbuf, ret);
}
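
Switching sprintf() to snprintf() bounds the write to tbuf (HPRE_DBGFS_VAL_MAX_LEN bytes). With "%u\n" the output cannot exceed 11 bytes anyway, so this is hardening rather than a live overflow; note that kernel code often prefers scnprintf(), which returns the bytes actually stored rather than the would-be length. A user-space sketch of the bounded-write behaviour:

    #include <stdio.h>

    int main(void)
    {
            char tbuf[20];
            /* At most sizeof(tbuf) bytes are written, NUL included;
             * the return value is the length the output wanted to be. */
            int ret = snprintf(tbuf, sizeof(tbuf), "%u\n", 4294967295u);

            printf("ret=%d buf=%s", ret, tbuf);
            return 0;
    }
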
@@ -557,7 +557,7 @@ static const struct file_operations hpre_ctrl_debug_fops = {
static int hpre_create_debugfs_file(struct hpre_debug *dbg, struct dentry *dir,
enum hpre_ctrl_dbgfs_file type, int indx)
{
- struct dentry *tmp, *file_dir;
+ struct dentry *file_dir;
if (dir)
file_dir = dir;
@@ -571,10 +571,8 @@ static int hpre_create_debugfs_file(struct hpre_debug *dbg, struct dentry *dir,
dbg->files[indx].debug = dbg;
dbg->files[indx].type = type;
dbg->files[indx].index = indx;
- tmp = debugfs_create_file(hpre_debug_file_name[type], 0600, file_dir,
- dbg->files + indx, &hpre_ctrl_debug_fops);
- if (!tmp)
- return -ENOENT;
+ debugfs_create_file(hpre_debug_file_name[type], 0600, file_dir,
+ dbg->files + indx, &hpre_ctrl_debug_fops);
return 0;
}
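
Dropping the return-value checks follows the current debugfs contract: the creation helpers return an ERR_PTR on failure, accept an ERR_PTR parent, and a debugfs problem must never change driver behaviour, so callers simply ignore the result. The idiom reduced to essentials, with hypothetical names:

    #include <linux/debugfs.h>

    static struct dentry *example_root;
    static u32 example_state;

    static void example_debugfs_init(void)
    {
            /* No error handling on purpose: if this fails, every later
             * call taking example_root as parent copes with it. */
            example_root = debugfs_create_dir("example", NULL);
            debugfs_create_u32("state", 0444, example_root, &example_state);
    }
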
@@ -585,7 +583,6 @@ static int hpre_pf_comm_regs_debugfs_init(struct hpre_debug *debug)
struct hisi_qm *qm = &hpre->qm;
struct device *dev = &qm->pdev->dev;
struct debugfs_regset32 *regset;
- struct dentry *tmp;
regset = devm_kzalloc(dev, sizeof(*regset), GFP_KERNEL);
if (!regset)
@@ -595,10 +592,7 @@ static int hpre_pf_comm_regs_debugfs_init(struct hpre_debug *debug)
regset->nregs = ARRAY_SIZE(hpre_com_dfx_regs);
regset->base = qm->io_base;
- tmp = debugfs_create_regset32("regs", 0444, debug->debug_root, regset);
- if (!tmp)
- return -ENOENT;
-
+ debugfs_create_regset32("regs", 0444, debug->debug_root, regset);
return 0;
}
@@ -609,15 +603,14 @@ static int hpre_cluster_debugfs_init(struct hpre_debug *debug)
struct device *dev = &qm->pdev->dev;
char buf[HPRE_DBGFS_VAL_MAX_LEN];
struct debugfs_regset32 *regset;
- struct dentry *tmp_d, *tmp;
+ struct dentry *tmp_d;
int i, ret;
for (i = 0; i < HPRE_CLUSTERS_NUM; i++) {
- sprintf(buf, "cluster%d", i);
-
+ ret = snprintf(buf, HPRE_DBGFS_VAL_MAX_LEN, "cluster%d", i);
+ if (ret < 0)
+ return -EINVAL;
tmp_d = debugfs_create_dir(buf, debug->debug_root);
- if (!tmp_d)
- return -ENOENT;
regset = devm_kzalloc(dev, sizeof(*regset), GFP_KERNEL);
if (!regset)
@@ -627,9 +620,7 @@ static int hpre_cluster_debugfs_init(struct hpre_debug *debug)
regset->nregs = ARRAY_SIZE(hpre_cluster_dfx_regs);
regset->base = qm->io_base + hpre_cluster_offsets[i];
- tmp = debugfs_create_regset32("regs", 0444, tmp_d, regset);
- if (!tmp)
- return -ENOENT;
+ debugfs_create_regset32("regs", 0444, tmp_d, regset);
ret = hpre_create_debugfs_file(debug, tmp_d, HPRE_CLUSTER_CTRL,
i + HPRE_CLUSTER_CTRL);
if (ret)
@@ -668,9 +659,6 @@ static int hpre_debugfs_init(struct hpre *hpre)
int ret;
dir = debugfs_create_dir(dev_name(dev), hpre_debugfs_root);
- if (!dir)
- return -ENOENT;
-
qm->debug.debug_root = dir;
ret = hisi_qm_debug_init(qm);
@@ -1014,8 +1002,6 @@ static void hpre_register_debugfs(void)
return;
hpre_debugfs_root = debugfs_create_dir(hpre_name, NULL);
- if (IS_ERR_OR_NULL(hpre_debugfs_root))
- hpre_debugfs_root = NULL;
}
static void hpre_unregister_debugfs(void)
diff --git a/drivers/crypto/hisilicon/sec2/sec.h b/drivers/crypto/hisilicon/sec2/sec.h
index 26754d0570ba..13e2d8d7be94 100644
--- a/drivers/crypto/hisilicon/sec2/sec.h
+++ b/drivers/crypto/hisilicon/sec2/sec.h
@@ -9,10 +9,12 @@
#include "../qm.h"
#include "sec_crypto.h"
-/* Cipher resource per hardware SEC queue */
-struct sec_cipher_res {
+/* Algorithm resource per hardware SEC queue */
+struct sec_alg_res {
u8 *c_ivin;
dma_addr_t c_ivin_dma;
+ u8 *out_mac;
+ dma_addr_t out_mac_dma;
};
/* Cipher request of SEC private */
@@ -21,33 +23,35 @@ struct sec_cipher_req {
dma_addr_t c_in_dma;
struct hisi_acc_hw_sgl *c_out;
dma_addr_t c_out_dma;
- u8 *c_ivin;
- dma_addr_t c_ivin_dma;
struct skcipher_request *sk_req;
u32 c_len;
bool encrypt;
};
+struct sec_aead_req {
+ u8 *out_mac;
+ dma_addr_t out_mac_dma;
+ struct aead_request *aead_req;
+};
+
/* SEC request of Crypto */
struct sec_req {
struct sec_sqe sec_sqe;
struct sec_ctx *ctx;
struct sec_qp_ctx *qp_ctx;
- /* Cipher supported only at present */
struct sec_cipher_req c_req;
+ struct sec_aead_req aead_req;
+
int err_type;
int req_id;
/* Status of the SEC request */
- int fake_busy;
+ bool fake_busy;
};
/**
* struct sec_req_op - Operations for SEC request
- * @get_res: Get resources for TFM on the SEC device
- * @resource_alloc: Allocate resources for queue context on the SEC device
- * @resource_free: Free resources for queue context on the SEC device
* @buf_map: DMA map the SGL buffers of the request
* @buf_unmap: DMA unmap the SGL buffers of the request
* @bd_fill: Fill the SEC queue BD
@@ -56,18 +60,25 @@ struct sec_req {
* @process: Main processing logic of Skcipher
*/
struct sec_req_op {
- int (*get_res)(struct sec_ctx *ctx, struct sec_req *req);
- int (*resource_alloc)(struct sec_ctx *ctx, struct sec_qp_ctx *qp_ctx);
- void (*resource_free)(struct sec_ctx *ctx, struct sec_qp_ctx *qp_ctx);
int (*buf_map)(struct sec_ctx *ctx, struct sec_req *req);
void (*buf_unmap)(struct sec_ctx *ctx, struct sec_req *req);
void (*do_transfer)(struct sec_ctx *ctx, struct sec_req *req);
int (*bd_fill)(struct sec_ctx *ctx, struct sec_req *req);
int (*bd_send)(struct sec_ctx *ctx, struct sec_req *req);
- void (*callback)(struct sec_ctx *ctx, struct sec_req *req);
+ void (*callback)(struct sec_ctx *ctx, struct sec_req *req, int err);
int (*process)(struct sec_ctx *ctx, struct sec_req *req);
};
+/* SEC auth context */
+struct sec_auth_ctx {
+ dma_addr_t a_key_dma;
+ u8 *a_key;
+ u8 a_key_len;
+ u8 mac_len;
+ u8 a_alg;
+ struct crypto_shash *hash_tfm;
+};
+
/* SEC cipher context which defines cipher's relatives */
struct sec_cipher_ctx {
u8 *c_key;
@@ -83,9 +94,9 @@ struct sec_cipher_ctx {
/* SEC queue context which defines queue's relatives */
struct sec_qp_ctx {
struct hisi_qp *qp;
- struct sec_req **req_list;
+ struct sec_req *req_list[QM_Q_DEPTH];
struct idr req_idr;
- void *alg_meta_data;
+ struct sec_alg_res res[QM_Q_DEPTH];
struct sec_ctx *ctx;
struct mutex req_lock;
struct hisi_acc_sgl_pool *c_in_pool;
@@ -93,6 +104,11 @@ struct sec_qp_ctx {
atomic_t pending_reqs;
};
+enum sec_alg_type {
+ SEC_SKCIPHER,
+ SEC_AEAD
+};
+
/* SEC Crypto TFM context which defines queue, cipher and other relatives */
struct sec_ctx {
struct sec_qp_ctx *qp_ctx;
@@ -110,7 +126,10 @@ struct sec_ctx {
/* Current cyclic index to select a queue for decipher */
atomic_t dec_qcyclic;
+
+ enum sec_alg_type alg_type;
struct sec_cipher_ctx c_ctx;
+ struct sec_auth_ctx a_ctx;
};
enum sec_endian {
@@ -132,8 +151,8 @@ struct sec_debug_file {
};
struct sec_dfx {
- u64 send_cnt;
- u64 recv_cnt;
+ atomic64_t send_cnt;
+ atomic64_t recv_cnt;
};
struct sec_debug {
diff --git a/drivers/crypto/hisilicon/sec2/sec_crypto.c b/drivers/crypto/hisilicon/sec2/sec_crypto.c
index 62b04e19067c..a2cfcc9ccd94 100644
--- a/drivers/crypto/hisilicon/sec2/sec_crypto.c
+++ b/drivers/crypto/hisilicon/sec2/sec_crypto.c
@@ -3,7 +3,11 @@
#include <crypto/aes.h>
#include <crypto/algapi.h>
+#include <crypto/authenc.h>
#include <crypto/des.h>
+#include <crypto/hash.h>
+#include <crypto/internal/aead.h>
+#include <crypto/sha.h>
#include <crypto/skcipher.h>
#include <crypto/xts.h>
#include <linux/crypto.h>
@@ -27,6 +31,10 @@
#define SEC_SRC_SGL_OFFSET 7
#define SEC_CKEY_OFFSET 9
#define SEC_CMODE_OFFSET 12
+#define SEC_AKEY_OFFSET 5
+#define SEC_AEAD_ALG_OFFSET 11
+#define SEC_AUTH_OFFSET 6
+
#define SEC_FLAG_OFFSET 7
#define SEC_FLAG_MASK 0x0780
#define SEC_TYPE_MASK 0x0F
@@ -35,12 +43,19 @@
#define SEC_TOTAL_IV_SZ (SEC_IV_SIZE * QM_Q_DEPTH)
#define SEC_SGL_SGE_NR 128
#define SEC_CTX_DEV(ctx) (&(ctx)->sec->qm.pdev->dev)
+#define SEC_CIPHER_AUTH 0xfe
+#define SEC_AUTH_CIPHER 0x1
+#define SEC_MAX_MAC_LEN 64
+#define SEC_TOTAL_MAC_SZ (SEC_MAX_MAC_LEN * QM_Q_DEPTH)
+#define SEC_SQE_LEN_RATE 4
+#define SEC_SQE_CFLAG 2
+#define SEC_SQE_AEAD_FLAG 3
+#define SEC_SQE_DONE 0x1
-static DEFINE_MUTEX(sec_algs_lock);
-static unsigned int sec_active_devs;
+static atomic_t sec_active_devs;
/* Get an en/de-cipher queue cyclically to balance load over queues of TFM */
-static inline int sec_get_queue_id(struct sec_ctx *ctx, struct sec_req *req)
+static inline int sec_alloc_queue_id(struct sec_ctx *ctx, struct sec_req *req)
{
if (req->c_req.encrypt)
return (u32)atomic_inc_return(&ctx->enc_qcyclic) %
@@ -50,7 +65,7 @@ static inline int sec_get_queue_id(struct sec_ctx *ctx, struct sec_req *req)
ctx->hlf_q_num;
}
-static inline void sec_put_queue_id(struct sec_ctx *ctx, struct sec_req *req)
+static inline void sec_free_queue_id(struct sec_ctx *ctx, struct sec_req *req)
{
if (req->c_req.encrypt)
atomic_dec(&ctx->enc_qcyclic);
@@ -67,7 +82,7 @@ static int sec_alloc_req_id(struct sec_req *req, struct sec_qp_ctx *qp_ctx)
req_id = idr_alloc_cyclic(&qp_ctx->req_idr, NULL,
0, QM_Q_DEPTH, GFP_ATOMIC);
mutex_unlock(&qp_ctx->req_lock);
- if (req_id < 0) {
+ if (unlikely(req_id < 0)) {
dev_err(SEC_CTX_DEV(req->ctx), "alloc req id fail!\n");
return req_id;
}
@@ -82,7 +97,7 @@ static void sec_free_req_id(struct sec_req *req)
struct sec_qp_ctx *qp_ctx = req->qp_ctx;
int req_id = req->req_id;
- if (req_id < 0 || req_id >= QM_Q_DEPTH) {
+ if (unlikely(req_id < 0 || req_id >= QM_Q_DEPTH)) {
dev_err(SEC_CTX_DEV(req->ctx), "free request id invalid!\n");
return;
}
@@ -95,36 +110,66 @@ static void sec_free_req_id(struct sec_req *req)
mutex_unlock(&qp_ctx->req_lock);
}
+static int sec_aead_verify(struct sec_req *req, struct sec_qp_ctx *qp_ctx)
+{
+ struct aead_request *aead_req = req->aead_req.aead_req;
+ struct crypto_aead *tfm = crypto_aead_reqtfm(aead_req);
+ u8 *mac_out = qp_ctx->res[req->req_id].out_mac;
+ size_t authsize = crypto_aead_authsize(tfm);
+ u8 *mac = mac_out + SEC_MAX_MAC_LEN;
+ struct scatterlist *sgl = aead_req->src;
+ size_t sz;
+
+ sz = sg_pcopy_to_buffer(sgl, sg_nents(sgl), mac, authsize,
+ aead_req->cryptlen + aead_req->assoclen -
+ authsize);
+ if (unlikely(sz != authsize || memcmp(mac_out, mac, sz))) {
+ dev_err(SEC_CTX_DEV(req->ctx), "aead verify failure!\n");
+ return -EBADMSG;
+ }
+
+ return 0;
+}
+
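sec_aead_verify() above compares the hardware-computed MAC in out_mac against the transmitted tag, which it copies out of the source scatterlist at offset assoclen + cryptlen - authsize. One hedged observation: memcmp() is not constant-time, and crypto code usually compares authentication tags with crypto_memneq() to avoid a timing side channel. A sketch of that variant (hypothetical helper, assuming authsize <= 64):

    #include <crypto/algapi.h>      /* crypto_memneq() */
    #include <linux/scatterlist.h>

    static int check_tag(struct scatterlist *src, const u8 *calc,
                         size_t authsize, size_t tag_off)
    {
            u8 tag[64];     /* assumes authsize <= 64 */
            size_t sz;

            sz = sg_pcopy_to_buffer(src, sg_nents(src), tag, authsize,
                                    tag_off);
            /* crypto_memneq() runs in constant time, unlike memcmp() */
            if (sz != authsize || crypto_memneq(calc, tag, authsize))
                    return -EBADMSG;
            return 0;
    }
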
static void sec_req_cb(struct hisi_qp *qp, void *resp)
{
struct sec_qp_ctx *qp_ctx = qp->qp_ctx;
struct sec_sqe *bd = resp;
+ struct sec_ctx *ctx;
+ struct sec_req *req;
u16 done, flag;
+ int err = 0;
u8 type;
- struct sec_req *req;
type = bd->type_cipher_auth & SEC_TYPE_MASK;
- if (type == SEC_BD_TYPE2) {
- req = qp_ctx->req_list[le16_to_cpu(bd->type2.tag)];
- req->err_type = bd->type2.error_type;
-
- done = le16_to_cpu(bd->type2.done_flag) & SEC_DONE_MASK;
- flag = (le16_to_cpu(bd->type2.done_flag) &
- SEC_FLAG_MASK) >> SEC_FLAG_OFFSET;
- if (req->err_type || done != 0x1 || flag != 0x2)
- dev_err(SEC_CTX_DEV(req->ctx),
- "err_type[%d],done[%d],flag[%d]\n",
- req->err_type, done, flag);
- } else {
+ if (unlikely(type != SEC_BD_TYPE2)) {
pr_err("err bd type [%d]\n", type);
return;
}
- __sync_add_and_fetch(&req->ctx->sec->debug.dfx.recv_cnt, 1);
+ req = qp_ctx->req_list[le16_to_cpu(bd->type2.tag)];
+ req->err_type = bd->type2.error_type;
+ ctx = req->ctx;
+ done = le16_to_cpu(bd->type2.done_flag) & SEC_DONE_MASK;
+ flag = (le16_to_cpu(bd->type2.done_flag) &
+ SEC_FLAG_MASK) >> SEC_FLAG_OFFSET;
+ if (unlikely(req->err_type || done != SEC_SQE_DONE ||
+ (ctx->alg_type == SEC_SKCIPHER && flag != SEC_SQE_CFLAG) ||
+ (ctx->alg_type == SEC_AEAD && flag != SEC_SQE_AEAD_FLAG))) {
+ dev_err(SEC_CTX_DEV(ctx),
+ "err_type[%d],done[%d],flag[%d]\n",
+ req->err_type, done, flag);
+ err = -EIO;
+ }
+
+ if (ctx->alg_type == SEC_AEAD && !req->c_req.encrypt)
+ err = sec_aead_verify(req, qp_ctx);
- req->ctx->req_op->buf_unmap(req->ctx, req);
+ atomic64_inc(&ctx->sec->debug.dfx.recv_cnt);
- req->ctx->req_op->callback(req->ctx, req);
+ ctx->req_op->buf_unmap(ctx, req);
+
+ ctx->req_op->callback(ctx, req, err);
}
static int sec_bd_send(struct sec_ctx *ctx, struct sec_req *req)
@@ -135,9 +180,9 @@ static int sec_bd_send(struct sec_ctx *ctx, struct sec_req *req)
mutex_lock(&qp_ctx->req_lock);
ret = hisi_qp_send(qp_ctx->qp, &req->sec_sqe);
mutex_unlock(&qp_ctx->req_lock);
- __sync_add_and_fetch(&ctx->sec->debug.dfx.send_cnt, 1);
+ atomic64_inc(&ctx->sec->debug.dfx.send_cnt);
- if (ret == -EBUSY)
+ if (unlikely(ret == -EBUSY))
return -ENOBUFS;
if (!ret) {
@@ -150,6 +195,91 @@ static int sec_bd_send(struct sec_ctx *ctx, struct sec_req *req)
return ret;
}
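
The __sync_add_and_fetch() GCC builtins are replaced by the kernel's atomic64_t API, which is portable (32-bit targets get a spinlock-backed fallback) and pairs with the atomic64 debugfs reader added in sec_main.c further down. The counter idiom in isolation:

    #include <linux/atomic.h>

    static atomic64_t send_cnt = ATOMIC64_INIT(0);

    static void count_send(void)
    {
            atomic64_inc(&send_cnt);        /* safe from any context */
    }

    static s64 sample_send(void)
    {
            return atomic64_read(&send_cnt);
    }
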
+/* Get DMA memory resources */
+static int sec_alloc_civ_resource(struct device *dev, struct sec_alg_res *res)
+{
+ int i;
+
+ res->c_ivin = dma_alloc_coherent(dev, SEC_TOTAL_IV_SZ,
+ &res->c_ivin_dma, GFP_KERNEL);
+ if (!res->c_ivin)
+ return -ENOMEM;
+
+ for (i = 1; i < QM_Q_DEPTH; i++) {
+ res[i].c_ivin_dma = res->c_ivin_dma + i * SEC_IV_SIZE;
+ res[i].c_ivin = res->c_ivin + i * SEC_IV_SIZE;
+ }
+
+ return 0;
+}
+
+static void sec_free_civ_resource(struct device *dev, struct sec_alg_res *res)
+{
+ if (res->c_ivin)
+ dma_free_coherent(dev, SEC_TOTAL_IV_SZ,
+ res->c_ivin, res->c_ivin_dma);
+}
+
+static int sec_alloc_mac_resource(struct device *dev, struct sec_alg_res *res)
+{
+ int i;
+
+ res->out_mac = dma_alloc_coherent(dev, SEC_TOTAL_MAC_SZ << 1,
+ &res->out_mac_dma, GFP_KERNEL);
+ if (!res->out_mac)
+ return -ENOMEM;
+
+ for (i = 1; i < QM_Q_DEPTH; i++) {
+ res[i].out_mac_dma = res->out_mac_dma +
+ i * (SEC_MAX_MAC_LEN << 1);
+ res[i].out_mac = res->out_mac + i * (SEC_MAX_MAC_LEN << 1);
+ }
+
+ return 0;
+}
+
+static void sec_free_mac_resource(struct device *dev, struct sec_alg_res *res)
+{
+ if (res->out_mac)
+ dma_free_coherent(dev, SEC_TOTAL_MAC_SZ << 1,
+ res->out_mac, res->out_mac_dma);
+}
+
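Both the IV and MAC pools above use the same layout trick: one dma_alloc_coherent() region is carved into QM_Q_DEPTH fixed-size slots, and element 0 of the res[] array keeps the base virtual/DMA addresses, which is why the free paths only inspect res->c_ivin and res->out_mac. A generic sketch of the slicing, with hypothetical sizes standing in for QM_Q_DEPTH and SEC_IV_SIZE:

    #include <linux/dma-mapping.h>

    #define NSLOTS  1024
    #define SLOT_SZ 16

    struct slot {
            u8 *va;
            dma_addr_t dma;
    };

    /* Slot 0 owns the backing allocation; every other slot is a view. */
    static int alloc_slots(struct device *dev, struct slot *s)
    {
            int i;

            s[0].va = dma_alloc_coherent(dev, NSLOTS * SLOT_SZ,
                                         &s[0].dma, GFP_KERNEL);
            if (!s[0].va)
                    return -ENOMEM;

            for (i = 1; i < NSLOTS; i++) {
                    s[i].va  = s[0].va  + i * SLOT_SZ;
                    s[i].dma = s[0].dma + i * SLOT_SZ;
            }
            return 0;
    }
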
+static int sec_alg_resource_alloc(struct sec_ctx *ctx,
+ struct sec_qp_ctx *qp_ctx)
+{
+ struct device *dev = SEC_CTX_DEV(ctx);
+ struct sec_alg_res *res = qp_ctx->res;
+ int ret;
+
+ ret = sec_alloc_civ_resource(dev, res);
+ if (ret)
+ return ret;
+
+ if (ctx->alg_type == SEC_AEAD) {
+ ret = sec_alloc_mac_resource(dev, res);
+ if (ret)
+ goto get_fail;
+ }
+
+ return 0;
+get_fail:
+ sec_free_civ_resource(dev, res);
+
+ return ret;
+}
+
+static void sec_alg_resource_free(struct sec_ctx *ctx,
+ struct sec_qp_ctx *qp_ctx)
+{
+ struct device *dev = SEC_CTX_DEV(ctx);
+
+ sec_free_civ_resource(dev, qp_ctx->res);
+
+ if (ctx->alg_type == SEC_AEAD)
+ sec_free_mac_resource(dev, qp_ctx->res);
+}
+
static int sec_create_qp_ctx(struct hisi_qm *qm, struct sec_ctx *ctx,
int qp_ctx_id, int alg_type)
{
@@ -173,15 +303,11 @@ static int sec_create_qp_ctx(struct hisi_qm *qm, struct sec_ctx *ctx,
atomic_set(&qp_ctx->pending_reqs, 0);
idr_init(&qp_ctx->req_idr);
- qp_ctx->req_list = kcalloc(QM_Q_DEPTH, sizeof(void *), GFP_ATOMIC);
- if (!qp_ctx->req_list)
- goto err_destroy_idr;
-
qp_ctx->c_in_pool = hisi_acc_create_sgl_pool(dev, QM_Q_DEPTH,
SEC_SGL_SGE_NR);
if (IS_ERR(qp_ctx->c_in_pool)) {
dev_err(dev, "fail to create sgl pool for input!\n");
- goto err_free_req_list;
+ goto err_destroy_idr;
}
qp_ctx->c_out_pool = hisi_acc_create_sgl_pool(dev, QM_Q_DEPTH,
@@ -191,7 +317,7 @@ static int sec_create_qp_ctx(struct hisi_qm *qm, struct sec_ctx *ctx,
goto err_free_c_in_pool;
}
- ret = ctx->req_op->resource_alloc(ctx, qp_ctx);
+ ret = sec_alg_resource_alloc(ctx, qp_ctx);
if (ret)
goto err_free_c_out_pool;
@@ -202,13 +328,11 @@ static int sec_create_qp_ctx(struct hisi_qm *qm, struct sec_ctx *ctx,
return 0;
err_queue_free:
- ctx->req_op->resource_free(ctx, qp_ctx);
+ sec_alg_resource_free(ctx, qp_ctx);
err_free_c_out_pool:
hisi_acc_free_sgl_pool(dev, qp_ctx->c_out_pool);
err_free_c_in_pool:
hisi_acc_free_sgl_pool(dev, qp_ctx->c_in_pool);
-err_free_req_list:
- kfree(qp_ctx->req_list);
err_destroy_idr:
idr_destroy(&qp_ctx->req_idr);
hisi_qm_release_qp(qp);
@@ -222,66 +346,42 @@ static void sec_release_qp_ctx(struct sec_ctx *ctx,
struct device *dev = SEC_CTX_DEV(ctx);
hisi_qm_stop_qp(qp_ctx->qp);
- ctx->req_op->resource_free(ctx, qp_ctx);
+ sec_alg_resource_free(ctx, qp_ctx);
hisi_acc_free_sgl_pool(dev, qp_ctx->c_out_pool);
hisi_acc_free_sgl_pool(dev, qp_ctx->c_in_pool);
idr_destroy(&qp_ctx->req_idr);
- kfree(qp_ctx->req_list);
hisi_qm_release_qp(qp_ctx->qp);
}
-static int sec_skcipher_init(struct crypto_skcipher *tfm)
+static int sec_ctx_base_init(struct sec_ctx *ctx)
{
- struct sec_ctx *ctx = crypto_skcipher_ctx(tfm);
- struct sec_cipher_ctx *c_ctx;
struct sec_dev *sec;
- struct device *dev;
- struct hisi_qm *qm;
int i, ret;
- crypto_skcipher_set_reqsize(tfm, sizeof(struct sec_req));
-
sec = sec_find_device(cpu_to_node(smp_processor_id()));
if (!sec) {
- pr_err("find no Hisilicon SEC device!\n");
+ pr_err("Can not find proper Hisilicon SEC device!\n");
return -ENODEV;
}
ctx->sec = sec;
- qm = &sec->qm;
- dev = &qm->pdev->dev;
- ctx->hlf_q_num = sec->ctx_q_num >> 0x1;
+ ctx->hlf_q_num = sec->ctx_q_num >> 1;
/* Half of queue depth is taken as fake requests limit in the queue. */
- ctx->fake_req_limit = QM_Q_DEPTH >> 0x1;
+ ctx->fake_req_limit = QM_Q_DEPTH >> 1;
ctx->qp_ctx = kcalloc(sec->ctx_q_num, sizeof(struct sec_qp_ctx),
GFP_KERNEL);
if (!ctx->qp_ctx)
return -ENOMEM;
for (i = 0; i < sec->ctx_q_num; i++) {
- ret = sec_create_qp_ctx(qm, ctx, i, 0);
+ ret = sec_create_qp_ctx(&sec->qm, ctx, i, 0);
if (ret)
goto err_sec_release_qp_ctx;
}
- c_ctx = &ctx->c_ctx;
- c_ctx->ivsize = crypto_skcipher_ivsize(tfm);
- if (c_ctx->ivsize > SEC_IV_SIZE) {
- dev_err(dev, "get error iv size!\n");
- ret = -EINVAL;
- goto err_sec_release_qp_ctx;
- }
- c_ctx->c_key = dma_alloc_coherent(dev, SEC_MAX_KEY_SIZE,
- &c_ctx->c_key_dma, GFP_KERNEL);
- if (!c_ctx->c_key) {
- ret = -ENOMEM;
- goto err_sec_release_qp_ctx;
- }
-
return 0;
-
err_sec_release_qp_ctx:
for (i = i - 1; i >= 0; i--)
sec_release_qp_ctx(ctx, &ctx->qp_ctx[i]);
@@ -290,17 +390,9 @@ err_sec_release_qp_ctx:
return ret;
}
-static void sec_skcipher_exit(struct crypto_skcipher *tfm)
+static void sec_ctx_base_uninit(struct sec_ctx *ctx)
{
- struct sec_ctx *ctx = crypto_skcipher_ctx(tfm);
- struct sec_cipher_ctx *c_ctx = &ctx->c_ctx;
- int i = 0;
-
- if (c_ctx->c_key) {
- dma_free_coherent(SEC_CTX_DEV(ctx), SEC_MAX_KEY_SIZE,
- c_ctx->c_key, c_ctx->c_key_dma);
- c_ctx->c_key = NULL;
- }
+ int i;
for (i = 0; i < ctx->sec->ctx_q_num; i++)
sec_release_qp_ctx(ctx, &ctx->qp_ctx[i]);
@@ -308,6 +400,85 @@ static void sec_skcipher_exit(struct crypto_skcipher *tfm)
kfree(ctx->qp_ctx);
}
+static int sec_cipher_init(struct sec_ctx *ctx)
+{
+ struct sec_cipher_ctx *c_ctx = &ctx->c_ctx;
+
+ c_ctx->c_key = dma_alloc_coherent(SEC_CTX_DEV(ctx), SEC_MAX_KEY_SIZE,
+ &c_ctx->c_key_dma, GFP_KERNEL);
+ if (!c_ctx->c_key)
+ return -ENOMEM;
+
+ return 0;
+}
+
+static void sec_cipher_uninit(struct sec_ctx *ctx)
+{
+ struct sec_cipher_ctx *c_ctx = &ctx->c_ctx;
+
+ memzero_explicit(c_ctx->c_key, SEC_MAX_KEY_SIZE);
+ dma_free_coherent(SEC_CTX_DEV(ctx), SEC_MAX_KEY_SIZE,
+ c_ctx->c_key, c_ctx->c_key_dma);
+}
+
+static int sec_auth_init(struct sec_ctx *ctx)
+{
+ struct sec_auth_ctx *a_ctx = &ctx->a_ctx;
+
+ a_ctx->a_key = dma_alloc_coherent(SEC_CTX_DEV(ctx), SEC_MAX_KEY_SIZE,
+ &a_ctx->a_key_dma, GFP_KERNEL);
+ if (!a_ctx->a_key)
+ return -ENOMEM;
+
+ return 0;
+}
+
+static void sec_auth_uninit(struct sec_ctx *ctx)
+{
+ struct sec_auth_ctx *a_ctx = &ctx->a_ctx;
+
+ memzero_explicit(a_ctx->a_key, SEC_MAX_KEY_SIZE);
+ dma_free_coherent(SEC_CTX_DEV(ctx), SEC_MAX_KEY_SIZE,
+ a_ctx->a_key, a_ctx->a_key_dma);
+}
+
+static int sec_skcipher_init(struct crypto_skcipher *tfm)
+{
+ struct sec_ctx *ctx = crypto_skcipher_ctx(tfm);
+ int ret;
+
+ ctx = crypto_skcipher_ctx(tfm);
+ ctx->alg_type = SEC_SKCIPHER;
+ crypto_skcipher_set_reqsize(tfm, sizeof(struct sec_req));
+ ctx->c_ctx.ivsize = crypto_skcipher_ivsize(tfm);
+ if (ctx->c_ctx.ivsize > SEC_IV_SIZE) {
+ dev_err(SEC_CTX_DEV(ctx), "get error skcipher iv size!\n");
+ return -EINVAL;
+ }
+
+ ret = sec_ctx_base_init(ctx);
+ if (ret)
+ return ret;
+
+ ret = sec_cipher_init(ctx);
+ if (ret)
+ goto err_cipher_init;
+
+ return 0;
+err_cipher_init:
+ sec_ctx_base_uninit(ctx);
+
+ return ret;
+}
+
+static void sec_skcipher_uninit(struct crypto_skcipher *tfm)
+{
+ struct sec_ctx *ctx = crypto_skcipher_ctx(tfm);
+
+ sec_cipher_uninit(ctx);
+ sec_ctx_base_uninit(ctx);
+}
+
static int sec_skcipher_3des_setkey(struct sec_cipher_ctx *c_ctx,
const u32 keylen,
const enum sec_cmode c_mode)
@@ -420,62 +591,8 @@ GEN_SEC_SETKEY_FUNC(3des_cbc, SEC_CALG_3DES, SEC_CMODE_CBC)
GEN_SEC_SETKEY_FUNC(sm4_xts, SEC_CALG_SM4, SEC_CMODE_XTS)
GEN_SEC_SETKEY_FUNC(sm4_cbc, SEC_CALG_SM4, SEC_CMODE_CBC)
-static int sec_skcipher_get_res(struct sec_ctx *ctx,
- struct sec_req *req)
-{
- struct sec_qp_ctx *qp_ctx = req->qp_ctx;
- struct sec_cipher_res *c_res = qp_ctx->alg_meta_data;
- struct sec_cipher_req *c_req = &req->c_req;
- int req_id = req->req_id;
-
- c_req->c_ivin = c_res[req_id].c_ivin;
- c_req->c_ivin_dma = c_res[req_id].c_ivin_dma;
-
- return 0;
-}
-
-static int sec_skcipher_resource_alloc(struct sec_ctx *ctx,
- struct sec_qp_ctx *qp_ctx)
-{
- struct device *dev = SEC_CTX_DEV(ctx);
- struct sec_cipher_res *res;
- int i;
-
- res = kcalloc(QM_Q_DEPTH, sizeof(struct sec_cipher_res), GFP_KERNEL);
- if (!res)
- return -ENOMEM;
-
- res->c_ivin = dma_alloc_coherent(dev, SEC_TOTAL_IV_SZ,
- &res->c_ivin_dma, GFP_KERNEL);
- if (!res->c_ivin) {
- kfree(res);
- return -ENOMEM;
- }
-
- for (i = 1; i < QM_Q_DEPTH; i++) {
- res[i].c_ivin_dma = res->c_ivin_dma + i * SEC_IV_SIZE;
- res[i].c_ivin = res->c_ivin + i * SEC_IV_SIZE;
- }
- qp_ctx->alg_meta_data = res;
-
- return 0;
-}
-
-static void sec_skcipher_resource_free(struct sec_ctx *ctx,
- struct sec_qp_ctx *qp_ctx)
-{
- struct sec_cipher_res *res = qp_ctx->alg_meta_data;
- struct device *dev = SEC_CTX_DEV(ctx);
-
- if (!res)
- return;
-
- dma_free_coherent(dev, SEC_TOTAL_IV_SZ, res->c_ivin, res->c_ivin_dma);
- kfree(res);
-}
-
-static int sec_skcipher_map(struct device *dev, struct sec_req *req,
- struct scatterlist *src, struct scatterlist *dst)
+static int sec_cipher_map(struct device *dev, struct sec_req *req,
+ struct scatterlist *src, struct scatterlist *dst)
{
struct sec_cipher_req *c_req = &req->c_req;
struct sec_qp_ctx *qp_ctx = req->qp_ctx;
@@ -509,12 +626,20 @@ static int sec_skcipher_map(struct device *dev, struct sec_req *req,
return 0;
}
+static void sec_cipher_unmap(struct device *dev, struct sec_cipher_req *req,
+ struct scatterlist *src, struct scatterlist *dst)
+{
+ if (dst != src)
+ hisi_acc_sg_buf_unmap(dev, src, req->c_in);
+
+ hisi_acc_sg_buf_unmap(dev, dst, req->c_out);
+}
+
static int sec_skcipher_sgl_map(struct sec_ctx *ctx, struct sec_req *req)
{
- struct sec_cipher_req *c_req = &req->c_req;
+ struct skcipher_request *sq = req->c_req.sk_req;
- return sec_skcipher_map(SEC_CTX_DEV(ctx), req,
- c_req->sk_req->src, c_req->sk_req->dst);
+ return sec_cipher_map(SEC_CTX_DEV(ctx), req, sq->src, sq->dst);
}
static void sec_skcipher_sgl_unmap(struct sec_ctx *ctx, struct sec_req *req)
@@ -523,10 +648,127 @@ static void sec_skcipher_sgl_unmap(struct sec_ctx *ctx, struct sec_req *req)
struct sec_cipher_req *c_req = &req->c_req;
struct skcipher_request *sk_req = c_req->sk_req;
- if (sk_req->dst != sk_req->src)
- hisi_acc_sg_buf_unmap(dev, sk_req->src, c_req->c_in);
+ sec_cipher_unmap(dev, c_req, sk_req->src, sk_req->dst);
+}
+
+static int sec_aead_aes_set_key(struct sec_cipher_ctx *c_ctx,
+ struct crypto_authenc_keys *keys)
+{
+ switch (keys->enckeylen) {
+ case AES_KEYSIZE_128:
+ c_ctx->c_key_len = SEC_CKEY_128BIT;
+ break;
+ case AES_KEYSIZE_192:
+ c_ctx->c_key_len = SEC_CKEY_192BIT;
+ break;
+ case AES_KEYSIZE_256:
+ c_ctx->c_key_len = SEC_CKEY_256BIT;
+ break;
+ default:
+ pr_err("hisi_sec2: aead aes key error!\n");
+ return -EINVAL;
+ }
+ memcpy(c_ctx->c_key, keys->enckey, keys->enckeylen);
+
+ return 0;
+}
+
+static int sec_aead_auth_set_key(struct sec_auth_ctx *ctx,
+ struct crypto_authenc_keys *keys)
+{
+ struct crypto_shash *hash_tfm = ctx->hash_tfm;
+ SHASH_DESC_ON_STACK(shash, hash_tfm);
+ int blocksize, ret;
- hisi_acc_sg_buf_unmap(dev, sk_req->dst, c_req->c_out);
+ if (!keys->authkeylen) {
+ pr_err("hisi_sec2: aead auth key error!\n");
+ return -EINVAL;
+ }
+
+ blocksize = crypto_shash_blocksize(hash_tfm);
+ if (keys->authkeylen > blocksize) {
+ ret = crypto_shash_digest(shash, keys->authkey,
+ keys->authkeylen, ctx->a_key);
+ if (ret) {
+ pr_err("hisi_sec2: aead auth digest error!\n");
+ return -EINVAL;
+ }
+ ctx->a_key_len = blocksize;
+ } else {
+ memcpy(ctx->a_key, keys->authkey, keys->authkeylen);
+ ctx->a_key_len = keys->authkeylen;
+ }
+
+ return 0;
+}
+
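The digest-or-copy branch above is the standard HMAC key rule from RFC 2104: a key longer than the hash block size is replaced by its digest, anything shorter is used as-is (the HMAC construction itself zero-pads). A condensed sketch of the rule; unlike the driver, it records the digest size as the new length, and note that SHASH_DESC_ON_STACK() does not initialize the tfm pointer:

    #include <crypto/hash.h>
    #include <linux/string.h>

    /* RFC 2104: K' = H(K) when len(K) > blocksize, else K' = K. */
    static int normalize_hmac_key(struct crypto_shash *tfm,
                                  const u8 *key, unsigned int keylen,
                                  u8 *out, unsigned int *outlen)
    {
            SHASH_DESC_ON_STACK(shash, tfm);

            shash->tfm = tfm;       /* the macro leaves this unset */

            if (keylen <= crypto_shash_blocksize(tfm)) {
                    memcpy(out, key, keylen);
                    *outlen = keylen;
                    return 0;
            }

            *outlen = crypto_shash_digestsize(tfm);
            return crypto_shash_digest(shash, key, keylen, out);
    }
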
+static int sec_aead_setkey(struct crypto_aead *tfm, const u8 *key,
+ const u32 keylen, const enum sec_hash_alg a_alg,
+ const enum sec_calg c_alg,
+ const enum sec_mac_len mac_len,
+ const enum sec_cmode c_mode)
+{
+ struct sec_ctx *ctx = crypto_aead_ctx(tfm);
+ struct sec_cipher_ctx *c_ctx = &ctx->c_ctx;
+ struct crypto_authenc_keys keys;
+ int ret;
+
+ ctx->a_ctx.a_alg = a_alg;
+ ctx->c_ctx.c_alg = c_alg;
+ ctx->a_ctx.mac_len = mac_len;
+ c_ctx->c_mode = c_mode;
+
+ if (crypto_authenc_extractkeys(&keys, key, keylen))
+ goto bad_key;
+
+ ret = sec_aead_aes_set_key(c_ctx, &keys);
+ if (ret) {
+ dev_err(SEC_CTX_DEV(ctx), "set sec cipher key err!\n");
+ goto bad_key;
+ }
+
+ ret = sec_aead_auth_set_key(&ctx->a_ctx, &keys);
+ if (ret) {
+ dev_err(SEC_CTX_DEV(ctx), "set sec auth key err!\n");
+ goto bad_key;
+ }
+
+ return 0;
+bad_key:
+ memzero_explicit(&keys, sizeof(struct crypto_authenc_keys));
+
+ return -EINVAL;
+}
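
crypto_authenc_extractkeys() is the standard way to split an authenc() key blob: the blob begins with an rtattr carrying the cipher key length, followed by the authentication key and then the cipher key, and the helper returns pointers into the blob rather than copies. Minimal usage sketch (hypothetical caller):

    #include <crypto/authenc.h>
    #include <linux/printk.h>

    static int split_authenc_key(const u8 *key, unsigned int keylen)
    {
            struct crypto_authenc_keys keys;

            /* keys.authkey/keys.enckey point into @key; no copy made */
            if (crypto_authenc_extractkeys(&keys, key, keylen))
                    return -EINVAL;

            pr_debug("authkeylen=%u enckeylen=%u\n",
                     keys.authkeylen, keys.enckeylen);
            return 0;
    }
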
+
+#define GEN_SEC_AEAD_SETKEY_FUNC(name, aalg, calg, maclen, cmode) \
+static int sec_setkey_##name(struct crypto_aead *tfm, const u8 *key, \
+ u32 keylen) \
+{ \
+ return sec_aead_setkey(tfm, key, keylen, aalg, calg, maclen, cmode);\
+}
+
+GEN_SEC_AEAD_SETKEY_FUNC(aes_cbc_sha1, SEC_A_HMAC_SHA1,
+ SEC_CALG_AES, SEC_HMAC_SHA1_MAC, SEC_CMODE_CBC)
+GEN_SEC_AEAD_SETKEY_FUNC(aes_cbc_sha256, SEC_A_HMAC_SHA256,
+ SEC_CALG_AES, SEC_HMAC_SHA256_MAC, SEC_CMODE_CBC)
+GEN_SEC_AEAD_SETKEY_FUNC(aes_cbc_sha512, SEC_A_HMAC_SHA512,
+ SEC_CALG_AES, SEC_HMAC_SHA512_MAC, SEC_CMODE_CBC)
+
+static int sec_aead_sgl_map(struct sec_ctx *ctx, struct sec_req *req)
+{
+ struct aead_request *aq = req->aead_req.aead_req;
+
+ return sec_cipher_map(SEC_CTX_DEV(ctx), req, aq->src, aq->dst);
+}
+
+static void sec_aead_sgl_unmap(struct sec_ctx *ctx, struct sec_req *req)
+{
+ struct device *dev = SEC_CTX_DEV(ctx);
+ struct sec_cipher_req *cq = &req->c_req;
+ struct aead_request *aq = req->aead_req.aead_req;
+
+ sec_cipher_unmap(dev, cq, aq->src, aq->dst);
}
static int sec_request_transfer(struct sec_ctx *ctx, struct sec_req *req)
@@ -534,13 +776,13 @@ static int sec_request_transfer(struct sec_ctx *ctx, struct sec_req *req)
int ret;
ret = ctx->req_op->buf_map(ctx, req);
- if (ret)
+ if (unlikely(ret))
return ret;
ctx->req_op->do_transfer(ctx, req);
ret = ctx->req_op->bd_fill(ctx, req);
- if (ret)
+ if (unlikely(ret))
goto unmap_req_buf;
return ret;
@@ -559,10 +801,9 @@ static void sec_request_untransfer(struct sec_ctx *ctx, struct sec_req *req)
static void sec_skcipher_copy_iv(struct sec_ctx *ctx, struct sec_req *req)
{
struct skcipher_request *sk_req = req->c_req.sk_req;
- struct sec_cipher_req *c_req = &req->c_req;
+ u8 *c_ivin = req->qp_ctx->res[req->req_id].c_ivin;
- c_req->c_len = sk_req->cryptlen;
- memcpy(c_req->c_ivin, sk_req->iv, ctx->c_ctx.ivsize);
+ memcpy(c_ivin, sk_req->iv, ctx->c_ctx.ivsize);
}
static int sec_skcipher_bd_fill(struct sec_ctx *ctx, struct sec_req *req)
@@ -570,14 +811,15 @@ static int sec_skcipher_bd_fill(struct sec_ctx *ctx, struct sec_req *req)
struct sec_cipher_ctx *c_ctx = &ctx->c_ctx;
struct sec_cipher_req *c_req = &req->c_req;
struct sec_sqe *sec_sqe = &req->sec_sqe;
- u8 de = 0;
u8 scene, sa_type, da_type;
u8 bd_type, cipher;
+ u8 de = 0;
memset(sec_sqe, 0, sizeof(struct sec_sqe));
sec_sqe->type2.c_key_addr = cpu_to_le64(c_ctx->c_key_dma);
- sec_sqe->type2.c_ivin_addr = cpu_to_le64(c_req->c_ivin_dma);
+ sec_sqe->type2.c_ivin_addr =
+ cpu_to_le64(req->qp_ctx->res[req->req_id].c_ivin_dma);
sec_sqe->type2.data_src_addr = cpu_to_le64(c_req->c_in_dma);
sec_sqe->type2.data_dst_addr = cpu_to_le64(c_req->c_out_dma);
@@ -611,25 +853,37 @@ static int sec_skcipher_bd_fill(struct sec_ctx *ctx, struct sec_req *req)
return 0;
}
-static void sec_update_iv(struct sec_req *req)
+static void sec_update_iv(struct sec_req *req, enum sec_alg_type alg_type)
{
+ struct aead_request *aead_req = req->aead_req.aead_req;
struct skcipher_request *sk_req = req->c_req.sk_req;
u32 iv_size = req->ctx->c_ctx.ivsize;
struct scatterlist *sgl;
+ unsigned int cryptlen;
size_t sz;
+ u8 *iv;
if (req->c_req.encrypt)
- sgl = sk_req->dst;
+ sgl = alg_type == SEC_SKCIPHER ? sk_req->dst : aead_req->dst;
else
- sgl = sk_req->src;
+ sgl = alg_type == SEC_SKCIPHER ? sk_req->src : aead_req->src;
- sz = sg_pcopy_to_buffer(sgl, sg_nents(sgl), sk_req->iv,
- iv_size, sk_req->cryptlen - iv_size);
- if (sz != iv_size)
+ if (alg_type == SEC_SKCIPHER) {
+ iv = sk_req->iv;
+ cryptlen = sk_req->cryptlen;
+ } else {
+ iv = aead_req->iv;
+ cryptlen = aead_req->cryptlen;
+ }
+
+ sz = sg_pcopy_to_buffer(sgl, sg_nents(sgl), iv, iv_size,
+ cryptlen - iv_size);
+ if (unlikely(sz != iv_size))
dev_err(SEC_CTX_DEV(req->ctx), "copy output iv error!\n");
}
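
For CBC the next IV is the last ciphertext block, so sec_update_iv() copies iv_size bytes starting at cryptlen - iv_size from the relevant scatterlist (dst after encryption, src for decryption) back into the request IV. The copy in isolation, as a hypothetical helper:

    #include <linux/scatterlist.h>

    /* CBC chaining: the final ciphertext block becomes the next IV. */
    static int save_next_iv(struct scatterlist *sgl, u8 *iv,
                            unsigned int iv_size, unsigned int cryptlen)
    {
            size_t sz;

            sz = sg_pcopy_to_buffer(sgl, sg_nents(sgl), iv, iv_size,
                                    cryptlen - iv_size);
            return sz == iv_size ? 0 : -EINVAL;
    }
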
-static void sec_skcipher_callback(struct sec_ctx *ctx, struct sec_req *req)
+static void sec_skcipher_callback(struct sec_ctx *ctx, struct sec_req *req,
+ int err)
{
struct skcipher_request *sk_req = req->c_req.sk_req;
struct sec_qp_ctx *qp_ctx = req->qp_ctx;
@@ -638,13 +892,109 @@ static void sec_skcipher_callback(struct sec_ctx *ctx, struct sec_req *req)
sec_free_req_id(req);
/* IV output at encryption of CBC mode */
- if (ctx->c_ctx.c_mode == SEC_CMODE_CBC && req->c_req.encrypt)
- sec_update_iv(req);
+ if (!err && ctx->c_ctx.c_mode == SEC_CMODE_CBC && req->c_req.encrypt)
+ sec_update_iv(req, SEC_SKCIPHER);
- if (__sync_bool_compare_and_swap(&req->fake_busy, 1, 0))
+ if (req->fake_busy)
sk_req->base.complete(&sk_req->base, -EINPROGRESS);
- sk_req->base.complete(&sk_req->base, req->err_type);
+ sk_req->base.complete(&sk_req->base, err);
+}
+
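The fake_busy flag mirrors the crypto API's backlog convention: a request accepted while the queue was over its soft limit was answered with -EBUSY at submit time, so on completion the driver first signals -EINPROGRESS (the backlog slot has cleared and the caller may submit again) and only then delivers the real status. The two-step completion in isolation:

    #include <linux/crypto.h>

    static void complete_backlogged(struct crypto_async_request *base,
                                    bool fake_busy, int err)
    {
            /* Step 1: tell a throttled caller it may submit again. */
            if (fake_busy)
                    base->complete(base, -EINPROGRESS);

            /* Step 2: report the actual result of this request. */
            base->complete(base, err);
    }
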
+static void sec_aead_copy_iv(struct sec_ctx *ctx, struct sec_req *req)
+{
+ struct aead_request *aead_req = req->aead_req.aead_req;
+ u8 *c_ivin = req->qp_ctx->res[req->req_id].c_ivin;
+
+ memcpy(c_ivin, aead_req->iv, ctx->c_ctx.ivsize);
+}
+
+static void sec_auth_bd_fill_ex(struct sec_auth_ctx *ctx, int dir,
+ struct sec_req *req, struct sec_sqe *sec_sqe)
+{
+ struct sec_aead_req *a_req = &req->aead_req;
+ struct sec_cipher_req *c_req = &req->c_req;
+ struct aead_request *aq = a_req->aead_req;
+
+ sec_sqe->type2.a_key_addr = cpu_to_le64(ctx->a_key_dma);
+
+ sec_sqe->type2.mac_key_alg =
+ cpu_to_le32(ctx->mac_len / SEC_SQE_LEN_RATE);
+
+ sec_sqe->type2.mac_key_alg |=
+ cpu_to_le32((u32)((ctx->a_key_len) /
+ SEC_SQE_LEN_RATE) << SEC_AKEY_OFFSET);
+
+ sec_sqe->type2.mac_key_alg |=
+ cpu_to_le32((u32)(ctx->a_alg) << SEC_AEAD_ALG_OFFSET);
+
+ sec_sqe->type_cipher_auth |= SEC_AUTH_TYPE1 << SEC_AUTH_OFFSET;
+
+ if (dir)
+ sec_sqe->sds_sa_type &= SEC_CIPHER_AUTH;
+ else
+ sec_sqe->sds_sa_type |= SEC_AUTH_CIPHER;
+
+ sec_sqe->type2.alen_ivllen = cpu_to_le32(c_req->c_len + aq->assoclen);
+
+ sec_sqe->type2.cipher_src_offset = cpu_to_le16((u16)aq->assoclen);
+
+ sec_sqe->type2.mac_addr =
+ cpu_to_le64(req->qp_ctx->res[req->req_id].out_mac_dma);
+}
+
+static int sec_aead_bd_fill(struct sec_ctx *ctx, struct sec_req *req)
+{
+ struct sec_auth_ctx *auth_ctx = &ctx->a_ctx;
+ struct sec_sqe *sec_sqe = &req->sec_sqe;
+ int ret;
+
+ ret = sec_skcipher_bd_fill(ctx, req);
+ if (unlikely(ret)) {
+ dev_err(SEC_CTX_DEV(ctx), "skcipher bd fill error!\n");
+ return ret;
+ }
+
+ sec_auth_bd_fill_ex(auth_ctx, req->c_req.encrypt, req, sec_sqe);
+
+ return 0;
+}
+
+static void sec_aead_callback(struct sec_ctx *c, struct sec_req *req, int err)
+{
+ struct aead_request *a_req = req->aead_req.aead_req;
+ struct crypto_aead *tfm = crypto_aead_reqtfm(a_req);
+ struct sec_cipher_req *c_req = &req->c_req;
+ size_t authsize = crypto_aead_authsize(tfm);
+ struct sec_qp_ctx *qp_ctx = req->qp_ctx;
+ size_t sz;
+
+ atomic_dec(&qp_ctx->pending_reqs);
+
+ if (!err && c->c_ctx.c_mode == SEC_CMODE_CBC && c_req->encrypt)
+ sec_update_iv(req, SEC_AEAD);
+
+ /* Copy output mac */
+ if (!err && c_req->encrypt) {
+ struct scatterlist *sgl = a_req->dst;
+
+ sz = sg_pcopy_from_buffer(sgl, sg_nents(sgl),
+ qp_ctx->res[req->req_id].out_mac,
+ authsize, a_req->cryptlen +
+ a_req->assoclen);
+
+ if (unlikely(sz != authsize)) {
+ dev_err(SEC_CTX_DEV(req->ctx), "copy out mac err!\n");
+ err = -EINVAL;
+ }
+ }
+
+ sec_free_req_id(req);
+
+ if (req->fake_busy)
+ a_req->base.complete(&a_req->base, -EINPROGRESS);
+
+ a_req->base.complete(&a_req->base, err);
}
static void sec_request_uninit(struct sec_ctx *ctx, struct sec_req *req)
@@ -653,37 +1003,30 @@ static void sec_request_uninit(struct sec_ctx *ctx, struct sec_req *req)
atomic_dec(&qp_ctx->pending_reqs);
sec_free_req_id(req);
- sec_put_queue_id(ctx, req);
+ sec_free_queue_id(ctx, req);
}
static int sec_request_init(struct sec_ctx *ctx, struct sec_req *req)
{
struct sec_qp_ctx *qp_ctx;
- int issue_id, ret;
+ int queue_id;
/* For load balancing */
- issue_id = sec_get_queue_id(ctx, req);
- qp_ctx = &ctx->qp_ctx[issue_id];
+ queue_id = sec_alloc_queue_id(ctx, req);
+ qp_ctx = &ctx->qp_ctx[queue_id];
req->req_id = sec_alloc_req_id(req, qp_ctx);
- if (req->req_id < 0) {
- sec_put_queue_id(ctx, req);
+ if (unlikely(req->req_id < 0)) {
+ sec_free_queue_id(ctx, req);
return req->req_id;
}
if (ctx->fake_req_limit <= atomic_inc_return(&qp_ctx->pending_reqs))
- req->fake_busy = 1;
+ req->fake_busy = true;
else
- req->fake_busy = 0;
-
- ret = ctx->req_op->get_res(ctx, req);
- if (ret) {
- atomic_dec(&qp_ctx->pending_reqs);
- sec_request_uninit(ctx, req);
- dev_err(SEC_CTX_DEV(ctx), "get resources failed!\n");
- }
+ req->fake_busy = false;
- return ret;
+ return 0;
}
static int sec_process(struct sec_ctx *ctx, struct sec_req *req)
@@ -691,20 +1034,20 @@ static int sec_process(struct sec_ctx *ctx, struct sec_req *req)
int ret;
ret = sec_request_init(ctx, req);
- if (ret)
+ if (unlikely(ret))
return ret;
ret = sec_request_transfer(ctx, req);
- if (ret)
+ if (unlikely(ret))
goto err_uninit_req;
/* Output IV at decryption */
if (ctx->c_ctx.c_mode == SEC_CMODE_CBC && !req->c_req.encrypt)
- sec_update_iv(req);
+ sec_update_iv(req, ctx->alg_type);
ret = ctx->req_op->bd_send(ctx, req);
- if (ret != -EBUSY && ret != -EINPROGRESS) {
- dev_err(SEC_CTX_DEV(ctx), "send sec request failed!\n");
+ if (unlikely(ret != -EBUSY && ret != -EINPROGRESS)) {
+ dev_err_ratelimited(SEC_CTX_DEV(ctx), "send sec request failed!\n");
goto err_send_req;
}
@@ -712,9 +1055,16 @@ static int sec_process(struct sec_ctx *ctx, struct sec_req *req)
err_send_req:
/* On failure, restore the IV from the user */
- if (ctx->c_ctx.c_mode == SEC_CMODE_CBC && !req->c_req.encrypt)
- memcpy(req->c_req.sk_req->iv, req->c_req.c_ivin,
- ctx->c_ctx.ivsize);
+ if (ctx->c_ctx.c_mode == SEC_CMODE_CBC && !req->c_req.encrypt) {
+ if (ctx->alg_type == SEC_SKCIPHER)
+ memcpy(req->c_req.sk_req->iv,
+ req->qp_ctx->res[req->req_id].c_ivin,
+ ctx->c_ctx.ivsize);
+ else
+ memcpy(req->aead_req.aead_req->iv,
+ req->qp_ctx->res[req->req_id].c_ivin,
+ ctx->c_ctx.ivsize);
+ }
sec_request_untransfer(ctx, req);
err_uninit_req:
@@ -723,10 +1073,7 @@ err_uninit_req:
return ret;
}
-static struct sec_req_op sec_req_ops_tbl = {
- .get_res = sec_skcipher_get_res,
- .resource_alloc = sec_skcipher_resource_alloc,
- .resource_free = sec_skcipher_resource_free,
+static const struct sec_req_op sec_skcipher_req_ops = {
.buf_map = sec_skcipher_sgl_map,
.buf_unmap = sec_skcipher_sgl_unmap,
.do_transfer = sec_skcipher_copy_iv,
@@ -736,39 +1083,139 @@ static struct sec_req_op sec_req_ops_tbl = {
.process = sec_process,
};
+static const struct sec_req_op sec_aead_req_ops = {
+ .buf_map = sec_aead_sgl_map,
+ .buf_unmap = sec_aead_sgl_unmap,
+ .do_transfer = sec_aead_copy_iv,
+ .bd_fill = sec_aead_bd_fill,
+ .bd_send = sec_bd_send,
+ .callback = sec_aead_callback,
+ .process = sec_process,
+};
+
static int sec_skcipher_ctx_init(struct crypto_skcipher *tfm)
{
struct sec_ctx *ctx = crypto_skcipher_ctx(tfm);
- ctx->req_op = &sec_req_ops_tbl;
+ ctx->req_op = &sec_skcipher_req_ops;
return sec_skcipher_init(tfm);
}
static void sec_skcipher_ctx_exit(struct crypto_skcipher *tfm)
{
- sec_skcipher_exit(tfm);
+ sec_skcipher_uninit(tfm);
}
-static int sec_skcipher_param_check(struct sec_ctx *ctx,
- struct skcipher_request *sk_req)
+static int sec_aead_init(struct crypto_aead *tfm)
{
- u8 c_alg = ctx->c_ctx.c_alg;
+ struct sec_ctx *ctx = crypto_aead_ctx(tfm);
+ int ret;
+
+ crypto_aead_set_reqsize(tfm, sizeof(struct sec_req));
+ ctx->alg_type = SEC_AEAD;
+ ctx->c_ctx.ivsize = crypto_aead_ivsize(tfm);
+ if (ctx->c_ctx.ivsize > SEC_IV_SIZE) {
+ dev_err(SEC_CTX_DEV(ctx), "get error aead iv size!\n");
+ return -EINVAL;
+ }
+
+ ctx->req_op = &sec_aead_req_ops;
+ ret = sec_ctx_base_init(ctx);
+ if (ret)
+ return ret;
+
+ ret = sec_auth_init(ctx);
+ if (ret)
+ goto err_auth_init;
+
+ ret = sec_cipher_init(ctx);
+ if (ret)
+ goto err_cipher_init;
+
+ return ret;
+
+err_cipher_init:
+ sec_auth_uninit(ctx);
+err_auth_init:
+ sec_ctx_base_uninit(ctx);
+
+ return ret;
+}
+
+static void sec_aead_exit(struct crypto_aead *tfm)
+{
+ struct sec_ctx *ctx = crypto_aead_ctx(tfm);
+
+ sec_cipher_uninit(ctx);
+ sec_auth_uninit(ctx);
+ sec_ctx_base_uninit(ctx);
+}
+
+static int sec_aead_ctx_init(struct crypto_aead *tfm, const char *hash_name)
+{
+ struct sec_ctx *ctx = crypto_aead_ctx(tfm);
+ struct sec_auth_ctx *auth_ctx = &ctx->a_ctx;
+ int ret;
+
+ ret = sec_aead_init(tfm);
+ if (ret) {
+ pr_err("hisi_sec2: aead init error!\n");
+ return ret;
+ }
+
+ auth_ctx->hash_tfm = crypto_alloc_shash(hash_name, 0, 0);
+ if (IS_ERR(auth_ctx->hash_tfm)) {
+ dev_err(SEC_CTX_DEV(ctx), "aead alloc shash error!\n");
+ sec_aead_exit(tfm);
+ return PTR_ERR(auth_ctx->hash_tfm);
+ }
+
+ return 0;
+}
+
+static void sec_aead_ctx_exit(struct crypto_aead *tfm)
+{
+ struct sec_ctx *ctx = crypto_aead_ctx(tfm);
+
+ crypto_free_shash(ctx->a_ctx.hash_tfm);
+ sec_aead_exit(tfm);
+}
+
+static int sec_aead_sha1_ctx_init(struct crypto_aead *tfm)
+{
+ return sec_aead_ctx_init(tfm, "sha1");
+}
+
+static int sec_aead_sha256_ctx_init(struct crypto_aead *tfm)
+{
+ return sec_aead_ctx_init(tfm, "sha256");
+}
+
+static int sec_aead_sha512_ctx_init(struct crypto_aead *tfm)
+{
+ return sec_aead_ctx_init(tfm, "sha512");
+}
+
+static int sec_skcipher_param_check(struct sec_ctx *ctx, struct sec_req *sreq)
+{
+ struct skcipher_request *sk_req = sreq->c_req.sk_req;
struct device *dev = SEC_CTX_DEV(ctx);
+ u8 c_alg = ctx->c_ctx.c_alg;
- if (!sk_req->src || !sk_req->dst) {
+ if (unlikely(!sk_req->src || !sk_req->dst)) {
dev_err(dev, "skcipher input param error!\n");
return -EINVAL;
}
-
+ sreq->c_req.c_len = sk_req->cryptlen;
if (c_alg == SEC_CALG_3DES) {
- if (sk_req->cryptlen & (DES3_EDE_BLOCK_SIZE - 1)) {
+ if (unlikely(sk_req->cryptlen & (DES3_EDE_BLOCK_SIZE - 1))) {
dev_err(dev, "skcipher 3des input length error!\n");
return -EINVAL;
}
return 0;
} else if (c_alg == SEC_CALG_AES || c_alg == SEC_CALG_SM4) {
- if (sk_req->cryptlen & (AES_BLOCK_SIZE - 1)) {
+ if (unlikely(sk_req->cryptlen & (AES_BLOCK_SIZE - 1))) {
dev_err(dev, "skcipher aes input length error!\n");
return -EINVAL;
}
@@ -789,14 +1236,14 @@ static int sec_skcipher_crypto(struct skcipher_request *sk_req, bool encrypt)
if (!sk_req->cryptlen)
return 0;
- ret = sec_skcipher_param_check(ctx, sk_req);
- if (ret)
- return ret;
-
req->c_req.sk_req = sk_req;
req->c_req.encrypt = encrypt;
req->ctx = ctx;
+ ret = sec_skcipher_param_check(ctx, req);
+ if (unlikely(ret))
+ return -EINVAL;
+
return ctx->req_op->process(ctx, req);
}
@@ -837,7 +1284,7 @@ static int sec_skcipher_decrypt(struct skcipher_request *sk_req)
SEC_SKCIPHER_GEN_ALG(name, key_func, min_key_size, max_key_size, \
sec_skcipher_ctx_init, sec_skcipher_ctx_exit, blk_size, iv_size)
-static struct skcipher_alg sec_algs[] = {
+static struct skcipher_alg sec_skciphers[] = {
SEC_SKCIPHER_ALG("ecb(aes)", sec_setkey_aes_ecb,
AES_MIN_KEY_SIZE, AES_MAX_KEY_SIZE,
AES_BLOCK_SIZE, 0)
@@ -867,23 +1314,133 @@ static struct skcipher_alg sec_algs[] = {
AES_BLOCK_SIZE, AES_BLOCK_SIZE)
};
+static int sec_aead_param_check(struct sec_ctx *ctx, struct sec_req *sreq)
+{
+ u8 c_alg = ctx->c_ctx.c_alg;
+ struct aead_request *req = sreq->aead_req.aead_req;
+ struct crypto_aead *tfm = crypto_aead_reqtfm(req);
+ size_t authsize = crypto_aead_authsize(tfm);
+
+ if (unlikely(!req->src || !req->dst || !req->cryptlen)) {
+ dev_err(SEC_CTX_DEV(ctx), "aead input param error!\n");
+ return -EINVAL;
+ }
+
+ /* Support AES only */
+ if (unlikely(c_alg != SEC_CALG_AES)) {
+ dev_err(SEC_CTX_DEV(ctx), "aead crypto alg error!\n");
+ return -EINVAL;
+ }
+
+ if (sreq->c_req.encrypt)
+ sreq->c_req.c_len = req->cryptlen;
+ else
+ sreq->c_req.c_len = req->cryptlen - authsize;
+
+ if (unlikely(sreq->c_req.c_len & (AES_BLOCK_SIZE - 1))) {
+ dev_err(SEC_CTX_DEV(ctx), "aead crypto length error!\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int sec_aead_crypto(struct aead_request *a_req, bool encrypt)
+{
+ struct crypto_aead *tfm = crypto_aead_reqtfm(a_req);
+ struct sec_req *req = aead_request_ctx(a_req);
+ struct sec_ctx *ctx = crypto_aead_ctx(tfm);
+ int ret;
+
+ req->aead_req.aead_req = a_req;
+ req->c_req.encrypt = encrypt;
+ req->ctx = ctx;
+
+ ret = sec_aead_param_check(ctx, req);
+ if (unlikely(ret))
+ return -EINVAL;
+
+ return ctx->req_op->process(ctx, req);
+}
+
+static int sec_aead_encrypt(struct aead_request *a_req)
+{
+ return sec_aead_crypto(a_req, true);
+}
+
+static int sec_aead_decrypt(struct aead_request *a_req)
+{
+ return sec_aead_crypto(a_req, false);
+}
+
+#define SEC_AEAD_GEN_ALG(sec_cra_name, sec_set_key, ctx_init,\
+ ctx_exit, blk_size, iv_size, max_authsize)\
+{\
+ .base = {\
+ .cra_name = sec_cra_name,\
+ .cra_driver_name = "hisi_sec_"sec_cra_name,\
+ .cra_priority = SEC_PRIORITY,\
+ .cra_flags = CRYPTO_ALG_ASYNC,\
+ .cra_blocksize = blk_size,\
+ .cra_ctxsize = sizeof(struct sec_ctx),\
+ .cra_module = THIS_MODULE,\
+ },\
+ .init = ctx_init,\
+ .exit = ctx_exit,\
+ .setkey = sec_set_key,\
+ .decrypt = sec_aead_decrypt,\
+ .encrypt = sec_aead_encrypt,\
+ .ivsize = iv_size,\
+ .maxauthsize = max_authsize,\
+}
+
+#define SEC_AEAD_ALG(algname, keyfunc, aead_init, blksize, ivsize, authsize)\
+ SEC_AEAD_GEN_ALG(algname, keyfunc, aead_init,\
+ sec_aead_ctx_exit, blksize, ivsize, authsize)
+
+static struct aead_alg sec_aeads[] = {
+ SEC_AEAD_ALG("authenc(hmac(sha1),cbc(aes))",
+ sec_setkey_aes_cbc_sha1, sec_aead_sha1_ctx_init,
+ AES_BLOCK_SIZE, AES_BLOCK_SIZE, SHA1_DIGEST_SIZE),
+
+ SEC_AEAD_ALG("authenc(hmac(sha256),cbc(aes))",
+ sec_setkey_aes_cbc_sha256, sec_aead_sha256_ctx_init,
+ AES_BLOCK_SIZE, AES_BLOCK_SIZE, SHA256_DIGEST_SIZE),
+
+ SEC_AEAD_ALG("authenc(hmac(sha512),cbc(aes))",
+ sec_setkey_aes_cbc_sha512, sec_aead_sha512_ctx_init,
+ AES_BLOCK_SIZE, AES_BLOCK_SIZE, SHA512_DIGEST_SIZE),
+};
+
int sec_register_to_crypto(void)
{
int ret = 0;
/* To avoid repeated registration */
- mutex_lock(&sec_algs_lock);
- if (++sec_active_devs == 1)
- ret = crypto_register_skciphers(sec_algs, ARRAY_SIZE(sec_algs));
- mutex_unlock(&sec_algs_lock);
+ if (atomic_add_return(1, &sec_active_devs) == 1) {
+ ret = crypto_register_skciphers(sec_skciphers,
+ ARRAY_SIZE(sec_skciphers));
+ if (ret)
+ return ret;
+
+ ret = crypto_register_aeads(sec_aeads, ARRAY_SIZE(sec_aeads));
+ if (ret)
+ goto reg_aead_fail;
+ }
+
+ return ret;
+
+reg_aead_fail:
+ crypto_unregister_skciphers(sec_skciphers, ARRAY_SIZE(sec_skciphers));
return ret;
}
void sec_unregister_from_crypto(void)
{
- mutex_lock(&sec_algs_lock);
- if (--sec_active_devs == 0)
- crypto_unregister_skciphers(sec_algs, ARRAY_SIZE(sec_algs));
- mutex_unlock(&sec_algs_lock);
+ if (atomic_sub_return(1, &sec_active_devs) == 0) {
+ crypto_unregister_skciphers(sec_skciphers,
+ ARRAY_SIZE(sec_skciphers));
+ crypto_unregister_aeads(sec_aeads, ARRAY_SIZE(sec_aeads));
+ }
}
diff --git a/drivers/crypto/hisilicon/sec2/sec_crypto.h b/drivers/crypto/hisilicon/sec2/sec_crypto.h
index 097dce828340..b2786e17d8fe 100644
--- a/drivers/crypto/hisilicon/sec2/sec_crypto.h
+++ b/drivers/crypto/hisilicon/sec2/sec_crypto.h
@@ -14,6 +14,18 @@ enum sec_calg {
SEC_CALG_SM4 = 0x3,
};
+enum sec_hash_alg {
+ SEC_A_HMAC_SHA1 = 0x10,
+ SEC_A_HMAC_SHA256 = 0x11,
+ SEC_A_HMAC_SHA512 = 0x15,
+};
+
+enum sec_mac_len {
+ SEC_HMAC_SHA1_MAC = 20,
+ SEC_HMAC_SHA256_MAC = 32,
+ SEC_HMAC_SHA512_MAC = 64,
+};
+
enum sec_cmode {
SEC_CMODE_ECB = 0x0,
SEC_CMODE_CBC = 0x1,
@@ -34,6 +46,12 @@ enum sec_bd_type {
SEC_BD_TYPE2 = 0x2,
};
+enum sec_auth {
+ SEC_NO_AUTH = 0x0,
+ SEC_AUTH_TYPE1 = 0x1,
+ SEC_AUTH_TYPE2 = 0x2,
+};
+
enum sec_cipher_dir {
SEC_CIPHER_ENC = 0x1,
SEC_CIPHER_DEC = 0x2,
@@ -48,8 +66,8 @@ enum sec_addr_type {
struct sec_sqe_type2 {
/*
- * mac_len: 0~5 bits
- * a_key_len: 6~10 bits
+ * mac_len: 0~4 bits
+ * a_key_len: 5~10 bits
* a_alg: 11~16 bits
*/
__le32 mac_key_alg;
diff --git a/drivers/crypto/hisilicon/sec2/sec_main.c b/drivers/crypto/hisilicon/sec2/sec_main.c
index 74f0654028c9..2bbaf1e2dae7 100644
--- a/drivers/crypto/hisilicon/sec2/sec_main.c
+++ b/drivers/crypto/hisilicon/sec2/sec_main.c
@@ -32,6 +32,7 @@
#define SEC_PF_DEF_Q_NUM 64
#define SEC_PF_DEF_Q_BASE 0
#define SEC_CTX_Q_NUM_DEF 24
+#define SEC_CTX_Q_NUM_MAX 32
#define SEC_CTRL_CNT_CLR_CE 0x301120
#define SEC_CTRL_CNT_CLR_CE_BIT BIT(0)
@@ -221,7 +222,7 @@ static int sec_ctx_q_num_set(const char *val, const struct kernel_param *kp)
if (ret)
return -EINVAL;
- if (!ctx_q_num || ctx_q_num > QM_Q_DEPTH || ctx_q_num & 0x1) {
+ if (!ctx_q_num || ctx_q_num > SEC_CTX_Q_NUM_MAX || ctx_q_num & 0x1) {
pr_err("ctx queue num[%u] is invalid!\n", ctx_q_num);
return -EINVAL;
}
@@ -235,7 +236,7 @@ static const struct kernel_param_ops sec_ctx_q_num_ops = {
};
static u32 ctx_q_num = SEC_CTX_Q_NUM_DEF;
module_param_cb(ctx_q_num, &sec_ctx_q_num_ops, &ctx_q_num, 0444);
-MODULE_PARM_DESC(ctx_q_num, "Number of queue in ctx (2, 4, 6, ..., 1024)");
+MODULE_PARM_DESC(ctx_q_num, "Queue num in ctx (2, 4, ..., 32; default 24)");
static const struct pci_device_id sec_dev_ids[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_HUAWEI, SEC_PF_PCI_DEVICE_ID) },
@@ -608,6 +609,14 @@ static const struct file_operations sec_dbg_fops = {
.write = sec_debug_write,
};
+static int sec_debugfs_atomic64_get(void *data, u64 *val)
+{
+ *val = atomic64_read((atomic64_t *)data);
+ return 0;
+}
+DEFINE_DEBUGFS_ATTRIBUTE(sec_atomic64_ops, sec_debugfs_atomic64_get,
+ NULL, "%lld\n");
+
static int sec_core_debug_init(struct sec_dev *sec)
{
struct hisi_qm *qm = &sec->qm;
@@ -628,9 +637,11 @@ static int sec_core_debug_init(struct sec_dev *sec)
debugfs_create_regset32("regs", 0444, tmp_d, regset);
- debugfs_create_u64("send_cnt", 0444, tmp_d, &dfx->send_cnt);
+ debugfs_create_file("send_cnt", 0444, tmp_d,
+ &dfx->send_cnt, &sec_atomic64_ops);
- debugfs_create_u64("recv_cnt", 0444, tmp_d, &dfx->recv_cnt);
+ debugfs_create_file("recv_cnt", 0444, tmp_d,
+ &dfx->recv_cnt, &sec_atomic64_ops);
return 0;
}
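
debugfs_create_u64() cannot expose an atomic64_t directly, so the patch wraps atomic64_read() in a getter and binds it with DEFINE_DEBUGFS_ATTRIBUTE(), which generates the file_operations including the "%lld\n" formatting. The pattern reduced to essentials, with hypothetical names:

    #include <linux/atomic.h>
    #include <linux/debugfs.h>

    static atomic64_t demo_cnt = ATOMIC64_INIT(0);

    static int demo_get(void *data, u64 *val)
    {
            *val = atomic64_read((atomic64_t *)data);
            return 0;
    }
    /* read-only attribute: no set callback */
    DEFINE_DEBUGFS_ATTRIBUTE(demo_fops, demo_get, NULL, "%lld\n");

    static void demo_debugfs(struct dentry *root)
    {
            debugfs_create_file("demo_cnt", 0444, root,
                                &demo_cnt, &demo_fops);
    }
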
diff --git a/drivers/crypto/hisilicon/sgl.c b/drivers/crypto/hisilicon/sgl.c
index 012023c347b1..0e8c7e324fb4 100644
--- a/drivers/crypto/hisilicon/sgl.c
+++ b/drivers/crypto/hisilicon/sgl.c
@@ -202,18 +202,21 @@ hisi_acc_sg_buf_map_to_hw_sgl(struct device *dev,
dma_addr_t curr_sgl_dma = 0;
struct acc_hw_sge *curr_hw_sge;
struct scatterlist *sg;
- int i, ret, sg_n;
+ int i, sg_n, sg_n_mapped;
if (!dev || !sgl || !pool || !hw_sgl_dma)
return ERR_PTR(-EINVAL);
sg_n = sg_nents(sgl);
- if (sg_n > pool->sge_nr)
+
+ sg_n_mapped = dma_map_sg(dev, sgl, sg_n, DMA_BIDIRECTIONAL);
+ if (!sg_n_mapped)
return ERR_PTR(-EINVAL);
- ret = dma_map_sg(dev, sgl, sg_n, DMA_BIDIRECTIONAL);
- if (!ret)
+ if (sg_n_mapped > pool->sge_nr) {
+ dma_unmap_sg(dev, sgl, sg_n, DMA_BIDIRECTIONAL);
return ERR_PTR(-EINVAL);
+ }
curr_hw_sgl = acc_get_sgl(pool, index, &curr_sgl_dma);
if (IS_ERR(curr_hw_sgl)) {
@@ -224,7 +227,7 @@ hisi_acc_sg_buf_map_to_hw_sgl(struct device *dev,
curr_hw_sgl->entry_length_in_sgl = cpu_to_le16(pool->sge_nr);
curr_hw_sge = curr_hw_sgl->sge_entries;
- for_each_sg(sgl, sg, sg_n, i) {
+ for_each_sg(sgl, sg, sg_n_mapped, i) {
sg_map_to_hw_sg(sg, curr_hw_sge);
inc_hw_sgl_sge(curr_hw_sgl);
curr_hw_sge++;
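
The reordering in this hunk fixes two subtleties: dma_map_sg() may merge entries (an IOMMU can coalesce), so the SGE-count limit has to be checked against the mapped count rather than sg_nents(); and once the mapping exists, every later error path, including the too-many-entries case, must dma_unmap_sg() before returning. A condensed sketch of the map-check-unwind shape, with hypothetical names:

    #include <linux/dma-mapping.h>
    #include <linux/scatterlist.h>

    static int map_bounded(struct device *dev, struct scatterlist *sgl,
                           int max_sges)
    {
            int n = sg_nents(sgl);
            int mapped = dma_map_sg(dev, sgl, n, DMA_BIDIRECTIONAL);

            if (!mapped)
                    return -EINVAL;

            /* Validate after mapping: merging can change the count. */
            if (mapped > max_sges) {
                    dma_unmap_sg(dev, sgl, n, DMA_BIDIRECTIONAL);
                    return -EINVAL;
            }
            return mapped;
    }
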
@@ -260,7 +263,3 @@ void hisi_acc_sg_buf_unmap(struct device *dev, struct scatterlist *sgl,
hw_sgl->entry_length_in_sgl = 0;
}
EXPORT_SYMBOL_GPL(hisi_acc_sg_buf_unmap);
-
-MODULE_LICENSE("GPL v2");
-MODULE_AUTHOR("Zhou Wang <wangzhou1@hisilicon.com>");
-MODULE_DESCRIPTION("HiSilicon Accelerator SGL support");
diff --git a/drivers/crypto/hisilicon/zip/zip.h b/drivers/crypto/hisilicon/zip/zip.h
index 79fc4dd3fe00..bc1db26598bb 100644
--- a/drivers/crypto/hisilicon/zip/zip.h
+++ b/drivers/crypto/hisilicon/zip/zip.h
@@ -11,6 +11,10 @@
/* hisi_zip_sqe dw3 */
#define HZIP_BD_STATUS_M GENMASK(7, 0)
+/* hisi_zip_sqe dw7 */
+#define HZIP_IN_SGE_DATA_OFFSET_M GENMASK(23, 0)
+/* hisi_zip_sqe dw8 */
+#define HZIP_OUT_SGE_DATA_OFFSET_M GENMASK(23, 0)
/* hisi_zip_sqe dw9 */
#define HZIP_REQ_TYPE_M GENMASK(7, 0)
#define HZIP_ALG_TYPE_ZLIB 0x02
diff --git a/drivers/crypto/hisilicon/zip/zip_crypto.c b/drivers/crypto/hisilicon/zip/zip_crypto.c
index 795428c1d07e..9815d5e3ccd0 100644
--- a/drivers/crypto/hisilicon/zip/zip_crypto.c
+++ b/drivers/crypto/hisilicon/zip/zip_crypto.c
@@ -46,10 +46,8 @@ enum hisi_zip_alg_type {
struct hisi_zip_req {
struct acomp_req *req;
- struct scatterlist *src;
- struct scatterlist *dst;
- size_t slen;
- size_t dlen;
+ int sskip;
+ int dskip;
struct hisi_acc_hw_sgl *hw_src;
struct hisi_acc_hw_sgl *hw_dst;
dma_addr_t dma_src;
@@ -119,13 +117,15 @@ static void hisi_zip_config_tag(struct hisi_zip_sqe *sqe, u32 tag)
static void hisi_zip_fill_sqe(struct hisi_zip_sqe *sqe, u8 req_type,
dma_addr_t s_addr, dma_addr_t d_addr, u32 slen,
- u32 dlen)
+ u32 dlen, int sskip, int dskip)
{
memset(sqe, 0, sizeof(struct hisi_zip_sqe));
- sqe->input_data_length = slen;
+ sqe->input_data_length = slen - sskip;
+ sqe->dw7 = FIELD_PREP(HZIP_IN_SGE_DATA_OFFSET_M, sskip);
+ sqe->dw8 = FIELD_PREP(HZIP_OUT_SGE_DATA_OFFSET_M, dskip);
sqe->dw9 = FIELD_PREP(HZIP_REQ_TYPE_M, req_type);
- sqe->dest_avail_out = dlen;
+ sqe->dest_avail_out = dlen - dskip;
sqe->source_addr_l = lower_32_bits(s_addr);
sqe->source_addr_h = upper_32_bits(s_addr);
sqe->dest_addr_l = lower_32_bits(d_addr);
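
Instead of splitting the scatterlist to hide the zlib/gzip header from the hardware (the sg_split() path removed further down), the SQE now carries byte offsets in dw7/dw8: the engine itself skips sskip bytes of input and leaves dskip bytes of output headroom, so the programmed lengths shrink by the same amounts. The offset arithmetic in isolation, against a hypothetical mirror of the descriptor fields:

    struct zip_desc {
            unsigned int in_len;    /* input_data_length */
            unsigned int out_len;   /* dest_avail_out */
            unsigned int in_off;    /* dw7: HZIP_IN_SGE_DATA_OFFSET_M */
            unsigned int out_off;   /* dw8: HZIP_OUT_SGE_DATA_OFFSET_M */
    };

    static void fill_offsets(struct zip_desc *d, unsigned int slen,
                             unsigned int dlen, int sskip, int dskip)
    {
            d->in_off  = sskip;             /* engine starts reading here */
            d->out_off = dskip;             /* engine starts writing here */
            d->in_len  = slen - sskip;      /* payload minus the header */
            d->out_len = dlen - dskip;      /* room left after the header */
    }
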
@@ -327,11 +327,6 @@ static void hisi_zip_remove_req(struct hisi_zip_qp_ctx *qp_ctx,
{
struct hisi_zip_req_q *req_q = &qp_ctx->req_q;
- if (qp_ctx->qp->alg_type == HZIP_ALG_TYPE_COMP)
- kfree(req->dst);
- else
- kfree(req->src);
-
write_lock(&req_q->req_lock);
clear_bit(req->req_id, req_q->req_bitmap);
memset(req, 0, sizeof(struct hisi_zip_req));
@@ -359,8 +354,8 @@ static void hisi_zip_acomp_cb(struct hisi_qp *qp, void *data)
}
dlen = sqe->produced;
- hisi_acc_sg_buf_unmap(dev, req->src, req->hw_src);
- hisi_acc_sg_buf_unmap(dev, req->dst, req->hw_dst);
+ hisi_acc_sg_buf_unmap(dev, acomp_req->src, req->hw_src);
+ hisi_acc_sg_buf_unmap(dev, acomp_req->dst, req->hw_dst);
head_size = (qp->alg_type == 0) ? TO_HEAD_SIZE(qp->req_type) : 0;
acomp_req->dlen = dlen + head_size;
@@ -454,20 +449,6 @@ static size_t get_comp_head_size(struct scatterlist *src, u8 req_type)
}
}
-static int get_sg_skip_bytes(struct scatterlist *sgl, size_t bytes,
- size_t remains, struct scatterlist **out)
-{
-#define SPLIT_NUM 2
- size_t split_sizes[SPLIT_NUM];
- int out_mapped_nents[SPLIT_NUM];
-
- split_sizes[0] = bytes;
- split_sizes[1] = remains;
-
- return sg_split(sgl, 0, 0, SPLIT_NUM, split_sizes, out,
- out_mapped_nents, GFP_KERNEL);
-}
-
static struct hisi_zip_req *hisi_zip_create_req(struct acomp_req *req,
struct hisi_zip_qp_ctx *qp_ctx,
size_t head_size, bool is_comp)
@@ -475,31 +456,7 @@ static struct hisi_zip_req *hisi_zip_create_req(struct acomp_req *req,
struct hisi_zip_req_q *req_q = &qp_ctx->req_q;
struct hisi_zip_req *q = req_q->q;
struct hisi_zip_req *req_cache;
- struct scatterlist *out[2];
- struct scatterlist *sgl;
- size_t len;
- int ret, req_id;
-
- /*
- * remove/add zlib/gzip head, as hardware operations do not include
- * comp head. so split req->src to get sgl without heads in acomp, or
- * add comp head to req->dst ahead of that hardware output compressed
- * data in sgl splited from req->dst without comp head.
- */
- if (is_comp) {
- sgl = req->dst;
- len = req->dlen - head_size;
- } else {
- sgl = req->src;
- len = req->slen - head_size;
- }
-
- ret = get_sg_skip_bytes(sgl, head_size, len, out);
- if (ret)
- return ERR_PTR(ret);
-
- /* sgl for comp head is useless, so free it now */
- kfree(out[0]);
+ int req_id;
write_lock(&req_q->req_lock);
@@ -507,7 +464,6 @@ static struct hisi_zip_req *hisi_zip_create_req(struct acomp_req *req,
if (req_id >= req_q->size) {
write_unlock(&req_q->req_lock);
dev_dbg(&qp_ctx->qp->qm->pdev->dev, "req cache is full!\n");
- kfree(out[1]);
return ERR_PTR(-EBUSY);
}
set_bit(req_id, req_q->req_bitmap);
@@ -515,16 +471,13 @@ static struct hisi_zip_req *hisi_zip_create_req(struct acomp_req *req,
req_cache = q + req_id;
req_cache->req_id = req_id;
req_cache->req = req;
+
if (is_comp) {
- req_cache->src = req->src;
- req_cache->dst = out[1];
- req_cache->slen = req->slen;
- req_cache->dlen = req->dlen - head_size;
+ req_cache->sskip = 0;
+ req_cache->dskip = head_size;
} else {
- req_cache->src = out[1];
- req_cache->dst = req->dst;
- req_cache->slen = req->slen - head_size;
- req_cache->dlen = req->dlen;
+ req_cache->sskip = head_size;
+ req_cache->dskip = 0;
}
write_unlock(&req_q->req_lock);
@@ -536,6 +489,7 @@ static int hisi_zip_do_work(struct hisi_zip_req *req,
struct hisi_zip_qp_ctx *qp_ctx)
{
struct hisi_zip_sqe *zip_sqe = &qp_ctx->zip_sqe;
+ struct acomp_req *a_req = req->req;
struct hisi_qp *qp = qp_ctx->qp;
struct device *dev = &qp->qm->pdev->dev;
struct hisi_acc_sgl_pool *pool = qp_ctx->sgl_pool;
@@ -543,16 +497,16 @@ static int hisi_zip_do_work(struct hisi_zip_req *req,
dma_addr_t output;
int ret;
- if (!req->src || !req->slen || !req->dst || !req->dlen)
+ if (!a_req->src || !a_req->slen || !a_req->dst || !a_req->dlen)
return -EINVAL;
- req->hw_src = hisi_acc_sg_buf_map_to_hw_sgl(dev, req->src, pool,
+ req->hw_src = hisi_acc_sg_buf_map_to_hw_sgl(dev, a_req->src, pool,
req->req_id << 1, &input);
if (IS_ERR(req->hw_src))
return PTR_ERR(req->hw_src);
req->dma_src = input;
- req->hw_dst = hisi_acc_sg_buf_map_to_hw_sgl(dev, req->dst, pool,
+ req->hw_dst = hisi_acc_sg_buf_map_to_hw_sgl(dev, a_req->dst, pool,
(req->req_id << 1) + 1,
&output);
if (IS_ERR(req->hw_dst)) {
@@ -561,8 +515,8 @@ static int hisi_zip_do_work(struct hisi_zip_req *req,
}
req->dma_dst = output;
- hisi_zip_fill_sqe(zip_sqe, qp->req_type, input, output, req->slen,
- req->dlen);
+ hisi_zip_fill_sqe(zip_sqe, qp->req_type, input, output, a_req->slen,
+ a_req->dlen, req->sskip, req->dskip);
hisi_zip_config_buf_type(zip_sqe, HZIP_SGL);
hisi_zip_config_tag(zip_sqe, req->req_id);
@@ -574,9 +528,9 @@ static int hisi_zip_do_work(struct hisi_zip_req *req,
return -EINPROGRESS;
err_unmap_output:
- hisi_acc_sg_buf_unmap(dev, req->dst, req->hw_dst);
+ hisi_acc_sg_buf_unmap(dev, a_req->dst, req->hw_dst);
err_unmap_input:
- hisi_acc_sg_buf_unmap(dev, req->src, req->hw_src);
+ hisi_acc_sg_buf_unmap(dev, a_req->src, req->hw_src);
return ret;
}
diff --git a/drivers/crypto/img-hash.c b/drivers/crypto/img-hash.c
index fe4cc8babe1c..25d5227f74a1 100644
--- a/drivers/crypto/img-hash.c
+++ b/drivers/crypto/img-hash.c
@@ -332,10 +332,10 @@ static int img_hash_dma_init(struct img_hash_dev *hdev)
struct dma_slave_config dma_conf;
int err = -EINVAL;
- hdev->dma_lch = dma_request_slave_channel(hdev->dev, "tx");
- if (!hdev->dma_lch) {
+ hdev->dma_lch = dma_request_chan(hdev->dev, "tx");
+ if (IS_ERR(hdev->dma_lch)) {
dev_err(hdev->dev, "Couldn't acquire a slave DMA channel.\n");
- return -EBUSY;
+ return PTR_ERR(hdev->dma_lch);
}
dma_conf.direction = DMA_MEM_TO_DEV;
dma_conf.dst_addr = hdev->bus_addr;
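
dma_request_slave_channel() returned NULL on every failure, which hid probe deferral behind a generic -EBUSY; dma_request_chan() returns an ERR_PTR, so -EPROBE_DEFER can propagate and the probe is retried once the DMA controller appears. The canonical call shape:

    #include <linux/dmaengine.h>

    static int acquire_tx_chan(struct device *dev, struct dma_chan **chan)
    {
            *chan = dma_request_chan(dev, "tx");
            if (IS_ERR(*chan))
                    /* may be -EPROBE_DEFER; pass it up unchanged */
                    return PTR_ERR(*chan);
            return 0;
    }
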
diff --git a/drivers/crypto/inside-secure/safexcel.c b/drivers/crypto/inside-secure/safexcel.c
index 64894d8b442a..2cb53fbae841 100644
--- a/drivers/crypto/inside-secure/safexcel.c
+++ b/drivers/crypto/inside-secure/safexcel.c
@@ -501,8 +501,8 @@ static int safexcel_hw_setup_cdesc_rings(struct safexcel_crypto_priv *priv)
writel(upper_32_bits(priv->ring[i].cdr.base_dma),
EIP197_HIA_CDR(priv, i) + EIP197_HIA_xDR_RING_BASE_ADDR_HI);
- writel(EIP197_xDR_DESC_MODE_64BIT | (priv->config.cd_offset << 14) |
- priv->config.cd_size,
+ writel(EIP197_xDR_DESC_MODE_64BIT | EIP197_CDR_DESC_MODE_ADCP |
+ (priv->config.cd_offset << 14) | priv->config.cd_size,
EIP197_HIA_CDR(priv, i) + EIP197_HIA_xDR_DESC_SIZE);
writel(((cd_fetch_cnt *
(cd_size_rnd << priv->hwconfig.hwdataw)) << 16) |
@@ -974,16 +974,18 @@ int safexcel_invalidate_cache(struct crypto_async_request *async,
{
struct safexcel_command_desc *cdesc;
struct safexcel_result_desc *rdesc;
+ struct safexcel_token *dmmy;
int ret = 0;
/* Prepare command descriptor */
- cdesc = safexcel_add_cdesc(priv, ring, true, true, 0, 0, 0, ctxr_dma);
+ cdesc = safexcel_add_cdesc(priv, ring, true, true, 0, 0, 0, ctxr_dma,
+ &dmmy);
if (IS_ERR(cdesc))
return PTR_ERR(cdesc);
cdesc->control_data.type = EIP197_TYPE_EXTENDED;
cdesc->control_data.options = 0;
- cdesc->control_data.refresh = 0;
+ cdesc->control_data.context_lo &= ~EIP197_CONTEXT_SIZE_MASK;
cdesc->control_data.control0 = CONTEXT_CONTROL_INV_TR;
/* Prepare result descriptor */
@@ -1331,6 +1333,7 @@ static void safexcel_configure(struct safexcel_crypto_priv *priv)
priv->config.cd_size = EIP197_CD64_FETCH_SIZE;
priv->config.cd_offset = (priv->config.cd_size + mask) & ~mask;
+ priv->config.cdsh_offset = (EIP197_MAX_TOKENS + mask) & ~mask;
/* result token is behind the descriptor, but offset must be rounded to bus width */
priv->config.res_offset = (EIP197_RD64_FETCH_SIZE + mask) & ~mask;
@@ -1341,6 +1344,7 @@ static void safexcel_configure(struct safexcel_crypto_priv *priv)
/* convert dwords to bytes */
priv->config.cd_offset *= sizeof(u32);
+ priv->config.cdsh_offset *= sizeof(u32);
priv->config.rd_offset *= sizeof(u32);
priv->config.res_offset *= sizeof(u32);
}
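
Note: cdsh_offset sizes one shadow-descriptor slot: EIP197_MAX_TOKENS dwords rounded up to the bus width, then converted from dwords to bytes like the other offsets. A worked example, assuming a 128-bit bus (mask = 3) and the new EIP197_MAX_TOKENS value of 16:

    #include <stdio.h>

    int main(void)
    {
        unsigned int mask = 3;                   /* assumed: 4-dword bus - 1 */
        unsigned int cdsh = (16 + mask) & ~mask; /* round up: 16 dwords */

        cdsh *= sizeof(unsigned int);            /* dwords to bytes: 64 */
        printf("cdsh_offset = %u bytes\n", cdsh);
        return 0;
    }
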
diff --git a/drivers/crypto/inside-secure/safexcel.h b/drivers/crypto/inside-secure/safexcel.h
index b4624b5687ce..94016c505abb 100644
--- a/drivers/crypto/inside-secure/safexcel.h
+++ b/drivers/crypto/inside-secure/safexcel.h
@@ -40,7 +40,8 @@
/* Static configuration */
#define EIP197_DEFAULT_RING_SIZE 400
-#define EIP197_MAX_TOKENS 19
+#define EIP197_EMB_TOKENS 4 /* Pad CD to 16 dwords */
+#define EIP197_MAX_TOKENS 16
#define EIP197_MAX_RINGS 4
#define EIP197_FETCH_DEPTH 2
#define EIP197_MAX_BATCH_SZ 64
@@ -207,6 +208,7 @@
/* EIP197_HIA_xDR_DESC_SIZE */
#define EIP197_xDR_DESC_MODE_64BIT BIT(31)
+#define EIP197_CDR_DESC_MODE_ADCP BIT(30)
/* EIP197_HIA_xDR_DMA_CFG */
#define EIP197_HIA_xDR_WR_RES_BUF BIT(22)
@@ -277,9 +279,9 @@
#define EIP197_HIA_DxE_CFG_MIN_CTRL_SIZE(n) ((n) << 16)
#define EIP197_HIA_DxE_CFG_CTRL_CACHE_CTRL(n) (((n) & 0x7) << 20)
#define EIP197_HIA_DxE_CFG_MAX_CTRL_SIZE(n) ((n) << 24)
-#define EIP197_HIA_DFE_CFG_DIS_DEBUG (BIT(31) | BIT(29))
+#define EIP197_HIA_DFE_CFG_DIS_DEBUG GENMASK(31, 29)
#define EIP197_HIA_DSE_CFG_EN_SINGLE_WR BIT(29)
-#define EIP197_HIA_DSE_CFG_DIS_DEBUG BIT(31)
+#define EIP197_HIA_DSE_CFG_DIS_DEBUG GENMASK(31, 30)
/* EIP197_HIA_DFE/DSE_THR_CTRL */
#define EIP197_DxE_THR_CTRL_EN BIT(30)
@@ -553,6 +555,8 @@ static inline void eip197_noop_token(struct safexcel_token *token)
{
token->opcode = EIP197_TOKEN_OPCODE_NOOP;
token->packet_length = BIT(2);
+ token->stat = 0;
+ token->instructions = 0;
}
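
Note: descriptors are no longer zeroed wholesale (the memset is dropped in safexcel_ring.c further down), so the NOP helper must clear stat and instructions explicitly or stale LAST_* bits from a previous request could leak into the new token. Illustrative sketch only, not the hardware bit layout:

    struct token_sketch {
        unsigned int opcode;
        unsigned int packet_length;
        unsigned int stat;
        unsigned int instructions;
    };

    static void noop_token_sketch(struct token_sketch *t)
    {
        t->opcode = 0;              /* stands in for EIP197_TOKEN_OPCODE_NOOP */
        t->packet_length = 1u << 2;
        t->stat = 0;                /* must not inherit LAST_* bits */
        t->instructions = 0;
    }
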
/* Instructions */
@@ -574,14 +578,13 @@ struct safexcel_control_data_desc {
u16 application_id;
u16 rsvd;
- u8 refresh:2;
- u32 context_lo:30;
+ u32 context_lo;
u32 context_hi;
u32 control0;
u32 control1;
- u32 token[EIP197_MAX_TOKENS];
+ u32 token[EIP197_EMB_TOKENS];
} __packed;
#define EIP197_OPTION_MAGIC_VALUE BIT(0)
@@ -591,7 +594,10 @@ struct safexcel_control_data_desc {
#define EIP197_OPTION_2_TOKEN_IV_CMD GENMASK(11, 10)
#define EIP197_OPTION_4_TOKEN_IV_CMD GENMASK(11, 9)
+#define EIP197_TYPE_BCLA 0x0
#define EIP197_TYPE_EXTENDED 0x3
+#define EIP197_CONTEXT_SMALL 0x2
+#define EIP197_CONTEXT_SIZE_MASK 0x3
/* Basic Command Descriptor format */
struct safexcel_command_desc {
@@ -599,13 +605,16 @@ struct safexcel_command_desc {
u8 rsvd0:5;
u8 last_seg:1;
u8 first_seg:1;
- u16 additional_cdata_size:8;
+ u8 additional_cdata_size:8;
u32 rsvd1;
u32 data_lo;
u32 data_hi;
+ u32 atok_lo;
+ u32 atok_hi;
+
struct safexcel_control_data_desc control_data;
} __packed;
@@ -629,15 +638,20 @@ enum eip197_fw {
struct safexcel_desc_ring {
void *base;
+ void *shbase;
void *base_end;
+ void *shbase_end;
dma_addr_t base_dma;
+ dma_addr_t shbase_dma;
/* write and read pointers */
void *write;
+ void *shwrite;
void *read;
/* descriptor element offset */
- unsigned offset;
+ unsigned int offset;
+ unsigned int shoffset;
};
enum safexcel_alg_type {
@@ -652,6 +666,7 @@ struct safexcel_config {
u32 cd_size;
u32 cd_offset;
+ u32 cdsh_offset;
u32 rd_size;
u32 rd_offset;
@@ -862,7 +877,8 @@ struct safexcel_command_desc *safexcel_add_cdesc(struct safexcel_crypto_priv *pr
bool first, bool last,
dma_addr_t data, u32 len,
u32 full_data_len,
- dma_addr_t context);
+ dma_addr_t context,
+ struct safexcel_token **atoken);
struct safexcel_result_desc *safexcel_add_rdesc(struct safexcel_crypto_priv *priv,
int ring_id,
bool first, bool last,
diff --git a/drivers/crypto/inside-secure/safexcel_cipher.c b/drivers/crypto/inside-secure/safexcel_cipher.c
index c02995694b41..0c5e80c3f6e3 100644
--- a/drivers/crypto/inside-secure/safexcel_cipher.c
+++ b/drivers/crypto/inside-secure/safexcel_cipher.c
@@ -47,8 +47,12 @@ struct safexcel_cipher_ctx {
u32 mode;
enum safexcel_cipher_alg alg;
- char aead; /* !=0=AEAD, 2=IPSec ESP AEAD, 3=IPsec ESP GMAC */
- char xcm; /* 0=authenc, 1=GCM, 2 reserved for CCM */
+ u8 aead; /* !=0=AEAD, 2=IPSec ESP AEAD, 3=IPsec ESP GMAC */
+ u8 xcm; /* 0=authenc, 1=GCM, 2 reserved for CCM */
+ u8 aadskip;
+ u8 blocksz;
+ u32 ivmask;
+ u32 ctrinit;
__le32 key[16];
u32 nonce;
@@ -72,251 +76,298 @@ struct safexcel_cipher_req {
int nr_src, nr_dst;
};
-static void safexcel_cipher_token(struct safexcel_cipher_ctx *ctx, u8 *iv,
- struct safexcel_command_desc *cdesc)
+static int safexcel_skcipher_iv(struct safexcel_cipher_ctx *ctx, u8 *iv,
+ struct safexcel_command_desc *cdesc)
{
- u32 block_sz = 0;
-
- if (ctx->mode == CONTEXT_CONTROL_CRYPTO_MODE_CTR_LOAD ||
- ctx->aead & EIP197_AEAD_TYPE_IPSEC_ESP) { /* _ESP and _ESP_GMAC */
+ if (ctx->mode == CONTEXT_CONTROL_CRYPTO_MODE_CTR_LOAD) {
cdesc->control_data.options |= EIP197_OPTION_4_TOKEN_IV_CMD;
-
/* 32 bit nonce */
cdesc->control_data.token[0] = ctx->nonce;
/* 64 bit IV part */
memcpy(&cdesc->control_data.token[1], iv, 8);
-
- if (ctx->alg == SAFEXCEL_CHACHA20 ||
- ctx->xcm == EIP197_XCM_MODE_CCM) {
- /* 32 bit counter, starting at 0 */
- cdesc->control_data.token[3] = 0;
- } else {
- /* 32 bit counter, start at 1 (big endian!) */
- cdesc->control_data.token[3] =
- (__force u32)cpu_to_be32(1);
- }
-
- return;
- } else if (ctx->xcm == EIP197_XCM_MODE_GCM ||
- (ctx->aead && ctx->alg == SAFEXCEL_CHACHA20)) {
- cdesc->control_data.options |= EIP197_OPTION_4_TOKEN_IV_CMD;
-
- /* 96 bit IV part */
- memcpy(&cdesc->control_data.token[0], iv, 12);
-
- if (ctx->alg == SAFEXCEL_CHACHA20) {
- /* 32 bit counter, starting at 0 */
- cdesc->control_data.token[3] = 0;
- } else {
- /* 32 bit counter, start at 1 (big endian!) */
- *(__be32 *)&cdesc->control_data.token[3] =
- cpu_to_be32(1);
- }
-
- return;
- } else if (ctx->alg == SAFEXCEL_CHACHA20) {
+ /* 32 bit counter, start at 0 or 1 (big endian!) */
+ cdesc->control_data.token[3] =
+ (__force u32)cpu_to_be32(ctx->ctrinit);
+ return 4;
+ }
+ if (ctx->alg == SAFEXCEL_CHACHA20) {
cdesc->control_data.options |= EIP197_OPTION_4_TOKEN_IV_CMD;
-
/* 96 bit nonce part */
memcpy(&cdesc->control_data.token[0], &iv[4], 12);
/* 32 bit counter */
cdesc->control_data.token[3] = *(u32 *)iv;
-
- return;
- } else if (ctx->xcm == EIP197_XCM_MODE_CCM) {
- cdesc->control_data.options |= EIP197_OPTION_4_TOKEN_IV_CMD;
-
- /* Variable length IV part */
- memcpy(&cdesc->control_data.token[0], iv, 15 - iv[0]);
- /* Start variable length counter at 0 */
- memset((u8 *)&cdesc->control_data.token[0] + 15 - iv[0],
- 0, iv[0] + 1);
-
- return;
+ return 4;
}
- if (ctx->mode != CONTEXT_CONTROL_CRYPTO_MODE_ECB) {
- switch (ctx->alg) {
- case SAFEXCEL_DES:
- block_sz = DES_BLOCK_SIZE;
- cdesc->control_data.options |= EIP197_OPTION_2_TOKEN_IV_CMD;
- break;
- case SAFEXCEL_3DES:
- block_sz = DES3_EDE_BLOCK_SIZE;
- cdesc->control_data.options |= EIP197_OPTION_2_TOKEN_IV_CMD;
- break;
- case SAFEXCEL_SM4:
- block_sz = SM4_BLOCK_SIZE;
- cdesc->control_data.options |= EIP197_OPTION_4_TOKEN_IV_CMD;
- break;
- case SAFEXCEL_AES:
- block_sz = AES_BLOCK_SIZE;
- cdesc->control_data.options |= EIP197_OPTION_4_TOKEN_IV_CMD;
- break;
- default:
- break;
- }
- memcpy(cdesc->control_data.token, iv, block_sz);
- }
+ cdesc->control_data.options |= ctx->ivmask;
+ memcpy(cdesc->control_data.token, iv, ctx->blocksz);
+ return ctx->blocksz / sizeof(u32);
}
static void safexcel_skcipher_token(struct safexcel_cipher_ctx *ctx, u8 *iv,
struct safexcel_command_desc *cdesc,
+ struct safexcel_token *atoken,
u32 length)
{
struct safexcel_token *token;
+ int ivlen;
- safexcel_cipher_token(ctx, iv, cdesc);
+ ivlen = safexcel_skcipher_iv(ctx, iv, cdesc);
+ if (ivlen == 4) {
+ /* No space in cdesc, instruction moves to atoken */
+ cdesc->additional_cdata_size = 1;
+ token = atoken;
+ } else {
+ /* Everything fits in cdesc */
+ token = (struct safexcel_token *)(cdesc->control_data.token + 2);
+ /* Need to pad with NOP */
+ eip197_noop_token(&token[1]);
+ }
- /* skip over worst case IV of 4 dwords, no need to be exact */
- token = (struct safexcel_token *)(cdesc->control_data.token + 4);
+ token->opcode = EIP197_TOKEN_OPCODE_DIRECTION;
+ token->packet_length = length;
+ token->stat = EIP197_TOKEN_STAT_LAST_PACKET |
+ EIP197_TOKEN_STAT_LAST_HASH;
+ token->instructions = EIP197_TOKEN_INS_LAST |
+ EIP197_TOKEN_INS_TYPE_CRYPTO |
+ EIP197_TOKEN_INS_TYPE_OUTPUT;
+}
- token[0].opcode = EIP197_TOKEN_OPCODE_DIRECTION;
- token[0].packet_length = length;
- token[0].stat = EIP197_TOKEN_STAT_LAST_PACKET |
- EIP197_TOKEN_STAT_LAST_HASH;
- token[0].instructions = EIP197_TOKEN_INS_LAST |
- EIP197_TOKEN_INS_TYPE_CRYPTO |
- EIP197_TOKEN_INS_TYPE_OUTPUT;
+static void safexcel_aead_iv(struct safexcel_cipher_ctx *ctx, u8 *iv,
+ struct safexcel_command_desc *cdesc)
+{
+ if (ctx->mode == CONTEXT_CONTROL_CRYPTO_MODE_CTR_LOAD ||
+ ctx->aead & EIP197_AEAD_TYPE_IPSEC_ESP) { /* _ESP and _ESP_GMAC */
+ /* 32 bit nonce */
+ cdesc->control_data.token[0] = ctx->nonce;
+ /* 64 bit IV part */
+ memcpy(&cdesc->control_data.token[1], iv, 8);
+ /* 32 bit counter, start at 0 or 1 (big endian!) */
+ cdesc->control_data.token[3] =
+ (__force u32)cpu_to_be32(ctx->ctrinit);
+ return;
+ }
+ if (ctx->xcm == EIP197_XCM_MODE_GCM || ctx->alg == SAFEXCEL_CHACHA20) {
+ /* 96 bit IV part */
+ memcpy(&cdesc->control_data.token[0], iv, 12);
+ /* 32 bit counter, start at 0 or 1 (big endian!) */
+ cdesc->control_data.token[3] =
+ (__force u32)cpu_to_be32(ctx->ctrinit);
+ return;
+ }
+ /* CBC */
+ memcpy(cdesc->control_data.token, iv, ctx->blocksz);
}
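
Note: the IV helpers above return the number of token dwords consumed. The command descriptor now embeds only EIP197_EMB_TOKENS (4) dwords, so safexcel_skcipher_token() spills the direction instruction to the shadow-ring ("atoken") area whenever the IV already fills all four. A minimal sketch of that decision, grounded in the ivlen == 4 test above:

    #include <stdbool.h>

    #define EMB_TOKENS 4 /* dwords of token space inside the cdesc */

    static bool spills_to_shadow(int ivlen_dwords)
    {
        /* a 4-dword IV leaves no room for the direction instruction,
         * so additional_cdata_size must be bumped and atoken used */
        return ivlen_dwords == EMB_TOKENS;
    }
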
static void safexcel_aead_token(struct safexcel_cipher_ctx *ctx, u8 *iv,
struct safexcel_command_desc *cdesc,
+ struct safexcel_token *atoken,
enum safexcel_cipher_direction direction,
u32 cryptlen, u32 assoclen, u32 digestsize)
{
- struct safexcel_token *token;
+ struct safexcel_token *aadref;
+ int atoksize = 2; /* Start with minimum size */
+ int assocadj = assoclen - ctx->aadskip, aadalign;
- safexcel_cipher_token(ctx, iv, cdesc);
+ /* Always 4 dwords of embedded IV for AEAD modes */
+ cdesc->control_data.options |= EIP197_OPTION_4_TOKEN_IV_CMD;
- if (direction == SAFEXCEL_ENCRYPT) {
- /* align end of instruction sequence to end of token */
- token = (struct safexcel_token *)(cdesc->control_data.token +
- EIP197_MAX_TOKENS - 14);
-
- token[13].opcode = EIP197_TOKEN_OPCODE_INSERT;
- token[13].packet_length = digestsize;
- token[13].stat = EIP197_TOKEN_STAT_LAST_HASH |
- EIP197_TOKEN_STAT_LAST_PACKET;
- token[13].instructions = EIP197_TOKEN_INS_TYPE_OUTPUT |
- EIP197_TOKEN_INS_INSERT_HASH_DIGEST;
- } else {
+ if (direction == SAFEXCEL_DECRYPT)
cryptlen -= digestsize;
- /* align end of instruction sequence to end of token */
- token = (struct safexcel_token *)(cdesc->control_data.token +
- EIP197_MAX_TOKENS - 15);
-
- token[13].opcode = EIP197_TOKEN_OPCODE_RETRIEVE;
- token[13].packet_length = digestsize;
- token[13].stat = EIP197_TOKEN_STAT_LAST_HASH |
- EIP197_TOKEN_STAT_LAST_PACKET;
- token[13].instructions = EIP197_TOKEN_INS_INSERT_HASH_DIGEST;
-
- token[14].opcode = EIP197_TOKEN_OPCODE_VERIFY;
- token[14].packet_length = digestsize |
- EIP197_TOKEN_HASH_RESULT_VERIFY;
- token[14].stat = EIP197_TOKEN_STAT_LAST_HASH |
- EIP197_TOKEN_STAT_LAST_PACKET;
- token[14].instructions = EIP197_TOKEN_INS_TYPE_OUTPUT;
- }
-
- if (ctx->aead == EIP197_AEAD_TYPE_IPSEC_ESP) {
- /* For ESP mode (and not GMAC), skip over the IV */
- token[8].opcode = EIP197_TOKEN_OPCODE_DIRECTION;
- token[8].packet_length = EIP197_AEAD_IPSEC_IV_SIZE;
-
- assoclen -= EIP197_AEAD_IPSEC_IV_SIZE;
- }
+ if (unlikely(ctx->xcm == EIP197_XCM_MODE_CCM)) {
+ /* Construct IV block B0 for the CBC-MAC */
+ u8 *final_iv = (u8 *)cdesc->control_data.token;
+ u8 *cbcmaciv = (u8 *)&atoken[1];
+ __le32 *aadlen = (__le32 *)&atoken[5];
+
+ if (ctx->aead == EIP197_AEAD_TYPE_IPSEC_ESP) {
+ /* Length + nonce */
+ cdesc->control_data.token[0] = ctx->nonce;
+ /* Fixup flags byte */
+ *(__le32 *)cbcmaciv =
+ cpu_to_le32(ctx->nonce |
+ ((assocadj > 0) << 6) |
+ ((digestsize - 2) << 2));
+ /* 64 bit IV part */
+ memcpy(&cdesc->control_data.token[1], iv, 8);
+ memcpy(cbcmaciv + 4, iv, 8);
+ /* Start counter at 0 */
+ cdesc->control_data.token[3] = 0;
+ /* Message length */
+ *(__be32 *)(cbcmaciv + 12) = cpu_to_be32(cryptlen);
+ } else {
+ /* Variable length IV part */
+ memcpy(final_iv, iv, 15 - iv[0]);
+ memcpy(cbcmaciv, iv, 15 - iv[0]);
+ /* Start variable length counter at 0 */
+ memset(final_iv + 15 - iv[0], 0, iv[0] + 1);
+ memset(cbcmaciv + 15 - iv[0], 0, iv[0] - 1);
+ /* fixup flags byte */
+ cbcmaciv[0] |= ((assocadj > 0) << 6) |
+ ((digestsize - 2) << 2);
+ /* insert lower 2 bytes of message length */
+ cbcmaciv[14] = cryptlen >> 8;
+ cbcmaciv[15] = cryptlen & 255;
+ }
- token[6].opcode = EIP197_TOKEN_OPCODE_DIRECTION;
- token[6].packet_length = assoclen;
- token[6].instructions = EIP197_TOKEN_INS_LAST |
- EIP197_TOKEN_INS_TYPE_HASH;
+ atoken->opcode = EIP197_TOKEN_OPCODE_INSERT;
+ atoken->packet_length = AES_BLOCK_SIZE +
+ ((assocadj > 0) << 1);
+ atoken->stat = 0;
+ atoken->instructions = EIP197_TOKEN_INS_ORIGIN_TOKEN |
+ EIP197_TOKEN_INS_TYPE_HASH;
+
+ if (likely(assocadj)) {
+ *aadlen = cpu_to_le32((assocadj >> 8) |
+ (assocadj & 255) << 8);
+ atoken += 6;
+ atoksize += 7;
+ } else {
+ atoken += 5;
+ atoksize += 6;
+ }
- if (likely(cryptlen || ctx->alg == SAFEXCEL_CHACHA20)) {
- token[11].opcode = EIP197_TOKEN_OPCODE_DIRECTION;
- token[11].packet_length = cryptlen;
- token[11].stat = EIP197_TOKEN_STAT_LAST_HASH;
- if (unlikely(ctx->aead == EIP197_AEAD_TYPE_IPSEC_ESP_GMAC)) {
- token[6].instructions = EIP197_TOKEN_INS_TYPE_HASH;
- /* Do not send to crypt engine in case of GMAC */
- token[11].instructions = EIP197_TOKEN_INS_LAST |
- EIP197_TOKEN_INS_TYPE_HASH |
- EIP197_TOKEN_INS_TYPE_OUTPUT;
+ /* Process AAD data */
+ aadref = atoken;
+ atoken->opcode = EIP197_TOKEN_OPCODE_DIRECTION;
+ atoken->packet_length = assocadj;
+ atoken->stat = 0;
+ atoken->instructions = EIP197_TOKEN_INS_TYPE_HASH;
+ atoken++;
+
+ /* For CCM only, align AAD data towards hash engine */
+ atoken->opcode = EIP197_TOKEN_OPCODE_INSERT;
+ aadalign = (assocadj + 2) & 15;
+ atoken->packet_length = assocadj && aadalign ?
+ 16 - aadalign :
+ 0;
+ if (likely(cryptlen)) {
+ atoken->stat = 0;
+ atoken->instructions = EIP197_TOKEN_INS_TYPE_HASH;
} else {
- token[11].instructions = EIP197_TOKEN_INS_LAST |
- EIP197_TOKEN_INS_TYPE_CRYPTO |
- EIP197_TOKEN_INS_TYPE_HASH |
- EIP197_TOKEN_INS_TYPE_OUTPUT;
+ atoken->stat = EIP197_TOKEN_STAT_LAST_HASH;
+ atoken->instructions = EIP197_TOKEN_INS_LAST |
+ EIP197_TOKEN_INS_TYPE_HASH;
}
- } else if (ctx->xcm != EIP197_XCM_MODE_CCM) {
- token[6].stat = EIP197_TOKEN_STAT_LAST_HASH;
+ } else {
+ safexcel_aead_iv(ctx, iv, cdesc);
+
+ /* Process AAD data */
+ aadref = atoken;
+ atoken->opcode = EIP197_TOKEN_OPCODE_DIRECTION;
+ atoken->packet_length = assocadj;
+ atoken->stat = EIP197_TOKEN_STAT_LAST_HASH;
+ atoken->instructions = EIP197_TOKEN_INS_LAST |
+ EIP197_TOKEN_INS_TYPE_HASH;
}
+ atoken++;
- if (!ctx->xcm)
- return;
+ if (ctx->aead == EIP197_AEAD_TYPE_IPSEC_ESP) {
+ /* For ESP mode (and not GMAC), skip over the IV */
+ atoken->opcode = EIP197_TOKEN_OPCODE_DIRECTION;
+ atoken->packet_length = EIP197_AEAD_IPSEC_IV_SIZE;
+ atoken->stat = 0;
+ atoken->instructions = 0;
+ atoken++;
+ atoksize++;
+ } else if (unlikely(ctx->alg == SAFEXCEL_CHACHA20 &&
+ direction == SAFEXCEL_DECRYPT)) {
+ /* Poly-chacha decryption needs a dummy NOP here ... */
+ atoken->opcode = EIP197_TOKEN_OPCODE_INSERT;
+ atoken->packet_length = 16; /* According to Op Manual */
+ atoken->stat = 0;
+ atoken->instructions = 0;
+ atoken++;
+ atoksize++;
+ }
- token[9].opcode = EIP197_TOKEN_OPCODE_INSERT_REMRES;
- token[9].packet_length = 0;
- token[9].instructions = AES_BLOCK_SIZE;
+ if (ctx->xcm) {
+ /* For GCM and CCM, obtain enc(Y0) */
+ atoken->opcode = EIP197_TOKEN_OPCODE_INSERT_REMRES;
+ atoken->packet_length = 0;
+ atoken->stat = 0;
+ atoken->instructions = AES_BLOCK_SIZE;
+ atoken++;
+
+ atoken->opcode = EIP197_TOKEN_OPCODE_INSERT;
+ atoken->packet_length = AES_BLOCK_SIZE;
+ atoken->stat = 0;
+ atoken->instructions = EIP197_TOKEN_INS_TYPE_OUTPUT |
+ EIP197_TOKEN_INS_TYPE_CRYPTO;
+ atoken++;
+ atoksize += 2;
+ }
- token[10].opcode = EIP197_TOKEN_OPCODE_INSERT;
- token[10].packet_length = AES_BLOCK_SIZE;
- token[10].instructions = EIP197_TOKEN_INS_TYPE_OUTPUT |
- EIP197_TOKEN_INS_TYPE_CRYPTO;
+ if (likely(cryptlen || ctx->alg == SAFEXCEL_CHACHA20)) {
+ /* Fixup stat field for AAD direction instruction */
+ aadref->stat = 0;
- if (ctx->xcm != EIP197_XCM_MODE_GCM) {
- u8 *final_iv = (u8 *)cdesc->control_data.token;
- u8 *cbcmaciv = (u8 *)&token[1];
- __le32 *aadlen = (__le32 *)&token[5];
+ /* Process crypto data */
+ atoken->opcode = EIP197_TOKEN_OPCODE_DIRECTION;
+ atoken->packet_length = cryptlen;
- /* Construct IV block B0 for the CBC-MAC */
- token[0].opcode = EIP197_TOKEN_OPCODE_INSERT;
- token[0].packet_length = AES_BLOCK_SIZE +
- ((assoclen > 0) << 1);
- token[0].instructions = EIP197_TOKEN_INS_ORIGIN_TOKEN |
- EIP197_TOKEN_INS_TYPE_HASH;
- /* Variable length IV part */
- memcpy(cbcmaciv, final_iv, 15 - final_iv[0]);
- /* fixup flags byte */
- cbcmaciv[0] |= ((assoclen > 0) << 6) | ((digestsize - 2) << 2);
- /* Clear upper bytes of variable message length to 0 */
- memset(cbcmaciv + 15 - final_iv[0], 0, final_iv[0] - 1);
- /* insert lower 2 bytes of message length */
- cbcmaciv[14] = cryptlen >> 8;
- cbcmaciv[15] = cryptlen & 255;
+ if (unlikely(ctx->aead == EIP197_AEAD_TYPE_IPSEC_ESP_GMAC)) {
+ /* Fixup instruction field for AAD dir instruction */
+ aadref->instructions = EIP197_TOKEN_INS_TYPE_HASH;
- if (assoclen) {
- *aadlen = cpu_to_le32((assoclen >> 8) |
- ((assoclen & 0xff) << 8));
- assoclen += 2;
+ /* Do not send to crypt engine in case of GMAC */
+ atoken->instructions = EIP197_TOKEN_INS_LAST |
+ EIP197_TOKEN_INS_TYPE_HASH |
+ EIP197_TOKEN_INS_TYPE_OUTPUT;
+ } else {
+ atoken->instructions = EIP197_TOKEN_INS_LAST |
+ EIP197_TOKEN_INS_TYPE_CRYPTO |
+ EIP197_TOKEN_INS_TYPE_HASH |
+ EIP197_TOKEN_INS_TYPE_OUTPUT;
}
- token[6].instructions = EIP197_TOKEN_INS_TYPE_HASH;
-
- /* Align AAD data towards hash engine */
- token[7].opcode = EIP197_TOKEN_OPCODE_INSERT;
- assoclen &= 15;
- token[7].packet_length = assoclen ? 16 - assoclen : 0;
-
- if (likely(cryptlen)) {
- token[7].instructions = EIP197_TOKEN_INS_TYPE_HASH;
-
- /* Align crypto data towards hash engine */
- token[11].stat = 0;
-
- token[12].opcode = EIP197_TOKEN_OPCODE_INSERT;
- cryptlen &= 15;
- token[12].packet_length = cryptlen ? 16 - cryptlen : 0;
- token[12].stat = EIP197_TOKEN_STAT_LAST_HASH;
- token[12].instructions = EIP197_TOKEN_INS_TYPE_HASH;
+ cryptlen &= 15;
+ if (unlikely(ctx->xcm == EIP197_XCM_MODE_CCM && cryptlen)) {
+ atoken->stat = 0;
+ /* For CCM only, pad crypto data to the hash engine */
+ atoken++;
+ atoksize++;
+ atoken->opcode = EIP197_TOKEN_OPCODE_INSERT;
+ atoken->packet_length = 16 - cryptlen;
+ atoken->stat = EIP197_TOKEN_STAT_LAST_HASH;
+ atoken->instructions = EIP197_TOKEN_INS_TYPE_HASH;
} else {
- token[7].stat = EIP197_TOKEN_STAT_LAST_HASH;
- token[7].instructions = EIP197_TOKEN_INS_LAST |
- EIP197_TOKEN_INS_TYPE_HASH;
+ atoken->stat = EIP197_TOKEN_STAT_LAST_HASH;
}
+ atoken++;
+ atoksize++;
}
+
+ if (direction == SAFEXCEL_ENCRYPT) {
+ /* Append ICV */
+ atoken->opcode = EIP197_TOKEN_OPCODE_INSERT;
+ atoken->packet_length = digestsize;
+ atoken->stat = EIP197_TOKEN_STAT_LAST_HASH |
+ EIP197_TOKEN_STAT_LAST_PACKET;
+ atoken->instructions = EIP197_TOKEN_INS_TYPE_OUTPUT |
+ EIP197_TOKEN_INS_INSERT_HASH_DIGEST;
+ } else {
+ /* Extract ICV */
+ atoken->opcode = EIP197_TOKEN_OPCODE_RETRIEVE;
+ atoken->packet_length = digestsize;
+ atoken->stat = EIP197_TOKEN_STAT_LAST_HASH |
+ EIP197_TOKEN_STAT_LAST_PACKET;
+ atoken->instructions = EIP197_TOKEN_INS_INSERT_HASH_DIGEST;
+ atoken++;
+ atoksize++;
+
+ /* Verify ICV */
+ atoken->opcode = EIP197_TOKEN_OPCODE_VERIFY;
+ atoken->packet_length = digestsize |
+ EIP197_TOKEN_HASH_RESULT_VERIFY;
+ atoken->stat = EIP197_TOKEN_STAT_LAST_HASH |
+ EIP197_TOKEN_STAT_LAST_PACKET;
+ atoken->instructions = EIP197_TOKEN_INS_TYPE_OUTPUT;
+ }
+
+ /* Fixup length of the token in the command descriptor */
+ cdesc->additional_cdata_size = atoksize;
}
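
Note: a worked example of the CCM B0 block assembled in the cbcmaciv[] branch above, following RFC 3610. The flags byte is (Adata << 6) | (((M - 2) / 2) << 3) | (L - 1); the code's ((digestsize - 2) << 2) is the same quantity because digestsize is even, and iv[0] already holds L - 1. For a 16-byte tag (M = 16), L = 2, and non-empty AAD:

    #include <stdint.h>
    #include <assert.h>

    static uint8_t ccm_b0_flags(int adata, int tagsize, int lensize)
    {
        return (uint8_t)((adata ? 0x40 : 0) |
                         (((tagsize - 2) / 2) << 3) |
                         (lensize - 1));
    }

    int main(void)
    {
        /* B0 = flags || 13-byte nonce || 2-byte big-endian msg length */
        assert(ccm_b0_flags(1, 16, 2) == 0x79);
        return 0;
    }
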
static int safexcel_skcipher_aes_setkey(struct crypto_skcipher *ctfm,
@@ -329,10 +380,8 @@ static int safexcel_skcipher_aes_setkey(struct crypto_skcipher *ctfm,
int ret, i;
ret = aes_expandkey(&aes, key, len);
- if (ret) {
- crypto_skcipher_set_flags(ctfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
+ if (ret)
return ret;
- }
if (priv->flags & EIP197_TRC_CACHE && ctx->base.ctxr_dma) {
for (i = 0; i < len / sizeof(u32); i++) {
@@ -382,12 +431,12 @@ static int safexcel_aead_setkey(struct crypto_aead *ctfm, const u8 *key,
case SAFEXCEL_DES:
err = verify_aead_des_key(ctfm, keys.enckey, keys.enckeylen);
if (unlikely(err))
- goto badkey_expflags;
+ goto badkey;
break;
case SAFEXCEL_3DES:
err = verify_aead_des3_key(ctfm, keys.enckey, keys.enckeylen);
if (unlikely(err))
- goto badkey_expflags;
+ goto badkey;
break;
case SAFEXCEL_AES:
err = aes_expandkey(&aes, keys.enckey, keys.enckeylen);
@@ -450,9 +499,6 @@ static int safexcel_aead_setkey(struct crypto_aead *ctfm, const u8 *key,
goto badkey;
}
- crypto_aead_set_flags(ctfm, crypto_aead_get_flags(ctfm) &
- CRYPTO_TFM_RES_MASK);
-
if (priv->flags & EIP197_TRC_CACHE && ctx->base.ctxr_dma &&
(memcmp(ctx->ipad, istate.state, ctx->state_sz) ||
memcmp(ctx->opad, ostate.state, ctx->state_sz)))
@@ -470,8 +516,6 @@ static int safexcel_aead_setkey(struct crypto_aead *ctfm, const u8 *key,
return 0;
badkey:
- crypto_aead_set_flags(ctfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
-badkey_expflags:
memzero_explicit(&keys, sizeof(keys));
return err;
}
@@ -656,6 +700,7 @@ static int safexcel_send_req(struct crypto_async_request *base, int ring,
unsigned int totlen;
unsigned int totlen_src = cryptlen + assoclen;
unsigned int totlen_dst = totlen_src;
+ struct safexcel_token *atoken;
int n_cdesc = 0, n_rdesc = 0;
int queued, i, ret = 0;
bool first = true;
@@ -730,56 +775,60 @@ static int safexcel_send_req(struct crypto_async_request *base, int ring,
memcpy(ctx->base.ctxr->data, ctx->key, ctx->key_len);
- /* The EIP cannot deal with zero length input packets! */
- if (totlen == 0)
- totlen = 1;
+ if (!totlen) {
+ /*
+ * The EIP97 cannot deal with zero length input packets!
+ * So stuff a dummy command descriptor indicating a 1 byte
+ * (dummy) input packet, using the context record as source.
+ */
+ first_cdesc = safexcel_add_cdesc(priv, ring,
+ 1, 1, ctx->base.ctxr_dma,
+ 1, 1, ctx->base.ctxr_dma,
+ &atoken);
+ if (IS_ERR(first_cdesc)) {
+ /* No space left in the command descriptor ring */
+ ret = PTR_ERR(first_cdesc);
+ goto cdesc_rollback;
+ }
+ n_cdesc = 1;
+ goto skip_cdesc;
+ }
/* command descriptors */
for_each_sg(src, sg, sreq->nr_src, i) {
int len = sg_dma_len(sg);
/* Do not overflow the request */
- if (queued - len < 0)
+ if (queued < len)
len = queued;
cdesc = safexcel_add_cdesc(priv, ring, !n_cdesc,
!(queued - len),
sg_dma_address(sg), len, totlen,
- ctx->base.ctxr_dma);
+ ctx->base.ctxr_dma, &atoken);
if (IS_ERR(cdesc)) {
/* No space left in the command descriptor ring */
ret = PTR_ERR(cdesc);
goto cdesc_rollback;
}
- n_cdesc++;
- if (n_cdesc == 1) {
+ if (!n_cdesc)
first_cdesc = cdesc;
- }
+ n_cdesc++;
queued -= len;
if (!queued)
break;
}
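
Note: the rewritten clamp above also sidesteps a classic C trap: if either operand is (or later becomes) unsigned, "queued - len < 0" can never be true because the subtraction is performed unsigned. Here both variables are int, so the change is defensive rather than a live bug fix, but the hazard is real:

    #include <stdio.h>

    int main(void)
    {
        int queued = 4;
        unsigned int len = 16;          /* sg_dma_len() is unsigned */

        if (queued - len < 0)           /* promoted to unsigned: never true */
            printf("old test fires\n");
        if (queued < (int)len)          /* the direct comparison */
            printf("new test fires\n"); /* printed */
        return 0;
    }
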
-
- if (unlikely(!n_cdesc)) {
- /*
- * Special case: zero length input buffer.
- * The engine always needs the 1st command descriptor, however!
- */
- first_cdesc = safexcel_add_cdesc(priv, ring, 1, 1, 0, 0, totlen,
- ctx->base.ctxr_dma);
- n_cdesc = 1;
- }
-
+skip_cdesc:
/* Add context control words and token to first command descriptor */
safexcel_context_control(ctx, base, sreq, first_cdesc);
if (ctx->aead)
- safexcel_aead_token(ctx, iv, first_cdesc,
+ safexcel_aead_token(ctx, iv, first_cdesc, atoken,
sreq->direction, cryptlen,
assoclen, digestsize);
else
- safexcel_skcipher_token(ctx, iv, first_cdesc,
+ safexcel_skcipher_token(ctx, iv, first_cdesc, atoken,
cryptlen);
/* result descriptors */
@@ -1166,6 +1215,8 @@ static int safexcel_skcipher_cra_init(struct crypto_tfm *tfm)
ctx->base.send = safexcel_skcipher_send;
ctx->base.handle_result = safexcel_skcipher_handle_result;
+ ctx->ivmask = EIP197_OPTION_4_TOKEN_IV_CMD;
+ ctx->ctrinit = 1;
return 0;
}
@@ -1230,6 +1281,8 @@ static int safexcel_skcipher_aes_ecb_cra_init(struct crypto_tfm *tfm)
safexcel_skcipher_cra_init(tfm);
ctx->alg = SAFEXCEL_AES;
ctx->mode = CONTEXT_CONTROL_CRYPTO_MODE_ECB;
+ ctx->blocksz = 0;
+ ctx->ivmask = EIP197_OPTION_2_TOKEN_IV_CMD;
return 0;
}
@@ -1264,6 +1317,7 @@ static int safexcel_skcipher_aes_cbc_cra_init(struct crypto_tfm *tfm)
safexcel_skcipher_cra_init(tfm);
ctx->alg = SAFEXCEL_AES;
+ ctx->blocksz = AES_BLOCK_SIZE;
ctx->mode = CONTEXT_CONTROL_CRYPTO_MODE_CBC;
return 0;
}
@@ -1300,6 +1354,7 @@ static int safexcel_skcipher_aes_cfb_cra_init(struct crypto_tfm *tfm)
safexcel_skcipher_cra_init(tfm);
ctx->alg = SAFEXCEL_AES;
+ ctx->blocksz = AES_BLOCK_SIZE;
ctx->mode = CONTEXT_CONTROL_CRYPTO_MODE_CFB;
return 0;
}
@@ -1336,6 +1391,7 @@ static int safexcel_skcipher_aes_ofb_cra_init(struct crypto_tfm *tfm)
safexcel_skcipher_cra_init(tfm);
ctx->alg = SAFEXCEL_AES;
+ ctx->blocksz = AES_BLOCK_SIZE;
ctx->mode = CONTEXT_CONTROL_CRYPTO_MODE_OFB;
return 0;
}
@@ -1381,10 +1437,8 @@ static int safexcel_skcipher_aesctr_setkey(struct crypto_skcipher *ctfm,
/* exclude the nonce here */
keylen = len - CTR_RFC3686_NONCE_SIZE;
ret = aes_expandkey(&aes, key, keylen);
- if (ret) {
- crypto_skcipher_set_flags(ctfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
+ if (ret)
return ret;
- }
if (priv->flags & EIP197_TRC_CACHE && ctx->base.ctxr_dma) {
for (i = 0; i < keylen / sizeof(u32); i++) {
@@ -1410,6 +1464,7 @@ static int safexcel_skcipher_aes_ctr_cra_init(struct crypto_tfm *tfm)
safexcel_skcipher_cra_init(tfm);
ctx->alg = SAFEXCEL_AES;
+ ctx->blocksz = AES_BLOCK_SIZE;
ctx->mode = CONTEXT_CONTROL_CRYPTO_MODE_CTR_LOAD;
return 0;
}
@@ -1445,6 +1500,7 @@ static int safexcel_des_setkey(struct crypto_skcipher *ctfm, const u8 *key,
unsigned int len)
{
struct safexcel_cipher_ctx *ctx = crypto_skcipher_ctx(ctfm);
+ struct safexcel_crypto_priv *priv = ctx->priv;
int ret;
ret = verify_skcipher_des_key(ctfm, key);
@@ -1452,7 +1508,7 @@ static int safexcel_des_setkey(struct crypto_skcipher *ctfm, const u8 *key,
return ret;
/* if context exists and key changed, need to invalidate it */
- if (ctx->base.ctxr_dma)
+ if (priv->flags & EIP197_TRC_CACHE && ctx->base.ctxr_dma)
if (memcmp(ctx->key, key, len))
ctx->base.needs_inv = true;
@@ -1468,6 +1524,8 @@ static int safexcel_skcipher_des_cbc_cra_init(struct crypto_tfm *tfm)
safexcel_skcipher_cra_init(tfm);
ctx->alg = SAFEXCEL_DES;
+ ctx->blocksz = DES_BLOCK_SIZE;
+ ctx->ivmask = EIP197_OPTION_2_TOKEN_IV_CMD;
ctx->mode = CONTEXT_CONTROL_CRYPTO_MODE_CBC;
return 0;
}
@@ -1505,6 +1563,8 @@ static int safexcel_skcipher_des_ecb_cra_init(struct crypto_tfm *tfm)
safexcel_skcipher_cra_init(tfm);
ctx->alg = SAFEXCEL_DES;
ctx->mode = CONTEXT_CONTROL_CRYPTO_MODE_ECB;
+ ctx->blocksz = 0;
+ ctx->ivmask = EIP197_OPTION_2_TOKEN_IV_CMD;
return 0;
}
@@ -1537,6 +1597,7 @@ static int safexcel_des3_ede_setkey(struct crypto_skcipher *ctfm,
const u8 *key, unsigned int len)
{
struct safexcel_cipher_ctx *ctx = crypto_skcipher_ctx(ctfm);
+ struct safexcel_crypto_priv *priv = ctx->priv;
int err;
err = verify_skcipher_des3_key(ctfm, key);
@@ -1544,7 +1605,7 @@ static int safexcel_des3_ede_setkey(struct crypto_skcipher *ctfm,
return err;
/* if context exists and key changed, need to invalidate it */
- if (ctx->base.ctxr_dma)
+ if (priv->flags & EIP197_TRC_CACHE && ctx->base.ctxr_dma)
if (memcmp(ctx->key, key, len))
ctx->base.needs_inv = true;
@@ -1560,6 +1621,8 @@ static int safexcel_skcipher_des3_cbc_cra_init(struct crypto_tfm *tfm)
safexcel_skcipher_cra_init(tfm);
ctx->alg = SAFEXCEL_3DES;
+ ctx->blocksz = DES3_EDE_BLOCK_SIZE;
+ ctx->ivmask = EIP197_OPTION_2_TOKEN_IV_CMD;
ctx->mode = CONTEXT_CONTROL_CRYPTO_MODE_CBC;
return 0;
}
@@ -1597,6 +1660,8 @@ static int safexcel_skcipher_des3_ecb_cra_init(struct crypto_tfm *tfm)
safexcel_skcipher_cra_init(tfm);
ctx->alg = SAFEXCEL_3DES;
ctx->mode = CONTEXT_CONTROL_CRYPTO_MODE_ECB;
+ ctx->blocksz = 0;
+ ctx->ivmask = EIP197_OPTION_2_TOKEN_IV_CMD;
return 0;
}
@@ -1652,6 +1717,9 @@ static int safexcel_aead_cra_init(struct crypto_tfm *tfm)
ctx->priv = tmpl->priv;
ctx->alg = SAFEXCEL_AES; /* default */
+ ctx->blocksz = AES_BLOCK_SIZE;
+ ctx->ivmask = EIP197_OPTION_4_TOKEN_IV_CMD;
+ ctx->ctrinit = 1;
ctx->mode = CONTEXT_CONTROL_CRYPTO_MODE_CBC; /* default */
ctx->aead = true;
ctx->base.send = safexcel_aead_send;
@@ -1840,6 +1908,8 @@ static int safexcel_aead_sha1_des3_cra_init(struct crypto_tfm *tfm)
safexcel_aead_sha1_cra_init(tfm);
ctx->alg = SAFEXCEL_3DES; /* override default */
+ ctx->blocksz = DES3_EDE_BLOCK_SIZE;
+ ctx->ivmask = EIP197_OPTION_2_TOKEN_IV_CMD;
return 0;
}
@@ -1874,6 +1944,8 @@ static int safexcel_aead_sha256_des3_cra_init(struct crypto_tfm *tfm)
safexcel_aead_sha256_cra_init(tfm);
ctx->alg = SAFEXCEL_3DES; /* override default */
+ ctx->blocksz = DES3_EDE_BLOCK_SIZE;
+ ctx->ivmask = EIP197_OPTION_2_TOKEN_IV_CMD;
return 0;
}
@@ -1908,6 +1980,8 @@ static int safexcel_aead_sha224_des3_cra_init(struct crypto_tfm *tfm)
safexcel_aead_sha224_cra_init(tfm);
ctx->alg = SAFEXCEL_3DES; /* override default */
+ ctx->blocksz = DES3_EDE_BLOCK_SIZE;
+ ctx->ivmask = EIP197_OPTION_2_TOKEN_IV_CMD;
return 0;
}
@@ -1942,6 +2016,8 @@ static int safexcel_aead_sha512_des3_cra_init(struct crypto_tfm *tfm)
safexcel_aead_sha512_cra_init(tfm);
ctx->alg = SAFEXCEL_3DES; /* override default */
+ ctx->blocksz = DES3_EDE_BLOCK_SIZE;
+ ctx->ivmask = EIP197_OPTION_2_TOKEN_IV_CMD;
return 0;
}
@@ -1976,6 +2052,8 @@ static int safexcel_aead_sha384_des3_cra_init(struct crypto_tfm *tfm)
safexcel_aead_sha384_cra_init(tfm);
ctx->alg = SAFEXCEL_3DES; /* override default */
+ ctx->blocksz = DES3_EDE_BLOCK_SIZE;
+ ctx->ivmask = EIP197_OPTION_2_TOKEN_IV_CMD;
return 0;
}
@@ -2010,6 +2088,8 @@ static int safexcel_aead_sha1_des_cra_init(struct crypto_tfm *tfm)
safexcel_aead_sha1_cra_init(tfm);
ctx->alg = SAFEXCEL_DES; /* override default */
+ ctx->blocksz = DES_BLOCK_SIZE;
+ ctx->ivmask = EIP197_OPTION_2_TOKEN_IV_CMD;
return 0;
}
@@ -2044,6 +2124,8 @@ static int safexcel_aead_sha256_des_cra_init(struct crypto_tfm *tfm)
safexcel_aead_sha256_cra_init(tfm);
ctx->alg = SAFEXCEL_DES; /* override default */
+ ctx->blocksz = DES_BLOCK_SIZE;
+ ctx->ivmask = EIP197_OPTION_2_TOKEN_IV_CMD;
return 0;
}
@@ -2078,6 +2160,8 @@ static int safexcel_aead_sha224_des_cra_init(struct crypto_tfm *tfm)
safexcel_aead_sha224_cra_init(tfm);
ctx->alg = SAFEXCEL_DES; /* override default */
+ ctx->blocksz = DES_BLOCK_SIZE;
+ ctx->ivmask = EIP197_OPTION_2_TOKEN_IV_CMD;
return 0;
}
@@ -2112,6 +2196,8 @@ static int safexcel_aead_sha512_des_cra_init(struct crypto_tfm *tfm)
safexcel_aead_sha512_cra_init(tfm);
ctx->alg = SAFEXCEL_DES; /* override default */
+ ctx->blocksz = DES_BLOCK_SIZE;
+ ctx->ivmask = EIP197_OPTION_2_TOKEN_IV_CMD;
return 0;
}
@@ -2146,6 +2232,8 @@ static int safexcel_aead_sha384_des_cra_init(struct crypto_tfm *tfm)
safexcel_aead_sha384_cra_init(tfm);
ctx->alg = SAFEXCEL_DES; /* override default */
+ ctx->blocksz = DES_BLOCK_SIZE;
+ ctx->ivmask = EIP197_OPTION_2_TOKEN_IV_CMD;
return 0;
}
@@ -2362,10 +2450,8 @@ static int safexcel_skcipher_aesxts_setkey(struct crypto_skcipher *ctfm,
/* Only half of the key data is cipher key */
keylen = (len >> 1);
ret = aes_expandkey(&aes, key, keylen);
- if (ret) {
- crypto_skcipher_set_flags(ctfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
+ if (ret)
return ret;
- }
if (priv->flags & EIP197_TRC_CACHE && ctx->base.ctxr_dma) {
for (i = 0; i < keylen / sizeof(u32); i++) {
@@ -2381,10 +2467,8 @@ static int safexcel_skcipher_aesxts_setkey(struct crypto_skcipher *ctfm,
/* The other half is the tweak key */
ret = aes_expandkey(&aes, (u8 *)(key + keylen), keylen);
- if (ret) {
- crypto_skcipher_set_flags(ctfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
+ if (ret)
return ret;
- }
if (priv->flags & EIP197_TRC_CACHE && ctx->base.ctxr_dma) {
for (i = 0; i < keylen / sizeof(u32); i++) {
@@ -2412,6 +2496,7 @@ static int safexcel_skcipher_aes_xts_cra_init(struct crypto_tfm *tfm)
safexcel_skcipher_cra_init(tfm);
ctx->alg = SAFEXCEL_AES;
+ ctx->blocksz = AES_BLOCK_SIZE;
ctx->xts = 1;
ctx->mode = CONTEXT_CONTROL_CRYPTO_MODE_XTS;
return 0;
@@ -2472,7 +2557,6 @@ static int safexcel_aead_gcm_setkey(struct crypto_aead *ctfm, const u8 *key,
ret = aes_expandkey(&aes, key, len);
if (ret) {
- crypto_aead_set_flags(ctfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
memzero_explicit(&aes, sizeof(aes));
return ret;
}
@@ -2496,8 +2580,6 @@ static int safexcel_aead_gcm_setkey(struct crypto_aead *ctfm, const u8 *key,
crypto_cipher_set_flags(ctx->hkaes, crypto_aead_get_flags(ctfm) &
CRYPTO_TFM_REQ_MASK);
ret = crypto_cipher_setkey(ctx->hkaes, key, len);
- crypto_aead_set_flags(ctfm, crypto_cipher_get_flags(ctx->hkaes) &
- CRYPTO_TFM_RES_MASK);
if (ret)
return ret;
@@ -2532,10 +2614,7 @@ static int safexcel_aead_gcm_cra_init(struct crypto_tfm *tfm)
ctx->mode = CONTEXT_CONTROL_CRYPTO_MODE_XCM; /* override default */
ctx->hkaes = crypto_alloc_cipher("aes", 0, 0);
- if (IS_ERR(ctx->hkaes))
- return PTR_ERR(ctx->hkaes);
-
- return 0;
+ return PTR_ERR_OR_ZERO(ctx->hkaes);
}
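
Note: PTR_ERR_OR_ZERO() is exactly the open-coded check it replaces above; the helper is equivalent to:

    #include <linux/err.h>

    static inline int ptr_err_or_zero_sketch(const void *ptr)
    {
        if (IS_ERR(ptr))
            return PTR_ERR(ptr);
        return 0;
    }
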
static void safexcel_aead_gcm_cra_exit(struct crypto_tfm *tfm)
@@ -2589,7 +2668,6 @@ static int safexcel_aead_ccm_setkey(struct crypto_aead *ctfm, const u8 *key,
ret = aes_expandkey(&aes, key, len);
if (ret) {
- crypto_aead_set_flags(ctfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
memzero_explicit(&aes, sizeof(aes));
return ret;
}
@@ -2632,6 +2710,7 @@ static int safexcel_aead_ccm_cra_init(struct crypto_tfm *tfm)
ctx->state_sz = 3 * AES_BLOCK_SIZE;
ctx->xcm = EIP197_XCM_MODE_CCM;
ctx->mode = CONTEXT_CONTROL_CRYPTO_MODE_XCM; /* override default */
+ ctx->ctrinit = 0;
return 0;
}
@@ -2719,10 +2798,9 @@ static int safexcel_skcipher_chacha20_setkey(struct crypto_skcipher *ctfm,
{
struct safexcel_cipher_ctx *ctx = crypto_skcipher_ctx(ctfm);
- if (len != CHACHA_KEY_SIZE) {
- crypto_skcipher_set_flags(ctfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
+ if (len != CHACHA_KEY_SIZE)
return -EINVAL;
- }
+
safexcel_chacha20_setkey(ctx, key);
return 0;
@@ -2734,6 +2812,7 @@ static int safexcel_skcipher_chacha20_cra_init(struct crypto_tfm *tfm)
safexcel_skcipher_cra_init(tfm);
ctx->alg = SAFEXCEL_CHACHA20;
+ ctx->ctrinit = 0;
ctx->mode = CONTEXT_CONTROL_CHACHA20_MODE_256_32;
return 0;
}
@@ -2775,10 +2854,9 @@ static int safexcel_aead_chachapoly_setkey(struct crypto_aead *ctfm,
len -= EIP197_AEAD_IPSEC_NONCE_SIZE;
ctx->nonce = *(u32 *)(key + len);
}
- if (len != CHACHA_KEY_SIZE) {
- crypto_aead_set_flags(ctfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
+ if (len != CHACHA_KEY_SIZE)
return -EINVAL;
- }
+
safexcel_chacha20_setkey(ctx, key);
return 0;
@@ -2885,6 +2963,7 @@ static int safexcel_aead_chachapoly_cra_init(struct crypto_tfm *tfm)
ctx->alg = SAFEXCEL_CHACHA20;
ctx->mode = CONTEXT_CONTROL_CHACHA20_MODE_256_32 |
CONTEXT_CONTROL_CHACHA20_MODE_CALC_OTK;
+ ctx->ctrinit = 0;
ctx->hash_alg = CONTEXT_CONTROL_CRYPTO_ALG_POLY1305;
ctx->state_sz = 0; /* Precomputed by HW */
return 0;
@@ -2933,6 +3012,7 @@ static int safexcel_aead_chachapolyesp_cra_init(struct crypto_tfm *tfm)
ret = safexcel_aead_chachapoly_cra_init(tfm);
ctx->aead = EIP197_AEAD_TYPE_IPSEC_ESP;
+ ctx->aadskip = EIP197_AEAD_IPSEC_IV_SIZE;
return ret;
}
@@ -2971,10 +3051,8 @@ static int safexcel_skcipher_sm4_setkey(struct crypto_skcipher *ctfm,
struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
struct safexcel_crypto_priv *priv = ctx->priv;
- if (len != SM4_KEY_SIZE) {
- crypto_skcipher_set_flags(ctfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
+ if (len != SM4_KEY_SIZE)
return -EINVAL;
- }
if (priv->flags & EIP197_TRC_CACHE && ctx->base.ctxr_dma)
if (memcmp(ctx->key, key, SM4_KEY_SIZE))
@@ -3013,6 +3091,8 @@ static int safexcel_skcipher_sm4_ecb_cra_init(struct crypto_tfm *tfm)
safexcel_skcipher_cra_init(tfm);
ctx->alg = SAFEXCEL_SM4;
ctx->mode = CONTEXT_CONTROL_CRYPTO_MODE_ECB;
+ ctx->blocksz = 0;
+ ctx->ivmask = EIP197_OPTION_2_TOKEN_IV_CMD;
return 0;
}
@@ -3047,6 +3127,7 @@ static int safexcel_skcipher_sm4_cbc_cra_init(struct crypto_tfm *tfm)
safexcel_skcipher_cra_init(tfm);
ctx->alg = SAFEXCEL_SM4;
+ ctx->blocksz = SM4_BLOCK_SIZE;
ctx->mode = CONTEXT_CONTROL_CRYPTO_MODE_CBC;
return 0;
}
@@ -3083,6 +3164,7 @@ static int safexcel_skcipher_sm4_ofb_cra_init(struct crypto_tfm *tfm)
safexcel_skcipher_cra_init(tfm);
ctx->alg = SAFEXCEL_SM4;
+ ctx->blocksz = SM4_BLOCK_SIZE;
ctx->mode = CONTEXT_CONTROL_CRYPTO_MODE_OFB;
return 0;
}
@@ -3119,6 +3201,7 @@ static int safexcel_skcipher_sm4_cfb_cra_init(struct crypto_tfm *tfm)
safexcel_skcipher_cra_init(tfm);
ctx->alg = SAFEXCEL_SM4;
+ ctx->blocksz = SM4_BLOCK_SIZE;
ctx->mode = CONTEXT_CONTROL_CRYPTO_MODE_CFB;
return 0;
}
@@ -3169,6 +3252,7 @@ static int safexcel_skcipher_sm4_ctr_cra_init(struct crypto_tfm *tfm)
safexcel_skcipher_cra_init(tfm);
ctx->alg = SAFEXCEL_SM4;
+ ctx->blocksz = SM4_BLOCK_SIZE;
ctx->mode = CONTEXT_CONTROL_CRYPTO_MODE_CTR_LOAD;
return 0;
}
@@ -3228,6 +3312,7 @@ static int safexcel_aead_sm4cbc_sha1_cra_init(struct crypto_tfm *tfm)
safexcel_aead_cra_init(tfm);
ctx->alg = SAFEXCEL_SM4;
+ ctx->blocksz = SM4_BLOCK_SIZE;
ctx->hash_alg = CONTEXT_CONTROL_CRYPTO_ALG_SHA1;
ctx->state_sz = SHA1_DIGEST_SIZE;
return 0;
@@ -3335,6 +3420,7 @@ static int safexcel_aead_sm4cbc_sm3_cra_init(struct crypto_tfm *tfm)
safexcel_aead_fallback_cra_init(tfm);
ctx->alg = SAFEXCEL_SM4;
+ ctx->blocksz = SM4_BLOCK_SIZE;
ctx->hash_alg = CONTEXT_CONTROL_CRYPTO_ALG_SM3;
ctx->state_sz = SM3_DIGEST_SIZE;
return 0;
@@ -3473,6 +3559,7 @@ static int safexcel_rfc4106_gcm_cra_init(struct crypto_tfm *tfm)
ret = safexcel_aead_gcm_cra_init(tfm);
ctx->aead = EIP197_AEAD_TYPE_IPSEC_ESP;
+ ctx->aadskip = EIP197_AEAD_IPSEC_IV_SIZE;
return ret;
}
@@ -3607,6 +3694,7 @@ static int safexcel_rfc4309_ccm_cra_init(struct crypto_tfm *tfm)
ret = safexcel_aead_ccm_cra_init(tfm);
ctx->aead = EIP197_AEAD_TYPE_IPSEC_ESP;
+ ctx->aadskip = EIP197_AEAD_IPSEC_IV_SIZE;
return ret;
}
diff --git a/drivers/crypto/inside-secure/safexcel_hash.c b/drivers/crypto/inside-secure/safexcel_hash.c
index 2134daef24f6..43962bc709c6 100644
--- a/drivers/crypto/inside-secure/safexcel_hash.c
+++ b/drivers/crypto/inside-secure/safexcel_hash.c
@@ -87,12 +87,14 @@ static void safexcel_hash_token(struct safexcel_command_desc *cdesc,
input_length &= 15;
if (unlikely(cbcmac && input_length)) {
+ token[0].stat = 0;
token[1].opcode = EIP197_TOKEN_OPCODE_INSERT;
token[1].packet_length = 16 - input_length;
token[1].stat = EIP197_TOKEN_STAT_LAST_HASH;
token[1].instructions = EIP197_TOKEN_INS_TYPE_HASH;
} else {
token[0].stat = EIP197_TOKEN_STAT_LAST_HASH;
+ eip197_noop_token(&token[1]);
}
token[2].opcode = EIP197_TOKEN_OPCODE_INSERT;
@@ -101,6 +103,8 @@ static void safexcel_hash_token(struct safexcel_command_desc *cdesc,
token[2].packet_length = result_length;
token[2].instructions = EIP197_TOKEN_INS_TYPE_OUTPUT |
EIP197_TOKEN_INS_INSERT_HASH_DIGEST;
+
+ eip197_noop_token(&token[3]);
}
static void safexcel_context_control(struct safexcel_ahash_ctx *ctx,
@@ -111,6 +115,7 @@ static void safexcel_context_control(struct safexcel_ahash_ctx *ctx,
u64 count = 0;
cdesc->control_data.control0 = ctx->alg;
+ cdesc->control_data.control1 = 0;
/*
* Copy the input digest if needed, and setup the context
@@ -277,7 +282,8 @@ static int safexcel_handle_req_result(struct safexcel_crypto_priv *priv,
sreq->processed = sreq->block_sz;
sreq->hmac = 0;
- ctx->base.needs_inv = true;
+ if (priv->flags & EIP197_TRC_CACHE)
+ ctx->base.needs_inv = true;
areq->nbytes = 0;
safexcel_ahash_enqueue(areq);
@@ -314,6 +320,7 @@ static int safexcel_ahash_send_req(struct crypto_async_request *async, int ring,
struct safexcel_command_desc *cdesc, *first_cdesc = NULL;
struct safexcel_result_desc *rdesc;
struct scatterlist *sg;
+ struct safexcel_token *dmmy;
int i, extra = 0, n_cdesc = 0, ret = 0, cache_len, skip = 0;
u64 queued, len;
@@ -397,7 +404,8 @@ static int safexcel_ahash_send_req(struct crypto_async_request *async, int ring,
first_cdesc = safexcel_add_cdesc(priv, ring, 1,
(cache_len == len),
req->cache_dma, cache_len,
- len, ctx->base.ctxr_dma);
+ len, ctx->base.ctxr_dma,
+ &dmmy);
if (IS_ERR(first_cdesc)) {
ret = PTR_ERR(first_cdesc);
goto unmap_cache;
@@ -436,7 +444,7 @@ static int safexcel_ahash_send_req(struct crypto_async_request *async, int ring,
cdesc = safexcel_add_cdesc(priv, ring, !n_cdesc,
!(queued - sglen),
sg_dma_address(sg) + skip, sglen,
- len, ctx->base.ctxr_dma);
+ len, ctx->base.ctxr_dma, &dmmy);
if (IS_ERR(cdesc)) {
ret = PTR_ERR(cdesc);
goto unmap_sg;
@@ -1911,10 +1919,8 @@ static int safexcel_crc32_setkey(struct crypto_ahash *tfm, const u8 *key,
{
struct safexcel_ahash_ctx *ctx = crypto_tfm_ctx(crypto_ahash_tfm(tfm));
- if (keylen != sizeof(u32)) {
- crypto_ahash_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
+ if (keylen != sizeof(u32))
return -EINVAL;
- }
memcpy(ctx->ipad, key, sizeof(u32));
return 0;
@@ -1987,10 +1993,8 @@ static int safexcel_cbcmac_setkey(struct crypto_ahash *tfm, const u8 *key,
int ret, i;
ret = aes_expandkey(&aes, key, len);
- if (ret) {
- crypto_ahash_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
+ if (ret)
return ret;
- }
memset(ctx->ipad, 0, 2 * AES_BLOCK_SIZE);
for (i = 0; i < len / sizeof(u32); i++)
@@ -2057,18 +2061,14 @@ static int safexcel_xcbcmac_setkey(struct crypto_ahash *tfm, const u8 *key,
int ret, i;
ret = aes_expandkey(&aes, key, len);
- if (ret) {
- crypto_ahash_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
+ if (ret)
return ret;
- }
/* precompute the XCBC key material */
crypto_cipher_clear_flags(ctx->kaes, CRYPTO_TFM_REQ_MASK);
crypto_cipher_set_flags(ctx->kaes, crypto_ahash_get_flags(tfm) &
CRYPTO_TFM_REQ_MASK);
ret = crypto_cipher_setkey(ctx->kaes, key, len);
- crypto_ahash_set_flags(tfm, crypto_cipher_get_flags(ctx->kaes) &
- CRYPTO_TFM_RES_MASK);
if (ret)
return ret;
@@ -2088,8 +2088,6 @@ static int safexcel_xcbcmac_setkey(struct crypto_ahash *tfm, const u8 *key,
ret = crypto_cipher_setkey(ctx->kaes,
(u8 *)key_tmp + 2 * AES_BLOCK_SIZE,
AES_MIN_KEY_SIZE);
- crypto_ahash_set_flags(tfm, crypto_cipher_get_flags(ctx->kaes) &
- CRYPTO_TFM_RES_MASK);
if (ret)
return ret;
@@ -2160,10 +2158,8 @@ static int safexcel_cmac_setkey(struct crypto_ahash *tfm, const u8 *key,
int ret, i;
ret = aes_expandkey(&aes, key, len);
- if (ret) {
- crypto_ahash_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
+ if (ret)
return ret;
- }
for (i = 0; i < len / sizeof(u32); i++)
ctx->ipad[i + 8] =
@@ -2174,8 +2170,6 @@ static int safexcel_cmac_setkey(struct crypto_ahash *tfm, const u8 *key,
crypto_cipher_set_flags(ctx->kaes, crypto_ahash_get_flags(tfm) &
CRYPTO_TFM_REQ_MASK);
ret = crypto_cipher_setkey(ctx->kaes, key, len);
- crypto_ahash_set_flags(tfm, crypto_cipher_get_flags(ctx->kaes) &
- CRYPTO_TFM_RES_MASK);
if (ret)
return ret;
diff --git a/drivers/crypto/inside-secure/safexcel_ring.c b/drivers/crypto/inside-secure/safexcel_ring.c
index 9237ba745c2f..e454c3d44f07 100644
--- a/drivers/crypto/inside-secure/safexcel_ring.c
+++ b/drivers/crypto/inside-secure/safexcel_ring.c
@@ -14,6 +14,11 @@ int safexcel_init_ring_descriptors(struct safexcel_crypto_priv *priv,
struct safexcel_desc_ring *cdr,
struct safexcel_desc_ring *rdr)
{
+ int i;
+ struct safexcel_command_desc *cdesc;
+ dma_addr_t atok;
+
+ /* Actual command descriptor ring */
cdr->offset = priv->config.cd_offset;
cdr->base = dmam_alloc_coherent(priv->dev,
cdr->offset * EIP197_DEFAULT_RING_SIZE,
@@ -24,7 +29,34 @@ int safexcel_init_ring_descriptors(struct safexcel_crypto_priv *priv,
cdr->base_end = cdr->base + cdr->offset * (EIP197_DEFAULT_RING_SIZE - 1);
cdr->read = cdr->base;
+ /* Command descriptor shadow ring for storing additional token data */
+ cdr->shoffset = priv->config.cdsh_offset;
+ cdr->shbase = dmam_alloc_coherent(priv->dev,
+ cdr->shoffset *
+ EIP197_DEFAULT_RING_SIZE,
+ &cdr->shbase_dma, GFP_KERNEL);
+ if (!cdr->shbase)
+ return -ENOMEM;
+ cdr->shwrite = cdr->shbase;
+ cdr->shbase_end = cdr->shbase + cdr->shoffset *
+ (EIP197_DEFAULT_RING_SIZE - 1);
+
+ /*
+ * Populate command descriptors with physical pointers to shadow descs.
+ * Note that we only need to do this once if we don't overwrite them.
+ */
+ cdesc = cdr->base;
+ atok = cdr->shbase_dma;
+ for (i = 0; i < EIP197_DEFAULT_RING_SIZE; i++) {
+ cdesc->atok_lo = lower_32_bits(atok);
+ cdesc->atok_hi = upper_32_bits(atok);
+ cdesc = (void *)cdesc + cdr->offset;
+ atok += cdr->shoffset;
+ }
+
rdr->offset = priv->config.rd_offset;
+ /* Use shoffset for result token offset here */
+ rdr->shoffset = priv->config.res_offset;
rdr->base = dmam_alloc_coherent(priv->dev,
rdr->offset * EIP197_DEFAULT_RING_SIZE,
&rdr->base_dma, GFP_KERNEL);
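
Note: after the population loop above, command descriptor i permanently carries the bus address of shadow slot i, so the fast path never needs to write atok_lo/atok_hi again. The address arithmetic in isolation, as an illustrative helper:

    #include <linux/types.h>

    /* bus address of shadow-ring slot i */
    static dma_addr_t shadow_slot(dma_addr_t shbase_dma,
                                  u32 shoffset, u32 i)
    {
        return shbase_dma + (dma_addr_t)i * shoffset;
    }
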
@@ -42,11 +74,40 @@ inline int safexcel_select_ring(struct safexcel_crypto_priv *priv)
return (atomic_inc_return(&priv->ring_used) % priv->config.rings);
}
-static void *safexcel_ring_next_wptr(struct safexcel_crypto_priv *priv,
- struct safexcel_desc_ring *ring)
+static void *safexcel_ring_next_cwptr(struct safexcel_crypto_priv *priv,
+ struct safexcel_desc_ring *ring,
+ bool first,
+ struct safexcel_token **atoken)
{
void *ptr = ring->write;
+ if (first)
+ *atoken = ring->shwrite;
+
+ if ((ring->write == ring->read - ring->offset) ||
+ (ring->read == ring->base && ring->write == ring->base_end))
+ return ERR_PTR(-ENOMEM);
+
+ if (ring->write == ring->base_end) {
+ ring->write = ring->base;
+ ring->shwrite = ring->shbase;
+ } else {
+ ring->write += ring->offset;
+ ring->shwrite += ring->shoffset;
+ }
+
+ return ptr;
+}
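
Note: the full-ring test above declares the ring full when the write pointer sits one slot behind the read pointer, including the wrap case; one slot is sacrificed so that write == read always means empty, never full. The same invariant expressed with indices instead of pointers:

    #include <stdbool.h>
    #include <stddef.h>

    static bool ring_full(size_t write, size_t read, size_t nslots)
    {
        return (write + 1) % nslots == read;
    }
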
+
+static void *safexcel_ring_next_rwptr(struct safexcel_crypto_priv *priv,
+ struct safexcel_desc_ring *ring,
+ struct result_data_desc **rtoken)
+{
+ void *ptr = ring->write;
+
+ /* Result token at relative offset shoffset */
+ *rtoken = ring->write + ring->shoffset;
+
if ((ring->write == ring->read - ring->offset) ||
(ring->read == ring->base && ring->write == ring->base_end))
return ERR_PTR(-ENOMEM);
@@ -106,10 +167,13 @@ void safexcel_ring_rollback_wptr(struct safexcel_crypto_priv *priv,
if (ring->write == ring->read)
return;
- if (ring->write == ring->base)
+ if (ring->write == ring->base) {
ring->write = ring->base_end;
- else
+ ring->shwrite = ring->shbase_end;
+ } else {
ring->write -= ring->offset;
+ ring->shwrite -= ring->shoffset;
+ }
}
struct safexcel_command_desc *safexcel_add_cdesc(struct safexcel_crypto_priv *priv,
@@ -117,26 +181,26 @@ struct safexcel_command_desc *safexcel_add_cdesc(struct safexcel_crypto_priv *pr
bool first, bool last,
dma_addr_t data, u32 data_len,
u32 full_data_len,
- dma_addr_t context) {
+ dma_addr_t context,
+ struct safexcel_token **atoken)
+{
struct safexcel_command_desc *cdesc;
- int i;
- cdesc = safexcel_ring_next_wptr(priv, &priv->ring[ring_id].cdr);
+ cdesc = safexcel_ring_next_cwptr(priv, &priv->ring[ring_id].cdr,
+ first, atoken);
if (IS_ERR(cdesc))
return cdesc;
- memset(cdesc, 0, sizeof(struct safexcel_command_desc));
-
- cdesc->first_seg = first;
- cdesc->last_seg = last;
cdesc->particle_size = data_len;
+ cdesc->rsvd0 = 0;
+ cdesc->last_seg = last;
+ cdesc->first_seg = first;
+ cdesc->additional_cdata_size = 0;
+ cdesc->rsvd1 = 0;
cdesc->data_lo = lower_32_bits(data);
cdesc->data_hi = upper_32_bits(data);
- if (first && context) {
- struct safexcel_token *token =
- (struct safexcel_token *)cdesc->control_data.token;
-
+ if (first) {
/*
* Note that the length here MUST be >0 or else the EIP(1)97
* may hang. Newer EIP197 firmware actually incorporates this
@@ -146,20 +210,12 @@ struct safexcel_command_desc *safexcel_add_cdesc(struct safexcel_crypto_priv *pr
cdesc->control_data.packet_length = full_data_len ?: 1;
cdesc->control_data.options = EIP197_OPTION_MAGIC_VALUE |
EIP197_OPTION_64BIT_CTX |
- EIP197_OPTION_CTX_CTRL_IN_CMD;
- cdesc->control_data.context_lo =
- (lower_32_bits(context) & GENMASK(31, 2)) >> 2;
+ EIP197_OPTION_CTX_CTRL_IN_CMD |
+ EIP197_OPTION_RC_AUTO;
+ cdesc->control_data.type = EIP197_TYPE_BCLA;
+ cdesc->control_data.context_lo = lower_32_bits(context) |
+ EIP197_CONTEXT_SMALL;
cdesc->control_data.context_hi = upper_32_bits(context);
-
- if (priv->version == EIP197B_MRVL ||
- priv->version == EIP197D_MRVL)
- cdesc->control_data.options |= EIP197_OPTION_RC_AUTO;
-
- /* TODO: large xform HMAC with SHA-384/512 uses refresh = 3 */
- cdesc->control_data.refresh = 2;
-
- for (i = 0; i < EIP197_MAX_TOKENS; i++)
- eip197_noop_token(&token[i]);
}
return cdesc;
@@ -171,19 +227,27 @@ struct safexcel_result_desc *safexcel_add_rdesc(struct safexcel_crypto_priv *pri
dma_addr_t data, u32 len)
{
struct safexcel_result_desc *rdesc;
+ struct result_data_desc *rtoken;
- rdesc = safexcel_ring_next_wptr(priv, &priv->ring[ring_id].rdr);
+ rdesc = safexcel_ring_next_rwptr(priv, &priv->ring[ring_id].rdr,
+ &rtoken);
if (IS_ERR(rdesc))
return rdesc;
- memset(rdesc, 0, sizeof(struct safexcel_result_desc));
-
- rdesc->first_seg = first;
+ rdesc->particle_size = len;
+ rdesc->rsvd0 = 0;
+ rdesc->descriptor_overflow = 0;
+ rdesc->buffer_overflow = 0;
rdesc->last_seg = last;
+ rdesc->first_seg = first;
rdesc->result_size = EIP197_RD64_RESULT_SIZE;
- rdesc->particle_size = len;
+ rdesc->rsvd1 = 0;
rdesc->data_lo = lower_32_bits(data);
rdesc->data_hi = upper_32_bits(data);
+ /* Clear length & error code in result token */
+ rtoken->packet_length = 0;
+ rtoken->error_code = 0;
+
return rdesc;
}
diff --git a/drivers/crypto/ixp4xx_crypto.c b/drivers/crypto/ixp4xx_crypto.c
index 391e3b4df364..ad73fc946682 100644
--- a/drivers/crypto/ixp4xx_crypto.c
+++ b/drivers/crypto/ixp4xx_crypto.c
@@ -740,7 +740,7 @@ static int setup_cipher(struct crypto_tfm *tfm, int encrypt,
u32 keylen_cfg = 0;
struct ix_sa_dir *dir;
struct ixp_ctx *ctx = crypto_tfm_ctx(tfm);
- u32 *flags = &tfm->crt_flags;
+ int err;
dir = encrypt ? &ctx->encrypt : &ctx->decrypt;
cinfo = dir->npe_ctx;
@@ -757,12 +757,13 @@ static int setup_cipher(struct crypto_tfm *tfm, int encrypt,
case 24: keylen_cfg = MOD_AES192; break;
case 32: keylen_cfg = MOD_AES256; break;
default:
- *flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
return -EINVAL;
}
cipher_cfg |= keylen_cfg;
} else {
- crypto_des_verify_key(tfm, key);
+ err = crypto_des_verify_key(tfm, key);
+ if (err)
+ return err;
}
/* write cfg word to cryptinfo */
*(u32*)cinfo = cpu_to_be32(cipher_cfg);
@@ -819,7 +820,6 @@ static int ablk_setkey(struct crypto_skcipher *tfm, const u8 *key,
unsigned int key_len)
{
struct ixp_ctx *ctx = crypto_skcipher_ctx(tfm);
- u32 *flags = &tfm->base.crt_flags;
int ret;
init_completion(&ctx->completion);
@@ -835,16 +835,6 @@ static int ablk_setkey(struct crypto_skcipher *tfm, const u8 *key,
if (ret)
goto out;
ret = setup_cipher(&tfm->base, 1, key, key_len);
- if (ret)
- goto out;
-
- if (*flags & CRYPTO_TFM_RES_WEAK_KEY) {
- if (*flags & CRYPTO_TFM_REQ_FORBID_WEAK_KEYS) {
- ret = -EINVAL;
- } else {
- *flags &= ~CRYPTO_TFM_RES_WEAK_KEY;
- }
- }
out:
if (!atomic_dec_and_test(&ctx->configuring))
wait_for_completion(&ctx->completion);
@@ -1096,7 +1086,6 @@ free_buf_src:
static int aead_setup(struct crypto_aead *tfm, unsigned int authsize)
{
struct ixp_ctx *ctx = crypto_aead_ctx(tfm);
- u32 *flags = &tfm->base.crt_flags;
unsigned digest_len = crypto_aead_maxauthsize(tfm);
int ret;
@@ -1120,17 +1109,6 @@ static int aead_setup(struct crypto_aead *tfm, unsigned int authsize)
goto out;
ret = setup_auth(&tfm->base, 1, authsize, ctx->authkey,
ctx->authkey_len, digest_len);
- if (ret)
- goto out;
-
- if (*flags & CRYPTO_TFM_RES_WEAK_KEY) {
- if (*flags & CRYPTO_TFM_REQ_FORBID_WEAK_KEYS) {
- ret = -EINVAL;
- goto out;
- } else {
- *flags &= ~CRYPTO_TFM_RES_WEAK_KEY;
- }
- }
out:
if (!atomic_dec_and_test(&ctx->configuring))
wait_for_completion(&ctx->completion);
@@ -1169,7 +1147,6 @@ static int aead_setkey(struct crypto_aead *tfm, const u8 *key,
memzero_explicit(&keys, sizeof(keys));
return aead_setup(tfm, crypto_aead_authsize(tfm));
badkey:
- crypto_aead_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
memzero_explicit(&keys, sizeof(keys));
return -EINVAL;
}
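
Note: these ixp4xx hunks track the crypto-API removal of CRYPTO_TFM_RES_* reporting: weak-key and bad-key-length conditions now surface only as error codes from helpers such as crypto_des_verify_key(), so the flag fix-ups are dropped. A minimal sketch of the resulting setkey shape, assuming the DES helpers from crypto/internal/des.h:

    #include <linux/types.h>
    #include <crypto/internal/des.h>

    static int des_setkey_sketch(struct crypto_skcipher *tfm,
                                 const u8 *key, unsigned int len)
    {
        int err = verify_skcipher_des_key(tfm, key);

        if (err)
            return err;     /* no tfm result flag to set anymore */
        /* ... program the key into the hardware context here ... */
        return 0;
    }
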
diff --git a/drivers/crypto/marvell/cipher.c b/drivers/crypto/marvell/cipher.c
index d8e8c857770c..c24f34a48cef 100644
--- a/drivers/crypto/marvell/cipher.c
+++ b/drivers/crypto/marvell/cipher.c
@@ -255,10 +255,8 @@ static int mv_cesa_aes_setkey(struct crypto_skcipher *cipher, const u8 *key,
int i;
ret = aes_expandkey(&ctx->aes, key, len);
- if (ret) {
- crypto_skcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
+ if (ret)
return ret;
- }
remaining = (ctx->aes.key_length - 16) / 4;
offset = ctx->aes.key_length + 24 - remaining;
diff --git a/drivers/crypto/mediatek/mtk-aes.c b/drivers/crypto/mediatek/mtk-aes.c
index 90880a81c534..78d660d963e2 100644
--- a/drivers/crypto/mediatek/mtk-aes.c
+++ b/drivers/crypto/mediatek/mtk-aes.c
@@ -652,7 +652,6 @@ static int mtk_aes_setkey(struct crypto_skcipher *tfm,
break;
default:
- crypto_skcipher_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
return -EINVAL;
}
@@ -1022,7 +1021,6 @@ static int mtk_aes_gcm_setkey(struct crypto_aead *aead, const u8 *key,
break;
default:
- crypto_aead_set_flags(aead, CRYPTO_TFM_RES_BAD_KEY_LEN);
return -EINVAL;
}
@@ -1033,8 +1031,6 @@ static int mtk_aes_gcm_setkey(struct crypto_aead *aead, const u8 *key,
crypto_skcipher_set_flags(ctr, crypto_aead_get_flags(aead) &
CRYPTO_TFM_REQ_MASK);
err = crypto_skcipher_setkey(ctr, key, keylen);
- crypto_aead_set_flags(aead, crypto_skcipher_get_flags(ctr) &
- CRYPTO_TFM_RES_MASK);
if (err)
return err;
diff --git a/drivers/crypto/mxs-dcp.c b/drivers/crypto/mxs-dcp.c
index f438b425c655..435ac1c83df9 100644
--- a/drivers/crypto/mxs-dcp.c
+++ b/drivers/crypto/mxs-dcp.c
@@ -492,7 +492,6 @@ static int mxs_dcp_aes_setkey(struct crypto_skcipher *tfm, const u8 *key,
unsigned int len)
{
struct dcp_async_ctx *actx = crypto_skcipher_ctx(tfm);
- unsigned int ret;
/*
* AES 128 is supported by the hardware, store key into temporary
@@ -513,16 +512,7 @@ static int mxs_dcp_aes_setkey(struct crypto_skcipher *tfm, const u8 *key,
crypto_sync_skcipher_clear_flags(actx->fallback, CRYPTO_TFM_REQ_MASK);
crypto_sync_skcipher_set_flags(actx->fallback,
tfm->base.crt_flags & CRYPTO_TFM_REQ_MASK);
-
- ret = crypto_sync_skcipher_setkey(actx->fallback, key, len);
- if (!ret)
- return 0;
-
- tfm->base.crt_flags &= ~CRYPTO_TFM_RES_MASK;
- tfm->base.crt_flags |= crypto_sync_skcipher_get_flags(actx->fallback) &
- CRYPTO_TFM_RES_MASK;
-
- return ret;
+ return crypto_sync_skcipher_setkey(actx->fallback, key, len);
}
static int mxs_dcp_aes_fallback_init_tfm(struct crypto_skcipher *tfm)
diff --git a/drivers/crypto/n2_core.c b/drivers/crypto/n2_core.c
index 63bd565048f4..f5c468f2cc82 100644
--- a/drivers/crypto/n2_core.c
+++ b/drivers/crypto/n2_core.c
@@ -746,7 +746,6 @@ static int n2_aes_setkey(struct crypto_skcipher *skcipher, const u8 *key,
ctx->enc_type |= ENC_TYPE_ALG_AES256;
break;
default:
- crypto_skcipher_set_flags(skcipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
return -EINVAL;
}
diff --git a/drivers/crypto/omap-aes-gcm.c b/drivers/crypto/omap-aes-gcm.c
index 9bbedbccfadf..32dc00dc570b 100644
--- a/drivers/crypto/omap-aes-gcm.c
+++ b/drivers/crypto/omap-aes-gcm.c
@@ -13,6 +13,7 @@
#include <linux/dmaengine.h>
#include <linux/omap-dma.h>
#include <linux/interrupt.h>
+#include <linux/pm_runtime.h>
#include <crypto/aes.h>
#include <crypto/gcm.h>
#include <crypto/scatterwalk.h>
@@ -29,11 +30,13 @@ static void omap_aes_gcm_finish_req(struct omap_aes_dev *dd, int ret)
{
struct aead_request *req = dd->aead_req;
- dd->flags &= ~FLAGS_BUSY;
dd->in_sg = NULL;
dd->out_sg = NULL;
- req->base.complete(&req->base, ret);
+ crypto_finalize_aead_request(dd->engine, req, ret);
+
+ pm_runtime_mark_last_busy(dd->dev);
+ pm_runtime_put_autosuspend(dd->dev);
}
static void omap_aes_gcm_done_task(struct omap_aes_dev *dd)
@@ -81,7 +84,6 @@ static void omap_aes_gcm_done_task(struct omap_aes_dev *dd)
}
omap_aes_gcm_finish_req(dd, ret);
- omap_aes_gcm_handle_queue(dd, NULL);
}
static int omap_aes_gcm_copy_buffers(struct omap_aes_dev *dd,
@@ -120,11 +122,16 @@ static int omap_aes_gcm_copy_buffers(struct omap_aes_dev *dd,
OMAP_CRYPTO_FORCE_SINGLE_ENTRY,
FLAGS_ASSOC_DATA_ST_SHIFT,
&dd->flags);
+ if (ret)
+ return ret;
}
if (cryptlen) {
tmp = scatterwalk_ffwd(sg_arr, req->src, req->assoclen);
+ if (nsg)
+ sg_unmark_end(dd->in_sgl);
+
ret = omap_crypto_align_sg(&tmp, cryptlen,
AES_BLOCK_SIZE, &dd->in_sgl[nsg],
OMAP_CRYPTO_COPY_DATA |
@@ -132,6 +139,8 @@ static int omap_aes_gcm_copy_buffers(struct omap_aes_dev *dd,
OMAP_CRYPTO_FORCE_SINGLE_ENTRY,
FLAGS_IN_DATA_ST_SHIFT,
&dd->flags);
+ if (ret)
+ return ret;
}
dd->in_sg = dd->in_sgl;
@@ -142,18 +151,20 @@ static int omap_aes_gcm_copy_buffers(struct omap_aes_dev *dd,
dd->out_sg = req->dst;
dd->orig_out = req->dst;
- dd->out_sg = scatterwalk_ffwd(sg_arr, req->dst, assoclen);
+ dd->out_sg = scatterwalk_ffwd(sg_arr, req->dst, req->assoclen);
flags = 0;
if (req->src == req->dst || dd->out_sg == sg_arr)
flags |= OMAP_CRYPTO_FORCE_COPY;
- ret = omap_crypto_align_sg(&dd->out_sg, cryptlen,
- AES_BLOCK_SIZE, &dd->out_sgl,
- flags,
- FLAGS_OUT_DATA_ST_SHIFT, &dd->flags);
- if (ret)
- return ret;
+ if (cryptlen) {
+ ret = omap_crypto_align_sg(&dd->out_sg, cryptlen,
+ AES_BLOCK_SIZE, &dd->out_sgl,
+ flags,
+ FLAGS_OUT_DATA_ST_SHIFT, &dd->flags);
+ if (ret)
+ return ret;
+ }
dd->in_sg_len = sg_nents_for_len(dd->in_sg, alen + clen);
dd->out_sg_len = sg_nents_for_len(dd->out_sg, clen);
@@ -161,62 +172,12 @@ static int omap_aes_gcm_copy_buffers(struct omap_aes_dev *dd,
return 0;
}
-static void omap_aes_gcm_complete(struct crypto_async_request *req, int err)
-{
- struct omap_aes_gcm_result *res = req->data;
-
- if (err == -EINPROGRESS)
- return;
-
- res->err = err;
- complete(&res->completion);
-}
-
static int do_encrypt_iv(struct aead_request *req, u32 *tag, u32 *iv)
{
- struct scatterlist iv_sg, tag_sg;
- struct skcipher_request *sk_req;
- struct omap_aes_gcm_result result;
- struct omap_aes_ctx *ctx = crypto_aead_ctx(crypto_aead_reqtfm(req));
- int ret = 0;
-
- sk_req = skcipher_request_alloc(ctx->ctr, GFP_KERNEL);
- if (!sk_req) {
- pr_err("skcipher: Failed to allocate request\n");
- return -ENOMEM;
- }
-
- init_completion(&result.completion);
-
- sg_init_one(&iv_sg, iv, AES_BLOCK_SIZE);
- sg_init_one(&tag_sg, tag, AES_BLOCK_SIZE);
- skcipher_request_set_callback(sk_req, CRYPTO_TFM_REQ_MAY_BACKLOG,
- omap_aes_gcm_complete, &result);
- ret = crypto_skcipher_setkey(ctx->ctr, (u8 *)ctx->key, ctx->keylen);
- skcipher_request_set_crypt(sk_req, &iv_sg, &tag_sg, AES_BLOCK_SIZE,
- NULL);
- ret = crypto_skcipher_encrypt(sk_req);
- switch (ret) {
- case 0:
- break;
- case -EINPROGRESS:
- case -EBUSY:
- ret = wait_for_completion_interruptible(&result.completion);
- if (!ret) {
- ret = result.err;
- if (!ret) {
- reinit_completion(&result.completion);
- break;
- }
- }
- /* fall through */
- default:
- pr_err("Encryption of IV failed for GCM mode\n");
- break;
- }
+ struct omap_aes_gcm_ctx *ctx = crypto_aead_ctx(crypto_aead_reqtfm(req));
- skcipher_request_free(sk_req);
- return ret;
+ aes_encrypt(&ctx->actx, (u8 *)tag, (u8 *)iv);
+ return 0;
}
void omap_aes_gcm_dma_out_callback(void *data)
@@ -246,37 +207,21 @@ void omap_aes_gcm_dma_out_callback(void *data)
static int omap_aes_gcm_handle_queue(struct omap_aes_dev *dd,
struct aead_request *req)
{
- struct omap_aes_ctx *ctx;
- struct aead_request *backlog;
- struct omap_aes_reqctx *rctx;
- unsigned long flags;
- int err, ret = 0;
-
- spin_lock_irqsave(&dd->lock, flags);
if (req)
- ret = aead_enqueue_request(&dd->aead_queue, req);
- if (dd->flags & FLAGS_BUSY) {
- spin_unlock_irqrestore(&dd->lock, flags);
- return ret;
- }
-
- backlog = aead_get_backlog(&dd->aead_queue);
- req = aead_dequeue_request(&dd->aead_queue);
- if (req)
- dd->flags |= FLAGS_BUSY;
- spin_unlock_irqrestore(&dd->lock, flags);
-
- if (!req)
- return ret;
+ return crypto_transfer_aead_request_to_engine(dd->engine, req);
- if (backlog)
- backlog->base.complete(&backlog->base, -EINPROGRESS);
+ return 0;
+}
- ctx = crypto_aead_ctx(crypto_aead_reqtfm(req));
- rctx = aead_request_ctx(req);
+static int omap_aes_gcm_prepare_req(struct crypto_engine *engine, void *areq)
+{
+ struct aead_request *req = container_of(areq, struct aead_request,
+ base);
+ struct omap_aes_reqctx *rctx = aead_request_ctx(req);
+ struct omap_aes_dev *dd = rctx->dd;
+ struct omap_aes_gcm_ctx *ctx = crypto_aead_ctx(crypto_aead_reqtfm(req));
+ int err;
- dd->ctx = ctx;
- rctx->dd = dd;
dd->aead_req = req;
rctx->mode &= FLAGS_MODE_MASK;
@@ -286,16 +231,9 @@ static int omap_aes_gcm_handle_queue(struct omap_aes_dev *dd,
if (err)
return err;
- err = omap_aes_write_ctrl(dd);
- if (!err)
- err = omap_aes_crypt_dma_start(dd);
+ dd->ctx = &ctx->octx;
- if (err) {
- omap_aes_gcm_finish_req(dd, err);
- omap_aes_gcm_handle_queue(dd, NULL);
- }
-
- return ret;
+ return omap_aes_write_ctrl(dd);
}
static int omap_aes_gcm_crypt(struct aead_request *req, unsigned long mode)
@@ -350,36 +288,39 @@ int omap_aes_gcm_decrypt(struct aead_request *req)
int omap_aes_4106gcm_encrypt(struct aead_request *req)
{
- struct omap_aes_ctx *ctx = crypto_aead_ctx(crypto_aead_reqtfm(req));
+ struct omap_aes_gcm_ctx *ctx = crypto_aead_ctx(crypto_aead_reqtfm(req));
struct omap_aes_reqctx *rctx = aead_request_ctx(req);
- memcpy(rctx->iv, ctx->nonce, 4);
+ memcpy(rctx->iv, ctx->octx.nonce, 4);
memcpy(rctx->iv + 4, req->iv, 8);
- return omap_aes_gcm_crypt(req, FLAGS_ENCRYPT | FLAGS_GCM |
+ return crypto_ipsec_check_assoclen(req->assoclen) ?:
+ omap_aes_gcm_crypt(req, FLAGS_ENCRYPT | FLAGS_GCM |
FLAGS_RFC4106_GCM);
}
int omap_aes_4106gcm_decrypt(struct aead_request *req)
{
- struct omap_aes_ctx *ctx = crypto_aead_ctx(crypto_aead_reqtfm(req));
+ struct omap_aes_gcm_ctx *ctx = crypto_aead_ctx(crypto_aead_reqtfm(req));
struct omap_aes_reqctx *rctx = aead_request_ctx(req);
- memcpy(rctx->iv, ctx->nonce, 4);
+ memcpy(rctx->iv, ctx->octx.nonce, 4);
memcpy(rctx->iv + 4, req->iv, 8);
- return omap_aes_gcm_crypt(req, FLAGS_GCM | FLAGS_RFC4106_GCM);
+ return crypto_ipsec_check_assoclen(req->assoclen) ?:
+ omap_aes_gcm_crypt(req, FLAGS_GCM | FLAGS_RFC4106_GCM);
}
int omap_aes_gcm_setkey(struct crypto_aead *tfm, const u8 *key,
unsigned int keylen)
{
- struct omap_aes_ctx *ctx = crypto_aead_ctx(tfm);
+ struct omap_aes_gcm_ctx *ctx = crypto_aead_ctx(tfm);
+ int ret;
- if (keylen != AES_KEYSIZE_128 && keylen != AES_KEYSIZE_192 &&
- keylen != AES_KEYSIZE_256)
- return -EINVAL;
+ ret = aes_expandkey(&ctx->actx, key, keylen);
+ if (ret)
+ return ret;
- memcpy(ctx->key, key, keylen);
- ctx->keylen = keylen;
+ memcpy(ctx->octx.key, key, keylen);
+ ctx->octx.keylen = keylen;
return 0;
}
@@ -387,19 +328,63 @@ int omap_aes_gcm_setkey(struct crypto_aead *tfm, const u8 *key,
int omap_aes_4106gcm_setkey(struct crypto_aead *tfm, const u8 *key,
unsigned int keylen)
{
- struct omap_aes_ctx *ctx = crypto_aead_ctx(tfm);
+ struct omap_aes_gcm_ctx *ctx = crypto_aead_ctx(tfm);
+ int ret;
if (keylen < 4)
return -EINVAL;
-
keylen -= 4;
- if (keylen != AES_KEYSIZE_128 && keylen != AES_KEYSIZE_192 &&
- keylen != AES_KEYSIZE_256)
- return -EINVAL;
- memcpy(ctx->key, key, keylen);
- memcpy(ctx->nonce, key + keylen, 4);
- ctx->keylen = keylen;
+ ret = aes_expandkey(&ctx->actx, key, keylen);
+ if (ret)
+ return ret;
+
+ memcpy(ctx->octx.key, key, keylen);
+ memcpy(ctx->octx.nonce, key + keylen, 4);
+ ctx->octx.keylen = keylen;
+
+ return 0;
+}
+
+int omap_aes_gcm_setauthsize(struct crypto_aead *tfm, unsigned int authsize)
+{
+ return crypto_gcm_check_authsize(authsize);
+}
+
+int omap_aes_4106gcm_setauthsize(struct crypto_aead *parent,
+ unsigned int authsize)
+{
+ return crypto_rfc4106_check_authsize(authsize);
+}
+
+static int omap_aes_gcm_crypt_req(struct crypto_engine *engine, void *areq)
+{
+ struct aead_request *req = container_of(areq, struct aead_request,
+ base);
+ struct omap_aes_reqctx *rctx = aead_request_ctx(req);
+ struct omap_aes_dev *dd = rctx->dd;
+ int ret = 0;
+
+ if (!dd)
+ return -ENODEV;
+
+ if (dd->in_sg_len)
+ ret = omap_aes_crypt_dma_start(dd);
+ else
+ omap_aes_gcm_dma_out_callback(dd);
+
+ return ret;
+}
+
+int omap_aes_gcm_cra_init(struct crypto_aead *tfm)
+{
+ struct omap_aes_ctx *ctx = crypto_aead_ctx(tfm);
+
+ ctx->enginectx.op.prepare_request = omap_aes_gcm_prepare_req;
+ ctx->enginectx.op.unprepare_request = NULL;
+ ctx->enginectx.op.do_one_request = omap_aes_gcm_crypt_req;
+
+ crypto_aead_set_reqsize(tfm, sizeof(struct omap_aes_reqctx));
return 0;
}
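
Two structural changes carry this file: the driver's private aead_queue/FLAGS_BUSY machinery is replaced by the generic crypto engine, and the nested "ecb(aes)" skcipher that encrypted the tag IV is replaced by the synchronous AES library (aes_expandkey()/aes_encrypt()). A condensed sketch of the engine wiring; names prefixed my_ are hypothetical, and the helpers are only declared to keep the sketch honest:

#include <crypto/engine.h>
#include <crypto/internal/aead.h>

struct my_gcm_ctx {
	struct crypto_engine_ctx enginectx;	/* engine expects the ops here */
	/* device state, keys, ... */
};

struct my_reqctx {
	unsigned long mode;			/* per-request state (illustrative) */
};

int my_hw_setup(struct aead_request *req);	/* hypothetical */
int my_start_dma(struct aead_request *req);	/* hypothetical */

static int my_prepare_req(struct crypto_engine *engine, void *areq)
{
	struct aead_request *req = container_of(areq, struct aead_request,
						base);

	/* map sg lists and program the controller for this request */
	return my_hw_setup(req);
}

static int my_do_one_req(struct crypto_engine *engine, void *areq)
{
	struct aead_request *req = container_of(areq, struct aead_request,
						base);

	/* start DMA; the completion path must end with
	 * crypto_finalize_aead_request(engine, req, err)
	 */
	return my_start_dma(req);
}

static int my_aead_init(struct crypto_aead *tfm)
{
	struct my_gcm_ctx *ctx = crypto_aead_ctx(tfm);

	ctx->enginectx.op.prepare_request = my_prepare_req;
	ctx->enginectx.op.unprepare_request = NULL;
	ctx->enginectx.op.do_one_request = my_do_one_req;

	crypto_aead_set_reqsize(tfm, sizeof(struct my_reqctx));
	return 0;
}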
diff --git a/drivers/crypto/omap-aes.c b/drivers/crypto/omap-aes.c
index a1fc03ed01f3..824ddf2a66ff 100644
--- a/drivers/crypto/omap-aes.c
+++ b/drivers/crypto/omap-aes.c
@@ -269,13 +269,14 @@ static int omap_aes_crypt_dma(struct omap_aes_dev *dd,
struct scatterlist *out_sg,
int in_sg_len, int out_sg_len)
{
- struct dma_async_tx_descriptor *tx_in, *tx_out;
+ struct dma_async_tx_descriptor *tx_in, *tx_out = NULL, *cb_desc;
struct dma_slave_config cfg;
int ret;
if (dd->pio_only) {
scatterwalk_start(&dd->in_walk, dd->in_sg);
- scatterwalk_start(&dd->out_walk, dd->out_sg);
+ if (out_sg_len)
+ scatterwalk_start(&dd->out_walk, dd->out_sg);
/* Enable DATAIN interrupt and let it take
care of the rest */
@@ -312,34 +313,45 @@ static int omap_aes_crypt_dma(struct omap_aes_dev *dd,
/* No callback necessary */
tx_in->callback_param = dd;
+ tx_in->callback = NULL;
/* OUT */
- ret = dmaengine_slave_config(dd->dma_lch_out, &cfg);
- if (ret) {
- dev_err(dd->dev, "can't configure OUT dmaengine slave: %d\n",
- ret);
- return ret;
- }
+ if (out_sg_len) {
+ ret = dmaengine_slave_config(dd->dma_lch_out, &cfg);
+ if (ret) {
+ dev_err(dd->dev, "can't configure OUT dmaengine slave: %d\n",
+ ret);
+ return ret;
+ }
- tx_out = dmaengine_prep_slave_sg(dd->dma_lch_out, out_sg, out_sg_len,
- DMA_DEV_TO_MEM,
- DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
- if (!tx_out) {
- dev_err(dd->dev, "OUT prep_slave_sg() failed\n");
- return -EINVAL;
+ tx_out = dmaengine_prep_slave_sg(dd->dma_lch_out, out_sg,
+ out_sg_len,
+ DMA_DEV_TO_MEM,
+ DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
+ if (!tx_out) {
+ dev_err(dd->dev, "OUT prep_slave_sg() failed\n");
+ return -EINVAL;
+ }
+
+ cb_desc = tx_out;
+ } else {
+ cb_desc = tx_in;
}
if (dd->flags & FLAGS_GCM)
- tx_out->callback = omap_aes_gcm_dma_out_callback;
+ cb_desc->callback = omap_aes_gcm_dma_out_callback;
else
- tx_out->callback = omap_aes_dma_out_callback;
- tx_out->callback_param = dd;
+ cb_desc->callback = omap_aes_dma_out_callback;
+ cb_desc->callback_param = dd;
+
dmaengine_submit(tx_in);
- dmaengine_submit(tx_out);
+ if (tx_out)
+ dmaengine_submit(tx_out);
dma_async_issue_pending(dd->dma_lch_in);
- dma_async_issue_pending(dd->dma_lch_out);
+ if (out_sg_len)
+ dma_async_issue_pending(dd->dma_lch_out);
/* start DMA */
dd->pdata->trigger(dd, dd->total);
@@ -361,11 +373,13 @@ int omap_aes_crypt_dma_start(struct omap_aes_dev *dd)
return -EINVAL;
}
- err = dma_map_sg(dd->dev, dd->out_sg, dd->out_sg_len,
- DMA_FROM_DEVICE);
- if (!err) {
- dev_err(dd->dev, "dma_map_sg() error\n");
- return -EINVAL;
+ if (dd->out_sg_len) {
+ err = dma_map_sg(dd->dev, dd->out_sg, dd->out_sg_len,
+ DMA_FROM_DEVICE);
+ if (!err) {
+ dev_err(dd->dev, "dma_map_sg() error\n");
+ return -EINVAL;
+ }
}
}
@@ -373,8 +387,9 @@ int omap_aes_crypt_dma_start(struct omap_aes_dev *dd)
dd->out_sg_len);
if (err && !dd->pio_only) {
dma_unmap_sg(dd->dev, dd->in_sg, dd->in_sg_len, DMA_TO_DEVICE);
- dma_unmap_sg(dd->dev, dd->out_sg, dd->out_sg_len,
- DMA_FROM_DEVICE);
+ if (dd->out_sg_len)
+ dma_unmap_sg(dd->dev, dd->out_sg, dd->out_sg_len,
+ DMA_FROM_DEVICE);
}
return err;
@@ -479,6 +494,14 @@ static int omap_aes_crypt_req(struct crypto_engine *engine,
return omap_aes_crypt_dma_start(dd);
}
+static void omap_aes_copy_ivout(struct omap_aes_dev *dd, u8 *ivbuf)
+{
+ int i;
+
+ for (i = 0; i < 4; i++)
+ ((u32 *)ivbuf)[i] = omap_aes_read(dd, AES_REG_IV(dd, i));
+}
+
static void omap_aes_done_task(unsigned long data)
{
struct omap_aes_dev *dd = (struct omap_aes_dev *)data;
@@ -494,12 +517,16 @@ static void omap_aes_done_task(unsigned long data)
omap_aes_crypt_dma_stop(dd);
}
- omap_crypto_cleanup(dd->in_sgl, NULL, 0, dd->total_save,
+ omap_crypto_cleanup(dd->in_sg, NULL, 0, dd->total_save,
FLAGS_IN_DATA_ST_SHIFT, dd->flags);
- omap_crypto_cleanup(&dd->out_sgl, dd->orig_out, 0, dd->total_save,
+ omap_crypto_cleanup(dd->out_sg, dd->orig_out, 0, dd->total_save,
FLAGS_OUT_DATA_ST_SHIFT, dd->flags);
+ /* Update IV output */
+ if (dd->flags & (FLAGS_CBC | FLAGS_CTR))
+ omap_aes_copy_ivout(dd, dd->req->iv);
+
omap_aes_finish_req(dd, 0);
pr_debug("exit\n");
@@ -513,6 +540,9 @@ static int omap_aes_crypt(struct skcipher_request *req, unsigned long mode)
struct omap_aes_dev *dd;
int ret;
+ if ((req->cryptlen % AES_BLOCK_SIZE) && !(mode & FLAGS_CTR))
+ return -EINVAL;
+
pr_debug("nbytes: %d, enc: %d, cbc: %d\n", req->cryptlen,
!!(mode & FLAGS_ENCRYPT),
!!(mode & FLAGS_CBC));
@@ -627,36 +657,6 @@ static int omap_aes_init_tfm(struct crypto_skcipher *tfm)
return 0;
}
-static int omap_aes_gcm_cra_init(struct crypto_aead *tfm)
-{
- struct omap_aes_dev *dd = NULL;
- struct omap_aes_ctx *ctx = crypto_aead_ctx(tfm);
- int err;
-
- /* Find AES device, currently picks the first device */
- spin_lock_bh(&list_lock);
- list_for_each_entry(dd, &dev_list, list) {
- break;
- }
- spin_unlock_bh(&list_lock);
-
- err = pm_runtime_get_sync(dd->dev);
- if (err < 0) {
- dev_err(dd->dev, "%s: failed to get_sync(%d)\n",
- __func__, err);
- return err;
- }
-
- tfm->reqsize = sizeof(struct omap_aes_reqctx);
- ctx->ctr = crypto_alloc_skcipher("ecb(aes)", 0, 0);
- if (IS_ERR(ctx->ctr)) {
- pr_warn("could not load aes driver for encrypting IV\n");
- return PTR_ERR(ctx->ctr);
- }
-
- return 0;
-}
-
static void omap_aes_exit_tfm(struct crypto_skcipher *tfm)
{
struct omap_aes_ctx *ctx = crypto_skcipher_ctx(tfm);
@@ -667,19 +667,6 @@ static void omap_aes_exit_tfm(struct crypto_skcipher *tfm)
ctx->fallback = NULL;
}
-static void omap_aes_gcm_cra_exit(struct crypto_aead *tfm)
-{
- struct omap_aes_ctx *ctx = crypto_aead_ctx(tfm);
-
- if (ctx->fallback)
- crypto_free_sync_skcipher(ctx->fallback);
-
- ctx->fallback = NULL;
-
- if (ctx->ctr)
- crypto_free_skcipher(ctx->ctr);
-}
-
/* ********************** ALGS ************************************ */
static struct skcipher_alg algs_ecb_cbc[] = {
@@ -732,7 +719,7 @@ static struct skcipher_alg algs_ctr[] = {
.base.cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY |
CRYPTO_ALG_ASYNC |
CRYPTO_ALG_NEED_FALLBACK,
- .base.cra_blocksize = AES_BLOCK_SIZE,
+ .base.cra_blocksize = 1,
.base.cra_ctxsize = sizeof(struct omap_aes_ctx),
.base.cra_module = THIS_MODULE,
@@ -763,15 +750,15 @@ static struct aead_alg algs_aead_gcm[] = {
.cra_flags = CRYPTO_ALG_ASYNC |
CRYPTO_ALG_KERN_DRIVER_ONLY,
.cra_blocksize = 1,
- .cra_ctxsize = sizeof(struct omap_aes_ctx),
+ .cra_ctxsize = sizeof(struct omap_aes_gcm_ctx),
.cra_alignmask = 0xf,
.cra_module = THIS_MODULE,
},
.init = omap_aes_gcm_cra_init,
- .exit = omap_aes_gcm_cra_exit,
.ivsize = GCM_AES_IV_SIZE,
.maxauthsize = AES_BLOCK_SIZE,
.setkey = omap_aes_gcm_setkey,
+ .setauthsize = omap_aes_gcm_setauthsize,
.encrypt = omap_aes_gcm_encrypt,
.decrypt = omap_aes_gcm_decrypt,
},
@@ -783,15 +770,15 @@ static struct aead_alg algs_aead_gcm[] = {
.cra_flags = CRYPTO_ALG_ASYNC |
CRYPTO_ALG_KERN_DRIVER_ONLY,
.cra_blocksize = 1,
- .cra_ctxsize = sizeof(struct omap_aes_ctx),
+ .cra_ctxsize = sizeof(struct omap_aes_gcm_ctx),
.cra_alignmask = 0xf,
.cra_module = THIS_MODULE,
},
.init = omap_aes_gcm_cra_init,
- .exit = omap_aes_gcm_cra_exit,
.maxauthsize = AES_BLOCK_SIZE,
.ivsize = GCM_RFC4106_IV_SIZE,
.setkey = omap_aes_4106gcm_setkey,
+ .setauthsize = omap_aes_4106gcm_setauthsize,
.encrypt = omap_aes_4106gcm_encrypt,
.decrypt = omap_aes_4106gcm_decrypt,
},
@@ -1296,7 +1283,8 @@ static int omap_aes_remove(struct platform_device *pdev)
tasklet_kill(&dd->done_task);
omap_aes_dma_cleanup(dd);
pm_runtime_disable(dd->dev);
- dd = NULL;
+
+ sysfs_remove_group(&dd->dev->kobj, &omap_aes_attr_group);
return 0;
}
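
The DMA rework above makes the OUT channel optional so that AAD-only GCM requests (no ciphertext) can run: when out_sg_len is zero the completion callback has to ride the IN descriptor instead. A stripped-down illustration of that selection (the helper is invented for the sketch):

#include <linux/dmaengine.h>

static void my_submit_aes_dma(struct dma_async_tx_descriptor *tx_in,
			      struct dma_async_tx_descriptor *tx_out,
			      dma_async_tx_callback done, void *param)
{
	/* hang the callback off whichever descriptor completes last */
	struct dma_async_tx_descriptor *cb_desc = tx_out ?: tx_in;

	cb_desc->callback = done;
	cb_desc->callback_param = param;

	dmaengine_submit(tx_in);
	if (tx_out)
		dmaengine_submit(tx_out);
}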
diff --git a/drivers/crypto/omap-aes.h b/drivers/crypto/omap-aes.h
index 2d3575231e31..2d111bf906e1 100644
--- a/drivers/crypto/omap-aes.h
+++ b/drivers/crypto/omap-aes.h
@@ -9,6 +9,7 @@
#ifndef __OMAP_AES_H__
#define __OMAP_AES_H__
+#include <crypto/aes.h>
#include <crypto/engine.h>
#define DST_MAXBURST 4
@@ -79,7 +80,6 @@
#define FLAGS_INIT BIT(5)
#define FLAGS_FAST BIT(6)
-#define FLAGS_BUSY BIT(7)
#define FLAGS_IN_DATA_ST_SHIFT 8
#define FLAGS_OUT_DATA_ST_SHIFT 10
@@ -98,7 +98,11 @@ struct omap_aes_ctx {
u32 key[AES_KEYSIZE_256 / sizeof(u32)];
u8 nonce[4];
struct crypto_sync_skcipher *fallback;
- struct crypto_skcipher *ctr;
+};
+
+struct omap_aes_gcm_ctx {
+ struct omap_aes_ctx octx;
+ struct crypto_aes_ctx actx;
};
struct omap_aes_reqctx {
@@ -202,8 +206,12 @@ int omap_aes_4106gcm_setkey(struct crypto_aead *tfm, const u8 *key,
unsigned int keylen);
int omap_aes_gcm_encrypt(struct aead_request *req);
int omap_aes_gcm_decrypt(struct aead_request *req);
+int omap_aes_gcm_setauthsize(struct crypto_aead *tfm, unsigned int authsize);
int omap_aes_4106gcm_encrypt(struct aead_request *req);
int omap_aes_4106gcm_decrypt(struct aead_request *req);
+int omap_aes_4106gcm_setauthsize(struct crypto_aead *parent,
+ unsigned int authsize);
+int omap_aes_gcm_cra_init(struct crypto_aead *tfm);
int omap_aes_write_ctrl(struct omap_aes_dev *dd);
int omap_aes_crypt_dma_start(struct omap_aes_dev *dd);
int omap_aes_crypt_dma_stop(struct omap_aes_dev *dd);
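
The new omap_aes_gcm_ctx deliberately embeds the skcipher context as its first member: shared code assigns dd->ctx = &ctx->octx and otherwise treats the AEAD context as a plain struct omap_aes_ctx, so the two pointers must alias. A compile-time check of the kind one could add to document this invariant (not part of the patch; assumes omap-aes.h is in scope):

#include <linux/build_bug.h>
#include <linux/stddef.h>

static inline void omap_aes_gcm_ctx_layout_check(void)
{
	/* fails the build if anyone reorders the members */
	BUILD_BUG_ON(offsetof(struct omap_aes_gcm_ctx, octx) != 0);
}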
diff --git a/drivers/crypto/omap-crypto.c b/drivers/crypto/omap-crypto.c
index 7d592d93bb1c..cc88b7362bc2 100644
--- a/drivers/crypto/omap-crypto.c
+++ b/drivers/crypto/omap-crypto.c
@@ -154,6 +154,41 @@ int omap_crypto_align_sg(struct scatterlist **sg, int total, int bs,
}
EXPORT_SYMBOL_GPL(omap_crypto_align_sg);
+static void omap_crypto_copy_data(struct scatterlist *src,
+ struct scatterlist *dst,
+ int offset, int len)
+{
+ int amt;
+ void *srcb, *dstb;
+ int srco = 0, dsto = offset;
+
+ while (src && dst && len) {
+ if (srco >= src->length) {
+ srco -= src->length;
+ src = sg_next(src);
+ continue;
+ }
+
+ if (dsto >= dst->length) {
+ dsto -= dst->length;
+ dst = sg_next(dst);
+ continue;
+ }
+
+ amt = min(src->length - srco, dst->length - dsto);
+ amt = min(len, amt);
+
+ srcb = sg_virt(src) + srco;
+ dstb = sg_virt(dst) + dsto;
+
+ memcpy(dstb, srcb, amt);
+
+ srco += amt;
+ dsto += amt;
+ len -= amt;
+ }
+}
+
void omap_crypto_cleanup(struct scatterlist *sg, struct scatterlist *orig,
int offset, int len, u8 flags_shift,
unsigned long flags)
@@ -171,7 +206,7 @@ void omap_crypto_cleanup(struct scatterlist *sg, struct scatterlist *orig,
pages = get_order(len);
if (orig && (flags & OMAP_CRYPTO_COPY_MASK))
- scatterwalk_map_and_copy(buf, orig, offset, len, 1);
+ omap_crypto_copy_data(sg, orig, offset, len);
if (flags & OMAP_CRYPTO_DATA_COPIED)
free_pages((unsigned long)buf, pages);
diff --git a/drivers/crypto/omap-des.c b/drivers/crypto/omap-des.c
index 4c4dbc2b377e..8eda43319204 100644
--- a/drivers/crypto/omap-des.c
+++ b/drivers/crypto/omap-des.c
@@ -597,6 +597,7 @@ static int omap_des_crypt_req(struct crypto_engine *engine,
static void omap_des_done_task(unsigned long data)
{
struct omap_des_dev *dd = (struct omap_des_dev *)data;
+ int i;
pr_debug("enter done_task\n");
@@ -615,6 +616,11 @@ static void omap_des_done_task(unsigned long data)
omap_crypto_cleanup(&dd->out_sgl, dd->orig_out, 0, dd->total_save,
FLAGS_OUT_DATA_ST_SHIFT, dd->flags);
+ if ((dd->flags & FLAGS_CBC) && dd->req->iv)
+ for (i = 0; i < 2; i++)
+ ((u32 *)dd->req->iv)[i] =
+ omap_des_read(dd, DES_REG_IV(dd, i));
+
omap_des_finish_req(dd, 0);
pr_debug("exit\n");
@@ -631,10 +637,11 @@ static int omap_des_crypt(struct skcipher_request *req, unsigned long mode)
!!(mode & FLAGS_ENCRYPT),
!!(mode & FLAGS_CBC));
- if (!IS_ALIGNED(req->cryptlen, DES_BLOCK_SIZE)) {
- pr_err("request size is not exact amount of DES blocks\n");
+ if (!req->cryptlen)
+ return 0;
+
+ if (!IS_ALIGNED(req->cryptlen, DES_BLOCK_SIZE))
return -EINVAL;
- }
dd = omap_des_find_dev(ctx);
if (!dd)
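
The IV read-back added to both the AES and DES done paths honours the skcipher API contract: after a CBC (or CTR) operation, req->iv must hold the output IV so callers can chain requests. A hedged sketch of the generic shape; the device struct and register accessor below are stand-ins, not the drivers' real names:

#include <linux/types.h>

struct my_dev;					/* stand-in for the driver's device struct */
u32 my_read_iv_reg(struct my_dev *dd, int idx);	/* hypothetical accessor */

static void my_copy_ivout(struct my_dev *dd, u8 *ivbuf, int words)
{
	int i;

	for (i = 0; i < words; i++)
		((u32 *)ivbuf)[i] = my_read_iv_reg(dd, i);
}

/* AES IV is four u32 words, DES/3DES two:
 *   my_copy_ivout(dd, req->iv, 4);  or  my_copy_ivout(dd, req->iv, 2);
 */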
diff --git a/drivers/crypto/omap-sham.c b/drivers/crypto/omap-sham.c
index ac80bc6af093..4f915a4ef5b0 100644
--- a/drivers/crypto/omap-sham.c
+++ b/drivers/crypto/omap-sham.c
@@ -112,6 +112,8 @@
#define FLAGS_BE32_SHA1 8
#define FLAGS_SGS_COPIED 9
#define FLAGS_SGS_ALLOCED 10
+#define FLAGS_HUGE 11
+
/* context flags */
#define FLAGS_FINUP 16
@@ -136,6 +138,8 @@
#define BUFLEN SHA512_BLOCK_SIZE
#define OMAP_SHA_DMA_THRESHOLD 256
+#define OMAP_SHA_MAX_DMA_LEN (1024 * 2048)
+
struct omap_sham_dev;
struct omap_sham_reqctx {
@@ -644,6 +648,8 @@ static int omap_sham_copy_sg_lists(struct omap_sham_reqctx *ctx,
struct scatterlist *tmp;
int offset = ctx->offset;
+ ctx->total = new_len;
+
if (ctx->bufcnt)
n++;
@@ -661,15 +667,16 @@ static int omap_sham_copy_sg_lists(struct omap_sham_reqctx *ctx,
sg_set_buf(tmp, ctx->dd->xmit_buf, ctx->bufcnt);
tmp = sg_next(tmp);
ctx->sg_len++;
+ new_len -= ctx->bufcnt;
}
while (sg && new_len) {
int len = sg->length - offset;
- if (offset) {
+ if (len <= 0) {
offset -= sg->length;
- if (offset < 0)
- offset = 0;
+ sg = sg_next(sg);
+ continue;
}
if (new_len < len)
@@ -677,33 +684,37 @@ static int omap_sham_copy_sg_lists(struct omap_sham_reqctx *ctx,
if (len > 0) {
new_len -= len;
- sg_set_page(tmp, sg_page(sg), len, sg->offset);
+ sg_set_page(tmp, sg_page(sg), len, sg->offset + offset);
+ offset = 0;
+ ctx->offset = 0;
+ ctx->sg_len++;
if (new_len <= 0)
- sg_mark_end(tmp);
+ break;
tmp = sg_next(tmp);
- ctx->sg_len++;
}
sg = sg_next(sg);
}
+ if (tmp)
+ sg_mark_end(tmp);
+
set_bit(FLAGS_SGS_ALLOCED, &ctx->dd->flags);
+ ctx->offset += new_len - ctx->bufcnt;
ctx->bufcnt = 0;
return 0;
}
static int omap_sham_copy_sgs(struct omap_sham_reqctx *ctx,
- struct scatterlist *sg, int bs, int new_len)
+ struct scatterlist *sg, int bs,
+ unsigned int new_len)
{
int pages;
void *buf;
- int len;
-
- len = new_len + ctx->bufcnt;
- pages = get_order(ctx->total);
+ pages = get_order(new_len);
buf = (void *)__get_free_pages(GFP_ATOMIC, pages);
if (!buf) {
@@ -715,14 +726,15 @@ static int omap_sham_copy_sgs(struct omap_sham_reqctx *ctx,
memcpy(buf, ctx->dd->xmit_buf, ctx->bufcnt);
scatterwalk_map_and_copy(buf + ctx->bufcnt, sg, ctx->offset,
- ctx->total - ctx->bufcnt, 0);
+ min(new_len, ctx->total) - ctx->bufcnt, 0);
sg_init_table(ctx->sgl, 1);
- sg_set_buf(ctx->sgl, buf, len);
+ sg_set_buf(ctx->sgl, buf, new_len);
ctx->sg = ctx->sgl;
set_bit(FLAGS_SGS_COPIED, &ctx->dd->flags);
ctx->sg_len = 1;
+ ctx->offset += new_len - ctx->bufcnt;
ctx->bufcnt = 0;
- ctx->offset = 0;
+ ctx->total = new_len;
return 0;
}
@@ -737,6 +749,7 @@ static int omap_sham_align_sgs(struct scatterlist *sg,
struct scatterlist *sg_tmp = sg;
int new_len;
int offset = rctx->offset;
+ int bufcnt = rctx->bufcnt;
if (!sg || !sg->length || !nbytes)
return 0;
@@ -751,12 +764,28 @@ static int omap_sham_align_sgs(struct scatterlist *sg,
else
new_len = (new_len - 1) / bs * bs;
+ if (!new_len)
+ return 0;
+
if (nbytes != new_len)
list_ok = false;
while (nbytes > 0 && sg_tmp) {
n++;
+ if (bufcnt) {
+ if (!IS_ALIGNED(bufcnt, bs)) {
+ aligned = false;
+ break;
+ }
+ nbytes -= bufcnt;
+ bufcnt = 0;
+ if (!nbytes)
+ list_ok = false;
+
+ continue;
+ }
+
#ifdef CONFIG_ZONE_DMA
if (page_zonenum(sg_page(sg_tmp)) != ZONE_DMA) {
aligned = false;
@@ -794,13 +823,27 @@ static int omap_sham_align_sgs(struct scatterlist *sg,
}
}
+ if (new_len > OMAP_SHA_MAX_DMA_LEN) {
+ new_len = OMAP_SHA_MAX_DMA_LEN;
+ aligned = false;
+ }
+
if (!aligned)
return omap_sham_copy_sgs(rctx, sg, bs, new_len);
else if (!list_ok)
return omap_sham_copy_sg_lists(rctx, sg, bs, new_len);
+ rctx->total = new_len;
+ rctx->offset += new_len;
rctx->sg_len = n;
- rctx->sg = sg;
+ if (rctx->bufcnt) {
+ sg_init_table(rctx->sgl, 2);
+ sg_set_buf(rctx->sgl, rctx->dd->xmit_buf, rctx->bufcnt);
+ sg_chain(rctx->sgl, 2, sg);
+ rctx->sg = rctx->sgl;
+ } else {
+ rctx->sg = sg;
+ }
return 0;
}
@@ -810,31 +853,35 @@ static int omap_sham_prepare_request(struct ahash_request *req, bool update)
struct omap_sham_reqctx *rctx = ahash_request_ctx(req);
int bs;
int ret;
- int nbytes;
+ unsigned int nbytes;
bool final = rctx->flags & BIT(FLAGS_FINUP);
- int xmit_len, hash_later;
+ int hash_later;
bs = get_block_size(rctx);
+ nbytes = rctx->bufcnt;
+
if (update)
- nbytes = req->nbytes;
- else
- nbytes = 0;
+ nbytes += req->nbytes - rctx->offset;
- rctx->total = nbytes + rctx->bufcnt;
+ dev_dbg(rctx->dd->dev,
+ "%s: nbytes=%d, bs=%d, total=%d, offset=%d, bufcnt=%d\n",
+ __func__, nbytes, bs, rctx->total, rctx->offset,
+ rctx->bufcnt);
- if (!rctx->total)
+ if (!nbytes)
return 0;
- if (nbytes && (!IS_ALIGNED(rctx->bufcnt, bs))) {
+ rctx->total = nbytes;
+
+ if (update && req->nbytes && (!IS_ALIGNED(rctx->bufcnt, bs))) {
int len = bs - rctx->bufcnt % bs;
- if (len > nbytes)
- len = nbytes;
+ if (len > req->nbytes)
+ len = req->nbytes;
scatterwalk_map_and_copy(rctx->buffer + rctx->bufcnt, req->src,
0, len, 0);
rctx->bufcnt += len;
- nbytes -= len;
rctx->offset = len;
}
@@ -845,64 +892,25 @@ static int omap_sham_prepare_request(struct ahash_request *req, bool update)
if (ret)
return ret;
- xmit_len = rctx->total;
-
- if (!IS_ALIGNED(xmit_len, bs)) {
- if (final)
- xmit_len = DIV_ROUND_UP(xmit_len, bs) * bs;
- else
- xmit_len = xmit_len / bs * bs;
- } else if (!final) {
- xmit_len -= bs;
- }
-
- hash_later = rctx->total - xmit_len;
+ hash_later = nbytes - rctx->total;
if (hash_later < 0)
hash_later = 0;
- if (rctx->bufcnt && nbytes) {
- /* have data from previous operation and current */
- sg_init_table(rctx->sgl, 2);
- sg_set_buf(rctx->sgl, rctx->dd->xmit_buf, rctx->bufcnt);
-
- sg_chain(rctx->sgl, 2, req->src);
-
- rctx->sg = rctx->sgl;
-
- rctx->sg_len++;
- } else if (rctx->bufcnt) {
- /* have buffered data only */
- sg_init_table(rctx->sgl, 1);
- sg_set_buf(rctx->sgl, rctx->dd->xmit_buf, xmit_len);
-
- rctx->sg = rctx->sgl;
-
- rctx->sg_len = 1;
- }
-
if (hash_later) {
- int offset = 0;
-
- if (hash_later > req->nbytes) {
- memcpy(rctx->buffer, rctx->buffer + xmit_len,
- hash_later - req->nbytes);
- offset = hash_later - req->nbytes;
- }
-
- if (req->nbytes) {
- scatterwalk_map_and_copy(rctx->buffer + offset,
- req->src,
- offset + req->nbytes -
- hash_later, hash_later, 0);
- }
+ scatterwalk_map_and_copy(rctx->buffer,
+ req->src,
+ req->nbytes - hash_later,
+ hash_later, 0);
rctx->bufcnt = hash_later;
} else {
rctx->bufcnt = 0;
}
- if (!final)
- rctx->total = xmit_len;
+ if (hash_later > rctx->buflen)
+ set_bit(FLAGS_HUGE, &rctx->dd->flags);
+
+ rctx->total = min(nbytes, rctx->total);
return 0;
}
@@ -998,10 +1006,11 @@ static int omap_sham_update_req(struct omap_sham_dev *dd)
struct ahash_request *req = dd->req;
struct omap_sham_reqctx *ctx = ahash_request_ctx(req);
int err;
- bool final = ctx->flags & BIT(FLAGS_FINUP);
+ bool final = (ctx->flags & BIT(FLAGS_FINUP)) &&
+ !(dd->flags & BIT(FLAGS_HUGE));
- dev_dbg(dd->dev, "update_req: total: %u, digcnt: %d, finup: %d\n",
- ctx->total, ctx->digcnt, (ctx->flags & BIT(FLAGS_FINUP)) != 0);
+ dev_dbg(dd->dev, "update_req: total: %u, digcnt: %d, final: %d",
+ ctx->total, ctx->digcnt, final);
if (ctx->total < get_block_size(ctx) ||
ctx->total < dd->fallback_sz)
@@ -1024,6 +1033,9 @@ static int omap_sham_final_req(struct omap_sham_dev *dd)
struct omap_sham_reqctx *ctx = ahash_request_ctx(req);
int err = 0, use_dma = 1;
+ if (dd->flags & BIT(FLAGS_HUGE))
+ return 0;
+
if ((ctx->total <= get_block_size(ctx)) || dd->polling_mode)
/*
* faster to handle last block with cpu or
@@ -1083,7 +1095,7 @@ static void omap_sham_finish_req(struct ahash_request *req, int err)
if (test_bit(FLAGS_SGS_COPIED, &dd->flags))
free_pages((unsigned long)sg_virt(ctx->sg),
- get_order(ctx->sg->length + ctx->bufcnt));
+ get_order(ctx->sg->length));
if (test_bit(FLAGS_SGS_ALLOCED, &dd->flags))
kfree(ctx->sg);
@@ -1092,6 +1104,21 @@ static void omap_sham_finish_req(struct ahash_request *req, int err)
dd->flags &= ~(BIT(FLAGS_SGS_ALLOCED) | BIT(FLAGS_SGS_COPIED));
+ if (dd->flags & BIT(FLAGS_HUGE)) {
+ dd->flags &= ~(BIT(FLAGS_CPU) | BIT(FLAGS_DMA_READY) |
+ BIT(FLAGS_OUTPUT_READY) | BIT(FLAGS_HUGE));
+ omap_sham_prepare_request(req, ctx->op == OP_UPDATE);
+ if (ctx->op == OP_UPDATE || (dd->flags & BIT(FLAGS_HUGE))) {
+ err = omap_sham_update_req(dd);
+ if (err != -EINPROGRESS &&
+ (ctx->flags & BIT(FLAGS_FINUP)))
+ err = omap_sham_final_req(dd);
+ } else if (ctx->op == OP_FINAL) {
+ omap_sham_final_req(dd);
+ }
+ return;
+ }
+
if (!err) {
dd->pdata->copy_hash(req, 1);
if (test_bit(FLAGS_FINAL, &dd->flags))
@@ -1107,6 +1134,8 @@ static void omap_sham_finish_req(struct ahash_request *req, int err)
pm_runtime_mark_last_busy(dd->dev);
pm_runtime_put_autosuspend(dd->dev);
+ ctx->offset = 0;
+
if (req->base.complete)
req->base.complete(&req->base, err);
}
@@ -1158,7 +1187,7 @@ retry:
/* request has changed - restore hash */
dd->pdata->copy_hash(req, 0);
- if (ctx->op == OP_UPDATE) {
+ if (ctx->op == OP_UPDATE || (dd->flags & BIT(FLAGS_HUGE))) {
err = omap_sham_update_req(dd);
if (err != -EINPROGRESS && (ctx->flags & BIT(FLAGS_FINUP)))
/* no final() after finup() */
@@ -1730,6 +1759,8 @@ static void omap_sham_done_task(unsigned long data)
struct omap_sham_dev *dd = (struct omap_sham_dev *)data;
int err = 0;
+ dev_dbg(dd->dev, "%s: flags=%lx\n", __func__, dd->flags);
+
if (!test_bit(FLAGS_BUSY, &dd->flags)) {
omap_sham_handle_queue(dd, NULL);
return;
@@ -2223,6 +2254,8 @@ static int omap_sham_remove(struct platform_device *pdev)
if (!dd->polling_mode)
dma_release_channel(dd->dma_lch);
+ sysfs_remove_group(&dd->dev->kobj, &omap_sham_attr_group);
+
return 0;
}
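
The FLAGS_HUGE additions cap a single DMA transfer at OMAP_SHA_MAX_DMA_LEN (2 MiB): an oversized update is clipped, ctx->offset tracks how far into req->src the hardware has advanced, and omap_sham_finish_req() re-drives the remainder instead of completing the request. Roughly, the clamping step looks like this in isolation (a sketch, not the exact driver code):

#include <linux/types.h>

#define MY_MAX_DMA_LEN	(1024 * 2048)	/* mirrors OMAP_SHA_MAX_DMA_LEN */

/* returns the slice to run now; *more tells the caller to requeue */
static unsigned int my_clip_len(unsigned int new_len, bool *more)
{
	*more = new_len > MY_MAX_DMA_LEN;
	return *more ? MY_MAX_DMA_LEN : new_len;
}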
diff --git a/drivers/crypto/padlock-aes.c b/drivers/crypto/padlock-aes.c
index c5b60f50e1b5..594d6b1695d5 100644
--- a/drivers/crypto/padlock-aes.c
+++ b/drivers/crypto/padlock-aes.c
@@ -108,14 +108,11 @@ static int aes_set_key(struct crypto_tfm *tfm, const u8 *in_key,
{
struct aes_ctx *ctx = aes_ctx(tfm);
const __le32 *key = (const __le32 *)in_key;
- u32 *flags = &tfm->crt_flags;
struct crypto_aes_ctx gen_aes;
int cpu;
- if (key_len % 8) {
- *flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
+ if (key_len % 8)
return -EINVAL;
- }
/*
* If the hardware is capable of generating the extended key
@@ -146,10 +143,8 @@ static int aes_set_key(struct crypto_tfm *tfm, const u8 *in_key,
ctx->cword.encrypt.keygen = 1;
ctx->cword.decrypt.keygen = 1;
- if (aes_expandkey(&gen_aes, in_key, key_len)) {
- *flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
+ if (aes_expandkey(&gen_aes, in_key, key_len))
return -EINVAL;
- }
memcpy(ctx->E, gen_aes.key_enc, AES_MAX_KEYLENGTH);
memcpy(ctx->D, gen_aes.key_dec, AES_MAX_KEYLENGTH);
diff --git a/drivers/crypto/padlock-sha.c b/drivers/crypto/padlock-sha.c
index ddf1b549fdca..c826abe79e79 100644
--- a/drivers/crypto/padlock-sha.c
+++ b/drivers/crypto/padlock-sha.c
@@ -190,13 +190,11 @@ static int padlock_sha256_final(struct shash_desc *desc, u8 *out)
return padlock_sha256_finup(desc, buf, 0, out);
}
-static int padlock_cra_init(struct crypto_tfm *tfm)
+static int padlock_init_tfm(struct crypto_shash *hash)
{
- struct crypto_shash *hash = __crypto_shash_cast(tfm);
- const char *fallback_driver_name = crypto_tfm_alg_name(tfm);
- struct padlock_sha_ctx *ctx = crypto_tfm_ctx(tfm);
+ const char *fallback_driver_name = crypto_shash_alg_name(hash);
+ struct padlock_sha_ctx *ctx = crypto_shash_ctx(hash);
struct crypto_shash *fallback_tfm;
- int err = -ENOMEM;
/* Allocate a fallback and abort if it failed. */
fallback_tfm = crypto_alloc_shash(fallback_driver_name, 0,
@@ -204,21 +202,17 @@ static int padlock_cra_init(struct crypto_tfm *tfm)
if (IS_ERR(fallback_tfm)) {
printk(KERN_WARNING PFX "Fallback driver '%s' could not be loaded!\n",
fallback_driver_name);
- err = PTR_ERR(fallback_tfm);
- goto out;
+ return PTR_ERR(fallback_tfm);
}
ctx->fallback = fallback_tfm;
hash->descsize += crypto_shash_descsize(fallback_tfm);
return 0;
-
-out:
- return err;
}
-static void padlock_cra_exit(struct crypto_tfm *tfm)
+static void padlock_exit_tfm(struct crypto_shash *hash)
{
- struct padlock_sha_ctx *ctx = crypto_tfm_ctx(tfm);
+ struct padlock_sha_ctx *ctx = crypto_shash_ctx(hash);
crypto_free_shash(ctx->fallback);
}
@@ -231,6 +225,8 @@ static struct shash_alg sha1_alg = {
.final = padlock_sha1_final,
.export = padlock_sha_export,
.import = padlock_sha_import,
+ .init_tfm = padlock_init_tfm,
+ .exit_tfm = padlock_exit_tfm,
.descsize = sizeof(struct padlock_sha_desc),
.statesize = sizeof(struct sha1_state),
.base = {
@@ -241,8 +237,6 @@ static struct shash_alg sha1_alg = {
.cra_blocksize = SHA1_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct padlock_sha_ctx),
.cra_module = THIS_MODULE,
- .cra_init = padlock_cra_init,
- .cra_exit = padlock_cra_exit,
}
};
@@ -254,6 +248,8 @@ static struct shash_alg sha256_alg = {
.final = padlock_sha256_final,
.export = padlock_sha_export,
.import = padlock_sha_import,
+ .init_tfm = padlock_init_tfm,
+ .exit_tfm = padlock_exit_tfm,
.descsize = sizeof(struct padlock_sha_desc),
.statesize = sizeof(struct sha256_state),
.base = {
@@ -264,8 +260,6 @@ static struct shash_alg sha256_alg = {
.cra_blocksize = SHA256_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct padlock_sha_ctx),
.cra_module = THIS_MODULE,
- .cra_init = padlock_cra_init,
- .cra_exit = padlock_cra_exit,
}
};
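
The padlock-sha conversion swaps the base-tfm cra_init/cra_exit hooks for the typed shash init_tfm/exit_tfm callbacks, which hand the driver a struct crypto_shash directly and drop the __crypto_shash_cast() boilerplate. A sketch of the pattern under hypothetical names:

#include <linux/err.h>
#include <crypto/internal/hash.h>

struct my_sha_ctx {
	struct crypto_shash *fallback;
};

static int my_init_tfm(struct crypto_shash *hash)
{
	struct my_sha_ctx *ctx = crypto_shash_ctx(hash);

	ctx->fallback = crypto_alloc_shash(crypto_shash_alg_name(hash), 0,
					   CRYPTO_ALG_NEED_FALLBACK);
	if (IS_ERR(ctx->fallback))
		return PTR_ERR(ctx->fallback);

	/* the fallback's state rides along in our descriptor */
	hash->descsize += crypto_shash_descsize(ctx->fallback);
	return 0;
}

static void my_exit_tfm(struct crypto_shash *hash)
{
	struct my_sha_ctx *ctx = crypto_shash_ctx(hash);

	crypto_free_shash(ctx->fallback);
}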
diff --git a/drivers/crypto/picoxcell_crypto.c b/drivers/crypto/picoxcell_crypto.c
index 29da449b3e9e..7384e91c8b32 100644
--- a/drivers/crypto/picoxcell_crypto.c
+++ b/drivers/crypto/picoxcell_crypto.c
@@ -465,9 +465,6 @@ static int spacc_aead_setkey(struct crypto_aead *tfm, const u8 *key,
crypto_aead_set_flags(ctx->sw_cipher, crypto_aead_get_flags(tfm) &
CRYPTO_TFM_REQ_MASK);
err = crypto_aead_setkey(ctx->sw_cipher, key, keylen);
- crypto_aead_clear_flags(tfm, CRYPTO_TFM_RES_MASK);
- crypto_aead_set_flags(tfm, crypto_aead_get_flags(ctx->sw_cipher) &
- CRYPTO_TFM_RES_MASK);
if (err)
return err;
@@ -490,7 +487,6 @@ static int spacc_aead_setkey(struct crypto_aead *tfm, const u8 *key,
return 0;
badkey:
- crypto_aead_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
memzero_explicit(&keys, sizeof(keys));
return -EINVAL;
}
@@ -780,10 +776,8 @@ static int spacc_aes_setkey(struct crypto_skcipher *cipher, const u8 *key,
struct spacc_ablk_ctx *ctx = crypto_tfm_ctx(tfm);
int err = 0;
- if (len > AES_MAX_KEY_SIZE) {
- crypto_skcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
+ if (len > AES_MAX_KEY_SIZE)
return -EINVAL;
- }
/*
* IPSec engine only supports 128 and 256 bit AES keys. If we get a
@@ -805,12 +799,6 @@ static int spacc_aes_setkey(struct crypto_skcipher *cipher, const u8 *key,
CRYPTO_TFM_REQ_MASK);
err = crypto_sync_skcipher_setkey(ctx->sw_cipher, key, len);
-
- tfm->crt_flags &= ~CRYPTO_TFM_RES_MASK;
- tfm->crt_flags |=
- crypto_sync_skcipher_get_flags(ctx->sw_cipher) &
- CRYPTO_TFM_RES_MASK;
-
if (err)
goto sw_setkey_failed;
}
@@ -830,7 +818,6 @@ static int spacc_kasumi_f8_setkey(struct crypto_skcipher *cipher,
int err = 0;
if (len > AES_MAX_KEY_SIZE) {
- crypto_skcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
err = -EINVAL;
goto out;
}
@@ -1595,6 +1582,11 @@ static const struct of_device_id spacc_of_id_table[] = {
MODULE_DEVICE_TABLE(of, spacc_of_id_table);
#endif /* CONFIG_OF */
+static void spacc_tasklet_kill(void *data)
+{
+ tasklet_kill(data);
+}
+
static int spacc_probe(struct platform_device *pdev)
{
int i, err, ret;
@@ -1637,6 +1629,14 @@ static int spacc_probe(struct platform_device *pdev)
return -ENXIO;
}
+ tasklet_init(&engine->complete, spacc_spacc_complete,
+ (unsigned long)engine);
+
+ ret = devm_add_action(&pdev->dev, spacc_tasklet_kill,
+ &engine->complete);
+ if (ret)
+ return ret;
+
if (devm_request_irq(&pdev->dev, irq->start, spacc_spacc_irq, 0,
engine->name, engine)) {
dev_err(engine->dev, "failed to request IRQ\n");
@@ -1694,8 +1694,6 @@ static int spacc_probe(struct platform_device *pdev)
INIT_LIST_HEAD(&engine->completed);
INIT_LIST_HEAD(&engine->in_progress);
engine->in_flight = 0;
- tasklet_init(&engine->complete, spacc_spacc_complete,
- (unsigned long)engine);
platform_set_drvdata(pdev, engine);
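
Moving tasklet_init() ahead of devm_request_irq() closes a race in which the IRQ handler could schedule a tasklet that was not yet initialised; the devm action then guarantees the tasklet is killed before device-managed resources are torn down. The ordering, reduced to a skeleton with invented names:

#include <linux/device.h>
#include <linux/interrupt.h>
#include <linux/platform_device.h>

void my_bottom_half(unsigned long data);	/* hypothetical tasklet body */

static void my_tasklet_kill(void *data)
{
	tasklet_kill(data);
}

static int my_wire_irq(struct platform_device *pdev, struct tasklet_struct *t,
		       int irq, irq_handler_t handler, void *dev_id)
{
	int ret;

	/* the bottom half must be valid before the IRQ line is live */
	tasklet_init(t, my_bottom_half, (unsigned long)dev_id);

	ret = devm_add_action(&pdev->dev, my_tasklet_kill, t);
	if (ret)
		return ret;

	return devm_request_irq(&pdev->dev, irq, handler, 0,
				dev_name(&pdev->dev), dev_id);
}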
diff --git a/drivers/crypto/qat/qat_common/qat_algs.c b/drivers/crypto/qat/qat_common/qat_algs.c
index 35bca76b640f..833bb1d3a11b 100644
--- a/drivers/crypto/qat/qat_common/qat_algs.c
+++ b/drivers/crypto/qat/qat_common/qat_algs.c
@@ -570,7 +570,6 @@ static int qat_alg_aead_init_sessions(struct crypto_aead *tfm, const u8 *key,
memzero_explicit(&keys, sizeof(keys));
return 0;
bad_key:
- crypto_aead_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
memzero_explicit(&keys, sizeof(keys));
return -EINVAL;
error:
@@ -586,14 +585,11 @@ static int qat_alg_skcipher_init_sessions(struct qat_alg_skcipher_ctx *ctx,
int alg;
if (qat_alg_validate_key(keylen, &alg, mode))
- goto bad_key;
+ return -EINVAL;
qat_alg_skcipher_init_enc(ctx, alg, key, keylen, mode);
qat_alg_skcipher_init_dec(ctx, alg, key, keylen, mode);
return 0;
-bad_key:
- crypto_skcipher_set_flags(ctx->tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
- return -EINVAL;
}
static int qat_alg_aead_rekey(struct crypto_aead *tfm, const uint8_t *key,
diff --git a/drivers/crypto/qce/Makefile b/drivers/crypto/qce/Makefile
index 8caa04e1ec43..14ade8a7d664 100644
--- a/drivers/crypto/qce/Makefile
+++ b/drivers/crypto/qce/Makefile
@@ -2,6 +2,7 @@
obj-$(CONFIG_CRYPTO_DEV_QCE) += qcrypto.o
qcrypto-objs := core.o \
common.o \
- dma.o \
- sha.o \
- skcipher.o
+ dma.o
+
+qcrypto-$(CONFIG_CRYPTO_DEV_QCE_SHA) += sha.o
+qcrypto-$(CONFIG_CRYPTO_DEV_QCE_SKCIPHER) += skcipher.o
diff --git a/drivers/crypto/qce/common.c b/drivers/crypto/qce/common.c
index da1188abc9ba..629e7f34dc09 100644
--- a/drivers/crypto/qce/common.c
+++ b/drivers/crypto/qce/common.c
@@ -45,52 +45,56 @@ qce_clear_array(struct qce_device *qce, u32 offset, unsigned int len)
qce_write(qce, offset + i * sizeof(u32), 0);
}
-static u32 qce_encr_cfg(unsigned long flags, u32 aes_key_size)
+static u32 qce_config_reg(struct qce_device *qce, int little)
{
- u32 cfg = 0;
+ u32 beats = (qce->burst_size >> 3) - 1;
+ u32 pipe_pair = qce->pipe_pair_id;
+ u32 config;
- if (IS_AES(flags)) {
- if (aes_key_size == AES_KEYSIZE_128)
- cfg |= ENCR_KEY_SZ_AES128 << ENCR_KEY_SZ_SHIFT;
- else if (aes_key_size == AES_KEYSIZE_256)
- cfg |= ENCR_KEY_SZ_AES256 << ENCR_KEY_SZ_SHIFT;
- }
+ config = (beats << REQ_SIZE_SHIFT) & REQ_SIZE_MASK;
+ config |= BIT(MASK_DOUT_INTR_SHIFT) | BIT(MASK_DIN_INTR_SHIFT) |
+ BIT(MASK_OP_DONE_INTR_SHIFT) | BIT(MASK_ERR_INTR_SHIFT);
+ config |= (pipe_pair << PIPE_SET_SELECT_SHIFT) & PIPE_SET_SELECT_MASK;
+ config &= ~HIGH_SPD_EN_N_SHIFT;
- if (IS_AES(flags))
- cfg |= ENCR_ALG_AES << ENCR_ALG_SHIFT;
- else if (IS_DES(flags) || IS_3DES(flags))
- cfg |= ENCR_ALG_DES << ENCR_ALG_SHIFT;
+ if (little)
+ config |= BIT(LITTLE_ENDIAN_MODE_SHIFT);
- if (IS_DES(flags))
- cfg |= ENCR_KEY_SZ_DES << ENCR_KEY_SZ_SHIFT;
+ return config;
+}
- if (IS_3DES(flags))
- cfg |= ENCR_KEY_SZ_3DES << ENCR_KEY_SZ_SHIFT;
+void qce_cpu_to_be32p_array(__be32 *dst, const u8 *src, unsigned int len)
+{
+ __be32 *d = dst;
+ const u8 *s = src;
+ unsigned int n;
- switch (flags & QCE_MODE_MASK) {
- case QCE_MODE_ECB:
- cfg |= ENCR_MODE_ECB << ENCR_MODE_SHIFT;
- break;
- case QCE_MODE_CBC:
- cfg |= ENCR_MODE_CBC << ENCR_MODE_SHIFT;
- break;
- case QCE_MODE_CTR:
- cfg |= ENCR_MODE_CTR << ENCR_MODE_SHIFT;
- break;
- case QCE_MODE_XTS:
- cfg |= ENCR_MODE_XTS << ENCR_MODE_SHIFT;
- break;
- case QCE_MODE_CCM:
- cfg |= ENCR_MODE_CCM << ENCR_MODE_SHIFT;
- cfg |= LAST_CCM_XFR << LAST_CCM_SHIFT;
- break;
- default:
- return ~0;
+ n = len / sizeof(u32);
+ for (; n > 0; n--) {
+ *d = cpu_to_be32p((const __u32 *) s);
+ s += sizeof(__u32);
+ d++;
}
+}
- return cfg;
+static void qce_setup_config(struct qce_device *qce)
+{
+ u32 config;
+
+ /* get big endianness */
+ config = qce_config_reg(qce, 0);
+
+ /* clear status */
+ qce_write(qce, REG_STATUS, 0);
+ qce_write(qce, REG_CONFIG, config);
+}
+
+static inline void qce_crypto_go(struct qce_device *qce)
+{
+ qce_write(qce, REG_GOPROC, BIT(GO_SHIFT) | BIT(RESULTS_DUMP_SHIFT));
}
+#ifdef CONFIG_CRYPTO_DEV_QCE_SHA
static u32 qce_auth_cfg(unsigned long flags, u32 key_size)
{
u32 cfg = 0;
@@ -137,88 +141,6 @@ static u32 qce_auth_cfg(unsigned long flags, u32 key_size)
return cfg;
}
-static u32 qce_config_reg(struct qce_device *qce, int little)
-{
- u32 beats = (qce->burst_size >> 3) - 1;
- u32 pipe_pair = qce->pipe_pair_id;
- u32 config;
-
- config = (beats << REQ_SIZE_SHIFT) & REQ_SIZE_MASK;
- config |= BIT(MASK_DOUT_INTR_SHIFT) | BIT(MASK_DIN_INTR_SHIFT) |
- BIT(MASK_OP_DONE_INTR_SHIFT) | BIT(MASK_ERR_INTR_SHIFT);
- config |= (pipe_pair << PIPE_SET_SELECT_SHIFT) & PIPE_SET_SELECT_MASK;
- config &= ~HIGH_SPD_EN_N_SHIFT;
-
- if (little)
- config |= BIT(LITTLE_ENDIAN_MODE_SHIFT);
-
- return config;
-}
-
-void qce_cpu_to_be32p_array(__be32 *dst, const u8 *src, unsigned int len)
-{
- __be32 *d = dst;
- const u8 *s = src;
- unsigned int n;
-
- n = len / sizeof(u32);
- for (; n > 0; n--) {
- *d = cpu_to_be32p((const __u32 *) s);
- s += sizeof(__u32);
- d++;
- }
-}
-
-static void qce_xts_swapiv(__be32 *dst, const u8 *src, unsigned int ivsize)
-{
- u8 swap[QCE_AES_IV_LENGTH];
- u32 i, j;
-
- if (ivsize > QCE_AES_IV_LENGTH)
- return;
-
- memset(swap, 0, QCE_AES_IV_LENGTH);
-
- for (i = (QCE_AES_IV_LENGTH - ivsize), j = ivsize - 1;
- i < QCE_AES_IV_LENGTH; i++, j--)
- swap[i] = src[j];
-
- qce_cpu_to_be32p_array(dst, swap, QCE_AES_IV_LENGTH);
-}
-
-static void qce_xtskey(struct qce_device *qce, const u8 *enckey,
- unsigned int enckeylen, unsigned int cryptlen)
-{
- u32 xtskey[QCE_MAX_CIPHER_KEY_SIZE / sizeof(u32)] = {0};
- unsigned int xtsklen = enckeylen / (2 * sizeof(u32));
- unsigned int xtsdusize;
-
- qce_cpu_to_be32p_array((__be32 *)xtskey, enckey + enckeylen / 2,
- enckeylen / 2);
- qce_write_array(qce, REG_ENCR_XTS_KEY0, xtskey, xtsklen);
-
- /* xts du size 512B */
- xtsdusize = min_t(u32, QCE_SECTOR_SIZE, cryptlen);
- qce_write(qce, REG_ENCR_XTS_DU_SIZE, xtsdusize);
-}
-
-static void qce_setup_config(struct qce_device *qce)
-{
- u32 config;
-
- /* get big endianness */
- config = qce_config_reg(qce, 0);
-
- /* clear status */
- qce_write(qce, REG_STATUS, 0);
- qce_write(qce, REG_CONFIG, config);
-}
-
-static inline void qce_crypto_go(struct qce_device *qce)
-{
- qce_write(qce, REG_GOPROC, BIT(GO_SHIFT) | BIT(RESULTS_DUMP_SHIFT));
-}
-
static int qce_setup_regs_ahash(struct crypto_async_request *async_req,
u32 totallen, u32 offset)
{
@@ -303,6 +225,87 @@ go_proc:
return 0;
}
+#endif
+
+#ifdef CONFIG_CRYPTO_DEV_QCE_SKCIPHER
+static u32 qce_encr_cfg(unsigned long flags, u32 aes_key_size)
+{
+ u32 cfg = 0;
+
+ if (IS_AES(flags)) {
+ if (aes_key_size == AES_KEYSIZE_128)
+ cfg |= ENCR_KEY_SZ_AES128 << ENCR_KEY_SZ_SHIFT;
+ else if (aes_key_size == AES_KEYSIZE_256)
+ cfg |= ENCR_KEY_SZ_AES256 << ENCR_KEY_SZ_SHIFT;
+ }
+
+ if (IS_AES(flags))
+ cfg |= ENCR_ALG_AES << ENCR_ALG_SHIFT;
+ else if (IS_DES(flags) || IS_3DES(flags))
+ cfg |= ENCR_ALG_DES << ENCR_ALG_SHIFT;
+
+ if (IS_DES(flags))
+ cfg |= ENCR_KEY_SZ_DES << ENCR_KEY_SZ_SHIFT;
+
+ if (IS_3DES(flags))
+ cfg |= ENCR_KEY_SZ_3DES << ENCR_KEY_SZ_SHIFT;
+
+ switch (flags & QCE_MODE_MASK) {
+ case QCE_MODE_ECB:
+ cfg |= ENCR_MODE_ECB << ENCR_MODE_SHIFT;
+ break;
+ case QCE_MODE_CBC:
+ cfg |= ENCR_MODE_CBC << ENCR_MODE_SHIFT;
+ break;
+ case QCE_MODE_CTR:
+ cfg |= ENCR_MODE_CTR << ENCR_MODE_SHIFT;
+ break;
+ case QCE_MODE_XTS:
+ cfg |= ENCR_MODE_XTS << ENCR_MODE_SHIFT;
+ break;
+ case QCE_MODE_CCM:
+ cfg |= ENCR_MODE_CCM << ENCR_MODE_SHIFT;
+ cfg |= LAST_CCM_XFR << LAST_CCM_SHIFT;
+ break;
+ default:
+ return ~0;
+ }
+
+ return cfg;
+}
+
+static void qce_xts_swapiv(__be32 *dst, const u8 *src, unsigned int ivsize)
+{
+ u8 swap[QCE_AES_IV_LENGTH];
+ u32 i, j;
+
+ if (ivsize > QCE_AES_IV_LENGTH)
+ return;
+
+ memset(swap, 0, QCE_AES_IV_LENGTH);
+
+ for (i = (QCE_AES_IV_LENGTH - ivsize), j = ivsize - 1;
+ i < QCE_AES_IV_LENGTH; i++, j--)
+ swap[i] = src[j];
+
+ qce_cpu_to_be32p_array(dst, swap, QCE_AES_IV_LENGTH);
+}
+
+static void qce_xtskey(struct qce_device *qce, const u8 *enckey,
+ unsigned int enckeylen, unsigned int cryptlen)
+{
+ u32 xtskey[QCE_MAX_CIPHER_KEY_SIZE / sizeof(u32)] = {0};
+ unsigned int xtsklen = enckeylen / (2 * sizeof(u32));
+ unsigned int xtsdusize;
+
+ qce_cpu_to_be32p_array((__be32 *)xtskey, enckey + enckeylen / 2,
+ enckeylen / 2);
+ qce_write_array(qce, REG_ENCR_XTS_KEY0, xtskey, xtsklen);
+
+ /* xts du size 512B */
+ xtsdusize = min_t(u32, QCE_SECTOR_SIZE, cryptlen);
+ qce_write(qce, REG_ENCR_XTS_DU_SIZE, xtsdusize);
+}
static int qce_setup_regs_skcipher(struct crypto_async_request *async_req,
u32 totallen, u32 offset)
@@ -384,15 +387,20 @@ static int qce_setup_regs_skcipher(struct crypto_async_request *async_req,
return 0;
}
+#endif
int qce_start(struct crypto_async_request *async_req, u32 type, u32 totallen,
u32 offset)
{
switch (type) {
+#ifdef CONFIG_CRYPTO_DEV_QCE_SKCIPHER
case CRYPTO_ALG_TYPE_SKCIPHER:
return qce_setup_regs_skcipher(async_req, totallen, offset);
+#endif
+#ifdef CONFIG_CRYPTO_DEV_QCE_SHA
case CRYPTO_ALG_TYPE_AHASH:
return qce_setup_regs_ahash(async_req, totallen, offset);
+#endif
default:
return -EINVAL;
}
diff --git a/drivers/crypto/qce/core.c b/drivers/crypto/qce/core.c
index 0a44a6eeacf5..cb6d61eb7302 100644
--- a/drivers/crypto/qce/core.c
+++ b/drivers/crypto/qce/core.c
@@ -22,8 +22,12 @@
#define QCE_QUEUE_LENGTH 1
static const struct qce_algo_ops *qce_ops[] = {
+#ifdef CONFIG_CRYPTO_DEV_QCE_SKCIPHER
&skcipher_ops,
+#endif
+#ifdef CONFIG_CRYPTO_DEV_QCE_SHA
&ahash_ops,
+#endif
};
static void qce_unregister_algs(struct qce_device *qce)
diff --git a/drivers/crypto/qce/dma.c b/drivers/crypto/qce/dma.c
index 40a59214d2e1..7da893dc00e7 100644
--- a/drivers/crypto/qce/dma.c
+++ b/drivers/crypto/qce/dma.c
@@ -47,7 +47,8 @@ void qce_dma_release(struct qce_dma_data *dma)
}
struct scatterlist *
-qce_sgtable_add(struct sg_table *sgt, struct scatterlist *new_sgl)
+qce_sgtable_add(struct sg_table *sgt, struct scatterlist *new_sgl,
+ int max_ents)
{
struct scatterlist *sg = sgt->sgl, *sg_last = NULL;
@@ -60,12 +61,13 @@ qce_sgtable_add(struct sg_table *sgt, struct scatterlist *new_sgl)
if (!sg)
return ERR_PTR(-EINVAL);
- while (new_sgl && sg) {
+ while (new_sgl && sg && max_ents) {
sg_set_page(sg, sg_page(new_sgl), new_sgl->length,
new_sgl->offset);
sg_last = sg;
sg = sg_next(sg);
new_sgl = sg_next(new_sgl);
+ max_ents--;
}
return sg_last;
diff --git a/drivers/crypto/qce/dma.h b/drivers/crypto/qce/dma.h
index 1e25a9e0e6f8..ed25a0d9829e 100644
--- a/drivers/crypto/qce/dma.h
+++ b/drivers/crypto/qce/dma.h
@@ -42,6 +42,7 @@ int qce_dma_prep_sgs(struct qce_dma_data *dma, struct scatterlist *sg_in,
void qce_dma_issue_pending(struct qce_dma_data *dma);
int qce_dma_terminate_all(struct qce_dma_data *dma);
struct scatterlist *
-qce_sgtable_add(struct sg_table *sgt, struct scatterlist *sg_add);
+qce_sgtable_add(struct sg_table *sgt, struct scatterlist *sg_add,
+ int max_ents);
#endif /* _DMA_H_ */
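
qce_sgtable_add() gains a max_ents bound because the destination table is allocated for an exact entry count; without the bound, a source list longer than expected would walk past the table. A sketch of how a caller pairs the allocation with the bound (error unwind via sg_free_table() elided; the helper name is invented):

#include <linux/err.h>
#include <linux/gfp.h>
#include <linux/scatterlist.h>

static int my_build_dst_table(struct sg_table *tbl, struct scatterlist *payload,
			      struct scatterlist *result_sg, int payload_ents)
{
	struct scatterlist *sg;
	int ret;

	ret = sg_alloc_table(tbl, payload_ents + 1, GFP_ATOMIC);
	if (ret)
		return ret;

	sg = qce_sgtable_add(tbl, payload, payload_ents);	/* data */
	if (IS_ERR(sg))
		return PTR_ERR(sg);

	sg = qce_sgtable_add(tbl, result_sg, 1);		/* result dump */
	return PTR_ERR_OR_ZERO(sg);
}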
diff --git a/drivers/crypto/qce/sha.c b/drivers/crypto/qce/sha.c
index 95ab16fc8fd6..1ab62e7d5f3c 100644
--- a/drivers/crypto/qce/sha.c
+++ b/drivers/crypto/qce/sha.c
@@ -396,8 +396,6 @@ static int qce_ahash_hmac_setkey(struct crypto_ahash *tfm, const u8 *key,
ahash_request_set_crypt(req, &sg, ctx->authkey, keylen);
ret = crypto_wait_req(crypto_ahash_digest(req), &wait);
- if (ret)
- crypto_ahash_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
kfree(buf);
err_free_req:
diff --git a/drivers/crypto/qce/skcipher.c b/drivers/crypto/qce/skcipher.c
index fee07323f8f9..4217b745f124 100644
--- a/drivers/crypto/qce/skcipher.c
+++ b/drivers/crypto/qce/skcipher.c
@@ -21,6 +21,7 @@ static void qce_skcipher_done(void *data)
struct qce_cipher_reqctx *rctx = skcipher_request_ctx(req);
struct qce_alg_template *tmpl = to_cipher_tmpl(crypto_skcipher_reqtfm(req));
struct qce_device *qce = tmpl->qce;
+ struct qce_result_dump *result_buf = qce->dma.result_buf;
enum dma_data_direction dir_src, dir_dst;
u32 status;
int error;
@@ -45,6 +46,7 @@ static void qce_skcipher_done(void *data)
if (error < 0)
dev_dbg(qce->dev, "skcipher operation error (%x)\n", status);
+ memcpy(rctx->iv, result_buf->encr_cntr_iv, rctx->ivsize);
qce->async_req_done(tmpl->qce, error);
}
@@ -95,13 +97,13 @@ qce_skcipher_async_req_handle(struct crypto_async_request *async_req)
sg_init_one(&rctx->result_sg, qce->dma.result_buf, QCE_RESULT_BUF_SZ);
- sg = qce_sgtable_add(&rctx->dst_tbl, req->dst);
+ sg = qce_sgtable_add(&rctx->dst_tbl, req->dst, rctx->dst_nents - 1);
if (IS_ERR(sg)) {
ret = PTR_ERR(sg);
goto error_free;
}
- sg = qce_sgtable_add(&rctx->dst_tbl, &rctx->result_sg);
+ sg = qce_sgtable_add(&rctx->dst_tbl, &rctx->result_sg, 1);
if (IS_ERR(sg)) {
ret = PTR_ERR(sg);
goto error_free;
@@ -154,12 +156,13 @@ static int qce_skcipher_setkey(struct crypto_skcipher *ablk, const u8 *key,
{
struct crypto_tfm *tfm = crypto_skcipher_tfm(ablk);
struct qce_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
+ unsigned long flags = to_cipher_tmpl(ablk)->alg_flags;
int ret;
if (!key || !keylen)
return -EINVAL;
- switch (keylen) {
+ switch (IS_XTS(flags) ? keylen >> 1 : keylen) {
case AES_KEYSIZE_128:
case AES_KEYSIZE_256:
break;
@@ -213,13 +216,15 @@ static int qce_skcipher_crypt(struct skcipher_request *req, int encrypt)
struct qce_cipher_ctx *ctx = crypto_skcipher_ctx(tfm);
struct qce_cipher_reqctx *rctx = skcipher_request_ctx(req);
struct qce_alg_template *tmpl = to_cipher_tmpl(tfm);
+ int keylen;
int ret;
rctx->flags = tmpl->alg_flags;
rctx->flags |= encrypt ? QCE_ENCRYPT : QCE_DECRYPT;
+ keylen = IS_XTS(rctx->flags) ? ctx->enc_keylen >> 1 : ctx->enc_keylen;
- if (IS_AES(rctx->flags) && ctx->enc_keylen != AES_KEYSIZE_128 &&
- ctx->enc_keylen != AES_KEYSIZE_256) {
+ if (IS_AES(rctx->flags) && keylen != AES_KEYSIZE_128 &&
+ keylen != AES_KEYSIZE_256) {
SYNC_SKCIPHER_REQUEST_ON_STACK(subreq, ctx->fallback);
skcipher_request_set_sync_tfm(subreq, ctx->fallback);
@@ -252,7 +257,14 @@ static int qce_skcipher_init(struct crypto_skcipher *tfm)
memset(ctx, 0, sizeof(*ctx));
crypto_skcipher_set_reqsize(tfm, sizeof(struct qce_cipher_reqctx));
+ return 0;
+}
+
+static int qce_skcipher_init_fallback(struct crypto_skcipher *tfm)
+{
+ struct qce_cipher_ctx *ctx = crypto_skcipher_ctx(tfm);
+ qce_skcipher_init(tfm);
ctx->fallback = crypto_alloc_sync_skcipher(crypto_tfm_alg_name(&tfm->base),
0, CRYPTO_ALG_NEED_FALLBACK);
return PTR_ERR_OR_ZERO(ctx->fallback);
@@ -270,6 +282,7 @@ struct qce_skcipher_def {
const char *name;
const char *drv_name;
unsigned int blocksize;
+ unsigned int chunksize;
unsigned int ivsize;
unsigned int min_keysize;
unsigned int max_keysize;
@@ -298,7 +311,8 @@ static const struct qce_skcipher_def skcipher_def[] = {
.flags = QCE_ALG_AES | QCE_MODE_CTR,
.name = "ctr(aes)",
.drv_name = "ctr-aes-qce",
- .blocksize = AES_BLOCK_SIZE,
+ .blocksize = 1,
+ .chunksize = AES_BLOCK_SIZE,
.ivsize = AES_BLOCK_SIZE,
.min_keysize = AES_MIN_KEY_SIZE,
.max_keysize = AES_MAX_KEY_SIZE,
@@ -309,8 +323,8 @@ static const struct qce_skcipher_def skcipher_def[] = {
.drv_name = "xts-aes-qce",
.blocksize = AES_BLOCK_SIZE,
.ivsize = AES_BLOCK_SIZE,
- .min_keysize = AES_MIN_KEY_SIZE,
- .max_keysize = AES_MAX_KEY_SIZE,
+ .min_keysize = AES_MIN_KEY_SIZE * 2,
+ .max_keysize = AES_MAX_KEY_SIZE * 2,
},
{
.flags = QCE_ALG_DES | QCE_MODE_ECB,
@@ -368,6 +382,7 @@ static int qce_skcipher_register_one(const struct qce_skcipher_def *def,
def->drv_name);
alg->base.cra_blocksize = def->blocksize;
+ alg->chunksize = def->chunksize;
alg->ivsize = def->ivsize;
alg->min_keysize = def->min_keysize;
alg->max_keysize = def->max_keysize;
@@ -379,14 +394,18 @@ static int qce_skcipher_register_one(const struct qce_skcipher_def *def,
alg->base.cra_priority = 300;
alg->base.cra_flags = CRYPTO_ALG_ASYNC |
- CRYPTO_ALG_NEED_FALLBACK |
CRYPTO_ALG_KERN_DRIVER_ONLY;
alg->base.cra_ctxsize = sizeof(struct qce_cipher_ctx);
alg->base.cra_alignmask = 0;
alg->base.cra_module = THIS_MODULE;
- alg->init = qce_skcipher_init;
- alg->exit = qce_skcipher_exit;
+ if (IS_AES(def->flags)) {
+ alg->base.cra_flags |= CRYPTO_ALG_NEED_FALLBACK;
+ alg->init = qce_skcipher_init_fallback;
+ alg->exit = qce_skcipher_exit;
+ } else {
+ alg->init = qce_skcipher_init;
+ }
INIT_LIST_HEAD(&tmpl->entry);
tmpl->crypto_alg_type = CRYPTO_ALG_TYPE_SKCIPHER;
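
Two registration details above are worth calling out: XTS carries two AES keys back to back, so min/max keysize double and every length check halves keylen first; and ctr(aes) now advertises cra_blocksize = 1 (a stream cipher) with the new chunksize field preserving AES block granularity for the walk. The relevant fields, as an illustrative excerpt rather than the full array:

static const struct qce_skcipher_def my_defs[] = {
	{
		.flags		= QCE_ALG_AES | QCE_MODE_CTR,
		.name		= "ctr(aes)",
		.blocksize	= 1,			/* stream cipher */
		.chunksize	= AES_BLOCK_SIZE,	/* walk granularity */
		.ivsize		= AES_BLOCK_SIZE,
		.min_keysize	= AES_MIN_KEY_SIZE,
		.max_keysize	= AES_MAX_KEY_SIZE,
	}, {
		.flags		= QCE_ALG_AES | QCE_MODE_XTS,
		.name		= "xts(aes)",
		.blocksize	= AES_BLOCK_SIZE,
		.ivsize		= AES_BLOCK_SIZE,
		.min_keysize	= AES_MIN_KEY_SIZE * 2,	/* two keys in one blob */
		.max_keysize	= AES_MAX_KEY_SIZE * 2,
	},
};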
diff --git a/drivers/crypto/rockchip/rk3288_crypto_skcipher.c b/drivers/crypto/rockchip/rk3288_crypto_skcipher.c
index ca4de4ddfe1f..4a75c8e1fa6c 100644
--- a/drivers/crypto/rockchip/rk3288_crypto_skcipher.c
+++ b/drivers/crypto/rockchip/rk3288_crypto_skcipher.c
@@ -34,10 +34,8 @@ static int rk_aes_setkey(struct crypto_skcipher *cipher,
struct rk_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
if (keylen != AES_KEYSIZE_128 && keylen != AES_KEYSIZE_192 &&
- keylen != AES_KEYSIZE_256) {
- crypto_skcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
+ keylen != AES_KEYSIZE_256)
return -EINVAL;
- }
ctx->keylen = keylen;
memcpy_toio(ctx->dev->reg + RK_CRYPTO_AES_KEY_0, key, keylen);
return 0;
diff --git a/drivers/crypto/sahara.c b/drivers/crypto/sahara.c
index d4ea2f11ca68..466e30bd529c 100644
--- a/drivers/crypto/sahara.c
+++ b/drivers/crypto/sahara.c
@@ -601,7 +601,6 @@ static int sahara_aes_setkey(struct crypto_skcipher *tfm, const u8 *key,
unsigned int keylen)
{
struct sahara_ctx *ctx = crypto_skcipher_ctx(tfm);
- int ret;
ctx->keylen = keylen;
@@ -621,13 +620,7 @@ static int sahara_aes_setkey(struct crypto_skcipher *tfm, const u8 *key,
crypto_sync_skcipher_clear_flags(ctx->fallback, CRYPTO_TFM_REQ_MASK);
crypto_sync_skcipher_set_flags(ctx->fallback, tfm->base.crt_flags &
CRYPTO_TFM_REQ_MASK);
-
- ret = crypto_sync_skcipher_setkey(ctx->fallback, key, keylen);
-
- tfm->base.crt_flags &= ~CRYPTO_TFM_RES_MASK;
- tfm->base.crt_flags |= crypto_sync_skcipher_get_flags(ctx->fallback) &
- CRYPTO_TFM_RES_MASK;
- return ret;
+ return crypto_sync_skcipher_setkey(ctx->fallback, key, keylen);
}
static int sahara_aes_crypt(struct skcipher_request *req, unsigned long mode)
diff --git a/drivers/crypto/stm32/Kconfig b/drivers/crypto/stm32/Kconfig
index 1aba9372cd23..4ef3eb11361c 100644
--- a/drivers/crypto/stm32/Kconfig
+++ b/drivers/crypto/stm32/Kconfig
@@ -4,7 +4,7 @@ config CRYPTO_DEV_STM32_CRC
depends on ARCH_STM32
select CRYPTO_HASH
help
- This enables support for the CRC32 hw accelerator which can be found
+ This enables support for the CRC32 hw accelerator which can be found
on STMicroelectronics STM32 SOC.
config CRYPTO_DEV_STM32_HASH
@@ -17,7 +17,7 @@ config CRYPTO_DEV_STM32_HASH
select CRYPTO_SHA256
select CRYPTO_ENGINE
help
- This enables support for the HASH hw accelerator which can be found
+ This enables support for the HASH hw accelerator which can be found
on STMicroelectronics STM32 SOC.
config CRYPTO_DEV_STM32_CRYP
@@ -27,5 +27,5 @@ config CRYPTO_DEV_STM32_CRYP
select CRYPTO_ENGINE
select CRYPTO_LIB_DES
help
- This enables support for the CRYP (AES/DES/TDES) hw accelerator which
+ This enables support for the CRYP (AES/DES/TDES) hw accelerator which
can be found on STMicroelectronics STM32 SOC.
diff --git a/drivers/crypto/stm32/stm32-crc32.c b/drivers/crypto/stm32/stm32-crc32.c
index 9e11c3480353..8e92e4ac79f1 100644
--- a/drivers/crypto/stm32/stm32-crc32.c
+++ b/drivers/crypto/stm32/stm32-crc32.c
@@ -85,10 +85,8 @@ static int stm32_crc_setkey(struct crypto_shash *tfm, const u8 *key,
{
struct stm32_crc_ctx *mctx = crypto_shash_ctx(tfm);
- if (keylen != sizeof(u32)) {
- crypto_shash_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
+ if (keylen != sizeof(u32))
return -EINVAL;
- }
mctx->key = get_unaligned_le32(key);
return 0;
diff --git a/drivers/crypto/stm32/stm32-hash.c b/drivers/crypto/stm32/stm32-hash.c
index cfc8e0e37bee..167b80eec437 100644
--- a/drivers/crypto/stm32/stm32-hash.c
+++ b/drivers/crypto/stm32/stm32-hash.c
@@ -518,10 +518,10 @@ static int stm32_hash_dma_init(struct stm32_hash_dev *hdev)
dma_conf.dst_maxburst = hdev->dma_maxburst;
dma_conf.device_fc = false;
- hdev->dma_lch = dma_request_slave_channel(hdev->dev, "in");
- if (!hdev->dma_lch) {
+ hdev->dma_lch = dma_request_chan(hdev->dev, "in");
+ if (IS_ERR(hdev->dma_lch)) {
dev_err(hdev->dev, "Couldn't acquire a slave DMA channel.\n");
- return -EBUSY;
+ return PTR_ERR(hdev->dma_lch);
}
err = dmaengine_slave_config(hdev->dma_lch, &dma_conf);
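
dma_request_slave_channel() returns NULL on any failure and hides the reason; dma_request_chan() returns an ERR_PTR() that the caller can propagate, which among other things lets probe deferral work (the -EPROBE_DEFER case is an assumption about the caller's context, not spelled out in this hunk). The resulting idiom, in a hypothetical wrapper:

#include <linux/device.h>
#include <linux/dmaengine.h>
#include <linux/err.h>

static int my_init_dma(struct device *dev, struct dma_chan **out)
{
	struct dma_chan *chan;

	chan = dma_request_chan(dev, "in");
	if (IS_ERR(chan)) {
		/* may be -EPROBE_DEFER; propagate rather than flatten to -EBUSY */
		dev_err(dev, "Couldn't acquire a slave DMA channel.\n");
		return PTR_ERR(chan);
	}

	*out = chan;
	return 0;
}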
diff --git a/drivers/crypto/talitos.c b/drivers/crypto/talitos.c
index d71d65846e47..9c6db7f698c4 100644
--- a/drivers/crypto/talitos.c
+++ b/drivers/crypto/talitos.c
@@ -914,7 +914,6 @@ static int aead_setkey(struct crypto_aead *authenc,
return 0;
badkey:
- crypto_aead_set_flags(authenc, CRYPTO_TFM_RES_BAD_KEY_LEN);
memzero_explicit(&keys, sizeof(keys));
return -EINVAL;
}
@@ -929,11 +928,11 @@ static int aead_des3_setkey(struct crypto_aead *authenc,
err = crypto_authenc_extractkeys(&keys, key, keylen);
if (unlikely(err))
- goto badkey;
+ goto out;
err = -EINVAL;
if (keys.authkeylen + keys.enckeylen > TALITOS_MAX_KEY_SIZE)
- goto badkey;
+ goto out;
err = verify_aead_des3_key(authenc, keys.enckey, keys.enckeylen);
if (err)
@@ -954,10 +953,6 @@ static int aead_des3_setkey(struct crypto_aead *authenc,
out:
memzero_explicit(&keys, sizeof(keys));
return err;
-
-badkey:
- crypto_aead_set_flags(authenc, CRYPTO_TFM_RES_BAD_KEY_LEN);
- goto out;
}
static void talitos_sg_unmap(struct device *dev,
@@ -1528,8 +1523,6 @@ static int skcipher_aes_setkey(struct crypto_skcipher *cipher,
keylen == AES_KEYSIZE_256)
return skcipher_setkey(cipher, key, keylen);
- crypto_skcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
-
return -EINVAL;
}
@@ -2234,10 +2227,8 @@ static int ahash_setkey(struct crypto_ahash *tfm, const u8 *key,
/* Must get the hash of the long key */
ret = keyhash(tfm, key, keylen, hash);
- if (ret) {
- crypto_ahash_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
+ if (ret)
return -EINVAL;
- }
keysize = digestsize;
memcpy(ctx->key, hash, digestsize);
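For context on the badkey paths rewritten above: AEAD setkey handlers split the combined authenc key blob with crypto_authenc_extractkeys() and must scrub the stack copy on every exit. A hedged sketch of that shape (use_keys() is a placeholder for the driver-specific setup):

	struct crypto_authenc_keys keys;
	int err;

	err = crypto_authenc_extractkeys(&keys, key, keylen);
	if (err)
		return err;
	/* keys.authkey/authkeylen feed the hash, keys.enckey/enckeylen the cipher */
	err = use_keys(&keys);
	memzero_explicit(&keys, sizeof(keys));	/* always scrub, success or not */
	return err;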
diff --git a/drivers/crypto/ux500/Kconfig b/drivers/crypto/ux500/Kconfig
index b731895aa241..f56d65c56ccf 100644
--- a/drivers/crypto/ux500/Kconfig
+++ b/drivers/crypto/ux500/Kconfig
@@ -11,18 +11,18 @@ config CRYPTO_DEV_UX500_CRYP
select CRYPTO_SKCIPHER
select CRYPTO_LIB_DES
help
- This selects the crypto driver for the UX500_CRYP hardware. It supports
- AES-ECB, CBC and CTR with keys sizes of 128, 192 and 256 bit sizes.
+ This selects the crypto driver for the UX500_CRYP hardware. It supports
+	  AES-ECB, CBC and CTR with key sizes of 128, 192 and 256 bits.
config CRYPTO_DEV_UX500_HASH
- tristate "UX500 crypto driver for HASH block"
- depends on CRYPTO_DEV_UX500
- select CRYPTO_HASH
+ tristate "UX500 crypto driver for HASH block"
+ depends on CRYPTO_DEV_UX500
+ select CRYPTO_HASH
select CRYPTO_SHA1
select CRYPTO_SHA256
- help
- This selects the hash driver for the UX500_HASH hardware.
- Depends on UX500/STM DMA if running in DMA mode.
+ help
+ This selects the hash driver for the UX500_HASH hardware.
+ Depends on UX500/STM DMA if running in DMA mode.
config CRYPTO_DEV_UX500_DEBUG
bool "Activate ux500 platform debug-mode for crypto and hash block"
diff --git a/drivers/crypto/ux500/cryp/cryp_core.c b/drivers/crypto/ux500/cryp/cryp_core.c
index 95fb694a2667..800dfc4d16c4 100644
--- a/drivers/crypto/ux500/cryp/cryp_core.c
+++ b/drivers/crypto/ux500/cryp/cryp_core.c
@@ -951,7 +951,6 @@ static int aes_skcipher_setkey(struct crypto_skcipher *cipher,
const u8 *key, unsigned int keylen)
{
struct cryp_ctx *ctx = crypto_skcipher_ctx(cipher);
- u32 *flags = &cipher->base.crt_flags;
pr_debug(DEV_DBG_NAME " [%s]", __func__);
@@ -970,7 +969,6 @@ static int aes_skcipher_setkey(struct crypto_skcipher *cipher,
default:
pr_err(DEV_DBG_NAME "[%s]: Unknown keylen!", __func__);
- *flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
return -EINVAL;
}
diff --git a/drivers/crypto/virtio/virtio_crypto_algs.c b/drivers/crypto/virtio/virtio_crypto_algs.c
index 4b71e80951b7..fd045e64972a 100644
--- a/drivers/crypto/virtio/virtio_crypto_algs.c
+++ b/drivers/crypto/virtio/virtio_crypto_algs.c
@@ -272,11 +272,11 @@ static int virtio_crypto_alg_skcipher_init_sessions(
if (keylen > vcrypto->max_cipher_key_len) {
pr_err("virtio_crypto: the key is too long\n");
- goto bad_key;
+ return -EINVAL;
}
if (virtio_crypto_alg_validate_key(keylen, &alg))
- goto bad_key;
+ return -EINVAL;
/* Create encryption session */
ret = virtio_crypto_alg_skcipher_init_session(ctx,
@@ -291,10 +291,6 @@ static int virtio_crypto_alg_skcipher_init_sessions(
return ret;
}
return 0;
-
-bad_key:
- crypto_skcipher_set_flags(ctx->tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
- return -EINVAL;
}
/* Note: kernel crypto API realization */
diff --git a/drivers/crypto/vmx/aes_xts.c b/drivers/crypto/vmx/aes_xts.c
index d59e736882f6..9fee1b1532a4 100644
--- a/drivers/crypto/vmx/aes_xts.c
+++ b/drivers/crypto/vmx/aes_xts.c
@@ -84,6 +84,9 @@ static int p8_aes_xts_crypt(struct skcipher_request *req, int enc)
u8 tweak[AES_BLOCK_SIZE];
int ret;
+ if (req->cryptlen < AES_BLOCK_SIZE)
+ return -EINVAL;
+
if (!crypto_simd_usable() || (req->cryptlen % XTS_BLOCK_SIZE) != 0) {
struct skcipher_request *subreq = skcipher_request_ctx(req);
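The new length check reflects how XTS works: it needs at least one full block, with ciphertext stealing covering a trailing partial block. A 20-byte request is one 16-byte block plus 4 stolen bytes, but a 12-byte request cannot be represented at all, hence the early -EINVAL before any fallback.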
diff --git a/drivers/devfreq/Kconfig b/drivers/devfreq/Kconfig
index 35535833b6f7..0b1df12e0f21 100644
--- a/drivers/devfreq/Kconfig
+++ b/drivers/devfreq/Kconfig
@@ -77,7 +77,7 @@ config DEVFREQ_GOV_PASSIVE
comment "DEVFREQ Drivers"
config ARM_EXYNOS_BUS_DEVFREQ
- tristate "ARM EXYNOS Generic Memory Bus DEVFREQ Driver"
+ tristate "ARM Exynos Generic Memory Bus DEVFREQ Driver"
depends on ARCH_EXYNOS || COMPILE_TEST
select DEVFREQ_GOV_SIMPLE_ONDEMAND
select DEVFREQ_GOV_PASSIVE
@@ -91,6 +91,16 @@ config ARM_EXYNOS_BUS_DEVFREQ
and adjusts the operating frequencies and voltages with OPP support.
This does not yet operate with optimal voltages.
+config ARM_IMX8M_DDRC_DEVFREQ
+ tristate "i.MX8M DDRC DEVFREQ Driver"
+ depends on (ARCH_MXC && HAVE_ARM_SMCCC) || \
+ (COMPILE_TEST && HAVE_ARM_SMCCC)
+ select DEVFREQ_GOV_SIMPLE_ONDEMAND
+ select DEVFREQ_GOV_USERSPACE
+ help
+ This adds the DEVFREQ driver for the i.MX8M DDR Controller. It allows
+ adjusting DRAM frequency.
+
config ARM_TEGRA_DEVFREQ
tristate "NVIDIA Tegra30/114/124/210 DEVFREQ Driver"
depends on ARCH_TEGRA_3x_SOC || ARCH_TEGRA_114_SOC || \
@@ -115,14 +125,15 @@ config ARM_TEGRA20_DEVFREQ
config ARM_RK3399_DMC_DEVFREQ
tristate "ARM RK3399 DMC DEVFREQ Driver"
- depends on ARCH_ROCKCHIP
+ depends on (ARCH_ROCKCHIP && HAVE_ARM_SMCCC) || \
+ (COMPILE_TEST && HAVE_ARM_SMCCC)
select DEVFREQ_EVENT_ROCKCHIP_DFI
select DEVFREQ_GOV_SIMPLE_ONDEMAND
select PM_DEVFREQ_EVENT
help
- This adds the DEVFREQ driver for the RK3399 DMC(Dynamic Memory Controller).
- It sets the frequency for the memory controller and reads the usage counts
- from hardware.
+	  This adds the DEVFREQ driver for the RK3399 DMC (Dynamic Memory Controller).
+ It sets the frequency for the memory controller and reads the usage counts
+ from hardware.
source "drivers/devfreq/event/Kconfig"
diff --git a/drivers/devfreq/Makefile b/drivers/devfreq/Makefile
index 338ae8440db6..3eb4d5e6635c 100644
--- a/drivers/devfreq/Makefile
+++ b/drivers/devfreq/Makefile
@@ -9,6 +9,7 @@ obj-$(CONFIG_DEVFREQ_GOV_PASSIVE) += governor_passive.o
# DEVFREQ Drivers
obj-$(CONFIG_ARM_EXYNOS_BUS_DEVFREQ) += exynos-bus.o
+obj-$(CONFIG_ARM_IMX8M_DDRC_DEVFREQ) += imx8m-ddrc.o
obj-$(CONFIG_ARM_RK3399_DMC_DEVFREQ) += rk3399_dmc.o
obj-$(CONFIG_ARM_TEGRA_DEVFREQ) += tegra30-devfreq.o
obj-$(CONFIG_ARM_TEGRA20_DEVFREQ) += tegra20-devfreq.o
diff --git a/drivers/devfreq/devfreq-event.c b/drivers/devfreq/devfreq-event.c
index 3dc5fd6065a3..8c31b0f2e28f 100644
--- a/drivers/devfreq/devfreq-event.c
+++ b/drivers/devfreq/devfreq-event.c
@@ -346,9 +346,9 @@ EXPORT_SYMBOL_GPL(devfreq_event_add_edev);
/**
* devfreq_event_remove_edev() - Remove the devfreq-event device registered.
- * @dev : the devfreq-event device
+ * @edev : the devfreq-event device
*
- * Note that this function remove the registered devfreq-event device.
+ * Note that this function removes the registered devfreq-event device.
*/
int devfreq_event_remove_edev(struct devfreq_event_dev *edev)
{
diff --git a/drivers/devfreq/devfreq.c b/drivers/devfreq/devfreq.c
index 57f6944d65a6..cceee8bc3c2f 100644
--- a/drivers/devfreq/devfreq.c
+++ b/drivers/devfreq/devfreq.c
@@ -10,6 +10,7 @@
#include <linux/kernel.h>
#include <linux/kmod.h>
#include <linux/sched.h>
+#include <linux/debugfs.h>
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/init.h>
@@ -33,6 +34,7 @@
#define HZ_PER_KHZ 1000
static struct class *devfreq_class;
+static struct dentry *devfreq_debugfs;
/*
* devfreq core provides delayed work based load monitoring helper
@@ -209,10 +211,10 @@ static int set_freq_table(struct devfreq *devfreq)
int devfreq_update_status(struct devfreq *devfreq, unsigned long freq)
{
int lev, prev_lev, ret = 0;
- unsigned long cur_time;
+ u64 cur_time;
lockdep_assert_held(&devfreq->lock);
- cur_time = jiffies;
+ cur_time = get_jiffies_64();
/* Immediately exit if previous_freq is not initialized yet. */
if (!devfreq->previous_freq)
@@ -224,8 +226,8 @@ int devfreq_update_status(struct devfreq *devfreq, unsigned long freq)
goto out;
}
- devfreq->time_in_state[prev_lev] +=
- cur_time - devfreq->last_stat_updated;
+ devfreq->stats.time_in_state[prev_lev] +=
+ cur_time - devfreq->stats.last_update;
lev = devfreq_get_freq_level(devfreq, freq);
if (lev < 0) {
@@ -234,13 +236,13 @@ int devfreq_update_status(struct devfreq *devfreq, unsigned long freq)
}
if (lev != prev_lev) {
- devfreq->trans_table[(prev_lev *
- devfreq->profile->max_state) + lev]++;
- devfreq->total_trans++;
+ devfreq->stats.trans_table[
+ (prev_lev * devfreq->profile->max_state) + lev]++;
+ devfreq->stats.total_trans++;
}
out:
- devfreq->last_stat_updated = cur_time;
+ devfreq->stats.last_update = cur_time;
return ret;
}
EXPORT_SYMBOL(devfreq_update_status);
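The switch from unsigned long jiffies to get_jiffies_64() closes a wraparound hole in the statistics: on 32-bit systems plain jiffies wraps after 2^32 ticks, roughly 49.7 days at HZ=1000, silently corrupting the time_in_state deltas. A condensed sketch of the accounting as restructured above:

	u64 now = get_jiffies_64();	/* 64-bit counter, effectively wrap-free */

	df->stats.time_in_state[prev_lev] += now - df->stats.last_update;
	df->stats.last_update = now;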
@@ -535,7 +537,7 @@ void devfreq_monitor_resume(struct devfreq *devfreq)
msecs_to_jiffies(devfreq->profile->polling_ms));
out_update:
- devfreq->last_stat_updated = jiffies;
+ devfreq->stats.last_update = get_jiffies_64();
devfreq->stop_polling = false;
if (devfreq->profile->get_cur_freq &&
@@ -807,28 +809,29 @@ struct devfreq *devfreq_add_device(struct device *dev,
goto err_out;
}
- devfreq->trans_table = devm_kzalloc(&devfreq->dev,
+ devfreq->stats.trans_table = devm_kzalloc(&devfreq->dev,
array3_size(sizeof(unsigned int),
devfreq->profile->max_state,
devfreq->profile->max_state),
GFP_KERNEL);
- if (!devfreq->trans_table) {
+ if (!devfreq->stats.trans_table) {
mutex_unlock(&devfreq->lock);
err = -ENOMEM;
goto err_devfreq;
}
- devfreq->time_in_state = devm_kcalloc(&devfreq->dev,
+ devfreq->stats.time_in_state = devm_kcalloc(&devfreq->dev,
devfreq->profile->max_state,
- sizeof(unsigned long),
+ sizeof(*devfreq->stats.time_in_state),
GFP_KERNEL);
- if (!devfreq->time_in_state) {
+ if (!devfreq->stats.time_in_state) {
mutex_unlock(&devfreq->lock);
err = -ENOMEM;
goto err_devfreq;
}
- devfreq->last_stat_updated = jiffies;
+ devfreq->stats.total_trans = 0;
+ devfreq->stats.last_update = get_jiffies_64();
srcu_init_notifier_head(&devfreq->transition_notifier_list);
@@ -1259,6 +1262,14 @@ err_out:
}
EXPORT_SYMBOL(devfreq_remove_governor);
+static ssize_t name_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct devfreq *devfreq = to_devfreq(dev);
+ return sprintf(buf, "%s\n", dev_name(devfreq->dev.parent));
+}
+static DEVICE_ATTR_RO(name);
+
static ssize_t governor_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
@@ -1461,6 +1472,7 @@ static ssize_t min_freq_show(struct device *dev, struct device_attribute *attr,
return sprintf(buf, "%lu\n", min_freq);
}
+static DEVICE_ATTR_RW(min_freq);
static ssize_t max_freq_store(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
@@ -1501,7 +1513,6 @@ static ssize_t max_freq_store(struct device *dev, struct device_attribute *attr,
return count;
}
-static DEVICE_ATTR_RW(min_freq);
static ssize_t max_freq_show(struct device *dev, struct device_attribute *attr,
char *buf)
@@ -1580,18 +1591,47 @@ static ssize_t trans_stat_show(struct device *dev,
devfreq->profile->freq_table[i]);
for (j = 0; j < max_state; j++)
len += sprintf(buf + len, "%10u",
- devfreq->trans_table[(i * max_state) + j]);
- len += sprintf(buf + len, "%10u\n",
- jiffies_to_msecs(devfreq->time_in_state[i]));
+ devfreq->stats.trans_table[(i * max_state) + j]);
+
+ len += sprintf(buf + len, "%10llu\n", (u64)
+ jiffies64_to_msecs(devfreq->stats.time_in_state[i]));
}
len += sprintf(buf + len, "Total transition : %u\n",
- devfreq->total_trans);
+ devfreq->stats.total_trans);
return len;
}
-static DEVICE_ATTR_RO(trans_stat);
+
+static ssize_t trans_stat_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct devfreq *df = to_devfreq(dev);
+ int err, value;
+
+ if (df->profile->max_state == 0)
+ return count;
+
+ err = kstrtoint(buf, 10, &value);
+ if (err || value != 0)
+ return -EINVAL;
+
+ mutex_lock(&df->lock);
+ memset(df->stats.time_in_state, 0, (df->profile->max_state *
+ sizeof(*df->stats.time_in_state)));
+ memset(df->stats.trans_table, 0, array3_size(sizeof(unsigned int),
+ df->profile->max_state,
+ df->profile->max_state));
+ df->stats.total_trans = 0;
+ df->stats.last_update = get_jiffies_64();
+ mutex_unlock(&df->lock);
+
+ return count;
+}
+static DEVICE_ATTR_RW(trans_stat);
static struct attribute *devfreq_attrs[] = {
+ &dev_attr_name.attr,
&dev_attr_governor.attr,
&dev_attr_available_governors.attr,
&dev_attr_cur_freq.attr,
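With trans_stat now writable, the accumulated counters can be cleared from userspace, e.g. echo 0 > /sys/class/devfreq/<device>/trans_stat; per trans_stat_store() above, any value other than 0 is rejected with -EINVAL.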
@@ -1605,6 +1645,81 @@ static struct attribute *devfreq_attrs[] = {
};
ATTRIBUTE_GROUPS(devfreq);
+/**
+ * devfreq_summary_show() - Show the summary of the devfreq devices
+ * @s: seq_file instance to show the summary of devfreq devices
+ * @data: not used
+ *
+ * Show the summary of the devfreq devices via 'devfreq_summary' debugfs file.
+ * It helps users to see detailed information about the devfreq devices.
+ *
+ * Always returns 0 because it only shows information without changing any data.
+ */
+static int devfreq_summary_show(struct seq_file *s, void *data)
+{
+ struct devfreq *devfreq;
+ struct devfreq *p_devfreq = NULL;
+ unsigned long cur_freq, min_freq, max_freq;
+ unsigned int polling_ms;
+
+ seq_printf(s, "%-30s %-10s %-10s %-15s %10s %12s %12s %12s\n",
+ "dev_name",
+ "dev",
+ "parent_dev",
+ "governor",
+ "polling_ms",
+ "cur_freq_Hz",
+ "min_freq_Hz",
+ "max_freq_Hz");
+ seq_printf(s, "%30s %10s %10s %15s %10s %12s %12s %12s\n",
+ "------------------------------",
+ "----------",
+ "----------",
+ "---------------",
+ "----------",
+ "------------",
+ "------------",
+ "------------");
+
+ mutex_lock(&devfreq_list_lock);
+
+ list_for_each_entry_reverse(devfreq, &devfreq_list, node) {
+#if IS_ENABLED(CONFIG_DEVFREQ_GOV_PASSIVE)
+ if (!strncmp(devfreq->governor_name, DEVFREQ_GOV_PASSIVE,
+ DEVFREQ_NAME_LEN)) {
+ struct devfreq_passive_data *data = devfreq->data;
+
+ if (data)
+ p_devfreq = data->parent;
+ } else {
+ p_devfreq = NULL;
+ }
+#endif
+
+ mutex_lock(&devfreq->lock);
+		cur_freq = devfreq->previous_freq;
+ get_freq_range(devfreq, &min_freq, &max_freq);
+		polling_ms = devfreq->profile->polling_ms;
+ mutex_unlock(&devfreq->lock);
+
+ seq_printf(s,
+ "%-30s %-10s %-10s %-15s %10d %12ld %12ld %12ld\n",
+ dev_name(devfreq->dev.parent),
+ dev_name(&devfreq->dev),
+ p_devfreq ? dev_name(&p_devfreq->dev) : "null",
+ devfreq->governor_name,
+ polling_ms,
+ cur_freq,
+ min_freq,
+ max_freq);
+ }
+
+ mutex_unlock(&devfreq_list_lock);
+
+ return 0;
+}
+DEFINE_SHOW_ATTRIBUTE(devfreq_summary);
+
static int __init devfreq_init(void)
{
devfreq_class = class_create(THIS_MODULE, "devfreq");
@@ -1621,6 +1736,11 @@ static int __init devfreq_init(void)
}
devfreq_class->dev_groups = devfreq_groups;
+ devfreq_debugfs = debugfs_create_dir("devfreq", NULL);
+ debugfs_create_file("devfreq_summary", 0444,
+ devfreq_debugfs, NULL,
+ &devfreq_summary_fops);
+
return 0;
}
subsys_initcall(devfreq_init);
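Once this initcall runs, the table produced by devfreq_summary_show() is readable at /sys/kernel/debug/devfreq/devfreq_summary, assuming debugfs is mounted at the usual location.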
@@ -1814,7 +1934,7 @@ static void devm_devfreq_notifier_release(struct device *dev, void *res)
/**
* devm_devfreq_register_notifier()
- - Resource-managed devfreq_register_notifier()
+ * - Resource-managed devfreq_register_notifier()
* @dev: The devfreq user device. (parent of devfreq)
* @devfreq: The devfreq object.
* @nb: The notifier block to be unregistered.
@@ -1850,7 +1970,7 @@ EXPORT_SYMBOL(devm_devfreq_register_notifier);
/**
* devm_devfreq_unregister_notifier()
- - Resource-managed devfreq_unregister_notifier()
+ * - Resource-managed devfreq_unregister_notifier()
* @dev: The devfreq user device. (parent of devfreq)
* @devfreq: The devfreq object.
* @nb: The notifier block to be unregistered.
diff --git a/drivers/devfreq/event/Kconfig b/drivers/devfreq/event/Kconfig
index cef2cf5347ca..878825372f6f 100644
--- a/drivers/devfreq/event/Kconfig
+++ b/drivers/devfreq/event/Kconfig
@@ -15,7 +15,7 @@ menuconfig PM_DEVFREQ_EVENT
if PM_DEVFREQ_EVENT
config DEVFREQ_EVENT_EXYNOS_NOCP
- tristate "EXYNOS NoC (Network On Chip) Probe DEVFREQ event Driver"
+ tristate "Exynos NoC (Network On Chip) Probe DEVFREQ event Driver"
depends on ARCH_EXYNOS || COMPILE_TEST
select PM_OPP
select REGMAP_MMIO
@@ -24,7 +24,7 @@ config DEVFREQ_EVENT_EXYNOS_NOCP
(Network on Chip) Probe counters to measure the bandwidth of AXI bus.
config DEVFREQ_EVENT_EXYNOS_PPMU
- tristate "EXYNOS PPMU (Platform Performance Monitoring Unit) DEVFREQ event Driver"
+ tristate "Exynos PPMU (Platform Performance Monitoring Unit) DEVFREQ event Driver"
depends on ARCH_EXYNOS || COMPILE_TEST
select PM_OPP
help
@@ -34,7 +34,7 @@ config DEVFREQ_EVENT_EXYNOS_PPMU
config DEVFREQ_EVENT_ROCKCHIP_DFI
tristate "ROCKCHIP DFI DEVFREQ event Driver"
- depends on ARCH_ROCKCHIP
+ depends on ARCH_ROCKCHIP || COMPILE_TEST
help
	  This adds the devfreq-event driver for Rockchip SoC. It provides the DFI
	  (DDR Monitor Module) driver to count DDR load.
diff --git a/drivers/devfreq/event/exynos-nocp.c b/drivers/devfreq/event/exynos-nocp.c
index 1c565926db9f..ccc531ee6938 100644
--- a/drivers/devfreq/event/exynos-nocp.c
+++ b/drivers/devfreq/event/exynos-nocp.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
- * exynos-nocp.c - EXYNOS NoC (Network On Chip) Probe support
+ * exynos-nocp.c - Exynos NoC (Network On Chip) Probe support
*
* Copyright (c) 2016 Samsung Electronics Co., Ltd.
* Author : Chanwoo Choi <cw00.choi@samsung.com>
diff --git a/drivers/devfreq/event/exynos-nocp.h b/drivers/devfreq/event/exynos-nocp.h
index 55cc96284a36..2d6f08cfd0c5 100644
--- a/drivers/devfreq/event/exynos-nocp.h
+++ b/drivers/devfreq/event/exynos-nocp.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
- * exynos-nocp.h - EXYNOS NoC (Network on Chip) Probe header file
+ * exynos-nocp.h - Exynos NoC (Network on Chip) Probe header file
*
* Copyright (c) 2016 Samsung Electronics Co., Ltd.
* Author : Chanwoo Choi <cw00.choi@samsung.com>
diff --git a/drivers/devfreq/event/exynos-ppmu.c b/drivers/devfreq/event/exynos-ppmu.c
index 85c7a77bf3f0..17ed980d9099 100644
--- a/drivers/devfreq/event/exynos-ppmu.c
+++ b/drivers/devfreq/event/exynos-ppmu.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
- * exynos_ppmu.c - EXYNOS PPMU (Platform Performance Monitoring Unit) support
+ * exynos_ppmu.c - Exynos PPMU (Platform Performance Monitoring Unit) support
*
* Copyright (c) 2014-2015 Samsung Electronics Co., Ltd.
* Author : Chanwoo Choi <cw00.choi@samsung.com>
@@ -101,17 +101,22 @@ static struct __exynos_ppmu_events {
PPMU_EVENT(dmc1_1),
};
-static int exynos_ppmu_find_ppmu_id(struct devfreq_event_dev *edev)
+static int __exynos_ppmu_find_ppmu_id(const char *edev_name)
{
int i;
for (i = 0; i < ARRAY_SIZE(ppmu_events); i++)
- if (!strcmp(edev->desc->name, ppmu_events[i].name))
+ if (!strcmp(edev_name, ppmu_events[i].name))
return ppmu_events[i].id;
return -EINVAL;
}
+static int exynos_ppmu_find_ppmu_id(struct devfreq_event_dev *edev)
+{
+ return __exynos_ppmu_find_ppmu_id(edev->desc->name);
+}
+
/*
* The devfreq-event ops structure for PPMU v1.1
*/
@@ -556,13 +561,11 @@ static int of_get_devfreq_events(struct device_node *np,
* use default if not.
*/
if (info->ppmu_type == EXYNOS_TYPE_PPMU_V2) {
- struct devfreq_event_dev edev;
int id;
/* Not all registers take the same value for
* read+write data count.
*/
- edev.desc = &desc[j];
- id = exynos_ppmu_find_ppmu_id(&edev);
+ id = __exynos_ppmu_find_ppmu_id(desc[j].name);
switch (id) {
case PPMU_PMNCNT0:
diff --git a/drivers/devfreq/event/exynos-ppmu.h b/drivers/devfreq/event/exynos-ppmu.h
index 284420047455..97f667d0cbdd 100644
--- a/drivers/devfreq/event/exynos-ppmu.h
+++ b/drivers/devfreq/event/exynos-ppmu.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
- * exynos_ppmu.h - EXYNOS PPMU header file
+ * exynos_ppmu.h - Exynos PPMU header file
*
* Copyright (c) 2015 Samsung Electronics Co., Ltd.
* Author : Chanwoo Choi <cw00.choi@samsung.com>
diff --git a/drivers/devfreq/event/rockchip-dfi.c b/drivers/devfreq/event/rockchip-dfi.c
index 5d1042188727..9a88faaf8b27 100644
--- a/drivers/devfreq/event/rockchip-dfi.c
+++ b/drivers/devfreq/event/rockchip-dfi.c
@@ -177,7 +177,6 @@ static int rockchip_dfi_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct rockchip_dfi *data;
- struct resource *res;
struct devfreq_event_desc *desc;
struct device_node *np = pdev->dev.of_node, *node;
@@ -185,8 +184,7 @@ static int rockchip_dfi_probe(struct platform_device *pdev)
if (!data)
return -ENOMEM;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- data->regs = devm_ioremap_resource(&pdev->dev, res);
+ data->regs = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(data->regs))
return PTR_ERR(data->regs);
@@ -200,6 +198,7 @@ static int rockchip_dfi_probe(struct platform_device *pdev)
node = of_parse_phandle(np, "rockchip,pmu", 0);
if (node) {
data->regmap_pmu = syscon_node_to_regmap(node);
+ of_node_put(node);
if (IS_ERR(data->regmap_pmu))
return PTR_ERR(data->regmap_pmu);
}
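The added of_node_put() is a refcount fix: of_parse_phandle() returns the node with its reference count elevated, and the caller must drop that reference once the node is no longer needed. The same leak is fixed in rk3399_dmc.c below.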
diff --git a/drivers/devfreq/exynos-bus.c b/drivers/devfreq/exynos-bus.c
index c832673273a2..8fa8eb541373 100644
--- a/drivers/devfreq/exynos-bus.c
+++ b/drivers/devfreq/exynos-bus.c
@@ -15,11 +15,10 @@
#include <linux/device.h>
#include <linux/export.h>
#include <linux/module.h>
-#include <linux/of_device.h>
+#include <linux/of.h>
#include <linux/pm_opp.h>
#include <linux/platform_device.h>
#include <linux/regulator/consumer.h>
-#include <linux/slab.h>
#define DEFAULT_SATURATION_RATIO 40
@@ -127,6 +126,7 @@ static int exynos_bus_get_dev_status(struct device *dev,
ret = exynos_bus_get_event(bus, &edata);
if (ret < 0) {
+ dev_err(dev, "failed to get event from devfreq-event devices\n");
stat->total_time = stat->busy_time = 0;
goto err;
}
@@ -287,52 +287,12 @@ err_clk:
return ret;
}
-static int exynos_bus_probe(struct platform_device *pdev)
+static int exynos_bus_profile_init(struct exynos_bus *bus,
+ struct devfreq_dev_profile *profile)
{
- struct device *dev = &pdev->dev;
- struct device_node *np = dev->of_node, *node;
- struct devfreq_dev_profile *profile;
+ struct device *dev = bus->dev;
struct devfreq_simple_ondemand_data *ondemand_data;
- struct devfreq_passive_data *passive_data;
- struct devfreq *parent_devfreq;
- struct exynos_bus *bus;
- int ret, max_state;
- unsigned long min_freq, max_freq;
- bool passive = false;
-
- if (!np) {
- dev_err(dev, "failed to find devicetree node\n");
- return -EINVAL;
- }
-
- bus = devm_kzalloc(&pdev->dev, sizeof(*bus), GFP_KERNEL);
- if (!bus)
- return -ENOMEM;
- mutex_init(&bus->lock);
- bus->dev = &pdev->dev;
- platform_set_drvdata(pdev, bus);
-
- profile = devm_kzalloc(dev, sizeof(*profile), GFP_KERNEL);
- if (!profile)
- return -ENOMEM;
-
- node = of_parse_phandle(dev->of_node, "devfreq", 0);
- if (node) {
- of_node_put(node);
- passive = true;
- } else {
- ret = exynos_bus_parent_parse_of(np, bus);
- if (ret < 0)
- return ret;
- }
-
- /* Parse the device-tree to get the resource information */
- ret = exynos_bus_parse_of(np, bus);
- if (ret < 0)
- goto err_reg;
-
- if (passive)
- goto passive;
+ int ret;
/* Initialize the struct profile and governor data for parent device */
profile->polling_ms = 50;
@@ -341,10 +301,9 @@ static int exynos_bus_probe(struct platform_device *pdev)
profile->exit = exynos_bus_exit;
ondemand_data = devm_kzalloc(dev, sizeof(*ondemand_data), GFP_KERNEL);
- if (!ondemand_data) {
- ret = -ENOMEM;
- goto err;
- }
+ if (!ondemand_data)
+ return -ENOMEM;
+
ondemand_data->upthreshold = 40;
ondemand_data->downdifferential = 5;
@@ -354,15 +313,14 @@ static int exynos_bus_probe(struct platform_device *pdev)
ondemand_data);
if (IS_ERR(bus->devfreq)) {
dev_err(dev, "failed to add devfreq device\n");
- ret = PTR_ERR(bus->devfreq);
- goto err;
+ return PTR_ERR(bus->devfreq);
}
/* Register opp_notifier to catch the change of OPP */
ret = devm_devfreq_register_opp_notifier(dev, bus->devfreq);
if (ret < 0) {
dev_err(dev, "failed to register opp notifier\n");
- goto err;
+ return ret;
}
/*
@@ -372,33 +330,44 @@ static int exynos_bus_probe(struct platform_device *pdev)
ret = exynos_bus_enable_edev(bus);
if (ret < 0) {
dev_err(dev, "failed to enable devfreq-event devices\n");
- goto err;
+ return ret;
}
ret = exynos_bus_set_event(bus);
if (ret < 0) {
dev_err(dev, "failed to set event to devfreq-event devices\n");
- goto err;
+ goto err_edev;
}
- goto out;
-passive:
+ return 0;
+
+err_edev:
+ if (exynos_bus_disable_edev(bus))
+ dev_warn(dev, "failed to disable the devfreq-event devices\n");
+
+ return ret;
+}
+
+static int exynos_bus_profile_init_passive(struct exynos_bus *bus,
+ struct devfreq_dev_profile *profile)
+{
+ struct device *dev = bus->dev;
+ struct devfreq_passive_data *passive_data;
+ struct devfreq *parent_devfreq;
+
/* Initialize the struct profile and governor data for passive device */
profile->target = exynos_bus_target;
profile->exit = exynos_bus_passive_exit;
/* Get the instance of parent devfreq device */
parent_devfreq = devfreq_get_devfreq_by_phandle(dev, 0);
- if (IS_ERR(parent_devfreq)) {
- ret = -EPROBE_DEFER;
- goto err;
- }
+ if (IS_ERR(parent_devfreq))
+ return -EPROBE_DEFER;
passive_data = devm_kzalloc(dev, sizeof(*passive_data), GFP_KERNEL);
- if (!passive_data) {
- ret = -ENOMEM;
- goto err;
- }
+ if (!passive_data)
+ return -ENOMEM;
+
passive_data->parent = parent_devfreq;
/* Add devfreq device for exynos bus with passive governor */
@@ -407,11 +376,61 @@ passive:
if (IS_ERR(bus->devfreq)) {
dev_err(dev,
"failed to add devfreq dev with passive governor\n");
- ret = PTR_ERR(bus->devfreq);
- goto err;
+ return PTR_ERR(bus->devfreq);
+ }
+
+ return 0;
+}
+
+static int exynos_bus_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct device_node *np = dev->of_node, *node;
+ struct devfreq_dev_profile *profile;
+ struct exynos_bus *bus;
+ int ret, max_state;
+ unsigned long min_freq, max_freq;
+ bool passive = false;
+
+ if (!np) {
+ dev_err(dev, "failed to find devicetree node\n");
+ return -EINVAL;
}
-out:
+ bus = devm_kzalloc(&pdev->dev, sizeof(*bus), GFP_KERNEL);
+ if (!bus)
+ return -ENOMEM;
+ mutex_init(&bus->lock);
+ bus->dev = &pdev->dev;
+ platform_set_drvdata(pdev, bus);
+
+ profile = devm_kzalloc(dev, sizeof(*profile), GFP_KERNEL);
+ if (!profile)
+ return -ENOMEM;
+
+ node = of_parse_phandle(dev->of_node, "devfreq", 0);
+ if (node) {
+ of_node_put(node);
+ passive = true;
+ } else {
+ ret = exynos_bus_parent_parse_of(np, bus);
+ if (ret < 0)
+ return ret;
+ }
+
+ /* Parse the device-tree to get the resource information */
+ ret = exynos_bus_parse_of(np, bus);
+ if (ret < 0)
+ goto err_reg;
+
+ if (passive)
+ ret = exynos_bus_profile_init_passive(bus, profile);
+ else
+ ret = exynos_bus_profile_init(bus, profile);
+
+ if (ret < 0)
+ goto err;
+
max_state = bus->devfreq->profile->max_state;
min_freq = (bus->devfreq->profile->freq_table[0] / 1000);
max_freq = (bus->devfreq->profile->freq_table[max_state - 1] / 1000);
diff --git a/drivers/devfreq/imx8m-ddrc.c b/drivers/devfreq/imx8m-ddrc.c
new file mode 100644
index 000000000000..bc82d3653bff
--- /dev/null
+++ b/drivers/devfreq/imx8m-ddrc.c
@@ -0,0 +1,471 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright 2019 NXP
+ */
+
+#include <linux/module.h>
+#include <linux/device.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/devfreq.h>
+#include <linux/pm_opp.h>
+#include <linux/clk.h>
+#include <linux/clk-provider.h>
+#include <linux/arm-smccc.h>
+
+#define IMX_SIP_DDR_DVFS 0xc2000004
+
+/* Query available frequencies. */
+#define IMX_SIP_DDR_DVFS_GET_FREQ_COUNT 0x10
+#define IMX_SIP_DDR_DVFS_GET_FREQ_INFO 0x11
+
+/*
+ * This should be in a 1:1 mapping with devicetree OPPs, but
+ * firmware provides additional info.
+ */
+struct imx8m_ddrc_freq {
+ unsigned long rate;
+ unsigned long smcarg;
+ int dram_core_parent_index;
+ int dram_alt_parent_index;
+ int dram_apb_parent_index;
+};
+
+/* Hardware limitation */
+#define IMX8M_DDRC_MAX_FREQ_COUNT 4
+
+/*
+ * i.MX8M DRAM Controller clocks have the following structure (abridged):
+ *
+ * +----------+ |\ +------+
+ * | dram_pll |-------|M| dram_core | |
+ * +----------+ |U|---------->| D |
+ * /--|X| | D |
+ * dram_alt_root | |/ | R |
+ * | | C |
+ * +---------+ | |
+ * |FIX DIV/4| | |
+ * +---------+ | |
+ * composite: | | |
+ * +----------+ | | |
+ * | dram_alt |----/ | |
+ * +----------+ | |
+ * | dram_apb |-------------------->| |
+ * +----------+ +------+
+ *
+ * The dram_pll is used for higher rates and dram_alt is used for lower rates.
+ *
+ * Frequency switching is implemented in TF-A (via SMC call) and can change the
+ * configuration of the clocks, including mux parents. The dram_alt and
+ * dram_apb clocks are "imx composite" and their parent can change too.
+ *
+ * We need to prepare/enable the new mux parents ahead of switching and update
+ * their information afterwards.
+ */
+struct imx8m_ddrc {
+ struct devfreq_dev_profile profile;
+ struct devfreq *devfreq;
+
+ /* For frequency switching: */
+ struct clk *dram_core;
+ struct clk *dram_pll;
+ struct clk *dram_alt;
+ struct clk *dram_apb;
+
+ int freq_count;
+ struct imx8m_ddrc_freq freq_table[IMX8M_DDRC_MAX_FREQ_COUNT];
+};
+
+static struct imx8m_ddrc_freq *imx8m_ddrc_find_freq(struct imx8m_ddrc *priv,
+ unsigned long rate)
+{
+ struct imx8m_ddrc_freq *freq;
+ int i;
+
+ /*
+	 * Firmware reports values in MT/s, so we round down from Hz;
+	 * e.g. an 800 MHz dram_core rate matches a 3200 MT/s entry.
+	 * Rounding is extra generous to ensure a match.
+ */
+ rate = DIV_ROUND_CLOSEST(rate, 250000);
+ for (i = 0; i < priv->freq_count; ++i) {
+ freq = &priv->freq_table[i];
+ if (freq->rate == rate ||
+ freq->rate + 1 == rate ||
+ freq->rate - 1 == rate)
+ return freq;
+ }
+
+ return NULL;
+}
+
+static void imx8m_ddrc_smc_set_freq(int target_freq)
+{
+ struct arm_smccc_res res;
+ u32 online_cpus = 0;
+ int cpu;
+
+ local_irq_disable();
+
+ for_each_online_cpu(cpu)
+ online_cpus |= (1 << (cpu * 8));
+
+	/* change the DDR frequency */
+ arm_smccc_smc(IMX_SIP_DDR_DVFS, target_freq, online_cpus,
+ 0, 0, 0, 0, 0, &res);
+
+ local_irq_enable();
+}
+
+static struct clk *clk_get_parent_by_index(struct clk *clk, int index)
+{
+ struct clk_hw *hw;
+
+ hw = clk_hw_get_parent_by_index(__clk_get_hw(clk), index);
+
+ return hw ? hw->clk : NULL;
+}
+
+static int imx8m_ddrc_set_freq(struct device *dev, struct imx8m_ddrc_freq *freq)
+{
+ struct imx8m_ddrc *priv = dev_get_drvdata(dev);
+ struct clk *new_dram_core_parent;
+ struct clk *new_dram_alt_parent;
+ struct clk *new_dram_apb_parent;
+ int ret;
+
+ /*
+ * Fetch new parents
+ *
+ * new_dram_alt_parent and new_dram_apb_parent are optional but
+ * new_dram_core_parent is not.
+ */
+ new_dram_core_parent = clk_get_parent_by_index(
+ priv->dram_core, freq->dram_core_parent_index - 1);
+ if (!new_dram_core_parent) {
+ dev_err(dev, "failed to fetch new dram_core parent\n");
+ return -EINVAL;
+ }
+ if (freq->dram_alt_parent_index) {
+ new_dram_alt_parent = clk_get_parent_by_index(
+ priv->dram_alt,
+ freq->dram_alt_parent_index - 1);
+ if (!new_dram_alt_parent) {
+ dev_err(dev, "failed to fetch new dram_alt parent\n");
+ return -EINVAL;
+ }
+ } else
+ new_dram_alt_parent = NULL;
+
+ if (freq->dram_apb_parent_index) {
+ new_dram_apb_parent = clk_get_parent_by_index(
+ priv->dram_apb,
+ freq->dram_apb_parent_index - 1);
+ if (!new_dram_apb_parent) {
+ dev_err(dev, "failed to fetch new dram_apb parent\n");
+ return -EINVAL;
+ }
+ } else
+ new_dram_apb_parent = NULL;
+
+ /* increase reference counts and ensure clks are ON before switch */
+ ret = clk_prepare_enable(new_dram_core_parent);
+ if (ret) {
+ dev_err(dev, "failed to enable new dram_core parent: %d\n",
+ ret);
+ goto out;
+ }
+ ret = clk_prepare_enable(new_dram_alt_parent);
+ if (ret) {
+ dev_err(dev, "failed to enable new dram_alt parent: %d\n",
+ ret);
+ goto out_disable_core_parent;
+ }
+ ret = clk_prepare_enable(new_dram_apb_parent);
+ if (ret) {
+ dev_err(dev, "failed to enable new dram_apb parent: %d\n",
+ ret);
+ goto out_disable_alt_parent;
+ }
+
+ imx8m_ddrc_smc_set_freq(freq->smcarg);
+
+ /* update parents in clk tree after switch. */
+ ret = clk_set_parent(priv->dram_core, new_dram_core_parent);
+ if (ret)
+ dev_warn(dev, "failed to set dram_core parent: %d\n", ret);
+ if (new_dram_alt_parent) {
+ ret = clk_set_parent(priv->dram_alt, new_dram_alt_parent);
+ if (ret)
+ dev_warn(dev, "failed to set dram_alt parent: %d\n",
+ ret);
+ }
+ if (new_dram_apb_parent) {
+ ret = clk_set_parent(priv->dram_apb, new_dram_apb_parent);
+ if (ret)
+ dev_warn(dev, "failed to set dram_apb parent: %d\n",
+ ret);
+ }
+
+ /*
+ * Explicitly refresh dram PLL rate.
+ *
+	 * Even if it's marked with CLK_GET_RATE_NOCACHE, the rate will not be
+ * automatically refreshed when clk_get_rate is called on children.
+ */
+ clk_get_rate(priv->dram_pll);
+
+ /*
+	 * clk_set_parent() transfers the reference count from the old parent;
+	 * now we drop the extra reference counts used during the switch.
+ */
+ clk_disable_unprepare(new_dram_apb_parent);
+out_disable_alt_parent:
+ clk_disable_unprepare(new_dram_alt_parent);
+out_disable_core_parent:
+ clk_disable_unprepare(new_dram_core_parent);
+out:
+ return ret;
+}
+
+static int imx8m_ddrc_target(struct device *dev, unsigned long *freq, u32 flags)
+{
+ struct imx8m_ddrc *priv = dev_get_drvdata(dev);
+ struct imx8m_ddrc_freq *freq_info;
+ struct dev_pm_opp *new_opp;
+ unsigned long old_freq, new_freq;
+ int ret;
+
+ new_opp = devfreq_recommended_opp(dev, freq, flags);
+ if (IS_ERR(new_opp)) {
+ ret = PTR_ERR(new_opp);
+ dev_err(dev, "failed to get recommended opp: %d\n", ret);
+ return ret;
+ }
+ dev_pm_opp_put(new_opp);
+
+ old_freq = clk_get_rate(priv->dram_core);
+ if (*freq == old_freq)
+ return 0;
+
+ freq_info = imx8m_ddrc_find_freq(priv, *freq);
+ if (!freq_info)
+ return -EINVAL;
+
+ /*
+	 * Read back the clk rate to verify the switch was correct and so that
+ * we can report it on all error paths.
+ */
+ ret = imx8m_ddrc_set_freq(dev, freq_info);
+
+ new_freq = clk_get_rate(priv->dram_core);
+ if (ret)
+ dev_err(dev, "ddrc failed freq switch to %lu from %lu: error %d. now at %lu\n",
+ *freq, old_freq, ret, new_freq);
+ else if (*freq != new_freq)
+ dev_err(dev, "ddrc failed freq update to %lu from %lu, now at %lu\n",
+ *freq, old_freq, new_freq);
+ else
+ dev_dbg(dev, "ddrc freq set to %lu (was %lu)\n",
+ *freq, old_freq);
+
+ return ret;
+}
+
+static int imx8m_ddrc_get_cur_freq(struct device *dev, unsigned long *freq)
+{
+ struct imx8m_ddrc *priv = dev_get_drvdata(dev);
+
+ *freq = clk_get_rate(priv->dram_core);
+
+ return 0;
+}
+
+static int imx8m_ddrc_get_dev_status(struct device *dev,
+ struct devfreq_dev_status *stat)
+{
+ struct imx8m_ddrc *priv = dev_get_drvdata(dev);
+
+ stat->busy_time = 0;
+ stat->total_time = 0;
+ stat->current_frequency = clk_get_rate(priv->dram_core);
+
+ return 0;
+}
+
+static int imx8m_ddrc_init_freq_info(struct device *dev)
+{
+ struct imx8m_ddrc *priv = dev_get_drvdata(dev);
+ struct arm_smccc_res res;
+ int index;
+
+ /* An error here means DDR DVFS API not supported by firmware */
+ arm_smccc_smc(IMX_SIP_DDR_DVFS, IMX_SIP_DDR_DVFS_GET_FREQ_COUNT,
+ 0, 0, 0, 0, 0, 0, &res);
+ priv->freq_count = res.a0;
+ if (priv->freq_count <= 0 ||
+ priv->freq_count > IMX8M_DDRC_MAX_FREQ_COUNT)
+ return -ENODEV;
+
+ for (index = 0; index < priv->freq_count; ++index) {
+ struct imx8m_ddrc_freq *freq = &priv->freq_table[index];
+
+ arm_smccc_smc(IMX_SIP_DDR_DVFS, IMX_SIP_DDR_DVFS_GET_FREQ_INFO,
+ index, 0, 0, 0, 0, 0, &res);
+ /* Result should be strictly positive */
+ if ((long)res.a0 <= 0)
+ return -ENODEV;
+
+ freq->rate = res.a0;
+ freq->smcarg = index;
+ freq->dram_core_parent_index = res.a1;
+ freq->dram_alt_parent_index = res.a2;
+ freq->dram_apb_parent_index = res.a3;
+
+ /* dram_core has 2 options: dram_pll or dram_alt_root */
+ if (freq->dram_core_parent_index != 1 &&
+ freq->dram_core_parent_index != 2)
+ return -ENODEV;
+ /* dram_apb and dram_alt have exactly 8 possible parents */
+ if (freq->dram_alt_parent_index > 8 ||
+ freq->dram_apb_parent_index > 8)
+ return -ENODEV;
+ /* dram_core from alt requires explicit dram_alt parent */
+ if (freq->dram_core_parent_index == 2 &&
+ freq->dram_alt_parent_index == 0)
+ return -ENODEV;
+ }
+
+ return 0;
+}
+
+static int imx8m_ddrc_check_opps(struct device *dev)
+{
+ struct imx8m_ddrc *priv = dev_get_drvdata(dev);
+ struct imx8m_ddrc_freq *freq_info;
+ struct dev_pm_opp *opp;
+ unsigned long freq;
+ int i, opp_count;
+
+ /* Enumerate DT OPPs and disable those not supported by firmware */
+ opp_count = dev_pm_opp_get_opp_count(dev);
+ if (opp_count < 0)
+ return opp_count;
+ for (i = 0, freq = 0; i < opp_count; ++i, ++freq) {
+ opp = dev_pm_opp_find_freq_ceil(dev, &freq);
+ if (IS_ERR(opp)) {
+ dev_err(dev, "Failed enumerating OPPs: %ld\n",
+ PTR_ERR(opp));
+ return PTR_ERR(opp);
+ }
+ dev_pm_opp_put(opp);
+
+ freq_info = imx8m_ddrc_find_freq(priv, freq);
+ if (!freq_info) {
+ dev_info(dev, "Disable unsupported OPP %luHz %luMT/s\n",
+ freq, DIV_ROUND_CLOSEST(freq, 250000));
+ dev_pm_opp_disable(dev, freq);
+ }
+ }
+
+ return 0;
+}
+
+static void imx8m_ddrc_exit(struct device *dev)
+{
+ dev_pm_opp_of_remove_table(dev);
+}
+
+static int imx8m_ddrc_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct imx8m_ddrc *priv;
+ const char *gov = DEVFREQ_GOV_USERSPACE;
+ int ret;
+
+ priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ platform_set_drvdata(pdev, priv);
+
+ ret = imx8m_ddrc_init_freq_info(dev);
+ if (ret) {
+ dev_err(dev, "failed to init firmware freq info: %d\n", ret);
+ return ret;
+ }
+
+ priv->dram_core = devm_clk_get(dev, "core");
+ if (IS_ERR(priv->dram_core)) {
+ ret = PTR_ERR(priv->dram_core);
+ dev_err(dev, "failed to fetch core clock: %d\n", ret);
+ return ret;
+ }
+ priv->dram_pll = devm_clk_get(dev, "pll");
+ if (IS_ERR(priv->dram_pll)) {
+ ret = PTR_ERR(priv->dram_pll);
+ dev_err(dev, "failed to fetch pll clock: %d\n", ret);
+ return ret;
+ }
+ priv->dram_alt = devm_clk_get(dev, "alt");
+ if (IS_ERR(priv->dram_alt)) {
+ ret = PTR_ERR(priv->dram_alt);
+ dev_err(dev, "failed to fetch alt clock: %d\n", ret);
+ return ret;
+ }
+ priv->dram_apb = devm_clk_get(dev, "apb");
+ if (IS_ERR(priv->dram_apb)) {
+ ret = PTR_ERR(priv->dram_apb);
+ dev_err(dev, "failed to fetch apb clock: %d\n", ret);
+ return ret;
+ }
+
+ ret = dev_pm_opp_of_add_table(dev);
+ if (ret < 0) {
+ dev_err(dev, "failed to get OPP table\n");
+ return ret;
+ }
+
+ ret = imx8m_ddrc_check_opps(dev);
+ if (ret < 0)
+ goto err;
+
+ priv->profile.polling_ms = 1000;
+ priv->profile.target = imx8m_ddrc_target;
+ priv->profile.get_dev_status = imx8m_ddrc_get_dev_status;
+ priv->profile.exit = imx8m_ddrc_exit;
+ priv->profile.get_cur_freq = imx8m_ddrc_get_cur_freq;
+ priv->profile.initial_freq = clk_get_rate(priv->dram_core);
+
+ priv->devfreq = devm_devfreq_add_device(dev, &priv->profile,
+ gov, NULL);
+ if (IS_ERR(priv->devfreq)) {
+ ret = PTR_ERR(priv->devfreq);
+ dev_err(dev, "failed to add devfreq device: %d\n", ret);
+ goto err;
+ }
+
+ return 0;
+
+err:
+ dev_pm_opp_of_remove_table(dev);
+ return ret;
+}
+
+static const struct of_device_id imx8m_ddrc_of_match[] = {
+ { .compatible = "fsl,imx8m-ddrc", },
+ { /* sentinel */ },
+};
+MODULE_DEVICE_TABLE(of, imx8m_ddrc_of_match);
+
+static struct platform_driver imx8m_ddrc_platdrv = {
+ .probe = imx8m_ddrc_probe,
+ .driver = {
+ .name = "imx8m-ddrc-devfreq",
+ .of_match_table = of_match_ptr(imx8m_ddrc_of_match),
+ },
+};
+module_platform_driver(imx8m_ddrc_platdrv);
+
+MODULE_DESCRIPTION("i.MX8M DDR Controller frequency driver");
+MODULE_AUTHOR("Leonard Crestez <leonard.crestez@nxp.com>");
+MODULE_LICENSE("GPL v2");
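The OPP walk in imx8m_ddrc_check_opps() uses a standard enumeration idiom worth spelling out: dev_pm_opp_find_freq_ceil() rewrites *freq to the exact rate it found, so stepping the cursor past it visits OPPs in ascending order. A generic sketch:

	unsigned long freq = 0;
	struct dev_pm_opp *opp;

	while (true) {
		opp = dev_pm_opp_find_freq_ceil(dev, &freq);
		if (IS_ERR(opp))
			break;		/* -ERANGE once past the highest OPP */
		/* inspect the OPP here, e.g. dev_pm_opp_disable(dev, freq) */
		dev_pm_opp_put(opp);
		freq++;			/* move past this OPP's exact rate */
	}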
diff --git a/drivers/devfreq/rk3399_dmc.c b/drivers/devfreq/rk3399_dmc.c
index 2e65d7279d79..24f04f78285b 100644
--- a/drivers/devfreq/rk3399_dmc.c
+++ b/drivers/devfreq/rk3399_dmc.c
@@ -364,7 +364,8 @@ static int rk3399_dmcfreq_probe(struct platform_device *pdev)
if (res.a0) {
dev_err(dev, "Failed to set dram param: %ld\n",
res.a0);
- return -EINVAL;
+ ret = -EINVAL;
+ goto err_edev;
}
}
}
@@ -372,8 +373,11 @@ static int rk3399_dmcfreq_probe(struct platform_device *pdev)
node = of_parse_phandle(np, "rockchip,pmu", 0);
if (node) {
data->regmap_pmu = syscon_node_to_regmap(node);
- if (IS_ERR(data->regmap_pmu))
- return PTR_ERR(data->regmap_pmu);
+ of_node_put(node);
+ if (IS_ERR(data->regmap_pmu)) {
+ ret = PTR_ERR(data->regmap_pmu);
+ goto err_edev;
+ }
}
regmap_read(data->regmap_pmu, RK3399_PMUGRF_OS_REG2, &val);
@@ -391,7 +395,8 @@ static int rk3399_dmcfreq_probe(struct platform_device *pdev)
data->odt_dis_freq = data->timing.lpddr4_odt_dis_freq;
break;
default:
- return -EINVAL;
+ ret = -EINVAL;
+ goto err_edev;
};
arm_smccc_smc(ROCKCHIP_SIP_DRAM_FREQ, 0, 0,
@@ -425,7 +430,8 @@ static int rk3399_dmcfreq_probe(struct platform_device *pdev)
*/
if (dev_pm_opp_of_add_table(dev)) {
dev_err(dev, "Invalid operating-points in device tree.\n");
- return -EINVAL;
+ ret = -EINVAL;
+ goto err_edev;
}
of_property_read_u32(np, "upthreshold",
@@ -465,6 +471,9 @@ static int rk3399_dmcfreq_probe(struct platform_device *pdev)
err_free_opp:
dev_pm_opp_of_remove_table(&pdev->dev);
+err_edev:
+ devfreq_event_disable_edev(data->edev);
+
return ret;
}
diff --git a/drivers/dma-buf/Kconfig b/drivers/dma-buf/Kconfig
index a23b6752d11a..0613bb7770f5 100644
--- a/drivers/dma-buf/Kconfig
+++ b/drivers/dma-buf/Kconfig
@@ -44,4 +44,15 @@ config DMABUF_SELFTESTS
default n
depends on DMA_SHARED_BUFFER
+menuconfig DMABUF_HEAPS
+ bool "DMA-BUF Userland Memory Heaps"
+ select DMA_SHARED_BUFFER
+ help
+ Choose this option to enable the DMA-BUF userland memory heaps.
+	  This option creates per-heap chardevs in /dev/dma_heap/ which
+	  allow userspace to allocate dma-bufs that can be shared
+ between drivers.
+
+source "drivers/dma-buf/heaps/Kconfig"
+
endmenu
diff --git a/drivers/dma-buf/Makefile b/drivers/dma-buf/Makefile
index 03479da06422..9c190026bfab 100644
--- a/drivers/dma-buf/Makefile
+++ b/drivers/dma-buf/Makefile
@@ -1,6 +1,8 @@
# SPDX-License-Identifier: GPL-2.0-only
obj-y := dma-buf.o dma-fence.o dma-fence-array.o dma-fence-chain.o \
dma-resv.o seqno-fence.o
+obj-$(CONFIG_DMABUF_HEAPS) += dma-heap.o
+obj-$(CONFIG_DMABUF_HEAPS) += heaps/
obj-$(CONFIG_SYNC_FILE) += sync_file.o
obj-$(CONFIG_SW_SYNC) += sw_sync.o sync_debug.o
obj-$(CONFIG_UDMABUF) += udmabuf.o
diff --git a/drivers/dma-buf/dma-buf.c b/drivers/dma-buf/dma-buf.c
index ce41cd9b758a..d4097856c86b 100644
--- a/drivers/dma-buf/dma-buf.c
+++ b/drivers/dma-buf/dma-buf.c
@@ -878,29 +878,9 @@ EXPORT_SYMBOL_GPL(dma_buf_unmap_attachment);
* with calls to dma_buf_begin_cpu_access() and dma_buf_end_cpu_access()
* access.
*
- * To support dma_buf objects residing in highmem cpu access is page-based
- * using an api similar to kmap. Accessing a dma_buf is done in aligned chunks
- * of PAGE_SIZE size. Before accessing a chunk it needs to be mapped, which
- * returns a pointer in kernel virtual address space. Afterwards the chunk
- * needs to be unmapped again. There is no limit on how often a given chunk
- * can be mapped and unmapped, i.e. the importer does not need to call
- * begin_cpu_access again before mapping the same chunk again.
- *
- * Interfaces::
- * void \*dma_buf_kmap(struct dma_buf \*, unsigned long);
- * void dma_buf_kunmap(struct dma_buf \*, unsigned long, void \*);
- *
- * Implementing the functions is optional for exporters and for importers all
- * the restrictions of using kmap apply.
- *
- * dma_buf kmap calls outside of the range specified in begin_cpu_access are
- * undefined. If the range is not PAGE_SIZE aligned, kmap needs to succeed on
- * the partial chunks at the beginning and end but may return stale or bogus
- * data outside of the range (in these partial chunks).
- *
- * For some cases the overhead of kmap can be too high, a vmap interface
- * is introduced. This interface should be used very carefully, as vmalloc
- * space is a limited resources on many architectures.
+ * Since most kernel-internal dma-buf accesses need the entire buffer, a
+ * vmap interface is introduced. Note that on very old 32-bit architectures
+ * vmalloc space might be limited and result in vmap calls failing.
*
* Interfaces::
* void \*dma_buf_vmap(struct dma_buf \*dmabuf)
@@ -1050,43 +1030,6 @@ int dma_buf_end_cpu_access(struct dma_buf *dmabuf,
}
EXPORT_SYMBOL_GPL(dma_buf_end_cpu_access);
-/**
- * dma_buf_kmap - Map a page of the buffer object into kernel address space. The
- * same restrictions as for kmap and friends apply.
- * @dmabuf: [in] buffer to map page from.
- * @page_num: [in] page in PAGE_SIZE units to map.
- *
- * This call must always succeed, any necessary preparations that might fail
- * need to be done in begin_cpu_access.
- */
-void *dma_buf_kmap(struct dma_buf *dmabuf, unsigned long page_num)
-{
- WARN_ON(!dmabuf);
-
- if (!dmabuf->ops->map)
- return NULL;
- return dmabuf->ops->map(dmabuf, page_num);
-}
-EXPORT_SYMBOL_GPL(dma_buf_kmap);
-
-/**
- * dma_buf_kunmap - Unmap a page obtained by dma_buf_kmap.
- * @dmabuf: [in] buffer to unmap page from.
- * @page_num: [in] page in PAGE_SIZE units to unmap.
- * @vaddr: [in] kernel space pointer obtained from dma_buf_kmap.
- *
- * This call must always succeed.
- */
-void dma_buf_kunmap(struct dma_buf *dmabuf, unsigned long page_num,
- void *vaddr)
-{
- WARN_ON(!dmabuf);
-
- if (dmabuf->ops->unmap)
- dmabuf->ops->unmap(dmabuf, page_num, vaddr);
-}
-EXPORT_SYMBOL_GPL(dma_buf_kunmap);
-
/**
* dma_buf_mmap - Setup up a userspace mmap with the given vma
diff --git a/drivers/dma-buf/dma-heap.c b/drivers/dma-buf/dma-heap.c
new file mode 100644
index 000000000000..afd22c9dbdcf
--- /dev/null
+++ b/drivers/dma-buf/dma-heap.c
@@ -0,0 +1,298 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Framework for userspace DMA-BUF allocations
+ *
+ * Copyright (C) 2011 Google, Inc.
+ * Copyright (C) 2019 Linaro Ltd.
+ */
+
+#include <linux/cdev.h>
+#include <linux/debugfs.h>
+#include <linux/device.h>
+#include <linux/dma-buf.h>
+#include <linux/err.h>
+#include <linux/xarray.h>
+#include <linux/list.h>
+#include <linux/slab.h>
+#include <linux/uaccess.h>
+#include <linux/syscalls.h>
+#include <linux/dma-heap.h>
+#include <uapi/linux/dma-heap.h>
+
+#define DEVNAME "dma_heap"
+
+#define NUM_HEAP_MINORS 128
+
+/**
+ * struct dma_heap - represents a dmabuf heap in the system
+ * @name: used for debugging/device-node name
+ * @ops: ops struct for this heap
+ * @priv:		subdriver private data, see dma_heap_get_drvdata()
+ * @heap_devt:		heap device node
+ * @list:		list head connecting to list of heaps
+ * @heap_cdev:		heap char device
+ *
+ * Represents a heap of memory from which buffers can be made.
+ */
+struct dma_heap {
+ const char *name;
+ const struct dma_heap_ops *ops;
+ void *priv;
+ dev_t heap_devt;
+ struct list_head list;
+ struct cdev heap_cdev;
+};
+
+static LIST_HEAD(heap_list);
+static DEFINE_MUTEX(heap_list_lock);
+static dev_t dma_heap_devt;
+static struct class *dma_heap_class;
+static DEFINE_XARRAY_ALLOC(dma_heap_minors);
+
+static int dma_heap_buffer_alloc(struct dma_heap *heap, size_t len,
+ unsigned int fd_flags,
+ unsigned int heap_flags)
+{
+ /*
+ * Allocations from all heaps have to begin
+ * and end on page boundaries.
+ */
+ len = PAGE_ALIGN(len);
+ if (!len)
+ return -EINVAL;
+
+ return heap->ops->allocate(heap, len, fd_flags, heap_flags);
+}
+
+static int dma_heap_open(struct inode *inode, struct file *file)
+{
+ struct dma_heap *heap;
+
+ heap = xa_load(&dma_heap_minors, iminor(inode));
+ if (!heap) {
+ pr_err("dma_heap: minor %d unknown.\n", iminor(inode));
+ return -ENODEV;
+ }
+
+ /* instance data as context */
+ file->private_data = heap;
+ nonseekable_open(inode, file);
+
+ return 0;
+}
+
+static long dma_heap_ioctl_allocate(struct file *file, void *data)
+{
+ struct dma_heap_allocation_data *heap_allocation = data;
+ struct dma_heap *heap = file->private_data;
+ int fd;
+
+ if (heap_allocation->fd)
+ return -EINVAL;
+
+ if (heap_allocation->fd_flags & ~DMA_HEAP_VALID_FD_FLAGS)
+ return -EINVAL;
+
+ if (heap_allocation->heap_flags & ~DMA_HEAP_VALID_HEAP_FLAGS)
+ return -EINVAL;
+
+ fd = dma_heap_buffer_alloc(heap, heap_allocation->len,
+ heap_allocation->fd_flags,
+ heap_allocation->heap_flags);
+ if (fd < 0)
+ return fd;
+
+ heap_allocation->fd = fd;
+
+ return 0;
+}
+
+static unsigned int dma_heap_ioctl_cmds[] = {
+ DMA_HEAP_IOCTL_ALLOC,
+};
+
+static long dma_heap_ioctl(struct file *file, unsigned int ucmd,
+ unsigned long arg)
+{
+ char stack_kdata[128];
+ char *kdata = stack_kdata;
+ unsigned int kcmd;
+ unsigned int in_size, out_size, drv_size, ksize;
+ int nr = _IOC_NR(ucmd);
+ int ret = 0;
+
+ if (nr >= ARRAY_SIZE(dma_heap_ioctl_cmds))
+ return -EINVAL;
+
+ /* Get the kernel ioctl cmd that matches */
+ kcmd = dma_heap_ioctl_cmds[nr];
+
+ /* Figure out the delta between user cmd size and kernel cmd size */
+ drv_size = _IOC_SIZE(kcmd);
+ out_size = _IOC_SIZE(ucmd);
+ in_size = out_size;
+ if ((ucmd & kcmd & IOC_IN) == 0)
+ in_size = 0;
+ if ((ucmd & kcmd & IOC_OUT) == 0)
+ out_size = 0;
+ ksize = max(max(in_size, out_size), drv_size);
+
+ /* If necessary, allocate buffer for ioctl argument */
+ if (ksize > sizeof(stack_kdata)) {
+ kdata = kmalloc(ksize, GFP_KERNEL);
+ if (!kdata)
+ return -ENOMEM;
+ }
+
+ if (copy_from_user(kdata, (void __user *)arg, in_size) != 0) {
+ ret = -EFAULT;
+ goto err;
+ }
+
+ /* zero out any difference between the kernel/user structure size */
+ if (ksize > in_size)
+ memset(kdata + in_size, 0, ksize - in_size);
+
+ switch (kcmd) {
+ case DMA_HEAP_IOCTL_ALLOC:
+ ret = dma_heap_ioctl_allocate(file, kdata);
+ break;
+ default:
+ ret = -ENOTTY;
+ goto err;
+ }
+
+ if (copy_to_user((void __user *)arg, kdata, out_size) != 0)
+ ret = -EFAULT;
+err:
+ if (kdata != stack_kdata)
+ kfree(kdata);
+ return ret;
+}
+
+static const struct file_operations dma_heap_fops = {
+ .owner = THIS_MODULE,
+ .open = dma_heap_open,
+ .unlocked_ioctl = dma_heap_ioctl,
+#ifdef CONFIG_COMPAT
+ .compat_ioctl = dma_heap_ioctl,
+#endif
+};
+
+/**
+ * dma_heap_get_drvdata() - get per-subdriver data for the heap
+ * @heap: DMA-Heap to retrieve private data for
+ *
+ * Returns:
+ * The per-subdriver data for the heap.
+ */
+void *dma_heap_get_drvdata(struct dma_heap *heap)
+{
+ return heap->priv;
+}
+
+struct dma_heap *dma_heap_add(const struct dma_heap_export_info *exp_info)
+{
+ struct dma_heap *heap, *h, *err_ret;
+ struct device *dev_ret;
+ unsigned int minor;
+ int ret;
+
+ if (!exp_info->name || !strcmp(exp_info->name, "")) {
+ pr_err("dma_heap: Cannot add heap without a name\n");
+ return ERR_PTR(-EINVAL);
+ }
+
+ if (!exp_info->ops || !exp_info->ops->allocate) {
+ pr_err("dma_heap: Cannot add heap with invalid ops struct\n");
+ return ERR_PTR(-EINVAL);
+ }
+
+ /* check the name is unique */
+ mutex_lock(&heap_list_lock);
+ list_for_each_entry(h, &heap_list, list) {
+ if (!strcmp(h->name, exp_info->name)) {
+ mutex_unlock(&heap_list_lock);
+ pr_err("dma_heap: Already registered heap named %s\n",
+ exp_info->name);
+ return ERR_PTR(-EINVAL);
+ }
+ }
+ mutex_unlock(&heap_list_lock);
+
+ heap = kzalloc(sizeof(*heap), GFP_KERNEL);
+ if (!heap)
+ return ERR_PTR(-ENOMEM);
+
+ heap->name = exp_info->name;
+ heap->ops = exp_info->ops;
+ heap->priv = exp_info->priv;
+
+ /* Find unused minor number */
+ ret = xa_alloc(&dma_heap_minors, &minor, heap,
+ XA_LIMIT(0, NUM_HEAP_MINORS - 1), GFP_KERNEL);
+ if (ret < 0) {
+ pr_err("dma_heap: Unable to get minor number for heap\n");
+ err_ret = ERR_PTR(ret);
+ goto err0;
+ }
+
+ /* Create device */
+ heap->heap_devt = MKDEV(MAJOR(dma_heap_devt), minor);
+
+ cdev_init(&heap->heap_cdev, &dma_heap_fops);
+ ret = cdev_add(&heap->heap_cdev, heap->heap_devt, 1);
+ if (ret < 0) {
+ pr_err("dma_heap: Unable to add char device\n");
+ err_ret = ERR_PTR(ret);
+ goto err1;
+ }
+
+ dev_ret = device_create(dma_heap_class,
+ NULL,
+ heap->heap_devt,
+ NULL,
+ heap->name);
+ if (IS_ERR(dev_ret)) {
+ pr_err("dma_heap: Unable to create device\n");
+ err_ret = ERR_CAST(dev_ret);
+ goto err2;
+ }
+ /* Add heap to the list */
+ mutex_lock(&heap_list_lock);
+ list_add(&heap->list, &heap_list);
+ mutex_unlock(&heap_list_lock);
+
+ return heap;
+
+err2:
+ cdev_del(&heap->heap_cdev);
+err1:
+ xa_erase(&dma_heap_minors, minor);
+err0:
+ kfree(heap);
+ return err_ret;
+}
+
+static char *dma_heap_devnode(struct device *dev, umode_t *mode)
+{
+ return kasprintf(GFP_KERNEL, "dma_heap/%s", dev_name(dev));
+}
+
+static int dma_heap_init(void)
+{
+ int ret;
+
+ ret = alloc_chrdev_region(&dma_heap_devt, 0, NUM_HEAP_MINORS, DEVNAME);
+ if (ret)
+ return ret;
+
+ dma_heap_class = class_create(THIS_MODULE, DEVNAME);
+ if (IS_ERR(dma_heap_class)) {
+ unregister_chrdev_region(dma_heap_devt, NUM_HEAP_MINORS);
+ return PTR_ERR(dma_heap_class);
+ }
+ dma_heap_class->devnode = dma_heap_devnode;
+
+ return 0;
+}
+subsys_initcall(dma_heap_init);
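From userspace, the whole flow above reduces to an open() plus one ioctl(). A hedged sketch of a minimal allocator (the heap node name depends on which heaps are registered, e.g. /dev/dma_heap/system when DMABUF_HEAPS_SYSTEM is enabled):

	#include <fcntl.h>
	#include <unistd.h>
	#include <sys/ioctl.h>
	#include <linux/dma-heap.h>

	/* Returns a dma-buf fd on success, -1 on error. */
	static int heap_alloc(const char *heap_path, __u64 len)
	{
		struct dma_heap_allocation_data data = {
			.len = len,			/* kernel page-aligns this */
			.fd_flags = O_RDWR | O_CLOEXEC,	/* .fd must start as 0 */
		};
		int heap_fd = open(heap_path, O_RDONLY);

		if (heap_fd < 0)
			return -1;
		if (ioctl(heap_fd, DMA_HEAP_IOCTL_ALLOC, &data) < 0) {
			close(heap_fd);
			return -1;
		}
		close(heap_fd);
		return data.fd;			/* a dma-buf fd, mmap()-able */
	}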
diff --git a/drivers/dma-buf/dma-resv.c b/drivers/dma-buf/dma-resv.c
index 709002515550..4264e64788c4 100644
--- a/drivers/dma-buf/dma-resv.c
+++ b/drivers/dma-buf/dma-resv.c
@@ -34,6 +34,7 @@
#include <linux/dma-resv.h>
#include <linux/export.h>
+#include <linux/sched/mm.h>
/**
* DOC: Reservation Object Overview
@@ -95,6 +96,37 @@ static void dma_resv_list_free(struct dma_resv_list *list)
kfree_rcu(list, rcu);
}
+#if IS_ENABLED(CONFIG_LOCKDEP)
+static int __init dma_resv_lockdep(void)
+{
+ struct mm_struct *mm = mm_alloc();
+ struct ww_acquire_ctx ctx;
+ struct dma_resv obj;
+ int ret;
+
+ if (!mm)
+ return -ENOMEM;
+
+ dma_resv_init(&obj);
+
+ down_read(&mm->mmap_sem);
+ ww_acquire_init(&ctx, &reservation_ww_class);
+ ret = dma_resv_lock(&obj, &ctx);
+ if (ret == -EDEADLK)
+ dma_resv_lock_slow(&obj, &ctx);
+ fs_reclaim_acquire(GFP_KERNEL);
+ fs_reclaim_release(GFP_KERNEL);
+ ww_mutex_unlock(&obj.lock);
+ ww_acquire_fini(&ctx);
+ up_read(&mm->mmap_sem);
+
+ mmput(mm);
+
+ return 0;
+}
+subsys_initcall(dma_resv_lockdep);
+#endif
+
/**
* dma_resv_init - initialize a reservation object
* @obj: the reservation object
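The lockdep block just added is priming, not a functional path: it takes mmap_sem, a dma_resv lock, and the fs_reclaim markers once at boot in the intended nesting order, so any driver that later acquires them in a conflicting order triggers an immediate lockdep splat instead of a rare runtime deadlock.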
diff --git a/drivers/dma-buf/heaps/Kconfig b/drivers/dma-buf/heaps/Kconfig
new file mode 100644
index 000000000000..a5eef06c4226
--- /dev/null
+++ b/drivers/dma-buf/heaps/Kconfig
@@ -0,0 +1,14 @@
+config DMABUF_HEAPS_SYSTEM
+ bool "DMA-BUF System Heap"
+ depends on DMABUF_HEAPS
+ help
+ Choose this option to enable the system dmabuf heap. The system heap
+ is backed by pages from the buddy allocator. If in doubt, say Y.
+
+config DMABUF_HEAPS_CMA
+ bool "DMA-BUF CMA Heap"
+ depends on DMABUF_HEAPS && DMA_CMA
+ help
+ Choose this option to enable dma-buf CMA heap. This heap is backed
+ by the Contiguous Memory Allocator (CMA). If your system has these
+ regions, you should say Y here.
diff --git a/drivers/dma-buf/heaps/Makefile b/drivers/dma-buf/heaps/Makefile
new file mode 100644
index 000000000000..6e54cdec3da0
--- /dev/null
+++ b/drivers/dma-buf/heaps/Makefile
@@ -0,0 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
+obj-y += heap-helpers.o
+obj-$(CONFIG_DMABUF_HEAPS_SYSTEM) += system_heap.o
+obj-$(CONFIG_DMABUF_HEAPS_CMA) += cma_heap.o
diff --git a/drivers/dma-buf/heaps/cma_heap.c b/drivers/dma-buf/heaps/cma_heap.c
new file mode 100644
index 000000000000..626cf7fd033a
--- /dev/null
+++ b/drivers/dma-buf/heaps/cma_heap.c
@@ -0,0 +1,177 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * DMABUF CMA heap exporter
+ *
+ * Copyright (C) 2012, 2019 Linaro Ltd.
+ * Author: <benjamin.gaignard@linaro.org> for ST-Ericsson.
+ */
+
+#include <linux/cma.h>
+#include <linux/device.h>
+#include <linux/dma-buf.h>
+#include <linux/dma-heap.h>
+#include <linux/dma-contiguous.h>
+#include <linux/err.h>
+#include <linux/errno.h>
+#include <linux/highmem.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/scatterlist.h>
+#include <linux/sched/signal.h>
+
+#include "heap-helpers.h"
+
+struct cma_heap {
+ struct dma_heap *heap;
+ struct cma *cma;
+};
+
+static void cma_heap_free(struct heap_helper_buffer *buffer)
+{
+ struct cma_heap *cma_heap = dma_heap_get_drvdata(buffer->heap);
+ unsigned long nr_pages = buffer->pagecount;
+ struct page *cma_pages = buffer->priv_virt;
+
+ /* free page list */
+ kfree(buffer->pages);
+ /* release memory */
+ cma_release(cma_heap->cma, cma_pages, nr_pages);
+ kfree(buffer);
+}
+
+/* dmabuf heap CMA operations functions */
+static int cma_heap_allocate(struct dma_heap *heap,
+ unsigned long len,
+ unsigned long fd_flags,
+ unsigned long heap_flags)
+{
+ struct cma_heap *cma_heap = dma_heap_get_drvdata(heap);
+ struct heap_helper_buffer *helper_buffer;
+ struct page *cma_pages;
+ size_t size = PAGE_ALIGN(len);
+ unsigned long nr_pages = size >> PAGE_SHIFT;
+ unsigned long align = get_order(size);
+ struct dma_buf *dmabuf;
+ int ret = -ENOMEM;
+ pgoff_t pg;
+
+ if (align > CONFIG_CMA_ALIGNMENT)
+ align = CONFIG_CMA_ALIGNMENT;
+
+ helper_buffer = kzalloc(sizeof(*helper_buffer), GFP_KERNEL);
+ if (!helper_buffer)
+ return -ENOMEM;
+
+ init_heap_helper_buffer(helper_buffer, cma_heap_free);
+ helper_buffer->heap = heap;
+ helper_buffer->size = len;
+
+ cma_pages = cma_alloc(cma_heap->cma, nr_pages, align, false);
+ if (!cma_pages)
+ goto free_buf;
+
+ if (PageHighMem(cma_pages)) {
+ unsigned long nr_clear_pages = nr_pages;
+ struct page *page = cma_pages;
+
+ while (nr_clear_pages > 0) {
+ void *vaddr = kmap_atomic(page);
+
+ memset(vaddr, 0, PAGE_SIZE);
+ kunmap_atomic(vaddr);
+ /*
+ * Avoid wasting time zeroing memory if the process
+			 * has been killed by SIGKILL
+ */
+ if (fatal_signal_pending(current))
+ goto free_cma;
+
+ page++;
+ nr_clear_pages--;
+ }
+ } else {
+ memset(page_address(cma_pages), 0, size);
+ }
+
+ helper_buffer->pagecount = nr_pages;
+ helper_buffer->pages = kmalloc_array(helper_buffer->pagecount,
+ sizeof(*helper_buffer->pages),
+ GFP_KERNEL);
+ if (!helper_buffer->pages) {
+ ret = -ENOMEM;
+ goto free_cma;
+ }
+
+ for (pg = 0; pg < helper_buffer->pagecount; pg++)
+ helper_buffer->pages[pg] = &cma_pages[pg];
+
+ /* create the dmabuf */
+ dmabuf = heap_helper_export_dmabuf(helper_buffer, fd_flags);
+ if (IS_ERR(dmabuf)) {
+ ret = PTR_ERR(dmabuf);
+ goto free_pages;
+ }
+
+ helper_buffer->dmabuf = dmabuf;
+ helper_buffer->priv_virt = cma_pages;
+
+ ret = dma_buf_fd(dmabuf, fd_flags);
+ if (ret < 0) {
+ dma_buf_put(dmabuf);
+ /* just return, as put will call release and that will free */
+ return ret;
+ }
+
+ return ret;
+
+free_pages:
+ kfree(helper_buffer->pages);
+free_cma:
+ cma_release(cma_heap->cma, cma_pages, nr_pages);
+free_buf:
+ kfree(helper_buffer);
+ return ret;
+}
+
+static const struct dma_heap_ops cma_heap_ops = {
+ .allocate = cma_heap_allocate,
+};
+
+static int __add_cma_heap(struct cma *cma, void *data)
+{
+ struct cma_heap *cma_heap;
+ struct dma_heap_export_info exp_info;
+
+ cma_heap = kzalloc(sizeof(*cma_heap), GFP_KERNEL);
+ if (!cma_heap)
+ return -ENOMEM;
+ cma_heap->cma = cma;
+
+ exp_info.name = cma_get_name(cma);
+ exp_info.ops = &cma_heap_ops;
+ exp_info.priv = cma_heap;
+
+ cma_heap->heap = dma_heap_add(&exp_info);
+ if (IS_ERR(cma_heap->heap)) {
+ int ret = PTR_ERR(cma_heap->heap);
+
+ kfree(cma_heap);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int add_default_cma_heap(void)
+{
+ struct cma *default_cma = dev_get_cma_area(NULL);
+ int ret = 0;
+
+ if (default_cma)
+ ret = __add_cma_heap(default_cma, NULL);
+
+ return ret;
+}
+module_init(add_default_cma_heap);
+MODULE_DESCRIPTION("DMA-BUF CMA Heap");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/dma-buf/heaps/heap-helpers.c b/drivers/dma-buf/heaps/heap-helpers.c
new file mode 100644
index 000000000000..9f964ca3f59c
--- /dev/null
+++ b/drivers/dma-buf/heaps/heap-helpers.c
@@ -0,0 +1,271 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <linux/device.h>
+#include <linux/dma-buf.h>
+#include <linux/err.h>
+#include <linux/highmem.h>
+#include <linux/idr.h>
+#include <linux/list.h>
+#include <linux/slab.h>
+#include <linux/uaccess.h>
+#include <linux/vmalloc.h>
+#include <uapi/linux/dma-heap.h>
+
+#include "heap-helpers.h"
+
+void init_heap_helper_buffer(struct heap_helper_buffer *buffer,
+ void (*free)(struct heap_helper_buffer *))
+{
+ buffer->priv_virt = NULL;
+ mutex_init(&buffer->lock);
+ buffer->vmap_cnt = 0;
+ buffer->vaddr = NULL;
+ buffer->pagecount = 0;
+ buffer->pages = NULL;
+ INIT_LIST_HEAD(&buffer->attachments);
+ buffer->free = free;
+}
+
+struct dma_buf *heap_helper_export_dmabuf(struct heap_helper_buffer *buffer,
+ int fd_flags)
+{
+ DEFINE_DMA_BUF_EXPORT_INFO(exp_info);
+
+ exp_info.ops = &heap_helper_ops;
+ exp_info.size = buffer->size;
+ exp_info.flags = fd_flags;
+ exp_info.priv = buffer;
+
+ return dma_buf_export(&exp_info);
+}
+
+static void *dma_heap_map_kernel(struct heap_helper_buffer *buffer)
+{
+ void *vaddr;
+
+ vaddr = vmap(buffer->pages, buffer->pagecount, VM_MAP, PAGE_KERNEL);
+ if (!vaddr)
+ return ERR_PTR(-ENOMEM);
+
+ return vaddr;
+}
+
+static void dma_heap_buffer_destroy(struct heap_helper_buffer *buffer)
+{
+ if (buffer->vmap_cnt > 0) {
+ WARN(1, "%s: buffer still mapped in the kernel\n", __func__);
+ vunmap(buffer->vaddr);
+ }
+
+ buffer->free(buffer);
+}
+
+static void *dma_heap_buffer_vmap_get(struct heap_helper_buffer *buffer)
+{
+ void *vaddr;
+
+ if (buffer->vmap_cnt) {
+ buffer->vmap_cnt++;
+ return buffer->vaddr;
+ }
+ vaddr = dma_heap_map_kernel(buffer);
+ if (IS_ERR(vaddr))
+ return vaddr;
+ buffer->vaddr = vaddr;
+ buffer->vmap_cnt++;
+ return vaddr;
+}
+
+static void dma_heap_buffer_vmap_put(struct heap_helper_buffer *buffer)
+{
+ if (!--buffer->vmap_cnt) {
+ vunmap(buffer->vaddr);
+ buffer->vaddr = NULL;
+ }
+}
+
+struct dma_heaps_attachment {
+ struct device *dev;
+ struct sg_table table;
+ struct list_head list;
+};
+
+static int dma_heap_attach(struct dma_buf *dmabuf,
+ struct dma_buf_attachment *attachment)
+{
+ struct dma_heaps_attachment *a;
+ struct heap_helper_buffer *buffer = dmabuf->priv;
+ int ret;
+
+ a = kzalloc(sizeof(*a), GFP_KERNEL);
+ if (!a)
+ return -ENOMEM;
+
+ ret = sg_alloc_table_from_pages(&a->table, buffer->pages,
+ buffer->pagecount, 0,
+ buffer->pagecount << PAGE_SHIFT,
+ GFP_KERNEL);
+ if (ret) {
+ kfree(a);
+ return ret;
+ }
+
+ a->dev = attachment->dev;
+ INIT_LIST_HEAD(&a->list);
+
+ attachment->priv = a;
+
+ mutex_lock(&buffer->lock);
+ list_add(&a->list, &buffer->attachments);
+ mutex_unlock(&buffer->lock);
+
+ return 0;
+}
+
+static void dma_heap_detach(struct dma_buf *dmabuf,
+ struct dma_buf_attachment *attachment)
+{
+ struct dma_heaps_attachment *a = attachment->priv;
+ struct heap_helper_buffer *buffer = dmabuf->priv;
+
+ mutex_lock(&buffer->lock);
+ list_del(&a->list);
+ mutex_unlock(&buffer->lock);
+
+ sg_free_table(&a->table);
+ kfree(a);
+}
+
+static
+struct sg_table *dma_heap_map_dma_buf(struct dma_buf_attachment *attachment,
+ enum dma_data_direction direction)
+{
+ struct dma_heaps_attachment *a = attachment->priv;
+ struct sg_table *table;
+
+ table = &a->table;
+
+ if (!dma_map_sg(attachment->dev, table->sgl, table->nents,
+ direction))
+ table = ERR_PTR(-ENOMEM);
+ return table;
+}
+
+static void dma_heap_unmap_dma_buf(struct dma_buf_attachment *attachment,
+ struct sg_table *table,
+ enum dma_data_direction direction)
+{
+ dma_unmap_sg(attachment->dev, table->sgl, table->nents, direction);
+}
+
+static vm_fault_t dma_heap_vm_fault(struct vm_fault *vmf)
+{
+ struct vm_area_struct *vma = vmf->vma;
+ struct heap_helper_buffer *buffer = vma->vm_private_data;
+
+	if (vmf->pgoff >= buffer->pagecount)
+ return VM_FAULT_SIGBUS;
+
+ vmf->page = buffer->pages[vmf->pgoff];
+ get_page(vmf->page);
+
+ return 0;
+}
+
+static const struct vm_operations_struct dma_heap_vm_ops = {
+ .fault = dma_heap_vm_fault,
+};
+
+static int dma_heap_mmap(struct dma_buf *dmabuf, struct vm_area_struct *vma)
+{
+ struct heap_helper_buffer *buffer = dmabuf->priv;
+
+ if ((vma->vm_flags & (VM_SHARED | VM_MAYSHARE)) == 0)
+ return -EINVAL;
+
+ vma->vm_ops = &dma_heap_vm_ops;
+ vma->vm_private_data = buffer;
+
+ return 0;
+}
+
+static void dma_heap_dma_buf_release(struct dma_buf *dmabuf)
+{
+ struct heap_helper_buffer *buffer = dmabuf->priv;
+
+ dma_heap_buffer_destroy(buffer);
+}
+
+static int dma_heap_dma_buf_begin_cpu_access(struct dma_buf *dmabuf,
+ enum dma_data_direction direction)
+{
+ struct heap_helper_buffer *buffer = dmabuf->priv;
+ struct dma_heaps_attachment *a;
+ int ret = 0;
+
+ mutex_lock(&buffer->lock);
+
+ if (buffer->vmap_cnt)
+ invalidate_kernel_vmap_range(buffer->vaddr, buffer->size);
+
+ list_for_each_entry(a, &buffer->attachments, list) {
+ dma_sync_sg_for_cpu(a->dev, a->table.sgl, a->table.nents,
+ direction);
+ }
+ mutex_unlock(&buffer->lock);
+
+ return ret;
+}
+
+static int dma_heap_dma_buf_end_cpu_access(struct dma_buf *dmabuf,
+ enum dma_data_direction direction)
+{
+ struct heap_helper_buffer *buffer = dmabuf->priv;
+ struct dma_heaps_attachment *a;
+
+ mutex_lock(&buffer->lock);
+
+ if (buffer->vmap_cnt)
+ flush_kernel_vmap_range(buffer->vaddr, buffer->size);
+
+ list_for_each_entry(a, &buffer->attachments, list) {
+ dma_sync_sg_for_device(a->dev, a->table.sgl, a->table.nents,
+ direction);
+ }
+ mutex_unlock(&buffer->lock);
+
+ return 0;
+}
+
+static void *dma_heap_dma_buf_vmap(struct dma_buf *dmabuf)
+{
+ struct heap_helper_buffer *buffer = dmabuf->priv;
+ void *vaddr;
+
+ mutex_lock(&buffer->lock);
+ vaddr = dma_heap_buffer_vmap_get(buffer);
+ mutex_unlock(&buffer->lock);
+
+ return vaddr;
+}
+
+static void dma_heap_dma_buf_vunmap(struct dma_buf *dmabuf, void *vaddr)
+{
+ struct heap_helper_buffer *buffer = dmabuf->priv;
+
+ mutex_lock(&buffer->lock);
+ dma_heap_buffer_vmap_put(buffer);
+ mutex_unlock(&buffer->lock);
+}
+
+const struct dma_buf_ops heap_helper_ops = {
+ .map_dma_buf = dma_heap_map_dma_buf,
+ .unmap_dma_buf = dma_heap_unmap_dma_buf,
+ .mmap = dma_heap_mmap,
+ .release = dma_heap_dma_buf_release,
+ .attach = dma_heap_attach,
+ .detach = dma_heap_detach,
+ .begin_cpu_access = dma_heap_dma_buf_begin_cpu_access,
+ .end_cpu_access = dma_heap_dma_buf_end_cpu_access,
+ .vmap = dma_heap_dma_buf_vmap,
+ .vunmap = dma_heap_dma_buf_vunmap,
+};
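For reference, the importer side these ops serve is the standard in-kernel dma-buf attach/map path. A hedged sketch, with "dev" and "fd" assumed to come from the importing driver:

static struct sg_table *import_heap_buffer(struct device *dev, int fd,
					   struct dma_buf_attachment **out)
{
	struct dma_buf *dmabuf;
	struct dma_buf_attachment *attach;
	struct sg_table *sgt;

	dmabuf = dma_buf_get(fd);		/* takes a reference */
	if (IS_ERR(dmabuf))
		return ERR_CAST(dmabuf);

	attach = dma_buf_attach(dmabuf, dev);	/* -> dma_heap_attach() */
	if (IS_ERR(attach)) {
		dma_buf_put(dmabuf);
		return ERR_CAST(attach);
	}

	/* -> dma_heap_map_dma_buf() above */
	sgt = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL);
	if (!IS_ERR(sgt))
		*out = attach;
	return sgt;
}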
diff --git a/drivers/dma-buf/heaps/heap-helpers.h b/drivers/dma-buf/heaps/heap-helpers.h
new file mode 100644
index 000000000000..805d2df88024
--- /dev/null
+++ b/drivers/dma-buf/heaps/heap-helpers.h
@@ -0,0 +1,53 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * DMABUF Heaps helper code
+ *
+ * Copyright (C) 2011 Google, Inc.
+ * Copyright (C) 2019 Linaro Ltd.
+ */
+
+#ifndef _HEAP_HELPERS_H
+#define _HEAP_HELPERS_H
+
+#include <linux/dma-heap.h>
+#include <linux/list.h>
+
+/**
+ * struct heap_helper_buffer - helper buffer metadata
+ * @heap: back pointer to the heap the buffer came from
+ * @dmabuf: backing dma-buf for this buffer
+ * @size: size of the buffer
+ * @priv_virt: pointer to heap specific private value
+ * @lock: mutex to protect the data in this structure
+ * @vmap_cnt: count of vmap references on the buffer
+ * @vaddr: vmap'ed virtual address
+ * @pagecount: number of pages in the buffer
+ * @pages: list of page pointers
+ * @attachments: list of device attachments
+ * @free: heap callback to free the buffer
+ */
+struct heap_helper_buffer {
+ struct dma_heap *heap;
+ struct dma_buf *dmabuf;
+ size_t size;
+
+ void *priv_virt;
+ struct mutex lock;
+ int vmap_cnt;
+ void *vaddr;
+ pgoff_t pagecount;
+ struct page **pages;
+ struct list_head attachments;
+
+ void (*free)(struct heap_helper_buffer *buffer);
+};
+
+void init_heap_helper_buffer(struct heap_helper_buffer *buffer,
+ void (*free)(struct heap_helper_buffer *));
+
+struct dma_buf *heap_helper_export_dmabuf(struct heap_helper_buffer *buffer,
+ int fd_flags);
+
+extern const struct dma_buf_ops heap_helper_ops;
+#endif /* _HEAP_HELPERS_H */
diff --git a/drivers/dma-buf/heaps/system_heap.c b/drivers/dma-buf/heaps/system_heap.c
new file mode 100644
index 000000000000..0bf688e3c023
--- /dev/null
+++ b/drivers/dma-buf/heaps/system_heap.c
@@ -0,0 +1,123 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * DMABUF System heap exporter
+ *
+ * Copyright (C) 2011 Google, Inc.
+ * Copyright (C) 2019 Linaro Ltd.
+ */
+
+#include <linux/dma-buf.h>
+#include <linux/dma-mapping.h>
+#include <linux/dma-heap.h>
+#include <linux/err.h>
+#include <linux/highmem.h>
+#include <linux/mm.h>
+#include <linux/module.h>
+#include <linux/scatterlist.h>
+#include <linux/slab.h>
+#include <linux/sched/signal.h>
+#include <asm/page.h>
+
+#include "heap-helpers.h"
+
+struct dma_heap *sys_heap;
+
+static void system_heap_free(struct heap_helper_buffer *buffer)
+{
+ pgoff_t pg;
+
+ for (pg = 0; pg < buffer->pagecount; pg++)
+ __free_page(buffer->pages[pg]);
+ kfree(buffer->pages);
+ kfree(buffer);
+}
+
+static int system_heap_allocate(struct dma_heap *heap,
+ unsigned long len,
+ unsigned long fd_flags,
+ unsigned long heap_flags)
+{
+ struct heap_helper_buffer *helper_buffer;
+ struct dma_buf *dmabuf;
+ int ret = -ENOMEM;
+ pgoff_t pg;
+
+ helper_buffer = kzalloc(sizeof(*helper_buffer), GFP_KERNEL);
+ if (!helper_buffer)
+ return -ENOMEM;
+
+ init_heap_helper_buffer(helper_buffer, system_heap_free);
+ helper_buffer->heap = heap;
+ helper_buffer->size = len;
+
+ helper_buffer->pagecount = len / PAGE_SIZE;
+ helper_buffer->pages = kmalloc_array(helper_buffer->pagecount,
+ sizeof(*helper_buffer->pages),
+ GFP_KERNEL);
+ if (!helper_buffer->pages) {
+ ret = -ENOMEM;
+ goto err0;
+ }
+
+ for (pg = 0; pg < helper_buffer->pagecount; pg++) {
+ /*
+ * Avoid trying to allocate memory if the process
+		 * has been killed by SIGKILL
+ */
+ if (fatal_signal_pending(current))
+ goto err1;
+
+ helper_buffer->pages[pg] = alloc_page(GFP_KERNEL | __GFP_ZERO);
+ if (!helper_buffer->pages[pg])
+ goto err1;
+ }
+
+ /* create the dmabuf */
+ dmabuf = heap_helper_export_dmabuf(helper_buffer, fd_flags);
+ if (IS_ERR(dmabuf)) {
+ ret = PTR_ERR(dmabuf);
+ goto err1;
+ }
+
+ helper_buffer->dmabuf = dmabuf;
+
+ ret = dma_buf_fd(dmabuf, fd_flags);
+ if (ret < 0) {
+ dma_buf_put(dmabuf);
+ /* just return, as put will call release and that will free */
+ return ret;
+ }
+
+ return ret;
+
+err1:
+ while (pg > 0)
+ __free_page(helper_buffer->pages[--pg]);
+ kfree(helper_buffer->pages);
+err0:
+ kfree(helper_buffer);
+
+ return ret;
+}
+
+static const struct dma_heap_ops system_heap_ops = {
+ .allocate = system_heap_allocate,
+};
+
+static int system_heap_create(void)
+{
+ struct dma_heap_export_info exp_info;
+ int ret = 0;
+
+ exp_info.name = "system";
+ exp_info.ops = &system_heap_ops;
+ exp_info.priv = NULL;
+
+ sys_heap = dma_heap_add(&exp_info);
+ if (IS_ERR(sys_heap))
+ ret = PTR_ERR(sys_heap);
+
+ return ret;
+}
+module_init(system_heap_create);
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/dma-buf/udmabuf.c b/drivers/dma-buf/udmabuf.c
index 9635897458a0..acb26c627d27 100644
--- a/drivers/dma-buf/udmabuf.c
+++ b/drivers/dma-buf/udmabuf.c
@@ -18,6 +18,8 @@ static const size_t size_limit_mb = 64; /* total dmabuf size, in megabytes */
struct udmabuf {
pgoff_t pagecount;
struct page **pages;
+ struct sg_table *sg;
+ struct miscdevice *device;
};
static vm_fault_t udmabuf_vm_fault(struct vm_fault *vmf)
@@ -46,10 +48,10 @@ static int mmap_udmabuf(struct dma_buf *buf, struct vm_area_struct *vma)
return 0;
}
-static struct sg_table *map_udmabuf(struct dma_buf_attachment *at,
- enum dma_data_direction direction)
+static struct sg_table *get_sg_table(struct device *dev, struct dma_buf *buf,
+ enum dma_data_direction direction)
{
- struct udmabuf *ubuf = at->dmabuf->priv;
+ struct udmabuf *ubuf = buf->priv;
struct sg_table *sg;
int ret;
@@ -61,7 +63,7 @@ static struct sg_table *map_udmabuf(struct dma_buf_attachment *at,
GFP_KERNEL);
if (ret < 0)
goto err;
- if (!dma_map_sg(at->dev, sg->sgl, sg->nents, direction)) {
+ if (!dma_map_sg(dev, sg->sgl, sg->nents, direction)) {
ret = -EINVAL;
goto err;
}
@@ -73,54 +75,89 @@ err:
return ERR_PTR(ret);
}
+static void put_sg_table(struct device *dev, struct sg_table *sg,
+ enum dma_data_direction direction)
+{
+ dma_unmap_sg(dev, sg->sgl, sg->nents, direction);
+ sg_free_table(sg);
+ kfree(sg);
+}
+
+static struct sg_table *map_udmabuf(struct dma_buf_attachment *at,
+ enum dma_data_direction direction)
+{
+ return get_sg_table(at->dev, at->dmabuf, direction);
+}
+
static void unmap_udmabuf(struct dma_buf_attachment *at,
struct sg_table *sg,
enum dma_data_direction direction)
{
- dma_unmap_sg(at->dev, sg->sgl, sg->nents, direction);
- sg_free_table(sg);
- kfree(sg);
+ return put_sg_table(at->dev, sg, direction);
}
static void release_udmabuf(struct dma_buf *buf)
{
struct udmabuf *ubuf = buf->priv;
+ struct device *dev = ubuf->device->this_device;
pgoff_t pg;
+ if (ubuf->sg)
+ put_sg_table(dev, ubuf->sg, DMA_BIDIRECTIONAL);
+
for (pg = 0; pg < ubuf->pagecount; pg++)
put_page(ubuf->pages[pg]);
kfree(ubuf->pages);
kfree(ubuf);
}
-static void *kmap_udmabuf(struct dma_buf *buf, unsigned long page_num)
+static int begin_cpu_udmabuf(struct dma_buf *buf,
+ enum dma_data_direction direction)
{
struct udmabuf *ubuf = buf->priv;
- struct page *page = ubuf->pages[page_num];
+ struct device *dev = ubuf->device->this_device;
+
+ if (!ubuf->sg) {
+ ubuf->sg = get_sg_table(dev, buf, direction);
+ if (IS_ERR(ubuf->sg))
+ return PTR_ERR(ubuf->sg);
+ } else {
+ dma_sync_sg_for_cpu(dev, ubuf->sg->sgl, ubuf->sg->nents,
+ direction);
+ }
- return kmap(page);
+ return 0;
}
-static void kunmap_udmabuf(struct dma_buf *buf, unsigned long page_num,
- void *vaddr)
+static int end_cpu_udmabuf(struct dma_buf *buf,
+ enum dma_data_direction direction)
{
- kunmap(vaddr);
+ struct udmabuf *ubuf = buf->priv;
+ struct device *dev = ubuf->device->this_device;
+
+ if (!ubuf->sg)
+ return -EINVAL;
+
+ dma_sync_sg_for_device(dev, ubuf->sg->sgl, ubuf->sg->nents, direction);
+ return 0;
}
static const struct dma_buf_ops udmabuf_ops = {
- .map_dma_buf = map_udmabuf,
- .unmap_dma_buf = unmap_udmabuf,
- .release = release_udmabuf,
- .map = kmap_udmabuf,
- .unmap = kunmap_udmabuf,
- .mmap = mmap_udmabuf,
+ .cache_sgt_mapping = true,
+ .map_dma_buf = map_udmabuf,
+ .unmap_dma_buf = unmap_udmabuf,
+ .release = release_udmabuf,
+ .mmap = mmap_udmabuf,
+ .begin_cpu_access = begin_cpu_udmabuf,
+ .end_cpu_access = end_cpu_udmabuf,
};
#define SEALS_WANTED (F_SEAL_SHRINK)
#define SEALS_DENIED (F_SEAL_WRITE)
-static long udmabuf_create(const struct udmabuf_create_list *head,
- const struct udmabuf_create_item *list)
+static long udmabuf_create(struct miscdevice *device,
+ struct udmabuf_create_list *head,
+ struct udmabuf_create_item *list)
{
DEFINE_DMA_BUF_EXPORT_INFO(exp_info);
struct file *memfd = NULL;
@@ -187,6 +224,7 @@ static long udmabuf_create(const struct udmabuf_create_list *head,
exp_info.priv = ubuf;
exp_info.flags = O_RDWR;
+ ubuf->device = device;
buf = dma_buf_export(&exp_info);
if (IS_ERR(buf)) {
ret = PTR_ERR(buf);
@@ -224,7 +262,7 @@ static long udmabuf_ioctl_create(struct file *filp, unsigned long arg)
list.offset = create.offset;
list.size = create.size;
- return udmabuf_create(&head, &list);
+ return udmabuf_create(filp->private_data, &head, &list);
}
static long udmabuf_ioctl_create_list(struct file *filp, unsigned long arg)
@@ -243,7 +281,7 @@ static long udmabuf_ioctl_create_list(struct file *filp, unsigned long arg)
if (IS_ERR(list))
return PTR_ERR(list);
- ret = udmabuf_create(&head, list);
+ ret = udmabuf_create(filp->private_data, &head, list);
kfree(list);
return ret;
}
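The userspace contract is unchanged by this patch: a udmabuf is still created from a sealed memfd via the UDMABUF_CREATE ioctl. A minimal sketch per the existing UAPI in include/uapi/linux/udmabuf.h (error handling trimmed for brevity):

#define _GNU_SOURCE	/* memfd_create(), F_ADD_SEALS */
#include <fcntl.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <unistd.h>
#include <linux/udmabuf.h>

static int make_udmabuf(size_t size)
{
	struct udmabuf_create create;
	int memfd, devfd, buffd;

	memfd = memfd_create("udmabuf-backing", MFD_ALLOW_SEALING);
	ftruncate(memfd, size);
	fcntl(memfd, F_ADD_SEALS, F_SEAL_SHRINK);	/* SEALS_WANTED */

	devfd = open("/dev/udmabuf", O_RDWR);
	create.memfd  = memfd;
	create.flags  = UDMABUF_FLAGS_CLOEXEC;
	create.offset = 0;
	create.size   = size;
	buffd = ioctl(devfd, UDMABUF_CREATE, &create);	/* dma-buf fd */

	close(devfd);
	close(memfd);
	return buffd;
}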
diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig
index 6fa1eba9d477..5142da401db3 100644
--- a/drivers/dma/Kconfig
+++ b/drivers/dma/Kconfig
@@ -239,6 +239,14 @@ config FSL_RAID
the capability to offload memcpy, xor and pq computation
for raid5/6.
+config HISI_DMA
+ tristate "HiSilicon DMA Engine support"
+ depends on ARM64 || (COMPILE_TEST && PCI_MSI)
+ select DMA_ENGINE
+ select DMA_VIRTUAL_CHANNELS
+ help
+	  Support the HiSilicon Kunpeng DMA engine.
+
config IMG_MDC_DMA
tristate "IMG MDC support"
depends on MIPS || COMPILE_TEST
@@ -273,6 +281,19 @@ config INTEL_IDMA64
Enable DMA support for Intel Low Power Subsystem such as found on
Intel Skylake PCH.
+config INTEL_IDXD
+ tristate "Intel Data Accelerators support"
+ depends on PCI && X86_64
+ select DMA_ENGINE
+ select SBITMAP
+ help
+ Enable support for the Intel(R) data accelerators present
+	  in Intel Xeon CPUs.
+
+ Say Y if you have such a platform.
+
+ If unsure, say N.
+
config INTEL_IOATDMA
tristate "Intel I/OAT DMA support"
depends on PCI && X86_64
@@ -497,6 +518,15 @@ config PXA_DMA
16 to 32 channels for peripheral to memory or memory to memory
transfers.
+config PLX_DMA
+ tristate "PLX ExpressLane PEX Switch DMA Engine Support"
+ depends on PCI
+ select DMA_ENGINE
+ help
+ Some PLX ExpressLane PCI Switches support additional DMA engines.
+ These are exposed via extra functions on the switch's
+ upstream port. Each function exposes one DMA channel.
+
config SIRF_DMA
tristate "CSR SiRFprimaII/SiRFmarco DMA support"
depends on ARCH_SIRF
diff --git a/drivers/dma/Makefile b/drivers/dma/Makefile
index 42d7e2fc64fa..1d908394fbea 100644
--- a/drivers/dma/Makefile
+++ b/drivers/dma/Makefile
@@ -35,12 +35,14 @@ obj-$(CONFIG_FSL_EDMA) += fsl-edma.o fsl-edma-common.o
obj-$(CONFIG_MCF_EDMA) += mcf-edma.o fsl-edma-common.o
obj-$(CONFIG_FSL_QDMA) += fsl-qdma.o
obj-$(CONFIG_FSL_RAID) += fsl_raid.o
+obj-$(CONFIG_HISI_DMA) += hisi_dma.o
obj-$(CONFIG_HSU_DMA) += hsu/
obj-$(CONFIG_IMG_MDC_DMA) += img-mdc-dma.o
obj-$(CONFIG_IMX_DMA) += imx-dma.o
obj-$(CONFIG_IMX_SDMA) += imx-sdma.o
obj-$(CONFIG_INTEL_IDMA64) += idma64.o
obj-$(CONFIG_INTEL_IOATDMA) += ioat/
+obj-$(CONFIG_INTEL_IDXD) += idxd/
obj-$(CONFIG_INTEL_IOP_ADMA) += iop-adma.o
obj-$(CONFIG_INTEL_MIC_X100_DMA) += mic_x100_dma.o
obj-$(CONFIG_K3_DMA) += k3dma.o
@@ -59,6 +61,7 @@ obj-$(CONFIG_NBPFAXI_DMA) += nbpfaxi.o
obj-$(CONFIG_OWL_DMA) += owl-dma.o
obj-$(CONFIG_PCH_DMA) += pch_dma.o
obj-$(CONFIG_PL330_DMA) += pl330.o
+obj-$(CONFIG_PLX_DMA) += plx_dma.o
obj-$(CONFIG_PPC_BESTCOMM) += bestcomm/
obj-$(CONFIG_PXA_DMA) += pxa_dma.o
obj-$(CONFIG_RENESAS_DMA) += sh/
diff --git a/drivers/dma/altera-msgdma.c b/drivers/dma/altera-msgdma.c
index 832aefbe7af9..539e785039ca 100644
--- a/drivers/dma/altera-msgdma.c
+++ b/drivers/dma/altera-msgdma.c
@@ -772,10 +772,10 @@ static int request_and_map(struct platform_device *pdev, const char *name,
return -EBUSY;
}
- *ptr = devm_ioremap_nocache(device, region->start,
+ *ptr = devm_ioremap(device, region->start,
resource_size(region));
if (*ptr == NULL) {
- dev_err(device, "ioremap_nocache of %s failed!", name);
+ dev_err(device, "ioremap of %s failed!", name);
return -ENOMEM;
}
diff --git a/drivers/dma/bcm2835-dma.c b/drivers/dma/bcm2835-dma.c
index e4c593f48575..4768ef26013b 100644
--- a/drivers/dma/bcm2835-dma.c
+++ b/drivers/dma/bcm2835-dma.c
@@ -797,10 +797,7 @@ static int bcm2835_dma_terminate_all(struct dma_chan *chan)
/* stop DMA activity */
if (c->desc) {
- if (c->desc->vd.tx.flags & DMA_PREP_INTERRUPT)
- vchan_terminate_vdesc(&c->desc->vd);
- else
- vchan_vdesc_fini(&c->desc->vd);
+ vchan_terminate_vdesc(&c->desc->vd);
c->desc = NULL;
bcm2835_dma_abort(c);
}
diff --git a/drivers/dma/dma-axi-dmac.c b/drivers/dma/dma-axi-dmac.c
index a0ee404b736e..f1d149e32839 100644
--- a/drivers/dma/dma-axi-dmac.c
+++ b/drivers/dma/dma-axi-dmac.c
@@ -830,6 +830,7 @@ static int axi_dmac_probe(struct platform_device *pdev)
struct dma_device *dma_dev;
struct axi_dmac *dmac;
struct resource *res;
+ struct regmap *regmap;
int ret;
dmac = devm_kzalloc(&pdev->dev, sizeof(*dmac), GFP_KERNEL);
@@ -921,10 +922,17 @@ static int axi_dmac_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, dmac);
- devm_regmap_init_mmio(&pdev->dev, dmac->base, &axi_dmac_regmap_config);
+ regmap = devm_regmap_init_mmio(&pdev->dev, dmac->base,
+ &axi_dmac_regmap_config);
+ if (IS_ERR(regmap)) {
+ ret = PTR_ERR(regmap);
+ goto err_free_irq;
+ }
return 0;
+err_free_irq:
+ free_irq(dmac->irq, dmac);
err_unregister_of:
of_dma_controller_free(pdev->dev.of_node);
err_unregister_device:
diff --git a/drivers/dma/dma-jz4780.c b/drivers/dma/dma-jz4780.c
index 44af435628f8..448f663da89c 100644
--- a/drivers/dma/dma-jz4780.c
+++ b/drivers/dma/dma-jz4780.c
@@ -1021,12 +1021,19 @@ static const struct jz4780_dma_soc_data x1000_dma_soc_data = {
.flags = JZ_SOC_DATA_PROGRAMMABLE_DMA,
};
+static const struct jz4780_dma_soc_data x1830_dma_soc_data = {
+ .nb_channels = 32,
+ .transfer_ord_max = 7,
+ .flags = JZ_SOC_DATA_PROGRAMMABLE_DMA,
+};
+
static const struct of_device_id jz4780_dma_dt_match[] = {
{ .compatible = "ingenic,jz4740-dma", .data = &jz4740_dma_soc_data },
{ .compatible = "ingenic,jz4725b-dma", .data = &jz4725b_dma_soc_data },
{ .compatible = "ingenic,jz4770-dma", .data = &jz4770_dma_soc_data },
{ .compatible = "ingenic,jz4780-dma", .data = &jz4780_dma_soc_data },
{ .compatible = "ingenic,x1000-dma", .data = &x1000_dma_soc_data },
+ { .compatible = "ingenic,x1830-dma", .data = &x1830_dma_soc_data },
{},
};
MODULE_DEVICE_TABLE(of, jz4780_dma_dt_match);
diff --git a/drivers/dma/dmaengine.c b/drivers/dma/dmaengine.c
index 03ac4b96117c..f3ef4edd4de1 100644
--- a/drivers/dma/dmaengine.c
+++ b/drivers/dma/dmaengine.c
@@ -60,6 +60,8 @@ static long dmaengine_ref_count;
/* --- sysfs implementation --- */
+#define DMA_SLAVE_NAME "slave"
+
/**
* dev_to_dma_chan - convert a device pointer to its sysfs container object
* @dev - device node
@@ -164,11 +166,152 @@ static struct class dma_devclass = {
/* --- client and device registration --- */
-#define dma_device_satisfies_mask(device, mask) \
- __dma_device_satisfies_mask((device), &(mask))
-static int
-__dma_device_satisfies_mask(struct dma_device *device,
- const dma_cap_mask_t *want)
+/**
+ * dma_cap_mask_all - enable iteration over all operation types
+ */
+static dma_cap_mask_t dma_cap_mask_all;
+
+/**
+ * dma_chan_tbl_ent - tracks channel allocations per core/operation
+ * @chan - associated channel for this entry
+ */
+struct dma_chan_tbl_ent {
+ struct dma_chan *chan;
+};
+
+/**
+ * channel_table - percpu lookup table for memory-to-memory offload providers
+ */
+static struct dma_chan_tbl_ent __percpu *channel_table[DMA_TX_TYPE_END];
+
+static int __init dma_channel_table_init(void)
+{
+ enum dma_transaction_type cap;
+ int err = 0;
+
+ bitmap_fill(dma_cap_mask_all.bits, DMA_TX_TYPE_END);
+
+ /* 'interrupt', 'private', and 'slave' are channel capabilities,
+ * but are not associated with an operation so they do not need
+ * an entry in the channel_table
+ */
+ clear_bit(DMA_INTERRUPT, dma_cap_mask_all.bits);
+ clear_bit(DMA_PRIVATE, dma_cap_mask_all.bits);
+ clear_bit(DMA_SLAVE, dma_cap_mask_all.bits);
+
+ for_each_dma_cap_mask(cap, dma_cap_mask_all) {
+ channel_table[cap] = alloc_percpu(struct dma_chan_tbl_ent);
+ if (!channel_table[cap]) {
+ err = -ENOMEM;
+ break;
+ }
+ }
+
+ if (err) {
+ pr_err("dmaengine dma_channel_table_init failure: %d\n", err);
+ for_each_dma_cap_mask(cap, dma_cap_mask_all)
+ free_percpu(channel_table[cap]);
+ }
+
+ return err;
+}
+arch_initcall(dma_channel_table_init);
+
+/**
+ * dma_chan_is_local - returns true if the channel is in the same numa-node as
+ * the cpu
+ */
+static bool dma_chan_is_local(struct dma_chan *chan, int cpu)
+{
+ int node = dev_to_node(chan->device->dev);
+ return node == NUMA_NO_NODE ||
+ cpumask_test_cpu(cpu, cpumask_of_node(node));
+}
+
+/**
+ * min_chan - returns the channel with min count and in the same numa-node as
+ * the cpu
+ * @cap: capability to match
+ * @cpu: cpu index which the channel should be close to
+ *
+ * If some channels are close to the given cpu, the one with the lowest
+ * reference count is returned. Otherwise, cpu is ignored and only the
+ * reference count is taken into account.
+ * Must be called under dma_list_mutex.
+ */
+static struct dma_chan *min_chan(enum dma_transaction_type cap, int cpu)
+{
+ struct dma_device *device;
+ struct dma_chan *chan;
+ struct dma_chan *min = NULL;
+ struct dma_chan *localmin = NULL;
+
+ list_for_each_entry(device, &dma_device_list, global_node) {
+ if (!dma_has_cap(cap, device->cap_mask) ||
+ dma_has_cap(DMA_PRIVATE, device->cap_mask))
+ continue;
+ list_for_each_entry(chan, &device->channels, device_node) {
+ if (!chan->client_count)
+ continue;
+ if (!min || chan->table_count < min->table_count)
+ min = chan;
+
+ if (dma_chan_is_local(chan, cpu))
+ if (!localmin ||
+ chan->table_count < localmin->table_count)
+ localmin = chan;
+ }
+ }
+
+ chan = localmin ? localmin : min;
+
+ if (chan)
+ chan->table_count++;
+
+ return chan;
+}
+
+/**
+ * dma_channel_rebalance - redistribute the available channels
+ *
+ * Optimize for cpu isolation (each cpu gets a dedicated channel for an
+ * operation type) in the SMP case, and operation isolation (avoid
+ * multi-tasking channels) in the non-SMP case. Must be called under
+ * dma_list_mutex.
+ */
+static void dma_channel_rebalance(void)
+{
+ struct dma_chan *chan;
+ struct dma_device *device;
+ int cpu;
+ int cap;
+
+ /* undo the last distribution */
+ for_each_dma_cap_mask(cap, dma_cap_mask_all)
+ for_each_possible_cpu(cpu)
+ per_cpu_ptr(channel_table[cap], cpu)->chan = NULL;
+
+ list_for_each_entry(device, &dma_device_list, global_node) {
+ if (dma_has_cap(DMA_PRIVATE, device->cap_mask))
+ continue;
+ list_for_each_entry(chan, &device->channels, device_node)
+ chan->table_count = 0;
+ }
+
+ /* don't populate the channel_table if no clients are available */
+ if (!dmaengine_ref_count)
+ return;
+
+ /* redistribute available channels */
+ for_each_dma_cap_mask(cap, dma_cap_mask_all)
+ for_each_online_cpu(cpu) {
+ chan = min_chan(cap, cpu);
+ per_cpu_ptr(channel_table[cap], cpu)->chan = chan;
+ }
+}
+
+static int dma_device_satisfies_mask(struct dma_device *device,
+ const dma_cap_mask_t *want)
{
dma_cap_mask_t has;
@@ -179,7 +322,7 @@ __dma_device_satisfies_mask(struct dma_device *device,
static struct module *dma_chan_to_owner(struct dma_chan *chan)
{
- return chan->device->dev->driver->owner;
+ return chan->device->owner;
}
/**
@@ -198,6 +341,23 @@ static void balance_ref_count(struct dma_chan *chan)
}
}
+static void dma_device_release(struct kref *ref)
+{
+ struct dma_device *device = container_of(ref, struct dma_device, ref);
+
+ list_del_rcu(&device->global_node);
+ dma_channel_rebalance();
+
+ if (device->device_release)
+ device->device_release(device);
+}
+
+static void dma_device_put(struct dma_device *device)
+{
+ lockdep_assert_held(&dma_list_mutex);
+ kref_put(&device->ref, dma_device_release);
+}
+
/**
* dma_chan_get - try to grab a dma channel's parent driver module
* @chan - channel to grab
@@ -218,6 +378,12 @@ static int dma_chan_get(struct dma_chan *chan)
if (!try_module_get(owner))
return -ENODEV;
+ ret = kref_get_unless_zero(&chan->device->ref);
+ if (!ret) {
+ ret = -ENODEV;
+ goto module_put_out;
+ }
+
/* allocate upon first client reference */
if (chan->device->device_alloc_chan_resources) {
ret = chan->device->device_alloc_chan_resources(chan);
@@ -233,6 +399,8 @@ out:
return 0;
err_out:
+ dma_device_put(chan->device);
+module_put_out:
module_put(owner);
return ret;
}
@@ -250,7 +418,6 @@ static void dma_chan_put(struct dma_chan *chan)
return;
chan->client_count--;
- module_put(dma_chan_to_owner(chan));
/* This channel is not in use anymore, free it */
if (!chan->client_count && chan->device->device_free_chan_resources) {
@@ -265,6 +432,9 @@ static void dma_chan_put(struct dma_chan *chan)
chan->router = NULL;
chan->route_data = NULL;
}
+
+ dma_device_put(chan->device);
+ module_put(dma_chan_to_owner(chan));
}
enum dma_status dma_sync_wait(struct dma_chan *chan, dma_cookie_t cookie)
@@ -289,57 +459,6 @@ enum dma_status dma_sync_wait(struct dma_chan *chan, dma_cookie_t cookie)
EXPORT_SYMBOL(dma_sync_wait);
/**
- * dma_cap_mask_all - enable iteration over all operation types
- */
-static dma_cap_mask_t dma_cap_mask_all;
-
-/**
- * dma_chan_tbl_ent - tracks channel allocations per core/operation
- * @chan - associated channel for this entry
- */
-struct dma_chan_tbl_ent {
- struct dma_chan *chan;
-};
-
-/**
- * channel_table - percpu lookup table for memory-to-memory offload providers
- */
-static struct dma_chan_tbl_ent __percpu *channel_table[DMA_TX_TYPE_END];
-
-static int __init dma_channel_table_init(void)
-{
- enum dma_transaction_type cap;
- int err = 0;
-
- bitmap_fill(dma_cap_mask_all.bits, DMA_TX_TYPE_END);
-
- /* 'interrupt', 'private', and 'slave' are channel capabilities,
- * but are not associated with an operation so they do not need
- * an entry in the channel_table
- */
- clear_bit(DMA_INTERRUPT, dma_cap_mask_all.bits);
- clear_bit(DMA_PRIVATE, dma_cap_mask_all.bits);
- clear_bit(DMA_SLAVE, dma_cap_mask_all.bits);
-
- for_each_dma_cap_mask(cap, dma_cap_mask_all) {
- channel_table[cap] = alloc_percpu(struct dma_chan_tbl_ent);
- if (!channel_table[cap]) {
- err = -ENOMEM;
- break;
- }
- }
-
- if (err) {
- pr_err("initialization failure\n");
- for_each_dma_cap_mask(cap, dma_cap_mask_all)
- free_percpu(channel_table[cap]);
- }
-
- return err;
-}
-arch_initcall(dma_channel_table_init);
-
-/**
* dma_find_channel - find a channel to carry out the operation
* @tx_type: transaction type
*/
@@ -369,97 +488,6 @@ void dma_issue_pending_all(void)
}
EXPORT_SYMBOL(dma_issue_pending_all);
-/**
- * dma_chan_is_local - returns true if the channel is in the same numa-node as the cpu
- */
-static bool dma_chan_is_local(struct dma_chan *chan, int cpu)
-{
- int node = dev_to_node(chan->device->dev);
- return node == NUMA_NO_NODE ||
- cpumask_test_cpu(cpu, cpumask_of_node(node));
-}
-
-/**
- * min_chan - returns the channel with min count and in the same numa-node as the cpu
- * @cap: capability to match
- * @cpu: cpu index which the channel should be close to
- *
- * If some channels are close to the given cpu, the one with the lowest
- * reference count is returned. Otherwise, cpu is ignored and only the
- * reference count is taken into account.
- * Must be called under dma_list_mutex.
- */
-static struct dma_chan *min_chan(enum dma_transaction_type cap, int cpu)
-{
- struct dma_device *device;
- struct dma_chan *chan;
- struct dma_chan *min = NULL;
- struct dma_chan *localmin = NULL;
-
- list_for_each_entry(device, &dma_device_list, global_node) {
- if (!dma_has_cap(cap, device->cap_mask) ||
- dma_has_cap(DMA_PRIVATE, device->cap_mask))
- continue;
- list_for_each_entry(chan, &device->channels, device_node) {
- if (!chan->client_count)
- continue;
- if (!min || chan->table_count < min->table_count)
- min = chan;
-
- if (dma_chan_is_local(chan, cpu))
- if (!localmin ||
- chan->table_count < localmin->table_count)
- localmin = chan;
- }
- }
-
- chan = localmin ? localmin : min;
-
- if (chan)
- chan->table_count++;
-
- return chan;
-}
-
-/**
- * dma_channel_rebalance - redistribute the available channels
- *
- * Optimize for cpu isolation (each cpu gets a dedicated channel for an
- * operation type) in the SMP case, and operation isolation (avoid
- * multi-tasking channels) in the non-SMP case. Must be called under
- * dma_list_mutex.
- */
-static void dma_channel_rebalance(void)
-{
- struct dma_chan *chan;
- struct dma_device *device;
- int cpu;
- int cap;
-
- /* undo the last distribution */
- for_each_dma_cap_mask(cap, dma_cap_mask_all)
- for_each_possible_cpu(cpu)
- per_cpu_ptr(channel_table[cap], cpu)->chan = NULL;
-
- list_for_each_entry(device, &dma_device_list, global_node) {
- if (dma_has_cap(DMA_PRIVATE, device->cap_mask))
- continue;
- list_for_each_entry(chan, &device->channels, device_node)
- chan->table_count = 0;
- }
-
- /* don't populate the channel_table if no clients are available */
- if (!dmaengine_ref_count)
- return;
-
- /* redistribute available channels */
- for_each_dma_cap_mask(cap, dma_cap_mask_all)
- for_each_online_cpu(cpu) {
- chan = min_chan(cap, cpu);
- per_cpu_ptr(channel_table[cap], cpu)->chan = chan;
- }
-}
-
int dma_get_slave_caps(struct dma_chan *chan, struct dma_slave_caps *caps)
{
struct dma_device *device;
@@ -502,7 +530,7 @@ static struct dma_chan *private_candidate(const dma_cap_mask_t *mask,
{
struct dma_chan *chan;
- if (mask && !__dma_device_satisfies_mask(dev, mask)) {
+ if (mask && !dma_device_satisfies_mask(dev, mask)) {
dev_dbg(dev->dev, "%s: wrong capabilities\n", __func__);
return NULL;
}
@@ -704,11 +732,11 @@ struct dma_chan *dma_request_chan(struct device *dev, const char *name)
if (has_acpi_companion(dev) && !chan)
chan = acpi_dma_request_slave_chan_by_name(dev, name);
- if (chan) {
- /* Valid channel found or requester needs to be deferred */
- if (!IS_ERR(chan) || PTR_ERR(chan) == -EPROBE_DEFER)
- return chan;
- }
+ if (PTR_ERR(chan) == -EPROBE_DEFER)
+ return chan;
+
+ if (!IS_ERR_OR_NULL(chan))
+ goto found;
/* Try to find the channel via the DMA filter map(s) */
mutex_lock(&dma_list_mutex);
@@ -728,7 +756,23 @@ struct dma_chan *dma_request_chan(struct device *dev, const char *name)
}
mutex_unlock(&dma_list_mutex);
- return chan ? chan : ERR_PTR(-EPROBE_DEFER);
+ if (!IS_ERR_OR_NULL(chan))
+ goto found;
+
+ return ERR_PTR(-EPROBE_DEFER);
+
+found:
+ chan->slave = dev;
+ chan->name = kasprintf(GFP_KERNEL, "dma:%s", name);
+ if (!chan->name)
+ return ERR_PTR(-ENOMEM);
+
+ if (sysfs_create_link(&chan->dev->device.kobj, &dev->kobj,
+ DMA_SLAVE_NAME))
+ dev_err(dev, "Cannot create DMA %s symlink\n", DMA_SLAVE_NAME);
+ if (sysfs_create_link(&dev->kobj, &chan->dev->device.kobj, chan->name))
+ dev_err(dev, "Cannot create DMA %s symlink\n", chan->name);
+ return chan;
}
EXPORT_SYMBOL_GPL(dma_request_chan);
@@ -786,6 +830,13 @@ void dma_release_channel(struct dma_chan *chan)
/* drop PRIVATE cap enabled by __dma_request_channel() */
if (--chan->device->privatecnt == 0)
dma_cap_clear(DMA_PRIVATE, chan->device->cap_mask);
+ if (chan->slave) {
+ sysfs_remove_link(&chan->slave->kobj, chan->name);
+ kfree(chan->name);
+ chan->name = NULL;
+ chan->slave = NULL;
+ }
+ sysfs_remove_link(&chan->dev->device.kobj, DMA_SLAVE_NAME);
mutex_unlock(&dma_list_mutex);
}
EXPORT_SYMBOL_GPL(dma_release_channel);
@@ -834,14 +885,14 @@ EXPORT_SYMBOL(dmaengine_get);
*/
void dmaengine_put(void)
{
- struct dma_device *device;
+ struct dma_device *device, *_d;
struct dma_chan *chan;
mutex_lock(&dma_list_mutex);
dmaengine_ref_count--;
BUG_ON(dmaengine_ref_count < 0);
/* drop channel references */
- list_for_each_entry(device, &dma_device_list, global_node) {
+ list_for_each_entry_safe(device, _d, &dma_device_list, global_node) {
if (dma_has_cap(DMA_PRIVATE, device->cap_mask))
continue;
list_for_each_entry(chan, &device->channels, device_node)
@@ -900,15 +951,115 @@ static int get_dma_id(struct dma_device *device)
return 0;
}
+static int __dma_async_device_channel_register(struct dma_device *device,
+ struct dma_chan *chan,
+ int chan_id)
+{
+ int rc = 0;
+ int chancnt = device->chancnt;
+ atomic_t *idr_ref;
+ struct dma_chan *tchan;
+
+ tchan = list_first_entry_or_null(&device->channels,
+ struct dma_chan, device_node);
+ if (tchan->dev) {
+ idr_ref = tchan->dev->idr_ref;
+ } else {
+ idr_ref = kmalloc(sizeof(*idr_ref), GFP_KERNEL);
+ if (!idr_ref)
+ return -ENOMEM;
+ atomic_set(idr_ref, 0);
+ }
+
+ chan->local = alloc_percpu(typeof(*chan->local));
+ if (!chan->local)
+ goto err_out;
+ chan->dev = kzalloc(sizeof(*chan->dev), GFP_KERNEL);
+ if (!chan->dev) {
+ free_percpu(chan->local);
+ chan->local = NULL;
+ goto err_out;
+ }
+
+ /*
+ * When the chan_id is a negative value, we are dynamically adding
+	 * the channel. Otherwise we are enumerating statically.
+ */
+ chan->chan_id = chan_id < 0 ? chancnt : chan_id;
+ chan->dev->device.class = &dma_devclass;
+ chan->dev->device.parent = device->dev;
+ chan->dev->chan = chan;
+ chan->dev->idr_ref = idr_ref;
+ chan->dev->dev_id = device->dev_id;
+ atomic_inc(idr_ref);
+ dev_set_name(&chan->dev->device, "dma%dchan%d",
+ device->dev_id, chan->chan_id);
+
+ rc = device_register(&chan->dev->device);
+ if (rc)
+ goto err_out;
+ chan->client_count = 0;
+ device->chancnt = chan->chan_id + 1;
+
+ return 0;
+
+ err_out:
+ free_percpu(chan->local);
+ kfree(chan->dev);
+ if (atomic_dec_return(idr_ref) == 0)
+ kfree(idr_ref);
+ return rc;
+}
+
+int dma_async_device_channel_register(struct dma_device *device,
+ struct dma_chan *chan)
+{
+ int rc;
+
+ rc = __dma_async_device_channel_register(device, chan, -1);
+ if (rc < 0)
+ return rc;
+
+ dma_channel_rebalance();
+ return 0;
+}
+EXPORT_SYMBOL_GPL(dma_async_device_channel_register);
+
+static void __dma_async_device_channel_unregister(struct dma_device *device,
+ struct dma_chan *chan)
+{
+ WARN_ONCE(!device->device_release && chan->client_count,
+ "%s called while %d clients hold a reference\n",
+ __func__, chan->client_count);
+ mutex_lock(&dma_list_mutex);
+ list_del(&chan->device_node);
+ device->chancnt--;
+ chan->dev->chan = NULL;
+ mutex_unlock(&dma_list_mutex);
+ device_unregister(&chan->dev->device);
+ free_percpu(chan->local);
+}
+
+void dma_async_device_channel_unregister(struct dma_device *device,
+ struct dma_chan *chan)
+{
+ __dma_async_device_channel_unregister(device, chan);
+ dma_channel_rebalance();
+}
+EXPORT_SYMBOL_GPL(dma_async_device_channel_unregister);
+
/**
* dma_async_device_register - registers DMA devices found
* @device: &dma_device
+ *
+ * After calling this routine the structure should not be freed except in the
+ * device_release() callback which will be called after
+ * dma_async_device_unregister() is called and no further references are taken.
*/
int dma_async_device_register(struct dma_device *device)
{
- int chancnt = 0, rc;
+ int rc, i = 0;
struct dma_chan* chan;
- atomic_t *idr_ref;
if (!device)
return -ENODEV;
@@ -919,6 +1070,8 @@ int dma_async_device_register(struct dma_device *device)
return -EIO;
}
+ device->owner = device->dev->driver->owner;
+
if (dma_has_cap(DMA_MEMCPY, device->cap_mask) && !device->device_prep_dma_memcpy) {
dev_err(device->dev,
"Device claims capability %s, but op is not defined\n",
@@ -994,65 +1147,29 @@ int dma_async_device_register(struct dma_device *device)
return -EIO;
}
+ if (!device->device_release)
+ dev_warn(device->dev,
+ "WARN: Device release is not defined so it is not safe to unbind this driver while in use\n");
+
+ kref_init(&device->ref);
+
/* note: this only matters in the
* CONFIG_ASYNC_TX_ENABLE_CHANNEL_SWITCH=n case
*/
if (device_has_all_tx_types(device))
dma_cap_set(DMA_ASYNC_TX, device->cap_mask);
- idr_ref = kmalloc(sizeof(*idr_ref), GFP_KERNEL);
- if (!idr_ref)
- return -ENOMEM;
rc = get_dma_id(device);
- if (rc != 0) {
- kfree(idr_ref);
+ if (rc != 0)
return rc;
- }
-
- atomic_set(idr_ref, 0);
/* represent channels in sysfs. Probably want devs too */
list_for_each_entry(chan, &device->channels, device_node) {
- rc = -ENOMEM;
- chan->local = alloc_percpu(typeof(*chan->local));
- if (chan->local == NULL)
- goto err_out;
- chan->dev = kzalloc(sizeof(*chan->dev), GFP_KERNEL);
- if (chan->dev == NULL) {
- free_percpu(chan->local);
- chan->local = NULL;
+ rc = __dma_async_device_channel_register(device, chan, i++);
+ if (rc < 0)
goto err_out;
- }
-
- chan->chan_id = chancnt++;
- chan->dev->device.class = &dma_devclass;
- chan->dev->device.parent = device->dev;
- chan->dev->chan = chan;
- chan->dev->idr_ref = idr_ref;
- chan->dev->dev_id = device->dev_id;
- atomic_inc(idr_ref);
- dev_set_name(&chan->dev->device, "dma%dchan%d",
- device->dev_id, chan->chan_id);
-
- rc = device_register(&chan->dev->device);
- if (rc) {
- free_percpu(chan->local);
- chan->local = NULL;
- kfree(chan->dev);
- atomic_dec(idr_ref);
- goto err_out;
- }
- chan->client_count = 0;
- }
-
- if (!chancnt) {
- dev_err(device->dev, "%s: device has no channels!\n", __func__);
- rc = -ENODEV;
- goto err_out;
}
- device->chancnt = chancnt;
-
mutex_lock(&dma_list_mutex);
/* take references on public channels */
if (dmaengine_ref_count && !dma_has_cap(DMA_PRIVATE, device->cap_mask))
@@ -1080,9 +1197,8 @@ int dma_async_device_register(struct dma_device *device)
err_out:
/* if we never registered a channel just release the idr */
- if (atomic_read(idr_ref) == 0) {
+ if (!device->chancnt) {
ida_free(&dma_ida, device->dev_id);
- kfree(idr_ref);
return rc;
}
@@ -1108,23 +1224,20 @@ EXPORT_SYMBOL(dma_async_device_register);
*/
void dma_async_device_unregister(struct dma_device *device)
{
- struct dma_chan *chan;
+ struct dma_chan *chan, *n;
+
+ list_for_each_entry_safe(chan, n, &device->channels, device_node)
+ __dma_async_device_channel_unregister(device, chan);
mutex_lock(&dma_list_mutex);
- list_del_rcu(&device->global_node);
+ /*
+ * setting DMA_PRIVATE ensures the device being torn down will not
+ * be used in the channel_table
+ */
+ dma_cap_set(DMA_PRIVATE, device->cap_mask);
dma_channel_rebalance();
+ dma_device_put(device);
mutex_unlock(&dma_list_mutex);
-
- list_for_each_entry(chan, &device->channels, device_node) {
- WARN_ONCE(chan->client_count,
- "%s called while %d clients hold a reference\n",
- __func__, chan->client_count);
- mutex_lock(&dma_list_mutex);
- chan->dev->chan = NULL;
- mutex_unlock(&dma_list_mutex);
- device_unregister(&chan->dev->device);
- free_percpu(chan->local);
- }
}
EXPORT_SYMBOL(dma_async_device_unregister);
@@ -1302,6 +1415,79 @@ void dma_async_tx_descriptor_init(struct dma_async_tx_descriptor *tx,
}
EXPORT_SYMBOL(dma_async_tx_descriptor_init);
+static inline int desc_check_and_set_metadata_mode(
+ struct dma_async_tx_descriptor *desc, enum dma_desc_metadata_mode mode)
+{
+ /* Make sure that the metadata mode is not mixed */
+ if (!desc->desc_metadata_mode) {
+ if (dmaengine_is_metadata_mode_supported(desc->chan, mode))
+ desc->desc_metadata_mode = mode;
+ else
+ return -ENOTSUPP;
+ } else if (desc->desc_metadata_mode != mode) {
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+int dmaengine_desc_attach_metadata(struct dma_async_tx_descriptor *desc,
+ void *data, size_t len)
+{
+ int ret;
+
+ if (!desc)
+ return -EINVAL;
+
+ ret = desc_check_and_set_metadata_mode(desc, DESC_METADATA_CLIENT);
+ if (ret)
+ return ret;
+
+ if (!desc->metadata_ops || !desc->metadata_ops->attach)
+ return -ENOTSUPP;
+
+ return desc->metadata_ops->attach(desc, data, len);
+}
+EXPORT_SYMBOL_GPL(dmaengine_desc_attach_metadata);
+
+void *dmaengine_desc_get_metadata_ptr(struct dma_async_tx_descriptor *desc,
+ size_t *payload_len, size_t *max_len)
+{
+ int ret;
+
+ if (!desc)
+ return ERR_PTR(-EINVAL);
+
+ ret = desc_check_and_set_metadata_mode(desc, DESC_METADATA_ENGINE);
+ if (ret)
+ return ERR_PTR(ret);
+
+ if (!desc->metadata_ops || !desc->metadata_ops->get_ptr)
+ return ERR_PTR(-ENOTSUPP);
+
+ return desc->metadata_ops->get_ptr(desc, payload_len, max_len);
+}
+EXPORT_SYMBOL_GPL(dmaengine_desc_get_metadata_ptr);
+
+int dmaengine_desc_set_metadata_len(struct dma_async_tx_descriptor *desc,
+ size_t payload_len)
+{
+ int ret;
+
+ if (!desc)
+ return -EINVAL;
+
+ ret = desc_check_and_set_metadata_mode(desc, DESC_METADATA_ENGINE);
+ if (ret)
+ return ret;
+
+ if (!desc->metadata_ops || !desc->metadata_ops->set_len)
+ return -ENOTSUPP;
+
+ return desc->metadata_ops->set_len(desc, payload_len);
+}
+EXPORT_SYMBOL_GPL(dmaengine_desc_set_metadata_len);
+
/* dma_wait_for_async_tx - spin wait for a transaction to complete
* @tx: in-flight transaction to wait on
*/
@@ -1373,5 +1559,3 @@ static int __init dma_bus_init(void)
return class_register(&dma_devclass);
}
arch_initcall(dma_bus_init);
-
-
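A client-side sketch of the metadata API added above, in DESC_METADATA_CLIENT mode: the metadata buffer is attached to a prepared descriptor before submission. "chan", "buf", "len" and the metadata buffer are assumed to come from the client driver:

static int submit_with_metadata(struct dma_chan *chan, dma_addr_t buf,
				size_t len, void *md, size_t md_len)
{
	struct dma_async_tx_descriptor *desc;
	dma_cookie_t cookie;
	int ret;

	desc = dmaengine_prep_slave_single(chan, buf, len, DMA_MEM_TO_DEV,
					   DMA_PREP_INTERRUPT);
	if (!desc)
		return -ENOMEM;

	/* fails with -ENOTSUPP if the channel cannot do client metadata */
	ret = dmaengine_desc_attach_metadata(desc, md, md_len);
	if (ret)
		return ret;

	cookie = dmaengine_submit(desc);
	if (dma_submit_error(cookie))
		return dma_submit_error(cookie);

	dma_async_issue_pending(chan);
	return 0;
}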
diff --git a/drivers/dma/dmaengine.h b/drivers/dma/dmaengine.h
index 501c0b063f85..e8a320c9e57c 100644
--- a/drivers/dma/dmaengine.h
+++ b/drivers/dma/dmaengine.h
@@ -77,6 +77,7 @@ static inline enum dma_status dma_cookie_status(struct dma_chan *chan,
state->last = complete;
state->used = used;
state->residue = 0;
+ state->in_flight_bytes = 0;
}
return dma_async_is_complete(cookie, complete, used);
}
@@ -87,6 +88,13 @@ static inline void dma_set_residue(struct dma_tx_state *state, u32 residue)
state->residue = residue;
}
+static inline void dma_set_in_flight_bytes(struct dma_tx_state *state,
+ u32 in_flight_bytes)
+{
+ if (state)
+ state->in_flight_bytes = in_flight_bytes;
+}
+
struct dmaengine_desc_callback {
dma_async_tx_callback callback;
dma_async_tx_callback_result callback_result;
@@ -171,4 +179,7 @@ dmaengine_desc_callback_valid(struct dmaengine_desc_callback *cb)
return (cb->callback) ? true : false;
}
+struct dma_chan *dma_get_slave_channel(struct dma_chan *chan);
+struct dma_chan *dma_get_any_slave_channel(struct dma_device *device);
+
#endif
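On the provider side, the new in_flight_bytes field would typically be filled in alongside the residue from a driver's device_tx_status callback. A sketch, where foo_get_residue() and foo_get_in_flight() are hypothetical stand-ins for driver-specific bookkeeping:

static enum dma_status foo_tx_status(struct dma_chan *c, dma_cookie_t cookie,
				     struct dma_tx_state *txstate)
{
	enum dma_status ret;

	ret = dma_cookie_status(c, cookie, txstate);
	if (ret == DMA_COMPLETE || !txstate)
		return ret;

	dma_set_residue(txstate, foo_get_residue(c, cookie));
	dma_set_in_flight_bytes(txstate, foo_get_in_flight(c, cookie));
	return ret;
}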
diff --git a/drivers/dma/dw-axi-dmac/dw-axi-dmac-platform.c b/drivers/dma/dw-axi-dmac/dw-axi-dmac-platform.c
index a1ce307c502f..14c1ac26f866 100644
--- a/drivers/dma/dw-axi-dmac/dw-axi-dmac-platform.c
+++ b/drivers/dma/dw-axi-dmac/dw-axi-dmac-platform.c
@@ -636,14 +636,10 @@ static int dma_chan_terminate_all(struct dma_chan *dchan)
vchan_get_all_descriptors(&chan->vc, &head);
- /*
- * As vchan_dma_desc_free_list can access to desc_allocated list
- * we need to call it in vc.lock context.
- */
- vchan_dma_desc_free_list(&chan->vc, &head);
-
spin_unlock_irqrestore(&chan->vc.lock, flags);
+ vchan_dma_desc_free_list(&chan->vc, &head);
+
dev_vdbg(dchan2dev(dchan), "terminated: %s\n", axi_chan_name(chan));
return 0;
diff --git a/drivers/dma/fsl-edma-common.c b/drivers/dma/fsl-edma-common.c
index b1a7ca91701a..5697c3622699 100644
--- a/drivers/dma/fsl-edma-common.c
+++ b/drivers/dma/fsl-edma-common.c
@@ -109,10 +109,15 @@ void fsl_edma_chan_mux(struct fsl_edma_chan *fsl_chan,
u32 ch = fsl_chan->vchan.chan.chan_id;
void __iomem *muxaddr;
unsigned int chans_per_mux, ch_off;
+ int endian_diff[4] = {3, 1, -1, -3};
u32 dmamux_nr = fsl_chan->edma->drvdata->dmamuxs;
chans_per_mux = fsl_chan->edma->n_chans / dmamux_nr;
ch_off = fsl_chan->vchan.chan.chan_id % chans_per_mux;
+
+ if (fsl_chan->edma->drvdata->mux_swap)
+ ch_off += endian_diff[ch_off % 4];
+
muxaddr = fsl_chan->edma->muxbase[ch / chans_per_mux];
slot = EDMAMUX_CHCFG_SOURCE(slot);
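The endian_diff[] table above reverses the byte-lane order within each 32-bit DMAMUX word ({0, 1, 2, 3} -> {3, 2, 1, 0}): offsets 0, 1, 2 and 3 are shifted by +3, +1, -1 and -3 respectively, compensating for the swapped register layout on parts that set mux_swap. An equivalent closed-form helper, for illustration only:

static unsigned int fsl_edma_mux_swap_off(unsigned int ch_off)
{
	/* reverse the position within each group of four mux registers */
	return (ch_off & ~0x3U) | (0x3U - (ch_off & 0x3U));
}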
diff --git a/drivers/dma/fsl-edma-common.h b/drivers/dma/fsl-edma-common.h
index 5eaa2902ed39..67e422590c9a 100644
--- a/drivers/dma/fsl-edma-common.h
+++ b/drivers/dma/fsl-edma-common.h
@@ -147,6 +147,7 @@ struct fsl_edma_drvdata {
enum edma_version version;
u32 dmamuxs;
bool has_dmaclk;
+ bool mux_swap;
int (*setup_irq)(struct platform_device *pdev,
struct fsl_edma_engine *fsl_edma);
};
diff --git a/drivers/dma/fsl-edma.c b/drivers/dma/fsl-edma.c
index b626c06ac2e0..eff7ebd8cf35 100644
--- a/drivers/dma/fsl-edma.c
+++ b/drivers/dma/fsl-edma.c
@@ -233,6 +233,13 @@ static struct fsl_edma_drvdata vf610_data = {
.setup_irq = fsl_edma_irq_init,
};
+static struct fsl_edma_drvdata ls1028a_data = {
+ .version = v1,
+ .dmamuxs = DMAMUX_NR,
+ .mux_swap = true,
+ .setup_irq = fsl_edma_irq_init,
+};
+
static struct fsl_edma_drvdata imx7ulp_data = {
.version = v3,
.dmamuxs = 1,
@@ -242,6 +249,7 @@ static struct fsl_edma_drvdata imx7ulp_data = {
static const struct of_device_id fsl_edma_dt_ids[] = {
{ .compatible = "fsl,vf610-edma", .data = &vf610_data},
+ { .compatible = "fsl,ls1028a-edma", .data = &ls1028a_data},
{ .compatible = "fsl,imx7ulp-edma", .data = &imx7ulp_data},
{ /* sentinel */ }
};
diff --git a/drivers/dma/fsl-qdma.c b/drivers/dma/fsl-qdma.c
index 89792083d62c..95cc0256b387 100644
--- a/drivers/dma/fsl-qdma.c
+++ b/drivers/dma/fsl-qdma.c
@@ -304,7 +304,7 @@ static void fsl_qdma_free_chan_resources(struct dma_chan *chan)
vchan_dma_desc_free_list(&fsl_chan->vchan, &head);
- if (!fsl_queue->comp_pool && !fsl_queue->comp_pool)
+ if (!fsl_queue->comp_pool && !fsl_queue->desc_pool)
return;
list_for_each_entry_safe(comp_temp, _comp_temp,
diff --git a/drivers/dma/hisi_dma.c b/drivers/dma/hisi_dma.c
new file mode 100644
index 000000000000..ed3619266a48
--- /dev/null
+++ b/drivers/dma/hisi_dma.c
@@ -0,0 +1,611 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright(c) 2019 HiSilicon Limited. */
+#include <linux/bitfield.h>
+#include <linux/dmaengine.h>
+#include <linux/init.h>
+#include <linux/iopoll.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/spinlock.h>
+#include "virt-dma.h"
+
+#define HISI_DMA_SQ_BASE_L 0x0
+#define HISI_DMA_SQ_BASE_H 0x4
+#define HISI_DMA_SQ_DEPTH 0x8
+#define HISI_DMA_SQ_TAIL_PTR 0xc
+#define HISI_DMA_CQ_BASE_L 0x10
+#define HISI_DMA_CQ_BASE_H 0x14
+#define HISI_DMA_CQ_DEPTH 0x18
+#define HISI_DMA_CQ_HEAD_PTR 0x1c
+#define HISI_DMA_CTRL0 0x20
+#define HISI_DMA_CTRL0_QUEUE_EN_S 0
+#define HISI_DMA_CTRL0_QUEUE_PAUSE_S 4
+#define HISI_DMA_CTRL1 0x24
+#define HISI_DMA_CTRL1_QUEUE_RESET_S 0
+#define HISI_DMA_Q_FSM_STS 0x30
+#define HISI_DMA_FSM_STS_MASK GENMASK(3, 0)
+#define HISI_DMA_INT_STS 0x40
+#define HISI_DMA_INT_STS_MASK GENMASK(12, 0)
+#define HISI_DMA_INT_MSK 0x44
+#define HISI_DMA_MODE 0x217c
+#define HISI_DMA_OFFSET 0x100
+
+#define HISI_DMA_MSI_NUM 30
+#define HISI_DMA_CHAN_NUM 30
+#define HISI_DMA_Q_DEPTH_VAL 1024
+
+#define PCI_BAR_2 2
+
+enum hisi_dma_mode {
+ EP = 0,
+ RC,
+};
+
+enum hisi_dma_chan_status {
+ DISABLE = -1,
+ IDLE = 0,
+ RUN,
+ CPL,
+ PAUSE,
+ HALT,
+ ABORT,
+ WAIT,
+ BUFFCLR,
+};
+
+struct hisi_dma_sqe {
+ __le32 dw0;
+#define OPCODE_MASK GENMASK(3, 0)
+#define OPCODE_SMALL_PACKAGE 0x1
+#define OPCODE_M2M 0x4
+#define LOCAL_IRQ_EN BIT(8)
+#define ATTR_SRC_MASK GENMASK(14, 12)
+ __le32 dw1;
+ __le32 dw2;
+#define ATTR_DST_MASK GENMASK(26, 24)
+ __le32 length;
+ __le64 src_addr;
+ __le64 dst_addr;
+};
+
+struct hisi_dma_cqe {
+ __le32 rsv0;
+ __le32 rsv1;
+ __le16 sq_head;
+ __le16 rsv2;
+ __le16 rsv3;
+ __le16 w0;
+#define STATUS_MASK GENMASK(15, 1)
+#define STATUS_SUCC 0x0
+#define VALID_BIT BIT(0)
+};
+
+struct hisi_dma_desc {
+ struct virt_dma_desc vd;
+ struct hisi_dma_sqe sqe;
+};
+
+struct hisi_dma_chan {
+ struct virt_dma_chan vc;
+ struct hisi_dma_dev *hdma_dev;
+ struct hisi_dma_sqe *sq;
+ struct hisi_dma_cqe *cq;
+ dma_addr_t sq_dma;
+ dma_addr_t cq_dma;
+ u32 sq_tail;
+ u32 cq_head;
+ u32 qp_num;
+ enum hisi_dma_chan_status status;
+ struct hisi_dma_desc *desc;
+};
+
+struct hisi_dma_dev {
+ struct pci_dev *pdev;
+ void __iomem *base;
+ struct dma_device dma_dev;
+ u32 chan_num;
+ u32 chan_depth;
+ struct hisi_dma_chan chan[];
+};
+
+static inline struct hisi_dma_chan *to_hisi_dma_chan(struct dma_chan *c)
+{
+ return container_of(c, struct hisi_dma_chan, vc.chan);
+}
+
+static inline struct hisi_dma_desc *to_hisi_dma_desc(struct virt_dma_desc *vd)
+{
+ return container_of(vd, struct hisi_dma_desc, vd);
+}
+
+static inline void hisi_dma_chan_write(void __iomem *base, u32 reg, u32 index,
+ u32 val)
+{
+ writel_relaxed(val, base + reg + index * HISI_DMA_OFFSET);
+}
+
+static inline void hisi_dma_update_bit(void __iomem *addr, u32 pos, bool val)
+{
+ u32 tmp;
+
+ tmp = readl_relaxed(addr);
+ tmp = val ? tmp | BIT(pos) : tmp & ~BIT(pos);
+ writel_relaxed(tmp, addr);
+}
+
+static void hisi_dma_free_irq_vectors(void *data)
+{
+ pci_free_irq_vectors(data);
+}
+
+static void hisi_dma_pause_dma(struct hisi_dma_dev *hdma_dev, u32 index,
+ bool pause)
+{
+ void __iomem *addr = hdma_dev->base + HISI_DMA_CTRL0 + index *
+ HISI_DMA_OFFSET;
+
+ hisi_dma_update_bit(addr, HISI_DMA_CTRL0_QUEUE_PAUSE_S, pause);
+}
+
+static void hisi_dma_enable_dma(struct hisi_dma_dev *hdma_dev, u32 index,
+ bool enable)
+{
+ void __iomem *addr = hdma_dev->base + HISI_DMA_CTRL0 + index *
+ HISI_DMA_OFFSET;
+
+ hisi_dma_update_bit(addr, HISI_DMA_CTRL0_QUEUE_EN_S, enable);
+}
+
+static void hisi_dma_mask_irq(struct hisi_dma_dev *hdma_dev, u32 qp_index)
+{
+ hisi_dma_chan_write(hdma_dev->base, HISI_DMA_INT_MSK, qp_index,
+ HISI_DMA_INT_STS_MASK);
+}
+
+static void hisi_dma_unmask_irq(struct hisi_dma_dev *hdma_dev, u32 qp_index)
+{
+ void __iomem *base = hdma_dev->base;
+
+ hisi_dma_chan_write(base, HISI_DMA_INT_STS, qp_index,
+ HISI_DMA_INT_STS_MASK);
+ hisi_dma_chan_write(base, HISI_DMA_INT_MSK, qp_index, 0);
+}
+
+static void hisi_dma_do_reset(struct hisi_dma_dev *hdma_dev, u32 index)
+{
+ void __iomem *addr = hdma_dev->base + HISI_DMA_CTRL1 + index *
+ HISI_DMA_OFFSET;
+
+ hisi_dma_update_bit(addr, HISI_DMA_CTRL1_QUEUE_RESET_S, 1);
+}
+
+static void hisi_dma_reset_qp_point(struct hisi_dma_dev *hdma_dev, u32 index)
+{
+ hisi_dma_chan_write(hdma_dev->base, HISI_DMA_SQ_TAIL_PTR, index, 0);
+ hisi_dma_chan_write(hdma_dev->base, HISI_DMA_CQ_HEAD_PTR, index, 0);
+}
+
+static void hisi_dma_reset_hw_chan(struct hisi_dma_chan *chan)
+{
+ struct hisi_dma_dev *hdma_dev = chan->hdma_dev;
+ u32 index = chan->qp_num, tmp;
+ int ret;
+
+ hisi_dma_pause_dma(hdma_dev, index, true);
+ hisi_dma_enable_dma(hdma_dev, index, false);
+ hisi_dma_mask_irq(hdma_dev, index);
+
+ ret = readl_relaxed_poll_timeout(hdma_dev->base +
+ HISI_DMA_Q_FSM_STS + index * HISI_DMA_OFFSET, tmp,
+ FIELD_GET(HISI_DMA_FSM_STS_MASK, tmp) != RUN, 10, 1000);
+ if (ret) {
+ dev_err(&hdma_dev->pdev->dev, "disable channel timeout!\n");
+ WARN_ON(1);
+ }
+
+ hisi_dma_do_reset(hdma_dev, index);
+ hisi_dma_reset_qp_point(hdma_dev, index);
+ hisi_dma_pause_dma(hdma_dev, index, false);
+ hisi_dma_enable_dma(hdma_dev, index, true);
+ hisi_dma_unmask_irq(hdma_dev, index);
+
+ ret = readl_relaxed_poll_timeout(hdma_dev->base +
+ HISI_DMA_Q_FSM_STS + index * HISI_DMA_OFFSET, tmp,
+ FIELD_GET(HISI_DMA_FSM_STS_MASK, tmp) == IDLE, 10, 1000);
+ if (ret) {
+ dev_err(&hdma_dev->pdev->dev, "reset channel timeout!\n");
+ WARN_ON(1);
+ }
+}
+
+static void hisi_dma_free_chan_resources(struct dma_chan *c)
+{
+ struct hisi_dma_chan *chan = to_hisi_dma_chan(c);
+ struct hisi_dma_dev *hdma_dev = chan->hdma_dev;
+
+ hisi_dma_reset_hw_chan(chan);
+ vchan_free_chan_resources(&chan->vc);
+
+ memset(chan->sq, 0, sizeof(struct hisi_dma_sqe) * hdma_dev->chan_depth);
+ memset(chan->cq, 0, sizeof(struct hisi_dma_cqe) * hdma_dev->chan_depth);
+ chan->sq_tail = 0;
+ chan->cq_head = 0;
+ chan->status = DISABLE;
+}
+
+static void hisi_dma_desc_free(struct virt_dma_desc *vd)
+{
+ kfree(to_hisi_dma_desc(vd));
+}
+
+static struct dma_async_tx_descriptor *
+hisi_dma_prep_dma_memcpy(struct dma_chan *c, dma_addr_t dst, dma_addr_t src,
+ size_t len, unsigned long flags)
+{
+ struct hisi_dma_chan *chan = to_hisi_dma_chan(c);
+ struct hisi_dma_desc *desc;
+
+ desc = kzalloc(sizeof(*desc), GFP_NOWAIT);
+ if (!desc)
+ return NULL;
+
+ desc->sqe.length = cpu_to_le32(len);
+ desc->sqe.src_addr = cpu_to_le64(src);
+ desc->sqe.dst_addr = cpu_to_le64(dst);
+
+ return vchan_tx_prep(&chan->vc, &desc->vd, flags);
+}
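A memcpy client would reach this prep callback through the generic dmaengine API. A hedged sketch, with "chan" assumed to come from dma_request_chan_by_mask() with DMA_MEMCPY set:

static int demo_memcpy(struct dma_chan *chan, dma_addr_t dst, dma_addr_t src,
		       size_t len)
{
	struct dma_async_tx_descriptor *desc;
	dma_cookie_t cookie;

	desc = dmaengine_prep_dma_memcpy(chan, dst, src, len,
					 DMA_PREP_INTERRUPT);
	if (!desc)
		return -ENOMEM;

	cookie = dmaengine_submit(desc);
	dma_async_issue_pending(chan);

	/* dma_sync_wait() spins until the cookie completes */
	return dma_sync_wait(chan, cookie) == DMA_COMPLETE ? 0 : -EIO;
}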
+
+static enum dma_status
+hisi_dma_tx_status(struct dma_chan *c, dma_cookie_t cookie,
+ struct dma_tx_state *txstate)
+{
+ return dma_cookie_status(c, cookie, txstate);
+}
+
+static void hisi_dma_start_transfer(struct hisi_dma_chan *chan)
+{
+ struct hisi_dma_sqe *sqe = chan->sq + chan->sq_tail;
+ struct hisi_dma_dev *hdma_dev = chan->hdma_dev;
+ struct hisi_dma_desc *desc;
+ struct virt_dma_desc *vd;
+
+ vd = vchan_next_desc(&chan->vc);
+ if (!vd) {
+ dev_err(&hdma_dev->pdev->dev, "no issued task!\n");
+ chan->desc = NULL;
+ return;
+ }
+ list_del(&vd->node);
+ desc = to_hisi_dma_desc(vd);
+ chan->desc = desc;
+
+ memcpy(sqe, &desc->sqe, sizeof(struct hisi_dma_sqe));
+
+ /* update the remaining fields in the sqe */
+ sqe->dw0 = cpu_to_le32(FIELD_PREP(OPCODE_MASK, OPCODE_M2M));
+ sqe->dw0 |= cpu_to_le32(LOCAL_IRQ_EN);
+
+ /* make sure data has been updated in sqe */
+ wmb();
+
+ /* update sq tail, point to new sqe position */
+ chan->sq_tail = (chan->sq_tail + 1) % hdma_dev->chan_depth;
+
+ /* update sq_tail to trigger a new task */
+ hisi_dma_chan_write(hdma_dev->base, HISI_DMA_SQ_TAIL_PTR, chan->qp_num,
+ chan->sq_tail);
+}
+
+static void hisi_dma_issue_pending(struct dma_chan *c)
+{
+ struct hisi_dma_chan *chan = to_hisi_dma_chan(c);
+ unsigned long flags;
+
+ spin_lock_irqsave(&chan->vc.lock, flags);
+
+ if (vchan_issue_pending(&chan->vc))
+ hisi_dma_start_transfer(chan);
+
+ spin_unlock_irqrestore(&chan->vc.lock, flags);
+}
+
+static int hisi_dma_terminate_all(struct dma_chan *c)
+{
+ struct hisi_dma_chan *chan = to_hisi_dma_chan(c);
+ unsigned long flags;
+ LIST_HEAD(head);
+
+ spin_lock_irqsave(&chan->vc.lock, flags);
+
+ hisi_dma_pause_dma(chan->hdma_dev, chan->qp_num, true);
+ if (chan->desc) {
+ vchan_terminate_vdesc(&chan->desc->vd);
+ chan->desc = NULL;
+ }
+
+ vchan_get_all_descriptors(&chan->vc, &head);
+
+ spin_unlock_irqrestore(&chan->vc.lock, flags);
+
+ vchan_dma_desc_free_list(&chan->vc, &head);
+ hisi_dma_pause_dma(chan->hdma_dev, chan->qp_num, false);
+
+ return 0;
+}
+
+static void hisi_dma_synchronize(struct dma_chan *c)
+{
+ struct hisi_dma_chan *chan = to_hisi_dma_chan(c);
+
+ vchan_synchronize(&chan->vc);
+}
+
+static int hisi_dma_alloc_qps_mem(struct hisi_dma_dev *hdma_dev)
+{
+ size_t sq_size = sizeof(struct hisi_dma_sqe) * hdma_dev->chan_depth;
+ size_t cq_size = sizeof(struct hisi_dma_cqe) * hdma_dev->chan_depth;
+ struct device *dev = &hdma_dev->pdev->dev;
+ struct hisi_dma_chan *chan;
+ int i;
+
+ for (i = 0; i < hdma_dev->chan_num; i++) {
+ chan = &hdma_dev->chan[i];
+ chan->sq = dmam_alloc_coherent(dev, sq_size, &chan->sq_dma,
+ GFP_KERNEL);
+ if (!chan->sq)
+ return -ENOMEM;
+
+ chan->cq = dmam_alloc_coherent(dev, cq_size, &chan->cq_dma,
+ GFP_KERNEL);
+ if (!chan->cq)
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+static void hisi_dma_init_hw_qp(struct hisi_dma_dev *hdma_dev, u32 index)
+{
+ struct hisi_dma_chan *chan = &hdma_dev->chan[index];
+ u32 hw_depth = hdma_dev->chan_depth - 1;
+ void __iomem *base = hdma_dev->base;
+
+ /* set sq, cq base */
+ hisi_dma_chan_write(base, HISI_DMA_SQ_BASE_L, index,
+ lower_32_bits(chan->sq_dma));
+ hisi_dma_chan_write(base, HISI_DMA_SQ_BASE_H, index,
+ upper_32_bits(chan->sq_dma));
+ hisi_dma_chan_write(base, HISI_DMA_CQ_BASE_L, index,
+ lower_32_bits(chan->cq_dma));
+ hisi_dma_chan_write(base, HISI_DMA_CQ_BASE_H, index,
+ upper_32_bits(chan->cq_dma));
+
+ /* set sq, cq depth; the hardware registers hold (depth - 1) */
+ hisi_dma_chan_write(base, HISI_DMA_SQ_DEPTH, index, hw_depth);
+ hisi_dma_chan_write(base, HISI_DMA_CQ_DEPTH, index, hw_depth);
+
+ /* init sq tail and cq head */
+ hisi_dma_chan_write(base, HISI_DMA_SQ_TAIL_PTR, index, 0);
+ hisi_dma_chan_write(base, HISI_DMA_CQ_HEAD_PTR, index, 0);
+}
+
+static void hisi_dma_enable_qp(struct hisi_dma_dev *hdma_dev, u32 qp_index)
+{
+ hisi_dma_init_hw_qp(hdma_dev, qp_index);
+ hisi_dma_unmask_irq(hdma_dev, qp_index);
+ hisi_dma_enable_dma(hdma_dev, qp_index, true);
+}
+
+static void hisi_dma_disable_qp(struct hisi_dma_dev *hdma_dev, u32 qp_index)
+{
+ hisi_dma_reset_hw_chan(&hdma_dev->chan[qp_index]);
+}
+
+static void hisi_dma_enable_qps(struct hisi_dma_dev *hdma_dev)
+{
+ int i;
+
+ for (i = 0; i < hdma_dev->chan_num; i++) {
+ hdma_dev->chan[i].qp_num = i;
+ hdma_dev->chan[i].hdma_dev = hdma_dev;
+ hdma_dev->chan[i].vc.desc_free = hisi_dma_desc_free;
+ vchan_init(&hdma_dev->chan[i].vc, &hdma_dev->dma_dev);
+ hisi_dma_enable_qp(hdma_dev, i);
+ }
+}
+
+static void hisi_dma_disable_qps(struct hisi_dma_dev *hdma_dev)
+{
+ int i;
+
+ for (i = 0; i < hdma_dev->chan_num; i++) {
+ hisi_dma_disable_qp(hdma_dev, i);
+ tasklet_kill(&hdma_dev->chan[i].vc.task);
+ }
+}
+
+static irqreturn_t hisi_dma_irq(int irq, void *data)
+{
+ struct hisi_dma_chan *chan = data;
+ struct hisi_dma_dev *hdma_dev = chan->hdma_dev;
+ struct hisi_dma_desc *desc;
+ struct hisi_dma_cqe *cqe;
+ unsigned long flags;
+
+ spin_lock_irqsave(&chan->vc.lock, flags);
+
+ desc = chan->desc;
+ cqe = chan->cq + chan->cq_head;
+ if (desc) {
+ if (FIELD_GET(STATUS_MASK, cqe->w0) == STATUS_SUCC) {
+ chan->cq_head = (chan->cq_head + 1) %
+ hdma_dev->chan_depth;
+ hisi_dma_chan_write(hdma_dev->base,
+ HISI_DMA_CQ_HEAD_PTR, chan->qp_num,
+ chan->cq_head);
+ vchan_cookie_complete(&desc->vd);
+ } else {
+ dev_err(&hdma_dev->pdev->dev, "task error!\n");
+ }
+
+ chan->desc = NULL;
+ }
+
+ spin_unlock_irqrestore(&chan->vc.lock, flags);
+
+ return IRQ_HANDLED;
+}
+
+static int hisi_dma_request_qps_irq(struct hisi_dma_dev *hdma_dev)
+{
+ struct pci_dev *pdev = hdma_dev->pdev;
+ int i, ret;
+
+ for (i = 0; i < hdma_dev->chan_num; i++) {
+ ret = devm_request_irq(&pdev->dev, pci_irq_vector(pdev, i),
+ hisi_dma_irq, IRQF_SHARED, "hisi_dma",
+ &hdma_dev->chan[i]);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+/* This function enables all hw channels in a device */
+static int hisi_dma_enable_hw_channels(struct hisi_dma_dev *hdma_dev)
+{
+ int ret;
+
+ ret = hisi_dma_alloc_qps_mem(hdma_dev);
+ if (ret) {
+ dev_err(&hdma_dev->pdev->dev, "fail to allocate qp memory!\n");
+ return ret;
+ }
+
+ ret = hisi_dma_request_qps_irq(hdma_dev);
+ if (ret) {
+ dev_err(&hdma_dev->pdev->dev, "fail to request qp irq!\n");
+ return ret;
+ }
+
+ hisi_dma_enable_qps(hdma_dev);
+
+ return 0;
+}
+
+static void hisi_dma_disable_hw_channels(void *data)
+{
+ hisi_dma_disable_qps(data);
+}
+
+static void hisi_dma_set_mode(struct hisi_dma_dev *hdma_dev,
+ enum hisi_dma_mode mode)
+{
+ writel_relaxed(mode == RC ? 1 : 0, hdma_dev->base + HISI_DMA_MODE);
+}
+
+static int hisi_dma_probe(struct pci_dev *pdev, const struct pci_device_id *id)
+{
+ struct device *dev = &pdev->dev;
+ struct hisi_dma_dev *hdma_dev;
+ struct dma_device *dma_dev;
+ size_t dev_size;
+ int ret;
+
+ ret = pcim_enable_device(pdev);
+ if (ret) {
+ dev_err(dev, "failed to enable device mem!\n");
+ return ret;
+ }
+
+ ret = pcim_iomap_regions(pdev, 1 << PCI_BAR_2, pci_name(pdev));
+ if (ret) {
+ dev_err(dev, "failed to remap I/O region!\n");
+ return ret;
+ }
+
+ ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
+ if (ret)
+ return ret;
+
+ ret = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
+ if (ret)
+ return ret;
+
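+ /* allocate the device context together with its per-channel array */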
+ dev_size = sizeof(struct hisi_dma_chan) * HISI_DMA_CHAN_NUM +
+ sizeof(*hdma_dev);
+ hdma_dev = devm_kzalloc(dev, dev_size, GFP_KERNEL);
+ if (!hdma_dev)
+ return -ENOMEM;
+
+ hdma_dev->base = pcim_iomap_table(pdev)[PCI_BAR_2];
+ hdma_dev->pdev = pdev;
+ hdma_dev->chan_num = HISI_DMA_CHAN_NUM;
+ hdma_dev->chan_depth = HISI_DMA_Q_DEPTH_VAL;
+
+ pci_set_drvdata(pdev, hdma_dev);
+ pci_set_master(pdev);
+
+ ret = pci_alloc_irq_vectors(pdev, HISI_DMA_MSI_NUM, HISI_DMA_MSI_NUM,
+ PCI_IRQ_MSI);
+ if (ret < 0) {
+ dev_err(dev, "Failed to allocate MSI vectors!\n");
+ return ret;
+ }
+
+ ret = devm_add_action_or_reset(dev, hisi_dma_free_irq_vectors, pdev);
+ if (ret)
+ return ret;
+
+ dma_dev = &hdma_dev->dma_dev;
+ dma_cap_set(DMA_MEMCPY, dma_dev->cap_mask);
+ dma_dev->device_free_chan_resources = hisi_dma_free_chan_resources;
+ dma_dev->device_prep_dma_memcpy = hisi_dma_prep_dma_memcpy;
+ dma_dev->device_tx_status = hisi_dma_tx_status;
+ dma_dev->device_issue_pending = hisi_dma_issue_pending;
+ dma_dev->device_terminate_all = hisi_dma_terminate_all;
+ dma_dev->device_synchronize = hisi_dma_synchronize;
+ dma_dev->directions = BIT(DMA_MEM_TO_MEM);
+ dma_dev->dev = dev;
+ INIT_LIST_HEAD(&dma_dev->channels);
+
+ hisi_dma_set_mode(hdma_dev, RC);
+
+ ret = hisi_dma_enable_hw_channels(hdma_dev);
+ if (ret < 0) {
+ dev_err(dev, "failed to enable hw channel!\n");
+ return ret;
+ }
+
+ ret = devm_add_action_or_reset(dev, hisi_dma_disable_hw_channels,
+ hdma_dev);
+ if (ret)
+ return ret;
+
+ ret = dmaenginem_async_device_register(dma_dev);
+ if (ret < 0)
+ dev_err(dev, "failed to register device!\n");
+
+ return ret;
+}
+
+static const struct pci_device_id hisi_dma_pci_tbl[] = {
+ { PCI_DEVICE(PCI_VENDOR_ID_HUAWEI, 0xa122) },
+ { 0, }
+};
+
+static struct pci_driver hisi_dma_pci_driver = {
+ .name = "hisi_dma",
+ .id_table = hisi_dma_pci_tbl,
+ .probe = hisi_dma_probe,
+};
+
+module_pci_driver(hisi_dma_pci_driver);
+
+MODULE_AUTHOR("Zhou Wang <wangzhou1@hisilicon.com>");
+MODULE_AUTHOR("Zhenfa Qiu <qiuzhenfa@hisilicon.com>");
+MODULE_DESCRIPTION("HiSilicon Kunpeng DMA controller driver");
+MODULE_LICENSE("GPL v2");
+MODULE_DEVICE_TABLE(pci, hisi_dma_pci_tbl);
diff --git a/drivers/dma/idxd/Makefile b/drivers/dma/idxd/Makefile
new file mode 100644
index 000000000000..8978b898d777
--- /dev/null
+++ b/drivers/dma/idxd/Makefile
@@ -0,0 +1,2 @@
+obj-$(CONFIG_INTEL_IDXD) += idxd.o
+idxd-y := init.o irq.o device.o sysfs.o submit.o dma.o cdev.o
diff --git a/drivers/dma/idxd/cdev.c b/drivers/dma/idxd/cdev.c
new file mode 100644
index 000000000000..1d7347825b95
--- /dev/null
+++ b/drivers/dma/idxd/cdev.c
@@ -0,0 +1,302 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright(c) 2019 Intel Corporation. All rights rsvd. */
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/device.h>
+#include <linux/sched/task.h>
+#include <linux/intel-svm.h>
+#include <linux/io-64-nonatomic-lo-hi.h>
+#include <linux/cdev.h>
+#include <linux/fs.h>
+#include <linux/poll.h>
+#include <uapi/linux/idxd.h>
+#include "registers.h"
+#include "idxd.h"
+
+struct idxd_cdev_context {
+ const char *name;
+ dev_t devt;
+ struct ida minor_ida;
+};
+
+/*
+ * ictx is an array of per-accelerator-type contexts; enum idxd_type
+ * is used as the index.
+ */
+static struct idxd_cdev_context ictx[IDXD_TYPE_MAX] = {
+ { .name = "dsa" },
+};
+
+struct idxd_user_context {
+ struct idxd_wq *wq;
+ struct task_struct *task;
+ unsigned int flags;
+};
+
+enum idxd_cdev_cleanup {
+ CDEV_NORMAL = 0,
+ CDEV_FAILED,
+};
+
+static void idxd_cdev_dev_release(struct device *dev)
+{
+ dev_dbg(dev, "releasing cdev device\n");
+ kfree(dev);
+}
+
+static struct device_type idxd_cdev_device_type = {
+ .name = "idxd_cdev",
+ .release = idxd_cdev_dev_release,
+};
+
+static inline struct idxd_cdev *inode_idxd_cdev(struct inode *inode)
+{
+ struct cdev *cdev = inode->i_cdev;
+
+ return container_of(cdev, struct idxd_cdev, cdev);
+}
+
+static inline struct idxd_wq *idxd_cdev_wq(struct idxd_cdev *idxd_cdev)
+{
+ return container_of(idxd_cdev, struct idxd_wq, idxd_cdev);
+}
+
+static inline struct idxd_wq *inode_wq(struct inode *inode)
+{
+ return idxd_cdev_wq(inode_idxd_cdev(inode));
+}
+
+static int idxd_cdev_open(struct inode *inode, struct file *filp)
+{
+ struct idxd_user_context *ctx;
+ struct idxd_device *idxd;
+ struct idxd_wq *wq;
+ struct device *dev;
+
+ wq = inode_wq(inode);
+ idxd = wq->idxd;
+ dev = &idxd->pdev->dev;
+
+ dev_dbg(dev, "%s called\n", __func__);
+
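+ /* dedicated wqs restrict how many clients may open them concurrently */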
+ if (idxd_wq_refcount(wq) > 1 && wq_dedicated(wq))
+ return -EBUSY;
+
+ ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
+ if (!ctx)
+ return -ENOMEM;
+
+ ctx->wq = wq;
+ filp->private_data = ctx;
+ idxd_wq_get(wq);
+ return 0;
+}
+
+static int idxd_cdev_release(struct inode *node, struct file *filep)
+{
+ struct idxd_user_context *ctx = filep->private_data;
+ struct idxd_wq *wq = ctx->wq;
+ struct idxd_device *idxd = wq->idxd;
+ struct device *dev = &idxd->pdev->dev;
+
+ dev_dbg(dev, "%s called\n", __func__);
+ filep->private_data = NULL;
+
+ kfree(ctx);
+ idxd_wq_put(wq);
+ return 0;
+}
+
+static int check_vma(struct idxd_wq *wq, struct vm_area_struct *vma,
+ const char *func)
+{
+ struct device *dev = &wq->idxd->pdev->dev;
+
+ if ((vma->vm_end - vma->vm_start) > PAGE_SIZE) {
+ dev_info_ratelimited(dev,
+ "%s: %s: mapping too large: %lu\n",
+ current->comm, func,
+ vma->vm_end - vma->vm_start);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int idxd_cdev_mmap(struct file *filp, struct vm_area_struct *vma)
+{
+ struct idxd_user_context *ctx = filp->private_data;
+ struct idxd_wq *wq = ctx->wq;
+ struct idxd_device *idxd = wq->idxd;
+ struct pci_dev *pdev = idxd->pdev;
+ phys_addr_t base = pci_resource_start(pdev, IDXD_WQ_BAR);
+ unsigned long pfn;
+ int rc;
+
+ dev_dbg(&pdev->dev, "%s called\n", __func__);
+ rc = check_vma(wq, vma, __func__);
+ if (rc < 0)
+ return rc;
+
+ vma->vm_flags |= VM_DONTCOPY;
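+ /* map the wq's limited-access portal page to userspace */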
+ pfn = (base + idxd_get_wq_portal_full_offset(wq->id,
+ IDXD_PORTAL_LIMITED)) >> PAGE_SHIFT;
+ vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
+ vma->vm_private_data = ctx;
+
+ return io_remap_pfn_range(vma, vma->vm_start, pfn, PAGE_SIZE,
+ vma->vm_page_prot);
+}
+
+static __poll_t idxd_cdev_poll(struct file *filp,
+ struct poll_table_struct *wait)
+{
+ struct idxd_user_context *ctx = filp->private_data;
+ struct idxd_wq *wq = ctx->wq;
+ struct idxd_device *idxd = wq->idxd;
+ struct idxd_cdev *idxd_cdev = &wq->idxd_cdev;
+ unsigned long flags;
+ __poll_t out = 0;
+
+ poll_wait(filp, &idxd_cdev->err_queue, wait);
+ spin_lock_irqsave(&idxd->dev_lock, flags);
+ if (idxd->sw_err.valid)
+ out = EPOLLIN | EPOLLRDNORM;
+ spin_unlock_irqrestore(&idxd->dev_lock, flags);
+
+ return out;
+}
+
+static const struct file_operations idxd_cdev_fops = {
+ .owner = THIS_MODULE,
+ .open = idxd_cdev_open,
+ .release = idxd_cdev_release,
+ .mmap = idxd_cdev_mmap,
+ .poll = idxd_cdev_poll,
+};
+
+int idxd_cdev_get_major(struct idxd_device *idxd)
+{
+ return MAJOR(ictx[idxd->type].devt);
+}
+
+static int idxd_wq_cdev_dev_setup(struct idxd_wq *wq)
+{
+ struct idxd_device *idxd = wq->idxd;
+ struct idxd_cdev *idxd_cdev = &wq->idxd_cdev;
+ struct idxd_cdev_context *cdev_ctx;
+ struct device *dev;
+ int minor, rc;
+
+ idxd_cdev->dev = kzalloc(sizeof(*idxd_cdev->dev), GFP_KERNEL);
+ if (!idxd_cdev->dev)
+ return -ENOMEM;
+
+ dev = idxd_cdev->dev;
+ dev->parent = &idxd->pdev->dev;
+ dev_set_name(dev, "%s/wq%u.%u", idxd_get_dev_name(idxd),
+ idxd->id, wq->id);
+ dev->bus = idxd_get_bus_type(idxd);
+
+ cdev_ctx = &ictx[wq->idxd->type];
+ minor = ida_simple_get(&cdev_ctx->minor_ida, 0, MINORMASK, GFP_KERNEL);
+ if (minor < 0) {
+ rc = minor;
+ goto ida_err;
+ }
+
+ dev->devt = MKDEV(MAJOR(cdev_ctx->devt), minor);
+ dev->type = &idxd_cdev_device_type;
+ rc = device_register(dev);
+ if (rc < 0) {
+ dev_err(&idxd->pdev->dev, "device register failed\n");
+ put_device(dev);
+ goto dev_reg_err;
+ }
+ idxd_cdev->minor = minor;
+
+ return 0;
+
+ dev_reg_err:
+ ida_simple_remove(&cdev_ctx->minor_ida, MINOR(dev->devt));
+ ida_err:
+ kfree(dev);
+ idxd_cdev->dev = NULL;
+ return rc;
+}
+
+static void idxd_wq_cdev_cleanup(struct idxd_wq *wq,
+ enum idxd_cdev_cleanup cdev_state)
+{
+ struct idxd_cdev *idxd_cdev = &wq->idxd_cdev;
+ struct idxd_cdev_context *cdev_ctx;
+
+ cdev_ctx = &ictx[wq->idxd->type];
+ if (cdev_state == CDEV_NORMAL)
+ cdev_del(&idxd_cdev->cdev);
+ device_unregister(idxd_cdev->dev);
+ /*
+ * The device_type->release() callback will be invoked for the device
+ * and will free the allocated struct device, so we can simply drop
+ * our reference here.
+ */
+ ida_simple_remove(&cdev_ctx->minor_ida, idxd_cdev->minor);
+ idxd_cdev->dev = NULL;
+ idxd_cdev->minor = -1;
+}
+
+int idxd_wq_add_cdev(struct idxd_wq *wq)
+{
+ struct idxd_cdev *idxd_cdev = &wq->idxd_cdev;
+ struct cdev *cdev = &idxd_cdev->cdev;
+ struct device *dev;
+ int rc;
+
+ rc = idxd_wq_cdev_dev_setup(wq);
+ if (rc < 0)
+ return rc;
+
+ dev = idxd_cdev->dev;
+ cdev_init(cdev, &idxd_cdev_fops);
+ cdev_set_parent(cdev, &dev->kobj);
+ rc = cdev_add(cdev, dev->devt, 1);
+ if (rc) {
+ dev_dbg(&wq->idxd->pdev->dev, "cdev_add failed: %d\n", rc);
+ idxd_wq_cdev_cleanup(wq, CDEV_FAILED);
+ return rc;
+ }
+
+ init_waitqueue_head(&idxd_cdev->err_queue);
+ return 0;
+}
+
+void idxd_wq_del_cdev(struct idxd_wq *wq)
+{
+ idxd_wq_cdev_cleanup(wq, CDEV_NORMAL);
+}
+
+int idxd_cdev_register(void)
+{
+ int rc, i;
+
+ for (i = 0; i < IDXD_TYPE_MAX; i++) {
+ ida_init(&ictx[i].minor_ida);
+ rc = alloc_chrdev_region(&ictx[i].devt, 0, MINORMASK,
+ ictx[i].name);
+ if (rc)
+ return rc;
+ }
+
+ return 0;
+}
+
+void idxd_cdev_remove(void)
+{
+ int i;
+
+ for (i = 0; i < IDXD_TYPE_MAX; i++) {
+ unregister_chrdev_region(ictx[i].devt, MINORMASK);
+ ida_destroy(&ictx[i].minor_ida);
+ }
+}
diff --git a/drivers/dma/idxd/device.c b/drivers/dma/idxd/device.c
new file mode 100644
index 000000000000..ada69e722f84
--- /dev/null
+++ b/drivers/dma/idxd/device.c
@@ -0,0 +1,693 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright(c) 2019 Intel Corporation. All rights rsvd. */
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/io-64-nonatomic-lo-hi.h>
+#include <linux/dmaengine.h>
+#include <uapi/linux/idxd.h>
+#include "../dmaengine.h"
+#include "idxd.h"
+#include "registers.h"
+
+static int idxd_cmd_wait(struct idxd_device *idxd, u32 *status, int timeout);
+static int idxd_cmd_send(struct idxd_device *idxd, int cmd_code, u32 operand);
+
+/* Interrupt control bits */
+int idxd_mask_msix_vector(struct idxd_device *idxd, int vec_id)
+{
+ struct pci_dev *pdev = idxd->pdev;
+ int msixcnt = pci_msix_vec_count(pdev);
+ union msix_perm perm;
+ u32 offset;
+
+ if (vec_id < 0 || vec_id >= msixcnt)
+ return -EINVAL;
+
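+ /* each vector has an 8-byte permission entry; setting ignore masks it */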
+ offset = idxd->msix_perm_offset + vec_id * 8;
+ perm.bits = ioread32(idxd->reg_base + offset);
+ perm.ignore = 1;
+ iowrite32(perm.bits, idxd->reg_base + offset);
+
+ return 0;
+}
+
+void idxd_mask_msix_vectors(struct idxd_device *idxd)
+{
+ struct pci_dev *pdev = idxd->pdev;
+ int msixcnt = pci_msix_vec_count(pdev);
+ int i, rc;
+
+ for (i = 0; i < msixcnt; i++) {
+ rc = idxd_mask_msix_vector(idxd, i);
+ if (rc < 0)
+ dev_warn(&pdev->dev,
+ "Failed disabling msix vec %d\n", i);
+ }
+}
+
+int idxd_unmask_msix_vector(struct idxd_device *idxd, int vec_id)
+{
+ struct pci_dev *pdev = idxd->pdev;
+ int msixcnt = pci_msix_vec_count(pdev);
+ union msix_perm perm;
+ u32 offset;
+
+ if (vec_id < 0 || vec_id >= msixcnt)
+ return -EINVAL;
+
+ offset = idxd->msix_perm_offset + vec_id * 8;
+ perm.bits = ioread32(idxd->reg_base + offset);
+ perm.ignore = 0;
+ iowrite32(perm.bits, idxd->reg_base + offset);
+
+ return 0;
+}
+
+void idxd_unmask_error_interrupts(struct idxd_device *idxd)
+{
+ union genctrl_reg genctrl;
+
+ genctrl.bits = ioread32(idxd->reg_base + IDXD_GENCTRL_OFFSET);
+ genctrl.softerr_int_en = 1;
+ iowrite32(genctrl.bits, idxd->reg_base + IDXD_GENCTRL_OFFSET);
+}
+
+void idxd_mask_error_interrupts(struct idxd_device *idxd)
+{
+ union genctrl_reg genctrl;
+
+ genctrl.bits = ioread32(idxd->reg_base + IDXD_GENCTRL_OFFSET);
+ genctrl.softerr_int_en = 0;
+ iowrite32(genctrl.bits, idxd->reg_base + IDXD_GENCTRL_OFFSET);
+}
+
+static void free_hw_descs(struct idxd_wq *wq)
+{
+ int i;
+
+ for (i = 0; i < wq->num_descs; i++)
+ kfree(wq->hw_descs[i]);
+
+ kfree(wq->hw_descs);
+}
+
+static int alloc_hw_descs(struct idxd_wq *wq, int num)
+{
+ struct device *dev = &wq->idxd->pdev->dev;
+ int i;
+ int node = dev_to_node(dev);
+
+ wq->hw_descs = kcalloc_node(num, sizeof(struct dsa_hw_desc *),
+ GFP_KERNEL, node);
+ if (!wq->hw_descs)
+ return -ENOMEM;
+
+ for (i = 0; i < num; i++) {
+ wq->hw_descs[i] = kzalloc_node(sizeof(*wq->hw_descs[i]),
+ GFP_KERNEL, node);
+ if (!wq->hw_descs[i]) {
+ free_hw_descs(wq);
+ return -ENOMEM;
+ }
+ }
+
+ return 0;
+}
+
+static void free_descs(struct idxd_wq *wq)
+{
+ int i;
+
+ for (i = 0; i < wq->num_descs; i++)
+ kfree(wq->descs[i]);
+
+ kfree(wq->descs);
+}
+
+static int alloc_descs(struct idxd_wq *wq, int num)
+{
+ struct device *dev = &wq->idxd->pdev->dev;
+ int i;
+ int node = dev_to_node(dev);
+
+ wq->descs = kcalloc_node(num, sizeof(struct idxd_desc *),
+ GFP_KERNEL, node);
+ if (!wq->descs)
+ return -ENOMEM;
+
+ for (i = 0; i < num; i++) {
+ wq->descs[i] = kzalloc_node(sizeof(*wq->descs[i]),
+ GFP_KERNEL, node);
+ if (!wq->descs[i]) {
+ free_descs(wq);
+ return -ENOMEM;
+ }
+ }
+
+ return 0;
+}
+
+/* WQ control bits */
+int idxd_wq_alloc_resources(struct idxd_wq *wq)
+{
+ struct idxd_device *idxd = wq->idxd;
+ struct idxd_group *group = wq->group;
+ struct device *dev = &idxd->pdev->dev;
+ int rc, num_descs, i;
+
+ if (wq->type != IDXD_WQT_KERNEL)
+ return 0;
+
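+ /* one descriptor per wq entry plus the max in-flight descriptors per engine */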
+ num_descs = wq->size +
+ idxd->hw.gen_cap.max_descs_per_engine * group->num_engines;
+ wq->num_descs = num_descs;
+
+ rc = alloc_hw_descs(wq, num_descs);
+ if (rc < 0)
+ return rc;
+
+ wq->compls_size = num_descs * sizeof(struct dsa_completion_record);
+ wq->compls = dma_alloc_coherent(dev, wq->compls_size,
+ &wq->compls_addr, GFP_KERNEL);
+ if (!wq->compls) {
+ rc = -ENOMEM;
+ goto fail_alloc_compls;
+ }
+
+ rc = alloc_descs(wq, num_descs);
+ if (rc < 0)
+ goto fail_alloc_descs;
+
+ rc = sbitmap_init_node(&wq->sbmap, num_descs, -1, GFP_KERNEL,
+ dev_to_node(dev));
+ if (rc < 0)
+ goto fail_sbitmap_init;
+
+ for (i = 0; i < num_descs; i++) {
+ struct idxd_desc *desc = wq->descs[i];
+
+ desc->hw = wq->hw_descs[i];
+ desc->completion = &wq->compls[i];
+ desc->compl_dma = wq->compls_addr +
+ sizeof(struct dsa_completion_record) * i;
+ desc->id = i;
+ desc->wq = wq;
+
+ dma_async_tx_descriptor_init(&desc->txd, &wq->dma_chan);
+ desc->txd.tx_submit = idxd_dma_tx_submit;
+ }
+
+ return 0;
+
+ fail_sbitmap_init:
+ free_descs(wq);
+ fail_alloc_descs:
+ dma_free_coherent(dev, wq->compls_size, wq->compls, wq->compls_addr);
+ fail_alloc_compls:
+ free_hw_descs(wq);
+ return rc;
+}
+
+void idxd_wq_free_resources(struct idxd_wq *wq)
+{
+ struct device *dev = &wq->idxd->pdev->dev;
+
+ if (wq->type != IDXD_WQT_KERNEL)
+ return;
+
+ free_hw_descs(wq);
+ free_descs(wq);
+ dma_free_coherent(dev, wq->compls_size, wq->compls, wq->compls_addr);
+ sbitmap_free(&wq->sbmap);
+}
+
+int idxd_wq_enable(struct idxd_wq *wq)
+{
+ struct idxd_device *idxd = wq->idxd;
+ struct device *dev = &idxd->pdev->dev;
+ u32 status;
+ int rc;
+
+ lockdep_assert_held(&idxd->dev_lock);
+
+ if (wq->state == IDXD_WQ_ENABLED) {
+ dev_dbg(dev, "WQ %d already enabled\n", wq->id);
+ return -ENXIO;
+ }
+
+ rc = idxd_cmd_send(idxd, IDXD_CMD_ENABLE_WQ, wq->id);
+ if (rc < 0)
+ return rc;
+ rc = idxd_cmd_wait(idxd, &status, IDXD_REG_TIMEOUT);
+ if (rc < 0)
+ return rc;
+
+ if (status != IDXD_CMDSTS_SUCCESS &&
+ status != IDXD_CMDSTS_ERR_WQ_ENABLED) {
+ dev_dbg(dev, "WQ enable failed: %#x\n", status);
+ return -ENXIO;
+ }
+
+ wq->state = IDXD_WQ_ENABLED;
+ dev_dbg(dev, "WQ %d enabled\n", wq->id);
+ return 0;
+}
+
+int idxd_wq_disable(struct idxd_wq *wq)
+{
+ struct idxd_device *idxd = wq->idxd;
+ struct device *dev = &idxd->pdev->dev;
+ u32 status, operand;
+ int rc;
+
+ lockdep_assert_held(&idxd->dev_lock);
+ dev_dbg(dev, "Disabling WQ %d\n", wq->id);
+
+ if (wq->state != IDXD_WQ_ENABLED) {
+ dev_dbg(dev, "WQ %d in wrong state: %d\n", wq->id, wq->state);
+ return 0;
+ }
+
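+ /* operand encodes bit (wq->id % 16) within wq-mask group (wq->id / 16) */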
+ operand = BIT(wq->id % 16) | ((wq->id / 16) << 16);
+ rc = idxd_cmd_send(idxd, IDXD_CMD_DISABLE_WQ, operand);
+ if (rc < 0)
+ return rc;
+ rc = idxd_cmd_wait(idxd, &status, IDXD_REG_TIMEOUT);
+ if (rc < 0)
+ return rc;
+
+ if (status != IDXD_CMDSTS_SUCCESS) {
+ dev_dbg(dev, "WQ disable failed: %#x\n", status);
+ return -ENXIO;
+ }
+
+ wq->state = IDXD_WQ_DISABLED;
+ dev_dbg(dev, "WQ %d disabled\n", wq->id);
+ return 0;
+}
+
+int idxd_wq_map_portal(struct idxd_wq *wq)
+{
+ struct idxd_device *idxd = wq->idxd;
+ struct pci_dev *pdev = idxd->pdev;
+ struct device *dev = &pdev->dev;
+ resource_size_t start;
+
+ start = pci_resource_start(pdev, IDXD_WQ_BAR);
+ start = start + wq->id * IDXD_PORTAL_SIZE;
+
+ wq->dportal = devm_ioremap(dev, start, IDXD_PORTAL_SIZE);
+ if (!wq->dportal)
+ return -ENOMEM;
+ dev_dbg(dev, "wq %d portal mapped at %p\n", wq->id, wq->dportal);
+
+ return 0;
+}
+
+void idxd_wq_unmap_portal(struct idxd_wq *wq)
+{
+ struct device *dev = &wq->idxd->pdev->dev;
+
+ devm_iounmap(dev, wq->dportal);
+}
+
+/* Device control bits */
+static inline bool idxd_is_enabled(struct idxd_device *idxd)
+{
+ union gensts_reg gensts;
+
+ gensts.bits = ioread32(idxd->reg_base + IDXD_GENSTATS_OFFSET);
+
+ if (gensts.state == IDXD_DEVICE_STATE_ENABLED)
+ return true;
+ return false;
+}
+
+static int idxd_cmd_wait(struct idxd_device *idxd, u32 *status, int timeout)
+{
+ u32 sts, to = timeout;
+
+ lockdep_assert_held(&idxd->dev_lock);
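+ /* poll the command status register until the ACTIVE bit clears or we time out */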
+ sts = ioread32(idxd->reg_base + IDXD_CMDSTS_OFFSET);
+ while (sts & IDXD_CMDSTS_ACTIVE && --to) {
+ cpu_relax();
+ sts = ioread32(idxd->reg_base + IDXD_CMDSTS_OFFSET);
+ }
+
+ if (to == 0 && sts & IDXD_CMDSTS_ACTIVE) {
+ dev_warn(&idxd->pdev->dev, "%s timed out!\n", __func__);
+ *status = 0;
+ return -EBUSY;
+ }
+
+ *status = sts;
+ return 0;
+}
+
+static int idxd_cmd_send(struct idxd_device *idxd, int cmd_code, u32 operand)
+{
+ union idxd_command_reg cmd;
+ int rc;
+ u32 status;
+
+ lockdep_assert_held(&idxd->dev_lock);
+ rc = idxd_cmd_wait(idxd, &status, IDXD_REG_TIMEOUT);
+ if (rc < 0)
+ return rc;
+
+ memset(&cmd, 0, sizeof(cmd));
+ cmd.cmd = cmd_code;
+ cmd.operand = operand;
+ dev_dbg(&idxd->pdev->dev, "%s: sending cmd: %#x op: %#x\n",
+ __func__, cmd_code, operand);
+ iowrite32(cmd.bits, idxd->reg_base + IDXD_CMD_OFFSET);
+
+ return 0;
+}
+
+int idxd_device_enable(struct idxd_device *idxd)
+{
+ struct device *dev = &idxd->pdev->dev;
+ int rc;
+ u32 status;
+
+ lockdep_assert_held(&idxd->dev_lock);
+ if (idxd_is_enabled(idxd)) {
+ dev_dbg(dev, "Device already enabled\n");
+ return -ENXIO;
+ }
+
+ rc = idxd_cmd_send(idxd, IDXD_CMD_ENABLE_DEVICE, 0);
+ if (rc < 0)
+ return rc;
+ rc = idxd_cmd_wait(idxd, &status, IDXD_REG_TIMEOUT);
+ if (rc < 0)
+ return rc;
+
+ /* If the command is successful or if the device was enabled */
+ if (status != IDXD_CMDSTS_SUCCESS &&
+ status != IDXD_CMDSTS_ERR_DEV_ENABLED) {
+ dev_dbg(dev, "%s: err_code: %#x\n", __func__, status);
+ return -ENXIO;
+ }
+
+ idxd->state = IDXD_DEV_ENABLED;
+ return 0;
+}
+
+int idxd_device_disable(struct idxd_device *idxd)
+{
+ struct device *dev = &idxd->pdev->dev;
+ int rc;
+ u32 status;
+
+ lockdep_assert_held(&idxd->dev_lock);
+ if (!idxd_is_enabled(idxd)) {
+ dev_dbg(dev, "Device is not enabled\n");
+ return 0;
+ }
+
+ rc = idxd_cmd_send(idxd, IDXD_CMD_DISABLE_DEVICE, 0);
+ if (rc < 0)
+ return rc;
+ rc = idxd_cmd_wait(idxd, &status, IDXD_REG_TIMEOUT);
+ if (rc < 0)
+ return rc;
+
+ /* If the command is successful or if the device was disabled */
+ if (status != IDXD_CMDSTS_SUCCESS &&
+ !(status & IDXD_CMDSTS_ERR_DIS_DEV_EN)) {
+ dev_dbg(dev, "%s: err_code: %#x\n", __func__, status);
+ rc = -ENXIO;
+ return rc;
+ }
+
+ idxd->state = IDXD_DEV_CONF_READY;
+ return 0;
+}
+
+int __idxd_device_reset(struct idxd_device *idxd)
+{
+ u32 status;
+ int rc;
+
+ rc = idxd_cmd_send(idxd, IDXD_CMD_RESET_DEVICE, 0);
+ if (rc < 0)
+ return rc;
+ rc = idxd_cmd_wait(idxd, &status, IDXD_REG_TIMEOUT);
+ if (rc < 0)
+ return rc;
+
+ return 0;
+}
+
+int idxd_device_reset(struct idxd_device *idxd)
+{
+ unsigned long flags;
+ int rc;
+
+ spin_lock_irqsave(&idxd->dev_lock, flags);
+ rc = __idxd_device_reset(idxd);
+ spin_unlock_irqrestore(&idxd->dev_lock, flags);
+ return rc;
+}
+
+/* Device configuration bits */
+static void idxd_group_config_write(struct idxd_group *group)
+{
+ struct idxd_device *idxd = group->idxd;
+ struct device *dev = &idxd->pdev->dev;
+ int i;
+ u32 grpcfg_offset;
+
+ dev_dbg(dev, "Writing group %d cfg registers\n", group->id);
+
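+ /* each group's config is a 64-byte stride: wqs[0..3], engines at +32, flags at +40 */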
+ /* setup GRPWQCFG */
+ for (i = 0; i < 4; i++) {
+ grpcfg_offset = idxd->grpcfg_offset +
+ group->id * 64 + i * sizeof(u64);
+ iowrite64(group->grpcfg.wqs[i],
+ idxd->reg_base + grpcfg_offset);
+ dev_dbg(dev, "GRPCFG wq[%d:%d: %#x]: %#llx\n",
+ group->id, i, grpcfg_offset,
+ ioread64(idxd->reg_base + grpcfg_offset));
+ }
+
+ /* setup GRPENGCFG */
+ grpcfg_offset = idxd->grpcfg_offset + group->id * 64 + 32;
+ iowrite64(group->grpcfg.engines, idxd->reg_base + grpcfg_offset);
+ dev_dbg(dev, "GRPCFG engs[%d: %#x]: %#llx\n", group->id,
+ grpcfg_offset, ioread64(idxd->reg_base + grpcfg_offset));
+
+ /* setup GRPFLAGS */
+ grpcfg_offset = idxd->grpcfg_offset + group->id * 64 + 40;
+ iowrite32(group->grpcfg.flags.bits, idxd->reg_base + grpcfg_offset);
+ dev_dbg(dev, "GRPFLAGS flags[%d: %#x]: %#x\n",
+ group->id, grpcfg_offset,
+ ioread32(idxd->reg_base + grpcfg_offset));
+}
+
+static int idxd_groups_config_write(struct idxd_device *idxd)
+{
+ union gencfg_reg reg;
+ int i;
+ struct device *dev = &idxd->pdev->dev;
+
+ /* Setup bandwidth token limit */
+ if (idxd->token_limit) {
+ reg.bits = ioread32(idxd->reg_base + IDXD_GENCFG_OFFSET);
+ reg.token_limit = idxd->token_limit;
+ iowrite32(reg.bits, idxd->reg_base + IDXD_GENCFG_OFFSET);
+ }
+
+ dev_dbg(dev, "GENCFG(%#x): %#x\n", IDXD_GENCFG_OFFSET,
+ ioread32(idxd->reg_base + IDXD_GENCFG_OFFSET));
+
+ for (i = 0; i < idxd->max_groups; i++) {
+ struct idxd_group *group = &idxd->groups[i];
+
+ idxd_group_config_write(group);
+ }
+
+ return 0;
+}
+
+static int idxd_wq_config_write(struct idxd_wq *wq)
+{
+ struct idxd_device *idxd = wq->idxd;
+ struct device *dev = &idxd->pdev->dev;
+ u32 wq_offset;
+ int i;
+
+ if (!wq->group)
+ return 0;
+
+ memset(&wq->wqcfg, 0, sizeof(union wqcfg));
+
+ /* bytes 0-3 */
+ wq->wqcfg.wq_size = wq->size;
+
+ if (wq->size == 0) {
+ dev_warn(dev, "Incorrect work queue size: 0\n");
+ return -EINVAL;
+ }
+
+ /* bytes 4-7 */
+ wq->wqcfg.wq_thresh = wq->threshold;
+
+ /* bytes 8-11 */
+ wq->wqcfg.priv = !!(wq->type == IDXD_WQT_KERNEL);
+ wq->wqcfg.mode = 1;
+
+ wq->wqcfg.priority = wq->priority;
+
+ /* bytes 12-15 */
+ wq->wqcfg.max_xfer_shift = idxd->hw.gen_cap.max_xfer_shift;
+ wq->wqcfg.max_batch_shift = idxd->hw.gen_cap.max_batch_shift;
+
+ dev_dbg(dev, "WQ %d CFGs\n", wq->id);
+ for (i = 0; i < 8; i++) {
+ wq_offset = idxd->wqcfg_offset + wq->id * 32 + i * sizeof(u32);
+ iowrite32(wq->wqcfg.bits[i], idxd->reg_base + wq_offset);
+ dev_dbg(dev, "WQ[%d][%d][%#x]: %#x\n",
+ wq->id, i, wq_offset,
+ ioread32(idxd->reg_base + wq_offset));
+ }
+
+ return 0;
+}
+
+static int idxd_wqs_config_write(struct idxd_device *idxd)
+{
+ int i, rc;
+
+ for (i = 0; i < idxd->max_wqs; i++) {
+ struct idxd_wq *wq = &idxd->wqs[i];
+
+ rc = idxd_wq_config_write(wq);
+ if (rc < 0)
+ return rc;
+ }
+
+ return 0;
+}
+
+static void idxd_group_flags_setup(struct idxd_device *idxd)
+{
+ int i;
+
+ /* TC-A 0 and TC-B 1 should be defaults */
+ for (i = 0; i < idxd->max_groups; i++) {
+ struct idxd_group *group = &idxd->groups[i];
+
+ if (group->tc_a == -1)
+ group->grpcfg.flags.tc_a = 0;
+ else
+ group->grpcfg.flags.tc_a = group->tc_a;
+ if (group->tc_b == -1)
+ group->grpcfg.flags.tc_b = 1;
+ else
+ group->grpcfg.flags.tc_b = group->tc_b;
+ group->grpcfg.flags.use_token_limit = group->use_token_limit;
+ group->grpcfg.flags.tokens_reserved = group->tokens_reserved;
+ if (group->tokens_allowed)
+ group->grpcfg.flags.tokens_allowed =
+ group->tokens_allowed;
+ else
+ group->grpcfg.flags.tokens_allowed = idxd->max_tokens;
+ }
+}
+
+static int idxd_engines_setup(struct idxd_device *idxd)
+{
+ int i, engines = 0;
+ struct idxd_engine *eng;
+ struct idxd_group *group;
+
+ for (i = 0; i < idxd->max_groups; i++) {
+ group = &idxd->groups[i];
+ group->grpcfg.engines = 0;
+ }
+
+ for (i = 0; i < idxd->max_engines; i++) {
+ eng = &idxd->engines[i];
+ group = eng->group;
+
+ if (!group)
+ continue;
+
+ group->grpcfg.engines |= BIT(eng->id);
+ engines++;
+ }
+
+ if (!engines)
+ return -EINVAL;
+
+ return 0;
+}
+
+static int idxd_wqs_setup(struct idxd_device *idxd)
+{
+ struct idxd_wq *wq;
+ struct idxd_group *group;
+ int i, j, configured = 0;
+ struct device *dev = &idxd->pdev->dev;
+
+ for (i = 0; i < idxd->max_groups; i++) {
+ group = &idxd->groups[i];
+ for (j = 0; j < 4; j++)
+ group->grpcfg.wqs[j] = 0;
+ }
+
+ for (i = 0; i < idxd->max_wqs; i++) {
+ wq = &idxd->wqs[i];
+ group = wq->group;
+
+ if (!wq->group)
+ continue;
+ if (!wq->size)
+ continue;
+
+ if (!wq_dedicated(wq)) {
+ dev_warn(dev, "No shared workqueue support.\n");
+ return -EINVAL;
+ }
+
+ group->grpcfg.wqs[wq->id / 64] |= BIT(wq->id % 64);
+ configured++;
+ }
+
+ if (configured == 0)
+ return -EINVAL;
+
+ return 0;
+}
+
+int idxd_device_config(struct idxd_device *idxd)
+{
+ int rc;
+
+ lockdep_assert_held(&idxd->dev_lock);
+ rc = idxd_wqs_setup(idxd);
+ if (rc < 0)
+ return rc;
+
+ rc = idxd_engines_setup(idxd);
+ if (rc < 0)
+ return rc;
+
+ idxd_group_flags_setup(idxd);
+
+ rc = idxd_wqs_config_write(idxd);
+ if (rc < 0)
+ return rc;
+
+ rc = idxd_groups_config_write(idxd);
+ if (rc < 0)
+ return rc;
+
+ return 0;
+}
diff --git a/drivers/dma/idxd/dma.c b/drivers/dma/idxd/dma.c
new file mode 100644
index 000000000000..c64c1429d160
--- /dev/null
+++ b/drivers/dma/idxd/dma.c
@@ -0,0 +1,217 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright(c) 2019 Intel Corporation. All rights rsvd. */
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/device.h>
+#include <linux/io-64-nonatomic-lo-hi.h>
+#include <linux/dmaengine.h>
+#include <uapi/linux/idxd.h>
+#include "../dmaengine.h"
+#include "registers.h"
+#include "idxd.h"
+
+static inline struct idxd_wq *to_idxd_wq(struct dma_chan *c)
+{
+ return container_of(c, struct idxd_wq, dma_chan);
+}
+
+void idxd_dma_complete_txd(struct idxd_desc *desc,
+ enum idxd_complete_type comp_type)
+{
+ struct dma_async_tx_descriptor *tx;
+ struct dmaengine_result res;
+ int complete = 1;
+
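+ /* translate the DSA completion status into a dmaengine result */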
+ if (desc->completion->status == DSA_COMP_SUCCESS)
+ res.result = DMA_TRANS_NOERROR;
+ else if (desc->completion->status)
+ res.result = DMA_TRANS_WRITE_FAILED;
+ else if (comp_type == IDXD_COMPLETE_ABORT)
+ res.result = DMA_TRANS_ABORTED;
+ else
+ complete = 0;
+
+ tx = &desc->txd;
+ if (complete && tx->cookie) {
+ dma_cookie_complete(tx);
+ dma_descriptor_unmap(tx);
+ dmaengine_desc_get_callback_invoke(tx, &res);
+ tx->callback = NULL;
+ tx->callback_result = NULL;
+ }
+}
+
+static void op_flag_setup(unsigned long flags, u32 *desc_flags)
+{
+ *desc_flags = IDXD_OP_FLAG_CRAV | IDXD_OP_FLAG_RCR;
+ if (flags & DMA_PREP_INTERRUPT)
+ *desc_flags |= IDXD_OP_FLAG_RCI;
+}
+
+static inline void set_completion_address(struct idxd_desc *desc,
+ u64 *compl_addr)
+{
+ *compl_addr = desc->compl_dma;
+}
+
+static inline void idxd_prep_desc_common(struct idxd_wq *wq,
+ struct dsa_hw_desc *hw, char opcode,
+ u64 addr_f1, u64 addr_f2, u64 len,
+ u64 compl, u32 flags)
+{
+ struct idxd_device *idxd = wq->idxd;
+
+ hw->flags = flags;
+ hw->opcode = opcode;
+ hw->src_addr = addr_f1;
+ hw->dst_addr = addr_f2;
+ hw->xfer_size = len;
+ hw->priv = !!(wq->type == IDXD_WQT_KERNEL);
+ hw->completion_addr = compl;
+
+ /*
+ * Descriptor completion vectors are 1-8 for MSI-X. We will
+ * round-robin through the 8 vectors.
+ */
+ wq->vec_ptr = (wq->vec_ptr % idxd->num_wq_irqs) + 1;
+ hw->int_handle = wq->vec_ptr;
+}
+
+static struct dma_async_tx_descriptor *
+idxd_dma_submit_memcpy(struct dma_chan *c, dma_addr_t dma_dest,
+ dma_addr_t dma_src, size_t len, unsigned long flags)
+{
+ struct idxd_wq *wq = to_idxd_wq(c);
+ u32 desc_flags;
+ struct idxd_device *idxd = wq->idxd;
+ struct idxd_desc *desc;
+
+ if (wq->state != IDXD_WQ_ENABLED)
+ return NULL;
+
+ if (len > idxd->max_xfer_bytes)
+ return NULL;
+
+ op_flag_setup(flags, &desc_flags);
+ desc = idxd_alloc_desc(wq, IDXD_OP_BLOCK);
+ if (IS_ERR(desc))
+ return NULL;
+
+ idxd_prep_desc_common(wq, desc->hw, DSA_OPCODE_MEMMOVE,
+ dma_src, dma_dest, len, desc->compl_dma,
+ desc_flags);
+
+ desc->txd.flags = flags;
+
+ return &desc->txd;
+}
+
+static int idxd_dma_alloc_chan_resources(struct dma_chan *chan)
+{
+ struct idxd_wq *wq = to_idxd_wq(chan);
+ struct device *dev = &wq->idxd->pdev->dev;
+
+ idxd_wq_get(wq);
+ dev_dbg(dev, "%s: client_count: %d\n", __func__,
+ idxd_wq_refcount(wq));
+ return 0;
+}
+
+static void idxd_dma_free_chan_resources(struct dma_chan *chan)
+{
+ struct idxd_wq *wq = to_idxd_wq(chan);
+ struct device *dev = &wq->idxd->pdev->dev;
+
+ idxd_wq_put(wq);
+ dev_dbg(dev, "%s: client_count: %d\n", __func__,
+ idxd_wq_refcount(wq));
+}
+
+static enum dma_status idxd_dma_tx_status(struct dma_chan *dma_chan,
+ dma_cookie_t cookie,
+ struct dma_tx_state *txstate)
+{
+ return dma_cookie_status(dma_chan, cookie, txstate);
+}
+
+/*
+ * issue_pending() does not need to do anything since tx_submit() does the job
+ * already.
+ */
+static void idxd_dma_issue_pending(struct dma_chan *dma_chan)
+{
+}
+
+dma_cookie_t idxd_dma_tx_submit(struct dma_async_tx_descriptor *tx)
+{
+ struct dma_chan *c = tx->chan;
+ struct idxd_wq *wq = to_idxd_wq(c);
+ dma_cookie_t cookie;
+ int rc;
+ struct idxd_desc *desc = container_of(tx, struct idxd_desc, txd);
+
+ cookie = dma_cookie_assign(tx);
+
+ rc = idxd_submit_desc(wq, desc);
+ if (rc < 0) {
+ idxd_free_desc(wq, desc);
+ return rc;
+ }
+
+ return cookie;
+}
+
+static void idxd_dma_release(struct dma_device *device)
+{
+}
+
+int idxd_register_dma_device(struct idxd_device *idxd)
+{
+ struct dma_device *dma = &idxd->dma_dev;
+
+ INIT_LIST_HEAD(&dma->channels);
+ dma->dev = &idxd->pdev->dev;
+
+ dma->device_release = idxd_dma_release;
+
+ if (idxd->hw.opcap.bits[0] & IDXD_OPCAP_MEMMOVE) {
+ dma_cap_set(DMA_MEMCPY, dma->cap_mask);
+ dma->device_prep_dma_memcpy = idxd_dma_submit_memcpy;
+ }
+
+ dma->device_tx_status = idxd_dma_tx_status;
+ dma->device_issue_pending = idxd_dma_issue_pending;
+ dma->device_alloc_chan_resources = idxd_dma_alloc_chan_resources;
+ dma->device_free_chan_resources = idxd_dma_free_chan_resources;
+
+ return dma_async_device_register(&idxd->dma_dev);
+}
+
+void idxd_unregister_dma_device(struct idxd_device *idxd)
+{
+ dma_async_device_unregister(&idxd->dma_dev);
+}
+
+int idxd_register_dma_channel(struct idxd_wq *wq)
+{
+ struct idxd_device *idxd = wq->idxd;
+ struct dma_device *dma = &idxd->dma_dev;
+ struct dma_chan *chan = &wq->dma_chan;
+ int rc;
+
+ memset(&wq->dma_chan, 0, sizeof(struct dma_chan));
+ chan->device = dma;
+ list_add_tail(&chan->device_node, &dma->channels);
+ rc = dma_async_device_channel_register(dma, chan);
+ if (rc < 0)
+ return rc;
+
+ return 0;
+}
+
+void idxd_unregister_dma_channel(struct idxd_wq *wq)
+{
+ dma_async_device_channel_unregister(&wq->idxd->dma_dev, &wq->dma_chan);
+}
diff --git a/drivers/dma/idxd/idxd.h b/drivers/dma/idxd/idxd.h
new file mode 100644
index 000000000000..b8f8a363b4a7
--- /dev/null
+++ b/drivers/dma/idxd/idxd.h
@@ -0,0 +1,316 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright(c) 2019 Intel Corporation. All rights rsvd. */
+#ifndef _IDXD_H_
+#define _IDXD_H_
+
+#include <linux/sbitmap.h>
+#include <linux/dmaengine.h>
+#include <linux/percpu-rwsem.h>
+#include <linux/wait.h>
+#include <linux/cdev.h>
+#include "registers.h"
+
+#define IDXD_DRIVER_VERSION "1.00"
+
+extern struct kmem_cache *idxd_desc_pool;
+
+#define IDXD_REG_TIMEOUT 50
+#define IDXD_DRAIN_TIMEOUT 5000
+
+enum idxd_type {
+ IDXD_TYPE_UNKNOWN = -1,
+ IDXD_TYPE_DSA = 0,
+ IDXD_TYPE_MAX
+};
+
+#define IDXD_NAME_SIZE 128
+
+struct idxd_device_driver {
+ struct device_driver drv;
+};
+
+struct idxd_irq_entry {
+ struct idxd_device *idxd;
+ int id;
+ struct llist_head pending_llist;
+ struct list_head work_list;
+};
+
+struct idxd_group {
+ struct device conf_dev;
+ struct idxd_device *idxd;
+ struct grpcfg grpcfg;
+ int id;
+ int num_engines;
+ int num_wqs;
+ bool use_token_limit;
+ u8 tokens_allowed;
+ u8 tokens_reserved;
+ int tc_a;
+ int tc_b;
+};
+
+#define IDXD_MAX_PRIORITY 0xf
+
+enum idxd_wq_state {
+ IDXD_WQ_DISABLED = 0,
+ IDXD_WQ_ENABLED,
+};
+
+enum idxd_wq_flag {
+ WQ_FLAG_DEDICATED = 0,
+};
+
+enum idxd_wq_type {
+ IDXD_WQT_NONE = 0,
+ IDXD_WQT_KERNEL,
+ IDXD_WQT_USER,
+};
+
+struct idxd_cdev {
+ struct cdev cdev;
+ struct device *dev;
+ int minor;
+ struct wait_queue_head err_queue;
+};
+
+#define IDXD_ALLOCATED_BATCH_SIZE 128U
+#define WQ_NAME_SIZE 1024
+#define WQ_TYPE_SIZE 10
+
+enum idxd_op_type {
+ IDXD_OP_BLOCK = 0,
+ IDXD_OP_NONBLOCK = 1,
+};
+
+enum idxd_complete_type {
+ IDXD_COMPLETE_NORMAL = 0,
+ IDXD_COMPLETE_ABORT,
+};
+
+struct idxd_wq {
+ void __iomem *dportal;
+ struct device conf_dev;
+ struct idxd_cdev idxd_cdev;
+ struct idxd_device *idxd;
+ int id;
+ enum idxd_wq_type type;
+ struct idxd_group *group;
+ int client_count;
+ struct mutex wq_lock; /* mutex for workqueue */
+ u32 size;
+ u32 threshold;
+ u32 priority;
+ enum idxd_wq_state state;
+ unsigned long flags;
+ union wqcfg wqcfg;
+ atomic_t dq_count; /* dedicated queue flow control */
+ u32 vec_ptr; /* interrupt steering */
+ struct dsa_hw_desc **hw_descs;
+ int num_descs;
+ struct dsa_completion_record *compls;
+ dma_addr_t compls_addr;
+ int compls_size;
+ struct idxd_desc **descs;
+ struct sbitmap sbmap;
+ struct dma_chan dma_chan;
+ struct percpu_rw_semaphore submit_lock;
+ wait_queue_head_t submit_waitq;
+ char name[WQ_NAME_SIZE + 1];
+};
+
+struct idxd_engine {
+ struct device conf_dev;
+ int id;
+ struct idxd_group *group;
+ struct idxd_device *idxd;
+};
+
+/* shadow registers */
+struct idxd_hw {
+ u32 version;
+ union gen_cap_reg gen_cap;
+ union wq_cap_reg wq_cap;
+ union group_cap_reg group_cap;
+ union engine_cap_reg engine_cap;
+ struct opcap opcap;
+};
+
+enum idxd_device_state {
+ IDXD_DEV_HALTED = -1,
+ IDXD_DEV_DISABLED = 0,
+ IDXD_DEV_CONF_READY,
+ IDXD_DEV_ENABLED,
+};
+
+enum idxd_device_flag {
+ IDXD_FLAG_CONFIGURABLE = 0,
+};
+
+struct idxd_device {
+ enum idxd_type type;
+ struct device conf_dev;
+ struct list_head list;
+ struct idxd_hw hw;
+ enum idxd_device_state state;
+ unsigned long flags;
+ int id;
+ int major;
+
+ struct pci_dev *pdev;
+ void __iomem *reg_base;
+
+ spinlock_t dev_lock; /* spinlock for device */
+ struct idxd_group *groups;
+ struct idxd_wq *wqs;
+ struct idxd_engine *engines;
+
+ int num_groups;
+
+ u32 msix_perm_offset;
+ u32 wqcfg_offset;
+ u32 grpcfg_offset;
+ u32 perfmon_offset;
+
+ u64 max_xfer_bytes;
+ u32 max_batch_size;
+ int max_groups;
+ int max_engines;
+ int max_tokens;
+ int max_wqs;
+ int max_wq_size;
+ int token_limit;
+ int nr_tokens; /* non-reserved tokens */
+
+ union sw_err_reg sw_err;
+
+ struct msix_entry *msix_entries;
+ int num_wq_irqs;
+ struct idxd_irq_entry *irq_entries;
+
+ struct dma_device dma_dev;
+};
+
+/* IDXD software descriptor */
+struct idxd_desc {
+ struct dsa_hw_desc *hw;
+ dma_addr_t desc_dma;
+ struct dsa_completion_record *completion;
+ dma_addr_t compl_dma;
+ struct dma_async_tx_descriptor txd;
+ struct llist_node llnode;
+ struct list_head list;
+ int id;
+ struct idxd_wq *wq;
+};
+
+#define confdev_to_idxd(dev) container_of(dev, struct idxd_device, conf_dev)
+#define confdev_to_wq(dev) container_of(dev, struct idxd_wq, conf_dev)
+
+extern struct bus_type dsa_bus_type;
+
+static inline bool wq_dedicated(struct idxd_wq *wq)
+{
+ return test_bit(WQ_FLAG_DEDICATED, &wq->flags);
+}
+
+enum idxd_portal_prot {
+ IDXD_PORTAL_UNLIMITED = 0,
+ IDXD_PORTAL_LIMITED,
+};
+
+static inline int idxd_get_wq_portal_offset(enum idxd_portal_prot prot)
+{
+ return prot * 0x1000;
+}
+
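+/*
+ * Each wq owns a stride of four portal pages in the wq BAR; the
+ * limited portal sits one 4K page after the unlimited one.
+ */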
+static inline int idxd_get_wq_portal_full_offset(int wq_id,
+ enum idxd_portal_prot prot)
+{
+ return ((wq_id * 4) << PAGE_SHIFT) + idxd_get_wq_portal_offset(prot);
+}
+
+static inline void idxd_set_type(struct idxd_device *idxd)
+{
+ struct pci_dev *pdev = idxd->pdev;
+
+ if (pdev->device == PCI_DEVICE_ID_INTEL_DSA_SPR0)
+ idxd->type = IDXD_TYPE_DSA;
+ else
+ idxd->type = IDXD_TYPE_UNKNOWN;
+}
+
+static inline void idxd_wq_get(struct idxd_wq *wq)
+{
+ wq->client_count++;
+}
+
+static inline void idxd_wq_put(struct idxd_wq *wq)
+{
+ wq->client_count--;
+}
+
+static inline int idxd_wq_refcount(struct idxd_wq *wq)
+{
+ return wq->client_count;
+}
+
+const char *idxd_get_dev_name(struct idxd_device *idxd);
+int idxd_register_bus_type(void);
+void idxd_unregister_bus_type(void);
+int idxd_setup_sysfs(struct idxd_device *idxd);
+void idxd_cleanup_sysfs(struct idxd_device *idxd);
+int idxd_register_driver(void);
+void idxd_unregister_driver(void);
+struct bus_type *idxd_get_bus_type(struct idxd_device *idxd);
+
+/* device interrupt control */
+irqreturn_t idxd_irq_handler(int vec, void *data);
+irqreturn_t idxd_misc_thread(int vec, void *data);
+irqreturn_t idxd_wq_thread(int irq, void *data);
+void idxd_mask_error_interrupts(struct idxd_device *idxd);
+void idxd_unmask_error_interrupts(struct idxd_device *idxd);
+void idxd_mask_msix_vectors(struct idxd_device *idxd);
+int idxd_mask_msix_vector(struct idxd_device *idxd, int vec_id);
+int idxd_unmask_msix_vector(struct idxd_device *idxd, int vec_id);
+
+/* device control */
+int idxd_device_enable(struct idxd_device *idxd);
+int idxd_device_disable(struct idxd_device *idxd);
+int idxd_device_reset(struct idxd_device *idxd);
+int __idxd_device_reset(struct idxd_device *idxd);
+void idxd_device_cleanup(struct idxd_device *idxd);
+int idxd_device_config(struct idxd_device *idxd);
+void idxd_device_wqs_clear_state(struct idxd_device *idxd);
+
+/* work queue control */
+int idxd_wq_alloc_resources(struct idxd_wq *wq);
+void idxd_wq_free_resources(struct idxd_wq *wq);
+int idxd_wq_enable(struct idxd_wq *wq);
+int idxd_wq_disable(struct idxd_wq *wq);
+int idxd_wq_map_portal(struct idxd_wq *wq);
+void idxd_wq_unmap_portal(struct idxd_wq *wq);
+
+/* submission */
+int idxd_submit_desc(struct idxd_wq *wq, struct idxd_desc *desc);
+struct idxd_desc *idxd_alloc_desc(struct idxd_wq *wq, enum idxd_op_type optype);
+void idxd_free_desc(struct idxd_wq *wq, struct idxd_desc *desc);
+
+/* dmaengine */
+int idxd_register_dma_device(struct idxd_device *idxd);
+void idxd_unregister_dma_device(struct idxd_device *idxd);
+int idxd_register_dma_channel(struct idxd_wq *wq);
+void idxd_unregister_dma_channel(struct idxd_wq *wq);
+void idxd_parse_completion_status(u8 status, enum dmaengine_tx_result *res);
+void idxd_dma_complete_txd(struct idxd_desc *desc,
+ enum idxd_complete_type comp_type);
+dma_cookie_t idxd_dma_tx_submit(struct dma_async_tx_descriptor *tx);
+
+/* cdev */
+int idxd_cdev_register(void);
+void idxd_cdev_remove(void);
+int idxd_cdev_get_major(struct idxd_device *idxd);
+int idxd_wq_add_cdev(struct idxd_wq *wq);
+void idxd_wq_del_cdev(struct idxd_wq *wq);
+
+#endif
diff --git a/drivers/dma/idxd/init.c b/drivers/dma/idxd/init.c
new file mode 100644
index 000000000000..7778c05deb5d
--- /dev/null
+++ b/drivers/dma/idxd/init.c
@@ -0,0 +1,533 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright(c) 2019 Intel Corporation. All rights rsvd. */
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/pci.h>
+#include <linux/interrupt.h>
+#include <linux/delay.h>
+#include <linux/dma-mapping.h>
+#include <linux/workqueue.h>
+#include <linux/aer.h>
+#include <linux/fs.h>
+#include <linux/io-64-nonatomic-lo-hi.h>
+#include <linux/device.h>
+#include <linux/idr.h>
+#include <uapi/linux/idxd.h>
+#include <linux/dmaengine.h>
+#include "../dmaengine.h"
+#include "registers.h"
+#include "idxd.h"
+
+MODULE_VERSION(IDXD_DRIVER_VERSION);
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("Intel Corporation");
+
+#define DRV_NAME "idxd"
+
+static struct idr idxd_idrs[IDXD_TYPE_MAX];
+static struct mutex idxd_idr_lock;
+
+static const struct pci_device_id idxd_pci_tbl[] = {
+ /* DSA ver 1.0 platforms */
+ { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_DSA_SPR0) },
+ { 0, }
+};
+MODULE_DEVICE_TABLE(pci, idxd_pci_tbl);
+
+static char *idxd_name[] = {
+ "dsa",
+};
+
+const char *idxd_get_dev_name(struct idxd_device *idxd)
+{
+ return idxd_name[idxd->type];
+}
+
+static int idxd_setup_interrupts(struct idxd_device *idxd)
+{
+ struct pci_dev *pdev = idxd->pdev;
+ struct device *dev = &pdev->dev;
+ struct msix_entry *msix;
+ struct idxd_irq_entry *irq_entry;
+ int i, msixcnt;
+ int rc = 0;
+
+ msixcnt = pci_msix_vec_count(pdev);
+ if (msixcnt < 0) {
+ rc = msixcnt;
+ dev_err(dev, "Not MSI-X interrupt capable.\n");
+ goto err_no_irq;
+ }
+
+ idxd->msix_entries = devm_kcalloc(dev, msixcnt,
+ sizeof(struct msix_entry), GFP_KERNEL);
+ if (!idxd->msix_entries) {
+ rc = -ENOMEM;
+ goto err_no_irq;
+ }
+
+ for (i = 0; i < msixcnt; i++)
+ idxd->msix_entries[i].entry = i;
+
+ rc = pci_enable_msix_exact(pdev, idxd->msix_entries, msixcnt);
+ if (rc) {
+ dev_err(dev, "Failed enabling %d MSIX entries.\n", msixcnt);
+ goto err_no_irq;
+ }
+ dev_dbg(dev, "Enabled %d msix vectors\n", msixcnt);
+
+ /*
+ * We implement 1 completion list per MSI-X entry except for
+ * entry 0, which is for errors and others.
+ */
+ idxd->irq_entries = devm_kcalloc(dev, msixcnt,
+ sizeof(struct idxd_irq_entry),
+ GFP_KERNEL);
+ if (!idxd->irq_entries) {
+ rc = -ENOMEM;
+ goto err_no_irq;
+ }
+
+ for (i = 0; i < msixcnt; i++) {
+ idxd->irq_entries[i].id = i;
+ idxd->irq_entries[i].idxd = idxd;
+ }
+
+ msix = &idxd->msix_entries[0];
+ irq_entry = &idxd->irq_entries[0];
+ rc = devm_request_threaded_irq(dev, msix->vector, idxd_irq_handler,
+ idxd_misc_thread, 0, "idxd-misc",
+ irq_entry);
+ if (rc < 0) {
+ dev_err(dev, "Failed to allocate misc interrupt.\n");
+ goto err_no_irq;
+ }
+
+ dev_dbg(dev, "Allocated idxd-misc handler on msix vector %d\n",
+ msix->vector);
+
+ /* first MSI-X entry is not for wq interrupts */
+ idxd->num_wq_irqs = msixcnt - 1;
+
+ for (i = 1; i < msixcnt; i++) {
+ msix = &idxd->msix_entries[i];
+ irq_entry = &idxd->irq_entries[i];
+
+ init_llist_head(&idxd->irq_entries[i].pending_llist);
+ INIT_LIST_HEAD(&idxd->irq_entries[i].work_list);
+ rc = devm_request_threaded_irq(dev, msix->vector,
+ idxd_irq_handler,
+ idxd_wq_thread, 0,
+ "idxd-portal", irq_entry);
+ if (rc < 0) {
+ dev_err(dev, "Failed to allocate irq %d.\n",
+ msix->vector);
+ goto err_no_irq;
+ }
+ dev_dbg(dev, "Allocated idxd-msix %d for vector %d\n",
+ i, msix->vector);
+ }
+
+ idxd_unmask_error_interrupts(idxd);
+
+ return 0;
+
+ err_no_irq:
+ /* Disable error interrupt generation */
+ idxd_mask_error_interrupts(idxd);
+ pci_disable_msix(pdev);
+ dev_err(dev, "No usable interrupts\n");
+ return rc;
+}
+
+static void idxd_wqs_free_lock(struct idxd_device *idxd)
+{
+ int i;
+
+ for (i = 0; i < idxd->max_wqs; i++) {
+ struct idxd_wq *wq = &idxd->wqs[i];
+
+ percpu_free_rwsem(&wq->submit_lock);
+ }
+}
+
+static int idxd_setup_internals(struct idxd_device *idxd)
+{
+ struct device *dev = &idxd->pdev->dev;
+ int i;
+
+ idxd->groups = devm_kcalloc(dev, idxd->max_groups,
+ sizeof(struct idxd_group), GFP_KERNEL);
+ if (!idxd->groups)
+ return -ENOMEM;
+
+ for (i = 0; i < idxd->max_groups; i++) {
+ idxd->groups[i].idxd = idxd;
+ idxd->groups[i].id = i;
+ idxd->groups[i].tc_a = -1;
+ idxd->groups[i].tc_b = -1;
+ }
+
+ idxd->wqs = devm_kcalloc(dev, idxd->max_wqs, sizeof(struct idxd_wq),
+ GFP_KERNEL);
+ if (!idxd->wqs)
+ return -ENOMEM;
+
+ idxd->engines = devm_kcalloc(dev, idxd->max_engines,
+ sizeof(struct idxd_engine), GFP_KERNEL);
+ if (!idxd->engines)
+ return -ENOMEM;
+
+ for (i = 0; i < idxd->max_wqs; i++) {
+ struct idxd_wq *wq = &idxd->wqs[i];
+ int rc;
+
+ wq->id = i;
+ wq->idxd = idxd;
+ mutex_init(&wq->wq_lock);
+ atomic_set(&wq->dq_count, 0);
+ init_waitqueue_head(&wq->submit_waitq);
+ wq->idxd_cdev.minor = -1;
+ rc = percpu_init_rwsem(&wq->submit_lock);
+ if (rc < 0) {
+ idxd_wqs_free_lock(idxd);
+ return rc;
+ }
+ }
+
+ for (i = 0; i < idxd->max_engines; i++) {
+ idxd->engines[i].idxd = idxd;
+ idxd->engines[i].id = i;
+ }
+
+ return 0;
+}
+
+static void idxd_read_table_offsets(struct idxd_device *idxd)
+{
+ union offsets_reg offsets;
+ struct device *dev = &idxd->pdev->dev;
+
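+ /* the offsets register reports table locations in units of 0x100 bytes */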
+ offsets.bits[0] = ioread64(idxd->reg_base + IDXD_TABLE_OFFSET);
+ offsets.bits[1] = ioread64(idxd->reg_base + IDXD_TABLE_OFFSET
+ + sizeof(u64));
+ idxd->grpcfg_offset = offsets.grpcfg * 0x100;
+ dev_dbg(dev, "IDXD Group Config Offset: %#x\n", idxd->grpcfg_offset);
+ idxd->wqcfg_offset = offsets.wqcfg * 0x100;
+ dev_dbg(dev, "IDXD Work Queue Config Offset: %#x\n",
+ idxd->wqcfg_offset);
+ idxd->msix_perm_offset = offsets.msix_perm * 0x100;
+ dev_dbg(dev, "IDXD MSIX Permission Offset: %#x\n",
+ idxd->msix_perm_offset);
+ idxd->perfmon_offset = offsets.perfmon * 0x100;
+ dev_dbg(dev, "IDXD Perfmon Offset: %#x\n", idxd->perfmon_offset);
+}
+
+static void idxd_read_caps(struct idxd_device *idxd)
+{
+ struct device *dev = &idxd->pdev->dev;
+ int i;
+
+ /* reading generic capabilities */
+ idxd->hw.gen_cap.bits = ioread64(idxd->reg_base + IDXD_GENCAP_OFFSET);
+ dev_dbg(dev, "gen_cap: %#llx\n", idxd->hw.gen_cap.bits);
+ idxd->max_xfer_bytes = 1ULL << idxd->hw.gen_cap.max_xfer_shift;
+ dev_dbg(dev, "max xfer size: %llu bytes\n", idxd->max_xfer_bytes);
+ idxd->max_batch_size = 1U << idxd->hw.gen_cap.max_batch_shift;
+ dev_dbg(dev, "max batch size: %u\n", idxd->max_batch_size);
+ if (idxd->hw.gen_cap.config_en)
+ set_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags);
+
+ /* reading group capabilities */
+ idxd->hw.group_cap.bits =
+ ioread64(idxd->reg_base + IDXD_GRPCAP_OFFSET);
+ dev_dbg(dev, "group_cap: %#llx\n", idxd->hw.group_cap.bits);
+ idxd->max_groups = idxd->hw.group_cap.num_groups;
+ dev_dbg(dev, "max groups: %u\n", idxd->max_groups);
+ idxd->max_tokens = idxd->hw.group_cap.total_tokens;
+ dev_dbg(dev, "max tokens: %u\n", idxd->max_tokens);
+ idxd->nr_tokens = idxd->max_tokens;
+
+ /* read engine capabilities */
+ idxd->hw.engine_cap.bits =
+ ioread64(idxd->reg_base + IDXD_ENGCAP_OFFSET);
+ dev_dbg(dev, "engine_cap: %#llx\n", idxd->hw.engine_cap.bits);
+ idxd->max_engines = idxd->hw.engine_cap.num_engines;
+ dev_dbg(dev, "max engines: %u\n", idxd->max_engines);
+
+ /* read workqueue capabilities */
+ idxd->hw.wq_cap.bits = ioread64(idxd->reg_base + IDXD_WQCAP_OFFSET);
+ dev_dbg(dev, "wq_cap: %#llx\n", idxd->hw.wq_cap.bits);
+ idxd->max_wq_size = idxd->hw.wq_cap.total_wq_size;
+ dev_dbg(dev, "total workqueue size: %u\n", idxd->max_wq_size);
+ idxd->max_wqs = idxd->hw.wq_cap.num_wqs;
+ dev_dbg(dev, "max workqueues: %u\n", idxd->max_wqs);
+
+ /* reading operation capabilities */
+ for (i = 0; i < 4; i++) {
+ idxd->hw.opcap.bits[i] = ioread64(idxd->reg_base +
+ IDXD_OPCAP_OFFSET + i * sizeof(u64));
+ dev_dbg(dev, "opcap[%d]: %#llx\n", i, idxd->hw.opcap.bits[i]);
+ }
+}
+
+static struct idxd_device *idxd_alloc(struct pci_dev *pdev,
+ void __iomem * const *iomap)
+{
+ struct device *dev = &pdev->dev;
+ struct idxd_device *idxd;
+
+ idxd = devm_kzalloc(dev, sizeof(struct idxd_device), GFP_KERNEL);
+ if (!idxd)
+ return NULL;
+
+ idxd->pdev = pdev;
+ idxd->reg_base = iomap[IDXD_MMIO_BAR];
+ spin_lock_init(&idxd->dev_lock);
+
+ return idxd;
+}
+
+static int idxd_probe(struct idxd_device *idxd)
+{
+ struct pci_dev *pdev = idxd->pdev;
+ struct device *dev = &pdev->dev;
+ int rc;
+
+ dev_dbg(dev, "%s entered and resetting device\n", __func__);
+ rc = idxd_device_reset(idxd);
+ if (rc < 0)
+ return rc;
+ dev_dbg(dev, "IDXD reset complete\n");
+
+ idxd_read_caps(idxd);
+ idxd_read_table_offsets(idxd);
+
+ rc = idxd_setup_internals(idxd);
+ if (rc)
+ goto err_setup;
+
+ rc = idxd_setup_interrupts(idxd);
+ if (rc)
+ goto err_setup;
+
+ dev_dbg(dev, "IDXD interrupt setup complete.\n");
+
+ mutex_lock(&idxd_idr_lock);
+ idxd->id = idr_alloc(&idxd_idrs[idxd->type], idxd, 0, 0, GFP_KERNEL);
+ mutex_unlock(&idxd_idr_lock);
+ if (idxd->id < 0) {
+ rc = -ENOMEM;
+ goto err_idr_fail;
+ }
+
+ idxd->major = idxd_cdev_get_major(idxd);
+
+ dev_dbg(dev, "IDXD device %d probed successfully\n", idxd->id);
+ return 0;
+
+ err_idr_fail:
+ idxd_mask_error_interrupts(idxd);
+ idxd_mask_msix_vectors(idxd);
+ err_setup:
+ return rc;
+}
+
+static int idxd_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
+{
+ void __iomem * const *iomap;
+ struct device *dev = &pdev->dev;
+ struct idxd_device *idxd;
+ int rc;
+ unsigned int mask;
+
+ rc = pcim_enable_device(pdev);
+ if (rc)
+ return rc;
+
+ dev_dbg(dev, "Mapping BARs\n");
+ mask = (1 << IDXD_MMIO_BAR);
+ rc = pcim_iomap_regions(pdev, mask, DRV_NAME);
+ if (rc)
+ return rc;
+
+ iomap = pcim_iomap_table(pdev);
+ if (!iomap)
+ return -ENOMEM;
+
+ dev_dbg(dev, "Set DMA masks\n");
+ rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
+ if (rc)
+ rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
+ if (rc)
+ return rc;
+
+ rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
+ if (rc)
+ rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
+ if (rc)
+ return rc;
+
+ dev_dbg(dev, "Alloc IDXD context\n");
+ idxd = idxd_alloc(pdev, iomap);
+ if (!idxd)
+ return -ENOMEM;
+
+ idxd_set_type(idxd);
+
+ dev_dbg(dev, "Set PCI master\n");
+ pci_set_master(pdev);
+ pci_set_drvdata(pdev, idxd);
+
+ idxd->hw.version = ioread32(idxd->reg_base + IDXD_VER_OFFSET);
+ rc = idxd_probe(idxd);
+ if (rc) {
+ dev_err(dev, "Intel(R) IDXD DMA Engine init failed\n");
+ return -ENODEV;
+ }
+
+ rc = idxd_setup_sysfs(idxd);
+ if (rc) {
+ dev_err(dev, "IDXD sysfs setup failed\n");
+ return -ENODEV;
+ }
+
+ idxd->state = IDXD_DEV_CONF_READY;
+
+ dev_info(&pdev->dev, "Intel(R) Accelerator Device (v%x)\n",
+ idxd->hw.version);
+
+ return 0;
+}
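
The two-stage DMA mask setup above (try 64-bit, fall back to 32-bit, separately for the streaming and coherent masks) can be collapsed with the generic DMA API. A minimal equivalent sketch, assuming the same pdev and <linux/dma-mapping.h>; the patch itself uses the older PCI wrappers:

    rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
    if (rc)
            rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
    if (rc)
            return rc; /* neither 64-bit nor 32-bit DMA addressing usable */
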
+
+static void idxd_flush_pending_llist(struct idxd_irq_entry *ie)
+{
+ struct idxd_desc *desc, *itr;
+ struct llist_node *head;
+
+ head = llist_del_all(&ie->pending_llist);
+ if (!head)
+ return;
+
+ llist_for_each_entry_safe(desc, itr, head, llnode) {
+ idxd_dma_complete_txd(desc, IDXD_COMPLETE_ABORT);
+ idxd_free_desc(desc->wq, desc);
+ }
+}
+
+static void idxd_flush_work_list(struct idxd_irq_entry *ie)
+{
+ struct idxd_desc *desc, *iter;
+
+ list_for_each_entry_safe(desc, iter, &ie->work_list, list) {
+ list_del(&desc->list);
+ idxd_dma_complete_txd(desc, IDXD_COMPLETE_ABORT);
+ idxd_free_desc(desc->wq, desc);
+ }
+}
+
+static void idxd_shutdown(struct pci_dev *pdev)
+{
+ struct idxd_device *idxd = pci_get_drvdata(pdev);
+ int rc, i;
+ struct idxd_irq_entry *irq_entry;
+ int msixcnt = pci_msix_vec_count(pdev);
+ unsigned long flags;
+
+ spin_lock_irqsave(&idxd->dev_lock, flags);
+ rc = idxd_device_disable(idxd);
+ spin_unlock_irqrestore(&idxd->dev_lock, flags);
+ if (rc)
+ dev_err(&pdev->dev, "Disabling device failed\n");
+
+ dev_dbg(&pdev->dev, "%s called\n", __func__);
+ idxd_mask_msix_vectors(idxd);
+ idxd_mask_error_interrupts(idxd);
+
+ for (i = 0; i < msixcnt; i++) {
+ irq_entry = &idxd->irq_entries[i];
+ synchronize_irq(idxd->msix_entries[i].vector);
+ if (i == 0)
+ continue;
+ idxd_flush_pending_llist(irq_entry);
+ idxd_flush_work_list(irq_entry);
+ }
+}
+
+static void idxd_remove(struct pci_dev *pdev)
+{
+ struct idxd_device *idxd = pci_get_drvdata(pdev);
+
+ dev_dbg(&pdev->dev, "%s called\n", __func__);
+ idxd_cleanup_sysfs(idxd);
+ idxd_shutdown(pdev);
+ idxd_wqs_free_lock(idxd);
+ mutex_lock(&idxd_idr_lock);
+ idr_remove(&idxd_idrs[idxd->type], idxd->id);
+ mutex_unlock(&idxd_idr_lock);
+}
+
+static struct pci_driver idxd_pci_driver = {
+ .name = DRV_NAME,
+ .id_table = idxd_pci_tbl,
+ .probe = idxd_pci_probe,
+ .remove = idxd_remove,
+ .shutdown = idxd_shutdown,
+};
+
+static int __init idxd_init_module(void)
+{
+ int err, i;
+
+ /*
+ * If the CPU does not support MOVDIR64B (512-bit writes), there is
+ * no point in enumerating the device; we cannot use it.
+ */
+ if (!boot_cpu_has(X86_FEATURE_MOVDIR64B)) {
+ pr_warn("idxd driver failed to load without MOVDIR64B.\n");
+ return -ENODEV;
+ }
+
+ pr_info("%s: Intel(R) Accelerator Devices Driver %s\n",
+ DRV_NAME, IDXD_DRIVER_VERSION);
+
+ mutex_init(&idxd_idr_lock);
+ for (i = 0; i < IDXD_TYPE_MAX; i++)
+ idr_init(&idxd_idrs[i]);
+
+ err = idxd_register_bus_type();
+ if (err < 0)
+ return err;
+
+ err = idxd_register_driver();
+ if (err < 0)
+ goto err_idxd_driver_register;
+
+ err = idxd_cdev_register();
+ if (err)
+ goto err_cdev_register;
+
+ err = pci_register_driver(&idxd_pci_driver);
+ if (err)
+ goto err_pci_register;
+
+ return 0;
+
+err_pci_register:
+ idxd_cdev_remove();
+err_cdev_register:
+ idxd_unregister_driver();
+err_idxd_driver_register:
+ idxd_unregister_bus_type();
+ return err;
+}
+module_init(idxd_init_module);
+
+static void __exit idxd_exit_module(void)
+{
+ pci_unregister_driver(&idxd_pci_driver);
+ idxd_cdev_remove();
+ idxd_unregister_bus_type();
+}
+module_exit(idxd_exit_module);
diff --git a/drivers/dma/idxd/irq.c b/drivers/dma/idxd/irq.c
new file mode 100644
index 000000000000..d6fcd2e60103
--- /dev/null
+++ b/drivers/dma/idxd/irq.c
@@ -0,0 +1,261 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright(c) 2019 Intel Corporation. All rights rsvd. */
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/io-64-nonatomic-lo-hi.h>
+#include <linux/dmaengine.h>
+#include <uapi/linux/idxd.h>
+#include "../dmaengine.h"
+#include "idxd.h"
+#include "registers.h"
+
+void idxd_device_wqs_clear_state(struct idxd_device *idxd)
+{
+ int i;
+
+ lockdep_assert_held(&idxd->dev_lock);
+ for (i = 0; i < idxd->max_wqs; i++) {
+ struct idxd_wq *wq = &idxd->wqs[i];
+
+ wq->state = IDXD_WQ_DISABLED;
+ }
+}
+
+static int idxd_restart(struct idxd_device *idxd)
+{
+ int i, rc;
+
+ lockdep_assert_held(&idxd->dev_lock);
+
+ rc = __idxd_device_reset(idxd);
+ if (rc < 0)
+ goto out;
+
+ rc = idxd_device_config(idxd);
+ if (rc < 0)
+ goto out;
+
+ rc = idxd_device_enable(idxd);
+ if (rc < 0)
+ goto out;
+
+ for (i = 0; i < idxd->max_wqs; i++) {
+ struct idxd_wq *wq = &idxd->wqs[i];
+
+ if (wq->state == IDXD_WQ_ENABLED) {
+ rc = idxd_wq_enable(wq);
+ if (rc < 0) {
+ dev_warn(&idxd->pdev->dev,
+ "Unable to re-enable wq %s\n",
+ dev_name(&wq->conf_dev));
+ }
+ }
+ }
+
+ return 0;
+
+ out:
+ idxd_device_wqs_clear_state(idxd);
+ idxd->state = IDXD_DEV_HALTED;
+ return rc;
+}
+
+irqreturn_t idxd_irq_handler(int vec, void *data)
+{
+ struct idxd_irq_entry *irq_entry = data;
+ struct idxd_device *idxd = irq_entry->idxd;
+
+ idxd_mask_msix_vector(idxd, irq_entry->id);
+ return IRQ_WAKE_THREAD;
+}
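
idxd_irq_handler() only masks the vector and wakes the handler thread, which unmasks it when done; this is the standard threaded-IRQ split. A minimal sketch of how such a pair would be wired up, with vector, dev, and irq_entry assumed (the actual registration lives in idxd_setup_interrupts(), which is not part of this hunk):

    /* hard handler runs in irq context; the thread fn does the real work */
    rc = request_threaded_irq(vector, idxd_irq_handler, idxd_misc_thread,
                              0, "idxd-misc", irq_entry);
    if (rc < 0)
            dev_err(dev, "Failed to request misc irq\n");
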
+
+irqreturn_t idxd_misc_thread(int vec, void *data)
+{
+ struct idxd_irq_entry *irq_entry = data;
+ struct idxd_device *idxd = irq_entry->idxd;
+ struct device *dev = &idxd->pdev->dev;
+ union gensts_reg gensts;
+ u32 cause, val = 0;
+ int i, rc;
+ bool err = false;
+
+ cause = ioread32(idxd->reg_base + IDXD_INTCAUSE_OFFSET);
+
+ if (cause & IDXD_INTC_ERR) {
+ spin_lock_bh(&idxd->dev_lock);
+ for (i = 0; i < 4; i++)
+ idxd->sw_err.bits[i] = ioread64(idxd->reg_base +
+ IDXD_SWERR_OFFSET + i * sizeof(u64));
+ iowrite64(IDXD_SWERR_ACK, idxd->reg_base + IDXD_SWERR_OFFSET);
+
+ if (idxd->sw_err.valid && idxd->sw_err.wq_idx_valid) {
+ int id = idxd->sw_err.wq_idx;
+ struct idxd_wq *wq = &idxd->wqs[id];
+
+ if (wq->type == IDXD_WQT_USER)
+ wake_up_interruptible(&wq->idxd_cdev.err_queue);
+ } else {
+ for (i = 0; i < idxd->max_wqs; i++) {
+ struct idxd_wq *wq = &idxd->wqs[i];
+
+ if (wq->type == IDXD_WQT_USER)
+ wake_up_interruptible(&wq->idxd_cdev.err_queue);
+ }
+ }
+
+ spin_unlock_bh(&idxd->dev_lock);
+ val |= IDXD_INTC_ERR;
+
+ for (i = 0; i < 4; i++)
+ dev_warn(dev, "err[%d]: %#16.16llx\n",
+ i, idxd->sw_err.bits[i]);
+ err = true;
+ }
+
+ if (cause & IDXD_INTC_CMD) {
+ /* Driver does not use command interrupts */
+ val |= IDXD_INTC_CMD;
+ }
+
+ if (cause & IDXD_INTC_OCCUPY) {
+ /* Driver does not utilize occupancy interrupt */
+ val |= IDXD_INTC_OCCUPY;
+ }
+
+ if (cause & IDXD_INTC_PERFMON_OVFL) {
+ /*
+ * Driver does not utilize perfmon counter overflow interrupt
+ * yet.
+ */
+ val |= IDXD_INTC_PERFMON_OVFL;
+ }
+
+ val ^= cause;
+ if (val)
+ dev_warn_once(dev, "Unexpected interrupt cause bits set: %#x\n",
+ val);
+
+ iowrite32(cause, idxd->reg_base + IDXD_INTCAUSE_OFFSET);
+ if (!err)
+ return IRQ_HANDLED;
+
+ gensts.bits = ioread32(idxd->reg_base + IDXD_GENSTATS_OFFSET);
+ if (gensts.state == IDXD_DEVICE_STATE_HALT) {
+ spin_lock_bh(&idxd->dev_lock);
+ if (gensts.reset_type == IDXD_DEVICE_RESET_SOFTWARE) {
+ rc = idxd_restart(idxd);
+ if (rc < 0)
+ dev_err(&idxd->pdev->dev,
+ "idxd restart failed, device halt.");
+ } else {
+ idxd_device_wqs_clear_state(idxd);
+ idxd->state = IDXD_DEV_HALTED;
+ dev_err(&idxd->pdev->dev,
+ "idxd halted, need %s.\n",
+ gensts.reset_type == IDXD_DEVICE_RESET_FLR ?
+ "FLR" : "system reset");
+ }
+ spin_unlock_bh(&idxd->dev_lock);
+ }
+
+ idxd_unmask_msix_vector(idxd, irq_entry->id);
+ return IRQ_HANDLED;
+}
+
+static int irq_process_pending_llist(struct idxd_irq_entry *irq_entry,
+ int *processed)
+{
+ struct idxd_desc *desc, *t;
+ struct llist_node *head;
+ int queued = 0;
+
+ head = llist_del_all(&irq_entry->pending_llist);
+ if (!head)
+ return 0;
+
+ llist_for_each_entry_safe(desc, t, head, llnode) {
+ if (desc->completion->status) {
+ idxd_dma_complete_txd(desc, IDXD_COMPLETE_NORMAL);
+ idxd_free_desc(desc->wq, desc);
+ (*processed)++;
+ } else {
+ list_add_tail(&desc->list, &irq_entry->work_list);
+ queued++;
+ }
+ }
+
+ return queued;
+}
+
+static int irq_process_work_list(struct idxd_irq_entry *irq_entry,
+ int *processed)
+{
+ struct list_head *node, *next;
+ int queued = 0;
+
+ if (list_empty(&irq_entry->work_list))
+ return 0;
+
+ list_for_each_safe(node, next, &irq_entry->work_list) {
+ struct idxd_desc *desc =
+ container_of(node, struct idxd_desc, list);
+
+ if (desc->completion->status) {
+ list_del(&desc->list);
+ /* process and callback */
+ idxd_dma_complete_txd(desc, IDXD_COMPLETE_NORMAL);
+ idxd_free_desc(desc->wq, desc);
+ (*processed)++;
+ } else {
+ queued++;
+ }
+ }
+
+ return queued;
+}
+
+irqreturn_t idxd_wq_thread(int irq, void *data)
+{
+ struct idxd_irq_entry *irq_entry = data;
+ int rc, processed = 0, retry = 0;
+
+ /*
+ * There are two lists being processed. The pending_llist is where
+ * submitters add descriptors after sending them to the workqueue;
+ * it is a lockless singly linked list. The work_list is a standard
+ * Linux doubly linked list. This is a multiple-producer,
+ * single-consumer scenario: the producers are all the kernel
+ * submitters of descriptors, and the consumer is the kernel irq
+ * handler thread for the MSI-X vector when using threaded irq. To
+ * stay lockless within the restrictions of llist, the following
+ * steps are taken:
+ * 1. Iterate through the work_list and process any completed
+ * descriptors, deleting completed entries during iteration.
+ * 2. llist_del_all() from the pending list.
+ * 3. Iterate through the llist that was detached from the pending
+ * list and process the completed entries.
+ * 4. If an entry is still waiting on hardware, list_add_tail() it to
+ * the work_list.
+ * 5. Repeat until no more descriptors remain.
+ */
+ do {
+ rc = irq_process_work_list(irq_entry, &processed);
+ if (rc != 0) {
+ retry++;
+ continue;
+ }
+
+ rc = irq_process_pending_llist(irq_entry, &processed);
+ } while (rc != 0 && retry != 10);
+
+ idxd_unmask_msix_vector(irq_entry->idxd, irq_entry->id);
+
+ if (processed == 0)
+ return IRQ_NONE;
+
+ return IRQ_HANDLED;
+}
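
The comment in idxd_wq_thread() describes a multi-producer, single-consumer handoff built on llist. A self-contained sketch of that idiom, with the hypothetical struct item and complete_item() standing in for the driver's descriptors (requires <linux/llist.h> and <linux/list.h>):

    struct item {
            struct llist_node llnode; /* producer-side lockless link */
            struct list_head list;    /* consumer-private link */
            bool done;
    };

    static LLIST_HEAD(pending); /* shared with all producers */
    static LIST_HEAD(work);     /* touched only by the consumer */

    /* producers: may be called concurrently from many contexts */
    static void producer_submit(struct item *it)
    {
            llist_add(&it->llnode, &pending);
    }

    /* single consumer: detach the whole list, then walk it without locks */
    static void consumer_poll(void)
    {
            struct llist_node *head = llist_del_all(&pending);
            struct item *it, *t;

            llist_for_each_entry_safe(it, t, head, llnode) {
                    if (it->done)
                            complete_item(it); /* hypothetical callback */
                    else
                            list_add_tail(&it->list, &work);
            }
    }
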
diff --git a/drivers/dma/idxd/registers.h b/drivers/dma/idxd/registers.h
new file mode 100644
index 000000000000..a39e7ae6b3d9
--- /dev/null
+++ b/drivers/dma/idxd/registers.h
@@ -0,0 +1,336 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright(c) 2019 Intel Corporation. All rights rsvd. */
+#ifndef _IDXD_REGISTERS_H_
+#define _IDXD_REGISTERS_H_
+
+/* PCI Config */
+#define PCI_DEVICE_ID_INTEL_DSA_SPR0 0x0b25
+
+#define IDXD_MMIO_BAR 0
+#define IDXD_WQ_BAR 2
+#define IDXD_PORTAL_SIZE 0x4000
+
+/* MMIO Device BAR0 Registers */
+#define IDXD_VER_OFFSET 0x00
+#define IDXD_VER_MAJOR_MASK 0xf0
+#define IDXD_VER_MINOR_MASK 0x0f
+#define GET_IDXD_VER_MAJOR(x) (((x) & IDXD_VER_MAJOR_MASK) >> 4)
+#define GET_IDXD_VER_MINOR(x) ((x) & IDXD_VER_MINOR_MASK)
+
+union gen_cap_reg {
+ struct {
+ u64 block_on_fault:1;
+ u64 overlap_copy:1;
+ u64 cache_control_mem:1;
+ u64 cache_control_cache:1;
+ u64 rsvd:3;
+ u64 int_handle_req:1;
+ u64 dest_readback:1;
+ u64 drain_readback:1;
+ u64 rsvd2:6;
+ u64 max_xfer_shift:5;
+ u64 max_batch_shift:4;
+ u64 max_ims_mult:6;
+ u64 config_en:1;
+ u64 max_descs_per_engine:8;
+ u64 rsvd3:24;
+ };
+ u64 bits;
+} __packed;
+#define IDXD_GENCAP_OFFSET 0x10
+
+union wq_cap_reg {
+ struct {
+ u64 total_wq_size:16;
+ u64 num_wqs:8;
+ u64 rsvd:24;
+ u64 shared_mode:1;
+ u64 dedicated_mode:1;
+ u64 rsvd2:1;
+ u64 priority:1;
+ u64 occupancy:1;
+ u64 occupancy_int:1;
+ u64 rsvd3:10;
+ };
+ u64 bits;
+} __packed;
+#define IDXD_WQCAP_OFFSET 0x20
+
+union group_cap_reg {
+ struct {
+ u64 num_groups:8;
+ u64 total_tokens:8;
+ u64 token_en:1;
+ u64 token_limit:1;
+ u64 rsvd:46;
+ };
+ u64 bits;
+} __packed;
+#define IDXD_GRPCAP_OFFSET 0x30
+
+union engine_cap_reg {
+ struct {
+ u64 num_engines:8;
+ u64 rsvd:56;
+ };
+ u64 bits;
+} __packed;
+
+#define IDXD_ENGCAP_OFFSET 0x38
+
+#define IDXD_OPCAP_NOOP 0x0001
+#define IDXD_OPCAP_BATCH 0x0002
+#define IDXD_OPCAP_MEMMOVE 0x0008
+struct opcap {
+ u64 bits[4];
+};
+
+#define IDXD_OPCAP_OFFSET 0x40
+
+#define IDXD_TABLE_OFFSET 0x60
+union offsets_reg {
+ struct {
+ u64 grpcfg:16;
+ u64 wqcfg:16;
+ u64 msix_perm:16;
+ u64 ims:16;
+ u64 perfmon:16;
+ u64 rsvd:48;
+ };
+ u64 bits[2];
+} __packed;
+
+#define IDXD_GENCFG_OFFSET 0x80
+union gencfg_reg {
+ struct {
+ u32 token_limit:8;
+ u32 rsvd:4;
+ u32 user_int_en:1;
+ u32 rsvd2:19;
+ };
+ u32 bits;
+} __packed;
+
+#define IDXD_GENCTRL_OFFSET 0x88
+union genctrl_reg {
+ struct {
+ u32 softerr_int_en:1;
+ u32 rsvd:31;
+ };
+ u32 bits;
+} __packed;
+
+#define IDXD_GENSTATS_OFFSET 0x90
+union gensts_reg {
+ struct {
+ u32 state:2;
+ u32 reset_type:2;
+ u32 rsvd:28;
+ };
+ u32 bits;
+} __packed;
+
+enum idxd_device_status_state {
+ IDXD_DEVICE_STATE_DISABLED = 0,
+ IDXD_DEVICE_STATE_ENABLED,
+ IDXD_DEVICE_STATE_DRAIN,
+ IDXD_DEVICE_STATE_HALT,
+};
+
+enum idxd_device_reset_type {
+ IDXD_DEVICE_RESET_SOFTWARE = 0,
+ IDXD_DEVICE_RESET_FLR,
+ IDXD_DEVICE_RESET_WARM,
+ IDXD_DEVICE_RESET_COLD,
+};
+
+#define IDXD_INTCAUSE_OFFSET 0x98
+#define IDXD_INTC_ERR 0x01
+#define IDXD_INTC_CMD 0x02
+#define IDXD_INTC_OCCUPY 0x04
+#define IDXD_INTC_PERFMON_OVFL 0x08
+
+#define IDXD_CMD_OFFSET 0xa0
+union idxd_command_reg {
+ struct {
+ u32 operand:20;
+ u32 cmd:5;
+ u32 rsvd:6;
+ u32 int_req:1;
+ };
+ u32 bits;
+} __packed;
+
+enum idxd_cmd {
+ IDXD_CMD_ENABLE_DEVICE = 1,
+ IDXD_CMD_DISABLE_DEVICE,
+ IDXD_CMD_DRAIN_ALL,
+ IDXD_CMD_ABORT_ALL,
+ IDXD_CMD_RESET_DEVICE,
+ IDXD_CMD_ENABLE_WQ,
+ IDXD_CMD_DISABLE_WQ,
+ IDXD_CMD_DRAIN_WQ,
+ IDXD_CMD_ABORT_WQ,
+ IDXD_CMD_RESET_WQ,
+ IDXD_CMD_DRAIN_PASID,
+ IDXD_CMD_ABORT_PASID,
+ IDXD_CMD_REQUEST_INT_HANDLE,
+};
+
+#define IDXD_CMDSTS_OFFSET 0xa8
+union cmdsts_reg {
+ struct {
+ u8 err;
+ u16 result;
+ u8 rsvd:7;
+ u8 active:1;
+ };
+ u32 bits;
+} __packed;
+#define IDXD_CMDSTS_ACTIVE 0x80000000
+
+enum idxd_cmdsts_err {
+ IDXD_CMDSTS_SUCCESS = 0,
+ IDXD_CMDSTS_INVAL_CMD,
+ IDXD_CMDSTS_INVAL_WQIDX,
+ IDXD_CMDSTS_HW_ERR,
+ /* enable device errors */
+ IDXD_CMDSTS_ERR_DEV_ENABLED = 0x10,
+ IDXD_CMDSTS_ERR_CONFIG,
+ IDXD_CMDSTS_ERR_BUSMASTER_EN,
+ IDXD_CMDSTS_ERR_PASID_INVAL,
+ IDXD_CMDSTS_ERR_WQ_SIZE_ERANGE,
+ IDXD_CMDSTS_ERR_GRP_CONFIG,
+ IDXD_CMDSTS_ERR_GRP_CONFIG2,
+ IDXD_CMDSTS_ERR_GRP_CONFIG3,
+ IDXD_CMDSTS_ERR_GRP_CONFIG4,
+ /* enable wq errors */
+ IDXD_CMDSTS_ERR_DEV_NOTEN = 0x20,
+ IDXD_CMDSTS_ERR_WQ_ENABLED,
+ IDXD_CMDSTS_ERR_WQ_SIZE,
+ IDXD_CMDSTS_ERR_WQ_PRIOR,
+ IDXD_CMDSTS_ERR_WQ_MODE,
+ IDXD_CMDSTS_ERR_BOF_EN,
+ IDXD_CMDSTS_ERR_PASID_EN,
+ IDXD_CMDSTS_ERR_MAX_BATCH_SIZE,
+ IDXD_CMDSTS_ERR_MAX_XFER_SIZE,
+ /* disable device errors */
+ IDXD_CMDSTS_ERR_DIS_DEV_EN = 0x31,
+ /* disable WQ, drain WQ, abort WQ, reset WQ */
+ IDXD_CMDSTS_ERR_DEV_NOT_EN,
+ /* request interrupt handle */
+ IDXD_CMDSTS_ERR_INVAL_INT_IDX = 0x41,
+ IDXD_CMDSTS_ERR_NO_HANDLE,
+};
+
+#define IDXD_SWERR_OFFSET 0xc0
+#define IDXD_SWERR_VALID 0x00000001
+#define IDXD_SWERR_OVERFLOW 0x00000002
+#define IDXD_SWERR_ACK (IDXD_SWERR_VALID | IDXD_SWERR_OVERFLOW)
+union sw_err_reg {
+ struct {
+ u64 valid:1;
+ u64 overflow:1;
+ u64 desc_valid:1;
+ u64 wq_idx_valid:1;
+ u64 batch:1;
+ u64 fault_rw:1;
+ u64 priv:1;
+ u64 rsvd:1;
+ u64 error:8;
+ u64 wq_idx:8;
+ u64 rsvd2:8;
+ u64 operation:8;
+ u64 pasid:20;
+ u64 rsvd3:4;
+
+ u64 batch_idx:16;
+ u64 rsvd4:16;
+ u64 invalid_flags:32;
+
+ u64 fault_addr;
+
+ u64 rsvd5;
+ };
+ u64 bits[4];
+} __packed;
+
+union msix_perm {
+ struct {
+ u32 rsvd:2;
+ u32 ignore:1;
+ u32 pasid_en:1;
+ u32 rsvd2:8;
+ u32 pasid:20;
+ };
+ u32 bits;
+} __packed;
+
+union group_flags {
+ struct {
+ u32 tc_a:3;
+ u32 tc_b:3;
+ u32 rsvd:1;
+ u32 use_token_limit:1;
+ u32 tokens_reserved:8;
+ u32 rsvd2:4;
+ u32 tokens_allowed:8;
+ u32 rsvd3:4;
+ };
+ u32 bits;
+} __packed;
+
+struct grpcfg {
+ u64 wqs[4];
+ u64 engines;
+ union group_flags flags;
+} __packed;
+
+union wqcfg {
+ struct {
+ /* bytes 0-3 */
+ u16 wq_size;
+ u16 rsvd;
+
+ /* bytes 4-7 */
+ u16 wq_thresh;
+ u16 rsvd1;
+
+ /* bytes 8-11 */
+ u32 mode:1; /* shared or dedicated */
+ u32 bof:1; /* block on fault */
+ u32 rsvd2:2;
+ u32 priority:4;
+ u32 pasid:20;
+ u32 pasid_en:1;
+ u32 priv:1;
+ u32 rsvd3:2;
+
+ /* bytes 12-15 */
+ u32 max_xfer_shift:5;
+ u32 max_batch_shift:4;
+ u32 rsvd4:23;
+
+ /* bytes 16-19 */
+ u16 occupancy_inth;
+ u16 occupancy_table_sel:1;
+ u16 rsvd5:15;
+
+ /* bytes 20-23 */
+ u16 occupancy_limit;
+ u16 occupancy_int_en:1;
+ u16 rsvd6:15;
+
+ /* bytes 24-27 */
+ u16 occupancy;
+ u16 occupancy_int:1;
+ u16 rsvd7:12;
+ u16 mode_support:1;
+ u16 wq_state:2;
+
+ /* bytes 28-31 */
+ u32 rsvd8;
+ };
+ u32 bits[8];
+} __packed;
+#endif
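
Each register above pairs a named bitfield view with a raw bits member, so one MMIO read can be decoded field by field. A minimal sketch of the idiom, assuming an ioremapped reg_base and <linux/io-64-nonatomic-lo-hi.h> for ioread64 on 32-bit builds:

    union gen_cap_reg cap;

    cap.bits = ioread64(reg_base + IDXD_GENCAP_OFFSET); /* single read */
    if (cap.config_en)
            pr_debug("device is software-configurable\n");
    pr_debug("max transfer: %llu bytes\n", 1ULL << cap.max_xfer_shift);
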
diff --git a/drivers/dma/idxd/submit.c b/drivers/dma/idxd/submit.c
new file mode 100644
index 000000000000..45a0c5869a0a
--- /dev/null
+++ b/drivers/dma/idxd/submit.c
@@ -0,0 +1,95 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright(c) 2019 Intel Corporation. All rights rsvd. */
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <uapi/linux/idxd.h>
+#include "idxd.h"
+#include "registers.h"
+
+struct idxd_desc *idxd_alloc_desc(struct idxd_wq *wq, enum idxd_op_type optype)
+{
+ struct idxd_desc *desc;
+ int idx;
+ struct idxd_device *idxd = wq->idxd;
+
+ if (idxd->state != IDXD_DEV_ENABLED)
+ return ERR_PTR(-EIO);
+
+ if (optype == IDXD_OP_BLOCK)
+ percpu_down_read(&wq->submit_lock);
+ else if (!percpu_down_read_trylock(&wq->submit_lock))
+ return ERR_PTR(-EBUSY);
+
+ if (!atomic_add_unless(&wq->dq_count, 1, wq->size)) {
+ int rc;
+
+ if (optype == IDXD_OP_NONBLOCK) {
+ percpu_up_read(&wq->submit_lock);
+ return ERR_PTR(-EAGAIN);
+ }
+
+ percpu_up_read(&wq->submit_lock);
+ percpu_down_write(&wq->submit_lock);
+ rc = wait_event_interruptible(wq->submit_waitq,
+ atomic_add_unless(&wq->dq_count,
+ 1, wq->size) ||
+ idxd->state != IDXD_DEV_ENABLED);
+ percpu_up_write(&wq->submit_lock);
+ if (rc < 0)
+ return ERR_PTR(-EINTR);
+ if (idxd->state != IDXD_DEV_ENABLED)
+ return ERR_PTR(-EIO);
+ } else {
+ percpu_up_read(&wq->submit_lock);
+ }
+
+ idx = sbitmap_get(&wq->sbmap, 0, false);
+ if (idx < 0) {
+ atomic_dec(&wq->dq_count);
+ return ERR_PTR(-EAGAIN);
+ }
+
+ desc = wq->descs[idx];
+ memset(desc->hw, 0, sizeof(struct dsa_hw_desc));
+ memset(desc->completion, 0, sizeof(struct dsa_completion_record));
+ return desc;
+}
+
+void idxd_free_desc(struct idxd_wq *wq, struct idxd_desc *desc)
+{
+ atomic_dec(&wq->dq_count);
+
+ sbitmap_clear_bit(&wq->sbmap, desc->id);
+ wake_up(&wq->submit_waitq);
+}
+
+int idxd_submit_desc(struct idxd_wq *wq, struct idxd_desc *desc)
+{
+ struct idxd_device *idxd = wq->idxd;
+ int vec = desc->hw->int_handle;
+ void __iomem *portal;
+
+ if (idxd->state != IDXD_DEV_ENABLED)
+ return -EIO;
+
+ portal = wq->dportal + idxd_get_wq_portal_offset(IDXD_PORTAL_UNLIMITED);
+ /*
+ * The wmb() flushes writes to coherent DMA data before possibly
+ * triggering a DMA read. The wmb() is necessary even on UP because
+ * the recipient is a device.
+ */
+ wmb();
+ iosubmit_cmds512(portal, desc->hw, 1);
+
+ /*
+ * Queue the descriptor onto the lockless pending list of the
+ * irq_entry that this descriptor was assigned to.
+ */
+ if (desc->hw->flags & IDXD_OP_FLAG_RCI)
+ llist_add(&desc->llnode,
+ &idxd->irq_entries[vec].pending_llist);
+
+ return 0;
+}
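
Taken together, the three functions above form the kernel submission path: allocate a slot, fill in the hardware descriptor, submit, and return the slot on failure. A sketch of the intended call sequence for an in-kernel client; the opcode assignment is illustrative, not a complete descriptor setup:

    struct idxd_desc *desc;
    int rc;

    desc = idxd_alloc_desc(wq, IDXD_OP_BLOCK); /* may sleep for a free slot */
    if (IS_ERR(desc))
            return PTR_ERR(desc);

    desc->hw->opcode = DSA_OPCODE_MEMMOVE; /* illustrative field setup */

    rc = idxd_submit_desc(wq, desc);
    if (rc) {
            idxd_free_desc(wq, desc); /* give the slot back on failure */
            return rc;
    }
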
diff --git a/drivers/dma/idxd/sysfs.c b/drivers/dma/idxd/sysfs.c
new file mode 100644
index 000000000000..849c50ab939a
--- /dev/null
+++ b/drivers/dma/idxd/sysfs.c
@@ -0,0 +1,1528 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright(c) 2019 Intel Corporation. All rights rsvd. */
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/device.h>
+#include <linux/io-64-nonatomic-lo-hi.h>
+#include <uapi/linux/idxd.h>
+#include "registers.h"
+#include "idxd.h"
+
+static char *idxd_wq_type_names[] = {
+ [IDXD_WQT_NONE] = "none",
+ [IDXD_WQT_KERNEL] = "kernel",
+ [IDXD_WQT_USER] = "user",
+};
+
+static void idxd_conf_device_release(struct device *dev)
+{
+ dev_dbg(dev, "%s for %s\n", __func__, dev_name(dev));
+}
+
+static struct device_type idxd_group_device_type = {
+ .name = "group",
+ .release = idxd_conf_device_release,
+};
+
+static struct device_type idxd_wq_device_type = {
+ .name = "wq",
+ .release = idxd_conf_device_release,
+};
+
+static struct device_type idxd_engine_device_type = {
+ .name = "engine",
+ .release = idxd_conf_device_release,
+};
+
+static struct device_type dsa_device_type = {
+ .name = "dsa",
+ .release = idxd_conf_device_release,
+};
+
+static inline bool is_dsa_dev(struct device *dev)
+{
+ return dev ? dev->type == &dsa_device_type : false;
+}
+
+static inline bool is_idxd_dev(struct device *dev)
+{
+ return is_dsa_dev(dev);
+}
+
+static inline bool is_idxd_wq_dev(struct device *dev)
+{
+ return dev ? dev->type == &idxd_wq_device_type : false;
+}
+
+static inline bool is_idxd_wq_dmaengine(struct idxd_wq *wq)
+{
+ if (wq->type == IDXD_WQT_KERNEL &&
+ strcmp(wq->name, "dmaengine") == 0)
+ return true;
+ return false;
+}
+
+static inline bool is_idxd_wq_cdev(struct idxd_wq *wq)
+{
+ return wq->type == IDXD_WQT_USER ? true : false;
+}
+
+static int idxd_config_bus_match(struct device *dev,
+ struct device_driver *drv)
+{
+ int matched = 0;
+
+ if (is_idxd_dev(dev)) {
+ struct idxd_device *idxd = confdev_to_idxd(dev);
+
+ if (idxd->state != IDXD_DEV_CONF_READY)
+ return 0;
+ matched = 1;
+ } else if (is_idxd_wq_dev(dev)) {
+ struct idxd_wq *wq = confdev_to_wq(dev);
+ struct idxd_device *idxd = wq->idxd;
+
+ if (idxd->state < IDXD_DEV_CONF_READY)
+ return 0;
+
+ if (wq->state != IDXD_WQ_DISABLED) {
+ dev_dbg(dev, "%s not disabled\n", dev_name(dev));
+ return 0;
+ }
+ matched = 1;
+ }
+
+ if (matched)
+ dev_dbg(dev, "%s matched\n", dev_name(dev));
+
+ return matched;
+}
+
+static int idxd_config_bus_probe(struct device *dev)
+{
+ int rc;
+ unsigned long flags;
+
+ dev_dbg(dev, "%s called\n", __func__);
+
+ if (is_idxd_dev(dev)) {
+ struct idxd_device *idxd = confdev_to_idxd(dev);
+
+ if (idxd->state != IDXD_DEV_CONF_READY) {
+ dev_warn(dev, "Device not ready for config\n");
+ return -EBUSY;
+ }
+
+ if (!try_module_get(THIS_MODULE))
+ return -ENXIO;
+
+ spin_lock_irqsave(&idxd->dev_lock, flags);
+
+ /* Perform IDXD configuration and enabling */
+ rc = idxd_device_config(idxd);
+ if (rc < 0) {
+ spin_unlock_irqrestore(&idxd->dev_lock, flags);
+ dev_warn(dev, "Device config failed: %d\n", rc);
+ return rc;
+ }
+
+ /* start device */
+ rc = idxd_device_enable(idxd);
+ if (rc < 0) {
+ spin_unlock_irqrestore(&idxd->dev_lock, flags);
+ dev_warn(dev, "Device enable failed: %d\n", rc);
+ return rc;
+ }
+
+ spin_unlock_irqrestore(&idxd->dev_lock, flags);
+ dev_info(dev, "Device %s enabled\n", dev_name(dev));
+
+ rc = idxd_register_dma_device(idxd);
+ if (rc < 0) {
+ spin_unlock_irqrestore(&idxd->dev_lock, flags);
+ dev_dbg(dev, "Failed to register dmaengine device\n");
+ return rc;
+ }
+ return 0;
+ } else if (is_idxd_wq_dev(dev)) {
+ struct idxd_wq *wq = confdev_to_wq(dev);
+ struct idxd_device *idxd = wq->idxd;
+
+ mutex_lock(&wq->wq_lock);
+
+ if (idxd->state != IDXD_DEV_ENABLED) {
+ mutex_unlock(&wq->wq_lock);
+ dev_warn(dev, "Enabling while device not enabled.\n");
+ return -EPERM;
+ }
+
+ if (wq->state != IDXD_WQ_DISABLED) {
+ mutex_unlock(&wq->wq_lock);
+ dev_warn(dev, "WQ %d already enabled.\n", wq->id);
+ return -EBUSY;
+ }
+
+ if (!wq->group) {
+ mutex_unlock(&wq->wq_lock);
+ dev_warn(dev, "WQ not attached to group.\n");
+ return -EINVAL;
+ }
+
+ if (strlen(wq->name) == 0) {
+ mutex_unlock(&wq->wq_lock);
+ dev_warn(dev, "WQ name not set.\n");
+ return -EINVAL;
+ }
+
+ rc = idxd_wq_alloc_resources(wq);
+ if (rc < 0) {
+ mutex_unlock(&wq->wq_lock);
+ dev_warn(dev, "WQ resource alloc failed\n");
+ return rc;
+ }
+
+ spin_lock_irqsave(&idxd->dev_lock, flags);
+ rc = idxd_device_config(idxd);
+ if (rc < 0) {
+ spin_unlock_irqrestore(&idxd->dev_lock, flags);
+ mutex_unlock(&wq->wq_lock);
+ dev_warn(dev, "Writing WQ %d config failed: %d\n",
+ wq->id, rc);
+ return rc;
+ }
+
+ rc = idxd_wq_enable(wq);
+ if (rc < 0) {
+ spin_unlock_irqrestore(&idxd->dev_lock, flags);
+ mutex_unlock(&wq->wq_lock);
+ dev_warn(dev, "WQ %d enabling failed: %d\n",
+ wq->id, rc);
+ return rc;
+ }
+ spin_unlock_irqrestore(&idxd->dev_lock, flags);
+
+ rc = idxd_wq_map_portal(wq);
+ if (rc < 0) {
+ dev_warn(dev, "wq portal mapping failed: %d\n", rc);
+ rc = idxd_wq_disable(wq);
+ if (rc < 0)
+ dev_warn(dev, "IDXD wq disable failed\n");
+ spin_unlock_irqrestore(&idxd->dev_lock, flags);
+ mutex_unlock(&wq->wq_lock);
+ return rc;
+ }
+
+ wq->client_count = 0;
+
+ dev_info(dev, "wq %s enabled\n", dev_name(&wq->conf_dev));
+
+ if (is_idxd_wq_dmaengine(wq)) {
+ rc = idxd_register_dma_channel(wq);
+ if (rc < 0) {
+ dev_dbg(dev, "DMA channel register failed\n");
+ mutex_unlock(&wq->wq_lock);
+ return rc;
+ }
+ } else if (is_idxd_wq_cdev(wq)) {
+ rc = idxd_wq_add_cdev(wq);
+ if (rc < 0) {
+ dev_dbg(dev, "Cdev creation failed\n");
+ mutex_unlock(&wq->wq_lock);
+ return rc;
+ }
+ }
+
+ mutex_unlock(&wq->wq_lock);
+ return 0;
+ }
+
+ return -ENODEV;
+}
+
+static void disable_wq(struct idxd_wq *wq)
+{
+ struct idxd_device *idxd = wq->idxd;
+ struct device *dev = &idxd->pdev->dev;
+ unsigned long flags;
+ int rc;
+
+ mutex_lock(&wq->wq_lock);
+ dev_dbg(dev, "%s removing WQ %s\n", __func__, dev_name(&wq->conf_dev));
+ if (wq->state == IDXD_WQ_DISABLED) {
+ mutex_unlock(&wq->wq_lock);
+ return;
+ }
+
+ if (is_idxd_wq_dmaengine(wq))
+ idxd_unregister_dma_channel(wq);
+ else if (is_idxd_wq_cdev(wq))
+ idxd_wq_del_cdev(wq);
+
+ if (idxd_wq_refcount(wq))
+ dev_warn(dev, "Clients has claim on wq %d: %d\n",
+ wq->id, idxd_wq_refcount(wq));
+
+ idxd_wq_unmap_portal(wq);
+
+ spin_lock_irqsave(&idxd->dev_lock, flags);
+ rc = idxd_wq_disable(wq);
+ spin_unlock_irqrestore(&idxd->dev_lock, flags);
+
+ idxd_wq_free_resources(wq);
+ wq->client_count = 0;
+ mutex_unlock(&wq->wq_lock);
+
+ if (rc < 0)
+ dev_warn(dev, "Failed to disable %s: %d\n",
+ dev_name(&wq->conf_dev), rc);
+ else
+ dev_info(dev, "wq %s disabled\n", dev_name(&wq->conf_dev));
+}
+
+static int idxd_config_bus_remove(struct device *dev)
+{
+ int rc;
+ unsigned long flags;
+
+ dev_dbg(dev, "%s called for %s\n", __func__, dev_name(dev));
+
+ /* disable workqueue here */
+ if (is_idxd_wq_dev(dev)) {
+ struct idxd_wq *wq = confdev_to_wq(dev);
+
+ disable_wq(wq);
+ } else if (is_idxd_dev(dev)) {
+ struct idxd_device *idxd = confdev_to_idxd(dev);
+ int i;
+
+ dev_dbg(dev, "%s removing dev %s\n", __func__,
+ dev_name(&idxd->conf_dev));
+ for (i = 0; i < idxd->max_wqs; i++) {
+ struct idxd_wq *wq = &idxd->wqs[i];
+
+ if (wq->state == IDXD_WQ_DISABLED)
+ continue;
+ dev_warn(dev, "Active wq %d on disable %s.\n", i,
+ dev_name(&idxd->conf_dev));
+ device_release_driver(&wq->conf_dev);
+ }
+
+ idxd_unregister_dma_device(idxd);
+ spin_lock_irqsave(&idxd->dev_lock, flags);
+ rc = idxd_device_disable(idxd);
+ spin_unlock_irqrestore(&idxd->dev_lock, flags);
+ module_put(THIS_MODULE);
+ if (rc < 0)
+ dev_warn(dev, "Device disable failed\n");
+ else
+ dev_info(dev, "Device %s disabled\n", dev_name(dev));
+
+ }
+
+ return 0;
+}
+
+static void idxd_config_bus_shutdown(struct device *dev)
+{
+ dev_dbg(dev, "%s called\n", __func__);
+}
+
+struct bus_type dsa_bus_type = {
+ .name = "dsa",
+ .match = idxd_config_bus_match,
+ .probe = idxd_config_bus_probe,
+ .remove = idxd_config_bus_remove,
+ .shutdown = idxd_config_bus_shutdown,
+};
+
+static struct bus_type *idxd_bus_types[] = {
+ &dsa_bus_type
+};
+
+static struct idxd_device_driver dsa_drv = {
+ .drv = {
+ .name = "dsa",
+ .bus = &dsa_bus_type,
+ .owner = THIS_MODULE,
+ .mod_name = KBUILD_MODNAME,
+ },
+};
+
+static struct idxd_device_driver *idxd_drvs[] = {
+ &dsa_drv
+};
+
+struct bus_type *idxd_get_bus_type(struct idxd_device *idxd)
+{
+ return idxd_bus_types[idxd->type];
+}
+
+static struct device_type *idxd_get_device_type(struct idxd_device *idxd)
+{
+ if (idxd->type == IDXD_TYPE_DSA)
+ return &dsa_device_type;
+ else
+ return NULL;
+}
+
+/* IDXD generic driver setup */
+int idxd_register_driver(void)
+{
+ int i, rc;
+
+ for (i = 0; i < IDXD_TYPE_MAX; i++) {
+ rc = driver_register(&idxd_drvs[i]->drv);
+ if (rc < 0)
+ goto drv_fail;
+ }
+
+ return 0;
+
+drv_fail:
+ for (; i > 0; i--)
+ driver_unregister(&idxd_drvs[i]->drv);
+ return rc;
+}
+
+void idxd_unregister_driver(void)
+{
+ int i;
+
+ for (i = 0; i < IDXD_TYPE_MAX; i++)
+ driver_unregister(&idxd_drvs[i]->drv);
+}
+
+/* IDXD engine attributes */
+static ssize_t engine_group_id_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct idxd_engine *engine =
+ container_of(dev, struct idxd_engine, conf_dev);
+
+ if (engine->group)
+ return sprintf(buf, "%d\n", engine->group->id);
+ else
+ return sprintf(buf, "%d\n", -1);
+}
+
+static ssize_t engine_group_id_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct idxd_engine *engine =
+ container_of(dev, struct idxd_engine, conf_dev);
+ struct idxd_device *idxd = engine->idxd;
+ long id;
+ int rc;
+ struct idxd_group *prevg, *group;
+
+ rc = kstrtol(buf, 10, &id);
+ if (rc < 0)
+ return -EINVAL;
+
+ if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
+ return -EPERM;
+
+ if (id > idxd->max_groups - 1 || id < -1)
+ return -EINVAL;
+
+ if (id == -1) {
+ if (engine->group) {
+ engine->group->num_engines--;
+ engine->group = NULL;
+ }
+ return count;
+ }
+
+ group = &idxd->groups[id];
+ prevg = engine->group;
+
+ if (prevg)
+ prevg->num_engines--;
+ engine->group = &idxd->groups[id];
+ engine->group->num_engines++;
+
+ return count;
+}
+
+static struct device_attribute dev_attr_engine_group =
+ __ATTR(group_id, 0644, engine_group_id_show,
+ engine_group_id_store);
+
+static struct attribute *idxd_engine_attributes[] = {
+ &dev_attr_engine_group.attr,
+ NULL,
+};
+
+static const struct attribute_group idxd_engine_attribute_group = {
+ .attrs = idxd_engine_attributes,
+};
+
+static const struct attribute_group *idxd_engine_attribute_groups[] = {
+ &idxd_engine_attribute_group,
+ NULL,
+};
+
+/* Group attributes */
+
+static void idxd_set_free_tokens(struct idxd_device *idxd)
+{
+ int i, tokens;
+
+ for (i = 0, tokens = 0; i < idxd->max_groups; i++) {
+ struct idxd_group *g = &idxd->groups[i];
+
+ tokens += g->tokens_reserved;
+ }
+
+ idxd->nr_tokens = idxd->max_tokens - tokens;
+}
+
+static ssize_t group_tokens_reserved_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct idxd_group *group =
+ container_of(dev, struct idxd_group, conf_dev);
+
+ return sprintf(buf, "%u\n", group->tokens_reserved);
+}
+
+static ssize_t group_tokens_reserved_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct idxd_group *group =
+ container_of(dev, struct idxd_group, conf_dev);
+ struct idxd_device *idxd = group->idxd;
+ unsigned long val;
+ int rc;
+
+ rc = kstrtoul(buf, 10, &val);
+ if (rc < 0)
+ return -EINVAL;
+
+ if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
+ return -EPERM;
+
+ if (idxd->state == IDXD_DEV_ENABLED)
+ return -EPERM;
+
+ if (idxd->token_limit == 0)
+ return -EPERM;
+
+ if (val > idxd->max_tokens)
+ return -EINVAL;
+
+ if (val > idxd->nr_tokens)
+ return -EINVAL;
+
+ group->tokens_reserved = val;
+ idxd_set_free_tokens(idxd);
+ return count;
+}
+
+static struct device_attribute dev_attr_group_tokens_reserved =
+ __ATTR(tokens_reserved, 0644, group_tokens_reserved_show,
+ group_tokens_reserved_store);
+
+static ssize_t group_tokens_allowed_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct idxd_group *group =
+ container_of(dev, struct idxd_group, conf_dev);
+
+ return sprintf(buf, "%u\n", group->tokens_allowed);
+}
+
+static ssize_t group_tokens_allowed_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct idxd_group *group =
+ container_of(dev, struct idxd_group, conf_dev);
+ struct idxd_device *idxd = group->idxd;
+ unsigned long val;
+ int rc;
+
+ rc = kstrtoul(buf, 10, &val);
+ if (rc < 0)
+ return -EINVAL;
+
+ if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
+ return -EPERM;
+
+ if (idxd->state == IDXD_DEV_ENABLED)
+ return -EPERM;
+
+ if (idxd->token_limit == 0)
+ return -EPERM;
+ if (val < 4 * group->num_engines ||
+ val > group->tokens_reserved + idxd->nr_tokens)
+ return -EINVAL;
+
+ group->tokens_allowed = val;
+ return count;
+}
+
+static struct device_attribute dev_attr_group_tokens_allowed =
+ __ATTR(tokens_allowed, 0644, group_tokens_allowed_show,
+ group_tokens_allowed_store);
+
+static ssize_t group_use_token_limit_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct idxd_group *group =
+ container_of(dev, struct idxd_group, conf_dev);
+
+ return sprintf(buf, "%u\n", group->use_token_limit);
+}
+
+static ssize_t group_use_token_limit_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct idxd_group *group =
+ container_of(dev, struct idxd_group, conf_dev);
+ struct idxd_device *idxd = group->idxd;
+ unsigned long val;
+ int rc;
+
+ rc = kstrtoul(buf, 10, &val);
+ if (rc < 0)
+ return -EINVAL;
+
+ if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
+ return -EPERM;
+
+ if (idxd->state == IDXD_DEV_ENABLED)
+ return -EPERM;
+
+ if (idxd->token_limit == 0)
+ return -EPERM;
+
+ group->use_token_limit = !!val;
+ return count;
+}
+
+static struct device_attribute dev_attr_group_use_token_limit =
+ __ATTR(use_token_limit, 0644, group_use_token_limit_show,
+ group_use_token_limit_store);
+
+static ssize_t group_engines_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct idxd_group *group =
+ container_of(dev, struct idxd_group, conf_dev);
+ int i, rc = 0;
+ char *tmp = buf;
+ struct idxd_device *idxd = group->idxd;
+
+ for (i = 0; i < idxd->max_engines; i++) {
+ struct idxd_engine *engine = &idxd->engines[i];
+
+ if (!engine->group)
+ continue;
+
+ if (engine->group->id == group->id)
+ rc += sprintf(tmp + rc, "engine%d.%d ",
+ idxd->id, engine->id);
+ }
+
+ if (rc)
+ rc--; /* drop the trailing space */
+ rc += sprintf(tmp + rc, "\n");
+
+ return rc;
+}
+
+static struct device_attribute dev_attr_group_engines =
+ __ATTR(engines, 0444, group_engines_show, NULL);
+
+static ssize_t group_work_queues_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct idxd_group *group =
+ container_of(dev, struct idxd_group, conf_dev);
+ int i, rc = 0;
+ char *tmp = buf;
+ struct idxd_device *idxd = group->idxd;
+
+ for (i = 0; i < idxd->max_wqs; i++) {
+ struct idxd_wq *wq = &idxd->wqs[i];
+
+ if (!wq->group)
+ continue;
+
+ if (wq->group->id == group->id)
+ rc += sprintf(tmp + rc, "wq%d.%d ",
+ idxd->id, wq->id);
+ }
+
+ if (rc)
+ rc--; /* drop the trailing space */
+ rc += sprintf(tmp + rc, "\n");
+
+ return rc;
+}
+
+static struct device_attribute dev_attr_group_work_queues =
+ __ATTR(work_queues, 0444, group_work_queues_show, NULL);
+
+static ssize_t group_traffic_class_a_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct idxd_group *group =
+ container_of(dev, struct idxd_group, conf_dev);
+
+ return sprintf(buf, "%d\n", group->tc_a);
+}
+
+static ssize_t group_traffic_class_a_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct idxd_group *group =
+ container_of(dev, struct idxd_group, conf_dev);
+ struct idxd_device *idxd = group->idxd;
+ long val;
+ int rc;
+
+ rc = kstrtol(buf, 10, &val);
+ if (rc < 0)
+ return -EINVAL;
+
+ if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
+ return -EPERM;
+
+ if (idxd->state == IDXD_DEV_ENABLED)
+ return -EPERM;
+
+ if (val < 0 || val > 7)
+ return -EINVAL;
+
+ group->tc_a = val;
+ return count;
+}
+
+static struct device_attribute dev_attr_group_traffic_class_a =
+ __ATTR(traffic_class_a, 0644, group_traffic_class_a_show,
+ group_traffic_class_a_store);
+
+static ssize_t group_traffic_class_b_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct idxd_group *group =
+ container_of(dev, struct idxd_group, conf_dev);
+
+ return sprintf(buf, "%d\n", group->tc_b);
+}
+
+static ssize_t group_traffic_class_b_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct idxd_group *group =
+ container_of(dev, struct idxd_group, conf_dev);
+ struct idxd_device *idxd = group->idxd;
+ long val;
+ int rc;
+
+ rc = kstrtol(buf, 10, &val);
+ if (rc < 0)
+ return -EINVAL;
+
+ if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
+ return -EPERM;
+
+ if (idxd->state == IDXD_DEV_ENABLED)
+ return -EPERM;
+
+ if (val < 0 || val > 7)
+ return -EINVAL;
+
+ group->tc_b = val;
+ return count;
+}
+
+static struct device_attribute dev_attr_group_traffic_class_b =
+ __ATTR(traffic_class_b, 0644, group_traffic_class_b_show,
+ group_traffic_class_b_store);
+
+static struct attribute *idxd_group_attributes[] = {
+ &dev_attr_group_work_queues.attr,
+ &dev_attr_group_engines.attr,
+ &dev_attr_group_use_token_limit.attr,
+ &dev_attr_group_tokens_allowed.attr,
+ &dev_attr_group_tokens_reserved.attr,
+ &dev_attr_group_traffic_class_a.attr,
+ &dev_attr_group_traffic_class_b.attr,
+ NULL,
+};
+
+static const struct attribute_group idxd_group_attribute_group = {
+ .attrs = idxd_group_attributes,
+};
+
+static const struct attribute_group *idxd_group_attribute_groups[] = {
+ &idxd_group_attribute_group,
+ NULL,
+};
+
+/* IDXD work queue attribs */
+static ssize_t wq_clients_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);
+
+ return sprintf(buf, "%d\n", wq->client_count);
+}
+
+static struct device_attribute dev_attr_wq_clients =
+ __ATTR(clients, 0444, wq_clients_show, NULL);
+
+static ssize_t wq_state_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);
+
+ switch (wq->state) {
+ case IDXD_WQ_DISABLED:
+ return sprintf(buf, "disabled\n");
+ case IDXD_WQ_ENABLED:
+ return sprintf(buf, "enabled\n");
+ }
+
+ return sprintf(buf, "unknown\n");
+}
+
+static struct device_attribute dev_attr_wq_state =
+ __ATTR(state, 0444, wq_state_show, NULL);
+
+static ssize_t wq_group_id_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);
+
+ if (wq->group)
+ return sprintf(buf, "%u\n", wq->group->id);
+ else
+ return sprintf(buf, "-1\n");
+}
+
+static ssize_t wq_group_id_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);
+ struct idxd_device *idxd = wq->idxd;
+ long id;
+ int rc;
+ struct idxd_group *prevg, *group;
+
+ rc = kstrtol(buf, 10, &id);
+ if (rc < 0)
+ return -EINVAL;
+
+ if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
+ return -EPERM;
+
+ if (wq->state != IDXD_WQ_DISABLED)
+ return -EPERM;
+
+ if (id > idxd->max_groups - 1 || id < -1)
+ return -EINVAL;
+
+ if (id == -1) {
+ if (wq->group) {
+ wq->group->num_wqs--;
+ wq->group = NULL;
+ }
+ return count;
+ }
+
+ group = &idxd->groups[id];
+ prevg = wq->group;
+
+ if (prevg)
+ prevg->num_wqs--;
+ wq->group = group;
+ group->num_wqs++;
+ return count;
+}
+
+static struct device_attribute dev_attr_wq_group_id =
+ __ATTR(group_id, 0644, wq_group_id_show, wq_group_id_store);
+
+static ssize_t wq_mode_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);
+
+ return sprintf(buf, "%s\n",
+ wq_dedicated(wq) ? "dedicated" : "shared");
+}
+
+static ssize_t wq_mode_store(struct device *dev,
+ struct device_attribute *attr, const char *buf,
+ size_t count)
+{
+ struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);
+ struct idxd_device *idxd = wq->idxd;
+
+ if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
+ return -EPERM;
+
+ if (wq->state != IDXD_WQ_DISABLED)
+ return -EPERM;
+
+ if (sysfs_streq(buf, "dedicated")) {
+ set_bit(WQ_FLAG_DEDICATED, &wq->flags);
+ wq->threshold = 0;
+ } else {
+ return -EINVAL;
+ }
+
+ return count;
+}
+
+static struct device_attribute dev_attr_wq_mode =
+ __ATTR(mode, 0644, wq_mode_show, wq_mode_store);
+
+static ssize_t wq_size_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);
+
+ return sprintf(buf, "%u\n", wq->size);
+}
+
+static ssize_t wq_size_store(struct device *dev,
+ struct device_attribute *attr, const char *buf,
+ size_t count)
+{
+ struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);
+ unsigned long size;
+ struct idxd_device *idxd = wq->idxd;
+ int rc;
+
+ rc = kstrtoul(buf, 10, &size);
+ if (rc < 0)
+ return -EINVAL;
+
+ if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
+ return -EPERM;
+
+ if (wq->state != IDXD_WQ_DISABLED)
+ return -EPERM;
+
+ if (size > idxd->max_wq_size)
+ return -EINVAL;
+
+ wq->size = size;
+ return count;
+}
+
+static struct device_attribute dev_attr_wq_size =
+ __ATTR(size, 0644, wq_size_show, wq_size_store);
+
+static ssize_t wq_priority_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);
+
+ return sprintf(buf, "%u\n", wq->priority);
+}
+
+static ssize_t wq_priority_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);
+ unsigned long prio;
+ struct idxd_device *idxd = wq->idxd;
+ int rc;
+
+ rc = kstrtoul(buf, 10, &prio);
+ if (rc < 0)
+ return -EINVAL;
+
+ if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
+ return -EPERM;
+
+ if (wq->state != IDXD_WQ_DISABLED)
+ return -EPERM;
+
+ if (prio > IDXD_MAX_PRIORITY)
+ return -EINVAL;
+
+ wq->priority = prio;
+ return count;
+}
+
+static struct device_attribute dev_attr_wq_priority =
+ __ATTR(priority, 0644, wq_priority_show, wq_priority_store);
+
+static ssize_t wq_type_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);
+
+ switch (wq->type) {
+ case IDXD_WQT_KERNEL:
+ return sprintf(buf, "%s\n",
+ idxd_wq_type_names[IDXD_WQT_KERNEL]);
+ case IDXD_WQT_USER:
+ return sprintf(buf, "%s\n",
+ idxd_wq_type_names[IDXD_WQT_USER]);
+ case IDXD_WQT_NONE:
+ default:
+ return sprintf(buf, "%s\n",
+ idxd_wq_type_names[IDXD_WQT_NONE]);
+ }
+}
+
+static ssize_t wq_type_store(struct device *dev,
+ struct device_attribute *attr, const char *buf,
+ size_t count)
+{
+ struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);
+ enum idxd_wq_type old_type;
+
+ if (wq->state != IDXD_WQ_DISABLED)
+ return -EPERM;
+
+ old_type = wq->type;
+ if (sysfs_streq(buf, idxd_wq_type_names[IDXD_WQT_KERNEL]))
+ wq->type = IDXD_WQT_KERNEL;
+ else if (sysfs_streq(buf, idxd_wq_type_names[IDXD_WQT_USER]))
+ wq->type = IDXD_WQT_USER;
+ else
+ wq->type = IDXD_WQT_NONE;
+
+ /* If we are changing queue type, clear the name */
+ if (wq->type != old_type)
+ memset(wq->name, 0, WQ_NAME_SIZE + 1);
+
+ return count;
+}
+
+static struct device_attribute dev_attr_wq_type =
+ __ATTR(type, 0644, wq_type_show, wq_type_store);
+
+static ssize_t wq_name_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);
+
+ return sprintf(buf, "%s\n", wq->name);
+}
+
+static ssize_t wq_name_store(struct device *dev,
+ struct device_attribute *attr, const char *buf,
+ size_t count)
+{
+ struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);
+
+ if (wq->state != IDXD_WQ_DISABLED)
+ return -EPERM;
+
+ if (strlen(buf) > WQ_NAME_SIZE || strlen(buf) == 0)
+ return -EINVAL;
+
+ memset(wq->name, 0, WQ_NAME_SIZE + 1);
+ strncpy(wq->name, buf, WQ_NAME_SIZE);
+ strreplace(wq->name, '\n', '\0');
+ return count;
+}
+
+static struct device_attribute dev_attr_wq_name =
+ __ATTR(name, 0644, wq_name_show, wq_name_store);
+
+static ssize_t wq_cdev_minor_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);
+
+ return sprintf(buf, "%d\n", wq->idxd_cdev.minor);
+}
+
+static struct device_attribute dev_attr_wq_cdev_minor =
+ __ATTR(cdev_minor, 0444, wq_cdev_minor_show, NULL);
+
+static struct attribute *idxd_wq_attributes[] = {
+ &dev_attr_wq_clients.attr,
+ &dev_attr_wq_state.attr,
+ &dev_attr_wq_group_id.attr,
+ &dev_attr_wq_mode.attr,
+ &dev_attr_wq_size.attr,
+ &dev_attr_wq_priority.attr,
+ &dev_attr_wq_type.attr,
+ &dev_attr_wq_name.attr,
+ &dev_attr_wq_cdev_minor.attr,
+ NULL,
+};
+
+static const struct attribute_group idxd_wq_attribute_group = {
+ .attrs = idxd_wq_attributes,
+};
+
+static const struct attribute_group *idxd_wq_attribute_groups[] = {
+ &idxd_wq_attribute_group,
+ NULL,
+};
+
+/* IDXD device attribs */
+static ssize_t max_work_queues_size_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct idxd_device *idxd =
+ container_of(dev, struct idxd_device, conf_dev);
+
+ return sprintf(buf, "%u\n", idxd->max_wq_size);
+}
+static DEVICE_ATTR_RO(max_work_queues_size);
+
+static ssize_t max_groups_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct idxd_device *idxd =
+ container_of(dev, struct idxd_device, conf_dev);
+
+ return sprintf(buf, "%u\n", idxd->max_groups);
+}
+static DEVICE_ATTR_RO(max_groups);
+
+static ssize_t max_work_queues_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct idxd_device *idxd =
+ container_of(dev, struct idxd_device, conf_dev);
+
+ return sprintf(buf, "%u\n", idxd->max_wqs);
+}
+static DEVICE_ATTR_RO(max_work_queues);
+
+static ssize_t max_engines_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct idxd_device *idxd =
+ container_of(dev, struct idxd_device, conf_dev);
+
+ return sprintf(buf, "%u\n", idxd->max_engines);
+}
+static DEVICE_ATTR_RO(max_engines);
+
+static ssize_t numa_node_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct idxd_device *idxd =
+ container_of(dev, struct idxd_device, conf_dev);
+
+ return sprintf(buf, "%d\n", dev_to_node(&idxd->pdev->dev));
+}
+static DEVICE_ATTR_RO(numa_node);
+
+static ssize_t max_batch_size_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct idxd_device *idxd =
+ container_of(dev, struct idxd_device, conf_dev);
+
+ return sprintf(buf, "%u\n", idxd->max_batch_size);
+}
+static DEVICE_ATTR_RO(max_batch_size);
+
+static ssize_t max_transfer_size_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct idxd_device *idxd =
+ container_of(dev, struct idxd_device, conf_dev);
+
+ return sprintf(buf, "%llu\n", idxd->max_xfer_bytes);
+}
+static DEVICE_ATTR_RO(max_transfer_size);
+
+static ssize_t op_cap_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct idxd_device *idxd =
+ container_of(dev, struct idxd_device, conf_dev);
+
+ return sprintf(buf, "%#llx\n", idxd->hw.opcap.bits[0]);
+}
+static DEVICE_ATTR_RO(op_cap);
+
+static ssize_t configurable_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct idxd_device *idxd =
+ container_of(dev, struct idxd_device, conf_dev);
+
+ return sprintf(buf, "%u\n",
+ test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags));
+}
+static DEVICE_ATTR_RO(configurable);
+
+static ssize_t clients_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct idxd_device *idxd =
+ container_of(dev, struct idxd_device, conf_dev);
+ unsigned long flags;
+ int count = 0, i;
+
+ spin_lock_irqsave(&idxd->dev_lock, flags);
+ for (i = 0; i < idxd->max_wqs; i++) {
+ struct idxd_wq *wq = &idxd->wqs[i];
+
+ count += wq->client_count;
+ }
+ spin_unlock_irqrestore(&idxd->dev_lock, flags);
+
+ return sprintf(buf, "%d\n", count);
+}
+static DEVICE_ATTR_RO(clients);
+
+static ssize_t state_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct idxd_device *idxd =
+ container_of(dev, struct idxd_device, conf_dev);
+
+ switch (idxd->state) {
+ case IDXD_DEV_DISABLED:
+ case IDXD_DEV_CONF_READY:
+ return sprintf(buf, "disabled\n");
+ case IDXD_DEV_ENABLED:
+ return sprintf(buf, "enabled\n");
+ case IDXD_DEV_HALTED:
+ return sprintf(buf, "halted\n");
+ }
+
+ return sprintf(buf, "unknown\n");
+}
+static DEVICE_ATTR_RO(state);
+
+static ssize_t errors_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct idxd_device *idxd =
+ container_of(dev, struct idxd_device, conf_dev);
+ int i, out = 0;
+ unsigned long flags;
+
+ spin_lock_irqsave(&idxd->dev_lock, flags);
+ for (i = 0; i < 4; i++)
+ out += sprintf(buf + out, "%#018llx ", idxd->sw_err.bits[i]);
+ spin_unlock_irqrestore(&idxd->dev_lock, flags);
+ out--;
+ out += sprintf(buf + out, "\n");
+ return out;
+}
+static DEVICE_ATTR_RO(errors);
+
+static ssize_t max_tokens_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct idxd_device *idxd =
+ container_of(dev, struct idxd_device, conf_dev);
+
+ return sprintf(buf, "%u\n", idxd->max_tokens);
+}
+static DEVICE_ATTR_RO(max_tokens);
+
+static ssize_t token_limit_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct idxd_device *idxd =
+ container_of(dev, struct idxd_device, conf_dev);
+
+ return sprintf(buf, "%u\n", idxd->token_limit);
+}
+
+static ssize_t token_limit_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct idxd_device *idxd =
+ container_of(dev, struct idxd_device, conf_dev);
+ unsigned long val;
+ int rc;
+
+ rc = kstrtoul(buf, 10, &val);
+ if (rc < 0)
+ return -EINVAL;
+
+ if (idxd->state == IDXD_DEV_ENABLED)
+ return -EPERM;
+
+ if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
+ return -EPERM;
+
+ if (!idxd->hw.group_cap.token_limit)
+ return -EPERM;
+
+ if (val > idxd->hw.group_cap.total_tokens)
+ return -EINVAL;
+
+ idxd->token_limit = val;
+ return count;
+}
+static DEVICE_ATTR_RW(token_limit);
+
+static ssize_t cdev_major_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct idxd_device *idxd =
+ container_of(dev, struct idxd_device, conf_dev);
+
+ return sprintf(buf, "%u\n", idxd->major);
+}
+static DEVICE_ATTR_RO(cdev_major);
+
+static struct attribute *idxd_device_attributes[] = {
+ &dev_attr_max_groups.attr,
+ &dev_attr_max_work_queues.attr,
+ &dev_attr_max_work_queues_size.attr,
+ &dev_attr_max_engines.attr,
+ &dev_attr_numa_node.attr,
+ &dev_attr_max_batch_size.attr,
+ &dev_attr_max_transfer_size.attr,
+ &dev_attr_op_cap.attr,
+ &dev_attr_configurable.attr,
+ &dev_attr_clients.attr,
+ &dev_attr_state.attr,
+ &dev_attr_errors.attr,
+ &dev_attr_max_tokens.attr,
+ &dev_attr_token_limit.attr,
+ &dev_attr_cdev_major.attr,
+ NULL,
+};
+
+static const struct attribute_group idxd_device_attribute_group = {
+ .attrs = idxd_device_attributes,
+};
+
+static const struct attribute_group *idxd_attribute_groups[] = {
+ &idxd_device_attribute_group,
+ NULL,
+};
+
+static int idxd_setup_engine_sysfs(struct idxd_device *idxd)
+{
+ struct device *dev = &idxd->pdev->dev;
+ int i, rc;
+
+ for (i = 0; i < idxd->max_engines; i++) {
+ struct idxd_engine *engine = &idxd->engines[i];
+
+ engine->conf_dev.parent = &idxd->conf_dev;
+ dev_set_name(&engine->conf_dev, "engine%d.%d",
+ idxd->id, engine->id);
+ engine->conf_dev.bus = idxd_get_bus_type(idxd);
+ engine->conf_dev.groups = idxd_engine_attribute_groups;
+ engine->conf_dev.type = &idxd_engine_device_type;
+ dev_dbg(dev, "Engine device register: %s\n",
+ dev_name(&engine->conf_dev));
+ rc = device_register(&engine->conf_dev);
+ if (rc < 0) {
+ put_device(&engine->conf_dev);
+ goto cleanup;
+ }
+ }
+
+ return 0;
+
+cleanup:
+ while (i--) {
+ struct idxd_engine *engine = &idxd->engines[i];
+
+ device_unregister(&engine->conf_dev);
+ }
+ return rc;
+}
+
+static int idxd_setup_group_sysfs(struct idxd_device *idxd)
+{
+ struct device *dev = &idxd->pdev->dev;
+ int i, rc;
+
+ for (i = 0; i < idxd->max_groups; i++) {
+ struct idxd_group *group = &idxd->groups[i];
+
+ group->conf_dev.parent = &idxd->conf_dev;
+ dev_set_name(&group->conf_dev, "group%d.%d",
+ idxd->id, group->id);
+ group->conf_dev.bus = idxd_get_bus_type(idxd);
+ group->conf_dev.groups = idxd_group_attribute_groups;
+ group->conf_dev.type = &idxd_group_device_type;
+ dev_dbg(dev, "Group device register: %s\n",
+ dev_name(&group->conf_dev));
+ rc = device_register(&group->conf_dev);
+ if (rc < 0) {
+ put_device(&group->conf_dev);
+ goto cleanup;
+ }
+ }
+
+ return 0;
+
+cleanup:
+ while (i--) {
+ struct idxd_group *group = &idxd->groups[i];
+
+ device_unregister(&group->conf_dev);
+ }
+ return rc;
+}
+
+static int idxd_setup_wq_sysfs(struct idxd_device *idxd)
+{
+ struct device *dev = &idxd->pdev->dev;
+ int i, rc;
+
+ for (i = 0; i < idxd->max_wqs; i++) {
+ struct idxd_wq *wq = &idxd->wqs[i];
+
+ wq->conf_dev.parent = &idxd->conf_dev;
+ dev_set_name(&wq->conf_dev, "wq%d.%d", idxd->id, wq->id);
+ wq->conf_dev.bus = idxd_get_bus_type(idxd);
+ wq->conf_dev.groups = idxd_wq_attribute_groups;
+ wq->conf_dev.type = &idxd_wq_device_type;
+ dev_dbg(dev, "WQ device register: %s\n",
+ dev_name(&wq->conf_dev));
+ rc = device_register(&wq->conf_dev);
+ if (rc < 0) {
+ put_device(&wq->conf_dev);
+ goto cleanup;
+ }
+ }
+
+ return 0;
+
+cleanup:
+ while (i--) {
+ struct idxd_wq *wq = &idxd->wqs[i];
+
+ device_unregister(&wq->conf_dev);
+ }
+ return rc;
+}
+
+static int idxd_setup_device_sysfs(struct idxd_device *idxd)
+{
+ struct device *dev = &idxd->pdev->dev;
+ int rc;
+ char devname[IDXD_NAME_SIZE];
+
+ sprintf(devname, "%s%d", idxd_get_dev_name(idxd), idxd->id);
+ idxd->conf_dev.parent = dev;
+ dev_set_name(&idxd->conf_dev, "%s", devname);
+ idxd->conf_dev.bus = idxd_get_bus_type(idxd);
+ idxd->conf_dev.groups = idxd_attribute_groups;
+ idxd->conf_dev.type = idxd_get_device_type(idxd);
+
+ dev_dbg(dev, "IDXD device register: %s\n", dev_name(&idxd->conf_dev));
+ rc = device_register(&idxd->conf_dev);
+ if (rc < 0) {
+ put_device(&idxd->conf_dev);
+ return rc;
+ }
+
+ return 0;
+}
+
+int idxd_setup_sysfs(struct idxd_device *idxd)
+{
+ struct device *dev = &idxd->pdev->dev;
+ int rc;
+
+ rc = idxd_setup_device_sysfs(idxd);
+ if (rc < 0) {
+ dev_dbg(dev, "Device sysfs registering failed: %d\n", rc);
+ return rc;
+ }
+
+ rc = idxd_setup_wq_sysfs(idxd);
+ if (rc < 0) {
+ /* unregister conf dev */
+ dev_dbg(dev, "Work Queue sysfs registering failed: %d\n", rc);
+ return rc;
+ }
+
+ rc = idxd_setup_group_sysfs(idxd);
+ if (rc < 0) {
+ /* unregister conf dev */
+ dev_dbg(dev, "Group sysfs registering failed: %d\n", rc);
+ return rc;
+ }
+
+ rc = idxd_setup_engine_sysfs(idxd);
+ if (rc < 0) {
+ /* unregister conf dev */
+ dev_dbg(dev, "Engine sysfs registering failed: %d\n", rc);
+ return rc;
+ }
+
+ return 0;
+}
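
The /* unregister conf dev */ comments above mark unwind paths that are not implemented yet: a failure partway through leaves the earlier registrations in place. The conventional shape is goto-based unwinding; a sketch with hypothetical per-stage teardown helpers (idxd_cleanup_wq_sysfs() and idxd_cleanup_group_sysfs() do not exist in this patch):

    rc = idxd_setup_wq_sysfs(idxd);
    if (rc < 0)
            goto err_wq;
    rc = idxd_setup_group_sysfs(idxd);
    if (rc < 0)
            goto err_group;
    rc = idxd_setup_engine_sysfs(idxd);
    if (rc < 0)
            goto err_engine;
    return 0;

    err_engine:
            idxd_cleanup_group_sysfs(idxd); /* hypothetical */
    err_group:
            idxd_cleanup_wq_sysfs(idxd); /* hypothetical */
    err_wq:
            device_unregister(&idxd->conf_dev);
            return rc;
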
+
+void idxd_cleanup_sysfs(struct idxd_device *idxd)
+{
+ int i;
+
+ for (i = 0; i < idxd->max_wqs; i++) {
+ struct idxd_wq *wq = &idxd->wqs[i];
+
+ device_unregister(&wq->conf_dev);
+ }
+
+ for (i = 0; i < idxd->max_engines; i++) {
+ struct idxd_engine *engine = &idxd->engines[i];
+
+ device_unregister(&engine->conf_dev);
+ }
+
+ for (i = 0; i < idxd->max_groups; i++) {
+ struct idxd_group *group = &idxd->groups[i];
+
+ device_unregister(&group->conf_dev);
+ }
+
+ device_unregister(&idxd->conf_dev);
+}
+
+int idxd_register_bus_type(void)
+{
+ int i, rc;
+
+ for (i = 0; i < IDXD_TYPE_MAX; i++) {
+ rc = bus_register(idxd_bus_types[i]);
+ if (rc < 0)
+ goto bus_err;
+ }
+
+ return 0;
+
+bus_err:
+ for (; i > 0; i--)
+ bus_unregister(idxd_bus_types[i]);
+ return rc;
+}
+
+void idxd_unregister_bus_type(void)
+{
+ int i;
+
+ for (i = 0; i < IDXD_TYPE_MAX; i++)
+ bus_unregister(idxd_bus_types[i]);
+}
diff --git a/drivers/dma/imx-sdma.c b/drivers/dma/imx-sdma.c
index c27e206a764c..066b21a32232 100644
--- a/drivers/dma/imx-sdma.c
+++ b/drivers/dma/imx-sdma.c
@@ -760,12 +760,8 @@ static void sdma_start_desc(struct sdma_channel *sdmac)
return;
}
sdmac->desc = desc = to_sdma_desc(&vd->tx);
- /*
- * Do not delete the node in desc_issued list in cyclic mode, otherwise
- * the desc allocated will never be freed in vchan_dma_desc_free_list
- */
- if (!(sdmac->flags & IMX_DMA_SG_LOOP))
- list_del(&vd->node);
+
+ list_del(&vd->node);
sdma->channel_control[channel].base_bd_ptr = desc->bd_phys;
sdma->channel_control[channel].current_bd_ptr = desc->bd_phys;
@@ -1071,20 +1067,27 @@ static void sdma_channel_terminate_work(struct work_struct *work)
spin_lock_irqsave(&sdmac->vc.lock, flags);
vchan_get_all_descriptors(&sdmac->vc, &head);
- sdmac->desc = NULL;
spin_unlock_irqrestore(&sdmac->vc.lock, flags);
vchan_dma_desc_free_list(&sdmac->vc, &head);
sdmac->context_loaded = false;
}
-static int sdma_disable_channel_async(struct dma_chan *chan)
+static int sdma_terminate_all(struct dma_chan *chan)
{
struct sdma_channel *sdmac = to_sdma_chan(chan);
+ unsigned long flags;
+
+ spin_lock_irqsave(&sdmac->vc.lock, flags);
sdma_disable_channel(chan);
- if (sdmac->desc)
+ if (sdmac->desc) {
+ vchan_terminate_vdesc(&sdmac->desc->vd);
+ sdmac->desc = NULL;
schedule_work(&sdmac->terminate_worker);
+ }
+
+ spin_unlock_irqrestore(&sdmac->vc.lock, flags);
return 0;
}
@@ -1324,7 +1327,7 @@ static void sdma_free_chan_resources(struct dma_chan *chan)
struct sdma_channel *sdmac = to_sdma_chan(chan);
struct sdma_engine *sdma = sdmac->sdma;
- sdma_disable_channel_async(chan);
+ sdma_terminate_all(chan);
sdma_channel_synchronize(chan);
@@ -1648,7 +1651,7 @@ static enum dma_status sdma_tx_status(struct dma_chan *chan,
struct dma_tx_state *txstate)
{
struct sdma_channel *sdmac = to_sdma_chan(chan);
- struct sdma_desc *desc;
+ struct sdma_desc *desc = NULL;
u32 residue;
struct virt_dma_desc *vd;
enum dma_status ret;
@@ -1659,19 +1662,23 @@ static enum dma_status sdma_tx_status(struct dma_chan *chan,
return ret;
spin_lock_irqsave(&sdmac->vc.lock, flags);
+
vd = vchan_find_desc(&sdmac->vc, cookie);
- if (vd) {
+ if (vd)
desc = to_sdma_desc(&vd->tx);
+ else if (sdmac->desc && sdmac->desc->vd.tx.cookie == cookie)
+ desc = sdmac->desc;
+
+ if (desc) {
if (sdmac->flags & IMX_DMA_SG_LOOP)
residue = (desc->num_bd - desc->buf_ptail) *
desc->period_len - desc->chn_real_count;
else
residue = desc->chn_count - desc->chn_real_count;
- } else if (sdmac->desc && sdmac->desc->vd.tx.cookie == cookie) {
- residue = sdmac->desc->chn_count - sdmac->desc->chn_real_count;
} else {
residue = 0;
}
+
spin_unlock_irqrestore(&sdmac->vc.lock, flags);
dma_set_tx_state(txstate, chan->completed_cookie, chan->cookie,
@@ -2103,7 +2110,7 @@ static int sdma_probe(struct platform_device *pdev)
sdma->dma_device.device_prep_slave_sg = sdma_prep_slave_sg;
sdma->dma_device.device_prep_dma_cyclic = sdma_prep_dma_cyclic;
sdma->dma_device.device_config = sdma_config;
- sdma->dma_device.device_terminate_all = sdma_disable_channel_async;
+ sdma->dma_device.device_terminate_all = sdma_terminate_all;
sdma->dma_device.device_synchronize = sdma_channel_synchronize;
sdma->dma_device.src_addr_widths = SDMA_DMA_BUSWIDTHS;
sdma->dma_device.dst_addr_widths = SDMA_DMA_BUSWIDTHS;
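The sdma_terminate_all() rework above follows the virt-dma termination contract: the in-flight descriptor is detached under vc.lock with vchan_terminate_vdesc(), which hands it to the virt-dma core for deferred freeing, while the heavier cleanup runs later from the terminate worker. A sketch of the contract, assuming a driver channel c that embeds a struct virt_dma_chan vc and caches the active descriptor in c->desc:

	spin_lock_irqsave(&c->vc.lock, flags);
	/* quiesce the hardware first */
	if (c->desc) {
		vchan_terminate_vdesc(&c->desc->vd); /* deferred free by virt-dma */
		c->desc = NULL;
	}
	spin_unlock_irqrestore(&c->vc.lock, flags);

	/* later, from the device_synchronize callback */
	vchan_synchronize(&c->vc);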
diff --git a/drivers/dma/ioat/init.c b/drivers/dma/ioat/init.c
index a6a6dc432db8..60e9afbb896c 100644
--- a/drivers/dma/ioat/init.c
+++ b/drivers/dma/ioat/init.c
@@ -556,10 +556,6 @@ static void ioat_dma_remove(struct ioatdma_device *ioat_dma)
ioat_kobject_del(ioat_dma);
dma_async_device_unregister(dma);
-
- dma_pool_destroy(ioat_dma->completion_pool);
-
- INIT_LIST_HEAD(&dma->channels);
}
/**
@@ -589,7 +585,7 @@ static void ioat_enumerate_channels(struct ioatdma_device *ioat_dma)
dev_dbg(dev, "%s: xfercap = %d\n", __func__, 1 << xfercap_log);
for (i = 0; i < dma->chancnt; i++) {
- ioat_chan = devm_kzalloc(dev, sizeof(*ioat_chan), GFP_KERNEL);
+ ioat_chan = kzalloc(sizeof(*ioat_chan), GFP_KERNEL);
if (!ioat_chan)
break;
@@ -624,12 +620,16 @@ static void ioat_free_chan_resources(struct dma_chan *c)
return;
ioat_stop(ioat_chan);
- ioat_reset_hw(ioat_chan);
- /* Put LTR to idle */
- if (ioat_dma->version >= IOAT_VER_3_4)
- writeb(IOAT_CHAN_LTR_SWSEL_IDLE,
- ioat_chan->reg_base + IOAT_CHAN_LTR_SWSEL_OFFSET);
+ if (!test_bit(IOAT_CHAN_DOWN, &ioat_chan->state)) {
+ ioat_reset_hw(ioat_chan);
+
+ /* Put LTR to idle */
+ if (ioat_dma->version >= IOAT_VER_3_4)
+ writeb(IOAT_CHAN_LTR_SWSEL_IDLE,
+ ioat_chan->reg_base +
+ IOAT_CHAN_LTR_SWSEL_OFFSET);
+ }
spin_lock_bh(&ioat_chan->cleanup_lock);
spin_lock_bh(&ioat_chan->prep_lock);
@@ -1322,16 +1322,28 @@ static struct pci_driver ioat_pci_driver = {
.err_handler = &ioat_err_handler,
};
+static void release_ioatdma(struct dma_device *device)
+{
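+ /* runs only when the last reference to the dma_device is dropped, which may be well after ioat_remove() */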
+ struct ioatdma_device *d = to_ioatdma_device(device);
+ int i;
+
+ for (i = 0; i < IOAT_MAX_CHANS; i++)
+ kfree(d->idx[i]);
+
+ dma_pool_destroy(d->completion_pool);
+ kfree(d);
+}
+
static struct ioatdma_device *
alloc_ioatdma(struct pci_dev *pdev, void __iomem *iobase)
{
- struct device *dev = &pdev->dev;
- struct ioatdma_device *d = devm_kzalloc(dev, sizeof(*d), GFP_KERNEL);
+ struct ioatdma_device *d = kzalloc(sizeof(*d), GFP_KERNEL);
if (!d)
return NULL;
d->pdev = pdev;
d->reg_base = iobase;
+ d->dma_dev.device_release = release_ioatdma;
return d;
}
@@ -1400,6 +1412,8 @@ static void ioat_remove(struct pci_dev *pdev)
if (!device)
return;
+ ioat_shutdown(pdev);
+
dev_err(&pdev->dev, "Removing dma and dca services\n");
if (device->dca) {
unregister_dca_provider(device->dca, &pdev->dev);
diff --git a/drivers/dma/mediatek/mtk-uart-apdma.c b/drivers/dma/mediatek/mtk-uart-apdma.c
index c20e6bd4e298..29f1223b285a 100644
--- a/drivers/dma/mediatek/mtk-uart-apdma.c
+++ b/drivers/dma/mediatek/mtk-uart-apdma.c
@@ -430,9 +430,10 @@ static int mtk_uart_apdma_terminate_all(struct dma_chan *chan)
spin_lock_irqsave(&c->vc.lock, flags);
vchan_get_all_descriptors(&c->vc, &head);
- vchan_dma_desc_free_list(&c->vc, &head);
spin_unlock_irqrestore(&c->vc.lock, flags);
+ vchan_dma_desc_free_list(&c->vc, &head);
+
return 0;
}
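The same reordering recurs below for owl-dma, sf-pdma and sun4i-dma: the descriptor list is detached while vc.lock is held, but vchan_dma_desc_free_list() runs only after the lock is dropped, since the free callbacks may recurse into the driver (for example to resubmit a descriptor) and take the lock again. The resulting shape, sketched for a generic virt-dma channel c:

	spin_lock_irqsave(&c->vc.lock, flags);
	vchan_get_all_descriptors(&c->vc, &head);
	spin_unlock_irqrestore(&c->vc.lock, flags);

	/* desc_free callbacks run here, safely outside vc.lock */
	vchan_dma_desc_free_list(&c->vc, &head);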
diff --git a/drivers/dma/of-dma.c b/drivers/dma/of-dma.c
index c2d779daa4b5..b2c2b5e8093c 100644
--- a/drivers/dma/of-dma.c
+++ b/drivers/dma/of-dma.c
@@ -15,6 +15,8 @@
#include <linux/of.h>
#include <linux/of_dma.h>
+#include "dmaengine.h"
+
static LIST_HEAD(of_dma_list);
static DEFINE_MUTEX(of_dma_lock);
diff --git a/drivers/dma/owl-dma.c b/drivers/dma/owl-dma.c
index 023f951189a7..c683051257fd 100644
--- a/drivers/dma/owl-dma.c
+++ b/drivers/dma/owl-dma.c
@@ -674,10 +674,11 @@ static int owl_dma_terminate_all(struct dma_chan *chan)
}
vchan_get_all_descriptors(&vchan->vc, &head);
- vchan_dma_desc_free_list(&vchan->vc, &head);
spin_unlock_irqrestore(&vchan->vc.lock, flags);
+ vchan_dma_desc_free_list(&vchan->vc, &head);
+
return 0;
}
diff --git a/drivers/dma/pl330.c b/drivers/dma/pl330.c
index 6cce9ef61b29..88b884cbb7c1 100644
--- a/drivers/dma/pl330.c
+++ b/drivers/dma/pl330.c
@@ -2961,12 +2961,7 @@ static int __maybe_unused pl330_suspend(struct device *dev)
{
struct amba_device *pcdev = to_amba_device(dev);
- pm_runtime_disable(dev);
-
- if (!pm_runtime_status_suspended(dev)) {
- /* amba did not disable the clock */
- amba_pclk_disable(pcdev);
- }
+ pm_runtime_force_suspend(dev);
amba_pclk_unprepare(pcdev);
return 0;
@@ -2981,15 +2976,14 @@ static int __maybe_unused pl330_resume(struct device *dev)
if (ret)
return ret;
- if (!pm_runtime_status_suspended(dev))
- ret = amba_pclk_enable(pcdev);
-
- pm_runtime_enable(dev);
+ pm_runtime_force_resume(dev);
return ret;
}
-static SIMPLE_DEV_PM_OPS(pl330_pm, pl330_suspend, pl330_resume);
+static const struct dev_pm_ops pl330_pm = {
+ SET_LATE_SYSTEM_SLEEP_PM_OPS(pl330_suspend, pl330_resume)
+};
static int
pl330_probe(struct amba_device *adev, const struct amba_id *id)
diff --git a/drivers/dma/plx_dma.c b/drivers/dma/plx_dma.c
new file mode 100644
index 000000000000..db4c5fd453a9
--- /dev/null
+++ b/drivers/dma/plx_dma.c
@@ -0,0 +1,639 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * PLX ExpressLane PEX PCI Switch DMA Engine driver
+ * Copyright (c) 2019, Logan Gunthorpe <logang@deltatee.com>
+ * Copyright (c) 2019, GigaIO Networks, Inc
+ */
+
+#include "dmaengine.h"
+
+#include <linux/circ_buf.h>
+#include <linux/dmaengine.h>
+#include <linux/kref.h>
+#include <linux/list.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+
+MODULE_DESCRIPTION("PLX ExpressLane PEX PCI Switch DMA Engine");
+MODULE_VERSION("0.1");
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Logan Gunthorpe");
+
+#define PLX_REG_DESC_RING_ADDR 0x214
+#define PLX_REG_DESC_RING_ADDR_HI 0x218
+#define PLX_REG_DESC_RING_NEXT_ADDR 0x21C
+#define PLX_REG_DESC_RING_COUNT 0x220
+#define PLX_REG_DESC_RING_LAST_ADDR 0x224
+#define PLX_REG_DESC_RING_LAST_SIZE 0x228
+#define PLX_REG_PREF_LIMIT 0x234
+#define PLX_REG_CTRL 0x238
+#define PLX_REG_CTRL2 0x23A
+#define PLX_REG_INTR_CTRL 0x23C
+#define PLX_REG_INTR_STATUS 0x23E
+
+#define PLX_REG_PREF_LIMIT_PREF_FOUR 8
+
+#define PLX_REG_CTRL_GRACEFUL_PAUSE BIT(0)
+#define PLX_REG_CTRL_ABORT BIT(1)
+#define PLX_REG_CTRL_WRITE_BACK_EN BIT(2)
+#define PLX_REG_CTRL_START BIT(3)
+#define PLX_REG_CTRL_RING_STOP_MODE BIT(4)
+#define PLX_REG_CTRL_DESC_MODE_BLOCK (0 << 5)
+#define PLX_REG_CTRL_DESC_MODE_ON_CHIP (1 << 5)
+#define PLX_REG_CTRL_DESC_MODE_OFF_CHIP (2 << 5)
+#define PLX_REG_CTRL_DESC_INVALID BIT(8)
+#define PLX_REG_CTRL_GRACEFUL_PAUSE_DONE BIT(9)
+#define PLX_REG_CTRL_ABORT_DONE BIT(10)
+#define PLX_REG_CTRL_IMM_PAUSE_DONE BIT(12)
+#define PLX_REG_CTRL_IN_PROGRESS BIT(30)
+
+#define PLX_REG_CTRL_RESET_VAL (PLX_REG_CTRL_DESC_INVALID | \
+ PLX_REG_CTRL_GRACEFUL_PAUSE_DONE | \
+ PLX_REG_CTRL_ABORT_DONE | \
+ PLX_REG_CTRL_IMM_PAUSE_DONE)
+
+#define PLX_REG_CTRL_START_VAL (PLX_REG_CTRL_WRITE_BACK_EN | \
+ PLX_REG_CTRL_DESC_MODE_OFF_CHIP | \
+ PLX_REG_CTRL_START | \
+ PLX_REG_CTRL_RESET_VAL)
+
+#define PLX_REG_CTRL2_MAX_TXFR_SIZE_64B 0
+#define PLX_REG_CTRL2_MAX_TXFR_SIZE_128B 1
+#define PLX_REG_CTRL2_MAX_TXFR_SIZE_256B 2
+#define PLX_REG_CTRL2_MAX_TXFR_SIZE_512B 3
+#define PLX_REG_CTRL2_MAX_TXFR_SIZE_1KB 4
+#define PLX_REG_CTRL2_MAX_TXFR_SIZE_2KB 5
+#define PLX_REG_CTRL2_MAX_TXFR_SIZE_4B 7
+
+#define PLX_REG_INTR_CRTL_ERROR_EN BIT(0)
+#define PLX_REG_INTR_CRTL_INV_DESC_EN BIT(1)
+#define PLX_REG_INTR_CRTL_ABORT_DONE_EN BIT(3)
+#define PLX_REG_INTR_CRTL_PAUSE_DONE_EN BIT(4)
+#define PLX_REG_INTR_CRTL_IMM_PAUSE_DONE_EN BIT(5)
+
+#define PLX_REG_INTR_STATUS_ERROR BIT(0)
+#define PLX_REG_INTR_STATUS_INV_DESC BIT(1)
+#define PLX_REG_INTR_STATUS_DESC_DONE BIT(2)
+#define PLX_REG_INTR_CRTL_ABORT_DONE BIT(3)
+
+struct plx_dma_hw_std_desc {
+ __le32 flags_and_size;
+ __le16 dst_addr_hi;
+ __le16 src_addr_hi;
+ __le32 dst_addr_lo;
+ __le32 src_addr_lo;
+};
+
+#define PLX_DESC_SIZE_MASK 0x7ffffff
+#define PLX_DESC_FLAG_VALID BIT(31)
+#define PLX_DESC_FLAG_INT_WHEN_DONE BIT(30)
+
+#define PLX_DESC_WB_SUCCESS BIT(30)
+#define PLX_DESC_WB_RD_FAIL BIT(29)
+#define PLX_DESC_WB_WR_FAIL BIT(28)
+
+#define PLX_DMA_RING_COUNT 2048
+
+struct plx_dma_desc {
+ struct dma_async_tx_descriptor txd;
+ struct plx_dma_hw_std_desc *hw;
+ u32 orig_size;
+};
+
+struct plx_dma_dev {
+ struct dma_device dma_dev;
+ struct dma_chan dma_chan;
+ struct pci_dev __rcu *pdev;
+ void __iomem *bar;
+ struct tasklet_struct desc_task;
+
+ spinlock_t ring_lock;
+ bool ring_active;
+ int head;
+ int tail;
+ struct plx_dma_hw_std_desc *hw_ring;
+ dma_addr_t hw_ring_dma;
+ struct plx_dma_desc **desc_ring;
+};
+
+static struct plx_dma_dev *chan_to_plx_dma_dev(struct dma_chan *c)
+{
+ return container_of(c, struct plx_dma_dev, dma_chan);
+}
+
+static struct plx_dma_desc *to_plx_desc(struct dma_async_tx_descriptor *txd)
+{
+ return container_of(txd, struct plx_dma_desc, txd);
+}
+
+static struct plx_dma_desc *plx_dma_get_desc(struct plx_dma_dev *plxdev, int i)
+{
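+ /* head and tail run freely; the mask works because PLX_DMA_RING_COUNT is a power of two */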
+ return plxdev->desc_ring[i & (PLX_DMA_RING_COUNT - 1)];
+}
+
+static void plx_dma_process_desc(struct plx_dma_dev *plxdev)
+{
+ struct dmaengine_result res;
+ struct plx_dma_desc *desc;
+ u32 flags;
+
+ spin_lock_bh(&plxdev->ring_lock);
+
+ while (plxdev->tail != plxdev->head) {
+ desc = plx_dma_get_desc(plxdev, plxdev->tail);
+
+ flags = le32_to_cpu(READ_ONCE(desc->hw->flags_and_size));
+
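+ /* hardware clears the valid flag and writes status back once the descriptor completes */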
+ if (flags & PLX_DESC_FLAG_VALID)
+ break;
+
+ res.residue = desc->orig_size - (flags & PLX_DESC_SIZE_MASK);
+
+ if (flags & PLX_DESC_WB_SUCCESS)
+ res.result = DMA_TRANS_NOERROR;
+ else if (flags & PLX_DESC_WB_WR_FAIL)
+ res.result = DMA_TRANS_WRITE_FAILED;
+ else
+ res.result = DMA_TRANS_READ_FAILED;
+
+ dma_cookie_complete(&desc->txd);
+ dma_descriptor_unmap(&desc->txd);
+ dmaengine_desc_get_callback_invoke(&desc->txd, &res);
+ desc->txd.callback = NULL;
+ desc->txd.callback_result = NULL;
+
+ plxdev->tail++;
+ }
+
+ spin_unlock_bh(&plxdev->ring_lock);
+}
+
+static void plx_dma_abort_desc(struct plx_dma_dev *plxdev)
+{
+ struct dmaengine_result res;
+ struct plx_dma_desc *desc;
+
+ plx_dma_process_desc(plxdev);
+
+ spin_lock_bh(&plxdev->ring_lock);
+
+ while (plxdev->tail != plxdev->head) {
+ desc = plx_dma_get_desc(plxdev, plxdev->tail);
+
+ res.residue = desc->orig_size;
+ res.result = DMA_TRANS_ABORTED;
+
+ dma_cookie_complete(&desc->txd);
+ dma_descriptor_unmap(&desc->txd);
+ dmaengine_desc_get_callback_invoke(&desc->txd, &res);
+ desc->txd.callback = NULL;
+ desc->txd.callback_result = NULL;
+
+ plxdev->tail++;
+ }
+
+ spin_unlock_bh(&plxdev->ring_lock);
+}
+
+static void __plx_dma_stop(struct plx_dma_dev *plxdev)
+{
+ unsigned long timeout = jiffies + msecs_to_jiffies(1000);
+ u32 val;
+
+ val = readl(plxdev->bar + PLX_REG_CTRL);
+ if (!(val & ~PLX_REG_CTRL_GRACEFUL_PAUSE))
+ return;
+
+ writel(PLX_REG_CTRL_RESET_VAL | PLX_REG_CTRL_GRACEFUL_PAUSE,
+ plxdev->bar + PLX_REG_CTRL);
+
+ while (!time_after(jiffies, timeout)) {
+ val = readl(plxdev->bar + PLX_REG_CTRL);
+ if (val & PLX_REG_CTRL_GRACEFUL_PAUSE_DONE)
+ break;
+
+ cpu_relax();
+ }
+
+ if (!(val & PLX_REG_CTRL_GRACEFUL_PAUSE_DONE))
+ dev_err(plxdev->dma_dev.dev,
+ "Timeout waiting for graceful pause!\n");
+
+ writel(PLX_REG_CTRL_RESET_VAL | PLX_REG_CTRL_GRACEFUL_PAUSE,
+ plxdev->bar + PLX_REG_CTRL);
+
+ writel(0, plxdev->bar + PLX_REG_DESC_RING_COUNT);
+ writel(0, plxdev->bar + PLX_REG_DESC_RING_ADDR);
+ writel(0, plxdev->bar + PLX_REG_DESC_RING_ADDR_HI);
+ writel(0, plxdev->bar + PLX_REG_DESC_RING_NEXT_ADDR);
+}
+
+static void plx_dma_stop(struct plx_dma_dev *plxdev)
+{
+ rcu_read_lock();
+ if (!rcu_dereference(plxdev->pdev)) {
+ rcu_read_unlock();
+ return;
+ }
+
+ __plx_dma_stop(plxdev);
+
+ rcu_read_unlock();
+}
+
+static void plx_dma_desc_task(unsigned long data)
+{
+ struct plx_dma_dev *plxdev = (void *)data;
+
+ plx_dma_process_desc(plxdev);
+}
+
+static struct dma_async_tx_descriptor *plx_dma_prep_memcpy(struct dma_chan *c,
+ dma_addr_t dma_dst, dma_addr_t dma_src, size_t len,
+ unsigned long flags)
+ __acquires(plxdev->ring_lock)
+{
+ struct plx_dma_dev *plxdev = chan_to_plx_dma_dev(c);
+ struct plx_dma_desc *plxdesc;
+
+ spin_lock_bh(&plxdev->ring_lock);
+ if (!plxdev->ring_active)
+ goto err_unlock;
+
+ if (!CIRC_SPACE(plxdev->head, plxdev->tail, PLX_DMA_RING_COUNT))
+ goto err_unlock;
+
+ if (len > PLX_DESC_SIZE_MASK)
+ goto err_unlock;
+
+ plxdesc = plx_dma_get_desc(plxdev, plxdev->head);
+ plxdev->head++;
+
+ plxdesc->hw->dst_addr_lo = cpu_to_le32(lower_32_bits(dma_dst));
+ plxdesc->hw->dst_addr_hi = cpu_to_le16(upper_32_bits(dma_dst));
+ plxdesc->hw->src_addr_lo = cpu_to_le32(lower_32_bits(dma_src));
+ plxdesc->hw->src_addr_hi = cpu_to_le16(upper_32_bits(dma_src));
+
+ plxdesc->orig_size = len;
+
+ if (flags & DMA_PREP_INTERRUPT)
+ len |= PLX_DESC_FLAG_INT_WHEN_DONE;
+
+ plxdesc->hw->flags_and_size = cpu_to_le32(len);
+ plxdesc->txd.flags = flags;
+
+ /* return with the lock held, it will be released in tx_submit */
+
+ return &plxdesc->txd;
+
+err_unlock:
+ /*
+ * Keep sparse happy by restoring an even lock count on
+ * this lock.
+ */
+ __acquire(plxdev->ring_lock);
+
+ spin_unlock_bh(&plxdev->ring_lock);
+ return NULL;
+}
+
+static dma_cookie_t plx_dma_tx_submit(struct dma_async_tx_descriptor *desc)
+ __releases(plxdev->ring_lock)
+{
+ struct plx_dma_dev *plxdev = chan_to_plx_dma_dev(desc->chan);
+ struct plx_dma_desc *plxdesc = to_plx_desc(desc);
+ dma_cookie_t cookie;
+
+ cookie = dma_cookie_assign(desc);
+
+ /*
+ * Ensure the descriptor updates are visible to the dma device
+ * before setting the valid bit.
+ */
+ wmb();
+
+ plxdesc->hw->flags_and_size |= cpu_to_le32(PLX_DESC_FLAG_VALID);
+
+ spin_unlock_bh(&plxdev->ring_lock);
+
+ return cookie;
+}
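+
+/*
+ * Since prep returns with ring_lock held and tx_submit releases it, a
+ * client must pair every successful prep with a submit before preparing
+ * another descriptor. A minimal client-side sketch using the generic
+ * dmaengine API (channel acquisition and DMA mapping elided; dst, src and
+ * len are placeholders):
+ *
+ *	tx = dmaengine_prep_dma_memcpy(chan, dst, src, len, DMA_PREP_INTERRUPT);
+ *	if (!tx)
+ *		return -ENOMEM;
+ *	cookie = dmaengine_submit(tx);	// releases the ring lock taken by prep
+ *	if (dma_submit_error(cookie))
+ *		return -EIO;
+ *	dma_async_issue_pending(chan);
+ */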
+
+static enum dma_status plx_dma_tx_status(struct dma_chan *chan,
+ dma_cookie_t cookie, struct dma_tx_state *txstate)
+{
+ struct plx_dma_dev *plxdev = chan_to_plx_dma_dev(chan);
+ enum dma_status ret;
+
+ ret = dma_cookie_status(chan, cookie, txstate);
+ if (ret == DMA_COMPLETE)
+ return ret;
+
+ plx_dma_process_desc(plxdev);
+
+ return dma_cookie_status(chan, cookie, txstate);
+}
+
+static void plx_dma_issue_pending(struct dma_chan *chan)
+{
+ struct plx_dma_dev *plxdev = chan_to_plx_dma_dev(chan);
+
+ rcu_read_lock();
+ if (!rcu_dereference(plxdev->pdev)) {
+ rcu_read_unlock();
+ return;
+ }
+
+ /*
+ * Ensure the valid bits are visible before starting the
+ * DMA engine.
+ */
+ wmb();
+
+ writew(PLX_REG_CTRL_START_VAL, plxdev->bar + PLX_REG_CTRL);
+
+ rcu_read_unlock();
+}
+
+static irqreturn_t plx_dma_isr(int irq, void *devid)
+{
+ struct plx_dma_dev *plxdev = devid;
+ u32 status;
+
+ status = readw(plxdev->bar + PLX_REG_INTR_STATUS);
+
+ if (!status)
+ return IRQ_NONE;
+
+ if (status & PLX_REG_INTR_STATUS_DESC_DONE && plxdev->ring_active)
+ tasklet_schedule(&plxdev->desc_task);
+
+ writew(status, plxdev->bar + PLX_REG_INTR_STATUS);
+
+ return IRQ_HANDLED;
+}
+
+static int plx_dma_alloc_desc(struct plx_dma_dev *plxdev)
+{
+ struct plx_dma_desc *desc;
+ int i;
+
+ plxdev->desc_ring = kcalloc(PLX_DMA_RING_COUNT,
+ sizeof(*plxdev->desc_ring), GFP_KERNEL);
+ if (!plxdev->desc_ring)
+ return -ENOMEM;
+
+ for (i = 0; i < PLX_DMA_RING_COUNT; i++) {
+ desc = kzalloc(sizeof(*desc), GFP_KERNEL);
+ if (!desc)
+ goto free_and_exit;
+
+ dma_async_tx_descriptor_init(&desc->txd, &plxdev->dma_chan);
+ desc->txd.tx_submit = plx_dma_tx_submit;
+ desc->hw = &plxdev->hw_ring[i];
+
+ plxdev->desc_ring[i] = desc;
+ }
+
+ return 0;
+
+free_and_exit:
+ for (i = 0; i < PLX_DMA_RING_COUNT; i++)
+ kfree(plxdev->desc_ring[i]);
+ kfree(plxdev->desc_ring);
+ return -ENOMEM;
+}
+
+static int plx_dma_alloc_chan_resources(struct dma_chan *chan)
+{
+ struct plx_dma_dev *plxdev = chan_to_plx_dma_dev(chan);
+ size_t ring_sz = PLX_DMA_RING_COUNT * sizeof(*plxdev->hw_ring);
+ int rc;
+
+ plxdev->head = plxdev->tail = 0;
+ plxdev->hw_ring = dma_alloc_coherent(plxdev->dma_dev.dev, ring_sz,
+ &plxdev->hw_ring_dma, GFP_KERNEL);
+ if (!plxdev->hw_ring)
+ return -ENOMEM;
+
+ rc = plx_dma_alloc_desc(plxdev);
+ if (rc)
+ goto out_free_hw_ring;
+
+ rcu_read_lock();
+ if (!rcu_dereference(plxdev->pdev)) {
+ rcu_read_unlock();
+ rc = -ENODEV;
+ goto out_free_hw_ring;
+ }
+
+ writel(PLX_REG_CTRL_RESET_VAL, plxdev->bar + PLX_REG_CTRL);
+ writel(lower_32_bits(plxdev->hw_ring_dma),
+ plxdev->bar + PLX_REG_DESC_RING_ADDR);
+ writel(upper_32_bits(plxdev->hw_ring_dma),
+ plxdev->bar + PLX_REG_DESC_RING_ADDR_HI);
+ writel(lower_32_bits(plxdev->hw_ring_dma),
+ plxdev->bar + PLX_REG_DESC_RING_NEXT_ADDR);
+ writel(PLX_DMA_RING_COUNT, plxdev->bar + PLX_REG_DESC_RING_COUNT);
+ writel(PLX_REG_PREF_LIMIT_PREF_FOUR, plxdev->bar + PLX_REG_PREF_LIMIT);
+
+ plxdev->ring_active = true;
+
+ rcu_read_unlock();
+
+ return PLX_DMA_RING_COUNT;
+
+out_free_hw_ring:
+ dma_free_coherent(plxdev->dma_dev.dev, ring_sz, plxdev->hw_ring,
+ plxdev->hw_ring_dma);
+ return rc;
+}
+
+static void plx_dma_free_chan_resources(struct dma_chan *chan)
+{
+ struct plx_dma_dev *plxdev = chan_to_plx_dma_dev(chan);
+ size_t ring_sz = PLX_DMA_RING_COUNT * sizeof(*plxdev->hw_ring);
+ struct pci_dev *pdev;
+ int irq = -1;
+ int i;
+
+ spin_lock_bh(&plxdev->ring_lock);
+ plxdev->ring_active = false;
+ spin_unlock_bh(&plxdev->ring_lock);
+
+ plx_dma_stop(plxdev);
+
+ rcu_read_lock();
+ pdev = rcu_dereference(plxdev->pdev);
+ if (pdev)
+ irq = pci_irq_vector(pdev, 0);
+ rcu_read_unlock();
+
+ if (irq > 0)
+ synchronize_irq(irq);
+
+ tasklet_kill(&plxdev->desc_task);
+
+ plx_dma_abort_desc(plxdev);
+
+ for (i = 0; i < PLX_DMA_RING_COUNT; i++)
+ kfree(plxdev->desc_ring[i]);
+
+ kfree(plxdev->desc_ring);
+ dma_free_coherent(plxdev->dma_dev.dev, ring_sz, plxdev->hw_ring,
+ plxdev->hw_ring_dma);
+}
+
+static void plx_dma_release(struct dma_device *dma_dev)
+{
+ struct plx_dma_dev *plxdev =
+ container_of(dma_dev, struct plx_dma_dev, dma_dev);
+
+ put_device(dma_dev->dev);
+ kfree(plxdev);
+}
+
+static int plx_dma_create(struct pci_dev *pdev)
+{
+ struct plx_dma_dev *plxdev;
+ struct dma_device *dma;
+ struct dma_chan *chan;
+ int rc;
+
+ plxdev = kzalloc(sizeof(*plxdev), GFP_KERNEL);
+ if (!plxdev)
+ return -ENOMEM;
+
+ rc = request_irq(pci_irq_vector(pdev, 0), plx_dma_isr, 0,
+ KBUILD_MODNAME, plxdev);
+ if (rc) {
+ kfree(plxdev);
+ return rc;
+ }
+
+ spin_lock_init(&plxdev->ring_lock);
+ tasklet_init(&plxdev->desc_task, plx_dma_desc_task,
+ (unsigned long)plxdev);
+
+ RCU_INIT_POINTER(plxdev->pdev, pdev);
+ plxdev->bar = pcim_iomap_table(pdev)[0];
+
+ dma = &plxdev->dma_dev;
+ dma->chancnt = 1;
+ INIT_LIST_HEAD(&dma->channels);
+ dma_cap_set(DMA_MEMCPY, dma->cap_mask);
+ dma->copy_align = DMAENGINE_ALIGN_1_BYTE;
+ dma->dev = get_device(&pdev->dev);
+
+ dma->device_alloc_chan_resources = plx_dma_alloc_chan_resources;
+ dma->device_free_chan_resources = plx_dma_free_chan_resources;
+ dma->device_prep_dma_memcpy = plx_dma_prep_memcpy;
+ dma->device_issue_pending = plx_dma_issue_pending;
+ dma->device_tx_status = plx_dma_tx_status;
+ dma->device_release = plx_dma_release;
+
+ chan = &plxdev->dma_chan;
+ chan->device = dma;
+ dma_cookie_init(chan);
+ list_add_tail(&chan->device_node, &dma->channels);
+
+ rc = dma_async_device_register(dma);
+ if (rc) {
+ pci_err(pdev, "Failed to register dma device: %d\n", rc);
+ free_irq(pci_irq_vector(pdev, 0), plxdev);
+ kfree(plxdev);
+ return rc;
+ }
+
+ pci_set_drvdata(pdev, plxdev);
+
+ return 0;
+}
+
+static int plx_dma_probe(struct pci_dev *pdev,
+ const struct pci_device_id *id)
+{
+ int rc;
+
+ rc = pcim_enable_device(pdev);
+ if (rc)
+ return rc;
+
+ rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(48));
+ if (rc)
+ rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
+ if (rc)
+ return rc;
+
+ rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(48));
+ if (rc)
+ rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
+ if (rc)
+ return rc;
+
+ rc = pcim_iomap_regions(pdev, 1, KBUILD_MODNAME);
+ if (rc)
+ return rc;
+
+ rc = pci_alloc_irq_vectors(pdev, 1, 1, PCI_IRQ_ALL_TYPES);
+ if (rc <= 0)
+ return rc;
+
+ pci_set_master(pdev);
+
+ rc = plx_dma_create(pdev);
+ if (rc)
+ goto err_free_irq_vectors;
+
+ pci_info(pdev, "PLX DMA Channel Registered\n");
+
+ return 0;
+
+err_free_irq_vectors:
+ pci_free_irq_vectors(pdev);
+ return rc;
+}
+
+static void plx_dma_remove(struct pci_dev *pdev)
+{
+ struct plx_dma_dev *plxdev = pci_get_drvdata(pdev);
+
+ free_irq(pci_irq_vector(pdev, 0), plxdev);
+
+ rcu_assign_pointer(plxdev->pdev, NULL);
+ synchronize_rcu();
+
+ spin_lock_bh(&plxdev->ring_lock);
+ plxdev->ring_active = false;
+ spin_unlock_bh(&plxdev->ring_lock);
+
+ __plx_dma_stop(plxdev);
+ plx_dma_abort_desc(plxdev);
+
+ plxdev->bar = NULL;
+ dma_async_device_unregister(&plxdev->dma_dev);
+
+ pci_free_irq_vectors(pdev);
+}
+
+static const struct pci_device_id plx_dma_pci_tbl[] = {
+ {
+ .vendor = PCI_VENDOR_ID_PLX,
+ .device = 0x87D0,
+ .subvendor = PCI_ANY_ID,
+ .subdevice = PCI_ANY_ID,
+ .class = PCI_CLASS_SYSTEM_OTHER << 8,
+ .class_mask = 0xFFFFFFFF,
+ },
+ {0}
+};
+MODULE_DEVICE_TABLE(pci, plx_dma_pci_tbl);
+
+static struct pci_driver plx_dma_pci_driver = {
+ .name = KBUILD_MODNAME,
+ .id_table = plx_dma_pci_tbl,
+ .probe = plx_dma_probe,
+ .remove = plx_dma_remove,
+};
+module_pci_driver(plx_dma_pci_driver);
diff --git a/drivers/dma/s3c24xx-dma.c b/drivers/dma/s3c24xx-dma.c
index 43da8eeb18ef..8e14c72d03f0 100644
--- a/drivers/dma/s3c24xx-dma.c
+++ b/drivers/dma/s3c24xx-dma.c
@@ -519,15 +519,6 @@ static void s3c24xx_dma_start_next_txd(struct s3c24xx_dma_chan *s3cchan)
s3c24xx_dma_start_next_sg(s3cchan, txd);
}
-static void s3c24xx_dma_free_txd_list(struct s3c24xx_dma_engine *s3cdma,
- struct s3c24xx_dma_chan *s3cchan)
-{
- LIST_HEAD(head);
-
- vchan_get_all_descriptors(&s3cchan->vc, &head);
- vchan_dma_desc_free_list(&s3cchan->vc, &head);
-}
-
/*
* Try to allocate a physical channel. When successful, assign it to
* this virtual channel, and initiate the next descriptor. The
@@ -709,8 +700,9 @@ static int s3c24xx_dma_terminate_all(struct dma_chan *chan)
{
struct s3c24xx_dma_chan *s3cchan = to_s3c24xx_dma_chan(chan);
struct s3c24xx_dma_engine *s3cdma = s3cchan->host;
+ LIST_HEAD(head);
unsigned long flags;
- int ret = 0;
+ int ret;
spin_lock_irqsave(&s3cchan->vc.lock, flags);
@@ -734,7 +726,15 @@ static int s3c24xx_dma_terminate_all(struct dma_chan *chan)
}
/* Dequeue jobs not yet fired as well */
- s3c24xx_dma_free_txd_list(s3cdma, s3cchan);
+
+ vchan_get_all_descriptors(&s3cchan->vc, &head);
+
+ spin_unlock_irqrestore(&s3cchan->vc.lock, flags);
+
+ vchan_dma_desc_free_list(&s3cchan->vc, &head);
+
+ return 0;
+
unlock:
spin_unlock_irqrestore(&s3cchan->vc.lock, flags);
@@ -1198,7 +1198,7 @@ static int s3c24xx_dma_probe(struct platform_device *pdev)
/* Basic sanity check */
if (pdata->num_phy_channels > MAX_DMA_CHANNELS) {
- dev_err(&pdev->dev, "to many dma channels %d, max %d\n",
+ dev_err(&pdev->dev, "too many dma channels %d, max %d\n",
pdata->num_phy_channels, MAX_DMA_CHANNELS);
return -EINVAL;
}
diff --git a/drivers/dma/sf-pdma/sf-pdma.c b/drivers/dma/sf-pdma/sf-pdma.c
index 465256fe8b1f..6d0bec947636 100644
--- a/drivers/dma/sf-pdma/sf-pdma.c
+++ b/drivers/dma/sf-pdma/sf-pdma.c
@@ -155,9 +155,9 @@ static void sf_pdma_free_chan_resources(struct dma_chan *dchan)
kfree(chan->desc);
chan->desc = NULL;
vchan_get_all_descriptors(&chan->vchan, &head);
- vchan_dma_desc_free_list(&chan->vchan, &head);
sf_pdma_disclaim_chan(chan);
spin_unlock_irqrestore(&chan->vchan.lock, flags);
+ vchan_dma_desc_free_list(&chan->vchan, &head);
}
static size_t sf_pdma_desc_residue(struct sf_pdma_chan *chan,
@@ -220,8 +220,8 @@ static int sf_pdma_terminate_all(struct dma_chan *dchan)
chan->desc = NULL;
chan->xfer_err = false;
vchan_get_all_descriptors(&chan->vchan, &head);
- vchan_dma_desc_free_list(&chan->vchan, &head);
spin_unlock_irqrestore(&chan->vchan.lock, flags);
+ vchan_dma_desc_free_list(&chan->vchan, &head);
return 0;
}
diff --git a/drivers/dma/sun4i-dma.c b/drivers/dma/sun4i-dma.c
index e397a50058c8..bbc2bda3b902 100644
--- a/drivers/dma/sun4i-dma.c
+++ b/drivers/dma/sun4i-dma.c
@@ -669,43 +669,41 @@ sun4i_dma_prep_dma_cyclic(struct dma_chan *chan, dma_addr_t buf, size_t len,
dma_addr_t src, dest;
u32 endpoints;
int nr_periods, offset, plength, i;
+ u8 ram_type, io_mode, linear_mode;
if (!is_slave_direction(dir)) {
dev_err(chan2dev(chan), "Invalid DMA direction\n");
return NULL;
}
- if (vchan->is_dedicated) {
- /*
- * As we are using this just for audio data, we need to use
- * normal DMA. There is nothing stopping us from supporting
- * dedicated DMA here as well, so if a client comes up and
- * requires it, it will be simple to implement it.
- */
- dev_err(chan2dev(chan),
- "Cyclic transfers are only supported on Normal DMA\n");
- return NULL;
- }
-
contract = generate_dma_contract();
if (!contract)
return NULL;
contract->is_cyclic = 1;
- /* Figure out the endpoints and the address we need */
+ if (vchan->is_dedicated) {
+ io_mode = SUN4I_DDMA_ADDR_MODE_IO;
+ linear_mode = SUN4I_DDMA_ADDR_MODE_LINEAR;
+ ram_type = SUN4I_DDMA_DRQ_TYPE_SDRAM;
+ } else {
+ io_mode = SUN4I_NDMA_ADDR_MODE_IO;
+ linear_mode = SUN4I_NDMA_ADDR_MODE_LINEAR;
+ ram_type = SUN4I_NDMA_DRQ_TYPE_SDRAM;
+ }
+
if (dir == DMA_MEM_TO_DEV) {
src = buf;
dest = sconfig->dst_addr;
- endpoints = SUN4I_DMA_CFG_SRC_DRQ_TYPE(SUN4I_NDMA_DRQ_TYPE_SDRAM) |
- SUN4I_DMA_CFG_DST_DRQ_TYPE(vchan->endpoint) |
- SUN4I_DMA_CFG_DST_ADDR_MODE(SUN4I_NDMA_ADDR_MODE_IO);
+ endpoints = SUN4I_DMA_CFG_DST_DRQ_TYPE(vchan->endpoint) |
+ SUN4I_DMA_CFG_DST_ADDR_MODE(io_mode) |
+ SUN4I_DMA_CFG_SRC_DRQ_TYPE(ram_type);
} else {
src = sconfig->src_addr;
dest = buf;
- endpoints = SUN4I_DMA_CFG_SRC_DRQ_TYPE(vchan->endpoint) |
- SUN4I_DMA_CFG_SRC_ADDR_MODE(SUN4I_NDMA_ADDR_MODE_IO) |
- SUN4I_DMA_CFG_DST_DRQ_TYPE(SUN4I_NDMA_DRQ_TYPE_SDRAM);
+ endpoints = SUN4I_DMA_CFG_DST_DRQ_TYPE(ram_type) |
+ SUN4I_DMA_CFG_SRC_DRQ_TYPE(vchan->endpoint) |
+ SUN4I_DMA_CFG_SRC_ADDR_MODE(io_mode);
}
/*
@@ -747,8 +745,13 @@ sun4i_dma_prep_dma_cyclic(struct dma_chan *chan, dma_addr_t buf, size_t len,
dest = buf + offset;
/* Make the promise */
- promise = generate_ndma_promise(chan, src, dest,
- plength, sconfig, dir);
+ if (vchan->is_dedicated)
+ promise = generate_ddma_promise(chan, src, dest,
+ plength, sconfig);
+ else
+ promise = generate_ndma_promise(chan, src, dest,
+ plength, sconfig, dir);
+
if (!promise) {
/* TODO: should we free everything? */
return NULL;
@@ -885,12 +888,13 @@ static int sun4i_dma_terminate_all(struct dma_chan *chan)
}
spin_lock_irqsave(&vchan->vc.lock, flags);
- vchan_dma_desc_free_list(&vchan->vc, &head);
/* Clear these so the vchan is usable again */
vchan->processing = NULL;
vchan->pchan = NULL;
spin_unlock_irqrestore(&vchan->vc.lock, flags);
+ vchan_dma_desc_free_list(&vchan->vc, &head);
+
return 0;
}
diff --git a/drivers/dma/ti/Kconfig b/drivers/dma/ti/Kconfig
index d507c24fbf31..f76e06651f80 100644
--- a/drivers/dma/ti/Kconfig
+++ b/drivers/dma/ti/Kconfig
@@ -34,5 +34,29 @@ config DMA_OMAP
Enable support for the TI sDMA (System DMA or DMA4) controller. This
DMA engine is found on OMAP and DRA7xx parts.
+config TI_K3_UDMA
+ bool "Texas Instruments UDMA support"
+ depends on ARCH_K3 || COMPILE_TEST
+ depends on TI_SCI_PROTOCOL
+ depends on TI_SCI_INTA_IRQCHIP
+ select DMA_ENGINE
+ select DMA_VIRTUAL_CHANNELS
+ select TI_K3_RINGACC
+ select TI_K3_PSIL
+ help
+ Enable support for the TI UDMA (Unified DMA) controller. This
+ DMA engine is used in AM65x and j721e.
+
+config TI_K3_UDMA_GLUE_LAYER
+ bool "Texas Instruments UDMA Glue layer for non DMAengine users"
+ depends on ARCH_K3 || COMPILE_TEST
+ depends on TI_K3_UDMA
+ help
+ Say y here to support the K3 NAVSS DMA glue interface.
+ If unsure, say N.
+
+config TI_K3_PSIL
+ bool
+
config TI_DMA_CROSSBAR
bool
diff --git a/drivers/dma/ti/Makefile b/drivers/dma/ti/Makefile
index 113e59ec9c32..9a29a107e374 100644
--- a/drivers/dma/ti/Makefile
+++ b/drivers/dma/ti/Makefile
@@ -2,4 +2,7 @@
obj-$(CONFIG_TI_CPPI41) += cppi41.o
obj-$(CONFIG_TI_EDMA) += edma.o
obj-$(CONFIG_DMA_OMAP) += omap-dma.o
+obj-$(CONFIG_TI_K3_UDMA) += k3-udma.o
+obj-$(CONFIG_TI_K3_UDMA_GLUE_LAYER) += k3-udma-glue.o
+obj-$(CONFIG_TI_K3_PSIL) += k3-psil.o k3-psil-am654.o k3-psil-j721e.o
obj-$(CONFIG_TI_DMA_CROSSBAR) += dma-crossbar.o
diff --git a/drivers/dma/ti/edma.c b/drivers/dma/ti/edma.c
index 756a3c951dc7..03a7f647f7b2 100644
--- a/drivers/dma/ti/edma.c
+++ b/drivers/dma/ti/edma.c
@@ -2289,13 +2289,6 @@ static int edma_probe(struct platform_device *pdev)
if (!info)
return -ENODEV;
- pm_runtime_enable(dev);
- ret = pm_runtime_get_sync(dev);
- if (ret < 0) {
- dev_err(dev, "pm_runtime_get_sync() failed\n");
- return ret;
- }
-
ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));
if (ret)
return ret;
@@ -2326,27 +2319,33 @@ static int edma_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, ecc);
+ pm_runtime_enable(dev);
+ ret = pm_runtime_get_sync(dev);
+ if (ret < 0) {
+ dev_err(dev, "pm_runtime_get_sync() failed\n");
+ pm_runtime_disable(dev);
+ return ret;
+ }
+
/* Get eDMA3 configuration from IP */
ret = edma_setup_from_hw(dev, info, ecc);
if (ret)
- return ret;
+ goto err_disable_pm;
/* Allocate memory based on the information we got from the IP */
ecc->slave_chans = devm_kcalloc(dev, ecc->num_channels,
sizeof(*ecc->slave_chans), GFP_KERNEL);
- if (!ecc->slave_chans)
- return -ENOMEM;
ecc->slot_inuse = devm_kcalloc(dev, BITS_TO_LONGS(ecc->num_slots),
sizeof(unsigned long), GFP_KERNEL);
- if (!ecc->slot_inuse)
- return -ENOMEM;
ecc->channels_mask = devm_kcalloc(dev,
BITS_TO_LONGS(ecc->num_channels),
sizeof(unsigned long), GFP_KERNEL);
- if (!ecc->channels_mask)
- return -ENOMEM;
+ if (!ecc->slave_chans || !ecc->slot_inuse || !ecc->channels_mask) {
+ ret = -ENOMEM;
+ goto err_disable_pm;
+ }
/* Mark all channels available initially */
bitmap_fill(ecc->channels_mask, ecc->num_channels);
@@ -2388,7 +2387,7 @@ static int edma_probe(struct platform_device *pdev)
ecc);
if (ret) {
dev_err(dev, "CCINT (%d) failed --> %d\n", irq, ret);
- return ret;
+ goto err_disable_pm;
}
ecc->ccint = irq;
}
@@ -2404,7 +2403,7 @@ static int edma_probe(struct platform_device *pdev)
ecc);
if (ret) {
dev_err(dev, "CCERRINT (%d) failed --> %d\n", irq, ret);
- return ret;
+ goto err_disable_pm;
}
ecc->ccerrint = irq;
}
@@ -2412,7 +2411,8 @@ static int edma_probe(struct platform_device *pdev)
ecc->dummy_slot = edma_alloc_slot(ecc, EDMA_SLOT_ANY);
if (ecc->dummy_slot < 0) {
dev_err(dev, "Can't allocate PaRAM dummy slot\n");
- return ecc->dummy_slot;
+ ret = ecc->dummy_slot;
+ goto err_disable_pm;
}
queue_priority_mapping = info->queue_priority_mapping;
@@ -2512,6 +2512,9 @@ static int edma_probe(struct platform_device *pdev)
err_reg1:
edma_free_slot(ecc, ecc->dummy_slot);
+err_disable_pm:
+ pm_runtime_put_sync(dev);
+ pm_runtime_disable(dev);
return ret;
}
@@ -2542,6 +2545,8 @@ static int edma_remove(struct platform_device *pdev)
if (ecc->dma_memcpy)
dma_async_device_unregister(ecc->dma_memcpy);
edma_free_slot(ecc, ecc->dummy_slot);
+ pm_runtime_put_sync(dev);
+ pm_runtime_disable(dev);
return 0;
}
diff --git a/drivers/dma/ti/k3-psil-am654.c b/drivers/dma/ti/k3-psil-am654.c
new file mode 100644
index 000000000000..a896a15908cf
--- /dev/null
+++ b/drivers/dma/ti/k3-psil-am654.c
@@ -0,0 +1,175 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2019 Texas Instruments Incorporated - http://www.ti.com
+ * Author: Peter Ujfalusi <peter.ujfalusi@ti.com>
+ */
+
+#include <linux/kernel.h>
+
+#include "k3-psil-priv.h"
+
+#define PSIL_PDMA_XY_TR(x) \
+ { \
+ .thread_id = x, \
+ .ep_config = { \
+ .ep_type = PSIL_EP_PDMA_XY, \
+ }, \
+ }
+
+#define PSIL_PDMA_XY_PKT(x) \
+ { \
+ .thread_id = x, \
+ .ep_config = { \
+ .ep_type = PSIL_EP_PDMA_XY, \
+ .pkt_mode = 1, \
+ }, \
+ }
+
+#define PSIL_ETHERNET(x) \
+ { \
+ .thread_id = x, \
+ .ep_config = { \
+ .ep_type = PSIL_EP_NATIVE, \
+ .pkt_mode = 1, \
+ .needs_epib = 1, \
+ .psd_size = 16, \
+ }, \
+ }
+
+#define PSIL_SA2UL(x, tx) \
+ { \
+ .thread_id = x, \
+ .ep_config = { \
+ .ep_type = PSIL_EP_NATIVE, \
+ .pkt_mode = 1, \
+ .needs_epib = 1, \
+ .psd_size = 64, \
+ .notdpkt = tx, \
+ }, \
+ }
+
+/* PSI-L source thread IDs, used for RX (DMA_DEV_TO_MEM) */
+static struct psil_ep am654_src_ep_map[] = {
+ /* SA2UL */
+ PSIL_SA2UL(0x4000, 0),
+ PSIL_SA2UL(0x4001, 0),
+ PSIL_SA2UL(0x4002, 0),
+ PSIL_SA2UL(0x4003, 0),
+ /* PRU_ICSSG0 */
+ PSIL_ETHERNET(0x4100),
+ PSIL_ETHERNET(0x4101),
+ PSIL_ETHERNET(0x4102),
+ PSIL_ETHERNET(0x4103),
+ /* PRU_ICSSG1 */
+ PSIL_ETHERNET(0x4200),
+ PSIL_ETHERNET(0x4201),
+ PSIL_ETHERNET(0x4202),
+ PSIL_ETHERNET(0x4203),
+ /* PRU_ICSSG2 */
+ PSIL_ETHERNET(0x4300),
+ PSIL_ETHERNET(0x4301),
+ PSIL_ETHERNET(0x4302),
+ PSIL_ETHERNET(0x4303),
+ /* PDMA0 - McASPs */
+ PSIL_PDMA_XY_TR(0x4400),
+ PSIL_PDMA_XY_TR(0x4401),
+ PSIL_PDMA_XY_TR(0x4402),
+ /* PDMA1 - SPI0-4 */
+ PSIL_PDMA_XY_PKT(0x4500),
+ PSIL_PDMA_XY_PKT(0x4501),
+ PSIL_PDMA_XY_PKT(0x4502),
+ PSIL_PDMA_XY_PKT(0x4503),
+ PSIL_PDMA_XY_PKT(0x4504),
+ PSIL_PDMA_XY_PKT(0x4505),
+ PSIL_PDMA_XY_PKT(0x4506),
+ PSIL_PDMA_XY_PKT(0x4507),
+ PSIL_PDMA_XY_PKT(0x4508),
+ PSIL_PDMA_XY_PKT(0x4509),
+ PSIL_PDMA_XY_PKT(0x450a),
+ PSIL_PDMA_XY_PKT(0x450b),
+ PSIL_PDMA_XY_PKT(0x450c),
+ PSIL_PDMA_XY_PKT(0x450d),
+ PSIL_PDMA_XY_PKT(0x450e),
+ PSIL_PDMA_XY_PKT(0x450f),
+ PSIL_PDMA_XY_PKT(0x4510),
+ PSIL_PDMA_XY_PKT(0x4511),
+ PSIL_PDMA_XY_PKT(0x4512),
+ PSIL_PDMA_XY_PKT(0x4513),
+ /* PDMA1 - USART0-2 */
+ PSIL_PDMA_XY_PKT(0x4514),
+ PSIL_PDMA_XY_PKT(0x4515),
+ PSIL_PDMA_XY_PKT(0x4516),
+ /* CPSW0 */
+ PSIL_ETHERNET(0x7000),
+ /* MCU_PDMA0 - ADCs */
+ PSIL_PDMA_XY_TR(0x7100),
+ PSIL_PDMA_XY_TR(0x7101),
+ PSIL_PDMA_XY_TR(0x7102),
+ PSIL_PDMA_XY_TR(0x7103),
+ /* MCU_PDMA1 - MCU_SPI0-2 */
+ PSIL_PDMA_XY_PKT(0x7200),
+ PSIL_PDMA_XY_PKT(0x7201),
+ PSIL_PDMA_XY_PKT(0x7202),
+ PSIL_PDMA_XY_PKT(0x7203),
+ PSIL_PDMA_XY_PKT(0x7204),
+ PSIL_PDMA_XY_PKT(0x7205),
+ PSIL_PDMA_XY_PKT(0x7206),
+ PSIL_PDMA_XY_PKT(0x7207),
+ PSIL_PDMA_XY_PKT(0x7208),
+ PSIL_PDMA_XY_PKT(0x7209),
+ PSIL_PDMA_XY_PKT(0x720a),
+ PSIL_PDMA_XY_PKT(0x720b),
+ /* MCU_PDMA1 - MCU_USART0 */
+ PSIL_PDMA_XY_PKT(0x7212),
+};
+
+/* PSI-L destination thread IDs, used for TX (DMA_MEM_TO_DEV) */
+static struct psil_ep am654_dst_ep_map[] = {
+ /* SA2UL */
+ PSIL_SA2UL(0xc000, 1),
+ PSIL_SA2UL(0xc001, 1),
+ /* PRU_ICSSG0 */
+ PSIL_ETHERNET(0xc100),
+ PSIL_ETHERNET(0xc101),
+ PSIL_ETHERNET(0xc102),
+ PSIL_ETHERNET(0xc103),
+ PSIL_ETHERNET(0xc104),
+ PSIL_ETHERNET(0xc105),
+ PSIL_ETHERNET(0xc106),
+ PSIL_ETHERNET(0xc107),
+ /* PRU_ICSSG1 */
+ PSIL_ETHERNET(0xc200),
+ PSIL_ETHERNET(0xc201),
+ PSIL_ETHERNET(0xc202),
+ PSIL_ETHERNET(0xc203),
+ PSIL_ETHERNET(0xc204),
+ PSIL_ETHERNET(0xc205),
+ PSIL_ETHERNET(0xc206),
+ PSIL_ETHERNET(0xc207),
+ /* PRU_ICSSG2 */
+ PSIL_ETHERNET(0xc300),
+ PSIL_ETHERNET(0xc301),
+ PSIL_ETHERNET(0xc302),
+ PSIL_ETHERNET(0xc303),
+ PSIL_ETHERNET(0xc304),
+ PSIL_ETHERNET(0xc305),
+ PSIL_ETHERNET(0xc306),
+ PSIL_ETHERNET(0xc307),
+ /* CPSW0 */
+ PSIL_ETHERNET(0xf000),
+ PSIL_ETHERNET(0xf001),
+ PSIL_ETHERNET(0xf002),
+ PSIL_ETHERNET(0xf003),
+ PSIL_ETHERNET(0xf004),
+ PSIL_ETHERNET(0xf005),
+ PSIL_ETHERNET(0xf006),
+ PSIL_ETHERNET(0xf007),
+};
+
+struct psil_ep_map am654_ep_map = {
+ .name = "am654",
+ .src = am654_src_ep_map,
+ .src_count = ARRAY_SIZE(am654_src_ep_map),
+ .dst = am654_dst_ep_map,
+ .dst_count = ARRAY_SIZE(am654_dst_ep_map),
+};
diff --git a/drivers/dma/ti/k3-psil-j721e.c b/drivers/dma/ti/k3-psil-j721e.c
new file mode 100644
index 000000000000..e3cfd5f66842
--- /dev/null
+++ b/drivers/dma/ti/k3-psil-j721e.c
@@ -0,0 +1,222 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2019 Texas Instruments Incorporated - http://www.ti.com
+ * Author: Peter Ujfalusi <peter.ujfalusi@ti.com>
+ */
+
+#include <linux/kernel.h>
+
+#include "k3-psil-priv.h"
+
+#define PSIL_PDMA_XY_TR(x) \
+ { \
+ .thread_id = x, \
+ .ep_config = { \
+ .ep_type = PSIL_EP_PDMA_XY, \
+ }, \
+ }
+
+#define PSIL_PDMA_XY_PKT(x) \
+ { \
+ .thread_id = x, \
+ .ep_config = { \
+ .ep_type = PSIL_EP_PDMA_XY, \
+ .pkt_mode = 1, \
+ }, \
+ }
+
+#define PSIL_PDMA_MCASP(x) \
+ { \
+ .thread_id = x, \
+ .ep_config = { \
+ .ep_type = PSIL_EP_PDMA_XY, \
+ .pdma_acc32 = 1, \
+ .pdma_burst = 1, \
+ }, \
+ }
+
+#define PSIL_ETHERNET(x) \
+ { \
+ .thread_id = x, \
+ .ep_config = { \
+ .ep_type = PSIL_EP_NATIVE, \
+ .pkt_mode = 1, \
+ .needs_epib = 1, \
+ .psd_size = 16, \
+ }, \
+ }
+
+#define PSIL_SA2UL(x, tx) \
+ { \
+ .thread_id = x, \
+ .ep_config = { \
+ .ep_type = PSIL_EP_NATIVE, \
+ .pkt_mode = 1, \
+ .needs_epib = 1, \
+ .psd_size = 64, \
+ .notdpkt = tx, \
+ }, \
+ }
+
+/* PSI-L source thread IDs, used for RX (DMA_DEV_TO_MEM) */
+static struct psil_ep j721e_src_ep_map[] = {
+ /* SA2UL */
+ PSIL_SA2UL(0x4000, 0),
+ PSIL_SA2UL(0x4001, 0),
+ PSIL_SA2UL(0x4002, 0),
+ PSIL_SA2UL(0x4003, 0),
+ /* PRU_ICSSG0 */
+ PSIL_ETHERNET(0x4100),
+ PSIL_ETHERNET(0x4101),
+ PSIL_ETHERNET(0x4102),
+ PSIL_ETHERNET(0x4103),
+ /* PRU_ICSSG1 */
+ PSIL_ETHERNET(0x4200),
+ PSIL_ETHERNET(0x4201),
+ PSIL_ETHERNET(0x4202),
+ PSIL_ETHERNET(0x4203),
+ /* PDMA6 (PSIL_PDMA_MCASP_G0) - McASP0-2 */
+ PSIL_PDMA_MCASP(0x4400),
+ PSIL_PDMA_MCASP(0x4401),
+ PSIL_PDMA_MCASP(0x4402),
+ /* PDMA7 (PSIL_PDMA_MCASP_G1) - McASP3-11 */
+ PSIL_PDMA_MCASP(0x4500),
+ PSIL_PDMA_MCASP(0x4501),
+ PSIL_PDMA_MCASP(0x4502),
+ PSIL_PDMA_MCASP(0x4503),
+ PSIL_PDMA_MCASP(0x4504),
+ PSIL_PDMA_MCASP(0x4505),
+ PSIL_PDMA_MCASP(0x4506),
+ PSIL_PDMA_MCASP(0x4507),
+ PSIL_PDMA_MCASP(0x4508),
+ /* PDMA8 (PDMA_MISC_G0) - SPI0-1 */
+ PSIL_PDMA_XY_PKT(0x4600),
+ PSIL_PDMA_XY_PKT(0x4601),
+ PSIL_PDMA_XY_PKT(0x4602),
+ PSIL_PDMA_XY_PKT(0x4603),
+ PSIL_PDMA_XY_PKT(0x4604),
+ PSIL_PDMA_XY_PKT(0x4605),
+ PSIL_PDMA_XY_PKT(0x4606),
+ PSIL_PDMA_XY_PKT(0x4607),
+ /* PDMA9 (PDMA_MISC_G1) - SPI2-3 */
+ PSIL_PDMA_XY_PKT(0x460c),
+ PSIL_PDMA_XY_PKT(0x460d),
+ PSIL_PDMA_XY_PKT(0x460e),
+ PSIL_PDMA_XY_PKT(0x460f),
+ PSIL_PDMA_XY_PKT(0x4610),
+ PSIL_PDMA_XY_PKT(0x4611),
+ PSIL_PDMA_XY_PKT(0x4612),
+ PSIL_PDMA_XY_PKT(0x4613),
+ /* PDMA10 (PDMA_MISC_G2) - SPI4-5 */
+ PSIL_PDMA_XY_PKT(0x4618),
+ PSIL_PDMA_XY_PKT(0x4619),
+ PSIL_PDMA_XY_PKT(0x461a),
+ PSIL_PDMA_XY_PKT(0x461b),
+ PSIL_PDMA_XY_PKT(0x461c),
+ PSIL_PDMA_XY_PKT(0x461d),
+ PSIL_PDMA_XY_PKT(0x461e),
+ PSIL_PDMA_XY_PKT(0x461f),
+ /* PDMA11 (PDMA_MISC_G3) */
+ PSIL_PDMA_XY_PKT(0x4624),
+ PSIL_PDMA_XY_PKT(0x4625),
+ PSIL_PDMA_XY_PKT(0x4626),
+ PSIL_PDMA_XY_PKT(0x4627),
+ PSIL_PDMA_XY_PKT(0x4628),
+ PSIL_PDMA_XY_PKT(0x4629),
+ PSIL_PDMA_XY_PKT(0x4630),
+ PSIL_PDMA_XY_PKT(0x463a),
+ /* PDMA13 (PDMA_USART_G0) - UART0-1 */
+ PSIL_PDMA_XY_PKT(0x4700),
+ PSIL_PDMA_XY_PKT(0x4701),
+ /* PDMA14 (PDMA_USART_G1) - UART2-3 */
+ PSIL_PDMA_XY_PKT(0x4702),
+ PSIL_PDMA_XY_PKT(0x4703),
+ /* PDMA15 (PDMA_USART_G2) - UART4-9 */
+ PSIL_PDMA_XY_PKT(0x4704),
+ PSIL_PDMA_XY_PKT(0x4705),
+ PSIL_PDMA_XY_PKT(0x4706),
+ PSIL_PDMA_XY_PKT(0x4707),
+ PSIL_PDMA_XY_PKT(0x4708),
+ PSIL_PDMA_XY_PKT(0x4709),
+ /* CPSW9 */
+ PSIL_ETHERNET(0x4a00),
+ /* CPSW0 */
+ PSIL_ETHERNET(0x7000),
+ /* MCU_PDMA0 (MCU_PDMA_MISC_G0) - SPI0 */
+ PSIL_PDMA_XY_PKT(0x7100),
+ PSIL_PDMA_XY_PKT(0x7101),
+ PSIL_PDMA_XY_PKT(0x7102),
+ PSIL_PDMA_XY_PKT(0x7103),
+ /* MCU_PDMA1 (MCU_PDMA_MISC_G1) - SPI1-2 */
+ PSIL_PDMA_XY_PKT(0x7200),
+ PSIL_PDMA_XY_PKT(0x7201),
+ PSIL_PDMA_XY_PKT(0x7202),
+ PSIL_PDMA_XY_PKT(0x7203),
+ PSIL_PDMA_XY_PKT(0x7204),
+ PSIL_PDMA_XY_PKT(0x7205),
+ PSIL_PDMA_XY_PKT(0x7206),
+ PSIL_PDMA_XY_PKT(0x7207),
+ /* MCU_PDMA2 (MCU_PDMA_MISC_G2) - UART0 */
+ PSIL_PDMA_XY_PKT(0x7300),
+ /* MCU_PDMA_ADC - ADC0-1 */
+ PSIL_PDMA_XY_TR(0x7400),
+ PSIL_PDMA_XY_TR(0x7401),
+ PSIL_PDMA_XY_TR(0x7402),
+ PSIL_PDMA_XY_TR(0x7403),
+ /* SA2UL */
+ PSIL_SA2UL(0x7500, 0),
+ PSIL_SA2UL(0x7501, 0),
+};
+
+/* PSI-L destination thread IDs, used for TX (DMA_MEM_TO_DEV) */
+static struct psil_ep j721e_dst_ep_map[] = {
+ /* SA2UL */
+ PSIL_SA2UL(0xc000, 1),
+ PSIL_SA2UL(0xc001, 1),
+ /* PRU_ICSSG0 */
+ PSIL_ETHERNET(0xc100),
+ PSIL_ETHERNET(0xc101),
+ PSIL_ETHERNET(0xc102),
+ PSIL_ETHERNET(0xc103),
+ PSIL_ETHERNET(0xc104),
+ PSIL_ETHERNET(0xc105),
+ PSIL_ETHERNET(0xc106),
+ PSIL_ETHERNET(0xc107),
+ /* PRU_ICSSG1 */
+ PSIL_ETHERNET(0xc200),
+ PSIL_ETHERNET(0xc201),
+ PSIL_ETHERNET(0xc202),
+ PSIL_ETHERNET(0xc203),
+ PSIL_ETHERNET(0xc204),
+ PSIL_ETHERNET(0xc205),
+ PSIL_ETHERNET(0xc206),
+ PSIL_ETHERNET(0xc207),
+ /* CPSW9 */
+ PSIL_ETHERNET(0xca00),
+ PSIL_ETHERNET(0xca01),
+ PSIL_ETHERNET(0xca02),
+ PSIL_ETHERNET(0xca03),
+ PSIL_ETHERNET(0xca04),
+ PSIL_ETHERNET(0xca05),
+ PSIL_ETHERNET(0xca06),
+ PSIL_ETHERNET(0xca07),
+ /* CPSW0 */
+ PSIL_ETHERNET(0xf000),
+ PSIL_ETHERNET(0xf001),
+ PSIL_ETHERNET(0xf002),
+ PSIL_ETHERNET(0xf003),
+ PSIL_ETHERNET(0xf004),
+ PSIL_ETHERNET(0xf005),
+ PSIL_ETHERNET(0xf006),
+ PSIL_ETHERNET(0xf007),
+ /* SA2UL */
+ PSIL_SA2UL(0xf500, 1),
+};
+
+struct psil_ep_map j721e_ep_map = {
+ .name = "j721e",
+ .src = j721e_src_ep_map,
+ .src_count = ARRAY_SIZE(j721e_src_ep_map),
+ .dst = j721e_dst_ep_map,
+ .dst_count = ARRAY_SIZE(j721e_dst_ep_map),
+};
diff --git a/drivers/dma/ti/k3-psil-priv.h b/drivers/dma/ti/k3-psil-priv.h
new file mode 100644
index 000000000000..a1f389ca371e
--- /dev/null
+++ b/drivers/dma/ti/k3-psil-priv.h
@@ -0,0 +1,43 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2019 Texas Instruments Incorporated - http://www.ti.com
+ */
+
+#ifndef K3_PSIL_PRIV_H_
+#define K3_PSIL_PRIV_H_
+
+#include <linux/dma/k3-psil.h>
+
+struct psil_ep {
+ u32 thread_id;
+ struct psil_endpoint_config ep_config;
+};
+
+/**
+ * struct psil_ep_map - PSI-L thread ID configuration maps
+ * @name: Name of the map, set it to the name of the SoC
+ * @src: Array of source PSI-L thread configurations
+ * @src_count: Number of entries in the src array
+ * @dst: Array of destination PSI-L thread configurations
+ * @dst_count: Number of entries in the dst array
+ *
+ * In case of a symmetric configuration for a matching src/dst thread pair
+ * (for example 0x4400 and 0xc400), only the src configuration needs to be
+ * present. If no dst configuration is found, the code looks up
+ * (dst_thread_id & ~0x8000) to find the symmetric match.
+ */
+struct psil_ep_map {
+ char *name;
+ struct psil_ep *src;
+ int src_count;
+ struct psil_ep *dst;
+ int dst_count;
+};
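+
+/*
+ * Example of the fallback: a TX request for dst thread 0xc400 has no dst
+ * entry in the am654 map, so the lookup masks off
+ * K3_PSIL_DST_THREAD_ID_OFFSET (0x8000):
+ *
+ *	thread_id = 0xc400;
+ *	thread_id &= ~K3_PSIL_DST_THREAD_ID_OFFSET;	 -> 0x4400
+ *
+ * and 0x4400 is then found among the am654 src entries (PDMA0 - McASPs).
+ */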
+
+struct psil_endpoint_config *psil_get_ep_config(u32 thread_id);
+
+/* SoC PSI-L endpoint maps */
+extern struct psil_ep_map am654_ep_map;
+extern struct psil_ep_map j721e_ep_map;
+
+#endif /* K3_PSIL_PRIV_H_ */
diff --git a/drivers/dma/ti/k3-psil.c b/drivers/dma/ti/k3-psil.c
new file mode 100644
index 000000000000..d7b965049ccb
--- /dev/null
+++ b/drivers/dma/ti/k3-psil.c
@@ -0,0 +1,90 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2019 Texas Instruments Incorporated - http://www.ti.com
+ * Author: Peter Ujfalusi <peter.ujfalusi@ti.com>
+ */
+
+#include <linux/kernel.h>
+#include <linux/device.h>
+#include <linux/init.h>
+#include <linux/mutex.h>
+#include <linux/of.h>
+
+#include "k3-psil-priv.h"
+
+static DEFINE_MUTEX(ep_map_mutex);
+static struct psil_ep_map *soc_ep_map;
+
+struct psil_endpoint_config *psil_get_ep_config(u32 thread_id)
+{
+ int i;
+
+ mutex_lock(&ep_map_mutex);
+ if (!soc_ep_map) {
+ if (of_machine_is_compatible("ti,am654")) {
+ soc_ep_map = &am654_ep_map;
+ } else if (of_machine_is_compatible("ti,j721e")) {
+ soc_ep_map = &j721e_ep_map;
+ } else {
+ pr_err("PSIL: No compatible machine found for map\n");
+ return ERR_PTR(-ENOTSUPP);
+ }
+ pr_debug("%s: Using map for %s\n", __func__, soc_ep_map->name);
+ }
+ mutex_unlock(&ep_map_mutex);
+
+ if (thread_id & K3_PSIL_DST_THREAD_ID_OFFSET && soc_ep_map->dst) {
+ /* check in destination thread map */
+ for (i = 0; i < soc_ep_map->dst_count; i++) {
+ if (soc_ep_map->dst[i].thread_id == thread_id)
+ return &soc_ep_map->dst[i].ep_config;
+ }
+ }
+
+ thread_id &= ~K3_PSIL_DST_THREAD_ID_OFFSET;
+ if (soc_ep_map->src) {
+ for (i = 0; i < soc_ep_map->src_count; i++) {
+ if (soc_ep_map->src[i].thread_id == thread_id)
+ return &soc_ep_map->src[i].ep_config;
+ }
+ }
+
+ return ERR_PTR(-ENOENT);
+}
+EXPORT_SYMBOL_GPL(psil_get_ep_config);
+
+int psil_set_new_ep_config(struct device *dev, const char *name,
+ struct psil_endpoint_config *ep_config)
+{
+ struct psil_endpoint_config *dst_ep_config;
+ struct of_phandle_args dma_spec;
+ u32 thread_id;
+ int index;
+
+ if (!dev || !dev->of_node)
+ return -EINVAL;
+
+ index = of_property_match_string(dev->of_node, "dma-names", name);
+ if (index < 0)
+ return index;
+
+ if (of_parse_phandle_with_args(dev->of_node, "dmas", "#dma-cells",
+ index, &dma_spec))
+ return -ENOENT;
+
+ thread_id = dma_spec.args[0];
+
+ dst_ep_config = psil_get_ep_config(thread_id);
+ if (IS_ERR(dst_ep_config)) {
+ pr_err("PSIL: thread ID 0x%04x not defined in map\n",
+ thread_id);
+ of_node_put(dma_spec.np);
+ return PTR_ERR(dst_ep_config);
+ }
+
+ memcpy(dst_ep_config, ep_config, sizeof(*dst_ep_config));
+
+ of_node_put(dma_spec.np);
+ return 0;
+}
+EXPORT_SYMBOL_GPL(psil_set_new_ep_config);
diff --git a/drivers/dma/ti/k3-udma-glue.c b/drivers/dma/ti/k3-udma-glue.c
new file mode 100644
index 000000000000..c1511298ece2
--- /dev/null
+++ b/drivers/dma/ti/k3-udma-glue.c
@@ -0,0 +1,1198 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * K3 NAVSS DMA glue interface
+ *
+ * Copyright (C) 2019 Texas Instruments Incorporated - http://www.ti.com
+ *
+ */
+
+#include <linux/atomic.h>
+#include <linux/delay.h>
+#include <linux/dma-mapping.h>
+#include <linux/io.h>
+#include <linux/init.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/soc/ti/k3-ringacc.h>
+#include <linux/dma/ti-cppi5.h>
+#include <linux/dma/k3-udma-glue.h>
+
+#include "k3-udma.h"
+#include "k3-psil-priv.h"
+
+struct k3_udma_glue_common {
+ struct device *dev;
+ struct udma_dev *udmax;
+ const struct udma_tisci_rm *tisci_rm;
+ struct k3_ringacc *ringacc;
+ u32 src_thread;
+ u32 dst_thread;
+
+ u32 hdesc_size;
+ bool epib;
+ u32 psdata_size;
+ u32 swdata_size;
+};
+
+struct k3_udma_glue_tx_channel {
+ struct k3_udma_glue_common common;
+
+ struct udma_tchan *udma_tchanx;
+ int udma_tchan_id;
+
+ struct k3_ring *ringtx;
+ struct k3_ring *ringtxcq;
+
+ bool psil_paired;
+
+ int virq;
+
+ atomic_t free_pkts;
+ bool tx_pause_on_err;
+ bool tx_filt_einfo;
+ bool tx_filt_pswords;
+ bool tx_supr_tdpkt;
+};
+
+struct k3_udma_glue_rx_flow {
+ struct udma_rflow *udma_rflow;
+ int udma_rflow_id;
+ struct k3_ring *ringrx;
+ struct k3_ring *ringrxfdq;
+
+ int virq;
+};
+
+struct k3_udma_glue_rx_channel {
+ struct k3_udma_glue_common common;
+
+ struct udma_rchan *udma_rchanx;
+ int udma_rchan_id;
+ bool remote;
+
+ bool psil_paired;
+
+ u32 swdata_size;
+ int flow_id_base;
+
+ struct k3_udma_glue_rx_flow *flows;
+ u32 flow_num;
+ u32 flows_ready;
+};
+
+#define K3_UDMAX_TDOWN_TIMEOUT_US 1000
+
+static int of_k3_udma_glue_parse(struct device_node *udmax_np,
+ struct k3_udma_glue_common *common)
+{
+ common->ringacc = of_k3_ringacc_get_by_phandle(udmax_np,
+ "ti,ringacc");
+ if (IS_ERR(common->ringacc))
+ return PTR_ERR(common->ringacc);
+
+ common->udmax = of_xudma_dev_get(udmax_np, NULL);
+ if (IS_ERR(common->udmax))
+ return PTR_ERR(common->udmax);
+
+ common->tisci_rm = xudma_dev_get_tisci_rm(common->udmax);
+
+ return 0;
+}
+
+static int of_k3_udma_glue_parse_chn(struct device_node *chn_np,
+ const char *name, struct k3_udma_glue_common *common,
+ bool tx_chn)
+{
+ struct psil_endpoint_config *ep_config;
+ struct of_phandle_args dma_spec;
+ u32 thread_id;
+ int ret = 0;
+ int index;
+
+ if (unlikely(!name))
+ return -EINVAL;
+
+ index = of_property_match_string(chn_np, "dma-names", name);
+ if (index < 0)
+ return index;
+
+ if (of_parse_phandle_with_args(chn_np, "dmas", "#dma-cells", index,
+ &dma_spec))
+ return -ENOENT;
+
+ thread_id = dma_spec.args[0];
+
+ if (tx_chn && !(thread_id & K3_PSIL_DST_THREAD_ID_OFFSET)) {
+ ret = -EINVAL;
+ goto out_put_spec;
+ }
+
+ if (!tx_chn && (thread_id & K3_PSIL_DST_THREAD_ID_OFFSET)) {
+ ret = -EINVAL;
+ goto out_put_spec;
+ }
+
+ /* get psil endpoint config */
+ ep_config = psil_get_ep_config(thread_id);
+ if (IS_ERR(ep_config)) {
+ dev_err(common->dev,
+ "No configuration for psi-l thread 0x%04x\n",
+ thread_id);
+ ret = PTR_ERR(ep_config);
+ goto out_put_spec;
+ }
+
+ common->epib = ep_config->needs_epib;
+ common->psdata_size = ep_config->psd_size;
+
+ if (tx_chn)
+ common->dst_thread = thread_id;
+ else
+ common->src_thread = thread_id;
+
+ ret = of_k3_udma_glue_parse(dma_spec.np, common);
+
+out_put_spec:
+ of_node_put(dma_spec.np);
+ return ret;
+}
+
+static void k3_udma_glue_dump_tx_chn(struct k3_udma_glue_tx_channel *tx_chn)
+{
+ struct device *dev = tx_chn->common.dev;
+
+ dev_dbg(dev, "dump_tx_chn:\n"
+ "udma_tchan_id: %d\n"
+ "src_thread: %08x\n"
+ "dst_thread: %08x\n",
+ tx_chn->udma_tchan_id,
+ tx_chn->common.src_thread,
+ tx_chn->common.dst_thread);
+}
+
+static void k3_udma_glue_dump_tx_rt_chn(struct k3_udma_glue_tx_channel *chn,
+ char *mark)
+{
+ struct device *dev = chn->common.dev;
+
+ dev_dbg(dev, "=== dump ===> %s\n", mark);
+ dev_dbg(dev, "0x%08X: %08X\n", UDMA_TCHAN_RT_CTL_REG,
+ xudma_tchanrt_read(chn->udma_tchanx, UDMA_TCHAN_RT_CTL_REG));
+ dev_dbg(dev, "0x%08X: %08X\n", UDMA_TCHAN_RT_PEER_RT_EN_REG,
+ xudma_tchanrt_read(chn->udma_tchanx,
+ UDMA_TCHAN_RT_PEER_RT_EN_REG));
+ dev_dbg(dev, "0x%08X: %08X\n", UDMA_TCHAN_RT_PCNT_REG,
+ xudma_tchanrt_read(chn->udma_tchanx, UDMA_TCHAN_RT_PCNT_REG));
+ dev_dbg(dev, "0x%08X: %08X\n", UDMA_TCHAN_RT_BCNT_REG,
+ xudma_tchanrt_read(chn->udma_tchanx, UDMA_TCHAN_RT_BCNT_REG));
+ dev_dbg(dev, "0x%08X: %08X\n", UDMA_TCHAN_RT_SBCNT_REG,
+ xudma_tchanrt_read(chn->udma_tchanx, UDMA_TCHAN_RT_SBCNT_REG));
+}
+
+static int k3_udma_glue_cfg_tx_chn(struct k3_udma_glue_tx_channel *tx_chn)
+{
+ const struct udma_tisci_rm *tisci_rm = tx_chn->common.tisci_rm;
+ struct ti_sci_msg_rm_udmap_tx_ch_cfg req;
+
+ memset(&req, 0, sizeof(req));
+
+ req.valid_params = TI_SCI_MSG_VALUE_RM_UDMAP_CH_PAUSE_ON_ERR_VALID |
+ TI_SCI_MSG_VALUE_RM_UDMAP_CH_TX_FILT_EINFO_VALID |
+ TI_SCI_MSG_VALUE_RM_UDMAP_CH_TX_FILT_PSWORDS_VALID |
+ TI_SCI_MSG_VALUE_RM_UDMAP_CH_CHAN_TYPE_VALID |
+ TI_SCI_MSG_VALUE_RM_UDMAP_CH_TX_SUPR_TDPKT_VALID |
+ TI_SCI_MSG_VALUE_RM_UDMAP_CH_FETCH_SIZE_VALID |
+ TI_SCI_MSG_VALUE_RM_UDMAP_CH_CQ_QNUM_VALID;
+ req.nav_id = tisci_rm->tisci_dev_id;
+ req.index = tx_chn->udma_tchan_id;
+ if (tx_chn->tx_pause_on_err)
+ req.tx_pause_on_err = 1;
+ if (tx_chn->tx_filt_einfo)
+ req.tx_filt_einfo = 1;
+ if (tx_chn->tx_filt_pswords)
+ req.tx_filt_pswords = 1;
+ req.tx_chan_type = TI_SCI_RM_UDMAP_CHAN_TYPE_PKT_PBRR;
+ if (tx_chn->tx_supr_tdpkt)
+ req.tx_supr_tdpkt = 1;
+ req.tx_fetch_size = tx_chn->common.hdesc_size >> 2;
+ req.txcq_qnum = k3_ringacc_get_ring_id(tx_chn->ringtxcq);
+
+ return tisci_rm->tisci_udmap_ops->tx_ch_cfg(tisci_rm->tisci, &req);
+}
+
+struct k3_udma_glue_tx_channel *k3_udma_glue_request_tx_chn(struct device *dev,
+ const char *name, struct k3_udma_glue_tx_channel_cfg *cfg)
+{
+ struct k3_udma_glue_tx_channel *tx_chn;
+ int ret;
+
+ tx_chn = devm_kzalloc(dev, sizeof(*tx_chn), GFP_KERNEL);
+ if (!tx_chn)
+ return ERR_PTR(-ENOMEM);
+
+ tx_chn->common.dev = dev;
+ tx_chn->common.swdata_size = cfg->swdata_size;
+ tx_chn->tx_pause_on_err = cfg->tx_pause_on_err;
+ tx_chn->tx_filt_einfo = cfg->tx_filt_einfo;
+ tx_chn->tx_filt_pswords = cfg->tx_filt_pswords;
+ tx_chn->tx_supr_tdpkt = cfg->tx_supr_tdpkt;
+
+ /* parse the udmap channel from DT */
+ ret = of_k3_udma_glue_parse_chn(dev->of_node, name,
+ &tx_chn->common, true);
+ if (ret)
+ goto err;
+
+ tx_chn->common.hdesc_size = cppi5_hdesc_calc_size(tx_chn->common.epib,
+ tx_chn->common.psdata_size,
+ tx_chn->common.swdata_size);
+
+ /* request and cfg UDMAP TX channel */
+ tx_chn->udma_tchanx = xudma_tchan_get(tx_chn->common.udmax, -1);
+ if (IS_ERR(tx_chn->udma_tchanx)) {
+ ret = PTR_ERR(tx_chn->udma_tchanx);
+ dev_err(dev, "UDMAX tchanx get err %d\n", ret);
+ goto err;
+ }
+ tx_chn->udma_tchan_id = xudma_tchan_get_id(tx_chn->udma_tchanx);
+
+ atomic_set(&tx_chn->free_pkts, cfg->txcq_cfg.size);
+
+ /* request and cfg rings */
+ tx_chn->ringtx = k3_ringacc_request_ring(tx_chn->common.ringacc,
+ tx_chn->udma_tchan_id, 0);
+ if (!tx_chn->ringtx) {
+ ret = -ENODEV;
+ dev_err(dev, "Failed to get TX ring %u\n",
+ tx_chn->udma_tchan_id);
+ goto err;
+ }
+
+ tx_chn->ringtxcq = k3_ringacc_request_ring(tx_chn->common.ringacc,
+ -1, 0);
+ if (!tx_chn->ringtxcq) {
+ ret = -ENODEV;
+ dev_err(dev, "Failed to get TXCQ ring\n");
+ goto err;
+ }
+
+ ret = k3_ringacc_ring_cfg(tx_chn->ringtx, &cfg->tx_cfg);
+ if (ret) {
+ dev_err(dev, "Failed to cfg ringtx %d\n", ret);
+ goto err;
+ }
+
+ ret = k3_ringacc_ring_cfg(tx_chn->ringtxcq, &cfg->txcq_cfg);
+ if (ret) {
+ dev_err(dev, "Failed to cfg ringtx %d\n", ret);
+ goto err;
+ }
+
+ /* request and cfg psi-l */
+ tx_chn->common.src_thread =
+ xudma_dev_get_psil_base(tx_chn->common.udmax) +
+ tx_chn->udma_tchan_id;
+
+ ret = k3_udma_glue_cfg_tx_chn(tx_chn);
+ if (ret) {
+ dev_err(dev, "Failed to cfg tchan %d\n", ret);
+ goto err;
+ }
+
+ ret = xudma_navss_psil_pair(tx_chn->common.udmax,
+ tx_chn->common.src_thread,
+ tx_chn->common.dst_thread);
+ if (ret) {
+ dev_err(dev, "PSI-L request err %d\n", ret);
+ goto err;
+ }
+
+ tx_chn->psil_paired = true;
+
+ /* reset TX RT registers */
+ k3_udma_glue_disable_tx_chn(tx_chn);
+
+ k3_udma_glue_dump_tx_chn(tx_chn);
+
+ return tx_chn;
+
+err:
+ k3_udma_glue_release_tx_chn(tx_chn);
+ return ERR_PTR(ret);
+}
+EXPORT_SYMBOL_GPL(k3_udma_glue_request_tx_chn);
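+
+/*
+ * A consumer requests the channel by its "dma-names" entry and supplies
+ * ring configurations; a minimal sketch (the "tx0" name, swdata size and
+ * ring sizes are examples, not requirements):
+ *
+ *	struct k3_udma_glue_tx_channel_cfg cfg = { };
+ *
+ *	cfg.swdata_size = 16;
+ *	cfg.tx_cfg.size = 128;
+ *	cfg.tx_cfg.mode = K3_RINGACC_RING_MODE_RING;
+ *	cfg.tx_cfg.elm_size = K3_RINGACC_RING_ELSIZE_8;
+ *	cfg.txcq_cfg = cfg.tx_cfg;
+ *	tx_chn = k3_udma_glue_request_tx_chn(dev, "tx0", &cfg);
+ *	if (IS_ERR(tx_chn))
+ *		return PTR_ERR(tx_chn);
+ */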
+
+void k3_udma_glue_release_tx_chn(struct k3_udma_glue_tx_channel *tx_chn)
+{
+ if (tx_chn->psil_paired) {
+ xudma_navss_psil_unpair(tx_chn->common.udmax,
+ tx_chn->common.src_thread,
+ tx_chn->common.dst_thread);
+ tx_chn->psil_paired = false;
+ }
+
+ if (!IS_ERR_OR_NULL(tx_chn->udma_tchanx))
+ xudma_tchan_put(tx_chn->common.udmax,
+ tx_chn->udma_tchanx);
+
+ if (tx_chn->ringtxcq)
+ k3_ringacc_ring_free(tx_chn->ringtxcq);
+
+ if (tx_chn->ringtx)
+ k3_ringacc_ring_free(tx_chn->ringtx);
+}
+EXPORT_SYMBOL_GPL(k3_udma_glue_release_tx_chn);
+
+int k3_udma_glue_push_tx_chn(struct k3_udma_glue_tx_channel *tx_chn,
+ struct cppi5_host_desc_t *desc_tx,
+ dma_addr_t desc_dma)
+{
+ u32 ringtxcq_id;
+
+ if (!atomic_add_unless(&tx_chn->free_pkts, -1, 0))
+ return -ENOMEM;
+
+ ringtxcq_id = k3_ringacc_get_ring_id(tx_chn->ringtxcq);
+ cppi5_desc_set_retpolicy(&desc_tx->hdr, 0, ringtxcq_id);
+
+ return k3_ringacc_ring_push(tx_chn->ringtx, &desc_dma);
+}
+EXPORT_SYMBOL_GPL(k3_udma_glue_push_tx_chn);
+
+int k3_udma_glue_pop_tx_chn(struct k3_udma_glue_tx_channel *tx_chn,
+ dma_addr_t *desc_dma)
+{
+ int ret;
+
+ ret = k3_ringacc_ring_pop(tx_chn->ringtxcq, desc_dma);
+ if (!ret)
+ atomic_inc(&tx_chn->free_pkts);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(k3_udma_glue_pop_tx_chn);
+
+int k3_udma_glue_enable_tx_chn(struct k3_udma_glue_tx_channel *tx_chn)
+{
+ u32 txrt_ctl;
+
+ txrt_ctl = UDMA_PEER_RT_EN_ENABLE;
+ xudma_tchanrt_write(tx_chn->udma_tchanx,
+ UDMA_TCHAN_RT_PEER_RT_EN_REG,
+ txrt_ctl);
+
+ txrt_ctl = xudma_tchanrt_read(tx_chn->udma_tchanx,
+ UDMA_TCHAN_RT_CTL_REG);
+ txrt_ctl |= UDMA_CHAN_RT_CTL_EN;
+ xudma_tchanrt_write(tx_chn->udma_tchanx, UDMA_TCHAN_RT_CTL_REG,
+ txrt_ctl);
+
+ k3_udma_glue_dump_tx_rt_chn(tx_chn, "txchn en");
+ return 0;
+}
+EXPORT_SYMBOL_GPL(k3_udma_glue_enable_tx_chn);
+
+void k3_udma_glue_disable_tx_chn(struct k3_udma_glue_tx_channel *tx_chn)
+{
+ k3_udma_glue_dump_tx_rt_chn(tx_chn, "txchn dis1");
+
+ xudma_tchanrt_write(tx_chn->udma_tchanx, UDMA_TCHAN_RT_CTL_REG, 0);
+
+ xudma_tchanrt_write(tx_chn->udma_tchanx,
+ UDMA_TCHAN_RT_PEER_RT_EN_REG, 0);
+ k3_udma_glue_dump_tx_rt_chn(tx_chn, "txchn dis2");
+}
+EXPORT_SYMBOL_GPL(k3_udma_glue_disable_tx_chn);
+
+void k3_udma_glue_tdown_tx_chn(struct k3_udma_glue_tx_channel *tx_chn,
+ bool sync)
+{
+ int i = 0;
+ u32 val;
+
+ k3_udma_glue_dump_tx_rt_chn(tx_chn, "txchn tdown1");
+
+ xudma_tchanrt_write(tx_chn->udma_tchanx, UDMA_TCHAN_RT_CTL_REG,
+ UDMA_CHAN_RT_CTL_EN | UDMA_CHAN_RT_CTL_TDOWN);
+
+ val = xudma_tchanrt_read(tx_chn->udma_tchanx, UDMA_TCHAN_RT_CTL_REG);
+
+ while (sync && (val & UDMA_CHAN_RT_CTL_EN)) {
+ val = xudma_tchanrt_read(tx_chn->udma_tchanx,
+ UDMA_TCHAN_RT_CTL_REG);
+ udelay(1);
+ if (i > K3_UDMAX_TDOWN_TIMEOUT_US) {
+ dev_err(tx_chn->common.dev, "TX tdown timeout\n");
+ break;
+ }
+ i++;
+ }
+
+ val = xudma_tchanrt_read(tx_chn->udma_tchanx,
+ UDMA_TCHAN_RT_PEER_RT_EN_REG);
+ if (sync && (val & UDMA_PEER_RT_EN_ENABLE))
+ dev_err(tx_chn->common.dev, "TX tdown peer not stopped\n");
+ k3_udma_glue_dump_tx_rt_chn(tx_chn, "txchn tdown2");
+}
+EXPORT_SYMBOL_GPL(k3_udma_glue_tdown_tx_chn);
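+
+/*
+ * Illustrative sketch (not part of the original patch): the stop
+ * sequence this API implies - synchronous teardown first, then reclaim
+ * of in-flight descriptors, then channel disable and release.
+ * example_tx_cleanup stands for a client-provided callback.
+ */
+static void example_tx_shutdown(struct k3_udma_glue_tx_channel *tx_chn,
+ void *priv,
+ void (*example_tx_cleanup)(void *data, dma_addr_t desc_dma))
+{
+ k3_udma_glue_tdown_tx_chn(tx_chn, true);
+ k3_udma_glue_reset_tx_chn(tx_chn, priv, example_tx_cleanup);
+ k3_udma_glue_disable_tx_chn(tx_chn);
+ k3_udma_glue_release_tx_chn(tx_chn);
+}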
+
+void k3_udma_glue_reset_tx_chn(struct k3_udma_glue_tx_channel *tx_chn,
+ void *data,
+ void (*cleanup)(void *data, dma_addr_t desc_dma))
+{
+ dma_addr_t desc_dma;
+ int occ_tx, i, ret;
+
+ /* reset TXCQ as it is not an input to UDMA - expected to be empty */
+ if (tx_chn->ringtxcq)
+ k3_ringacc_ring_reset(tx_chn->ringtxcq);
+
+ /*
+ * The TXQ reset needs to be done in a special way as it is an input
+ * to UDMA and its state is cached there, so:
+ * 1) save the TXQ occupancy
+ * 2) clean up the TXQ and call the .cleanup() callback for each desc
+ * 3) reset the TXQ in a special way
+ */
+ occ_tx = k3_ringacc_ring_get_occ(tx_chn->ringtx);
+ dev_dbg(tx_chn->common.dev, "TX reset occ_tx %u\n", occ_tx);
+
+ for (i = 0; i < occ_tx; i++) {
+ ret = k3_ringacc_ring_pop(tx_chn->ringtx, &desc_dma);
+ if (ret) {
+ dev_err(tx_chn->common.dev, "TX reset pop %d\n", ret);
+ break;
+ }
+ cleanup(data, desc_dma);
+ }
+
+ k3_ringacc_ring_reset_dma(tx_chn->ringtx, occ_tx);
+}
+EXPORT_SYMBOL_GPL(k3_udma_glue_reset_tx_chn);
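+
+/*
+ * Illustrative sketch (not part of the original patch): the shape of a
+ * .cleanup() callback for the reset above. How desc_dma maps back to a
+ * CPU-visible descriptor is client specific; struct example_priv and
+ * the example_* helpers are hypothetical.
+ */
+static void example_tx_cleanup_cb(void *data, dma_addr_t desc_dma)
+{
+ struct example_priv *priv = data;
+ struct cppi5_host_desc_t *desc;
+
+ desc = example_dma_to_virt(priv, desc_dma);
+ /* unmap the buffers the descriptor references, then free it */
+ example_free_desc(priv, desc);
+}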
+
+u32 k3_udma_glue_tx_get_hdesc_size(struct k3_udma_glue_tx_channel *tx_chn)
+{
+ return tx_chn->common.hdesc_size;
+}
+EXPORT_SYMBOL_GPL(k3_udma_glue_tx_get_hdesc_size);
+
+u32 k3_udma_glue_tx_get_txcq_id(struct k3_udma_glue_tx_channel *tx_chn)
+{
+ return k3_ringacc_get_ring_id(tx_chn->ringtxcq);
+}
+EXPORT_SYMBOL_GPL(k3_udma_glue_tx_get_txcq_id);
+
+int k3_udma_glue_tx_get_irq(struct k3_udma_glue_tx_channel *tx_chn)
+{
+ tx_chn->virq = k3_ringacc_get_ring_irq_num(tx_chn->ringtxcq);
+
+ return tx_chn->virq;
+}
+EXPORT_SYMBOL_GPL(k3_udma_glue_tx_get_irq);
+
+static int k3_udma_glue_cfg_rx_chn(struct k3_udma_glue_rx_channel *rx_chn)
+{
+ const struct udma_tisci_rm *tisci_rm = rx_chn->common.tisci_rm;
+ struct ti_sci_msg_rm_udmap_rx_ch_cfg req;
+ int ret;
+
+ memset(&req, 0, sizeof(req));
+
+ req.valid_params = TI_SCI_MSG_VALUE_RM_UDMAP_CH_FETCH_SIZE_VALID |
+ TI_SCI_MSG_VALUE_RM_UDMAP_CH_CQ_QNUM_VALID |
+ TI_SCI_MSG_VALUE_RM_UDMAP_CH_CHAN_TYPE_VALID |
+ TI_SCI_MSG_VALUE_RM_UDMAP_CH_RX_FLOWID_START_VALID |
+ TI_SCI_MSG_VALUE_RM_UDMAP_CH_RX_FLOWID_CNT_VALID;
+
+ req.nav_id = tisci_rm->tisci_dev_id;
+ req.index = rx_chn->udma_rchan_id;
+ req.rx_fetch_size = rx_chn->common.hdesc_size >> 2;
+ /*
+ * TODO: rxcq_qnum/RCHAN[a]_RCQ configuration cannot be supported with
+ * the current sysfw and udmax implementation, so just configure it to
+ * an invalid value.
+ * req.rxcq_qnum = k3_ringacc_get_ring_id(rx_chn->flows[0].ringrx);
+ */
+ req.rxcq_qnum = 0xFFFF;
+ if (rx_chn->flow_num && rx_chn->flow_id_base != rx_chn->udma_rchan_id) {
+ /* Default flow + extra ones */
+ req.flowid_start = rx_chn->flow_id_base;
+ req.flowid_cnt = rx_chn->flow_num;
+ }
+ req.rx_chan_type = TI_SCI_RM_UDMAP_CHAN_TYPE_PKT_PBRR;
+
+ ret = tisci_rm->tisci_udmap_ops->rx_ch_cfg(tisci_rm->tisci, &req);
+ if (ret)
+ dev_err(rx_chn->common.dev, "rchan%d cfg failed %d\n",
+ rx_chn->udma_rchan_id, ret);
+
+ return ret;
+}
+
+static void k3_udma_glue_release_rx_flow(struct k3_udma_glue_rx_channel *rx_chn,
+ u32 flow_num)
+{
+ struct k3_udma_glue_rx_flow *flow = &rx_chn->flows[flow_num];
+
+ if (IS_ERR_OR_NULL(flow->udma_rflow))
+ return;
+
+ if (flow->ringrxfdq)
+ k3_ringacc_ring_free(flow->ringrxfdq);
+
+ if (flow->ringrx)
+ k3_ringacc_ring_free(flow->ringrx);
+
+ xudma_rflow_put(rx_chn->common.udmax, flow->udma_rflow);
+ flow->udma_rflow = NULL;
+ rx_chn->flows_ready--;
+}
+
+static int k3_udma_glue_cfg_rx_flow(struct k3_udma_glue_rx_channel *rx_chn,
+ u32 flow_idx,
+ struct k3_udma_glue_rx_flow_cfg *flow_cfg)
+{
+ struct k3_udma_glue_rx_flow *flow = &rx_chn->flows[flow_idx];
+ const struct udma_tisci_rm *tisci_rm = rx_chn->common.tisci_rm;
+ struct device *dev = rx_chn->common.dev;
+ struct ti_sci_msg_rm_udmap_flow_cfg req;
+ int rx_ring_id;
+ int rx_ringfdq_id;
+ int ret = 0;
+
+ flow->udma_rflow = xudma_rflow_get(rx_chn->common.udmax,
+ flow->udma_rflow_id);
+ if (IS_ERR(flow->udma_rflow)) {
+ ret = PTR_ERR(flow->udma_rflow);
+ dev_err(dev, "UDMAX rflow get err %d\n", ret);
+ goto err;
+ }
+
+ if (flow->udma_rflow_id != xudma_rflow_get_id(flow->udma_rflow)) {
+ xudma_rflow_put(rx_chn->common.udmax, flow->udma_rflow);
+ return -ENODEV;
+ }
+
+ /* request and cfg rings */
+ flow->ringrx = k3_ringacc_request_ring(rx_chn->common.ringacc,
+ flow_cfg->ring_rxq_id, 0);
+ if (!flow->ringrx) {
+ ret = -ENODEV;
+ dev_err(dev, "Failed to get RX ring\n");
+ goto err;
+ }
+
+ flow->ringrxfdq = k3_ringacc_request_ring(rx_chn->common.ringacc,
+ flow_cfg->ring_rxfdq0_id, 0);
+ if (!flow->ringrxfdq) {
+ ret = -ENODEV;
+ dev_err(dev, "Failed to get RXFDQ ring\n");
+ goto err;
+ }
+
+ ret = k3_ringacc_ring_cfg(flow->ringrx, &flow_cfg->rx_cfg);
+ if (ret) {
+ dev_err(dev, "Failed to cfg ringrx %d\n", ret);
+ goto err;
+ }
+
+ ret = k3_ringacc_ring_cfg(flow->ringrxfdq, &flow_cfg->rxfdq_cfg);
+ if (ret) {
+ dev_err(dev, "Failed to cfg ringrxfdq %d\n", ret);
+ goto err;
+ }
+
+ if (rx_chn->remote) {
+ rx_ring_id = TI_SCI_RESOURCE_NULL;
+ rx_ringfdq_id = TI_SCI_RESOURCE_NULL;
+ } else {
+ rx_ring_id = k3_ringacc_get_ring_id(flow->ringrx);
+ rx_ringfdq_id = k3_ringacc_get_ring_id(flow->ringrxfdq);
+ }
+
+ memset(&req, 0, sizeof(req));
+
+ req.valid_params =
+ TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_EINFO_PRESENT_VALID |
+ TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_PSINFO_PRESENT_VALID |
+ TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_ERROR_HANDLING_VALID |
+ TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_DESC_TYPE_VALID |
+ TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_DEST_QNUM_VALID |
+ TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_SRC_TAG_HI_SEL_VALID |
+ TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_SRC_TAG_LO_SEL_VALID |
+ TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_DEST_TAG_HI_SEL_VALID |
+ TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_DEST_TAG_LO_SEL_VALID |
+ TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_FDQ0_SZ0_QNUM_VALID |
+ TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_FDQ1_QNUM_VALID |
+ TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_FDQ2_QNUM_VALID |
+ TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_FDQ3_QNUM_VALID;
+ req.nav_id = tisci_rm->tisci_dev_id;
+ req.flow_index = flow->udma_rflow_id;
+ if (rx_chn->common.epib)
+ req.rx_einfo_present = 1;
+ if (rx_chn->common.psdata_size)
+ req.rx_psinfo_present = 1;
+ if (flow_cfg->rx_error_handling)
+ req.rx_error_handling = 1;
+ req.rx_desc_type = 0;
+ req.rx_dest_qnum = rx_ring_id;
+ req.rx_src_tag_hi_sel = 0;
+ req.rx_src_tag_lo_sel = flow_cfg->src_tag_lo_sel;
+ req.rx_dest_tag_hi_sel = 0;
+ req.rx_dest_tag_lo_sel = 0;
+ req.rx_fdq0_sz0_qnum = rx_ringfdq_id;
+ req.rx_fdq1_qnum = rx_ringfdq_id;
+ req.rx_fdq2_qnum = rx_ringfdq_id;
+ req.rx_fdq3_qnum = rx_ringfdq_id;
+
+ ret = tisci_rm->tisci_udmap_ops->rx_flow_cfg(tisci_rm->tisci, &req);
+ if (ret) {
+ dev_err(dev, "flow%d config failed: %d\n", flow->udma_rflow_id,
+ ret);
+ goto err;
+ }
+
+ rx_chn->flows_ready++;
+ dev_dbg(dev, "flow%d config done. ready:%d\n",
+ flow->udma_rflow_id, rx_chn->flows_ready);
+
+ return 0;
+err:
+ k3_udma_glue_release_rx_flow(rx_chn, flow_idx);
+ return ret;
+}
+
+static void k3_udma_glue_dump_rx_chn(struct k3_udma_glue_rx_channel *chn)
+{
+ struct device *dev = chn->common.dev;
+
+ dev_dbg(dev, "dump_rx_chn:\n"
+ "udma_rchan_id: %d\n"
+ "src_thread: %08x\n"
+ "dst_thread: %08x\n"
+ "epib: %d\n"
+ "hdesc_size: %u\n"
+ "psdata_size: %u\n"
+ "swdata_size: %u\n"
+ "flow_id_base: %d\n"
+ "flow_num: %d\n",
+ chn->udma_rchan_id,
+ chn->common.src_thread,
+ chn->common.dst_thread,
+ chn->common.epib,
+ chn->common.hdesc_size,
+ chn->common.psdata_size,
+ chn->common.swdata_size,
+ chn->flow_id_base,
+ chn->flow_num);
+}
+
+static void k3_udma_glue_dump_rx_rt_chn(struct k3_udma_glue_rx_channel *chn,
+ char *mark)
+{
+ struct device *dev = chn->common.dev;
+
+ dev_dbg(dev, "=== dump ===> %s\n", mark);
+
+ dev_dbg(dev, "0x%08X: %08X\n", UDMA_RCHAN_RT_CTL_REG,
+ xudma_rchanrt_read(chn->udma_rchanx, UDMA_RCHAN_RT_CTL_REG));
+ dev_dbg(dev, "0x%08X: %08X\n", UDMA_RCHAN_RT_PEER_RT_EN_REG,
+ xudma_rchanrt_read(chn->udma_rchanx,
+ UDMA_RCHAN_RT_PEER_RT_EN_REG));
+ dev_dbg(dev, "0x%08X: %08X\n", UDMA_RCHAN_RT_PCNT_REG,
+ xudma_rchanrt_read(chn->udma_rchanx, UDMA_RCHAN_RT_PCNT_REG));
+ dev_dbg(dev, "0x%08X: %08X\n", UDMA_RCHAN_RT_BCNT_REG,
+ xudma_rchanrt_read(chn->udma_rchanx, UDMA_RCHAN_RT_BCNT_REG));
+ dev_dbg(dev, "0x%08X: %08X\n", UDMA_RCHAN_RT_SBCNT_REG,
+ xudma_rchanrt_read(chn->udma_rchanx, UDMA_RCHAN_RT_SBCNT_REG));
+}
+
+static int
+k3_udma_glue_allocate_rx_flows(struct k3_udma_glue_rx_channel *rx_chn,
+ struct k3_udma_glue_rx_channel_cfg *cfg)
+{
+ int ret;
+
+ /* default rflow */
+ if (cfg->flow_id_use_rxchan_id)
+ return 0;
+
+ /* not GP rflows */
+ if (rx_chn->flow_id_base != -1 &&
+ !xudma_rflow_is_gp(rx_chn->common.udmax, rx_chn->flow_id_base))
+ return 0;
+
+ /* Allocate range of GP rflows */
+ ret = xudma_alloc_gp_rflow_range(rx_chn->common.udmax,
+ rx_chn->flow_id_base,
+ rx_chn->flow_num);
+ if (ret < 0) {
+ dev_err(rx_chn->common.dev, "UDMAX reserve_rflow %d cnt:%d err: %d\n",
+ rx_chn->flow_id_base, rx_chn->flow_num, ret);
+ return ret;
+ }
+ rx_chn->flow_id_base = ret;
+
+ return 0;
+}
+
+static struct k3_udma_glue_rx_channel *
+k3_udma_glue_request_rx_chn_priv(struct device *dev, const char *name,
+ struct k3_udma_glue_rx_channel_cfg *cfg)
+{
+ struct k3_udma_glue_rx_channel *rx_chn;
+ int ret, i;
+
+ if (cfg->flow_id_num <= 0)
+ return ERR_PTR(-EINVAL);
+
+ if (cfg->flow_id_num != 1 &&
+ (cfg->def_flow_cfg || cfg->flow_id_use_rxchan_id))
+ return ERR_PTR(-EINVAL);
+
+ rx_chn = devm_kzalloc(dev, sizeof(*rx_chn), GFP_KERNEL);
+ if (!rx_chn)
+ return ERR_PTR(-ENOMEM);
+
+ rx_chn->common.dev = dev;
+ rx_chn->common.swdata_size = cfg->swdata_size;
+ rx_chn->remote = false;
+
+ /* parse the udmap channel from DT */
+ ret = of_k3_udma_glue_parse_chn(dev->of_node, name,
+ &rx_chn->common, false);
+ if (ret)
+ goto err;
+
+ rx_chn->common.hdesc_size = cppi5_hdesc_calc_size(rx_chn->common.epib,
+ rx_chn->common.psdata_size,
+ rx_chn->common.swdata_size);
+
+ /* request and cfg UDMAP RX channel */
+ rx_chn->udma_rchanx = xudma_rchan_get(rx_chn->common.udmax, -1);
+ if (IS_ERR(rx_chn->udma_rchanx)) {
+ ret = PTR_ERR(rx_chn->udma_rchanx);
+ dev_err(dev, "UDMAX rchanx get err %d\n", ret);
+ goto err;
+ }
+ rx_chn->udma_rchan_id = xudma_rchan_get_id(rx_chn->udma_rchanx);
+
+ rx_chn->flow_num = cfg->flow_id_num;
+ rx_chn->flow_id_base = cfg->flow_id_base;
+
+ /* Use RX channel id as flow id: target dev can't generate flow_id */
+ if (cfg->flow_id_use_rxchan_id)
+ rx_chn->flow_id_base = rx_chn->udma_rchan_id;
+
+ rx_chn->flows = devm_kcalloc(dev, rx_chn->flow_num,
+ sizeof(*rx_chn->flows), GFP_KERNEL);
+ if (!rx_chn->flows) {
+ ret = -ENOMEM;
+ goto err;
+ }
+
+ ret = k3_udma_glue_allocate_rx_flows(rx_chn, cfg);
+ if (ret)
+ goto err;
+
+ for (i = 0; i < rx_chn->flow_num; i++)
+ rx_chn->flows[i].udma_rflow_id = rx_chn->flow_id_base + i;
+
+ /* request and cfg psi-l */
+ rx_chn->common.dst_thread =
+ xudma_dev_get_psil_base(rx_chn->common.udmax) +
+ rx_chn->udma_rchan_id;
+
+ ret = k3_udma_glue_cfg_rx_chn(rx_chn);
+ if (ret) {
+ dev_err(dev, "Failed to cfg rchan %d\n", ret);
+ goto err;
+ }
+
+ /* init default RX flow only if flow_num = 1 */
+ if (cfg->def_flow_cfg) {
+ ret = k3_udma_glue_cfg_rx_flow(rx_chn, 0, cfg->def_flow_cfg);
+ if (ret)
+ goto err;
+ }
+
+ ret = xudma_navss_psil_pair(rx_chn->common.udmax,
+ rx_chn->common.src_thread,
+ rx_chn->common.dst_thread);
+ if (ret) {
+ dev_err(dev, "PSI-L request err %d\n", ret);
+ goto err;
+ }
+
+ rx_chn->psil_paired = true;
+
+ /* reset RX RT registers */
+ k3_udma_glue_disable_rx_chn(rx_chn);
+
+ k3_udma_glue_dump_rx_chn(rx_chn);
+
+ return rx_chn;
+
+err:
+ k3_udma_glue_release_rx_chn(rx_chn);
+ return ERR_PTR(ret);
+}
+
+static struct k3_udma_glue_rx_channel *
+k3_udma_glue_request_remote_rx_chn(struct device *dev, const char *name,
+ struct k3_udma_glue_rx_channel_cfg *cfg)
+{
+ struct k3_udma_glue_rx_channel *rx_chn;
+ int ret, i;
+
+ if (cfg->flow_id_num <= 0 ||
+ cfg->flow_id_use_rxchan_id ||
+ cfg->def_flow_cfg ||
+ cfg->flow_id_base < 0)
+ return ERR_PTR(-EINVAL);
+
+ /*
+ * A remote RX channel is under the control of a remote CPU core, so
+ * Linux can only request it and manipulate it through its dedicated
+ * RX flows.
+ */
+
+ rx_chn = devm_kzalloc(dev, sizeof(*rx_chn), GFP_KERNEL);
+ if (!rx_chn)
+ return ERR_PTR(-ENOMEM);
+
+ rx_chn->common.dev = dev;
+ rx_chn->common.swdata_size = cfg->swdata_size;
+ rx_chn->remote = true;
+ rx_chn->udma_rchan_id = -1;
+ rx_chn->flow_num = cfg->flow_id_num;
+ rx_chn->flow_id_base = cfg->flow_id_base;
+ rx_chn->psil_paired = false;
+
+ /* parse the udmap channel from DT */
+ ret = of_k3_udma_glue_parse_chn(dev->of_node, name,
+ &rx_chn->common, false);
+ if (ret)
+ goto err;
+
+ rx_chn->common.hdesc_size = cppi5_hdesc_calc_size(rx_chn->common.epib,
+ rx_chn->common.psdata_size,
+ rx_chn->common.swdata_size);
+
+ rx_chn->flows = devm_kcalloc(dev, rx_chn->flow_num,
+ sizeof(*rx_chn->flows), GFP_KERNEL);
+ if (!rx_chn->flows) {
+ ret = -ENOMEM;
+ goto err;
+ }
+
+ ret = k3_udma_glue_allocate_rx_flows(rx_chn, cfg);
+ if (ret)
+ goto err;
+
+ for (i = 0; i < rx_chn->flow_num; i++)
+ rx_chn->flows[i].udma_rflow_id = rx_chn->flow_id_base + i;
+
+ k3_udma_glue_dump_rx_chn(rx_chn);
+
+ return rx_chn;
+
+err:
+ k3_udma_glue_release_rx_chn(rx_chn);
+ return ERR_PTR(ret);
+}
+
+struct k3_udma_glue_rx_channel *
+k3_udma_glue_request_rx_chn(struct device *dev, const char *name,
+ struct k3_udma_glue_rx_channel_cfg *cfg)
+{
+ if (cfg->remote)
+ return k3_udma_glue_request_remote_rx_chn(dev, name, cfg);
+ else
+ return k3_udma_glue_request_rx_chn_priv(dev, name, cfg);
+}
+EXPORT_SYMBOL_GPL(k3_udma_glue_request_rx_chn);
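+
+/*
+ * Illustrative usage sketch (not part of the original patch): a
+ * host-owned RX channel with a single default flow. The cfg members
+ * match those dereferenced above; the flow cfg is prepared by the
+ * caller and "rx0" is a hypothetical DT channel name.
+ */
+static struct k3_udma_glue_rx_channel *
+example_setup_rx_chn(struct device *dev,
+ struct k3_udma_glue_rx_flow_cfg *def_flow_cfg)
+{
+ struct k3_udma_glue_rx_channel_cfg cfg;
+
+ memset(&cfg, 0, sizeof(cfg));
+ cfg.swdata_size = 16;
+ cfg.flow_id_base = -1; /* let the glue allocate GP flows */
+ cfg.flow_id_num = 1;
+ cfg.flow_id_use_rxchan_id = false;
+ cfg.def_flow_cfg = def_flow_cfg;
+ cfg.remote = false;
+
+ return k3_udma_glue_request_rx_chn(dev, "rx0", &cfg);
+}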
+
+void k3_udma_glue_release_rx_chn(struct k3_udma_glue_rx_channel *rx_chn)
+{
+ int i;
+
+ if (IS_ERR_OR_NULL(rx_chn->common.udmax))
+ return;
+
+ if (rx_chn->psil_paired) {
+ xudma_navss_psil_unpair(rx_chn->common.udmax,
+ rx_chn->common.src_thread,
+ rx_chn->common.dst_thread);
+ rx_chn->psil_paired = false;
+ }
+
+ for (i = 0; i < rx_chn->flow_num; i++)
+ k3_udma_glue_release_rx_flow(rx_chn, i);
+
+ if (xudma_rflow_is_gp(rx_chn->common.udmax, rx_chn->flow_id_base))
+ xudma_free_gp_rflow_range(rx_chn->common.udmax,
+ rx_chn->flow_id_base,
+ rx_chn->flow_num);
+
+ if (!IS_ERR_OR_NULL(rx_chn->udma_rchanx))
+ xudma_rchan_put(rx_chn->common.udmax,
+ rx_chn->udma_rchanx);
+}
+EXPORT_SYMBOL_GPL(k3_udma_glue_release_rx_chn);
+
+int k3_udma_glue_rx_flow_init(struct k3_udma_glue_rx_channel *rx_chn,
+ u32 flow_idx,
+ struct k3_udma_glue_rx_flow_cfg *flow_cfg)
+{
+ if (flow_idx >= rx_chn->flow_num)
+ return -EINVAL;
+
+ return k3_udma_glue_cfg_rx_flow(rx_chn, flow_idx, flow_cfg);
+}
+EXPORT_SYMBOL_GPL(k3_udma_glue_rx_flow_init);
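+
+/*
+ * Illustrative sketch (not part of the original patch): initializing an
+ * extra flow of a multi-flow channel. All members set here are the ones
+ * consumed by k3_udma_glue_cfg_rx_flow() above; the sizes are arbitrary.
+ */
+static int example_init_extra_flow(struct k3_udma_glue_rx_channel *rx_chn,
+ u32 flow_idx)
+{
+ struct k3_udma_glue_rx_flow_cfg fcfg;
+
+ memset(&fcfg, 0, sizeof(fcfg));
+ fcfg.ring_rxq_id = -1; /* any free RX ring */
+ fcfg.ring_rxfdq0_id = -1; /* any free free-descriptor ring */
+ fcfg.rx_error_handling = true; /* retry on starvation, don't drop */
+ fcfg.src_tag_lo_sel = 0;
+ fcfg.rx_cfg.size = 128;
+ fcfg.rx_cfg.elm_size = K3_RINGACC_RING_ELSIZE_8;
+ fcfg.rx_cfg.mode = K3_RINGACC_RING_MODE_MESSAGE;
+ fcfg.rxfdq_cfg = fcfg.rx_cfg;
+
+ return k3_udma_glue_rx_flow_init(rx_chn, flow_idx, &fcfg);
+}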
+
+u32 k3_udma_glue_rx_flow_get_fdq_id(struct k3_udma_glue_rx_channel *rx_chn,
+ u32 flow_idx)
+{
+ struct k3_udma_glue_rx_flow *flow;
+
+ if (flow_idx >= rx_chn->flow_num)
+ return -EINVAL;
+
+ flow = &rx_chn->flows[flow_idx];
+
+ return k3_ringacc_get_ring_id(flow->ringrxfdq);
+}
+EXPORT_SYMBOL_GPL(k3_udma_glue_rx_flow_get_fdq_id);
+
+u32 k3_udma_glue_rx_get_flow_id_base(struct k3_udma_glue_rx_channel *rx_chn)
+{
+ return rx_chn->flow_id_base;
+}
+EXPORT_SYMBOL_GPL(k3_udma_glue_rx_get_flow_id_base);
+
+int k3_udma_glue_rx_flow_enable(struct k3_udma_glue_rx_channel *rx_chn,
+ u32 flow_idx)
+{
+ struct k3_udma_glue_rx_flow *flow = &rx_chn->flows[flow_idx];
+ const struct udma_tisci_rm *tisci_rm = rx_chn->common.tisci_rm;
+ struct device *dev = rx_chn->common.dev;
+ struct ti_sci_msg_rm_udmap_flow_cfg req;
+ int rx_ring_id;
+ int rx_ringfdq_id;
+ int ret = 0;
+
+ if (!rx_chn->remote)
+ return -EINVAL;
+
+ rx_ring_id = k3_ringacc_get_ring_id(flow->ringrx);
+ rx_ringfdq_id = k3_ringacc_get_ring_id(flow->ringrxfdq);
+
+ memset(&req, 0, sizeof(req));
+
+ req.valid_params =
+ TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_DEST_QNUM_VALID |
+ TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_FDQ0_SZ0_QNUM_VALID |
+ TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_FDQ1_QNUM_VALID |
+ TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_FDQ2_QNUM_VALID |
+ TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_FDQ3_QNUM_VALID;
+ req.nav_id = tisci_rm->tisci_dev_id;
+ req.flow_index = flow->udma_rflow_id;
+ req.rx_dest_qnum = rx_ring_id;
+ req.rx_fdq0_sz0_qnum = rx_ringfdq_id;
+ req.rx_fdq1_qnum = rx_ringfdq_id;
+ req.rx_fdq2_qnum = rx_ringfdq_id;
+ req.rx_fdq3_qnum = rx_ringfdq_id;
+
+ ret = tisci_rm->tisci_udmap_ops->rx_flow_cfg(tisci_rm->tisci, &req);
+ if (ret) {
+ dev_err(dev, "flow%d enable failed: %d\n", flow->udma_rflow_id,
+ ret);
+ }
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(k3_udma_glue_rx_flow_enable);
+
+int k3_udma_glue_rx_flow_disable(struct k3_udma_glue_rx_channel *rx_chn,
+ u32 flow_idx)
+{
+ struct k3_udma_glue_rx_flow *flow = &rx_chn->flows[flow_idx];
+ const struct udma_tisci_rm *tisci_rm = rx_chn->common.tisci_rm;
+ struct device *dev = rx_chn->common.dev;
+ struct ti_sci_msg_rm_udmap_flow_cfg req;
+ int ret = 0;
+
+ if (!rx_chn->remote)
+ return -EINVAL;
+
+ memset(&req, 0, sizeof(req));
+ req.valid_params =
+ TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_DEST_QNUM_VALID |
+ TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_FDQ0_SZ0_QNUM_VALID |
+ TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_FDQ1_QNUM_VALID |
+ TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_FDQ2_QNUM_VALID |
+ TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_FDQ3_QNUM_VALID;
+ req.nav_id = tisci_rm->tisci_dev_id;
+ req.flow_index = flow->udma_rflow_id;
+ req.rx_dest_qnum = TI_SCI_RESOURCE_NULL;
+ req.rx_fdq0_sz0_qnum = TI_SCI_RESOURCE_NULL;
+ req.rx_fdq1_qnum = TI_SCI_RESOURCE_NULL;
+ req.rx_fdq2_qnum = TI_SCI_RESOURCE_NULL;
+ req.rx_fdq3_qnum = TI_SCI_RESOURCE_NULL;
+
+ ret = tisci_rm->tisci_udmap_ops->rx_flow_cfg(tisci_rm->tisci, &req);
+ if (ret) {
+ dev_err(dev, "flow%d disable failed: %d\n", flow->udma_rflow_id,
+ ret);
+ }
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(k3_udma_glue_rx_flow_disable);
+
+int k3_udma_glue_enable_rx_chn(struct k3_udma_glue_rx_channel *rx_chn)
+{
+ u32 rxrt_ctl;
+
+ if (rx_chn->remote)
+ return -EINVAL;
+
+ if (rx_chn->flows_ready < rx_chn->flow_num)
+ return -EINVAL;
+
+ rxrt_ctl = xudma_rchanrt_read(rx_chn->udma_rchanx,
+ UDMA_RCHAN_RT_CTL_REG);
+ rxrt_ctl |= UDMA_CHAN_RT_CTL_EN;
+ xudma_rchanrt_write(rx_chn->udma_rchanx, UDMA_RCHAN_RT_CTL_REG,
+ rxrt_ctl);
+
+ xudma_rchanrt_write(rx_chn->udma_rchanx,
+ UDMA_RCHAN_RT_PEER_RT_EN_REG,
+ UDMA_PEER_RT_EN_ENABLE);
+
+ k3_udma_glue_dump_rx_rt_chn(rx_chn, "rxrt en");
+ return 0;
+}
+EXPORT_SYMBOL_GPL(k3_udma_glue_enable_rx_chn);
+
+void k3_udma_glue_disable_rx_chn(struct k3_udma_glue_rx_channel *rx_chn)
+{
+ k3_udma_glue_dump_rx_rt_chn(rx_chn, "rxrt dis1");
+
+ xudma_rchanrt_write(rx_chn->udma_rchanx,
+ UDMA_RCHAN_RT_PEER_RT_EN_REG,
+ 0);
+ xudma_rchanrt_write(rx_chn->udma_rchanx, UDMA_RCHAN_RT_CTL_REG, 0);
+
+ k3_udma_glue_dump_rx_rt_chn(rx_chn, "rxrt dis2");
+}
+EXPORT_SYMBOL_GPL(k3_udma_glue_disable_rx_chn);
+
+void k3_udma_glue_tdown_rx_chn(struct k3_udma_glue_rx_channel *rx_chn,
+ bool sync)
+{
+ int i = 0;
+ u32 val;
+
+ if (rx_chn->remote)
+ return;
+
+ k3_udma_glue_dump_rx_rt_chn(rx_chn, "rxrt tdown1");
+
+ xudma_rchanrt_write(rx_chn->udma_rchanx, UDMA_RCHAN_RT_PEER_RT_EN_REG,
+ UDMA_PEER_RT_EN_ENABLE | UDMA_PEER_RT_EN_TEARDOWN);
+
+ val = xudma_rchanrt_read(rx_chn->udma_rchanx, UDMA_RCHAN_RT_CTL_REG);
+
+ while (sync && (val & UDMA_CHAN_RT_CTL_EN)) {
+ val = xudma_rchanrt_read(rx_chn->udma_rchanx,
+ UDMA_RCHAN_RT_CTL_REG);
+ udelay(1);
+ if (i > K3_UDMAX_TDOWN_TIMEOUT_US) {
+ dev_err(rx_chn->common.dev, "RX tdown timeout\n");
+ break;
+ }
+ i++;
+ }
+
+ val = xudma_rchanrt_read(rx_chn->udma_rchanx,
+ UDMA_RCHAN_RT_PEER_RT_EN_REG);
+ if (sync && (val & UDMA_PEER_RT_EN_ENABLE))
+ dev_err(rx_chn->common.dev, "TX tdown peer not stopped\n");
+ k3_udma_glue_dump_rx_rt_chn(rx_chn, "rxrt tdown2");
+}
+EXPORT_SYMBOL_GPL(k3_udma_glue_tdown_rx_chn);
+
+void k3_udma_glue_reset_rx_chn(struct k3_udma_glue_rx_channel *rx_chn,
+ u32 flow_num, void *data,
+ void (*cleanup)(void *data, dma_addr_t desc_dma), bool skip_fdq)
+{
+ struct k3_udma_glue_rx_flow *flow = &rx_chn->flows[flow_num];
+ struct device *dev = rx_chn->common.dev;
+ dma_addr_t desc_dma;
+ int occ_rx, i, ret;
+
+ /* reset RXCQ as it is not an input to UDMA - expected to be empty */
+ occ_rx = k3_ringacc_ring_get_occ(flow->ringrx);
+ dev_dbg(dev, "RX reset flow %u occ_rx %u\n", flow_num, occ_rx);
+ if (flow->ringrx)
+ k3_ringacc_ring_reset(flow->ringrx);
+
+ /* Skip the RX FDQ when one FDQ is shared by the set of flows */
+ if (skip_fdq)
+ return;
+
+ /*
+ * The RX FDQ reset needs to be done in a special way as it is an
+ * input to UDMA and its state is cached there, so:
+ * 1) save the RX FDQ occupancy
+ * 2) clean up the RX FDQ and call the .cleanup() callback for each desc
+ * 3) reset the RX FDQ in a special way
+ */
+ occ_rx = k3_ringacc_ring_get_occ(flow->ringrxfdq);
+ dev_dbg(dev, "RX reset flow %u occ_rx_fdq %u\n", flow_num, occ_rx);
+
+ for (i = 0; i < occ_rx; i++) {
+ ret = k3_ringacc_ring_pop(flow->ringrxfdq, &desc_dma);
+ if (ret) {
+ dev_err(dev, "RX reset pop %d\n", ret);
+ break;
+ }
+ cleanup(data, desc_dma);
+ }
+
+ k3_ringacc_ring_reset_dma(flow->ringrxfdq, occ_rx);
+}
+EXPORT_SYMBOL_GPL(k3_udma_glue_reset_rx_chn);
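+
+/*
+ * Illustrative sketch (not part of the original patch): resetting every
+ * flow of a channel where all flows share one FDQ - only the first call
+ * drains the shared free-descriptor queue, the rest pass skip_fdq.
+ */
+static void example_reset_all_flows(struct k3_udma_glue_rx_channel *rx_chn,
+ u32 flow_num, void *data,
+ void (*cleanup)(void *data, dma_addr_t desc_dma))
+{
+ u32 i;
+
+ for (i = 0; i < flow_num; i++)
+ k3_udma_glue_reset_rx_chn(rx_chn, i, data, cleanup, i > 0);
+}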
+
+int k3_udma_glue_push_rx_chn(struct k3_udma_glue_rx_channel *rx_chn,
+ u32 flow_num, struct cppi5_host_desc_t *desc_rx,
+ dma_addr_t desc_dma)
+{
+ struct k3_udma_glue_rx_flow *flow = &rx_chn->flows[flow_num];
+
+ return k3_ringacc_ring_push(flow->ringrxfdq, &desc_dma);
+}
+EXPORT_SYMBOL_GPL(k3_udma_glue_push_rx_chn);
+
+int k3_udma_glue_pop_rx_chn(struct k3_udma_glue_rx_channel *rx_chn,
+ u32 flow_num, dma_addr_t *desc_dma)
+{
+ struct k3_udma_glue_rx_flow *flow = &rx_chn->flows[flow_num];
+
+ return k3_ringacc_ring_pop(flow->ringrx, desc_dma);
+}
+EXPORT_SYMBOL_GPL(k3_udma_glue_pop_rx_chn);
+
+int k3_udma_glue_rx_get_irq(struct k3_udma_glue_rx_channel *rx_chn,
+ u32 flow_num)
+{
+ struct k3_udma_glue_rx_flow *flow;
+
+ flow = &rx_chn->flows[flow_num];
+
+ flow->virq = k3_ringacc_get_ring_irq_num(flow->ringrx);
+
+ return flow->virq;
+}
+EXPORT_SYMBOL_GPL(k3_udma_glue_rx_get_irq);
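+
+/*
+ * Illustrative sketch (not part of the original patch): wiring the
+ * per-flow completion interrupt for flow 0. example_rx_irq() stands in
+ * for a client handler; the trigger type is an assumption.
+ */
+static irqreturn_t example_rx_irq(int irq, void *data)
+{
+ /* client specific: pop completed descriptors from the flow */
+ return IRQ_HANDLED;
+}
+
+static int example_request_rx_irq(struct k3_udma_glue_rx_channel *rx_chn,
+ struct device *dev, void *priv)
+{
+ int virq = k3_udma_glue_rx_get_irq(rx_chn, 0);
+
+ if (virq <= 0)
+ return virq ? virq : -ENXIO;
+
+ return devm_request_irq(dev, virq, example_rx_irq, IRQF_TRIGGER_HIGH,
+ dev_name(dev), priv);
+}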
diff --git a/drivers/dma/ti/k3-udma-private.c b/drivers/dma/ti/k3-udma-private.c
new file mode 100644
index 000000000000..0b8f3dd6b146
--- /dev/null
+++ b/drivers/dma/ti/k3-udma-private.c
@@ -0,0 +1,133 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2019 Texas Instruments Incorporated - http://www.ti.com
+ * Author: Peter Ujfalusi <peter.ujfalusi@ti.com>
+ */
+
+int xudma_navss_psil_pair(struct udma_dev *ud, u32 src_thread, u32 dst_thread)
+{
+ return navss_psil_pair(ud, src_thread, dst_thread);
+}
+EXPORT_SYMBOL(xudma_navss_psil_pair);
+
+int xudma_navss_psil_unpair(struct udma_dev *ud, u32 src_thread, u32 dst_thread)
+{
+ return navss_psil_unpair(ud, src_thread, dst_thread);
+}
+EXPORT_SYMBOL(xudma_navss_psil_unpair);
+
+struct udma_dev *of_xudma_dev_get(struct device_node *np, const char *property)
+{
+ struct device_node *udma_node = np;
+ struct platform_device *pdev;
+ struct udma_dev *ud;
+
+ if (property) {
+ udma_node = of_parse_phandle(np, property, 0);
+ if (!udma_node) {
+ pr_err("UDMA node is not found\n");
+ return ERR_PTR(-ENODEV);
+ }
+ }
+
+ pdev = of_find_device_by_node(udma_node);
+ if (!pdev) {
+ pr_debug("UDMA device not found\n");
+ return ERR_PTR(-EPROBE_DEFER);
+ }
+
+ if (np != udma_node)
+ of_node_put(udma_node);
+
+ ud = platform_get_drvdata(pdev);
+ if (!ud) {
+ pr_debug("UDMA has not been probed\n");
+ return ERR_PTR(-EPROBE_DEFER);
+ }
+
+ return ud;
+}
+EXPORT_SYMBOL(of_xudma_dev_get);
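+
+/*
+ * Illustrative sketch (not part of the original patch): resolving the
+ * UDMA instance from a consumer node. "ti,udma" is a hypothetical
+ * phandle property name; a NULL property means the node itself is the
+ * UDMA node. An -EPROBE_DEFER result should be propagated to the caller.
+ */
+static struct udma_dev *example_get_udma(struct device *dev)
+{
+ return of_xudma_dev_get(dev->of_node, "ti,udma");
+}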
+
+u32 xudma_dev_get_psil_base(struct udma_dev *ud)
+{
+ return ud->psil_base;
+}
+EXPORT_SYMBOL(xudma_dev_get_psil_base);
+
+struct udma_tisci_rm *xudma_dev_get_tisci_rm(struct udma_dev *ud)
+{
+ return &ud->tisci_rm;
+}
+EXPORT_SYMBOL(xudma_dev_get_tisci_rm);
+
+int xudma_alloc_gp_rflow_range(struct udma_dev *ud, int from, int cnt)
+{
+ return __udma_alloc_gp_rflow_range(ud, from, cnt);
+}
+EXPORT_SYMBOL(xudma_alloc_gp_rflow_range);
+
+int xudma_free_gp_rflow_range(struct udma_dev *ud, int from, int cnt)
+{
+ return __udma_free_gp_rflow_range(ud, from, cnt);
+}
+EXPORT_SYMBOL(xudma_free_gp_rflow_range);
+
+bool xudma_rflow_is_gp(struct udma_dev *ud, int id)
+{
+ return !test_bit(id, ud->rflow_gp_map);
+}
+EXPORT_SYMBOL(xudma_rflow_is_gp);
+
+#define XUDMA_GET_PUT_RESOURCE(res) \
+struct udma_##res *xudma_##res##_get(struct udma_dev *ud, int id) \
+{ \
+ return __udma_reserve_##res(ud, false, id); \
+} \
+EXPORT_SYMBOL(xudma_##res##_get); \
+ \
+void xudma_##res##_put(struct udma_dev *ud, struct udma_##res *p) \
+{ \
+ clear_bit(p->id, ud->res##_map); \
+} \
+EXPORT_SYMBOL(xudma_##res##_put)
+XUDMA_GET_PUT_RESOURCE(tchan);
+XUDMA_GET_PUT_RESOURCE(rchan);
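+
+/*
+ * For reference, XUDMA_GET_PUT_RESOURCE(tchan) above expands to:
+ *
+ * struct udma_tchan *xudma_tchan_get(struct udma_dev *ud, int id);
+ * void xudma_tchan_put(struct udma_dev *ud, struct udma_tchan *p);
+ *
+ * and likewise for rchan. _get reserves the channel (id == -1 picks the
+ * first free one), _put simply clears the busy bit.
+ */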
+
+struct udma_rflow *xudma_rflow_get(struct udma_dev *ud, int id)
+{
+ return __udma_get_rflow(ud, id);
+}
+EXPORT_SYMBOL(xudma_rflow_get);
+
+void xudma_rflow_put(struct udma_dev *ud, struct udma_rflow *p)
+{
+ __udma_put_rflow(ud, p);
+}
+EXPORT_SYMBOL(xudma_rflow_put);
+
+#define XUDMA_GET_RESOURCE_ID(res) \
+int xudma_##res##_get_id(struct udma_##res *p) \
+{ \
+ return p->id; \
+} \
+EXPORT_SYMBOL(xudma_##res##_get_id)
+XUDMA_GET_RESOURCE_ID(tchan);
+XUDMA_GET_RESOURCE_ID(rchan);
+XUDMA_GET_RESOURCE_ID(rflow);
+
+/* Exported register access functions */
+#define XUDMA_RT_IO_FUNCTIONS(res) \
+u32 xudma_##res##rt_read(struct udma_##res *p, int reg) \
+{ \
+ return udma_##res##rt_read(p, reg); \
+} \
+EXPORT_SYMBOL(xudma_##res##rt_read); \
+ \
+void xudma_##res##rt_write(struct udma_##res *p, int reg, u32 val) \
+{ \
+ udma_##res##rt_write(p, reg, val); \
+} \
+EXPORT_SYMBOL(xudma_##res##rt_write)
+XUDMA_RT_IO_FUNCTIONS(tchan);
+XUDMA_RT_IO_FUNCTIONS(rchan);
diff --git a/drivers/dma/ti/k3-udma.c b/drivers/dma/ti/k3-udma.c
new file mode 100644
index 000000000000..ea79c2df28e0
--- /dev/null
+++ b/drivers/dma/ti/k3-udma.c
@@ -0,0 +1,3432 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2019 Texas Instruments Incorporated - http://www.ti.com
+ * Author: Peter Ujfalusi <peter.ujfalusi@ti.com>
+ */
+
+#include <linux/kernel.h>
+#include <linux/dmaengine.h>
+#include <linux/dma-mapping.h>
+#include <linux/dmapool.h>
+#include <linux/err.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/list.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/of.h>
+#include <linux/of_dma.h>
+#include <linux/of_device.h>
+#include <linux/of_irq.h>
+#include <linux/workqueue.h>
+#include <linux/completion.h>
+#include <linux/soc/ti/k3-ringacc.h>
+#include <linux/soc/ti/ti_sci_protocol.h>
+#include <linux/soc/ti/ti_sci_inta_msi.h>
+#include <linux/dma/ti-cppi5.h>
+
+#include "../virt-dma.h"
+#include "k3-udma.h"
+#include "k3-psil-priv.h"
+
+struct udma_static_tr {
+ u8 elsize; /* RPSTR0 */
+ u16 elcnt; /* RPSTR0 */
+ u16 bstcnt; /* RPSTR1 */
+};
+
+#define K3_UDMA_MAX_RFLOWS 1024
+#define K3_UDMA_DEFAULT_RING_SIZE 16
+
+/* How SRC/DST tag should be updated by UDMA in the descriptor's Word 3 */
+#define UDMA_RFLOW_SRCTAG_NONE 0
+#define UDMA_RFLOW_SRCTAG_CFG_TAG 1
+#define UDMA_RFLOW_SRCTAG_FLOW_ID 2
+#define UDMA_RFLOW_SRCTAG_SRC_TAG 4
+
+#define UDMA_RFLOW_DSTTAG_NONE 0
+#define UDMA_RFLOW_DSTTAG_CFG_TAG 1
+#define UDMA_RFLOW_DSTTAG_FLOW_ID 2
+#define UDMA_RFLOW_DSTTAG_DST_TAG_LO 4
+#define UDMA_RFLOW_DSTTAG_DST_TAG_HI 5
+
+struct udma_chan;
+
+enum udma_mmr {
+ MMR_GCFG = 0,
+ MMR_RCHANRT,
+ MMR_TCHANRT,
+ MMR_LAST,
+};
+
+static const char * const mmr_names[] = { "gcfg", "rchanrt", "tchanrt" };
+
+struct udma_tchan {
+ void __iomem *reg_rt;
+
+ int id;
+ struct k3_ring *t_ring; /* Transmit ring */
+ struct k3_ring *tc_ring; /* Transmit Completion ring */
+};
+
+struct udma_rflow {
+ int id;
+ struct k3_ring *fd_ring; /* Free Descriptor ring */
+ struct k3_ring *r_ring; /* Receive ring */
+};
+
+struct udma_rchan {
+ void __iomem *reg_rt;
+
+ int id;
+};
+
+#define UDMA_FLAG_PDMA_ACC32 BIT(0)
+#define UDMA_FLAG_PDMA_BURST BIT(1)
+
+struct udma_match_data {
+ u32 psil_base;
+ bool enable_memcpy_support;
+ u32 flags;
+ u32 statictr_z_mask;
+ u32 rchan_oes_offset;
+
+ u8 tpl_levels;
+ u32 level_start_idx[];
+};
+
+struct udma_dev {
+ struct dma_device ddev;
+ struct device *dev;
+ void __iomem *mmrs[MMR_LAST];
+ const struct udma_match_data *match_data;
+
+ size_t desc_align; /* alignment to use for descriptors */
+
+ struct udma_tisci_rm tisci_rm;
+
+ struct k3_ringacc *ringacc;
+
+ struct work_struct purge_work;
+ struct list_head desc_to_purge;
+ spinlock_t lock;
+
+ int tchan_cnt;
+ int echan_cnt;
+ int rchan_cnt;
+ int rflow_cnt;
+ unsigned long *tchan_map;
+ unsigned long *rchan_map;
+ unsigned long *rflow_gp_map;
+ unsigned long *rflow_gp_map_allocated;
+ unsigned long *rflow_in_use;
+
+ struct udma_tchan *tchans;
+ struct udma_rchan *rchans;
+ struct udma_rflow *rflows;
+
+ struct udma_chan *channels;
+ u32 psil_base;
+};
+
+struct udma_hwdesc {
+ size_t cppi5_desc_size;
+ void *cppi5_desc_vaddr;
+ dma_addr_t cppi5_desc_paddr;
+
+ /* TR descriptor internal pointers */
+ void *tr_req_base;
+ struct cppi5_tr_resp_t *tr_resp_base;
+};
+
+struct udma_desc {
+ struct virt_dma_desc vd;
+
+ bool terminated;
+
+ enum dma_transfer_direction dir;
+
+ struct udma_static_tr static_tr;
+ u32 residue;
+
+ unsigned int sglen;
+ unsigned int desc_idx; /* Only used for cyclic in packet mode */
+ unsigned int tr_idx;
+
+ u32 metadata_size;
+ void *metadata; /* pointer to provided metadata buffer (EPIB, PSdata) */
+
+ unsigned int hwdesc_count;
+ struct udma_hwdesc hwdesc[0];
+};
+
+enum udma_chan_state {
+ UDMA_CHAN_IS_IDLE = 0, /* not active, no teardown is in progress */
+ UDMA_CHAN_IS_ACTIVE, /* Normal operation */
+ UDMA_CHAN_IS_TERMINATING, /* channel is being terminated */
+};
+
+struct udma_tx_drain {
+ struct delayed_work work;
+ unsigned long jiffie;
+ u32 residue;
+};
+
+struct udma_chan_config {
+ bool pkt_mode; /* TR or packet */
+ bool needs_epib; /* EPIB is needed for the communication or not */
+ u32 psd_size; /* size of Protocol Specific Data */
+ u32 metadata_size; /* (needs_epib ? 16:0) + psd_size */
+ u32 hdesc_size; /* Size of a packet descriptor in packet mode */
+ bool notdpkt; /* Suppress sending TDC packet */
+ int remote_thread_id;
+ u32 src_thread;
+ u32 dst_thread;
+ enum psil_endpoint_type ep_type;
+ bool enable_acc32;
+ bool enable_burst;
+ enum udma_tp_level channel_tpl; /* Channel Throughput Level */
+
+ enum dma_transfer_direction dir;
+};
+
+struct udma_chan {
+ struct virt_dma_chan vc;
+ struct dma_slave_config cfg;
+ struct udma_dev *ud;
+ struct udma_desc *desc;
+ struct udma_desc *terminated_desc;
+ struct udma_static_tr static_tr;
+ char *name;
+
+ struct udma_tchan *tchan;
+ struct udma_rchan *rchan;
+ struct udma_rflow *rflow;
+
+ bool psil_paired;
+
+ int irq_num_ring;
+ int irq_num_udma;
+
+ bool cyclic;
+ bool paused;
+
+ enum udma_chan_state state;
+ struct completion teardown_completed;
+
+ struct udma_tx_drain tx_drain;
+
+ u32 bcnt; /* number of bytes completed since the start of the channel */
+ u32 in_ring_cnt; /* number of descriptors in flight */
+
+ /* Channel configuration parameters */
+ struct udma_chan_config config;
+
+ /* dmapool for packet mode descriptors */
+ bool use_dma_pool;
+ struct dma_pool *hdesc_pool;
+
+ u32 id;
+};
+
+static inline struct udma_dev *to_udma_dev(struct dma_device *d)
+{
+ return container_of(d, struct udma_dev, ddev);
+}
+
+static inline struct udma_chan *to_udma_chan(struct dma_chan *c)
+{
+ return container_of(c, struct udma_chan, vc.chan);
+}
+
+static inline struct udma_desc *to_udma_desc(struct dma_async_tx_descriptor *t)
+{
+ return container_of(t, struct udma_desc, vd.tx);
+}
+
+/* Generic register access functions */
+static inline u32 udma_read(void __iomem *base, int reg)
+{
+ return readl(base + reg);
+}
+
+static inline void udma_write(void __iomem *base, int reg, u32 val)
+{
+ writel(val, base + reg);
+}
+
+static inline void udma_update_bits(void __iomem *base, int reg,
+ u32 mask, u32 val)
+{
+ u32 tmp, orig;
+
+ orig = readl(base + reg);
+ tmp = orig & ~mask;
+ tmp |= (val & mask);
+
+ if (tmp != orig)
+ writel(tmp, base + reg);
+}
+
+/* TCHANRT */
+static inline u32 udma_tchanrt_read(struct udma_tchan *tchan, int reg)
+{
+ if (!tchan)
+ return 0;
+ return udma_read(tchan->reg_rt, reg);
+}
+
+static inline void udma_tchanrt_write(struct udma_tchan *tchan, int reg,
+ u32 val)
+{
+ if (!tchan)
+ return;
+ udma_write(tchan->reg_rt, reg, val);
+}
+
+static inline void udma_tchanrt_update_bits(struct udma_tchan *tchan, int reg,
+ u32 mask, u32 val)
+{
+ if (!tchan)
+ return;
+ udma_update_bits(tchan->reg_rt, reg, mask, val);
+}
+
+/* RCHANRT */
+static inline u32 udma_rchanrt_read(struct udma_rchan *rchan, int reg)
+{
+ if (!rchan)
+ return 0;
+ return udma_read(rchan->reg_rt, reg);
+}
+
+static inline void udma_rchanrt_write(struct udma_rchan *rchan, int reg,
+ u32 val)
+{
+ if (!rchan)
+ return;
+ udma_write(rchan->reg_rt, reg, val);
+}
+
+static inline void udma_rchanrt_update_bits(struct udma_rchan *rchan, int reg,
+ u32 mask, u32 val)
+{
+ if (!rchan)
+ return;
+ udma_update_bits(rchan->reg_rt, reg, mask, val);
+}
+
+static int navss_psil_pair(struct udma_dev *ud, u32 src_thread, u32 dst_thread)
+{
+ struct udma_tisci_rm *tisci_rm = &ud->tisci_rm;
+
+ dst_thread |= K3_PSIL_DST_THREAD_ID_OFFSET;
+ return tisci_rm->tisci_psil_ops->pair(tisci_rm->tisci,
+ tisci_rm->tisci_navss_dev_id,
+ src_thread, dst_thread);
+}
+
+static int navss_psil_unpair(struct udma_dev *ud, u32 src_thread,
+ u32 dst_thread)
+{
+ struct udma_tisci_rm *tisci_rm = &ud->tisci_rm;
+
+ dst_thread |= K3_PSIL_DST_THREAD_ID_OFFSET;
+ return tisci_rm->tisci_psil_ops->unpair(tisci_rm->tisci,
+ tisci_rm->tisci_navss_dev_id,
+ src_thread, dst_thread);
+}
+
+static void udma_reset_uchan(struct udma_chan *uc)
+{
+ memset(&uc->config, 0, sizeof(uc->config));
+ uc->config.remote_thread_id = -1;
+ uc->state = UDMA_CHAN_IS_IDLE;
+}
+
+static void udma_dump_chan_stdata(struct udma_chan *uc)
+{
+ struct device *dev = uc->ud->dev;
+ u32 offset;
+ int i;
+
+ if (uc->config.dir == DMA_MEM_TO_DEV || uc->config.dir == DMA_MEM_TO_MEM) {
+ dev_dbg(dev, "TCHAN State data:\n");
+ for (i = 0; i < 32; i++) {
+ offset = UDMA_TCHAN_RT_STDATA_REG + i * 4;
+ dev_dbg(dev, "TRT_STDATA[%02d]: 0x%08x\n", i,
+ udma_tchanrt_read(uc->tchan, offset));
+ }
+ }
+
+ if (uc->config.dir == DMA_DEV_TO_MEM || uc->config.dir == DMA_MEM_TO_MEM) {
+ dev_dbg(dev, "RCHAN State data:\n");
+ for (i = 0; i < 32; i++) {
+ offset = UDMA_RCHAN_RT_STDATA_REG + i * 4;
+ dev_dbg(dev, "RRT_STDATA[%02d]: 0x%08x\n", i,
+ udma_rchanrt_read(uc->rchan, offset));
+ }
+ }
+}
+
+static inline dma_addr_t udma_curr_cppi5_desc_paddr(struct udma_desc *d,
+ int idx)
+{
+ return d->hwdesc[idx].cppi5_desc_paddr;
+}
+
+static inline void *udma_curr_cppi5_desc_vaddr(struct udma_desc *d, int idx)
+{
+ return d->hwdesc[idx].cppi5_desc_vaddr;
+}
+
+static struct udma_desc *udma_udma_desc_from_paddr(struct udma_chan *uc,
+ dma_addr_t paddr)
+{
+ struct udma_desc *d = uc->terminated_desc;
+
+ if (d) {
+ dma_addr_t desc_paddr = udma_curr_cppi5_desc_paddr(d,
+ d->desc_idx);
+
+ if (desc_paddr != paddr)
+ d = NULL;
+ }
+
+ if (!d) {
+ d = uc->desc;
+ if (d) {
+ dma_addr_t desc_paddr = udma_curr_cppi5_desc_paddr(d,
+ d->desc_idx);
+
+ if (desc_paddr != paddr)
+ d = NULL;
+ }
+ }
+
+ return d;
+}
+
+static void udma_free_hwdesc(struct udma_chan *uc, struct udma_desc *d)
+{
+ if (uc->use_dma_pool) {
+ int i;
+
+ for (i = 0; i < d->hwdesc_count; i++) {
+ if (!d->hwdesc[i].cppi5_desc_vaddr)
+ continue;
+
+ dma_pool_free(uc->hdesc_pool,
+ d->hwdesc[i].cppi5_desc_vaddr,
+ d->hwdesc[i].cppi5_desc_paddr);
+
+ d->hwdesc[i].cppi5_desc_vaddr = NULL;
+ }
+ } else if (d->hwdesc[0].cppi5_desc_vaddr) {
+ struct udma_dev *ud = uc->ud;
+
+ dma_free_coherent(ud->dev, d->hwdesc[0].cppi5_desc_size,
+ d->hwdesc[0].cppi5_desc_vaddr,
+ d->hwdesc[0].cppi5_desc_paddr);
+
+ d->hwdesc[0].cppi5_desc_vaddr = NULL;
+ }
+}
+
+static void udma_purge_desc_work(struct work_struct *work)
+{
+ struct udma_dev *ud = container_of(work, typeof(*ud), purge_work);
+ struct virt_dma_desc *vd, *_vd;
+ unsigned long flags;
+ LIST_HEAD(head);
+
+ spin_lock_irqsave(&ud->lock, flags);
+ list_splice_tail_init(&ud->desc_to_purge, &head);
+ spin_unlock_irqrestore(&ud->lock, flags);
+
+ list_for_each_entry_safe(vd, _vd, &head, node) {
+ struct udma_chan *uc = to_udma_chan(vd->tx.chan);
+ struct udma_desc *d = to_udma_desc(&vd->tx);
+
+ udma_free_hwdesc(uc, d);
+ list_del(&vd->node);
+ kfree(d);
+ }
+
+ /* If more to purge, schedule the work again */
+ if (!list_empty(&ud->desc_to_purge))
+ schedule_work(&ud->purge_work);
+}
+
+static void udma_desc_free(struct virt_dma_desc *vd)
+{
+ struct udma_dev *ud = to_udma_dev(vd->tx.chan->device);
+ struct udma_chan *uc = to_udma_chan(vd->tx.chan);
+ struct udma_desc *d = to_udma_desc(&vd->tx);
+ unsigned long flags;
+
+ if (uc->terminated_desc == d)
+ uc->terminated_desc = NULL;
+
+ if (uc->use_dma_pool) {
+ udma_free_hwdesc(uc, d);
+ kfree(d);
+ return;
+ }
+
+ spin_lock_irqsave(&ud->lock, flags);
+ list_add_tail(&vd->node, &ud->desc_to_purge);
+ spin_unlock_irqrestore(&ud->lock, flags);
+
+ schedule_work(&ud->purge_work);
+}
+
+static bool udma_is_chan_running(struct udma_chan *uc)
+{
+ u32 trt_ctl = 0;
+ u32 rrt_ctl = 0;
+
+ if (uc->tchan)
+ trt_ctl = udma_tchanrt_read(uc->tchan, UDMA_TCHAN_RT_CTL_REG);
+ if (uc->rchan)
+ rrt_ctl = udma_rchanrt_read(uc->rchan, UDMA_RCHAN_RT_CTL_REG);
+
+ if (trt_ctl & UDMA_CHAN_RT_CTL_EN || rrt_ctl & UDMA_CHAN_RT_CTL_EN)
+ return true;
+
+ return false;
+}
+
+static bool udma_is_chan_paused(struct udma_chan *uc)
+{
+ u32 val, pause_mask;
+
+ switch (uc->desc->dir) {
+ case DMA_DEV_TO_MEM:
+ val = udma_rchanrt_read(uc->rchan,
+ UDMA_RCHAN_RT_PEER_RT_EN_REG);
+ pause_mask = UDMA_PEER_RT_EN_PAUSE;
+ break;
+ case DMA_MEM_TO_DEV:
+ val = udma_tchanrt_read(uc->tchan,
+ UDMA_TCHAN_RT_PEER_RT_EN_REG);
+ pause_mask = UDMA_PEER_RT_EN_PAUSE;
+ break;
+ case DMA_MEM_TO_MEM:
+ val = udma_tchanrt_read(uc->tchan, UDMA_TCHAN_RT_CTL_REG);
+ pause_mask = UDMA_CHAN_RT_CTL_PAUSE;
+ break;
+ default:
+ return false;
+ }
+
+ if (val & pause_mask)
+ return true;
+
+ return false;
+}
+
+static void udma_sync_for_device(struct udma_chan *uc, int idx)
+{
+ struct udma_desc *d = uc->desc;
+
+ if (uc->cyclic && uc->config.pkt_mode) {
+ dma_sync_single_for_device(uc->ud->dev,
+ d->hwdesc[idx].cppi5_desc_paddr,
+ d->hwdesc[idx].cppi5_desc_size,
+ DMA_TO_DEVICE);
+ } else {
+ int i;
+
+ for (i = 0; i < d->hwdesc_count; i++) {
+ if (!d->hwdesc[i].cppi5_desc_vaddr)
+ continue;
+
+ dma_sync_single_for_device(uc->ud->dev,
+ d->hwdesc[i].cppi5_desc_paddr,
+ d->hwdesc[i].cppi5_desc_size,
+ DMA_TO_DEVICE);
+ }
+ }
+}
+
+static int udma_push_to_ring(struct udma_chan *uc, int idx)
+{
+ struct udma_desc *d = uc->desc;
+
+ struct k3_ring *ring = NULL;
+ int ret = -EINVAL;
+
+ switch (uc->config.dir) {
+ case DMA_DEV_TO_MEM:
+ ring = uc->rflow->fd_ring;
+ break;
+ case DMA_MEM_TO_DEV:
+ case DMA_MEM_TO_MEM:
+ ring = uc->tchan->t_ring;
+ break;
+ default:
+ break;
+ }
+
+ if (ring) {
+ dma_addr_t desc_addr = udma_curr_cppi5_desc_paddr(d, idx);
+
+ wmb(); /* Ensure that writes are not moved over this point */
+ udma_sync_for_device(uc, idx);
+ ret = k3_ringacc_ring_push(ring, &desc_addr);
+ uc->in_ring_cnt++;
+ }
+
+ return ret;
+}
+
+static int udma_pop_from_ring(struct udma_chan *uc, dma_addr_t *addr)
+{
+ struct k3_ring *ring = NULL;
+ int ret = -ENOENT;
+
+ switch (uc->config.dir) {
+ case DMA_DEV_TO_MEM:
+ ring = uc->rflow->r_ring;
+ break;
+ case DMA_MEM_TO_DEV:
+ case DMA_MEM_TO_MEM:
+ ring = uc->tchan->tc_ring;
+ break;
+ default:
+ break;
+ }
+
+ if (ring && k3_ringacc_ring_get_occ(ring)) {
+ struct udma_desc *d = NULL;
+
+ ret = k3_ringacc_ring_pop(ring, addr);
+ if (ret)
+ return ret;
+
+ /* Teardown completion */
+ if (cppi5_desc_is_tdcm(*addr))
+ return ret;
+
+ d = udma_udma_desc_from_paddr(uc, *addr);
+
+ if (d)
+ dma_sync_single_for_cpu(uc->ud->dev, *addr,
+ d->hwdesc[0].cppi5_desc_size,
+ DMA_FROM_DEVICE);
+ rmb(); /* Ensure that reads are not moved before this point */
+
+ if (!ret)
+ uc->in_ring_cnt--;
+ }
+
+ return ret;
+}
+
+static void udma_reset_rings(struct udma_chan *uc)
+{
+ struct k3_ring *ring1 = NULL;
+ struct k3_ring *ring2 = NULL;
+
+ switch (uc->config.dir) {
+ case DMA_DEV_TO_MEM:
+ if (uc->rchan) {
+ ring1 = uc->rflow->fd_ring;
+ ring2 = uc->rflow->r_ring;
+ }
+ break;
+ case DMA_MEM_TO_DEV:
+ case DMA_MEM_TO_MEM:
+ if (uc->tchan) {
+ ring1 = uc->tchan->t_ring;
+ ring2 = uc->tchan->tc_ring;
+ }
+ break;
+ default:
+ break;
+ }
+
+ if (ring1)
+ k3_ringacc_ring_reset_dma(ring1,
+ k3_ringacc_ring_get_occ(ring1));
+ if (ring2)
+ k3_ringacc_ring_reset(ring2);
+
+ /* make sure we are not leaking memory due to a stalled descriptor */
+ if (uc->terminated_desc) {
+ udma_desc_free(&uc->terminated_desc->vd);
+ uc->terminated_desc = NULL;
+ }
+
+ uc->in_ring_cnt = 0;
+}
+
+static void udma_reset_counters(struct udma_chan *uc)
+{
+ u32 val;
+
+ if (uc->tchan) {
+ val = udma_tchanrt_read(uc->tchan, UDMA_TCHAN_RT_BCNT_REG);
+ udma_tchanrt_write(uc->tchan, UDMA_TCHAN_RT_BCNT_REG, val);
+
+ val = udma_tchanrt_read(uc->tchan, UDMA_TCHAN_RT_SBCNT_REG);
+ udma_tchanrt_write(uc->tchan, UDMA_TCHAN_RT_SBCNT_REG, val);
+
+ val = udma_tchanrt_read(uc->tchan, UDMA_TCHAN_RT_PCNT_REG);
+ udma_tchanrt_write(uc->tchan, UDMA_TCHAN_RT_PCNT_REG, val);
+
+ val = udma_tchanrt_read(uc->tchan, UDMA_TCHAN_RT_PEER_BCNT_REG);
+ udma_tchanrt_write(uc->tchan, UDMA_TCHAN_RT_PEER_BCNT_REG, val);
+ }
+
+ if (uc->rchan) {
+ val = udma_rchanrt_read(uc->rchan, UDMA_RCHAN_RT_BCNT_REG);
+ udma_rchanrt_write(uc->rchan, UDMA_RCHAN_RT_BCNT_REG, val);
+
+ val = udma_rchanrt_read(uc->rchan, UDMA_RCHAN_RT_SBCNT_REG);
+ udma_rchanrt_write(uc->rchan, UDMA_RCHAN_RT_SBCNT_REG, val);
+
+ val = udma_rchanrt_read(uc->rchan, UDMA_RCHAN_RT_PCNT_REG);
+ udma_rchanrt_write(uc->rchan, UDMA_RCHAN_RT_PCNT_REG, val);
+
+ val = udma_rchanrt_read(uc->rchan, UDMA_RCHAN_RT_PEER_BCNT_REG);
+ udma_rchanrt_write(uc->rchan, UDMA_RCHAN_RT_PEER_BCNT_REG, val);
+ }
+
+ uc->bcnt = 0;
+}
+
+static int udma_reset_chan(struct udma_chan *uc, bool hard)
+{
+ switch (uc->config.dir) {
+ case DMA_DEV_TO_MEM:
+ udma_rchanrt_write(uc->rchan, UDMA_RCHAN_RT_PEER_RT_EN_REG, 0);
+ udma_rchanrt_write(uc->rchan, UDMA_RCHAN_RT_CTL_REG, 0);
+ break;
+ case DMA_MEM_TO_DEV:
+ udma_tchanrt_write(uc->tchan, UDMA_TCHAN_RT_CTL_REG, 0);
+ udma_tchanrt_write(uc->tchan, UDMA_TCHAN_RT_PEER_RT_EN_REG, 0);
+ break;
+ case DMA_MEM_TO_MEM:
+ udma_rchanrt_write(uc->rchan, UDMA_RCHAN_RT_CTL_REG, 0);
+ udma_tchanrt_write(uc->tchan, UDMA_TCHAN_RT_CTL_REG, 0);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ /* Reset all counters */
+ udma_reset_counters(uc);
+
+ /* Hard reset: re-initialize the channel to reset */
+ if (hard) {
+ struct udma_chan_config ucc_backup;
+ int ret;
+
+ memcpy(&ucc_backup, &uc->config, sizeof(uc->config));
+ uc->ud->ddev.device_free_chan_resources(&uc->vc.chan);
+
+ /* restore the channel configuration */
+ memcpy(&uc->config, &ucc_backup, sizeof(uc->config));
+ ret = uc->ud->ddev.device_alloc_chan_resources(&uc->vc.chan);
+ if (ret)
+ return ret;
+
+ /*
+ * Setting forced teardown after a forced reset helps to recover
+ * the rchan.
+ */
+ if (uc->config.dir == DMA_DEV_TO_MEM)
+ udma_rchanrt_write(uc->rchan, UDMA_RCHAN_RT_CTL_REG,
+ UDMA_CHAN_RT_CTL_EN |
+ UDMA_CHAN_RT_CTL_TDOWN |
+ UDMA_CHAN_RT_CTL_FTDOWN);
+ }
+ uc->state = UDMA_CHAN_IS_IDLE;
+
+ return 0;
+}
+
+static void udma_start_desc(struct udma_chan *uc)
+{
+ struct udma_chan_config *ucc = &uc->config;
+
+ if (ucc->pkt_mode && (uc->cyclic || ucc->dir == DMA_DEV_TO_MEM)) {
+ int i;
+
+ /* Push all descriptors to ring for packet mode cyclic or RX */
+ for (i = 0; i < uc->desc->sglen; i++)
+ udma_push_to_ring(uc, i);
+ } else {
+ udma_push_to_ring(uc, 0);
+ }
+}
+
+static bool udma_chan_needs_reconfiguration(struct udma_chan *uc)
+{
+ /* Only PDMAs have staticTR */
+ if (uc->config.ep_type == PSIL_EP_NATIVE)
+ return false;
+
+ /* Check if the staticTR configuration has changed for TX */
+ if (memcmp(&uc->static_tr, &uc->desc->static_tr, sizeof(uc->static_tr)))
+ return true;
+
+ return false;
+}
+
+static int udma_start(struct udma_chan *uc)
+{
+ struct virt_dma_desc *vd = vchan_next_desc(&uc->vc);
+
+ if (!vd) {
+ uc->desc = NULL;
+ return -ENOENT;
+ }
+
+ list_del(&vd->node);
+
+ uc->desc = to_udma_desc(&vd->tx);
+
+ /* Channel is already running and does not need reconfiguration */
+ if (udma_is_chan_running(uc) && !udma_chan_needs_reconfiguration(uc)) {
+ udma_start_desc(uc);
+ goto out;
+ }
+
+ /* Make sure that we clear the teardown bit, if it is set */
+ udma_reset_chan(uc, false);
+
+ /* Push descriptors before we start the channel */
+ udma_start_desc(uc);
+
+ switch (uc->desc->dir) {
+ case DMA_DEV_TO_MEM:
+ /* Config remote TR */
+ if (uc->config.ep_type == PSIL_EP_PDMA_XY) {
+ u32 val = PDMA_STATIC_TR_Y(uc->desc->static_tr.elcnt) |
+ PDMA_STATIC_TR_X(uc->desc->static_tr.elsize);
+ const struct udma_match_data *match_data =
+ uc->ud->match_data;
+
+ if (uc->config.enable_acc32)
+ val |= PDMA_STATIC_TR_XY_ACC32;
+ if (uc->config.enable_burst)
+ val |= PDMA_STATIC_TR_XY_BURST;
+
+ udma_rchanrt_write(uc->rchan,
+ UDMA_RCHAN_RT_PEER_STATIC_TR_XY_REG, val);
+
+ udma_rchanrt_write(uc->rchan,
+ UDMA_RCHAN_RT_PEER_STATIC_TR_Z_REG,
+ PDMA_STATIC_TR_Z(uc->desc->static_tr.bstcnt,
+ match_data->statictr_z_mask));
+
+ /* save the current staticTR configuration */
+ memcpy(&uc->static_tr, &uc->desc->static_tr,
+ sizeof(uc->static_tr));
+ }
+
+ udma_rchanrt_write(uc->rchan, UDMA_RCHAN_RT_CTL_REG,
+ UDMA_CHAN_RT_CTL_EN);
+
+ /* Enable remote */
+ udma_rchanrt_write(uc->rchan, UDMA_RCHAN_RT_PEER_RT_EN_REG,
+ UDMA_PEER_RT_EN_ENABLE);
+
+ break;
+ case DMA_MEM_TO_DEV:
+ /* Config remote TR */
+ if (uc->config.ep_type == PSIL_EP_PDMA_XY) {
+ u32 val = PDMA_STATIC_TR_Y(uc->desc->static_tr.elcnt) |
+ PDMA_STATIC_TR_X(uc->desc->static_tr.elsize);
+
+ if (uc->config.enable_acc32)
+ val |= PDMA_STATIC_TR_XY_ACC32;
+ if (uc->config.enable_burst)
+ val |= PDMA_STATIC_TR_XY_BURST;
+
+ udma_tchanrt_write(uc->tchan,
+ UDMA_TCHAN_RT_PEER_STATIC_TR_XY_REG, val);
+
+ /* save the current staticTR configuration */
+ memcpy(&uc->static_tr, &uc->desc->static_tr,
+ sizeof(uc->static_tr));
+ }
+
+ /* Enable remote */
+ udma_tchanrt_write(uc->tchan, UDMA_TCHAN_RT_PEER_RT_EN_REG,
+ UDMA_PEER_RT_EN_ENABLE);
+
+ udma_tchanrt_write(uc->tchan, UDMA_TCHAN_RT_CTL_REG,
+ UDMA_CHAN_RT_CTL_EN);
+
+ break;
+ case DMA_MEM_TO_MEM:
+ udma_rchanrt_write(uc->rchan, UDMA_RCHAN_RT_CTL_REG,
+ UDMA_CHAN_RT_CTL_EN);
+ udma_tchanrt_write(uc->tchan, UDMA_TCHAN_RT_CTL_REG,
+ UDMA_CHAN_RT_CTL_EN);
+
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ uc->state = UDMA_CHAN_IS_ACTIVE;
+out:
+
+ return 0;
+}
+
+static int udma_stop(struct udma_chan *uc)
+{
+ enum udma_chan_state old_state = uc->state;
+
+ uc->state = UDMA_CHAN_IS_TERMINATING;
+ reinit_completion(&uc->teardown_completed);
+
+ switch (uc->config.dir) {
+ case DMA_DEV_TO_MEM:
+ udma_rchanrt_write(uc->rchan, UDMA_RCHAN_RT_PEER_RT_EN_REG,
+ UDMA_PEER_RT_EN_ENABLE |
+ UDMA_PEER_RT_EN_TEARDOWN);
+ break;
+ case DMA_MEM_TO_DEV:
+ udma_tchanrt_write(uc->tchan, UDMA_TCHAN_RT_PEER_RT_EN_REG,
+ UDMA_PEER_RT_EN_ENABLE |
+ UDMA_PEER_RT_EN_FLUSH);
+ udma_tchanrt_write(uc->tchan, UDMA_TCHAN_RT_CTL_REG,
+ UDMA_CHAN_RT_CTL_EN |
+ UDMA_CHAN_RT_CTL_TDOWN);
+ break;
+ case DMA_MEM_TO_MEM:
+ udma_tchanrt_write(uc->tchan, UDMA_TCHAN_RT_CTL_REG,
+ UDMA_CHAN_RT_CTL_EN |
+ UDMA_CHAN_RT_CTL_TDOWN);
+ break;
+ default:
+ uc->state = old_state;
+ complete_all(&uc->teardown_completed);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static void udma_cyclic_packet_elapsed(struct udma_chan *uc)
+{
+ struct udma_desc *d = uc->desc;
+ struct cppi5_host_desc_t *h_desc;
+
+ h_desc = d->hwdesc[d->desc_idx].cppi5_desc_vaddr;
+ cppi5_hdesc_reset_to_original(h_desc);
+ udma_push_to_ring(uc, d->desc_idx);
+ d->desc_idx = (d->desc_idx + 1) % d->sglen;
+}
+
+static inline void udma_fetch_epib(struct udma_chan *uc, struct udma_desc *d)
+{
+ struct cppi5_host_desc_t *h_desc = d->hwdesc[0].cppi5_desc_vaddr;
+
+ memcpy(d->metadata, h_desc->epib, d->metadata_size);
+}
+
+static bool udma_is_desc_really_done(struct udma_chan *uc, struct udma_desc *d)
+{
+ u32 peer_bcnt, bcnt;
+
+ /* Only TX towards PDMA is affected */
+ if (uc->config.ep_type == PSIL_EP_NATIVE ||
+ uc->config.dir != DMA_MEM_TO_DEV)
+ return true;
+
+ peer_bcnt = udma_tchanrt_read(uc->tchan, UDMA_TCHAN_RT_PEER_BCNT_REG);
+ bcnt = udma_tchanrt_read(uc->tchan, UDMA_TCHAN_RT_BCNT_REG);
+
+ if (peer_bcnt < bcnt) {
+ uc->tx_drain.residue = bcnt - peer_bcnt;
+ uc->tx_drain.jiffie = jiffies;
+ return false;
+ }
+
+ return true;
+}
+
+static void udma_check_tx_completion(struct work_struct *work)
+{
+ struct udma_chan *uc = container_of(work, typeof(*uc),
+ tx_drain.work.work);
+ bool desc_done = true;
+ u32 residue_diff;
+ unsigned long jiffie_diff, delay;
+
+ if (uc->desc) {
+ residue_diff = uc->tx_drain.residue;
+ jiffie_diff = uc->tx_drain.jiffie;
+ desc_done = udma_is_desc_really_done(uc, uc->desc);
+ }
+
+ if (!desc_done) {
+ jiffie_diff = uc->tx_drain.jiffie - jiffie_diff;
+ residue_diff -= uc->tx_drain.residue;
+ if (residue_diff) {
+ /* Try to guess when we should check next time */
+ residue_diff /= jiffie_diff;
+ delay = uc->tx_drain.residue / residue_diff / 3;
+ if (jiffies_to_msecs(delay) < 5)
+ delay = 0;
+ } else {
+ /* No progress, check again in 1 second */
+ delay = HZ;
+ }
+
+ schedule_delayed_work(&uc->tx_drain.work, delay);
+ } else if (uc->desc) {
+ struct udma_desc *d = uc->desc;
+
+ uc->bcnt += d->residue;
+ udma_start(uc);
+ vchan_cookie_complete(&d->vd);
+ }
+}
+
+static irqreturn_t udma_ring_irq_handler(int irq, void *data)
+{
+ struct udma_chan *uc = data;
+ struct udma_desc *d;
+ unsigned long flags;
+ dma_addr_t paddr = 0;
+
+ if (udma_pop_from_ring(uc, &paddr) || !paddr)
+ return IRQ_HANDLED;
+
+ spin_lock_irqsave(&uc->vc.lock, flags);
+
+ /* Teardown completion message */
+ if (cppi5_desc_is_tdcm(paddr)) {
+ /* Compensate our internal pop/push counter */
+ uc->in_ring_cnt++;
+
+ complete_all(&uc->teardown_completed);
+
+ if (uc->terminated_desc) {
+ udma_desc_free(&uc->terminated_desc->vd);
+ uc->terminated_desc = NULL;
+ }
+
+ if (!uc->desc)
+ udma_start(uc);
+
+ goto out;
+ }
+
+ d = udma_udma_desc_from_paddr(uc, paddr);
+
+ if (d) {
+ dma_addr_t desc_paddr = udma_curr_cppi5_desc_paddr(d,
+ d->desc_idx);
+ if (desc_paddr != paddr) {
+ dev_err(uc->ud->dev, "not matching descriptors!\n");
+ goto out;
+ }
+
+ if (uc->cyclic) {
+ /* push the descriptor back to the ring */
+ if (d == uc->desc) {
+ udma_cyclic_packet_elapsed(uc);
+ vchan_cyclic_callback(&d->vd);
+ }
+ } else {
+ bool desc_done = false;
+
+ if (d == uc->desc) {
+ desc_done = udma_is_desc_really_done(uc, d);
+
+ if (desc_done) {
+ uc->bcnt += d->residue;
+ udma_start(uc);
+ } else {
+ schedule_delayed_work(&uc->tx_drain.work,
+ 0);
+ }
+ }
+
+ if (desc_done)
+ vchan_cookie_complete(&d->vd);
+ }
+ }
+out:
+ spin_unlock_irqrestore(&uc->vc.lock, flags);
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t udma_udma_irq_handler(int irq, void *data)
+{
+ struct udma_chan *uc = data;
+ struct udma_desc *d;
+ unsigned long flags;
+
+ spin_lock_irqsave(&uc->vc.lock, flags);
+ d = uc->desc;
+ if (d) {
+ d->tr_idx = (d->tr_idx + 1) % d->sglen;
+
+ if (uc->cyclic) {
+ vchan_cyclic_callback(&d->vd);
+ } else {
+ /* TODO: figure out the real amount of data */
+ uc->bcnt += d->residue;
+ udma_start(uc);
+ vchan_cookie_complete(&d->vd);
+ }
+ }
+
+ spin_unlock_irqrestore(&uc->vc.lock, flags);
+
+ return IRQ_HANDLED;
+}
+
+/**
+ * __udma_alloc_gp_rflow_range - alloc range of GP RX flows
+ * @ud: UDMA device
+ * @from: Start the search from this flow id number
+ * @cnt: Number of consecutive flow ids to allocate
+ *
+ * Allocate a range of RX flow ids for future use. These flows can be
+ * requested only by explicit flow id number. If @from is set to -1, the
+ * first free range is used; if @from is a non-negative value, allocation
+ * of exactly the specified range of flows is forced.
+ *
+ * Returns the flow id on success.
+ * Returns -ENOMEM if no free range can be found,
+ * -EEXIST if the requested range is busy,
+ * -EINVAL if wrong input values are passed.
+ */
+static int __udma_alloc_gp_rflow_range(struct udma_dev *ud, int from, int cnt)
+{
+ int start, tmp_from;
+ DECLARE_BITMAP(tmp, K3_UDMA_MAX_RFLOWS);
+
+ tmp_from = from;
+ if (tmp_from < 0)
+ tmp_from = ud->rchan_cnt;
+ /* default flows can't be allocated; they are accessible only by id */
+ if (tmp_from < ud->rchan_cnt)
+ return -EINVAL;
+
+ if (tmp_from + cnt > ud->rflow_cnt)
+ return -EINVAL;
+
+ bitmap_or(tmp, ud->rflow_gp_map, ud->rflow_gp_map_allocated,
+ ud->rflow_cnt);
+
+ start = bitmap_find_next_zero_area(tmp,
+ ud->rflow_cnt,
+ tmp_from, cnt, 0);
+ if (start >= ud->rflow_cnt)
+ return -ENOMEM;
+
+ if (from >= 0 && start != from)
+ return -EEXIST;
+
+ bitmap_set(ud->rflow_gp_map_allocated, start, cnt);
+ return start;
+}
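+
+/*
+ * Worked example (illustrative): with rchan_cnt = 32 and rflow_cnt = 64,
+ * a call with from = -1 and cnt = 4 searches the combined busy map from
+ * bit 32 upward and returns the start of the first free run, e.g. 32.
+ * With from = 40 and cnt = 4 it succeeds only if flows 40..43 are all
+ * free; if the first free run starts elsewhere it returns -EEXIST.
+ */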
+
+static int __udma_free_gp_rflow_range(struct udma_dev *ud, int from, int cnt)
+{
+ if (from < ud->rchan_cnt)
+ return -EINVAL;
+ if (from + cnt > ud->rflow_cnt)
+ return -EINVAL;
+
+ bitmap_clear(ud->rflow_gp_map_allocated, from, cnt);
+ return 0;
+}
+
+static struct udma_rflow *__udma_get_rflow(struct udma_dev *ud, int id)
+{
+ /*
+ * An attempt to request an rflow by ID can be made for any rflow that
+ * is not in use, on the assumption that the caller knows what it is
+ * doing. The TI-SCI firmware performs an additional permission check
+ * anyway, so this is safe.
+ */
+
+ if (id < 0 || id >= ud->rflow_cnt)
+ return ERR_PTR(-ENOENT);
+
+ if (test_bit(id, ud->rflow_in_use))
+ return ERR_PTR(-ENOENT);
+
+ /* GP rflow has to be allocated first */
+ if (!test_bit(id, ud->rflow_gp_map) &&
+ !test_bit(id, ud->rflow_gp_map_allocated))
+ return ERR_PTR(-EINVAL);
+
+ dev_dbg(ud->dev, "get rflow%d\n", id);
+ set_bit(id, ud->rflow_in_use);
+ return &ud->rflows[id];
+}
+
+static void __udma_put_rflow(struct udma_dev *ud, struct udma_rflow *rflow)
+{
+ if (!test_bit(rflow->id, ud->rflow_in_use)) {
+ dev_err(ud->dev, "attempt to put unused rflow%d\n", rflow->id);
+ return;
+ }
+
+ dev_dbg(ud->dev, "put rflow%d\n", rflow->id);
+ clear_bit(rflow->id, ud->rflow_in_use);
+}
+
+#define UDMA_RESERVE_RESOURCE(res) \
+static struct udma_##res *__udma_reserve_##res(struct udma_dev *ud, \
+ enum udma_tp_level tpl, \
+ int id) \
+{ \
+ if (id >= 0) { \
+ if (test_bit(id, ud->res##_map)) { \
+ dev_err(ud->dev, "res##%d is in use\n", id); \
+ return ERR_PTR(-ENOENT); \
+ } \
+ } else { \
+ int start; \
+ \
+ if (tpl >= ud->match_data->tpl_levels) \
+ tpl = ud->match_data->tpl_levels - 1; \
+ \
+ start = ud->match_data->level_start_idx[tpl]; \
+ \
+ id = find_next_zero_bit(ud->res##_map, ud->res##_cnt, \
+ start); \
+ if (id == ud->res##_cnt) { \
+ return ERR_PTR(-ENOENT); \
+ } \
+ } \
+ \
+ set_bit(id, ud->res##_map); \
+ return &ud->res##s[id]; \
+}
+
+UDMA_RESERVE_RESOURCE(tchan);
+UDMA_RESERVE_RESOURCE(rchan);
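+
+/*
+ * The instantiations above generate __udma_reserve_tchan() and
+ * __udma_reserve_rchan(): a non-negative @id reserves exactly that
+ * channel (or fails with -ENOENT if it is busy), while id == -1 takes
+ * the first free channel at or above the start index of the requested
+ * throughput level.
+ */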
+
+static int udma_get_tchan(struct udma_chan *uc)
+{
+ struct udma_dev *ud = uc->ud;
+
+ if (uc->tchan) {
+ dev_dbg(ud->dev, "chan%d: already have tchan%d allocated\n",
+ uc->id, uc->tchan->id);
+ return 0;
+ }
+
+ uc->tchan = __udma_reserve_tchan(ud, uc->config.channel_tpl, -1);
+ if (IS_ERR(uc->tchan))
+ return PTR_ERR(uc->tchan);
+
+ return 0;
+}
+
+static int udma_get_rchan(struct udma_chan *uc)
+{
+ struct udma_dev *ud = uc->ud;
+
+ if (uc->rchan) {
+ dev_dbg(ud->dev, "chan%d: already have rchan%d allocated\n",
+ uc->id, uc->rchan->id);
+ return 0;
+ }
+
+ uc->rchan = __udma_reserve_rchan(ud, uc->config.channel_tpl, -1);
+ if (IS_ERR(uc->rchan))
+ return PTR_ERR(uc->rchan);
+
+ return 0;
+}
+
+static int udma_get_chan_pair(struct udma_chan *uc)
+{
+ struct udma_dev *ud = uc->ud;
+ const struct udma_match_data *match_data = ud->match_data;
+ int chan_id, end;
+
+ if ((uc->tchan && uc->rchan) && uc->tchan->id == uc->rchan->id) {
+ dev_info(ud->dev, "chan%d: already have channel pair %d allocated\n",
+ uc->id, uc->tchan->id);
+ return 0;
+ }
+
+ if (uc->tchan) {
+ dev_err(ud->dev, "chan%d: already have tchan%d allocated\n",
+ uc->id, uc->tchan->id);
+ return -EBUSY;
+ } else if (uc->rchan) {
+ dev_err(ud->dev, "chan%d: already have rchan%d allocated\n",
+ uc->id, uc->rchan->id);
+ return -EBUSY;
+ }
+
+ /* Can be optimized, but let's have it like this for now */
+ end = min(ud->tchan_cnt, ud->rchan_cnt);
+ /* Try to use the highest TPL channel pair for MEM_TO_MEM channels */
+ chan_id = match_data->level_start_idx[match_data->tpl_levels - 1];
+ for (; chan_id < end; chan_id++) {
+ if (!test_bit(chan_id, ud->tchan_map) &&
+ !test_bit(chan_id, ud->rchan_map))
+ break;
+ }
+
+ if (chan_id == end)
+ return -ENOENT;
+
+ set_bit(chan_id, ud->tchan_map);
+ set_bit(chan_id, ud->rchan_map);
+ uc->tchan = &ud->tchans[chan_id];
+ uc->rchan = &ud->rchans[chan_id];
+
+ return 0;
+}
+
+static int udma_get_rflow(struct udma_chan *uc, int flow_id)
+{
+ struct udma_dev *ud = uc->ud;
+
+ if (!uc->rchan) {
+ dev_err(ud->dev, "chan%d: does not have rchan??\n", uc->id);
+ return -EINVAL;
+ }
+
+ if (uc->rflow) {
+ dev_dbg(ud->dev, "chan%d: already have rflow%d allocated\n",
+ uc->id, uc->rflow->id);
+ return 0;
+ }
+
+ uc->rflow = __udma_get_rflow(ud, flow_id);
+ if (IS_ERR(uc->rflow))
+ return PTR_ERR(uc->rflow);
+
+ return 0;
+}
+
+static void udma_put_rchan(struct udma_chan *uc)
+{
+ struct udma_dev *ud = uc->ud;
+
+ if (uc->rchan) {
+ dev_dbg(ud->dev, "chan%d: put rchan%d\n", uc->id,
+ uc->rchan->id);
+ clear_bit(uc->rchan->id, ud->rchan_map);
+ uc->rchan = NULL;
+ }
+}
+
+static void udma_put_tchan(struct udma_chan *uc)
+{
+ struct udma_dev *ud = uc->ud;
+
+ if (uc->tchan) {
+ dev_dbg(ud->dev, "chan%d: put tchan%d\n", uc->id,
+ uc->tchan->id);
+ clear_bit(uc->tchan->id, ud->tchan_map);
+ uc->tchan = NULL;
+ }
+}
+
+static void udma_put_rflow(struct udma_chan *uc)
+{
+ struct udma_dev *ud = uc->ud;
+
+ if (uc->rflow) {
+ dev_dbg(ud->dev, "chan%d: put rflow%d\n", uc->id,
+ uc->rflow->id);
+ __udma_put_rflow(ud, uc->rflow);
+ uc->rflow = NULL;
+ }
+}
+
+static void udma_free_tx_resources(struct udma_chan *uc)
+{
+ if (!uc->tchan)
+ return;
+
+ k3_ringacc_ring_free(uc->tchan->t_ring);
+ k3_ringacc_ring_free(uc->tchan->tc_ring);
+ uc->tchan->t_ring = NULL;
+ uc->tchan->tc_ring = NULL;
+
+ udma_put_tchan(uc);
+}
+
+static int udma_alloc_tx_resources(struct udma_chan *uc)
+{
+ struct k3_ring_cfg ring_cfg;
+ struct udma_dev *ud = uc->ud;
+ int ret;
+
+ ret = udma_get_tchan(uc);
+ if (ret)
+ return ret;
+
+ uc->tchan->t_ring = k3_ringacc_request_ring(ud->ringacc,
+ uc->tchan->id, 0);
+ if (!uc->tchan->t_ring) {
+ ret = -EBUSY;
+ goto err_tx_ring;
+ }
+
+ uc->tchan->tc_ring = k3_ringacc_request_ring(ud->ringacc, -1, 0);
+ if (!uc->tchan->tc_ring) {
+ ret = -EBUSY;
+ goto err_txc_ring;
+ }
+
+ memset(&ring_cfg, 0, sizeof(ring_cfg));
+ ring_cfg.size = K3_UDMA_DEFAULT_RING_SIZE;
+ ring_cfg.elm_size = K3_RINGACC_RING_ELSIZE_8;
+ ring_cfg.mode = K3_RINGACC_RING_MODE_MESSAGE;
+
+ ret = k3_ringacc_ring_cfg(uc->tchan->t_ring, &ring_cfg);
+ ret |= k3_ringacc_ring_cfg(uc->tchan->tc_ring, &ring_cfg);
+
+ if (ret)
+ goto err_ringcfg;
+
+ return 0;
+
+err_ringcfg:
+ k3_ringacc_ring_free(uc->tchan->tc_ring);
+ uc->tchan->tc_ring = NULL;
+err_txc_ring:
+ k3_ringacc_ring_free(uc->tchan->t_ring);
+ uc->tchan->t_ring = NULL;
+err_tx_ring:
+ udma_put_tchan(uc);
+
+ return ret;
+}
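+
+ /*
+ * Note on the ring pair set up above: t_ring (requested with the fixed
+ * index uc->tchan->id) is the transmit/free ring the driver pushes
+ * descriptors to, while tc_ring (requested with -1, i.e. any free ring)
+ * is the completion ring on which the hardware returns them; tc_ring
+ * also serves as the channel's interrupt source later on.
+ */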
+
+static void udma_free_rx_resources(struct udma_chan *uc)
+{
+ if (!uc->rchan)
+ return;
+
+ if (uc->rflow) {
+ struct udma_rflow *rflow = uc->rflow;
+
+ k3_ringacc_ring_free(rflow->fd_ring);
+ k3_ringacc_ring_free(rflow->r_ring);
+ rflow->fd_ring = NULL;
+ rflow->r_ring = NULL;
+
+ udma_put_rflow(uc);
+ }
+
+ udma_put_rchan(uc);
+}
+
+static int udma_alloc_rx_resources(struct udma_chan *uc)
+{
+ struct udma_dev *ud = uc->ud;
+ struct k3_ring_cfg ring_cfg;
+ struct udma_rflow *rflow;
+ int fd_ring_id;
+ int ret;
+
+ ret = udma_get_rchan(uc);
+ if (ret)
+ return ret;
+
+ /* For MEM_TO_MEM we don't need rflow or rings */
+ if (uc->config.dir == DMA_MEM_TO_MEM)
+ return 0;
+
+ ret = udma_get_rflow(uc, uc->rchan->id);
+ if (ret) {
+ ret = -EBUSY;
+ goto err_rflow;
+ }
+
+ rflow = uc->rflow;
+ fd_ring_id = ud->tchan_cnt + ud->echan_cnt + uc->rchan->id;
+ rflow->fd_ring = k3_ringacc_request_ring(ud->ringacc, fd_ring_id, 0);
+ if (!rflow->fd_ring) {
+ ret = -EBUSY;
+ goto err_rx_ring;
+ }
+
+ rflow->r_ring = k3_ringacc_request_ring(ud->ringacc, -1, 0);
+ if (!rflow->r_ring) {
+ ret = -EBUSY;
+ goto err_rxc_ring;
+ }
+
+ memset(&ring_cfg, 0, sizeof(ring_cfg));
+
+ if (uc->config.pkt_mode)
+ ring_cfg.size = SG_MAX_SEGMENTS;
+ else
+ ring_cfg.size = K3_UDMA_DEFAULT_RING_SIZE;
+
+ ring_cfg.elm_size = K3_RINGACC_RING_ELSIZE_8;
+ ring_cfg.mode = K3_RINGACC_RING_MODE_MESSAGE;
+
+ ret = k3_ringacc_ring_cfg(rflow->fd_ring, &ring_cfg);
+ ring_cfg.size = K3_UDMA_DEFAULT_RING_SIZE;
+ ret |= k3_ringacc_ring_cfg(rflow->r_ring, &ring_cfg);
+
+ if (ret)
+ goto err_ringcfg;
+
+ return 0;
+
+err_ringcfg:
+ k3_ringacc_ring_free(rflow->r_ring);
+ rflow->r_ring = NULL;
+err_rxc_ring:
+ k3_ringacc_ring_free(rflow->fd_ring);
+ rflow->fd_ring = NULL;
+err_rx_ring:
+ udma_put_rflow(uc);
+err_rflow:
+ udma_put_rchan(uc);
+
+ return ret;
+}
+
+#define TISCI_TCHAN_VALID_PARAMS ( \
+ TI_SCI_MSG_VALUE_RM_UDMAP_CH_PAUSE_ON_ERR_VALID | \
+ TI_SCI_MSG_VALUE_RM_UDMAP_CH_TX_FILT_EINFO_VALID | \
+ TI_SCI_MSG_VALUE_RM_UDMAP_CH_TX_FILT_PSWORDS_VALID | \
+ TI_SCI_MSG_VALUE_RM_UDMAP_CH_CHAN_TYPE_VALID | \
+ TI_SCI_MSG_VALUE_RM_UDMAP_CH_TX_SUPR_TDPKT_VALID | \
+ TI_SCI_MSG_VALUE_RM_UDMAP_CH_FETCH_SIZE_VALID | \
+ TI_SCI_MSG_VALUE_RM_UDMAP_CH_CQ_QNUM_VALID)
+
+#define TISCI_RCHAN_VALID_PARAMS ( \
+ TI_SCI_MSG_VALUE_RM_UDMAP_CH_PAUSE_ON_ERR_VALID | \
+ TI_SCI_MSG_VALUE_RM_UDMAP_CH_FETCH_SIZE_VALID | \
+ TI_SCI_MSG_VALUE_RM_UDMAP_CH_CQ_QNUM_VALID | \
+ TI_SCI_MSG_VALUE_RM_UDMAP_CH_CHAN_TYPE_VALID | \
+ TI_SCI_MSG_VALUE_RM_UDMAP_CH_RX_IGNORE_SHORT_VALID | \
+ TI_SCI_MSG_VALUE_RM_UDMAP_CH_RX_IGNORE_LONG_VALID | \
+ TI_SCI_MSG_VALUE_RM_UDMAP_CH_RX_FLOWID_START_VALID | \
+ TI_SCI_MSG_VALUE_RM_UDMAP_CH_RX_FLOWID_CNT_VALID)
+
+static int udma_tisci_m2m_channel_config(struct udma_chan *uc)
+{
+ struct udma_dev *ud = uc->ud;
+ struct udma_tisci_rm *tisci_rm = &ud->tisci_rm;
+ const struct ti_sci_rm_udmap_ops *tisci_ops = tisci_rm->tisci_udmap_ops;
+ struct udma_tchan *tchan = uc->tchan;
+ struct udma_rchan *rchan = uc->rchan;
+ int ret = 0;
+
+ /* Non synchronized - mem to mem type of transfer */
+ int tc_ring = k3_ringacc_get_ring_id(tchan->tc_ring);
+ struct ti_sci_msg_rm_udmap_tx_ch_cfg req_tx = { 0 };
+ struct ti_sci_msg_rm_udmap_rx_ch_cfg req_rx = { 0 };
+
+ req_tx.valid_params = TISCI_TCHAN_VALID_PARAMS;
+ req_tx.nav_id = tisci_rm->tisci_dev_id;
+ req_tx.index = tchan->id;
+ req_tx.tx_chan_type = TI_SCI_RM_UDMAP_CHAN_TYPE_3RDP_BCOPY_PBRR;
+ req_tx.tx_fetch_size = sizeof(struct cppi5_desc_hdr_t) >> 2;
+ req_tx.txcq_qnum = tc_ring;
+
+ ret = tisci_ops->tx_ch_cfg(tisci_rm->tisci, &req_tx);
+ if (ret) {
+ dev_err(ud->dev, "tchan%d cfg failed %d\n", tchan->id, ret);
+ return ret;
+ }
+
+ req_rx.valid_params = TISCI_RCHAN_VALID_PARAMS;
+ req_rx.nav_id = tisci_rm->tisci_dev_id;
+ req_rx.index = rchan->id;
+ req_rx.rx_fetch_size = sizeof(struct cppi5_desc_hdr_t) >> 2;
+ req_rx.rxcq_qnum = tc_ring;
+ req_rx.rx_chan_type = TI_SCI_RM_UDMAP_CHAN_TYPE_3RDP_BCOPY_PBRR;
+
+ ret = tisci_ops->rx_ch_cfg(tisci_rm->tisci, &req_rx);
+ if (ret)
+ dev_err(ud->dev, "rchan%d alloc failed %d\n", rchan->id, ret);
+
+ return ret;
+}
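+
+ /*
+ * The ">> 2" on the fetch sizes above converts bytes to 32-bit words:
+ * the descriptor sizes are computed in bytes, while TI-SCI expects
+ * tx_fetch_size/rx_fetch_size in words.
+ */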
+
+static int udma_tisci_tx_channel_config(struct udma_chan *uc)
+{
+ struct udma_dev *ud = uc->ud;
+ struct udma_tisci_rm *tisci_rm = &ud->tisci_rm;
+ const struct ti_sci_rm_udmap_ops *tisci_ops = tisci_rm->tisci_udmap_ops;
+ struct udma_tchan *tchan = uc->tchan;
+ int tc_ring = k3_ringacc_get_ring_id(tchan->tc_ring);
+ struct ti_sci_msg_rm_udmap_tx_ch_cfg req_tx = { 0 };
+ u32 mode, fetch_size;
+ int ret = 0;
+
+ if (uc->config.pkt_mode) {
+ mode = TI_SCI_RM_UDMAP_CHAN_TYPE_PKT_PBRR;
+ fetch_size = cppi5_hdesc_calc_size(uc->config.needs_epib,
+ uc->config.psd_size, 0);
+ } else {
+ mode = TI_SCI_RM_UDMAP_CHAN_TYPE_3RDP_PBRR;
+ fetch_size = sizeof(struct cppi5_desc_hdr_t);
+ }
+
+ req_tx.valid_params = TISCI_TCHAN_VALID_PARAMS;
+ req_tx.nav_id = tisci_rm->tisci_dev_id;
+ req_tx.index = tchan->id;
+ req_tx.tx_chan_type = mode;
+ req_tx.tx_supr_tdpkt = uc->config.notdpkt;
+ req_tx.tx_fetch_size = fetch_size >> 2;
+ req_tx.txcq_qnum = tc_ring;
+
+ ret = tisci_ops->tx_ch_cfg(tisci_rm->tisci, &req_tx);
+ if (ret)
+ dev_err(ud->dev, "tchan%d cfg failed %d\n", tchan->id, ret);
+
+ return ret;
+}
+
+static int udma_tisci_rx_channel_config(struct udma_chan *uc)
+{
+ struct udma_dev *ud = uc->ud;
+ struct udma_tisci_rm *tisci_rm = &ud->tisci_rm;
+ const struct ti_sci_rm_udmap_ops *tisci_ops = tisci_rm->tisci_udmap_ops;
+ struct udma_rchan *rchan = uc->rchan;
+ int fd_ring = k3_ringacc_get_ring_id(uc->rflow->fd_ring);
+ int rx_ring = k3_ringacc_get_ring_id(uc->rflow->r_ring);
+ struct ti_sci_msg_rm_udmap_rx_ch_cfg req_rx = { 0 };
+ struct ti_sci_msg_rm_udmap_flow_cfg flow_req = { 0 };
+ u32 mode, fetch_size;
+ int ret = 0;
+
+ if (uc->config.pkt_mode) {
+ mode = TI_SCI_RM_UDMAP_CHAN_TYPE_PKT_PBRR;
+ fetch_size = cppi5_hdesc_calc_size(uc->config.needs_epib,
+ uc->config.psd_size, 0);
+ } else {
+ mode = TI_SCI_RM_UDMAP_CHAN_TYPE_3RDP_PBRR;
+ fetch_size = sizeof(struct cppi5_desc_hdr_t);
+ }
+
+ req_rx.valid_params = TISCI_RCHAN_VALID_PARAMS;
+ req_rx.nav_id = tisci_rm->tisci_dev_id;
+ req_rx.index = rchan->id;
+ req_rx.rx_fetch_size = fetch_size >> 2;
+ req_rx.rxcq_qnum = rx_ring;
+ req_rx.rx_chan_type = mode;
+
+ ret = tisci_ops->rx_ch_cfg(tisci_rm->tisci, &req_rx);
+ if (ret) {
+ dev_err(ud->dev, "rchan%d cfg failed %d\n", rchan->id, ret);
+ return ret;
+ }
+
+ flow_req.valid_params =
+ TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_EINFO_PRESENT_VALID |
+ TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_PSINFO_PRESENT_VALID |
+ TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_ERROR_HANDLING_VALID |
+ TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_DESC_TYPE_VALID |
+ TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_DEST_QNUM_VALID |
+ TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_SRC_TAG_HI_SEL_VALID |
+ TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_SRC_TAG_LO_SEL_VALID |
+ TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_DEST_TAG_HI_SEL_VALID |
+ TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_DEST_TAG_LO_SEL_VALID |
+ TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_FDQ0_SZ0_QNUM_VALID |
+ TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_FDQ1_QNUM_VALID |
+ TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_FDQ2_QNUM_VALID |
+ TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_FDQ3_QNUM_VALID;
+
+ flow_req.nav_id = tisci_rm->tisci_dev_id;
+ flow_req.flow_index = rchan->id;
+
+ if (uc->config.needs_epib)
+ flow_req.rx_einfo_present = 1;
+ else
+ flow_req.rx_einfo_present = 0;
+ if (uc->config.psd_size)
+ flow_req.rx_psinfo_present = 1;
+ else
+ flow_req.rx_psinfo_present = 0;
+ flow_req.rx_error_handling = 1;
+ flow_req.rx_dest_qnum = rx_ring;
+ flow_req.rx_src_tag_hi_sel = UDMA_RFLOW_SRCTAG_NONE;
+ flow_req.rx_src_tag_lo_sel = UDMA_RFLOW_SRCTAG_SRC_TAG;
+ flow_req.rx_dest_tag_hi_sel = UDMA_RFLOW_DSTTAG_DST_TAG_HI;
+ flow_req.rx_dest_tag_lo_sel = UDMA_RFLOW_DSTTAG_DST_TAG_LO;
+ flow_req.rx_fdq0_sz0_qnum = fd_ring;
+ flow_req.rx_fdq1_qnum = fd_ring;
+ flow_req.rx_fdq2_qnum = fd_ring;
+ flow_req.rx_fdq3_qnum = fd_ring;
+
+ ret = tisci_ops->rx_flow_cfg(tisci_rm->tisci, &flow_req);
+
+ if (ret)
+ dev_err(ud->dev, "flow%d config failed: %d\n", rchan->id, ret);
+
+ return ret;
+}
+
+static int udma_alloc_chan_resources(struct dma_chan *chan)
+{
+ struct udma_chan *uc = to_udma_chan(chan);
+ struct udma_dev *ud = to_udma_dev(chan->device);
+ const struct udma_match_data *match_data = ud->match_data;
+ struct k3_ring *irq_ring;
+ u32 irq_udma_idx;
+ int ret;
+
+ if (uc->config.pkt_mode || uc->config.dir == DMA_MEM_TO_MEM) {
+ uc->use_dma_pool = true;
+ /* in case of MEM_TO_MEM we have a maximum of two TRs */
+ if (uc->config.dir == DMA_MEM_TO_MEM) {
+ uc->config.hdesc_size = cppi5_trdesc_calc_size(
+ sizeof(struct cppi5_tr_type15_t), 2);
+ uc->config.pkt_mode = false;
+ }
+ }
+
+ if (uc->use_dma_pool) {
+ uc->hdesc_pool = dma_pool_create(uc->name, ud->ddev.dev,
+ uc->config.hdesc_size,
+ ud->desc_align,
+ 0);
+ if (!uc->hdesc_pool) {
+ dev_err(ud->ddev.dev,
+ "Descriptor pool allocation failed\n");
+ uc->use_dma_pool = false;
+ return -ENOMEM;
+ }
+ }
+
+ /*
+ * Make sure that the completion is in a known state:
+ * No teardown, the channel is idle
+ */
+ reinit_completion(&uc->teardown_completed);
+ complete_all(&uc->teardown_completed);
+ uc->state = UDMA_CHAN_IS_IDLE;
+
+ switch (uc->config.dir) {
+ case DMA_MEM_TO_MEM:
+ /* Non synchronized - mem to mem type of transfer */
+ dev_dbg(uc->ud->dev, "%s: chan%d as MEM-to-MEM\n", __func__,
+ uc->id);
+
+ ret = udma_get_chan_pair(uc);
+ if (ret)
+ return ret;
+
+ ret = udma_alloc_tx_resources(uc);
+ if (ret)
+ return ret;
+
+ ret = udma_alloc_rx_resources(uc);
+ if (ret) {
+ udma_free_tx_resources(uc);
+ return ret;
+ }
+
+ uc->config.src_thread = ud->psil_base + uc->tchan->id;
+ uc->config.dst_thread = (ud->psil_base + uc->rchan->id) |
+ K3_PSIL_DST_THREAD_ID_OFFSET;
+
+ irq_ring = uc->tchan->tc_ring;
+ irq_udma_idx = uc->tchan->id;
+
+ ret = udma_tisci_m2m_channel_config(uc);
+ break;
+ case DMA_MEM_TO_DEV:
+ /* Slave transfer synchronized - mem to dev (TX) transfer */
+ dev_dbg(uc->ud->dev, "%s: chan%d as MEM-to-DEV\n", __func__,
+ uc->id);
+
+ ret = udma_alloc_tx_resources(uc);
+ if (ret) {
+ uc->config.remote_thread_id = -1;
+ return ret;
+ }
+
+ uc->config.src_thread = ud->psil_base + uc->tchan->id;
+ uc->config.dst_thread = uc->config.remote_thread_id;
+ uc->config.dst_thread |= K3_PSIL_DST_THREAD_ID_OFFSET;
+
+ irq_ring = uc->tchan->tc_ring;
+ irq_udma_idx = uc->tchan->id;
+
+ ret = udma_tisci_tx_channel_config(uc);
+ break;
+ case DMA_DEV_TO_MEM:
+ /* Slave transfer synchronized - dev to mem (RX) transfer */
+ dev_dbg(uc->ud->dev, "%s: chan%d as DEV-to-MEM\n", __func__,
+ uc->id);
+
+ ret = udma_alloc_rx_resources(uc);
+ if (ret) {
+ uc->config.remote_thread_id = -1;
+ return ret;
+ }
+
+ uc->config.src_thread = uc->config.remote_thread_id;
+ uc->config.dst_thread = (ud->psil_base + uc->rchan->id) |
+ K3_PSIL_DST_THREAD_ID_OFFSET;
+
+ irq_ring = uc->rflow->r_ring;
+ irq_udma_idx = match_data->rchan_oes_offset + uc->rchan->id;
+
+ ret = udma_tisci_rx_channel_config(uc);
+ break;
+ default:
+ /* Cannot happen */
+ dev_err(uc->ud->dev, "%s: chan%d invalid direction (%u)\n",
+ __func__, uc->id, uc->config.dir);
+ return -EINVAL;
+ }
+
+ /* check if the channel configuration was successful */
+ if (ret)
+ goto err_res_free;
+
+ if (udma_is_chan_running(uc)) {
+ dev_warn(ud->dev, "chan%d: is running!\n", uc->id);
+ udma_stop(uc);
+ if (udma_is_chan_running(uc)) {
+ dev_err(ud->dev, "chan%d: won't stop!\n", uc->id);
+ goto err_res_free;
+ }
+ }
+
+ /* PSI-L pairing */
+ ret = navss_psil_pair(ud, uc->config.src_thread, uc->config.dst_thread);
+ if (ret) {
+ dev_err(ud->dev, "PSI-L pairing failed: 0x%04x -> 0x%04x\n",
+ uc->config.src_thread, uc->config.dst_thread);
+ goto err_res_free;
+ }
+
+ uc->psil_paired = true;
+
+ uc->irq_num_ring = k3_ringacc_get_ring_irq_num(irq_ring);
+ if (uc->irq_num_ring <= 0) {
+ dev_err(ud->dev, "Failed to get ring irq (index: %u)\n",
+ k3_ringacc_get_ring_id(irq_ring));
+ ret = -EINVAL;
+ goto err_psi_free;
+ }
+
+ ret = request_irq(uc->irq_num_ring, udma_ring_irq_handler,
+ IRQF_TRIGGER_HIGH, uc->name, uc);
+ if (ret) {
+ dev_err(ud->dev, "chan%d: ring irq request failed\n", uc->id);
+ goto err_irq_free;
+ }
+
+ /* Event from UDMA (TR events) only needed for slave TR mode channels */
+ if (is_slave_direction(uc->config.dir) && !uc->config.pkt_mode) {
+ uc->irq_num_udma = ti_sci_inta_msi_get_virq(ud->dev,
+ irq_udma_idx);
+ if (uc->irq_num_udma <= 0) {
+ dev_err(ud->dev, "Failed to get udma irq (index: %u)\n",
+ irq_udma_idx);
+ free_irq(uc->irq_num_ring, uc);
+ ret = -EINVAL;
+ goto err_irq_free;
+ }
+
+ ret = request_irq(uc->irq_num_udma, udma_udma_irq_handler, 0,
+ uc->name, uc);
+ if (ret) {
+ dev_err(ud->dev, "chan%d: UDMA irq request failed\n",
+ uc->id);
+ free_irq(uc->irq_num_ring, uc);
+ goto err_irq_free;
+ }
+ } else {
+ uc->irq_num_udma = 0;
+ }
+
+ udma_reset_rings(uc);
+
+ INIT_DELAYED_WORK(&uc->tx_drain.work, udma_check_tx_completion);
+ return 0;
+
+err_irq_free:
+ uc->irq_num_ring = 0;
+ uc->irq_num_udma = 0;
+err_psi_free:
+ navss_psil_unpair(ud, uc->config.src_thread, uc->config.dst_thread);
+ uc->psil_paired = false;
+err_res_free:
+ udma_free_tx_resources(uc);
+ udma_free_rx_resources(uc);
+
+ udma_reset_uchan(uc);
+
+ if (uc->use_dma_pool) {
+ dma_pool_destroy(uc->hdesc_pool);
+ uc->use_dma_pool = false;
+ }
+
+ return ret;
+}
+
+static int udma_slave_config(struct dma_chan *chan,
+ struct dma_slave_config *cfg)
+{
+ struct udma_chan *uc = to_udma_chan(chan);
+
+ memcpy(&uc->cfg, cfg, sizeof(uc->cfg));
+
+ return 0;
+}
+
+static struct udma_desc *udma_alloc_tr_desc(struct udma_chan *uc,
+ size_t tr_size, int tr_count,
+ enum dma_transfer_direction dir)
+{
+ struct udma_hwdesc *hwdesc;
+ struct cppi5_desc_hdr_t *tr_desc;
+ struct udma_desc *d;
+ u32 reload_count = 0;
+ u32 ring_id;
+
+ switch (tr_size) {
+ case 16:
+ case 32:
+ case 64:
+ case 128:
+ break;
+ default:
+ dev_err(uc->ud->dev, "Unsupported TR size of %zu\n", tr_size);
+ return NULL;
+ }
+
+ /* We have only one descriptor containing multiple TRs */
+ d = kzalloc(sizeof(*d) + sizeof(d->hwdesc[0]), GFP_NOWAIT);
+ if (!d)
+ return NULL;
+
+ d->sglen = tr_count;
+
+ d->hwdesc_count = 1;
+ hwdesc = &d->hwdesc[0];
+
+ /* Allocate memory for DMA ring descriptor */
+ if (uc->use_dma_pool) {
+ hwdesc->cppi5_desc_size = uc->config.hdesc_size;
+ hwdesc->cppi5_desc_vaddr = dma_pool_zalloc(uc->hdesc_pool,
+ GFP_NOWAIT,
+ &hwdesc->cppi5_desc_paddr);
+ } else {
+ hwdesc->cppi5_desc_size = cppi5_trdesc_calc_size(tr_size,
+ tr_count);
+ hwdesc->cppi5_desc_size = ALIGN(hwdesc->cppi5_desc_size,
+ uc->ud->desc_align);
+ hwdesc->cppi5_desc_vaddr = dma_alloc_coherent(uc->ud->dev,
+ hwdesc->cppi5_desc_size,
+ &hwdesc->cppi5_desc_paddr,
+ GFP_NOWAIT);
+ }
+
+ if (!hwdesc->cppi5_desc_vaddr) {
+ kfree(d);
+ return NULL;
+ }
+
+ /* Start of the TR req records */
+ hwdesc->tr_req_base = hwdesc->cppi5_desc_vaddr + tr_size;
+ /* Start address of the TR response array */
+ hwdesc->tr_resp_base = hwdesc->tr_req_base + tr_size * tr_count;
+
+ tr_desc = hwdesc->cppi5_desc_vaddr;
+
+ if (uc->cyclic)
+ reload_count = CPPI5_INFO0_TRDESC_RLDCNT_INFINITE;
+
+ if (dir == DMA_DEV_TO_MEM)
+ ring_id = k3_ringacc_get_ring_id(uc->rflow->r_ring);
+ else
+ ring_id = k3_ringacc_get_ring_id(uc->tchan->tc_ring);
+
+ cppi5_trdesc_init(tr_desc, tr_count, tr_size, 0, reload_count);
+ cppi5_desc_set_pktids(tr_desc, uc->id,
+ CPPI5_INFO1_DESC_FLOWID_DEFAULT);
+ cppi5_desc_set_retpolicy(tr_desc, 0, ring_id);
+
+ return d;
+}
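+
+ /*
+ * Layout of the TR descriptor built above (one contiguous allocation,
+ * offsets in units of tr_size):
+ *
+ *	cppi5_desc_vaddr: [ TR descriptor header           ]
+ *	tr_req_base:      [ TR record 0 | ... | tr_count-1 ]
+ *	tr_resp_base:     [ TR response records            ]
+ */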
+
+static struct udma_desc *
+udma_prep_slave_sg_tr(struct udma_chan *uc, struct scatterlist *sgl,
+ unsigned int sglen, enum dma_transfer_direction dir,
+ unsigned long tx_flags, void *context)
+{
+ enum dma_slave_buswidth dev_width;
+ struct scatterlist *sgent;
+ struct udma_desc *d;
+ size_t tr_size;
+ struct cppi5_tr_type1_t *tr_req = NULL;
+ unsigned int i;
+ u32 burst;
+
+ if (dir == DMA_DEV_TO_MEM) {
+ dev_width = uc->cfg.src_addr_width;
+ burst = uc->cfg.src_maxburst;
+ } else if (dir == DMA_MEM_TO_DEV) {
+ dev_width = uc->cfg.dst_addr_width;
+ burst = uc->cfg.dst_maxburst;
+ } else {
+ dev_err(uc->ud->dev, "%s: bad direction?\n", __func__);
+ return NULL;
+ }
+
+ if (!burst)
+ burst = 1;
+
+ /* Now allocate and setup the descriptor. */
+ tr_size = sizeof(struct cppi5_tr_type1_t);
+ d = udma_alloc_tr_desc(uc, tr_size, sglen, dir);
+ if (!d)
+ return NULL;
+
+ d->sglen = sglen;
+
+ tr_req = d->hwdesc[0].tr_req_base;
+ for_each_sg(sgl, sgent, sglen, i) {
+ d->residue += sg_dma_len(sgent);
+
+ cppi5_tr_init(&tr_req[i].flags, CPPI5_TR_TYPE1, false, false,
+ CPPI5_TR_EVENT_SIZE_COMPLETION, 0);
+ cppi5_tr_csf_set(&tr_req[i].flags, CPPI5_TR_CSF_SUPR_EVT);
+
+ tr_req[i].addr = sg_dma_address(sgent);
+ tr_req[i].icnt0 = burst * dev_width;
+ tr_req[i].dim1 = burst * dev_width;
+ tr_req[i].icnt1 = sg_dma_len(sgent) / tr_req[i].icnt0;
+ }
+
+ cppi5_tr_csf_set(&tr_req[i - 1].flags, CPPI5_TR_CSF_EOP);
+
+ return d;
+}
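+
+ /*
+ * Worked example for the TR setup above, assuming DMA_MEM_TO_DEV with
+ * dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES and dst_maxburst = 8:
+ * icnt0 = dim1 = 8 * 4 = 32 bytes per inner iteration, so a 4096 byte
+ * sg entry programs icnt1 = 4096 / 32 = 128 iterations, each stepping
+ * 32 bytes through memory.
+ */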
+
+static int udma_configure_statictr(struct udma_chan *uc, struct udma_desc *d,
+ enum dma_slave_buswidth dev_width,
+ u16 elcnt)
+{
+ if (uc->config.ep_type != PSIL_EP_PDMA_XY)
+ return 0;
+
+ /* Bus width translates to the element size (ES) */
+ switch (dev_width) {
+ case DMA_SLAVE_BUSWIDTH_1_BYTE:
+ d->static_tr.elsize = 0;
+ break;
+ case DMA_SLAVE_BUSWIDTH_2_BYTES:
+ d->static_tr.elsize = 1;
+ break;
+ case DMA_SLAVE_BUSWIDTH_3_BYTES:
+ d->static_tr.elsize = 2;
+ break;
+ case DMA_SLAVE_BUSWIDTH_4_BYTES:
+ d->static_tr.elsize = 3;
+ break;
+ case DMA_SLAVE_BUSWIDTH_8_BYTES:
+ d->static_tr.elsize = 4;
+ break;
+ default: /* not reached */
+ return -EINVAL;
+ }
+
+ d->static_tr.elcnt = elcnt;
+
+ /*
+ * PDMA must close the packet when the channel is in packet mode.
+ * For TR mode, when the channel is not cyclic, we also need PDMA to
+ * close the packet, otherwise the transfer will stall because PDMA
+ * holds on to the data it has received from the peripheral.
+ */
+ if (uc->config.pkt_mode || !uc->cyclic) {
+ unsigned int div = dev_width * elcnt;
+
+ if (uc->cyclic)
+ d->static_tr.bstcnt = d->residue / d->sglen / div;
+ else
+ d->static_tr.bstcnt = d->residue / div;
+
+ if (uc->config.dir == DMA_DEV_TO_MEM &&
+ d->static_tr.bstcnt > uc->ud->match_data->statictr_z_mask)
+ return -EINVAL;
+ } else {
+ d->static_tr.bstcnt = 0;
+ }
+
+ return 0;
+}
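+
+ /*
+ * Example: a non-cyclic PSIL_EP_PDMA_XY transfer of d->residue = 4096
+ * bytes with dev_width = 4 and elcnt = 8 gives
+ * bstcnt = 4096 / (4 * 8) = 128, comfortably below the statictr_z_mask
+ * limit (12 bits on AM654, 24 bits on J721E per the match data below).
+ */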
+
+static struct udma_desc *
+udma_prep_slave_sg_pkt(struct udma_chan *uc, struct scatterlist *sgl,
+ unsigned int sglen, enum dma_transfer_direction dir,
+ unsigned long tx_flags, void *context)
+{
+ struct scatterlist *sgent;
+ struct cppi5_host_desc_t *h_desc = NULL;
+ struct udma_desc *d;
+ u32 ring_id;
+ unsigned int i;
+
+ d = kzalloc(sizeof(*d) + sglen * sizeof(d->hwdesc[0]), GFP_NOWAIT);
+ if (!d)
+ return NULL;
+
+ d->sglen = sglen;
+ d->hwdesc_count = sglen;
+
+ if (dir == DMA_DEV_TO_MEM)
+ ring_id = k3_ringacc_get_ring_id(uc->rflow->r_ring);
+ else
+ ring_id = k3_ringacc_get_ring_id(uc->tchan->tc_ring);
+
+ for_each_sg(sgl, sgent, sglen, i) {
+ struct udma_hwdesc *hwdesc = &d->hwdesc[i];
+ dma_addr_t sg_addr = sg_dma_address(sgent);
+ struct cppi5_host_desc_t *desc;
+ size_t sg_len = sg_dma_len(sgent);
+
+ hwdesc->cppi5_desc_vaddr = dma_pool_zalloc(uc->hdesc_pool,
+ GFP_NOWAIT,
+ &hwdesc->cppi5_desc_paddr);
+ if (!hwdesc->cppi5_desc_vaddr) {
+ dev_err(uc->ud->dev,
+ "descriptor%d allocation failed\n", i);
+
+ udma_free_hwdesc(uc, d);
+ kfree(d);
+ return NULL;
+ }
+
+ d->residue += sg_len;
+ hwdesc->cppi5_desc_size = uc->config.hdesc_size;
+ desc = hwdesc->cppi5_desc_vaddr;
+
+ if (i == 0) {
+ cppi5_hdesc_init(desc, 0, 0);
+ /* Flow and Packet ID */
+ cppi5_desc_set_pktids(&desc->hdr, uc->id,
+ CPPI5_INFO1_DESC_FLOWID_DEFAULT);
+ cppi5_desc_set_retpolicy(&desc->hdr, 0, ring_id);
+ } else {
+ cppi5_hdesc_reset_hbdesc(desc);
+ cppi5_desc_set_retpolicy(&desc->hdr, 0, 0xffff);
+ }
+
+ /* attach the sg buffer to the descriptor */
+ cppi5_hdesc_attach_buf(desc, sg_addr, sg_len, sg_addr, sg_len);
+
+ /* Link to the previous descriptor as a host buffer descriptor */
+ if (h_desc)
+ cppi5_hdesc_link_hbdesc(h_desc,
+ hwdesc->cppi5_desc_paddr);
+
+ if (dir == DMA_MEM_TO_DEV)
+ h_desc = desc;
+ }
+
+ if (d->residue >= SZ_4M) {
+ dev_err(uc->ud->dev,
+ "%s: Transfer size %u is over the supported 4M range\n",
+ __func__, d->residue);
+ udma_free_hwdesc(uc, d);
+ kfree(d);
+ return NULL;
+ }
+
+ h_desc = d->hwdesc[0].cppi5_desc_vaddr;
+ cppi5_hdesc_set_pktlen(h_desc, d->residue);
+
+ return d;
+}
+
+static int udma_attach_metadata(struct dma_async_tx_descriptor *desc,
+ void *data, size_t len)
+{
+ struct udma_desc *d = to_udma_desc(desc);
+ struct udma_chan *uc = to_udma_chan(desc->chan);
+ struct cppi5_host_desc_t *h_desc;
+ u32 psd_size = len;
+ u32 flags = 0;
+
+ if (!uc->config.pkt_mode || !uc->config.metadata_size)
+ return -ENOTSUPP;
+
+ if (!data || len > uc->config.metadata_size)
+ return -EINVAL;
+
+ if (uc->config.needs_epib && len < CPPI5_INFO0_HDESC_EPIB_SIZE)
+ return -EINVAL;
+
+ h_desc = d->hwdesc[0].cppi5_desc_vaddr;
+ if (d->dir == DMA_MEM_TO_DEV)
+ memcpy(h_desc->epib, data, len);
+
+ if (uc->config.needs_epib)
+ psd_size -= CPPI5_INFO0_HDESC_EPIB_SIZE;
+
+ d->metadata = data;
+ d->metadata_size = len;
+ if (uc->config.needs_epib)
+ flags |= CPPI5_INFO0_HDESC_EPIB_PRESENT;
+
+ cppi5_hdesc_update_flags(h_desc, flags);
+ cppi5_hdesc_update_psdata_size(h_desc, psd_size);
+
+ return 0;
+}
+
+static void *udma_get_metadata_ptr(struct dma_async_tx_descriptor *desc,
+ size_t *payload_len, size_t *max_len)
+{
+ struct udma_desc *d = to_udma_desc(desc);
+ struct udma_chan *uc = to_udma_chan(desc->chan);
+ struct cppi5_host_desc_t *h_desc;
+
+ if (!uc->config.pkt_mode || !uc->config.metadata_size)
+ return ERR_PTR(-ENOTSUPP);
+
+ h_desc = d->hwdesc[0].cppi5_desc_vaddr;
+
+ *max_len = uc->config.metadata_size;
+
+ *payload_len = cppi5_hdesc_epib_present(&h_desc->hdr) ?
+ CPPI5_INFO0_HDESC_EPIB_SIZE : 0;
+ *payload_len += cppi5_hdesc_get_psdata_size(h_desc);
+
+ return h_desc->epib;
+}
+
+static int udma_set_metadata_len(struct dma_async_tx_descriptor *desc,
+ size_t payload_len)
+{
+ struct udma_desc *d = to_udma_desc(desc);
+ struct udma_chan *uc = to_udma_chan(desc->chan);
+ struct cppi5_host_desc_t *h_desc;
+ u32 psd_size = payload_len;
+ u32 flags = 0;
+
+ if (!uc->config.pkt_mode || !uc->config.metadata_size)
+ return -ENOTSUPP;
+
+ if (payload_len > uc->config.metadata_size)
+ return -EINVAL;
+
+ if (uc->config.needs_epib && payload_len < CPPI5_INFO0_HDESC_EPIB_SIZE)
+ return -EINVAL;
+
+ h_desc = d->hwdesc[0].cppi5_desc_vaddr;
+
+ if (uc->config.needs_epib) {
+ psd_size -= CPPI5_INFO0_HDESC_EPIB_SIZE;
+ flags |= CPPI5_INFO0_HDESC_EPIB_PRESENT;
+ }
+
+ cppi5_hdesc_update_flags(h_desc, flags);
+ cppi5_hdesc_update_psdata_size(h_desc, psd_size);
+
+ return 0;
+}
+
+static struct dma_descriptor_metadata_ops metadata_ops = {
+ .attach = udma_attach_metadata,
+ .get_ptr = udma_get_metadata_ptr,
+ .set_len = udma_set_metadata_len,
+};
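+
+ /*
+ * Client-side view of these ops, via the generic dmaengine metadata
+ * helpers (illustrative sketch, error handling omitted):
+ *
+ *	DESC_METADATA_CLIENT mode - attach the client's own buffer:
+ *		dmaengine_desc_attach_metadata(desc, md_buf, md_len);
+ *
+ *	DESC_METADATA_ENGINE mode - write in place, e.g. on TX:
+ *		ptr = dmaengine_desc_get_metadata_ptr(desc, &payload, &max);
+ *		... fill up to max bytes at ptr, then ...
+ *		dmaengine_desc_set_metadata_len(desc, filled_len);
+ */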
+
+static struct dma_async_tx_descriptor *
+udma_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
+ unsigned int sglen, enum dma_transfer_direction dir,
+ unsigned long tx_flags, void *context)
+{
+ struct udma_chan *uc = to_udma_chan(chan);
+ enum dma_slave_buswidth dev_width;
+ struct udma_desc *d;
+ u32 burst;
+
+ if (dir != uc->config.dir) {
+ dev_err(chan->device->dev,
+ "%s: chan%d is for %s, not supporting %s\n",
+ __func__, uc->id,
+ dmaengine_get_direction_text(uc->config.dir),
+ dmaengine_get_direction_text(dir));
+ return NULL;
+ }
+
+ if (dir == DMA_DEV_TO_MEM) {
+ dev_width = uc->cfg.src_addr_width;
+ burst = uc->cfg.src_maxburst;
+ } else if (dir == DMA_MEM_TO_DEV) {
+ dev_width = uc->cfg.dst_addr_width;
+ burst = uc->cfg.dst_maxburst;
+ } else {
+ dev_err(chan->device->dev, "%s: bad direction?\n", __func__);
+ return NULL;
+ }
+
+ if (!burst)
+ burst = 1;
+
+ if (uc->config.pkt_mode)
+ d = udma_prep_slave_sg_pkt(uc, sgl, sglen, dir, tx_flags,
+ context);
+ else
+ d = udma_prep_slave_sg_tr(uc, sgl, sglen, dir, tx_flags,
+ context);
+
+ if (!d)
+ return NULL;
+
+ d->dir = dir;
+ d->desc_idx = 0;
+ d->tr_idx = 0;
+
+ /* static TR for remote PDMA */
+ if (udma_configure_statictr(uc, d, dev_width, burst)) {
+ dev_err(uc->ud->dev,
+ "%s: StaticTR Z is limited to maximum 4095 (%u)\n",
+ __func__, d->static_tr.bstcnt);
+
+ udma_free_hwdesc(uc, d);
+ kfree(d);
+ return NULL;
+ }
+
+ if (uc->config.metadata_size)
+ d->vd.tx.metadata_ops = &metadata_ops;
+
+ return vchan_tx_prep(&uc->vc, &d->vd, tx_flags);
+}
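+
+ /*
+ * End-to-end client view of the slave path (illustrative sketch; the
+ * names and values are hypothetical, error handling omitted):
+ *
+ *	chan = dma_request_chan(dev, "tx");
+ *	memset(&cfg, 0, sizeof(cfg));
+ *	cfg.dst_addr = fifo_phys;
+ *	cfg.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
+ *	cfg.dst_maxburst = 8;
+ *	dmaengine_slave_config(chan, &cfg);
+ *	desc = dmaengine_prep_slave_sg(chan, sgl, nents, DMA_MEM_TO_DEV,
+ *				       DMA_PREP_INTERRUPT);
+ *	cookie = dmaengine_submit(desc);
+ *	dma_async_issue_pending(chan);
+ */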
+
+static struct udma_desc *
+udma_prep_dma_cyclic_tr(struct udma_chan *uc, dma_addr_t buf_addr,
+ size_t buf_len, size_t period_len,
+ enum dma_transfer_direction dir, unsigned long flags)
+{
+ enum dma_slave_buswidth dev_width;
+ struct udma_desc *d;
+ size_t tr_size;
+ struct cppi5_tr_type1_t *tr_req;
+ unsigned int i;
+ unsigned int periods = buf_len / period_len;
+ u32 burst;
+
+ if (dir == DMA_DEV_TO_MEM) {
+ dev_width = uc->cfg.src_addr_width;
+ burst = uc->cfg.src_maxburst;
+ } else if (dir == DMA_MEM_TO_DEV) {
+ dev_width = uc->cfg.dst_addr_width;
+ burst = uc->cfg.dst_maxburst;
+ } else {
+ dev_err(uc->ud->dev, "%s: bad direction?\n", __func__);
+ return NULL;
+ }
+
+ if (!burst)
+ burst = 1;
+
+ /* Now allocate and setup the descriptor. */
+ tr_size = sizeof(struct cppi5_tr_type1_t);
+ d = udma_alloc_tr_desc(uc, tr_size, periods, dir);
+ if (!d)
+ return NULL;
+
+ tr_req = d->hwdesc[0].tr_req_base;
+ for (i = 0; i < periods; i++) {
+ cppi5_tr_init(&tr_req[i].flags, CPPI5_TR_TYPE1, false, false,
+ CPPI5_TR_EVENT_SIZE_COMPLETION, 0);
+
+ tr_req[i].addr = buf_addr + period_len * i;
+ tr_req[i].icnt0 = dev_width;
+ tr_req[i].icnt1 = period_len / dev_width;
+ tr_req[i].dim1 = dev_width;
+
+ if (!(flags & DMA_PREP_INTERRUPT))
+ cppi5_tr_csf_set(&tr_req[i].flags,
+ CPPI5_TR_CSF_SUPR_EVT);
+ }
+
+ return d;
+}
+
+static struct udma_desc *
+udma_prep_dma_cyclic_pkt(struct udma_chan *uc, dma_addr_t buf_addr,
+ size_t buf_len, size_t period_len,
+ enum dma_transfer_direction dir, unsigned long flags)
+{
+ struct udma_desc *d;
+ u32 ring_id;
+ int i;
+ int periods = buf_len / period_len;
+
+ if (periods > (K3_UDMA_DEFAULT_RING_SIZE - 1))
+ return NULL;
+
+ if (period_len >= SZ_4M)
+ return NULL;
+
+ d = kzalloc(sizeof(*d) + periods * sizeof(d->hwdesc[0]), GFP_NOWAIT);
+ if (!d)
+ return NULL;
+
+ d->hwdesc_count = periods;
+
+ /* TODO: re-check this... */
+ if (dir == DMA_DEV_TO_MEM)
+ ring_id = k3_ringacc_get_ring_id(uc->rflow->r_ring);
+ else
+ ring_id = k3_ringacc_get_ring_id(uc->tchan->tc_ring);
+
+ for (i = 0; i < periods; i++) {
+ struct udma_hwdesc *hwdesc = &d->hwdesc[i];
+ dma_addr_t period_addr = buf_addr + (period_len * i);
+ struct cppi5_host_desc_t *h_desc;
+
+ hwdesc->cppi5_desc_vaddr = dma_pool_zalloc(uc->hdesc_pool,
+ GFP_NOWAIT,
+ &hwdesc->cppi5_desc_paddr);
+ if (!hwdesc->cppi5_desc_vaddr) {
+ dev_err(uc->ud->dev,
+ "descriptor%d allocation failed\n", i);
+
+ udma_free_hwdesc(uc, d);
+ kfree(d);
+ return NULL;
+ }
+
+ hwdesc->cppi5_desc_size = uc->config.hdesc_size;
+ h_desc = hwdesc->cppi5_desc_vaddr;
+
+ cppi5_hdesc_init(h_desc, 0, 0);
+ cppi5_hdesc_set_pktlen(h_desc, period_len);
+
+ /* Flow and Packet ID */
+ cppi5_desc_set_pktids(&h_desc->hdr, uc->id,
+ CPPI5_INFO1_DESC_FLOWID_DEFAULT);
+ cppi5_desc_set_retpolicy(&h_desc->hdr, 0, ring_id);
+
+ /* attach each period to a new descriptor */
+ cppi5_hdesc_attach_buf(h_desc,
+ period_addr, period_len,
+ period_addr, period_len);
+ }
+
+ return d;
+}
+
+static struct dma_async_tx_descriptor *
+udma_prep_dma_cyclic(struct dma_chan *chan, dma_addr_t buf_addr, size_t buf_len,
+ size_t period_len, enum dma_transfer_direction dir,
+ unsigned long flags)
+{
+ struct udma_chan *uc = to_udma_chan(chan);
+ enum dma_slave_buswidth dev_width;
+ struct udma_desc *d;
+ u32 burst;
+
+ if (dir != uc->config.dir) {
+ dev_err(chan->device->dev,
+ "%s: chan%d is for %s, not supporting %s\n",
+ __func__, uc->id,
+ dmaengine_get_direction_text(uc->config.dir),
+ dmaengine_get_direction_text(dir));
+ return NULL;
+ }
+
+ uc->cyclic = true;
+
+ if (dir == DMA_DEV_TO_MEM) {
+ dev_width = uc->cfg.src_addr_width;
+ burst = uc->cfg.src_maxburst;
+ } else if (dir == DMA_MEM_TO_DEV) {
+ dev_width = uc->cfg.dst_addr_width;
+ burst = uc->cfg.dst_maxburst;
+ } else {
+ dev_err(uc->ud->dev, "%s: bad direction?\n", __func__);
+ return NULL;
+ }
+
+ if (!burst)
+ burst = 1;
+
+ if (uc->config.pkt_mode)
+ d = udma_prep_dma_cyclic_pkt(uc, buf_addr, buf_len, period_len,
+ dir, flags);
+ else
+ d = udma_prep_dma_cyclic_tr(uc, buf_addr, buf_len, period_len,
+ dir, flags);
+
+ if (!d)
+ return NULL;
+
+ d->sglen = buf_len / period_len;
+
+ d->dir = dir;
+ d->residue = buf_len;
+
+ /* static TR for remote PDMA */
+ if (udma_configure_statictr(uc, d, dev_width, burst)) {
+ dev_err(uc->ud->dev,
+ "%s: StaticTR Z is limited to maximum 4095 (%u)\n",
+ __func__, d->static_tr.bstcnt);
+
+ udma_free_hwdesc(uc, d);
+ kfree(d);
+ return NULL;
+ }
+
+ if (uc->config.metadata_size)
+ d->vd.tx.metadata_ops = &metadata_ops;
+
+ return vchan_tx_prep(&uc->vc, &d->vd, flags);
+}
+
+static struct dma_async_tx_descriptor *
+udma_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
+ size_t len, unsigned long tx_flags)
+{
+ struct udma_chan *uc = to_udma_chan(chan);
+ struct udma_desc *d;
+ struct cppi5_tr_type15_t *tr_req;
+ int num_tr;
+ size_t tr_size = sizeof(struct cppi5_tr_type15_t);
+ u16 tr0_cnt0, tr0_cnt1, tr1_cnt0;
+
+ if (uc->config.dir != DMA_MEM_TO_MEM) {
+ dev_err(chan->device->dev,
+ "%s: chan%d is for %s, not supporting %s\n",
+ __func__, uc->id,
+ dmaengine_get_direction_text(uc->config.dir),
+ dmaengine_get_direction_text(DMA_MEM_TO_MEM));
+ return NULL;
+ }
+
+ if (len < SZ_64K) {
+ num_tr = 1;
+ tr0_cnt0 = len;
+ tr0_cnt1 = 1;
+ } else {
+ unsigned long align_to = __ffs(src | dest);
+
+ if (align_to > 3)
+ align_to = 3;
+ /*
+ * Keep it simple: tr0 moves blocks of (SZ_64K - alignment)
+ * bytes, tr1 moves the remainder.
+ */
+ num_tr = 2;
+ tr0_cnt0 = (SZ_64K - BIT(align_to));
+ if (len / tr0_cnt0 >= SZ_64K) {
+ dev_err(uc->ud->dev, "size %zu is not supported\n",
+ len);
+ return NULL;
+ }
+
+ tr0_cnt1 = len / tr0_cnt0;
+ tr1_cnt0 = len % tr0_cnt0;
+ }
+
+ d = udma_alloc_tr_desc(uc, tr_size, num_tr, DMA_MEM_TO_MEM);
+ if (!d)
+ return NULL;
+
+ d->dir = DMA_MEM_TO_MEM;
+ d->desc_idx = 0;
+ d->tr_idx = 0;
+ d->residue = len;
+
+ tr_req = d->hwdesc[0].tr_req_base;
+
+ cppi5_tr_init(&tr_req[0].flags, CPPI5_TR_TYPE15, false, true,
+ CPPI5_TR_EVENT_SIZE_COMPLETION, 0);
+ cppi5_tr_csf_set(&tr_req[0].flags, CPPI5_TR_CSF_SUPR_EVT);
+
+ tr_req[0].addr = src;
+ tr_req[0].icnt0 = tr0_cnt0;
+ tr_req[0].icnt1 = tr0_cnt1;
+ tr_req[0].icnt2 = 1;
+ tr_req[0].icnt3 = 1;
+ tr_req[0].dim1 = tr0_cnt0;
+
+ tr_req[0].daddr = dest;
+ tr_req[0].dicnt0 = tr0_cnt0;
+ tr_req[0].dicnt1 = tr0_cnt1;
+ tr_req[0].dicnt2 = 1;
+ tr_req[0].dicnt3 = 1;
+ tr_req[0].ddim1 = tr0_cnt0;
+
+ if (num_tr == 2) {
+ cppi5_tr_init(&tr_req[1].flags, CPPI5_TR_TYPE15, false, true,
+ CPPI5_TR_EVENT_SIZE_COMPLETION, 0);
+ cppi5_tr_csf_set(&tr_req[1].flags, CPPI5_TR_CSF_SUPR_EVT);
+
+ tr_req[1].addr = src + tr0_cnt1 * tr0_cnt0;
+ tr_req[1].icnt0 = tr1_cnt0;
+ tr_req[1].icnt1 = 1;
+ tr_req[1].icnt2 = 1;
+ tr_req[1].icnt3 = 1;
+
+ tr_req[1].daddr = dest + tr0_cnt1 * tr0_cnt0;
+ tr_req[1].dicnt0 = tr1_cnt0;
+ tr_req[1].dicnt1 = 1;
+ tr_req[1].dicnt2 = 1;
+ tr_req[1].dicnt3 = 1;
+ }
+
+ cppi5_tr_csf_set(&tr_req[num_tr - 1].flags, CPPI5_TR_CSF_EOP);
+
+ if (uc->config.metadata_size)
+ d->vd.tx.metadata_ops = &metadata_ops;
+
+ return vchan_tx_prep(&uc->vc, &d->vd, tx_flags);
+}
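+
+ /*
+ * Split example for the >= 64K path above: len = 200000 with src and
+ * dest 8-byte aligned caps align_to at 3, so tr0_cnt0 = SZ_64K - 8 =
+ * 65528; tr0 then moves tr0_cnt1 = 200000 / 65528 = 3 blocks (196584
+ * bytes) and tr1 moves the remaining tr1_cnt0 = 3416 bytes.
+ */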
+
+static void udma_issue_pending(struct dma_chan *chan)
+{
+ struct udma_chan *uc = to_udma_chan(chan);
+ unsigned long flags;
+
+ spin_lock_irqsave(&uc->vc.lock, flags);
+
+ /* If we have something pending and no active descriptor, then */
+ if (vchan_issue_pending(&uc->vc) && !uc->desc) {
+ /*
+ * start a descriptor if the channel is NOT [marked as
+ * terminating _and_ it is still running (teardown has not
+ * completed yet)].
+ */
+ if (!(uc->state == UDMA_CHAN_IS_TERMINATING &&
+ udma_is_chan_running(uc)))
+ udma_start(uc);
+ }
+
+ spin_unlock_irqrestore(&uc->vc.lock, flags);
+}
+
+static enum dma_status udma_tx_status(struct dma_chan *chan,
+ dma_cookie_t cookie,
+ struct dma_tx_state *txstate)
+{
+ struct udma_chan *uc = to_udma_chan(chan);
+ enum dma_status ret;
+ unsigned long flags;
+
+ spin_lock_irqsave(&uc->vc.lock, flags);
+
+ ret = dma_cookie_status(chan, cookie, txstate);
+
+ if (ret == DMA_IN_PROGRESS && udma_is_chan_paused(uc))
+ ret = DMA_PAUSED;
+
+ if (ret == DMA_COMPLETE || !txstate)
+ goto out;
+
+ if (uc->desc && uc->desc->vd.tx.cookie == cookie) {
+ u32 peer_bcnt = 0;
+ u32 bcnt = 0;
+ u32 residue = uc->desc->residue;
+ u32 delay = 0;
+
+ if (uc->desc->dir == DMA_MEM_TO_DEV) {
+ bcnt = udma_tchanrt_read(uc->tchan,
+ UDMA_TCHAN_RT_SBCNT_REG);
+
+ if (uc->config.ep_type != PSIL_EP_NATIVE) {
+ peer_bcnt = udma_tchanrt_read(uc->tchan,
+ UDMA_TCHAN_RT_PEER_BCNT_REG);
+
+ if (bcnt > peer_bcnt)
+ delay = bcnt - peer_bcnt;
+ }
+ } else if (uc->desc->dir == DMA_DEV_TO_MEM) {
+ bcnt = udma_rchanrt_read(uc->rchan,
+ UDMA_RCHAN_RT_BCNT_REG);
+
+ if (uc->config.ep_type != PSIL_EP_NATIVE) {
+ peer_bcnt = udma_rchanrt_read(uc->rchan,
+ UDMA_RCHAN_RT_PEER_BCNT_REG);
+
+ if (peer_bcnt > bcnt)
+ delay = peer_bcnt - bcnt;
+ }
+ } else {
+ bcnt = udma_tchanrt_read(uc->tchan,
+ UDMA_TCHAN_RT_BCNT_REG);
+ }
+
+ bcnt -= uc->bcnt;
+ if (bcnt && !(bcnt % uc->desc->residue))
+ residue = 0;
+ else
+ residue -= bcnt % uc->desc->residue;
+
+ if (!residue && (uc->config.dir == DMA_DEV_TO_MEM || !delay)) {
+ ret = DMA_COMPLETE;
+ delay = 0;
+ }
+
+ dma_set_residue(txstate, residue);
+ dma_set_in_flight_bytes(txstate, delay);
+
+ } else {
+ ret = DMA_COMPLETE;
+ }
+
+out:
+ spin_unlock_irqrestore(&uc->vc.lock, flags);
+ return ret;
+}
+
+static int udma_pause(struct dma_chan *chan)
+{
+ struct udma_chan *uc = to_udma_chan(chan);
+
+ if (!uc->desc)
+ return -EINVAL;
+
+ /* pause the channel */
+ switch (uc->desc->dir) {
+ case DMA_DEV_TO_MEM:
+ udma_rchanrt_update_bits(uc->rchan,
+ UDMA_RCHAN_RT_PEER_RT_EN_REG,
+ UDMA_PEER_RT_EN_PAUSE,
+ UDMA_PEER_RT_EN_PAUSE);
+ break;
+ case DMA_MEM_TO_DEV:
+ udma_tchanrt_update_bits(uc->tchan,
+ UDMA_TCHAN_RT_PEER_RT_EN_REG,
+ UDMA_PEER_RT_EN_PAUSE,
+ UDMA_PEER_RT_EN_PAUSE);
+ break;
+ case DMA_MEM_TO_MEM:
+ udma_tchanrt_update_bits(uc->tchan, UDMA_TCHAN_RT_CTL_REG,
+ UDMA_CHAN_RT_CTL_PAUSE,
+ UDMA_CHAN_RT_CTL_PAUSE);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int udma_resume(struct dma_chan *chan)
+{
+ struct udma_chan *uc = to_udma_chan(chan);
+
+ if (!uc->desc)
+ return -EINVAL;
+
+ /* resume the channel */
+ switch (uc->desc->dir) {
+ case DMA_DEV_TO_MEM:
+ udma_rchanrt_update_bits(uc->rchan,
+ UDMA_RCHAN_RT_PEER_RT_EN_REG,
+ UDMA_PEER_RT_EN_PAUSE, 0);
+
+ break;
+ case DMA_MEM_TO_DEV:
+ udma_tchanrt_update_bits(uc->tchan,
+ UDMA_TCHAN_RT_PEER_RT_EN_REG,
+ UDMA_PEER_RT_EN_PAUSE, 0);
+ break;
+ case DMA_MEM_TO_MEM:
+ udma_tchanrt_update_bits(uc->tchan, UDMA_TCHAN_RT_CTL_REG,
+ UDMA_CHAN_RT_CTL_PAUSE, 0);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int udma_terminate_all(struct dma_chan *chan)
+{
+ struct udma_chan *uc = to_udma_chan(chan);
+ unsigned long flags;
+ LIST_HEAD(head);
+
+ spin_lock_irqsave(&uc->vc.lock, flags);
+
+ if (udma_is_chan_running(uc))
+ udma_stop(uc);
+
+ if (uc->desc) {
+ uc->terminated_desc = uc->desc;
+ uc->desc = NULL;
+ uc->terminated_desc->terminated = true;
+ cancel_delayed_work(&uc->tx_drain.work);
+ }
+
+ uc->paused = false;
+
+ vchan_get_all_descriptors(&uc->vc, &head);
+ spin_unlock_irqrestore(&uc->vc.lock, flags);
+ vchan_dma_desc_free_list(&uc->vc, &head);
+
+ return 0;
+}
+
+static void udma_synchronize(struct dma_chan *chan)
+{
+ struct udma_chan *uc = to_udma_chan(chan);
+ unsigned long timeout = msecs_to_jiffies(1000);
+
+ vchan_synchronize(&uc->vc);
+
+ if (uc->state == UDMA_CHAN_IS_TERMINATING) {
+ timeout = wait_for_completion_timeout(&uc->teardown_completed,
+ timeout);
+ if (!timeout) {
+ dev_warn(uc->ud->dev, "chan%d teardown timeout!\n",
+ uc->id);
+ udma_dump_chan_stdata(uc);
+ udma_reset_chan(uc, true);
+ }
+ }
+
+ udma_reset_chan(uc, false);
+ if (udma_is_chan_running(uc))
+ dev_warn(uc->ud->dev, "chan%d refused to stop!\n", uc->id);
+
+ cancel_delayed_work_sync(&uc->tx_drain.work);
+ udma_reset_rings(uc);
+}
+
+static void udma_desc_pre_callback(struct virt_dma_chan *vc,
+ struct virt_dma_desc *vd,
+ struct dmaengine_result *result)
+{
+ struct udma_chan *uc = to_udma_chan(&vc->chan);
+ struct udma_desc *d;
+
+ if (!vd)
+ return;
+
+ d = to_udma_desc(&vd->tx);
+
+ if (d->metadata_size)
+ udma_fetch_epib(uc, d);
+
+ /* Provide residue information for the client */
+ if (result) {
+ void *desc_vaddr = udma_curr_cppi5_desc_vaddr(d, d->desc_idx);
+
+ if (cppi5_desc_get_type(desc_vaddr) ==
+ CPPI5_INFO0_DESC_TYPE_VAL_HOST) {
+ result->residue = d->residue -
+ cppi5_hdesc_get_pktlen(desc_vaddr);
+ if (result->residue)
+ result->result = DMA_TRANS_ABORTED;
+ else
+ result->result = DMA_TRANS_NOERROR;
+ } else {
+ result->residue = 0;
+ result->result = DMA_TRANS_NOERROR;
+ }
+ }
+}
+
+/*
+ * This tasklet handles the completion of a DMA descriptor by
+ * calling its callback and freeing it.
+ */
+static void udma_vchan_complete(unsigned long arg)
+{
+ struct virt_dma_chan *vc = (struct virt_dma_chan *)arg;
+ struct virt_dma_desc *vd, *_vd;
+ struct dmaengine_desc_callback cb;
+ LIST_HEAD(head);
+
+ spin_lock_irq(&vc->lock);
+ list_splice_tail_init(&vc->desc_completed, &head);
+ vd = vc->cyclic;
+ if (vd) {
+ vc->cyclic = NULL;
+ dmaengine_desc_get_callback(&vd->tx, &cb);
+ } else {
+ memset(&cb, 0, sizeof(cb));
+ }
+ spin_unlock_irq(&vc->lock);
+
+ udma_desc_pre_callback(vc, vd, NULL);
+ dmaengine_desc_callback_invoke(&cb, NULL);
+
+ list_for_each_entry_safe(vd, _vd, &head, node) {
+ struct dmaengine_result result;
+
+ dmaengine_desc_get_callback(&vd->tx, &cb);
+
+ list_del(&vd->node);
+
+ udma_desc_pre_callback(vc, vd, &result);
+ dmaengine_desc_callback_invoke(&cb, &result);
+
+ vchan_vdesc_fini(vd);
+ }
+}
+
+static void udma_free_chan_resources(struct dma_chan *chan)
+{
+ struct udma_chan *uc = to_udma_chan(chan);
+ struct udma_dev *ud = to_udma_dev(chan->device);
+
+ udma_terminate_all(chan);
+ if (uc->terminated_desc) {
+ udma_reset_chan(uc, false);
+ udma_reset_rings(uc);
+ }
+
+ cancel_delayed_work_sync(&uc->tx_drain.work);
+
+ if (uc->irq_num_ring > 0) {
+ free_irq(uc->irq_num_ring, uc);
+
+ uc->irq_num_ring = 0;
+ }
+ if (uc->irq_num_udma > 0) {
+ free_irq(uc->irq_num_udma, uc);
+
+ uc->irq_num_udma = 0;
+ }
+
+ /* Release PSI-L pairing */
+ if (uc->psil_paired) {
+ navss_psil_unpair(ud, uc->config.src_thread,
+ uc->config.dst_thread);
+ uc->psil_paired = false;
+ }
+
+ vchan_free_chan_resources(&uc->vc);
+ tasklet_kill(&uc->vc.task);
+
+ udma_free_tx_resources(uc);
+ udma_free_rx_resources(uc);
+ udma_reset_uchan(uc);
+
+ if (uc->use_dma_pool) {
+ dma_pool_destroy(uc->hdesc_pool);
+ uc->use_dma_pool = false;
+ }
+}
+
+static struct platform_driver udma_driver;
+
+static bool udma_dma_filter_fn(struct dma_chan *chan, void *param)
+{
+ struct udma_chan_config *ucc;
+ struct psil_endpoint_config *ep_config;
+ struct udma_chan *uc;
+ struct udma_dev *ud;
+ u32 *args;
+
+ if (chan->device->dev->driver != &udma_driver.driver)
+ return false;
+
+ uc = to_udma_chan(chan);
+ ucc = &uc->config;
+ ud = uc->ud;
+ args = param;
+
+ ucc->remote_thread_id = args[0];
+
+ if (ucc->remote_thread_id & K3_PSIL_DST_THREAD_ID_OFFSET)
+ ucc->dir = DMA_MEM_TO_DEV;
+ else
+ ucc->dir = DMA_DEV_TO_MEM;
+
+ ep_config = psil_get_ep_config(ucc->remote_thread_id);
+ if (IS_ERR(ep_config)) {
+ dev_err(ud->dev, "No configuration for psi-l thread 0x%04x\n",
+ ucc->remote_thread_id);
+ ucc->dir = DMA_MEM_TO_MEM;
+ ucc->remote_thread_id = -1;
+ return false;
+ }
+
+ ucc->pkt_mode = ep_config->pkt_mode;
+ ucc->channel_tpl = ep_config->channel_tpl;
+ ucc->notdpkt = ep_config->notdpkt;
+ ucc->ep_type = ep_config->ep_type;
+
+ if (ucc->ep_type != PSIL_EP_NATIVE) {
+ const struct udma_match_data *match_data = ud->match_data;
+
+ if (match_data->flags & UDMA_FLAG_PDMA_ACC32)
+ ucc->enable_acc32 = ep_config->pdma_acc32;
+ if (match_data->flags & UDMA_FLAG_PDMA_BURST)
+ ucc->enable_burst = ep_config->pdma_burst;
+ }
+
+ ucc->needs_epib = ep_config->needs_epib;
+ ucc->psd_size = ep_config->psd_size;
+ ucc->metadata_size =
+ (ucc->needs_epib ? CPPI5_INFO0_HDESC_EPIB_SIZE : 0) +
+ ucc->psd_size;
+
+ if (ucc->pkt_mode)
+ ucc->hdesc_size = ALIGN(sizeof(struct cppi5_host_desc_t) +
+ ucc->metadata_size, ud->desc_align);
+
+ dev_dbg(ud->dev, "chan%d: Remote thread: 0x%04x (%s)\n", uc->id,
+ ucc->remote_thread_id, dmaengine_get_direction_text(ucc->dir));
+
+ return true;
+}
+
+static struct dma_chan *udma_of_xlate(struct of_phandle_args *dma_spec,
+ struct of_dma *ofdma)
+{
+ struct udma_dev *ud = ofdma->of_dma_data;
+ dma_cap_mask_t mask = ud->ddev.cap_mask;
+ struct dma_chan *chan;
+
+ if (dma_spec->args_count != 1)
+ return NULL;
+
+ chan = __dma_request_channel(&mask, udma_dma_filter_fn,
+ &dma_spec->args[0], ofdma->of_node);
+ if (!chan) {
+ dev_err(ud->dev, "get channel fail in %s.\n", __func__);
+ return ERR_PTR(-EINVAL);
+ }
+
+ return chan;
+}
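+
+ /*
+ * A consumer references the controller with a single cell carrying the
+ * remote PSI-L thread ID; bit 15 (K3_PSIL_DST_THREAD_ID_OFFSET) set
+ * marks a destination (TX) thread. Hypothetical DT fragment (the thread
+ * IDs are illustrative only):
+ *
+ *	dmas = <&main_udmap 0xc400>, <&main_udmap 0x4400>;
+ *	dma-names = "tx", "rx";
+ */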
+
+static struct udma_match_data am654_main_data = {
+ .psil_base = 0x1000,
+ .enable_memcpy_support = true,
+ .statictr_z_mask = GENMASK(11, 0),
+ .rchan_oes_offset = 0x2000,
+ .tpl_levels = 2,
+ .level_start_idx = {
+ [0] = 8, /* Normal channels */
+ [1] = 0, /* High Throughput channels */
+ },
+};
+
+static struct udma_match_data am654_mcu_data = {
+ .psil_base = 0x6000,
+ .enable_memcpy_support = true, /* TEST: DMA domains */
+ .statictr_z_mask = GENMASK(11, 0),
+ .rchan_oes_offset = 0x2000,
+ .tpl_levels = 2,
+ .level_start_idx = {
+ [0] = 2, /* Normal channels */
+ [1] = 0, /* High Throughput channels */
+ },
+};
+
+static struct udma_match_data j721e_main_data = {
+ .psil_base = 0x1000,
+ .enable_memcpy_support = true,
+ .flags = UDMA_FLAG_PDMA_ACC32 | UDMA_FLAG_PDMA_BURST,
+ .statictr_z_mask = GENMASK(23, 0),
+ .rchan_oes_offset = 0x400,
+ .tpl_levels = 3,
+ .level_start_idx = {
+ [0] = 16, /* Normal channels */
+ [1] = 4, /* High Throughput channels */
+ [2] = 0, /* Ultra High Throughput channels */
+ },
+};
+
+static struct udma_match_data j721e_mcu_data = {
+ .psil_base = 0x6000,
+ .enable_memcpy_support = false, /* MEM_TO_MEM is slow via MCU UDMA */
+ .flags = UDMA_FLAG_PDMA_ACC32 | UDMA_FLAG_PDMA_BURST,
+ .statictr_z_mask = GENMASK(23, 0),
+ .rchan_oes_offset = 0x400,
+ .tpl_levels = 2,
+ .level_start_idx = {
+ [0] = 2, /* Normal channels */
+ [1] = 0, /* High Throughput channels */
+ },
+};
+
+static const struct of_device_id udma_of_match[] = {
+ {
+ .compatible = "ti,am654-navss-main-udmap",
+ .data = &am654_main_data,
+ },
+ {
+ .compatible = "ti,am654-navss-mcu-udmap",
+ .data = &am654_mcu_data,
+ }, {
+ .compatible = "ti,j721e-navss-main-udmap",
+ .data = &j721e_main_data,
+ }, {
+ .compatible = "ti,j721e-navss-mcu-udmap",
+ .data = &j721e_mcu_data,
+ },
+ { /* Sentinel */ },
+};
+
+static int udma_get_mmrs(struct platform_device *pdev, struct udma_dev *ud)
+{
+ struct resource *res;
+ int i;
+
+ for (i = 0; i < MMR_LAST; i++) {
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
+ mmr_names[i]);
+ ud->mmrs[i] = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(ud->mmrs[i]))
+ return PTR_ERR(ud->mmrs[i]);
+ }
+
+ return 0;
+}
+
+static int udma_setup_resources(struct udma_dev *ud)
+{
+ struct device *dev = ud->dev;
+ int ch_count, ret, i, j;
+ u32 cap2, cap3;
+ struct ti_sci_resource_desc *rm_desc;
+ struct ti_sci_resource *rm_res, irq_res;
+ struct udma_tisci_rm *tisci_rm = &ud->tisci_rm;
+ static const char * const range_names[] = { "ti,sci-rm-range-tchan",
+ "ti,sci-rm-range-rchan",
+ "ti,sci-rm-range-rflow" };
+
+ cap2 = udma_read(ud->mmrs[MMR_GCFG], 0x28);
+ cap3 = udma_read(ud->mmrs[MMR_GCFG], 0x2c);
+
+ ud->rflow_cnt = cap3 & 0x3fff;
+ ud->tchan_cnt = cap2 & 0x1ff;
+ ud->echan_cnt = (cap2 >> 9) & 0x1ff;
+ ud->rchan_cnt = (cap2 >> 18) & 0x1ff;
+ ch_count = ud->tchan_cnt + ud->rchan_cnt;
+
+ ud->tchan_map = devm_kmalloc_array(dev, BITS_TO_LONGS(ud->tchan_cnt),
+ sizeof(unsigned long), GFP_KERNEL);
+ ud->tchans = devm_kcalloc(dev, ud->tchan_cnt, sizeof(*ud->tchans),
+ GFP_KERNEL);
+ ud->rchan_map = devm_kmalloc_array(dev, BITS_TO_LONGS(ud->rchan_cnt),
+ sizeof(unsigned long), GFP_KERNEL);
+ ud->rchans = devm_kcalloc(dev, ud->rchan_cnt, sizeof(*ud->rchans),
+ GFP_KERNEL);
+ ud->rflow_gp_map = devm_kmalloc_array(dev, BITS_TO_LONGS(ud->rflow_cnt),
+ sizeof(unsigned long),
+ GFP_KERNEL);
+ ud->rflow_gp_map_allocated = devm_kcalloc(dev,
+ BITS_TO_LONGS(ud->rflow_cnt),
+ sizeof(unsigned long),
+ GFP_KERNEL);
+ ud->rflow_in_use = devm_kcalloc(dev, BITS_TO_LONGS(ud->rflow_cnt),
+ sizeof(unsigned long),
+ GFP_KERNEL);
+ ud->rflows = devm_kcalloc(dev, ud->rflow_cnt, sizeof(*ud->rflows),
+ GFP_KERNEL);
+
+ if (!ud->tchan_map || !ud->rchan_map || !ud->rflow_gp_map ||
+ !ud->rflow_gp_map_allocated || !ud->tchans || !ud->rchans ||
+ !ud->rflows || !ud->rflow_in_use)
+ return -ENOMEM;
+
+ /*
+ * RX flows with the same IDs as RX channels are reserved to be used
+ * as default flows if the remote HW can't generate flow_ids. Those
+ * RX flows can be requested only explicitly, by ID.
+ */
+ bitmap_set(ud->rflow_gp_map_allocated, 0, ud->rchan_cnt);
+
+ /* by default no GP rflows are assigned to Linux */
+ bitmap_set(ud->rflow_gp_map, 0, ud->rflow_cnt);
+
+ /* Get resource ranges from tisci */
+ for (i = 0; i < RM_RANGE_LAST; i++)
+ tisci_rm->rm_ranges[i] =
+ devm_ti_sci_get_of_resource(tisci_rm->tisci, dev,
+ tisci_rm->tisci_dev_id,
+ (char *)range_names[i]);
+
+ /* tchan ranges */
+ rm_res = tisci_rm->rm_ranges[RM_RANGE_TCHAN];
+ if (IS_ERR(rm_res)) {
+ bitmap_zero(ud->tchan_map, ud->tchan_cnt);
+ } else {
+ bitmap_fill(ud->tchan_map, ud->tchan_cnt);
+ for (i = 0; i < rm_res->sets; i++) {
+ rm_desc = &rm_res->desc[i];
+ bitmap_clear(ud->tchan_map, rm_desc->start,
+ rm_desc->num);
+ dev_dbg(dev, "ti-sci-res: tchan: %d:%d\n",
+ rm_desc->start, rm_desc->num);
+ }
+ }
+ irq_res.sets = rm_res->sets;
+
+ /* rchan and matching default flow ranges */
+ rm_res = tisci_rm->rm_ranges[RM_RANGE_RCHAN];
+ if (IS_ERR(rm_res)) {
+ bitmap_zero(ud->rchan_map, ud->rchan_cnt);
+ } else {
+ bitmap_fill(ud->rchan_map, ud->rchan_cnt);
+ for (i = 0; i < rm_res->sets; i++) {
+ rm_desc = &rm_res->desc[i];
+ bitmap_clear(ud->rchan_map, rm_desc->start,
+ rm_desc->num);
+ dev_dbg(dev, "ti-sci-res: rchan: %d:%d\n",
+ rm_desc->start, rm_desc->num);
+ }
+ }
+
+ irq_res.sets += rm_res->sets;
+ irq_res.desc = kcalloc(irq_res.sets, sizeof(*irq_res.desc), GFP_KERNEL);
+ rm_res = tisci_rm->rm_ranges[RM_RANGE_TCHAN];
+ for (i = 0; i < rm_res->sets; i++) {
+ irq_res.desc[i].start = rm_res->desc[i].start;
+ irq_res.desc[i].num = rm_res->desc[i].num;
+ }
+ rm_res = tisci_rm->rm_ranges[RM_RANGE_RCHAN];
+ for (j = 0; j < rm_res->sets; j++, i++) {
+ irq_res.desc[i].start = rm_res->desc[j].start +
+ ud->match_data->rchan_oes_offset;
+ irq_res.desc[i].num = rm_res->desc[j].num;
+ }
+ ret = ti_sci_inta_msi_domain_alloc_irqs(ud->dev, &irq_res);
+ kfree(irq_res.desc);
+ if (ret) {
+ dev_err(ud->dev, "Failed to allocate MSI interrupts\n");
+ return ret;
+ }
+
+ /* GP rflow ranges */
+ rm_res = tisci_rm->rm_ranges[RM_RANGE_RFLOW];
+ if (IS_ERR(rm_res)) {
+ /* all gp flows are assigned exclusively to Linux */
+ bitmap_clear(ud->rflow_gp_map, ud->rchan_cnt,
+ ud->rflow_cnt - ud->rchan_cnt);
+ } else {
+ for (i = 0; i < rm_res->sets; i++) {
+ rm_desc = &rm_res->desc[i];
+ bitmap_clear(ud->rflow_gp_map, rm_desc->start,
+ rm_desc->num);
+ dev_dbg(dev, "ti-sci-res: rflow: %d:%d\n",
+ rm_desc->start, rm_desc->num);
+ }
+ }
+
+ ch_count -= bitmap_weight(ud->tchan_map, ud->tchan_cnt);
+ ch_count -= bitmap_weight(ud->rchan_map, ud->rchan_cnt);
+ if (!ch_count)
+ return -ENODEV;
+
+ ud->channels = devm_kcalloc(dev, ch_count, sizeof(*ud->channels),
+ GFP_KERNEL);
+ if (!ud->channels)
+ return -ENOMEM;
+
+ dev_info(dev, "Channels: %d (tchan: %u, rchan: %u, gp-rflow: %u)\n",
+ ch_count,
+ ud->tchan_cnt - bitmap_weight(ud->tchan_map, ud->tchan_cnt),
+ ud->rchan_cnt - bitmap_weight(ud->rchan_map, ud->rchan_cnt),
+ ud->rflow_cnt - bitmap_weight(ud->rflow_gp_map,
+ ud->rflow_cnt));
+
+ return ch_count;
+}
+
+#define TI_UDMAC_BUSWIDTHS (BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) | \
+ BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) | \
+ BIT(DMA_SLAVE_BUSWIDTH_3_BYTES) | \
+ BIT(DMA_SLAVE_BUSWIDTH_4_BYTES) | \
+ BIT(DMA_SLAVE_BUSWIDTH_8_BYTES))
+
+static int udma_probe(struct platform_device *pdev)
+{
+ struct device_node *navss_node = pdev->dev.parent->of_node;
+ struct device *dev = &pdev->dev;
+ struct udma_dev *ud;
+ const struct of_device_id *match;
+ int i, ret;
+ int ch_count;
+
+ ret = dma_coerce_mask_and_coherent(dev, DMA_BIT_MASK(48));
+ if (ret)
+ dev_err(dev, "failed to set dma mask stuff\n");
+
+ ud = devm_kzalloc(dev, sizeof(*ud), GFP_KERNEL);
+ if (!ud)
+ return -ENOMEM;
+
+ ret = udma_get_mmrs(pdev, ud);
+ if (ret)
+ return ret;
+
+ ud->tisci_rm.tisci = ti_sci_get_by_phandle(dev->of_node, "ti,sci");
+ if (IS_ERR(ud->tisci_rm.tisci))
+ return PTR_ERR(ud->tisci_rm.tisci);
+
+ ret = of_property_read_u32(dev->of_node, "ti,sci-dev-id",
+ &ud->tisci_rm.tisci_dev_id);
+ if (ret) {
+ dev_err(dev, "ti,sci-dev-id read failure %d\n", ret);
+ return ret;
+ }
+ pdev->id = ud->tisci_rm.tisci_dev_id;
+
+ ret = of_property_read_u32(navss_node, "ti,sci-dev-id",
+ &ud->tisci_rm.tisci_navss_dev_id);
+ if (ret) {
+ dev_err(dev, "NAVSS ti,sci-dev-id read failure %d\n", ret);
+ return ret;
+ }
+
+ ud->tisci_rm.tisci_udmap_ops = &ud->tisci_rm.tisci->ops.rm_udmap_ops;
+ ud->tisci_rm.tisci_psil_ops = &ud->tisci_rm.tisci->ops.rm_psil_ops;
+
+ ud->ringacc = of_k3_ringacc_get_by_phandle(dev->of_node, "ti,ringacc");
+ if (IS_ERR(ud->ringacc))
+ return PTR_ERR(ud->ringacc);
+
+ dev->msi_domain = of_msi_get_domain(dev, dev->of_node,
+ DOMAIN_BUS_TI_SCI_INTA_MSI);
+ if (!dev->msi_domain) {
+ dev_err(dev, "Failed to get MSI domain\n");
+ return -EPROBE_DEFER;
+ }
+
+ match = of_match_node(udma_of_match, dev->of_node);
+ if (!match) {
+ dev_err(dev, "No compatible match found\n");
+ return -ENODEV;
+ }
+ ud->match_data = match->data;
+
+ dma_cap_set(DMA_SLAVE, ud->ddev.cap_mask);
+ dma_cap_set(DMA_CYCLIC, ud->ddev.cap_mask);
+
+ ud->ddev.device_alloc_chan_resources = udma_alloc_chan_resources;
+ ud->ddev.device_config = udma_slave_config;
+ ud->ddev.device_prep_slave_sg = udma_prep_slave_sg;
+ ud->ddev.device_prep_dma_cyclic = udma_prep_dma_cyclic;
+ ud->ddev.device_issue_pending = udma_issue_pending;
+ ud->ddev.device_tx_status = udma_tx_status;
+ ud->ddev.device_pause = udma_pause;
+ ud->ddev.device_resume = udma_resume;
+ ud->ddev.device_terminate_all = udma_terminate_all;
+ ud->ddev.device_synchronize = udma_synchronize;
+
+ ud->ddev.device_free_chan_resources = udma_free_chan_resources;
+ ud->ddev.src_addr_widths = TI_UDMAC_BUSWIDTHS;
+ ud->ddev.dst_addr_widths = TI_UDMAC_BUSWIDTHS;
+ ud->ddev.directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
+ ud->ddev.residue_granularity = DMA_RESIDUE_GRANULARITY_BURST;
+ ud->ddev.copy_align = DMAENGINE_ALIGN_8_BYTES;
+ ud->ddev.desc_metadata_modes = DESC_METADATA_CLIENT |
+ DESC_METADATA_ENGINE;
+ if (ud->match_data->enable_memcpy_support) {
+ dma_cap_set(DMA_MEMCPY, ud->ddev.cap_mask);
+ ud->ddev.device_prep_dma_memcpy = udma_prep_dma_memcpy;
+ ud->ddev.directions |= BIT(DMA_MEM_TO_MEM);
+ }
+
+ ud->ddev.dev = dev;
+ ud->dev = dev;
+ ud->psil_base = ud->match_data->psil_base;
+
+ INIT_LIST_HEAD(&ud->ddev.channels);
+ INIT_LIST_HEAD(&ud->desc_to_purge);
+
+ ch_count = udma_setup_resources(ud);
+ if (ch_count <= 0)
+ return ch_count;
+
+ spin_lock_init(&ud->lock);
+ INIT_WORK(&ud->purge_work, udma_purge_desc_work);
+
+ ud->desc_align = 64;
+ if (ud->desc_align < dma_get_cache_alignment())
+ ud->desc_align = dma_get_cache_alignment();
+
+ for (i = 0; i < ud->tchan_cnt; i++) {
+ struct udma_tchan *tchan = &ud->tchans[i];
+
+ tchan->id = i;
+ tchan->reg_rt = ud->mmrs[MMR_TCHANRT] + i * 0x1000;
+ }
+
+ for (i = 0; i < ud->rchan_cnt; i++) {
+ struct udma_rchan *rchan = &ud->rchans[i];
+
+ rchan->id = i;
+ rchan->reg_rt = ud->mmrs[MMR_RCHANRT] + i * 0x1000;
+ }
+
+ for (i = 0; i < ud->rflow_cnt; i++) {
+ struct udma_rflow *rflow = &ud->rflows[i];
+
+ rflow->id = i;
+ }
+
+ for (i = 0; i < ch_count; i++) {
+ struct udma_chan *uc = &ud->channels[i];
+
+ uc->ud = ud;
+ uc->vc.desc_free = udma_desc_free;
+ uc->id = i;
+ uc->tchan = NULL;
+ uc->rchan = NULL;
+ uc->config.remote_thread_id = -1;
+ uc->config.dir = DMA_MEM_TO_MEM;
+ uc->name = devm_kasprintf(dev, GFP_KERNEL, "%s chan%d",
+ dev_name(dev), i);
+
+ vchan_init(&uc->vc, &ud->ddev);
+ /* Use custom vchan completion handling */
+ tasklet_init(&uc->vc.task, udma_vchan_complete,
+ (unsigned long)&uc->vc);
+ init_completion(&uc->teardown_completed);
+ }
+
+ ret = dma_async_device_register(&ud->ddev);
+ if (ret) {
+ dev_err(dev, "failed to register slave DMA engine: %d\n", ret);
+ return ret;
+ }
+
+ platform_set_drvdata(pdev, ud);
+
+ ret = of_dma_controller_register(dev->of_node, udma_of_xlate, ud);
+ if (ret) {
+ dev_err(dev, "failed to register of_dma controller\n");
+ dma_async_device_unregister(&ud->ddev);
+ }
+
+ return ret;
+}
+
+static struct platform_driver udma_driver = {
+ .driver = {
+ .name = "ti-udma",
+ .of_match_table = udma_of_match,
+ .suppress_bind_attrs = true,
+ },
+ .probe = udma_probe,
+};
+builtin_platform_driver(udma_driver);
+
+/* Private interfaces to UDMA */
+#include "k3-udma-private.c"
diff --git a/drivers/dma/ti/k3-udma.h b/drivers/dma/ti/k3-udma.h
new file mode 100644
index 000000000000..128d8744a435
--- /dev/null
+++ b/drivers/dma/ti/k3-udma.h
@@ -0,0 +1,151 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2019 Texas Instruments Incorporated - http://www.ti.com
+ */
+
+#ifndef K3_UDMA_H_
+#define K3_UDMA_H_
+
+#include <linux/soc/ti/ti_sci_protocol.h>
+
+/* Global registers */
+#define UDMA_REV_REG 0x0
+#define UDMA_PERF_CTL_REG 0x4
+#define UDMA_EMU_CTL_REG 0x8
+#define UDMA_PSIL_TO_REG 0x10
+#define UDMA_UTC_CTL_REG 0x1c
+#define UDMA_CAP_REG(i) (0x20 + ((i) * 4))
+#define UDMA_RX_FLOW_ID_FW_OES_REG 0x80
+#define UDMA_RX_FLOW_ID_FW_STATUS_REG 0x88
+
+/* TX chan RT regs */
+#define UDMA_TCHAN_RT_CTL_REG 0x0
+#define UDMA_TCHAN_RT_SWTRIG_REG 0x8
+#define UDMA_TCHAN_RT_STDATA_REG 0x80
+
+#define UDMA_TCHAN_RT_PEER_REG(i) (0x200 + ((i) * 0x4))
+#define UDMA_TCHAN_RT_PEER_STATIC_TR_XY_REG \
+ UDMA_TCHAN_RT_PEER_REG(0) /* PSI-L: 0x400 */
+#define UDMA_TCHAN_RT_PEER_STATIC_TR_Z_REG \
+ UDMA_TCHAN_RT_PEER_REG(1) /* PSI-L: 0x401 */
+#define UDMA_TCHAN_RT_PEER_BCNT_REG \
+ UDMA_TCHAN_RT_PEER_REG(4) /* PSI-L: 0x404 */
+#define UDMA_TCHAN_RT_PEER_RT_EN_REG \
+ UDMA_TCHAN_RT_PEER_REG(8) /* PSI-L: 0x408 */
+
+#define UDMA_TCHAN_RT_PCNT_REG 0x400
+#define UDMA_TCHAN_RT_BCNT_REG 0x408
+#define UDMA_TCHAN_RT_SBCNT_REG 0x410
+
+/* RX chan RT regs */
+#define UDMA_RCHAN_RT_CTL_REG 0x0
+#define UDMA_RCHAN_RT_SWTRIG_REG 0x8
+#define UDMA_RCHAN_RT_STDATA_REG 0x80
+
+#define UDMA_RCHAN_RT_PEER_REG(i) (0x200 + ((i) * 0x4))
+#define UDMA_RCHAN_RT_PEER_STATIC_TR_XY_REG \
+ UDMA_RCHAN_RT_PEER_REG(0) /* PSI-L: 0x400 */
+#define UDMA_RCHAN_RT_PEER_STATIC_TR_Z_REG \
+ UDMA_RCHAN_RT_PEER_REG(1) /* PSI-L: 0x401 */
+#define UDMA_RCHAN_RT_PEER_BCNT_REG \
+ UDMA_RCHAN_RT_PEER_REG(4) /* PSI-L: 0x404 */
+#define UDMA_RCHAN_RT_PEER_RT_EN_REG \
+ UDMA_RCHAN_RT_PEER_REG(8) /* PSI-L: 0x408 */
+
+#define UDMA_RCHAN_RT_PCNT_REG 0x400
+#define UDMA_RCHAN_RT_BCNT_REG 0x408
+#define UDMA_RCHAN_RT_SBCNT_REG 0x410
+
+/* UDMA_TCHAN_RT_CTL_REG/UDMA_RCHAN_RT_CTL_REG */
+#define UDMA_CHAN_RT_CTL_EN BIT(31)
+#define UDMA_CHAN_RT_CTL_TDOWN BIT(30)
+#define UDMA_CHAN_RT_CTL_PAUSE BIT(29)
+#define UDMA_CHAN_RT_CTL_FTDOWN BIT(28)
+#define UDMA_CHAN_RT_CTL_ERROR BIT(0)
+
+/* UDMA_TCHAN_RT_PEER_RT_EN_REG/UDMA_RCHAN_RT_PEER_RT_EN_REG (PSI-L: 0x408) */
+#define UDMA_PEER_RT_EN_ENABLE BIT(31)
+#define UDMA_PEER_RT_EN_TEARDOWN BIT(30)
+#define UDMA_PEER_RT_EN_PAUSE BIT(29)
+#define UDMA_PEER_RT_EN_FLUSH BIT(28)
+#define UDMA_PEER_RT_EN_IDLE BIT(1)
+
+/*
+ * UDMA_TCHAN_RT_PEER_STATIC_TR_XY_REG /
+ * UDMA_RCHAN_RT_PEER_STATIC_TR_XY_REG
+ */
+#define PDMA_STATIC_TR_X_MASK GENMASK(26, 24)
+#define PDMA_STATIC_TR_X_SHIFT (24)
+#define PDMA_STATIC_TR_Y_MASK GENMASK(11, 0)
+#define PDMA_STATIC_TR_Y_SHIFT (0)
+
+#define PDMA_STATIC_TR_Y(x) \
+ (((x) << PDMA_STATIC_TR_Y_SHIFT) & PDMA_STATIC_TR_Y_MASK)
+#define PDMA_STATIC_TR_X(x) \
+ (((x) << PDMA_STATIC_TR_X_SHIFT) & PDMA_STATIC_TR_X_MASK)
+
+#define PDMA_STATIC_TR_XY_ACC32 BIT(30)
+#define PDMA_STATIC_TR_XY_BURST BIT(31)
+
+/*
+ * UDMA_TCHAN_RT_PEER_STATIC_TR_Z_REG /
+ * UDMA_RCHAN_RT_PEER_STATIC_TR_Z_REG
+ */
+#define PDMA_STATIC_TR_Z(x, mask) ((x) & (mask))
+
+struct udma_dev;
+struct udma_tchan;
+struct udma_rchan;
+struct udma_rflow;
+
+enum udma_rm_range {
+ RM_RANGE_TCHAN = 0,
+ RM_RANGE_RCHAN,
+ RM_RANGE_RFLOW,
+ RM_RANGE_LAST,
+};
+
+struct udma_tisci_rm {
+ const struct ti_sci_handle *tisci;
+ const struct ti_sci_rm_udmap_ops *tisci_udmap_ops;
+ u32 tisci_dev_id;
+
+ /* tisci information for PSI-L thread pairing/unpairing */
+ const struct ti_sci_rm_psil_ops *tisci_psil_ops;
+ u32 tisci_navss_dev_id;
+
+ struct ti_sci_resource *rm_ranges[RM_RANGE_LAST];
+};
+
+/* Direct access to UDMA low-level resources for the glue layer */
+int xudma_navss_psil_pair(struct udma_dev *ud, u32 src_thread, u32 dst_thread);
+int xudma_navss_psil_unpair(struct udma_dev *ud, u32 src_thread,
+ u32 dst_thread);
+
+struct udma_dev *of_xudma_dev_get(struct device_node *np, const char *property);
+void xudma_dev_put(struct udma_dev *ud);
+u32 xudma_dev_get_psil_base(struct udma_dev *ud);
+struct udma_tisci_rm *xudma_dev_get_tisci_rm(struct udma_dev *ud);
+
+int xudma_alloc_gp_rflow_range(struct udma_dev *ud, int from, int cnt);
+int xudma_free_gp_rflow_range(struct udma_dev *ud, int from, int cnt);
+
+struct udma_tchan *xudma_tchan_get(struct udma_dev *ud, int id);
+struct udma_rchan *xudma_rchan_get(struct udma_dev *ud, int id);
+struct udma_rflow *xudma_rflow_get(struct udma_dev *ud, int id);
+
+void xudma_tchan_put(struct udma_dev *ud, struct udma_tchan *p);
+void xudma_rchan_put(struct udma_dev *ud, struct udma_rchan *p);
+void xudma_rflow_put(struct udma_dev *ud, struct udma_rflow *p);
+
+int xudma_tchan_get_id(struct udma_tchan *p);
+int xudma_rchan_get_id(struct udma_rchan *p);
+int xudma_rflow_get_id(struct udma_rflow *p);
+
+u32 xudma_tchanrt_read(struct udma_tchan *tchan, int reg);
+void xudma_tchanrt_write(struct udma_tchan *tchan, int reg, u32 val);
+u32 xudma_rchanrt_read(struct udma_rchan *rchan, int reg);
+void xudma_rchanrt_write(struct udma_rchan *rchan, int reg, u32 val);
+bool xudma_rflow_is_gp(struct udma_dev *ud, int id);
+
+#endif /* K3_UDMA_H_ */
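
To illustrate how the static TR macros above compose into a peer register value, here is a hedged sketch; the element-size field X is assumed to be log2-encoded as elsewhere in the driver, but the concrete values are invented:

    /* Sketch: 4-byte elements (2^2), 8 elements per trigger, bursting on. */
    u32 val = PDMA_STATIC_TR_X(2) |
              PDMA_STATIC_TR_Y(8) |
              PDMA_STATIC_TR_XY_BURST;

    xudma_tchanrt_write(tchan, UDMA_TCHAN_RT_PEER_STATIC_TR_XY_REG, val);
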
diff --git a/drivers/dma/virt-dma.c b/drivers/dma/virt-dma.c
index 256fc662c500..23e33a85f033 100644
--- a/drivers/dma/virt-dma.c
+++ b/drivers/dma/virt-dma.c
@@ -114,13 +114,8 @@ void vchan_dma_desc_free_list(struct virt_dma_chan *vc, struct list_head *head)
struct virt_dma_desc *vd, *_vd;
list_for_each_entry_safe(vd, _vd, head, node) {
- if (dmaengine_desc_test_reuse(&vd->tx)) {
- list_move_tail(&vd->node, &vc->desc_allocated);
- } else {
- dev_dbg(vc->chan.device->dev, "txd %p: freeing\n", vd);
- list_del(&vd->node);
- vc->desc_free(vd);
- }
+ list_del(&vd->node);
+ vchan_vdesc_fini(vd);
}
}
EXPORT_SYMBOL_GPL(vchan_dma_desc_free_list);
@@ -134,6 +129,7 @@ void vchan_init(struct virt_dma_chan *vc, struct dma_device *dmadev)
INIT_LIST_HEAD(&vc->desc_submitted);
INIT_LIST_HEAD(&vc->desc_issued);
INIT_LIST_HEAD(&vc->desc_completed);
+ INIT_LIST_HEAD(&vc->desc_terminated);
tasklet_init(&vc->task, vchan_complete, (unsigned long)vc);
diff --git a/drivers/dma/virt-dma.h b/drivers/dma/virt-dma.h
index ab158bac03a7..e9f5250fbe4d 100644
--- a/drivers/dma/virt-dma.h
+++ b/drivers/dma/virt-dma.h
@@ -31,9 +31,9 @@ struct virt_dma_chan {
struct list_head desc_submitted;
struct list_head desc_issued;
struct list_head desc_completed;
+ struct list_head desc_terminated;
struct virt_dma_desc *cyclic;
- struct virt_dma_desc *vd_terminated;
};
static inline struct virt_dma_chan *to_virt_chan(struct dma_chan *chan)
@@ -113,10 +113,15 @@ static inline void vchan_vdesc_fini(struct virt_dma_desc *vd)
{
struct virt_dma_chan *vc = to_virt_chan(vd->tx.chan);
- if (dmaengine_desc_test_reuse(&vd->tx))
+ if (dmaengine_desc_test_reuse(&vd->tx)) {
+ unsigned long flags;
+
+ spin_lock_irqsave(&vc->lock, flags);
list_add(&vd->node, &vc->desc_allocated);
- else
+ spin_unlock_irqrestore(&vc->lock, flags);
+ } else {
vc->desc_free(vd);
+ }
}
/**
@@ -141,11 +146,8 @@ static inline void vchan_terminate_vdesc(struct virt_dma_desc *vd)
{
struct virt_dma_chan *vc = to_virt_chan(vd->tx.chan);
- /* free up stuck descriptor */
- if (vc->vd_terminated)
- vchan_vdesc_fini(vc->vd_terminated);
+ list_add_tail(&vd->node, &vc->desc_terminated);
- vc->vd_terminated = vd;
if (vc->cyclic == vd)
vc->cyclic = NULL;
}
@@ -179,6 +181,7 @@ static inline void vchan_get_all_descriptors(struct virt_dma_chan *vc,
list_splice_tail_init(&vc->desc_submitted, head);
list_splice_tail_init(&vc->desc_issued, head);
list_splice_tail_init(&vc->desc_completed, head);
+ list_splice_tail_init(&vc->desc_terminated, head);
}
static inline void vchan_free_chan_resources(struct virt_dma_chan *vc)
@@ -207,16 +210,18 @@ static inline void vchan_free_chan_resources(struct virt_dma_chan *vc)
*/
static inline void vchan_synchronize(struct virt_dma_chan *vc)
{
+ LIST_HEAD(head);
unsigned long flags;
tasklet_kill(&vc->task);
spin_lock_irqsave(&vc->lock, flags);
- if (vc->vd_terminated) {
- vchan_vdesc_fini(vc->vd_terminated);
- vc->vd_terminated = NULL;
- }
+
+ list_splice_tail_init(&vc->desc_terminated, &head);
+
spin_unlock_irqrestore(&vc->lock, flags);
+
+ vchan_dma_desc_free_list(vc, &head);
}
#endif
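
With desc_terminated in place, a descriptor the hardware still owns at terminate time is parked on the list via vchan_terminate_vdesc() and reclaimed later by vchan_synchronize(), which now splices the whole list and frees it outside the lock. A hypothetical driver's terminate path (foo_* is not a real driver) would look roughly like:

    static int foo_terminate_all(struct dma_chan *chan)
    {
        struct virt_dma_chan *vc = to_virt_chan(chan);
        unsigned long flags;
        LIST_HEAD(head);

        spin_lock_irqsave(&vc->lock, flags);
        /* Stop the hardware here. A descriptor still in flight goes
         * through vchan_terminate_vdesc() so it lands on
         * vc->desc_terminated instead of being freed under the lock. */
        vchan_get_all_descriptors(vc, &head);
        spin_unlock_irqrestore(&vc->lock, flags);

        vchan_dma_desc_free_list(vc, &head);
        return 0;
    }

The client then calls dmaengine_synchronize(), which reaches vchan_synchronize() and disposes of anything left on desc_terminated.
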
diff --git a/drivers/dma/xilinx/zynqmp_dma.c b/drivers/dma/xilinx/zynqmp_dma.c
index 9c845c07b107..d47749a35863 100644
--- a/drivers/dma/xilinx/zynqmp_dma.c
+++ b/drivers/dma/xilinx/zynqmp_dma.c
@@ -123,10 +123,12 @@
/* Max transfer size per descriptor */
#define ZYNQMP_DMA_MAX_TRANS_LEN 0x40000000
+/* Max burst lengths */
+#define ZYNQMP_DMA_MAX_DST_BURST_LEN 32768U
+#define ZYNQMP_DMA_MAX_SRC_BURST_LEN 32768U
+
/* Reset values for data attributes */
#define ZYNQMP_DMA_AXCACHE_VAL 0xF
-#define ZYNQMP_DMA_ARLEN_RST_VAL 0xF
-#define ZYNQMP_DMA_AWLEN_RST_VAL 0xF
#define ZYNQMP_DMA_SRC_ISSUE_RST_VAL 0x1F
@@ -534,17 +536,19 @@ static void zynqmp_dma_handle_ovfl_int(struct zynqmp_dma_chan *chan, u32 status)
static void zynqmp_dma_config(struct zynqmp_dma_chan *chan)
{
- u32 val;
+ u32 val, burst_val;
val = readl(chan->regs + ZYNQMP_DMA_CTRL0);
val |= ZYNQMP_DMA_POINT_TYPE_SG;
writel(val, chan->regs + ZYNQMP_DMA_CTRL0);
val = readl(chan->regs + ZYNQMP_DMA_DATA_ATTR);
+ burst_val = __ilog2_u32(chan->src_burst_len);
val = (val & ~ZYNQMP_DMA_ARLEN) |
- (chan->src_burst_len << ZYNQMP_DMA_ARLEN_OFST);
+ ((burst_val << ZYNQMP_DMA_ARLEN_OFST) & ZYNQMP_DMA_ARLEN);
+ burst_val = __ilog2_u32(chan->dst_burst_len);
val = (val & ~ZYNQMP_DMA_AWLEN) |
- (chan->dst_burst_len << ZYNQMP_DMA_AWLEN_OFST);
+ ((burst_val << ZYNQMP_DMA_AWLEN_OFST) & ZYNQMP_DMA_AWLEN);
writel(val, chan->regs + ZYNQMP_DMA_DATA_ATTR);
}
@@ -560,8 +564,10 @@ static int zynqmp_dma_device_config(struct dma_chan *dchan,
{
struct zynqmp_dma_chan *chan = to_chan(dchan);
- chan->src_burst_len = config->src_maxburst;
- chan->dst_burst_len = config->dst_maxburst;
+ chan->src_burst_len = clamp(config->src_maxburst, 1U,
+ ZYNQMP_DMA_MAX_SRC_BURST_LEN);
+ chan->dst_burst_len = clamp(config->dst_maxburst, 1U,
+ ZYNQMP_DMA_MAX_DST_BURST_LEN);
return 0;
}
@@ -887,8 +893,8 @@ static int zynqmp_dma_chan_probe(struct zynqmp_dma_device *zdev,
return PTR_ERR(chan->regs);
chan->bus_width = ZYNQMP_DMA_BUS_WIDTH_64;
- chan->dst_burst_len = ZYNQMP_DMA_AWLEN_RST_VAL;
- chan->src_burst_len = ZYNQMP_DMA_ARLEN_RST_VAL;
+ chan->dst_burst_len = ZYNQMP_DMA_MAX_DST_BURST_LEN;
+ chan->src_burst_len = ZYNQMP_DMA_MAX_SRC_BURST_LEN;
err = of_property_read_u32(node, "xlnx,bus-width", &chan->bus_width);
if (err < 0) {
dev_err(&pdev->dev, "missing xlnx,bus-width property\n");
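
The reworked burst handling stores the requested burst length in beats and only converts it to the AXI AxLEN log2 encoding when the register is programmed. The arithmetic, with illustrative inputs:

    /* clamp(0,     1, 32768) -> 1,     __ilog2_u32(1)     -> 0  (single beat)
     * clamp(100,   1, 32768) -> 100,   __ilog2_u32(100)   -> 6  (rounds down to 64)
     * clamp(70000, 1, 32768) -> 32768, __ilog2_u32(32768) -> 15 (hardware max) */
    burst_val = __ilog2_u32(chan->src_burst_len);
    val = (val & ~ZYNQMP_DMA_ARLEN) |
          ((burst_val << ZYNQMP_DMA_ARLEN_OFST) & ZYNQMP_DMA_ARLEN);

Note that a non-power-of-two maxburst is silently rounded down to the next power of two by the log2 conversion.
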
diff --git a/drivers/edac/Kconfig b/drivers/edac/Kconfig
index 5c8272329a65..b3c99bb5fe77 100644
--- a/drivers/edac/Kconfig
+++ b/drivers/edac/Kconfig
@@ -491,8 +491,7 @@ config EDAC_TI
tristate "Texas Instruments DDR3 ECC Controller"
depends on ARCH_KEYSTONE || SOC_DRA7XX
help
- Support for error detection and correction on the
- TI SoCs.
+ Support for error detection and correction on the TI SoCs.
config EDAC_QCOM
tristate "QCOM EDAC Controller"
diff --git a/drivers/edac/amd64_edac.c b/drivers/edac/amd64_edac.c
index 428ce98f6776..9fbad908a854 100644
--- a/drivers/edac/amd64_edac.c
+++ b/drivers/edac/amd64_edac.c
@@ -214,7 +214,7 @@ static int __set_scrub_rate(struct amd64_pvt *pvt, u32 new_bw, u32 min_rate)
scrubval = scrubrates[i].scrubval;
- if (pvt->fam == 0x17 || pvt->fam == 0x18) {
+ if (pvt->umc) {
__f17h_set_scrubval(pvt, scrubval);
} else if (pvt->fam == 0x15 && pvt->model == 0x60) {
f15h_select_dct(pvt, 0);
@@ -256,18 +256,7 @@ static int get_scrub_rate(struct mem_ctl_info *mci)
int i, retval = -EINVAL;
u32 scrubval = 0;
- switch (pvt->fam) {
- case 0x15:
- /* Erratum #505 */
- if (pvt->model < 0x10)
- f15h_select_dct(pvt, 0);
-
- if (pvt->model == 0x60)
- amd64_read_pci_cfg(pvt->F2, F15H_M60H_SCRCTRL, &scrubval);
- break;
-
- case 0x17:
- case 0x18:
+ if (pvt->umc) {
amd64_read_pci_cfg(pvt->F6, F17H_SCR_BASE_ADDR, &scrubval);
if (scrubval & BIT(0)) {
amd64_read_pci_cfg(pvt->F6, F17H_SCR_LIMIT_ADDR, &scrubval);
@@ -276,11 +265,15 @@ static int get_scrub_rate(struct mem_ctl_info *mci)
} else {
scrubval = 0;
}
- break;
+ } else if (pvt->fam == 0x15) {
+ /* Erratum #505 */
+ if (pvt->model < 0x10)
+ f15h_select_dct(pvt, 0);
- default:
+ if (pvt->model == 0x60)
+ amd64_read_pci_cfg(pvt->F2, F15H_M60H_SCRCTRL, &scrubval);
+ } else {
amd64_read_pci_cfg(pvt->F3, SCRCTRL, &scrubval);
- break;
}
scrubval = scrubval & 0x001F;
@@ -1055,6 +1048,16 @@ static void determine_memory_type(struct amd64_pvt *pvt)
{
u32 dram_ctrl, dcsm;
+ if (pvt->umc) {
+ if ((pvt->umc[0].dimm_cfg | pvt->umc[1].dimm_cfg) & BIT(5))
+ pvt->dram_type = MEM_LRDDR4;
+ else if ((pvt->umc[0].dimm_cfg | pvt->umc[1].dimm_cfg) & BIT(4))
+ pvt->dram_type = MEM_RDDR4;
+ else
+ pvt->dram_type = MEM_DDR4;
+ return;
+ }
+
switch (pvt->fam) {
case 0xf:
if (pvt->ext_model >= K8_REV_F)
@@ -1100,16 +1103,6 @@ static void determine_memory_type(struct amd64_pvt *pvt)
case 0x16:
goto ddr3;
- case 0x17:
- case 0x18:
- if ((pvt->umc[0].dimm_cfg | pvt->umc[1].dimm_cfg) & BIT(5))
- pvt->dram_type = MEM_LRDDR4;
- else if ((pvt->umc[0].dimm_cfg | pvt->umc[1].dimm_cfg) & BIT(4))
- pvt->dram_type = MEM_RDDR4;
- else
- pvt->dram_type = MEM_DDR4;
- return;
-
default:
WARN(1, KERN_ERR "%s: Family??? 0x%x\n", __func__, pvt->fam);
pvt->dram_type = MEM_EMPTY;
@@ -2336,6 +2329,16 @@ static struct amd64_family_type family_types[] = {
.dbam_to_cs = f17_addr_mask_to_cs_size,
}
},
+ [F19_CPUS] = {
+ .ctl_name = "F19h",
+ .f0_id = PCI_DEVICE_ID_AMD_19H_DF_F0,
+ .f6_id = PCI_DEVICE_ID_AMD_19H_DF_F6,
+ .max_mcs = 8,
+ .ops = {
+ .early_channel_count = f17_early_channel_count,
+ .dbam_to_cs = f17_addr_mask_to_cs_size,
+ }
+ },
};
/*
@@ -3368,6 +3371,12 @@ static struct amd64_family_type *per_family_init(struct amd64_pvt *pvt)
family_types[F17_CPUS].ctl_name = "F18h";
break;
+ case 0x19:
+ fam_type = &family_types[F19_CPUS];
+ pvt->ops = &family_types[F19_CPUS].ops;
+ family_types[F19_CPUS].ctl_name = "F19h";
+ break;
+
default:
amd64_err("Unsupported family!\n");
return NULL;
@@ -3573,9 +3582,6 @@ static void remove_one_instance(unsigned int nid)
struct mem_ctl_info *mci;
struct amd64_pvt *pvt;
- mci = find_mci_by_dev(&F3->dev);
- WARN_ON(!mci);
-
/* Remove from EDAC CORE tracking list */
mci = edac_mc_del_mc(&F3->dev);
if (!mci)
@@ -3626,6 +3632,7 @@ static const struct x86_cpu_id amd64_cpuids[] = {
{ X86_VENDOR_AMD, 0x16, X86_MODEL_ANY, X86_FEATURE_ANY, 0 },
{ X86_VENDOR_AMD, 0x17, X86_MODEL_ANY, X86_FEATURE_ANY, 0 },
{ X86_VENDOR_HYGON, 0x18, X86_MODEL_ANY, X86_FEATURE_ANY, 0 },
+ { X86_VENDOR_AMD, 0x19, X86_MODEL_ANY, X86_FEATURE_ANY, 0 },
{ }
};
MODULE_DEVICE_TABLE(x86cpu, amd64_cpuids);
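
A pattern worth noting across these hunks: rather than enumerating families (0x17, 0x18, and now 0x19), the code tests pvt->umc, which is only populated on systems with Unified Memory Controllers, so future SMCA families pick the right path automatically. The dispatch idiom in isolation, as a self-contained hypothetical (not driver code):

    struct pvt { void *umc; };

    static void use_umc_layout(struct pvt *p)    { /* DF/UMC (F17h+) path */ }
    static void use_legacy_layout(struct pvt *p) { /* per-family DCT path */ }

    static void pick_register_layout(struct pvt *p)
    {
        if (p->umc)         /* UMC array only populated on SMCA systems */
            use_umc_layout(p);
        else
            use_legacy_layout(p);
    }
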
diff --git a/drivers/edac/amd64_edac.h b/drivers/edac/amd64_edac.h
index 9be31688110b..abbf3c274d74 100644
--- a/drivers/edac/amd64_edac.h
+++ b/drivers/edac/amd64_edac.h
@@ -122,6 +122,8 @@
#define PCI_DEVICE_ID_AMD_17H_M30H_DF_F6 0x1496
#define PCI_DEVICE_ID_AMD_17H_M70H_DF_F0 0x1440
#define PCI_DEVICE_ID_AMD_17H_M70H_DF_F6 0x1446
+#define PCI_DEVICE_ID_AMD_19H_DF_F0 0x1650
+#define PCI_DEVICE_ID_AMD_19H_DF_F6 0x1656
/*
* Function 1 - Address Map
@@ -292,6 +294,7 @@ enum amd_families {
F17_M10H_CPUS,
F17_M30H_CPUS,
F17_M70H_CPUS,
+ F19_CPUS,
NUM_FAMILIES,
};
diff --git a/drivers/edac/aspeed_edac.c b/drivers/edac/aspeed_edac.c
index 09a9e3de9595..b194658b8b5c 100644
--- a/drivers/edac/aspeed_edac.c
+++ b/drivers/edac/aspeed_edac.c
@@ -243,7 +243,7 @@ static int init_csrows(struct mem_ctl_info *mci)
if (!np) {
dev_err(mci->pdev, "dt: missing /memory node\n");
return -ENODEV;
- };
+ }
rc = of_address_to_resource(np, 0, &r);
@@ -252,7 +252,7 @@ static int init_csrows(struct mem_ctl_info *mci)
if (rc) {
dev_err(mci->pdev, "dt: failed requesting resource for /memory node\n");
return rc;
- };
+ }
dev_dbg(mci->pdev, "dt: /memory node resources: first page r.start=0x%x, resource_size=0x%x, PAGE_SHIFT macro=0x%x\n",
r.start, resource_size(&r), PAGE_SHIFT);
diff --git a/drivers/edac/i3000_edac.c b/drivers/edac/i3000_edac.c
index f564a4a8a4ae..5c1eea96230c 100644
--- a/drivers/edac/i3000_edac.c
+++ b/drivers/edac/i3000_edac.c
@@ -324,7 +324,7 @@ static int i3000_probe1(struct pci_dev *pdev, int dev_idx)
pci_read_config_dword(pdev, I3000_MCHBAR, (u32 *) & mchbar);
mchbar &= I3000_MCHBAR_MASK;
- window = ioremap_nocache(mchbar, I3000_MMR_WINDOW_SIZE);
+ window = ioremap(mchbar, I3000_MMR_WINDOW_SIZE);
if (!window) {
printk(KERN_ERR "i3000: cannot map mmio space at 0x%lx\n",
mchbar);
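
This hunk and the ioremap conversions that follow are part of the tree-wide removal of ioremap_nocache(): plain ioremap() already returns an uncached MMIO mapping on the architectures these drivers build for, so the calls convert one-for-one with no behavioural change. The usual pattern, sketched with an illustrative error value:

    void __iomem *window;

    window = ioremap(mchbar, I3000_MMR_WINDOW_SIZE);  /* uncached MMIO */
    if (!window)
        return -ENOMEM;     /* illustrative; the driver logs and bails */
    /* ... register accesses via readl()/writel() ... */
    iounmap(window);
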
diff --git a/drivers/edac/i3200_edac.c b/drivers/edac/i3200_edac.c
index 432b375a4075..a8988db6d423 100644
--- a/drivers/edac/i3200_edac.c
+++ b/drivers/edac/i3200_edac.c
@@ -280,7 +280,7 @@ static void __iomem *i3200_map_mchbar(struct pci_dev *pdev)
return NULL;
}
- window = ioremap_nocache(u.mchbar, I3200_MMR_WINDOW_SIZE);
+ window = ioremap(u.mchbar, I3200_MMR_WINDOW_SIZE);
if (!window)
printk(KERN_ERR "i3200: cannot map mmio space at 0x%llx\n",
(unsigned long long)u.mchbar);
diff --git a/drivers/edac/i5100_edac.c b/drivers/edac/i5100_edac.c
index 0ddc41e47a96..191aa7c19ded 100644
--- a/drivers/edac/i5100_edac.c
+++ b/drivers/edac/i5100_edac.c
@@ -259,11 +259,6 @@ static inline u32 i5100_nrecmemb_ras(u32 a)
return a & ((1 << 16) - 1);
}
-static inline u32 i5100_redmemb_ecc_locator(u32 a)
-{
- return a & ((1 << 18) - 1);
-}
-
static inline u32 i5100_recmema_merr(u32 a)
{
return i5100_nrecmema_merr(a);
@@ -486,7 +481,6 @@ static void i5100_read_log(struct mem_ctl_info *mci, int chan,
u32 dw;
u32 dw2;
unsigned syndrome = 0;
- unsigned ecc_loc = 0;
unsigned merr;
unsigned bank;
unsigned rank;
@@ -499,7 +493,6 @@ static void i5100_read_log(struct mem_ctl_info *mci, int chan,
pci_read_config_dword(pdev, I5100_REDMEMA, &dw2);
syndrome = dw2;
pci_read_config_dword(pdev, I5100_REDMEMB, &dw2);
- ecc_loc = i5100_redmemb_ecc_locator(dw2);
}
if (i5100_validlog_recmemvalid(dw)) {
diff --git a/drivers/edac/i82975x_edac.c b/drivers/edac/i82975x_edac.c
index 7c6a2d4d2360..6be99e0d850d 100644
--- a/drivers/edac/i82975x_edac.c
+++ b/drivers/edac/i82975x_edac.c
@@ -485,7 +485,7 @@ static int i82975x_probe1(struct pci_dev *pdev, int dev_idx)
goto fail0;
}
mchbar &= 0xffffc000; /* bits 31:14 used for 16K window */
- mch_window = ioremap_nocache(mchbar, 0x1000);
+ mch_window = ioremap(mchbar, 0x1000);
if (!mch_window) {
edac_dbg(3, "error ioremapping MCHBAR!\n");
goto fail0;
diff --git a/drivers/edac/ie31200_edac.c b/drivers/edac/ie31200_edac.c
index 4f65073f230b..d68346a8e141 100644
--- a/drivers/edac/ie31200_edac.c
+++ b/drivers/edac/ie31200_edac.c
@@ -357,7 +357,7 @@ static void __iomem *ie31200_map_mchbar(struct pci_dev *pdev)
return NULL;
}
- window = ioremap_nocache(u.mchbar, IE31200_MMR_WINDOW_SIZE);
+ window = ioremap(u.mchbar, IE31200_MMR_WINDOW_SIZE);
if (!window)
ie31200_printk(KERN_ERR, "Cannot map mmio space at 0x%llx\n",
(unsigned long long)u.mchbar);
diff --git a/drivers/edac/mce_amd.c b/drivers/edac/mce_amd.c
index ea622c6f3a39..ea980c556f2e 100644
--- a/drivers/edac/mce_amd.c
+++ b/drivers/edac/mce_amd.c
@@ -6,7 +6,7 @@
#include "mce_amd.h"
-static struct amd_decoder_ops *fam_ops;
+static struct amd_decoder_ops fam_ops;
static u8 xec_mask = 0xf;
@@ -175,6 +175,33 @@ static const char * const smca_ls_mce_desc[] = {
"L2 Fill Data error",
};
+static const char * const smca_ls2_mce_desc[] = {
+ "An ECC error was detected on a data cache read by a probe or victimization",
+ "An ECC error or L2 poison was detected on a data cache read by a load",
+ "An ECC error was detected on a data cache read-modify-write by a store",
+ "An ECC error or poison bit mismatch was detected on a tag read by a probe or victimization",
+ "An ECC error or poison bit mismatch was detected on a tag read by a load",
+ "An ECC error or poison bit mismatch was detected on a tag read by a store",
+ "An ECC error was detected on an EMEM read by a load",
+ "An ECC error was detected on an EMEM read-modify-write by a store",
+ "A parity error was detected in an L1 TLB entry by any access",
+ "A parity error was detected in an L2 TLB entry by any access",
+ "A parity error was detected in a PWC entry by any access",
+ "A parity error was detected in an STQ entry by any access",
+ "A parity error was detected in an LDQ entry by any access",
+ "A parity error was detected in a MAB entry by any access",
+ "A parity error was detected in an SCB entry state field by any access",
+ "A parity error was detected in an SCB entry address field by any access",
+ "A parity error was detected in an SCB entry data field by any access",
+ "A parity error was detected in a WCB entry by any access",
+ "A poisoned line was detected in an SCB entry by any access",
+ "A SystemReadDataError error was reported on read data returned from L2 for a load",
+ "A SystemReadDataError error was reported on read data returned from L2 for an SCB store",
+ "A SystemReadDataError error was reported on read data returned from L2 for a WCB store",
+ "A hardware assertion error was reported",
+ "A parity error was detected in an STLF, SCB EMEM entry or SRB store data by any access",
+};
+
static const char * const smca_if_mce_desc[] = {
"Op Cache Microtag Probe Port Parity Error",
"IC Microtag or Full Tag Multi-hit Error",
@@ -378,6 +405,7 @@ struct smca_mce_desc {
static struct smca_mce_desc smca_mce_descs[] = {
[SMCA_LS] = { smca_ls_mce_desc, ARRAY_SIZE(smca_ls_mce_desc) },
+ [SMCA_LS_V2] = { smca_ls2_mce_desc, ARRAY_SIZE(smca_ls2_mce_desc) },
[SMCA_IF] = { smca_if_mce_desc, ARRAY_SIZE(smca_if_mce_desc) },
[SMCA_L2_CACHE] = { smca_l2_mce_desc, ARRAY_SIZE(smca_l2_mce_desc) },
[SMCA_DE] = { smca_de_mce_desc, ARRAY_SIZE(smca_de_mce_desc) },
@@ -555,7 +583,7 @@ static void decode_mc0_mce(struct mce *m)
: (xec ? "multimatch" : "parity")));
return;
}
- } else if (fam_ops->mc0_mce(ec, xec))
+ } else if (fam_ops.mc0_mce(ec, xec))
;
else
pr_emerg(HW_ERR "Corrupted MC0 MCE info?\n");
@@ -669,7 +697,7 @@ static void decode_mc1_mce(struct mce *m)
pr_cont("Hardware Assert.\n");
else
goto wrong_mc1_mce;
- } else if (fam_ops->mc1_mce(ec, xec))
+ } else if (fam_ops.mc1_mce(ec, xec))
;
else
goto wrong_mc1_mce;
@@ -803,7 +831,7 @@ static void decode_mc2_mce(struct mce *m)
pr_emerg(HW_ERR "MC2 Error: ");
- if (!fam_ops->mc2_mce(ec, xec))
+ if (!fam_ops.mc2_mce(ec, xec))
pr_cont(HW_ERR "Corrupted MC2 MCE info?\n");
}
@@ -1102,7 +1130,8 @@ amd_decode_mce(struct notifier_block *nb, unsigned long val, void *data)
if (m->tsc)
pr_emerg(HW_ERR "TSC: %llu\n", m->tsc);
- if (!fam_ops)
+ /* Doesn't matter which member to test. */
+ if (!fam_ops.mc0_mce)
goto err_code;
switch (m->bank) {
@@ -1157,80 +1186,73 @@ static int __init mce_amd_init(void)
c->x86_vendor != X86_VENDOR_HYGON)
return -ENODEV;
- fam_ops = kzalloc(sizeof(struct amd_decoder_ops), GFP_KERNEL);
- if (!fam_ops)
- return -ENOMEM;
+ if (boot_cpu_has(X86_FEATURE_SMCA)) {
+ xec_mask = 0x3f;
+ goto out;
+ }
switch (c->x86) {
case 0xf:
- fam_ops->mc0_mce = k8_mc0_mce;
- fam_ops->mc1_mce = k8_mc1_mce;
- fam_ops->mc2_mce = k8_mc2_mce;
+ fam_ops.mc0_mce = k8_mc0_mce;
+ fam_ops.mc1_mce = k8_mc1_mce;
+ fam_ops.mc2_mce = k8_mc2_mce;
break;
case 0x10:
- fam_ops->mc0_mce = f10h_mc0_mce;
- fam_ops->mc1_mce = k8_mc1_mce;
- fam_ops->mc2_mce = k8_mc2_mce;
+ fam_ops.mc0_mce = f10h_mc0_mce;
+ fam_ops.mc1_mce = k8_mc1_mce;
+ fam_ops.mc2_mce = k8_mc2_mce;
break;
case 0x11:
- fam_ops->mc0_mce = k8_mc0_mce;
- fam_ops->mc1_mce = k8_mc1_mce;
- fam_ops->mc2_mce = k8_mc2_mce;
+ fam_ops.mc0_mce = k8_mc0_mce;
+ fam_ops.mc1_mce = k8_mc1_mce;
+ fam_ops.mc2_mce = k8_mc2_mce;
break;
case 0x12:
- fam_ops->mc0_mce = f12h_mc0_mce;
- fam_ops->mc1_mce = k8_mc1_mce;
- fam_ops->mc2_mce = k8_mc2_mce;
+ fam_ops.mc0_mce = f12h_mc0_mce;
+ fam_ops.mc1_mce = k8_mc1_mce;
+ fam_ops.mc2_mce = k8_mc2_mce;
break;
case 0x14:
- fam_ops->mc0_mce = cat_mc0_mce;
- fam_ops->mc1_mce = cat_mc1_mce;
- fam_ops->mc2_mce = k8_mc2_mce;
+ fam_ops.mc0_mce = cat_mc0_mce;
+ fam_ops.mc1_mce = cat_mc1_mce;
+ fam_ops.mc2_mce = k8_mc2_mce;
break;
case 0x15:
xec_mask = c->x86_model == 0x60 ? 0x3f : 0x1f;
- fam_ops->mc0_mce = f15h_mc0_mce;
- fam_ops->mc1_mce = f15h_mc1_mce;
- fam_ops->mc2_mce = f15h_mc2_mce;
+ fam_ops.mc0_mce = f15h_mc0_mce;
+ fam_ops.mc1_mce = f15h_mc1_mce;
+ fam_ops.mc2_mce = f15h_mc2_mce;
break;
case 0x16:
xec_mask = 0x1f;
- fam_ops->mc0_mce = cat_mc0_mce;
- fam_ops->mc1_mce = cat_mc1_mce;
- fam_ops->mc2_mce = f16h_mc2_mce;
+ fam_ops.mc0_mce = cat_mc0_mce;
+ fam_ops.mc1_mce = cat_mc1_mce;
+ fam_ops.mc2_mce = f16h_mc2_mce;
break;
case 0x17:
case 0x18:
- xec_mask = 0x3f;
- if (!boot_cpu_has(X86_FEATURE_SMCA)) {
- printk(KERN_WARNING "Decoding supported only on Scalable MCA processors.\n");
- goto err_out;
- }
- break;
+ pr_warn("Decoding supported only on Scalable MCA processors.\n");
+ return -EINVAL;
default:
printk(KERN_WARNING "Huh? What family is it: 0x%x?!\n", c->x86);
- goto err_out;
+ return -EINVAL;
}
+out:
pr_info("MCE: In-kernel MCE decoding enabled.\n");
mce_register_decode_chain(&amd_mce_dec_nb);
return 0;
-
-err_out:
- kfree(fam_ops);
- fam_ops = NULL;
- return -EINVAL;
}
early_initcall(mce_amd_init);
@@ -1238,7 +1260,6 @@ early_initcall(mce_amd_init);
static void __exit mce_amd_exit(void)
{
mce_unregister_decode_chain(&amd_mce_dec_nb);
- kfree(fam_ops);
}
MODULE_DESCRIPTION("AMD MCE decoder");
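
Converting fam_ops from a kzalloc()'ed pointer to a static struct drops an allocation that had no useful failure mode this early in boot; "initialized" is now expressed by testing any mandatory member, as the comment in amd_decode_mce() says. The idiom in isolation (a hypothetical example, not driver code):

    struct decoder_ops {
        bool (*mc0_mce)(u16 ec, u8 xec);
    };

    static struct decoder_ops ops;      /* static storage: members start NULL */

    static int decode(u16 ec, u8 xec)
    {
        if (!ops.mc0_mce)               /* not initialized; nothing to free */
            return -ENODEV;
        return ops.mc0_mce(ec, xec) ? 0 : -EINVAL;
    }
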
diff --git a/drivers/edac/sifive_edac.c b/drivers/edac/sifive_edac.c
index c0cc72a3b2be..3a3dcb14ed99 100644
--- a/drivers/edac/sifive_edac.c
+++ b/drivers/edac/sifive_edac.c
@@ -54,8 +54,8 @@ static int ecc_register(struct platform_device *pdev)
p->dci = edac_device_alloc_ctl_info(0, "sifive_ecc", 1, "sifive_ecc",
1, 1, NULL, 0,
edac_device_alloc_index());
- if (IS_ERR(p->dci))
- return PTR_ERR(p->dci);
+ if (!p->dci)
+ return -ENOMEM;
p->dci->dev = &pdev->dev;
p->dci->mod_name = "Sifive ECC Manager";
diff --git a/drivers/edac/skx_common.c b/drivers/edac/skx_common.c
index 95662a4ff4c4..99bbaf629b8d 100644
--- a/drivers/edac/skx_common.c
+++ b/drivers/edac/skx_common.c
@@ -256,7 +256,7 @@ int skx_get_hi_lo(unsigned int did, int off[], u64 *tolm, u64 *tohm)
pdev = pci_get_device(PCI_VENDOR_ID_INTEL, did, NULL);
if (!pdev) {
- skx_printk(KERN_ERR, "Can't get tolm/tohm\n");
+ edac_dbg(2, "Can't get tolm/tohm\n");
return -ENODEV;
}
diff --git a/drivers/edac/x38_edac.c b/drivers/edac/x38_edac.c
index cc779f3f9e2d..a65e2f78a402 100644
--- a/drivers/edac/x38_edac.c
+++ b/drivers/edac/x38_edac.c
@@ -266,7 +266,7 @@ static void __iomem *x38_map_mchbar(struct pci_dev *pdev)
return NULL;
}
- window = ioremap_nocache(u.mchbar, X38_MMR_WINDOW_SIZE);
+ window = ioremap(u.mchbar, X38_MMR_WINDOW_SIZE);
if (!window)
printk(KERN_ERR "x38: cannot map mmio space at 0x%llx\n",
(unsigned long long)u.mchbar);
diff --git a/drivers/extcon/extcon-arizona.c b/drivers/extcon/extcon-arizona.c
index e970134c95fa..7401733db08b 100644
--- a/drivers/extcon/extcon-arizona.c
+++ b/drivers/extcon/extcon-arizona.c
@@ -77,8 +77,6 @@ struct arizona_extcon_info {
const struct arizona_micd_range *micd_ranges;
int num_micd_ranges;
- int micd_timeout;
-
bool micd_reva;
bool micd_clamp;
@@ -310,9 +308,13 @@ static void arizona_start_mic(struct arizona_extcon_info *info)
}
if (info->micd_reva) {
- regmap_write(arizona->regmap, 0x80, 0x3);
- regmap_write(arizona->regmap, 0x294, 0);
- regmap_write(arizona->regmap, 0x80, 0x0);
+ const struct reg_sequence reva[] = {
+ { 0x80, 0x3 },
+ { 0x294, 0x0 },
+ { 0x80, 0x0 },
+ };
+
+ regmap_multi_reg_write(arizona->regmap, reva, ARRAY_SIZE(reva));
}
if (info->detecting && arizona->pdata.micd_software_compare)
@@ -361,9 +363,13 @@ static void arizona_stop_mic(struct arizona_extcon_info *info)
snd_soc_dapm_sync(dapm);
if (info->micd_reva) {
- regmap_write(arizona->regmap, 0x80, 0x3);
- regmap_write(arizona->regmap, 0x294, 2);
- regmap_write(arizona->regmap, 0x80, 0x0);
+ const struct reg_sequence reva[] = {
+ { 0x80, 0x3 },
+ { 0x294, 0x2 },
+ { 0x80, 0x0 },
+ };
+
+ regmap_multi_reg_write(arizona->regmap, reva, ARRAY_SIZE(reva));
}
ret = regulator_allow_bypass(info->micvdd, true);
@@ -527,67 +533,65 @@ static int arizona_hpdet_do_id(struct arizona_extcon_info *info, int *reading,
struct arizona *arizona = info->arizona;
int id_gpio = arizona->pdata.hpdet_id_gpio;
+ if (!arizona->pdata.hpdet_acc_id)
+ return 0;
+
/*
* If we're using HPDET for accessory identification we need
 * to take multiple measurements, stepping through them in sequence.
*/
- if (arizona->pdata.hpdet_acc_id) {
- info->hpdet_res[info->num_hpdet_res++] = *reading;
+ info->hpdet_res[info->num_hpdet_res++] = *reading;
- /* Only check the mic directly if we didn't already ID it */
- if (id_gpio && info->num_hpdet_res == 1) {
- dev_dbg(arizona->dev, "Measuring mic\n");
+ /* Only check the mic directly if we didn't already ID it */
+ if (id_gpio && info->num_hpdet_res == 1) {
+ dev_dbg(arizona->dev, "Measuring mic\n");
- regmap_update_bits(arizona->regmap,
- ARIZONA_ACCESSORY_DETECT_MODE_1,
- ARIZONA_ACCDET_MODE_MASK |
- ARIZONA_ACCDET_SRC,
- ARIZONA_ACCDET_MODE_HPR |
- info->micd_modes[0].src);
+ regmap_update_bits(arizona->regmap,
+ ARIZONA_ACCESSORY_DETECT_MODE_1,
+ ARIZONA_ACCDET_MODE_MASK |
+ ARIZONA_ACCDET_SRC,
+ ARIZONA_ACCDET_MODE_HPR |
+ info->micd_modes[0].src);
- gpio_set_value_cansleep(id_gpio, 1);
+ gpio_set_value_cansleep(id_gpio, 1);
- regmap_update_bits(arizona->regmap,
- ARIZONA_HEADPHONE_DETECT_1,
- ARIZONA_HP_POLL, ARIZONA_HP_POLL);
- return -EAGAIN;
- }
-
- /* OK, got both. Now, compare... */
- dev_dbg(arizona->dev, "HPDET measured %d %d\n",
- info->hpdet_res[0], info->hpdet_res[1]);
+ regmap_update_bits(arizona->regmap, ARIZONA_HEADPHONE_DETECT_1,
+ ARIZONA_HP_POLL, ARIZONA_HP_POLL);
+ return -EAGAIN;
+ }
- /* Take the headphone impedance for the main report */
- *reading = info->hpdet_res[0];
+ /* OK, got both. Now, compare... */
+ dev_dbg(arizona->dev, "HPDET measured %d %d\n",
+ info->hpdet_res[0], info->hpdet_res[1]);
- /* Sometimes we get false readings due to slow insert */
- if (*reading >= ARIZONA_HPDET_MAX && !info->hpdet_retried) {
- dev_dbg(arizona->dev, "Retrying high impedance\n");
- info->num_hpdet_res = 0;
- info->hpdet_retried = true;
- arizona_start_hpdet_acc_id(info);
- pm_runtime_put(info->dev);
- return -EAGAIN;
- }
+ /* Take the headphone impedance for the main report */
+ *reading = info->hpdet_res[0];
- /*
- * If we measure the mic as high impedance
- */
- if (!id_gpio || info->hpdet_res[1] > 50) {
- dev_dbg(arizona->dev, "Detected mic\n");
- *mic = true;
- info->detecting = true;
- } else {
- dev_dbg(arizona->dev, "Detected headphone\n");
- }
+ /* Sometimes we get false readings due to slow insert */
+ if (*reading >= ARIZONA_HPDET_MAX && !info->hpdet_retried) {
+ dev_dbg(arizona->dev, "Retrying high impedance\n");
+ info->num_hpdet_res = 0;
+ info->hpdet_retried = true;
+ arizona_start_hpdet_acc_id(info);
+ pm_runtime_put(info->dev);
+ return -EAGAIN;
+ }
- /* Make sure everything is reset back to the real polarity */
- regmap_update_bits(arizona->regmap,
- ARIZONA_ACCESSORY_DETECT_MODE_1,
- ARIZONA_ACCDET_SRC,
- info->micd_modes[0].src);
+ /*
+ * If we measure the mic as high impedance, we have found a
+ * microphone; otherwise report a headphone.
+ */
+ if (!id_gpio || info->hpdet_res[1] > 50) {
+ dev_dbg(arizona->dev, "Detected mic\n");
+ *mic = true;
+ info->detecting = true;
+ } else {
+ dev_dbg(arizona->dev, "Detected headphone\n");
}
+ /* Make sure everything is reset back to the real polarity */
+ regmap_update_bits(arizona->regmap, ARIZONA_ACCESSORY_DETECT_MODE_1,
+ ARIZONA_ACCDET_SRC, info->micd_modes[0].src);
+
return 0;
}
@@ -662,11 +666,6 @@ done:
if (id_gpio)
gpio_set_value_cansleep(id_gpio, 0);
- /* Revert back to MICDET mode */
- regmap_update_bits(arizona->regmap,
- ARIZONA_ACCESSORY_DETECT_MODE_1,
- ARIZONA_ACCDET_MODE_MASK, ARIZONA_ACCDET_MODE_MIC);
-
/* If we have a mic then reenable MICDET */
if (mic || info->mic)
arizona_start_mic(info);
@@ -699,8 +698,7 @@ static void arizona_identify_headphone(struct arizona_extcon_info *info)
info->hpdet_active = true;
- if (info->mic)
- arizona_stop_mic(info);
+ arizona_stop_mic(info);
arizona_extcon_hp_clamp(info, true);
@@ -724,8 +722,8 @@ static void arizona_identify_headphone(struct arizona_extcon_info *info)
return;
err:
- regmap_update_bits(arizona->regmap, ARIZONA_ACCESSORY_DETECT_MODE_1,
- ARIZONA_ACCDET_MODE_MASK, ARIZONA_ACCDET_MODE_MIC);
+ arizona_extcon_hp_clamp(info, false);
+ pm_runtime_put_autosuspend(info->dev);
/* Just report headphone */
ret = extcon_set_state_sync(info->edev, EXTCON_JACK_HEADPHONE, true);
@@ -781,9 +779,6 @@ static void arizona_start_hpdet_acc_id(struct arizona_extcon_info *info)
return;
err:
- regmap_update_bits(arizona->regmap, ARIZONA_ACCESSORY_DETECT_MODE_1,
- ARIZONA_ACCDET_MODE_MASK, ARIZONA_ACCDET_MODE_MIC);
-
/* Just report headphone */
ret = extcon_set_state_sync(info->edev, EXTCON_JACK_HEADPHONE, true);
if (ret != 0)
@@ -806,75 +801,58 @@ static void arizona_micd_timeout_work(struct work_struct *work)
arizona_identify_headphone(info);
- arizona_stop_mic(info);
-
mutex_unlock(&info->lock);
}
-static void arizona_micd_detect(struct work_struct *work)
+static int arizona_micd_adc_read(struct arizona_extcon_info *info)
{
- struct arizona_extcon_info *info = container_of(work,
- struct arizona_extcon_info,
- micd_detect_work.work);
struct arizona *arizona = info->arizona;
- unsigned int val = 0, lvl;
- int ret, i, key;
-
- cancel_delayed_work_sync(&info->micd_timeout_work);
+ unsigned int val;
+ int ret;
- mutex_lock(&info->lock);
+ /* Must disable MICD before we read the ADCVAL */
+ regmap_update_bits(arizona->regmap, ARIZONA_MIC_DETECT_1,
+ ARIZONA_MICD_ENA, 0);
- /* If the cable was removed while measuring ignore the result */
- ret = extcon_get_state(info->edev, EXTCON_MECHANICAL);
- if (ret < 0) {
- dev_err(arizona->dev, "Failed to check cable state: %d\n",
- ret);
- mutex_unlock(&info->lock);
- return;
- } else if (!ret) {
- dev_dbg(arizona->dev, "Ignoring MICDET for removed cable\n");
- mutex_unlock(&info->lock);
- return;
+ ret = regmap_read(arizona->regmap, ARIZONA_MIC_DETECT_4, &val);
+ if (ret != 0) {
+ dev_err(arizona->dev,
+ "Failed to read MICDET_ADCVAL: %d\n", ret);
+ return ret;
}
- if (info->detecting && arizona->pdata.micd_software_compare) {
- /* Must disable MICD before we read the ADCVAL */
- regmap_update_bits(arizona->regmap, ARIZONA_MIC_DETECT_1,
- ARIZONA_MICD_ENA, 0);
- ret = regmap_read(arizona->regmap, ARIZONA_MIC_DETECT_4, &val);
- if (ret != 0) {
- dev_err(arizona->dev,
- "Failed to read MICDET_ADCVAL: %d\n",
- ret);
- mutex_unlock(&info->lock);
- return;
- }
+ dev_dbg(arizona->dev, "MICDET_ADCVAL: %x\n", val);
- dev_dbg(arizona->dev, "MICDET_ADCVAL: %x\n", val);
+ val &= ARIZONA_MICDET_ADCVAL_MASK;
+ if (val < ARRAY_SIZE(arizona_micd_levels))
+ val = arizona_micd_levels[val];
+ else
+ val = INT_MAX;
+
+ if (val <= QUICK_HEADPHONE_MAX_OHM)
+ val = ARIZONA_MICD_STS | ARIZONA_MICD_LVL_0;
+ else if (val <= MICROPHONE_MIN_OHM)
+ val = ARIZONA_MICD_STS | ARIZONA_MICD_LVL_1;
+ else if (val <= MICROPHONE_MAX_OHM)
+ val = ARIZONA_MICD_STS | ARIZONA_MICD_LVL_8;
+ else
+ val = ARIZONA_MICD_LVL_8;
- val &= ARIZONA_MICDET_ADCVAL_MASK;
- if (val < ARRAY_SIZE(arizona_micd_levels))
- val = arizona_micd_levels[val];
- else
- val = INT_MAX;
-
- if (val <= QUICK_HEADPHONE_MAX_OHM)
- val = ARIZONA_MICD_STS | ARIZONA_MICD_LVL_0;
- else if (val <= MICROPHONE_MIN_OHM)
- val = ARIZONA_MICD_STS | ARIZONA_MICD_LVL_1;
- else if (val <= MICROPHONE_MAX_OHM)
- val = ARIZONA_MICD_STS | ARIZONA_MICD_LVL_8;
- else
- val = ARIZONA_MICD_LVL_8;
- }
+ return val;
+}
+
+static int arizona_micd_read(struct arizona_extcon_info *info)
+{
+ struct arizona *arizona = info->arizona;
+ unsigned int val = 0;
+ int ret, i;
for (i = 0; i < 10 && !(val & MICD_LVL_0_TO_8); i++) {
ret = regmap_read(arizona->regmap, ARIZONA_MIC_DETECT_3, &val);
if (ret != 0) {
dev_err(arizona->dev,
"Failed to read MICDET: %d\n", ret);
- mutex_unlock(&info->lock);
- return;
+ return ret;
}
dev_dbg(arizona->dev, "MICDET: %x\n", val);
@@ -882,29 +860,44 @@ static void arizona_micd_detect(struct work_struct *work)
if (!(val & ARIZONA_MICD_VALID)) {
dev_warn(arizona->dev,
"Microphone detection state invalid\n");
- mutex_unlock(&info->lock);
- return;
+ return -EINVAL;
}
}
if (i == 10 && !(val & MICD_LVL_0_TO_8)) {
dev_err(arizona->dev, "Failed to get valid MICDET value\n");
- mutex_unlock(&info->lock);
- return;
+ return -EINVAL;
}
+ return val;
+}
+
+static int arizona_micdet_reading(void *priv)
+{
+ struct arizona_extcon_info *info = priv;
+ struct arizona *arizona = info->arizona;
+ int ret, val;
+
+ if (info->detecting && arizona->pdata.micd_software_compare)
+ ret = arizona_micd_adc_read(info);
+ else
+ ret = arizona_micd_read(info);
+ if (ret < 0)
+ return ret;
+
+ val = ret;
+
/* Due to jack detect this should never happen */
if (!(val & ARIZONA_MICD_STS)) {
dev_warn(arizona->dev, "Detected open circuit\n");
info->mic = false;
- arizona_stop_mic(info);
info->detecting = false;
arizona_identify_headphone(info);
- goto handled;
+ return 0;
}
 /* If we got a high impedance we should have a headset, report it. */
- if (info->detecting && (val & ARIZONA_MICD_LVL_8)) {
+ if (val & ARIZONA_MICD_LVL_8) {
info->mic = true;
info->detecting = false;
@@ -923,7 +916,7 @@ static void arizona_micd_detect(struct work_struct *work)
ret);
}
- goto handled;
+ return 0;
}
 /* If we detected a lower impedance during initial startup
@@ -932,15 +925,13 @@ static void arizona_micd_detect(struct work_struct *work)
* plain headphones. If both polarities report a low
 * impedance then give up and report headphones.
*/
- if (info->detecting && (val & MICD_LVL_1_TO_7)) {
+ if (val & MICD_LVL_1_TO_7) {
if (info->jack_flips >= info->micd_num_modes * 10) {
dev_dbg(arizona->dev, "Detected HP/line\n");
info->detecting = false;
arizona_identify_headphone(info);
-
- arizona_stop_mic(info);
} else {
info->micd_mode++;
if (info->micd_mode == info->micd_num_modes)
@@ -948,13 +939,45 @@ static void arizona_micd_detect(struct work_struct *work)
arizona_extcon_set_mode(info, info->micd_mode);
info->jack_flips++;
+
+ if (arizona->pdata.micd_software_compare)
+ regmap_update_bits(arizona->regmap,
+ ARIZONA_MIC_DETECT_1,
+ ARIZONA_MICD_ENA,
+ ARIZONA_MICD_ENA);
+
+ queue_delayed_work(system_power_efficient_wq,
+ &info->micd_timeout_work,
+ msecs_to_jiffies(arizona->pdata.micd_timeout));
}
- goto handled;
+ return 0;
}
/*
* If we're still detecting and we detect a short then we've
+ * got a headphone.
+ */
+ dev_dbg(arizona->dev, "Headphone detected\n");
+ info->detecting = false;
+
+ arizona_identify_headphone(info);
+
+ return 0;
+}
+
+static int arizona_button_reading(void *priv)
+{
+ struct arizona_extcon_info *info = priv;
+ struct arizona *arizona = info->arizona;
+ int val, key, lvl, i;
+
+ val = arizona_micd_read(info);
+ if (val < 0)
+ return val;
+
+ /*
+ * If we're still detecting and we detect a short then we've
* got a headphone. Otherwise it's a button press.
*/
if (val & MICD_LVL_0_TO_7) {
@@ -968,20 +991,13 @@ static void arizona_micd_detect(struct work_struct *work)
input_report_key(info->input,
info->micd_ranges[i].key, 0);
- WARN_ON(!lvl);
- WARN_ON(ffs(lvl) - 1 >= info->num_micd_ranges);
if (lvl && ffs(lvl) - 1 < info->num_micd_ranges) {
key = info->micd_ranges[ffs(lvl) - 1].key;
input_report_key(info->input, key, 1);
input_sync(info->input);
+ } else {
+ dev_err(arizona->dev, "Button out of range\n");
}
-
- } else if (info->detecting) {
- dev_dbg(arizona->dev, "Headphone detected\n");
- info->detecting = false;
- arizona_stop_mic(info);
-
- arizona_identify_headphone(info);
} else {
dev_warn(arizona->dev, "Button with no mic: %x\n",
val);
@@ -995,19 +1011,39 @@ static void arizona_micd_detect(struct work_struct *work)
arizona_extcon_pulse_micbias(info);
}
-handled:
- if (info->detecting) {
- if (arizona->pdata.micd_software_compare)
- regmap_update_bits(arizona->regmap,
- ARIZONA_MIC_DETECT_1,
- ARIZONA_MICD_ENA,
- ARIZONA_MICD_ENA);
+ return 0;
+}
- queue_delayed_work(system_power_efficient_wq,
- &info->micd_timeout_work,
- msecs_to_jiffies(info->micd_timeout));
+static void arizona_micd_detect(struct work_struct *work)
+{
+ struct arizona_extcon_info *info = container_of(work,
+ struct arizona_extcon_info,
+ micd_detect_work.work);
+ struct arizona *arizona = info->arizona;
+ int ret;
+
+ cancel_delayed_work_sync(&info->micd_timeout_work);
+
+ mutex_lock(&info->lock);
+
+ /* If the cable was removed while measuring ignore the result */
+ ret = extcon_get_state(info->edev, EXTCON_MECHANICAL);
+ if (ret < 0) {
+ dev_err(arizona->dev, "Failed to check cable state: %d\n",
+ ret);
+ mutex_unlock(&info->lock);
+ return;
+ } else if (!ret) {
+ dev_dbg(arizona->dev, "Ignoring MICDET for removed cable\n");
+ mutex_unlock(&info->lock);
+ return;
}
+ if (info->detecting)
+ arizona_micdet_reading(info);
+ else
+ arizona_button_reading(info);
+
pm_runtime_mark_last_busy(info->dev);
mutex_unlock(&info->lock);
}
@@ -1125,7 +1161,7 @@ static irqreturn_t arizona_jackdet(int irq, void *data)
msecs_to_jiffies(HPDET_DEBOUNCE));
if (cancelled_mic) {
- int micd_timeout = info->micd_timeout;
+ int micd_timeout = arizona->pdata.micd_timeout;
queue_delayed_work(system_power_efficient_wq,
&info->micd_timeout_work,
@@ -1145,11 +1181,11 @@ static irqreturn_t arizona_jackdet(int irq, void *data)
dev_err(arizona->dev, "Mechanical report failed: %d\n",
ret);
- if (!arizona->pdata.hpdet_acc_id) {
- info->detecting = true;
- info->mic = false;
- info->jack_flips = 0;
+ info->detecting = true;
+ info->mic = false;
+ info->jack_flips = 0;
+ if (!arizona->pdata.hpdet_acc_id) {
arizona_start_mic(info);
} else {
queue_delayed_work(system_power_efficient_wq,
@@ -1202,11 +1238,6 @@ static irqreturn_t arizona_jackdet(int irq, void *data)
ARIZONA_MICD_CLAMP_DB | ARIZONA_JD1_DB);
}
- if (arizona->pdata.micd_timeout)
- info->micd_timeout = arizona->pdata.micd_timeout;
- else
- info->micd_timeout = DEFAULT_MICD_TIMEOUT;
-
out:
/* Clear trig_sts to make sure DCVDD is not forced up */
regmap_write(arizona->regmap, ARIZONA_AOD_WKUP_AND_TRIG,
@@ -1435,6 +1466,9 @@ static int arizona_extcon_probe(struct platform_device *pdev)
info->input->name = "Headset";
info->input->phys = "arizona/extcon";
+ if (!pdata->micd_timeout)
+ pdata->micd_timeout = DEFAULT_MICD_TIMEOUT;
+
if (pdata->num_micd_configs) {
info->micd_modes = pdata->micd_configs;
info->micd_num_modes = pdata->num_micd_configs;
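
Replacing the back-to-back regmap_write() calls with regmap_multi_reg_write() keeps each ordered register sequence in one table and lets regmap batch the I/O where the underlying bus supports it. The general shape of the call; the addresses are the driver's own, while the error handling is an assumption added for illustration:

    static const struct reg_sequence reva_seq[] = {
        { 0x80,  0x3 },     /* enter the test-key region, per the driver */
        { 0x294, 0x0 },
        { 0x80,  0x0 },     /* leave the test-key region */
    };
    int ret;

    ret = regmap_multi_reg_write(arizona->regmap, reva_seq,
                                 ARRAY_SIZE(reva_seq));
    if (ret)
        dev_err(arizona->dev, "REVA write sequence failed: %d\n", ret);
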
diff --git a/drivers/extcon/extcon-sm5502.c b/drivers/extcon/extcon-sm5502.c
index bcf65aaca5d2..106d4da647bd 100644
--- a/drivers/extcon/extcon-sm5502.c
+++ b/drivers/extcon/extcon-sm5502.c
@@ -249,7 +249,7 @@ static int sm5502_muic_set_path(struct sm5502_muic_info *info,
dev_err(info->dev, "Unknown DM_CON/DP_CON switch type (%d)\n",
con_sw);
return -EINVAL;
- };
+ }
switch (vbus_sw) {
case VBUSIN_SWITCH_OPEN:
@@ -268,7 +268,7 @@ static int sm5502_muic_set_path(struct sm5502_muic_info *info,
default:
dev_err(info->dev, "Unknown VBUS switch type (%d)\n", vbus_sw);
return -EINVAL;
- };
+ }
return 0;
}
@@ -357,13 +357,13 @@ static unsigned int sm5502_muic_get_cable_type(struct sm5502_muic_info *info)
"cannot identify the cable type: adc(0x%x)\n",
adc);
return -EINVAL;
- };
+ }
break;
default:
dev_err(info->dev,
"failed to identify the cable type: adc(0x%x)\n", adc);
return -EINVAL;
- };
+ }
return cable_type;
}
@@ -405,7 +405,7 @@ static int sm5502_muic_cable_handler(struct sm5502_muic_info *info,
dev_dbg(info->dev,
"cannot handle this cable_type (0x%x)\n", cable_type);
return 0;
- };
+ }
/* Change internal hardware path(DM_CON/DP_CON, VBUSIN) */
ret = sm5502_muic_set_path(info, con_sw, vbus_sw, attached);
diff --git a/drivers/firewire/nosy.c b/drivers/firewire/nosy.c
index 0cc746673677..6ca2f5ab6c57 100644
--- a/drivers/firewire/nosy.c
+++ b/drivers/firewire/nosy.c
@@ -551,7 +551,7 @@ add_card(struct pci_dev *dev, const struct pci_device_id *unused)
INIT_LIST_HEAD(&lynx->client_list);
kref_init(&lynx->kref);
- lynx->registers = ioremap_nocache(pci_resource_start(dev, 0),
+ lynx->registers = ioremap(pci_resource_start(dev, 0),
PCILYNX_MAX_REGISTER);
if (lynx->registers == NULL) {
dev_err(&dev->dev, "Failed to map registers\n");
diff --git a/drivers/firmware/broadcom/bcm47xx_nvram.c b/drivers/firmware/broadcom/bcm47xx_nvram.c
index da04fdae62a1..835ece9c00f1 100644
--- a/drivers/firmware/broadcom/bcm47xx_nvram.c
+++ b/drivers/firmware/broadcom/bcm47xx_nvram.c
@@ -120,7 +120,7 @@ int bcm47xx_nvram_init_from_mem(u32 base, u32 lim)
void __iomem *iobase;
int err;
- iobase = ioremap_nocache(base, lim);
+ iobase = ioremap(base, lim);
if (!iobase)
return -ENOMEM;
diff --git a/drivers/firmware/efi/Kconfig b/drivers/firmware/efi/Kconfig
index bcc378c19ebe..ecc83e2f032c 100644
--- a/drivers/firmware/efi/Kconfig
+++ b/drivers/firmware/efi/Kconfig
@@ -215,6 +215,28 @@ config EFI_RCI2_TABLE
Say Y here for Dell EMC PowerEdge systems.
+config EFI_DISABLE_PCI_DMA
+ bool "Clear Busmaster bit on PCI bridges during ExitBootServices()"
+ help
+ Disable the busmaster bit in the control register on all PCI bridges
+ while calling ExitBootServices() and passing control to the runtime
+ kernel. System firmware may configure the IOMMU to prevent malicious
+ PCI devices from being able to attack the OS via DMA. However, since
+ firmware can't guarantee that the OS is IOMMU-aware, it will tear
+ down IOMMU configuration when ExitBootServices() is called. This
+ leaves a window in which a hostile device can still cause damage
+ before Linux configures the IOMMU again.
+
+ If you say Y here, the EFI stub will clear the busmaster bit on all
+ PCI bridges before ExitBootServices() is called. This will prevent
+ any malicious PCI devices from being able to perform DMA until the
+ kernel reenables busmastering after configuring the IOMMU.
+
+ This option will cause failures with some poorly behaved hardware
+ and should not be enabled without testing. The kernel command line
+ options "efi=disable_early_pci_dma" or "efi=no_disable_early_pci_dma"
+ may be used to override this option.
+
endmenu
config UEFI_CPER
diff --git a/drivers/firmware/efi/arm-init.c b/drivers/firmware/efi/arm-init.c
index 904fa09e6a6b..d99f5b0c8a09 100644
--- a/drivers/firmware/efi/arm-init.c
+++ b/drivers/firmware/efi/arm-init.c
@@ -10,10 +10,12 @@
#define pr_fmt(fmt) "efi: " fmt
#include <linux/efi.h>
+#include <linux/fwnode.h>
#include <linux/init.h>
#include <linux/memblock.h>
#include <linux/mm_types.h>
#include <linux/of.h>
+#include <linux/of_address.h>
#include <linux/of_fdt.h>
#include <linux/platform_device.h>
#include <linux/screen_info.h>
@@ -276,15 +278,112 @@ void __init efi_init(void)
efi_memmap_unmap();
}
+static bool efifb_overlaps_pci_range(const struct of_pci_range *range)
+{
+ u64 fb_base = screen_info.lfb_base;
+
+ if (screen_info.capabilities & VIDEO_CAPABILITY_64BIT_BASE)
+ fb_base |= (u64)(unsigned long)screen_info.ext_lfb_base << 32;
+
+ return fb_base >= range->cpu_addr &&
+ fb_base < (range->cpu_addr + range->size);
+}
+
+static struct device_node *find_pci_overlap_node(void)
+{
+ struct device_node *np;
+
+ for_each_node_by_type(np, "pci") {
+ struct of_pci_range_parser parser;
+ struct of_pci_range range;
+ int err;
+
+ err = of_pci_range_parser_init(&parser, np);
+ if (err) {
+ pr_warn("of_pci_range_parser_init() failed: %d\n", err);
+ continue;
+ }
+
+ for_each_of_pci_range(&parser, &range)
+ if (efifb_overlaps_pci_range(&range))
+ return np;
+ }
+ return NULL;
+}
+
+/*
+ * If the efifb framebuffer is backed by a PCI graphics controller, we have
+ * to ensure that this relation is expressed using a device link when
+ * running in DT mode, or the probe order may be reversed, resulting in a
+ * resource reservation conflict on the memory window that the efifb
+ * framebuffer steals from the PCIe host bridge.
+ */
+static int efifb_add_links(const struct fwnode_handle *fwnode,
+ struct device *dev)
+{
+ struct device_node *sup_np;
+ struct device *sup_dev;
+
+ sup_np = find_pci_overlap_node();
+
+ /*
+ * If there's no PCI graphics controller backing the efifb, we are
+ * done here.
+ */
+ if (!sup_np)
+ return 0;
+
+ sup_dev = get_dev_from_fwnode(&sup_np->fwnode);
+ of_node_put(sup_np);
+
+ /*
+ * Return -ENODEV if the PCI graphics controller device hasn't been
+ * registered yet. This ensures that efifb isn't allowed to probe
+ * and this function is retried again when new devices are
+ * registered.
+ */
+ if (!sup_dev)
+ return -ENODEV;
+
+ /*
+ * If this fails, retrying this function at a later point won't
+ * change anything. So, don't return an error after this.
+ */
+ if (!device_link_add(dev, sup_dev, 0))
+ dev_warn(dev, "device_link_add() failed\n");
+
+ put_device(sup_dev);
+
+ return 0;
+}
+
+static const struct fwnode_operations efifb_fwnode_ops = {
+ .add_links = efifb_add_links,
+};
+
+static struct fwnode_handle efifb_fwnode = {
+ .ops = &efifb_fwnode_ops,
+};
+
static int __init register_gop_device(void)
{
- void *pd;
+ struct platform_device *pd;
+ int err;
if (screen_info.orig_video_isVGA != VIDEO_TYPE_EFI)
return 0;
- pd = platform_device_register_data(NULL, "efi-framebuffer", 0,
- &screen_info, sizeof(screen_info));
- return PTR_ERR_OR_ZERO(pd);
+ pd = platform_device_alloc("efi-framebuffer", 0);
+ if (!pd)
+ return -ENOMEM;
+
+ if (IS_ENABLED(CONFIG_PCI))
+ pd->dev.fwnode = &efifb_fwnode;
+
+ err = platform_device_add_data(pd, &screen_info, sizeof(screen_info));
+ if (err)
+ return err;
+
+ return platform_device_add(pd);
}
subsys_initcall(register_gop_device);
diff --git a/drivers/firmware/efi/capsule-loader.c b/drivers/firmware/efi/capsule-loader.c
index b1395133389e..d3067cbd5114 100644
--- a/drivers/firmware/efi/capsule-loader.c
+++ b/drivers/firmware/efi/capsule-loader.c
@@ -11,6 +11,7 @@
#include <linux/module.h>
#include <linux/miscdevice.h>
#include <linux/highmem.h>
+#include <linux/io.h>
#include <linux/slab.h>
#include <linux/mutex.h>
#include <linux/efi.h>
diff --git a/drivers/firmware/efi/earlycon.c b/drivers/firmware/efi/earlycon.c
index d4077db6dc97..5d4f84781aa0 100644
--- a/drivers/firmware/efi/earlycon.c
+++ b/drivers/firmware/efi/earlycon.c
@@ -17,7 +17,7 @@ static const struct console *earlycon_console __initdata;
static const struct font_desc *font;
static u32 efi_x, efi_y;
static u64 fb_base;
-static pgprot_t fb_prot;
+static bool fb_wb;
static void *efi_fb;
/*
@@ -33,10 +33,8 @@ static int __init efi_earlycon_remap_fb(void)
if (!earlycon_console || !(earlycon_console->flags & CON_ENABLED))
return 0;
- if (pgprot_val(fb_prot) == pgprot_val(PAGE_KERNEL))
- efi_fb = memremap(fb_base, screen_info.lfb_size, MEMREMAP_WB);
- else
- efi_fb = memremap(fb_base, screen_info.lfb_size, MEMREMAP_WC);
+ efi_fb = memremap(fb_base, screen_info.lfb_size,
+ fb_wb ? MEMREMAP_WB : MEMREMAP_WC);
return efi_fb ? 0 : -ENOMEM;
}
@@ -53,9 +51,12 @@ late_initcall(efi_earlycon_unmap_fb);
static __ref void *efi_earlycon_map(unsigned long start, unsigned long len)
{
+ pgprot_t fb_prot;
+
if (efi_fb)
return efi_fb + start;
+ fb_prot = fb_wb ? PAGE_KERNEL : pgprot_writecombine(PAGE_KERNEL);
return early_memremap_prot(fb_base + start, len, pgprot_val(fb_prot));
}
@@ -215,10 +216,7 @@ static int __init efi_earlycon_setup(struct earlycon_device *device,
if (screen_info.capabilities & VIDEO_CAPABILITY_64BIT_BASE)
fb_base |= (u64)screen_info.ext_lfb_base << 32;
- if (opt && !strcmp(opt, "ram"))
- fb_prot = PAGE_KERNEL;
- else
- fb_prot = pgprot_writecombine(PAGE_KERNEL);
+ fb_wb = opt && !strcmp(opt, "ram");
si = &screen_info;
xres = si->lfb_width;
diff --git a/drivers/firmware/efi/efi.c b/drivers/firmware/efi/efi.c
index 2b02cb165f16..621220ab3d0e 100644
--- a/drivers/firmware/efi/efi.c
+++ b/drivers/firmware/efi/efi.c
@@ -908,7 +908,7 @@ u64 efi_mem_attributes(unsigned long phys_addr)
*
* Search in the EFI memory map for the region covering @phys_addr.
* Returns the EFI memory type if the region was found in the memory
- * map, EFI_RESERVED_TYPE (zero) otherwise.
+ * map, -EINVAL otherwise.
*/
int efi_mem_type(unsigned long phys_addr)
{
diff --git a/drivers/firmware/efi/fake_mem.c b/drivers/firmware/efi/fake_mem.c
index bb9fc70d0cfa..6e0f34a38171 100644
--- a/drivers/firmware/efi/fake_mem.c
+++ b/drivers/firmware/efi/fake_mem.c
@@ -34,46 +34,45 @@ static int __init cmp_fake_mem(const void *x1, const void *x2)
return 0;
}
-void __init efi_fake_memmap(void)
+static void __init efi_fake_range(struct efi_mem_range *efi_range)
{
+ struct efi_memory_map_data data = { 0 };
int new_nr_map = efi.memmap.nr_map;
efi_memory_desc_t *md;
- phys_addr_t new_memmap_phy;
void *new_memmap;
- int i;
-
- if (!efi_enabled(EFI_MEMMAP) || !nr_fake_mem)
- return;
/* count up the number of EFI memory descriptor */
- for (i = 0; i < nr_fake_mem; i++) {
- for_each_efi_memory_desc(md) {
- struct range *r = &efi_fake_mems[i].range;
-
- new_nr_map += efi_memmap_split_count(md, r);
- }
- }
+ for_each_efi_memory_desc(md)
+ new_nr_map += efi_memmap_split_count(md, &efi_range->range);
/* allocate memory for new EFI memmap */
- new_memmap_phy = efi_memmap_alloc(new_nr_map);
- if (!new_memmap_phy)
+ if (efi_memmap_alloc(new_nr_map, &data) != 0)
return;
/* create new EFI memmap */
- new_memmap = early_memremap(new_memmap_phy,
- efi.memmap.desc_size * new_nr_map);
+ new_memmap = early_memremap(data.phys_map, data.size);
if (!new_memmap) {
- memblock_free(new_memmap_phy, efi.memmap.desc_size * new_nr_map);
+ __efi_memmap_free(data.phys_map, data.size, data.flags);
return;
}
- for (i = 0; i < nr_fake_mem; i++)
- efi_memmap_insert(&efi.memmap, new_memmap, &efi_fake_mems[i]);
+ efi_memmap_insert(&efi.memmap, new_memmap, efi_range);
/* swap into new EFI memmap */
- early_memunmap(new_memmap, efi.memmap.desc_size * new_nr_map);
+ early_memunmap(new_memmap, data.size);
+
+ efi_memmap_install(&data);
+}
+
+void __init efi_fake_memmap(void)
+{
+ int i;
- efi_memmap_install(new_memmap_phy, new_nr_map);
+ if (!efi_enabled(EFI_MEMMAP) || !nr_fake_mem)
+ return;
+
+ for (i = 0; i < nr_fake_mem; i++)
+ efi_fake_range(&efi_fake_mems[i]);
/* print new EFI memmap */
efi_print_memmap();
diff --git a/drivers/firmware/efi/libstub/Makefile b/drivers/firmware/efi/libstub/Makefile
index c35f893897e1..98a81576213d 100644
--- a/drivers/firmware/efi/libstub/Makefile
+++ b/drivers/firmware/efi/libstub/Makefile
@@ -39,7 +39,7 @@ OBJECT_FILES_NON_STANDARD := y
KCOV_INSTRUMENT := n
lib-y := efi-stub-helper.o gop.o secureboot.o tpm.o \
- random.o
+ random.o pci.o
# include the stub's generic dependencies from lib/ when building for ARM/arm64
arm-deps-y := fdt_rw.c fdt_ro.c fdt_wip.c fdt.c fdt_empty_tree.c fdt_sw.c
diff --git a/drivers/firmware/efi/libstub/arm-stub.c b/drivers/firmware/efi/libstub/arm-stub.c
index 817237ce2420..7bbef4a67350 100644
--- a/drivers/firmware/efi/libstub/arm-stub.c
+++ b/drivers/firmware/efi/libstub/arm-stub.c
@@ -37,16 +37,14 @@
static u64 virtmap_base = EFI_RT_VIRTUAL_BASE;
-void efi_char16_printk(efi_system_table_t *sys_table_arg,
- efi_char16_t *str)
-{
- struct efi_simple_text_output_protocol *out;
+static efi_system_table_t *__efistub_global sys_table;
- out = (struct efi_simple_text_output_protocol *)sys_table_arg->con_out;
- out->output_string(out, str);
+__pure efi_system_table_t *efi_system_table(void)
+{
+ return sys_table;
}
-static struct screen_info *setup_graphics(efi_system_table_t *sys_table_arg)
+static struct screen_info *setup_graphics(void)
{
efi_guid_t gop_proto = EFI_GRAPHICS_OUTPUT_PROTOCOL_GUID;
efi_status_t status;
@@ -55,27 +53,27 @@ static struct screen_info *setup_graphics(efi_system_table_t *sys_table_arg)
struct screen_info *si = NULL;
size = 0;
- status = efi_call_early(locate_handle, EFI_LOCATE_BY_PROTOCOL,
- &gop_proto, NULL, &size, gop_handle);
+ status = efi_bs_call(locate_handle, EFI_LOCATE_BY_PROTOCOL,
+ &gop_proto, NULL, &size, gop_handle);
if (status == EFI_BUFFER_TOO_SMALL) {
- si = alloc_screen_info(sys_table_arg);
+ si = alloc_screen_info();
if (!si)
return NULL;
- efi_setup_gop(sys_table_arg, si, &gop_proto, size);
+ efi_setup_gop(si, &gop_proto, size);
}
return si;
}
-void install_memreserve_table(efi_system_table_t *sys_table_arg)
+void install_memreserve_table(void)
{
struct linux_efi_memreserve *rsv;
efi_guid_t memreserve_table_guid = LINUX_EFI_MEMRESERVE_TABLE_GUID;
efi_status_t status;
- status = efi_call_early(allocate_pool, EFI_LOADER_DATA, sizeof(*rsv),
- (void **)&rsv);
+ status = efi_bs_call(allocate_pool, EFI_LOADER_DATA, sizeof(*rsv),
+ (void **)&rsv);
if (status != EFI_SUCCESS) {
- pr_efi_err(sys_table_arg, "Failed to allocate memreserve entry!\n");
+ pr_efi_err("Failed to allocate memreserve entry!\n");
return;
}
@@ -83,11 +81,10 @@ void install_memreserve_table(efi_system_table_t *sys_table_arg)
rsv->size = 0;
atomic_set(&rsv->count, 0);
- status = efi_call_early(install_configuration_table,
- &memreserve_table_guid,
- rsv);
+ status = efi_bs_call(install_configuration_table,
+ &memreserve_table_guid, rsv);
if (status != EFI_SUCCESS)
- pr_efi_err(sys_table_arg, "Failed to install memreserve config table!\n");
+ pr_efi_err("Failed to install memreserve config table!\n");
}
@@ -97,8 +94,7 @@ void install_memreserve_table(efi_system_table_t *sys_table_arg)
* must be reserved. On failure it is required to free
* all allocations it has made.
*/
-efi_status_t handle_kernel_image(efi_system_table_t *sys_table,
- unsigned long *image_addr,
+efi_status_t handle_kernel_image(unsigned long *image_addr,
unsigned long *image_size,
unsigned long *reserve_addr,
unsigned long *reserve_size,
@@ -110,7 +106,7 @@ efi_status_t handle_kernel_image(efi_system_table_t *sys_table,
* for both architectures, with the arch-specific code provided in the
* handle_kernel_image() function.
*/
-unsigned long efi_entry(void *handle, efi_system_table_t *sys_table,
+unsigned long efi_entry(void *handle, efi_system_table_t *sys_table_arg,
unsigned long *image_addr)
{
efi_loaded_image_t *image;
@@ -131,11 +127,13 @@ unsigned long efi_entry(void *handle, efi_system_table_t *sys_table,
enum efi_secureboot_mode secure_boot;
struct screen_info *si;
+ sys_table = sys_table_arg;
+
/* Check if we were booted by the EFI firmware */
if (sys_table->hdr.signature != EFI_SYSTEM_TABLE_SIGNATURE)
goto fail;
- status = check_platform_features(sys_table);
+ status = check_platform_features();
if (status != EFI_SUCCESS)
goto fail;
@@ -147,13 +145,13 @@ unsigned long efi_entry(void *handle, efi_system_table_t *sys_table,
status = sys_table->boottime->handle_protocol(handle,
&loaded_image_proto, (void *)&image);
if (status != EFI_SUCCESS) {
- pr_efi_err(sys_table, "Failed to get loaded image protocol\n");
+ pr_efi_err("Failed to get loaded image protocol\n");
goto fail;
}
- dram_base = get_dram_base(sys_table);
+ dram_base = get_dram_base();
if (dram_base == EFI_ERROR) {
- pr_efi_err(sys_table, "Failed to find DRAM base\n");
+ pr_efi_err("Failed to find DRAM base\n");
goto fail;
}
@@ -162,9 +160,9 @@ unsigned long efi_entry(void *handle, efi_system_table_t *sys_table,
* protocol. We are going to copy the command line into the
* device tree, so this can be allocated anywhere.
*/
- cmdline_ptr = efi_convert_cmdline(sys_table, image, &cmdline_size);
+ cmdline_ptr = efi_convert_cmdline(image, &cmdline_size);
if (!cmdline_ptr) {
- pr_efi_err(sys_table, "getting command line via LOADED_IMAGE_PROTOCOL\n");
+ pr_efi_err("getting command line via LOADED_IMAGE_PROTOCOL\n");
goto fail;
}
@@ -176,25 +174,25 @@ unsigned long efi_entry(void *handle, efi_system_table_t *sys_table,
if (!IS_ENABLED(CONFIG_CMDLINE_FORCE) && cmdline_size > 0)
efi_parse_options(cmdline_ptr);
- pr_efi(sys_table, "Booting Linux Kernel...\n");
+ pr_efi("Booting Linux Kernel...\n");
- si = setup_graphics(sys_table);
+ si = setup_graphics();
- status = handle_kernel_image(sys_table, image_addr, &image_size,
+ status = handle_kernel_image(image_addr, &image_size,
&reserve_addr,
&reserve_size,
dram_base, image);
if (status != EFI_SUCCESS) {
- pr_efi_err(sys_table, "Failed to relocate kernel\n");
+ pr_efi_err("Failed to relocate kernel\n");
goto fail_free_cmdline;
}
- efi_retrieve_tpm2_eventlog(sys_table);
+ efi_retrieve_tpm2_eventlog();
/* Ask the firmware to clear memory on unclean shutdown */
- efi_enable_reset_attack_mitigation(sys_table);
+ efi_enable_reset_attack_mitigation();
- secure_boot = efi_get_secureboot(sys_table);
+ secure_boot = efi_get_secureboot();
/*
* Unauthenticated device tree data is a security hazard, so ignore
@@ -204,39 +202,38 @@ unsigned long efi_entry(void *handle, efi_system_table_t *sys_table,
if (!IS_ENABLED(CONFIG_EFI_ARMSTUB_DTB_LOADER) ||
secure_boot != efi_secureboot_mode_disabled) {
if (strstr(cmdline_ptr, "dtb="))
- pr_efi(sys_table, "Ignoring DTB from command line.\n");
+ pr_efi("Ignoring DTB from command line.\n");
} else {
- status = handle_cmdline_files(sys_table, image, cmdline_ptr,
- "dtb=",
+ status = handle_cmdline_files(image, cmdline_ptr, "dtb=",
~0UL, &fdt_addr, &fdt_size);
if (status != EFI_SUCCESS) {
- pr_efi_err(sys_table, "Failed to load device tree!\n");
+ pr_efi_err("Failed to load device tree!\n");
goto fail_free_image;
}
}
if (fdt_addr) {
- pr_efi(sys_table, "Using DTB from command line\n");
+ pr_efi("Using DTB from command line\n");
} else {
/* Look for a device tree configuration table entry. */
- fdt_addr = (uintptr_t)get_fdt(sys_table, &fdt_size);
+ fdt_addr = (uintptr_t)get_fdt(&fdt_size);
if (fdt_addr)
- pr_efi(sys_table, "Using DTB from configuration table\n");
+ pr_efi("Using DTB from configuration table\n");
}
if (!fdt_addr)
- pr_efi(sys_table, "Generating empty DTB\n");
+ pr_efi("Generating empty DTB\n");
- status = handle_cmdline_files(sys_table, image, cmdline_ptr, "initrd=",
+ status = handle_cmdline_files(image, cmdline_ptr, "initrd=",
efi_get_max_initrd_addr(dram_base,
*image_addr),
(unsigned long *)&initrd_addr,
(unsigned long *)&initrd_size);
if (status != EFI_SUCCESS)
- pr_efi_err(sys_table, "Failed initrd from command line!\n");
+ pr_efi_err("Failed initrd from command line!\n");
- efi_random_get_seed(sys_table);
+ efi_random_get_seed();
/* hibernation expects the runtime regions to stay in the same place */
if (!IS_ENABLED(CONFIG_HIBERNATION) && !nokaslr()) {
@@ -251,18 +248,17 @@ unsigned long efi_entry(void *handle, efi_system_table_t *sys_table,
EFI_RT_VIRTUAL_SIZE;
u32 rnd;
- status = efi_get_random_bytes(sys_table, sizeof(rnd),
- (u8 *)&rnd);
+ status = efi_get_random_bytes(sizeof(rnd), (u8 *)&rnd);
if (status == EFI_SUCCESS) {
virtmap_base = EFI_RT_VIRTUAL_BASE +
(((headroom >> 21) * rnd) >> (32 - 21));
}
}
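
The shift arithmetic above is denser than it looks: headroom >> 21 truncates the headroom to whole 2 MiB units, and >> (32 - 21) then scales the 32-bit random value proportionally into that span. Restated as a standalone helper (the name is illustrative):

	/* Sketch: map a u32 uniformly into [0, slots << 21). */
	static u64 randomize_rt_base(u64 base, u64 headroom, u32 rnd)
	{
		u64 slots = headroom >> 21;	/* headroom in 2 MiB units */

		/* slots * rnd < slots * 2^32, so shifting right by 11
		 * (i.e. 32 - 21) yields a value below slots << 21.      */
		return base + ((slots * rnd) >> (32 - 21));
	}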
- install_memreserve_table(sys_table);
+ install_memreserve_table();
new_fdt_addr = fdt_addr;
- status = allocate_new_fdt_and_exit_boot(sys_table, handle,
+ status = allocate_new_fdt_and_exit_boot(handle,
&new_fdt_addr, efi_get_max_fdt_addr(dram_base),
initrd_addr, initrd_size, cmdline_ptr,
fdt_addr, fdt_size);
@@ -275,17 +271,17 @@ unsigned long efi_entry(void *handle, efi_system_table_t *sys_table,
if (status == EFI_SUCCESS)
return new_fdt_addr;
- pr_efi_err(sys_table, "Failed to update FDT and exit boot services\n");
+ pr_efi_err("Failed to update FDT and exit boot services\n");
- efi_free(sys_table, initrd_size, initrd_addr);
- efi_free(sys_table, fdt_size, fdt_addr);
+ efi_free(initrd_size, initrd_addr);
+ efi_free(fdt_size, fdt_addr);
fail_free_image:
- efi_free(sys_table, image_size, *image_addr);
- efi_free(sys_table, reserve_size, reserve_addr);
+ efi_free(image_size, *image_addr);
+ efi_free(reserve_size, reserve_addr);
fail_free_cmdline:
- free_screen_info(sys_table, si);
- efi_free(sys_table, cmdline_size, (unsigned long)cmdline_ptr);
+ free_screen_info(si);
+ efi_free(cmdline_size, (unsigned long)cmdline_ptr);
fail:
return EFI_ERROR;
}
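
All the efi_call_early()/efi_call_proto() rewrites in this file funnel through a small family of accessor macros. On native firmware they collapse to plain indirect calls; a sketch of that shape (the in-tree x86 versions add mixed-mode thunking, so treat these definitions as illustrative):

	#define efi_bs_call(func, ...) \
		efi_system_table()->boottime->func(__VA_ARGS__)
	#define efi_rt_call(func, ...) \
		efi_system_table()->runtime->func(__VA_ARGS__)
	#define efi_table_attr(inst, attr)	((inst)->attr)
	#define efi_call_proto(inst, func, ...)	(inst)->func((inst), ##__VA_ARGS__)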
diff --git a/drivers/firmware/efi/libstub/arm32-stub.c b/drivers/firmware/efi/libstub/arm32-stub.c
index 4566640de650..7b2a6382b647 100644
--- a/drivers/firmware/efi/libstub/arm32-stub.c
+++ b/drivers/firmware/efi/libstub/arm32-stub.c
@@ -7,7 +7,7 @@
#include "efistub.h"
-efi_status_t check_platform_features(efi_system_table_t *sys_table_arg)
+efi_status_t check_platform_features(void)
{
int block;
@@ -18,7 +18,7 @@ efi_status_t check_platform_features(efi_system_table_t *sys_table_arg)
/* LPAE kernels need compatible hardware */
block = cpuid_feature_extract(CPUID_EXT_MMFR0, 0);
if (block < 5) {
- pr_efi_err(sys_table_arg, "This LPAE kernel is not supported by your CPU\n");
+ pr_efi_err("This LPAE kernel is not supported by your CPU\n");
return EFI_UNSUPPORTED;
}
return EFI_SUCCESS;
@@ -26,7 +26,7 @@ efi_status_t check_platform_features(efi_system_table_t *sys_table_arg)
static efi_guid_t screen_info_guid = LINUX_EFI_ARM_SCREEN_INFO_TABLE_GUID;
-struct screen_info *alloc_screen_info(efi_system_table_t *sys_table_arg)
+struct screen_info *alloc_screen_info(void)
{
struct screen_info *si;
efi_status_t status;
@@ -37,32 +37,31 @@ struct screen_info *alloc_screen_info(efi_system_table_t *sys_table_arg)
* its contents while we hand over to the kernel proper from the
* decompressor.
*/
- status = efi_call_early(allocate_pool, EFI_RUNTIME_SERVICES_DATA,
- sizeof(*si), (void **)&si);
+ status = efi_bs_call(allocate_pool, EFI_RUNTIME_SERVICES_DATA,
+ sizeof(*si), (void **)&si);
if (status != EFI_SUCCESS)
return NULL;
- status = efi_call_early(install_configuration_table,
- &screen_info_guid, si);
+ status = efi_bs_call(install_configuration_table,
+ &screen_info_guid, si);
if (status == EFI_SUCCESS)
return si;
- efi_call_early(free_pool, si);
+ efi_bs_call(free_pool, si);
return NULL;
}
-void free_screen_info(efi_system_table_t *sys_table_arg, struct screen_info *si)
+void free_screen_info(struct screen_info *si)
{
if (!si)
return;
- efi_call_early(install_configuration_table, &screen_info_guid, NULL);
- efi_call_early(free_pool, si);
+ efi_bs_call(install_configuration_table, &screen_info_guid, NULL);
+ efi_bs_call(free_pool, si);
}
-static efi_status_t reserve_kernel_base(efi_system_table_t *sys_table_arg,
- unsigned long dram_base,
+static efi_status_t reserve_kernel_base(unsigned long dram_base,
unsigned long *reserve_addr,
unsigned long *reserve_size)
{
@@ -92,8 +91,8 @@ static efi_status_t reserve_kernel_base(efi_system_table_t *sys_table_arg,
*/
alloc_addr = dram_base + MAX_UNCOMP_KERNEL_SIZE;
nr_pages = MAX_UNCOMP_KERNEL_SIZE / EFI_PAGE_SIZE;
- status = efi_call_early(allocate_pages, EFI_ALLOCATE_MAX_ADDRESS,
- EFI_BOOT_SERVICES_DATA, nr_pages, &alloc_addr);
+ status = efi_bs_call(allocate_pages, EFI_ALLOCATE_MAX_ADDRESS,
+ EFI_BOOT_SERVICES_DATA, nr_pages, &alloc_addr);
if (status == EFI_SUCCESS) {
if (alloc_addr == dram_base) {
*reserve_addr = alloc_addr;
@@ -119,10 +118,9 @@ static efi_status_t reserve_kernel_base(efi_system_table_t *sys_table_arg,
* released to the OS after ExitBootServices(), the decompressor can
* safely overwrite them.
*/
- status = efi_get_memory_map(sys_table_arg, &map);
+ status = efi_get_memory_map(&map);
if (status != EFI_SUCCESS) {
- pr_efi_err(sys_table_arg,
- "reserve_kernel_base(): Unable to retrieve memory map.\n");
+ pr_efi_err("reserve_kernel_base(): Unable to retrieve memory map.\n");
return status;
}
@@ -158,14 +156,13 @@ static efi_status_t reserve_kernel_base(efi_system_table_t *sys_table_arg,
start = max(start, (u64)dram_base);
end = min(end, (u64)dram_base + MAX_UNCOMP_KERNEL_SIZE);
- status = efi_call_early(allocate_pages,
- EFI_ALLOCATE_ADDRESS,
- EFI_LOADER_DATA,
- (end - start) / EFI_PAGE_SIZE,
- &start);
+ status = efi_bs_call(allocate_pages,
+ EFI_ALLOCATE_ADDRESS,
+ EFI_LOADER_DATA,
+ (end - start) / EFI_PAGE_SIZE,
+ &start);
if (status != EFI_SUCCESS) {
- pr_efi_err(sys_table_arg,
- "reserve_kernel_base(): alloc failed.\n");
+ pr_efi_err("reserve_kernel_base(): alloc failed.\n");
goto out;
}
break;
@@ -188,12 +185,11 @@ static efi_status_t reserve_kernel_base(efi_system_table_t *sys_table_arg,
status = EFI_SUCCESS;
out:
- efi_call_early(free_pool, memory_map);
+ efi_bs_call(free_pool, memory_map);
return status;
}
-efi_status_t handle_kernel_image(efi_system_table_t *sys_table,
- unsigned long *image_addr,
+efi_status_t handle_kernel_image(unsigned long *image_addr,
unsigned long *image_size,
unsigned long *reserve_addr,
unsigned long *reserve_size,
@@ -221,10 +217,9 @@ efi_status_t handle_kernel_image(efi_system_table_t *sys_table,
*/
kernel_base += TEXT_OFFSET - 5 * PAGE_SIZE;
- status = reserve_kernel_base(sys_table, kernel_base, reserve_addr,
- reserve_size);
+ status = reserve_kernel_base(kernel_base, reserve_addr, reserve_size);
if (status != EFI_SUCCESS) {
- pr_efi_err(sys_table, "Unable to allocate memory for uncompressed kernel.\n");
+ pr_efi_err("Unable to allocate memory for uncompressed kernel.\n");
return status;
}
@@ -233,12 +228,11 @@ efi_status_t handle_kernel_image(efi_system_table_t *sys_table,
* memory window.
*/
*image_size = image->image_size;
- status = efi_relocate_kernel(sys_table, image_addr, *image_size,
- *image_size,
+ status = efi_relocate_kernel(image_addr, *image_size, *image_size,
kernel_base + MAX_UNCOMP_KERNEL_SIZE, 0, 0);
if (status != EFI_SUCCESS) {
- pr_efi_err(sys_table, "Failed to relocate kernel.\n");
- efi_free(sys_table, *reserve_size, *reserve_addr);
+ pr_efi_err("Failed to relocate kernel.\n");
+ efi_free(*reserve_size, *reserve_addr);
*reserve_size = 0;
return status;
}
@@ -249,10 +243,10 @@ efi_status_t handle_kernel_image(efi_system_table_t *sys_table,
* address at which the zImage is loaded.
*/
if (*image_addr + *image_size > dram_base + ZIMAGE_OFFSET_LIMIT) {
- pr_efi_err(sys_table, "Failed to relocate kernel, no low memory available.\n");
- efi_free(sys_table, *reserve_size, *reserve_addr);
+ pr_efi_err("Failed to relocate kernel, no low memory available.\n");
+ efi_free(*reserve_size, *reserve_addr);
*reserve_size = 0;
- efi_free(sys_table, *image_size, *image_addr);
+ efi_free(*image_size, *image_addr);
*image_size = 0;
return EFI_LOAD_ERROR;
}
diff --git a/drivers/firmware/efi/libstub/arm64-stub.c b/drivers/firmware/efi/libstub/arm64-stub.c
index 1550d244e996..2915b44132e6 100644
--- a/drivers/firmware/efi/libstub/arm64-stub.c
+++ b/drivers/firmware/efi/libstub/arm64-stub.c
@@ -21,7 +21,7 @@
#include "efistub.h"
-efi_status_t check_platform_features(efi_system_table_t *sys_table_arg)
+efi_status_t check_platform_features(void)
{
u64 tg;
@@ -32,16 +32,15 @@ efi_status_t check_platform_features(efi_system_table_t *sys_table_arg)
tg = (read_cpuid(ID_AA64MMFR0_EL1) >> ID_AA64MMFR0_TGRAN_SHIFT) & 0xf;
if (tg != ID_AA64MMFR0_TGRAN_SUPPORTED) {
if (IS_ENABLED(CONFIG_ARM64_64K_PAGES))
- pr_efi_err(sys_table_arg, "This 64 KB granular kernel is not supported by your CPU\n");
+ pr_efi_err("This 64 KB granular kernel is not supported by your CPU\n");
else
- pr_efi_err(sys_table_arg, "This 16 KB granular kernel is not supported by your CPU\n");
+ pr_efi_err("This 16 KB granular kernel is not supported by your CPU\n");
return EFI_UNSUPPORTED;
}
return EFI_SUCCESS;
}
-efi_status_t handle_kernel_image(efi_system_table_t *sys_table_arg,
- unsigned long *image_addr,
+efi_status_t handle_kernel_image(unsigned long *image_addr,
unsigned long *image_size,
unsigned long *reserve_addr,
unsigned long *reserve_size,
@@ -56,17 +55,16 @@ efi_status_t handle_kernel_image(efi_system_table_t *sys_table_arg,
if (IS_ENABLED(CONFIG_RANDOMIZE_BASE)) {
if (!nokaslr()) {
- status = efi_get_random_bytes(sys_table_arg,
- sizeof(phys_seed),
+ status = efi_get_random_bytes(sizeof(phys_seed),
(u8 *)&phys_seed);
if (status == EFI_NOT_FOUND) {
- pr_efi(sys_table_arg, "EFI_RNG_PROTOCOL unavailable, no randomness supplied\n");
+ pr_efi("EFI_RNG_PROTOCOL unavailable, no randomness supplied\n");
} else if (status != EFI_SUCCESS) {
- pr_efi_err(sys_table_arg, "efi_get_random_bytes() failed\n");
+ pr_efi_err("efi_get_random_bytes() failed\n");
return status;
}
} else {
- pr_efi(sys_table_arg, "KASLR disabled on kernel command line\n");
+ pr_efi("KASLR disabled on kernel command line\n");
}
}
@@ -108,7 +106,7 @@ efi_status_t handle_kernel_image(efi_system_table_t *sys_table_arg,
* locate the kernel at a randomized offset in physical memory.
*/
*reserve_size = kernel_memsize + offset;
- status = efi_random_alloc(sys_table_arg, *reserve_size,
+ status = efi_random_alloc(*reserve_size,
MIN_KIMG_ALIGN, reserve_addr,
(u32)phys_seed);
@@ -131,19 +129,19 @@ efi_status_t handle_kernel_image(efi_system_table_t *sys_table_arg,
*image_addr = *reserve_addr = preferred_offset;
*reserve_size = round_up(kernel_memsize, EFI_ALLOC_ALIGN);
- status = efi_call_early(allocate_pages, EFI_ALLOCATE_ADDRESS,
- EFI_LOADER_DATA,
- *reserve_size / EFI_PAGE_SIZE,
- (efi_physical_addr_t *)reserve_addr);
+ status = efi_bs_call(allocate_pages, EFI_ALLOCATE_ADDRESS,
+ EFI_LOADER_DATA,
+ *reserve_size / EFI_PAGE_SIZE,
+ (efi_physical_addr_t *)reserve_addr);
}
if (status != EFI_SUCCESS) {
*reserve_size = kernel_memsize + TEXT_OFFSET;
- status = efi_low_alloc(sys_table_arg, *reserve_size,
+ status = efi_low_alloc(*reserve_size,
MIN_KIMG_ALIGN, reserve_addr);
if (status != EFI_SUCCESS) {
- pr_efi_err(sys_table_arg, "Failed to relocate kernel\n");
+ pr_efi_err("Failed to relocate kernel\n");
*reserve_size = 0;
return status;
}
diff --git a/drivers/firmware/efi/libstub/efi-stub-helper.c b/drivers/firmware/efi/libstub/efi-stub-helper.c
index e02579907f2e..74ddfb496140 100644
--- a/drivers/firmware/efi/libstub/efi-stub-helper.c
+++ b/drivers/firmware/efi/libstub/efi-stub-helper.c
@@ -27,24 +27,26 @@
*/
#define EFI_READ_CHUNK_SIZE (1024 * 1024)
-static unsigned long __chunk_size = EFI_READ_CHUNK_SIZE;
+static unsigned long efi_chunk_size = EFI_READ_CHUNK_SIZE;
-static int __section(.data) __nokaslr;
-static int __section(.data) __quiet;
-static int __section(.data) __novamap;
-static bool __section(.data) efi_nosoftreserve;
+static bool __efistub_global efi_nokaslr;
+static bool __efistub_global efi_quiet;
+static bool __efistub_global efi_novamap;
+static bool __efistub_global efi_nosoftreserve;
+static bool __efistub_global efi_disable_pci_dma =
+ IS_ENABLED(CONFIG_EFI_DISABLE_PCI_DMA);
-int __pure nokaslr(void)
+bool __pure nokaslr(void)
{
- return __nokaslr;
+ return efi_nokaslr;
}
-int __pure is_quiet(void)
+bool __pure is_quiet(void)
{
- return __quiet;
+ return efi_quiet;
}
-int __pure novamap(void)
+bool __pure novamap(void)
{
- return __novamap;
+ return efi_novamap;
}
bool __pure __efi_soft_reserve_enabled(void)
{
@@ -58,7 +60,7 @@ struct file_info {
u64 size;
};
-void efi_printk(efi_system_table_t *sys_table_arg, char *str)
+void efi_printk(char *str)
{
char *s8;
@@ -68,10 +70,10 @@ void efi_printk(efi_system_table_t *sys_table_arg, char *str)
ch[0] = *s8;
if (*s8 == '\n') {
efi_char16_t nl[2] = { '\r', 0 };
- efi_char16_printk(sys_table_arg, nl);
+ efi_char16_printk(nl);
}
- efi_char16_printk(sys_table_arg, ch);
+ efi_char16_printk(ch);
}
}
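
The '\r' dance above exists because the UEFI simple-text-output protocol takes UCS-2 strings and expects CRLF line endings. Condensed into a per-character sketch:

	/* Sketch: "abc\n" reaches the console as 'a' 'b' 'c' '\r' '\n'. */
	static void emit(char c)
	{
		efi_char16_t ch[2] = { c, 0 };	/* one UCS-2 char + NUL */

		if (c == '\n') {
			efi_char16_t cr[2] = { '\r', 0 };

			efi_char16_printk(cr);
		}
		efi_char16_printk(ch);
	}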
@@ -84,8 +86,7 @@ static inline bool mmap_has_headroom(unsigned long buff_size,
return slack / desc_size >= EFI_MMAP_NR_SLACK_SLOTS;
}
-efi_status_t efi_get_memory_map(efi_system_table_t *sys_table_arg,
- struct efi_boot_memmap *map)
+efi_status_t efi_get_memory_map(struct efi_boot_memmap *map)
{
efi_memory_desc_t *m = NULL;
efi_status_t status;
@@ -96,19 +97,19 @@ efi_status_t efi_get_memory_map(efi_system_table_t *sys_table_arg,
*map->map_size = *map->desc_size * 32;
*map->buff_size = *map->map_size;
again:
- status = efi_call_early(allocate_pool, EFI_LOADER_DATA,
- *map->map_size, (void **)&m);
+ status = efi_bs_call(allocate_pool, EFI_LOADER_DATA,
+ *map->map_size, (void **)&m);
if (status != EFI_SUCCESS)
goto fail;
*map->desc_size = 0;
key = 0;
- status = efi_call_early(get_memory_map, map->map_size, m,
- &key, map->desc_size, &desc_version);
+ status = efi_bs_call(get_memory_map, map->map_size, m,
+ &key, map->desc_size, &desc_version);
if (status == EFI_BUFFER_TOO_SMALL ||
!mmap_has_headroom(*map->buff_size, *map->map_size,
*map->desc_size)) {
- efi_call_early(free_pool, m);
+ efi_bs_call(free_pool, m);
/*
* Make sure there are some entries of headroom so that the
* buffer can be reused for a new map after allocations are
@@ -122,7 +123,7 @@ again:
}
if (status != EFI_SUCCESS)
- efi_call_early(free_pool, m);
+ efi_bs_call(free_pool, m);
if (map->key_ptr && status == EFI_SUCCESS)
*map->key_ptr = key;
@@ -135,7 +136,7 @@ fail:
}
-unsigned long get_dram_base(efi_system_table_t *sys_table_arg)
+unsigned long get_dram_base(void)
{
efi_status_t status;
unsigned long map_size, buff_size;
@@ -151,7 +152,7 @@ unsigned long get_dram_base(efi_system_table_t *sys_table_arg)
boot_map.key_ptr = NULL;
boot_map.buff_size = &buff_size;
- status = efi_get_memory_map(sys_table_arg, &boot_map);
+ status = efi_get_memory_map(&boot_map);
if (status != EFI_SUCCESS)
return membase;
@@ -164,7 +165,7 @@ unsigned long get_dram_base(efi_system_table_t *sys_table_arg)
}
}
- efi_call_early(free_pool, map.map);
+ efi_bs_call(free_pool, map.map);
return membase;
}
@@ -172,8 +173,7 @@ unsigned long get_dram_base(efi_system_table_t *sys_table_arg)
/*
* Allocate at the highest possible address that is not above 'max'.
*/
-efi_status_t efi_high_alloc(efi_system_table_t *sys_table_arg,
- unsigned long size, unsigned long align,
+efi_status_t efi_high_alloc(unsigned long size, unsigned long align,
unsigned long *addr, unsigned long max)
{
unsigned long map_size, desc_size, buff_size;
@@ -191,7 +191,7 @@ efi_status_t efi_high_alloc(efi_system_table_t *sys_table_arg,
boot_map.key_ptr = NULL;
boot_map.buff_size = &buff_size;
- status = efi_get_memory_map(sys_table_arg, &boot_map);
+ status = efi_get_memory_map(&boot_map);
if (status != EFI_SUCCESS)
goto fail;
@@ -251,9 +251,8 @@ again:
if (!max_addr)
status = EFI_NOT_FOUND;
else {
- status = efi_call_early(allocate_pages,
- EFI_ALLOCATE_ADDRESS, EFI_LOADER_DATA,
- nr_pages, &max_addr);
+ status = efi_bs_call(allocate_pages, EFI_ALLOCATE_ADDRESS,
+ EFI_LOADER_DATA, nr_pages, &max_addr);
if (status != EFI_SUCCESS) {
max = max_addr;
max_addr = 0;
@@ -263,7 +262,7 @@ again:
*addr = max_addr;
}
- efi_call_early(free_pool, map);
+ efi_bs_call(free_pool, map);
fail:
return status;
}
@@ -271,8 +270,7 @@ fail:
/*
* Allocate at the lowest possible address that is not below 'min'.
*/
-efi_status_t efi_low_alloc_above(efi_system_table_t *sys_table_arg,
- unsigned long size, unsigned long align,
+efi_status_t efi_low_alloc_above(unsigned long size, unsigned long align,
unsigned long *addr, unsigned long min)
{
unsigned long map_size, desc_size, buff_size;
@@ -289,7 +287,7 @@ efi_status_t efi_low_alloc_above(efi_system_table_t *sys_table_arg,
boot_map.key_ptr = NULL;
boot_map.buff_size = &buff_size;
- status = efi_get_memory_map(sys_table_arg, &boot_map);
+ status = efi_get_memory_map(&boot_map);
if (status != EFI_SUCCESS)
goto fail;
@@ -331,9 +329,8 @@ efi_status_t efi_low_alloc_above(efi_system_table_t *sys_table_arg,
if ((start + size) > end)
continue;
- status = efi_call_early(allocate_pages,
- EFI_ALLOCATE_ADDRESS, EFI_LOADER_DATA,
- nr_pages, &start);
+ status = efi_bs_call(allocate_pages, EFI_ALLOCATE_ADDRESS,
+ EFI_LOADER_DATA, nr_pages, &start);
if (status == EFI_SUCCESS) {
*addr = start;
break;
@@ -343,13 +340,12 @@ efi_status_t efi_low_alloc_above(efi_system_table_t *sys_table_arg,
if (i == map_size / desc_size)
status = EFI_NOT_FOUND;
- efi_call_early(free_pool, map);
+ efi_bs_call(free_pool, map);
fail:
return status;
}
-void efi_free(efi_system_table_t *sys_table_arg, unsigned long size,
- unsigned long addr)
+void efi_free(unsigned long size, unsigned long addr)
{
unsigned long nr_pages;
@@ -357,12 +353,11 @@ void efi_free(efi_system_table_t *sys_table_arg, unsigned long size,
return;
nr_pages = round_up(size, EFI_ALLOC_ALIGN) / EFI_PAGE_SIZE;
- efi_call_early(free_pages, addr, nr_pages);
+ efi_bs_call(free_pages, addr, nr_pages);
}
-static efi_status_t efi_file_size(efi_system_table_t *sys_table_arg, void *__fh,
- efi_char16_t *filename_16, void **handle,
- u64 *file_sz)
+static efi_status_t efi_file_size(void *__fh, efi_char16_t *filename_16,
+ void **handle, u64 *file_sz)
{
efi_file_handle_t *h, *fh = __fh;
efi_file_info_t *info;
@@ -370,81 +365,75 @@ static efi_status_t efi_file_size(efi_system_table_t *sys_table_arg, void *__fh,
efi_guid_t info_guid = EFI_FILE_INFO_ID;
unsigned long info_sz;
- status = efi_call_proto(efi_file_handle, open, fh, &h, filename_16,
- EFI_FILE_MODE_READ, (u64)0);
+ status = fh->open(fh, &h, filename_16, EFI_FILE_MODE_READ, 0);
if (status != EFI_SUCCESS) {
- efi_printk(sys_table_arg, "Failed to open file: ");
- efi_char16_printk(sys_table_arg, filename_16);
- efi_printk(sys_table_arg, "\n");
+ efi_printk("Failed to open file: ");
+ efi_char16_printk(filename_16);
+ efi_printk("\n");
return status;
}
*handle = h;
info_sz = 0;
- status = efi_call_proto(efi_file_handle, get_info, h, &info_guid,
- &info_sz, NULL);
+ status = h->get_info(h, &info_guid, &info_sz, NULL);
if (status != EFI_BUFFER_TOO_SMALL) {
- efi_printk(sys_table_arg, "Failed to get file info size\n");
+ efi_printk("Failed to get file info size\n");
return status;
}
grow:
- status = efi_call_early(allocate_pool, EFI_LOADER_DATA,
- info_sz, (void **)&info);
+ status = efi_bs_call(allocate_pool, EFI_LOADER_DATA, info_sz,
+ (void **)&info);
if (status != EFI_SUCCESS) {
- efi_printk(sys_table_arg, "Failed to alloc mem for file info\n");
+ efi_printk("Failed to alloc mem for file info\n");
return status;
}
- status = efi_call_proto(efi_file_handle, get_info, h, &info_guid,
- &info_sz, info);
+ status = h->get_info(h, &info_guid, &info_sz, info);
if (status == EFI_BUFFER_TOO_SMALL) {
- efi_call_early(free_pool, info);
+ efi_bs_call(free_pool, info);
goto grow;
}
*file_sz = info->file_size;
- efi_call_early(free_pool, info);
+ efi_bs_call(free_pool, info);
if (status != EFI_SUCCESS)
- efi_printk(sys_table_arg, "Failed to get initrd info\n");
+ efi_printk("Failed to get initrd info\n");
return status;
}
-static efi_status_t efi_file_read(void *handle, unsigned long *size, void *addr)
+static efi_status_t efi_file_read(efi_file_handle_t *handle,
+ unsigned long *size, void *addr)
{
- return efi_call_proto(efi_file_handle, read, handle, size, addr);
+ return handle->read(handle, size, addr);
}
-static efi_status_t efi_file_close(void *handle)
+static efi_status_t efi_file_close(efi_file_handle_t *handle)
{
- return efi_call_proto(efi_file_handle, close, handle);
+ return handle->close(handle);
}
-static efi_status_t efi_open_volume(efi_system_table_t *sys_table_arg,
- efi_loaded_image_t *image,
+static efi_status_t efi_open_volume(efi_loaded_image_t *image,
efi_file_handle_t **__fh)
{
efi_file_io_interface_t *io;
efi_file_handle_t *fh;
efi_guid_t fs_proto = EFI_FILE_SYSTEM_GUID;
efi_status_t status;
- void *handle = (void *)(unsigned long)efi_table_attr(efi_loaded_image,
- device_handle,
- image);
+ efi_handle_t handle = image->device_handle;
- status = efi_call_early(handle_protocol, handle,
- &fs_proto, (void **)&io);
+ status = efi_bs_call(handle_protocol, handle, &fs_proto, (void **)&io);
if (status != EFI_SUCCESS) {
- efi_printk(sys_table_arg, "Failed to handle fs_proto\n");
+ efi_printk("Failed to handle fs_proto\n");
return status;
}
- status = efi_call_proto(efi_file_io_interface, open_volume, io, &fh);
+ status = io->open_volume(io, &fh);
if (status != EFI_SUCCESS)
- efi_printk(sys_table_arg, "Failed to open volume\n");
+ efi_printk("Failed to open volume\n");
else
*__fh = fh;
@@ -465,11 +454,11 @@ efi_status_t efi_parse_options(char const *cmdline)
str = strstr(cmdline, "nokaslr");
if (str == cmdline || (str && str > cmdline && *(str - 1) == ' '))
- __nokaslr = 1;
+ efi_nokaslr = true;
str = strstr(cmdline, "quiet");
if (str == cmdline || (str && str > cmdline && *(str - 1) == ' '))
- __quiet = 1;
+ efi_quiet = true;
/*
* If no EFI parameters were specified on the cmdline we've got
@@ -489,18 +478,28 @@ efi_status_t efi_parse_options(char const *cmdline)
while (*str && *str != ' ') {
if (!strncmp(str, "nochunk", 7)) {
str += strlen("nochunk");
- __chunk_size = -1UL;
+ efi_chunk_size = -1UL;
}
if (!strncmp(str, "novamap", 7)) {
str += strlen("novamap");
- __novamap = 1;
+ efi_novamap = true;
}
if (IS_ENABLED(CONFIG_EFI_SOFT_RESERVE) &&
!strncmp(str, "nosoftreserve", 7)) {
str += strlen("nosoftreserve");
- efi_nosoftreserve = 1;
+ efi_nosoftreserve = true;
+ }
+
+ if (!strncmp(str, "disable_early_pci_dma", 21)) {
+ str += strlen("disable_early_pci_dma");
+ efi_disable_pci_dma = true;
+ }
+
+ if (!strncmp(str, "no_disable_early_pci_dma", 24)) {
+ str += strlen("no_disable_early_pci_dma");
+ efi_disable_pci_dma = false;
}
/* Group words together, delimited by "," */
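
Each option is matched with strncmp() against the literal's full length and then skipped with strlen() of the same literal; the nosoftreserve comparison length is corrected to 13 above, since matching only 7 characters would accept any token that merely begins with "nosoftr". The recurring pattern, factored into a sketch (helper name is illustrative):

	/* Sketch: match a complete option literal and advance past it. */
	static bool match_opt(const char **str, const char *opt)
	{
		size_t len = strlen(opt);

		if (strncmp(*str, opt, len) != 0)
			return false;
		*str += len;	/* mirrors the str += strlen(...) idiom above */
		return true;
	}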
@@ -520,8 +519,7 @@ efi_status_t efi_parse_options(char const *cmdline)
* We only support loading a file from the same filesystem as
* the kernel image.
*/
-efi_status_t handle_cmdline_files(efi_system_table_t *sys_table_arg,
- efi_loaded_image_t *image,
+efi_status_t handle_cmdline_files(efi_loaded_image_t *image,
char *cmd_line, char *option_string,
unsigned long max_addr,
unsigned long *load_addr,
@@ -570,10 +568,10 @@ efi_status_t handle_cmdline_files(efi_system_table_t *sys_table_arg,
if (!nr_files)
return EFI_SUCCESS;
- status = efi_call_early(allocate_pool, EFI_LOADER_DATA,
- nr_files * sizeof(*files), (void **)&files);
+ status = efi_bs_call(allocate_pool, EFI_LOADER_DATA,
+ nr_files * sizeof(*files), (void **)&files);
if (status != EFI_SUCCESS) {
- pr_efi_err(sys_table_arg, "Failed to alloc mem for file handle list\n");
+ pr_efi_err("Failed to alloc mem for file handle list\n");
goto fail;
}
@@ -612,13 +610,13 @@ efi_status_t handle_cmdline_files(efi_system_table_t *sys_table_arg,
/* Only open the volume once. */
if (!i) {
- status = efi_open_volume(sys_table_arg, image, &fh);
+ status = efi_open_volume(image, &fh);
if (status != EFI_SUCCESS)
goto free_files;
}
- status = efi_file_size(sys_table_arg, fh, filename_16,
- (void **)&file->handle, &file->size);
+ status = efi_file_size(fh, filename_16, (void **)&file->handle,
+ &file->size);
if (status != EFI_SUCCESS)
goto close_handles;
@@ -633,16 +631,16 @@ efi_status_t handle_cmdline_files(efi_system_table_t *sys_table_arg,
* so allocate enough memory for all the files. This is used
* for loading multiple files.
*/
- status = efi_high_alloc(sys_table_arg, file_size_total, 0x1000,
- &file_addr, max_addr);
+ status = efi_high_alloc(file_size_total, 0x1000, &file_addr,
+ max_addr);
if (status != EFI_SUCCESS) {
- pr_efi_err(sys_table_arg, "Failed to alloc highmem for files\n");
+ pr_efi_err("Failed to alloc highmem for files\n");
goto close_handles;
}
/* We've run out of free low memory. */
if (file_addr > max_addr) {
- pr_efi_err(sys_table_arg, "We've run out of free low memory\n");
+ pr_efi_err("We've run out of free low memory\n");
status = EFI_INVALID_PARAMETER;
goto free_file_total;
}
@@ -655,8 +653,8 @@ efi_status_t handle_cmdline_files(efi_system_table_t *sys_table_arg,
while (size) {
unsigned long chunksize;
- if (IS_ENABLED(CONFIG_X86) && size > __chunk_size)
- chunksize = __chunk_size;
+ if (IS_ENABLED(CONFIG_X86) && size > efi_chunk_size)
+ chunksize = efi_chunk_size;
else
chunksize = size;
@@ -664,7 +662,7 @@ efi_status_t handle_cmdline_files(efi_system_table_t *sys_table_arg,
&chunksize,
(void *)addr);
if (status != EFI_SUCCESS) {
- pr_efi_err(sys_table_arg, "Failed to read file\n");
+ pr_efi_err("Failed to read file\n");
goto free_file_total;
}
addr += chunksize;
@@ -676,7 +674,7 @@ efi_status_t handle_cmdline_files(efi_system_table_t *sys_table_arg,
}
- efi_call_early(free_pool, files);
+ efi_bs_call(free_pool, files);
*load_addr = file_addr;
*load_size = file_size_total;
@@ -684,13 +682,13 @@ efi_status_t handle_cmdline_files(efi_system_table_t *sys_table_arg,
return status;
free_file_total:
- efi_free(sys_table_arg, file_size_total, file_addr);
+ efi_free(file_size_total, file_addr);
close_handles:
for (k = j; k < i; k++)
efi_file_close(files[k].handle);
free_files:
- efi_call_early(free_pool, files);
+ efi_bs_call(free_pool, files);
fail:
*load_addr = 0;
*load_size = 0;
@@ -707,8 +705,7 @@ fail:
* address is not available the lowest available address will
* be used.
*/
-efi_status_t efi_relocate_kernel(efi_system_table_t *sys_table_arg,
- unsigned long *image_addr,
+efi_status_t efi_relocate_kernel(unsigned long *image_addr,
unsigned long image_size,
unsigned long alloc_size,
unsigned long preferred_addr,
@@ -737,20 +734,19 @@ efi_status_t efi_relocate_kernel(efi_system_table_t *sys_table_arg,
* as possible while respecting the required alignment.
*/
nr_pages = round_up(alloc_size, EFI_ALLOC_ALIGN) / EFI_PAGE_SIZE;
- status = efi_call_early(allocate_pages,
- EFI_ALLOCATE_ADDRESS, EFI_LOADER_DATA,
- nr_pages, &efi_addr);
+ status = efi_bs_call(allocate_pages, EFI_ALLOCATE_ADDRESS,
+ EFI_LOADER_DATA, nr_pages, &efi_addr);
new_addr = efi_addr;
/*
* If preferred address allocation failed allocate as low as
* possible.
*/
if (status != EFI_SUCCESS) {
- status = efi_low_alloc_above(sys_table_arg, alloc_size,
- alignment, &new_addr, min_addr);
+ status = efi_low_alloc_above(alloc_size, alignment, &new_addr,
+ min_addr);
}
if (status != EFI_SUCCESS) {
- pr_efi_err(sys_table_arg, "Failed to allocate usable memory for kernel.\n");
+ pr_efi_err("Failed to allocate usable memory for kernel.\n");
return status;
}
@@ -824,8 +820,7 @@ static u8 *efi_utf16_to_utf8(u8 *dst, const u16 *src, int n)
* Size of memory allocated return in *cmd_line_len.
* Returns NULL on error.
*/
-char *efi_convert_cmdline(efi_system_table_t *sys_table_arg,
- efi_loaded_image_t *image,
+char *efi_convert_cmdline(efi_loaded_image_t *image,
int *cmd_line_len)
{
const u16 *s2;
@@ -854,8 +849,8 @@ char *efi_convert_cmdline(efi_system_table_t *sys_table_arg,
options_bytes++; /* NUL termination */
- status = efi_high_alloc(sys_table_arg, options_bytes, 0,
- &cmdline_addr, MAX_CMDLINE_ADDRESS);
+ status = efi_high_alloc(options_bytes, 0, &cmdline_addr,
+ MAX_CMDLINE_ADDRESS);
if (status != EFI_SUCCESS)
return NULL;
@@ -877,24 +872,26 @@ char *efi_convert_cmdline(efi_system_table_t *sys_table_arg,
* specific structure may be passed to the function via priv. The client
* function may be called multiple times.
*/
-efi_status_t efi_exit_boot_services(efi_system_table_t *sys_table_arg,
- void *handle,
+efi_status_t efi_exit_boot_services(void *handle,
struct efi_boot_memmap *map,
void *priv,
efi_exit_boot_map_processing priv_func)
{
efi_status_t status;
- status = efi_get_memory_map(sys_table_arg, map);
+ status = efi_get_memory_map(map);
if (status != EFI_SUCCESS)
goto fail;
- status = priv_func(sys_table_arg, map, priv);
+ status = priv_func(map, priv);
if (status != EFI_SUCCESS)
goto free_map;
- status = efi_call_early(exit_boot_services, handle, *map->key_ptr);
+ if (efi_disable_pci_dma)
+ efi_pci_disable_bridge_busmaster();
+
+ status = efi_bs_call(exit_boot_services, handle, *map->key_ptr);
if (status == EFI_INVALID_PARAMETER) {
/*
@@ -911,23 +908,23 @@ efi_status_t efi_exit_boot_services(efi_system_table_t *sys_table_arg,
* to get_memory_map() is expected to succeed here.
*/
*map->map_size = *map->buff_size;
- status = efi_call_early(get_memory_map,
- map->map_size,
- *map->map,
- map->key_ptr,
- map->desc_size,
- map->desc_ver);
+ status = efi_bs_call(get_memory_map,
+ map->map_size,
+ *map->map,
+ map->key_ptr,
+ map->desc_size,
+ map->desc_ver);
/* exit_boot_services() was called, thus cannot free */
if (status != EFI_SUCCESS)
goto fail;
- status = priv_func(sys_table_arg, map, priv);
+ status = priv_func(map, priv);
/* exit_boot_services() was called, thus cannot free */
if (status != EFI_SUCCESS)
goto fail;
- status = efi_call_early(exit_boot_services, handle, *map->key_ptr);
+ status = efi_bs_call(exit_boot_services, handle, *map->key_ptr);
}
/* exit_boot_services() was called, thus cannot free */
@@ -937,38 +934,31 @@ efi_status_t efi_exit_boot_services(efi_system_table_t *sys_table_arg,
return EFI_SUCCESS;
free_map:
- efi_call_early(free_pool, *map->map);
+ efi_bs_call(free_pool, *map->map);
fail:
return status;
}
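
The retry branch above exists because firmware event handlers may allocate memory between get_memory_map() and exit_boot_services(), invalidating the map key; the second attempt must reuse the existing buffer because allocation is no longer permitted. That fixes the contract for priv_func: it can run twice, and on the second run boot services are effectively gone. A caller-side sketch (all names hypothetical):

	struct my_state {
		efi_memory_desc_t *final_map;	/* hypothetical caller state */
	};

	static efi_status_t my_exit_boot_func(struct efi_boot_memmap *map, void *priv)
	{
		struct my_state *p = priv;

		/* May run a second time after the retry: keep it idempotent,
		 * and never allocate or print from here. */
		p->final_map = *map->map;
		return EFI_SUCCESS;
	}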
-#define GET_EFI_CONFIG_TABLE(bits) \
-static void *get_efi_config_table##bits(efi_system_table_t *_sys_table, \
- efi_guid_t guid) \
-{ \
- efi_system_table_##bits##_t *sys_table; \
- efi_config_table_##bits##_t *tables; \
- int i; \
- \
- sys_table = (typeof(sys_table))_sys_table; \
- tables = (typeof(tables))(unsigned long)sys_table->tables; \
- \
- for (i = 0; i < sys_table->nr_tables; i++) { \
- if (efi_guidcmp(tables[i].guid, guid) != 0) \
- continue; \
- \
- return (void *)(unsigned long)tables[i].table; \
- } \
- \
- return NULL; \
+void *get_efi_config_table(efi_guid_t guid)
+{
+ unsigned long tables = efi_table_attr(efi_system_table(), tables);
+ int nr_tables = efi_table_attr(efi_system_table(), nr_tables);
+ int i;
+
+ for (i = 0; i < nr_tables; i++) {
+ efi_config_table_t *t = (void *)tables;
+
+ if (efi_guidcmp(t->guid, guid) == 0)
+ return efi_table_attr(t, table);
+
+ tables += efi_is_native() ? sizeof(efi_config_table_t)
+ : sizeof(efi_config_table_32_t);
+ }
+ return NULL;
}
-GET_EFI_CONFIG_TABLE(32)
-GET_EFI_CONFIG_TABLE(64)
-void *get_efi_config_table(efi_system_table_t *sys_table, efi_guid_t guid)
+void efi_char16_printk(efi_char16_t *str)
{
- if (efi_is_64bit())
- return get_efi_config_table64(sys_table, guid);
- else
- return get_efi_config_table32(sys_table, guid);
+ efi_call_proto(efi_table_attr(efi_system_table(), con_out),
+ output_string, str);
}
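
The variable stride in the loop above is what lets one implementation serve both native and mixed-mode firmware: under mixed mode (64-bit kernel on 32-bit firmware) the configuration table array uses the 32-bit layout, so the walk advances by sizeof(efi_config_table_32_t). Typical use is a single GUID lookup, mirroring get_fdt() in fdt.c below:

	/* Usage sketch: fetch the firmware-installed DTB pointer, if any. */
	static void *find_dtb(void)
	{
		return get_efi_config_table(DEVICE_TREE_GUID);	/* NULL if absent */
	}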
diff --git a/drivers/firmware/efi/libstub/efistub.h b/drivers/firmware/efi/libstub/efistub.h
index 05739ae013c8..c244b165005e 100644
--- a/drivers/firmware/efi/libstub/efistub.h
+++ b/drivers/firmware/efi/libstub/efistub.h
@@ -25,22 +25,30 @@
#define EFI_ALLOC_ALIGN EFI_PAGE_SIZE
#endif
-extern int __pure nokaslr(void);
-extern int __pure is_quiet(void);
-extern int __pure novamap(void);
+#ifdef CONFIG_ARM
+#define __efistub_global __section(.data)
+#else
+#define __efistub_global
+#endif
+
+extern bool __pure nokaslr(void);
+extern bool __pure is_quiet(void);
+extern bool __pure novamap(void);
+
+extern __pure efi_system_table_t *efi_system_table(void);
-#define pr_efi(sys_table, msg) do { \
- if (!is_quiet()) efi_printk(sys_table, "EFI stub: "msg); \
+#define pr_efi(msg) do { \
+ if (!is_quiet()) efi_printk("EFI stub: "msg); \
} while (0)
-#define pr_efi_err(sys_table, msg) efi_printk(sys_table, "EFI stub: ERROR: "msg)
+#define pr_efi_err(msg) efi_printk("EFI stub: ERROR: "msg)
-void efi_char16_printk(efi_system_table_t *, efi_char16_t *);
+void efi_char16_printk(efi_char16_t *);
-unsigned long get_dram_base(efi_system_table_t *sys_table_arg);
+unsigned long get_dram_base(void);
-efi_status_t allocate_new_fdt_and_exit_boot(efi_system_table_t *sys_table,
- void *handle,
+efi_status_t allocate_new_fdt_and_exit_boot(void *handle,
unsigned long *new_fdt_addr,
unsigned long max_addr,
u64 initrd_addr, u64 initrd_size,
@@ -48,22 +56,20 @@ efi_status_t allocate_new_fdt_and_exit_boot(efi_system_table_t *sys_table,
unsigned long fdt_addr,
unsigned long fdt_size);
-void *get_fdt(efi_system_table_t *sys_table, unsigned long *fdt_size);
+void *get_fdt(unsigned long *fdt_size);
void efi_get_virtmap(efi_memory_desc_t *memory_map, unsigned long map_size,
unsigned long desc_size, efi_memory_desc_t *runtime_map,
int *count);
-efi_status_t efi_get_random_bytes(efi_system_table_t *sys_table,
- unsigned long size, u8 *out);
+efi_status_t efi_get_random_bytes(unsigned long size, u8 *out);
-efi_status_t efi_random_alloc(efi_system_table_t *sys_table_arg,
- unsigned long size, unsigned long align,
+efi_status_t efi_random_alloc(unsigned long size, unsigned long align,
unsigned long *addr, unsigned long random_seed);
-efi_status_t check_platform_features(efi_system_table_t *sys_table_arg);
+efi_status_t check_platform_features(void);
-void *get_efi_config_table(efi_system_table_t *sys_table, efi_guid_t guid);
+void *get_efi_config_table(efi_guid_t guid);
/* Helper macros for the usual case of using simple C variables: */
#ifndef fdt_setprop_inplace_var
@@ -76,4 +82,12 @@ void *get_efi_config_table(efi_system_table_t *sys_table, efi_guid_t guid);
fdt_setprop((fdt), (node_offset), (name), &(var), sizeof(var))
#endif
+#define get_efi_var(name, vendor, ...) \
+ efi_rt_call(get_variable, (efi_char16_t *)(name), \
+ (efi_guid_t *)(vendor), __VA_ARGS__)
+
+#define set_efi_var(name, vendor, ...) \
+ efi_rt_call(set_variable, (efi_char16_t *)(name), \
+ (efi_guid_t *)(vendor), __VA_ARGS__)
+
#endif
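
The new get_efi_var()/set_efi_var() wrappers route EFI variable access through efi_rt_call() so callers never touch the runtime services table directly. A usage sketch along the lines of the SecureBoot probe in secureboot.c; the helper itself is illustrative, and the L"..." literal is UTF-16 only because the stub builds with -fshort-wchar:

	static u8 read_secure_boot_flag(void)
	{
		efi_guid_t var_guid = EFI_GLOBAL_VARIABLE_GUID;
		unsigned long size = sizeof(u8);
		u8 val = 0;

		if (get_efi_var(L"SecureBoot", &var_guid, NULL, &size, &val)
		    != EFI_SUCCESS)
			return 0;	/* treat a missing variable as disabled */
		return val;
	}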
diff --git a/drivers/firmware/efi/libstub/fdt.c b/drivers/firmware/efi/libstub/fdt.c
index 0bf0190917e0..0a91e5232127 100644
--- a/drivers/firmware/efi/libstub/fdt.c
+++ b/drivers/firmware/efi/libstub/fdt.c
@@ -16,7 +16,7 @@
#define EFI_DT_ADDR_CELLS_DEFAULT 2
#define EFI_DT_SIZE_CELLS_DEFAULT 2
-static void fdt_update_cell_size(efi_system_table_t *sys_table, void *fdt)
+static void fdt_update_cell_size(void *fdt)
{
int offset;
@@ -27,8 +27,7 @@ static void fdt_update_cell_size(efi_system_table_t *sys_table, void *fdt)
fdt_setprop_u32(fdt, offset, "#size-cells", EFI_DT_SIZE_CELLS_DEFAULT);
}
-static efi_status_t update_fdt(efi_system_table_t *sys_table, void *orig_fdt,
- unsigned long orig_fdt_size,
+static efi_status_t update_fdt(void *orig_fdt, unsigned long orig_fdt_size,
void *fdt, int new_fdt_size, char *cmdline_ptr,
u64 initrd_addr, u64 initrd_size)
{
@@ -40,7 +39,7 @@ static efi_status_t update_fdt(efi_system_table_t *sys_table, void *orig_fdt,
/* Do some checks on provided FDT, if it exists: */
if (orig_fdt) {
if (fdt_check_header(orig_fdt)) {
- pr_efi_err(sys_table, "Device Tree header not valid!\n");
+ pr_efi_err("Device Tree header not valid!\n");
return EFI_LOAD_ERROR;
}
/*
@@ -48,7 +47,7 @@ static efi_status_t update_fdt(efi_system_table_t *sys_table, void *orig_fdt,
* configuration table:
*/
if (orig_fdt_size && fdt_totalsize(orig_fdt) > orig_fdt_size) {
- pr_efi_err(sys_table, "Truncated device tree! foo!\n");
+ pr_efi_err("Truncated device tree! foo!\n");
return EFI_LOAD_ERROR;
}
}
@@ -62,7 +61,7 @@ static efi_status_t update_fdt(efi_system_table_t *sys_table, void *orig_fdt,
* Any failure from the following function is
* non-critical:
*/
- fdt_update_cell_size(sys_table, fdt);
+ fdt_update_cell_size(fdt);
}
}
@@ -111,7 +110,7 @@ static efi_status_t update_fdt(efi_system_table_t *sys_table, void *orig_fdt,
/* Add FDT entries for EFI runtime services in chosen node. */
node = fdt_subnode_offset(fdt, 0, "chosen");
- fdt_val64 = cpu_to_fdt64((u64)(unsigned long)sys_table);
+ fdt_val64 = cpu_to_fdt64((u64)(unsigned long)efi_system_table());
status = fdt_setprop_var(fdt, node, "linux,uefi-system-table", fdt_val64);
if (status)
@@ -140,7 +139,7 @@ static efi_status_t update_fdt(efi_system_table_t *sys_table, void *orig_fdt,
if (IS_ENABLED(CONFIG_RANDOMIZE_BASE)) {
efi_status_t efi_status;
- efi_status = efi_get_random_bytes(sys_table, sizeof(fdt_val64),
+ efi_status = efi_get_random_bytes(sizeof(fdt_val64),
(u8 *)&fdt_val64);
if (efi_status == EFI_SUCCESS) {
status = fdt_setprop_var(fdt, node, "kaslr-seed", fdt_val64);
@@ -210,8 +209,7 @@ struct exit_boot_struct {
void *new_fdt_addr;
};
-static efi_status_t exit_boot_func(efi_system_table_t *sys_table_arg,
- struct efi_boot_memmap *map,
+static efi_status_t exit_boot_func(struct efi_boot_memmap *map,
void *priv)
{
struct exit_boot_struct *p = priv;
@@ -244,8 +242,7 @@ static efi_status_t exit_boot_func(efi_system_table_t *sys_table_arg,
* with the final memory map in it.
*/
-efi_status_t allocate_new_fdt_and_exit_boot(efi_system_table_t *sys_table,
- void *handle,
+efi_status_t allocate_new_fdt_and_exit_boot(void *handle,
unsigned long *new_fdt_addr,
unsigned long max_addr,
u64 initrd_addr, u64 initrd_size,
@@ -275,19 +272,19 @@ efi_status_t allocate_new_fdt_and_exit_boot(efi_system_table_t *sys_table,
* subsequent allocations adding entries, since they could not affect
* the number of EFI_MEMORY_RUNTIME regions.
*/
- status = efi_get_memory_map(sys_table, &map);
+ status = efi_get_memory_map(&map);
if (status != EFI_SUCCESS) {
- pr_efi_err(sys_table, "Unable to retrieve UEFI memory map.\n");
+ pr_efi_err("Unable to retrieve UEFI memory map.\n");
return status;
}
- pr_efi(sys_table, "Exiting boot services and installing virtual address map...\n");
+ pr_efi("Exiting boot services and installing virtual address map...\n");
map.map = &memory_map;
- status = efi_high_alloc(sys_table, MAX_FDT_SIZE, EFI_FDT_ALIGN,
+ status = efi_high_alloc(MAX_FDT_SIZE, EFI_FDT_ALIGN,
new_fdt_addr, max_addr);
if (status != EFI_SUCCESS) {
- pr_efi_err(sys_table, "Unable to allocate memory for new device tree.\n");
+ pr_efi_err("Unable to allocate memory for new device tree.\n");
goto fail;
}
@@ -295,16 +292,16 @@ efi_status_t allocate_new_fdt_and_exit_boot(efi_system_table_t *sys_table,
* Now that we have done our final memory allocation (and free)
* we can get the memory map key needed for exit_boot_services().
*/
- status = efi_get_memory_map(sys_table, &map);
+ status = efi_get_memory_map(&map);
if (status != EFI_SUCCESS)
goto fail_free_new_fdt;
- status = update_fdt(sys_table, (void *)fdt_addr, fdt_size,
+ status = update_fdt((void *)fdt_addr, fdt_size,
(void *)*new_fdt_addr, MAX_FDT_SIZE, cmdline_ptr,
initrd_addr, initrd_size);
if (status != EFI_SUCCESS) {
- pr_efi_err(sys_table, "Unable to construct new device tree.\n");
+ pr_efi_err("Unable to construct new device tree.\n");
goto fail_free_new_fdt;
}
@@ -313,7 +310,7 @@ efi_status_t allocate_new_fdt_and_exit_boot(efi_system_table_t *sys_table,
priv.runtime_entry_count = &runtime_entry_count;
priv.new_fdt_addr = (void *)*new_fdt_addr;
- status = efi_exit_boot_services(sys_table, handle, &map, &priv, exit_boot_func);
+ status = efi_exit_boot_services(handle, &map, &priv, exit_boot_func);
if (status == EFI_SUCCESS) {
efi_set_virtual_address_map_t *svam;
@@ -322,7 +319,7 @@ efi_status_t allocate_new_fdt_and_exit_boot(efi_system_table_t *sys_table,
return EFI_SUCCESS;
/* Install the new virtual address map */
- svam = sys_table->runtime->set_virtual_address_map;
+ svam = efi_system_table()->runtime->set_virtual_address_map;
status = svam(runtime_entry_count * desc_size, desc_size,
desc_ver, runtime_map);
@@ -350,28 +347,28 @@ efi_status_t allocate_new_fdt_and_exit_boot(efi_system_table_t *sys_table,
return EFI_SUCCESS;
}
- pr_efi_err(sys_table, "Exit boot services failed.\n");
+ pr_efi_err("Exit boot services failed.\n");
fail_free_new_fdt:
- efi_free(sys_table, MAX_FDT_SIZE, *new_fdt_addr);
+ efi_free(MAX_FDT_SIZE, *new_fdt_addr);
fail:
- sys_table->boottime->free_pool(runtime_map);
+ efi_system_table()->boottime->free_pool(runtime_map);
return EFI_LOAD_ERROR;
}
-void *get_fdt(efi_system_table_t *sys_table, unsigned long *fdt_size)
+void *get_fdt(unsigned long *fdt_size)
{
void *fdt;
- fdt = get_efi_config_table(sys_table, DEVICE_TREE_GUID);
+ fdt = get_efi_config_table(DEVICE_TREE_GUID);
if (!fdt)
return NULL;
if (fdt_check_header(fdt) != 0) {
- pr_efi_err(sys_table, "Invalid header detected on UEFI supplied FDT, ignoring ...\n");
+ pr_efi_err("Invalid header detected on UEFI supplied FDT, ignoring ...\n");
return NULL;
}
*fdt_size = fdt_totalsize(fdt);
diff --git a/drivers/firmware/efi/libstub/gop.c b/drivers/firmware/efi/libstub/gop.c
index b7bf1e993b8b..55e6b3f286fe 100644
--- a/drivers/firmware/efi/libstub/gop.c
+++ b/drivers/firmware/efi/libstub/gop.c
@@ -10,6 +10,8 @@
#include <asm/efi.h>
#include <asm/setup.h>
+#include "efistub.h"
+
static void find_bits(unsigned long mask, u8 *pos, u8 *size)
{
u8 first, len;
@@ -35,7 +37,7 @@ static void find_bits(unsigned long mask, u8 *pos, u8 *size)
static void
setup_pixel_info(struct screen_info *si, u32 pixels_per_scan_line,
- struct efi_pixel_bitmask pixel_info, int pixel_format)
+ efi_pixel_bitmask_t pixel_info, int pixel_format)
{
if (pixel_format == PIXEL_RGB_RESERVED_8BIT_PER_COLOR) {
si->lfb_depth = 32;
@@ -83,48 +85,42 @@ setup_pixel_info(struct screen_info *si, u32 pixels_per_scan_line,
}
}
-static efi_status_t
-setup_gop32(efi_system_table_t *sys_table_arg, struct screen_info *si,
- efi_guid_t *proto, unsigned long size, void **gop_handle)
+static efi_status_t setup_gop(struct screen_info *si, efi_guid_t *proto,
+ unsigned long size, void **handles)
{
- struct efi_graphics_output_protocol_32 *gop32, *first_gop;
- unsigned long nr_gops;
+ efi_graphics_output_protocol_t *gop, *first_gop;
u16 width, height;
u32 pixels_per_scan_line;
u32 ext_lfb_base;
- u64 fb_base;
- struct efi_pixel_bitmask pixel_info;
+ efi_physical_addr_t fb_base;
+ efi_pixel_bitmask_t pixel_info;
int pixel_format;
efi_status_t status;
- u32 *handles = (u32 *)(unsigned long)gop_handle;
+ efi_handle_t h;
int i;
first_gop = NULL;
- gop32 = NULL;
+ gop = NULL;
- nr_gops = size / sizeof(u32);
- for (i = 0; i < nr_gops; i++) {
- struct efi_graphics_output_protocol_mode_32 *mode;
- struct efi_graphics_output_mode_info *info = NULL;
+ for_each_efi_handle(h, handles, size, i) {
+ efi_graphics_output_protocol_mode_t *mode;
+ efi_graphics_output_mode_info_t *info = NULL;
efi_guid_t conout_proto = EFI_CONSOLE_OUT_DEVICE_GUID;
bool conout_found = false;
void *dummy = NULL;
- efi_handle_t h = (efi_handle_t)(unsigned long)handles[i];
- u64 current_fb_base;
+ efi_physical_addr_t current_fb_base;
- status = efi_call_early(handle_protocol, h,
- proto, (void **)&gop32);
+ status = efi_bs_call(handle_protocol, h, proto, (void **)&gop);
if (status != EFI_SUCCESS)
continue;
- status = efi_call_early(handle_protocol, h,
- &conout_proto, &dummy);
+ status = efi_bs_call(handle_protocol, h, &conout_proto, &dummy);
if (status == EFI_SUCCESS)
conout_found = true;
- mode = (void *)(unsigned long)gop32->mode;
- info = (void *)(unsigned long)mode->info;
- current_fb_base = mode->frame_buffer_base;
+ mode = efi_table_attr(gop, mode);
+ info = efi_table_attr(mode, info);
+ current_fb_base = efi_table_attr(mode, frame_buffer_base);
if ((!first_gop || conout_found) &&
info->pixel_format != PIXEL_BLT_ONLY) {
@@ -146,104 +142,7 @@ setup_gop32(efi_system_table_t *sys_table_arg, struct screen_info *si,
* Once we've found a GOP supporting ConOut,
* don't bother looking any further.
*/
- first_gop = gop32;
- if (conout_found)
- break;
- }
- }
-
- /* Did we find any GOPs? */
- if (!first_gop)
- return EFI_NOT_FOUND;
-
- /* EFI framebuffer */
- si->orig_video_isVGA = VIDEO_TYPE_EFI;
-
- si->lfb_width = width;
- si->lfb_height = height;
- si->lfb_base = fb_base;
-
- ext_lfb_base = (u64)(unsigned long)fb_base >> 32;
- if (ext_lfb_base) {
- si->capabilities |= VIDEO_CAPABILITY_64BIT_BASE;
- si->ext_lfb_base = ext_lfb_base;
- }
-
- si->pages = 1;
-
- setup_pixel_info(si, pixels_per_scan_line, pixel_info, pixel_format);
-
- si->lfb_size = si->lfb_linelength * si->lfb_height;
-
- si->capabilities |= VIDEO_CAPABILITY_SKIP_QUIRKS;
-
- return EFI_SUCCESS;
-}
-
-static efi_status_t
-setup_gop64(efi_system_table_t *sys_table_arg, struct screen_info *si,
- efi_guid_t *proto, unsigned long size, void **gop_handle)
-{
- struct efi_graphics_output_protocol_64 *gop64, *first_gop;
- unsigned long nr_gops;
- u16 width, height;
- u32 pixels_per_scan_line;
- u32 ext_lfb_base;
- u64 fb_base;
- struct efi_pixel_bitmask pixel_info;
- int pixel_format;
- efi_status_t status;
- u64 *handles = (u64 *)(unsigned long)gop_handle;
- int i;
-
- first_gop = NULL;
- gop64 = NULL;
-
- nr_gops = size / sizeof(u64);
- for (i = 0; i < nr_gops; i++) {
- struct efi_graphics_output_protocol_mode_64 *mode;
- struct efi_graphics_output_mode_info *info = NULL;
- efi_guid_t conout_proto = EFI_CONSOLE_OUT_DEVICE_GUID;
- bool conout_found = false;
- void *dummy = NULL;
- efi_handle_t h = (efi_handle_t)(unsigned long)handles[i];
- u64 current_fb_base;
-
- status = efi_call_early(handle_protocol, h,
- proto, (void **)&gop64);
- if (status != EFI_SUCCESS)
- continue;
-
- status = efi_call_early(handle_protocol, h,
- &conout_proto, &dummy);
- if (status == EFI_SUCCESS)
- conout_found = true;
-
- mode = (void *)(unsigned long)gop64->mode;
- info = (void *)(unsigned long)mode->info;
- current_fb_base = mode->frame_buffer_base;
-
- if ((!first_gop || conout_found) &&
- info->pixel_format != PIXEL_BLT_ONLY) {
- /*
- * Systems that use the UEFI Console Splitter may
- * provide multiple GOP devices, not all of which are
- * backed by real hardware. The workaround is to search
- * for a GOP implementing the ConOut protocol, and if
- * one isn't found, to just fall back to the first GOP.
- */
- width = info->horizontal_resolution;
- height = info->vertical_resolution;
- pixel_format = info->pixel_format;
- pixel_info = info->pixel_information;
- pixels_per_scan_line = info->pixels_per_scan_line;
- fb_base = current_fb_base;
-
- /*
- * Once we've found a GOP supporting ConOut,
- * don't bother looking any further.
- */
- first_gop = gop64;
+ first_gop = gop;
if (conout_found)
break;
}
@@ -280,33 +179,25 @@ setup_gop64(efi_system_table_t *sys_table_arg, struct screen_info *si,
/*
* See if we have Graphics Output Protocol
*/
-efi_status_t efi_setup_gop(efi_system_table_t *sys_table_arg,
- struct screen_info *si, efi_guid_t *proto,
+efi_status_t efi_setup_gop(struct screen_info *si, efi_guid_t *proto,
unsigned long size)
{
efi_status_t status;
void **gop_handle = NULL;
- status = efi_call_early(allocate_pool, EFI_LOADER_DATA,
- size, (void **)&gop_handle);
+ status = efi_bs_call(allocate_pool, EFI_LOADER_DATA, size,
+ (void **)&gop_handle);
if (status != EFI_SUCCESS)
return status;
- status = efi_call_early(locate_handle,
- EFI_LOCATE_BY_PROTOCOL,
- proto, NULL, &size, gop_handle);
+ status = efi_bs_call(locate_handle, EFI_LOCATE_BY_PROTOCOL, proto, NULL,
+ &size, gop_handle);
if (status != EFI_SUCCESS)
goto free_handle;
- if (efi_is_64bit()) {
- status = setup_gop64(sys_table_arg, si, proto, size,
- gop_handle);
- } else {
- status = setup_gop32(sys_table_arg, si, proto, size,
- gop_handle);
- }
+ status = setup_gop(si, proto, size, gop_handle);
free_handle:
- efi_call_early(free_pool, gop_handle);
+ efi_bs_call(free_pool, gop_handle);
return status;
}
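
for_each_efi_handle() hides the same native/mixed-mode split for handle buffers: locate_handle() hands back pointer-sized handles on native firmware but u32 entries under mixed mode. The in-tree macro is not part of this extract; a plausible sketch of what it must do:

	/* Assumed shape, inferred from the uses above -- not the in-tree macro. */
	#define for_each_efi_handle(handle, array, size, i)			\
		for (i = 0;							\
		     i < (size) / (efi_is_native() ? sizeof(efi_handle_t)	\
						   : sizeof(u32)) &&		\
			((handle) = efi_is_native()				\
				? ((efi_handle_t *)(array))[i]			\
				: (efi_handle_t)(unsigned long)((u32 *)(array))[i], 1); \
		     (i)++)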
diff --git a/drivers/firmware/efi/libstub/pci.c b/drivers/firmware/efi/libstub/pci.c
new file mode 100644
index 000000000000..b025e59b94df
--- /dev/null
+++ b/drivers/firmware/efi/libstub/pci.c
@@ -0,0 +1,114 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * PCI-related functions used by the EFI stub on multiple
+ * architectures.
+ *
+ * Copyright 2019 Google, LLC
+ */
+
+#include <linux/efi.h>
+#include <linux/pci.h>
+
+#include <asm/efi.h>
+
+#include "efistub.h"
+
+void efi_pci_disable_bridge_busmaster(void)
+{
+ efi_guid_t pci_proto = EFI_PCI_IO_PROTOCOL_GUID;
+ unsigned long pci_handle_size = 0;
+ efi_handle_t *pci_handle = NULL;
+ efi_handle_t handle;
+ efi_status_t status;
+ u16 class, command;
+ int i;
+
+ status = efi_bs_call(locate_handle, EFI_LOCATE_BY_PROTOCOL, &pci_proto,
+ NULL, &pci_handle_size, NULL);
+
+ if (status != EFI_BUFFER_TOO_SMALL) {
+ if (status != EFI_SUCCESS && status != EFI_NOT_FOUND)
+ pr_efi_err("Failed to locate PCI I/O handles'\n");
+ return;
+ }
+
+ status = efi_bs_call(allocate_pool, EFI_LOADER_DATA, pci_handle_size,
+ (void **)&pci_handle);
+ if (status != EFI_SUCCESS) {
+ pr_efi_err("Failed to allocate memory for 'pci_handle'\n");
+ return;
+ }
+
+ status = efi_bs_call(locate_handle, EFI_LOCATE_BY_PROTOCOL, &pci_proto,
+ NULL, &pci_handle_size, pci_handle);
+ if (status != EFI_SUCCESS) {
+ pr_efi_err("Failed to locate PCI I/O handles'\n");
+ goto free_handle;
+ }
+
+ for_each_efi_handle(handle, pci_handle, pci_handle_size, i) {
+ efi_pci_io_protocol_t *pci;
+ unsigned long segment_nr, bus_nr, device_nr, func_nr;
+
+ status = efi_bs_call(handle_protocol, handle, &pci_proto,
+ (void **)&pci);
+ if (status != EFI_SUCCESS)
+ continue;
+
+ /*
+ * Disregard devices living on bus 0 - these are not behind a
+ * bridge so no point in disconnecting them from their drivers.
+ */
+ status = efi_call_proto(pci, get_location, &segment_nr, &bus_nr,
+ &device_nr, &func_nr);
+ if (status != EFI_SUCCESS || bus_nr == 0)
+ continue;
+
+ /*
+ * Don't disconnect VGA controllers so we don't risk losing
+ * access to the framebuffer. Drivers for true PCIe graphics
+ * controllers that are behind a PCIe root port do not use
+ * DMA to implement the GOP framebuffer anyway [although they
+ * may use it in their implementation of Gop->Blt()], and so
+ * disabling DMA in the PCI bridge should not interfere with
+ * normal operation of the device.
+ */
+ status = efi_call_proto(pci, pci.read, EfiPciIoWidthUint16,
+ PCI_CLASS_DEVICE, 1, &class);
+ if (status != EFI_SUCCESS || class == PCI_CLASS_DISPLAY_VGA)
+ continue;
+
+ /* Disconnect this handle from all its drivers */
+ efi_bs_call(disconnect_controller, handle, NULL, NULL);
+ }
+
+ for_each_efi_handle(handle, pci_handle, pci_handle_size, i) {
+ efi_pci_io_protocol_t *pci;
+
+ status = efi_bs_call(handle_protocol, handle, &pci_proto,
+ (void **)&pci);
+ if (status != EFI_SUCCESS || !pci)
+ continue;
+
+ status = efi_call_proto(pci, pci.read, EfiPciIoWidthUint16,
+ PCI_CLASS_DEVICE, 1, &class);
+
+ if (status != EFI_SUCCESS || class != PCI_CLASS_BRIDGE_PCI)
+ continue;
+
+ /* Disable busmastering */
+ status = efi_call_proto(pci, pci.read, EfiPciIoWidthUint16,
+ PCI_COMMAND, 1, &command);
+ if (status != EFI_SUCCESS || !(command & PCI_COMMAND_MASTER))
+ continue;
+
+ command &= ~PCI_COMMAND_MASTER;
+ status = efi_call_proto(pci, pci.write, EfiPciIoWidthUint16,
+ PCI_COMMAND, 1, &command);
+ if (status != EFI_SUCCESS)
+ pr_efi_err("Failed to disable PCI busmastering\n");
+ }
+
+free_handle:
+ efi_bs_call(free_pool, pci_handle);
+}
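
The two passes above are deliberate: first disconnect UEFI drivers from every device behind a bridge (skipping bus 0 and VGA, per the comments) so nothing re-enables DMA behind our back, then clear the bus-master bit on the bridges themselves so in-flight DMA can no longer reach memory once ExitBootServices() returns. The knobs wired up in efi-stub-helper.c earlier in this diff are sub-options of the efi= kernel parameter (the efi= scan itself falls outside this extract):

	efi=disable_early_pci_dma       force the mitigation on at boot
	efi=no_disable_early_pci_dma    override CONFIG_EFI_DISABLE_PCI_DMA=y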
diff --git a/drivers/firmware/efi/libstub/random.c b/drivers/firmware/efi/libstub/random.c
index 35edd7cfb6a1..316ce9ff0193 100644
--- a/drivers/firmware/efi/libstub/random.c
+++ b/drivers/firmware/efi/libstub/random.c
@@ -9,38 +9,34 @@
#include "efistub.h"
-typedef struct efi_rng_protocol efi_rng_protocol_t;
-
-typedef struct {
- u32 get_info;
- u32 get_rng;
-} efi_rng_protocol_32_t;
-
-typedef struct {
- u64 get_info;
- u64 get_rng;
-} efi_rng_protocol_64_t;
-
-struct efi_rng_protocol {
- efi_status_t (*get_info)(struct efi_rng_protocol *,
- unsigned long *, efi_guid_t *);
- efi_status_t (*get_rng)(struct efi_rng_protocol *,
- efi_guid_t *, unsigned long, u8 *out);
+typedef union efi_rng_protocol efi_rng_protocol_t;
+
+union efi_rng_protocol {
+ struct {
+ efi_status_t (__efiapi *get_info)(efi_rng_protocol_t *,
+ unsigned long *,
+ efi_guid_t *);
+ efi_status_t (__efiapi *get_rng)(efi_rng_protocol_t *,
+ efi_guid_t *, unsigned long,
+ u8 *out);
+ };
+ struct {
+ u32 get_info;
+ u32 get_rng;
+ } mixed_mode;
};
-efi_status_t efi_get_random_bytes(efi_system_table_t *sys_table_arg,
- unsigned long size, u8 *out)
+efi_status_t efi_get_random_bytes(unsigned long size, u8 *out)
{
efi_guid_t rng_proto = EFI_RNG_PROTOCOL_GUID;
efi_status_t status;
- struct efi_rng_protocol *rng;
+ efi_rng_protocol_t *rng = NULL;
- status = efi_call_early(locate_protocol, &rng_proto, NULL,
- (void **)&rng);
+ status = efi_bs_call(locate_protocol, &rng_proto, NULL, (void **)&rng);
if (status != EFI_SUCCESS)
return status;
- return efi_call_proto(efi_rng_protocol, get_rng, rng, NULL, size, out);
+ return efi_call_proto(rng, get_rng, NULL, size, out);
}
/*
@@ -81,8 +77,7 @@ static unsigned long get_entry_num_slots(efi_memory_desc_t *md,
*/
#define MD_NUM_SLOTS(md) ((md)->virt_addr)
-efi_status_t efi_random_alloc(efi_system_table_t *sys_table_arg,
- unsigned long size,
+efi_status_t efi_random_alloc(unsigned long size,
unsigned long align,
unsigned long *addr,
unsigned long random_seed)
@@ -101,7 +96,7 @@ efi_status_t efi_random_alloc(efi_system_table_t *sys_table_arg,
map.key_ptr = NULL;
map.buff_size = &buff_size;
- status = efi_get_memory_map(sys_table_arg, &map);
+ status = efi_get_memory_map(&map);
if (status != EFI_SUCCESS)
return status;
@@ -145,39 +140,38 @@ efi_status_t efi_random_alloc(efi_system_table_t *sys_table_arg,
target = round_up(md->phys_addr, align) + target_slot * align;
pages = round_up(size, EFI_PAGE_SIZE) / EFI_PAGE_SIZE;
- status = efi_call_early(allocate_pages, EFI_ALLOCATE_ADDRESS,
- EFI_LOADER_DATA, pages, &target);
+ status = efi_bs_call(allocate_pages, EFI_ALLOCATE_ADDRESS,
+ EFI_LOADER_DATA, pages, &target);
if (status == EFI_SUCCESS)
*addr = target;
break;
}
- efi_call_early(free_pool, memory_map);
+ efi_bs_call(free_pool, memory_map);
return status;
}
-efi_status_t efi_random_get_seed(efi_system_table_t *sys_table_arg)
+efi_status_t efi_random_get_seed(void)
{
efi_guid_t rng_proto = EFI_RNG_PROTOCOL_GUID;
efi_guid_t rng_algo_raw = EFI_RNG_ALGORITHM_RAW;
efi_guid_t rng_table_guid = LINUX_EFI_RANDOM_SEED_TABLE_GUID;
- struct efi_rng_protocol *rng;
- struct linux_efi_random_seed *seed;
+ efi_rng_protocol_t *rng = NULL;
+ struct linux_efi_random_seed *seed = NULL;
efi_status_t status;
- status = efi_call_early(locate_protocol, &rng_proto, NULL,
- (void **)&rng);
+ status = efi_bs_call(locate_protocol, &rng_proto, NULL, (void **)&rng);
if (status != EFI_SUCCESS)
return status;
- status = efi_call_early(allocate_pool, EFI_RUNTIME_SERVICES_DATA,
- sizeof(*seed) + EFI_RANDOM_SEED_SIZE,
- (void **)&seed);
+ status = efi_bs_call(allocate_pool, EFI_RUNTIME_SERVICES_DATA,
+ sizeof(*seed) + EFI_RANDOM_SEED_SIZE,
+ (void **)&seed);
if (status != EFI_SUCCESS)
return status;
- status = efi_call_proto(efi_rng_protocol, get_rng, rng, &rng_algo_raw,
+ status = efi_call_proto(rng, get_rng, &rng_algo_raw,
EFI_RANDOM_SEED_SIZE, seed->bits);
if (status == EFI_UNSUPPORTED)
@@ -185,21 +179,20 @@ efi_status_t efi_random_get_seed(efi_system_table_t *sys_table_arg)
* Use whatever algorithm we have available if the raw algorithm
* is not implemented.
*/
- status = efi_call_proto(efi_rng_protocol, get_rng, rng, NULL,
- EFI_RANDOM_SEED_SIZE, seed->bits);
+ status = efi_call_proto(rng, get_rng, NULL,
+ EFI_RANDOM_SEED_SIZE, seed->bits);
if (status != EFI_SUCCESS)
goto err_freepool;
seed->size = EFI_RANDOM_SEED_SIZE;
- status = efi_call_early(install_configuration_table, &rng_table_guid,
- seed);
+ status = efi_bs_call(install_configuration_table, &rng_table_guid, seed);
if (status != EFI_SUCCESS)
goto err_freepool;
return EFI_SUCCESS;
err_freepool:
- efi_call_early(free_pool, seed);
+ efi_bs_call(free_pool, seed);
return status;
}
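The union above is what makes the shorthand protocol call work: on native
systems the typed function pointers in the anonymous struct are invoked
directly, while in mixed mode (32-bit firmware under a 64-bit kernel) the
u32 slots in mixed_mode are routed through a call thunk. A minimal sketch
of the dispatch idea (illustrative only: proto_call(), efi_is_native() and
efi64_thunk() stand in here for whatever macro, predicate and thunk the
architecture actually provides):

    #define proto_call(inst, func, ...)                                 \
            (efi_is_native()                                            \
                    ? (inst)->func(inst, ##__VA_ARGS__)                 \
                    : efi64_thunk((inst)->mixed_mode.func, inst,        \
                                  ##__VA_ARGS__))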
diff --git a/drivers/firmware/efi/libstub/secureboot.c b/drivers/firmware/efi/libstub/secureboot.c
index edba5e7a3743..a765378ad18c 100644
--- a/drivers/firmware/efi/libstub/secureboot.c
+++ b/drivers/firmware/efi/libstub/secureboot.c
@@ -21,18 +21,13 @@ static const efi_char16_t efi_SetupMode_name[] = L"SetupMode";
static const efi_guid_t shim_guid = EFI_SHIM_LOCK_GUID;
static const efi_char16_t shim_MokSBState_name[] = L"MokSBState";
-#define get_efi_var(name, vendor, ...) \
- efi_call_runtime(get_variable, \
- (efi_char16_t *)(name), (efi_guid_t *)(vendor), \
- __VA_ARGS__);
-
/*
* Determine whether we're in secure boot mode.
*
* Please keep the logic in sync with
* arch/x86/xen/efi.c:xen_efi_get_secureboot().
*/
-enum efi_secureboot_mode efi_get_secureboot(efi_system_table_t *sys_table_arg)
+enum efi_secureboot_mode efi_get_secureboot(void)
{
u32 attr;
u8 secboot, setupmode, moksbstate;
@@ -72,10 +67,10 @@ enum efi_secureboot_mode efi_get_secureboot(efi_system_table_t *sys_table_arg)
return efi_secureboot_mode_disabled;
secure_boot_enabled:
- pr_efi(sys_table_arg, "UEFI Secure Boot is enabled.\n");
+ pr_efi("UEFI Secure Boot is enabled.\n");
return efi_secureboot_mode_enabled;
out_efi_err:
- pr_efi_err(sys_table_arg, "Could not determine UEFI Secure Boot status.\n");
+ pr_efi_err("Could not determine UEFI Secure Boot status.\n");
return efi_secureboot_mode_unknown;
}
diff --git a/drivers/firmware/efi/libstub/tpm.c b/drivers/firmware/efi/libstub/tpm.c
index eb9af83e4d59..1d59e103a2e3 100644
--- a/drivers/firmware/efi/libstub/tpm.c
+++ b/drivers/firmware/efi/libstub/tpm.c
@@ -20,23 +20,13 @@ static const efi_char16_t efi_MemoryOverWriteRequest_name[] =
#define MEMORY_ONLY_RESET_CONTROL_GUID \
EFI_GUID(0xe20939be, 0x32d4, 0x41be, 0xa1, 0x50, 0x89, 0x7f, 0x85, 0xd4, 0x98, 0x29)
-#define get_efi_var(name, vendor, ...) \
- efi_call_runtime(get_variable, \
- (efi_char16_t *)(name), (efi_guid_t *)(vendor), \
- __VA_ARGS__)
-
-#define set_efi_var(name, vendor, ...) \
- efi_call_runtime(set_variable, \
- (efi_char16_t *)(name), (efi_guid_t *)(vendor), \
- __VA_ARGS__)
-
/*
* Enable reboot attack mitigation. This requests that the firmware clear the
* RAM on next reboot before proceeding with boot, ensuring that any secrets
* are cleared. If userland has ensured that all secrets have been removed
* from RAM before reboot it can simply reset this variable.
*/
-void efi_enable_reset_attack_mitigation(efi_system_table_t *sys_table_arg)
+void efi_enable_reset_attack_mitigation(void)
{
u8 val = 1;
efi_guid_t var_guid = MEMORY_ONLY_RESET_CONTROL_GUID;
@@ -57,7 +47,7 @@ void efi_enable_reset_attack_mitigation(efi_system_table_t *sys_table_arg)
#endif
-void efi_retrieve_tpm2_eventlog(efi_system_table_t *sys_table_arg)
+void efi_retrieve_tpm2_eventlog(void)
{
efi_guid_t tcg2_guid = EFI_TCG2_PROTOCOL_GUID;
efi_guid_t linux_eventlog_guid = LINUX_EFI_TPM_EVENT_LOG_GUID;
@@ -69,23 +59,22 @@ void efi_retrieve_tpm2_eventlog(efi_system_table_t *sys_table_arg)
size_t log_size, last_entry_size;
efi_bool_t truncated;
int version = EFI_TCG2_EVENT_LOG_FORMAT_TCG_2;
- void *tcg2_protocol = NULL;
+ efi_tcg2_protocol_t *tcg2_protocol = NULL;
int final_events_size = 0;
- status = efi_call_early(locate_protocol, &tcg2_guid, NULL,
- &tcg2_protocol);
+ status = efi_bs_call(locate_protocol, &tcg2_guid, NULL,
+ (void **)&tcg2_protocol);
if (status != EFI_SUCCESS)
return;
- status = efi_call_proto(efi_tcg2_protocol, get_event_log,
- tcg2_protocol, version, &log_location,
- &log_last_entry, &truncated);
+ status = efi_call_proto(tcg2_protocol, get_event_log, version,
+ &log_location, &log_last_entry, &truncated);
if (status != EFI_SUCCESS || !log_location) {
version = EFI_TCG2_EVENT_LOG_FORMAT_TCG_1_2;
- status = efi_call_proto(efi_tcg2_protocol, get_event_log,
- tcg2_protocol, version, &log_location,
- &log_last_entry, &truncated);
+ status = efi_call_proto(tcg2_protocol, get_event_log, version,
+ &log_location, &log_last_entry,
+ &truncated);
if (status != EFI_SUCCESS || !log_location)
return;
@@ -126,13 +115,11 @@ void efi_retrieve_tpm2_eventlog(efi_system_table_t *sys_table_arg)
}
/* Allocate space for the logs and copy them. */
- status = efi_call_early(allocate_pool, EFI_LOADER_DATA,
- sizeof(*log_tbl) + log_size,
- (void **) &log_tbl);
+ status = efi_bs_call(allocate_pool, EFI_LOADER_DATA,
+ sizeof(*log_tbl) + log_size, (void **)&log_tbl);
if (status != EFI_SUCCESS) {
- efi_printk(sys_table_arg,
- "Unable to allocate memory for event log\n");
+ efi_printk("Unable to allocate memory for event log\n");
return;
}
@@ -140,8 +127,7 @@ void efi_retrieve_tpm2_eventlog(efi_system_table_t *sys_table_arg)
* Figure out whether any events have already been logged to the
* final events structure, and if so how much space they take up
*/
- final_events_table = get_efi_config_table(sys_table_arg,
- LINUX_EFI_TPM_FINAL_LOG_GUID);
+ final_events_table = get_efi_config_table(LINUX_EFI_TPM_FINAL_LOG_GUID);
if (final_events_table && final_events_table->nr_events) {
struct tcg_pcr_event2_head *header;
int offset;
@@ -169,12 +155,12 @@ void efi_retrieve_tpm2_eventlog(efi_system_table_t *sys_table_arg)
log_tbl->version = version;
memcpy(log_tbl->log, (void *) first_entry_addr, log_size);
- status = efi_call_early(install_configuration_table,
- &linux_eventlog_guid, log_tbl);
+ status = efi_bs_call(install_configuration_table,
+ &linux_eventlog_guid, log_tbl);
if (status != EFI_SUCCESS)
goto err_free;
return;
err_free:
- efi_call_early(free_pool, log_tbl);
+ efi_bs_call(free_pool, log_tbl);
}
diff --git a/drivers/firmware/efi/memmap.c b/drivers/firmware/efi/memmap.c
index 38b686c67b17..2ff1883dc788 100644
--- a/drivers/firmware/efi/memmap.c
+++ b/drivers/firmware/efi/memmap.c
@@ -29,9 +29,32 @@ static phys_addr_t __init __efi_memmap_alloc_late(unsigned long size)
return PFN_PHYS(page_to_pfn(p));
}
+void __init __efi_memmap_free(u64 phys, unsigned long size, unsigned long flags)
+{
+ if (flags & EFI_MEMMAP_MEMBLOCK) {
+ if (slab_is_available())
+ memblock_free_late(phys, size);
+ else
+ memblock_free(phys, size);
+ } else if (flags & EFI_MEMMAP_SLAB) {
+ struct page *p = pfn_to_page(PHYS_PFN(phys));
+ unsigned int order = get_order(size);
+
+ free_pages((unsigned long) page_address(p), order);
+ }
+}
+
+static void __init efi_memmap_free(void)
+{
+ __efi_memmap_free(efi.memmap.phys_map,
+ efi.memmap.desc_size * efi.memmap.nr_map,
+ efi.memmap.flags);
+}
+
/**
* efi_memmap_alloc - Allocate memory for the EFI memory map
* @num_entries: Number of entries in the allocated map.
+ * @data: efi memmap installation parameters
*
* Depending on whether mm_init() has already been invoked or not,
* either memblock or "normal" page allocation is used.
@@ -39,34 +62,47 @@ static phys_addr_t __init __efi_memmap_alloc_late(unsigned long size)
* Returns the physical address of the allocated memory map on
* success, zero on failure.
*/
-phys_addr_t __init efi_memmap_alloc(unsigned int num_entries)
+int __init efi_memmap_alloc(unsigned int num_entries,
+ struct efi_memory_map_data *data)
{
- unsigned long size = num_entries * efi.memmap.desc_size;
-
- if (slab_is_available())
- return __efi_memmap_alloc_late(size);
+ /* Expect allocation parameters to be zero-initialized */
+ WARN_ON(data->phys_map || data->size);
+
+ data->size = num_entries * efi.memmap.desc_size;
+ data->desc_version = efi.memmap.desc_version;
+ data->desc_size = efi.memmap.desc_size;
+ data->flags &= ~(EFI_MEMMAP_SLAB | EFI_MEMMAP_MEMBLOCK);
+ data->flags |= efi.memmap.flags & EFI_MEMMAP_LATE;
+
+ if (slab_is_available()) {
+ data->flags |= EFI_MEMMAP_SLAB;
+ data->phys_map = __efi_memmap_alloc_late(data->size);
+ } else {
+ data->flags |= EFI_MEMMAP_MEMBLOCK;
+ data->phys_map = __efi_memmap_alloc_early(data->size);
+ }
- return __efi_memmap_alloc_early(size);
+ if (!data->phys_map)
+ return -ENOMEM;
+ return 0;
}
/**
* __efi_memmap_init - Common code for mapping the EFI memory map
* @data: EFI memory map data
- * @late: Use early or late mapping function?
*
* This function takes care of figuring out which function to use to
* map the EFI memory map in efi.memmap based on how far into the boot
* we are.
*
- * During bootup @late should be %false since we only have access to
- * the early_memremap*() functions as the vmalloc space isn't setup.
- * Once the kernel is fully booted we can fallback to the more robust
- * memremap*() API.
+ * During bootup EFI_MEMMAP_LATE in data->flags should be clear since we
+ * only have access to the early_memremap*() functions as the vmalloc
+ * space isn't setup. Once the kernel is fully booted we can fallback
+ * to the more robust memremap*() API.
*
* Returns zero on success, a negative error code on failure.
*/
-static int __init
-__efi_memmap_init(struct efi_memory_map_data *data, bool late)
+static int __init __efi_memmap_init(struct efi_memory_map_data *data)
{
struct efi_memory_map map;
phys_addr_t phys_map;
@@ -76,7 +112,7 @@ __efi_memmap_init(struct efi_memory_map_data *data, bool late)
phys_map = data->phys_map;
- if (late)
+ if (data->flags & EFI_MEMMAP_LATE)
map.map = memremap(phys_map, data->size, MEMREMAP_WB);
else
map.map = early_memremap(phys_map, data->size);
@@ -86,13 +122,16 @@ __efi_memmap_init(struct efi_memory_map_data *data, bool late)
return -ENOMEM;
}
+ /* NOP if (data->flags & (EFI_MEMMAP_MEMBLOCK | EFI_MEMMAP_SLAB)) == 0 */
+ efi_memmap_free();
+
map.phys_map = data->phys_map;
map.nr_map = data->size / data->desc_size;
map.map_end = map.map + data->size;
map.desc_version = data->desc_version;
map.desc_size = data->desc_size;
- map.late = late;
+ map.flags = data->flags;
set_bit(EFI_MEMMAP, &efi.flags);
@@ -111,9 +150,10 @@ __efi_memmap_init(struct efi_memory_map_data *data, bool late)
int __init efi_memmap_init_early(struct efi_memory_map_data *data)
{
/* Cannot go backwards */
- WARN_ON(efi.memmap.late);
+ WARN_ON(efi.memmap.flags & EFI_MEMMAP_LATE);
- return __efi_memmap_init(data, false);
+ data->flags = 0;
+ return __efi_memmap_init(data);
}
void __init efi_memmap_unmap(void)
@@ -121,7 +161,7 @@ void __init efi_memmap_unmap(void)
if (!efi_enabled(EFI_MEMMAP))
return;
- if (!efi.memmap.late) {
+ if (!(efi.memmap.flags & EFI_MEMMAP_LATE)) {
unsigned long size;
size = efi.memmap.desc_size * efi.memmap.nr_map;
@@ -162,13 +202,14 @@ int __init efi_memmap_init_late(phys_addr_t addr, unsigned long size)
struct efi_memory_map_data data = {
.phys_map = addr,
.size = size,
+ .flags = EFI_MEMMAP_LATE,
};
/* Did we forget to unmap the early EFI memmap? */
WARN_ON(efi.memmap.map);
/* Were we already called? */
- WARN_ON(efi.memmap.late);
+ WARN_ON(efi.memmap.flags & EFI_MEMMAP_LATE);
/*
* It makes no sense to allow callers to register different
@@ -178,13 +219,12 @@ int __init efi_memmap_init_late(phys_addr_t addr, unsigned long size)
data.desc_version = efi.memmap.desc_version;
data.desc_size = efi.memmap.desc_size;
- return __efi_memmap_init(&data, true);
+ return __efi_memmap_init(&data);
}
/**
* efi_memmap_install - Install a new EFI memory map in efi.memmap
- * @addr: Physical address of the memory map
- * @nr_map: Number of entries in the memory map
+ * @data: map allocation parameters (address, size, flags)
*
* Unlike efi_memmap_init_*(), this function does not allow the caller
* to switch from early to late mappings. It simply uses the existing
@@ -192,18 +232,11 @@ int __init efi_memmap_init_late(phys_addr_t addr, unsigned long size)
*
* Returns zero on success, a negative error code on failure.
*/
-int __init efi_memmap_install(phys_addr_t addr, unsigned int nr_map)
+int __init efi_memmap_install(struct efi_memory_map_data *data)
{
- struct efi_memory_map_data data;
-
efi_memmap_unmap();
- data.phys_map = addr;
- data.size = efi.memmap.desc_size * nr_map;
- data.desc_version = efi.memmap.desc_version;
- data.desc_size = efi.memmap.desc_size;
-
- return __efi_memmap_init(&data, efi.memmap.late);
+ return __efi_memmap_init(data);
}
/**
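With this refactor the allocation and installation of a new memory map
share one descriptor: the caller zero-initializes a struct
efi_memory_map_data, efi_memmap_alloc() fills in phys_map, size and the
SLAB/MEMBLOCK flags, and the very same struct is then handed to
efi_memmap_install(). A minimal sketch of the new calling convention
(hypothetical caller, error handling trimmed):

    struct efi_memory_map_data data = {};
    int ret;

    ret = efi_memmap_alloc(nr_entries, &data);
    if (ret)
            return ret;

    /* construct the new descriptors at data.phys_map ... */

    ret = efi_memmap_install(&data);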
diff --git a/drivers/firmware/google/coreboot_table.c b/drivers/firmware/google/coreboot_table.c
index 8d132e4f008a..0205987a4fd4 100644
--- a/drivers/firmware/google/coreboot_table.c
+++ b/drivers/firmware/google/coreboot_table.c
@@ -163,8 +163,15 @@ static int coreboot_table_probe(struct platform_device *pdev)
return ret;
}
+static int __cb_dev_unregister(struct device *dev, void *dummy)
+{
+ device_unregister(dev);
+ return 0;
+}
+
static int coreboot_table_remove(struct platform_device *pdev)
{
+ bus_for_each_dev(&coreboot_bus_type, NULL, NULL, __cb_dev_unregister);
bus_unregister(&coreboot_bus_type);
return 0;
}
diff --git a/drivers/firmware/google/gsmi.c b/drivers/firmware/google/gsmi.c
index edaa4e5d84ad..5b2011ebbe26 100644
--- a/drivers/firmware/google/gsmi.c
+++ b/drivers/firmware/google/gsmi.c
@@ -76,6 +76,7 @@
#define GSMI_CMD_LOG_S0IX_RESUME 0x0b
#define GSMI_CMD_CLEAR_CONFIG 0x20
#define GSMI_CMD_HANDSHAKE_TYPE 0xC1
+#define GSMI_CMD_RESERVED 0xff
/* Magic entry type for kernel events */
#define GSMI_LOG_ENTRY_TYPE_KERNEL 0xDEAD
@@ -746,6 +747,7 @@ MODULE_DEVICE_TABLE(dmi, gsmi_dmi_table);
static __init int gsmi_system_valid(void)
{
u32 hash;
+ u16 cmd, result;
if (!dmi_check_system(gsmi_dmi_table))
return -ENODEV;
@@ -780,6 +782,23 @@ static __init int gsmi_system_valid(void)
return -ENODEV;
}
+ /* Test the SMI handler with a bogus command. If it leaves the
+ * calling argument in %ax untouched, there is no handler for
+ * GSMI commands.
+ */
+ cmd = GSMI_CALLBACK | GSMI_CMD_RESERVED << 8;
+ asm volatile (
+ "outb %%al, %%dx\n\t"
+ : "=a" (result)
+ : "0" (cmd),
+ "d" (acpi_gbl_FADT.smi_command)
+ : "memory", "cc"
+ );
+ if (cmd == result) {
+ pr_info("gsmi: no gsmi handler in firmware\n");
+ return -ENODEV;
+ }
+
/* Found */
return 0;
}
@@ -1016,6 +1035,9 @@ out_err:
dma_pool_destroy(gsmi_dev.dma_pool);
platform_device_unregister(gsmi_dev.pdev);
pr_info("gsmi: failed to load: %d\n", ret);
+#ifdef CONFIG_PM
+ platform_driver_unregister(&gsmi_driver_info);
+#endif
return ret;
}
@@ -1037,6 +1059,9 @@ static void __exit gsmi_exit(void)
gsmi_buf_free(gsmi_dev.name_buf);
dma_pool_destroy(gsmi_dev.dma_pool);
platform_device_unregister(gsmi_dev.pdev);
+#ifdef CONFIG_PM
+ platform_driver_unregister(&gsmi_driver_info);
+#endif
}
module_init(gsmi_init);
diff --git a/drivers/firmware/stratix10-svc.c b/drivers/firmware/stratix10-svc.c
index c6c31402848d..7ffb42b0775e 100644
--- a/drivers/firmware/stratix10-svc.c
+++ b/drivers/firmware/stratix10-svc.c
@@ -268,7 +268,7 @@ static void svc_thread_cmd_config_status(struct stratix10_svc_controller *ctrl,
*/
msleep(1000);
count_in_sec--;
- };
+ }
if (res.a0 == INTEL_SIP_SMC_STATUS_OK && count_in_sec)
cb_data->status = BIT(SVC_STATUS_RECONFIG_COMPLETED);
@@ -512,7 +512,7 @@ static int svc_normal_to_secure_thread(void *data)
break;
}
- };
+ }
kfree(cbdata);
kfree(pdata);
diff --git a/drivers/fpga/dfl-afu-main.c b/drivers/fpga/dfl-afu-main.c
index e4a34dc7947f..65437b6a6842 100644
--- a/drivers/fpga/dfl-afu-main.c
+++ b/drivers/fpga/dfl-afu-main.c
@@ -813,10 +813,8 @@ static int afu_dev_init(struct platform_device *pdev)
static int afu_dev_destroy(struct platform_device *pdev)
{
struct dfl_feature_platform_data *pdata = dev_get_platdata(&pdev->dev);
- struct dfl_afu *afu;
mutex_lock(&pdata->lock);
- afu = dfl_fpga_pdata_get_private(pdata);
afu_mmio_region_destroy(pdata);
afu_dma_region_destroy(pdata);
dfl_fpga_pdata_set_private(pdata, NULL);
diff --git a/drivers/fpga/dfl-fme-main.c b/drivers/fpga/dfl-fme-main.c
index 7c930e6b314d..1d4690c99268 100644
--- a/drivers/fpga/dfl-fme-main.c
+++ b/drivers/fpga/dfl-fme-main.c
@@ -675,10 +675,8 @@ static int fme_dev_init(struct platform_device *pdev)
static void fme_dev_destroy(struct platform_device *pdev)
{
struct dfl_feature_platform_data *pdata = dev_get_platdata(&pdev->dev);
- struct dfl_fme *fme;
mutex_lock(&pdata->lock);
- fme = dfl_fpga_pdata_get_private(pdata);
dfl_fpga_pdata_set_private(pdata, NULL);
mutex_unlock(&pdata->lock);
}
diff --git a/drivers/fpga/ts73xx-fpga.c b/drivers/fpga/ts73xx-fpga.c
index 9a17fe98c1b0..2888ff000e4d 100644
--- a/drivers/fpga/ts73xx-fpga.c
+++ b/drivers/fpga/ts73xx-fpga.c
@@ -119,10 +119,8 @@ static int ts73xx_fpga_probe(struct platform_device *pdev)
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
priv->io_base = devm_ioremap_resource(kdev, res);
- if (IS_ERR(priv->io_base)) {
- dev_err(kdev, "unable to remap registers\n");
+ if (IS_ERR(priv->io_base))
return PTR_ERR(priv->io_base);
- }
mgr = devm_fpga_mgr_create(kdev, "TS-73xx FPGA Manager",
&ts73xx_fpga_ops, priv);
diff --git a/drivers/fpga/xilinx-pr-decoupler.c b/drivers/fpga/xilinx-pr-decoupler.c
index af9b387c56d3..7d69af230567 100644
--- a/drivers/fpga/xilinx-pr-decoupler.c
+++ b/drivers/fpga/xilinx-pr-decoupler.c
@@ -101,7 +101,8 @@ static int xlnx_pr_decoupler_probe(struct platform_device *pdev)
priv->clk = devm_clk_get(&pdev->dev, "aclk");
if (IS_ERR(priv->clk)) {
- dev_err(&pdev->dev, "input clock not found\n");
+ if (PTR_ERR(priv->clk) != -EPROBE_DEFER)
+ dev_err(&pdev->dev, "input clock not found\n");
return PTR_ERR(priv->clk);
}
diff --git a/drivers/gpio/Kconfig b/drivers/gpio/Kconfig
index 2ed599236a1c..b585f2ef6dd9 100644
--- a/drivers/gpio/Kconfig
+++ b/drivers/gpio/Kconfig
@@ -312,6 +312,12 @@ config GPIO_IXP4XX
IXP4xx series of chips.
If unsure, say N.
+config GPIO_LOGICVC
+ tristate "Xylon LogiCVC GPIO support"
+ depends on MFD_SYSCON && OF
+ help
+ Say yes here to support GPIO functionality of the Xylon LogiCVC
+ programmable logic block.
config GPIO_LOONGSON
bool "Loongson-2/3 GPIO support"
@@ -335,14 +341,6 @@ config GPIO_LPC32XX
Select this option to enable GPIO driver for
NXP LPC32XX devices.
-config GPIO_LYNXPOINT
- tristate "Intel Lynxpoint GPIO support"
- depends on ACPI && X86
- select GPIOLIB_IRQCHIP
- help
- driver for GPIO functionality on Intel Lynxpoint PCH chipset
- Requires ACPI device enumeration code to set up a platform device.
-
config GPIO_MB86S7X
tristate "GPIO support for Fujitsu MB86S7x Platforms"
help
@@ -479,6 +477,15 @@ config GPIO_SAMA5D2_PIOBU
The difference from regular GPIOs is that they
maintain their value during backup/self-refresh.
+config GPIO_SIFIVE
+ bool "SiFive GPIO support"
+ depends on OF_GPIO && IRQ_DOMAIN_HIERARCHY
+ select GPIO_GENERIC
+ select GPIOLIB_IRQCHIP
+ select REGMAP_MMIO
+ help
+ Say yes here to support the GPIO device on SiFive SoCs.
+
config GPIO_SIOX
tristate "SIOX GPIO support"
depends on SIOX
@@ -613,6 +620,13 @@ config GPIO_VX855
additional drivers must be enabled in order to use the
functionality of the device.
+config GPIO_WCD934X
+ tristate "Qualcomm Technologies Inc WCD9340/WCD9341 gpio controller driver"
+ depends on MFD_WCD934X && OF_GPIO
+ help
+ This driver supports the GPIO block found on the Qualcomm Technologies
+ Inc WCD9340/WCD9341 Audio Codec.
+
config GPIO_XGENE
bool "APM X-Gene GPIO controller support"
depends on ARM64 && OF_GPIO
diff --git a/drivers/gpio/Makefile b/drivers/gpio/Makefile
index 34eb8b2b12dd..76ff2a5feaba 100644
--- a/drivers/gpio/Makefile
+++ b/drivers/gpio/Makefile
@@ -69,6 +69,7 @@ obj-$(CONFIG_GPIO_IT87) += gpio-it87.o
obj-$(CONFIG_GPIO_IXP4XX) += gpio-ixp4xx.o
obj-$(CONFIG_GPIO_JANZ_TTL) += gpio-janz-ttl.o
obj-$(CONFIG_GPIO_KEMPLD) += gpio-kempld.o
+obj-$(CONFIG_GPIO_LOGICVC) += gpio-logicvc.o
obj-$(CONFIG_GPIO_LOONGSON1) += gpio-loongson1.o
obj-$(CONFIG_GPIO_LOONGSON) += gpio-loongson.o
obj-$(CONFIG_GPIO_LP3943) += gpio-lp3943.o
@@ -76,7 +77,6 @@ obj-$(CONFIG_GPIO_LP873X) += gpio-lp873x.o
obj-$(CONFIG_GPIO_LP87565) += gpio-lp87565.o
obj-$(CONFIG_GPIO_LPC18XX) += gpio-lpc18xx.o
obj-$(CONFIG_GPIO_LPC32XX) += gpio-lpc32xx.o
-obj-$(CONFIG_GPIO_LYNXPOINT) += gpio-lynxpoint.o
obj-$(CONFIG_GPIO_MADERA) += gpio-madera.o
obj-$(CONFIG_GPIO_MAX3191X) += gpio-max3191x.o
obj-$(CONFIG_GPIO_MAX7300) += gpio-max7300.o
@@ -124,6 +124,7 @@ obj-$(CONFIG_ARCH_SA1100) += gpio-sa1100.o
obj-$(CONFIG_GPIO_SAMA5D2_PIOBU) += gpio-sama5d2-piobu.o
obj-$(CONFIG_GPIO_SCH311X) += gpio-sch311x.o
obj-$(CONFIG_GPIO_SCH) += gpio-sch.o
+obj-$(CONFIG_GPIO_SIFIVE) += gpio-sifive.o
obj-$(CONFIG_GPIO_SIOX) += gpio-siox.o
obj-$(CONFIG_GPIO_SODAVILLE) += gpio-sodaville.o
obj-$(CONFIG_GPIO_SPEAR_SPICS) += gpio-spear-spics.o
@@ -157,6 +158,7 @@ obj-$(CONFIG_GPIO_VF610) += gpio-vf610.o
obj-$(CONFIG_GPIO_VIPERBOARD) += gpio-viperboard.o
obj-$(CONFIG_GPIO_VR41XX) += gpio-vr41xx.o
obj-$(CONFIG_GPIO_VX855) += gpio-vx855.o
+obj-$(CONFIG_GPIO_WCD934X) += gpio-wcd934x.o
obj-$(CONFIG_GPIO_WHISKEY_COVE) += gpio-wcove.o
obj-$(CONFIG_GPIO_WINBOND) += gpio-winbond.o
obj-$(CONFIG_GPIO_WM831X) += gpio-wm831x.o
diff --git a/drivers/gpio/TODO b/drivers/gpio/TODO
index 76f8c7ff18ff..3a44e6ae52bd 100644
--- a/drivers/gpio/TODO
+++ b/drivers/gpio/TODO
@@ -10,6 +10,28 @@ approach. This means that GPIO consumers, drivers and machine descriptions
ideally have no use or idea of the global GPIO numberspace that has/was
used in the inception of the GPIO subsystem.
+The numberspace issue is the same reason that irq is moving away from irq
+numbers to IRQ descriptors.
+
+The underlying motivation for this is that the GPIO numberspace has become
+unmanageable: machine board files tend to become full of macros trying to
+establish the numberspace at compile-time, making it hard to add any numbers
+in the middle (such as if you missed a pin on a chip) without the numberspace
+breaking.
+
+Machine descriptions such as device tree or ACPI do not have a concept of the
+Linux GPIO number as those descriptions are external to the Linux kernel
+and treat GPIO lines as abstract entities.
+
+The runtime-assigned GPIO numberspace (what you get if you assign the GPIO
+base as -1 in struct gpio_chip) has also become unpredictable: probe
+ordering and the introduction of -EPROBE_DEFER make the probe order of
+independent GPIO chips essentially arbitrary, so their base numbers are
+assigned on a first-come, first-served basis.
+
+The best way to get out of the problem is to make the global GPIO numbers
+unimportant by simply not using them. GPIO descriptors deal with this.
+
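+As a minimal sketch of what the descriptor-based consumer API looks like
+(the "reset" line name here is hypothetical):
+
+  struct gpio_desc *reset;
+
+  reset = devm_gpiod_get(dev, "reset", GPIOD_OUT_LOW);
+  if (IS_ERR(reset))
+          return PTR_ERR(reset);
+  gpiod_set_value(reset, 1);
+
+No global GPIO number appears anywhere: the line is looked up by function
+name from the machine description and used only through its descriptor.
+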
Work items:
- Convert all GPIO device drivers to only #include <linux/gpio/driver.h>
@@ -33,7 +55,7 @@ This header and helpers appeared at one point when there was no proper
driver infrastructure for doing simpler MMIO GPIO devices and there was
no core support for parsing device tree GPIOs from the core library with
the [devm_]gpiod_get() calls we have today that will implicitly go into
-the device tree back-end.
+the device tree back-end. It is legacy and should not be used in new code.
Work items:
@@ -59,6 +81,15 @@ Work items:
uses <linux/gpio/consumer.h> or <linux/gpio/driver.h> instead.
+Get rid of <linux/gpio.h>
+
+This legacy header is a one-stop shop for anything GPIO and is closely tied
+to the global GPIO numberspace. The endgame of the above refactorings will
+be the removal of <linux/gpio.h> and from that point only the specialized
+headers under <linux/gpio/*.h> will be used. This requires all the above to
+be completed and is expected to take a long time.
+
+
Collect drivers
Collect GPIO drivers from arch/* and other places that should be placed
@@ -109,7 +140,7 @@ try to cover any generic kind of irqchip cascaded from a GPIO.
int irq; /* from platform etc */
struct my_gpio *g;
- struct gpio_irq_chip *girq
+ struct gpio_irq_chip *girq;
/* Set up the irqchip dynamically */
g->irq.name = "my_gpio_irq";
@@ -137,9 +168,14 @@ try to cover any generic kind of irqchip cascaded from a GPIO.
- Look over and identify any remaining easily converted drivers and
dry-code conversions to gpiolib irqchip for maintainers to test
-- Support generic hierarchical GPIO interrupts: these are for the
- non-cascading case where there is one IRQ per GPIO line, there is
- currently no common infrastructure for this.
+- Drop gpiochip_set_chained_irqchip() when all the chained irqchips
+ have been converted to the above infrastructure.
+
+- Add more infrastructure to make it possible to also pass a threaded
+ irqchip in struct gpio_irq_chip.
+
+- Drop gpiochip_irqchip_add_nested() when all the nested irqchips
+ have been converted to the above infrastructure.
Increase integration with pin control
diff --git a/drivers/gpio/gpio-altera.c b/drivers/gpio/gpio-altera.c
index 9f2e6b04c361..cc4ba71e4fe3 100644
--- a/drivers/gpio/gpio-altera.c
+++ b/drivers/gpio/gpio-altera.c
@@ -266,7 +266,7 @@ static int altera_gpio_probe(struct platform_device *pdev)
altera_gc->mmchip.gc.owner = THIS_MODULE;
altera_gc->mmchip.gc.parent = &pdev->dev;
- altera_gc->mapped_irq = platform_get_irq(pdev, 0);
+ altera_gc->mapped_irq = platform_get_irq_optional(pdev, 0);
if (altera_gc->mapped_irq < 0)
goto skip_irq;
diff --git a/drivers/gpio/gpio-aspeed-sgpio.c b/drivers/gpio/gpio-aspeed-sgpio.c
index 8319812593e3..d16645c1d8d9 100644
--- a/drivers/gpio/gpio-aspeed-sgpio.c
+++ b/drivers/gpio/gpio-aspeed-sgpio.c
@@ -391,7 +391,7 @@ static int aspeed_sgpio_setup_irqs(struct aspeed_sgpio *gpio,
gpio->irq = rc;
- /* Disable IRQ and clear Interrupt status registers for all SPGIO Pins. */
+ /* Disable IRQ and clear Interrupt status registers for all SGPIO Pins. */
for (i = 0; i < ARRAY_SIZE(aspeed_sgpio_banks); i++) {
bank = &aspeed_sgpio_banks[i];
/* disable irq enable bits */
diff --git a/drivers/gpio/gpio-aspeed.c b/drivers/gpio/gpio-aspeed.c
index f1037b61f763..879db23d8454 100644
--- a/drivers/gpio/gpio-aspeed.c
+++ b/drivers/gpio/gpio-aspeed.c
@@ -978,7 +978,7 @@ static int aspeed_gpio_set_config(struct gpio_chip *chip, unsigned int offset,
}
/**
- * aspeed_gpio_copro_set_ops - Sets the callbacks used for handhsaking with
+ * aspeed_gpio_copro_set_ops - Sets the callbacks used for handshaking with
* the coprocessor for shared GPIO banks
* @ops: The callbacks
* @data: Pointer passed back to the callbacks
diff --git a/drivers/gpio/gpio-bcm-kona.c b/drivers/gpio/gpio-bcm-kona.c
index 4122683eb1f9..baee8c3f06ad 100644
--- a/drivers/gpio/gpio-bcm-kona.c
+++ b/drivers/gpio/gpio-bcm-kona.c
@@ -19,7 +19,6 @@
#include <linux/io.h>
#include <linux/gpio/driver.h>
#include <linux/of_device.h>
-#include <linux/of_irq.h>
#include <linux/init.h>
#include <linux/irqdomain.h>
#include <linux/irqchip/chained_irq.h>
@@ -586,11 +585,18 @@ static int bcm_kona_gpio_probe(struct platform_device *pdev)
kona_gpio->gpio_chip = template_chip;
chip = &kona_gpio->gpio_chip;
- kona_gpio->num_bank = of_irq_count(dev->of_node);
- if (kona_gpio->num_bank == 0) {
+ ret = platform_irq_count(pdev);
+ if (!ret) {
dev_err(dev, "Couldn't determine # GPIO banks\n");
return -ENOENT;
+ } else if (ret < 0) {
+ if (ret != -EPROBE_DEFER)
+ dev_err(dev, "Couldn't determine GPIO banks: (%pe)\n",
+ ERR_PTR(ret));
+ return ret;
}
+ kona_gpio->num_bank = ret;
+
if (kona_gpio->num_bank > GPIO_MAX_BANK_NUM) {
dev_err(dev, "Too many GPIO banks configured (max=%d)\n",
GPIO_MAX_BANK_NUM);
diff --git a/drivers/gpio/gpio-creg-snps.c b/drivers/gpio/gpio-creg-snps.c
index ff19a8ad5663..1d0827e79703 100644
--- a/drivers/gpio/gpio-creg-snps.c
+++ b/drivers/gpio/gpio-creg-snps.c
@@ -64,11 +64,11 @@ static int creg_gpio_validate_pg(struct device *dev, struct creg_gpio *hcg,
if (layout->bit_per_gpio[i] < 1 || layout->bit_per_gpio[i] > 8)
return -EINVAL;
- /* Check that on valiue fits it's placeholder */
+ /* Check that on value fits its placeholder */
if (GENMASK(31, layout->bit_per_gpio[i]) & layout->on[i])
return -EINVAL;
- /* Check that off valiue fits it's placeholder */
+ /* Check that off value fits its placeholder */
if (GENMASK(31, layout->bit_per_gpio[i]) & layout->off[i])
return -EINVAL;
diff --git a/drivers/gpio/gpio-grgpio.c b/drivers/gpio/gpio-grgpio.c
index 08234e64993a..f954359c9544 100644
--- a/drivers/gpio/gpio-grgpio.c
+++ b/drivers/gpio/gpio-grgpio.c
@@ -253,17 +253,16 @@ static int grgpio_irq_map(struct irq_domain *d, unsigned int irq,
lirq->irq = irq;
uirq = &priv->uirqs[lirq->index];
if (uirq->refcnt == 0) {
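+ /* request_irq() may sleep, so drop the spinlock across the call */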
+ spin_unlock_irqrestore(&priv->gc.bgpio_lock, flags);
ret = request_irq(uirq->uirq, grgpio_irq_handler, 0,
dev_name(priv->dev), priv);
if (ret) {
dev_err(priv->dev,
"Could not request underlying irq %d\n",
uirq->uirq);
-
- spin_unlock_irqrestore(&priv->gc.bgpio_lock, flags);
-
return ret;
}
+ spin_lock_irqsave(&priv->gc.bgpio_lock, flags);
}
uirq->refcnt++;
@@ -309,8 +308,11 @@ static void grgpio_irq_unmap(struct irq_domain *d, unsigned int irq)
if (index >= 0) {
uirq = &priv->uirqs[lirq->index];
uirq->refcnt--;
- if (uirq->refcnt == 0)
+ if (uirq->refcnt == 0) {
+ spin_unlock_irqrestore(&priv->gc.bgpio_lock, flags);
free_irq(uirq->uirq, priv);
+ return;
+ }
}
spin_unlock_irqrestore(&priv->gc.bgpio_lock, flags);
@@ -433,12 +435,9 @@ static int grgpio_probe(struct platform_device *ofdev)
static int grgpio_remove(struct platform_device *ofdev)
{
struct grgpio_priv *priv = platform_get_drvdata(ofdev);
- unsigned long flags;
int i;
int ret = 0;
- spin_lock_irqsave(&priv->gc.bgpio_lock, flags);
-
if (priv->domain) {
for (i = 0; i < GRGPIO_MAX_NGPIO; i++) {
if (priv->uirqs[i].refcnt != 0) {
@@ -454,8 +453,6 @@ static int grgpio_remove(struct platform_device *ofdev)
irq_domain_remove(priv->domain);
out:
- spin_unlock_irqrestore(&priv->gc.bgpio_lock, flags);
-
return ret;
}
diff --git a/drivers/gpio/gpio-logicvc.c b/drivers/gpio/gpio-logicvc.c
new file mode 100644
index 000000000000..015632cf159f
--- /dev/null
+++ b/drivers/gpio/gpio-logicvc.c
@@ -0,0 +1,170 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Copyright (C) 2019 Bootlin
+ * Author: Paul Kocialkowski <paul.kocialkowski@bootlin.com>
+ */
+
+#include <linux/err.h>
+#include <linux/gpio/driver.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/of_address.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+#include <linux/mfd/syscon.h>
+
+#define LOGICVC_CTRL_REG 0x40
+#define LOGICVC_CTRL_GPIO_SHIFT 11
+#define LOGICVC_CTRL_GPIO_BITS 5
+
+#define LOGICVC_POWER_CTRL_REG 0x78
+#define LOGICVC_POWER_CTRL_GPIO_SHIFT 0
+#define LOGICVC_POWER_CTRL_GPIO_BITS 4
+
+struct logicvc_gpio {
+ struct gpio_chip chip;
+ struct regmap *regmap;
+};
+
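+/*
+ * Map a GPIO offset to its register and bit: offsets 0-4 live in the
+ * control register at bits 11-15, offsets 5-8 in the power control
+ * register at bits 0-3. For example, offset 6 resolves to
+ * LOGICVC_POWER_CTRL_REG with *bit == BIT(1).
+ */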
+static void logicvc_gpio_offset(struct logicvc_gpio *logicvc, unsigned offset,
+ unsigned int *reg, unsigned int *bit)
+{
+ if (offset >= LOGICVC_CTRL_GPIO_BITS) {
+ *reg = LOGICVC_POWER_CTRL_REG;
+
+ /* To the (virtual) power ctrl offset. */
+ offset -= LOGICVC_CTRL_GPIO_BITS;
+ /* To the actual bit offset in reg. */
+ offset += LOGICVC_POWER_CTRL_GPIO_SHIFT;
+ } else {
+ *reg = LOGICVC_CTRL_REG;
+
+ /* To the actual bit offset in reg. */
+ offset += LOGICVC_CTRL_GPIO_SHIFT;
+ }
+
+ *bit = BIT(offset);
+}
+
+static int logicvc_gpio_get(struct gpio_chip *chip, unsigned offset)
+{
+ struct logicvc_gpio *logicvc = gpiochip_get_data(chip);
+ unsigned int reg, bit, value;
+ int ret;
+
+ logicvc_gpio_offset(logicvc, offset, &reg, &bit);
+
+ ret = regmap_read(logicvc->regmap, reg, &value);
+ if (ret)
+ return ret;
+
+ return !!(value & bit);
+}
+
+static void logicvc_gpio_set(struct gpio_chip *chip, unsigned offset, int value)
+{
+ struct logicvc_gpio *logicvc = gpiochip_get_data(chip);
+ unsigned int reg, bit;
+
+ logicvc_gpio_offset(logicvc, offset, &reg, &bit);
+
+ regmap_update_bits(logicvc->regmap, reg, bit, value ? bit : 0);
+}
+
+static int logicvc_gpio_direction_output(struct gpio_chip *chip,
+ unsigned offset, int value)
+{
+ /* Pins are always configured as output, so just set the value. */
+ logicvc_gpio_set(chip, offset, value);
+
+ return 0;
+}
+
+static struct regmap_config logicvc_gpio_regmap_config = {
+ .reg_bits = 32,
+ .val_bits = 32,
+ .reg_stride = 4,
+ .name = "logicvc-gpio",
+};
+
+static int logicvc_gpio_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct device_node *of_node = dev->of_node;
+ struct logicvc_gpio *logicvc;
+ int ret;
+
+ logicvc = devm_kzalloc(dev, sizeof(*logicvc), GFP_KERNEL);
+ if (!logicvc)
+ return -ENOMEM;
+
+ /* Try to get regmap from parent first. */
+ logicvc->regmap = syscon_node_to_regmap(of_node->parent);
+
+ /* Grab our own regmap if that fails. */
+ if (IS_ERR(logicvc->regmap)) {
+ struct resource res;
+ void __iomem *base;
+
+ ret = of_address_to_resource(of_node, 0, &res);
+ if (ret) {
+ dev_err(dev, "Failed to get resource from address\n");
+ return ret;
+ }
+
+ base = devm_ioremap_resource(dev, &res);
+ if (IS_ERR(base)) {
+ dev_err(dev, "Failed to map I/O base\n");
+ return PTR_ERR(base);
+ }
+
+ logicvc_gpio_regmap_config.max_register = resource_size(&res) -
+ logicvc_gpio_regmap_config.reg_stride;
+
+ logicvc->regmap =
+ devm_regmap_init_mmio(dev, base,
+ &logicvc_gpio_regmap_config);
+ if (IS_ERR(logicvc->regmap)) {
+ dev_err(dev, "Failed to create regmap for I/O\n");
+ return PTR_ERR(logicvc->regmap);
+ }
+ }
+
+ logicvc->chip.parent = dev;
+ logicvc->chip.owner = THIS_MODULE;
+ logicvc->chip.label = dev_name(dev);
+ logicvc->chip.base = -1;
+ logicvc->chip.ngpio = LOGICVC_CTRL_GPIO_BITS +
+ LOGICVC_POWER_CTRL_GPIO_BITS;
+ logicvc->chip.get = logicvc_gpio_get;
+ logicvc->chip.set = logicvc_gpio_set;
+ logicvc->chip.direction_output = logicvc_gpio_direction_output;
+
+ platform_set_drvdata(pdev, logicvc);
+
+ return devm_gpiochip_add_data(dev, &logicvc->chip, logicvc);
+}
+
+static const struct of_device_id logicvc_gpio_of_table[] = {
+ {
+ .compatible = "xylon,logicvc-3.02.a-gpio",
+ },
+ { }
+};
+
+MODULE_DEVICE_TABLE(of, logicvc_gpio_of_table);
+
+static struct platform_driver logicvc_gpio_driver = {
+ .driver = {
+ .name = "gpio-logicvc",
+ .of_match_table = logicvc_gpio_of_table,
+ },
+ .probe = logicvc_gpio_probe,
+};
+
+module_platform_driver(logicvc_gpio_driver);
+
+MODULE_AUTHOR("Paul Kocialkowski <paul.kocialkowski@bootlin.com>");
+MODULE_DESCRIPTION("Xylon LogiCVC GPIO driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/gpio/gpio-lynxpoint.c b/drivers/gpio/gpio-lynxpoint.c
deleted file mode 100644
index 490ce7bae25e..000000000000
--- a/drivers/gpio/gpio-lynxpoint.c
+++ /dev/null
@@ -1,471 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * GPIO controller driver for Intel Lynxpoint PCH chipset>
- * Copyright (c) 2012, Intel Corporation.
- *
- * Author: Mathias Nyman <mathias.nyman@linux.intel.com>
- */
-
-#include <linux/acpi.h>
-#include <linux/bitops.h>
-#include <linux/gpio/driver.h>
-#include <linux/interrupt.h>
-#include <linux/io.h>
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/platform_device.h>
-#include <linux/pm_runtime.h>
-#include <linux/slab.h>
-#include <linux/types.h>
-
-/* LynxPoint chipset has support for 94 gpio pins */
-
-#define LP_NUM_GPIO 94
-
-/* Bitmapped register offsets */
-#define LP_ACPI_OWNED 0x00 /* Bitmap, set by bios, 0: pin reserved for ACPI */
-#define LP_GC 0x7C /* set APIC IRQ to IRQ14 or IRQ15 for all pins */
-#define LP_INT_STAT 0x80
-#define LP_INT_ENABLE 0x90
-
-/* Each pin has two 32 bit config registers, starting at 0x100 */
-#define LP_CONFIG1 0x100
-#define LP_CONFIG2 0x104
-
-/* LP_CONFIG1 reg bits */
-#define OUT_LVL_BIT BIT(31)
-#define IN_LVL_BIT BIT(30)
-#define TRIG_SEL_BIT BIT(4) /* 0: Edge, 1: Level */
-#define INT_INV_BIT BIT(3) /* Invert interrupt triggering */
-#define DIR_BIT BIT(2) /* 0: Output, 1: Input */
-#define USE_SEL_BIT BIT(0) /* 0: Native, 1: GPIO */
-
-/* LP_CONFIG2 reg bits */
-#define GPINDIS_BIT BIT(2) /* disable input sensing */
-#define GPIWP_BIT (BIT(0) | BIT(1)) /* weak pull options */
-
-struct lp_gpio {
- struct gpio_chip chip;
- struct platform_device *pdev;
- spinlock_t lock;
- unsigned long reg_base;
-};
-
-/*
- * Lynxpoint gpios are controlled through both bitmapped registers and
- * per gpio specific registers. The bitmapped registers are in chunks of
- * 3 x 32bit registers to cover all 94 gpios
- *
- * per gpio specific registers consist of two 32bit registers per gpio
- * (LP_CONFIG1 and LP_CONFIG2), with 94 gpios there's a total of
- * 188 config registers.
- *
- * A simplified view of the register layout look like this:
- *
- * LP_ACPI_OWNED[31:0] gpio ownerships for gpios 0-31 (bitmapped registers)
- * LP_ACPI_OWNED[63:32] gpio ownerships for gpios 32-63
- * LP_ACPI_OWNED[94:64] gpio ownerships for gpios 63-94
- * ...
- * LP_INT_ENABLE[31:0] ...
- * LP_INT_ENABLE[63:31] ...
- * LP_INT_ENABLE[94:64] ...
- * LP0_CONFIG1 (gpio 0) config1 reg for gpio 0 (per gpio registers)
- * LP0_CONFIG2 (gpio 0) config2 reg for gpio 0
- * LP1_CONFIG1 (gpio 1) config1 reg for gpio 1
- * LP1_CONFIG2 (gpio 1) config2 reg for gpio 1
- * LP2_CONFIG1 (gpio 2) ...
- * LP2_CONFIG2 (gpio 2) ...
- * ...
- * LP94_CONFIG1 (gpio 94) ...
- * LP94_CONFIG2 (gpio 94) ...
- */
-
-static unsigned long lp_gpio_reg(struct gpio_chip *chip, unsigned offset,
- int reg)
-{
- struct lp_gpio *lg = gpiochip_get_data(chip);
- int reg_offset;
-
- if (reg == LP_CONFIG1 || reg == LP_CONFIG2)
- /* per gpio specific config registers */
- reg_offset = offset * 8;
- else
- /* bitmapped registers */
- reg_offset = (offset / 32) * 4;
-
- return lg->reg_base + reg + reg_offset;
-}
-
-static int lp_gpio_request(struct gpio_chip *chip, unsigned offset)
-{
- struct lp_gpio *lg = gpiochip_get_data(chip);
- unsigned long reg = lp_gpio_reg(chip, offset, LP_CONFIG1);
- unsigned long conf2 = lp_gpio_reg(chip, offset, LP_CONFIG2);
- unsigned long acpi_use = lp_gpio_reg(chip, offset, LP_ACPI_OWNED);
-
- pm_runtime_get(&lg->pdev->dev); /* should we put if failed */
-
- /* Fail if BIOS reserved pin for ACPI use */
- if (!(inl(acpi_use) & BIT(offset % 32))) {
- dev_err(&lg->pdev->dev, "gpio %d reserved for ACPI\n", offset);
- return -EBUSY;
- }
- /* Fail if pin is in alternate function mode (not GPIO mode) */
- if (!(inl(reg) & USE_SEL_BIT))
- return -ENODEV;
-
- /* enable input sensing */
- outl(inl(conf2) & ~GPINDIS_BIT, conf2);
-
-
- return 0;
-}
-
-static void lp_gpio_free(struct gpio_chip *chip, unsigned offset)
-{
- struct lp_gpio *lg = gpiochip_get_data(chip);
- unsigned long conf2 = lp_gpio_reg(chip, offset, LP_CONFIG2);
-
- /* disable input sensing */
- outl(inl(conf2) | GPINDIS_BIT, conf2);
-
- pm_runtime_put(&lg->pdev->dev);
-}
-
-static int lp_irq_type(struct irq_data *d, unsigned type)
-{
- struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
- struct lp_gpio *lg = gpiochip_get_data(gc);
- u32 hwirq = irqd_to_hwirq(d);
- unsigned long flags;
- u32 value;
- unsigned long reg = lp_gpio_reg(&lg->chip, hwirq, LP_CONFIG1);
-
- if (hwirq >= lg->chip.ngpio)
- return -EINVAL;
-
- spin_lock_irqsave(&lg->lock, flags);
- value = inl(reg);
-
- /* set both TRIG_SEL and INV bits to 0 for rising edge */
- if (type & IRQ_TYPE_EDGE_RISING)
- value &= ~(TRIG_SEL_BIT | INT_INV_BIT);
-
- /* TRIG_SEL bit 0, INV bit 1 for falling edge */
- if (type & IRQ_TYPE_EDGE_FALLING)
- value = (value | INT_INV_BIT) & ~TRIG_SEL_BIT;
-
- /* TRIG_SEL bit 1, INV bit 0 for level low */
- if (type & IRQ_TYPE_LEVEL_LOW)
- value = (value | TRIG_SEL_BIT) & ~INT_INV_BIT;
-
- /* TRIG_SEL bit 1, INV bit 1 for level high */
- if (type & IRQ_TYPE_LEVEL_HIGH)
- value |= TRIG_SEL_BIT | INT_INV_BIT;
-
- outl(value, reg);
-
- if (type & IRQ_TYPE_EDGE_BOTH)
- irq_set_handler_locked(d, handle_edge_irq);
- else if (type & IRQ_TYPE_LEVEL_MASK)
- irq_set_handler_locked(d, handle_level_irq);
-
- spin_unlock_irqrestore(&lg->lock, flags);
-
- return 0;
-}
-
-static int lp_gpio_get(struct gpio_chip *chip, unsigned offset)
-{
- unsigned long reg = lp_gpio_reg(chip, offset, LP_CONFIG1);
- return !!(inl(reg) & IN_LVL_BIT);
-}
-
-static void lp_gpio_set(struct gpio_chip *chip, unsigned offset, int value)
-{
- struct lp_gpio *lg = gpiochip_get_data(chip);
- unsigned long reg = lp_gpio_reg(chip, offset, LP_CONFIG1);
- unsigned long flags;
-
- spin_lock_irqsave(&lg->lock, flags);
-
- if (value)
- outl(inl(reg) | OUT_LVL_BIT, reg);
- else
- outl(inl(reg) & ~OUT_LVL_BIT, reg);
-
- spin_unlock_irqrestore(&lg->lock, flags);
-}
-
-static int lp_gpio_direction_input(struct gpio_chip *chip, unsigned offset)
-{
- struct lp_gpio *lg = gpiochip_get_data(chip);
- unsigned long reg = lp_gpio_reg(chip, offset, LP_CONFIG1);
- unsigned long flags;
-
- spin_lock_irqsave(&lg->lock, flags);
- outl(inl(reg) | DIR_BIT, reg);
- spin_unlock_irqrestore(&lg->lock, flags);
-
- return 0;
-}
-
-static int lp_gpio_direction_output(struct gpio_chip *chip,
- unsigned offset, int value)
-{
- struct lp_gpio *lg = gpiochip_get_data(chip);
- unsigned long reg = lp_gpio_reg(chip, offset, LP_CONFIG1);
- unsigned long flags;
-
- lp_gpio_set(chip, offset, value);
-
- spin_lock_irqsave(&lg->lock, flags);
- outl(inl(reg) & ~DIR_BIT, reg);
- spin_unlock_irqrestore(&lg->lock, flags);
-
- return 0;
-}
-
-static void lp_gpio_irq_handler(struct irq_desc *desc)
-{
- struct irq_data *data = irq_desc_get_irq_data(desc);
- struct gpio_chip *gc = irq_desc_get_handler_data(desc);
- struct lp_gpio *lg = gpiochip_get_data(gc);
- struct irq_chip *chip = irq_data_get_irq_chip(data);
- unsigned long reg, ena, pending;
- u32 base, pin;
-
- /* check from GPIO controller which pin triggered the interrupt */
- for (base = 0; base < lg->chip.ngpio; base += 32) {
- reg = lp_gpio_reg(&lg->chip, base, LP_INT_STAT);
- ena = lp_gpio_reg(&lg->chip, base, LP_INT_ENABLE);
-
- /* Only interrupts that are enabled */
- pending = inl(reg) & inl(ena);
-
- for_each_set_bit(pin, &pending, 32) {
- unsigned irq;
-
- /* Clear before handling so we don't lose an edge */
- outl(BIT(pin), reg);
-
- irq = irq_find_mapping(lg->chip.irq.domain, base + pin);
- generic_handle_irq(irq);
- }
- }
- chip->irq_eoi(data);
-}
-
-static void lp_irq_unmask(struct irq_data *d)
-{
-}
-
-static void lp_irq_mask(struct irq_data *d)
-{
-}
-
-static void lp_irq_enable(struct irq_data *d)
-{
- struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
- struct lp_gpio *lg = gpiochip_get_data(gc);
- u32 hwirq = irqd_to_hwirq(d);
- unsigned long reg = lp_gpio_reg(&lg->chip, hwirq, LP_INT_ENABLE);
- unsigned long flags;
-
- spin_lock_irqsave(&lg->lock, flags);
- outl(inl(reg) | BIT(hwirq % 32), reg);
- spin_unlock_irqrestore(&lg->lock, flags);
-}
-
-static void lp_irq_disable(struct irq_data *d)
-{
- struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
- struct lp_gpio *lg = gpiochip_get_data(gc);
- u32 hwirq = irqd_to_hwirq(d);
- unsigned long reg = lp_gpio_reg(&lg->chip, hwirq, LP_INT_ENABLE);
- unsigned long flags;
-
- spin_lock_irqsave(&lg->lock, flags);
- outl(inl(reg) & ~BIT(hwirq % 32), reg);
- spin_unlock_irqrestore(&lg->lock, flags);
-}
-
-static struct irq_chip lp_irqchip = {
- .name = "LP-GPIO",
- .irq_mask = lp_irq_mask,
- .irq_unmask = lp_irq_unmask,
- .irq_enable = lp_irq_enable,
- .irq_disable = lp_irq_disable,
- .irq_set_type = lp_irq_type,
- .flags = IRQCHIP_SKIP_SET_WAKE,
-};
-
-static int lp_gpio_irq_init_hw(struct gpio_chip *chip)
-{
- struct lp_gpio *lg = gpiochip_get_data(chip);
- unsigned long reg;
- unsigned base;
-
- for (base = 0; base < lg->chip.ngpio; base += 32) {
- /* disable gpio pin interrupts */
- reg = lp_gpio_reg(&lg->chip, base, LP_INT_ENABLE);
- outl(0, reg);
- /* Clear interrupt status register */
- reg = lp_gpio_reg(&lg->chip, base, LP_INT_STAT);
- outl(0xffffffff, reg);
- }
-
- return 0;
-}
-
-static int lp_gpio_probe(struct platform_device *pdev)
-{
- struct lp_gpio *lg;
- struct gpio_chip *gc;
- struct resource *io_rc, *irq_rc;
- struct device *dev = &pdev->dev;
- unsigned long reg_len;
- int ret = -ENODEV;
-
- lg = devm_kzalloc(dev, sizeof(struct lp_gpio), GFP_KERNEL);
- if (!lg)
- return -ENOMEM;
-
- lg->pdev = pdev;
- platform_set_drvdata(pdev, lg);
-
- io_rc = platform_get_resource(pdev, IORESOURCE_IO, 0);
- irq_rc = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
-
- if (!io_rc) {
- dev_err(dev, "missing IO resources\n");
- return -EINVAL;
- }
-
- lg->reg_base = io_rc->start;
- reg_len = resource_size(io_rc);
-
- if (!devm_request_region(dev, lg->reg_base, reg_len, "lp-gpio")) {
- dev_err(dev, "failed requesting IO region 0x%x\n",
- (unsigned int)lg->reg_base);
- return -EBUSY;
- }
-
- spin_lock_init(&lg->lock);
-
- gc = &lg->chip;
- gc->label = dev_name(dev);
- gc->owner = THIS_MODULE;
- gc->request = lp_gpio_request;
- gc->free = lp_gpio_free;
- gc->direction_input = lp_gpio_direction_input;
- gc->direction_output = lp_gpio_direction_output;
- gc->get = lp_gpio_get;
- gc->set = lp_gpio_set;
- gc->base = -1;
- gc->ngpio = LP_NUM_GPIO;
- gc->can_sleep = false;
- gc->parent = dev;
-
- /* set up interrupts */
- if (irq_rc && irq_rc->start) {
- struct gpio_irq_chip *girq;
-
- girq = &gc->irq;
- girq->chip = &lp_irqchip;
- girq->init_hw = lp_gpio_irq_init_hw;
- girq->parent_handler = lp_gpio_irq_handler;
- girq->num_parents = 1;
- girq->parents = devm_kcalloc(&pdev->dev, girq->num_parents,
- sizeof(*girq->parents),
- GFP_KERNEL);
- if (!girq->parents)
- return -ENOMEM;
- girq->parents[0] = (unsigned)irq_rc->start;
- girq->default_type = IRQ_TYPE_NONE;
- girq->handler = handle_bad_irq;
- }
-
- ret = devm_gpiochip_add_data(dev, gc, lg);
- if (ret) {
- dev_err(dev, "failed adding lp-gpio chip\n");
- return ret;
- }
-
- pm_runtime_enable(dev);
-
- return 0;
-}
-
-static int lp_gpio_runtime_suspend(struct device *dev)
-{
- return 0;
-}
-
-static int lp_gpio_runtime_resume(struct device *dev)
-{
- return 0;
-}
-
-static int lp_gpio_resume(struct device *dev)
-{
- struct lp_gpio *lg = dev_get_drvdata(dev);
- unsigned long reg;
- int i;
-
- /* on some hardware suspend clears input sensing, re-enable it here */
- for (i = 0; i < lg->chip.ngpio; i++) {
- if (gpiochip_is_requested(&lg->chip, i) != NULL) {
- reg = lp_gpio_reg(&lg->chip, i, LP_CONFIG2);
- outl(inl(reg) & ~GPINDIS_BIT, reg);
- }
- }
- return 0;
-}
-
-static const struct dev_pm_ops lp_gpio_pm_ops = {
- .runtime_suspend = lp_gpio_runtime_suspend,
- .runtime_resume = lp_gpio_runtime_resume,
- .resume = lp_gpio_resume,
-};
-
-static const struct acpi_device_id lynxpoint_gpio_acpi_match[] = {
- { "INT33C7", 0 },
- { "INT3437", 0 },
- { }
-};
-MODULE_DEVICE_TABLE(acpi, lynxpoint_gpio_acpi_match);
-
-static int lp_gpio_remove(struct platform_device *pdev)
-{
- pm_runtime_disable(&pdev->dev);
- return 0;
-}
-
-static struct platform_driver lp_gpio_driver = {
- .probe = lp_gpio_probe,
- .remove = lp_gpio_remove,
- .driver = {
- .name = "lp_gpio",
- .pm = &lp_gpio_pm_ops,
- .acpi_match_table = ACPI_PTR(lynxpoint_gpio_acpi_match),
- },
-};
-
-static int __init lp_gpio_init(void)
-{
- return platform_driver_register(&lp_gpio_driver);
-}
-
-static void __exit lp_gpio_exit(void)
-{
- platform_driver_unregister(&lp_gpio_driver);
-}
-
-subsys_initcall(lp_gpio_init);
-module_exit(lp_gpio_exit);
-
-MODULE_AUTHOR("Mathias Nyman (Intel)");
-MODULE_DESCRIPTION("GPIO interface for Intel Lynxpoint");
-MODULE_LICENSE("GPL v2");
-MODULE_ALIAS("platform:lp_gpio");
diff --git a/drivers/gpio/gpio-mockup.c b/drivers/gpio/gpio-mockup.c
index 94b8d3ae27bc..7d343bea784a 100644
--- a/drivers/gpio/gpio-mockup.c
+++ b/drivers/gpio/gpio-mockup.c
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0+
+// SPDX-License-Identifier: GPL-2.0-or-later
/*
* GPIO Testing Device Driver
*
@@ -7,18 +7,18 @@
* Copyright (C) 2017 Bartosz Golaszewski <brgl@bgdev.pl>
*/
-#include <linux/init.h>
-#include <linux/module.h>
-#include <linux/gpio/driver.h>
+#include <linux/debugfs.h>
#include <linux/gpio/consumer.h>
-#include <linux/platform_device.h>
-#include <linux/slab.h>
+#include <linux/gpio/driver.h>
+#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/irq_sim.h>
-#include <linux/debugfs.h>
-#include <linux/uaccess.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
#include <linux/property.h>
+#include <linux/slab.h>
+#include <linux/uaccess.h>
#include "gpiolib.h"
diff --git a/drivers/gpio/gpio-mpc8xxx.c b/drivers/gpio/gpio-mpc8xxx.c
index 5ae30de3490a..604dfec353a1 100644
--- a/drivers/gpio/gpio-mpc8xxx.c
+++ b/drivers/gpio/gpio-mpc8xxx.c
@@ -296,6 +296,7 @@ static const struct mpc8xxx_gpio_devtype mpc512x_gpio_devtype = {
static const struct mpc8xxx_gpio_devtype ls1028a_gpio_devtype = {
.gpio_dir_in_init = ls1028a_gpio_dir_in_init,
+ .irq_set_type = mpc8xxx_irq_set_type,
};
static const struct mpc8xxx_gpio_devtype mpc5125_gpio_devtype = {
diff --git a/drivers/gpio/gpio-mt7621.c b/drivers/gpio/gpio-mt7621.c
index d1d785f983a7..b992321bb852 100644
--- a/drivers/gpio/gpio-mt7621.c
+++ b/drivers/gpio/gpio-mt7621.c
@@ -253,8 +253,7 @@ mediatek_gpio_bank_probe(struct device *dev,
/*
* Directly request the irq here instead of passing
- * a flow-handler to gpiochip_set_chained_irqchip,
- * because the irq is shared.
+ * a flow-handler because the irq is shared.
*/
ret = devm_request_irq(dev, mtk->gpio_irq,
mediatek_gpio_irq_handler, IRQF_SHARED,
diff --git a/drivers/gpio/gpio-mvebu.c b/drivers/gpio/gpio-mvebu.c
index 993bbeb3c006..d2b999c7987f 100644
--- a/drivers/gpio/gpio-mvebu.c
+++ b/drivers/gpio/gpio-mvebu.c
@@ -46,7 +46,6 @@
#include <linux/irqdomain.h>
#include <linux/mfd/syscon.h>
#include <linux/of_device.h>
-#include <linux/of_irq.h>
#include <linux/pinctrl/consumer.h>
#include <linux/platform_device.h>
#include <linux/pwm.h>
@@ -432,6 +431,7 @@ static void mvebu_gpio_edge_irq_unmask(struct irq_data *d)
u32 mask = d->mask;
irq_gc_lock(gc);
+ mvebu_gpio_write_edge_cause(mvchip, ~mask);
ct->mask_cache_priv |= mask;
mvebu_gpio_write_edge_mask(mvchip, ct->mask_cache_priv);
irq_gc_unlock(gc);
@@ -1102,7 +1102,11 @@ static int mvebu_gpio_probe(struct platform_device *pdev)
soc_variant = MVEBU_GPIO_SOC_VARIANT_ORION;
/* Some gpio controllers do not provide irq support */
- have_irqs = of_irq_count(np) != 0;
+ err = platform_irq_count(pdev);
+ if (err < 0)
+ return err;
+
+ have_irqs = err != 0;
mvchip = devm_kzalloc(&pdev->dev, sizeof(struct mvebu_gpio_chip),
GFP_KERNEL);
diff --git a/drivers/gpio/gpio-pca953x.c b/drivers/gpio/gpio-pca953x.c
index 9853547e7276..5638b4e5355f 100644
--- a/drivers/gpio/gpio-pca953x.c
+++ b/drivers/gpio/gpio-pca953x.c
@@ -764,8 +764,7 @@ static int pca953x_irq_setup(struct pca953x_chip *chip, int irq_base)
ret = devm_request_threaded_irq(&client->dev, client->irq,
NULL, pca953x_irq_handler,
- IRQF_TRIGGER_LOW | IRQF_ONESHOT |
- IRQF_SHARED,
+ IRQF_ONESHOT | IRQF_SHARED,
dev_name(&client->dev), chip);
if (ret) {
dev_err(&client->dev, "failed to request irq %d\n",
@@ -855,8 +854,6 @@ out:
return ret;
}
-static const struct of_device_id pca953x_dt_ids[];
-
static int pca953x_probe(struct i2c_client *client,
const struct i2c_device_id *i2c_id)
{
diff --git a/drivers/gpio/gpio-sama5d2-piobu.c b/drivers/gpio/gpio-sama5d2-piobu.c
index b04c561f3280..4d47b2c41186 100644
--- a/drivers/gpio/gpio-sama5d2-piobu.c
+++ b/drivers/gpio/gpio-sama5d2-piobu.c
@@ -244,7 +244,6 @@ static struct platform_driver sama5d2_piobu_driver = {
module_platform_driver(sama5d2_piobu_driver);
-MODULE_VERSION("1.0");
MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("SAMA5D2 PIOBU controller driver");
MODULE_AUTHOR("Andrei Stefanescu <andrei.stefanescu@microchip.com>");
diff --git a/drivers/gpio/gpio-sifive.c b/drivers/gpio/gpio-sifive.c
new file mode 100644
index 000000000000..147a1bd04515
--- /dev/null
+++ b/drivers/gpio/gpio-sifive.c
@@ -0,0 +1,252 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2019 SiFive
+ */
+
+#include <linux/bitops.h>
+#include <linux/device.h>
+#include <linux/errno.h>
+#include <linux/of_irq.h>
+#include <linux/gpio/driver.h>
+#include <linux/init.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/regmap.h>
+
+#define SIFIVE_GPIO_INPUT_VAL 0x00
+#define SIFIVE_GPIO_INPUT_EN 0x04
+#define SIFIVE_GPIO_OUTPUT_EN 0x08
+#define SIFIVE_GPIO_OUTPUT_VAL 0x0C
+#define SIFIVE_GPIO_RISE_IE 0x18
+#define SIFIVE_GPIO_RISE_IP 0x1C
+#define SIFIVE_GPIO_FALL_IE 0x20
+#define SIFIVE_GPIO_FALL_IP 0x24
+#define SIFIVE_GPIO_HIGH_IE 0x28
+#define SIFIVE_GPIO_HIGH_IP 0x2C
+#define SIFIVE_GPIO_LOW_IE 0x30
+#define SIFIVE_GPIO_LOW_IP 0x34
+#define SIFIVE_GPIO_OUTPUT_XOR 0x40
+
+#define SIFIVE_GPIO_MAX 32
+#define SIFIVE_GPIO_IRQ_OFFSET 7
+
+struct sifive_gpio {
+ void __iomem *base;
+ struct gpio_chip gc;
+ struct regmap *regs;
+ u32 irq_state;
+ unsigned int trigger[SIFIVE_GPIO_MAX];
+ unsigned int irq_parent[SIFIVE_GPIO_MAX];
+};
+
+static void sifive_gpio_set_ie(struct sifive_gpio *chip, unsigned int offset)
+{
+ unsigned long flags;
+ unsigned int trigger;
+
+ spin_lock_irqsave(&chip->gc.bgpio_lock, flags);
+ trigger = (chip->irq_state & BIT(offset)) ? chip->trigger[offset] : 0;
+ regmap_update_bits(chip->regs, SIFIVE_GPIO_RISE_IE, BIT(offset),
+ (trigger & IRQ_TYPE_EDGE_RISING) ? BIT(offset) : 0);
+ regmap_update_bits(chip->regs, SIFIVE_GPIO_FALL_IE, BIT(offset),
+ (trigger & IRQ_TYPE_EDGE_FALLING) ? BIT(offset) : 0);
+ regmap_update_bits(chip->regs, SIFIVE_GPIO_HIGH_IE, BIT(offset),
+ (trigger & IRQ_TYPE_LEVEL_HIGH) ? BIT(offset) : 0);
+ regmap_update_bits(chip->regs, SIFIVE_GPIO_LOW_IE, BIT(offset),
+ (trigger & IRQ_TYPE_LEVEL_LOW) ? BIT(offset) : 0);
+ spin_unlock_irqrestore(&chip->gc.bgpio_lock, flags);
+}
+
+static int sifive_gpio_irq_set_type(struct irq_data *d, unsigned int trigger)
+{
+ struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
+ struct sifive_gpio *chip = gpiochip_get_data(gc);
+ int offset = irqd_to_hwirq(d);
+
+ if (offset < 0 || offset >= gc->ngpio)
+ return -EINVAL;
+
+ chip->trigger[offset] = trigger;
+ sifive_gpio_set_ie(chip, offset);
+ return 0;
+}
+
+static void sifive_gpio_irq_enable(struct irq_data *d)
+{
+ struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
+ struct sifive_gpio *chip = gpiochip_get_data(gc);
+ int offset = irqd_to_hwirq(d) % SIFIVE_GPIO_MAX;
+ u32 bit = BIT(offset);
+ unsigned long flags;
+
+ irq_chip_enable_parent(d);
+
+ /* Switch to input */
+ gc->direction_input(gc, offset);
+
+ spin_lock_irqsave(&gc->bgpio_lock, flags);
+ /* Clear any sticky pending interrupts */
+ regmap_write(chip->regs, SIFIVE_GPIO_RISE_IP, bit);
+ regmap_write(chip->regs, SIFIVE_GPIO_FALL_IP, bit);
+ regmap_write(chip->regs, SIFIVE_GPIO_HIGH_IP, bit);
+ regmap_write(chip->regs, SIFIVE_GPIO_LOW_IP, bit);
+ spin_unlock_irqrestore(&gc->bgpio_lock, flags);
+
+ /* Enable interrupts */
+ assign_bit(offset, (unsigned long *)&chip->irq_state, 1);
+ sifive_gpio_set_ie(chip, offset);
+}
+
+static void sifive_gpio_irq_disable(struct irq_data *d)
+{
+ struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
+ struct sifive_gpio *chip = gpiochip_get_data(gc);
+ int offset = irqd_to_hwirq(d) % SIFIVE_GPIO_MAX;
+
+ assign_bit(offset, (unsigned long *)&chip->irq_state, 0);
+ sifive_gpio_set_ie(chip, offset);
+ irq_chip_disable_parent(d);
+}
+
+static void sifive_gpio_irq_eoi(struct irq_data *d)
+{
+ struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
+ struct sifive_gpio *chip = gpiochip_get_data(gc);
+ int offset = irqd_to_hwirq(d) % SIFIVE_GPIO_MAX;
+ u32 bit = BIT(offset);
+ unsigned long flags;
+
+ spin_lock_irqsave(&gc->bgpio_lock, flags);
+ /* Clear all pending interrupts */
+ regmap_write(chip->regs, SIFIVE_GPIO_RISE_IP, bit);
+ regmap_write(chip->regs, SIFIVE_GPIO_FALL_IP, bit);
+ regmap_write(chip->regs, SIFIVE_GPIO_HIGH_IP, bit);
+ regmap_write(chip->regs, SIFIVE_GPIO_LOW_IP, bit);
+ spin_unlock_irqrestore(&gc->bgpio_lock, flags);
+
+ irq_chip_eoi_parent(d);
+}
+
+static struct irq_chip sifive_gpio_irqchip = {
+ .name = "sifive-gpio",
+ .irq_set_type = sifive_gpio_irq_set_type,
+ .irq_mask = irq_chip_mask_parent,
+ .irq_unmask = irq_chip_unmask_parent,
+ .irq_enable = sifive_gpio_irq_enable,
+ .irq_disable = sifive_gpio_irq_disable,
+ .irq_eoi = sifive_gpio_irq_eoi,
+};
+
+static int sifive_gpio_child_to_parent_hwirq(struct gpio_chip *gc,
+ unsigned int child,
+ unsigned int child_type,
+ unsigned int *parent,
+ unsigned int *parent_type)
+{
+ *parent_type = IRQ_TYPE_NONE;
+ *parent = child + SIFIVE_GPIO_IRQ_OFFSET;
+ return 0;
+}
+
+static const struct regmap_config sifive_gpio_regmap_config = {
+ .reg_bits = 32,
+ .reg_stride = 4,
+ .val_bits = 32,
+ .fast_io = true,
+ .disable_locking = true,
+};
+
+static int sifive_gpio_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct device_node *node = pdev->dev.of_node;
+ struct device_node *irq_parent;
+ struct irq_domain *parent;
+ struct gpio_irq_chip *girq;
+ struct sifive_gpio *chip;
+ int ret, ngpio;
+
+ chip = devm_kzalloc(dev, sizeof(*chip), GFP_KERNEL);
+ if (!chip)
+ return -ENOMEM;
+
+ chip->base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(chip->base)) {
+ dev_err(dev, "failed to allocate device memory\n");
+ return PTR_ERR(chip->base);
+ }
+
+ chip->regs = devm_regmap_init_mmio(dev, chip->base,
+ &sifive_gpio_regmap_config);
+ if (IS_ERR(chip->regs))
+ return PTR_ERR(chip->regs);
+
+ ngpio = of_irq_count(node);
+ if (ngpio >= SIFIVE_GPIO_MAX) {
+ dev_err(dev, "Too many GPIO interrupts (max=%d)\n",
+ SIFIVE_GPIO_MAX);
+ return -ENXIO;
+ }
+
+ irq_parent = of_irq_find_parent(node);
+ if (!irq_parent) {
+ dev_err(dev, "no IRQ parent node\n");
+ return -ENODEV;
+ }
+ parent = irq_find_host(irq_parent);
+ if (!parent) {
+ dev_err(dev, "no IRQ parent domain\n");
+ return -ENODEV;
+ }
+
+ ret = bgpio_init(&chip->gc, dev, 4,
+ chip->base + SIFIVE_GPIO_INPUT_VAL,
+ chip->base + SIFIVE_GPIO_OUTPUT_VAL,
+ NULL,
+ chip->base + SIFIVE_GPIO_OUTPUT_EN,
+ chip->base + SIFIVE_GPIO_INPUT_EN,
+ 0);
+ if (ret) {
+ dev_err(dev, "unable to init generic GPIO\n");
+ return ret;
+ }
+
+ /* Disable all GPIO interrupts before enabling parent interrupts */
+ regmap_write(chip->regs, SIFIVE_GPIO_RISE_IE, 0);
+ regmap_write(chip->regs, SIFIVE_GPIO_FALL_IE, 0);
+ regmap_write(chip->regs, SIFIVE_GPIO_HIGH_IE, 0);
+ regmap_write(chip->regs, SIFIVE_GPIO_LOW_IE, 0);
+ chip->irq_state = 0;
+
+ chip->gc.base = -1;
+ chip->gc.ngpio = ngpio;
+ chip->gc.label = dev_name(dev);
+ chip->gc.parent = dev;
+ chip->gc.owner = THIS_MODULE;
+ girq = &chip->gc.irq;
+ girq->chip = &sifive_gpio_irqchip;
+ girq->fwnode = of_node_to_fwnode(node);
+ girq->parent_domain = parent;
+ girq->child_to_parent_hwirq = sifive_gpio_child_to_parent_hwirq;
+ girq->handler = handle_bad_irq;
+ girq->default_type = IRQ_TYPE_NONE;
+
+ platform_set_drvdata(pdev, chip);
+ return gpiochip_add_data(&chip->gc, chip);
+}
+
+static const struct of_device_id sifive_gpio_match[] = {
+ { .compatible = "sifive,gpio0" },
+ { .compatible = "sifive,fu540-c000-gpio" },
+ { },
+};
+
+static struct platform_driver sifive_gpio_driver = {
+ .probe = sifive_gpio_probe,
+ .driver = {
+ .name = "sifive_gpio",
+ .of_match_table = of_match_ptr(sifive_gpio_match),
+ },
+};
+builtin_platform_driver(sifive_gpio_driver);
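
The driver above is fully hierarchical: each GPIO line allocates a
dedicated parent interrupt through child_to_parent_hwirq(), with line n
mapping to parent hwirq n + SIFIVE_GPIO_IRQ_OFFSET. A consumer never
sees that plumbing; a sketch with illustrative "demo" names:

#include <linux/gpio/consumer.h>
#include <linux/interrupt.h>

static irqreturn_t demo_handler(int irq, void *data)
{
	return IRQ_HANDLED;
}

static int demo_attach(struct device *dev)
{
	struct gpio_desc *gpiod;
	int irq;

	gpiod = devm_gpiod_get(dev, "demo", GPIOD_IN);
	if (IS_ERR(gpiod))
		return PTR_ERR(gpiod);

	/* Allocates the matching parent (PLIC) interrupt on demand. */
	irq = gpiod_to_irq(gpiod);
	if (irq < 0)
		return irq;

	return devm_request_irq(dev, irq, demo_handler,
				IRQF_TRIGGER_RISING, "demo", NULL);
}
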
diff --git a/drivers/gpio/gpio-tb10x.c b/drivers/gpio/gpio-tb10x.c
index 5e375186f90e..866201cf5f65 100644
--- a/drivers/gpio/gpio-tb10x.c
+++ b/drivers/gpio/gpio-tb10x.c
@@ -243,4 +243,3 @@ static struct platform_driver tb10x_gpio_driver = {
module_platform_driver(tb10x_gpio_driver);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("tb10x gpio.");
-MODULE_VERSION("0.0.1");
diff --git a/drivers/gpio/gpio-tegra.c b/drivers/gpio/gpio-tegra.c
index 6fdfe4c5303e..acb99eff9939 100644
--- a/drivers/gpio/gpio-tegra.c
+++ b/drivers/gpio/gpio-tegra.c
@@ -96,12 +96,12 @@ struct tegra_gpio_info {
static inline void tegra_gpio_writel(struct tegra_gpio_info *tgi,
u32 val, u32 reg)
{
- __raw_writel(val, tgi->regs + reg);
+ writel_relaxed(val, tgi->regs + reg);
}
static inline u32 tegra_gpio_readl(struct tegra_gpio_info *tgi, u32 reg)
{
- return __raw_readl(tgi->regs + reg);
+ return readl_relaxed(tgi->regs + reg);
}
static unsigned int tegra_gpio_compose(unsigned int bank, unsigned int port,
@@ -416,11 +416,8 @@ static void tegra_gpio_irq_handler(struct irq_desc *desc)
static int tegra_gpio_resume(struct device *dev)
{
struct tegra_gpio_info *tgi = dev_get_drvdata(dev);
- unsigned long flags;
unsigned int b, p;
- local_irq_save(flags);
-
for (b = 0; b < tgi->bank_count; b++) {
struct tegra_gpio_bank *bank = &tgi->bank_info[b];
@@ -448,17 +445,14 @@ static int tegra_gpio_resume(struct device *dev)
}
}
- local_irq_restore(flags);
return 0;
}
static int tegra_gpio_suspend(struct device *dev)
{
struct tegra_gpio_info *tgi = dev_get_drvdata(dev);
- unsigned long flags;
unsigned int b, p;
- local_irq_save(flags);
for (b = 0; b < tgi->bank_count; b++) {
struct tegra_gpio_bank *bank = &tgi->bank_info[b];
@@ -488,7 +482,7 @@ static int tegra_gpio_suspend(struct device *dev)
GPIO_INT_ENB(tgi, gpio));
}
}
- local_irq_restore(flags);
+
return 0;
}
@@ -497,6 +491,11 @@ static int tegra_gpio_irq_set_wake(struct irq_data *d, unsigned int enable)
struct tegra_gpio_bank *bank = irq_data_get_irq_chip_data(d);
unsigned int gpio = d->hwirq;
u32 port, bit, mask;
+ int err;
+
+ err = irq_set_irq_wake(bank->irq, enable);
+ if (err)
+ return err;
port = GPIO_PORT(gpio);
bit = GPIO_BIT(gpio);
@@ -507,7 +506,7 @@ static int tegra_gpio_irq_set_wake(struct irq_data *d, unsigned int enable)
else
bank->wake_enb[port] &= ~mask;
- return irq_set_irq_wake(bank->irq, enable);
+ return 0;
}
#endif
@@ -557,7 +556,7 @@ static inline void tegra_gpio_debuginit(struct tegra_gpio_info *tgi)
#endif
static const struct dev_pm_ops tegra_gpio_pm_ops = {
- SET_SYSTEM_SLEEP_PM_OPS(tegra_gpio_suspend, tegra_gpio_resume)
+ SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(tegra_gpio_suspend, tegra_gpio_resume)
};
static int tegra_gpio_probe(struct platform_device *pdev)
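
Two related changes above: moving suspend/resume to the noirq phase
(SET_NOIRQ_SYSTEM_SLEEP_PM_OPS) means they run after device interrupts
have been disabled, so the local_irq_save()/restore() pairs become
redundant; and the accessor swap trades __raw_writel() for
writel_relaxed(), which still omits the DMA-ordering barrier of
writel() but, unlike the raw variant, keeps the little-endian byte
swap on big-endian kernels. A sketch of the accessor, illustrative
name only:

#include <linux/io.h>

static inline void demo_reg_write(void __iomem *regs, u32 val, u32 reg)
{
	writel_relaxed(val, regs + reg);	/* LE store, no wmb() */
}
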
diff --git a/drivers/gpio/gpio-tegra186.c b/drivers/gpio/gpio-tegra186.c
index 55b43b7ce88d..de241263d4be 100644
--- a/drivers/gpio/gpio-tegra186.c
+++ b/drivers/gpio/gpio-tegra186.c
@@ -448,17 +448,24 @@ static int tegra186_gpio_irq_domain_translate(struct irq_domain *domain,
return 0;
}
-static void tegra186_gpio_populate_parent_fwspec(struct gpio_chip *chip,
- struct irq_fwspec *fwspec,
+static void *tegra186_gpio_populate_parent_fwspec(struct gpio_chip *chip,
unsigned int parent_hwirq,
unsigned int parent_type)
{
struct tegra_gpio *gpio = gpiochip_get_data(chip);
+ struct irq_fwspec *fwspec;
+ fwspec = kmalloc(sizeof(*fwspec), GFP_KERNEL);
+ if (!fwspec)
+ return NULL;
+
+ fwspec->fwnode = chip->irq.parent_domain->fwnode;
fwspec->param_count = 3;
fwspec->param[0] = gpio->soc->instance;
fwspec->param[1] = parent_hwirq;
fwspec->param[2] = parent_type;
+
+ return fwspec;
}
static int tegra186_gpio_child_to_parent_hwirq(struct gpio_chip *chip,
@@ -621,7 +628,7 @@ static int tegra186_gpio_probe(struct platform_device *pdev)
irq->chip = &gpio->intc;
irq->fwnode = of_node_to_fwnode(pdev->dev.of_node);
irq->child_to_parent_hwirq = tegra186_gpio_child_to_parent_hwirq;
- irq->populate_parent_fwspec = tegra186_gpio_populate_parent_fwspec;
+ irq->populate_parent_alloc_arg = tegra186_gpio_populate_parent_fwspec;
irq->child_offset_to_irq = tegra186_gpio_child_offset_to_irq;
irq->child_irq_domain_ops.translate = tegra186_gpio_irq_domain_translate;
irq->handler = handle_simple_irq;
diff --git a/drivers/gpio/gpio-thunderx.c b/drivers/gpio/gpio-thunderx.c
index d08d86a22b1f..9f66deab46ea 100644
--- a/drivers/gpio/gpio-thunderx.c
+++ b/drivers/gpio/gpio-thunderx.c
@@ -15,6 +15,7 @@
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/spinlock.h>
+#include <asm-generic/msi.h>
#define GPIO_RX_DAT 0x0
@@ -395,12 +396,32 @@ static int thunderx_gpio_child_to_parent_hwirq(struct gpio_chip *gc,
unsigned int *parent_type)
{
struct thunderx_gpio *txgpio = gpiochip_get_data(gc);
-
- *parent = txgpio->base_msi + (2 * child);
+ struct irq_data *irqd;
+ unsigned int irq;
+
+ irq = txgpio->msix_entries[child].vector;
+ irqd = irq_domain_get_irq_data(gc->irq.parent_domain, irq);
+ if (!irqd)
+ return -EINVAL;
+ *parent = irqd_to_hwirq(irqd);
*parent_type = IRQ_TYPE_LEVEL_HIGH;
return 0;
}
+static void *thunderx_gpio_populate_parent_alloc_info(struct gpio_chip *chip,
+ unsigned int parent_hwirq,
+ unsigned int parent_type)
+{
+ msi_alloc_info_t *info;
+
+ info = kmalloc(sizeof(*info), GFP_KERNEL);
+ if (!info)
+ return NULL;
+
+ info->hwirq = parent_hwirq;
+ return info;
+}
+
static int thunderx_gpio_probe(struct pci_dev *pdev,
const struct pci_device_id *id)
{
@@ -515,6 +536,7 @@ static int thunderx_gpio_probe(struct pci_dev *pdev,
girq->parent_domain =
irq_get_irq_data(txgpio->msix_entries[0].vector)->domain;
girq->child_to_parent_hwirq = thunderx_gpio_child_to_parent_hwirq;
+ girq->populate_parent_alloc_arg = thunderx_gpio_populate_parent_alloc_info;
girq->handler = handle_bad_irq;
girq->default_type = IRQ_TYPE_NONE;
@@ -524,9 +546,15 @@ static int thunderx_gpio_probe(struct pci_dev *pdev,
/* Push on irq_data and the domain for each line. */
for (i = 0; i < ngpio; i++) {
- err = irq_domain_push_irq(chip->irq.domain,
+ struct irq_fwspec fwspec;
+
+ fwspec.fwnode = of_node_to_fwnode(dev->of_node);
+ fwspec.param_count = 2;
+ fwspec.param[0] = i;
+ fwspec.param[1] = IRQ_TYPE_NONE;
+ err = irq_domain_push_irq(girq->domain,
txgpio->msix_entries[i].vector,
- chip);
+ &fwspec);
if (err < 0)
dev_err(dev, "irq_domain_push_irq: %d\n", err);
}
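
The contract behind populate_parent_alloc_arg, visible in the gpiolib
hunks later in this patch: the callback returns a kmalloc()'d,
parent-specific allocation argument (a msi_alloc_info_t here, an
irq_fwspec for ordinary domains), and gpiolib kfree()s it after calling
into the parent. Shape of such a callback, names illustrative:

#include <asm-generic/msi.h>
#include <linux/gpio/driver.h>
#include <linux/slab.h>

static void *demo_populate_parent_alloc_arg(struct gpio_chip *gc,
					    unsigned int parent_hwirq,
					    unsigned int parent_type)
{
	msi_alloc_info_t *info = kmalloc(sizeof(*info), GFP_KERNEL);

	if (!info)
		return NULL;

	info->hwirq = parent_hwirq;	/* freed by gpiolib, not here */
	return info;
}
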
diff --git a/drivers/gpio/gpio-vx855.c b/drivers/gpio/gpio-vx855.c
index 4ff146ca32fe..3bf397b8dfbc 100644
--- a/drivers/gpio/gpio-vx855.c
+++ b/drivers/gpio/gpio-vx855.c
@@ -71,7 +71,7 @@ static inline u_int32_t gpio_o_bit(int i)
return 1 << (i + 13);
}
-/* Mapping betwee numeric GPIO ID and the actual GPIO hardware numbering:
+/* Mapping between numeric GPIO ID and the actual GPIO hardware numbering:
* 0..13 GPI 0..13
* 14..26 GPO 0..12
* 27..41 GPIO 0..14
diff --git a/drivers/gpio/gpio-wcd934x.c b/drivers/gpio/gpio-wcd934x.c
new file mode 100644
index 000000000000..74913f2e5697
--- /dev/null
+++ b/drivers/gpio/gpio-wcd934x.c
@@ -0,0 +1,121 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (c) 2019, Linaro Limited
+
+#include <linux/module.h>
+#include <linux/gpio/driver.h>
+#include <linux/regmap.h>
+#include <linux/slab.h>
+#include <linux/of_device.h>
+
+#define WCD_PIN_MASK(p) BIT(p)
+#define WCD_REG_DIR_CTL_OFFSET 0x42
+#define WCD_REG_VAL_CTL_OFFSET 0x43
+#define WCD934X_NPINS 5
+
+struct wcd_gpio_data {
+ struct regmap *map;
+ struct gpio_chip chip;
+};
+
+static int wcd_gpio_get_direction(struct gpio_chip *chip, unsigned int pin)
+{
+ struct wcd_gpio_data *data = gpiochip_get_data(chip);
+ unsigned int value;
+ int ret;
+
+ ret = regmap_read(data->map, WCD_REG_DIR_CTL_OFFSET, &value);
+ if (ret < 0)
+ return ret;
+
+ if (value & WCD_PIN_MASK(pin))
+ return GPIO_LINE_DIRECTION_OUT;
+
+ return GPIO_LINE_DIRECTION_IN;
+}
+
+static int wcd_gpio_direction_input(struct gpio_chip *chip, unsigned int pin)
+{
+ struct wcd_gpio_data *data = gpiochip_get_data(chip);
+
+ return regmap_update_bits(data->map, WCD_REG_DIR_CTL_OFFSET,
+ WCD_PIN_MASK(pin), 0);
+}
+
+static int wcd_gpio_direction_output(struct gpio_chip *chip, unsigned int pin,
+ int val)
+{
+ struct wcd_gpio_data *data = gpiochip_get_data(chip);
+
+ regmap_update_bits(data->map, WCD_REG_DIR_CTL_OFFSET,
+ WCD_PIN_MASK(pin), WCD_PIN_MASK(pin));
+
+ return regmap_update_bits(data->map, WCD_REG_VAL_CTL_OFFSET,
+ WCD_PIN_MASK(pin),
+ val ? WCD_PIN_MASK(pin) : 0);
+}
+
+static int wcd_gpio_get(struct gpio_chip *chip, unsigned int pin)
+{
+ struct wcd_gpio_data *data = gpiochip_get_data(chip);
+ unsigned int value;
+
+ regmap_read(data->map, WCD_REG_VAL_CTL_OFFSET, &value);
+
+ return !!(value & WCD_PIN_MASK(pin));
+}
+
+static void wcd_gpio_set(struct gpio_chip *chip, unsigned int pin, int val)
+{
+ wcd_gpio_direction_output(chip, pin, val);
+}
+
+static int wcd_gpio_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct wcd_gpio_data *data;
+ struct gpio_chip *chip;
+
+ data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
+ data->map = dev_get_regmap(dev->parent, NULL);
+ if (!data->map) {
+ dev_err(dev, "%s: failed to get regmap\n", __func__);
+ return -EINVAL;
+ }
+
+ chip = &data->chip;
+ chip->direction_input = wcd_gpio_direction_input;
+ chip->direction_output = wcd_gpio_direction_output;
+ chip->get_direction = wcd_gpio_get_direction;
+ chip->get = wcd_gpio_get;
+ chip->set = wcd_gpio_set;
+ chip->parent = dev;
+ chip->base = -1;
+ chip->ngpio = WCD934X_NPINS;
+ chip->label = dev_name(dev);
+ chip->of_gpio_n_cells = 2;
+ chip->can_sleep = false;
+
+ return devm_gpiochip_add_data(dev, chip, data);
+}
+
+static const struct of_device_id wcd_gpio_of_match[] = {
+ { .compatible = "qcom,wcd9340-gpio" },
+ { .compatible = "qcom,wcd9341-gpio" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, wcd_gpio_of_match);
+
+static struct platform_driver wcd_gpio_driver = {
+ .driver = {
+ .name = "wcd934x-gpio",
+ .of_match_table = wcd_gpio_of_match,
+ },
+ .probe = wcd_gpio_probe,
+};
+
+module_platform_driver(wcd_gpio_driver);
+MODULE_DESCRIPTION("Qualcomm Technologies, Inc WCD GPIO control driver");
+MODULE_LICENSE("GPL v2");
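
Every pin operation in the driver above reduces to a masked
read-modify-write through the codec's regmap: regmap_update_bits(map,
reg, mask, val) computes new = (old & ~mask) | (val & mask) under the
regmap's lock. A sketch of what the direction_output path does for pin
2, with stand-in register names mirroring the offsets defined at the
top of the driver:

#include <linux/bitops.h>
#include <linux/regmap.h>

#define DEMO_DIR_CTL 0x42	/* stand-in for WCD_REG_DIR_CTL_OFFSET */
#define DEMO_VAL_CTL 0x43	/* stand-in for WCD_REG_VAL_CTL_OFFSET */

static int demo_pin2_out_low(struct regmap *map)
{
	int ret;

	/* Direction register: one bit per pin, 1 = output. */
	ret = regmap_update_bits(map, DEMO_DIR_CTL, BIT(2), BIT(2));
	if (ret)
		return ret;

	/* Value register: drive pin 2 low. */
	return regmap_update_bits(map, DEMO_VAL_CTL, BIT(2), 0);
}
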
diff --git a/drivers/gpio/gpio-xgs-iproc.c b/drivers/gpio/gpio-xgs-iproc.c
index b21c2e436b61..ad5489a65d54 100644
--- a/drivers/gpio/gpio-xgs-iproc.c
+++ b/drivers/gpio/gpio-xgs-iproc.c
@@ -251,8 +251,7 @@ static int iproc_gpio_probe(struct platform_device *pdev)
/*
* Directly request the irq here instead of passing
- * a flow-handler to gpiochip_set_chained_irqchip,
- * because the irq is shared.
+ * a flow-handler because the irq is shared.
*/
ret = devm_request_irq(dev, irq, iproc_gpio_irq_handler,
IRQF_SHARED, chip->gc.label, &chip->gc);
diff --git a/drivers/gpio/gpiolib-of.c b/drivers/gpio/gpiolib-of.c
index b696e4598a24..1b3f217a35e2 100644
--- a/drivers/gpio/gpiolib-of.c
+++ b/drivers/gpio/gpiolib-of.c
@@ -132,27 +132,6 @@ static void of_gpio_flags_quirks(struct device_node *np,
int index)
{
/*
- * Handle MMC "cd-inverted" and "wp-inverted" semantics.
- */
- if (IS_ENABLED(CONFIG_MMC)) {
- /*
- * Active low is the default according to the
- * SDHCI specification and the device tree
- * bindings. However the code in the current
- * kernel was written such that the phandle
- * flags were always respected, and "cd-inverted"
- * would invert the flag from the device phandle.
- */
- if (!strcmp(propname, "cd-gpios")) {
- if (of_property_read_bool(np, "cd-inverted"))
- *flags ^= OF_GPIO_ACTIVE_LOW;
- }
- if (!strcmp(propname, "wp-gpios")) {
- if (of_property_read_bool(np, "wp-inverted"))
- *flags ^= OF_GPIO_ACTIVE_LOW;
- }
- }
- /*
* Some GPIO fixed regulator quirks.
* Note that active low is the default.
*/
diff --git a/drivers/gpio/gpiolib-sysfs.c b/drivers/gpio/gpiolib-sysfs.c
index fbf6b1a0a4fa..23e3d335cd54 100644
--- a/drivers/gpio/gpiolib-sysfs.c
+++ b/drivers/gpio/gpiolib-sysfs.c
@@ -762,10 +762,9 @@ int gpiochip_sysfs_register(struct gpio_device *gdev)
parent = &gdev->dev;
/* use chip->base for the ID; it's already known to be unique */
- dev = device_create_with_groups(&gpio_class, parent,
- MKDEV(0, 0),
- chip, gpiochip_groups,
- "gpiochip%d", chip->base);
+ dev = device_create_with_groups(&gpio_class, parent, MKDEV(0, 0), chip,
+ gpiochip_groups, GPIOCHIP_NAME "%d",
+ chip->base);
if (IS_ERR(dev))
return PTR_ERR(dev);
diff --git a/drivers/gpio/gpiolib.c b/drivers/gpio/gpiolib.c
index 78a16e42f222..99ac27a72e28 100644
--- a/drivers/gpio/gpiolib.c
+++ b/drivers/gpio/gpiolib.c
@@ -140,7 +140,7 @@ EXPORT_SYMBOL_GPL(gpio_to_desc);
* in the given chip for the specified hardware number.
*/
struct gpio_desc *gpiochip_get_desc(struct gpio_chip *chip,
- u16 hwnum)
+ unsigned int hwnum)
{
struct gpio_device *gdev = chip->gpiodev;
@@ -232,15 +232,15 @@ int gpiod_get_direction(struct gpio_desc *desc)
return -ENOTSUPP;
ret = chip->get_direction(chip, offset);
- if (ret > 0) {
- /* GPIOF_DIR_IN, or other positive */
+ if (ret < 0)
+ return ret;
+
+ /* GPIOF_DIR_IN or other positive, otherwise GPIOF_DIR_OUT */
+ if (ret > 0)
ret = 1;
- clear_bit(FLAG_IS_OUT, &desc->flags);
- }
- if (ret == 0) {
- /* GPIOF_DIR_OUT */
- set_bit(FLAG_IS_OUT, &desc->flags);
- }
+
+ assign_bit(FLAG_IS_OUT, &desc->flags, !ret);
+
return ret;
}
EXPORT_SYMBOL_GPL(gpiod_get_direction);
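
assign_bit(nr, addr, value) is exactly "value ? set_bit() :
clear_bit()", done atomically, and is used throughout this patch to
collapse such if/else pairs (__assign_bit() being the non-atomic
variant used on locally-owned bitmaps further below). A small sketch
with a stand-in for gpiolib's private flag bit:

#include <linux/bitops.h>

#define DEMO_FLAG_IS_OUT 1	/* stand-in for gpiolib's FLAG_IS_OUT */

static void demo_record_direction(unsigned long *flags, int dir)
{
	/* dir: 1 = input, 0 = output, as get_direction() reports. */
	assign_bit(DEMO_FLAG_IS_OUT, flags, !dir);
}
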
@@ -492,15 +492,6 @@ static int linehandle_validate_flags(u32 flags)
return 0;
}
-static void linehandle_configure_flag(unsigned long *flagsp,
- u32 bit, bool active)
-{
- if (active)
- set_bit(bit, flagsp);
- else
- clear_bit(bit, flagsp);
-}
-
static long linehandle_set_config(struct linehandle_state *lh,
void __user *ip)
{
@@ -522,22 +513,22 @@ static long linehandle_set_config(struct linehandle_state *lh,
desc = lh->descs[i];
flagsp = &desc->flags;
- linehandle_configure_flag(flagsp, FLAG_ACTIVE_LOW,
+ assign_bit(FLAG_ACTIVE_LOW, flagsp,
lflags & GPIOHANDLE_REQUEST_ACTIVE_LOW);
- linehandle_configure_flag(flagsp, FLAG_OPEN_DRAIN,
+ assign_bit(FLAG_OPEN_DRAIN, flagsp,
lflags & GPIOHANDLE_REQUEST_OPEN_DRAIN);
- linehandle_configure_flag(flagsp, FLAG_OPEN_SOURCE,
+ assign_bit(FLAG_OPEN_SOURCE, flagsp,
lflags & GPIOHANDLE_REQUEST_OPEN_SOURCE);
- linehandle_configure_flag(flagsp, FLAG_PULL_UP,
+ assign_bit(FLAG_PULL_UP, flagsp,
lflags & GPIOHANDLE_REQUEST_BIAS_PULL_UP);
- linehandle_configure_flag(flagsp, FLAG_PULL_DOWN,
+ assign_bit(FLAG_PULL_DOWN, flagsp,
lflags & GPIOHANDLE_REQUEST_BIAS_PULL_DOWN);
- linehandle_configure_flag(flagsp, FLAG_BIAS_DISABLE,
+ assign_bit(FLAG_BIAS_DISABLE, flagsp,
lflags & GPIOHANDLE_REQUEST_BIAS_DISABLE);
/*
@@ -686,14 +677,13 @@ static int linehandle_create(struct gpio_device *gdev, void __user *ip)
/* Request each GPIO */
for (i = 0; i < handlereq.lines; i++) {
u32 offset = handlereq.lineoffsets[i];
- struct gpio_desc *desc;
+ struct gpio_desc *desc = gpiochip_get_desc(gdev->chip, offset);
- if (offset >= gdev->ngpio) {
- ret = -EINVAL;
+ if (IS_ERR(desc)) {
+ ret = PTR_ERR(desc);
goto out_free_descs;
}
- desc = &gdev->descs[offset];
ret = gpiod_request(desc, lh->label);
if (ret)
goto out_free_descs;
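
The open-coded "offset >= gdev->ngpio" checks in the chardev paths are
replaced by gpiochip_get_desc(), which returns ERR_PTR(-EINVAL) for
out-of-range lines, so validation lives in one place. Pattern sketch
using the gpiolib-internal helper:

#include <linux/err.h>
#include <linux/gpio/driver.h>

static int demo_validate(struct gpio_chip *chip, unsigned int offset)
{
	struct gpio_desc *desc = gpiochip_get_desc(chip, offset);

	/* -EINVAL whenever offset is at or beyond ngpio. */
	return IS_ERR(desc) ? PTR_ERR(desc) : 0;
}
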
@@ -1018,8 +1008,9 @@ static int lineevent_create(struct gpio_device *gdev, void __user *ip)
lflags = eventreq.handleflags;
eflags = eventreq.eventflags;
- if (offset >= gdev->ngpio)
- return -EINVAL;
+ desc = gpiochip_get_desc(gdev->chip, offset);
+ if (IS_ERR(desc))
+ return PTR_ERR(desc);
/* Return an error if an unknown flag is set */
if ((lflags & ~GPIOHANDLE_REQUEST_VALID_FLAGS) ||
@@ -1057,7 +1048,6 @@ static int lineevent_create(struct gpio_device *gdev, void __user *ip)
}
}
- desc = &gdev->descs[offset];
ret = gpiod_request(desc, le->label);
if (ret)
goto out_free_label;
@@ -1184,10 +1174,11 @@ static long gpio_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
if (copy_from_user(&lineinfo, ip, sizeof(lineinfo)))
return -EFAULT;
- if (lineinfo.line_offset >= gdev->ngpio)
- return -EINVAL;
- desc = &gdev->descs[lineinfo.line_offset];
+ desc = gpiochip_get_desc(chip, lineinfo.line_offset);
+ if (IS_ERR(desc))
+ return PTR_ERR(desc);
+
if (desc->name) {
strncpy(lineinfo.name, desc->name,
sizeof(lineinfo.name));
@@ -1427,7 +1418,7 @@ int gpiochip_add_data_with_key(struct gpio_chip *chip, void *data,
ret = gdev->id;
goto err_free_gdev;
}
- dev_set_name(&gdev->dev, "gpiochip%d", gdev->id);
+ dev_set_name(&gdev->dev, GPIOCHIP_NAME "%d", gdev->id);
device_initialize(&gdev->dev);
dev_set_drvdata(&gdev->dev, gdev);
if (chip->parent && chip->parent->driver)
@@ -1452,7 +1443,7 @@ int gpiochip_add_data_with_key(struct gpio_chip *chip, void *data,
if (chip->ngpio > FASTPATH_NGPIO)
chip_warn(chip, "line cnt %u is greater than fast path cnt %u\n",
- chip->ngpio, FASTPATH_NGPIO);
+ chip->ngpio, FASTPATH_NGPIO);
gdev->label = kstrdup_const(chip->label ?: "unknown", GFP_KERNEL);
if (!gdev->label) {
@@ -1495,11 +1486,11 @@ int gpiochip_add_data_with_key(struct gpio_chip *chip, void *data,
goto err_free_label;
}
- spin_unlock_irqrestore(&gpio_lock, flags);
-
for (i = 0; i < chip->ngpio; i++)
gdev->descs[i].gdev = gdev;
+ spin_unlock_irqrestore(&gpio_lock, flags);
+
#ifdef CONFIG_PINCTRL
INIT_LIST_HEAD(&gdev->pin_ranges);
#endif
@@ -1524,15 +1515,11 @@ int gpiochip_add_data_with_key(struct gpio_chip *chip, void *data,
struct gpio_desc *desc = &gdev->descs[i];
if (chip->get_direction && gpiochip_line_is_valid(chip, i)) {
- if (!chip->get_direction(chip, i))
- set_bit(FLAG_IS_OUT, &desc->flags);
- else
- clear_bit(FLAG_IS_OUT, &desc->flags);
+ assign_bit(FLAG_IS_OUT,
+ &desc->flags, !chip->get_direction(chip, i));
} else {
- if (!chip->direction_input)
- set_bit(FLAG_IS_OUT, &desc->flags);
- else
- clear_bit(FLAG_IS_OUT, &desc->flags);
+ assign_bit(FLAG_IS_OUT,
+ &desc->flags, !chip->direction_input);
}
}
@@ -1813,7 +1800,7 @@ EXPORT_SYMBOL_GPL(gpiochip_irqchip_irq_valid);
* gpiochip_set_cascaded_irqchip() - connects a cascaded irqchip to a gpiochip
* @gc: the gpiochip to set the irqchip chain to
* @parent_irq: the irq number corresponding to the parent IRQ for this
- * chained irqchip
+ * cascaded irqchip
* @parent_handler: the parent interrupt handler for the accumulated IRQ
* coming out of the gpiochip. If the interrupt is nested rather than
* cascaded, pass NULL in this handler argument
@@ -1856,29 +1843,6 @@ static void gpiochip_set_cascaded_irqchip(struct gpio_chip *gc,
}
/**
- * gpiochip_set_chained_irqchip() - connects a chained irqchip to a gpiochip
- * @gpiochip: the gpiochip to set the irqchip chain to
- * @irqchip: the irqchip to chain to the gpiochip
- * @parent_irq: the irq number corresponding to the parent IRQ for this
- * chained irqchip
- * @parent_handler: the parent interrupt handler for the accumulated IRQ
- * coming out of the gpiochip.
- */
-void gpiochip_set_chained_irqchip(struct gpio_chip *gpiochip,
- struct irq_chip *irqchip,
- unsigned int parent_irq,
- irq_flow_handler_t parent_handler)
-{
- if (gpiochip->irq.threaded) {
- chip_err(gpiochip, "tried to chain a threaded gpiochip\n");
- return;
- }
-
- gpiochip_set_cascaded_irqchip(gpiochip, parent_irq, parent_handler);
-}
-EXPORT_SYMBOL_GPL(gpiochip_set_chained_irqchip);
-
-/**
* gpiochip_set_nested_irqchip() - connects a nested irqchip to a gpiochip
* @gpiochip: the gpiochip to set the irqchip nested handler to
* @irqchip: the irqchip to nest to the gpiochip
@@ -2003,7 +1967,7 @@ static int gpiochip_hierarchy_irq_domain_alloc(struct irq_domain *d,
irq_hw_number_t hwirq;
unsigned int type = IRQ_TYPE_NONE;
struct irq_fwspec *fwspec = data;
- struct irq_fwspec parent_fwspec;
+ void *parent_arg;
unsigned int parent_hwirq;
unsigned int parent_type;
struct gpio_irq_chip *girq = &gc->irq;
@@ -2019,7 +1983,7 @@ static int gpiochip_hierarchy_irq_domain_alloc(struct irq_domain *d,
if (ret)
return ret;
- chip_info(gc, "allocate IRQ %d, hwirq %lu\n", irq, hwirq);
+ chip_dbg(gc, "allocate IRQ %d, hwirq %lu\n", irq, hwirq);
ret = girq->child_to_parent_hwirq(gc, hwirq, type,
&parent_hwirq, &parent_type);
@@ -2027,7 +1991,7 @@ static int gpiochip_hierarchy_irq_domain_alloc(struct irq_domain *d,
chip_err(gc, "can't look up hwirq %lu\n", hwirq);
return ret;
}
- chip_info(gc, "found parent hwirq %u\n", parent_hwirq);
+ chip_dbg(gc, "found parent hwirq %u\n", parent_hwirq);
/*
* We set handle_bad_irq because the .set_type() should
@@ -2042,23 +2006,27 @@ static int gpiochip_hierarchy_irq_domain_alloc(struct irq_domain *d,
NULL, NULL);
irq_set_probe(irq);
- /*
- * Create a IRQ fwspec to send up to the parent irqdomain:
- * specify the hwirq we address on the parent and tie it
- * all together up the chain.
- */
- parent_fwspec.fwnode = d->parent->fwnode;
/* This parent only handles asserted level IRQs */
- girq->populate_parent_fwspec(gc, &parent_fwspec, parent_hwirq,
- parent_type);
- chip_info(gc, "alloc_irqs_parent for %d parent hwirq %d\n",
+ parent_arg = girq->populate_parent_alloc_arg(gc, parent_hwirq, parent_type);
+ if (!parent_arg)
+ return -ENOMEM;
+
+ chip_dbg(gc, "alloc_irqs_parent for %d parent hwirq %d\n",
irq, parent_hwirq);
- ret = irq_domain_alloc_irqs_parent(d, irq, 1, &parent_fwspec);
+ irq_set_lockdep_class(irq, gc->irq.lock_key, gc->irq.request_key);
+ ret = irq_domain_alloc_irqs_parent(d, irq, 1, parent_arg);
+ /*
+ * If the parent irqdomain is MSI, the interrupts have already
+ * been allocated, so -EEXIST is expected and treated as success.
+ */
+ if (irq_domain_is_msi(d->parent) && (ret == -EEXIST))
+ ret = 0;
if (ret)
chip_err(gc,
"failed to allocate parent hwirq %d for hwirq %lu\n",
parent_hwirq, hwirq);
+ kfree(parent_arg);
return ret;
}
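
The reworked allocator above treats the parent alloc argument as an
opaque, kmalloc()'d object owned by gpiolib. Condensed to its
essential control flow (illustrative function, not part of the patch):

#include <linux/irqdomain.h>
#include <linux/slab.h>

static int demo_alloc_in_parent(struct irq_domain *d, unsigned int irq,
				void *parent_arg)
{
	int ret = irq_domain_alloc_irqs_parent(d, irq, 1, parent_arg);

	/* MSI parents preallocate their interrupts, so EEXIST is OK. */
	if (irq_domain_is_msi(d->parent) && ret == -EEXIST)
		ret = 0;

	kfree(parent_arg);	/* gpiolib owns the alloc argument */
	return ret;
}
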
@@ -2095,8 +2063,8 @@ static int gpiochip_hierarchy_add_domain(struct gpio_chip *gc)
if (!gc->irq.child_offset_to_irq)
gc->irq.child_offset_to_irq = gpiochip_child_offset_to_irq_noop;
- if (!gc->irq.populate_parent_fwspec)
- gc->irq.populate_parent_fwspec =
+ if (!gc->irq.populate_parent_alloc_arg)
+ gc->irq.populate_parent_alloc_arg =
gpiochip_populate_parent_fwspec_twocell;
gpiochip_hierarchy_setup_domain_ops(&gc->irq.child_irq_domain_ops);
@@ -2122,27 +2090,43 @@ static bool gpiochip_hierarchy_is_hierarchical(struct gpio_chip *gc)
return !!gc->irq.parent_domain;
}
-void gpiochip_populate_parent_fwspec_twocell(struct gpio_chip *chip,
- struct irq_fwspec *fwspec,
+void *gpiochip_populate_parent_fwspec_twocell(struct gpio_chip *chip,
unsigned int parent_hwirq,
unsigned int parent_type)
{
+ struct irq_fwspec *fwspec;
+
+ fwspec = kmalloc(sizeof(*fwspec), GFP_KERNEL);
+ if (!fwspec)
+ return NULL;
+
+ fwspec->fwnode = chip->irq.parent_domain->fwnode;
fwspec->param_count = 2;
fwspec->param[0] = parent_hwirq;
fwspec->param[1] = parent_type;
+
+ return fwspec;
}
EXPORT_SYMBOL_GPL(gpiochip_populate_parent_fwspec_twocell);
-void gpiochip_populate_parent_fwspec_fourcell(struct gpio_chip *chip,
- struct irq_fwspec *fwspec,
+void *gpiochip_populate_parent_fwspec_fourcell(struct gpio_chip *chip,
unsigned int parent_hwirq,
unsigned int parent_type)
{
+ struct irq_fwspec *fwspec;
+
+ fwspec = kmalloc(sizeof(*fwspec), GFP_KERNEL);
+ if (!fwspec)
+ return NULL;
+
+ fwspec->fwnode = chip->irq.parent_domain->fwnode;
fwspec->param_count = 4;
fwspec->param[0] = 0;
fwspec->param[1] = parent_hwirq;
fwspec->param[2] = 0;
fwspec->param[3] = parent_type;
+
+ return fwspec;
}
EXPORT_SYMBOL_GPL(gpiochip_populate_parent_fwspec_fourcell);
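
With the signature change, drivers that used to fill a caller-provided
fwspec now just point populate_parent_alloc_arg at the stock helper
matching the parent domain's interrupt-cell count (gpiolib defaults to
the twocell variant, as the hunk at gpiochip_hierarchy_add_domain()
shows). A hedged wiring sketch, names illustrative:

#include <linux/gpio/driver.h>

static void demo_wire_irq(struct gpio_chip *gc, struct irq_domain *parent)
{
	struct gpio_irq_chip *girq = &gc->irq;

	girq->parent_domain = parent;
	/* Parent expects (hwirq, type) cells. */
	girq->populate_parent_alloc_arg =
		gpiochip_populate_parent_fwspec_twocell;
}
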
@@ -2998,7 +2982,8 @@ EXPORT_SYMBOL_GPL(gpiochip_is_requested);
* A pointer to the GPIO descriptor, or an ERR_PTR()-encoded negative error
* code on failure.
*/
-struct gpio_desc *gpiochip_request_own_desc(struct gpio_chip *chip, u16 hwnum,
+struct gpio_desc *gpiochip_request_own_desc(struct gpio_chip *chip,
+ unsigned int hwnum,
const char *label,
enum gpio_lookup_flags lflags,
enum gpiod_flags dflags)
@@ -3050,25 +3035,13 @@ EXPORT_SYMBOL_GPL(gpiochip_free_own_desc);
* rely on gpio_request() having been called beforehand.
*/
-static int gpio_set_config(struct gpio_chip *gc, unsigned offset,
+static int gpio_set_config(struct gpio_chip *gc, unsigned int offset,
enum pin_config_param mode)
{
- unsigned long config;
- unsigned arg;
-
- switch (mode) {
- case PIN_CONFIG_BIAS_DISABLE:
- case PIN_CONFIG_BIAS_PULL_DOWN:
- case PIN_CONFIG_BIAS_PULL_UP:
- arg = 1;
- break;
-
- default:
- arg = 0;
- }
+ if (!gc->set_config)
+ return -ENOTSUPP;
- config = PIN_CONF_PACKED(mode, arg);
- return gc->set_config ? gc->set_config(gc, offset, config) : -ENOTSUPP;
+ return gc->set_config(gc, offset, mode);
}
static int gpio_set_bias(struct gpio_chip *chip, struct gpio_desc *desc)
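
After this hunk, gpio_set_config() forwards its argument to
gc->set_config() unmodified, so the callers below pack the pinconf
value themselves. A packed config carries the parameter in the low
bits and the argument above it; a sketch using the pinconf-generic
helpers, assuming a 5000 us debounce request:

#include <linux/bug.h>
#include <linux/pinctrl/pinconf-generic.h>

static unsigned long demo_pack_debounce(void)
{
	unsigned long cfg =
		pinconf_to_config_packed(PIN_CONFIG_INPUT_DEBOUNCE, 5000);

	/* ...and the two halves unpack back out: */
	WARN_ON(pinconf_to_config_param(cfg) != PIN_CONFIG_INPUT_DEBOUNCE);
	WARN_ON(pinconf_to_config_argument(cfg) != 5000);
	return cfg;
}
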
@@ -3302,15 +3275,9 @@ int gpiod_set_debounce(struct gpio_desc *desc, unsigned debounce)
VALIDATE_DESC(desc);
chip = desc->gdev->chip;
- if (!chip->set || !chip->set_config) {
- gpiod_dbg(desc,
- "%s: missing set() or set_config() operations\n",
- __func__);
- return -ENOTSUPP;
- }
config = pinconf_to_config_packed(PIN_CONFIG_INPUT_DEBOUNCE, debounce);
- return chip->set_config(chip, gpio_chip_hwgpio(desc), config);
+ return gpio_set_config(chip, gpio_chip_hwgpio(desc), config);
}
EXPORT_SYMBOL_GPL(gpiod_set_debounce);
@@ -3334,10 +3301,7 @@ int gpiod_set_transitory(struct gpio_desc *desc, bool transitory)
* Handle FLAG_TRANSITORY first, enabling queries to gpiolib for
* persistence state.
*/
- if (transitory)
- set_bit(FLAG_TRANSITORY, &desc->flags);
- else
- clear_bit(FLAG_TRANSITORY, &desc->flags);
+ assign_bit(FLAG_TRANSITORY, &desc->flags, transitory);
/* If the driver supports it, set the persistence state now */
chip = desc->gdev->chip;
@@ -3347,7 +3311,7 @@ int gpiod_set_transitory(struct gpio_desc *desc, bool transitory)
packed = pinconf_to_config_packed(PIN_CONFIG_PERSIST_STATE,
!transitory);
gpio = gpio_chip_hwgpio(desc);
- rc = chip->set_config(chip, gpio, packed);
+ rc = gpio_set_config(chip, gpio, packed);
if (rc == -ENOTSUPP) {
dev_dbg(&desc->gdev->dev, "Persistence not supported for GPIO %d\n",
gpio);
@@ -3371,6 +3335,17 @@ int gpiod_is_active_low(const struct gpio_desc *desc)
}
EXPORT_SYMBOL_GPL(gpiod_is_active_low);
+/**
+ * gpiod_toggle_active_low - toggle whether a GPIO is active-low or not
+ * @desc: the gpio descriptor to change
+ */
+void gpiod_toggle_active_low(struct gpio_desc *desc)
+{
+ VALIDATE_DESC_VOID(desc);
+ change_bit(FLAG_ACTIVE_LOW, &desc->flags);
+}
+EXPORT_SYMBOL_GPL(gpiod_toggle_active_low);
+
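
gpiod_toggle_active_low() gives subsystems a way to apply polarity
quirks themselves, which is what allows the MMC "cd-inverted" and
"wp-inverted" handling to be dropped from gpiolib-of.c earlier in this
patch. A plausible consumer, with hypothetical naming:

#include <linux/gpio/consumer.h>
#include <linux/of.h>

static void demo_apply_cd_inverted(struct device_node *np,
				   struct gpio_desc *cd_gpiod)
{
	/* "cd-inverted" flips the polarity already encoded in the
	 * cd-gpios phandle flags. */
	if (of_property_read_bool(np, "cd-inverted"))
		gpiod_toggle_active_low(cd_gpiod);
}
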
/* I/O calls are only valid after configuration completed; the relevant
* "is this a valid GPIO" error checks should already have been done.
*
@@ -3793,10 +3768,7 @@ int gpiod_set_array_value_complex(bool raw, bool can_sleep,
gpio_set_open_source_value_commit(desc, value);
} else {
__set_bit(hwgpio, mask);
- if (value)
- __set_bit(hwgpio, bits);
- else
- __clear_bit(hwgpio, bits);
+ __assign_bit(hwgpio, bits, value);
count++;
}
i++;
@@ -5113,7 +5085,7 @@ static int __init gpiolib_dev_init(void)
return ret;
}
- ret = alloc_chrdev_region(&gpio_devt, 0, GPIO_DEV_MAX, "gpiochip");
+ ret = alloc_chrdev_region(&gpio_devt, 0, GPIO_DEV_MAX, GPIOCHIP_NAME);
if (ret < 0) {
pr_err("gpiolib: failed to allocate char dev region\n");
bus_unregister(&gpio_bus_type);
diff --git a/drivers/gpio/gpiolib.h b/drivers/gpio/gpiolib.h
index ca9bc1e4803c..3e0aab2945d8 100644
--- a/drivers/gpio/gpiolib.h
+++ b/drivers/gpio/gpiolib.h
@@ -16,6 +16,8 @@
#include <linux/module.h>
#include <linux/cdev.h>
+#define GPIOCHIP_NAME "gpiochip"
+
/**
* struct gpio_device - internal state container for GPIO devices
* @id: numerical ID number for the GPIO chip
@@ -78,7 +80,8 @@ struct gpio_array {
unsigned long invert_mask[];
};
-struct gpio_desc *gpiochip_get_desc(struct gpio_chip *chip, u16 hwnum);
+struct gpio_desc *gpiochip_get_desc(struct gpio_chip *chip,
+ unsigned int hwnum);
int gpiod_get_array_value_complex(bool raw, bool can_sleep,
unsigned int array_size,
struct gpio_desc **desc_array,
diff --git a/drivers/gpu/drm/Kconfig b/drivers/gpu/drm/Kconfig
index bfdadc3667e0..d0aa6cff2e02 100644
--- a/drivers/gpu/drm/Kconfig
+++ b/drivers/gpu/drm/Kconfig
@@ -54,6 +54,9 @@ config DRM_DEBUG_MM
If in doubt, say "N".
+config DRM_EXPORT_FOR_TESTS
+ bool
+
config DRM_DEBUG_SELFTEST
tristate "kselftests for DRM"
depends on DRM
@@ -61,6 +64,7 @@ config DRM_DEBUG_SELFTEST
select PRIME_NUMBERS
select DRM_LIB_RANDOM
select DRM_KMS_HELPER
+ select DRM_EXPORT_FOR_TESTS if m
default n
help
This option provides kernel modules that can be used to run
@@ -164,6 +168,7 @@ config DRM_LOAD_EDID_FIRMWARE
config DRM_DP_CEC
bool "Enable DisplayPort CEC-Tunneling-over-AUX HDMI support"
+ depends on DRM
select CEC_CORE
help
Choose this option if you want to enable HDMI CEC support for
@@ -294,9 +299,6 @@ config DRM_VKMS
If M is selected the module will be called vkms.
-config DRM_ATI_PCIGART
- bool
-
source "drivers/gpu/drm/exynos/Kconfig"
source "drivers/gpu/drm/rockchip/Kconfig"
@@ -393,7 +395,6 @@ menuconfig DRM_LEGACY
bool "Enable legacy drivers (DANGEROUS)"
depends on DRM && MMU
select DRM_VM
- select DRM_ATI_PCIGART if PCI
help
Enable legacy DRI1 drivers. Those drivers expose unsafe and dangerous
APIs to user-space, which can be used to circumvent access
diff --git a/drivers/gpu/drm/Makefile b/drivers/gpu/drm/Makefile
index 9f1c7c486f88..6493088a0fdd 100644
--- a/drivers/gpu/drm/Makefile
+++ b/drivers/gpu/drm/Makefile
@@ -5,7 +5,7 @@
drm-y := drm_auth.o drm_cache.o \
drm_file.o drm_gem.o drm_ioctl.o drm_irq.o \
- drm_memory.o drm_drv.o drm_pci.o \
+ drm_memory.o drm_drv.o \
drm_sysfs.o drm_hashtab.o drm_mm.o \
drm_crtc.o drm_fourcc.o drm_modes.o drm_edid.o \
drm_encoder_slave.o \
@@ -25,10 +25,10 @@ drm-$(CONFIG_DRM_VM) += drm_vm.o
drm-$(CONFIG_COMPAT) += drm_ioc32.o
drm-$(CONFIG_DRM_GEM_CMA_HELPER) += drm_gem_cma_helper.o
drm-$(CONFIG_DRM_GEM_SHMEM_HELPER) += drm_gem_shmem_helper.o
-drm-$(CONFIG_DRM_ATI_PCIGART) += ati_pcigart.o
drm-$(CONFIG_DRM_PANEL) += drm_panel.o
drm-$(CONFIG_OF) += drm_of.o
drm-$(CONFIG_AGP) += drm_agpsupport.o
+drm-$(CONFIG_PCI) += drm_pci.o
drm-$(CONFIG_DEBUG_FS) += drm_debugfs.o drm_debugfs_crc.o
drm-$(CONFIG_DRM_LOAD_EDID_FIRMWARE) += drm_edid_load.o
diff --git a/drivers/gpu/drm/amd/acp/Kconfig b/drivers/gpu/drm/amd/acp/Kconfig
index 0d12ebf66174..13340f353ea8 100644
--- a/drivers/gpu/drm/amd/acp/Kconfig
+++ b/drivers/gpu/drm/amd/acp/Kconfig
@@ -2,11 +2,11 @@
menu "ACP (Audio CoProcessor) Configuration"
config DRM_AMD_ACP
- bool "Enable AMD Audio CoProcessor IP support"
- depends on DRM_AMDGPU
- select MFD_CORE
- select PM_GENERIC_DOMAINS if PM
- help
+ bool "Enable AMD Audio CoProcessor IP support"
+ depends on DRM_AMDGPU
+ select MFD_CORE
+ select PM_GENERIC_DOMAINS if PM
+ help
Choose this option to enable ACP IP support for AMD SOCs.
This adds the ACP (Audio CoProcessor) IP driver and wires
it up into the amdgpu driver. The ACP block provides the DMA
diff --git a/drivers/gpu/drm/amd/amdgpu/Makefile b/drivers/gpu/drm/amd/amdgpu/Makefile
index ca0e435559d5..7ae3b22c5628 100644
--- a/drivers/gpu/drm/amd/amdgpu/Makefile
+++ b/drivers/gpu/drm/amd/amdgpu/Makefile
@@ -147,12 +147,16 @@ amdgpu-y += \
vce_v3_0.o \
vce_v4_0.o
-# add VCN block
+# add VCN and JPEG block
amdgpu-y += \
amdgpu_vcn.o \
vcn_v1_0.o \
vcn_v2_0.o \
- vcn_v2_5.o
+ vcn_v2_5.o \
+ amdgpu_jpeg.o \
+ jpeg_v1_0.o \
+ jpeg_v2_0.o \
+ jpeg_v2_5.o
# add ATHUB block
amdgpu-y += \
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
index 0c229a92a24b..b1bb10625cd9 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
@@ -69,6 +69,7 @@
#include "amdgpu_uvd.h"
#include "amdgpu_vce.h"
#include "amdgpu_vcn.h"
+#include "amdgpu_jpeg.h"
#include "amdgpu_mn.h"
#include "amdgpu_gmc.h"
#include "amdgpu_gfx.h"
@@ -89,6 +90,7 @@
#include "amdgpu_mes.h"
#include "amdgpu_umc.h"
#include "amdgpu_mmhub.h"
+#include "amdgpu_df.h"
#define MAX_GPU_INSTANCE 16
@@ -588,6 +590,8 @@ struct amdgpu_asic_funcs {
bool (*need_reset_on_init)(struct amdgpu_device *adev);
/* PCIe replay counter */
uint64_t (*get_pcie_replay_count)(struct amdgpu_device *adev);
+ /* device supports BACO */
+ bool (*supports_baco)(struct amdgpu_device *adev);
};
/*
@@ -633,9 +637,8 @@ struct amdgpu_fw_vram_usage {
struct amdgpu_bo *reserved_bo;
void *va;
- /* Offset on the top of VRAM, used as c2p write buffer.
+ /* GDDR6 training support flag.
*/
- u64 mem_train_fb_loc;
bool mem_train_support;
};
@@ -662,29 +665,6 @@ struct amdgpu_mmio_remap {
resource_size_t bus_addr;
};
-struct amdgpu_df_funcs {
- void (*sw_init)(struct amdgpu_device *adev);
- void (*sw_fini)(struct amdgpu_device *adev);
- void (*enable_broadcast_mode)(struct amdgpu_device *adev,
- bool enable);
- u32 (*get_fb_channel_number)(struct amdgpu_device *adev);
- u32 (*get_hbm_channel_number)(struct amdgpu_device *adev);
- void (*update_medium_grain_clock_gating)(struct amdgpu_device *adev,
- bool enable);
- void (*get_clockgating_state)(struct amdgpu_device *adev,
- u32 *flags);
- void (*enable_ecc_force_par_wr_rmw)(struct amdgpu_device *adev,
- bool enable);
- int (*pmc_start)(struct amdgpu_device *adev, uint64_t config,
- int is_enable);
- int (*pmc_stop)(struct amdgpu_device *adev, uint64_t config,
- int is_disable);
- void (*pmc_get_count)(struct amdgpu_device *adev, uint64_t config,
- uint64_t *count);
- uint64_t (*get_fica)(struct amdgpu_device *adev, uint32_t ficaa_val);
- void (*set_fica)(struct amdgpu_device *adev, uint32_t ficaa_val,
- uint32_t ficadl_val, uint32_t ficadh_val);
-};
/* Define the HW IP blocks will be used in driver , add more if necessary */
enum amd_hw_ip_block_type {
GC_HWIP = 1,
@@ -704,6 +684,7 @@ enum amd_hw_ip_block_type {
MP1_HWIP,
UVD_HWIP,
VCN_HWIP = UVD_HWIP,
+ JPEG_HWIP = VCN_HWIP,
VCE_HWIP,
DF_HWIP,
DCE_HWIP,
@@ -899,6 +880,9 @@ struct amdgpu_device {
/* vcn */
struct amdgpu_vcn vcn;
+ /* jpeg */
+ struct amdgpu_jpeg jpeg;
+
/* firmwares */
struct amdgpu_firmware firmware;
@@ -924,6 +908,9 @@ struct amdgpu_device {
bool enable_mes;
struct amdgpu_mes mes;
+ /* df */
+ struct amdgpu_df df;
+
struct amdgpu_ip_block ip_blocks[AMDGPU_MAX_IP_NUM];
int num_ip_blocks;
struct mutex mn_lock;
@@ -937,8 +924,6 @@ struct amdgpu_device {
/* soc15 register offset based on ip, instance and segment */
uint32_t *reg_offset[MAX_HWIP][HWIP_MAX_INSTANCE];
- const struct amdgpu_df_funcs *df_funcs;
-
/* delayed work_func for deferring clockgating during resume */
struct delayed_work delayed_init_work;
@@ -982,6 +967,11 @@ struct amdgpu_device {
/* device pstate */
int pstate;
+ /* enable runtime pm on the device */
+ bool runpm;
+
+ bool pm_sysfs_en;
+ bool ucode_sysfs_en;
};
static inline struct amdgpu_device *amdgpu_ttm_adev(struct ttm_bo_device *bdev)
@@ -1117,6 +1107,8 @@ int emu_soc_asic_init(struct amdgpu_device *adev);
#define amdgpu_asic_get_pcie_usage(adev, cnt0, cnt1) ((adev)->asic_funcs->get_pcie_usage((adev), (cnt0), (cnt1)))
#define amdgpu_asic_need_reset_on_init(adev) (adev)->asic_funcs->need_reset_on_init((adev))
#define amdgpu_asic_get_pcie_replay_count(adev) ((adev)->asic_funcs->get_pcie_replay_count((adev)))
+#define amdgpu_asic_supports_baco(adev) (adev)->asic_funcs->supports_baco((adev))
+
#define amdgpu_inc_vram_lost(adev) atomic_inc(&((adev)->vram_lost_counter));
/* Common functions */
@@ -1133,9 +1125,12 @@ void amdgpu_device_program_register_sequence(struct amdgpu_device *adev,
const u32 *registers,
const u32 array_size);
-bool amdgpu_device_is_px(struct drm_device *dev);
+bool amdgpu_device_supports_boco(struct drm_device *dev);
+bool amdgpu_device_supports_baco(struct drm_device *dev);
bool amdgpu_device_is_peer_accessible(struct amdgpu_device *adev,
struct amdgpu_device *peer_adev);
+int amdgpu_device_baco_enter(struct drm_device *dev);
+int amdgpu_device_baco_exit(struct drm_device *dev);
/* atpx handler */
#if defined(CONFIG_VGA_SWITCHEROO)
@@ -1173,8 +1168,8 @@ int amdgpu_driver_open_kms(struct drm_device *dev, struct drm_file *file_priv);
void amdgpu_driver_postclose_kms(struct drm_device *dev,
struct drm_file *file_priv);
int amdgpu_device_ip_suspend(struct amdgpu_device *adev);
-int amdgpu_device_suspend(struct drm_device *dev, bool suspend, bool fbcon);
-int amdgpu_device_resume(struct drm_device *dev, bool resume, bool fbcon);
+int amdgpu_device_suspend(struct drm_device *dev, bool fbcon);
+int amdgpu_device_resume(struct drm_device *dev, bool fbcon);
u32 amdgpu_get_vblank_counter_kms(struct drm_device *dev, unsigned int pipe);
int amdgpu_enable_vblank_kms(struct drm_device *dev, unsigned int pipe);
void amdgpu_disable_vblank_kms(struct drm_device *dev, unsigned int pipe);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
index d3da9dde4ee1..8609287620ea 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
@@ -613,15 +613,9 @@ void amdgpu_amdkfd_set_compute_idle(struct kgd_dev *kgd, bool idle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)kgd;
- if (is_support_sw_smu(adev))
- smu_switch_power_profile(&adev->smu,
- PP_SMC_POWER_PROFILE_COMPUTE,
- !idle);
- else if (adev->powerplay.pp_funcs &&
- adev->powerplay.pp_funcs->switch_power_profile)
- amdgpu_dpm_switch_power_profile(adev,
- PP_SMC_POWER_PROFILE_COMPUTE,
- !idle);
+ amdgpu_dpm_switch_power_profile(adev,
+ PP_SMC_POWER_PROFILE_COMPUTE,
+ !idle);
}
bool amdgpu_amdkfd_is_kfd_vmid(struct amdgpu_device *adev, u32 vmid)
@@ -634,6 +628,38 @@ bool amdgpu_amdkfd_is_kfd_vmid(struct amdgpu_device *adev, u32 vmid)
return false;
}
+int amdgpu_amdkfd_flush_gpu_tlb_vmid(struct kgd_dev *kgd, uint16_t vmid)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)kgd;
+
+ if (adev->family == AMDGPU_FAMILY_AI) {
+ int i;
+
+ for (i = 0; i < adev->num_vmhubs; i++)
+ amdgpu_gmc_flush_gpu_tlb(adev, vmid, i, 0);
+ } else {
+ amdgpu_gmc_flush_gpu_tlb(adev, vmid, AMDGPU_GFXHUB_0, 0);
+ }
+
+ return 0;
+}
+
+int amdgpu_amdkfd_flush_gpu_tlb_pasid(struct kgd_dev *kgd, uint16_t pasid)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)kgd;
+ uint32_t flush_type = 0;
+ bool all_hub = false;
+
+ if (adev->gmc.xgmi.num_physical_nodes &&
+ adev->asic_type == CHIP_VEGA20)
+ flush_type = 2;
+
+ if (adev->family == AMDGPU_FAMILY_AI)
+ all_hub = true;
+
+ return amdgpu_gmc_flush_gpu_tlb_pasid(adev, pasid, flush_type, all_hub);
+}
+
bool amdgpu_amdkfd_have_atomics_support(struct kgd_dev *kgd)
{
struct amdgpu_device *adev = (struct amdgpu_device *)kgd;
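
These helpers move TLB-invalidation policy (flush type, which VM hubs)
out of the per-ASIC kfd2kgd backends; the corresponding invalidate_tlbs
implementations are removed later in this patch. A hedged sketch of the
KFD-side call, names hypothetical:

static void demo_unmap_cleanup(struct kgd_dev *kgd, uint16_t pasid)
{
	/* Internally selects heavyweight flush (type 2) on XGMI Vega20
	 * and flushes all VM hubs on gfx9 parts. */
	if (amdgpu_amdkfd_flush_gpu_tlb_pasid(kgd, pasid))
		pr_warn("TLB flush for pasid %u failed\n", pasid);
}
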
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h
index 069d5d230810..47b0f2957d1f 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h
@@ -136,6 +136,8 @@ int amdgpu_amdkfd_submit_ib(struct kgd_dev *kgd, enum kgd_engine_type engine,
uint32_t *ib_cmd, uint32_t ib_len);
void amdgpu_amdkfd_set_compute_idle(struct kgd_dev *kgd, bool idle);
bool amdgpu_amdkfd_have_atomics_support(struct kgd_dev *kgd);
+int amdgpu_amdkfd_flush_gpu_tlb_vmid(struct kgd_dev *kgd, uint16_t vmid);
+int amdgpu_amdkfd_flush_gpu_tlb_pasid(struct kgd_dev *kgd, uint16_t pasid);
bool amdgpu_amdkfd_is_kfd_vmid(struct amdgpu_device *adev, u32 vmid);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_arcturus.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_arcturus.c
index b6713e0ed1b2..4bcc175a149d 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_arcturus.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_arcturus.c
@@ -46,6 +46,8 @@
#include "soc15.h"
#include "soc15d.h"
#include "amdgpu_amdkfd_gfx_v9.h"
+#include "gfxhub_v1_0.h"
+#include "mmhub_v9_4.h"
#define HQD_N_REGS 56
#define DUMP_REG(addr) do { \
@@ -69,32 +71,56 @@ static uint32_t get_sdma_rlc_reg_offset(struct amdgpu_device *adev,
unsigned int engine_id,
unsigned int queue_id)
{
- uint32_t sdma_engine_reg_base[8] = {
- SOC15_REG_OFFSET(SDMA0, 0,
- mmSDMA0_RLC0_RB_CNTL) - mmSDMA0_RLC0_RB_CNTL,
- SOC15_REG_OFFSET(SDMA1, 0,
- mmSDMA1_RLC0_RB_CNTL) - mmSDMA1_RLC0_RB_CNTL,
- SOC15_REG_OFFSET(SDMA2, 0,
- mmSDMA2_RLC0_RB_CNTL) - mmSDMA2_RLC0_RB_CNTL,
- SOC15_REG_OFFSET(SDMA3, 0,
- mmSDMA3_RLC0_RB_CNTL) - mmSDMA3_RLC0_RB_CNTL,
- SOC15_REG_OFFSET(SDMA4, 0,
- mmSDMA4_RLC0_RB_CNTL) - mmSDMA4_RLC0_RB_CNTL,
- SOC15_REG_OFFSET(SDMA5, 0,
- mmSDMA5_RLC0_RB_CNTL) - mmSDMA5_RLC0_RB_CNTL,
- SOC15_REG_OFFSET(SDMA6, 0,
- mmSDMA6_RLC0_RB_CNTL) - mmSDMA6_RLC0_RB_CNTL,
- SOC15_REG_OFFSET(SDMA7, 0,
- mmSDMA7_RLC0_RB_CNTL) - mmSDMA7_RLC0_RB_CNTL
- };
-
- uint32_t retval = sdma_engine_reg_base[engine_id]
+ uint32_t sdma_engine_reg_base = 0;
+ uint32_t sdma_rlc_reg_offset;
+
+ switch (engine_id) {
+ default:
+ dev_warn(adev->dev,
+ "Invalid sdma engine id (%d), using engine id 0\n",
+ engine_id);
+ /* fall through */
+ case 0:
+ sdma_engine_reg_base = SOC15_REG_OFFSET(SDMA0, 0,
+ mmSDMA0_RLC0_RB_CNTL) - mmSDMA0_RLC0_RB_CNTL;
+ break;
+ case 1:
+ sdma_engine_reg_base = SOC15_REG_OFFSET(SDMA1, 0,
+ mmSDMA1_RLC0_RB_CNTL) - mmSDMA1_RLC0_RB_CNTL;
+ break;
+ case 2:
+ sdma_engine_reg_base = SOC15_REG_OFFSET(SDMA2, 0,
+ mmSDMA2_RLC0_RB_CNTL) - mmSDMA2_RLC0_RB_CNTL;
+ break;
+ case 3:
+ sdma_engine_reg_base = SOC15_REG_OFFSET(SDMA3, 0,
+ mmSDMA3_RLC0_RB_CNTL) - mmSDMA3_RLC0_RB_CNTL;
+ break;
+ case 4:
+ sdma_engine_reg_base = SOC15_REG_OFFSET(SDMA4, 0,
+ mmSDMA4_RLC0_RB_CNTL) - mmSDMA4_RLC0_RB_CNTL;
+ break;
+ case 5:
+ sdma_engine_reg_base = SOC15_REG_OFFSET(SDMA5, 0,
+ mmSDMA5_RLC0_RB_CNTL) - mmSDMA5_RLC0_RB_CNTL;
+ break;
+ case 6:
+ sdma_engine_reg_base = SOC15_REG_OFFSET(SDMA6, 0,
+ mmSDMA6_RLC0_RB_CNTL) - mmSDMA6_RLC0_RB_CNTL;
+ break;
+ case 7:
+ sdma_engine_reg_base = SOC15_REG_OFFSET(SDMA7, 0,
+ mmSDMA7_RLC0_RB_CNTL) - mmSDMA7_RLC0_RB_CNTL;
+ break;
+ }
+
+ sdma_rlc_reg_offset = sdma_engine_reg_base
+ queue_id * (mmSDMA0_RLC1_RB_CNTL - mmSDMA0_RLC0_RB_CNTL);
pr_debug("RLC register offset for SDMA%d RLC%d: 0x%x\n", engine_id,
- queue_id, retval);
+ queue_id, sdma_rlc_reg_offset);
- return retval;
+ return sdma_rlc_reg_offset;
}
static int kgd_hqd_sdma_load(struct kgd_dev *kgd, void *mqd,
@@ -258,11 +284,28 @@ static int kgd_hqd_sdma_destroy(struct kgd_dev *kgd, void *mqd,
return 0;
}
+static void kgd_set_vm_context_page_table_base(struct kgd_dev *kgd, uint32_t vmid,
+ uint64_t page_table_base)
+{
+ struct amdgpu_device *adev = get_amdgpu_device(kgd);
+
+ if (!amdgpu_amdkfd_is_kfd_vmid(adev, vmid)) {
+ pr_err("trying to set page table base for wrong VMID %u\n",
+ vmid);
+ return;
+ }
+
+ mmhub_v9_4_setup_vm_pt_regs(adev, vmid, page_table_base);
+
+ gfxhub_v1_0_setup_vm_pt_regs(adev, vmid, page_table_base);
+}
+
const struct kfd2kgd_calls arcturus_kfd2kgd = {
.program_sh_mem_settings = kgd_gfx_v9_program_sh_mem_settings,
.set_pasid_vmid_mapping = kgd_gfx_v9_set_pasid_vmid_mapping,
.init_interrupts = kgd_gfx_v9_init_interrupts,
.hqd_load = kgd_gfx_v9_hqd_load,
+ .hiq_mqd_load = kgd_gfx_v9_hiq_mqd_load,
.hqd_sdma_load = kgd_hqd_sdma_load,
.hqd_dump = kgd_gfx_v9_hqd_dump,
.hqd_sdma_dump = kgd_hqd_sdma_dump,
@@ -277,8 +320,6 @@ const struct kfd2kgd_calls arcturus_kfd2kgd = {
.get_atc_vmid_pasid_mapping_info =
kgd_gfx_v9_get_atc_vmid_pasid_mapping_info,
.get_tile_config = kgd_gfx_v9_get_tile_config,
- .set_vm_context_page_table_base = kgd_gfx_v9_set_vm_context_page_table_base,
- .invalidate_tlbs = kgd_gfx_v9_invalidate_tlbs,
- .invalidate_tlbs_vmid = kgd_gfx_v9_invalidate_tlbs_vmid,
+ .set_vm_context_page_table_base = kgd_set_vm_context_page_table_base,
.get_hive_id = amdgpu_amdkfd_get_hive_id,
};
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10.c
index 61cd707158e4..a7b17c8deb00 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10.c
@@ -107,13 +107,13 @@ static void acquire_queue(struct kgd_dev *kgd, uint32_t pipe_id,
lock_srbm(kgd, mec, pipe, queue_id, 0);
}
-static uint32_t get_queue_mask(struct amdgpu_device *adev,
+static uint64_t get_queue_mask(struct amdgpu_device *adev,
uint32_t pipe_id, uint32_t queue_id)
{
- unsigned int bit = (pipe_id * adev->gfx.mec.num_queue_per_pipe +
- queue_id) & 31;
+ unsigned int bit = pipe_id * adev->gfx.mec.num_queue_per_pipe +
+ queue_id;
- return ((uint32_t)1) << bit;
+ return 1ull << bit;
}
static void release_queue(struct kgd_dev *kgd)
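
The same mask widening is applied to gfx_v9 and gfx_v10 above: the old
code wrapped the bit index with "& 31", silently aliasing queues, and
once bit = pipe * queues_per_pipe + queue can exceed 31, shifting a
32-bit 1 by that count is undefined behaviour. A standalone userspace
demonstration:

#include <inttypes.h>
#include <stdio.h>

int main(void)
{
	unsigned int bit = 4 * 8 + 3;	/* pipe 4, 8 queues/pipe, queue 3 */

	/* uint32_t mask = 1u << bit;	 undefined: shift count 35 >= 32 */
	uint64_t mask = 1ull << bit;	/* well-defined up to bit 63 */

	printf("bit %u -> mask 0x%016" PRIx64 "\n", bit, mask);
	return 0;
}
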
@@ -268,21 +268,6 @@ static int kgd_hqd_load(struct kgd_dev *kgd, void *mqd, uint32_t pipe_id,
pr_debug("Load hqd of pipe %d queue %d\n", pipe_id, queue_id);
acquire_queue(kgd, pipe_id, queue_id);
- /* HIQ is set during driver init period with vmid set to 0*/
- if (m->cp_hqd_vmid == 0) {
- uint32_t value, mec, pipe;
-
- mec = (pipe_id / adev->gfx.mec.num_pipe_per_mec) + 1;
- pipe = (pipe_id % adev->gfx.mec.num_pipe_per_mec);
-
- pr_debug("kfd: set HIQ, mec:%d, pipe:%d, queue:%d.\n",
- mec, pipe, queue_id);
- value = RREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_CP_SCHEDULERS));
- value = REG_SET_FIELD(value, RLC_CP_SCHEDULERS, scheduler1,
- ((mec << 5) | (pipe << 3) | queue_id | 0x80));
- WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_CP_SCHEDULERS), value);
- }
-
/* HQD registers extend from CP_MQD_BASE_ADDR to CP_HQD_EOP_WPTR_MEM. */
mqd_hqd = &m->cp_mqd_base_addr_lo;
hqd_base = SOC15_REG_OFFSET(GC, 0, mmCP_MQD_BASE_ADDR);
@@ -332,9 +317,10 @@ static int kgd_hqd_load(struct kgd_dev *kgd, void *mqd, uint32_t pipe_id,
lower_32_bits((uint64_t)wptr));
WREG32(SOC15_REG_OFFSET(GC, 0, mmCP_HQD_PQ_WPTR_POLL_ADDR_HI),
upper_32_bits((uint64_t)wptr));
- pr_debug("%s setting CP_PQ_WPTR_POLL_CNTL1 to %x\n", __func__, get_queue_mask(adev, pipe_id, queue_id));
+ pr_debug("%s setting CP_PQ_WPTR_POLL_CNTL1 to %x\n", __func__,
+ (uint32_t)get_queue_mask(adev, pipe_id, queue_id));
WREG32(SOC15_REG_OFFSET(GC, 0, mmCP_PQ_WPTR_POLL_CNTL1),
- get_queue_mask(adev, pipe_id, queue_id));
+ (uint32_t)get_queue_mask(adev, pipe_id, queue_id));
}
/* Start the EOP fetcher */
@@ -350,6 +336,59 @@ static int kgd_hqd_load(struct kgd_dev *kgd, void *mqd, uint32_t pipe_id,
return 0;
}
+static int kgd_hiq_mqd_load(struct kgd_dev *kgd, void *mqd,
+ uint32_t pipe_id, uint32_t queue_id,
+ uint32_t doorbell_off)
+{
+ struct amdgpu_device *adev = get_amdgpu_device(kgd);
+ struct amdgpu_ring *kiq_ring = &adev->gfx.kiq.ring;
+ struct v10_compute_mqd *m;
+ uint32_t mec, pipe;
+ int r;
+
+ m = get_mqd(mqd);
+
+ acquire_queue(kgd, pipe_id, queue_id);
+
+ mec = (pipe_id / adev->gfx.mec.num_pipe_per_mec) + 1;
+ pipe = (pipe_id % adev->gfx.mec.num_pipe_per_mec);
+
+ pr_debug("kfd: set HIQ, mec:%d, pipe:%d, queue:%d.\n",
+ mec, pipe, queue_id);
+
+ spin_lock(&adev->gfx.kiq.ring_lock);
+ r = amdgpu_ring_alloc(kiq_ring, 7);
+ if (r) {
+ pr_err("Failed to alloc KIQ (%d).\n", r);
+ goto out_unlock;
+ }
+
+ amdgpu_ring_write(kiq_ring, PACKET3(PACKET3_MAP_QUEUES, 5));
+ amdgpu_ring_write(kiq_ring,
+ PACKET3_MAP_QUEUES_QUEUE_SEL(0) | /* Queue_Sel */
+ PACKET3_MAP_QUEUES_VMID(m->cp_hqd_vmid) | /* VMID */
+ PACKET3_MAP_QUEUES_QUEUE(queue_id) |
+ PACKET3_MAP_QUEUES_PIPE(pipe) |
+ PACKET3_MAP_QUEUES_ME((mec - 1)) |
+ PACKET3_MAP_QUEUES_QUEUE_TYPE(0) | /*queue_type: normal compute queue */
+ PACKET3_MAP_QUEUES_ALLOC_FORMAT(0) | /* alloc format: all_on_one_pipe */
+ PACKET3_MAP_QUEUES_ENGINE_SEL(1) | /* engine_sel: hiq */
+ PACKET3_MAP_QUEUES_NUM_QUEUES(1)); /* num_queues: must be 1 */
+ amdgpu_ring_write(kiq_ring,
+ PACKET3_MAP_QUEUES_DOORBELL_OFFSET(doorbell_off));
+ amdgpu_ring_write(kiq_ring, m->cp_mqd_base_addr_lo);
+ amdgpu_ring_write(kiq_ring, m->cp_mqd_base_addr_hi);
+ amdgpu_ring_write(kiq_ring, m->cp_hqd_pq_wptr_poll_addr_lo);
+ amdgpu_ring_write(kiq_ring, m->cp_hqd_pq_wptr_poll_addr_hi);
+ amdgpu_ring_commit(kiq_ring);
+
+out_unlock:
+ spin_unlock(&adev->gfx.kiq.ring_lock);
+ release_queue(kgd);
+
+ return r;
+}
+
static int kgd_hqd_dump(struct kgd_dev *kgd,
uint32_t pipe_id, uint32_t queue_id,
uint32_t (**dump)[2], uint32_t *n_regs)
@@ -686,71 +725,6 @@ static bool get_atc_vmid_pasid_mapping_info(struct kgd_dev *kgd,
return !!(value & ATC_VMID0_PASID_MAPPING__VALID_MASK);
}
-static int invalidate_tlbs_with_kiq(struct amdgpu_device *adev, uint16_t pasid)
-{
- signed long r;
- uint32_t seq;
- struct amdgpu_ring *ring = &adev->gfx.kiq.ring;
-
- spin_lock(&adev->gfx.kiq.ring_lock);
- amdgpu_ring_alloc(ring, 12); /* fence + invalidate_tlbs package*/
- amdgpu_ring_write(ring, PACKET3(PACKET3_INVALIDATE_TLBS, 0));
- amdgpu_ring_write(ring,
- PACKET3_INVALIDATE_TLBS_DST_SEL(1) |
- PACKET3_INVALIDATE_TLBS_PASID(pasid));
- amdgpu_fence_emit_polling(ring, &seq);
- amdgpu_ring_commit(ring);
- spin_unlock(&adev->gfx.kiq.ring_lock);
-
- r = amdgpu_fence_wait_polling(ring, seq, adev->usec_timeout);
- if (r < 1) {
- DRM_ERROR("wait for kiq fence error: %ld.\n", r);
- return -ETIME;
- }
-
- return 0;
-}
-
-static int invalidate_tlbs(struct kgd_dev *kgd, uint16_t pasid)
-{
- struct amdgpu_device *adev = (struct amdgpu_device *) kgd;
- int vmid;
- uint16_t queried_pasid;
- bool ret;
- struct amdgpu_ring *ring = &adev->gfx.kiq.ring;
-
- if (amdgpu_emu_mode == 0 && ring->sched.ready)
- return invalidate_tlbs_with_kiq(adev, pasid);
-
- for (vmid = 0; vmid < 16; vmid++) {
- if (!amdgpu_amdkfd_is_kfd_vmid(adev, vmid))
- continue;
-
- ret = get_atc_vmid_pasid_mapping_info(kgd, vmid,
- &queried_pasid);
- if (ret && queried_pasid == pasid) {
- amdgpu_gmc_flush_gpu_tlb(adev, vmid,
- AMDGPU_GFXHUB_0, 0);
- break;
- }
- }
-
- return 0;
-}
-
-static int invalidate_tlbs_vmid(struct kgd_dev *kgd, uint16_t vmid)
-{
- struct amdgpu_device *adev = (struct amdgpu_device *) kgd;
-
- if (!amdgpu_amdkfd_is_kfd_vmid(adev, vmid)) {
- pr_err("non kfd vmid %d\n", vmid);
- return 0;
- }
-
- amdgpu_gmc_flush_gpu_tlb(adev, vmid, AMDGPU_GFXHUB_0, 0);
- return 0;
-}
-
static int kgd_address_watch_disable(struct kgd_dev *kgd)
{
return 0;
@@ -817,6 +791,7 @@ const struct kfd2kgd_calls gfx_v10_kfd2kgd = {
.set_pasid_vmid_mapping = kgd_set_pasid_vmid_mapping,
.init_interrupts = kgd_init_interrupts,
.hqd_load = kgd_hqd_load,
+ .hiq_mqd_load = kgd_hiq_mqd_load,
.hqd_sdma_load = kgd_hqd_sdma_load,
.hqd_dump = kgd_hqd_dump,
.hqd_sdma_dump = kgd_hqd_sdma_dump,
@@ -832,7 +807,5 @@ const struct kfd2kgd_calls gfx_v10_kfd2kgd = {
get_atc_vmid_pasid_mapping_info,
.get_tile_config = amdgpu_amdkfd_get_tile_config,
.set_vm_context_page_table_base = set_vm_context_page_table_base,
- .invalidate_tlbs = invalidate_tlbs,
- .invalidate_tlbs_vmid = invalidate_tlbs_vmid,
.get_hive_id = amdgpu_amdkfd_get_hive_id,
};
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c
index 6e6f0a99ec06..8f052e98a3c6 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c
@@ -696,45 +696,6 @@ static void set_vm_context_page_table_base(struct kgd_dev *kgd, uint32_t vmid,
lower_32_bits(page_table_base));
}
-static int invalidate_tlbs(struct kgd_dev *kgd, uint16_t pasid)
-{
- struct amdgpu_device *adev = (struct amdgpu_device *) kgd;
- int vmid;
- unsigned int tmp;
-
- if (adev->in_gpu_reset)
- return -EIO;
-
- for (vmid = 0; vmid < 16; vmid++) {
- if (!amdgpu_amdkfd_is_kfd_vmid(adev, vmid))
- continue;
-
- tmp = RREG32(mmATC_VMID0_PASID_MAPPING + vmid);
- if ((tmp & ATC_VMID0_PASID_MAPPING__VALID_MASK) &&
- (tmp & ATC_VMID0_PASID_MAPPING__PASID_MASK) == pasid) {
- WREG32(mmVM_INVALIDATE_REQUEST, 1 << vmid);
- RREG32(mmVM_INVALIDATE_RESPONSE);
- break;
- }
- }
-
- return 0;
-}
-
-static int invalidate_tlbs_vmid(struct kgd_dev *kgd, uint16_t vmid)
-{
- struct amdgpu_device *adev = (struct amdgpu_device *) kgd;
-
- if (!amdgpu_amdkfd_is_kfd_vmid(adev, vmid)) {
- pr_err("non kfd vmid\n");
- return 0;
- }
-
- WREG32(mmVM_INVALIDATE_REQUEST, 1 << vmid);
- RREG32(mmVM_INVALIDATE_RESPONSE);
- return 0;
-}
-
/**
* read_vmid_from_vmfault_reg - read vmid from register
*
@@ -771,7 +732,5 @@ const struct kfd2kgd_calls gfx_v7_kfd2kgd = {
.set_scratch_backing_va = set_scratch_backing_va,
.get_tile_config = get_tile_config,
.set_vm_context_page_table_base = set_vm_context_page_table_base,
- .invalidate_tlbs = invalidate_tlbs,
- .invalidate_tlbs_vmid = invalidate_tlbs_vmid,
.read_vmid_from_vmfault_reg = read_vmid_from_vmfault_reg,
};
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v8.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v8.c
index bfbddedb2380..19a10db93d68 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v8.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v8.c
@@ -657,45 +657,6 @@ static void set_vm_context_page_table_base(struct kgd_dev *kgd, uint32_t vmid,
lower_32_bits(page_table_base));
}
-static int invalidate_tlbs(struct kgd_dev *kgd, uint16_t pasid)
-{
- struct amdgpu_device *adev = (struct amdgpu_device *) kgd;
- int vmid;
- unsigned int tmp;
-
- if (adev->in_gpu_reset)
- return -EIO;
-
- for (vmid = 0; vmid < 16; vmid++) {
- if (!amdgpu_amdkfd_is_kfd_vmid(adev, vmid))
- continue;
-
- tmp = RREG32(mmATC_VMID0_PASID_MAPPING + vmid);
- if ((tmp & ATC_VMID0_PASID_MAPPING__VALID_MASK) &&
- (tmp & ATC_VMID0_PASID_MAPPING__PASID_MASK) == pasid) {
- WREG32(mmVM_INVALIDATE_REQUEST, 1 << vmid);
- RREG32(mmVM_INVALIDATE_RESPONSE);
- break;
- }
- }
-
- return 0;
-}
-
-static int invalidate_tlbs_vmid(struct kgd_dev *kgd, uint16_t vmid)
-{
- struct amdgpu_device *adev = (struct amdgpu_device *) kgd;
-
- if (!amdgpu_amdkfd_is_kfd_vmid(adev, vmid)) {
- pr_err("non kfd vmid %d\n", vmid);
- return -EINVAL;
- }
-
- WREG32(mmVM_INVALIDATE_REQUEST, 1 << vmid);
- RREG32(mmVM_INVALIDATE_RESPONSE);
- return 0;
-}
-
const struct kfd2kgd_calls gfx_v8_kfd2kgd = {
.program_sh_mem_settings = kgd_program_sh_mem_settings,
.set_pasid_vmid_mapping = kgd_set_pasid_vmid_mapping,
@@ -717,6 +678,4 @@ const struct kfd2kgd_calls gfx_v8_kfd2kgd = {
.set_scratch_backing_va = set_scratch_backing_va,
.get_tile_config = get_tile_config,
.set_vm_context_page_table_base = set_vm_context_page_table_base,
- .invalidate_tlbs = invalidate_tlbs,
- .invalidate_tlbs_vmid = invalidate_tlbs_vmid,
};
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c
index 47c853ef1051..8562afe5b761 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c
@@ -40,7 +40,6 @@
#include "soc15d.h"
#include "mmhub_v1_0.h"
#include "gfxhub_v1_0.h"
-#include "gmc_v9_0.h"
enum hqd_dequeue_request_type {
@@ -104,13 +103,13 @@ static void acquire_queue(struct kgd_dev *kgd, uint32_t pipe_id,
lock_srbm(kgd, mec, pipe, queue_id, 0);
}
-static uint32_t get_queue_mask(struct amdgpu_device *adev,
+static uint64_t get_queue_mask(struct amdgpu_device *adev,
uint32_t pipe_id, uint32_t queue_id)
{
- unsigned int bit = (pipe_id * adev->gfx.mec.num_queue_per_pipe +
- queue_id) & 31;
+ unsigned int bit = pipe_id * adev->gfx.mec.num_queue_per_pipe +
+ queue_id;
- return ((uint32_t)1) << bit;
+ return 1ull << bit;
}
static void release_queue(struct kgd_dev *kgd)
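The mask is widened to 64 bits because the old code wrapped the bit index with "& 31", silently aliasing any queue past bit 31 onto a lower bit; the hunk at @@ -324 below truncates back to the low 32 bits only where the register takes just those. A worked example, with an assumed per-ASIC queue count:

/* Illustration only; num_queue_per_pipe is ASIC-dependent and the
 * value below is an assumption. With the old 32-bit mask, bit 34
 * would have aliased to bit 2.
 */
unsigned int num_queue_per_pipe = 8;	/* assumed */
unsigned int pipe_id = 4, queue_id = 2;
unsigned int bit = pipe_id * num_queue_per_pipe + queue_id;	/* 34 */
uint64_t mask = 1ull << bit;	/* well-defined even for bit >= 32 */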
@@ -259,21 +258,6 @@ int kgd_gfx_v9_hqd_load(struct kgd_dev *kgd, void *mqd, uint32_t pipe_id,
acquire_queue(kgd, pipe_id, queue_id);
- /* HIQ is set during driver init period with vmid set to 0*/
- if (m->cp_hqd_vmid == 0) {
- uint32_t value, mec, pipe;
-
- mec = (pipe_id / adev->gfx.mec.num_pipe_per_mec) + 1;
- pipe = (pipe_id % adev->gfx.mec.num_pipe_per_mec);
-
- pr_debug("kfd: set HIQ, mec:%d, pipe:%d, queue:%d.\n",
- mec, pipe, queue_id);
- value = RREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_CP_SCHEDULERS));
- value = REG_SET_FIELD(value, RLC_CP_SCHEDULERS, scheduler1,
- ((mec << 5) | (pipe << 3) | queue_id | 0x80));
- WREG32_RLC(SOC15_REG_OFFSET(GC, 0, mmRLC_CP_SCHEDULERS), value);
- }
-
/* HQD registers extend from CP_MQD_BASE_ADDR to CP_HQD_EOP_WPTR_MEM. */
mqd_hqd = &m->cp_mqd_base_addr_lo;
hqd_base = SOC15_REG_OFFSET(GC, 0, mmCP_MQD_BASE_ADDR);
@@ -324,7 +308,7 @@ int kgd_gfx_v9_hqd_load(struct kgd_dev *kgd, void *mqd, uint32_t pipe_id,
WREG32_RLC(SOC15_REG_OFFSET(GC, 0, mmCP_HQD_PQ_WPTR_POLL_ADDR_HI),
upper_32_bits((uintptr_t)wptr));
WREG32(SOC15_REG_OFFSET(GC, 0, mmCP_PQ_WPTR_POLL_CNTL1),
- get_queue_mask(adev, pipe_id, queue_id));
+ (uint32_t)get_queue_mask(adev, pipe_id, queue_id));
}
/* Start the EOP fetcher */
@@ -340,6 +324,59 @@ int kgd_gfx_v9_hqd_load(struct kgd_dev *kgd, void *mqd, uint32_t pipe_id,
return 0;
}
+int kgd_gfx_v9_hiq_mqd_load(struct kgd_dev *kgd, void *mqd,
+ uint32_t pipe_id, uint32_t queue_id,
+ uint32_t doorbell_off)
+{
+ struct amdgpu_device *adev = get_amdgpu_device(kgd);
+ struct amdgpu_ring *kiq_ring = &adev->gfx.kiq.ring;
+ struct v9_mqd *m;
+ uint32_t mec, pipe;
+ int r;
+
+ m = get_mqd(mqd);
+
+ acquire_queue(kgd, pipe_id, queue_id);
+
+ mec = (pipe_id / adev->gfx.mec.num_pipe_per_mec) + 1;
+ pipe = (pipe_id % adev->gfx.mec.num_pipe_per_mec);
+
+ pr_debug("kfd: set HIQ, mec:%d, pipe:%d, queue:%d.\n",
+ mec, pipe, queue_id);
+
+ spin_lock(&adev->gfx.kiq.ring_lock);
+ r = amdgpu_ring_alloc(kiq_ring, 7);
+ if (r) {
+ pr_err("Failed to alloc KIQ (%d).\n", r);
+ goto out_unlock;
+ }
+
+ amdgpu_ring_write(kiq_ring, PACKET3(PACKET3_MAP_QUEUES, 5));
+ amdgpu_ring_write(kiq_ring,
+ PACKET3_MAP_QUEUES_QUEUE_SEL(0) | /* Queue_Sel */
+ PACKET3_MAP_QUEUES_VMID(m->cp_hqd_vmid) | /* VMID */
+ PACKET3_MAP_QUEUES_QUEUE(queue_id) |
+ PACKET3_MAP_QUEUES_PIPE(pipe) |
+ PACKET3_MAP_QUEUES_ME((mec - 1)) |
+ PACKET3_MAP_QUEUES_QUEUE_TYPE(0) | /* queue_type: normal compute queue */

+ PACKET3_MAP_QUEUES_ALLOC_FORMAT(0) | /* alloc format: all_on_one_pipe */
+ PACKET3_MAP_QUEUES_ENGINE_SEL(1) | /* engine_sel: hiq */
+ PACKET3_MAP_QUEUES_NUM_QUEUES(1)); /* num_queues: must be 1 */
+ amdgpu_ring_write(kiq_ring,
+ PACKET3_MAP_QUEUES_DOORBELL_OFFSET(doorbell_off));
+ amdgpu_ring_write(kiq_ring, m->cp_mqd_base_addr_lo);
+ amdgpu_ring_write(kiq_ring, m->cp_mqd_base_addr_hi);
+ amdgpu_ring_write(kiq_ring, m->cp_hqd_pq_wptr_poll_addr_lo);
+ amdgpu_ring_write(kiq_ring, m->cp_hqd_pq_wptr_poll_addr_hi);
+ amdgpu_ring_commit(kiq_ring);
+
+out_unlock:
+ spin_unlock(&adev->gfx.kiq.ring_lock);
+ release_queue(kgd);
+
+ return r;
+}
+
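Instead of piggybacking on hqd_load's vmid==0 special case (removed in the @@ -259 hunk above), the HIQ is now mapped explicitly through a KIQ MAP_QUEUES packet. The ring allocation of 7 dwords matches the packet layout; an illustrative accounting, assuming the usual PM4 type-3 convention that the count field is the total dword count minus two:

/* Packet accounting for amdgpu_ring_alloc(kiq_ring, 7) above
 * (illustrative): PACKET3(PACKET3_MAP_QUEUES, 5) is one header plus
 * six payload dwords, exactly the six amdgpu_ring_write() calls:
 *   1  queue sel / vmid / queue / pipe / me / type / engine / count
 *   1  doorbell offset
 *   2  MQD base address (lo, hi)
 *   2  wptr poll address (lo, hi)
 */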
int kgd_gfx_v9_hqd_dump(struct kgd_dev *kgd,
uint32_t pipe_id, uint32_t queue_id,
uint32_t (**dump)[2], uint32_t *n_regs)
@@ -618,100 +655,6 @@ bool kgd_gfx_v9_get_atc_vmid_pasid_mapping_info(struct kgd_dev *kgd,
return !!(value & ATC_VMID0_PASID_MAPPING__VALID_MASK);
}
-static int invalidate_tlbs_with_kiq(struct amdgpu_device *adev, uint16_t pasid,
- uint32_t flush_type)
-{
- signed long r;
- uint32_t seq;
- struct amdgpu_ring *ring = &adev->gfx.kiq.ring;
-
- spin_lock(&adev->gfx.kiq.ring_lock);
- amdgpu_ring_alloc(ring, 12); /* fence + invalidate_tlbs package*/
- amdgpu_ring_write(ring, PACKET3(PACKET3_INVALIDATE_TLBS, 0));
- amdgpu_ring_write(ring,
- PACKET3_INVALIDATE_TLBS_DST_SEL(1) |
- PACKET3_INVALIDATE_TLBS_ALL_HUB(1) |
- PACKET3_INVALIDATE_TLBS_PASID(pasid) |
- PACKET3_INVALIDATE_TLBS_FLUSH_TYPE(flush_type));
- amdgpu_fence_emit_polling(ring, &seq);
- amdgpu_ring_commit(ring);
- spin_unlock(&adev->gfx.kiq.ring_lock);
-
- r = amdgpu_fence_wait_polling(ring, seq, adev->usec_timeout);
- if (r < 1) {
- DRM_ERROR("wait for kiq fence error: %ld.\n", r);
- return -ETIME;
- }
-
- return 0;
-}
-
-int kgd_gfx_v9_invalidate_tlbs(struct kgd_dev *kgd, uint16_t pasid)
-{
- struct amdgpu_device *adev = (struct amdgpu_device *) kgd;
- int vmid, i;
- uint16_t queried_pasid;
- bool ret;
- struct amdgpu_ring *ring = &adev->gfx.kiq.ring;
- uint32_t flush_type = 0;
-
- if (adev->in_gpu_reset)
- return -EIO;
- if (adev->gmc.xgmi.num_physical_nodes &&
- adev->asic_type == CHIP_VEGA20)
- flush_type = 2;
-
- if (ring->sched.ready)
- return invalidate_tlbs_with_kiq(adev, pasid, flush_type);
-
- for (vmid = 0; vmid < 16; vmid++) {
- if (!amdgpu_amdkfd_is_kfd_vmid(adev, vmid))
- continue;
-
- ret = kgd_gfx_v9_get_atc_vmid_pasid_mapping_info(kgd, vmid,
- &queried_pasid);
- if (ret && queried_pasid == pasid) {
- for (i = 0; i < adev->num_vmhubs; i++)
- amdgpu_gmc_flush_gpu_tlb(adev, vmid,
- i, flush_type);
- break;
- }
- }
-
- return 0;
-}
-
-int kgd_gfx_v9_invalidate_tlbs_vmid(struct kgd_dev *kgd, uint16_t vmid)
-{
- struct amdgpu_device *adev = (struct amdgpu_device *) kgd;
- int i;
-
- if (!amdgpu_amdkfd_is_kfd_vmid(adev, vmid)) {
- pr_err("non kfd vmid %d\n", vmid);
- return 0;
- }
-
- /* Use legacy mode tlb invalidation.
- *
- * Currently on Raven the code below is broken for anything but
- * legacy mode due to a MMHUB power gating problem. A workaround
- * is for MMHUB to wait until the condition PER_VMID_INVALIDATE_REQ
- * == PER_VMID_INVALIDATE_ACK instead of simply waiting for the ack
- * bit.
- *
- * TODO 1: agree on the right set of invalidation registers for
- * KFD use. Use the last one for now. Invalidate both GC and
- * MMHUB.
- *
- * TODO 2: support range-based invalidation, requires kfg2kgd
- * interface change
- */
- for (i = 0; i < adev->num_vmhubs; i++)
- amdgpu_gmc_flush_gpu_tlb(adev, vmid, i, 0);
-
- return 0;
-}
-
int kgd_gfx_v9_address_watch_disable(struct kgd_dev *kgd)
{
return 0;
@@ -758,8 +701,8 @@ uint32_t kgd_gfx_v9_address_watch_get_offset(struct kgd_dev *kgd,
return 0;
}
-void kgd_gfx_v9_set_vm_context_page_table_base(struct kgd_dev *kgd, uint32_t vmid,
- uint64_t page_table_base)
+static void kgd_gfx_v9_set_vm_context_page_table_base(struct kgd_dev *kgd,
+ uint32_t vmid, uint64_t page_table_base)
{
struct amdgpu_device *adev = get_amdgpu_device(kgd);
@@ -769,16 +712,7 @@ void kgd_gfx_v9_set_vm_context_page_table_base(struct kgd_dev *kgd, uint32_t vmi
return;
}
- /* TODO: take advantage of per-process address space size. For
- * now, all processes share the same address space size, like
- * on GFX8 and older.
- */
- if (adev->asic_type == CHIP_ARCTURUS) {
- /* Two MMHUBs */
- mmhub_v9_4_setup_vm_pt_regs(adev, 0, vmid, page_table_base);
- mmhub_v9_4_setup_vm_pt_regs(adev, 1, vmid, page_table_base);
- } else
- mmhub_v1_0_setup_vm_pt_regs(adev, vmid, page_table_base);
+ mmhub_v1_0_setup_vm_pt_regs(adev, vmid, page_table_base);
gfxhub_v1_0_setup_vm_pt_regs(adev, vmid, page_table_base);
}
@@ -788,6 +722,7 @@ const struct kfd2kgd_calls gfx_v9_kfd2kgd = {
.set_pasid_vmid_mapping = kgd_gfx_v9_set_pasid_vmid_mapping,
.init_interrupts = kgd_gfx_v9_init_interrupts,
.hqd_load = kgd_gfx_v9_hqd_load,
+ .hiq_mqd_load = kgd_gfx_v9_hiq_mqd_load,
.hqd_sdma_load = kgd_hqd_sdma_load,
.hqd_dump = kgd_gfx_v9_hqd_dump,
.hqd_sdma_dump = kgd_hqd_sdma_dump,
@@ -803,7 +738,5 @@ const struct kfd2kgd_calls gfx_v9_kfd2kgd = {
kgd_gfx_v9_get_atc_vmid_pasid_mapping_info,
.get_tile_config = kgd_gfx_v9_get_tile_config,
.set_vm_context_page_table_base = kgd_gfx_v9_set_vm_context_page_table_base,
- .invalidate_tlbs = kgd_gfx_v9_invalidate_tlbs,
- .invalidate_tlbs_vmid = kgd_gfx_v9_invalidate_tlbs_vmid,
.get_hive_id = amdgpu_amdkfd_get_hive_id,
};
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.h
index d9e9ad22b2bd..63d3e6683dfe 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.h
@@ -33,6 +33,9 @@ int kgd_gfx_v9_hqd_load(struct kgd_dev *kgd, void *mqd, uint32_t pipe_id,
uint32_t queue_id, uint32_t __user *wptr,
uint32_t wptr_shift, uint32_t wptr_mask,
struct mm_struct *mm);
+int kgd_gfx_v9_hiq_mqd_load(struct kgd_dev *kgd, void *mqd,
+ uint32_t pipe_id, uint32_t queue_id,
+ uint32_t doorbell_off);
int kgd_gfx_v9_hqd_dump(struct kgd_dev *kgd,
uint32_t pipe_id, uint32_t queue_id,
uint32_t (**dump)[2], uint32_t *n_regs);
@@ -57,9 +60,5 @@ uint32_t kgd_gfx_v9_address_watch_get_offset(struct kgd_dev *kgd,
bool kgd_gfx_v9_get_atc_vmid_pasid_mapping_info(struct kgd_dev *kgd,
uint8_t vmid, uint16_t *p_pasid);
-void kgd_gfx_v9_set_vm_context_page_table_base(struct kgd_dev *kgd, uint32_t vmid,
- uint64_t page_table_base);
-int kgd_gfx_v9_invalidate_tlbs(struct kgd_dev *kgd, uint16_t pasid);
-int kgd_gfx_v9_invalidate_tlbs_vmid(struct kgd_dev *kgd, uint16_t vmid);
int kgd_gfx_v9_get_tile_config(struct kgd_dev *kgd,
struct tile_config *config);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
index 888209eb8cec..b2487f4f271b 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
@@ -85,7 +85,7 @@ static bool check_if_add_bo_to_vm(struct amdgpu_vm *avm,
}
/* Set memory usage limits. Currently, the limits are
- * System (TTM + userptr) memory - 3/4th System RAM
+ * System (TTM + userptr) memory - 15/16th System RAM
* TTM memory - 3/8th System RAM
*/
void amdgpu_amdkfd_gpuvm_init_mem_limits(void)
@@ -98,7 +98,7 @@ void amdgpu_amdkfd_gpuvm_init_mem_limits(void)
mem *= si.mem_unit;
spin_lock_init(&kfd_mem_limit.mem_limit_lock);
- kfd_mem_limit.max_system_mem_limit = (mem >> 1) + (mem >> 2);
+ kfd_mem_limit.max_system_mem_limit = mem - (mem >> 4);
kfd_mem_limit.max_ttm_mem_limit = (mem >> 1) - (mem >> 3);
pr_debug("Kernel memory limit %lluM, TTM limit %lluM\n",
(kfd_mem_limit.max_system_mem_limit >> 20),
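The new expression raises the system memory cap from 3/4 to 15/16 of RAM using only shifts. The arithmetic, spelled out with an example size:

/* Shift arithmetic behind the limits (illustrative):
 * old: (mem >> 1) + (mem >> 2) = mem/2 + mem/4  = 3/4  of RAM
 * new:  mem - (mem >> 4)       = mem - mem/16   = 15/16 of RAM
 * ttm:  (mem >> 1) - (mem >> 3) = mem/2 - mem/8 = 3/8  of RAM
 */
uint64_t mem = 16ULL << 30;			/* e.g. 16 GiB */
uint64_t sys_limit = mem - (mem >> 4);		/* 15 GiB */
uint64_t ttm_limit = (mem >> 1) - (mem >> 3);	/* 6 GiB */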
@@ -358,7 +358,7 @@ static int vm_update_pds(struct amdgpu_vm *vm, struct amdgpu_sync *sync)
if (ret)
return ret;
- return amdgpu_sync_fence(NULL, sync, vm->last_update, false);
+ return amdgpu_sync_fence(sync, vm->last_update, false);
}
static uint64_t get_pte_flags(struct amdgpu_device *adev, struct kgd_mem *mem)
@@ -750,7 +750,7 @@ static int unmap_bo_from_gpuvm(struct amdgpu_device *adev,
amdgpu_vm_clear_freed(adev, vm, &bo_va->last_pt_update);
- amdgpu_sync_fence(NULL, sync, bo_va->last_pt_update, false);
+ amdgpu_sync_fence(sync, bo_va->last_pt_update, false);
return 0;
}
@@ -769,7 +769,7 @@ static int update_gpuvm_pte(struct amdgpu_device *adev,
return ret;
}
- return amdgpu_sync_fence(NULL, sync, bo_va->last_pt_update, false);
+ return amdgpu_sync_fence(sync, bo_va->last_pt_update, false);
}
static int map_bo_to_gpuvm(struct amdgpu_device *adev,
@@ -1674,10 +1674,10 @@ int amdgpu_amdkfd_evict_userptr(struct kgd_mem *mem,
struct mm_struct *mm)
{
struct amdkfd_process_info *process_info = mem->process_info;
- int invalid, evicted_bos;
+ int evicted_bos;
int r = 0;
- invalid = atomic_inc_return(&mem->invalid);
+ atomic_inc(&mem->invalid);
evicted_bos = atomic_inc_return(&process_info->evicted_bos);
if (evicted_bos == 1) {
/* First eviction, stop the queues */
@@ -2048,7 +2048,7 @@ int amdgpu_amdkfd_gpuvm_restore_process_bos(void *info, struct dma_fence **ef)
pr_debug("Memory eviction: Validate BOs failed. Try again\n");
goto validate_map_fail;
}
- ret = amdgpu_sync_fence(NULL, &sync_obj, bo->tbo.moving, false);
+ ret = amdgpu_sync_fence(&sync_obj, bo->tbo.moving, false);
if (ret) {
pr_debug("Memory eviction: Sync BO fence failed. Try again\n");
goto validate_map_fail;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c
index 72232fccf61a..fdd52d86a4d7 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c
@@ -338,17 +338,9 @@ bool amdgpu_atombios_get_connector_info_from_object_table(struct amdgpu_device *
path_size += le16_to_cpu(path->usSize);
if (device_support & le16_to_cpu(path->usDeviceTag)) {
- uint8_t con_obj_id, con_obj_num, con_obj_type;
-
- con_obj_id =
+ uint8_t con_obj_id =
(le16_to_cpu(path->usConnObjectId) & OBJECT_ID_MASK)
>> OBJECT_ID_SHIFT;
- con_obj_num =
- (le16_to_cpu(path->usConnObjectId) & ENUM_ID_MASK)
- >> ENUM_ID_SHIFT;
- con_obj_type =
- (le16_to_cpu(path->usConnObjectId) &
- OBJECT_TYPE_MASK) >> OBJECT_TYPE_SHIFT;
/* Skip TV/CV support */
if ((le16_to_cpu(path->usDeviceTag) ==
@@ -373,15 +365,7 @@ bool amdgpu_atombios_get_connector_info_from_object_table(struct amdgpu_device *
router.ddc_valid = false;
router.cd_valid = false;
for (j = 0; j < ((le16_to_cpu(path->usSize) - 8) / 2); j++) {
- uint8_t grph_obj_id, grph_obj_num, grph_obj_type;
-
- grph_obj_id =
- (le16_to_cpu(path->usGraphicObjIds[j]) &
- OBJECT_ID_MASK) >> OBJECT_ID_SHIFT;
- grph_obj_num =
- (le16_to_cpu(path->usGraphicObjIds[j]) &
- ENUM_ID_MASK) >> ENUM_ID_SHIFT;
- grph_obj_type =
+ uint8_t grph_obj_type =
(le16_to_cpu(path->usGraphicObjIds[j]) &
OBJECT_TYPE_MASK) >> OBJECT_TYPE_SHIFT;
@@ -2038,7 +2022,7 @@ int amdgpu_atombios_init(struct amdgpu_device *adev)
if (adev->is_atom_fw) {
amdgpu_atomfirmware_scratch_regs_init(adev);
amdgpu_atomfirmware_allocate_fb_scratch(adev);
- ret = amdgpu_atomfirmware_get_mem_train_fb_loc(adev);
+ ret = amdgpu_atomfirmware_get_mem_train_info(adev);
if (ret) {
DRM_ERROR("Failed to get mem train fb location.\n");
return ret;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.c
index ff4eb96bdfb5..58f9d8c3a17a 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.c
@@ -525,16 +525,12 @@ static int gddr6_mem_train_support(struct amdgpu_device *adev)
return ret;
}
-int amdgpu_atomfirmware_get_mem_train_fb_loc(struct amdgpu_device *adev)
+int amdgpu_atomfirmware_get_mem_train_info(struct amdgpu_device *adev)
{
struct atom_context *ctx = adev->mode_info.atom_context;
- unsigned char *bios = ctx->bios;
- struct vram_reserve_block *reserved_block;
- int index, block_number;
+ int index;
uint8_t frev, crev;
uint16_t data_offset, size;
- uint32_t start_address_in_kb;
- uint64_t offset;
int ret;
adev->fw_vram_usage.mem_train_support = false;
@@ -569,32 +565,6 @@ int amdgpu_atomfirmware_get_mem_train_fb_loc(struct amdgpu_device *adev)
return -EINVAL;
}
- reserved_block = (struct vram_reserve_block *)
- (bios + data_offset + sizeof(struct atom_common_table_header));
- block_number = ((unsigned int)size - sizeof(struct atom_common_table_header))
- / sizeof(struct vram_reserve_block);
- reserved_block += (block_number > 0) ? block_number-1 : 0;
- DRM_DEBUG("block_number:0x%04x, last block: 0x%08xkb sz, %dkb fw, %dkb drv.\n",
- block_number,
- le32_to_cpu(reserved_block->start_address_in_kb),
- le16_to_cpu(reserved_block->used_by_firmware_in_kb),
- le16_to_cpu(reserved_block->used_by_driver_in_kb));
- if (reserved_block->used_by_firmware_in_kb > 0) {
- start_address_in_kb = le32_to_cpu(reserved_block->start_address_in_kb);
- offset = (uint64_t)start_address_in_kb * ONE_KiB;
- if ((offset & (ONE_MiB - 1)) < (4 * ONE_KiB + 1) ) {
- offset -= ONE_MiB;
- }
-
- offset &= ~(ONE_MiB - 1);
- adev->fw_vram_usage.mem_train_fb_loc = offset;
- adev->fw_vram_usage.mem_train_support = true;
- DRM_DEBUG("mem_train_fb_loc:0x%09llx.\n", offset);
- ret = 0;
- } else {
- DRM_ERROR("used_by_firmware_in_kb is 0!\n");
- ret = -EINVAL;
- }
-
- return ret;
+ adev->fw_vram_usage.mem_train_support = true;
+ return 0;
}
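Only the support flag survives here; the computation of where the firmware-reserved VRAM region starts is dropped. For reference, the alignment fixup the removed block performed, reconstructed from the lines above (constants ONE_KiB/ONE_MiB as in the original):

/* Reference reconstruction of the removed fixup: convert the
 * kB-granular start address to bytes, step back a full MiB when the
 * address lands within the first 4 KiB past a MiB boundary, then
 * round down to a whole MiB.
 */
uint64_t offset = (uint64_t)start_address_in_kb * ONE_KiB;
if ((offset & (ONE_MiB - 1)) < (4 * ONE_KiB + 1))
	offset -= ONE_MiB;
offset &= ~(ONE_MiB - 1);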
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.h
index f871af5ea6f3..434fe2fa0089 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.h
@@ -31,7 +31,7 @@ void amdgpu_atomfirmware_scratch_regs_init(struct amdgpu_device *adev);
int amdgpu_atomfirmware_allocate_fb_scratch(struct amdgpu_device *adev);
int amdgpu_atomfirmware_get_vram_info(struct amdgpu_device *adev,
int *vram_width, int *vram_type, int *vram_vendor);
-int amdgpu_atomfirmware_get_mem_train_fb_loc(struct amdgpu_device *adev);
+int amdgpu_atomfirmware_get_mem_train_info(struct amdgpu_device *adev);
int amdgpu_atomfirmware_get_clock_info(struct amdgpu_device *adev);
int amdgpu_atomfirmware_get_gfx_info(struct amdgpu_device *adev);
bool amdgpu_atomfirmware_mem_ecc_supported(struct amdgpu_device *adev);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
index 5ca905b4a0fb..a52a084158b1 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
@@ -795,29 +795,23 @@ static int amdgpu_cs_vm_handling(struct amdgpu_cs_parser *p)
if (r)
return r;
- r = amdgpu_sync_fence(adev, &p->job->sync,
- fpriv->prt_va->last_pt_update, false);
+ r = amdgpu_sync_vm_fence(&p->job->sync, fpriv->prt_va->last_pt_update);
if (r)
return r;
if (amdgpu_mcbp || amdgpu_sriov_vf(adev)) {
- struct dma_fence *f;
-
bo_va = fpriv->csa_va;
BUG_ON(!bo_va);
r = amdgpu_vm_bo_update(adev, bo_va, false);
if (r)
return r;
- f = bo_va->last_pt_update;
- r = amdgpu_sync_fence(adev, &p->job->sync, f, false);
+ r = amdgpu_sync_vm_fence(&p->job->sync, bo_va->last_pt_update);
if (r)
return r;
}
amdgpu_bo_list_for_each_entry(e, p->bo_list) {
- struct dma_fence *f;
-
/* ignore duplicates */
bo = ttm_to_amdgpu_bo(e->tv.bo);
if (!bo)
@@ -831,8 +825,7 @@ static int amdgpu_cs_vm_handling(struct amdgpu_cs_parser *p)
if (r)
return r;
- f = bo_va->last_pt_update;
- r = amdgpu_sync_fence(adev, &p->job->sync, f, false);
+ r = amdgpu_sync_vm_fence(&p->job->sync, bo_va->last_pt_update);
if (r)
return r;
}
@@ -845,7 +838,7 @@ static int amdgpu_cs_vm_handling(struct amdgpu_cs_parser *p)
if (r)
return r;
- r = amdgpu_sync_fence(adev, &p->job->sync, vm->last_update, false);
+ r = amdgpu_sync_vm_fence(&p->job->sync, vm->last_update);
if (r)
return r;
@@ -916,6 +909,11 @@ static int amdgpu_cs_ib_fill(struct amdgpu_device *adev,
if (parser->entity && parser->entity != entity)
return -EINVAL;
+ /* Return if there is no run queue associated with this entity,
+ * possibly because its HW IP is disabled. */
+ if (entity->rq == NULL)
+ return -EINVAL;
+
parser->entity = entity;
ring = to_amdgpu_ring(entity->rq->sched);
@@ -987,7 +985,7 @@ static int amdgpu_cs_process_fence_dep(struct amdgpu_cs_parser *p,
dma_fence_put(old);
}
- r = amdgpu_sync_fence(p->adev, &p->job->sync, fence, true);
+ r = amdgpu_sync_fence(&p->job->sync, fence, true);
dma_fence_put(fence);
if (r)
return r;
@@ -1009,7 +1007,7 @@ static int amdgpu_syncobj_lookup_and_add_to_sync(struct amdgpu_cs_parser *p,
return r;
}
- r = amdgpu_sync_fence(p->adev, &p->job->sync, fence, true);
+ r = amdgpu_sync_fence(&p->job->sync, fence, true);
dma_fence_put(fence);
return r;
@@ -1236,7 +1234,6 @@ static int amdgpu_cs_submit(struct amdgpu_cs_parser *p,
goto error_abort;
}
- job->owner = p->filp;
p->fence = dma_fence_get(&job->base.s_fence->finished);
amdgpu_ctx_add_fence(p->ctx, entity, p->fence, &seq);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
index 6614d8a6f4c8..64e2babbc36e 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
@@ -74,7 +74,7 @@ static int amdgpu_ctx_init(struct amdgpu_device *adev,
struct amdgpu_ctx *ctx)
{
unsigned num_entities = amdgpu_ctx_total_num_entities();
- unsigned i, j, k;
+ unsigned i, j;
int r;
if (priority < 0 || priority >= DRM_SCHED_PRIORITY_MAX)
@@ -121,72 +121,57 @@ static int amdgpu_ctx_init(struct amdgpu_device *adev,
ctx->override_priority = DRM_SCHED_PRIORITY_UNSET;
for (i = 0; i < AMDGPU_HW_IP_NUM; ++i) {
- struct amdgpu_ring *rings[AMDGPU_MAX_RINGS];
- struct drm_sched_rq *rqs[AMDGPU_MAX_RINGS];
- unsigned num_rings = 0;
- unsigned num_rqs = 0;
+ struct drm_gpu_scheduler **scheds;
+ struct drm_gpu_scheduler *sched;
+ unsigned num_scheds = 0;
switch (i) {
case AMDGPU_HW_IP_GFX:
- rings[0] = &adev->gfx.gfx_ring[0];
- num_rings = 1;
+ sched = &adev->gfx.gfx_ring[0].sched;
+ scheds = &sched;
+ num_scheds = 1;
break;
case AMDGPU_HW_IP_COMPUTE:
- for (j = 0; j < adev->gfx.num_compute_rings; ++j)
- rings[j] = &adev->gfx.compute_ring[j];
- num_rings = adev->gfx.num_compute_rings;
+ scheds = adev->gfx.compute_sched;
+ num_scheds = adev->gfx.num_compute_sched;
break;
case AMDGPU_HW_IP_DMA:
- for (j = 0; j < adev->sdma.num_instances; ++j)
- rings[j] = &adev->sdma.instance[j].ring;
- num_rings = adev->sdma.num_instances;
+ scheds = adev->sdma.sdma_sched;
+ num_scheds = adev->sdma.num_sdma_sched;
break;
case AMDGPU_HW_IP_UVD:
- rings[0] = &adev->uvd.inst[0].ring;
- num_rings = 1;
+ sched = &adev->uvd.inst[0].ring.sched;
+ scheds = &sched;
+ num_scheds = 1;
break;
case AMDGPU_HW_IP_VCE:
- rings[0] = &adev->vce.ring[0];
- num_rings = 1;
+ sched = &adev->vce.ring[0].sched;
+ scheds = &sched;
+ num_scheds = 1;
break;
case AMDGPU_HW_IP_UVD_ENC:
- rings[0] = &adev->uvd.inst[0].ring_enc[0];
- num_rings = 1;
+ sched = &adev->uvd.inst[0].ring_enc[0].sched;
+ scheds = &sched;
+ num_scheds = 1;
break;
case AMDGPU_HW_IP_VCN_DEC:
- for (j = 0; j < adev->vcn.num_vcn_inst; ++j) {
- if (adev->vcn.harvest_config & (1 << j))
- continue;
- rings[num_rings++] = &adev->vcn.inst[j].ring_dec;
- }
+ scheds = adev->vcn.vcn_dec_sched;
+ num_scheds = adev->vcn.num_vcn_dec_sched;
break;
case AMDGPU_HW_IP_VCN_ENC:
- for (j = 0; j < adev->vcn.num_vcn_inst; ++j) {
- if (adev->vcn.harvest_config & (1 << j))
- continue;
- for (k = 0; k < adev->vcn.num_enc_rings; ++k)
- rings[num_rings++] = &adev->vcn.inst[j].ring_enc[k];
- }
+ scheds = adev->vcn.vcn_enc_sched;
+ num_scheds = adev->vcn.num_vcn_enc_sched;
break;
case AMDGPU_HW_IP_VCN_JPEG:
- for (j = 0; j < adev->vcn.num_vcn_inst; ++j) {
- if (adev->vcn.harvest_config & (1 << j))
- continue;
- rings[num_rings++] = &adev->vcn.inst[j].ring_jpeg;
- }
+ scheds = adev->jpeg.jpeg_sched;
+ num_scheds = adev->jpeg.num_jpeg_sched;
break;
}
- for (j = 0; j < num_rings; ++j) {
- if (!rings[j]->adev)
- continue;
-
- rqs[num_rqs++] = &rings[j]->sched.sched_rq[priority];
- }
-
for (j = 0; j < amdgpu_ctx_num_entities[i]; ++j)
r = drm_sched_entity_init(&ctx->entities[i][j].entity,
- rqs, num_rqs, &ctx->guilty);
+ priority, scheds,
+ num_scheds, &ctx->guilty);
if (r)
goto error_cleanup_entities;
}
@@ -627,3 +612,45 @@ void amdgpu_ctx_mgr_fini(struct amdgpu_ctx_mgr *mgr)
idr_destroy(&mgr->ctx_handles);
mutex_destroy(&mgr->lock);
}
+
+void amdgpu_ctx_init_sched(struct amdgpu_device *adev)
+{
+ int i, j;
+
+ for (i = 0; i < adev->gfx.num_gfx_rings; i++) {
+ adev->gfx.gfx_sched[i] = &adev->gfx.gfx_ring[i].sched;
+ adev->gfx.num_gfx_sched++;
+ }
+
+ for (i = 0; i < adev->gfx.num_compute_rings; i++) {
+ adev->gfx.compute_sched[i] = &adev->gfx.compute_ring[i].sched;
+ adev->gfx.num_compute_sched++;
+ }
+
+ for (i = 0; i < adev->sdma.num_instances; i++) {
+ adev->sdma.sdma_sched[i] = &adev->sdma.instance[i].ring.sched;
+ adev->sdma.num_sdma_sched++;
+ }
+
+ for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
+ if (adev->vcn.harvest_config & (1 << i))
+ continue;
+ adev->vcn.vcn_dec_sched[adev->vcn.num_vcn_dec_sched++] =
+ &adev->vcn.inst[i].ring_dec.sched;
+ }
+
+ for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
+ if (adev->vcn.harvest_config & (1 << i))
+ continue;
+ for (j = 0; j < adev->vcn.num_enc_rings; ++j)
+ adev->vcn.vcn_enc_sched[adev->vcn.num_vcn_enc_sched++] =
+ &adev->vcn.inst[i].ring_enc[j].sched;
+ }
+
+ for (i = 0; i < adev->jpeg.num_jpeg_inst; ++i) {
+ if (adev->jpeg.harvest_config & (1 << i))
+ continue;
+ adev->jpeg.jpeg_sched[adev->jpeg.num_jpeg_sched++] =
+ &adev->jpeg.inst[i].ring_dec.sched;
+ }
+}
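The helper only records pointers into arrays owned by adev, so it must run after every ring (and therefore every scheduler) exists. The amdgpu_device.c hunk later in this patch calls it right after IP init succeeds; the ordering, mirroring that hunk:

/* Ordering as wired up in amdgpu_device_init() later in this patch:
 * populate the per-IP scheduler arrays once all rings exist, before
 * userspace can create contexts against them.
 */
r = amdgpu_device_ip_init(adev);
if (r)
	goto failed;
amdgpu_ctx_init_sched(adev);	/* cache per-IP scheduler lists */
adev->accel_working = true;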
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.h
index da808633732b..4ad90a44dc3c 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.h
@@ -87,4 +87,7 @@ void amdgpu_ctx_mgr_entity_fini(struct amdgpu_ctx_mgr *mgr);
long amdgpu_ctx_mgr_entity_flush(struct amdgpu_ctx_mgr *mgr, long timeout);
void amdgpu_ctx_mgr_fini(struct amdgpu_ctx_mgr *mgr);
+void amdgpu_ctx_init_sched(struct amdgpu_device *adev);
+
+
#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c
index 8e6726e0d035..f24ed9a1a3e5 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c
@@ -26,6 +26,7 @@
#include <linux/kthread.h>
#include <linux/pci.h>
#include <linux/uaccess.h>
+#include <linux/pm_runtime.h>
#include <drm/drm_debugfs.h>
@@ -129,7 +130,7 @@ static int amdgpu_debugfs_process_reg_op(bool read, struct file *f,
sh_bank = 0xFFFFFFFF;
if (instance_bank == 0x3FF)
instance_bank = 0xFFFFFFFF;
- use_bank = 1;
+ use_bank = true;
} else if (*pos & (1ULL << 61)) {
me = (*pos & GENMASK_ULL(33, 24)) >> 24;
@@ -137,17 +138,24 @@ static int amdgpu_debugfs_process_reg_op(bool read, struct file *f,
queue = (*pos & GENMASK_ULL(53, 44)) >> 44;
vmid = (*pos & GENMASK_ULL(58, 54)) >> 54;
- use_ring = 1;
+ use_ring = true;
} else {
- use_bank = use_ring = 0;
+ use_bank = use_ring = false;
}
*pos &= (1UL << 22) - 1;
+ r = pm_runtime_get_sync(adev->ddev->dev);
+ if (r < 0)
+ return r;
+
if (use_bank) {
if ((sh_bank != 0xFFFFFFFF && sh_bank >= adev->gfx.config.max_sh_per_se) ||
- (se_bank != 0xFFFFFFFF && se_bank >= adev->gfx.config.max_shader_engines))
+ (se_bank != 0xFFFFFFFF && se_bank >= adev->gfx.config.max_shader_engines)) {
+ pm_runtime_mark_last_busy(adev->ddev->dev);
+ pm_runtime_put_autosuspend(adev->ddev->dev);
return -EINVAL;
+ }
mutex_lock(&adev->grbm_idx_mutex);
amdgpu_gfx_select_se_sh(adev, se_bank,
sh_bank, instance_bank);
@@ -193,6 +201,9 @@ end:
if (pm_pg_lock)
mutex_unlock(&adev->pm.mutex);
+ pm_runtime_mark_last_busy(adev->ddev->dev);
+ pm_runtime_put_autosuspend(adev->ddev->dev);
+
return result;
}
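Every debugfs handler in this file gains the same bracket: take a runtime-PM reference before touching registers (waking the GPU if it is runtime-suspended) and drop it on every exit path, including error returns. The recurring pattern, extracted for clarity:

/* Pattern added throughout this file: */
r = pm_runtime_get_sync(adev->ddev->dev);
if (r < 0)
	return r;

/* ... MMIO access ... */

pm_runtime_mark_last_busy(adev->ddev->dev);	/* reset autosuspend timer */
pm_runtime_put_autosuspend(adev->ddev->dev);	/* drop the reference */
return result;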
@@ -237,13 +248,20 @@ static ssize_t amdgpu_debugfs_regs_pcie_read(struct file *f, char __user *buf,
if (size & 0x3 || *pos & 0x3)
return -EINVAL;
+ r = pm_runtime_get_sync(adev->ddev->dev);
+ if (r < 0)
+ return r;
+
while (size) {
uint32_t value;
value = RREG32_PCIE(*pos >> 2);
r = put_user(value, (uint32_t *)buf);
- if (r)
+ if (r) {
+ pm_runtime_mark_last_busy(adev->ddev->dev);
+ pm_runtime_put_autosuspend(adev->ddev->dev);
return r;
+ }
result += 4;
buf += 4;
@@ -251,6 +269,9 @@ static ssize_t amdgpu_debugfs_regs_pcie_read(struct file *f, char __user *buf,
size -= 4;
}
+ pm_runtime_mark_last_busy(adev->ddev->dev);
+ pm_runtime_put_autosuspend(adev->ddev->dev);
+
return result;
}
@@ -276,12 +297,19 @@ static ssize_t amdgpu_debugfs_regs_pcie_write(struct file *f, const char __user
if (size & 0x3 || *pos & 0x3)
return -EINVAL;
+ r = pm_runtime_get_sync(adev->ddev->dev);
+ if (r < 0)
+ return r;
+
while (size) {
uint32_t value;
r = get_user(value, (uint32_t *)buf);
- if (r)
+ if (r) {
+ pm_runtime_mark_last_busy(adev->ddev->dev);
+ pm_runtime_put_autosuspend(adev->ddev->dev);
return r;
+ }
WREG32_PCIE(*pos >> 2, value);
@@ -291,6 +319,9 @@ static ssize_t amdgpu_debugfs_regs_pcie_write(struct file *f, const char __user
size -= 4;
}
+ pm_runtime_mark_last_busy(adev->ddev->dev);
+ pm_runtime_put_autosuspend(adev->ddev->dev);
+
return result;
}
@@ -316,13 +347,20 @@ static ssize_t amdgpu_debugfs_regs_didt_read(struct file *f, char __user *buf,
if (size & 0x3 || *pos & 0x3)
return -EINVAL;
+ r = pm_runtime_get_sync(adev->ddev->dev);
+ if (r < 0)
+ return r;
+
while (size) {
uint32_t value;
value = RREG32_DIDT(*pos >> 2);
r = put_user(value, (uint32_t *)buf);
- if (r)
+ if (r) {
+ pm_runtime_mark_last_busy(adev->ddev->dev);
+ pm_runtime_put_autosuspend(adev->ddev->dev);
return r;
+ }
result += 4;
buf += 4;
@@ -330,6 +368,9 @@ static ssize_t amdgpu_debugfs_regs_didt_read(struct file *f, char __user *buf,
size -= 4;
}
+ pm_runtime_mark_last_busy(adev->ddev->dev);
+ pm_runtime_put_autosuspend(adev->ddev->dev);
+
return result;
}
@@ -355,12 +396,19 @@ static ssize_t amdgpu_debugfs_regs_didt_write(struct file *f, const char __user
if (size & 0x3 || *pos & 0x3)
return -EINVAL;
+ r = pm_runtime_get_sync(adev->ddev->dev);
+ if (r < 0)
+ return r;
+
while (size) {
uint32_t value;
r = get_user(value, (uint32_t *)buf);
- if (r)
+ if (r) {
+ pm_runtime_mark_last_busy(adev->ddev->dev);
+ pm_runtime_put_autosuspend(adev->ddev->dev);
return r;
+ }
WREG32_DIDT(*pos >> 2, value);
@@ -370,6 +418,9 @@ static ssize_t amdgpu_debugfs_regs_didt_write(struct file *f, const char __user
size -= 4;
}
+ pm_runtime_mark_last_busy(adev->ddev->dev);
+ pm_runtime_put_autosuspend(adev->ddev->dev);
+
return result;
}
@@ -395,13 +446,20 @@ static ssize_t amdgpu_debugfs_regs_smc_read(struct file *f, char __user *buf,
if (size & 0x3 || *pos & 0x3)
return -EINVAL;
+ r = pm_runtime_get_sync(adev->ddev->dev);
+ if (r < 0)
+ return r;
+
while (size) {
uint32_t value;
value = RREG32_SMC(*pos);
r = put_user(value, (uint32_t *)buf);
- if (r)
+ if (r) {
+ pm_runtime_mark_last_busy(adev->ddev->dev);
+ pm_runtime_put_autosuspend(adev->ddev->dev);
return r;
+ }
result += 4;
buf += 4;
@@ -409,6 +467,9 @@ static ssize_t amdgpu_debugfs_regs_smc_read(struct file *f, char __user *buf,
size -= 4;
}
+ pm_runtime_mark_last_busy(adev->ddev->dev);
+ pm_runtime_put_autosuspend(adev->ddev->dev);
+
return result;
}
@@ -434,12 +495,19 @@ static ssize_t amdgpu_debugfs_regs_smc_write(struct file *f, const char __user *
if (size & 0x3 || *pos & 0x3)
return -EINVAL;
+ r = pm_runtime_get_sync(adev->ddev->dev);
+ if (r < 0)
+ return r;
+
while (size) {
uint32_t value;
r = get_user(value, (uint32_t *)buf);
- if (r)
+ if (r) {
+ pm_runtime_mark_last_busy(adev->ddev->dev);
+ pm_runtime_put_autosuspend(adev->ddev->dev);
return r;
+ }
WREG32_SMC(*pos, value);
@@ -449,6 +517,9 @@ static ssize_t amdgpu_debugfs_regs_smc_write(struct file *f, const char __user *
size -= 4;
}
+ pm_runtime_mark_last_busy(adev->ddev->dev);
+ pm_runtime_put_autosuspend(adev->ddev->dev);
+
return result;
}
@@ -572,7 +643,16 @@ static ssize_t amdgpu_debugfs_sensor_read(struct file *f, char __user *buf,
idx = *pos >> 2;
valuesize = sizeof(values);
+
+ r = pm_runtime_get_sync(adev->ddev->dev);
+ if (r < 0)
+ return r;
+
r = amdgpu_dpm_read_sensor(adev, idx, &values[0], &valuesize);
+
+ pm_runtime_mark_last_busy(adev->ddev->dev);
+ pm_runtime_put_autosuspend(adev->ddev->dev);
+
if (r)
return r;
@@ -633,6 +713,10 @@ static ssize_t amdgpu_debugfs_wave_read(struct file *f, char __user *buf,
wave = (*pos & GENMASK_ULL(36, 31)) >> 31;
simd = (*pos & GENMASK_ULL(44, 37)) >> 37;
+ r = pm_runtime_get_sync(adev->ddev->dev);
+ if (r < 0)
+ return r;
+
/* switch to the specific se/sh/cu */
mutex_lock(&adev->grbm_idx_mutex);
amdgpu_gfx_select_se_sh(adev, se, sh, cu);
@@ -644,6 +728,9 @@ static ssize_t amdgpu_debugfs_wave_read(struct file *f, char __user *buf,
amdgpu_gfx_select_se_sh(adev, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF);
mutex_unlock(&adev->grbm_idx_mutex);
+ pm_runtime_mark_last_busy(adev->ddev->dev);
+ pm_runtime_put_autosuspend(adev->ddev->dev);
+
if (!x)
return -EINVAL;
@@ -711,6 +798,10 @@ static ssize_t amdgpu_debugfs_gpr_read(struct file *f, char __user *buf,
if (!data)
return -ENOMEM;
+ r = pm_runtime_get_sync(adev->ddev->dev);
+ if (r < 0)
+ return r;
+
/* switch to the specific se/sh/cu */
mutex_lock(&adev->grbm_idx_mutex);
amdgpu_gfx_select_se_sh(adev, se, sh, cu);
@@ -726,6 +817,9 @@ static ssize_t amdgpu_debugfs_gpr_read(struct file *f, char __user *buf,
amdgpu_gfx_select_se_sh(adev, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF);
mutex_unlock(&adev->grbm_idx_mutex);
+ pm_runtime_mark_last_busy(adev->ddev->dev);
+ pm_runtime_put_autosuspend(adev->ddev->dev);
+
while (size) {
uint32_t value;
@@ -859,6 +953,10 @@ static int amdgpu_debugfs_test_ib(struct seq_file *m, void *data)
struct amdgpu_device *adev = dev->dev_private;
int r = 0, i;
+ r = pm_runtime_get_sync(dev->dev);
+ if (r < 0)
+ return r;
+
/* Avoid accidentally unparking the sched thread during GPU reset */
mutex_lock(&adev->lock_reset);
@@ -889,6 +987,9 @@ static int amdgpu_debugfs_test_ib(struct seq_file *m, void *data)
mutex_unlock(&adev->lock_reset);
+ pm_runtime_mark_last_busy(dev->dev);
+ pm_runtime_put_autosuspend(dev->dev);
+
return 0;
}
@@ -907,8 +1008,17 @@ static int amdgpu_debugfs_evict_vram(struct seq_file *m, void *data)
struct drm_info_node *node = (struct drm_info_node *)m->private;
struct drm_device *dev = node->minor->dev;
struct amdgpu_device *adev = dev->dev_private;
+ int r;
+
+ r = pm_runtime_get_sync(dev->dev);
+ if (r < 0)
+ return r;
seq_printf(m, "(%d)\n", amdgpu_bo_evict_vram(adev));
+
+ pm_runtime_mark_last_busy(dev->dev);
+ pm_runtime_put_autosuspend(dev->dev);
+
return 0;
}
@@ -917,8 +1027,17 @@ static int amdgpu_debugfs_evict_gtt(struct seq_file *m, void *data)
struct drm_info_node *node = (struct drm_info_node *)m->private;
struct drm_device *dev = node->minor->dev;
struct amdgpu_device *adev = dev->dev_private;
+ int r;
+
+ r = pm_runtime_get_sync(dev->dev);
+ if (r < 0)
+ return r;
seq_printf(m, "(%d)\n", ttm_bo_evict_mm(&adev->mman.bdev, TTM_PL_TT));
+
+ pm_runtime_mark_last_busy(dev->dev);
+ pm_runtime_put_autosuspend(dev->dev);
+
return 0;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
index c17505fba988..53d882000101 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
@@ -66,6 +66,7 @@
#include "amdgpu_pmu.h"
#include <linux/suspend.h>
+#include <drm/task_barrier.h>
MODULE_FIRMWARE("amdgpu/vega10_gpu_info.bin");
MODULE_FIRMWARE("amdgpu/vega12_gpu_info.bin");
@@ -137,14 +138,14 @@ static DEVICE_ATTR(pcie_replay_count, S_IRUGO,
static void amdgpu_device_get_pcie_info(struct amdgpu_device *adev);
/**
- * amdgpu_device_is_px - Is the device is a dGPU with HG/PX power control
+ * amdgpu_device_supports_boco - Is the device a dGPU with HG/PX power control
*
* @dev: drm_device pointer
*
* Returns true if the device is a dGPU with HG/PX power control,
* otherwise return false.
*/
-bool amdgpu_device_is_px(struct drm_device *dev)
+bool amdgpu_device_supports_boco(struct drm_device *dev)
{
struct amdgpu_device *adev = dev->dev_private;
@@ -154,6 +155,21 @@ bool amdgpu_device_is_px(struct drm_device *dev)
}
/**
+ * amdgpu_device_supports_baco - Does the device support BACO
+ *
+ * @dev: drm_device pointer
+ *
+ * Returns true if the device supports BACO,
+ * otherwise return false.
+ */
+bool amdgpu_device_supports_baco(struct drm_device *dev)
+{
+ struct amdgpu_device *adev = dev->dev_private;
+
+ return amdgpu_asic_supports_baco(adev);
+}
+
+/**
* VRAM access helper functions.
*
* amdgpu_device_vram_access - read/write a buffer in vram
@@ -1016,8 +1032,6 @@ def_value:
*/
static int amdgpu_device_check_arguments(struct amdgpu_device *adev)
{
- int ret = 0;
-
if (amdgpu_sched_jobs < 4) {
dev_warn(adev->dev, "sched jobs (%d) must be at least 4\n",
amdgpu_sched_jobs);
@@ -1057,7 +1071,7 @@ static int amdgpu_device_check_arguments(struct amdgpu_device *adev)
adev->firmware.load_type = amdgpu_ucode_get_load_type(adev, amdgpu_fw_load_type);
- return ret;
+ return 0;
}
/**
@@ -1072,8 +1086,9 @@ static int amdgpu_device_check_arguments(struct amdgpu_device *adev)
static void amdgpu_switcheroo_set_state(struct pci_dev *pdev, enum vga_switcheroo_state state)
{
struct drm_device *dev = pci_get_drvdata(pdev);
+ int r;
- if (amdgpu_device_is_px(dev) && state == VGA_SWITCHEROO_OFF)
+ if (amdgpu_device_supports_boco(dev) && state == VGA_SWITCHEROO_OFF)
return;
if (state == VGA_SWITCHEROO_ON) {
@@ -1081,7 +1096,12 @@ static void amdgpu_switcheroo_set_state(struct pci_dev *pdev, enum vga_switchero
/* don't suspend or resume card normally */
dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
- amdgpu_device_resume(dev, true, true);
+ pci_set_power_state(dev->pdev, PCI_D0);
+ pci_restore_state(dev->pdev);
+ r = pci_enable_device(dev->pdev);
+ if (r)
+ DRM_WARN("pci_enable_device failed (%d)\n", r);
+ amdgpu_device_resume(dev, true);
dev->switch_power_state = DRM_SWITCH_POWER_ON;
drm_kms_helper_poll_enable(dev);
@@ -1089,7 +1109,11 @@ static void amdgpu_switcheroo_set_state(struct pci_dev *pdev, enum vga_switchero
pr_info("amdgpu: switched off\n");
drm_kms_helper_poll_disable(dev);
dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
- amdgpu_device_suspend(dev, true, true);
+ amdgpu_device_suspend(dev, true);
+ pci_save_state(dev->pdev);
+ /* Shut down the device */
+ pci_disable_device(dev->pdev);
+ pci_set_power_state(dev->pdev, PCI_D3cold);
dev->switch_power_state = DRM_SWITCH_POWER_OFF;
}
}
@@ -1527,7 +1551,6 @@ static int amdgpu_device_parse_gpu_info_fw(struct amdgpu_device *adev)
}
parse_soc_bounding_box:
-#ifdef CONFIG_DRM_AMD_DC_DCN2_0
/*
* soc bounding box info is not integrated into the discovery table,
* we always need to parse it from gpu info firmware.
@@ -1538,7 +1561,6 @@ parse_soc_bounding_box:
le32_to_cpu(hdr->header.ucode_array_offset_bytes));
adev->dm.soc_bounding_box = &gpu_info_fw->soc_bounding_box;
}
-#endif
break;
}
default:
@@ -1787,7 +1809,8 @@ static int amdgpu_device_fw_loading(struct amdgpu_device *adev)
}
}
- r = amdgpu_pm_load_smu_firmware(adev, &smu_version);
+ if (!amdgpu_sriov_vf(adev) || adev->asic_type == CHIP_TONGA)
+ r = amdgpu_pm_load_smu_firmware(adev, &smu_version);
return r;
}
@@ -1854,6 +1877,9 @@ static int amdgpu_device_ip_init(struct amdgpu_device *adev)
}
}
+ if (amdgpu_sriov_vf(adev))
+ amdgpu_virt_init_data_exchange(adev);
+
r = amdgpu_ib_pool_init(adev);
if (r) {
dev_err(adev->dev, "IB initialization failed (%d).\n", r);
@@ -1895,11 +1921,8 @@ static int amdgpu_device_ip_init(struct amdgpu_device *adev)
amdgpu_amdkfd_device_init(adev);
init_failed:
- if (amdgpu_sriov_vf(adev)) {
- if (!r)
- amdgpu_virt_init_data_exchange(adev);
+ if (amdgpu_sriov_vf(adev))
amdgpu_virt_release_full_gpu(adev, true);
- }
return r;
}
@@ -1938,6 +1961,7 @@ static bool amdgpu_device_check_vram_lost(struct amdgpu_device *adev)
* amdgpu_device_set_cg_state - set clockgating for amdgpu device
*
* @adev: amdgpu_device pointer
+ * @state: clockgating state (gate or ungate)
*
* The list of all the hardware IPs that make up the asic is walked and the
* set_clockgating_state callbacks are run.
@@ -1962,6 +1986,7 @@ static int amdgpu_device_set_cg_state(struct amdgpu_device *adev,
if (adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_UVD &&
adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_VCE &&
adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_VCN &&
+ adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_JPEG &&
adev->ip_blocks[i].version->funcs->set_clockgating_state) {
/* enable clockgating to save power */
r = adev->ip_blocks[i].version->funcs->set_clockgating_state((void *)adev,
@@ -1992,6 +2017,7 @@ static int amdgpu_device_set_pg_state(struct amdgpu_device *adev, enum amd_power
if (adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_UVD &&
adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_VCE &&
adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_VCN &&
+ adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_JPEG &&
adev->ip_blocks[i].version->funcs->set_powergating_state) {
/* enable powergating to save power */
r = adev->ip_blocks[i].version->funcs->set_powergating_state((void *)adev,
@@ -2319,14 +2345,7 @@ static int amdgpu_device_ip_suspend_phase2(struct amdgpu_device *adev)
adev->ip_blocks[i].status.hw = false;
/* handle putting the SMC in the appropriate state */
if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SMC) {
- if (is_support_sw_smu(adev)) {
- r = smu_set_mp1_state(&adev->smu, adev->mp1_state);
- } else if (adev->powerplay.pp_funcs &&
- adev->powerplay.pp_funcs->set_mp1_state) {
- r = adev->powerplay.pp_funcs->set_mp1_state(
- adev->powerplay.pp_handle,
- adev->mp1_state);
- }
+ r = amdgpu_dpm_set_mp1_state(adev, adev->mp1_state);
if (r) {
DRM_ERROR("SMC failed to set mp1 state %d, %d\n",
adev->mp1_state, r);
@@ -2413,7 +2432,8 @@ static int amdgpu_device_ip_reinit_late_sriov(struct amdgpu_device *adev)
AMD_IP_BLOCK_TYPE_GFX,
AMD_IP_BLOCK_TYPE_SDMA,
AMD_IP_BLOCK_TYPE_UVD,
- AMD_IP_BLOCK_TYPE_VCE
+ AMD_IP_BLOCK_TYPE_VCE,
+ AMD_IP_BLOCK_TYPE_VCN
};
for (i = 0; i < ARRAY_SIZE(ip_order); i++) {
@@ -2428,7 +2448,11 @@ static int amdgpu_device_ip_reinit_late_sriov(struct amdgpu_device *adev)
block->status.hw)
continue;
- r = block->version->funcs->hw_init(adev);
+ if (block->version->type == AMD_IP_BLOCK_TYPE_SMC)
+ r = block->version->funcs->resume(adev);
+ else
+ r = block->version->funcs->hw_init(adev);
+
DRM_INFO("RE-INIT-late: %s %s\n", block->version->funcs->name, r?"failed":"succeeded");
if (r)
return r;
@@ -2600,20 +2624,19 @@ bool amdgpu_device_asic_has_dc_support(enum amd_asic_type asic_type)
case CHIP_VEGA10:
case CHIP_VEGA12:
case CHIP_VEGA20:
-#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
+#if defined(CONFIG_DRM_AMD_DC_DCN)
case CHIP_RAVEN:
-#endif
-#if defined(CONFIG_DRM_AMD_DC_DCN2_0)
case CHIP_NAVI10:
case CHIP_NAVI14:
case CHIP_NAVI12:
-#endif
-#if defined(CONFIG_DRM_AMD_DC_DCN2_1)
case CHIP_RENOIR:
#endif
return amdgpu_dc != 0;
#endif
default:
+ if (amdgpu_dc > 0)
+ DRM_INFO("Display Core has been requested via kernel parameter "
+ "but isn't supported by ASIC, ignoring\n");
return false;
}
}
@@ -2638,8 +2661,38 @@ static void amdgpu_device_xgmi_reset_func(struct work_struct *__work)
{
struct amdgpu_device *adev =
container_of(__work, struct amdgpu_device, xgmi_reset_work);
+ struct amdgpu_hive_info *hive = amdgpu_get_xgmi_hive(adev, 0);
+
+ /* It's a bug to not have a hive within this function */
+ if (WARN_ON(!hive))
+ return;
+
+ /*
+ * Use task barrier to synchronize all xgmi reset works across the
+ * hive. task_barrier_enter and task_barrier_exit will block
+ * until all the threads running the xgmi reset works reach
+ * those points. task_barrier_full will do both blocks.
+ */
+ if (amdgpu_asic_reset_method(adev) == AMD_RESET_METHOD_BACO) {
+
+ task_barrier_enter(&hive->tb);
+ adev->asic_reset_res = amdgpu_device_baco_enter(adev->ddev);
+
+ if (adev->asic_reset_res)
+ goto fail;
+
+ task_barrier_exit(&hive->tb);
+ adev->asic_reset_res = amdgpu_device_baco_exit(adev->ddev);
+
+ if (adev->asic_reset_res)
+ goto fail;
+ } else {
+
+ task_barrier_full(&hive->tb);
+ adev->asic_reset_res = amdgpu_asic_reset(adev);
+ }
- adev->asic_reset_res = amdgpu_asic_reset(adev);
+fail:
if (adev->asic_reset_res)
DRM_WARN("ASIC reset failed with error, %d for drm dev, %s",
adev->asic_reset_res, adev->ddev->unique);
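For BACO resets across an XGMI hive, every node must be inside BACO before any node leaves it, which is why two synchronization points are needed; a single full barrier suffices when the reset is one step. Shape of the synchronization (illustrative, for a hive of N devices each running this work item):

/*
 * 	task_barrier_enter(&hive->tb);	-- all N arrive before any proceeds
 * 	baco_enter();
 * 	task_barrier_exit(&hive->tb);	-- all N are in BACO before any exits
 * 	baco_exit();
 *
 * task_barrier_full() behaves like enter immediately followed by
 * exit, which is enough when the reset itself is a single step.
 */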
@@ -2731,7 +2784,7 @@ int amdgpu_device_init(struct amdgpu_device *adev,
uint32_t flags)
{
int r, i;
- bool runtime = false;
+ bool boco = false;
u32 max_MBps;
adev->shutdown = false;
@@ -2754,7 +2807,7 @@ int amdgpu_device_init(struct amdgpu_device *adev,
adev->mman.buffer_funcs = NULL;
adev->mman.buffer_funcs_ring = NULL;
adev->vm_manager.vm_pte_funcs = NULL;
- adev->vm_manager.vm_pte_num_rqs = 0;
+ adev->vm_manager.vm_pte_num_scheds = 0;
adev->gmc.gmc_funcs = NULL;
adev->fence_context = dma_fence_context_alloc(AMDGPU_MAX_RINGS);
bitmap_zero(adev->gfx.pipe_reserve_bitmap, AMDGPU_MAX_COMPUTE_QUEUES);
@@ -2794,9 +2847,8 @@ int amdgpu_device_init(struct amdgpu_device *adev,
mutex_init(&adev->virt.vf_errors.lock);
hash_init(adev->mn_hash);
mutex_init(&adev->lock_reset);
- mutex_init(&adev->notifier_lock);
- mutex_init(&adev->virt.dpm_mutex);
mutex_init(&adev->psp.mutex);
+ mutex_init(&adev->notifier_lock);
r = amdgpu_device_check_arguments(adev);
if (r)
@@ -2902,12 +2954,15 @@ int amdgpu_device_init(struct amdgpu_device *adev,
* ignore it */
vga_client_register(adev->pdev, adev, NULL, amdgpu_device_vga_set_decode);
- if (amdgpu_device_is_px(ddev))
- runtime = true;
- if (!pci_is_thunderbolt_attached(adev->pdev))
+ if (amdgpu_device_supports_boco(ddev))
+ boco = true;
+ if (amdgpu_has_atpx() &&
+ (amdgpu_is_atpx_hybrid() ||
+ amdgpu_has_atpx_dgpu_power_cntl()) &&
+ !pci_is_thunderbolt_attached(adev->pdev))
vga_switcheroo_register_client(adev->pdev,
- &amdgpu_switcheroo_ops, runtime);
- if (runtime)
+ &amdgpu_switcheroo_ops, boco);
+ if (boco)
vga_switcheroo_init_domain_pm_ops(adev->dev, &adev->vga_pm_domain);
if (amdgpu_emu_mode == 1) {
@@ -2994,11 +3049,17 @@ fence_driver_init:
}
dev_err(adev->dev, "amdgpu_device_ip_init failed\n");
amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_AMDGPU_INIT_FAIL, 0, 0);
- if (amdgpu_virt_request_full_gpu(adev, false))
- amdgpu_virt_release_full_gpu(adev, false);
goto failed;
}
+ DRM_DEBUG("SE %d, SH per SE %d, CU per SH %d, active_cu_number %d\n",
+ adev->gfx.config.max_shader_engines,
+ adev->gfx.config.max_sh_per_se,
+ adev->gfx.config.max_cu_per_sh,
+ adev->gfx.cu_info.number);
+
+ amdgpu_ctx_init_sched(adev);
+
adev->accel_working = true;
amdgpu_vm_check_compute_bug(adev);
@@ -3013,16 +3074,19 @@ fence_driver_init:
amdgpu_fbdev_init(adev);
- if (amdgpu_sriov_vf(adev) && amdgim_is_hwperf(adev))
- amdgpu_pm_virt_sysfs_init(adev);
-
r = amdgpu_pm_sysfs_init(adev);
- if (r)
+ if (r) {
+ adev->pm_sysfs_en = false;
DRM_ERROR("registering pm debugfs failed (%d).\n", r);
+ } else
+ adev->pm_sysfs_en = true;
r = amdgpu_ucode_sysfs_init(adev);
- if (r)
+ if (r) {
+ adev->ucode_sysfs_en = false;
DRM_ERROR("Creating firmware sysfs failed (%d).\n", r);
+ } else
+ adev->ucode_sysfs_en = true;
r = amdgpu_debugfs_gem_init(adev);
if (r)
@@ -3091,7 +3155,7 @@ fence_driver_init:
failed:
amdgpu_vf_error_trans_all(adev);
- if (runtime)
+ if (boco)
vga_switcheroo_fini_domain_pm_ops(adev->dev);
return r;
@@ -3122,7 +3186,8 @@ void amdgpu_device_fini(struct amdgpu_device *adev)
drm_atomic_helper_shutdown(adev->ddev);
}
amdgpu_fence_driver_fini(adev);
- amdgpu_pm_sysfs_fini(adev);
+ if (adev->pm_sysfs_en)
+ amdgpu_pm_sysfs_fini(adev);
amdgpu_fbdev_fini(adev);
r = amdgpu_device_ip_fini(adev);
if (adev->firmware.gpu_info_fw) {
@@ -3139,9 +3204,12 @@ void amdgpu_device_fini(struct amdgpu_device *adev)
kfree(adev->bios);
adev->bios = NULL;
- if (!pci_is_thunderbolt_attached(adev->pdev))
+ if (amdgpu_has_atpx() &&
+ (amdgpu_is_atpx_hybrid() ||
+ amdgpu_has_atpx_dgpu_power_cntl()) &&
+ !pci_is_thunderbolt_attached(adev->pdev))
vga_switcheroo_unregister_client(adev->pdev);
- if (adev->flags & AMD_IS_PX)
+ if (amdgpu_device_supports_boco(adev->ddev))
vga_switcheroo_fini_domain_pm_ops(adev->dev);
vga_client_register(adev->pdev, NULL, NULL, NULL);
if (adev->rio_mem)
@@ -3150,12 +3218,11 @@ void amdgpu_device_fini(struct amdgpu_device *adev)
iounmap(adev->rmmio);
adev->rmmio = NULL;
amdgpu_device_doorbell_fini(adev);
- if (amdgpu_sriov_vf(adev) && amdgim_is_hwperf(adev))
- amdgpu_pm_virt_sysfs_fini(adev);
amdgpu_debugfs_regs_cleanup(adev);
device_remove_file(adev->dev, &dev_attr_pcie_replay_count);
- amdgpu_ucode_sysfs_fini(adev);
+ if (adev->ucode_sysfs_en)
+ amdgpu_ucode_sysfs_fini(adev);
if (IS_ENABLED(CONFIG_PERF_EVENTS))
amdgpu_pmu_fini(adev);
amdgpu_debugfs_preempt_cleanup(adev);
@@ -3178,7 +3245,7 @@ void amdgpu_device_fini(struct amdgpu_device *adev)
* Returns 0 for success or an error on failure.
* Called at driver suspend.
*/
-int amdgpu_device_suspend(struct drm_device *dev, bool suspend, bool fbcon)
+int amdgpu_device_suspend(struct drm_device *dev, bool fbcon)
{
struct amdgpu_device *adev;
struct drm_crtc *crtc;
@@ -3261,13 +3328,6 @@ int amdgpu_device_suspend(struct drm_device *dev, bool suspend, bool fbcon)
*/
amdgpu_bo_evict_vram(adev);
- if (suspend) {
- pci_save_state(dev->pdev);
- /* Shut down the device */
- pci_disable_device(dev->pdev);
- pci_set_power_state(dev->pdev, PCI_D3hot);
- }
-
return 0;
}
@@ -3282,7 +3342,7 @@ int amdgpu_device_suspend(struct drm_device *dev, bool suspend, bool fbcon)
* Returns 0 for success or an error on failure.
* Called at driver resume.
*/
-int amdgpu_device_resume(struct drm_device *dev, bool resume, bool fbcon)
+int amdgpu_device_resume(struct drm_device *dev, bool fbcon)
{
struct drm_connector *connector;
struct drm_connector_list_iter iter;
@@ -3293,14 +3353,6 @@ int amdgpu_device_resume(struct drm_device *dev, bool resume, bool fbcon)
if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
return 0;
- if (resume) {
- pci_set_power_state(dev->pdev, PCI_D0);
- pci_restore_state(dev->pdev);
- r = pci_enable_device(dev->pdev);
- if (r)
- return r;
- }
-
/* post card */
if (amdgpu_device_need_post(adev)) {
r = amdgpu_atom_asic_init(adev->mode_info.atom_context);
@@ -3639,13 +3691,12 @@ static int amdgpu_device_reset_sriov(struct amdgpu_device *adev,
if (r)
return r;
- amdgpu_amdkfd_pre_reset(adev);
-
/* Resume IP prior to SMC */
r = amdgpu_device_ip_reinit_early_sriov(adev);
if (r)
goto error;
+ amdgpu_virt_init_data_exchange(adev);
/* we need recover gart prior to run SMC/CP/SDMA resume */
amdgpu_gtt_mgr_recover(&adev->mman.bdev.man[TTM_PL_TT]);
@@ -3663,7 +3714,6 @@ static int amdgpu_device_reset_sriov(struct amdgpu_device *adev,
amdgpu_amdkfd_post_reset(adev);
error:
- amdgpu_virt_init_data_exchange(adev);
amdgpu_virt_release_full_gpu(adev, true);
if (!r && adev->virt.gim_feature & AMDGIM_FEATURE_GIM_FLR_VRAMLOST) {
amdgpu_inc_vram_lost(adev);
@@ -3709,6 +3759,7 @@ bool amdgpu_device_should_recover_gpu(struct amdgpu_device *adev)
case CHIP_VEGA10:
case CHIP_VEGA12:
case CHIP_RAVEN:
+ case CHIP_ARCTURUS:
break;
default:
goto disabled;
@@ -3785,7 +3836,7 @@ static int amdgpu_do_asic_reset(struct amdgpu_hive_info *hive,
list_for_each_entry(tmp_adev, device_list_handle, gmc.xgmi.head) {
/* For XGMI run all resets in parallel to speed up the process */
if (tmp_adev->gmc.xgmi.num_physical_nodes > 1) {
- if (!queue_work(system_highpri_wq, &tmp_adev->xgmi_reset_work))
+ if (!queue_work(system_unbound_wq, &tmp_adev->xgmi_reset_work))
r = -EALREADY;
} else
r = amdgpu_asic_reset(tmp_adev);
@@ -3797,7 +3848,7 @@ static int amdgpu_do_asic_reset(struct amdgpu_hive_info *hive,
}
}
- /* For XGMI wait for all PSP resets to complete before proceed */
+ /* For XGMI, wait for all resets to complete before proceeding */
if (!r) {
list_for_each_entry(tmp_adev, device_list_handle,
gmc.xgmi.head) {
@@ -3811,6 +3862,8 @@ static int amdgpu_do_asic_reset(struct amdgpu_hive_info *hive,
}
}
+ if (!r && amdgpu_ras_intr_triggered())
+ amdgpu_ras_intr_cleared();
list_for_each_entry(tmp_adev, device_list_handle, gmc.xgmi.head) {
if (need_full_reset) {
@@ -3899,7 +3952,7 @@ static bool amdgpu_device_lock_adev(struct amdgpu_device *adev, bool trylock)
mutex_lock(&adev->lock_reset);
atomic_inc(&adev->gpu_reset_counter);
- adev->in_gpu_reset = 1;
+ adev->in_gpu_reset = true;
switch (amdgpu_asic_reset_method(adev)) {
case AMD_RESET_METHOD_MODE1:
adev->mp1_state = PP_MP1_STATE_SHUTDOWN;
@@ -3919,7 +3972,7 @@ static void amdgpu_device_unlock_adev(struct amdgpu_device *adev)
{
amdgpu_vf_error_trans_all(adev);
adev->mp1_state = PP_MP1_STATE_NONE;
- adev->in_gpu_reset = 0;
+ adev->in_gpu_reset = false;
mutex_unlock(&adev->lock_reset);
}
@@ -3943,12 +3996,15 @@ int amdgpu_device_gpu_recover(struct amdgpu_device *adev,
struct amdgpu_device *tmp_adev = NULL;
int i, r = 0;
bool in_ras_intr = amdgpu_ras_intr_triggered();
+ bool use_baco =
+ (amdgpu_asic_reset_method(adev) == AMD_RESET_METHOD_BACO);
/*
* Flush RAM to disk so that after reboot
* the user can read log and see why the system rebooted.
*/
- if (in_ras_intr && amdgpu_ras_get_context(adev)->reboot) {
+ if (in_ras_intr && !use_baco && amdgpu_ras_get_context(adev)->reboot) {
DRM_WARN("Emergency reboot.");
@@ -3959,7 +4015,8 @@ int amdgpu_device_gpu_recover(struct amdgpu_device *adev,
need_full_reset = job_signaled = false;
INIT_LIST_HEAD(&device_list);
- dev_info(adev->dev, "GPU %s begin!\n", in_ras_intr ? "jobs stop":"reset");
+ dev_info(adev->dev, "GPU %s begin!\n",
+ (in_ras_intr && !use_baco) ? "jobs stop":"reset");
cancel_delayed_work_sync(&adev->delayed_init_work);
@@ -4026,7 +4083,8 @@ int amdgpu_device_gpu_recover(struct amdgpu_device *adev,
amdgpu_unregister_gpu_instance(tmp_adev);
/* disable ras on ALL IPs */
- if (!in_ras_intr && amdgpu_device_ip_need_full_reset(tmp_adev))
+ if (!(in_ras_intr && !use_baco) &&
+ amdgpu_device_ip_need_full_reset(tmp_adev))
amdgpu_ras_suspend(tmp_adev);
for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
@@ -4037,13 +4095,13 @@ int amdgpu_device_gpu_recover(struct amdgpu_device *adev,
drm_sched_stop(&ring->sched, job ? &job->base : NULL);
- if (in_ras_intr)
+ if (in_ras_intr && !use_baco)
amdgpu_job_stop_all_jobs_on_sched(&ring->sched);
}
}
- if (in_ras_intr)
+ if (in_ras_intr && !use_baco)
goto skip_sched_resume;
/*
@@ -4136,7 +4194,7 @@ skip_hw_reset:
skip_sched_resume:
list_for_each_entry(tmp_adev, device_list_handle, gmc.xgmi.head) {
/* unlock kfd: SRIOV would do it separately */
- if (!in_ras_intr && !amdgpu_sriov_vf(tmp_adev))
+ if (!(in_ras_intr && !use_baco) && !amdgpu_sriov_vf(tmp_adev))
amdgpu_amdkfd_post_reset(tmp_adev);
amdgpu_device_unlock_adev(tmp_adev);
}
@@ -4285,3 +4343,35 @@ static void amdgpu_device_get_pcie_info(struct amdgpu_device *adev)
}
}
+int amdgpu_device_baco_enter(struct drm_device *dev)
+{
+ struct amdgpu_device *adev = dev->dev_private;
+ struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);
+
+ if (!amdgpu_device_supports_baco(adev->ddev))
+ return -ENOTSUPP;
+
+ if (ras && ras->supported)
+ adev->nbio.funcs->enable_doorbell_interrupt(adev, false);
+
+ return amdgpu_dpm_baco_enter(adev);
+}
+
+int amdgpu_device_baco_exit(struct drm_device *dev)
+{
+ struct amdgpu_device *adev = dev->dev_private;
+ struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);
+ int ret = 0;
+
+ if (!amdgpu_device_supports_baco(adev->ddev))
+ return -ENOTSUPP;
+
+ ret = amdgpu_dpm_baco_exit(adev);
+ if (ret)
+ return ret;
+
+ if (ras && ras->supported)
+ adev->nbio.funcs->enable_doorbell_interrupt(adev, true);
+
+ return 0;
+}
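
The two helpers above are meant to bracket a BACO (Bus Active, Chip Off) window: enter disables doorbell interrupts while RAS is active, exit re-enables them after the SMU brings the chip back. A minimal sketch of a caller that propagates errors from both directions, assuming only the signatures added above (illustrative; note the runtime-PM hooks later in this patch currently ignore the enter return value):

	static int example_baco_cycle(struct drm_device *dev)
	{
		int r;

		r = amdgpu_device_baco_enter(dev);	/* -ENOTSUPP if BACO is unavailable */
		if (r)
			return r;

		/* ... device stays in BACO until a wakeup event ... */

		return amdgpu_device_baco_exit(dev);	/* re-enables doorbell interrupts */
	}
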
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_df.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_df.h
new file mode 100644
index 000000000000..61a26c15c8dd
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_df.h
@@ -0,0 +1,62 @@
+/*
+ * Copyright 2020 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#ifndef __AMDGPU_DF_H__
+#define __AMDGPU_DF_H__
+
+struct amdgpu_df_hash_status {
+ bool hash_64k;
+ bool hash_2m;
+ bool hash_1g;
+};
+
+struct amdgpu_df_funcs {
+ void (*sw_init)(struct amdgpu_device *adev);
+ void (*sw_fini)(struct amdgpu_device *adev);
+ void (*enable_broadcast_mode)(struct amdgpu_device *adev,
+ bool enable);
+ u32 (*get_fb_channel_number)(struct amdgpu_device *adev);
+ u32 (*get_hbm_channel_number)(struct amdgpu_device *adev);
+ void (*update_medium_grain_clock_gating)(struct amdgpu_device *adev,
+ bool enable);
+ void (*get_clockgating_state)(struct amdgpu_device *adev,
+ u32 *flags);
+ void (*enable_ecc_force_par_wr_rmw)(struct amdgpu_device *adev,
+ bool enable);
+ int (*pmc_start)(struct amdgpu_device *adev, uint64_t config,
+ int is_enable);
+ int (*pmc_stop)(struct amdgpu_device *adev, uint64_t config,
+ int is_disable);
+ void (*pmc_get_count)(struct amdgpu_device *adev, uint64_t config,
+ uint64_t *count);
+ uint64_t (*get_fica)(struct amdgpu_device *adev, uint32_t ficaa_val);
+ void (*set_fica)(struct amdgpu_device *adev, uint32_t ficaa_val,
+ uint32_t ficadl_val, uint32_t ficadh_val);
+};
+
+struct amdgpu_df {
+ struct amdgpu_df_hash_status hash_status;
+ const struct amdgpu_df_funcs *funcs;
+};
+
+#endif /* __AMDGPU_DF_H__ */
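
Everything in the DF (Data Fabric) block is reached through this ops table. A hypothetical call site, assuming struct amdgpu_device gains a 'struct amdgpu_df df' member (implied by this header but not shown in this hunk), with the usual NULL guards since not every ASIC fills every hook:

	/* Hypothetical call site; the 'adev->df' member is an assumption here. */
	if (adev->df.funcs && adev->df.funcs->sw_init)
		adev->df.funcs->sw_init(adev);
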
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
index 3cadb0b76f22..6d520a3eec40 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
@@ -513,13 +513,23 @@ uint32_t amdgpu_display_supported_domains(struct amdgpu_device *adev,
* will not allow USWC mappings.
* Also, don't allow the GTT domain if the BO doesn't have the USWC flag set.
*/
- if (adev->asic_type >= CHIP_CARRIZO &&
- adev->asic_type < CHIP_RAVEN &&
- (adev->flags & AMD_IS_APU) &&
- (bo_flags & AMDGPU_GEM_CREATE_CPU_GTT_USWC) &&
+ if ((bo_flags & AMDGPU_GEM_CREATE_CPU_GTT_USWC) &&
amdgpu_bo_support_uswc(bo_flags) &&
- amdgpu_device_asic_has_dc_support(adev->asic_type))
- domain |= AMDGPU_GEM_DOMAIN_GTT;
+ amdgpu_device_asic_has_dc_support(adev->asic_type)) {
+ switch (adev->asic_type) {
+ case CHIP_CARRIZO:
+ case CHIP_STONEY:
+ domain |= AMDGPU_GEM_DOMAIN_GTT;
+ break;
+ case CHIP_RAVEN:
+ /* enable S/G on PCO and RV2 */
+ if (adev->rev_id >= 0x8 || adev->pdev->device == 0x15d8)
+ domain |= AMDGPU_GEM_DOMAIN_GTT;
+ break;
+ default:
+ break;
+ }
+ }
#endif
return domain;
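
In the CHIP_RAVEN branch above, rev_id >= 0x8 selects Raven2 and PCI device 0x15d8 selects Picasso, matching the "PCO and RV2" comment. A hypothetical helper that names the check (not part of the patch, purely illustrative):

	/* Hypothetical helper; encodes the same predicate as the switch above. */
	static bool raven_supports_sg_display(struct amdgpu_device *adev)
	{
		return adev->rev_id >= 0x8 ||		/* Raven2 */
		       adev->pdev->device == 0x15d8;	/* Picasso */
	}
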
@@ -690,7 +700,6 @@ bool amdgpu_display_crtc_scaling_mode_fixup(struct drm_crtc *crtc,
struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
struct amdgpu_encoder *amdgpu_encoder;
struct drm_connector *connector;
- struct amdgpu_connector *amdgpu_connector;
u32 src_v = 1, dst_v = 1;
u32 src_h = 1, dst_h = 1;
@@ -702,7 +711,6 @@ bool amdgpu_display_crtc_scaling_mode_fixup(struct drm_crtc *crtc,
continue;
amdgpu_encoder = to_amdgpu_encoder(encoder);
connector = amdgpu_get_connector_for_encoder(encoder);
- amdgpu_connector = to_amdgpu_connector(connector);
/* set scaling */
if (amdgpu_encoder->rmx_type == RMX_OFF)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c
index e2eec7b66334..a59cd47aa6c1 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c
@@ -360,10 +360,8 @@ struct dma_buf *amdgpu_gem_prime_export(struct drm_gem_object *gobj,
return ERR_PTR(-EPERM);
buf = drm_gem_prime_export(gobj, flags);
- if (!IS_ERR(buf)) {
- buf->file->f_mapping = gobj->dev->anon_inode->i_mapping;
+ if (!IS_ERR(buf))
buf->ops = &amdgpu_dmabuf_ops;
- }
return buf;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.c
index 9cc270efee7c..a2e8c3dfb4f1 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.c
@@ -946,20 +946,63 @@ int amdgpu_dpm_set_powergating_by_smu(struct amdgpu_device *adev, uint32_t block
bool swsmu = is_support_sw_smu(adev);
switch (block_type) {
- case AMD_IP_BLOCK_TYPE_GFX:
case AMD_IP_BLOCK_TYPE_UVD:
- case AMD_IP_BLOCK_TYPE_VCN:
case AMD_IP_BLOCK_TYPE_VCE:
+ if (swsmu) {
+ ret = smu_dpm_set_power_gate(&adev->smu, block_type, gate);
+ } else if (adev->powerplay.pp_funcs &&
+ adev->powerplay.pp_funcs->set_powergating_by_smu) {
+ /*
+ * TODO: need a better lock mechanism
+ *
+ * Here adev->pm.mutex lock protection is enforced only for
+ * the UVD and VCE cases; other cases may already hold locks
+ * taken in amdgpu_pm.c.
+ * This is a quick fix for the deadlock issue below:
+ * INFO: task ocltst:2028 blocked for more than 120 seconds.
+ * Tainted: G OE 5.0.0-37-generic #40~18.04.1-Ubuntu
+ * "echo 0 > /proc/sys/kernel/hung_task_timeout_secs" disables this message.
+ * ocltst D 0 2028 2026 0x00000000
+ * Call Trace:
+ * __schedule+0x2c0/0x870
+ * schedule+0x2c/0x70
+ * schedule_preempt_disabled+0xe/0x10
+ * __mutex_lock.isra.9+0x26d/0x4e0
+ * __mutex_lock_slowpath+0x13/0x20
+ * ? __mutex_lock_slowpath+0x13/0x20
+ * mutex_lock+0x2f/0x40
+ * amdgpu_dpm_set_powergating_by_smu+0x64/0xe0 [amdgpu]
+ * gfx_v8_0_enable_gfx_static_mg_power_gating+0x3c/0x70 [amdgpu]
+ * gfx_v8_0_set_powergating_state+0x66/0x260 [amdgpu]
+ * amdgpu_device_ip_set_powergating_state+0x62/0xb0 [amdgpu]
+ * pp_dpm_force_performance_level+0xe7/0x100 [amdgpu]
+ * amdgpu_set_dpm_forced_performance_level+0x129/0x330 [amdgpu]
+ */
+ mutex_lock(&adev->pm.mutex);
+ ret = ((adev)->powerplay.pp_funcs->set_powergating_by_smu(
+ (adev)->powerplay.pp_handle, block_type, gate));
+ mutex_unlock(&adev->pm.mutex);
+ }
+ break;
+ case AMD_IP_BLOCK_TYPE_GFX:
+ case AMD_IP_BLOCK_TYPE_VCN:
case AMD_IP_BLOCK_TYPE_SDMA:
if (swsmu)
ret = smu_dpm_set_power_gate(&adev->smu, block_type, gate);
- else
+ else if (adev->powerplay.pp_funcs &&
+ adev->powerplay.pp_funcs->set_powergating_by_smu)
ret = ((adev)->powerplay.pp_funcs->set_powergating_by_smu(
(adev)->powerplay.pp_handle, block_type, gate));
break;
+ case AMD_IP_BLOCK_TYPE_JPEG:
+ if (swsmu)
+ ret = smu_dpm_set_power_gate(&adev->smu, block_type, gate);
+ break;
case AMD_IP_BLOCK_TYPE_GMC:
case AMD_IP_BLOCK_TYPE_ACP:
- ret = ((adev)->powerplay.pp_funcs->set_powergating_by_smu(
+ if (adev->powerplay.pp_funcs &&
+ adev->powerplay.pp_funcs->set_powergating_by_smu)
+ ret = ((adev)->powerplay.pp_funcs->set_powergating_by_smu(
(adev)->powerplay.pp_handle, block_type, gate));
break;
default:
@@ -968,3 +1011,163 @@ int amdgpu_dpm_set_powergating_by_smu(struct amdgpu_device *adev, uint32_t block
return ret;
}
+
+int amdgpu_dpm_baco_enter(struct amdgpu_device *adev)
+{
+ const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
+ void *pp_handle = adev->powerplay.pp_handle;
+ struct smu_context *smu = &adev->smu;
+ int ret = 0;
+
+ if (is_support_sw_smu(adev)) {
+ ret = smu_baco_enter(smu);
+ } else {
+ if (!pp_funcs || !pp_funcs->set_asic_baco_state)
+ return -ENOENT;
+
+ /* enter BACO state */
+ ret = pp_funcs->set_asic_baco_state(pp_handle, 1);
+ }
+
+ return ret;
+}
+
+int amdgpu_dpm_baco_exit(struct amdgpu_device *adev)
+{
+ const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
+ void *pp_handle = adev->powerplay.pp_handle;
+ struct smu_context *smu = &adev->smu;
+ int ret = 0;
+
+ if (is_support_sw_smu(adev)) {
+ ret = smu_baco_exit(smu);
+ } else {
+ if (!pp_funcs || !pp_funcs->set_asic_baco_state)
+ return -ENOENT;
+
+ /* exit BACO state */
+ ret = pp_funcs->set_asic_baco_state(pp_handle, 0);
+ }
+
+ return ret;
+}
+
+int amdgpu_dpm_set_mp1_state(struct amdgpu_device *adev,
+ enum pp_mp1_state mp1_state)
+{
+ int ret = 0;
+
+ if (is_support_sw_smu(adev)) {
+ ret = smu_set_mp1_state(&adev->smu, mp1_state);
+ } else if (adev->powerplay.pp_funcs &&
+ adev->powerplay.pp_funcs->set_mp1_state) {
+ ret = adev->powerplay.pp_funcs->set_mp1_state(
+ adev->powerplay.pp_handle,
+ mp1_state);
+ }
+
+ return ret;
+}
+
+bool amdgpu_dpm_is_baco_supported(struct amdgpu_device *adev)
+{
+ const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
+ void *pp_handle = adev->powerplay.pp_handle;
+ struct smu_context *smu = &adev->smu;
+ bool baco_cap;
+
+ if (is_support_sw_smu(adev)) {
+ return smu_baco_is_support(smu);
+ } else {
+ if (!pp_funcs || !pp_funcs->get_asic_baco_capability)
+ return false;
+
+ if (pp_funcs->get_asic_baco_capability(pp_handle, &baco_cap))
+ return false;
+
+ return baco_cap;
+ }
+}
+
+int amdgpu_dpm_mode2_reset(struct amdgpu_device *adev)
+{
+ const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
+ void *pp_handle = adev->powerplay.pp_handle;
+ struct smu_context *smu = &adev->smu;
+
+ if (is_support_sw_smu(adev)) {
+ return smu_mode2_reset(smu);
+ } else {
+ if (!pp_funcs || !pp_funcs->asic_reset_mode_2)
+ return -ENOENT;
+
+ return pp_funcs->asic_reset_mode_2(pp_handle);
+ }
+}
+
+int amdgpu_dpm_baco_reset(struct amdgpu_device *adev)
+{
+ const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
+ void *pp_handle = adev->powerplay.pp_handle;
+ struct smu_context *smu = &adev->smu;
+ int ret = 0;
+
+ dev_info(adev->dev, "GPU BACO reset\n");
+
+ if (is_support_sw_smu(adev)) {
+ ret = smu_baco_enter(smu);
+ if (ret)
+ return ret;
+
+ ret = smu_baco_exit(smu);
+ if (ret)
+ return ret;
+ } else {
+ if (!pp_funcs || !pp_funcs->set_asic_baco_state)
+ return -ENOENT;
+
+ /* enter BACO state */
+ ret = pp_funcs->set_asic_baco_state(pp_handle, 1);
+ if (ret)
+ return ret;
+
+ /* exit BACO state */
+ ret = pp_funcs->set_asic_baco_state(pp_handle, 0);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+int amdgpu_dpm_switch_power_profile(struct amdgpu_device *adev,
+ enum PP_SMC_POWER_PROFILE type,
+ bool en)
+{
+ int ret = 0;
+
+ if (is_support_sw_smu(adev))
+ ret = smu_switch_power_profile(&adev->smu, type, en);
+ else if (adev->powerplay.pp_funcs &&
+ adev->powerplay.pp_funcs->switch_power_profile)
+ ret = adev->powerplay.pp_funcs->switch_power_profile(
+ adev->powerplay.pp_handle, type, en);
+
+ return ret;
+}
+
+int amdgpu_dpm_set_xgmi_pstate(struct amdgpu_device *adev,
+ uint32_t pstate)
+{
+ int ret = 0;
+
+ if (is_support_sw_smu_xgmi(adev))
+ ret = smu_set_xgmi_pstate(&adev->smu, pstate);
+ else if (adev->powerplay.pp_funcs &&
+ adev->powerplay.pp_funcs->set_xgmi_pstate)
+ ret = adev->powerplay.pp_funcs->set_xgmi_pstate(adev->powerplay.pp_handle,
+ pstate);
+
+ return ret;
+}
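
Taken together, the helpers added to this file give callers one interface over both the software-SMU and legacy powerplay backends. An illustrative dispatch between the two reset flavors, using only functions visible in this patch (a fragment; the real reset path wraps this in considerably more bookkeeping):

	if (amdgpu_asic_reset_method(adev) == AMD_RESET_METHOD_BACO)
		r = amdgpu_dpm_baco_reset(adev);	/* enter + exit BACO */
	else
		r = amdgpu_dpm_mode2_reset(adev);	/* SMU mode-2 reset */
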
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.h
index 2cfb677272af..902ca6c00cca 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.h
@@ -341,10 +341,6 @@ enum amdgpu_pcie_gen {
((adev)->powerplay.pp_funcs->reset_power_profile_state(\
(adev)->powerplay.pp_handle, request))
-#define amdgpu_dpm_switch_power_profile(adev, type, en) \
- ((adev)->powerplay.pp_funcs->switch_power_profile(\
- (adev)->powerplay.pp_handle, type, en))
-
#define amdgpu_dpm_set_clockgating_by_smu(adev, msg_id) \
((adev)->powerplay.pp_funcs->set_clockgating_by_smu(\
(adev)->powerplay.pp_handle, msg_id))
@@ -517,4 +513,24 @@ extern int amdgpu_dpm_get_sclk(struct amdgpu_device *adev, bool low);
extern int amdgpu_dpm_get_mclk(struct amdgpu_device *adev, bool low);
+int amdgpu_dpm_set_xgmi_pstate(struct amdgpu_device *adev,
+ uint32_t pstate);
+
+int amdgpu_dpm_switch_power_profile(struct amdgpu_device *adev,
+ enum PP_SMC_POWER_PROFILE type,
+ bool en);
+
+int amdgpu_dpm_baco_reset(struct amdgpu_device *adev);
+
+int amdgpu_dpm_mode2_reset(struct amdgpu_device *adev);
+
+bool amdgpu_dpm_is_baco_supported(struct amdgpu_device *adev);
+
+int amdgpu_dpm_set_mp1_state(struct amdgpu_device *adev,
+ enum pp_mp1_state mp1_state);
+
+int amdgpu_dpm_baco_exit(struct amdgpu_device *adev);
+
+int amdgpu_dpm_baco_enter(struct amdgpu_device *adev);
+
#endif
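
Converting amdgpu_dpm_switch_power_profile from a macro to a function matters because the old macro dereferenced pp_funcs unconditionally; the function added above checks both the sw-SMU path and the pp_funcs pointer first. A sketch of a now-safe call (PP_SMC_POWER_PROFILE_COMPUTE is assumed to be an available profile enum value):

	/* Safe even when pp_funcs is NULL; returns 0 if no backend handles it. */
	amdgpu_dpm_switch_power_profile(adev, PP_SMC_POWER_PROFILE_COMPUTE, true);
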
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
index 01a793a0cbf7..94e2fd758e01 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
@@ -1004,7 +1004,7 @@ static const struct pci_device_id pciidlist[] = {
{0x1002, 0x734F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_NAVI14},
/* Renoir */
- {0x1002, 0x1636, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RENOIR|AMD_IS_APU|AMD_EXP_HW_SUPPORT},
+ {0x1002, 0x1636, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RENOIR|AMD_IS_APU},
/* Navi12 */
{0x1002, 0x7360, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_NAVI12|AMD_EXP_HW_SUPPORT},
@@ -1147,7 +1147,7 @@ static int amdgpu_pmops_suspend(struct device *dev)
{
struct drm_device *drm_dev = dev_get_drvdata(dev);
- return amdgpu_device_suspend(drm_dev, true, true);
+ return amdgpu_device_suspend(drm_dev, true);
}
static int amdgpu_pmops_resume(struct device *dev)
@@ -1155,13 +1155,14 @@ static int amdgpu_pmops_resume(struct device *dev)
struct drm_device *drm_dev = dev_get_drvdata(dev);
/* GPU comes up enabled by the bios on resume */
- if (amdgpu_device_is_px(drm_dev)) {
+ if (amdgpu_device_supports_boco(drm_dev) ||
+ amdgpu_device_supports_baco(drm_dev)) {
pm_runtime_disable(dev);
pm_runtime_set_active(dev);
pm_runtime_enable(dev);
}
- return amdgpu_device_resume(drm_dev, true, true);
+ return amdgpu_device_resume(drm_dev, true);
}
static int amdgpu_pmops_freeze(struct device *dev)
@@ -1170,7 +1171,7 @@ static int amdgpu_pmops_freeze(struct device *dev)
struct amdgpu_device *adev = drm_dev->dev_private;
int r;
- r = amdgpu_device_suspend(drm_dev, false, true);
+ r = amdgpu_device_suspend(drm_dev, true);
if (r)
return r;
return amdgpu_asic_reset(adev);
@@ -1180,46 +1181,66 @@ static int amdgpu_pmops_thaw(struct device *dev)
{
struct drm_device *drm_dev = dev_get_drvdata(dev);
- return amdgpu_device_resume(drm_dev, false, true);
+ return amdgpu_device_resume(drm_dev, true);
}
static int amdgpu_pmops_poweroff(struct device *dev)
{
struct drm_device *drm_dev = dev_get_drvdata(dev);
- return amdgpu_device_suspend(drm_dev, true, true);
+ return amdgpu_device_suspend(drm_dev, true);
}
static int amdgpu_pmops_restore(struct device *dev)
{
struct drm_device *drm_dev = dev_get_drvdata(dev);
- return amdgpu_device_resume(drm_dev, false, true);
+ return amdgpu_device_resume(drm_dev, true);
}
static int amdgpu_pmops_runtime_suspend(struct device *dev)
{
struct pci_dev *pdev = to_pci_dev(dev);
struct drm_device *drm_dev = pci_get_drvdata(pdev);
- int ret;
+ struct amdgpu_device *adev = drm_dev->dev_private;
+ int ret, i;
- if (!amdgpu_device_is_px(drm_dev)) {
+ if (!adev->runpm) {
pm_runtime_forbid(dev);
return -EBUSY;
}
- drm_dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
+ /* wait for all rings to drain before suspending */
+ for (i = 0; i < AMDGPU_MAX_RINGS; i++) {
+ struct amdgpu_ring *ring = adev->rings[i];
+ if (ring && ring->sched.ready) {
+ ret = amdgpu_fence_wait_empty(ring);
+ if (ret)
+ return -EBUSY;
+ }
+ }
+
+ if (amdgpu_device_supports_boco(drm_dev))
+ drm_dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
drm_kms_helper_poll_disable(drm_dev);
- ret = amdgpu_device_suspend(drm_dev, false, false);
- pci_save_state(pdev);
- pci_disable_device(pdev);
- pci_ignore_hotplug(pdev);
- if (amdgpu_is_atpx_hybrid())
- pci_set_power_state(pdev, PCI_D3cold);
- else if (!amdgpu_has_atpx_dgpu_power_cntl())
- pci_set_power_state(pdev, PCI_D3hot);
- drm_dev->switch_power_state = DRM_SWITCH_POWER_DYNAMIC_OFF;
+ ret = amdgpu_device_suspend(drm_dev, false);
+ if (amdgpu_device_supports_boco(drm_dev)) {
+ /* Only need to handle PCI state in the driver for ATPX;
+ * the PCI core handles it for _PR3.
+ */
+ if (amdgpu_is_atpx_hybrid()) {
+ pci_ignore_hotplug(pdev);
+ } else {
+ pci_save_state(pdev);
+ pci_disable_device(pdev);
+ pci_ignore_hotplug(pdev);
+ pci_set_power_state(pdev, PCI_D3cold);
+ }
+ drm_dev->switch_power_state = DRM_SWITCH_POWER_DYNAMIC_OFF;
+ } else if (amdgpu_device_supports_baco(drm_dev)) {
+ amdgpu_device_baco_enter(drm_dev);
+ }
return 0;
}
@@ -1228,34 +1249,45 @@ static int amdgpu_pmops_runtime_resume(struct device *dev)
{
struct pci_dev *pdev = to_pci_dev(dev);
struct drm_device *drm_dev = pci_get_drvdata(pdev);
+ struct amdgpu_device *adev = drm_dev->dev_private;
int ret;
- if (!amdgpu_device_is_px(drm_dev))
+ if (!adev->runpm)
return -EINVAL;
- drm_dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
-
- if (amdgpu_is_atpx_hybrid() ||
- !amdgpu_has_atpx_dgpu_power_cntl())
- pci_set_power_state(pdev, PCI_D0);
- pci_restore_state(pdev);
- ret = pci_enable_device(pdev);
- if (ret)
- return ret;
- pci_set_master(pdev);
-
- ret = amdgpu_device_resume(drm_dev, false, false);
+ if (amdgpu_device_supports_boco(drm_dev)) {
+ drm_dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
+
+ /* Only need to handle PCI state in the driver for ATPX;
+ * the PCI core handles it for _PR3.
+ */
+ if (amdgpu_is_atpx_hybrid()) {
+ pci_set_master(pdev);
+ } else {
+ pci_set_power_state(pdev, PCI_D0);
+ pci_restore_state(pdev);
+ ret = pci_enable_device(pdev);
+ if (ret)
+ return ret;
+ pci_set_master(pdev);
+ }
+ } else if (amdgpu_device_supports_baco(drm_dev)) {
+ amdgpu_device_baco_exit(drm_dev);
+ }
+ ret = amdgpu_device_resume(drm_dev, false);
drm_kms_helper_poll_enable(drm_dev);
- drm_dev->switch_power_state = DRM_SWITCH_POWER_ON;
+ if (amdgpu_device_supports_boco(drm_dev))
+ drm_dev->switch_power_state = DRM_SWITCH_POWER_ON;
return 0;
}
static int amdgpu_pmops_runtime_idle(struct device *dev)
{
struct drm_device *drm_dev = dev_get_drvdata(dev);
+ struct amdgpu_device *adev = drm_dev->dev_private;
struct drm_crtc *crtc;
- if (!amdgpu_device_is_px(drm_dev)) {
+ if (!adev->runpm) {
pm_runtime_forbid(dev);
return -EBUSY;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c
index 143753d237e7..2672dc64a310 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c
@@ -69,7 +69,7 @@ amdgpufb_release(struct fb_info *info, int user)
return 0;
}
-static struct fb_ops amdgpufb_ops = {
+static const struct fb_ops amdgpufb_ops = {
.owner = THIS_MODULE,
DRM_FB_HELPER_DEFAULT_OPS,
.fb_open = amdgpufb_open,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
index 377fe20bce23..3c01252b1e0e 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
@@ -34,6 +34,7 @@
#include <linux/kref.h>
#include <linux/slab.h>
#include <linux/firmware.h>
+#include <linux/pm_runtime.h>
#include <drm/drm_debugfs.h>
@@ -154,7 +155,7 @@ int amdgpu_fence_emit(struct amdgpu_ring *ring, struct dma_fence **f,
seq);
amdgpu_ring_emit_fence(ring, ring->fence_drv.gpu_addr,
seq, flags | AMDGPU_FENCE_FLAG_INT);
-
+ pm_runtime_get_noresume(adev->ddev->dev);
ptr = &ring->fence_drv.fences[seq & ring->fence_drv.num_fences_mask];
if (unlikely(rcu_dereference_protected(*ptr, 1))) {
struct dma_fence *old;
@@ -234,6 +235,7 @@ static void amdgpu_fence_schedule_fallback(struct amdgpu_ring *ring)
bool amdgpu_fence_process(struct amdgpu_ring *ring)
{
struct amdgpu_fence_driver *drv = &ring->fence_drv;
+ struct amdgpu_device *adev = ring->adev;
uint32_t seq, last_seq;
int r;
@@ -274,6 +276,8 @@ bool amdgpu_fence_process(struct amdgpu_ring *ring)
BUG();
dma_fence_put(fence);
+ pm_runtime_mark_last_busy(adev->ddev->dev);
+ pm_runtime_put_autosuspend(adev->ddev->dev);
} while (last_seq != seq);
return true;
@@ -737,10 +741,18 @@ static int amdgpu_debugfs_gpu_recover(struct seq_file *m, void *data)
struct drm_info_node *node = (struct drm_info_node *) m->private;
struct drm_device *dev = node->minor->dev;
struct amdgpu_device *adev = dev->dev_private;
+ int r;
+
+ r = pm_runtime_get_sync(dev->dev);
+ if (r < 0)
+ return 0;
seq_printf(m, "gpu recover\n");
amdgpu_device_gpu_recover(adev, NULL);
+ pm_runtime_mark_last_busy(dev->dev);
+ pm_runtime_put_autosuspend(dev->dev);
+
return 0;
}
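
The fence-driver changes establish a simple runtime-PM invariant: amdgpu_fence_emit takes one no-resume reference per fence, and amdgpu_fence_process drops one per retired fence, so the usage count returns to its baseline (and autosuspend can fire) only once every emitted fence has signaled. A standalone userspace model of that counting discipline (no kernel APIs; purely illustrative):

	#include <assert.h>

	static int usage_count;	/* models the device's runtime-PM usage count */

	static void emit_fence(void)   { usage_count++; }  /* pm_runtime_get_noresume */
	static void retire_fence(void) { usage_count--; }  /* pm_runtime_put_autosuspend */

	int main(void)
	{
		for (int i = 0; i < 8; i++)
			emit_fence();
		for (int i = 0; i < 8; i++)
			retire_fence();
		assert(usage_count == 0);	/* only now may the GPU runtime-suspend */
		return 0;
	}
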
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c
index 19705e399905..e01e681d2a60 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c
@@ -302,6 +302,7 @@ int amdgpu_gart_map(struct amdgpu_device *adev, uint64_t offset,
* @pages: number of pages to bind
* @pagelist: pages to bind
* @dma_addr: DMA addresses of pages
+ * @flags: page table entry flags
*
* Binds the requested pages to the gart page table
* (all asics).
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c
index e00b46180d2e..b88b8b82bb64 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c
@@ -543,12 +543,6 @@ void amdgpu_gfx_off_ctrl(struct amdgpu_device *adev, bool enable)
if (!(adev->pm.pp_feature & PP_GFXOFF_MASK))
return;
- if (!is_support_sw_smu(adev) &&
- (!adev->powerplay.pp_funcs ||
- !adev->powerplay.pp_funcs->set_powergating_by_smu))
- return;
-
-
mutex_lock(&adev->gfx.gfx_off_mutex);
if (!enable)
@@ -641,7 +635,7 @@ int amdgpu_gfx_process_ras_data_cb(struct amdgpu_device *adev,
kgd2kfd_set_sram_ecc_flag(adev->kfd.dev);
if (adev->gfx.funcs->query_ras_error_count)
adev->gfx.funcs->query_ras_error_count(adev, err_data);
- amdgpu_ras_reset_gpu(adev, 0);
+ amdgpu_ras_reset_gpu(adev);
}
return AMDGPU_RAS_SUCCESS;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h
index 0ae0a2715b0d..af4bd279f42f 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h
@@ -76,11 +76,15 @@ struct kiq_pm4_funcs {
struct amdgpu_ring *ring,
u64 addr,
u64 seq);
+ void (*kiq_invalidate_tlbs)(struct amdgpu_ring *kiq_ring,
+ uint16_t pasid, uint32_t flush_type,
+ bool all_hub);
/* Packet sizes */
int set_resources_size;
int map_queues_size;
int unmap_queues_size;
int query_status_size;
+ int invalidate_tlbs_size;
};
struct amdgpu_kiq {
@@ -269,8 +273,12 @@ struct amdgpu_gfx {
bool me_fw_write_wait;
bool cp_fw_write_wait;
struct amdgpu_ring gfx_ring[AMDGPU_MAX_GFX_RINGS];
+ struct drm_gpu_scheduler *gfx_sched[AMDGPU_MAX_GFX_RINGS];
+ uint32_t num_gfx_sched;
unsigned num_gfx_rings;
struct amdgpu_ring compute_ring[AMDGPU_MAX_COMPUTE_RINGS];
+ struct drm_gpu_scheduler *compute_sched[AMDGPU_MAX_COMPUTE_RINGS];
+ uint32_t num_compute_sched;
unsigned num_compute_rings;
struct amdgpu_irq_src eop_irq;
struct amdgpu_irq_src priv_reg_irq;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c
index a12f33c0f5df..5884ab590486 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c
@@ -223,7 +223,7 @@ void amdgpu_gmc_agp_location(struct amdgpu_device *adev, struct amdgpu_gmc *mc)
u64 size_af, size_bf;
if (amdgpu_sriov_vf(adev)) {
- mc->agp_start = 0xffffffff;
+ mc->agp_start = 0xffffffffffff;
mc->agp_end = 0x0;
mc->agp_size = 0;
@@ -333,3 +333,43 @@ void amdgpu_gmc_ras_fini(struct amdgpu_device *adev)
amdgpu_mmhub_ras_fini(adev);
amdgpu_xgmi_ras_fini(adev);
}
+
+ /*
+ * The latest engine allocation on gfx9/10 is:
+ * Engine 2, 3: firmware
+ * Engine 0, 1, 4~16: amdgpu ring,
+ * subject to change when ring number changes
+ * Engine 17: Gart flushes
+ */
+#define GFXHUB_FREE_VM_INV_ENGS_BITMAP 0x1FFF3
+#define MMHUB_FREE_VM_INV_ENGS_BITMAP 0x1FFF3
+
+int amdgpu_gmc_allocate_vm_inv_eng(struct amdgpu_device *adev)
+{
+ struct amdgpu_ring *ring;
+ unsigned vm_inv_engs[AMDGPU_MAX_VMHUBS] =
+ {GFXHUB_FREE_VM_INV_ENGS_BITMAP, MMHUB_FREE_VM_INV_ENGS_BITMAP,
+ GFXHUB_FREE_VM_INV_ENGS_BITMAP};
+ unsigned i;
+ unsigned vmhub, inv_eng;
+
+ for (i = 0; i < adev->num_rings; ++i) {
+ ring = adev->rings[i];
+ vmhub = ring->funcs->vmhub;
+
+ inv_eng = ffs(vm_inv_engs[vmhub]);
+ if (!inv_eng) {
+ dev_err(adev->dev, "no VM inv eng for ring %s\n",
+ ring->name);
+ return -EINVAL;
+ }
+
+ ring->vm_inv_eng = inv_eng - 1;
+ vm_inv_engs[vmhub] &= ~(1 << ring->vm_inv_eng);
+
+ dev_info(adev->dev, "ring %s uses VM inv eng %u on hub %u\n",
+ ring->name, ring->vm_inv_eng, ring->funcs->vmhub);
+ }
+
+ return 0;
+}
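
The bitmaps encode which invalidation engines are free: 0x1FFF3 has bits 0, 1 and 4-16 set, leaving engines 2, 3 (firmware) and 17 (GART flushes) reserved, and ffs() always hands out the lowest free engine. A standalone demo of the same allocation loop (plain C, no kernel headers needed):

	#include <stdio.h>
	#include <strings.h>	/* ffs() */

	int main(void)
	{
		unsigned int free_engs = 0x1FFF3;	/* GFXHUB_FREE_VM_INV_ENGS_BITMAP */

		for (int ring = 0; ring < 6; ring++) {
			int eng = ffs(free_engs);
			if (!eng)
				break;	/* exhausted: the driver returns -EINVAL */
			free_engs &= ~(1u << (eng - 1));
			printf("ring %d -> inv eng %d\n", ring, eng - 1);
		}
		return 0;	/* prints engines 0, 1, 4, 5, 6, 7 */
	}
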
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h
index b499a3de8bb6..86267baca07c 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h
@@ -60,6 +60,11 @@
*/
#define AMDGPU_GMC_FAULT_TIMEOUT 5000ULL
+/*
+ * Default stolen memory size, 1024 * 768 * 4 bytes (3 MiB)
+ */
+#define AMDGPU_STOLEN_BIST_TRAINING_DEFAULT_SIZE 0x300000ULL
+
struct firmware;
/*
@@ -92,6 +97,9 @@ struct amdgpu_gmc_funcs {
/* flush the vm tlb via mmio */
void (*flush_gpu_tlb)(struct amdgpu_device *adev, uint32_t vmid,
uint32_t vmhub, uint32_t flush_type);
+ /* flush the vm tlb via pasid */
+ int (*flush_gpu_tlb_pasid)(struct amdgpu_device *adev, uint16_t pasid,
+ uint32_t flush_type, bool all_hub);
/* flush the vm tlb via ring */
uint64_t (*emit_flush_gpu_tlb)(struct amdgpu_ring *ring, unsigned vmid,
uint64_t pd_addr);
@@ -216,6 +224,9 @@ struct amdgpu_gmc {
};
#define amdgpu_gmc_flush_gpu_tlb(adev, vmid, vmhub, type) ((adev)->gmc.gmc_funcs->flush_gpu_tlb((adev), (vmid), (vmhub), (type)))
+#define amdgpu_gmc_flush_gpu_tlb_pasid(adev, pasid, type, allhub) \
+ ((adev)->gmc.gmc_funcs->flush_gpu_tlb_pasid \
+ ((adev), (pasid), (type), (allhub)))
#define amdgpu_gmc_emit_flush_gpu_tlb(r, vmid, addr) (r)->adev->gmc.gmc_funcs->emit_flush_gpu_tlb((r), (vmid), (addr))
#define amdgpu_gmc_emit_pasid_mapping(r, vmid, pasid) (r)->adev->gmc.gmc_funcs->emit_pasid_mapping((r), (vmid), (pasid))
#define amdgpu_gmc_map_mtype(adev, flags) (adev)->gmc.gmc_funcs->map_mtype((adev),(flags))
@@ -267,5 +278,6 @@ bool amdgpu_gmc_filter_faults(struct amdgpu_device *adev, uint64_t addr,
uint16_t pasid, uint64_t timestamp);
int amdgpu_gmc_ras_late_init(struct amdgpu_device *adev);
void amdgpu_gmc_ras_fini(struct amdgpu_device *adev);
+int amdgpu_gmc_allocate_vm_inv_eng(struct amdgpu_device *adev);
#endif
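
The new flush_gpu_tlb_pasid hook may not be implemented on every ASIC, so a caller would typically guard it before using the wrapper macro. A minimal sketch (flush_type 0, flushing all hubs; the pasid value would come from the VM being invalidated):

	/* Sketch only: guard the optional hook, then flush by pasid. */
	if (adev->gmc.gmc_funcs->flush_gpu_tlb_pasid)
		amdgpu_gmc_flush_gpu_tlb_pasid(adev, pasid, 0, true);
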
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c
index 6f9289735e31..3a67f6c046d4 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c
@@ -206,7 +206,7 @@ static int amdgpu_vmid_grab_idle(struct amdgpu_vm *vm,
int r;
if (ring->vmid_wait && !dma_fence_is_signaled(ring->vmid_wait))
- return amdgpu_sync_fence(adev, sync, ring->vmid_wait, false);
+ return amdgpu_sync_fence(sync, ring->vmid_wait, false);
fences = kmalloc_array(sizeof(void *), id_mgr->num_ids, GFP_KERNEL);
if (!fences)
@@ -241,7 +241,7 @@ static int amdgpu_vmid_grab_idle(struct amdgpu_vm *vm,
return -ENOMEM;
}
- r = amdgpu_sync_fence(adev, sync, &array->base, false);
+ r = amdgpu_sync_fence(sync, &array->base, false);
dma_fence_put(ring->vmid_wait);
ring->vmid_wait = &array->base;
return r;
@@ -294,7 +294,7 @@ static int amdgpu_vmid_grab_reserved(struct amdgpu_vm *vm,
tmp = amdgpu_sync_peek_fence(&(*id)->active, ring);
if (tmp) {
*id = NULL;
- r = amdgpu_sync_fence(adev, sync, tmp, false);
+ r = amdgpu_sync_fence(sync, tmp, false);
return r;
}
needs_flush = true;
@@ -303,7 +303,7 @@ static int amdgpu_vmid_grab_reserved(struct amdgpu_vm *vm,
/* Good we can use this VMID. Remember this submission as
* user of the VMID.
*/
- r = amdgpu_sync_fence(ring->adev, &(*id)->active, fence, false);
+ r = amdgpu_sync_fence(&(*id)->active, fence, false);
if (r)
return r;
@@ -375,7 +375,7 @@ static int amdgpu_vmid_grab_used(struct amdgpu_vm *vm,
/* Good, we can use this VMID. Remember this submission as
* user of the VMID.
*/
- r = amdgpu_sync_fence(ring->adev, &(*id)->active, fence, false);
+ r = amdgpu_sync_fence(&(*id)->active, fence, false);
if (r)
return r;
@@ -435,8 +435,7 @@ int amdgpu_vmid_grab(struct amdgpu_vm *vm, struct amdgpu_ring *ring,
id = idle;
/* Remember this submission as user of the VMID */
- r = amdgpu_sync_fence(ring->adev, &id->active,
- fence, false);
+ r = amdgpu_sync_fence(&id->active, fence, false);
if (r)
goto error;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.c
index 6d8f05511aba..111a301ce878 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.c
@@ -66,7 +66,6 @@ int amdgpu_ih_ring_init(struct amdgpu_device *adev, struct amdgpu_ih_ring *ih,
if (ih->ring == NULL)
return -ENOMEM;
- memset((void *)ih->ring, 0, ih->ring_size + 8);
ih->gpu_addr = dma_addr;
ih->wptr_addr = dma_addr + ih->ring_size;
ih->wptr_cpu = &ih->ring[ih->ring_size / 4];
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c
index 30d540d23b77..5ed4227f304b 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c
@@ -55,6 +55,7 @@
#include "amdgpu_connectors.h"
#include "amdgpu_trace.h"
#include "amdgpu_amdkfd.h"
+#include "amdgpu_ras.h"
#include <linux/pm_runtime.h>
@@ -162,13 +163,15 @@ irqreturn_t amdgpu_irq_handler(int irq, void *arg)
* register to check whether the interrupt is triggered or not, and properly
* ack the interrupt if it is there
*/
- if (adev->nbio.funcs &&
- adev->nbio.funcs->handle_ras_controller_intr_no_bifring)
- adev->nbio.funcs->handle_ras_controller_intr_no_bifring(adev);
-
- if (adev->nbio.funcs &&
- adev->nbio.funcs->handle_ras_err_event_athub_intr_no_bifring)
- adev->nbio.funcs->handle_ras_err_event_athub_intr_no_bifring(adev);
+ if (amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__PCIE_BIF)) {
+ if (adev->nbio.funcs &&
+ adev->nbio.funcs->handle_ras_controller_intr_no_bifring)
+ adev->nbio.funcs->handle_ras_controller_intr_no_bifring(adev);
+
+ if (adev->nbio.funcs &&
+ adev->nbio.funcs->handle_ras_err_event_athub_intr_no_bifring)
+ adev->nbio.funcs->handle_ras_err_event_athub_intr_no_bifring(adev);
+ }
return ret;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
index 4fb20e870e63..d42be880a236 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
@@ -153,7 +153,6 @@ int amdgpu_job_submit(struct amdgpu_job *job, struct drm_sched_entity *entity,
if (r)
return r;
- job->owner = owner;
*f = dma_fence_get(&job->base.s_fence->finished);
amdgpu_job_free_resources(job);
priority = job->base.s_priority;
@@ -193,8 +192,7 @@ static struct dma_fence *amdgpu_job_dependency(struct drm_sched_job *sched_job,
fence = amdgpu_sync_get_fence(&job->sync, &explicit);
if (fence && explicit) {
if (drm_sched_dependency_optimized(fence, s_entity)) {
- r = amdgpu_sync_fence(ring->adev, &job->sched_sync,
- fence, false);
+ r = amdgpu_sync_fence(&job->sched_sync, fence, false);
if (r)
DRM_ERROR("Error adding fence (%d)\n", r);
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.h
index dc7ee9358dcd..3f7b8433d179 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.h
@@ -49,7 +49,6 @@ struct amdgpu_job {
uint32_t preamble_status;
uint32_t preemption_status;
uint32_t num_ibs;
- void *owner;
bool vm_needs_flush;
uint64_t vm_pd_addr;
unsigned vmid;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_jpeg.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_jpeg.c
new file mode 100644
index 000000000000..5727f00afc8e
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_jpeg.c
@@ -0,0 +1,211 @@
+/*
+ * Copyright 2019 Advanced Micro Devices, Inc.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ */
+
+#include "amdgpu.h"
+#include "amdgpu_jpeg.h"
+#include "amdgpu_pm.h"
+#include "soc15d.h"
+#include "soc15_common.h"
+
+#define JPEG_IDLE_TIMEOUT msecs_to_jiffies(1000)
+
+static void amdgpu_jpeg_idle_work_handler(struct work_struct *work);
+
+int amdgpu_jpeg_sw_init(struct amdgpu_device *adev)
+{
+ INIT_DELAYED_WORK(&adev->jpeg.idle_work, amdgpu_jpeg_idle_work_handler);
+
+ return 0;
+}
+
+int amdgpu_jpeg_sw_fini(struct amdgpu_device *adev)
+{
+ int i;
+
+ cancel_delayed_work_sync(&adev->jpeg.idle_work);
+
+ for (i = 0; i < adev->jpeg.num_jpeg_inst; ++i) {
+ if (adev->jpeg.harvest_config & (1 << i))
+ continue;
+
+ amdgpu_ring_fini(&adev->jpeg.inst[i].ring_dec);
+ }
+
+ return 0;
+}
+
+int amdgpu_jpeg_suspend(struct amdgpu_device *adev)
+{
+ cancel_delayed_work_sync(&adev->jpeg.idle_work);
+
+ return 0;
+}
+
+int amdgpu_jpeg_resume(struct amdgpu_device *adev)
+{
+ return 0;
+}
+
+static void amdgpu_jpeg_idle_work_handler(struct work_struct *work)
+{
+ struct amdgpu_device *adev =
+ container_of(work, struct amdgpu_device, jpeg.idle_work.work);
+ unsigned int fences = 0;
+ unsigned int i;
+
+ for (i = 0; i < adev->jpeg.num_jpeg_inst; ++i) {
+ if (adev->jpeg.harvest_config & (1 << i))
+ continue;
+
+ fences += amdgpu_fence_count_emitted(&adev->jpeg.inst[i].ring_dec);
+ }
+
+ if (fences == 0)
+ amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_JPEG,
+ AMD_PG_STATE_GATE);
+ else
+ schedule_delayed_work(&adev->jpeg.idle_work, JPEG_IDLE_TIMEOUT);
+}
+
+void amdgpu_jpeg_ring_begin_use(struct amdgpu_ring *ring)
+{
+ struct amdgpu_device *adev = ring->adev;
+ bool set_clocks = !cancel_delayed_work_sync(&adev->jpeg.idle_work);
+
+ if (set_clocks)
+ amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_JPEG,
+ AMD_PG_STATE_UNGATE);
+}
+
+void amdgpu_jpeg_ring_end_use(struct amdgpu_ring *ring)
+{
+ schedule_delayed_work(&ring->adev->jpeg.idle_work, JPEG_IDLE_TIMEOUT);
+}
+
+int amdgpu_jpeg_dec_ring_test_ring(struct amdgpu_ring *ring)
+{
+ struct amdgpu_device *adev = ring->adev;
+ uint32_t tmp = 0;
+ unsigned i;
+ int r;
+
+ WREG32(adev->jpeg.inst[ring->me].external.jpeg_pitch, 0xCAFEDEAD);
+ r = amdgpu_ring_alloc(ring, 3);
+ if (r)
+ return r;
+
+ amdgpu_ring_write(ring, PACKET0(adev->jpeg.internal.jpeg_pitch, 0));
+ amdgpu_ring_write(ring, 0xDEADBEEF);
+ amdgpu_ring_commit(ring);
+
+ for (i = 0; i < adev->usec_timeout; i++) {
+ tmp = RREG32(adev->jpeg.inst[ring->me].external.jpeg_pitch);
+ if (tmp == 0xDEADBEEF)
+ break;
+ udelay(1);
+ }
+
+ if (i >= adev->usec_timeout)
+ r = -ETIMEDOUT;
+
+ return r;
+}
+
+static int amdgpu_jpeg_dec_set_reg(struct amdgpu_ring *ring, uint32_t handle,
+ struct dma_fence **fence)
+{
+ struct amdgpu_device *adev = ring->adev;
+ struct amdgpu_job *job;
+ struct amdgpu_ib *ib;
+ struct dma_fence *f = NULL;
+ const unsigned ib_size_dw = 16;
+ int i, r;
+
+ r = amdgpu_job_alloc_with_ib(ring->adev, ib_size_dw * 4, &job);
+ if (r)
+ return r;
+
+ ib = &job->ibs[0];
+
+ ib->ptr[0] = PACKETJ(adev->jpeg.internal.jpeg_pitch, 0, 0, PACKETJ_TYPE0);
+ ib->ptr[1] = 0xDEADBEEF;
+ for (i = 2; i < 16; i += 2) {
+ ib->ptr[i] = PACKETJ(0, 0, 0, PACKETJ_TYPE6);
+ ib->ptr[i+1] = 0;
+ }
+ ib->length_dw = 16;
+
+ r = amdgpu_job_submit_direct(job, ring, &f);
+ if (r)
+ goto err;
+
+ if (fence)
+ *fence = dma_fence_get(f);
+ dma_fence_put(f);
+
+ return 0;
+
+err:
+ amdgpu_job_free(job);
+ return r;
+}
+
+int amdgpu_jpeg_dec_ring_test_ib(struct amdgpu_ring *ring, long timeout)
+{
+ struct amdgpu_device *adev = ring->adev;
+ uint32_t tmp = 0;
+ unsigned i;
+ struct dma_fence *fence = NULL;
+ long r = 0;
+
+ r = amdgpu_jpeg_dec_set_reg(ring, 1, &fence);
+ if (r)
+ goto error;
+
+ r = dma_fence_wait_timeout(fence, false, timeout);
+ if (r == 0) {
+ r = -ETIMEDOUT;
+ goto error;
+ } else if (r < 0) {
+ goto error;
+ } else {
+ r = 0;
+ }
+
+ for (i = 0; i < adev->usec_timeout; i++) {
+ tmp = RREG32(adev->jpeg.inst[ring->me].external.jpeg_pitch);
+ if (tmp == 0xDEADBEEF)
+ break;
+ udelay(1);
+ }
+
+ if (i >= adev->usec_timeout)
+ r = -ETIMEDOUT;
+
+ dma_fence_put(fence);
+error:
+ return r;
+}
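
Both tests use the driver's usual scratch-register handshake: seed the JPEG pitch register with 0xCAFEDEAD, have the ring (or an IB) write 0xDEADBEEF through PACKET0/PACKETJ, then poll up to adev->usec_timeout for the value to land. A sketch of how a caller might chain them (normally these are wired up through the ring's test hooks rather than called directly):

	r = amdgpu_jpeg_dec_ring_test_ring(ring);
	if (!r)	/* ring alive: now exercise the indirect (IB) path */
		r = amdgpu_jpeg_dec_ring_test_ib(ring, msecs_to_jiffies(1000));
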
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_jpeg.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_jpeg.h
new file mode 100644
index 000000000000..bd9ef9cc86de
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_jpeg.h
@@ -0,0 +1,64 @@
+/*
+ * Copyright 2019 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#ifndef __AMDGPU_JPEG_H__
+#define __AMDGPU_JPEG_H__
+
+#define AMDGPU_MAX_JPEG_INSTANCES 2
+
+#define AMDGPU_JPEG_HARVEST_JPEG0 (1 << 0)
+#define AMDGPU_JPEG_HARVEST_JPEG1 (1 << 1)
+
+struct amdgpu_jpeg_reg {
+ unsigned jpeg_pitch;
+};
+
+struct amdgpu_jpeg_inst {
+ struct amdgpu_ring ring_dec;
+ struct amdgpu_irq_src irq;
+ struct amdgpu_jpeg_reg external;
+};
+
+struct amdgpu_jpeg {
+ uint8_t num_jpeg_inst;
+ struct amdgpu_jpeg_inst inst[AMDGPU_MAX_JPEG_INSTANCES];
+ struct amdgpu_jpeg_reg internal;
+ struct drm_gpu_scheduler *jpeg_sched[AMDGPU_MAX_JPEG_INSTANCES];
+ uint32_t num_jpeg_sched;
+ unsigned harvest_config;
+ struct delayed_work idle_work;
+ enum amd_powergating_state cur_state;
+};
+
+int amdgpu_jpeg_sw_init(struct amdgpu_device *adev);
+int amdgpu_jpeg_sw_fini(struct amdgpu_device *adev);
+int amdgpu_jpeg_suspend(struct amdgpu_device *adev);
+int amdgpu_jpeg_resume(struct amdgpu_device *adev);
+
+void amdgpu_jpeg_ring_begin_use(struct amdgpu_ring *ring);
+void amdgpu_jpeg_ring_end_use(struct amdgpu_ring *ring);
+
+int amdgpu_jpeg_dec_ring_test_ring(struct amdgpu_ring *ring);
+int amdgpu_jpeg_dec_ring_test_ib(struct amdgpu_ring *ring, long timeout);
+
+#endif /* __AMDGPU_JPEG_H__ */
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
index b6db28a570c2..60591dbc2097 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
@@ -91,7 +91,7 @@ void amdgpu_driver_unload_kms(struct drm_device *dev)
if (amdgpu_sriov_vf(adev))
amdgpu_virt_request_full_gpu(adev, false);
- if (amdgpu_device_is_px(dev)) {
+ if (adev->runpm) {
pm_runtime_get_sync(dev->dev);
pm_runtime_forbid(dev->dev);
}
@@ -150,8 +150,7 @@ int amdgpu_driver_load_kms(struct drm_device *dev, unsigned long flags)
}
dev->dev_private = (void *)adev;
- if ((amdgpu_runtime_pm != 0) &&
- amdgpu_has_atpx() &&
+ if (amdgpu_has_atpx() &&
(amdgpu_is_atpx_hybrid() ||
amdgpu_has_atpx_dgpu_power_cntl()) &&
((flags & AMD_IS_APU) == 0) &&
@@ -170,6 +169,13 @@ int amdgpu_driver_load_kms(struct drm_device *dev, unsigned long flags)
goto out;
}
+ if (amdgpu_device_supports_boco(dev) &&
+ (amdgpu_runtime_pm != 0)) /* enable runpm by default */
+ adev->runpm = true;
+ else if (amdgpu_device_supports_baco(dev) &&
+ (amdgpu_runtime_pm > 0)) /* enable runpm if runpm=1 */
+ adev->runpm = true;
+
/* Call ACPI methods: require modeset init
* but failure is not fatal
*/
@@ -180,7 +186,7 @@ int amdgpu_driver_load_kms(struct drm_device *dev, unsigned long flags)
"Error during ACPI methods call\n");
}
- if (amdgpu_device_is_px(dev)) {
+ if (adev->runpm) {
dev_pm_set_driver_flags(dev->dev, DPM_FLAG_NEVER_SKIP);
pm_runtime_use_autosuspend(dev->dev);
pm_runtime_set_autosuspend_delay(dev->dev, 5000);
@@ -193,7 +199,7 @@ int amdgpu_driver_load_kms(struct drm_device *dev, unsigned long flags)
out:
if (r) {
/* balance pm_runtime_get_sync in amdgpu_driver_unload_kms */
- if (adev->rmmio && amdgpu_device_is_px(dev))
+ if (adev->rmmio && adev->runpm)
pm_runtime_put_noidle(dev->dev);
amdgpu_driver_unload_kms(dev);
}
@@ -293,6 +299,10 @@ static int amdgpu_firmware_info(struct drm_amdgpu_info_firmware *fw_info,
fw_info->ver = adev->dm.dmcu_fw_version;
fw_info->feature = 0;
break;
+ case AMDGPU_INFO_FW_DMCUB:
+ fw_info->ver = adev->dm.dmcub_fw_version;
+ fw_info->feature = 0;
+ break;
default:
return -EINVAL;
}
@@ -396,12 +406,14 @@ static int amdgpu_hw_ip_info(struct amdgpu_device *adev,
ib_size_alignment = 1;
break;
case AMDGPU_HW_IP_VCN_JPEG:
- type = AMD_IP_BLOCK_TYPE_VCN;
- for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
- if (adev->uvd.harvest_config & (1 << i))
+ type = (amdgpu_device_ip_get_ip_block(adev, AMD_IP_BLOCK_TYPE_JPEG)) ?
+ AMD_IP_BLOCK_TYPE_JPEG : AMD_IP_BLOCK_TYPE_VCN;
+
+ for (i = 0; i < adev->jpeg.num_jpeg_inst; i++) {
+ if (adev->jpeg.harvest_config & (1 << i))
continue;
- if (adev->vcn.inst[i].ring_jpeg.sched.ready)
+ if (adev->jpeg.inst[i].ring_dec.sched.ready)
++num_rings;
}
ib_start_alignment = 16;
@@ -517,9 +529,12 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file
break;
case AMDGPU_HW_IP_VCN_DEC:
case AMDGPU_HW_IP_VCN_ENC:
- case AMDGPU_HW_IP_VCN_JPEG:
type = AMD_IP_BLOCK_TYPE_VCN;
break;
+ case AMDGPU_HW_IP_VCN_JPEG:
+ type = (amdgpu_device_ip_get_ip_block(adev, AMD_IP_BLOCK_TYPE_JPEG)) ?
+ AMD_IP_BLOCK_TYPE_JPEG : AMD_IP_BLOCK_TYPE_VCN;
+ break;
default:
return -EINVAL;
}
@@ -688,10 +703,6 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file
if (adev->pm.dpm_enabled) {
dev_info.max_engine_clock = amdgpu_dpm_get_sclk(adev, false) * 10;
dev_info.max_memory_clock = amdgpu_dpm_get_mclk(adev, false) * 10;
- } else if (amdgpu_sriov_vf(adev) && amdgim_is_hwperf(adev) &&
- adev->virt.ops->get_pp_clk) {
- dev_info.max_engine_clock = amdgpu_virt_get_sclk(adev, false) * 10;
- dev_info.max_memory_clock = amdgpu_virt_get_mclk(adev, false) * 10;
} else {
dev_info.max_engine_clock = adev->clock.default_sclk * 10;
dev_info.max_memory_clock = adev->clock.default_mclk * 10;
@@ -1394,6 +1405,14 @@ static int amdgpu_debugfs_firmware_info(struct seq_file *m, void *data)
seq_printf(m, "DMCU feature version: %u, firmware version: 0x%08x\n",
fw_info.feature, fw_info.ver);
+ /* DMCUB */
+ query_fw.fw_type = AMDGPU_INFO_FW_DMCUB;
+ ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
+ if (ret)
+ return ret;
+ seq_printf(m, "DMCUB feature version: %u, firmware version: 0x%08x\n",
+ fw_info.feature, fw_info.ver);
+
seq_printf(m, "VBIOS version: %s\n", ctx->vbios_version);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
index f205f56e3358..b03b1eb7ba04 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
@@ -37,6 +37,7 @@
#include <linux/hwmon.h>
#include <linux/hwmon-sysfs.h>
#include <linux/nospec.h>
+#include <linux/pm_runtime.h>
#include "hwmgr.h"
#define WIDTH_4K 3840
@@ -158,6 +159,14 @@ static ssize_t amdgpu_get_dpm_state(struct device *dev,
struct drm_device *ddev = dev_get_drvdata(dev);
struct amdgpu_device *adev = ddev->dev_private;
enum amd_pm_state_type pm;
+ int ret;
+
+ if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev))
+ return 0;
+
+ ret = pm_runtime_get_sync(ddev->dev);
+ if (ret < 0)
+ return ret;
if (is_support_sw_smu(adev)) {
if (adev->smu.ppt_funcs->get_current_power_state)
@@ -170,6 +179,9 @@ static ssize_t amdgpu_get_dpm_state(struct device *dev,
pm = adev->pm.dpm.user_state;
}
+ pm_runtime_mark_last_busy(ddev->dev);
+ pm_runtime_put_autosuspend(ddev->dev);
+
return snprintf(buf, PAGE_SIZE, "%s\n",
(pm == POWER_STATE_TYPE_BATTERY) ? "battery" :
(pm == POWER_STATE_TYPE_BALANCED) ? "balanced" : "performance");
@@ -183,6 +195,10 @@ static ssize_t amdgpu_set_dpm_state(struct device *dev,
struct drm_device *ddev = dev_get_drvdata(dev);
struct amdgpu_device *adev = ddev->dev_private;
enum amd_pm_state_type state;
+ int ret;
+
+ if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev))
+ return -EINVAL;
if (strncmp("battery", buf, strlen("battery")) == 0)
state = POWER_STATE_TYPE_BATTERY;
@@ -190,10 +206,12 @@ static ssize_t amdgpu_set_dpm_state(struct device *dev,
state = POWER_STATE_TYPE_BALANCED;
else if (strncmp("performance", buf, strlen("performance")) == 0)
state = POWER_STATE_TYPE_PERFORMANCE;
- else {
- count = -EINVAL;
- goto fail;
- }
+ else
+ return -EINVAL;
+
+ ret = pm_runtime_get_sync(ddev->dev);
+ if (ret < 0)
+ return ret;
if (is_support_sw_smu(adev)) {
mutex_lock(&adev->pm.mutex);
@@ -206,12 +224,11 @@ static ssize_t amdgpu_set_dpm_state(struct device *dev,
adev->pm.dpm.user_state = state;
mutex_unlock(&adev->pm.mutex);
- /* Can't set dpm state when the card is off */
- if (!(adev->flags & AMD_IS_PX) ||
- (ddev->switch_power_state == DRM_SWITCH_POWER_ON))
- amdgpu_pm_compute_clocks(adev);
+ amdgpu_pm_compute_clocks(adev);
}
-fail:
+ pm_runtime_mark_last_busy(ddev->dev);
+ pm_runtime_put_autosuspend(ddev->dev);
+
return count;
}
@@ -282,13 +299,14 @@ static ssize_t amdgpu_get_dpm_forced_performance_level(struct device *dev,
struct drm_device *ddev = dev_get_drvdata(dev);
struct amdgpu_device *adev = ddev->dev_private;
enum amd_dpm_forced_level level = 0xff;
+ int ret;
- if (amdgpu_sriov_vf(adev))
+ if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev))
return 0;
- if ((adev->flags & AMD_IS_PX) &&
- (ddev->switch_power_state != DRM_SWITCH_POWER_ON))
- return snprintf(buf, PAGE_SIZE, "off\n");
+ ret = pm_runtime_get_sync(ddev->dev);
+ if (ret < 0)
+ return ret;
if (is_support_sw_smu(adev))
level = smu_get_performance_level(&adev->smu);
@@ -297,6 +315,9 @@ static ssize_t amdgpu_get_dpm_forced_performance_level(struct device *dev,
else
level = adev->pm.dpm.forced_level;
+ pm_runtime_mark_last_busy(ddev->dev);
+ pm_runtime_put_autosuspend(ddev->dev);
+
return snprintf(buf, PAGE_SIZE, "%s\n",
(level == AMD_DPM_FORCED_LEVEL_AUTO) ? "auto" :
(level == AMD_DPM_FORCED_LEVEL_LOW) ? "low" :
@@ -320,9 +341,7 @@ static ssize_t amdgpu_set_dpm_forced_performance_level(struct device *dev,
enum amd_dpm_forced_level current_level = 0xff;
int ret = 0;
- /* Can't force performance level when the card is off */
- if ((adev->flags & AMD_IS_PX) &&
- (ddev->switch_power_state != DRM_SWITCH_POWER_ON))
+ if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev))
return -EINVAL;
if (strncmp("low", buf, strlen("low")) == 0) {
@@ -344,30 +363,23 @@ static ssize_t amdgpu_set_dpm_forced_performance_level(struct device *dev,
} else if (strncmp("profile_peak", buf, strlen("profile_peak")) == 0) {
level = AMD_DPM_FORCED_LEVEL_PROFILE_PEAK;
} else {
- count = -EINVAL;
- goto fail;
+ return -EINVAL;
}
- /* handle sriov case here */
- if (amdgpu_sriov_vf(adev)) {
- if (amdgim_is_hwperf(adev) &&
- adev->virt.ops->force_dpm_level) {
- mutex_lock(&adev->pm.mutex);
- adev->virt.ops->force_dpm_level(adev, level);
- mutex_unlock(&adev->pm.mutex);
- return count;
- } else {
- return -EINVAL;
- }
- }
+ ret = pm_runtime_get_sync(ddev->dev);
+ if (ret < 0)
+ return ret;
if (is_support_sw_smu(adev))
current_level = smu_get_performance_level(&adev->smu);
else if (adev->powerplay.pp_funcs->get_performance_level)
current_level = amdgpu_dpm_get_performance_level(adev);
- if (current_level == level)
+ if (current_level == level) {
+ pm_runtime_mark_last_busy(ddev->dev);
+ pm_runtime_put_autosuspend(ddev->dev);
return count;
+ }
/* profile_exit setting is valid only when current mode is in profile mode */
if (!(current_level & (AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD |
@@ -376,29 +388,40 @@ static ssize_t amdgpu_set_dpm_forced_performance_level(struct device *dev,
AMD_DPM_FORCED_LEVEL_PROFILE_PEAK)) &&
(level == AMD_DPM_FORCED_LEVEL_PROFILE_EXIT)) {
pr_err("Currently not in any profile mode!\n");
+ pm_runtime_mark_last_busy(ddev->dev);
+ pm_runtime_put_autosuspend(ddev->dev);
return -EINVAL;
}
if (is_support_sw_smu(adev)) {
ret = smu_force_performance_level(&adev->smu, level);
- if (ret)
- count = -EINVAL;
+ if (ret) {
+ pm_runtime_mark_last_busy(ddev->dev);
+ pm_runtime_put_autosuspend(ddev->dev);
+ return -EINVAL;
+ }
} else if (adev->powerplay.pp_funcs->force_performance_level) {
mutex_lock(&adev->pm.mutex);
if (adev->pm.dpm.thermal_active) {
- count = -EINVAL;
mutex_unlock(&adev->pm.mutex);
- goto fail;
+ pm_runtime_mark_last_busy(ddev->dev);
+ pm_runtime_put_autosuspend(ddev->dev);
+ return -EINVAL;
}
ret = amdgpu_dpm_force_performance_level(adev, level);
- if (ret)
- count = -EINVAL;
- else
+ if (ret) {
+ mutex_unlock(&adev->pm.mutex);
+ pm_runtime_mark_last_busy(ddev->dev);
+ pm_runtime_put_autosuspend(ddev->dev);
+ return -EINVAL;
+ } else {
adev->pm.dpm.forced_level = level;
+ }
mutex_unlock(&adev->pm.mutex);
}
+ pm_runtime_mark_last_busy(ddev->dev);
+ pm_runtime_put_autosuspend(ddev->dev);
-fail:
return count;
}
@@ -411,6 +434,10 @@ static ssize_t amdgpu_get_pp_num_states(struct device *dev,
struct pp_states_info data;
int i, buf_len, ret;
+ ret = pm_runtime_get_sync(ddev->dev);
+ if (ret < 0)
+ return ret;
+
if (is_support_sw_smu(adev)) {
ret = smu_get_power_num_states(&adev->smu, &data);
if (ret)
@@ -418,6 +445,9 @@ static ssize_t amdgpu_get_pp_num_states(struct device *dev,
} else if (adev->powerplay.pp_funcs->get_pp_num_states)
amdgpu_dpm_get_pp_num_states(adev, &data);
+ pm_runtime_mark_last_busy(ddev->dev);
+ pm_runtime_put_autosuspend(ddev->dev);
+
buf_len = snprintf(buf, PAGE_SIZE, "states: %d\n", data.nums);
for (i = 0; i < data.nums; i++)
buf_len += snprintf(buf + buf_len, PAGE_SIZE, "%d %s\n", i,
@@ -440,6 +470,13 @@ static ssize_t amdgpu_get_pp_cur_state(struct device *dev,
enum amd_pm_state_type pm = 0;
int i = 0, ret = 0;
+ if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev))
+ return 0;
+
+ ret = pm_runtime_get_sync(ddev->dev);
+ if (ret < 0)
+ return ret;
+
if (is_support_sw_smu(adev)) {
pm = smu_get_current_power_state(smu);
ret = smu_get_power_num_states(smu, &data);
@@ -451,6 +488,9 @@ static ssize_t amdgpu_get_pp_cur_state(struct device *dev,
amdgpu_dpm_get_pp_num_states(adev, &data);
}
+ pm_runtime_mark_last_busy(ddev->dev);
+ pm_runtime_put_autosuspend(ddev->dev);
+
for (i = 0; i < data.nums; i++) {
if (pm == data.states[i])
break;
@@ -469,6 +509,9 @@ static ssize_t amdgpu_get_pp_force_state(struct device *dev,
struct drm_device *ddev = dev_get_drvdata(dev);
struct amdgpu_device *adev = ddev->dev_private;
+ if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev))
+ return 0;
+
if (adev->pp_force_state_enabled)
return amdgpu_get_pp_cur_state(dev, attr, buf);
else
@@ -486,6 +529,9 @@ static ssize_t amdgpu_set_pp_force_state(struct device *dev,
unsigned long idx;
int ret;
+ if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev))
+ return -EINVAL;
+
if (strlen(buf) == 1)
adev->pp_force_state_enabled = false;
else if (is_support_sw_smu(adev))
@@ -495,14 +541,18 @@ static ssize_t amdgpu_set_pp_force_state(struct device *dev,
struct pp_states_info data;
ret = kstrtoul(buf, 0, &idx);
- if (ret || idx >= ARRAY_SIZE(data.states)) {
- count = -EINVAL;
- goto fail;
- }
+ if (ret || idx >= ARRAY_SIZE(data.states))
+ return -EINVAL;
+
idx = array_index_nospec(idx, ARRAY_SIZE(data.states));
amdgpu_dpm_get_pp_num_states(adev, &data);
state = data.states[idx];
+
+ ret = pm_runtime_get_sync(ddev->dev);
+ if (ret < 0)
+ return ret;
+
/* only set user selected power states */
if (state != POWER_STATE_TYPE_INTERNAL_BOOT &&
state != POWER_STATE_TYPE_DEFAULT) {
@@ -510,8 +560,10 @@ static ssize_t amdgpu_set_pp_force_state(struct device *dev,
AMD_PP_TASK_ENABLE_USER_STATE, &state);
adev->pp_force_state_enabled = true;
}
+ pm_runtime_mark_last_busy(ddev->dev);
+ pm_runtime_put_autosuspend(ddev->dev);
}
-fail:
+
return count;
}
@@ -533,17 +585,32 @@ static ssize_t amdgpu_get_pp_table(struct device *dev,
struct drm_device *ddev = dev_get_drvdata(dev);
struct amdgpu_device *adev = ddev->dev_private;
char *table = NULL;
- int size;
+ int size, ret;
+
+ if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev))
+ return 0;
+
+ ret = pm_runtime_get_sync(ddev->dev);
+ if (ret < 0)
+ return ret;
if (is_support_sw_smu(adev)) {
size = smu_sys_get_pp_table(&adev->smu, (void **)&table);
+ pm_runtime_mark_last_busy(ddev->dev);
+ pm_runtime_put_autosuspend(ddev->dev);
if (size < 0)
return size;
- }
- else if (adev->powerplay.pp_funcs->get_pp_table)
+ } else if (adev->powerplay.pp_funcs->get_pp_table) {
size = amdgpu_dpm_get_pp_table(adev, &table);
- else
+ pm_runtime_mark_last_busy(ddev->dev);
+ pm_runtime_put_autosuspend(ddev->dev);
+ if (size < 0)
+ return size;
+ } else {
+ pm_runtime_mark_last_busy(ddev->dev);
+ pm_runtime_put_autosuspend(ddev->dev);
return 0;
+ }
if (size >= PAGE_SIZE)
size = PAGE_SIZE - 1;
@@ -562,13 +629,26 @@ static ssize_t amdgpu_set_pp_table(struct device *dev,
struct amdgpu_device *adev = ddev->dev_private;
int ret = 0;
+ if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev))
+ return -EINVAL;
+
+ ret = pm_runtime_get_sync(ddev->dev);
+ if (ret < 0)
+ return ret;
+
if (is_support_sw_smu(adev)) {
ret = smu_sys_set_pp_table(&adev->smu, (void *)buf, count);
- if (ret)
+ if (ret) {
+ pm_runtime_mark_last_busy(ddev->dev);
+ pm_runtime_put_autosuspend(ddev->dev);
return ret;
+ }
} else if (adev->powerplay.pp_funcs->set_pp_table)
amdgpu_dpm_set_pp_table(adev, buf, count);
+ pm_runtime_mark_last_busy(ddev->dev);
+ pm_runtime_put_autosuspend(ddev->dev);
+
return count;
}
@@ -654,6 +734,9 @@ static ssize_t amdgpu_set_pp_od_clk_voltage(struct device *dev,
const char delimiter[3] = {' ', '\n', '\0'};
uint32_t type;
+ if (amdgpu_sriov_vf(adev))
+ return -EINVAL;
+
if (count > 127)
return -EINVAL;
@@ -689,18 +772,28 @@ static ssize_t amdgpu_set_pp_od_clk_voltage(struct device *dev,
tmp_str++;
}
+ ret = pm_runtime_get_sync(ddev->dev);
+ if (ret < 0)
+ return ret;
+
if (is_support_sw_smu(adev)) {
ret = smu_od_edit_dpm_table(&adev->smu, type,
parameter, parameter_size);
- if (ret)
+ if (ret) {
+ pm_runtime_mark_last_busy(ddev->dev);
+ pm_runtime_put_autosuspend(ddev->dev);
return -EINVAL;
+ }
} else {
if (adev->powerplay.pp_funcs->odn_edit_dpm_table) {
ret = amdgpu_dpm_odn_edit_dpm_table(adev, type,
parameter, parameter_size);
- if (ret)
+ if (ret) {
+ pm_runtime_mark_last_busy(ddev->dev);
+ pm_runtime_put_autosuspend(ddev->dev);
return -EINVAL;
+ }
}
if (type == PP_OD_COMMIT_DPM_TABLE) {
@@ -708,12 +801,18 @@ static ssize_t amdgpu_set_pp_od_clk_voltage(struct device *dev,
amdgpu_dpm_dispatch_task(adev,
AMD_PP_TASK_READJUST_POWER_STATE,
NULL);
+ pm_runtime_mark_last_busy(ddev->dev);
+ pm_runtime_put_autosuspend(ddev->dev);
return count;
} else {
+ pm_runtime_mark_last_busy(ddev->dev);
+ pm_runtime_put_autosuspend(ddev->dev);
return -EINVAL;
}
}
}
+ pm_runtime_mark_last_busy(ddev->dev);
+ pm_runtime_put_autosuspend(ddev->dev);
return count;
}
@@ -724,24 +823,33 @@ static ssize_t amdgpu_get_pp_od_clk_voltage(struct device *dev,
{
struct drm_device *ddev = dev_get_drvdata(dev);
struct amdgpu_device *adev = ddev->dev_private;
- uint32_t size = 0;
+ ssize_t size;
+ int ret;
+
+ if (amdgpu_sriov_vf(adev))
+ return 0;
+
+ ret = pm_runtime_get_sync(ddev->dev);
+ if (ret < 0)
+ return ret;
if (is_support_sw_smu(adev)) {
size = smu_print_clk_levels(&adev->smu, SMU_OD_SCLK, buf);
size += smu_print_clk_levels(&adev->smu, SMU_OD_MCLK, buf+size);
size += smu_print_clk_levels(&adev->smu, SMU_OD_VDDC_CURVE, buf+size);
size += smu_print_clk_levels(&adev->smu, SMU_OD_RANGE, buf+size);
- return size;
} else if (adev->powerplay.pp_funcs->print_clock_levels) {
size = amdgpu_dpm_print_clock_levels(adev, OD_SCLK, buf);
size += amdgpu_dpm_print_clock_levels(adev, OD_MCLK, buf+size);
size += amdgpu_dpm_print_clock_levels(adev, OD_VDDC_CURVE, buf+size);
size += amdgpu_dpm_print_clock_levels(adev, OD_RANGE, buf+size);
- return size;
} else {
- return snprintf(buf, PAGE_SIZE, "\n");
+ size = snprintf(buf, PAGE_SIZE, "\n");
}
+ pm_runtime_mark_last_busy(ddev->dev);
+ pm_runtime_put_autosuspend(ddev->dev);
+ return size;
}
/**
@@ -770,21 +878,36 @@ static ssize_t amdgpu_set_pp_feature_status(struct device *dev,
uint64_t featuremask;
int ret;
+ if (amdgpu_sriov_vf(adev))
+ return -EINVAL;
+
ret = kstrtou64(buf, 0, &featuremask);
if (ret)
return -EINVAL;
pr_debug("featuremask = 0x%llx\n", featuremask);
+ ret = pm_runtime_get_sync(ddev->dev);
+ if (ret < 0)
+ return ret;
+
if (is_support_sw_smu(adev)) {
ret = smu_sys_set_pp_feature_mask(&adev->smu, featuremask);
- if (ret)
+ if (ret) {
+ pm_runtime_mark_last_busy(ddev->dev);
+ pm_runtime_put_autosuspend(ddev->dev);
return -EINVAL;
+ }
} else if (adev->powerplay.pp_funcs->set_ppfeature_status) {
ret = amdgpu_dpm_set_ppfeature_status(adev, featuremask);
- if (ret)
+ if (ret) {
+ pm_runtime_mark_last_busy(ddev->dev);
+ pm_runtime_put_autosuspend(ddev->dev);
return -EINVAL;
+ }
}
+ pm_runtime_mark_last_busy(ddev->dev);
+ pm_runtime_put_autosuspend(ddev->dev);
return count;
}
@@ -795,13 +918,27 @@ static ssize_t amdgpu_get_pp_feature_status(struct device *dev,
{
struct drm_device *ddev = dev_get_drvdata(dev);
struct amdgpu_device *adev = ddev->dev_private;
+ ssize_t size;
+ int ret;
- if (is_support_sw_smu(adev)) {
- return smu_sys_get_pp_feature_mask(&adev->smu, buf);
- } else if (adev->powerplay.pp_funcs->get_ppfeature_status)
- return amdgpu_dpm_get_ppfeature_status(adev, buf);
+ if (amdgpu_sriov_vf(adev))
+ return 0;
+
+ ret = pm_runtime_get_sync(ddev->dev);
+ if (ret < 0)
+ return ret;
+
+ if (is_support_sw_smu(adev))
+ size = smu_sys_get_pp_feature_mask(&adev->smu, buf);
+ else if (adev->powerplay.pp_funcs->get_ppfeature_status)
+ size = amdgpu_dpm_get_ppfeature_status(adev, buf);
+ else
+ size = snprintf(buf, PAGE_SIZE, "\n");
+
+ pm_runtime_mark_last_busy(ddev->dev);
+ pm_runtime_put_autosuspend(ddev->dev);
- return snprintf(buf, PAGE_SIZE, "\n");
+ return size;
}
/**
@@ -840,17 +977,27 @@ static ssize_t amdgpu_get_pp_dpm_sclk(struct device *dev,
{
struct drm_device *ddev = dev_get_drvdata(dev);
struct amdgpu_device *adev = ddev->dev_private;
+ ssize_t size;
+ int ret;
- if (amdgpu_sriov_vf(adev) && amdgim_is_hwperf(adev) &&
- adev->virt.ops->get_pp_clk)
- return adev->virt.ops->get_pp_clk(adev, PP_SCLK, buf);
+ if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev))
+ return 0;
+
+ ret = pm_runtime_get_sync(ddev->dev);
+ if (ret < 0)
+ return ret;
if (is_support_sw_smu(adev))
- return smu_print_clk_levels(&adev->smu, SMU_SCLK, buf);
+ size = smu_print_clk_levels(&adev->smu, SMU_SCLK, buf);
else if (adev->powerplay.pp_funcs->print_clock_levels)
- return amdgpu_dpm_print_clock_levels(adev, PP_SCLK, buf);
+ size = amdgpu_dpm_print_clock_levels(adev, PP_SCLK, buf);
else
- return snprintf(buf, PAGE_SIZE, "\n");
+ size = snprintf(buf, PAGE_SIZE, "\n");
+
+ pm_runtime_mark_last_busy(ddev->dev);
+ pm_runtime_put_autosuspend(ddev->dev);
+
+ return size;
}
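[Reviewer note: the gate repeated at the top of these handlers distinguishes plain SR-IOV virtual functions (power play is owned by the host, so reads return empty and writes are rejected) from the "pp one VF" mode, where a single VF is allowed direct power-play control. A hypothetical condensation of the check, purely for illustration — this helper does not exist in the patch:

static inline bool pp_ctrl_blocked(struct amdgpu_device *adev)
{
	/* multi-VF SR-IOV: the host owns power play; only the
	 * one-VF mode exposes these controls to the guest */
	return amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev);
}
]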
/*
@@ -899,18 +1046,25 @@ static ssize_t amdgpu_set_pp_dpm_sclk(struct device *dev,
int ret;
uint32_t mask = 0;
- if (amdgpu_sriov_vf(adev))
- return 0;
+ if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev))
+ return -EINVAL;
ret = amdgpu_read_mask(buf, count, &mask);
if (ret)
return ret;
+ ret = pm_runtime_get_sync(ddev->dev);
+ if (ret < 0)
+ return ret;
+
if (is_support_sw_smu(adev))
ret = smu_force_clk_levels(&adev->smu, SMU_SCLK, mask, true);
else if (adev->powerplay.pp_funcs->force_clock_level)
ret = amdgpu_dpm_force_clock_level(adev, PP_SCLK, mask);
+ pm_runtime_mark_last_busy(ddev->dev);
+ pm_runtime_put_autosuspend(ddev->dev);
+
if (ret)
return -EINVAL;
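[Reviewer note: the pp_dpm_* stores accept a whitespace-separated list of level indices and force only those levels. Assuming amdgpu_read_mask() folds the indices into a bitmask, a write of "0 2" selects levels 0 and 2:

	uint32_t mask = 0;
	int ret = amdgpu_read_mask("0 2", 3, &mask);
	/* assumed result: ret == 0, mask == 0x5 (bits 0 and 2 set) */
]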
@@ -923,17 +1077,27 @@ static ssize_t amdgpu_get_pp_dpm_mclk(struct device *dev,
{
struct drm_device *ddev = dev_get_drvdata(dev);
struct amdgpu_device *adev = ddev->dev_private;
+ ssize_t size;
+ int ret;
+
+ if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev))
+ return 0;
- if (amdgpu_sriov_vf(adev) && amdgim_is_hwperf(adev) &&
- adev->virt.ops->get_pp_clk)
- return adev->virt.ops->get_pp_clk(adev, PP_MCLK, buf);
+ ret = pm_runtime_get_sync(ddev->dev);
+ if (ret < 0)
+ return ret;
if (is_support_sw_smu(adev))
- return smu_print_clk_levels(&adev->smu, SMU_MCLK, buf);
+ size = smu_print_clk_levels(&adev->smu, SMU_MCLK, buf);
else if (adev->powerplay.pp_funcs->print_clock_levels)
- return amdgpu_dpm_print_clock_levels(adev, PP_MCLK, buf);
+ size = amdgpu_dpm_print_clock_levels(adev, PP_MCLK, buf);
else
- return snprintf(buf, PAGE_SIZE, "\n");
+ size = snprintf(buf, PAGE_SIZE, "\n");
+
+ pm_runtime_mark_last_busy(ddev->dev);
+ pm_runtime_put_autosuspend(ddev->dev);
+
+ return size;
}
static ssize_t amdgpu_set_pp_dpm_mclk(struct device *dev,
@@ -943,21 +1107,28 @@ static ssize_t amdgpu_set_pp_dpm_mclk(struct device *dev,
{
struct drm_device *ddev = dev_get_drvdata(dev);
struct amdgpu_device *adev = ddev->dev_private;
- int ret;
uint32_t mask = 0;
+ int ret;
- if (amdgpu_sriov_vf(adev))
- return 0;
+ if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev))
+ return -EINVAL;
ret = amdgpu_read_mask(buf, count, &mask);
if (ret)
return ret;
+ ret = pm_runtime_get_sync(ddev->dev);
+ if (ret < 0)
+ return ret;
+
if (is_support_sw_smu(adev))
ret = smu_force_clk_levels(&adev->smu, SMU_MCLK, mask, true);
else if (adev->powerplay.pp_funcs->force_clock_level)
ret = amdgpu_dpm_force_clock_level(adev, PP_MCLK, mask);
+ pm_runtime_mark_last_busy(ddev->dev);
+ pm_runtime_put_autosuspend(ddev->dev);
+
if (ret)
return -EINVAL;
@@ -970,13 +1141,27 @@ static ssize_t amdgpu_get_pp_dpm_socclk(struct device *dev,
{
struct drm_device *ddev = dev_get_drvdata(dev);
struct amdgpu_device *adev = ddev->dev_private;
+ ssize_t size;
+ int ret;
+
+ if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev))
+ return 0;
+
+ ret = pm_runtime_get_sync(ddev->dev);
+ if (ret < 0)
+ return ret;
if (is_support_sw_smu(adev))
- return smu_print_clk_levels(&adev->smu, SMU_SOCCLK, buf);
+ size = smu_print_clk_levels(&adev->smu, SMU_SOCCLK, buf);
else if (adev->powerplay.pp_funcs->print_clock_levels)
- return amdgpu_dpm_print_clock_levels(adev, PP_SOCCLK, buf);
+ size = amdgpu_dpm_print_clock_levels(adev, PP_SOCCLK, buf);
else
- return snprintf(buf, PAGE_SIZE, "\n");
+ size = snprintf(buf, PAGE_SIZE, "\n");
+
+ pm_runtime_mark_last_busy(ddev->dev);
+ pm_runtime_put_autosuspend(ddev->dev);
+
+ return size;
}
static ssize_t amdgpu_set_pp_dpm_socclk(struct device *dev,
@@ -989,14 +1174,26 @@ static ssize_t amdgpu_set_pp_dpm_socclk(struct device *dev,
int ret;
uint32_t mask = 0;
+ if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev))
+ return -EINVAL;
+
ret = amdgpu_read_mask(buf, count, &mask);
if (ret)
return ret;
+ ret = pm_runtime_get_sync(ddev->dev);
+ if (ret < 0)
+ return ret;
+
if (is_support_sw_smu(adev))
ret = smu_force_clk_levels(&adev->smu, SMU_SOCCLK, mask, true);
else if (adev->powerplay.pp_funcs->force_clock_level)
ret = amdgpu_dpm_force_clock_level(adev, PP_SOCCLK, mask);
+ else
+ ret = 0;
+
+ pm_runtime_mark_last_busy(ddev->dev);
+ pm_runtime_put_autosuspend(ddev->dev);
if (ret)
return -EINVAL;
@@ -1010,13 +1207,27 @@ static ssize_t amdgpu_get_pp_dpm_fclk(struct device *dev,
{
struct drm_device *ddev = dev_get_drvdata(dev);
struct amdgpu_device *adev = ddev->dev_private;
+ ssize_t size;
+ int ret;
+
+ if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev))
+ return 0;
+
+ ret = pm_runtime_get_sync(ddev->dev);
+ if (ret < 0)
+ return ret;
if (is_support_sw_smu(adev))
- return smu_print_clk_levels(&adev->smu, SMU_FCLK, buf);
+ size = smu_print_clk_levels(&adev->smu, SMU_FCLK, buf);
else if (adev->powerplay.pp_funcs->print_clock_levels)
- return amdgpu_dpm_print_clock_levels(adev, PP_FCLK, buf);
+ size = amdgpu_dpm_print_clock_levels(adev, PP_FCLK, buf);
else
- return snprintf(buf, PAGE_SIZE, "\n");
+ size = snprintf(buf, PAGE_SIZE, "\n");
+
+ pm_runtime_mark_last_busy(ddev->dev);
+ pm_runtime_put_autosuspend(ddev->dev);
+
+ return size;
}
static ssize_t amdgpu_set_pp_dpm_fclk(struct device *dev,
@@ -1029,14 +1240,26 @@ static ssize_t amdgpu_set_pp_dpm_fclk(struct device *dev,
int ret;
uint32_t mask = 0;
+ if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev))
+ return -EINVAL;
+
ret = amdgpu_read_mask(buf, count, &mask);
if (ret)
return ret;
+ ret = pm_runtime_get_sync(ddev->dev);
+ if (ret < 0)
+ return ret;
+
if (is_support_sw_smu(adev))
ret = smu_force_clk_levels(&adev->smu, SMU_FCLK, mask, true);
else if (adev->powerplay.pp_funcs->force_clock_level)
ret = amdgpu_dpm_force_clock_level(adev, PP_FCLK, mask);
+ else
+ ret = 0;
+
+ pm_runtime_mark_last_busy(ddev->dev);
+ pm_runtime_put_autosuspend(ddev->dev);
if (ret)
return -EINVAL;
@@ -1050,13 +1273,27 @@ static ssize_t amdgpu_get_pp_dpm_dcefclk(struct device *dev,
{
struct drm_device *ddev = dev_get_drvdata(dev);
struct amdgpu_device *adev = ddev->dev_private;
+ ssize_t size;
+ int ret;
+
+ if (amdgpu_sriov_vf(adev))
+ return 0;
+
+ ret = pm_runtime_get_sync(ddev->dev);
+ if (ret < 0)
+ return ret;
if (is_support_sw_smu(adev))
- return smu_print_clk_levels(&adev->smu, SMU_DCEFCLK, buf);
+ size = smu_print_clk_levels(&adev->smu, SMU_DCEFCLK, buf);
else if (adev->powerplay.pp_funcs->print_clock_levels)
- return amdgpu_dpm_print_clock_levels(adev, PP_DCEFCLK, buf);
+ size = amdgpu_dpm_print_clock_levels(adev, PP_DCEFCLK, buf);
else
- return snprintf(buf, PAGE_SIZE, "\n");
+ size = snprintf(buf, PAGE_SIZE, "\n");
+
+ pm_runtime_mark_last_busy(ddev->dev);
+ pm_runtime_put_autosuspend(ddev->dev);
+
+ return size;
}
static ssize_t amdgpu_set_pp_dpm_dcefclk(struct device *dev,
@@ -1069,14 +1306,26 @@ static ssize_t amdgpu_set_pp_dpm_dcefclk(struct device *dev,
int ret;
uint32_t mask = 0;
+ if (amdgpu_sriov_vf(adev))
+ return -EINVAL;
+
ret = amdgpu_read_mask(buf, count, &mask);
if (ret)
return ret;
+ ret = pm_runtime_get_sync(ddev->dev);
+ if (ret < 0)
+ return ret;
+
if (is_support_sw_smu(adev))
ret = smu_force_clk_levels(&adev->smu, SMU_DCEFCLK, mask, true);
else if (adev->powerplay.pp_funcs->force_clock_level)
ret = amdgpu_dpm_force_clock_level(adev, PP_DCEFCLK, mask);
+ else
+ ret = 0;
+
+ pm_runtime_mark_last_busy(ddev->dev);
+ pm_runtime_put_autosuspend(ddev->dev);
if (ret)
return -EINVAL;
@@ -1090,13 +1339,27 @@ static ssize_t amdgpu_get_pp_dpm_pcie(struct device *dev,
{
struct drm_device *ddev = dev_get_drvdata(dev);
struct amdgpu_device *adev = ddev->dev_private;
+ ssize_t size;
+ int ret;
+
+ if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev))
+ return 0;
+
+ ret = pm_runtime_get_sync(ddev->dev);
+ if (ret < 0)
+ return ret;
if (is_support_sw_smu(adev))
- return smu_print_clk_levels(&adev->smu, SMU_PCIE, buf);
+ size = smu_print_clk_levels(&adev->smu, SMU_PCIE, buf);
else if (adev->powerplay.pp_funcs->print_clock_levels)
- return amdgpu_dpm_print_clock_levels(adev, PP_PCIE, buf);
+ size = amdgpu_dpm_print_clock_levels(adev, PP_PCIE, buf);
else
- return snprintf(buf, PAGE_SIZE, "\n");
+ size = snprintf(buf, PAGE_SIZE, "\n");
+
+ pm_runtime_mark_last_busy(ddev->dev);
+ pm_runtime_put_autosuspend(ddev->dev);
+
+ return size;
}
static ssize_t amdgpu_set_pp_dpm_pcie(struct device *dev,
@@ -1109,14 +1372,26 @@ static ssize_t amdgpu_set_pp_dpm_pcie(struct device *dev,
int ret;
uint32_t mask = 0;
+ if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev))
+ return -EINVAL;
+
ret = amdgpu_read_mask(buf, count, &mask);
if (ret)
return ret;
+ ret = pm_runtime_get_sync(ddev->dev);
+ if (ret < 0)
+ return ret;
+
if (is_support_sw_smu(adev))
ret = smu_force_clk_levels(&adev->smu, SMU_PCIE, mask, true);
else if (adev->powerplay.pp_funcs->force_clock_level)
ret = amdgpu_dpm_force_clock_level(adev, PP_PCIE, mask);
+ else
+ ret = 0;
+
+ pm_runtime_mark_last_busy(ddev->dev);
+ pm_runtime_put_autosuspend(ddev->dev);
if (ret)
return -EINVAL;
@@ -1131,12 +1406,23 @@ static ssize_t amdgpu_get_pp_sclk_od(struct device *dev,
struct drm_device *ddev = dev_get_drvdata(dev);
struct amdgpu_device *adev = ddev->dev_private;
uint32_t value = 0;
+ int ret;
+
+ if (amdgpu_sriov_vf(adev))
+ return 0;
+
+ ret = pm_runtime_get_sync(ddev->dev);
+ if (ret < 0)
+ return ret;
if (is_support_sw_smu(adev))
value = smu_get_od_percentage(&(adev->smu), SMU_OD_SCLK);
else if (adev->powerplay.pp_funcs->get_sclk_od)
value = amdgpu_dpm_get_sclk_od(adev);
+ pm_runtime_mark_last_busy(ddev->dev);
+ pm_runtime_put_autosuspend(ddev->dev);
+
return snprintf(buf, PAGE_SIZE, "%d\n", value);
}
@@ -1150,12 +1436,17 @@ static ssize_t amdgpu_set_pp_sclk_od(struct device *dev,
int ret;
long int value;
+ if (amdgpu_sriov_vf(adev))
+ return -EINVAL;
+
ret = kstrtol(buf, 0, &value);
- if (ret) {
- count = -EINVAL;
- goto fail;
- }
+ if (ret)
+ return -EINVAL;
+
+ ret = pm_runtime_get_sync(ddev->dev);
+ if (ret < 0)
+ return ret;
if (is_support_sw_smu(adev)) {
value = smu_set_od_percentage(&(adev->smu), SMU_OD_SCLK, (uint32_t)value);
@@ -1171,7 +1462,9 @@ static ssize_t amdgpu_set_pp_sclk_od(struct device *dev,
}
}
-fail:
+ pm_runtime_mark_last_busy(ddev->dev);
+ pm_runtime_put_autosuspend(ddev->dev);
+
return count;
}
@@ -1182,12 +1475,23 @@ static ssize_t amdgpu_get_pp_mclk_od(struct device *dev,
struct drm_device *ddev = dev_get_drvdata(dev);
struct amdgpu_device *adev = ddev->dev_private;
uint32_t value = 0;
+ int ret;
+
+ if (amdgpu_sriov_vf(adev))
+ return 0;
+
+ ret = pm_runtime_get_sync(ddev->dev);
+ if (ret < 0)
+ return ret;
if (is_support_sw_smu(adev))
value = smu_get_od_percentage(&(adev->smu), SMU_OD_MCLK);
else if (adev->powerplay.pp_funcs->get_mclk_od)
value = amdgpu_dpm_get_mclk_od(adev);
+ pm_runtime_mark_last_busy(ddev->dev);
+ pm_runtime_put_autosuspend(ddev->dev);
+
return snprintf(buf, PAGE_SIZE, "%d\n", value);
}
@@ -1201,12 +1505,17 @@ static ssize_t amdgpu_set_pp_mclk_od(struct device *dev,
int ret;
long int value;
+ if (amdgpu_sriov_vf(adev))
+ return 0;
+
ret = kstrtol(buf, 0, &value);
- if (ret) {
- count = -EINVAL;
- goto fail;
- }
+ if (ret)
+ return -EINVAL;
+
+ ret = pm_runtime_get_sync(ddev->dev);
+ if (ret < 0)
+ return ret;
if (is_support_sw_smu(adev)) {
value = smu_set_od_percentage(&(adev->smu), SMU_OD_MCLK, (uint32_t)value);
@@ -1222,7 +1531,9 @@ static ssize_t amdgpu_set_pp_mclk_od(struct device *dev,
}
}
-fail:
+ pm_runtime_mark_last_busy(ddev->dev);
+ pm_runtime_put_autosuspend(ddev->dev);
+
return count;
}
@@ -1252,13 +1563,27 @@ static ssize_t amdgpu_get_pp_power_profile_mode(struct device *dev,
{
struct drm_device *ddev = dev_get_drvdata(dev);
struct amdgpu_device *adev = ddev->dev_private;
+ ssize_t size;
+ int ret;
+
+ if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev))
+ return 0;
+
+ ret = pm_runtime_get_sync(ddev->dev);
+ if (ret < 0)
+ return ret;
if (is_support_sw_smu(adev))
- return smu_get_power_profile_mode(&adev->smu, buf);
+ size = smu_get_power_profile_mode(&adev->smu, buf);
else if (adev->powerplay.pp_funcs->get_power_profile_mode)
- return amdgpu_dpm_get_power_profile_mode(adev, buf);
+ size = amdgpu_dpm_get_power_profile_mode(adev, buf);
+ else
+ size = snprintf(buf, PAGE_SIZE, "\n");
+
+ pm_runtime_mark_last_busy(ddev->dev);
+ pm_runtime_put_autosuspend(ddev->dev);
- return snprintf(buf, PAGE_SIZE, "\n");
+ return size;
}
@@ -1283,7 +1608,10 @@ static ssize_t amdgpu_set_pp_power_profile_mode(struct device *dev,
tmp[1] = '\0';
ret = kstrtol(tmp, 0, &profile_mode);
if (ret)
- goto fail;
+ return -EINVAL;
+
+ if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev))
+ return -EINVAL;
if (profile_mode == PP_SMC_POWER_PROFILE_CUSTOM) {
if (count < 2 || count > 127)
@@ -1295,23 +1623,30 @@ static ssize_t amdgpu_set_pp_power_profile_mode(struct device *dev,
while (tmp_str[0]) {
sub_str = strsep(&tmp_str, delimiter);
ret = kstrtol(sub_str, 0, &parameter[parameter_size]);
- if (ret) {
- count = -EINVAL;
- goto fail;
- }
+ if (ret)
+ return -EINVAL;
parameter_size++;
while (isspace(*tmp_str))
tmp_str++;
}
}
parameter[parameter_size] = profile_mode;
+
+ ret = pm_runtime_get_sync(ddev->dev);
+ if (ret < 0)
+ return ret;
+
if (is_support_sw_smu(adev))
ret = smu_set_power_profile_mode(&adev->smu, parameter, parameter_size, true);
else if (adev->powerplay.pp_funcs->set_power_profile_mode)
ret = amdgpu_dpm_set_power_profile_mode(adev, parameter, parameter_size);
+
+ pm_runtime_mark_last_busy(ddev->dev);
+ pm_runtime_put_autosuspend(ddev->dev);
+
if (!ret)
return count;
-fail:
+
return -EINVAL;
}
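[Reviewer note: pp_power_profile_mode takes the profile index as the first token; only PP_SMC_POWER_PROFILE_CUSTOM carries extra knobs, which are parsed into parameter[] with the mode itself appended as the final element. Illustrative input shapes (parameter values hypothetical):

	/* "2\n"              -> profile_mode = 2, no extra parameters
	 * "<CUSTOM> 70 85\n" -> parameter[] = { 70, 85, <CUSTOM> },
	 *                       i.e. the custom knobs followed by the mode */
]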
@@ -1331,10 +1666,20 @@ static ssize_t amdgpu_get_busy_percent(struct device *dev,
struct amdgpu_device *adev = ddev->dev_private;
int r, value, size = sizeof(value);
+ if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev))
+ return 0;
+
+ r = pm_runtime_get_sync(ddev->dev);
+ if (r < 0)
+ return r;
+
/* read the IP busy sensor */
r = amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GPU_LOAD,
(void *)&value, &size);
+ pm_runtime_mark_last_busy(ddev->dev);
+ pm_runtime_put_autosuspend(ddev->dev);
+
if (r)
return r;
@@ -1357,10 +1702,20 @@ static ssize_t amdgpu_get_memory_busy_percent(struct device *dev,
struct amdgpu_device *adev = ddev->dev_private;
int r, value, size = sizeof(value);
+ if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev))
+ return 0;
+
+ r = pm_runtime_get_sync(ddev->dev);
+ if (r < 0)
+ return r;
+
/* read the memory load sensor */
r = amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_MEM_LOAD,
(void *)&value, &size);
+ pm_runtime_mark_last_busy(ddev->dev);
+ pm_runtime_put_autosuspend(ddev->dev);
+
if (r)
return r;
@@ -1386,8 +1741,20 @@ static ssize_t amdgpu_get_pcie_bw(struct device *dev,
struct drm_device *ddev = dev_get_drvdata(dev);
struct amdgpu_device *adev = ddev->dev_private;
uint64_t count0, count1;
+ int ret;
+
+ if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev))
+ return 0;
+
+ ret = pm_runtime_get_sync(ddev->dev);
+ if (ret < 0)
+ return ret;
amdgpu_asic_get_pcie_usage(adev, &count0, &count1);
+
+ pm_runtime_mark_last_busy(ddev->dev);
+ pm_runtime_put_autosuspend(ddev->dev);
+
return snprintf(buf, PAGE_SIZE, "%llu %llu %i\n",
count0, count1, pcie_get_mps(adev->pdev));
}
@@ -1409,6 +1776,9 @@ static ssize_t amdgpu_get_unique_id(struct device *dev,
struct drm_device *ddev = dev_get_drvdata(dev);
struct amdgpu_device *adev = ddev->dev_private;
+ if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev))
+ return 0;
+
if (adev->unique_id)
return snprintf(buf, PAGE_SIZE, "%016llx\n", adev->unique_id);
@@ -1472,42 +1842,43 @@ static ssize_t amdgpu_hwmon_show_temp(struct device *dev,
char *buf)
{
struct amdgpu_device *adev = dev_get_drvdata(dev);
- struct drm_device *ddev = adev->ddev;
int channel = to_sensor_dev_attr(attr)->index;
int r, temp = 0, size = sizeof(temp);
- /* Can't get temperature when the card is off */
- if ((adev->flags & AMD_IS_PX) &&
- (ddev->switch_power_state != DRM_SWITCH_POWER_ON))
- return -EINVAL;
-
if (channel >= PP_TEMP_MAX)
return -EINVAL;
+ r = pm_runtime_get_sync(adev->ddev->dev);
+ if (r < 0)
+ return r;
+
switch (channel) {
case PP_TEMP_JUNCTION:
/* get current junction temperature */
r = amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_HOTSPOT_TEMP,
(void *)&temp, &size);
- if (r)
- return r;
break;
case PP_TEMP_EDGE:
/* get current edge temperature */
r = amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_EDGE_TEMP,
(void *)&temp, &size);
- if (r)
- return r;
break;
case PP_TEMP_MEM:
/* get current memory temperature */
r = amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_MEM_TEMP,
(void *)&temp, &size);
- if (r)
- return r;
+ break;
+ default:
+ r = -EINVAL;
break;
}
+ pm_runtime_mark_last_busy(adev->ddev->dev);
+ pm_runtime_put_autosuspend(adev->ddev->dev);
+
+ if (r)
+ return r;
+
return snprintf(buf, PAGE_SIZE, "%d\n", temp);
}
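[Reviewer note: the temperature switch now funnels every channel through a single exit so the runtime-PM reference taken above is always released; the new default: case catches any channel that slips past the PP_TEMP_MAX bound. The single-exit idiom, schematically (read_sensor is a stand-in name):

	r = read_sensor(...);			/* any of the channels */
	pm_runtime_mark_last_busy(adev->ddev->dev);
	pm_runtime_put_autosuspend(adev->ddev->dev);
	if (r)					/* only bail out after the put */
		return r;
]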
@@ -1603,15 +1974,27 @@ static ssize_t amdgpu_hwmon_get_pwm1_enable(struct device *dev,
{
struct amdgpu_device *adev = dev_get_drvdata(dev);
u32 pwm_mode = 0;
+ int ret;
+
+ ret = pm_runtime_get_sync(adev->ddev->dev);
+ if (ret < 0)
+ return ret;
+
if (is_support_sw_smu(adev)) {
pwm_mode = smu_get_fan_control_mode(&adev->smu);
} else {
- if (!adev->powerplay.pp_funcs->get_fan_control_mode)
+ if (!adev->powerplay.pp_funcs->get_fan_control_mode) {
+ pm_runtime_mark_last_busy(adev->ddev->dev);
+ pm_runtime_put_autosuspend(adev->ddev->dev);
return -EINVAL;
+ }
pwm_mode = amdgpu_dpm_get_fan_control_mode(adev);
}
+ pm_runtime_mark_last_busy(adev->ddev->dev);
+ pm_runtime_put_autosuspend(adev->ddev->dev);
+
return sprintf(buf, "%i\n", pwm_mode);
}
@@ -1621,27 +2004,32 @@ static ssize_t amdgpu_hwmon_set_pwm1_enable(struct device *dev,
size_t count)
{
struct amdgpu_device *adev = dev_get_drvdata(dev);
- int err;
+ int err, ret;
int value;
- /* Can't adjust fan when the card is off */
- if ((adev->flags & AMD_IS_PX) &&
- (adev->ddev->switch_power_state != DRM_SWITCH_POWER_ON))
- return -EINVAL;
-
err = kstrtoint(buf, 10, &value);
if (err)
return err;
+ ret = pm_runtime_get_sync(adev->ddev->dev);
+ if (ret < 0)
+ return ret;
+
if (is_support_sw_smu(adev)) {
smu_set_fan_control_mode(&adev->smu, value);
} else {
- if (!adev->powerplay.pp_funcs->set_fan_control_mode)
+ if (!adev->powerplay.pp_funcs->set_fan_control_mode) {
+ pm_runtime_mark_last_busy(adev->ddev->dev);
+ pm_runtime_put_autosuspend(adev->ddev->dev);
return -EINVAL;
+ }
amdgpu_dpm_set_fan_control_mode(adev, value);
}
+ pm_runtime_mark_last_busy(adev->ddev->dev);
+ pm_runtime_put_autosuspend(adev->ddev->dev);
+
return count;
}
@@ -1668,34 +2056,43 @@ static ssize_t amdgpu_hwmon_set_pwm1(struct device *dev,
u32 value;
u32 pwm_mode;
- /* Can't adjust fan when the card is off */
- if ((adev->flags & AMD_IS_PX) &&
- (adev->ddev->switch_power_state != DRM_SWITCH_POWER_ON))
- return -EINVAL;
+ err = pm_runtime_get_sync(adev->ddev->dev);
+ if (err < 0)
+ return err;
+
if (is_support_sw_smu(adev))
pwm_mode = smu_get_fan_control_mode(&adev->smu);
else
pwm_mode = amdgpu_dpm_get_fan_control_mode(adev);
+
if (pwm_mode != AMD_FAN_CTRL_MANUAL) {
pr_info("manual fan speed control should be enabled first\n");
+ pm_runtime_mark_last_busy(adev->ddev->dev);
+ pm_runtime_put_autosuspend(adev->ddev->dev);
return -EINVAL;
}
err = kstrtou32(buf, 10, &value);
- if (err)
+ if (err) {
+ pm_runtime_mark_last_busy(adev->ddev->dev);
+ pm_runtime_put_autosuspend(adev->ddev->dev);
return err;
+ }
value = (value * 100) / 255;
- if (is_support_sw_smu(adev)) {
+ if (is_support_sw_smu(adev))
err = smu_set_fan_speed_percent(&adev->smu, value);
- if (err)
- return err;
- } else if (adev->powerplay.pp_funcs->set_fan_speed_percent) {
+ else if (adev->powerplay.pp_funcs->set_fan_speed_percent)
err = amdgpu_dpm_set_fan_speed_percent(adev, value);
- if (err)
- return err;
- }
+ else
+ err = -EINVAL;
+
+ pm_runtime_mark_last_busy(adev->ddev->dev);
+ pm_runtime_put_autosuspend(adev->ddev->dev);
+
+ if (err)
+ return err;
return count;
}
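[Reviewer note: hwmon's pwm1 is an integer in 0-255 while the SMU fan interface takes a percentage, hence the scaling above. Worked values, using integer math:

	u32 raw = 128;
	u32 pct = (raw * 100) / 255;	/* 50 -- and raw = 255 maps to exactly 100 */
]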
@@ -1708,20 +2105,22 @@ static ssize_t amdgpu_hwmon_get_pwm1(struct device *dev,
int err;
u32 speed = 0;
- /* Can't adjust fan when the card is off */
- if ((adev->flags & AMD_IS_PX) &&
- (adev->ddev->switch_power_state != DRM_SWITCH_POWER_ON))
- return -EINVAL;
+ err = pm_runtime_get_sync(adev->ddev->dev);
+ if (err < 0)
+ return err;
- if (is_support_sw_smu(adev)) {
+ if (is_support_sw_smu(adev))
err = smu_get_fan_speed_percent(&adev->smu, &speed);
- if (err)
- return err;
- } else if (adev->powerplay.pp_funcs->get_fan_speed_percent) {
+ else if (adev->powerplay.pp_funcs->get_fan_speed_percent)
err = amdgpu_dpm_get_fan_speed_percent(adev, &speed);
- if (err)
- return err;
- }
+ else
+ err = -EINVAL;
+
+ pm_runtime_mark_last_busy(adev->ddev->dev);
+ pm_runtime_put_autosuspend(adev->ddev->dev);
+
+ if (err)
+ return err;
speed = (speed * 255) / 100;
@@ -1736,20 +2135,22 @@ static ssize_t amdgpu_hwmon_get_fan1_input(struct device *dev,
int err;
u32 speed = 0;
- /* Can't adjust fan when the card is off */
- if ((adev->flags & AMD_IS_PX) &&
- (adev->ddev->switch_power_state != DRM_SWITCH_POWER_ON))
- return -EINVAL;
+ err = pm_runtime_get_sync(adev->ddev->dev);
+ if (err < 0)
+ return err;
- if (is_support_sw_smu(adev)) {
+ if (is_support_sw_smu(adev))
err = smu_get_fan_speed_rpm(&adev->smu, &speed);
- if (err)
- return err;
- } else if (adev->powerplay.pp_funcs->get_fan_speed_rpm) {
+ else if (adev->powerplay.pp_funcs->get_fan_speed_rpm)
err = amdgpu_dpm_get_fan_speed_rpm(adev, &speed);
- if (err)
- return err;
- }
+ else
+ err = -EINVAL;
+
+ pm_runtime_mark_last_busy(adev->ddev->dev);
+ pm_runtime_put_autosuspend(adev->ddev->dev);
+
+ if (err)
+ return err;
return sprintf(buf, "%i\n", speed);
}
@@ -1763,8 +2164,16 @@ static ssize_t amdgpu_hwmon_get_fan1_min(struct device *dev,
u32 size = sizeof(min_rpm);
int r;
+ r = pm_runtime_get_sync(adev->ddev->dev);
+ if (r < 0)
+ return r;
+
r = amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_MIN_FAN_RPM,
(void *)&min_rpm, &size);
+
+ pm_runtime_mark_last_busy(adev->ddev->dev);
+ pm_runtime_put_autosuspend(adev->ddev->dev);
+
if (r)
return r;
@@ -1780,8 +2189,16 @@ static ssize_t amdgpu_hwmon_get_fan1_max(struct device *dev,
u32 size = sizeof(max_rpm);
int r;
+ r = pm_runtime_get_sync(adev->ddev->dev);
+ if (r < 0)
+ return r;
+
r = amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_MAX_FAN_RPM,
(void *)&max_rpm, &size);
+
+ pm_runtime_mark_last_busy(adev->ddev->dev);
+ pm_runtime_put_autosuspend(adev->ddev->dev);
+
if (r)
return r;
@@ -1796,20 +2213,22 @@ static ssize_t amdgpu_hwmon_get_fan1_target(struct device *dev,
int err;
u32 rpm = 0;
- /* Can't adjust fan when the card is off */
- if ((adev->flags & AMD_IS_PX) &&
- (adev->ddev->switch_power_state != DRM_SWITCH_POWER_ON))
- return -EINVAL;
+ err = pm_runtime_get_sync(adev->ddev->dev);
+ if (err < 0)
+ return err;
- if (is_support_sw_smu(adev)) {
+ if (is_support_sw_smu(adev))
err = smu_get_fan_speed_rpm(&adev->smu, &rpm);
- if (err)
- return err;
- } else if (adev->powerplay.pp_funcs->get_fan_speed_rpm) {
+ else if (adev->powerplay.pp_funcs->get_fan_speed_rpm)
err = amdgpu_dpm_get_fan_speed_rpm(adev, &rpm);
- if (err)
- return err;
- }
+ else
+ err = -EINVAL;
+
+ pm_runtime_mark_last_busy(adev->ddev->dev);
+ pm_runtime_put_autosuspend(adev->ddev->dev);
+
+ if (err)
+ return err;
return sprintf(buf, "%i\n", rpm);
}
@@ -1823,32 +2242,40 @@ static ssize_t amdgpu_hwmon_set_fan1_target(struct device *dev,
u32 value;
u32 pwm_mode;
+ err = pm_runtime_get_sync(adev->ddev->dev);
+ if (err < 0)
+ return err;
+
if (is_support_sw_smu(adev))
pwm_mode = smu_get_fan_control_mode(&adev->smu);
else
pwm_mode = amdgpu_dpm_get_fan_control_mode(adev);
- if (pwm_mode != AMD_FAN_CTRL_MANUAL)
+ if (pwm_mode != AMD_FAN_CTRL_MANUAL) {
+ pm_runtime_mark_last_busy(adev->ddev->dev);
+ pm_runtime_put_autosuspend(adev->ddev->dev);
return -ENODATA;
-
- /* Can't adjust fan when the card is off */
- if ((adev->flags & AMD_IS_PX) &&
- (adev->ddev->switch_power_state != DRM_SWITCH_POWER_ON))
- return -EINVAL;
+ }
err = kstrtou32(buf, 10, &value);
- if (err)
+ if (err) {
+ pm_runtime_mark_last_busy(adev->ddev->dev);
+ pm_runtime_put_autosuspend(adev->ddev->dev);
return err;
+ }
- if (is_support_sw_smu(adev)) {
+ if (is_support_sw_smu(adev))
err = smu_set_fan_speed_rpm(&adev->smu, value);
- if (err)
- return err;
- } else if (adev->powerplay.pp_funcs->set_fan_speed_rpm) {
+ else if (adev->powerplay.pp_funcs->set_fan_speed_rpm)
err = amdgpu_dpm_set_fan_speed_rpm(adev, value);
- if (err)
- return err;
- }
+ else
+ err = -EINVAL;
+
+ pm_runtime_mark_last_busy(adev->ddev->dev);
+ pm_runtime_put_autosuspend(adev->ddev->dev);
+
+ if (err)
+ return err;
return count;
}
@@ -1859,15 +2286,27 @@ static ssize_t amdgpu_hwmon_get_fan1_enable(struct device *dev,
{
struct amdgpu_device *adev = dev_get_drvdata(dev);
u32 pwm_mode = 0;
+ int ret;
+
+ ret = pm_runtime_get_sync(adev->ddev->dev);
+ if (ret < 0)
+ return ret;
if (is_support_sw_smu(adev)) {
pwm_mode = smu_get_fan_control_mode(&adev->smu);
} else {
- if (!adev->powerplay.pp_funcs->get_fan_control_mode)
+ if (!adev->powerplay.pp_funcs->get_fan_control_mode) {
+ pm_runtime_mark_last_busy(adev->ddev->dev);
+ pm_runtime_put_autosuspend(adev->ddev->dev);
return -EINVAL;
+ }
pwm_mode = amdgpu_dpm_get_fan_control_mode(adev);
}
+
+ pm_runtime_mark_last_busy(adev->ddev->dev);
+ pm_runtime_put_autosuspend(adev->ddev->dev);
+
return sprintf(buf, "%i\n", pwm_mode == AMD_FAN_CTRL_AUTO ? 0 : 1);
}
@@ -1881,12 +2320,6 @@ static ssize_t amdgpu_hwmon_set_fan1_enable(struct device *dev,
int value;
u32 pwm_mode;
- /* Can't adjust fan when the card is off */
- if ((adev->flags & AMD_IS_PX) &&
- (adev->ddev->switch_power_state != DRM_SWITCH_POWER_ON))
- return -EINVAL;
-
-
err = kstrtoint(buf, 10, &value);
if (err)
return err;
@@ -1898,14 +2331,24 @@ static ssize_t amdgpu_hwmon_set_fan1_enable(struct device *dev,
else
return -EINVAL;
+ err = pm_runtime_get_sync(adev->ddev->dev);
+ if (err < 0)
+ return err;
+
if (is_support_sw_smu(adev)) {
smu_set_fan_control_mode(&adev->smu, pwm_mode);
} else {
- if (!adev->powerplay.pp_funcs->set_fan_control_mode)
+ if (!adev->powerplay.pp_funcs->set_fan_control_mode) {
+ pm_runtime_mark_last_busy(adev->ddev->dev);
+ pm_runtime_put_autosuspend(adev->ddev->dev);
return -EINVAL;
+ }
amdgpu_dpm_set_fan_control_mode(adev, pwm_mode);
}
+ pm_runtime_mark_last_busy(adev->ddev->dev);
+ pm_runtime_put_autosuspend(adev->ddev->dev);
+
return count;
}
@@ -1914,18 +2357,20 @@ static ssize_t amdgpu_hwmon_show_vddgfx(struct device *dev,
char *buf)
{
struct amdgpu_device *adev = dev_get_drvdata(dev);
- struct drm_device *ddev = adev->ddev;
u32 vddgfx;
int r, size = sizeof(vddgfx);
- /* Can't get voltage when the card is off */
- if ((adev->flags & AMD_IS_PX) &&
- (ddev->switch_power_state != DRM_SWITCH_POWER_ON))
- return -EINVAL;
+ r = pm_runtime_get_sync(adev->ddev->dev);
+ if (r < 0)
+ return r;
/* get the voltage */
r = amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_VDDGFX,
(void *)&vddgfx, &size);
+
+ pm_runtime_mark_last_busy(adev->ddev->dev);
+ pm_runtime_put_autosuspend(adev->ddev->dev);
+
if (r)
return r;
@@ -1944,7 +2389,6 @@ static ssize_t amdgpu_hwmon_show_vddnb(struct device *dev,
char *buf)
{
struct amdgpu_device *adev = dev_get_drvdata(dev);
- struct drm_device *ddev = adev->ddev;
u32 vddnb;
int r, size = sizeof(vddnb);
@@ -1952,14 +2396,17 @@ static ssize_t amdgpu_hwmon_show_vddnb(struct device *dev,
if (!(adev->flags & AMD_IS_APU))
return -EINVAL;
- /* Can't get voltage when the card is off */
- if ((adev->flags & AMD_IS_PX) &&
- (ddev->switch_power_state != DRM_SWITCH_POWER_ON))
- return -EINVAL;
+ r = pm_runtime_get_sync(adev->ddev->dev);
+ if (r < 0)
+ return r;
/* get the voltage */
r = amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_VDDNB,
(void *)&vddnb, &size);
+
+ pm_runtime_mark_last_busy(adev->ddev->dev);
+ pm_runtime_put_autosuspend(adev->ddev->dev);
+
if (r)
return r;
@@ -1978,19 +2425,21 @@ static ssize_t amdgpu_hwmon_show_power_avg(struct device *dev,
char *buf)
{
struct amdgpu_device *adev = dev_get_drvdata(dev);
- struct drm_device *ddev = adev->ddev;
u32 query = 0;
int r, size = sizeof(u32);
unsigned uw;
- /* Can't get power when the card is off */
- if ((adev->flags & AMD_IS_PX) &&
- (ddev->switch_power_state != DRM_SWITCH_POWER_ON))
- return -EINVAL;
+ r = pm_runtime_get_sync(adev->ddev->dev);
+ if (r < 0)
+ return r;
/* get the power */
r = amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GPU_POWER,
(void *)&query, &size);
+
+ pm_runtime_mark_last_busy(adev->ddev->dev);
+ pm_runtime_put_autosuspend(adev->ddev->dev);
+
if (r)
return r;
@@ -2013,16 +2462,27 @@ static ssize_t amdgpu_hwmon_show_power_cap_max(struct device *dev,
{
struct amdgpu_device *adev = dev_get_drvdata(dev);
uint32_t limit = 0;
+ ssize_t size;
+ int r;
+
+ r = pm_runtime_get_sync(adev->ddev->dev);
+ if (r < 0)
+ return r;
if (is_support_sw_smu(adev)) {
smu_get_power_limit(&adev->smu, &limit, true, true);
- return snprintf(buf, PAGE_SIZE, "%u\n", limit * 1000000);
+ size = snprintf(buf, PAGE_SIZE, "%u\n", limit * 1000000);
} else if (adev->powerplay.pp_funcs && adev->powerplay.pp_funcs->get_power_limit) {
adev->powerplay.pp_funcs->get_power_limit(adev->powerplay.pp_handle, &limit, true);
- return snprintf(buf, PAGE_SIZE, "%u\n", limit * 1000000);
+ size = snprintf(buf, PAGE_SIZE, "%u\n", limit * 1000000);
} else {
- return snprintf(buf, PAGE_SIZE, "\n");
+ size = snprintf(buf, PAGE_SIZE, "\n");
}
+
+ pm_runtime_mark_last_busy(adev->ddev->dev);
+ pm_runtime_put_autosuspend(adev->ddev->dev);
+
+ return size;
}
static ssize_t amdgpu_hwmon_show_power_cap(struct device *dev,
@@ -2031,16 +2491,27 @@ static ssize_t amdgpu_hwmon_show_power_cap(struct device *dev,
{
struct amdgpu_device *adev = dev_get_drvdata(dev);
uint32_t limit = 0;
+ ssize_t size;
+ int r;
+
+ r = pm_runtime_get_sync(adev->ddev->dev);
+ if (r < 0)
+ return r;
if (is_support_sw_smu(adev)) {
smu_get_power_limit(&adev->smu, &limit, false, true);
- return snprintf(buf, PAGE_SIZE, "%u\n", limit * 1000000);
+ size = snprintf(buf, PAGE_SIZE, "%u\n", limit * 1000000);
} else if (adev->powerplay.pp_funcs && adev->powerplay.pp_funcs->get_power_limit) {
adev->powerplay.pp_funcs->get_power_limit(adev->powerplay.pp_handle, &limit, false);
- return snprintf(buf, PAGE_SIZE, "%u\n", limit * 1000000);
+ size = snprintf(buf, PAGE_SIZE, "%u\n", limit * 1000000);
} else {
- return snprintf(buf, PAGE_SIZE, "\n");
+ size = snprintf(buf, PAGE_SIZE, "\n");
}
+
+ pm_runtime_mark_last_busy(adev->ddev->dev);
+ pm_runtime_put_autosuspend(adev->ddev->dev);
+
+ return size;
}
@@ -2053,19 +2524,29 @@ static ssize_t amdgpu_hwmon_set_power_cap(struct device *dev,
int err;
u32 value;
+ if (amdgpu_sriov_vf(adev))
+ return -EINVAL;
+
err = kstrtou32(buf, 10, &value);
if (err)
return err;
value = value / 1000000; /* convert to Watt */
- if (is_support_sw_smu(adev)) {
+
+ err = pm_runtime_get_sync(adev->ddev->dev);
+ if (err < 0)
+ return err;
+
+ if (is_support_sw_smu(adev))
err = smu_set_power_limit(&adev->smu, value);
- } else if (adev->powerplay.pp_funcs && adev->powerplay.pp_funcs->set_power_limit) {
+ else if (adev->powerplay.pp_funcs && adev->powerplay.pp_funcs->set_power_limit)
err = adev->powerplay.pp_funcs->set_power_limit(adev->powerplay.pp_handle, value);
- } else {
+ else
err = -EINVAL;
- }
+
+ pm_runtime_mark_last_busy(adev->ddev->dev);
+ pm_runtime_put_autosuspend(adev->ddev->dev);
if (err)
return err;
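[Reviewer note: per the hwmon ABI, power1_cap is written in microwatts, so the handler divides by 1,000,000 before handing the SMU a value in watts. For example:

	u32 value = 150000000;		/* userspace writes 150000000 (uW) */
	u32 watts = value / 1000000;	/* 150 W reaches the SMU */
]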
@@ -2078,18 +2559,20 @@ static ssize_t amdgpu_hwmon_show_sclk(struct device *dev,
char *buf)
{
struct amdgpu_device *adev = dev_get_drvdata(dev);
- struct drm_device *ddev = adev->ddev;
uint32_t sclk;
int r, size = sizeof(sclk);
- /* Can't get voltage when the card is off */
- if ((adev->flags & AMD_IS_PX) &&
- (ddev->switch_power_state != DRM_SWITCH_POWER_ON))
- return -EINVAL;
+ r = pm_runtime_get_sync(adev->ddev->dev);
+ if (r < 0)
+ return r;
/* get the sclk */
r = amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GFX_SCLK,
(void *)&sclk, &size);
+
+ pm_runtime_mark_last_busy(adev->ddev->dev);
+ pm_runtime_put_autosuspend(adev->ddev->dev);
+
if (r)
return r;
@@ -2108,18 +2591,20 @@ static ssize_t amdgpu_hwmon_show_mclk(struct device *dev,
char *buf)
{
struct amdgpu_device *adev = dev_get_drvdata(dev);
- struct drm_device *ddev = adev->ddev;
uint32_t mclk;
int r, size = sizeof(mclk);
- /* Can't get voltage when the card is off */
- if ((adev->flags & AMD_IS_PX) &&
- (ddev->switch_power_state != DRM_SWITCH_POWER_ON))
- return -EINVAL;
+ r = pm_runtime_get_sync(adev->ddev->dev);
+ if (r < 0)
+ return r;
/* get the mclk */
r = amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GFX_MCLK,
(void *)&mclk, &size);
+
+ pm_runtime_mark_last_busy(adev->ddev->dev);
+ pm_runtime_put_autosuspend(adev->ddev->dev);
+
if (r)
return r;
@@ -2299,6 +2784,23 @@ static umode_t hwmon_attributes_visible(struct kobject *kobj,
struct amdgpu_device *adev = dev_get_drvdata(dev);
umode_t effective_mode = attr->mode;
+ /* under multi-vf mode, the hwmon attributes are all not supported */
+ if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev))
+ return 0;
+
+ /* there is no fan under pp one vf mode */
+ if (amdgpu_sriov_is_pp_one_vf(adev) &&
+ (attr == &sensor_dev_attr_pwm1.dev_attr.attr ||
+ attr == &sensor_dev_attr_pwm1_enable.dev_attr.attr ||
+ attr == &sensor_dev_attr_pwm1_max.dev_attr.attr ||
+ attr == &sensor_dev_attr_pwm1_min.dev_attr.attr ||
+ attr == &sensor_dev_attr_fan1_input.dev_attr.attr ||
+ attr == &sensor_dev_attr_fan1_min.dev_attr.attr ||
+ attr == &sensor_dev_attr_fan1_max.dev_attr.attr ||
+ attr == &sensor_dev_attr_fan1_target.dev_attr.attr ||
+ attr == &sensor_dev_attr_fan1_enable.dev_attr.attr))
+ return 0;
+
/* Skip fan attributes if fan is not present */
if (adev->pm.no_fan && (attr == &sensor_dev_attr_pwm1.dev_attr.attr ||
attr == &sensor_dev_attr_pwm1_enable.dev_attr.attr ||
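[Reviewer note: hwmon_attributes_visible() is the attribute group's .is_visible callback; returning 0 hides the file from sysfs entirely, while returning the (possibly adjusted) mode keeps it. Schematically, how such a callback is wired up — names follow the usual kernel idiom and are assumed for this driver:

static const struct attribute_group hwmon_attrgroup = {
	.attrs		= hwmon_attributes,
	.is_visible	= hwmon_attributes_visible,
};
]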
@@ -2666,17 +3168,12 @@ static void amdgpu_dpm_change_power_state_locked(struct amdgpu_device *adev)
void amdgpu_dpm_enable_uvd(struct amdgpu_device *adev, bool enable)
{
int ret = 0;
- if (is_support_sw_smu(adev)) {
- ret = smu_dpm_set_power_gate(&adev->smu, AMD_IP_BLOCK_TYPE_UVD, enable);
- if (ret)
- DRM_ERROR("[SW SMU]: dpm enable uvd failed, state = %s, ret = %d. \n",
- enable ? "true" : "false", ret);
- } else if (adev->powerplay.pp_funcs->set_powergating_by_smu) {
- /* enable/disable UVD */
- mutex_lock(&adev->pm.mutex);
- amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_UVD, !enable);
- mutex_unlock(&adev->pm.mutex);
- }
+
+ ret = amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_UVD, !enable);
+ if (ret)
+ DRM_ERROR("Dpm %s uvd failed, ret = %d. \n",
+ enable ? "enable" : "disable", ret);
+
/* enable/disable Low Memory PState for UVD (4k videos) */
if (adev->asic_type == CHIP_STONEY &&
adev->uvd.decode_image_width >= WIDTH_4K) {
@@ -2693,17 +3190,11 @@ void amdgpu_dpm_enable_uvd(struct amdgpu_device *adev, bool enable)
void amdgpu_dpm_enable_vce(struct amdgpu_device *adev, bool enable)
{
int ret = 0;
- if (is_support_sw_smu(adev)) {
- ret = smu_dpm_set_power_gate(&adev->smu, AMD_IP_BLOCK_TYPE_VCE, enable);
- if (ret)
- DRM_ERROR("[SW SMU]: dpm enable vce failed, state = %s, ret = %d. \n",
- enable ? "true" : "false", ret);
- } else if (adev->powerplay.pp_funcs->set_powergating_by_smu) {
- /* enable/disable VCE */
- mutex_lock(&adev->pm.mutex);
- amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_VCE, !enable);
- mutex_unlock(&adev->pm.mutex);
- }
+
+ ret = amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_VCE, !enable);
+ if (ret)
+ DRM_ERROR("Dpm %s vce failed, ret = %d. \n",
+ enable ? "enable" : "disable", ret);
}
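[Reviewer note: the UVD/VCE (and, below, JPEG) enables now funnel through amdgpu_dpm_set_powergating_by_smu(), which takes the gating sense rather than the enable sense — hence the !enable inversion:

	/* enable == true   ->  set_powergating_by_smu(..., false)  -- power the block up
	 * enable == false  ->  set_powergating_by_smu(..., true)   -- gate it off */
]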
void amdgpu_pm_print_power_states(struct amdgpu_device *adev)
@@ -2718,42 +3209,14 @@ void amdgpu_pm_print_power_states(struct amdgpu_device *adev)
}
-int amdgpu_pm_virt_sysfs_init(struct amdgpu_device *adev)
+void amdgpu_dpm_enable_jpeg(struct amdgpu_device *adev, bool enable)
{
int ret = 0;
- if (!(amdgpu_sriov_vf(adev) && amdgim_is_hwperf(adev)))
- return ret;
-
- ret = device_create_file(adev->dev, &dev_attr_pp_dpm_sclk);
- if (ret) {
- DRM_ERROR("failed to create device file pp_dpm_sclk\n");
- return ret;
- }
-
- ret = device_create_file(adev->dev, &dev_attr_pp_dpm_mclk);
- if (ret) {
- DRM_ERROR("failed to create device file pp_dpm_mclk\n");
- return ret;
- }
-
- ret = device_create_file(adev->dev, &dev_attr_power_dpm_force_performance_level);
- if (ret) {
- DRM_ERROR("failed to create device file for dpm state\n");
- return ret;
- }
-
- return ret;
-}
-
-void amdgpu_pm_virt_sysfs_fini(struct amdgpu_device *adev)
-{
- if (!(amdgpu_sriov_vf(adev) && amdgim_is_hwperf(adev)))
- return;
-
- device_remove_file(adev->dev, &dev_attr_power_dpm_force_performance_level);
- device_remove_file(adev->dev, &dev_attr_pp_dpm_sclk);
- device_remove_file(adev->dev, &dev_attr_pp_dpm_mclk);
+ ret = amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_JPEG, !enable);
+ if (ret)
+ DRM_ERROR("Dpm %s jpeg failed, ret = %d. \n",
+ enable ? "enable" : "disable", ret);
}
int amdgpu_pm_load_smu_firmware(struct amdgpu_device *adev, uint32_t *smu_version)
@@ -3163,8 +3626,12 @@ static int amdgpu_debugfs_pm_info(struct seq_file *m, void *data)
struct drm_info_node *node = (struct drm_info_node *) m->private;
struct drm_device *dev = node->minor->dev;
struct amdgpu_device *adev = dev->dev_private;
- struct drm_device *ddev = adev->ddev;
u32 flags = 0;
+ int r;
+
+ r = pm_runtime_get_sync(dev->dev);
+ if (r < 0)
+ return r;
amdgpu_device_ip_get_clockgating_state(adev, &flags);
seq_printf(m, "Clock Gating Flags Mask: 0x%x\n", flags);
@@ -3173,23 +3640,28 @@ static int amdgpu_debugfs_pm_info(struct seq_file *m, void *data)
if (!adev->pm.dpm_enabled) {
seq_printf(m, "dpm not enabled\n");
+ pm_runtime_mark_last_busy(dev->dev);
+ pm_runtime_put_autosuspend(dev->dev);
return 0;
}
- if ((adev->flags & AMD_IS_PX) &&
- (ddev->switch_power_state != DRM_SWITCH_POWER_ON)) {
- seq_printf(m, "PX asic powered off\n");
- } else if (!is_support_sw_smu(adev) && adev->powerplay.pp_funcs->debugfs_print_current_performance_level) {
+
+ if (!is_support_sw_smu(adev) &&
+ adev->powerplay.pp_funcs->debugfs_print_current_performance_level) {
mutex_lock(&adev->pm.mutex);
if (adev->powerplay.pp_funcs->debugfs_print_current_performance_level)
adev->powerplay.pp_funcs->debugfs_print_current_performance_level(adev, m);
else
seq_printf(m, "Debugfs support not implemented for this asic\n");
mutex_unlock(&adev->pm.mutex);
+ r = 0;
} else {
- return amdgpu_debugfs_pm_info_pp(m, adev);
+ r = amdgpu_debugfs_pm_info_pp(m, adev);
}
- return 0;
+ pm_runtime_mark_last_busy(dev->dev);
+ pm_runtime_put_autosuspend(dev->dev);
+
+ return r;
}
static const struct drm_info_list amdgpu_pm_info_list[] = {
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.h
index ef31448ee8d8..3da1da277805 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.h
@@ -41,5 +41,6 @@ void amdgpu_pm_compute_clocks(struct amdgpu_device *adev);
void amdgpu_dpm_thermal_work_handler(struct work_struct *work);
void amdgpu_dpm_enable_uvd(struct amdgpu_device *adev, bool enable);
void amdgpu_dpm_enable_vce(struct amdgpu_device *adev, bool enable);
+void amdgpu_dpm_enable_jpeg(struct amdgpu_device *adev, bool enable);
#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_pmu.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_pmu.c
index 0e6dba9f60f0..07914e34bc25 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_pmu.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_pmu.c
@@ -74,9 +74,9 @@ static void amdgpu_perf_start(struct perf_event *event, int flags)
switch (pe->pmu_perf_type) {
case PERF_TYPE_AMDGPU_DF:
if (!(flags & PERF_EF_RELOAD))
- pe->adev->df_funcs->pmc_start(pe->adev, hwc->conf, 1);
+ pe->adev->df.funcs->pmc_start(pe->adev, hwc->conf, 1);
- pe->adev->df_funcs->pmc_start(pe->adev, hwc->conf, 0);
+ pe->adev->df.funcs->pmc_start(pe->adev, hwc->conf, 0);
break;
default:
break;
@@ -101,13 +101,13 @@ static void amdgpu_perf_read(struct perf_event *event)
switch (pe->pmu_perf_type) {
case PERF_TYPE_AMDGPU_DF:
- pe->adev->df_funcs->pmc_get_count(pe->adev, hwc->conf,
+ pe->adev->df.funcs->pmc_get_count(pe->adev, hwc->conf,
&count);
break;
default:
count = 0;
break;
- };
+ }
} while (local64_cmpxchg(&hwc->prev_count, prev, count) != prev);
local64_add(count - prev, &event->count);
@@ -126,11 +126,11 @@ static void amdgpu_perf_stop(struct perf_event *event, int flags)
switch (pe->pmu_perf_type) {
case PERF_TYPE_AMDGPU_DF:
- pe->adev->df_funcs->pmc_stop(pe->adev, hwc->conf, 0);
+ pe->adev->df.funcs->pmc_stop(pe->adev, hwc->conf, 0);
break;
default:
break;
- };
+ }
WARN_ON_ONCE(hwc->state & PERF_HES_STOPPED);
hwc->state |= PERF_HES_STOPPED;
@@ -156,11 +156,11 @@ static int amdgpu_perf_add(struct perf_event *event, int flags)
switch (pe->pmu_perf_type) {
case PERF_TYPE_AMDGPU_DF:
- retval = pe->adev->df_funcs->pmc_start(pe->adev, hwc->conf, 1);
+ retval = pe->adev->df.funcs->pmc_start(pe->adev, hwc->conf, 1);
break;
default:
return 0;
- };
+ }
if (retval)
return retval;
@@ -184,11 +184,11 @@ static void amdgpu_perf_del(struct perf_event *event, int flags)
switch (pe->pmu_perf_type) {
case PERF_TYPE_AMDGPU_DF:
- pe->adev->df_funcs->pmc_stop(pe->adev, hwc->conf, 1);
+ pe->adev->df.funcs->pmc_stop(pe->adev, hwc->conf, 1);
break;
default:
break;
- };
+ }
perf_event_update_userpage(event);
}
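[Reviewer note: the df_funcs pointer moves into an adev->df sub-struct so data-fabric state can live alongside its callbacks; the rename is mechanical, plus dropping the stray semicolons after the switch blocks. Assumed shape of the container:

struct amdgpu_df {
	const struct amdgpu_df_funcs *funcs;
	/* ... other data-fabric state ... */
};
/* old: adev->df_funcs->pmc_start(...)    new: adev->df.funcs->pmc_start(...) */
]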
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
index e1b8d8daeafc..3a1570dafe34 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
@@ -158,7 +158,7 @@ psp_cmd_submit_buf(struct psp_context *psp,
memcpy(psp->cmd_buf_mem, cmd, sizeof(struct psp_gfx_cmd_resp));
index = atomic_inc_return(&psp->fence_value);
- ret = psp_cmd_submit(psp, psp->cmd_buf_mc_addr, fence_mc_addr, index);
+ ret = psp_ring_cmd_submit(psp, psp->cmd_buf_mc_addr, fence_mc_addr, index);
if (ret) {
atomic_dec(&psp->fence_value);
mutex_unlock(&psp->mutex);
@@ -191,9 +191,9 @@ psp_cmd_submit_buf(struct psp_context *psp,
if (ucode)
DRM_WARN("failed to load ucode id (%d) ",
ucode->ucode_id);
- DRM_DEBUG_DRIVER("psp command (0x%X) failed and response status is (0x%X)\n",
+ DRM_WARN("psp command (0x%X) failed and response status is (0x%X)\n",
psp->cmd_buf_mem->cmd_id,
- psp->cmd_buf_mem->resp.status & GFX_CMD_STATUS_MASK);
+ psp->cmd_buf_mem->resp.status);
if (!timeout) {
mutex_unlock(&psp->mutex);
return -EINVAL;
@@ -318,35 +318,17 @@ static int psp_tmr_load(struct psp_context *psp)
return ret;
}
-static void psp_prep_asd_cmd_buf(struct psp_gfx_cmd_resp *cmd,
- uint64_t asd_mc, uint64_t asd_mc_shared,
- uint32_t size, uint32_t shared_size)
+static void psp_prep_asd_load_cmd_buf(struct psp_gfx_cmd_resp *cmd,
+ uint64_t asd_mc, uint32_t size)
{
cmd->cmd_id = GFX_CMD_ID_LOAD_ASD;
cmd->cmd.cmd_load_ta.app_phy_addr_lo = lower_32_bits(asd_mc);
cmd->cmd.cmd_load_ta.app_phy_addr_hi = upper_32_bits(asd_mc);
cmd->cmd.cmd_load_ta.app_len = size;
- cmd->cmd.cmd_load_ta.cmd_buf_phy_addr_lo = lower_32_bits(asd_mc_shared);
- cmd->cmd.cmd_load_ta.cmd_buf_phy_addr_hi = upper_32_bits(asd_mc_shared);
- cmd->cmd.cmd_load_ta.cmd_buf_len = shared_size;
-}
-
-static int psp_asd_init(struct psp_context *psp)
-{
- int ret;
-
- /*
- * Allocate 16k memory aligned to 4k from Frame Buffer (local
- * physical) for shared ASD <-> Driver
- */
- ret = amdgpu_bo_create_kernel(psp->adev, PSP_ASD_SHARED_MEM_SIZE,
- PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM,
- &psp->asd_shared_bo,
- &psp->asd_shared_mc_addr,
- &psp->asd_shared_buf);
-
- return ret;
+ cmd->cmd.cmd_load_ta.cmd_buf_phy_addr_lo = 0;
+ cmd->cmd.cmd_load_ta.cmd_buf_phy_addr_hi = 0;
+ cmd->cmd.cmd_load_ta.cmd_buf_len = 0;
}
static int psp_asd_load(struct psp_context *psp)
@@ -368,11 +350,49 @@ static int psp_asd_load(struct psp_context *psp)
memset(psp->fw_pri_buf, 0, PSP_1_MEG);
memcpy(psp->fw_pri_buf, psp->asd_start_addr, psp->asd_ucode_size);
- psp_prep_asd_cmd_buf(cmd, psp->fw_pri_mc_addr, psp->asd_shared_mc_addr,
- psp->asd_ucode_size, PSP_ASD_SHARED_MEM_SIZE);
+ psp_prep_asd_load_cmd_buf(cmd, psp->fw_pri_mc_addr,
+ psp->asd_ucode_size);
+
+ ret = psp_cmd_submit_buf(psp, NULL, cmd,
+ psp->fence_buf_mc_addr);
+ if (!ret) {
+ psp->asd_context.asd_initialized = true;
+ psp->asd_context.session_id = cmd->resp.session_id;
+ }
+
+ kfree(cmd);
+
+ return ret;
+}
+
+static void psp_prep_ta_unload_cmd_buf(struct psp_gfx_cmd_resp *cmd,
+ uint32_t session_id)
+{
+ cmd->cmd_id = GFX_CMD_ID_UNLOAD_TA;
+ cmd->cmd.cmd_unload_ta.session_id = session_id;
+}
+
+static int psp_asd_unload(struct psp_context *psp)
+{
+ int ret;
+ struct psp_gfx_cmd_resp *cmd;
+
+ if (amdgpu_sriov_vf(psp->adev))
+ return 0;
+
+ if (!psp->asd_context.asd_initialized)
+ return 0;
+
+ cmd = kzalloc(sizeof(struct psp_gfx_cmd_resp), GFP_KERNEL);
+ if (!cmd)
+ return -ENOMEM;
+
+ psp_prep_ta_unload_cmd_buf(cmd, psp->asd_context.session_id);
ret = psp_cmd_submit_buf(psp, NULL, cmd,
psp->fence_buf_mc_addr);
+ if (!ret)
+ psp->asd_context.asd_initialized = false;
kfree(cmd);
@@ -407,18 +427,20 @@ int psp_reg_program(struct psp_context *psp, enum psp_reg_prog_id reg,
return ret;
}
-static void psp_prep_xgmi_ta_load_cmd_buf(struct psp_gfx_cmd_resp *cmd,
- uint64_t xgmi_ta_mc, uint64_t xgmi_mc_shared,
- uint32_t xgmi_ta_size, uint32_t shared_size)
+static void psp_prep_ta_load_cmd_buf(struct psp_gfx_cmd_resp *cmd,
+ uint64_t ta_bin_mc,
+ uint32_t ta_bin_size,
+ uint64_t ta_shared_mc,
+ uint32_t ta_shared_size)
{
- cmd->cmd_id = GFX_CMD_ID_LOAD_TA;
- cmd->cmd.cmd_load_ta.app_phy_addr_lo = lower_32_bits(xgmi_ta_mc);
- cmd->cmd.cmd_load_ta.app_phy_addr_hi = upper_32_bits(xgmi_ta_mc);
- cmd->cmd.cmd_load_ta.app_len = xgmi_ta_size;
+ cmd->cmd_id = GFX_CMD_ID_LOAD_TA;
+ cmd->cmd.cmd_load_ta.app_phy_addr_lo = lower_32_bits(ta_bin_mc);
+ cmd->cmd.cmd_load_ta.app_phy_addr_hi = upper_32_bits(ta_bin_mc);
+ cmd->cmd.cmd_load_ta.app_len = ta_bin_size;
- cmd->cmd.cmd_load_ta.cmd_buf_phy_addr_lo = lower_32_bits(xgmi_mc_shared);
- cmd->cmd.cmd_load_ta.cmd_buf_phy_addr_hi = upper_32_bits(xgmi_mc_shared);
- cmd->cmd.cmd_load_ta.cmd_buf_len = shared_size;
+ cmd->cmd.cmd_load_ta.cmd_buf_phy_addr_lo = lower_32_bits(ta_shared_mc);
+ cmd->cmd.cmd_load_ta.cmd_buf_phy_addr_hi = upper_32_bits(ta_shared_mc);
+ cmd->cmd.cmd_load_ta.cmd_buf_len = ta_shared_size;
}
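[Reviewer note: psp_prep_ta_load_cmd_buf() generalizes the former per-TA (XGMI/RAS/HDCP) copies; callers differ only in the binary and shared-buffer addresses and sizes, e.g. the XGMI path below:

	psp_prep_ta_load_cmd_buf(cmd,
				 psp->fw_pri_mc_addr,			/* TA binary */
				 psp->ta_xgmi_ucode_size,
				 psp->xgmi_context.xgmi_shared_mc_addr,	/* shared buf */
				 PSP_XGMI_SHARED_MEM_SIZE);
]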
static int psp_xgmi_init_shared_buf(struct psp_context *psp)
@@ -438,6 +460,36 @@ static int psp_xgmi_init_shared_buf(struct psp_context *psp)
return ret;
}
+static void psp_prep_ta_invoke_cmd_buf(struct psp_gfx_cmd_resp *cmd,
+ uint32_t ta_cmd_id,
+ uint32_t session_id)
+{
+ cmd->cmd_id = GFX_CMD_ID_INVOKE_CMD;
+ cmd->cmd.cmd_invoke_cmd.session_id = session_id;
+ cmd->cmd.cmd_invoke_cmd.ta_cmd_id = ta_cmd_id;
+}
+
+int psp_ta_invoke(struct psp_context *psp,
+ uint32_t ta_cmd_id,
+ uint32_t session_id)
+{
+ int ret;
+ struct psp_gfx_cmd_resp *cmd;
+
+ cmd = kzalloc(sizeof(struct psp_gfx_cmd_resp), GFP_KERNEL);
+ if (!cmd)
+ return -ENOMEM;
+
+ psp_prep_ta_invoke_cmd_buf(cmd, ta_cmd_id, session_id);
+
+ ret = psp_cmd_submit_buf(psp, NULL, cmd,
+ psp->fence_buf_mc_addr);
+
+ kfree(cmd);
+
+ return ret;
+}
+
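[Reviewer note: with the generic psp_ta_invoke() in place, the per-TA invoke wrappers collapse to one-liners, as the XGMI conversion further down shows:

int psp_xgmi_invoke(struct psp_context *psp, uint32_t ta_cmd_id)
{
	return psp_ta_invoke(psp, ta_cmd_id, psp->xgmi_context.session_id);
}
]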
static int psp_xgmi_load(struct psp_context *psp)
{
int ret;
@@ -446,8 +498,6 @@ static int psp_xgmi_load(struct psp_context *psp)
/*
* TODO: bypass the loading in sriov for now
*/
- if (amdgpu_sriov_vf(psp->adev))
- return 0;
cmd = kzalloc(sizeof(struct psp_gfx_cmd_resp), GFP_KERNEL);
if (!cmd)
@@ -456,9 +506,11 @@ static int psp_xgmi_load(struct psp_context *psp)
memset(psp->fw_pri_buf, 0, PSP_1_MEG);
memcpy(psp->fw_pri_buf, psp->ta_xgmi_start_addr, psp->ta_xgmi_ucode_size);
- psp_prep_xgmi_ta_load_cmd_buf(cmd, psp->fw_pri_mc_addr,
- psp->xgmi_context.xgmi_shared_mc_addr,
- psp->ta_xgmi_ucode_size, PSP_XGMI_SHARED_MEM_SIZE);
+ psp_prep_ta_load_cmd_buf(cmd,
+ psp->fw_pri_mc_addr,
+ psp->ta_xgmi_ucode_size,
+ psp->xgmi_context.xgmi_shared_mc_addr,
+ PSP_XGMI_SHARED_MEM_SIZE);
ret = psp_cmd_submit_buf(psp, NULL, cmd,
psp->fence_buf_mc_addr);
@@ -473,29 +525,25 @@ static int psp_xgmi_load(struct psp_context *psp)
return ret;
}
-static void psp_prep_xgmi_ta_unload_cmd_buf(struct psp_gfx_cmd_resp *cmd,
- uint32_t xgmi_session_id)
-{
- cmd->cmd_id = GFX_CMD_ID_UNLOAD_TA;
- cmd->cmd.cmd_unload_ta.session_id = xgmi_session_id;
-}
-
static int psp_xgmi_unload(struct psp_context *psp)
{
int ret;
struct psp_gfx_cmd_resp *cmd;
+ struct amdgpu_device *adev = psp->adev;
+
+ /* XGMI TA unload currently is not supported on Arcturus */
+ if (adev->asic_type == CHIP_ARCTURUS)
+ return 0;
/*
* TODO: bypass the unloading in sriov for now
*/
- if (amdgpu_sriov_vf(psp->adev))
- return 0;
cmd = kzalloc(sizeof(struct psp_gfx_cmd_resp), GFP_KERNEL);
if (!cmd)
return -ENOMEM;
- psp_prep_xgmi_ta_unload_cmd_buf(cmd, psp->xgmi_context.session_id);
+ psp_prep_ta_unload_cmd_buf(cmd, psp->xgmi_context.session_id);
ret = psp_cmd_submit_buf(psp, NULL, cmd,
psp->fence_buf_mc_addr);
@@ -505,40 +553,9 @@ static int psp_xgmi_unload(struct psp_context *psp)
return ret;
}
-static void psp_prep_xgmi_ta_invoke_cmd_buf(struct psp_gfx_cmd_resp *cmd,
- uint32_t ta_cmd_id,
- uint32_t xgmi_session_id)
-{
- cmd->cmd_id = GFX_CMD_ID_INVOKE_CMD;
- cmd->cmd.cmd_invoke_cmd.session_id = xgmi_session_id;
- cmd->cmd.cmd_invoke_cmd.ta_cmd_id = ta_cmd_id;
- /* Note: cmd_invoke_cmd.buf is not used for now */
-}
-
int psp_xgmi_invoke(struct psp_context *psp, uint32_t ta_cmd_id)
{
- int ret;
- struct psp_gfx_cmd_resp *cmd;
-
- /*
- * TODO: bypass the loading in sriov for now
- */
- if (amdgpu_sriov_vf(psp->adev))
- return 0;
-
- cmd = kzalloc(sizeof(struct psp_gfx_cmd_resp), GFP_KERNEL);
- if (!cmd)
- return -ENOMEM;
-
- psp_prep_xgmi_ta_invoke_cmd_buf(cmd, ta_cmd_id,
- psp->xgmi_context.session_id);
-
- ret = psp_cmd_submit_buf(psp, NULL, cmd,
- psp->fence_buf_mc_addr);
-
- kfree(cmd);
-
- return ret;
+ return psp_ta_invoke(psp, ta_cmd_id, psp->xgmi_context.session_id);
}
static int psp_xgmi_terminate(struct psp_context *psp)
@@ -594,20 +611,6 @@ static int psp_xgmi_initialize(struct psp_context *psp)
}
// ras begin
-static void psp_prep_ras_ta_load_cmd_buf(struct psp_gfx_cmd_resp *cmd,
- uint64_t ras_ta_mc, uint64_t ras_mc_shared,
- uint32_t ras_ta_size, uint32_t shared_size)
-{
- cmd->cmd_id = GFX_CMD_ID_LOAD_TA;
- cmd->cmd.cmd_load_ta.app_phy_addr_lo = lower_32_bits(ras_ta_mc);
- cmd->cmd.cmd_load_ta.app_phy_addr_hi = upper_32_bits(ras_ta_mc);
- cmd->cmd.cmd_load_ta.app_len = ras_ta_size;
-
- cmd->cmd.cmd_load_ta.cmd_buf_phy_addr_lo = lower_32_bits(ras_mc_shared);
- cmd->cmd.cmd_load_ta.cmd_buf_phy_addr_hi = upper_32_bits(ras_mc_shared);
- cmd->cmd.cmd_load_ta.cmd_buf_len = shared_size;
-}
-
static int psp_ras_init_shared_buf(struct psp_context *psp)
{
int ret;
@@ -643,15 +646,17 @@ static int psp_ras_load(struct psp_context *psp)
memset(psp->fw_pri_buf, 0, PSP_1_MEG);
memcpy(psp->fw_pri_buf, psp->ta_ras_start_addr, psp->ta_ras_ucode_size);
- psp_prep_ras_ta_load_cmd_buf(cmd, psp->fw_pri_mc_addr,
- psp->ras.ras_shared_mc_addr,
- psp->ta_ras_ucode_size, PSP_RAS_SHARED_MEM_SIZE);
+ psp_prep_ta_load_cmd_buf(cmd,
+ psp->fw_pri_mc_addr,
+ psp->ta_ras_ucode_size,
+ psp->ras.ras_shared_mc_addr,
+ PSP_RAS_SHARED_MEM_SIZE);
ret = psp_cmd_submit_buf(psp, NULL, cmd,
psp->fence_buf_mc_addr);
if (!ret) {
- psp->ras.ras_initialized = 1;
+ psp->ras.ras_initialized = true;
psp->ras.session_id = cmd->resp.session_id;
}
@@ -660,13 +665,6 @@ static int psp_ras_load(struct psp_context *psp)
return ret;
}
-static void psp_prep_ras_ta_unload_cmd_buf(struct psp_gfx_cmd_resp *cmd,
- uint32_t ras_session_id)
-{
- cmd->cmd_id = GFX_CMD_ID_UNLOAD_TA;
- cmd->cmd.cmd_unload_ta.session_id = ras_session_id;
-}
-
static int psp_ras_unload(struct psp_context *psp)
{
int ret;
@@ -682,7 +680,7 @@ static int psp_ras_unload(struct psp_context *psp)
if (!cmd)
return -ENOMEM;
- psp_prep_ras_ta_unload_cmd_buf(cmd, psp->ras.session_id);
+ psp_prep_ta_unload_cmd_buf(cmd, psp->ras.session_id);
ret = psp_cmd_submit_buf(psp, NULL, cmd,
psp->fence_buf_mc_addr);
@@ -692,40 +690,15 @@ static int psp_ras_unload(struct psp_context *psp)
return ret;
}
-static void psp_prep_ras_ta_invoke_cmd_buf(struct psp_gfx_cmd_resp *cmd,
- uint32_t ta_cmd_id,
- uint32_t ras_session_id)
-{
- cmd->cmd_id = GFX_CMD_ID_INVOKE_CMD;
- cmd->cmd.cmd_invoke_cmd.session_id = ras_session_id;
- cmd->cmd.cmd_invoke_cmd.ta_cmd_id = ta_cmd_id;
- /* Note: cmd_invoke_cmd.buf is not used for now */
-}
-
int psp_ras_invoke(struct psp_context *psp, uint32_t ta_cmd_id)
{
- int ret;
- struct psp_gfx_cmd_resp *cmd;
-
/*
* TODO: bypass the loading in sriov for now
*/
if (amdgpu_sriov_vf(psp->adev))
return 0;
- cmd = kzalloc(sizeof(struct psp_gfx_cmd_resp), GFP_KERNEL);
- if (!cmd)
- return -ENOMEM;
-
- psp_prep_ras_ta_invoke_cmd_buf(cmd, ta_cmd_id,
- psp->ras.session_id);
-
- ret = psp_cmd_submit_buf(psp, NULL, cmd,
- psp->fence_buf_mc_addr);
-
- kfree(cmd);
-
- return ret;
+ return psp_ta_invoke(psp, ta_cmd_id, psp->ras.session_id);
}
int psp_ras_enable_features(struct psp_context *psp,
@@ -771,7 +744,7 @@ static int psp_ras_terminate(struct psp_context *psp)
if (ret)
return ret;
- psp->ras.ras_initialized = 0;
+ psp->ras.ras_initialized = false;
/* free ras shared memory */
amdgpu_bo_free_kernel(&psp->ras.ras_shared_bo,
@@ -812,24 +785,6 @@ static int psp_ras_initialize(struct psp_context *psp)
// ras end
// HDCP start
-static void psp_prep_hdcp_ta_load_cmd_buf(struct psp_gfx_cmd_resp *cmd,
- uint64_t hdcp_ta_mc,
- uint64_t hdcp_mc_shared,
- uint32_t hdcp_ta_size,
- uint32_t shared_size)
-{
- cmd->cmd_id = GFX_CMD_ID_LOAD_TA;
- cmd->cmd.cmd_load_ta.app_phy_addr_lo = lower_32_bits(hdcp_ta_mc);
- cmd->cmd.cmd_load_ta.app_phy_addr_hi = upper_32_bits(hdcp_ta_mc);
- cmd->cmd.cmd_load_ta.app_len = hdcp_ta_size;
-
- cmd->cmd.cmd_load_ta.cmd_buf_phy_addr_lo =
- lower_32_bits(hdcp_mc_shared);
- cmd->cmd.cmd_load_ta.cmd_buf_phy_addr_hi =
- upper_32_bits(hdcp_mc_shared);
- cmd->cmd.cmd_load_ta.cmd_buf_len = shared_size;
-}
-
static int psp_hdcp_init_shared_buf(struct psp_context *psp)
{
int ret;
@@ -866,15 +821,16 @@ static int psp_hdcp_load(struct psp_context *psp)
memcpy(psp->fw_pri_buf, psp->ta_hdcp_start_addr,
psp->ta_hdcp_ucode_size);
- psp_prep_hdcp_ta_load_cmd_buf(cmd, psp->fw_pri_mc_addr,
- psp->hdcp_context.hdcp_shared_mc_addr,
- psp->ta_hdcp_ucode_size,
- PSP_HDCP_SHARED_MEM_SIZE);
+ psp_prep_ta_load_cmd_buf(cmd,
+ psp->fw_pri_mc_addr,
+ psp->ta_hdcp_ucode_size,
+ psp->hdcp_context.hdcp_shared_mc_addr,
+ PSP_HDCP_SHARED_MEM_SIZE);
ret = psp_cmd_submit_buf(psp, NULL, cmd, psp->fence_buf_mc_addr);
if (!ret) {
- psp->hdcp_context.hdcp_initialized = 1;
+ psp->hdcp_context.hdcp_initialized = true;
psp->hdcp_context.session_id = cmd->resp.session_id;
}
@@ -910,12 +866,6 @@ static int psp_hdcp_initialize(struct psp_context *psp)
return 0;
}
-static void psp_prep_hdcp_ta_unload_cmd_buf(struct psp_gfx_cmd_resp *cmd,
- uint32_t hdcp_session_id)
-{
- cmd->cmd_id = GFX_CMD_ID_UNLOAD_TA;
- cmd->cmd.cmd_unload_ta.session_id = hdcp_session_id;
-}
static int psp_hdcp_unload(struct psp_context *psp)
{
@@ -932,7 +882,7 @@ static int psp_hdcp_unload(struct psp_context *psp)
if (!cmd)
return -ENOMEM;
- psp_prep_hdcp_ta_unload_cmd_buf(cmd, psp->hdcp_context.session_id);
+ psp_prep_ta_unload_cmd_buf(cmd, psp->hdcp_context.session_id);
ret = psp_cmd_submit_buf(psp, NULL, cmd, psp->fence_buf_mc_addr);
@@ -941,39 +891,15 @@ static int psp_hdcp_unload(struct psp_context *psp)
return ret;
}
-static void psp_prep_hdcp_ta_invoke_cmd_buf(struct psp_gfx_cmd_resp *cmd,
- uint32_t ta_cmd_id,
- uint32_t hdcp_session_id)
-{
- cmd->cmd_id = GFX_CMD_ID_INVOKE_CMD;
- cmd->cmd.cmd_invoke_cmd.session_id = hdcp_session_id;
- cmd->cmd.cmd_invoke_cmd.ta_cmd_id = ta_cmd_id;
- /* Note: cmd_invoke_cmd.buf is not used for now */
-}
-
int psp_hdcp_invoke(struct psp_context *psp, uint32_t ta_cmd_id)
{
- int ret;
- struct psp_gfx_cmd_resp *cmd;
-
/*
* TODO: bypass the loading in sriov for now
*/
if (amdgpu_sriov_vf(psp->adev))
return 0;
- cmd = kzalloc(sizeof(struct psp_gfx_cmd_resp), GFP_KERNEL);
- if (!cmd)
- return -ENOMEM;
-
- psp_prep_hdcp_ta_invoke_cmd_buf(cmd, ta_cmd_id,
- psp->hdcp_context.session_id);
-
- ret = psp_cmd_submit_buf(psp, NULL, cmd, psp->fence_buf_mc_addr);
-
- kfree(cmd);
-
- return ret;
+ return psp_ta_invoke(psp, ta_cmd_id, psp->hdcp_context.session_id);
}
static int psp_hdcp_terminate(struct psp_context *psp)
@@ -993,7 +919,7 @@ static int psp_hdcp_terminate(struct psp_context *psp)
if (ret)
return ret;
- psp->hdcp_context.hdcp_initialized = 0;
+ psp->hdcp_context.hdcp_initialized = false;
/* free hdcp shared memory */
amdgpu_bo_free_kernel(&psp->hdcp_context.hdcp_shared_bo,
@@ -1005,22 +931,6 @@ static int psp_hdcp_terminate(struct psp_context *psp)
// HDCP end
// DTM start
-static void psp_prep_dtm_ta_load_cmd_buf(struct psp_gfx_cmd_resp *cmd,
- uint64_t dtm_ta_mc,
- uint64_t dtm_mc_shared,
- uint32_t dtm_ta_size,
- uint32_t shared_size)
-{
- cmd->cmd_id = GFX_CMD_ID_LOAD_TA;
- cmd->cmd.cmd_load_ta.app_phy_addr_lo = lower_32_bits(dtm_ta_mc);
- cmd->cmd.cmd_load_ta.app_phy_addr_hi = upper_32_bits(dtm_ta_mc);
- cmd->cmd.cmd_load_ta.app_len = dtm_ta_size;
-
- cmd->cmd.cmd_load_ta.cmd_buf_phy_addr_lo = lower_32_bits(dtm_mc_shared);
- cmd->cmd.cmd_load_ta.cmd_buf_phy_addr_hi = upper_32_bits(dtm_mc_shared);
- cmd->cmd.cmd_load_ta.cmd_buf_len = shared_size;
-}
-
static int psp_dtm_init_shared_buf(struct psp_context *psp)
{
int ret;
@@ -1056,15 +966,16 @@ static int psp_dtm_load(struct psp_context *psp)
memset(psp->fw_pri_buf, 0, PSP_1_MEG);
memcpy(psp->fw_pri_buf, psp->ta_dtm_start_addr, psp->ta_dtm_ucode_size);
- psp_prep_dtm_ta_load_cmd_buf(cmd, psp->fw_pri_mc_addr,
- psp->dtm_context.dtm_shared_mc_addr,
- psp->ta_dtm_ucode_size,
- PSP_DTM_SHARED_MEM_SIZE);
+ psp_prep_ta_load_cmd_buf(cmd,
+ psp->fw_pri_mc_addr,
+ psp->ta_dtm_ucode_size,
+ psp->dtm_context.dtm_shared_mc_addr,
+ PSP_DTM_SHARED_MEM_SIZE);
ret = psp_cmd_submit_buf(psp, NULL, cmd, psp->fence_buf_mc_addr);
if (!ret) {
- psp->dtm_context.dtm_initialized = 1;
+ psp->dtm_context.dtm_initialized = true;
psp->dtm_context.session_id = cmd->resp.session_id;
}
@@ -1102,39 +1013,15 @@ static int psp_dtm_initialize(struct psp_context *psp)
return 0;
}
-static void psp_prep_dtm_ta_invoke_cmd_buf(struct psp_gfx_cmd_resp *cmd,
- uint32_t ta_cmd_id,
- uint32_t dtm_session_id)
-{
- cmd->cmd_id = GFX_CMD_ID_INVOKE_CMD;
- cmd->cmd.cmd_invoke_cmd.session_id = dtm_session_id;
- cmd->cmd.cmd_invoke_cmd.ta_cmd_id = ta_cmd_id;
- /* Note: cmd_invoke_cmd.buf is not used for now */
-}
-
int psp_dtm_invoke(struct psp_context *psp, uint32_t ta_cmd_id)
{
- int ret;
- struct psp_gfx_cmd_resp *cmd;
-
/*
* TODO: bypass the loading in sriov for now
*/
if (amdgpu_sriov_vf(psp->adev))
return 0;
- cmd = kzalloc(sizeof(struct psp_gfx_cmd_resp), GFP_KERNEL);
- if (!cmd)
- return -ENOMEM;
-
- psp_prep_dtm_ta_invoke_cmd_buf(cmd, ta_cmd_id,
- psp->dtm_context.session_id);
-
- ret = psp_cmd_submit_buf(psp, NULL, cmd, psp->fence_buf_mc_addr);
-
- kfree(cmd);
-
- return ret;
+ return psp_ta_invoke(psp, ta_cmd_id, psp->dtm_context.session_id);
}
static int psp_dtm_terminate(struct psp_context *psp)
@@ -1154,7 +1041,7 @@ static int psp_dtm_terminate(struct psp_context *psp)
if (ret)
return ret;
- psp->dtm_context.dtm_initialized = 0;
+ psp->dtm_context.dtm_initialized = false;
/* free hdcp shared memory */
amdgpu_bo_free_kernel(&psp->dtm_context.dtm_shared_bo,
@@ -1211,45 +1098,6 @@ static int psp_hw_start(struct psp_context *psp)
return ret;
}
- ret = psp_asd_init(psp);
- if (ret) {
- DRM_ERROR("PSP asd init failed!\n");
- return ret;
- }
-
- ret = psp_asd_load(psp);
- if (ret) {
- DRM_ERROR("PSP load asd failed!\n");
- return ret;
- }
-
- if (adev->gmc.xgmi.num_physical_nodes > 1) {
- ret = psp_xgmi_initialize(psp);
- /* Warning the XGMI seesion initialize failure
- * Instead of stop driver initialization
- */
- if (ret)
- dev_err(psp->adev->dev,
- "XGMI: Failed to initialize XGMI session\n");
- }
-
- if (psp->adev->psp.ta_fw) {
- ret = psp_ras_initialize(psp);
- if (ret)
- dev_err(psp->adev->dev,
- "RAS: Failed to initialize RAS\n");
-
- ret = psp_hdcp_initialize(psp);
- if (ret)
- dev_err(psp->adev->dev,
- "HDCP: Failed to initialize HDCP\n");
-
- ret = psp_dtm_initialize(psp);
- if (ret)
- dev_err(psp->adev->dev,
- "DTM: Failed to initialize DTM\n");
- }
-
return 0;
}
@@ -1329,6 +1177,9 @@ static int psp_get_fw_type(struct amdgpu_firmware_info *ucode,
case AMDGPU_UCODE_ID_VCN:
*type = GFX_FW_TYPE_VCN;
break;
+ case AMDGPU_UCODE_ID_VCN1:
+ *type = GFX_FW_TYPE_VCN1;
+ break;
case AMDGPU_UCODE_ID_DMCU_ERAM:
*type = GFX_FW_TYPE_DMCU_ERAM;
break;
@@ -1341,6 +1192,9 @@ static int psp_get_fw_type(struct amdgpu_firmware_info *ucode,
case AMDGPU_UCODE_ID_VCN1_RAM:
*type = GFX_FW_TYPE_VCN1_RAM;
break;
+ case AMDGPU_UCODE_ID_DMCUB:
+ *type = GFX_FW_TYPE_DMUB;
+ break;
case AMDGPU_UCODE_ID_MAXIMUM:
default:
return -EINVAL;
@@ -1470,7 +1324,8 @@ out:
|| ucode->ucode_id == AMDGPU_UCODE_ID_RLC_G
|| ucode->ucode_id == AMDGPU_UCODE_ID_RLC_RESTORE_LIST_CNTL
|| ucode->ucode_id == AMDGPU_UCODE_ID_RLC_RESTORE_LIST_GPM_MEM
- || ucode->ucode_id == AMDGPU_UCODE_ID_RLC_RESTORE_LIST_SRM_MEM))
+ || ucode->ucode_id == AMDGPU_UCODE_ID_RLC_RESTORE_LIST_SRM_MEM
+ || ucode->ucode_id == AMDGPU_UCODE_ID_SMC))
/*skip ucode loading in SRIOV VF */
continue;
@@ -1519,16 +1374,13 @@ static int psp_load_fw(struct amdgpu_device *adev)
if (!psp->cmd)
return -ENOMEM;
- /* this fw pri bo is not used under SRIOV */
- if (!amdgpu_sriov_vf(psp->adev)) {
- ret = amdgpu_bo_create_kernel(adev, PSP_1_MEG, PSP_1_MEG,
- AMDGPU_GEM_DOMAIN_GTT,
- &psp->fw_pri_bo,
- &psp->fw_pri_mc_addr,
- &psp->fw_pri_buf);
- if (ret)
- goto failed;
- }
+ ret = amdgpu_bo_create_kernel(adev, PSP_1_MEG, PSP_1_MEG,
+ AMDGPU_GEM_DOMAIN_GTT,
+ &psp->fw_pri_bo,
+ &psp->fw_pri_mc_addr,
+ &psp->fw_pri_buf);
+ if (ret)
+ goto failed;
ret = amdgpu_bo_create_kernel(adev, PSP_FENCE_BUFFER_SIZE, PAGE_SIZE,
AMDGPU_GEM_DOMAIN_VRAM,
@@ -1562,6 +1414,39 @@ skip_memalloc:
if (ret)
goto failed;
+ ret = psp_asd_load(psp);
+ if (ret) {
+ DRM_ERROR("PSP load asd failed!\n");
+ return ret;
+ }
+
+ if (adev->gmc.xgmi.num_physical_nodes > 1) {
+ ret = psp_xgmi_initialize(psp);
+		/* Warn on XGMI session initialization failure
+		 * instead of stopping driver initialization.
+		 */
+ if (ret)
+ dev_err(psp->adev->dev,
+ "XGMI: Failed to initialize XGMI session\n");
+ }
+
+ if (psp->adev->psp.ta_fw) {
+ ret = psp_ras_initialize(psp);
+ if (ret)
+ dev_err(psp->adev->dev,
+ "RAS: Failed to initialize RAS\n");
+
+ ret = psp_hdcp_initialize(psp);
+ if (ret)
+ dev_err(psp->adev->dev,
+ "HDCP: Failed to initialize HDCP\n");
+
+ ret = psp_dtm_initialize(psp);
+ if (ret)
+ dev_err(psp->adev->dev,
+ "DTM: Failed to initialize DTM\n");
+ }
+
return 0;
failed:
@@ -1619,6 +1504,8 @@ static int psp_hw_fini(void *handle)
psp_hdcp_terminate(psp);
}
+ psp_asd_unload(psp);
+
psp_ring_destroy(psp, PSP_RING_TYPE__KM);
pptr = amdgpu_sriov_vf(psp->adev) ? &tmr_buf : NULL;
@@ -1627,8 +1514,6 @@ static int psp_hw_fini(void *handle)
&psp->fw_pri_mc_addr, &psp->fw_pri_buf);
amdgpu_bo_free_kernel(&psp->fence_buf_bo,
&psp->fence_buf_mc_addr, &psp->fence_buf);
- amdgpu_bo_free_kernel(&psp->asd_shared_bo, &psp->asd_shared_mc_addr,
- &psp->asd_shared_buf);
amdgpu_bo_free_kernel(&psp->cmd_buf_bo, &psp->cmd_buf_mc_addr,
(void **)&psp->cmd_buf_mem);
@@ -1704,6 +1589,39 @@ static int psp_resume(void *handle)
if (ret)
goto failed;
+ ret = psp_asd_load(psp);
+ if (ret) {
+ DRM_ERROR("PSP load asd failed!\n");
+ goto failed;
+ }
+
+ if (adev->gmc.xgmi.num_physical_nodes > 1) {
+ ret = psp_xgmi_initialize(psp);
+		/* Warn on XGMI session initialization failure
+		 * instead of stopping driver initialization.
+		 */
+ if (ret)
+ dev_err(psp->adev->dev,
+ "XGMI: Failed to initialize XGMI session\n");
+ }
+
+ if (psp->adev->psp.ta_fw) {
+ ret = psp_ras_initialize(psp);
+ if (ret)
+ dev_err(psp->adev->dev,
+ "RAS: Failed to initialize RAS\n");
+
+ ret = psp_hdcp_initialize(psp);
+ if (ret)
+ dev_err(psp->adev->dev,
+ "HDCP: Failed to initialize HDCP\n");
+
+ ret = psp_dtm_initialize(psp);
+ if (ret)
+ dev_err(psp->adev->dev,
+ "DTM: Failed to initialize DTM\n");
+ }
+
mutex_unlock(&adev->firmware.mutex);
return 0;
@@ -1758,6 +1676,56 @@ int psp_update_vcn_sram(struct amdgpu_device *adev, int inst_idx,
return psp_execute_np_fw_load(&adev->psp, &ucode);
}
+int psp_ring_cmd_submit(struct psp_context *psp,
+ uint64_t cmd_buf_mc_addr,
+ uint64_t fence_mc_addr,
+ int index)
+{
+ unsigned int psp_write_ptr_reg = 0;
+ struct psp_gfx_rb_frame *write_frame;
+ struct psp_ring *ring = &psp->km_ring;
+ struct psp_gfx_rb_frame *ring_buffer_start = ring->ring_mem;
+ struct psp_gfx_rb_frame *ring_buffer_end = ring_buffer_start +
+ ring->ring_size / sizeof(struct psp_gfx_rb_frame) - 1;
+ struct amdgpu_device *adev = psp->adev;
+ uint32_t ring_size_dw = ring->ring_size / 4;
+ uint32_t rb_frame_size_dw = sizeof(struct psp_gfx_rb_frame) / 4;
+
+ /* KM (GPCOM) prepare write pointer */
+ psp_write_ptr_reg = psp_ring_get_wptr(psp);
+
+ /* Update KM RB frame pointer to new frame */
+ /* write_frame ptr increments by size of rb_frame in bytes */
+ /* psp_write_ptr_reg increments by size of rb_frame in DWORDs */
+ if ((psp_write_ptr_reg % ring_size_dw) == 0)
+ write_frame = ring_buffer_start;
+ else
+ write_frame = ring_buffer_start + (psp_write_ptr_reg / rb_frame_size_dw);
+ /* Check invalid write_frame ptr address */
+ if ((write_frame < ring_buffer_start) || (ring_buffer_end < write_frame)) {
+ DRM_ERROR("ring_buffer_start = %p; ring_buffer_end = %p; write_frame = %p\n",
+ ring_buffer_start, ring_buffer_end, write_frame);
+ DRM_ERROR("write_frame is pointing to address out of bounds\n");
+ return -EINVAL;
+ }
+
+ /* Initialize KM RB frame */
+ memset(write_frame, 0, sizeof(struct psp_gfx_rb_frame));
+
+ /* Update KM RB frame */
+ write_frame->cmd_buf_addr_hi = upper_32_bits(cmd_buf_mc_addr);
+ write_frame->cmd_buf_addr_lo = lower_32_bits(cmd_buf_mc_addr);
+ write_frame->fence_addr_hi = upper_32_bits(fence_mc_addr);
+ write_frame->fence_addr_lo = lower_32_bits(fence_mc_addr);
+ write_frame->fence_value = index;
+ amdgpu_asic_flush_hdp(adev, NULL);
+
+ /* Update the write Pointer in DWORDs */
+ psp_write_ptr_reg = (psp_write_ptr_reg + rb_frame_size_dw) % ring_size_dw;
+ psp_ring_set_wptr(psp, psp_write_ptr_reg);
+ return 0;
+}
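Editor's note: psp_ring_cmd_submit() deliberately mixes units; write_frame advances in whole rb_frame structs while psp_write_ptr_reg advances in DWORDs, and both wrap at the ring size. A standalone sketch of the wrap arithmetic, using assumed sizes (4 KiB ring, 64-byte frame) since the real struct sizes are hardware-defined:

    /*
     * Illustrative only: assumed sizes give 1024 and 16 DWORDs; only
     * the modular pattern carries over to the driver.
     */
    #include <assert.h>
    #include <stdint.h>

    int main(void)
    {
    	uint32_t ring_size_dw = 4096 / 4;        /* 1024 DWORDs */
    	uint32_t rb_frame_size_dw = 64 / 4;      /* 16 DWORDs */
    	uint32_t wptr = ring_size_dw - rb_frame_size_dw; /* last slot */

    	/* advancing past the final slot wraps to the ring start */
    	wptr = (wptr + rb_frame_size_dw) % ring_size_dw;
    	assert(wptr == 0);

    	/* frame index derived from a DWORD pointer, as in the driver */
    	assert((32 / rb_frame_size_dw) == 2);
    	return 0;
    }

Because rb_frame_size_dw divides ring_size_dw evenly, the modulo always lands on a frame boundary; the bounds check in the function above guards the cases where it would not.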
+
static bool psp_check_fw_loading_status(struct amdgpu_device *adev,
enum AMDGPU_UCODE_ID ucode_type)
{
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h
index 09c5474ebcc3..3265487b859f 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h
@@ -32,7 +32,6 @@
#define PSP_FENCE_BUFFER_SIZE 0x1000
#define PSP_CMD_BUFFER_SIZE 0x1000
-#define PSP_ASD_SHARED_MEM_SIZE 0x4000
#define PSP_XGMI_SHARED_MEM_SIZE 0x4000
#define PSP_RAS_SHARED_MEM_SIZE 0x4000
#define PSP_1_MEG 0x100000
@@ -94,9 +93,6 @@ struct psp_funcs
enum psp_ring_type ring_type);
int (*ring_destroy)(struct psp_context *psp,
enum psp_ring_type ring_type);
- int (*cmd_submit)(struct psp_context *psp,
- uint64_t cmd_buf_mc_addr, uint64_t fence_mc_addr,
- int index);
bool (*compare_sram_data)(struct psp_context *psp,
struct amdgpu_firmware_info *ucode,
enum AMDGPU_UCODE_ID ucode_type);
@@ -116,6 +112,8 @@ struct psp_funcs
int (*mem_training_init)(struct psp_context *psp);
void (*mem_training_fini)(struct psp_context *psp);
int (*mem_training)(struct psp_context *psp, uint32_t ops);
+ uint32_t (*ring_get_wptr)(struct psp_context *psp);
+ void (*ring_set_wptr)(struct psp_context *psp, uint32_t value);
};
#define AMDGPU_XGMI_MAX_CONNECTED_NODES 64
@@ -131,6 +129,11 @@ struct psp_xgmi_topology_info {
struct psp_xgmi_node_info nodes[AMDGPU_XGMI_MAX_CONNECTED_NODES];
};
+struct psp_asd_context {
+ bool asd_initialized;
+ uint32_t session_id;
+};
+
struct psp_xgmi_context {
uint8_t initialized;
uint32_t session_id;
@@ -199,7 +202,6 @@ struct psp_memory_training_context {
/*vram offset of the p2c training data*/
u64 p2c_train_data_offset;
- struct amdgpu_bo *p2c_bo;
/*vram offset of the c2p training data*/
u64 c2p_train_data_offset;
@@ -239,15 +241,12 @@ struct psp_context
struct amdgpu_bo *tmr_bo;
uint64_t tmr_mc_addr;
- /* asd firmware and buffer */
+ /* asd firmware */
const struct firmware *asd_fw;
uint32_t asd_fw_version;
uint32_t asd_feature_version;
uint32_t asd_ucode_size;
uint8_t *asd_start_addr;
- struct amdgpu_bo *asd_shared_bo;
- uint64_t asd_shared_mc_addr;
- void *asd_shared_buf;
/* fence buffer */
struct amdgpu_bo *fence_buf_bo;
@@ -282,6 +281,7 @@ struct psp_context
uint32_t ta_dtm_ucode_size;
uint8_t *ta_dtm_start_addr;
+ struct psp_asd_context asd_context;
struct psp_xgmi_context xgmi_context;
struct psp_ras_context ras;
struct psp_hdcp_context hdcp_context;
@@ -300,8 +300,6 @@ struct amdgpu_psp_funcs {
#define psp_ring_create(psp, type) (psp)->funcs->ring_create((psp), (type))
#define psp_ring_stop(psp, type) (psp)->funcs->ring_stop((psp), (type))
#define psp_ring_destroy(psp, type) ((psp)->funcs->ring_destroy((psp), (type)))
-#define psp_cmd_submit(psp, cmd_mc, fence_mc, index) \
- (psp)->funcs->cmd_submit((psp), (cmd_mc), (fence_mc), (index))
#define psp_compare_sram_data(psp, ucode, type) \
(psp)->funcs->compare_sram_data((psp), (ucode), (type))
#define psp_init_microcode(psp) \
@@ -346,6 +344,9 @@ struct amdgpu_psp_funcs {
((psp)->funcs->ras_cure_posion ? \
(psp)->funcs->ras_cure_posion(psp, (addr)) : -EINVAL)
+#define psp_ring_get_wptr(psp) (psp)->funcs->ring_get_wptr((psp))
+#define psp_ring_set_wptr(psp, value) (psp)->funcs->ring_set_wptr((psp), (value))
+
extern const struct amd_ip_funcs psp_ip_funcs;
extern const struct amdgpu_ip_block_version psp_v3_1_ip_block;
@@ -372,4 +373,8 @@ int psp_rlc_autoload_start(struct psp_context *psp);
extern const struct amdgpu_ip_block_version psp_v11_0_ip_block;
int psp_reg_program(struct psp_context *psp, enum psp_reg_prog_id reg,
uint32_t value);
+int psp_ring_cmd_submit(struct psp_context *psp,
+ uint64_t cmd_buf_mc_addr,
+ uint64_t fence_mc_addr,
+ int index);
#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c
index 404483437bd3..766be7f18282 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c
@@ -198,9 +198,6 @@ static int amdgpu_ras_debugfs_ctrl_parse_data(struct file *f,
return 0;
}
-static struct ras_manager *amdgpu_ras_find_obj(struct amdgpu_device *adev,
- struct ras_common_if *head);
-
/**
* DOC: AMDGPU RAS debugfs control interface
*
@@ -318,7 +315,7 @@ static ssize_t amdgpu_ras_debugfs_ctrl_write(struct file *f, const char __user *
default:
ret = -EINVAL;
break;
- };
+ }
if (ret)
return -EINVAL;
@@ -445,7 +442,7 @@ static struct ras_manager *amdgpu_ras_create_obj(struct amdgpu_device *adev,
}
/* return an obj equal to head, or the first when head is NULL */
-static struct ras_manager *amdgpu_ras_find_obj(struct amdgpu_device *adev,
+struct ras_manager *amdgpu_ras_find_obj(struct amdgpu_device *adev,
struct ras_common_if *head)
{
struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
@@ -689,6 +686,7 @@ int amdgpu_ras_error_query(struct amdgpu_device *adev,
{
struct ras_manager *obj = amdgpu_ras_find_obj(adev, &info->head);
struct ras_err_data err_data = {0, 0, 0, NULL};
+ int i;
if (!obj)
return -EINVAL;
@@ -703,6 +701,13 @@ int amdgpu_ras_error_query(struct amdgpu_device *adev,
if (adev->umc.funcs->query_ras_error_address)
adev->umc.funcs->query_ras_error_address(adev, &err_data);
break;
+ case AMDGPU_RAS_BLOCK__SDMA:
+ if (adev->sdma.funcs->query_ras_error_count) {
+ for (i = 0; i < adev->sdma.num_instances; i++)
+ adev->sdma.funcs->query_ras_error_count(adev, i,
+ &err_data);
+ }
+ break;
case AMDGPU_RAS_BLOCK__GFX:
if (adev->gfx.funcs->query_ras_error_count)
adev->gfx.funcs->query_ras_error_count(adev, &err_data);
@@ -1314,6 +1319,7 @@ static int amdgpu_ras_badpages_read(struct amdgpu_device *adev,
data = con->eh_data;
if (!data || data->count == 0) {
*bps = NULL;
+ ret = -EINVAL;
goto out;
}
@@ -1347,7 +1353,8 @@ static void amdgpu_ras_do_recovery(struct work_struct *work)
struct amdgpu_ras *ras =
container_of(work, struct amdgpu_ras, recovery_work);
- amdgpu_device_gpu_recover(ras->adev, 0);
+ if (amdgpu_device_should_recover_gpu(ras->adev))
+ amdgpu_device_gpu_recover(ras->adev, 0);
atomic_set(&ras->in_recovery, 0);
}
@@ -1687,7 +1694,8 @@ static void amdgpu_ras_check_supported(struct amdgpu_device *adev,
*supported = 0;
if (amdgpu_sriov_vf(adev) ||
- adev->asic_type != CHIP_VEGA20)
+ (adev->asic_type != CHIP_VEGA20 &&
+ adev->asic_type != CHIP_ARCTURUS))
return;
if (adev->is_atom_fw &&
@@ -1872,7 +1880,7 @@ void amdgpu_ras_resume(struct amdgpu_device *adev)
* See feature_enable_on_boot
*/
amdgpu_ras_disable_all_features(adev, 1);
- amdgpu_ras_reset_gpu(adev, 0);
+ amdgpu_ras_reset_gpu(adev);
}
}
@@ -1935,6 +1943,6 @@ void amdgpu_ras_global_ras_isr(struct amdgpu_device *adev)
if (atomic_cmpxchg(&amdgpu_ras_in_intr, 0, 1) == 0) {
DRM_WARN("RAS event of type ERREVENT_ATHUB_INTERRUPT detected!\n");
- amdgpu_ras_reset_gpu(adev, false);
+ amdgpu_ras_reset_gpu(adev);
}
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.h
index f80fd3428c98..a5fe29a9373e 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.h
@@ -494,8 +494,7 @@ int amdgpu_ras_add_bad_pages(struct amdgpu_device *adev,
int amdgpu_ras_reserve_bad_pages(struct amdgpu_device *adev);
-static inline int amdgpu_ras_reset_gpu(struct amdgpu_device *adev,
- bool is_baco)
+static inline int amdgpu_ras_reset_gpu(struct amdgpu_device *adev)
{
struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);
@@ -611,6 +610,9 @@ int amdgpu_ras_interrupt_remove_handler(struct amdgpu_device *adev,
int amdgpu_ras_interrupt_dispatch(struct amdgpu_device *adev,
struct ras_dispatch_if *info);
+struct ras_manager *amdgpu_ras_find_obj(struct amdgpu_device *adev,
+ struct ras_common_if *head);
+
extern atomic_t amdgpu_ras_in_intr;
static inline bool amdgpu_ras_intr_triggered(void)
@@ -618,6 +620,11 @@ static inline bool amdgpu_ras_intr_triggered(void)
return !!atomic_read(&amdgpu_ras_in_intr);
}
+static inline void amdgpu_ras_intr_cleared(void)
+{
+ atomic_set(&amdgpu_ras_in_intr, 0);
+}
+
void amdgpu_ras_global_ras_isr(struct amdgpu_device *adev);
#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.c
index 6010999d9020..a2ee30b16212 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.c
@@ -160,7 +160,7 @@ int amdgpu_sdma_process_ras_data_cb(struct amdgpu_device *adev,
struct amdgpu_iv_entry *entry)
{
kgd2kfd_set_sram_ecc_flag(adev->kfd.dev);
- amdgpu_ras_reset_gpu(adev, 0);
+ amdgpu_ras_reset_gpu(adev);
return AMDGPU_RAS_SUCCESS;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.h
index 761ff8be6314..485335267d78 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.h
@@ -50,8 +50,18 @@ struct amdgpu_sdma_instance {
bool burst_nop;
};
+struct amdgpu_sdma_ras_funcs {
+ int (*ras_late_init)(struct amdgpu_device *adev,
+ void *ras_ih_info);
+ void (*ras_fini)(struct amdgpu_device *adev);
+ int (*query_ras_error_count)(struct amdgpu_device *adev,
+ uint32_t instance, void *ras_error_status);
+};
+
struct amdgpu_sdma {
struct amdgpu_sdma_instance instance[AMDGPU_MAX_SDMA_INSTANCES];
+ struct drm_gpu_scheduler *sdma_sched[AMDGPU_MAX_SDMA_INSTANCES];
+ uint32_t num_sdma_sched;
struct amdgpu_irq_src trap_irq;
struct amdgpu_irq_src illegal_inst_irq;
struct amdgpu_irq_src ecc_irq;
@@ -59,6 +69,7 @@ struct amdgpu_sdma {
uint32_t srbm_soft_reset;
bool has_page_queue;
struct ras_common_if *ras_if;
+ const struct amdgpu_sdma_ras_funcs *funcs;
};
/*
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c
index 95e5e93edd18..a09b6b9c27d1 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c
@@ -129,7 +129,8 @@ static void amdgpu_sync_keep_later(struct dma_fence **keep,
* Tries to add the fence to an existing hash entry. Returns true when an entry
* was found, false otherwise.
*/
-static bool amdgpu_sync_add_later(struct amdgpu_sync *sync, struct dma_fence *f, bool explicit)
+static bool amdgpu_sync_add_later(struct amdgpu_sync *sync, struct dma_fence *f,
+ bool explicit)
{
struct amdgpu_sync_entry *e;
@@ -151,19 +152,18 @@ static bool amdgpu_sync_add_later(struct amdgpu_sync *sync, struct dma_fence *f,
* amdgpu_sync_fence - remember to sync to this fence
*
* @sync: sync object to add fence to
- * @fence: fence to sync to
+ * @f: fence to sync to
+ * @explicit: if this is an explicit dependency
*
+ * Add the fence to the sync object.
*/
-int amdgpu_sync_fence(struct amdgpu_device *adev, struct amdgpu_sync *sync,
- struct dma_fence *f, bool explicit)
+int amdgpu_sync_fence(struct amdgpu_sync *sync, struct dma_fence *f,
+ bool explicit)
{
struct amdgpu_sync_entry *e;
if (!f)
return 0;
- if (amdgpu_sync_same_dev(adev, f) &&
- amdgpu_sync_get_owner(f) == AMDGPU_FENCE_OWNER_VM)
- amdgpu_sync_keep_later(&sync->last_vm_update, f);
if (amdgpu_sync_add_later(sync, f, explicit))
return 0;
@@ -180,6 +180,24 @@ int amdgpu_sync_fence(struct amdgpu_device *adev, struct amdgpu_sync *sync,
}
/**
+ * amdgpu_sync_vm_fence - remember to sync to this VM fence
+ *
+ * @sync: sync object to add fence to
+ * @fence: the VM fence to add
+ *
+ * Add the fence to the sync object and remember it as a VM update.
+ */
+int amdgpu_sync_vm_fence(struct amdgpu_sync *sync, struct dma_fence *fence)
+{
+ if (!fence)
+ return 0;
+
+ amdgpu_sync_keep_later(&sync->last_vm_update, fence);
+ return amdgpu_sync_fence(sync, fence, false);
+}
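Editor's note: with the adev argument dropped from amdgpu_sync_fence(), tracking a fence as a VM update becomes an explicit caller choice instead of an owner-based heuristic. A hedged usage sketch (the function and fence names are placeholders, not from this patch):

    static int example_track_fences(struct amdgpu_sync *sync,
    				struct dma_fence *dep_fence,
    				struct dma_fence *vm_update_fence)
    {
    	int r;

    	/* plain dependency: only enters the sync object's hash table */
    	r = amdgpu_sync_fence(sync, dep_fence, false);
    	if (r)
    		return r;

    	/* VM update: additionally refreshed in sync->last_vm_update */
    	return amdgpu_sync_vm_fence(sync, vm_update_fence);
    }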
+
+/**
* amdgpu_sync_resv - sync to a reservation object
*
* @sync: sync object to add fences from reservation object to
@@ -204,7 +222,7 @@ int amdgpu_sync_resv(struct amdgpu_device *adev,
/* always sync to the exclusive fence */
f = dma_resv_get_excl(resv);
- r = amdgpu_sync_fence(adev, sync, f, false);
+ r = amdgpu_sync_fence(sync, f, false);
flist = dma_resv_get_list(resv);
if (!flist || r)
@@ -222,13 +240,11 @@ int amdgpu_sync_resv(struct amdgpu_device *adev,
continue;
if (amdgpu_sync_same_dev(adev, f)) {
- /* VM updates are only interesting
- * for other VM updates and moves.
+			/* VM updates only sync with moves but not with user
+			 * command submissions or KFD eviction fences
*/
- if ((owner != AMDGPU_FENCE_OWNER_UNDEFINED) &&
- (fence_owner != AMDGPU_FENCE_OWNER_UNDEFINED) &&
- ((owner == AMDGPU_FENCE_OWNER_VM) !=
- (fence_owner == AMDGPU_FENCE_OWNER_VM)))
+ if (owner == AMDGPU_FENCE_OWNER_VM &&
+ fence_owner != AMDGPU_FENCE_OWNER_UNDEFINED)
continue;
/* Ignore fence from the same owner and explicit one as
@@ -239,7 +255,7 @@ int amdgpu_sync_resv(struct amdgpu_device *adev,
continue;
}
- r = amdgpu_sync_fence(adev, sync, f, false);
+ r = amdgpu_sync_fence(sync, f, false);
if (r)
break;
}
@@ -340,7 +356,7 @@ int amdgpu_sync_clone(struct amdgpu_sync *source, struct amdgpu_sync *clone)
hash_for_each_safe(source->fences, i, tmp, e, node) {
f = e->fence;
if (!dma_fence_is_signaled(f)) {
- r = amdgpu_sync_fence(NULL, clone, f, e->explicit);
+ r = amdgpu_sync_fence(clone, f, e->explicit);
if (r)
return r;
} else {
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.h
index b5f1778a2319..d62c2b81d92b 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.h
@@ -40,8 +40,9 @@ struct amdgpu_sync {
};
void amdgpu_sync_create(struct amdgpu_sync *sync);
-int amdgpu_sync_fence(struct amdgpu_device *adev, struct amdgpu_sync *sync,
- struct dma_fence *f, bool explicit);
+int amdgpu_sync_fence(struct amdgpu_sync *sync, struct dma_fence *f,
+ bool explicit);
+int amdgpu_sync_vm_fence(struct amdgpu_sync *sync, struct dma_fence *fence);
int amdgpu_sync_resv(struct amdgpu_device *adev,
struct amdgpu_sync *sync,
struct dma_resv *resv,
@@ -49,7 +50,8 @@ int amdgpu_sync_resv(struct amdgpu_device *adev,
bool explicit_sync);
struct dma_fence *amdgpu_sync_peek_fence(struct amdgpu_sync *sync,
struct amdgpu_ring *ring);
-struct dma_fence *amdgpu_sync_get_fence(struct amdgpu_sync *sync, bool *explicit);
+struct dma_fence *amdgpu_sync_get_fence(struct amdgpu_sync *sync,
+ bool *explicit);
int amdgpu_sync_clone(struct amdgpu_sync *source, struct amdgpu_sync *clone);
int amdgpu_sync_wait(struct amdgpu_sync *sync, bool intr);
void amdgpu_sync_free(struct amdgpu_sync *sync);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
index 2616e2eafdeb..dee446278417 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
@@ -41,6 +41,7 @@
#include <linux/swap.h>
#include <linux/swiotlb.h>
#include <linux/dma-buf.h>
+#include <linux/sizes.h>
#include <drm/ttm/ttm_bo_api.h>
#include <drm/ttm/ttm_bo_driver.h>
@@ -1522,11 +1523,8 @@ static bool amdgpu_ttm_bo_eviction_valuable(struct ttm_buffer_object *bo,
struct dma_fence *f;
int i;
- /* Don't evict VM page tables while they are busy, otherwise we can't
- * cleanly handle page faults.
- */
if (bo->type == ttm_bo_type_kernel &&
- !dma_resv_test_signaled_rcu(bo->base.resv, true))
+ !amdgpu_vm_evictable(ttm_to_amdgpu_bo(bo)))
return false;
/* If bo is a KFD BO, check if the bo belongs to the current process.
@@ -1717,12 +1715,17 @@ static int amdgpu_ttm_training_reserve_vram_fini(struct amdgpu_device *adev)
amdgpu_bo_free_kernel(&ctx->c2p_bo, NULL, NULL);
ctx->c2p_bo = NULL;
- amdgpu_bo_free_kernel(&ctx->p2c_bo, NULL, NULL);
- ctx->p2c_bo = NULL;
-
return 0;
}
+static u64 amdgpu_ttm_training_get_c2p_offset(u64 vram_size)
+{
+	if ((vram_size & (SZ_1M - 1)) < (SZ_4K + 1))
+ vram_size -= SZ_1M;
+
+ return ALIGN(vram_size, SZ_1M);
+}
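Editor's note: the helper snaps the training-data offset to a 1 MiB boundary, stepping a full MiB down whenever vram_size is exactly on a boundary or within 4 KiB above one. A standalone sketch of the mechanics, with the kernel's ALIGN() expanded since it isn't available outside the tree:

    #include <stdint.h>
    #include <stdio.h>

    #define SZ_4K 0x1000ULL
    #define SZ_1M 0x100000ULL
    /* round up to a power-of-two boundary, as kernel ALIGN() does */
    #define ALIGN_UP(x, a) (((x) + (a) - 1) & ~((a) - 1))

    static uint64_t c2p_offset(uint64_t vram_size)
    {
    	if ((vram_size & (SZ_1M - 1)) < (SZ_4K + 1))
    		vram_size -= SZ_1M;
    	return ALIGN_UP(vram_size, SZ_1M);
    }

    int main(void)
    {
    	/* exactly 1 MiB aligned: steps down one full MiB */
    	printf("%llx\n", (unsigned long long)c2p_offset(0x200000000ULL));
    	/* 4 KiB above a boundary: also snaps to the boundary below */
    	printf("%llx\n", (unsigned long long)c2p_offset(0x200000000ULL + SZ_4K));
    	/* more than 4 KiB above: rounds up to the boundary above */
    	printf("%llx\n", (unsigned long long)c2p_offset(0x200000000ULL + 2 * SZ_4K));
    	return 0;
    }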
+
/**
* amdgpu_ttm_training_reserve_vram_init - create bo vram reservation from memory training
*
@@ -1741,7 +1744,7 @@ static int amdgpu_ttm_training_reserve_vram_init(struct amdgpu_device *adev)
return 0;
}
- ctx->c2p_train_data_offset = adev->fw_vram_usage.mem_train_fb_loc;
+ ctx->c2p_train_data_offset = amdgpu_ttm_training_get_c2p_offset(adev->gmc.mc_vram_size);
ctx->p2c_train_data_offset = (adev->gmc.mc_vram_size - GDDR6_MEM_TRAINING_OFFSET);
ctx->train_data_size = GDDR6_MEM_TRAINING_DATA_SIZE_IN_BYTES;
@@ -1751,17 +1754,6 @@ static int amdgpu_ttm_training_reserve_vram_init(struct amdgpu_device *adev)
ctx->c2p_train_data_offset);
ret = amdgpu_bo_create_kernel_at(adev,
- ctx->p2c_train_data_offset,
- ctx->train_data_size,
- AMDGPU_GEM_DOMAIN_VRAM,
- &ctx->p2c_bo,
- NULL);
- if (ret) {
- DRM_ERROR("alloc p2c_bo failed(%d)!\n", ret);
- goto Err_out;
- }
-
- ret = amdgpu_bo_create_kernel_at(adev,
ctx->c2p_train_data_offset,
ctx->train_data_size,
AMDGPU_GEM_DOMAIN_VRAM,
@@ -1769,15 +1761,12 @@ static int amdgpu_ttm_training_reserve_vram_init(struct amdgpu_device *adev)
NULL);
if (ret) {
DRM_ERROR("alloc c2p_bo failed(%d)!\n", ret);
- goto Err_out;
+ amdgpu_ttm_training_reserve_vram_fini(adev);
+ return ret;
}
ctx->init = PSP_MEM_TRAIN_RESERVE_SUCCESS;
return 0;
-
-Err_out:
- amdgpu_ttm_training_reserve_vram_fini(adev);
- return ret;
}
/**
@@ -1990,11 +1979,13 @@ void amdgpu_ttm_set_buffer_funcs_status(struct amdgpu_device *adev, bool enable)
if (enable) {
struct amdgpu_ring *ring;
- struct drm_sched_rq *rq;
+ struct drm_gpu_scheduler *sched;
ring = adev->mman.buffer_funcs_ring;
- rq = &ring->sched.sched_rq[DRM_SCHED_PRIORITY_KERNEL];
- r = drm_sched_entity_init(&adev->mman.entity, &rq, 1, NULL);
+ sched = &ring->sched;
+ r = drm_sched_entity_init(&adev->mman.entity,
+ DRM_SCHED_PRIORITY_KERNEL, &sched,
+ 1, NULL);
if (r) {
DRM_ERROR("Failed setting up TTM BO move entity (%d)\n",
r);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c
index 833fc4b68940..9ef312428231 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c
@@ -447,6 +447,7 @@ static int amdgpu_ucode_init_single_fw(struct amdgpu_device *adev,
const struct common_firmware_header *header = NULL;
const struct gfx_firmware_header_v1_0 *cp_hdr = NULL;
const struct dmcu_firmware_header_v1_0 *dmcu_hdr = NULL;
+ const struct dmcub_firmware_header_v1_0 *dmcub_hdr = NULL;
if (NULL == ucode->fw)
return 0;
@@ -460,6 +461,7 @@ static int amdgpu_ucode_init_single_fw(struct amdgpu_device *adev,
header = (const struct common_firmware_header *)ucode->fw->data;
cp_hdr = (const struct gfx_firmware_header_v1_0 *)ucode->fw->data;
dmcu_hdr = (const struct dmcu_firmware_header_v1_0 *)ucode->fw->data;
+ dmcub_hdr = (const struct dmcub_firmware_header_v1_0 *)ucode->fw->data;
if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP ||
(ucode->ucode_id != AMDGPU_UCODE_ID_CP_MEC1 &&
@@ -470,7 +472,8 @@ static int amdgpu_ucode_init_single_fw(struct amdgpu_device *adev,
ucode->ucode_id != AMDGPU_UCODE_ID_RLC_RESTORE_LIST_GPM_MEM &&
ucode->ucode_id != AMDGPU_UCODE_ID_RLC_RESTORE_LIST_SRM_MEM &&
ucode->ucode_id != AMDGPU_UCODE_ID_DMCU_ERAM &&
- ucode->ucode_id != AMDGPU_UCODE_ID_DMCU_INTV)) {
+ ucode->ucode_id != AMDGPU_UCODE_ID_DMCU_INTV &&
+ ucode->ucode_id != AMDGPU_UCODE_ID_DMCUB)) {
ucode->ucode_size = le32_to_cpu(header->ucode_size_bytes);
memcpy(ucode->kaddr, (void *)((uint8_t *)ucode->fw->data +
@@ -506,6 +509,12 @@ static int amdgpu_ucode_init_single_fw(struct amdgpu_device *adev,
le32_to_cpu(header->ucode_array_offset_bytes) +
le32_to_cpu(dmcu_hdr->intv_offset_bytes)),
ucode->ucode_size);
+ } else if (ucode->ucode_id == AMDGPU_UCODE_ID_DMCUB) {
+ ucode->ucode_size = le32_to_cpu(dmcub_hdr->inst_const_bytes);
+ memcpy(ucode->kaddr,
+ (void *)((uint8_t *)ucode->fw->data +
+ le32_to_cpu(header->ucode_array_offset_bytes)),
+ ucode->ucode_size);
} else if (ucode->ucode_id == AMDGPU_UCODE_ID_RLC_RESTORE_LIST_CNTL) {
ucode->ucode_size = adev->gfx.rlc.save_restore_list_cntl_size_bytes;
memcpy(ucode->kaddr, adev->gfx.rlc.save_restore_list_cntl,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.h
index 914acecda5cf..b0e656409c03 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.h
@@ -251,6 +251,13 @@ struct dmcu_firmware_header_v1_0 {
uint32_t intv_size_bytes; /* size of interrupt vectors, in bytes */
};
+/* version_major=1, version_minor=0 */
+struct dmcub_firmware_header_v1_0 {
+ struct common_firmware_header header;
+ uint32_t inst_const_bytes; /* size of instruction region, in bytes */
+ uint32_t bss_data_bytes; /* size of bss/data region, in bytes */
+};
+
/* header is fixed size */
union amdgpu_firmware_header {
struct common_firmware_header common;
@@ -268,6 +275,7 @@ union amdgpu_firmware_header {
struct sdma_firmware_header_v1_1 sdma_v1_1;
struct gpu_info_firmware_header_v1_0 gpu_info;
struct dmcu_firmware_header_v1_0 dmcu;
+ struct dmcub_firmware_header_v1_0 dmcub;
uint8_t raw[0x100];
};
@@ -307,6 +315,7 @@ enum AMDGPU_UCODE_ID {
AMDGPU_UCODE_ID_DMCU_INTV,
AMDGPU_UCODE_ID_VCN0_RAM,
AMDGPU_UCODE_ID_VCN1_RAM,
+ AMDGPU_UCODE_ID_DMCUB,
AMDGPU_UCODE_ID_MAXIMUM,
};
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.c
index d4fb9cf27e21..f4d40855147b 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.c
@@ -95,13 +95,6 @@ int amdgpu_umc_process_ras_data_cb(struct amdgpu_device *adev,
{
struct ras_err_data *err_data = (struct ras_err_data *)ras_error_status;
- /* When “Full RAS” is enabled, the per-IP interrupt sources should
- * be disabled and the driver should only look for the aggregated
- * interrupt via sync flood
- */
- if (amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__GFX))
- return AMDGPU_RAS_SUCCESS;
-
kgd2kfd_set_sram_ecc_flag(adev->kfd.dev);
if (adev->umc.funcs &&
adev->umc.funcs->query_ras_error_count)
@@ -113,6 +106,7 @@ int amdgpu_umc_process_ras_data_cb(struct amdgpu_device *adev,
err_data->err_addr =
kcalloc(adev->umc.max_ras_err_cnt_per_query,
sizeof(struct eeprom_table_record), GFP_KERNEL);
+
/* still call query_ras_error_address to clear error status
* even NOMEM error is encountered
*/
@@ -132,7 +126,7 @@ int amdgpu_umc_process_ras_data_cb(struct amdgpu_device *adev,
err_data->err_addr_cnt))
DRM_WARN("Failed to add ras bad page!\n");
- amdgpu_ras_reset_gpu(adev, 0);
+ amdgpu_ras_reset_gpu(adev);
}
kfree(err_data->err_addr);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.h
index 3283032a78e5..a615a1eb750b 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.h
@@ -21,38 +21,6 @@
#ifndef __AMDGPU_UMC_H__
#define __AMDGPU_UMC_H__
-/* implement 64 bits REG operations via 32 bits interface */
-#define RREG64_UMC(reg) (RREG32(reg) | \
- ((uint64_t)RREG32((reg) + 1) << 32))
-#define WREG64_UMC(reg, v) \
- do { \
- WREG32((reg), lower_32_bits(v)); \
- WREG32((reg) + 1, upper_32_bits(v)); \
- } while (0)
-
-/*
- * void (*func)(struct amdgpu_device *adev, struct ras_err_data *err_data,
- * uint32_t umc_reg_offset, uint32_t channel_index)
- */
-#define amdgpu_umc_for_each_channel(func) \
- struct ras_err_data *err_data = (struct ras_err_data *)ras_error_status; \
- uint32_t umc_inst, channel_inst, umc_reg_offset, channel_index; \
- for (umc_inst = 0; umc_inst < adev->umc.umc_inst_num; umc_inst++) { \
- /* enable the index mode to query eror count per channel */ \
- adev->umc.funcs->enable_umc_index_mode(adev, umc_inst); \
- for (channel_inst = 0; \
- channel_inst < adev->umc.channel_inst_num; \
- channel_inst++) { \
- /* calc the register offset according to channel instance */ \
- umc_reg_offset = adev->umc.channel_offs * channel_inst; \
- /* get channel index of interleaved memory */ \
- channel_index = adev->umc.channel_idx_tbl[ \
- umc_inst * adev->umc.channel_inst_num + channel_inst]; \
- (func)(adev, err_data, umc_reg_offset, channel_index); \
- } \
- } \
- adev->umc.funcs->disable_umc_index_mode(adev);
-
struct amdgpu_umc_funcs {
void (*err_cnt_init)(struct amdgpu_device *adev);
int (*ras_late_init)(struct amdgpu_device *adev);
@@ -60,9 +28,6 @@ struct amdgpu_umc_funcs {
void *ras_error_status);
void (*query_ras_error_address)(struct amdgpu_device *adev,
void *ras_error_status);
- void (*enable_umc_index_mode)(struct amdgpu_device *adev,
- uint32_t umc_instance);
- void (*disable_umc_index_mode)(struct amdgpu_device *adev);
void (*init_registers)(struct amdgpu_device *adev);
};
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
index e324bfe6c58f..a92f3b18e657 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
@@ -330,12 +330,13 @@ int amdgpu_uvd_sw_fini(struct amdgpu_device *adev)
int amdgpu_uvd_entity_init(struct amdgpu_device *adev)
{
struct amdgpu_ring *ring;
- struct drm_sched_rq *rq;
+ struct drm_gpu_scheduler *sched;
int r;
ring = &adev->uvd.inst[0].ring;
- rq = &ring->sched.sched_rq[DRM_SCHED_PRIORITY_NORMAL];
- r = drm_sched_entity_init(&adev->uvd.entity, &rq, 1, NULL);
+ sched = &ring->sched;
+ r = drm_sched_entity_init(&adev->uvd.entity, DRM_SCHED_PRIORITY_NORMAL,
+ &sched, 1, NULL);
if (r) {
DRM_ERROR("Failed setting up UVD kernel entity.\n");
return r;
@@ -349,6 +350,7 @@ int amdgpu_uvd_suspend(struct amdgpu_device *adev)
unsigned size;
void *ptr;
int i, j;
+ bool in_ras_intr = amdgpu_ras_intr_triggered();
cancel_delayed_work_sync(&adev->uvd.idle_work);
@@ -376,13 +378,15 @@ int amdgpu_uvd_suspend(struct amdgpu_device *adev)
return -ENOMEM;
/* re-write 0 since err_event_athub will corrupt VCPU buffer */
- if (amdgpu_ras_intr_triggered()) {
- DRM_WARN("UVD VCPU state may lost due to RAS ERREVENT_ATHUB_INTERRUPT\n");
+ if (in_ras_intr)
memset(adev->uvd.inst[j].saved_bo, 0, size);
- } else {
+ else
memcpy_fromio(adev->uvd.inst[j].saved_bo, ptr, size);
- }
}
+
+ if (in_ras_intr)
+ DRM_WARN("UVD VCPU state may lost due to RAS ERREVENT_ATHUB_INTERRUPT\n");
+
return 0;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
index 46b590af2fd2..ceb0dbf685f1 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
@@ -240,12 +240,13 @@ int amdgpu_vce_sw_fini(struct amdgpu_device *adev)
int amdgpu_vce_entity_init(struct amdgpu_device *adev)
{
struct amdgpu_ring *ring;
- struct drm_sched_rq *rq;
+ struct drm_gpu_scheduler *sched;
int r;
ring = &adev->vce.ring[0];
- rq = &ring->sched.sched_rq[DRM_SCHED_PRIORITY_NORMAL];
- r = drm_sched_entity_init(&adev->vce.entity, &rq, 1, NULL);
+ sched = &ring->sched;
+ r = drm_sched_entity_init(&adev->vce.entity, DRM_SCHED_PRIORITY_NORMAL,
+ &sched, 1, NULL);
if (r != 0) {
DRM_ERROR("Failed setting up VCE run queue.\n");
return r;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
index 9d870444d7d6..f96464e2c157 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
@@ -28,19 +28,10 @@
#include <linux/module.h>
#include <linux/pci.h>
-#include <drm/drm.h>
-
#include "amdgpu.h"
#include "amdgpu_pm.h"
#include "amdgpu_vcn.h"
#include "soc15d.h"
-#include "soc15_common.h"
-
-#include "vcn/vcn_1_0_offset.h"
-#include "vcn/vcn_1_0_sh_mask.h"
-
-/* 1 second timeout */
-#define VCN_IDLE_TIMEOUT msecs_to_jiffies(1000)
/* Firmware Names */
#define FIRMWARE_RAVEN "amdgpu/raven_vcn.bin"
@@ -84,6 +75,9 @@ int amdgpu_vcn_sw_init(struct amdgpu_device *adev)
break;
case CHIP_ARCTURUS:
fw_name = FIRMWARE_ARCTURUS;
+ if ((adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) &&
+ (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG))
+ adev->vcn.indirect_sram = true;
break;
case CHIP_RENOIR:
fw_name = FIRMWARE_RENOIR;
@@ -174,15 +168,15 @@ int amdgpu_vcn_sw_init(struct amdgpu_device *adev)
dev_err(adev->dev, "(%d) failed to allocate vcn bo\n", r);
return r;
}
- }
- if (adev->vcn.indirect_sram) {
- r = amdgpu_bo_create_kernel(adev, 64 * 2 * 4, PAGE_SIZE,
- AMDGPU_GEM_DOMAIN_VRAM, &adev->vcn.dpg_sram_bo,
- &adev->vcn.dpg_sram_gpu_addr, &adev->vcn.dpg_sram_cpu_addr);
- if (r) {
- dev_err(adev->dev, "(%d) failed to allocate DPG bo\n", r);
- return r;
+ if (adev->vcn.indirect_sram) {
+ r = amdgpu_bo_create_kernel(adev, 64 * 2 * 4, PAGE_SIZE,
+ AMDGPU_GEM_DOMAIN_VRAM, &adev->vcn.inst[i].dpg_sram_bo,
+ &adev->vcn.inst[i].dpg_sram_gpu_addr, &adev->vcn.inst[i].dpg_sram_cpu_addr);
+ if (r) {
+ dev_err(adev->dev, "VCN %d (%d) failed to allocate DPG bo\n", i, r);
+ return r;
+ }
}
}
@@ -195,15 +189,14 @@ int amdgpu_vcn_sw_fini(struct amdgpu_device *adev)
cancel_delayed_work_sync(&adev->vcn.idle_work);
- if (adev->vcn.indirect_sram) {
- amdgpu_bo_free_kernel(&adev->vcn.dpg_sram_bo,
- &adev->vcn.dpg_sram_gpu_addr,
- (void **)&adev->vcn.dpg_sram_cpu_addr);
- }
-
for (j = 0; j < adev->vcn.num_vcn_inst; ++j) {
if (adev->vcn.harvest_config & (1 << j))
continue;
+ if (adev->vcn.indirect_sram) {
+ amdgpu_bo_free_kernel(&adev->vcn.inst[j].dpg_sram_bo,
+ &adev->vcn.inst[j].dpg_sram_gpu_addr,
+ (void **)&adev->vcn.inst[j].dpg_sram_cpu_addr);
+ }
kvfree(adev->vcn.inst[j].saved_bo);
amdgpu_bo_free_kernel(&adev->vcn.inst[j].vcpu_bo,
@@ -214,8 +207,6 @@ int amdgpu_vcn_sw_fini(struct amdgpu_device *adev)
for (i = 0; i < adev->vcn.num_enc_rings; ++i)
amdgpu_ring_fini(&adev->vcn.inst[j].ring_enc[i]);
-
- amdgpu_ring_fini(&adev->vcn.inst[j].ring_jpeg);
}
release_firmware(adev->vcn.fw);
@@ -296,6 +287,7 @@ static void amdgpu_vcn_idle_work_handler(struct work_struct *work)
for (j = 0; j < adev->vcn.num_vcn_inst; ++j) {
if (adev->vcn.harvest_config & (1 << j))
continue;
+
for (i = 0; i < adev->vcn.num_enc_rings; ++i) {
fence[j] += amdgpu_fence_count_emitted(&adev->vcn.inst[j].ring_enc[i]);
}
@@ -308,26 +300,17 @@ static void amdgpu_vcn_idle_work_handler(struct work_struct *work)
else
new_state.fw_based = VCN_DPG_STATE__UNPAUSE;
- if (amdgpu_fence_count_emitted(&adev->vcn.inst[j].ring_jpeg))
- new_state.jpeg = VCN_DPG_STATE__PAUSE;
- else
- new_state.jpeg = VCN_DPG_STATE__UNPAUSE;
-
- adev->vcn.pause_dpg_mode(adev, &new_state);
+ adev->vcn.pause_dpg_mode(adev, j, &new_state);
}
- fence[j] += amdgpu_fence_count_emitted(&adev->vcn.inst[j].ring_jpeg);
fence[j] += amdgpu_fence_count_emitted(&adev->vcn.inst[j].ring_dec);
fences += fence[j];
}
if (fences == 0) {
amdgpu_gfx_off_ctrl(adev, true);
- if (adev->asic_type < CHIP_ARCTURUS && adev->pm.dpm_enabled)
- amdgpu_dpm_enable_uvd(adev, false);
- else
- amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_VCN,
- AMD_PG_STATE_GATE);
+ amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_VCN,
+ AMD_PG_STATE_GATE);
} else {
schedule_delayed_work(&adev->vcn.idle_work, VCN_IDLE_TIMEOUT);
}
@@ -340,11 +323,8 @@ void amdgpu_vcn_ring_begin_use(struct amdgpu_ring *ring)
if (set_clocks) {
amdgpu_gfx_off_ctrl(adev, false);
- if (adev->asic_type < CHIP_ARCTURUS && adev->pm.dpm_enabled)
- amdgpu_dpm_enable_uvd(adev, true);
- else
- amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_VCN,
- AMD_PG_STATE_UNGATE);
+ amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_VCN,
+ AMD_PG_STATE_UNGATE);
}
if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) {
@@ -360,17 +340,10 @@ void amdgpu_vcn_ring_begin_use(struct amdgpu_ring *ring)
else
new_state.fw_based = VCN_DPG_STATE__UNPAUSE;
- if (amdgpu_fence_count_emitted(&adev->vcn.inst[ring->me].ring_jpeg))
- new_state.jpeg = VCN_DPG_STATE__PAUSE;
- else
- new_state.jpeg = VCN_DPG_STATE__UNPAUSE;
-
if (ring->funcs->type == AMDGPU_RING_TYPE_VCN_ENC)
new_state.fw_based = VCN_DPG_STATE__PAUSE;
- else if (ring->funcs->type == AMDGPU_RING_TYPE_VCN_JPEG)
- new_state.jpeg = VCN_DPG_STATE__PAUSE;
- adev->vcn.pause_dpg_mode(adev, &new_state);
+ adev->vcn.pause_dpg_mode(adev, ring->me, &new_state);
}
}
@@ -520,9 +493,14 @@ static int amdgpu_vcn_dec_get_destroy_msg(struct amdgpu_ring *ring, uint32_t han
int amdgpu_vcn_dec_ring_test_ib(struct amdgpu_ring *ring, long timeout)
{
+ struct amdgpu_device *adev = ring->adev;
struct dma_fence *fence;
long r;
+ /* temporarily disable ib test for sriov */
+ if (amdgpu_sriov_vf(adev))
+ return 0;
+
r = amdgpu_vcn_dec_get_create_msg(ring, 1, NULL);
if (r)
goto error;
@@ -678,10 +656,15 @@ err:
int amdgpu_vcn_enc_ring_test_ib(struct amdgpu_ring *ring, long timeout)
{
+ struct amdgpu_device *adev = ring->adev;
struct dma_fence *fence = NULL;
struct amdgpu_bo *bo = NULL;
long r;
+ /* temporarily disable ib test for sriov */
+ if (amdgpu_sriov_vf(adev))
+ return 0;
+
r = amdgpu_bo_create_reserved(ring->adev, 128 * 1024, PAGE_SIZE,
AMDGPU_GEM_DOMAIN_VRAM,
&bo, NULL, NULL);
@@ -708,108 +691,3 @@ error:
amdgpu_bo_unref(&bo);
return r;
}
-
-int amdgpu_vcn_jpeg_ring_test_ring(struct amdgpu_ring *ring)
-{
- struct amdgpu_device *adev = ring->adev;
- uint32_t tmp = 0;
- unsigned i;
- int r;
-
- WREG32(adev->vcn.inst[ring->me].external.jpeg_pitch, 0xCAFEDEAD);
- r = amdgpu_ring_alloc(ring, 3);
- if (r)
- return r;
-
- amdgpu_ring_write(ring, PACKET0(adev->vcn.internal.jpeg_pitch, 0));
- amdgpu_ring_write(ring, 0xDEADBEEF);
- amdgpu_ring_commit(ring);
-
- for (i = 0; i < adev->usec_timeout; i++) {
- tmp = RREG32(adev->vcn.inst[ring->me].external.jpeg_pitch);
- if (tmp == 0xDEADBEEF)
- break;
- udelay(1);
- }
-
- if (i >= adev->usec_timeout)
- r = -ETIMEDOUT;
-
- return r;
-}
-
-static int amdgpu_vcn_jpeg_set_reg(struct amdgpu_ring *ring, uint32_t handle,
- struct dma_fence **fence)
-{
- struct amdgpu_device *adev = ring->adev;
- struct amdgpu_job *job;
- struct amdgpu_ib *ib;
- struct dma_fence *f = NULL;
- const unsigned ib_size_dw = 16;
- int i, r;
-
- r = amdgpu_job_alloc_with_ib(ring->adev, ib_size_dw * 4, &job);
- if (r)
- return r;
-
- ib = &job->ibs[0];
-
- ib->ptr[0] = PACKETJ(adev->vcn.internal.jpeg_pitch, 0, 0, PACKETJ_TYPE0);
- ib->ptr[1] = 0xDEADBEEF;
- for (i = 2; i < 16; i += 2) {
- ib->ptr[i] = PACKETJ(0, 0, 0, PACKETJ_TYPE6);
- ib->ptr[i+1] = 0;
- }
- ib->length_dw = 16;
-
- r = amdgpu_job_submit_direct(job, ring, &f);
- if (r)
- goto err;
-
- if (fence)
- *fence = dma_fence_get(f);
- dma_fence_put(f);
-
- return 0;
-
-err:
- amdgpu_job_free(job);
- return r;
-}
-
-int amdgpu_vcn_jpeg_ring_test_ib(struct amdgpu_ring *ring, long timeout)
-{
- struct amdgpu_device *adev = ring->adev;
- uint32_t tmp = 0;
- unsigned i;
- struct dma_fence *fence = NULL;
- long r = 0;
-
- r = amdgpu_vcn_jpeg_set_reg(ring, 1, &fence);
- if (r)
- goto error;
-
- r = dma_fence_wait_timeout(fence, false, timeout);
- if (r == 0) {
- r = -ETIMEDOUT;
- goto error;
- } else if (r < 0) {
- goto error;
- } else {
- r = 0;
- }
-
- for (i = 0; i < adev->usec_timeout; i++) {
- tmp = RREG32(adev->vcn.inst[ring->me].external.jpeg_pitch);
- if (tmp == 0xDEADBEEF)
- break;
- udelay(1);
- }
-
- if (i >= adev->usec_timeout)
- r = -ETIMEDOUT;
-
- dma_fence_put(fence);
-error:
- return r;
-}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h
index dface275c81a..c4984c5fb2db 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h
@@ -31,6 +31,7 @@
#define AMDGPU_VCN_MAX_ENC_RINGS 3
#define AMDGPU_MAX_VCN_INSTANCES 2
+#define AMDGPU_MAX_VCN_ENC_RINGS (AMDGPU_VCN_MAX_ENC_RINGS * AMDGPU_MAX_VCN_INSTANCES)
#define AMDGPU_VCN_HARVEST_VCN0 (1 << 0)
#define AMDGPU_VCN_HARVEST_VCN1 (1 << 1)
@@ -56,6 +57,14 @@
#define VCN_VID_IP_ADDRESS_2_0 0x0
#define VCN_AON_IP_ADDRESS_2_0 0x30000
+#define mmUVD_RBC_XX_IB_REG_CHECK 0x026b
+#define mmUVD_RBC_XX_IB_REG_CHECK_BASE_IDX 1
+#define mmUVD_REG_XX_MASK 0x026c
+#define mmUVD_REG_XX_MASK_BASE_IDX 1
+
+/* 1 second timeout */
+#define VCN_IDLE_TIMEOUT msecs_to_jiffies(1000)
+
#define RREG32_SOC15_DPG_MODE(ip, inst, reg, mask, sram_sel) \
({ WREG32_SOC15(ip, inst, mmUVD_DPG_LMA_MASK, mask); \
WREG32_SOC15(ip, inst, mmUVD_DPG_LMA_CTL, \
@@ -100,27 +109,27 @@
internal_reg_offset >>= 2; \
})
-#define RREG32_SOC15_DPG_MODE_2_0(offset, mask_en) \
- ({ \
- WREG32_SOC15(VCN, 0, mmUVD_DPG_LMA_CTL, \
- (0x0 << UVD_DPG_LMA_CTL__READ_WRITE__SHIFT | \
- mask_en << UVD_DPG_LMA_CTL__MASK_EN__SHIFT | \
- offset << UVD_DPG_LMA_CTL__READ_WRITE_ADDR__SHIFT)); \
- RREG32_SOC15(VCN, 0, mmUVD_DPG_LMA_DATA); \
+#define RREG32_SOC15_DPG_MODE_2_0(inst_idx, offset, mask_en) \
+ ({ \
+		WREG32_SOC15(VCN, inst_idx, mmUVD_DPG_LMA_CTL, \
+ (0x0 << UVD_DPG_LMA_CTL__READ_WRITE__SHIFT | \
+ mask_en << UVD_DPG_LMA_CTL__MASK_EN__SHIFT | \
+ offset << UVD_DPG_LMA_CTL__READ_WRITE_ADDR__SHIFT)); \
+ RREG32_SOC15(VCN, inst_idx, mmUVD_DPG_LMA_DATA); \
})
-#define WREG32_SOC15_DPG_MODE_2_0(offset, value, mask_en, indirect) \
- do { \
- if (!indirect) { \
- WREG32_SOC15(VCN, 0, mmUVD_DPG_LMA_DATA, value); \
- WREG32_SOC15(VCN, 0, mmUVD_DPG_LMA_CTL, \
- (0x1 << UVD_DPG_LMA_CTL__READ_WRITE__SHIFT | \
- mask_en << UVD_DPG_LMA_CTL__MASK_EN__SHIFT | \
- offset << UVD_DPG_LMA_CTL__READ_WRITE_ADDR__SHIFT)); \
- } else { \
- *adev->vcn.dpg_sram_curr_addr++ = offset; \
- *adev->vcn.dpg_sram_curr_addr++ = value; \
- } \
+#define WREG32_SOC15_DPG_MODE_2_0(inst_idx, offset, value, mask_en, indirect) \
+ do { \
+ if (!indirect) { \
+ WREG32_SOC15(VCN, inst_idx, mmUVD_DPG_LMA_DATA, value); \
+ WREG32_SOC15(VCN, inst_idx, mmUVD_DPG_LMA_CTL, \
+ (0x1 << UVD_DPG_LMA_CTL__READ_WRITE__SHIFT | \
+ mask_en << UVD_DPG_LMA_CTL__MASK_EN__SHIFT | \
+ offset << UVD_DPG_LMA_CTL__READ_WRITE_ADDR__SHIFT)); \
+ } else { \
+ *adev->vcn.inst[inst_idx].dpg_sram_curr_addr++ = offset; \
+ *adev->vcn.inst[inst_idx].dpg_sram_curr_addr++ = value; \
+ } \
} while (0)
enum engine_status_constants {
@@ -158,7 +167,6 @@ struct amdgpu_vcn_reg{
unsigned ib_size;
unsigned gp_scratch8;
unsigned scratch9;
- unsigned jpeg_pitch;
};
struct amdgpu_vcn_inst {
@@ -168,9 +176,12 @@ struct amdgpu_vcn_inst {
void *saved_bo;
struct amdgpu_ring ring_dec;
struct amdgpu_ring ring_enc[AMDGPU_VCN_MAX_ENC_RINGS];
- struct amdgpu_ring ring_jpeg;
struct amdgpu_irq_src irq;
struct amdgpu_vcn_reg external;
+ struct amdgpu_bo *dpg_sram_bo;
+ void *dpg_sram_cpu_addr;
+ uint64_t dpg_sram_gpu_addr;
+ uint32_t *dpg_sram_curr_addr;
};
struct amdgpu_vcn {
@@ -182,18 +193,18 @@ struct amdgpu_vcn {
struct dpg_pause_state pause_state;
bool indirect_sram;
- struct amdgpu_bo *dpg_sram_bo;
- void *dpg_sram_cpu_addr;
- uint64_t dpg_sram_gpu_addr;
- uint32_t *dpg_sram_curr_addr;
uint8_t num_vcn_inst;
- struct amdgpu_vcn_inst inst[AMDGPU_MAX_VCN_INSTANCES];
- struct amdgpu_vcn_reg internal;
+ struct amdgpu_vcn_inst inst[AMDGPU_MAX_VCN_INSTANCES];
+ struct amdgpu_vcn_reg internal;
+ struct drm_gpu_scheduler *vcn_enc_sched[AMDGPU_MAX_VCN_ENC_RINGS];
+ struct drm_gpu_scheduler *vcn_dec_sched[AMDGPU_MAX_VCN_INSTANCES];
+ uint32_t num_vcn_enc_sched;
+ uint32_t num_vcn_dec_sched;
unsigned harvest_config;
int (*pause_dpg_mode)(struct amdgpu_device *adev,
- struct dpg_pause_state *new_state);
+ int inst_idx, struct dpg_pause_state *new_state);
};
int amdgpu_vcn_sw_init(struct amdgpu_device *adev);
@@ -209,7 +220,4 @@ int amdgpu_vcn_dec_ring_test_ib(struct amdgpu_ring *ring, long timeout);
int amdgpu_vcn_enc_ring_test_ring(struct amdgpu_ring *ring);
int amdgpu_vcn_enc_ring_test_ib(struct amdgpu_ring *ring, long timeout);
-int amdgpu_vcn_jpeg_ring_test_ring(struct amdgpu_ring *ring);
-int amdgpu_vcn_jpeg_ring_test_ib(struct amdgpu_ring *ring, long timeout);
-
#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c
index e32ae906d797..103033f96f13 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c
@@ -379,54 +379,3 @@ void amdgpu_virt_init_data_exchange(struct amdgpu_device *adev)
}
}
}
-
-static uint32_t parse_clk(char *buf, bool min)
-{
- char *ptr = buf;
- uint32_t clk = 0;
-
- do {
- ptr = strchr(ptr, ':');
- if (!ptr)
- break;
- ptr+=2;
- if (kstrtou32(ptr, 10, &clk))
- return 0;
- } while (!min);
-
- return clk * 100;
-}
-
-uint32_t amdgpu_virt_get_sclk(struct amdgpu_device *adev, bool lowest)
-{
- char *buf = NULL;
- uint32_t clk = 0;
-
- buf = kzalloc(PAGE_SIZE, GFP_KERNEL);
- if (!buf)
- return -ENOMEM;
-
- adev->virt.ops->get_pp_clk(adev, PP_SCLK, buf);
- clk = parse_clk(buf, lowest);
-
- kfree(buf);
-
- return clk;
-}
-
-uint32_t amdgpu_virt_get_mclk(struct amdgpu_device *adev, bool lowest)
-{
- char *buf = NULL;
- uint32_t clk = 0;
-
- buf = kzalloc(PAGE_SIZE, GFP_KERNEL);
- if (!buf)
- return -ENOMEM;
-
- adev->virt.ops->get_pp_clk(adev, PP_MCLK, buf);
- clk = parse_clk(buf, lowest);
-
- kfree(buf);
-
- return clk;
-}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h
index b0b2bdc750df..4d1ac7612967 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h
@@ -57,8 +57,6 @@ struct amdgpu_virt_ops {
int (*reset_gpu)(struct amdgpu_device *adev);
int (*wait_reset)(struct amdgpu_device *adev);
void (*trans_msg)(struct amdgpu_device *adev, u32 req, u32 data1, u32 data2, u32 data3);
- int (*get_pp_clk)(struct amdgpu_device *adev, u32 type, char *buf);
- int (*force_dpm_level)(struct amdgpu_device *adev, u32 level);
};
/*
@@ -85,8 +83,8 @@ enum AMDGIM_FEATURE_FLAG {
AMDGIM_FEATURE_GIM_LOAD_UCODES = 0x2,
/* VRAM LOST by GIM */
AMDGIM_FEATURE_GIM_FLR_VRAMLOST = 0x4,
- /* HW PERF SIM in GIM */
- AMDGIM_FEATURE_HW_PERF_SIMULATION = (1 << 3),
+ /* PP ONE VF MODE in GIM */
+ AMDGIM_FEATURE_PP_ONE_VF = (1 << 4),
};
struct amd_sriov_msg_pf2vf_info_header {
@@ -257,8 +255,6 @@ struct amdgpu_virt {
struct amdgpu_vf_error_buffer vf_errors;
struct amdgpu_virt_fw_reserve fw_reserve;
uint32_t gim_feature;
- /* protect DPM events to GIM */
- struct mutex dpm_mutex;
uint32_t reg_access_mode;
};
@@ -286,8 +282,8 @@ static inline bool is_virtual_machine(void)
#endif
}
-#define amdgim_is_hwperf(adev) \
- ((adev)->virt.gim_feature & AMDGIM_FEATURE_HW_PERF_SIMULATION)
+#define amdgpu_sriov_is_pp_one_vf(adev) \
+ ((adev)->virt.gim_feature & AMDGIM_FEATURE_PP_ONE_VF)
bool amdgpu_virt_mmio_blocked(struct amdgpu_device *adev);
void amdgpu_virt_init_setting(struct amdgpu_device *adev);
@@ -306,6 +302,4 @@ int amdgpu_virt_fw_reserve_get_checksum(void *obj, unsigned long obj_size,
unsigned int key,
unsigned int chksum);
void amdgpu_virt_init_data_exchange(struct amdgpu_device *adev);
-uint32_t amdgpu_virt_get_sclk(struct amdgpu_device *adev, bool lowest);
-uint32_t amdgpu_virt_get_mclk(struct amdgpu_device *adev, bool lowest);
#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
index 598c24505c73..d16231d6a790 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
@@ -83,6 +83,32 @@ struct amdgpu_prt_cb {
};
/**
+ * vm eviction_lock can be taken in MMU notifiers. Make sure no reclaim-FS
+ * happens while holding this lock anywhere to prevent deadlocks when
+ * an MMU notifier runs in reclaim-FS context.
+ */
+static inline void amdgpu_vm_eviction_lock(struct amdgpu_vm *vm)
+{
+ mutex_lock(&vm->eviction_lock);
+ vm->saved_flags = memalloc_nofs_save();
+}
+
+static inline int amdgpu_vm_eviction_trylock(struct amdgpu_vm *vm)
+{
+ if (mutex_trylock(&vm->eviction_lock)) {
+ vm->saved_flags = memalloc_nofs_save();
+ return 1;
+ }
+ return 0;
+}
+
+static inline void amdgpu_vm_eviction_unlock(struct amdgpu_vm *vm)
+{
+ memalloc_nofs_restore(vm->saved_flags);
+ mutex_unlock(&vm->eviction_lock);
+}
+
+/**
* amdgpu_vm_level_shift - return the addr shift for each level
*
* @adev: amdgpu_device pointer
@@ -562,8 +588,8 @@ void amdgpu_vm_get_pd_bo(struct amdgpu_vm *vm,
{
entry->priority = 0;
entry->tv.bo = &vm->root.base.bo->tbo;
- /* One for the VM updates, one for TTM and one for the CS job */
- entry->tv.num_shared = 3;
+ /* One for TTM and one for the CS job */
+ entry->tv.num_shared = 2;
entry->user_pages = NULL;
list_add(&entry->tv.head, validated);
}
@@ -656,7 +682,7 @@ int amdgpu_vm_validate_pt_bos(struct amdgpu_device *adev, struct amdgpu_vm *vm,
void *param)
{
struct amdgpu_vm_bo_base *bo_base, *tmp;
- int r = 0;
+ int r;
vm->bulk_moveable &= list_empty(&vm->evicted);
@@ -665,7 +691,7 @@ int amdgpu_vm_validate_pt_bos(struct amdgpu_device *adev, struct amdgpu_vm *vm,
r = validate(param, bo);
if (r)
- break;
+ return r;
if (bo->tbo.type != ttm_bo_type_kernel) {
amdgpu_vm_bo_moved(bo_base);
@@ -678,7 +704,11 @@ int amdgpu_vm_validate_pt_bos(struct amdgpu_device *adev, struct amdgpu_vm *vm,
}
}
- return r;
+ amdgpu_vm_eviction_lock(vm);
+ vm->evicting = false;
+ amdgpu_vm_eviction_unlock(vm);
+
+ return 0;
}
/**
@@ -1555,15 +1585,25 @@ static int amdgpu_vm_bo_update_mapping(struct amdgpu_device *adev,
if (!(flags & AMDGPU_PTE_VALID))
owner = AMDGPU_FENCE_OWNER_KFD;
+ amdgpu_vm_eviction_lock(vm);
+ if (vm->evicting) {
+ r = -EBUSY;
+ goto error_unlock;
+ }
+
r = vm->update_funcs->prepare(&params, owner, exclusive);
if (r)
- return r;
+ goto error_unlock;
r = amdgpu_vm_update_ptes(&params, start, last + 1, addr, flags);
if (r)
- return r;
+ goto error_unlock;
+
+ r = vm->update_funcs->commit(&params, fence);
- return vm->update_funcs->commit(&params, fence);
+error_unlock:
+ amdgpu_vm_eviction_unlock(vm);
+ return r;
}
/**
@@ -2500,6 +2540,41 @@ void amdgpu_vm_bo_rmv(struct amdgpu_device *adev,
}
/**
+ * amdgpu_vm_evictable - check if we can evict a VM
+ *
+ * @bo: A page table of the VM.
+ *
+ * Check if it is possible to evict a VM.
+ */
+bool amdgpu_vm_evictable(struct amdgpu_bo *bo)
+{
+ struct amdgpu_vm_bo_base *bo_base = bo->vm_bo;
+
+ /* Page tables of a destroyed VM can go away immediately */
+ if (!bo_base || !bo_base->vm)
+ return true;
+
+ /* Don't evict VM page tables while they are busy */
+ if (!dma_resv_test_signaled_rcu(bo->tbo.base.resv, true))
+ return false;
+
+ /* Try to block ongoing updates */
+ if (!amdgpu_vm_eviction_trylock(bo_base->vm))
+ return false;
+
+ /* Don't evict VM page tables while they are updated */
+ if (!dma_fence_is_signaled(bo_base->vm->last_direct) ||
+ !dma_fence_is_signaled(bo_base->vm->last_delayed)) {
+ amdgpu_vm_eviction_unlock(bo_base->vm);
+ return false;
+ }
+
+ bo_base->vm->evicting = true;
+ amdgpu_vm_eviction_unlock(bo_base->vm);
+ return true;
+}
+
+/**
* amdgpu_vm_bo_invalidate - mark the bo as invalid
*
* @adev: amdgpu_device pointer
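A hedged sketch of where this predicate slots in: TTM asks the driver whether a BO is worth evicting, and the driver can veto page tables that are still in use (wrapper name invented; the argument types follow the eviction_valuable hook):

	static bool my_eviction_valuable(struct ttm_buffer_object *bo,
					 const struct ttm_place *place)
	{
		struct amdgpu_bo *abo = ttm_to_amdgpu_bo(bo);

		/* never evict page tables whose VM is busy updating them */
		if (!amdgpu_vm_evictable(abo))
			return false;

		return ttm_bo_eviction_valuable(bo, place);
	}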
@@ -2661,8 +2736,16 @@ void amdgpu_vm_adjust_size(struct amdgpu_device *adev, uint32_t min_vm_size,
*/
long amdgpu_vm_wait_idle(struct amdgpu_vm *vm, long timeout)
{
- return dma_resv_wait_timeout_rcu(vm->root.base.bo->tbo.base.resv,
- true, true, timeout);
+ timeout = dma_resv_wait_timeout_rcu(vm->root.base.bo->tbo.base.resv,
+ true, true, timeout);
+ if (timeout <= 0)
+ return timeout;
+
+ timeout = dma_fence_wait_timeout(vm->last_direct, true, timeout);
+ if (timeout <= 0)
+ return timeout;
+
+ return dma_fence_wait_timeout(vm->last_delayed, true, timeout);
}
/**
@@ -2696,18 +2779,22 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm,
spin_lock_init(&vm->invalidated_lock);
INIT_LIST_HEAD(&vm->freed);
+
/* create scheduler entities for page table updates */
- r = drm_sched_entity_init(&vm->direct, adev->vm_manager.vm_pte_rqs,
- adev->vm_manager.vm_pte_num_rqs, NULL);
+ r = drm_sched_entity_init(&vm->direct, DRM_SCHED_PRIORITY_NORMAL,
+ adev->vm_manager.vm_pte_scheds,
+ adev->vm_manager.vm_pte_num_scheds, NULL);
if (r)
return r;
- r = drm_sched_entity_init(&vm->delayed, adev->vm_manager.vm_pte_rqs,
- adev->vm_manager.vm_pte_num_rqs, NULL);
+ r = drm_sched_entity_init(&vm->delayed, DRM_SCHED_PRIORITY_NORMAL,
+ adev->vm_manager.vm_pte_scheds,
+ adev->vm_manager.vm_pte_num_scheds, NULL);
if (r)
goto error_free_direct;
vm->pte_support_ats = false;
+ vm->is_compute_context = false;
if (vm_context == AMDGPU_VM_CONTEXT_COMPUTE) {
vm->use_cpu_for_update = !!(adev->vm_manager.vm_update_mode &
@@ -2730,6 +2817,11 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm,
else
vm->update_funcs = &amdgpu_vm_sdma_funcs;
vm->last_update = NULL;
+ vm->last_direct = dma_fence_get_stub();
+ vm->last_delayed = dma_fence_get_stub();
+
+ mutex_init(&vm->eviction_lock);
+ vm->evicting = false;
amdgpu_vm_bo_param(adev, vm, adev->vm_manager.root_level, false, &bp);
if (vm_context == AMDGPU_VM_CONTEXT_COMPUTE)
@@ -2780,6 +2872,8 @@ error_free_root:
vm->root.base.bo = NULL;
error_free_delayed:
+ dma_fence_put(vm->last_direct);
+ dma_fence_put(vm->last_delayed);
drm_sched_entity_destroy(&vm->delayed);
error_free_direct:
@@ -2893,6 +2987,7 @@ int amdgpu_vm_make_compute(struct amdgpu_device *adev, struct amdgpu_vm *vm,
vm->update_funcs = &amdgpu_vm_sdma_funcs;
dma_fence_put(vm->last_update);
vm->last_update = NULL;
+ vm->is_compute_context = true;
if (vm->pasid) {
unsigned long flags;
@@ -2947,6 +3042,7 @@ void amdgpu_vm_release_compute(struct amdgpu_device *adev, struct amdgpu_vm *vm)
spin_unlock_irqrestore(&adev->vm_manager.pasid_lock, flags);
}
vm->pasid = 0;
+ vm->is_compute_context = false;
}
/**
@@ -2978,6 +3074,11 @@ void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm)
vm->pasid = 0;
}
+ dma_fence_wait(vm->last_direct, false);
+ dma_fence_put(vm->last_direct);
+ dma_fence_wait(vm->last_delayed, false);
+ dma_fence_put(vm->last_delayed);
+
list_for_each_entry_safe(mapping, tmp, &vm->freed, list) {
if (mapping->flags & AMDGPU_PTE_PRT && prt_fini_needed) {
amdgpu_vm_prt_fini(adev, vm);
@@ -3194,11 +3295,20 @@ bool amdgpu_vm_handle_fault(struct amdgpu_device *adev, unsigned int pasid,
flags = AMDGPU_PTE_VALID | AMDGPU_PTE_SNOOPED |
AMDGPU_PTE_SYSTEM;
- if (amdgpu_vm_fault_stop == AMDGPU_VM_FAULT_STOP_NEVER) {
+ if (vm->is_compute_context) {
+ /* Intentionally set an invalid PTE flag
+ * combination to force a no-retry fault
+ */
+ flags = AMDGPU_PTE_EXECUTABLE | AMDGPU_PDE_PTE |
+ AMDGPU_PTE_TF;
+ value = 0;
+
+ } else if (amdgpu_vm_fault_stop == AMDGPU_VM_FAULT_STOP_NEVER) {
/* Redirect the access to the dummy page */
value = adev->dummy_page_addr;
flags |= AMDGPU_PTE_EXECUTABLE | AMDGPU_PTE_READABLE |
AMDGPU_PTE_WRITEABLE;
+
} else {
/* Let the hw retry silently on the PTE */
value = 0;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
index 4dbbe1b6b413..b4640ab38c95 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
@@ -30,6 +30,7 @@
#include <drm/gpu_scheduler.h>
#include <drm/drm_file.h>
#include <drm/ttm/ttm_bo_driver.h>
+#include <linux/sched/mm.h>
#include "amdgpu_sync.h"
#include "amdgpu_ring.h"
@@ -239,6 +240,13 @@ struct amdgpu_vm {
/* tree of virtual addresses mapped */
struct rb_root_cached va;
+ /* Lock to prevent eviction while we are updating page tables;
+ * use amdgpu_vm_eviction_lock/unlock(vm)
+ */
+ struct mutex eviction_lock;
+ bool evicting;
+ unsigned int saved_flags;
+
/* BOs who needs a validation */
struct list_head evicted;
@@ -266,6 +274,10 @@ struct amdgpu_vm {
struct drm_sched_entity direct;
struct drm_sched_entity delayed;
+ /* Last submission to the scheduler entities */
+ struct dma_fence *last_direct;
+ struct dma_fence *last_delayed;
+
unsigned int pasid;
/* dedicated to vm */
struct amdgpu_vmid *reserved_vmid[AMDGPU_MAX_VMHUBS];
@@ -298,6 +310,8 @@ struct amdgpu_vm {
struct ttm_lru_bulk_move lru_bulk_move;
/* mark whether can do the bulk move */
bool bulk_moveable;
+ /* Flag to indicate if VM is used for compute */
+ bool is_compute_context;
};
struct amdgpu_vm_manager {
@@ -317,8 +331,8 @@ struct amdgpu_vm_manager {
u64 vram_base_offset;
/* vm pte handling */
const struct amdgpu_vm_pte_funcs *vm_pte_funcs;
- struct drm_sched_rq *vm_pte_rqs[AMDGPU_MAX_RINGS];
- unsigned vm_pte_num_rqs;
+ struct drm_gpu_scheduler *vm_pte_scheds[AMDGPU_MAX_RINGS];
+ unsigned vm_pte_num_scheds;
struct amdgpu_ring *page_fault;
/* partial resident texture handling */
@@ -376,6 +390,7 @@ int amdgpu_vm_handle_moved(struct amdgpu_device *adev,
int amdgpu_vm_bo_update(struct amdgpu_device *adev,
struct amdgpu_bo_va *bo_va,
bool clear);
+bool amdgpu_vm_evictable(struct amdgpu_bo *bo);
void amdgpu_vm_bo_invalidate(struct amdgpu_device *adev,
struct amdgpu_bo *bo, bool evicted);
uint64_t amdgpu_vm_map_gart(const dma_addr_t *pages_addr, uint64_t addr);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_sdma.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_sdma.c
index 832db59f441e..19b7f80758f1 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_sdma.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_sdma.c
@@ -71,7 +71,7 @@ static int amdgpu_vm_sdma_prepare(struct amdgpu_vm_update_params *p,
p->num_dw_left = ndw;
/* Wait for moves to be completed */
- r = amdgpu_sync_fence(p->adev, &p->job->sync, exclusive, false);
+ r = amdgpu_sync_fence(&p->job->sync, exclusive, false);
if (r)
return r;
@@ -95,11 +95,10 @@ static int amdgpu_vm_sdma_prepare(struct amdgpu_vm_update_params *p,
static int amdgpu_vm_sdma_commit(struct amdgpu_vm_update_params *p,
struct dma_fence **fence)
{
- struct amdgpu_bo *root = p->vm->root.base.bo;
struct amdgpu_ib *ib = p->job->ibs;
struct drm_sched_entity *entity;
+ struct dma_fence *f, *tmp;
struct amdgpu_ring *ring;
- struct dma_fence *f;
int r;
entity = p->direct ? &p->vm->direct : &p->vm->delayed;
@@ -112,7 +111,13 @@ static int amdgpu_vm_sdma_commit(struct amdgpu_vm_update_params *p,
if (r)
goto error;
- amdgpu_bo_fence(root, f, true);
+ tmp = dma_fence_get(f);
+ if (p->direct)
+ swap(p->vm->last_direct, tmp);
+ else
+ swap(p->vm->last_delayed, tmp);
+ dma_fence_put(tmp);
+
if (fence && !p->direct)
swap(*fence, f);
dma_fence_put(f);
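The commit path above keeps a per-entity "last submission" fence by taking an extra reference on the new fence and swapping it into the tracking slot. A minimal sketch of the reference-counting idiom in isolation (helper name invented):

	#include <linux/kernel.h>	/* swap() */
	#include <linux/dma-fence.h>

	static void track_last_fence(struct dma_fence **slot, struct dma_fence *f)
	{
		struct dma_fence *tmp = dma_fence_get(f);	/* +1 for the slot */

		swap(*slot, tmp);	/* *slot = f, tmp = previously tracked fence */
		dma_fence_put(tmp);	/* drop the reference the slot used to hold */
	}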
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c
index 61d13d8b7b20..a97af422575a 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c
@@ -146,16 +146,16 @@ static ssize_t amdgpu_xgmi_show_error(struct device *dev,
ficaa_pie_ctl_in = AMDGPU_XGMI_SET_FICAA(0x200);
ficaa_pie_status_in = AMDGPU_XGMI_SET_FICAA(0x208);
- fica_out = adev->df_funcs->get_fica(adev, ficaa_pie_ctl_in);
+ fica_out = adev->df.funcs->get_fica(adev, ficaa_pie_ctl_in);
if (fica_out != 0x1f)
pr_err("xGMI error counters not enabled!\n");
- fica_out = adev->df_funcs->get_fica(adev, ficaa_pie_status_in);
+ fica_out = adev->df.funcs->get_fica(adev, ficaa_pie_status_in);
if ((fica_out & 0xffff) == 2)
error_count = ((fica_out >> 62) & 0x1) + (fica_out >> 63);
- adev->df_funcs->set_fica(adev, ficaa_pie_status_in, 0, 0);
+ adev->df.funcs->set_fica(adev, ficaa_pie_status_in, 0, 0);
return snprintf(buf, PAGE_SIZE, "%d\n", error_count);
}
@@ -261,6 +261,7 @@ struct amdgpu_hive_info *amdgpu_get_xgmi_hive(struct amdgpu_device *adev, int lo
INIT_LIST_HEAD(&tmp->device_list);
mutex_init(&tmp->hive_lock);
mutex_init(&tmp->reset_lock);
+ task_barrier_init(&tmp->tb);
if (lock)
mutex_lock(&tmp->hive_lock);
@@ -290,13 +291,7 @@ int amdgpu_xgmi_set_pstate(struct amdgpu_device *adev, int pstate)
dev_dbg(adev->dev, "Set xgmi pstate %d.\n", pstate);
- if (is_support_sw_smu_xgmi(adev))
- ret = smu_set_xgmi_pstate(&adev->smu, pstate);
- else if (adev->powerplay.pp_funcs &&
- adev->powerplay.pp_funcs->set_xgmi_pstate)
- ret = adev->powerplay.pp_funcs->set_xgmi_pstate(adev->powerplay.pp_handle,
- pstate);
-
+ ret = amdgpu_dpm_set_xgmi_pstate(adev, pstate);
if (ret) {
dev_err(adev->dev,
"XGMI: Set pstate failure on device %llx, hive %llx, ret %d",
@@ -408,6 +403,8 @@ int amdgpu_xgmi_add_device(struct amdgpu_device *adev)
top_info->num_nodes = count;
hive->number_devices = count;
+ task_barrier_add_task(&hive->tb);
+
if (amdgpu_device_ip_get_ip_block(adev, AMD_IP_BLOCK_TYPE_PSP)) {
list_for_each_entry(tmp_adev, &hive->device_list, gmc.xgmi.head) {
/* update node list for other device in the hive */
@@ -470,6 +467,7 @@ void amdgpu_xgmi_remove_device(struct amdgpu_device *adev)
mutex_destroy(&hive->hive_lock);
mutex_destroy(&hive->reset_lock);
} else {
+ task_barrier_rem_task(&hive->tb);
amdgpu_xgmi_sysfs_rem_dev_info(adev, hive);
mutex_unlock(&hive->hive_lock);
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.h
index bbf504ff7051..74011fbc2251 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.h
@@ -22,6 +22,7 @@
#ifndef __AMDGPU_XGMI_H__
#define __AMDGPU_XGMI_H__
+#include <drm/task_barrier.h>
#include "amdgpu_psp.h"
struct amdgpu_hive_info {
@@ -33,6 +34,7 @@ struct amdgpu_hive_info {
struct device_attribute dev_attr;
struct amdgpu_device *adev;
int pstate; /*0 -- low , 1 -- high , -1 unknown*/
+ struct task_barrier tb;
};
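The task barrier (drm/task_barrier.h) gives every device in the hive a rendezvous point, e.g. so all nodes of an XGMI hive enter and leave a hive-wide reset in lockstep. A hedged usage sketch with one worker per registered member (worker function invented for illustration):

	/* each member was registered via task_barrier_add_task(&hive->tb) */
	static int my_hive_reset_work(void *arg)
	{
		struct amdgpu_hive_info *hive = arg;

		task_barrier_enter(&hive->tb);	/* block until all members arrive */
		/* ... assert reset on this node ... */
		task_barrier_exit(&hive->tb);	/* rendezvous again before resume */
		/* ... bring this node back up ... */
		return 0;
	}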
struct amdgpu_hive_info *amdgpu_get_xgmi_hive(struct amdgpu_device *adev, int lock);
diff --git a/drivers/gpu/drm/amd/amdgpu/atombios_dp.c b/drivers/gpu/drm/amd/amdgpu/atombios_dp.c
index 6858cde9fc5d..ea702a64f807 100644
--- a/drivers/gpu/drm/amd/amdgpu/atombios_dp.c
+++ b/drivers/gpu/drm/amd/amdgpu/atombios_dp.c
@@ -361,7 +361,6 @@ int amdgpu_atombios_dp_get_panel_mode(struct drm_encoder *encoder,
struct drm_connector *connector)
{
struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector);
- struct amdgpu_connector_atom_dig *dig_connector;
int panel_mode = DP_PANEL_MODE_EXTERNAL_DP_MODE;
u16 dp_bridge = amdgpu_connector_encoder_get_dp_bridge_encoder_id(connector);
u8 tmp;
@@ -369,8 +368,6 @@ int amdgpu_atombios_dp_get_panel_mode(struct drm_encoder *encoder,
if (!amdgpu_connector->con_priv)
return panel_mode;
- dig_connector = amdgpu_connector->con_priv;
-
if (dp_bridge != ENCODER_OBJECT_ID_NONE) {
/* DP bridge chips */
if (drm_dp_dpcd_readb(&amdgpu_connector->ddc_bus->aux,
@@ -713,7 +710,6 @@ void amdgpu_atombios_dp_link_train(struct drm_encoder *encoder,
struct drm_device *dev = encoder->dev;
struct amdgpu_device *adev = dev->dev_private;
struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
- struct amdgpu_encoder_atom_dig *dig;
struct amdgpu_connector *amdgpu_connector;
struct amdgpu_connector_atom_dig *dig_connector;
struct amdgpu_atombios_dp_link_train_info dp_info;
@@ -721,7 +717,6 @@ void amdgpu_atombios_dp_link_train(struct drm_encoder *encoder,
if (!amdgpu_encoder->enc_priv)
return;
- dig = amdgpu_encoder->enc_priv;
amdgpu_connector = to_amdgpu_connector(connector);
if (!amdgpu_connector->con_priv)
diff --git a/drivers/gpu/drm/amd/amdgpu/atombios_i2c.c b/drivers/gpu/drm/amd/amdgpu/atombios_i2c.c
index 980c363b1a0a..b4cc7c55fa16 100644
--- a/drivers/gpu/drm/amd/amdgpu/atombios_i2c.c
+++ b/drivers/gpu/drm/amd/amdgpu/atombios_i2c.c
@@ -76,11 +76,6 @@ static int amdgpu_atombios_i2c_process_i2c_ch(struct amdgpu_i2c_chan *chan,
}
args.lpI2CDataOut = cpu_to_le16(out);
} else {
- if (num > ATOM_MAX_HW_I2C_READ) {
- DRM_ERROR("hw i2c: tried to read too many bytes (%d vs 255)\n", num);
- r = -EINVAL;
- goto done;
- }
args.ucRegIndex = 0;
args.lpI2CDataOut = 0;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/cik.c b/drivers/gpu/drm/amd/amdgpu/cik.c
index 1befdee9f0f1..006f21ef7ddf 100644
--- a/drivers/gpu/drm/amd/amdgpu/cik.c
+++ b/drivers/gpu/drm/amd/amdgpu/cik.c
@@ -1310,6 +1310,17 @@ static int cik_asic_pci_config_reset(struct amdgpu_device *adev)
return r;
}
+static bool cik_asic_supports_baco(struct amdgpu_device *adev)
+{
+ switch (adev->asic_type) {
+ case CHIP_BONAIRE:
+ case CHIP_HAWAII:
+ return amdgpu_dpm_is_baco_supported(adev);
+ default:
+ return false;
+ }
+}
+
static enum amd_reset_method
cik_asic_reset_method(struct amdgpu_device *adev)
{
@@ -1349,7 +1360,7 @@ static int cik_asic_reset(struct amdgpu_device *adev)
if (cik_asic_reset_method(adev) == AMD_RESET_METHOD_BACO) {
if (!adev->in_suspend)
amdgpu_inc_vram_lost(adev);
- r = smu7_asic_baco_reset(adev);
+ r = amdgpu_dpm_baco_reset(adev);
} else {
r = cik_asic_pci_config_reset(adev);
}
@@ -1927,6 +1938,7 @@ static const struct amdgpu_asic_funcs cik_asic_funcs =
.get_pcie_usage = &cik_get_pcie_usage,
.need_reset_on_init = &cik_need_reset_on_init,
.get_pcie_replay_count = &cik_get_pcie_replay_count,
+ .supports_baco = &cik_asic_supports_baco,
};
static int cik_common_early_init(void *handle)
diff --git a/drivers/gpu/drm/amd/amdgpu/cik.h b/drivers/gpu/drm/amd/amdgpu/cik.h
index 9870bf27870e..f91ab4c246b7 100644
--- a/drivers/gpu/drm/amd/amdgpu/cik.h
+++ b/drivers/gpu/drm/amd/amdgpu/cik.h
@@ -31,7 +31,5 @@ void cik_srbm_select(struct amdgpu_device *adev,
int cik_set_ip_blocks(struct amdgpu_device *adev);
void legacy_doorbell_index_init(struct amdgpu_device *adev);
-int smu7_asic_get_baco_capability(struct amdgpu_device *adev, bool *cap);
-int smu7_asic_baco_reset(struct amdgpu_device *adev);
#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/cik_sdma.c b/drivers/gpu/drm/amd/amdgpu/cik_sdma.c
index c45304f1047c..580d3f93d670 100644
--- a/drivers/gpu/drm/amd/amdgpu/cik_sdma.c
+++ b/drivers/gpu/drm/amd/amdgpu/cik_sdma.c
@@ -228,7 +228,7 @@ static void cik_sdma_ring_emit_ib(struct amdgpu_ring *ring,
u32 extra_bits = vmid & 0xf;
 /* IB packet must end on an 8 DW boundary */
- cik_sdma_ring_insert_nop(ring, (12 - (lower_32_bits(ring->wptr) & 7)) % 8);
+ cik_sdma_ring_insert_nop(ring, (4 - lower_32_bits(ring->wptr)) & 7);
amdgpu_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_INDIRECT_BUFFER, 0, extra_bits));
amdgpu_ring_write(ring, ib->gpu_addr & 0xffffffe0); /* base must be 32 byte aligned */
@@ -811,7 +811,7 @@ static void cik_sdma_ring_pad_ib(struct amdgpu_ring *ring, struct amdgpu_ib *ib)
u32 pad_count;
int i;
- pad_count = (8 - (ib->length_dw & 0x7)) % 8;
+ pad_count = (-ib->length_dw) & 7;
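+ /*
+ * (-x) & 7 is equivalent to the old (8 - (x & 7)) % 8: e.g.
+ * length_dw = 13 gives (-13) & 7 = 3, padding the IB to the next
+ * multiple of 8 DWs (13 + 3 = 16).
+ */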
for (i = 0; i < pad_count; i++)
if (sdma && sdma->burst_nop && (i == 0))
ib->ptr[ib->length_dw++] =
@@ -1372,16 +1372,14 @@ static const struct amdgpu_vm_pte_funcs cik_sdma_vm_pte_funcs = {
static void cik_sdma_set_vm_pte_funcs(struct amdgpu_device *adev)
{
- struct drm_gpu_scheduler *sched;
unsigned i;
adev->vm_manager.vm_pte_funcs = &cik_sdma_vm_pte_funcs;
for (i = 0; i < adev->sdma.num_instances; i++) {
- sched = &adev->sdma.instance[i].ring.sched;
- adev->vm_manager.vm_pte_rqs[i] =
- &sched->sched_rq[DRM_SCHED_PRIORITY_KERNEL];
+ adev->vm_manager.vm_pte_scheds[i] =
+ &adev->sdma.instance[i].ring.sched;
}
- adev->vm_manager.vm_pte_num_rqs = adev->sdma.num_instances;
+ adev->vm_manager.vm_pte_num_scheds = adev->sdma.num_instances;
}
const struct amdgpu_ip_block_version cik_sdma_ip_block =
diff --git a/drivers/gpu/drm/amd/amdgpu/df_v1_7.c b/drivers/gpu/drm/amd/amdgpu/df_v1_7.c
index d6221298b477..d6aca1c08068 100644
--- a/drivers/gpu/drm/amd/amdgpu/df_v1_7.c
+++ b/drivers/gpu/drm/amd/amdgpu/df_v1_7.c
@@ -31,6 +31,9 @@ static u32 df_v1_7_channel_number[] = {1, 2, 0, 4, 0, 8, 0, 16, 2};
static void df_v1_7_sw_init(struct amdgpu_device *adev)
{
+ adev->df.hash_status.hash_64k = false;
+ adev->df.hash_status.hash_2m = false;
+ adev->df.hash_status.hash_1g = false;
}
static void df_v1_7_sw_fini(struct amdgpu_device *adev)
@@ -66,7 +69,7 @@ static u32 df_v1_7_get_hbm_channel_number(struct amdgpu_device *adev)
{
int fb_channel_number;
- fb_channel_number = adev->df_funcs->get_fb_channel_number(adev);
+ fb_channel_number = adev->df.funcs->get_fb_channel_number(adev);
return df_v1_7_channel_number[fb_channel_number];
}
@@ -77,7 +80,7 @@ static void df_v1_7_update_medium_grain_clock_gating(struct amdgpu_device *adev,
u32 tmp;
/* Put DF on broadcast mode */
- adev->df_funcs->enable_broadcast_mode(adev, true);
+ adev->df.funcs->enable_broadcast_mode(adev, true);
if (enable && (adev->cg_flags & AMD_CG_SUPPORT_DF_MGCG)) {
tmp = RREG32_SOC15(DF, 0, mmDF_PIE_AON0_DfGlobalClkGater);
@@ -92,7 +95,7 @@ static void df_v1_7_update_medium_grain_clock_gating(struct amdgpu_device *adev,
}
 /* Exit broadcast mode */
- adev->df_funcs->enable_broadcast_mode(adev, false);
+ adev->df.funcs->enable_broadcast_mode(adev, false);
}
static void df_v1_7_get_clockgating_state(struct amdgpu_device *adev,
diff --git a/drivers/gpu/drm/amd/amdgpu/df_v3_6.c b/drivers/gpu/drm/amd/amdgpu/df_v3_6.c
index 4043ebcea5de..f51326598a8c 100644
--- a/drivers/gpu/drm/amd/amdgpu/df_v3_6.c
+++ b/drivers/gpu/drm/amd/amdgpu/df_v3_6.c
@@ -183,6 +183,61 @@ static void df_v3_6_perfmon_wreg(struct amdgpu_device *adev, uint32_t lo_addr,
spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
}
+/* same as perfmon_wreg, but returns a status after reading back and checking the written values */
+static int df_v3_6_perfmon_arm_with_status(struct amdgpu_device *adev,
+ uint32_t lo_addr, uint32_t lo_val,
+ uint32_t hi_addr, uint32_t hi_val)
+{
+ unsigned long flags, address, data;
+ uint32_t lo_val_rb, hi_val_rb;
+
+ address = adev->nbio.funcs->get_pcie_index_offset(adev);
+ data = adev->nbio.funcs->get_pcie_data_offset(adev);
+
+ spin_lock_irqsave(&adev->pcie_idx_lock, flags);
+ WREG32(address, lo_addr);
+ WREG32(data, lo_val);
+ WREG32(address, hi_addr);
+ WREG32(data, hi_val);
+
+ WREG32(address, lo_addr);
+ lo_val_rb = RREG32(data);
+ WREG32(address, hi_addr);
+ hi_val_rb = RREG32(data);
+ spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
+
+ if (!(lo_val == lo_val_rb && hi_val == hi_val_rb))
+ return -EBUSY;
+
+ return 0;
+}
+
+
+/*
+ * Retry arming the counters every 100 usecs within a 1 millisecond
+ * window; if arming still fails when the timeout expires, return an error.
+ */
+#define ARM_RETRY_USEC_TIMEOUT 1000
+#define ARM_RETRY_USEC_INTERVAL 100
+static int df_v3_6_perfmon_arm_with_retry(struct amdgpu_device *adev,
+ uint32_t lo_addr, uint32_t lo_val,
+ uint32_t hi_addr, uint32_t hi_val)
+{
+ int countdown = ARM_RETRY_USEC_TIMEOUT;
+
+ while (countdown) {
+
+ if (!df_v3_6_perfmon_arm_with_status(adev, lo_addr, lo_val,
+ hi_addr, hi_val))
+ break;
+
+ countdown -= ARM_RETRY_USEC_INTERVAL;
+ udelay(ARM_RETRY_USEC_INTERVAL);
+ }
+
+ return countdown > 0 ? 0 : -ETIME;
+}
+
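With the constants above the helper makes at most 1000 / 100 = 10 arming attempts, roughly 100 usecs apart, before giving up with -ETIME. A hedged sketch of the intended call pattern, mirroring how df_v3_6_pmc_start consumes the result further down:

	err = df_v3_6_perfmon_arm_with_retry(adev, lo_base_addr, lo_val,
					     hi_base_addr, hi_val);
	if (err)
		/* still busy after ~1 ms: defer and re-arm lazily on the
		 * next df_v3_6_pmc_get_count() */
		ret = df_v3_6_pmc_set_deferred(adev, config, true);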
/* get the number of df counters available */
static ssize_t df_v3_6_get_df_cntr_avail(struct device *dev,
struct device_attribute *attr,
@@ -207,6 +262,32 @@ static ssize_t df_v3_6_get_df_cntr_avail(struct device *dev,
/* device attr for available perfmon counters */
static DEVICE_ATTR(df_cntr_avail, S_IRUGO, df_v3_6_get_df_cntr_avail, NULL);
+static void df_v3_6_query_hashes(struct amdgpu_device *adev)
+{
+ u32 tmp;
+
+ adev->df.hash_status.hash_64k = false;
+ adev->df.hash_status.hash_2m = false;
+ adev->df.hash_status.hash_1g = false;
+
+ if (adev->asic_type != CHIP_ARCTURUS)
+ return;
+
+ /* encoding for hash-enabled on Arcturus */
+ if (adev->df.funcs->get_fb_channel_number(adev) == 0xe) {
+ tmp = RREG32_SOC15(DF, 0, mmDF_CS_UMC_AON0_DfGlobalCtrl);
+ adev->df.hash_status.hash_64k = REG_GET_FIELD(tmp,
+ DF_CS_UMC_AON0_DfGlobalCtrl,
+ GlbHashIntlvCtl64K);
+ adev->df.hash_status.hash_2m = REG_GET_FIELD(tmp,
+ DF_CS_UMC_AON0_DfGlobalCtrl,
+ GlbHashIntlvCtl2M);
+ adev->df.hash_status.hash_1g = REG_GET_FIELD(tmp,
+ DF_CS_UMC_AON0_DfGlobalCtrl,
+ GlbHashIntlvCtl1G);
+ }
+}
+
/* init perfmons */
static void df_v3_6_sw_init(struct amdgpu_device *adev)
{
@@ -218,6 +299,8 @@ static void df_v3_6_sw_init(struct amdgpu_device *adev)
for (i = 0; i < AMDGPU_MAX_DF_PERFMONS; i++)
adev->df_perfmon_config_assign_mask[i] = 0;
+
+ df_v3_6_query_hashes(adev);
}
static void df_v3_6_sw_fini(struct amdgpu_device *adev)
@@ -256,7 +339,7 @@ static u32 df_v3_6_get_hbm_channel_number(struct amdgpu_device *adev)
{
int fb_channel_number;
- fb_channel_number = adev->df_funcs->get_fb_channel_number(adev);
+ fb_channel_number = adev->df.funcs->get_fb_channel_number(adev);
if (fb_channel_number >= ARRAY_SIZE(df_v3_6_channel_number))
fb_channel_number = 0;
@@ -270,7 +353,7 @@ static void df_v3_6_update_medium_grain_clock_gating(struct amdgpu_device *adev,
if (adev->cg_flags & AMD_CG_SUPPORT_DF_MGCG) {
/* Put DF on broadcast mode */
- adev->df_funcs->enable_broadcast_mode(adev, true);
+ adev->df.funcs->enable_broadcast_mode(adev, true);
if (enable) {
tmp = RREG32_SOC15(DF, 0,
@@ -289,7 +372,7 @@ static void df_v3_6_update_medium_grain_clock_gating(struct amdgpu_device *adev,
}
/* Exit broadcast mode */
- adev->df_funcs->enable_broadcast_mode(adev, false);
+ adev->df.funcs->enable_broadcast_mode(adev, false);
}
}
@@ -334,20 +417,20 @@ static void df_v3_6_pmc_get_addr(struct amdgpu_device *adev,
switch (target_cntr) {
case 0:
- *lo_base_addr = is_ctrl ? smnPerfMonCtlLo0 : smnPerfMonCtrLo0;
- *hi_base_addr = is_ctrl ? smnPerfMonCtlHi0 : smnPerfMonCtrHi0;
+ *lo_base_addr = is_ctrl ? smnPerfMonCtlLo4 : smnPerfMonCtrLo4;
+ *hi_base_addr = is_ctrl ? smnPerfMonCtlHi4 : smnPerfMonCtrHi4;
break;
case 1:
- *lo_base_addr = is_ctrl ? smnPerfMonCtlLo1 : smnPerfMonCtrLo1;
- *hi_base_addr = is_ctrl ? smnPerfMonCtlHi1 : smnPerfMonCtrHi1;
+ *lo_base_addr = is_ctrl ? smnPerfMonCtlLo5 : smnPerfMonCtrLo5;
+ *hi_base_addr = is_ctrl ? smnPerfMonCtlHi5 : smnPerfMonCtrHi5;
break;
case 2:
- *lo_base_addr = is_ctrl ? smnPerfMonCtlLo2 : smnPerfMonCtrLo2;
- *hi_base_addr = is_ctrl ? smnPerfMonCtlHi2 : smnPerfMonCtrHi2;
+ *lo_base_addr = is_ctrl ? smnPerfMonCtlLo6 : smnPerfMonCtrLo6;
+ *hi_base_addr = is_ctrl ? smnPerfMonCtlHi6 : smnPerfMonCtrHi6;
break;
case 3:
- *lo_base_addr = is_ctrl ? smnPerfMonCtlLo3 : smnPerfMonCtrLo3;
- *hi_base_addr = is_ctrl ? smnPerfMonCtlHi3 : smnPerfMonCtrHi3;
+ *lo_base_addr = is_ctrl ? smnPerfMonCtlLo7 : smnPerfMonCtrLo7;
+ *hi_base_addr = is_ctrl ? smnPerfMonCtlHi7 : smnPerfMonCtrHi7;
break;
}
@@ -422,6 +505,44 @@ static int df_v3_6_pmc_add_cntr(struct amdgpu_device *adev,
return -ENOSPC;
}
+#define DEFERRED_ARM_MASK (1 << 31)
+static int df_v3_6_pmc_set_deferred(struct amdgpu_device *adev,
+ uint64_t config, bool is_deferred)
+{
+ int target_cntr;
+
+ target_cntr = df_v3_6_pmc_config_2_cntr(adev, config);
+
+ if (target_cntr < 0)
+ return -EINVAL;
+
+ if (is_deferred)
+ adev->df_perfmon_config_assign_mask[target_cntr] |=
+ DEFERRED_ARM_MASK;
+ else
+ adev->df_perfmon_config_assign_mask[target_cntr] &=
+ ~DEFERRED_ARM_MASK;
+
+ return 0;
+}
+
+static bool df_v3_6_pmc_is_deferred(struct amdgpu_device *adev,
+ uint64_t config)
+{
+ int target_cntr;
+
+ target_cntr = df_v3_6_pmc_config_2_cntr(adev, config);
+
+ /*
+ * We never get target_cntr < 0 since this function is only called
+ * from pmc_get_count for now, but check anyway.
+ */
+ return (target_cntr >= 0 &&
+ (adev->df_perfmon_config_assign_mask[target_cntr]
+ & DEFERRED_ARM_MASK));
+
+}
+
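DEFERRED_ARM_MASK overloads bit 31 of a counter's assign-mask entry as a "needs re-arm" flag alongside the stored config bits; a minimal illustration (values invented):

	uint64_t entry = 0xbeef;	/* slot assigned to config 0xbeef */

	entry |= DEFERRED_ARM_MASK;	/* arming failed, retry at read time */
	if (entry & DEFERRED_ARM_MASK)	/* tested in pmc_get_count */
		entry &= ~DEFERRED_ARM_MASK;	/* successful re-arm clears it */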
/* release performance counter */
static void df_v3_6_pmc_release_cntr(struct amdgpu_device *adev,
uint64_t config)
@@ -451,29 +572,33 @@ static int df_v3_6_pmc_start(struct amdgpu_device *adev, uint64_t config,
int is_enable)
{
uint32_t lo_base_addr, hi_base_addr, lo_val, hi_val;
- int ret = 0;
+ int err = 0, ret = 0;
switch (adev->asic_type) {
case CHIP_VEGA20:
+ if (is_enable)
+ return df_v3_6_pmc_add_cntr(adev, config);
df_v3_6_reset_perfmon_cntr(adev, config);
- if (is_enable) {
- ret = df_v3_6_pmc_add_cntr(adev, config);
- } else {
- ret = df_v3_6_pmc_get_ctrl_settings(adev,
+ ret = df_v3_6_pmc_get_ctrl_settings(adev,
config,
&lo_base_addr,
&hi_base_addr,
&lo_val,
&hi_val);
- if (ret)
- return ret;
+ if (ret)
+ return ret;
- df_v3_6_perfmon_wreg(adev, lo_base_addr, lo_val,
- hi_base_addr, hi_val);
- }
+ err = df_v3_6_perfmon_arm_with_retry(adev,
+ lo_base_addr,
+ lo_val,
+ hi_base_addr,
+ hi_val);
+
+ if (err)
+ ret = df_v3_6_pmc_set_deferred(adev, config, true);
break;
default:
@@ -501,7 +626,7 @@ static int df_v3_6_pmc_stop(struct amdgpu_device *adev, uint64_t config,
if (ret)
return ret;
- df_v3_6_perfmon_wreg(adev, lo_base_addr, 0, hi_base_addr, 0);
+ df_v3_6_reset_perfmon_cntr(adev, config);
if (is_disable)
df_v3_6_pmc_release_cntr(adev, config);
@@ -518,18 +643,29 @@ static void df_v3_6_pmc_get_count(struct amdgpu_device *adev,
uint64_t config,
uint64_t *count)
{
- uint32_t lo_base_addr, hi_base_addr, lo_val, hi_val;
+ uint32_t lo_base_addr, hi_base_addr, lo_val = 0, hi_val = 0;
*count = 0;
switch (adev->asic_type) {
case CHIP_VEGA20:
-
df_v3_6_pmc_get_read_settings(adev, config, &lo_base_addr,
&hi_base_addr);
if ((lo_base_addr == 0) || (hi_base_addr == 0))
return;
+ /* rearm the counter or throw away count value on failure */
+ if (df_v3_6_pmc_is_deferred(adev, config)) {
+ int rearm_err = df_v3_6_perfmon_arm_with_status(adev,
+ lo_base_addr, lo_val,
+ hi_base_addr, hi_val);
+
+ if (rearm_err)
+ return;
+
+ df_v3_6_pmc_set_deferred(adev, config, false);
+ }
+
df_v3_6_perfmon_rreg(adev, lo_base_addr, &lo_val,
hi_base_addr, &hi_val);
@@ -542,7 +678,6 @@ static void df_v3_6_pmc_get_count(struct amdgpu_device *adev,
config, lo_base_addr, hi_base_addr, lo_val, hi_val);
break;
-
default:
break;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
index ba9e53a1abc3..874f641de281 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
@@ -40,6 +40,7 @@
#include "ivsrcid/gfx/irqsrcs_gfx_10_1.h"
#include "soc15.h"
+#include "soc15d.h"
#include "soc15_common.h"
#include "clearstate_gfx10.h"
#include "v10_structs.h"
@@ -50,9 +51,6 @@
* Navi10 has two graphic rings to share each graphic pipe.
* 1. Primary ring
* 2. Async ring
- *
- * In bring-up phase, it just used primary ring so set gfx ring number as 1 at
- * first.
*/
#define GFX10_NUM_GFX_RINGS 2
#define GFX10_MEC_HPD_SIZE 2048
@@ -123,7 +121,7 @@ static const struct soc15_reg_golden golden_settings_gc_10_1[] =
SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_ENHANCE_2, 0x00000800, 0x00000820),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_LINE_STIPPLE_STATE, 0x0000ff0f, 0x00000000),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmRMI_SPARE, 0xffffffff, 0xffff3101),
- SOC15_REG_GOLDEN_VALUE(GC, 0, mmSPI_CONFIG_CNTL, 0x001f0000, 0x00070104),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmSPI_CONFIG_CNTL_1, 0x001f0000, 0x00070104),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmSQ_ALU_CLK_CTRL, 0xffffffff, 0xffffffff),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmSQ_ARB_CONFIG, 0x00000100, 0x00000130),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmSQ_LDS_CLK_CTRL, 0xffffffff, 0xffffffff),
@@ -171,7 +169,7 @@ static const struct soc15_reg_golden golden_settings_gc_10_1_1[] =
SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_ENHANCE_2, 0x00000800, 0x00000820),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_LINE_STIPPLE_STATE, 0x0000ff0f, 0x00000000),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmRMI_SPARE, 0xffffffff, 0xffff3101),
- SOC15_REG_GOLDEN_VALUE(GC, 0, mmSPI_CONFIG_CNTL, 0x001f0000, 0x00070105),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmSPI_CONFIG_CNTL_1, 0x001f0000, 0x00070105),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmSQ_ALU_CLK_CTRL, 0xffffffff, 0xffffffff),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmSQ_ARB_CONFIG, 0x00000133, 0x00000130),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmSQ_LDS_CLK_CTRL, 0xffffffff, 0xffffffff),
@@ -348,15 +346,29 @@ static void gfx10_kiq_query_status(struct amdgpu_ring *kiq_ring,
amdgpu_ring_write(kiq_ring, upper_32_bits(seq));
}
+static void gfx10_kiq_invalidate_tlbs(struct amdgpu_ring *kiq_ring,
+ uint16_t pasid, uint32_t flush_type,
+ bool all_hub)
+{
+ amdgpu_ring_write(kiq_ring, PACKET3(PACKET3_INVALIDATE_TLBS, 0));
+ amdgpu_ring_write(kiq_ring,
+ PACKET3_INVALIDATE_TLBS_DST_SEL(1) |
+ PACKET3_INVALIDATE_TLBS_ALL_HUB(all_hub) |
+ PACKET3_INVALIDATE_TLBS_PASID(pasid) |
+ PACKET3_INVALIDATE_TLBS_FLUSH_TYPE(flush_type));
+}
+
static const struct kiq_pm4_funcs gfx_v10_0_kiq_pm4_funcs = {
.kiq_set_resources = gfx10_kiq_set_resources,
.kiq_map_queues = gfx10_kiq_map_queues,
.kiq_unmap_queues = gfx10_kiq_unmap_queues,
.kiq_query_status = gfx10_kiq_query_status,
+ .kiq_invalidate_tlbs = gfx10_kiq_invalidate_tlbs,
.set_resources_size = 8,
.map_queues_size = 7,
.unmap_queues_size = 6,
.query_status_size = 7,
+ .invalidate_tlbs_size = 12,
};
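These hooks let the GMC code flush TLBs by PASID through the KIQ instead of direct MMIO. A hedged sketch of a caller, based on the usual KIQ submission pattern (locking and error handling simplified):

	struct amdgpu_kiq *kiq = &adev->gfx.kiq;
	struct amdgpu_ring *ring = &kiq->ring;
	uint32_t seq;

	spin_lock(&kiq->ring_lock);
	amdgpu_ring_alloc(ring, kiq->pmf->invalidate_tlbs_size);
	kiq->pmf->kiq_invalidate_tlbs(ring, pasid, flush_type, all_hub);
	amdgpu_fence_emit_polling(ring, &seq);	/* fence to poll on */
	amdgpu_ring_commit(ring);
	spin_unlock(&kiq->ring_lock);

	amdgpu_fence_wait_polling(ring, seq, adev->usec_timeout);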
static void gfx_v10_0_set_kiq_pm4_funcs(struct amdgpu_device *adev)
@@ -474,18 +486,10 @@ static int gfx_v10_0_ring_test_ring(struct amdgpu_ring *ring)
else
udelay(1);
}
- if (i < adev->usec_timeout) {
- if (amdgpu_emu_mode == 1)
- DRM_INFO("ring test on %d succeeded in %d msecs\n",
- ring->idx, i);
- else
- DRM_INFO("ring test on %d succeeded in %d usecs\n",
- ring->idx, i);
- } else {
- DRM_ERROR("amdgpu: ring %d test failed (scratch(0x%04X)=0x%08X)\n",
- ring->idx, scratch, tmp);
- r = -EINVAL;
- }
+
+ if (i >= adev->usec_timeout)
+ r = -ETIMEDOUT;
+
amdgpu_gfx_scratch_free(adev, scratch);
return r;
@@ -535,14 +539,10 @@ static int gfx_v10_0_ring_test_ib(struct amdgpu_ring *ring, long timeout)
}
tmp = RREG32(scratch);
- if (tmp == 0xDEADBEEF) {
- DRM_INFO("ib test on ring %d succeeded\n", ring->idx);
+ if (tmp == 0xDEADBEEF)
r = 0;
- } else {
- DRM_ERROR("amdgpu: ib test failed (scratch(0x%04X)=0x%08X)\n",
- scratch, tmp);
+ else
r = -EINVAL;
- }
err2:
amdgpu_ib_free(adev, &ib, NULL);
dma_fence_put(f);
@@ -591,8 +591,7 @@ static void gfx_v10_0_check_fw_write_wait(struct amdgpu_device *adev)
}
if (adev->gfx.cp_fw_write_wait == false)
- DRM_WARN_ONCE("Warning: check cp_fw_version and update it to realize \
- GRBM requires 1-cycle delay in cp firmware\n");
+ DRM_WARN_ONCE("CP firmware version too old, please update!");
}
@@ -617,11 +616,29 @@ static void gfx_v10_0_init_rlc_ext_microcode(struct amdgpu_device *adev)
le32_to_cpu(rlc_hdr->reg_list_format_direct_reg_list_length);
}
+static bool gfx_v10_0_navi10_gfxoff_should_enable(struct amdgpu_device *adev)
+{
+ bool ret = false;
+
+ switch (adev->pdev->revision) {
+ case 0xc2:
+ case 0xc3:
+ ret = true;
+ break;
+ default:
+ ret = false;
+ break;
+ }
+
+ return ret;
+}
+
static void gfx_v10_0_check_gfxoff_flag(struct amdgpu_device *adev)
{
switch (adev->asic_type) {
case CHIP_NAVI10:
- adev->pm.pp_feature &= ~PP_GFXOFF_MASK;
+ if (!gfx_v10_0_navi10_gfxoff_should_enable(adev))
+ adev->pm.pp_feature &= ~PP_GFXOFF_MASK;
break;
default:
break;
@@ -805,10 +822,11 @@ static int gfx_v10_0_init_microcode(struct amdgpu_device *adev)
info = &adev->firmware.ucode[AMDGPU_UCODE_ID_RLC_G];
info->ucode_id = AMDGPU_UCODE_ID_RLC_G;
info->fw = adev->gfx.rlc_fw;
- header = (const struct common_firmware_header *)info->fw->data;
- adev->firmware.fw_size +=
- ALIGN(le32_to_cpu(header->ucode_size_bytes), PAGE_SIZE);
-
+ if (info->fw) {
+ header = (const struct common_firmware_header *)info->fw->data;
+ adev->firmware.fw_size +=
+ ALIGN(le32_to_cpu(header->ucode_size_bytes), PAGE_SIZE);
+ }
if (adev->gfx.rlc.is_rlc_v2_1 &&
adev->gfx.rlc.save_restore_list_cntl_size_bytes &&
adev->gfx.rlc.save_restore_list_gpm_size_bytes &&
@@ -1948,7 +1966,7 @@ static int gfx_v10_0_parse_rlc_toc(struct amdgpu_device *adev)
rlc_autoload_info[rlc_toc->id].size = rlc_toc->size * 4;
rlc_toc++;
- };
+ }
return 0;
}
@@ -3319,8 +3337,11 @@ static int gfx_v10_0_compute_mqd_init(struct amdgpu_ring *ring)
tmp = REG_SET_FIELD(tmp, CP_HQD_IB_CONTROL, MIN_IB_AVAIL_SIZE, 3);
mqd->cp_hqd_ib_control = tmp;
- /* activate the queue */
- mqd->cp_hqd_active = 1;
+ /* The map_queues packet doesn't need to activate the queue,
+ * so only the KIQ needs to set this field.
+ */
+ if (ring->funcs->type == AMDGPU_RING_TYPE_KIQ)
+ mqd->cp_hqd_active = 1;
return 0;
}
@@ -3591,23 +3612,16 @@ static int gfx_v10_0_cp_resume(struct amdgpu_device *adev)
for (i = 0; i < adev->gfx.num_gfx_rings; i++) {
ring = &adev->gfx.gfx_ring[i];
- DRM_INFO("gfx %d ring me %d pipe %d q %d\n",
- i, ring->me, ring->pipe, ring->queue);
- r = amdgpu_ring_test_ring(ring);
- if (r) {
- ring->sched.ready = false;
+ r = amdgpu_ring_test_helper(ring);
+ if (r)
return r;
- }
}
for (i = 0; i < adev->gfx.num_compute_rings; i++) {
ring = &adev->gfx.compute_ring[i];
- ring->sched.ready = true;
- DRM_INFO("compute ring %d mec %d pipe %d q %d\n",
- i, ring->me, ring->pipe, ring->queue);
- r = amdgpu_ring_test_ring(ring);
+ r = amdgpu_ring_test_helper(ring);
if (r)
- ring->sched.ready = false;
+ return r;
}
return 0;
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c
index 7f0a63628c43..31f44d05e606 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c
@@ -1576,7 +1576,7 @@ static void gfx_v6_0_config_init(struct amdgpu_device *adev)
static void gfx_v6_0_constants_init(struct amdgpu_device *adev)
{
u32 gb_addr_config = 0;
- u32 mc_shared_chmap, mc_arb_ramcfg;
+ u32 mc_arb_ramcfg;
u32 sx_debug_1;
u32 hdp_host_path_cntl;
u32 tmp;
@@ -1678,7 +1678,6 @@ static void gfx_v6_0_constants_init(struct amdgpu_device *adev)
WREG32(mmBIF_FB_EN, BIF_FB_EN__FB_READ_EN_MASK | BIF_FB_EN__FB_WRITE_EN_MASK);
- mc_shared_chmap = RREG32(mmMC_SHARED_CHMAP);
adev->gfx.config.mc_arb_ramcfg = RREG32(mmMC_ARB_RAMCFG);
mc_arb_ramcfg = adev->gfx.config.mc_arb_ramcfg;
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
index d92e92e5d50b..8f20a5dd44fe 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
@@ -4258,7 +4258,7 @@ static int gfx_v7_0_late_init(void *handle)
static void gfx_v7_0_gpu_early_init(struct amdgpu_device *adev)
{
u32 gb_addr_config;
- u32 mc_shared_chmap, mc_arb_ramcfg;
+ u32 mc_arb_ramcfg;
u32 dimm00_addr_map, dimm01_addr_map, dimm10_addr_map, dimm11_addr_map;
u32 tmp;
@@ -4335,7 +4335,6 @@ static void gfx_v7_0_gpu_early_init(struct amdgpu_device *adev)
break;
}
- mc_shared_chmap = RREG32(mmMC_SHARED_CHMAP);
adev->gfx.config.mc_arb_ramcfg = RREG32(mmMC_ARB_RAMCFG);
mc_arb_ramcfg = adev->gfx.config.mc_arb_ramcfg;
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
index 52a647d7022d..46f0533ba43f 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
@@ -1677,7 +1677,7 @@ fail:
static int gfx_v8_0_gpu_early_init(struct amdgpu_device *adev)
{
u32 gb_addr_config;
- u32 mc_shared_chmap, mc_arb_ramcfg;
+ u32 mc_arb_ramcfg;
u32 dimm00_addr_map, dimm01_addr_map, dimm10_addr_map, dimm11_addr_map;
u32 tmp;
int ret;
@@ -1817,7 +1817,6 @@ static int gfx_v8_0_gpu_early_init(struct amdgpu_device *adev)
break;
}
- mc_shared_chmap = RREG32(mmMC_SHARED_CHMAP);
adev->gfx.config.mc_arb_ramcfg = RREG32(mmMC_ARB_RAMCFG);
mc_arb_ramcfg = adev->gfx.config.mc_arb_ramcfg;
@@ -4559,8 +4558,11 @@ static int gfx_v8_0_mqd_init(struct amdgpu_ring *ring)
mqd->cp_hqd_eop_wptr_mem = RREG32(mmCP_HQD_EOP_WPTR_MEM);
mqd->cp_hqd_eop_dones = RREG32(mmCP_HQD_EOP_DONES);
- /* activate the queue */
- mqd->cp_hqd_active = 1;
+ /* The map_queues packet doesn't need to activate the queue,
+ * so only the KIQ needs to set this field.
+ */
+ if (ring->funcs->type == AMDGPU_RING_TYPE_KIQ)
+ mqd->cp_hqd_active = 1;
return 0;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
index 97105a5bb246..46ab46757b25 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
@@ -131,18 +131,6 @@ MODULE_FIRMWARE("amdgpu/renoir_rlc.bin");
#define mmTCP_CHAN_STEER_5_ARCT 0x0b0c
#define mmTCP_CHAN_STEER_5_ARCT_BASE_IDX 0
-struct ras_gfx_subblock_reg {
- const char *name;
- uint32_t hwip;
- uint32_t inst;
- uint32_t seg;
- uint32_t reg_offset;
- uint32_t sec_count_mask;
- uint32_t sec_count_shift;
- uint32_t ded_count_mask;
- uint32_t ded_count_shift;
-};
-
enum ta_ras_gfx_subblock {
/*CPC*/
TA_RAS_BLOCK__GFX_CPC_INDEX_START = 0,
@@ -751,6 +739,134 @@ static int gfx_v9_0_query_ras_error_count(struct amdgpu_device *adev,
static int gfx_v9_0_ras_error_inject(struct amdgpu_device *adev,
void *inject_if);
+static void gfx_v9_0_kiq_set_resources(struct amdgpu_ring *kiq_ring,
+ uint64_t queue_mask)
+{
+ amdgpu_ring_write(kiq_ring, PACKET3(PACKET3_SET_RESOURCES, 6));
+ amdgpu_ring_write(kiq_ring,
+ PACKET3_SET_RESOURCES_VMID_MASK(0) |
+ /* vmid_mask:0 queue_type:0 (KIQ) */
+ PACKET3_SET_RESOURCES_QUEUE_TYPE(0));
+ amdgpu_ring_write(kiq_ring,
+ lower_32_bits(queue_mask)); /* queue mask lo */
+ amdgpu_ring_write(kiq_ring,
+ upper_32_bits(queue_mask)); /* queue mask hi */
+ amdgpu_ring_write(kiq_ring, 0); /* gws mask lo */
+ amdgpu_ring_write(kiq_ring, 0); /* gws mask hi */
+ amdgpu_ring_write(kiq_ring, 0); /* oac mask */
+ amdgpu_ring_write(kiq_ring, 0); /* gds heap base:0, gds heap size:0 */
+}
+
+static void gfx_v9_0_kiq_map_queues(struct amdgpu_ring *kiq_ring,
+ struct amdgpu_ring *ring)
+{
+ struct amdgpu_device *adev = kiq_ring->adev;
+ uint64_t mqd_addr = amdgpu_bo_gpu_offset(ring->mqd_obj);
+ uint64_t wptr_addr = adev->wb.gpu_addr + (ring->wptr_offs * 4);
+ uint32_t eng_sel = ring->funcs->type == AMDGPU_RING_TYPE_GFX ? 4 : 0;
+
+ amdgpu_ring_write(kiq_ring, PACKET3(PACKET3_MAP_QUEUES, 5));
+ /* Q_sel:0, vmid:0, vidmem: 1, engine:0, num_Q:1*/
+ amdgpu_ring_write(kiq_ring, /* Q_sel: 0, vmid: 0, engine: 0, num_Q: 1 */
+ PACKET3_MAP_QUEUES_QUEUE_SEL(0) | /* Queue_Sel */
+ PACKET3_MAP_QUEUES_VMID(0) | /* VMID */
+ PACKET3_MAP_QUEUES_QUEUE(ring->queue) |
+ PACKET3_MAP_QUEUES_PIPE(ring->pipe) |
+ PACKET3_MAP_QUEUES_ME((ring->me == 1 ? 0 : 1)) |
+ /*queue_type: normal compute queue */
+ PACKET3_MAP_QUEUES_QUEUE_TYPE(0) |
+ /* alloc format: all_on_one_pipe */
+ PACKET3_MAP_QUEUES_ALLOC_FORMAT(0) |
+ PACKET3_MAP_QUEUES_ENGINE_SEL(eng_sel) |
+ /* num_queues: must be 1 */
+ PACKET3_MAP_QUEUES_NUM_QUEUES(1));
+ amdgpu_ring_write(kiq_ring,
+ PACKET3_MAP_QUEUES_DOORBELL_OFFSET(ring->doorbell_index));
+ amdgpu_ring_write(kiq_ring, lower_32_bits(mqd_addr));
+ amdgpu_ring_write(kiq_ring, upper_32_bits(mqd_addr));
+ amdgpu_ring_write(kiq_ring, lower_32_bits(wptr_addr));
+ amdgpu_ring_write(kiq_ring, upper_32_bits(wptr_addr));
+}
+
+static void gfx_v9_0_kiq_unmap_queues(struct amdgpu_ring *kiq_ring,
+ struct amdgpu_ring *ring,
+ enum amdgpu_unmap_queues_action action,
+ u64 gpu_addr, u64 seq)
+{
+ uint32_t eng_sel = ring->funcs->type == AMDGPU_RING_TYPE_GFX ? 4 : 0;
+
+ amdgpu_ring_write(kiq_ring, PACKET3(PACKET3_UNMAP_QUEUES, 4));
+ amdgpu_ring_write(kiq_ring, /* Q_sel: 0, vmid: 0, engine: 0, num_Q: 1 */
+ PACKET3_UNMAP_QUEUES_ACTION(action) |
+ PACKET3_UNMAP_QUEUES_QUEUE_SEL(0) |
+ PACKET3_UNMAP_QUEUES_ENGINE_SEL(eng_sel) |
+ PACKET3_UNMAP_QUEUES_NUM_QUEUES(1));
+ amdgpu_ring_write(kiq_ring,
+ PACKET3_UNMAP_QUEUES_DOORBELL_OFFSET0(ring->doorbell_index));
+
+ if (action == PREEMPT_QUEUES_NO_UNMAP) {
+ amdgpu_ring_write(kiq_ring, lower_32_bits(gpu_addr));
+ amdgpu_ring_write(kiq_ring, upper_32_bits(gpu_addr));
+ amdgpu_ring_write(kiq_ring, seq);
+ } else {
+ amdgpu_ring_write(kiq_ring, 0);
+ amdgpu_ring_write(kiq_ring, 0);
+ amdgpu_ring_write(kiq_ring, 0);
+ }
+}
+
+static void gfx_v9_0_kiq_query_status(struct amdgpu_ring *kiq_ring,
+ struct amdgpu_ring *ring,
+ u64 addr,
+ u64 seq)
+{
+ uint32_t eng_sel = ring->funcs->type == AMDGPU_RING_TYPE_GFX ? 4 : 0;
+
+ amdgpu_ring_write(kiq_ring, PACKET3(PACKET3_QUERY_STATUS, 5));
+ amdgpu_ring_write(kiq_ring,
+ PACKET3_QUERY_STATUS_CONTEXT_ID(0) |
+ PACKET3_QUERY_STATUS_INTERRUPT_SEL(0) |
+ PACKET3_QUERY_STATUS_COMMAND(2));
+ /* Q_sel: 0, vmid: 0, engine: 0, num_Q: 1 */
+ amdgpu_ring_write(kiq_ring,
+ PACKET3_QUERY_STATUS_DOORBELL_OFFSET(ring->doorbell_index) |
+ PACKET3_QUERY_STATUS_ENG_SEL(eng_sel));
+ amdgpu_ring_write(kiq_ring, lower_32_bits(addr));
+ amdgpu_ring_write(kiq_ring, upper_32_bits(addr));
+ amdgpu_ring_write(kiq_ring, lower_32_bits(seq));
+ amdgpu_ring_write(kiq_ring, upper_32_bits(seq));
+}
+
+static void gfx_v9_0_kiq_invalidate_tlbs(struct amdgpu_ring *kiq_ring,
+ uint16_t pasid, uint32_t flush_type,
+ bool all_hub)
+{
+ amdgpu_ring_write(kiq_ring, PACKET3(PACKET3_INVALIDATE_TLBS, 0));
+ amdgpu_ring_write(kiq_ring,
+ PACKET3_INVALIDATE_TLBS_DST_SEL(1) |
+ PACKET3_INVALIDATE_TLBS_ALL_HUB(all_hub) |
+ PACKET3_INVALIDATE_TLBS_PASID(pasid) |
+ PACKET3_INVALIDATE_TLBS_FLUSH_TYPE(flush_type));
+}
+
+static const struct kiq_pm4_funcs gfx_v9_0_kiq_pm4_funcs = {
+ .kiq_set_resources = gfx_v9_0_kiq_set_resources,
+ .kiq_map_queues = gfx_v9_0_kiq_map_queues,
+ .kiq_unmap_queues = gfx_v9_0_kiq_unmap_queues,
+ .kiq_query_status = gfx_v9_0_kiq_query_status,
+ .kiq_invalidate_tlbs = gfx_v9_0_kiq_invalidate_tlbs,
+ .set_resources_size = 8,
+ .map_queues_size = 7,
+ .unmap_queues_size = 6,
+ .query_status_size = 7,
+ .invalidate_tlbs_size = 12,
+};
+
+static void gfx_v9_0_set_kiq_pm4_funcs(struct amdgpu_device *adev)
+{
+ adev->gfx.kiq.pmf = &gfx_v9_0_kiq_pm4_funcs;
+}
+
static void gfx_v9_0_init_golden_registers(struct amdgpu_device *adev)
{
switch (adev->asic_type) {
@@ -991,8 +1107,7 @@ static void gfx_v9_0_check_fw_write_wait(struct amdgpu_device *adev)
(adev->gfx.mec_feature_version < 46) ||
(adev->gfx.pfp_fw_version < 0x000000b7) ||
(adev->gfx.pfp_feature_version < 46))
- DRM_WARN_ONCE("Warning: check cp_fw_version and update it to realize \
- GRBM requires 1-cycle delay in cp firmware\n");
+ DRM_WARN_ONCE("CP firmware version too old, please update!");
switch (adev->asic_type) {
case CHIP_VEGA10:
@@ -3119,74 +3234,6 @@ static void gfx_v9_0_kiq_setting(struct amdgpu_ring *ring)
WREG32_SOC15_RLC(GC, 0, mmRLC_CP_SCHEDULERS, tmp);
}
-static int gfx_v9_0_kiq_kcq_enable(struct amdgpu_device *adev)
-{
- struct amdgpu_ring *kiq_ring = &adev->gfx.kiq.ring;
- uint64_t queue_mask = 0;
- int r, i;
-
- for (i = 0; i < AMDGPU_MAX_COMPUTE_QUEUES; ++i) {
- if (!test_bit(i, adev->gfx.mec.queue_bitmap))
- continue;
-
- /* This situation may be hit in the future if a new HW
- * generation exposes more than 64 queues. If so, the
- * definition of queue_mask needs updating */
- if (WARN_ON(i >= (sizeof(queue_mask)*8))) {
- DRM_ERROR("Invalid KCQ enabled: %d\n", i);
- break;
- }
-
- queue_mask |= (1ull << i);
- }
-
- r = amdgpu_ring_alloc(kiq_ring, (7 * adev->gfx.num_compute_rings) + 8);
- if (r) {
- DRM_ERROR("Failed to lock KIQ (%d).\n", r);
- return r;
- }
-
- /* set resources */
- amdgpu_ring_write(kiq_ring, PACKET3(PACKET3_SET_RESOURCES, 6));
- amdgpu_ring_write(kiq_ring, PACKET3_SET_RESOURCES_VMID_MASK(0) |
- PACKET3_SET_RESOURCES_QUEUE_TYPE(0)); /* vmid_mask:0 queue_type:0 (KIQ) */
- amdgpu_ring_write(kiq_ring, lower_32_bits(queue_mask)); /* queue mask lo */
- amdgpu_ring_write(kiq_ring, upper_32_bits(queue_mask)); /* queue mask hi */
- amdgpu_ring_write(kiq_ring, 0); /* gws mask lo */
- amdgpu_ring_write(kiq_ring, 0); /* gws mask hi */
- amdgpu_ring_write(kiq_ring, 0); /* oac mask */
- amdgpu_ring_write(kiq_ring, 0); /* gds heap base:0, gds heap size:0 */
- for (i = 0; i < adev->gfx.num_compute_rings; i++) {
- struct amdgpu_ring *ring = &adev->gfx.compute_ring[i];
- uint64_t mqd_addr = amdgpu_bo_gpu_offset(ring->mqd_obj);
- uint64_t wptr_addr = adev->wb.gpu_addr + (ring->wptr_offs * 4);
-
- amdgpu_ring_write(kiq_ring, PACKET3(PACKET3_MAP_QUEUES, 5));
- /* Q_sel:0, vmid:0, vidmem: 1, engine:0, num_Q:1*/
- amdgpu_ring_write(kiq_ring, /* Q_sel: 0, vmid: 0, engine: 0, num_Q: 1 */
- PACKET3_MAP_QUEUES_QUEUE_SEL(0) | /* Queue_Sel */
- PACKET3_MAP_QUEUES_VMID(0) | /* VMID */
- PACKET3_MAP_QUEUES_QUEUE(ring->queue) |
- PACKET3_MAP_QUEUES_PIPE(ring->pipe) |
- PACKET3_MAP_QUEUES_ME((ring->me == 1 ? 0 : 1)) |
- PACKET3_MAP_QUEUES_QUEUE_TYPE(0) | /*queue_type: normal compute queue */
- PACKET3_MAP_QUEUES_ALLOC_FORMAT(0) | /* alloc format: all_on_one_pipe */
- PACKET3_MAP_QUEUES_ENGINE_SEL(0) | /* engine_sel: compute */
- PACKET3_MAP_QUEUES_NUM_QUEUES(1)); /* num_queues: must be 1 */
- amdgpu_ring_write(kiq_ring, PACKET3_MAP_QUEUES_DOORBELL_OFFSET(ring->doorbell_index));
- amdgpu_ring_write(kiq_ring, lower_32_bits(mqd_addr));
- amdgpu_ring_write(kiq_ring, upper_32_bits(mqd_addr));
- amdgpu_ring_write(kiq_ring, lower_32_bits(wptr_addr));
- amdgpu_ring_write(kiq_ring, upper_32_bits(wptr_addr));
- }
-
- r = amdgpu_ring_test_helper(kiq_ring);
- if (r)
- DRM_ERROR("KCQ enable failed\n");
-
- return r;
-}
-
static int gfx_v9_0_mqd_init(struct amdgpu_ring *ring)
{
struct amdgpu_device *adev = ring->adev;
@@ -3323,8 +3370,11 @@ static int gfx_v9_0_mqd_init(struct amdgpu_ring *ring)
tmp = REG_SET_FIELD(tmp, CP_HQD_IB_CONTROL, MIN_IB_AVAIL_SIZE, 3);
mqd->cp_hqd_ib_control = tmp;
- /* activate the queue */
- mqd->cp_hqd_active = 1;
+ /* The map_queues packet doesn't need to activate the queue,
+ * so only the KIQ needs to set this field.
+ */
+ if (ring->funcs->type == AMDGPU_RING_TYPE_KIQ)
+ mqd->cp_hqd_active = 1;
return 0;
}
@@ -3593,7 +3643,7 @@ static int gfx_v9_0_kcq_resume(struct amdgpu_device *adev)
goto done;
}
- r = gfx_v9_0_kiq_kcq_enable(adev);
+ r = amdgpu_gfx_enable_kcq(adev);
done:
return r;
}
@@ -3650,6 +3700,23 @@ static int gfx_v9_0_cp_resume(struct amdgpu_device *adev)
return 0;
}
+static void gfx_v9_0_init_tcp_config(struct amdgpu_device *adev)
+{
+ u32 tmp;
+
+ if (adev->asic_type != CHIP_ARCTURUS)
+ return;
+
+ tmp = RREG32_SOC15(GC, 0, mmTCP_ADDR_CONFIG);
+ tmp = REG_SET_FIELD(tmp, TCP_ADDR_CONFIG, ENABLE64KHASH,
+ adev->df.hash_status.hash_64k);
+ tmp = REG_SET_FIELD(tmp, TCP_ADDR_CONFIG, ENABLE2MHASH,
+ adev->df.hash_status.hash_2m);
+ tmp = REG_SET_FIELD(tmp, TCP_ADDR_CONFIG, ENABLE1GHASH,
+ adev->df.hash_status.hash_1g);
+ WREG32_SOC15(GC, 0, mmTCP_ADDR_CONFIG, tmp);
+}
+
static void gfx_v9_0_cp_enable(struct amdgpu_device *adev, bool enable)
{
if (adev->asic_type != CHIP_ARCTURUS)
@@ -3667,6 +3734,8 @@ static int gfx_v9_0_hw_init(void *handle)
gfx_v9_0_constants_init(adev);
+ gfx_v9_0_init_tcp_config(adev);
+
r = adev->gfx.rlc.funcs->resume(adev);
if (r)
return r;
@@ -3678,36 +3747,6 @@ static int gfx_v9_0_hw_init(void *handle)
return r;
}
-static int gfx_v9_0_kcq_disable(struct amdgpu_device *adev)
-{
- int r, i;
- struct amdgpu_ring *kiq_ring = &adev->gfx.kiq.ring;
-
- r = amdgpu_ring_alloc(kiq_ring, 6 * adev->gfx.num_compute_rings);
- if (r)
- DRM_ERROR("Failed to lock KIQ (%d).\n", r);
-
- for (i = 0; i < adev->gfx.num_compute_rings; i++) {
- struct amdgpu_ring *ring = &adev->gfx.compute_ring[i];
-
- amdgpu_ring_write(kiq_ring, PACKET3(PACKET3_UNMAP_QUEUES, 4));
- amdgpu_ring_write(kiq_ring, /* Q_sel: 0, vmid: 0, engine: 0, num_Q: 1 */
- PACKET3_UNMAP_QUEUES_ACTION(1) | /* RESET_QUEUES */
- PACKET3_UNMAP_QUEUES_QUEUE_SEL(0) |
- PACKET3_UNMAP_QUEUES_ENGINE_SEL(0) |
- PACKET3_UNMAP_QUEUES_NUM_QUEUES(1));
- amdgpu_ring_write(kiq_ring, PACKET3_UNMAP_QUEUES_DOORBELL_OFFSET0(ring->doorbell_index));
- amdgpu_ring_write(kiq_ring, 0);
- amdgpu_ring_write(kiq_ring, 0);
- amdgpu_ring_write(kiq_ring, 0);
- }
- r = amdgpu_ring_test_helper(kiq_ring);
- if (r)
- DRM_ERROR("KCQ disable failed\n");
-
- return r;
-}
-
static int gfx_v9_0_hw_fini(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
@@ -3719,7 +3758,7 @@ static int gfx_v9_0_hw_fini(void *handle)
/* DF freeze and kcq disable will fail */
if (!amdgpu_ras_intr_triggered())
/* disable KCQ to avoid CPC touch memory not valid anymore */
- gfx_v9_0_kcq_disable(adev);
+ amdgpu_gfx_disable_kcq(adev);
if (amdgpu_sriov_vf(adev)) {
gfx_v9_0_cp_gfx_enable(adev, false);
@@ -3936,30 +3975,58 @@ static const u32 sgpr_init_compute_shader[] =
0xbe800080, 0xbf810000,
};
+/* When the register arrays below are changed, please update gpr_reg_size
+ and sec_ded_counter_reg_size in gfx_v9_0_do_edc_gpr_workarounds so that
+ all gfx9 ASICs stay covered */
static const struct soc15_reg_entry vgpr_init_regs[] = {
+ { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_RESOURCE_LIMITS), 0x0000000 },
+ { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_NUM_THREAD_X), 0x40 },
+ { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_NUM_THREAD_Y), 4 },
+ { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_NUM_THREAD_Z), 1 },
+ { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_PGM_RSRC1), 0x3f },
+ { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_PGM_RSRC2), 0x400000 }, /* 64KB LDS */
{ SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE0), 0xffffffff },
{ SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE1), 0xffffffff },
{ SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE2), 0xffffffff },
{ SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE3), 0xffffffff },
- { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_RESOURCE_LIMITS), 0x1000000 }, /* CU_GROUP_COUNT=1 */
- { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_NUM_THREAD_X), 256*2 },
- { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_NUM_THREAD_Y), 1 },
+ { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE4), 0xffffffff },
+ { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE5), 0xffffffff },
+ { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE6), 0xffffffff },
+ { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE7), 0xffffffff },
+};
+
+static const struct soc15_reg_entry sgpr1_init_regs[] = {
+ { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_RESOURCE_LIMITS), 0x0000000 },
+ { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_NUM_THREAD_X), 0x40 },
+ { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_NUM_THREAD_Y), 8 },
{ SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_NUM_THREAD_Z), 1 },
- { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_PGM_RSRC1), 0x100007f }, /* VGPRS=15 (256 logical VGPRs, SGPRS=1 (16 SGPRs, BULKY=1 */
- { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_PGM_RSRC2), 0x400000 }, /* 64KB LDS */
+ { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_PGM_RSRC1), 0x240 }, /* (80 GPRS) */
+ { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_PGM_RSRC2), 0x0 },
+ { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE0), 0x000000ff },
+ { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE1), 0x000000ff },
+ { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE2), 0x000000ff },
+ { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE3), 0x000000ff },
+ { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE4), 0x000000ff },
+ { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE5), 0x000000ff },
+ { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE6), 0x000000ff },
+ { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE7), 0x000000ff },
};
-static const struct soc15_reg_entry sgpr_init_regs[] = {
- { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE0), 0xffffffff },
- { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE1), 0xffffffff },
- { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE2), 0xffffffff },
- { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE3), 0xffffffff },
- { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_RESOURCE_LIMITS), 0x1000000 }, /* CU_GROUP_COUNT=1 */
- { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_NUM_THREAD_X), 256*2 },
- { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_NUM_THREAD_Y), 1 },
+static const struct soc15_reg_entry sgpr2_init_regs[] = {
+ { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_RESOURCE_LIMITS), 0x0000000 },
+ { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_NUM_THREAD_X), 0x40 },
+ { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_NUM_THREAD_Y), 8 },
{ SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_NUM_THREAD_Z), 1 },
- { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_PGM_RSRC1), 0x340 }, /* SGPRS=13 (112 GPRS) */
+ { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_PGM_RSRC1), 0x240 }, /* (80 GPRS) */
{ SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_PGM_RSRC2), 0x0 },
+ { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE0), 0x0000ff00 },
+ { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE1), 0x0000ff00 },
+ { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE2), 0x0000ff00 },
+ { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE3), 0x0000ff00 },
+ { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE4), 0x0000ff00 },
+ { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE5), 0x0000ff00 },
+ { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE6), 0x0000ff00 },
+ { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE7), 0x0000ff00 },
};
static const struct soc15_reg_entry sec_ded_counter_registers[] = {
@@ -3996,6 +4063,7 @@ static const struct soc15_reg_entry sec_ded_counter_registers[] = {
{ SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT2), 0, 1, 16},
{ SOC15_REG_ENTRY(GC, 0, mmTCA_EDC_CNT), 0, 1, 2},
{ SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT3), 0, 4, 6},
+ { SOC15_REG_ENTRY(HDP, 0, mmHDP_EDC_CNT), 0, 1, 1},
};
static int gfx_v9_0_do_edc_gds_workarounds(struct amdgpu_device *adev)
@@ -4054,6 +4122,12 @@ static int gfx_v9_0_do_edc_gpr_workarounds(struct amdgpu_device *adev)
unsigned total_size, vgpr_offset, sgpr_offset;
u64 gpu_addr;
+ int compute_dim_x = adev->gfx.config.max_shader_engines *
+ adev->gfx.config.max_cu_per_sh *
+ adev->gfx.config.max_sh_per_se;
+ int sgpr_work_group_size = 5;
+ int gpr_reg_size = compute_dim_x / 16 + 6;
+
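+ /*
+ * Worked example, assuming Arcturus-like config values (8 shader
+ * engines, 1 SH per SE, 16 CUs per SH): compute_dim_x = 8 * 16 * 1
+ * = 128, so gpr_reg_size = 128 / 16 + 6 = 14, i.e. the six dispatch
+ * setup registers plus one THREAD_MGMT entry per shader engine.
+ */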
/* only support when RAS is enabled */
if (!amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__GFX))
return 0;
@@ -4063,9 +4137,11 @@ static int gfx_v9_0_do_edc_gpr_workarounds(struct amdgpu_device *adev)
return 0;
total_size =
- ((ARRAY_SIZE(vgpr_init_regs) * 3) + 4 + 5 + 2) * 4;
+ (gpr_reg_size * 3 + 4 + 5 + 2) * 4; /* VGPRS */
total_size +=
- ((ARRAY_SIZE(sgpr_init_regs) * 3) + 4 + 5 + 2) * 4;
+ (gpr_reg_size * 3 + 4 + 5 + 2) * 4; /* SGPRS1 */
+ total_size +=
+ (gpr_reg_size * 3 + 4 + 5 + 2) * 4; /* SGPRS2 */
total_size = ALIGN(total_size, 256);
vgpr_offset = total_size;
total_size += ALIGN(sizeof(vgpr_init_compute_shader), 256);
@@ -4092,7 +4168,7 @@ static int gfx_v9_0_do_edc_gpr_workarounds(struct amdgpu_device *adev)
/* VGPR */
/* write the register state for the compute dispatch */
- for (i = 0; i < ARRAY_SIZE(vgpr_init_regs); i++) {
+ for (i = 0; i < gpr_reg_size; i++) {
ib.ptr[ib.length_dw++] = PACKET3(PACKET3_SET_SH_REG, 1);
ib.ptr[ib.length_dw++] = SOC15_REG_ENTRY_OFFSET(vgpr_init_regs[i])
- PACKET3_SET_SH_REG_START;
@@ -4108,7 +4184,35 @@ static int gfx_v9_0_do_edc_gpr_workarounds(struct amdgpu_device *adev)
/* write dispatch packet */
ib.ptr[ib.length_dw++] = PACKET3(PACKET3_DISPATCH_DIRECT, 3);
- ib.ptr[ib.length_dw++] = 128; /* x */
+ ib.ptr[ib.length_dw++] = compute_dim_x; /* x */
+ ib.ptr[ib.length_dw++] = 1; /* y */
+ ib.ptr[ib.length_dw++] = 1; /* z */
+ ib.ptr[ib.length_dw++] =
+ REG_SET_FIELD(0, COMPUTE_DISPATCH_INITIATOR, COMPUTE_SHADER_EN, 1);
+
+ /* write CS partial flush packet */
+ ib.ptr[ib.length_dw++] = PACKET3(PACKET3_EVENT_WRITE, 0);
+ ib.ptr[ib.length_dw++] = EVENT_TYPE(7) | EVENT_INDEX(4);
+
+ /* SGPR1 */
+ /* write the register state for the compute dispatch */
+ for (i = 0; i < gpr_reg_size; i++) {
+ ib.ptr[ib.length_dw++] = PACKET3(PACKET3_SET_SH_REG, 1);
+ ib.ptr[ib.length_dw++] = SOC15_REG_ENTRY_OFFSET(sgpr1_init_regs[i])
+ - PACKET3_SET_SH_REG_START;
+ ib.ptr[ib.length_dw++] = sgpr1_init_regs[i].reg_value;
+ }
+ /* write the shader start address: mmCOMPUTE_PGM_LO, mmCOMPUTE_PGM_HI */
+ gpu_addr = (ib.gpu_addr + (u64)sgpr_offset) >> 8;
+ ib.ptr[ib.length_dw++] = PACKET3(PACKET3_SET_SH_REG, 2);
+ ib.ptr[ib.length_dw++] = SOC15_REG_OFFSET(GC, 0, mmCOMPUTE_PGM_LO)
+ - PACKET3_SET_SH_REG_START;
+ ib.ptr[ib.length_dw++] = lower_32_bits(gpu_addr);
+ ib.ptr[ib.length_dw++] = upper_32_bits(gpu_addr);
+
+ /* write dispatch packet */
+ ib.ptr[ib.length_dw++] = PACKET3(PACKET3_DISPATCH_DIRECT, 3);
+ ib.ptr[ib.length_dw++] = compute_dim_x / 2 * sgpr_work_group_size; /* x */
ib.ptr[ib.length_dw++] = 1; /* y */
ib.ptr[ib.length_dw++] = 1; /* z */
ib.ptr[ib.length_dw++] =
@@ -4118,13 +4222,13 @@ static int gfx_v9_0_do_edc_gpr_workarounds(struct amdgpu_device *adev)
ib.ptr[ib.length_dw++] = PACKET3(PACKET3_EVENT_WRITE, 0);
ib.ptr[ib.length_dw++] = EVENT_TYPE(7) | EVENT_INDEX(4);
- /* SGPR */
+ /* SGPR2 */
/* write the register state for the compute dispatch */
- for (i = 0; i < ARRAY_SIZE(sgpr_init_regs); i++) {
+ for (i = 0; i < gpr_reg_size; i++) {
ib.ptr[ib.length_dw++] = PACKET3(PACKET3_SET_SH_REG, 1);
- ib.ptr[ib.length_dw++] = SOC15_REG_ENTRY_OFFSET(sgpr_init_regs[i])
+ ib.ptr[ib.length_dw++] = SOC15_REG_ENTRY_OFFSET(sgpr2_init_regs[i])
- PACKET3_SET_SH_REG_START;
- ib.ptr[ib.length_dw++] = sgpr_init_regs[i].reg_value;
+ ib.ptr[ib.length_dw++] = sgpr2_init_regs[i].reg_value;
}
/* write the shader start address: mmCOMPUTE_PGM_LO, mmCOMPUTE_PGM_HI */
gpu_addr = (ib.gpu_addr + (u64)sgpr_offset) >> 8;
@@ -4136,7 +4240,7 @@ static int gfx_v9_0_do_edc_gpr_workarounds(struct amdgpu_device *adev)
/* write dispatch packet */
ib.ptr[ib.length_dw++] = PACKET3(PACKET3_DISPATCH_DIRECT, 3);
- ib.ptr[ib.length_dw++] = 128; /* x */
+ ib.ptr[ib.length_dw++] = compute_dim_x / 2 * sgpr_work_group_size; /* x */
ib.ptr[ib.length_dw++] = 1; /* y */
ib.ptr[ib.length_dw++] = 1; /* z */
ib.ptr[ib.length_dw++] =
@@ -4189,6 +4293,7 @@ static int gfx_v9_0_early_init(void *handle)
else
adev->gfx.num_gfx_rings = GFX9_NUM_GFX_RINGS;
adev->gfx.num_compute_rings = AMDGPU_MAX_COMPUTE_RINGS;
+ gfx_v9_0_set_kiq_pm4_funcs(adev);
gfx_v9_0_set_ring_funcs(adev);
gfx_v9_0_set_irq_funcs(adev);
gfx_v9_0_set_gds_init(adev);
@@ -4202,10 +4307,6 @@ static int gfx_v9_0_ecc_late_init(void *handle)
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
int r;
- r = amdgpu_gfx_ras_late_init(adev);
- if (r)
- return r;
-
r = gfx_v9_0_do_edc_gds_workarounds(adev);
if (r)
return r;
@@ -4215,6 +4316,10 @@ static int gfx_v9_0_ecc_late_init(void *handle)
if (r)
return r;
+ r = amdgpu_gfx_ras_late_init(adev);
+ if (r)
+ return r;
+
return 0;
}
@@ -5440,7 +5545,7 @@ static int gfx_v9_0_priv_inst_irq(struct amdgpu_device *adev,
}
-static const struct ras_gfx_subblock_reg ras_subblock_regs[] = {
+static const struct soc15_ras_field_entry gc_ras_fields_vg20[] = {
{ "CPC_SCRATCH", SOC15_REG_ENTRY(GC, 0, mmCPC_EDC_SCRATCH_CNT),
SOC15_REG_FIELD(CPC_EDC_SCRATCH_CNT, SEC_COUNT),
SOC15_REG_FIELD(CPC_EDC_SCRATCH_CNT, DED_COUNT)
@@ -6099,29 +6204,29 @@ static int __get_ras_error_count(const struct soc15_reg_entry *reg,
uint32_t i;
uint32_t sec_cnt, ded_cnt;
- for (i = 0; i < ARRAY_SIZE(ras_subblock_regs); i++) {
- if(ras_subblock_regs[i].reg_offset != reg->reg_offset ||
- ras_subblock_regs[i].seg != reg->seg ||
- ras_subblock_regs[i].inst != reg->inst)
+ for (i = 0; i < ARRAY_SIZE(gc_ras_fields_vg20); i++) {
+		if (gc_ras_fields_vg20[i].reg_offset != reg->reg_offset ||
+ gc_ras_fields_vg20[i].seg != reg->seg ||
+ gc_ras_fields_vg20[i].inst != reg->inst)
continue;
sec_cnt = (value &
- ras_subblock_regs[i].sec_count_mask) >>
- ras_subblock_regs[i].sec_count_shift;
+ gc_ras_fields_vg20[i].sec_count_mask) >>
+ gc_ras_fields_vg20[i].sec_count_shift;
if (sec_cnt) {
DRM_INFO("GFX SubBlock %s, Instance[%d][%d], SEC %d\n",
- ras_subblock_regs[i].name,
+ gc_ras_fields_vg20[i].name,
se_id, inst_id,
sec_cnt);
*sec_count += sec_cnt;
}
ded_cnt = (value &
- ras_subblock_regs[i].ded_count_mask) >>
- ras_subblock_regs[i].ded_count_shift;
+ gc_ras_fields_vg20[i].ded_count_mask) >>
+ gc_ras_fields_vg20[i].ded_count_shift;
if (ded_cnt) {
DRM_INFO("GFX SubBlock %s, Instance[%d][%d], DED %d\n",
- ras_subblock_regs[i].name,
+ gc_ras_fields_vg20[i].name,
se_id, inst_id,
ded_cnt);
*ded_count += ded_cnt;
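The loop above is a mask-and-shift bitfield extraction driven by the per-subblock table: each soc15_ras_field_entry pairs a counter register with the SEC and DED fields inside it. The core operation in isolation (a sketch only):

static inline uint32_t ras_field(uint32_t value, uint32_t mask, uint32_t shift)
{
	return (value & mask) >> shift;	/* SEC or DED count for one subblock */
}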
diff --git a/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.c b/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.c
index e91bd7945777..1a2f18b908fe 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.c
@@ -75,40 +75,45 @@ static void gfxhub_v1_0_init_system_aperture_regs(struct amdgpu_device *adev)
WREG32_SOC15_RLC(GC, 0, mmMC_VM_AGP_BOT, adev->gmc.agp_start >> 24);
WREG32_SOC15_RLC(GC, 0, mmMC_VM_AGP_TOP, adev->gmc.agp_end >> 24);
- /* Program the system aperture low logical page number. */
- WREG32_SOC15_RLC(GC, 0, mmMC_VM_SYSTEM_APERTURE_LOW_ADDR,
- min(adev->gmc.fb_start, adev->gmc.agp_start) >> 18);
-
- if (adev->asic_type == CHIP_RAVEN && adev->rev_id >= 0x8)
- /*
- * Raven2 has a HW issue that it is unable to use the vram which
- * is out of MC_VM_SYSTEM_APERTURE_HIGH_ADDR. So here is the
- * workaround that increase system aperture high address (add 1)
- * to get rid of the VM fault and hardware hang.
- */
- WREG32_SOC15_RLC(GC, 0, mmMC_VM_SYSTEM_APERTURE_HIGH_ADDR,
- max((adev->gmc.fb_end >> 18) + 0x1,
- adev->gmc.agp_end >> 18));
- else
- WREG32_SOC15_RLC(GC, 0, mmMC_VM_SYSTEM_APERTURE_HIGH_ADDR,
- max(adev->gmc.fb_end, adev->gmc.agp_end) >> 18);
-
- /* Set default page address. */
- value = adev->vram_scratch.gpu_addr - adev->gmc.vram_start
- + adev->vm_manager.vram_base_offset;
- WREG32_SOC15(GC, 0, mmMC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_LSB,
- (u32)(value >> 12));
- WREG32_SOC15(GC, 0, mmMC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_MSB,
- (u32)(value >> 44));
-
- /* Program "protection fault". */
- WREG32_SOC15(GC, 0, mmVM_L2_PROTECTION_FAULT_DEFAULT_ADDR_LO32,
- (u32)(adev->dummy_page_addr >> 12));
- WREG32_SOC15(GC, 0, mmVM_L2_PROTECTION_FAULT_DEFAULT_ADDR_HI32,
- (u32)((u64)adev->dummy_page_addr >> 44));
-
- WREG32_FIELD15(GC, 0, VM_L2_PROTECTION_FAULT_CNTL2,
- ACTIVE_PAGE_MIGRATION_PTE_READ_RETRY, 1);
+ if (!amdgpu_sriov_vf(adev) || adev->asic_type <= CHIP_VEGA10) {
+ /* Program the system aperture low logical page number. */
+ WREG32_SOC15_RLC(GC, 0, mmMC_VM_SYSTEM_APERTURE_LOW_ADDR,
+ min(adev->gmc.fb_start, adev->gmc.agp_start) >> 18);
+
+ if (adev->asic_type == CHIP_RAVEN && adev->rev_id >= 0x8)
+ /*
+			 * Raven2 has a HW issue that prevents it from using
+			 * vram beyond MC_VM_SYSTEM_APERTURE_HIGH_ADDR. As a
+			 * workaround, increase the system aperture high
+			 * address (add 1) to get rid of the VM fault and
+			 * hardware hang.
+ */
+ WREG32_SOC15_RLC(GC, 0,
+ mmMC_VM_SYSTEM_APERTURE_HIGH_ADDR,
+ max((adev->gmc.fb_end >> 18) + 0x1,
+ adev->gmc.agp_end >> 18));
+ else
+ WREG32_SOC15_RLC(
+ GC, 0, mmMC_VM_SYSTEM_APERTURE_HIGH_ADDR,
+ max(adev->gmc.fb_end, adev->gmc.agp_end) >> 18);
+
+ /* Set default page address. */
+ value = adev->vram_scratch.gpu_addr - adev->gmc.vram_start +
+ adev->vm_manager.vram_base_offset;
+ WREG32_SOC15(GC, 0, mmMC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_LSB,
+ (u32)(value >> 12));
+ WREG32_SOC15(GC, 0, mmMC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_MSB,
+ (u32)(value >> 44));
+
+ /* Program "protection fault". */
+ WREG32_SOC15(GC, 0, mmVM_L2_PROTECTION_FAULT_DEFAULT_ADDR_LO32,
+ (u32)(adev->dummy_page_addr >> 12));
+ WREG32_SOC15(GC, 0, mmVM_L2_PROTECTION_FAULT_DEFAULT_ADDR_HI32,
+ (u32)((u64)adev->dummy_page_addr >> 44));
+
+ WREG32_FIELD15(GC, 0, VM_L2_PROTECTION_FAULT_CNTL2,
+ ACTIVE_PAGE_MIGRATION_PTE_READ_RETRY, 1);
+ }
}
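Note the units: the AGP registers above are programmed in 16 MB granules (>> 24), while the aperture registers use 256 KB granules (>> 18), so the Raven2 workaround extends the high address by a single 256 KB unit. A hedged arithmetic sketch (helper name is illustrative):

/* Sketch: aperture registers hold 256 KB granule numbers, an
 * interpretation read off the >> 18 shifts above. */
static inline u32 aperture_unit(u64 addr)
{
	return (u32)(addr >> 18);	/* one unit == 256 KB */
}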
static void gfxhub_v1_0_init_tlb_regs(struct amdgpu_device *adev)
@@ -264,7 +269,7 @@ static void gfxhub_v1_0_program_invalidation(struct amdgpu_device *adev)
int gfxhub_v1_0_gart_enable(struct amdgpu_device *adev)
{
- if (amdgpu_sriov_vf(adev)) {
+ if (amdgpu_sriov_vf(adev) && adev->asic_type != CHIP_ARCTURUS) {
/*
	 * MC_VM_FB_LOCATION_BASE/TOP is NULL for VF, because they are
* VF copy registers so vbios post doesn't program them, for
@@ -280,10 +285,12 @@ int gfxhub_v1_0_gart_enable(struct amdgpu_device *adev)
gfxhub_v1_0_init_gart_aperture_regs(adev);
gfxhub_v1_0_init_system_aperture_regs(adev);
gfxhub_v1_0_init_tlb_regs(adev);
- gfxhub_v1_0_init_cache_regs(adev);
+ if (!amdgpu_sriov_vf(adev))
+ gfxhub_v1_0_init_cache_regs(adev);
gfxhub_v1_0_enable_system_domain(adev);
- gfxhub_v1_0_disable_identity_aperture(adev);
+ if (!amdgpu_sriov_vf(adev))
+ gfxhub_v1_0_disable_identity_aperture(adev);
gfxhub_v1_0_setup_vmid_config(adev);
gfxhub_v1_0_program_invalidation(adev);
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c
index f5725336a5f2..bbede09983e1 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c
@@ -30,6 +30,8 @@
#include "hdp/hdp_5_0_0_sh_mask.h"
#include "gc/gc_10_1_0_sh_mask.h"
#include "mmhub/mmhub_2_0_0_sh_mask.h"
+#include "athub/athub_2_0_0_sh_mask.h"
+#include "athub/athub_2_0_0_offset.h"
#include "dcn/dcn_2_0_0_offset.h"
#include "dcn/dcn_2_0_0_sh_mask.h"
#include "oss/osssys_5_0_0_offset.h"
@@ -37,6 +39,7 @@
#include "navi10_enum.h"
#include "soc15.h"
+#include "soc15d.h"
#include "soc15_common.h"
#include "nbio_v2_3.h"
@@ -234,6 +237,19 @@ static bool gmc_v10_0_use_invalidate_semaphore(struct amdgpu_device *adev,
(!amdgpu_sriov_vf(adev)));
}
+static bool gmc_v10_0_get_atc_vmid_pasid_mapping_info(
+ struct amdgpu_device *adev,
+ uint8_t vmid, uint16_t *p_pasid)
+{
+ uint32_t value;
+
+ value = RREG32(SOC15_REG_OFFSET(ATHUB, 0, mmATC_VMID0_PASID_MAPPING)
+ + vmid);
+ *p_pasid = value & ATC_VMID0_PASID_MAPPING__PASID_MASK;
+
+ return !!(value & ATC_VMID0_PASID_MAPPING__VALID_MASK);
+}
+
/*
* GART
* VMID 0 is the physical GPU addresses as used by the kernel.
@@ -380,6 +396,63 @@ error_alloc:
DRM_ERROR("Error flushing GPU TLB using the SDMA (%d)!\n", r);
}
+/**
+ * gmc_v10_0_flush_gpu_tlb_pasid - tlb flush via pasid
+ *
+ * @adev: amdgpu_device pointer
+ * @pasid: pasid to be flushed
+ * @flush_type: type of flush to perform
+ * @all_hub: true to flush all VM hubs, false for the GFX hub only
+ *
+ * Flush the TLB for the requested pasid.
+ */
+static int gmc_v10_0_flush_gpu_tlb_pasid(struct amdgpu_device *adev,
+ uint16_t pasid, uint32_t flush_type,
+ bool all_hub)
+{
+ int vmid, i;
+ signed long r;
+ uint32_t seq;
+ uint16_t queried_pasid;
+ bool ret;
+ struct amdgpu_ring *ring = &adev->gfx.kiq.ring;
+ struct amdgpu_kiq *kiq = &adev->gfx.kiq;
+
+ if (amdgpu_emu_mode == 0 && ring->sched.ready) {
+ spin_lock(&adev->gfx.kiq.ring_lock);
+ amdgpu_ring_alloc(ring, kiq->pmf->invalidate_tlbs_size);
+ kiq->pmf->kiq_invalidate_tlbs(ring,
+ pasid, flush_type, all_hub);
+ amdgpu_fence_emit_polling(ring, &seq);
+ amdgpu_ring_commit(ring);
+ spin_unlock(&adev->gfx.kiq.ring_lock);
+ r = amdgpu_fence_wait_polling(ring, seq, adev->usec_timeout);
+ if (r < 1) {
+ DRM_ERROR("wait for kiq fence error: %ld.\n", r);
+ return -ETIME;
+ }
+
+ return 0;
+ }
+
+ for (vmid = 1; vmid < 16; vmid++) {
+
+ ret = gmc_v10_0_get_atc_vmid_pasid_mapping_info(adev, vmid,
+ &queried_pasid);
+ if (ret && queried_pasid == pasid) {
+ if (all_hub) {
+ for (i = 0; i < adev->num_vmhubs; i++)
+ gmc_v10_0_flush_gpu_tlb(adev, vmid,
+ i, 0);
+ } else {
+ gmc_v10_0_flush_gpu_tlb(adev, vmid,
+ AMDGPU_GFXHUB_0, 0);
+ }
+ break;
+ }
+ }
+
+ return 0;
+}
+
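gmc_v10_0_flush_gpu_tlb_pasid() has two paths: with a live KIQ ring it emits an invalidate-TLBs packet and polls a fence; otherwise it scans the ATC mapping registers for the VMID currently bound to the PASID and flushes that VMID directly. The fallback lookup in isolation (a sketch; the helper name is illustrative):

static int pasid_to_vmid(struct amdgpu_device *adev, uint16_t pasid)
{
	uint16_t mapped;
	int vmid;

	for (vmid = 1; vmid < 16; vmid++) {	/* VMID 0 belongs to the kernel */
		if (gmc_v10_0_get_atc_vmid_pasid_mapping_info(adev, vmid,
							      &mapped) &&
		    mapped == pasid)
			return vmid;
	}
	return -ENOENT;	/* pasid not currently mapped to any VMID */
}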
static uint64_t gmc_v10_0_emit_flush_gpu_tlb(struct amdgpu_ring *ring,
unsigned vmid, uint64_t pd_addr)
{
@@ -531,6 +604,7 @@ static void gmc_v10_0_get_vm_pte(struct amdgpu_device *adev,
static const struct amdgpu_gmc_funcs gmc_v10_0_gmc_funcs = {
.flush_gpu_tlb = gmc_v10_0_flush_gpu_tlb,
+ .flush_gpu_tlb_pasid = gmc_v10_0_flush_gpu_tlb_pasid,
.emit_flush_gpu_tlb = gmc_v10_0_emit_flush_gpu_tlb,
.emit_pasid_mapping = gmc_v10_0_emit_pasid_mapping,
.map_mtype = gmc_v10_0_map_mtype,
@@ -564,22 +638,18 @@ static int gmc_v10_0_early_init(void *handle)
static int gmc_v10_0_late_init(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- unsigned vm_inv_eng[AMDGPU_MAX_VMHUBS] = { 4, 4 };
- unsigned i;
-
- for(i = 0; i < adev->num_rings; ++i) {
- struct amdgpu_ring *ring = adev->rings[i];
- unsigned vmhub = ring->funcs->vmhub;
+ int r;
- ring->vm_inv_eng = vm_inv_eng[vmhub]++;
- dev_info(adev->dev, "ring %u(%s) uses VM inv eng %u on hub %u\n",
- ring->idx, ring->name, ring->vm_inv_eng,
- ring->funcs->vmhub);
- }
+ /*
+ * Can't free the stolen VGA memory when it might be used for memory
+ * training again.
+ */
+ if (!adev->fw_vram_usage.mem_train_support)
+ amdgpu_bo_late_init(adev);
- /* Engine 17 is used for GART flushes */
- for(i = 0; i < AMDGPU_MAX_VMHUBS; ++i)
- BUG_ON(vm_inv_eng[i] > 17);
+ r = amdgpu_gmc_allocate_vm_inv_eng(adev);
+ if (r)
+ return r;
return amdgpu_irq_get(adev, &adev->gmc.vm_fault, 0);
}
@@ -731,6 +801,10 @@ static int gmc_v10_0_sw_init(void *handle)
r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_VMC,
VMC_1_0__SRCID__VM_FAULT,
&adev->gmc.vm_fault);
+
+ if (r)
+ return r;
+
r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_UTCL2,
UTCL2_1_0__SRCID__FAULT,
&adev->gmc.vm_fault);
@@ -743,15 +817,6 @@ static int gmc_v10_0_sw_init(void *handle)
*/
adev->gmc.mc_mask = 0xffffffffffffULL; /* 48 bit MC */
- /*
- * Reserve 8M stolen memory for navi10 like vega10
- * TODO: will check if it's really needed on asic.
- */
- if (amdgpu_emu_mode == 1)
- adev->gmc.stolen_size = 0;
- else
- adev->gmc.stolen_size = 9 * 1024 *1024;
-
r = dma_set_mask_and_coherent(adev->dev, DMA_BIT_MASK(44));
if (r) {
printk(KERN_WARNING "amdgpu: No suitable DMA available.\n");
@@ -764,6 +829,19 @@ static int gmc_v10_0_sw_init(void *handle)
adev->gmc.stolen_size = gmc_v10_0_get_vbios_fb_size(adev);
+ /*
+	 * In a dual-GPU scenario, stolen_size is assigned to zero on the
+	 * secondary GPU, since there is no pre-OS console using that memory.
+	 * The bottom region of VRAM is then allocated as GTT, but a small
+	 * region at the bottom of VRAM is encroached on by UMC firmware
+	 * during GDDR6 BIST training, which causes a page fault.
+	 * The page fault can be fixed by forcing stolen_size to 3MB; the
+	 * bottom region of VRAM is then allocated as stolen memory and the
+	 * GTT corruption is avoided.
+ */
+ adev->gmc.stolen_size = max(adev->gmc.stolen_size,
+ AMDGPU_STOLEN_BIST_TRAINING_DEFAULT_SIZE);
+
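The max() above is simply a lower clamp on the vbios-reported size: a secondary GPU that reports 0 ends up with the 3 MB BIST-training minimum, while a primary GPU reporting, say, 8 MB keeps its 8 MB. A sketch, with the 3 MB value assumed from the comment above (the constant's real definition lives elsewhere):

static u64 clamp_stolen_size(u64 reported)
{
	/* AMDGPU_STOLEN_BIST_TRAINING_DEFAULT_SIZE, 3 MB per the comment */
	return max_t(u64, reported, 3ULL * 1024 * 1024);
}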
/* Memory manager */
r = amdgpu_bo_init(adev);
if (r)
@@ -803,6 +881,13 @@ static void gmc_v10_0_gart_fini(struct amdgpu_device *adev)
static int gmc_v10_0_sw_fini(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ void *stolen_vga_buf;
+
+ /*
+ * Free the stolen memory if it wasn't already freed in late_init
+ * because of memory training.
+ */
+ amdgpu_bo_free_kernel(&adev->stolen_vga_memory, NULL, &stolen_vga_buf);
amdgpu_vm_manager_fini(adev);
gmc_v10_0_gart_fini(adev);
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c
index f08e5330642d..19d5b133e1d7 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c
@@ -418,6 +418,38 @@ static int gmc_v7_0_mc_init(struct amdgpu_device *adev)
return 0;
}
+/**
+ * gmc_v7_0_flush_gpu_tlb_pasid - tlb flush via pasid
+ *
+ * @adev: amdgpu_device pointer
+ * @pasid: pasid to be flushed
+ * @flush_type: type of flush (unused on this generation)
+ * @all_hub: flush all hubs (unused on this generation)
+ *
+ * Flush the TLB for the requested pasid.
+ */
+static int gmc_v7_0_flush_gpu_tlb_pasid(struct amdgpu_device *adev,
+ uint16_t pasid, uint32_t flush_type,
+ bool all_hub)
+{
+ int vmid;
+ unsigned int tmp;
+
+ if (adev->in_gpu_reset)
+ return -EIO;
+
+ for (vmid = 1; vmid < 16; vmid++) {
+
+ tmp = RREG32(mmATC_VMID0_PASID_MAPPING + vmid);
+ if ((tmp & ATC_VMID0_PASID_MAPPING__VALID_MASK) &&
+ (tmp & ATC_VMID0_PASID_MAPPING__PASID_MASK) == pasid) {
+ WREG32(mmVM_INVALIDATE_REQUEST, 1 << vmid);
+ RREG32(mmVM_INVALIDATE_RESPONSE);
+ break;
+ }
+ }
+
+ return 0;
+}
+
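Pre-gfx9 parts have no KIQ packet for this, so gmc_v7_0_flush_gpu_tlb_pasid() scans ATC_VMID*_PASID_MAPPING and pokes VM_INVALIDATE_REQUEST directly; the VM_INVALIDATE_RESPONSE read-back is not checked and appears to serve only to order the request. The request itself in isolation (a sketch):

static void invalidate_one_vmid(struct amdgpu_device *adev, int vmid)
{
	WREG32(mmVM_INVALIDATE_REQUEST, 1 << vmid);	/* one bit per VMID */
	RREG32(mmVM_INVALIDATE_RESPONSE);		/* unchecked read-back */
}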
/*
* GART
* VMID 0 is the physical GPU addresses as used by the kernel.
@@ -1333,6 +1365,7 @@ static const struct amd_ip_funcs gmc_v7_0_ip_funcs = {
static const struct amdgpu_gmc_funcs gmc_v7_0_gmc_funcs = {
.flush_gpu_tlb = gmc_v7_0_flush_gpu_tlb,
+ .flush_gpu_tlb_pasid = gmc_v7_0_flush_gpu_tlb_pasid,
.emit_flush_gpu_tlb = gmc_v7_0_emit_flush_gpu_tlb,
.emit_pasid_mapping = gmc_v7_0_emit_pasid_mapping,
.set_prt = gmc_v7_0_set_prt,
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
index 6d96d40fbcb8..27d83204fa2b 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
@@ -620,6 +620,39 @@ static int gmc_v8_0_mc_init(struct amdgpu_device *adev)
return 0;
}
+/**
+ * gmc_v8_0_flush_gpu_tlb_pasid - tlb flush via pasid
+ *
+ * @adev: amdgpu_device pointer
+ * @pasid: pasid to be flushed
+ * @flush_type: type of flush (unused on this generation)
+ * @all_hub: flush all hubs (unused on this generation)
+ *
+ * Flush the TLB for the requested pasid.
+ */
+static int gmc_v8_0_flush_gpu_tlb_pasid(struct amdgpu_device *adev,
+ uint16_t pasid, uint32_t flush_type,
+ bool all_hub)
+{
+ int vmid;
+ unsigned int tmp;
+
+ if (adev->in_gpu_reset)
+ return -EIO;
+
+ for (vmid = 1; vmid < 16; vmid++) {
+
+ tmp = RREG32(mmATC_VMID0_PASID_MAPPING + vmid);
+ if ((tmp & ATC_VMID0_PASID_MAPPING__VALID_MASK) &&
+ (tmp & ATC_VMID0_PASID_MAPPING__PASID_MASK) == pasid) {
+ WREG32(mmVM_INVALIDATE_REQUEST, 1 << vmid);
+ RREG32(mmVM_INVALIDATE_RESPONSE);
+ break;
+ }
+ }
+
+ return 0;
+}
+
/*
* GART
* VMID 0 is the physical GPU addresses as used by the kernel.
@@ -1700,6 +1733,7 @@ static const struct amd_ip_funcs gmc_v8_0_ip_funcs = {
static const struct amdgpu_gmc_funcs gmc_v8_0_gmc_funcs = {
.flush_gpu_tlb = gmc_v8_0_flush_gpu_tlb,
+ .flush_gpu_tlb_pasid = gmc_v8_0_flush_gpu_tlb_pasid,
.emit_flush_gpu_tlb = gmc_v8_0_emit_flush_gpu_tlb,
.emit_pasid_mapping = gmc_v8_0_emit_pasid_mapping,
.set_prt = gmc_v8_0_set_prt,
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
index a5b68b5e452f..40a496804356 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
@@ -38,10 +38,12 @@
#include "dce/dce_12_0_sh_mask.h"
#include "vega10_enum.h"
#include "mmhub/mmhub_1_0_offset.h"
+#include "athub/athub_1_0_sh_mask.h"
#include "athub/athub_1_0_offset.h"
#include "oss/osssys_4_0_offset.h"
#include "soc15.h"
+#include "soc15d.h"
#include "soc15_common.h"
#include "umc/umc_6_0_sh_mask.h"
@@ -207,6 +209,11 @@ static int gmc_v9_0_ecc_interrupt_state(struct amdgpu_device *adev,
{
u32 bits, i, tmp, reg;
+	/*
+	 * Devices newer than VEGA10/12 shall have these programming
+	 * sequences performed by PSP BL.
+	 */
+ if (adev->asic_type >= CHIP_VEGA20)
+ return 0;
+
bits = 0x7f;
switch (state) {
@@ -393,8 +400,10 @@ static void gmc_v9_0_set_irq_funcs(struct amdgpu_device *adev)
adev->gmc.vm_fault.num_types = 1;
adev->gmc.vm_fault.funcs = &gmc_v9_0_irq_funcs;
- adev->gmc.ecc_irq.num_types = 1;
- adev->gmc.ecc_irq.funcs = &gmc_v9_0_ecc_funcs;
+ if (!amdgpu_sriov_vf(adev)) {
+ adev->gmc.ecc_irq.num_types = 1;
+ adev->gmc.ecc_irq.funcs = &gmc_v9_0_ecc_funcs;
+ }
}
static uint32_t gmc_v9_0_get_invalidate_req(unsigned int vmid,
@@ -434,6 +443,18 @@ static bool gmc_v9_0_use_invalidate_semaphore(struct amdgpu_device *adev,
adev->pdev->device == 0x15d8)));
}
+static bool gmc_v9_0_get_atc_vmid_pasid_mapping_info(struct amdgpu_device *adev,
+ uint8_t vmid, uint16_t *p_pasid)
+{
+ uint32_t value;
+
+ value = RREG32(SOC15_REG_OFFSET(ATHUB, 0, mmATC_VMID0_PASID_MAPPING)
+ + vmid);
+ *p_pasid = value & ATC_VMID0_PASID_MAPPING__PASID_MASK;
+
+ return !!(value & ATC_VMID0_PASID_MAPPING__VALID_MASK);
+}
+
/*
* GART
* VMID 0 is the physical GPU addresses as used by the kernel.
@@ -532,6 +553,67 @@ static void gmc_v9_0_flush_gpu_tlb(struct amdgpu_device *adev, uint32_t vmid,
DRM_ERROR("Timeout waiting for VM flush ACK!\n");
}
+/**
+ * gmc_v9_0_flush_gpu_tlb_pasid - tlb flush via pasid
+ *
+ * @adev: amdgpu_device pointer
+ * @pasid: pasid to be flushed
+ * @flush_type: type of flush to perform
+ * @all_hub: true to flush all VM hubs, false for the GFX hub only
+ *
+ * Flush the TLB for the requested pasid.
+ */
+static int gmc_v9_0_flush_gpu_tlb_pasid(struct amdgpu_device *adev,
+ uint16_t pasid, uint32_t flush_type,
+ bool all_hub)
+{
+ int vmid, i;
+ signed long r;
+ uint32_t seq;
+ uint16_t queried_pasid;
+ bool ret;
+ struct amdgpu_ring *ring = &adev->gfx.kiq.ring;
+ struct amdgpu_kiq *kiq = &adev->gfx.kiq;
+
+ if (adev->in_gpu_reset)
+ return -EIO;
+
+ if (ring->sched.ready) {
+ spin_lock(&adev->gfx.kiq.ring_lock);
+ amdgpu_ring_alloc(ring, kiq->pmf->invalidate_tlbs_size);
+ kiq->pmf->kiq_invalidate_tlbs(ring,
+ pasid, flush_type, all_hub);
+ amdgpu_fence_emit_polling(ring, &seq);
+ amdgpu_ring_commit(ring);
+ spin_unlock(&adev->gfx.kiq.ring_lock);
+ r = amdgpu_fence_wait_polling(ring, seq, adev->usec_timeout);
+ if (r < 1) {
+ DRM_ERROR("wait for kiq fence error: %ld.\n", r);
+ return -ETIME;
+ }
+
+ return 0;
+ }
+
+ for (vmid = 1; vmid < 16; vmid++) {
+
+ ret = gmc_v9_0_get_atc_vmid_pasid_mapping_info(adev, vmid,
+ &queried_pasid);
+ if (ret && queried_pasid == pasid) {
+ if (all_hub) {
+ for (i = 0; i < adev->num_vmhubs; i++)
+ gmc_v9_0_flush_gpu_tlb(adev, vmid,
+ i, 0);
+ } else {
+ gmc_v9_0_flush_gpu_tlb(adev, vmid,
+ AMDGPU_GFXHUB_0, 0);
+ }
+ break;
+ }
+ }
+
+ return 0;
+}
+
static uint64_t gmc_v9_0_emit_flush_gpu_tlb(struct amdgpu_ring *ring,
unsigned vmid, uint64_t pd_addr)
{
@@ -693,6 +775,7 @@ static void gmc_v9_0_get_vm_pte(struct amdgpu_device *adev,
static const struct amdgpu_gmc_funcs gmc_v9_0_gmc_funcs = {
.flush_gpu_tlb = gmc_v9_0_flush_gpu_tlb,
+ .flush_gpu_tlb_pasid = gmc_v9_0_flush_gpu_tlb_pasid,
.emit_flush_gpu_tlb = gmc_v9_0_emit_flush_gpu_tlb,
.emit_pasid_mapping = gmc_v9_0_emit_pasid_mapping,
.map_mtype = gmc_v9_0_map_mtype,
@@ -715,7 +798,15 @@ static void gmc_v9_0_set_umc_funcs(struct amdgpu_device *adev)
adev->umc.max_ras_err_cnt_per_query = UMC_V6_1_TOTAL_CHANNEL_NUM;
adev->umc.channel_inst_num = UMC_V6_1_CHANNEL_INSTANCE_NUM;
adev->umc.umc_inst_num = UMC_V6_1_UMC_INSTANCE_NUM;
- adev->umc.channel_offs = UMC_V6_1_PER_CHANNEL_OFFSET;
+ adev->umc.channel_offs = UMC_V6_1_PER_CHANNEL_OFFSET_VG20;
+ adev->umc.channel_idx_tbl = &umc_v6_1_channel_idx_tbl[0][0];
+ adev->umc.funcs = &umc_v6_1_funcs;
+ break;
+ case CHIP_ARCTURUS:
+ adev->umc.max_ras_err_cnt_per_query = UMC_V6_1_TOTAL_CHANNEL_NUM;
+ adev->umc.channel_inst_num = UMC_V6_1_CHANNEL_INSTANCE_NUM;
+ adev->umc.umc_inst_num = UMC_V6_1_UMC_INSTANCE_NUM;
+ adev->umc.channel_offs = UMC_V6_1_PER_CHANNEL_OFFSET_ARCT;
adev->umc.channel_idx_tbl = &umc_v6_1_channel_idx_tbl[0][0];
adev->umc.funcs = &umc_v6_1_funcs;
break;
@@ -730,6 +821,9 @@ static void gmc_v9_0_set_mmhub_funcs(struct amdgpu_device *adev)
case CHIP_VEGA20:
adev->mmhub.funcs = &mmhub_v1_0_funcs;
break;
+ case CHIP_ARCTURUS:
+ adev->mmhub.funcs = &mmhub_v9_4_funcs;
+ break;
default:
break;
}
@@ -779,36 +873,6 @@ static bool gmc_v9_0_keep_stolen_memory(struct amdgpu_device *adev)
}
}
-static int gmc_v9_0_allocate_vm_inv_eng(struct amdgpu_device *adev)
-{
- struct amdgpu_ring *ring;
- unsigned vm_inv_engs[AMDGPU_MAX_VMHUBS] =
- {GFXHUB_FREE_VM_INV_ENGS_BITMAP, MMHUB_FREE_VM_INV_ENGS_BITMAP,
- GFXHUB_FREE_VM_INV_ENGS_BITMAP};
- unsigned i;
- unsigned vmhub, inv_eng;
-
- for (i = 0; i < adev->num_rings; ++i) {
- ring = adev->rings[i];
- vmhub = ring->funcs->vmhub;
-
- inv_eng = ffs(vm_inv_engs[vmhub]);
- if (!inv_eng) {
- dev_err(adev->dev, "no VM inv eng for ring %s\n",
- ring->name);
- return -EINVAL;
- }
-
- ring->vm_inv_eng = inv_eng - 1;
- vm_inv_engs[vmhub] &= ~(1 << ring->vm_inv_eng);
-
- dev_info(adev->dev, "ring %s uses VM inv eng %u on hub %u\n",
- ring->name, ring->vm_inv_eng, ring->funcs->vmhub);
- }
-
- return 0;
-}
-
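The removed allocator survives as common code (amdgpu_gmc_allocate_vm_inv_eng(), now called from late_init below); it hands out invalidation engines from a per-hub free bitmap via ffs(). The bitmap 0x1FFF3 sets bits 0-1 and 4-16: those engines are free, 2-3 are reserved for firmware and 17 for GART flushes, matching the comment removed from gmc_v9_0.h further down. The allocation step, condensed (a sketch):

static int alloc_inv_eng(unsigned int *free_bitmap)
{
	int eng = ffs(*free_bitmap);	/* 1-based index of lowest free engine */

	if (!eng)
		return -EINVAL;		/* this hub has no engine left */

	*free_bitmap &= ~(1 << (eng - 1));
	return eng - 1;
}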
static int gmc_v9_0_late_init(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
@@ -817,7 +881,7 @@ static int gmc_v9_0_late_init(void *handle)
if (!gmc_v9_0_keep_stolen_memory(adev))
amdgpu_bo_late_init(adev);
- r = gmc_v9_0_allocate_vm_inv_eng(adev);
+ r = amdgpu_gmc_allocate_vm_inv_eng(adev);
if (r)
return r;
/* Check if ecc is available */
@@ -825,11 +889,12 @@ static int gmc_v9_0_late_init(void *handle)
switch (adev->asic_type) {
case CHIP_VEGA10:
case CHIP_VEGA20:
+ case CHIP_ARCTURUS:
r = amdgpu_atomfirmware_mem_ecc_supported(adev);
if (!r) {
DRM_INFO("ECC is not present.\n");
- if (adev->df_funcs->enable_ecc_force_par_wr_rmw)
- adev->df_funcs->enable_ecc_force_par_wr_rmw(adev, false);
+ if (adev->df.funcs->enable_ecc_force_par_wr_rmw)
+ adev->df.funcs->enable_ecc_force_par_wr_rmw(adev, false);
} else {
DRM_INFO("ECC is active.\n");
}
@@ -1034,7 +1099,7 @@ static int gmc_v9_0_sw_init(void *handle)
else
chansize = 128;
- numchan = adev->df_funcs->get_hbm_channel_number(adev);
+ numchan = adev->df.funcs->get_hbm_channel_number(adev);
adev->gmc.vram_width = numchan * chansize;
}
@@ -1100,11 +1165,13 @@ static int gmc_v9_0_sw_init(void *handle)
if (r)
return r;
- /* interrupt sent to DF. */
- r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DF, 0,
- &adev->gmc.ecc_irq);
- if (r)
- return r;
+ if (!amdgpu_sriov_vf(adev)) {
+ /* interrupt sent to DF. */
+ r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DF, 0,
+ &adev->gmc.ecc_irq);
+ if (r)
+ return r;
+ }
/* Set the internal MC address mask
* This is the max address of the GPU's
@@ -1290,12 +1357,13 @@ static int gmc_v9_0_hw_init(void *handle)
else
value = true;
- gfxhub_v1_0_set_fault_enable_default(adev, value);
- if (adev->asic_type == CHIP_ARCTURUS)
- mmhub_v9_4_set_fault_enable_default(adev, value);
- else
- mmhub_v1_0_set_fault_enable_default(adev, value);
-
+ if (!amdgpu_sriov_vf(adev)) {
+ gfxhub_v1_0_set_fault_enable_default(adev, value);
+ if (adev->asic_type == CHIP_ARCTURUS)
+ mmhub_v9_4_set_fault_enable_default(adev, value);
+ else
+ mmhub_v1_0_set_fault_enable_default(adev, value);
+ }
for (i = 0; i < adev->num_vmhubs; ++i)
gmc_v9_0_flush_gpu_tlb(adev, 0, i, 0);
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.h b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.h
index 971c0840358f..e0585e8c6c1b 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.h
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.h
@@ -24,24 +24,6 @@
#ifndef __GMC_V9_0_H__
#define __GMC_V9_0_H__
- /*
- * The latest engine allocation on gfx9 is:
- * Engine 2, 3: firmware
- * Engine 0, 1, 4~16: amdgpu ring,
- * subject to change when ring number changes
- * Engine 17: Gart flushes
- */
-#define GFXHUB_FREE_VM_INV_ENGS_BITMAP 0x1FFF3
-#define MMHUB_FREE_VM_INV_ENGS_BITMAP 0x1FFF3
-
extern const struct amd_ip_funcs gmc_v9_0_ip_funcs;
extern const struct amdgpu_ip_block_version gmc_v9_0_ip_block;
-
-/* amdgpu_amdkfd*.c */
-void gfxhub_v1_0_setup_vm_pt_regs(struct amdgpu_device *adev, uint32_t vmid,
- uint64_t value);
-void mmhub_v1_0_setup_vm_pt_regs(struct amdgpu_device *adev, uint32_t vmid,
- uint64_t value);
-void mmhub_v9_4_setup_vm_pt_regs(struct amdgpu_device *adev, int hubid,
- uint32_t vmid, uint64_t value);
#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/jpeg_v1_0.c b/drivers/gpu/drm/amd/amdgpu/jpeg_v1_0.c
new file mode 100644
index 000000000000..0debfd9f428c
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/jpeg_v1_0.c
@@ -0,0 +1,586 @@
+/*
+ * Copyright 2019 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#include "amdgpu.h"
+#include "amdgpu_jpeg.h"
+#include "soc15.h"
+#include "soc15d.h"
+#include "vcn_v1_0.h"
+
+#include "vcn/vcn_1_0_offset.h"
+#include "vcn/vcn_1_0_sh_mask.h"
+
+static void jpeg_v1_0_set_dec_ring_funcs(struct amdgpu_device *adev);
+static void jpeg_v1_0_set_irq_funcs(struct amdgpu_device *adev);
+
+static void jpeg_v1_0_decode_ring_patch_wreg(struct amdgpu_ring *ring, uint32_t *ptr, uint32_t reg_offset, uint32_t val)
+{
+	struct amdgpu_device *adev = ring->adev;
+
+ ring->ring[(*ptr)++] = PACKETJ(SOC15_REG_OFFSET(JPEG, 0, mmUVD_JRBC_EXTERNAL_REG_BASE), 0, 0, PACKETJ_TYPE0);
+ if (((reg_offset >= 0x1f800) && (reg_offset <= 0x21fff)) ||
+ ((reg_offset >= 0x1e000) && (reg_offset <= 0x1e1ff))) {
+ ring->ring[(*ptr)++] = 0;
+ ring->ring[(*ptr)++] = PACKETJ((reg_offset >> 2), 0, 0, PACKETJ_TYPE0);
+ } else {
+ ring->ring[(*ptr)++] = reg_offset;
+ ring->ring[(*ptr)++] = PACKETJ(0, 0, 0, PACKETJ_TYPE0);
+ }
+ ring->ring[(*ptr)++] = val;
+}
+
+static void jpeg_v1_0_decode_ring_set_patch_ring(struct amdgpu_ring *ring, uint32_t ptr)
+{
+ struct amdgpu_device *adev = ring->adev;
+
+ uint32_t reg, reg_offset, val, mask, i;
+
+ // 1st: program mmUVD_LMI_JRBC_RB_MEM_RD_64BIT_BAR_LOW
+ reg = SOC15_REG_OFFSET(JPEG, 0, mmUVD_LMI_JRBC_RB_MEM_RD_64BIT_BAR_LOW);
+ reg_offset = (reg << 2);
+ val = lower_32_bits(ring->gpu_addr);
+ jpeg_v1_0_decode_ring_patch_wreg(ring, &ptr, reg_offset, val);
+
+ // 2nd: program mmUVD_LMI_JRBC_RB_MEM_RD_64BIT_BAR_HIGH
+ reg = SOC15_REG_OFFSET(JPEG, 0, mmUVD_LMI_JRBC_RB_MEM_RD_64BIT_BAR_HIGH);
+ reg_offset = (reg << 2);
+ val = upper_32_bits(ring->gpu_addr);
+ jpeg_v1_0_decode_ring_patch_wreg(ring, &ptr, reg_offset, val);
+
+ // 3rd to 5th: issue MEM_READ commands
+ for (i = 0; i <= 2; i++) {
+ ring->ring[ptr++] = PACKETJ(0, 0, 0, PACKETJ_TYPE2);
+ ring->ring[ptr++] = 0;
+ }
+
+ // 6th: program mmUVD_JRBC_RB_CNTL register to enable NO_FETCH and RPTR write ability
+ reg = SOC15_REG_OFFSET(JPEG, 0, mmUVD_JRBC_RB_CNTL);
+ reg_offset = (reg << 2);
+ val = 0x13;
+ jpeg_v1_0_decode_ring_patch_wreg(ring, &ptr, reg_offset, val);
+
+ // 7th: program mmUVD_JRBC_RB_REF_DATA
+ reg = SOC15_REG_OFFSET(JPEG, 0, mmUVD_JRBC_RB_REF_DATA);
+ reg_offset = (reg << 2);
+ val = 0x1;
+ jpeg_v1_0_decode_ring_patch_wreg(ring, &ptr, reg_offset, val);
+
+ // 8th: issue conditional register read mmUVD_JRBC_RB_CNTL
+ reg = SOC15_REG_OFFSET(JPEG, 0, mmUVD_JRBC_RB_CNTL);
+ reg_offset = (reg << 2);
+ val = 0x1;
+ mask = 0x1;
+
+ ring->ring[ptr++] = PACKETJ(SOC15_REG_OFFSET(JPEG, 0, mmUVD_JRBC_RB_COND_RD_TIMER), 0, 0, PACKETJ_TYPE0);
+ ring->ring[ptr++] = 0x01400200;
+ ring->ring[ptr++] = PACKETJ(SOC15_REG_OFFSET(JPEG, 0, mmUVD_JRBC_RB_REF_DATA), 0, 0, PACKETJ_TYPE0);
+ ring->ring[ptr++] = val;
+ ring->ring[ptr++] = PACKETJ(SOC15_REG_OFFSET(JPEG, 0, mmUVD_JRBC_EXTERNAL_REG_BASE), 0, 0, PACKETJ_TYPE0);
+ if (((reg_offset >= 0x1f800) && (reg_offset <= 0x21fff)) ||
+ ((reg_offset >= 0x1e000) && (reg_offset <= 0x1e1ff))) {
+ ring->ring[ptr++] = 0;
+ ring->ring[ptr++] = PACKETJ((reg_offset >> 2), 0, 0, PACKETJ_TYPE3);
+ } else {
+ ring->ring[ptr++] = reg_offset;
+ ring->ring[ptr++] = PACKETJ(0, 0, 0, PACKETJ_TYPE3);
+ }
+ ring->ring[ptr++] = mask;
+
+	// 9th to 21st: insert no-op
+ for (i = 0; i <= 12; i++) {
+ ring->ring[ptr++] = PACKETJ(0, 0, 0, PACKETJ_TYPE6);
+ ring->ring[ptr++] = 0;
+ }
+
+	// 22nd: reset mmUVD_JRBC_RB_RPTR
+ reg = SOC15_REG_OFFSET(JPEG, 0, mmUVD_JRBC_RB_RPTR);
+ reg_offset = (reg << 2);
+ val = 0;
+ jpeg_v1_0_decode_ring_patch_wreg(ring, &ptr, reg_offset, val);
+
+	// 23rd: program mmUVD_JRBC_RB_CNTL to disable no_fetch
+ reg = SOC15_REG_OFFSET(JPEG, 0, mmUVD_JRBC_RB_CNTL);
+ reg_offset = (reg << 2);
+ val = 0x12;
+ jpeg_v1_0_decode_ring_patch_wreg(ring, &ptr, reg_offset, val);
+}
+
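The same pair of address-range checks recurs in every write and wait helper in this file: offsets inside the two windows are sent as (reg_offset >> 2) after zeroing UVD_JRBC_EXTERNAL_REG_BASE, while everything else is reached indirectly by loading the offset into that base register first (a reading of the code, hedged). The predicate, factored out for clarity (the patch itself keeps the checks inline):

static bool jpeg_v1_0_reg_in_window(uint32_t reg_offset)
{
	return (reg_offset >= 0x1f800 && reg_offset <= 0x21fff) ||
	       (reg_offset >= 0x1e000 && reg_offset <= 0x1e1ff);
}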
+/**
+ * jpeg_v1_0_decode_ring_get_rptr - get read pointer
+ *
+ * @ring: amdgpu_ring pointer
+ *
+ * Returns the current hardware read pointer
+ */
+static uint64_t jpeg_v1_0_decode_ring_get_rptr(struct amdgpu_ring *ring)
+{
+ struct amdgpu_device *adev = ring->adev;
+
+ return RREG32_SOC15(JPEG, 0, mmUVD_JRBC_RB_RPTR);
+}
+
+/**
+ * jpeg_v1_0_decode_ring_get_wptr - get write pointer
+ *
+ * @ring: amdgpu_ring pointer
+ *
+ * Returns the current hardware write pointer
+ */
+static uint64_t jpeg_v1_0_decode_ring_get_wptr(struct amdgpu_ring *ring)
+{
+ struct amdgpu_device *adev = ring->adev;
+
+ return RREG32_SOC15(JPEG, 0, mmUVD_JRBC_RB_WPTR);
+}
+
+/**
+ * jpeg_v1_0_decode_ring_set_wptr - set write pointer
+ *
+ * @ring: amdgpu_ring pointer
+ *
+ * Commits the write pointer to the hardware
+ */
+static void jpeg_v1_0_decode_ring_set_wptr(struct amdgpu_ring *ring)
+{
+ struct amdgpu_device *adev = ring->adev;
+
+ WREG32_SOC15(JPEG, 0, mmUVD_JRBC_RB_WPTR, lower_32_bits(ring->wptr));
+}
+
+/**
+ * jpeg_v1_0_decode_ring_insert_start - insert a start command
+ *
+ * @ring: amdgpu_ring pointer
+ *
+ * Write a start command to the ring.
+ */
+static void jpeg_v1_0_decode_ring_insert_start(struct amdgpu_ring *ring)
+{
+ struct amdgpu_device *adev = ring->adev;
+
+ amdgpu_ring_write(ring,
+ PACKETJ(SOC15_REG_OFFSET(JPEG, 0, mmUVD_JRBC_EXTERNAL_REG_BASE), 0, 0, PACKETJ_TYPE0));
+ amdgpu_ring_write(ring, 0x68e04);
+
+ amdgpu_ring_write(ring, PACKETJ(0, 0, 0, PACKETJ_TYPE0));
+ amdgpu_ring_write(ring, 0x80010000);
+}
+
+/**
+ * jpeg_v1_0_decode_ring_insert_end - insert an end command
+ *
+ * @ring: amdgpu_ring pointer
+ *
+ * Write an end command to the ring.
+ */
+static void jpeg_v1_0_decode_ring_insert_end(struct amdgpu_ring *ring)
+{
+ struct amdgpu_device *adev = ring->adev;
+
+ amdgpu_ring_write(ring,
+ PACKETJ(SOC15_REG_OFFSET(JPEG, 0, mmUVD_JRBC_EXTERNAL_REG_BASE), 0, 0, PACKETJ_TYPE0));
+ amdgpu_ring_write(ring, 0x68e04);
+
+ amdgpu_ring_write(ring, PACKETJ(0, 0, 0, PACKETJ_TYPE0));
+ amdgpu_ring_write(ring, 0x00010000);
+}
+
+/**
+ * jpeg_v1_0_decode_ring_emit_fence - emit a fence & trap command
+ *
+ * @ring: amdgpu_ring pointer
+ * @addr: GPU address to write the fence sequence number to
+ * @seq: fence sequence number to emit
+ * @flags: fence flags
+ *
+ * Write a fence and a trap command to the ring.
+ */
+static void jpeg_v1_0_decode_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 seq,
+ unsigned flags)
+{
+ struct amdgpu_device *adev = ring->adev;
+
+ WARN_ON(flags & AMDGPU_FENCE_FLAG_64BIT);
+
+ amdgpu_ring_write(ring,
+ PACKETJ(SOC15_REG_OFFSET(JPEG, 0, mmUVD_JPEG_GPCOM_DATA0), 0, 0, PACKETJ_TYPE0));
+ amdgpu_ring_write(ring, seq);
+
+ amdgpu_ring_write(ring,
+ PACKETJ(SOC15_REG_OFFSET(JPEG, 0, mmUVD_JPEG_GPCOM_DATA1), 0, 0, PACKETJ_TYPE0));
+ amdgpu_ring_write(ring, seq);
+
+ amdgpu_ring_write(ring,
+ PACKETJ(SOC15_REG_OFFSET(JPEG, 0, mmUVD_LMI_JRBC_RB_MEM_WR_64BIT_BAR_LOW), 0, 0, PACKETJ_TYPE0));
+ amdgpu_ring_write(ring, lower_32_bits(addr));
+
+ amdgpu_ring_write(ring,
+ PACKETJ(SOC15_REG_OFFSET(JPEG, 0, mmUVD_LMI_JRBC_RB_MEM_WR_64BIT_BAR_HIGH), 0, 0, PACKETJ_TYPE0));
+ amdgpu_ring_write(ring, upper_32_bits(addr));
+
+ amdgpu_ring_write(ring,
+ PACKETJ(SOC15_REG_OFFSET(JPEG, 0, mmUVD_JPEG_GPCOM_CMD), 0, 0, PACKETJ_TYPE0));
+ amdgpu_ring_write(ring, 0x8);
+
+ amdgpu_ring_write(ring,
+ PACKETJ(SOC15_REG_OFFSET(JPEG, 0, mmUVD_JPEG_GPCOM_CMD), 0, PACKETJ_CONDITION_CHECK0, PACKETJ_TYPE4));
+ amdgpu_ring_write(ring, 0);
+
+ amdgpu_ring_write(ring,
+ PACKETJ(SOC15_REG_OFFSET(JPEG, 0, mmUVD_JRBC_RB_COND_RD_TIMER), 0, 0, PACKETJ_TYPE0));
+ amdgpu_ring_write(ring, 0x01400200);
+
+ amdgpu_ring_write(ring,
+ PACKETJ(SOC15_REG_OFFSET(JPEG, 0, mmUVD_JRBC_RB_REF_DATA), 0, 0, PACKETJ_TYPE0));
+ amdgpu_ring_write(ring, seq);
+
+ amdgpu_ring_write(ring,
+ PACKETJ(SOC15_REG_OFFSET(JPEG, 0, mmUVD_LMI_JRBC_RB_MEM_RD_64BIT_BAR_LOW), 0, 0, PACKETJ_TYPE0));
+ amdgpu_ring_write(ring, lower_32_bits(addr));
+
+ amdgpu_ring_write(ring,
+ PACKETJ(SOC15_REG_OFFSET(JPEG, 0, mmUVD_LMI_JRBC_RB_MEM_RD_64BIT_BAR_HIGH), 0, 0, PACKETJ_TYPE0));
+ amdgpu_ring_write(ring, upper_32_bits(addr));
+
+ amdgpu_ring_write(ring,
+ PACKETJ(0, 0, PACKETJ_CONDITION_CHECK3, PACKETJ_TYPE2));
+ amdgpu_ring_write(ring, 0xffffffff);
+
+ amdgpu_ring_write(ring,
+ PACKETJ(SOC15_REG_OFFSET(JPEG, 0, mmUVD_JRBC_EXTERNAL_REG_BASE), 0, 0, PACKETJ_TYPE0));
+ amdgpu_ring_write(ring, 0x3fbc);
+
+ amdgpu_ring_write(ring,
+ PACKETJ(0, 0, 0, PACKETJ_TYPE0));
+ amdgpu_ring_write(ring, 0x1);
+
+ /* emit trap */
+ amdgpu_ring_write(ring, PACKETJ(0, 0, 0, PACKETJ_TYPE7));
+ amdgpu_ring_write(ring, 0);
+}
+
+/**
+ * jpeg_v1_0_decode_ring_emit_ib - execute indirect buffer
+ *
+ * @ring: amdgpu_ring pointer
+ * @ib: indirect buffer to execute
+ *
+ * Write ring commands to execute the indirect buffer.
+ */
+static void jpeg_v1_0_decode_ring_emit_ib(struct amdgpu_ring *ring,
+ struct amdgpu_job *job,
+ struct amdgpu_ib *ib,
+ uint32_t flags)
+{
+ struct amdgpu_device *adev = ring->adev;
+ unsigned vmid = AMDGPU_JOB_GET_VMID(job);
+
+ amdgpu_ring_write(ring,
+ PACKETJ(SOC15_REG_OFFSET(JPEG, 0, mmUVD_LMI_JRBC_IB_VMID), 0, 0, PACKETJ_TYPE0));
+ amdgpu_ring_write(ring, (vmid | (vmid << 4)));
+
+ amdgpu_ring_write(ring,
+ PACKETJ(SOC15_REG_OFFSET(JPEG, 0, mmUVD_LMI_JPEG_VMID), 0, 0, PACKETJ_TYPE0));
+ amdgpu_ring_write(ring, (vmid | (vmid << 4)));
+
+ amdgpu_ring_write(ring,
+ PACKETJ(SOC15_REG_OFFSET(JPEG, 0, mmUVD_LMI_JRBC_IB_64BIT_BAR_LOW), 0, 0, PACKETJ_TYPE0));
+ amdgpu_ring_write(ring, lower_32_bits(ib->gpu_addr));
+
+ amdgpu_ring_write(ring,
+ PACKETJ(SOC15_REG_OFFSET(JPEG, 0, mmUVD_LMI_JRBC_IB_64BIT_BAR_HIGH), 0, 0, PACKETJ_TYPE0));
+ amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr));
+
+ amdgpu_ring_write(ring,
+ PACKETJ(SOC15_REG_OFFSET(JPEG, 0, mmUVD_JRBC_IB_SIZE), 0, 0, PACKETJ_TYPE0));
+ amdgpu_ring_write(ring, ib->length_dw);
+
+ amdgpu_ring_write(ring,
+ PACKETJ(SOC15_REG_OFFSET(JPEG, 0, mmUVD_LMI_JRBC_RB_MEM_RD_64BIT_BAR_LOW), 0, 0, PACKETJ_TYPE0));
+ amdgpu_ring_write(ring, lower_32_bits(ring->gpu_addr));
+
+ amdgpu_ring_write(ring,
+ PACKETJ(SOC15_REG_OFFSET(JPEG, 0, mmUVD_LMI_JRBC_RB_MEM_RD_64BIT_BAR_HIGH), 0, 0, PACKETJ_TYPE0));
+ amdgpu_ring_write(ring, upper_32_bits(ring->gpu_addr));
+
+ amdgpu_ring_write(ring,
+ PACKETJ(0, 0, PACKETJ_CONDITION_CHECK0, PACKETJ_TYPE2));
+ amdgpu_ring_write(ring, 0);
+
+ amdgpu_ring_write(ring,
+ PACKETJ(SOC15_REG_OFFSET(JPEG, 0, mmUVD_JRBC_RB_COND_RD_TIMER), 0, 0, PACKETJ_TYPE0));
+ amdgpu_ring_write(ring, 0x01400200);
+
+ amdgpu_ring_write(ring,
+ PACKETJ(SOC15_REG_OFFSET(JPEG, 0, mmUVD_JRBC_RB_REF_DATA), 0, 0, PACKETJ_TYPE0));
+ amdgpu_ring_write(ring, 0x2);
+
+ amdgpu_ring_write(ring,
+ PACKETJ(SOC15_REG_OFFSET(JPEG, 0, mmUVD_JRBC_STATUS), 0, PACKETJ_CONDITION_CHECK3, PACKETJ_TYPE3));
+ amdgpu_ring_write(ring, 0x2);
+}
+
+static void jpeg_v1_0_decode_ring_emit_reg_wait(struct amdgpu_ring *ring,
+ uint32_t reg, uint32_t val,
+ uint32_t mask)
+{
+ struct amdgpu_device *adev = ring->adev;
+ uint32_t reg_offset = (reg << 2);
+
+ amdgpu_ring_write(ring,
+ PACKETJ(SOC15_REG_OFFSET(JPEG, 0, mmUVD_JRBC_RB_COND_RD_TIMER), 0, 0, PACKETJ_TYPE0));
+ amdgpu_ring_write(ring, 0x01400200);
+
+ amdgpu_ring_write(ring,
+ PACKETJ(SOC15_REG_OFFSET(JPEG, 0, mmUVD_JRBC_RB_REF_DATA), 0, 0, PACKETJ_TYPE0));
+ amdgpu_ring_write(ring, val);
+
+ amdgpu_ring_write(ring,
+ PACKETJ(SOC15_REG_OFFSET(JPEG, 0, mmUVD_JRBC_EXTERNAL_REG_BASE), 0, 0, PACKETJ_TYPE0));
+ if (((reg_offset >= 0x1f800) && (reg_offset <= 0x21fff)) ||
+ ((reg_offset >= 0x1e000) && (reg_offset <= 0x1e1ff))) {
+ amdgpu_ring_write(ring, 0);
+ amdgpu_ring_write(ring,
+ PACKETJ((reg_offset >> 2), 0, 0, PACKETJ_TYPE3));
+ } else {
+ amdgpu_ring_write(ring, reg_offset);
+ amdgpu_ring_write(ring,
+ PACKETJ(0, 0, 0, PACKETJ_TYPE3));
+ }
+ amdgpu_ring_write(ring, mask);
+}
+
+static void jpeg_v1_0_decode_ring_emit_vm_flush(struct amdgpu_ring *ring,
+ unsigned vmid, uint64_t pd_addr)
+{
+ struct amdgpu_vmhub *hub = &ring->adev->vmhub[ring->funcs->vmhub];
+ uint32_t data0, data1, mask;
+
+ pd_addr = amdgpu_gmc_emit_flush_gpu_tlb(ring, vmid, pd_addr);
+
+ /* wait for register write */
+ data0 = hub->ctx0_ptb_addr_lo32 + vmid * 2;
+ data1 = lower_32_bits(pd_addr);
+ mask = 0xffffffff;
+ jpeg_v1_0_decode_ring_emit_reg_wait(ring, data0, data1, mask);
+}
+
+static void jpeg_v1_0_decode_ring_emit_wreg(struct amdgpu_ring *ring,
+ uint32_t reg, uint32_t val)
+{
+ struct amdgpu_device *adev = ring->adev;
+ uint32_t reg_offset = (reg << 2);
+
+ amdgpu_ring_write(ring,
+ PACKETJ(SOC15_REG_OFFSET(JPEG, 0, mmUVD_JRBC_EXTERNAL_REG_BASE), 0, 0, PACKETJ_TYPE0));
+ if (((reg_offset >= 0x1f800) && (reg_offset <= 0x21fff)) ||
+ ((reg_offset >= 0x1e000) && (reg_offset <= 0x1e1ff))) {
+ amdgpu_ring_write(ring, 0);
+ amdgpu_ring_write(ring,
+ PACKETJ((reg_offset >> 2), 0, 0, PACKETJ_TYPE0));
+ } else {
+ amdgpu_ring_write(ring, reg_offset);
+ amdgpu_ring_write(ring,
+ PACKETJ(0, 0, 0, PACKETJ_TYPE0));
+ }
+ amdgpu_ring_write(ring, val);
+}
+
+static void jpeg_v1_0_decode_ring_nop(struct amdgpu_ring *ring, uint32_t count)
+{
+ int i;
+
+ WARN_ON(ring->wptr % 2 || count % 2);
+
+ for (i = 0; i < count / 2; i++) {
+ amdgpu_ring_write(ring, PACKETJ(0, 0, 0, PACKETJ_TYPE6));
+ amdgpu_ring_write(ring, 0);
+ }
+}
+
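Each JPEG no-op is a two-dword pair, a PACKETJ_TYPE6 header plus a zero payload, which is why the helper warns when either the write pointer or the count is odd. A hedged usage sketch (the wrapper name is illustrative):

static void jpeg_pad_ring(struct amdgpu_ring *ring, uint32_t want_dw)
{
	jpeg_v1_0_decode_ring_nop(ring, ALIGN(want_dw, 2));	/* keep pairs */
}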
+static int jpeg_v1_0_set_interrupt_state(struct amdgpu_device *adev,
+ struct amdgpu_irq_src *source,
+ unsigned type,
+ enum amdgpu_interrupt_state state)
+{
+ return 0;
+}
+
+static int jpeg_v1_0_process_interrupt(struct amdgpu_device *adev,
+ struct amdgpu_irq_src *source,
+ struct amdgpu_iv_entry *entry)
+{
+ DRM_DEBUG("IH: JPEG decode TRAP\n");
+
+ switch (entry->src_id) {
+ case 126:
+ amdgpu_fence_process(&adev->jpeg.inst->ring_dec);
+ break;
+ default:
+ DRM_ERROR("Unhandled interrupt: %d %d\n",
+ entry->src_id, entry->src_data[0]);
+ break;
+ }
+
+ return 0;
+}
+
+/**
+ * jpeg_v1_0_early_init - set function pointers
+ *
+ * @handle: amdgpu_device pointer
+ *
+ * Set ring and irq function pointers
+ */
+int jpeg_v1_0_early_init(void *handle)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+ adev->jpeg.num_jpeg_inst = 1;
+
+ jpeg_v1_0_set_dec_ring_funcs(adev);
+ jpeg_v1_0_set_irq_funcs(adev);
+
+ return 0;
+}
+
+/**
+ * jpeg_v1_0_sw_init - sw init for JPEG block
+ *
+ * @handle: amdgpu_device pointer
+ *
+ * Initialize the JPEG decode ring and its trap interrupt.
+ */
+int jpeg_v1_0_sw_init(void *handle)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_ring *ring;
+ int r;
+
+ /* JPEG TRAP */
+ r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_VCN, 126, &adev->jpeg.inst->irq);
+ if (r)
+ return r;
+
+ ring = &adev->jpeg.inst->ring_dec;
+ sprintf(ring->name, "jpeg_dec");
+ r = amdgpu_ring_init(adev, ring, 512, &adev->jpeg.inst->irq, 0);
+ if (r)
+ return r;
+
+ adev->jpeg.internal.jpeg_pitch = adev->jpeg.inst->external.jpeg_pitch =
+ SOC15_REG_OFFSET(JPEG, 0, mmUVD_JPEG_PITCH);
+
+ return 0;
+}
+
+/**
+ * jpeg_v1_0_sw_fini - sw fini for JPEG block
+ *
+ * @handle: amdgpu_device pointer
+ *
+ * Free up the sw allocation for the JPEG block
+ */
+void jpeg_v1_0_sw_fini(void *handle)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+ amdgpu_ring_fini(&adev->jpeg.inst[0].ring_dec);
+}
+
+/**
+ * jpeg_v1_0_start - start JPEG block
+ *
+ * @adev: amdgpu_device pointer
+ * @mode: 0 to program the decode ring registers (non-zero skips that step)
+ *
+ * Setup and start the JPEG block
+ */
+void jpeg_v1_0_start(struct amdgpu_device *adev, int mode)
+{
+ struct amdgpu_ring *ring = &adev->jpeg.inst->ring_dec;
+
+ if (mode == 0) {
+ WREG32_SOC15(JPEG, 0, mmUVD_LMI_JRBC_RB_VMID, 0);
+ WREG32_SOC15(JPEG, 0, mmUVD_JRBC_RB_CNTL, UVD_JRBC_RB_CNTL__RB_NO_FETCH_MASK |
+ UVD_JRBC_RB_CNTL__RB_RPTR_WR_EN_MASK);
+ WREG32_SOC15(JPEG, 0, mmUVD_LMI_JRBC_RB_64BIT_BAR_LOW, lower_32_bits(ring->gpu_addr));
+ WREG32_SOC15(JPEG, 0, mmUVD_LMI_JRBC_RB_64BIT_BAR_HIGH, upper_32_bits(ring->gpu_addr));
+ WREG32_SOC15(JPEG, 0, mmUVD_JRBC_RB_RPTR, 0);
+ WREG32_SOC15(JPEG, 0, mmUVD_JRBC_RB_WPTR, 0);
+ WREG32_SOC15(JPEG, 0, mmUVD_JRBC_RB_CNTL, UVD_JRBC_RB_CNTL__RB_RPTR_WR_EN_MASK);
+ }
+
+ /* initialize wptr */
+ ring->wptr = RREG32_SOC15(JPEG, 0, mmUVD_JRBC_RB_WPTR);
+
+ /* copy patch commands to the jpeg ring */
+ jpeg_v1_0_decode_ring_set_patch_ring(ring,
+ (ring->wptr + ring->max_dw * amdgpu_sched_hw_submission));
+}
+
+static const struct amdgpu_ring_funcs jpeg_v1_0_decode_ring_vm_funcs = {
+ .type = AMDGPU_RING_TYPE_VCN_JPEG,
+ .align_mask = 0xf,
+ .nop = PACKET0(0x81ff, 0),
+ .support_64bit_ptrs = false,
+ .no_user_fence = true,
+ .vmhub = AMDGPU_MMHUB_0,
+ .extra_dw = 64,
+ .get_rptr = jpeg_v1_0_decode_ring_get_rptr,
+ .get_wptr = jpeg_v1_0_decode_ring_get_wptr,
+ .set_wptr = jpeg_v1_0_decode_ring_set_wptr,
+ .emit_frame_size =
+ 6 + 6 + /* hdp invalidate / flush */
+ SOC15_FLUSH_GPU_TLB_NUM_WREG * 6 +
+ SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 8 +
+ 8 + /* jpeg_v1_0_decode_ring_emit_vm_flush */
+ 26 + 26 + /* jpeg_v1_0_decode_ring_emit_fence x2 vm fence */
+ 6,
+ .emit_ib_size = 22, /* jpeg_v1_0_decode_ring_emit_ib */
+ .emit_ib = jpeg_v1_0_decode_ring_emit_ib,
+ .emit_fence = jpeg_v1_0_decode_ring_emit_fence,
+ .emit_vm_flush = jpeg_v1_0_decode_ring_emit_vm_flush,
+ .test_ring = amdgpu_jpeg_dec_ring_test_ring,
+ .test_ib = amdgpu_jpeg_dec_ring_test_ib,
+ .insert_nop = jpeg_v1_0_decode_ring_nop,
+ .insert_start = jpeg_v1_0_decode_ring_insert_start,
+ .insert_end = jpeg_v1_0_decode_ring_insert_end,
+ .pad_ib = amdgpu_ring_generic_pad_ib,
+ .begin_use = vcn_v1_0_ring_begin_use,
+ .end_use = amdgpu_vcn_ring_end_use,
+ .emit_wreg = jpeg_v1_0_decode_ring_emit_wreg,
+ .emit_reg_wait = jpeg_v1_0_decode_ring_emit_reg_wait,
+ .emit_reg_write_reg_wait = amdgpu_ring_emit_reg_write_reg_wait_helper,
+};
+
+static void jpeg_v1_0_set_dec_ring_funcs(struct amdgpu_device *adev)
+{
+ adev->jpeg.inst->ring_dec.funcs = &jpeg_v1_0_decode_ring_vm_funcs;
+ DRM_INFO("JPEG decode is enabled in VM mode\n");
+}
+
+static const struct amdgpu_irq_src_funcs jpeg_v1_0_irq_funcs = {
+ .set = jpeg_v1_0_set_interrupt_state,
+ .process = jpeg_v1_0_process_interrupt,
+};
+
+static void jpeg_v1_0_set_irq_funcs(struct amdgpu_device *adev)
+{
+ adev->jpeg.inst->irq.funcs = &jpeg_v1_0_irq_funcs;
+}
diff --git a/drivers/gpu/drm/amd/amdgpu/jpeg_v1_0.h b/drivers/gpu/drm/amd/amdgpu/jpeg_v1_0.h
new file mode 100644
index 000000000000..bbf33a6a3972
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/jpeg_v1_0.h
@@ -0,0 +1,32 @@
+/*
+ * Copyright 2019 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#ifndef __JPEG_V1_0_H__
+#define __JPEG_V1_0_H__
+
+int jpeg_v1_0_early_init(void *handle);
+int jpeg_v1_0_sw_init(void *handle);
+void jpeg_v1_0_sw_fini(void *handle);
+void jpeg_v1_0_start(struct amdgpu_device *adev, int mode);
+
+#endif /*__JPEG_V1_0_H__*/
diff --git a/drivers/gpu/drm/amd/amdgpu/jpeg_v2_0.c b/drivers/gpu/drm/amd/amdgpu/jpeg_v2_0.c
new file mode 100644
index 000000000000..a78292d84854
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/jpeg_v2_0.c
@@ -0,0 +1,827 @@
+/*
+ * Copyright 2019 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#include "amdgpu.h"
+#include "amdgpu_jpeg.h"
+#include "amdgpu_pm.h"
+#include "soc15.h"
+#include "soc15d.h"
+
+#include "vcn/vcn_2_0_0_offset.h"
+#include "vcn/vcn_2_0_0_sh_mask.h"
+#include "ivsrcid/vcn/irqsrcs_vcn_2_0.h"
+
+#define mmUVD_JRBC_EXTERNAL_REG_INTERNAL_OFFSET 0x1bfff
+#define mmUVD_JPEG_GPCOM_CMD_INTERNAL_OFFSET 0x4029
+#define mmUVD_JPEG_GPCOM_DATA0_INTERNAL_OFFSET 0x402a
+#define mmUVD_JPEG_GPCOM_DATA1_INTERNAL_OFFSET 0x402b
+#define mmUVD_LMI_JRBC_RB_MEM_WR_64BIT_BAR_LOW_INTERNAL_OFFSET 0x40ea
+#define mmUVD_LMI_JRBC_RB_MEM_WR_64BIT_BAR_HIGH_INTERNAL_OFFSET 0x40eb
+#define mmUVD_LMI_JRBC_IB_VMID_INTERNAL_OFFSET 0x40cf
+#define mmUVD_LMI_JPEG_VMID_INTERNAL_OFFSET 0x40d1
+#define mmUVD_LMI_JRBC_IB_64BIT_BAR_LOW_INTERNAL_OFFSET 0x40e8
+#define mmUVD_LMI_JRBC_IB_64BIT_BAR_HIGH_INTERNAL_OFFSET 0x40e9
+#define mmUVD_JRBC_IB_SIZE_INTERNAL_OFFSET 0x4082
+#define mmUVD_LMI_JRBC_RB_MEM_RD_64BIT_BAR_LOW_INTERNAL_OFFSET 0x40ec
+#define mmUVD_LMI_JRBC_RB_MEM_RD_64BIT_BAR_HIGH_INTERNAL_OFFSET 0x40ed
+#define mmUVD_JRBC_RB_COND_RD_TIMER_INTERNAL_OFFSET 0x4085
+#define mmUVD_JRBC_RB_REF_DATA_INTERNAL_OFFSET 0x4084
+#define mmUVD_JRBC_STATUS_INTERNAL_OFFSET 0x4089
+#define mmUVD_JPEG_PITCH_INTERNAL_OFFSET 0x401f
+
+#define JRBC_DEC_EXTERNAL_REG_WRITE_ADDR 0x18000
+
+static void jpeg_v2_0_set_dec_ring_funcs(struct amdgpu_device *adev);
+static void jpeg_v2_0_set_irq_funcs(struct amdgpu_device *adev);
+static int jpeg_v2_0_set_powergating_state(void *handle,
+ enum amd_powergating_state state);
+
+/**
+ * jpeg_v2_0_early_init - set function pointers
+ *
+ * @handle: amdgpu_device pointer
+ *
+ * Set ring and irq function pointers
+ */
+static int jpeg_v2_0_early_init(void *handle)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+ adev->jpeg.num_jpeg_inst = 1;
+
+ jpeg_v2_0_set_dec_ring_funcs(adev);
+ jpeg_v2_0_set_irq_funcs(adev);
+
+ return 0;
+}
+
+/**
+ * jpeg_v2_0_sw_init - sw init for JPEG block
+ *
+ * @handle: amdgpu_device pointer
+ *
+ * Load firmware and sw initialization
+ */
+static int jpeg_v2_0_sw_init(void *handle)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_ring *ring;
+ int r;
+
+ /* JPEG TRAP */
+ r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_VCN,
+ VCN_2_0__SRCID__JPEG_DECODE, &adev->jpeg.inst->irq);
+ if (r)
+ return r;
+
+ r = amdgpu_jpeg_sw_init(adev);
+ if (r)
+ return r;
+
+ r = amdgpu_jpeg_resume(adev);
+ if (r)
+ return r;
+
+ ring = &adev->jpeg.inst->ring_dec;
+ ring->use_doorbell = true;
+ ring->doorbell_index = (adev->doorbell_index.vcn.vcn_ring0_1 << 1) + 1;
+ sprintf(ring->name, "jpeg_dec");
+ r = amdgpu_ring_init(adev, ring, 512, &adev->jpeg.inst->irq, 0);
+ if (r)
+ return r;
+
+ adev->jpeg.internal.jpeg_pitch = mmUVD_JPEG_PITCH_INTERNAL_OFFSET;
+ adev->jpeg.inst->external.jpeg_pitch = SOC15_REG_OFFSET(JPEG, 0, mmUVD_JPEG_PITCH);
+
+ return 0;
+}
+
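The doorbell arithmetic assumes the usual 64-bit doorbell layout: vcn_ring0_1 is a 64-bit slot number, the << 1 converts it into a 32-bit doorbell index, and the + 1 selects the second half of the slot so the JPEG ring does not collide with the VCN decode ring (an interpretation of the code, hedged):

static u32 jpeg_v2_0_doorbell_index(struct amdgpu_device *adev)
{
	/* 2 * slot gives the 32-bit index; + 1 is the JPEG half */
	return (adev->doorbell_index.vcn.vcn_ring0_1 << 1) + 1;
}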
+/**
+ * jpeg_v2_0_sw_fini - sw fini for JPEG block
+ *
+ * @handle: amdgpu_device pointer
+ *
+ * Suspend the JPEG block and free up its sw allocation
+ */
+static int jpeg_v2_0_sw_fini(void *handle)
+{
+ int r;
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+ r = amdgpu_jpeg_suspend(adev);
+ if (r)
+ return r;
+
+ r = amdgpu_jpeg_sw_fini(adev);
+
+ return r;
+}
+
+/**
+ * jpeg_v2_0_hw_init - start and test JPEG block
+ *
+ * @handle: amdgpu_device pointer
+ *
+ * Program the JPEG doorbell range and test the decode ring.
+ */
+static int jpeg_v2_0_hw_init(void *handle)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_ring *ring = &adev->jpeg.inst->ring_dec;
+ int r;
+
+ adev->nbio.funcs->vcn_doorbell_range(adev, ring->use_doorbell,
+ (adev->doorbell_index.vcn.vcn_ring0_1 << 1), 0);
+
+ r = amdgpu_ring_test_helper(ring);
+ if (!r)
+ DRM_INFO("JPEG decode initialized successfully.\n");
+
+ return r;
+}
+
+/**
+ * jpeg_v2_0_hw_fini - stop the hardware block
+ *
+ * @handle: amdgpu_device pointer
+ *
+ * Stop the JPEG block and mark the ring as not ready any more
+ */
+static int jpeg_v2_0_hw_fini(void *handle)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_ring *ring = &adev->jpeg.inst->ring_dec;
+
+ if (adev->jpeg.cur_state != AMD_PG_STATE_GATE &&
+ RREG32_SOC15(JPEG, 0, mmUVD_JRBC_STATUS))
+ jpeg_v2_0_set_powergating_state(adev, AMD_PG_STATE_GATE);
+
+ ring->sched.ready = false;
+
+ return 0;
+}
+
+/**
+ * jpeg_v2_0_suspend - suspend JPEG block
+ *
+ * @handle: amdgpu_device pointer
+ *
+ * HW fini and suspend JPEG block
+ */
+static int jpeg_v2_0_suspend(void *handle)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ int r;
+
+ r = jpeg_v2_0_hw_fini(adev);
+ if (r)
+ return r;
+
+ r = amdgpu_jpeg_suspend(adev);
+
+ return r;
+}
+
+/**
+ * jpeg_v2_0_resume - resume JPEG block
+ *
+ * @handle: amdgpu_device pointer
+ *
+ * Resume firmware and hw init JPEG block
+ */
+static int jpeg_v2_0_resume(void *handle)
+{
+ int r;
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+ r = amdgpu_jpeg_resume(adev);
+ if (r)
+ return r;
+
+ r = jpeg_v2_0_hw_init(adev);
+
+ return r;
+}
+
+static int jpeg_v2_0_disable_power_gating(struct amdgpu_device *adev)
+{
+ uint32_t data;
+ int r = 0;
+
+ if (adev->pg_flags & AMD_PG_SUPPORT_JPEG) {
+ data = 1 << UVD_PGFSM_CONFIG__UVDJ_PWR_CONFIG__SHIFT;
+ WREG32(SOC15_REG_OFFSET(JPEG, 0, mmUVD_PGFSM_CONFIG), data);
+
+ SOC15_WAIT_ON_RREG(JPEG, 0,
+ mmUVD_PGFSM_STATUS, UVD_PGFSM_STATUS_UVDJ_PWR_ON,
+ UVD_PGFSM_STATUS__UVDJ_PWR_STATUS_MASK, r);
+
+ if (r) {
+ DRM_ERROR("amdgpu: JPEG disable power gating failed\n");
+ return r;
+ }
+ }
+
+	/* Remove the anti-hang mechanism to indicate that the UVDJ tile is ON */
+ data = RREG32(SOC15_REG_OFFSET(JPEG, 0, mmUVD_JPEG_POWER_STATUS)) & ~0x1;
+ WREG32(SOC15_REG_OFFSET(JPEG, 0, mmUVD_JPEG_POWER_STATUS), data);
+
+ return 0;
+}
+
+static int jpeg_v2_0_enable_power_gating(struct amdgpu_device* adev)
+{
+ if (adev->pg_flags & AMD_PG_SUPPORT_JPEG) {
+ uint32_t data;
+ int r = 0;
+
+ data = RREG32(SOC15_REG_OFFSET(JPEG, 0, mmUVD_JPEG_POWER_STATUS));
+ data &= ~UVD_JPEG_POWER_STATUS__JPEG_POWER_STATUS_MASK;
+ data |= 0x1; //UVD_JPEG_POWER_STATUS__JPEG_POWER_STATUS_TILES_OFF;
+ WREG32(SOC15_REG_OFFSET(JPEG, 0, mmUVD_JPEG_POWER_STATUS), data);
+
+ data = 2 << UVD_PGFSM_CONFIG__UVDJ_PWR_CONFIG__SHIFT;
+ WREG32(SOC15_REG_OFFSET(JPEG, 0, mmUVD_PGFSM_CONFIG), data);
+
+ SOC15_WAIT_ON_RREG(JPEG, 0, mmUVD_PGFSM_STATUS,
+ (2 << UVD_PGFSM_STATUS__UVDJ_PWR_STATUS__SHIFT),
+ UVD_PGFSM_STATUS__UVDJ_PWR_STATUS_MASK, r);
+
+ if (r) {
+ DRM_ERROR("amdgpu: JPEG enable power gating failed\n");
+ return r;
+ }
+ }
+
+ return 0;
+}
+
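Both power-gating helpers follow the same PGFSM handshake: write the requested power state into UVD_PGFSM_CONFIG, then poll UVD_PGFSM_STATUS through SOC15_WAIT_ON_RREG until the UVDJ status field reflects it. A generic sketch of such a poll, with an invented bounded loop standing in for the macro:

static int pgfsm_wait(uint32_t (*read_status)(void), uint32_t expect,
		      uint32_t mask)
{
	int retries = 1000;	/* illustrative bound */

	while (retries--) {
		if ((read_status() & mask) == expect)
			return 0;
		udelay(1);
	}
	return -ETIMEDOUT;
}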
+static void jpeg_v2_0_disable_clock_gating(struct amdgpu_device* adev)
+{
+ uint32_t data;
+
+ data = RREG32_SOC15(JPEG, 0, mmJPEG_CGC_CTRL);
+ if (adev->cg_flags & AMD_CG_SUPPORT_JPEG_MGCG)
+ data |= 1 << JPEG_CGC_CTRL__DYN_CLOCK_MODE__SHIFT;
+ else
+		data &= ~(1 << JPEG_CGC_CTRL__DYN_CLOCK_MODE__SHIFT);
+
+ data |= 1 << JPEG_CGC_CTRL__CLK_GATE_DLY_TIMER__SHIFT;
+ data |= 4 << JPEG_CGC_CTRL__CLK_OFF_DELAY__SHIFT;
+ WREG32_SOC15(JPEG, 0, mmJPEG_CGC_CTRL, data);
+
+ data = RREG32_SOC15(JPEG, 0, mmJPEG_CGC_GATE);
+ data &= ~(JPEG_CGC_GATE__JPEG_DEC_MASK
+ | JPEG_CGC_GATE__JPEG2_DEC_MASK
+ | JPEG_CGC_GATE__JPEG_ENC_MASK
+ | JPEG_CGC_GATE__JMCIF_MASK
+ | JPEG_CGC_GATE__JRBBM_MASK);
+ WREG32_SOC15(JPEG, 0, mmJPEG_CGC_GATE, data);
+}
+
+static void jpeg_v2_0_enable_clock_gating(struct amdgpu_device* adev)
+{
+ uint32_t data;
+
+ data = RREG32_SOC15(JPEG, 0, mmJPEG_CGC_CTRL);
+ if (adev->cg_flags & AMD_CG_SUPPORT_JPEG_MGCG)
+ data |= 1 << JPEG_CGC_CTRL__DYN_CLOCK_MODE__SHIFT;
+ else
+ data &= ~(1 << JPEG_CGC_CTRL__DYN_CLOCK_MODE__SHIFT);
+
+ data |= 1 << JPEG_CGC_CTRL__CLK_GATE_DLY_TIMER__SHIFT;
+ data |= 4 << JPEG_CGC_CTRL__CLK_OFF_DELAY__SHIFT;
+ WREG32_SOC15(JPEG, 0, mmJPEG_CGC_CTRL, data);
+
+ data = RREG32_SOC15(JPEG, 0, mmJPEG_CGC_GATE);
+ data |= (JPEG_CGC_GATE__JPEG_DEC_MASK
+ |JPEG_CGC_GATE__JPEG2_DEC_MASK
+ |JPEG_CGC_GATE__JPEG_ENC_MASK
+ |JPEG_CGC_GATE__JMCIF_MASK
+ |JPEG_CGC_GATE__JRBBM_MASK);
+ WREG32_SOC15(JPEG, 0, mmJPEG_CGC_GATE, data);
+}
+
+/**
+ * jpeg_v2_0_start - start JPEG block
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * Setup and start the JPEG block
+ */
+static int jpeg_v2_0_start(struct amdgpu_device *adev)
+{
+ struct amdgpu_ring *ring = &adev->jpeg.inst->ring_dec;
+ int r;
+
+ if (adev->pm.dpm_enabled)
+ amdgpu_dpm_enable_jpeg(adev, true);
+
+ /* disable power gating */
+ r = jpeg_v2_0_disable_power_gating(adev);
+ if (r)
+ return r;
+
+ /* JPEG disable CGC */
+ jpeg_v2_0_disable_clock_gating(adev);
+
+ WREG32_SOC15(JPEG, 0, mmJPEG_DEC_GFX10_ADDR_CONFIG, adev->gfx.config.gb_addr_config);
+
+ /* enable JMI channel */
+ WREG32_P(SOC15_REG_OFFSET(JPEG, 0, mmUVD_JMI_CNTL), 0,
+ ~UVD_JMI_CNTL__SOFT_RESET_MASK);
+
+ /* enable System Interrupt for JRBC */
+ WREG32_P(SOC15_REG_OFFSET(JPEG, 0, mmJPEG_SYS_INT_EN),
+ JPEG_SYS_INT_EN__DJRBC_MASK,
+ ~JPEG_SYS_INT_EN__DJRBC_MASK);
+
+ WREG32_SOC15(JPEG, 0, mmUVD_LMI_JRBC_RB_VMID, 0);
+ WREG32_SOC15(JPEG, 0, mmUVD_JRBC_RB_CNTL, (0x00000001L | 0x00000002L));
+ WREG32_SOC15(JPEG, 0, mmUVD_LMI_JRBC_RB_64BIT_BAR_LOW,
+ lower_32_bits(ring->gpu_addr));
+ WREG32_SOC15(JPEG, 0, mmUVD_LMI_JRBC_RB_64BIT_BAR_HIGH,
+ upper_32_bits(ring->gpu_addr));
+ WREG32_SOC15(JPEG, 0, mmUVD_JRBC_RB_RPTR, 0);
+ WREG32_SOC15(JPEG, 0, mmUVD_JRBC_RB_WPTR, 0);
+ WREG32_SOC15(JPEG, 0, mmUVD_JRBC_RB_CNTL, 0x00000002L);
+ WREG32_SOC15(JPEG, 0, mmUVD_JRBC_RB_SIZE, ring->ring_size / 4);
+ ring->wptr = RREG32_SOC15(JPEG, 0, mmUVD_JRBC_RB_WPTR);
+
+ return 0;
+}
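
jpeg_v2_0_start() programs the ring with its 64-bit GPU address split into low/high 32-bit halves and its size expressed in dwords (ring_size / 4). A small self-contained sketch of that split, using a hypothetical base address:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t gpu_addr = 0x0000008000100000ULL;	/* hypothetical ring base */
	uint32_t ring_size = 512 * 4;			/* bytes */

	uint32_t bar_low = (uint32_t)(gpu_addr & 0xffffffffu);
	uint32_t bar_high = (uint32_t)(gpu_addr >> 32);
	uint32_t size_dw = ring_size / 4;		/* the register takes dwords */

	printf("LOW=0x%08x HIGH=0x%08x SIZE=%u dwords\n",
	       bar_low, bar_high, size_dw);
	return 0;
}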
+
+/**
+ * jpeg_v2_0_stop - stop JPEG block
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * Stop the JPEG block
+ */
+static int jpeg_v2_0_stop(struct amdgpu_device *adev)
+{
+ int r;
+
+ /* reset JMI */
+ WREG32_P(SOC15_REG_OFFSET(JPEG, 0, mmUVD_JMI_CNTL),
+ UVD_JMI_CNTL__SOFT_RESET_MASK,
+ ~UVD_JMI_CNTL__SOFT_RESET_MASK);
+
+ /* enable JPEG CGC */
+ jpeg_v2_0_enable_clock_gating(adev);
+
+ /* enable power gating */
+ r = jpeg_v2_0_enable_power_gating(adev);
+ if (r)
+ return r;
+
+ if (adev->pm.dpm_enabled)
+ amdgpu_dpm_enable_jpeg(adev, false);
+
+ return 0;
+}
+
+/**
+ * jpeg_v2_0_dec_ring_get_rptr - get read pointer
+ *
+ * @ring: amdgpu_ring pointer
+ *
+ * Returns the current hardware read pointer
+ */
+static uint64_t jpeg_v2_0_dec_ring_get_rptr(struct amdgpu_ring *ring)
+{
+ struct amdgpu_device *adev = ring->adev;
+
+ return RREG32_SOC15(JPEG, 0, mmUVD_JRBC_RB_RPTR);
+}
+
+/**
+ * jpeg_v2_0_dec_ring_get_wptr - get write pointer
+ *
+ * @ring: amdgpu_ring pointer
+ *
+ * Returns the current hardware write pointer
+ */
+static uint64_t jpeg_v2_0_dec_ring_get_wptr(struct amdgpu_ring *ring)
+{
+ struct amdgpu_device *adev = ring->adev;
+
+ if (ring->use_doorbell)
+ return adev->wb.wb[ring->wptr_offs];
+ else
+ return RREG32_SOC15(JPEG, 0, mmUVD_JRBC_RB_WPTR);
+}
+
+/**
+ * jpeg_v2_0_dec_ring_set_wptr - set write pointer
+ *
+ * @ring: amdgpu_ring pointer
+ *
+ * Commits the write pointer to the hardware
+ */
+static void jpeg_v2_0_dec_ring_set_wptr(struct amdgpu_ring *ring)
+{
+ struct amdgpu_device *adev = ring->adev;
+
+ if (ring->use_doorbell) {
+ adev->wb.wb[ring->wptr_offs] = lower_32_bits(ring->wptr);
+ WDOORBELL32(ring->doorbell_index, lower_32_bits(ring->wptr));
+ } else {
+ WREG32_SOC15(JPEG, 0, mmUVD_JRBC_RB_WPTR, lower_32_bits(ring->wptr));
+ }
+}
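
The wptr helpers show the two commit paths: with a doorbell, the new write pointer is mirrored into a CPU-visible writeback slot and then rung through the doorbell aperture; without one it goes straight to the ring register. A hedged sketch of that branch, with hypothetical types and accessors:

#include <stdint.h>

struct sketch_ring {
	int use_doorbell;
	uint32_t doorbell_index;
	uint64_t wptr;
	uint32_t *wb_slot;	/* CPU-visible writeback slot */
};

/* Stand-ins for the WREG32/WDOORBELL32 macros. */
extern void mmio_write(uint32_t offset, uint32_t value);
extern void doorbell_write(uint32_t index, uint32_t value);

#define SKETCH_RB_WPTR_OFFSET 0x1234	/* hypothetical register offset */

static void sketch_set_wptr(struct sketch_ring *ring)
{
	uint32_t wptr32 = (uint32_t)ring->wptr;

	if (ring->use_doorbell) {
		*ring->wb_slot = wptr32;	/* keep the CPU copy current */
		doorbell_write(ring->doorbell_index, wptr32);
	} else {
		mmio_write(SKETCH_RB_WPTR_OFFSET, wptr32);
	}
}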
+
+/**
+ * jpeg_v2_0_dec_ring_insert_start - insert a start command
+ *
+ * @ring: amdgpu_ring pointer
+ *
+ * Write a start command to the ring.
+ */
+void jpeg_v2_0_dec_ring_insert_start(struct amdgpu_ring *ring)
+{
+ amdgpu_ring_write(ring, PACKETJ(mmUVD_JRBC_EXTERNAL_REG_INTERNAL_OFFSET,
+ 0, 0, PACKETJ_TYPE0));
+ amdgpu_ring_write(ring, 0x68e04);
+
+ amdgpu_ring_write(ring, PACKETJ(JRBC_DEC_EXTERNAL_REG_WRITE_ADDR,
+ 0, 0, PACKETJ_TYPE0));
+ amdgpu_ring_write(ring, 0x80010000);
+}
+
+/**
+ * jpeg_v2_0_dec_ring_insert_end - insert an end command
+ *
+ * @ring: amdgpu_ring pointer
+ *
+ * Write an end command to the ring.
+ */
+void jpeg_v2_0_dec_ring_insert_end(struct amdgpu_ring *ring)
+{
+ amdgpu_ring_write(ring, PACKETJ(mmUVD_JRBC_EXTERNAL_REG_INTERNAL_OFFSET,
+ 0, 0, PACKETJ_TYPE0));
+ amdgpu_ring_write(ring, 0x68e04);
+
+ amdgpu_ring_write(ring, PACKETJ(JRBC_DEC_EXTERNAL_REG_WRITE_ADDR,
+ 0, 0, PACKETJ_TYPE0));
+ amdgpu_ring_write(ring, 0x00010000);
+}
+
+/**
+ * jpeg_v2_0_dec_ring_emit_fence - emit a fence & trap command
+ *
+ * @ring: amdgpu_ring pointer
+ * @addr: address the fence sequence number is written to
+ * @seq: sequence number of the fence
+ * @flags: flags associated with the fence
+ *
+ * Write a fence and a trap command to the ring.
+ */
+void jpeg_v2_0_dec_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 seq,
+ unsigned flags)
+{
+ WARN_ON(flags & AMDGPU_FENCE_FLAG_64BIT);
+
+ amdgpu_ring_write(ring, PACKETJ(mmUVD_JPEG_GPCOM_DATA0_INTERNAL_OFFSET,
+ 0, 0, PACKETJ_TYPE0));
+ amdgpu_ring_write(ring, seq);
+
+ amdgpu_ring_write(ring, PACKETJ(mmUVD_JPEG_GPCOM_DATA1_INTERNAL_OFFSET,
+ 0, 0, PACKETJ_TYPE0));
+ amdgpu_ring_write(ring, seq);
+
+ amdgpu_ring_write(ring, PACKETJ(mmUVD_LMI_JRBC_RB_MEM_WR_64BIT_BAR_LOW_INTERNAL_OFFSET,
+ 0, 0, PACKETJ_TYPE0));
+ amdgpu_ring_write(ring, lower_32_bits(addr));
+
+ amdgpu_ring_write(ring, PACKETJ(mmUVD_LMI_JRBC_RB_MEM_WR_64BIT_BAR_HIGH_INTERNAL_OFFSET,
+ 0, 0, PACKETJ_TYPE0));
+ amdgpu_ring_write(ring, upper_32_bits(addr));
+
+ amdgpu_ring_write(ring, PACKETJ(mmUVD_JPEG_GPCOM_CMD_INTERNAL_OFFSET,
+ 0, 0, PACKETJ_TYPE0));
+ amdgpu_ring_write(ring, 0x8);
+
+ amdgpu_ring_write(ring, PACKETJ(mmUVD_JPEG_GPCOM_CMD_INTERNAL_OFFSET,
+ 0, PACKETJ_CONDITION_CHECK0, PACKETJ_TYPE4));
+ amdgpu_ring_write(ring, 0);
+
+ amdgpu_ring_write(ring, PACKETJ(mmUVD_JRBC_EXTERNAL_REG_INTERNAL_OFFSET,
+ 0, 0, PACKETJ_TYPE0));
+ amdgpu_ring_write(ring, 0x3fbc);
+
+ amdgpu_ring_write(ring, PACKETJ(JRBC_DEC_EXTERNAL_REG_WRITE_ADDR,
+ 0, 0, PACKETJ_TYPE0));
+ amdgpu_ring_write(ring, 0x1);
+
+ amdgpu_ring_write(ring, PACKETJ(0, 0, 0, PACKETJ_TYPE7));
+ amdgpu_ring_write(ring, 0);
+}
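
Each amdgpu_ring_write() pair above emits a PACKETJ header followed by one payload dword. Assuming the PACKETJ field layout from soc15d.h (register offset in bits 0-17, a secondary field in 18-23, condition in 24-27, packet type in 28-31), the header encoding can be sketched as:

#include <stdint.h>

/* Sketch of the PACKETJ header; the field layout is an assumption
 * based on soc15d.h, not a definitive restatement of it. */
static uint32_t packetj(uint32_t reg, uint32_t r, uint32_t cond,
			uint32_t type)
{
	return (reg & 0x3ffffu) | ((r & 0x3fu) << 18) |
	       ((cond & 0xfu) << 24) | ((type & 0xfu) << 28);
}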
+
+/**
+ * jpeg_v2_0_dec_ring_emit_ib - execute indirect buffer
+ *
+ * @ring: amdgpu_ring pointer
+ * @job: job to retrieve vmid from
+ * @ib: indirect buffer to execute
+ * @flags: flags associated with the indirect buffer
+ *
+ * Write ring commands to execute the indirect buffer.
+ */
+void jpeg_v2_0_dec_ring_emit_ib(struct amdgpu_ring *ring,
+ struct amdgpu_job *job,
+ struct amdgpu_ib *ib,
+ uint32_t flags)
+{
+ unsigned vmid = AMDGPU_JOB_GET_VMID(job);
+
+ amdgpu_ring_write(ring, PACKETJ(mmUVD_LMI_JRBC_IB_VMID_INTERNAL_OFFSET,
+ 0, 0, PACKETJ_TYPE0));
+ amdgpu_ring_write(ring, (vmid | (vmid << 4)));
+
+ amdgpu_ring_write(ring, PACKETJ(mmUVD_LMI_JPEG_VMID_INTERNAL_OFFSET,
+ 0, 0, PACKETJ_TYPE0));
+ amdgpu_ring_write(ring, (vmid | (vmid << 4)));
+
+ amdgpu_ring_write(ring, PACKETJ(mmUVD_LMI_JRBC_IB_64BIT_BAR_LOW_INTERNAL_OFFSET,
+ 0, 0, PACKETJ_TYPE0));
+ amdgpu_ring_write(ring, lower_32_bits(ib->gpu_addr));
+
+ amdgpu_ring_write(ring, PACKETJ(mmUVD_LMI_JRBC_IB_64BIT_BAR_HIGH_INTERNAL_OFFSET,
+ 0, 0, PACKETJ_TYPE0));
+ amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr));
+
+ amdgpu_ring_write(ring, PACKETJ(mmUVD_JRBC_IB_SIZE_INTERNAL_OFFSET,
+ 0, 0, PACKETJ_TYPE0));
+ amdgpu_ring_write(ring, ib->length_dw);
+
+ amdgpu_ring_write(ring, PACKETJ(mmUVD_LMI_JRBC_RB_MEM_RD_64BIT_BAR_LOW_INTERNAL_OFFSET,
+ 0, 0, PACKETJ_TYPE0));
+ amdgpu_ring_write(ring, lower_32_bits(ring->gpu_addr));
+
+ amdgpu_ring_write(ring, PACKETJ(mmUVD_LMI_JRBC_RB_MEM_RD_64BIT_BAR_HIGH_INTERNAL_OFFSET,
+ 0, 0, PACKETJ_TYPE0));
+ amdgpu_ring_write(ring, upper_32_bits(ring->gpu_addr));
+
+ amdgpu_ring_write(ring, PACKETJ(0, 0, PACKETJ_CONDITION_CHECK0, PACKETJ_TYPE2));
+ amdgpu_ring_write(ring, 0);
+
+ amdgpu_ring_write(ring, PACKETJ(mmUVD_JRBC_RB_COND_RD_TIMER_INTERNAL_OFFSET,
+ 0, 0, PACKETJ_TYPE0));
+ amdgpu_ring_write(ring, 0x01400200);
+
+ amdgpu_ring_write(ring, PACKETJ(mmUVD_JRBC_RB_REF_DATA_INTERNAL_OFFSET,
+ 0, 0, PACKETJ_TYPE0));
+ amdgpu_ring_write(ring, 0x2);
+
+ amdgpu_ring_write(ring, PACKETJ(mmUVD_JRBC_STATUS_INTERNAL_OFFSET,
+ 0, PACKETJ_CONDITION_CHECK3, PACKETJ_TYPE3));
+ amdgpu_ring_write(ring, 0x2);
+}
+
+void jpeg_v2_0_dec_ring_emit_reg_wait(struct amdgpu_ring *ring, uint32_t reg,
+ uint32_t val, uint32_t mask)
+{
+ uint32_t reg_offset = (reg << 2);
+
+ amdgpu_ring_write(ring, PACKETJ(mmUVD_JRBC_RB_COND_RD_TIMER_INTERNAL_OFFSET,
+ 0, 0, PACKETJ_TYPE0));
+ amdgpu_ring_write(ring, 0x01400200);
+
+ amdgpu_ring_write(ring, PACKETJ(mmUVD_JRBC_RB_REF_DATA_INTERNAL_OFFSET,
+ 0, 0, PACKETJ_TYPE0));
+ amdgpu_ring_write(ring, val);
+
+ amdgpu_ring_write(ring, PACKETJ(mmUVD_JRBC_EXTERNAL_REG_INTERNAL_OFFSET,
+ 0, 0, PACKETJ_TYPE0));
+ if (reg_offset >= 0x10000 && reg_offset <= 0x105ff) {
+ amdgpu_ring_write(ring, 0);
+ amdgpu_ring_write(ring,
+ PACKETJ((reg_offset >> 2), 0, 0, PACKETJ_TYPE3));
+ } else {
+ amdgpu_ring_write(ring, reg_offset);
+ amdgpu_ring_write(ring, PACKETJ(JRBC_DEC_EXTERNAL_REG_WRITE_ADDR,
+ 0, 0, PACKETJ_TYPE3));
+ }
+ amdgpu_ring_write(ring, mask);
+}
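
jpeg_v2_0_dec_ring_emit_reg_wait() converts the dword register index into a byte offset (reg << 2) and then picks one of two encodings: offsets inside the 0x10000-0x105ff window are addressed directly in the packet header, everything else goes through the external-register indirection. A standalone sketch of that decision (bounds taken from the code above):

#include <stdint.h>
#include <stdbool.h>

/* True if the byte offset falls in the directly addressable window. */
static bool in_direct_window(uint32_t reg_dword_index)
{
	uint32_t byte_offset = reg_dword_index << 2;

	return byte_offset >= 0x10000 && byte_offset <= 0x105ff;
}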
+
+void jpeg_v2_0_dec_ring_emit_vm_flush(struct amdgpu_ring *ring,
+ unsigned vmid, uint64_t pd_addr)
+{
+ struct amdgpu_vmhub *hub = &ring->adev->vmhub[ring->funcs->vmhub];
+ uint32_t data0, data1, mask;
+
+ pd_addr = amdgpu_gmc_emit_flush_gpu_tlb(ring, vmid, pd_addr);
+
+ /* wait for register write */
+ data0 = hub->ctx0_ptb_addr_lo32 + vmid * 2;
+ data1 = lower_32_bits(pd_addr);
+ mask = 0xffffffff;
+ jpeg_v2_0_dec_ring_emit_reg_wait(ring, data0, data1, mask);
+}
+
+void jpeg_v2_0_dec_ring_emit_wreg(struct amdgpu_ring *ring, uint32_t reg, uint32_t val)
+{
+ uint32_t reg_offset = (reg << 2);
+
+ amdgpu_ring_write(ring, PACKETJ(mmUVD_JRBC_EXTERNAL_REG_INTERNAL_OFFSET,
+ 0, 0, PACKETJ_TYPE0));
+ if (reg_offset >= 0x10000 && reg_offset <= 0x105ff) {
+ amdgpu_ring_write(ring, 0);
+ amdgpu_ring_write(ring,
+ PACKETJ((reg_offset >> 2), 0, 0, PACKETJ_TYPE0));
+ } else {
+ amdgpu_ring_write(ring, reg_offset);
+ amdgpu_ring_write(ring, PACKETJ(JRBC_DEC_EXTERNAL_REG_WRITE_ADDR,
+ 0, 0, PACKETJ_TYPE0));
+ }
+ amdgpu_ring_write(ring, val);
+}
+
+void jpeg_v2_0_dec_ring_nop(struct amdgpu_ring *ring, uint32_t count)
+{
+ int i;
+
+ WARN_ON(ring->wptr % 2 || count % 2);
+
+ for (i = 0; i < count / 2; i++) {
+ amdgpu_ring_write(ring, PACKETJ(0, 0, 0, PACKETJ_TYPE6));
+ amdgpu_ring_write(ring, 0);
+ }
+}
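
Nops on this ring only come in header/payload pairs, which is why the helper warns when wptr or count is odd. Any padding calculation therefore has to round the nop count up to an even value; a minimal sketch:

#include <stdint.h>

/* Round a requested nop count up to the next even value so nops can
 * always be emitted as header/payload pairs (see the WARN_ON above). */
static uint32_t even_nop_count(uint32_t count)
{
	return (count + 1) & ~1u;
}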
+
+static bool jpeg_v2_0_is_idle(void *handle)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+ return ((RREG32_SOC15(JPEG, 0, mmUVD_JRBC_STATUS) &
+ UVD_JRBC_STATUS__RB_JOB_DONE_MASK) ==
+ UVD_JRBC_STATUS__RB_JOB_DONE_MASK);
+}
+
+static int jpeg_v2_0_wait_for_idle(void *handle)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ int ret = 0;
+
+ SOC15_WAIT_ON_RREG(JPEG, 0, mmUVD_JRBC_STATUS, UVD_JRBC_STATUS__RB_JOB_DONE_MASK,
+ UVD_JRBC_STATUS__RB_JOB_DONE_MASK, ret);
+
+ return ret;
+}
+
+static int jpeg_v2_0_set_clockgating_state(void *handle,
+ enum amd_clockgating_state state)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ bool enable = (state == AMD_CG_STATE_GATE);
+
+ if (enable) {
+ if (!jpeg_v2_0_is_idle(handle))
+ return -EBUSY;
+ jpeg_v2_0_enable_clock_gating(adev);
+ } else {
+ jpeg_v2_0_disable_clock_gating(adev);
+ }
+
+ return 0;
+}
+
+static int jpeg_v2_0_set_powergating_state(void *handle,
+ enum amd_powergating_state state)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ int ret;
+
+ if (state == adev->jpeg.cur_state)
+ return 0;
+
+ if (state == AMD_PG_STATE_GATE)
+ ret = jpeg_v2_0_stop(adev);
+ else
+ ret = jpeg_v2_0_start(adev);
+
+ if (!ret)
+ adev->jpeg.cur_state = state;
+
+ return ret;
+}
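
set_powergating_state() is written to be idempotent: it compares against the cached cur_state, performs the transition, and only updates the cache on success, so a failed stop/start can be retried later. A minimal sketch of that guard with hypothetical names:

enum sketch_pg_state { SKETCH_PG_UNGATE, SKETCH_PG_GATE };

struct sketch_block {
	enum sketch_pg_state cur_state;
};

/* Stand-ins for jpeg_v2_0_stop()/jpeg_v2_0_start(). */
extern int sketch_stop(struct sketch_block *b);
extern int sketch_start(struct sketch_block *b);

static int sketch_set_pg(struct sketch_block *b, enum sketch_pg_state state)
{
	int ret;

	if (state == b->cur_state)
		return 0;			/* nothing to do */

	ret = (state == SKETCH_PG_GATE) ? sketch_stop(b) : sketch_start(b);
	if (!ret)
		b->cur_state = state;		/* cache only on success */

	return ret;
}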
+
+static int jpeg_v2_0_set_interrupt_state(struct amdgpu_device *adev,
+ struct amdgpu_irq_src *source,
+ unsigned type,
+ enum amdgpu_interrupt_state state)
+{
+ return 0;
+}
+
+static int jpeg_v2_0_process_interrupt(struct amdgpu_device *adev,
+ struct amdgpu_irq_src *source,
+ struct amdgpu_iv_entry *entry)
+{
+ DRM_DEBUG("IH: JPEG TRAP\n");
+
+ switch (entry->src_id) {
+ case VCN_2_0__SRCID__JPEG_DECODE:
+ amdgpu_fence_process(&adev->jpeg.inst->ring_dec);
+ break;
+ default:
+ DRM_ERROR("Unhandled interrupt: %d %d\n",
+ entry->src_id, entry->src_data[0]);
+ break;
+ }
+
+ return 0;
+}
+
+static const struct amd_ip_funcs jpeg_v2_0_ip_funcs = {
+ .name = "jpeg_v2_0",
+ .early_init = jpeg_v2_0_early_init,
+ .late_init = NULL,
+ .sw_init = jpeg_v2_0_sw_init,
+ .sw_fini = jpeg_v2_0_sw_fini,
+ .hw_init = jpeg_v2_0_hw_init,
+ .hw_fini = jpeg_v2_0_hw_fini,
+ .suspend = jpeg_v2_0_suspend,
+ .resume = jpeg_v2_0_resume,
+ .is_idle = jpeg_v2_0_is_idle,
+ .wait_for_idle = jpeg_v2_0_wait_for_idle,
+ .check_soft_reset = NULL,
+ .pre_soft_reset = NULL,
+ .soft_reset = NULL,
+ .post_soft_reset = NULL,
+ .set_clockgating_state = jpeg_v2_0_set_clockgating_state,
+ .set_powergating_state = jpeg_v2_0_set_powergating_state,
+};
+
+static const struct amdgpu_ring_funcs jpeg_v2_0_dec_ring_vm_funcs = {
+ .type = AMDGPU_RING_TYPE_VCN_JPEG,
+ .align_mask = 0xf,
+ .vmhub = AMDGPU_MMHUB_0,
+ .get_rptr = jpeg_v2_0_dec_ring_get_rptr,
+ .get_wptr = jpeg_v2_0_dec_ring_get_wptr,
+ .set_wptr = jpeg_v2_0_dec_ring_set_wptr,
+ .emit_frame_size =
+ SOC15_FLUSH_GPU_TLB_NUM_WREG * 6 +
+ SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 8 +
+ 8 + /* jpeg_v2_0_dec_ring_emit_vm_flush */
+ 18 + 18 + /* jpeg_v2_0_dec_ring_emit_fence x2 vm fence */
+ 8 + 16,
+ .emit_ib_size = 22, /* jpeg_v2_0_dec_ring_emit_ib */
+ .emit_ib = jpeg_v2_0_dec_ring_emit_ib,
+ .emit_fence = jpeg_v2_0_dec_ring_emit_fence,
+ .emit_vm_flush = jpeg_v2_0_dec_ring_emit_vm_flush,
+ .test_ring = amdgpu_jpeg_dec_ring_test_ring,
+ .test_ib = amdgpu_jpeg_dec_ring_test_ib,
+ .insert_nop = jpeg_v2_0_dec_ring_nop,
+ .insert_start = jpeg_v2_0_dec_ring_insert_start,
+ .insert_end = jpeg_v2_0_dec_ring_insert_end,
+ .pad_ib = amdgpu_ring_generic_pad_ib,
+ .begin_use = amdgpu_jpeg_ring_begin_use,
+ .end_use = amdgpu_jpeg_ring_end_use,
+ .emit_wreg = jpeg_v2_0_dec_ring_emit_wreg,
+ .emit_reg_wait = jpeg_v2_0_dec_ring_emit_reg_wait,
+ .emit_reg_write_reg_wait = amdgpu_ring_emit_reg_write_reg_wait_helper,
+};
+
+static void jpeg_v2_0_set_dec_ring_funcs(struct amdgpu_device *adev)
+{
+ adev->jpeg.inst->ring_dec.funcs = &jpeg_v2_0_dec_ring_vm_funcs;
+ DRM_INFO("JPEG decode is enabled in VM mode\n");
+}
+
+static const struct amdgpu_irq_src_funcs jpeg_v2_0_irq_funcs = {
+ .set = jpeg_v2_0_set_interrupt_state,
+ .process = jpeg_v2_0_process_interrupt,
+};
+
+static void jpeg_v2_0_set_irq_funcs(struct amdgpu_device *adev)
+{
+ adev->jpeg.inst->irq.num_types = 1;
+ adev->jpeg.inst->irq.funcs = &jpeg_v2_0_irq_funcs;
+}
+
+const struct amdgpu_ip_block_version jpeg_v2_0_ip_block =
+{
+ .type = AMD_IP_BLOCK_TYPE_JPEG,
+ .major = 2,
+ .minor = 0,
+ .rev = 0,
+ .funcs = &jpeg_v2_0_ip_funcs,
+};
diff --git a/drivers/gpu/drm/amd/amdgpu/jpeg_v2_0.h b/drivers/gpu/drm/amd/amdgpu/jpeg_v2_0.h
new file mode 100644
index 000000000000..15a344ed340f
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/jpeg_v2_0.h
@@ -0,0 +1,42 @@
+/*
+ * Copyright 2019 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#ifndef __JPEG_V2_0_H__
+#define __JPEG_V2_0_H__
+
+void jpeg_v2_0_dec_ring_insert_start(struct amdgpu_ring *ring);
+void jpeg_v2_0_dec_ring_insert_end(struct amdgpu_ring *ring);
+void jpeg_v2_0_dec_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 seq,
+ unsigned flags);
+void jpeg_v2_0_dec_ring_emit_ib(struct amdgpu_ring *ring, struct amdgpu_job *job,
+ struct amdgpu_ib *ib, uint32_t flags);
+void jpeg_v2_0_dec_ring_emit_reg_wait(struct amdgpu_ring *ring, uint32_t reg,
+ uint32_t val, uint32_t mask);
+void jpeg_v2_0_dec_ring_emit_vm_flush(struct amdgpu_ring *ring,
+ unsigned vmid, uint64_t pd_addr);
+void jpeg_v2_0_dec_ring_emit_wreg(struct amdgpu_ring *ring, uint32_t reg, uint32_t val);
+void jpeg_v2_0_dec_ring_nop(struct amdgpu_ring *ring, uint32_t count);
+
+extern const struct amdgpu_ip_block_version jpeg_v2_0_ip_block;
+
+#endif /* __JPEG_V2_0_H__ */
diff --git a/drivers/gpu/drm/amd/amdgpu/jpeg_v2_5.c b/drivers/gpu/drm/amd/amdgpu/jpeg_v2_5.c
new file mode 100644
index 000000000000..2c58939e6ad0
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/jpeg_v2_5.c
@@ -0,0 +1,641 @@
+/*
+ * Copyright 2019 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#include "amdgpu.h"
+#include "amdgpu_jpeg.h"
+#include "soc15.h"
+#include "soc15d.h"
+#include "jpeg_v2_0.h"
+
+#include "vcn/vcn_2_5_offset.h"
+#include "vcn/vcn_2_5_sh_mask.h"
+#include "ivsrcid/vcn/irqsrcs_vcn_2_0.h"
+
+#define mmUVD_JPEG_PITCH_INTERNAL_OFFSET 0x401f
+
+#define JPEG25_MAX_HW_INSTANCES_ARCTURUS 2
+
+static void jpeg_v2_5_set_dec_ring_funcs(struct amdgpu_device *adev);
+static void jpeg_v2_5_set_irq_funcs(struct amdgpu_device *adev);
+static int jpeg_v2_5_set_powergating_state(void *handle,
+ enum amd_powergating_state state);
+
+static int amdgpu_ih_clientid_jpeg[] = {
+ SOC15_IH_CLIENTID_VCN,
+ SOC15_IH_CLIENTID_VCN1
+};
+
+/**
+ * jpeg_v2_5_early_init - set function pointers
+ *
+ * @handle: amdgpu_device pointer
+ *
+ * Set ring and irq function pointers
+ */
+static int jpeg_v2_5_early_init(void *handle)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+ if (adev->asic_type == CHIP_ARCTURUS) {
+ u32 harvest;
+ int i;
+
+ adev->jpeg.num_jpeg_inst = JPEG25_MAX_HW_INSTANCES_ARCTURUS;
+ for (i = 0; i < adev->jpeg.num_jpeg_inst; i++) {
+ harvest = RREG32_SOC15(JPEG, i, mmCC_UVD_HARVESTING);
+ if (harvest & CC_UVD_HARVESTING__UVD_DISABLE_MASK)
+ adev->jpeg.harvest_config |= 1 << i;
+ }
+
+ if (adev->jpeg.harvest_config == (AMDGPU_JPEG_HARVEST_JPEG0 |
+ AMDGPU_JPEG_HARVEST_JPEG1))
+ return -ENOENT;
+ } else {
+ adev->jpeg.num_jpeg_inst = 1;
+ }
+
+ jpeg_v2_5_set_dec_ring_funcs(adev);
+ jpeg_v2_5_set_irq_funcs(adev);
+
+ return 0;
+}
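
On Arcturus each harvested instance sets one bit in harvest_config, and early init only fails with -ENOENT when every instance is fused off. A standalone sketch of that accounting, with a hypothetical fuse register and bit:

#include <stdint.h>

#define NUM_INST	2
#define ALL_HARVESTED	((1u << NUM_INST) - 1)

extern uint32_t read_harvest_reg(int inst);	/* stand-in for RREG32_SOC15 */
#define HARVEST_DISABLE_MASK 0x2		/* hypothetical fuse bit */

/* Returns 0 when at least one instance survives, -1 otherwise
 * (the driver reports -ENOENT in that case). */
static int sketch_probe_harvest(uint32_t *harvest_config)
{
	int i;

	*harvest_config = 0;
	for (i = 0; i < NUM_INST; i++) {
		if (read_harvest_reg(i) & HARVEST_DISABLE_MASK)
			*harvest_config |= 1u << i;	/* instance fused off */
	}

	return (*harvest_config == ALL_HARVESTED) ? -1 : 0;
}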
+
+/**
+ * jpeg_v2_5_sw_init - sw init for JPEG block
+ *
+ * @handle: amdgpu_device pointer
+ *
+ * Load firmware and sw initialization
+ */
+static int jpeg_v2_5_sw_init(void *handle)
+{
+ struct amdgpu_ring *ring;
+ int i, r;
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+ for (i = 0; i < adev->jpeg.num_jpeg_inst; ++i) {
+ if (adev->jpeg.harvest_config & (1 << i))
+ continue;
+
+ /* JPEG TRAP */
+ r = amdgpu_irq_add_id(adev, amdgpu_ih_clientid_jpeg[i],
+ VCN_2_0__SRCID__JPEG_DECODE, &adev->jpeg.inst[i].irq);
+ if (r)
+ return r;
+ }
+
+ r = amdgpu_jpeg_sw_init(adev);
+ if (r)
+ return r;
+
+ r = amdgpu_jpeg_resume(adev);
+ if (r)
+ return r;
+
+ for (i = 0; i < adev->jpeg.num_jpeg_inst; ++i) {
+ if (adev->jpeg.harvest_config & (1 << i))
+ continue;
+
+ ring = &adev->jpeg.inst[i].ring_dec;
+ ring->use_doorbell = true;
+ ring->doorbell_index = (adev->doorbell_index.vcn.vcn_ring0_1 << 1) + 1 + 8 * i;
+ sprintf(ring->name, "jpeg_dec_%d", i);
+ r = amdgpu_ring_init(adev, ring, 512, &adev->jpeg.inst[i].irq, 0);
+ if (r)
+ return r;
+
+ adev->jpeg.internal.jpeg_pitch = mmUVD_JPEG_PITCH_INTERNAL_OFFSET;
+ adev->jpeg.inst[i].external.jpeg_pitch = SOC15_REG_OFFSET(JPEG, i, mmUVD_JPEG_PITCH);
+ }
+
+ return 0;
+}
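
The per-instance doorbell index is derived from the shared VCN ring 0/1 slot: shifted left once (two 32-bit doorbells per 64-bit slot), plus 1 for the JPEG ring, stepped by 8 per instance. Worked out for both Arcturus instances with a hypothetical base value:

#include <stdio.h>

int main(void)
{
	unsigned int vcn_ring0_1 = 0x10;	/* hypothetical base slot */
	int i;

	for (i = 0; i < 2; i++) {
		unsigned int idx = (vcn_ring0_1 << 1) + 1 + 8 * i;
		printf("jpeg_dec_%d doorbell index = %u\n", i, idx);
	}
	return 0;
}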
+
+/**
+ * jpeg_v2_5_sw_fini - sw fini for JPEG block
+ *
+ * @handle: amdgpu_device pointer
+ *
+ * JPEG suspend and free up sw allocation
+ */
+static int jpeg_v2_5_sw_fini(void *handle)
+{
+ int r;
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+ r = amdgpu_jpeg_suspend(adev);
+ if (r)
+ return r;
+
+ r = amdgpu_jpeg_sw_fini(adev);
+
+ return r;
+}
+
+/**
+ * jpeg_v2_5_hw_init - start and test JPEG block
+ *
+ * @handle: amdgpu_device pointer
+ *
+ */
+static int jpeg_v2_5_hw_init(void *handle)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_ring *ring;
+ int i, r;
+
+ for (i = 0; i < adev->jpeg.num_jpeg_inst; ++i) {
+ if (adev->jpeg.harvest_config & (1 << i))
+ continue;
+
+ ring = &adev->jpeg.inst[i].ring_dec;
+ adev->nbio.funcs->vcn_doorbell_range(adev, ring->use_doorbell,
+ (adev->doorbell_index.vcn.vcn_ring0_1 << 1) + 8 * i, i);
+
+ r = amdgpu_ring_test_helper(ring);
+ if (r)
+ return r;
+ }
+
+ DRM_INFO("JPEG decode initialized successfully.\n");
+
+ return 0;
+}
+
+/**
+ * jpeg_v2_5_hw_fini - stop the hardware block
+ *
+ * @handle: amdgpu_device pointer
+ *
+ * Stop the JPEG block and mark the ring as not ready anymore
+ */
+static int jpeg_v2_5_hw_fini(void *handle)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_ring *ring;
+ int i;
+
+ for (i = 0; i < adev->jpeg.num_jpeg_inst; ++i) {
+ if (adev->jpeg.harvest_config & (1 << i))
+ continue;
+
+ ring = &adev->jpeg.inst[i].ring_dec;
+ if (adev->jpeg.cur_state != AMD_PG_STATE_GATE &&
+ RREG32_SOC15(JPEG, i, mmUVD_JRBC_STATUS))
+ jpeg_v2_5_set_powergating_state(adev, AMD_PG_STATE_GATE);
+
+ ring->sched.ready = false;
+ }
+
+ return 0;
+}
+
+/**
+ * jpeg_v2_5_suspend - suspend JPEG block
+ *
+ * @handle: amdgpu_device pointer
+ *
+ * HW fini and suspend JPEG block
+ */
+static int jpeg_v2_5_suspend(void *handle)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ int r;
+
+ r = jpeg_v2_5_hw_fini(adev);
+ if (r)
+ return r;
+
+ r = amdgpu_jpeg_suspend(adev);
+
+ return r;
+}
+
+/**
+ * jpeg_v2_5_resume - resume JPEG block
+ *
+ * @handle: amdgpu_device pointer
+ *
+ * Resume firmware and hw init JPEG block
+ */
+static int jpeg_v2_5_resume(void *handle)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ int r;
+
+ r = amdgpu_jpeg_resume(adev);
+ if (r)
+ return r;
+
+ r = jpeg_v2_5_hw_init(adev);
+
+ return r;
+}
+
+static void jpeg_v2_5_disable_clock_gating(struct amdgpu_device *adev, int inst)
+{
+ uint32_t data;
+
+ data = RREG32_SOC15(JPEG, inst, mmJPEG_CGC_CTRL);
+ if (adev->cg_flags & AMD_CG_SUPPORT_JPEG_MGCG)
+ data |= 1 << JPEG_CGC_CTRL__DYN_CLOCK_MODE__SHIFT;
+ else
+ data &= ~(1 << JPEG_CGC_CTRL__DYN_CLOCK_MODE__SHIFT);
+
+ data |= 1 << JPEG_CGC_CTRL__CLK_GATE_DLY_TIMER__SHIFT;
+ data |= 4 << JPEG_CGC_CTRL__CLK_OFF_DELAY__SHIFT;
+ WREG32_SOC15(JPEG, inst, mmJPEG_CGC_CTRL, data);
+
+ data = RREG32_SOC15(JPEG, inst, mmJPEG_CGC_GATE);
+ data &= ~(JPEG_CGC_GATE__JPEG_DEC_MASK
+ | JPEG_CGC_GATE__JPEG2_DEC_MASK
+ | JPEG_CGC_GATE__JPEG_ENC_MASK
+ | JPEG_CGC_GATE__JMCIF_MASK
+ | JPEG_CGC_GATE__JRBBM_MASK);
+ WREG32_SOC15(JPEG, inst, mmJPEG_CGC_GATE, data);
+
+ data = RREG32_SOC15(JPEG, inst, mmJPEG_CGC_CTRL);
+ data &= ~(JPEG_CGC_CTRL__JPEG_DEC_MODE_MASK
+ | JPEG_CGC_CTRL__JPEG2_DEC_MODE_MASK
+ | JPEG_CGC_CTRL__JMCIF_MODE_MASK
+ | JPEG_CGC_CTRL__JRBBM_MODE_MASK);
+ WREG32_SOC15(JPEG, inst, mmJPEG_CGC_CTRL, data);
+}
+
+static void jpeg_v2_5_enable_clock_gating(struct amdgpu_device *adev, int inst)
+{
+ uint32_t data;
+
+ data = RREG32_SOC15(JPEG, inst, mmJPEG_CGC_GATE);
+ data |= (JPEG_CGC_GATE__JPEG_DEC_MASK
+ |JPEG_CGC_GATE__JPEG2_DEC_MASK
+ |JPEG_CGC_GATE__JPEG_ENC_MASK
+ |JPEG_CGC_GATE__JMCIF_MASK
+ |JPEG_CGC_GATE__JRBBM_MASK);
+ WREG32_SOC15(JPEG, inst, mmJPEG_CGC_GATE, data);
+}
+
+/**
+ * jpeg_v2_5_start - start JPEG block
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * Setup and start the JPEG block
+ */
+static int jpeg_v2_5_start(struct amdgpu_device *adev)
+{
+ struct amdgpu_ring *ring;
+ int i;
+
+ for (i = 0; i < adev->jpeg.num_jpeg_inst; ++i) {
+ if (adev->jpeg.harvest_config & (1 << i))
+ continue;
+
+ ring = &adev->jpeg.inst[i].ring_dec;
+ /* disable anti hang mechanism */
+ WREG32_P(SOC15_REG_OFFSET(JPEG, i, mmUVD_JPEG_POWER_STATUS), 0,
+ ~UVD_JPEG_POWER_STATUS__JPEG_POWER_STATUS_MASK);
+
+ /* JPEG disable CGC */
+ jpeg_v2_5_disable_clock_gating(adev, i);
+
+ /* MJPEG global tiling registers */
+ WREG32_SOC15(JPEG, i, mmJPEG_DEC_GFX8_ADDR_CONFIG,
+ adev->gfx.config.gb_addr_config);
+ WREG32_SOC15(JPEG, i, mmJPEG_DEC_GFX10_ADDR_CONFIG,
+ adev->gfx.config.gb_addr_config);
+
+ /* enable JMI channel */
+ WREG32_P(SOC15_REG_OFFSET(JPEG, i, mmUVD_JMI_CNTL), 0,
+ ~UVD_JMI_CNTL__SOFT_RESET_MASK);
+
+ /* enable System Interrupt for JRBC */
+ WREG32_P(SOC15_REG_OFFSET(JPEG, i, mmJPEG_SYS_INT_EN),
+ JPEG_SYS_INT_EN__DJRBC_MASK,
+ ~JPEG_SYS_INT_EN__DJRBC_MASK);
+
+ WREG32_SOC15(JPEG, i, mmUVD_LMI_JRBC_RB_VMID, 0);
+ WREG32_SOC15(JPEG, i, mmUVD_JRBC_RB_CNTL, (0x00000001L | 0x00000002L));
+ WREG32_SOC15(JPEG, i, mmUVD_LMI_JRBC_RB_64BIT_BAR_LOW,
+ lower_32_bits(ring->gpu_addr));
+ WREG32_SOC15(JPEG, i, mmUVD_LMI_JRBC_RB_64BIT_BAR_HIGH,
+ upper_32_bits(ring->gpu_addr));
+ WREG32_SOC15(JPEG, i, mmUVD_JRBC_RB_RPTR, 0);
+ WREG32_SOC15(JPEG, i, mmUVD_JRBC_RB_WPTR, 0);
+ WREG32_SOC15(JPEG, i, mmUVD_JRBC_RB_CNTL, 0x00000002L);
+ WREG32_SOC15(JPEG, i, mmUVD_JRBC_RB_SIZE, ring->ring_size / 4);
+ ring->wptr = RREG32_SOC15(JPEG, i, mmUVD_JRBC_RB_WPTR);
+ }
+
+ return 0;
+}
+
+/**
+ * jpeg_v2_5_stop - stop JPEG block
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * Stop the JPEG block
+ */
+static int jpeg_v2_5_stop(struct amdgpu_device *adev)
+{
+ int i;
+
+ for (i = 0; i < adev->jpeg.num_jpeg_inst; ++i) {
+ if (adev->jpeg.harvest_config & (1 << i))
+ continue;
+
+ /* reset JMI */
+ WREG32_P(SOC15_REG_OFFSET(JPEG, i, mmUVD_JMI_CNTL),
+ UVD_JMI_CNTL__SOFT_RESET_MASK,
+ ~UVD_JMI_CNTL__SOFT_RESET_MASK);
+
+ jpeg_v2_5_enable_clock_gating(adev, i);
+
+ /* enable anti hang mechanism */
+ WREG32_P(SOC15_REG_OFFSET(JPEG, i, mmUVD_JPEG_POWER_STATUS),
+ UVD_JPEG_POWER_STATUS__JPEG_POWER_STATUS_MASK,
+ ~UVD_JPEG_POWER_STATUS__JPEG_POWER_STATUS_MASK);
+ }
+
+ return 0;
+}
+
+/**
+ * jpeg_v2_5_dec_ring_get_rptr - get read pointer
+ *
+ * @ring: amdgpu_ring pointer
+ *
+ * Returns the current hardware read pointer
+ */
+static uint64_t jpeg_v2_5_dec_ring_get_rptr(struct amdgpu_ring *ring)
+{
+ struct amdgpu_device *adev = ring->adev;
+
+ return RREG32_SOC15(JPEG, ring->me, mmUVD_JRBC_RB_RPTR);
+}
+
+/**
+ * jpeg_v2_5_dec_ring_get_wptr - get write pointer
+ *
+ * @ring: amdgpu_ring pointer
+ *
+ * Returns the current hardware write pointer
+ */
+static uint64_t jpeg_v2_5_dec_ring_get_wptr(struct amdgpu_ring *ring)
+{
+ struct amdgpu_device *adev = ring->adev;
+
+ if (ring->use_doorbell)
+ return adev->wb.wb[ring->wptr_offs];
+ else
+ return RREG32_SOC15(JPEG, ring->me, mmUVD_JRBC_RB_WPTR);
+}
+
+/**
+ * jpeg_v2_5_dec_ring_set_wptr - set write pointer
+ *
+ * @ring: amdgpu_ring pointer
+ *
+ * Commits the write pointer to the hardware
+ */
+static void jpeg_v2_5_dec_ring_set_wptr(struct amdgpu_ring *ring)
+{
+ struct amdgpu_device *adev = ring->adev;
+
+ if (ring->use_doorbell) {
+ adev->wb.wb[ring->wptr_offs] = lower_32_bits(ring->wptr);
+ WDOORBELL32(ring->doorbell_index, lower_32_bits(ring->wptr));
+ } else {
+ WREG32_SOC15(JPEG, ring->me, mmUVD_JRBC_RB_WPTR, lower_32_bits(ring->wptr));
+ }
+}
+
+static bool jpeg_v2_5_is_idle(void *handle)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ int i, ret = 1;
+
+ for (i = 0; i < adev->jpeg.num_jpeg_inst; ++i) {
+ if (adev->jpeg.harvest_config & (1 << i))
+ continue;
+
+ ret &= (((RREG32_SOC15(JPEG, i, mmUVD_JRBC_STATUS) &
+ UVD_JRBC_STATUS__RB_JOB_DONE_MASK) ==
+ UVD_JRBC_STATUS__RB_JOB_DONE_MASK));
+ }
+
+ return ret;
+}
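
jpeg_v2_5_is_idle() ANDs the per-instance job-done checks together, so the block only reports idle once every unharvested instance is idle. A compact sketch of that aggregation:

#include <stdbool.h>
#include <stdint.h>

extern bool instance_idle(int i);	/* stand-in for the JRBC_STATUS check */

static bool all_idle(int num_inst, uint32_t harvest_config)
{
	bool idle = true;
	int i;

	for (i = 0; i < num_inst; i++) {
		if (harvest_config & (1u << i))
			continue;	/* skip fused-off instances */
		idle = idle && instance_idle(i);
	}
	return idle;
}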
+
+static int jpeg_v2_5_wait_for_idle(void *handle)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ int i, ret = 0;
+
+ for (i = 0; i < adev->jpeg.num_jpeg_inst; ++i) {
+ if (adev->jpeg.harvest_config & (1 << i))
+ continue;
+
+ SOC15_WAIT_ON_RREG(JPEG, i, mmUVD_JRBC_STATUS,
+ UVD_JRBC_STATUS__RB_JOB_DONE_MASK,
+ UVD_JRBC_STATUS__RB_JOB_DONE_MASK, ret);
+ if (ret)
+ return ret;
+ }
+
+ return ret;
+}
+
+static int jpeg_v2_5_set_clockgating_state(void *handle,
+ enum amd_clockgating_state state)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ bool enable = (state == AMD_CG_STATE_GATE);
+ int i;
+
+ for (i = 0; i < adev->jpeg.num_jpeg_inst; ++i) {
+ if (adev->jpeg.harvest_config & (1 << i))
+ continue;
+
+ if (enable) {
+ if (!jpeg_v2_5_is_idle(handle))
+ return -EBUSY;
+ jpeg_v2_5_enable_clock_gating(adev, i);
+ } else {
+ jpeg_v2_5_disable_clock_gating(adev, i);
+ }
+ }
+
+ return 0;
+}
+
+static int jpeg_v2_5_set_powergating_state(void *handle,
+ enum amd_powergating_state state)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ int ret;
+
+ if (state == adev->jpeg.cur_state)
+ return 0;
+
+ if (state == AMD_PG_STATE_GATE)
+ ret = jpeg_v2_5_stop(adev);
+ else
+ ret = jpeg_v2_5_start(adev);
+
+ if (!ret)
+ adev->jpeg.cur_state = state;
+
+ return ret;
+}
+
+static int jpeg_v2_5_set_interrupt_state(struct amdgpu_device *adev,
+ struct amdgpu_irq_src *source,
+ unsigned type,
+ enum amdgpu_interrupt_state state)
+{
+ return 0;
+}
+
+static int jpeg_v2_5_process_interrupt(struct amdgpu_device *adev,
+ struct amdgpu_irq_src *source,
+ struct amdgpu_iv_entry *entry)
+{
+ uint32_t ip_instance;
+
+ switch (entry->client_id) {
+ case SOC15_IH_CLIENTID_VCN:
+ ip_instance = 0;
+ break;
+ case SOC15_IH_CLIENTID_VCN1:
+ ip_instance = 1;
+ break;
+ default:
+ DRM_ERROR("Unhandled client id: %d\n", entry->client_id);
+ return 0;
+ }
+
+ DRM_DEBUG("IH: JPEG TRAP\n");
+
+ switch (entry->src_id) {
+ case VCN_2_0__SRCID__JPEG_DECODE:
+ amdgpu_fence_process(&adev->jpeg.inst[ip_instance].ring_dec);
+ break;
+ default:
+ DRM_ERROR("Unhandled interrupt: %d %d\n",
+ entry->src_id, entry->src_data[0]);
+ break;
+ }
+
+ return 0;
+}
+
+static const struct amd_ip_funcs jpeg_v2_5_ip_funcs = {
+ .name = "jpeg_v2_5",
+ .early_init = jpeg_v2_5_early_init,
+ .late_init = NULL,
+ .sw_init = jpeg_v2_5_sw_init,
+ .sw_fini = jpeg_v2_5_sw_fini,
+ .hw_init = jpeg_v2_5_hw_init,
+ .hw_fini = jpeg_v2_5_hw_fini,
+ .suspend = jpeg_v2_5_suspend,
+ .resume = jpeg_v2_5_resume,
+ .is_idle = jpeg_v2_5_is_idle,
+ .wait_for_idle = jpeg_v2_5_wait_for_idle,
+ .check_soft_reset = NULL,
+ .pre_soft_reset = NULL,
+ .soft_reset = NULL,
+ .post_soft_reset = NULL,
+ .set_clockgating_state = jpeg_v2_5_set_clockgating_state,
+ .set_powergating_state = jpeg_v2_5_set_powergating_state,
+};
+
+static const struct amdgpu_ring_funcs jpeg_v2_5_dec_ring_vm_funcs = {
+ .type = AMDGPU_RING_TYPE_VCN_JPEG,
+ .align_mask = 0xf,
+ .vmhub = AMDGPU_MMHUB_1,
+ .get_rptr = jpeg_v2_5_dec_ring_get_rptr,
+ .get_wptr = jpeg_v2_5_dec_ring_get_wptr,
+ .set_wptr = jpeg_v2_5_dec_ring_set_wptr,
+ .emit_frame_size =
+ SOC15_FLUSH_GPU_TLB_NUM_WREG * 6 +
+ SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 8 +
+ 8 + /* jpeg_v2_5_dec_ring_emit_vm_flush */
+ 18 + 18 + /* jpeg_v2_5_dec_ring_emit_fence x2 vm fence */
+ 8 + 16,
+ .emit_ib_size = 22, /* jpeg_v2_5_dec_ring_emit_ib */
+ .emit_ib = jpeg_v2_0_dec_ring_emit_ib,
+ .emit_fence = jpeg_v2_0_dec_ring_emit_fence,
+ .emit_vm_flush = jpeg_v2_0_dec_ring_emit_vm_flush,
+ .test_ring = amdgpu_jpeg_dec_ring_test_ring,
+ .test_ib = amdgpu_jpeg_dec_ring_test_ib,
+ .insert_nop = jpeg_v2_0_dec_ring_nop,
+ .insert_start = jpeg_v2_0_dec_ring_insert_start,
+ .insert_end = jpeg_v2_0_dec_ring_insert_end,
+ .pad_ib = amdgpu_ring_generic_pad_ib,
+ .begin_use = amdgpu_jpeg_ring_begin_use,
+ .end_use = amdgpu_jpeg_ring_end_use,
+ .emit_wreg = jpeg_v2_0_dec_ring_emit_wreg,
+ .emit_reg_wait = jpeg_v2_0_dec_ring_emit_reg_wait,
+ .emit_reg_write_reg_wait = amdgpu_ring_emit_reg_write_reg_wait_helper,
+};
+
+static void jpeg_v2_5_set_dec_ring_funcs(struct amdgpu_device *adev)
+{
+ int i;
+
+ for (i = 0; i < adev->jpeg.num_jpeg_inst; ++i) {
+ if (adev->jpeg.harvest_config & (1 << i))
+ continue;
+
+ adev->jpeg.inst[i].ring_dec.funcs = &jpeg_v2_5_dec_ring_vm_funcs;
+ adev->jpeg.inst[i].ring_dec.me = i;
+ DRM_INFO("JPEG(%d) JPEG decode is enabled in VM mode\n", i);
+ }
+}
+
+static const struct amdgpu_irq_src_funcs jpeg_v2_5_irq_funcs = {
+ .set = jpeg_v2_5_set_interrupt_state,
+ .process = jpeg_v2_5_process_interrupt,
+};
+
+static void jpeg_v2_5_set_irq_funcs(struct amdgpu_device *adev)
+{
+ int i;
+
+ for (i = 0; i < adev->jpeg.num_jpeg_inst; ++i) {
+ if (adev->jpeg.harvest_config & (1 << i))
+ continue;
+
+ adev->jpeg.inst[i].irq.num_types = 1;
+ adev->jpeg.inst[i].irq.funcs = &jpeg_v2_5_irq_funcs;
+ }
+}
+
+const struct amdgpu_ip_block_version jpeg_v2_5_ip_block =
+{
+ .type = AMD_IP_BLOCK_TYPE_JPEG,
+ .major = 2,
+ .minor = 5,
+ .rev = 0,
+ .funcs = &jpeg_v2_5_ip_funcs,
+};
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/nvdec/gp102.c b/drivers/gpu/drm/amd/amdgpu/jpeg_v2_5.h
index fde6328c6d71..2b4087c02620 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/nvdec/gp102.c
+++ b/drivers/gpu/drm/amd/amdgpu/jpeg_v2_5.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved.
+ * Copyright 2019 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
@@ -14,17 +14,16 @@
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
- * DEALINGS IN THE SOFTWARE.
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
*/
-#include "priv.h"
+#ifndef __JPEG_V2_5_H__
+#define __JPEG_V2_5_H__
+
+extern const struct amdgpu_ip_block_version jpeg_v2_5_ip_block;
-int
-gp102_nvdec_new(struct nvkm_device *device, int index,
- struct nvkm_nvdec **pnvdec)
-{
- return nvkm_nvdec_new_(device, index, pnvdec);
-}
+#endif /* __JPEG_V2_5_H__ */
diff --git a/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c b/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c
index 28105e4af507..adfd8a6171eb 100644
--- a/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c
@@ -27,17 +27,13 @@
#include "mmhub/mmhub_1_0_offset.h"
#include "mmhub/mmhub_1_0_sh_mask.h"
#include "mmhub/mmhub_1_0_default.h"
-#include "mmhub/mmhub_9_4_0_offset.h"
#include "vega10_enum.h"
-
+#include "soc15.h"
#include "soc15_common.h"
#define mmDAGB0_CNTL_MISC2_RV 0x008f
#define mmDAGB0_CNTL_MISC2_RV_BASE_IDX 0
-#define EA_EDC_CNT_MASK 0x3
-#define EA_EDC_CNT_SHIFT 0x2
-
u64 mmhub_v1_0_get_fb_location(struct amdgpu_device *adev)
{
u64 base = RREG32_SOC15(MMHUB, 0, mmMC_VM_FB_LOCATION_BASE);
@@ -564,59 +560,191 @@ void mmhub_v1_0_get_clockgating(struct amdgpu_device *adev, u32 *flags)
*flags |= AMD_CG_SUPPORT_MC_LS;
}
+static const struct soc15_ras_field_entry mmhub_v1_0_ras_fields[] = {
+ { "MMEA0_DRAMRD_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA0_EDC_CNT_VG20),
+ SOC15_REG_FIELD(MMEA0_EDC_CNT_VG20, DRAMRD_CMDMEM_SEC_COUNT),
+ SOC15_REG_FIELD(MMEA0_EDC_CNT_VG20, DRAMRD_CMDMEM_DED_COUNT),
+ },
+ { "MMEA0_DRAMWR_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA0_EDC_CNT_VG20),
+ SOC15_REG_FIELD(MMEA0_EDC_CNT_VG20, DRAMWR_CMDMEM_SEC_COUNT),
+ SOC15_REG_FIELD(MMEA0_EDC_CNT_VG20, DRAMWR_CMDMEM_DED_COUNT),
+ },
+ { "MMEA0_DRAMWR_DATAMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA0_EDC_CNT_VG20),
+ SOC15_REG_FIELD(MMEA0_EDC_CNT_VG20, DRAMWR_DATAMEM_SEC_COUNT),
+ SOC15_REG_FIELD(MMEA0_EDC_CNT_VG20, DRAMWR_DATAMEM_DED_COUNT),
+ },
+ { "MMEA0_RRET_TAGMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA0_EDC_CNT_VG20),
+ SOC15_REG_FIELD(MMEA0_EDC_CNT_VG20, RRET_TAGMEM_SEC_COUNT),
+ SOC15_REG_FIELD(MMEA0_EDC_CNT_VG20, RRET_TAGMEM_DED_COUNT),
+ },
+ { "MMEA0_WRET_TAGMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA0_EDC_CNT_VG20),
+ SOC15_REG_FIELD(MMEA0_EDC_CNT_VG20, WRET_TAGMEM_SEC_COUNT),
+ SOC15_REG_FIELD(MMEA0_EDC_CNT_VG20, WRET_TAGMEM_DED_COUNT),
+ },
+ { "MMEA0_DRAMRD_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA0_EDC_CNT_VG20),
+ SOC15_REG_FIELD(MMEA0_EDC_CNT_VG20, DRAMRD_PAGEMEM_SED_COUNT),
+ 0, 0,
+ },
+ { "MMEA0_DRAMWR_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA0_EDC_CNT_VG20),
+ SOC15_REG_FIELD(MMEA0_EDC_CNT_VG20, DRAMWR_PAGEMEM_SED_COUNT),
+ 0, 0,
+ },
+ { "MMEA0_IORD_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA0_EDC_CNT_VG20),
+ SOC15_REG_FIELD(MMEA0_EDC_CNT_VG20, IORD_CMDMEM_SED_COUNT),
+ 0, 0,
+ },
+ { "MMEA0_IOWR_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA0_EDC_CNT_VG20),
+ SOC15_REG_FIELD(MMEA0_EDC_CNT_VG20, IOWR_CMDMEM_SED_COUNT),
+ 0, 0,
+ },
+ { "MMEA0_IOWR_DATAMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA0_EDC_CNT_VG20),
+ SOC15_REG_FIELD(MMEA0_EDC_CNT_VG20, IOWR_DATAMEM_SED_COUNT),
+ 0, 0,
+ },
+ { "MMEA0_GMIRD_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA0_EDC_CNT2_VG20),
+ SOC15_REG_FIELD(MMEA0_EDC_CNT2_VG20, GMIRD_CMDMEM_SEC_COUNT),
+ SOC15_REG_FIELD(MMEA0_EDC_CNT2_VG20, GMIRD_CMDMEM_DED_COUNT),
+ },
+ { "MMEA0_GMIWR_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA0_EDC_CNT2_VG20),
+ SOC15_REG_FIELD(MMEA0_EDC_CNT2_VG20, GMIWR_CMDMEM_SEC_COUNT),
+ SOC15_REG_FIELD(MMEA0_EDC_CNT2_VG20, GMIWR_CMDMEM_DED_COUNT),
+ },
+ { "MMEA0_GMIWR_DATAMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA0_EDC_CNT2_VG20),
+ SOC15_REG_FIELD(MMEA0_EDC_CNT2_VG20, GMIWR_DATAMEM_SEC_COUNT),
+ SOC15_REG_FIELD(MMEA0_EDC_CNT2_VG20, GMIWR_DATAMEM_DED_COUNT),
+ },
+ { "MMEA0_GMIRD_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA0_EDC_CNT2_VG20),
+ SOC15_REG_FIELD(MMEA0_EDC_CNT2_VG20, GMIRD_PAGEMEM_SED_COUNT),
+ 0, 0,
+ },
+ { "MMEA0_GMIWR_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA0_EDC_CNT2_VG20),
+ SOC15_REG_FIELD(MMEA0_EDC_CNT2_VG20, GMIWR_PAGEMEM_SED_COUNT),
+ 0, 0,
+ },
+ { "MMEA1_DRAMRD_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA1_EDC_CNT_VG20),
+ SOC15_REG_FIELD(MMEA1_EDC_CNT_VG20, DRAMRD_CMDMEM_SEC_COUNT),
+ SOC15_REG_FIELD(MMEA1_EDC_CNT_VG20, DRAMRD_CMDMEM_DED_COUNT),
+ },
+ { "MMEA1_DRAMWR_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA1_EDC_CNT_VG20),
+ SOC15_REG_FIELD(MMEA1_EDC_CNT_VG20, DRAMWR_CMDMEM_SEC_COUNT),
+ SOC15_REG_FIELD(MMEA1_EDC_CNT_VG20, DRAMWR_CMDMEM_DED_COUNT),
+ },
+ { "MMEA1_DRAMWR_DATAMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA1_EDC_CNT_VG20),
+ SOC15_REG_FIELD(MMEA1_EDC_CNT_VG20, DRAMWR_DATAMEM_SEC_COUNT),
+ SOC15_REG_FIELD(MMEA1_EDC_CNT_VG20, DRAMWR_DATAMEM_DED_COUNT),
+ },
+ { "MMEA1_RRET_TAGMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA1_EDC_CNT_VG20),
+ SOC15_REG_FIELD(MMEA1_EDC_CNT_VG20, RRET_TAGMEM_SEC_COUNT),
+ SOC15_REG_FIELD(MMEA1_EDC_CNT_VG20, RRET_TAGMEM_DED_COUNT),
+ },
+ { "MMEA1_WRET_TAGMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA1_EDC_CNT_VG20),
+ SOC15_REG_FIELD(MMEA1_EDC_CNT_VG20, WRET_TAGMEM_SEC_COUNT),
+ SOC15_REG_FIELD(MMEA1_EDC_CNT_VG20, WRET_TAGMEM_DED_COUNT),
+ },
+ { "MMEA1_DRAMRD_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA1_EDC_CNT_VG20),
+ SOC15_REG_FIELD(MMEA1_EDC_CNT_VG20, DRAMRD_PAGEMEM_SED_COUNT),
+ 0, 0,
+ },
+ { "MMEA1_DRAMWR_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA1_EDC_CNT_VG20),
+ SOC15_REG_FIELD(MMEA1_EDC_CNT_VG20, DRAMWR_PAGEMEM_SED_COUNT),
+ 0, 0,
+ },
+ { "MMEA1_IORD_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA1_EDC_CNT_VG20),
+ SOC15_REG_FIELD(MMEA1_EDC_CNT_VG20, IORD_CMDMEM_SED_COUNT),
+ 0, 0,
+ },
+ { "MMEA1_IOWR_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA1_EDC_CNT_VG20),
+ SOC15_REG_FIELD(MMEA1_EDC_CNT_VG20, IOWR_CMDMEM_SED_COUNT),
+ 0, 0,
+ },
+ { "MMEA1_IOWR_DATAMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA1_EDC_CNT_VG20),
+ SOC15_REG_FIELD(MMEA1_EDC_CNT_VG20, IOWR_DATAMEM_SED_COUNT),
+ 0, 0,
+ },
+ { "MMEA1_GMIRD_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA1_EDC_CNT2_VG20),
+ SOC15_REG_FIELD(MMEA1_EDC_CNT2_VG20, GMIRD_CMDMEM_SEC_COUNT),
+ SOC15_REG_FIELD(MMEA1_EDC_CNT2_VG20, GMIRD_CMDMEM_DED_COUNT),
+ },
+ { "MMEA1_GMIWR_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA1_EDC_CNT2_VG20),
+ SOC15_REG_FIELD(MMEA1_EDC_CNT2_VG20, GMIWR_CMDMEM_SEC_COUNT),
+ SOC15_REG_FIELD(MMEA1_EDC_CNT2_VG20, GMIWR_CMDMEM_DED_COUNT),
+ },
+ { "MMEA1_GMIWR_DATAMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA1_EDC_CNT2_VG20),
+ SOC15_REG_FIELD(MMEA1_EDC_CNT2_VG20, GMIWR_DATAMEM_SEC_COUNT),
+ SOC15_REG_FIELD(MMEA1_EDC_CNT2_VG20, GMIWR_DATAMEM_DED_COUNT),
+ },
+ { "MMEA1_GMIRD_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA1_EDC_CNT2_VG20),
+ SOC15_REG_FIELD(MMEA1_EDC_CNT2_VG20, GMIRD_PAGEMEM_SED_COUNT),
+ 0, 0,
+ },
+ { "MMEA1_GMIWR_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA1_EDC_CNT2_VG20),
+ SOC15_REG_FIELD(MMEA1_EDC_CNT2_VG20, GMIWR_PAGEMEM_SED_COUNT),
+ 0, 0,
+ }
+};
+
+static const struct soc15_reg_entry mmhub_v1_0_edc_cnt_regs[] = {
+ { SOC15_REG_ENTRY(MMHUB, 0, mmMMEA0_EDC_CNT_VG20), 0, 0, 0},
+ { SOC15_REG_ENTRY(MMHUB, 0, mmMMEA0_EDC_CNT2_VG20), 0, 0, 0},
+ { SOC15_REG_ENTRY(MMHUB, 0, mmMMEA1_EDC_CNT_VG20), 0, 0, 0},
+ { SOC15_REG_ENTRY(MMHUB, 0, mmMMEA1_EDC_CNT2_VG20), 0, 0, 0},
+};
+
+static int mmhub_v1_0_get_ras_error_count(const struct soc15_reg_entry *reg,
+ uint32_t value, uint32_t *sec_count, uint32_t *ded_count)
+{
+ uint32_t i;
+ uint32_t sec_cnt, ded_cnt;
+
+ for (i = 0; i < ARRAY_SIZE(mmhub_v1_0_ras_fields); i++) {
+ if (mmhub_v1_0_ras_fields[i].reg_offset != reg->reg_offset)
+ continue;
+
+ sec_cnt = (value &
+ mmhub_v1_0_ras_fields[i].sec_count_mask) >>
+ mmhub_v1_0_ras_fields[i].sec_count_shift;
+ if (sec_cnt) {
+ DRM_INFO("MMHUB SubBlock %s, SEC %d\n",
+ mmhub_v1_0_ras_fields[i].name,
+ sec_cnt);
+ *sec_count += sec_cnt;
+ }
+
+ ded_cnt = (value &
+ mmhub_v1_0_ras_fields[i].ded_count_mask) >>
+ mmhub_v1_0_ras_fields[i].ded_count_shift;
+ if (ded_cnt) {
+ DRM_INFO("MMHUB SubBlock %s, DED %d\n",
+ mmhub_v1_0_ras_fields[i].name,
+ ded_cnt);
+ *ded_count += ded_cnt;
+ }
+ }
+
+ return 0;
+}
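
The table-driven walk above reduces to one operation per field: mask the packed EDC register, shift the field down, and accumulate whatever is nonzero. A minimal sketch of that extraction over a hypothetical two-field layout:

#include <stdint.h>
#include <stdio.h>

struct field {
	const char *name;
	uint32_t mask;
	uint32_t shift;
};

/* Hypothetical packed layout: SEC in bits 0-1, DED in bits 2-3. */
static const struct field fields[] = {
	{ "SEC", 0x3u << 0, 0 },
	{ "DED", 0x3u << 2, 2 },
};

int main(void)
{
	uint32_t reg_value = 0x6;	/* SEC=2, DED=1 in the layout above */
	unsigned int i;

	for (i = 0; i < sizeof(fields) / sizeof(fields[0]); i++) {
		uint32_t cnt = (reg_value & fields[i].mask) >> fields[i].shift;
		if (cnt)
			printf("%s count: %u\n", fields[i].name, cnt);
	}
	return 0;
}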
+
static void mmhub_v1_0_query_ras_error_count(struct amdgpu_device *adev,
void *ras_error_status)
{
- int i;
- uint32_t ea0_edc_cnt, ea0_edc_cnt2;
- uint32_t ea1_edc_cnt, ea1_edc_cnt2;
struct ras_err_data *err_data = (struct ras_err_data *)ras_error_status;
-
- /* EDC CNT will be cleared automatically after read */
- ea0_edc_cnt = RREG32_SOC15(MMHUB, 0, mmMMEA0_EDC_CNT_VG20);
- ea0_edc_cnt2 = RREG32_SOC15(MMHUB, 0, mmMMEA0_EDC_CNT2_VG20);
- ea1_edc_cnt = RREG32_SOC15(MMHUB, 0, mmMMEA1_EDC_CNT_VG20);
- ea1_edc_cnt2 = RREG32_SOC15(MMHUB, 0, mmMMEA1_EDC_CNT2_VG20);
-
- /* error count of each error type is recorded by 2 bits,
- * ce and ue count in EDC_CNT
- */
- for (i = 0; i < 5; i++) {
- err_data->ce_count += (ea0_edc_cnt & EA_EDC_CNT_MASK);
- err_data->ce_count += (ea1_edc_cnt & EA_EDC_CNT_MASK);
- ea0_edc_cnt >>= EA_EDC_CNT_SHIFT;
- ea1_edc_cnt >>= EA_EDC_CNT_SHIFT;
- err_data->ue_count += (ea0_edc_cnt & EA_EDC_CNT_MASK);
- err_data->ue_count += (ea1_edc_cnt & EA_EDC_CNT_MASK);
- ea0_edc_cnt >>= EA_EDC_CNT_SHIFT;
- ea1_edc_cnt >>= EA_EDC_CNT_SHIFT;
- }
- /* successive ue count in EDC_CNT */
- for (i = 0; i < 5; i++) {
- err_data->ue_count += (ea0_edc_cnt & EA_EDC_CNT_MASK);
- err_data->ue_count += (ea1_edc_cnt & EA_EDC_CNT_MASK);
- ea0_edc_cnt >>= EA_EDC_CNT_SHIFT;
- ea1_edc_cnt >>= EA_EDC_CNT_SHIFT;
+ uint32_t sec_count = 0, ded_count = 0;
+ uint32_t i;
+ uint32_t reg_value;
+
+ err_data->ue_count = 0;
+ err_data->ce_count = 0;
+
+ for (i = 0; i < ARRAY_SIZE(mmhub_v1_0_edc_cnt_regs); i++) {
+ reg_value =
+ RREG32(SOC15_REG_ENTRY_OFFSET(mmhub_v1_0_edc_cnt_regs[i]));
+ if (reg_value)
+ mmhub_v1_0_get_ras_error_count(&mmhub_v1_0_edc_cnt_regs[i],
+ reg_value, &sec_count, &ded_count);
}
- /* ce and ue count in EDC_CNT2 */
- for (i = 0; i < 3; i++) {
- err_data->ce_count += (ea0_edc_cnt2 & EA_EDC_CNT_MASK);
- err_data->ce_count += (ea1_edc_cnt2 & EA_EDC_CNT_MASK);
- ea0_edc_cnt2 >>= EA_EDC_CNT_SHIFT;
- ea1_edc_cnt2 >>= EA_EDC_CNT_SHIFT;
- err_data->ue_count += (ea0_edc_cnt2 & EA_EDC_CNT_MASK);
- err_data->ue_count += (ea1_edc_cnt2 & EA_EDC_CNT_MASK);
- ea0_edc_cnt2 >>= EA_EDC_CNT_SHIFT;
- ea1_edc_cnt2 >>= EA_EDC_CNT_SHIFT;
- }
- /* successive ue count in EDC_CNT2 */
- for (i = 0; i < 6; i++) {
- err_data->ue_count += (ea0_edc_cnt2 & EA_EDC_CNT_MASK);
- err_data->ue_count += (ea1_edc_cnt2 & EA_EDC_CNT_MASK);
- ea0_edc_cnt2 >>= EA_EDC_CNT_SHIFT;
- ea1_edc_cnt2 >>= EA_EDC_CNT_SHIFT;
- }
+ err_data->ce_count += sec_count;
+ err_data->ue_count += ded_count;
}
const struct amdgpu_mmhub_funcs mmhub_v1_0_funcs = {
diff --git a/drivers/gpu/drm/amd/amdgpu/mmhub_v9_4.c b/drivers/gpu/drm/amd/amdgpu/mmhub_v9_4.c
index 66efe2f7bd76..5c42387c9274 100644
--- a/drivers/gpu/drm/amd/amdgpu/mmhub_v9_4.c
+++ b/drivers/gpu/drm/amd/amdgpu/mmhub_v9_4.c
@@ -21,6 +21,7 @@
*
*/
#include "amdgpu.h"
+#include "amdgpu_ras.h"
#include "mmhub_v9_4.h"
#include "mmhub/mmhub_9_4_1_offset.h"
@@ -29,7 +30,7 @@
#include "athub/athub_1_0_offset.h"
#include "athub/athub_1_0_sh_mask.h"
#include "vega10_enum.h"
-
+#include "soc15.h"
#include "soc15_common.h"
#define MMHUB_NUM_INSTANCES 2
@@ -53,7 +54,7 @@ u64 mmhub_v9_4_get_fb_location(struct amdgpu_device *adev)
return base;
}
-void mmhub_v9_4_setup_vm_pt_regs(struct amdgpu_device *adev, int hubid,
+static void mmhub_v9_4_setup_hubid_vm_pt_regs(struct amdgpu_device *adev, int hubid,
uint32_t vmid, uint64_t value)
{
/* two registers distance between mmVML2VC0_VM_CONTEXT0_* to
@@ -79,7 +80,7 @@ static void mmhub_v9_4_init_gart_aperture_regs(struct amdgpu_device *adev,
{
uint64_t pt_base = amdgpu_gmc_pd_addr(adev->gart.bo);
- mmhub_v9_4_setup_vm_pt_regs(adev, hubid, 0, pt_base);
+ mmhub_v9_4_setup_hubid_vm_pt_regs(adev, hubid, 0, pt_base);
WREG32_SOC15_OFFSET(MMHUB, 0,
mmVML2VC0_VM_CONTEXT0_PAGE_TABLE_START_ADDR_LO32,
@@ -100,6 +101,16 @@ static void mmhub_v9_4_init_gart_aperture_regs(struct amdgpu_device *adev,
(u32)(adev->gmc.gart_end >> 44));
}
+void mmhub_v9_4_setup_vm_pt_regs(struct amdgpu_device *adev, uint32_t vmid,
+ uint64_t page_table_base)
+{
+ int i;
+
+ for (i = 0; i < MMHUB_NUM_INSTANCES; i++)
+ mmhub_v9_4_setup_hubid_vm_pt_regs(adev, i, vmid,
+ page_table_base);
+}
+
static void mmhub_v9_4_init_system_aperture_regs(struct amdgpu_device *adev,
int hubid)
{
@@ -117,45 +128,53 @@ static void mmhub_v9_4_init_system_aperture_regs(struct amdgpu_device *adev,
hubid * MMHUB_INSTANCE_REGISTER_OFFSET,
adev->gmc.agp_start >> 24);
- /* Program the system aperture low logical page number. */
- WREG32_SOC15_OFFSET(MMHUB, 0,
- mmVMSHAREDVC0_MC_VM_SYSTEM_APERTURE_LOW_ADDR,
- hubid * MMHUB_INSTANCE_REGISTER_OFFSET,
- min(adev->gmc.fb_start, adev->gmc.agp_start) >> 18);
- WREG32_SOC15_OFFSET(MMHUB, 0,
- mmVMSHAREDVC0_MC_VM_SYSTEM_APERTURE_HIGH_ADDR,
- hubid * MMHUB_INSTANCE_REGISTER_OFFSET,
- max(adev->gmc.fb_end, adev->gmc.agp_end) >> 18);
+ if (!amdgpu_sriov_vf(adev)) {
+ /* Program the system aperture low logical page number. */
+ WREG32_SOC15_OFFSET(
+ MMHUB, 0, mmVMSHAREDVC0_MC_VM_SYSTEM_APERTURE_LOW_ADDR,
+ hubid * MMHUB_INSTANCE_REGISTER_OFFSET,
+ min(adev->gmc.fb_start, adev->gmc.agp_start) >> 18);
+ WREG32_SOC15_OFFSET(
+ MMHUB, 0, mmVMSHAREDVC0_MC_VM_SYSTEM_APERTURE_HIGH_ADDR,
+ hubid * MMHUB_INSTANCE_REGISTER_OFFSET,
+ max(adev->gmc.fb_end, adev->gmc.agp_end) >> 18);
- /* Set default page address. */
- value = adev->vram_scratch.gpu_addr - adev->gmc.vram_start +
- adev->vm_manager.vram_base_offset;
- WREG32_SOC15_OFFSET(MMHUB, 0,
+ /* Set default page address. */
+ value = adev->vram_scratch.gpu_addr - adev->gmc.vram_start +
+ adev->vm_manager.vram_base_offset;
+ WREG32_SOC15_OFFSET(
+ MMHUB, 0,
mmVMSHAREDPF0_MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_LSB,
hubid * MMHUB_INSTANCE_REGISTER_OFFSET,
(u32)(value >> 12));
- WREG32_SOC15_OFFSET(MMHUB, 0,
+ WREG32_SOC15_OFFSET(
+ MMHUB, 0,
mmVMSHAREDPF0_MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_MSB,
hubid * MMHUB_INSTANCE_REGISTER_OFFSET,
(u32)(value >> 44));
- /* Program "protection fault". */
- WREG32_SOC15_OFFSET(MMHUB, 0,
- mmVML2PF0_VM_L2_PROTECTION_FAULT_DEFAULT_ADDR_LO32,
- hubid * MMHUB_INSTANCE_REGISTER_OFFSET,
- (u32)(adev->dummy_page_addr >> 12));
- WREG32_SOC15_OFFSET(MMHUB, 0,
- mmVML2PF0_VM_L2_PROTECTION_FAULT_DEFAULT_ADDR_HI32,
- hubid * MMHUB_INSTANCE_REGISTER_OFFSET,
- (u32)((u64)adev->dummy_page_addr >> 44));
+ /* Program "protection fault". */
+ WREG32_SOC15_OFFSET(
+ MMHUB, 0,
+ mmVML2PF0_VM_L2_PROTECTION_FAULT_DEFAULT_ADDR_LO32,
+ hubid * MMHUB_INSTANCE_REGISTER_OFFSET,
+ (u32)(adev->dummy_page_addr >> 12));
+ WREG32_SOC15_OFFSET(
+ MMHUB, 0,
+ mmVML2PF0_VM_L2_PROTECTION_FAULT_DEFAULT_ADDR_HI32,
+ hubid * MMHUB_INSTANCE_REGISTER_OFFSET,
+ (u32)((u64)adev->dummy_page_addr >> 44));
- tmp = RREG32_SOC15_OFFSET(MMHUB, 0,
- mmVML2PF0_VM_L2_PROTECTION_FAULT_CNTL2,
- hubid * MMHUB_INSTANCE_REGISTER_OFFSET);
- tmp = REG_SET_FIELD(tmp, VML2PF0_VM_L2_PROTECTION_FAULT_CNTL2,
- ACTIVE_PAGE_MIGRATION_PTE_READ_RETRY, 1);
- WREG32_SOC15_OFFSET(MMHUB, 0, mmVML2PF0_VM_L2_PROTECTION_FAULT_CNTL2,
- hubid * MMHUB_INSTANCE_REGISTER_OFFSET, tmp);
+ tmp = RREG32_SOC15_OFFSET(
+ MMHUB, 0, mmVML2PF0_VM_L2_PROTECTION_FAULT_CNTL2,
+ hubid * MMHUB_INSTANCE_REGISTER_OFFSET);
+ tmp = REG_SET_FIELD(tmp, VML2PF0_VM_L2_PROTECTION_FAULT_CNTL2,
+ ACTIVE_PAGE_MIGRATION_PTE_READ_RETRY, 1);
+ WREG32_SOC15_OFFSET(MMHUB, 0,
+ mmVML2PF0_VM_L2_PROTECTION_FAULT_CNTL2,
+ hubid * MMHUB_INSTANCE_REGISTER_OFFSET,
+ tmp);
+ }
}
static void mmhub_v9_4_init_tlb_regs(struct amdgpu_device *adev, int hubid)
@@ -313,7 +332,8 @@ static void mmhub_v9_4_setup_vmid_config(struct amdgpu_device *adev, int hubid)
adev->vm_manager.block_size - 9);
/* Send no-retry XNACK on fault to suppress VM fault storm. */
tmp = REG_SET_FIELD(tmp, VML2VC0_VM_CONTEXT1_CNTL,
- RETRY_PERMISSION_OR_INVALID_PAGE_FAULT, 0);
+ RETRY_PERMISSION_OR_INVALID_PAGE_FAULT,
+ !amdgpu_noretry);
WREG32_SOC15_OFFSET(MMHUB, 0, mmVML2VC0_VM_CONTEXT1_CNTL,
hubid * MMHUB_INSTANCE_REGISTER_OFFSET + i,
tmp);
@@ -356,30 +376,16 @@ int mmhub_v9_4_gart_enable(struct amdgpu_device *adev)
int i;
for (i = 0; i < MMHUB_NUM_INSTANCES; i++) {
- if (amdgpu_sriov_vf(adev)) {
- /*
- * MC_VM_FB_LOCATION_BASE/TOP is NULL for VF, becuase
- * they are VF copy registers so vbios post doesn't
- * program them, for SRIOV driver need to program them
- */
- WREG32_SOC15_OFFSET(MMHUB, 0,
- mmVMSHAREDVC0_MC_VM_FB_LOCATION_BASE,
- i * MMHUB_INSTANCE_REGISTER_OFFSET,
- adev->gmc.vram_start >> 24);
- WREG32_SOC15_OFFSET(MMHUB, 0,
- mmVMSHAREDVC0_MC_VM_FB_LOCATION_TOP,
- i * MMHUB_INSTANCE_REGISTER_OFFSET,
- adev->gmc.vram_end >> 24);
- }
-
/* GART Enable. */
mmhub_v9_4_init_gart_aperture_regs(adev, i);
mmhub_v9_4_init_system_aperture_regs(adev, i);
mmhub_v9_4_init_tlb_regs(adev, i);
- mmhub_v9_4_init_cache_regs(adev, i);
+ if (!amdgpu_sriov_vf(adev))
+ mmhub_v9_4_init_cache_regs(adev, i);
mmhub_v9_4_enable_system_domain(adev, i);
- mmhub_v9_4_disable_identity_aperture(adev, i);
+ if (!amdgpu_sriov_vf(adev))
+ mmhub_v9_4_disable_identity_aperture(adev, i);
mmhub_v9_4_setup_vmid_config(adev, i);
mmhub_v9_4_program_invalidation(adev, i);
}
@@ -655,3 +661,253 @@ void mmhub_v9_4_get_clockgating(struct amdgpu_device *adev, u32 *flags)
if (data & ATCL2_0_ATC_L2_MISC_CG__MEM_LS_ENABLE_MASK)
*flags |= AMD_CG_SUPPORT_MC_LS;
}
+
+static const struct soc15_ras_field_entry mmhub_v9_4_ras_fields[] = {
+ { "MMEA0_DRAMRD_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA0_EDC_CNT),
+ SOC15_REG_FIELD(MMEA0_EDC_CNT, DRAMRD_CMDMEM_SEC_COUNT),
+ SOC15_REG_FIELD(MMEA0_EDC_CNT, DRAMRD_CMDMEM_DED_COUNT),
+ },
+ { "MMEA0_DRAMWR_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA0_EDC_CNT),
+ SOC15_REG_FIELD(MMEA0_EDC_CNT, DRAMWR_CMDMEM_SEC_COUNT),
+ SOC15_REG_FIELD(MMEA0_EDC_CNT, DRAMWR_CMDMEM_DED_COUNT),
+ },
+ { "MMEA0_DRAMWR_DATAMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA0_EDC_CNT),
+ SOC15_REG_FIELD(MMEA0_EDC_CNT, DRAMWR_DATAMEM_SEC_COUNT),
+ SOC15_REG_FIELD(MMEA0_EDC_CNT, DRAMWR_DATAMEM_DED_COUNT),
+ },
+ { "MMEA0_RRET_TAGMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA0_EDC_CNT),
+ SOC15_REG_FIELD(MMEA0_EDC_CNT, RRET_TAGMEM_SEC_COUNT),
+ SOC15_REG_FIELD(MMEA0_EDC_CNT, RRET_TAGMEM_DED_COUNT),
+ },
+ { "MMEA0_WRET_TAGMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA0_EDC_CNT),
+ SOC15_REG_FIELD(MMEA0_EDC_CNT, WRET_TAGMEM_SEC_COUNT),
+ SOC15_REG_FIELD(MMEA0_EDC_CNT, WRET_TAGMEM_DED_COUNT),
+ },
+ { "MMEA0_DRAMRD_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA0_EDC_CNT),
+ SOC15_REG_FIELD(MMEA0_EDC_CNT, DRAMRD_PAGEMEM_SED_COUNT),
+ 0, 0,
+ },
+ { "MMEA0_DRAMWR_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA0_EDC_CNT),
+ SOC15_REG_FIELD(MMEA0_EDC_CNT, DRAMWR_PAGEMEM_SED_COUNT),
+ 0, 0,
+ },
+ { "MMEA0_IORD_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA0_EDC_CNT),
+ SOC15_REG_FIELD(MMEA0_EDC_CNT, IORD_CMDMEM_SED_COUNT),
+ 0, 0,
+ },
+ { "MMEA0_IOWR_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA0_EDC_CNT),
+ SOC15_REG_FIELD(MMEA0_EDC_CNT, IOWR_CMDMEM_SED_COUNT),
+ 0, 0,
+ },
+ { "MMEA0_IOWR_DATAMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA0_EDC_CNT),
+ SOC15_REG_FIELD(MMEA0_EDC_CNT, IOWR_DATAMEM_SED_COUNT),
+ 0, 0,
+ },
+ { "MMEA0_GMIRD_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA0_EDC_CNT2),
+ SOC15_REG_FIELD(MMEA0_EDC_CNT2, GMIRD_CMDMEM_SEC_COUNT),
+ SOC15_REG_FIELD(MMEA0_EDC_CNT2, GMIRD_CMDMEM_DED_COUNT),
+ },
+ { "MMEA0_GMIWR_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA0_EDC_CNT2),
+ SOC15_REG_FIELD(MMEA0_EDC_CNT2, GMIWR_CMDMEM_SEC_COUNT),
+ SOC15_REG_FIELD(MMEA0_EDC_CNT2, GMIWR_CMDMEM_DED_COUNT),
+ },
+ { "MMEA0_GMIWR_DATAMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA0_EDC_CNT2),
+ SOC15_REG_FIELD(MMEA0_EDC_CNT2, GMIWR_DATAMEM_SEC_COUNT),
+ SOC15_REG_FIELD(MMEA0_EDC_CNT2, GMIWR_DATAMEM_DED_COUNT),
+ },
+ { "MMEA0_GMIRD_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA0_EDC_CNT2),
+ SOC15_REG_FIELD(MMEA0_EDC_CNT2, GMIRD_PAGEMEM_SED_COUNT),
+ 0, 0,
+ },
+ { "MMEA0_GMIWR_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA0_EDC_CNT2),
+ SOC15_REG_FIELD(MMEA0_EDC_CNT2, GMIWR_PAGEMEM_SED_COUNT),
+ 0, 0,
+ },
+ { "MMEA0_DRAMRD_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA0_EDC_CNT3),
+ 0, 0,
+ SOC15_REG_FIELD(MMEA0_EDC_CNT3, DRAMRD_PAGEMEM_DED_COUNT),
+ },
+ { "MMEA0_DRAMWR_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA0_EDC_CNT3),
+ 0, 0,
+ SOC15_REG_FIELD(MMEA0_EDC_CNT3, DRAMWR_PAGEMEM_DED_COUNT),
+ },
+ { "MMEA0_IORD_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA0_EDC_CNT3),
+ 0, 0,
+ SOC15_REG_FIELD(MMEA0_EDC_CNT3, IORD_CMDMEM_DED_COUNT),
+ },
+ { "MMEA0_IOWR_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA0_EDC_CNT3),
+ 0, 0,
+ SOC15_REG_FIELD(MMEA0_EDC_CNT3, IOWR_CMDMEM_DED_COUNT),
+ },
+ { "MMEA0_IOWR_DATAMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA0_EDC_CNT3),
+ 0, 0,
+ SOC15_REG_FIELD(MMEA0_EDC_CNT3, IOWR_DATAMEM_DED_COUNT),
+ },
+ { "MMEA0_GMIRD_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA0_EDC_CNT3),
+ 0, 0,
+ SOC15_REG_FIELD(MMEA0_EDC_CNT3, GMIRD_PAGEMEM_DED_COUNT),
+ },
+ { "MMEA0_GMIWR_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA0_EDC_CNT3),
+ 0, 0,
+ SOC15_REG_FIELD(MMEA0_EDC_CNT3, GMIWR_PAGEMEM_DED_COUNT),
+ },
+ { "MMEA1_DRAMRD_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA1_EDC_CNT),
+ SOC15_REG_FIELD(MMEA1_EDC_CNT, DRAMRD_CMDMEM_SEC_COUNT),
+ SOC15_REG_FIELD(MMEA1_EDC_CNT, DRAMRD_CMDMEM_DED_COUNT),
+ },
+ { "MMEA1_DRAMWR_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA1_EDC_CNT),
+ SOC15_REG_FIELD(MMEA1_EDC_CNT, DRAMWR_CMDMEM_SEC_COUNT),
+ SOC15_REG_FIELD(MMEA1_EDC_CNT, DRAMWR_CMDMEM_DED_COUNT),
+ },
+ { "MMEA1_DRAMWR_DATAMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA1_EDC_CNT),
+ SOC15_REG_FIELD(MMEA1_EDC_CNT, DRAMWR_DATAMEM_SEC_COUNT),
+ SOC15_REG_FIELD(MMEA1_EDC_CNT, DRAMWR_DATAMEM_DED_COUNT),
+ },
+ { "MMEA1_RRET_TAGMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA1_EDC_CNT),
+ SOC15_REG_FIELD(MMEA1_EDC_CNT, RRET_TAGMEM_SEC_COUNT),
+ SOC15_REG_FIELD(MMEA1_EDC_CNT, RRET_TAGMEM_DED_COUNT),
+ },
+ { "MMEA1_WRET_TAGMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA1_EDC_CNT),
+ SOC15_REG_FIELD(MMEA1_EDC_CNT, WRET_TAGMEM_SEC_COUNT),
+ SOC15_REG_FIELD(MMEA1_EDC_CNT, WRET_TAGMEM_DED_COUNT),
+ },
+ { "MMEA1_DRAMRD_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA1_EDC_CNT),
+ SOC15_REG_FIELD(MMEA1_EDC_CNT, DRAMRD_PAGEMEM_SED_COUNT),
+ 0, 0,
+ },
+ { "MMEA1_DRAMWR_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA1_EDC_CNT),
+ SOC15_REG_FIELD(MMEA1_EDC_CNT, DRAMWR_PAGEMEM_SED_COUNT),
+ 0, 0,
+ },
+ { "MMEA1_IORD_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA1_EDC_CNT),
+ SOC15_REG_FIELD(MMEA1_EDC_CNT, IORD_CMDMEM_SED_COUNT),
+ 0, 0,
+ },
+ { "MMEA1_IOWR_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA1_EDC_CNT),
+ SOC15_REG_FIELD(MMEA1_EDC_CNT, IOWR_CMDMEM_SED_COUNT),
+ 0, 0,
+ },
+ { "MMEA1_IOWR_DATAMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA1_EDC_CNT),
+ SOC15_REG_FIELD(MMEA1_EDC_CNT, IOWR_DATAMEM_SED_COUNT),
+ 0, 0,
+ },
+ { "MMEA1_GMIRD_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA1_EDC_CNT2),
+ SOC15_REG_FIELD(MMEA1_EDC_CNT2, GMIRD_CMDMEM_SEC_COUNT),
+ SOC15_REG_FIELD(MMEA1_EDC_CNT2, GMIRD_CMDMEM_DED_COUNT),
+ },
+ { "MMEA1_GMIWR_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA1_EDC_CNT2),
+ SOC15_REG_FIELD(MMEA1_EDC_CNT2, GMIWR_CMDMEM_SEC_COUNT),
+ SOC15_REG_FIELD(MMEA1_EDC_CNT2, GMIWR_CMDMEM_DED_COUNT),
+ },
+ { "MMEA1_GMIWR_DATAMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA1_EDC_CNT2),
+ SOC15_REG_FIELD(MMEA1_EDC_CNT2, GMIWR_DATAMEM_SEC_COUNT),
+ SOC15_REG_FIELD(MMEA1_EDC_CNT2, GMIWR_DATAMEM_DED_COUNT),
+ },
+ { "MMEA1_GMIRD_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA1_EDC_CNT2),
+ SOC15_REG_FIELD(MMEA1_EDC_CNT2, GMIRD_PAGEMEM_SED_COUNT),
+ 0, 0,
+ },
+ { "MMEA1_GMIWR_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA1_EDC_CNT2),
+ SOC15_REG_FIELD(MMEA1_EDC_CNT2, GMIWR_PAGEMEM_SED_COUNT),
+ 0, 0,
+ },
+ { "MMEA1_DRAMRD_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA1_EDC_CNT3),
+ 0, 0,
+ SOC15_REG_FIELD(MMEA1_EDC_CNT3, DRAMRD_PAGEMEM_DED_COUNT),
+ },
+ { "MMEA1_DRAMWR_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA1_EDC_CNT3),
+ 0, 0,
+ SOC15_REG_FIELD(MMEA1_EDC_CNT3, DRAMWR_PAGEMEM_DED_COUNT),
+ },
+ { "MMEA1_IORD_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA1_EDC_CNT3),
+ 0, 0,
+ SOC15_REG_FIELD(MMEA1_EDC_CNT3, IORD_CMDMEM_DED_COUNT),
+ },
+ { "MMEA1_IOWR_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA1_EDC_CNT3),
+ 0, 0,
+ SOC15_REG_FIELD(MMEA1_EDC_CNT3, IOWR_CMDMEM_DED_COUNT),
+ },
+ { "MMEA1_IOWR_DATAMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA1_EDC_CNT3),
+ 0, 0,
+ SOC15_REG_FIELD(MMEA1_EDC_CNT3, IOWR_DATAMEM_DED_COUNT),
+ },
+ { "MMEA1_GMIRD_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA1_EDC_CNT3),
+ 0, 0,
+ SOC15_REG_FIELD(MMEA1_EDC_CNT3, GMIRD_PAGEMEM_DED_COUNT),
+ },
+ { "MMEA1_GMIWR_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA1_EDC_CNT3),
+ 0, 0,
+ SOC15_REG_FIELD(MMEA1_EDC_CNT3, GMIWR_PAGEMEM_DED_COUNT),
+ }
+};
+
+static const struct soc15_reg_entry mmhub_v9_4_edc_cnt_regs[] = {
+ { SOC15_REG_ENTRY(MMHUB, 0, mmMMEA0_EDC_CNT), 0, 0, 0},
+ { SOC15_REG_ENTRY(MMHUB, 0, mmMMEA0_EDC_CNT2), 0, 0, 0},
+ { SOC15_REG_ENTRY(MMHUB, 0, mmMMEA0_EDC_CNT3), 0, 0, 0},
+ { SOC15_REG_ENTRY(MMHUB, 0, mmMMEA1_EDC_CNT), 0, 0, 0},
+ { SOC15_REG_ENTRY(MMHUB, 0, mmMMEA1_EDC_CNT2), 0, 0, 0},
+ { SOC15_REG_ENTRY(MMHUB, 0, mmMMEA1_EDC_CNT3), 0, 0, 0},
+};
+
+static int mmhub_v9_4_get_ras_error_count(const struct soc15_reg_entry *reg,
+ uint32_t value, uint32_t *sec_count, uint32_t *ded_count)
+{
+ uint32_t i;
+ uint32_t sec_cnt, ded_cnt;
+
+ for (i = 0; i < ARRAY_SIZE(mmhub_v9_4_ras_fields); i++) {
+ if (mmhub_v9_4_ras_fields[i].reg_offset != reg->reg_offset)
+ continue;
+
+ sec_cnt = (value &
+ mmhub_v9_4_ras_fields[i].sec_count_mask) >>
+ mmhub_v9_4_ras_fields[i].sec_count_shift;
+ if (sec_cnt) {
+ DRM_INFO("MMHUB SubBlock %s, SEC %d\n",
+ mmhub_v9_4_ras_fields[i].name,
+ sec_cnt);
+ *sec_count += sec_cnt;
+ }
+
+ ded_cnt = (value &
+ mmhub_v9_4_ras_fields[i].ded_count_mask) >>
+ mmhub_v9_4_ras_fields[i].ded_count_shift;
+ if (ded_cnt) {
+ DRM_INFO("MMHUB SubBlock %s, DED %d\n",
+ mmhub_v9_4_ras_fields[i].name,
+ ded_cnt);
+ *ded_count += ded_cnt;
+ }
+ }
+
+ return 0;
+}
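Each table entry above pairs one EDC_CNT register with the mask/shift of its SEC and DED bitfields, and the loop simply extracts and accumulates whichever fields belong to the register that was read. A standalone sketch of that extraction (the mask and shift values here are invented for illustration; the real ones come from the SOC15_REG_FIELD() expansion of the register headers):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* hypothetical raw EDC_CNT value with a SEC field at bits 5:2 */
	uint32_t edc_cnt = 0x00000052;
	uint32_t sec_count_mask = 0x0000003c;
	uint32_t sec_count_shift = 2;

	/* the same mask-then-shift the driver loop performs per field */
	uint32_t sec_cnt = (edc_cnt & sec_count_mask) >> sec_count_shift;

	printf("SEC count = %u\n", sec_cnt); /* prints 4 */
	return 0;
}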
+
+static void mmhub_v9_4_query_ras_error_count(struct amdgpu_device *adev,
+ void *ras_error_status)
+{
+ struct ras_err_data *err_data = (struct ras_err_data *)ras_error_status;
+ uint32_t sec_count = 0, ded_count = 0;
+ uint32_t i;
+ uint32_t reg_value;
+
+ err_data->ue_count = 0;
+ err_data->ce_count = 0;
+
+ for (i = 0; i < ARRAY_SIZE(mmhub_v9_4_edc_cnt_regs); i++) {
+ reg_value =
+ RREG32(SOC15_REG_ENTRY_OFFSET(mmhub_v9_4_edc_cnt_regs[i]));
+ if (reg_value)
+ mmhub_v9_4_get_ras_error_count(&mmhub_v9_4_edc_cnt_regs[i],
+ reg_value, &sec_count, &ded_count);
+ }
+
+ err_data->ce_count += sec_count;
+ err_data->ue_count += ded_count;
+}
+
+const struct amdgpu_mmhub_funcs mmhub_v9_4_funcs = {
+ .ras_late_init = amdgpu_mmhub_ras_late_init,
+ .query_ras_error_count = mmhub_v9_4_query_ras_error_count,
+};
diff --git a/drivers/gpu/drm/amd/amdgpu/mmhub_v9_4.h b/drivers/gpu/drm/amd/amdgpu/mmhub_v9_4.h
index d435cfcec1a8..1b979773776c 100644
--- a/drivers/gpu/drm/amd/amdgpu/mmhub_v9_4.h
+++ b/drivers/gpu/drm/amd/amdgpu/mmhub_v9_4.h
@@ -23,6 +23,8 @@
#ifndef __MMHUB_V9_4_H__
#define __MMHUB_V9_4_H__
+extern const struct amdgpu_mmhub_funcs mmhub_v9_4_funcs;
+
u64 mmhub_v9_4_get_fb_location(struct amdgpu_device *adev);
int mmhub_v9_4_gart_enable(struct amdgpu_device *adev);
void mmhub_v9_4_gart_disable(struct amdgpu_device *adev);
@@ -32,5 +34,7 @@ void mmhub_v9_4_init(struct amdgpu_device *adev);
int mmhub_v9_4_set_clockgating(struct amdgpu_device *adev,
enum amd_clockgating_state state);
void mmhub_v9_4_get_clockgating(struct amdgpu_device *adev, u32 *flags);
+void mmhub_v9_4_setup_vm_pt_regs(struct amdgpu_device *adev, uint32_t vmid,
+ uint64_t page_table_base);
#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/mmsch_v1_0.h b/drivers/gpu/drm/amd/amdgpu/mmsch_v1_0.h
index 8af0bddf85e4..20958639b601 100644
--- a/drivers/gpu/drm/amd/amdgpu/mmsch_v1_0.h
+++ b/drivers/gpu/drm/amd/amdgpu/mmsch_v1_0.h
@@ -47,6 +47,18 @@ struct mmsch_v1_0_init_header {
uint32_t uvd_table_size;
};
+struct mmsch_vf_eng_init_header {
+ uint32_t init_status;
+ uint32_t table_offset;
+ uint32_t table_size;
+};
+
+struct mmsch_v1_1_init_header {
+ uint32_t version;
+ uint32_t total_size;
+ struct mmsch_vf_eng_init_header eng[2];
+};
+
struct mmsch_v1_0_cmd_direct_reg_header {
uint32_t reg_offset : 28;
uint32_t command_type : 4;
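The new v1.1 header extends the single-table v1.0 layout with per-engine sub-headers for two engines. A minimal sketch of how a consumer might walk it, assuming (as with the v1.0 header) that table_offset and table_size are expressed in dwords relative to the start of the init table:

/* Hypothetical walker; the field semantics are assumptions, not from the patch. */
static void mmsch_v1_1_walk(struct mmsch_v1_1_init_header *hdr,
			    uint32_t *table_base)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(hdr->eng); i++) {
		struct mmsch_vf_eng_init_header *eng = &hdr->eng[i];
		uint32_t *cmds = table_base + eng->table_offset;

		pr_debug("eng %d: status 0x%x, %u dwords at offset %u\n",
			 i, eng->init_status, eng->table_size,
			 eng->table_offset);
		(void)cmds; /* the engine's command stream begins here */
	}
}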
diff --git a/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c b/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c
index cc5bf595f9b1..5fd67e1cc2a0 100644
--- a/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c
+++ b/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c
@@ -158,82 +158,6 @@ static void xgpu_ai_mailbox_trans_msg (struct amdgpu_device *adev,
xgpu_ai_mailbox_set_valid(adev, false);
}
-static int xgpu_ai_get_pp_clk(struct amdgpu_device *adev, u32 type, char *buf)
-{
- int r = 0;
- u32 req, val, size;
-
- if (!amdgim_is_hwperf(adev) || buf == NULL)
- return -EBADRQC;
-
- switch(type) {
- case PP_SCLK:
- req = IDH_IRQ_GET_PP_SCLK;
- break;
- case PP_MCLK:
- req = IDH_IRQ_GET_PP_MCLK;
- break;
- default:
- return -EBADRQC;
- }
-
- mutex_lock(&adev->virt.dpm_mutex);
-
- xgpu_ai_mailbox_trans_msg(adev, req, 0, 0, 0);
-
- r = xgpu_ai_poll_msg(adev, IDH_SUCCESS);
- if (!r && adev->fw_vram_usage.va != NULL) {
- val = RREG32_NO_KIQ(
- SOC15_REG_OFFSET(NBIO, 0,
- mmBIF_BX_PF0_MAILBOX_MSGBUF_RCV_DW1));
- size = strnlen((((char *)adev->virt.fw_reserve.p_pf2vf) +
- val), PAGE_SIZE);
-
- if (size < PAGE_SIZE)
- strcpy(buf,((char *)adev->virt.fw_reserve.p_pf2vf + val));
- else
- size = 0;
-
- r = size;
- goto out;
- }
-
- r = xgpu_ai_poll_msg(adev, IDH_FAIL);
- if(r)
- pr_info("%s DPM request failed",
- (type == PP_SCLK)? "SCLK" : "MCLK");
-
-out:
- mutex_unlock(&adev->virt.dpm_mutex);
- return r;
-}
-
-static int xgpu_ai_force_dpm_level(struct amdgpu_device *adev, u32 level)
-{
- int r = 0;
- u32 req = IDH_IRQ_FORCE_DPM_LEVEL;
-
- if (!amdgim_is_hwperf(adev))
- return -EBADRQC;
-
- mutex_lock(&adev->virt.dpm_mutex);
- xgpu_ai_mailbox_trans_msg(adev, req, level, 0, 0);
-
- r = xgpu_ai_poll_msg(adev, IDH_SUCCESS);
- if (!r)
- goto out;
-
- r = xgpu_ai_poll_msg(adev, IDH_FAIL);
- if (!r)
- pr_info("DPM request failed");
- else
- pr_info("Mailbox is broken");
-
-out:
- mutex_unlock(&adev->virt.dpm_mutex);
- return r;
-}
-
static int xgpu_ai_send_access_requests(struct amdgpu_device *adev,
enum idh_request req)
{
@@ -326,7 +250,7 @@ static void xgpu_ai_mailbox_flr_work(struct work_struct *work)
*/
locked = mutex_trylock(&adev->lock_reset);
if (locked)
- adev->in_gpu_reset = 1;
+ adev->in_gpu_reset = true;
do {
if (xgpu_ai_mailbox_peek_msg(adev) == IDH_FLR_NOTIFICATION_CMPL)
@@ -338,7 +262,7 @@ static void xgpu_ai_mailbox_flr_work(struct work_struct *work)
flr_done:
if (locked) {
- adev->in_gpu_reset = 0;
+ adev->in_gpu_reset = false;
mutex_unlock(&adev->lock_reset);
}
@@ -455,6 +379,4 @@ const struct amdgpu_virt_ops xgpu_ai_virt_ops = {
.reset_gpu = xgpu_ai_request_reset,
.wait_reset = NULL,
.trans_msg = xgpu_ai_mailbox_trans_msg,
- .get_pp_clk = xgpu_ai_get_pp_clk,
- .force_dpm_level = xgpu_ai_force_dpm_level,
};
diff --git a/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.h b/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.h
index 077e91a33d62..37dbe0f2142f 100644
--- a/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.h
+++ b/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.h
@@ -35,10 +35,6 @@ enum idh_request {
IDH_REL_GPU_FINI_ACCESS,
IDH_REQ_GPU_RESET_ACCESS,
- IDH_IRQ_FORCE_DPM_LEVEL = 10,
- IDH_IRQ_GET_PP_SCLK,
- IDH_IRQ_GET_PP_MCLK,
-
IDH_LOG_VF_ERROR = 200,
};
diff --git a/drivers/gpu/drm/amd/amdgpu/mxgpu_nv.c b/drivers/gpu/drm/amd/amdgpu/mxgpu_nv.c
index 0d8767eb7a70..237fa5e16b7c 100644
--- a/drivers/gpu/drm/amd/amdgpu/mxgpu_nv.c
+++ b/drivers/gpu/drm/amd/amdgpu/mxgpu_nv.c
@@ -252,7 +252,7 @@ static void xgpu_nv_mailbox_flr_work(struct work_struct *work)
*/
locked = mutex_trylock(&adev->lock_reset);
if (locked)
- adev->in_gpu_reset = 1;
+ adev->in_gpu_reset = true;
do {
if (xgpu_nv_mailbox_peek_msg(adev) == IDH_FLR_NOTIFICATION_CMPL)
@@ -264,12 +264,16 @@ static void xgpu_nv_mailbox_flr_work(struct work_struct *work)
flr_done:
if (locked) {
- adev->in_gpu_reset = 0;
+ adev->in_gpu_reset = false;
mutex_unlock(&adev->lock_reset);
}
/* Trigger recovery for world switch failure if no TDR */
- if (amdgpu_device_should_recover_gpu(adev))
+ if (amdgpu_device_should_recover_gpu(adev) &&
+ (adev->sdma_timeout == MAX_SCHEDULE_TIMEOUT ||
+ adev->gfx_timeout == MAX_SCHEDULE_TIMEOUT ||
+ adev->compute_timeout == MAX_SCHEDULE_TIMEOUT ||
+ adev->video_timeout == MAX_SCHEDULE_TIMEOUT))
amdgpu_device_gpu_recover(adev, NULL);
}
diff --git a/drivers/gpu/drm/amd/amdgpu/navi10_ih.c b/drivers/gpu/drm/amd/amdgpu/navi10_ih.c
index 9af73567e716..f737ce459c28 100644
--- a/drivers/gpu/drm/amd/amdgpu/navi10_ih.c
+++ b/drivers/gpu/drm/amd/amdgpu/navi10_ih.c
@@ -110,7 +110,6 @@ static uint32_t navi10_ih_rb_cntl(struct amdgpu_ih_ring *ih, uint32_t ih_rb_cntl
static int navi10_ih_irq_init(struct amdgpu_device *adev)
{
struct amdgpu_ih_ring *ih = &adev->irq.ih;
- int ret = 0;
u32 ih_rb_cntl, ih_doorbell_rtpr, ih_chicken;
u32 tmp;
@@ -179,7 +178,7 @@ static int navi10_ih_irq_init(struct amdgpu_device *adev)
/* enable interrupts */
navi10_ih_enable_interrupts(adev);
- return ret;
+ return 0;
}
/**
diff --git a/drivers/gpu/drm/amd/amdgpu/nbio_v7_4.c b/drivers/gpu/drm/amd/amdgpu/nbio_v7_4.c
index 0db458f9fafc..65eb378fa035 100644
--- a/drivers/gpu/drm/amd/amdgpu/nbio_v7_4.c
+++ b/drivers/gpu/drm/amd/amdgpu/nbio_v7_4.c
@@ -52,6 +52,9 @@
#define BIF_MMSCH1_DOORBELL_RANGE__OFFSET_MASK 0x00000FFCL
#define BIF_MMSCH1_DOORBELL_RANGE__SIZE_MASK 0x001F0000L
+static void nbio_v7_4_query_ras_error_count(struct amdgpu_device *adev,
+ void *ras_error_status);
+
static void nbio_v7_4_remap_hdp_registers(struct amdgpu_device *adev)
{
WREG32_SOC15(NBIO, 0, mmREMAP_HDP_MEM_FLUSH_CNTL,
@@ -314,6 +317,7 @@ static void nbio_v7_4_init_registers(struct amdgpu_device *adev)
static void nbio_v7_4_handle_ras_controller_intr_no_bifring(struct amdgpu_device *adev)
{
uint32_t bif_doorbell_intr_cntl;
+ struct ras_manager *obj = amdgpu_ras_find_obj(adev, adev->nbio.ras_if);
bif_doorbell_intr_cntl = RREG32_SOC15(NBIO, 0, mmBIF_DOORBELL_INT_CNTL);
if (REG_GET_FIELD(bif_doorbell_intr_cntl,
@@ -324,7 +328,18 @@ static void nbio_v7_4_handle_ras_controller_intr_no_bifring(struct amdgpu_device
RAS_CNTLR_INTERRUPT_CLEAR, 1);
WREG32_SOC15(NBIO, 0, mmBIF_DOORBELL_INT_CNTL, bif_doorbell_intr_cntl);
- amdgpu_ras_global_ras_isr(adev);
+ /*
+ * Clear the error status after the ras_controller_intr, per the
+ * hw team's guidance, and accumulate the UE count for queries.
+ */
+ nbio_v7_4_query_ras_error_count(adev, &obj->err_data);
+
+ DRM_WARN("RAS controller interrupt triggered by NBIF error\n");
+
+ /* ras_controller_int is dedicated to nbif ras errors,
+ * not to the global sync-flood interrupt
+ */
+ amdgpu_ras_reset_gpu(adev);
}
}
@@ -441,10 +456,8 @@ static int nbio_v7_4_init_ras_controller_interrupt (struct amdgpu_device *adev)
r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_BIF,
NBIF_7_4__SRCID__RAS_CONTROLLER_INTERRUPT,
&adev->nbio.ras_controller_irq);
- if (r)
- return r;
- return 0;
+ return r;
}
static int nbio_v7_4_init_ras_err_event_athub_interrupt (struct amdgpu_device *adev)
@@ -461,16 +474,16 @@ static int nbio_v7_4_init_ras_err_event_athub_interrupt (struct amdgpu_device *a
r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_BIF,
NBIF_7_4__SRCID__ERREVENT_ATHUB_INTERRUPT,
&adev->nbio.ras_err_event_athub_irq);
- if (r)
- return r;
- return 0;
+ return r;
}
+#define smnPARITY_ERROR_STATUS_UNCORR_GRP2 0x13a20030
+
static void nbio_v7_4_query_ras_error_count(struct amdgpu_device *adev,
void *ras_error_status)
{
- uint32_t global_sts, central_sts, int_eoi;
+ uint32_t global_sts, central_sts, int_eoi, parity_sts;
uint32_t corr, fatal, non_fatal;
struct ras_err_data *err_data = (struct ras_err_data *)ras_error_status;
@@ -479,6 +492,7 @@ static void nbio_v7_4_query_ras_error_count(struct amdgpu_device *adev,
fatal = REG_GET_FIELD(global_sts, RAS_GLOBAL_STATUS_LO, ParityErrFatal);
non_fatal = REG_GET_FIELD(global_sts, RAS_GLOBAL_STATUS_LO,
ParityErrNonFatal);
+ parity_sts = RREG32_PCIE(smnPARITY_ERROR_STATUS_UNCORR_GRP2);
if (corr)
err_data->ce_count++;
@@ -490,6 +504,11 @@ static void nbio_v7_4_query_ras_error_count(struct amdgpu_device *adev,
/* clear error status register */
WREG32_PCIE(smnRAS_GLOBAL_STATUS_LO, global_sts);
+ if (fatal)
+ /* clear parity fatal error indication field */
+ WREG32_PCIE(smnPARITY_ERROR_STATUS_UNCORR_GRP2,
+ parity_sts);
+
if (REG_GET_FIELD(central_sts, BIFL_RAS_CENTRAL_STATUS,
BIFL_RasContller_Intr_Recv)) {
/* clear interrupt status register */
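The clear path above writes the value just read back to the same register; a minimal sketch of that pattern, assuming (as with smnRAS_GLOBAL_STATUS_LO earlier in this hunk) that the parity status bits are write-1-to-clear:

/* read/write-back clear, assuming W1C status bits */
u32 sts = RREG32_PCIE(smnPARITY_ERROR_STATUS_UNCORR_GRP2);

if (sts)
	WREG32_PCIE(smnPARITY_ERROR_STATUS_UNCORR_GRP2, sts); /* 1s clear 1s */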
diff --git a/drivers/gpu/drm/amd/amdgpu/nv.c b/drivers/gpu/drm/amd/amdgpu/nv.c
index 0ba66bef5746..2e0f8933410e 100644
--- a/drivers/gpu/drm/amd/amdgpu/nv.c
+++ b/drivers/gpu/drm/amd/amdgpu/nv.c
@@ -53,6 +53,7 @@
#include "gfx_v10_0.h"
#include "sdma_v5_0.h"
#include "vcn_v2_0.h"
+#include "jpeg_v2_0.h"
#include "dce_virtual.h"
#include "mes_v10_1.h"
#include "mxgpu_nv.h"
@@ -314,6 +315,16 @@ static int nv_asic_mode1_reset(struct amdgpu_device *adev)
return ret;
}
+static bool nv_asic_supports_baco(struct amdgpu_device *adev)
+{
+ struct smu_context *smu = &adev->smu;
+
+ if (smu_baco_is_support(smu))
+ return true;
+ else
+ return false;
+}
+
static enum amd_reset_method
nv_asic_reset_method(struct amdgpu_device *adev)
{
@@ -342,7 +353,12 @@ static int nv_asic_reset(struct amdgpu_device *adev)
if (nv_asic_reset_method(adev) == AMD_RESET_METHOD_BACO) {
if (!adev->in_suspend)
amdgpu_inc_vram_lost(adev);
- ret = smu_baco_reset(smu);
+ ret = smu_baco_enter(smu);
+ if (ret)
+ return ret;
+ ret = smu_baco_exit(smu);
+ if (ret)
+ return ret;
} else {
if (!adev->in_suspend)
amdgpu_inc_vram_lost(adev);
@@ -462,7 +478,7 @@ int nv_set_ip_blocks(struct amdgpu_device *adev)
amdgpu_device_ip_block_add(adev, &navi10_ih_ip_block);
amdgpu_device_ip_block_add(adev, &psp_v11_0_ip_block);
if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP &&
- is_support_sw_smu(adev) && !amdgpu_sriov_vf(adev))
+ !amdgpu_sriov_vf(adev))
amdgpu_device_ip_block_add(adev, &smu_v11_0_ip_block);
if (adev->enable_virtual_display || amdgpu_sriov_vf(adev))
amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block);
@@ -473,9 +489,10 @@ int nv_set_ip_blocks(struct amdgpu_device *adev)
amdgpu_device_ip_block_add(adev, &gfx_v10_0_ip_block);
amdgpu_device_ip_block_add(adev, &sdma_v5_0_ip_block);
if (adev->firmware.load_type == AMDGPU_FW_LOAD_DIRECT &&
- is_support_sw_smu(adev) && !amdgpu_sriov_vf(adev))
+ !amdgpu_sriov_vf(adev))
amdgpu_device_ip_block_add(adev, &smu_v11_0_ip_block);
amdgpu_device_ip_block_add(adev, &vcn_v2_0_ip_block);
+ amdgpu_device_ip_block_add(adev, &jpeg_v2_0_ip_block);
if (adev->enable_mes)
amdgpu_device_ip_block_add(adev, &mes_v10_1_ip_block);
break;
@@ -485,7 +502,7 @@ int nv_set_ip_blocks(struct amdgpu_device *adev)
amdgpu_device_ip_block_add(adev, &navi10_ih_ip_block);
amdgpu_device_ip_block_add(adev, &psp_v11_0_ip_block);
if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP &&
- is_support_sw_smu(adev) && !amdgpu_sriov_vf(adev))
+ !amdgpu_sriov_vf(adev))
amdgpu_device_ip_block_add(adev, &smu_v11_0_ip_block);
if (adev->enable_virtual_display || amdgpu_sriov_vf(adev))
amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block);
@@ -496,9 +513,10 @@ int nv_set_ip_blocks(struct amdgpu_device *adev)
amdgpu_device_ip_block_add(adev, &gfx_v10_0_ip_block);
amdgpu_device_ip_block_add(adev, &sdma_v5_0_ip_block);
if (adev->firmware.load_type == AMDGPU_FW_LOAD_DIRECT &&
- is_support_sw_smu(adev) && !amdgpu_sriov_vf(adev))
+ !amdgpu_sriov_vf(adev))
amdgpu_device_ip_block_add(adev, &smu_v11_0_ip_block);
amdgpu_device_ip_block_add(adev, &vcn_v2_0_ip_block);
+ amdgpu_device_ip_block_add(adev, &jpeg_v2_0_ip_block);
break;
default:
return -EINVAL;
@@ -617,6 +635,7 @@ static const struct amdgpu_asic_funcs nv_asic_funcs =
.get_pcie_usage = &nv_get_pcie_usage,
.need_reset_on_init = &nv_need_reset_on_init,
.get_pcie_replay_count = &nv_get_pcie_replay_count,
+ .supports_baco = &nv_asic_supports_baco,
};
static int nv_common_early_init(void *handle)
@@ -656,10 +675,12 @@ static int nv_common_early_init(void *handle)
AMD_CG_SUPPORT_ATHUB_MGCG |
AMD_CG_SUPPORT_ATHUB_LS |
AMD_CG_SUPPORT_VCN_MGCG |
+ AMD_CG_SUPPORT_JPEG_MGCG |
AMD_CG_SUPPORT_BIF_MGCG |
AMD_CG_SUPPORT_BIF_LS;
adev->pg_flags = AMD_PG_SUPPORT_VCN |
AMD_PG_SUPPORT_VCN_DPG |
+ AMD_PG_SUPPORT_JPEG |
AMD_PG_SUPPORT_ATHUB;
adev->external_rev_id = adev->rev_id + 0x1;
break;
@@ -676,9 +697,11 @@ static int nv_common_early_init(void *handle)
AMD_CG_SUPPORT_ATHUB_MGCG |
AMD_CG_SUPPORT_ATHUB_LS |
AMD_CG_SUPPORT_VCN_MGCG |
+ AMD_CG_SUPPORT_JPEG_MGCG |
AMD_CG_SUPPORT_BIF_MGCG |
AMD_CG_SUPPORT_BIF_LS;
adev->pg_flags = AMD_PG_SUPPORT_VCN |
+ AMD_PG_SUPPORT_JPEG |
AMD_PG_SUPPORT_VCN_DPG;
adev->external_rev_id = adev->rev_id + 20;
break;
@@ -697,10 +720,18 @@ static int nv_common_early_init(void *handle)
AMD_CG_SUPPORT_MC_LS |
AMD_CG_SUPPORT_ATHUB_MGCG |
AMD_CG_SUPPORT_ATHUB_LS |
- AMD_CG_SUPPORT_VCN_MGCG;
+ AMD_CG_SUPPORT_VCN_MGCG |
+ AMD_CG_SUPPORT_JPEG_MGCG;
adev->pg_flags = AMD_PG_SUPPORT_VCN |
AMD_PG_SUPPORT_VCN_DPG |
+ AMD_PG_SUPPORT_JPEG |
AMD_PG_SUPPORT_ATHUB;
+ /* The guest VM gets 0xffffffff when reading RCC_DEV0_EPF0_STRAP0;
+ * as a consequence, the rev_id and external_rev_id are wrong.
+ * Work around it by hardcoding rev_id to 0 (the default value).
+ */
+ if (amdgpu_sriov_vf(adev))
+ adev->rev_id = 0;
adev->external_rev_id = adev->rev_id + 0xa;
break;
default:
diff --git a/drivers/gpu/drm/amd/amdgpu/psp_gfx_if.h b/drivers/gpu/drm/amd/amdgpu/psp_gfx_if.h
index 74a9fe8e0cfb..36b65797434e 100644
--- a/drivers/gpu/drm/amd/amdgpu/psp_gfx_if.h
+++ b/drivers/gpu/drm/amd/amdgpu/psp_gfx_if.h
@@ -242,6 +242,7 @@ enum psp_gfx_fw_type {
GFX_FW_TYPE_SDMA5 = 55, /* SDMA5 MI */
GFX_FW_TYPE_SDMA6 = 56, /* SDMA6 MI */
GFX_FW_TYPE_SDMA7 = 57, /* SDMA7 MI */
+ GFX_FW_TYPE_VCN1 = 58, /* VCN1 MI */
GFX_FW_TYPE_MAX
};
diff --git a/drivers/gpu/drm/amd/amdgpu/psp_v10_0.c b/drivers/gpu/drm/amd/amdgpu/psp_v10_0.c
index b345e69ba246..7539104175e8 100644
--- a/drivers/gpu/drm/amd/amdgpu/psp_v10_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/psp_v10_0.c
@@ -230,54 +230,6 @@ static int psp_v10_0_ring_destroy(struct psp_context *psp,
return ret;
}
-static int psp_v10_0_cmd_submit(struct psp_context *psp,
- uint64_t cmd_buf_mc_addr, uint64_t fence_mc_addr,
- int index)
-{
- unsigned int psp_write_ptr_reg = 0;
- struct psp_gfx_rb_frame * write_frame = psp->km_ring.ring_mem;
- struct psp_ring *ring = &psp->km_ring;
- struct psp_gfx_rb_frame *ring_buffer_start = ring->ring_mem;
- struct psp_gfx_rb_frame *ring_buffer_end = ring_buffer_start +
- ring->ring_size / sizeof(struct psp_gfx_rb_frame) - 1;
- struct amdgpu_device *adev = psp->adev;
- uint32_t ring_size_dw = ring->ring_size / 4;
- uint32_t rb_frame_size_dw = sizeof(struct psp_gfx_rb_frame) / 4;
-
- /* KM (GPCOM) prepare write pointer */
- psp_write_ptr_reg = RREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_67);
-
- /* Update KM RB frame pointer to new frame */
- if ((psp_write_ptr_reg % ring_size_dw) == 0)
- write_frame = ring_buffer_start;
- else
- write_frame = ring_buffer_start + (psp_write_ptr_reg / rb_frame_size_dw);
- /* Check invalid write_frame ptr address */
- if ((write_frame < ring_buffer_start) || (ring_buffer_end < write_frame)) {
- DRM_ERROR("ring_buffer_start = %p; ring_buffer_end = %p; write_frame = %p\n",
- ring_buffer_start, ring_buffer_end, write_frame);
- DRM_ERROR("write_frame is pointing to address out of bounds\n");
- return -EINVAL;
- }
-
- /* Initialize KM RB frame */
- memset(write_frame, 0, sizeof(struct psp_gfx_rb_frame));
-
- /* Update KM RB frame */
- write_frame->cmd_buf_addr_hi = upper_32_bits(cmd_buf_mc_addr);
- write_frame->cmd_buf_addr_lo = lower_32_bits(cmd_buf_mc_addr);
- write_frame->fence_addr_hi = upper_32_bits(fence_mc_addr);
- write_frame->fence_addr_lo = lower_32_bits(fence_mc_addr);
- write_frame->fence_value = index;
- amdgpu_asic_flush_hdp(adev, NULL);
-
- /* Update the write Pointer in DWORDs */
- psp_write_ptr_reg = (psp_write_ptr_reg + rb_frame_size_dw) % ring_size_dw;
- WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_67, psp_write_ptr_reg);
-
- return 0;
-}
-
static int
psp_v10_0_sram_map(struct amdgpu_device *adev,
unsigned int *sram_offset, unsigned int *sram_addr_reg_offset,
@@ -407,15 +359,30 @@ static int psp_v10_0_mode1_reset(struct psp_context *psp)
return -EINVAL;
}
+static uint32_t psp_v10_0_ring_get_wptr(struct psp_context *psp)
+{
+ struct amdgpu_device *adev = psp->adev;
+
+ return RREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_67);
+}
+
+static void psp_v10_0_ring_set_wptr(struct psp_context *psp, uint32_t value)
+{
+ struct amdgpu_device *adev = psp->adev;
+
+ WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_67, value);
+}
+
static const struct psp_funcs psp_v10_0_funcs = {
.init_microcode = psp_v10_0_init_microcode,
.ring_init = psp_v10_0_ring_init,
.ring_create = psp_v10_0_ring_create,
.ring_stop = psp_v10_0_ring_stop,
.ring_destroy = psp_v10_0_ring_destroy,
- .cmd_submit = psp_v10_0_cmd_submit,
.compare_sram_data = psp_v10_0_compare_sram_data,
.mode1_reset = psp_v10_0_mode1_reset,
+ .ring_get_wptr = psp_v10_0_ring_get_wptr,
+ .ring_set_wptr = psp_v10_0_ring_set_wptr,
};
void psp_v10_0_set_psp_funcs(struct psp_context *psp)
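With cmd_submit deleted from each PSP version, only the write-pointer accessors remain per-IP; the frame bookkeeping moves to common code. A sketch of what that shared submit path looks like, assuming a psp_ring_cmd_submit() helper in amdgpu_psp.c that dispatches through the new ring_get_wptr/ring_set_wptr hooks (the out-of-bounds check from the deleted copies is omitted for brevity):

/* Sketch only: the helper name and its placement are assumptions;
 * the bookkeeping mirrors the deleted per-IP cmd_submit bodies. */
static int psp_ring_cmd_submit(struct psp_context *psp,
			       uint64_t cmd_buf_mc_addr,
			       uint64_t fence_mc_addr, int index)
{
	struct psp_ring *ring = &psp->km_ring;
	struct psp_gfx_rb_frame *frame = ring->ring_mem;
	uint32_t ring_size_dw = ring->ring_size / 4;
	uint32_t rb_frame_size_dw = sizeof(struct psp_gfx_rb_frame) / 4;
	uint32_t wptr = psp_ring_get_wptr(psp);	/* per-IP hook */

	/* advance to the frame the write pointer refers to */
	if (wptr % ring_size_dw)
		frame += wptr / rb_frame_size_dw;

	memset(frame, 0, sizeof(*frame));
	frame->cmd_buf_addr_hi = upper_32_bits(cmd_buf_mc_addr);
	frame->cmd_buf_addr_lo = lower_32_bits(cmd_buf_mc_addr);
	frame->fence_addr_hi = upper_32_bits(fence_mc_addr);
	frame->fence_addr_lo = lower_32_bits(fence_mc_addr);
	frame->fence_value = index;
	amdgpu_asic_flush_hdp(psp->adev, NULL);

	/* bump the write pointer in dwords through the per-IP hook */
	psp_ring_set_wptr(psp, (wptr + rb_frame_size_dw) % ring_size_dw);
	return 0;
}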
diff --git a/drivers/gpu/drm/amd/amdgpu/psp_v11_0.c b/drivers/gpu/drm/amd/amdgpu/psp_v11_0.c
index ffeaa2f5588d..685dd9754c67 100644
--- a/drivers/gpu/drm/amd/amdgpu/psp_v11_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/psp_v11_0.c
@@ -43,10 +43,13 @@ MODULE_FIRMWARE("amdgpu/vega20_asd.bin");
MODULE_FIRMWARE("amdgpu/vega20_ta.bin");
MODULE_FIRMWARE("amdgpu/navi10_sos.bin");
MODULE_FIRMWARE("amdgpu/navi10_asd.bin");
+MODULE_FIRMWARE("amdgpu/navi10_ta.bin");
MODULE_FIRMWARE("amdgpu/navi14_sos.bin");
MODULE_FIRMWARE("amdgpu/navi14_asd.bin");
+MODULE_FIRMWARE("amdgpu/navi14_ta.bin");
MODULE_FIRMWARE("amdgpu/navi12_sos.bin");
MODULE_FIRMWARE("amdgpu/navi12_asd.bin");
+MODULE_FIRMWARE("amdgpu/navi12_ta.bin");
MODULE_FIRMWARE("amdgpu/arcturus_sos.bin");
MODULE_FIRMWARE("amdgpu/arcturus_asd.bin");
MODULE_FIRMWARE("amdgpu/arcturus_ta.bin");
@@ -186,6 +189,31 @@ static int psp_v11_0_init_microcode(struct psp_context *psp)
case CHIP_NAVI10:
case CHIP_NAVI14:
case CHIP_NAVI12:
+ snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_ta.bin", chip_name);
+ err = request_firmware(&adev->psp.ta_fw, fw_name, adev->dev);
+ if (err) {
+ release_firmware(adev->psp.ta_fw);
+ adev->psp.ta_fw = NULL;
+ dev_info(adev->dev,
+ "psp v11.0: Failed to load firmware \"%s\"\n", fw_name);
+ } else {
+ err = amdgpu_ucode_validate(adev->psp.ta_fw);
+ if (err)
+ goto out2;
+
+ ta_hdr = (const struct ta_firmware_header_v1_0 *)adev->psp.ta_fw->data;
+ adev->psp.ta_hdcp_ucode_version = le32_to_cpu(ta_hdr->ta_hdcp_ucode_version);
+ adev->psp.ta_hdcp_ucode_size = le32_to_cpu(ta_hdr->ta_hdcp_size_bytes);
+ adev->psp.ta_hdcp_start_addr = (uint8_t *)ta_hdr +
+ le32_to_cpu(ta_hdr->header.ucode_array_offset_bytes);
+
+ adev->psp.ta_fw_version = le32_to_cpu(ta_hdr->header.ucode_version);
+
+ adev->psp.ta_dtm_ucode_version = le32_to_cpu(ta_hdr->ta_dtm_ucode_version);
+ adev->psp.ta_dtm_ucode_size = le32_to_cpu(ta_hdr->ta_dtm_size_bytes);
+ adev->psp.ta_dtm_start_addr = (uint8_t *)adev->psp.ta_hdcp_start_addr +
+ le32_to_cpu(ta_hdr->ta_dtm_offset_bytes);
+ }
break;
default:
BUG();
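The new block above derives both trusted-app blobs from one combined TA image. In pointer terms, keeping the offset semantics the code implies (ucode_array_offset_bytes is relative to the image start, while the dtm offset is relative to the hdcp blob):

/* sketch of the pointer derivation performed above */
const u8 *image = adev->psp.ta_fw->data;
const struct ta_firmware_header_v1_0 *hdr = (const void *)image;
const u8 *hdcp = image + le32_to_cpu(hdr->header.ucode_array_offset_bytes);
const u8 *dtm  = hdcp + le32_to_cpu(hdr->ta_dtm_offset_bytes);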
@@ -208,6 +236,29 @@ out:
return err;
}
+int psp_v11_0_wait_for_bootloader(struct psp_context *psp)
+{
+ struct amdgpu_device *adev = psp->adev;
+
+ int ret;
+ int retry_loop;
+
+ for (retry_loop = 0; retry_loop < 10; retry_loop++) {
+ /* Wait for the bootloader to signify that it is
+ ready, i.e. bit 31 of C2PMSG_35 set to 1 */
+ ret = psp_wait_for(psp,
+ SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_35),
+ 0x80000000,
+ 0x80000000,
+ false);
+
+ if (ret == 0)
+ return 0;
+ }
+
+ return ret;
+}
+
static bool psp_v11_0_is_sos_alive(struct psp_context *psp)
{
struct amdgpu_device *adev = psp->adev;
@@ -233,9 +284,7 @@ static int psp_v11_0_bootloader_load_kdb(struct psp_context *psp)
return 0;
}
- /* Wait for bootloader to signify that is ready having bit 31 of C2PMSG_35 set to 1 */
- ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_35),
- 0x80000000, 0x80000000, false);
+ ret = psp_v11_0_wait_for_bootloader(psp);
if (ret)
return ret;
@@ -251,9 +300,7 @@ static int psp_v11_0_bootloader_load_kdb(struct psp_context *psp)
WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_35,
psp_gfxdrv_command_reg);
- /* Wait for bootloader to signify that is ready having bit 31 of C2PMSG_35 set to 1*/
- ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_35),
- 0x80000000, 0x80000000, false);
+ ret = psp_v11_0_wait_for_bootloader(psp);
return ret;
}
@@ -273,9 +320,7 @@ static int psp_v11_0_bootloader_load_sysdrv(struct psp_context *psp)
return 0;
}
- /* Wait for bootloader to signify that is ready having bit 31 of C2PMSG_35 set to 1 */
- ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_35),
- 0x80000000, 0x80000000, false);
+ ret = psp_v11_0_wait_for_bootloader(psp);
if (ret)
return ret;
@@ -294,8 +339,7 @@ static int psp_v11_0_bootloader_load_sysdrv(struct psp_context *psp)
/* there might be handshake issue with hardware which needs delay */
mdelay(20);
- ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_35),
- 0x80000000, 0x80000000, false);
+ ret = psp_v11_0_wait_for_bootloader(psp);
return ret;
}
@@ -312,9 +356,7 @@ static int psp_v11_0_bootloader_load_sos(struct psp_context *psp)
if (psp_v11_0_is_sos_alive(psp))
return 0;
- /* Wait for bootloader to signify that is ready having bit 31 of C2PMSG_35 set to 1 */
- ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_35),
- 0x80000000, 0x80000000, false);
+ ret = psp_v11_0_wait_for_bootloader(psp);
if (ret)
return ret;
@@ -519,63 +561,6 @@ static int psp_v11_0_ring_destroy(struct psp_context *psp,
return ret;
}
-static int psp_v11_0_cmd_submit(struct psp_context *psp,
- uint64_t cmd_buf_mc_addr, uint64_t fence_mc_addr,
- int index)
-{
- unsigned int psp_write_ptr_reg = 0;
- struct psp_gfx_rb_frame *write_frame = psp->km_ring.ring_mem;
- struct psp_ring *ring = &psp->km_ring;
- struct psp_gfx_rb_frame *ring_buffer_start = ring->ring_mem;
- struct psp_gfx_rb_frame *ring_buffer_end = ring_buffer_start +
- ring->ring_size / sizeof(struct psp_gfx_rb_frame) - 1;
- struct amdgpu_device *adev = psp->adev;
- uint32_t ring_size_dw = ring->ring_size / 4;
- uint32_t rb_frame_size_dw = sizeof(struct psp_gfx_rb_frame) / 4;
-
- /* KM (GPCOM) prepare write pointer */
- if (psp_v11_0_support_vmr_ring(psp))
- psp_write_ptr_reg = RREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_102);
- else
- psp_write_ptr_reg = RREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_67);
-
- /* Update KM RB frame pointer to new frame */
- /* write_frame ptr increments by size of rb_frame in bytes */
- /* psp_write_ptr_reg increments by size of rb_frame in DWORDs */
- if ((psp_write_ptr_reg % ring_size_dw) == 0)
- write_frame = ring_buffer_start;
- else
- write_frame = ring_buffer_start + (psp_write_ptr_reg / rb_frame_size_dw);
- /* Check invalid write_frame ptr address */
- if ((write_frame < ring_buffer_start) || (ring_buffer_end < write_frame)) {
- DRM_ERROR("ring_buffer_start = %p; ring_buffer_end = %p; write_frame = %p\n",
- ring_buffer_start, ring_buffer_end, write_frame);
- DRM_ERROR("write_frame is pointing to address out of bounds\n");
- return -EINVAL;
- }
-
- /* Initialize KM RB frame */
- memset(write_frame, 0, sizeof(struct psp_gfx_rb_frame));
-
- /* Update KM RB frame */
- write_frame->cmd_buf_addr_hi = upper_32_bits(cmd_buf_mc_addr);
- write_frame->cmd_buf_addr_lo = lower_32_bits(cmd_buf_mc_addr);
- write_frame->fence_addr_hi = upper_32_bits(fence_mc_addr);
- write_frame->fence_addr_lo = lower_32_bits(fence_mc_addr);
- write_frame->fence_value = index;
- amdgpu_asic_flush_hdp(adev, NULL);
-
- /* Update the write Pointer in DWORDs */
- psp_write_ptr_reg = (psp_write_ptr_reg + rb_frame_size_dw) % ring_size_dw;
- if (psp_v11_0_support_vmr_ring(psp)) {
- WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_102, psp_write_ptr_reg);
- WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_101, GFX_CTRL_CMD_ID_CONSUME_CMD);
- } else
- WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_67, psp_write_ptr_reg);
-
- return 0;
-}
-
static int
psp_v11_0_sram_map(struct amdgpu_device *adev,
unsigned int *sram_offset, unsigned int *sram_addr_reg_offset,
@@ -1068,6 +1053,30 @@ static int psp_v11_0_memory_training(struct psp_context *psp, uint32_t ops)
return 0;
}
+static uint32_t psp_v11_0_ring_get_wptr(struct psp_context *psp)
+{
+ uint32_t data;
+ struct amdgpu_device *adev = psp->adev;
+
+ if (psp_v11_0_support_vmr_ring(psp))
+ data = RREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_102);
+ else
+ data = RREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_67);
+
+ return data;
+}
+
+static void psp_v11_0_ring_set_wptr(struct psp_context *psp, uint32_t value)
+{
+ struct amdgpu_device *adev = psp->adev;
+
+ if (psp_v11_0_support_vmr_ring(psp)) {
+ WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_102, value);
+ WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_101, GFX_CTRL_CMD_ID_CONSUME_CMD);
+ } else
+ WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_67, value);
+}
+
static const struct psp_funcs psp_v11_0_funcs = {
.init_microcode = psp_v11_0_init_microcode,
.bootloader_load_kdb = psp_v11_0_bootloader_load_kdb,
@@ -1077,7 +1086,6 @@ static const struct psp_funcs psp_v11_0_funcs = {
.ring_create = psp_v11_0_ring_create,
.ring_stop = psp_v11_0_ring_stop,
.ring_destroy = psp_v11_0_ring_destroy,
- .cmd_submit = psp_v11_0_cmd_submit,
.compare_sram_data = psp_v11_0_compare_sram_data,
.mode1_reset = psp_v11_0_mode1_reset,
.xgmi_get_topology_info = psp_v11_0_xgmi_get_topology_info,
@@ -1091,6 +1099,8 @@ static const struct psp_funcs psp_v11_0_funcs = {
.mem_training_init = psp_v11_0_memory_training_init,
.mem_training_fini = psp_v11_0_memory_training_fini,
.mem_training = psp_v11_0_memory_training,
+ .ring_get_wptr = psp_v11_0_ring_get_wptr,
+ .ring_set_wptr = psp_v11_0_ring_set_wptr,
};
void psp_v11_0_set_psp_funcs(struct psp_context *psp)
diff --git a/drivers/gpu/drm/amd/amdgpu/psp_v12_0.c b/drivers/gpu/drm/amd/amdgpu/psp_v12_0.c
index 8f553f6f92d6..58d8b6d732e8 100644
--- a/drivers/gpu/drm/amd/amdgpu/psp_v12_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/psp_v12_0.c
@@ -334,63 +334,6 @@ static int psp_v12_0_ring_destroy(struct psp_context *psp,
return ret;
}
-static int psp_v12_0_cmd_submit(struct psp_context *psp,
- uint64_t cmd_buf_mc_addr, uint64_t fence_mc_addr,
- int index)
-{
- unsigned int psp_write_ptr_reg = 0;
- struct psp_gfx_rb_frame *write_frame = psp->km_ring.ring_mem;
- struct psp_ring *ring = &psp->km_ring;
- struct psp_gfx_rb_frame *ring_buffer_start = ring->ring_mem;
- struct psp_gfx_rb_frame *ring_buffer_end = ring_buffer_start +
- ring->ring_size / sizeof(struct psp_gfx_rb_frame) - 1;
- struct amdgpu_device *adev = psp->adev;
- uint32_t ring_size_dw = ring->ring_size / 4;
- uint32_t rb_frame_size_dw = sizeof(struct psp_gfx_rb_frame) / 4;
-
- /* KM (GPCOM) prepare write pointer */
- if (psp_v12_0_support_vmr_ring(psp))
- psp_write_ptr_reg = RREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_102);
- else
- psp_write_ptr_reg = RREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_67);
-
- /* Update KM RB frame pointer to new frame */
- /* write_frame ptr increments by size of rb_frame in bytes */
- /* psp_write_ptr_reg increments by size of rb_frame in DWORDs */
- if ((psp_write_ptr_reg % ring_size_dw) == 0)
- write_frame = ring_buffer_start;
- else
- write_frame = ring_buffer_start + (psp_write_ptr_reg / rb_frame_size_dw);
- /* Check invalid write_frame ptr address */
- if ((write_frame < ring_buffer_start) || (ring_buffer_end < write_frame)) {
- DRM_ERROR("ring_buffer_start = %p; ring_buffer_end = %p; write_frame = %p\n",
- ring_buffer_start, ring_buffer_end, write_frame);
- DRM_ERROR("write_frame is pointing to address out of bounds\n");
- return -EINVAL;
- }
-
- /* Initialize KM RB frame */
- memset(write_frame, 0, sizeof(struct psp_gfx_rb_frame));
-
- /* Update KM RB frame */
- write_frame->cmd_buf_addr_hi = upper_32_bits(cmd_buf_mc_addr);
- write_frame->cmd_buf_addr_lo = lower_32_bits(cmd_buf_mc_addr);
- write_frame->fence_addr_hi = upper_32_bits(fence_mc_addr);
- write_frame->fence_addr_lo = lower_32_bits(fence_mc_addr);
- write_frame->fence_value = index;
- amdgpu_asic_flush_hdp(adev, NULL);
-
- /* Update the write Pointer in DWORDs */
- psp_write_ptr_reg = (psp_write_ptr_reg + rb_frame_size_dw) % ring_size_dw;
- if (psp_v12_0_support_vmr_ring(psp)) {
- WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_102, psp_write_ptr_reg);
- WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_101, GFX_CTRL_CMD_ID_CONSUME_CMD);
- } else
- WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_67, psp_write_ptr_reg);
-
- return 0;
-}
-
static int
psp_v12_0_sram_map(struct amdgpu_device *adev,
unsigned int *sram_offset, unsigned int *sram_addr_reg_offset,
@@ -547,6 +490,30 @@ static int psp_v12_0_mode1_reset(struct psp_context *psp)
return 0;
}
+static uint32_t psp_v12_0_ring_get_wptr(struct psp_context *psp)
+{
+ uint32_t data;
+ struct amdgpu_device *adev = psp->adev;
+
+ if (psp_v12_0_support_vmr_ring(psp))
+ data = RREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_102);
+ else
+ data = RREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_67);
+
+ return data;
+}
+
+static void psp_v12_0_ring_set_wptr(struct psp_context *psp, uint32_t value)
+{
+ struct amdgpu_device *adev = psp->adev;
+
+ if (psp_v12_0_support_vmr_ring(psp)) {
+ WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_102, value);
+ WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_101, GFX_CTRL_CMD_ID_CONSUME_CMD);
+ } else
+ WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_67, value);
+}
+
static const struct psp_funcs psp_v12_0_funcs = {
.init_microcode = psp_v12_0_init_microcode,
.bootloader_load_sysdrv = psp_v12_0_bootloader_load_sysdrv,
@@ -555,9 +522,10 @@ static const struct psp_funcs psp_v12_0_funcs = {
.ring_create = psp_v12_0_ring_create,
.ring_stop = psp_v12_0_ring_stop,
.ring_destroy = psp_v12_0_ring_destroy,
- .cmd_submit = psp_v12_0_cmd_submit,
.compare_sram_data = psp_v12_0_compare_sram_data,
.mode1_reset = psp_v12_0_mode1_reset,
+ .ring_get_wptr = psp_v12_0_ring_get_wptr,
+ .ring_set_wptr = psp_v12_0_ring_set_wptr,
};
void psp_v12_0_set_psp_funcs(struct psp_context *psp)
diff --git a/drivers/gpu/drm/amd/amdgpu/psp_v3_1.c b/drivers/gpu/drm/amd/amdgpu/psp_v3_1.c
index fdc00938327b..735c43c7daab 100644
--- a/drivers/gpu/drm/amd/amdgpu/psp_v3_1.c
+++ b/drivers/gpu/drm/amd/amdgpu/psp_v3_1.c
@@ -179,7 +179,7 @@ static bool psp_v3_1_match_version(struct amdgpu_device *adev, uint32_t ver)
* Double check if the latest four legacy versions.
* If yes, it is still the right version.
*/
- for (i = 0; i < sizeof(sos_old_versions) / sizeof(uint32_t); i++) {
+ for (i = 0; i < ARRAY_SIZE(sos_old_versions); i++) {
if (sos_old_versions[i] == adev->psp.sos_fw_version)
return true;
}
@@ -410,65 +410,6 @@ static int psp_v3_1_ring_destroy(struct psp_context *psp,
return ret;
}
-static int psp_v3_1_cmd_submit(struct psp_context *psp,
- uint64_t cmd_buf_mc_addr, uint64_t fence_mc_addr,
- int index)
-{
- unsigned int psp_write_ptr_reg = 0;
- struct psp_gfx_rb_frame * write_frame = psp->km_ring.ring_mem;
- struct psp_ring *ring = &psp->km_ring;
- struct psp_gfx_rb_frame *ring_buffer_start = ring->ring_mem;
- struct psp_gfx_rb_frame *ring_buffer_end = ring_buffer_start +
- ring->ring_size / sizeof(struct psp_gfx_rb_frame) - 1;
- struct amdgpu_device *adev = psp->adev;
- uint32_t ring_size_dw = ring->ring_size / 4;
- uint32_t rb_frame_size_dw = sizeof(struct psp_gfx_rb_frame) / 4;
-
- /* KM (GPCOM) prepare write pointer */
- if (psp_v3_1_support_vmr_ring(psp))
- psp_write_ptr_reg = RREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_102);
- else
- psp_write_ptr_reg = RREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_67);
-
- /* Update KM RB frame pointer to new frame */
- /* write_frame ptr increments by size of rb_frame in bytes */
- /* psp_write_ptr_reg increments by size of rb_frame in DWORDs */
- if ((psp_write_ptr_reg % ring_size_dw) == 0)
- write_frame = ring_buffer_start;
- else
- write_frame = ring_buffer_start + (psp_write_ptr_reg / rb_frame_size_dw);
- /* Check invalid write_frame ptr address */
- if ((write_frame < ring_buffer_start) || (ring_buffer_end < write_frame)) {
- DRM_ERROR("ring_buffer_start = %p; ring_buffer_end = %p; write_frame = %p\n",
- ring_buffer_start, ring_buffer_end, write_frame);
- DRM_ERROR("write_frame is pointing to address out of bounds\n");
- return -EINVAL;
- }
-
- /* Initialize KM RB frame */
- memset(write_frame, 0, sizeof(struct psp_gfx_rb_frame));
-
- /* Update KM RB frame */
- write_frame->cmd_buf_addr_hi = upper_32_bits(cmd_buf_mc_addr);
- write_frame->cmd_buf_addr_lo = lower_32_bits(cmd_buf_mc_addr);
- write_frame->fence_addr_hi = upper_32_bits(fence_mc_addr);
- write_frame->fence_addr_lo = lower_32_bits(fence_mc_addr);
- write_frame->fence_value = index;
- amdgpu_asic_flush_hdp(adev, NULL);
-
- /* Update the write Pointer in DWORDs */
- psp_write_ptr_reg = (psp_write_ptr_reg + rb_frame_size_dw) % ring_size_dw;
- if (psp_v3_1_support_vmr_ring(psp)) {
- WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_102, psp_write_ptr_reg);
- /* send interrupt to PSP for SRIOV ring write pointer update */
- WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_101,
- GFX_CTRL_CMD_ID_CONSUME_CMD);
- } else
- WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_67, psp_write_ptr_reg);
-
- return 0;
-}
-
static int
psp_v3_1_sram_map(struct amdgpu_device *adev,
unsigned int *sram_offset, unsigned int *sram_addr_reg_offset,
@@ -642,6 +583,31 @@ static bool psp_v3_1_support_vmr_ring(struct psp_context *psp)
return false;
}
+static uint32_t psp_v3_1_ring_get_wptr(struct psp_context *psp)
+{
+ uint32_t data;
+ struct amdgpu_device *adev = psp->adev;
+
+ if (psp_v3_1_support_vmr_ring(psp))
+ data = RREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_102);
+ else
+ data = RREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_67);
+ return data;
+}
+
+static void psp_v3_1_ring_set_wptr(struct psp_context *psp, uint32_t value)
+{
+ struct amdgpu_device *adev = psp->adev;
+
+ if (psp_v3_1_support_vmr_ring(psp)) {
+ WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_102, value);
+ /* send interrupt to PSP for SRIOV ring write pointer update */
+ WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_101,
+ GFX_CTRL_CMD_ID_CONSUME_CMD);
+ } else
+ WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_67, value);
+}
+
static const struct psp_funcs psp_v3_1_funcs = {
.init_microcode = psp_v3_1_init_microcode,
.bootloader_load_sysdrv = psp_v3_1_bootloader_load_sysdrv,
@@ -650,11 +616,12 @@ static const struct psp_funcs psp_v3_1_funcs = {
.ring_create = psp_v3_1_ring_create,
.ring_stop = psp_v3_1_ring_stop,
.ring_destroy = psp_v3_1_ring_destroy,
- .cmd_submit = psp_v3_1_cmd_submit,
.compare_sram_data = psp_v3_1_compare_sram_data,
.smu_reload_quirk = psp_v3_1_smu_reload_quirk,
.mode1_reset = psp_v3_1_mode1_reset,
.support_vmr_ring = psp_v3_1_support_vmr_ring,
+ .ring_get_wptr = psp_v3_1_ring_get_wptr,
+ .ring_set_wptr = psp_v3_1_ring_set_wptr,
};
void psp_v3_1_set_psp_funcs(struct psp_context *psp)
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c b/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c
index a10175838013..7d509a40076f 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c
@@ -255,7 +255,7 @@ static void sdma_v2_4_ring_emit_ib(struct amdgpu_ring *ring,
unsigned vmid = AMDGPU_JOB_GET_VMID(job);
/* IB packet must end on a 8 DW boundary */
- sdma_v2_4_ring_insert_nop(ring, (10 - (lower_32_bits(ring->wptr) & 7)) % 8);
+ sdma_v2_4_ring_insert_nop(ring, (2 - lower_32_bits(ring->wptr)) & 7);
amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_INDIRECT) |
SDMA_PKT_INDIRECT_HEADER_VMID(vmid & 0xf));
@@ -750,7 +750,7 @@ static void sdma_v2_4_ring_pad_ib(struct amdgpu_ring *ring, struct amdgpu_ib *ib
u32 pad_count;
int i;
- pad_count = (8 - (ib->length_dw & 0x7)) % 8;
+ pad_count = (-ib->length_dw) & 7;
for (i = 0; i < pad_count; i++)
if (sdma && sdma->burst_nop && (i == 0))
ib->ptr[ib->length_dw++] =
@@ -1260,16 +1260,14 @@ static const struct amdgpu_vm_pte_funcs sdma_v2_4_vm_pte_funcs = {
static void sdma_v2_4_set_vm_pte_funcs(struct amdgpu_device *adev)
{
- struct drm_gpu_scheduler *sched;
unsigned i;
adev->vm_manager.vm_pte_funcs = &sdma_v2_4_vm_pte_funcs;
for (i = 0; i < adev->sdma.num_instances; i++) {
- sched = &adev->sdma.instance[i].ring.sched;
- adev->vm_manager.vm_pte_rqs[i] =
- &sched->sched_rq[DRM_SCHED_PRIORITY_KERNEL];
+ adev->vm_manager.vm_pte_scheds[i] =
+ &adev->sdma.instance[i].ring.sched;
}
- adev->vm_manager.vm_pte_num_rqs = adev->sdma.num_instances;
+ adev->vm_manager.vm_pte_num_scheds = adev->sdma.num_instances;
}
const struct amdgpu_ip_block_version sdma_v2_4_ip_block =
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c
index 5f4e2c616241..b6109a99fc43 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c
@@ -429,7 +429,7 @@ static void sdma_v3_0_ring_emit_ib(struct amdgpu_ring *ring,
unsigned vmid = AMDGPU_JOB_GET_VMID(job);
/* IB packet must end on a 8 DW boundary */
- sdma_v3_0_ring_insert_nop(ring, (10 - (lower_32_bits(ring->wptr) & 7)) % 8);
+ sdma_v3_0_ring_insert_nop(ring, (2 - lower_32_bits(ring->wptr)) & 7);
amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_INDIRECT) |
SDMA_PKT_INDIRECT_HEADER_VMID(vmid & 0xf));
@@ -1021,7 +1021,7 @@ static void sdma_v3_0_ring_pad_ib(struct amdgpu_ring *ring, struct amdgpu_ib *ib
u32 pad_count;
int i;
- pad_count = (8 - (ib->length_dw & 0x7)) % 8;
+ pad_count = (-ib->length_dw) & 7;
for (i = 0; i < pad_count; i++)
if (sdma && sdma->burst_nop && (i == 0))
ib->ptr[ib->length_dw++] =
@@ -1698,16 +1698,14 @@ static const struct amdgpu_vm_pte_funcs sdma_v3_0_vm_pte_funcs = {
static void sdma_v3_0_set_vm_pte_funcs(struct amdgpu_device *adev)
{
- struct drm_gpu_scheduler *sched;
unsigned i;
adev->vm_manager.vm_pte_funcs = &sdma_v3_0_vm_pte_funcs;
for (i = 0; i < adev->sdma.num_instances; i++) {
- sched = &adev->sdma.instance[i].ring.sched;
- adev->vm_manager.vm_pte_rqs[i] =
- &sched->sched_rq[DRM_SCHED_PRIORITY_KERNEL];
+ adev->vm_manager.vm_pte_scheds[i] =
+ &adev->sdma.instance[i].ring.sched;
}
- adev->vm_manager.vm_pte_num_rqs = adev->sdma.num_instances;
+ adev->vm_manager.vm_pte_num_scheds = adev->sdma.num_instances;
}
const struct amdgpu_ip_block_version sdma_v3_0_ip_block =
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
index 4ef4d31f5231..27c7001be1ee 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
@@ -82,6 +82,7 @@ static void sdma_v4_0_set_ring_funcs(struct amdgpu_device *adev);
static void sdma_v4_0_set_buffer_funcs(struct amdgpu_device *adev);
static void sdma_v4_0_set_vm_pte_funcs(struct amdgpu_device *adev);
static void sdma_v4_0_set_irq_funcs(struct amdgpu_device *adev);
+static void sdma_v4_0_set_ras_funcs(struct amdgpu_device *adev);
static const struct soc15_reg_golden golden_settings_sdma_4[] = {
SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_CHICKEN_BITS, 0xfe931f07, 0x02831d07),
@@ -254,7 +255,106 @@ static const struct soc15_reg_golden golden_settings_sdma_4_3[] = {
SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_RLC0_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_RLC1_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_UTCL1_PAGE, 0x000003ff, 0x000003c0),
- SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_UTCL1_WATERMK, 0xfc000000, 0x00000000)
+ SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_UTCL1_WATERMK, 0xfc000000, 0x03fbe1fe)
+};
+
+static const struct soc15_ras_field_entry sdma_v4_0_ras_fields[] = {
+ { "SDMA_UCODE_BUF_SED", SOC15_REG_ENTRY(SDMA0, 0, mmSDMA0_EDC_COUNTER),
+ SOC15_REG_FIELD(SDMA0_EDC_COUNTER, SDMA_UCODE_BUF_SED),
+ 0, 0,
+ },
+ { "SDMA_RB_CMD_BUF_SED", SOC15_REG_ENTRY(SDMA0, 0, mmSDMA0_EDC_COUNTER),
+ SOC15_REG_FIELD(SDMA0_EDC_COUNTER, SDMA_RB_CMD_BUF_SED),
+ 0, 0,
+ },
+ { "SDMA_IB_CMD_BUF_SED", SOC15_REG_ENTRY(SDMA0, 0, mmSDMA0_EDC_COUNTER),
+ SOC15_REG_FIELD(SDMA0_EDC_COUNTER, SDMA_IB_CMD_BUF_SED),
+ 0, 0,
+ },
+ { "SDMA_UTCL1_RD_FIFO_SED", SOC15_REG_ENTRY(SDMA0, 0, mmSDMA0_EDC_COUNTER),
+ SOC15_REG_FIELD(SDMA0_EDC_COUNTER, SDMA_UTCL1_RD_FIFO_SED),
+ 0, 0,
+ },
+ { "SDMA_UTCL1_RDBST_FIFO_SED", SOC15_REG_ENTRY(SDMA0, 0, mmSDMA0_EDC_COUNTER),
+ SOC15_REG_FIELD(SDMA0_EDC_COUNTER, SDMA_UTCL1_RDBST_FIFO_SED),
+ 0, 0,
+ },
+ { "SDMA_DATA_LUT_FIFO_SED", SOC15_REG_ENTRY(SDMA0, 0, mmSDMA0_EDC_COUNTER),
+ SOC15_REG_FIELD(SDMA0_EDC_COUNTER, SDMA_DATA_LUT_FIFO_SED),
+ 0, 0,
+ },
+ { "SDMA_MBANK_DATA_BUF0_SED", SOC15_REG_ENTRY(SDMA0, 0, mmSDMA0_EDC_COUNTER),
+ SOC15_REG_FIELD(SDMA0_EDC_COUNTER, SDMA_MBANK_DATA_BUF0_SED),
+ 0, 0,
+ },
+ { "SDMA_MBANK_DATA_BUF1_SED", SOC15_REG_ENTRY(SDMA0, 0, mmSDMA0_EDC_COUNTER),
+ SOC15_REG_FIELD(SDMA0_EDC_COUNTER, SDMA_MBANK_DATA_BUF1_SED),
+ 0, 0,
+ },
+ { "SDMA_MBANK_DATA_BUF2_SED", SOC15_REG_ENTRY(SDMA0, 0, mmSDMA0_EDC_COUNTER),
+ SOC15_REG_FIELD(SDMA0_EDC_COUNTER, SDMA_MBANK_DATA_BUF2_SED),
+ 0, 0,
+ },
+ { "SDMA_MBANK_DATA_BUF3_SED", SOC15_REG_ENTRY(SDMA0, 0, mmSDMA0_EDC_COUNTER),
+ SOC15_REG_FIELD(SDMA0_EDC_COUNTER, SDMA_MBANK_DATA_BUF3_SED),
+ 0, 0,
+ },
+ { "SDMA_MBANK_DATA_BUF4_SED", SOC15_REG_ENTRY(SDMA0, 0, mmSDMA0_EDC_COUNTER),
+ SOC15_REG_FIELD(SDMA0_EDC_COUNTER, SDMA_MBANK_DATA_BUF4_SED),
+ 0, 0,
+ },
+ { "SDMA_MBANK_DATA_BUF5_SED", SOC15_REG_ENTRY(SDMA0, 0, mmSDMA0_EDC_COUNTER),
+ SOC15_REG_FIELD(SDMA0_EDC_COUNTER, SDMA_MBANK_DATA_BUF5_SED),
+ 0, 0,
+ },
+ { "SDMA_MBANK_DATA_BUF6_SED", SOC15_REG_ENTRY(SDMA0, 0, mmSDMA0_EDC_COUNTER),
+ SOC15_REG_FIELD(SDMA0_EDC_COUNTER, SDMA_MBANK_DATA_BUF6_SED),
+ 0, 0,
+ },
+ { "SDMA_MBANK_DATA_BUF7_SED", SOC15_REG_ENTRY(SDMA0, 0, mmSDMA0_EDC_COUNTER),
+ SOC15_REG_FIELD(SDMA0_EDC_COUNTER, SDMA_MBANK_DATA_BUF7_SED),
+ 0, 0,
+ },
+ { "SDMA_MBANK_DATA_BUF8_SED", SOC15_REG_ENTRY(SDMA0, 0, mmSDMA0_EDC_COUNTER),
+ SOC15_REG_FIELD(SDMA0_EDC_COUNTER, SDMA_MBANK_DATA_BUF8_SED),
+ 0, 0,
+ },
+ { "SDMA_MBANK_DATA_BUF9_SED", SOC15_REG_ENTRY(SDMA0, 0, mmSDMA0_EDC_COUNTER),
+ SOC15_REG_FIELD(SDMA0_EDC_COUNTER, SDMA_MBANK_DATA_BUF9_SED),
+ 0, 0,
+ },
+ { "SDMA_MBANK_DATA_BUF10_SED", SOC15_REG_ENTRY(SDMA0, 0, mmSDMA0_EDC_COUNTER),
+ SOC15_REG_FIELD(SDMA0_EDC_COUNTER, SDMA_MBANK_DATA_BUF10_SED),
+ 0, 0,
+ },
+ { "SDMA_MBANK_DATA_BUF11_SED", SOC15_REG_ENTRY(SDMA0, 0, mmSDMA0_EDC_COUNTER),
+ SOC15_REG_FIELD(SDMA0_EDC_COUNTER, SDMA_MBANK_DATA_BUF11_SED),
+ 0, 0,
+ },
+ { "SDMA_MBANK_DATA_BUF12_SED", SOC15_REG_ENTRY(SDMA0, 0, mmSDMA0_EDC_COUNTER),
+ SOC15_REG_FIELD(SDMA0_EDC_COUNTER, SDMA_MBANK_DATA_BUF12_SED),
+ 0, 0,
+ },
+ { "SDMA_MBANK_DATA_BUF13_SED", SOC15_REG_ENTRY(SDMA0, 0, mmSDMA0_EDC_COUNTER),
+ SOC15_REG_FIELD(SDMA0_EDC_COUNTER, SDMA_MBANK_DATA_BUF13_SED),
+ 0, 0,
+ },
+ { "SDMA_MBANK_DATA_BUF14_SED", SOC15_REG_ENTRY(SDMA0, 0, mmSDMA0_EDC_COUNTER),
+ SOC15_REG_FIELD(SDMA0_EDC_COUNTER, SDMA_MBANK_DATA_BUF14_SED),
+ 0, 0,
+ },
+ { "SDMA_MBANK_DATA_BUF15_SED", SOC15_REG_ENTRY(SDMA0, 0, mmSDMA0_EDC_COUNTER),
+ SOC15_REG_FIELD(SDMA0_EDC_COUNTER, SDMA_MBANK_DATA_BUF15_SED),
+ 0, 0,
+ },
+ { "SDMA_SPLIT_DAT_BUF_SED", SOC15_REG_ENTRY(SDMA0, 0, mmSDMA0_EDC_COUNTER),
+ SOC15_REG_FIELD(SDMA0_EDC_COUNTER, SDMA_SPLIT_DAT_BUF_SED),
+ 0, 0,
+ },
+ { "SDMA_MC_WR_ADDR_FIFO_SED", SOC15_REG_ENTRY(SDMA0, 0, mmSDMA0_EDC_COUNTER),
+ SOC15_REG_FIELD(SDMA0_EDC_COUNTER, SDMA_MC_WR_ADDR_FIFO_SED),
+ 0, 0,
+ },
};
static u32 sdma_v4_0_get_reg_offset(struct amdgpu_device *adev,
@@ -698,7 +798,7 @@ static void sdma_v4_0_ring_emit_ib(struct amdgpu_ring *ring,
unsigned vmid = AMDGPU_JOB_GET_VMID(job);
/* IB packet must end on a 8 DW boundary */
- sdma_v4_0_ring_insert_nop(ring, (10 - (lower_32_bits(ring->wptr) & 7)) % 8);
+ sdma_v4_0_ring_insert_nop(ring, (2 - lower_32_bits(ring->wptr)) & 7);
amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_INDIRECT) |
SDMA_PKT_INDIRECT_HEADER_VMID(vmid & 0xf));
@@ -1579,7 +1679,7 @@ static void sdma_v4_0_ring_pad_ib(struct amdgpu_ring *ring, struct amdgpu_ib *ib
u32 pad_count;
int i;
- pad_count = (8 - (ib->length_dw & 0x7)) % 8;
+ pad_count = (-ib->length_dw) & 7;
for (i = 0; i < pad_count; i++)
if (sdma && sdma->burst_nop && (i == 0))
ib->ptr[ib->length_dw++] =
@@ -1686,6 +1786,7 @@ static int sdma_v4_0_early_init(void *handle)
sdma_v4_0_set_buffer_funcs(adev);
sdma_v4_0_set_vm_pte_funcs(adev);
sdma_v4_0_set_irq_funcs(adev);
+ sdma_v4_0_set_ras_funcs(adev);
return 0;
}
@@ -1700,8 +1801,18 @@ static int sdma_v4_0_late_init(void *handle)
struct ras_ih_if ih_info = {
.cb = sdma_v4_0_process_ras_data_cb,
};
+ int i;
- return amdgpu_sdma_ras_late_init(adev, &ih_info);
+ /* read back edc counter registers to clear the counters */
+ if (amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__SDMA)) {
+ for (i = 0; i < adev->sdma.num_instances; i++)
+ RREG32_SDMA(i, mmSDMA0_EDC_COUNTER);
+ }
+
+ if (adev->sdma.funcs && adev->sdma.funcs->ras_late_init)
+ return adev->sdma.funcs->ras_late_init(adev, &ih_info);
+ else
+ return 0;
}
static int sdma_v4_0_sw_init(void *handle)
@@ -1773,7 +1884,8 @@ static int sdma_v4_0_sw_fini(void *handle)
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
int i;
- amdgpu_sdma_ras_fini(adev);
+ if (adev->sdma.funcs && adev->sdma.funcs->ras_fini)
+ adev->sdma.funcs->ras_fini(adev);
for (i = 0; i < adev->sdma.num_instances; i++) {
amdgpu_ring_fini(&adev->sdma.instance[i].ring);
@@ -2409,10 +2521,73 @@ static void sdma_v4_0_set_vm_pte_funcs(struct amdgpu_device *adev)
sched = &adev->sdma.instance[i].page.sched;
else
sched = &adev->sdma.instance[i].ring.sched;
- adev->vm_manager.vm_pte_rqs[i] =
- &sched->sched_rq[DRM_SCHED_PRIORITY_KERNEL];
+ adev->vm_manager.vm_pte_scheds[i] = sched;
+ }
+ adev->vm_manager.vm_pte_num_scheds = adev->sdma.num_instances;
+}
+
+static void sdma_v4_0_get_ras_error_count(uint32_t value,
+ uint32_t instance,
+ uint32_t *sec_count)
+{
+ uint32_t i;
+ uint32_t sec_cnt;
+
+ /* double bit (multi-bit) error detection is not supported */
+ for (i = 0; i < ARRAY_SIZE(sdma_v4_0_ras_fields); i++) {
+ /* the SDMA_EDC_COUNTER register in each sdma instance
+ * shares the same SED shift/mask layout
+ */
+ sec_cnt = (value &
+ sdma_v4_0_ras_fields[i].sec_count_mask) >>
+ sdma_v4_0_ras_fields[i].sec_count_shift;
+ if (sec_cnt) {
+ DRM_INFO("Detected %s in SDMA%d, SED %d\n",
+ sdma_v4_0_ras_fields[i].name,
+ instance, sec_cnt);
+ *sec_count += sec_cnt;
+ }
+ }
+}
+
+static int sdma_v4_0_query_ras_error_count(struct amdgpu_device *adev,
+ uint32_t instance, void *ras_error_status)
+{
+ struct ras_err_data *err_data = (struct ras_err_data *)ras_error_status;
+ uint32_t sec_count = 0;
+ uint32_t reg_value = 0;
+
+ reg_value = RREG32_SDMA(instance, mmSDMA0_EDC_COUNTER);
+ /* double bit error is not supported */
+ if (reg_value)
+ sdma_v4_0_get_ras_error_count(reg_value,
+ instance, &sec_count);
+ /* err_data->ce_count should be initialized to 0
+ * before calling into this function */
+ err_data->ce_count += sec_count;
+ /* double bit error is not supported;
+ * set the ue count to 0 */
+ err_data->ue_count = 0;
+
+ return 0;
+}
+
+static const struct amdgpu_sdma_ras_funcs sdma_v4_0_ras_funcs = {
+ .ras_late_init = amdgpu_sdma_ras_late_init,
+ .ras_fini = amdgpu_sdma_ras_fini,
+ .query_ras_error_count = sdma_v4_0_query_ras_error_count,
+};
+
+static void sdma_v4_0_set_ras_funcs(struct amdgpu_device *adev)
+{
+ switch (adev->asic_type) {
+ case CHIP_VEGA20:
+ case CHIP_ARCTURUS:
+ adev->sdma.funcs = &sdma_v4_0_ras_funcs;
+ break;
+ default:
+ break;
}
- adev->vm_manager.vm_pte_num_rqs = adev->sdma.num_instances;
}
const struct amdgpu_ip_block_version sdma_v4_0_ip_block = {
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c
index f4ad2990f973..4c6bf1f8a528 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c
@@ -382,8 +382,15 @@ static void sdma_v5_0_ring_emit_ib(struct amdgpu_ring *ring,
unsigned vmid = AMDGPU_JOB_GET_VMID(job);
uint64_t csa_mc_addr = amdgpu_sdma_get_csa_mc_addr(ring, vmid);
- /* IB packet must end on a 8 DW boundary */
- sdma_v5_0_ring_insert_nop(ring, (10 - (lower_32_bits(ring->wptr) & 7)) % 8);
+ /* An IB packet must end on an 8 DW boundary--the next dword
+ * must be on an 8-dword boundary. Our IB packet below is 6
+ * dwords long, so add x NOPs such that, in modular
+ * arithmetic,
+ * wptr + 6 + x = 8k, k >= 0, which in C is
+ * (wptr + 6 + x) % 8 == 0.
+ * The expression below is a solution for x.
+ */
+ sdma_v5_0_ring_insert_nop(ring, (2 - lower_32_bits(ring->wptr)) & 7);
amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_INDIRECT) |
SDMA_PKT_INDIRECT_HEADER_VMID(vmid & 0xf));
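Both rewritten padding expressions in this series rely on the same power-of-two identity: (8 - (w & 7)) % 8 equals (-w) & 7, and solving (wptr + 6 + x) % 8 == 0 for x gives x = (2 - wptr) & 7. A standalone self-check of the equivalences:

#include <assert.h>
#include <stdint.h>

int main(void)
{
	uint32_t w;

	for (w = 0; w < 64; w++) {
		/* old pad_count expression == new one */
		assert((8 - (w & 7)) % 8 == ((-w) & 7));
		/* old emit_ib expression == new one */
		assert((10 - (w & 7)) % 8 == ((2 - w) & 7));
		/* (2 - w) & 7 NOPs align a 6-dword packet to 8 dwords */
		assert((w + 6 + ((2 - w) & 7)) % 8 == 0);
	}
	return 0;
}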
@@ -907,16 +914,9 @@ static int sdma_v5_0_ring_test_ring(struct amdgpu_ring *ring)
udelay(1);
}
- if (i < adev->usec_timeout) {
- if (amdgpu_emu_mode == 1)
- DRM_INFO("ring test on %d succeeded in %d msecs\n", ring->idx, i);
- else
- DRM_INFO("ring test on %d succeeded in %d usecs\n", ring->idx, i);
- } else {
- DRM_ERROR("amdgpu: ring %d test failed (0x%08X)\n",
- ring->idx, tmp);
- r = -EINVAL;
- }
+ if (i >= adev->usec_timeout)
+ r = -ETIMEDOUT;
+
amdgpu_device_wb_free(adev, index);
return r;
@@ -981,13 +981,10 @@ static int sdma_v5_0_ring_test_ib(struct amdgpu_ring *ring, long timeout)
goto err1;
}
tmp = le32_to_cpu(adev->wb.wb[index]);
- if (tmp == 0xDEADBEEF) {
- DRM_INFO("ib test on ring %d succeeded\n", ring->idx);
+ if (tmp == 0xDEADBEEF)
r = 0;
- } else {
- DRM_ERROR("amdgpu: ib test failed (0x%08X)\n", tmp);
+ else
r = -EINVAL;
- }
err1:
amdgpu_ib_free(adev, &ib, NULL);
@@ -1086,10 +1083,10 @@ static void sdma_v5_0_vm_set_pte_pde(struct amdgpu_ib *ib,
}
/**
- * sdma_v5_0_ring_pad_ib - pad the IB to the required number of dw
- *
+ * sdma_v5_0_ring_pad_ib - pad the IB
* @ib: indirect buffer to fill with padding
*
+ * Pad the IB with NOPs so its length is a multiple of 8 dwords.
*/
static void sdma_v5_0_ring_pad_ib(struct amdgpu_ring *ring, struct amdgpu_ib *ib)
{
@@ -1097,7 +1094,7 @@ static void sdma_v5_0_ring_pad_ib(struct amdgpu_ring *ring, struct amdgpu_ib *ib
u32 pad_count;
int i;
- pad_count = (8 - (ib->length_dw & 0x7)) % 8;
+ pad_count = (-ib->length_dw) & 0x7;
for (i = 0; i < pad_count; i++)
if (sdma && sdma->burst_nop && (i == 0))
ib->ptr[ib->length_dw++] =
@@ -1721,17 +1718,15 @@ static const struct amdgpu_vm_pte_funcs sdma_v5_0_vm_pte_funcs = {
static void sdma_v5_0_set_vm_pte_funcs(struct amdgpu_device *adev)
{
- struct drm_gpu_scheduler *sched;
unsigned i;
if (adev->vm_manager.vm_pte_funcs == NULL) {
adev->vm_manager.vm_pte_funcs = &sdma_v5_0_vm_pte_funcs;
for (i = 0; i < adev->sdma.num_instances; i++) {
- sched = &adev->sdma.instance[i].ring.sched;
- adev->vm_manager.vm_pte_rqs[i] =
- &sched->sched_rq[DRM_SCHED_PRIORITY_KERNEL];
+ adev->vm_manager.vm_pte_scheds[i] =
+ &adev->sdma.instance[i].ring.sched;
}
- adev->vm_manager.vm_pte_num_rqs = adev->sdma.num_instances;
+ adev->vm_manager.vm_pte_num_scheds = adev->sdma.num_instances;
}
}
diff --git a/drivers/gpu/drm/amd/amdgpu/si.c b/drivers/gpu/drm/amd/amdgpu/si.c
index f2d70a47a3af..4d415bfdb42f 100644
--- a/drivers/gpu/drm/amd/amdgpu/si.c
+++ b/drivers/gpu/drm/amd/amdgpu/si.c
@@ -1197,6 +1197,11 @@ static int si_asic_reset(struct amdgpu_device *adev)
return 0;
}
+static bool si_asic_supports_baco(struct amdgpu_device *adev)
+{
+ return false;
+}
+
static enum amd_reset_method
si_asic_reset_method(struct amdgpu_device *adev)
{
@@ -1425,6 +1430,7 @@ static const struct amdgpu_asic_funcs si_asic_funcs =
.get_pcie_usage = &si_get_pcie_usage,
.need_reset_on_init = &si_need_reset_on_init,
.get_pcie_replay_count = &si_get_pcie_replay_count,
+ .supports_baco = &si_asic_supports_baco,
};
static uint32_t si_get_rev_id(struct amdgpu_device *adev)
diff --git a/drivers/gpu/drm/amd/amdgpu/si_dma.c b/drivers/gpu/drm/amd/amdgpu/si_dma.c
index bdda8b4e03f0..9aac9f9c50bb 100644
--- a/drivers/gpu/drm/amd/amdgpu/si_dma.c
+++ b/drivers/gpu/drm/amd/amdgpu/si_dma.c
@@ -834,16 +834,14 @@ static const struct amdgpu_vm_pte_funcs si_dma_vm_pte_funcs = {
static void si_dma_set_vm_pte_funcs(struct amdgpu_device *adev)
{
- struct drm_gpu_scheduler *sched;
unsigned i;
adev->vm_manager.vm_pte_funcs = &si_dma_vm_pte_funcs;
for (i = 0; i < adev->sdma.num_instances; i++) {
- sched = &adev->sdma.instance[i].ring.sched;
- adev->vm_manager.vm_pte_rqs[i] =
- &sched->sched_rq[DRM_SCHED_PRIORITY_KERNEL];
+ adev->vm_manager.vm_pte_scheds[i] =
+ &adev->sdma.instance[i].ring.sched;
}
- adev->vm_manager.vm_pte_num_rqs = adev->sdma.num_instances;
+ adev->vm_manager.vm_pte_num_scheds = adev->sdma.num_instances;
}
const struct amdgpu_ip_block_version si_dma_ip_block =
diff --git a/drivers/gpu/drm/amd/amdgpu/smu_v11_0_i2c.c b/drivers/gpu/drm/amd/amdgpu/smu_v11_0_i2c.c
index c44723c267c9..c902f26cf50d 100644
--- a/drivers/gpu/drm/amd/amdgpu/smu_v11_0_i2c.c
+++ b/drivers/gpu/drm/amd/amdgpu/smu_v11_0_i2c.c
@@ -234,7 +234,7 @@ static uint32_t smu_v11_0_i2c_transmit(struct i2c_adapter *control,
DRM_DEBUG_DRIVER("I2C_Transmit(), address = %x, bytes = %d , data: ",
(uint16_t)address, numbytes);
- if (drm_debug & DRM_UT_DRIVER) {
+ if (drm_debug_enabled(DRM_UT_DRIVER)) {
print_hex_dump(KERN_INFO, "data: ", DUMP_PREFIX_NONE,
16, 1, data, numbytes, false);
}
@@ -388,7 +388,7 @@ static uint32_t smu_v11_0_i2c_receive(struct i2c_adapter *control,
DRM_DEBUG_DRIVER("I2C_Receive(), address = %x, bytes = %d, data :",
(uint16_t)address, bytes_received);
- if (drm_debug & DRM_UT_DRIVER) {
+ if (drm_debug_enabled(DRM_UT_DRIVER)) {
print_hex_dump(KERN_INFO, "data: ", DUMP_PREFIX_NONE,
16, 1, data, bytes_received, false);
}
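drm_debug_enabled() is the drm_print.h accessor that replaces open-coded tests of the drm_debug mask. Note that DRM_DEBUG_DRIVER() already performs this check internally; the explicit call is only needed to gate helpers that are not DRM-aware, as with print_hex_dump() here. A sketch of the pattern (data/len stand in for whatever buffer is at hand):

/* Gate a non-DRM helper on the DRIVER debug category. */
if (drm_debug_enabled(DRM_UT_DRIVER))
	print_hex_dump(KERN_INFO, "data: ", DUMP_PREFIX_NONE,
		       16, 1, data, len, false);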
diff --git a/drivers/gpu/drm/amd/amdgpu/soc15.c b/drivers/gpu/drm/amd/amdgpu/soc15.c
index 8e1640bc07af..317803f6a561 100644
--- a/drivers/gpu/drm/amd/amdgpu/soc15.c
+++ b/drivers/gpu/drm/amd/amdgpu/soc15.c
@@ -67,7 +67,9 @@
#include "vce_v4_0.h"
#include "vcn_v1_0.h"
#include "vcn_v2_0.h"
+#include "jpeg_v2_0.h"
#include "vcn_v2_5.h"
+#include "jpeg_v2_5.h"
#include "dce_virtual.h"
#include "mxgpu_ai.h"
#include "amdgpu_smu.h"
@@ -477,56 +479,18 @@ static int soc15_asic_mode1_reset(struct amdgpu_device *adev)
return ret;
}
-static int soc15_asic_get_baco_capability(struct amdgpu_device *adev, bool *cap)
-{
- if (is_support_sw_smu(adev)) {
- struct smu_context *smu = &adev->smu;
-
- *cap = smu_baco_is_support(smu);
- return 0;
- } else {
- void *pp_handle = adev->powerplay.pp_handle;
- const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
-
- if (!pp_funcs || !pp_funcs->get_asic_baco_capability) {
- *cap = false;
- return -ENOENT;
- }
-
- return pp_funcs->get_asic_baco_capability(pp_handle, cap);
- }
-}
-
static int soc15_asic_baco_reset(struct amdgpu_device *adev)
{
struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);
+ int ret = 0;
/* avoid NBIF getting stuck when doing RAS recovery in BACO reset */
if (ras && ras->supported)
adev->nbio.funcs->enable_doorbell_interrupt(adev, false);
- dev_info(adev->dev, "GPU BACO reset\n");
-
- if (is_support_sw_smu(adev)) {
- struct smu_context *smu = &adev->smu;
-
- if (smu_baco_reset(smu))
- return -EIO;
- } else {
- void *pp_handle = adev->powerplay.pp_handle;
- const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
-
- if (!pp_funcs ||!pp_funcs->get_asic_baco_state ||!pp_funcs->set_asic_baco_state)
- return -ENOENT;
-
- /* enter BACO state */
- if (pp_funcs->set_asic_baco_state(pp_handle, 1))
- return -EIO;
-
- /* exit BACO state */
- if (pp_funcs->set_asic_baco_state(pp_handle, 0))
- return -EIO;
- }
+ ret = amdgpu_dpm_baco_reset(adev);
+ if (ret)
+ return ret;
/* re-enable doorbell interrupt after BACO exit */
if (ras && ras->supported)
@@ -535,21 +499,11 @@ static int soc15_asic_baco_reset(struct amdgpu_device *adev)
return 0;
}
-static int soc15_mode2_reset(struct amdgpu_device *adev)
-{
- if (is_support_sw_smu(adev))
- return smu_mode2_reset(&adev->smu);
- if (!adev->powerplay.pp_funcs ||
- !adev->powerplay.pp_funcs->asic_reset_mode_2)
- return -ENOENT;
-
- return adev->powerplay.pp_funcs->asic_reset_mode_2(adev->powerplay.pp_handle);
-}
-
static enum amd_reset_method
soc15_asic_reset_method(struct amdgpu_device *adev)
{
- bool baco_reset;
+ bool baco_reset = false;
+ struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);
switch (adev->asic_type) {
case CHIP_RAVEN:
@@ -557,23 +511,21 @@ soc15_asic_reset_method(struct amdgpu_device *adev)
return AMD_RESET_METHOD_MODE2;
case CHIP_VEGA10:
case CHIP_VEGA12:
- soc15_asic_get_baco_capability(adev, &baco_reset);
+ case CHIP_ARCTURUS:
+ baco_reset = amdgpu_dpm_is_baco_supported(adev);
break;
case CHIP_VEGA20:
if (adev->psp.sos_fw_version >= 0x80067)
- soc15_asic_get_baco_capability(adev, &baco_reset);
- else
- baco_reset = false;
- if (baco_reset) {
- struct amdgpu_hive_info *hive = amdgpu_get_xgmi_hive(adev, 0);
- struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);
+ baco_reset = amdgpu_dpm_is_baco_supported(adev);
- if (hive || (ras && ras->supported))
- baco_reset = false;
- }
+ /*
+ * 1. PMFW version > 0x284300: all cases use baco
+ * 2. PMFW version <= 0x284300: only sGPU w/o RAS use baco
+ */
+ if ((ras && ras->supported) && adev->pm.fw_version <= 0x283400)
+ baco_reset = false;
break;
default:
- baco_reset = false;
break;
}
@@ -591,7 +543,7 @@ static int soc15_asic_reset(struct amdgpu_device *adev)
amdgpu_inc_vram_lost(adev);
return soc15_asic_baco_reset(adev);
case AMD_RESET_METHOD_MODE2:
- return soc15_mode2_reset(adev);
+ return amdgpu_dpm_mode2_reset(adev);
default:
if (!adev->in_suspend)
amdgpu_inc_vram_lost(adev);
@@ -599,6 +551,22 @@ static int soc15_asic_reset(struct amdgpu_device *adev)
}
}
+static bool soc15_supports_baco(struct amdgpu_device *adev)
+{
+ switch (adev->asic_type) {
+ case CHIP_VEGA10:
+ case CHIP_VEGA12:
+ case CHIP_ARCTURUS:
+ return amdgpu_dpm_is_baco_supported(adev);
+ case CHIP_VEGA20:
+ if (adev->psp.sos_fw_version >= 0x80067)
+ return amdgpu_dpm_is_baco_supported(adev);
+ return false;
+ default:
+ return false;
+ }
+}
+
/*static int soc15_set_uvd_clock(struct amdgpu_device *adev, u32 clock,
u32 cntl_reg, u32 status_reg)
{
@@ -709,9 +677,9 @@ int soc15_set_ip_blocks(struct amdgpu_device *adev)
}
if (adev->asic_type == CHIP_VEGA20 || adev->asic_type == CHIP_ARCTURUS)
- adev->df_funcs = &df_v3_6_funcs;
+ adev->df.funcs = &df_v3_6_funcs;
else
- adev->df_funcs = &df_v1_7_funcs;
+ adev->df.funcs = &df_v1_7_funcs;
adev->rev_id = soc15_get_rev_id(adev);
adev->nbio.funcs->detect_hw_virt(adev);
@@ -746,11 +714,11 @@ int soc15_set_ip_blocks(struct amdgpu_device *adev)
}
amdgpu_device_ip_block_add(adev, &gfx_v9_0_ip_block);
amdgpu_device_ip_block_add(adev, &sdma_v4_0_ip_block);
- if (!amdgpu_sriov_vf(adev)) {
- if (is_support_sw_smu(adev))
+ if (is_support_sw_smu(adev)) {
+ if (!amdgpu_sriov_vf(adev))
amdgpu_device_ip_block_add(adev, &smu_v11_0_ip_block);
- else
- amdgpu_device_ip_block_add(adev, &pp_smu_ip_block);
+ } else {
+ amdgpu_device_ip_block_add(adev, &pp_smu_ip_block);
}
if (adev->enable_virtual_display || amdgpu_sriov_vf(adev))
amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block);
@@ -798,11 +766,16 @@ int soc15_set_ip_blocks(struct amdgpu_device *adev)
amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block);
amdgpu_device_ip_block_add(adev, &gfx_v9_0_ip_block);
amdgpu_device_ip_block_add(adev, &sdma_v4_0_ip_block);
- if (!amdgpu_sriov_vf(adev))
- amdgpu_device_ip_block_add(adev, &smu_v11_0_ip_block);
+ amdgpu_device_ip_block_add(adev, &smu_v11_0_ip_block);
- if (unlikely(adev->firmware.load_type == AMDGPU_FW_LOAD_DIRECT))
+ if (amdgpu_sriov_vf(adev)) {
+ if (likely(adev->firmware.load_type == AMDGPU_FW_LOAD_PSP))
+ amdgpu_device_ip_block_add(adev, &vcn_v2_5_ip_block);
+ } else {
amdgpu_device_ip_block_add(adev, &vcn_v2_5_ip_block);
+ }
+ if (!amdgpu_sriov_vf(adev))
+ amdgpu_device_ip_block_add(adev, &jpeg_v2_5_ip_block);
break;
case CHIP_RENOIR:
amdgpu_device_ip_block_add(adev, &vega10_common_ip_block);
@@ -810,8 +783,7 @@ int soc15_set_ip_blocks(struct amdgpu_device *adev)
amdgpu_device_ip_block_add(adev, &vega10_ih_ip_block);
if (likely(adev->firmware.load_type == AMDGPU_FW_LOAD_PSP))
amdgpu_device_ip_block_add(adev, &psp_v12_0_ip_block);
- if (is_support_sw_smu(adev))
- amdgpu_device_ip_block_add(adev, &smu_v12_0_ip_block);
+ amdgpu_device_ip_block_add(adev, &smu_v12_0_ip_block);
amdgpu_device_ip_block_add(adev, &gfx_v9_0_ip_block);
amdgpu_device_ip_block_add(adev, &sdma_v4_0_ip_block);
if (adev->enable_virtual_display || amdgpu_sriov_vf(adev))
@@ -821,6 +793,7 @@ int soc15_set_ip_blocks(struct amdgpu_device *adev)
amdgpu_device_ip_block_add(adev, &dm_ip_block);
#endif
amdgpu_device_ip_block_add(adev, &vcn_v2_0_ip_block);
+ amdgpu_device_ip_block_add(adev, &jpeg_v2_0_ip_block);
break;
default:
return -EINVAL;
@@ -999,6 +972,7 @@ static const struct amdgpu_asic_funcs soc15_asic_funcs =
.get_pcie_usage = &soc15_get_pcie_usage,
.need_reset_on_init = &soc15_need_reset_on_init,
.get_pcie_replay_count = &soc15_get_pcie_replay_count,
+ .supports_baco = &soc15_supports_baco,
};
static const struct amdgpu_asic_funcs vega20_asic_funcs =
@@ -1007,6 +981,7 @@ static const struct amdgpu_asic_funcs vega20_asic_funcs =
.read_bios_from_rom = &soc15_read_bios_from_rom,
.read_register = &soc15_read_register,
.reset = &soc15_asic_reset,
+ .reset_method = &soc15_asic_reset_method,
.set_vga_state = &soc15_vga_set_state,
.get_xclk = &soc15_get_xclk,
.set_uvd_clocks = &soc15_set_uvd_clocks,
@@ -1019,7 +994,7 @@ static const struct amdgpu_asic_funcs vega20_asic_funcs =
.get_pcie_usage = &vega20_get_pcie_usage,
.need_reset_on_init = &soc15_need_reset_on_init,
.get_pcie_replay_count = &soc15_get_pcie_replay_count,
- .reset_method = &soc15_asic_reset_method
+ .supports_baco = &soc15_supports_baco,
};
static int soc15_common_early_init(void *handle)
@@ -1145,9 +1120,7 @@ static int soc15_common_early_init(void *handle)
AMD_CG_SUPPORT_SDMA_LS |
AMD_CG_SUPPORT_VCN_MGCG;
- adev->pg_flags = AMD_PG_SUPPORT_SDMA |
- AMD_PG_SUPPORT_VCN |
- AMD_PG_SUPPORT_VCN_DPG;
+ adev->pg_flags = AMD_PG_SUPPORT_SDMA | AMD_PG_SUPPORT_VCN;
} else if (adev->pdev->device == 0x15d8) {
adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
AMD_CG_SUPPORT_GFX_MGLS |
@@ -1190,9 +1163,7 @@ static int soc15_common_early_init(void *handle)
AMD_CG_SUPPORT_SDMA_LS |
AMD_CG_SUPPORT_VCN_MGCG;
- adev->pg_flags = AMD_PG_SUPPORT_SDMA |
- AMD_PG_SUPPORT_VCN |
- AMD_PG_SUPPORT_VCN_DPG;
+ adev->pg_flags = AMD_PG_SUPPORT_SDMA | AMD_PG_SUPPORT_VCN;
}
break;
case CHIP_ARCTURUS:
@@ -1208,7 +1179,9 @@ static int soc15_common_early_init(void *handle)
AMD_CG_SUPPORT_SDMA_LS |
AMD_CG_SUPPORT_MC_MGCG |
AMD_CG_SUPPORT_MC_LS |
- AMD_CG_SUPPORT_IH_CG;
+ AMD_CG_SUPPORT_IH_CG |
+ AMD_CG_SUPPORT_VCN_MGCG |
+ AMD_CG_SUPPORT_JPEG_MGCG;
adev->pg_flags = 0;
adev->external_rev_id = adev->rev_id + 0x32;
break;
@@ -1229,12 +1202,14 @@ static int soc15_common_early_init(void *handle)
AMD_CG_SUPPORT_HDP_LS |
AMD_CG_SUPPORT_ROM_MGCG |
AMD_CG_SUPPORT_VCN_MGCG |
+ AMD_CG_SUPPORT_JPEG_MGCG |
AMD_CG_SUPPORT_IH_CG |
AMD_CG_SUPPORT_ATHUB_LS |
AMD_CG_SUPPORT_ATHUB_MGCG |
AMD_CG_SUPPORT_DF_MGCG;
adev->pg_flags = AMD_PG_SUPPORT_SDMA |
AMD_PG_SUPPORT_VCN |
+ AMD_PG_SUPPORT_JPEG |
AMD_PG_SUPPORT_VCN_DPG;
adev->external_rev_id = adev->rev_id + 0x91;
break;
@@ -1272,7 +1247,7 @@ static int soc15_common_sw_init(void *handle)
if (amdgpu_sriov_vf(adev))
xgpu_ai_mailbox_add_irq_id(adev);
- adev->df_funcs->sw_init(adev);
+ adev->df.funcs->sw_init(adev);
return 0;
}
@@ -1282,7 +1257,7 @@ static int soc15_common_sw_fini(void *handle)
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
amdgpu_nbio_ras_fini(adev);
- adev->df_funcs->sw_fini(adev);
+ adev->df.funcs->sw_fini(adev);
return 0;
}
@@ -1503,7 +1478,7 @@ static int soc15_common_set_clockgating_state(void *handle,
state == AMD_CG_STATE_GATE ? true : false);
soc15_update_rom_medium_grain_clock_gating(adev,
state == AMD_CG_STATE_GATE ? true : false);
- adev->df_funcs->update_medium_grain_clock_gating(adev,
+ adev->df.funcs->update_medium_grain_clock_gating(adev,
state == AMD_CG_STATE_GATE ? true : false);
break;
case CHIP_RAVEN:
@@ -1561,7 +1536,7 @@ static void soc15_common_get_clockgating_state(void *handle, u32 *flags)
if (!(data & CGTT_ROM_CLK_CTRL0__SOFT_OVERRIDE0_MASK))
*flags |= AMD_CG_SUPPORT_ROM_MGCG;
- adev->df_funcs->get_clockgating_state(adev, flags);
+ adev->df.funcs->get_clockgating_state(adev, flags);
}
static int soc15_common_set_powergating_state(void *handle,
diff --git a/drivers/gpu/drm/amd/amdgpu/soc15.h b/drivers/gpu/drm/amd/amdgpu/soc15.h
index 57af489a5de3..d0fb7a67c1a3 100644
--- a/drivers/gpu/drm/amd/amdgpu/soc15.h
+++ b/drivers/gpu/drm/amd/amdgpu/soc15.h
@@ -60,6 +60,18 @@ struct soc15_allowed_register_entry {
bool grbm_indexed;
};
+struct soc15_ras_field_entry {
+ const char *name;
+ uint32_t hwip;
+ uint32_t inst;
+ uint32_t seg;
+ uint32_t reg_offset;
+ uint32_t sec_count_mask;
+ uint32_t sec_count_shift;
+ uint32_t ded_count_mask;
+ uint32_t ded_count_shift;
+};
+
#define SOC15_REG_ENTRY(ip, inst, reg) ip##_HWIP, inst, reg##_BASE_IDX, reg
#define SOC15_REG_ENTRY_OFFSET(entry) (adev->reg_offset[entry.hwip][entry.inst][entry.seg] + entry.reg_offset)
diff --git a/drivers/gpu/drm/amd/amdgpu/soc15_common.h b/drivers/gpu/drm/amd/amdgpu/soc15_common.h
index 839f186e1182..19e870c79896 100644
--- a/drivers/gpu/drm/amd/amdgpu/soc15_common.h
+++ b/drivers/gpu/drm/amd/amdgpu/soc15_common.h
@@ -52,6 +52,7 @@
uint32_t old_ = 0; \
uint32_t tmp_ = RREG32(adev->reg_offset[ip##_HWIP][inst][reg##_BASE_IDX] + reg); \
uint32_t loop = adev->usec_timeout; \
+ ret = 0; \
while ((tmp_ & (mask)) != (expected_value)) { \
if (old_ != tmp_) { \
loop = adev->usec_timeout; \
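The added ret = 0; matters because the while-loop body is the only place the macro assigns ret: if the register already matches the expected value, the loop never runs and the caller would otherwise read back an indeterminate ret. The same poll-with-timeout shape as a standalone sketch (names hypothetical; the real macro also restarts the countdown whenever the value changes):

/* Poll until (read_reg() & mask) == expected, or time out. */
static int poll_reg(u32 (*read_reg)(void), u32 mask, u32 expected,
		    unsigned int timeout_us)
{
	int ret = 0;			/* success if already satisfied */
	unsigned int loop = timeout_us;

	while ((read_reg() & mask) != expected) {
		udelay(1);
		if (--loop == 0) {
			ret = -ETIMEDOUT;
			break;
		}
	}
	return ret;
}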
diff --git a/drivers/gpu/drm/amd/amdgpu/umc_v6_1.c b/drivers/gpu/drm/amd/amdgpu/umc_v6_1.c
index 47c4b96b14d1..793bf70e64b1 100644
--- a/drivers/gpu/drm/amd/amdgpu/umc_v6_1.c
+++ b/drivers/gpu/drm/amd/amdgpu/umc_v6_1.c
@@ -28,19 +28,24 @@
#include "rsmu/rsmu_0_0_2_sh_mask.h"
#include "umc/umc_6_1_1_offset.h"
#include "umc/umc_6_1_1_sh_mask.h"
+#include "umc/umc_6_1_2_offset.h"
-#define smnMCA_UMC0_MCUMC_ADDRT0 0x50f10
+#define UMC_6_INST_DIST 0x40000
/*
* (addr / 256) * 8192: the higher 26 bits in ErrorAddr
* are the index of the 8KB block
*/
-#define ADDR_OF_8KB_BLOCK(addr) (((addr) & ~0xffULL) << 5)
+#define ADDR_OF_8KB_BLOCK(addr) (((addr) & ~0xffULL) << 5)
/* channel index is the index of 256B block */
#define ADDR_OF_256B_BLOCK(channel_index) ((channel_index) << 8)
/* offset in 256B block */
#define OFFSET_IN_256B_BLOCK(addr) ((addr) & 0xffULL)
+#define LOOP_UMC_INST(umc_inst) for ((umc_inst) = 0; (umc_inst) < adev->umc.umc_inst_num; (umc_inst)++)
+#define LOOP_UMC_CH_INST(ch_inst) for ((ch_inst) = 0; (ch_inst) < adev->umc.channel_inst_num; (ch_inst)++)
+#define LOOP_UMC_INST_AND_CH(umc_inst, ch_inst) LOOP_UMC_INST((umc_inst)) LOOP_UMC_CH_INST((ch_inst))
+
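The 8KB-block macro works because clearing the low 8 bits and shifting left by 5 is exactly (addr / 256) * 8192: (addr & ~0xff) is (addr >> 8) << 8, and a further << 5 gives (addr >> 8) << 13. A throwaway check (test code, not driver code):

#include <assert.h>
#include <stdint.h>

int main(void)
{
	uint64_t addr;

	for (addr = 0; addr < (1ULL << 20); addr += 0x3b)
		assert(((addr & ~0xffULL) << 5) == (addr / 256) * 8192);
	return 0;
}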
const uint32_t
umc_v6_1_channel_idx_tbl[UMC_V6_1_UMC_INSTANCE_NUM][UMC_V6_1_CHANNEL_INSTANCE_NUM] = {
{2, 18, 11, 27}, {4, 20, 13, 29},
@@ -49,24 +54,10 @@ const uint32_t
{9, 25, 0, 16}, {15, 31, 6, 22}
};
-static void umc_v6_1_enable_umc_index_mode(struct amdgpu_device *adev,
- uint32_t umc_instance)
+static void umc_v6_1_enable_umc_index_mode(struct amdgpu_device *adev)
{
- uint32_t rsmu_umc_index;
-
- rsmu_umc_index = RREG32_SOC15(RSMU, 0,
- mmRSMU_UMC_INDEX_REGISTER_NBIF_VG20_GPU);
- rsmu_umc_index = REG_SET_FIELD(rsmu_umc_index,
- RSMU_UMC_INDEX_REGISTER_NBIF_VG20_GPU,
+ WREG32_FIELD15(RSMU, 0, RSMU_UMC_INDEX_REGISTER_NBIF_VG20_GPU,
RSMU_UMC_INDEX_MODE_EN, 1);
- rsmu_umc_index = REG_SET_FIELD(rsmu_umc_index,
- RSMU_UMC_INDEX_REGISTER_NBIF_VG20_GPU,
- RSMU_UMC_INDEX_INSTANCE, umc_instance);
- rsmu_umc_index = REG_SET_FIELD(rsmu_umc_index,
- RSMU_UMC_INDEX_REGISTER_NBIF_VG20_GPU,
- RSMU_UMC_INDEX_WREN, 1 << umc_instance);
- WREG32_SOC15(RSMU, 0, mmRSMU_UMC_INDEX_REGISTER_NBIF_VG20_GPU,
- rsmu_umc_index);
}
static void umc_v6_1_disable_umc_index_mode(struct amdgpu_device *adev)
@@ -75,15 +66,23 @@ static void umc_v6_1_disable_umc_index_mode(struct amdgpu_device *adev)
RSMU_UMC_INDEX_MODE_EN, 0);
}
-static uint32_t umc_v6_1_get_umc_inst(struct amdgpu_device *adev)
+static uint32_t umc_v6_1_get_umc_index_mode_state(struct amdgpu_device *adev)
{
uint32_t rsmu_umc_index;
rsmu_umc_index = RREG32_SOC15(RSMU, 0,
- mmRSMU_UMC_INDEX_REGISTER_NBIF_VG20_GPU);
+ mmRSMU_UMC_INDEX_REGISTER_NBIF_VG20_GPU);
+
return REG_GET_FIELD(rsmu_umc_index,
- RSMU_UMC_INDEX_REGISTER_NBIF_VG20_GPU,
- RSMU_UMC_INDEX_INSTANCE);
+ RSMU_UMC_INDEX_REGISTER_NBIF_VG20_GPU,
+ RSMU_UMC_INDEX_MODE_EN);
+}
+
+static inline uint32_t get_umc_6_reg_offset(struct amdgpu_device *adev,
+ uint32_t umc_inst,
+ uint32_t ch_inst)
+{
+ return adev->umc.channel_offs*ch_inst + UMC_6_INST_DIST*umc_inst;
}
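With UMC_6_INST_DIST fixed at 0x40000 and adev->umc.channel_offs set per ASIC (0x800 on Vega20, 0x400 on Arcturus, matching the umc_v6_1.h change below), channels are laid out contiguously within each UMC instance. A hypothetical standalone rendering with a worked value:

#define UMC_6_INST_DIST	0x40000	/* per-UMC-instance register (dword) stride */

/* channel_offs: per-channel stride, 0x800 (VG20) or 0x400 (ARCT) */
static inline uint32_t umc6_reg_offset(uint32_t channel_offs,
				       uint32_t umc_inst, uint32_t ch_inst)
{
	return channel_offs * ch_inst + UMC_6_INST_DIST * umc_inst;
}

/* e.g. umc6_reg_offset(0x800, 1, 2) == 0x800*2 + 0x40000*1 == 0x41000 */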
static void umc_v6_1_query_correctable_error_count(struct amdgpu_device *adev,
@@ -95,39 +94,50 @@ static void umc_v6_1_query_correctable_error_count(struct amdgpu_device *adev,
uint64_t mc_umc_status;
uint32_t mc_umc_status_addr;
- ecc_err_cnt_sel_addr =
- SOC15_REG_OFFSET(UMC, 0, mmUMCCH0_0_EccErrCntSel);
- ecc_err_cnt_addr =
- SOC15_REG_OFFSET(UMC, 0, mmUMCCH0_0_EccErrCnt);
- mc_umc_status_addr =
- SOC15_REG_OFFSET(UMC, 0, mmMCA_UMC_UMC0_MCUMC_STATUST0);
+ if (adev->asic_type == CHIP_ARCTURUS) {
+ /* UMC 6_1_2 registers */
+ ecc_err_cnt_sel_addr =
+ SOC15_REG_OFFSET(UMC, 0, mmUMCCH0_0_EccErrCntSel_ARCT);
+ ecc_err_cnt_addr =
+ SOC15_REG_OFFSET(UMC, 0, mmUMCCH0_0_EccErrCnt_ARCT);
+ mc_umc_status_addr =
+ SOC15_REG_OFFSET(UMC, 0, mmMCA_UMC_UMC0_MCUMC_STATUST0_ARCT);
+ } else {
+ /* UMC 6_1_1 registers */
+ ecc_err_cnt_sel_addr =
+ SOC15_REG_OFFSET(UMC, 0, mmUMCCH0_0_EccErrCntSel);
+ ecc_err_cnt_addr =
+ SOC15_REG_OFFSET(UMC, 0, mmUMCCH0_0_EccErrCnt);
+ mc_umc_status_addr =
+ SOC15_REG_OFFSET(UMC, 0, mmMCA_UMC_UMC0_MCUMC_STATUST0);
+ }
/* select the lower chip and check the error count */
- ecc_err_cnt_sel = RREG32(ecc_err_cnt_sel_addr + umc_reg_offset);
+ ecc_err_cnt_sel = RREG32_PCIE((ecc_err_cnt_sel_addr + umc_reg_offset) * 4);
ecc_err_cnt_sel = REG_SET_FIELD(ecc_err_cnt_sel, UMCCH0_0_EccErrCntSel,
EccErrCntCsSel, 0);
- WREG32(ecc_err_cnt_sel_addr + umc_reg_offset, ecc_err_cnt_sel);
- ecc_err_cnt = RREG32(ecc_err_cnt_addr + umc_reg_offset);
+ WREG32_PCIE((ecc_err_cnt_sel_addr + umc_reg_offset) * 4, ecc_err_cnt_sel);
+ ecc_err_cnt = RREG32_PCIE((ecc_err_cnt_addr + umc_reg_offset) * 4);
*error_count +=
(REG_GET_FIELD(ecc_err_cnt, UMCCH0_0_EccErrCnt, EccErrCnt) -
UMC_V6_1_CE_CNT_INIT);
/* clear the lower chip err count */
- WREG32(ecc_err_cnt_addr + umc_reg_offset, UMC_V6_1_CE_CNT_INIT);
+ WREG32_PCIE((ecc_err_cnt_addr + umc_reg_offset) * 4, UMC_V6_1_CE_CNT_INIT);
/* select the higher chip and check the err counter */
ecc_err_cnt_sel = REG_SET_FIELD(ecc_err_cnt_sel, UMCCH0_0_EccErrCntSel,
EccErrCntCsSel, 1);
- WREG32(ecc_err_cnt_sel_addr + umc_reg_offset, ecc_err_cnt_sel);
- ecc_err_cnt = RREG32(ecc_err_cnt_addr + umc_reg_offset);
+ WREG32_PCIE((ecc_err_cnt_sel_addr + umc_reg_offset) * 4, ecc_err_cnt_sel);
+ ecc_err_cnt = RREG32_PCIE((ecc_err_cnt_addr + umc_reg_offset) * 4);
*error_count +=
(REG_GET_FIELD(ecc_err_cnt, UMCCH0_0_EccErrCnt, EccErrCnt) -
UMC_V6_1_CE_CNT_INIT);
/* clear the higher chip err count */
- WREG32(ecc_err_cnt_addr + umc_reg_offset, UMC_V6_1_CE_CNT_INIT);
+ WREG32_PCIE((ecc_err_cnt_addr + umc_reg_offset) * 4, UMC_V6_1_CE_CNT_INIT);
/* check for SRAM correctable error,
* MCUMC_STATUS is a 64-bit register */
- mc_umc_status = RREG64_UMC(mc_umc_status_addr + umc_reg_offset);
+ mc_umc_status = RREG64_PCIE((mc_umc_status_addr + umc_reg_offset) * 4);
if (REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, ErrorCodeExt) == 6 &&
REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, Val) == 1 &&
REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, CECC) == 1)
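The move from RREG32/WREG32 to the _PCIE accessors also changes units: SOC15_REG_OFFSET() produces a dword register index, while the PCIE indirect-access helpers take a byte address, hence the * 4 applied to every offset. Wrapped up as a sketch (helper name hypothetical):

/* Dword register index -> byte address for the PCIE indirect path. */
static inline uint32_t umc_rreg32(struct amdgpu_device *adev, uint32_t dw_index)
{
	return RREG32_PCIE(dw_index * 4);	/* 4 bytes per dword */
}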
@@ -141,11 +151,18 @@ static void umc_v6_1_querry_uncorrectable_error_count(struct amdgpu_device *adev
uint64_t mc_umc_status;
uint32_t mc_umc_status_addr;
- mc_umc_status_addr =
- SOC15_REG_OFFSET(UMC, 0, mmMCA_UMC_UMC0_MCUMC_STATUST0);
+ if (adev->asic_type == CHIP_ARCTURUS) {
+ /* UMC 6_1_2 registers */
+ mc_umc_status_addr =
+ SOC15_REG_OFFSET(UMC, 0, mmMCA_UMC_UMC0_MCUMC_STATUST0_ARCT);
+ } else {
+ /* UMC 6_1_1 registers */
+ mc_umc_status_addr =
+ SOC15_REG_OFFSET(UMC, 0, mmMCA_UMC_UMC0_MCUMC_STATUST0);
+ }
/* check the MCUMC_STATUS */
- mc_umc_status = RREG64_UMC(mc_umc_status_addr + umc_reg_offset);
+ mc_umc_status = RREG64_PCIE((mc_umc_status_addr + umc_reg_offset) * 4);
if ((REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, Val) == 1) &&
(REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, Deferred) == 1 ||
REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, UECC) == 1 ||
@@ -155,49 +172,78 @@ static void umc_v6_1_querry_uncorrectable_error_count(struct amdgpu_device *adev
*error_count += 1;
}
-static void umc_v6_1_query_error_count(struct amdgpu_device *adev,
- struct ras_err_data *err_data, uint32_t umc_reg_offset,
- uint32_t channel_index)
-{
- umc_v6_1_query_correctable_error_count(adev, umc_reg_offset,
- &(err_data->ce_count));
- umc_v6_1_querry_uncorrectable_error_count(adev, umc_reg_offset,
- &(err_data->ue_count));
-}
-
static void umc_v6_1_query_ras_error_count(struct amdgpu_device *adev,
void *ras_error_status)
{
- amdgpu_umc_for_each_channel(umc_v6_1_query_error_count);
+ struct ras_err_data *err_data = (struct ras_err_data *)ras_error_status;
+
+ uint32_t umc_inst = 0;
+ uint32_t ch_inst = 0;
+ uint32_t umc_reg_offset = 0;
+
+ uint32_t rsmu_umc_index_state = umc_v6_1_get_umc_index_mode_state(adev);
+
+ if (rsmu_umc_index_state)
+ umc_v6_1_disable_umc_index_mode(adev);
+
+ LOOP_UMC_INST_AND_CH(umc_inst, ch_inst) {
+ umc_reg_offset = get_umc_6_reg_offset(adev,
+ umc_inst,
+ ch_inst);
+
+ umc_v6_1_query_correctable_error_count(adev,
+ umc_reg_offset,
+ &(err_data->ce_count));
+ umc_v6_1_querry_uncorrectable_error_count(adev,
+ umc_reg_offset,
+ &(err_data->ue_count));
+ }
+
+ if (rsmu_umc_index_state)
+ umc_v6_1_enable_umc_index_mode(adev);
}
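Both query paths now share the same bracket: save the RSMU index-mode state, switch it off so the flat offsets from get_umc_6_reg_offset() address the right channel, walk every instance/channel pair, then restore. Factored into a sketch with a hypothetical per-channel callback:

static void umc6_for_each_channel(struct amdgpu_device *adev,
				  void (*fn)(struct amdgpu_device *, uint32_t))
{
	uint32_t umc_inst, ch_inst;
	uint32_t saved = umc_v6_1_get_umc_index_mode_state(adev);

	if (saved)
		umc_v6_1_disable_umc_index_mode(adev);	/* use flat offsets */

	LOOP_UMC_INST_AND_CH(umc_inst, ch_inst)
		fn(adev, get_umc_6_reg_offset(adev, umc_inst, ch_inst));

	if (saved)
		umc_v6_1_enable_umc_index_mode(adev);	/* restore prior mode */
}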
static void umc_v6_1_query_error_address(struct amdgpu_device *adev,
struct ras_err_data *err_data,
- uint32_t umc_reg_offset, uint32_t channel_index)
+ uint32_t umc_reg_offset,
+ uint32_t ch_inst,
+ uint32_t umc_inst)
{
uint32_t lsb, mc_umc_status_addr;
- uint64_t mc_umc_status, err_addr, retired_page;
+ uint64_t mc_umc_status, err_addr, retired_page, mc_umc_addrt0;
struct eeprom_table_record *err_rec;
-
- mc_umc_status_addr =
- SOC15_REG_OFFSET(UMC, 0, mmMCA_UMC_UMC0_MCUMC_STATUST0);
+ uint32_t channel_index = adev->umc.channel_idx_tbl[umc_inst * adev->umc.channel_inst_num + ch_inst];
+
+ if (adev->asic_type == CHIP_ARCTURUS) {
+ /* UMC 6_1_2 registers */
+ mc_umc_status_addr =
+ SOC15_REG_OFFSET(UMC, 0, mmMCA_UMC_UMC0_MCUMC_STATUST0_ARCT);
+ mc_umc_addrt0 =
+ SOC15_REG_OFFSET(UMC, 0, mmMCA_UMC_UMC0_MCUMC_ADDRT0_ARCT);
+ } else {
+ /* UMC 6_1_1 registers */
+ mc_umc_status_addr =
+ SOC15_REG_OFFSET(UMC, 0, mmMCA_UMC_UMC0_MCUMC_STATUST0);
+ mc_umc_addrt0 =
+ SOC15_REG_OFFSET(UMC, 0, mmMCA_UMC_UMC0_MCUMC_ADDRT0);
+ }
/* skip error address process if -ENOMEM */
if (!err_data->err_addr) {
/* clear umc status */
- WREG64_UMC(mc_umc_status_addr + umc_reg_offset, 0x0ULL);
+ WREG64_PCIE((mc_umc_status_addr + umc_reg_offset) * 4, 0x0ULL);
return;
}
err_rec = &err_data->err_addr[err_data->err_addr_cnt];
- mc_umc_status = RREG64_UMC(mc_umc_status_addr + umc_reg_offset);
+ mc_umc_status = RREG64_PCIE((mc_umc_status_addr + umc_reg_offset) * 4);
/* calculate error address if ue/ce error is detected */
if (REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, Val) == 1 &&
(REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, UECC) == 1 ||
REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, CECC) == 1)) {
- err_addr = RREG64_PCIE(smnMCA_UMC0_MCUMC_ADDRT0 + umc_reg_offset * 4);
+ err_addr = RREG64_PCIE((mc_umc_addrt0 + umc_reg_offset) * 4);
/* the lowest lsb bits should be ignored */
lsb = REG_GET_FIELD(err_addr, MCA_UMC_UMC0_MCUMC_ADDRT0, LSB);
err_addr = REG_GET_FIELD(err_addr, MCA_UMC_UMC0_MCUMC_ADDRT0, ErrorAddr);
@@ -218,57 +264,105 @@ static void umc_v6_1_query_error_address(struct amdgpu_device *adev,
err_rec->err_type = AMDGPU_RAS_EEPROM_ERR_NON_RECOVERABLE;
err_rec->cu = 0;
err_rec->mem_channel = channel_index;
- err_rec->mcumc_id = umc_v6_1_get_umc_inst(adev);
+ err_rec->mcumc_id = umc_inst;
err_data->err_addr_cnt++;
}
}
/* clear umc status */
- WREG64_UMC(mc_umc_status_addr + umc_reg_offset, 0x0ULL);
+ WREG64_PCIE((mc_umc_status_addr + umc_reg_offset) * 4, 0x0ULL);
}
static void umc_v6_1_query_ras_error_address(struct amdgpu_device *adev,
void *ras_error_status)
{
- amdgpu_umc_for_each_channel(umc_v6_1_query_error_address);
+ struct ras_err_data *err_data = (struct ras_err_data *)ras_error_status;
+
+ uint32_t umc_inst = 0;
+ uint32_t ch_inst = 0;
+ uint32_t umc_reg_offset = 0;
+
+ uint32_t rsmu_umc_index_state = umc_v6_1_get_umc_index_mode_state(adev);
+
+ if (rsmu_umc_index_state)
+ umc_v6_1_disable_umc_index_mode(adev);
+
+ LOOP_UMC_INST_AND_CH(umc_inst, ch_inst) {
+ umc_reg_offset = get_umc_6_reg_offset(adev,
+ umc_inst,
+ ch_inst);
+
+ umc_v6_1_query_error_address(adev,
+ err_data,
+ umc_reg_offset,
+ ch_inst,
+ umc_inst);
+ }
+
+ if (rsmu_umc_index_state)
+ umc_v6_1_enable_umc_index_mode(adev);
}
static void umc_v6_1_err_cnt_init_per_channel(struct amdgpu_device *adev,
- struct ras_err_data *err_data,
- uint32_t umc_reg_offset, uint32_t channel_index)
+ uint32_t umc_reg_offset)
{
uint32_t ecc_err_cnt_sel, ecc_err_cnt_sel_addr;
uint32_t ecc_err_cnt_addr;
- ecc_err_cnt_sel_addr =
- SOC15_REG_OFFSET(UMC, 0, mmUMCCH0_0_EccErrCntSel);
- ecc_err_cnt_addr =
- SOC15_REG_OFFSET(UMC, 0, mmUMCCH0_0_EccErrCnt);
+ if (adev->asic_type == CHIP_ARCTURUS) {
+ /* UMC 6_1_2 registers */
+ ecc_err_cnt_sel_addr =
+ SOC15_REG_OFFSET(UMC, 0, mmUMCCH0_0_EccErrCntSel_ARCT);
+ ecc_err_cnt_addr =
+ SOC15_REG_OFFSET(UMC, 0, mmUMCCH0_0_EccErrCnt_ARCT);
+ } else {
+ /* UMC 6_1_1 registers */
+ ecc_err_cnt_sel_addr =
+ SOC15_REG_OFFSET(UMC, 0, mmUMCCH0_0_EccErrCntSel);
+ ecc_err_cnt_addr =
+ SOC15_REG_OFFSET(UMC, 0, mmUMCCH0_0_EccErrCnt);
+ }
/* select the lower chip and check the error count */
- ecc_err_cnt_sel = RREG32(ecc_err_cnt_sel_addr + umc_reg_offset);
+ ecc_err_cnt_sel = RREG32_PCIE((ecc_err_cnt_sel_addr + umc_reg_offset) * 4);
ecc_err_cnt_sel = REG_SET_FIELD(ecc_err_cnt_sel, UMCCH0_0_EccErrCntSel,
EccErrCntCsSel, 0);
/* set ce error interrupt type to APIC based interrupt */
ecc_err_cnt_sel = REG_SET_FIELD(ecc_err_cnt_sel, UMCCH0_0_EccErrCntSel,
EccErrInt, 0x1);
- WREG32(ecc_err_cnt_sel_addr + umc_reg_offset, ecc_err_cnt_sel);
+ WREG32_PCIE((ecc_err_cnt_sel_addr + umc_reg_offset) * 4, ecc_err_cnt_sel);
/* set error count to initial value */
- WREG32(ecc_err_cnt_addr + umc_reg_offset, UMC_V6_1_CE_CNT_INIT);
+ WREG32_PCIE((ecc_err_cnt_addr + umc_reg_offset) * 4, UMC_V6_1_CE_CNT_INIT);
/* select the higher chip and check the err counter */
ecc_err_cnt_sel = REG_SET_FIELD(ecc_err_cnt_sel, UMCCH0_0_EccErrCntSel,
EccErrCntCsSel, 1);
- WREG32(ecc_err_cnt_sel_addr + umc_reg_offset, ecc_err_cnt_sel);
- WREG32(ecc_err_cnt_addr + umc_reg_offset, UMC_V6_1_CE_CNT_INIT);
+ WREG32_PCIE((ecc_err_cnt_sel_addr + umc_reg_offset) * 4, ecc_err_cnt_sel);
+ WREG32_PCIE((ecc_err_cnt_addr + umc_reg_offset) * 4, UMC_V6_1_CE_CNT_INIT);
}
static void umc_v6_1_err_cnt_init(struct amdgpu_device *adev)
{
- void *ras_error_status = NULL;
+ uint32_t umc_inst = 0;
+ uint32_t ch_inst = 0;
+ uint32_t umc_reg_offset = 0;
+
+ uint32_t rsmu_umc_index_state = umc_v6_1_get_umc_index_mode_state(adev);
+
+ if (rsmu_umc_index_state)
+ umc_v6_1_disable_umc_index_mode(adev);
+
+ LOOP_UMC_INST_AND_CH(umc_inst, ch_inst) {
+ umc_reg_offset = get_umc_6_reg_offset(adev,
+ umc_inst,
+ ch_inst);
+
+ umc_v6_1_err_cnt_init_per_channel(adev, umc_reg_offset);
+ }
- amdgpu_umc_for_each_channel(umc_v6_1_err_cnt_init_per_channel);
+ if (rsmu_umc_index_state)
+ umc_v6_1_enable_umc_index_mode(adev);
}
const struct amdgpu_umc_funcs umc_v6_1_funcs = {
@@ -276,6 +370,4 @@ const struct amdgpu_umc_funcs umc_v6_1_funcs = {
.ras_late_init = amdgpu_umc_ras_late_init,
.query_ras_error_count = umc_v6_1_query_ras_error_count,
.query_ras_error_address = umc_v6_1_query_ras_error_address,
- .enable_umc_index_mode = umc_v6_1_enable_umc_index_mode,
- .disable_umc_index_mode = umc_v6_1_disable_umc_index_mode,
};
diff --git a/drivers/gpu/drm/amd/amdgpu/umc_v6_1.h b/drivers/gpu/drm/amd/amdgpu/umc_v6_1.h
index dab9cbd292c5..0ce1d323cfdd 100644
--- a/drivers/gpu/drm/amd/amdgpu/umc_v6_1.h
+++ b/drivers/gpu/drm/amd/amdgpu/umc_v6_1.h
@@ -35,7 +35,8 @@
/* total channel instances in one umc block */
#define UMC_V6_1_TOTAL_CHANNEL_NUM (UMC_V6_1_CHANNEL_INSTANCE_NUM * UMC_V6_1_UMC_INSTANCE_NUM)
/* UMC register per channel offset */
-#define UMC_V6_1_PER_CHANNEL_OFFSET 0x800
+#define UMC_V6_1_PER_CHANNEL_OFFSET_VG20 0x800
+#define UMC_V6_1_PER_CHANNEL_OFFSET_ARCT 0x400
/* EccErrCnt max value */
#define UMC_V6_1_CE_CNT_MAX 0xffff
diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c b/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c
index b4f84a820a44..e654938f6cca 100644
--- a/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c
@@ -25,6 +25,7 @@
#include "amdgpu.h"
#include "amdgpu_vcn.h"
+#include "amdgpu_pm.h"
#include "soc15.h"
#include "soc15d.h"
#include "soc15_common.h"
@@ -36,21 +37,22 @@
#include "mmhub/mmhub_9_1_sh_mask.h"
#include "ivsrcid/vcn/irqsrcs_vcn_1_0.h"
+#include "jpeg_v1_0.h"
-#define mmUVD_RBC_XX_IB_REG_CHECK 0x05ab
-#define mmUVD_RBC_XX_IB_REG_CHECK_BASE_IDX 1
-#define mmUVD_REG_XX_MASK 0x05ac
-#define mmUVD_REG_XX_MASK_BASE_IDX 1
+#define mmUVD_RBC_XX_IB_REG_CHECK_1_0 0x05ab
+#define mmUVD_RBC_XX_IB_REG_CHECK_1_0_BASE_IDX 1
+#define mmUVD_REG_XX_MASK_1_0 0x05ac
+#define mmUVD_REG_XX_MASK_1_0_BASE_IDX 1
static int vcn_v1_0_stop(struct amdgpu_device *adev);
static void vcn_v1_0_set_dec_ring_funcs(struct amdgpu_device *adev);
static void vcn_v1_0_set_enc_ring_funcs(struct amdgpu_device *adev);
-static void vcn_v1_0_set_jpeg_ring_funcs(struct amdgpu_device *adev);
static void vcn_v1_0_set_irq_funcs(struct amdgpu_device *adev);
-static void vcn_v1_0_jpeg_ring_set_patch_ring(struct amdgpu_ring *ring, uint32_t ptr);
static int vcn_v1_0_set_powergating_state(void *handle, enum amd_powergating_state state);
static int vcn_v1_0_pause_dpg_mode(struct amdgpu_device *adev,
- struct dpg_pause_state *new_state);
+ int inst_idx, struct dpg_pause_state *new_state);
+
+static void vcn_v1_0_idle_work_handler(struct work_struct *work);
/**
* vcn_v1_0_early_init - set function pointers
@@ -68,9 +70,10 @@ static int vcn_v1_0_early_init(void *handle)
vcn_v1_0_set_dec_ring_funcs(adev);
vcn_v1_0_set_enc_ring_funcs(adev);
- vcn_v1_0_set_jpeg_ring_funcs(adev);
vcn_v1_0_set_irq_funcs(adev);
+ jpeg_v1_0_early_init(handle);
+
return 0;
}
@@ -101,15 +104,13 @@ static int vcn_v1_0_sw_init(void *handle)
return r;
}
- /* VCN JPEG TRAP */
- r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_VCN, 126, &adev->vcn.inst->irq);
- if (r)
- return r;
-
r = amdgpu_vcn_sw_init(adev);
if (r)
return r;
+ /* Override the work func */
+ adev->vcn.idle_work.work.func = vcn_v1_0_idle_work_handler;
+
if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
const struct common_firmware_header *hdr;
hdr = (const struct common_firmware_header *)adev->vcn.fw->data;
@@ -149,17 +150,11 @@ static int vcn_v1_0_sw_init(void *handle)
return r;
}
- ring = &adev->vcn.inst->ring_jpeg;
- sprintf(ring->name, "vcn_jpeg");
- r = amdgpu_ring_init(adev, ring, 512, &adev->vcn.inst->irq, 0);
- if (r)
- return r;
-
adev->vcn.pause_dpg_mode = vcn_v1_0_pause_dpg_mode;
- adev->vcn.internal.jpeg_pitch = adev->vcn.inst->external.jpeg_pitch =
- SOC15_REG_OFFSET(UVD, 0, mmUVD_JPEG_PITCH);
- return 0;
+ r = jpeg_v1_0_sw_init(handle);
+
+ return r;
}
/**
@@ -178,6 +173,8 @@ static int vcn_v1_0_sw_fini(void *handle)
if (r)
return r;
+ jpeg_v1_0_sw_fini(handle);
+
r = amdgpu_vcn_sw_fini(adev);
return r;
@@ -207,7 +204,7 @@ static int vcn_v1_0_hw_init(void *handle)
goto done;
}
- ring = &adev->vcn.inst->ring_jpeg;
+ ring = &adev->jpeg.inst->ring_dec;
r = amdgpu_ring_test_helper(ring);
if (r)
goto done;
@@ -838,9 +835,9 @@ static int vcn_v1_0_start_spg_mode(struct amdgpu_device *adev)
vcn_v1_0_mc_resume_spg_mode(adev);
- WREG32_SOC15(UVD, 0, mmUVD_REG_XX_MASK, 0x10);
- WREG32_SOC15(UVD, 0, mmUVD_RBC_XX_IB_REG_CHECK,
- RREG32_SOC15(UVD, 0, mmUVD_RBC_XX_IB_REG_CHECK) | 0x3);
+ WREG32_SOC15(UVD, 0, mmUVD_REG_XX_MASK_1_0, 0x10);
+ WREG32_SOC15(UVD, 0, mmUVD_RBC_XX_IB_REG_CHECK_1_0,
+ RREG32_SOC15(UVD, 0, mmUVD_RBC_XX_IB_REG_CHECK_1_0) | 0x3);
/* enable VCPU clock */
WREG32_SOC15(UVD, 0, mmUVD_VCPU_CNTL, UVD_VCPU_CNTL__CLK_EN_MASK);
@@ -947,22 +944,7 @@ static int vcn_v1_0_start_spg_mode(struct amdgpu_device *adev)
WREG32_SOC15(UVD, 0, mmUVD_RB_BASE_HI2, upper_32_bits(ring->gpu_addr));
WREG32_SOC15(UVD, 0, mmUVD_RB_SIZE2, ring->ring_size / 4);
- ring = &adev->vcn.inst->ring_jpeg;
- WREG32_SOC15(UVD, 0, mmUVD_LMI_JRBC_RB_VMID, 0);
- WREG32_SOC15(UVD, 0, mmUVD_JRBC_RB_CNTL, UVD_JRBC_RB_CNTL__RB_NO_FETCH_MASK |
- UVD_JRBC_RB_CNTL__RB_RPTR_WR_EN_MASK);
- WREG32_SOC15(UVD, 0, mmUVD_LMI_JRBC_RB_64BIT_BAR_LOW, lower_32_bits(ring->gpu_addr));
- WREG32_SOC15(UVD, 0, mmUVD_LMI_JRBC_RB_64BIT_BAR_HIGH, upper_32_bits(ring->gpu_addr));
- WREG32_SOC15(UVD, 0, mmUVD_JRBC_RB_RPTR, 0);
- WREG32_SOC15(UVD, 0, mmUVD_JRBC_RB_WPTR, 0);
- WREG32_SOC15(UVD, 0, mmUVD_JRBC_RB_CNTL, UVD_JRBC_RB_CNTL__RB_RPTR_WR_EN_MASK);
-
- /* initialize wptr */
- ring->wptr = RREG32_SOC15(UVD, 0, mmUVD_JRBC_RB_WPTR);
-
- /* copy patch commands to the jpeg ring */
- vcn_v1_0_jpeg_ring_set_patch_ring(ring,
- (ring->wptr + ring->max_dw * amdgpu_sched_hw_submission));
+ jpeg_v1_0_start(adev, 0);
return 0;
}
@@ -1106,13 +1088,7 @@ static int vcn_v1_0_start_dpg_mode(struct amdgpu_device *adev)
WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_RBC_RB_CNTL), 0,
~UVD_RBC_RB_CNTL__RB_NO_FETCH_MASK);
- /* initialize JPEG wptr */
- ring = &adev->vcn.inst->ring_jpeg;
- ring->wptr = RREG32_SOC15(UVD, 0, mmUVD_JRBC_RB_WPTR);
-
- /* copy patch commands to the jpeg ring */
- vcn_v1_0_jpeg_ring_set_patch_ring(ring,
- (ring->wptr + ring->max_dw * amdgpu_sched_hw_submission));
+ jpeg_v1_0_start(adev, 1);
return 0;
}
@@ -1223,7 +1199,7 @@ static int vcn_v1_0_stop(struct amdgpu_device *adev)
}
static int vcn_v1_0_pause_dpg_mode(struct amdgpu_device *adev,
- struct dpg_pause_state *new_state)
+ int inst_idx, struct dpg_pause_state *new_state)
{
int ret_code;
uint32_t reg_data = 0;
@@ -1316,7 +1292,7 @@ static int vcn_v1_0_pause_dpg_mode(struct amdgpu_device *adev,
UVD_DPG_PAUSE__JPEG_PAUSE_DPG_ACK_MASK, ret_code);
/* Restore */
- ring = &adev->vcn.inst->ring_jpeg;
+ ring = &adev->jpeg.inst->ring_dec;
WREG32_SOC15(UVD, 0, mmUVD_LMI_JRBC_RB_VMID, 0);
WREG32_SOC15(UVD, 0, mmUVD_JRBC_RB_CNTL,
UVD_JRBC_RB_CNTL__RB_NO_FETCH_MASK |
@@ -1716,389 +1692,6 @@ static void vcn_v1_0_enc_ring_emit_wreg(struct amdgpu_ring *ring,
amdgpu_ring_write(ring, val);
}
-
-/**
- * vcn_v1_0_jpeg_ring_get_rptr - get read pointer
- *
- * @ring: amdgpu_ring pointer
- *
- * Returns the current hardware read pointer
- */
-static uint64_t vcn_v1_0_jpeg_ring_get_rptr(struct amdgpu_ring *ring)
-{
- struct amdgpu_device *adev = ring->adev;
-
- return RREG32_SOC15(UVD, 0, mmUVD_JRBC_RB_RPTR);
-}
-
-/**
- * vcn_v1_0_jpeg_ring_get_wptr - get write pointer
- *
- * @ring: amdgpu_ring pointer
- *
- * Returns the current hardware write pointer
- */
-static uint64_t vcn_v1_0_jpeg_ring_get_wptr(struct amdgpu_ring *ring)
-{
- struct amdgpu_device *adev = ring->adev;
-
- return RREG32_SOC15(UVD, 0, mmUVD_JRBC_RB_WPTR);
-}
-
-/**
- * vcn_v1_0_jpeg_ring_set_wptr - set write pointer
- *
- * @ring: amdgpu_ring pointer
- *
- * Commits the write pointer to the hardware
- */
-static void vcn_v1_0_jpeg_ring_set_wptr(struct amdgpu_ring *ring)
-{
- struct amdgpu_device *adev = ring->adev;
-
- WREG32_SOC15(UVD, 0, mmUVD_JRBC_RB_WPTR, lower_32_bits(ring->wptr));
-}
-
-/**
- * vcn_v1_0_jpeg_ring_insert_start - insert a start command
- *
- * @ring: amdgpu_ring pointer
- *
- * Write a start command to the ring.
- */
-static void vcn_v1_0_jpeg_ring_insert_start(struct amdgpu_ring *ring)
-{
- struct amdgpu_device *adev = ring->adev;
-
- amdgpu_ring_write(ring,
- PACKETJ(SOC15_REG_OFFSET(UVD, 0, mmUVD_JRBC_EXTERNAL_REG_BASE), 0, 0, PACKETJ_TYPE0));
- amdgpu_ring_write(ring, 0x68e04);
-
- amdgpu_ring_write(ring, PACKETJ(0, 0, 0, PACKETJ_TYPE0));
- amdgpu_ring_write(ring, 0x80010000);
-}
-
-/**
- * vcn_v1_0_jpeg_ring_insert_end - insert a end command
- *
- * @ring: amdgpu_ring pointer
- *
- * Write a end command to the ring.
- */
-static void vcn_v1_0_jpeg_ring_insert_end(struct amdgpu_ring *ring)
-{
- struct amdgpu_device *adev = ring->adev;
-
- amdgpu_ring_write(ring,
- PACKETJ(SOC15_REG_OFFSET(UVD, 0, mmUVD_JRBC_EXTERNAL_REG_BASE), 0, 0, PACKETJ_TYPE0));
- amdgpu_ring_write(ring, 0x68e04);
-
- amdgpu_ring_write(ring, PACKETJ(0, 0, 0, PACKETJ_TYPE0));
- amdgpu_ring_write(ring, 0x00010000);
-}
-
-/**
- * vcn_v1_0_jpeg_ring_emit_fence - emit an fence & trap command
- *
- * @ring: amdgpu_ring pointer
- * @fence: fence to emit
- *
- * Write a fence and a trap command to the ring.
- */
-static void vcn_v1_0_jpeg_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 seq,
- unsigned flags)
-{
- struct amdgpu_device *adev = ring->adev;
-
- WARN_ON(flags & AMDGPU_FENCE_FLAG_64BIT);
-
- amdgpu_ring_write(ring,
- PACKETJ(SOC15_REG_OFFSET(UVD, 0, mmUVD_JPEG_GPCOM_DATA0), 0, 0, PACKETJ_TYPE0));
- amdgpu_ring_write(ring, seq);
-
- amdgpu_ring_write(ring,
- PACKETJ(SOC15_REG_OFFSET(UVD, 0, mmUVD_JPEG_GPCOM_DATA1), 0, 0, PACKETJ_TYPE0));
- amdgpu_ring_write(ring, seq);
-
- amdgpu_ring_write(ring,
- PACKETJ(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_JRBC_RB_MEM_WR_64BIT_BAR_LOW), 0, 0, PACKETJ_TYPE0));
- amdgpu_ring_write(ring, lower_32_bits(addr));
-
- amdgpu_ring_write(ring,
- PACKETJ(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_JRBC_RB_MEM_WR_64BIT_BAR_HIGH), 0, 0, PACKETJ_TYPE0));
- amdgpu_ring_write(ring, upper_32_bits(addr));
-
- amdgpu_ring_write(ring,
- PACKETJ(SOC15_REG_OFFSET(UVD, 0, mmUVD_JPEG_GPCOM_CMD), 0, 0, PACKETJ_TYPE0));
- amdgpu_ring_write(ring, 0x8);
-
- amdgpu_ring_write(ring,
- PACKETJ(SOC15_REG_OFFSET(UVD, 0, mmUVD_JPEG_GPCOM_CMD), 0, PACKETJ_CONDITION_CHECK0, PACKETJ_TYPE4));
- amdgpu_ring_write(ring, 0);
-
- amdgpu_ring_write(ring,
- PACKETJ(SOC15_REG_OFFSET(UVD, 0, mmUVD_JRBC_RB_COND_RD_TIMER), 0, 0, PACKETJ_TYPE0));
- amdgpu_ring_write(ring, 0x01400200);
-
- amdgpu_ring_write(ring,
- PACKETJ(SOC15_REG_OFFSET(UVD, 0, mmUVD_JRBC_RB_REF_DATA), 0, 0, PACKETJ_TYPE0));
- amdgpu_ring_write(ring, seq);
-
- amdgpu_ring_write(ring,
- PACKETJ(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_JRBC_RB_MEM_RD_64BIT_BAR_LOW), 0, 0, PACKETJ_TYPE0));
- amdgpu_ring_write(ring, lower_32_bits(addr));
-
- amdgpu_ring_write(ring,
- PACKETJ(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_JRBC_RB_MEM_RD_64BIT_BAR_HIGH), 0, 0, PACKETJ_TYPE0));
- amdgpu_ring_write(ring, upper_32_bits(addr));
-
- amdgpu_ring_write(ring,
- PACKETJ(0, 0, PACKETJ_CONDITION_CHECK3, PACKETJ_TYPE2));
- amdgpu_ring_write(ring, 0xffffffff);
-
- amdgpu_ring_write(ring,
- PACKETJ(SOC15_REG_OFFSET(UVD, 0, mmUVD_JRBC_EXTERNAL_REG_BASE), 0, 0, PACKETJ_TYPE0));
- amdgpu_ring_write(ring, 0x3fbc);
-
- amdgpu_ring_write(ring,
- PACKETJ(0, 0, 0, PACKETJ_TYPE0));
- amdgpu_ring_write(ring, 0x1);
-
- /* emit trap */
- amdgpu_ring_write(ring, PACKETJ(0, 0, 0, PACKETJ_TYPE7));
- amdgpu_ring_write(ring, 0);
-}
-
-/**
- * vcn_v1_0_jpeg_ring_emit_ib - execute indirect buffer
- *
- * @ring: amdgpu_ring pointer
- * @ib: indirect buffer to execute
- *
- * Write ring commands to execute the indirect buffer.
- */
-static void vcn_v1_0_jpeg_ring_emit_ib(struct amdgpu_ring *ring,
- struct amdgpu_job *job,
- struct amdgpu_ib *ib,
- uint32_t flags)
-{
- struct amdgpu_device *adev = ring->adev;
- unsigned vmid = AMDGPU_JOB_GET_VMID(job);
-
- amdgpu_ring_write(ring,
- PACKETJ(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_JRBC_IB_VMID), 0, 0, PACKETJ_TYPE0));
- amdgpu_ring_write(ring, (vmid | (vmid << 4)));
-
- amdgpu_ring_write(ring,
- PACKETJ(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_JPEG_VMID), 0, 0, PACKETJ_TYPE0));
- amdgpu_ring_write(ring, (vmid | (vmid << 4)));
-
- amdgpu_ring_write(ring,
- PACKETJ(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_JRBC_IB_64BIT_BAR_LOW), 0, 0, PACKETJ_TYPE0));
- amdgpu_ring_write(ring, lower_32_bits(ib->gpu_addr));
-
- amdgpu_ring_write(ring,
- PACKETJ(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_JRBC_IB_64BIT_BAR_HIGH), 0, 0, PACKETJ_TYPE0));
- amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr));
-
- amdgpu_ring_write(ring,
- PACKETJ(SOC15_REG_OFFSET(UVD, 0, mmUVD_JRBC_IB_SIZE), 0, 0, PACKETJ_TYPE0));
- amdgpu_ring_write(ring, ib->length_dw);
-
- amdgpu_ring_write(ring,
- PACKETJ(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_JRBC_RB_MEM_RD_64BIT_BAR_LOW), 0, 0, PACKETJ_TYPE0));
- amdgpu_ring_write(ring, lower_32_bits(ring->gpu_addr));
-
- amdgpu_ring_write(ring,
- PACKETJ(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_JRBC_RB_MEM_RD_64BIT_BAR_HIGH), 0, 0, PACKETJ_TYPE0));
- amdgpu_ring_write(ring, upper_32_bits(ring->gpu_addr));
-
- amdgpu_ring_write(ring,
- PACKETJ(0, 0, PACKETJ_CONDITION_CHECK0, PACKETJ_TYPE2));
- amdgpu_ring_write(ring, 0);
-
- amdgpu_ring_write(ring,
- PACKETJ(SOC15_REG_OFFSET(UVD, 0, mmUVD_JRBC_RB_COND_RD_TIMER), 0, 0, PACKETJ_TYPE0));
- amdgpu_ring_write(ring, 0x01400200);
-
- amdgpu_ring_write(ring,
- PACKETJ(SOC15_REG_OFFSET(UVD, 0, mmUVD_JRBC_RB_REF_DATA), 0, 0, PACKETJ_TYPE0));
- amdgpu_ring_write(ring, 0x2);
-
- amdgpu_ring_write(ring,
- PACKETJ(SOC15_REG_OFFSET(UVD, 0, mmUVD_JRBC_STATUS), 0, PACKETJ_CONDITION_CHECK3, PACKETJ_TYPE3));
- amdgpu_ring_write(ring, 0x2);
-}
-
-static void vcn_v1_0_jpeg_ring_emit_reg_wait(struct amdgpu_ring *ring,
- uint32_t reg, uint32_t val,
- uint32_t mask)
-{
- struct amdgpu_device *adev = ring->adev;
- uint32_t reg_offset = (reg << 2);
-
- amdgpu_ring_write(ring,
- PACKETJ(SOC15_REG_OFFSET(UVD, 0, mmUVD_JRBC_RB_COND_RD_TIMER), 0, 0, PACKETJ_TYPE0));
- amdgpu_ring_write(ring, 0x01400200);
-
- amdgpu_ring_write(ring,
- PACKETJ(SOC15_REG_OFFSET(UVD, 0, mmUVD_JRBC_RB_REF_DATA), 0, 0, PACKETJ_TYPE0));
- amdgpu_ring_write(ring, val);
-
- amdgpu_ring_write(ring,
- PACKETJ(SOC15_REG_OFFSET(UVD, 0, mmUVD_JRBC_EXTERNAL_REG_BASE), 0, 0, PACKETJ_TYPE0));
- if (((reg_offset >= 0x1f800) && (reg_offset <= 0x21fff)) ||
- ((reg_offset >= 0x1e000) && (reg_offset <= 0x1e1ff))) {
- amdgpu_ring_write(ring, 0);
- amdgpu_ring_write(ring,
- PACKETJ((reg_offset >> 2), 0, 0, PACKETJ_TYPE3));
- } else {
- amdgpu_ring_write(ring, reg_offset);
- amdgpu_ring_write(ring,
- PACKETJ(0, 0, 0, PACKETJ_TYPE3));
- }
- amdgpu_ring_write(ring, mask);
-}
-
-static void vcn_v1_0_jpeg_ring_emit_vm_flush(struct amdgpu_ring *ring,
- unsigned vmid, uint64_t pd_addr)
-{
- struct amdgpu_vmhub *hub = &ring->adev->vmhub[ring->funcs->vmhub];
- uint32_t data0, data1, mask;
-
- pd_addr = amdgpu_gmc_emit_flush_gpu_tlb(ring, vmid, pd_addr);
-
- /* wait for register write */
- data0 = hub->ctx0_ptb_addr_lo32 + vmid * 2;
- data1 = lower_32_bits(pd_addr);
- mask = 0xffffffff;
- vcn_v1_0_jpeg_ring_emit_reg_wait(ring, data0, data1, mask);
-}
-
-static void vcn_v1_0_jpeg_ring_emit_wreg(struct amdgpu_ring *ring,
- uint32_t reg, uint32_t val)
-{
- struct amdgpu_device *adev = ring->adev;
- uint32_t reg_offset = (reg << 2);
-
- amdgpu_ring_write(ring,
- PACKETJ(SOC15_REG_OFFSET(UVD, 0, mmUVD_JRBC_EXTERNAL_REG_BASE), 0, 0, PACKETJ_TYPE0));
- if (((reg_offset >= 0x1f800) && (reg_offset <= 0x21fff)) ||
- ((reg_offset >= 0x1e000) && (reg_offset <= 0x1e1ff))) {
- amdgpu_ring_write(ring, 0);
- amdgpu_ring_write(ring,
- PACKETJ((reg_offset >> 2), 0, 0, PACKETJ_TYPE0));
- } else {
- amdgpu_ring_write(ring, reg_offset);
- amdgpu_ring_write(ring,
- PACKETJ(0, 0, 0, PACKETJ_TYPE0));
- }
- amdgpu_ring_write(ring, val);
-}
-
-static void vcn_v1_0_jpeg_ring_nop(struct amdgpu_ring *ring, uint32_t count)
-{
- int i;
-
- WARN_ON(ring->wptr % 2 || count % 2);
-
- for (i = 0; i < count / 2; i++) {
- amdgpu_ring_write(ring, PACKETJ(0, 0, 0, PACKETJ_TYPE6));
- amdgpu_ring_write(ring, 0);
- }
-}
-
-static void vcn_v1_0_jpeg_ring_patch_wreg(struct amdgpu_ring *ring, uint32_t *ptr, uint32_t reg_offset, uint32_t val)
-{
- struct amdgpu_device *adev = ring->adev;
- ring->ring[(*ptr)++] = PACKETJ(SOC15_REG_OFFSET(UVD, 0, mmUVD_JRBC_EXTERNAL_REG_BASE), 0, 0, PACKETJ_TYPE0);
- if (((reg_offset >= 0x1f800) && (reg_offset <= 0x21fff)) ||
- ((reg_offset >= 0x1e000) && (reg_offset <= 0x1e1ff))) {
- ring->ring[(*ptr)++] = 0;
- ring->ring[(*ptr)++] = PACKETJ((reg_offset >> 2), 0, 0, PACKETJ_TYPE0);
- } else {
- ring->ring[(*ptr)++] = reg_offset;
- ring->ring[(*ptr)++] = PACKETJ(0, 0, 0, PACKETJ_TYPE0);
- }
- ring->ring[(*ptr)++] = val;
-}
-
-static void vcn_v1_0_jpeg_ring_set_patch_ring(struct amdgpu_ring *ring, uint32_t ptr)
-{
- struct amdgpu_device *adev = ring->adev;
-
- uint32_t reg, reg_offset, val, mask, i;
-
- // 1st: program mmUVD_LMI_JRBC_RB_MEM_RD_64BIT_BAR_LOW
- reg = SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_JRBC_RB_MEM_RD_64BIT_BAR_LOW);
- reg_offset = (reg << 2);
- val = lower_32_bits(ring->gpu_addr);
- vcn_v1_0_jpeg_ring_patch_wreg(ring, &ptr, reg_offset, val);
-
- // 2nd: program mmUVD_LMI_JRBC_RB_MEM_RD_64BIT_BAR_HIGH
- reg = SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_JRBC_RB_MEM_RD_64BIT_BAR_HIGH);
- reg_offset = (reg << 2);
- val = upper_32_bits(ring->gpu_addr);
- vcn_v1_0_jpeg_ring_patch_wreg(ring, &ptr, reg_offset, val);
-
- // 3rd to 5th: issue MEM_READ commands
- for (i = 0; i <= 2; i++) {
- ring->ring[ptr++] = PACKETJ(0, 0, 0, PACKETJ_TYPE2);
- ring->ring[ptr++] = 0;
- }
-
- // 6th: program mmUVD_JRBC_RB_CNTL register to enable NO_FETCH and RPTR write ability
- reg = SOC15_REG_OFFSET(UVD, 0, mmUVD_JRBC_RB_CNTL);
- reg_offset = (reg << 2);
- val = 0x13;
- vcn_v1_0_jpeg_ring_patch_wreg(ring, &ptr, reg_offset, val);
-
- // 7th: program mmUVD_JRBC_RB_REF_DATA
- reg = SOC15_REG_OFFSET(UVD, 0, mmUVD_JRBC_RB_REF_DATA);
- reg_offset = (reg << 2);
- val = 0x1;
- vcn_v1_0_jpeg_ring_patch_wreg(ring, &ptr, reg_offset, val);
-
- // 8th: issue conditional register read mmUVD_JRBC_RB_CNTL
- reg = SOC15_REG_OFFSET(UVD, 0, mmUVD_JRBC_RB_CNTL);
- reg_offset = (reg << 2);
- val = 0x1;
- mask = 0x1;
-
- ring->ring[ptr++] = PACKETJ(SOC15_REG_OFFSET(UVD, 0, mmUVD_JRBC_RB_COND_RD_TIMER), 0, 0, PACKETJ_TYPE0);
- ring->ring[ptr++] = 0x01400200;
- ring->ring[ptr++] = PACKETJ(SOC15_REG_OFFSET(UVD, 0, mmUVD_JRBC_RB_REF_DATA), 0, 0, PACKETJ_TYPE0);
- ring->ring[ptr++] = val;
- ring->ring[ptr++] = PACKETJ(SOC15_REG_OFFSET(UVD, 0, mmUVD_JRBC_EXTERNAL_REG_BASE), 0, 0, PACKETJ_TYPE0);
- if (((reg_offset >= 0x1f800) && (reg_offset <= 0x21fff)) ||
- ((reg_offset >= 0x1e000) && (reg_offset <= 0x1e1ff))) {
- ring->ring[ptr++] = 0;
- ring->ring[ptr++] = PACKETJ((reg_offset >> 2), 0, 0, PACKETJ_TYPE3);
- } else {
- ring->ring[ptr++] = reg_offset;
- ring->ring[ptr++] = PACKETJ(0, 0, 0, PACKETJ_TYPE3);
- }
- ring->ring[ptr++] = mask;
-
- //9th to 21st: insert no-op
- for (i = 0; i <= 12; i++) {
- ring->ring[ptr++] = PACKETJ(0, 0, 0, PACKETJ_TYPE6);
- ring->ring[ptr++] = 0;
- }
-
- //22nd: reset mmUVD_JRBC_RB_RPTR
- reg = SOC15_REG_OFFSET(UVD, 0, mmUVD_JRBC_RB_RPTR);
- reg_offset = (reg << 2);
- val = 0;
- vcn_v1_0_jpeg_ring_patch_wreg(ring, &ptr, reg_offset, val);
-
- //23rd: program mmUVD_JRBC_RB_CNTL to disable no_fetch
- reg = SOC15_REG_OFFSET(UVD, 0, mmUVD_JRBC_RB_CNTL);
- reg_offset = (reg << 2);
- val = 0x12;
- vcn_v1_0_jpeg_ring_patch_wreg(ring, &ptr, reg_offset, val);
-}
-
static int vcn_v1_0_set_interrupt_state(struct amdgpu_device *adev,
struct amdgpu_irq_src *source,
unsigned type,
@@ -2123,9 +1716,6 @@ static int vcn_v1_0_process_interrupt(struct amdgpu_device *adev,
case 120:
amdgpu_fence_process(&adev->vcn.inst->ring_enc[1]);
break;
- case 126:
- amdgpu_fence_process(&adev->vcn.inst->ring_jpeg);
- break;
default:
DRM_ERROR("Unhandled interrupt: %d %d\n",
entry->src_id, entry->src_data[0]);
@@ -2174,6 +1764,86 @@ static int vcn_v1_0_set_powergating_state(void *handle,
return ret;
}
+static void vcn_v1_0_idle_work_handler(struct work_struct *work)
+{
+ struct amdgpu_device *adev =
+ container_of(work, struct amdgpu_device, vcn.idle_work.work);
+ unsigned int fences = 0, i;
+
+ for (i = 0; i < adev->vcn.num_enc_rings; ++i)
+ fences += amdgpu_fence_count_emitted(&adev->vcn.inst->ring_enc[i]);
+
+ if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) {
+ struct dpg_pause_state new_state;
+
+ if (fences)
+ new_state.fw_based = VCN_DPG_STATE__PAUSE;
+ else
+ new_state.fw_based = VCN_DPG_STATE__UNPAUSE;
+
+ if (amdgpu_fence_count_emitted(&adev->jpeg.inst->ring_dec))
+ new_state.jpeg = VCN_DPG_STATE__PAUSE;
+ else
+ new_state.jpeg = VCN_DPG_STATE__UNPAUSE;
+
+ adev->vcn.pause_dpg_mode(adev, 0, &new_state);
+ }
+
+ fences += amdgpu_fence_count_emitted(&adev->jpeg.inst->ring_dec);
+ fences += amdgpu_fence_count_emitted(&adev->vcn.inst->ring_dec);
+
+ if (fences == 0) {
+ amdgpu_gfx_off_ctrl(adev, true);
+ if (adev->pm.dpm_enabled)
+ amdgpu_dpm_enable_uvd(adev, false);
+ else
+ amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_VCN,
+ AMD_PG_STATE_GATE);
+ } else {
+ schedule_delayed_work(&adev->vcn.idle_work, VCN_IDLE_TIMEOUT);
+ }
+}
+
+void vcn_v1_0_ring_begin_use(struct amdgpu_ring *ring)
+{
+ struct amdgpu_device *adev = ring->adev;
+ bool set_clocks = !cancel_delayed_work_sync(&adev->vcn.idle_work);
+
+ if (set_clocks) {
+ amdgpu_gfx_off_ctrl(adev, false);
+ if (adev->pm.dpm_enabled)
+ amdgpu_dpm_enable_uvd(adev, true);
+ else
+ amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_VCN,
+ AMD_PG_STATE_UNGATE);
+ }
+
+ if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) {
+ struct dpg_pause_state new_state;
+ unsigned int fences = 0, i;
+
+ for (i = 0; i < adev->vcn.num_enc_rings; ++i)
+ fences += amdgpu_fence_count_emitted(&adev->vcn.inst->ring_enc[i]);
+
+ if (fences)
+ new_state.fw_based = VCN_DPG_STATE__PAUSE;
+ else
+ new_state.fw_based = VCN_DPG_STATE__UNPAUSE;
+
+ if (amdgpu_fence_count_emitted(&adev->jpeg.inst->ring_dec))
+ new_state.jpeg = VCN_DPG_STATE__PAUSE;
+ else
+ new_state.jpeg = VCN_DPG_STATE__UNPAUSE;
+
+ if (ring->funcs->type == AMDGPU_RING_TYPE_VCN_ENC)
+ new_state.fw_based = VCN_DPG_STATE__PAUSE;
+ else if (ring->funcs->type == AMDGPU_RING_TYPE_VCN_JPEG)
+ new_state.jpeg = VCN_DPG_STATE__PAUSE;
+
+ adev->vcn.pause_dpg_mode(adev, 0, &new_state);
+ }
+}
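vcn_v1_0_ring_begin_use() and the idle worker form the standard power-gating handshake: cancel_delayed_work_sync() returning false means no idle work was pending, i.e. the worker already ran (or was never armed) and the block must be powered back up, while amdgpu_vcn_ring_end_use() re-arms the delayed work. The skeleton, with hypothetical names:

void ring_begin_use(struct ring *r)
{
	/* false => idle work already ran; clocks/power were gated */
	if (!cancel_delayed_work_sync(&r->idle_work))
		power_ungate(r);
}

void ring_end_use(struct ring *r)
{
	/* worker gates power again if no new fences arrive in time */
	schedule_delayed_work(&r->idle_work, IDLE_TIMEOUT);
}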
+
static const struct amd_ip_funcs vcn_v1_0_ip_funcs = {
.name = "vcn_v1_0",
.early_init = vcn_v1_0_early_init,
@@ -2220,7 +1890,7 @@ static const struct amdgpu_ring_funcs vcn_v1_0_dec_ring_vm_funcs = {
.insert_start = vcn_v1_0_dec_ring_insert_start,
.insert_end = vcn_v1_0_dec_ring_insert_end,
.pad_ib = amdgpu_ring_generic_pad_ib,
- .begin_use = amdgpu_vcn_ring_begin_use,
+ .begin_use = vcn_v1_0_ring_begin_use,
.end_use = amdgpu_vcn_ring_end_use,
.emit_wreg = vcn_v1_0_dec_ring_emit_wreg,
.emit_reg_wait = vcn_v1_0_dec_ring_emit_reg_wait,
@@ -2252,48 +1922,13 @@ static const struct amdgpu_ring_funcs vcn_v1_0_enc_ring_vm_funcs = {
.insert_nop = amdgpu_ring_insert_nop,
.insert_end = vcn_v1_0_enc_ring_insert_end,
.pad_ib = amdgpu_ring_generic_pad_ib,
- .begin_use = amdgpu_vcn_ring_begin_use,
+ .begin_use = vcn_v1_0_ring_begin_use,
.end_use = amdgpu_vcn_ring_end_use,
.emit_wreg = vcn_v1_0_enc_ring_emit_wreg,
.emit_reg_wait = vcn_v1_0_enc_ring_emit_reg_wait,
.emit_reg_write_reg_wait = amdgpu_ring_emit_reg_write_reg_wait_helper,
};
-static const struct amdgpu_ring_funcs vcn_v1_0_jpeg_ring_vm_funcs = {
- .type = AMDGPU_RING_TYPE_VCN_JPEG,
- .align_mask = 0xf,
- .nop = PACKET0(0x81ff, 0),
- .support_64bit_ptrs = false,
- .no_user_fence = true,
- .vmhub = AMDGPU_MMHUB_0,
- .extra_dw = 64,
- .get_rptr = vcn_v1_0_jpeg_ring_get_rptr,
- .get_wptr = vcn_v1_0_jpeg_ring_get_wptr,
- .set_wptr = vcn_v1_0_jpeg_ring_set_wptr,
- .emit_frame_size =
- 6 + 6 + /* hdp invalidate / flush */
- SOC15_FLUSH_GPU_TLB_NUM_WREG * 6 +
- SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 8 +
- 8 + /* vcn_v1_0_jpeg_ring_emit_vm_flush */
- 26 + 26 + /* vcn_v1_0_jpeg_ring_emit_fence x2 vm fence */
- 6,
- .emit_ib_size = 22, /* vcn_v1_0_jpeg_ring_emit_ib */
- .emit_ib = vcn_v1_0_jpeg_ring_emit_ib,
- .emit_fence = vcn_v1_0_jpeg_ring_emit_fence,
- .emit_vm_flush = vcn_v1_0_jpeg_ring_emit_vm_flush,
- .test_ring = amdgpu_vcn_jpeg_ring_test_ring,
- .test_ib = amdgpu_vcn_jpeg_ring_test_ib,
- .insert_nop = vcn_v1_0_jpeg_ring_nop,
- .insert_start = vcn_v1_0_jpeg_ring_insert_start,
- .insert_end = vcn_v1_0_jpeg_ring_insert_end,
- .pad_ib = amdgpu_ring_generic_pad_ib,
- .begin_use = amdgpu_vcn_ring_begin_use,
- .end_use = amdgpu_vcn_ring_end_use,
- .emit_wreg = vcn_v1_0_jpeg_ring_emit_wreg,
- .emit_reg_wait = vcn_v1_0_jpeg_ring_emit_reg_wait,
- .emit_reg_write_reg_wait = amdgpu_ring_emit_reg_write_reg_wait_helper,
-};
-
static void vcn_v1_0_set_dec_ring_funcs(struct amdgpu_device *adev)
{
adev->vcn.inst->ring_dec.funcs = &vcn_v1_0_dec_ring_vm_funcs;
@@ -2310,12 +1945,6 @@ static void vcn_v1_0_set_enc_ring_funcs(struct amdgpu_device *adev)
DRM_INFO("VCN encode is enabled in VM mode\n");
}
-static void vcn_v1_0_set_jpeg_ring_funcs(struct amdgpu_device *adev)
-{
- adev->vcn.inst->ring_jpeg.funcs = &vcn_v1_0_jpeg_ring_vm_funcs;
- DRM_INFO("VCN jpeg decode is enabled in VM mode\n");
-}
-
static const struct amdgpu_irq_src_funcs vcn_v1_0_irq_funcs = {
.set = vcn_v1_0_set_interrupt_state,
.process = vcn_v1_0_process_interrupt,
diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.h b/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.h
index 2a497a7a4840..f67d7391fc21 100644
--- a/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.h
+++ b/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.h
@@ -24,6 +24,8 @@
#ifndef __VCN_V1_0_H__
#define __VCN_V1_0_H__
+void vcn_v1_0_ring_begin_use(struct amdgpu_ring *ring);
+
extern const struct amdgpu_ip_block_version vcn_v1_0_ip_block;
#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c b/drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c
index 38f787a560cb..f4db8af6536b 100644
--- a/drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c
@@ -47,39 +47,13 @@
#define mmUVD_LMI_RBC_IB_64BIT_BAR_LOW_INTERNAL_OFFSET 0x5a7
#define mmUVD_RBC_IB_SIZE_INTERNAL_OFFSET 0x1e2
-#define mmUVD_JRBC_EXTERNAL_REG_INTERNAL_OFFSET 0x1bfff
-#define mmUVD_JPEG_GPCOM_CMD_INTERNAL_OFFSET 0x4029
-#define mmUVD_JPEG_GPCOM_DATA0_INTERNAL_OFFSET 0x402a
-#define mmUVD_JPEG_GPCOM_DATA1_INTERNAL_OFFSET 0x402b
-#define mmUVD_LMI_JRBC_RB_MEM_WR_64BIT_BAR_LOW_INTERNAL_OFFSET 0x40ea
-#define mmUVD_LMI_JRBC_RB_MEM_WR_64BIT_BAR_HIGH_INTERNAL_OFFSET 0x40eb
-#define mmUVD_LMI_JRBC_IB_VMID_INTERNAL_OFFSET 0x40cf
-#define mmUVD_LMI_JPEG_VMID_INTERNAL_OFFSET 0x40d1
-#define mmUVD_LMI_JRBC_IB_64BIT_BAR_LOW_INTERNAL_OFFSET 0x40e8
-#define mmUVD_LMI_JRBC_IB_64BIT_BAR_HIGH_INTERNAL_OFFSET 0x40e9
-#define mmUVD_JRBC_IB_SIZE_INTERNAL_OFFSET 0x4082
-#define mmUVD_LMI_JRBC_RB_MEM_RD_64BIT_BAR_LOW_INTERNAL_OFFSET 0x40ec
-#define mmUVD_LMI_JRBC_RB_MEM_RD_64BIT_BAR_HIGH_INTERNAL_OFFSET 0x40ed
-#define mmUVD_JRBC_RB_COND_RD_TIMER_INTERNAL_OFFSET 0x4085
-#define mmUVD_JRBC_RB_REF_DATA_INTERNAL_OFFSET 0x4084
-#define mmUVD_JRBC_STATUS_INTERNAL_OFFSET 0x4089
-#define mmUVD_JPEG_PITCH_INTERNAL_OFFSET 0x401f
-
-#define JRBC_DEC_EXTERNAL_REG_WRITE_ADDR 0x18000
-
-#define mmUVD_RBC_XX_IB_REG_CHECK 0x026b
-#define mmUVD_RBC_XX_IB_REG_CHECK_BASE_IDX 1
-#define mmUVD_REG_XX_MASK 0x026c
-#define mmUVD_REG_XX_MASK_BASE_IDX 1
-
static void vcn_v2_0_set_dec_ring_funcs(struct amdgpu_device *adev);
static void vcn_v2_0_set_enc_ring_funcs(struct amdgpu_device *adev);
-static void vcn_v2_0_set_jpeg_ring_funcs(struct amdgpu_device *adev);
static void vcn_v2_0_set_irq_funcs(struct amdgpu_device *adev);
static int vcn_v2_0_set_powergating_state(void *handle,
enum amd_powergating_state state);
static int vcn_v2_0_pause_dpg_mode(struct amdgpu_device *adev,
- struct dpg_pause_state *new_state);
+ int inst_idx, struct dpg_pause_state *new_state);
/**
* vcn_v2_0_early_init - set function pointers
@@ -97,7 +71,6 @@ static int vcn_v2_0_early_init(void *handle)
vcn_v2_0_set_dec_ring_funcs(adev);
vcn_v2_0_set_enc_ring_funcs(adev);
- vcn_v2_0_set_jpeg_ring_funcs(adev);
vcn_v2_0_set_irq_funcs(adev);
return 0;
@@ -132,12 +105,6 @@ static int vcn_v2_0_sw_init(void *handle)
return r;
}
- /* VCN JPEG TRAP */
- r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_VCN,
- VCN_2_0__SRCID__JPEG_DECODE, &adev->vcn.inst->irq);
- if (r)
- return r;
-
r = amdgpu_vcn_sw_init(adev);
if (r)
return r;
@@ -194,19 +161,8 @@ static int vcn_v2_0_sw_init(void *handle)
return r;
}
- ring = &adev->vcn.inst->ring_jpeg;
- ring->use_doorbell = true;
- ring->doorbell_index = (adev->doorbell_index.vcn.vcn_ring0_1 << 1) + 1;
- sprintf(ring->name, "vcn_jpeg");
- r = amdgpu_ring_init(adev, ring, 512, &adev->vcn.inst->irq, 0);
- if (r)
- return r;
-
adev->vcn.pause_dpg_mode = vcn_v2_0_pause_dpg_mode;
- adev->vcn.internal.jpeg_pitch = mmUVD_JPEG_PITCH_INTERNAL_OFFSET;
- adev->vcn.inst->external.jpeg_pitch = SOC15_REG_OFFSET(UVD, 0, mmUVD_JPEG_PITCH);
-
return 0;
}
@@ -258,11 +214,6 @@ static int vcn_v2_0_hw_init(void *handle)
goto done;
}
- ring = &adev->vcn.inst->ring_jpeg;
- r = amdgpu_ring_test_helper(ring);
- if (r)
- goto done;
-
done:
if (!r)
DRM_INFO("VCN decode and encode initialized successfully(under %s).\n",
@@ -296,9 +247,6 @@ static int vcn_v2_0_hw_fini(void *handle)
ring->sched.ready = false;
}
- ring = &adev->vcn.inst->ring_jpeg;
- ring->sched.ready = false;
-
return 0;
}
@@ -393,7 +341,6 @@ static void vcn_v2_0_mc_resume(struct amdgpu_device *adev)
WREG32_SOC15(UVD, 0, mmUVD_VCPU_CACHE_SIZE2, AMDGPU_VCN_CONTEXT_SIZE);
WREG32_SOC15(UVD, 0, mmUVD_GFX10_ADDR_CONFIG, adev->gfx.config.gb_addr_config);
- WREG32_SOC15(UVD, 0, mmJPEG_DEC_GFX10_ADDR_CONFIG, adev->gfx.config.gb_addr_config);
}
static void vcn_v2_0_mc_resume_dpg_mode(struct amdgpu_device *adev, bool indirect)
@@ -404,88 +351,88 @@ static void vcn_v2_0_mc_resume_dpg_mode(struct amdgpu_device *adev, bool indirec
/* cache window 0: fw */
if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
if (!indirect) {
- WREG32_SOC15_DPG_MODE_2_0(SOC15_DPG_MODE_OFFSET_2_0(
+ WREG32_SOC15_DPG_MODE_2_0(0, SOC15_DPG_MODE_OFFSET_2_0(
UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW),
(adev->firmware.ucode[AMDGPU_UCODE_ID_VCN].tmr_mc_addr_lo), 0, indirect);
- WREG32_SOC15_DPG_MODE_2_0(SOC15_DPG_MODE_OFFSET_2_0(
+ WREG32_SOC15_DPG_MODE_2_0(0, SOC15_DPG_MODE_OFFSET_2_0(
UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH),
(adev->firmware.ucode[AMDGPU_UCODE_ID_VCN].tmr_mc_addr_hi), 0, indirect);
- WREG32_SOC15_DPG_MODE_2_0(SOC15_DPG_MODE_OFFSET_2_0(
+ WREG32_SOC15_DPG_MODE_2_0(0, SOC15_DPG_MODE_OFFSET_2_0(
UVD, 0, mmUVD_VCPU_CACHE_OFFSET0), 0, 0, indirect);
} else {
- WREG32_SOC15_DPG_MODE_2_0(SOC15_DPG_MODE_OFFSET_2_0(
+ WREG32_SOC15_DPG_MODE_2_0(0, SOC15_DPG_MODE_OFFSET_2_0(
UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW), 0, 0, indirect);
- WREG32_SOC15_DPG_MODE_2_0(SOC15_DPG_MODE_OFFSET_2_0(
+ WREG32_SOC15_DPG_MODE_2_0(0, SOC15_DPG_MODE_OFFSET_2_0(
UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH), 0, 0, indirect);
- WREG32_SOC15_DPG_MODE_2_0(SOC15_DPG_MODE_OFFSET_2_0(
+ WREG32_SOC15_DPG_MODE_2_0(0, SOC15_DPG_MODE_OFFSET_2_0(
UVD, 0, mmUVD_VCPU_CACHE_OFFSET0), 0, 0, indirect);
}
offset = 0;
} else {
- WREG32_SOC15_DPG_MODE_2_0(SOC15_DPG_MODE_OFFSET_2_0(
+ WREG32_SOC15_DPG_MODE_2_0(0, SOC15_DPG_MODE_OFFSET_2_0(
UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW),
lower_32_bits(adev->vcn.inst->gpu_addr), 0, indirect);
- WREG32_SOC15_DPG_MODE_2_0(SOC15_DPG_MODE_OFFSET_2_0(
+ WREG32_SOC15_DPG_MODE_2_0(0, SOC15_DPG_MODE_OFFSET_2_0(
UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH),
upper_32_bits(adev->vcn.inst->gpu_addr), 0, indirect);
offset = size;
- WREG32_SOC15_DPG_MODE_2_0(SOC15_DPG_MODE_OFFSET_2_0(
+ WREG32_SOC15_DPG_MODE_2_0(0, SOC15_DPG_MODE_OFFSET_2_0(
UVD, 0, mmUVD_VCPU_CACHE_OFFSET0),
AMDGPU_UVD_FIRMWARE_OFFSET >> 3, 0, indirect);
}
if (!indirect)
- WREG32_SOC15_DPG_MODE_2_0(SOC15_DPG_MODE_OFFSET_2_0(
+ WREG32_SOC15_DPG_MODE_2_0(0, SOC15_DPG_MODE_OFFSET_2_0(
UVD, 0, mmUVD_VCPU_CACHE_SIZE0), size, 0, indirect);
else
- WREG32_SOC15_DPG_MODE_2_0(SOC15_DPG_MODE_OFFSET_2_0(
+ WREG32_SOC15_DPG_MODE_2_0(0, SOC15_DPG_MODE_OFFSET_2_0(
UVD, 0, mmUVD_VCPU_CACHE_SIZE0), 0, 0, indirect);
/* cache window 1: stack */
if (!indirect) {
- WREG32_SOC15_DPG_MODE_2_0(SOC15_DPG_MODE_OFFSET_2_0(
+ WREG32_SOC15_DPG_MODE_2_0(0, SOC15_DPG_MODE_OFFSET_2_0(
UVD, 0, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_LOW),
lower_32_bits(adev->vcn.inst->gpu_addr + offset), 0, indirect);
- WREG32_SOC15_DPG_MODE_2_0(SOC15_DPG_MODE_OFFSET_2_0(
+ WREG32_SOC15_DPG_MODE_2_0(0, SOC15_DPG_MODE_OFFSET_2_0(
UVD, 0, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_HIGH),
upper_32_bits(adev->vcn.inst->gpu_addr + offset), 0, indirect);
- WREG32_SOC15_DPG_MODE_2_0(SOC15_DPG_MODE_OFFSET_2_0(
+ WREG32_SOC15_DPG_MODE_2_0(0, SOC15_DPG_MODE_OFFSET_2_0(
UVD, 0, mmUVD_VCPU_CACHE_OFFSET1), 0, 0, indirect);
} else {
- WREG32_SOC15_DPG_MODE_2_0(SOC15_DPG_MODE_OFFSET_2_0(
+ WREG32_SOC15_DPG_MODE_2_0(0, SOC15_DPG_MODE_OFFSET_2_0(
UVD, 0, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_LOW), 0, 0, indirect);
- WREG32_SOC15_DPG_MODE_2_0(SOC15_DPG_MODE_OFFSET_2_0(
+ WREG32_SOC15_DPG_MODE_2_0(0, SOC15_DPG_MODE_OFFSET_2_0(
UVD, 0, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_HIGH), 0, 0, indirect);
- WREG32_SOC15_DPG_MODE_2_0(SOC15_DPG_MODE_OFFSET_2_0(
+ WREG32_SOC15_DPG_MODE_2_0(0, SOC15_DPG_MODE_OFFSET_2_0(
UVD, 0, mmUVD_VCPU_CACHE_OFFSET1), 0, 0, indirect);
}
- WREG32_SOC15_DPG_MODE_2_0(SOC15_DPG_MODE_OFFSET_2_0(
+ WREG32_SOC15_DPG_MODE_2_0(0, SOC15_DPG_MODE_OFFSET_2_0(
UVD, 0, mmUVD_VCPU_CACHE_SIZE1), AMDGPU_VCN_STACK_SIZE, 0, indirect);
/* cache window 2: context */
- WREG32_SOC15_DPG_MODE_2_0(SOC15_DPG_MODE_OFFSET_2_0(
+ WREG32_SOC15_DPG_MODE_2_0(0, SOC15_DPG_MODE_OFFSET_2_0(
UVD, 0, mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_LOW),
lower_32_bits(adev->vcn.inst->gpu_addr + offset + AMDGPU_VCN_STACK_SIZE), 0, indirect);
- WREG32_SOC15_DPG_MODE_2_0(SOC15_DPG_MODE_OFFSET_2_0(
+ WREG32_SOC15_DPG_MODE_2_0(0, SOC15_DPG_MODE_OFFSET_2_0(
UVD, 0, mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_HIGH),
upper_32_bits(adev->vcn.inst->gpu_addr + offset + AMDGPU_VCN_STACK_SIZE), 0, indirect);
- WREG32_SOC15_DPG_MODE_2_0(SOC15_DPG_MODE_OFFSET_2_0(
+ WREG32_SOC15_DPG_MODE_2_0(0, SOC15_DPG_MODE_OFFSET_2_0(
UVD, 0, mmUVD_VCPU_CACHE_OFFSET2), 0, 0, indirect);
- WREG32_SOC15_DPG_MODE_2_0(SOC15_DPG_MODE_OFFSET_2_0(
+ WREG32_SOC15_DPG_MODE_2_0(0, SOC15_DPG_MODE_OFFSET_2_0(
UVD, 0, mmUVD_VCPU_CACHE_SIZE2), AMDGPU_VCN_CONTEXT_SIZE, 0, indirect);
/* non-cache window */
- WREG32_SOC15_DPG_MODE_2_0(SOC15_DPG_MODE_OFFSET_2_0(
+ WREG32_SOC15_DPG_MODE_2_0(0, SOC15_DPG_MODE_OFFSET_2_0(
UVD, 0, mmUVD_LMI_VCPU_NC0_64BIT_BAR_LOW), 0, 0, indirect);
- WREG32_SOC15_DPG_MODE_2_0(SOC15_DPG_MODE_OFFSET_2_0(
+ WREG32_SOC15_DPG_MODE_2_0(0, SOC15_DPG_MODE_OFFSET_2_0(
UVD, 0, mmUVD_LMI_VCPU_NC0_64BIT_BAR_HIGH), 0, 0, indirect);
- WREG32_SOC15_DPG_MODE_2_0(SOC15_DPG_MODE_OFFSET_2_0(
+ WREG32_SOC15_DPG_MODE_2_0(0, SOC15_DPG_MODE_OFFSET_2_0(
UVD, 0, mmUVD_VCPU_NONCACHE_OFFSET0), 0, 0, indirect);
- WREG32_SOC15_DPG_MODE_2_0(SOC15_DPG_MODE_OFFSET_2_0(
+ WREG32_SOC15_DPG_MODE_2_0(0, SOC15_DPG_MODE_OFFSET_2_0(
UVD, 0, mmUVD_VCPU_NONCACHE_SIZE0), 0, 0, indirect);
/* VCN global tiling registers */
- WREG32_SOC15_DPG_MODE_2_0(SOC15_DPG_MODE_OFFSET_2_0(
+ WREG32_SOC15_DPG_MODE_2_0(0, SOC15_DPG_MODE_OFFSET_2_0(
UVD, 0, mmUVD_GFX10_ADDR_CONFIG), adev->gfx.config.gb_addr_config, 0, indirect);
}
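
/*
 * Editorial note: throughout this file the WREG32_SOC15_DPG_MODE_2_0
 * calls gain a leading instance index. VCN 2.0 is single-instance and
 * always passes 0, but the indirect path must now append to the
 * per-instance dpg_sram_curr_addr rather than a device-global pointer
 * (see the vcn_v2_0_start_dpg_mode hunk below). A hedged sketch of the
 * macro shape after the rework follows; the authoritative definition
 * lives in the VCN headers and may differ in detail.
 */
#define WREG32_SOC15_DPG_MODE_2_0(inst_idx, offset, value, mask_en, indirect)	\
	do {									\
		if (!indirect) {						\
			WREG32_SOC15(VCN, inst_idx, mmUVD_DPG_LMA_DATA, value);	\
			WREG32_SOC15(VCN, inst_idx, mmUVD_DPG_LMA_CTL,		\
				(0x1 << UVD_DPG_LMA_CTL__READ_WRITE__SHIFT |	\
				 mask_en << UVD_DPG_LMA_CTL__MASK_EN__SHIFT |	\
				 offset << UVD_DPG_LMA_CTL__READ_WRITE_ADDR__SHIFT)); \
		} else {							\
			*adev->vcn.inst[inst_idx].dpg_sram_curr_addr++ = offset; \
			*adev->vcn.inst[inst_idx].dpg_sram_curr_addr++ = value; \
		}								\
	} while (0)
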
@@ -631,146 +578,23 @@ static void vcn_v2_0_clock_gating_dpg_mode(struct amdgpu_device *adev,
UVD_CGC_CTRL__WCB_MODE_MASK |
UVD_CGC_CTRL__VCPU_MODE_MASK |
UVD_CGC_CTRL__SCPU_MODE_MASK);
- WREG32_SOC15_DPG_MODE_2_0(SOC15_DPG_MODE_OFFSET_2_0(
+ WREG32_SOC15_DPG_MODE_2_0(0, SOC15_DPG_MODE_OFFSET_2_0(
UVD, 0, mmUVD_CGC_CTRL), reg_data, sram_sel, indirect);
/* turn off clock gating */
- WREG32_SOC15_DPG_MODE_2_0(SOC15_DPG_MODE_OFFSET_2_0(
+ WREG32_SOC15_DPG_MODE_2_0(0, SOC15_DPG_MODE_OFFSET_2_0(
UVD, 0, mmUVD_CGC_GATE), 0, sram_sel, indirect);
/* turn on SUVD clock gating */
- WREG32_SOC15_DPG_MODE_2_0(SOC15_DPG_MODE_OFFSET_2_0(
+ WREG32_SOC15_DPG_MODE_2_0(0, SOC15_DPG_MODE_OFFSET_2_0(
UVD, 0, mmUVD_SUVD_CGC_GATE), 1, sram_sel, indirect);
/* turn on sw mode in UVD_SUVD_CGC_CTRL */
- WREG32_SOC15_DPG_MODE_2_0(SOC15_DPG_MODE_OFFSET_2_0(
+ WREG32_SOC15_DPG_MODE_2_0(0, SOC15_DPG_MODE_OFFSET_2_0(
UVD, 0, mmUVD_SUVD_CGC_CTRL), 0, sram_sel, indirect);
}
/**
- * jpeg_v2_0_start - start JPEG block
- *
- * @adev: amdgpu_device pointer
- *
- * Setup and start the JPEG block
- */
-static int jpeg_v2_0_start(struct amdgpu_device *adev)
-{
- struct amdgpu_ring *ring = &adev->vcn.inst->ring_jpeg;
- uint32_t tmp;
- int r = 0;
-
- /* disable power gating */
- tmp = 1 << UVD_PGFSM_CONFIG__UVDJ_PWR_CONFIG__SHIFT;
- WREG32(SOC15_REG_OFFSET(UVD, 0, mmUVD_PGFSM_CONFIG), tmp);
-
- SOC15_WAIT_ON_RREG(VCN, 0,
- mmUVD_PGFSM_STATUS, UVD_PGFSM_STATUS_UVDJ_PWR_ON,
- UVD_PGFSM_STATUS__UVDJ_PWR_STATUS_MASK, r);
-
- if (r) {
- DRM_ERROR("amdgpu: JPEG disable power gating failed\n");
- return r;
- }
-
- /* Removing the anti hang mechanism to indicate the UVDJ tile is ON */
- tmp = RREG32(SOC15_REG_OFFSET(UVD, 0, mmUVD_JPEG_POWER_STATUS)) & ~0x1;
- WREG32(SOC15_REG_OFFSET(UVD, 0, mmUVD_JPEG_POWER_STATUS), tmp);
-
- /* JPEG disable CGC */
- tmp = RREG32_SOC15(VCN, 0, mmJPEG_CGC_CTRL);
- tmp |= 1 << JPEG_CGC_CTRL__DYN_CLOCK_MODE__SHIFT;
- tmp |= 1 << JPEG_CGC_CTRL__CLK_GATE_DLY_TIMER__SHIFT;
- tmp |= 4 << JPEG_CGC_CTRL__CLK_OFF_DELAY__SHIFT;
- WREG32_SOC15(VCN, 0, mmJPEG_CGC_CTRL, tmp);
-
- tmp = RREG32_SOC15(VCN, 0, mmJPEG_CGC_GATE);
- tmp &= ~(JPEG_CGC_GATE__JPEG_DEC_MASK
- | JPEG_CGC_GATE__JPEG2_DEC_MASK
- | JPEG_CGC_GATE__JPEG_ENC_MASK
- | JPEG_CGC_GATE__JMCIF_MASK
- | JPEG_CGC_GATE__JRBBM_MASK);
- WREG32_SOC15(VCN, 0, mmJPEG_CGC_GATE, tmp);
-
- /* enable JMI channel */
- WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_JMI_CNTL), 0,
- ~UVD_JMI_CNTL__SOFT_RESET_MASK);
-
- /* enable System Interrupt for JRBC */
- WREG32_P(SOC15_REG_OFFSET(VCN, 0, mmJPEG_SYS_INT_EN),
- JPEG_SYS_INT_EN__DJRBC_MASK,
- ~JPEG_SYS_INT_EN__DJRBC_MASK);
-
- WREG32_SOC15(UVD, 0, mmUVD_LMI_JRBC_RB_VMID, 0);
- WREG32_SOC15(UVD, 0, mmUVD_JRBC_RB_CNTL, (0x00000001L | 0x00000002L));
- WREG32_SOC15(UVD, 0, mmUVD_LMI_JRBC_RB_64BIT_BAR_LOW,
- lower_32_bits(ring->gpu_addr));
- WREG32_SOC15(UVD, 0, mmUVD_LMI_JRBC_RB_64BIT_BAR_HIGH,
- upper_32_bits(ring->gpu_addr));
- WREG32_SOC15(UVD, 0, mmUVD_JRBC_RB_RPTR, 0);
- WREG32_SOC15(UVD, 0, mmUVD_JRBC_RB_WPTR, 0);
- WREG32_SOC15(UVD, 0, mmUVD_JRBC_RB_CNTL, 0x00000002L);
- WREG32_SOC15(UVD, 0, mmUVD_JRBC_RB_SIZE, ring->ring_size / 4);
- ring->wptr = RREG32_SOC15(UVD, 0, mmUVD_JRBC_RB_WPTR);
-
- return 0;
-}
-
-/**
- * jpeg_v2_0_stop - stop JPEG block
- *
- * @adev: amdgpu_device pointer
- *
- * stop the JPEG block
- */
-static int jpeg_v2_0_stop(struct amdgpu_device *adev)
-{
- uint32_t tmp;
- int r = 0;
-
- /* reset JMI */
- WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_JMI_CNTL),
- UVD_JMI_CNTL__SOFT_RESET_MASK,
- ~UVD_JMI_CNTL__SOFT_RESET_MASK);
-
- /* enable JPEG CGC */
- tmp = RREG32_SOC15(VCN, 0, mmJPEG_CGC_CTRL);
- tmp |= 1 << JPEG_CGC_CTRL__DYN_CLOCK_MODE__SHIFT;
- tmp |= 1 << JPEG_CGC_CTRL__CLK_GATE_DLY_TIMER__SHIFT;
- tmp |= 4 << JPEG_CGC_CTRL__CLK_OFF_DELAY__SHIFT;
- WREG32_SOC15(VCN, 0, mmJPEG_CGC_CTRL, tmp);
-
-
- tmp = RREG32_SOC15(VCN, 0, mmJPEG_CGC_GATE);
- tmp |= (JPEG_CGC_GATE__JPEG_DEC_MASK
- |JPEG_CGC_GATE__JPEG2_DEC_MASK
- |JPEG_CGC_GATE__JPEG_ENC_MASK
- |JPEG_CGC_GATE__JMCIF_MASK
- |JPEG_CGC_GATE__JRBBM_MASK);
- WREG32_SOC15(VCN, 0, mmJPEG_CGC_GATE, tmp);
-
- /* enable power gating */
- tmp = RREG32(SOC15_REG_OFFSET(UVD, 0, mmUVD_JPEG_POWER_STATUS));
- tmp &= ~UVD_JPEG_POWER_STATUS__JPEG_POWER_STATUS_MASK;
- tmp |= 0x1; //UVD_JPEG_POWER_STATUS__JPEG_POWER_STATUS_TILES_OFF;
- WREG32(SOC15_REG_OFFSET(UVD, 0, mmUVD_JPEG_POWER_STATUS), tmp);
-
- tmp = 2 << UVD_PGFSM_CONFIG__UVDJ_PWR_CONFIG__SHIFT;
- WREG32(SOC15_REG_OFFSET(UVD, 0, mmUVD_PGFSM_CONFIG), tmp);
-
- SOC15_WAIT_ON_RREG(VCN, 0, mmUVD_PGFSM_STATUS,
- (2 << UVD_PGFSM_STATUS__UVDJ_PWR_STATUS__SHIFT),
- UVD_PGFSM_STATUS__UVDJ_PWR_STATUS_MASK, r);
-
- if (r) {
- DRM_ERROR("amdgpu: JPEG enable power gating failed\n");
- return r;
- }
-
- return r;
-}
-
-/**
* vcn_v2_0_enable_clock_gating - enable VCN clock gating
*
* @adev: amdgpu_device pointer
@@ -930,7 +754,7 @@ static int vcn_v2_0_start_dpg_mode(struct amdgpu_device *adev, bool indirect)
WREG32_SOC15(UVD, 0, mmUVD_POWER_STATUS, tmp);
if (indirect)
- adev->vcn.dpg_sram_curr_addr = (uint32_t*)adev->vcn.dpg_sram_cpu_addr;
+ adev->vcn.inst->dpg_sram_curr_addr = (uint32_t*)adev->vcn.inst->dpg_sram_cpu_addr;
/* enable clock gating */
vcn_v2_0_clock_gating_dpg_mode(adev, 0, indirect);
@@ -939,11 +763,11 @@ static int vcn_v2_0_start_dpg_mode(struct amdgpu_device *adev, bool indirect)
tmp = (0xFF << UVD_VCPU_CNTL__PRB_TIMEOUT_VAL__SHIFT);
tmp |= UVD_VCPU_CNTL__CLK_EN_MASK;
tmp |= UVD_VCPU_CNTL__MIF_WR_LOW_THRESHOLD_BP_MASK;
- WREG32_SOC15_DPG_MODE_2_0(SOC15_DPG_MODE_OFFSET_2_0(
+ WREG32_SOC15_DPG_MODE_2_0(0, SOC15_DPG_MODE_OFFSET_2_0(
UVD, 0, mmUVD_VCPU_CNTL), tmp, 0, indirect);
/* disable master interrupt */
- WREG32_SOC15_DPG_MODE_2_0(SOC15_DPG_MODE_OFFSET_2_0(
+ WREG32_SOC15_DPG_MODE_2_0(0, SOC15_DPG_MODE_OFFSET_2_0(
UVD, 0, mmUVD_MASTINT_EN), 0, 0, indirect);
/* setup mmUVD_LMI_CTRL */
@@ -955,28 +779,28 @@ static int vcn_v2_0_start_dpg_mode(struct amdgpu_device *adev, bool indirect)
UVD_LMI_CTRL__VCPU_DATA_COHERENCY_EN_MASK |
(8 << UVD_LMI_CTRL__WRITE_CLEAN_TIMER__SHIFT) |
0x00100000L);
- WREG32_SOC15_DPG_MODE_2_0(SOC15_DPG_MODE_OFFSET_2_0(
+ WREG32_SOC15_DPG_MODE_2_0(0, SOC15_DPG_MODE_OFFSET_2_0(
UVD, 0, mmUVD_LMI_CTRL), tmp, 0, indirect);
- WREG32_SOC15_DPG_MODE_2_0(SOC15_DPG_MODE_OFFSET_2_0(
+ WREG32_SOC15_DPG_MODE_2_0(0, SOC15_DPG_MODE_OFFSET_2_0(
UVD, 0, mmUVD_MPC_CNTL),
0x2 << UVD_MPC_CNTL__REPLACEMENT_MODE__SHIFT, 0, indirect);
- WREG32_SOC15_DPG_MODE_2_0(SOC15_DPG_MODE_OFFSET_2_0(
+ WREG32_SOC15_DPG_MODE_2_0(0, SOC15_DPG_MODE_OFFSET_2_0(
UVD, 0, mmUVD_MPC_SET_MUXA0),
((0x1 << UVD_MPC_SET_MUXA0__VARA_1__SHIFT) |
(0x2 << UVD_MPC_SET_MUXA0__VARA_2__SHIFT) |
(0x3 << UVD_MPC_SET_MUXA0__VARA_3__SHIFT) |
(0x4 << UVD_MPC_SET_MUXA0__VARA_4__SHIFT)), 0, indirect);
- WREG32_SOC15_DPG_MODE_2_0(SOC15_DPG_MODE_OFFSET_2_0(
+ WREG32_SOC15_DPG_MODE_2_0(0, SOC15_DPG_MODE_OFFSET_2_0(
UVD, 0, mmUVD_MPC_SET_MUXB0),
((0x1 << UVD_MPC_SET_MUXB0__VARB_1__SHIFT) |
(0x2 << UVD_MPC_SET_MUXB0__VARB_2__SHIFT) |
(0x3 << UVD_MPC_SET_MUXB0__VARB_3__SHIFT) |
(0x4 << UVD_MPC_SET_MUXB0__VARB_4__SHIFT)), 0, indirect);
- WREG32_SOC15_DPG_MODE_2_0(SOC15_DPG_MODE_OFFSET_2_0(
+ WREG32_SOC15_DPG_MODE_2_0(0, SOC15_DPG_MODE_OFFSET_2_0(
UVD, 0, mmUVD_MPC_SET_MUX),
((0x0 << UVD_MPC_SET_MUX__SET_0__SHIFT) |
(0x1 << UVD_MPC_SET_MUX__SET_1__SHIFT) |
@@ -984,29 +808,29 @@ static int vcn_v2_0_start_dpg_mode(struct amdgpu_device *adev, bool indirect)
vcn_v2_0_mc_resume_dpg_mode(adev, indirect);
- WREG32_SOC15_DPG_MODE_2_0(SOC15_DPG_MODE_OFFSET_2_0(
+ WREG32_SOC15_DPG_MODE_2_0(0, SOC15_DPG_MODE_OFFSET_2_0(
UVD, 0, mmUVD_REG_XX_MASK), 0x10, 0, indirect);
- WREG32_SOC15_DPG_MODE_2_0(SOC15_DPG_MODE_OFFSET_2_0(
+ WREG32_SOC15_DPG_MODE_2_0(0, SOC15_DPG_MODE_OFFSET_2_0(
UVD, 0, mmUVD_RBC_XX_IB_REG_CHECK), 0x3, 0, indirect);
/* release VCPU reset to boot */
- WREG32_SOC15_DPG_MODE_2_0(SOC15_DPG_MODE_OFFSET_2_0(
+ WREG32_SOC15_DPG_MODE_2_0(0, SOC15_DPG_MODE_OFFSET_2_0(
UVD, 0, mmUVD_SOFT_RESET), 0, 0, indirect);
/* enable LMI MC and UMC channels */
- WREG32_SOC15_DPG_MODE_2_0(SOC15_DPG_MODE_OFFSET_2_0(
+ WREG32_SOC15_DPG_MODE_2_0(0, SOC15_DPG_MODE_OFFSET_2_0(
UVD, 0, mmUVD_LMI_CTRL2),
0x1F << UVD_LMI_CTRL2__RE_OFLD_MIF_WR_REQ_NUM__SHIFT, 0, indirect);
/* enable master interrupt */
- WREG32_SOC15_DPG_MODE_2_0(SOC15_DPG_MODE_OFFSET_2_0(
+ WREG32_SOC15_DPG_MODE_2_0(0, SOC15_DPG_MODE_OFFSET_2_0(
UVD, 0, mmUVD_MASTINT_EN),
UVD_MASTINT_EN__VCPU_EN_MASK, 0, indirect);
if (indirect)
- psp_update_vcn_sram(adev, 0, adev->vcn.dpg_sram_gpu_addr,
- (uint32_t)((uintptr_t)adev->vcn.dpg_sram_curr_addr -
- (uintptr_t)adev->vcn.dpg_sram_cpu_addr));
+ psp_update_vcn_sram(adev, 0, adev->vcn.inst->dpg_sram_gpu_addr,
+ (uint32_t)((uintptr_t)adev->vcn.inst->dpg_sram_curr_addr -
+ (uintptr_t)adev->vcn.inst->dpg_sram_cpu_addr));
/* force RBC into idle state */
rb_bufsz = order_base_2(ring->ring_size);
@@ -1052,12 +876,8 @@ static int vcn_v2_0_start(struct amdgpu_device *adev)
if (adev->pm.dpm_enabled)
amdgpu_dpm_enable_uvd(adev, true);
- if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) {
- r = vcn_v2_0_start_dpg_mode(adev, adev->vcn.indirect_sram);
- if (r)
- return r;
- goto jpeg;
- }
+ if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG)
+ return vcn_v2_0_start_dpg_mode(adev, adev->vcn.indirect_sram);
vcn_v2_0_disable_static_power_gating(adev);
@@ -1209,10 +1029,7 @@ static int vcn_v2_0_start(struct amdgpu_device *adev)
WREG32_SOC15(UVD, 0, mmUVD_RB_BASE_HI2, upper_32_bits(ring->gpu_addr));
WREG32_SOC15(UVD, 0, mmUVD_RB_SIZE2, ring->ring_size / 4);
-jpeg:
- r = jpeg_v2_0_start(adev);
-
- return r;
+ return 0;
}
static int vcn_v2_0_stop_dpg_mode(struct amdgpu_device *adev)
@@ -1231,9 +1048,6 @@ static int vcn_v2_0_stop_dpg_mode(struct amdgpu_device *adev)
tmp = RREG32_SOC15(UVD, 0, mmUVD_RB_WPTR2);
SOC15_WAIT_ON_RREG(UVD, 0, mmUVD_RB_RPTR2, tmp, 0xFFFFFFFF, ret_code);
- tmp = RREG32_SOC15(UVD, 0, mmUVD_JRBC_RB_WPTR);
- SOC15_WAIT_ON_RREG(UVD, 0, mmUVD_JRBC_RB_RPTR, tmp, 0xFFFFFFFF, ret_code);
-
tmp = RREG32_SOC15(UVD, 0, mmUVD_RBC_RB_WPTR) & 0x7FFFFFFF;
SOC15_WAIT_ON_RREG(UVD, 0, mmUVD_RBC_RB_RPTR, tmp, 0xFFFFFFFF, ret_code);
@@ -1252,10 +1066,6 @@ static int vcn_v2_0_stop(struct amdgpu_device *adev)
uint32_t tmp;
int r;
- r = jpeg_v2_0_stop(adev);
- if (r)
- return r;
-
if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) {
r = vcn_v2_0_stop_dpg_mode(adev);
if (r)
@@ -1320,7 +1130,7 @@ power_off:
}
static int vcn_v2_0_pause_dpg_mode(struct amdgpu_device *adev,
- struct dpg_pause_state *new_state)
+ int inst_idx, struct dpg_pause_state *new_state)
{
struct amdgpu_ring *ring;
uint32_t reg_data = 0;
@@ -1781,272 +1591,6 @@ void vcn_v2_0_enc_ring_emit_wreg(struct amdgpu_ring *ring, uint32_t reg, uint32_
amdgpu_ring_write(ring, val);
}
-/**
- * vcn_v2_0_jpeg_ring_get_rptr - get read pointer
- *
- * @ring: amdgpu_ring pointer
- *
- * Returns the current hardware read pointer
- */
-static uint64_t vcn_v2_0_jpeg_ring_get_rptr(struct amdgpu_ring *ring)
-{
- struct amdgpu_device *adev = ring->adev;
-
- return RREG32_SOC15(UVD, 0, mmUVD_JRBC_RB_RPTR);
-}
-
-/**
- * vcn_v2_0_jpeg_ring_get_wptr - get write pointer
- *
- * @ring: amdgpu_ring pointer
- *
- * Returns the current hardware write pointer
- */
-static uint64_t vcn_v2_0_jpeg_ring_get_wptr(struct amdgpu_ring *ring)
-{
- struct amdgpu_device *adev = ring->adev;
-
- if (ring->use_doorbell)
- return adev->wb.wb[ring->wptr_offs];
- else
- return RREG32_SOC15(UVD, 0, mmUVD_JRBC_RB_WPTR);
-}
-
-/**
- * vcn_v2_0_jpeg_ring_set_wptr - set write pointer
- *
- * @ring: amdgpu_ring pointer
- *
- * Commits the write pointer to the hardware
- */
-static void vcn_v2_0_jpeg_ring_set_wptr(struct amdgpu_ring *ring)
-{
- struct amdgpu_device *adev = ring->adev;
-
- if (ring->use_doorbell) {
- adev->wb.wb[ring->wptr_offs] = lower_32_bits(ring->wptr);
- WDOORBELL32(ring->doorbell_index, lower_32_bits(ring->wptr));
- } else {
- WREG32_SOC15(UVD, 0, mmUVD_JRBC_RB_WPTR, lower_32_bits(ring->wptr));
- }
-}
-
-/**
- * vcn_v2_0_jpeg_ring_insert_start - insert a start command
- *
- * @ring: amdgpu_ring pointer
- *
- * Write a start command to the ring.
- */
-void vcn_v2_0_jpeg_ring_insert_start(struct amdgpu_ring *ring)
-{
- amdgpu_ring_write(ring, PACKETJ(mmUVD_JRBC_EXTERNAL_REG_INTERNAL_OFFSET,
- 0, 0, PACKETJ_TYPE0));
- amdgpu_ring_write(ring, 0x68e04);
-
- amdgpu_ring_write(ring, PACKETJ(JRBC_DEC_EXTERNAL_REG_WRITE_ADDR,
- 0, 0, PACKETJ_TYPE0));
- amdgpu_ring_write(ring, 0x80010000);
-}
-
-/**
- * vcn_v2_0_jpeg_ring_insert_end - insert a end command
- *
- * @ring: amdgpu_ring pointer
- *
- * Write a end command to the ring.
- */
-void vcn_v2_0_jpeg_ring_insert_end(struct amdgpu_ring *ring)
-{
- amdgpu_ring_write(ring, PACKETJ(mmUVD_JRBC_EXTERNAL_REG_INTERNAL_OFFSET,
- 0, 0, PACKETJ_TYPE0));
- amdgpu_ring_write(ring, 0x68e04);
-
- amdgpu_ring_write(ring, PACKETJ(JRBC_DEC_EXTERNAL_REG_WRITE_ADDR,
- 0, 0, PACKETJ_TYPE0));
- amdgpu_ring_write(ring, 0x00010000);
-}
-
-/**
- * vcn_v2_0_jpeg_ring_emit_fence - emit an fence & trap command
- *
- * @ring: amdgpu_ring pointer
- * @fence: fence to emit
- *
- * Write a fence and a trap command to the ring.
- */
-void vcn_v2_0_jpeg_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 seq,
- unsigned flags)
-{
- WARN_ON(flags & AMDGPU_FENCE_FLAG_64BIT);
-
- amdgpu_ring_write(ring, PACKETJ(mmUVD_JPEG_GPCOM_DATA0_INTERNAL_OFFSET,
- 0, 0, PACKETJ_TYPE0));
- amdgpu_ring_write(ring, seq);
-
- amdgpu_ring_write(ring, PACKETJ(mmUVD_JPEG_GPCOM_DATA1_INTERNAL_OFFSET,
- 0, 0, PACKETJ_TYPE0));
- amdgpu_ring_write(ring, seq);
-
- amdgpu_ring_write(ring, PACKETJ(mmUVD_LMI_JRBC_RB_MEM_WR_64BIT_BAR_LOW_INTERNAL_OFFSET,
- 0, 0, PACKETJ_TYPE0));
- amdgpu_ring_write(ring, lower_32_bits(addr));
-
- amdgpu_ring_write(ring, PACKETJ(mmUVD_LMI_JRBC_RB_MEM_WR_64BIT_BAR_HIGH_INTERNAL_OFFSET,
- 0, 0, PACKETJ_TYPE0));
- amdgpu_ring_write(ring, upper_32_bits(addr));
-
- amdgpu_ring_write(ring, PACKETJ(mmUVD_JPEG_GPCOM_CMD_INTERNAL_OFFSET,
- 0, 0, PACKETJ_TYPE0));
- amdgpu_ring_write(ring, 0x8);
-
- amdgpu_ring_write(ring, PACKETJ(mmUVD_JPEG_GPCOM_CMD_INTERNAL_OFFSET,
- 0, PACKETJ_CONDITION_CHECK0, PACKETJ_TYPE4));
- amdgpu_ring_write(ring, 0);
-
- amdgpu_ring_write(ring, PACKETJ(mmUVD_JRBC_EXTERNAL_REG_INTERNAL_OFFSET,
- 0, 0, PACKETJ_TYPE0));
- amdgpu_ring_write(ring, 0x3fbc);
-
- amdgpu_ring_write(ring, PACKETJ(JRBC_DEC_EXTERNAL_REG_WRITE_ADDR,
- 0, 0, PACKETJ_TYPE0));
- amdgpu_ring_write(ring, 0x1);
-
- amdgpu_ring_write(ring, PACKETJ(0, 0, 0, PACKETJ_TYPE7));
- amdgpu_ring_write(ring, 0);
-}
-
-/**
- * vcn_v2_0_jpeg_ring_emit_ib - execute indirect buffer
- *
- * @ring: amdgpu_ring pointer
- * @ib: indirect buffer to execute
- *
- * Write ring commands to execute the indirect buffer.
- */
-void vcn_v2_0_jpeg_ring_emit_ib(struct amdgpu_ring *ring,
- struct amdgpu_job *job,
- struct amdgpu_ib *ib,
- uint32_t flags)
-{
- unsigned vmid = AMDGPU_JOB_GET_VMID(job);
-
- amdgpu_ring_write(ring, PACKETJ(mmUVD_LMI_JRBC_IB_VMID_INTERNAL_OFFSET,
- 0, 0, PACKETJ_TYPE0));
- amdgpu_ring_write(ring, (vmid | (vmid << 4)));
-
- amdgpu_ring_write(ring, PACKETJ(mmUVD_LMI_JPEG_VMID_INTERNAL_OFFSET,
- 0, 0, PACKETJ_TYPE0));
- amdgpu_ring_write(ring, (vmid | (vmid << 4)));
-
- amdgpu_ring_write(ring, PACKETJ(mmUVD_LMI_JRBC_IB_64BIT_BAR_LOW_INTERNAL_OFFSET,
- 0, 0, PACKETJ_TYPE0));
- amdgpu_ring_write(ring, lower_32_bits(ib->gpu_addr));
-
- amdgpu_ring_write(ring, PACKETJ(mmUVD_LMI_JRBC_IB_64BIT_BAR_HIGH_INTERNAL_OFFSET,
- 0, 0, PACKETJ_TYPE0));
- amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr));
-
- amdgpu_ring_write(ring, PACKETJ(mmUVD_JRBC_IB_SIZE_INTERNAL_OFFSET,
- 0, 0, PACKETJ_TYPE0));
- amdgpu_ring_write(ring, ib->length_dw);
-
- amdgpu_ring_write(ring, PACKETJ(mmUVD_LMI_JRBC_RB_MEM_RD_64BIT_BAR_LOW_INTERNAL_OFFSET,
- 0, 0, PACKETJ_TYPE0));
- amdgpu_ring_write(ring, lower_32_bits(ring->gpu_addr));
-
- amdgpu_ring_write(ring, PACKETJ(mmUVD_LMI_JRBC_RB_MEM_RD_64BIT_BAR_HIGH_INTERNAL_OFFSET,
- 0, 0, PACKETJ_TYPE0));
- amdgpu_ring_write(ring, upper_32_bits(ring->gpu_addr));
-
- amdgpu_ring_write(ring, PACKETJ(0, 0, PACKETJ_CONDITION_CHECK0, PACKETJ_TYPE2));
- amdgpu_ring_write(ring, 0);
-
- amdgpu_ring_write(ring, PACKETJ(mmUVD_JRBC_RB_COND_RD_TIMER_INTERNAL_OFFSET,
- 0, 0, PACKETJ_TYPE0));
- amdgpu_ring_write(ring, 0x01400200);
-
- amdgpu_ring_write(ring, PACKETJ(mmUVD_JRBC_RB_REF_DATA_INTERNAL_OFFSET,
- 0, 0, PACKETJ_TYPE0));
- amdgpu_ring_write(ring, 0x2);
-
- amdgpu_ring_write(ring, PACKETJ(mmUVD_JRBC_STATUS_INTERNAL_OFFSET,
- 0, PACKETJ_CONDITION_CHECK3, PACKETJ_TYPE3));
- amdgpu_ring_write(ring, 0x2);
-}
-
-void vcn_v2_0_jpeg_ring_emit_reg_wait(struct amdgpu_ring *ring, uint32_t reg,
- uint32_t val, uint32_t mask)
-{
- uint32_t reg_offset = (reg << 2);
-
- amdgpu_ring_write(ring, PACKETJ(mmUVD_JRBC_RB_COND_RD_TIMER_INTERNAL_OFFSET,
- 0, 0, PACKETJ_TYPE0));
- amdgpu_ring_write(ring, 0x01400200);
-
- amdgpu_ring_write(ring, PACKETJ(mmUVD_JRBC_RB_REF_DATA_INTERNAL_OFFSET,
- 0, 0, PACKETJ_TYPE0));
- amdgpu_ring_write(ring, val);
-
- amdgpu_ring_write(ring, PACKETJ(mmUVD_JRBC_EXTERNAL_REG_INTERNAL_OFFSET,
- 0, 0, PACKETJ_TYPE0));
- if (reg_offset >= 0x10000 && reg_offset <= 0x105ff) {
- amdgpu_ring_write(ring, 0);
- amdgpu_ring_write(ring,
- PACKETJ((reg_offset >> 2), 0, 0, PACKETJ_TYPE3));
- } else {
- amdgpu_ring_write(ring, reg_offset);
- amdgpu_ring_write(ring, PACKETJ(JRBC_DEC_EXTERNAL_REG_WRITE_ADDR,
- 0, 0, PACKETJ_TYPE3));
- }
- amdgpu_ring_write(ring, mask);
-}
-
-void vcn_v2_0_jpeg_ring_emit_vm_flush(struct amdgpu_ring *ring,
- unsigned vmid, uint64_t pd_addr)
-{
- struct amdgpu_vmhub *hub = &ring->adev->vmhub[ring->funcs->vmhub];
- uint32_t data0, data1, mask;
-
- pd_addr = amdgpu_gmc_emit_flush_gpu_tlb(ring, vmid, pd_addr);
-
- /* wait for register write */
- data0 = hub->ctx0_ptb_addr_lo32 + vmid * 2;
- data1 = lower_32_bits(pd_addr);
- mask = 0xffffffff;
- vcn_v2_0_jpeg_ring_emit_reg_wait(ring, data0, data1, mask);
-}
-
-void vcn_v2_0_jpeg_ring_emit_wreg(struct amdgpu_ring *ring, uint32_t reg, uint32_t val)
-{
- uint32_t reg_offset = (reg << 2);
-
- amdgpu_ring_write(ring, PACKETJ(mmUVD_JRBC_EXTERNAL_REG_INTERNAL_OFFSET,
- 0, 0, PACKETJ_TYPE0));
- if (reg_offset >= 0x10000 && reg_offset <= 0x105ff) {
- amdgpu_ring_write(ring, 0);
- amdgpu_ring_write(ring,
- PACKETJ((reg_offset >> 2), 0, 0, PACKETJ_TYPE0));
- } else {
- amdgpu_ring_write(ring, reg_offset);
- amdgpu_ring_write(ring, PACKETJ(JRBC_DEC_EXTERNAL_REG_WRITE_ADDR,
- 0, 0, PACKETJ_TYPE0));
- }
- amdgpu_ring_write(ring, val);
-}
-
-void vcn_v2_0_jpeg_ring_nop(struct amdgpu_ring *ring, uint32_t count)
-{
- int i;
-
- WARN_ON(ring->wptr % 2 || count % 2);
-
- for (i = 0; i < count / 2; i++) {
- amdgpu_ring_write(ring, PACKETJ(0, 0, 0, PACKETJ_TYPE6));
- amdgpu_ring_write(ring, 0);
- }
-}
-
static int vcn_v2_0_set_interrupt_state(struct amdgpu_device *adev,
struct amdgpu_irq_src *source,
unsigned type,
@@ -2071,9 +1615,6 @@ static int vcn_v2_0_process_interrupt(struct amdgpu_device *adev,
case VCN_2_0__SRCID__UVD_ENC_LOW_LATENCY:
amdgpu_fence_process(&adev->vcn.inst->ring_enc[1]);
break;
- case VCN_2_0__SRCID__JPEG_DECODE:
- amdgpu_fence_process(&adev->vcn.inst->ring_jpeg);
- break;
default:
DRM_ERROR("Unhandled interrupt: %d %d\n",
entry->src_id, entry->src_data[0]);
@@ -2219,36 +1760,6 @@ static const struct amdgpu_ring_funcs vcn_v2_0_enc_ring_vm_funcs = {
.emit_reg_write_reg_wait = amdgpu_ring_emit_reg_write_reg_wait_helper,
};
-static const struct amdgpu_ring_funcs vcn_v2_0_jpeg_ring_vm_funcs = {
- .type = AMDGPU_RING_TYPE_VCN_JPEG,
- .align_mask = 0xf,
- .vmhub = AMDGPU_MMHUB_0,
- .get_rptr = vcn_v2_0_jpeg_ring_get_rptr,
- .get_wptr = vcn_v2_0_jpeg_ring_get_wptr,
- .set_wptr = vcn_v2_0_jpeg_ring_set_wptr,
- .emit_frame_size =
- SOC15_FLUSH_GPU_TLB_NUM_WREG * 6 +
- SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 8 +
- 8 + /* vcn_v2_0_jpeg_ring_emit_vm_flush */
- 18 + 18 + /* vcn_v2_0_jpeg_ring_emit_fence x2 vm fence */
- 8 + 16,
- .emit_ib_size = 22, /* vcn_v2_0_jpeg_ring_emit_ib */
- .emit_ib = vcn_v2_0_jpeg_ring_emit_ib,
- .emit_fence = vcn_v2_0_jpeg_ring_emit_fence,
- .emit_vm_flush = vcn_v2_0_jpeg_ring_emit_vm_flush,
- .test_ring = amdgpu_vcn_jpeg_ring_test_ring,
- .test_ib = amdgpu_vcn_jpeg_ring_test_ib,
- .insert_nop = vcn_v2_0_jpeg_ring_nop,
- .insert_start = vcn_v2_0_jpeg_ring_insert_start,
- .insert_end = vcn_v2_0_jpeg_ring_insert_end,
- .pad_ib = amdgpu_ring_generic_pad_ib,
- .begin_use = amdgpu_vcn_ring_begin_use,
- .end_use = amdgpu_vcn_ring_end_use,
- .emit_wreg = vcn_v2_0_jpeg_ring_emit_wreg,
- .emit_reg_wait = vcn_v2_0_jpeg_ring_emit_reg_wait,
- .emit_reg_write_reg_wait = amdgpu_ring_emit_reg_write_reg_wait_helper,
-};
-
static void vcn_v2_0_set_dec_ring_funcs(struct amdgpu_device *adev)
{
adev->vcn.inst->ring_dec.funcs = &vcn_v2_0_dec_ring_vm_funcs;
@@ -2265,12 +1776,6 @@ static void vcn_v2_0_set_enc_ring_funcs(struct amdgpu_device *adev)
DRM_INFO("VCN encode is enabled in VM mode\n");
}
-static void vcn_v2_0_set_jpeg_ring_funcs(struct amdgpu_device *adev)
-{
- adev->vcn.inst->ring_jpeg.funcs = &vcn_v2_0_jpeg_ring_vm_funcs;
- DRM_INFO("VCN jpeg decode is enabled in VM mode\n");
-}
-
static const struct amdgpu_irq_src_funcs vcn_v2_0_irq_funcs = {
.set = vcn_v2_0_set_interrupt_state,
.process = vcn_v2_0_process_interrupt,
@@ -2278,7 +1783,7 @@ static const struct amdgpu_irq_src_funcs vcn_v2_0_irq_funcs = {
static void vcn_v2_0_set_irq_funcs(struct amdgpu_device *adev)
{
- adev->vcn.inst->irq.num_types = adev->vcn.num_enc_rings + 2;
+ adev->vcn.inst->irq.num_types = adev->vcn.num_enc_rings + 1;
adev->vcn.inst->irq.funcs = &vcn_v2_0_irq_funcs;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v2_0.h b/drivers/gpu/drm/amd/amdgpu/vcn_v2_0.h
index 8467292f32e5..ef749b02ded9 100644
--- a/drivers/gpu/drm/amd/amdgpu/vcn_v2_0.h
+++ b/drivers/gpu/drm/amd/amdgpu/vcn_v2_0.h
@@ -49,19 +49,6 @@ extern void vcn_v2_0_enc_ring_emit_vm_flush(struct amdgpu_ring *ring,
unsigned int vmid, uint64_t pd_addr);
extern void vcn_v2_0_enc_ring_emit_wreg(struct amdgpu_ring *ring, uint32_t reg, uint32_t val);
-extern void vcn_v2_0_jpeg_ring_insert_start(struct amdgpu_ring *ring);
-extern void vcn_v2_0_jpeg_ring_insert_end(struct amdgpu_ring *ring);
-extern void vcn_v2_0_jpeg_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 seq,
- unsigned flags);
-extern void vcn_v2_0_jpeg_ring_emit_ib(struct amdgpu_ring *ring, struct amdgpu_job *job,
- struct amdgpu_ib *ib, uint32_t flags);
-extern void vcn_v2_0_jpeg_ring_emit_reg_wait(struct amdgpu_ring *ring, uint32_t reg,
- uint32_t val, uint32_t mask);
-extern void vcn_v2_0_jpeg_ring_emit_vm_flush(struct amdgpu_ring *ring,
- unsigned vmid, uint64_t pd_addr);
-extern void vcn_v2_0_jpeg_ring_emit_wreg(struct amdgpu_ring *ring, uint32_t reg, uint32_t val);
-extern void vcn_v2_0_jpeg_ring_nop(struct amdgpu_ring *ring, uint32_t count);
-
extern const struct amdgpu_ip_block_version vcn_v2_0_ip_block;
#endif /* __VCN_V2_0_H__ */
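
/*
 * Editorial sketch: with the JPEG prototypes dropped, VCN 2.x no longer
 * exposes any JPEG machinery; the engine is expected to be driven by a
 * standalone JPEG IP block instead. That block is not part of this diff,
 * so the registration below is purely illustrative and all names in it
 * are assumptions.
 */
const struct amdgpu_ip_block_version jpeg_v2_0_ip_block = {
	.type = AMD_IP_BLOCK_TYPE_JPEG,
	.major = 2,
	.minor = 0,
	.rev = 0,
	.funcs = &jpeg_v2_0_ip_funcs,	/* placeholder name */
};
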
diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c b/drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c
index 93edf9193a7b..c8b63d57a541 100644
--- a/drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c
+++ b/drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c
@@ -29,6 +29,7 @@
#include "soc15.h"
#include "soc15d.h"
#include "vcn_v2_0.h"
+#include "mmsch_v1_0.h"
#include "vcn/vcn_2_5_offset.h"
#include "vcn/vcn_2_5_sh_mask.h"
@@ -47,16 +48,16 @@
#define mmUVD_LMI_RBC_IB_64BIT_BAR_HIGH_INTERNAL_OFFSET 0x3b5
#define mmUVD_RBC_IB_SIZE_INTERNAL_OFFSET 0x25c
-#define mmUVD_JPEG_PITCH_INTERNAL_OFFSET 0x401f
-
-#define VCN25_MAX_HW_INSTANCES_ARCTURUS 2
+#define VCN25_MAX_HW_INSTANCES_ARCTURUS 2
static void vcn_v2_5_set_dec_ring_funcs(struct amdgpu_device *adev);
static void vcn_v2_5_set_enc_ring_funcs(struct amdgpu_device *adev);
-static void vcn_v2_5_set_jpeg_ring_funcs(struct amdgpu_device *adev);
static void vcn_v2_5_set_irq_funcs(struct amdgpu_device *adev);
static int vcn_v2_5_set_powergating_state(void *handle,
enum amd_powergating_state state);
+static int vcn_v2_5_pause_dpg_mode(struct amdgpu_device *adev,
+ int inst_idx, struct dpg_pause_state *new_state);
+static int vcn_v2_5_sriov_start(struct amdgpu_device *adev);
static int amdgpu_ih_clientid_vcns[] = {
SOC15_IH_CLIENTID_VCN,
@@ -91,11 +92,16 @@ static int vcn_v2_5_early_init(void *handle)
} else
adev->vcn.num_vcn_inst = 1;
- adev->vcn.num_enc_rings = 2;
+ if (amdgpu_sriov_vf(adev)) {
+ adev->vcn.num_vcn_inst = 2;
+ adev->vcn.harvest_config = 0;
+ adev->vcn.num_enc_rings = 1;
+ } else {
+ adev->vcn.num_enc_rings = 2;
+ }
vcn_v2_5_set_dec_ring_funcs(adev);
vcn_v2_5_set_enc_ring_funcs(adev);
- vcn_v2_5_set_jpeg_ring_funcs(adev);
vcn_v2_5_set_irq_funcs(adev);
return 0;
@@ -130,12 +136,6 @@ static int vcn_v2_5_sw_init(void *handle)
if (r)
return r;
}
-
- /* VCN JPEG TRAP */
- r = amdgpu_irq_add_id(adev, amdgpu_ih_clientid_vcns[j],
- VCN_2_0__SRCID__JPEG_DECODE, &adev->vcn.inst[j].irq);
- if (r)
- return r;
}
r = amdgpu_vcn_sw_init(adev);
@@ -184,12 +184,11 @@ static int vcn_v2_5_sw_init(void *handle)
adev->vcn.internal.nop = mmUVD_NO_OP_INTERNAL_OFFSET;
adev->vcn.inst[j].external.nop = SOC15_REG_OFFSET(UVD, j, mmUVD_NO_OP);
- adev->vcn.internal.jpeg_pitch = mmUVD_JPEG_PITCH_INTERNAL_OFFSET;
- adev->vcn.inst[j].external.jpeg_pitch = SOC15_REG_OFFSET(UVD, j, mmUVD_JPEG_PITCH);
-
ring = &adev->vcn.inst[j].ring_dec;
ring->use_doorbell = true;
- ring->doorbell_index = (adev->doorbell_index.vcn.vcn_ring0_1 << 1) + 8*j;
+
+ ring->doorbell_index = (adev->doorbell_index.vcn.vcn_ring0_1 << 1) +
+ (amdgpu_sriov_vf(adev) ? 2*j : 8*j);
sprintf(ring->name, "vcn_dec_%d", j);
r = amdgpu_ring_init(adev, ring, 512, &adev->vcn.inst[j].irq, 0);
if (r)
@@ -198,22 +197,26 @@ static int vcn_v2_5_sw_init(void *handle)
for (i = 0; i < adev->vcn.num_enc_rings; ++i) {
ring = &adev->vcn.inst[j].ring_enc[i];
ring->use_doorbell = true;
- ring->doorbell_index = (adev->doorbell_index.vcn.vcn_ring0_1 << 1) + 2 + i + 8*j;
+
+ ring->doorbell_index = (adev->doorbell_index.vcn.vcn_ring0_1 << 1) +
+ (amdgpu_sriov_vf(adev) ? (1 + i + 2*j) : (2 + i + 8*j));
+
sprintf(ring->name, "vcn_enc_%d.%d", j, i);
r = amdgpu_ring_init(adev, ring, 512, &adev->vcn.inst[j].irq, 0);
if (r)
return r;
}
+ }
- ring = &adev->vcn.inst[j].ring_jpeg;
- ring->use_doorbell = true;
- ring->doorbell_index = (adev->doorbell_index.vcn.vcn_ring0_1 << 1) + 1 + 8*j;
- sprintf(ring->name, "vcn_jpeg_%d", j);
- r = amdgpu_ring_init(adev, ring, 512, &adev->vcn.inst[j].irq, 0);
+ if (amdgpu_sriov_vf(adev)) {
+ r = amdgpu_virt_alloc_mm_table(adev);
if (r)
return r;
}
+ if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG)
+ adev->vcn.pause_dpg_mode = vcn_v2_5_pause_dpg_mode;
+
return 0;
}
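
/*
 * Editorial note: the doorbell assignments above pack differently
 * depending on virtualization — bare metal reserves eight doorbells per
 * instance, SR-IOV only two. The helpers below merely restate that
 * arithmetic for clarity; they are illustrative, not driver API. For
 * example, with vcn_ring0_1 = D, instance 1 decode lands at 2D + 8 on
 * bare metal but at 2D + 2 under SR-IOV.
 */
static u32 vcn25_dec_doorbell(u32 vcn_ring0_1, bool sriov, u32 inst)
{
	return (vcn_ring0_1 << 1) + (sriov ? 2 * inst : 8 * inst);
}

static u32 vcn25_enc_doorbell(u32 vcn_ring0_1, bool sriov, u32 inst, u32 ring)
{
	return (vcn_ring0_1 << 1) +
	       (sriov ? (1 + ring + 2 * inst) : (2 + ring + 8 * inst));
}
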
@@ -229,6 +232,9 @@ static int vcn_v2_5_sw_fini(void *handle)
int r;
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ if (amdgpu_sriov_vf(adev))
+ amdgpu_virt_free_mm_table(adev);
+
r = amdgpu_vcn_suspend(adev);
if (r)
return r;
@@ -249,35 +255,44 @@ static int vcn_v2_5_hw_init(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
struct amdgpu_ring *ring;
- int i, j, r;
+ int i, j, r = 0;
+
+ if (amdgpu_sriov_vf(adev))
+ r = vcn_v2_5_sriov_start(adev);
for (j = 0; j < adev->vcn.num_vcn_inst; ++j) {
if (adev->vcn.harvest_config & (1 << j))
continue;
- ring = &adev->vcn.inst[j].ring_dec;
- adev->nbio.funcs->vcn_doorbell_range(adev, ring->use_doorbell,
- ring->doorbell_index, j);
+ if (amdgpu_sriov_vf(adev)) {
+ adev->vcn.inst[j].ring_enc[0].sched.ready = true;
+ adev->vcn.inst[j].ring_enc[1].sched.ready = false;
+ adev->vcn.inst[j].ring_enc[2].sched.ready = false;
+ adev->vcn.inst[j].ring_dec.sched.ready = true;
+ } else {
- r = amdgpu_ring_test_helper(ring);
- if (r)
- goto done;
+ ring = &adev->vcn.inst[j].ring_dec;
+
+ adev->nbio.funcs->vcn_doorbell_range(adev, ring->use_doorbell,
+ ring->doorbell_index, j);
- for (i = 0; i < adev->vcn.num_enc_rings; ++i) {
- ring = &adev->vcn.inst[j].ring_enc[i];
r = amdgpu_ring_test_helper(ring);
if (r)
goto done;
- }
- ring = &adev->vcn.inst[j].ring_jpeg;
- r = amdgpu_ring_test_helper(ring);
- if (r)
- goto done;
+ for (i = 0; i < adev->vcn.num_enc_rings; ++i) {
+ ring = &adev->vcn.inst[j].ring_enc[i];
+ r = amdgpu_ring_test_helper(ring);
+ if (r)
+ goto done;
+ }
+ }
}
+
done:
if (!r)
- DRM_INFO("VCN decode and encode initialized successfully.\n");
+ DRM_INFO("VCN decode and encode initialized successfully(under %s).\n",
+ (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG)?"DPG Mode":"SPG Mode");
return r;
}
@@ -300,7 +315,9 @@ static int vcn_v2_5_hw_fini(void *handle)
continue;
ring = &adev->vcn.inst[i].ring_dec;
- if (RREG32_SOC15(VCN, i, mmUVD_STATUS))
+ if ((adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) ||
+ (adev->vcn.cur_state != AMD_PG_STATE_GATE &&
+ RREG32_SOC15(VCN, i, mmUVD_STATUS)))
vcn_v2_5_set_powergating_state(adev, AMD_PG_STATE_GATE);
ring->sched.ready = false;
@@ -309,9 +326,6 @@ static int vcn_v2_5_hw_fini(void *handle)
ring = &adev->vcn.inst[i].ring_enc[j];
ring->sched.ready = false;
}
-
- ring = &adev->vcn.inst[i].ring_jpeg;
- ring->sched.ready = false;
}
return 0;
@@ -378,9 +392,9 @@ static void vcn_v2_5_mc_resume(struct amdgpu_device *adev)
/* cache window 0: fw */
if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
WREG32_SOC15(UVD, i, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW,
- (adev->firmware.ucode[AMDGPU_UCODE_ID_VCN].tmr_mc_addr_lo));
+ (adev->firmware.ucode[AMDGPU_UCODE_ID_VCN + i].tmr_mc_addr_lo));
WREG32_SOC15(UVD, i, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH,
- (adev->firmware.ucode[AMDGPU_UCODE_ID_VCN].tmr_mc_addr_hi));
+ (adev->firmware.ucode[AMDGPU_UCODE_ID_VCN + i].tmr_mc_addr_hi));
WREG32_SOC15(UVD, i, mmUVD_VCPU_CACHE_OFFSET0, 0);
offset = 0;
} else {
@@ -412,6 +426,99 @@ static void vcn_v2_5_mc_resume(struct amdgpu_device *adev)
}
}
+static void vcn_v2_5_mc_resume_dpg_mode(struct amdgpu_device *adev, int inst_idx, bool indirect)
+{
+ uint32_t size = AMDGPU_GPU_PAGE_ALIGN(adev->vcn.fw->size + 4);
+ uint32_t offset;
+
+ /* cache window 0: fw */
+ if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
+ if (!indirect) {
+ WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0(
+ UVD, inst_idx, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW),
+ (adev->firmware.ucode[AMDGPU_UCODE_ID_VCN + inst_idx].tmr_mc_addr_lo), 0, indirect);
+ WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0(
+ UVD, inst_idx, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH),
+ (adev->firmware.ucode[AMDGPU_UCODE_ID_VCN + inst_idx].tmr_mc_addr_hi), 0, indirect);
+ WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0(
+ UVD, inst_idx, mmUVD_VCPU_CACHE_OFFSET0), 0, 0, indirect);
+ } else {
+ WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0(
+ UVD, inst_idx, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW), 0, 0, indirect);
+ WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0(
+ UVD, inst_idx, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH), 0, 0, indirect);
+ WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0(
+ UVD, inst_idx, mmUVD_VCPU_CACHE_OFFSET0), 0, 0, indirect);
+ }
+ offset = 0;
+ } else {
+ WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0(
+ UVD, inst_idx, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW),
+ lower_32_bits(adev->vcn.inst[inst_idx].gpu_addr), 0, indirect);
+ WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0(
+ UVD, inst_idx, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH),
+ upper_32_bits(adev->vcn.inst[inst_idx].gpu_addr), 0, indirect);
+ offset = size;
+ WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0(
+ UVD, inst_idx, mmUVD_VCPU_CACHE_OFFSET0),
+ AMDGPU_UVD_FIRMWARE_OFFSET >> 3, 0, indirect);
+ }
+
+ if (!indirect)
+ WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0(
+ UVD, inst_idx, mmUVD_VCPU_CACHE_SIZE0), size, 0, indirect);
+ else
+ WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0(
+ UVD, inst_idx, mmUVD_VCPU_CACHE_SIZE0), 0, 0, indirect);
+
+ /* cache window 1: stack */
+ if (!indirect) {
+ WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0(
+ UVD, inst_idx, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_LOW),
+ lower_32_bits(adev->vcn.inst[inst_idx].gpu_addr + offset), 0, indirect);
+ WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0(
+ UVD, inst_idx, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_HIGH),
+ upper_32_bits(adev->vcn.inst[inst_idx].gpu_addr + offset), 0, indirect);
+ WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0(
+ UVD, inst_idx, mmUVD_VCPU_CACHE_OFFSET1), 0, 0, indirect);
+ } else {
+ WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0(
+ UVD, inst_idx, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_LOW), 0, 0, indirect);
+ WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0(
+ UVD, inst_idx, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_HIGH), 0, 0, indirect);
+ WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0(
+ UVD, inst_idx, mmUVD_VCPU_CACHE_OFFSET1), 0, 0, indirect);
+ }
+ WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0(
+ UVD, inst_idx, mmUVD_VCPU_CACHE_SIZE1), AMDGPU_VCN_STACK_SIZE, 0, indirect);
+
+ /* cache window 2: context */
+ WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0(
+ UVD, inst_idx, mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_LOW),
+ lower_32_bits(adev->vcn.inst[inst_idx].gpu_addr + offset + AMDGPU_VCN_STACK_SIZE), 0, indirect);
+ WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0(
+ UVD, inst_idx, mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_HIGH),
+ upper_32_bits(adev->vcn.inst[inst_idx].gpu_addr + offset + AMDGPU_VCN_STACK_SIZE), 0, indirect);
+ WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0(
+ UVD, inst_idx, mmUVD_VCPU_CACHE_OFFSET2), 0, 0, indirect);
+ WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0(
+ UVD, inst_idx, mmUVD_VCPU_CACHE_SIZE2), AMDGPU_VCN_CONTEXT_SIZE, 0, indirect);
+
+ /* non-cache window */
+ WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0(
+ UVD, inst_idx, mmUVD_LMI_VCPU_NC0_64BIT_BAR_LOW), 0, 0, indirect);
+ WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0(
+ UVD, inst_idx, mmUVD_LMI_VCPU_NC0_64BIT_BAR_HIGH), 0, 0, indirect);
+ WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0(
+ UVD, inst_idx, mmUVD_VCPU_NONCACHE_OFFSET0), 0, 0, indirect);
+ WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0(
+ UVD, inst_idx, mmUVD_VCPU_NONCACHE_SIZE0), 0, 0, indirect);
+
+ /* VCN global tiling registers */
+ WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0(
+ UVD, inst_idx, mmUVD_GFX8_ADDR_CONFIG), adev->gfx.config.gb_addr_config, 0, indirect);
+}
+
/**
* vcn_v2_5_disable_clock_gating - disable VCN clock gating
*
@@ -530,6 +637,54 @@ static void vcn_v2_5_disable_clock_gating(struct amdgpu_device *adev)
}
}
+static void vcn_v2_5_clock_gating_dpg_mode(struct amdgpu_device *adev,
+ uint8_t sram_sel, int inst_idx, uint8_t indirect)
+{
+ uint32_t reg_data = 0;
+
+ /* enable sw clock gating control */
+ if (adev->cg_flags & AMD_CG_SUPPORT_VCN_MGCG)
+ reg_data = 1 << UVD_CGC_CTRL__DYN_CLOCK_MODE__SHIFT;
+ else
+ reg_data = 0 << UVD_CGC_CTRL__DYN_CLOCK_MODE__SHIFT;
+ reg_data |= 1 << UVD_CGC_CTRL__CLK_GATE_DLY_TIMER__SHIFT;
+ reg_data |= 4 << UVD_CGC_CTRL__CLK_OFF_DELAY__SHIFT;
+ reg_data &= ~(UVD_CGC_CTRL__UDEC_RE_MODE_MASK |
+ UVD_CGC_CTRL__UDEC_CM_MODE_MASK |
+ UVD_CGC_CTRL__UDEC_IT_MODE_MASK |
+ UVD_CGC_CTRL__UDEC_DB_MODE_MASK |
+ UVD_CGC_CTRL__UDEC_MP_MODE_MASK |
+ UVD_CGC_CTRL__SYS_MODE_MASK |
+ UVD_CGC_CTRL__UDEC_MODE_MASK |
+ UVD_CGC_CTRL__MPEG2_MODE_MASK |
+ UVD_CGC_CTRL__REGS_MODE_MASK |
+ UVD_CGC_CTRL__RBC_MODE_MASK |
+ UVD_CGC_CTRL__LMI_MC_MODE_MASK |
+ UVD_CGC_CTRL__LMI_UMC_MODE_MASK |
+ UVD_CGC_CTRL__IDCT_MODE_MASK |
+ UVD_CGC_CTRL__MPRD_MODE_MASK |
+ UVD_CGC_CTRL__MPC_MODE_MASK |
+ UVD_CGC_CTRL__LBSI_MODE_MASK |
+ UVD_CGC_CTRL__LRBBM_MODE_MASK |
+ UVD_CGC_CTRL__WCB_MODE_MASK |
+ UVD_CGC_CTRL__VCPU_MODE_MASK |
+ UVD_CGC_CTRL__MMSCH_MODE_MASK);
+ WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0(
+ UVD, inst_idx, mmUVD_CGC_CTRL), reg_data, sram_sel, indirect);
+
+ /* turn off clock gating */
+ WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0(
+ UVD, inst_idx, mmUVD_CGC_GATE), 0, sram_sel, indirect);
+
+ /* turn on SUVD clock gating */
+ WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0(
+ UVD, inst_idx, mmUVD_SUVD_CGC_GATE), 1, sram_sel, indirect);
+
+ /* turn on sw mode in UVD_SUVD_CGC_CTRL */
+ WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0(
+ UVD, inst_idx, mmUVD_SUVD_CGC_CTRL), 0, sram_sel, indirect);
+}
+
/**
* vcn_v2_5_enable_clock_gating - enable VCN clock gating
*
@@ -592,111 +747,134 @@ static void vcn_v2_5_enable_clock_gating(struct amdgpu_device *adev)
}
}
-/**
- * jpeg_v2_5_start - start JPEG block
- *
- * @adev: amdgpu_device pointer
- *
- * Setup and start the JPEG block
- */
-static int jpeg_v2_5_start(struct amdgpu_device *adev)
+static int vcn_v2_5_start_dpg_mode(struct amdgpu_device *adev, int inst_idx, bool indirect)
{
struct amdgpu_ring *ring;
- uint32_t tmp;
- int i;
-
- for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
- if (adev->vcn.harvest_config & (1 << i))
- continue;
- ring = &adev->vcn.inst[i].ring_jpeg;
- /* disable anti hang mechanism */
- WREG32_P(SOC15_REG_OFFSET(UVD, i, mmUVD_JPEG_POWER_STATUS), 0,
- ~UVD_JPEG_POWER_STATUS__JPEG_POWER_STATUS_MASK);
-
- /* JPEG disable CGC */
- tmp = RREG32_SOC15(VCN, i, mmJPEG_CGC_CTRL);
- tmp |= 1 << JPEG_CGC_CTRL__DYN_CLOCK_MODE__SHIFT;
- tmp |= 1 << JPEG_CGC_CTRL__CLK_GATE_DLY_TIMER__SHIFT;
- tmp |= 4 << JPEG_CGC_CTRL__CLK_OFF_DELAY__SHIFT;
- WREG32_SOC15(VCN, i, mmJPEG_CGC_CTRL, tmp);
-
- tmp = RREG32_SOC15(VCN, i, mmJPEG_CGC_GATE);
- tmp &= ~(JPEG_CGC_GATE__JPEG_DEC_MASK
- | JPEG_CGC_GATE__JPEG2_DEC_MASK
- | JPEG_CGC_GATE__JMCIF_MASK
- | JPEG_CGC_GATE__JRBBM_MASK);
- WREG32_SOC15(VCN, i, mmJPEG_CGC_GATE, tmp);
-
- tmp = RREG32_SOC15(VCN, i, mmJPEG_CGC_CTRL);
- tmp &= ~(JPEG_CGC_CTRL__JPEG_DEC_MODE_MASK
- | JPEG_CGC_CTRL__JPEG2_DEC_MODE_MASK
- | JPEG_CGC_CTRL__JMCIF_MODE_MASK
- | JPEG_CGC_CTRL__JRBBM_MODE_MASK);
- WREG32_SOC15(VCN, i, mmJPEG_CGC_CTRL, tmp);
-
- /* MJPEG global tiling registers */
- WREG32_SOC15(UVD, i, mmJPEG_DEC_GFX8_ADDR_CONFIG,
- adev->gfx.config.gb_addr_config);
- WREG32_SOC15(UVD, i, mmJPEG_DEC_GFX10_ADDR_CONFIG,
- adev->gfx.config.gb_addr_config);
-
- /* enable JMI channel */
- WREG32_P(SOC15_REG_OFFSET(UVD, i, mmUVD_JMI_CNTL), 0,
- ~UVD_JMI_CNTL__SOFT_RESET_MASK);
-
- /* enable System Interrupt for JRBC */
- WREG32_P(SOC15_REG_OFFSET(VCN, i, mmJPEG_SYS_INT_EN),
- JPEG_SYS_INT_EN__DJRBC_MASK,
- ~JPEG_SYS_INT_EN__DJRBC_MASK);
-
- WREG32_SOC15(UVD, i, mmUVD_LMI_JRBC_RB_VMID, 0);
- WREG32_SOC15(UVD, i, mmUVD_JRBC_RB_CNTL, (0x00000001L | 0x00000002L));
- WREG32_SOC15(UVD, i, mmUVD_LMI_JRBC_RB_64BIT_BAR_LOW,
- lower_32_bits(ring->gpu_addr));
- WREG32_SOC15(UVD, i, mmUVD_LMI_JRBC_RB_64BIT_BAR_HIGH,
- upper_32_bits(ring->gpu_addr));
- WREG32_SOC15(UVD, i, mmUVD_JRBC_RB_RPTR, 0);
- WREG32_SOC15(UVD, i, mmUVD_JRBC_RB_WPTR, 0);
- WREG32_SOC15(UVD, i, mmUVD_JRBC_RB_CNTL, 0x00000002L);
- WREG32_SOC15(UVD, i, mmUVD_JRBC_RB_SIZE, ring->ring_size / 4);
- ring->wptr = RREG32_SOC15(UVD, i, mmUVD_JRBC_RB_WPTR);
- }
-
- return 0;
-}
-
-/**
- * jpeg_v2_5_stop - stop JPEG block
- *
- * @adev: amdgpu_device pointer
- *
- * stop the JPEG block
- */
-static int jpeg_v2_5_stop(struct amdgpu_device *adev)
-{
- uint32_t tmp;
- int i;
+ uint32_t rb_bufsz, tmp;
- for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
- if (adev->vcn.harvest_config & (1 << i))
- continue;
- /* reset JMI */
- WREG32_P(SOC15_REG_OFFSET(UVD, i, mmUVD_JMI_CNTL),
- UVD_JMI_CNTL__SOFT_RESET_MASK,
- ~UVD_JMI_CNTL__SOFT_RESET_MASK);
-
- tmp = RREG32_SOC15(VCN, i, mmJPEG_CGC_GATE);
- tmp |= (JPEG_CGC_GATE__JPEG_DEC_MASK
- |JPEG_CGC_GATE__JPEG2_DEC_MASK
- |JPEG_CGC_GATE__JMCIF_MASK
- |JPEG_CGC_GATE__JRBBM_MASK);
- WREG32_SOC15(VCN, i, mmJPEG_CGC_GATE, tmp);
-
- /* enable anti hang mechanism */
- WREG32_P(SOC15_REG_OFFSET(UVD, i, mmUVD_JPEG_POWER_STATUS),
- UVD_JPEG_POWER_STATUS__JPEG_POWER_STATUS_MASK,
- ~UVD_JPEG_POWER_STATUS__JPEG_POWER_STATUS_MASK);
- }
+ /* disable register anti-hang mechanism */
+ WREG32_P(SOC15_REG_OFFSET(UVD, inst_idx, mmUVD_POWER_STATUS), 1,
+ ~UVD_POWER_STATUS__UVD_POWER_STATUS_MASK);
+ /* enable dynamic power gating mode */
+ tmp = RREG32_SOC15(UVD, inst_idx, mmUVD_POWER_STATUS);
+ tmp |= UVD_POWER_STATUS__UVD_PG_MODE_MASK;
+ tmp |= UVD_POWER_STATUS__UVD_PG_EN_MASK;
+ WREG32_SOC15(UVD, inst_idx, mmUVD_POWER_STATUS, tmp);
+
+ if (indirect)
+ adev->vcn.inst[inst_idx].dpg_sram_curr_addr = (uint32_t*)adev->vcn.inst[inst_idx].dpg_sram_cpu_addr;
+
+ /* enable clock gating */
+ vcn_v2_5_clock_gating_dpg_mode(adev, 0, inst_idx, indirect);
+
+ /* enable VCPU clock */
+ tmp = (0xFF << UVD_VCPU_CNTL__PRB_TIMEOUT_VAL__SHIFT);
+ tmp |= UVD_VCPU_CNTL__CLK_EN_MASK;
+ tmp |= UVD_VCPU_CNTL__BLK_RST_MASK;
+ WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0(
+ UVD, inst_idx, mmUVD_VCPU_CNTL), tmp, 0, indirect);
+
+ /* disable master interrupt */
+ WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0(
+ UVD, inst_idx, mmUVD_MASTINT_EN), 0, 0, indirect);
+
+ /* setup mmUVD_LMI_CTRL */
+ tmp = (0x8 | UVD_LMI_CTRL__WRITE_CLEAN_TIMER_EN_MASK |
+ UVD_LMI_CTRL__REQ_MODE_MASK |
+ UVD_LMI_CTRL__CRC_RESET_MASK |
+ UVD_LMI_CTRL__MASK_MC_URGENT_MASK |
+ UVD_LMI_CTRL__DATA_COHERENCY_EN_MASK |
+ UVD_LMI_CTRL__VCPU_DATA_COHERENCY_EN_MASK |
+ (8 << UVD_LMI_CTRL__WRITE_CLEAN_TIMER__SHIFT) |
+ 0x00100000L);
+ WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0(
+ UVD, inst_idx, mmUVD_LMI_CTRL), tmp, 0, indirect);
+
+ WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0(
+ UVD, inst_idx, mmUVD_MPC_CNTL),
+ 0x2 << UVD_MPC_CNTL__REPLACEMENT_MODE__SHIFT, 0, indirect);
+
+ WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0(
+ UVD, inst_idx, mmUVD_MPC_SET_MUXA0),
+ ((0x1 << UVD_MPC_SET_MUXA0__VARA_1__SHIFT) |
+ (0x2 << UVD_MPC_SET_MUXA0__VARA_2__SHIFT) |
+ (0x3 << UVD_MPC_SET_MUXA0__VARA_3__SHIFT) |
+ (0x4 << UVD_MPC_SET_MUXA0__VARA_4__SHIFT)), 0, indirect);
+
+ WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0(
+ UVD, inst_idx, mmUVD_MPC_SET_MUXB0),
+ ((0x1 << UVD_MPC_SET_MUXB0__VARB_1__SHIFT) |
+ (0x2 << UVD_MPC_SET_MUXB0__VARB_2__SHIFT) |
+ (0x3 << UVD_MPC_SET_MUXB0__VARB_3__SHIFT) |
+ (0x4 << UVD_MPC_SET_MUXB0__VARB_4__SHIFT)), 0, indirect);
+
+ WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0(
+ UVD, inst_idx, mmUVD_MPC_SET_MUX),
+ ((0x0 << UVD_MPC_SET_MUX__SET_0__SHIFT) |
+ (0x1 << UVD_MPC_SET_MUX__SET_1__SHIFT) |
+ (0x2 << UVD_MPC_SET_MUX__SET_2__SHIFT)), 0, indirect);
+
+ vcn_v2_5_mc_resume_dpg_mode(adev, inst_idx, indirect);
+
+ WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0(
+ UVD, inst_idx, mmUVD_REG_XX_MASK), 0x10, 0, indirect);
+ WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0(
+ UVD, inst_idx, mmUVD_RBC_XX_IB_REG_CHECK), 0x3, 0, indirect);
+
+ /* enable LMI MC and UMC channels */
+ WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0(
+ UVD, inst_idx, mmUVD_LMI_CTRL2), 0, 0, indirect);
+
+ /* unblock VCPU register access */
+ WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0(
+ UVD, inst_idx, mmUVD_RB_ARB_CTRL), 0, 0, indirect);
+
+ tmp = (0xFF << UVD_VCPU_CNTL__PRB_TIMEOUT_VAL__SHIFT);
+ tmp |= UVD_VCPU_CNTL__CLK_EN_MASK;
+ WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0(
+ UVD, inst_idx, mmUVD_VCPU_CNTL), tmp, 0, indirect);
+
+ /* enable master interrupt */
+ WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0(
+ UVD, inst_idx, mmUVD_MASTINT_EN),
+ UVD_MASTINT_EN__VCPU_EN_MASK, 0, indirect);
+
+ if (indirect)
+ psp_update_vcn_sram(adev, inst_idx, adev->vcn.inst[inst_idx].dpg_sram_gpu_addr,
+ (uint32_t)((uintptr_t)adev->vcn.inst[inst_idx].dpg_sram_curr_addr -
+ (uintptr_t)adev->vcn.inst[inst_idx].dpg_sram_cpu_addr));
+
+ ring = &adev->vcn.inst[inst_idx].ring_dec;
+ /* force RBC into idle state */
+ rb_bufsz = order_base_2(ring->ring_size);
+ tmp = REG_SET_FIELD(0, UVD_RBC_RB_CNTL, RB_BUFSZ, rb_bufsz);
+ tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_BLKSZ, 1);
+ tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_FETCH, 1);
+ tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_UPDATE, 1);
+ tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_RPTR_WR_EN, 1);
+ WREG32_SOC15(UVD, inst_idx, mmUVD_RBC_RB_CNTL, tmp);
+
+ /* set the write pointer delay */
+ WREG32_SOC15(UVD, inst_idx, mmUVD_RBC_RB_WPTR_CNTL, 0);
+
+ /* set the wb address */
+ WREG32_SOC15(UVD, inst_idx, mmUVD_RBC_RB_RPTR_ADDR,
+ (upper_32_bits(ring->gpu_addr) >> 2));
+
+ /* program the RB_BASE for the ring buffer */
+ WREG32_SOC15(UVD, inst_idx, mmUVD_LMI_RBC_RB_64BIT_BAR_LOW,
+ lower_32_bits(ring->gpu_addr));
+ WREG32_SOC15(UVD, inst_idx, mmUVD_LMI_RBC_RB_64BIT_BAR_HIGH,
+ upper_32_bits(ring->gpu_addr));
+
+ /* Initialize the ring buffer's read and write pointers */
+ WREG32_SOC15(UVD, inst_idx, mmUVD_RBC_RB_RPTR, 0);
+
+ WREG32_SOC15(UVD, inst_idx, mmUVD_SCRATCH2, 0);
+
+ ring->wptr = RREG32_SOC15(UVD, inst_idx, mmUVD_RBC_RB_RPTR);
+ WREG32_SOC15(UVD, inst_idx, mmUVD_RBC_RB_WPTR,
+ lower_32_bits(ring->wptr));
return 0;
}
@@ -713,6 +891,9 @@ static int vcn_v2_5_start(struct amdgpu_device *adev)
for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
if (adev->vcn.harvest_config & (1 << i))
continue;
+ if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG)
+ return vcn_v2_5_start_dpg_mode(adev, i, adev->vcn.indirect_sram);
+
/* disable register anti-hang mechanism */
WREG32_P(SOC15_REG_OFFSET(UVD, i, mmUVD_POWER_STATUS), 0,
~UVD_POWER_STATUS__UVD_POWER_STATUS_MASK);
@@ -874,23 +1055,251 @@ static int vcn_v2_5_start(struct amdgpu_device *adev)
WREG32_SOC15(UVD, i, mmUVD_RB_BASE_HI2, upper_32_bits(ring->gpu_addr));
WREG32_SOC15(UVD, i, mmUVD_RB_SIZE2, ring->ring_size / 4);
}
- r = jpeg_v2_5_start(adev);
- return r;
+ return 0;
}
-static int vcn_v2_5_stop(struct amdgpu_device *adev)
+static int vcn_v2_5_mmsch_start(struct amdgpu_device *adev,
+ struct amdgpu_mm_table *table)
+{
+ uint32_t data = 0, loop = 0, size = 0;
+ uint64_t addr = table->gpu_addr;
+ struct mmsch_v1_1_init_header *header = NULL;
+
+ header = (struct mmsch_v1_1_init_header *)table->cpu_addr;
+ size = header->total_size;
+
+ /*
+ * 1, write the GPU MC address of the memory descriptor to the
+ * MMSCH_VF_CTX_ADDR_LO/HI registers
+ */
+ WREG32_SOC15(UVD, 0, mmMMSCH_VF_CTX_ADDR_LO, lower_32_bits(addr));
+ WREG32_SOC15(UVD, 0, mmMMSCH_VF_CTX_ADDR_HI, upper_32_bits(addr));
+
+ /* 2, update vmid of descriptor */
+ data = RREG32_SOC15(UVD, 0, mmMMSCH_VF_VMID);
+ data &= ~MMSCH_VF_VMID__VF_CTX_VMID_MASK;
+ /* use domain0 for MM scheduler */
+ data |= (0 << MMSCH_VF_VMID__VF_CTX_VMID__SHIFT);
+ WREG32_SOC15(UVD, 0, mmMMSCH_VF_VMID, data);
+
+ /* 3, notify mmsch about the size of this descriptor */
+ WREG32_SOC15(UVD, 0, mmMMSCH_VF_CTX_SIZE, size);
+
+ /* 4, set resp to zero */
+ WREG32_SOC15(UVD, 0, mmMMSCH_VF_MAILBOX_RESP, 0);
+
+ /*
+ * 5, kick off the initialization and wait until
+ * MMSCH_VF_MAILBOX_RESP becomes non-zero
+ */
+ WREG32_SOC15(UVD, 0, mmMMSCH_VF_MAILBOX_HOST, 0x10000001);
+
+ data = RREG32_SOC15(UVD, 0, mmMMSCH_VF_MAILBOX_RESP);
+ loop = 10;
+ while ((data & 0x10000002) != 0x10000002) {
+ udelay(100);
+ data = RREG32_SOC15(UVD, 0, mmMMSCH_VF_MAILBOX_RESP);
+ loop--;
+ if (!loop)
+ break;
+ }
+
+ if (!loop) {
+ dev_err(adev->dev,
+ "failed to init MMSCH, mmMMSCH_VF_MAILBOX_RESP = %x\n",
+ data);
+ return -EBUSY;
+ }
+
+ return 0;
+}
+
+static int vcn_v2_5_sriov_start(struct amdgpu_device *adev)
+{
+ struct amdgpu_ring *ring;
+ uint32_t offset, size, tmp, i, rb_bufsz;
+ uint32_t table_size = 0;
+ struct mmsch_v1_0_cmd_direct_write direct_wt = { { 0 } };
+ struct mmsch_v1_0_cmd_direct_read_modify_write direct_rd_mod_wt = { { 0 } };
+ struct mmsch_v1_0_cmd_direct_polling direct_poll = { { 0 } };
+ struct mmsch_v1_0_cmd_end end = { { 0 } };
+ uint32_t *init_table = adev->virt.mm_table.cpu_addr;
+ struct mmsch_v1_1_init_header *header = (struct mmsch_v1_1_init_header *)init_table;
+
+ direct_wt.cmd_header.command_type = MMSCH_COMMAND__DIRECT_REG_WRITE;
+ direct_rd_mod_wt.cmd_header.command_type = MMSCH_COMMAND__DIRECT_REG_READ_MODIFY_WRITE;
+ direct_poll.cmd_header.command_type = MMSCH_COMMAND__DIRECT_REG_POLLING;
+ end.cmd_header.command_type = MMSCH_COMMAND__END;
+
+ header->version = MMSCH_VERSION;
+ header->total_size = sizeof(struct mmsch_v1_1_init_header) >> 2;
+ init_table += header->total_size;
+
+ for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
+ header->eng[i].table_offset = header->total_size;
+ header->eng[i].init_status = 0;
+ header->eng[i].table_size = 0;
+
+ table_size = 0;
+
+ MMSCH_V1_0_INSERT_DIRECT_RD_MOD_WT(
+ SOC15_REG_OFFSET(UVD, i, mmUVD_STATUS),
+ ~UVD_STATUS__UVD_BUSY, UVD_STATUS__UVD_BUSY);
+
+ size = AMDGPU_GPU_PAGE_ALIGN(adev->vcn.fw->size + 4);
+ /* mc resume*/
+ if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
+ MMSCH_V1_0_INSERT_DIRECT_WT(
+ SOC15_REG_OFFSET(UVD, i,
+ mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW),
+ adev->firmware.ucode[AMDGPU_UCODE_ID_VCN + i].tmr_mc_addr_lo);
+ MMSCH_V1_0_INSERT_DIRECT_WT(
+ SOC15_REG_OFFSET(UVD, i,
+ mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH),
+ adev->firmware.ucode[AMDGPU_UCODE_ID_VCN + i].tmr_mc_addr_hi);
+ offset = 0;
+ MMSCH_V1_0_INSERT_DIRECT_WT(
+ SOC15_REG_OFFSET(UVD, i, mmUVD_VCPU_CACHE_OFFSET0), 0);
+ } else {
+ MMSCH_V1_0_INSERT_DIRECT_WT(
+ SOC15_REG_OFFSET(UVD, i,
+ mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW),
+ lower_32_bits(adev->vcn.inst[i].gpu_addr));
+ MMSCH_V1_0_INSERT_DIRECT_WT(
+ SOC15_REG_OFFSET(UVD, i,
+ mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH),
+ upper_32_bits(adev->vcn.inst[i].gpu_addr));
+ offset = size;
+ MMSCH_V1_0_INSERT_DIRECT_WT(
+ SOC15_REG_OFFSET(UVD, i, mmUVD_VCPU_CACHE_OFFSET0),
+ AMDGPU_UVD_FIRMWARE_OFFSET >> 3);
+ }
+
+ MMSCH_V1_0_INSERT_DIRECT_WT(
+ SOC15_REG_OFFSET(UVD, i, mmUVD_VCPU_CACHE_SIZE0),
+ size);
+ MMSCH_V1_0_INSERT_DIRECT_WT(
+ SOC15_REG_OFFSET(UVD, i,
+ mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_LOW),
+ lower_32_bits(adev->vcn.inst[i].gpu_addr + offset));
+ MMSCH_V1_0_INSERT_DIRECT_WT(
+ SOC15_REG_OFFSET(UVD, i,
+ mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_HIGH),
+ upper_32_bits(adev->vcn.inst[i].gpu_addr + offset));
+ MMSCH_V1_0_INSERT_DIRECT_WT(
+ SOC15_REG_OFFSET(UVD, i, mmUVD_VCPU_CACHE_OFFSET1),
+ 0);
+ MMSCH_V1_0_INSERT_DIRECT_WT(
+ SOC15_REG_OFFSET(UVD, i, mmUVD_VCPU_CACHE_SIZE1),
+ AMDGPU_VCN_STACK_SIZE);
+ MMSCH_V1_0_INSERT_DIRECT_WT(
+ SOC15_REG_OFFSET(UVD, i,
+ mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_LOW),
+ lower_32_bits(adev->vcn.inst[i].gpu_addr + offset +
+ AMDGPU_VCN_STACK_SIZE));
+ MMSCH_V1_0_INSERT_DIRECT_WT(
+ SOC15_REG_OFFSET(UVD, i,
+ mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_HIGH),
+ upper_32_bits(adev->vcn.inst[i].gpu_addr + offset +
+ AMDGPU_VCN_STACK_SIZE));
+ MMSCH_V1_0_INSERT_DIRECT_WT(
+ SOC15_REG_OFFSET(UVD, i, mmUVD_VCPU_CACHE_OFFSET2),
+ 0);
+ MMSCH_V1_0_INSERT_DIRECT_WT(
+ SOC15_REG_OFFSET(UVD, i, mmUVD_VCPU_CACHE_SIZE2),
+ AMDGPU_VCN_CONTEXT_SIZE);
+
+ ring = &adev->vcn.inst[i].ring_enc[0];
+ ring->wptr = 0;
+
+ MMSCH_V1_0_INSERT_DIRECT_WT(
+ SOC15_REG_OFFSET(UVD, i, mmUVD_RB_BASE_LO),
+ lower_32_bits(ring->gpu_addr));
+ MMSCH_V1_0_INSERT_DIRECT_WT(
+ SOC15_REG_OFFSET(UVD, i, mmUVD_RB_BASE_HI),
+ upper_32_bits(ring->gpu_addr));
+ MMSCH_V1_0_INSERT_DIRECT_WT(
+ SOC15_REG_OFFSET(UVD, i, mmUVD_RB_SIZE),
+ ring->ring_size / 4);
+
+ ring = &adev->vcn.inst[i].ring_dec;
+ ring->wptr = 0;
+ MMSCH_V1_0_INSERT_DIRECT_WT(
+ SOC15_REG_OFFSET(UVD, i,
+ mmUVD_LMI_RBC_RB_64BIT_BAR_LOW),
+ lower_32_bits(ring->gpu_addr));
+ MMSCH_V1_0_INSERT_DIRECT_WT(
+ SOC15_REG_OFFSET(UVD, i,
+ mmUVD_LMI_RBC_RB_64BIT_BAR_HIGH),
+ upper_32_bits(ring->gpu_addr));
+
+ /* force RBC into idle state */
+ rb_bufsz = order_base_2(ring->ring_size);
+ tmp = REG_SET_FIELD(0, UVD_RBC_RB_CNTL, RB_BUFSZ, rb_bufsz);
+ tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_BLKSZ, 1);
+ tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_FETCH, 1);
+ tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_UPDATE, 1);
+ tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_RPTR_WR_EN, 1);
+ MMSCH_V1_0_INSERT_DIRECT_WT(
+ SOC15_REG_OFFSET(UVD, i, mmUVD_RBC_RB_CNTL), tmp);
+
+ /* add end packet */
+ memcpy((void *)init_table, &end, sizeof(struct mmsch_v1_0_cmd_end));
+ table_size += sizeof(struct mmsch_v1_0_cmd_end) / 4;
+ init_table += sizeof(struct mmsch_v1_0_cmd_end) / 4;
+
+ /* update the header with this engine's table size */
+ header->eng[i].table_size = table_size;
+ header->total_size += table_size;
+ }
+
+ return vcn_v2_5_mmsch_start(adev, &adev->virt.mm_table);
+}
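
Note that vcn_v2_5_sriov_start() does not program most registers directly; it serializes the writes into a per-engine command table that the MMSCH firmware replays, keeping all size bookkeeping in dwords. A simplified sketch of the append-and-account step behind the MMSCH_V1_0_INSERT_DIRECT_WT macro (the command layout here is illustrative, not the driver's exact definition):

#include <stdint.h>
#include <string.h>

/* Illustrative direct-write command: a header dword, a register
 * offset and the value to write. */
struct direct_wt_cmd {
	uint32_t cmd_header;	/* MMSCH_COMMAND__DIRECT_REG_WRITE */
	uint32_t reg_offset;
	uint32_t reg_value;
};

/* Append one command at *table, bump the running size in dwords and
 * return the advanced table pointer. */
static uint32_t *insert_direct_wt(uint32_t *table, uint32_t *table_size,
				  uint32_t reg_offset, uint32_t reg_value)
{
	struct direct_wt_cmd cmd = {
		.cmd_header = 0,	/* direct-write opcode, illustrative */
		.reg_offset = reg_offset,
		.reg_value = reg_value,
	};

	memcpy(table, &cmd, sizeof(cmd));
	*table_size += sizeof(cmd) / 4;
	return table + sizeof(cmd) / 4;
}
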
+
+static int vcn_v2_5_stop_dpg_mode(struct amdgpu_device *adev, int inst_idx)
{
+ int ret_code = 0;
uint32_t tmp;
- int i, r;
- r = jpeg_v2_5_stop(adev);
- if (r)
- return r;
+ /* Wait for power status to be 1 */
+ SOC15_WAIT_ON_RREG(UVD, inst_idx, mmUVD_POWER_STATUS, 1,
+ UVD_POWER_STATUS__UVD_POWER_STATUS_MASK, ret_code);
+
+ /* wait for read ptr to be equal to write ptr */
+ tmp = RREG32_SOC15(UVD, inst_idx, mmUVD_RB_WPTR);
+ SOC15_WAIT_ON_RREG(UVD, inst_idx, mmUVD_RB_RPTR, tmp, 0xFFFFFFFF, ret_code);
+
+ tmp = RREG32_SOC15(UVD, inst_idx, mmUVD_RB_WPTR2);
+ SOC15_WAIT_ON_RREG(UVD, inst_idx, mmUVD_RB_RPTR2, tmp, 0xFFFFFFFF, ret_code);
+
+ tmp = RREG32_SOC15(UVD, inst_idx, mmUVD_RBC_RB_WPTR) & 0x7FFFFFFF;
+ SOC15_WAIT_ON_RREG(UVD, inst_idx, mmUVD_RBC_RB_RPTR, tmp, 0xFFFFFFFF, ret_code);
+
+ SOC15_WAIT_ON_RREG(UVD, inst_idx, mmUVD_POWER_STATUS, 1,
+ UVD_POWER_STATUS__UVD_POWER_STATUS_MASK, ret_code);
+
+ /* disable dynamic power gating mode */
+ WREG32_P(SOC15_REG_OFFSET(UVD, inst_idx, mmUVD_POWER_STATUS), 0,
+ ~UVD_POWER_STATUS__UVD_PG_MODE_MASK);
+
+ return 0;
+}
+
+static int vcn_v2_5_stop(struct amdgpu_device *adev)
+{
+ uint32_t tmp;
+ int i, r = 0;
for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
if (adev->vcn.harvest_config & (1 << i))
continue;
+
+ if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) {
+ r = vcn_v2_5_stop_dpg_mode(adev, i);
+ goto power_off;
+ }
+
/* wait for vcn idle */
SOC15_WAIT_ON_RREG(VCN, i, mmUVD_STATUS, UVD_STATUS__IDLE, 0x7, r);
if (r)
@@ -940,12 +1349,74 @@ static int vcn_v2_5_stop(struct amdgpu_device *adev)
~UVD_POWER_STATUS__UVD_POWER_STATUS_MASK);
}
+power_off:
if (adev->pm.dpm_enabled)
amdgpu_dpm_enable_uvd(adev, false);
return 0;
}
+static int vcn_v2_5_pause_dpg_mode(struct amdgpu_device *adev,
+ int inst_idx, struct dpg_pause_state *new_state)
+{
+ struct amdgpu_ring *ring;
+ uint32_t reg_data = 0;
+ int ret_code;
+
+ /* pause/unpause if state is changed */
+ if (adev->vcn.pause_state.fw_based != new_state->fw_based) {
+ DRM_DEBUG("dpg pause state changed %d -> %d",
+ adev->vcn.pause_state.fw_based, new_state->fw_based);
+ reg_data = RREG32_SOC15(UVD, inst_idx, mmUVD_DPG_PAUSE) &
+ (~UVD_DPG_PAUSE__NJ_PAUSE_DPG_ACK_MASK);
+
+ if (new_state->fw_based == VCN_DPG_STATE__PAUSE) {
+ ret_code = 0;
+ SOC15_WAIT_ON_RREG(UVD, inst_idx, mmUVD_POWER_STATUS, 0x1,
+ UVD_POWER_STATUS__UVD_POWER_STATUS_MASK, ret_code);
+
+ if (!ret_code) {
+ /* pause DPG */
+ reg_data |= UVD_DPG_PAUSE__NJ_PAUSE_DPG_REQ_MASK;
+ WREG32_SOC15(UVD, inst_idx, mmUVD_DPG_PAUSE, reg_data);
+
+ /* wait for ACK */
+ SOC15_WAIT_ON_RREG(UVD, inst_idx, mmUVD_DPG_PAUSE,
+ UVD_DPG_PAUSE__NJ_PAUSE_DPG_ACK_MASK,
+ UVD_DPG_PAUSE__NJ_PAUSE_DPG_ACK_MASK, ret_code);
+
+ /* Restore */
+ ring = &adev->vcn.inst[inst_idx].ring_enc[0];
+ WREG32_SOC15(UVD, inst_idx, mmUVD_RB_BASE_LO, ring->gpu_addr);
+ WREG32_SOC15(UVD, inst_idx, mmUVD_RB_BASE_HI, upper_32_bits(ring->gpu_addr));
+ WREG32_SOC15(UVD, inst_idx, mmUVD_RB_SIZE, ring->ring_size / 4);
+ WREG32_SOC15(UVD, inst_idx, mmUVD_RB_RPTR, lower_32_bits(ring->wptr));
+ WREG32_SOC15(UVD, inst_idx, mmUVD_RB_WPTR, lower_32_bits(ring->wptr));
+
+ ring = &adev->vcn.inst[inst_idx].ring_enc[1];
+ WREG32_SOC15(UVD, inst_idx, mmUVD_RB_BASE_LO2, ring->gpu_addr);
+ WREG32_SOC15(UVD, inst_idx, mmUVD_RB_BASE_HI2, upper_32_bits(ring->gpu_addr));
+ WREG32_SOC15(UVD, inst_idx, mmUVD_RB_SIZE2, ring->ring_size / 4);
+ WREG32_SOC15(UVD, inst_idx, mmUVD_RB_RPTR2, lower_32_bits(ring->wptr));
+ WREG32_SOC15(UVD, inst_idx, mmUVD_RB_WPTR2, lower_32_bits(ring->wptr));
+
+ WREG32_SOC15(UVD, inst_idx, mmUVD_RBC_RB_WPTR,
+ RREG32_SOC15(UVD, inst_idx, mmUVD_SCRATCH2) & 0x7FFFFFFF);
+
+ SOC15_WAIT_ON_RREG(UVD, inst_idx, mmUVD_POWER_STATUS,
+ 0x0, UVD_POWER_STATUS__UVD_POWER_STATUS_MASK, ret_code);
+ }
+ } else {
+ /* unpause dpg, no need to wait */
+ reg_data &= ~UVD_DPG_PAUSE__NJ_PAUSE_DPG_REQ_MASK;
+ WREG32_SOC15(UVD, inst_idx, mmUVD_DPG_PAUSE, reg_data);
+ }
+ adev->vcn.pause_state.fw_based = new_state->fw_based;
+ }
+
+ return 0;
+}
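
Pause and unpause are a request/acknowledge handshake against a single register: the driver raises NJ_PAUSE_DPG_REQ in mmUVD_DPG_PAUSE, polls the same register for NJ_PAUSE_DPG_ACK before restoring the ring state, and on unpause simply clears the request bit. A condensed sketch with stand-in accessors for the one register involved:

#include <stdint.h>

/* rd()/wr() stand in for reads/writes of mmUVD_DPG_PAUSE. */
static int request_pause(uint32_t (*rd)(void), void (*wr)(uint32_t),
			 uint32_t req_mask, uint32_t ack_mask,
			 unsigned int attempts)
{
	wr((rd() & ~ack_mask) | req_mask);	/* raise the request */

	while (attempts--) {
		if ((rd() & ack_mask) == ack_mask)
			return 0;		/* firmware acknowledged */
	}
	return -1;				/* no ACK within budget */
}
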
+
/**
* vcn_v2_5_dec_ring_get_rptr - get read pointer
*
@@ -988,6 +1459,10 @@ static void vcn_v2_5_dec_ring_set_wptr(struct amdgpu_ring *ring)
{
struct amdgpu_device *adev = ring->adev;
+ if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG)
+ WREG32_SOC15(UVD, ring->me, mmUVD_SCRATCH2,
+ lower_32_bits(ring->wptr) | 0x80000000);
+
if (ring->use_doorbell) {
adev->wb.wb[ring->wptr_offs] = lower_32_bits(ring->wptr);
WDOORBELL32(ring->doorbell_index, lower_32_bits(ring->wptr));
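
Under DPG the decode ring's write pointer is mirrored into mmUVD_SCRATCH2 with bit 31 set as a validity flag; the restore path in vcn_v2_5_pause_dpg_mode() above masks that bit off again before writing mmUVD_RBC_RB_WPTR. The encode/decode pair, sketched:

#include <stdint.h>

/* Mirror a 31-bit write pointer into a scratch register with bit 31
 * as a "value is valid" flag, and strip it again on restore. */
static uint32_t scratch_encode_wptr(uint32_t wptr)
{
	return (wptr & 0x7FFFFFFFu) | 0x80000000u;
}

static uint32_t scratch_decode_wptr(uint32_t scratch)
{
	return scratch & 0x7FFFFFFFu;
}
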
@@ -1125,86 +1600,6 @@ static const struct amdgpu_ring_funcs vcn_v2_5_enc_ring_vm_funcs = {
.emit_reg_write_reg_wait = amdgpu_ring_emit_reg_write_reg_wait_helper,
};
-/**
- * vcn_v2_5_jpeg_ring_get_rptr - get read pointer
- *
- * @ring: amdgpu_ring pointer
- *
- * Returns the current hardware read pointer
- */
-static uint64_t vcn_v2_5_jpeg_ring_get_rptr(struct amdgpu_ring *ring)
-{
- struct amdgpu_device *adev = ring->adev;
-
- return RREG32_SOC15(UVD, ring->me, mmUVD_JRBC_RB_RPTR);
-}
-
-/**
- * vcn_v2_5_jpeg_ring_get_wptr - get write pointer
- *
- * @ring: amdgpu_ring pointer
- *
- * Returns the current hardware write pointer
- */
-static uint64_t vcn_v2_5_jpeg_ring_get_wptr(struct amdgpu_ring *ring)
-{
- struct amdgpu_device *adev = ring->adev;
-
- if (ring->use_doorbell)
- return adev->wb.wb[ring->wptr_offs];
- else
- return RREG32_SOC15(UVD, ring->me, mmUVD_JRBC_RB_WPTR);
-}
-
-/**
- * vcn_v2_5_jpeg_ring_set_wptr - set write pointer
- *
- * @ring: amdgpu_ring pointer
- *
- * Commits the write pointer to the hardware
- */
-static void vcn_v2_5_jpeg_ring_set_wptr(struct amdgpu_ring *ring)
-{
- struct amdgpu_device *adev = ring->adev;
-
- if (ring->use_doorbell) {
- adev->wb.wb[ring->wptr_offs] = lower_32_bits(ring->wptr);
- WDOORBELL32(ring->doorbell_index, lower_32_bits(ring->wptr));
- } else {
- WREG32_SOC15(UVD, ring->me, mmUVD_JRBC_RB_WPTR, lower_32_bits(ring->wptr));
- }
-}
-
-static const struct amdgpu_ring_funcs vcn_v2_5_jpeg_ring_vm_funcs = {
- .type = AMDGPU_RING_TYPE_VCN_JPEG,
- .align_mask = 0xf,
- .vmhub = AMDGPU_MMHUB_1,
- .get_rptr = vcn_v2_5_jpeg_ring_get_rptr,
- .get_wptr = vcn_v2_5_jpeg_ring_get_wptr,
- .set_wptr = vcn_v2_5_jpeg_ring_set_wptr,
- .emit_frame_size =
- SOC15_FLUSH_GPU_TLB_NUM_WREG * 6 +
- SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 8 +
- 8 + /* vcn_v2_0_jpeg_ring_emit_vm_flush */
- 18 + 18 + /* vcn_v2_0_jpeg_ring_emit_fence x2 vm fence */
- 8 + 16,
- .emit_ib_size = 22, /* vcn_v2_0_jpeg_ring_emit_ib */
- .emit_ib = vcn_v2_0_jpeg_ring_emit_ib,
- .emit_fence = vcn_v2_0_jpeg_ring_emit_fence,
- .emit_vm_flush = vcn_v2_0_jpeg_ring_emit_vm_flush,
- .test_ring = amdgpu_vcn_jpeg_ring_test_ring,
- .test_ib = amdgpu_vcn_jpeg_ring_test_ib,
- .insert_nop = vcn_v2_0_jpeg_ring_nop,
- .insert_start = vcn_v2_0_jpeg_ring_insert_start,
- .insert_end = vcn_v2_0_jpeg_ring_insert_end,
- .pad_ib = amdgpu_ring_generic_pad_ib,
- .begin_use = amdgpu_vcn_ring_begin_use,
- .end_use = amdgpu_vcn_ring_end_use,
- .emit_wreg = vcn_v2_0_jpeg_ring_emit_wreg,
- .emit_reg_wait = vcn_v2_0_jpeg_ring_emit_reg_wait,
- .emit_reg_write_reg_wait = amdgpu_ring_emit_reg_write_reg_wait_helper,
-};
-
static void vcn_v2_5_set_dec_ring_funcs(struct amdgpu_device *adev)
{
int i;
@@ -1233,19 +1628,6 @@ static void vcn_v2_5_set_enc_ring_funcs(struct amdgpu_device *adev)
}
}
-static void vcn_v2_5_set_jpeg_ring_funcs(struct amdgpu_device *adev)
-{
- int i;
-
- for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
- if (adev->vcn.harvest_config & (1 << i))
- continue;
- adev->vcn.inst[i].ring_jpeg.funcs = &vcn_v2_5_jpeg_ring_vm_funcs;
- adev->vcn.inst[i].ring_jpeg.me = i;
- DRM_INFO("VCN(%d) jpeg decode is enabled in VM mode\n", i);
- }
-}
-
static bool vcn_v2_5_is_idle(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
@@ -1283,6 +1665,9 @@ static int vcn_v2_5_set_clockgating_state(void *handle,
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
bool enable = (state == AMD_CG_STATE_GATE) ? true : false;
+ if (amdgpu_sriov_vf(adev))
+ return 0;
+
if (enable) {
if (vcn_v2_5_is_idle(handle))
return -EBUSY;
@@ -1300,6 +1685,9 @@ static int vcn_v2_5_set_powergating_state(void *handle,
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
int ret;
+ if (amdgpu_sriov_vf(adev))
+ return 0;
+
if(state == adev->vcn.cur_state)
return 0;
@@ -1352,9 +1740,6 @@ static int vcn_v2_5_process_interrupt(struct amdgpu_device *adev,
case VCN_2_0__SRCID__UVD_ENC_LOW_LATENCY:
amdgpu_fence_process(&adev->vcn.inst[ip_instance].ring_enc[1]);
break;
- case VCN_2_0__SRCID__JPEG_DECODE:
- amdgpu_fence_process(&adev->vcn.inst[ip_instance].ring_jpeg);
- break;
default:
DRM_ERROR("Unhandled interrupt: %d %d\n",
entry->src_id, entry->src_data[0]);
@@ -1376,7 +1761,7 @@ static void vcn_v2_5_set_irq_funcs(struct amdgpu_device *adev)
for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
if (adev->vcn.harvest_config & (1 << i))
continue;
- adev->vcn.inst[i].irq.num_types = adev->vcn.num_enc_rings + 2;
+ adev->vcn.inst[i].irq.num_types = adev->vcn.num_enc_rings + 1;
adev->vcn.inst[i].irq.funcs = &vcn_v2_5_irq_funcs;
}
}
diff --git a/drivers/gpu/drm/amd/amdgpu/vega10_ih.c b/drivers/gpu/drm/amd/amdgpu/vega10_ih.c
index 5cb7e231de5f..d9e331084ea0 100644
--- a/drivers/gpu/drm/amd/amdgpu/vega10_ih.c
+++ b/drivers/gpu/drm/amd/amdgpu/vega10_ih.c
@@ -234,16 +234,9 @@ static int vega10_ih_irq_init(struct amdgpu_device *adev)
WREG32_SOC15(OSSSYS, 0, mmIH_RB_BASE_HI, (ih->gpu_addr >> 40) & 0xff);
ih_rb_cntl = RREG32_SOC15(OSSSYS, 0, mmIH_RB_CNTL);
- ih_chicken = RREG32_SOC15(OSSSYS, 0, mmIH_CHICKEN);
ih_rb_cntl = vega10_ih_rb_cntl(ih, ih_rb_cntl);
- if (adev->irq.ih.use_bus_addr) {
- ih_chicken = REG_SET_FIELD(ih_chicken, IH_CHICKEN, MC_SPACE_GPA_ENABLE, 1);
- } else {
- ih_chicken = REG_SET_FIELD(ih_chicken, IH_CHICKEN, MC_SPACE_FBPA_ENABLE, 1);
- }
ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL, RPTR_REARM,
!!adev->irq.msi_enabled);
-
if (amdgpu_sriov_vf(adev)) {
if (psp_reg_program(&adev->psp, PSP_REG_IH_RB_CNTL, ih_rb_cntl)) {
DRM_ERROR("PSP program IH_RB_CNTL failed!\n");
@@ -253,10 +246,19 @@ static int vega10_ih_irq_init(struct amdgpu_device *adev)
WREG32_SOC15(OSSSYS, 0, mmIH_RB_CNTL, ih_rb_cntl);
}
- if ((adev->asic_type == CHIP_ARCTURUS
- && adev->firmware.load_type == AMDGPU_FW_LOAD_DIRECT)
- || adev->asic_type == CHIP_RENOIR)
+ if ((adev->asic_type == CHIP_ARCTURUS &&
+ adev->firmware.load_type == AMDGPU_FW_LOAD_DIRECT) ||
+ adev->asic_type == CHIP_RENOIR) {
+ ih_chicken = RREG32_SOC15(OSSSYS, 0, mmIH_CHICKEN);
+ if (adev->irq.ih.use_bus_addr) {
+ ih_chicken = REG_SET_FIELD(ih_chicken, IH_CHICKEN,
+ MC_SPACE_GPA_ENABLE, 1);
+ } else {
+ ih_chicken = REG_SET_FIELD(ih_chicken, IH_CHICKEN,
+ MC_SPACE_FBPA_ENABLE, 1);
+ }
WREG32_SOC15(OSSSYS, 0, mmIH_CHICKEN, ih_chicken);
+ }
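
The fix above defers the IH_CHICKEN read-modify-write until after the ASICs that actually need it are identified, instead of touching the register unconditionally on parts where it should be left alone. The REG_SET_FIELD pattern used here is a plain read-modify-write over a mask/shift pair; its semantics, sketched with explicit parameters instead of the generated register headers:

#include <stdint.h>

/* Equivalent of REG_SET_FIELD(reg, ..., field, val) with the mask and
 * shift passed explicitly. */
static uint32_t set_field(uint32_t reg, uint32_t mask,
			  unsigned int shift, uint32_t val)
{
	return (reg & ~mask) | ((val << shift) & mask);
}
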
/* set the writeback address whether it's enabled or not */
WREG32_SOC15(OSSSYS, 0, mmIH_RB_WPTR_ADDR_LO,
diff --git a/drivers/gpu/drm/amd/amdgpu/vi.c b/drivers/gpu/drm/amd/amdgpu/vi.c
index f1b171e30774..78b35901643b 100644
--- a/drivers/gpu/drm/amd/amdgpu/vi.c
+++ b/drivers/gpu/drm/amd/amdgpu/vi.c
@@ -689,40 +689,6 @@ static int vi_gpu_pci_config_reset(struct amdgpu_device *adev)
return -EINVAL;
}
-int smu7_asic_get_baco_capability(struct amdgpu_device *adev, bool *cap)
-{
- void *pp_handle = adev->powerplay.pp_handle;
- const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
-
- if (!pp_funcs || !pp_funcs->get_asic_baco_capability) {
- *cap = false;
- return -ENOENT;
- }
-
- return pp_funcs->get_asic_baco_capability(pp_handle, cap);
-}
-
-int smu7_asic_baco_reset(struct amdgpu_device *adev)
-{
- void *pp_handle = adev->powerplay.pp_handle;
- const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
-
- if (!pp_funcs ||!pp_funcs->get_asic_baco_state ||!pp_funcs->set_asic_baco_state)
- return -ENOENT;
-
- /* enter BACO state */
- if (pp_funcs->set_asic_baco_state(pp_handle, 1))
- return -EIO;
-
- /* exit BACO state */
- if (pp_funcs->set_asic_baco_state(pp_handle, 0))
- return -EIO;
-
- dev_info(adev->dev, "GPU BACO reset\n");
-
- return 0;
-}
-
/**
* vi_asic_pci_config_reset - soft reset GPU
*
@@ -745,6 +711,21 @@ static int vi_asic_pci_config_reset(struct amdgpu_device *adev)
return r;
}
+static bool vi_asic_supports_baco(struct amdgpu_device *adev)
+{
+ switch (adev->asic_type) {
+ case CHIP_FIJI:
+ case CHIP_TONGA:
+ case CHIP_POLARIS10:
+ case CHIP_POLARIS11:
+ case CHIP_POLARIS12:
+ case CHIP_TOPAZ:
+ return amdgpu_dpm_is_baco_supported(adev);
+ default:
+ return false;
+ }
+}
+
static enum amd_reset_method
vi_asic_reset_method(struct amdgpu_device *adev)
{
@@ -757,7 +738,7 @@ vi_asic_reset_method(struct amdgpu_device *adev)
case CHIP_POLARIS11:
case CHIP_POLARIS12:
case CHIP_TOPAZ:
- smu7_asic_get_baco_capability(adev, &baco_reset);
+ baco_reset = amdgpu_dpm_is_baco_supported(adev);
break;
default:
baco_reset = false;
@@ -786,7 +767,7 @@ static int vi_asic_reset(struct amdgpu_device *adev)
if (vi_asic_reset_method(adev) == AMD_RESET_METHOD_BACO) {
if (!adev->in_suspend)
amdgpu_inc_vram_lost(adev);
- r = smu7_asic_baco_reset(adev);
+ r = amdgpu_dpm_baco_reset(adev);
} else {
r = vi_asic_pci_config_reset(adev);
}
@@ -1119,6 +1100,7 @@ static const struct amdgpu_asic_funcs vi_asic_funcs =
.get_pcie_usage = &vi_get_pcie_usage,
.need_reset_on_init = &vi_need_reset_on_init,
.get_pcie_replay_count = &vi_get_pcie_replay_count,
+ .supports_baco = &vi_asic_supports_baco,
};
#define CZ_REV_BRISTOL(rev) \
diff --git a/drivers/gpu/drm/amd/amdgpu/vi.h b/drivers/gpu/drm/amd/amdgpu/vi.h
index 40d4174913a4..defb4aaf929a 100644
--- a/drivers/gpu/drm/amd/amdgpu/vi.h
+++ b/drivers/gpu/drm/amd/amdgpu/vi.h
@@ -31,7 +31,5 @@ void vi_srbm_select(struct amdgpu_device *adev,
int vi_set_ip_blocks(struct amdgpu_device *adev);
void legacy_doorbell_index_init(struct amdgpu_device *adev);
-int smu7_asic_get_baco_capability(struct amdgpu_device *adev, bool *cap);
-int smu7_asic_baco_reset(struct amdgpu_device *adev);
#endif
diff --git a/drivers/gpu/drm/amd/amdkfd/Makefile b/drivers/gpu/drm/amd/amdkfd/Makefile
index 48155060a57c..61474627a32c 100644
--- a/drivers/gpu/drm/amd/amdkfd/Makefile
+++ b/drivers/gpu/drm/amd/amdkfd/Makefile
@@ -38,11 +38,9 @@ AMDKFD_FILES := $(AMDKFD_PATH)/kfd_module.o \
$(AMDKFD_PATH)/kfd_mqd_manager_v9.o \
$(AMDKFD_PATH)/kfd_mqd_manager_v10.o \
$(AMDKFD_PATH)/kfd_kernel_queue.o \
- $(AMDKFD_PATH)/kfd_kernel_queue_cik.o \
- $(AMDKFD_PATH)/kfd_kernel_queue_vi.o \
- $(AMDKFD_PATH)/kfd_kernel_queue_v9.o \
- $(AMDKFD_PATH)/kfd_kernel_queue_v10.o \
$(AMDKFD_PATH)/kfd_packet_manager.o \
+ $(AMDKFD_PATH)/kfd_packet_manager_vi.o \
+ $(AMDKFD_PATH)/kfd_packet_manager_v9.o \
$(AMDKFD_PATH)/kfd_process_queue_manager.o \
$(AMDKFD_PATH)/kfd_device_queue_manager.o \
$(AMDKFD_PATH)/kfd_device_queue_manager_cik.o \
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
index 1544007af34a..3f0300e53727 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
@@ -42,6 +42,7 @@
static long kfd_ioctl(struct file *, unsigned int, unsigned long);
static int kfd_open(struct inode *, struct file *);
+static int kfd_release(struct inode *, struct file *);
static int kfd_mmap(struct file *, struct vm_area_struct *);
static const char kfd_dev_name[] = "kfd";
@@ -51,6 +52,7 @@ static const struct file_operations kfd_fops = {
.unlocked_ioctl = kfd_ioctl,
.compat_ioctl = compat_ptr_ioctl,
.open = kfd_open,
+ .release = kfd_release,
.mmap = kfd_mmap,
};
@@ -124,8 +126,13 @@ static int kfd_open(struct inode *inode, struct file *filep)
if (IS_ERR(process))
return PTR_ERR(process);
- if (kfd_is_locked())
+ if (kfd_is_locked()) {
+ kfd_unref_process(process);
return -EAGAIN;
+ }
+
+ /* filep now owns the reference returned by kfd_create_process */
+ filep->private_data = process;
dev_dbg(kfd_device, "process %d opened, compat mode (32 bit) - %d\n",
process->pasid, process->is_32bit_user_mode);
@@ -133,6 +140,16 @@ static int kfd_open(struct inode *inode, struct file *filep)
return 0;
}
+static int kfd_release(struct inode *inode, struct file *filep)
+{
+ struct kfd_process *process = filep->private_data;
+
+ if (process)
+ kfd_unref_process(process);
+
+ return 0;
+}
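
Together, the open/release hooks make the struct file the owner of one process reference: kfd_open parks the reference returned by kfd_create_process in filep->private_data, and kfd_release drops exactly that reference (or nothing, if open bailed out before taking ownership). A userspace analogue of the pairing, with illustrative names:

#include <stdlib.h>

struct obj { int refcount; };

static struct obj *obj_get(struct obj *o) { o->refcount++; return o; }
static void obj_put(struct obj *o) { if (--o->refcount == 0) free(o); }

struct file_ctx { void *private_data; };

static int my_open(struct file_ctx *f, struct obj *o)
{
	f->private_data = obj_get(o);	/* f now owns this reference */
	return 0;
}

static int my_release(struct file_ctx *f)
{
	if (f->private_data)
		obj_put(f->private_data);	/* drop the open() reference */
	return 0;
}
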
+
static int kfd_ioctl_get_version(struct file *filep, struct kfd_process *p,
void *data)
{
@@ -258,6 +275,7 @@ static int kfd_ioctl_create_queue(struct file *filep, struct kfd_process *p,
unsigned int queue_id;
struct kfd_process_device *pdd;
struct queue_properties q_properties;
+ uint32_t doorbell_offset_in_process = 0;
memset(&q_properties, 0, sizeof(struct queue_properties));
@@ -286,7 +304,8 @@ static int kfd_ioctl_create_queue(struct file *filep, struct kfd_process *p,
p->pasid,
dev->id);
- err = pqm_create_queue(&p->pqm, dev, filep, &q_properties, &queue_id);
+ err = pqm_create_queue(&p->pqm, dev, filep, &q_properties, &queue_id,
+ &doorbell_offset_in_process);
if (err != 0)
goto err_create_queue;
@@ -296,14 +315,11 @@ static int kfd_ioctl_create_queue(struct file *filep, struct kfd_process *p,
/* Return gpu_id as doorbell offset for mmap usage */
args->doorbell_offset = KFD_MMAP_TYPE_DOORBELL;
args->doorbell_offset |= KFD_MMAP_GPU_ID(args->gpu_id);
- args->doorbell_offset <<= PAGE_SHIFT;
if (KFD_IS_SOC15(dev->device_info->asic_family))
- /* On SOC15 ASICs, doorbell allocation must be
- * per-device, and independent from the per-process
- * queue_id. Return the doorbell offset within the
- * doorbell aperture to user mode.
+ /* On SOC15 ASICs, include the doorbell offset within the
+ * process doorbell frame, which is 2 pages.
*/
- args->doorbell_offset |= q_properties.doorbell_off;
+ args->doorbell_offset |= doorbell_offset_in_process;
mutex_unlock(&p->mutex);
@@ -1312,10 +1328,9 @@ static int kfd_ioctl_alloc_memory_of_gpu(struct file *filep,
/* MMIO is mapped through kfd device
* Generate a kfd mmap offset
*/
- if (flags & KFD_IOC_ALLOC_MEM_FLAGS_MMIO_REMAP) {
- args->mmap_offset = KFD_MMAP_TYPE_MMIO | KFD_MMAP_GPU_ID(args->gpu_id);
- args->mmap_offset <<= PAGE_SHIFT;
- }
+ if (flags & KFD_IOC_ALLOC_MEM_FLAGS_MMIO_REMAP)
+ args->mmap_offset = KFD_MMAP_TYPE_MMIO
+ | KFD_MMAP_GPU_ID(args->gpu_id);
return 0;
@@ -1803,9 +1818,14 @@ static long kfd_ioctl(struct file *filep, unsigned int cmd, unsigned long arg)
dev_dbg(kfd_device, "ioctl cmd 0x%x (#0x%x), arg 0x%lx\n", cmd, nr, arg);
- process = kfd_get_process(current);
- if (IS_ERR(process)) {
- dev_dbg(kfd_device, "no process\n");
+ /* Get the process struct from the filep. Only the process
+ * that opened /dev/kfd can use the file descriptor. Child
+ * processes need to create their own KFD device context.
+ */
+ process = filep->private_data;
+ if (process->lead_thread != current->group_leader) {
+ dev_dbg(kfd_device, "Using KFD FD in wrong process\n");
+ retcode = -EBADF;
goto err_i1;
}
@@ -1899,20 +1919,19 @@ static int kfd_mmap(struct file *filp, struct vm_area_struct *vma)
{
struct kfd_process *process;
struct kfd_dev *dev = NULL;
- unsigned long vm_pgoff;
+ unsigned long mmap_offset;
unsigned int gpu_id;
process = kfd_get_process(current);
if (IS_ERR(process))
return PTR_ERR(process);
- vm_pgoff = vma->vm_pgoff;
- vma->vm_pgoff = KFD_MMAP_OFFSET_VALUE_GET(vm_pgoff);
- gpu_id = KFD_MMAP_GPU_ID_GET(vm_pgoff);
+ mmap_offset = vma->vm_pgoff << PAGE_SHIFT;
+ gpu_id = KFD_MMAP_GET_GPU_ID(mmap_offset);
if (gpu_id)
dev = kfd_device_by_id(gpu_id);
- switch (vm_pgoff & KFD_MMAP_TYPE_MASK) {
+ switch (mmap_offset & KFD_MMAP_TYPE_MASK) {
case KFD_MMAP_TYPE_DOORBELL:
if (!dev)
return -ENODEV;
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_dbgdev.c b/drivers/gpu/drm/amd/amdkfd/kfd_dbgdev.c
index d59f2cd056c6..27bcc5b472f6 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_dbgdev.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_dbgdev.c
@@ -72,11 +72,11 @@ static int dbgdev_diq_submit_ib(struct kfd_dbgdev *dbgdev,
* The receive packet buff will be sitting on the Indirect Buffer
* and in the PQ we put the IB packet + sync packet(s).
*/
- status = kq->ops.acquire_packet_buffer(kq,
+ status = kq_acquire_packet_buffer(kq,
pq_packets_size_in_bytes / sizeof(uint32_t),
&ib_packet_buff);
if (status) {
- pr_err("acquire_packet_buffer failed\n");
+ pr_err("kq_acquire_packet_buffer failed\n");
return status;
}
@@ -115,7 +115,7 @@ static int dbgdev_diq_submit_ib(struct kfd_dbgdev *dbgdev,
if (status) {
pr_err("Failed to allocate GART memory\n");
- kq->ops.rollback_packet(kq);
+ kq_rollback_packet(kq);
return status;
}
@@ -151,7 +151,7 @@ static int dbgdev_diq_submit_ib(struct kfd_dbgdev *dbgdev,
rm_packet->data_lo = QUEUESTATE__ACTIVE;
- kq->ops.submit_packet(kq);
+ kq_submit_packet(kq);
/* Wait till CP writes sync code: */
status = amdkfd_fence_wait_timeout(
@@ -185,7 +185,7 @@ static int dbgdev_register_diq(struct kfd_dbgdev *dbgdev)
properties.type = KFD_QUEUE_TYPE_DIQ;
status = pqm_create_queue(dbgdev->pqm, dbgdev->dev, NULL,
- &properties, &qid);
+ &properties, &qid, NULL);
if (status) {
pr_err("Failed to create DIQ\n");
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_debugfs.c b/drivers/gpu/drm/amd/amdkfd/kfd_debugfs.c
index 15c523027285..511712c2e382 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_debugfs.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_debugfs.c
@@ -93,7 +93,7 @@ void kfd_debugfs_init(void)
kfd_debugfs_hqds_by_device, &kfd_debugfs_fops);
debugfs_create_file("rls", S_IFREG | 0444, debugfs_root,
kfd_debugfs_rls_by_device, &kfd_debugfs_fops);
- debugfs_create_file("hang_hws", S_IFREG | 0644, debugfs_root,
+ debugfs_create_file("hang_hws", S_IFREG | 0200, debugfs_root,
NULL, &kfd_debugfs_hang_hws_fops);
}
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device.c b/drivers/gpu/drm/amd/amdkfd/kfd_device.c
index 4fa8834ce7cb..2a9e40131735 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_device.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_device.c
@@ -728,6 +728,9 @@ int kgd2kfd_pre_reset(struct kfd_dev *kfd)
{
if (!kfd->init_complete)
return 0;
+
+ kfd->dqm->ops.pre_reset(kfd->dqm);
+
kgd2kfd_suspend(kfd);
kfd_signal_reset_event(kfd);
@@ -742,7 +745,7 @@ int kgd2kfd_pre_reset(struct kfd_dev *kfd)
int kgd2kfd_post_reset(struct kfd_dev *kfd)
{
- int ret, count;
+ int ret;
if (!kfd->init_complete)
return 0;
@@ -750,7 +753,7 @@ int kgd2kfd_post_reset(struct kfd_dev *kfd)
ret = kfd_resume(kfd);
if (ret)
return ret;
- count = atomic_dec_return(&kfd_locked);
+ atomic_dec(&kfd_locked);
atomic_set(&kfd->sram_ecc_flag, 0);
@@ -822,6 +825,21 @@ dqm_start_error:
return err;
}
+static inline void kfd_queue_work(struct workqueue_struct *wq,
+ struct work_struct *work)
+{
+ int cpu, new_cpu;
+
+ cpu = new_cpu = smp_processor_id();
+ do {
+ new_cpu = cpumask_next(new_cpu, cpu_online_mask) % nr_cpu_ids;
+ if (cpu_to_node(new_cpu) == numa_node_id())
+ break;
+ } while (cpu != new_cpu);
+
+ queue_work_on(new_cpu, wq, work);
+}
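
kfd_queue_work() steers interrupt work onto a CPU on the local NUMA node: starting at the current CPU it walks the online mask, wrapping at nr_cpu_ids, and stops at the first CPU whose node matches; if the walk comes all the way back around, it falls back to the starting CPU. The same search, sketched with stand-in callbacks for cpumask_next() and cpu_to_node():

/* next_online() must return the next online CPU after 'cpu' (possibly
 * >= nr_cpus, which the modulo wraps); node_of() maps a CPU to its node. */
static int pick_local_cpu(int start, int nr_cpus, int target_node,
			  int (*next_online)(int), int (*node_of)(int))
{
	int cpu = start;

	do {
		cpu = next_online(cpu) % nr_cpus;
		if (node_of(cpu) == target_node)
			break;
	} while (cpu != start);

	return cpu;
}
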
+
/* This is called directly from KGD at ISR. */
void kgd2kfd_interrupt(struct kfd_dev *kfd, const void *ih_ring_entry)
{
@@ -844,7 +862,7 @@ void kgd2kfd_interrupt(struct kfd_dev *kfd, const void *ih_ring_entry)
patched_ihre, &is_patched)
&& enqueue_ih_ring_entry(kfd,
is_patched ? patched_ihre : ih_ring_entry))
- queue_work(kfd->ih_wq, &kfd->interrupt_work);
+ kfd_queue_work(kfd->ih_wq, &kfd->interrupt_work);
spin_unlock_irqrestore(&kfd->interrupt_lock, flags);
}
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
index 984c2f2b24b6..2870553a2ce0 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
@@ -170,7 +170,7 @@ static int allocate_doorbell(struct qcm_process_device *qpd, struct queue *q)
}
q->properties.doorbell_off =
- kfd_doorbell_id_to_offset(dev, q->process,
+ kfd_get_doorbell_dw_offset_in_bar(dev, q->process,
q->doorbell_id);
return 0;
@@ -930,11 +930,11 @@ static void uninitialize(struct device_queue_manager *dqm)
for (i = 0 ; i < KFD_MQD_TYPE_MAX ; i++)
kfree(dqm->mqd_mgrs[i]);
mutex_destroy(&dqm->lock_hidden);
- kfd_gtt_sa_free(dqm->dev, dqm->pipeline_mem);
}
static int start_nocpsch(struct device_queue_manager *dqm)
{
+ pr_info("SW scheduler is used");
init_interrupts(dqm);
if (dqm->dev->device_info->asic_family == CHIP_HAWAII)
@@ -947,12 +947,19 @@ static int start_nocpsch(struct device_queue_manager *dqm)
static int stop_nocpsch(struct device_queue_manager *dqm)
{
if (dqm->dev->device_info->asic_family == CHIP_HAWAII)
- pm_uninit(&dqm->packets);
+ pm_uninit(&dqm->packets, false);
dqm->sched_running = false;
return 0;
}
+static void pre_reset(struct device_queue_manager *dqm)
+{
+ dqm_lock(dqm);
+ dqm->is_resetting = true;
+ dqm_unlock(dqm);
+}
+
static int allocate_sdma_queue(struct device_queue_manager *dqm,
struct queue *q)
{
@@ -1100,6 +1107,7 @@ static int start_cpsch(struct device_queue_manager *dqm)
dqm_lock(dqm);
/* clear hang status when driver try to start the hw scheduler */
dqm->is_hws_hang = false;
+ dqm->is_resetting = false;
dqm->sched_running = true;
execute_queues_cpsch(dqm, KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES, 0);
dqm_unlock(dqm);
@@ -1107,20 +1115,24 @@ static int start_cpsch(struct device_queue_manager *dqm)
return 0;
fail_allocate_vidmem:
fail_set_sched_resources:
- pm_uninit(&dqm->packets);
+ pm_uninit(&dqm->packets, false);
fail_packet_manager_init:
return retval;
}
static int stop_cpsch(struct device_queue_manager *dqm)
{
+ bool hanging;
+
dqm_lock(dqm);
- unmap_queues_cpsch(dqm, KFD_UNMAP_QUEUES_FILTER_ALL_QUEUES, 0);
+ if (!dqm->is_hws_hang)
+ unmap_queues_cpsch(dqm, KFD_UNMAP_QUEUES_FILTER_ALL_QUEUES, 0);
+ hanging = dqm->is_hws_hang || dqm->is_resetting;
dqm->sched_running = false;
dqm_unlock(dqm);
kfd_gtt_sa_free(dqm->dev, dqm->fence_mem);
- pm_uninit(&dqm->packets);
+ pm_uninit(&dqm->packets, hanging);
return 0;
}
@@ -1352,8 +1364,17 @@ static int unmap_queues_cpsch(struct device_queue_manager *dqm,
/* should be timed out */
retval = amdkfd_fence_wait_timeout(dqm->fence_addr, KFD_FENCE_COMPLETED,
queue_preemption_timeout_ms);
- if (retval)
+ if (retval) {
+ pr_err("The cp might be in an unrecoverable state due to an unsuccessful queues preemption\n");
+ dqm->is_hws_hang = true;
+ /* It's possible we're detecting a HWS hang in the
+ * middle of a GPU reset. No need to schedule another
+ * reset in this case.
+ */
+ if (!dqm->is_resetting)
+ schedule_work(&dqm->hw_exception_work);
return retval;
+ }
pm_release_ib(&dqm->packets);
dqm->active_runlist = false;
@@ -1371,12 +1392,8 @@ static int execute_queues_cpsch(struct device_queue_manager *dqm,
if (dqm->is_hws_hang)
return -EIO;
retval = unmap_queues_cpsch(dqm, filter, filter_param);
- if (retval) {
- pr_err("The cp might be in an unrecoverable state due to an unsuccessful queues preemption\n");
- dqm->is_hws_hang = true;
- schedule_work(&dqm->hw_exception_work);
+ if (retval)
return retval;
- }
return map_queues_cpsch(dqm);
}
@@ -1595,7 +1612,7 @@ static int get_wave_state(struct device_queue_manager *dqm,
goto dqm_unlock;
}
- mqd_mgr = dqm->mqd_mgrs[KFD_MQD_TYPE_COMPUTE];
+ mqd_mgr = dqm->mqd_mgrs[KFD_MQD_TYPE_CP];
if (!mqd_mgr->get_wave_state) {
r = -EINVAL;
@@ -1770,6 +1787,7 @@ struct device_queue_manager *device_queue_manager_init(struct kfd_dev *dev)
dqm->ops.initialize = initialize_cpsch;
dqm->ops.start = start_cpsch;
dqm->ops.stop = stop_cpsch;
+ dqm->ops.pre_reset = pre_reset;
dqm->ops.destroy_queue = destroy_queue_cpsch;
dqm->ops.update_queue = update_queue;
dqm->ops.register_process = register_process;
@@ -1788,6 +1806,7 @@ struct device_queue_manager *device_queue_manager_init(struct kfd_dev *dev)
/* initialize dqm for no cp scheduling */
dqm->ops.start = start_nocpsch;
dqm->ops.stop = stop_nocpsch;
+ dqm->ops.pre_reset = pre_reset;
dqm->ops.create_queue = create_queue_nocpsch;
dqm->ops.destroy_queue = destroy_queue_nocpsch;
dqm->ops.update_queue = update_queue;
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h
index a8c37e6da027..871d3b628d2d 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h
@@ -104,6 +104,7 @@ struct device_queue_manager_ops {
int (*initialize)(struct device_queue_manager *dqm);
int (*start)(struct device_queue_manager *dqm);
int (*stop)(struct device_queue_manager *dqm);
+ void (*pre_reset)(struct device_queue_manager *dqm);
void (*uninitialize)(struct device_queue_manager *dqm);
int (*create_kernel_queue)(struct device_queue_manager *dqm,
struct kernel_queue *kq,
@@ -190,7 +191,6 @@ struct device_queue_manager {
/* the pasid mapping for each kfd vmid */
uint16_t vmid_pasid[VMID_NUM];
uint64_t pipelines_addr;
- struct kfd_mem_obj *pipeline_mem;
uint64_t fence_gpu_addr;
unsigned int *fence_addr;
struct kfd_mem_obj *fence_mem;
@@ -199,6 +199,7 @@ struct device_queue_manager {
/* hw exception */
bool is_hws_hang;
+ bool is_resetting;
struct work_struct hw_exception_work;
struct kfd_mem_obj hiq_sdma_mqd;
bool sched_running;
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_doorbell.c b/drivers/gpu/drm/amd/amdkfd/kfd_doorbell.c
index ebe79bf00145..8e0c00b9555e 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_doorbell.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_doorbell.c
@@ -91,7 +91,7 @@ int kfd_doorbell_init(struct kfd_dev *kfd)
kfd->doorbell_base = kfd->shared_resources.doorbell_physical_address +
doorbell_start_offset;
- kfd->doorbell_id_offset = doorbell_start_offset / sizeof(u32);
+ kfd->doorbell_base_dw_offset = doorbell_start_offset / sizeof(u32);
kfd->doorbell_kernel_ptr = ioremap(kfd->doorbell_base,
kfd_doorbell_process_slice(kfd));
@@ -103,8 +103,8 @@ int kfd_doorbell_init(struct kfd_dev *kfd)
pr_debug("doorbell base == 0x%08lX\n",
(uintptr_t)kfd->doorbell_base);
- pr_debug("doorbell_id_offset == 0x%08lX\n",
- kfd->doorbell_id_offset);
+ pr_debug("doorbell_base_dw_offset == 0x%08lX\n",
+ kfd->doorbell_base_dw_offset);
pr_debug("doorbell_process_limit == 0x%08lX\n",
doorbell_process_limit);
@@ -185,7 +185,7 @@ void __iomem *kfd_get_kernel_doorbell(struct kfd_dev *kfd,
* Calculating the kernel doorbell offset using the first
* doorbell page.
*/
- *doorbell_off = kfd->doorbell_id_offset + inx;
+ *doorbell_off = kfd->doorbell_base_dw_offset + inx;
pr_debug("Get kernel queue doorbell\n"
" doorbell offset == 0x%08X\n"
@@ -225,17 +225,17 @@ void write_kernel_doorbell64(void __iomem *db, u64 value)
}
}
-unsigned int kfd_doorbell_id_to_offset(struct kfd_dev *kfd,
+unsigned int kfd_get_doorbell_dw_offset_in_bar(struct kfd_dev *kfd,
struct kfd_process *process,
unsigned int doorbell_id)
{
/*
- * doorbell_id_offset accounts for doorbells taken by KGD.
+ * doorbell_base_dw_offset accounts for doorbells taken by KGD.
* index * kfd_doorbell_process_slice/sizeof(u32) adjusts to
* the process's doorbells. The offset returned is in dword
* units regardless of the ASIC-dependent doorbell size.
*/
- return kfd->doorbell_id_offset +
+ return kfd->doorbell_base_dw_offset +
process->doorbell_index
* kfd_doorbell_process_slice(kfd) / sizeof(u32) +
doorbell_id * kfd->device_info->doorbell_size / sizeof(u32);
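
The returned offset is deliberately kept in dword units regardless of the ASIC's doorbell size, so callers can index the doorbell BAR as a u32 array. A worked example with assumed values (a 4 KiB per-process slice and 8-byte doorbells; these constants are for illustration only):

#include <stdint.h>

static uint32_t doorbell_dw_offset(uint32_t base_dw_offset,
				   uint32_t process_index,
				   uint32_t process_slice_bytes,
				   uint32_t doorbell_id,
				   uint32_t doorbell_size_bytes)
{
	return base_dw_offset +
	       process_index * process_slice_bytes / sizeof(uint32_t) +
	       doorbell_id * doorbell_size_bytes / sizeof(uint32_t);
}

/* e.g. base 512, process index 3, 4096-byte slice, doorbell id 2,
 * 8-byte doorbells: 512 + 3 * 1024 + 2 * 2 = 3588 dwords. */
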
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_events.c b/drivers/gpu/drm/amd/amdkfd/kfd_events.c
index 908081c85de1..1f8365575b12 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_events.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_events.c
@@ -346,7 +346,6 @@ int kfd_event_create(struct file *devkfd, struct kfd_process *p,
ret = create_signal_event(devkfd, p, ev);
if (!ret) {
*event_page_offset = KFD_MMAP_TYPE_EVENTS;
- *event_page_offset <<= PAGE_SHIFT;
*event_slot_index = ev->event_id;
}
break;
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_iommu.c b/drivers/gpu/drm/amd/amdkfd/kfd_iommu.c
index 193e2835bd4d..8d871514671e 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_iommu.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_iommu.c
@@ -62,9 +62,6 @@ int kfd_iommu_device_init(struct kfd_dev *kfd)
struct amd_iommu_device_info iommu_info;
unsigned int pasid_limit;
int err;
- struct kfd_topology_device *top_dev;
-
- top_dev = kfd_topology_device_by_id(kfd->id);
if (!kfd->device_info->needs_iommu_device)
return 0;
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.c b/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.c
index 11d244891393..bae706462f96 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.c
@@ -34,7 +34,10 @@
#define PM4_COUNT_ZERO (((1 << 15) - 1) << 16)
-static bool initialize(struct kernel_queue *kq, struct kfd_dev *dev,
+/* Initialize a kernel queue, including allocations of GART memory
+ * needed for the queue.
+ */
+static bool kq_initialize(struct kernel_queue *kq, struct kfd_dev *dev,
enum kfd_queue_type type, unsigned int queue_size)
{
struct queue_properties prop;
@@ -87,9 +90,17 @@ static bool initialize(struct kernel_queue *kq, struct kfd_dev *dev,
kq->pq_kernel_addr = kq->pq->cpu_ptr;
kq->pq_gpu_addr = kq->pq->gpu_addr;
- retval = kq->ops_asic_specific.initialize(kq, dev, type, queue_size);
- if (!retval)
- goto err_eop_allocate_vidmem;
+ /* For CIK family ASICs, kq->eop_mem is not needed */
+ if (dev->device_info->asic_family > CHIP_MULLINS) {
+ retval = kfd_gtt_sa_allocate(dev, PAGE_SIZE, &kq->eop_mem);
+ if (retval != 0)
+ goto err_eop_allocate_vidmem;
+
+ kq->eop_gpu_addr = kq->eop_mem->gpu_addr;
+ kq->eop_kernel_addr = kq->eop_mem->cpu_ptr;
+
+ memset(kq->eop_kernel_addr, 0, PAGE_SIZE);
+ }
retval = kfd_gtt_sa_allocate(dev, sizeof(*kq->rptr_kernel),
&kq->rptr_mem);
@@ -183,9 +194,10 @@ err_get_kernel_doorbell:
}
-static void uninitialize(struct kernel_queue *kq)
+/* Uninitialize a kernel queue and free all of its memory. */
+static void kq_uninitialize(struct kernel_queue *kq, bool hanging)
{
- if (kq->queue->properties.type == KFD_QUEUE_TYPE_HIQ)
+ if (kq->queue->properties.type == KFD_QUEUE_TYPE_HIQ && !hanging)
kq->mqd_mgr->destroy_mqd(kq->mqd_mgr,
kq->queue->mqd,
KFD_PREEMPT_TYPE_WAVEFRONT_RESET,
@@ -200,14 +212,19 @@ static void uninitialize(struct kernel_queue *kq)
kfd_gtt_sa_free(kq->dev, kq->rptr_mem);
kfd_gtt_sa_free(kq->dev, kq->wptr_mem);
- kq->ops_asic_specific.uninitialize(kq);
+
+ /* For CIK family ASICs kq->eop_mem is NULL; kfd_gtt_sa_free()
+ * handles NULL properly.
+ */
+ kfd_gtt_sa_free(kq->dev, kq->eop_mem);
+
kfd_gtt_sa_free(kq->dev, kq->pq);
kfd_release_kernel_doorbell(kq->dev,
kq->queue->properties.doorbell_ptr);
uninit_queue(kq->queue);
}
-static int acquire_packet_buffer(struct kernel_queue *kq,
+int kq_acquire_packet_buffer(struct kernel_queue *kq,
size_t packet_size_in_dwords, unsigned int **buffer_ptr)
{
size_t available_size;
@@ -268,7 +285,7 @@ err_no_space:
return -ENOMEM;
}
-static void submit_packet(struct kernel_queue *kq)
+void kq_submit_packet(struct kernel_queue *kq)
{
#ifdef DEBUG
int i;
@@ -280,11 +297,18 @@ static void submit_packet(struct kernel_queue *kq)
}
pr_debug("\n");
#endif
-
- kq->ops_asic_specific.submit_packet(kq);
+ if (kq->dev->device_info->doorbell_size == 8) {
+ *kq->wptr64_kernel = kq->pending_wptr64;
+ write_kernel_doorbell64(kq->queue->properties.doorbell_ptr,
+ kq->pending_wptr64);
+ } else {
+ *kq->wptr_kernel = kq->pending_wptr;
+ write_kernel_doorbell(kq->queue->properties.doorbell_ptr,
+ kq->pending_wptr);
+ }
}
-static void rollback_packet(struct kernel_queue *kq)
+void kq_rollback_packet(struct kernel_queue *kq)
{
if (kq->dev->device_info->doorbell_size == 8) {
kq->pending_wptr64 = *kq->wptr64_kernel;
@@ -304,60 +328,18 @@ struct kernel_queue *kernel_queue_init(struct kfd_dev *dev,
if (!kq)
return NULL;
- kq->ops.initialize = initialize;
- kq->ops.uninitialize = uninitialize;
- kq->ops.acquire_packet_buffer = acquire_packet_buffer;
- kq->ops.submit_packet = submit_packet;
- kq->ops.rollback_packet = rollback_packet;
-
- switch (dev->device_info->asic_family) {
- case CHIP_CARRIZO:
- case CHIP_TONGA:
- case CHIP_FIJI:
- case CHIP_POLARIS10:
- case CHIP_POLARIS11:
- case CHIP_POLARIS12:
- case CHIP_VEGAM:
- kernel_queue_init_vi(&kq->ops_asic_specific);
- break;
-
- case CHIP_KAVERI:
- case CHIP_HAWAII:
- kernel_queue_init_cik(&kq->ops_asic_specific);
- break;
-
- case CHIP_VEGA10:
- case CHIP_VEGA12:
- case CHIP_VEGA20:
- case CHIP_RAVEN:
- case CHIP_RENOIR:
- case CHIP_ARCTURUS:
- kernel_queue_init_v9(&kq->ops_asic_specific);
- break;
- case CHIP_NAVI10:
- case CHIP_NAVI12:
- case CHIP_NAVI14:
- kernel_queue_init_v10(&kq->ops_asic_specific);
- break;
- default:
- WARN(1, "Unexpected ASIC family %u",
- dev->device_info->asic_family);
- goto out_free;
- }
-
- if (kq->ops.initialize(kq, dev, type, KFD_KERNEL_QUEUE_SIZE))
+ if (kq_initialize(kq, dev, type, KFD_KERNEL_QUEUE_SIZE))
return kq;
pr_err("Failed to init kernel queue\n");
-out_free:
kfree(kq);
return NULL;
}
-void kernel_queue_uninit(struct kernel_queue *kq)
+void kernel_queue_uninit(struct kernel_queue *kq, bool hanging)
{
- kq->ops.uninitialize(kq);
+ kq_uninitialize(kq, hanging);
kfree(kq);
}
@@ -377,7 +359,7 @@ static __attribute__((unused)) void test_kq(struct kfd_dev *dev)
return;
}
- retval = kq->ops.acquire_packet_buffer(kq, 5, &buffer);
+ retval = kq_acquire_packet_buffer(kq, 5, &buffer);
if (unlikely(retval != 0)) {
pr_err(" Failed to acquire packet buffer\n");
pr_err("Kernel queue test failed\n");
@@ -385,7 +367,7 @@ static __attribute__((unused)) void test_kq(struct kfd_dev *dev)
}
for (i = 0; i < 5; i++)
buffer[i] = kq->nop_packet;
- kq->ops.submit_packet(kq);
+ kq_submit_packet(kq);
pr_err("Ending kernel queue test\n");
}
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.h b/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.h
index 365fc674fea4..f4cfe9f1871c 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.h
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.h
@@ -29,45 +29,28 @@
#include "kfd_priv.h"
/**
- * struct kernel_queue_ops
- *
- * @initialize: Initialize a kernel queue, including allocations of GART memory
- * needed for the queue.
- *
- * @uninitialize: Uninitialize a kernel queue and free all its memory usages.
- *
- * @acquire_packet_buffer: Returns a pointer to the location in the kernel
+ * kq_acquire_packet_buffer: Returns a pointer to the location in the kernel
* queue ring buffer where the calling function can write its packet. It is
* Guaranteed that there is enough space for that packet. It also updates the
* pending write pointer to that location so subsequent calls to
* acquire_packet_buffer will get a correct write pointer
*
- * @submit_packet: Update the write pointer and doorbell of a kernel queue.
- *
- * @sync_with_hw: Wait until the write pointer and the read pointer of a kernel
- * queue are equal, which means the CP has read all the submitted packets.
+ * kq_submit_packet: Update the write pointer and doorbell of a kernel queue.
*
- * @rollback_packet: This routine is called if we failed to build an acquired
+ * kq_rollback_packet: This routine is called if we failed to build an acquired
* packet for some reason. It just overwrites the pending wptr with the current
* one
*
*/
-struct kernel_queue_ops {
- bool (*initialize)(struct kernel_queue *kq, struct kfd_dev *dev,
- enum kfd_queue_type type, unsigned int queue_size);
- void (*uninitialize)(struct kernel_queue *kq);
- int (*acquire_packet_buffer)(struct kernel_queue *kq,
- size_t packet_size_in_dwords,
- unsigned int **buffer_ptr);
- void (*submit_packet)(struct kernel_queue *kq);
- void (*rollback_packet)(struct kernel_queue *kq);
-};
+int kq_acquire_packet_buffer(struct kernel_queue *kq,
+ size_t packet_size_in_dwords,
+ unsigned int **buffer_ptr);
+void kq_submit_packet(struct kernel_queue *kq);
+void kq_rollback_packet(struct kernel_queue *kq);
-struct kernel_queue {
- struct kernel_queue_ops ops;
- struct kernel_queue_ops ops_asic_specific;
+struct kernel_queue {
/* data */
struct kfd_dev *dev;
struct mqd_manager *mqd_mgr;
@@ -99,9 +82,4 @@ struct kernel_queue {
struct list_head list;
};
-void kernel_queue_init_cik(struct kernel_queue_ops *ops);
-void kernel_queue_init_vi(struct kernel_queue_ops *ops);
-void kernel_queue_init_v9(struct kernel_queue_ops *ops);
-void kernel_queue_init_v10(struct kernel_queue_ops *ops);
-
#endif /* KFD_KERNEL_QUEUE_H_ */
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue_v10.c b/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue_v10.c
deleted file mode 100644
index aed32ab7102e..000000000000
--- a/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue_v10.c
+++ /dev/null
@@ -1,348 +0,0 @@
-/*
- * Copyright 2018 Advanced Micro Devices, Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- */
-
-#include "kfd_kernel_queue.h"
-#include "kfd_device_queue_manager.h"
-#include "kfd_pm4_headers_ai.h"
-#include "kfd_pm4_opcodes.h"
-#include "gc/gc_10_1_0_sh_mask.h"
-
-static bool initialize_v10(struct kernel_queue *kq, struct kfd_dev *dev,
- enum kfd_queue_type type, unsigned int queue_size);
-static void uninitialize_v10(struct kernel_queue *kq);
-static void submit_packet_v10(struct kernel_queue *kq);
-
-void kernel_queue_init_v10(struct kernel_queue_ops *ops)
-{
- ops->initialize = initialize_v10;
- ops->uninitialize = uninitialize_v10;
- ops->submit_packet = submit_packet_v10;
-}
-
-static bool initialize_v10(struct kernel_queue *kq, struct kfd_dev *dev,
- enum kfd_queue_type type, unsigned int queue_size)
-{
- int retval;
-
- retval = kfd_gtt_sa_allocate(dev, PAGE_SIZE, &kq->eop_mem);
- if (retval != 0)
- return false;
-
- kq->eop_gpu_addr = kq->eop_mem->gpu_addr;
- kq->eop_kernel_addr = kq->eop_mem->cpu_ptr;
-
- memset(kq->eop_kernel_addr, 0, PAGE_SIZE);
-
- return true;
-}
-
-static void uninitialize_v10(struct kernel_queue *kq)
-{
- kfd_gtt_sa_free(kq->dev, kq->eop_mem);
-}
-
-static void submit_packet_v10(struct kernel_queue *kq)
-{
- *kq->wptr64_kernel = kq->pending_wptr64;
- write_kernel_doorbell64(kq->queue->properties.doorbell_ptr,
- kq->pending_wptr64);
-}
-
-static int pm_map_process_v10(struct packet_manager *pm,
- uint32_t *buffer, struct qcm_process_device *qpd)
-{
- struct pm4_mes_map_process *packet;
- uint64_t vm_page_table_base_addr = qpd->page_table_base;
-
- packet = (struct pm4_mes_map_process *)buffer;
- memset(buffer, 0, sizeof(struct pm4_mes_map_process));
-
- packet->header.u32All = pm_build_pm4_header(IT_MAP_PROCESS,
- sizeof(struct pm4_mes_map_process));
- packet->bitfields2.diq_enable = (qpd->is_debug) ? 1 : 0;
- packet->bitfields2.process_quantum = 1;
- packet->bitfields2.pasid = qpd->pqm->process->pasid;
- packet->bitfields14.gds_size = qpd->gds_size;
- packet->bitfields14.num_gws = qpd->num_gws;
- packet->bitfields14.num_oac = qpd->num_oac;
- packet->bitfields14.sdma_enable = 1;
-
- packet->bitfields14.num_queues = (qpd->is_debug) ? 0 : qpd->queue_count;
-
- packet->sh_mem_config = qpd->sh_mem_config;
- packet->sh_mem_bases = qpd->sh_mem_bases;
- if (qpd->tba_addr) {
- packet->sq_shader_tba_lo = lower_32_bits(qpd->tba_addr >> 8);
- packet->sq_shader_tba_hi = (1 << SQ_SHADER_TBA_HI__TRAP_EN__SHIFT) |
- upper_32_bits(qpd->tba_addr >> 8);
- packet->sq_shader_tma_lo = lower_32_bits(qpd->tma_addr >> 8);
- packet->sq_shader_tma_hi = upper_32_bits(qpd->tma_addr >> 8);
- }
-
- packet->gds_addr_lo = lower_32_bits(qpd->gds_context_area);
- packet->gds_addr_hi = upper_32_bits(qpd->gds_context_area);
-
- packet->vm_context_page_table_base_addr_lo32 =
- lower_32_bits(vm_page_table_base_addr);
- packet->vm_context_page_table_base_addr_hi32 =
- upper_32_bits(vm_page_table_base_addr);
-
- return 0;
-}
-
-static int pm_runlist_v10(struct packet_manager *pm, uint32_t *buffer,
- uint64_t ib, size_t ib_size_in_dwords, bool chain)
-{
- struct pm4_mes_runlist *packet;
-
- int concurrent_proc_cnt = 0;
- struct kfd_dev *kfd = pm->dqm->dev;
-
- /* Determine the number of processes to map together to HW:
- * it can not exceed the number of VMIDs available to the
- * scheduler, and it is determined by the smaller of the number
- * of processes in the runlist and kfd module parameter
- * hws_max_conc_proc.
- * Note: the arbitration between the number of VMIDs and
- * hws_max_conc_proc has been done in
- * kgd2kfd_device_init().
- */
- concurrent_proc_cnt = min(pm->dqm->processes_count,
- kfd->max_proc_per_quantum);
-
-
- packet = (struct pm4_mes_runlist *)buffer;
-
- memset(buffer, 0, sizeof(struct pm4_mes_runlist));
- packet->header.u32All = pm_build_pm4_header(IT_RUN_LIST,
- sizeof(struct pm4_mes_runlist));
-
- packet->bitfields4.ib_size = ib_size_in_dwords;
- packet->bitfields4.chain = chain ? 1 : 0;
- packet->bitfields4.offload_polling = 0;
- packet->bitfields4.valid = 1;
- packet->bitfields4.process_cnt = concurrent_proc_cnt;
- packet->ordinal2 = lower_32_bits(ib);
- packet->ib_base_hi = upper_32_bits(ib);
-
- return 0;
-}
-
-static int pm_map_queues_v10(struct packet_manager *pm, uint32_t *buffer,
- struct queue *q, bool is_static)
-{
- struct pm4_mes_map_queues *packet;
- bool use_static = is_static;
-
- packet = (struct pm4_mes_map_queues *)buffer;
- memset(buffer, 0, sizeof(struct pm4_mes_map_queues));
-
- packet->header.u32All = pm_build_pm4_header(IT_MAP_QUEUES,
- sizeof(struct pm4_mes_map_queues));
- packet->bitfields2.num_queues = 1;
- packet->bitfields2.queue_sel =
- queue_sel__mes_map_queues__map_to_hws_determined_queue_slots_vi;
-
- packet->bitfields2.engine_sel =
- engine_sel__mes_map_queues__compute_vi;
- packet->bitfields2.queue_type =
- queue_type__mes_map_queues__normal_compute_vi;
-
- switch (q->properties.type) {
- case KFD_QUEUE_TYPE_COMPUTE:
- if (use_static)
- packet->bitfields2.queue_type =
- queue_type__mes_map_queues__normal_latency_static_queue_vi;
- break;
- case KFD_QUEUE_TYPE_DIQ:
- packet->bitfields2.queue_type =
- queue_type__mes_map_queues__debug_interface_queue_vi;
- break;
- case KFD_QUEUE_TYPE_SDMA:
- case KFD_QUEUE_TYPE_SDMA_XGMI:
- packet->bitfields2.engine_sel = q->properties.sdma_engine_id +
- engine_sel__mes_map_queues__sdma0_vi;
- use_static = false; /* no static queues under SDMA */
- break;
- default:
- WARN(1, "queue type %d\n", q->properties.type);
- return -EINVAL;
- }
- packet->bitfields3.doorbell_offset =
- q->properties.doorbell_off;
-
- packet->mqd_addr_lo =
- lower_32_bits(q->gart_mqd_addr);
-
- packet->mqd_addr_hi =
- upper_32_bits(q->gart_mqd_addr);
-
- packet->wptr_addr_lo =
- lower_32_bits((uint64_t)q->properties.write_ptr);
-
- packet->wptr_addr_hi =
- upper_32_bits((uint64_t)q->properties.write_ptr);
-
- return 0;
-}
-
-static int pm_unmap_queues_v10(struct packet_manager *pm, uint32_t *buffer,
- enum kfd_queue_type type,
- enum kfd_unmap_queues_filter filter,
- uint32_t filter_param, bool reset,
- unsigned int sdma_engine)
-{
- struct pm4_mes_unmap_queues *packet;
-
- packet = (struct pm4_mes_unmap_queues *)buffer;
- memset(buffer, 0, sizeof(struct pm4_mes_unmap_queues));
-
- packet->header.u32All = pm_build_pm4_header(IT_UNMAP_QUEUES,
- sizeof(struct pm4_mes_unmap_queues));
- switch (type) {
- case KFD_QUEUE_TYPE_COMPUTE:
- case KFD_QUEUE_TYPE_DIQ:
- packet->bitfields2.engine_sel =
- engine_sel__mes_unmap_queues__compute;
- break;
- case KFD_QUEUE_TYPE_SDMA:
- case KFD_QUEUE_TYPE_SDMA_XGMI:
- packet->bitfields2.engine_sel =
- engine_sel__mes_unmap_queues__sdma0 + sdma_engine;
- break;
- default:
- WARN(1, "queue type %d\n", type);
- break;
- }
-
- if (reset)
- packet->bitfields2.action =
- action__mes_unmap_queues__reset_queues;
- else
- packet->bitfields2.action =
- action__mes_unmap_queues__preempt_queues;
-
- switch (filter) {
- case KFD_UNMAP_QUEUES_FILTER_SINGLE_QUEUE:
- packet->bitfields2.queue_sel =
- queue_sel__mes_unmap_queues__perform_request_on_specified_queues;
- packet->bitfields2.num_queues = 1;
- packet->bitfields3b.doorbell_offset0 = filter_param;
- break;
- case KFD_UNMAP_QUEUES_FILTER_BY_PASID:
- packet->bitfields2.queue_sel =
- queue_sel__mes_unmap_queues__perform_request_on_pasid_queues;
- packet->bitfields3a.pasid = filter_param;
- break;
- case KFD_UNMAP_QUEUES_FILTER_ALL_QUEUES:
- packet->bitfields2.queue_sel =
- queue_sel__mes_unmap_queues__unmap_all_queues;
- break;
- case KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES:
- /* in this case, we do not preempt static queues */
- packet->bitfields2.queue_sel =
- queue_sel__mes_unmap_queues__unmap_all_non_static_queues;
- break;
- default:
- WARN(1, "filter %d\n", filter);
- break;
- }
-
- return 0;
-
-}
-
-static int pm_query_status_v10(struct packet_manager *pm, uint32_t *buffer,
- uint64_t fence_address, uint32_t fence_value)
-{
- struct pm4_mes_query_status *packet;
-
- packet = (struct pm4_mes_query_status *)buffer;
- memset(buffer, 0, sizeof(struct pm4_mes_query_status));
-
-
- packet->header.u32All = pm_build_pm4_header(IT_QUERY_STATUS,
- sizeof(struct pm4_mes_query_status));
-
- packet->bitfields2.context_id = 0;
- packet->bitfields2.interrupt_sel =
- interrupt_sel__mes_query_status__completion_status;
- packet->bitfields2.command =
- command__mes_query_status__fence_only_after_write_ack;
-
- packet->addr_hi = upper_32_bits((uint64_t)fence_address);
- packet->addr_lo = lower_32_bits((uint64_t)fence_address);
- packet->data_hi = upper_32_bits((uint64_t)fence_value);
- packet->data_lo = lower_32_bits((uint64_t)fence_value);
-
- return 0;
-}
-
-
-static int pm_release_mem_v10(uint64_t gpu_addr, uint32_t *buffer)
-{
- struct pm4_mec_release_mem *packet;
-
- WARN_ON(!buffer);
-
- packet = (struct pm4_mec_release_mem *)buffer;
- memset(buffer, 0, sizeof(struct pm4_mec_release_mem));
-
- packet->header.u32All = pm_build_pm4_header(IT_RELEASE_MEM,
- sizeof(struct pm4_mec_release_mem));
-
- packet->bitfields2.event_type = CACHE_FLUSH_AND_INV_TS_EVENT;
- packet->bitfields2.event_index = event_index__mec_release_mem__end_of_pipe;
- packet->bitfields2.tcl1_action_ena = 1;
- packet->bitfields2.tc_action_ena = 1;
- packet->bitfields2.cache_policy = cache_policy__mec_release_mem__lru;
-
- packet->bitfields3.data_sel = data_sel__mec_release_mem__send_32_bit_low;
- packet->bitfields3.int_sel =
- int_sel__mec_release_mem__send_interrupt_after_write_confirm;
-
- packet->bitfields4.address_lo_32b = (gpu_addr & 0xffffffff) >> 2;
- packet->address_hi = upper_32_bits(gpu_addr);
-
- packet->data_lo = 0;
-
- return sizeof(struct pm4_mec_release_mem) / sizeof(unsigned int);
-}
-
-const struct packet_manager_funcs kfd_v10_pm_funcs = {
- .map_process = pm_map_process_v10,
- .runlist = pm_runlist_v10,
- .set_resources = pm_set_resources_vi,
- .map_queues = pm_map_queues_v10,
- .unmap_queues = pm_unmap_queues_v10,
- .query_status = pm_query_status_v10,
- .release_mem = pm_release_mem_v10,
- .map_process_size = sizeof(struct pm4_mes_map_process),
- .runlist_size = sizeof(struct pm4_mes_runlist),
- .set_resources_size = sizeof(struct pm4_mes_set_resources),
- .map_queues_size = sizeof(struct pm4_mes_map_queues),
- .unmap_queues_size = sizeof(struct pm4_mes_unmap_queues),
- .query_status_size = sizeof(struct pm4_mes_query_status),
- .release_mem_size = sizeof(struct pm4_mec_release_mem)
-};
-
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_cik.c b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_cik.c
index 28876aceb14b..19f0fe547c57 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_cik.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_cik.c
@@ -374,7 +374,6 @@ struct mqd_manager *mqd_manager_init_cik(enum KFD_MQD_TYPE type,
switch (type) {
case KFD_MQD_TYPE_CP:
- case KFD_MQD_TYPE_COMPUTE:
mqd->allocate_mqd = allocate_mqd;
mqd->init_mqd = init_mqd;
mqd->free_mqd = free_mqd;
@@ -401,7 +400,7 @@ struct mqd_manager *mqd_manager_init_cik(enum KFD_MQD_TYPE type,
#endif
break;
case KFD_MQD_TYPE_DIQ:
- mqd->allocate_mqd = allocate_hiq_mqd;
+ mqd->allocate_mqd = allocate_mqd;
mqd->init_mqd = init_mqd_hiq;
mqd->free_mqd = free_mqd;
mqd->load_mqd = load_mqd;
@@ -442,7 +441,7 @@ struct mqd_manager *mqd_manager_init_cik_hawaii(enum KFD_MQD_TYPE type,
mqd = mqd_manager_init_cik(type, dev);
if (!mqd)
return NULL;
- if ((type == KFD_MQD_TYPE_CP) || (type == KFD_MQD_TYPE_COMPUTE))
+ if (type == KFD_MQD_TYPE_CP)
mqd->update_mqd = update_mqd_hawaii;
return mqd;
}
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v10.c b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v10.c
index 4a236b2c2354..d1d68a51bfb8 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v10.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v10.c
@@ -66,6 +66,12 @@ static void update_cu_mask(struct mqd_manager *mm, void *mqd,
m->compute_static_thread_mgmt_se3);
}
+static void set_priority(struct v10_compute_mqd *m, struct queue_properties *q)
+{
+ m->cp_hqd_pipe_priority = pipe_priority_map[q->priority];
+ m->cp_hqd_queue_priority = q->priority;
+}
+
static struct kfd_mem_obj *allocate_mqd(struct kfd_dev *kfd,
struct queue_properties *q)
{
@@ -109,9 +115,6 @@ static void init_mqd(struct mqd_manager *mm, void **mqd,
1 << CP_HQD_QUANTUM__QUANTUM_SCALE__SHIFT |
10 << CP_HQD_QUANTUM__QUANTUM_DURATION__SHIFT;
- m->cp_hqd_pipe_priority = 1;
- m->cp_hqd_queue_priority = 15;
-
if (q->format == KFD_QUEUE_FORMAT_AQL) {
m->cp_hqd_aql_control =
1 << CP_HQD_AQL_CONTROL__CONTROL0__SHIFT;
@@ -150,6 +153,14 @@ static int load_mqd(struct mqd_manager *mm, void *mqd,
return r;
}
+static int hiq_load_mqd_kiq(struct mqd_manager *mm, void *mqd,
+ uint32_t pipe_id, uint32_t queue_id,
+ struct queue_properties *p, struct mm_struct *mms)
+{
+ return mm->dev->kfd2kgd->hiq_mqd_load(mm->dev->kgd, mqd, pipe_id,
+ queue_id, p->doorbell_off);
+}
+
static void update_mqd(struct mqd_manager *mm, void *mqd,
struct queue_properties *q)
{
@@ -208,11 +219,9 @@ static void update_mqd(struct mqd_manager *mm, void *mqd,
m->cp_hqd_ctx_save_control = 0;
update_cu_mask(mm, mqd, q);
+ set_priority(m, q);
- q->is_active = (q->queue_size > 0 &&
- q->queue_address != 0 &&
- q->queue_percent > 0 &&
- !q->is_evicted);
+ q->is_active = QUEUE_IS_ACTIVE(*q);
}
static int destroy_mqd(struct mqd_manager *mm, void *mqd,
@@ -247,18 +256,22 @@ static int get_wave_state(struct mqd_manager *mm, void *mqd,
{
struct v10_compute_mqd *m;
- /* Control stack is located one page after MQD. */
- void *mqd_ctl_stack = (void *)((uintptr_t)mqd + PAGE_SIZE);
-
m = get_mqd(mqd);
+ /* Control stack is written backwards, while workgroup context data
+ * is written forwards. Both start from m->cp_hqd_cntl_stack_size.
+ * Current position is at m->cp_hqd_cntl_stack_offset and
+ * m->cp_hqd_wg_state_offset, respectively.
+ */
*ctl_stack_used_size = m->cp_hqd_cntl_stack_size -
m->cp_hqd_cntl_stack_offset;
*save_area_used_size = m->cp_hqd_wg_state_offset -
m->cp_hqd_cntl_stack_size;
- if (copy_to_user(ctl_stack, mqd_ctl_stack, m->cp_hqd_cntl_stack_size))
- return -EFAULT;
+ /* Control stack is not copied to user mode for GFXv10 because
+ * it's part of the context save area that is already
+ * accessible to user mode
+ */
return 0;
}
@@ -277,18 +290,6 @@ static void init_mqd_hiq(struct mqd_manager *mm, void **mqd,
1 << CP_HQD_PQ_CONTROL__KMD_QUEUE__SHIFT;
}
-static void update_mqd_hiq(struct mqd_manager *mm, void *mqd,
- struct queue_properties *q)
-{
- struct v10_compute_mqd *m;
-
- update_mqd(mm, mqd, q);
-
- /* TODO: what's the point? update_mqd already does this. */
- m = get_mqd(mqd);
- m->cp_hqd_vmid = q->vmid;
-}
-
static void init_mqd_sdma(struct mqd_manager *mm, void **mqd,
struct kfd_mem_obj *mqd_mem_obj, uint64_t *gart_addr,
struct queue_properties *q)
@@ -340,11 +341,7 @@ static void update_mqd_sdma(struct mqd_manager *mm, void *mqd,
m->sdma_queue_id = q->sdma_queue_id;
m->sdmax_rlcx_dummy_reg = SDMA_RLC_DUMMY_DEFAULT;
-
- q->is_active = (q->queue_size > 0 &&
- q->queue_address != 0 &&
- q->queue_percent > 0 &&
- !q->is_evicted);
+ q->is_active = QUEUE_IS_ACTIVE(*q);
}
/*
@@ -392,7 +389,7 @@ struct mqd_manager *mqd_manager_init_v10(enum KFD_MQD_TYPE type,
if (WARN_ON(type >= KFD_MQD_TYPE_MAX))
return NULL;
- mqd = kzalloc(sizeof(*mqd), GFP_NOIO);
+ mqd = kzalloc(sizeof(*mqd), GFP_KERNEL);
if (!mqd)
return NULL;
@@ -400,7 +397,6 @@ struct mqd_manager *mqd_manager_init_v10(enum KFD_MQD_TYPE type,
switch (type) {
case KFD_MQD_TYPE_CP:
- case KFD_MQD_TYPE_COMPUTE:
pr_debug("%s@%i\n", __func__, __LINE__);
mqd->allocate_mqd = allocate_mqd;
mqd->init_mqd = init_mqd;
@@ -421,8 +417,8 @@ struct mqd_manager *mqd_manager_init_v10(enum KFD_MQD_TYPE type,
mqd->allocate_mqd = allocate_hiq_mqd;
mqd->init_mqd = init_mqd_hiq;
mqd->free_mqd = free_mqd_hiq_sdma;
- mqd->load_mqd = load_mqd;
- mqd->update_mqd = update_mqd_hiq;
+ mqd->load_mqd = hiq_load_mqd_kiq;
+ mqd->update_mqd = update_mqd;
mqd->destroy_mqd = destroy_mqd;
mqd->is_occupied = is_occupied;
mqd->mqd_size = sizeof(struct v10_compute_mqd);
@@ -432,11 +428,11 @@ struct mqd_manager *mqd_manager_init_v10(enum KFD_MQD_TYPE type,
pr_debug("%s@%i\n", __func__, __LINE__);
break;
case KFD_MQD_TYPE_DIQ:
- mqd->allocate_mqd = allocate_hiq_mqd;
+ mqd->allocate_mqd = allocate_mqd;
mqd->init_mqd = init_mqd_hiq;
mqd->free_mqd = free_mqd;
mqd->load_mqd = load_mqd;
- mqd->update_mqd = update_mqd_hiq;
+ mqd->update_mqd = update_mqd;
mqd->destroy_mqd = destroy_mqd;
mqd->is_occupied = is_occupied;
mqd->mqd_size = sizeof(struct v10_compute_mqd);
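
The QUEUE_IS_ACTIVE(*q) macro used in this file replaces the four-condition check that was previously open-coded in every update_mqd variant. A hedged sketch of what such a macro presumably looks like (the actual definition lives in kfd_priv.h and is not shown in these hunks):

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    struct queue_properties {
        uint64_t queue_address;
        uint32_t queue_size;
        uint32_t queue_percent;
        bool is_evicted;
    };

    /* Assumed shape of the macro; it mirrors the removed open-coded test. */
    #define QUEUE_IS_ACTIVE(q) ((q).queue_size > 0 &&     \
                                (q).queue_address != 0 && \
                                (q).queue_percent > 0 &&  \
                                !(q).is_evicted)

    int main(void)
    {
        struct queue_properties q = { .queue_address = 0x1000,
                                      .queue_size = 4096,
                                      .queue_percent = 100,
                                      .is_evicted = false };
        printf("active: %d\n", QUEUE_IS_ACTIVE(q));
        return 0;
    }
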
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c
index d3380c5bdbde..436b7f518979 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c
@@ -92,7 +92,7 @@ static struct kfd_mem_obj *allocate_mqd(struct kfd_dev *kfd,
* instead of sub-allocation function.
*/
if (kfd->cwsr_enabled && (q->type == KFD_QUEUE_TYPE_COMPUTE)) {
- mqd_mem_obj = kzalloc(sizeof(struct kfd_mem_obj), GFP_NOIO);
+ mqd_mem_obj = kzalloc(sizeof(struct kfd_mem_obj), GFP_KERNEL);
if (!mqd_mem_obj)
return NULL;
retval = amdgpu_amdkfd_alloc_gtt_mem(kfd->kgd,
@@ -191,6 +191,14 @@ static int load_mqd(struct mqd_manager *mm, void *mqd,
wptr_shift, 0, mms);
}
+static int hiq_load_mqd_kiq(struct mqd_manager *mm, void *mqd,
+ uint32_t pipe_id, uint32_t queue_id,
+ struct queue_properties *p, struct mm_struct *mms)
+{
+ return mm->dev->kfd2kgd->hiq_mqd_load(mm->dev->kgd, mqd, pipe_id,
+ queue_id, p->doorbell_off);
+}
+
static void update_mqd(struct mqd_manager *mm, void *mqd,
struct queue_properties *q)
{
@@ -302,7 +310,8 @@ static int get_wave_state(struct mqd_manager *mm, void *mqd,
*ctl_stack_used_size = m->cp_hqd_cntl_stack_size -
m->cp_hqd_cntl_stack_offset;
- *save_area_used_size = m->cp_hqd_wg_state_offset;
+ *save_area_used_size = m->cp_hqd_wg_state_offset -
+ m->cp_hqd_cntl_stack_size;
if (copy_to_user(ctl_stack, mqd_ctl_stack, m->cp_hqd_cntl_stack_size))
return -EFAULT;
@@ -324,18 +333,6 @@ static void init_mqd_hiq(struct mqd_manager *mm, void **mqd,
1 << CP_HQD_PQ_CONTROL__KMD_QUEUE__SHIFT;
}
-static void update_mqd_hiq(struct mqd_manager *mm, void *mqd,
- struct queue_properties *q)
-{
- struct v9_mqd *m;
-
- update_mqd(mm, mqd, q);
-
- /* TODO: what's the point? update_mqd already does this. */
- m = get_mqd(mqd);
- m->cp_hqd_vmid = q->vmid;
-}
-
static void init_mqd_sdma(struct mqd_manager *mm, void **mqd,
struct kfd_mem_obj *mqd_mem_obj, uint64_t *gart_addr,
struct queue_properties *q)
@@ -443,7 +440,6 @@ struct mqd_manager *mqd_manager_init_v9(enum KFD_MQD_TYPE type,
switch (type) {
case KFD_MQD_TYPE_CP:
- case KFD_MQD_TYPE_COMPUTE:
mqd->allocate_mqd = allocate_mqd;
mqd->init_mqd = init_mqd;
mqd->free_mqd = free_mqd;
@@ -461,8 +457,8 @@ struct mqd_manager *mqd_manager_init_v9(enum KFD_MQD_TYPE type,
mqd->allocate_mqd = allocate_hiq_mqd;
mqd->init_mqd = init_mqd_hiq;
mqd->free_mqd = free_mqd_hiq_sdma;
- mqd->load_mqd = load_mqd;
- mqd->update_mqd = update_mqd_hiq;
+ mqd->load_mqd = hiq_load_mqd_kiq;
+ mqd->update_mqd = update_mqd;
mqd->destroy_mqd = destroy_mqd;
mqd->is_occupied = is_occupied;
mqd->mqd_size = sizeof(struct v9_mqd);
@@ -471,11 +467,11 @@ struct mqd_manager *mqd_manager_init_v9(enum KFD_MQD_TYPE type,
#endif
break;
case KFD_MQD_TYPE_DIQ:
- mqd->allocate_mqd = allocate_hiq_mqd;
+ mqd->allocate_mqd = allocate_mqd;
mqd->init_mqd = init_mqd_hiq;
mqd->free_mqd = free_mqd;
mqd->load_mqd = load_mqd;
- mqd->update_mqd = update_mqd_hiq;
+ mqd->update_mqd = update_mqd;
mqd->destroy_mqd = destroy_mqd;
mqd->is_occupied = is_occupied;
mqd->mqd_size = sizeof(struct v9_mqd);
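
The v9 get_wave_state fix above brings the arithmetic in line with v10: the control stack grows downward from cp_hqd_cntl_stack_size toward cp_hqd_cntl_stack_offset, while workgroup state grows upward from cp_hqd_cntl_stack_size toward cp_hqd_wg_state_offset. A worked example with made-up register values:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        /* Illustrative values only. */
        uint32_t cntl_stack_size = 0x2000;   /* control stack region ends here */
        uint32_t cntl_stack_offset = 0x1800; /* current top of control stack */
        uint32_t wg_state_offset = 0x5000;   /* current end of workgroup state */

        uint32_t ctl_stack_used = cntl_stack_size - cntl_stack_offset;
        /* The old v9 code wrongly reported wg_state_offset itself. */
        uint32_t save_area_used = wg_state_offset - cntl_stack_size;

        printf("ctl stack used: 0x%x, save area used: 0x%x\n",
               ctl_stack_used, save_area_used);
        return 0;
    }
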
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_vi.c b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_vi.c
index 7d144f56f421..a5e8ff1e5945 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_vi.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_vi.c
@@ -312,11 +312,7 @@ static void init_mqd_hiq(struct mqd_manager *mm, void **mqd,
static void update_mqd_hiq(struct mqd_manager *mm, void *mqd,
struct queue_properties *q)
{
- struct vi_mqd *m;
__update_mqd(mm, mqd, q, MTYPE_UC, 0);
-
- m = get_mqd(mqd);
- m->cp_hqd_vmid = q->vmid;
}
static void init_mqd_sdma(struct mqd_manager *mm, void **mqd,
@@ -425,7 +421,6 @@ struct mqd_manager *mqd_manager_init_vi(enum KFD_MQD_TYPE type,
switch (type) {
case KFD_MQD_TYPE_CP:
- case KFD_MQD_TYPE_COMPUTE:
mqd->allocate_mqd = allocate_mqd;
mqd->init_mqd = init_mqd;
mqd->free_mqd = free_mqd;
@@ -453,7 +448,7 @@ struct mqd_manager *mqd_manager_init_vi(enum KFD_MQD_TYPE type,
#endif
break;
case KFD_MQD_TYPE_DIQ:
- mqd->allocate_mqd = allocate_hiq_mqd;
+ mqd->allocate_mqd = allocate_mqd;
mqd->init_mqd = init_mqd_hiq;
mqd->free_mqd = free_mqd;
mqd->load_mqd = load_mqd;
@@ -494,7 +489,7 @@ struct mqd_manager *mqd_manager_init_vi_tonga(enum KFD_MQD_TYPE type,
mqd = mqd_manager_init_vi(type, dev);
if (!mqd)
return NULL;
- if ((type == KFD_MQD_TYPE_CP) || (type == KFD_MQD_TYPE_COMPUTE))
+ if (type == KFD_MQD_TYPE_CP)
mqd->update_mqd = update_mqd_tonga;
return mqd;
}
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager.c
index 83ef4b3dd2fb..dc406e6dee23 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager.c
@@ -241,12 +241,10 @@ int pm_init(struct packet_manager *pm, struct device_queue_manager *dqm)
case CHIP_RAVEN:
case CHIP_RENOIR:
case CHIP_ARCTURUS:
- pm->pmf = &kfd_v9_pm_funcs;
- break;
case CHIP_NAVI10:
case CHIP_NAVI12:
case CHIP_NAVI14:
- pm->pmf = &kfd_v10_pm_funcs;
+ pm->pmf = &kfd_v9_pm_funcs;
break;
default:
WARN(1, "Unexpected ASIC family %u",
@@ -266,10 +264,10 @@ int pm_init(struct packet_manager *pm, struct device_queue_manager *dqm)
return 0;
}
-void pm_uninit(struct packet_manager *pm)
+void pm_uninit(struct packet_manager *pm, bool hanging)
{
mutex_destroy(&pm->lock);
- kernel_queue_uninit(pm->priv_queue);
+ kernel_queue_uninit(pm->priv_queue, hanging);
}
int pm_send_set_resources(struct packet_manager *pm,
@@ -280,7 +278,7 @@ int pm_send_set_resources(struct packet_manager *pm,
size = pm->pmf->set_resources_size;
mutex_lock(&pm->lock);
- pm->priv_queue->ops.acquire_packet_buffer(pm->priv_queue,
+ kq_acquire_packet_buffer(pm->priv_queue,
size / sizeof(uint32_t),
(unsigned int **)&buffer);
if (!buffer) {
@@ -291,9 +289,9 @@ int pm_send_set_resources(struct packet_manager *pm,
retval = pm->pmf->set_resources(pm, buffer, res);
if (!retval)
- pm->priv_queue->ops.submit_packet(pm->priv_queue);
+ kq_submit_packet(pm->priv_queue);
else
- pm->priv_queue->ops.rollback_packet(pm->priv_queue);
+ kq_rollback_packet(pm->priv_queue);
out:
mutex_unlock(&pm->lock);
@@ -318,7 +316,7 @@ int pm_send_runlist(struct packet_manager *pm, struct list_head *dqm_queues)
packet_size_dwords = pm->pmf->runlist_size / sizeof(uint32_t);
mutex_lock(&pm->lock);
- retval = pm->priv_queue->ops.acquire_packet_buffer(pm->priv_queue,
+ retval = kq_acquire_packet_buffer(pm->priv_queue,
packet_size_dwords, &rl_buffer);
if (retval)
goto fail_acquire_packet_buffer;
@@ -328,14 +326,14 @@ int pm_send_runlist(struct packet_manager *pm, struct list_head *dqm_queues)
if (retval)
goto fail_create_runlist;
- pm->priv_queue->ops.submit_packet(pm->priv_queue);
+ kq_submit_packet(pm->priv_queue);
mutex_unlock(&pm->lock);
return retval;
fail_create_runlist:
- pm->priv_queue->ops.rollback_packet(pm->priv_queue);
+ kq_rollback_packet(pm->priv_queue);
fail_acquire_packet_buffer:
mutex_unlock(&pm->lock);
fail_create_runlist_ib:
@@ -354,7 +352,7 @@ int pm_send_query_status(struct packet_manager *pm, uint64_t fence_address,
size = pm->pmf->query_status_size;
mutex_lock(&pm->lock);
- pm->priv_queue->ops.acquire_packet_buffer(pm->priv_queue,
+ kq_acquire_packet_buffer(pm->priv_queue,
size / sizeof(uint32_t), (unsigned int **)&buffer);
if (!buffer) {
pr_err("Failed to allocate buffer on kernel queue\n");
@@ -364,9 +362,9 @@ int pm_send_query_status(struct packet_manager *pm, uint64_t fence_address,
retval = pm->pmf->query_status(pm, buffer, fence_address, fence_value);
if (!retval)
- pm->priv_queue->ops.submit_packet(pm->priv_queue);
+ kq_submit_packet(pm->priv_queue);
else
- pm->priv_queue->ops.rollback_packet(pm->priv_queue);
+ kq_rollback_packet(pm->priv_queue);
out:
mutex_unlock(&pm->lock);
@@ -383,7 +381,7 @@ int pm_send_unmap_queue(struct packet_manager *pm, enum kfd_queue_type type,
size = pm->pmf->unmap_queues_size;
mutex_lock(&pm->lock);
- pm->priv_queue->ops.acquire_packet_buffer(pm->priv_queue,
+ kq_acquire_packet_buffer(pm->priv_queue,
size / sizeof(uint32_t), (unsigned int **)&buffer);
if (!buffer) {
pr_err("Failed to allocate buffer on kernel queue\n");
@@ -394,9 +392,9 @@ int pm_send_unmap_queue(struct packet_manager *pm, enum kfd_queue_type type,
retval = pm->pmf->unmap_queues(pm, buffer, type, filter, filter_param,
reset, sdma_engine);
if (!retval)
- pm->priv_queue->ops.submit_packet(pm->priv_queue);
+ kq_submit_packet(pm->priv_queue);
else
- pm->priv_queue->ops.rollback_packet(pm->priv_queue);
+ kq_rollback_packet(pm->priv_queue);
out:
mutex_unlock(&pm->lock);
@@ -441,7 +439,7 @@ int pm_debugfs_hang_hws(struct packet_manager *pm)
size = pm->pmf->query_status_size;
mutex_lock(&pm->lock);
- pm->priv_queue->ops.acquire_packet_buffer(pm->priv_queue,
+ kq_acquire_packet_buffer(pm->priv_queue,
size / sizeof(uint32_t), (unsigned int **)&buffer);
if (!buffer) {
pr_err("Failed to allocate buffer on kernel queue\n");
@@ -449,7 +447,7 @@ int pm_debugfs_hang_hws(struct packet_manager *pm)
goto out;
}
memset(buffer, 0x55, size);
- pm->priv_queue->ops.submit_packet(pm->priv_queue);
+ kq_submit_packet(pm->priv_queue);
pr_info("Submitting %x %x %x %x %x %x %x to HIQ to hang the HWS.",
buffer[0], buffer[1], buffer[2], buffer[3],
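
The kq_acquire_packet_buffer()/kq_submit_packet()/kq_rollback_packet() calls that replace the ops-> indirection in this file follow a simple ring-buffer protocol: acquire reserves space at the pending write pointer, submit publishes the new write pointer (and, in the kernel, rings a doorbell), and rollback restores the saved write pointer if packet construction failed. A simplified sketch of that protocol, ignoring wraparound and synchronization:

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    #define RING_DWORDS 256

    struct kq {
        uint32_t ring[RING_DWORDS];
        uint32_t wptr;         /* committed write pointer (dwords) */
        uint32_t pending_wptr; /* advanced by acquire, published by submit */
    };

    static int kq_acquire(struct kq *kq, uint32_t ndw, uint32_t **buf)
    {
        if (kq->pending_wptr + ndw > RING_DWORDS)
            return -1;                   /* real code handles wraparound */
        *buf = &kq->ring[kq->pending_wptr];
        kq->pending_wptr += ndw;
        return 0;
    }

    static void kq_submit(struct kq *kq)
    {
        kq->wptr = kq->pending_wptr;     /* real code also rings a doorbell */
    }

    static void kq_rollback(struct kq *kq)
    {
        kq->pending_wptr = kq->wptr;     /* discard the reserved space */
    }

    int main(void)
    {
        struct kq kq = { { 0 }, 0, 0 };
        uint32_t *buf;

        if (kq_acquire(&kq, 4, &buf) == 0) {
            memset(buf, 0x55, 4 * sizeof(*buf)); /* build the packet */
            kq_submit(&kq);                      /* or kq_rollback() on error */
        }
        printf("wptr = %u\n", kq.wptr);
        return 0;
    }
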
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue_v9.c b/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager_v9.c
index 9a4bafb2e175..2de01009f1b6 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue_v9.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager_v9.c
@@ -25,47 +25,7 @@
#include "kfd_device_queue_manager.h"
#include "kfd_pm4_headers_ai.h"
#include "kfd_pm4_opcodes.h"
-
-static bool initialize_v9(struct kernel_queue *kq, struct kfd_dev *dev,
- enum kfd_queue_type type, unsigned int queue_size);
-static void uninitialize_v9(struct kernel_queue *kq);
-static void submit_packet_v9(struct kernel_queue *kq);
-
-void kernel_queue_init_v9(struct kernel_queue_ops *ops)
-{
- ops->initialize = initialize_v9;
- ops->uninitialize = uninitialize_v9;
- ops->submit_packet = submit_packet_v9;
-}
-
-static bool initialize_v9(struct kernel_queue *kq, struct kfd_dev *dev,
- enum kfd_queue_type type, unsigned int queue_size)
-{
- int retval;
-
- retval = kfd_gtt_sa_allocate(dev, PAGE_SIZE, &kq->eop_mem);
- if (retval)
- return false;
-
- kq->eop_gpu_addr = kq->eop_mem->gpu_addr;
- kq->eop_kernel_addr = kq->eop_mem->cpu_ptr;
-
- memset(kq->eop_kernel_addr, 0, PAGE_SIZE);
-
- return true;
-}
-
-static void uninitialize_v9(struct kernel_queue *kq)
-{
- kfd_gtt_sa_free(kq->dev, kq->eop_mem);
-}
-
-static void submit_packet_v9(struct kernel_queue *kq)
-{
- *kq->wptr64_kernel = kq->pending_wptr64;
- write_kernel_doorbell64(kq->queue->properties.doorbell_ptr,
- kq->pending_wptr64);
-}
+#include "gc/gc_10_1_0_sh_mask.h"
static int pm_map_process_v9(struct packet_manager *pm,
uint32_t *buffer, struct qcm_process_device *qpd)
@@ -90,10 +50,17 @@ static int pm_map_process_v9(struct packet_manager *pm,
packet->sh_mem_config = qpd->sh_mem_config;
packet->sh_mem_bases = qpd->sh_mem_bases;
- packet->sq_shader_tba_lo = lower_32_bits(qpd->tba_addr >> 8);
- packet->sq_shader_tba_hi = upper_32_bits(qpd->tba_addr >> 8);
- packet->sq_shader_tma_lo = lower_32_bits(qpd->tma_addr >> 8);
- packet->sq_shader_tma_hi = upper_32_bits(qpd->tma_addr >> 8);
+ if (qpd->tba_addr) {
+ packet->sq_shader_tba_lo = lower_32_bits(qpd->tba_addr >> 8);
+ /* On GFX9, unlike GFX10, bit TRAP_EN of SQ_SHADER_TBA_HI is
+ * not defined, so setting it won't do any harm.
+ */
+ packet->sq_shader_tba_hi = upper_32_bits(qpd->tba_addr >> 8)
+ | 1 << SQ_SHADER_TBA_HI__TRAP_EN__SHIFT;
+
+ packet->sq_shader_tma_lo = lower_32_bits(qpd->tma_addr >> 8);
+ packet->sq_shader_tma_hi = upper_32_bits(qpd->tma_addr >> 8);
+ }
packet->gds_addr_lo = lower_32_bits(qpd->gds_context_area);
packet->gds_addr_hi = upper_32_bits(qpd->gds_context_area);
@@ -341,35 +308,6 @@ static int pm_query_status_v9(struct packet_manager *pm, uint32_t *buffer,
return 0;
}
-
-static int pm_release_mem_v9(uint64_t gpu_addr, uint32_t *buffer)
-{
- struct pm4_mec_release_mem *packet;
-
- packet = (struct pm4_mec_release_mem *)buffer;
- memset(buffer, 0, sizeof(struct pm4_mec_release_mem));
-
- packet->header.u32All = pm_build_pm4_header(IT_RELEASE_MEM,
- sizeof(struct pm4_mec_release_mem));
-
- packet->bitfields2.event_type = CACHE_FLUSH_AND_INV_TS_EVENT;
- packet->bitfields2.event_index = event_index__mec_release_mem__end_of_pipe;
- packet->bitfields2.tcl1_action_ena = 1;
- packet->bitfields2.tc_action_ena = 1;
- packet->bitfields2.cache_policy = cache_policy__mec_release_mem__lru;
-
- packet->bitfields3.data_sel = data_sel__mec_release_mem__send_32_bit_low;
- packet->bitfields3.int_sel =
- int_sel__mec_release_mem__send_interrupt_after_write_confirm;
-
- packet->bitfields4.address_lo_32b = (gpu_addr & 0xffffffff) >> 2;
- packet->address_hi = upper_32_bits(gpu_addr);
-
- packet->data_lo = 0;
-
- return 0;
-}
-
const struct packet_manager_funcs kfd_v9_pm_funcs = {
.map_process = pm_map_process_v9,
.runlist = pm_runlist_v9,
@@ -377,12 +315,12 @@ const struct packet_manager_funcs kfd_v9_pm_funcs = {
.map_queues = pm_map_queues_v9,
.unmap_queues = pm_unmap_queues_v9,
.query_status = pm_query_status_v9,
- .release_mem = pm_release_mem_v9,
+ .release_mem = NULL,
.map_process_size = sizeof(struct pm4_mes_map_process),
.runlist_size = sizeof(struct pm4_mes_runlist),
.set_resources_size = sizeof(struct pm4_mes_set_resources),
.map_queues_size = sizeof(struct pm4_mes_map_queues),
.unmap_queues_size = sizeof(struct pm4_mes_unmap_queues),
.query_status_size = sizeof(struct pm4_mes_query_status),
- .release_mem_size = sizeof(struct pm4_mec_release_mem)
+ .release_mem_size = 0,
};
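
Programming the trap-handler base address in pm_map_process_v9() above splits a 64-bit, 256-byte-aligned address across two 32-bit registers, with TRAP_EN carried in a spare high bit. A standalone sketch of the packing; the TRAP_EN bit position is assumed to be bit 31 here purely for illustration, the real value comes from gc_10_1_0_sh_mask.h:

    #include <stdint.h>
    #include <stdio.h>

    /* Assumed bit position, for illustration only. */
    #define SQ_SHADER_TBA_HI__TRAP_EN__SHIFT 31

    static uint32_t lower_32_bits(uint64_t v) { return (uint32_t)v; }
    static uint32_t upper_32_bits(uint64_t v) { return (uint32_t)(v >> 32); }

    int main(void)
    {
        uint64_t tba_addr = 0x0000123456789A00ULL; /* 256-byte aligned */
        uint32_t tba_lo = lower_32_bits(tba_addr >> 8);
        uint32_t tba_hi = upper_32_bits(tba_addr >> 8)
                          | 1u << SQ_SHADER_TBA_HI__TRAP_EN__SHIFT;

        printf("TBA_LO=0x%08x TBA_HI=0x%08x\n", tba_lo, tba_hi);
        return 0;
    }
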
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue_vi.c b/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager_vi.c
index 2adaf40027eb..bed4d0ccb6b1 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue_vi.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager_vi.c
@@ -26,47 +26,6 @@
#include "kfd_pm4_headers_vi.h"
#include "kfd_pm4_opcodes.h"
-static bool initialize_vi(struct kernel_queue *kq, struct kfd_dev *dev,
- enum kfd_queue_type type, unsigned int queue_size);
-static void uninitialize_vi(struct kernel_queue *kq);
-static void submit_packet_vi(struct kernel_queue *kq);
-
-void kernel_queue_init_vi(struct kernel_queue_ops *ops)
-{
- ops->initialize = initialize_vi;
- ops->uninitialize = uninitialize_vi;
- ops->submit_packet = submit_packet_vi;
-}
-
-static bool initialize_vi(struct kernel_queue *kq, struct kfd_dev *dev,
- enum kfd_queue_type type, unsigned int queue_size)
-{
- int retval;
-
- retval = kfd_gtt_sa_allocate(dev, PAGE_SIZE, &kq->eop_mem);
- if (retval != 0)
- return false;
-
- kq->eop_gpu_addr = kq->eop_mem->gpu_addr;
- kq->eop_kernel_addr = kq->eop_mem->cpu_ptr;
-
- memset(kq->eop_kernel_addr, 0, PAGE_SIZE);
-
- return true;
-}
-
-static void uninitialize_vi(struct kernel_queue *kq)
-{
- kfd_gtt_sa_free(kq->dev, kq->eop_mem);
-}
-
-static void submit_packet_vi(struct kernel_queue *kq)
-{
- *kq->wptr_kernel = kq->pending_wptr;
- write_kernel_doorbell(kq->queue->properties.doorbell_ptr,
- kq->pending_wptr);
-}
-
unsigned int pm_build_pm4_header(unsigned int opcode, size_t packet_size)
{
union PM4_MES_TYPE_3_HEADER header;
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
index 060a9e8b301e..6af1b5881f43 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
@@ -59,24 +59,21 @@
* NOTE: struct vm_area_struct.vm_pgoff uses offset in pages. Hence, these
* defines are w.r.t to PAGE_SIZE
*/
-#define KFD_MMAP_TYPE_SHIFT (62 - PAGE_SHIFT)
+#define KFD_MMAP_TYPE_SHIFT 62
#define KFD_MMAP_TYPE_MASK (0x3ULL << KFD_MMAP_TYPE_SHIFT)
#define KFD_MMAP_TYPE_DOORBELL (0x3ULL << KFD_MMAP_TYPE_SHIFT)
#define KFD_MMAP_TYPE_EVENTS (0x2ULL << KFD_MMAP_TYPE_SHIFT)
#define KFD_MMAP_TYPE_RESERVED_MEM (0x1ULL << KFD_MMAP_TYPE_SHIFT)
#define KFD_MMAP_TYPE_MMIO (0x0ULL << KFD_MMAP_TYPE_SHIFT)
-#define KFD_MMAP_GPU_ID_SHIFT (46 - PAGE_SHIFT)
+#define KFD_MMAP_GPU_ID_SHIFT 46
#define KFD_MMAP_GPU_ID_MASK (((1ULL << KFD_GPU_ID_HASH_WIDTH) - 1) \
<< KFD_MMAP_GPU_ID_SHIFT)
#define KFD_MMAP_GPU_ID(gpu_id) ((((uint64_t)gpu_id) << KFD_MMAP_GPU_ID_SHIFT)\
& KFD_MMAP_GPU_ID_MASK)
-#define KFD_MMAP_GPU_ID_GET(offset) ((offset & KFD_MMAP_GPU_ID_MASK) \
+#define KFD_MMAP_GET_GPU_ID(offset) ((offset & KFD_MMAP_GPU_ID_MASK) \
>> KFD_MMAP_GPU_ID_SHIFT)
-#define KFD_MMAP_OFFSET_VALUE_MASK (0x3FFFFFFFFFFFULL >> PAGE_SHIFT)
-#define KFD_MMAP_OFFSET_VALUE_GET(offset) (offset & KFD_MMAP_OFFSET_VALUE_MASK)
-
/*
* When working with cp scheduler we should assign the HIQ manually or via
* the amdgpu driver to a fixed hqd slot, here are the fixed HIQ hqd slot
@@ -238,9 +235,10 @@ struct kfd_dev {
* KFD. It is aligned for mapping
* into user mode
*/
- size_t doorbell_id_offset; /* Doorbell offset (from KFD doorbell
- * to HW doorbell, GFX reserved some
- * at the start)
+ size_t doorbell_base_dw_offset; /* Offset from the start of the PCI
+ * doorbell BAR to the first KFD
+ * doorbell in dwords. GFX reserves
+ * the segment before this offset.
*/
u32 __iomem *doorbell_kernel_ptr; /* This is a pointer for a doorbells
* page used by kernel queue
@@ -510,8 +508,7 @@ struct queue {
* Please read the kfd_mqd_manager.h description.
*/
enum KFD_MQD_TYPE {
- KFD_MQD_TYPE_COMPUTE = 0, /* for no cp scheduling */
- KFD_MQD_TYPE_HIQ, /* for hiq */
+ KFD_MQD_TYPE_HIQ = 0, /* for hiq */
KFD_MQD_TYPE_CP, /* for cp queues and diq */
KFD_MQD_TYPE_SDMA, /* for sdma queues */
KFD_MQD_TYPE_DIQ, /* for diq */
@@ -818,7 +815,7 @@ void kfd_release_kernel_doorbell(struct kfd_dev *kfd, u32 __iomem *db_addr);
u32 read_kernel_doorbell(u32 __iomem *db);
void write_kernel_doorbell(void __iomem *db, u32 value);
void write_kernel_doorbell64(void __iomem *db, u64 value);
-unsigned int kfd_doorbell_id_to_offset(struct kfd_dev *kfd,
+unsigned int kfd_get_doorbell_dw_offset_in_bar(struct kfd_dev *kfd,
struct kfd_process *process,
unsigned int doorbell_id);
phys_addr_t kfd_get_process_doorbells(struct kfd_dev *dev,
@@ -886,7 +883,7 @@ struct device_queue_manager *device_queue_manager_init(struct kfd_dev *dev);
void device_queue_manager_uninit(struct device_queue_manager *dqm);
struct kernel_queue *kernel_queue_init(struct kfd_dev *dev,
enum kfd_queue_type type);
-void kernel_queue_uninit(struct kernel_queue *kq);
+void kernel_queue_uninit(struct kernel_queue *kq, bool hanging);
int kfd_process_vm_fault(struct device_queue_manager *dqm, unsigned int pasid);
/* Process Queue Manager */
@@ -904,7 +901,8 @@ int pqm_create_queue(struct process_queue_manager *pqm,
struct kfd_dev *dev,
struct file *f,
struct queue_properties *properties,
- unsigned int *qid);
+ unsigned int *qid,
+ uint32_t *p_doorbell_offset_in_process);
int pqm_destroy_queue(struct process_queue_manager *pqm, unsigned int qid);
int pqm_update_queue(struct process_queue_manager *pqm, unsigned int qid,
struct queue_properties *p);
@@ -972,10 +970,9 @@ struct packet_manager_funcs {
extern const struct packet_manager_funcs kfd_vi_pm_funcs;
extern const struct packet_manager_funcs kfd_v9_pm_funcs;
-extern const struct packet_manager_funcs kfd_v10_pm_funcs;
int pm_init(struct packet_manager *pm, struct device_queue_manager *dqm);
-void pm_uninit(struct packet_manager *pm);
+void pm_uninit(struct packet_manager *pm, bool hanging);
int pm_send_set_resources(struct packet_manager *pm,
struct scheduling_resources *res);
int pm_send_runlist(struct packet_manager *pm, struct list_head *dqm_queues);
@@ -991,9 +988,6 @@ void pm_release_ib(struct packet_manager *pm);
/* Following PM funcs can be shared among VI and AI */
unsigned int pm_build_pm4_header(unsigned int opcode, size_t packet_size);
-int pm_set_resources_vi(struct packet_manager *pm, uint32_t *buffer,
- struct scheduling_resources *res);
-
uint64_t kfd_get_number_elems(struct kfd_dev *kfd);
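
The mmap-offset rework above stops pre-shifting by PAGE_SHIFT: the type and GPU ID now live at fixed bit positions of the 64-bit file offset itself, and the mmap handler recovers them from vm_pgoff << PAGE_SHIFT. A sketch of the encode/decode round trip using the new defines; KFD_GPU_ID_HASH_WIDTH is assumed to be 16 here for the sake of a self-contained example:

    #include <stdint.h>
    #include <stdio.h>

    #define KFD_GPU_ID_HASH_WIDTH 16 /* assumption for this sketch */

    #define KFD_MMAP_TYPE_SHIFT 62
    #define KFD_MMAP_TYPE_MASK     (0x3ULL << KFD_MMAP_TYPE_SHIFT)
    #define KFD_MMAP_TYPE_DOORBELL (0x3ULL << KFD_MMAP_TYPE_SHIFT)

    #define KFD_MMAP_GPU_ID_SHIFT 46
    #define KFD_MMAP_GPU_ID_MASK (((1ULL << KFD_GPU_ID_HASH_WIDTH) - 1) \
                                  << KFD_MMAP_GPU_ID_SHIFT)
    #define KFD_MMAP_GPU_ID(gpu_id) ((((uint64_t)(gpu_id)) << KFD_MMAP_GPU_ID_SHIFT) \
                                     & KFD_MMAP_GPU_ID_MASK)
    #define KFD_MMAP_GET_GPU_ID(offset) (((offset) & KFD_MMAP_GPU_ID_MASK) \
                                         >> KFD_MMAP_GPU_ID_SHIFT)

    int main(void)
    {
        uint64_t offset = KFD_MMAP_TYPE_DOORBELL | KFD_MMAP_GPU_ID(0xBEEF);

        printf("type bits: %llu, gpu_id: 0x%llx\n",
               (unsigned long long)((offset & KFD_MMAP_TYPE_MASK)
                                    >> KFD_MMAP_TYPE_SHIFT),
               (unsigned long long)KFD_MMAP_GET_GPU_ID(offset));
        return 0;
    }
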
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_process.c b/drivers/gpu/drm/amd/amdkfd/kfd_process.c
index 10f9af5784f2..25b90f70aecd 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_process.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_process.c
@@ -32,6 +32,7 @@
#include <linux/mman.h>
#include <linux/file.h>
#include "amdgpu_amdkfd.h"
+#include "amdgpu.h"
struct mm_struct;
@@ -324,6 +325,8 @@ struct kfd_process *kfd_create_process(struct file *filep)
(int)process->lead_thread->pid);
}
out:
+ if (!IS_ERR(process))
+ kref_get(&process->ref);
mutex_unlock(&kfd_processes_mutex);
return process;
@@ -560,8 +563,7 @@ static int kfd_process_init_cwsr_apu(struct kfd_process *p, struct file *filep)
if (!dev->cwsr_enabled || qpd->cwsr_kaddr || qpd->cwsr_base)
continue;
- offset = (KFD_MMAP_TYPE_RESERVED_MEM | KFD_MMAP_GPU_ID(dev->id))
- << PAGE_SHIFT;
+ offset = KFD_MMAP_TYPE_RESERVED_MEM | KFD_MMAP_GPU_ID(dev->id);
qpd->tba_addr = (int64_t)vm_mmap(filep, 0,
KFD_CWSR_TBA_TMA_SIZE, PROT_READ | PROT_EXEC,
MAP_SHARED, offset);
@@ -1151,16 +1153,17 @@ int kfd_reserved_mem_mmap(struct kfd_dev *dev, struct kfd_process *process,
void kfd_flush_tlb(struct kfd_process_device *pdd)
{
struct kfd_dev *dev = pdd->dev;
- const struct kfd2kgd_calls *f2g = dev->kfd2kgd;
if (dev->dqm->sched_policy == KFD_SCHED_POLICY_NO_HWS) {
/* Nothing to flush until a VMID is assigned, which
* only happens when the first queue is created.
*/
if (pdd->qpd.vmid)
- f2g->invalidate_tlbs_vmid(dev->kgd, pdd->qpd.vmid);
+ amdgpu_amdkfd_flush_gpu_tlb_vmid(dev->kgd,
+ pdd->qpd.vmid);
} else {
- f2g->invalidate_tlbs(dev->kgd, pdd->process->pasid);
+ amdgpu_amdkfd_flush_gpu_tlb_pasid(dev->kgd,
+ pdd->process->pasid);
}
}
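
The kref_get() added to kfd_create_process() above means the function now returns with an extra reference that the caller owns and must eventually drop. A userspace sketch of the same get-on-return pattern with C11 atomics (kref itself is a kernel type, so the names here are stand-ins):

    #include <stdatomic.h>
    #include <stdio.h>
    #include <stdlib.h>

    struct process {
        atomic_int ref;
    };

    static struct process *process_create(void)
    {
        struct process *p = malloc(sizeof(*p));
        if (!p)
            return NULL;
        atomic_init(&p->ref, 1);      /* creation reference */
        atomic_fetch_add(&p->ref, 1); /* caller's reference, as in the hunk */
        return p;
    }

    static void process_put(struct process *p)
    {
        if (atomic_fetch_sub(&p->ref, 1) == 1)
            free(p); /* last reference dropped */
    }

    int main(void)
    {
        struct process *p = process_create();
        if (!p)
            return 1;
        process_put(p); /* caller is done */
        process_put(p); /* teardown drops the creation reference */
        return 0;
    }
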
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c
index 2659d226c056..31fcd1b51f00 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c
@@ -162,7 +162,7 @@ void pqm_uninit(struct process_queue_manager *pqm)
pqm->queue_slot_bitmap = NULL;
}
-static int create_cp_queue(struct process_queue_manager *pqm,
+static int init_user_queue(struct process_queue_manager *pqm,
struct kfd_dev *dev, struct queue **q,
struct queue_properties *q_properties,
struct file *f, unsigned int qid)
@@ -192,7 +192,8 @@ int pqm_create_queue(struct process_queue_manager *pqm,
struct kfd_dev *dev,
struct file *f,
struct queue_properties *properties,
- unsigned int *qid)
+ unsigned int *qid,
+ uint32_t *p_doorbell_offset_in_process)
{
int retval;
struct kfd_process_device *pdd;
@@ -250,7 +251,7 @@ int pqm_create_queue(struct process_queue_manager *pqm,
goto err_create_queue;
}
- retval = create_cp_queue(pqm, dev, &q, properties, f, *qid);
+ retval = init_user_queue(pqm, dev, &q, properties, f, *qid);
if (retval != 0)
goto err_create_queue;
pqn->q = q;
@@ -271,7 +272,7 @@ int pqm_create_queue(struct process_queue_manager *pqm,
goto err_create_queue;
}
- retval = create_cp_queue(pqm, dev, &q, properties, f, *qid);
+ retval = init_user_queue(pqm, dev, &q, properties, f, *qid);
if (retval != 0)
goto err_create_queue;
pqn->q = q;
@@ -303,12 +304,15 @@ int pqm_create_queue(struct process_queue_manager *pqm,
goto err_create_queue;
}
- if (q)
+ if (q && p_doorbell_offset_in_process)
/* Return the doorbell offset within the doorbell page
* to the caller so it can be passed up to user mode
* (in bytes).
+ * There are always 1024 doorbells per process, so in the case
+ * of 8-byte doorbells, there are two doorbell pages per
+ * process.
*/
- properties->doorbell_off =
+ *p_doorbell_offset_in_process =
(q->properties.doorbell_off * sizeof(uint32_t)) &
(kfd_doorbell_process_slice(dev) - 1);
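
The doorbell offset returned to user mode above converts a dword-granular doorbell index to a byte offset, then masks it so it stays inside the per-process doorbell slice, which is a power of two. A small worked sketch; the 8 KiB slice size is an assumption matching the two-pages-of-8-byte-doorbells comment:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint32_t doorbell_off = 0x1042; /* dword index, illustrative */
        uint32_t slice = 2 * 4096;      /* assumed per-process slice size */

        /* Byte offset within the process's doorbell pages. */
        uint32_t off = (uint32_t)((doorbell_off * sizeof(uint32_t))
                                  & (slice - 1));

        printf("doorbell byte offset in process: 0x%x\n", off);
        return 0;
    }
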
@@ -370,7 +374,7 @@ int pqm_destroy_queue(struct process_queue_manager *pqm, unsigned int qid)
/* destroy kernel queue (DIQ) */
dqm = pqn->kq->dev->dqm;
dqm->ops.destroy_kernel_queue(dqm, pqn->kq, &pdd->qpd);
- kernel_queue_uninit(pqn->kq);
+ kernel_queue_uninit(pqn->kq, false);
}
if (pqn->q) {
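
The new bool hanging parameter threaded through pm_uninit() and kernel_queue_uninit() tells teardown whether the hardware queue is known to be hung; skipping the hardware destroy step in that case avoids waiting on a scheduler that will never respond. A sketch of the control flow with illustrative names:

    #include <stdbool.h>
    #include <stdio.h>

    static void destroy_hw_queue(void)      { printf("waiting for HW ack\n"); }
    static void free_queue_resources(void)  { printf("freeing memory\n"); }

    static void kernel_queue_uninit(bool hanging)
    {
        if (!hanging)
            destroy_hw_queue(); /* only touch HW if it can still respond */
        free_queue_resources();
    }

    int main(void)
    {
        kernel_queue_uninit(false); /* normal teardown */
        kernel_queue_uninit(true);  /* HWS hang: skip the HW handshake */
        return 0;
    }
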
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_topology.c b/drivers/gpu/drm/amd/amdkfd/kfd_topology.c
index 69bd0628fdc6..203c823d65f1 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_topology.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_topology.c
@@ -486,6 +486,10 @@ static ssize_t node_show(struct kobject *kobj, struct attribute *attr,
dev->node_props.num_sdma_engines);
sysfs_show_32bit_prop(buffer, "num_sdma_xgmi_engines",
dev->node_props.num_sdma_xgmi_engines);
+ sysfs_show_32bit_prop(buffer, "num_sdma_queues_per_engine",
+ dev->node_props.num_sdma_queues_per_engine);
+ sysfs_show_32bit_prop(buffer, "num_cp_queues",
+ dev->node_props.num_cp_queues);
if (dev->gpu) {
log_max_watch_addr =
@@ -1309,9 +1313,12 @@ int kfd_topology_add_device(struct kfd_dev *gpu)
dev->node_props.num_sdma_engines = gpu->device_info->num_sdma_engines;
dev->node_props.num_sdma_xgmi_engines =
gpu->device_info->num_xgmi_sdma_engines;
+ dev->node_props.num_sdma_queues_per_engine =
+ gpu->device_info->num_sdma_queues_per_engine;
dev->node_props.num_gws = (hws_gws_support &&
dev->gpu->dqm->sched_policy != KFD_SCHED_POLICY_NO_HWS) ?
amdgpu_amdkfd_get_num_gws(dev->gpu->kgd) : 0;
+ dev->node_props.num_cp_queues = get_queues_num(dev->gpu->dqm);
kfd_fill_mem_clk_max_info(dev);
kfd_fill_iolink_non_crat_info(dev);
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_topology.h b/drivers/gpu/drm/amd/amdkfd/kfd_topology.h
index 15843e0fc756..74e9b1682af8 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_topology.h
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_topology.h
@@ -81,6 +81,8 @@ struct kfd_node_properties {
int32_t drm_render_minor;
uint32_t num_sdma_engines;
uint32_t num_sdma_xgmi_engines;
+ uint32_t num_sdma_queues_per_engine;
+ uint32_t num_cp_queues;
char name[KFD_TOPOLOGY_PUBLIC_NAME_SIZE];
};
diff --git a/drivers/gpu/drm/amd/display/Kconfig b/drivers/gpu/drm/amd/display/Kconfig
index ae161fe86ebb..87858bc57e64 100644
--- a/drivers/gpu/drm/amd/display/Kconfig
+++ b/drivers/gpu/drm/amd/display/Kconfig
@@ -6,43 +6,16 @@ config DRM_AMD_DC
bool "AMD DC - Enable new display engine"
default y
select SND_HDA_COMPONENT if SND_HDA_CORE
- select DRM_AMD_DC_DCN1_0 if X86 && !(KCOV_INSTRUMENT_ALL && KCOV_ENABLE_COMPARISONS)
+ select DRM_AMD_DC_DCN if (X86 || PPC64) && !(KCOV_INSTRUMENT_ALL && KCOV_ENABLE_COMPARISONS)
help
Choose this option if you want to use the new display engine
support for AMDGPU. This adds required support for Vega and
Raven ASICs.
-config DRM_AMD_DC_DCN1_0
+config DRM_AMD_DC_DCN
def_bool n
help
- RV family support for display engine
-
-config DRM_AMD_DC_DCN2_0
- bool "DCN 2.0 family"
- default y
- depends on DRM_AMD_DC && X86
- depends on DRM_AMD_DC_DCN1_0
- help
- Choose this option if you want to have
- Navi support for display engine
-
-config DRM_AMD_DC_DCN2_1
- bool "DCN 2.1 family"
- depends on DRM_AMD_DC && X86
- depends on DRM_AMD_DC_DCN2_0
- help
- Choose this option if you want to have
- Renoir support for display engine
-
-config DRM_AMD_DC_DSC_SUPPORT
- bool "DSC support"
- default y
- depends on DRM_AMD_DC && X86
- depends on DRM_AMD_DC_DCN1_0
- depends on DRM_AMD_DC_DCN2_0
- help
- Choose this option if you want to have
- Dynamic Stream Compression support
+ Raven, Navi and Renoir family support for display engine
config DRM_AMD_DC_HDCP
bool "Enable HDCP support in DC"
diff --git a/drivers/gpu/drm/amd/display/Makefile b/drivers/gpu/drm/amd/display/Makefile
index 36b3d6a5d04d..2633de77de5e 100644
--- a/drivers/gpu/drm/amd/display/Makefile
+++ b/drivers/gpu/drm/amd/display/Makefile
@@ -34,6 +34,8 @@ subdir-ccflags-y += -I$(FULL_AMD_DISPLAY_PATH)/modules/freesync
subdir-ccflags-y += -I$(FULL_AMD_DISPLAY_PATH)/modules/color
subdir-ccflags-y += -I$(FULL_AMD_DISPLAY_PATH)/modules/info_packet
subdir-ccflags-y += -I$(FULL_AMD_DISPLAY_PATH)/modules/power
+subdir-ccflags-y += -I$(FULL_AMD_DISPLAY_PATH)/dmub/inc
+
ifdef CONFIG_DRM_AMD_DC_HDCP
subdir-ccflags-y += -I$(FULL_AMD_DISPLAY_PATH)/modules/hdcp
endif
@@ -41,7 +43,7 @@ endif
#TODO: remove when Timing Sync feature is complete
subdir-ccflags-y += -DBUILD_FEATURE_TIMING_SYNC=0
-DAL_LIBS = amdgpu_dm dc modules/freesync modules/color modules/info_packet modules/power
+DAL_LIBS = amdgpu_dm dc modules/freesync modules/color modules/info_packet modules/power dmub/src
ifdef CONFIG_DRM_AMD_DC_HDCP
DAL_LIBS += modules/hdcp
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
index 803e59d97411..9402374d2466 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
@@ -30,6 +30,10 @@
#include "dc.h"
#include "dc/inc/core_types.h"
#include "dal_asic_id.h"
+#include "dmub/inc/dmub_srv.h"
+#include "dc/inc/hw/dmcu.h"
+#include "dc/inc/hw/abm.h"
+#include "dc/dc_dmub_srv.h"
#include "vid.h"
#include "amdgpu.h"
@@ -39,6 +43,7 @@
#include "amdgpu_dm.h"
#ifdef CONFIG_DRM_AMD_DC_HDCP
#include "amdgpu_dm_hdcp.h"
+#include <drm/drm_hdcp.h>
#endif
#include "amdgpu_pm.h"
@@ -72,7 +77,7 @@
#include <drm/drm_audio_component.h>
#include <drm/drm_hdcp.h>
-#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
+#if defined(CONFIG_DRM_AMD_DC_DCN)
#include "ivsrcid/dcn/irqsrcs_dcn_1_0.h"
#include "dcn/dcn_1_0_offset.h"
@@ -87,9 +92,18 @@
#include "modules/power/power_helpers.h"
#include "modules/inc/mod_info_packet.h"
+#define FIRMWARE_RENOIR_DMUB "amdgpu/renoir_dmcub.bin"
+MODULE_FIRMWARE(FIRMWARE_RENOIR_DMUB);
+
#define FIRMWARE_RAVEN_DMCU "amdgpu/raven_dmcu.bin"
MODULE_FIRMWARE(FIRMWARE_RAVEN_DMCU);
+/* Number of bytes in PSP header for firmware. */
+#define PSP_HEADER_BYTES 0x100
+
+/* Number of bytes in PSP footer for firmware. */
+#define PSP_FOOTER_BYTES 0x100
+
/**
* DOC: overview
*
@@ -478,6 +492,70 @@ static void dm_crtc_high_irq(void *interrupt_params)
}
}
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+/**
+ * dm_dcn_crtc_high_irq() - Handles VStartup interrupt for DCN generation ASICs
+ * @interrupt_params: interrupt parameters
+ *
+ * Notify DRM's vblank event handler at VSTARTUP
+ *
+ * Unlike DCE hardware, we trigger the handler at VSTARTUP, at which:
+ * * We are close enough to VUPDATE - the point of no return for hw
+ * * We are in the fixed portion of variable front porch when vrr is enabled
+ * * We are before VUPDATE, where double-buffered vrr registers are swapped
+ *
+ * It is therefore the correct place to signal vblank, send user flip events,
+ * and update VRR.
+ */
+static void dm_dcn_crtc_high_irq(void *interrupt_params)
+{
+ struct common_irq_params *irq_params = interrupt_params;
+ struct amdgpu_device *adev = irq_params->adev;
+ struct amdgpu_crtc *acrtc;
+ struct dm_crtc_state *acrtc_state;
+ unsigned long flags;
+
+ acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VBLANK);
+
+ if (!acrtc)
+ return;
+
+ acrtc_state = to_dm_crtc_state(acrtc->base.state);
+
+ DRM_DEBUG_DRIVER("crtc:%d, vupdate-vrr:%d\n", acrtc->crtc_id,
+ amdgpu_dm_vrr_active(acrtc_state));
+
+ amdgpu_dm_crtc_handle_crc_irq(&acrtc->base);
+ drm_crtc_handle_vblank(&acrtc->base);
+
+ spin_lock_irqsave(&adev->ddev->event_lock, flags);
+
+ if (acrtc_state->vrr_params.supported &&
+ acrtc_state->freesync_config.state == VRR_STATE_ACTIVE_VARIABLE) {
+ mod_freesync_handle_v_update(
+ adev->dm.freesync_module,
+ acrtc_state->stream,
+ &acrtc_state->vrr_params);
+
+ dc_stream_adjust_vmin_vmax(
+ adev->dm.dc,
+ acrtc_state->stream,
+ &acrtc_state->vrr_params.adjust);
+ }
+
+ if (acrtc->pflip_status == AMDGPU_FLIP_SUBMITTED) {
+ if (acrtc->event) {
+ drm_crtc_send_vblank_event(&acrtc->base, acrtc->event);
+ acrtc->event = NULL;
+ drm_crtc_vblank_put(&acrtc->base);
+ }
+ acrtc->pflip_status = AMDGPU_FLIP_NONE;
+ }
+
+ spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
+}
+#endif
+
static int dm_set_clockgating_state(void *handle,
enum amd_clockgating_state state)
{
@@ -667,12 +745,126 @@ void amdgpu_dm_audio_eld_notify(struct amdgpu_device *adev, int pin)
}
}
+static int dm_dmub_hw_init(struct amdgpu_device *adev)
+{
+ const struct dmcub_firmware_header_v1_0 *hdr;
+ struct dmub_srv *dmub_srv = adev->dm.dmub_srv;
+ struct dmub_srv_fb_info *fb_info = adev->dm.dmub_fb_info;
+ const struct firmware *dmub_fw = adev->dm.dmub_fw;
+ struct dmcu *dmcu = adev->dm.dc->res_pool->dmcu;
+ struct abm *abm = adev->dm.dc->res_pool->abm;
+ struct dmub_srv_hw_params hw_params;
+ enum dmub_status status;
+ const unsigned char *fw_inst_const, *fw_bss_data;
+ uint32_t i, fw_inst_const_size, fw_bss_data_size;
+ bool has_hw_support;
+
+ if (!dmub_srv)
+ /* DMUB isn't supported on the ASIC. */
+ return 0;
+
+ if (!fb_info) {
+ DRM_ERROR("No framebuffer info for DMUB service.\n");
+ return -EINVAL;
+ }
+
+ if (!dmub_fw) {
+ /* Firmware required for DMUB support. */
+ DRM_ERROR("No firmware provided for DMUB.\n");
+ return -EINVAL;
+ }
+
+ status = dmub_srv_has_hw_support(dmub_srv, &has_hw_support);
+ if (status != DMUB_STATUS_OK) {
+ DRM_ERROR("Error checking HW support for DMUB: %d\n", status);
+ return -EINVAL;
+ }
+
+ if (!has_hw_support) {
+ DRM_INFO("DMUB unsupported on ASIC\n");
+ return 0;
+ }
+
+ hdr = (const struct dmcub_firmware_header_v1_0 *)dmub_fw->data;
+
+ fw_inst_const = dmub_fw->data +
+ le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
+ PSP_HEADER_BYTES;
+
+ fw_bss_data = dmub_fw->data +
+ le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
+ le32_to_cpu(hdr->inst_const_bytes);
+
+ /* Copy firmware and bios info into FB memory. */
+ fw_inst_const_size = le32_to_cpu(hdr->inst_const_bytes) -
+ PSP_HEADER_BYTES - PSP_FOOTER_BYTES;
+
+ fw_bss_data_size = le32_to_cpu(hdr->bss_data_bytes);
+
+ memcpy(fb_info->fb[DMUB_WINDOW_0_INST_CONST].cpu_addr, fw_inst_const,
+ fw_inst_const_size);
+ memcpy(fb_info->fb[DMUB_WINDOW_2_BSS_DATA].cpu_addr, fw_bss_data,
+ fw_bss_data_size);
+ memcpy(fb_info->fb[DMUB_WINDOW_3_VBIOS].cpu_addr, adev->bios,
+ adev->bios_size);
+
+ /* Reset regions that need to be reset. */
+ memset(fb_info->fb[DMUB_WINDOW_4_MAILBOX].cpu_addr, 0,
+ fb_info->fb[DMUB_WINDOW_4_MAILBOX].size);
+
+ memset(fb_info->fb[DMUB_WINDOW_5_TRACEBUFF].cpu_addr, 0,
+ fb_info->fb[DMUB_WINDOW_5_TRACEBUFF].size);
+
+ memset(fb_info->fb[DMUB_WINDOW_6_FW_STATE].cpu_addr, 0,
+ fb_info->fb[DMUB_WINDOW_6_FW_STATE].size);
+
+ /* Initialize hardware. */
+ memset(&hw_params, 0, sizeof(hw_params));
+ hw_params.fb_base = adev->gmc.fb_start;
+ hw_params.fb_offset = adev->gmc.aper_base;
+
+ if (dmcu)
+ hw_params.psp_version = dmcu->psp_version;
+
+ for (i = 0; i < fb_info->num_fb; ++i)
+ hw_params.fb[i] = &fb_info->fb[i];
+
+ status = dmub_srv_hw_init(dmub_srv, &hw_params);
+ if (status != DMUB_STATUS_OK) {
+ DRM_ERROR("Error initializing DMUB HW: %d\n", status);
+ return -EINVAL;
+ }
+
+ /* Wait for firmware load to finish. */
+ status = dmub_srv_wait_for_auto_load(dmub_srv, 100000);
+ if (status != DMUB_STATUS_OK)
+ DRM_WARN("Wait for DMUB auto-load failed: %d\n", status);
+
+ /* Init DMCU and ABM if available. */
+ if (dmcu && abm) {
+ dmcu->funcs->dmcu_init(dmcu);
+ abm->dmcu_is_running = dmcu->funcs->is_dmcu_initialized(dmcu);
+ }
+
+ adev->dm.dc->ctx->dmub_srv = dc_dmub_srv_create(adev->dm.dc, dmub_srv);
+ if (!adev->dm.dc->ctx->dmub_srv) {
+ DRM_ERROR("Couldn't allocate DC DMUB server!\n");
+ return -ENOMEM;
+ }
+
+ DRM_INFO("DMUB hardware initialized: version=0x%08X\n",
+ adev->dm.dmcub_fw_version);
+
+ return 0;
+}
+
static int amdgpu_dm_init(struct amdgpu_device *adev)
{
struct dc_init_data init_data;
#ifdef CONFIG_DRM_AMD_DC_HDCP
struct dc_callback_init init_params;
#endif
+ int r;
adev->dm.ddev = adev->ddev;
adev->dm.adev = adev;
@@ -714,13 +906,16 @@ static int amdgpu_dm_init(struct amdgpu_device *adev)
init_data.dce_environment = DCE_ENV_PRODUCTION_DRV;
- /*
- * TODO debug why this doesn't work on Raven
- */
- if (adev->flags & AMD_IS_APU &&
- adev->asic_type >= CHIP_CARRIZO &&
- adev->asic_type < CHIP_RAVEN)
+ switch (adev->asic_type) {
+ case CHIP_CARRIZO:
+ case CHIP_STONEY:
+ case CHIP_RAVEN:
+ case CHIP_RENOIR:
init_data.flags.gpu_vm_support = true;
+ break;
+ default:
+ break;
+ }
if (amdgpu_dc_feature_mask & DC_FBC_MASK)
init_data.flags.fbc_support = true;
@@ -733,9 +928,7 @@ static int amdgpu_dm_init(struct amdgpu_device *adev)
init_data.flags.power_down_display_on_boot = true;
-#ifdef CONFIG_DRM_AMD_DC_DCN2_0
init_data.soc_bounding_box = adev->dm.soc_bounding_box;
-#endif
/* Display Core create. */
adev->dm.dc = dc_create(&init_data);
@@ -749,6 +942,12 @@ static int amdgpu_dm_init(struct amdgpu_device *adev)
dc_hardware_init(adev->dm.dc);
+ r = dm_dmub_hw_init(adev);
+ if (r) {
+ DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r);
+ goto error;
+ }
+
adev->dm.freesync_module = mod_freesync_create(adev->dm.dc);
if (!adev->dm.freesync_module) {
DRM_ERROR(
@@ -821,6 +1020,15 @@ static void amdgpu_dm_fini(struct amdgpu_device *adev)
if (adev->dm.dc)
dc_deinit_callbacks(adev->dm.dc);
#endif
+ if (adev->dm.dc->ctx->dmub_srv) {
+ dc_dmub_srv_destroy(&adev->dm.dc->ctx->dmub_srv);
+ adev->dm.dc->ctx->dmub_srv = NULL;
+ }
+
+ if (adev->dm.dmub_bo)
+ amdgpu_bo_free_kernel(&adev->dm.dmub_bo,
+ &adev->dm.dmub_bo_gpu_addr,
+ &adev->dm.dmub_bo_cpu_addr);
/* DC Destroy TODO: Replace destroy DAL */
if (adev->dm.dc)
@@ -932,9 +1140,160 @@ static int load_dmcu_fw(struct amdgpu_device *adev)
return 0;
}
+static uint32_t amdgpu_dm_dmub_reg_read(void *ctx, uint32_t address)
+{
+ struct amdgpu_device *adev = ctx;
+
+ return dm_read_reg(adev->dm.dc->ctx, address);
+}
+
+static void amdgpu_dm_dmub_reg_write(void *ctx, uint32_t address,
+ uint32_t value)
+{
+ struct amdgpu_device *adev = ctx;
+
+ return dm_write_reg(adev->dm.dc->ctx, address, value);
+}
+
+static int dm_dmub_sw_init(struct amdgpu_device *adev)
+{
+ struct dmub_srv_create_params create_params;
+ struct dmub_srv_region_params region_params;
+ struct dmub_srv_region_info region_info;
+ struct dmub_srv_fb_params fb_params;
+ struct dmub_srv_fb_info *fb_info;
+ struct dmub_srv *dmub_srv;
+ const struct dmcub_firmware_header_v1_0 *hdr;
+ const char *fw_name_dmub;
+ enum dmub_asic dmub_asic;
+ enum dmub_status status;
+ int r;
+
+ switch (adev->asic_type) {
+ case CHIP_RENOIR:
+ dmub_asic = DMUB_ASIC_DCN21;
+ fw_name_dmub = FIRMWARE_RENOIR_DMUB;
+ break;
+
+ default:
+ /* ASIC doesn't support DMUB. */
+ return 0;
+ }
+
+ r = request_firmware_direct(&adev->dm.dmub_fw, fw_name_dmub, adev->dev);
+ if (r) {
+ DRM_ERROR("DMUB firmware loading failed: %d\n", r);
+ return 0;
+ }
+
+ r = amdgpu_ucode_validate(adev->dm.dmub_fw);
+ if (r) {
+ DRM_ERROR("Couldn't validate DMUB firmware: %d\n", r);
+ return 0;
+ }
+
+ if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
+ DRM_WARN("Only PSP firmware loading is supported for DMUB\n");
+ return 0;
+ }
+
+ hdr = (const struct dmcub_firmware_header_v1_0 *)adev->dm.dmub_fw->data;
+ adev->firmware.ucode[AMDGPU_UCODE_ID_DMCUB].ucode_id =
+ AMDGPU_UCODE_ID_DMCUB;
+ adev->firmware.ucode[AMDGPU_UCODE_ID_DMCUB].fw = adev->dm.dmub_fw;
+ adev->firmware.fw_size +=
+ ALIGN(le32_to_cpu(hdr->inst_const_bytes), PAGE_SIZE);
+
+ adev->dm.dmcub_fw_version = le32_to_cpu(hdr->header.ucode_version);
+
+ DRM_INFO("Loading DMUB firmware via PSP: version=0x%08X\n",
+ adev->dm.dmcub_fw_version);
+
+ adev->dm.dmub_srv = kzalloc(sizeof(*adev->dm.dmub_srv), GFP_KERNEL);
+ dmub_srv = adev->dm.dmub_srv;
+
+ if (!dmub_srv) {
+ DRM_ERROR("Failed to allocate DMUB service!\n");
+ return -ENOMEM;
+ }
+
+ memset(&create_params, 0, sizeof(create_params));
+ create_params.user_ctx = adev;
+ create_params.funcs.reg_read = amdgpu_dm_dmub_reg_read;
+ create_params.funcs.reg_write = amdgpu_dm_dmub_reg_write;
+ create_params.asic = dmub_asic;
+
+ /* Create the DMUB service. */
+ status = dmub_srv_create(dmub_srv, &create_params);
+ if (status != DMUB_STATUS_OK) {
+ DRM_ERROR("Error creating DMUB service: %d\n", status);
+ return -EINVAL;
+ }
+
+ /* Calculate the size of all the regions for the DMUB service. */
+ memset(&region_params, 0, sizeof(region_params));
+
+ region_params.inst_const_size = le32_to_cpu(hdr->inst_const_bytes) -
+ PSP_HEADER_BYTES - PSP_FOOTER_BYTES;
+ region_params.bss_data_size = le32_to_cpu(hdr->bss_data_bytes);
+ region_params.vbios_size = adev->bios_size;
+ region_params.fw_bss_data =
+ adev->dm.dmub_fw->data +
+ le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
+ le32_to_cpu(hdr->inst_const_bytes);
+
+ status = dmub_srv_calc_region_info(dmub_srv, &region_params,
+ &region_info);
+
+ if (status != DMUB_STATUS_OK) {
+ DRM_ERROR("Error calculating DMUB region info: %d\n", status);
+ return -EINVAL;
+ }
+
+ /*
+ * Allocate a framebuffer based on the total size of all the regions.
+ * TODO: Move this into GART.
+ */
+ r = amdgpu_bo_create_kernel(adev, region_info.fb_size, PAGE_SIZE,
+ AMDGPU_GEM_DOMAIN_VRAM, &adev->dm.dmub_bo,
+ &adev->dm.dmub_bo_gpu_addr,
+ &adev->dm.dmub_bo_cpu_addr);
+ if (r)
+ return r;
+
+ /* Rebase the regions on the framebuffer address. */
+ memset(&fb_params, 0, sizeof(fb_params));
+ fb_params.cpu_addr = adev->dm.dmub_bo_cpu_addr;
+ fb_params.gpu_addr = adev->dm.dmub_bo_gpu_addr;
+ fb_params.region_info = &region_info;
+
+ adev->dm.dmub_fb_info =
+ kzalloc(sizeof(*adev->dm.dmub_fb_info), GFP_KERNEL);
+ fb_info = adev->dm.dmub_fb_info;
+
+ if (!fb_info) {
+ DRM_ERROR(
+ "Failed to allocate framebuffer info for DMUB service!\n");
+ return -ENOMEM;
+ }
+
+ status = dmub_srv_calc_fb_info(dmub_srv, &fb_params, fb_info);
+ if (status != DMUB_STATUS_OK) {
+ DRM_ERROR("Error calculating DMUB FB info: %d\n", status);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
static int dm_sw_init(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ int r;
+
+ r = dm_dmub_sw_init(adev);
+ if (r)
+ return r;
return load_dmcu_fw(adev);
}
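
dm_dmub_sw_init() above derives the DMUB region sizes straight from the firmware header: the instruction constants sit after a PSP header (and before a PSP footer) within the ucode array, and the BSS/data section follows the instruction section. A pointer-arithmetic sketch under those assumptions, with a simplified stand-in for the real header struct:

    #include <stdint.h>
    #include <stdio.h>

    #define PSP_HEADER_BYTES 0x100
    #define PSP_FOOTER_BYTES 0x100

    struct fw_header {                /* simplified stand-in */
        uint32_t ucode_array_offset_bytes;
        uint32_t inst_const_bytes;    /* includes PSP header and footer */
        uint32_t bss_data_bytes;
    };

    int main(void)
    {
        static uint8_t fw[0x4000];    /* pretend firmware image */
        const struct fw_header hdr = { 0x200, 0x1000, 0x800 };

        const uint8_t *inst = fw + hdr.ucode_array_offset_bytes
                              + PSP_HEADER_BYTES;
        const uint8_t *bss  = fw + hdr.ucode_array_offset_bytes
                              + hdr.inst_const_bytes;
        uint32_t inst_size  = hdr.inst_const_bytes
                              - PSP_HEADER_BYTES - PSP_FOOTER_BYTES;

        printf("inst at +0x%tx (%u bytes), bss at +0x%tx (%u bytes)\n",
               inst - fw, inst_size, bss - fw, hdr.bss_data_bytes);
        return 0;
    }
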
@@ -943,6 +1302,19 @@ static int dm_sw_fini(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ kfree(adev->dm.dmub_fb_info);
+ adev->dm.dmub_fb_info = NULL;
+
+ if (adev->dm.dmub_srv) {
+ dmub_srv_destroy(adev->dm.dmub_srv);
+ adev->dm.dmub_srv = NULL;
+ }
+
+ if (adev->dm.dmub_fw) {
+ release_firmware(adev->dm.dmub_fw);
+ adev->dm.dmub_fw = NULL;
+ }
+
if(adev->dm.fw_dmcu) {
release_firmware(adev->dm.fw_dmcu);
adev->dm.fw_dmcu = NULL;
@@ -1235,7 +1607,7 @@ static int dm_resume(void *handle)
struct dm_plane_state *dm_new_plane_state;
struct dm_atomic_state *dm_state = to_dm_atomic_state(dm->atomic_obj.state);
enum dc_connection_type new_connection_type = dc_connection_none;
- int i;
+ int i, r;
/* Recreate dc_state - DC invalidates it when setting power state to S3. */
dc_release_state(dm_state->context);
@@ -1243,6 +1615,11 @@ static int dm_resume(void *handle)
/* TODO: Remove dc_state->dccg, use dc->dccg directly. */
dc_resource_state_construct(dm->dc, dm_state->context);
+ /* Before powering on DC we need to re-initialize DMUB. */
+ r = dm_dmub_hw_init(adev);
+ if (r)
+ DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r);
+
/* power on hardware */
dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D0);
@@ -1868,7 +2245,7 @@ static int dce110_register_irq_handlers(struct amdgpu_device *adev)
return 0;
}
-#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
+#if defined(CONFIG_DRM_AMD_DC_DCN)
/* Register IRQ sources and initialize IRQ callbacks */
static int dcn10_register_irq_handlers(struct amdgpu_device *adev)
{
@@ -1914,35 +2291,7 @@ static int dcn10_register_irq_handlers(struct amdgpu_device *adev)
c_irq_params->irq_src = int_params.irq_source;
amdgpu_dm_irq_register_interrupt(adev, &int_params,
- dm_crtc_high_irq, c_irq_params);
- }
-
- /* Use VUPDATE_NO_LOCK interrupt on DCN, which seems to correspond to
- * the regular VUPDATE interrupt on DCE. We want DC_IRQ_SOURCE_VUPDATEx
- * to trigger at end of each vblank, regardless of state of the lock,
- * matching DCE behaviour.
- */
- for (i = DCN_1_0__SRCID__OTG0_IHC_V_UPDATE_NO_LOCK_INTERRUPT;
- i <= DCN_1_0__SRCID__OTG0_IHC_V_UPDATE_NO_LOCK_INTERRUPT + adev->mode_info.num_crtc - 1;
- i++) {
- r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->vupdate_irq);
-
- if (r) {
- DRM_ERROR("Failed to add vupdate irq id!\n");
- return r;
- }
-
- int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
- int_params.irq_source =
- dc_interrupt_to_irq_source(dc, i, 0);
-
- c_irq_params = &adev->dm.vupdate_params[int_params.irq_source - DC_IRQ_SOURCE_VUPDATE1];
-
- c_irq_params->adev = adev;
- c_irq_params->irq_src = int_params.irq_source;
-
- amdgpu_dm_irq_register_interrupt(adev, &int_params,
- dm_vupdate_high_irq, c_irq_params);
+ dm_dcn_crtc_high_irq, c_irq_params);
}
/* Use GRPH_PFLIP interrupt */
@@ -2457,16 +2806,12 @@ static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)
goto fail;
}
break;
-#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
+#if defined(CONFIG_DRM_AMD_DC_DCN)
case CHIP_RAVEN:
-#if defined(CONFIG_DRM_AMD_DC_DCN2_0)
case CHIP_NAVI12:
case CHIP_NAVI10:
case CHIP_NAVI14:
-#endif
-#if defined(CONFIG_DRM_AMD_DC_DCN2_1)
case CHIP_RENOIR:
-#endif
if (dcn10_register_irq_handlers(dm->adev)) {
DRM_ERROR("DM: Failed to initialize IRQ\n");
goto fail;
@@ -2612,14 +2957,13 @@ static int dm_early_init(void *handle)
adev->mode_info.num_hpd = 6;
adev->mode_info.num_dig = 6;
break;
-#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
+#if defined(CONFIG_DRM_AMD_DC_DCN)
case CHIP_RAVEN:
adev->mode_info.num_crtc = 4;
adev->mode_info.num_hpd = 4;
adev->mode_info.num_dig = 4;
break;
#endif
-#if defined(CONFIG_DRM_AMD_DC_DCN2_0)
case CHIP_NAVI10:
case CHIP_NAVI12:
adev->mode_info.num_crtc = 6;
@@ -2631,14 +2975,11 @@ static int dm_early_init(void *handle)
adev->mode_info.num_hpd = 5;
adev->mode_info.num_dig = 5;
break;
-#endif
-#if defined(CONFIG_DRM_AMD_DC_DCN2_1)
case CHIP_RENOIR:
adev->mode_info.num_crtc = 4;
adev->mode_info.num_hpd = 4;
adev->mode_info.num_dig = 4;
break;
-#endif
default:
DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev->asic_type);
return -EINVAL;
@@ -2931,14 +3272,10 @@ fill_plane_buffer_attributes(struct amdgpu_device *adev,
if (adev->asic_type == CHIP_VEGA10 ||
adev->asic_type == CHIP_VEGA12 ||
adev->asic_type == CHIP_VEGA20 ||
-#if defined(CONFIG_DRM_AMD_DC_DCN2_0)
adev->asic_type == CHIP_NAVI10 ||
adev->asic_type == CHIP_NAVI14 ||
adev->asic_type == CHIP_NAVI12 ||
-#endif
-#if defined(CONFIG_DRM_AMD_DC_DCN2_1)
adev->asic_type == CHIP_RENOIR ||
-#endif
adev->asic_type == CHIP_RAVEN) {
/* Fill GFX9 params */
tiling_info->gfx9.num_pipes =
@@ -3256,12 +3593,26 @@ static void update_stream_scaling_settings(const struct drm_display_mode *mode,
static enum dc_color_depth
convert_color_depth_from_display_info(const struct drm_connector *connector,
- const struct drm_connector_state *state)
+ const struct drm_connector_state *state,
+ bool is_y420)
{
- uint8_t bpc = (uint8_t)connector->display_info.bpc;
+ uint8_t bpc;
- /* Assume 8 bpc by default if no bpc is specified. */
- bpc = bpc ? bpc : 8;
+ if (is_y420) {
+ bpc = 8;
+
+ /* Cap display bpc based on HDMI 2.0 HF-VSDB */
+ if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_48)
+ bpc = 16;
+ else if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_36)
+ bpc = 12;
+ else if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_30)
+ bpc = 10;
+ } else {
+ bpc = (uint8_t)connector->display_info.bpc;
+ /* Assume 8 bpc by default if no bpc is specified. */
+ bpc = bpc ? bpc : 8;
+ }
if (!state)
state = connector->state;
@@ -3427,7 +3778,8 @@ static void fill_stream_properties_from_drm_display_mode(
timing_out->timing_3d_format = TIMING_3D_FORMAT_NONE;
timing_out->display_color_depth = convert_color_depth_from_display_info(
- connector, connector_state);
+ connector, connector_state,
+ (timing_out->pixel_encoding == PIXEL_ENCODING_YCBCR420));
timing_out->scan_type = SCANNING_TYPE_NODATA;
timing_out->hdmi_vic = 0;
@@ -3645,10 +3997,10 @@ create_stream_for_sink(struct amdgpu_dm_connector *aconnector,
bool scale = dm_state ? (dm_state->scaling != RMX_OFF) : false;
int mode_refresh;
int preferred_refresh = 0;
-#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT
+#if defined(CONFIG_DRM_AMD_DC_DCN)
struct dsc_dec_dpcd_caps dsc_caps;
- uint32_t link_bandwidth_kbps;
#endif
+ uint32_t link_bandwidth_kbps;
struct dc_sink *sink = NULL;
if (aconnector == NULL) {
@@ -3723,16 +4075,19 @@ create_stream_for_sink(struct amdgpu_dm_connector *aconnector,
fill_stream_properties_from_drm_display_mode(stream,
&mode, &aconnector->base, con_state, old_stream);
-#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT
stream->timing.flags.DSC = 0;
if (aconnector->dc_link && sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT) {
- dc_dsc_parse_dsc_dpcd(aconnector->dc_link->dpcd_caps.dsc_caps.dsc_basic_caps.raw,
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+ dc_dsc_parse_dsc_dpcd(aconnector->dc_link->ctx->dc,
+ aconnector->dc_link->dpcd_caps.dsc_caps.dsc_basic_caps.raw,
aconnector->dc_link->dpcd_caps.dsc_caps.dsc_ext_caps.raw,
&dsc_caps);
+#endif
link_bandwidth_kbps = dc_link_bandwidth_kbps(aconnector->dc_link,
dc_link_get_link_cap(aconnector->dc_link));
+#if defined(CONFIG_DRM_AMD_DC_DCN)
if (dsc_caps.is_dsc_supported)
if (dc_dsc_compute_config(aconnector->dc_link->ctx->dc->res_pool->dscs[0],
&dsc_caps,
@@ -3741,8 +4096,8 @@ create_stream_for_sink(struct amdgpu_dm_connector *aconnector,
&stream->timing,
&stream->timing.dsc_cfg))
stream->timing.flags.DSC = 1;
- }
#endif
+ }
update_stream_scaling_settings(&mode, dm_state, stream);
@@ -3762,7 +4117,9 @@ create_stream_for_sink(struct amdgpu_dm_connector *aconnector,
struct dmcu *dmcu = core_dc->res_pool->dmcu;
stream->psr_version = dmcu->dmcu_version.psr_version;
- mod_build_vsc_infopacket(stream, &stream->vsc_infopacket);
+ mod_build_vsc_infopacket(stream,
+ &stream->vsc_infopacket,
+ &stream->use_vsc_sdp_for_colorimetry);
}
}
finish:
@@ -3853,6 +4210,10 @@ static inline int dm_set_vupdate_irq(struct drm_crtc *crtc, bool enable)
struct amdgpu_device *adev = crtc->dev->dev_private;
int rc;
+ /* Do not set vupdate for DCN hardware */
+ if (adev->family > AMDGPU_FAMILY_AI)
+ return 0;
+
irq_source = IRQ_TYPE_VUPDATE + acrtc->otg_inst;
rc = dc_interrupt_set(adev->dm.dc, irq_source, enable) ? 0 : -EBUSY;
@@ -4096,7 +4457,8 @@ void amdgpu_dm_connector_funcs_reset(struct drm_connector *connector)
state->underscan_hborder = 0;
state->underscan_vborder = 0;
state->base.max_requested_bpc = 8;
-
+ state->vcpi_slots = 0;
+ state->pbn = 0;
if (connector->connector_type == DRM_MODE_CONNECTOR_eDP)
state->abm_level = amdgpu_dm_abm_level;
@@ -4124,7 +4486,8 @@ amdgpu_dm_connector_atomic_duplicate_state(struct drm_connector *connector)
new_state->underscan_enable = state->underscan_enable;
new_state->underscan_hborder = state->underscan_hborder;
new_state->underscan_vborder = state->underscan_vborder;
-
+ new_state->vcpi_slots = state->vcpi_slots;
+ new_state->pbn = state->pbn;
return &new_state->base;
}
@@ -4521,10 +4884,69 @@ static void dm_encoder_helper_disable(struct drm_encoder *encoder)
}
+static int convert_dc_color_depth_into_bpc(enum dc_color_depth display_color_depth)
+{
+ switch (display_color_depth) {
+ case COLOR_DEPTH_666:
+ return 6;
+ case COLOR_DEPTH_888:
+ return 8;
+ case COLOR_DEPTH_101010:
+ return 10;
+ case COLOR_DEPTH_121212:
+ return 12;
+ case COLOR_DEPTH_141414:
+ return 14;
+ case COLOR_DEPTH_161616:
+ return 16;
+ default:
+ break;
+ }
+ return 0;
+}
+
static int dm_encoder_helper_atomic_check(struct drm_encoder *encoder,
struct drm_crtc_state *crtc_state,
struct drm_connector_state *conn_state)
{
+ struct drm_atomic_state *state = crtc_state->state;
+ struct drm_connector *connector = conn_state->connector;
+ struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
+ struct dm_connector_state *dm_new_connector_state = to_dm_connector_state(conn_state);
+ const struct drm_display_mode *adjusted_mode = &crtc_state->adjusted_mode;
+ struct drm_dp_mst_topology_mgr *mst_mgr;
+ struct drm_dp_mst_port *mst_port;
+ enum dc_color_depth color_depth;
+ int clock, bpp = 0;
+ bool is_y420 = false;
+
+ if (!aconnector->port || !aconnector->dc_sink)
+ return 0;
+
+ mst_port = aconnector->port;
+ mst_mgr = &aconnector->mst_port->mst_mgr;
+
+ if (!crtc_state->connectors_changed && !crtc_state->mode_changed)
+ return 0;
+
+ if (!state->duplicated) {
+ is_y420 = drm_mode_is_420_also(&connector->display_info, adjusted_mode) &&
+ aconnector->force_yuv420_output;
+ color_depth = convert_color_depth_from_display_info(connector, conn_state,
+ is_y420);
+ bpp = convert_dc_color_depth_into_bpc(color_depth) * 3;
+ clock = adjusted_mode->clock;
+ dm_new_connector_state->pbn = drm_dp_calc_pbn_mode(clock, bpp, false);
+ }
+ dm_new_connector_state->vcpi_slots = drm_dp_atomic_find_vcpi_slots(state,
+ mst_mgr,
+ mst_port,
+ dm_new_connector_state->pbn,
+ 0);
+ if (dm_new_connector_state->vcpi_slots < 0) {
+ DRM_DEBUG_ATOMIC("failed finding vcpi slots: %d\n", (int)dm_new_connector_state->vcpi_slots);
+ return dm_new_connector_state->vcpi_slots;
+ }
return 0;
}
@@ -4533,6 +4955,71 @@ const struct drm_encoder_helper_funcs amdgpu_dm_encoder_helper_funcs = {
.atomic_check = dm_encoder_helper_atomic_check
};
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+static int dm_update_mst_vcpi_slots_for_dsc(struct drm_atomic_state *state,
+ struct dc_state *dc_state)
+{
+ struct dc_stream_state *stream = NULL;
+ struct drm_connector *connector;
+ struct drm_connector_state *new_con_state, *old_con_state;
+ struct amdgpu_dm_connector *aconnector;
+ struct dm_connector_state *dm_conn_state;
+ int i, j, clock, bpp;
+ int vcpi, pbn_div, pbn = 0;
+
+ for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
+
+ aconnector = to_amdgpu_dm_connector(connector);
+
+ if (!aconnector->port)
+ continue;
+
+ if (!new_con_state || !new_con_state->crtc)
+ continue;
+
+ dm_conn_state = to_dm_connector_state(new_con_state);
+
+ for (j = 0; j < dc_state->stream_count; j++) {
+ stream = dc_state->streams[j];
+ if (!stream)
+ continue;
+
+ if ((struct amdgpu_dm_connector *)stream->dm_stream_context == aconnector)
+ break;
+
+ stream = NULL;
+ }
+
+ if (!stream)
+ continue;
+
+ if (stream->timing.flags.DSC != 1) {
+ drm_dp_mst_atomic_enable_dsc(state,
+ aconnector->port,
+ dm_conn_state->pbn,
+ 0,
+ false);
+ continue;
+ }
+
+ pbn_div = dm_mst_get_pbn_divider(stream->link);
+ bpp = stream->timing.dsc_cfg.bits_per_pixel;
+ clock = stream->timing.pix_clk_100hz / 10;
+ pbn = drm_dp_calc_pbn_mode(clock, bpp, true);
+ vcpi = drm_dp_mst_atomic_enable_dsc(state,
+ aconnector->port,
+ pbn, pbn_div,
+ true);
+ if (vcpi < 0)
+ return vcpi;
+
+ dm_conn_state->pbn = pbn;
+ dm_conn_state->vcpi_slots = vcpi;
+ }
+ return 0;
+}
+#endif
+
static void dm_drm_plane_reset(struct drm_plane *plane)
{
struct dm_plane_state *amdgpu_state = NULL;
@@ -5195,9 +5682,9 @@ void amdgpu_dm_connector_init_helper(struct amdgpu_display_manager *dm,
drm_connector_attach_max_bpc_property(&aconnector->base, 8, 16);
- /* This defaults to the max in the range, but we want 8bpc. */
- aconnector->base.state->max_bpc = 8;
- aconnector->base.state->max_requested_bpc = 8;
+ /* This defaults to the max in the range, but we want 8bpc for non-eDP. */
+ aconnector->base.state->max_bpc = (connector_type == DRM_MODE_CONNECTOR_eDP) ? 16 : 8;
+ aconnector->base.state->max_requested_bpc = aconnector->base.state->max_bpc;
if (connector_type == DRM_MODE_CONNECTOR_eDP &&
dc_is_dmcu_initialized(adev->dm.dc)) {
@@ -5216,7 +5703,7 @@ void amdgpu_dm_connector_init_helper(struct amdgpu_display_manager *dm,
&aconnector->base);
#ifdef CONFIG_DRM_AMD_DC_HDCP
if (adev->asic_type >= CHIP_RAVEN)
- drm_connector_attach_content_protection_property(&aconnector->base, false);
+ drm_connector_attach_content_protection_property(&aconnector->base, true);
#endif
}
}
@@ -5325,11 +5812,12 @@ static int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm,
connector_type = to_drm_connector_type(link->connector_signal);
- res = drm_connector_init(
+ res = drm_connector_init_with_ddc(
dm->ddev,
&aconnector->base,
&amdgpu_dm_connector_funcs,
- connector_type);
+ connector_type,
+ &i2c->base);
if (res) {
DRM_ERROR("connector_init failed\n");
@@ -5467,6 +5955,12 @@ static bool is_content_protection_different(struct drm_connector_state *state,
{
struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
+ if (old_state->hdcp_content_type != state->hdcp_content_type &&
+ state->content_protection != DRM_MODE_CONTENT_PROTECTION_UNDESIRED) {
+ state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
+ return true;
+ }
+
/* CP is being re enabled, ignore this */
if (old_state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED &&
state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED) {
@@ -5495,17 +5989,6 @@ static bool is_content_protection_different(struct drm_connector_state *state,
return false;
}
-static void update_content_protection(struct drm_connector_state *state, const struct drm_connector *connector,
- struct hdcp_workqueue *hdcp_w)
-{
- struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
-
- if (state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED)
- hdcp_add_display(hdcp_w, aconnector->dc_link->link_index, aconnector);
- else if (state->content_protection == DRM_MODE_CONTENT_PROTECTION_UNDESIRED)
- hdcp_remove_display(hdcp_w, aconnector->dc_link->link_index, aconnector->base.index);
-
-}
#endif
static void remove_stream(struct amdgpu_device *adev,
struct amdgpu_crtc *acrtc,
@@ -6475,7 +6958,11 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
}
if (is_content_protection_different(new_con_state, old_con_state, connector, adev->dm.hdcp_workqueue))
- update_content_protection(new_con_state, connector, adev->dm.hdcp_workqueue);
+ hdcp_update_display(
+ adev->dm.hdcp_workqueue, aconnector->dc_link->link_index, aconnector,
+ new_con_state->hdcp_content_type,
+ new_con_state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED);
}
#endif
@@ -7265,7 +7752,7 @@ dm_determine_update_type_for_commit(struct amdgpu_display_manager *dm,
int i, j, num_plane, ret = 0;
struct drm_plane_state *old_plane_state, *new_plane_state;
struct dm_plane_state *new_dm_plane_state, *old_dm_plane_state;
- struct drm_crtc *new_plane_crtc, *old_plane_crtc;
+ struct drm_crtc *new_plane_crtc;
struct drm_plane *plane;
struct drm_crtc *crtc;
@@ -7311,7 +7798,6 @@ dm_determine_update_type_for_commit(struct amdgpu_display_manager *dm,
uint64_t tiling_flags;
new_plane_crtc = new_plane_state->crtc;
- old_plane_crtc = old_plane_state->crtc;
new_dm_plane_state = to_dm_plane_state(new_plane_state);
old_dm_plane_state = to_dm_plane_state(old_plane_state);
@@ -7412,6 +7898,29 @@ cleanup:
return ret;
}
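+/* A modeset on one CRTC can change the DSC and bandwidth budget of every
+ * stream sharing its MST topology, so pull all CRTCs reachable through
+ * this connector's MST manager into the atomic state.
+ */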
+static int add_affected_mst_dsc_crtcs(struct drm_atomic_state *state, struct drm_crtc *crtc)
+{
+ struct drm_connector *connector;
+ struct drm_connector_state *conn_state;
+ struct amdgpu_dm_connector *aconnector = NULL;
+ int i;
+ for_each_new_connector_in_state(state, connector, conn_state, i) {
+ if (conn_state->crtc != crtc)
+ continue;
+
+ aconnector = to_amdgpu_dm_connector(connector);
+ if (!aconnector->port || !aconnector->mst_port)
+ aconnector = NULL;
+ else
+ break;
+ }
+
+ if (!aconnector)
+ return 0;
+
+ return drm_dp_mst_add_affected_dsc_crtcs(state, &aconnector->mst_port->mst_mgr);
+}
+
/**
* amdgpu_dm_atomic_check() - Atomic check implementation for AMDgpu DM.
* @dev: The DRM device
@@ -7464,6 +7973,16 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev,
if (ret)
goto fail;
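+ /* DSC over MST is only supported on Navi and newer ASICs, where a
+ * modeset on one stream can change the DSC configuration of every
+ * stream sharing the same MST link.
+ */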
+ if (adev->asic_type >= CHIP_NAVI10) {
+ for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
+ if (drm_atomic_crtc_needs_modeset(new_crtc_state)) {
+ ret = add_affected_mst_dsc_crtcs(state, crtc);
+ if (ret)
+ goto fail;
+ }
+ }
+ }
+
for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
if (!drm_atomic_crtc_needs_modeset(new_crtc_state) &&
!new_crtc_state->color_mgmt_changed &&
@@ -7635,6 +8154,15 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev,
if (ret)
goto fail;
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+ if (!compute_mst_dsc_configs_for_state(state, dm_state->context))
+ goto fail;
+
+ ret = dm_update_mst_vcpi_slots_for_dsc(state, dm_state->context);
+ if (ret)
+ goto fail;
+#endif
+
if (dc_validate_global_state(dc, dm_state->context, false) != DC_OK) {
ret = -EINVAL;
goto fail;
@@ -7663,6 +8191,10 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev,
dc_retain_state(old_dm_state->context);
}
}
+ /* Perform validation of MST topology in the state */
+ ret = drm_dp_mst_atomic_check(state);
+ if (ret)
+ goto fail;
/* Store the overall update type for use later in atomic check. */
for_each_new_crtc_in_state (state, crtc, new_crtc_state, i) {
@@ -7861,17 +8393,37 @@ static bool amdgpu_dm_link_setup_psr(struct dc_stream_state *stream)
bool amdgpu_dm_psr_enable(struct dc_stream_state *stream)
{
struct dc_link *link = stream->link;
- struct dc_static_screen_events triggers = {0};
+ unsigned int vsync_rate_hz = 0;
+ struct dc_static_screen_params params = {0};
+ /* Calculate number of static frames before generating interrupt to
+ * enter PSR.
+ */
+ unsigned int frame_time_microsec = 0;
+ /* Fail-safe default of 2 static frames */
+ unsigned int num_frames_static = 2;
DRM_DEBUG_DRIVER("Enabling psr...\n");
- triggers.cursor_update = true;
- triggers.overlay_update = true;
- triggers.surface_update = true;
+ vsync_rate_hz = div64_u64(div64_u64((
+ stream->timing.pix_clk_100hz * 100),
+ stream->timing.v_total),
+ stream->timing.h_total);
+
+ /* Round up: calculate the number of frames such that at least
+ * 30 ms of time has passed.
+ */
+ if (vsync_rate_hz != 0) {
+ frame_time_microsec = 1000000 / vsync_rate_hz;
+ num_frames_static = (30000 / frame_time_microsec) + 1;
+ }
+
+ params.triggers.cursor_update = true;
+ params.triggers.overlay_update = true;
+ params.triggers.surface_update = true;
+ params.num_frames = num_frames_static;
- dc_stream_set_static_screen_events(link->ctx->dc,
+ dc_stream_set_static_screen_params(link->ctx->dc,
&stream, 1,
- &triggers);
+ &params);
return dc_link_set_psr_allow_active(link, true, false);
}
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h
index 77c5166e6b08..7ea9acb0358d 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h
@@ -57,6 +57,8 @@ struct amdgpu_device;
struct drm_device;
struct amdgpu_dm_irq_handler_data;
struct dc;
+struct amdgpu_bo;
+struct dmub_srv;
struct common_irq_params {
struct amdgpu_device *adev;
@@ -122,6 +124,57 @@ struct amdgpu_display_manager {
struct dc *dc;
/**
+ * @dmub_srv:
+ *
+ * DMUB service, used for controlling the DMUB on hardware
+ * that supports it. The pointer to the dmub_srv will be
+ * NULL on hardware that does not support it.
+ */
+ struct dmub_srv *dmub_srv;
+
+ /**
+ * @dmub_fb_info:
+ *
+ * Framebuffer regions for the DMUB.
+ */
+ struct dmub_srv_fb_info *dmub_fb_info;
+
+ /**
+ * @dmub_fw:
+ *
+ * DMUB firmware, required on hardware that has DMUB support.
+ */
+ const struct firmware *dmub_fw;
+
+ /**
+ * @dmub_bo:
+ *
+ * Buffer object for the DMUB.
+ */
+ struct amdgpu_bo *dmub_bo;
+
+ /**
+ * @dmub_bo_gpu_addr:
+ *
+ * GPU virtual address for the DMUB buffer object.
+ */
+ u64 dmub_bo_gpu_addr;
+
+ /**
+ * @dmub_bo_cpu_addr:
+ *
+ * CPU address for the DMUB buffer object.
+ */
+ void *dmub_bo_cpu_addr;
+
+ /**
+ * @dmcub_fw_version:
+ *
+ * DMCUB firmware version.
+ */
+ uint32_t dmcub_fw_version;
+
+ /**
* @cgs_device:
*
* The Common Graphics Services device. It provides an interface for
@@ -241,7 +294,6 @@ struct amdgpu_display_manager {
const struct firmware *fw_dmcu;
uint32_t dmcu_fw_version;
-#ifdef CONFIG_DRM_AMD_DC_DCN2_0
/**
* @soc_bounding_box:
*
@@ -249,7 +301,6 @@ struct amdgpu_display_manager {
* available in FW
*/
const struct gpu_info_soc_bounding_box_v1_0 *soc_bounding_box;
-#endif
};
struct amdgpu_dm_connector {
@@ -279,6 +330,7 @@ struct amdgpu_dm_connector {
struct drm_dp_mst_port *port;
struct amdgpu_dm_connector *mst_port;
struct amdgpu_encoder *mst_encoder;
+ struct drm_dp_aux *dsc_aux;
/* TODO see if we can merge with ddc_bus or make a dm_connector */
struct amdgpu_i2c_adapter *i2c;
@@ -359,6 +411,8 @@ struct dm_connector_state {
bool underscan_enable;
bool freesync_capable;
uint8_t abm_level;
+ int vcpi_slots;
+ uint64_t pbn;
};
#define to_dm_connector_state(x)\
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c
index bdb37e611015..f81d3439ee8c 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c
@@ -657,6 +657,7 @@ static ssize_t dp_phy_test_pattern_debugfs_write(struct file *f, const char __us
dc_link_set_test_pattern(
link,
test_pattern,
+ DP_TEST_PATTERN_COLOR_SPACE_RGB,
&link_training_settings,
custom_pattern,
10);
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.c
index 77181ddf6c8e..ae329335dfcc 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.c
@@ -85,42 +85,54 @@ static void process_output(struct hdcp_workqueue *hdcp_work)
schedule_delayed_work(&hdcp_work->watchdog_timer_dwork,
msecs_to_jiffies(output.watchdog_timer_delay));
+ schedule_delayed_work(&hdcp_work->property_validate_dwork, msecs_to_jiffies(0));
}
-void hdcp_add_display(struct hdcp_workqueue *hdcp_work, unsigned int link_index, struct amdgpu_dm_connector *aconnector)
+void hdcp_update_display(struct hdcp_workqueue *hdcp_work,
+ unsigned int link_index,
+ struct amdgpu_dm_connector *aconnector,
+ uint8_t content_type,
+ bool enable_encryption)
{
struct hdcp_workqueue *hdcp_w = &hdcp_work[link_index];
struct mod_hdcp_display *display = &hdcp_work[link_index].display;
struct mod_hdcp_link *link = &hdcp_work[link_index].link;
+ struct mod_hdcp_display_query query;
mutex_lock(&hdcp_w->mutex);
hdcp_w->aconnector = aconnector;
- mod_hdcp_add_display(&hdcp_w->hdcp, link, display, &hdcp_w->output);
-
- schedule_delayed_work(&hdcp_w->property_validate_dwork, msecs_to_jiffies(DRM_HDCP_CHECK_PERIOD_MS));
-
- process_output(hdcp_w);
+ query.display = NULL;
+ mod_hdcp_query_display(&hdcp_w->hdcp, aconnector->base.index, &query);
- mutex_unlock(&hdcp_w->mutex);
+ if (query.display != NULL) {
+ memcpy(display, query.display, sizeof(struct mod_hdcp_display));
+ mod_hdcp_remove_display(&hdcp_w->hdcp, aconnector->base.index, &hdcp_w->output);
-}
+ hdcp_w->link.adjust.hdcp2.force_type = MOD_HDCP_FORCE_TYPE_0;
-void hdcp_remove_display(struct hdcp_workqueue *hdcp_work, unsigned int link_index, unsigned int display_index)
-{
- struct hdcp_workqueue *hdcp_w = &hdcp_work[link_index];
+ if (enable_encryption) {
+ display->adjust.disable = 0;
+ if (content_type == DRM_MODE_HDCP_CONTENT_TYPE0)
+ hdcp_w->link.adjust.hdcp2.force_type = MOD_HDCP_FORCE_TYPE_0;
+ else if (content_type == DRM_MODE_HDCP_CONTENT_TYPE1)
+ hdcp_w->link.adjust.hdcp2.force_type = MOD_HDCP_FORCE_TYPE_1;
- mutex_lock(&hdcp_w->mutex);
+ schedule_delayed_work(&hdcp_w->property_validate_dwork,
+ msecs_to_jiffies(DRM_HDCP_CHECK_PERIOD_MS));
+ } else {
+ display->adjust.disable = 1;
+ hdcp_w->encryption_status = MOD_HDCP_ENCRYPTION_STATUS_HDCP_OFF;
+ cancel_delayed_work(&hdcp_w->property_validate_dwork);
+ }
- mod_hdcp_remove_display(&hdcp_w->hdcp, display_index, &hdcp_w->output);
+ display->state = MOD_HDCP_DISPLAY_ACTIVE;
+ }
- cancel_delayed_work(&hdcp_w->property_validate_dwork);
- hdcp_w->encryption_status = MOD_HDCP_ENCRYPTION_STATUS_HDCP_OFF;
+ mod_hdcp_add_display(&hdcp_w->hdcp, link, display, &hdcp_w->output);
process_output(hdcp_w);
-
mutex_unlock(&hdcp_w->mutex);
-
}
void hdcp_reset_display(struct hdcp_workqueue *hdcp_work, unsigned int link_index)
@@ -190,10 +202,16 @@ static void event_property_update(struct work_struct *work)
}
}
- if (hdcp_work->encryption_status == MOD_HDCP_ENCRYPTION_STATUS_HDCP1_ON)
- drm_hdcp_update_content_protection(&aconnector->base, DRM_MODE_CONTENT_PROTECTION_ENABLED);
- else
+ if (hdcp_work->encryption_status != MOD_HDCP_ENCRYPTION_STATUS_HDCP_OFF) {
+ if (aconnector->base.state->hdcp_content_type == DRM_MODE_HDCP_CONTENT_TYPE0 &&
+ hdcp_work->encryption_status <= MOD_HDCP_ENCRYPTION_STATUS_HDCP2_TYPE0_ON)
+ drm_hdcp_update_content_protection(&aconnector->base, DRM_MODE_CONTENT_PROTECTION_ENABLED);
+ else if (aconnector->base.state->hdcp_content_type == DRM_MODE_HDCP_CONTENT_TYPE1 &&
+ hdcp_work->encryption_status == MOD_HDCP_ENCRYPTION_STATUS_HDCP2_TYPE1_ON)
+ drm_hdcp_update_content_protection(&aconnector->base, DRM_MODE_CONTENT_PROTECTION_ENABLED);
+ } else {
drm_hdcp_update_content_protection(&aconnector->base, DRM_MODE_CONTENT_PROTECTION_DESIRED);
+ }
mutex_unlock(&hdcp_work->mutex);
@@ -207,6 +225,9 @@ static void event_property_validate(struct work_struct *work)
struct mod_hdcp_display_query query;
struct amdgpu_dm_connector *aconnector = hdcp_work->aconnector;
+ if (!aconnector)
+ return;
+
mutex_lock(&hdcp_work->mutex);
query.encryption_status = MOD_HDCP_ENCRYPTION_STATUS_HDCP_OFF;
@@ -217,8 +238,6 @@ static void event_property_validate(struct work_struct *work)
schedule_work(&hdcp_work->property_update_work);
}
- schedule_delayed_work(&hdcp_work->property_validate_dwork, msecs_to_jiffies(DRM_HDCP_CHECK_PERIOD_MS));
-
mutex_unlock(&hdcp_work->mutex);
}
@@ -294,8 +313,10 @@ static void update_config(void *handle, struct cp_psp_stream_config *config)
link->dig_be = config->link_enc_inst;
link->ddc_line = aconnector->dc_link->ddc_hw_inst + 1;
link->dp.rev = aconnector->dc_link->dpcd_caps.dpcd_rev.raw;
- link->adjust.hdcp2.disable = 1;
+ display->adjust.disable = 1;
+ link->adjust.auth_delay = 2;
+ hdcp_update_display(hdcp_work, link_index, aconnector, DRM_MODE_HDCP_CONTENT_TYPE0, false);
}
struct hdcp_workqueue *hdcp_create_workqueue(void *psp_context, struct cp_psp *cp_psp, struct dc *dc)
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.h b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.h
index d3ba505d0696..6abde86bce4a 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.h
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.h
@@ -54,9 +54,12 @@ struct hdcp_workqueue {
uint8_t max_link;
};
-void hdcp_add_display(struct hdcp_workqueue *hdcp_work, unsigned int link_index,
- struct amdgpu_dm_connector *aconnector);
-void hdcp_remove_display(struct hdcp_workqueue *work, unsigned int link_index, unsigned int display_index);
+void hdcp_update_display(struct hdcp_workqueue *hdcp_work,
+ unsigned int link_index,
+ struct amdgpu_dm_connector *aconnector,
+ uint8_t content_type,
+ bool enable_encryption);
+
void hdcp_reset_display(struct hdcp_workqueue *work, unsigned int link_index);
void hdcp_handle_cpirq(struct hdcp_workqueue *work, unsigned int link_index);
void hdcp_destroy(struct hdcp_workqueue *work);
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c
index 11e5784aa62a..069b7a6f5597 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c
@@ -37,6 +37,7 @@
#include "dc.h"
#include "amdgpu_dm.h"
#include "amdgpu_dm_irq.h"
+#include "amdgpu_dm_mst_types.h"
#include "dm_helpers.h"
@@ -97,8 +98,6 @@ enum dc_edid_status dm_helpers_parse_edid_caps(
(struct edid *) edid->raw_edid);
sad_count = drm_edid_to_sad((struct edid *) edid->raw_edid, &sads);
- if (sad_count < 0)
- DRM_ERROR("Couldn't read SADs: %d\n", sad_count);
if (sad_count <= 0)
return result;
@@ -182,19 +181,22 @@ bool dm_helpers_dp_mst_write_payload_allocation_table(
bool enable)
{
struct amdgpu_dm_connector *aconnector;
+ struct dm_connector_state *dm_conn_state;
struct drm_dp_mst_topology_mgr *mst_mgr;
struct drm_dp_mst_port *mst_port;
- int slots = 0;
bool ret;
- int clock;
- int bpp = 0;
- int pbn = 0;
aconnector = (struct amdgpu_dm_connector *)stream->dm_stream_context;
+ /* Accessing the connector state is required for vcpi_slots allocation
+ * and directly relies on behaviour in commit check
+ * that blocks before commit, guaranteeing that the state
+ * is not going to be swapped while still in use in commit tail.
+ */
if (!aconnector || !aconnector->mst_port)
return false;
+ dm_conn_state = to_dm_connector_state(aconnector->base.state);
+
mst_mgr = &aconnector->mst_port->mst_mgr;
if (!mst_mgr->mst_state)
@@ -203,42 +205,10 @@ bool dm_helpers_dp_mst_write_payload_allocation_table(
mst_port = aconnector->port;
if (enable) {
- clock = stream->timing.pix_clk_100hz / 10;
-
- switch (stream->timing.display_color_depth) {
-
- case COLOR_DEPTH_666:
- bpp = 6;
- break;
- case COLOR_DEPTH_888:
- bpp = 8;
- break;
- case COLOR_DEPTH_101010:
- bpp = 10;
- break;
- case COLOR_DEPTH_121212:
- bpp = 12;
- break;
- case COLOR_DEPTH_141414:
- bpp = 14;
- break;
- case COLOR_DEPTH_161616:
- bpp = 16;
- break;
- default:
- ASSERT(bpp != 0);
- break;
- }
-
- bpp = bpp * 3;
-
- /* TODO need to know link rate */
-
- pbn = drm_dp_calc_pbn_mode(clock, bpp);
-
- slots = drm_dp_find_vcpi_slots(mst_mgr, pbn);
- ret = drm_dp_mst_allocate_vcpi(mst_mgr, mst_port, pbn, slots);
+ ret = drm_dp_mst_allocate_vcpi(mst_mgr, mst_port,
+ dm_conn_state->pbn,
+ dm_conn_state->vcpi_slots);
if (!ret)
return false;
@@ -540,7 +510,6 @@ bool dm_helpers_submit_i2c(
return result;
}
-#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT
bool dm_helpers_dp_write_dsc_enable(
struct dc_context *ctx,
const struct dc_stream_state *stream,
@@ -548,10 +517,25 @@ bool dm_helpers_dp_write_dsc_enable(
)
{
uint8_t enable_dsc = enable ? 1 : 0;
+ struct amdgpu_dm_connector *aconnector;
+
+ if (!stream)
+ return false;
+
+ if (stream->signal == SIGNAL_TYPE_DISPLAY_PORT_MST) {
+ aconnector = (struct amdgpu_dm_connector *)stream->dm_stream_context;
+
+ if (!aconnector->dsc_aux)
+ return false;
+
+ return (drm_dp_dpcd_write(aconnector->dsc_aux, DP_DSC_ENABLE, &enable_dsc, 1) >= 0);
+ }
+
+ if (stream->signal == SIGNAL_TYPE_DISPLAY_PORT)
+ return dm_helpers_dp_write_dpcd(ctx, stream->link, DP_DSC_ENABLE, &enable_dsc, 1);
- return dm_helpers_dp_write_dpcd(ctx, stream->sink->link, DP_DSC_ENABLE, &enable_dsc, 1);
+ return false;
}
-#endif
bool dm_helpers_is_dp_sink_present(struct dc_link *link)
{
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.c
index 64445c4cc4c2..cbcf504f73a5 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.c
@@ -111,17 +111,12 @@ static void init_handler_common_data(struct amdgpu_dm_irq_handler_data *hcd,
*/
static void dm_irq_work_func(struct work_struct *work)
{
- struct list_head *entry;
struct irq_list_head *irq_list_head =
container_of(work, struct irq_list_head, work);
struct list_head *handler_list = &irq_list_head->head;
struct amdgpu_dm_irq_handler_data *handler_data;
- list_for_each(entry, handler_list) {
- handler_data = list_entry(entry,
- struct amdgpu_dm_irq_handler_data,
- list);
-
+ list_for_each_entry(handler_data, handler_list, list) {
DRM_DEBUG_KMS("DM_IRQ: work_func: for dal_src=%d\n",
handler_data->irq_source);
@@ -528,19 +523,13 @@ static void amdgpu_dm_irq_immediate_work(struct amdgpu_device *adev,
enum dc_irq_source irq_source)
{
struct amdgpu_dm_irq_handler_data *handler_data;
- struct list_head *entry;
unsigned long irq_table_flags;
DM_IRQ_TABLE_LOCK(adev, irq_table_flags);
- list_for_each(
- entry,
- &adev->dm.irq_handler_list_high_tab[irq_source]) {
-
- handler_data = list_entry(entry,
- struct amdgpu_dm_irq_handler_data,
- list);
-
+ list_for_each_entry(handler_data,
+ &adev->dm.irq_handler_list_high_tab[irq_source],
+ list) {
/* Call a subcomponent which registered for immediate
* interrupt notification */
handler_data->handler(handler_data->handler_arg);
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
index 2bf8534c18fb..96b391e4b3e7 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
@@ -25,6 +25,7 @@
#include <linux/version.h>
#include <drm/drm_atomic_helper.h>
+#include <drm/drm_dp_mst_helper.h>
#include "dm_services.h"
#include "amdgpu.h"
#include "amdgpu_dm.h"
@@ -39,6 +40,12 @@
#if defined(CONFIG_DEBUG_FS)
#include "amdgpu_dm_debugfs.h"
#endif
+
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+#include "dc/dcn20/dcn20_resource.h"
+#endif
+
/* #define TRACE_DPCD */
#ifdef TRACE_DPCD
@@ -180,6 +187,30 @@ static const struct drm_connector_funcs dm_dp_mst_connector_funcs = {
.early_unregister = amdgpu_dm_mst_connector_early_unregister,
};
+#if defined(CONFIG_DRM_AMD_DC_DCN)
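+/* Read the DSC capability DPCD block through the port's DSC-capable AUX
+ * channel and parse it into the sink's decoder caps; returns false if the
+ * port has no usable DSC AUX or the caps cannot be parsed.
+ */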
+static bool validate_dsc_caps_on_connector(struct amdgpu_dm_connector *aconnector)
+{
+ struct dc_sink *dc_sink = aconnector->dc_sink;
+ struct drm_dp_mst_port *port = aconnector->port;
+ u8 dsc_caps[16] = { 0 };
+
+ aconnector->dsc_aux = drm_dp_mst_dsc_aux_for_port(port);
+
+ if (!aconnector->dsc_aux)
+ return false;
+
+ if (drm_dp_dpcd_read(aconnector->dsc_aux, DP_DSC_SUPPORT, dsc_caps, 16) < 0)
+ return false;
+
+ if (!dc_dsc_parse_dsc_dpcd(aconnector->dc_link->ctx->dc,
+ dsc_caps, NULL,
+ &dc_sink->sink_dsc_caps.dsc_dec_caps))
+ return false;
+
+ return true;
+}
+#endif
+
static int dm_dp_mst_get_modes(struct drm_connector *connector)
{
struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
@@ -222,10 +253,16 @@ static int dm_dp_mst_get_modes(struct drm_connector *connector)
/* dc_link_add_remote_sink returns a new reference */
aconnector->dc_sink = dc_sink;
- if (aconnector->dc_sink)
+ if (aconnector->dc_sink) {
amdgpu_dm_update_freesync_caps(
connector, aconnector->edid);
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+ if (!validate_dsc_caps_on_connector(aconnector))
+ memset(&aconnector->dc_sink->sink_dsc_caps,
+ 0, sizeof(aconnector->dc_sink->sink_dsc_caps));
+#endif
+ }
}
drm_connector_update_edid_property(
@@ -254,11 +291,43 @@ dm_dp_mst_detect(struct drm_connector *connector,
aconnector->port);
}
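+/* Release this connector's VCPI slot allocation when the connector loses
+ * its CRTC, or when the CRTC it is attached to undergoes a modeset that
+ * leaves it disabled.
+ */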
+static int dm_dp_mst_atomic_check(struct drm_connector *connector,
+ struct drm_atomic_state *state)
+{
+ struct drm_connector_state *new_conn_state =
+ drm_atomic_get_new_connector_state(state, connector);
+ struct drm_connector_state *old_conn_state =
+ drm_atomic_get_old_connector_state(state, connector);
+ struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
+ struct drm_crtc_state *new_crtc_state;
+ struct drm_dp_mst_topology_mgr *mst_mgr;
+ struct drm_dp_mst_port *mst_port;
+
+ mst_port = aconnector->port;
+ mst_mgr = &aconnector->mst_port->mst_mgr;
+
+ if (!old_conn_state->crtc)
+ return 0;
+
+ if (new_conn_state->crtc) {
+ new_crtc_state = drm_atomic_get_new_crtc_state(state, new_conn_state->crtc);
+ if (!new_crtc_state ||
+ !drm_atomic_crtc_needs_modeset(new_crtc_state) ||
+ new_crtc_state->enable)
+ return 0;
+ }
+
+ return drm_dp_atomic_release_vcpi_slots(state,
+ mst_mgr,
+ mst_port);
+}
+
static const struct drm_connector_helper_funcs dm_dp_mst_connector_helper_funcs = {
.get_modes = dm_dp_mst_get_modes,
.mode_valid = amdgpu_dm_connector_mode_valid,
.atomic_best_encoder = dm_mst_atomic_best_encoder,
.detect_ctx = dm_dp_mst_detect,
+ .atomic_check = dm_dp_mst_atomic_check,
};
static void amdgpu_dm_encoder_destroy(struct drm_encoder *encoder)
@@ -434,3 +503,384 @@ void amdgpu_dm_initialize_dp_connector(struct amdgpu_display_manager *dm,
aconnector->connector_id);
}
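+/* PBN budget of one MST time slot: total link bandwidth expressed in PBN
+ * units (54/64 MB/s each) divided by the 64 time slots of an MTP.
+ */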
+int dm_mst_get_pbn_divider(struct dc_link *link)
+{
+ if (!link)
+ return 0;
+
+ return dc_link_bandwidth_kbps(link,
+ dc_link_get_link_cap(link)) / (8 * 1000 * 54);
+}
+
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+
+struct dsc_mst_fairness_params {
+ struct dc_crtc_timing *timing;
+ struct dc_sink *sink;
+ struct dc_dsc_bw_range bw_range;
+ bool compression_possible;
+ struct drm_dp_mst_port *port;
+};
+
+struct dsc_mst_fairness_vars {
+ int pbn;
+ bool dsc_enabled;
+ int bpp_x16;
+};
+
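+/* Convert a stream bandwidth in kbps to its peak PBN value: add the 0.6%
+ * (1006/1000) DP downspread margin, then convert to PBN units of
+ * 54/64 MB/s, rounding up.
+ */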
+static int kbps_to_peak_pbn(int kbps)
+{
+ u64 peak_kbps = kbps;
+
+ peak_kbps *= 1006;
+ peak_kbps = div_u64(peak_kbps, 1000);
+ return (int) DIV64_U64_ROUND_UP(peak_kbps * 64, (54 * 8 * 1000));
+}
+
+static void set_dsc_configs_from_fairness_vars(struct dsc_mst_fairness_params *params,
+ struct dsc_mst_fairness_vars *vars,
+ int count)
+{
+ int i;
+
+ for (i = 0; i < count; i++) {
+ memset(&params[i].timing->dsc_cfg, 0, sizeof(params[i].timing->dsc_cfg));
+ if (vars[i].dsc_enabled && dc_dsc_compute_config(
+ params[i].sink->ctx->dc->res_pool->dscs[0],
+ &params[i].sink->sink_dsc_caps.dsc_dec_caps,
+ params[i].sink->ctx->dc->debug.dsc_min_slice_height_override,
+ 0,
+ params[i].timing,
+ &params[i].timing->dsc_cfg)) {
+ params[i].timing->flags.DSC = 1;
+ params[i].timing->dsc_cfg.bits_per_pixel = vars[i].bpp_x16;
+ } else {
+ params[i].timing->flags.DSC = 0;
+ }
+ }
+}
+
+static int bpp_x16_from_pbn(struct dsc_mst_fairness_params param, int pbn)
+{
+ struct dc_dsc_config dsc_config;
+ u64 kbps;
+
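+ /* Inverse of kbps_to_peak_pbn(): one PBN unit is 54/64 MB/s and the
+ * 994/1000 factor takes the 0.6% margin back out.
+ */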
+ kbps = div_u64((u64)pbn * 994 * 8 * 54, 64);
+ dc_dsc_compute_config(
+ param.sink->ctx->dc->res_pool->dscs[0],
+ &param.sink->sink_dsc_caps.dsc_dec_caps,
+ param.sink->ctx->dc->debug.dsc_min_slice_height_override,
+ (int) kbps, param.timing, &dsc_config);
+
+ return dsc_config.bits_per_pixel;
+}
+
+static void increase_dsc_bpp(struct drm_atomic_state *state,
+ struct dc_link *dc_link,
+ struct dsc_mst_fairness_params *params,
+ struct dsc_mst_fairness_vars *vars,
+ int count)
+{
+ int i;
+ bool bpp_increased[MAX_PIPES];
+ int initial_slack[MAX_PIPES];
+ int min_initial_slack;
+ int next_index;
+ int remaining_to_increase = 0;
+ int pbn_per_timeslot;
+ int link_timeslots_used;
+ int fair_pbn_alloc;
+
+ for (i = 0; i < count; i++) {
+ if (vars[i].dsc_enabled) {
+ initial_slack[i] = kbps_to_peak_pbn(params[i].bw_range.max_kbps) - vars[i].pbn;
+ bpp_increased[i] = false;
+ remaining_to_increase += 1;
+ } else {
+ initial_slack[i] = 0;
+ bpp_increased[i] = true;
+ }
+ }
+
+ pbn_per_timeslot = dc_link_bandwidth_kbps(dc_link,
+ dc_link_get_link_cap(dc_link)) / (8 * 1000 * 54);
+
+ while (remaining_to_increase) {
+ next_index = -1;
+ min_initial_slack = -1;
+ for (i = 0; i < count; i++) {
+ if (!bpp_increased[i]) {
+ if (min_initial_slack == -1 || min_initial_slack > initial_slack[i]) {
+ min_initial_slack = initial_slack[i];
+ next_index = i;
+ }
+ }
+ }
+
+ if (next_index == -1)
+ break;
+
+ link_timeslots_used = 0;
+
+ for (i = 0; i < count; i++)
+ link_timeslots_used += DIV_ROUND_UP(vars[i].pbn, pbn_per_timeslot);
+
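+ /* Share the remaining free time slots (63 usable per MTP)
+ * evenly among the streams still waiting for a bpp increase.
+ */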
+ fair_pbn_alloc = (63 - link_timeslots_used) / remaining_to_increase * pbn_per_timeslot;
+
+ if (initial_slack[next_index] > fair_pbn_alloc) {
+ vars[next_index].pbn += fair_pbn_alloc;
+ if (drm_dp_atomic_find_vcpi_slots(state,
+ params[next_index].port->mgr,
+ params[next_index].port,
+ vars[next_index].pbn,
+ dm_mst_get_pbn_divider(dc_link)) < 0)
+ return;
+ if (!drm_dp_mst_atomic_check(state)) {
+ vars[next_index].bpp_x16 = bpp_x16_from_pbn(params[next_index], vars[next_index].pbn);
+ } else {
+ vars[next_index].pbn -= fair_pbn_alloc;
+ if (drm_dp_atomic_find_vcpi_slots(state,
+ params[next_index].port->mgr,
+ params[next_index].port,
+ vars[next_index].pbn,
+ dm_mst_get_pbn_divider(dc_link)) < 0)
+ return;
+ }
+ } else {
+ vars[next_index].pbn += initial_slack[next_index];
+ if (drm_dp_atomic_find_vcpi_slots(state,
+ params[next_index].port->mgr,
+ params[next_index].port,
+ vars[next_index].pbn,
+ dm_mst_get_pbn_divider(dc_link)) < 0)
+ return;
+ if (!drm_dp_mst_atomic_check(state)) {
+ vars[next_index].bpp_x16 = params[next_index].bw_range.max_target_bpp_x16;
+ } else {
+ vars[next_index].pbn -= initial_slack[next_index];
+ if (drm_dp_atomic_find_vcpi_slots(state,
+ params[next_index].port->mgr,
+ params[next_index].port,
+ vars[next_index].pbn,
+ dm_mst_get_pbn_divider(dc_link)) < 0)
+ return;
+ }
+ }
+
+ bpp_increased[next_index] = true;
+ remaining_to_increase--;
+ }
+}
+
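+/* Greedily try to turn DSC back off, starting with the streams that gain
+ * the most bandwidth from running uncompressed, keeping each change only
+ * if the topology still passes the MST atomic check.
+ */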
+static void try_disable_dsc(struct drm_atomic_state *state,
+ struct dc_link *dc_link,
+ struct dsc_mst_fairness_params *params,
+ struct dsc_mst_fairness_vars *vars,
+ int count)
+{
+ int i;
+ bool tried[MAX_PIPES];
+ int kbps_increase[MAX_PIPES];
+ int max_kbps_increase;
+ int next_index;
+ int remaining_to_try = 0;
+
+ for (i = 0; i < count; i++) {
+ if (vars[i].dsc_enabled && vars[i].bpp_x16 == params[i].bw_range.max_target_bpp_x16) {
+ kbps_increase[i] = params[i].bw_range.stream_kbps - params[i].bw_range.max_kbps;
+ tried[i] = false;
+ remaining_to_try += 1;
+ } else {
+ kbps_increase[i] = 0;
+ tried[i] = true;
+ }
+ }
+
+ while (remaining_to_try) {
+ next_index = -1;
+ max_kbps_increase = -1;
+ for (i = 0; i < count; i++) {
+ if (!tried[i]) {
+ if (max_kbps_increase == -1 || max_kbps_increase < kbps_increase[i]) {
+ max_kbps_increase = kbps_increase[i];
+ next_index = i;
+ }
+ }
+ }
+
+ if (next_index == -1)
+ break;
+
+ vars[next_index].pbn = kbps_to_peak_pbn(params[next_index].bw_range.stream_kbps);
+ if (drm_dp_atomic_find_vcpi_slots(state,
+ params[next_index].port->mgr,
+ params[next_index].port,
+ vars[next_index].pbn,
+ 0) < 0)
+ return;
+
+ if (!drm_dp_mst_atomic_check(state)) {
+ vars[next_index].dsc_enabled = false;
+ vars[next_index].bpp_x16 = 0;
+ } else {
+ vars[next_index].pbn = kbps_to_peak_pbn(params[next_index].bw_range.max_kbps);
+ if (drm_dp_atomic_find_vcpi_slots(state,
+ params[next_index].port->mgr,
+ params[next_index].port,
+ vars[next_index].pbn,
+ dm_mst_get_pbn_divider(dc_link)) < 0)
+ return;
+ }
+
+ tried[next_index] = true;
+ remaining_to_try--;
+ }
+}
+
+static bool compute_mst_dsc_configs_for_link(struct drm_atomic_state *state,
+ struct dc_state *dc_state,
+ struct dc_link *dc_link)
+{
+ int i;
+ struct dc_stream_state *stream;
+ struct dsc_mst_fairness_params params[MAX_PIPES];
+ struct dsc_mst_fairness_vars vars[MAX_PIPES];
+ struct amdgpu_dm_connector *aconnector;
+ int count = 0;
+
+ memset(params, 0, sizeof(params));
+
+ /* Set up params */
+ for (i = 0; i < dc_state->stream_count; i++) {
+ struct dc_dsc_policy dsc_policy = {0};
+
+ stream = dc_state->streams[i];
+
+ if (stream->link != dc_link)
+ continue;
+
+ stream->timing.flags.DSC = 0;
+
+ params[count].timing = &stream->timing;
+ params[count].sink = stream->sink;
+ aconnector = (struct amdgpu_dm_connector *)stream->dm_stream_context;
+ params[count].port = aconnector->port;
+ params[count].compression_possible = stream->sink->sink_dsc_caps.dsc_dec_caps.is_dsc_supported;
+ dc_dsc_get_policy_for_timing(params[count].timing, &dsc_policy);
+ if (!dc_dsc_compute_bandwidth_range(
+ stream->sink->ctx->dc->res_pool->dscs[0],
+ stream->sink->ctx->dc->debug.dsc_min_slice_height_override,
+ dsc_policy.min_target_bpp,
+ dsc_policy.max_target_bpp,
+ &stream->sink->sink_dsc_caps.dsc_dec_caps,
+ &stream->timing, &params[count].bw_range))
+ params[count].bw_range.stream_kbps = dc_bandwidth_in_kbps_from_timing(&stream->timing);
+
+ count++;
+ }
+ /* Try no compression */
+ for (i = 0; i < count; i++) {
+ vars[i].pbn = kbps_to_peak_pbn(params[i].bw_range.stream_kbps);
+ vars[i].dsc_enabled = false;
+ vars[i].bpp_x16 = 0;
+ if (drm_dp_atomic_find_vcpi_slots(state,
+ params[i].port->mgr,
+ params[i].port,
+ vars[i].pbn,
+ 0) < 0)
+ return false;
+ }
+ if (!drm_dp_mst_atomic_check(state)) {
+ set_dsc_configs_from_fairness_vars(params, vars, count);
+ return true;
+ }
+
+ /* Try max compression */
+ for (i = 0; i < count; i++) {
+ if (params[i].compression_possible) {
+ vars[i].pbn = kbps_to_peak_pbn(params[i].bw_range.min_kbps);
+ vars[i].dsc_enabled = true;
+ vars[i].bpp_x16 = params[i].bw_range.min_target_bpp_x16;
+ if (drm_dp_atomic_find_vcpi_slots(state,
+ params[i].port->mgr,
+ params[i].port,
+ vars[i].pbn,
+ dm_mst_get_pbn_divider(dc_link)) < 0)
+ return false;
+ } else {
+ vars[i].pbn = kbps_to_peak_pbn(params[i].bw_range.stream_kbps);
+ vars[i].dsc_enabled = false;
+ vars[i].bpp_x16 = 0;
+ if (drm_dp_atomic_find_vcpi_slots(state,
+ params[i].port->mgr,
+ params[i].port,
+ vars[i].pbn,
+ 0) < 0)
+ return false;
+ }
+ }
+ if (drm_dp_mst_atomic_check(state))
+ return false;
+
+ /* Optimize degree of compression */
+ increase_dsc_bpp(state, dc_link, params, vars, count);
+
+ try_disable_dsc(state, dc_link, params, vars, count);
+
+ set_dsc_configs_from_fairness_vars(params, vars, count);
+
+ return true;
+}
+
+bool compute_mst_dsc_configs_for_state(struct drm_atomic_state *state,
+ struct dc_state *dc_state)
+{
+ int i, j;
+ struct dc_stream_state *stream;
+ bool computed_streams[MAX_PIPES];
+ struct amdgpu_dm_connector *aconnector;
+
+ for (i = 0; i < dc_state->stream_count; i++)
+ computed_streams[i] = false;
+
+ for (i = 0; i < dc_state->stream_count; i++) {
+ stream = dc_state->streams[i];
+
+ if (stream->signal != SIGNAL_TYPE_DISPLAY_PORT_MST)
+ continue;
+
+ aconnector = (struct amdgpu_dm_connector *)stream->dm_stream_context;
+
+ if (!aconnector || !aconnector->dc_sink)
+ continue;
+
+ if (!aconnector->dc_sink->sink_dsc_caps.dsc_dec_caps.is_dsc_supported)
+ continue;
+
+ if (computed_streams[i])
+ continue;
+
+ mutex_lock(&aconnector->mst_mgr.lock);
+ if (!compute_mst_dsc_configs_for_link(state, dc_state, stream->link)) {
+ mutex_unlock(&aconnector->mst_mgr.lock);
+ return false;
+ }
+ mutex_unlock(&aconnector->mst_mgr.lock);
+
+ for (j = 0; j < dc_state->stream_count; j++) {
+ if (dc_state->streams[j]->link == stream->link)
+ computed_streams[j] = true;
+ }
+ }
+
+ for (i = 0; i < dc_state->stream_count; i++) {
+ stream = dc_state->streams[i];
+
+ if (stream->timing.flags.DSC == 1)
+ dcn20_add_dsc_to_stream_resource(stream->ctx->dc, dc_state, stream);
+ }
+
+ return true;
+}
+
+#endif
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.h b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.h
index 2da851b40042..d6813ce67bbd 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.h
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.h
@@ -29,7 +29,14 @@
struct amdgpu_display_manager;
struct amdgpu_dm_connector;
+int dm_mst_get_pbn_divider(struct dc_link *link);
+
void amdgpu_dm_initialize_dp_connector(struct amdgpu_display_manager *dm,
struct amdgpu_dm_connector *aconnector);
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+bool compute_mst_dsc_configs_for_state(struct drm_atomic_state *state,
+ struct dc_state *dc_state);
+#endif
+
#endif
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_pp_smu.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_pp_smu.c
index 778f186b3a05..a2e1a73f66b8 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_pp_smu.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_pp_smu.c
@@ -892,7 +892,6 @@ enum pp_smu_status pp_nv_get_uclk_dpm_states(struct pp_smu *pp,
return PP_SMU_RESULT_FAIL;
}
-#ifdef CONFIG_DRM_AMD_DC_DCN2_1
enum pp_smu_status pp_rn_get_dpm_clock_table(
struct pp_smu *pp, struct dpm_clocks *clock_table)
{
@@ -974,7 +973,6 @@ enum pp_smu_status pp_rn_set_wm_ranges(struct pp_smu *pp,
return PP_SMU_RESULT_OK;
}
-#endif
void dm_pp_get_funcs(
struct dc_context *ctx,
@@ -996,7 +994,6 @@ void dm_pp_get_funcs(
funcs->rv_funcs.set_hard_min_fclk_by_freq =
pp_rv_set_hard_min_fclk_by_freq;
break;
-#ifdef CONFIG_DRM_AMD_DC_DCN2_0
case DCN_VERSION_2_0:
funcs->ctx.ver = PP_SMU_VER_NV;
funcs->nv_funcs.pp_smu.dm = ctx;
@@ -1019,16 +1016,13 @@ void dm_pp_get_funcs(
funcs->nv_funcs.get_uclk_dpm_states = pp_nv_get_uclk_dpm_states;
funcs->nv_funcs.set_pstate_handshake_support = pp_nv_set_pstate_handshake_support;
break;
-#endif
-#ifdef CONFIG_DRM_AMD_DC_DCN2_1
case DCN_VERSION_2_1:
funcs->ctx.ver = PP_SMU_VER_RN;
funcs->rn_funcs.pp_smu.dm = ctx;
funcs->rn_funcs.set_wm_ranges = pp_rn_set_wm_ranges;
funcs->rn_funcs.get_dpm_clock_table = pp_rn_get_dpm_clock_table;
break;
-#endif
default:
DRM_ERROR("smu version is not supported !\n");
break;
diff --git a/drivers/gpu/drm/amd/display/dc/Makefile b/drivers/gpu/drm/amd/display/dc/Makefile
index a160512a2f04..6e3dddc73246 100644
--- a/drivers/gpu/drm/amd/display/dc/Makefile
+++ b/drivers/gpu/drm/amd/display/dc/Makefile
@@ -25,19 +25,10 @@
DC_LIBS = basics bios calcs clk_mgr dce gpio irq virtual
-ifdef CONFIG_DRM_AMD_DC_DCN2_0
+ifdef CONFIG_DRM_AMD_DC_DCN
DC_LIBS += dcn20
-endif
-
-
-ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT
DC_LIBS += dsc
-endif
-
-ifdef CONFIG_DRM_AMD_DC_DCN1_0
DC_LIBS += dcn10 dml
-endif
-ifdef CONFIG_DRM_AMD_DC_DCN2_1
DC_LIBS += dcn21
endif
@@ -59,7 +50,7 @@ include $(AMD_DC)
DISPLAY_CORE = dc.o dc_link.o dc_resource.o dc_hw_sequencer.o dc_sink.o \
dc_surface.o dc_link_hwss.o dc_link_dp.o dc_link_ddc.o dc_debug.o dc_stream.o
-ifdef CONFIG_DRM_AMD_DC_DCN2_0
+ifdef CONFIG_DRM_AMD_DC_DCN
DISPLAY_CORE += dc_vm_helper.o
endif
@@ -70,5 +61,6 @@ AMD_DM_REG_UPDATE = $(addprefix $(AMDDALPATH)/dc/,dc_helper.o)
AMD_DISPLAY_FILES += $(AMD_DISPLAY_CORE)
AMD_DISPLAY_FILES += $(AMD_DM_REG_UPDATE)
-
-
+DC_DMUB += dc_dmub_srv.o
+AMD_DISPLAY_DMUB = $(addprefix $(AMDDALPATH)/dc/,$(DC_DMUB))
+AMD_DISPLAY_FILES += $(AMD_DISPLAY_DMUB)
diff --git a/drivers/gpu/drm/amd/display/dc/basics/Makefile b/drivers/gpu/drm/amd/display/dc/basics/Makefile
index a50a76471107..7ad0cad0f4ef 100644
--- a/drivers/gpu/drm/amd/display/dc/basics/Makefile
+++ b/drivers/gpu/drm/amd/display/dc/basics/Makefile
@@ -25,7 +25,7 @@
# subcomponents.
BASICS = conversion.o fixpt31_32.o \
- log_helpers.o vector.o
+ log_helpers.o vector.o dc_common.o
AMD_DAL_BASICS = $(addprefix $(AMDDALPATH)/dc/basics/,$(BASICS))
diff --git a/drivers/gpu/drm/amd/display/dc/basics/dc_common.c b/drivers/gpu/drm/amd/display/dc/basics/dc_common.c
new file mode 100644
index 000000000000..b2fc4f8e6482
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/basics/dc_common.c
@@ -0,0 +1,101 @@
+/*
+ * Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include "core_types.h"
+#include "dc_common.h"
+#include "basics/conversion.h"
+
+bool is_rgb_cspace(enum dc_color_space output_color_space)
+{
+ switch (output_color_space) {
+ case COLOR_SPACE_SRGB:
+ case COLOR_SPACE_SRGB_LIMITED:
+ case COLOR_SPACE_2020_RGB_FULLRANGE:
+ case COLOR_SPACE_2020_RGB_LIMITEDRANGE:
+ case COLOR_SPACE_ADOBERGB:
+ return true;
+ case COLOR_SPACE_YCBCR601:
+ case COLOR_SPACE_YCBCR709:
+ case COLOR_SPACE_YCBCR601_LIMITED:
+ case COLOR_SPACE_YCBCR709_LIMITED:
+ case COLOR_SPACE_2020_YCBCR:
+ return false;
+ default:
+ /* Unhandled color space, add a case to the switch */
+ BREAK_TO_DEBUGGER();
+ return false;
+ }
+}
+
+bool is_lower_pipe_tree_visible(struct pipe_ctx *pipe_ctx)
+{
+ if (pipe_ctx->plane_state && pipe_ctx->plane_state->visible)
+ return true;
+ if (pipe_ctx->bottom_pipe && is_lower_pipe_tree_visible(pipe_ctx->bottom_pipe))
+ return true;
+ return false;
+}
+
+bool is_upper_pipe_tree_visible(struct pipe_ctx *pipe_ctx)
+{
+ if (pipe_ctx->plane_state && pipe_ctx->plane_state->visible)
+ return true;
+ if (pipe_ctx->top_pipe && is_upper_pipe_tree_visible(pipe_ctx->top_pipe))
+ return true;
+ return false;
+}
+
+bool is_pipe_tree_visible(struct pipe_ctx *pipe_ctx)
+{
+ if (pipe_ctx->plane_state && pipe_ctx->plane_state->visible)
+ return true;
+ if (pipe_ctx->top_pipe && is_upper_pipe_tree_visible(pipe_ctx->top_pipe))
+ return true;
+ if (pipe_ctx->bottom_pipe && is_lower_pipe_tree_visible(pipe_ctx->bottom_pipe))
+ return true;
+ return false;
+}
+
+void build_prescale_params(struct dc_bias_and_scale *bias_and_scale,
+ const struct dc_plane_state *plane_state)
+{
+ if (plane_state->format >= SURFACE_PIXEL_FORMAT_VIDEO_BEGIN
+ && plane_state->format != SURFACE_PIXEL_FORMAT_INVALID
+ && plane_state->input_csc_color_matrix.enable_adjustment
+ && plane_state->coeff_reduction_factor.value != 0) {
+ bias_and_scale->scale_blue = fixed_point_to_int_frac(
+ dc_fixpt_mul(plane_state->coeff_reduction_factor,
+ dc_fixpt_from_fraction(256, 255)),
+ 2,
+ 13);
+ bias_and_scale->scale_red = bias_and_scale->scale_blue;
+ bias_and_scale->scale_green = bias_and_scale->scale_blue;
+ } else {
+ bias_and_scale->scale_blue = 0x2000;
+ bias_and_scale->scale_red = 0x2000;
+ bias_and_scale->scale_green = 0x2000;
+ }
+}
+
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr_r367.h b/drivers/gpu/drm/amd/display/dc/basics/dc_common.h
index 8bdfb3e5cd1c..7c0cbf47e8ce 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr_r367.h
+++ b/drivers/gpu/drm/amd/display/dc/basics/dc_common.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved.
+ * Copyright 2012-15 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
@@ -14,23 +14,29 @@
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
- * DEALINGS IN THE SOFTWARE.
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
*/
-#ifndef __NVKM_SECBOOT_ACR_R367_H__
-#define __NVKM_SECBOOT_ACR_R367_H__
+#ifndef __DAL_DC_COMMON_H__
+#define __DAL_DC_COMMON_H__
+
+#include "core_types.h"
+
+bool is_rgb_cspace(enum dc_color_space output_color_space);
+
+bool is_lower_pipe_tree_visible(struct pipe_ctx *pipe_ctx);
+
+bool is_upper_pipe_tree_visible(struct pipe_ctx *pipe_ctx);
-#include "acr_r352.h"
+bool is_pipe_tree_visible(struct pipe_ctx *pipe_ctx);
-void acr_r367_fixup_hs_desc(struct acr_r352 *, struct nvkm_secboot *, void *);
+void build_prescale_params(struct dc_bias_and_scale *bias_and_scale,
+ const struct dc_plane_state *plane_state);
-struct ls_ucode_img *acr_r367_ls_ucode_img_load(const struct acr_r352 *,
- const struct nvkm_secboot *,
- enum nvkm_secboot_falcon);
-int acr_r367_ls_fill_headers(struct acr_r352 *, struct list_head *);
-int acr_r367_ls_write_wpr(struct acr_r352 *, struct list_head *,
- struct nvkm_gpuobj *, u64);
#endif
diff --git a/drivers/gpu/drm/amd/display/dc/bios/bios_parser.c b/drivers/gpu/drm/amd/display/dc/bios/bios_parser.c
index 823843cd2613..008d4d11339d 100644
--- a/drivers/gpu/drm/amd/display/dc/bios/bios_parser.c
+++ b/drivers/gpu/drm/amd/display/dc/bios/bios_parser.c
@@ -111,7 +111,7 @@ struct dc_bios *bios_parser_create(
return NULL;
}
-static void destruct(struct bios_parser *bp)
+static void bios_parser_destruct(struct bios_parser *bp)
{
kfree(bp->base.bios_local_image);
kfree(bp->base.integrated_info);
@@ -126,7 +126,7 @@ static void bios_parser_destroy(struct dc_bios **dcb)
return;
}
- destruct(bp);
+ bios_parser_destruct(bp);
kfree(bp);
*dcb = NULL;
@@ -2189,7 +2189,7 @@ static uint32_t get_support_mask_for_device_id(struct device_id device_id)
break;
default:
break;
- };
+ }
/* Unidentified device ID, return empty support mask. */
return 0;
@@ -2739,7 +2739,6 @@ static enum bp_result bios_get_board_layout_info(
struct board_layout_info *board_layout_info)
{
unsigned int i;
- struct bios_parser *bp;
enum bp_result record_result;
const unsigned int slot_index_to_vbios_id[MAX_BOARD_SLOTS] = {
@@ -2748,7 +2747,6 @@ static enum bp_result bios_get_board_layout_info(
0, 0
};
- bp = BP_FROM_DCB(dcb);
if (board_layout_info == NULL) {
DC_LOG_DETECTION_EDID_PARSER("Invalid board_layout_info\n");
return BP_RESULT_BADINPUT;
diff --git a/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c b/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c
index 5c3fcaa47410..2f1c9584ac32 100644
--- a/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c
+++ b/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c
@@ -111,7 +111,7 @@ static struct atom_encoder_caps_record *get_encoder_cap_record(
#define DATA_TABLES(table) (bp->master_data_tbl->listOfdatatables.table)
-static void destruct(struct bios_parser *bp)
+static void bios_parser2_destruct(struct bios_parser *bp)
{
kfree(bp->base.bios_local_image);
kfree(bp->base.integrated_info);
@@ -126,7 +126,7 @@ static void firmware_parser_destroy(struct dc_bios **dcb)
return;
}
- destruct(bp);
+ bios_parser2_destruct(bp);
kfree(bp);
*dcb = NULL;
@@ -294,11 +294,21 @@ static enum bp_result bios_parser_get_i2c_info(struct dc_bios *dcb,
struct atom_display_object_path_v2 *object;
struct atom_common_record_header *header;
struct atom_i2c_record *record;
+ struct atom_i2c_record dummy_record = {0};
struct bios_parser *bp = BP_FROM_DCB(dcb);
if (!info)
return BP_RESULT_BADINPUT;
+ if (id.type == OBJECT_TYPE_GENERIC) {
+ dummy_record.i2c_id = id.id;
+
+ if (get_gpio_i2c_info(bp, &dummy_record, info) == BP_RESULT_OK)
+ return BP_RESULT_OK;
+ else
+ return BP_RESULT_NORECORD;
+ }
+
object = get_bios_object(bp, id);
if (!object)
@@ -341,6 +351,7 @@ static enum bp_result get_gpio_i2c_info(
struct atom_gpio_pin_lut_v2_1 *header;
uint32_t count = 0;
unsigned int table_index = 0;
+ bool find_valid = false;
if (!info)
return BP_RESULT_BADINPUT;
@@ -368,33 +379,28 @@ static enum bp_result get_gpio_i2c_info(
- sizeof(struct atom_common_table_header))
/ sizeof(struct atom_gpio_pin_assignment);
- table_index = record->i2c_id & I2C_HW_LANE_MUX;
-
- if (count < table_index) {
- bool find_valid = false;
-
- for (table_index = 0; table_index < count; table_index++) {
- if (((record->i2c_id & I2C_HW_CAP) == (
- header->gpio_pin[table_index].gpio_id &
- I2C_HW_CAP)) &&
- ((record->i2c_id & I2C_HW_ENGINE_ID_MASK) ==
- (header->gpio_pin[table_index].gpio_id &
- I2C_HW_ENGINE_ID_MASK)) &&
- ((record->i2c_id & I2C_HW_LANE_MUX) ==
- (header->gpio_pin[table_index].gpio_id &
- I2C_HW_LANE_MUX))) {
- /* still valid */
- find_valid = true;
- break;
- }
+ for (table_index = 0; table_index < count; table_index++) {
+ if (((record->i2c_id & I2C_HW_CAP) == (
+ header->gpio_pin[table_index].gpio_id &
+ I2C_HW_CAP)) &&
+ ((record->i2c_id & I2C_HW_ENGINE_ID_MASK) ==
+ (header->gpio_pin[table_index].gpio_id &
+ I2C_HW_ENGINE_ID_MASK)) &&
+ ((record->i2c_id & I2C_HW_LANE_MUX) ==
+ (header->gpio_pin[table_index].gpio_id &
+ I2C_HW_LANE_MUX))) {
+ /* still valid */
+ find_valid = true;
+ break;
}
- /* If we don't find the entry that we are looking for then
- * we will return BP_Result_BadBiosTable.
- */
- if (find_valid == false)
- return BP_RESULT_BADBIOSTABLE;
}
+ /* If we don't find the entry that we are looking for then
+ * we will return BP_Result_BadBiosTable.
+ */
+ if (find_valid == false)
+ return BP_RESULT_BADBIOSTABLE;
+
/* get the GPIO_I2C_INFO */
info->i2c_hw_assist = (record->i2c_id & I2C_HW_CAP) ? true : false;
info->i2c_line = record->i2c_id & I2C_HW_LANE_MUX;
@@ -828,6 +834,7 @@ static enum bp_result bios_parser_get_spread_spectrum_info(
case 1:
return get_ss_info_v4_1(bp, signal, index, ss_info);
case 2:
+ case 3:
return get_ss_info_v4_2(bp, signal, index, ss_info);
default:
break;
@@ -986,7 +993,7 @@ static uint32_t get_support_mask_for_device_id(struct device_id device_id)
break;
default:
break;
- };
+ }
/* Unidentified device ID, return empty support mask. */
return 0;
@@ -1205,6 +1212,8 @@ static enum bp_result get_firmware_info_v3_1(
bp->cmd_tbl.get_smu_clock_info(bp, SMU9_SYSPLL0_ID) * 10;
}
+ info->oem_i2c_present = false;
+
return BP_RESULT_OK;
}
@@ -1283,6 +1292,13 @@ static enum bp_result get_firmware_info_v3_2(
bp->cmd_tbl.get_smu_clock_info(bp, SMU11_SYSPLL3_0_ID) * 10;
}
+ if (firmware_info->board_i2c_feature_id == 0x2) {
+ info->oem_i2c_present = true;
+ info->oem_i2c_obj_id = firmware_info->board_i2c_feature_gpio_id;
+ } else {
+ info->oem_i2c_present = false;
+ }
+
return BP_RESULT_OK;
}
@@ -1402,10 +1418,8 @@ static enum bp_result get_integrated_info_v11(
info->ma_channel_number = info_v11->umachannelnumber;
info->lvds_ss_percentage =
le16_to_cpu(info_v11->lvds_ss_percentage);
-#ifdef CONFIG_DRM_AMD_DC_DCN2_0
info->dp_ss_control =
le16_to_cpu(info_v11->reserved1);
-#endif
info->lvds_sspread_rate_in_10hz =
le16_to_cpu(info_v11->lvds_ss_rate_10hz);
info->hdmi_ss_percentage =
@@ -1826,7 +1840,6 @@ static enum bp_result bios_get_board_layout_info(
struct board_layout_info *board_layout_info)
{
unsigned int i;
- struct bios_parser *bp;
enum bp_result record_result;
const unsigned int slot_index_to_vbios_id[MAX_BOARD_SLOTS] = {
@@ -1835,7 +1848,6 @@ static enum bp_result bios_get_board_layout_info(
0, 0
};
- bp = BP_FROM_DCB(dcb);
if (board_layout_info == NULL) {
DC_LOG_DETECTION_EDID_PARSER("Invalid board_layout_info\n");
return BP_RESULT_BADINPUT;
@@ -1915,7 +1927,7 @@ static const struct dc_vbios_funcs vbios_funcs = {
.get_board_layout_info = bios_get_board_layout_info,
};
-static bool bios_parser_construct(
+static bool bios_parser2_construct(
struct bios_parser *bp,
struct bp_init_data *init,
enum dce_version dce_version)
@@ -2008,7 +2020,7 @@ struct dc_bios *firmware_parser_create(
if (!bp)
return NULL;
- if (bios_parser_construct(bp, init, dce_version))
+ if (bios_parser2_construct(bp, init, dce_version))
return &bp->base;
kfree(bp);
diff --git a/drivers/gpu/drm/amd/display/dc/bios/command_table2.c b/drivers/gpu/drm/amd/display/dc/bios/command_table2.c
index bb2e8105e6ab..2cb7a4288cb7 100644
--- a/drivers/gpu/drm/amd/display/dc/bios/command_table2.c
+++ b/drivers/gpu/drm/amd/display/dc/bios/command_table2.c
@@ -37,6 +37,8 @@
#include "bios_parser_types_internal2.h"
#include "amdgpu.h"
+#include "dc_dmub_srv.h"
+#include "dc.h"
#define DC_LOGGER \
bp->base.ctx->logger
@@ -103,6 +105,21 @@ static void init_dig_encoder_control(struct bios_parser *bp)
}
}
+static void encoder_control_dmcub(
+ struct dc_dmub_srv *dmcub,
+ struct dig_encoder_stream_setup_parameters_v1_5 *dig)
+{
+ struct dmub_rb_cmd_digx_encoder_control encoder_control = { 0 };
+
+ encoder_control.header.type = DMUB_CMD__VBIOS;
+ encoder_control.header.sub_type = DMUB_CMD__VBIOS_DIGX_ENCODER_CONTROL;
+ encoder_control.encoder_control.dig.stream_param = *dig;
+
+ dc_dmub_srv_cmd_queue(dmcub, &encoder_control.header);
+ dc_dmub_srv_cmd_execute(dmcub);
+ dc_dmub_srv_wait_idle(dmcub);
+}
+
static enum bp_result encoder_control_digx_v1_5(
struct bios_parser *bp,
struct bp_encoder_control *cntl)
@@ -155,6 +172,12 @@ static enum bp_result encoder_control_digx_v1_5(
break;
}
+ if (bp->base.ctx->dc->ctx->dmub_srv &&
+ bp->base.ctx->dc->debug.dmub_command_table) {
+ encoder_control_dmcub(bp->base.ctx->dmub_srv, &params);
+ return BP_RESULT_OK;
+ }
+
if (EXEC_BIOS_CMD_TABLE(digxencodercontrol, params))
result = BP_RESULT_OK;
@@ -191,6 +214,22 @@ static void init_transmitter_control(struct bios_parser *bp)
}
}
+static void transmitter_control_dmcub(
+ struct dc_dmub_srv *dmcub,
+ struct dig_transmitter_control_parameters_v1_6 *dig)
+{
+ struct dmub_rb_cmd_dig1_transmitter_control transmitter_control = { 0 };
+
+ transmitter_control.header.type = DMUB_CMD__VBIOS;
+ transmitter_control.header.sub_type =
+ DMUB_CMD__VBIOS_DIG1_TRANSMITTER_CONTROL;
+ transmitter_control.transmitter_control.dig = *dig;
+
+ dc_dmub_srv_cmd_queue(dmcub, &transmitter_control.header);
+ dc_dmub_srv_cmd_execute(dmcub);
+ dc_dmub_srv_wait_idle(dmcub);
+}
+
static enum bp_result transmitter_control_v1_6(
struct bios_parser *bp,
struct bp_transmitter_control *cntl)
@@ -222,6 +261,11 @@ static enum bp_result transmitter_control_v1_6(
__func__, ps.param.symclk_10khz);
}
+ if (bp->base.ctx->dc->ctx->dmub_srv &&
+ bp->base.ctx->dc->debug.dmub_command_table) {
+ transmitter_control_dmcub(bp->base.ctx->dmub_srv, &ps.param);
+ return BP_RESULT_OK;
+ }
/*color_depth not used any more, driver has deep color factor in the Phyclk*/
if (EXEC_BIOS_CMD_TABLE(dig1transmittercontrol, ps))
@@ -255,7 +299,20 @@ static void init_set_pixel_clock(struct bios_parser *bp)
}
}
+static void set_pixel_clock_dmcub(
+ struct dc_dmub_srv *dmcub,
+ struct set_pixel_clock_parameter_v1_7 *clk)
+{
+ struct dmub_rb_cmd_set_pixel_clock pixel_clock = { 0 };
+
+ pixel_clock.header.type = DMUB_CMD__VBIOS;
+ pixel_clock.header.sub_type = DMUB_CMD__VBIOS_SET_PIXEL_CLOCK;
+ pixel_clock.pixel_clock.clk = *clk;
+ dc_dmub_srv_cmd_queue(dmcub, &pixel_clock.header);
+ dc_dmub_srv_cmd_execute(dmcub);
+ dc_dmub_srv_wait_idle(dmcub);
+}
static enum bp_result set_pixel_clock_v7(
struct bios_parser *bp,
@@ -331,6 +388,12 @@ static enum bp_result set_pixel_clock_v7(
if (bp_params->signal_type == SIGNAL_TYPE_DVI_DUAL_LINK)
clk.miscinfo |= PIXEL_CLOCK_V7_MISC_DVI_DUALLINK_EN;
+ if (bp->base.ctx->dc->ctx->dmub_srv &&
+ bp->base.ctx->dc->debug.dmub_command_table) {
+ set_pixel_clock_dmcub(bp->base.ctx->dmub_srv, &clk);
+ return BP_RESULT_OK;
+ }
+
if (EXEC_BIOS_CMD_TABLE(setpixelclock, clk))
result = BP_RESULT_OK;
}
@@ -585,6 +648,21 @@ static void init_enable_disp_power_gating(
}
}
+static void enable_disp_power_gating_dmcub(
+ struct dc_dmub_srv *dmcub,
+ struct enable_disp_power_gating_parameters_v2_1 *pwr)
+{
+ struct dmub_rb_cmd_enable_disp_power_gating power_gating = { 0 };
+
+ power_gating.header.type = DMUB_CMD__VBIOS;
+ power_gating.header.sub_type = DMUB_CMD__VBIOS_ENABLE_DISP_POWER_GATING;
+ power_gating.power_gating.pwr = *pwr;
+
+ dc_dmub_srv_cmd_queue(dmcub, &power_gating.header);
+ dc_dmub_srv_cmd_execute(dmcub);
+ dc_dmub_srv_wait_idle(dmcub);
+}
+
static enum bp_result enable_disp_power_gating_v2_1(
struct bios_parser *bp,
enum controller_id crtc_id,
@@ -604,6 +682,13 @@ static enum bp_result enable_disp_power_gating_v2_1(
ps.param.enable =
bp->cmd_helper->disp_power_gating_action_to_atom(action);
+ if (bp->base.ctx->dc->ctx->dmub_srv &&
+ bp->base.ctx->dc->debug.dmub_command_table) {
+ enable_disp_power_gating_dmcub(bp->base.ctx->dmub_srv,
+ &ps.param);
+ return BP_RESULT_OK;
+ }
+
if (EXEC_BIOS_CMD_TABLE(enabledisppowergating, ps.param))
result = BP_RESULT_OK;
diff --git a/drivers/gpu/drm/amd/display/dc/bios/command_table_helper2.c b/drivers/gpu/drm/amd/display/dc/bios/command_table_helper2.c
index db153ddf0fee..7388c987c595 100644
--- a/drivers/gpu/drm/amd/display/dc/bios/command_table_helper2.c
+++ b/drivers/gpu/drm/amd/display/dc/bios/command_table_helper2.c
@@ -55,23 +55,19 @@ bool dal_bios_parser_init_cmd_tbl_helper2(
case DCE_VERSION_11_22:
*h = dal_cmd_tbl_helper_dce112_get_table2();
return true;
-#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
+#if defined(CONFIG_DRM_AMD_DC_DCN)
case DCN_VERSION_1_0:
case DCN_VERSION_1_01:
*h = dal_cmd_tbl_helper_dce112_get_table2();
return true;
#endif
-#if defined(CONFIG_DRM_AMD_DC_DCN2_0)
case DCN_VERSION_2_0:
*h = dal_cmd_tbl_helper_dce112_get_table2();
return true;
-#endif
-#if defined(CONFIG_DRM_AMD_DC_DCN2_1)
case DCN_VERSION_2_1:
*h = dal_cmd_tbl_helper_dce112_get_table2();
return true;
-#endif
case DCE_VERSION_12_0:
case DCE_VERSION_12_1:
*h = dal_cmd_tbl_helper_dce112_get_table2();
diff --git a/drivers/gpu/drm/amd/display/dc/calcs/Makefile b/drivers/gpu/drm/amd/display/dc/calcs/Makefile
index 26c6d735cdc7..4674aca8f206 100644
--- a/drivers/gpu/drm/amd/display/dc/calcs/Makefile
+++ b/drivers/gpu/drm/amd/display/dc/calcs/Makefile
@@ -1,5 +1,6 @@
#
# Copyright 2017 Advanced Micro Devices, Inc.
+# Copyright 2019 Raptor Engineering, LLC
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
@@ -24,7 +25,13 @@
# It calculates Bandwidth and Watermarks values for HW programming
#
+ifdef CONFIG_X86
calcs_ccflags := -mhard-float -msse
+endif
+
+ifdef CONFIG_PPC64
+calcs_ccflags := -mhard-float -maltivec
+endif
ifdef CONFIG_CC_IS_GCC
ifeq ($(call cc-ifversion, -lt, 0701, y), y)
@@ -32,6 +39,7 @@ IS_OLD_GCC = 1
endif
endif
+ifdef CONFIG_X86
ifdef IS_OLD_GCC
# Stack alignment mismatch, proceed with caution.
# GCC < 7.1 cannot compile code using `double` and -mpreferred-stack-boundary=3
@@ -40,6 +48,7 @@ calcs_ccflags += -mpreferred-stack-boundary=4
else
calcs_ccflags += -msse2
endif
+endif
CFLAGS_$(AMDDALPATH)/dc/calcs/dcn_calcs.o := $(calcs_ccflags)
CFLAGS_$(AMDDALPATH)/dc/calcs/dcn_calc_auto.o := $(calcs_ccflags)
@@ -47,7 +56,7 @@ CFLAGS_$(AMDDALPATH)/dc/calcs/dcn_calc_math.o := $(calcs_ccflags) -Wno-tautologi
BW_CALCS = dce_calcs.o bw_fixed.o custom_float.o
-ifdef CONFIG_DRM_AMD_DC_DCN1_0
+ifdef CONFIG_DRM_AMD_DC_DCN
BW_CALCS += dcn_calcs.o dcn_calc_math.o dcn_calc_auto.o
endif
diff --git a/drivers/gpu/drm/amd/display/dc/calcs/dce_calcs.c b/drivers/gpu/drm/amd/display/dc/calcs/dce_calcs.c
index a1d49256fab7..5d081c42e81b 100644
--- a/drivers/gpu/drm/amd/display/dc/calcs/dce_calcs.c
+++ b/drivers/gpu/drm/amd/display/dc/calcs/dce_calcs.c
@@ -154,14 +154,14 @@ static void calculate_bandwidth(
- if (data->d0_underlay_mode == bw_def_none) { d0_underlay_enable = 0; }
- else {
- d0_underlay_enable = 1;
- }
- if (data->d1_underlay_mode == bw_def_none) { d1_underlay_enable = 0; }
- else {
- d1_underlay_enable = 1;
- }
+ if (data->d0_underlay_mode == bw_def_none)
+ d0_underlay_enable = false;
+ else
+ d0_underlay_enable = true;
+ if (data->d1_underlay_mode == bw_def_none)
+ d1_underlay_enable = false;
+ else
+ d1_underlay_enable = true;
data->number_of_underlay_surfaces = d0_underlay_enable + d1_underlay_enable;
switch (data->underlay_surface_type) {
case bw_def_420:
@@ -286,8 +286,8 @@ static void calculate_bandwidth(
data->cursor_width_pixels[2] = bw_int_to_fixed(0);
data->cursor_width_pixels[3] = bw_int_to_fixed(0);
/* graphics surface parameters from spreadsheet*/
- fbc_enabled = 0;
- lpt_enabled = 0;
+ fbc_enabled = false;
+ lpt_enabled = false;
for (i = 4; i <= maximum_number_of_surfaces - 3; i++) {
if (i < data->number_of_displays + 4) {
if (i == 4 && data->d0_underlay_mode == bw_def_underlay_only) {
@@ -338,9 +338,9 @@ static void calculate_bandwidth(
data->access_one_channel_only[i] = 0;
}
if (data->fbc_en[i] == 1) {
- fbc_enabled = 1;
+ fbc_enabled = true;
if (data->lpt_en[i] == 1) {
- lpt_enabled = 1;
+ lpt_enabled = true;
}
}
data->cursor_width_pixels[i] = bw_int_to_fixed(vbios->cursor_width);
diff --git a/drivers/gpu/drm/amd/display/dc/calcs/dcn_calcs.c b/drivers/gpu/drm/amd/display/dc/calcs/dcn_calcs.c
index 9b2cb57bf2ba..a27d84ca15a5 100644
--- a/drivers/gpu/drm/amd/display/dc/calcs/dcn_calcs.c
+++ b/drivers/gpu/drm/amd/display/dc/calcs/dcn_calcs.c
@@ -1,5 +1,6 @@
/*
* Copyright 2017 Advanced Micro Devices, Inc.
+ * Copyright 2019 Raptor Engineering, LLC
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
@@ -53,13 +54,9 @@
* remain as-is as it provides us with a guarantee from HW that it is correct.
*/
-#ifdef CONFIG_DRM_AMD_DC_DCN2_0
/* Defaults from spreadsheet rev#247.
* RV2 delta: dram_clock_change_latency, max_num_dpp
*/
-#else
-/* Defaults from spreadsheet rev#247 */
-#endif
const struct dcn_soc_bounding_box dcn10_soc_defaults = {
/* latencies */
.sr_exit_time = 17, /*us*/
@@ -626,7 +623,7 @@ static bool dcn_bw_apply_registry_override(struct dc *dc)
{
bool updated = false;
- kernel_fpu_begin();
+ DC_FP_START();
if ((int)(dc->dcn_soc->sr_exit_time * 1000) != dc->debug.sr_exit_time_ns
&& dc->debug.sr_exit_time_ns) {
updated = true;
@@ -662,7 +659,7 @@ static bool dcn_bw_apply_registry_override(struct dc *dc)
dc->dcn_soc->dram_clock_change_latency =
dc->debug.dram_clock_change_latency_ns / 1000.0;
}
- kernel_fpu_end();
+ DC_FP_END();
return updated;
}
@@ -708,8 +705,8 @@ static void hack_bounding_box(struct dcn_bw_internal_vars *v,
unsigned int get_highest_allowed_voltage_level(uint32_t hw_internal_rev)
{
- /* for dali, the highest voltage level we want is 0 */
- if (ASICREV_IS_DALI(hw_internal_rev))
+ /* for dali & pollock, the highest voltage level we want is 0 */
+ if (ASICREV_IS_POLLOCK(hw_internal_rev) || ASICREV_IS_DALI(hw_internal_rev))
return 0;
/* we are ok with all levels */
@@ -742,7 +739,7 @@ bool dcn_validate_bandwidth(
dcn_bw_sync_calcs_and_dml(dc);
memset(v, 0, sizeof(*v));
- kernel_fpu_begin();
+ DC_FP_START();
v->sr_exit_time = dc->dcn_soc->sr_exit_time;
v->sr_enter_plus_exit_time = dc->dcn_soc->sr_enter_plus_exit_time;
@@ -1275,7 +1272,7 @@ bool dcn_validate_bandwidth(
bw_limit = dc->dcn_soc->percent_disp_bw_limit * v->fabric_and_dram_bandwidth_vmax0p9;
bw_limit_pass = (v->total_data_read_bandwidth / 1000.0) < bw_limit;
- kernel_fpu_end();
+ DC_FP_END();
PERFORMANCE_TRACE_END();
BW_VAL_TRACE_FINISH();
@@ -1443,7 +1440,7 @@ void dcn_bw_update_from_pplib(struct dc *dc)
res = dm_pp_get_clock_levels_by_type_with_voltage(
ctx, DM_PP_CLOCK_TYPE_FCLK, &fclks);
- kernel_fpu_begin();
+ DC_FP_START();
if (res)
res = verify_clock_values(&fclks);
@@ -1463,12 +1460,12 @@ void dcn_bw_update_from_pplib(struct dc *dc)
} else
BREAK_TO_DEBUGGER();
- kernel_fpu_end();
+ DC_FP_END();
res = dm_pp_get_clock_levels_by_type_with_voltage(
ctx, DM_PP_CLOCK_TYPE_DCFCLK, &dcfclks);
- kernel_fpu_begin();
+ DC_FP_START();
if (res)
res = verify_clock_values(&dcfclks);
@@ -1481,7 +1478,7 @@ void dcn_bw_update_from_pplib(struct dc *dc)
} else
BREAK_TO_DEBUGGER();
- kernel_fpu_end();
+ DC_FP_END();
}
void dcn_bw_notify_pplib_of_wm_ranges(struct dc *dc)
@@ -1496,11 +1493,11 @@ void dcn_bw_notify_pplib_of_wm_ranges(struct dc *dc)
if (!pp || !pp->set_wm_ranges)
return;
- kernel_fpu_begin();
+ DC_FP_START();
min_fclk_khz = dc->dcn_soc->fabric_and_dram_bandwidth_vmin0p65 * 1000000 / 32;
min_dcfclk_khz = dc->dcn_soc->dcfclkv_min0p65 * 1000;
socclk_khz = dc->dcn_soc->socclk * 1000;
- kernel_fpu_end();
+ DC_FP_END();
/* Now notify PPLib/SMU about which Watermarks sets they should select
* depending on DPM state they are in. And update BW MGR GFX Engine and
@@ -1551,7 +1548,7 @@ void dcn_bw_notify_pplib_of_wm_ranges(struct dc *dc)
void dcn_bw_sync_calcs_and_dml(struct dc *dc)
{
- kernel_fpu_begin();
+ DC_FP_START();
DC_LOG_BANDWIDTH_CALCS("sr_exit_time: %f ns\n"
"sr_enter_plus_exit_time: %f ns\n"
"urgent_latency: %f ns\n"
@@ -1740,5 +1737,5 @@ void dcn_bw_sync_calcs_and_dml(struct dc *dc)
dc->dml.ip.bug_forcing_LC_req_same_size_fixed =
dc->dcn_ip->bug_forcing_luma_and_chroma_request_to_same_size_fixed == dcn_bw_yes;
dc->dml.ip.dcfclk_cstate_latency = dc->dcn_ip->dcfclk_cstate_latency;
- kernel_fpu_end();
+ DC_FP_END();
}
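[Editor's note: the dcn_calcs.c hunks replace direct kernel_fpu_begin()/end() calls with DC_FP_START()/DC_FP_END(), which lets each architecture supply its own FPU-context guard now that PPC64 is supported alongside x86. The mapping below is a hedged sketch of how such a macro pair could be defined; it is an assumption for illustration, not the actual dc header.]

/* Arch-neutral FPU guard, assumed mapping for illustration only. */
#if defined(CONFIG_X86)
#include <asm/fpu/api.h>
#define DC_FP_START() kernel_fpu_begin()
#define DC_FP_END()   kernel_fpu_end()
#elif defined(CONFIG_PPC64)
#include <asm/switch_to.h>
#define DC_FP_START() enable_kernel_fp()    /* real code may also handle AltiVec/VSX */
#define DC_FP_END()   disable_kernel_fp()
#else
#define DC_FP_START() do { } while (0)
#define DC_FP_END()   do { } while (0)
#endif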
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/Makefile b/drivers/gpu/drm/amd/display/dc/clk_mgr/Makefile
index b864869cc7e3..3cd283195091 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/Makefile
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/Makefile
@@ -63,7 +63,7 @@ CLK_MGR_DCE120 = dce120_clk_mgr.o
AMD_DAL_CLK_MGR_DCE120 = $(addprefix $(AMDDALPATH)/dc/clk_mgr/dce120/,$(CLK_MGR_DCE120))
AMD_DISPLAY_FILES += $(AMD_DAL_CLK_MGR_DCE120)
-ifdef CONFIG_DRM_AMD_DC_DCN1_0
+ifdef CONFIG_DRM_AMD_DC_DCN
###############################################################################
# DCN10
###############################################################################
@@ -72,9 +72,7 @@ CLK_MGR_DCN10 = rv1_clk_mgr.o rv1_clk_mgr_vbios_smu.o rv2_clk_mgr.o
AMD_DAL_CLK_MGR_DCN10 = $(addprefix $(AMDDALPATH)/dc/clk_mgr/dcn10/,$(CLK_MGR_DCN10))
AMD_DISPLAY_FILES += $(AMD_DAL_CLK_MGR_DCN10)
-endif
-ifdef CONFIG_DRM_AMD_DC_DCN2_0
###############################################################################
# DCN20
###############################################################################
@@ -83,9 +81,7 @@ CLK_MGR_DCN20 = dcn20_clk_mgr.o
AMD_DAL_CLK_MGR_DCN20 = $(addprefix $(AMDDALPATH)/dc/clk_mgr/dcn20/,$(CLK_MGR_DCN20))
AMD_DISPLAY_FILES += $(AMD_DAL_CLK_MGR_DCN20)
-endif
-ifdef CONFIG_DRM_AMD_DC_DCN2_1
###############################################################################
# DCN21
###############################################################################
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/clk_mgr.c
index 8828dd9c3783..a78e5c74c79c 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/clk_mgr.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/clk_mgr.c
@@ -37,9 +37,7 @@
#include "dcn10/rv1_clk_mgr.h"
#include "dcn10/rv2_clk_mgr.h"
#include "dcn20/dcn20_clk_mgr.h"
-#if defined(CONFIG_DRM_AMD_DC_DCN2_1)
#include "dcn21/rn_clk_mgr.h"
-#endif
int clk_mgr_helper_get_active_display_cnt(
@@ -134,14 +132,19 @@ struct clk_mgr *dc_clk_mgr_create(struct dc_context *ctx, struct pp_smu_funcs *p
dce120_clk_mgr_construct(ctx, clk_mgr);
break;
-#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
+#if defined(CONFIG_DRM_AMD_DC_DCN)
case FAMILY_RV:
-#if defined(CONFIG_DRM_AMD_DC_DCN2_1)
+ if (ASICREV_IS_DALI(asic_id.hw_internal_rev) ||
+ ASICREV_IS_POLLOCK(asic_id.hw_internal_rev)) {
+ /* TEMP: this check has to come before ASICREV_IS_RENOIR */
+ /* which also incorrectly returns true for Dali/Pollock */
+ rv2_clk_mgr_construct(ctx, clk_mgr, pp_smu);
+ break;
+ }
if (ASICREV_IS_RENOIR(asic_id.hw_internal_rev)) {
rn_clk_mgr_construct(ctx, clk_mgr, pp_smu, dccg);
break;
}
-#endif /* DCN2_1 */
if (ASICREV_IS_RAVEN2(asic_id.hw_internal_rev)) {
rv2_clk_mgr_construct(ctx, clk_mgr, pp_smu);
break;
@@ -152,13 +155,11 @@ struct clk_mgr *dc_clk_mgr_create(struct dc_context *ctx, struct pp_smu_funcs *p
break;
}
break;
-#endif /* Family RV */
-#if defined(CONFIG_DRM_AMD_DC_DCN2_0)
case FAMILY_NV:
dcn20_clk_mgr_construct(ctx, clk_mgr, pp_smu, dccg);
break;
-#endif /* Family NV */
+#endif /* Family RV and NV*/
default:
ASSERT(0); /* Unknown Asic */
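[Editor's note: the FAMILY_RV block above depends on check ordering -- per the TEMP comment, ASICREV_IS_RENOIR also matches Dali/Pollock, so the narrower predicate must run first. A minimal sketch of that constraint; the numeric rev ranges are made up for the example.]

#include <stdbool.h>
#include <stdio.h>

static bool is_renoir(unsigned int rev)  { return rev >= 0x91; }               /* broad range */
static bool is_pollock(unsigned int rev) { return rev >= 0xA0 && rev < 0xB0; } /* narrower, inside it */

static const char *pick_clk_mgr(unsigned int rev)
{
    if (is_pollock(rev))        /* narrow check first, as in the patch */
        return "rv2_clk_mgr";
    if (is_renoir(rev))
        return "rn_clk_mgr";
    return "rv1_clk_mgr";
}

int main(void)
{
    printf("%s\n", pick_clk_mgr(0xA5)); /* rv2_clk_mgr, not rn_clk_mgr */
    return 0;
}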
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dce112/dce112_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dce112/dce112_clk_mgr.c
index a6c46e903ff9..d031bd3d3072 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dce112/dce112_clk_mgr.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dce112/dce112_clk_mgr.c
@@ -72,8 +72,8 @@ int dce112_set_clock(struct clk_mgr *clk_mgr_base, int requested_clk_khz)
struct clk_mgr_internal *clk_mgr_dce = TO_CLK_MGR_INTERNAL(clk_mgr_base);
struct bp_set_dce_clock_parameters dce_clk_params;
struct dc_bios *bp = clk_mgr_base->ctx->dc_bios;
- struct dc *core_dc = clk_mgr_base->ctx->dc;
- struct dmcu *dmcu = core_dc->res_pool->dmcu;
+ struct dc *dc = clk_mgr_base->ctx->dc;
+ struct dmcu *dmcu = dc->res_pool->dmcu;
int actual_clock = requested_clk_khz;
/* Prepare to program display clock */
memset(&dce_clk_params, 0, sizeof(dce_clk_params));
@@ -110,7 +110,7 @@ int dce112_set_clock(struct clk_mgr *clk_mgr_base, int requested_clk_khz)
bp->funcs->set_dce_clock(bp, &dce_clk_params);
- if (!IS_FPGA_MAXIMUS_DC(core_dc->ctx->dce_environment)) {
+ if (!IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment)) {
if (dmcu && dmcu->funcs->is_dmcu_initialized(dmcu)) {
if (clk_mgr_dce->dfs_bypass_disp_clk != actual_clock)
dmcu->funcs->set_psr_wait_loop(dmcu,
@@ -126,8 +126,8 @@ int dce112_set_dispclk(struct clk_mgr_internal *clk_mgr, int requested_clk_khz)
{
struct bp_set_dce_clock_parameters dce_clk_params;
struct dc_bios *bp = clk_mgr->base.ctx->dc_bios;
- struct dc *core_dc = clk_mgr->base.ctx->dc;
- struct dmcu *dmcu = core_dc->res_pool->dmcu;
+ struct dc *dc = clk_mgr->base.ctx->dc;
+ struct dmcu *dmcu = dc->res_pool->dmcu;
int actual_clock = requested_clk_khz;
/* Prepare to program display clock */
memset(&dce_clk_params, 0, sizeof(dce_clk_params));
@@ -152,7 +152,7 @@ int dce112_set_dispclk(struct clk_mgr_internal *clk_mgr, int requested_clk_khz)
clk_mgr->cur_min_clks_state = DM_PP_CLOCKS_STATE_NOMINAL;
- if (!IS_FPGA_MAXIMUS_DC(core_dc->ctx->dce_environment)) {
+ if (!IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment)) {
if (dmcu && dmcu->funcs->is_dmcu_initialized(dmcu)) {
if (clk_mgr->dfs_bypass_disp_clk != actual_clock)
dmcu->funcs->set_psr_wait_loop(dmcu,
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn10/rv1_clk_mgr_vbios_smu.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn10/rv1_clk_mgr_vbios_smu.c
index 1897e91c8ccb..97b7f32294fd 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn10/rv1_clk_mgr_vbios_smu.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn10/rv1_clk_mgr_vbios_smu.c
@@ -88,8 +88,8 @@ int rv1_vbios_smu_send_msg_with_param(struct clk_mgr_internal *clk_mgr, unsigned
int rv1_vbios_smu_set_dispclk(struct clk_mgr_internal *clk_mgr, int requested_dispclk_khz)
{
int actual_dispclk_set_mhz = -1;
- struct dc *core_dc = clk_mgr->base.ctx->dc;
- struct dmcu *dmcu = core_dc->res_pool->dmcu;
+ struct dc *dc = clk_mgr->base.ctx->dc;
+ struct dmcu *dmcu = dc->res_pool->dmcu;
/* Unit of SMU msg parameter is MHz */
actual_dispclk_set_mhz = rv1_vbios_smu_send_msg_with_param(
@@ -100,7 +100,7 @@ int rv1_vbios_smu_set_dispclk(struct clk_mgr_internal *clk_mgr, int requested_di
/* Actual dispclk set is returned in the parameter register */
actual_dispclk_set_mhz = REG_READ(MP1_SMN_C2PMSG_83) * 1000;
- if (!IS_FPGA_MAXIMUS_DC(core_dc->ctx->dce_environment)) {
+ if (!IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment)) {
if (dmcu && dmcu->funcs->is_dmcu_initialized(dmcu)) {
if (clk_mgr->dfs_bypass_disp_clk != actual_dispclk_set_mhz)
dmcu->funcs->set_psr_wait_loop(dmcu,
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn20/dcn20_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn20/dcn20_clk_mgr.c
index 25d7b7c6681c..495f01e9f2ca 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn20/dcn20_clk_mgr.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn20/dcn20_clk_mgr.c
@@ -27,6 +27,7 @@
#include "clk_mgr_internal.h"
#include "dce100/dce_clk_mgr.h"
+#include "dcn20_clk_mgr.h"
#include "reg_helper.h"
#include "core_types.h"
#include "dm_helpers.h"
@@ -100,13 +101,13 @@ uint32_t dentist_get_did_from_divider(int divider)
}
void dcn20_update_clocks_update_dpp_dto(struct clk_mgr_internal *clk_mgr,
- struct dc_state *context)
+ struct dc_state *context, bool safe_to_lower)
{
int i;
clk_mgr->dccg->ref_dppclk = clk_mgr->base.clks.dppclk_khz;
for (i = 0; i < clk_mgr->base.ctx->dc->res_pool->pipe_count; i++) {
- int dpp_inst, dppclk_khz;
+ int dpp_inst, dppclk_khz, prev_dppclk_khz;
/* Loop index will match dpp->inst if resource exists,
* and we want to avoid dependency on dpp object
@@ -114,8 +115,12 @@ void dcn20_update_clocks_update_dpp_dto(struct clk_mgr_internal *clk_mgr,
dpp_inst = i;
dppclk_khz = context->res_ctx.pipe_ctx[i].plane_res.bw.dppclk_khz;
- clk_mgr->dccg->funcs->update_dpp_dto(
- clk_mgr->dccg, dpp_inst, dppclk_khz);
+ prev_dppclk_khz = clk_mgr->base.ctx->dc->current_state->res_ctx.pipe_ctx[i].plane_res.bw.dppclk_khz;
+
+ if (safe_to_lower || prev_dppclk_khz < dppclk_khz) {
+ clk_mgr->dccg->funcs->update_dpp_dto(
+ clk_mgr->dccg, dpp_inst, dppclk_khz);
+ }
}
}
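[Editor's note: the safe_to_lower parameter threaded through dcn20_update_clocks_update_dpp_dto() encodes an asymmetric rule -- a per-pipe DTO is always reprogrammed when the clock rises, but is only lowered once the caller says it is safe. A plain-C model of that rule, not the dccg API.]

#include <stdbool.h>
#include <stdio.h>

static void update_dto(int pipe, int prev_khz, int new_khz, bool safe_to_lower)
{
    if (safe_to_lower || prev_khz < new_khz)
        printf("pipe %d: DTO %d -> %d kHz\n", pipe, prev_khz, new_khz);
    else
        printf("pipe %d: hold %d kHz (not safe to lower yet)\n", pipe, prev_khz);
}

int main(void)
{
    update_dto(0, 400000, 600000, false); /* raise: always allowed */
    update_dto(1, 600000, 400000, false); /* lower: deferred */
    update_dto(1, 600000, 400000, true);  /* lower: now applied */
    return 0;
}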
@@ -161,6 +166,9 @@ void dcn2_update_clocks(struct clk_mgr *clk_mgr_base,
dc->debug.force_clock_mode & 0x1) {
//This is from resume or boot up; if the forced_clock cfg option is used, we bypass programming dispclk and DPPCLK, but need to set them for S3.
force_reset = true;
+
+ dcn2_read_clocks_from_hw_dentist(clk_mgr_base);
+
//force_clock_mode 0x1: force reset the clock even if it is the same clock, as long as it is in Passive level.
}
display_count = clk_mgr_helper_get_active_display_cnt(dc, context);
@@ -240,7 +248,7 @@ void dcn2_update_clocks(struct clk_mgr *clk_mgr_base,
if (dc->config.forced_clocks == false || (force_reset && safe_to_lower)) {
if (dpp_clock_lowered) {
// if clock is being lowered, increase DTO before lowering refclk
- dcn20_update_clocks_update_dpp_dto(clk_mgr, context);
+ dcn20_update_clocks_update_dpp_dto(clk_mgr, context, safe_to_lower);
dcn20_update_clocks_update_dentist(clk_mgr);
} else {
// if clock is being raised, increase refclk before lowering DTO
@@ -248,7 +256,7 @@ void dcn2_update_clocks(struct clk_mgr *clk_mgr_base,
dcn20_update_clocks_update_dentist(clk_mgr);
// always update dtos unless clock is lowered and not safe to lower
if (new_clocks->dppclk_khz >= dc->current_state->bw_ctx.bw.dcn.clk.dppclk_khz)
- dcn20_update_clocks_update_dpp_dto(clk_mgr, context);
+ dcn20_update_clocks_update_dpp_dto(clk_mgr, context, safe_to_lower);
}
}
@@ -339,6 +347,32 @@ void dcn2_enable_pme_wa(struct clk_mgr *clk_mgr_base)
}
}
+
+void dcn2_read_clocks_from_hw_dentist(struct clk_mgr *clk_mgr_base)
+{
+ struct clk_mgr_internal *clk_mgr = TO_CLK_MGR_INTERNAL(clk_mgr_base);
+ uint32_t dispclk_wdivider;
+ uint32_t dppclk_wdivider;
+ int disp_divider;
+ int dpp_divider;
+
+ REG_GET(DENTIST_DISPCLK_CNTL, DENTIST_DISPCLK_WDIVIDER, &dispclk_wdivider);
+ REG_GET(DENTIST_DISPCLK_CNTL, DENTIST_DPPCLK_WDIVIDER, &dppclk_wdivider);
+
+ disp_divider = dentist_get_divider_from_did(dispclk_wdivider);
+ dpp_divider = dentist_get_divider_from_did(dppclk_wdivider);
+
+ if (disp_divider && dpp_divider) {
+ /* Calculate the current DFS clock, in kHz. */
+ clk_mgr_base->clks.dispclk_khz = (DENTIST_DIVIDER_RANGE_SCALE_FACTOR
+ * clk_mgr->base.dentist_vco_freq_khz) / disp_divider;
+
+ clk_mgr_base->clks.dppclk_khz = (DENTIST_DIVIDER_RANGE_SCALE_FACTOR
+ * clk_mgr->base.dentist_vco_freq_khz) / dpp_divider;
+ }
+
+}
+
void dcn2_get_clock(struct clk_mgr *clk_mgr,
struct dc_state *context,
enum dc_clock_type clock_type,
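[Editor's note: dcn2_read_clocks_from_hw_dentist() recovers the current DFS output as VCO * SCALE_FACTOR / divider. A quick arithmetic check under assumed values; the scale factor is 4 in some DCN headers, but treat both constants here as example inputs.]

#include <stdio.h>

#define DIVIDER_RANGE_SCALE_FACTOR 4    /* assumed value for the example */

int main(void)
{
    int vco_khz = 3600000;          /* 3.6 GHz dentist VCO, example value */
    int decoded_divider = 24;       /* as returned by dentist_get_divider_from_did() */
    int dispclk_khz = DIVIDER_RANGE_SCALE_FACTOR * vco_khz / decoded_divider;

    printf("dispclk = %d kHz\n", dispclk_khz); /* 600000 kHz = 600 MHz */
    return 0;
}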
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn20/dcn20_clk_mgr.h b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn20/dcn20_clk_mgr.h
index c9fd824f3c23..0b9c045b0c8e 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn20/dcn20_clk_mgr.h
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn20/dcn20_clk_mgr.h
@@ -34,7 +34,7 @@ void dcn2_update_clocks_fpga(struct clk_mgr *clk_mgr,
struct dc_state *context,
bool safe_to_lower);
void dcn20_update_clocks_update_dpp_dto(struct clk_mgr_internal *clk_mgr,
- struct dc_state *context);
+ struct dc_state *context, bool safe_to_lower);
void dcn2_init_clocks(struct clk_mgr *clk_mgr);
@@ -51,4 +51,8 @@ void dcn2_get_clock(struct clk_mgr *clk_mgr,
struct dc_clock_config *clock_cfg);
void dcn20_update_clocks_update_dentist(struct clk_mgr_internal *clk_mgr);
+
+void dcn2_read_clocks_from_hw_dentist(struct clk_mgr *clk_mgr_base);
+
+
#endif //__DCN20_CLK_MGR_H__
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.c
index 35c55e54eac0..7ae4c06232dd 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.c
@@ -59,14 +59,16 @@ int rn_get_active_display_cnt_wa(
struct dc_state *context)
{
int i, display_count;
- bool hdmi_present = false;
+ bool tmds_present = false;
display_count = 0;
for (i = 0; i < context->stream_count; i++) {
const struct dc_stream_state *stream = context->streams[i];
- if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
- hdmi_present = true;
+ if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A ||
+ stream->signal == SIGNAL_TYPE_DVI_SINGLE_LINK ||
+ stream->signal == SIGNAL_TYPE_DVI_DUAL_LINK)
+ tmds_present = true;
}
for (i = 0; i < dc->link_count; i++) {
@@ -85,7 +87,7 @@ int rn_get_active_display_cnt_wa(
}
/* WA for hang on HDMI after display is turned off, then back on */
- if (display_count == 0 && hdmi_present)
+ if (display_count == 0 && tmds_present)
display_count = 1;
return display_count;
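[Editor's note: the hunk above widens the hang workaround from HDMI-only to any TMDS signal (HDMI plus single- and dual-link DVI): when every display is off but a TMDS stream exists, a count of one is reported so clocks stay up. A hedged model with an invented signal enum.]

#include <stdbool.h>
#include <stdio.h>

enum sig { SIG_DP, SIG_HDMI, SIG_DVI_SINGLE, SIG_DVI_DUAL };

static int active_display_cnt_wa(const enum sig *streams, int n)
{
    bool tmds_present = false;
    int display_count = 0, i;

    for (i = 0; i < n; i++) {
        if (streams[i] == SIG_HDMI || streams[i] == SIG_DVI_SINGLE ||
            streams[i] == SIG_DVI_DUAL)
            tmds_present = true;
    }
    /* (the real function also counts active links; omitted in this sketch) */
    if (display_count == 0 && tmds_present)
        display_count = 1;  /* keep clocks up to avoid the hang */
    return display_count;
}

int main(void)
{
    enum sig s[] = { SIG_DVI_DUAL };
    printf("count=%d\n", active_display_cnt_wa(s, 1)); /* prints 1, not 0 */
    return 0;
}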
@@ -164,16 +166,16 @@ void rn_update_clocks(struct clk_mgr *clk_mgr_base,
}
if (dpp_clock_lowered) {
- // if clock is being lowered, increase DTO before lowering refclk
- dcn20_update_clocks_update_dpp_dto(clk_mgr, context);
+ // increase per DPP DTO before lowering global dppclk
+ dcn20_update_clocks_update_dpp_dto(clk_mgr, context, safe_to_lower);
rn_vbios_smu_set_dppclk(clk_mgr, clk_mgr_base->clks.dppclk_khz);
} else {
- // if clock is being raised, increase refclk before lowering DTO
+ // increase global DPPCLK before lowering per DPP DTO
if (update_dppclk || update_dispclk)
rn_vbios_smu_set_dppclk(clk_mgr, clk_mgr_base->clks.dppclk_khz);
// always update dtos unless clock is lowered and not safe to lower
if (new_clocks->dppclk_khz >= dc->current_state->bw_ctx.bw.dcn.clk.dppclk_khz)
- dcn20_update_clocks_update_dpp_dto(clk_mgr, context);
+ dcn20_update_clocks_update_dpp_dto(clk_mgr, context, safe_to_lower);
}
if (update_dispclk &&
@@ -409,7 +411,7 @@ void build_watermark_ranges(struct clk_bw_params *bw_params, struct pp_smu_wm_ra
continue;
ranges->reader_wm_sets[num_valid_sets].wm_inst = bw_params->wm_table.entries[i].wm_inst;
- ranges->reader_wm_sets[num_valid_sets].wm_type = bw_params->wm_table.entries[i].wm_type;;
+ ranges->reader_wm_sets[num_valid_sets].wm_type = bw_params->wm_table.entries[i].wm_type;
/* We will not select WM based on dcfclk, so leave it as unconstrained */
ranges->reader_wm_sets[num_valid_sets].min_drain_clk_mhz = PP_SMU_WM_SET_RANGE_CLK_UNCONSTRAINED_MIN;
ranges->reader_wm_sets[num_valid_sets].max_drain_clk_mhz = PP_SMU_WM_SET_RANGE_CLK_UNCONSTRAINED_MAX;
@@ -578,33 +580,33 @@ struct wm_table lpddr4_wm_table = {
{
.wm_inst = WM_A,
.wm_type = WM_TYPE_PSTATE_CHG,
- .pstate_latency_us = 23.84,
- .sr_exit_time_us = 12.5,
- .sr_enter_plus_exit_time_us = 17.0,
+ .pstate_latency_us = 11.65333,
+ .sr_exit_time_us = 5.32,
+ .sr_enter_plus_exit_time_us = 6.38,
.valid = true,
},
{
.wm_inst = WM_B,
.wm_type = WM_TYPE_PSTATE_CHG,
- .pstate_latency_us = 23.84,
- .sr_exit_time_us = 12.5,
- .sr_enter_plus_exit_time_us = 17.0,
+ .pstate_latency_us = 11.65333,
+ .sr_exit_time_us = 9.82,
+ .sr_enter_plus_exit_time_us = 11.196,
.valid = true,
},
{
.wm_inst = WM_C,
.wm_type = WM_TYPE_PSTATE_CHG,
- .pstate_latency_us = 23.84,
- .sr_exit_time_us = 12.5,
- .sr_enter_plus_exit_time_us = 17.0,
+ .pstate_latency_us = 11.65333,
+ .sr_exit_time_us = 9.89,
+ .sr_enter_plus_exit_time_us = 11.24,
.valid = true,
},
{
.wm_inst = WM_D,
.wm_type = WM_TYPE_PSTATE_CHG,
- .pstate_latency_us = 23.84,
- .sr_exit_time_us = 12.5,
- .sr_enter_plus_exit_time_us = 17.0,
+ .pstate_latency_us = 11.65333,
+ .sr_exit_time_us = 9.748,
+ .sr_enter_plus_exit_time_us = 11.102,
.valid = true,
},
}
@@ -691,7 +693,6 @@ void rn_clk_mgr_construct(
{
struct dc_debug_options *debug = &ctx->dc->debug;
struct dpm_clocks clock_table = { 0 };
- struct clk_state_registers_and_bypass s = { 0 };
clk_mgr->base.ctx = ctx;
clk_mgr->base.funcs = &dcn21_funcs;
@@ -711,7 +712,6 @@ void rn_clk_mgr_construct(
if (IS_FPGA_MAXIMUS_DC(ctx->dce_environment)) {
dcn21_funcs.update_clocks = dcn2_update_clocks_fpga;
clk_mgr->base.dentist_vco_freq_khz = 3600000;
- clk_mgr->base.dprefclk_khz = 600000;
} else {
struct clk_log_info log_info = {0};
@@ -722,24 +722,16 @@ void rn_clk_mgr_construct(
if (clk_mgr->base.dentist_vco_freq_khz == 0)
clk_mgr->base.dentist_vco_freq_khz = 3600000;
- rn_dump_clk_registers(&s, &clk_mgr->base, &log_info);
- /* Convert dprefclk units from MHz to KHz */
- /* Value already divided by 10, some resolution lost */
- clk_mgr->base.dprefclk_khz = s.dprefclk * 1000;
-
- /* in case we don't get a value from the register, use default */
- if (clk_mgr->base.dprefclk_khz == 0) {
- ASSERT(clk_mgr->base.dprefclk_khz == 600000);
- clk_mgr->base.dprefclk_khz = 600000;
- }
-
if (ctx->dc_bios->integrated_info->memory_type == LpDdr4MemType) {
rn_bw_params.wm_table = lpddr4_wm_table;
} else {
rn_bw_params.wm_table = ddr4_wm_table;
}
+ /* Saved clocks configured at boot for debug purposes */
+ rn_dump_clk_registers(&clk_mgr->base.boot_snapshot, &clk_mgr->base, &log_info);
}
+ clk_mgr->base.dprefclk_khz = 600000;
dce_clock_read_ss_info(clk_mgr);
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr_vbios_smu.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr_vbios_smu.c
index cb7c0e8b7e1b..6878aedf1d3e 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr_vbios_smu.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr_vbios_smu.c
@@ -82,8 +82,8 @@ int rn_vbios_smu_get_smu_version(struct clk_mgr_internal *clk_mgr)
int rn_vbios_smu_set_dispclk(struct clk_mgr_internal *clk_mgr, int requested_dispclk_khz)
{
int actual_dispclk_set_mhz = -1;
- struct dc *core_dc = clk_mgr->base.ctx->dc;
- struct dmcu *dmcu = core_dc->res_pool->dmcu;
+ struct dc *dc = clk_mgr->base.ctx->dc;
+ struct dmcu *dmcu = dc->res_pool->dmcu;
/* Unit of SMU msg parameter is MHz */
actual_dispclk_set_mhz = rn_vbios_smu_send_msg_with_param(
@@ -91,7 +91,7 @@ int rn_vbios_smu_set_dispclk(struct clk_mgr_internal *clk_mgr, int requested_dis
VBIOSSMC_MSG_SetDispclkFreq,
requested_dispclk_khz / 1000);
- if (!IS_FPGA_MAXIMUS_DC(core_dc->ctx->dce_environment)) {
+ if (!IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment)) {
if (dmcu && dmcu->funcs->is_dmcu_initialized(dmcu)) {
if (clk_mgr->dfs_bypass_disp_clk != actual_dispclk_set_mhz)
dmcu->funcs->set_psr_wait_loop(dmcu,
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc.c b/drivers/gpu/drm/amd/display/dc/core/dc.c
index 32f31bf91915..6c797fac189d 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc.c
@@ -58,21 +58,21 @@
#include "hubp.h"
#include "dc_link_dp.h"
+#include "dc_dmub_srv.h"
-#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT
#include "dsc.h"
-#endif
-#ifdef CONFIG_DRM_AMD_DC_DCN2_0
#include "vm_helper.h"
-#endif
#include "dce/dce_i2c.h"
+#define CTX \
+ dc->ctx
+
#define DC_LOGGER \
dc->ctx->logger
-const static char DC_BUILD_ID[] = "production-build";
+static const char DC_BUILD_ID[] = "production-build";
/**
* DOC: Overview
@@ -287,7 +287,6 @@ bool dc_stream_adjust_vmin_vmax(struct dc *dc,
struct pipe_ctx *pipe = &dc->current_state->res_ctx.pipe_ctx[i];
if (pipe->stream == stream && pipe->stream_res.tg) {
- pipe->stream->adjust = *adjust;
dc->hwss.set_drr(&pipe,
1,
adjust->v_total_min,
@@ -511,10 +510,10 @@ bool dc_stream_program_csc_matrix(struct dc *dc, struct dc_stream_state *stream)
return ret;
}
-void dc_stream_set_static_screen_events(struct dc *dc,
+void dc_stream_set_static_screen_params(struct dc *dc,
struct dc_stream_state **streams,
int num_streams,
- const struct dc_static_screen_events *events)
+ const struct dc_static_screen_params *params)
{
int i = 0;
int j = 0;
@@ -533,10 +532,10 @@ void dc_stream_set_static_screen_events(struct dc *dc,
}
}
- dc->hwss.set_static_screen_control(pipes_affected, num_pipes_affected, events);
+ dc->hwss.set_static_screen_control(pipes_affected, num_pipes_affected, params);
}
-static void destruct(struct dc *dc)
+static void dc_destruct(struct dc *dc)
{
if (dc->current_state) {
dc_release_state(dc->current_state);
@@ -569,7 +568,7 @@ static void destruct(struct dc *dc)
kfree(dc->bw_dceip);
dc->bw_dceip = NULL;
-#ifdef CONFIG_DRM_AMD_DC_DCN1_0
+#ifdef CONFIG_DRM_AMD_DC_DCN
kfree(dc->dcn_soc);
dc->dcn_soc = NULL;
@@ -577,28 +576,58 @@ static void destruct(struct dc *dc)
dc->dcn_ip = NULL;
#endif
-#ifdef CONFIG_DRM_AMD_DC_DCN2_0
kfree(dc->vm_helper);
dc->vm_helper = NULL;
-#endif
}
-static bool construct(struct dc *dc,
+static bool dc_construct_ctx(struct dc *dc,
+ const struct dc_init_data *init_params)
+{
+ struct dc_context *dc_ctx;
+ enum dce_version dc_version = DCE_VERSION_UNKNOWN;
+
+ dc_ctx = kzalloc(sizeof(*dc_ctx), GFP_KERNEL);
+ if (!dc_ctx)
+ return false;
+
+ dc_ctx->cgs_device = init_params->cgs_device;
+ dc_ctx->driver_context = init_params->driver;
+ dc_ctx->dc = dc;
+ dc_ctx->asic_id = init_params->asic_id;
+ dc_ctx->dc_sink_id_count = 0;
+ dc_ctx->dc_stream_id_count = 0;
+ dc_ctx->dce_environment = init_params->dce_environment;
+
+ /* Create logger */
+
+ dc_version = resource_parse_asic_id(init_params->asic_id);
+ dc_ctx->dce_version = dc_version;
+
+ dc_ctx->perf_trace = dc_perf_trace_create();
+ if (!dc_ctx->perf_trace) {
+ ASSERT_CRITICAL(false);
+ return false;
+ }
+
+ dc->ctx = dc_ctx;
+
+ return true;
+}
+
+static bool dc_construct(struct dc *dc,
const struct dc_init_data *init_params)
{
struct dc_context *dc_ctx;
struct bw_calcs_dceip *dc_dceip;
struct bw_calcs_vbios *dc_vbios;
-#ifdef CONFIG_DRM_AMD_DC_DCN1_0
+#ifdef CONFIG_DRM_AMD_DC_DCN
struct dcn_soc_bounding_box *dcn_soc;
struct dcn_ip_params *dcn_ip;
#endif
- enum dce_version dc_version = DCE_VERSION_UNKNOWN;
dc->config = init_params->flags;
-#ifdef CONFIG_DRM_AMD_DC_DCN2_0
// Allocate memory for the vm_helper
dc->vm_helper = kzalloc(sizeof(struct vm_helper), GFP_KERNEL);
if (!dc->vm_helper) {
@@ -606,7 +635,6 @@ static bool construct(struct dc *dc,
goto fail;
}
-#endif
memcpy(&dc->bb_overrides, &init_params->bb_overrides, sizeof(dc->bb_overrides));
dc_dceip = kzalloc(sizeof(*dc_dceip), GFP_KERNEL);
@@ -624,7 +652,7 @@ static bool construct(struct dc *dc,
}
dc->bw_vbios = dc_vbios;
-#ifdef CONFIG_DRM_AMD_DC_DCN1_0
+#ifdef CONFIG_DRM_AMD_DC_DCN
dcn_soc = kzalloc(sizeof(*dcn_soc), GFP_KERNEL);
if (!dcn_soc) {
dm_error("%s: failed to create dcn_soc\n", __func__);
@@ -640,31 +668,15 @@ static bool construct(struct dc *dc,
}
dc->dcn_ip = dcn_ip;
-#ifdef CONFIG_DRM_AMD_DC_DCN2_0
dc->soc_bounding_box = init_params->soc_bounding_box;
#endif
-#endif
- dc_ctx = kzalloc(sizeof(*dc_ctx), GFP_KERNEL);
- if (!dc_ctx) {
+ if (!dc_construct_ctx(dc, init_params)) {
dm_error("%s: failed to create ctx\n", __func__);
goto fail;
}
- dc_ctx->cgs_device = init_params->cgs_device;
- dc_ctx->driver_context = init_params->driver;
- dc_ctx->dc = dc;
- dc_ctx->asic_id = init_params->asic_id;
- dc_ctx->dc_sink_id_count = 0;
- dc_ctx->dc_stream_id_count = 0;
- dc->ctx = dc_ctx;
-
- /* Create logger */
-
- dc_ctx->dce_environment = init_params->dce_environment;
-
- dc_version = resource_parse_asic_id(init_params->asic_id);
- dc_ctx->dce_version = dc_version;
+ dc_ctx = dc->ctx;
/* Resource should construct all asic specific resources.
* This should be the only place where we need to parse the asic id
@@ -679,7 +691,7 @@ static bool construct(struct dc *dc,
bp_init_data.bios = init_params->asic_id.atombios_base_address;
dc_ctx->dc_bios = dal_bios_parser_create(
- &bp_init_data, dc_version);
+ &bp_init_data, dc_ctx->dce_version);
if (!dc_ctx->dc_bios) {
ASSERT_CRITICAL(false);
@@ -687,17 +699,13 @@ static bool construct(struct dc *dc,
}
dc_ctx->created_bios = true;
- }
-
- dc_ctx->perf_trace = dc_perf_trace_create();
- if (!dc_ctx->perf_trace) {
- ASSERT_CRITICAL(false);
- goto fail;
}
+
+
/* Create GPIO service */
dc_ctx->gpio_service = dal_gpio_service_create(
- dc_version,
+ dc_ctx->dce_version,
dc_ctx->dce_environment,
dc_ctx);
@@ -706,7 +714,7 @@ static bool construct(struct dc *dc,
goto fail;
}
- dc->res_pool = dc_create_resource_pool(dc, init_params, dc_version);
+ dc->res_pool = dc_create_resource_pool(dc, init_params, dc_ctx->dce_version);
if (!dc->res_pool)
goto fail;
@@ -714,10 +722,8 @@ static bool construct(struct dc *dc,
if (!dc->clk_mgr)
goto fail;
-#ifdef CONFIG_DRM_AMD_DC_DCN2_1
if (dc->res_pool->funcs->update_bw_bounding_box)
dc->res_pool->funcs->update_bw_bounding_box(dc, dc->clk_mgr->bw_params);
-#endif
/* Creation of current_state must occur after dc->dml
* is initialized in dc_create_resource_pool because
@@ -739,12 +745,9 @@ static bool construct(struct dc *dc,
return true;
fail:
-
- destruct(dc);
return false;
}
-#if defined(CONFIG_DRM_AMD_DC_DCN2_0)
static bool disable_all_writeback_pipes_for_stream(
const struct dc *dc,
struct dc_stream_state *stream,
@@ -757,7 +760,6 @@ static bool disable_all_writeback_pipes_for_stream(
return true;
}
-#endif
static void disable_dangling_plane(struct dc *dc, struct dc_state *context)
{
@@ -783,16 +785,12 @@ static void disable_dangling_plane(struct dc *dc, struct dc_state *context)
}
if (should_disable && old_stream) {
dc_rem_all_planes_for_stream(dc, old_stream, dangling_context);
-#if defined(CONFIG_DRM_AMD_DC_DCN2_0)
disable_all_writeback_pipes_for_stream(dc, old_stream, dangling_context);
-#endif
if (dc->hwss.apply_ctx_for_surface)
dc->hwss.apply_ctx_for_surface(dc, old_stream, 0, dangling_context);
}
-#if defined(CONFIG_DRM_AMD_DC_DCN2_0)
if (dc->hwss.program_front_end_for_ctx)
dc->hwss.program_front_end_for_ctx(dc, dangling_context);
-#endif
}
current_ctx = dc->current_state;
@@ -800,6 +798,33 @@ static void disable_dangling_plane(struct dc *dc, struct dc_state *context)
dc_release_state(current_ctx);
}
+static void wait_for_no_pipes_pending(struct dc *dc, struct dc_state *context)
+{
+ int i;
+ int count = 0;
+ struct pipe_ctx *pipe;
+ PERF_TRACE();
+ for (i = 0; i < MAX_PIPES; i++) {
+ pipe = &context->res_ctx.pipe_ctx[i];
+
+ if (!pipe->plane_state)
+ continue;
+
+ /* Timeout 100 ms */
+ while (count < 100000) {
+ /* Must set to false to start with, due to OR in update function */
+ pipe->plane_state->status.is_flip_pending = false;
+ dc->hwss.update_pending_status(pipe);
+ if (!pipe->plane_state->status.is_flip_pending)
+ break;
+ udelay(1);
+ count++;
+ }
+ ASSERT(!pipe->plane_state->status.is_flip_pending);
+ }
+ PERF_TRACE();
+}
+
/*******************************************************************************
* Public functions
******************************************************************************/
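[Editor's note: wait_for_no_pipes_pending() above uses a clear-then-poll idiom -- the pending flag must be reset to false before each poll because update_pending_status() only ever ORs it in, and the busy-wait is bounded at 100000 x 1 us = the 100 ms timeout named in the comment. A userspace model of that loop; udelay() is stubbed so it compiles outside the kernel.]

#include <stdbool.h>
#include <stdio.h>

static int hw_polls_left = 3;   /* pretend hardware finishes after 3 polls */

static void update_pending_status(bool *flip_pending)
{
    if (hw_polls_left-- > 0)
        *flip_pending = true;   /* OR-style update: only ever sets the flag */
}

static void udelay_stub(int us) { (void)us; }

int main(void)
{
    bool flip_pending;
    int count = 0;

    while (count < 100000) {
        flip_pending = false;   /* must pre-clear; the update only sets */
        update_pending_status(&flip_pending);
        if (!flip_pending)
            break;
        udelay_stub(1);
        count++;
    }
    printf("flip drained after %d polls\n", count);
    return 0;
}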
@@ -812,26 +837,38 @@ struct dc *dc_create(const struct dc_init_data *init_params)
if (NULL == dc)
goto alloc_fail;
- if (false == construct(dc, init_params))
- goto construct_fail;
+ if (init_params->dce_environment == DCE_ENV_VIRTUAL_HW) {
+ if (false == dc_construct_ctx(dc, init_params)) {
+ dc_destruct(dc);
+ goto construct_fail;
+ }
+ } else {
+ if (false == dc_construct(dc, init_params)) {
+ dc_destruct(dc);
+ goto construct_fail;
+ }
+
+ full_pipe_count = dc->res_pool->pipe_count;
+ if (dc->res_pool->underlay_pipe_index != NO_UNDERLAY_PIPE)
+ full_pipe_count--;
+ dc->caps.max_streams = min(
+ full_pipe_count,
+ dc->res_pool->stream_enc_count);
- full_pipe_count = dc->res_pool->pipe_count;
- if (dc->res_pool->underlay_pipe_index != NO_UNDERLAY_PIPE)
- full_pipe_count--;
- dc->caps.max_streams = min(
- full_pipe_count,
- dc->res_pool->stream_enc_count);
+ dc->optimize_seamless_boot_streams = 0;
+ dc->caps.max_links = dc->link_count;
+ dc->caps.max_audios = dc->res_pool->audio_count;
+ dc->caps.linear_pitch_alignment = 64;
- dc->caps.max_links = dc->link_count;
- dc->caps.max_audios = dc->res_pool->audio_count;
- dc->caps.linear_pitch_alignment = 64;
+ dc->caps.max_dp_protocol_version = DP_VERSION_1_4;
+
+ if (dc->res_pool->dmcu != NULL)
+ dc->versions.dmcu_version = dc->res_pool->dmcu->dmcu_version;
+ }
/* Populate versioning information */
dc->versions.dc_ver = DC_VER;
- if (dc->res_pool->dmcu != NULL)
- dc->versions.dmcu_version = dc->res_pool->dmcu->dmcu_version;
-
dc->build_id = DC_BUILD_ID;
DC_LOG_DC("Display Core initialized\n");
@@ -849,7 +886,8 @@ alloc_fail:
void dc_hardware_init(struct dc *dc)
{
- dc->hwss.init_hw(dc);
+ if (dc->ctx->dce_environment != DCE_ENV_VIRTUAL_HW)
+ dc->hwss.init_hw(dc);
}
void dc_init_callbacks(struct dc *dc,
@@ -869,7 +907,7 @@ void dc_deinit_callbacks(struct dc *dc)
void dc_destroy(struct dc **dc)
{
- destruct(*dc);
+ dc_destruct(*dc);
kfree(*dc);
*dc = NULL;
}
@@ -1163,10 +1201,10 @@ static enum dc_status dc_commit_state_no_check(struct dc *dc, struct dc_state *c
for (i = 0; i < context->stream_count; i++) {
if (context->streams[i]->apply_seamless_boot_optimization)
- dc->optimize_seamless_boot = true;
+ dc->optimize_seamless_boot_streams++;
}
- if (!dc->optimize_seamless_boot)
+ if (dc->optimize_seamless_boot_streams == 0)
dc->hwss.prepare_bandwidth(dc, context);
/* re-program planes for existing stream, in case we need to
@@ -1182,10 +1220,6 @@ static enum dc_status dc_commit_state_no_check(struct dc *dc, struct dc_state *c
context->stream_status[i].plane_count,
context); /* use new pipe config in new context */
}
-#if defined(CONFIG_DRM_AMD_DC_DCN2_0)
- if (dc->hwss.program_front_end_for_ctx)
- dc->hwss.program_front_end_for_ctx(dc, context);
-#endif
/* Program hardware */
for (i = 0; i < dc->res_pool->pipe_count; i++) {
@@ -1204,10 +1238,8 @@ static enum dc_status dc_commit_state_no_check(struct dc *dc, struct dc_state *c
}
/* Program all planes within new context*/
-#if defined(CONFIG_DRM_AMD_DC_DCN2_0)
if (dc->hwss.program_front_end_for_ctx)
dc->hwss.program_front_end_for_ctx(dc, context);
-#endif
for (i = 0; i < context->stream_count; i++) {
const struct dc_link *link = context->streams[i]->link;
@@ -1245,6 +1277,13 @@ static enum dc_status dc_commit_state_no_check(struct dc *dc, struct dc_state *c
dc_enable_stereo(dc, context, dc_streams, context->stream_count);
+ if (dc->optimize_seamless_boot_streams == 0) {
+ /* Must wait for no flips to be pending before optimizing bandwidth */
+ wait_for_no_pipes_pending(dc, context);
+ /* pplib is notified if disp_num changed */
+ dc->hwss.optimize_bandwidth(dc, context);
+ }
+
for (i = 0; i < context->stream_count; i++)
context->streams[i]->mode_changed = false;
@@ -1279,12 +1318,18 @@ bool dc_commit_state(struct dc *dc, struct dc_state *context)
return (result == DC_OK);
}
+bool dc_is_hw_initialized(struct dc *dc)
+{
+ struct dc_bios *dcb = dc->ctx->dc_bios;
+ return dcb->funcs->is_accelerated_mode(dcb);
+}
+
bool dc_post_update_surfaces_to_stream(struct dc *dc)
{
int i;
struct dc_state *context = dc->current_state;
- if (!dc->optimized_required || dc->optimize_seamless_boot)
+ if (!dc->optimized_required || dc->optimize_seamless_boot_streams > 0)
return true;
post_surface_trace(dc);
@@ -1313,7 +1358,7 @@ struct dc_state *dc_create_state(struct dc *dc)
* initialize and obtain IP and SOC the base DML instance from DC is
* initially copied into every context
*/
-#ifdef CONFIG_DRM_AMD_DC_DCN1_0
+#ifdef CONFIG_DRM_AMD_DC_DCN
memcpy(&context->bw_ctx.dml, &dc->dml, sizeof(struct display_mode_lib));
#endif
@@ -1486,11 +1531,6 @@ static enum surface_update_type get_plane_info_update_type(const struct dc_surfa
elevate_update_type(&update_type, UPDATE_TYPE_MED);
}
- if (u->plane_info->sdr_white_level != u->surface->sdr_white_level) {
- update_flags->bits.sdr_white_level = 1;
- elevate_update_type(&update_type, UPDATE_TYPE_MED);
- }
-
if (u->plane_info->dcc.enable != u->surface->dcc.enable
|| u->plane_info->dcc.independent_64b_blks != u->surface->dcc.independent_64b_blks
|| u->plane_info->dcc.meta_pitch != u->surface->dcc.meta_pitch) {
@@ -1508,7 +1548,6 @@ static enum surface_update_type get_plane_info_update_type(const struct dc_surfa
}
if (u->plane_info->plane_size.surface_pitch != u->surface->plane_size.surface_pitch
- || u->plane_info->plane_size.surface_pitch != u->surface->plane_size.surface_pitch
|| u->plane_info->plane_size.chroma_pitch != u->surface->plane_size.chroma_pitch) {
update_flags->bits.plane_size_change = 1;
elevate_update_type(&update_type, UPDATE_TYPE_MED);
@@ -1547,7 +1586,10 @@ static enum surface_update_type get_scaling_info_update_type(
if (u->scaling_info->clip_rect.width != u->surface->clip_rect.width
|| u->scaling_info->clip_rect.height != u->surface->clip_rect.height
|| u->scaling_info->dst_rect.width != u->surface->dst_rect.width
- || u->scaling_info->dst_rect.height != u->surface->dst_rect.height) {
+ || u->scaling_info->dst_rect.height != u->surface->dst_rect.height
+ || u->scaling_info->scaling_quality.integer_scaling !=
+ u->surface->scaling_quality.integer_scaling
+ ) {
update_flags->bits.scaling_change = 1;
if ((u->scaling_info->dst_rect.width < u->surface->dst_rect.width
@@ -1563,7 +1605,7 @@ static enum surface_update_type get_scaling_info_update_type(
update_flags->bits.scaling_change = 1;
if (u->scaling_info->src_rect.width > u->surface->src_rect.width
- && u->scaling_info->src_rect.height > u->surface->src_rect.height)
+ || u->scaling_info->src_rect.height > u->surface->src_rect.height)
/* Making src rect bigger requires a bandwidth change */
update_flags->bits.clock_change = 1;
}
@@ -1577,11 +1619,11 @@ static enum surface_update_type get_scaling_info_update_type(
update_flags->bits.position_change = 1;
if (update_flags->bits.clock_change
- || update_flags->bits.bandwidth_change)
+ || update_flags->bits.bandwidth_change
+ || update_flags->bits.scaling_change)
return UPDATE_TYPE_FULL;
- if (update_flags->bits.scaling_change
- || update_flags->bits.position_change)
+ if (update_flags->bits.position_change)
return UPDATE_TYPE_MED;
return UPDATE_TYPE_FAST;
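[Editor's note: the update-type checks treat FAST < MED < FULL as an ordered lattice and only ever elevate the running maximum; the hunk above promotes scaling_change from MED to FULL. A minimal sketch of that monotone elevation rule, assuming the enum values are ordered this way.]

#include <stdio.h>

enum update_type { UPDATE_TYPE_FAST, UPDATE_TYPE_MED, UPDATE_TYPE_FULL };

static void elevate(enum update_type *overall, enum update_type found)
{
    if (found > *overall)       /* monotone: never lowers the type */
        *overall = found;
}

int main(void)
{
    enum update_type t = UPDATE_TYPE_FAST;

    elevate(&t, UPDATE_TYPE_MED);   /* e.g. position_change */
    elevate(&t, UPDATE_TYPE_FULL);  /* e.g. scaling_change after this patch */
    elevate(&t, UPDATE_TYPE_MED);   /* ignored, already FULL */
    printf("%d\n", t);              /* 2 == UPDATE_TYPE_FULL */
    return 0;
}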
@@ -1635,6 +1677,12 @@ static enum surface_update_type det_surface_update(const struct dc *dc,
update_flags->bits.gamma_change = 1;
}
+ if (u->hdr_mult.value)
+ if (u->hdr_mult.value != u->surface->hdr_mult.value) {
+ update_flags->bits.hdr_mult = 1;
+ elevate_update_type(&overall_type, UPDATE_TYPE_MED);
+ }
+
if (update_flags->bits.in_transfer_func_change) {
type = UPDATE_TYPE_MED;
elevate_update_type(&overall_type, type);
@@ -1668,7 +1716,8 @@ static enum surface_update_type check_update_surfaces_for_stream(
union stream_update_flags *su_flags = &stream_update->stream->update_flags;
if ((stream_update->src.height != 0 && stream_update->src.width != 0) ||
- (stream_update->dst.height != 0 && stream_update->dst.width != 0))
+ (stream_update->dst.height != 0 && stream_update->dst.width != 0) ||
+ stream_update->integer_scaling_update)
su_flags->bits.scaling = 1;
if (stream_update->out_transfer_func)
@@ -1683,15 +1732,16 @@ static enum surface_update_type check_update_surfaces_for_stream(
if (stream_update->gamut_remap)
su_flags->bits.gamut_remap = 1;
-#if defined(CONFIG_DRM_AMD_DC_DCN2_0)
if (stream_update->wb_update)
su_flags->bits.wb_update = 1;
-#endif
if (su_flags->raw != 0)
overall_type = UPDATE_TYPE_FULL;
if (stream_update->output_csc_transform || stream_update->output_color_space)
su_flags->bits.out_csc = 1;
+
+ if (stream_update->dsc_config)
+ overall_type = UPDATE_TYPE_FULL;
}
for (i = 0 ; i < surface_count; i++) {
@@ -1817,8 +1867,6 @@ static void copy_surface_update_to_plane(
srf_update->plane_info->global_alpha_value;
surface->dcc =
srf_update->plane_info->dcc;
- surface->sdr_white_level =
- srf_update->plane_info->sdr_white_level;
surface->layer_index =
srf_update->plane_info->layer_index;
}
@@ -1851,7 +1899,6 @@ static void copy_surface_update_to_plane(
sizeof(struct dc_transfer_func_distributed_points));
}
-#if defined(CONFIG_DRM_AMD_DC_DCN2_0)
if (srf_update->func_shaper &&
(surface->in_shaper_func !=
srf_update->func_shaper))
@@ -1864,13 +1911,16 @@ static void copy_surface_update_to_plane(
memcpy(surface->lut3d_func, srf_update->lut3d_func,
sizeof(*surface->lut3d_func));
+ if (srf_update->hdr_mult.value)
+ surface->hdr_mult =
+ srf_update->hdr_mult;
+
if (srf_update->blend_tf &&
(surface->blend_tf !=
srf_update->blend_tf))
memcpy(surface->blend_tf, srf_update->blend_tf,
sizeof(*surface->blend_tf));
-#endif
if (srf_update->input_csc_color_matrix)
surface->input_csc_color_matrix =
*srf_update->input_csc_color_matrix;
@@ -1883,8 +1933,10 @@ static void copy_surface_update_to_plane(
static void copy_stream_update_to_stream(struct dc *dc,
struct dc_state *context,
struct dc_stream_state *stream,
- const struct dc_stream_update *update)
+ struct dc_stream_update *update)
{
+ struct dc_context *dc_ctx = dc->ctx;
+
if (update == NULL || stream == NULL)
return;
@@ -1945,7 +1997,6 @@ static void copy_stream_update_to_stream(struct dc *dc,
if (update->dither_option)
stream->dither_option = *update->dither_option;
-#if defined(CONFIG_DRM_AMD_DC_DCN2_0)
/* update current stream with writeback info */
if (update->wb_update) {
int i;
@@ -1956,23 +2007,32 @@ static void copy_stream_update_to_stream(struct dc *dc,
stream->writeback_info[i] =
update->wb_update->writeback_info[i];
}
-#endif
-#if defined(CONFIG_DRM_AMD_DC_DSC_SUPPORT)
if (update->dsc_config) {
struct dc_dsc_config old_dsc_cfg = stream->timing.dsc_cfg;
uint32_t old_dsc_enabled = stream->timing.flags.DSC;
uint32_t enable_dsc = (update->dsc_config->num_slices_h != 0 &&
update->dsc_config->num_slices_v != 0);
- stream->timing.dsc_cfg = *update->dsc_config;
- stream->timing.flags.DSC = enable_dsc;
- if (!dc->res_pool->funcs->validate_bandwidth(dc, context,
- true)) {
- stream->timing.dsc_cfg = old_dsc_cfg;
- stream->timing.flags.DSC = old_dsc_enabled;
+ /* Use temporary context for validating new DSC config */
+ struct dc_state *dsc_validate_context = dc_create_state(dc);
+
+ if (dsc_validate_context) {
+ dc_resource_state_copy_construct(dc->current_state, dsc_validate_context);
+
+ stream->timing.dsc_cfg = *update->dsc_config;
+ stream->timing.flags.DSC = enable_dsc;
+ if (!dc->res_pool->funcs->validate_bandwidth(dc, dsc_validate_context, true)) {
+ stream->timing.dsc_cfg = old_dsc_cfg;
+ stream->timing.flags.DSC = old_dsc_enabled;
+ update->dsc_config = NULL;
+ }
+
+ dc_release_state(dsc_validate_context);
+ } else {
+ DC_ERROR("Failed to allocate new validate context for DSC change\n");
+ update->dsc_config = NULL;
}
}
-#endif
}
static void commit_planes_do_stream_update(struct dc *dc,
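[Editor's note: the DSC hunk above applies the new config, validates it against a throwaway copy of the current state, and rolls back (clearing update->dsc_config) on failure so the live state is never left half-updated. A hedged sketch of that validate-on-a-copy pattern; the types and the validation rule are invented for illustration.]

#include <stdbool.h>
#include <stdio.h>
#include <string.h>

struct timing { int dsc_slices; bool dsc_on; };

static bool validate(const struct timing *t)
{
    return t->dsc_slices <= 4;      /* stand-in for the bandwidth check */
}

static void try_dsc_update(struct timing *live, int new_slices)
{
    struct timing old = *live;      /* snapshot for rollback */
    struct timing scratch;

    live->dsc_slices = new_slices;
    live->dsc_on = new_slices != 0;

    memcpy(&scratch, live, sizeof(scratch)); /* validate a copy, not the HW state */
    if (!validate(&scratch)) {
        *live = old;                /* reject: restore the previous config */
        printf("rejected %d slices\n", new_slices);
    } else {
        printf("accepted %d slices\n", new_slices);
    }
}

int main(void)
{
    struct timing t = { .dsc_slices = 2, .dsc_on = true };
    try_dsc_update(&t, 8);  /* rejected, state rolled back */
    try_dsc_update(&t, 4);  /* accepted */
    return 0;
}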
@@ -1992,11 +2052,11 @@ static void commit_planes_do_stream_update(struct dc *dc,
if (stream_update->periodic_interrupt0 &&
dc->hwss.setup_periodic_interrupt)
- dc->hwss.setup_periodic_interrupt(pipe_ctx, VLINE0);
+ dc->hwss.setup_periodic_interrupt(dc, pipe_ctx, VLINE0);
if (stream_update->periodic_interrupt1 &&
dc->hwss.setup_periodic_interrupt)
- dc->hwss.setup_periodic_interrupt(pipe_ctx, VLINE1);
+ dc->hwss.setup_periodic_interrupt(dc, pipe_ctx, VLINE1);
if ((stream_update->hdr_static_metadata && !stream->use_dynamic_meta) ||
stream_update->vrr_infopacket ||
@@ -2006,6 +2066,12 @@ static void commit_planes_do_stream_update(struct dc *dc,
dc->hwss.update_info_frame(pipe_ctx);
}
+ if (stream_update->hdr_static_metadata &&
+ stream->use_dynamic_meta &&
+ dc->hwss.set_dmdata_attributes &&
+ pipe_ctx->stream->dmdata_address.quad_part != 0)
+ dc->hwss.set_dmdata_attributes(pipe_ctx);
+
if (stream_update->gamut_remap)
dc_stream_set_gamut_remap(dc, stream);
@@ -2013,31 +2079,25 @@ static void commit_planes_do_stream_update(struct dc *dc,
dc_stream_program_csc_matrix(dc, stream);
if (stream_update->dither_option) {
-#if defined(CONFIG_DRM_AMD_DC_DCN2_0)
struct pipe_ctx *odm_pipe = pipe_ctx->next_odm_pipe;
-#endif
resource_build_bit_depth_reduction_params(pipe_ctx->stream,
&pipe_ctx->stream->bit_depth_params);
pipe_ctx->stream_res.opp->funcs->opp_program_fmt(pipe_ctx->stream_res.opp,
&stream->bit_depth_params,
&stream->clamping);
-#if defined(CONFIG_DRM_AMD_DC_DCN2_0)
while (odm_pipe) {
odm_pipe->stream_res.opp->funcs->opp_program_fmt(odm_pipe->stream_res.opp,
&stream->bit_depth_params,
&stream->clamping);
odm_pipe = odm_pipe->next_odm_pipe;
}
-#endif
}
-#if defined(CONFIG_DRM_AMD_DC_DSC_SUPPORT)
if (stream_update->dsc_config && dc->hwss.pipe_control_lock_global) {
dc->hwss.pipe_control_lock_global(dc, pipe_ctx, true);
dp_update_dsc_config(pipe_ctx);
dc->hwss.pipe_control_lock_global(dc, pipe_ctx, false);
}
-#endif
/* Full fe update*/
if (update_type == UPDATE_TYPE_FAST)
continue;
@@ -2053,7 +2113,7 @@ static void commit_planes_do_stream_update(struct dc *dc,
dc->hwss.optimize_bandwidth(dc, dc->current_state);
} else {
- if (!dc->optimize_seamless_boot)
+ if (dc->optimize_seamless_boot_streams == 0)
dc->hwss.prepare_bandwidth(dc, dc->current_state);
core_link_enable_stream(dc->current_state, pipe_ctx);
@@ -2094,7 +2154,7 @@ static void commit_planes_for_stream(struct dc *dc,
int i, j;
struct pipe_ctx *top_pipe_to_program = NULL;
- if (dc->optimize_seamless_boot && surface_count > 0) {
+ if (dc->optimize_seamless_boot_streams > 0 && surface_count > 0) {
/* Optimize seamless boot flag keeps clocks and watermarks high until
* first flip. After first flip, optimization is required to lower
* bandwidth. Important to note that it is expected UEFI will
@@ -2103,12 +2163,14 @@ static void commit_planes_for_stream(struct dc *dc,
*/
if (stream->apply_seamless_boot_optimization) {
stream->apply_seamless_boot_optimization = false;
- dc->optimize_seamless_boot = false;
- dc->optimized_required = true;
+ dc->optimize_seamless_boot_streams--;
+
+ if (dc->optimize_seamless_boot_streams == 0)
+ dc->optimized_required = true;
}
}
- if (update_type == UPDATE_TYPE_FULL && !dc->optimize_seamless_boot) {
+ if (update_type == UPDATE_TYPE_FULL && dc->optimize_seamless_boot_streams == 0) {
dc->hwss.prepare_bandwidth(dc, context);
context_clock_trace(dc, context);
}
@@ -2124,15 +2186,12 @@ static void commit_planes_for_stream(struct dc *dc,
*/
if (dc->hwss.apply_ctx_for_surface)
dc->hwss.apply_ctx_for_surface(dc, stream, 0, context);
-#if defined(CONFIG_DRM_AMD_DC_DCN2_0)
if (dc->hwss.program_front_end_for_ctx)
dc->hwss.program_front_end_for_ctx(dc, context);
-#endif
return;
}
-#if defined(CONFIG_DRM_AMD_DC_DCN2_0)
if (!IS_DIAG_DC(dc->ctx->dce_environment)) {
for (i = 0; i < surface_count; i++) {
struct dc_plane_state *plane_state = srf_updates[i].surface;
@@ -2154,7 +2213,6 @@ static void commit_planes_for_stream(struct dc *dc,
}
}
}
-#endif
// Update Type FULL, Surface updates
for (j = 0; j < dc->res_pool->pipe_count; j++) {
@@ -2175,7 +2233,6 @@ static void commit_planes_for_stream(struct dc *dc,
if (update_type == UPDATE_TYPE_FAST)
continue;
-#if defined(CONFIG_DRM_AMD_DC_DCN2_0)
ASSERT(!pipe_ctx->plane_state->triplebuffer_flips);
if (dc->hwss.program_triplebuffer != NULL &&
@@ -2184,7 +2241,6 @@ static void commit_planes_for_stream(struct dc *dc,
dc->hwss.program_triplebuffer(
dc, pipe_ctx, pipe_ctx->plane_state->triplebuffer_flips);
}
-#endif
stream_status =
stream_get_status(context, pipe_ctx->stream);
@@ -2193,10 +2249,24 @@ static void commit_planes_for_stream(struct dc *dc,
dc, pipe_ctx->stream, stream_status->plane_count, context);
}
}
-#if defined(CONFIG_DRM_AMD_DC_DCN2_0)
- if (dc->hwss.program_front_end_for_ctx && update_type != UPDATE_TYPE_FAST)
+ if (dc->hwss.program_front_end_for_ctx && update_type != UPDATE_TYPE_FAST) {
dc->hwss.program_front_end_for_ctx(dc, context);
+#ifdef CONFIG_DRM_AMD_DC_DCN
+ if (dc->debug.validate_dml_output) {
+ for (i = 0; i < dc->res_pool->pipe_count; i++) {
+ struct pipe_ctx cur_pipe = context->res_ctx.pipe_ctx[i];
+ if (cur_pipe.stream == NULL)
+ continue;
+
+ cur_pipe.plane_res.hubp->funcs->validate_dml_output(
+ cur_pipe.plane_res.hubp, dc->ctx,
+ &context->res_ctx.pipe_ctx[i].rq_regs,
+ &context->res_ctx.pipe_ctx[i].dlg_regs,
+ &context->res_ctx.pipe_ctx[i].ttu_regs);
+ }
+ }
#endif
+ }
// Update Type FAST, Surface updates
if (update_type == UPDATE_TYPE_FAST) {
@@ -2206,7 +2276,6 @@ static void commit_planes_for_stream(struct dc *dc,
*/
dc->hwss.pipe_control_lock(dc, top_pipe_to_program, true);
-#if defined(CONFIG_DRM_AMD_DC_DCN2_0)
if (dc->hwss.set_flip_control_gsl)
for (i = 0; i < surface_count; i++) {
struct dc_plane_state *plane_state = srf_updates[i].surface;
@@ -2225,7 +2294,6 @@ static void commit_planes_for_stream(struct dc *dc,
plane_state->flip_immediate);
}
}
-#endif
/* Perform requested Updates */
for (i = 0; i < surface_count; i++) {
struct dc_plane_state *plane_state = srf_updates[i].surface;
@@ -2238,7 +2306,6 @@ static void commit_planes_for_stream(struct dc *dc,
if (pipe_ctx->plane_state != plane_state)
continue;
-#if defined(CONFIG_DRM_AMD_DC_DCN2_0)
/*program triple buffer after lock based on flip type*/
if (dc->hwss.program_triplebuffer != NULL &&
!dc->debug.disable_tri_buf) {
@@ -2246,7 +2313,6 @@ static void commit_planes_for_stream(struct dc *dc,
dc->hwss.program_triplebuffer(
dc, pipe_ctx, plane_state->triplebuffer_flips);
}
-#endif
if (srf_updates[i].flip_addr)
dc->hwss.update_plane_addr(dc, pipe_ctx);
}
@@ -2407,14 +2473,15 @@ void dc_set_power_state(
case DC_ACPI_CM_POWER_STATE_D0:
dc_resource_state_construct(dc, dc->current_state);
+ if (dc->ctx->dmub_srv)
+ dc_dmub_srv_wait_phy_init(dc->ctx->dmub_srv);
+
dc->hwss.init_hw(dc);
-#ifdef CONFIG_DRM_AMD_DC_DCN2_0
if (dc->hwss.init_sys_ctx != NULL &&
dc->vm_pa_config.valid) {
dc->hwss.init_sys_ctx(dc->hwseq, dc, &dc->vm_pa_config);
}
-#endif
break;
default:
@@ -2494,6 +2561,17 @@ bool dc_submit_i2c(
cmd);
}
+bool dc_submit_i2c_oem(
+ struct dc *dc,
+ struct i2c_command *cmd)
+{
+ struct ddc_service *ddc = dc->res_pool->oem_device;
+ return dce_i2c_submit_command(
+ dc->res_pool,
+ ddc->ddc_pin,
+ cmd);
+}
+
static bool link_add_remote_sink_helper(struct dc_link *dc_link, struct dc_sink *sink)
{
if (dc_link->sink_count >= MAX_SINKS_PER_LINK) {
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_debug.c b/drivers/gpu/drm/amd/display/dc/core/dc_debug.c
index b9227d5de3a3..502ed3c7959d 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_debug.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_debug.c
@@ -33,7 +33,6 @@
#include "core_status.h"
#include "core_types.h"
-#include "hw_sequencer.h"
#include "resource.h"
@@ -310,14 +309,13 @@ void context_timing_trace(
struct resource_context *res_ctx)
{
int i;
- struct dc *core_dc = dc;
int h_pos[MAX_PIPES] = {0}, v_pos[MAX_PIPES] = {0};
struct crtc_position position;
- unsigned int underlay_idx = core_dc->res_pool->underlay_pipe_index;
+ unsigned int underlay_idx = dc->res_pool->underlay_pipe_index;
DC_LOGGER_INIT(dc->ctx->logger);
- for (i = 0; i < core_dc->res_pool->pipe_count; i++) {
+ for (i = 0; i < dc->res_pool->pipe_count; i++) {
struct pipe_ctx *pipe_ctx = &res_ctx->pipe_ctx[i];
/* get_position() returns CRTC vertical/horizontal counter
* hence not applicable for underlay pipe
@@ -329,7 +327,7 @@ void context_timing_trace(
h_pos[i] = position.horizontal_count;
v_pos[i] = position.vertical_count;
}
- for (i = 0; i < core_dc->res_pool->pipe_count; i++) {
+ for (i = 0; i < dc->res_pool->pipe_count; i++) {
struct pipe_ctx *pipe_ctx = &res_ctx->pipe_ctx[i];
if (pipe_ctx->stream == NULL || pipe_ctx->pipe_idx == underlay_idx)
@@ -347,7 +345,7 @@ void context_clock_trace(
struct dc *dc,
struct dc_state *context)
{
-#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
+#if defined(CONFIG_DRM_AMD_DC_DCN)
DC_LOGGER_INIT(dc->ctx->logger);
CLOCK_TRACE("Current: dispclk_khz:%d max_dppclk_khz:%d dcfclk_khz:%d\n"
"dcfclk_deep_sleep_khz:%d fclk_khz:%d socclk_khz:%d\n",
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link.c b/drivers/gpu/drm/amd/display/dc/core/dc_link.c
index 62d8289abb4e..260c0b62d37d 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_link.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_link.c
@@ -45,6 +45,7 @@
#include "dpcd_defs.h"
#include "dmcu.h"
#include "hw/clk_mgr.h"
+#include "../dce/dmub_psr.h"
#define DC_LOGGER_INIT(logger)
@@ -74,7 +75,7 @@ enum {
/*******************************************************************************
* Private functions
******************************************************************************/
-static void destruct(struct dc_link *link)
+static void dc_link_destruct(struct dc_link *link)
{
int i;
@@ -817,8 +818,8 @@ static bool dc_link_detect_helper(struct dc_link *link,
}
case SIGNAL_TYPE_EDP: {
- read_current_link_settings_on_detect(link);
detect_edp_sink_caps(link);
+ read_current_link_settings_on_detect(link);
sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C_OVER_AUX;
sink_caps.signal = SIGNAL_TYPE_EDP;
break;
@@ -1244,7 +1245,7 @@ static enum transmitter translate_encoder_to_transmitter(
}
}
-static bool construct(
+static bool dc_link_construct(
struct dc_link *link,
const struct link_init_data *init_params)
{
@@ -1446,7 +1447,7 @@ struct dc_link *link_create(const struct link_init_data *init_params)
if (NULL == link)
goto alloc_fail;
- if (false == construct(link, init_params))
+ if (false == dc_link_construct(link, init_params))
goto construct_fail;
return link;
@@ -1460,7 +1461,7 @@ alloc_fail:
void link_destroy(struct dc_link **link)
{
- destruct(*link);
+ dc_link_destruct(*link);
kfree(*link);
*link = NULL;
}
@@ -1495,10 +1496,7 @@ static enum dc_status enable_link_dp(
bool skip_video_pattern;
struct dc_link *link = stream->link;
struct dc_link_settings link_settings = {0};
- enum dp_panel_mode panel_mode;
-#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT
bool fec_enable;
-#endif
int i;
bool apply_seamless_boot_optimization = false;
@@ -1514,15 +1512,6 @@ static enum dc_status enable_link_dp(
decide_link_settings(stream, &link_settings);
if (pipe_ctx->stream->signal == SIGNAL_TYPE_EDP) {
- /* If link settings are different than current and link already enabled
- * then need to disable before programming to new rate.
- */
- if (link->link_status.link_active &&
- (link->cur_link_settings.lane_count != link_settings.lane_count ||
- link->cur_link_settings.link_rate != link_settings.link_rate)) {
- dp_disable_link_phy(link, pipe_ctx->stream->signal);
- }
-
/*in case it is not on*/
link->dc->hwss.edp_power_control(link, true);
link->dc->hwss.edp_wait_for_hpd_ready(link, true);
@@ -1533,50 +1522,29 @@ static enum dc_status enable_link_dp(
if (state->clk_mgr && !apply_seamless_boot_optimization)
state->clk_mgr->funcs->update_clocks(state->clk_mgr, state, false);
- dp_enable_link_phy(
- link,
- pipe_ctx->stream->signal,
- pipe_ctx->clock_source->id,
- &link_settings);
-
- if (stream->sink_patches.dppowerup_delay > 0) {
- int delay_dp_power_up_in_ms = stream->sink_patches.dppowerup_delay;
-
- msleep(delay_dp_power_up_in_ms);
- }
-
- panel_mode = dp_get_panel_mode(link);
- dp_set_panel_mode(link, panel_mode);
-
skip_video_pattern = true;
if (link_settings.link_rate == LINK_RATE_LOW)
skip_video_pattern = false;
- if (link->aux_access_disabled) {
- dc_link_dp_perform_link_training_skip_aux(link, &link_settings);
-
- link->cur_link_settings = link_settings;
- status = DC_OK;
- } else if (perform_link_training_with_retries(
- link,
+ if (perform_link_training_with_retries(
&link_settings,
skip_video_pattern,
- LINK_TRAINING_ATTEMPTS)) {
+ LINK_TRAINING_ATTEMPTS,
+ pipe_ctx,
+ pipe_ctx->stream->signal)) {
link->cur_link_settings = link_settings;
status = DC_OK;
}
else
status = DC_FAIL_DP_LINK_TRAINING;
-#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT
if (link->preferred_training_settings.fec_enable != NULL)
fec_enable = *link->preferred_training_settings.fec_enable;
else
fec_enable = true;
dp_set_fec_enable(link, fec_enable);
-#endif
return status;
}
@@ -2063,6 +2031,45 @@ static void write_i2c_redriver_setting(
ASSERT(i2c_success);
}
+static void disable_link(struct dc_link *link, enum signal_type signal)
+{
+ /*
+ * TODO: implement call for dp_set_hw_test_pattern
+ * it is needed for compliance testing
+ */
+
+	/* Here we need to specify that encoder output settings
+	 * need to be calculated as for the set mode; this will lead to
+	 * querying dynamic link capabilities, which should be done
+	 * before enabling output.
+	 */
+
+ if (dc_is_dp_signal(signal)) {
+ /* SST DP, eDP */
+ if (dc_is_dp_sst_signal(signal))
+ dp_disable_link_phy(link, signal);
+ else
+ dp_disable_link_phy_mst(link, signal);
+
+ if (dc_is_dp_sst_signal(signal) ||
+ link->mst_stream_alloc_table.stream_count == 0) {
+ dp_set_fec_enable(link, false);
+ dp_set_fec_ready(link, false);
+ }
+ } else {
+ if (signal != SIGNAL_TYPE_VIRTUAL)
+ link->link_enc->funcs->disable_output(link->link_enc, signal);
+ }
+
+ if (signal == SIGNAL_TYPE_DISPLAY_PORT_MST) {
+	/* MST: disable the link only when no stream uses it */
+ if (link->mst_stream_alloc_table.stream_count <= 0)
+ link->link_status.link_active = false;
+ } else {
+ link->link_status.link_active = false;
+ }
+}
+
static void enable_link_hdmi(struct pipe_ctx *pipe_ctx)
{
struct dc_stream_state *stream = pipe_ctx->stream;
@@ -2147,6 +2154,19 @@ static enum dc_status enable_link(
struct pipe_ctx *pipe_ctx)
{
enum dc_status status = DC_ERROR_UNEXPECTED;
+ struct dc_stream_state *stream = pipe_ctx->stream;
+ struct dc_link *link = stream->link;
+
+	/* There are scenarios where the driver is unloaded with a display
+	 * still enabled. When the driver is reloaded, the display may fail
+	 * to light up if there is a mismatch between the old and new link
+	 * settings. We need to disable the link first, then enable it with
+	 * the new settings.
+	 */
+ if (link->link_status.link_active) {
+ disable_link(link, pipe_ctx->stream->signal);
+ }
+
switch (pipe_ctx->stream->signal) {
case SIGNAL_TYPE_DISPLAY_PORT:
status = enable_link_dp(state, pipe_ctx);
@@ -2181,46 +2201,6 @@ static enum dc_status enable_link(
return status;
}
-static void disable_link(struct dc_link *link, enum signal_type signal)
-{
- /*
- * TODO: implement call for dp_set_hw_test_pattern
- * it is needed for compliance testing
- */
-
- /* here we need to specify that encoder output settings
- * need to be calculated as for the set mode,
- * it will lead to querying dynamic link capabilities
- * which should be done before enable output */
-
- if (dc_is_dp_signal(signal)) {
- /* SST DP, eDP */
- if (dc_is_dp_sst_signal(signal))
- dp_disable_link_phy(link, signal);
- else
- dp_disable_link_phy_mst(link, signal);
-#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT
-
- if (dc_is_dp_sst_signal(signal) ||
- link->mst_stream_alloc_table.stream_count == 0) {
- dp_set_fec_enable(link, false);
- dp_set_fec_ready(link, false);
- }
-#endif
- } else {
- if (signal != SIGNAL_TYPE_VIRTUAL)
- link->link_enc->funcs->disable_output(link->link_enc, signal);
- }
-
- if (signal == SIGNAL_TYPE_DISPLAY_PORT_MST) {
- /* MST disable link only when no stream use the link */
- if (link->mst_stream_alloc_table.stream_count <= 0)
- link->link_status.link_active = false;
- } else {
- link->link_status.link_active = false;
- }
-}
-
static uint32_t get_timing_pixel_clock_100hz(const struct dc_crtc_timing *timing)
{
@@ -2357,9 +2337,9 @@ bool dc_link_set_backlight_level(const struct dc_link *link,
uint32_t backlight_pwm_u16_16,
uint32_t frame_ramp)
{
- struct dc *core_dc = link->ctx->dc;
- struct abm *abm = core_dc->res_pool->abm;
- struct dmcu *dmcu = core_dc->res_pool->dmcu;
+ struct dc *dc = link->ctx->dc;
+ struct abm *abm = dc->res_pool->abm;
+ struct dmcu *dmcu = dc->res_pool->dmcu;
unsigned int controller_id = 0;
bool use_smooth_brightness = true;
int i;
@@ -2377,22 +2357,22 @@ bool dc_link_set_backlight_level(const struct dc_link *link,
if (dc_is_embedded_signal(link->connector_signal)) {
for (i = 0; i < MAX_PIPES; i++) {
- if (core_dc->current_state->res_ctx.pipe_ctx[i].stream) {
- if (core_dc->current_state->res_ctx.
+ if (dc->current_state->res_ctx.pipe_ctx[i].stream) {
+ if (dc->current_state->res_ctx.
pipe_ctx[i].stream->link
== link) {
/* DMCU -1 for all controller id values,
* therefore +1 here
*/
controller_id =
- core_dc->current_state->
+ dc->current_state->
res_ctx.pipe_ctx[i].stream_res.tg->inst +
1;
/* Disable brightness ramping when the display is blanked
* as it can hang the DMCU
*/
- if (core_dc->current_state->res_ctx.pipe_ctx[i].plane_state == NULL)
+ if (dc->current_state->res_ctx.pipe_ctx[i].plane_state == NULL)
frame_ramp = 0;
}
}
@@ -2410,8 +2390,8 @@ bool dc_link_set_backlight_level(const struct dc_link *link,
bool dc_link_set_abm_disable(const struct dc_link *link)
{
- struct dc *core_dc = link->ctx->dc;
- struct abm *abm = core_dc->res_pool->abm;
+ struct dc *dc = link->ctx->dc;
+ struct abm *abm = dc->res_pool->abm;
if ((abm == NULL) || (abm->funcs->set_backlight_level_pwm == NULL))
return false;
@@ -2423,12 +2403,13 @@ bool dc_link_set_abm_disable(const struct dc_link *link)
bool dc_link_set_psr_allow_active(struct dc_link *link, bool allow_active, bool wait)
{
- struct dc *core_dc = link->ctx->dc;
- struct dmcu *dmcu = core_dc->res_pool->dmcu;
-
+ struct dc *dc = link->ctx->dc;
+ struct dmcu *dmcu = dc->res_pool->dmcu;
+ struct dmub_psr *psr = dc->res_pool->psr;
-
- if ((dmcu != NULL && dmcu->funcs->is_dmcu_initialized(dmcu)) && link->psr_feature_enabled)
+ if ((psr != NULL) && link->psr_feature_enabled)
+ psr->funcs->set_psr_enable(psr, allow_active);
+ else if ((dmcu != NULL && dmcu->funcs->is_dmcu_initialized(dmcu)) && link->psr_feature_enabled)
dmcu->funcs->set_psr_enable(dmcu, allow_active, wait);
link->psr_allow_active = allow_active;
@@ -2438,10 +2419,13 @@ bool dc_link_set_psr_allow_active(struct dc_link *link, bool allow_active, bool
bool dc_link_get_psr_state(const struct dc_link *link, uint32_t *psr_state)
{
- struct dc *core_dc = link->ctx->dc;
- struct dmcu *dmcu = core_dc->res_pool->dmcu;
+ struct dc *dc = link->ctx->dc;
+ struct dmcu *dmcu = dc->res_pool->dmcu;
+ struct dmub_psr *psr = dc->res_pool->psr;
- if (dmcu != NULL && link->psr_feature_enabled)
+ if (psr != NULL && link->psr_feature_enabled)
+ psr->funcs->get_psr_state(psr_state);
+ else if (dmcu != NULL && link->psr_feature_enabled)
dmcu->funcs->get_psr_state(dmcu, psr_state);
return true;
@@ -2486,8 +2470,9 @@ bool dc_link_setup_psr(struct dc_link *link,
const struct dc_stream_state *stream, struct psr_config *psr_config,
struct psr_context *psr_context)
{
- struct dc *core_dc;
+ struct dc *dc;
struct dmcu *dmcu;
+ struct dmub_psr *psr;
int i;
/* updateSinkPsrDpcdConfig*/
union dpcd_psr_configuration psr_configuration;
@@ -2497,10 +2482,11 @@ bool dc_link_setup_psr(struct dc_link *link,
if (!link)
return false;
- core_dc = link->ctx->dc;
- dmcu = core_dc->res_pool->dmcu;
+ dc = link->ctx->dc;
+ dmcu = dc->res_pool->dmcu;
+ psr = dc->res_pool->psr;
- if (!dmcu)
+ if (!dmcu && !psr)
return false;
@@ -2537,13 +2523,13 @@ bool dc_link_setup_psr(struct dc_link *link,
psr_context->engineId = link->link_enc->preferred_engine;
for (i = 0; i < MAX_PIPES; i++) {
- if (core_dc->current_state->res_ctx.pipe_ctx[i].stream
+ if (dc->current_state->res_ctx.pipe_ctx[i].stream
== stream) {
/* dmcu -1 for all controller id values,
* therefore +1 here
*/
psr_context->controllerId =
- core_dc->current_state->res_ctx.
+ dc->current_state->res_ctx.
pipe_ctx[i].stream_res.tg->inst + 1;
break;
}
@@ -2556,7 +2542,7 @@ bool dc_link_setup_psr(struct dc_link *link,
transmitter_to_phy_id(link->link_enc->transmitter);
psr_context->crtcTimingVerticalTotal = stream->timing.v_total;
- psr_context->vsyncRateHz = div64_u64(div64_u64((stream->
+ psr_context->vsync_rate_hz = div64_u64(div64_u64((stream->
timing.pix_clk_100hz * 100),
stream->timing.v_total),
stream->timing.h_total);
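
For reference, the renamed vsync_rate_hz field above is simply the pixel clock (stored in 100 Hz units) divided by the total pixel count per frame. A minimal standalone sketch of that arithmetic, using hypothetical CEA-861 1080p60 timing values (illustrative only, not part of the patch):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* hypothetical 1080p60 timing */
	uint64_t pix_clk_100hz = 1485000;	/* 148.5 MHz in 100 Hz units */
	uint64_t v_total = 1125;
	uint64_t h_total = 2200;

	/* pix_clk_100hz * 100 restores Hz; dividing by the pixels per
	 * frame (v_total * h_total) yields the refresh rate. */
	uint64_t vsync_rate_hz = ((pix_clk_100hz * 100) / v_total) / h_total;

	printf("%llu Hz\n", (unsigned long long)vsync_rate_hz);	/* prints 60 */
	return 0;
}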
@@ -2586,7 +2572,7 @@ bool dc_link_setup_psr(struct dc_link *link,
psr_context->psr_level.u32all = 0;
-#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
+#if defined(CONFIG_DRM_AMD_DC_DCN)
/*skip power down the single pipe since it blocks the cstate*/
if (ASICREV_IS_RAVEN(link->ctx->asic_id.hw_internal_rev))
psr_context->psr_level.bits.SKIP_CRTC_DISABLE = true;
@@ -2609,7 +2595,10 @@ bool dc_link_setup_psr(struct dc_link *link,
*/
psr_context->frame_delay = 0;
- link->psr_feature_enabled = dmcu->funcs->setup_psr(dmcu, link, psr_context);
+ if (psr)
+ link->psr_feature_enabled = psr->funcs->setup_psr(psr, link, psr_context);
+ else
+ link->psr_feature_enabled = dmcu->funcs->setup_psr(dmcu, link, psr_context);
/* psr_enabled == 0 indicates setup_psr did not succeed, but this
* should not happen since firmware should be running at this point
@@ -2644,28 +2633,13 @@ static struct fixed31_32 get_pbn_per_slot(struct dc_stream_state *stream)
return dc_fixpt_div_int(mbytes_per_sec, 54);
}
-static int get_color_depth(enum dc_color_depth color_depth)
-{
- switch (color_depth) {
- case COLOR_DEPTH_666: return 6;
- case COLOR_DEPTH_888: return 8;
- case COLOR_DEPTH_101010: return 10;
- case COLOR_DEPTH_121212: return 12;
- case COLOR_DEPTH_141414: return 14;
- case COLOR_DEPTH_161616: return 16;
- default: return 0;
- }
-}
-
static struct fixed31_32 get_pbn_from_timing(struct pipe_ctx *pipe_ctx)
{
- uint32_t bpc;
uint64_t kbps;
struct fixed31_32 peak_kbps;
uint32_t numerator;
uint32_t denominator;
- bpc = get_color_depth(pipe_ctx->stream_res.pix_clk_params.color_depth);
kbps = dc_bandwidth_in_kbps_from_timing(&pipe_ctx->stream->timing);
/*
@@ -2899,6 +2873,39 @@ static enum dc_status deallocate_mst_payload(struct pipe_ctx *pipe_ctx)
return DC_OK;
}
+
+enum dc_status dc_link_reallocate_mst_payload(struct dc_link *link)
+{
+ int i;
+ struct pipe_ctx *pipe_ctx;
+
+	// Clear all MST payloads, then reallocate
+ for (i = 0; i < MAX_PIPES; i++) {
+ pipe_ctx = &link->dc->current_state->res_ctx.pipe_ctx[i];
+ if (pipe_ctx && pipe_ctx->stream && pipe_ctx->stream->link == link &&
+ pipe_ctx->stream->dpms_off == false &&
+ pipe_ctx->stream->signal == SIGNAL_TYPE_DISPLAY_PORT_MST) {
+ deallocate_mst_payload(pipe_ctx);
+ }
+ }
+
+ for (i = 0; i < MAX_PIPES; i++) {
+ pipe_ctx = &link->dc->current_state->res_ctx.pipe_ctx[i];
+ if (pipe_ctx && pipe_ctx->stream && pipe_ctx->stream->link == link &&
+ pipe_ctx->stream->dpms_off == false &&
+ pipe_ctx->stream->signal == SIGNAL_TYPE_DISPLAY_PORT_MST) {
+			/* Enabling/disabling the PHY clears the connection
+			 * between the BE and the FE; it needs to be restored here.
+			 */
+ link->link_enc->funcs->connect_dig_be_to_fe(link->link_enc,
+ pipe_ctx->stream_res.stream_enc->id, true);
+ dc_link_allocate_mst_payload(pipe_ctx);
+ }
+ }
+
+ return DC_OK;
+}
+
#if defined(CONFIG_DRM_AMD_DC_HDCP)
static void update_psp_stream_config(struct pipe_ctx *pipe_ctx, bool dpms_off)
{
@@ -2922,12 +2929,12 @@ void core_link_enable_stream(
struct dc_state *state,
struct pipe_ctx *pipe_ctx)
{
- struct dc *core_dc = pipe_ctx->stream->ctx->dc;
+ struct dc *dc = pipe_ctx->stream->ctx->dc;
struct dc_stream_state *stream = pipe_ctx->stream;
enum dc_status status;
DC_LOGGER_INIT(pipe_ctx->stream->ctx->logger);
- if (!IS_FPGA_MAXIMUS_DC(core_dc->ctx->dce_environment) &&
+ if (!IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment) &&
dc_is_virtual_signal(pipe_ctx->stream->signal))
return;
@@ -2946,6 +2953,7 @@ void core_link_enable_stream(
pipe_ctx->stream_res.stream_enc,
&stream->timing,
stream->output_color_space,
+ stream->use_vsc_sdp_for_colorimetry,
stream->link->dpcd_caps.dprx_feature.bits.SST_SPLIT_SDP_CAP);
if (dc_is_hdmi_tmds_signal(pipe_ctx->stream->signal))
@@ -2969,14 +2977,14 @@ void core_link_enable_stream(
pipe_ctx->stream_res.stream_enc,
&stream->timing);
- if (!IS_FPGA_MAXIMUS_DC(core_dc->ctx->dce_environment)) {
+ if (!IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment)) {
bool apply_edp_fast_boot_optimization =
pipe_ctx->stream->apply_edp_fast_boot_optimization;
pipe_ctx->stream->apply_edp_fast_boot_optimization = false;
resource_build_info_frame(pipe_ctx);
- core_dc->hwss.update_info_frame(pipe_ctx);
+ dc->hwss.update_info_frame(pipe_ctx);
/* Do not touch link on seamless boot optimization. */
if (pipe_ctx->stream->apply_seamless_boot_optimization) {
@@ -3019,7 +3027,7 @@ void core_link_enable_stream(
}
}
- core_dc->hwss.enable_audio_stream(pipe_ctx);
+ dc->hwss.enable_audio_stream(pipe_ctx);
/* turn off otg test pattern if enable */
if (pipe_ctx->stream_res.tg->funcs->set_test_pattern)
@@ -3027,28 +3035,24 @@ void core_link_enable_stream(
CONTROLLER_DP_TEST_PATTERN_VIDEOMODE,
COLOR_DEPTH_UNDEFINED);
-#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT
if (pipe_ctx->stream->timing.flags.DSC) {
if (dc_is_dp_signal(pipe_ctx->stream->signal) ||
dc_is_virtual_signal(pipe_ctx->stream->signal))
dp_set_dsc_enable(pipe_ctx, true);
}
-#endif
- core_dc->hwss.enable_stream(pipe_ctx);
+ dc->hwss.enable_stream(pipe_ctx);
-#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT
/* Set DSC PPS SDP (AKA "info frames") */
if (pipe_ctx->stream->timing.flags.DSC) {
if (dc_is_dp_signal(pipe_ctx->stream->signal) ||
dc_is_virtual_signal(pipe_ctx->stream->signal))
dp_set_dsc_pps_sdp(pipe_ctx, true);
}
-#endif
if (pipe_ctx->stream->signal == SIGNAL_TYPE_DISPLAY_PORT_MST)
dc_link_allocate_mst_payload(pipe_ctx);
- core_dc->hwss.unblank_stream(pipe_ctx,
+ dc->hwss.unblank_stream(pipe_ctx,
&pipe_ctx->stream->link->cur_link_settings);
if (dc_is_dp_signal(pipe_ctx->stream->signal))
@@ -3056,24 +3060,21 @@ void core_link_enable_stream(
#if defined(CONFIG_DRM_AMD_DC_HDCP)
update_psp_stream_config(pipe_ctx, false);
#endif
- }
-#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT
- else { // if (IS_FPGA_MAXIMUS_DC(core_dc->ctx->dce_environment))
+ } else { // if (IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment))
if (dc_is_dp_signal(pipe_ctx->stream->signal) ||
dc_is_virtual_signal(pipe_ctx->stream->signal))
dp_set_dsc_enable(pipe_ctx, true);
}
-#endif
}
void core_link_disable_stream(struct pipe_ctx *pipe_ctx)
{
- struct dc *core_dc = pipe_ctx->stream->ctx->dc;
+ struct dc *dc = pipe_ctx->stream->ctx->dc;
struct dc_stream_state *stream = pipe_ctx->stream;
struct dc_link *link = stream->sink->link;
- if (!IS_FPGA_MAXIMUS_DC(core_dc->ctx->dce_environment) &&
+ if (!IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment) &&
dc_is_virtual_signal(pipe_ctx->stream->signal))
return;
@@ -3081,7 +3082,7 @@ void core_link_disable_stream(struct pipe_ctx *pipe_ctx)
update_psp_stream_config(pipe_ctx, true);
#endif
- core_dc->hwss.blank_stream(pipe_ctx);
+ dc->hwss.blank_stream(pipe_ctx);
if (pipe_ctx->stream->signal == SIGNAL_TYPE_DISPLAY_PORT_MST)
deallocate_mst_payload(pipe_ctx);
@@ -3110,25 +3111,23 @@ void core_link_disable_stream(struct pipe_ctx *pipe_ctx)
write_i2c_redriver_setting(pipe_ctx, false);
}
}
- core_dc->hwss.disable_stream(pipe_ctx);
+ dc->hwss.disable_stream(pipe_ctx);
disable_link(pipe_ctx->stream->link, pipe_ctx->stream->signal);
-#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT
if (pipe_ctx->stream->timing.flags.DSC) {
if (dc_is_dp_signal(pipe_ctx->stream->signal))
dp_set_dsc_enable(pipe_ctx, false);
}
-#endif
}
void core_link_set_avmute(struct pipe_ctx *pipe_ctx, bool enable)
{
- struct dc *core_dc = pipe_ctx->stream->ctx->dc;
+ struct dc *dc = pipe_ctx->stream->ctx->dc;
if (!dc_is_hdmi_signal(pipe_ctx->stream->signal))
return;
- core_dc->hwss.set_avmute(pipe_ctx, enable);
+ dc->hwss.set_avmute(pipe_ctx, enable);
}
/**
@@ -3186,13 +3185,11 @@ uint32_t dc_bandwidth_in_kbps_from_timing(
uint32_t bits_per_channel = 0;
uint32_t kbps;
-#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT
if (timing->flags.DSC) {
kbps = (timing->pix_clk_100hz * timing->dsc_cfg.bits_per_pixel);
kbps = kbps / 160 + ((kbps % 160) ? 1 : 0);
return kbps;
}
-#endif
switch (timing->display_color_depth) {
case COLOR_DEPTH_666:
@@ -3345,6 +3342,7 @@ void dc_link_disable_hpd(const struct dc_link *link)
void dc_link_set_test_pattern(struct dc_link *link,
enum dp_test_pattern test_pattern,
+ enum dp_test_pattern_color_space test_pattern_color_space,
const struct link_training_settings *p_link_settings,
const unsigned char *p_custom_pattern,
unsigned int cust_pattern_size)
@@ -3353,6 +3351,7 @@ void dc_link_set_test_pattern(struct dc_link *link,
dc_link_dp_set_test_pattern(
link,
test_pattern,
+ test_pattern_color_space,
p_link_settings,
p_custom_pattern,
cust_pattern_size);
@@ -3368,7 +3367,6 @@ uint32_t dc_link_bandwidth_kbps(
link_bw_kbps *= 8; /* 8 bits per byte*/
link_bw_kbps *= link_setting->lane_count;
-#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT
if (link->dpcd_caps.fec_cap.bits.FEC_CAPABLE) {
/* Account for FEC overhead.
* We have to do it based on caps,
@@ -3393,7 +3391,6 @@ uint32_t dc_link_bandwidth_kbps(
link_bw_kbps = mul_u64_u32_shr(BIT_ULL(32) * 970LL / 1000,
link_bw_kbps, 32);
}
-#endif
return link_bw_kbps;
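
For context, the mul_u64_u32_shr(BIT_ULL(32) * 970LL / 1000, link_bw_kbps, 32) call above is a 32.32 fixed-point multiply by 0.970, derating the raw link bandwidth by roughly 3% for FEC overhead. A plain-integer sketch of the same math, with a hypothetical input value (illustrative only, not part of the patch):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t link_bw_kbps = 25920000;	/* hypothetical raw figure in kbps */

	/* 0.970 encoded as a 32.32 fixed-point factor */
	uint64_t factor = (((uint64_t)1 << 32) * 970) / 1000;

	/* multiply, then shift the 32 fractional bits back out */
	link_bw_kbps = (link_bw_kbps * factor) >> 32;

	printf("%llu kbps\n", (unsigned long long)link_bw_kbps);	/* ~25142400 */
	return 0;
}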
@@ -3407,3 +3404,10 @@ const struct dc_link_settings *dc_link_get_link_cap(
return &link->preferred_link_setting;
return &link->verified_link_cap;
}
+
+void dc_link_overwrite_extended_receiver_cap(
+ struct dc_link *link)
+{
+ dp_overwrite_extended_receiver_cap(link);
+}
+
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link_ddc.c b/drivers/gpu/drm/amd/display/dc/core/dc_link_ddc.c
index 81789191d4ec..a49c10d5df26 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_link_ddc.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_link_ddc.c
@@ -187,7 +187,7 @@ void dal_ddc_i2c_payloads_add(
}
-static void construct(
+static void ddc_service_construct(
struct ddc_service *ddc_service,
struct ddc_service_init_data *init_data)
{
@@ -206,7 +206,10 @@ static void construct(
ddc_service->ddc_pin = NULL;
} else {
hw_info.ddc_channel = i2c_info.i2c_line;
- hw_info.hw_supported = i2c_info.i2c_hw_assist;
+ if (ddc_service->link != NULL)
+ hw_info.hw_supported = i2c_info.i2c_hw_assist;
+ else
+ hw_info.hw_supported = false;
ddc_service->ddc_pin = dal_gpio_create_ddc(
gpio_service,
@@ -236,11 +239,11 @@ struct ddc_service *dal_ddc_service_create(
if (!ddc_service)
return NULL;
- construct(ddc_service, init_data);
+ ddc_service_construct(ddc_service, init_data);
return ddc_service;
}
-static void destruct(struct ddc_service *ddc)
+static void ddc_service_destruct(struct ddc_service *ddc)
{
if (ddc->ddc_pin)
dal_gpio_destroy_ddc(&ddc->ddc_pin);
@@ -252,7 +255,7 @@ void dal_ddc_service_destroy(struct ddc_service **ddc)
BREAK_TO_DEBUGGER();
return;
}
- destruct(*ddc);
+ ddc_service_destruct(*ddc);
kfree(*ddc);
*ddc = NULL;
}
@@ -587,7 +590,7 @@ bool dal_ddc_submit_aux_command(struct ddc_service *ddc,
struct aux_payload *payload)
{
uint32_t retrieved = 0;
- bool ret = 0;
+ bool ret = false;
if (!ddc)
return false;
@@ -647,17 +650,16 @@ bool dc_link_aux_transfer_with_retries(struct ddc_service *ddc,
}
-enum dc_status dc_link_aux_configure_timeout(struct ddc_service *ddc,
+uint32_t dc_link_aux_configure_timeout(struct ddc_service *ddc,
uint32_t timeout)
{
- enum dc_status status = DC_OK;
+ uint32_t prev_timeout = 0;
struct ddc *ddc_pin = ddc->ddc_pin;
- if (ddc->ctx->dc->res_pool->engines[ddc_pin->pin_data->en]->funcs->configure_timeout == NULL)
- return DC_ERROR_UNEXPECTED;
- if (!ddc->ctx->dc->res_pool->engines[ddc_pin->pin_data->en]->funcs->configure_timeout(ddc, timeout))
- status = DC_ERROR_UNEXPECTED;
- return status;
+ if (ddc->ctx->dc->res_pool->engines[ddc_pin->pin_data->en]->funcs->configure_timeout)
+ prev_timeout =
+ ddc->ctx->dc->res_pool->engines[ddc_pin->pin_data->en]->funcs->configure_timeout(ddc, timeout);
+ return prev_timeout;
}
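
Returning the previous timeout instead of an enum dc_status lets callers save and restore the AUX timeout around a slow transaction. A minimal sketch of that pattern; do_long_aux_transaction() is a hypothetical placeholder (illustrative only, not part of the patch):

static void with_extended_aux_timeout(struct ddc_service *ddc)
{
	/* extend the AUX timeout and remember what it was */
	uint32_t prev_timeout = dc_link_aux_configure_timeout(ddc, 3200);

	do_long_aux_transaction(ddc);	/* hypothetical helper */

	/* restore whatever timeout was in effect before */
	dc_link_aux_configure_timeout(ddc, prev_timeout);
}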
/*test only function*/
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c b/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c
index 504055fc70e8..6ab298c65247 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c
@@ -4,12 +4,8 @@
#include "dc_link_dp.h"
#include "dm_helpers.h"
#include "opp.h"
-#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT
#include "dsc.h"
-#endif
-#if defined(CONFIG_DRM_AMD_DC_DCN2_0)
#include "resource.h"
-#endif
#include "inc/core_types.h"
#include "link_hwss.h"
@@ -21,6 +17,9 @@
#define DC_LOGGER \
link->ctx->logger
+
+#define DP_REPEATER_CONFIGURATION_AND_STATUS_SIZE 0x50
+
/* maximum pre emphasis level allowed for each voltage swing level*/
static const enum dc_pre_emphasis voltage_swing_to_pre_emphasis[] = {
PRE_EMPHASIS_LEVEL3,
@@ -221,19 +220,31 @@ static enum dpcd_training_patterns
return dpcd_tr_pattern;
}
+static inline bool is_repeater(struct dc_link *link, uint32_t offset)
+{
+ return (!link->is_lttpr_mode_transparent && offset != 0);
+}
+
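
The per-repeater DPCD addressing used throughout this patch follows one pattern: each LTTPR owns a 0x50-byte register block, so a repeater's copy of a register sits at the corresponding *_PHY_REPEATER1 base plus (offset - 1) blocks. A small sketch of that invariant (illustrative only, not part of the patch):

/* offset is 1-based; offset == 0 (DPRX) keeps the legacy addresses,
 * which is what is_repeater() above checks for. */
static uint32_t lttpr_reg_address(uint32_t phy_repeater1_base, uint32_t offset)
{
	return phy_repeater1_base +
	       DP_REPEATER_CONFIGURATION_AND_STATUS_SIZE * (offset - 1);
}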
static void dpcd_set_lt_pattern_and_lane_settings(
struct dc_link *link,
const struct link_training_settings *lt_settings,
- enum dc_dp_training_pattern pattern)
+ enum dc_dp_training_pattern pattern,
+ uint32_t offset)
{
union dpcd_training_lane dpcd_lane[LANE_COUNT_DP_MAX] = { { {0} } };
- const uint32_t dpcd_base_lt_offset =
- DP_TRAINING_PATTERN_SET;
+
+ uint32_t dpcd_base_lt_offset;
+
uint8_t dpcd_lt_buffer[5] = {0};
union dpcd_training_pattern dpcd_pattern = { {0} };
uint32_t lane;
uint32_t size_in_bytes;
bool edp_workaround = false; /* TODO link_prop.INTERNAL */
+ dpcd_base_lt_offset = DP_TRAINING_PATTERN_SET;
+
+ if (is_repeater(link, offset))
+ dpcd_base_lt_offset = DP_TRAINING_PATTERN_SET_PHY_REPEATER1 +
+ ((DP_REPEATER_CONFIGURATION_AND_STATUS_SIZE) * (offset - 1));
/*****************************************************************
* DpcdAddress_TrainingPatternSet
@@ -241,14 +252,21 @@ static void dpcd_set_lt_pattern_and_lane_settings(
dpcd_pattern.v1_4.TRAINING_PATTERN_SET =
dc_dp_training_pattern_to_dpcd_training_pattern(link, pattern);
- dpcd_lt_buffer[DP_TRAINING_PATTERN_SET - dpcd_base_lt_offset]
+ dpcd_lt_buffer[DP_TRAINING_PATTERN_SET - DP_TRAINING_PATTERN_SET]
= dpcd_pattern.raw;
- DC_LOG_HW_LINK_TRAINING("%s\n %x pattern = %x\n",
- __func__,
- DP_TRAINING_PATTERN_SET,
- dpcd_pattern.v1_4.TRAINING_PATTERN_SET);
-
+ if (is_repeater(link, offset)) {
+ DC_LOG_HW_LINK_TRAINING("%s\n LTTPR Repeater ID: %d\n 0x%X pattern = %x\n",
+ __func__,
+ offset,
+ dpcd_base_lt_offset,
+ dpcd_pattern.v1_4.TRAINING_PATTERN_SET);
+ } else {
+ DC_LOG_HW_LINK_TRAINING("%s\n 0x%X pattern = %x\n",
+ __func__,
+ dpcd_base_lt_offset,
+ dpcd_pattern.v1_4.TRAINING_PATTERN_SET);
+ }
/*****************************************************************
* DpcdAddress_Lane0Set -> DpcdAddress_Lane3Set
*****************************************************************/
@@ -268,24 +286,35 @@ static void dpcd_set_lt_pattern_and_lane_settings(
PRE_EMPHASIS_MAX_LEVEL ? 1 : 0);
}
- /* concatinate everything into one buffer*/
+ /* concatenate everything into one buffer*/
size_in_bytes = lt_settings->link_settings.lane_count * sizeof(dpcd_lane[0]);
// 0x00103 - 0x00102
memmove(
- &dpcd_lt_buffer[DP_TRAINING_LANE0_SET - dpcd_base_lt_offset],
+ &dpcd_lt_buffer[DP_TRAINING_LANE0_SET - DP_TRAINING_PATTERN_SET],
dpcd_lane,
size_in_bytes);
- DC_LOG_HW_LINK_TRAINING("%s:\n %x VS set = %x PE set = %x max VS Reached = %x max PE Reached = %x\n",
- __func__,
- DP_TRAINING_LANE0_SET,
- dpcd_lane[0].bits.VOLTAGE_SWING_SET,
- dpcd_lane[0].bits.PRE_EMPHASIS_SET,
- dpcd_lane[0].bits.MAX_SWING_REACHED,
- dpcd_lane[0].bits.MAX_PRE_EMPHASIS_REACHED);
-
+ if (is_repeater(link, offset)) {
+ DC_LOG_HW_LINK_TRAINING("%s:\n LTTPR Repeater ID: %d\n"
+ " 0x%X VS set = %x PE set = %x max VS Reached = %x max PE Reached = %x\n",
+ __func__,
+ offset,
+ dpcd_base_lt_offset,
+ dpcd_lane[0].bits.VOLTAGE_SWING_SET,
+ dpcd_lane[0].bits.PRE_EMPHASIS_SET,
+ dpcd_lane[0].bits.MAX_SWING_REACHED,
+ dpcd_lane[0].bits.MAX_PRE_EMPHASIS_REACHED);
+ } else {
+ DC_LOG_HW_LINK_TRAINING("%s:\n 0x%X VS set = %x PE set = %x max VS Reached = %x max PE Reached = %x\n",
+ __func__,
+ dpcd_base_lt_offset,
+ dpcd_lane[0].bits.VOLTAGE_SWING_SET,
+ dpcd_lane[0].bits.PRE_EMPHASIS_SET,
+ dpcd_lane[0].bits.MAX_SWING_REACHED,
+ dpcd_lane[0].bits.MAX_PRE_EMPHASIS_REACHED);
+ }
if (edp_workaround) {
/* for eDP write in 2 parts because the 5-byte burst is
* causing issues on some eDP panels (EPR#366724)
@@ -495,8 +524,12 @@ static void get_lane_status_and_drive_settings(
const struct link_training_settings *link_training_setting,
union lane_status *ln_status,
union lane_align_status_updated *ln_status_updated,
- struct link_training_settings *req_settings)
+ struct link_training_settings *req_settings,
+ uint32_t offset)
{
+ unsigned int lane01_status_address = DP_LANE0_1_STATUS;
+ uint8_t lane_adjust_offset = 4;
+ unsigned int lane01_adjust_address;
uint8_t dpcd_buf[6] = {0};
union lane_adjust dpcd_lane_adjust[LANE_COUNT_DP_MAX] = { { {0} } };
struct link_training_settings request_settings = { {0} };
@@ -504,9 +537,16 @@ static void get_lane_status_and_drive_settings(
memset(req_settings, '\0', sizeof(struct link_training_settings));
+ if (is_repeater(link, offset)) {
+ lane01_status_address =
+ DP_LANE0_1_STATUS_PHY_REPEATER1 +
+ ((DP_REPEATER_CONFIGURATION_AND_STATUS_SIZE) * (offset - 1));
+ lane_adjust_offset = 3;
+ }
+
core_link_read_dpcd(
link,
- DP_LANE0_1_STATUS,
+ lane01_status_address,
(uint8_t *)(dpcd_buf),
sizeof(dpcd_buf));
@@ -517,22 +557,47 @@ static void get_lane_status_and_drive_settings(
ln_status[lane].raw =
get_nibble_at_index(&dpcd_buf[0], lane);
dpcd_lane_adjust[lane].raw =
- get_nibble_at_index(&dpcd_buf[4], lane);
+ get_nibble_at_index(&dpcd_buf[lane_adjust_offset], lane);
}
ln_status_updated->raw = dpcd_buf[2];
- DC_LOG_HW_LINK_TRAINING("%s:\n%x Lane01Status = %x\n %x Lane23Status = %x\n ",
- __func__,
- DP_LANE0_1_STATUS, dpcd_buf[0],
- DP_LANE2_3_STATUS, dpcd_buf[1]);
-
- DC_LOG_HW_LINK_TRAINING("%s:\n %x Lane01AdjustRequest = %x\n %x Lane23AdjustRequest = %x\n",
- __func__,
- DP_ADJUST_REQUEST_LANE0_1,
- dpcd_buf[4],
- DP_ADJUST_REQUEST_LANE2_3,
- dpcd_buf[5]);
+ if (is_repeater(link, offset)) {
+ DC_LOG_HW_LINK_TRAINING("%s:\n LTTPR Repeater ID: %d\n"
+ " 0x%X Lane01Status = %x\n 0x%X Lane23Status = %x\n ",
+ __func__,
+ offset,
+ lane01_status_address, dpcd_buf[0],
+ lane01_status_address + 1, dpcd_buf[1]);
+ } else {
+ DC_LOG_HW_LINK_TRAINING("%s:\n 0x%X Lane01Status = %x\n 0x%X Lane23Status = %x\n ",
+ __func__,
+ lane01_status_address, dpcd_buf[0],
+ lane01_status_address + 1, dpcd_buf[1]);
+ }
+ lane01_adjust_address = DP_ADJUST_REQUEST_LANE0_1;
+
+ if (is_repeater(link, offset))
+ lane01_adjust_address = DP_ADJUST_REQUEST_LANE0_1_PHY_REPEATER1 +
+ ((DP_REPEATER_CONFIGURATION_AND_STATUS_SIZE) * (offset - 1));
+
+ if (is_repeater(link, offset)) {
+ DC_LOG_HW_LINK_TRAINING("%s:\n LTTPR Repeater ID: %d\n"
+ " 0x%X Lane01AdjustRequest = %x\n 0x%X Lane23AdjustRequest = %x\n",
+ __func__,
+ offset,
+ lane01_adjust_address,
+ dpcd_buf[lane_adjust_offset],
+ lane01_adjust_address + 1,
+ dpcd_buf[lane_adjust_offset + 1]);
+ } else {
+ DC_LOG_HW_LINK_TRAINING("%s:\n 0x%X Lane01AdjustRequest = %x\n 0x%X Lane23AdjustRequest = %x\n",
+ __func__,
+ lane01_adjust_address,
+ dpcd_buf[lane_adjust_offset],
+ lane01_adjust_address + 1,
+ dpcd_buf[lane_adjust_offset + 1]);
+ }
/*copy to req_settings*/
request_settings.link_settings.lane_count =
@@ -571,10 +636,18 @@ static void get_lane_status_and_drive_settings(
static void dpcd_set_lane_settings(
struct dc_link *link,
- const struct link_training_settings *link_training_setting)
+ const struct link_training_settings *link_training_setting,
+ uint32_t offset)
{
union dpcd_training_lane dpcd_lane[LANE_COUNT_DP_MAX] = {{{0}}};
uint32_t lane;
+ unsigned int lane0_set_address;
+
+ lane0_set_address = DP_TRAINING_LANE0_SET;
+
+ if (is_repeater(link, offset))
+ lane0_set_address = DP_TRAINING_LANE0_SET_PHY_REPEATER1 +
+ ((DP_REPEATER_CONFIGURATION_AND_STATUS_SIZE) * (offset - 1));
for (lane = 0; lane <
(uint32_t)(link_training_setting->
@@ -597,7 +670,7 @@ static void dpcd_set_lane_settings(
}
core_link_write_dpcd(link,
- DP_TRAINING_LANE0_SET,
+ lane0_set_address,
(uint8_t *)(dpcd_lane),
link_training_setting->link_settings.lane_count);
@@ -620,14 +693,26 @@ static void dpcd_set_lane_settings(
}
*/
- DC_LOG_HW_LINK_TRAINING("%s\n %x VS set = %x PE set = %x max VS Reached = %x max PE Reached = %x\n",
- __func__,
- DP_TRAINING_LANE0_SET,
- dpcd_lane[0].bits.VOLTAGE_SWING_SET,
- dpcd_lane[0].bits.PRE_EMPHASIS_SET,
- dpcd_lane[0].bits.MAX_SWING_REACHED,
- dpcd_lane[0].bits.MAX_PRE_EMPHASIS_REACHED);
+ if (is_repeater(link, offset)) {
+ DC_LOG_HW_LINK_TRAINING("%s\n LTTPR Repeater ID: %d\n"
+ " 0x%X VS set = %x PE set = %x max VS Reached = %x max PE Reached = %x\n",
+ __func__,
+ offset,
+ lane0_set_address,
+ dpcd_lane[0].bits.VOLTAGE_SWING_SET,
+ dpcd_lane[0].bits.PRE_EMPHASIS_SET,
+ dpcd_lane[0].bits.MAX_SWING_REACHED,
+ dpcd_lane[0].bits.MAX_PRE_EMPHASIS_REACHED);
+ } else {
+ DC_LOG_HW_LINK_TRAINING("%s\n 0x%X VS set = %x PE set = %x max VS Reached = %x max PE Reached = %x\n",
+ __func__,
+ lane0_set_address,
+ dpcd_lane[0].bits.VOLTAGE_SWING_SET,
+ dpcd_lane[0].bits.PRE_EMPHASIS_SET,
+ dpcd_lane[0].bits.MAX_SWING_REACHED,
+ dpcd_lane[0].bits.MAX_PRE_EMPHASIS_REACHED);
+ }
link->cur_lane_setting = link_training_setting->lane_settings[0];
}
@@ -647,17 +732,6 @@ static bool is_max_vs_reached(
}
-void dc_link_dp_set_drive_settings(
- struct dc_link *link,
- struct link_training_settings *lt_settings)
-{
- /* program ASIC PHY settings*/
- dp_set_hw_lane_settings(link, lt_settings);
-
- /* Notify DP sink the PHY settings from source */
- dpcd_set_lane_settings(link, lt_settings);
-}
-
static bool perform_post_lt_adj_req_sequence(
struct dc_link *link,
struct link_training_settings *lt_settings)
@@ -690,7 +764,8 @@ static bool perform_post_lt_adj_req_sequence(
lt_settings,
dpcd_lane_status,
&dpcd_lane_status_updated,
- &req_settings);
+ &req_settings,
+ DPRX);
if (dpcd_lane_status_updated.bits.
POST_LT_ADJ_REQ_IN_PROGRESS == 0)
@@ -747,6 +822,31 @@ static bool perform_post_lt_adj_req_sequence(
}
+/* Only used for channel equalization */
+static uint32_t translate_training_aux_read_interval(uint32_t dpcd_aux_read_interval)
+{
+ unsigned int aux_rd_interval_us = 400;
+
+ switch (dpcd_aux_read_interval) {
+ case 0x01:
+ aux_rd_interval_us = 4000;
+ break;
+ case 0x02:
+ aux_rd_interval_us = 8000;
+ break;
+ case 0x03:
+ aux_rd_interval_us = 12000;
+ break;
+ case 0x04:
+ aux_rd_interval_us = 16000;
+ break;
+ default:
+ break;
+ }
+
+ return aux_rd_interval_us;
+}
+
static enum link_training_result get_cr_failure(enum dc_lane_count ln_count,
union lane_status *dpcd_lane_status)
{
@@ -765,37 +865,55 @@ static enum link_training_result get_cr_failure(enum dc_lane_count ln_count,
static enum link_training_result perform_channel_equalization_sequence(
struct dc_link *link,
- struct link_training_settings *lt_settings)
+ struct link_training_settings *lt_settings,
+ uint32_t offset)
{
struct link_training_settings req_settings;
enum dc_dp_training_pattern tr_pattern;
uint32_t retries_ch_eq;
+ uint32_t wait_time_microsec;
enum dc_lane_count lane_count = lt_settings->link_settings.lane_count;
union lane_align_status_updated dpcd_lane_status_updated = { {0} };
union lane_status dpcd_lane_status[LANE_COUNT_DP_MAX] = { { {0} } };
+ /* Note: also check that TPS4 is a supported feature*/
+
tr_pattern = lt_settings->pattern_for_eq;
- dp_set_hw_training_pattern(link, tr_pattern);
+ if (is_repeater(link, offset))
+ tr_pattern = DP_TRAINING_PATTERN_SEQUENCE_4;
+
+ dp_set_hw_training_pattern(link, tr_pattern, offset);
for (retries_ch_eq = 0; retries_ch_eq <= LINK_TRAINING_MAX_RETRY_COUNT;
retries_ch_eq++) {
- dp_set_hw_lane_settings(link, lt_settings);
+ dp_set_hw_lane_settings(link, lt_settings, offset);
/* 2. update DPCD*/
if (!retries_ch_eq)
/* EPR #361076 - write as a 5-byte burst,
- * but only for the 1-st iteration*/
+ * but only for the first iteration
+ */
+
dpcd_set_lt_pattern_and_lane_settings(
link,
lt_settings,
- tr_pattern);
+ tr_pattern, offset);
else
- dpcd_set_lane_settings(link, lt_settings);
+ dpcd_set_lane_settings(link, lt_settings, offset);
/* 3. wait for receiver to lock-on*/
- wait_for_training_aux_rd_interval(link, lt_settings->eq_pattern_time);
+ wait_time_microsec = lt_settings->eq_pattern_time;
+
+ if (is_repeater(link, offset))
+ wait_time_microsec =
+ translate_training_aux_read_interval(
+ link->dpcd_caps.lttpr_caps.aux_rd_interval[offset - 1]);
+
+ wait_for_training_aux_rd_interval(
+ link,
+ wait_time_microsec);
/* 4. Read lane status and requested
* drive settings as set by the sink*/
@@ -805,7 +923,8 @@ static enum link_training_result perform_channel_equalization_sequence(
lt_settings,
dpcd_lane_status,
&dpcd_lane_status_updated,
- &req_settings);
+ &req_settings,
+ offset);
/* 5. check CR done*/
if (!is_cr_done(lane_count, dpcd_lane_status))
@@ -824,13 +943,16 @@ static enum link_training_result perform_channel_equalization_sequence(
return LINK_TRAINING_EQ_FAIL_EQ;
}
+#define TRAINING_AUX_RD_INTERVAL 100 //us
static enum link_training_result perform_clock_recovery_sequence(
struct dc_link *link,
- struct link_training_settings *lt_settings)
+ struct link_training_settings *lt_settings,
+ uint32_t offset)
{
uint32_t retries_cr;
uint32_t retry_count;
+ uint32_t wait_time_microsec;
struct link_training_settings req_settings;
enum dc_lane_count lane_count = lt_settings->link_settings.lane_count;
enum dc_dp_training_pattern tr_pattern = DP_TRAINING_PATTERN_SEQUENCE_1;
@@ -840,7 +962,7 @@ static enum link_training_result perform_clock_recovery_sequence(
retries_cr = 0;
retry_count = 0;
- dp_set_hw_training_pattern(link, tr_pattern);
+ dp_set_hw_training_pattern(link, tr_pattern, offset);
/* najeeb - The synaptics MST hub can put the LT in
* infinite loop by switching the VS
@@ -857,7 +979,8 @@ static enum link_training_result perform_clock_recovery_sequence(
/* 1. call HWSS to set lane settings*/
dp_set_hw_lane_settings(
link,
- lt_settings);
+ lt_settings,
+ offset);
/* 2. update DPCD of the receiver*/
if (!retries_cr)
@@ -866,16 +989,23 @@ static enum link_training_result perform_clock_recovery_sequence(
dpcd_set_lt_pattern_and_lane_settings(
link,
lt_settings,
- tr_pattern);
+ tr_pattern,
+ offset);
else
dpcd_set_lane_settings(
link,
- lt_settings);
+ lt_settings,
+ offset);
/* 3. wait receiver to lock-on*/
+ wait_time_microsec = lt_settings->cr_pattern_time;
+
+ if (!link->is_lttpr_mode_transparent)
+ wait_time_microsec = TRAINING_AUX_RD_INTERVAL;
+
wait_for_training_aux_rd_interval(
link,
- lt_settings->cr_pattern_time);
+ wait_time_microsec);
/* 4. Read lane status and requested drive
* settings as set by the sink
@@ -885,7 +1015,8 @@ static enum link_training_result perform_clock_recovery_sequence(
lt_settings,
dpcd_lane_status,
&dpcd_lane_status_updated,
- &req_settings);
+ &req_settings,
+ offset);
/* 5. check CR done*/
if (is_cr_done(lane_count, dpcd_lane_status))
@@ -1054,6 +1185,102 @@ static void initialize_training_settings(
lt_settings->enhanced_framing = 1;
}
+static uint8_t convert_to_count(uint8_t lttpr_repeater_count)
+{
+ switch (lttpr_repeater_count) {
+ case 0x80: // 1 lttpr repeater
+ return 1;
+ case 0x40: // 2 lttpr repeaters
+ return 2;
+ case 0x20: // 3 lttpr repeaters
+ return 3;
+ case 0x10: // 4 lttpr repeaters
+ return 4;
+ case 0x08: // 5 lttpr repeaters
+ return 5;
+ case 0x04: // 6 lttpr repeaters
+ return 6;
+ case 0x02: // 7 lttpr repeaters
+ return 7;
+ case 0x01: // 8 lttpr repeaters
+ return 8;
+ default:
+ break;
+ }
+ return 0; // invalid value
+}
+
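
Since PHY_REPEATER_CNT is a one-hot encoding (0x80 = 1 repeater down to 0x01 = 8 repeaters), the switch above could equivalently be derived from the set bit's position. A sketch of that equivalence, returning 0 for any malformed value (illustrative only, not part of the patch):

static uint8_t convert_to_count_alt(uint8_t lttpr_repeater_count)
{
	uint8_t pos = 0;

	/* reject zero or multiple bits set: not a valid encoding */
	if (lttpr_repeater_count == 0 ||
	    (lttpr_repeater_count & (lttpr_repeater_count - 1)))
		return 0;

	while (!(lttpr_repeater_count & 0x80)) {
		lttpr_repeater_count <<= 1;
		pos++;
	}

	return pos + 1;	/* 0x80 -> 1, 0x40 -> 2, ..., 0x01 -> 8 */
}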
+static void configure_lttpr_mode(struct dc_link *link)
+{
+ /* aux timeout is already set to extended */
+	/* RESET/SET LTTPR mode to enable non-transparent mode */
+ uint8_t repeater_cnt;
+ uint32_t aux_interval_address;
+ uint8_t repeater_id;
+ enum dc_status result = DC_ERROR_UNEXPECTED;
+ uint8_t repeater_mode = DP_PHY_REPEATER_MODE_TRANSPARENT;
+
+ DC_LOG_HW_LINK_TRAINING("%s\n Set LTTPR to Transparent Mode\n", __func__);
+ result = core_link_write_dpcd(link,
+ DP_PHY_REPEATER_MODE,
+ (uint8_t *)&repeater_mode,
+ sizeof(repeater_mode));
+
+ if (result == DC_OK) {
+ link->dpcd_caps.lttpr_caps.mode = repeater_mode;
+ }
+
+ if (!link->is_lttpr_mode_transparent) {
+
+ DC_LOG_HW_LINK_TRAINING("%s\n Set LTTPR to Non Transparent Mode\n", __func__);
+
+ repeater_mode = DP_PHY_REPEATER_MODE_NON_TRANSPARENT;
+ result = core_link_write_dpcd(link,
+ DP_PHY_REPEATER_MODE,
+ (uint8_t *)&repeater_mode,
+ sizeof(repeater_mode));
+
+ if (result == DC_OK) {
+ link->dpcd_caps.lttpr_caps.mode = repeater_mode;
+ }
+
+ repeater_cnt = convert_to_count(link->dpcd_caps.lttpr_caps.phy_repeater_cnt);
+ for (repeater_id = repeater_cnt; repeater_id > 0; repeater_id--) {
+ aux_interval_address = DP_TRAINING_AUX_RD_INTERVAL_PHY_REPEATER1 +
+ ((DP_REPEATER_CONFIGURATION_AND_STATUS_SIZE) * (repeater_id - 1));
+ core_link_read_dpcd(
+ link,
+ aux_interval_address,
+ (uint8_t *)&link->dpcd_caps.lttpr_caps.aux_rd_interval[repeater_id - 1],
+ sizeof(link->dpcd_caps.lttpr_caps.aux_rd_interval[repeater_id - 1]));
+ link->dpcd_caps.lttpr_caps.aux_rd_interval[repeater_id - 1] &= 0x7F;
+ }
+ }
+}
+
+static void repeater_training_done(struct dc_link *link, uint32_t offset)
+{
+ union dpcd_training_pattern dpcd_pattern = { {0} };
+
+ const uint32_t dpcd_base_lt_offset =
+ DP_TRAINING_PATTERN_SET_PHY_REPEATER1 +
+ ((DP_REPEATER_CONFIGURATION_AND_STATUS_SIZE) * (offset - 1));
+ /* Set training not in progress*/
+ dpcd_pattern.v1_4.TRAINING_PATTERN_SET = DPCD_TRAINING_PATTERN_VIDEOIDLE;
+
+ core_link_write_dpcd(
+ link,
+ dpcd_base_lt_offset,
+ &dpcd_pattern.raw,
+ 1);
+
+ DC_LOG_HW_LINK_TRAINING("%s\n LTTPR Id: %d 0x%X pattern = %x\n",
+ __func__,
+ offset,
+ dpcd_base_lt_offset,
+ dpcd_pattern.v1_4.TRAINING_PATTERN_SET);
+}
+
static void print_status_message(
struct dc_link *link,
const struct link_training_settings *lt_settings,
@@ -1133,6 +1360,17 @@ static void print_status_message(
lt_spread);
}
+void dc_link_dp_set_drive_settings(
+ struct dc_link *link,
+ struct link_training_settings *lt_settings)
+{
+ /* program ASIC PHY settings*/
+ dp_set_hw_lane_settings(link, lt_settings, DPRX);
+
+ /* Notify DP sink the PHY settings from source */
+ dpcd_set_lane_settings(link, lt_settings, DPRX);
+}
+
bool dc_link_dp_perform_link_training_skip_aux(
struct dc_link *link,
const struct dc_link_settings *link_setting)
@@ -1149,10 +1387,10 @@ bool dc_link_dp_perform_link_training_skip_aux(
/* 1. Perform_clock_recovery_sequence. */
/* transmit training pattern for clock recovery */
- dp_set_hw_training_pattern(link, pattern_for_cr);
+ dp_set_hw_training_pattern(link, pattern_for_cr, DPRX);
/* call HWSS to set lane settings*/
- dp_set_hw_lane_settings(link, &lt_settings);
+ dp_set_hw_lane_settings(link, &lt_settings, DPRX);
/* wait receiver to lock-on*/
wait_for_training_aux_rd_interval(link, lt_settings.cr_pattern_time);
@@ -1160,10 +1398,10 @@ bool dc_link_dp_perform_link_training_skip_aux(
/* 2. Perform_channel_equalization_sequence. */
/* transmit training pattern for channel equalization. */
- dp_set_hw_training_pattern(link, lt_settings.pattern_for_eq);
+ dp_set_hw_training_pattern(link, lt_settings.pattern_for_eq, DPRX);
/* call HWSS to set lane settings*/
- dp_set_hw_lane_settings(link, &lt_settings);
+ dp_set_hw_lane_settings(link, &lt_settings, DPRX);
/* wait receiver to lock-on. */
wait_for_training_aux_rd_interval(link, lt_settings.eq_pattern_time);
@@ -1185,9 +1423,10 @@ enum link_training_result dc_link_dp_perform_link_training(
{
enum link_training_result status = LINK_TRAINING_SUCCESS;
struct link_training_settings lt_settings;
-#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT
+
bool fec_enable;
-#endif
+ uint8_t repeater_cnt;
+ uint8_t repeater_id;
initialize_training_settings(
link,
@@ -1198,23 +1437,47 @@ enum link_training_result dc_link_dp_perform_link_training(
/* 1. set link rate, lane count and spread. */
dpcd_set_link_settings(link, &lt_settings);
-#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT
if (link->preferred_training_settings.fec_enable != NULL)
fec_enable = *link->preferred_training_settings.fec_enable;
else
fec_enable = true;
dp_set_fec_ready(link, fec_enable);
-#endif
+ if (!link->is_lttpr_mode_transparent) {
+ /* Configure lttpr mode */
+ configure_lttpr_mode(link);
+
+ /* 2. perform link training (set link training done
+ * to false is done as well)
+ */
+ repeater_cnt = convert_to_count(link->dpcd_caps.lttpr_caps.phy_repeater_cnt);
+
+ for (repeater_id = repeater_cnt; (repeater_id > 0 && status == LINK_TRAINING_SUCCESS);
+ repeater_id--) {
+ status = perform_clock_recovery_sequence(link, &lt_settings, repeater_id);
+
+ if (status != LINK_TRAINING_SUCCESS)
+ break;
+
+ status = perform_channel_equalization_sequence(link,
+ &lt_settings,
+ repeater_id);
+
+ if (status != LINK_TRAINING_SUCCESS)
+ break;
+
+ repeater_training_done(link, repeater_id);
+ }
+ }
- /* 2. perform link training (set link training done
- * to false is done as well)
- */
- status = perform_clock_recovery_sequence(link, &lt_settings);
+ if (status == LINK_TRAINING_SUCCESS) {
+ status = perform_clock_recovery_sequence(link, &lt_settings, DPRX);
if (status == LINK_TRAINING_SUCCESS) {
status = perform_channel_equalization_sequence(link,
- &lt_settings);
+ &lt_settings,
+ DPRX);
+ }
}
if ((status == LINK_TRAINING_SUCCESS) || !skip_video_pattern) {
@@ -1233,23 +1496,58 @@ enum link_training_result dc_link_dp_perform_link_training(
}
bool perform_link_training_with_retries(
- struct dc_link *link,
const struct dc_link_settings *link_setting,
bool skip_video_pattern,
- int attempts)
+ int attempts,
+ struct pipe_ctx *pipe_ctx,
+ enum signal_type signal)
{
uint8_t j;
uint8_t delay_between_attempts = LINK_TRAINING_RETRY_DELAY;
+ struct dc_stream_state *stream = pipe_ctx->stream;
+ struct dc_link *link = stream->link;
+ enum dp_panel_mode panel_mode = dp_get_panel_mode(link);
for (j = 0; j < attempts; ++j) {
- if (dc_link_dp_perform_link_training(
+ dp_enable_link_phy(
+ link,
+ signal,
+ pipe_ctx->clock_source->id,
+ link_setting);
+
+ if (stream->sink_patches.dppowerup_delay > 0) {
+ int delay_dp_power_up_in_ms = stream->sink_patches.dppowerup_delay;
+
+ msleep(delay_dp_power_up_in_ms);
+ }
+
+ dp_set_panel_mode(link, panel_mode);
+
+ /* We need to do this before the link training to ensure the idle pattern in SST
+ * mode will be sent right after the link training
+ */
+ link->link_enc->funcs->connect_dig_be_to_fe(link->link_enc,
+ pipe_ctx->stream_res.stream_enc->id, true);
+
+ if (link->aux_access_disabled) {
+ dc_link_dp_perform_link_training_skip_aux(link, link_setting);
+ return true;
+ } else if (dc_link_dp_perform_link_training(
link,
link_setting,
skip_video_pattern) == LINK_TRAINING_SUCCESS)
return true;
+		/* If the latest link training attempt still fails, skip the
+		 * delay and keep the PHY on.
+		 */
+ if (j == (attempts - 1))
+ break;
+
+ dp_disable_link_phy(link, signal);
+
msleep(delay_between_attempts);
+
delay_between_attempts += LINK_TRAINING_RETRY_DELAY;
}
@@ -1321,9 +1619,7 @@ enum link_training_result dc_link_dp_sync_lt_attempt(
enum link_training_result lt_status = LINK_TRAINING_SUCCESS;
enum dp_panel_mode panel_mode = DP_PANEL_MODE_DEFAULT;
enum clock_source_id dp_cs_id = CLOCK_SOURCE_ID_EXTERNAL;
-#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT
bool fec_enable = false;
-#endif
initialize_training_settings(
link,
@@ -1343,11 +1639,9 @@ enum link_training_result dc_link_dp_sync_lt_attempt(
dp_enable_link_phy(link, link->connector_signal,
dp_cs_id, link_settings);
-#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT
/* Set FEC enable */
fec_enable = lt_overrides->fec_enable && *lt_overrides->fec_enable;
dp_set_fec_ready(link, fec_enable);
-#endif
if (lt_overrides->alternate_scrambler_reset) {
if (*lt_overrides->alternate_scrambler_reset)
@@ -1367,10 +1661,11 @@ enum link_training_result dc_link_dp_sync_lt_attempt(
/* 2. perform link training (set link training done
* to false is done as well)
*/
- lt_status = perform_clock_recovery_sequence(link, &lt_settings);
+ lt_status = perform_clock_recovery_sequence(link, &lt_settings, DPRX);
if (lt_status == LINK_TRAINING_SUCCESS) {
lt_status = perform_channel_equalization_sequence(link,
- &lt_settings);
+ &lt_settings,
+ DPRX);
}
/* 3. Sync LT must skip TRAINING_PATTERN_SET:0 (video pattern)*/
@@ -1387,9 +1682,7 @@ bool dc_link_dp_sync_lt_end(struct dc_link *link, bool link_down)
*/
if (link_down == true) {
dp_disable_link_phy(link, link->connector_signal);
-#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT
dp_set_fec_ready(link, false);
-#endif
}
link->sync_lt_in_progress = false;
@@ -1423,6 +1716,22 @@ static struct dc_link_settings get_max_link_cap(struct dc_link *link)
max_link_cap.link_spread)
max_link_cap.link_spread =
link->reported_link_cap.link_spread;
+	/*
+	 * Account for the LTTPR repeater caps.
+	 * Note: repeaters do not snoop the DPRX Capabilities addresses (section 3.6.3).
+	 */
+ if (!link->is_lttpr_mode_transparent) {
+ if (link->dpcd_caps.lttpr_caps.max_lane_count < max_link_cap.lane_count)
+ max_link_cap.lane_count = link->dpcd_caps.lttpr_caps.max_lane_count;
+
+ if (link->dpcd_caps.lttpr_caps.max_link_rate < max_link_cap.link_rate)
+ max_link_cap.link_rate = link->dpcd_caps.lttpr_caps.max_link_rate;
+
+		DC_LOG_HW_LINK_TRAINING("%s\n Training with LTTPR, max lane count %d, max link rate %d\n",
+ __func__,
+ max_link_cap.lane_count,
+ max_link_cap.link_rate);
+ }
return max_link_cap;
}
@@ -1568,6 +1877,13 @@ bool dp_verify_link_cap(
max_link_cap = get_max_link_cap(link);
+ /* Grant extended timeout request */
+ if (!link->is_lttpr_mode_transparent && link->dpcd_caps.lttpr_caps.max_ext_timeout > 0) {
+ uint8_t grant = link->dpcd_caps.lttpr_caps.max_ext_timeout & 0x80;
+
+ core_link_write_dpcd(link, DP_PHY_REPEATER_EXTENDED_WAIT_TIMEOUT, &grant, sizeof(grant));
+ }
+
/* TODO implement override and monitor patch later */
/* try to train the link from high to low to
@@ -1576,6 +1892,16 @@ bool dp_verify_link_cap(
/* disable PHY done possible by BIOS, will be done by driver itself */
dp_disable_link_phy(link, link->connector_signal);
+ /* Temporary Renoir-specific workaround for SWDEV-215184;
+ * PHY will sometimes be in bad state on hotplugging display from certain USB-C dongle,
+ * so add extra cycle of enabling and disabling the PHY before first link training.
+ */
+ if (link->link_enc->features.flags.bits.DP_IS_USB_C &&
+ link->dc->debug.usbc_combo_phy_reset_wa) {
+ dp_enable_link_phy(link, link->connector_signal, dp_cs_id, cur);
+ dp_disable_link_phy(link, link->connector_signal);
+ }
+
dp_cs_id = get_clock_source_id(link);
/* link training starts with the maximum common settings
@@ -2280,6 +2606,7 @@ static void dp_test_send_phy_test_pattern(struct dc_link *link)
dc_link_dp_set_test_pattern(
link,
test_pattern,
+ DP_TEST_PATTERN_COLOR_SPACE_UNDEFINED,
&link_training_settings,
test_80_bit_pattern,
(DP_TEST_80BIT_CUSTOM_PATTERN_79_72 -
@@ -2291,6 +2618,8 @@ static void dp_test_send_link_test_pattern(struct dc_link *link)
union link_test_pattern dpcd_test_pattern;
union test_misc dpcd_test_params;
enum dp_test_pattern test_pattern;
+ enum dp_test_pattern_color_space test_pattern_color_space =
+ DP_TEST_PATTERN_COLOR_SPACE_UNDEFINED;
memset(&dpcd_test_pattern, 0, sizeof(dpcd_test_pattern));
memset(&dpcd_test_params, 0, sizeof(dpcd_test_params));
@@ -2325,14 +2654,105 @@ static void dp_test_send_link_test_pattern(struct dc_link *link)
break;
}
+ test_pattern_color_space = dpcd_test_params.bits.YCBCR_COEFS ?
+ DP_TEST_PATTERN_COLOR_SPACE_YCBCR709 :
+ DP_TEST_PATTERN_COLOR_SPACE_YCBCR601;
+
dc_link_dp_set_test_pattern(
link,
test_pattern,
+ test_pattern_color_space,
NULL,
NULL,
0);
}
+static void dp_test_get_audio_test_data(struct dc_link *link, bool disable_video)
+{
+ union audio_test_mode dpcd_test_mode = {0};
+ struct audio_test_pattern_type dpcd_pattern_type = {0};
+ union audio_test_pattern_period dpcd_pattern_period[AUDIO_CHANNELS_COUNT] = {0};
+ enum dp_test_pattern test_pattern = DP_TEST_PATTERN_AUDIO_OPERATOR_DEFINED;
+
+ struct pipe_ctx *pipes = link->dc->current_state->res_ctx.pipe_ctx;
+ struct pipe_ctx *pipe_ctx = &pipes[0];
+ unsigned int channel_count;
+ unsigned int channel = 0;
+ unsigned int modes = 0;
+ unsigned int sampling_rate_in_hz = 0;
+
+ // get audio test mode and test pattern parameters
+ core_link_read_dpcd(
+ link,
+ DP_TEST_AUDIO_MODE,
+ &dpcd_test_mode.raw,
+ sizeof(dpcd_test_mode));
+
+ core_link_read_dpcd(
+ link,
+ DP_TEST_AUDIO_PATTERN_TYPE,
+ &dpcd_pattern_type.value,
+ sizeof(dpcd_pattern_type));
+
+ channel_count = dpcd_test_mode.bits.channel_count + 1;
+
+ // read pattern periods for requested channels when sawTooth pattern is requested
+ if (dpcd_pattern_type.value == AUDIO_TEST_PATTERN_SAWTOOTH ||
+ dpcd_pattern_type.value == AUDIO_TEST_PATTERN_OPERATOR_DEFINED) {
+
+ test_pattern = (dpcd_pattern_type.value == AUDIO_TEST_PATTERN_SAWTOOTH) ?
+ DP_TEST_PATTERN_AUDIO_SAWTOOTH : DP_TEST_PATTERN_AUDIO_OPERATOR_DEFINED;
+ // read period for each channel
+ for (channel = 0; channel < channel_count; channel++) {
+ core_link_read_dpcd(
+ link,
+ DP_TEST_AUDIO_PERIOD_CH1 + channel,
+ &dpcd_pattern_period[channel].raw,
+ sizeof(dpcd_pattern_period[channel]));
+ }
+ }
+
+ // translate sampling rate
+ switch (dpcd_test_mode.bits.sampling_rate) {
+ case AUDIO_SAMPLING_RATE_32KHZ:
+ sampling_rate_in_hz = 32000;
+ break;
+ case AUDIO_SAMPLING_RATE_44_1KHZ:
+ sampling_rate_in_hz = 44100;
+ break;
+ case AUDIO_SAMPLING_RATE_48KHZ:
+ sampling_rate_in_hz = 48000;
+ break;
+ case AUDIO_SAMPLING_RATE_88_2KHZ:
+ sampling_rate_in_hz = 88200;
+ break;
+ case AUDIO_SAMPLING_RATE_96KHZ:
+ sampling_rate_in_hz = 96000;
+ break;
+ case AUDIO_SAMPLING_RATE_176_4KHZ:
+ sampling_rate_in_hz = 176400;
+ break;
+ case AUDIO_SAMPLING_RATE_192KHZ:
+ sampling_rate_in_hz = 192000;
+ break;
+ default:
+ sampling_rate_in_hz = 0;
+ break;
+ }
+
+ link->audio_test_data.flags.test_requested = 1;
+ link->audio_test_data.flags.disable_video = disable_video;
+ link->audio_test_data.sampling_rate = sampling_rate_in_hz;
+ link->audio_test_data.channel_count = channel_count;
+ link->audio_test_data.pattern_type = test_pattern;
+
+ if (test_pattern == DP_TEST_PATTERN_AUDIO_SAWTOOTH) {
+ for (modes = 0; modes < pipe_ctx->stream->audio_info.mode_count; modes++) {
+ link->audio_test_data.pattern_period[modes] = dpcd_pattern_period[modes].bits.pattern_period;
+ }
+ }
+}
+
static void handle_automated_test(struct dc_link *link)
{
union test_request test_request;
@@ -2362,6 +2782,12 @@ static void handle_automated_test(struct dc_link *link)
dp_test_send_link_test_pattern(link);
test_response.bits.ACK = 1;
}
+
+ if (test_request.bits.AUDIO_TEST_PATTERN) {
+ dp_test_get_audio_test_data(link, test_request.bits.TEST_AUDIO_DISABLED_VIDEO);
+ test_response.bits.ACK = 1;
+ }
+
if (test_request.bits.PHY_TEST_PATTERN) {
dp_test_send_phy_test_pattern(link);
test_response.bits.ACK = 1;
@@ -2381,9 +2807,9 @@ bool dc_link_handle_hpd_rx_irq(struct dc_link *link, union hpd_irq_data *out_hpd
union hpd_irq_data hpd_irq_dpcd_data = { { { {0} } } };
union device_service_irq device_service_clear = { { 0 } };
enum dc_status result;
-
bool status = false;
struct pipe_ctx *pipe_ctx;
+ struct dc_link_settings previous_link_settings;
int i;
if (out_link_loss)
@@ -2447,29 +2873,37 @@ bool dc_link_handle_hpd_rx_irq(struct dc_link *link, union hpd_irq_data *out_hpd
/* For now we only handle 'Downstream port status' case.
* If we got sink count changed it means
* Downstream port status changed,
- * then DM should call DC to do the detection. */
- if (hpd_rx_irq_check_link_loss_status(
- link,
- &hpd_irq_dpcd_data)) {
+ * then DM should call DC to do the detection.
+	 * NOTE: Do not handle link loss on eDP since it is an internal link. */
+ if ((link->connector_signal != SIGNAL_TYPE_EDP) &&
+ hpd_rx_irq_check_link_loss_status(
+ link,
+ &hpd_irq_dpcd_data)) {
/* Connectivity log: link loss */
CONN_DATA_LINK_LOSS(link,
hpd_irq_dpcd_data.raw,
sizeof(hpd_irq_dpcd_data),
"Status: ");
- perform_link_training_with_retries(link,
- &link->cur_link_settings,
- true, LINK_TRAINING_ATTEMPTS);
-
for (i = 0; i < MAX_PIPES; i++) {
pipe_ctx = &link->dc->current_state->res_ctx.pipe_ctx[i];
- if (pipe_ctx && pipe_ctx->stream && pipe_ctx->stream->link == link &&
- pipe_ctx->stream->dpms_off == false &&
- pipe_ctx->stream->signal == SIGNAL_TYPE_DISPLAY_PORT_MST) {
- dc_link_allocate_mst_payload(pipe_ctx);
- }
+ if (pipe_ctx && pipe_ctx->stream && pipe_ctx->stream->link == link)
+ break;
}
+ if (pipe_ctx == NULL || pipe_ctx->stream == NULL)
+ return false;
+
+ previous_link_settings = link->cur_link_settings;
+
+ perform_link_training_with_retries(&previous_link_settings,
+ true, LINK_TRAINING_ATTEMPTS,
+ pipe_ctx,
+ pipe_ctx->stream->signal);
+
+ if (pipe_ctx->stream->signal == SIGNAL_TYPE_DISPLAY_PORT_MST)
+ dc_link_reallocate_mst_payload(link);
+
status = false;
if (out_link_loss)
*out_link_loss = true;
@@ -2697,7 +3131,6 @@ static void dp_wa_power_up_0010FA(struct dc_link *link, uint8_t *dpcd_data,
int length)
{
int retry = 0;
- union dp_downstream_port_present ds_port = { 0 };
if (!link->dpcd_caps.dpcd_rev.raw) {
do {
@@ -2710,9 +3143,6 @@ static void dp_wa_power_up_0010FA(struct dc_link *link, uint8_t *dpcd_data,
} while (retry++ < 4 && !link->dpcd_caps.dpcd_rev.raw);
}
- ds_port.byte = dpcd_data[DP_DOWNSTREAMPORT_PRESENT -
- DP_DPCD_REV];
-
if (link->dpcd_caps.dongle_type == DISPLAY_DONGLE_DP_VGA_CONVERTER) {
switch (link->dpcd_caps.branch_dev_id) {
/* 0010FA active dongles (DP-VGA, DP-DLDVI converters) power down
@@ -2737,7 +3167,11 @@ static void dp_wa_power_up_0010FA(struct dc_link *link, uint8_t *dpcd_data,
static bool retrieve_link_cap(struct dc_link *link)
{
- uint8_t dpcd_data[DP_ADAPTER_CAP - DP_DPCD_REV + 1];
+	/* DP_ADAPTER_CAP - DP_DPCD_REV + 1 == 16 and also DP_DSC_BITS_PER_PIXEL_INC - DP_DSC_SUPPORT + 1 == 16,
+	 * so a buffer of size 16 covers both of those DPCD register block reads.
+	 */
+ uint8_t dpcd_data[16];
+ uint8_t lttpr_dpcd_data[6];
/*Only need to read 1 byte starting from DP_DPRX_FEATURE_ENUMERATION_LIST.
*/
@@ -2753,7 +3187,19 @@ static bool retrieve_link_cap(struct dc_link *link)
int i;
struct dp_sink_hw_fw_revision dp_hw_fw_revision;
+ /* Set default timeout to 3.2ms and read LTTPR capabilities */
+ bool ext_timeout_support = link->dc->caps.extended_aux_timeout_support &&
+ !link->dc->config.disable_extended_timeout_support;
+
+ link->is_lttpr_mode_transparent = true;
+
+ if (ext_timeout_support) {
+ dc_link_aux_configure_timeout(link->ddc,
+ LINK_AUX_DEFAULT_EXTENDED_TIMEOUT_PERIOD);
+ }
+
memset(dpcd_data, '\0', sizeof(dpcd_data));
+ memset(lttpr_dpcd_data, '\0', sizeof(lttpr_dpcd_data));
memset(&down_strm_port_count,
'\0', sizeof(union down_stream_port_count));
memset(&edp_config_cap, '\0',
@@ -2785,6 +3231,52 @@ static bool retrieve_link_cap(struct dc_link *link)
return false;
}
+ if (ext_timeout_support) {
+
+ status = core_link_read_dpcd(
+ link,
+ DP_LT_TUNABLE_PHY_REPEATER_FIELD_DATA_STRUCTURE_REV,
+ lttpr_dpcd_data,
+ sizeof(lttpr_dpcd_data));
+
+ link->dpcd_caps.lttpr_caps.revision.raw =
+ lttpr_dpcd_data[DP_LT_TUNABLE_PHY_REPEATER_FIELD_DATA_STRUCTURE_REV -
+ DP_LT_TUNABLE_PHY_REPEATER_FIELD_DATA_STRUCTURE_REV];
+
+ link->dpcd_caps.lttpr_caps.max_link_rate =
+ lttpr_dpcd_data[DP_MAX_LINK_RATE_PHY_REPEATER -
+ DP_LT_TUNABLE_PHY_REPEATER_FIELD_DATA_STRUCTURE_REV];
+
+ link->dpcd_caps.lttpr_caps.phy_repeater_cnt =
+ lttpr_dpcd_data[DP_PHY_REPEATER_CNT -
+ DP_LT_TUNABLE_PHY_REPEATER_FIELD_DATA_STRUCTURE_REV];
+
+ link->dpcd_caps.lttpr_caps.max_lane_count =
+ lttpr_dpcd_data[DP_MAX_LANE_COUNT_PHY_REPEATER -
+ DP_LT_TUNABLE_PHY_REPEATER_FIELD_DATA_STRUCTURE_REV];
+
+ link->dpcd_caps.lttpr_caps.mode =
+ lttpr_dpcd_data[DP_PHY_REPEATER_MODE -
+ DP_LT_TUNABLE_PHY_REPEATER_FIELD_DATA_STRUCTURE_REV];
+
+ link->dpcd_caps.lttpr_caps.max_ext_timeout =
+ lttpr_dpcd_data[DP_PHY_REPEATER_EXTENDED_WAIT_TIMEOUT -
+ DP_LT_TUNABLE_PHY_REPEATER_FIELD_DATA_STRUCTURE_REV];
+
+ if (link->dpcd_caps.lttpr_caps.phy_repeater_cnt > 0 &&
+ link->dpcd_caps.lttpr_caps.max_lane_count > 0 &&
+ link->dpcd_caps.lttpr_caps.max_lane_count <= 4 &&
+ link->dpcd_caps.lttpr_caps.revision.raw >= 0x14) {
+ link->is_lttpr_mode_transparent = false;
+ } else {
+ /* No usable LTTPR: reset the AUX timeout to its default value */
+ link->is_lttpr_mode_transparent = true;
+ dc_link_aux_configure_timeout(link->ddc, LINK_AUX_DEFAULT_TIMEOUT_PERIOD);
+ }
+
+ CONN_DATA_DETECT(link, lttpr_dpcd_data, sizeof(lttpr_dpcd_data), "LTTPR Caps: ");
+ }
+
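Every LTTPR field above is indexed relative to the base register of the block fetched in a single AUX read. A sketch of the same extraction through a hypothetical LTTPR_IDX() macro, which would remove the repeated subtraction:

    /* Hypothetical macro: offset of a register within the LTTPR capability
     * block that starts at the field-data-structure revision register.
     */
    #define LTTPR_IDX(reg) \
    	((reg) - DP_LT_TUNABLE_PHY_REPEATER_FIELD_DATA_STRUCTURE_REV)

    link->dpcd_caps.lttpr_caps.max_link_rate =
    	lttpr_dpcd_data[LTTPR_IDX(DP_MAX_LINK_RATE_PHY_REPEATER)];
    link->dpcd_caps.lttpr_caps.phy_repeater_cnt =
    	lttpr_dpcd_data[LTTPR_IDX(DP_PHY_REPEATER_CNT)];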
{
union training_aux_rd_interval aux_rd_interval;
@@ -2792,7 +3284,7 @@ static bool retrieve_link_cap(struct dc_link *link)
dpcd_data[DP_TRAINING_AUX_RD_INTERVAL];
link->dpcd_caps.ext_receiver_cap_field_present =
- aux_rd_interval.bits.EXT_RECEIVER_CAP_FIELD_PRESENT == 1 ? true:false;
+ aux_rd_interval.bits.EXT_RECEIVER_CAP_FIELD_PRESENT == 1;
if (aux_rd_interval.bits.EXT_RECEIVER_CAP_FIELD_PRESENT == 1) {
uint8_t ext_cap_data[16];
@@ -2923,7 +3415,6 @@ static bool retrieve_link_cap(struct dc_link *link)
dp_hw_fw_revision.ieee_fw_rev,
sizeof(dp_hw_fw_revision.ieee_fw_rev));
-#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT
memset(&link->dpcd_caps.dsc_caps, '\0',
sizeof(link->dpcd_caps.dsc_caps));
memset(&link->dpcd_caps.fec_cap, '\0', sizeof(link->dpcd_caps.fec_cap));
@@ -2945,7 +3436,6 @@ static bool retrieve_link_cap(struct dc_link *link)
link->dpcd_caps.dsc_caps.dsc_ext_caps.raw,
sizeof(link->dpcd_caps.dsc_caps.dsc_ext_caps.raw));
}
-#endif
/* Connectivity log: detection */
CONN_DATA_DETECT(link, dpcd_data, sizeof(dpcd_data), "Rx Caps: ");
@@ -2953,6 +3443,68 @@ static bool retrieve_link_cap(struct dc_link *link)
return true;
}
+bool dp_overwrite_extended_receiver_cap(struct dc_link *link)
+{
+ uint8_t dpcd_data[16];
+ uint32_t read_dpcd_retry_cnt = 3;
+ enum dc_status status = DC_ERROR_UNEXPECTED;
+ union dp_downstream_port_present ds_port = { 0 };
+ union down_stream_port_count down_strm_port_count;
+ union edp_configuration_cap edp_config_cap;
+
+ int i;
+
+ for (i = 0; i < read_dpcd_retry_cnt; i++) {
+ status = core_link_read_dpcd(
+ link,
+ DP_DPCD_REV,
+ dpcd_data,
+ sizeof(dpcd_data));
+ if (status == DC_OK)
+ break;
+ }
+
+ link->dpcd_caps.dpcd_rev.raw =
+ dpcd_data[DP_DPCD_REV - DP_DPCD_REV];
+
+ if (dpcd_data[DP_MAX_LANE_COUNT - DP_DPCD_REV] == 0)
+ return false;
+
+ ds_port.byte = dpcd_data[DP_DOWNSTREAMPORT_PRESENT -
+ DP_DPCD_REV];
+
+ get_active_converter_info(ds_port.byte, link);
+
+ down_strm_port_count.raw = dpcd_data[DP_DOWN_STREAM_PORT_COUNT -
+ DP_DPCD_REV];
+
+ link->dpcd_caps.allow_invalid_MSA_timing_param =
+ down_strm_port_count.bits.IGNORE_MSA_TIMING_PARAM;
+
+ link->dpcd_caps.max_ln_count.raw = dpcd_data[
+ DP_MAX_LANE_COUNT - DP_DPCD_REV];
+
+ link->dpcd_caps.max_down_spread.raw = dpcd_data[
+ DP_MAX_DOWNSPREAD - DP_DPCD_REV];
+
+ link->reported_link_cap.lane_count =
+ link->dpcd_caps.max_ln_count.bits.MAX_LANE_COUNT;
+ link->reported_link_cap.link_rate = dpcd_data[
+ DP_MAX_LINK_RATE - DP_DPCD_REV];
+ link->reported_link_cap.link_spread =
+ link->dpcd_caps.max_down_spread.bits.MAX_DOWN_SPREAD ?
+ LINK_SPREAD_05_DOWNSPREAD_30KHZ : LINK_SPREAD_DISABLED;
+
+ edp_config_cap.raw = dpcd_data[
+ DP_EDP_CONFIGURATION_CAP - DP_DPCD_REV];
+ link->dpcd_caps.panel_mode_edp =
+ edp_config_cap.bits.ALT_SCRAMBLER_RESET;
+ link->dpcd_caps.dpcd_display_control_capable =
+ edp_config_cap.bits.DPCD_DISPLAY_CONTROL_CAPABLE;
+
+ return true;
+}
+
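dp_overwrite_extended_receiver_cap() repeats a pattern used throughout this file: attempt a DPCD block read a few times before trusting the result. A hedged sketch of that retry loop as a standalone wrapper (hypothetical name):

    /* Hypothetical wrapper: retry a DPCD block read up to max_tries times */
    static enum dc_status read_dpcd_retry(struct dc_link *link, uint32_t addr,
    		uint8_t *buf, uint32_t size, uint32_t max_tries)
    {
    	enum dc_status status = DC_ERROR_UNEXPECTED;
    	uint32_t i;

    	for (i = 0; i < max_tries; i++) {
    		status = core_link_read_dpcd(link, addr, buf, size);
    		if (status == DC_OK)
    			break;
    	}
    	return status;
    }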
bool detect_dp_sink_caps(struct dc_link *link)
{
return retrieve_link_cap(link);
@@ -3067,21 +3619,20 @@ static bool is_dp_phy_pattern(enum dp_test_pattern test_pattern)
static void set_crtc_test_pattern(struct dc_link *link,
struct pipe_ctx *pipe_ctx,
- enum dp_test_pattern test_pattern)
+ enum dp_test_pattern test_pattern,
+ enum dp_test_pattern_color_space test_pattern_color_space)
{
enum controller_dp_test_pattern controller_test_pattern;
enum dc_color_depth color_depth = pipe_ctx->
stream->timing.display_color_depth;
struct bit_depth_reduction_params params;
struct output_pixel_processor *opp = pipe_ctx->stream_res.opp;
-#if defined(CONFIG_DRM_AMD_DC_DCN2_0)
int width = pipe_ctx->stream->timing.h_addressable +
pipe_ctx->stream->timing.h_border_left +
pipe_ctx->stream->timing.h_border_right;
int height = pipe_ctx->stream->timing.v_addressable +
pipe_ctx->stream->timing.v_border_bottom +
pipe_ctx->stream->timing.v_border_top;
-#endif
memset(&params, 0, sizeof(params));
@@ -3125,10 +3676,29 @@ static void set_crtc_test_pattern(struct dc_link *link,
if (pipe_ctx->stream_res.tg->funcs->set_test_pattern)
pipe_ctx->stream_res.tg->funcs->set_test_pattern(pipe_ctx->stream_res.tg,
controller_test_pattern, color_depth);
-#if defined(CONFIG_DRM_AMD_DC_DCN2_0)
else if (opp->funcs->opp_set_disp_pattern_generator) {
struct pipe_ctx *odm_pipe;
+ enum controller_dp_color_space controller_color_space;
int opp_cnt = 1;
+ int count = 0; /* a uint8_t would wrap before the 1000-poll bound below */
+
+ switch (test_pattern_color_space) {
+ case DP_TEST_PATTERN_COLOR_SPACE_RGB:
+ controller_color_space = CONTROLLER_DP_COLOR_SPACE_RGB;
+ break;
+ case DP_TEST_PATTERN_COLOR_SPACE_YCBCR601:
+ controller_color_space = CONTROLLER_DP_COLOR_SPACE_YCBCR601;
+ break;
+ case DP_TEST_PATTERN_COLOR_SPACE_YCBCR709:
+ controller_color_space = CONTROLLER_DP_COLOR_SPACE_YCBCR709;
+ break;
+ case DP_TEST_PATTERN_COLOR_SPACE_UNDEFINED:
+ default:
+ controller_color_space = CONTROLLER_DP_COLOR_SPACE_UDEFINED;
+ DC_LOG_ERROR("%s: Color space must be defined for test pattern", __func__);
+ ASSERT(0);
+ break;
+ }
for (odm_pipe = pipe_ctx->next_odm_pipe; odm_pipe; odm_pipe = odm_pipe->next_odm_pipe)
opp_cnt++;
@@ -3141,6 +3711,7 @@ static void set_crtc_test_pattern(struct dc_link *link,
odm_opp->funcs->opp_program_bit_depth_reduction(odm_opp, &params);
odm_opp->funcs->opp_set_disp_pattern_generator(odm_opp,
controller_test_pattern,
+ controller_color_space,
color_depth,
NULL,
width,
@@ -3148,12 +3719,18 @@ static void set_crtc_test_pattern(struct dc_link *link,
}
opp->funcs->opp_set_disp_pattern_generator(opp,
controller_test_pattern,
+ controller_color_space,
color_depth,
NULL,
width,
height);
+ /* wait for dpg to blank pixel data with test pattern */
+ for (count = 0; count < 1000; count++)
+ if (opp->funcs->dpg_is_blanked(opp))
+ break;
+ else
+ udelay(100);
}
-#endif
}
break;
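The wait loop above gives the display pattern generator a bounded window to blank the pixel data: up to 1000 polls spaced 100 us apart, roughly a 100 ms ceiling, with an early exit once dpg_is_blanked() reports true. The same idiom as a hypothetical standalone helper:

    /* Hypothetical helper: bounded poll for the DPG to take over blanking.
     * 1000 iterations * 100 us gives a worst case of about 100 ms.
     */
    static bool wait_for_dpg_blank(struct output_pixel_processor *opp)
    {
    	int tries;

    	for (tries = 0; tries < 1000; tries++) {
    		if (opp->funcs->dpg_is_blanked(opp))
    			return true;
    		udelay(100);
    	}
    	return false;
    }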
case DP_TEST_PATTERN_VIDEO_MODE:
@@ -3166,7 +3743,6 @@ static void set_crtc_test_pattern(struct dc_link *link,
pipe_ctx->stream_res.tg->funcs->set_test_pattern(pipe_ctx->stream_res.tg,
CONTROLLER_DP_TEST_PATTERN_VIDEOMODE,
color_depth);
-#if defined(CONFIG_DRM_AMD_DC_DCN2_0)
else if (opp->funcs->opp_set_disp_pattern_generator) {
struct pipe_ctx *odm_pipe;
int opp_cnt = 1;
@@ -3181,6 +3757,7 @@ static void set_crtc_test_pattern(struct dc_link *link,
odm_opp->funcs->opp_program_bit_depth_reduction(odm_opp, &params);
odm_opp->funcs->opp_set_disp_pattern_generator(odm_opp,
CONTROLLER_DP_TEST_PATTERN_VIDEOMODE,
+ CONTROLLER_DP_COLOR_SPACE_UDEFINED,
color_depth,
NULL,
width,
@@ -3188,12 +3765,12 @@ static void set_crtc_test_pattern(struct dc_link *link,
}
opp->funcs->opp_set_disp_pattern_generator(opp,
CONTROLLER_DP_TEST_PATTERN_VIDEOMODE,
+ CONTROLLER_DP_COLOR_SPACE_UDEFINED,
color_depth,
NULL,
width,
height);
}
-#endif
}
break;
@@ -3205,6 +3782,7 @@ static void set_crtc_test_pattern(struct dc_link *link,
bool dc_link_dp_set_test_pattern(
struct dc_link *link,
enum dp_test_pattern test_pattern,
+ enum dp_test_pattern_color_space test_pattern_color_space,
const struct link_training_settings *p_link_settings,
const unsigned char *p_custom_pattern,
unsigned int cust_pattern_size)
@@ -3233,7 +3811,7 @@ bool dc_link_dp_set_test_pattern(
if (link->test_pattern_enabled && test_pattern ==
DP_TEST_PATTERN_VIDEO_MODE) {
/* Set CRTC Test Pattern */
- set_crtc_test_pattern(link, pipe_ctx, test_pattern);
+ set_crtc_test_pattern(link, pipe_ctx, test_pattern, test_pattern_color_space);
dp_set_hw_test_pattern(link, test_pattern,
(uint8_t *)p_custom_pattern,
(uint32_t)cust_pattern_size);
@@ -3256,8 +3834,8 @@ bool dc_link_dp_set_test_pattern(
if (is_dp_phy_pattern(test_pattern)) {
/* Set DPCD Lane Settings before running test pattern */
if (p_link_settings != NULL) {
- dp_set_hw_lane_settings(link, p_link_settings);
- dpcd_set_lane_settings(link, p_link_settings);
+ dp_set_hw_lane_settings(link, p_link_settings, DPRX);
+ dpcd_set_lane_settings(link, p_link_settings, DPRX);
}
/* Blank stream if running test pattern */
@@ -3348,7 +3926,7 @@ bool dc_link_dp_set_test_pattern(
}
} else {
/* CRTC Patterns */
- set_crtc_test_pattern(link, pipe_ctx, test_pattern);
+ set_crtc_test_pattern(link, pipe_ctx, test_pattern, test_pattern_color_space);
/* Set Test Pattern state */
link->test_pattern_enabled = true;
}
@@ -3468,7 +4046,6 @@ enum dp_panel_mode dp_get_panel_mode(struct dc_link *link)
return DP_PANEL_MODE_DEFAULT;
}
-#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT
void dp_set_fec_ready(struct dc_link *link, bool ready)
{
/* FEC has to be "set ready" before the link training.
@@ -3538,5 +4115,4 @@ void dp_set_fec_enable(struct dc_link *link, bool enable)
}
}
}
-#endif
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link_hwss.c b/drivers/gpu/drm/amd/display/dc/core/dc_link_hwss.c
index a519dbc5ecb6..ddb855045767 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_link_hwss.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_link_hwss.c
@@ -12,12 +12,38 @@
#include "dc_link_ddc.h"
#include "dm_helpers.h"
#include "dpcd_defs.h"
-#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT
#include "dsc.h"
-#endif
-#if defined(CONFIG_DRM_AMD_DC_DCN2_0)
#include "resource.h"
-#endif
+
+static uint8_t convert_to_count(uint8_t lttpr_repeater_count)
+{
+ switch (lttpr_repeater_count) {
+ case 0x80: // 1 lttpr repeater
+ return 1;
+ case 0x40: // 2 lttpr repeaters
+ return 2;
+ case 0x20: // 3 lttpr repeaters
+ return 3;
+ case 0x10: // 4 lttpr repeaters
+ return 4;
+ case 0x08: // 5 lttpr repeaters
+ return 5;
+ case 0x04: // 6 lttpr repeaters
+ return 6;
+ case 0x02: // 7 lttpr repeaters
+ return 7;
+ case 0x01: // 8 lttpr repeaters
+ return 8;
+ default:
+ break;
+ }
+ return 0; // invalid value
+}
+
+static inline bool is_immediate_downstream(struct dc_link *link, uint32_t offset)
+{
+ return (convert_to_count(link->dpcd_caps.lttpr_caps.phy_repeater_cnt) == offset);
+}
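DP_PHY_REPEATER_CNT is a one-hot field: 0x80 encodes one repeater, 0x40 two, down to 0x01 for eight, so convert_to_count() is an eight-way decode. Assuming the value really is one-hot, the same mapping can be computed directly; a sketch using the kernel's ffs():

    /* Equivalent decode, assuming the one-hot repeater-count encoding:
     * 0x80 -> 1, 0x40 -> 2, ..., 0x01 -> 8; anything else is invalid.
     */
    static uint8_t convert_to_count_alt(uint8_t lttpr_repeater_count)
    {
    	if (!lttpr_repeater_count ||
    	    (lttpr_repeater_count & (lttpr_repeater_count - 1)))
    		return 0; /* zero or multiple bits set: invalid */

    	return 9 - ffs(lttpr_repeater_count); /* ffs(0x80) == 8 -> 1 */
    }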
enum dc_status core_link_read_dpcd(
struct dc_link *link,
@@ -69,8 +95,8 @@ void dp_enable_link_phy(
const struct dc_link_settings *link_settings)
{
struct link_encoder *link_enc = link->link_enc;
- struct dc *core_dc = link->ctx->dc;
- struct dmcu *dmcu = core_dc->res_pool->dmcu;
+ struct dc *dc = link->ctx->dc;
+ struct dmcu *dmcu = dc->res_pool->dmcu;
struct pipe_ctx *pipes =
link->dc->current_state->res_ctx.pipe_ctx;
@@ -147,15 +173,20 @@ bool edp_receiver_ready_T9(struct dc_link *link)
}
bool edp_receiver_ready_T7(struct dc_link *link)
{
- unsigned int tries = 0;
unsigned char sinkstatus = 0;
unsigned char edpRev = 0;
enum dc_status result = DC_OK;
+ /* use an absolute timestamp to constrain the max T7 wait */
+ unsigned long long enter_timestamp = 0;
+ unsigned long long finish_timestamp = 0;
+ unsigned long long time_taken_in_ns = 0;
+
result = core_link_read_dpcd(link, DP_EDP_DPCD_REV, &edpRev, sizeof(edpRev));
if (result == DC_OK && edpRev < DP_EDP_12)
return true;
/* start from eDP version 1.2, SINK_STATUS indicates the sink is ready. */
+ enter_timestamp = dm_get_timestamp(link->ctx);
do {
sinkstatus = 0;
result = core_link_read_dpcd(link, DP_SINK_STATUS, &sinkstatus, sizeof(sinkstatus));
@@ -163,8 +194,10 @@ bool edp_receiver_ready_T7(struct dc_link *link)
break;
if (result != DC_OK)
break;
- udelay(25); //MAx T7 is 50ms
- } while (++tries < 300);
+ udelay(25);
+ finish_timestamp = dm_get_timestamp(link->ctx);
+ time_taken_in_ns = dm_get_elapse_time_in_ns(link->ctx, finish_timestamp, enter_timestamp);
+ } while (time_taken_in_ns < 50 * 1000000); // Max T7 is 50 ms
if (link->local_sink->edid_caps.panel_patch.extra_t7_ms > 0)
udelay(link->local_sink->edid_caps.panel_patch.extra_t7_ms * 1000);
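The loop this replaces capped out at 300 iterations of 25 us, about 7.5 ms, well short of the 50 ms T7 budget its comment promised; bounding on an absolute timestamp makes the limit honest. The budget could also be spelled as a named constant, a sketch assuming the kernel's NSEC_PER_MSEC and a hypothetical macro name:

    /* Hypothetical named constant for the T7 wait bound used above */
    #define EDP_T7_BUDGET_NS	(50ULL * NSEC_PER_MSEC)	/* 50 ms in ns */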
@@ -174,8 +207,8 @@ bool edp_receiver_ready_T7(struct dc_link *link)
void dp_disable_link_phy(struct dc_link *link, enum signal_type signal)
{
- struct dc *core_dc = link->ctx->dc;
- struct dmcu *dmcu = core_dc->res_pool->dmcu;
+ struct dc *dc = link->ctx->dc;
+ struct dmcu *dmcu = dc->res_pool->dmcu;
if (!link->wa_flags.dp_keep_receiver_powered)
dp_receiver_power_ctrl(link, false);
@@ -212,7 +245,8 @@ void dp_disable_link_phy_mst(struct dc_link *link, enum signal_type signal)
bool dp_set_hw_training_pattern(
struct dc_link *link,
- enum dc_dp_training_pattern pattern)
+ enum dc_dp_training_pattern pattern,
+ uint32_t offset)
{
enum dp_test_pattern test_pattern = DP_TEST_PATTERN_UNSUPPORTED;
@@ -240,10 +274,14 @@ bool dp_set_hw_training_pattern(
void dp_set_hw_lane_settings(
struct dc_link *link,
- const struct link_training_settings *link_settings)
+ const struct link_training_settings *link_settings,
+ uint32_t offset)
{
struct link_encoder *encoder = link->link_enc;
+ if (!link->is_lttpr_mode_transparent && !is_immediate_downstream(link, offset))
+ return;
+
/* call Encoder to set lane settings */
encoder->funcs->dp_set_lane_settings(encoder, link_settings);
}
@@ -302,20 +340,12 @@ void dp_retrain_link_dp_test(struct dc_link *link,
memset(&link->cur_link_settings, 0,
sizeof(link->cur_link_settings));
- link->link_enc->funcs->enable_dp_output(
- link->link_enc,
- link_setting,
- pipes[i].clock_source->id);
- link->cur_link_settings = *link_setting;
-
- dp_receiver_power_ctrl(link, true);
-
perform_link_training_with_retries(
- link,
link_setting,
skip_video_pattern,
- LINK_TRAINING_ATTEMPTS);
-
+ LINK_TRAINING_ATTEMPTS,
+ &pipes[i],
+ SIGNAL_TYPE_DISPLAY_PORT);
link->dc->hwss.enable_stream(&pipes[i]);
@@ -339,7 +369,6 @@ void dp_retrain_link_dp_test(struct dc_link *link,
}
}
-#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT
#define DC_LOGGER \
dsc->ctx->logger
static void dsc_optc_config_log(struct display_stream_compressor *dsc,
@@ -365,14 +394,14 @@ static void dsc_optc_config_log(struct display_stream_compressor *dsc,
static bool dp_set_dsc_on_rx(struct pipe_ctx *pipe_ctx, bool enable)
{
- struct dc *core_dc = pipe_ctx->stream->ctx->dc;
+ struct dc *dc = pipe_ctx->stream->ctx->dc;
struct dc_stream_state *stream = pipe_ctx->stream;
bool result = false;
- if (IS_FPGA_MAXIMUS_DC(core_dc->ctx->dce_environment))
+ if (IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment))
result = true;
else
- result = dm_helpers_dp_write_dsc_enable(core_dc->ctx, stream, enable);
+ result = dm_helpers_dp_write_dsc_enable(dc->ctx, stream, enable);
return result;
}
@@ -382,7 +411,7 @@ static bool dp_set_dsc_on_rx(struct pipe_ctx *pipe_ctx, bool enable)
void dp_set_dsc_on_stream(struct pipe_ctx *pipe_ctx, bool enable)
{
struct display_stream_compressor *dsc = pipe_ctx->stream_res.dsc;
- struct dc *core_dc = pipe_ctx->stream->ctx->dc;
+ struct dc *dc = pipe_ctx->stream->ctx->dc;
struct dc_stream_state *stream = pipe_ctx->stream;
struct pipe_ctx *odm_pipe;
int opp_cnt = 1;
@@ -418,7 +447,7 @@ void dp_set_dsc_on_stream(struct pipe_ctx *pipe_ctx, bool enable)
optc_dsc_mode = dsc_optc_cfg.is_pixel_format_444 ? OPTC_DSC_ENABLED_444 : OPTC_DSC_ENABLED_NATIVE_SUBSAMPLED;
/* Enable DSC in encoder */
- if (dc_is_dp_signal(stream->signal) && !IS_FPGA_MAXIMUS_DC(core_dc->ctx->dce_environment)) {
+ if (dc_is_dp_signal(stream->signal) && !IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment)) {
DC_LOG_DSC("Setting stream encoder DSC config for engine %d:", (int)pipe_ctx->stream_res.stream_enc->id);
dsc_optc_config_log(dsc, &dsc_optc_cfg);
pipe_ctx->stream_res.stream_enc->funcs->dp_set_dsc_config(pipe_ctx->stream_res.stream_enc,
@@ -443,7 +472,7 @@ void dp_set_dsc_on_stream(struct pipe_ctx *pipe_ctx, bool enable)
OPTC_DSC_DISABLED, 0, 0);
/* disable DSC in stream encoder */
- if (dc_is_dp_signal(stream->signal) && !IS_FPGA_MAXIMUS_DC(core_dc->ctx->dce_environment)) {
+ if (dc_is_dp_signal(stream->signal) && !IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment)) {
pipe_ctx->stream_res.stream_enc->funcs->dp_set_dsc_config(
pipe_ctx->stream_res.stream_enc,
OPTC_DSC_DISABLED, 0, 0);
@@ -486,7 +515,7 @@ out:
bool dp_set_dsc_pps_sdp(struct pipe_ctx *pipe_ctx, bool enable)
{
struct display_stream_compressor *dsc = pipe_ctx->stream_res.dsc;
- struct dc *core_dc = pipe_ctx->stream->ctx->dc;
+ struct dc *dc = pipe_ctx->stream->ctx->dc;
struct dc_stream_state *stream = pipe_ctx->stream;
if (!pipe_ctx->stream->timing.flags.DSC || !dsc)
@@ -496,6 +525,9 @@ bool dp_set_dsc_pps_sdp(struct pipe_ctx *pipe_ctx, bool enable)
struct dsc_config dsc_cfg;
uint8_t dsc_packed_pps[128];
+ memset(&dsc_cfg, 0, sizeof(dsc_cfg));
+ memset(dsc_packed_pps, 0, 128);
+
/* Enable DSC hw block */
dsc_cfg.pic_width = stream->timing.h_addressable + stream->timing.h_border_left + stream->timing.h_border_right;
dsc_cfg.pic_height = stream->timing.v_addressable + stream->timing.v_border_top + stream->timing.v_border_bottom;
@@ -505,7 +537,7 @@ bool dp_set_dsc_pps_sdp(struct pipe_ctx *pipe_ctx, bool enable)
DC_LOG_DSC(" ");
dsc->funcs->dsc_get_packed_pps(dsc, &dsc_cfg, &dsc_packed_pps[0]);
- if (dc_is_dp_signal(stream->signal) && !IS_FPGA_MAXIMUS_DC(core_dc->ctx->dce_environment)) {
+ if (dc_is_dp_signal(stream->signal) && !IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment)) {
DC_LOG_DSC("Setting stream encoder DSC PPS SDP for engine %d\n", (int)pipe_ctx->stream_res.stream_enc->id);
pipe_ctx->stream_res.stream_enc->funcs->dp_set_dsc_pps_info_packet(
pipe_ctx->stream_res.stream_enc,
@@ -514,7 +546,7 @@ bool dp_set_dsc_pps_sdp(struct pipe_ctx *pipe_ctx, bool enable)
}
} else {
/* disable DSC PPS in stream encoder */
- if (dc_is_dp_signal(stream->signal) && !IS_FPGA_MAXIMUS_DC(core_dc->ctx->dce_environment)) {
+ if (dc_is_dp_signal(stream->signal) && !IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment)) {
pipe_ctx->stream_res.stream_enc->funcs->dp_set_dsc_pps_info_packet(
pipe_ctx->stream_res.stream_enc, false, NULL);
}
@@ -537,5 +569,4 @@ bool dp_update_dsc_config(struct pipe_ctx *pipe_ctx)
dp_set_dsc_pps_sdp(pipe_ctx, true);
return true;
}
-#endif
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
index 37698305a2dc..a0eb9e533a61 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
@@ -46,15 +46,11 @@
#include "dce100/dce100_resource.h"
#include "dce110/dce110_resource.h"
#include "dce112/dce112_resource.h"
-#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
+#if defined(CONFIG_DRM_AMD_DC_DCN)
#include "dcn10/dcn10_resource.h"
#endif
-#if defined(CONFIG_DRM_AMD_DC_DCN2_0)
#include "dcn20/dcn20_resource.h"
-#endif
-#if defined(CONFIG_DRM_AMD_DC_DCN2_1)
#include "dcn21/dcn21_resource.h"
-#endif
#include "dce120/dce120_resource.h"
#define DC_LOGGER_INIT(logger)
@@ -99,23 +95,19 @@ enum dce_version resource_parse_asic_id(struct hw_asic_id asic_id)
else
dc_version = DCE_VERSION_12_0;
break;
-#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
+#if defined(CONFIG_DRM_AMD_DC_DCN)
case FAMILY_RV:
dc_version = DCN_VERSION_1_0;
if (ASICREV_IS_RAVEN2(asic_id.hw_internal_rev))
dc_version = DCN_VERSION_1_01;
-#if defined(CONFIG_DRM_AMD_DC_DCN2_1)
if (ASICREV_IS_RENOIR(asic_id.hw_internal_rev))
dc_version = DCN_VERSION_2_1;
-#endif
break;
#endif
-#if defined(CONFIG_DRM_AMD_DC_DCN2_0)
case FAMILY_NV:
dc_version = DCN_VERSION_2_0;
break;
-#endif
default:
dc_version = DCE_VERSION_UNKNOWN;
break;
@@ -162,20 +154,16 @@ struct resource_pool *dc_create_resource_pool(struct dc *dc,
init_data->num_virtual_links, dc);
break;
-#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
+#if defined(CONFIG_DRM_AMD_DC_DCN)
case DCN_VERSION_1_0:
case DCN_VERSION_1_01:
res_pool = dcn10_create_resource_pool(init_data, dc);
break;
-#endif
-#if defined(CONFIG_DRM_AMD_DC_DCN2_0)
case DCN_VERSION_2_0:
res_pool = dcn20_create_resource_pool(init_data, dc);
break;
-#endif
-#if defined(CONFIG_DRM_AMD_DC_DCN2_1)
case DCN_VERSION_2_1:
res_pool = dcn21_create_resource_pool(init_data, dc);
break;
@@ -951,44 +939,44 @@ static void calculate_inits_and_adj_vp(struct pipe_ctx *pipe_ctx)
data->inits.v_c_bot = dc_fixpt_add(data->inits.v_c, data->ratios.vert_c);
}
-static bool are_rects_integer_multiples(struct rect src, struct rect dest)
-{
- if (dest.width >= src.width && dest.width % src.width == 0 &&
- dest.height >= src.height && dest.height % src.height == 0)
- return true;
-
- return false;
-}
-static void calculate_integer_scaling(struct pipe_ctx *pipe_ctx)
+/*
+ * When handling 270-degree rotation in mixed SLS mode, we have a
+ * non-zero stream->timing.h_border_left. If we are doing
+ * pipe-splitting, this h_border_left value gets added to recout.x and when it
+ * calls calculate_inits_and_adj_vp() and
+ * adjust_vp_and_init_for_seamless_clip(), it can cause viewport.height for a
+ * pipe to be incorrect.
+ *
+ * To fix this, instead of using stream->timing.h_border_left, we can use
+ * stream->dst.x to represent the border instead. So we will set h_border_left
+ * to 0 and shift the appropriate amount in stream->dst.x. We will then
+ * perform all calculations in resource_build_scaling_params() based on this
+ * and then restore the h_border_left and stream->dst.x to their original
+ * values.
+ *
+ * shift_border_left_to_dst() will shift the amount of h_border_left to
+ * stream->dst.x and set h_border_left to 0. restore_border_left_from_dst()
+ * will restore h_border_left and stream->dst.x back to their original values
+ * We also need to make sure pipe_ctx->plane_res.scl_data.h_active uses the
+ * original h_border_left value in its calculation.
+ */
+int shift_border_left_to_dst(struct pipe_ctx *pipe_ctx)
{
- if (!pipe_ctx->plane_state->scaling_quality.integer_scaling)
- return;
-
- //for Centered Mode
- if (pipe_ctx->stream->dst.width == pipe_ctx->stream->src.width &&
- pipe_ctx->stream->dst.height == pipe_ctx->stream->src.height) {
- // calculate maximum # of replication of src onto addressable
- unsigned int integer_multiple = min(
- pipe_ctx->stream->timing.h_addressable / pipe_ctx->stream->src.width,
- pipe_ctx->stream->timing.v_addressable / pipe_ctx->stream->src.height);
-
- //scale dst
- pipe_ctx->stream->dst.width = integer_multiple * pipe_ctx->stream->src.width;
- pipe_ctx->stream->dst.height = integer_multiple * pipe_ctx->stream->src.height;
+ int store_h_border_left = pipe_ctx->stream->timing.h_border_left;
- //center dst onto addressable
- pipe_ctx->stream->dst.x = (pipe_ctx->stream->timing.h_addressable - pipe_ctx->stream->dst.width)/2;
- pipe_ctx->stream->dst.y = (pipe_ctx->stream->timing.v_addressable - pipe_ctx->stream->dst.height)/2;
+ if (store_h_border_left) {
+ pipe_ctx->stream->timing.h_border_left = 0;
+ pipe_ctx->stream->dst.x += store_h_border_left;
}
+ return store_h_border_left;
+}
- //disable taps if src & dst are integer ratio
- if (are_rects_integer_multiples(pipe_ctx->stream->src, pipe_ctx->stream->dst)) {
- pipe_ctx->plane_state->scaling_quality.v_taps = 1;
- pipe_ctx->plane_state->scaling_quality.h_taps = 1;
- pipe_ctx->plane_state->scaling_quality.v_taps_c = 1;
- pipe_ctx->plane_state->scaling_quality.h_taps_c = 1;
- }
+void restore_border_left_from_dst(struct pipe_ctx *pipe_ctx,
+ int store_h_border_left)
+{
+ pipe_ctx->stream->dst.x -= store_h_border_left;
+ pipe_ctx->stream->timing.h_border_left = store_h_border_left;
}
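The pair above is meant to bracket the scaling math, which is exactly how the next hunk wires it into resource_build_scaling_params(); in outline:

    /* Usage outline (sketch): fold the left border into dst.x, run the
     * scaling calculations, then restore the original values.
     */
    int saved = shift_border_left_to_dst(pipe_ctx);

    /* ... calculate_scaling_ratios(), calculate_viewport(), taps ... */

    if (saved)
    	restore_border_left_from_dst(pipe_ctx, saved);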
bool resource_build_scaling_params(struct pipe_ctx *pipe_ctx)
@@ -996,6 +984,7 @@ bool resource_build_scaling_params(struct pipe_ctx *pipe_ctx)
const struct dc_plane_state *plane_state = pipe_ctx->plane_state;
struct dc_crtc_timing *timing = &pipe_ctx->stream->timing;
bool res = false;
+ int store_h_border_left = shift_border_left_to_dst(pipe_ctx);
DC_LOGGER_INIT(pipe_ctx->stream->ctx->logger);
/* Important: scaling ratio calculation requires pixel format,
* lb depth calculation requires recout and taps require scaling ratios.
@@ -1004,14 +993,18 @@ bool resource_build_scaling_params(struct pipe_ctx *pipe_ctx)
pipe_ctx->plane_res.scl_data.format = convert_pixel_format_to_dalsurface(
pipe_ctx->plane_state->format);
- calculate_integer_scaling(pipe_ctx);
-
calculate_scaling_ratios(pipe_ctx);
calculate_viewport(pipe_ctx);
- if (pipe_ctx->plane_res.scl_data.viewport.height < 16 || pipe_ctx->plane_res.scl_data.viewport.width < 16)
+ if (pipe_ctx->plane_res.scl_data.viewport.height < 16 ||
+ pipe_ctx->plane_res.scl_data.viewport.width < 16) {
+ if (store_h_border_left) {
+ restore_border_left_from_dst(pipe_ctx,
+ store_h_border_left);
+ }
return false;
+ }
calculate_recout(pipe_ctx);
@@ -1024,8 +1017,10 @@ bool resource_build_scaling_params(struct pipe_ctx *pipe_ctx)
pipe_ctx->plane_res.scl_data.recout.x += timing->h_border_left;
pipe_ctx->plane_res.scl_data.recout.y += timing->v_border_top;
- pipe_ctx->plane_res.scl_data.h_active = timing->h_addressable + timing->h_border_left + timing->h_border_right;
- pipe_ctx->plane_res.scl_data.v_active = timing->v_addressable + timing->v_border_top + timing->v_border_bottom;
+ pipe_ctx->plane_res.scl_data.h_active = timing->h_addressable +
+ store_h_border_left + timing->h_border_right;
+ pipe_ctx->plane_res.scl_data.v_active = timing->v_addressable +
+ timing->v_border_top + timing->v_border_bottom;
/* Taps calculations */
if (pipe_ctx->plane_res.xfm != NULL)
@@ -1072,6 +1067,9 @@ bool resource_build_scaling_params(struct pipe_ctx *pipe_ctx)
plane_state->dst_rect.x,
plane_state->dst_rect.y);
+ if (store_h_border_left)
+ restore_border_left_from_dst(pipe_ctx, store_h_border_left);
+
return res;
}
@@ -1217,7 +1215,7 @@ static struct pipe_ctx *acquire_free_pipe_for_head(
return pool->funcs->acquire_idle_pipe_for_layer(context, pool, head_pipe->stream);
}
-#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
+#if defined(CONFIG_DRM_AMD_DC_DCN)
static int acquire_first_split_pipe(
struct resource_context *res_ctx,
const struct resource_pool *pool,
@@ -1298,7 +1296,7 @@ bool dc_add_plane_to_context(
free_pipe = acquire_free_pipe_for_head(context, pool, head_pipe);
- #if defined(CONFIG_DRM_AMD_DC_DCN1_0)
+ #if defined(CONFIG_DRM_AMD_DC_DCN)
if (!free_pipe) {
int pipe_idx = acquire_first_split_pipe(&context->res_ctx, pool, stream);
if (pipe_idx >= 0)
@@ -1891,7 +1889,7 @@ static int acquire_resource_from_hw_enabled_state(
inst = link->link_enc->funcs->get_dig_frontend(link->link_enc);
if (inst == ENGINE_ID_UNKNOWN)
- return false;
+ return -1;
for (i = 0; i < pool->stream_enc_count; i++) {
if (pool->stream_enc[i]->id == inst) {
@@ -1903,10 +1901,10 @@ static int acquire_resource_from_hw_enabled_state(
// tg_inst not found
if (i == pool->stream_enc_count)
- return false;
+ return -1;
if (tg_inst >= pool->timing_generator_count)
- return false;
+ return -1;
if (!res_ctx->pipe_ctx[tg_inst].stream) {
struct pipe_ctx *pipe_ctx = &res_ctx->pipe_ctx[tg_inst];
@@ -1919,8 +1917,26 @@ static int acquire_resource_from_hw_enabled_state(
pipe_ctx->plane_res.dpp = pool->dpps[tg_inst];
pipe_ctx->stream_res.opp = pool->opps[tg_inst];
- if (pool->dpps[tg_inst])
+ if (pool->dpps[tg_inst]) {
pipe_ctx->plane_res.mpcc_inst = pool->dpps[tg_inst]->inst;
+
+ // Read DPP->MPCC->OPP Pipe from HW State
+ if (pool->mpc->funcs->read_mpcc_state) {
+ struct mpcc_state s = {0};
+
+ pool->mpc->funcs->read_mpcc_state(pool->mpc, pipe_ctx->plane_res.mpcc_inst, &s);
+
+ if (s.dpp_id < MAX_MPCC)
+ pool->mpc->mpcc_array[pipe_ctx->plane_res.mpcc_inst].dpp_id = s.dpp_id;
+
+ if (s.bot_mpcc_id < MAX_MPCC)
+ pool->mpc->mpcc_array[pipe_ctx->plane_res.mpcc_inst].mpcc_bot =
+ &pool->mpc->mpcc_array[s.bot_mpcc_id];
+
+ if (s.opp_id < MAX_OPP)
+ pipe_ctx->stream_res.opp->mpc_tree_params.opp_id = s.opp_id;
+ }
+ }
pipe_ctx->pipe_idx = tg_inst;
pipe_ctx->stream = stream;
@@ -1972,7 +1988,7 @@ enum dc_status resource_map_pool_resources(
/* acquire new resources */
pipe_idx = acquire_first_free_pipe(&context->res_ctx, pool, stream);
-#ifdef CONFIG_DRM_AMD_DC_DCN1_0
+#ifdef CONFIG_DRM_AMD_DC_DCN
if (pipe_idx < 0)
pipe_idx = acquire_first_split_pipe(&context->res_ctx, pool, stream);
#endif
@@ -2050,6 +2066,13 @@ void dc_resource_state_construct(
dst_ctx->clk_mgr = dc->clk_mgr;
}
+
+bool dc_resource_is_dsc_encoding_supported(const struct dc *dc)
+{
+ return dc->res_pool->res_cap->num_dsc > 0;
+}
+
+
/**
* dc_validate_global_state() - Determine if HW can support a given state
* Checks HW resource availability and bandwidth requirement.
@@ -2306,7 +2329,7 @@ static void set_avi_info_frame(
if (color_space == COLOR_SPACE_SRGB ||
color_space == COLOR_SPACE_2020_RGB_FULLRANGE) {
hdmi_info.bits.Q0_Q1 = RGB_QUANTIZATION_FULL_RANGE;
- hdmi_info.bits.YQ0_YQ1 = YYC_QUANTIZATION_FULL_RANGE;
+ hdmi_info.bits.YQ0_YQ1 = YYC_QUANTIZATION_LIMITED_RANGE;
} else if (color_space == COLOR_SPACE_SRGB_LIMITED ||
color_space == COLOR_SPACE_2020_RGB_LIMITEDRANGE) {
hdmi_info.bits.Q0_Q1 = RGB_QUANTIZATION_LIMITED_RANGE;
@@ -2772,9 +2795,8 @@ void resource_build_bit_depth_reduction_params(struct dc_stream_state *stream,
enum dc_status dc_validate_stream(struct dc *dc, struct dc_stream_state *stream)
{
- struct dc *core_dc = dc;
struct dc_link *link = stream->link;
- struct timing_generator *tg = core_dc->res_pool->timing_generators[0];
+ struct timing_generator *tg = dc->res_pool->timing_generators[0];
enum dc_status res = DC_OK;
calculate_phy_pix_clks(stream);
@@ -2837,3 +2859,48 @@ unsigned int resource_pixel_format_to_bpp(enum surface_pixel_format format)
return -1;
}
}
+static unsigned int get_max_audio_sample_rate(struct audio_mode *modes)
+{
+ if (modes) {
+ if (modes->sample_rates.rate.RATE_192)
+ return 192000;
+ if (modes->sample_rates.rate.RATE_176_4)
+ return 176400;
+ if (modes->sample_rates.rate.RATE_96)
+ return 96000;
+ if (modes->sample_rates.rate.RATE_88_2)
+ return 88200;
+ if (modes->sample_rates.rate.RATE_48)
+ return 48000;
+ if (modes->sample_rates.rate.RATE_44_1)
+ return 44100;
+ if (modes->sample_rates.rate.RATE_32)
+ return 32000;
+ }
+ /* original logic when no audio info */
+ return 441000;
+}
+
+void get_audio_check(struct audio_info *aud_modes,
+ struct audio_check *audio_chk)
+{
+ unsigned int i;
+ unsigned int max_sample_rate = 0;
+
+ if (aud_modes) {
+ audio_chk->audio_packet_type = 0x2; /* audio sample packet: AP = 0.25 for layout 0, 1 for layout 1 */
+
+ audio_chk->max_audiosample_rate = 0;
+ for (i = 0; i < aud_modes->mode_count; i++) {
+ max_sample_rate = get_max_audio_sample_rate(&aud_modes->modes[i]);
+ if (audio_chk->max_audiosample_rate < max_sample_rate)
+ audio_chk->max_audiosample_rate = max_sample_rate;
+ /* DTS takes the same as type 2: AP = 0.25 */
+ }
+ /* check which one takes more bandwidth */
+ if (audio_chk->max_audiosample_rate > 192000)
+ audio_chk->audio_packet_type = 0x9; /* AP = 1 */
+ audio_chk->acat = 0; /* not supported */
+ }
+}
+
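get_audio_check() boils the advertised audio modes down to the two bandwidth-relevant facts: the highest sample rate across all modes, and a packet type of 0x2 (AP = 0.25) unless that rate exceeds 192 kHz, in which case 0x9 (AP = 1) is reported. A hedged usage sketch, with aud_info standing in for a sink's parsed struct audio_info:

    /* Sketch: classify a sink's audio bandwidth needs */
    struct audio_check chk = { 0 };

    get_audio_check(&aud_info, &chk);
    /* chk.max_audiosample_rate: highest rate across all advertised modes;
     * chk.audio_packet_type: 0x2 (AP = 0.25) or 0x9 (AP = 1).
     */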
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_sink.c b/drivers/gpu/drm/amd/display/dc/core/dc_sink.c
index 5cbfdf1c4b11..a249a0e5edd0 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_sink.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_sink.c
@@ -33,7 +33,7 @@
* Private functions
******************************************************************************/
-static void destruct(struct dc_sink *sink)
+static void dc_sink_destruct(struct dc_sink *sink)
{
if (sink->dc_container_id) {
kfree(sink->dc_container_id);
@@ -41,7 +41,7 @@ static void destruct(struct dc_sink *sink)
}
}
-static bool construct(struct dc_sink *sink, const struct dc_sink_init_data *init_params)
+static bool dc_sink_construct(struct dc_sink *sink, const struct dc_sink_init_data *init_params)
{
struct dc_link *link = init_params->link;
@@ -75,7 +75,7 @@ void dc_sink_retain(struct dc_sink *sink)
static void dc_sink_free(struct kref *kref)
{
struct dc_sink *sink = container_of(kref, struct dc_sink, refcount);
- destruct(sink);
+ dc_sink_destruct(sink);
kfree(sink);
}
@@ -91,7 +91,7 @@ struct dc_sink *dc_sink_create(const struct dc_sink_init_data *init_params)
if (NULL == sink)
goto alloc_fail;
- if (false == construct(sink, init_params))
+ if (false == dc_sink_construct(sink, init_params))
goto construct_fail;
kref_init(&sink->refcount);
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_stream.c b/drivers/gpu/drm/amd/display/dc/core/dc_stream.c
index bb09243758fe..6ddbb00ed37a 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_stream.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_stream.c
@@ -27,14 +27,12 @@
#include <linux/slab.h>
#include "dm_services.h"
+#include "basics/dc_common.h"
#include "dc.h"
#include "core_types.h"
#include "resource.h"
#include "ipp.h"
#include "timing_generator.h"
-#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
-#include "dcn10/dcn10_hw_sequencer.h"
-#endif
#define DC_LOGGER dc->ctx->logger
@@ -58,7 +56,7 @@ void update_stream_signal(struct dc_stream_state *stream, struct dc_sink *sink)
}
}
-static void construct(struct dc_stream_state *stream,
+static void dc_stream_construct(struct dc_stream_state *stream,
struct dc_sink *dc_sink_data)
{
uint32_t i = 0;
@@ -108,7 +106,6 @@ static void construct(struct dc_stream_state *stream,
/* EDID CAP translation for HDMI 2.0 */
stream->timing.flags.LTE_340MCSC_SCRAMBLE = dc_sink_data->edid_caps.lte_340mcsc_scramble;
-#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT
memset(&stream->timing.dsc_cfg, 0, sizeof(stream->timing.dsc_cfg));
stream->timing.dsc_cfg.num_slices_h = 0;
stream->timing.dsc_cfg.num_slices_v = 0;
@@ -117,7 +114,6 @@ static void construct(struct dc_stream_state *stream,
stream->timing.dsc_cfg.linebuf_depth = 9;
stream->timing.dsc_cfg.version_minor = 2;
stream->timing.dsc_cfg.ycbcr422_simple = 0;
-#endif
update_stream_signal(stream, dc_sink_data);
@@ -129,7 +125,7 @@ static void construct(struct dc_stream_state *stream,
stream->ctx->dc_stream_id_count++;
}
-static void destruct(struct dc_stream_state *stream)
+static void dc_stream_destruct(struct dc_stream_state *stream)
{
dc_sink_release(stream->sink);
if (stream->out_transfer_func != NULL) {
@@ -147,7 +143,7 @@ static void dc_stream_free(struct kref *kref)
{
struct dc_stream_state *stream = container_of(kref, struct dc_stream_state, refcount);
- destruct(stream);
+ dc_stream_destruct(stream);
kfree(stream);
}
@@ -170,7 +166,7 @@ struct dc_stream_state *dc_create_stream_for_sink(
if (stream == NULL)
return NULL;
- construct(stream, sink);
+ dc_stream_construct(stream, sink);
kref_init(&stream->refcount);
@@ -237,7 +233,7 @@ struct dc_stream_status *dc_stream_get_status(
static void delay_cursor_until_vupdate(struct pipe_ctx *pipe_ctx, struct dc *dc)
{
-#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
+#if defined(CONFIG_DRM_AMD_DC_DCN)
unsigned int vupdate_line;
unsigned int lines_to_vupdate, us_to_vupdate, vpos, nvpos;
struct dc_stream_state *stream = pipe_ctx->stream;
@@ -246,7 +242,7 @@ static void delay_cursor_until_vupdate(struct pipe_ctx *pipe_ctx, struct dc *dc)
if (stream->ctx->asic_id.chip_family == FAMILY_RV &&
ASICREV_IS_RAVEN(stream->ctx->asic_id.hw_internal_rev)) {
- vupdate_line = get_vupdate_offset_from_vsync(pipe_ctx);
+ vupdate_line = dc->hwss.get_vupdate_offset_from_vsync(pipe_ctx);
if (!dc_stream_get_crtc_position(dc, &stream, 1, &vpos, &nvpos))
return;
@@ -272,7 +268,7 @@ bool dc_stream_set_cursor_attributes(
const struct dc_cursor_attributes *attributes)
{
int i;
- struct dc *core_dc;
+ struct dc *dc;
struct resource_context *res_ctx;
struct pipe_ctx *pipe_to_program = NULL;
@@ -290,8 +286,8 @@ bool dc_stream_set_cursor_attributes(
return false;
}
- core_dc = stream->ctx->dc;
- res_ctx = &core_dc->current_state->res_ctx;
+ dc = stream->ctx->dc;
+ res_ctx = &dc->current_state->res_ctx;
stream->cursor_attributes = *attributes;
for (i = 0; i < MAX_PIPES; i++) {
@@ -303,17 +299,17 @@ bool dc_stream_set_cursor_attributes(
if (!pipe_to_program) {
pipe_to_program = pipe_ctx;
- delay_cursor_until_vupdate(pipe_ctx, core_dc);
- core_dc->hwss.pipe_control_lock(core_dc, pipe_to_program, true);
+ delay_cursor_until_vupdate(pipe_ctx, dc);
+ dc->hwss.pipe_control_lock(dc, pipe_to_program, true);
}
- core_dc->hwss.set_cursor_attribute(pipe_ctx);
- if (core_dc->hwss.set_cursor_sdr_white_level)
- core_dc->hwss.set_cursor_sdr_white_level(pipe_ctx);
+ dc->hwss.set_cursor_attribute(pipe_ctx);
+ if (dc->hwss.set_cursor_sdr_white_level)
+ dc->hwss.set_cursor_sdr_white_level(pipe_ctx);
}
if (pipe_to_program)
- core_dc->hwss.pipe_control_lock(core_dc, pipe_to_program, false);
+ dc->hwss.pipe_control_lock(dc, pipe_to_program, false);
return true;
}
@@ -323,7 +319,7 @@ bool dc_stream_set_cursor_position(
const struct dc_cursor_position *position)
{
int i;
- struct dc *core_dc;
+ struct dc *dc;
struct resource_context *res_ctx;
struct pipe_ctx *pipe_to_program = NULL;
@@ -337,8 +333,8 @@ bool dc_stream_set_cursor_position(
return false;
}
- core_dc = stream->ctx->dc;
- res_ctx = &core_dc->current_state->res_ctx;
+ dc = stream->ctx->dc;
+ res_ctx = &dc->current_state->res_ctx;
stream->cursor_position = *position;
for (i = 0; i < MAX_PIPES; i++) {
@@ -354,20 +350,19 @@ bool dc_stream_set_cursor_position(
if (!pipe_to_program) {
pipe_to_program = pipe_ctx;
- delay_cursor_until_vupdate(pipe_ctx, core_dc);
- core_dc->hwss.pipe_control_lock(core_dc, pipe_to_program, true);
+ delay_cursor_until_vupdate(pipe_ctx, dc);
+ dc->hwss.pipe_control_lock(dc, pipe_to_program, true);
}
- core_dc->hwss.set_cursor_position(pipe_ctx);
+ dc->hwss.set_cursor_position(pipe_ctx);
}
if (pipe_to_program)
- core_dc->hwss.pipe_control_lock(core_dc, pipe_to_program, false);
+ dc->hwss.pipe_control_lock(dc, pipe_to_program, false);
return true;
}
-#if defined(CONFIG_DRM_AMD_DC_DCN2_0)
bool dc_stream_add_writeback(struct dc *dc,
struct dc_stream_state *stream,
struct dc_writeback_info *wb_info)
@@ -411,25 +406,30 @@ bool dc_stream_add_writeback(struct dc *dc,
stream->writeback_info[stream->num_wb_info++] = *wb_info;
}
- if (!dc->hwss.update_bandwidth(dc, dc->current_state)) {
- dm_error("DC: update_bandwidth failed!\n");
- return false;
- }
-
- /* enable writeback */
if (dc->hwss.enable_writeback) {
struct dc_stream_status *stream_status = dc_stream_get_status(stream);
struct dwbc *dwb = dc->res_pool->dwbc[wb_info->dwb_pipe_inst];
+ dwb->otg_inst = stream_status->primary_otg_inst;
+ }
+ if (IS_DIAG_DC(dc->ctx->dce_environment)) {
+ if (!dc->hwss.update_bandwidth(dc, dc->current_state)) {
+ dm_error("DC: update_bandwidth failed!\n");
+ return false;
+ }
- if (dwb->funcs->is_enabled(dwb)) {
- /* writeback pipe already enabled, only need to update */
- dc->hwss.update_writeback(dc, stream_status, wb_info, dc->current_state);
- } else {
- /* Enable writeback pipe from scratch*/
- dc->hwss.enable_writeback(dc, stream_status, wb_info, dc->current_state);
+ /* enable writeback */
+ if (dc->hwss.enable_writeback) {
+ struct dwbc *dwb = dc->res_pool->dwbc[wb_info->dwb_pipe_inst];
+
+ if (dwb->funcs->is_enabled(dwb)) {
+ /* writeback pipe already enabled, only need to update */
+ dc->hwss.update_writeback(dc, wb_info, dc->current_state);
+ } else {
+ /* Enable writeback pipe from scratch*/
+ dc->hwss.enable_writeback(dc, wb_info, dc->current_state);
+ }
}
}
-
return true;
}
@@ -468,26 +468,35 @@ bool dc_stream_remove_writeback(struct dc *dc,
}
stream->num_wb_info = j;
- /* recalculate and apply DML parameters */
- if (!dc->hwss.update_bandwidth(dc, dc->current_state)) {
- dm_error("DC: update_bandwidth failed!\n");
- return false;
- }
-
- /* disable writeback */
- if (dc->hwss.disable_writeback)
- dc->hwss.disable_writeback(dc, dwb_pipe_inst);
+ if (IS_DIAG_DC(dc->ctx->dce_environment)) {
+ /* recalculate and apply DML parameters */
+ if (!dc->hwss.update_bandwidth(dc, dc->current_state)) {
+ dm_error("DC: update_bandwidth failed!\n");
+ return false;
+ }
+ /* disable writeback */
+ if (dc->hwss.disable_writeback)
+ dc->hwss.disable_writeback(dc, dwb_pipe_inst);
+ }
return true;
}
-#endif
+bool dc_stream_warmup_writeback(struct dc *dc,
+ int num_dwb,
+ struct dc_writeback_info *wb_info)
+{
+ if (dc->hwss.mmhubbub_warmup)
+ return dc->hwss.mmhubbub_warmup(dc, num_dwb, wb_info);
+ else
+ return false;
+}
uint32_t dc_stream_get_vblank_counter(const struct dc_stream_state *stream)
{
uint8_t i;
- struct dc *core_dc = stream->ctx->dc;
+ struct dc *dc = stream->ctx->dc;
struct resource_context *res_ctx =
- &core_dc->current_state->res_ctx;
+ &dc->current_state->res_ctx;
for (i = 0; i < MAX_PIPES; i++) {
struct timing_generator *tg = res_ctx->pipe_ctx[i].stream_res.tg;
@@ -544,9 +553,9 @@ bool dc_stream_get_scanoutpos(const struct dc_stream_state *stream,
{
uint8_t i;
bool ret = false;
- struct dc *core_dc = stream->ctx->dc;
+ struct dc *dc = stream->ctx->dc;
struct resource_context *res_ctx =
- &core_dc->current_state->res_ctx;
+ &dc->current_state->res_ctx;
for (i = 0; i < MAX_PIPES; i++) {
struct timing_generator *tg = res_ctx->pipe_ctx[i].stream_res.tg;
@@ -567,10 +576,8 @@ bool dc_stream_get_scanoutpos(const struct dc_stream_state *stream,
return ret;
}
-#if defined(CONFIG_DRM_AMD_DC_DCN2_0)
bool dc_stream_dmdata_status_done(struct dc *dc, struct dc_stream_state *stream)
{
- bool status = true;
struct pipe_ctx *pipe = NULL;
int i;
@@ -586,8 +593,7 @@ bool dc_stream_dmdata_status_done(struct dc *dc, struct dc_stream_state *stream)
if (i == MAX_PIPES)
return true;
- status = dc->hwss.dmdata_status_done(pipe);
- return status;
+ return dc->hwss.dmdata_status_done(pipe);
}
bool dc_stream_set_dynamic_metadata(struct dc *dc,
@@ -630,7 +636,6 @@ bool dc_stream_set_dynamic_metadata(struct dc *dc,
return true;
}
-#endif
void dc_stream_log(const struct dc *dc, const struct dc_stream_state *stream)
{
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_surface.c b/drivers/gpu/drm/amd/display/dc/core/dc_surface.c
index b9d6a5bd8522..ea1229a3e2b2 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_surface.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_surface.c
@@ -37,7 +37,7 @@
/*******************************************************************************
* Private functions
******************************************************************************/
-static void construct(struct dc_context *ctx, struct dc_plane_state *plane_state)
+static void dc_plane_construct(struct dc_context *ctx, struct dc_plane_state *plane_state)
{
plane_state->ctx = ctx;
@@ -50,7 +50,6 @@ static void construct(struct dc_context *ctx, struct dc_plane_state *plane_state
plane_state->in_transfer_func->type = TF_TYPE_BYPASS;
plane_state->in_transfer_func->ctx = ctx;
}
-#if defined(CONFIG_DRM_AMD_DC_DCN2_0)
plane_state->in_shaper_func = dc_create_transfer_func();
if (plane_state->in_shaper_func != NULL) {
plane_state->in_shaper_func->type = TF_TYPE_BYPASS;
@@ -67,10 +66,9 @@ static void construct(struct dc_context *ctx, struct dc_plane_state *plane_state
plane_state->blend_tf->ctx = ctx;
}
-#endif
}
-static void destruct(struct dc_plane_state *plane_state)
+static void dc_plane_destruct(struct dc_plane_state *plane_state)
{
if (plane_state->gamma_correction != NULL) {
dc_gamma_release(&plane_state->gamma_correction);
@@ -80,7 +78,6 @@ static void destruct(struct dc_plane_state *plane_state)
plane_state->in_transfer_func);
plane_state->in_transfer_func = NULL;
}
-#if defined(CONFIG_DRM_AMD_DC_DCN2_0)
if (plane_state->in_shaper_func != NULL) {
dc_transfer_func_release(
plane_state->in_shaper_func);
@@ -97,7 +94,6 @@ static void destruct(struct dc_plane_state *plane_state)
plane_state->blend_tf = NULL;
}
-#endif
}
/*******************************************************************************
@@ -112,16 +108,14 @@ void enable_surface_flip_reporting(struct dc_plane_state *plane_state,
struct dc_plane_state *dc_create_plane_state(struct dc *dc)
{
- struct dc *core_dc = dc;
-
struct dc_plane_state *plane_state = kvzalloc(sizeof(*plane_state),
- GFP_KERNEL);
+ GFP_KERNEL);
if (NULL == plane_state)
return NULL;
kref_init(&plane_state->refcount);
- construct(core_dc->ctx, plane_state);
+ dc_plane_construct(dc->ctx, plane_state);
return plane_state;
}
@@ -141,7 +135,7 @@ const struct dc_plane_status *dc_plane_get_status(
const struct dc_plane_state *plane_state)
{
const struct dc_plane_status *plane_status;
- struct dc *core_dc;
+ struct dc *dc;
int i;
if (!plane_state ||
@@ -152,15 +146,15 @@ const struct dc_plane_status *dc_plane_get_status(
}
plane_status = &plane_state->status;
- core_dc = plane_state->ctx->dc;
+ dc = plane_state->ctx->dc;
- if (core_dc->current_state == NULL)
+ if (dc->current_state == NULL)
return NULL;
/* Find the current plane state and set its pending bit to false */
- for (i = 0; i < core_dc->res_pool->pipe_count; i++) {
+ for (i = 0; i < dc->res_pool->pipe_count; i++) {
struct pipe_ctx *pipe_ctx =
- &core_dc->current_state->res_ctx.pipe_ctx[i];
+ &dc->current_state->res_ctx.pipe_ctx[i];
if (pipe_ctx->plane_state != plane_state)
continue;
@@ -170,14 +164,14 @@ const struct dc_plane_status *dc_plane_get_status(
break;
}
- for (i = 0; i < core_dc->res_pool->pipe_count; i++) {
+ for (i = 0; i < dc->res_pool->pipe_count; i++) {
struct pipe_ctx *pipe_ctx =
- &core_dc->current_state->res_ctx.pipe_ctx[i];
+ &dc->current_state->res_ctx.pipe_ctx[i];
if (pipe_ctx->plane_state != plane_state)
continue;
- core_dc->hwss.update_pending_status(pipe_ctx);
+ dc->hwss.update_pending_status(pipe_ctx);
}
return plane_status;
@@ -191,7 +185,7 @@ void dc_plane_state_retain(struct dc_plane_state *plane_state)
static void dc_plane_state_free(struct kref *kref)
{
struct dc_plane_state *plane_state = container_of(kref, struct dc_plane_state, refcount);
- destruct(plane_state);
+ dc_plane_destruct(plane_state);
kvfree(plane_state);
}
@@ -262,7 +256,6 @@ alloc_fail:
return NULL;
}
-#if defined(CONFIG_DRM_AMD_DC_DCN2_0)
static void dc_3dlut_func_free(struct kref *kref)
{
struct dc_3dlut *lut = container_of(kref, struct dc_3dlut, refcount);
@@ -296,6 +289,5 @@ void dc_3dlut_func_retain(struct dc_3dlut *lut)
{
kref_get(&lut->refcount);
}
-#endif
diff --git a/drivers/gpu/drm/amd/display/dc/dc.h b/drivers/gpu/drm/amd/display/dc/dc.h
index 0416a17b0897..3fa85a54360f 100644
--- a/drivers/gpu/drm/amd/display/dc/dc.h
+++ b/drivers/gpu/drm/amd/display/dc/dc.h
@@ -39,7 +39,7 @@
#include "inc/hw/dmcu.h"
#include "dml/display_mode_lib.h"
-#define DC_VER "3.2.56"
+#define DC_VER "3.2.68"
#define MAX_SURFACES 3
#define MAX_PLANES 6
@@ -54,6 +54,10 @@ struct dc_versions {
struct dmcu_version dmcu_version;
};
+enum dp_protocol_version {
+ DP_VERSION_1_4,
+};
+
enum dc_plane_type {
DC_PLANE_TYPE_INVALID,
DC_PLANE_TYPE_DCE_RGB,
@@ -112,17 +116,15 @@ struct dc_caps {
bool disable_dp_clk_share;
bool psp_setup_panel_mode;
bool extended_aux_timeout_support;
-#ifdef CONFIG_DRM_AMD_DC_DCN2_0
+ bool dmcub_support;
bool hw_3d_lut;
-#endif
+ enum dp_protocol_version max_dp_protocol_version;
struct dc_plane_cap planes[MAX_PLANES];
};
struct dc_bug_wa {
-#if defined(CONFIG_DRM_AMD_DC_DCN2_0)
bool no_connect_phy_config;
bool dedcn20_305_wa;
-#endif
bool skip_clock_update;
};
@@ -155,11 +157,14 @@ struct dc_surface_dcc_cap {
bool const_color_support;
};
-struct dc_static_screen_events {
- bool force_trigger;
- bool cursor_update;
- bool surface_update;
- bool overlay_update;
+struct dc_static_screen_params {
+ struct {
+ bool force_trigger;
+ bool cursor_update;
+ bool surface_update;
+ bool overlay_update;
+ } triggers;
+ unsigned int num_frames;
};
@@ -363,10 +368,10 @@ struct dc_debug_options {
bool disable_dfs_bypass;
bool disable_dpp_power_gate;
bool disable_hubp_power_gate;
-#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT
bool disable_dsc_power_gate;
int dsc_min_slice_height_override;
-#endif
+ int dsc_bpp_increment_div;
+ bool native422_support;
bool disable_pplib_wm_range;
enum wm_report_mode pplib_wm_report_mode;
unsigned int min_disp_clk_khz;
@@ -401,22 +406,25 @@ struct dc_debug_options {
unsigned int force_odm_combine; //bit vector based on otg inst
unsigned int force_fclk_khz;
bool disable_tri_buf;
+ bool dmub_offload_enabled;
+ bool dmcub_emulation;
+ bool dmub_command_table; /* for testing only */
struct dc_bw_validation_profile bw_val_profile;
-#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT
bool disable_fec;
-#endif
-#ifdef CONFIG_DRM_AMD_DC_DCN2_1
bool disable_48mhz_pwrdwn;
-#endif
/* This forces a hard min on the DCFCLK requested to SMU/PP
* watermarks are not affected.
*/
unsigned int force_min_dcfclk_mhz;
bool disable_timing_sync;
-#if defined(CONFIG_DRM_AMD_DC_DCN2_0)
bool cm_in_bypass;
-#endif
int force_clock_mode;/*every mode change.*/
+
+ bool nv12_iflip_vm_wa;
+ bool disable_dram_clock_change_vactive_support;
+ bool validate_dml_output;
+ bool enable_dmcub_surface_flip;
+ bool usbc_combo_phy_reset_wa;
};
struct dc_debug_data {
@@ -425,7 +433,6 @@ struct dc_debug_data {
uint32_t auxErrorCount;
};
-#ifdef CONFIG_DRM_AMD_DC_DCN2_0
struct dc_phy_addr_space_config {
struct {
uint64_t start_addr;
@@ -455,7 +462,6 @@ struct dc_virtual_addr_space_config {
uint32_t page_table_block_size_in_bytes;
uint8_t page_table_depth; // 1 = 1 level, 2 = 2 level, etc. 0 = invalid
};
-#endif
struct dc_bounding_box_overrides {
int sr_exit_time_ns;
@@ -483,9 +489,7 @@ struct dc {
struct dc_bounding_box_overrides bb_overrides;
struct dc_bug_wa work_arounds;
struct dc_context *ctx;
-#ifdef CONFIG_DRM_AMD_DC_DCN2_0
struct dc_phy_addr_space_config vm_pa_config;
-#endif
uint8_t link_count;
struct dc_link *links[MAX_PIPES * 2];
@@ -501,7 +505,7 @@ struct dc {
/* Inputs into BW and WM calculations. */
struct bw_calcs_dceip *bw_dceip;
struct bw_calcs_vbios *bw_vbios;
-#ifdef CONFIG_DRM_AMD_DC_DCN1_0
+#ifdef CONFIG_DRM_AMD_DC_DCN
struct dcn_soc_bounding_box *dcn_soc;
struct dcn_ip_params *dcn_ip;
struct display_mode_lib dml;
@@ -515,7 +519,7 @@ struct dc {
bool optimized_required;
/* Require to maintain clocks and bandwidth for UEFI enabled HW */
- bool optimize_seamless_boot;
+ int optimize_seamless_boot_streams;
/* FBC compressor */
struct compressor *fbc_compressor;
@@ -523,10 +527,8 @@ struct dc {
struct dc_debug_data debug_data;
const char *build_id;
-#ifdef CONFIG_DRM_AMD_DC_DCN2_0
struct vm_helper *vm_helper;
const struct gpu_info_soc_bounding_box_v1_0 *soc_bounding_box;
-#endif
};
enum frame_buffer_mode {
@@ -558,15 +560,16 @@ struct dc_init_data {
struct dc_bios *vbios_override;
enum dce_environment dce_environment;
+ struct dmub_offload_funcs *dmub_if;
+ struct dc_reg_helper_state *dmub_offload;
+
struct dc_config flags;
uint32_t log_mask;
-#ifdef CONFIG_DRM_AMD_DC_DCN2_0
/**
* gpu_info FW provided soc bounding box struct or 0 if not
* available in FW
*/
const struct gpu_info_soc_bounding_box_v1_0 *soc_bounding_box;
-#endif
};
struct dc_callback_init {
@@ -581,11 +584,9 @@ struct dc *dc_create(const struct dc_init_data *init_params);
void dc_hardware_init(struct dc *dc);
int dc_get_vmid_use_vector(struct dc *dc);
-#ifdef CONFIG_DRM_AMD_DC_DCN2_0
void dc_setup_vm_context(struct dc *dc, struct dc_virtual_addr_space_config *va_config, int vmid);
/* Returns the number of vmids supported */
int dc_setup_system_context(struct dc *dc, struct dc_phy_addr_space_config *pa_config);
-#endif
void dc_init_callbacks(struct dc *dc,
const struct dc_callback_init *init_params);
void dc_deinit_callbacks(struct dc *dc);
@@ -661,7 +662,6 @@ struct dc_transfer_func {
};
};
-#if defined(CONFIG_DRM_AMD_DC_DCN2_0)
union dc_3dlut_state {
struct {
@@ -680,12 +680,11 @@ union dc_3dlut_state {
struct dc_3dlut {
struct kref refcount;
struct tetrahedral_params lut_3d;
- uint32_t hdr_multiplier;
+ struct fixed31_32 hdr_multiplier;
bool initialized; /*remove after diag fix*/
union dc_3dlut_state state;
struct dc_context *ctx;
};
-#endif
/*
* This structure is filled in by dc_surface_get_status and contains
* the last requested address and the currently active address so the called
@@ -708,7 +707,7 @@ union surface_update_flags {
uint32_t horizontal_mirror_change:1;
uint32_t per_pixel_alpha_change:1;
uint32_t global_alpha_change:1;
- uint32_t sdr_white_level:1;
+ uint32_t hdr_mult:1;
uint32_t rotation_change:1;
uint32_t swizzle_change:1;
uint32_t scaling_change:1;
@@ -736,9 +735,7 @@ union surface_update_flags {
struct dc_plane_state {
struct dc_plane_address address;
struct dc_plane_flip_time time;
-#if defined(CONFIG_DRM_AMD_DC_DCN2_0)
bool triplebuffer_flips;
-#endif
struct scaling_taps scaling_quality;
struct rect src_rect;
struct rect dst_rect;
@@ -754,18 +751,16 @@ struct dc_plane_state {
struct dc_bias_and_scale *bias_and_scale;
struct dc_csc_transform input_csc_color_matrix;
struct fixed31_32 coeff_reduction_factor;
- uint32_t sdr_white_level;
+ struct fixed31_32 hdr_mult;
// TODO: No longer used, remove
struct dc_hdr_static_metadata hdr_static_ctx;
enum dc_color_space color_space;
-#if defined(CONFIG_DRM_AMD_DC_DCN2_0)
struct dc_3dlut *lut3d_func;
struct dc_transfer_func *in_shaper_func;
struct dc_transfer_func *blend_tf;
-#endif
enum surface_pixel_format format;
enum dc_rotation_angle rotation;
@@ -801,7 +796,6 @@ struct dc_plane_info {
enum dc_rotation_angle rotation;
enum plane_stereo_format stereo_format;
enum dc_color_space color_space;
- unsigned int sdr_white_level;
bool horizontal_mirror;
bool visible;
bool per_pixel_alpha;
@@ -825,7 +819,7 @@ struct dc_surface_update {
const struct dc_flip_addrs *flip_addr;
const struct dc_plane_info *plane_info;
const struct dc_scaling_info *scaling_info;
-
+ struct fixed31_32 hdr_mult;
/* following updates require alloc/sleep/spin that is not isr safe,
* null means no updates
*/
@@ -834,11 +828,9 @@ struct dc_surface_update {
const struct dc_csc_transform *input_csc_color_matrix;
const struct fixed31_32 *coeff_reduction_factor;
-#if defined(CONFIG_DRM_AMD_DC_DCN2_0)
const struct dc_transfer_func *func_shaper;
const struct dc_3dlut *lut3d_func;
const struct dc_transfer_func *blend_tf;
-#endif
};
/*
@@ -859,11 +851,9 @@ void dc_transfer_func_retain(struct dc_transfer_func *dc_tf);
void dc_transfer_func_release(struct dc_transfer_func *dc_tf);
struct dc_transfer_func *dc_create_transfer_func(void);
-#if defined(CONFIG_DRM_AMD_DC_DCN2_0)
struct dc_3dlut *dc_create_3dlut_func(void);
void dc_3dlut_func_release(struct dc_3dlut *lut);
void dc_3dlut_func_retain(struct dc_3dlut *lut);
-#endif
/*
* This structure holds a surface address. There could be multiple addresses
* in cases such as Stereo 3D, Planar YUV, etc. Other per-flip attributes such
@@ -925,6 +915,8 @@ void dc_resource_state_copy_construct_current(
void dc_resource_state_destruct(struct dc_state *context);
+bool dc_resource_is_dsc_encoding_supported(const struct dc *dc);
+
/*
* TODO update to make it about validation sets
* Set up streams and links associated to drive sinks
@@ -980,10 +972,10 @@ struct dpcd_caps {
bool panel_mode_edp;
bool dpcd_display_control_capable;
bool ext_receiver_cap_field_present;
-#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT
union dpcd_fec_capability fec_cap;
struct dpcd_dsc_capabilities dsc_caps;
-#endif
+ struct dc_lttpr_caps lttpr_caps;
+
};
#include "dc_link.h"
@@ -1004,14 +996,12 @@ struct dc_container_id {
};
-#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT
struct dc_sink_dsc_caps {
// 'true' if these are virtual DPCD's DSC caps (immediately upstream of sink in MST topology),
// 'false' if they are sink's DSC caps
bool is_virtual_dpcd_dsc;
struct dsc_dec_dpcd_caps dsc_dec_caps;
};
-#endif
/*
* The sink structure contains EDID and other display device properties
@@ -1026,9 +1016,7 @@ struct dc_sink {
struct stereo_3d_features features_3d[TIMING_3D_FORMAT_MAX];
bool converter_disable_audio;
-#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT
struct dc_sink_dsc_caps sink_dsc_caps;
-#endif
/* private to DC core */
struct dc_link *link;
@@ -1086,13 +1074,12 @@ unsigned int dc_get_current_backlight_pwm(struct dc *dc);
unsigned int dc_get_target_backlight_pwm(struct dc *dc);
bool dc_is_dmcu_initialized(struct dc *dc);
+bool dc_is_hw_initialized(struct dc *dc);
enum dc_status dc_set_clock(struct dc *dc, enum dc_clock_type clock_type, uint32_t clk_khz, uint32_t stepping);
void dc_get_clock(struct dc *dc, enum dc_clock_type clock_type, struct dc_clock_config *clock_cfg);
-#if defined(CONFIG_DRM_AMD_DC_DSC_SUPPORT)
/*******************************************************************************
* DSC Interfaces
******************************************************************************/
#include "dc_dsc.h"
-#endif
#endif /* DC_INTERFACE_H_ */
diff --git a/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c b/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c
new file mode 100644
index 000000000000..59c298a6484f
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c
@@ -0,0 +1,134 @@
+/*
+ * Copyright 2019 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include "dc.h"
+#include "dc_dmub_srv.h"
+#include "../dmub/inc/dmub_srv.h"
+
+static void dc_dmub_srv_construct(struct dc_dmub_srv *dc_srv, struct dc *dc,
+ struct dmub_srv *dmub)
+{
+ dc_srv->dmub = dmub;
+ dc_srv->ctx = dc->ctx;
+}
+
+struct dc_dmub_srv *dc_dmub_srv_create(struct dc *dc, struct dmub_srv *dmub)
+{
+ struct dc_dmub_srv *dc_srv =
+ kzalloc(sizeof(struct dc_dmub_srv), GFP_KERNEL);
+
+ if (dc_srv == NULL) {
+ BREAK_TO_DEBUGGER();
+ return NULL;
+ }
+
+ dc_dmub_srv_construct(dc_srv, dc, dmub);
+
+ return dc_srv;
+}
+
+void dc_dmub_srv_destroy(struct dc_dmub_srv **dmub_srv)
+{
+ if (*dmub_srv) {
+ kfree(*dmub_srv);
+ *dmub_srv = NULL;
+ }
+}
+
+void dc_dmub_srv_cmd_queue(struct dc_dmub_srv *dc_dmub_srv,
+ struct dmub_cmd_header *cmd)
+{
+ struct dmub_srv *dmub = dc_dmub_srv->dmub;
+ struct dc_context *dc_ctx = dc_dmub_srv->ctx;
+ enum dmub_status status;
+
+ status = dmub_srv_cmd_queue(dmub, cmd);
+ if (status == DMUB_STATUS_OK)
+ return;
+
+ if (status != DMUB_STATUS_QUEUE_FULL)
+ goto error;
+
+ /* Execute and wait for queue to become empty again. */
+ dc_dmub_srv_cmd_execute(dc_dmub_srv);
+ dc_dmub_srv_wait_idle(dc_dmub_srv);
+
+ /* Requeue the command. */
+ status = dmub_srv_cmd_queue(dmub, cmd);
+ if (status == DMUB_STATUS_OK)
+ return;
+
+error:
+ DC_ERROR("Error queuing DMUB command: status=%d\n", status);
+}
+
+void dc_dmub_srv_cmd_execute(struct dc_dmub_srv *dc_dmub_srv)
+{
+ struct dmub_srv *dmub = dc_dmub_srv->dmub;
+ struct dc_context *dc_ctx = dc_dmub_srv->ctx;
+ enum dmub_status status;
+
+ status = dmub_srv_cmd_execute(dmub);
+ if (status != DMUB_STATUS_OK)
+ DC_ERROR("Error starting DMUB execution: status=%d\n", status);
+}
+
+void dc_dmub_srv_wait_idle(struct dc_dmub_srv *dc_dmub_srv)
+{
+ struct dmub_srv *dmub = dc_dmub_srv->dmub;
+ struct dc_context *dc_ctx = dc_dmub_srv->ctx;
+ enum dmub_status status;
+
+ status = dmub_srv_wait_for_idle(dmub, 100000);
+ if (status != DMUB_STATUS_OK)
+ DC_ERROR("Error waiting for DMUB idle: status=%d\n", status);
+}
+
+void dc_dmub_srv_wait_phy_init(struct dc_dmub_srv *dc_dmub_srv)
+{
+ struct dmub_srv *dmub = dc_dmub_srv->dmub;
+ struct dc_context *dc_ctx = dc_dmub_srv->ctx;
+ enum dmub_status status;
+
+ for (;;) {
+ /* Wait up to a second for PHY init. */
+ status = dmub_srv_wait_for_phy_init(dmub, 1000000);
+ if (status == DMUB_STATUS_OK)
+ /* Initialization OK */
+ break;
+
+ DC_ERROR("DMCUB PHY init failed: status=%d\n", status);
+ ASSERT(0);
+
+ if (status != DMUB_STATUS_TIMEOUT)
+ /*
+ * The server has likely already initialized, or we
+ * don't have DMCUB HW support - waiting longer
+ * won't help.
+ */
+ break;
+
+ /* Continue spinning so we don't hang the ASIC. */
+ }
+}
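
For context, the intended call pattern for this new service is queue, then execute, then wait for idle. A minimal usage sketch, not part of the patch, assuming dc->ctx->dmub_srv was created via dc_dmub_srv_create() and with a purely illustrative header-only command:

	/* Illustrative only: a real caller fills in a concrete DMUB
	 * command type and payload. */
	struct dmub_cmd_header cmd = { 0 };

	cmd.type = 0;          /* hypothetical command type */
	cmd.payload_bytes = 0; /* header-only command */

	/* Queue flushes internally if the ring buffer is full. */
	dc_dmub_srv_cmd_queue(dc->ctx->dmub_srv, &cmd);

	/* Kick off execution, then block until DMCUB goes idle. */
	dc_dmub_srv_cmd_execute(dc->ctx->dmub_srv);
	dc_dmub_srv_wait_idle(dc->ctx->dmub_srv);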
diff --git a/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.h b/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.h
new file mode 100644
index 000000000000..754b6077539c
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.h
@@ -0,0 +1,60 @@
+/*
+ * Copyright 2018 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef _DMUB_DC_SRV_H_
+#define _DMUB_DC_SRV_H_
+
+#include "os_types.h"
+#include "../dmub/inc/dmub_cmd.h"
+
+struct dmub_srv;
+struct dmub_cmd_header;
+
+struct dc_reg_helper_state {
+ bool gather_in_progress;
+ uint32_t same_addr_count;
+ bool should_burst_write;
+ union dmub_rb_cmd cmd_data;
+ unsigned int reg_seq_count;
+};
+
+struct dc_dmub_srv {
+ struct dmub_srv *dmub;
+ struct dc_reg_helper_state reg_helper_offload;
+
+ struct dc_context *ctx;
+ void *dm;
+};
+
+void dc_dmub_srv_cmd_queue(struct dc_dmub_srv *dc_dmub_srv,
+ struct dmub_cmd_header *cmd);
+
+void dc_dmub_srv_cmd_execute(struct dc_dmub_srv *dc_dmub_srv);
+
+void dc_dmub_srv_wait_idle(struct dc_dmub_srv *dc_dmub_srv);
+
+void dc_dmub_srv_wait_phy_init(struct dc_dmub_srv *dc_dmub_srv);
+
+#endif /* _DMUB_DC_SRV_H_ */
diff --git a/drivers/gpu/drm/amd/display/dc/dc_dp_types.h b/drivers/gpu/drm/amd/display/dc/dc_dp_types.h
index ef79a686e4c2..dfe4472c9e40 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_dp_types.h
+++ b/drivers/gpu/drm/amd/display/dc/dc_dp_types.h
@@ -129,9 +129,7 @@ struct dc_link_training_overrides {
bool *alternate_scrambler_reset;
bool *enhanced_framing;
bool *mst_enable;
-#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT
bool *fec_enable;
-#endif
};
union dpcd_rev {
@@ -471,13 +469,13 @@ union training_aux_rd_interval {
/* Automated test structures */
union test_request {
struct {
- uint8_t LINK_TRAINING :1;
- uint8_t LINK_TEST_PATTRN :1;
- uint8_t EDID_READ :1;
- uint8_t PHY_TEST_PATTERN :1;
- uint8_t AUDIO_TEST_PATTERN :1;
- uint8_t RESERVED :1;
- uint8_t TEST_STEREO_3D :1;
+ uint8_t LINK_TRAINING :1;
+ uint8_t LINK_TEST_PATTRN :1;
+ uint8_t EDID_READ :1;
+ uint8_t PHY_TEST_PATTERN :1;
+ uint8_t RESERVED :1;
+ uint8_t AUDIO_TEST_PATTERN :1;
+ uint8_t TEST_AUDIO_DISABLED_VIDEO :1;
} bits;
uint8_t raw;
};
@@ -524,19 +522,52 @@ union link_test_pattern {
union test_misc {
struct dpcd_test_misc_bits {
- unsigned char SYNC_CLOCK :1;
+ unsigned char SYNC_CLOCK :1;
/* dpcd_test_color_format */
- unsigned char CLR_FORMAT :2;
+ unsigned char CLR_FORMAT :2;
/* dpcd_test_dyn_range */
- unsigned char DYN_RANGE :1;
- unsigned char YCBCR :1;
+ unsigned char DYN_RANGE :1;
+ unsigned char YCBCR_COEFS :1;
/* dpcd_test_bit_depth */
- unsigned char BPC :3;
+ unsigned char BPC :3;
} bits;
unsigned char raw;
};
-#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT
+union audio_test_mode {
+ struct {
+ unsigned char sampling_rate :4;
+ unsigned char channel_count :4;
+ } bits;
+ unsigned char raw;
+};
+
+union audio_test_pattern_period {
+ struct {
+ unsigned char pattern_period :4;
+ unsigned char reserved :4;
+ } bits;
+ unsigned char raw;
+};
+
+struct audio_test_pattern_type {
+ unsigned char value;
+};
+
+struct dp_audio_test_data_flags {
+ uint8_t test_requested :1;
+ uint8_t disable_video :1;
+};
+
+struct dp_audio_test_data {
+
+ struct dp_audio_test_data_flags flags;
+ uint8_t sampling_rate;
+ uint8_t channel_count;
+ uint8_t pattern_type;
+ uint8_t pattern_period[8];
+};
+
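A hedged sketch of how these unions are meant to be used: unpack a raw DPCD byte into the new per-link dp_audio_test_data (the DPCD read itself, the source address, and the link variable are assumed):

	union audio_test_mode mode;

	/* dpcd_byte is assumed to come from the sink's audio test
	 * mode DPCD field. */
	mode.raw = dpcd_byte;

	link->audio_test_data.flags.test_requested = 1;
	link->audio_test_data.sampling_rate = mode.bits.sampling_rate;
	link->audio_test_data.channel_count = mode.bits.channel_count;
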
/* FEC capability DPCD register field bits */
union dpcd_fec_capability {
struct {
@@ -661,6 +692,5 @@ struct dpcd_dsc_capabilities {
union dpcd_dsc_ext_capabilities dsc_ext_caps;
};
-#endif /* CONFIG_DRM_AMD_DC_DSC_SUPPORT */
#endif /* DC_DP_TYPES_H */
diff --git a/drivers/gpu/drm/amd/display/dc/dc_dsc.h b/drivers/gpu/drm/amd/display/dc/dc_dsc.h
index 0ed2962add5a..3800340a5b4f 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_dsc.h
+++ b/drivers/gpu/drm/amd/display/dc/dc_dsc.h
@@ -1,4 +1,3 @@
-#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT
#ifndef DC_DSC_H_
#define DC_DSC_H_
/*
@@ -42,21 +41,28 @@ struct dc_dsc_bw_range {
struct display_stream_compressor {
const struct dsc_funcs *funcs;
-#ifndef AMD_EDID_UTILITY
struct dc_context *ctx;
int inst;
-#endif
};
-bool dc_dsc_parse_dsc_dpcd(const uint8_t *dpcd_dsc_basic_data,
+struct dc_dsc_policy {
+ bool use_min_slices_h;
+ int max_slices_h; // 0 means use the maximum available
+ int min_slice_height; // Must not be less than 8
+ uint32_t max_target_bpp;
+ uint32_t min_target_bpp;
+};
+
+bool dc_dsc_parse_dsc_dpcd(const struct dc *dc,
+ const uint8_t *dpcd_dsc_basic_data,
const uint8_t *dpcd_dsc_ext_data,
struct dsc_dec_dpcd_caps *dsc_sink_caps);
bool dc_dsc_compute_bandwidth_range(
const struct display_stream_compressor *dsc,
const uint32_t dsc_min_slice_height_override,
- const uint32_t min_kbps,
- const uint32_t max_kbps,
+ const uint32_t min_bpp,
+ const uint32_t max_bpp,
const struct dsc_dec_dpcd_caps *dsc_sink_caps,
const struct dc_crtc_timing *timing,
struct dc_dsc_bw_range *range);
@@ -68,5 +74,10 @@ bool dc_dsc_compute_config(
uint32_t target_bandwidth_kbps,
const struct dc_crtc_timing *timing,
struct dc_dsc_config *dsc_cfg);
-#endif
+
+void dc_dsc_get_policy_for_timing(const struct dc_crtc_timing *timing,
+ struct dc_dsc_policy *policy);
+
+void dc_dsc_policy_set_max_target_bpp_limit(uint32_t limit);
+
#endif
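
A minimal sketch of the intended flow for the new policy helpers, assuming a valid, DSC-capable timing pointer; feeding the per-timing ceiling back into the module-wide limit is illustrative, not required:

	struct dc_dsc_policy policy = { 0 };

	/* Derive slice and target-bpp constraints for this timing... */
	dc_dsc_get_policy_for_timing(timing, &policy);

	/* ...and, illustratively, clamp the global target-bpp ceiling. */
	dc_dsc_policy_set_max_target_bpp_limit(policy.max_target_bpp);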
diff --git a/drivers/gpu/drm/amd/display/dc/dc_helper.c b/drivers/gpu/drm/amd/display/dc/dc_helper.c
index 30b2f9edd42f..737048d8a96c 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_helper.c
+++ b/drivers/gpu/drm/amd/display/dc/dc_helper.c
@@ -32,6 +32,74 @@
#include "dm_services.h"
#include <stdarg.h>
+#include "dc.h"
+#include "dc_dmub_srv.h"
+
+static inline void submit_dmub_read_modify_write(
+ struct dc_reg_helper_state *offload,
+ const struct dc_context *ctx)
+{
+ struct dmub_rb_cmd_read_modify_write *cmd_buf = &offload->cmd_data.read_modify_write;
+ bool gather = false;
+
+ offload->should_burst_write =
+ (offload->same_addr_count == (DMUB_READ_MODIFY_WRITE_SEQ__MAX - 1));
+ cmd_buf->header.payload_bytes =
+ sizeof(struct dmub_cmd_read_modify_write_sequence) * offload->reg_seq_count;
+
+ gather = ctx->dmub_srv->reg_helper_offload.gather_in_progress;
+ ctx->dmub_srv->reg_helper_offload.gather_in_progress = false;
+
+ dc_dmub_srv_cmd_queue(ctx->dmub_srv, &cmd_buf->header);
+
+ ctx->dmub_srv->reg_helper_offload.gather_in_progress = gather;
+
+ memset(cmd_buf, 0, sizeof(*cmd_buf));
+
+ offload->reg_seq_count = 0;
+ offload->same_addr_count = 0;
+}
+
+static inline void submit_dmub_burst_write(
+ struct dc_reg_helper_state *offload,
+ const struct dc_context *ctx)
+{
+ struct dmub_rb_cmd_burst_write *cmd_buf = &offload->cmd_data.burst_write;
+ bool gather = false;
+
+ cmd_buf->header.payload_bytes =
+ sizeof(uint32_t) * offload->reg_seq_count;
+
+ gather = ctx->dmub_srv->reg_helper_offload.gather_in_progress;
+ ctx->dmub_srv->reg_helper_offload.gather_in_progress = false;
+
+ dc_dmub_srv_cmd_queue(ctx->dmub_srv, &cmd_buf->header);
+
+ ctx->dmub_srv->reg_helper_offload.gather_in_progress = gather;
+
+ memset(cmd_buf, 0, sizeof(*cmd_buf));
+
+ offload->reg_seq_count = 0;
+}
+
+static inline void submit_dmub_reg_wait(
+ struct dc_reg_helper_state *offload,
+ const struct dc_context *ctx)
+{
+ struct dmub_rb_cmd_reg_wait *cmd_buf = &offload->cmd_data.reg_wait;
+ bool gather = false;
+
+ gather = ctx->dmub_srv->reg_helper_offload.gather_in_progress;
+ ctx->dmub_srv->reg_helper_offload.gather_in_progress = false;
+
+ dc_dmub_srv_cmd_queue(ctx->dmub_srv, &cmd_buf->header);
+
+ memset(cmd_buf, 0, sizeof(*cmd_buf));
+ offload->reg_seq_count = 0;
+
+ ctx->dmub_srv->reg_helper_offload.gather_in_progress = gather;
+}
+
struct dc_reg_value_masks {
uint32_t value;
uint32_t mask;
@@ -77,6 +145,100 @@ static void set_reg_field_values(struct dc_reg_value_masks *field_value_mask,
}
}
+static void dmub_flush_buffer_execute(
+ struct dc_reg_helper_state *offload,
+ const struct dc_context *ctx)
+{
+ submit_dmub_read_modify_write(offload, ctx);
+ dc_dmub_srv_cmd_execute(ctx->dmub_srv);
+}
+
+static void dmub_flush_burst_write_buffer_execute(
+ struct dc_reg_helper_state *offload,
+ const struct dc_context *ctx)
+{
+ submit_dmub_burst_write(offload, ctx);
+ dc_dmub_srv_cmd_execute(ctx->dmub_srv);
+}
+
+static bool dmub_reg_value_burst_set_pack(const struct dc_context *ctx, uint32_t addr,
+ uint32_t reg_val)
+{
+ struct dc_reg_helper_state *offload = &ctx->dmub_srv->reg_helper_offload;
+ struct dmub_rb_cmd_burst_write *cmd_buf = &offload->cmd_data.burst_write;
+
+ /* flush command if buffer is full */
+ if (offload->reg_seq_count == DMUB_BURST_WRITE_VALUES__MAX)
+ dmub_flush_burst_write_buffer_execute(offload, ctx);
+
+ if (offload->cmd_data.cmd_common.header.type == DMUB_CMD__REG_SEQ_BURST_WRITE &&
+ addr != cmd_buf->addr) {
+ dmub_flush_burst_write_buffer_execute(offload, ctx);
+ return false;
+ }
+
+ cmd_buf->header.type = DMUB_CMD__REG_SEQ_BURST_WRITE;
+ cmd_buf->header.sub_type = 0;
+ cmd_buf->addr = addr;
+ cmd_buf->write_values[offload->reg_seq_count] = reg_val;
+ offload->reg_seq_count++;
+
+ return true;
+}
+
+static uint32_t dmub_reg_value_pack(const struct dc_context *ctx, uint32_t addr,
+ struct dc_reg_value_masks *field_value_mask)
+{
+ struct dc_reg_helper_state *offload = &ctx->dmub_srv->reg_helper_offload;
+ struct dmub_rb_cmd_read_modify_write *cmd_buf = &offload->cmd_data.read_modify_write;
+ struct dmub_cmd_read_modify_write_sequence *seq;
+
+ /* flush command if buffer is full */
+ if (offload->cmd_data.cmd_common.header.type != DMUB_CMD__REG_SEQ_BURST_WRITE &&
+ offload->reg_seq_count == DMUB_READ_MODIFY_WRITE_SEQ__MAX)
+ dmub_flush_buffer_execute(offload, ctx);
+
+ if (offload->should_burst_write) {
+ if (dmub_reg_value_burst_set_pack(ctx, addr, field_value_mask->value))
+ return field_value_mask->value;
+ else
+ offload->should_burst_write = false;
+ }
+
+ /* pack commands */
+ cmd_buf->header.type = DMUB_CMD__REG_SEQ_READ_MODIFY_WRITE;
+ cmd_buf->header.sub_type = 0;
+ seq = &cmd_buf->seq[offload->reg_seq_count];
+
+ if (offload->reg_seq_count) {
+ if (cmd_buf->seq[offload->reg_seq_count - 1].addr == addr)
+ offload->same_addr_count++;
+ else
+ offload->same_addr_count = 0;
+ }
+
+ seq->addr = addr;
+ seq->modify_mask = field_value_mask->mask;
+ seq->modify_value = field_value_mask->value;
+ offload->reg_seq_count++;
+
+ return field_value_mask->value;
+}
+
+static void dmub_reg_wait_done_pack(const struct dc_context *ctx, uint32_t addr,
+ uint32_t mask, uint32_t shift, uint32_t condition_value, uint32_t time_out_us)
+{
+ struct dc_reg_helper_state *offload = &ctx->dmub_srv->reg_helper_offload;
+ struct dmub_rb_cmd_reg_wait *cmd_buf = &offload->cmd_data.reg_wait;
+
+ cmd_buf->header.type = DMUB_CMD__REG_REG_WAIT;
+ cmd_buf->header.sub_type = 0;
+ cmd_buf->reg_wait.addr = addr;
+ cmd_buf->reg_wait.condition_field_value = mask & (condition_value << shift);
+ cmd_buf->reg_wait.mask = mask;
+ cmd_buf->reg_wait.time_out_us = time_out_us;
+}
+
uint32_t generic_reg_update_ex(const struct dc_context *ctx,
uint32_t addr, int n,
uint8_t shift1, uint32_t mask1, uint32_t field_value1,
@@ -93,6 +255,11 @@ uint32_t generic_reg_update_ex(const struct dc_context *ctx,
va_end(ap);
+ if (ctx->dmub_srv &&
+ ctx->dmub_srv->reg_helper_offload.gather_in_progress)
+ return dmub_reg_value_pack(ctx, addr, &field_value_mask);
+ /* todo: return void so we can decouple code running in driver from register states */
+
/* mmio write directly */
reg_val = dm_read_reg(ctx, addr);
reg_val = (reg_val & ~field_value_mask.mask) | field_value_mask.value;
@@ -118,6 +285,13 @@ uint32_t generic_reg_set_ex(const struct dc_context *ctx,
/* mmio write directly */
reg_val = (reg_val & ~field_value_mask.mask) | field_value_mask.value;
+
+ if (ctx->dmub_srv &&
+ ctx->dmub_srv->reg_helper_offload.gather_in_progress) {
+ return dmub_reg_value_burst_set_pack(ctx, addr, reg_val);
+ /* todo: return void so we can decouple code running in driver from register states */
+ }
+
dm_write_reg(ctx, addr, reg_val);
return reg_val;
}
@@ -134,6 +308,14 @@ uint32_t dm_read_reg_func(
return 0;
}
#endif
+
+ if (ctx->dmub_srv &&
+ ctx->dmub_srv->reg_helper_offload.gather_in_progress &&
+ !ctx->dmub_srv->reg_helper_offload.should_burst_write) {
+ ASSERT(false);
+ return 0;
+ }
+
value = cgs_read_register(ctx->cgs_device, address);
trace_amdgpu_dc_rreg(&ctx->perf_trace->read_count, address, value);
@@ -299,7 +481,19 @@ void generic_reg_wait(const struct dc_context *ctx,
uint32_t reg_val;
int i;
- /* something is terribly wrong if time out is > 200ms. (5Hz) */
+ if (ctx->dmub_srv &&
+ ctx->dmub_srv->reg_helper_offload.gather_in_progress) {
+ dmub_reg_wait_done_pack(ctx, addr, mask, shift, condition_value,
+ delay_between_poll_us * time_out_num_tries);
+ return;
+ }
+
+ /*
+ * Something is terribly wrong if the timeout is > 3000ms.
+ * 3000ms is the maximum time needed for the SMU to pass values back;
+ * this value comes from experiments.
+ */
ASSERT(delay_between_poll_us * time_out_num_tries <= 3000000);
for (i = 0; i <= time_out_num_tries; i++) {
@@ -346,12 +540,48 @@ uint32_t generic_read_indirect_reg(const struct dc_context *ctx,
{
uint32_t value = 0;
+ // When reading a register, no offload gather should be in progress.
+ if (ctx->dmub_srv &&
+ ctx->dmub_srv->reg_helper_offload.gather_in_progress) {
+ ASSERT(false);
+ }
+
dm_write_reg(ctx, addr_index, index);
value = dm_read_reg(ctx, addr_data);
return value;
}
+uint32_t generic_indirect_reg_get(const struct dc_context *ctx,
+ uint32_t addr_index, uint32_t addr_data,
+ uint32_t index, int n,
+ uint8_t shift1, uint32_t mask1, uint32_t *field_value1,
+ ...)
+{
+ uint32_t shift, mask, *field_value;
+ uint32_t value = 0;
+ int i = 1;
+
+ va_list ap;
+
+ va_start(ap, field_value1);
+
+ value = generic_read_indirect_reg(ctx, addr_index, addr_data, index);
+ *field_value1 = get_reg_field_value_ex(value, mask1, shift1);
+
+ while (i < n) {
+ shift = va_arg(ap, uint32_t);
+ mask = va_arg(ap, uint32_t);
+ field_value = va_arg(ap, uint32_t *);
+
+ *field_value = get_reg_field_value_ex(value, mask, shift);
+ i++;
+ }
+
+ va_end(ap);
+
+ return value;
+}
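
After the first positional (shift, mask, field_value) triple, generic_indirect_reg_get() pulls the remaining n - 1 triples from the varargs. A call sketch with hypothetical index/data registers and field macros:

	uint32_t field_a, field_b;

	/* Reads one indirect register and extracts two fields. */
	uint32_t reg_val = generic_indirect_reg_get(ctx,
			mmEXAMPLE_INDEX, mmEXAMPLE_DATA,
			0x10, /* indirect register index */
			2,    /* total number of fields */
			EXAMPLE__FIELD_A__SHIFT, EXAMPLE__FIELD_A_MASK, &field_a,
			EXAMPLE__FIELD_B__SHIFT, EXAMPLE__FIELD_B_MASK, &field_b);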
uint32_t generic_indirect_reg_update_ex(const struct dc_context *ctx,
uint32_t addr_index, uint32_t addr_data,
@@ -382,3 +612,68 @@ uint32_t generic_indirect_reg_update_ex(const struct dc_context *ctx,
return reg_val;
}
+
+void reg_sequence_start_gather(const struct dc_context *ctx)
+{
+ /* If reg sequence offload is supported and enabled, set a flag to
+ * indicate that the REG_SET/REG_UPDATE macros should build a reg
+ * sequence command buffer rather than write MMIO directly.
+ */
+
+ if (ctx->dmub_srv && ctx->dc->debug.dmub_offload_enabled) {
+ struct dc_reg_helper_state *offload =
+ &ctx->dmub_srv->reg_helper_offload;
+
+ /* Caller sequence mismatch - debug the caller; offload will not work. */
+ ASSERT(!offload->gather_in_progress);
+
+ offload->gather_in_progress = true;
+ }
+}
+
+void reg_sequence_start_execute(const struct dc_context *ctx)
+{
+ struct dc_reg_helper_state *offload;
+
+ if (!ctx->dmub_srv)
+ return;
+
+ offload = &ctx->dmub_srv->reg_helper_offload;
+
+ if (offload && offload->gather_in_progress) {
+ offload->gather_in_progress = false;
+ offload->should_burst_write = false;
+ switch (offload->cmd_data.cmd_common.header.type) {
+ case DMUB_CMD__REG_SEQ_READ_MODIFY_WRITE:
+ submit_dmub_read_modify_write(offload, ctx);
+ break;
+ case DMUB_CMD__REG_REG_WAIT:
+ submit_dmub_reg_wait(offload, ctx);
+ break;
+ case DMUB_CMD__REG_SEQ_BURST_WRITE:
+ submit_dmub_burst_write(offload, ctx);
+ break;
+ default:
+ return;
+ }
+
+ dc_dmub_srv_cmd_execute(ctx->dmub_srv);
+ }
+}
+
+void reg_sequence_wait_done(const struct dc_context *ctx)
+{
+ /* Callback to DM to poll until the last submission is done */
+ struct dc_reg_helper_state *offload;
+
+ if (!ctx->dmub_srv)
+ return;
+
+ offload = &ctx->dmub_srv->reg_helper_offload;
+
+ if (offload &&
+ ctx->dc->debug.dmub_offload_enabled &&
+ !ctx->dc->debug.dmcub_emulation) {
+ dc_dmub_srv_wait_idle(ctx->dmub_srv);
+ }
+}
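
Taken together, the three entry points define the offload window. A sketch of the expected calling sequence, with ctx assumed in scope; any REG_SET/REG_UPDATE/REG_WAIT issued inside the window is packed into a DMUB command buffer instead of hitting MMIO:

	reg_sequence_start_gather(ctx);

	/* ... REG_SET()/REG_UPDATE()/REG_WAIT() calls go here ... */

	reg_sequence_start_execute(ctx); /* submit and kick off DMCUB */
	reg_sequence_wait_done(ctx);     /* poll until DMCUB is idle */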
diff --git a/drivers/gpu/drm/amd/display/dc/dc_hw_types.h b/drivers/gpu/drm/amd/display/dc/dc_hw_types.h
index e0856bb8511f..25c50bcab9e9 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_hw_types.h
+++ b/drivers/gpu/drm/amd/display/dc/dc_hw_types.h
@@ -26,8 +26,6 @@
#ifndef DC_HW_TYPES_H
#define DC_HW_TYPES_H
-#ifndef AMD_EDID_UTILITY
-
#include "os_types.h"
#include "fixed31_32.h"
#include "signal_types.h"
@@ -167,12 +165,10 @@ enum surface_pixel_format {
/*swaped & float*/
SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616F,
/*grow graphics here if necessary */
-#if defined(CONFIG_DRM_AMD_DC_DCN2_0)
SURFACE_PIXEL_FORMAT_GRPH_RGB111110_FIX,
SURFACE_PIXEL_FORMAT_GRPH_BGR101111_FIX,
SURFACE_PIXEL_FORMAT_GRPH_RGB111110_FLOAT,
SURFACE_PIXEL_FORMAT_GRPH_BGR101111_FLOAT,
-#endif
SURFACE_PIXEL_FORMAT_VIDEO_BEGIN,
SURFACE_PIXEL_FORMAT_VIDEO_420_YCbCr =
SURFACE_PIXEL_FORMAT_VIDEO_BEGIN,
@@ -180,10 +176,8 @@ enum surface_pixel_format {
SURFACE_PIXEL_FORMAT_VIDEO_420_10bpc_YCbCr,
SURFACE_PIXEL_FORMAT_VIDEO_420_10bpc_YCrCb,
SURFACE_PIXEL_FORMAT_SUBSAMPLE_END,
-#if defined(CONFIG_DRM_AMD_DC_DCN2_0)
SURFACE_PIXEL_FORMAT_VIDEO_ACrYCb2101010,
SURFACE_PIXEL_FORMAT_VIDEO_CrYCbA1010102,
-#endif
SURFACE_PIXEL_FORMAT_VIDEO_AYCrCb8888,
SURFACE_PIXEL_FORMAT_INVALID
@@ -222,12 +216,10 @@ enum tile_split_values {
DC_ROTATED_MICRO_TILING = 0x3,
};
-#ifdef CONFIG_DRM_AMD_DC_DCN2_0
enum tripleBuffer_enable {
DC_TRIPLEBUFFER_DISABLE = 0x0,
DC_TRIPLEBUFFER_ENABLE = 0x1,
};
-#endif
/* TODO: These values come from hardware spec. We need to readdress this
* if they ever change.
@@ -427,13 +419,11 @@ struct dc_csc_transform {
bool enable_adjustment;
};
-#ifdef CONFIG_DRM_AMD_DC_DCN2_0
struct dc_rgb_fixed {
struct fixed31_32 red;
struct fixed31_32 green;
struct fixed31_32 blue;
};
-#endif
struct dc_gamma {
struct kref refcount;
@@ -468,10 +458,8 @@ enum dc_cursor_color_format {
CURSOR_MODE_COLOR_1BIT_AND,
CURSOR_MODE_COLOR_PRE_MULTIPLIED_ALPHA,
CURSOR_MODE_COLOR_UN_PRE_MULTIPLIED_ALPHA,
-#if defined(CONFIG_DRM_AMD_DC_DCN2_0)
CURSOR_MODE_COLOR_64BIT_FP_PRE_MULTIPLIED,
CURSOR_MODE_COLOR_64BIT_FP_UN_PRE_MULTIPLIED
-#endif
};
/*
@@ -594,8 +582,6 @@ struct scaling_taps {
bool integer_scaling;
};
-#endif /* AMD_EDID_UTILITY */
-
enum dc_timing_standard {
DC_TIMING_STANDARD_UNDEFINED,
DC_TIMING_STANDARD_DMT,
@@ -626,10 +612,8 @@ enum dc_color_depth {
COLOR_DEPTH_121212,
COLOR_DEPTH_141414,
COLOR_DEPTH_161616,
-#ifdef CONFIG_DRM_AMD_DC_DCN2_0
COLOR_DEPTH_999,
COLOR_DEPTH_111111,
-#endif
COLOR_DEPTH_COUNT
};
@@ -690,9 +674,7 @@ struct dc_crtc_timing_flags {
* rates less than or equal to 340Mcsc */
uint32_t LTE_340MCSC_SCRAMBLE:1;
-#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT
uint32_t DSC : 1; /* Use DSC with this timing */
-#endif
};
enum dc_timing_3d_format {
@@ -717,7 +699,6 @@ enum dc_timing_3d_format {
TIMING_3D_FORMAT_MAX,
};
-#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT
struct dc_dsc_config {
uint32_t num_slices_h; /* Number of DSC slices - horizontal */
uint32_t num_slices_v; /* Number of DSC slices - vertical */
@@ -728,7 +709,6 @@ struct dc_dsc_config {
bool ycbcr422_simple; /* Tell DSC engine to convert YCbCr 4:2:2 to 'YCbCr 4:2:2 simple'. */
int32_t rc_buffer_size; /* DSC RC buffer block size in bytes */
};
-#endif
struct dc_crtc_timing {
uint32_t h_total;
uint32_t h_border_left;
@@ -755,13 +735,9 @@ struct dc_crtc_timing {
enum scanning_type scan_type;
struct dc_crtc_timing_flags flags;
-#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT
struct dc_dsc_config dsc_cfg;
-#endif
};
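
With the config guards gone, DSC is enabled purely through the timing. A hedged sketch, with stream assumed to be a valid dc_stream_state and placeholder slice counts (real values come out of the DSC config calculation):

	struct dc_crtc_timing *timing = &stream->timing;

	timing->flags.DSC = 1;            /* use DSC with this timing */
	timing->dsc_cfg.num_slices_h = 2; /* placeholder slice counts */
	timing->dsc_cfg.num_slices_v = 1;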
-#ifndef AMD_EDID_UTILITY
-
enum trigger_delay {
TRIGGER_DELAY_NEXT_PIXEL = 0,
TRIGGER_DELAY_NEXT_LINE,
@@ -796,7 +772,6 @@ enum vram_type {
VIDEO_MEMORY_TYPE_GDDR6 = 6,
};
-#ifdef CONFIG_DRM_AMD_DC_DCN2_0
enum dwb_cnv_out_bpc {
DWB_CNV_OUT_BPC_8BPC = 0,
DWB_CNV_OUT_BPC_10BPC = 1,
@@ -847,7 +822,6 @@ struct mcif_buf_params {
unsigned int swlock;
};
-#endif
#define MAX_TG_COLOR_VALUE 0x3FF
struct tg_color {
@@ -857,7 +831,5 @@ struct tg_color {
uint16_t color_b_cb;
};
-#endif /* AMD_EDID_UTILITY */
-
#endif /* DC_HW_TYPES_H */
diff --git a/drivers/gpu/drm/amd/display/dc/dc_link.h b/drivers/gpu/drm/amd/display/dc/dc_link.h
index f24fd19ed93d..d25603128394 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_link.h
+++ b/drivers/gpu/drm/amd/display/dc/dc_link.h
@@ -29,13 +29,11 @@
#include "dc_types.h"
#include "grph_object_defs.h"
-#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT
enum dc_link_fec_state {
dc_link_fec_not_ready,
dc_link_fec_ready,
dc_link_fec_enabled
};
-#endif
struct dc_link_status {
bool link_active;
struct dpcd_caps *dpcd_caps;
@@ -85,6 +83,7 @@ struct dc_link {
bool link_state_valid;
bool aux_access_disabled;
bool sync_lt_in_progress;
+ bool is_lttpr_mode_transparent;
/* caps is the same as reported_link_cap. link_traing use
* reported_link_cap. Will clean up. TODO
@@ -95,6 +94,7 @@ struct dc_link {
struct dc_lane_settings cur_lane_setting;
struct dc_link_settings preferred_link_setting;
struct dc_link_training_overrides preferred_training_settings;
+ struct dp_audio_test_data audio_test_data;
uint8_t ddc_hw_inst;
@@ -133,6 +133,7 @@ struct dc_link {
struct link_flags {
bool dp_keep_receiver_powered;
bool dp_skip_DID2;
+ bool dp_skip_reset_segment;
} wa_flags;
struct link_mst_stream_allocation_table mst_stream_alloc_table;
@@ -140,9 +141,7 @@ struct dc_link {
struct link_trace link_trace;
struct gpio *hpd_gpio;
-#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT
enum dc_link_fec_state fec_state;
-#endif
};
const struct dc_link_status *dc_link_get_status(const struct dc_link *dc_link);
@@ -206,6 +205,7 @@ enum dc_detect_reason {
bool dc_link_detect(struct dc_link *dc_link, enum dc_detect_reason reason);
bool dc_link_get_hpd_state(struct dc_link *dc_link);
enum dc_status dc_link_allocate_mst_payload(struct pipe_ctx *pipe_ctx);
+enum dc_status dc_link_reallocate_mst_payload(struct dc_link *link);
/* Notify DC about DP RX Interrupt (aka Short Pulse Interrupt).
* Return:
@@ -259,6 +259,7 @@ void dc_link_dp_disable_hpd(const struct dc_link *link);
bool dc_link_dp_set_test_pattern(
struct dc_link *link,
enum dp_test_pattern test_pattern,
+ enum dp_test_pattern_color_space test_pattern_color_space,
const struct link_training_settings *p_link_settings,
const unsigned char *p_custom_pattern,
unsigned int cust_pattern_size);
@@ -290,6 +291,7 @@ void dc_link_enable_hpd(const struct dc_link *link);
void dc_link_disable_hpd(const struct dc_link *link);
void dc_link_set_test_pattern(struct dc_link *link,
enum dp_test_pattern test_pattern,
+ enum dp_test_pattern_color_space test_pattern_color_space,
const struct link_training_settings *p_link_settings,
const unsigned char *p_custom_pattern,
unsigned int cust_pattern_size);
@@ -300,11 +302,18 @@ uint32_t dc_link_bandwidth_kbps(
const struct dc_link_settings *dc_link_get_link_cap(
const struct dc_link *link);
+void dc_link_overwrite_extended_receiver_cap(
+ struct dc_link *link);
+
bool dc_submit_i2c(
struct dc *dc,
uint32_t link_index,
struct i2c_command *cmd);
+bool dc_submit_i2c_oem(
+ struct dc *dc,
+ struct i2c_command *cmd);
+
uint32_t dc_bandwidth_in_kbps_from_timing(
const struct dc_crtc_timing *timing);
#endif /* DC_LINK_H_ */
diff --git a/drivers/gpu/drm/amd/display/dc/dc_stream.h b/drivers/gpu/drm/amd/display/dc/dc_stream.h
index fdb6adc37857..92096de79dec 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_stream.h
+++ b/drivers/gpu/drm/amd/display/dc/dc_stream.h
@@ -52,7 +52,6 @@ struct freesync_context {
bool dummy;
};
-#if defined(CONFIG_DRM_AMD_DC_DCN2_0)
enum hubp_dmdata_mode {
DMDATA_SW_MODE,
DMDATA_HW_MODE
@@ -82,9 +81,7 @@ struct dc_dmdata_attributes {
/* An unbounded array of uint32s, represents software dmdata to be loaded */
uint32_t *dmdata_sw_data;
};
-#endif
-#if defined(CONFIG_DRM_AMD_DC_DCN2_0)
struct dc_writeback_info {
bool wb_enabled;
int dwb_pipe_inst;
@@ -96,7 +93,6 @@ struct dc_writeback_update {
unsigned int num_wb_info;
struct dc_writeback_info writeback_info[MAX_DWB_PIPES];
};
-#endif
enum vertical_interrupt_ref_point {
START_V_UPDATE = 0,
@@ -121,9 +117,7 @@ union stream_update_flags {
uint32_t abm_level:1;
uint32_t dpms_off:1;
uint32_t gamut_remap:1;
-#if defined(CONFIG_DRM_AMD_DC_DCN2_0)
uint32_t wb_update:1;
-#endif
} bits;
uint32_t raw;
@@ -164,6 +158,7 @@ struct dc_stream_state {
enum view_3d_format view_format;
+ bool use_vsc_sdp_for_colorimetry;
bool ignore_msa_timing_param;
bool converter_disable_audio;
uint8_t qs_bit;
@@ -203,11 +198,9 @@ struct dc_stream_state {
struct crtc_trigger_info triggered_crtc_reset;
-#if defined(CONFIG_DRM_AMD_DC_DCN2_0)
/* writeback */
unsigned int num_wb_info;
struct dc_writeback_info writeback_info[MAX_DWB_PIPES];
-#endif
/* Computed state bits */
bool mode_changed : 1;
@@ -226,9 +219,7 @@ struct dc_stream_state {
bool apply_seamless_boot_optimization;
uint32_t stream_id;
-#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT
bool is_dsc_enabled;
-#endif
union stream_update_flags update_flags;
};
@@ -251,6 +242,7 @@ struct dc_stream_update {
struct dc_info_packet *vsp_infopacket;
bool *dpms_off;
+ bool integer_scaling_update;
struct colorspace_transform *gamut_remap;
enum dc_color_space *output_color_space;
@@ -258,12 +250,8 @@ struct dc_stream_update {
struct dc_csc_transform *output_csc_transform;
-#if defined(CONFIG_DRM_AMD_DC_DCN2_0)
struct dc_writeback_update *wb_update;
-#endif
-#if defined(CONFIG_DRM_AMD_DC_DSC_SUPPORT)
struct dc_dsc_config *dsc_config;
-#endif
};
bool dc_is_stream_unchanged(
@@ -353,18 +341,23 @@ bool dc_add_all_planes_for_stream(
int plane_count,
struct dc_state *context);
-#if defined(CONFIG_DRM_AMD_DC_DCN2_0)
bool dc_stream_add_writeback(struct dc *dc,
struct dc_stream_state *stream,
struct dc_writeback_info *wb_info);
+
bool dc_stream_remove_writeback(struct dc *dc,
struct dc_stream_state *stream,
uint32_t dwb_pipe_inst);
+
+bool dc_stream_warmup_writeback(struct dc *dc,
+ int num_dwb,
+ struct dc_writeback_info *wb_info);
+
bool dc_stream_dmdata_status_done(struct dc *dc, struct dc_stream_state *stream);
+
bool dc_stream_set_dynamic_metadata(struct dc *dc,
struct dc_stream_state *stream,
struct dc_dmdata_attributes *dmdata_attr);
-#endif
enum dc_status dc_validate_stream(struct dc *dc, struct dc_stream_state *stream);
@@ -446,10 +439,10 @@ bool dc_stream_get_crc(struct dc *dc,
uint32_t *g_y,
uint32_t *b_cb);
-void dc_stream_set_static_screen_events(struct dc *dc,
+void dc_stream_set_static_screen_params(struct dc *dc,
struct dc_stream_state **stream,
int num_streams,
- const struct dc_static_screen_events *events);
+ const struct dc_static_screen_params *params);
void dc_stream_set_dyn_expansion(struct dc *dc, struct dc_stream_state *stream,
enum dc_dynamic_expansion option);
diff --git a/drivers/gpu/drm/amd/display/dc/dc_types.h b/drivers/gpu/drm/amd/display/dc/dc_types.h
index d9be8fc3889f..e59532d98cb4 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_types.h
+++ b/drivers/gpu/drm/amd/display/dc/dc_types.h
@@ -25,7 +25,6 @@
#ifndef DC_TYPES_H_
#define DC_TYPES_H_
-#ifndef AMD_EDID_UTILITY
/* AMD EdidUtility only needs a portion
* of this file; including the rest only
* causes additional issues.
@@ -48,6 +47,7 @@ struct dc_stream_state;
struct dc_link;
struct dc_sink;
struct dal;
+struct dc_dmub_srv;
/********************************
* Environment definitions
@@ -60,7 +60,12 @@ enum dce_environment {
DCE_ENV_FPGA_MAXIMUS,
/* Emulation on real HW or on FPGA. Used by Diagnostics, enforces
* requirements of Diagnostics team. */
- DCE_ENV_DIAG
+ DCE_ENV_DIAG,
+ /*
+ * Guest VM system, DC HW may exist but is not virtualized and
+ * should not be used. SW support for VDI only.
+ */
+ DCE_ENV_VIRTUAL_HW
};
/* Note: use these macro definitions instead of direct comparison! */
@@ -109,6 +114,8 @@ struct dc_context {
uint32_t dc_sink_id_count;
uint32_t dc_stream_id_count;
uint64_t fbc_gpu_addr;
+ struct dc_dmub_srv *dmub_srv;
+
#ifdef CONFIG_DRM_AMD_DC_HDCP
struct cp_psp cp_psp;
#endif
@@ -119,6 +126,7 @@ struct dc_context {
#define DC_EDID_BLOCK_SIZE 128
#define MAX_SURFACE_NUM 4
#define NUM_PIXEL_FORMATS 10
+#define MAX_REPEATER_CNT 8
#include "dc_ddc_types.h"
@@ -221,6 +229,7 @@ struct dc_panel_patch {
unsigned int extra_t12_ms;
unsigned int extra_delay_backlight_off;
unsigned int extra_t7_ms;
+ unsigned int manage_secondary_link;
};
struct dc_edid_caps {
@@ -402,6 +411,30 @@ enum dpcd_downstream_port_max_bpc {
DOWN_STREAM_MAX_12BPC,
DOWN_STREAM_MAX_16BPC
};
+
+
+enum link_training_offset {
+ DPRX = 0,
+ LTTPR_PHY_REPEATER1 = 1,
+ LTTPR_PHY_REPEATER2 = 2,
+ LTTPR_PHY_REPEATER3 = 3,
+ LTTPR_PHY_REPEATER4 = 4,
+ LTTPR_PHY_REPEATER5 = 5,
+ LTTPR_PHY_REPEATER6 = 6,
+ LTTPR_PHY_REPEATER7 = 7,
+ LTTPR_PHY_REPEATER8 = 8
+};
+
+struct dc_lttpr_caps {
+ union dpcd_rev revision;
+ uint8_t mode;
+ uint8_t max_lane_count;
+ uint8_t max_link_rate;
+ uint8_t phy_repeater_cnt;
+ uint8_t max_ext_timeout;
+ uint8_t aux_rd_interval[MAX_REPEATER_CNT - 1];
+};
+
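aux_rd_interval carries one entry per repeater below the DPRX, hence the MAX_REPEATER_CNT - 1 sizing. A hedged sketch of walking it, with caps assumed to point at a populated struct dc_lttpr_caps and phy_repeater_cnt treated as a plain count for illustration:

	uint8_t i;

	for (i = 0; i < caps->phy_repeater_cnt && i < MAX_REPEATER_CNT - 1; i++) {
		uint8_t interval = caps->aux_rd_interval[i];

		/* use interval to pace training AUX reads for repeater i + 1 */
		(void)interval;
	}
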
struct dc_dongle_caps {
/* dongle type (DP converter, CV smart dongle) */
enum display_dongle_type dongle_type;
@@ -440,7 +473,6 @@ enum display_content_type {
DISPLAY_CONTENT_TYPE_GAME = 8
};
-#if defined(CONFIG_DRM_AMD_DC_DCN2_0)
/* writeback */
struct dwb_stereo_params {
bool stereo_enabled; /* false: normal mode, true: 3D stereo */
@@ -471,7 +503,6 @@ struct dc_dwb_params {
enum dwb_subsample_position subsample_position;
struct dc_transfer_func *out_transfer_func;
};
-#endif
/* audio*/
@@ -573,15 +604,17 @@ struct audio_info {
/* this field must be last in this struct */
struct audio_mode modes[DC_MAX_AUDIO_DESC_COUNT];
};
-
+struct audio_check {
+ unsigned int audio_packet_type;
+ unsigned int max_audiosample_rate;
+ unsigned int acat;
+};
enum dc_infoframe_type {
DC_HDMI_INFOFRAME_TYPE_VENDOR = 0x81,
DC_HDMI_INFOFRAME_TYPE_AVI = 0x82,
DC_HDMI_INFOFRAME_TYPE_SPD = 0x83,
DC_HDMI_INFOFRAME_TYPE_AUDIO = 0x84,
-#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT
DC_DP_INFOFRAME_TYPE_PPS = 0x10,
-#endif
};
struct dc_info_packet {
@@ -696,7 +729,7 @@ struct psr_context {
/* The VSync rate in Hz used to calculate the
* step size for smooth brightness feature
*/
- unsigned int vsyncRateHz;
+ unsigned int vsync_rate_hz;
unsigned int skipPsrWaitForPllLock;
unsigned int numberOfControllers;
/* Unused, for future use. To indicate that first changed frame from
@@ -757,10 +790,6 @@ struct dc_clock_config {
uint32_t current_clock_khz;/*current clock in use*/
};
-#endif /*AMD_EDID_UTILITY*/
-//AMD EDID UTILITY does not need any of the above structures
-
-#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT
/* DSC DPCD capabilities */
union dsc_slice_caps1 {
struct {
@@ -830,6 +859,5 @@ struct dsc_dec_dpcd_caps {
uint32_t branch_overall_throughput_1_mps; /* In MPs */
uint32_t branch_max_line_width;
};
-#endif
#endif /* DC_TYPES_H_ */
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_abm.h b/drivers/gpu/drm/amd/display/dc/dce/dce_abm.h
index 7ba7e6f722f6..ba0caaffa24b 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_abm.h
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_abm.h
@@ -67,7 +67,6 @@
SRI(DC_ABM1_HGLS_REG_READ_PROGRESS, ABM, id), \
NBIO_SR(BIOS_SCRATCH_2)
-#if defined(CONFIG_DRM_AMD_DC_DCN2_0)
#define ABM_DCN20_REG_LIST() \
ABM_COMMON_REG_LIST_DCE_BASE(), \
SR(DC_ABM1_HG_SAMPLE_RATE), \
@@ -81,7 +80,6 @@
SR(DC_ABM1_LS_MIN_MAX_PIXEL_VALUE_THRES), \
SR(DC_ABM1_HGLS_REG_READ_PROGRESS), \
NBIO_SR(BIOS_SCRATCH_2)
-#endif
#define ABM_SF(reg_name, field_name, post_fix)\
.field_name = reg_name ## __ ## field_name ## post_fix
@@ -163,9 +161,7 @@
ABM_SF(ABM0_DC_ABM1_HGLS_REG_READ_PROGRESS, \
ABM1_BL_REG_READ_MISSED_FRAME_CLEAR, mask_sh)
-#if defined(CONFIG_DRM_AMD_DC_DCN2_0)
#define ABM_MASK_SH_LIST_DCN20(mask_sh) ABM_MASK_SH_LIST_DCE110(mask_sh)
-#endif
#define ABM_REG_FIELD_LIST(type) \
type ABM1_HG_NUM_OF_BINS_SEL; \
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_aux.c b/drivers/gpu/drm/amd/display/dc/dce/dce_aux.c
index 793c0cec407f..f1a5d2c6aa37 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_aux.c
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_aux.c
@@ -60,12 +60,14 @@ enum {
AUX_DEFER_RETRY_COUNTER = 6
};
-#define TIME_OUT_INCREMENT 1016
-#define TIME_OUT_MULTIPLIER_8 8
-#define TIME_OUT_MULTIPLIER_16 16
-#define TIME_OUT_MULTIPLIER_32 32
-#define TIME_OUT_MULTIPLIER_64 64
-#define MAX_TIMEOUT_LENGTH 127
+#define TIME_OUT_INCREMENT 1016
+#define TIME_OUT_MULTIPLIER_8 8
+#define TIME_OUT_MULTIPLIER_16 16
+#define TIME_OUT_MULTIPLIER_32 32
+#define TIME_OUT_MULTIPLIER_64 64
+#define MAX_TIMEOUT_LENGTH 127
+#define DEFAULT_AUX_ENGINE_MULT 0
+#define DEFAULT_AUX_ENGINE_LENGTH 69
static void release_engine(
struct dce_aux *engine)
@@ -427,11 +429,14 @@ void dce110_engine_destroy(struct dce_aux **engine)
}
-static bool dce_aux_configure_timeout(struct ddc_service *ddc,
+static uint32_t dce_aux_configure_timeout(struct ddc_service *ddc,
uint32_t timeout_in_us)
{
uint32_t multiplier = 0;
uint32_t length = 0;
+ uint32_t prev_length = 0;
+ uint32_t prev_mult = 0;
+ uint32_t prev_timeout_val = 0;
struct ddc *ddc_pin = ddc->ddc_pin;
struct dce_aux *aux_engine = ddc->ctx->dc->res_pool->engines[ddc_pin->pin_data->en];
struct aux_engine_dce110 *aux110 = FROM_AUX_ENGINE(aux_engine);
@@ -440,7 +445,10 @@ static bool dce_aux_configure_timeout(struct ddc_service *ddc,
aux110->polling_timeout_period = timeout_in_us * SW_AUX_TIMEOUT_PERIOD_MULTIPLIER;
/* 2-Update aux timeout period length and multiplier */
- if (timeout_in_us <= TIME_OUT_INCREMENT) {
+ if (timeout_in_us == 0) {
+ multiplier = DEFAULT_AUX_ENGINE_MULT;
+ length = DEFAULT_AUX_ENGINE_LENGTH;
+ } else if (timeout_in_us <= TIME_OUT_INCREMENT) {
multiplier = 0;
length = timeout_in_us/TIME_OUT_MULTIPLIER_8;
if (timeout_in_us % TIME_OUT_MULTIPLIER_8 != 0)
@@ -464,9 +472,29 @@ static bool dce_aux_configure_timeout(struct ddc_service *ddc,
length = (length < MAX_TIMEOUT_LENGTH) ? length : MAX_TIMEOUT_LENGTH;
+ REG_GET_2(AUX_DPHY_RX_CONTROL1, AUX_RX_TIMEOUT_LEN, &prev_length, AUX_RX_TIMEOUT_LEN_MUL, &prev_mult);
+
+ switch (prev_mult) {
+ case 0:
+ prev_timeout_val = prev_length * TIME_OUT_MULTIPLIER_8;
+ break;
+ case 1:
+ prev_timeout_val = prev_length * TIME_OUT_MULTIPLIER_16;
+ break;
+ case 2:
+ prev_timeout_val = prev_length * TIME_OUT_MULTIPLIER_32;
+ break;
+ case 3:
+ prev_timeout_val = prev_length * TIME_OUT_MULTIPLIER_64;
+ break;
+ default:
+ prev_timeout_val = DEFAULT_AUX_ENGINE_LENGTH * TIME_OUT_MULTIPLIER_8;
+ break;
+ }
+
REG_UPDATE_SEQ_2(AUX_DPHY_RX_CONTROL1, AUX_RX_TIMEOUT_LEN, length, AUX_RX_TIMEOUT_LEN_MUL, multiplier);
- return true;
+ return prev_timeout_val;
}
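
As a worked example of the encoding above: timeout_in_us = 1000 lands in the first bucket, so multiplier = 0 and length = 1000 / 8 = 125; timeout_in_us = 0 now selects the engine defaults (multiplier 0, length 69, i.e. a 69 * 8 = 552 us timeout); and an unrecognized multiplier read back from AUX_DPHY_RX_CONTROL1 falls back to the same 552 us via the switch default.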
static struct dce_aux_funcs aux_functions = {
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_aux.h b/drivers/gpu/drm/amd/display/dc/dce/dce_aux.h
index b4b2c79a8073..382465862f29 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_aux.h
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_aux.h
@@ -30,7 +30,6 @@
#include "inc/hw/aux_engine.h"
-#ifdef CONFIG_DRM_AMD_DC_DCN2_0
#define AUX_COMMON_REG_LIST0(id)\
SRI(AUX_CONTROL, DP_AUX, id), \
SRI(AUX_ARB_CONTROL, DP_AUX, id), \
@@ -39,7 +38,6 @@
SRI(AUX_INTERRUPT_CONTROL, DP_AUX, id), \
SRI(AUX_DPHY_RX_CONTROL1, DP_AUX, id), \
SRI(AUX_SW_STATUS, DP_AUX, id)
-#endif
#define AUX_COMMON_REG_LIST(id)\
SRI(AUX_CONTROL, DP_AUX, id), \
@@ -311,7 +309,7 @@ bool dce_aux_transfer_with_retries(struct ddc_service *ddc,
struct aux_payload *cmd);
struct dce_aux_funcs {
- bool (*configure_timeout)
+ uint32_t (*configure_timeout)
(struct ddc_service *ddc,
uint32_t timeout);
void (*destroy)
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.c b/drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.c
index f787a6b94781..2e992fbc0d71 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.c
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.c
@@ -905,7 +905,7 @@ static bool dce112_program_pix_clk(
struct dce110_clk_src *clk_src = TO_DCE110_CLK_SRC(clock_source);
struct bp_pixel_clock_parameters bp_pc_params = {0};
-#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
+#if defined(CONFIG_DRM_AMD_DC_DCN)
if (IS_FPGA_MAXIMUS_DC(clock_source->ctx->dce_environment)) {
unsigned int inst = pix_clk_params->controller_id - CONTROLLER_ID_D0;
unsigned dp_dto_ref_100hz = 7000000;
@@ -1004,7 +1004,6 @@ static bool get_pixel_clk_frequency_100hz(
return false;
}
-#if defined(CONFIG_DRM_AMD_DC_DCN2_0)
/* This table is used to find the *1.001 and /1.001 pixel rates from a non-precise pixel rate */
struct pixel_rate_range_table_entry {
@@ -1064,7 +1063,6 @@ static const struct clock_source_funcs dcn20_clk_src_funcs = {
.get_pix_clk_dividers = dce112_get_pix_clk_dividers,
.get_pixel_clk_frequency_100hz = get_pixel_clk_frequency_100hz
};
-#endif
/*****************************************/
/* Constructor */
@@ -1435,7 +1433,6 @@ bool dce112_clk_src_construct(
return true;
}
-#if defined(CONFIG_DRM_AMD_DC_DCN2_0)
bool dcn20_clk_src_construct(
struct dce110_clk_src *clk_src,
struct dc_context *ctx,
@@ -1451,4 +1448,3 @@ bool dcn20_clk_src_construct(
return ret;
}
-#endif
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.h b/drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.h
index 43c1bf60b83c..51bd25079606 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.h
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.h
@@ -55,7 +55,6 @@
CS_SF(PHYPLLA_PIXCLK_RESYNC_CNTL, PHYPLLA_DCCG_DEEP_COLOR_CNTL, mask_sh),\
CS_SF(PHYPLLA_PIXCLK_RESYNC_CNTL, PHYPLLA_PIXCLK_DOUBLE_RATE_ENABLE, mask_sh)
-#if defined(CONFIG_DRM_AMD_DC_DCN2_0)
#define CS_COMMON_REG_LIST_DCN2_0(index, pllid) \
SRI(PIXCLK_RESYNC_CNTL, PHYPLL, pllid),\
SRII(PHASE, DP_DTO, 0),\
@@ -76,9 +75,7 @@
SRII(PIXEL_RATE_CNTL, OTG, 3),\
SRII(PIXEL_RATE_CNTL, OTG, 4),\
SRII(PIXEL_RATE_CNTL, OTG, 5)
-#endif
-#if defined(CONFIG_DRM_AMD_DC_DCN2_1)
#define CS_COMMON_REG_LIST_DCN2_1(index, pllid) \
SRI(PIXCLK_RESYNC_CNTL, PHYPLL, pllid),\
SRII(PHASE, DP_DTO, 0),\
@@ -93,17 +90,14 @@
SRII(PIXEL_RATE_CNTL, OTG, 1),\
SRII(PIXEL_RATE_CNTL, OTG, 2),\
SRII(PIXEL_RATE_CNTL, OTG, 3)
-#endif
-#if defined(CONFIG_DRM_AMD_DC_DCN2_0)
#define CS_COMMON_MASK_SH_LIST_DCN2_0(mask_sh)\
CS_SF(DP_DTO0_PHASE, DP_DTO0_PHASE, mask_sh),\
CS_SF(DP_DTO0_MODULO, DP_DTO0_MODULO, mask_sh),\
CS_SF(PHYPLLA_PIXCLK_RESYNC_CNTL, PHYPLLA_DCCG_DEEP_COLOR_CNTL, mask_sh),\
CS_SF(OTG0_PIXEL_RATE_CNTL, DP_DTO0_ENABLE, mask_sh)
-#endif
-#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
+#if defined(CONFIG_DRM_AMD_DC_DCN)
#define CS_COMMON_REG_LIST_DCN1_0(index, pllid) \
SRI(PIXCLK_RESYNC_CNTL, PHYPLL, pllid),\
@@ -201,7 +195,6 @@ bool dce112_clk_src_construct(
const struct dce110_clk_src_shift *cs_shift,
const struct dce110_clk_src_mask *cs_mask);
-#if defined(CONFIG_DRM_AMD_DC_DCN2_0)
bool dcn20_clk_src_construct(
struct dce110_clk_src *clk_src,
struct dc_context *ctx,
@@ -210,6 +203,5 @@ bool dcn20_clk_src_construct(
const struct dce110_clk_src_regs *regs,
const struct dce110_clk_src_shift *cs_shift,
const struct dce110_clk_src_mask *cs_mask);
-#endif
#endif
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_dmcu.c b/drivers/gpu/drm/amd/display/dc/dce/dce_dmcu.c
index ba995d3f2318..30d953acd016 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_dmcu.c
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_dmcu.c
@@ -59,6 +59,12 @@
#define MCP_BL_SET_PWM_FRAC 0x6A /* Enable or disable Fractional PWM */
#define MASTER_COMM_CNTL_REG__MASTER_COMM_INTERRUPT_MASK 0x00000001L
+// PSP FW version
+#define mmMP0_SMN_C2PMSG_58 0x1607A
+
+//Register access policy version
+#define mmMP0_SMN_C2PMSG_91 0x1609B
+
static bool dce_dmcu_init(struct dmcu *dmcu)
{
// Do nothing
@@ -318,7 +324,7 @@ static void dce_get_psr_wait_loop(
return;
}
-#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
+#if defined(CONFIG_DRM_AMD_DC_DCN)
static void dcn10_get_dmcu_version(struct dmcu *dmcu)
{
struct dce_dmcu *dmcu_dce = TO_DCE_DMCU(dmcu);
@@ -373,6 +379,7 @@ static bool dcn10_dmcu_init(struct dmcu *dmcu)
const struct dc_config *config = &dmcu->ctx->dc->config;
bool status = false;
+ PERF_TRACE();
/* Definition of DC_DMCU_SCRATCH
* 0 : firmware not loaded
* 1 : PSP load DMCU FW but not initialized
@@ -429,9 +436,21 @@ static bool dcn10_dmcu_init(struct dmcu *dmcu)
break;
}
+ PERF_TRACE();
return status;
}
+static bool dcn21_dmcu_init(struct dmcu *dmcu)
+{
+ struct dce_dmcu *dmcu_dce = TO_DCE_DMCU(dmcu);
+ uint32_t dmcub_psp_version = REG_READ(DMCUB_SCRATCH15);
+
+ if (dmcu->auto_load_dmcu && dmcub_psp_version == 0) {
+ return false;
+ }
+
+ return dcn10_dmcu_init(dmcu);
+}
static bool dcn10_dmcu_load_iram(struct dmcu *dmcu,
unsigned int start_offset,
@@ -518,9 +537,6 @@ static void dcn10_dmcu_set_psr_enable(struct dmcu *dmcu, bool enable, bool wait)
if (dmcu->dmcu_state != DMCU_RUNNING)
return;
- dcn10_get_dmcu_psr_state(dmcu, &psr_state);
- if (psr_state == 0 && !enable)
- return;
/* waitDMCUReadyForCmd */
REG_WAIT(MASTER_COMM_CNTL_REG, MASTER_COMM_INTERRUPT, 0,
dmcu_wait_reg_ready_interval,
@@ -727,9 +743,7 @@ static bool dcn10_is_dmcu_initialized(struct dmcu *dmcu)
return true;
}
-#endif //(CONFIG_DRM_AMD_DC_DCN1_0)
-#if defined(CONFIG_DRM_AMD_DC_DCN2_0)
static bool dcn20_lock_phy(struct dmcu *dmcu)
{
@@ -777,7 +791,7 @@ static bool dcn20_unlock_phy(struct dmcu *dmcu)
return true;
}
-#endif //(CONFIG_DRM_AMD_DC_DCN2_0)
+#endif //(CONFIG_DRM_AMD_DC_DCN)
static const struct dmcu_funcs dce_funcs = {
.dmcu_init = dce_dmcu_init,
@@ -790,7 +804,7 @@ static const struct dmcu_funcs dce_funcs = {
.is_dmcu_initialized = dce_is_dmcu_initialized
};
-#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
+#if defined(CONFIG_DRM_AMD_DC_DCN)
static const struct dmcu_funcs dcn10_funcs = {
.dmcu_init = dcn10_dmcu_init,
.load_iram = dcn10_dmcu_load_iram,
@@ -801,9 +815,7 @@ static const struct dmcu_funcs dcn10_funcs = {
.get_psr_wait_loop = dcn10_get_psr_wait_loop,
.is_dmcu_initialized = dcn10_is_dmcu_initialized
};
-#endif
-#if defined(CONFIG_DRM_AMD_DC_DCN2_0)
static const struct dmcu_funcs dcn20_funcs = {
.dmcu_init = dcn10_dmcu_init,
.load_iram = dcn10_dmcu_load_iram,
@@ -816,6 +828,19 @@ static const struct dmcu_funcs dcn20_funcs = {
.lock_phy = dcn20_lock_phy,
.unlock_phy = dcn20_unlock_phy
};
+
+static const struct dmcu_funcs dcn21_funcs = {
+ .dmcu_init = dcn21_dmcu_init,
+ .load_iram = dcn10_dmcu_load_iram,
+ .set_psr_enable = dcn10_dmcu_set_psr_enable,
+ .setup_psr = dcn10_dmcu_setup_psr,
+ .get_psr_state = dcn10_get_dmcu_psr_state,
+ .set_psr_wait_loop = dcn10_psr_wait_loop,
+ .get_psr_wait_loop = dcn10_get_psr_wait_loop,
+ .is_dmcu_initialized = dcn10_is_dmcu_initialized,
+ .lock_phy = dcn20_lock_phy,
+ .unlock_phy = dcn20_unlock_phy
+};
#endif
static void dce_dmcu_construct(
@@ -836,6 +861,26 @@ static void dce_dmcu_construct(
dmcu_dce->dmcu_mask = dmcu_mask;
}
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+static void dcn21_dmcu_construct(
+ struct dce_dmcu *dmcu_dce,
+ struct dc_context *ctx,
+ const struct dce_dmcu_registers *regs,
+ const struct dce_dmcu_shift *dmcu_shift,
+ const struct dce_dmcu_mask *dmcu_mask)
+{
+ uint32_t psp_version = 0;
+
+ dce_dmcu_construct(dmcu_dce, ctx, regs, dmcu_shift, dmcu_mask);
+
+ if (!IS_FPGA_MAXIMUS_DC(ctx->dce_environment)) {
+ psp_version = dm_read_reg(ctx, mmMP0_SMN_C2PMSG_58);
+ dmcu_dce->base.auto_load_dmcu = ((psp_version & 0x00FF00FF) > 0x00110029);
+ dmcu_dce->base.psp_version = psp_version;
+ }
+}
+#endif
+
struct dmcu *dce_dmcu_create(
struct dc_context *ctx,
const struct dce_dmcu_registers *regs,
@@ -857,7 +902,7 @@ struct dmcu *dce_dmcu_create(
return &dmcu_dce->base;
}
-#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
+#if defined(CONFIG_DRM_AMD_DC_DCN)
struct dmcu *dcn10_dmcu_create(
struct dc_context *ctx,
const struct dce_dmcu_registers *regs,
@@ -878,9 +923,7 @@ struct dmcu *dcn10_dmcu_create(
return &dmcu_dce->base;
}
-#endif
-#if defined(CONFIG_DRM_AMD_DC_DCN2_0)
struct dmcu *dcn20_dmcu_create(
struct dc_context *ctx,
const struct dce_dmcu_registers *regs,
@@ -901,6 +944,27 @@ struct dmcu *dcn20_dmcu_create(
return &dmcu_dce->base;
}
+
+struct dmcu *dcn21_dmcu_create(
+ struct dc_context *ctx,
+ const struct dce_dmcu_registers *regs,
+ const struct dce_dmcu_shift *dmcu_shift,
+ const struct dce_dmcu_mask *dmcu_mask)
+{
+ struct dce_dmcu *dmcu_dce = kzalloc(sizeof(*dmcu_dce), GFP_KERNEL);
+
+ if (dmcu_dce == NULL) {
+ BREAK_TO_DEBUGGER();
+ return NULL;
+ }
+
+ dcn21_dmcu_construct(
+ dmcu_dce, ctx, regs, dmcu_shift, dmcu_mask);
+
+ dmcu_dce->base.funcs = &dcn21_funcs;
+
+ return &dmcu_dce->base;
+}
#endif
void dce_dmcu_destroy(struct dmcu **dmcu)
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_dmcu.h b/drivers/gpu/drm/amd/display/dc/dce/dce_dmcu.h
index cc8587683b4b..5e044c2d3d6d 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_dmcu.h
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_dmcu.h
@@ -71,6 +71,10 @@
DMCU_COMMON_REG_LIST_DCE_BASE(), \
SR(DMU_MEM_PWR_CNTL)
+#define DMCU_DCN20_REG_LIST()\
+ DMCU_DCN10_REG_LIST(), \
+ SR(DMCUB_SCRATCH15)
+
#define DMCU_SF(reg_name, field_name, post_fix)\
.field_name = reg_name ## __ ## field_name ## post_fix
@@ -175,6 +179,7 @@ struct dce_dmcu_registers {
uint32_t DMCU_INTERRUPT_TO_UC_EN_MASK;
uint32_t SMU_INTERRUPT_CONTROL;
uint32_t DC_DMCU_SCRATCH;
+ uint32_t DMCUB_SCRATCH15;
};
struct dce_dmcu {
@@ -261,13 +266,17 @@ struct dmcu *dcn10_dmcu_create(
const struct dce_dmcu_shift *dmcu_shift,
const struct dce_dmcu_mask *dmcu_mask);
-#if defined(CONFIG_DRM_AMD_DC_DCN2_0)
struct dmcu *dcn20_dmcu_create(
struct dc_context *ctx,
const struct dce_dmcu_registers *regs,
const struct dce_dmcu_shift *dmcu_shift,
const struct dce_dmcu_mask *dmcu_mask);
-#endif
+
+struct dmcu *dcn21_dmcu_create(
+ struct dc_context *ctx,
+ const struct dce_dmcu_registers *regs,
+ const struct dce_dmcu_shift *dmcu_shift,
+ const struct dce_dmcu_mask *dmcu_mask);
void dce_dmcu_destroy(struct dmcu **dmcu);
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_hwseq.c b/drivers/gpu/drm/amd/display/dc/dce/dce_hwseq.c
index 0275d6d60da4..e1c5839a80dc 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_hwseq.c
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_hwseq.c
@@ -25,7 +25,7 @@
#include "dce_hwseq.h"
#include "reg_helper.h"
-#include "hw_sequencer.h"
+#include "hw_sequencer_private.h"
#include "core_types.h"
#define CTX \
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_hwseq.h b/drivers/gpu/drm/amd/display/dc/dce/dce_hwseq.h
index 32d145a0d6fc..c5aa1f48593a 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_hwseq.h
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_hwseq.h
@@ -25,7 +25,7 @@
#ifndef __DCE_HWSEQ_H__
#define __DCE_HWSEQ_H__
-#include "hw_sequencer.h"
+#include "dc_types.h"
#define BL_REG_LIST()\
SR(LVTMA_PWRSEQ_CNTL), \
@@ -210,7 +210,6 @@
SR(DC_IP_REQUEST_CNTL), \
BL_REG_LIST()
-#if defined(CONFIG_DRM_AMD_DC_DCN2_0)
#define HWSEQ_DCN2_REG_LIST()\
HWSEQ_DCN_REG_LIST(), \
HSWEQ_DCN_PIXEL_RATE_REG_LIST(OTG, 0), \
@@ -276,9 +275,7 @@
SR(D6VGA_CONTROL), \
SR(DC_IP_REQUEST_CNTL), \
BL_REG_LIST()
-#endif
-#if defined(CONFIG_DRM_AMD_DC_DCN2_1)
#define HWSEQ_DCN21_REG_LIST()\
HWSEQ_DCN_REG_LIST(), \
HSWEQ_DCN_PIXEL_RATE_REG_LIST(OTG, 0), \
@@ -329,7 +326,6 @@
SR(D6VGA_CONTROL), \
SR(DC_IP_REQUEST_CNTL), \
BL_REG_LIST()
-#endif
struct dce_hwseq_registers {
@@ -577,7 +573,6 @@ struct dce_hwseq_registers {
HWS_SF(, VGA_TEST_CONTROL, VGA_TEST_RENDER_START, mask_sh),\
HWSEQ_LVTMA_MASK_SH_LIST(mask_sh)
-#if defined(CONFIG_DRM_AMD_DC_DCN2_0)
#define HWSEQ_DCN2_MASK_SH_LIST(mask_sh)\
HWSEQ_DCN_MASK_SH_LIST(mask_sh), \
HWS_SF(, DCHUBBUB_GLOBAL_TIMER_CNTL, DCHUBBUB_GLOBAL_TIMER_REFDIV, mask_sh), \
@@ -637,9 +632,7 @@ struct dce_hwseq_registers {
HWS_SF(, DOMAIN21_PG_STATUS, DOMAIN21_PGFSM_PWR_STATUS, mask_sh), \
HWS_SF(, DC_IP_REQUEST_CNTL, IP_REQUEST_EN, mask_sh), \
HWSEQ_LVTMA_MASK_SH_LIST(mask_sh)
-#endif
-#if defined(CONFIG_DRM_AMD_DC_DCN2_1)
#define HWSEQ_DCN21_MASK_SH_LIST(mask_sh)\
HWSEQ_DCN_MASK_SH_LIST(mask_sh), \
HWS_SF(, DCHUBBUB_GLOBAL_TIMER_CNTL, DCHUBBUB_GLOBAL_TIMER_REFDIV, mask_sh), \
@@ -682,7 +675,6 @@ struct dce_hwseq_registers {
HWSEQ_LVTMA_MASK_SH_LIST(mask_sh), \
HWS_SF(, LVTMA_PWRSEQ_CNTL, LVTMA_BLON, mask_sh), \
HWS_SF(, LVTMA_PWRSEQ_STATE, LVTMA_PWRSEQ_TARGET_STATE_R, mask_sh)
-#endif
#define HWSEQ_REG_FIELD_LIST(type) \
type DCFE_CLOCK_ENABLE; \
@@ -800,8 +792,7 @@ struct dce_hwseq_registers {
type D2VGA_MODE_ENABLE; \
type D3VGA_MODE_ENABLE; \
type D4VGA_MODE_ENABLE; \
- type AZALIA_AUDIO_DTO_MODULE;\
- type HPO_HDMISTREAMCLK_GATE_DIS;
+ type AZALIA_AUDIO_DTO_MODULE;
struct dce_hwseq_shift {
HWSEQ_REG_FIELD_LIST(uint8_t)
@@ -820,6 +811,10 @@ enum blnd_mode {
BLND_MODE_BLENDING,/* Alpha blending - blend 'current' and 'other' */
};
+struct dce_hwseq;
+struct pipe_ctx;
+struct clock_source;
+
void dce_enable_fe_clock(struct dce_hwseq *hwss,
unsigned int inst, bool enable);
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_i2c.c b/drivers/gpu/drm/amd/display/dc/dce/dce_i2c.c
index 35a75398fcb4..dd41736bb5c4 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_i2c.c
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_i2c.c
@@ -31,7 +31,7 @@ bool dce_i2c_submit_command(
struct i2c_command *cmd)
{
struct dce_i2c_hw *dce_i2c_hw;
- struct dce_i2c_sw *dce_i2c_sw;
+ struct dce_i2c_sw dce_i2c_sw = {0};
if (!ddc) {
BREAK_TO_DEBUGGER();
@@ -43,18 +43,15 @@ bool dce_i2c_submit_command(
return false;
}
- /* The software engine is only available on dce8 */
- dce_i2c_sw = dce_i2c_acquire_i2c_sw_engine(pool, ddc);
-
- if (!dce_i2c_sw) {
- dce_i2c_hw = acquire_i2c_hw_engine(pool, ddc);
-
- if (!dce_i2c_hw)
- return false;
+ dce_i2c_hw = acquire_i2c_hw_engine(pool, ddc);
+ if (dce_i2c_hw)
return dce_i2c_submit_command_hw(pool, ddc, cmd, dce_i2c_hw);
- }
- return dce_i2c_submit_command_sw(pool, ddc, cmd, dce_i2c_sw);
+ dce_i2c_sw.ctx = ddc->ctx;
+ if (dce_i2c_engine_acquire_sw(&dce_i2c_sw, ddc)) {
+ return dce_i2c_submit_command_sw(pool, ddc, cmd, &dce_i2c_sw);
+ }
+ return false;
}
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_i2c_hw.c b/drivers/gpu/drm/amd/display/dc/dce/dce_i2c_hw.c
index aad7b52165be..1cd4d8fc361f 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_i2c_hw.c
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_i2c_hw.c
@@ -296,9 +296,7 @@ static bool setup_engine(
struct dce_i2c_hw *dce_i2c_hw)
{
uint32_t i2c_setup_limit = I2C_SETUP_TIME_LIMIT_DCE;
-#if defined(CONFIG_DRM_AMD_DC_DCN2_0)
uint32_t reset_length = 0;
-#endif
/* We have checked that the I2C engine is not used by DMCU; set DC_I2C_SW_USE_I2C_REG_REQ to 1 to indicate SW is using it */
REG_UPDATE(DC_I2C_ARBITRATION, DC_I2C_SW_USE_I2C_REG_REQ, 1);
@@ -322,14 +320,12 @@ static bool setup_engine(
REG_UPDATE_N(SETUP, 2,
FN(DC_I2C_DDC1_SETUP, DC_I2C_DDC1_TIME_LIMIT), i2c_setup_limit,
FN(DC_I2C_DDC1_SETUP, DC_I2C_DDC1_ENABLE), 1);
-#if defined(CONFIG_DRM_AMD_DC_DCN2_0)
} else {
reset_length = dce_i2c_hw->send_reset_length;
REG_UPDATE_N(SETUP, 3,
FN(DC_I2C_DDC1_SETUP, DC_I2C_DDC1_TIME_LIMIT), i2c_setup_limit,
FN(DC_I2C_DDC1_SETUP, DC_I2C_DDC1_SEND_RESET_LENGTH), reset_length,
FN(DC_I2C_DDC1_SETUP, DC_I2C_DDC1_ENABLE), 1);
-#endif
}
/* Program HW priority
* set to High - interrupt software I2C at any time
@@ -705,7 +701,6 @@ void dcn1_i2c_hw_construct(
dce_i2c_hw->setup_limit = I2C_SETUP_TIME_LIMIT_DCN;
}
-#if defined(CONFIG_DRM_AMD_DC_DCN2_0)
void dcn2_i2c_hw_construct(
struct dce_i2c_hw *dce_i2c_hw,
struct dc_context *ctx,
@@ -724,4 +719,3 @@ void dcn2_i2c_hw_construct(
if (ctx->dc->debug.scl_reset_length10)
dce_i2c_hw->send_reset_length = I2C_SEND_RESET_LENGTH_10;
}
-#endif
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_i2c_hw.h b/drivers/gpu/drm/amd/display/dc/dce/dce_i2c_hw.h
index cb0234e5d597..d4b2037f7d74 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_i2c_hw.h
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_i2c_hw.h
@@ -177,9 +177,7 @@ struct dce_i2c_shift {
uint8_t DC_I2C_INDEX;
uint8_t DC_I2C_INDEX_WRITE;
uint8_t XTAL_REF_DIV;
-#if defined(CONFIG_DRM_AMD_DC_DCN2_0)
uint8_t DC_I2C_DDC1_SEND_RESET_LENGTH;
-#endif
uint8_t DC_I2C_REG_RW_CNTL_STATUS;
};
@@ -220,17 +218,13 @@ struct dce_i2c_mask {
uint32_t DC_I2C_INDEX;
uint32_t DC_I2C_INDEX_WRITE;
uint32_t XTAL_REF_DIV;
-#if defined(CONFIG_DRM_AMD_DC_DCN2_0)
uint32_t DC_I2C_DDC1_SEND_RESET_LENGTH;
-#endif
uint32_t DC_I2C_REG_RW_CNTL_STATUS;
};
-#if defined(CONFIG_DRM_AMD_DC_DCN2_0)
#define I2C_COMMON_MASK_SH_LIST_DCN2(mask_sh)\
I2C_COMMON_MASK_SH_LIST_DCE110(mask_sh),\
I2C_SF(DC_I2C_DDC1_SETUP, DC_I2C_DDC1_SEND_RESET_LENGTH, mask_sh)
-#endif
struct dce_i2c_registers {
uint32_t SETUP;
@@ -312,7 +306,6 @@ void dcn1_i2c_hw_construct(
const struct dce_i2c_shift *shifts,
const struct dce_i2c_mask *masks);
-#if defined(CONFIG_DRM_AMD_DC_DCN2_0)
void dcn2_i2c_hw_construct(
struct dce_i2c_hw *dce_i2c_hw,
struct dc_context *ctx,
@@ -320,7 +313,6 @@ void dcn2_i2c_hw_construct(
const struct dce_i2c_registers *regs,
const struct dce_i2c_shift *shifts,
const struct dce_i2c_mask *masks);
-#endif
bool dce_i2c_submit_command_hw(
struct resource_pool *pool,
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_i2c_sw.c b/drivers/gpu/drm/amd/display/dc/dce/dce_i2c_sw.c
index a5a11c251e25..87d8428df6c4 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_i2c_sw.c
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_i2c_sw.c
@@ -73,31 +73,6 @@ static void release_engine_dce_sw(
dce_i2c_sw->ddc = NULL;
}
-static bool get_hw_supported_ddc_line(
- struct ddc *ddc,
- enum gpio_ddc_line *line)
-{
- enum gpio_ddc_line line_found;
-
- *line = GPIO_DDC_LINE_UNKNOWN;
-
- if (!ddc) {
- BREAK_TO_DEBUGGER();
- return false;
- }
-
- if (!ddc->hw_info.hw_supported)
- return false;
-
- line_found = dal_ddc_get_line(ddc);
-
- if (line_found >= GPIO_DDC_LINE_COUNT)
- return false;
-
- *line = line_found;
-
- return true;
-}
static bool wait_for_scl_high_sw(
struct dc_context *ctx,
struct ddc *ddc,
@@ -524,21 +499,3 @@ bool dce_i2c_submit_command_sw(
return result;
}
-struct dce_i2c_sw *dce_i2c_acquire_i2c_sw_engine(
- struct resource_pool *pool,
- struct ddc *ddc)
-{
- enum gpio_ddc_line line;
- struct dce_i2c_sw *engine = NULL;
-
- if (get_hw_supported_ddc_line(ddc, &line))
- engine = pool->sw_i2cs[line];
-
- if (!engine)
- return NULL;
-
- if (!dce_i2c_engine_acquire_sw(engine, ddc))
- return NULL;
-
- return engine;
-}
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_i2c_sw.h b/drivers/gpu/drm/amd/display/dc/dce/dce_i2c_sw.h
index 5bbcdd455614..019fc47bb767 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_i2c_sw.h
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_i2c_sw.h
@@ -49,9 +49,9 @@ bool dce_i2c_submit_command_sw(
struct i2c_command *cmd,
struct dce_i2c_sw *dce_i2c_sw);
-struct dce_i2c_sw *dce_i2c_acquire_i2c_sw_engine(
- struct resource_pool *pool,
- struct ddc *ddc);
+bool dce_i2c_engine_acquire_sw(
+ struct dce_i2c_sw *dce_i2c_sw,
+ struct ddc *ddc_handle);
#endif
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_stream_encoder.c b/drivers/gpu/drm/amd/display/dc/dce/dce_stream_encoder.c
index 6ed922a3c1cd..451574971b96 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_stream_encoder.c
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_stream_encoder.c
@@ -137,7 +137,7 @@ static void dce110_update_generic_info_packet(
AFMT_GENERIC0_UPDATE, (packet_index == 0),
AFMT_GENERIC2_UPDATE, (packet_index == 2));
}
-#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
+#if defined(CONFIG_DRM_AMD_DC_DCN)
if (REG(AFMT_VBI_PACKET_CONTROL1)) {
switch (packet_index) {
case 0:
@@ -231,7 +231,7 @@ static void dce110_update_hdmi_info_packet(
HDMI_GENERIC1_SEND, send,
HDMI_GENERIC1_LINE, line);
break;
-#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
+#if defined(CONFIG_DRM_AMD_DC_DCN)
case 4:
if (REG(HDMI_GENERIC_PACKET_CONTROL2))
REG_UPDATE_3(HDMI_GENERIC_PACKET_CONTROL2,
@@ -275,9 +275,10 @@ static void dce110_stream_encoder_dp_set_stream_attribute(
struct stream_encoder *enc,
struct dc_crtc_timing *crtc_timing,
enum dc_color_space output_color_space,
+ bool use_vsc_sdp_for_colorimetry,
uint32_t enable_sdp_splitting)
{
-#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
+#if defined(CONFIG_DRM_AMD_DC_DCN)
uint32_t h_active_start;
uint32_t v_active_start;
uint32_t misc0 = 0;
@@ -329,7 +330,7 @@ static void dce110_stream_encoder_dp_set_stream_attribute(
if (enc110->se_mask->DP_VID_M_DOUBLE_VALUE_EN)
REG_UPDATE(DP_VID_TIMING, DP_VID_M_DOUBLE_VALUE_EN, 1);
-#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
+#if defined(CONFIG_DRM_AMD_DC_DCN)
if (enc110->se_mask->DP_VID_N_MUL)
REG_UPDATE(DP_VID_TIMING, DP_VID_N_MUL, 1);
#endif
@@ -340,7 +341,7 @@ static void dce110_stream_encoder_dp_set_stream_attribute(
break;
}
-#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
+#if defined(CONFIG_DRM_AMD_DC_DCN)
if (REG(DP_MSA_MISC))
misc1 = REG_READ(DP_MSA_MISC);
#endif
@@ -374,7 +375,7 @@ static void dce110_stream_encoder_dp_set_stream_attribute(
/* set dynamic range and YCbCr range */
-#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
+#if defined(CONFIG_DRM_AMD_DC_DCN)
switch (hw_crtc_timing.display_color_depth) {
case COLOR_DEPTH_666:
colorimetry_bpc = 0;
@@ -454,7 +455,7 @@ static void dce110_stream_encoder_dp_set_stream_attribute(
DP_DYN_RANGE, dynamic_range_rgb,
DP_YCBCR_RANGE, dynamic_range_ycbcr);
-#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
+#if defined(CONFIG_DRM_AMD_DC_DCN)
if (REG(DP_MSA_COLORIMETRY))
REG_SET(DP_MSA_COLORIMETRY, 0, DP_MSA_MISC0, misc0);
@@ -489,7 +490,7 @@ static void dce110_stream_encoder_dp_set_stream_attribute(
hw_crtc_timing.v_front_porch;
-#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
+#if defined(CONFIG_DRM_AMD_DC_DCN)
/* start at beginning of left border */
if (REG(DP_MSA_TIMING_PARAM2))
REG_SET_2(DP_MSA_TIMING_PARAM2, 0,
@@ -786,7 +787,7 @@ static void dce110_stream_encoder_update_hdmi_info_packets(
dce110_update_hdmi_info_packet(enc110, 3, &info_frame->hdrsmd);
}
-#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
+#if defined(CONFIG_DRM_AMD_DC_DCN)
if (enc110->se_mask->HDMI_DB_DISABLE) {
/* for bring up, disable dp double TODO */
if (REG(HDMI_DB_CONTROL))
@@ -824,7 +825,7 @@ static void dce110_stream_encoder_stop_hdmi_info_packets(
HDMI_GENERIC1_LINE, 0,
HDMI_GENERIC1_SEND, 0);
-#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
+#if defined(CONFIG_DRM_AMD_DC_DCN)
/* stop generic packets 2 & 3 on HDMI */
if (REG(HDMI_GENERIC_PACKET_CONTROL2))
REG_SET_6(HDMI_GENERIC_PACKET_CONTROL2, 0,
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dmub_psr.c b/drivers/gpu/drm/amd/display/dc/dce/dmub_psr.c
new file mode 100644
index 000000000000..225955ec6d39
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dce/dmub_psr.c
@@ -0,0 +1,220 @@
+/*
+ * Copyright 2019 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include "dmub_psr.h"
+#include "dc.h"
+#include "dc_dmub_srv.h"
+#include "../../dmub/inc/dmub_srv.h"
+#include "dmub_fw_state.h"
+#include "core_types.h"
+#include "ipp.h"
+
+#define MAX_PIPES 6
+
+/**
+ * Get PSR state from firmware.
+ */
+static void dmub_get_psr_state(uint32_t *psr_state)
+{
+ // Not yet implemented
+ // Trigger GPINT interrupt from firmware
+}
+
+/**
+ * Enable/Disable PSR.
+ */
+static void dmub_set_psr_enable(struct dmub_psr *dmub, bool enable)
+{
+ union dmub_rb_cmd cmd;
+ struct dc_context *dc = dmub->ctx;
+
+ cmd.psr_enable.header.type = DMUB_CMD__PSR;
+
+ if (enable)
+ cmd.psr_enable.header.sub_type = DMUB_CMD__PSR_ENABLE;
+ else
+ cmd.psr_enable.header.sub_type = DMUB_CMD__PSR_DISABLE;
+
+ cmd.psr_enable.header.payload_bytes = 0; // Send header only
+
+ dc_dmub_srv_cmd_queue(dc->dmub_srv, &cmd.psr_enable.header);
+ dc_dmub_srv_cmd_execute(dc->dmub_srv);
+ dc_dmub_srv_wait_idle(dc->dmub_srv);
+}
+
+/**
+ * Set PSR level.
+ */
+static void dmub_set_psr_level(struct dmub_psr *dmub, uint16_t psr_level)
+{
+ union dmub_rb_cmd cmd;
+ uint32_t psr_state = 0;
+ struct dc_context *dc = dmub->ctx;
+
+ dmub_get_psr_state(&psr_state);
+
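+ /* Note: dmub_get_psr_state() is still a stub, so psr_state stays 0
+ * and this early return is always taken until it is implemented.
+ */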
+ if (psr_state == 0)
+ return;
+
+ cmd.psr_set_level.header.type = DMUB_CMD__PSR;
+ cmd.psr_set_level.header.sub_type = DMUB_CMD__PSR_SET_LEVEL;
+ cmd.psr_set_level.header.payload_bytes = sizeof(struct dmub_cmd_psr_set_level_data);
+ cmd.psr_set_level.psr_set_level_data.psr_level = psr_level;
+
+ dc_dmub_srv_cmd_queue(dc->dmub_srv, &cmd.psr_set_level.header);
+ dc_dmub_srv_cmd_execute(dc->dmub_srv);
+ dc_dmub_srv_wait_idle(dc->dmub_srv);
+}
+
+/**
+ * Set up PSR by programming PHY registers and sending PSR HW context values to the firmware.
+ */
+static bool dmub_setup_psr(struct dmub_psr *dmub,
+ struct dc_link *link,
+ struct psr_context *psr_context)
+{
+ union dmub_rb_cmd cmd;
+ struct dc_context *dc = dmub->ctx;
+ struct dmub_cmd_psr_copy_settings_data *copy_settings_data
+ = &cmd.psr_copy_settings.psr_copy_settings_data;
+ struct pipe_ctx *pipe_ctx = NULL;
+ struct resource_context *res_ctx = &link->ctx->dc->current_state->res_ctx;
+
+ for (int i = 0; i < MAX_PIPES; i++) {
+ if (res_ctx &&
+ res_ctx->pipe_ctx[i].stream &&
+ res_ctx->pipe_ctx[i].stream->link &&
+ res_ctx->pipe_ctx[i].stream->link == link &&
+ res_ctx->pipe_ctx[i].stream->link->connector_signal == SIGNAL_TYPE_EDP) {
+ pipe_ctx = &res_ctx->pipe_ctx[i];
+ break;
+ }
+ }
+
+ /* The address of an embedded member (&pipe_ctx->plane_res etc.) can
+ * never be NULL, so checking the pipe itself is sufficient.
+ */
+ if (!pipe_ctx)
+ return false;
+
+ // Program DP DPHY fast training registers
+ link->link_enc->funcs->psr_program_dp_dphy_fast_training(link->link_enc,
+ psr_context->psrExitLinkTrainingRequired);
+
+ // Program DP_SEC_CNTL1 register to set transmission GPS0 line num and priority to high
+ link->link_enc->funcs->psr_program_secondary_packet(link->link_enc,
+ psr_context->sdpTransmitLineNumDeadline);
+
+ cmd.psr_copy_settings.header.type = DMUB_CMD__PSR;
+ cmd.psr_copy_settings.header.sub_type = DMUB_CMD__PSR_COPY_SETTINGS;
+ cmd.psr_copy_settings.header.payload_bytes = sizeof(struct dmub_cmd_psr_copy_settings_data);
+
+ // HW instances
+ copy_settings_data->dpphy_inst = psr_context->phyType;
+ copy_settings_data->aux_inst = psr_context->channel;
+ copy_settings_data->digfe_inst = psr_context->engineId;
+ copy_settings_data->digbe_inst = psr_context->transmitterId;
+
+ copy_settings_data->mpcc_inst = pipe_ctx->plane_res.mpcc_inst;
+
+ if (pipe_ctx->plane_res.hubp)
+ copy_settings_data->hubp_inst = pipe_ctx->plane_res.hubp->inst;
+ else
+ copy_settings_data->hubp_inst = 0;
+ if (pipe_ctx->plane_res.dpp)
+ copy_settings_data->dpp_inst = pipe_ctx->plane_res.dpp->inst;
+ else
+ copy_settings_data->dpp_inst = 0;
+ if (pipe_ctx->stream_res.opp)
+ copy_settings_data->opp_inst = pipe_ctx->stream_res.opp->inst;
+ else
+ copy_settings_data->opp_inst = 0;
+ if (pipe_ctx->stream_res.tg)
+ copy_settings_data->otg_inst = pipe_ctx->stream_res.tg->inst;
+ else
+ copy_settings_data->otg_inst = 0;
+
+ // Misc
+ copy_settings_data->psr_level = psr_context->psr_level.u32all;
+ copy_settings_data->hyst_frames = psr_context->timehyst_frames;
+ copy_settings_data->hyst_lines = psr_context->hyst_lines;
+ copy_settings_data->phy_type = psr_context->phyType;
+ copy_settings_data->aux_repeat = psr_context->aux_repeats;
+ copy_settings_data->smu_optimizations_en = psr_context->allow_smu_optimizations;
+ copy_settings_data->skip_wait_for_pll_lock = psr_context->skipPsrWaitForPllLock;
+ copy_settings_data->frame_delay = psr_context->frame_delay;
+ copy_settings_data->smu_phy_id = psr_context->smuPhyId;
+ copy_settings_data->num_of_controllers = psr_context->numberOfControllers;
+ copy_settings_data->frame_cap_ind = psr_context->psrFrameCaptureIndicationReq;
+ copy_settings_data->phy_num = psr_context->frame_delay & 0x7;
+ copy_settings_data->link_rate = psr_context->frame_delay & 0xF;
+
+ dc_dmub_srv_cmd_queue(dc->dmub_srv, &cmd.psr_copy_settings.header);
+ dc_dmub_srv_cmd_execute(dc->dmub_srv);
+ dc_dmub_srv_wait_idle(dc->dmub_srv);
+
+ return true;
+}
+
+static const struct dmub_psr_funcs psr_funcs = {
+ .set_psr_enable = dmub_set_psr_enable,
+ .setup_psr = dmub_setup_psr,
+ .get_psr_state = dmub_get_psr_state,
+ .set_psr_level = dmub_set_psr_level,
+};
+
+/**
+ * Construct PSR object.
+ */
+static void dmub_psr_construct(struct dmub_psr *psr, struct dc_context *ctx)
+{
+ psr->ctx = ctx;
+ psr->funcs = &psr_funcs;
+}
+
+/**
+ * Allocate and initialize PSR object.
+ */
+struct dmub_psr *dmub_psr_create(struct dc_context *ctx)
+{
+ struct dmub_psr *psr = kzalloc(sizeof(struct dmub_psr), GFP_KERNEL);
+
+ if (psr == NULL) {
+ BREAK_TO_DEBUGGER();
+ return NULL;
+ }
+
+ dmub_psr_construct(psr, ctx);
+
+ return psr;
+}
+
+/**
+ * Deallocate PSR object.
+ */
+void dmub_psr_destroy(struct dmub_psr **dmub)
+{
+ kfree(*dmub);
+ *dmub = NULL;
+}
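For reference, a minimal caller sketch for the new DMUB PSR object (hypothetical call site, not part of this patch; psr_context setup omitted):

	struct dmub_psr *psr = dmub_psr_create(dc->ctx);

	if (psr && psr->funcs->setup_psr(psr, link, &psr_context))
		psr->funcs->set_psr_enable(psr, true);	/* queues DMUB_CMD__PSR_ENABLE */

	/* ... */
	dmub_psr_destroy(&psr);	/* frees the object and NULLs the caller's pointer */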
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dmub_psr.h b/drivers/gpu/drm/amd/display/dc/dce/dmub_psr.h
new file mode 100644
index 000000000000..229958de3035
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dce/dmub_psr.h
@@ -0,0 +1,47 @@
+/*
+ * Copyright 2012-16 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef _DMUB_PSR_H_
+#define _DMUB_PSR_H_
+
+#include "os_types.h"
+
+struct dmub_psr {
+ struct dc_context *ctx;
+ const struct dmub_psr_funcs *funcs;
+};
+
+struct dmub_psr_funcs {
+ void (*set_psr_enable)(struct dmub_psr *dmub, bool enable);
+ bool (*setup_psr)(struct dmub_psr *dmub, struct dc_link *link, struct psr_context *psr_context);
+ void (*get_psr_state)(uint32_t *psr_state);
+ void (*set_psr_level)(struct dmub_psr *dmub, uint16_t psr_level);
+};
+
+struct dmub_psr *dmub_psr_create(struct dc_context *ctx);
+void dmub_psr_destroy(struct dmub_psr **dmub);
+
+
+#endif /* _DMUB_PSR_H_ */
diff --git a/drivers/gpu/drm/amd/display/dc/dce100/dce100_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dce100/dce100_hw_sequencer.c
index 799d36299c9b..753cb8edd996 100644
--- a/drivers/gpu/drm/amd/display/dc/dce100/dce100_hw_sequencer.c
+++ b/drivers/gpu/drm/amd/display/dc/dce100/dce100_hw_sequencer.c
@@ -26,7 +26,6 @@
#include "dc.h"
#include "core_types.h"
#include "clk_mgr.h"
-#include "hw_sequencer.h"
#include "dce100_hw_sequencer.h"
#include "resource.h"
@@ -136,7 +135,7 @@ void dce100_hw_sequencer_construct(struct dc *dc)
{
dce110_hw_sequencer_construct(dc);
- dc->hwss.enable_display_power_gating = dce100_enable_display_power_gating;
+ dc->hwseq->funcs.enable_display_power_gating = dce100_enable_display_power_gating;
dc->hwss.prepare_bandwidth = dce100_prepare_bandwidth;
dc->hwss.optimize_bandwidth = dce100_optimize_bandwidth;
}
diff --git a/drivers/gpu/drm/amd/display/dc/dce100/dce100_hw_sequencer.h b/drivers/gpu/drm/amd/display/dc/dce100/dce100_hw_sequencer.h
index a6b80fdaa666..34518da20009 100644
--- a/drivers/gpu/drm/amd/display/dc/dce100/dce100_hw_sequencer.h
+++ b/drivers/gpu/drm/amd/display/dc/dce100/dce100_hw_sequencer.h
@@ -27,6 +27,7 @@
#define __DC_HWSS_DCE100_H__
#include "core_types.h"
+#include "hw_sequencer_private.h"
struct dc;
struct dc_state;
diff --git a/drivers/gpu/drm/amd/display/dc/dce100/dce100_resource.c b/drivers/gpu/drm/amd/display/dc/dce100/dce100_resource.c
index a5e122c721ec..8f78bf9abbca 100644
--- a/drivers/gpu/drm/amd/display/dc/dce100/dce100_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dce100/dce100_resource.c
@@ -725,7 +725,7 @@ void dce100_clock_source_destroy(struct clock_source **clk_src)
*clk_src = NULL;
}
-static void destruct(struct dce110_resource_pool *pool)
+static void dce100_resource_destruct(struct dce110_resource_pool *pool)
{
unsigned int i;
@@ -885,7 +885,7 @@ static void dce100_destroy_resource_pool(struct resource_pool **pool)
{
struct dce110_resource_pool *dce110_pool = TO_DCE110_RES_POOL(*pool);
- destruct(dce110_pool);
+ dce100_resource_destruct(dce110_pool);
kfree(dce110_pool);
*pool = NULL;
}
@@ -950,7 +950,7 @@ static const struct resource_funcs dce100_res_pool_funcs = {
.find_first_free_match_stream_enc_for_link = dce100_find_first_free_match_stream_enc_for_link
};
-static bool construct(
+static bool dce100_resource_construct(
uint8_t num_virtual_links,
struct dc *dc,
struct dce110_resource_pool *pool)
@@ -1122,7 +1122,7 @@ static bool construct(
return true;
res_create_fail:
- destruct(pool);
+ dce100_resource_destruct(pool);
return false;
}
@@ -1137,7 +1137,7 @@ struct resource_pool *dce100_create_resource_pool(
if (!pool)
return NULL;
- if (construct(num_virtual_links, dc, pool))
+ if (dce100_resource_construct(num_virtual_links, dc, pool))
return &pool->base;
kfree(pool);
diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c
index f0e837d14000..5b689273ff44 100644
--- a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c
+++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c
@@ -61,6 +61,8 @@
#include "atomfirmware.h"
+#define GAMMA_HW_POINTS_NUM 256
+
/*
* All values are in milliseconds;
* For eDP, after power-up/power/down,
@@ -268,7 +270,7 @@ static void build_prescale_params(struct ipp_prescale_params *prescale_params,
}
static bool
-dce110_set_input_transfer_func(struct pipe_ctx *pipe_ctx,
+dce110_set_input_transfer_func(struct dc *dc, struct pipe_ctx *pipe_ctx,
const struct dc_plane_state *plane_state)
{
struct input_pixel_processor *ipp = pipe_ctx->plane_res.ipp;
@@ -596,7 +598,7 @@ dce110_translate_regamma_to_hw_format(const struct dc_transfer_func *output_tf,
}
static bool
-dce110_set_output_transfer_func(struct pipe_ctx *pipe_ctx,
+dce110_set_output_transfer_func(struct dc *dc, struct pipe_ctx *pipe_ctx,
const struct dc_stream_state *stream)
{
struct transform *xfm = pipe_ctx->plane_res.xfm;
@@ -651,10 +653,9 @@ void dce110_enable_stream(struct pipe_ctx *pipe_ctx)
{
enum dc_lane_count lane_count =
pipe_ctx->stream->link->cur_link_settings.lane_count;
-
struct dc_crtc_timing *timing = &pipe_ctx->stream->timing;
struct dc_link *link = pipe_ctx->stream->link;
-
+ const struct dc *dc = link->dc;
uint32_t active_total_with_borders;
uint32_t early_control = 0;
@@ -667,7 +668,7 @@ void dce110_enable_stream(struct pipe_ctx *pipe_ctx)
link->link_enc->funcs->connect_dig_be_to_fe(link->link_enc,
pipe_ctx->stream_res.stream_enc->id, true);
- link->dc->hwss.update_info_frame(pipe_ctx);
+ dc->hwss.update_info_frame(pipe_ctx);
/* enable early control to avoid corruption on DP monitor*/
active_total_with_borders =
@@ -943,15 +944,15 @@ void dce110_edp_backlight_control(
void dce110_enable_audio_stream(struct pipe_ctx *pipe_ctx)
{
/* notify audio driver for audio modes of monitor */
- struct dc *core_dc;
+ struct dc *dc;
struct clk_mgr *clk_mgr;
unsigned int i, num_audio = 1;
if (!pipe_ctx->stream)
return;
- core_dc = pipe_ctx->stream->ctx->dc;
- clk_mgr = core_dc->clk_mgr;
+ dc = pipe_ctx->stream->ctx->dc;
+ clk_mgr = dc->clk_mgr;
if (pipe_ctx->stream_res.audio && pipe_ctx->stream_res.audio->enabled == true)
return;
@@ -959,7 +960,7 @@ void dce110_enable_audio_stream(struct pipe_ctx *pipe_ctx)
if (pipe_ctx->stream_res.audio) {
for (i = 0; i < MAX_PIPES; i++) {
/*current_state not updated yet*/
- if (core_dc->current_state->res_ctx.pipe_ctx[i].stream_res.audio != NULL)
+ if (dc->current_state->res_ctx.pipe_ctx[i].stream_res.audio != NULL)
num_audio++;
}
@@ -1047,6 +1048,7 @@ void dce110_unblank_stream(struct pipe_ctx *pipe_ctx,
struct encoder_unblank_param params = { { 0 } };
struct dc_stream_state *stream = pipe_ctx->stream;
struct dc_link *link = stream->link;
+ struct dce_hwseq *hws = link->dc->hwseq;
/* only 3 items below are used by unblank */
params.timing = pipe_ctx->stream->timing;
@@ -1056,7 +1058,7 @@ void dce110_unblank_stream(struct pipe_ctx *pipe_ctx,
pipe_ctx->stream_res.stream_enc->funcs->dp_unblank(pipe_ctx->stream_res.stream_enc, &params);
if (link->local_sink && link->local_sink->sink_signal == SIGNAL_TYPE_EDP) {
- link->dc->hwss.edp_backlight_control(link, true);
+ hws->funcs.edp_backlight_control(link, true);
}
}
@@ -1064,9 +1066,10 @@ void dce110_blank_stream(struct pipe_ctx *pipe_ctx)
{
struct dc_stream_state *stream = pipe_ctx->stream;
struct dc_link *link = stream->link;
+ struct dce_hwseq *hws = link->dc->hwseq;
if (link->local_sink && link->local_sink->sink_signal == SIGNAL_TYPE_EDP) {
- link->dc->hwss.edp_backlight_control(link, false);
+ hws->funcs.edp_backlight_control(link, false);
dc_link_set_abm_disable(link);
}
@@ -1223,7 +1226,7 @@ static void program_scaler(const struct dc *dc,
{
struct tg_color color = {0};
-#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
+#if defined(CONFIG_DRM_AMD_DC_DCN)
/* TOFPGA */
if (pipe_ctx->plane_res.xfm->funcs->transform_set_pixel_storage_depth == NULL)
return;
@@ -1322,12 +1325,11 @@ static enum dc_status apply_single_controller_ctx_to_hw(
struct dc_stream_state *stream = pipe_ctx->stream;
struct drr_params params = {0};
unsigned int event_triggers = 0;
-#if defined(CONFIG_DRM_AMD_DC_DCN2_0)
struct pipe_ctx *odm_pipe = pipe_ctx->next_odm_pipe;
-#endif
+ struct dce_hwseq *hws = dc->hwseq;
- if (dc->hwss.disable_stream_gating) {
- dc->hwss.disable_stream_gating(dc, pipe_ctx);
+ if (hws->funcs.disable_stream_gating) {
+ hws->funcs.disable_stream_gating(dc, pipe_ctx);
}
if (pipe_ctx->stream_res.audio != NULL) {
@@ -1357,10 +1359,10 @@ static enum dc_status apply_single_controller_ctx_to_hw(
/* */
/* Do not touch stream timing on seamless boot optimization. */
if (!pipe_ctx->stream->apply_seamless_boot_optimization)
- dc->hwss.enable_stream_timing(pipe_ctx, context, dc);
+ hws->funcs.enable_stream_timing(pipe_ctx, context, dc);
- if (dc->hwss.setup_vupdate_interrupt)
- dc->hwss.setup_vupdate_interrupt(pipe_ctx);
+ if (hws->funcs.setup_vupdate_interrupt)
+ hws->funcs.setup_vupdate_interrupt(dc, pipe_ctx);
params.vertical_total_min = stream->adjust.v_total_min;
params.vertical_total_max = stream->adjust.v_total_max;
@@ -1371,9 +1373,13 @@ static enum dc_status apply_single_controller_ctx_to_hw(
// DRR should set trigger event to monitor surface update event
if (stream->adjust.v_total_min != 0 && stream->adjust.v_total_max != 0)
event_triggers = 0x80;
+ /* Event triggers and num frames initialized for DRR, but can be
+ * later updated for PSR use. Note that DRR trigger events are
+ * generated regardless of whether the frame count is met.
+ */
if (pipe_ctx->stream_res.tg->funcs->set_static_screen_control)
pipe_ctx->stream_res.tg->funcs->set_static_screen_control(
- pipe_ctx->stream_res.tg, event_triggers);
+ pipe_ctx->stream_res.tg, event_triggers, 2);
if (!dc_is_virtual_signal(pipe_ctx->stream->signal))
pipe_ctx->stream_res.stream_enc->funcs->dig_connect_to_otg(
@@ -1390,7 +1396,6 @@ static enum dc_status apply_single_controller_ctx_to_hw(
pipe_ctx->stream_res.opp,
&stream->bit_depth_params,
&stream->clamping);
-#if defined(CONFIG_DRM_AMD_DC_DCN2_0)
while (odm_pipe) {
odm_pipe->stream_res.opp->funcs->opp_set_dyn_expansion(
odm_pipe->stream_res.opp,
@@ -1404,7 +1409,6 @@ static enum dc_status apply_single_controller_ctx_to_hw(
&stream->clamping);
odm_pipe = odm_pipe->next_odm_pipe;
}
-#endif
if (!stream->dpms_off)
core_link_enable_stream(context, pipe_ctx);
@@ -1438,6 +1442,9 @@ static void power_down_encoders(struct dc *dc)
if (!dc->links[i]->wa_flags.dp_keep_receiver_powered)
dp_receiver_power_ctrl(dc->links[i], false);
+ if (signal != SIGNAL_TYPE_EDP)
+ signal = SIGNAL_TYPE_NONE;
+
dc->links[i]->link_enc->funcs->disable_output(
dc->links[i]->link_enc, signal);
}
@@ -1552,9 +1559,10 @@ void dce110_enable_accelerated_mode(struct dc *dc, struct dc_state *context)
bool can_apply_edp_fast_boot = false;
bool can_apply_seamless_boot = false;
bool keep_edp_vdd_on = false;
+ struct dce_hwseq *hws = dc->hwseq;
- if (dc->hwss.init_pipes)
- dc->hwss.init_pipes(dc, context);
+ if (hws->funcs.init_pipes)
+ hws->funcs.init_pipes(dc, context);
edp_stream = get_edp_stream(context);
@@ -1591,7 +1599,7 @@ void dce110_enable_accelerated_mode(struct dc *dc, struct dc_state *context)
if (!can_apply_edp_fast_boot && !can_apply_seamless_boot) {
if (edp_link_with_sink && !keep_edp_vdd_on) {
/*turn off backlight before DP_blank and encoder powered down*/
- dc->hwss.edp_backlight_control(edp_link_with_sink, false);
+ hws->funcs.edp_backlight_control(edp_link_with_sink, false);
}
/*resume from S3, no vbios posting, no need to power down again*/
power_down_all_hw_blocks(dc);
@@ -1702,6 +1710,8 @@ static void set_drr(struct pipe_ctx **pipe_ctx,
struct drr_params params = {0};
// DRR should set trigger event to monitor surface update event
unsigned int event_triggers = 0x80;
+ // Note that DRR trigger events are generated regardless of whether the frame count is met.
+ unsigned int num_frames = 2;
params.vertical_total_max = vmax;
params.vertical_total_min = vmin;
@@ -1717,7 +1727,7 @@ static void set_drr(struct pipe_ctx **pipe_ctx,
if (vmax != 0 && vmin != 0)
pipe_ctx[i]->stream_res.tg->funcs->set_static_screen_control(
pipe_ctx[i]->stream_res.tg,
- event_triggers);
+ event_triggers, num_frames);
}
}
@@ -1734,30 +1744,31 @@ static void get_position(struct pipe_ctx **pipe_ctx,
}
static void set_static_screen_control(struct pipe_ctx **pipe_ctx,
- int num_pipes, const struct dc_static_screen_events *events)
+ int num_pipes, const struct dc_static_screen_params *params)
{
unsigned int i;
- unsigned int value = 0;
+ unsigned int triggers = 0;
- if (events->overlay_update)
- value |= 0x100;
- if (events->surface_update)
- value |= 0x80;
- if (events->cursor_update)
- value |= 0x2;
- if (events->force_trigger)
- value |= 0x1;
+ if (params->triggers.overlay_update)
+ triggers |= 0x100;
+ if (params->triggers.surface_update)
+ triggers |= 0x80;
+ if (params->triggers.cursor_update)
+ triggers |= 0x2;
+ if (params->triggers.force_trigger)
+ triggers |= 0x1;
if (num_pipes) {
struct dc *dc = pipe_ctx[0]->stream->ctx->dc;
if (dc->fbc_compressor)
- value |= 0x84;
+ triggers |= 0x84;
}
for (i = 0; i < num_pipes; i++)
pipe_ctx[i]->stream_res.tg->funcs->
- set_static_screen_control(pipe_ctx[i]->stream_res.tg, value);
+ set_static_screen_control(pipe_ctx[i]->stream_res.tg,
+ triggers, params->num_frames);
}
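Callers now pass a dc_static_screen_params instead of the bare events struct, so the frame count is no longer hardcoded. A hypothetical caller sketch (names taken from the hunk above):

	struct pipe_ctx *pipes[] = { pipe_ctx };
	struct dc_static_screen_params params = {0};

	params.triggers.surface_update = true;	/* maps to event bit 0x80 */
	params.triggers.cursor_update = true;	/* maps to event bit 0x2 */
	params.num_frames = 2;			/* timing generators clamp this to 0xFF */

	dc->hwss.set_static_screen_control(pipes, 1, &params);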
/*
@@ -2006,13 +2017,14 @@ enum dc_status dce110_apply_ctx_to_hw(
struct dc *dc,
struct dc_state *context)
{
+ struct dce_hwseq *hws = dc->hwseq;
struct dc_bios *dcb = dc->ctx->dc_bios;
enum dc_status status;
int i;
/* Reset old context */
/* look up the targets that have been removed since last commit */
- dc->hwss.reset_hw_ctx_wrap(dc, context);
+ hws->funcs.reset_hw_ctx_wrap(dc, context);
/* Skip applying if no targets */
if (context->stream_count <= 0)
@@ -2037,7 +2049,7 @@ enum dc_status dce110_apply_ctx_to_hw(
continue;
}
- dc->hwss.enable_display_power_gating(
+ hws->funcs.enable_display_power_gating(
dc, i, dc->ctx->dc_bios,
PIPE_GATING_CONTROL_DISABLE);
}
@@ -2346,19 +2358,20 @@ static void init_hw(struct dc *dc)
struct transform *xfm;
struct abm *abm;
struct dmcu *dmcu;
+ struct dce_hwseq *hws = dc->hwseq;
bp = dc->ctx->dc_bios;
for (i = 0; i < dc->res_pool->pipe_count; i++) {
xfm = dc->res_pool->transforms[i];
xfm->funcs->transform_reset(xfm);
- dc->hwss.enable_display_power_gating(
+ hws->funcs.enable_display_power_gating(
dc, i, bp,
PIPE_GATING_CONTROL_INIT);
- dc->hwss.enable_display_power_gating(
+ hws->funcs.enable_display_power_gating(
dc, i, bp,
PIPE_GATING_CONTROL_DISABLE);
- dc->hwss.enable_display_pipe_clock_gating(
+ hws->funcs.enable_display_pipe_clock_gating(
dc->ctx,
true);
}
@@ -2444,6 +2457,8 @@ static void dce110_program_front_end_for_pipe(
struct xfm_grph_csc_adjustment adjust;
struct out_csc_color_matrix tbl_entry;
unsigned int i;
+ struct dce_hwseq *hws = dc->hwseq;
+
DC_LOGGER_INIT();
memset(&tbl_entry, 0, sizeof(tbl_entry));
@@ -2502,10 +2517,10 @@ static void dce110_program_front_end_for_pipe(
if (pipe_ctx->plane_state->update_flags.bits.full_update ||
pipe_ctx->plane_state->update_flags.bits.in_transfer_func_change ||
pipe_ctx->plane_state->update_flags.bits.gamma_change)
- dc->hwss.set_input_transfer_func(pipe_ctx, pipe_ctx->plane_state);
+ hws->funcs.set_input_transfer_func(dc, pipe_ctx, pipe_ctx->plane_state);
if (pipe_ctx->plane_state->update_flags.bits.full_update)
- dc->hwss.set_output_transfer_func(pipe_ctx, pipe_ctx->stream);
+ hws->funcs.set_output_transfer_func(dc, pipe_ctx, pipe_ctx->stream);
DC_LOG_SURFACE(
"Pipe:%d %p: addr hi:0x%x, "
@@ -2608,6 +2623,7 @@ static void dce110_apply_ctx_for_surface(
static void dce110_power_down_fe(struct dc *dc, struct pipe_ctx *pipe_ctx)
{
+ struct dce_hwseq *hws = dc->hwseq;
int fe_idx = pipe_ctx->plane_res.mi ?
pipe_ctx->plane_res.mi->inst : pipe_ctx->pipe_idx;
@@ -2615,7 +2631,7 @@ static void dce110_power_down_fe(struct dc *dc, struct pipe_ctx *pipe_ctx)
if (dc->current_state->res_ctx.pipe_ctx[fe_idx].stream)
return;
- dc->hwss.enable_display_power_gating(
+ hws->funcs.enable_display_power_gating(
dc, fe_idx, dc->ctx->dc_bios, PIPE_GATING_CONTROL_ENABLE);
dc->res_pool->transforms[fe_idx]->funcs->transform_reset(
@@ -2704,14 +2720,10 @@ static const struct hw_sequencer_funcs dce110_funcs = {
.program_gamut_remap = program_gamut_remap,
.program_output_csc = program_output_csc,
.init_hw = init_hw,
- .init_pipes = init_pipes,
.apply_ctx_to_hw = dce110_apply_ctx_to_hw,
.apply_ctx_for_surface = dce110_apply_ctx_for_surface,
.update_plane_addr = update_plane_addr,
.update_pending_status = dce110_update_pending_status,
- .set_input_transfer_func = dce110_set_input_transfer_func,
- .set_output_transfer_func = dce110_set_output_transfer_func,
- .power_down = dce110_power_down,
.enable_accelerated_mode = dce110_enable_accelerated_mode,
.enable_timing_synchronization = dce110_enable_timing_synchronization,
.enable_per_frame_crtc_position_reset = dce110_enable_per_frame_crtc_position_reset,
@@ -2722,8 +2734,6 @@ static const struct hw_sequencer_funcs dce110_funcs = {
.blank_stream = dce110_blank_stream,
.enable_audio_stream = dce110_enable_audio_stream,
.disable_audio_stream = dce110_disable_audio_stream,
- .enable_display_pipe_clock_gating = enable_display_pipe_clock_gating,
- .enable_display_power_gating = dce110_enable_display_power_gating,
.disable_plane = dce110_power_down_fe,
.pipe_control_lock = dce_pipe_control_lock,
.prepare_bandwidth = dce110_prepare_bandwidth,
@@ -2731,22 +2741,33 @@ static const struct hw_sequencer_funcs dce110_funcs = {
.set_drr = set_drr,
.get_position = get_position,
.set_static_screen_control = set_static_screen_control,
- .reset_hw_ctx_wrap = dce110_reset_hw_ctx_wrap,
- .enable_stream_timing = dce110_enable_stream_timing,
- .disable_stream_gating = NULL,
- .enable_stream_gating = NULL,
.setup_stereo = NULL,
.set_avmute = dce110_set_avmute,
.wait_for_mpcc_disconnect = dce110_wait_for_mpcc_disconnect,
- .edp_backlight_control = dce110_edp_backlight_control,
.edp_power_control = dce110_edp_power_control,
.edp_wait_for_hpd_ready = dce110_edp_wait_for_hpd_ready,
.set_cursor_position = dce110_set_cursor_position,
.set_cursor_attribute = dce110_set_cursor_attribute
};
+static const struct hwseq_private_funcs dce110_private_funcs = {
+ .init_pipes = init_pipes,
+ .update_plane_addr = update_plane_addr,
+ .set_input_transfer_func = dce110_set_input_transfer_func,
+ .set_output_transfer_func = dce110_set_output_transfer_func,
+ .power_down = dce110_power_down,
+ .enable_display_pipe_clock_gating = enable_display_pipe_clock_gating,
+ .enable_display_power_gating = dce110_enable_display_power_gating,
+ .reset_hw_ctx_wrap = dce110_reset_hw_ctx_wrap,
+ .enable_stream_timing = dce110_enable_stream_timing,
+ .disable_stream_gating = NULL,
+ .enable_stream_gating = NULL,
+ .edp_backlight_control = dce110_edp_backlight_control,
+};
+
void dce110_hw_sequencer_construct(struct dc *dc)
{
dc->hwss = dce110_funcs;
+ dc->hwseq->funcs = dce110_private_funcs;
}
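The construct above shows the split this series introduces: hooks that other DC components call stay on dc->hwss (hw_sequencer_funcs), while sequencer-internal hooks move to the hwseq_private_funcs table reached through dc->hwseq->funcs. A sketch of the resulting call-site pattern (names from the hunks above):

	struct dce_hwseq *hws = dc->hwseq;

	/* was: dc->hwss.edp_backlight_control(link, true); */
	hws->funcs.edp_backlight_control(link, true);	/* private hook */

	dc->hwss.update_info_frame(pipe_ctx);	/* public hooks are unchanged */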
diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.h b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.h
index 2f9b7dbdf415..26a9c14a58b1 100644
--- a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.h
+++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.h
@@ -27,8 +27,8 @@
#define __DC_HWSS_DCE110_H__
#include "core_types.h"
+#include "hw_sequencer_private.h"
-#define GAMMA_HW_POINTS_NUM 256
struct dc;
struct dc_state;
struct dm_pp_display_configuration;
diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_resource.c b/drivers/gpu/drm/amd/display/dc/dce110/dce110_resource.c
index 83a4dbf6d76e..bf14e9ab040c 100644
--- a/drivers/gpu/drm/amd/display/dc/dce110/dce110_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_resource.c
@@ -782,7 +782,7 @@ void dce110_clock_source_destroy(struct clock_source **clk_src)
*clk_src = NULL;
}
-static void destruct(struct dce110_resource_pool *pool)
+static void dce110_resource_destruct(struct dce110_resource_pool *pool)
{
unsigned int i;
@@ -1097,6 +1097,7 @@ static struct pipe_ctx *dce110_acquire_underlay(
struct dc_stream_state *stream)
{
struct dc *dc = stream->ctx->dc;
+ struct dce_hwseq *hws = dc->hwseq;
struct resource_context *res_ctx = &context->res_ctx;
unsigned int underlay_idx = pool->underlay_pipe_index;
struct pipe_ctx *pipe_ctx = &res_ctx->pipe_ctx[underlay_idx];
@@ -1117,7 +1118,7 @@ static struct pipe_ctx *dce110_acquire_underlay(
struct tg_color black_color = {0};
struct dc_bios *dcb = dc->ctx->dc_bios;
- dc->hwss.enable_display_power_gating(
+ hws->funcs.enable_display_power_gating(
dc,
pipe_ctx->stream_res.tg->inst,
dcb, PIPE_GATING_CONTROL_DISABLE);
@@ -1161,7 +1162,7 @@ static void dce110_destroy_resource_pool(struct resource_pool **pool)
{
struct dce110_resource_pool *dce110_pool = TO_DCE110_RES_POOL(*pool);
- destruct(dce110_pool);
+ dce110_resource_destruct(dce110_pool);
kfree(dce110_pool);
*pool = NULL;
}
@@ -1313,7 +1314,7 @@ const struct resource_caps *dce110_resource_cap(
return &carrizo_resource_cap;
}
-static bool construct(
+static bool dce110_resource_construct(
uint8_t num_virtual_links,
struct dc *dc,
struct dce110_resource_pool *pool,
@@ -1492,7 +1493,7 @@ static bool construct(
return true;
res_create_fail:
- destruct(pool);
+ dce110_resource_destruct(pool);
return false;
}
@@ -1507,7 +1508,7 @@ struct resource_pool *dce110_create_resource_pool(
if (!pool)
return NULL;
- if (construct(num_virtual_links, dc, pool, asic_id))
+ if (dce110_resource_construct(num_virtual_links, dc, pool, asic_id))
return &pool->base;
kfree(pool);
diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_timing_generator.c b/drivers/gpu/drm/amd/display/dc/dce110/dce110_timing_generator.c
index 5f7c2c5641c4..1ea7db8eeb98 100644
--- a/drivers/gpu/drm/amd/display/dc/dce110/dce110_timing_generator.c
+++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_timing_generator.c
@@ -469,22 +469,27 @@ void dce110_timing_generator_set_drr(
void dce110_timing_generator_set_static_screen_control(
struct timing_generator *tg,
- uint32_t value)
+ uint32_t event_triggers,
+ uint32_t num_frames)
{
struct dce110_timing_generator *tg110 = DCE110TG_FROM_TG(tg);
uint32_t static_screen_cntl = 0;
uint32_t addr = 0;
+ // By register spec, this field only takes an 8-bit value
+ if (num_frames > 0xFF)
+ num_frames = 0xFF;
+
addr = CRTC_REG(mmCRTC_STATIC_SCREEN_CONTROL);
static_screen_cntl = dm_read_reg(tg->ctx, addr);
set_reg_field_value(static_screen_cntl,
- value,
+ event_triggers,
CRTC_STATIC_SCREEN_CONTROL,
CRTC_STATIC_SCREEN_EVENT_MASK);
set_reg_field_value(static_screen_cntl,
- 2,
+ num_frames,
CRTC_STATIC_SCREEN_CONTROL,
CRTC_STATIC_SCREEN_FRAME_COUNT);
diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_timing_generator.h b/drivers/gpu/drm/amd/display/dc/dce110/dce110_timing_generator.h
index 768ccf27ada9..d8a5ed7b485d 100644
--- a/drivers/gpu/drm/amd/display/dc/dce110/dce110_timing_generator.h
+++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_timing_generator.h
@@ -231,7 +231,8 @@ void dce110_timing_generator_set_drr(
void dce110_timing_generator_set_static_screen_control(
struct timing_generator *tg,
- uint32_t value);
+ uint32_t event_triggers,
+ uint32_t num_frames);
void dce110_timing_generator_get_crtc_scanoutpos(
struct timing_generator *tg,
diff --git a/drivers/gpu/drm/amd/display/dc/dce112/dce112_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dce112/dce112_hw_sequencer.c
index 1e4a7c13f0ed..19873ee1f78d 100644
--- a/drivers/gpu/drm/amd/display/dc/dce112/dce112_hw_sequencer.c
+++ b/drivers/gpu/drm/amd/display/dc/dce112/dce112_hw_sequencer.c
@@ -158,6 +158,6 @@ void dce112_hw_sequencer_construct(struct dc *dc)
* structure
*/
dce110_hw_sequencer_construct(dc);
- dc->hwss.enable_display_power_gating = dce112_enable_display_power_gating;
+ dc->hwseq->funcs.enable_display_power_gating = dce112_enable_display_power_gating;
}
diff --git a/drivers/gpu/drm/amd/display/dc/dce112/dce112_hw_sequencer.h b/drivers/gpu/drm/amd/display/dc/dce112/dce112_hw_sequencer.h
index e646f4a37fa2..943f1b2c5b2f 100644
--- a/drivers/gpu/drm/amd/display/dc/dce112/dce112_hw_sequencer.h
+++ b/drivers/gpu/drm/amd/display/dc/dce112/dce112_hw_sequencer.h
@@ -27,6 +27,7 @@
#define __DC_HWSS_DCE112_H__
#include "core_types.h"
+#include "hw_sequencer_private.h"
struct dc;
diff --git a/drivers/gpu/drm/amd/display/dc/dce112/dce112_resource.c b/drivers/gpu/drm/amd/display/dc/dce112/dce112_resource.c
index 97dcc5d0862b..700ad8b3e54b 100644
--- a/drivers/gpu/drm/amd/display/dc/dce112/dce112_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dce112/dce112_resource.c
@@ -744,7 +744,7 @@ void dce112_clock_source_destroy(struct clock_source **clk_src)
*clk_src = NULL;
}
-static void destruct(struct dce110_resource_pool *pool)
+static void dce112_resource_destruct(struct dce110_resource_pool *pool)
{
unsigned int i;
@@ -1013,7 +1013,7 @@ static void dce112_destroy_resource_pool(struct resource_pool **pool)
{
struct dce110_resource_pool *dce110_pool = TO_DCE110_RES_POOL(*pool);
- destruct(dce110_pool);
+ dce112_resource_destruct(dce110_pool);
kfree(dce110_pool);
*pool = NULL;
}
@@ -1186,7 +1186,7 @@ const struct resource_caps *dce112_resource_cap(
return &polaris_10_resource_cap;
}
-static bool construct(
+static bool dce112_resource_construct(
uint8_t num_virtual_links,
struct dc *dc,
struct dce110_resource_pool *pool)
@@ -1372,7 +1372,7 @@ static bool construct(
return true;
res_create_fail:
- destruct(pool);
+ dce112_resource_destruct(pool);
return false;
}
@@ -1386,7 +1386,7 @@ struct resource_pool *dce112_create_resource_pool(
if (!pool)
return NULL;
- if (construct(num_virtual_links, dc, pool))
+ if (dce112_resource_construct(num_virtual_links, dc, pool))
return &pool->base;
kfree(pool);
diff --git a/drivers/gpu/drm/amd/display/dc/dce120/dce120_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dce120/dce120_hw_sequencer.c
index 1ca30928025e..66a13aa39c95 100644
--- a/drivers/gpu/drm/amd/display/dc/dce120/dce120_hw_sequencer.c
+++ b/drivers/gpu/drm/amd/display/dc/dce120/dce120_hw_sequencer.c
@@ -265,7 +265,7 @@ void dce120_hw_sequencer_construct(struct dc *dc)
* structure
*/
dce110_hw_sequencer_construct(dc);
- dc->hwss.enable_display_power_gating = dce120_enable_display_power_gating;
+ dc->hwseq->funcs.enable_display_power_gating = dce120_enable_display_power_gating;
dc->hwss.update_dchub = dce120_update_dchub;
}
diff --git a/drivers/gpu/drm/amd/display/dc/dce120/dce120_hw_sequencer.h b/drivers/gpu/drm/amd/display/dc/dce120/dce120_hw_sequencer.h
index c51afbd0b012..bc024534732f 100644
--- a/drivers/gpu/drm/amd/display/dc/dce120/dce120_hw_sequencer.h
+++ b/drivers/gpu/drm/amd/display/dc/dce120/dce120_hw_sequencer.h
@@ -27,6 +27,7 @@
#define __DC_HWSS_DCE120_H__
#include "core_types.h"
+#include "hw_sequencer_private.h"
struct dc;
diff --git a/drivers/gpu/drm/amd/display/dc/dce120/dce120_resource.c b/drivers/gpu/drm/amd/display/dc/dce120/dce120_resource.c
index 63543f6918ff..53ab88ef71f5 100644
--- a/drivers/gpu/drm/amd/display/dc/dce120/dce120_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dce120/dce120_resource.c
@@ -63,8 +63,8 @@
#include "soc15_hw_ip.h"
#include "vega10_ip_offset.h"
#include "nbio/nbio_6_1_offset.h"
-#include "mmhub/mmhub_9_4_0_offset.h"
-#include "mmhub/mmhub_9_4_0_sh_mask.h"
+#include "mmhub/mmhub_1_0_offset.h"
+#include "mmhub/mmhub_1_0_sh_mask.h"
#include "reg_helper.h"
#include "dce100/dce100_resource.h"
@@ -587,7 +587,7 @@ static void dce120_transform_destroy(struct transform **xfm)
*xfm = NULL;
}
-static void destruct(struct dce110_resource_pool *pool)
+static void dce120_resource_destruct(struct dce110_resource_pool *pool)
{
unsigned int i;
@@ -872,7 +872,7 @@ static void dce120_destroy_resource_pool(struct resource_pool **pool)
{
struct dce110_resource_pool *dce110_pool = TO_DCE110_RES_POOL(*pool);
- destruct(dce110_pool);
+ dce120_resource_destruct(dce110_pool);
kfree(dce110_pool);
*pool = NULL;
}
@@ -1024,7 +1024,7 @@ static uint32_t read_pipe_fuses(struct dc_context *ctx)
return value;
}
-static bool construct(
+static bool dce120_resource_construct(
uint8_t num_virtual_links,
struct dc *dc,
struct dce110_resource_pool *pool)
@@ -1237,7 +1237,7 @@ controller_create_fail:
clk_src_create_fail:
res_create_fail:
- destruct(pool);
+ dce120_resource_destruct(pool);
return false;
}
@@ -1252,7 +1252,7 @@ struct resource_pool *dce120_create_resource_pool(
if (!pool)
return NULL;
- if (construct(num_virtual_links, dc, pool))
+ if (dce120_resource_construct(num_virtual_links, dc, pool))
return &pool->base;
kfree(pool);
diff --git a/drivers/gpu/drm/amd/display/dc/dce120/dce120_timing_generator.c b/drivers/gpu/drm/amd/display/dc/dce120/dce120_timing_generator.c
index 098e56962f2a..82bc4e192bbf 100644
--- a/drivers/gpu/drm/amd/display/dc/dce120/dce120_timing_generator.c
+++ b/drivers/gpu/drm/amd/display/dc/dce120/dce120_timing_generator.c
@@ -819,13 +819,18 @@ void dce120_tg_set_colors(struct timing_generator *tg,
static void dce120_timing_generator_set_static_screen_control(
struct timing_generator *tg,
- uint32_t value)
+ uint32_t event_triggers,
+ uint32_t num_frames)
{
struct dce110_timing_generator *tg110 = DCE110TG_FROM_TG(tg);
+ // By register spec, this field only takes an 8-bit value
+ if (num_frames > 0xFF)
+ num_frames = 0xFF;
+
CRTC_REG_UPDATE_2(CRTC0_CRTC_STATIC_SCREEN_CONTROL,
- CRTC_STATIC_SCREEN_EVENT_MASK, value,
- CRTC_STATIC_SCREEN_FRAME_COUNT, 2);
+ CRTC_STATIC_SCREEN_EVENT_MASK, event_triggers,
+ CRTC_STATIC_SCREEN_FRAME_COUNT, num_frames);
}
void dce120_timing_generator_set_test_pattern(
diff --git a/drivers/gpu/drm/amd/display/dc/dce80/dce80_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dce80/dce80_hw_sequencer.c
index c4543178ba20..893261c81854 100644
--- a/drivers/gpu/drm/amd/display/dc/dce80/dce80_hw_sequencer.c
+++ b/drivers/gpu/drm/amd/display/dc/dce80/dce80_hw_sequencer.c
@@ -74,7 +74,7 @@ void dce80_hw_sequencer_construct(struct dc *dc)
{
dce110_hw_sequencer_construct(dc);
- dc->hwss.enable_display_power_gating = dce100_enable_display_power_gating;
+ dc->hwseq->funcs.enable_display_power_gating = dce100_enable_display_power_gating;
dc->hwss.pipe_control_lock = dce_pipe_control_lock;
dc->hwss.prepare_bandwidth = dce100_prepare_bandwidth;
dc->hwss.optimize_bandwidth = dce100_optimize_bandwidth;
diff --git a/drivers/gpu/drm/amd/display/dc/dce80/dce80_hw_sequencer.h b/drivers/gpu/drm/amd/display/dc/dce80/dce80_hw_sequencer.h
index 7a1b31def66f..e43af832d00c 100644
--- a/drivers/gpu/drm/amd/display/dc/dce80/dce80_hw_sequencer.h
+++ b/drivers/gpu/drm/amd/display/dc/dce80/dce80_hw_sequencer.h
@@ -27,6 +27,7 @@
#define __DC_HWSS_DCE80_H__
#include "core_types.h"
+#include "hw_sequencer_private.h"
struct dc;
diff --git a/drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c b/drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c
index 3e8d4b49f279..2ad5c28c6e66 100644
--- a/drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c
@@ -773,7 +773,7 @@ static struct input_pixel_processor *dce80_ipp_create(
return &ipp->base;
}
-static void destruct(struct dce110_resource_pool *pool)
+static void dce80_resource_destruct(struct dce110_resource_pool *pool)
{
unsigned int i;
@@ -901,7 +901,7 @@ static void dce80_destroy_resource_pool(struct resource_pool **pool)
{
struct dce110_resource_pool *dce110_pool = TO_DCE110_RES_POOL(*pool);
- destruct(dce110_pool);
+ dce80_resource_destruct(dce110_pool);
kfree(dce110_pool);
*pool = NULL;
}
@@ -1093,7 +1093,7 @@ static bool dce80_construct(
return true;
res_create_fail:
- destruct(pool);
+ dce80_resource_destruct(pool);
return false;
}
@@ -1290,7 +1290,7 @@ static bool dce81_construct(
return true;
res_create_fail:
- destruct(pool);
+ dce80_resource_destruct(pool);
return false;
}
@@ -1483,7 +1483,7 @@ static bool dce83_construct(
return true;
res_create_fail:
- destruct(pool);
+ dce80_resource_destruct(pool);
return false;
}
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/Makefile b/drivers/gpu/drm/amd/display/dc/dcn10/Makefile
index 032f872be89c..62ad1a11bff9 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/Makefile
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/Makefile
@@ -22,7 +22,8 @@
#
# Makefile for DCN.
-DCN10 = dcn10_resource.o dcn10_ipp.o dcn10_hw_sequencer.o dcn10_hw_sequencer_debug.o \
+DCN10 = dcn10_init.o dcn10_resource.o dcn10_ipp.o dcn10_hw_sequencer.o \
+ dcn10_hw_sequencer_debug.o \
dcn10_dpp.o dcn10_opp.o dcn10_optc.o \
dcn10_hubp.o dcn10_mpc.o \
dcn10_dpp_dscl.o dcn10_dpp_cm.o dcn10_cm_common.o \
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.c
index 997e9582edc7..0e682b5aa3eb 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.c
@@ -290,12 +290,8 @@ void dpp1_cnv_setup (
enum surface_pixel_format format,
enum expansion_mode mode,
struct dc_csc_transform input_csc_color_matrix,
-#ifdef CONFIG_DRM_AMD_DC_DCN2_0
enum dc_color_space input_color_space,
struct cnv_alpha_2bit_lut *alpha_2bit_lut)
-#else
- enum dc_color_space input_color_space)
-#endif
{
uint32_t pixel_format;
uint32_t alpha_en;
@@ -542,11 +538,9 @@ static const struct dpp_funcs dcn10_dpp_funcs = {
.set_optional_cursor_attributes = dpp1_cnv_set_optional_cursor_attributes,
.dpp_dppclk_control = dpp1_dppclk_control,
.dpp_set_hdr_multiplier = dpp1_set_hdr_multiplier,
-#if defined(CONFIG_DRM_AMD_DC_DCN2_0)
.dpp_program_blnd_lut = NULL,
.dpp_program_shaper_lut = NULL,
.dpp_program_3dlut = NULL
-#endif
};
static struct dpp_caps dcn10_dpp_cap = {
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.h b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.h
index 1d4a7d640334..2edf566b3a72 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.h
@@ -1486,12 +1486,8 @@ void dpp1_cnv_setup (
enum surface_pixel_format format,
enum expansion_mode mode,
struct dc_csc_transform input_csc_color_matrix,
-#ifdef CONFIG_DRM_AMD_DC_DCN2_0
enum dc_color_space input_color_space,
struct cnv_alpha_2bit_lut *alpha_2bit_lut);
-#else
- enum dc_color_space input_color_space);
-#endif
void dpp1_full_bypass(struct dpp *dpp_base);
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp_cm.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp_cm.c
index aa0c7a7d13a0..4d3f7d5e1473 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp_cm.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp_cm.c
@@ -88,26 +88,6 @@ enum dscl_mode_sel {
DSCL_MODE_DSCL_BYPASS = 6
};
-static const struct dpp_input_csc_matrix dpp_input_csc_matrix[] = {
- {COLOR_SPACE_SRGB,
- {0x2000, 0, 0, 0, 0, 0x2000, 0, 0, 0, 0, 0x2000, 0} },
- {COLOR_SPACE_SRGB_LIMITED,
- {0x2000, 0, 0, 0, 0, 0x2000, 0, 0, 0, 0, 0x2000, 0} },
- {COLOR_SPACE_YCBCR601,
- {0x2cdd, 0x2000, 0, 0xe991, 0xe926, 0x2000, 0xf4fd, 0x10ef,
- 0, 0x2000, 0x38b4, 0xe3a6} },
- {COLOR_SPACE_YCBCR601_LIMITED,
- {0x3353, 0x2568, 0, 0xe400, 0xe5dc, 0x2568, 0xf367, 0x1108,
- 0, 0x2568, 0x40de, 0xdd3a} },
- {COLOR_SPACE_YCBCR709,
- {0x3265, 0x2000, 0, 0xe6ce, 0xf105, 0x2000, 0xfa01, 0xa7d, 0,
- 0x2000, 0x3b61, 0xe24f} },
-
- {COLOR_SPACE_YCBCR709_LIMITED,
- {0x39a6, 0x2568, 0, 0xe0d6, 0xeedd, 0x2568, 0xf925, 0x9a8, 0,
- 0x2568, 0x43ee, 0xdbb2} }
-};
-
static void program_gamut_remap(
struct dcn10_dpp *dpp,
const uint16_t *regval,
@@ -352,6 +332,8 @@ void dpp1_cm_program_regamma_lut(struct dpp *dpp_base,
uint32_t i;
struct dcn10_dpp *dpp = TO_DCN10_DPP(dpp_base);
+ REG_SEQ_START();
+
for (i = 0 ; i < num; i++) {
REG_SET(CM_RGAM_LUT_DATA, 0, CM_RGAM_LUT_DATA, rgb[i].red_reg);
REG_SET(CM_RGAM_LUT_DATA, 0, CM_RGAM_LUT_DATA, rgb[i].green_reg);
@@ -626,10 +608,16 @@ void dpp1_set_degamma(
case IPP_DEGAMMA_MODE_HW_xvYCC:
REG_UPDATE(CM_DGAM_CONTROL, CM_DGAM_LUT_MODE, 2);
break;
+ case IPP_DEGAMMA_MODE_USER_PWL:
+ REG_UPDATE(CM_DGAM_CONTROL, CM_DGAM_LUT_MODE, 3);
+ break;
default:
BREAK_TO_DEBUGGER();
break;
}
+
+ REG_SEQ_SUBMIT();
+ REG_SEQ_WAIT_DONE();
}
void dpp1_degamma_ram_select(
@@ -731,10 +719,8 @@ void dpp1_full_bypass(struct dpp *dpp_base)
/* COLOR_KEYER_CONTROL.COLOR_KEYER_EN = 0 this should be default */
if (dpp->tf_mask->CM_BYPASS_EN)
REG_SET(CM_CONTROL, 0, CM_BYPASS_EN, 1);
-#if defined(CONFIG_DRM_AMD_DC_DCN2_0)
else
REG_SET(CM_CONTROL, 0, CM_BYPASS, 1);
-#endif
/* Setting degamma bypass for now */
REG_SET(CM_DGAM_CONTROL, 0, CM_DGAM_LUT_MODE, 0);
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp_dscl.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp_dscl.c
index d67e0abeee93..fce37c527a0b 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp_dscl.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp_dscl.c
@@ -218,14 +218,12 @@ static void dpp1_dscl_set_lb(
INTERLEAVE_EN, lb_params->interleave_en, /* Interleave source enable */
LB_DATA_FORMAT__ALPHA_EN, lb_params->alpha_en); /* Alpha enable */
}
-#if defined(CONFIG_DRM_AMD_DC_DCN2_0)
else {
/* DSCL caps: pixel data processed in float format */
REG_SET_2(LB_DATA_FORMAT, 0,
INTERLEAVE_EN, lb_params->interleave_en, /* Interleave source enable */
LB_DATA_FORMAT__ALPHA_EN, lb_params->alpha_en); /* Alpha enable */
}
-#endif
REG_SET_2(LB_MEMORY_CTRL, 0,
MEMORY_CONFIG, mem_size_config,
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dwb.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dwb.c
index 374cc9acda3b..b6391a5ead78 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dwb.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dwb.c
@@ -23,7 +23,7 @@
*
*/
-#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
+#if defined(CONFIG_DRM_AMD_DC_DCN)
#include "reg_helper.h"
#include "resource.h"
@@ -109,9 +109,7 @@ const struct dwbc_funcs dcn10_dwbc_funcs = {
.update = NULL,
.set_stereo = NULL,
.set_new_content = NULL,
-#if defined(CONFIG_DRM_AMD_DC_DCN2_0)
.set_warmup = NULL,
-#endif
.dwb_set_scaler = NULL,
};
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dwb.h b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dwb.h
index c175edd0bae7..d56ea7c8171e 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dwb.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dwb.h
@@ -24,7 +24,7 @@
#ifndef __DC_DWBC_DCN10_H__
#define __DC_DWBC_DCN10_H__
-#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
+#if defined(CONFIG_DRM_AMD_DC_DCN)
/* DCN */
#define BASE_INNER(seg) \
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubbub.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubbub.c
index a02c10e23e0d..f36a0d8cedfe 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubbub.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubbub.c
@@ -930,6 +930,9 @@ static bool hubbub1_get_dcc_compression_cap(struct hubbub *hubbub,
output->grph.rgb.max_compressed_blk_size = 64;
output->grph.rgb.independent_64b_blks = true;
break;
+ default:
+ ASSERT(false);
+ break;
}
output->capable = true;
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubbub.h b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubbub.h
index 69d903d68661..af57751253de 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubbub.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubbub.h
@@ -121,7 +121,6 @@ struct dcn_hubbub_registers {
uint32_t DCN_VM_AGP_BASE;
uint32_t DCN_VM_PROTECTION_FAULT_DEFAULT_ADDR_MSB;
uint32_t DCN_VM_PROTECTION_FAULT_DEFAULT_ADDR_LSB;
-#if defined(CONFIG_DRM_AMD_DC_DCN2_1)
uint32_t DCHUBBUB_ARB_FRAC_URG_BW_NOM_A;
uint32_t DCHUBBUB_ARB_FRAC_URG_BW_NOM_B;
uint32_t DCHUBBUB_ARB_FRAC_URG_BW_NOM_C;
@@ -140,7 +139,6 @@ struct dcn_hubbub_registers {
uint32_t DCHVM_CLK_CTRL;
uint32_t DCHVM_RIOMMU_CTRL0;
uint32_t DCHVM_RIOMMU_STAT0;
-#endif
};
/* set field name */
@@ -232,7 +230,6 @@ struct dcn_hubbub_registers {
type DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_C;\
type DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_D
-#if defined(CONFIG_DRM_AMD_DC_DCN2_1)
#define HUBBUB_HVM_REG_FIELD_LIST(type) \
type DCHUBBUB_ARB_MIN_REQ_OUTSTAND_COMMIT_THRESHOLD;\
type DCHUBBUB_ARB_VM_ROW_URGENCY_WATERMARK_A;\
@@ -278,22 +275,17 @@ struct dcn_hubbub_registers {
type HOSTVM_POWERSTATUS; \
type RIOMMU_ACTIVE; \
type HOSTVM_PREFETCH_DONE
-#endif
struct dcn_hubbub_shift {
DCN_HUBBUB_REG_FIELD_LIST(uint8_t);
HUBBUB_STUTTER_REG_FIELD_LIST(uint8_t);
-#if defined(CONFIG_DRM_AMD_DC_DCN2_1)
HUBBUB_HVM_REG_FIELD_LIST(uint8_t);
-#endif
};
struct dcn_hubbub_mask {
DCN_HUBBUB_REG_FIELD_LIST(uint32_t);
HUBBUB_STUTTER_REG_FIELD_LIST(uint32_t);
-#if defined(CONFIG_DRM_AMD_DC_DCN2_1)
HUBBUB_HVM_REG_FIELD_LIST(uint32_t);
-#endif
};
struct dc;
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.c
index 14d1be6c66e6..31b64733d693 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.c
@@ -306,7 +306,6 @@ void hubp1_program_pixel_format(
REG_UPDATE(DCSURF_SURFACE_CONFIG,
SURFACE_PIXEL_FORMAT, 12);
break;
-#if defined(CONFIG_DRM_AMD_DC_DCN2_0)
case SURFACE_PIXEL_FORMAT_GRPH_RGB111110_FIX:
REG_UPDATE(DCSURF_SURFACE_CONFIG,
SURFACE_PIXEL_FORMAT, 112);
@@ -327,7 +326,6 @@ void hubp1_program_pixel_format(
REG_UPDATE(DCSURF_SURFACE_CONFIG,
SURFACE_PIXEL_FORMAT, 119);
break;
-#endif
default:
BREAK_TO_DEBUGGER();
break;
@@ -1014,6 +1012,9 @@ void hubp1_read_state_common(struct hubp *hubp)
HUBP_TTU_DISABLE, &s->ttu_disable,
HUBP_UNDERFLOW_STATUS, &s->underflow_status);
+ REG_GET(HUBP_CLK_CNTL,
+ HUBP_CLOCK_ENABLE, &s->clock_en);
+
REG_GET(DCN_GLOBAL_TTU_CNTL,
MIN_TTU_VBLANK, &s->min_ttu_vblank);
@@ -1248,10 +1249,8 @@ static const struct hubp_funcs dcn10_hubp_funcs = {
.hubp_get_underflow_status = hubp1_get_underflow_status,
.hubp_init = hubp1_init,
-#if defined(CONFIG_DRM_AMD_DC_DCN2_0)
.dmdata_set_attributes = NULL,
.dmdata_load = NULL,
-#endif
};
/*****************************************/
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.h b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.h
index ae70d9c0aa1d..780af5b3c16f 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.h
@@ -670,6 +670,7 @@ struct dcn_hubp_state {
uint32_t sw_mode;
uint32_t dcc_en;
uint32_t blank_en;
+ uint32_t clock_en;
uint32_t underflow_status;
uint32_t ttu_disable;
uint32_t min_ttu_vblank;
@@ -728,13 +729,11 @@ void hubp1_dcc_control(struct hubp *hubp,
bool enable,
enum hubp_ind_block_size independent_64b_blks);
-#ifdef CONFIG_DRM_AMD_DC_DCN2_0
bool hubp1_program_surface_flip_and_addr(
struct hubp *hubp,
const struct dc_plane_address *address,
bool flip_immediate);
-#endif
bool hubp1_is_flip_pending(struct hubp *hubp);
void hubp1_cursor_set_attributes(
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
index eb91432621ab..f2127afb37b2 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
@@ -25,17 +25,18 @@
#include <linux/delay.h>
#include "dm_services.h"
+#include "basics/dc_common.h"
#include "core_types.h"
#include "resource.h"
#include "custom_float.h"
#include "dcn10_hw_sequencer.h"
-#include "dce110/dce110_hw_sequencer.h"
+#include "dcn10_hw_sequencer_debug.h"
#include "dce/dce_hwseq.h"
#include "abm.h"
#include "dmcu.h"
#include "dcn10_optc.h"
-#include "dcn10/dcn10_dpp.h"
-#include "dcn10/dcn10_mpc.h"
+#include "dcn10_dpp.h"
+#include "dcn10_mpc.h"
#include "timing_generator.h"
#include "opp.h"
#include "ipp.h"
@@ -49,9 +50,7 @@
#include "clk_mgr.h"
-#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT
#include "dsc.h"
-#endif
#define DC_LOGGER_INIT(logger)
@@ -68,6 +67,8 @@
#define DTN_INFO_MICRO_SEC(ref_cycle) \
print_microsec(dc_ctx, log_ctx, ref_cycle)
+#define GAMMA_HW_POINTS_NUM 256
+
void print_microsec(struct dc_context *dc_ctx,
struct dc_log_buffer_ctx *log_ctx,
uint32_t ref_cycle)
@@ -81,6 +82,33 @@ void print_microsec(struct dc_context *dc_ctx,
us_x10 % frac);
}
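+
+/* Lock or unlock the tg on every enabled pipe tree. Used further down by
+ * dcn10_apply_ctx_for_surface() to bracket interdependent updates. */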
+static void dcn10_lock_all_pipes(struct dc *dc,
+ struct dc_state *context,
+ bool lock)
+{
+ struct pipe_ctx *pipe_ctx;
+ struct timing_generator *tg;
+ int i;
+
+ for (i = 0; i < dc->res_pool->pipe_count; i++) {
+ pipe_ctx = &context->res_ctx.pipe_ctx[i];
+ tg = pipe_ctx->stream_res.tg;
+ /*
+ * Only lock the top pipe's tg to prevent redundant
+ * (un)locking. Also skip if pipe is disabled.
+ */
+ if (pipe_ctx->top_pipe ||
+ !pipe_ctx->stream || !pipe_ctx->plane_state ||
+ !tg->funcs->is_tg_enabled(tg))
+ continue;
+
+ if (lock)
+ tg->funcs->lock(tg);
+ else
+ tg->funcs->unlock(tg);
+ }
+}
+
static void log_mpc_crc(struct dc *dc,
struct dc_log_buffer_ctx *log_ctx)
{
@@ -129,9 +157,8 @@ static void dcn10_log_hubp_states(struct dc *dc, void *log_ctx)
struct resource_pool *pool = dc->res_pool;
int i;
- DTN_INFO("HUBP: format addr_hi width height"
- " rot mir sw_mode dcc_en blank_en ttu_dis underflow"
- " min_ttu_vblank qos_low_wm qos_high_wm\n");
+ DTN_INFO(
+ "HUBP: format addr_hi width height rot mir sw_mode dcc_en blank_en clock_en ttu_dis underflow min_ttu_vblank qos_low_wm qos_high_wm\n");
for (i = 0; i < pool->pipe_count; i++) {
struct hubp *hubp = pool->hubps[i];
struct dcn_hubp_state *s = &(TO_DCN10_HUBP(hubp)->state);
@@ -139,8 +166,7 @@ static void dcn10_log_hubp_states(struct dc *dc, void *log_ctx)
hubp->funcs->hubp_read_state(hubp);
if (!s->blank_en) {
- DTN_INFO("[%2d]: %5xh %6xh %5d %6d %2xh %2xh %6xh"
- " %6d %8d %7d %8xh",
+ DTN_INFO("[%2d]: %5xh %6xh %5d %6d %2xh %2xh %6xh %6d %8d %8d %7d %8xh",
hubp->inst,
s->pixel_format,
s->inuse_addr_hi,
@@ -151,6 +177,7 @@ static void dcn10_log_hubp_states(struct dc *dc, void *log_ctx)
s->sw_mode,
s->dcc_en,
s->blank_en,
+ s->clock_en,
s->ttu_disable,
s->underflow_status);
DTN_INFO_MICRO_SEC(s->min_ttu_vblank);
@@ -308,21 +335,31 @@ void dcn10_log_hw_state(struct dc *dc,
}
DTN_INFO("\n");
- DTN_INFO("OTG: v_bs v_be v_ss v_se vpol vmax vmin vmax_sel vmin_sel"
- " h_bs h_be h_ss h_se hpol htot vtot underflow\n");
+ DTN_INFO("OTG: v_bs v_be v_ss v_se vpol vmax vmin vmax_sel vmin_sel h_bs h_be h_ss h_se hpol htot vtot underflow blank_en\n");
for (i = 0; i < pool->timing_generator_count; i++) {
struct timing_generator *tg = pool->timing_generators[i];
struct dcn_otg_state s = {0};
-
+ /* Read shared OTG state registers for all DCNx */
optc1_read_otg_state(DCN10TG_FROM_TG(tg), &s);
+		/*
+		 * For DCN2 and greater, a register on the OPP is used to
+		 * determine if the CRTC is blanked instead of the OTG. So
+		 * use dpg_is_blanked() if it exists; otherwise fall back
+		 * on the OTG.
+		 *
+		 * TODO: Implement DCN-specific read_otg_state hooks.
+		 */
+ if (pool->opps[i]->funcs->dpg_is_blanked)
+ s.blank_enabled = pool->opps[i]->funcs->dpg_is_blanked(pool->opps[i]);
+ else
+ s.blank_enabled = tg->funcs->is_blanked(tg);
+
//only print if OTG master is enabled
if ((s.otg_enabled & 1) == 0)
continue;
- DTN_INFO("[%d]: %5d %5d %5d %5d %5d %5d %5d %9d %9d %5d %5d %5d"
- " %5d %5d %5d %5d %9d\n",
+ DTN_INFO("[%d]: %5d %5d %5d %5d %5d %5d %5d %9d %9d %5d %5d %5d %5d %5d %5d %5d %9d %8d\n",
tg->inst,
s.v_blank_start,
s.v_blank_end,
@@ -340,7 +377,8 @@ void dcn10_log_hw_state(struct dc *dc,
s.h_sync_a_pol,
s.h_total,
s.v_total,
- s.underflow_occurred_status);
+ s.underflow_occurred_status,
+ s.blank_enabled);
// Clear underflow for debug purposes
// We want to keep the underflow sticky bit on for longevity tests outside of the test environment.
@@ -350,7 +388,6 @@ void dcn10_log_hw_state(struct dc *dc,
}
DTN_INFO("\n");
-#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT
DTN_INFO("DSC: CLOCK_EN SLICE_WIDTH Bytes_pp\n");
for (i = 0; i < pool->res_cap->num_dsc; i++) {
struct display_stream_compressor *dsc = pool->dscs[i];
@@ -387,7 +424,7 @@ void dcn10_log_hw_state(struct dc *dc,
}
DTN_INFO("\n");
- DTN_INFO("L_ENC: DPHY_FEC_EN DPHY_FEC_READY_SHADOW DPHY_FEC_ACTIVE_STATUS\n");
+ DTN_INFO("L_ENC: DPHY_FEC_EN DPHY_FEC_READY_SHADOW DPHY_FEC_ACTIVE_STATUS DP_LINK_TRAINING_COMPLETE\n");
for (i = 0; i < dc->link_count; i++) {
struct link_encoder *lenc = dc->links[i]->link_enc;
@@ -395,16 +432,16 @@ void dcn10_log_hw_state(struct dc *dc,
if (lenc->funcs->read_state) {
lenc->funcs->read_state(lenc, &s);
- DTN_INFO("[%-3d]: %-12d %-22d %-22d\n",
+ DTN_INFO("[%-3d]: %-12d %-22d %-22d %-25d\n",
i,
s.dphy_fec_en,
s.dphy_fec_ready_shadow,
- s.dphy_fec_active_status);
+ s.dphy_fec_active_status,
+ s.dp_link_training_complete);
DTN_INFO("\n");
}
}
DTN_INFO("\n");
-#endif
DTN_INFO("\nCALCULATED Clocks: dcfclk_khz:%d dcfclk_deep_sleep_khz:%d dispclk_khz:%d\n"
"dppclk_khz:%d max_supported_dppclk_khz:%d fclk_khz:%d socclk_khz:%d\n\n",
@@ -438,14 +475,14 @@ bool dcn10_did_underflow_occur(struct dc *dc, struct pipe_ctx *pipe_ctx)
return false;
}
-static void dcn10_enable_power_gating_plane(
+void dcn10_enable_power_gating_plane(
struct dce_hwseq *hws,
bool enable)
{
- bool force_on = 1; /* disable power gating */
+ bool force_on = true; /* disable power gating */
if (enable)
- force_on = 0;
+ force_on = false;
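+	/* i.e. enable == true allows gating (force_on == false), while
+	 * enable == false forces every domain on */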
/* DCHUBP0/1/2/3 */
REG_UPDATE(DOMAIN0_PG_CONFIG, DOMAIN0_POWER_FORCEON, force_on);
@@ -460,7 +497,7 @@ static void dcn10_enable_power_gating_plane(
REG_UPDATE(DOMAIN7_PG_CONFIG, DOMAIN7_POWER_FORCEON, force_on);
}
-static void dcn10_disable_vga(
+void dcn10_disable_vga(
struct dce_hwseq *hws)
{
unsigned int in_vga1_mode = 0;
@@ -493,7 +530,7 @@ static void dcn10_disable_vga(
REG_UPDATE(VGA_TEST_CONTROL, VGA_TEST_RENDER_START, 1);
}
-static void dcn10_dpp_pg_control(
+void dcn10_dpp_pg_control(
struct dce_hwseq *hws,
unsigned int dpp_inst,
bool power_on)
@@ -545,7 +582,7 @@ static void dcn10_dpp_pg_control(
}
}
-static void dcn10_hubp_pg_control(
+void dcn10_hubp_pg_control(
struct dce_hwseq *hws,
unsigned int hubp_inst,
bool power_on)
@@ -605,8 +642,8 @@ static void power_on_plane(
if (REG(DC_IP_REQUEST_CNTL)) {
REG_SET(DC_IP_REQUEST_CNTL, 0,
IP_REQUEST_EN, 1);
- hws->ctx->dc->hwss.dpp_pg_control(hws, plane_id, true);
- hws->ctx->dc->hwss.hubp_pg_control(hws, plane_id, true);
+ hws->funcs.dpp_pg_control(hws, plane_id, true);
+ hws->funcs.hubp_pg_control(hws, plane_id, true);
REG_SET(DC_IP_REQUEST_CNTL, 0,
IP_REQUEST_EN, 0);
DC_LOG_DEBUG(
@@ -627,7 +664,7 @@ static void undo_DEGVIDCN10_253_wa(struct dc *dc)
REG_SET(DC_IP_REQUEST_CNTL, 0,
IP_REQUEST_EN, 1);
- dc->hwss.hubp_pg_control(hws, 0, false);
+ hws->funcs.hubp_pg_control(hws, 0, false);
REG_SET(DC_IP_REQUEST_CNTL, 0,
IP_REQUEST_EN, 0);
@@ -656,7 +693,7 @@ static void apply_DEGVIDCN10_253_wa(struct dc *dc)
REG_SET(DC_IP_REQUEST_CNTL, 0,
IP_REQUEST_EN, 1);
- dc->hwss.hubp_pg_control(hws, 0, true);
+ hws->funcs.hubp_pg_control(hws, 0, true);
REG_SET(DC_IP_REQUEST_CNTL, 0,
IP_REQUEST_EN, 0);
@@ -664,16 +701,16 @@ static void apply_DEGVIDCN10_253_wa(struct dc *dc)
hws->wa_state.DEGVIDCN10_253_applied = true;
}
-static void dcn10_bios_golden_init(struct dc *dc)
+void dcn10_bios_golden_init(struct dc *dc)
{
+ struct dce_hwseq *hws = dc->hwseq;
struct dc_bios *bp = dc->ctx->dc_bios;
int i;
bool allow_self_fresh_force_enable = true;
-#if defined(CONFIG_DRM_AMD_DC_DCN2_1)
- if (dc->hwss.s0i3_golden_init_wa && dc->hwss.s0i3_golden_init_wa(dc))
+ if (hws->funcs.s0i3_golden_init_wa && hws->funcs.s0i3_golden_init_wa(dc))
return;
-#endif
+
if (dc->res_pool->hubbub->funcs->is_allow_self_refresh_enabled)
allow_self_fresh_force_enable =
dc->res_pool->hubbub->funcs->is_allow_self_refresh_enabled(dc->res_pool->hubbub);
@@ -732,7 +769,7 @@ static void false_optc_underflow_wa(
tg->funcs->clear_optc_underflow(tg);
}
-static enum dc_status dcn10_enable_stream_timing(
+enum dc_status dcn10_enable_stream_timing(
struct pipe_ctx *pipe_ctx,
struct dc_state *context,
struct dc *dc)
@@ -823,6 +860,7 @@ static void dcn10_reset_back_end_for_pipe(
struct dc_state *context)
{
int i;
+ struct dc_link *link;
DC_LOGGER_INIT(dc->ctx->logger);
if (pipe_ctx->stream_res.stream_enc == NULL) {
pipe_ctx->stream = NULL;
@@ -830,8 +868,14 @@ static void dcn10_reset_back_end_for_pipe(
}
if (!IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment)) {
- /* DPMS may already disable */
- if (!pipe_ctx->stream->dpms_off)
+ link = pipe_ctx->stream->link;
+		/* DPMS may already be disabled, or dpms_off may be stale
+		 * due to the fastboot feature: when the system resumes
+		 * from S4 with only the second screen active, dpms_off is
+		 * true even though the VBIOS has lit up eDP, so check the
+		 * link status too.
+		 */
+ if (!pipe_ctx->stream->dpms_off || link->link_status.link_active)
core_link_disable_stream(pipe_ctx);
else if (pipe_ctx->stream_res.audio)
dc->hwss.disable_audio_stream(pipe_ctx);
@@ -978,8 +1022,9 @@ void dcn10_verify_allow_pstate_change_high(struct dc *dc)
}
/* trigger HW to start disconnecting the plane from the stream on the next vsync */
-void hwss1_plane_atomic_disconnect(struct dc *dc, struct pipe_ctx *pipe_ctx)
+void dcn10_plane_atomic_disconnect(struct dc *dc, struct pipe_ctx *pipe_ctx)
{
+ struct dce_hwseq *hws = dc->hwseq;
struct hubp *hubp = pipe_ctx->plane_res.hubp;
int dpp_id = pipe_ctx->plane_res.dpp->inst;
struct mpc *mpc = dc->res_pool->mpc;
@@ -1004,10 +1049,10 @@ void hwss1_plane_atomic_disconnect(struct dc *dc, struct pipe_ctx *pipe_ctx)
hubp->funcs->hubp_disconnect(hubp);
if (dc->debug.sanity_checks)
- dcn10_verify_allow_pstate_change_high(dc);
+ hws->funcs.verify_allow_pstate_change_high(dc);
}
-static void dcn10_plane_atomic_power_down(struct dc *dc,
+void dcn10_plane_atomic_power_down(struct dc *dc,
struct dpp *dpp,
struct hubp *hubp)
{
@@ -1017,8 +1062,8 @@ static void dcn10_plane_atomic_power_down(struct dc *dc,
if (REG(DC_IP_REQUEST_CNTL)) {
REG_SET(DC_IP_REQUEST_CNTL, 0,
IP_REQUEST_EN, 1);
- dc->hwss.dpp_pg_control(hws, dpp->inst, false);
- dc->hwss.hubp_pg_control(hws, hubp->inst, false);
+ hws->funcs.dpp_pg_control(hws, dpp->inst, false);
+ hws->funcs.hubp_pg_control(hws, hubp->inst, false);
dpp->funcs->dpp_reset(dpp);
REG_SET(DC_IP_REQUEST_CNTL, 0,
IP_REQUEST_EN, 0);
@@ -1030,8 +1075,9 @@ static void dcn10_plane_atomic_power_down(struct dc *dc,
/* disable HW used by plane.
* note: cannot disable until disconnect is complete
*/
-static void dcn10_plane_atomic_disable(struct dc *dc, struct pipe_ctx *pipe_ctx)
+void dcn10_plane_atomic_disable(struct dc *dc, struct pipe_ctx *pipe_ctx)
{
+ struct dce_hwseq *hws = dc->hwseq;
struct hubp *hubp = pipe_ctx->plane_res.hubp;
struct dpp *dpp = pipe_ctx->plane_res.dpp;
int opp_id = hubp->opp_id;
@@ -1050,7 +1096,7 @@ static void dcn10_plane_atomic_disable(struct dc *dc, struct pipe_ctx *pipe_ctx)
hubp->power_gated = true;
dc->optimized_required = false; /* We're powering off, no need to optimize */
- dc->hwss.plane_atomic_power_down(dc,
+ hws->funcs.plane_atomic_power_down(dc,
pipe_ctx->plane_res.dpp,
pipe_ctx->plane_res.hubp);
@@ -1062,14 +1108,15 @@ static void dcn10_plane_atomic_disable(struct dc *dc, struct pipe_ctx *pipe_ctx)
pipe_ctx->plane_state = NULL;
}
-static void dcn10_disable_plane(struct dc *dc, struct pipe_ctx *pipe_ctx)
+void dcn10_disable_plane(struct dc *dc, struct pipe_ctx *pipe_ctx)
{
+ struct dce_hwseq *hws = dc->hwseq;
DC_LOGGER_INIT(dc->ctx->logger);
if (!pipe_ctx->plane_res.hubp || pipe_ctx->plane_res.hubp->power_gated)
return;
- dc->hwss.plane_atomic_disable(dc, pipe_ctx);
+ hws->funcs.plane_atomic_disable(dc, pipe_ctx);
apply_DEGVIDCN10_253_wa(dc);
@@ -1077,9 +1124,10 @@ static void dcn10_disable_plane(struct dc *dc, struct pipe_ctx *pipe_ctx)
pipe_ctx->pipe_idx);
}
-static void dcn10_init_pipes(struct dc *dc, struct dc_state *context)
+void dcn10_init_pipes(struct dc *dc, struct dc_state *context)
{
int i;
+ struct dce_hwseq *hws = dc->hwseq;
bool can_apply_seamless_boot = false;
for (i = 0; i < context->stream_count; i++) {
@@ -1104,8 +1152,8 @@ static void dcn10_init_pipes(struct dc *dc, struct dc_state *context)
* command table.
*/
if (tg->funcs->is_tg_enabled(tg)) {
- if (dc->hwss.init_blank != NULL) {
- dc->hwss.init_blank(dc, tg);
+ if (hws->funcs.init_blank != NULL) {
+ hws->funcs.init_blank(dc, tg);
tg->funcs->lock(tg);
} else {
tg->funcs->lock(tg);
@@ -1115,7 +1163,8 @@ static void dcn10_init_pipes(struct dc *dc, struct dc_state *context)
}
}
- for (i = 0; i < dc->res_pool->pipe_count; i++) {
+	/* num_opp will be equal to the number of MPCCs */
+ for (i = 0; i < dc->res_pool->res_cap->num_opp; i++) {
struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
/* Cannot reset the MPC mux if seamless boot */
@@ -1139,8 +1188,14 @@ static void dcn10_init_pipes(struct dc *dc, struct dc_state *context)
if (can_apply_seamless_boot &&
pipe_ctx->stream != NULL &&
pipe_ctx->stream_res.tg->funcs->is_tg_enabled(
- pipe_ctx->stream_res.tg))
+ pipe_ctx->stream_res.tg)) {
+			// Enable double buffering for OTG_BLANK, whether or not
+			// seamless boot is enabled, to suppress global sync
+			// signals when the OTG is blanked. This prevents the
+			// pipe from requesting data while in PSR.
+ tg->funcs->tg_init(tg);
continue;
+ }
/* Disable on the current state so the new one isn't cleared. */
pipe_ctx = &dc->current_state->res_ctx.pipe_ctx[i];
@@ -1162,7 +1217,7 @@ static void dcn10_init_pipes(struct dc *dc, struct dc_state *context)
dc->res_pool->opps[i]->mpcc_disconnect_pending[pipe_ctx->plane_res.mpcc_inst] = true;
pipe_ctx->stream_res.opp = dc->res_pool->opps[i];
- dc->hwss.plane_atomic_disconnect(dc, pipe_ctx);
+ hws->funcs.plane_atomic_disconnect(dc, pipe_ctx);
if (tg->funcs->is_tg_enabled(tg))
tg->funcs->unlock(tg);
@@ -1176,7 +1231,7 @@ static void dcn10_init_pipes(struct dc *dc, struct dc_state *context)
}
}
-static void dcn10_init_hw(struct dc *dc)
+void dcn10_init_hw(struct dc *dc)
{
int i;
struct abm *abm = dc->res_pool->abm;
@@ -1208,15 +1263,15 @@ static void dcn10_init_hw(struct dc *dc)
}
//Enable ability to power gate / don't force power on permanently
- dc->hwss.enable_power_gating_plane(hws, true);
+ hws->funcs.enable_power_gating_plane(hws, true);
return;
}
if (!dcb->funcs->is_accelerated_mode(dcb))
- dc->hwss.disable_vga(dc->hwseq);
+ hws->funcs.disable_vga(dc->hwseq);
- dc->hwss.bios_golden_init(dc);
+ hws->funcs.bios_golden_init(dc);
if (dc->ctx->dc_bios->fw_info_valid) {
res_pool->ref_clocks.xtalin_clock_inKhz =
dc->ctx->dc_bios->fw_info.pll_info.crystal_frequency;
@@ -1258,11 +1313,9 @@ static void dcn10_init_hw(struct dc *dc)
}
/* Power gate DSCs */
-#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT
for (i = 0; i < res_pool->res_cap->num_dsc; i++)
- if (dc->hwss.dsc_pg_control != NULL)
- dc->hwss.dsc_pg_control(hws, res_pool->dscs[i]->inst, false);
-#endif
+ if (hws->funcs.dsc_pg_control != NULL)
+ hws->funcs.dsc_pg_control(hws, res_pool->dscs[i]->inst, false);
/* If taking control over from VBIOS, we may want to optimize our first
* mode set, so we need to skip powering down pipes until we know which
@@ -1271,7 +1324,7 @@ static void dcn10_init_hw(struct dc *dc)
* everything down.
*/
if (dcb->funcs->is_accelerated_mode(dcb) || dc->config.power_down_display_on_boot) {
- dc->hwss.init_pipes(dc, dc->current_state);
+ hws->funcs.init_pipes(dc, dc->current_state);
}
for (i = 0; i < res_pool->audio_count; i++) {
@@ -1285,7 +1338,7 @@ static void dcn10_init_hw(struct dc *dc)
abm->funcs->abm_init(abm);
}
- if (dmcu != NULL)
+ if (dmcu != NULL && !dmcu->auto_load_dmcu)
dmcu->funcs->dmcu_init(dmcu);
if (abm != NULL && dmcu != NULL)
@@ -1303,18 +1356,19 @@ static void dcn10_init_hw(struct dc *dc)
REG_UPDATE(DCFCLK_CNTL, DCFCLK_GATE_DIS, 0);
}
- dc->hwss.enable_power_gating_plane(dc->hwseq, true);
+ hws->funcs.enable_power_gating_plane(dc->hwseq, true);
if (dc->clk_mgr->funcs->notify_wm_ranges)
dc->clk_mgr->funcs->notify_wm_ranges(dc->clk_mgr);
}
-static void dcn10_reset_hw_ctx_wrap(
+void dcn10_reset_hw_ctx_wrap(
struct dc *dc,
struct dc_state *context)
{
int i;
+ struct dce_hwseq *hws = dc->hwseq;
/* Reset Back End*/
for (i = dc->res_pool->pipe_count - 1; i >= 0 ; i--) {
@@ -1333,8 +1387,8 @@ static void dcn10_reset_hw_ctx_wrap(
struct clock_source *old_clk = pipe_ctx_old->clock_source;
dcn10_reset_back_end_for_pipe(dc, pipe_ctx_old, dc->current_state);
- if (dc->hwss.enable_stream_gating)
- dc->hwss.enable_stream_gating(dc, pipe_ctx);
+ if (hws->funcs.enable_stream_gating)
+ hws->funcs.enable_stream_gating(dc, pipe_ctx);
if (old_clk)
old_clk->funcs->cs_power_down(old_clk);
}
@@ -1367,9 +1421,7 @@ static bool patch_address_for_sbs_tb_stereo(
return false;
}
-
-
-static void dcn10_update_plane_addr(const struct dc *dc, struct pipe_ctx *pipe_ctx)
+void dcn10_update_plane_addr(const struct dc *dc, struct pipe_ctx *pipe_ctx)
{
bool addr_patched = false;
PHYSICAL_ADDRESS_LOC addr;
@@ -1394,8 +1446,8 @@ static void dcn10_update_plane_addr(const struct dc *dc, struct pipe_ctx *pipe_c
pipe_ctx->plane_state->address.grph_stereo.left_addr = addr;
}
-static bool dcn10_set_input_transfer_func(struct pipe_ctx *pipe_ctx,
- const struct dc_plane_state *plane_state)
+bool dcn10_set_input_transfer_func(struct dc *dc, struct pipe_ctx *pipe_ctx,
+ const struct dc_plane_state *plane_state)
{
struct dpp *dpp_base = pipe_ctx->plane_res.dpp;
const struct dc_transfer_func *tf = NULL;
@@ -1427,6 +1479,11 @@ static bool dcn10_set_input_transfer_func(struct pipe_ctx *pipe_ctx,
dpp_base->funcs->dpp_set_degamma(dpp_base, IPP_DEGAMMA_MODE_BYPASS);
break;
case TRANSFER_FUNCTION_PQ:
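+		/* PQ is programmed as a user PWL: translate the transfer
+		 * function into degamma_params and program that */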
+ dpp_base->funcs->dpp_set_degamma(dpp_base, IPP_DEGAMMA_MODE_USER_PWL);
+ cm_helper_translate_curve_to_degamma_hw_format(tf, &dpp_base->degamma_params);
+ dpp_base->funcs->dpp_program_degamma_pwl(dpp_base, &dpp_base->degamma_params);
+ result = true;
+ break;
default:
result = false;
break;
@@ -1472,9 +1529,8 @@ static void log_tf(struct dc_context *ctx,
}
}
-static bool
-dcn10_set_output_transfer_func(struct pipe_ctx *pipe_ctx,
- const struct dc_stream_state *stream)
+bool dcn10_set_output_transfer_func(struct dc *dc, struct pipe_ctx *pipe_ctx,
+ const struct dc_stream_state *stream)
{
struct dpp *dpp = pipe_ctx->plane_res.dpp;
@@ -1510,11 +1566,13 @@ dcn10_set_output_transfer_func(struct pipe_ctx *pipe_ctx,
return true;
}
-static void dcn10_pipe_control_lock(
+void dcn10_pipe_control_lock(
struct dc *dc,
struct pipe_ctx *pipe,
bool lock)
{
+ struct dce_hwseq *hws = dc->hwseq;
+
/* use TG master update lock to lock everything on the TG
	 * therefore only the top pipe needs to lock
*/
@@ -1522,7 +1580,7 @@ static void dcn10_pipe_control_lock(
return;
if (dc->debug.sanity_checks)
- dcn10_verify_allow_pstate_change_high(dc);
+ hws->funcs.verify_allow_pstate_change_high(dc);
if (lock)
pipe->stream_res.tg->funcs->lock(pipe->stream_res.tg);
@@ -1530,7 +1588,7 @@ static void dcn10_pipe_control_lock(
pipe->stream_res.tg->funcs->unlock(pipe->stream_res.tg);
if (dc->debug.sanity_checks)
- dcn10_verify_allow_pstate_change_high(dc);
+ hws->funcs.verify_allow_pstate_change_high(dc);
}
static bool wait_for_reset_trigger_to_occur(
@@ -1570,7 +1628,7 @@ static bool wait_for_reset_trigger_to_occur(
return rc;
}
-static void dcn10_enable_timing_synchronization(
+void dcn10_enable_timing_synchronization(
struct dc *dc,
int group_index,
int group_size,
@@ -1600,7 +1658,7 @@ static void dcn10_enable_timing_synchronization(
DC_SYNC_INFO("Sync complete\n");
}
-static void dcn10_enable_per_frame_crtc_position_reset(
+void dcn10_enable_per_frame_crtc_position_reset(
struct dc *dc,
int group_size,
struct pipe_ctx *grouped_pipes[])
@@ -1625,10 +1683,10 @@ static void dcn10_enable_per_frame_crtc_position_reset(
}
/*static void print_rq_dlg_ttu(
- struct dc *core_dc,
+ struct dc *dc,
struct pipe_ctx *pipe_ctx)
{
- DC_LOG_BANDWIDTH_CALCS(core_dc->ctx->logger,
+ DC_LOG_BANDWIDTH_CALCS(dc->ctx->logger,
"\n============== DML TTU Output parameters [%d] ==============\n"
"qos_level_low_wm: %d, \n"
"qos_level_high_wm: %d, \n"
@@ -1658,7 +1716,7 @@ static void dcn10_enable_per_frame_crtc_position_reset(
pipe_ctx->ttu_regs.refcyc_per_req_delivery_pre_c
);
- DC_LOG_BANDWIDTH_CALCS(core_dc->ctx->logger,
+ DC_LOG_BANDWIDTH_CALCS(dc->ctx->logger,
"\n============== DML DLG Output parameters [%d] ==============\n"
"refcyc_h_blank_end: %d, \n"
"dlg_vblank_end: %d, \n"
@@ -1693,7 +1751,7 @@ static void dcn10_enable_per_frame_crtc_position_reset(
pipe_ctx->dlg_regs.refcyc_per_pte_group_nom_l
);
- DC_LOG_BANDWIDTH_CALCS(core_dc->ctx->logger,
+ DC_LOG_BANDWIDTH_CALCS(dc->ctx->logger,
"\ndst_y_per_meta_row_nom_l: %d, \n"
"refcyc_per_meta_chunk_nom_l: %d, \n"
"refcyc_per_line_delivery_pre_l: %d, \n"
@@ -1723,7 +1781,7 @@ static void dcn10_enable_per_frame_crtc_position_reset(
pipe_ctx->dlg_regs.refcyc_per_line_delivery_c
);
- DC_LOG_BANDWIDTH_CALCS(core_dc->ctx->logger,
+ DC_LOG_BANDWIDTH_CALCS(dc->ctx->logger,
"\n============== DML RQ Output parameters [%d] ==============\n"
"chunk_size: %d \n"
"min_chunk_size: %d \n"
@@ -1838,7 +1896,7 @@ static void dcn10_enable_plane(
struct dce_hwseq *hws = dc->hwseq;
if (dc->debug.sanity_checks) {
- dcn10_verify_allow_pstate_change_high(dc);
+ hws->funcs.verify_allow_pstate_change_high(dc);
}
undo_DEGVIDCN10_253_wa(dc);
@@ -1895,11 +1953,11 @@ static void dcn10_enable_plane(
dcn10_program_pte_vm(hws, pipe_ctx->plane_res.hubp);
if (dc->debug.sanity_checks) {
- dcn10_verify_allow_pstate_change_high(dc);
+ hws->funcs.verify_allow_pstate_change_high(dc);
}
}
-static void dcn10_program_gamut_remap(struct pipe_ctx *pipe_ctx)
+void dcn10_program_gamut_remap(struct pipe_ctx *pipe_ctx)
{
int i = 0;
struct dpp_grph_csc_adjustment adjust;
@@ -1947,7 +2005,7 @@ static void dcn10_set_csc_adjustment_rgb_mpo_fix(struct pipe_ctx *pipe_ctx, uint
matrix[11] = rgb_bias;
}
-static void dcn10_program_output_csc(struct dc *dc,
+void dcn10_program_output_csc(struct dc *dc,
struct pipe_ctx *pipe_ctx,
enum dc_color_space colorspace,
uint16_t *matrix,
@@ -1979,57 +2037,6 @@ static void dcn10_program_output_csc(struct dc *dc,
}
}
-bool is_lower_pipe_tree_visible(struct pipe_ctx *pipe_ctx)
-{
- if (pipe_ctx->plane_state && pipe_ctx->plane_state->visible)
- return true;
- if (pipe_ctx->bottom_pipe && is_lower_pipe_tree_visible(pipe_ctx->bottom_pipe))
- return true;
- return false;
-}
-
-bool is_upper_pipe_tree_visible(struct pipe_ctx *pipe_ctx)
-{
- if (pipe_ctx->plane_state && pipe_ctx->plane_state->visible)
- return true;
- if (pipe_ctx->top_pipe && is_upper_pipe_tree_visible(pipe_ctx->top_pipe))
- return true;
- return false;
-}
-
-bool is_pipe_tree_visible(struct pipe_ctx *pipe_ctx)
-{
- if (pipe_ctx->plane_state && pipe_ctx->plane_state->visible)
- return true;
- if (pipe_ctx->top_pipe && is_upper_pipe_tree_visible(pipe_ctx->top_pipe))
- return true;
- if (pipe_ctx->bottom_pipe && is_lower_pipe_tree_visible(pipe_ctx->bottom_pipe))
- return true;
- return false;
-}
-
-bool is_rgb_cspace(enum dc_color_space output_color_space)
-{
- switch (output_color_space) {
- case COLOR_SPACE_SRGB:
- case COLOR_SPACE_SRGB_LIMITED:
- case COLOR_SPACE_2020_RGB_FULLRANGE:
- case COLOR_SPACE_2020_RGB_LIMITEDRANGE:
- case COLOR_SPACE_ADOBERGB:
- return true;
- case COLOR_SPACE_YCBCR601:
- case COLOR_SPACE_YCBCR709:
- case COLOR_SPACE_YCBCR601_LIMITED:
- case COLOR_SPACE_YCBCR709_LIMITED:
- case COLOR_SPACE_2020_YCBCR:
- return false;
- default:
- /* Add a case to switch */
- BREAK_TO_DEBUGGER();
- return false;
- }
-}
-
void dcn10_get_surface_visual_confirm_color(
const struct pipe_ctx *pipe_ctx,
struct tg_color *color)
@@ -2103,70 +2110,7 @@ void dcn10_get_hdr_visual_confirm_color(
}
}
-static uint16_t fixed_point_to_int_frac(
- struct fixed31_32 arg,
- uint8_t integer_bits,
- uint8_t fractional_bits)
-{
- int32_t numerator;
- int32_t divisor = 1 << fractional_bits;
-
- uint16_t result;
-
- uint16_t d = (uint16_t)dc_fixpt_floor(
- dc_fixpt_abs(
- arg));
-
- if (d <= (uint16_t)(1 << integer_bits) - (1 / (uint16_t)divisor))
- numerator = (uint16_t)dc_fixpt_floor(
- dc_fixpt_mul_int(
- arg,
- divisor));
- else {
- numerator = dc_fixpt_floor(
- dc_fixpt_sub(
- dc_fixpt_from_int(
- 1LL << integer_bits),
- dc_fixpt_recip(
- dc_fixpt_from_int(
- divisor))));
- }
-
- if (numerator >= 0)
- result = (uint16_t)numerator;
- else
- result = (uint16_t)(
- (1 << (integer_bits + fractional_bits + 1)) + numerator);
-
- if ((result != 0) && dc_fixpt_lt(
- arg, dc_fixpt_zero))
- result |= 1 << (integer_bits + fractional_bits);
-
- return result;
-}
-
-void dcn10_build_prescale_params(struct dc_bias_and_scale *bias_and_scale,
- const struct dc_plane_state *plane_state)
-{
- if (plane_state->format >= SURFACE_PIXEL_FORMAT_VIDEO_BEGIN
- && plane_state->format != SURFACE_PIXEL_FORMAT_INVALID
- && plane_state->input_csc_color_matrix.enable_adjustment
- && plane_state->coeff_reduction_factor.value != 0) {
- bias_and_scale->scale_blue = fixed_point_to_int_frac(
- dc_fixpt_mul(plane_state->coeff_reduction_factor,
- dc_fixpt_from_fraction(256, 255)),
- 2,
- 13);
- bias_and_scale->scale_red = bias_and_scale->scale_blue;
- bias_and_scale->scale_green = bias_and_scale->scale_blue;
- } else {
- bias_and_scale->scale_blue = 0x2000;
- bias_and_scale->scale_red = 0x2000;
- bias_and_scale->scale_green = 0x2000;
- }
-}
-
-static void update_dpp(struct dpp *dpp, struct dc_plane_state *plane_state)
+static void dcn10_update_dpp(struct dpp *dpp, struct dc_plane_state *plane_state)
{
struct dc_bias_and_scale bns_params = {0};
@@ -2175,21 +2119,18 @@ static void update_dpp(struct dpp *dpp, struct dc_plane_state *plane_state)
plane_state->format,
EXPANSION_MODE_ZERO,
plane_state->input_csc_color_matrix,
-#ifdef CONFIG_DRM_AMD_DC_DCN2_0
plane_state->color_space,
NULL);
-#else
- plane_state->color_space);
-#endif
//set scale and bias registers
- dcn10_build_prescale_params(&bns_params, plane_state);
+ build_prescale_params(&bns_params, plane_state);
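+	/* shared helper from basics/dc_common.h (included above) */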
if (dpp->funcs->dpp_program_bias_and_scale)
dpp->funcs->dpp_program_bias_and_scale(dpp, &bns_params);
}
-static void dcn10_update_mpcc(struct dc *dc, struct pipe_ctx *pipe_ctx)
+void dcn10_update_mpcc(struct dc *dc, struct pipe_ctx *pipe_ctx)
{
+ struct dce_hwseq *hws = dc->hwseq;
struct hubp *hubp = pipe_ctx->plane_res.hubp;
struct mpcc_blnd_cfg blnd_cfg = {{0}};
bool per_pixel_alpha = pipe_ctx->plane_state->per_pixel_alpha && pipe_ctx->bottom_pipe;
@@ -2199,10 +2140,10 @@ static void dcn10_update_mpcc(struct dc *dc, struct pipe_ctx *pipe_ctx)
struct mpc_tree *mpc_tree_params = &(pipe_ctx->stream_res.opp->mpc_tree_params);
if (dc->debug.visual_confirm == VISUAL_CONFIRM_HDR) {
- dcn10_get_hdr_visual_confirm_color(
+ hws->funcs.get_hdr_visual_confirm_color(
pipe_ctx, &blnd_cfg.black_color);
} else if (dc->debug.visual_confirm == VISUAL_CONFIRM_SURFACE) {
- dcn10_get_surface_visual_confirm_color(
+ hws->funcs.get_surface_visual_confirm_color(
pipe_ctx, &blnd_cfg.black_color);
} else {
color_space_to_black_color(
@@ -2284,11 +2225,12 @@ static void update_scaler(struct pipe_ctx *pipe_ctx)
pipe_ctx->plane_res.dpp, &pipe_ctx->plane_res.scl_data);
}
-void update_dchubp_dpp(
+static void dcn10_update_dchubp_dpp(
struct dc *dc,
struct pipe_ctx *pipe_ctx,
struct dc_state *context)
{
+ struct dce_hwseq *hws = dc->hwseq;
struct hubp *hubp = pipe_ctx->plane_res.hubp;
struct dpp *dpp = pipe_ctx->plane_res.dpp;
struct dc_plane_state *plane_state = pipe_ctx->plane_state;
@@ -2342,12 +2284,12 @@ void update_dchubp_dpp(
if (plane_state->update_flags.bits.full_update ||
plane_state->update_flags.bits.bpp_change)
- update_dpp(dpp, plane_state);
+ dcn10_update_dpp(dpp, plane_state);
if (plane_state->update_flags.bits.full_update ||
plane_state->update_flags.bits.per_pixel_alpha_change ||
plane_state->update_flags.bits.global_alpha_change)
- dc->hwss.update_mpcc(dc, pipe_ctx);
+ hws->funcs.update_mpcc(dc, pipe_ctx);
if (plane_state->update_flags.bits.full_update ||
plane_state->update_flags.bits.per_pixel_alpha_change ||
@@ -2407,13 +2349,13 @@ void update_dchubp_dpp(
hubp->power_gated = false;
- dc->hwss.update_plane_addr(dc, pipe_ctx);
+ hws->funcs.update_plane_addr(dc, pipe_ctx);
if (is_pipe_tree_visible(pipe_ctx))
hubp->funcs->set_blank(hubp, false);
}
-static void dcn10_blank_pixel_data(
+void dcn10_blank_pixel_data(
struct dc *dc,
struct pipe_ctx *pipe_ctx,
bool blank)
@@ -2456,10 +2398,9 @@ static void dcn10_blank_pixel_data(
}
}
-void set_hdr_multiplier(struct pipe_ctx *pipe_ctx)
+void dcn10_set_hdr_multiplier(struct pipe_ctx *pipe_ctx)
{
- struct fixed31_32 multiplier = dc_fixpt_from_fraction(
- pipe_ctx->plane_state->sdr_white_level, 80);
+ struct fixed31_32 multiplier = pipe_ctx->plane_state->hdr_mult;
uint32_t hw_mult = 0x1f000; // 1.0 default multiplier
struct custom_float_format fmt;
@@ -2467,7 +2408,8 @@ void set_hdr_multiplier(struct pipe_ctx *pipe_ctx)
fmt.mantissa_bits = 12;
fmt.sign = true;
- if (pipe_ctx->plane_state->sdr_white_level > 80)
+
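+	/* an hdr_mult of 0 means "not set"; keep the 1.0 default in that case */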
+ if (!dc_fixpt_eq(multiplier, dc_fixpt_from_int(0))) // check != 0
convert_to_custom_float_format(multiplier, &fmt, &hw_mult);
pipe_ctx->plane_res.dpp->funcs->dpp_set_hdr_multiplier(
@@ -2479,17 +2421,19 @@ void dcn10_program_pipe(
struct pipe_ctx *pipe_ctx,
struct dc_state *context)
{
+ struct dce_hwseq *hws = dc->hwseq;
+
if (pipe_ctx->plane_state->update_flags.bits.full_update)
dcn10_enable_plane(dc, pipe_ctx, context);
- update_dchubp_dpp(dc, pipe_ctx, context);
+ dcn10_update_dchubp_dpp(dc, pipe_ctx, context);
- set_hdr_multiplier(pipe_ctx);
+ hws->funcs.set_hdr_multiplier(pipe_ctx);
if (pipe_ctx->plane_state->update_flags.bits.full_update ||
pipe_ctx->plane_state->update_flags.bits.in_transfer_func_change ||
pipe_ctx->plane_state->update_flags.bits.gamma_change)
- dc->hwss.set_input_transfer_func(pipe_ctx, pipe_ctx->plane_state);
+ hws->funcs.set_input_transfer_func(dc, pipe_ctx, pipe_ctx->plane_state);
	/* dcn10_translate_regamma_to_hw_format takes 750us to finish,
	 * so only do gamma programming for a full update.
@@ -2498,14 +2442,16 @@ void dcn10_program_pipe(
* doing heavy calculation and programming
*/
if (pipe_ctx->plane_state->update_flags.bits.full_update)
- dc->hwss.set_output_transfer_func(pipe_ctx, pipe_ctx->stream);
+ hws->funcs.set_output_transfer_func(dc, pipe_ctx, pipe_ctx->stream);
}
-static void program_all_pipe_in_tree(
+static void dcn10_program_all_pipe_in_tree(
struct dc *dc,
struct pipe_ctx *pipe_ctx,
struct dc_state *context)
{
+ struct dce_hwseq *hws = dc->hwseq;
+
if (pipe_ctx->top_pipe == NULL) {
bool blank = !is_pipe_tree_visible(pipe_ctx);
@@ -2519,20 +2465,20 @@ static void program_all_pipe_in_tree(
pipe_ctx->stream_res.tg->funcs->set_vtg_params(
pipe_ctx->stream_res.tg, &pipe_ctx->stream->timing);
- if (dc->hwss.setup_vupdate_interrupt)
- dc->hwss.setup_vupdate_interrupt(pipe_ctx);
+ if (hws->funcs.setup_vupdate_interrupt)
+ hws->funcs.setup_vupdate_interrupt(dc, pipe_ctx);
- dc->hwss.blank_pixel_data(dc, pipe_ctx, blank);
+ hws->funcs.blank_pixel_data(dc, pipe_ctx, blank);
}
if (pipe_ctx->plane_state != NULL)
- dcn10_program_pipe(dc, pipe_ctx, context);
+ hws->funcs.program_pipe(dc, pipe_ctx, context);
if (pipe_ctx->bottom_pipe != NULL && pipe_ctx->bottom_pipe != pipe_ctx)
- program_all_pipe_in_tree(dc, pipe_ctx->bottom_pipe, context);
+ dcn10_program_all_pipe_in_tree(dc, pipe_ctx->bottom_pipe, context);
}
-struct pipe_ctx *find_top_pipe_for_stream(
+static struct pipe_ctx *dcn10_find_top_pipe_for_stream(
struct dc *dc,
struct dc_state *context,
const struct dc_stream_state *stream)
@@ -2556,19 +2502,20 @@ struct pipe_ctx *find_top_pipe_for_stream(
return NULL;
}
-static void dcn10_apply_ctx_for_surface(
+void dcn10_apply_ctx_for_surface(
struct dc *dc,
const struct dc_stream_state *stream,
int num_planes,
struct dc_state *context)
{
+ struct dce_hwseq *hws = dc->hwseq;
int i;
struct timing_generator *tg;
uint32_t underflow_check_delay_us;
bool removed_pipe[4] = { false };
bool interdependent_update = false;
struct pipe_ctx *top_pipe_to_program =
- find_top_pipe_for_stream(dc, context, stream);
+ dcn10_find_top_pipe_for_stream(dc, context, stream);
DC_LOGGER_INIT(dc->ctx->logger);
if (!top_pipe_to_program)
@@ -2581,23 +2528,23 @@ static void dcn10_apply_ctx_for_surface(
underflow_check_delay_us = dc->debug.underflow_assert_delay_us;
- if (underflow_check_delay_us != 0xFFFFFFFF && dc->hwss.did_underflow_occur)
- ASSERT(dc->hwss.did_underflow_occur(dc, top_pipe_to_program));
+ if (underflow_check_delay_us != 0xFFFFFFFF && hws->funcs.did_underflow_occur)
+ ASSERT(hws->funcs.did_underflow_occur(dc, top_pipe_to_program));
if (interdependent_update)
- lock_all_pipes(dc, context, true);
+ dcn10_lock_all_pipes(dc, context, true);
else
dcn10_pipe_control_lock(dc, top_pipe_to_program, true);
if (underflow_check_delay_us != 0xFFFFFFFF)
udelay(underflow_check_delay_us);
- if (underflow_check_delay_us != 0xFFFFFFFF && dc->hwss.did_underflow_occur)
- ASSERT(dc->hwss.did_underflow_occur(dc, top_pipe_to_program));
+ if (underflow_check_delay_us != 0xFFFFFFFF && hws->funcs.did_underflow_occur)
+ ASSERT(hws->funcs.did_underflow_occur(dc, top_pipe_to_program));
if (num_planes == 0) {
/* OTG blank before remove all front end */
- dc->hwss.blank_pixel_data(dc, top_pipe_to_program, true);
+ hws->funcs.blank_pixel_data(dc, top_pipe_to_program, true);
}
/* Disconnect unused mpcc */
@@ -2623,7 +2570,7 @@ static void dcn10_apply_ctx_for_surface(
old_pipe_ctx->plane_state &&
old_pipe_ctx->stream_res.tg == tg) {
- dc->hwss.plane_atomic_disconnect(dc, old_pipe_ctx);
+ hws->funcs.plane_atomic_disconnect(dc, old_pipe_ctx);
removed_pipe[i] = true;
DC_LOG_DC("Reset mpcc for pipe %d\n",
@@ -2632,13 +2579,11 @@ static void dcn10_apply_ctx_for_surface(
}
if (num_planes > 0)
- program_all_pipe_in_tree(dc, top_pipe_to_program, context);
+ dcn10_program_all_pipe_in_tree(dc, top_pipe_to_program, context);
-#if defined(CONFIG_DRM_AMD_DC_DCN2_0)
/* Program secondary blending tree and writeback pipes */
- if ((stream->num_wb_info > 0) && (dc->hwss.program_all_writeback_pipes_in_tree))
- dc->hwss.program_all_writeback_pipes_in_tree(dc, stream, context);
-#endif
+ if ((stream->num_wb_info > 0) && (hws->funcs.program_all_writeback_pipes_in_tree))
+ hws->funcs.program_all_writeback_pipes_in_tree(dc, stream, context);
if (interdependent_update)
for (i = 0; i < dc->res_pool->pipe_count; i++) {
struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
@@ -2654,7 +2599,7 @@ static void dcn10_apply_ctx_for_surface(
}
if (interdependent_update)
- lock_all_pipes(dc, context, false);
+ dcn10_lock_all_pipes(dc, context, false);
else
dcn10_pipe_control_lock(dc, top_pipe_to_program, false);
@@ -2691,14 +2636,15 @@ static void dcn10_stereo_hw_frame_pack_wa(struct dc *dc, struct dc_state *contex
}
}
-static void dcn10_prepare_bandwidth(
+void dcn10_prepare_bandwidth(
struct dc *dc,
struct dc_state *context)
{
+ struct dce_hwseq *hws = dc->hwseq;
struct hubbub *hubbub = dc->res_pool->hubbub;
if (dc->debug.sanity_checks)
- dcn10_verify_allow_pstate_change_high(dc);
+ hws->funcs.verify_allow_pstate_change_high(dc);
if (!IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment)) {
if (context->stream_count == 0)
@@ -2720,17 +2666,18 @@ static void dcn10_prepare_bandwidth(
dcn_bw_notify_pplib_of_wm_ranges(dc);
if (dc->debug.sanity_checks)
- dcn10_verify_allow_pstate_change_high(dc);
+ hws->funcs.verify_allow_pstate_change_high(dc);
}
-static void dcn10_optimize_bandwidth(
+void dcn10_optimize_bandwidth(
struct dc *dc,
struct dc_state *context)
{
+ struct dce_hwseq *hws = dc->hwseq;
struct hubbub *hubbub = dc->res_pool->hubbub;
if (dc->debug.sanity_checks)
- dcn10_verify_allow_pstate_change_high(dc);
+ hws->funcs.verify_allow_pstate_change_high(dc);
if (!IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment)) {
if (context->stream_count == 0)
@@ -2752,10 +2699,10 @@ static void dcn10_optimize_bandwidth(
dcn_bw_notify_pplib_of_wm_ranges(dc);
if (dc->debug.sanity_checks)
- dcn10_verify_allow_pstate_change_high(dc);
+ hws->funcs.verify_allow_pstate_change_high(dc);
}
-static void dcn10_set_drr(struct pipe_ctx **pipe_ctx,
+void dcn10_set_drr(struct pipe_ctx **pipe_ctx,
int num_pipes, unsigned int vmin, unsigned int vmax,
unsigned int vmid, unsigned int vmid_frame_number)
{
@@ -2763,6 +2710,8 @@ static void dcn10_set_drr(struct pipe_ctx **pipe_ctx,
struct drr_params params = {0};
// DRR set trigger event mapped to OTG_TRIG_A (bit 11) for manual control flow
unsigned int event_triggers = 0x800;
+	// Note: DRR trigger events are generated regardless of whether the
+	// frame count threshold has been met.
+ unsigned int num_frames = 2;
params.vertical_total_max = vmax;
params.vertical_total_min = vmin;
@@ -2779,11 +2728,11 @@ static void dcn10_set_drr(struct pipe_ctx **pipe_ctx,
if (vmax != 0 && vmin != 0)
pipe_ctx[i]->stream_res.tg->funcs->set_static_screen_control(
pipe_ctx[i]->stream_res.tg,
- event_triggers);
+ event_triggers, num_frames);
}
}
-static void dcn10_get_position(struct pipe_ctx **pipe_ctx,
+void dcn10_get_position(struct pipe_ctx **pipe_ctx,
int num_pipes,
struct crtc_position *position)
{
@@ -2795,22 +2744,23 @@ static void dcn10_get_position(struct pipe_ctx **pipe_ctx,
pipe_ctx[i]->stream_res.tg->funcs->get_position(pipe_ctx[i]->stream_res.tg, position);
}
-static void dcn10_set_static_screen_control(struct pipe_ctx **pipe_ctx,
- int num_pipes, const struct dc_static_screen_events *events)
+void dcn10_set_static_screen_control(struct pipe_ctx **pipe_ctx,
+ int num_pipes, const struct dc_static_screen_params *params)
{
unsigned int i;
- unsigned int value = 0;
+ unsigned int triggers = 0;
- if (events->surface_update)
- value |= 0x80;
- if (events->cursor_update)
- value |= 0x2;
- if (events->force_trigger)
- value |= 0x1;
+ if (params->triggers.surface_update)
+ triggers |= 0x80;
+ if (params->triggers.cursor_update)
+ triggers |= 0x2;
+ if (params->triggers.force_trigger)
+ triggers |= 0x1;
for (i = 0; i < num_pipes; i++)
pipe_ctx[i]->stream_res.tg->funcs->
- set_static_screen_control(pipe_ctx[i]->stream_res.tg, value);
+ set_static_screen_control(pipe_ctx[i]->stream_res.tg,
+ triggers, params->num_frames);
}
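A minimal caller-side sketch of the reworked interface, assuming the dc_static_screen_params layout used in the hunk above (boolean triggers plus num_frames) and a pipe array supplied by the surrounding driver code:

	struct dc_static_screen_params params = { 0 };
	struct pipe_ctx *pipes[1] = { pipe_ctx };

	params.triggers.surface_update = true;	/* OR'd in as 0x80 above */
	params.triggers.force_trigger = true;	/* OR'd in as 0x1 above */
	params.num_frames = 2;			/* threshold handed to the tg */
	dcn10_set_static_screen_control(pipes, 1, &params);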
static void dcn10_config_stereo_parameters(
@@ -2850,7 +2800,7 @@ static void dcn10_config_stereo_parameters(
return;
}
-static void dcn10_setup_stereo(struct pipe_ctx *pipe_ctx, struct dc *dc)
+void dcn10_setup_stereo(struct pipe_ctx *pipe_ctx, struct dc *dc)
{
struct crtc_stereo_flags flags = { 0 };
struct dc_stream_state *stream = pipe_ctx->stream;
@@ -2889,15 +2839,16 @@ static struct hubp *get_hubp_by_inst(struct resource_pool *res_pool, int mpcc_in
return NULL;
}
-static void dcn10_wait_for_mpcc_disconnect(
+void dcn10_wait_for_mpcc_disconnect(
struct dc *dc,
struct resource_pool *res_pool,
struct pipe_ctx *pipe_ctx)
{
+ struct dce_hwseq *hws = dc->hwseq;
int mpcc_inst;
if (dc->debug.sanity_checks) {
- dcn10_verify_allow_pstate_change_high(dc);
+ hws->funcs.verify_allow_pstate_change_high(dc);
}
if (!pipe_ctx->stream_res.opp)
@@ -2914,12 +2865,12 @@ static void dcn10_wait_for_mpcc_disconnect(
}
if (dc->debug.sanity_checks) {
- dcn10_verify_allow_pstate_change_high(dc);
+ hws->funcs.verify_allow_pstate_change_high(dc);
}
}
-static bool dcn10_dummy_display_power_gating(
+bool dcn10_dummy_display_power_gating(
struct dc *dc,
uint8_t controller_id,
struct dc_bios *dcb,
@@ -2928,7 +2879,7 @@ static bool dcn10_dummy_display_power_gating(
return true;
}
-static void dcn10_update_pending_status(struct pipe_ctx *pipe_ctx)
+void dcn10_update_pending_status(struct pipe_ctx *pipe_ctx)
{
struct dc_plane_state *plane_state = pipe_ctx->plane_state;
struct timing_generator *tg = pipe_ctx->stream_res.tg;
@@ -2952,7 +2903,7 @@ static void dcn10_update_pending_status(struct pipe_ctx *pipe_ctx)
}
}
-static void dcn10_update_dchub(struct dce_hwseq *hws, struct dchub_init_data *dh_data)
+void dcn10_update_dchub(struct dce_hwseq *hws, struct dchub_init_data *dh_data)
{
struct hubbub *hubbub = hws->ctx->dc->res_pool->hubbub;
@@ -2960,7 +2911,7 @@ static void dcn10_update_dchub(struct dce_hwseq *hws, struct dchub_init_data *dh
hubbub->funcs->update_dchub(hubbub, dh_data);
}
-static void dcn10_set_cursor_position(struct pipe_ctx *pipe_ctx)
+void dcn10_set_cursor_position(struct pipe_ctx *pipe_ctx)
{
struct dc_cursor_position pos_cpy = pipe_ctx->stream->cursor_position;
struct hubp *hubp = pipe_ctx->plane_res.hubp;
@@ -2974,15 +2925,32 @@ static void dcn10_set_cursor_position(struct pipe_ctx *pipe_ctx)
.rotation = pipe_ctx->plane_state->rotation,
.mirror = pipe_ctx->plane_state->horizontal_mirror
};
- uint32_t x_plane = pipe_ctx->plane_state->dst_rect.x;
- uint32_t y_plane = pipe_ctx->plane_state->dst_rect.y;
- uint32_t x_offset = min(x_plane, pos_cpy.x);
- uint32_t y_offset = min(y_plane, pos_cpy.y);
+ bool pipe_split_on = (pipe_ctx->top_pipe != NULL) ||
+ (pipe_ctx->bottom_pipe != NULL);
+
+ int x_plane = pipe_ctx->plane_state->dst_rect.x;
+ int y_plane = pipe_ctx->plane_state->dst_rect.y;
+ int x_pos = pos_cpy.x;
+ int y_pos = pos_cpy.y;
+
+ // translate cursor from stream space to plane space
+ x_pos = (x_pos - x_plane) * pipe_ctx->plane_state->src_rect.width /
+ pipe_ctx->plane_state->dst_rect.width;
+ y_pos = (y_pos - y_plane) * pipe_ctx->plane_state->src_rect.height /
+ pipe_ctx->plane_state->dst_rect.height;
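+	/* e.g. with dst_rect = { .x = 500, .width = 960 } and
+	 * src_rect.width == 1920, a stream-space x_pos of 980 maps to
+	 * plane-space (980 - 500) * 1920 / 960 = 960 (values illustrative) */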
- pos_cpy.x -= x_offset;
- pos_cpy.y -= y_offset;
- pos_cpy.x_hotspot += (x_plane - x_offset);
- pos_cpy.y_hotspot += (y_plane - y_offset);
+ if (x_pos < 0) {
+ pos_cpy.x_hotspot -= x_pos;
+ x_pos = 0;
+ }
+
+ if (y_pos < 0) {
+ pos_cpy.y_hotspot -= y_pos;
+ y_pos = 0;
+ }
+
+ pos_cpy.x = (uint32_t)x_pos;
+ pos_cpy.y = (uint32_t)y_pos;
if (pipe_ctx->plane_state->address.type
== PLN_ADDR_TYPE_VIDEO_PROGRESSIVE)
@@ -2991,6 +2959,7 @@ static void dcn10_set_cursor_position(struct pipe_ctx *pipe_ctx)
// Swap axis and mirror horizontally
if (param.rotation == ROTATION_ANGLE_90) {
uint32_t temp_x = pos_cpy.x;
+
pos_cpy.x = pipe_ctx->plane_res.scl_data.viewport.width -
(pos_cpy.y - pipe_ctx->plane_res.scl_data.viewport.x) + pipe_ctx->plane_res.scl_data.viewport.x;
pos_cpy.y = temp_x;
@@ -2998,26 +2967,44 @@ static void dcn10_set_cursor_position(struct pipe_ctx *pipe_ctx)
// Swap axis and mirror vertically
else if (param.rotation == ROTATION_ANGLE_270) {
uint32_t temp_y = pos_cpy.y;
- if (pos_cpy.x > pipe_ctx->plane_res.scl_data.viewport.height) {
- pos_cpy.x = pos_cpy.x - pipe_ctx->plane_res.scl_data.viewport.height;
- pos_cpy.y = pipe_ctx->plane_res.scl_data.viewport.height - pos_cpy.x;
- } else {
- pos_cpy.y = 2 * pipe_ctx->plane_res.scl_data.viewport.height - pos_cpy.x;
- }
+ int viewport_height =
+ pipe_ctx->plane_res.scl_data.viewport.height;
+
+ if (pipe_split_on) {
+ if (pos_cpy.x > viewport_height) {
+ pos_cpy.x = pos_cpy.x - viewport_height;
+ pos_cpy.y = viewport_height - pos_cpy.x;
+ } else {
+ pos_cpy.y = 2 * viewport_height - pos_cpy.x;
+ }
+ } else
+ pos_cpy.y = viewport_height - pos_cpy.x;
pos_cpy.x = temp_y;
}
// Mirror horizontally and vertically
else if (param.rotation == ROTATION_ANGLE_180) {
- if (pos_cpy.x >= pipe_ctx->plane_res.scl_data.viewport.width + pipe_ctx->plane_res.scl_data.viewport.x) {
- pos_cpy.x = 2 * pipe_ctx->plane_res.scl_data.viewport.width
- - pos_cpy.x + 2 * pipe_ctx->plane_res.scl_data.viewport.x;
- } else {
- uint32_t temp_x = pos_cpy.x;
- pos_cpy.x = 2 * pipe_ctx->plane_res.scl_data.viewport.x - pos_cpy.x;
- if (temp_x >= pipe_ctx->plane_res.scl_data.viewport.x + (int)hubp->curs_attr.width
- || pos_cpy.x <= (int)hubp->curs_attr.width + pipe_ctx->plane_state->src_rect.x) {
- pos_cpy.x = temp_x + pipe_ctx->plane_res.scl_data.viewport.width;
+ int viewport_width =
+ pipe_ctx->plane_res.scl_data.viewport.width;
+ int viewport_x =
+ pipe_ctx->plane_res.scl_data.viewport.x;
+
+ if (pipe_split_on) {
+ if (pos_cpy.x >= viewport_width + viewport_x) {
+ pos_cpy.x = 2 * viewport_width
+ - pos_cpy.x + 2 * viewport_x;
+ } else {
+ uint32_t temp_x = pos_cpy.x;
+
+ pos_cpy.x = 2 * viewport_x - pos_cpy.x;
+				if (temp_x >= viewport_x + (int)hubp->curs_attr.width ||
+				    pos_cpy.x <= (int)hubp->curs_attr.width +
+						 pipe_ctx->plane_state->src_rect.x) {
+ pos_cpy.x = temp_x + viewport_width;
+ }
}
+ } else {
+ pos_cpy.x = viewport_width - pos_cpy.x + 2 * viewport_x;
}
pos_cpy.y = pipe_ctx->plane_res.scl_data.viewport.height - pos_cpy.y;
}
@@ -3026,7 +3013,7 @@ static void dcn10_set_cursor_position(struct pipe_ctx *pipe_ctx)
dpp->funcs->set_cursor_position(dpp, &pos_cpy, &param, hubp->curs_attr.width, hubp->curs_attr.height);
}
-static void dcn10_set_cursor_attribute(struct pipe_ctx *pipe_ctx)
+void dcn10_set_cursor_attribute(struct pipe_ctx *pipe_ctx)
{
struct dc_cursor_attributes *attributes = &pipe_ctx->stream->cursor_attributes;
@@ -3036,7 +3023,7 @@ static void dcn10_set_cursor_attribute(struct pipe_ctx *pipe_ctx)
pipe_ctx->plane_res.dpp, attributes);
}
-static void dcn10_set_cursor_sdr_white_level(struct pipe_ctx *pipe_ctx)
+void dcn10_set_cursor_sdr_white_level(struct pipe_ctx *pipe_ctx)
{
uint32_t sdr_white_level = pipe_ctx->stream->cursor_attributes.sdr_white_level;
struct fixed31_32 multiplier;
@@ -3063,12 +3050,12 @@ static void dcn10_set_cursor_sdr_white_level(struct pipe_ctx *pipe_ctx)
pipe_ctx->plane_res.dpp, &opt_attr);
}
-/**
-* apply_front_porch_workaround TODO FPGA still need?
-*
-* This is a workaround for a bug that has existed since R5xx and has not been
-* fixed keep Front porch at minimum 2 for Interlaced mode or 1 for progressive.
-*/
+/*
+ * apply_front_porch_workaround  TODO: still needed on FPGA?
+ *
+ * This is a workaround for a bug that has existed since R5xx and has not been
+ * fixed: keep the front porch at a minimum of 2 for interlaced mode or 1 for
+ * progressive.
+ */
static void apply_front_porch_workaround(
struct dc_crtc_timing *timing)
{
@@ -3081,7 +3068,7 @@ static void apply_front_porch_workaround(
}
}
-int get_vupdate_offset_from_vsync(struct pipe_ctx *pipe_ctx)
+int dcn10_get_vupdate_offset_from_vsync(struct pipe_ctx *pipe_ctx)
{
const struct dc_crtc_timing *dc_crtc_timing = &pipe_ctx->stream->timing;
struct dc_crtc_timing patched_crtc_timing;
@@ -3110,34 +3097,8 @@ int get_vupdate_offset_from_vsync(struct pipe_ctx *pipe_ctx)
return vertical_line_start;
}
-void lock_all_pipes(struct dc *dc,
- struct dc_state *context,
- bool lock)
-{
- struct pipe_ctx *pipe_ctx;
- struct timing_generator *tg;
- int i;
-
- for (i = 0; i < dc->res_pool->pipe_count; i++) {
- pipe_ctx = &context->res_ctx.pipe_ctx[i];
- tg = pipe_ctx->stream_res.tg;
- /*
- * Only lock the top pipe's tg to prevent redundant
- * (un)locking. Also skip if pipe is disabled.
- */
- if (pipe_ctx->top_pipe ||
- !pipe_ctx->stream || !pipe_ctx->plane_state ||
- !tg->funcs->is_tg_enabled(tg))
- continue;
-
- if (lock)
- tg->funcs->lock(tg);
- else
- tg->funcs->unlock(tg);
- }
-}
-
-static void calc_vupdate_position(
+static void dcn10_calc_vupdate_position(
+ struct dc *dc,
struct pipe_ctx *pipe_ctx,
uint32_t *start_line,
uint32_t *end_line)
@@ -3145,7 +3106,7 @@ static void calc_vupdate_position(
const struct dc_crtc_timing *dc_crtc_timing = &pipe_ctx->stream->timing;
int vline_int_offset_from_vupdate =
pipe_ctx->stream->periodic_interrupt0.lines_offset;
- int vupdate_offset_from_vsync = get_vupdate_offset_from_vsync(pipe_ctx);
+ int vupdate_offset_from_vsync = dc->hwss.get_vupdate_offset_from_vsync(pipe_ctx);
int start_position;
if (vline_int_offset_from_vupdate > 0)
@@ -3166,7 +3127,8 @@ static void calc_vupdate_position(
*end_line = 2;
}
-static void cal_vline_position(
+static void dcn10_cal_vline_position(
+ struct dc *dc,
struct pipe_ctx *pipe_ctx,
enum vline_select vline,
uint32_t *start_line,
@@ -3181,7 +3143,8 @@ static void cal_vline_position(
switch (ref_point) {
case START_V_UPDATE:
- calc_vupdate_position(
+ dcn10_calc_vupdate_position(
+ dc,
pipe_ctx,
start_line,
end_line);
@@ -3195,7 +3158,8 @@ static void cal_vline_position(
}
}
-static void dcn10_setup_periodic_interrupt(
+void dcn10_setup_periodic_interrupt(
+ struct dc *dc,
struct pipe_ctx *pipe_ctx,
enum vline_select vline)
{
@@ -3205,7 +3169,7 @@ static void dcn10_setup_periodic_interrupt(
uint32_t start_line = 0;
uint32_t end_line = 0;
- cal_vline_position(pipe_ctx, vline, &start_line, &end_line);
+ dcn10_cal_vline_position(dc, pipe_ctx, vline, &start_line, &end_line);
tg->funcs->setup_vertical_interrupt0(tg, start_line, end_line);
@@ -3216,10 +3180,10 @@ static void dcn10_setup_periodic_interrupt(
}
}
-static void dcn10_setup_vupdate_interrupt(struct pipe_ctx *pipe_ctx)
+void dcn10_setup_vupdate_interrupt(struct dc *dc, struct pipe_ctx *pipe_ctx)
{
struct timing_generator *tg = pipe_ctx->stream_res.tg;
- int start_line = get_vupdate_offset_from_vsync(pipe_ctx);
+ int start_line = dc->hwss.get_vupdate_offset_from_vsync(pipe_ctx);
if (start_line < 0) {
ASSERT(0);
@@ -3230,12 +3194,13 @@ static void dcn10_setup_vupdate_interrupt(struct pipe_ctx *pipe_ctx)
tg->funcs->setup_vertical_interrupt2(tg, start_line);
}
-static void dcn10_unblank_stream(struct pipe_ctx *pipe_ctx,
+void dcn10_unblank_stream(struct pipe_ctx *pipe_ctx,
struct dc_link_settings *link_settings)
{
struct encoder_unblank_param params = { { 0 } };
struct dc_stream_state *stream = pipe_ctx->stream;
struct dc_link *link = stream->link;
+ struct dce_hwseq *hws = link->dc->hwseq;
/* only 3 items below are used by unblank */
params.timing = pipe_ctx->stream->timing;
@@ -3249,11 +3214,11 @@ static void dcn10_unblank_stream(struct pipe_ctx *pipe_ctx,
}
if (link->local_sink && link->local_sink->sink_signal == SIGNAL_TYPE_EDP) {
- link->dc->hwss.edp_backlight_control(link, true);
+ hws->funcs.edp_backlight_control(link, true);
}
}
-static void dcn10_send_immediate_sdp_message(struct pipe_ctx *pipe_ctx,
+void dcn10_send_immediate_sdp_message(struct pipe_ctx *pipe_ctx,
const uint8_t *custom_sdp_message,
unsigned int sdp_message_size)
{
@@ -3264,7 +3229,7 @@ static void dcn10_send_immediate_sdp_message(struct pipe_ctx *pipe_ctx,
sdp_message_size);
}
}
-static enum dc_status dcn10_set_clock(struct dc *dc,
+enum dc_status dcn10_set_clock(struct dc *dc,
enum dc_clock_type clock_type,
uint32_t clk_khz,
uint32_t stepping)
@@ -3304,7 +3269,7 @@ static enum dc_status dcn10_set_clock(struct dc *dc,
}
-static void dcn10_get_clock(struct dc *dc,
+void dcn10_get_clock(struct dc *dc,
enum dc_clock_type clock_type,
struct dc_clock_config *clock_cfg)
{
@@ -3314,77 +3279,3 @@ static void dcn10_get_clock(struct dc *dc,
dc->clk_mgr->funcs->get_clock(dc->clk_mgr, context, clock_type, clock_cfg);
}
-
-static const struct hw_sequencer_funcs dcn10_funcs = {
- .program_gamut_remap = dcn10_program_gamut_remap,
- .init_hw = dcn10_init_hw,
- .init_pipes = dcn10_init_pipes,
- .apply_ctx_to_hw = dce110_apply_ctx_to_hw,
- .apply_ctx_for_surface = dcn10_apply_ctx_for_surface,
- .update_plane_addr = dcn10_update_plane_addr,
- .plane_atomic_disconnect = hwss1_plane_atomic_disconnect,
- .update_dchub = dcn10_update_dchub,
- .update_mpcc = dcn10_update_mpcc,
- .update_pending_status = dcn10_update_pending_status,
- .set_input_transfer_func = dcn10_set_input_transfer_func,
- .set_output_transfer_func = dcn10_set_output_transfer_func,
- .program_output_csc = dcn10_program_output_csc,
- .power_down = dce110_power_down,
- .enable_accelerated_mode = dce110_enable_accelerated_mode,
- .enable_timing_synchronization = dcn10_enable_timing_synchronization,
- .enable_per_frame_crtc_position_reset = dcn10_enable_per_frame_crtc_position_reset,
- .update_info_frame = dce110_update_info_frame,
- .send_immediate_sdp_message = dcn10_send_immediate_sdp_message,
- .enable_stream = dce110_enable_stream,
- .disable_stream = dce110_disable_stream,
- .unblank_stream = dcn10_unblank_stream,
- .blank_stream = dce110_blank_stream,
- .enable_audio_stream = dce110_enable_audio_stream,
- .disable_audio_stream = dce110_disable_audio_stream,
- .enable_display_power_gating = dcn10_dummy_display_power_gating,
- .disable_plane = dcn10_disable_plane,
- .blank_pixel_data = dcn10_blank_pixel_data,
- .pipe_control_lock = dcn10_pipe_control_lock,
- .prepare_bandwidth = dcn10_prepare_bandwidth,
- .optimize_bandwidth = dcn10_optimize_bandwidth,
- .reset_hw_ctx_wrap = dcn10_reset_hw_ctx_wrap,
- .enable_stream_timing = dcn10_enable_stream_timing,
- .set_drr = dcn10_set_drr,
- .get_position = dcn10_get_position,
- .set_static_screen_control = dcn10_set_static_screen_control,
- .setup_stereo = dcn10_setup_stereo,
- .set_avmute = dce110_set_avmute,
- .log_hw_state = dcn10_log_hw_state,
- .get_hw_state = dcn10_get_hw_state,
- .clear_status_bits = dcn10_clear_status_bits,
- .wait_for_mpcc_disconnect = dcn10_wait_for_mpcc_disconnect,
- .edp_backlight_control = dce110_edp_backlight_control,
- .edp_power_control = dce110_edp_power_control,
- .edp_wait_for_hpd_ready = dce110_edp_wait_for_hpd_ready,
- .set_cursor_position = dcn10_set_cursor_position,
- .set_cursor_attribute = dcn10_set_cursor_attribute,
- .set_cursor_sdr_white_level = dcn10_set_cursor_sdr_white_level,
- .disable_stream_gating = NULL,
- .enable_stream_gating = NULL,
- .setup_periodic_interrupt = dcn10_setup_periodic_interrupt,
- .setup_vupdate_interrupt = dcn10_setup_vupdate_interrupt,
- .set_clock = dcn10_set_clock,
- .get_clock = dcn10_get_clock,
- .did_underflow_occur = dcn10_did_underflow_occur,
- .init_blank = NULL,
- .disable_vga = dcn10_disable_vga,
- .bios_golden_init = dcn10_bios_golden_init,
- .plane_atomic_disable = dcn10_plane_atomic_disable,
- .plane_atomic_power_down = dcn10_plane_atomic_power_down,
- .enable_power_gating_plane = dcn10_enable_power_gating_plane,
- .dpp_pg_control = dcn10_dpp_pg_control,
- .hubp_pg_control = dcn10_hubp_pg_control,
- .dsc_pg_control = NULL,
-};
-
-
-void dcn10_hw_sequencer_construct(struct dc *dc)
-{
- dc->hwss = dcn10_funcs;
-}
-
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.h b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.h
index d3616b1948cc..4d20f6586bb5 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.h
@@ -27,68 +27,160 @@
#define __DC_HWSS_DCN10_H__
#include "core_types.h"
+#include "hw_sequencer_private.h"
struct dc;
void dcn10_hw_sequencer_construct(struct dc *dc);
-extern void fill_display_configs(
- const struct dc_state *context,
- struct dm_pp_display_configuration *pp_display_cfg);
-
-bool is_rgb_cspace(enum dc_color_space output_color_space);
-
-void hwss1_plane_atomic_disconnect(struct dc *dc, struct pipe_ctx *pipe_ctx);
-
-void dcn10_verify_allow_pstate_change_high(struct dc *dc);
+int dcn10_get_vupdate_offset_from_vsync(struct pipe_ctx *pipe_ctx);
+void dcn10_setup_vupdate_interrupt(struct dc *dc, struct pipe_ctx *pipe_ctx);
+enum dc_status dcn10_enable_stream_timing(
+ struct pipe_ctx *pipe_ctx,
+ struct dc_state *context,
+ struct dc *dc);
+void dcn10_optimize_bandwidth(
+ struct dc *dc,
+ struct dc_state *context);
+void dcn10_prepare_bandwidth(
+ struct dc *dc,
+ struct dc_state *context);
+void dcn10_pipe_control_lock(
+ struct dc *dc,
+ struct pipe_ctx *pipe,
+ bool lock);
+void dcn10_blank_pixel_data(
+ struct dc *dc,
+ struct pipe_ctx *pipe_ctx,
+ bool blank);
+void dcn10_unblank_stream(struct pipe_ctx *pipe_ctx,
+ struct dc_link_settings *link_settings);
+void dcn10_program_output_csc(struct dc *dc,
+ struct pipe_ctx *pipe_ctx,
+ enum dc_color_space colorspace,
+ uint16_t *matrix,
+ int opp_id);
+bool dcn10_set_output_transfer_func(struct dc *dc, struct pipe_ctx *pipe_ctx,
+ const struct dc_stream_state *stream);
+bool dcn10_set_input_transfer_func(struct dc *dc, struct pipe_ctx *pipe_ctx,
+ const struct dc_plane_state *plane_state);
+void dcn10_update_plane_addr(const struct dc *dc, struct pipe_ctx *pipe_ctx);
+void dcn10_update_mpcc(struct dc *dc, struct pipe_ctx *pipe_ctx);
+void dcn10_reset_hw_ctx_wrap(
+ struct dc *dc,
+ struct dc_state *context);
+void dcn10_disable_plane(struct dc *dc, struct pipe_ctx *pipe_ctx);
+void dcn10_apply_ctx_for_surface(
+ struct dc *dc,
+ const struct dc_stream_state *stream,
+ int num_planes,
+ struct dc_state *context);
+void dcn10_hubp_pg_control(
+ struct dce_hwseq *hws,
+ unsigned int hubp_inst,
+ bool power_on);
+void dcn10_dpp_pg_control(
+ struct dce_hwseq *hws,
+ unsigned int dpp_inst,
+ bool power_on);
+void dcn10_enable_power_gating_plane(
+ struct dce_hwseq *hws,
+ bool enable);
+void dcn10_plane_atomic_disable(struct dc *dc, struct pipe_ctx *pipe_ctx);
+void dcn10_disable_vga(
+ struct dce_hwseq *hws);
void dcn10_program_pipe(
struct dc *dc,
struct pipe_ctx *pipe_ctx,
struct dc_state *context);
-
-void dcn10_get_hw_state(
+void dcn10_program_gamut_remap(struct pipe_ctx *pipe_ctx);
+void dcn10_init_hw(struct dc *dc);
+void dcn10_init_pipes(struct dc *dc, struct dc_state *context);
+enum dc_status dce110_apply_ctx_to_hw(
+ struct dc *dc,
+ struct dc_state *context);
+void dcn10_plane_atomic_disconnect(struct dc *dc, struct pipe_ctx *pipe_ctx);
+void dcn10_update_dchub(struct dce_hwseq *hws, struct dchub_init_data *dh_data);
+void dcn10_update_pending_status(struct pipe_ctx *pipe_ctx);
+void dce110_power_down(struct dc *dc);
+void dce110_enable_accelerated_mode(struct dc *dc, struct dc_state *context);
+void dcn10_enable_timing_synchronization(
+ struct dc *dc,
+ int group_index,
+ int group_size,
+ struct pipe_ctx *grouped_pipes[]);
+void dcn10_enable_per_frame_crtc_position_reset(
+ struct dc *dc,
+ int group_size,
+ struct pipe_ctx *grouped_pipes[]);
+void dce110_update_info_frame(struct pipe_ctx *pipe_ctx);
+void dcn10_send_immediate_sdp_message(struct pipe_ctx *pipe_ctx,
+ const uint8_t *custom_sdp_message,
+ unsigned int sdp_message_size);
+void dce110_blank_stream(struct pipe_ctx *pipe_ctx);
+void dce110_enable_audio_stream(struct pipe_ctx *pipe_ctx);
+void dce110_disable_audio_stream(struct pipe_ctx *pipe_ctx);
+bool dcn10_dummy_display_power_gating(
struct dc *dc,
- char *pBuf, unsigned int bufSize,
+ uint8_t controller_id,
+ struct dc_bios *dcb,
+ enum pipe_gating_control power_gating);
+void dcn10_set_drr(struct pipe_ctx **pipe_ctx,
+ int num_pipes, unsigned int vmin, unsigned int vmax,
+ unsigned int vmid, unsigned int vmid_frame_number);
+void dcn10_get_position(struct pipe_ctx **pipe_ctx,
+ int num_pipes,
+ struct crtc_position *position);
+void dcn10_set_static_screen_control(struct pipe_ctx **pipe_ctx,
+ int num_pipes, const struct dc_static_screen_params *params);
+void dcn10_setup_stereo(struct pipe_ctx *pipe_ctx, struct dc *dc);
+void dce110_set_avmute(struct pipe_ctx *pipe_ctx, bool enable);
+void dcn10_log_hw_state(struct dc *dc,
+ struct dc_log_buffer_ctx *log_ctx);
+void dcn10_get_hw_state(struct dc *dc,
+ char *pBuf,
+ unsigned int bufSize,
unsigned int mask);
-
void dcn10_clear_status_bits(struct dc *dc, unsigned int mask);
-
-bool is_lower_pipe_tree_visible(struct pipe_ctx *pipe_ctx);
-
-bool is_upper_pipe_tree_visible(struct pipe_ctx *pipe_ctx);
-
-bool is_pipe_tree_visible(struct pipe_ctx *pipe_ctx);
-
-void dcn10_program_pte_vm(struct dce_hwseq *hws, struct hubp *hubp);
-
-void set_hdr_multiplier(struct pipe_ctx *pipe_ctx);
-
+void dcn10_wait_for_mpcc_disconnect(
+ struct dc *dc,
+ struct resource_pool *res_pool,
+ struct pipe_ctx *pipe_ctx);
+void dce110_edp_backlight_control(
+ struct dc_link *link,
+ bool enable);
+void dce110_edp_power_control(
+ struct dc_link *link,
+ bool power_up);
+void dce110_edp_wait_for_hpd_ready(
+ struct dc_link *link,
+ bool power_up);
+void dcn10_set_cursor_position(struct pipe_ctx *pipe_ctx);
+void dcn10_set_cursor_attribute(struct pipe_ctx *pipe_ctx);
+void dcn10_set_cursor_sdr_white_level(struct pipe_ctx *pipe_ctx);
+void dcn10_setup_periodic_interrupt(
+ struct dc *dc,
+ struct pipe_ctx *pipe_ctx,
+ enum vline_select vline);
+enum dc_status dcn10_set_clock(struct dc *dc,
+ enum dc_clock_type clock_type,
+ uint32_t clk_khz,
+ uint32_t stepping);
+void dcn10_get_clock(struct dc *dc,
+ enum dc_clock_type clock_type,
+ struct dc_clock_config *clock_cfg);
+bool dcn10_did_underflow_occur(struct dc *dc, struct pipe_ctx *pipe_ctx);
+void dcn10_bios_golden_init(struct dc *dc);
+void dcn10_plane_atomic_power_down(struct dc *dc,
+ struct dpp *dpp,
+ struct hubp *hubp);
void dcn10_get_surface_visual_confirm_color(
const struct pipe_ctx *pipe_ctx,
struct tg_color *color);
-
void dcn10_get_hdr_visual_confirm_color(
struct pipe_ctx *pipe_ctx,
struct tg_color *color);
-
-bool dcn10_did_underflow_occur(struct dc *dc, struct pipe_ctx *pipe_ctx);
-
-void update_dchubp_dpp(
- struct dc *dc,
- struct pipe_ctx *pipe_ctx,
- struct dc_state *context);
-
-struct pipe_ctx *find_top_pipe_for_stream(
- struct dc *dc,
- struct dc_state *context,
- const struct dc_stream_state *stream);
-
-int get_vupdate_offset_from_vsync(struct pipe_ctx *pipe_ctx);
-
-void dcn10_build_prescale_params(struct dc_bias_and_scale *bias_and_scale,
- const struct dc_plane_state *plane_state);
-void lock_all_pipes(struct dc *dc,
- struct dc_state *context,
- bool lock);
+void dcn10_set_hdr_multiplier(struct pipe_ctx *pipe_ctx);
+void dcn10_verify_allow_pstate_change_high(struct dc *dc);
#endif /* __DC_HWSS_DCN10_H__ */
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer_debug.h b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer_debug.h
new file mode 100644
index 000000000000..596f95c22e85
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer_debug.h
@@ -0,0 +1,43 @@
+/*
+ * Copyright 2016 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef __DC_HWSS_DCN10_DEBUG_H__
+#define __DC_HWSS_DCN10_DEBUG_H__
+
+#include "core_types.h"
+
+struct dc;
+
+void dcn10_clear_status_bits(struct dc *dc, unsigned int mask);
+
+void dcn10_log_hw_state(struct dc *dc,
+ struct dc_log_buffer_ctx *log_ctx);
+
+void dcn10_get_hw_state(struct dc *dc,
+ char *pBuf,
+ unsigned int bufSize,
+ unsigned int mask);
+
+#endif /* __DC_HWSS_DCN10_DEBUG_H__ */
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_init.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_init.c
new file mode 100644
index 000000000000..e7e5352ec424
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_init.c
@@ -0,0 +1,111 @@
+/*
+ * Copyright 2016 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include "hw_sequencer_private.h"
+#include "dce110/dce110_hw_sequencer.h"
+#include "dcn10_hw_sequencer.h"
+
+static const struct hw_sequencer_funcs dcn10_funcs = {
+ .program_gamut_remap = dcn10_program_gamut_remap,
+ .init_hw = dcn10_init_hw,
+ .apply_ctx_to_hw = dce110_apply_ctx_to_hw,
+ .apply_ctx_for_surface = dcn10_apply_ctx_for_surface,
+ .update_plane_addr = dcn10_update_plane_addr,
+ .update_dchub = dcn10_update_dchub,
+ .update_pending_status = dcn10_update_pending_status,
+ .program_output_csc = dcn10_program_output_csc,
+ .enable_accelerated_mode = dce110_enable_accelerated_mode,
+ .enable_timing_synchronization = dcn10_enable_timing_synchronization,
+ .enable_per_frame_crtc_position_reset = dcn10_enable_per_frame_crtc_position_reset,
+ .update_info_frame = dce110_update_info_frame,
+ .send_immediate_sdp_message = dcn10_send_immediate_sdp_message,
+ .enable_stream = dce110_enable_stream,
+ .disable_stream = dce110_disable_stream,
+ .unblank_stream = dcn10_unblank_stream,
+ .blank_stream = dce110_blank_stream,
+ .enable_audio_stream = dce110_enable_audio_stream,
+ .disable_audio_stream = dce110_disable_audio_stream,
+ .disable_plane = dcn10_disable_plane,
+ .pipe_control_lock = dcn10_pipe_control_lock,
+ .prepare_bandwidth = dcn10_prepare_bandwidth,
+ .optimize_bandwidth = dcn10_optimize_bandwidth,
+ .set_drr = dcn10_set_drr,
+ .get_position = dcn10_get_position,
+ .set_static_screen_control = dcn10_set_static_screen_control,
+ .setup_stereo = dcn10_setup_stereo,
+ .set_avmute = dce110_set_avmute,
+ .log_hw_state = dcn10_log_hw_state,
+ .get_hw_state = dcn10_get_hw_state,
+ .clear_status_bits = dcn10_clear_status_bits,
+ .wait_for_mpcc_disconnect = dcn10_wait_for_mpcc_disconnect,
+ .edp_power_control = dce110_edp_power_control,
+ .edp_wait_for_hpd_ready = dce110_edp_wait_for_hpd_ready,
+ .set_cursor_position = dcn10_set_cursor_position,
+ .set_cursor_attribute = dcn10_set_cursor_attribute,
+ .set_cursor_sdr_white_level = dcn10_set_cursor_sdr_white_level,
+ .setup_periodic_interrupt = dcn10_setup_periodic_interrupt,
+ .set_clock = dcn10_set_clock,
+ .get_clock = dcn10_get_clock,
+ .get_vupdate_offset_from_vsync = dcn10_get_vupdate_offset_from_vsync,
+};
+
+static const struct hwseq_private_funcs dcn10_private_funcs = {
+ .init_pipes = dcn10_init_pipes,
+ .update_plane_addr = dcn10_update_plane_addr,
+ .plane_atomic_disconnect = dcn10_plane_atomic_disconnect,
+ .program_pipe = dcn10_program_pipe,
+ .update_mpcc = dcn10_update_mpcc,
+ .set_input_transfer_func = dcn10_set_input_transfer_func,
+ .set_output_transfer_func = dcn10_set_output_transfer_func,
+ .power_down = dce110_power_down,
+ .enable_display_power_gating = dcn10_dummy_display_power_gating,
+ .blank_pixel_data = dcn10_blank_pixel_data,
+ .reset_hw_ctx_wrap = dcn10_reset_hw_ctx_wrap,
+ .enable_stream_timing = dcn10_enable_stream_timing,
+ .edp_backlight_control = dce110_edp_backlight_control,
+ .disable_stream_gating = NULL,
+ .enable_stream_gating = NULL,
+ .setup_vupdate_interrupt = dcn10_setup_vupdate_interrupt,
+ .did_underflow_occur = dcn10_did_underflow_occur,
+ .init_blank = NULL,
+ .disable_vga = dcn10_disable_vga,
+ .bios_golden_init = dcn10_bios_golden_init,
+ .plane_atomic_disable = dcn10_plane_atomic_disable,
+ .plane_atomic_power_down = dcn10_plane_atomic_power_down,
+ .enable_power_gating_plane = dcn10_enable_power_gating_plane,
+ .dpp_pg_control = dcn10_dpp_pg_control,
+ .hubp_pg_control = dcn10_hubp_pg_control,
+ .dsc_pg_control = NULL,
+ .get_surface_visual_confirm_color = dcn10_get_surface_visual_confirm_color,
+ .get_hdr_visual_confirm_color = dcn10_get_hdr_visual_confirm_color,
+ .set_hdr_multiplier = dcn10_set_hdr_multiplier,
+ .verify_allow_pstate_change_high = dcn10_verify_allow_pstate_change_high,
+};
+
+void dcn10_hw_sequencer_construct(struct dc *dc)
+{
+ dc->hwss = dcn10_funcs;
+ dc->hwseq->funcs = dcn10_private_funcs;
+}
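
Reviewer note on dcn10_init.c above: the monolithic function table is split into public hooks on dc->hwss and private ones on dc->hwseq->funcs, and several private slots (.disable_stream_gating, .enable_stream_gating, .init_blank, .dsc_pg_control) are intentionally NULL on DCN10. A minimal standalone sketch of the guarded-dispatch pattern this implies; the struct definitions below are stubs, not the real core_types.h / hw_sequencer_private.h types, and maybe_disable_stream_gating() is illustrative only.

#include <stdio.h>

/* Stand-in declarations; the real struct dc and struct dce_hwseq live
 * in core_types.h and hw_sequencer_private.h. */
struct dc;
struct pipe_ctx;

struct hwseq_private_funcs {
	void (*disable_stream_gating)(struct dc *dc, struct pipe_ctx *pipe_ctx);
};

struct dce_hwseq {
	struct hwseq_private_funcs funcs;
};

struct dc {
	struct dce_hwseq *hwseq;
};

/* DCN10 leaves several private hooks NULL, so each dispatch is guarded. */
static void maybe_disable_stream_gating(struct dc *dc, struct pipe_ctx *pipe_ctx)
{
	if (dc->hwseq->funcs.disable_stream_gating)
		dc->hwseq->funcs.disable_stream_gating(dc, pipe_ctx);
	else
		printf("hook unset on this ASIC, skipping\n");
}

int main(void)
{
	struct dce_hwseq hwseq = { { 0 } };
	struct dc dc = { &hwseq };

	maybe_disable_stream_gating(&dc, NULL);	/* prints the skip message */
	return 0;
}
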
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_init.h b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_init.h
new file mode 100644
index 000000000000..8c6fd7b844a4
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_init.h
@@ -0,0 +1,33 @@
+/*
+ * Copyright 2016 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef __DC_DCN10_INIT_H__
+#define __DC_DCN10_INIT_H__
+
+struct dc;
+
+void dcn10_hw_sequencer_construct(struct dc *dc);
+
+#endif /* __DC_DCN10_INIT_H__ */
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_ipp.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_ipp.c
index 0fb9e440cb9d..f05371c1fc36 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_ipp.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_ipp.c
@@ -53,11 +53,9 @@ static const struct ipp_funcs dcn10_ipp_funcs = {
.ipp_destroy = dcn10_ipp_destroy
};
-#if defined(CONFIG_DRM_AMD_DC_DCN2_0)
static const struct ipp_funcs dcn20_ipp_funcs = {
.ipp_destroy = dcn10_ipp_destroy
};
-#endif
void dcn10_ipp_construct(
struct dcn10_ipp *ippn10,
@@ -76,7 +74,6 @@ void dcn10_ipp_construct(
ippn10->ipp_mask = ipp_mask;
}
-#if defined(CONFIG_DRM_AMD_DC_DCN2_0)
void dcn20_ipp_construct(
struct dcn10_ipp *ippn10,
struct dc_context *ctx,
@@ -93,4 +90,3 @@ void dcn20_ipp_construct(
ippn10->ipp_shift = ipp_shift;
ippn10->ipp_mask = ipp_mask;
}
-#endif
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_ipp.h b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_ipp.h
index cfa24459242b..f0e0d07b0311 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_ipp.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_ipp.h
@@ -49,7 +49,6 @@
SRI(CURSOR_HOT_SPOT, CURSOR, id), \
SRI(CURSOR_DST_OFFSET, CURSOR, id)
-#if defined(CONFIG_DRM_AMD_DC_DCN2_0)
#define IPP_REG_LIST_DCN20(id) \
IPP_REG_LIST_DCN(id), \
SRI(CURSOR_SETTINGS, HUBPREQ, id), \
@@ -60,7 +59,6 @@
SRI(CURSOR_POSITION, CURSOR0_, id), \
SRI(CURSOR_HOT_SPOT, CURSOR0_, id), \
SRI(CURSOR_DST_OFFSET, CURSOR0_, id)
-#endif
#define CURSOR0_CURSOR_CONTROL__CURSOR_2X_MAGNIFY__SHIFT 0x4
#define CURSOR0_CURSOR_CONTROL__CURSOR_2X_MAGNIFY_MASK 0x00000010L
@@ -105,7 +103,6 @@
IPP_SF(CURSOR0_CURSOR_DST_OFFSET, CURSOR_DST_X_OFFSET, mask_sh), \
IPP_SF(CNVC_CFG0_FORMAT_CONTROL, OUTPUT_FP, mask_sh)
-#if defined(CONFIG_DRM_AMD_DC_DCN2_0)
#define IPP_MASK_SH_LIST_DCN20(mask_sh) \
IPP_MASK_SH_LIST_DCN(mask_sh), \
IPP_SF(HUBPREQ0_CURSOR_SETTINGS, CURSOR0_DST_Y_OFFSET, mask_sh), \
@@ -124,7 +121,6 @@
IPP_SF(CURSOR0_0_CURSOR_HOT_SPOT, CURSOR_HOT_SPOT_X, mask_sh), \
IPP_SF(CURSOR0_0_CURSOR_HOT_SPOT, CURSOR_HOT_SPOT_Y, mask_sh), \
IPP_SF(CURSOR0_0_CURSOR_DST_OFFSET, CURSOR_DST_X_OFFSET, mask_sh)
-#endif
#define IPP_DCN10_REG_FIELD_LIST(type) \
type CNVC_SURFACE_PIXEL_FORMAT; \
@@ -196,13 +192,11 @@ void dcn10_ipp_construct(struct dcn10_ipp *ippn10,
const struct dcn10_ipp_shift *ipp_shift,
const struct dcn10_ipp_mask *ipp_mask);
-#if defined(CONFIG_DRM_AMD_DC_DCN2_0)
void dcn20_ipp_construct(struct dcn10_ipp *ippn10,
struct dc_context *ctx,
int inst,
const struct dcn10_ipp_registers *regs,
const struct dcn10_ipp_shift *ipp_shift,
const struct dcn10_ipp_mask *ipp_mask);
-#endif
#endif /* _DCN10_IPP_H_ */
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_link_encoder.h b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_link_encoder.h
index 88fcc395adf5..eb13589b9a81 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_link_encoder.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_link_encoder.h
@@ -72,9 +72,7 @@
struct dcn10_link_enc_aux_registers {
uint32_t AUX_CONTROL;
uint32_t AUX_DPHY_RX_CONTROL0;
-#ifdef CONFIG_DRM_AMD_DC_DCN2_0
uint32_t AUX_DPHY_TX_CONTROL;
-#endif
};
struct dcn10_link_enc_hpd_registers {
@@ -106,7 +104,6 @@ struct dcn10_link_enc_registers {
uint32_t DP_DPHY_HBR2_PATTERN_CONTROL;
uint32_t DP_SEC_CNTL1;
uint32_t TMDS_CTL_BITS;
-#if defined(CONFIG_DRM_AMD_DC_DCN2_0)
/* DCCG */
uint32_t CLOCK_ENABLE;
/* DIG */
@@ -127,6 +124,26 @@ struct dcn10_link_enc_registers {
uint32_t RDPCSTX_PHY_CNTL13;
uint32_t RDPCSTX_PHY_CNTL14;
uint32_t RDPCSTX_PHY_CNTL15;
+ uint32_t RDPCSTX_CNTL;
+ uint32_t RDPCSTX_CLOCK_CNTL;
+ uint32_t RDPCSTX_PHY_CNTL0;
+ uint32_t RDPCSTX_PHY_CNTL2;
+ uint32_t RDPCSTX_PLL_UPDATE_DATA;
+ uint32_t RDPCS_TX_CR_ADDR;
+ uint32_t RDPCS_TX_CR_DATA;
+ uint32_t DPCSTX_TX_CLOCK_CNTL;
+ uint32_t DPCSTX_TX_CNTL;
+ uint32_t RDPCSTX_INTERRUPT_CONTROL;
+ uint32_t RDPCSTX_PHY_FUSE0;
+ uint32_t RDPCSTX_PHY_FUSE1;
+ uint32_t RDPCSTX_PHY_FUSE2;
+ uint32_t RDPCSTX_PHY_FUSE3;
+ uint32_t RDPCSTX_PHY_RX_LD_VAL;
+ uint32_t DPCSTX_DEBUG_CONFIG;
+ uint32_t RDPCSTX_DEBUG_CONFIG;
+ uint32_t RDPCSTX0_RDPCSTX_SCRATCH;
+ uint32_t RDPCSTX_DMCU_DPALT_DIS_BLOCK_REG;
+ uint32_t DCIO_SOFT_RESET;
/* indirect registers */
uint32_t RAWLANE0_DIG_PCS_XF_RX_OVRD_IN_2;
uint32_t RAWLANE0_DIG_PCS_XF_RX_OVRD_IN_3;
@@ -136,7 +153,6 @@ struct dcn10_link_enc_registers {
uint32_t RAWLANE2_DIG_PCS_XF_RX_OVRD_IN_3;
uint32_t RAWLANE3_DIG_PCS_XF_RX_OVRD_IN_2;
uint32_t RAWLANE3_DIG_PCS_XF_RX_OVRD_IN_3;
-#endif
};
#define LE_SF(reg_name, field_name, post_fix)\
@@ -242,7 +258,6 @@ struct dcn10_link_enc_registers {
type AUX_LS_READ_EN;\
type AUX_RX_RECEIVE_WINDOW
-#if defined(CONFIG_DRM_AMD_DC_DCN2_0)
#define DCN20_LINK_ENCODER_DPCS_REG_FIELD_LIST(type) \
type RDPCS_PHY_DP_TX0_DATA_EN;\
@@ -423,20 +438,15 @@ struct dcn10_link_enc_registers {
type AUX_TX_PRECHARGE_SYMBOLS; \
type AUX_MODE_DET_CHECK_DELAY;\
type DPCS_DBG_CBUS_DIS
-#endif
struct dcn10_link_enc_shift {
DCN_LINK_ENCODER_REG_FIELD_LIST(uint8_t);
-#if defined(CONFIG_DRM_AMD_DC_DCN2_0)
DCN20_LINK_ENCODER_REG_FIELD_LIST(uint8_t);
-#endif
};
struct dcn10_link_enc_mask {
DCN_LINK_ENCODER_REG_FIELD_LIST(uint32_t);
-#if defined(CONFIG_DRM_AMD_DC_DCN2_0)
DCN20_LINK_ENCODER_REG_FIELD_LIST(uint32_t);
-#endif
};
struct dcn10_link_encoder {
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_mpc.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_mpc.c
index 8b2f29f6dabd..04f863499cfb 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_mpc.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_mpc.c
@@ -42,20 +42,27 @@ void mpc1_set_bg_color(struct mpc *mpc,
int mpcc_id)
{
struct dcn10_mpc *mpc10 = TO_DCN10_MPC(mpc);
+ struct mpcc *bottommost_mpcc = mpc1_get_mpcc(mpc, mpcc_id);
+ uint32_t bg_r_cr, bg_g_y, bg_b_cb;
+
+ /* find bottommost mpcc. */
+ while (bottommost_mpcc->mpcc_bot) {
+ bottommost_mpcc = bottommost_mpcc->mpcc_bot;
+ }
/* mpc color is 12 bit. tg_color is 10 bit */
/* todo: might want to use 16 bit to represent color and have each
* hw block translate to correct color depth.
*/
- uint32_t bg_r_cr = bg_color->color_r_cr << 2;
- uint32_t bg_g_y = bg_color->color_g_y << 2;
- uint32_t bg_b_cb = bg_color->color_b_cb << 2;
+ bg_r_cr = bg_color->color_r_cr << 2;
+ bg_g_y = bg_color->color_g_y << 2;
+ bg_b_cb = bg_color->color_b_cb << 2;
- REG_SET(MPCC_BG_R_CR[mpcc_id], 0,
+ REG_SET(MPCC_BG_R_CR[bottommost_mpcc->mpcc_id], 0,
MPCC_BG_R_CR, bg_r_cr);
- REG_SET(MPCC_BG_G_Y[mpcc_id], 0,
+ REG_SET(MPCC_BG_G_Y[bottommost_mpcc->mpcc_id], 0,
MPCC_BG_G_Y, bg_g_y);
- REG_SET(MPCC_BG_B_CB[mpcc_id], 0,
+ REG_SET(MPCC_BG_B_CB[bottommost_mpcc->mpcc_id], 0,
MPCC_BG_B_CB, bg_b_cb);
}
@@ -457,12 +464,10 @@ static const struct mpc_funcs dcn10_mpc_funcs = {
.assert_mpcc_idle_before_connect = mpc1_assert_mpcc_idle_before_connect,
.init_mpcc_list_from_hw = mpc1_init_mpcc_list_from_hw,
.update_blending = mpc1_update_blending,
-#if defined(CONFIG_DRM_AMD_DC_DCN2_0)
.set_denorm = NULL,
.set_denorm_clamp = NULL,
.set_output_csc = NULL,
.set_output_gamma = NULL,
-#endif
};
void dcn10_mpc_construct(struct dcn10_mpc *mpc10,
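
Reviewer note on the mpc1_set_bg_color() change above: the background color is now programmed on the bottommost MPCC of the blend tree rather than on the MPCC id passed in, and each 10-bit tg_color component is shifted left by two to fill the 12-bit MPC register field. A self-contained sketch of the tail walk, using a stub node (the real struct mpcc in mpc.h carries much more state):

#include <assert.h>
#include <stddef.h>

struct mpcc {
	int mpcc_id;
	struct mpcc *mpcc_bot;	/* next MPCC below in the blend tree */
};

/* Mirror of the loop added in mpc1_set_bg_color(): follow mpcc_bot
 * until it is NULL, which marks the bottom of the tree. */
static struct mpcc *find_bottommost_mpcc(struct mpcc *m)
{
	while (m->mpcc_bot)
		m = m->mpcc_bot;
	return m;
}

int main(void)
{
	struct mpcc c = { 2, NULL }, b = { 1, &c }, a = { 0, &b };

	assert(find_bottommost_mpcc(&a)->mpcc_id == 2);
	return 0;
}
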
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_opp.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_opp.c
index 0a9ad692f541..d79718fde5a6 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_opp.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_opp.c
@@ -373,11 +373,9 @@ void opp1_program_oppbuf(
*/
REG_UPDATE(OPPBUF_CONTROL, OPPBUF_PIXEL_REPETITION, oppbuf->pixel_repetition);
-#if defined(CONFIG_DRM_AMD_DC_DCN2_0)
/* Controls the number of padded pixels at the end of a segment */
if (REG(OPPBUF_CONTROL1))
REG_UPDATE(OPPBUF_CONTROL1, OPPBUF_NUM_SEGMENT_PADDED_PIXELS, oppbuf->num_segment_padded_pixels);
-#endif
}
void opp1_pipe_clock_control(struct output_pixel_processor *opp, bool enable)
@@ -404,9 +402,8 @@ static const struct opp_funcs dcn10_opp_funcs = {
.opp_program_bit_depth_reduction = opp1_program_bit_depth_reduction,
.opp_program_stereo = opp1_program_stereo,
.opp_pipe_clock_control = opp1_pipe_clock_control,
-#if defined(CONFIG_DRM_AMD_DC_DCN2_0)
.opp_set_disp_pattern_generator = NULL,
-#endif
+ .dpg_is_blanked = NULL,
.opp_destroy = opp1_destroy
};
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.c
index dabccbd49ad4..a9a43b397db9 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.c
@@ -457,11 +457,16 @@ static bool optc1_enable_crtc(struct timing_generator *optc)
REG_UPDATE(CONTROL,
VTG0_ENABLE, 1);
+ REG_SEQ_START();
+
/* Enable CRTC */
REG_UPDATE_2(OTG_CONTROL,
OTG_DISABLE_POINT_CNTL, 3,
OTG_MASTER_EN, 1);
+ REG_SEQ_SUBMIT();
+ REG_SEQ_WAIT_DONE();
+
return true;
}
@@ -784,21 +789,26 @@ void optc1_set_early_control(
void optc1_set_static_screen_control(
struct timing_generator *optc,
- uint32_t value)
+ uint32_t event_triggers,
+ uint32_t num_frames)
{
struct optc *optc1 = DCN10TG_FROM_TG(optc);
+ // By register spec, it only takes an 8-bit value
+ if (num_frames > 0xFF)
+ num_frames = 0xFF;
+
/* Bit 8 is no longer applicable in RV for PSR case,
* set bit 8 to 0 if given
*/
- if ((value & STATIC_SCREEN_EVENT_MASK_RANGETIMING_DOUBLE_BUFFER_UPDATE_EN)
+ if ((event_triggers & STATIC_SCREEN_EVENT_MASK_RANGETIMING_DOUBLE_BUFFER_UPDATE_EN)
!= 0)
- value = value &
+ event_triggers = event_triggers &
~STATIC_SCREEN_EVENT_MASK_RANGETIMING_DOUBLE_BUFFER_UPDATE_EN;
REG_SET_2(OTG_STATIC_SCREEN_CONTROL, 0,
- OTG_STATIC_SCREEN_EVENT_MASK, value,
- OTG_STATIC_SCREEN_FRAME_COUNT, 2);
+ OTG_STATIC_SCREEN_EVENT_MASK, event_triggers,
+ OTG_STATIC_SCREEN_FRAME_COUNT, num_frames);
}
void optc1_setup_manual_trigger(struct timing_generator *optc)
@@ -1497,7 +1507,6 @@ void dcn10_timing_generator_init(struct optc *optc1)
optc1->min_v_sync_width = 1;
}
-#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT
/* "Containter" vs. "pixel" is a concept within HW blocks, mostly those closer to the back-end. It works like this:
*
* - In most of the formats (RGB or YCbCr 4:4:4, 4:2:2 uncompressed and DSC 4:2:2 Simple) pixel rate is the same as
@@ -1510,15 +1519,12 @@ void dcn10_timing_generator_init(struct optc *optc1)
* to it) and has to be treated the same as 4:2:0, i.e. target container rate has to be halved in this case as well.
*
*/
-#endif
bool optc1_is_two_pixels_per_containter(const struct dc_crtc_timing *timing)
{
bool two_pix = timing->pixel_encoding == PIXEL_ENCODING_YCBCR420;
-#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT
two_pix = two_pix || (timing->flags.DSC && timing->pixel_encoding == PIXEL_ENCODING_YCBCR422
&& !timing->dsc_cfg.ycbcr422_simple);
-#endif
return two_pix;
}
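
Reviewer note on optc1_set_static_screen_control() above: the hook now takes separate event_triggers and num_frames arguments, clamping num_frames to the register's 8-bit frame-count field and clearing bit 8 of the event mask (the double-buffer update trigger is not applicable on RV for the PSR case). A standalone sketch of that argument conditioning; the 0x100 mask value is an assumption matching the patch's "bit 8" comment, not a verified register definition.

#include <assert.h>
#include <stdint.h>

/* Assumed value for
 * STATIC_SCREEN_EVENT_MASK_RANGETIMING_DOUBLE_BUFFER_UPDATE_EN;
 * the patch comment only says "bit 8". */
#define RANGETIMING_DOUBLE_BUFFER_UPDATE_EN 0x100u

static void condition_static_screen_args(uint32_t *event_triggers,
					 uint32_t *num_frames)
{
	if (*num_frames > 0xFF)	/* OTG_STATIC_SCREEN_FRAME_COUNT is 8 bits */
		*num_frames = 0xFF;
	*event_triggers &= ~RANGETIMING_DOUBLE_BUFFER_UPDATE_EN;
}

int main(void)
{
	uint32_t ev = 0x1FF, frames = 300;

	condition_static_screen_args(&ev, &frames);
	assert(ev == 0xFF && frames == 0xFF);
	return 0;
}
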
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.h b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.h
index c8d795b335ba..f277656d5464 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.h
@@ -165,13 +165,11 @@ struct dcn_optc_registers {
uint32_t OTG_CRC0_WINDOWB_X_CONTROL;
uint32_t OTG_CRC0_WINDOWB_Y_CONTROL;
uint32_t GSL_SOURCE_SELECT;
-#ifdef CONFIG_DRM_AMD_DC_DCN2_0
uint32_t DWB_SOURCE_SELECT;
uint32_t OTG_DSC_START_POSITION;
uint32_t OPTC_DATA_FORMAT_CONTROL;
uint32_t OPTC_BYTES_PER_PIXEL;
uint32_t OPTC_WIDTH_CONTROL;
-#endif
};
#define TG_COMMON_MASK_SH_LIST_DCN(mask_sh)\
@@ -456,7 +454,6 @@ struct dcn_optc_registers {
type MANUAL_FLOW_CONTROL;\
type MANUAL_FLOW_CONTROL_SEL;
-#ifdef CONFIG_DRM_AMD_DC_DCN2_0
#define TG_REG_FIELD_LIST(type) \
TG_REG_FIELD_LIST_DCN1_0(type)\
@@ -479,12 +476,6 @@ struct dcn_optc_registers {
type OPTC_DWB0_SOURCE_SELECT;\
type OPTC_DWB1_SOURCE_SELECT;
-#else
-
-#define TG_REG_FIELD_LIST(type) \
- TG_REG_FIELD_LIST_DCN1_0(type)
-
-#endif
struct dcn_optc_shift {
@@ -542,6 +533,7 @@ struct dcn_otg_state {
uint32_t h_total;
uint32_t underflow_occurred_status;
uint32_t otg_enabled;
+ uint32_t blank_enabled;
};
void optc1_read_otg_state(struct optc *optc1,
@@ -633,7 +625,8 @@ void optc1_set_drr(
void optc1_set_static_screen_control(
struct timing_generator *optc,
- uint32_t value);
+ uint32_t event_triggers,
+ uint32_t num_frames);
void optc1_program_stereo(struct timing_generator *optc,
const struct dc_crtc_timing *timing, struct crtc_stereo_flags *flags);
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c
index 15640aedd664..3b71898e859e 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c
@@ -28,6 +28,8 @@
#include "dm_services.h"
#include "dc.h"
+#include "dcn10_init.h"
+
#include "resource.h"
#include "include/irq_service_interface.h"
#include "dcn10_resource.h"
@@ -919,7 +921,7 @@ static struct pp_smu_funcs *dcn10_pp_smu_create(struct dc_context *ctx)
return pp_smu;
}
-static void destruct(struct dcn10_resource_pool *pool)
+static void dcn10_resource_destruct(struct dcn10_resource_pool *pool)
{
unsigned int i;
@@ -1166,7 +1168,7 @@ static void dcn10_destroy_resource_pool(struct resource_pool **pool)
{
struct dcn10_resource_pool *dcn10_pool = TO_DCN10_RES_POOL(*pool);
- destruct(dcn10_pool);
+ dcn10_resource_destruct(dcn10_pool);
kfree(dcn10_pool);
*pool = NULL;
}
@@ -1305,7 +1307,7 @@ static uint32_t read_pipe_fuses(struct dc_context *ctx)
return value;
}
-static bool construct(
+static bool dcn10_resource_construct(
uint8_t num_virtual_links,
struct dc *dc,
struct dcn10_resource_pool *pool)
@@ -1592,7 +1594,7 @@ static bool construct(
fail:
- destruct(pool);
+ dcn10_resource_destruct(pool);
return false;
}
@@ -1607,7 +1609,7 @@ struct resource_pool *dcn10_create_resource_pool(
if (!pool)
return NULL;
- if (construct(init_data->num_virtual_links, dc, pool))
+ if (dcn10_resource_construct(init_data->num_virtual_links, dc, pool))
return &pool->base;
kfree(pool);
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.c
index 06e5bbb4545c..376c4264d295 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.c
@@ -247,6 +247,7 @@ void enc1_stream_encoder_dp_set_stream_attribute(
struct stream_encoder *enc,
struct dc_crtc_timing *crtc_timing,
enum dc_color_space output_color_space,
+ bool use_vsc_sdp_for_colorimetry,
uint32_t enable_sdp_splitting)
{
uint32_t h_active_start;
@@ -312,10 +313,7 @@ void enc1_stream_encoder_dp_set_stream_attribute(
* Pixel Encoding/Colorimetry Format and that a Sink device shall ignore MISC1, bit 7,
* and MISC0, bits 7:1 (MISC1, bit 7, and MISC0, bits 7:1, become "don't care").
*/
- if ((hw_crtc_timing.pixel_encoding == PIXEL_ENCODING_YCBCR420) ||
- (output_color_space == COLOR_SPACE_2020_YCBCR) ||
- (output_color_space == COLOR_SPACE_2020_RGB_FULLRANGE) ||
- (output_color_space == COLOR_SPACE_2020_RGB_LIMITEDRANGE))
+ if (use_vsc_sdp_for_colorimetry)
misc1 = misc1 | 0x40;
else
misc1 = misc1 & ~0x40;
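
Reviewer note on enc1_stream_encoder_dp_set_stream_attribute() above: MISC1 bit 6 (0x40) was previously inferred inside the encoder from the pixel encoding and output color space; the patch moves that decision to the new use_vsc_sdp_for_colorimetry argument. A sketch reproducing the condition the patch deletes, should a caller want the old behavior; the enum names are taken from the removed lines, but the numeric values below are placeholders and the helper itself is hypothetical.

#include <stdbool.h>

/* Placeholder values; the real enums are dc's enum dc_pixel_encoding
 * and enum dc_color_space. Only the names below match the patch. */
enum { PIXEL_ENCODING_YCBCR420 = 3 };
enum {
	COLOR_SPACE_2020_RGB_FULLRANGE = 10,
	COLOR_SPACE_2020_RGB_LIMITEDRANGE = 11,
	COLOR_SPACE_2020_YCBCR = 12,
};

/* Hypothetical caller-side helper reproducing the removed check;
 * the real policy now lives with the caller and may differ. */
static bool derive_use_vsc_sdp_for_colorimetry(int pixel_encoding,
					       int output_color_space)
{
	return pixel_encoding == PIXEL_ENCODING_YCBCR420 ||
	       output_color_space == COLOR_SPACE_2020_YCBCR ||
	       output_color_space == COLOR_SPACE_2020_RGB_FULLRANGE ||
	       output_color_space == COLOR_SPACE_2020_RGB_LIMITEDRANGE;
}
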
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.h b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.h
index c9cbc21d121e..f9b9e221c698 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.h
@@ -163,14 +163,12 @@ struct dcn10_stream_enc_registers {
uint32_t DP_MSA_TIMING_PARAM3;
uint32_t DP_MSA_TIMING_PARAM4;
uint32_t HDMI_DB_CONTROL;
-#if defined(CONFIG_DRM_AMD_DC_DCN2_0)
uint32_t DP_DSC_CNTL;
uint32_t DP_DSC_BYTES_PER_PIXEL;
uint32_t DME_CONTROL;
uint32_t DP_SEC_METADATA_TRANSMISSION;
uint32_t HDMI_METADATA_PACKET_CONTROL;
uint32_t DP_SEC_FRAMING4;
-#endif
uint32_t DIG_CLOCK_PATTERN;
};
@@ -466,7 +464,6 @@ struct dcn10_stream_enc_registers {
type DIG_SOURCE_SELECT;\
type DIG_CLOCK_PATTERN
-#if defined(CONFIG_DRM_AMD_DC_DCN2_0)
#define SE_REG_FIELD_LIST_DCN2_0(type) \
type DP_DSC_MODE;\
type DP_DSC_SLICE_WIDTH;\
@@ -485,20 +482,15 @@ struct dcn10_stream_enc_registers {
type DOLBY_VISION_EN;\
type DP_PIXEL_COMBINE;\
type DP_SST_SDP_SPLITTING
-#endif
struct dcn10_stream_encoder_shift {
SE_REG_FIELD_LIST_DCN1_0(uint8_t);
-#if defined(CONFIG_DRM_AMD_DC_DCN2_0)
SE_REG_FIELD_LIST_DCN2_0(uint8_t);
-#endif
};
struct dcn10_stream_encoder_mask {
SE_REG_FIELD_LIST_DCN1_0(uint32_t);
-#if defined(CONFIG_DRM_AMD_DC_DCN2_0)
SE_REG_FIELD_LIST_DCN2_0(uint32_t);
-#endif
};
struct dcn10_stream_encoder {
@@ -526,6 +518,7 @@ void enc1_stream_encoder_dp_set_stream_attribute(
struct stream_encoder *enc,
struct dc_crtc_timing *crtc_timing,
enum dc_color_space output_color_space,
+ bool use_vsc_sdp_for_colorimetry,
uint32_t enable_sdp_splitting);
void enc1_stream_encoder_hdmi_set_stream_attribute(
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/Makefile b/drivers/gpu/drm/amd/display/dc/dcn20/Makefile
index 10b47986526b..5fcaf78334ff 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/Makefile
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/Makefile
@@ -2,16 +2,20 @@
#
# Makefile for DCN.
-DCN20 = dcn20_resource.o dcn20_hwseq.o dcn20_dpp.o dcn20_dpp_cm.o dcn20_hubp.o \
+DCN20 = dcn20_resource.o dcn20_init.o dcn20_hwseq.o dcn20_dpp.o dcn20_dpp_cm.o dcn20_hubp.o \
dcn20_mpc.o dcn20_opp.o dcn20_hubbub.o dcn20_optc.o dcn20_mmhubbub.o \
dcn20_stream_encoder.o dcn20_link_encoder.o dcn20_dccg.o \
dcn20_vmid.o dcn20_dwb.o dcn20_dwb_scl.o
-ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT
DCN20 += dcn20_dsc.o
-endif
+ifdef CONFIG_X86
CFLAGS_$(AMDDALPATH)/dc/dcn20/dcn20_resource.o := -mhard-float -msse
+endif
+
+ifdef CONFIG_PPC64
+CFLAGS_$(AMDDALPATH)/dc/dcn20/dcn20_resource.o := -mhard-float -maltivec
+endif
ifdef CONFIG_CC_IS_GCC
ifeq ($(call cc-ifversion, -lt, 0701, y), y)
@@ -19,6 +23,7 @@ IS_OLD_GCC = 1
endif
endif
+ifdef CONFIG_X86
ifdef IS_OLD_GCC
# Stack alignment mismatch, proceed with caution.
# GCC < 7.1 cannot compile code using `double` and -mpreferred-stack-boundary=3
@@ -27,6 +32,7 @@ CFLAGS_$(AMDDALPATH)/dc/dcn20/dcn20_resource.o += -mpreferred-stack-boundary=4
else
CFLAGS_$(AMDDALPATH)/dc/dcn20/dcn20_resource.o += -msse2
endif
+endif
AMD_DAL_DCN20 = $(addprefix $(AMDDALPATH)/dc/dcn20/,$(DCN20))
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dccg.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dccg.c
index 1e1151356e60..50bffbfdd394 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dccg.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dccg.c
@@ -50,20 +50,20 @@ void dccg2_update_dpp_dto(struct dccg *dccg, int dpp_inst, int req_dppclk)
if (dccg->ref_dppclk && req_dppclk) {
int ref_dppclk = dccg->ref_dppclk;
+ int modulo, phase;
- ASSERT(req_dppclk <= ref_dppclk);
- /* need to clamp to 8 bits */
- if (ref_dppclk > 0xff) {
- int divider = (ref_dppclk + 0xfe) / 0xff;
+ // phase / modulo = dpp pipe clk / dpp global clk
+ modulo = 0xff; // use FF at the end
+ phase = ((modulo * req_dppclk) + ref_dppclk - 1) / ref_dppclk;
- ref_dppclk /= divider;
- req_dppclk = (req_dppclk + divider - 1) / divider;
- if (req_dppclk > ref_dppclk)
- req_dppclk = ref_dppclk;
+ if (phase > 0xff) {
+ ASSERT(false);
+ phase = 0xff;
}
+
REG_SET_2(DPPCLK_DTO_PARAM[dpp_inst], 0,
- DPPCLK0_DTO_PHASE, req_dppclk,
- DPPCLK0_DTO_MODULO, ref_dppclk);
+ DPPCLK0_DTO_PHASE, phase,
+ DPPCLK0_DTO_MODULO, modulo);
REG_UPDATE(DPPCLK_DTO_CTRL,
DPPCLK_DTO_ENABLE[dpp_inst], 1);
} else {
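
Reviewer note on dccg2_update_dpp_dto() above: instead of scaling both values down to fit eight bits, the new code pins the DTO modulo at 0xff and computes the phase as the rounded-up ratio, so the divider outputs roughly ref_dppclk * phase / modulo. A standalone sketch of the arithmetic (the ASSERT on overflow is elided):

#include <assert.h>
#include <stdio.h>

/* phase / modulo = requested DPP pipe clock / global DPP clock,
 * with modulo pinned to the full 8-bit range as in the patch. */
static void compute_dpp_dto(int req_dppclk, int ref_dppclk,
			    int *phase, int *modulo)
{
	*modulo = 0xff;
	/* round up so the granted clock is never below the request */
	*phase = (*modulo * req_dppclk + ref_dppclk - 1) / ref_dppclk;
	if (*phase > 0xff)	/* a request above ref is a bug; clamp */
		*phase = 0xff;
}

int main(void)
{
	int phase, modulo;

	compute_dpp_dto(400, 600, &phase, &modulo);	/* units cancel in the ratio */
	/* 0xff * 400 / 600 rounds up to 170, and 600 * 170 / 255 = 400 */
	assert(phase == 170 && modulo == 0xff);
	printf("DTO %d/%d\n", phase, modulo);
	return 0;
}
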
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dpp.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dpp.c
index 4d7e45892f08..13e057d7ee93 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dpp.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dpp.c
@@ -104,7 +104,7 @@ static void dpp2_cnv_setup (
uint32_t pixel_format = 0;
uint32_t alpha_en = 1;
enum dc_color_space color_space = COLOR_SPACE_SRGB;
- enum dcn10_input_csc_select select = INPUT_CSC_SELECT_BYPASS;
+ enum dcn20_input_csc_select select = DCN2_ICSC_SELECT_BYPASS;
bool force_disable_cursor = false;
struct out_csc_color_matrix tbl_entry;
uint32_t is_2bit = 0;
@@ -145,25 +145,25 @@ static void dpp2_cnv_setup (
force_disable_cursor = false;
pixel_format = 65;
color_space = COLOR_SPACE_YCBCR709;
- select = INPUT_CSC_SELECT_ICSC;
+ select = DCN2_ICSC_SELECT_ICSC_A;
break;
case SURFACE_PIXEL_FORMAT_VIDEO_420_YCrCb:
force_disable_cursor = true;
pixel_format = 64;
color_space = COLOR_SPACE_YCBCR709;
- select = INPUT_CSC_SELECT_ICSC;
+ select = DCN2_ICSC_SELECT_ICSC_A;
break;
case SURFACE_PIXEL_FORMAT_VIDEO_420_10bpc_YCbCr:
force_disable_cursor = true;
pixel_format = 67;
color_space = COLOR_SPACE_YCBCR709;
- select = INPUT_CSC_SELECT_ICSC;
+ select = DCN2_ICSC_SELECT_ICSC_A;
break;
case SURFACE_PIXEL_FORMAT_VIDEO_420_10bpc_YCrCb:
force_disable_cursor = true;
pixel_format = 66;
color_space = COLOR_SPACE_YCBCR709;
- select = INPUT_CSC_SELECT_ICSC;
+ select = DCN2_ICSC_SELECT_ICSC_A;
break;
case SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616:
pixel_format = 22;
@@ -177,7 +177,7 @@ static void dpp2_cnv_setup (
case SURFACE_PIXEL_FORMAT_VIDEO_AYCrCb8888:
pixel_format = 12;
color_space = COLOR_SPACE_YCBCR709;
- select = INPUT_CSC_SELECT_ICSC;
+ select = DCN2_ICSC_SELECT_ICSC_A;
break;
case SURFACE_PIXEL_FORMAT_GRPH_RGB111110_FIX:
pixel_format = 112;
@@ -188,13 +188,13 @@ static void dpp2_cnv_setup (
case SURFACE_PIXEL_FORMAT_VIDEO_ACrYCb2101010:
pixel_format = 114;
color_space = COLOR_SPACE_YCBCR709;
- select = INPUT_CSC_SELECT_ICSC;
+ select = DCN2_ICSC_SELECT_ICSC_A;
is_2bit = 1;
break;
case SURFACE_PIXEL_FORMAT_VIDEO_CrYCbA1010102:
pixel_format = 115;
color_space = COLOR_SPACE_YCBCR709;
- select = INPUT_CSC_SELECT_ICSC;
+ select = DCN2_ICSC_SELECT_ICSC_A;
is_2bit = 1;
break;
case SURFACE_PIXEL_FORMAT_GRPH_RGB111110_FLOAT:
@@ -227,13 +227,13 @@ static void dpp2_cnv_setup (
tbl_entry.color_space = input_color_space;
if (color_space >= COLOR_SPACE_YCBCR601)
- select = INPUT_CSC_SELECT_ICSC;
+ select = DCN2_ICSC_SELECT_ICSC_A;
else
- select = INPUT_CSC_SELECT_BYPASS;
+ select = DCN2_ICSC_SELECT_BYPASS;
- dpp1_program_input_csc(dpp_base, color_space, select, &tbl_entry);
+ dpp2_program_input_csc(dpp_base, color_space, select, &tbl_entry);
} else
- dpp1_program_input_csc(dpp_base, color_space, select, NULL);
+ dpp2_program_input_csc(dpp_base, color_space, select, NULL);
if (force_disable_cursor) {
REG_UPDATE(CURSOR_CONTROL,
@@ -458,7 +458,7 @@ static struct dpp_funcs dcn20_dpp_funcs = {
.dpp_reset = dpp_reset,
.dpp_set_scaler = dpp1_dscl_set_scaler_manual_scale,
.dpp_get_optimal_number_of_taps = dpp1_get_optimal_number_of_taps,
- .dpp_set_gamut_remap = dpp1_cm_set_gamut_remap,
+ .dpp_set_gamut_remap = dpp2_cm_set_gamut_remap,
.dpp_set_csc_adjustment = NULL,
.dpp_set_csc_default = NULL,
.dpp_program_regamma_pwl = oppn20_dummy_program_regamma_pwl,
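
Reviewer note on dpp2_cnv_setup() above: with the rename to enum dcn20_input_csc_select, YCbCr video surface formats route through input CSC coefficient set A while RGB formats bypass the input CSC; the custom-matrix path applies the same rule by comparing against COLOR_SPACE_YCBCR601. A one-line sketch of that rule, with placeholder enum values (the ordering assumption, YCbCr spaces comparing >= COLOR_SPACE_YCBCR601, is taken from the patch's comparison):

enum { COLOR_SPACE_YCBCR601 = 4 };	/* placeholder value */
enum { DCN2_ICSC_SELECT_BYPASS = 0, DCN2_ICSC_SELECT_ICSC_A = 1 };

static int icsc_select_for(int color_space)
{
	return color_space >= COLOR_SPACE_YCBCR601 ? DCN2_ICSC_SELECT_ICSC_A
						   : DCN2_ICSC_SELECT_BYPASS;
}
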
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dpp.h b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dpp.h
index 5b03b737b1d6..27610251c57f 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dpp.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dpp.h
@@ -150,6 +150,16 @@
SRI(CM_SHAPER_RAMA_REGION_32_33, CM, id), \
SRI(CM_SHAPER_LUT_INDEX, CM, id)
+#define TF_REG_LIST_DCN20_COMMON_APPEND(id) \
+ SRI(CM_GAMUT_REMAP_B_C11_C12, CM, id),\
+ SRI(CM_GAMUT_REMAP_B_C13_C14, CM, id),\
+ SRI(CM_GAMUT_REMAP_B_C21_C22, CM, id),\
+ SRI(CM_GAMUT_REMAP_B_C23_C24, CM, id),\
+ SRI(CM_GAMUT_REMAP_B_C31_C32, CM, id),\
+ SRI(CM_GAMUT_REMAP_B_C33_C34, CM, id),\
+ SRI(CM_ICSC_B_C11_C12, CM, id), \
+ SRI(CM_ICSC_B_C33_C34, CM, id)
+
#define TF_REG_LIST_DCN20(id) \
TF_REG_LIST_DCN(id), \
TF_REG_LIST_DCN20_COMMON(id), \
@@ -572,10 +582,29 @@
TF_SF(DSCL0_OBUF_MEM_PWR_CTRL, OBUF_MEM_PWR_FORCE, mask_sh),\
TF_SF(DSCL0_DSCL_MEM_PWR_CTRL, LUT_MEM_PWR_FORCE, mask_sh)
+/* DPP CM debug status register:
+ *
+ * Status index including current ICSC, Gamut Remap Mode is 9
+ * ICSC Mode: [4..3]
+ * Gamut Remap Mode: [10..9]
+ */
+#define CM_TEST_DEBUG_DATA_STATUS_IDX 9
+
+#define TF_DEBUG_REG_LIST_SH_DCN20 \
+ TF_DEBUG_REG_LIST_SH_DCN10, \
+ .CM_TEST_DEBUG_DATA_ICSC_MODE = 3, \
+ .CM_TEST_DEBUG_DATA_GAMUT_REMAP_MODE = 9
+
+#define TF_DEBUG_REG_LIST_MASK_DCN20 \
+ TF_DEBUG_REG_LIST_MASK_DCN10, \
+ .CM_TEST_DEBUG_DATA_ICSC_MODE = 0x18, \
+ .CM_TEST_DEBUG_DATA_GAMUT_REMAP_MODE = 0x600
#define TF_REG_FIELD_LIST_DCN2_0(type) \
TF_REG_FIELD_LIST(type) \
type CM_BLNDGAM_LUT_DATA; \
+ type CM_TEST_DEBUG_DATA_ICSC_MODE; \
+ type CM_TEST_DEBUG_DATA_GAMUT_REMAP_MODE; \
type FORMAT_CNV16; \
type CNVC_BYPASS_MSB_ALIGN; \
type CLAMP_POSITIVE; \
@@ -630,11 +659,22 @@ struct dcn2_dpp_mask {
uint32_t COLOR_KEYER_RED; \
uint32_t COLOR_KEYER_GREEN; \
uint32_t COLOR_KEYER_BLUE; \
- uint32_t OBUF_MEM_PWR_CTRL;\
+ uint32_t OBUF_MEM_PWR_CTRL; \
uint32_t DSCL_MEM_PWR_CTRL
+#define DPP_DCN2_REG_VARIABLE_LIST_CM_APPEND \
+ uint32_t CM_GAMUT_REMAP_B_C11_C12; \
+ uint32_t CM_GAMUT_REMAP_B_C13_C14; \
+ uint32_t CM_GAMUT_REMAP_B_C21_C22; \
+ uint32_t CM_GAMUT_REMAP_B_C23_C24; \
+ uint32_t CM_GAMUT_REMAP_B_C31_C32; \
+ uint32_t CM_GAMUT_REMAP_B_C33_C34; \
+ uint32_t CM_ICSC_B_C11_C12; \
+ uint32_t CM_ICSC_B_C33_C34
+
struct dcn2_dpp_registers {
DPP_DCN2_REG_VARIABLE_LIST;
+ DPP_DCN2_REG_VARIABLE_LIST_CM_APPEND;
};
struct dcn20_dpp {
@@ -656,6 +696,18 @@ struct dcn20_dpp {
struct pwl_params pwl_data;
};
+enum dcn20_input_csc_select {
+ DCN2_ICSC_SELECT_BYPASS = 0,
+ DCN2_ICSC_SELECT_ICSC_A = 1,
+ DCN2_ICSC_SELECT_ICSC_B = 2
+};
+
+enum dcn20_gamut_remap_select {
+ DCN2_GAMUT_REMAP_BYPASS = 0,
+ DCN2_GAMUT_REMAP_COEF_A = 1,
+ DCN2_GAMUT_REMAP_COEF_B = 2
+};
+
void dpp20_read_state(struct dpp *dpp_base,
struct dcn_dpp_state *s);
@@ -667,6 +719,16 @@ void dpp2_set_degamma(
struct dpp *dpp_base,
enum ipp_degamma_mode mode);
+void dpp2_cm_set_gamut_remap(
+ struct dpp *dpp_base,
+ const struct dpp_grph_csc_adjustment *adjust);
+
+void dpp2_program_input_csc(
+ struct dpp *dpp_base,
+ enum dc_color_space color_space,
+ enum dcn20_input_csc_select input_select,
+ const struct out_csc_color_matrix *tbl_entry);
+
bool dpp20_program_blnd_lut(
struct dpp *dpp_base, const struct pwl_params *params);
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dpp_cm.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dpp_cm.c
index 2d112c316424..8dc3d1f73984 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dpp_cm.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dpp_cm.c
@@ -36,6 +36,9 @@
#define REG(reg)\
dpp->tf_regs->reg
+#define IND_REG(index) \
+ (index)
+
#define CTX \
dpp->base.ctx
@@ -44,9 +47,6 @@
dpp->tf_shift->field_name, dpp->tf_mask->field_name
-
-
-
static void dpp2_enable_cm_block(
struct dpp *dpp_base)
{
@@ -149,12 +149,164 @@ void dpp2_set_degamma(
case IPP_DEGAMMA_MODE_HW_xvYCC:
REG_UPDATE(CM_DGAM_CONTROL, CM_DGAM_LUT_MODE, 2);
break;
+ case IPP_DEGAMMA_MODE_USER_PWL:
+ REG_UPDATE(CM_DGAM_CONTROL, CM_DGAM_LUT_MODE, 3);
+ break;
default:
BREAK_TO_DEBUGGER();
break;
}
}
+static void program_gamut_remap(
+ struct dcn20_dpp *dpp,
+ const uint16_t *regval,
+ enum dcn20_gamut_remap_select select)
+{
+ uint32_t cur_select = 0;
+ struct color_matrices_reg gam_regs;
+
+ if (regval == NULL || select == DCN2_GAMUT_REMAP_BYPASS) {
+ REG_SET(CM_GAMUT_REMAP_CONTROL, 0,
+ CM_GAMUT_REMAP_MODE, 0);
+ return;
+ }
+
+ /* determine which gamut_remap coefficients (A or B) we are using
+ * currently. select the alternate set to double buffer
+ * the update so gamut_remap is updated on frame boundary
+ */
+ IX_REG_GET(CM_TEST_DEBUG_INDEX, CM_TEST_DEBUG_DATA,
+ CM_TEST_DEBUG_DATA_STATUS_IDX,
+ CM_TEST_DEBUG_DATA_GAMUT_REMAP_MODE, &cur_select);
+
+ /* value stored in dbg reg will be 1 greater than mode we want */
+ if (cur_select != DCN2_GAMUT_REMAP_COEF_A)
+ select = DCN2_GAMUT_REMAP_COEF_A;
+ else
+ select = DCN2_GAMUT_REMAP_COEF_B;
+
+ gam_regs.shifts.csc_c11 = dpp->tf_shift->CM_GAMUT_REMAP_C11;
+ gam_regs.masks.csc_c11 = dpp->tf_mask->CM_GAMUT_REMAP_C11;
+ gam_regs.shifts.csc_c12 = dpp->tf_shift->CM_GAMUT_REMAP_C12;
+ gam_regs.masks.csc_c12 = dpp->tf_mask->CM_GAMUT_REMAP_C12;
+
+ if (select == DCN2_GAMUT_REMAP_COEF_A) {
+ gam_regs.csc_c11_c12 = REG(CM_GAMUT_REMAP_C11_C12);
+ gam_regs.csc_c33_c34 = REG(CM_GAMUT_REMAP_C33_C34);
+ } else {
+ gam_regs.csc_c11_c12 = REG(CM_GAMUT_REMAP_B_C11_C12);
+ gam_regs.csc_c33_c34 = REG(CM_GAMUT_REMAP_B_C33_C34);
+ }
+
+ cm_helper_program_color_matrices(
+ dpp->base.ctx,
+ regval,
+ &gam_regs);
+
+ REG_SET(
+ CM_GAMUT_REMAP_CONTROL, 0,
+ CM_GAMUT_REMAP_MODE, select);
+
+}
+
+void dpp2_cm_set_gamut_remap(
+ struct dpp *dpp_base,
+ const struct dpp_grph_csc_adjustment *adjust)
+{
+ struct dcn20_dpp *dpp = TO_DCN20_DPP(dpp_base);
+ int i = 0;
+
+ if (adjust->gamut_adjust_type != GRAPHICS_GAMUT_ADJUST_TYPE_SW)
+ /* Bypass if type is bypass or hw */
+ program_gamut_remap(dpp, NULL, DCN2_GAMUT_REMAP_BYPASS);
+ else {
+ struct fixed31_32 arr_matrix[12];
+ uint16_t arr_reg_val[12];
+
+ for (i = 0; i < 12; i++)
+ arr_matrix[i] = adjust->temperature_matrix[i];
+
+ convert_float_matrix(
+ arr_reg_val, arr_matrix, 12);
+
+ program_gamut_remap(dpp, arr_reg_val, DCN2_GAMUT_REMAP_COEF_A);
+ }
+}
+
+void dpp2_program_input_csc(
+ struct dpp *dpp_base,
+ enum dc_color_space color_space,
+ enum dcn20_input_csc_select input_select,
+ const struct out_csc_color_matrix *tbl_entry)
+{
+ struct dcn20_dpp *dpp = TO_DCN20_DPP(dpp_base);
+ int i;
+ int arr_size = sizeof(dpp_input_csc_matrix)/sizeof(struct dpp_input_csc_matrix);
+ const uint16_t *regval = NULL;
+ uint32_t cur_select = 0;
+ enum dcn20_input_csc_select select;
+ struct color_matrices_reg icsc_regs;
+
+ if (input_select == DCN2_ICSC_SELECT_BYPASS) {
+ REG_SET(CM_ICSC_CONTROL, 0, CM_ICSC_MODE, 0);
+ return;
+ }
+
+ if (tbl_entry == NULL) {
+ for (i = 0; i < arr_size; i++)
+ if (dpp_input_csc_matrix[i].color_space == color_space) {
+ regval = dpp_input_csc_matrix[i].regval;
+ break;
+ }
+
+ if (regval == NULL) {
+ BREAK_TO_DEBUGGER();
+ return;
+ }
+ } else {
+ regval = tbl_entry->regval;
+ }
+
+ /* determine which CSC coefficients (A or B) we are using
+ * currently. select the alternate set to double buffer
+ * the CSC update so CSC is updated on frame boundary
+ */
+ IX_REG_GET(CM_TEST_DEBUG_INDEX, CM_TEST_DEBUG_DATA,
+ CM_TEST_DEBUG_DATA_STATUS_IDX,
+ CM_TEST_DEBUG_DATA_ICSC_MODE, &cur_select);
+
+ if (cur_select != DCN2_ICSC_SELECT_ICSC_A)
+ select = DCN2_ICSC_SELECT_ICSC_A;
+ else
+ select = DCN2_ICSC_SELECT_ICSC_B;
+
+ icsc_regs.shifts.csc_c11 = dpp->tf_shift->CM_ICSC_C11;
+ icsc_regs.masks.csc_c11 = dpp->tf_mask->CM_ICSC_C11;
+ icsc_regs.shifts.csc_c12 = dpp->tf_shift->CM_ICSC_C12;
+ icsc_regs.masks.csc_c12 = dpp->tf_mask->CM_ICSC_C12;
+
+ if (select == DCN2_ICSC_SELECT_ICSC_A) {
+
+ icsc_regs.csc_c11_c12 = REG(CM_ICSC_C11_C12);
+ icsc_regs.csc_c33_c34 = REG(CM_ICSC_C33_C34);
+
+ } else {
+
+ icsc_regs.csc_c11_c12 = REG(CM_ICSC_B_C11_C12);
+ icsc_regs.csc_c33_c34 = REG(CM_ICSC_B_C33_C34);
+
+ }
+
+ cm_helper_program_color_matrices(
+ dpp->base.ctx,
+ regval,
+ &icsc_regs);
+
+ REG_SET(CM_ICSC_CONTROL, 0,
+ CM_ICSC_MODE, select);
+}
+
static void dpp20_power_on_blnd_lut(
struct dpp *dpp_base,
bool power_on)
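
Reviewer note on the two new CM programming paths above: both dpp2_cm_set_gamut_remap() and dpp2_program_input_csc() read the live coefficient set from the CM_TEST_DEBUG_DATA status word (ICSC mode in bits [4:3], gamut-remap mode in bits [10:9], per the dcn20_dpp.h comment), then program and select the alternate set so the switch lands on a frame boundary. A standalone sketch of just the selection rule:

#include <assert.h>

enum coef_set { SET_BYPASS = 0, SET_A = 1, SET_B = 2 };

/* Mirror of the select logic in dpp2_program_input_csc(): program
 * whichever set is not currently live; bypass counts as "not A",
 * so A is the first set brought into service. */
static enum coef_set pick_alternate(enum coef_set live)
{
	return live != SET_A ? SET_A : SET_B;
}

int main(void)
{
	assert(pick_alternate(SET_BYPASS) == SET_A);
	assert(pick_alternate(SET_A) == SET_B);
	assert(pick_alternate(SET_B) == SET_A);
	return 0;
}
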
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dsc.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dsc.c
index 63eb377ed9c0..6bdfee20b6a7 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dsc.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dsc.c
@@ -23,7 +23,6 @@
*
*/
-#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT
#include "reg_helper.h"
#include "dcn20_dsc.h"
#include "dsc/dscc_types.h"
@@ -207,6 +206,9 @@ static bool dsc2_get_packed_pps(struct display_stream_compressor *dsc, const str
struct dsc_reg_values dsc_reg_vals;
struct dsc_optc_config dsc_optc_cfg;
+ memset(&dsc_reg_vals, 0, sizeof(dsc_reg_vals));
+ memset(&dsc_optc_cfg, 0, sizeof(dsc_optc_cfg));
+
DC_LOG_DSC("Getting packed DSC PPS for DSC Config:");
dsc_config_log(dsc, dsc_cfg);
DC_LOG_DSC("DSC Picture Parameter Set (PPS):");
@@ -222,9 +224,18 @@ static bool dsc2_get_packed_pps(struct display_stream_compressor *dsc, const str
static void dsc2_enable(struct display_stream_compressor *dsc, int opp_pipe)
{
struct dcn20_dsc *dsc20 = TO_DCN20_DSC(dsc);
+ int dsc_clock_en;
+ int dsc_fw_config;
+ int enabled_opp_pipe;
+
+ DC_LOG_DSC("enable DSC %d at opp pipe %d", dsc->inst, opp_pipe);
- /* TODO Check if DSC alreay in use? */
- DC_LOG_DSC("enable DSC at opp pipe %d", opp_pipe);
+ REG_GET(DSC_TOP_CONTROL, DSC_CLOCK_EN, &dsc_clock_en);
+ REG_GET_2(DSCRM_DSC_FORWARD_CONFIG, DSCRM_DSC_FORWARD_EN, &dsc_fw_config, DSCRM_DSC_OPP_PIPE_SOURCE, &enabled_opp_pipe);
+ if ((dsc_clock_en || dsc_fw_config) && enabled_opp_pipe != opp_pipe) {
+ DC_LOG_DSC("ERROR: DSC %d at opp pipe %d already enabled!", dsc->inst, enabled_opp_pipe);
+ ASSERT(0);
+ }
REG_UPDATE(DSC_TOP_CONTROL,
DSC_CLOCK_EN, 1);
@@ -238,8 +249,18 @@ static void dsc2_enable(struct display_stream_compressor *dsc, int opp_pipe)
static void dsc2_disable(struct display_stream_compressor *dsc)
{
struct dcn20_dsc *dsc20 = TO_DCN20_DSC(dsc);
+ int dsc_clock_en;
+ int dsc_fw_config;
+ int enabled_opp_pipe;
- DC_LOG_DSC("disable DSC");
+ DC_LOG_DSC("disable DSC %d", dsc->inst);
+
+ REG_GET(DSC_TOP_CONTROL, DSC_CLOCK_EN, &dsc_clock_en);
+ REG_GET_2(DSCRM_DSC_FORWARD_CONFIG, DSCRM_DSC_FORWARD_EN, &dsc_fw_config, DSCRM_DSC_OPP_PIPE_SOURCE, &enabled_opp_pipe);
+ if (!dsc_clock_en || !dsc_fw_config) {
+ DC_LOG_DSC("ERROR: DSC %d at opp pipe %d already disabled!", dsc->inst, enabled_opp_pipe);
+ ASSERT(0);
+ }
REG_UPDATE(DSCRM_DSC_FORWARD_CONFIG,
DSCRM_DSC_FORWARD_EN, 0);
@@ -715,4 +736,3 @@ static void dsc_write_to_registers(struct display_stream_compressor *dsc, const
}
}
-#endif
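
Reviewer note on dsc2_enable()/dsc2_disable() above: both now read back DSC_CLOCK_EN and the DSCRM forwarding config before acting, asserting if a DSC instance is being enabled while already bound to a different OPP pipe, or disabled while not enabled at all. A standalone sketch of the enable-side check; the helper name and bool parameters are illustrative stand-ins for the REG_GET reads in the patch.

#include <assert.h>
#include <stdbool.h>

/* An instance that is clocked or forwarding, but to a different OPP
 * pipe, points at a resource-assignment bug rather than a benign
 * re-enable. */
static void check_dsc_free_for(bool clock_en, bool forward_en,
			       int enabled_opp_pipe, int new_opp_pipe)
{
	if ((clock_en || forward_en) && enabled_opp_pipe != new_opp_pipe)
		assert(!"DSC already enabled on another OPP pipe");
}
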
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dsc.h b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dsc.h
index 4e2fb38390a4..9855a7ed0387 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dsc.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dsc.h
@@ -21,7 +21,6 @@
* Authors: AMD
*
*/
-#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT
#ifndef __DCN20_DSC_H__
#define __DCN20_DSC_H__
@@ -572,4 +571,3 @@ void dsc2_construct(struct dcn20_dsc *dsc,
#endif
-#endif
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubbub.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubbub.c
index 8b8438566101..9235f7d29454 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubbub.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubbub.c
@@ -293,6 +293,9 @@ bool hubbub2_get_dcc_compression_cap(struct hubbub *hubbub,
output->grph.rgb.max_compressed_blk_size = 64;
output->grph.rgb.independent_64b_blks = true;
break;
+ default:
+ ASSERT(false);
+ break;
}
output->capable = true;
output->const_color_support = true;
@@ -601,7 +604,8 @@ static const struct hubbub_funcs hubbub2_funcs = {
.wm_read_state = hubbub2_wm_read_state,
.get_dchub_ref_freq = hubbub2_get_dchub_ref_freq,
.program_watermarks = hubbub2_program_watermarks,
- .allow_self_refresh_control = hubbub1_allow_self_refresh_control
+ .is_allow_self_refresh_enabled = hubbub1_is_allow_self_refresh_enabled,
+ .allow_self_refresh_control = hubbub1_allow_self_refresh_control,
};
void hubbub2_construct(struct dcn20_hubbub *hubbub,
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubp.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubp.c
index 69e2aae42394..84d7ac5dd206 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubp.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubp.c
@@ -30,6 +30,8 @@
#include "reg_helper.h"
#include "basics/conversion.h"
+#define DC_LOGGER_INIT(logger)
+
#define REG(reg)\
hubp2->hubp_regs->reg
@@ -483,7 +485,6 @@ void hubp2_program_pixel_format(
REG_UPDATE(DCSURF_SURFACE_CONFIG,
SURFACE_PIXEL_FORMAT, 12);
break;
-#if defined(CONFIG_DRM_AMD_DC_DCN2_0)
case SURFACE_PIXEL_FORMAT_GRPH_RGB111110_FIX:
REG_UPDATE(DCSURF_SURFACE_CONFIG,
SURFACE_PIXEL_FORMAT, 112);
@@ -504,7 +505,6 @@ void hubp2_program_pixel_format(
REG_UPDATE(DCSURF_SURFACE_CONFIG,
SURFACE_PIXEL_FORMAT, 119);
break;
-#endif
default:
BREAK_TO_DEBUGGER();
break;
@@ -1204,6 +1204,9 @@ void hubp2_read_state_common(struct hubp *hubp)
HUBP_TTU_DISABLE, &s->ttu_disable,
HUBP_UNDERFLOW_STATUS, &s->underflow_status);
+ REG_GET(HUBP_CLK_CNTL,
+ HUBP_CLOCK_ENABLE, &s->clock_en);
+
REG_GET(DCN_GLOBAL_TTU_CNTL,
MIN_TTU_VBLANK, &s->min_ttu_vblank);
@@ -1243,6 +1246,314 @@ void hubp2_read_state(struct hubp *hubp)
}
+void hubp2_validate_dml_output(struct hubp *hubp,
+ struct dc_context *ctx,
+ struct _vcs_dpi_display_rq_regs_st *dml_rq_regs,
+ struct _vcs_dpi_display_dlg_regs_st *dml_dlg_attr,
+ struct _vcs_dpi_display_ttu_regs_st *dml_ttu_attr)
+{
+ struct dcn20_hubp *hubp2 = TO_DCN20_HUBP(hubp);
+ struct _vcs_dpi_display_rq_regs_st rq_regs = {0};
+ struct _vcs_dpi_display_dlg_regs_st dlg_attr = {0};
+ struct _vcs_dpi_display_ttu_regs_st ttu_attr = {0};
+ DC_LOGGER_INIT(ctx->logger);
+ DC_LOG_DEBUG("DML Validation | Running Validation");
+
+ /* Requestor Regs */
+ REG_GET(HUBPRET_CONTROL,
+ DET_BUF_PLANE1_BASE_ADDRESS, &rq_regs.plane1_base_address);
+ REG_GET_4(DCN_EXPANSION_MODE,
+ DRQ_EXPANSION_MODE, &rq_regs.drq_expansion_mode,
+ PRQ_EXPANSION_MODE, &rq_regs.prq_expansion_mode,
+ MRQ_EXPANSION_MODE, &rq_regs.mrq_expansion_mode,
+ CRQ_EXPANSION_MODE, &rq_regs.crq_expansion_mode);
+ REG_GET_8(DCHUBP_REQ_SIZE_CONFIG,
+ CHUNK_SIZE, &rq_regs.rq_regs_l.chunk_size,
+ MIN_CHUNK_SIZE, &rq_regs.rq_regs_l.min_chunk_size,
+ META_CHUNK_SIZE, &rq_regs.rq_regs_l.meta_chunk_size,
+ MIN_META_CHUNK_SIZE, &rq_regs.rq_regs_l.min_meta_chunk_size,
+ DPTE_GROUP_SIZE, &rq_regs.rq_regs_l.dpte_group_size,
+ MPTE_GROUP_SIZE, &rq_regs.rq_regs_l.mpte_group_size,
+ SWATH_HEIGHT, &rq_regs.rq_regs_l.swath_height,
+ PTE_ROW_HEIGHT_LINEAR, &rq_regs.rq_regs_l.pte_row_height_linear);
+ REG_GET_8(DCHUBP_REQ_SIZE_CONFIG_C,
+ CHUNK_SIZE_C, &rq_regs.rq_regs_c.chunk_size,
+ MIN_CHUNK_SIZE_C, &rq_regs.rq_regs_c.min_chunk_size,
+ META_CHUNK_SIZE_C, &rq_regs.rq_regs_c.meta_chunk_size,
+ MIN_META_CHUNK_SIZE_C, &rq_regs.rq_regs_c.min_meta_chunk_size,
+ DPTE_GROUP_SIZE_C, &rq_regs.rq_regs_c.dpte_group_size,
+ MPTE_GROUP_SIZE_C, &rq_regs.rq_regs_c.mpte_group_size,
+ SWATH_HEIGHT_C, &rq_regs.rq_regs_c.swath_height,
+ PTE_ROW_HEIGHT_LINEAR_C, &rq_regs.rq_regs_c.pte_row_height_linear);
+
+ if (rq_regs.plane1_base_address != dml_rq_regs->plane1_base_address)
+ DC_LOG_DEBUG("DML Validation | HUBPRET_CONTROL:DET_BUF_PLANE1_BASE_ADDRESS - Expected: %u Actual: %u\n",
+ dml_rq_regs->plane1_base_address, rq_regs.plane1_base_address);
+ if (rq_regs.drq_expansion_mode != dml_rq_regs->drq_expansion_mode)
+ DC_LOG_DEBUG("DML Validation | DCN_EXPANSION_MODE:DRQ_EXPANSION_MODE - Expected: %u Actual: %u\n",
+ dml_rq_regs->drq_expansion_mode, rq_regs.drq_expansion_mode);
+ if (rq_regs.prq_expansion_mode != dml_rq_regs->prq_expansion_mode)
+ DC_LOG_DEBUG("DML Validation | DCN_EXPANSION_MODE:MRQ_EXPANSION_MODE - Expected: %u Actual: %u\n",
+ dml_rq_regs->prq_expansion_mode, rq_regs.prq_expansion_mode);
+ if (rq_regs.mrq_expansion_mode != dml_rq_regs->mrq_expansion_mode)
+ DC_LOG_DEBUG("DML Validation | DCN_EXPANSION_MODE:DET_BUF_PLANE1_BASE_ADDRESS - Expected: %u Actual: %u\n",
+ dml_rq_regs->mrq_expansion_mode, rq_regs.mrq_expansion_mode);
+ if (rq_regs.crq_expansion_mode != dml_rq_regs->crq_expansion_mode)
+ DC_LOG_DEBUG("DML Validation | DCN_EXPANSION_MODE:CRQ_EXPANSION_MODE - Expected: %u Actual: %u\n",
+ dml_rq_regs->crq_expansion_mode, rq_regs.crq_expansion_mode);
+
+ if (rq_regs.rq_regs_l.chunk_size != dml_rq_regs->rq_regs_l.chunk_size)
+ DC_LOG_DEBUG("DML Validation | DCHUBP_REQ_SIZE_CONFIG:CHUNK_SIZE - Expected: %u Actual: %u\n",
+ dml_rq_regs->rq_regs_l.chunk_size, rq_regs.rq_regs_l.chunk_size);
+ if (rq_regs.rq_regs_l.min_chunk_size != dml_rq_regs->rq_regs_l.min_chunk_size)
+ DC_LOG_DEBUG("DML Validation | DCHUBP_REQ_SIZE_CONFIG:MIN_CHUNK_SIZE - Expected: %u Actual: %u\n",
+ dml_rq_regs->rq_regs_l.min_chunk_size, rq_regs.rq_regs_l.min_chunk_size);
+ if (rq_regs.rq_regs_l.meta_chunk_size != dml_rq_regs->rq_regs_l.meta_chunk_size)
+ DC_LOG_DEBUG("DML Validation | DCHUBP_REQ_SIZE_CONFIG:META_CHUNK_SIZE - Expected: %u Actual: %u\n",
+ dml_rq_regs->rq_regs_l.meta_chunk_size, rq_regs.rq_regs_l.meta_chunk_size);
+ if (rq_regs.rq_regs_l.min_meta_chunk_size != dml_rq_regs->rq_regs_l.min_meta_chunk_size)
+ DC_LOG_DEBUG("DML Validation | DCHUBP_REQ_SIZE_CONFIG:MIN_META_CHUNK_SIZE - Expected: %u Actual: %u\n",
+ dml_rq_regs->rq_regs_l.min_meta_chunk_size, rq_regs.rq_regs_l.min_meta_chunk_size);
+ if (rq_regs.rq_regs_l.dpte_group_size != dml_rq_regs->rq_regs_l.dpte_group_size)
+ DC_LOG_DEBUG("DML Validation | DCHUBP_REQ_SIZE_CONFIG:DPTE_GROUP_SIZE - Expected: %u Actual: %u\n",
+ dml_rq_regs->rq_regs_l.dpte_group_size, rq_regs.rq_regs_l.dpte_group_size);
+ if (rq_regs.rq_regs_l.mpte_group_size != dml_rq_regs->rq_regs_l.mpte_group_size)
+ DC_LOG_DEBUG("DML Validation | DCHUBP_REQ_SIZE_CONFIG:MPTE_GROUP_SIZE - Expected: %u Actual: %u\n",
+ dml_rq_regs->rq_regs_l.mpte_group_size, rq_regs.rq_regs_l.mpte_group_size);
+ if (rq_regs.rq_regs_l.swath_height != dml_rq_regs->rq_regs_l.swath_height)
+ DC_LOG_DEBUG("DML Validation | DCHUBP_REQ_SIZE_CONFIG:SWATH_HEIGHT - Expected: %u Actual: %u\n",
+ dml_rq_regs->rq_regs_l.swath_height, rq_regs.rq_regs_l.swath_height);
+ if (rq_regs.rq_regs_l.pte_row_height_linear != dml_rq_regs->rq_regs_l.pte_row_height_linear)
+ DC_LOG_DEBUG("DML Validation | DCHUBP_REQ_SIZE_CONFIG:PTE_ROW_HEIGHT_LINEAR - Expected: %u Actual: %u\n",
+ dml_rq_regs->rq_regs_l.pte_row_height_linear, rq_regs.rq_regs_l.pte_row_height_linear);
+
+ if (rq_regs.rq_regs_c.chunk_size != dml_rq_regs->rq_regs_c.chunk_size)
+ DC_LOG_DEBUG("DML Validation | DCHUBP_REQ_SIZE_CONFIG_C:CHUNK_SIZE_C - Expected: %u Actual: %u\n",
+ dml_rq_regs->rq_regs_c.chunk_size, rq_regs.rq_regs_c.chunk_size);
+ if (rq_regs.rq_regs_c.min_chunk_size != dml_rq_regs->rq_regs_c.min_chunk_size)
+ DC_LOG_DEBUG("DML Validation | DCHUBP_REQ_SIZE_CONFIG_C:MIN_CHUNK_SIZE_C - Expected: %u Actual: %u\n",
+ dml_rq_regs->rq_regs_c.min_chunk_size, rq_regs.rq_regs_c.min_chunk_size);
+ if (rq_regs.rq_regs_c.meta_chunk_size != dml_rq_regs->rq_regs_c.meta_chunk_size)
+ DC_LOG_DEBUG("DML Validation | DCHUBP_REQ_SIZE_CONFIG_C:META_CHUNK_SIZE_C - Expected: %u Actual: %u\n",
+ dml_rq_regs->rq_regs_c.meta_chunk_size, rq_regs.rq_regs_c.meta_chunk_size);
+ if (rq_regs.rq_regs_c.min_meta_chunk_size != dml_rq_regs->rq_regs_c.min_meta_chunk_size)
+ DC_LOG_DEBUG("DML Validation | DCHUBP_REQ_SIZE_CONFIG_C:MIN_META_CHUNK_SIZE_C - Expected: %u Actual: %u\n",
+ dml_rq_regs->rq_regs_c.min_meta_chunk_size, rq_regs.rq_regs_c.min_meta_chunk_size);
+ if (rq_regs.rq_regs_c.dpte_group_size != dml_rq_regs->rq_regs_c.dpte_group_size)
+ DC_LOG_DEBUG("DML Validation | DCHUBP_REQ_SIZE_CONFIG_C:DPTE_GROUP_SIZE_C - Expected: %u Actual: %u\n",
+ dml_rq_regs->rq_regs_c.dpte_group_size, rq_regs.rq_regs_c.dpte_group_size);
+ if (rq_regs.rq_regs_c.mpte_group_size != dml_rq_regs->rq_regs_c.mpte_group_size)
+ DC_LOG_DEBUG("DML Validation | DCHUBP_REQ_SIZE_CONFIG_C:MPTE_GROUP_SIZE_C - Expected: %u Actual: %u\n",
+ dml_rq_regs->rq_regs_c.mpte_group_size, rq_regs.rq_regs_c.mpte_group_size);
+ if (rq_regs.rq_regs_c.swath_height != dml_rq_regs->rq_regs_c.swath_height)
+ DC_LOG_DEBUG("DML Validation | DCHUBP_REQ_SIZE_CONFIG_C:SWATH_HEIGHT_C - Expected: %u Actual: %u\n",
+ dml_rq_regs->rq_regs_c.swath_height, rq_regs.rq_regs_c.swath_height);
+ if (rq_regs.rq_regs_c.pte_row_height_linear != dml_rq_regs->rq_regs_c.pte_row_height_linear)
+ DC_LOG_DEBUG("DML Validation | DCHUBP_REQ_SIZE_CONFIG_C:PTE_ROW_HEIGHT_LINEAR_C - Expected: %u Actual: %u\n",
+ dml_rq_regs->rq_regs_c.pte_row_height_linear, rq_regs.rq_regs_c.pte_row_height_linear);
+
+ /* DLG - Per hubp */
+ REG_GET_2(BLANK_OFFSET_0,
+ REFCYC_H_BLANK_END, &dlg_attr.refcyc_h_blank_end,
+ DLG_V_BLANK_END, &dlg_attr.dlg_vblank_end);
+ REG_GET(BLANK_OFFSET_1,
+ MIN_DST_Y_NEXT_START, &dlg_attr.min_dst_y_next_start);
+ REG_GET(DST_DIMENSIONS,
+ REFCYC_PER_HTOTAL, &dlg_attr.refcyc_per_htotal);
+ REG_GET_2(DST_AFTER_SCALER,
+ REFCYC_X_AFTER_SCALER, &dlg_attr.refcyc_x_after_scaler,
+ DST_Y_AFTER_SCALER, &dlg_attr.dst_y_after_scaler);
+ REG_GET(REF_FREQ_TO_PIX_FREQ,
+ REF_FREQ_TO_PIX_FREQ, &dlg_attr.ref_freq_to_pix_freq);
+
+ if (dlg_attr.refcyc_h_blank_end != dml_dlg_attr->refcyc_h_blank_end)
+ DC_LOG_DEBUG("DML Validation | BLANK_OFFSET_0:REFCYC_H_BLANK_END - Expected: %u Actual: %u\n",
+ dml_dlg_attr->refcyc_h_blank_end, dlg_attr.refcyc_h_blank_end);
+ if (dlg_attr.dlg_vblank_end != dml_dlg_attr->dlg_vblank_end)
+ DC_LOG_DEBUG("DML Validation | BLANK_OFFSET_0:DLG_V_BLANK_END - Expected: %u Actual: %u\n",
+ dml_dlg_attr->dlg_vblank_end, dlg_attr.dlg_vblank_end);
+ if (dlg_attr.min_dst_y_next_start != dml_dlg_attr->min_dst_y_next_start)
+ DC_LOG_DEBUG("DML Validation | BLANK_OFFSET_1:MIN_DST_Y_NEXT_START - Expected: %u Actual: %u\n",
+ dml_dlg_attr->min_dst_y_next_start, dlg_attr.min_dst_y_next_start);
+ if (dlg_attr.refcyc_per_htotal != dml_dlg_attr->refcyc_per_htotal)
+ DC_LOG_DEBUG("DML Validation | DST_DIMENSIONS:REFCYC_PER_HTOTAL - Expected: %u Actual: %u\n",
+ dml_dlg_attr->refcyc_per_htotal, dlg_attr.refcyc_per_htotal);
+ if (dlg_attr.refcyc_x_after_scaler != dml_dlg_attr->refcyc_x_after_scaler)
+ DC_LOG_DEBUG("DML Validation | DST_AFTER_SCALER:REFCYC_X_AFTER_SCALER - Expected: %u Actual: %u\n",
+ dml_dlg_attr->refcyc_x_after_scaler, dlg_attr.refcyc_x_after_scaler);
+ if (dlg_attr.dst_y_after_scaler != dml_dlg_attr->dst_y_after_scaler)
+ DC_LOG_DEBUG("DML Validation | DST_AFTER_SCALER:DST_Y_AFTER_SCALER - Expected: %u Actual: %u\n",
+ dml_dlg_attr->dst_y_after_scaler, dlg_attr.dst_y_after_scaler);
+ if (dlg_attr.ref_freq_to_pix_freq != dml_dlg_attr->ref_freq_to_pix_freq)
+ DC_LOG_DEBUG("DML Validation | REF_FREQ_TO_PIX_FREQ:REF_FREQ_TO_PIX_FREQ - Expected: %u Actual: %u\n",
+ dml_dlg_attr->ref_freq_to_pix_freq, dlg_attr.ref_freq_to_pix_freq);
+
+ /* DLG - Per luma/chroma */
+ REG_GET(VBLANK_PARAMETERS_1,
+ REFCYC_PER_PTE_GROUP_VBLANK_L, &dlg_attr.refcyc_per_pte_group_vblank_l);
+ if (REG(NOM_PARAMETERS_0))
+ REG_GET(NOM_PARAMETERS_0,
+ DST_Y_PER_PTE_ROW_NOM_L, &dlg_attr.dst_y_per_pte_row_nom_l);
+ if (REG(NOM_PARAMETERS_1))
+ REG_GET(NOM_PARAMETERS_1,
+ REFCYC_PER_PTE_GROUP_NOM_L, &dlg_attr.refcyc_per_pte_group_nom_l);
+ REG_GET(NOM_PARAMETERS_4,
+ DST_Y_PER_META_ROW_NOM_L, &dlg_attr.dst_y_per_meta_row_nom_l);
+ REG_GET(NOM_PARAMETERS_5,
+ REFCYC_PER_META_CHUNK_NOM_L, &dlg_attr.refcyc_per_meta_chunk_nom_l);
+ REG_GET_2(PER_LINE_DELIVERY,
+ REFCYC_PER_LINE_DELIVERY_L, &dlg_attr.refcyc_per_line_delivery_l,
+ REFCYC_PER_LINE_DELIVERY_C, &dlg_attr.refcyc_per_line_delivery_c);
+ REG_GET_2(PER_LINE_DELIVERY_PRE,
+ REFCYC_PER_LINE_DELIVERY_PRE_L, &dlg_attr.refcyc_per_line_delivery_pre_l,
+ REFCYC_PER_LINE_DELIVERY_PRE_C, &dlg_attr.refcyc_per_line_delivery_pre_c);
+ REG_GET(VBLANK_PARAMETERS_2,
+ REFCYC_PER_PTE_GROUP_VBLANK_C, &dlg_attr.refcyc_per_pte_group_vblank_c);
+ if (REG(NOM_PARAMETERS_2))
+ REG_GET(NOM_PARAMETERS_2,
+ DST_Y_PER_PTE_ROW_NOM_C, &dlg_attr.dst_y_per_pte_row_nom_c);
+ if (REG(NOM_PARAMETERS_3))
+ REG_GET(NOM_PARAMETERS_3,
+ REFCYC_PER_PTE_GROUP_NOM_C, &dlg_attr.refcyc_per_pte_group_nom_c);
+ REG_GET(NOM_PARAMETERS_6,
+ DST_Y_PER_META_ROW_NOM_C, &dlg_attr.dst_y_per_meta_row_nom_c);
+ REG_GET(NOM_PARAMETERS_7,
+ REFCYC_PER_META_CHUNK_NOM_C, &dlg_attr.refcyc_per_meta_chunk_nom_c);
+ REG_GET(VBLANK_PARAMETERS_3,
+ REFCYC_PER_META_CHUNK_VBLANK_L, &dlg_attr.refcyc_per_meta_chunk_vblank_l);
+ REG_GET(VBLANK_PARAMETERS_4,
+ REFCYC_PER_META_CHUNK_VBLANK_C, &dlg_attr.refcyc_per_meta_chunk_vblank_c);
+
+ if (dlg_attr.refcyc_per_pte_group_vblank_l != dml_dlg_attr->refcyc_per_pte_group_vblank_l)
+ DC_LOG_DEBUG("DML Validation | VBLANK_PARAMETERS_1:REFCYC_PER_PTE_GROUP_VBLANK_L - Expected: %u Actual: %u\n",
+ dml_dlg_attr->refcyc_per_pte_group_vblank_l, dlg_attr.refcyc_per_pte_group_vblank_l);
+ if (dlg_attr.dst_y_per_pte_row_nom_l != dml_dlg_attr->dst_y_per_pte_row_nom_l)
+ DC_LOG_DEBUG("DML Validation | NOM_PARAMETERS_0:DST_Y_PER_PTE_ROW_NOM_L - Expected: %u Actual: %u\n",
+ dml_dlg_attr->dst_y_per_pte_row_nom_l, dlg_attr.dst_y_per_pte_row_nom_l);
+ if (dlg_attr.refcyc_per_pte_group_nom_l != dml_dlg_attr->refcyc_per_pte_group_nom_l)
+ DC_LOG_DEBUG("DML Validation | NOM_PARAMETERS_1:REFCYC_PER_PTE_GROUP_NOM_L - Expected: %u Actual: %u\n",
+ dml_dlg_attr->refcyc_per_pte_group_nom_l, dlg_attr.refcyc_per_pte_group_nom_l);
+ if (dlg_attr.dst_y_per_meta_row_nom_l != dml_dlg_attr->dst_y_per_meta_row_nom_l)
+ DC_LOG_DEBUG("DML Validation | NOM_PARAMETERS_4:DST_Y_PER_META_ROW_NOM_L - Expected: %u Actual: %u\n",
+ dml_dlg_attr->dst_y_per_meta_row_nom_l, dlg_attr.dst_y_per_meta_row_nom_l);
+ if (dlg_attr.refcyc_per_meta_chunk_nom_l != dml_dlg_attr->refcyc_per_meta_chunk_nom_l)
+ DC_LOG_DEBUG("DML Validation | NOM_PARAMETERS_5:REFCYC_PER_META_CHUNK_NOM_L - Expected: %u Actual: %u\n",
+ dml_dlg_attr->refcyc_per_meta_chunk_nom_l, dlg_attr.refcyc_per_meta_chunk_nom_l);
+ if (dlg_attr.refcyc_per_line_delivery_l != dml_dlg_attr->refcyc_per_line_delivery_l)
+ DC_LOG_DEBUG("DML Validation | PER_LINE_DELIVERY:REFCYC_PER_LINE_DELIVERY_L - Expected: %u Actual: %u\n",
+ dml_dlg_attr->refcyc_per_line_delivery_l, dlg_attr.refcyc_per_line_delivery_l);
+ if (dlg_attr.refcyc_per_line_delivery_c != dml_dlg_attr->refcyc_per_line_delivery_c)
+ DC_LOG_DEBUG("DML Validation | PER_LINE_DELIVERY:REFCYC_PER_LINE_DELIVERY_C - Expected: %u Actual: %u\n",
+ dml_dlg_attr->refcyc_per_line_delivery_c, dlg_attr.refcyc_per_line_delivery_c);
+ if (dlg_attr.refcyc_per_pte_group_vblank_c != dml_dlg_attr->refcyc_per_pte_group_vblank_c)
+ DC_LOG_DEBUG("DML Validation | VBLANK_PARAMETERS_2:REFCYC_PER_PTE_GROUP_VBLANK_C - Expected: %u Actual: %u\n",
+ dml_dlg_attr->refcyc_per_pte_group_vblank_c, dlg_attr.refcyc_per_pte_group_vblank_c);
+ if (dlg_attr.dst_y_per_pte_row_nom_c != dml_dlg_attr->dst_y_per_pte_row_nom_c)
+ DC_LOG_DEBUG("DML Validation | NOM_PARAMETERS_2:DST_Y_PER_PTE_ROW_NOM_C - Expected: %u Actual: %u\n",
+ dml_dlg_attr->dst_y_per_pte_row_nom_c, dlg_attr.dst_y_per_pte_row_nom_c);
+ if (dlg_attr.refcyc_per_pte_group_nom_c != dml_dlg_attr->refcyc_per_pte_group_nom_c)
+ DC_LOG_DEBUG("DML Validation | NOM_PARAMETERS_3:REFCYC_PER_PTE_GROUP_NOM_C - Expected: %u Actual: %u\n",
+ dml_dlg_attr->refcyc_per_pte_group_nom_c, dlg_attr.refcyc_per_pte_group_nom_c);
+ if (dlg_attr.dst_y_per_meta_row_nom_c != dml_dlg_attr->dst_y_per_meta_row_nom_c)
+ DC_LOG_DEBUG("DML Validation | NOM_PARAMETERS_6:DST_Y_PER_META_ROW_NOM_C - Expected: %u Actual: %u\n",
+ dml_dlg_attr->dst_y_per_meta_row_nom_c, dlg_attr.dst_y_per_meta_row_nom_c);
+ if (dlg_attr.refcyc_per_meta_chunk_nom_c != dml_dlg_attr->refcyc_per_meta_chunk_nom_c)
+ DC_LOG_DEBUG("DML Validation | NOM_PARAMETERS_7:REFCYC_PER_META_CHUNK_NOM_C - Expected: %u Actual: %u\n",
+ dml_dlg_attr->refcyc_per_meta_chunk_nom_c, dlg_attr.refcyc_per_meta_chunk_nom_c);
+ if (dlg_attr.refcyc_per_line_delivery_pre_l != dml_dlg_attr->refcyc_per_line_delivery_pre_l)
+ DC_LOG_DEBUG("DML Validation | PER_LINE_DELIVERY_PRE:REFCYC_PER_LINE_DELIVERY_PRE_L - Expected: %u Actual: %u\n",
+ dml_dlg_attr->refcyc_per_line_delivery_pre_l, dlg_attr.refcyc_per_line_delivery_pre_l);
+ if (dlg_attr.refcyc_per_line_delivery_pre_c != dml_dlg_attr->refcyc_per_line_delivery_pre_c)
+ DC_LOG_DEBUG("DML Validation | PER_LINE_DELIVERY_PRE:REFCYC_PER_LINE_DELIVERY_PRE_C - Expected: %u Actual: %u\n",
+ dml_dlg_attr->refcyc_per_line_delivery_pre_c, dlg_attr.refcyc_per_line_delivery_pre_c);
+ if (dlg_attr.refcyc_per_meta_chunk_vblank_l != dml_dlg_attr->refcyc_per_meta_chunk_vblank_l)
+ DC_LOG_DEBUG("DML Validation | VBLANK_PARAMETERS_3:REFCYC_PER_META_CHUNK_VBLANK_L - Expected: %u Actual: %u\n",
+ dml_dlg_attr->refcyc_per_meta_chunk_vblank_l, dlg_attr.refcyc_per_meta_chunk_vblank_l);
+ if (dlg_attr.refcyc_per_meta_chunk_vblank_c != dml_dlg_attr->refcyc_per_meta_chunk_vblank_c)
+ DC_LOG_DEBUG("DML Validation | VBLANK_PARAMETERS_4:REFCYC_PER_META_CHUNK_VBLANK_C - Expected: %u Actual: %u\n",
+ dml_dlg_attr->refcyc_per_meta_chunk_vblank_c, dlg_attr.refcyc_per_meta_chunk_vblank_c);
+
+ /* TTU - per hubp */
+ REG_GET_2(DCN_TTU_QOS_WM,
+ QoS_LEVEL_LOW_WM, &ttu_attr.qos_level_low_wm,
+ QoS_LEVEL_HIGH_WM, &ttu_attr.qos_level_high_wm);
+
+ if (ttu_attr.qos_level_low_wm != dml_ttu_attr->qos_level_low_wm)
+ DC_LOG_DEBUG("DML Validation | DCN_TTU_QOS_WM:QoS_LEVEL_LOW_WM - Expected: %u Actual: %u\n",
+ dml_ttu_attr->qos_level_low_wm, ttu_attr.qos_level_low_wm);
+ if (ttu_attr.qos_level_high_wm != dml_ttu_attr->qos_level_high_wm)
+ DC_LOG_DEBUG("DML Validation | DCN_TTU_QOS_WM:QoS_LEVEL_HIGH_WM - Expected: %u Actual: %u\n",
+ dml_ttu_attr->qos_level_high_wm, ttu_attr.qos_level_high_wm);
+
+ /* TTU - per luma/chroma */
+ /* Assume surf0 is luma and surf1 is chroma */
+ REG_GET_3(DCN_SURF0_TTU_CNTL0,
+ REFCYC_PER_REQ_DELIVERY, &ttu_attr.refcyc_per_req_delivery_l,
+ QoS_LEVEL_FIXED, &ttu_attr.qos_level_fixed_l,
+ QoS_RAMP_DISABLE, &ttu_attr.qos_ramp_disable_l);
+ REG_GET_3(DCN_SURF1_TTU_CNTL0,
+ REFCYC_PER_REQ_DELIVERY, &ttu_attr.refcyc_per_req_delivery_c,
+ QoS_LEVEL_FIXED, &ttu_attr.qos_level_fixed_c,
+ QoS_RAMP_DISABLE, &ttu_attr.qos_ramp_disable_c);
+ REG_GET_3(DCN_CUR0_TTU_CNTL0,
+ REFCYC_PER_REQ_DELIVERY, &ttu_attr.refcyc_per_req_delivery_cur0,
+ QoS_LEVEL_FIXED, &ttu_attr.qos_level_fixed_cur0,
+ QoS_RAMP_DISABLE, &ttu_attr.qos_ramp_disable_cur0);
+ REG_GET(FLIP_PARAMETERS_1,
+ REFCYC_PER_PTE_GROUP_FLIP_L, &dlg_attr.refcyc_per_pte_group_flip_l);
+ REG_GET(DCN_CUR0_TTU_CNTL1,
+ REFCYC_PER_REQ_DELIVERY_PRE, &ttu_attr.refcyc_per_req_delivery_pre_cur0);
+ REG_GET(DCN_CUR1_TTU_CNTL1,
+ REFCYC_PER_REQ_DELIVERY_PRE, &ttu_attr.refcyc_per_req_delivery_pre_cur1);
+ REG_GET(DCN_SURF0_TTU_CNTL1,
+ REFCYC_PER_REQ_DELIVERY_PRE, &ttu_attr.refcyc_per_req_delivery_pre_l);
+ REG_GET(DCN_SURF1_TTU_CNTL1,
+ REFCYC_PER_REQ_DELIVERY_PRE, &ttu_attr.refcyc_per_req_delivery_pre_c);
+
+ if (ttu_attr.refcyc_per_req_delivery_l != dml_ttu_attr->refcyc_per_req_delivery_l)
+ DC_LOG_DEBUG("DML Validation | DCN_SURF0_TTU_CNTL0:REFCYC_PER_REQ_DELIVERY - Expected: %u Actual: %u\n",
+ dml_ttu_attr->refcyc_per_req_delivery_l, ttu_attr.refcyc_per_req_delivery_l);
+ if (ttu_attr.qos_level_fixed_l != dml_ttu_attr->qos_level_fixed_l)
+ DC_LOG_DEBUG("DML Validation | DCN_SURF0_TTU_CNTL0:QoS_LEVEL_FIXED - Expected: %u Actual: %u\n",
+ dml_ttu_attr->qos_level_fixed_l, ttu_attr.qos_level_fixed_l);
+ if (ttu_attr.qos_ramp_disable_l != dml_ttu_attr->qos_ramp_disable_l)
+ DC_LOG_DEBUG("DML Validation | DCN_SURF0_TTU_CNTL0:QoS_RAMP_DISABLE - Expected: %u Actual: %u\n",
+ dml_ttu_attr->qos_ramp_disable_l, ttu_attr.qos_ramp_disable_l);
+ if (ttu_attr.refcyc_per_req_delivery_c != dml_ttu_attr->refcyc_per_req_delivery_c)
+ DC_LOG_DEBUG("DML Validation | DCN_SURF1_TTU_CNTL0:REFCYC_PER_REQ_DELIVERY - Expected: %u Actual: %u\n",
+ dml_ttu_attr->refcyc_per_req_delivery_c, ttu_attr.refcyc_per_req_delivery_c);
+ if (ttu_attr.qos_level_fixed_c != dml_ttu_attr->qos_level_fixed_c)
+ DC_LOG_DEBUG("DML Validation | DCN_SURF1_TTU_CNTL0:QoS_LEVEL_FIXED - Expected: %u Actual: %u\n",
+ dml_ttu_attr->qos_level_fixed_c, ttu_attr.qos_level_fixed_c);
+ if (ttu_attr.qos_ramp_disable_c != dml_ttu_attr->qos_ramp_disable_c)
+ DC_LOG_DEBUG("DML Validation | DCN_SURF1_TTU_CNTL0:QoS_RAMP_DISABLE - Expected: %u Actual: %u\n",
+ dml_ttu_attr->qos_ramp_disable_c, ttu_attr.qos_ramp_disable_c);
+ if (ttu_attr.refcyc_per_req_delivery_cur0 != dml_ttu_attr->refcyc_per_req_delivery_cur0)
+ DC_LOG_DEBUG("DML Validation | DCN_CUR0_TTU_CNTL0:REFCYC_PER_REQ_DELIVERY - Expected: %u Actual: %u\n",
+ dml_ttu_attr->refcyc_per_req_delivery_cur0, ttu_attr.refcyc_per_req_delivery_cur0);
+ if (ttu_attr.qos_level_fixed_cur0 != dml_ttu_attr->qos_level_fixed_cur0)
+ DC_LOG_DEBUG("DML Validation | DCN_CUR0_TTU_CNTL0:QoS_LEVEL_FIXED - Expected: %u Actual: %u\n",
+ dml_ttu_attr->qos_level_fixed_cur0, ttu_attr.qos_level_fixed_cur0);
+ if (ttu_attr.qos_ramp_disable_cur0 != dml_ttu_attr->qos_ramp_disable_cur0)
+ DC_LOG_DEBUG("DML Validation | DCN_CUR0_TTU_CNTL0:QoS_RAMP_DISABLE - Expected: %u Actual: %u\n",
+ dml_ttu_attr->qos_ramp_disable_cur0, ttu_attr.qos_ramp_disable_cur0);
+ if (dlg_attr.refcyc_per_pte_group_flip_l != dml_dlg_attr->refcyc_per_pte_group_flip_l)
+ DC_LOG_DEBUG("DML Validation | FLIP_PARAMETERS_1:REFCYC_PER_PTE_GROUP_FLIP_L - Expected: %u Actual: %u\n",
+ dml_dlg_attr->refcyc_per_pte_group_flip_l, dlg_attr.refcyc_per_pte_group_flip_l);
+ if (ttu_attr.refcyc_per_req_delivery_pre_cur0 != dml_ttu_attr->refcyc_per_req_delivery_pre_cur0)
+ DC_LOG_DEBUG("DML Validation | DCN_CUR0_TTU_CNTL1:REFCYC_PER_REQ_DELIVERY_PRE - Expected: %u Actual: %u\n",
+ dml_ttu_attr->refcyc_per_req_delivery_pre_cur0, ttu_attr.refcyc_per_req_delivery_pre_cur0);
+ if (ttu_attr.refcyc_per_req_delivery_pre_cur1 != dml_ttu_attr->refcyc_per_req_delivery_pre_cur1)
+ DC_LOG_DEBUG("DML Validation | DCN_CUR1_TTU_CNTL1:REFCYC_PER_REQ_DELIVERY_PRE - Expected: %u Actual: %u\n",
+ dml_ttu_attr->refcyc_per_req_delivery_pre_cur1, ttu_attr.refcyc_per_req_delivery_pre_cur1);
+ if (ttu_attr.refcyc_per_req_delivery_pre_l != dml_ttu_attr->refcyc_per_req_delivery_pre_l)
+ DC_LOG_DEBUG("DML Validation | DCN_SURF0_TTU_CNTL1:REFCYC_PER_REQ_DELIVERY_PRE - Expected: %u Actual: %u\n",
+ dml_ttu_attr->refcyc_per_req_delivery_pre_l, ttu_attr.refcyc_per_req_delivery_pre_l);
+ if (ttu_attr.refcyc_per_req_delivery_pre_c != dml_ttu_attr->refcyc_per_req_delivery_pre_c)
+ DC_LOG_DEBUG("DML Validation | DCN_SURF1_TTU_CNTL1:REFCYC_PER_REQ_DELIVERY_PRE - Expected: %u Actual: %u\n",
+ dml_ttu_attr->refcyc_per_req_delivery_pre_c, ttu_attr.refcyc_per_req_delivery_pre_c);
+}
+
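The validation above is mechanical: each field is read back with REG_GET/REG_GET_2/REG_GET_3, compared against the value DML computed, and any mismatch is logged. A minimal sketch of that read-back/compare pattern, with CHECK_FIELD as a hypothetical helper macro that is not part of this patch:

/* Hypothetical helper condensing the pattern used throughout
 * hubp2_validate_dml_output(); reg and field names are stringified
 * only for the log message.
 */
#define CHECK_FIELD(reg, field, actual, expected)			\
	do {								\
		if ((actual) != (expected))				\
			DC_LOG_DEBUG("DML Validation | " #reg ":" #field \
				     " - Expected: %u Actual: %u\n",	\
				     (expected), (actual));		\
	} while (0)

	/* usage, after REG_GET(DST_DIMENSIONS, REFCYC_PER_HTOTAL, ...): */
	CHECK_FIELD(DST_DIMENSIONS, REFCYC_PER_HTOTAL,
		    dlg_attr.refcyc_per_htotal, dml_dlg_attr->refcyc_per_htotal);

The patch keeps every read explicit instead, which leaves the register/field pairing visible at each call site at the cost of repetition.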
static struct hubp_funcs dcn20_hubp_funcs = {
.hubp_enable_tripleBuffer = hubp2_enable_triplebuffer,
.hubp_is_triplebuffer_enabled = hubp2_is_triplebuffer_enabled,
@@ -1266,6 +1577,7 @@ static struct hubp_funcs dcn20_hubp_funcs = {
.hubp_clear_underflow = hubp2_clear_underflow,
.hubp_set_flip_control_surface_gsl = hubp2_set_flip_control_surface_gsl,
.hubp_init = hubp1_init,
+ .validate_dml_output = hubp2_validate_dml_output,
};
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubp.h b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubp.h
index d5c8615af45e..8c04a3606a54 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubp.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubp.h
@@ -148,7 +148,6 @@
uint32_t VMID_SETTINGS_0
-#if defined(CONFIG_DRM_AMD_DC_DCN2_1)
#define DCN21_HUBP_REG_COMMON_VARIABLE_LIST \
DCN2_HUBP_REG_COMMON_VARIABLE_LIST; \
uint32_t FLIP_PARAMETERS_3;\
@@ -157,7 +156,6 @@
uint32_t FLIP_PARAMETERS_6;\
uint32_t VBLANK_PARAMETERS_5;\
uint32_t VBLANK_PARAMETERS_6
-#endif
#define DCN2_HUBP_REG_FIELD_VARIABLE_LIST(type) \
DCN_HUBP_REG_FIELD_BASE_LIST(type); \
@@ -184,7 +182,6 @@
type SURFACE_TRIPLE_BUFFER_ENABLE;\
type VMID
-#ifdef CONFIG_DRM_AMD_DC_DCN2_1
#define DCN21_HUBP_REG_FIELD_VARIABLE_LIST(type) \
DCN2_HUBP_REG_FIELD_VARIABLE_LIST(type);\
type REFCYC_PER_VM_GROUP_FLIP;\
@@ -194,31 +191,18 @@
type REFCYC_PER_PTE_GROUP_FLIP_C; \
type REFCYC_PER_META_CHUNK_FLIP_C; \
type VM_GROUP_SIZE
-#endif
struct dcn_hubp2_registers {
-#if defined(CONFIG_DRM_AMD_DC_DCN2_1)
DCN21_HUBP_REG_COMMON_VARIABLE_LIST;
-#else
- DCN2_HUBP_REG_COMMON_VARIABLE_LIST;
-#endif
};
struct dcn_hubp2_shift {
-#if defined(CONFIG_DRM_AMD_DC_DCN2_1)
DCN21_HUBP_REG_FIELD_VARIABLE_LIST(uint8_t);
-#else
- DCN2_HUBP_REG_FIELD_VARIABLE_LIST(uint8_t);
-#endif
};
struct dcn_hubp2_mask {
-#if defined(CONFIG_DRM_AMD_DC_DCN2_1)
DCN21_HUBP_REG_FIELD_VARIABLE_LIST(uint32_t);
-#else
- DCN2_HUBP_REG_FIELD_VARIABLE_LIST(uint32_t);
-#endif
};
struct dcn20_hubp {
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c
index ac8c18fadefc..cfbbaffa8654 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c
@@ -25,17 +25,15 @@
#include <linux/delay.h>
#include "dm_services.h"
+#include "basics/dc_common.h"
#include "dm_helpers.h"
#include "core_types.h"
#include "resource.h"
-#include "dcn20/dcn20_resource.h"
-#include "dce110/dce110_hw_sequencer.h"
-#include "dcn10/dcn10_hw_sequencer.h"
+#include "dcn20_resource.h"
#include "dcn20_hwseq.h"
#include "dce/dce_hwseq.h"
-#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT
-#include "dcn20/dcn20_dsc.h"
-#endif
+#include "dcn20_dsc.h"
+#include "dcn20_optc.h"
#include "abm.h"
#include "clk_mgr.h"
#include "dmcu.h"
@@ -45,10 +43,9 @@
#include "ipp.h"
#include "mpc.h"
#include "mcif_wb.h"
+#include "dchubbub.h"
#include "reg_helper.h"
#include "dcn10/dcn10_cm_common.h"
-#include "dcn10/dcn10_hubbub.h"
-#include "dcn10/dcn10_optc.h"
#include "dc_link_dp.h"
#include "vm_helper.h"
#include "dccg.h"
@@ -64,14 +61,132 @@
#define FN(reg_name, field_name) \
hws->shifts->field_name, hws->masks->field_name
-static void dcn20_enable_power_gating_plane(
+static int find_free_gsl_group(const struct dc *dc)
+{
+ if (dc->res_pool->gsl_groups.gsl_0 == 0)
+ return 1;
+ if (dc->res_pool->gsl_groups.gsl_1 == 0)
+ return 2;
+ if (dc->res_pool->gsl_groups.gsl_2 == 0)
+ return 3;
+
+ return 0;
+}
+
+/* NOTE: This is not a generic setup_gsl function (hence the suffix as_lock)
+ * This is only used to lock pipes in the pipe-splitting case with
+ * immediate flip. Ordinary MPC/OTG locks suppress VUPDATE, which
+ * doesn't help with immediate flips, so we get tearing with freesync
+ * since we cannot flip multiple pipes atomically.
+ * We use GSL for this:
+ * - immediate flip: find first available GSL group if not already assigned
+ *                   program gsl with that group, set current OTG as master
+ *                   and always use 0x4 = AND of flip_ready from all pipes
+ * - vsync flip: disable GSL if used
+ *
+ * Groups in stream_res are stored as +1 from HW registers, i.e.
+ * gsl_0 <=> pipe_ctx->stream_res.gsl_group == 1
+ * Using a magic value like -1 would require tracking all inits/resets
+ */
+static void dcn20_setup_gsl_group_as_lock(
+ const struct dc *dc,
+ struct pipe_ctx *pipe_ctx,
+ bool enable)
+{
+ struct gsl_params gsl;
+ int group_idx;
+
+ memset(&gsl, 0, sizeof(struct gsl_params));
+
+ if (enable) {
+ /* return if a group is already assigned; GSL set up for a vsync
+ * flip is unassigned on disable, so it can't be "left over"
+ */
+ if (pipe_ctx->stream_res.gsl_group > 0)
+ return;
+
+ group_idx = find_free_gsl_group(dc);
+ ASSERT(group_idx != 0);
+ pipe_ctx->stream_res.gsl_group = group_idx;
+
+ /* set gsl group reg field and mark resource used */
+ switch (group_idx) {
+ case 1:
+ gsl.gsl0_en = 1;
+ dc->res_pool->gsl_groups.gsl_0 = 1;
+ break;
+ case 2:
+ gsl.gsl1_en = 1;
+ dc->res_pool->gsl_groups.gsl_1 = 1;
+ break;
+ case 3:
+ gsl.gsl2_en = 1;
+ dc->res_pool->gsl_groups.gsl_2 = 1;
+ break;
+ default:
+ BREAK_TO_DEBUGGER();
+ return; // invalid case
+ }
+ gsl.gsl_master_en = 1;
+ } else {
+ group_idx = pipe_ctx->stream_res.gsl_group;
+ if (group_idx == 0)
+ return; // if not in use, just return
+
+ pipe_ctx->stream_res.gsl_group = 0;
+
+ /* unset gsl group reg field and mark resource free */
+ switch (group_idx) {
+ case 1:
+ gsl.gsl0_en = 0;
+ dc->res_pool->gsl_groups.gsl_0 = 0;
+ break;
+ case 2:
+ gsl.gsl1_en = 0;
+ dc->res_pool->gsl_groups.gsl_1 = 0;
+ break;
+ case 3:
+ gsl.gsl2_en = 0;
+ dc->res_pool->gsl_groups.gsl_2 = 0;
+ break;
+ default:
+ BREAK_TO_DEBUGGER();
+ return;
+ }
+ gsl.gsl_master_en = 0;
+ }
+
+ /* at this point, program whichever was chosen: enable or disable */
+ if (pipe_ctx->stream_res.tg->funcs->set_gsl != NULL &&
+ pipe_ctx->stream_res.tg->funcs->set_gsl_source_select != NULL) {
+ pipe_ctx->stream_res.tg->funcs->set_gsl(
+ pipe_ctx->stream_res.tg,
+ &gsl);
+
+ pipe_ctx->stream_res.tg->funcs->set_gsl_source_select(
+ pipe_ctx->stream_res.tg, group_idx, enable ? 4 : 0);
+ } else {
+ BREAK_TO_DEBUGGER();
+ }
+}
+
+void dcn20_set_flip_control_gsl(
+ struct pipe_ctx *pipe_ctx,
+ bool flip_immediate)
+{
+ if (pipe_ctx && pipe_ctx->plane_res.hubp->funcs->hubp_set_flip_control_surface_gsl)
+ pipe_ctx->plane_res.hubp->funcs->hubp_set_flip_control_surface_gsl(
+ pipe_ctx->plane_res.hubp, flip_immediate);
+}
+
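dcn20_set_flip_control_gsl() only programs the HUBP side of the lock; the GSL group itself is claimed or released by dcn20_setup_gsl_group_as_lock() above. A hedged sketch of how a flip path in this file might pair the two calls (example_split_pipe_flip is illustrative, not part of this patch):

/* Illustrative only: for a split-pipe immediate flip, claim a GSL group
 * as the lock and point HUBP flip control at it; a vsync flip releases
 * both again.
 */
static void example_split_pipe_flip(const struct dc *dc,
		struct pipe_ctx *pipe_ctx, bool flip_immediate)
{
	/* claim/release a GSL group, set OTG master and the AND source */
	dcn20_setup_gsl_group_as_lock(dc, pipe_ctx, flip_immediate);

	/* make the surface flip participate in (or leave) GSL */
	dcn20_set_flip_control_gsl(pipe_ctx, flip_immediate);
}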
+void dcn20_enable_power_gating_plane(
struct dce_hwseq *hws,
bool enable)
{
- bool force_on = 1; /* disable power gating */
+ bool force_on = true; /* disable power gating */
if (enable)
- force_on = 0;
+ force_on = false;
/* DCHUBP0/1/2/3/4/5 */
REG_UPDATE(DOMAIN0_PG_CONFIG, DOMAIN0_POWER_FORCEON, force_on);
@@ -128,44 +243,6 @@ void dcn20_dccg_init(struct dce_hwseq *hws)
/* This value is dependent on the hardware pipeline delay so set once per SOC */
REG_WRITE(DISPCLK_FREQ_CHANGE_CNTL, 0x801003c);
}
-void dcn20_display_init(struct dc *dc)
-{
- struct dce_hwseq *hws = dc->hwseq;
-
- /* RBBMIF
- * disable RBBMIF timeout detection for all clients
- * Ensure RBBMIF does not drop register accesses due to the per-client timeout
- */
- REG_WRITE(RBBMIF_TIMEOUT_DIS, 0xFFFFFFFF);
- REG_WRITE(RBBMIF_TIMEOUT_DIS_2, 0xFFFFFFFF);
-
- /* DCCG */
- dcn20_dccg_init(hws);
-
- REG_UPDATE(DC_MEM_GLOBAL_PWR_REQ_CNTL, DC_MEM_GLOBAL_PWR_REQ_DIS, 0);
-
- /* DCHUB/MMHUBBUB
- * set global timer refclk divider
- * 100Mhz refclk -> 2
- * 27Mhz refclk -> 1
- * 48Mhz refclk -> 1
- */
- REG_UPDATE(DCHUBBUB_GLOBAL_TIMER_CNTL, DCHUBBUB_GLOBAL_TIMER_REFDIV, 2);
- REG_UPDATE(DCHUBBUB_GLOBAL_TIMER_CNTL, DCHUBBUB_GLOBAL_TIMER_ENABLE, 1);
- REG_WRITE(REFCLK_CNTL, 0);
-
- /* OPTC
- * OTG_CONTROL.OTG_DISABLE_POINT_CNTL = 0x3; will be set during optc2_enable_crtc
- */
-
- /* AZ
- * default value is 0x64 for 100Mhz ref clock, if the ref clock is 100Mhz, no need to program this regiser,
- * if not, it should be programmed according to the ref clock
- */
- REG_UPDATE(AZALIA_AUDIO_DTO, AZALIA_AUDIO_DTO_MODULE, 0x64);
- /* Enable controller clock gating */
- REG_WRITE(AZALIA_CONTROLLER_CLOCK_GATING, 0x1);
-}
void dcn20_disable_vga(
struct dce_hwseq *hws)
@@ -178,15 +255,15 @@ void dcn20_disable_vga(
REG_WRITE(D6VGA_CONTROL, 0);
}
-void dcn20_program_tripleBuffer(
+void dcn20_program_triple_buffer(
const struct dc *dc,
struct pipe_ctx *pipe_ctx,
- bool enableTripleBuffer)
+ bool enable_triple_buffer)
{
if (pipe_ctx->plane_res.hubp && pipe_ctx->plane_res.hubp->funcs) {
pipe_ctx->plane_res.hubp->funcs->hubp_enable_tripleBuffer(
pipe_ctx->plane_res.hubp,
- enableTripleBuffer);
+ enable_triple_buffer);
}
}
@@ -195,6 +272,7 @@ void dcn20_init_blank(
struct dc *dc,
struct timing_generator *tg)
{
+ struct dce_hwseq *hws = dc->hwseq;
enum dc_color_space color_space;
struct tg_color black_color = {0};
struct output_pixel_processor *opp = NULL;
@@ -225,6 +303,7 @@ void dcn20_init_blank(
opp->funcs->opp_set_disp_pattern_generator(
opp,
CONTROLLER_DP_TEST_PATTERN_SOLID_COLOR,
+ CONTROLLER_DP_COLOR_SPACE_UDEFINED,
COLOR_DEPTH_UNDEFINED,
&black_color,
otg_active_width,
@@ -234,17 +313,17 @@ void dcn20_init_blank(
bottom_opp->funcs->opp_set_disp_pattern_generator(
bottom_opp,
CONTROLLER_DP_TEST_PATTERN_SOLID_COLOR,
+ CONTROLLER_DP_COLOR_SPACE_UDEFINED,
COLOR_DEPTH_UNDEFINED,
&black_color,
otg_active_width,
otg_active_height);
}
- dcn20_hwss_wait_for_blank_complete(opp);
+ hws->funcs.wait_for_blank_complete(opp);
}
-#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT
-static void dcn20_dsc_pg_control(
+void dcn20_dsc_pg_control(
struct dce_hwseq *hws,
unsigned int dsc_inst,
bool power_on)
@@ -320,9 +399,8 @@ static void dcn20_dsc_pg_control(
if (org_ip_request_cntl == 0)
REG_SET(DC_IP_REQUEST_CNTL, 0, IP_REQUEST_EN, 0);
}
-#endif
-static void dcn20_dpp_pg_control(
+void dcn20_dpp_pg_control(
struct dce_hwseq *hws,
unsigned int dpp_inst,
bool power_on)
@@ -396,7 +474,7 @@ static void dcn20_dpp_pg_control(
}
-static void dcn20_hubp_pg_control(
+void dcn20_hubp_pg_control(
struct dce_hwseq *hws,
unsigned int hubp_inst,
bool power_on)
@@ -473,8 +551,9 @@ static void dcn20_hubp_pg_control(
/* disable HW used by plane.
* note: cannot disable until disconnect is complete
*/
-static void dcn20_plane_atomic_disable(struct dc *dc, struct pipe_ctx *pipe_ctx)
+void dcn20_plane_atomic_disable(struct dc *dc, struct pipe_ctx *pipe_ctx)
{
+ struct dce_hwseq *hws = dc->hwseq;
struct hubp *hubp = pipe_ctx->plane_res.hubp;
struct dpp *dpp = pipe_ctx->plane_res.dpp;
@@ -495,7 +574,7 @@ static void dcn20_plane_atomic_disable(struct dc *dc, struct pipe_ctx *pipe_ctx)
hubp->power_gated = true;
dc->optimized_required = false; /* We're powering off, no need to optimize */
- dc->hwss.plane_atomic_power_down(dc,
+ hws->funcs.plane_atomic_power_down(dc,
pipe_ctx->plane_res.dpp,
pipe_ctx->plane_res.hubp);
@@ -526,6 +605,7 @@ enum dc_status dcn20_enable_stream_timing(
struct dc_state *context,
struct dc *dc)
{
+ struct dce_hwseq *hws = dc->hwseq;
struct dc_stream_state *stream = pipe_ctx->stream;
struct drr_params params = {0};
unsigned int event_triggers = 0;
@@ -585,7 +665,7 @@ enum dc_status dcn20_enable_stream_timing(
pipe_ctx->stream_res.opp,
true);
- dc->hwss.blank_pixel_data(dc, pipe_ctx, true);
+ hws->funcs.blank_pixel_data(dc, pipe_ctx, true);
/* VTG is within DCHUB command block. DCFCLK is always on */
if (false == pipe_ctx->stream_res.tg->funcs->enable_crtc(pipe_ctx->stream_res.tg)) {
@@ -593,7 +673,7 @@ enum dc_status dcn20_enable_stream_timing(
return DC_ERROR_UNEXPECTED;
}
- dcn20_hwss_wait_for_blank_complete(pipe_ctx->stream_res.opp);
+ hws->funcs.wait_for_blank_complete(pipe_ctx->stream_res.opp);
params.vertical_total_min = stream->adjust.v_total_min;
params.vertical_total_max = stream->adjust.v_total_max;
@@ -606,9 +686,13 @@ enum dc_status dcn20_enable_stream_timing(
// DRR should set trigger event to monitor surface update event
if (stream->adjust.v_total_min != 0 && stream->adjust.v_total_max != 0)
event_triggers = 0x80;
+ /* Event triggers and num frames are initialized for DRR, but can be
+ * updated later for PSR use. Note that DRR trigger events are generated
+ * regardless of whether the num frames threshold is met.
+ */
if (pipe_ctx->stream_res.tg->funcs->set_static_screen_control)
pipe_ctx->stream_res.tg->funcs->set_static_screen_control(
- pipe_ctx->stream_res.tg, event_triggers);
+ pipe_ctx->stream_res.tg, event_triggers, 2);
/* TODO program crtc source select for non-virtual signal*/
/* TODO program FMT */
@@ -649,7 +733,7 @@ void dcn20_program_output_csc(struct dc *dc,
}
}
-bool dcn20_set_output_transfer_func(struct pipe_ctx *pipe_ctx,
+bool dcn20_set_output_transfer_func(struct dc *dc, struct pipe_ctx *pipe_ctx,
const struct dc_stream_state *stream)
{
int mpcc_id = pipe_ctx->plane_res.hubp->inst;
@@ -736,20 +820,14 @@ bool dcn20_set_shaper_3dlut(
else
result = dpp_base->funcs->dpp_program_3dlut(dpp_base, NULL);
- if (plane_state->lut3d_func &&
- plane_state->lut3d_func->state.bits.initialized == 1 &&
- plane_state->lut3d_func->hdr_multiplier != 0)
- dpp_base->funcs->dpp_set_hdr_multiplier(dpp_base,
- plane_state->lut3d_func->hdr_multiplier);
- else
- dpp_base->funcs->dpp_set_hdr_multiplier(dpp_base, 0x1f000);
-
return result;
}
-bool dcn20_set_input_transfer_func(struct pipe_ctx *pipe_ctx,
- const struct dc_plane_state *plane_state)
+bool dcn20_set_input_transfer_func(struct dc *dc,
+ struct pipe_ctx *pipe_ctx,
+ const struct dc_plane_state *plane_state)
{
+ struct dce_hwseq *hws = dc->hwseq;
struct dpp *dpp_base = pipe_ctx->plane_res.dpp;
const struct dc_transfer_func *tf = NULL;
bool result = true;
@@ -758,8 +836,8 @@ bool dcn20_set_input_transfer_func(struct pipe_ctx *pipe_ctx,
if (dpp_base == NULL || plane_state == NULL)
return false;
- dcn20_set_shaper_3dlut(pipe_ctx, plane_state);
- dcn20_set_blend_lut(pipe_ctx, plane_state);
+ hws->funcs.set_shaper_3dlut(pipe_ctx, plane_state);
+ hws->funcs.set_blend_lut(pipe_ctx, plane_state);
if (plane_state->in_transfer_func)
tf = plane_state->in_transfer_func;
@@ -804,6 +882,11 @@ bool dcn20_set_input_transfer_func(struct pipe_ctx *pipe_ctx,
IPP_DEGAMMA_MODE_BYPASS);
break;
case TRANSFER_FUNCTION_PQ:
+ dpp_base->funcs->dpp_set_degamma(dpp_base, IPP_DEGAMMA_MODE_USER_PWL);
+ cm_helper_translate_curve_to_degamma_hw_format(tf, &dpp_base->degamma_params);
+ dpp_base->funcs->dpp_program_degamma_pwl(dpp_base, &dpp_base->degamma_params);
+ result = true;
+ break;
default:
result = false;
break;
@@ -824,7 +907,7 @@ bool dcn20_set_input_transfer_func(struct pipe_ctx *pipe_ctx,
return result;
}
-static void dcn20_update_odm(struct dc *dc, struct dc_state *context, struct pipe_ctx *pipe_ctx)
+void dcn20_update_odm(struct dc *dc, struct dc_state *context, struct pipe_ctx *pipe_ctx)
{
struct pipe_ctx *odm_pipe;
int opp_cnt = 1;
@@ -855,12 +938,16 @@ void dcn20_blank_pixel_data(
struct dc_stream_state *stream = pipe_ctx->stream;
enum dc_color_space color_space = stream->output_color_space;
enum controller_dp_test_pattern test_pattern = CONTROLLER_DP_TEST_PATTERN_SOLID_COLOR;
+ enum controller_dp_color_space test_pattern_color_space = CONTROLLER_DP_COLOR_SPACE_UDEFINED;
struct pipe_ctx *odm_pipe;
int odm_cnt = 1;
int width = stream->timing.h_addressable + stream->timing.h_border_left + stream->timing.h_border_right;
int height = stream->timing.v_addressable + stream->timing.v_border_bottom + stream->timing.v_border_top;
+ if (stream->link->test_pattern_enabled)
+ return;
+
/* get opp dpg blank color */
color_space_to_black_color(dc, color_space, &black_color);
@@ -873,8 +960,10 @@ void dcn20_blank_pixel_data(
if (stream_res->abm)
stream_res->abm->funcs->set_abm_immediate_disable(stream_res->abm);
- if (dc->debug.visual_confirm != VISUAL_CONFIRM_DISABLE)
+ if (dc->debug.visual_confirm != VISUAL_CONFIRM_DISABLE) {
test_pattern = CONTROLLER_DP_TEST_PATTERN_COLORSQUARES;
+ test_pattern_color_space = CONTROLLER_DP_COLOR_SPACE_RGB;
+ }
} else {
test_pattern = CONTROLLER_DP_TEST_PATTERN_VIDEOMODE;
}
@@ -882,6 +971,7 @@ void dcn20_blank_pixel_data(
stream_res->opp->funcs->opp_set_disp_pattern_generator(
stream_res->opp,
test_pattern,
+ test_pattern_color_space,
stream->timing.display_color_depth,
&black_color,
width,
@@ -892,6 +982,7 @@ void dcn20_blank_pixel_data(
odm_pipe->stream_res.opp,
dc->debug.visual_confirm != VISUAL_CONFIRM_DISABLE && blank ?
CONTROLLER_DP_TEST_PATTERN_COLORRAMP : test_pattern,
+ test_pattern_color_space,
stream->timing.display_color_depth,
&black_color,
width,
@@ -1217,9 +1308,11 @@ static void dcn20_update_dchubp_dpp(
struct pipe_ctx *pipe_ctx,
struct dc_state *context)
{
+ struct dce_hwseq *hws = dc->hwseq;
struct hubp *hubp = pipe_ctx->plane_res.hubp;
struct dpp *dpp = pipe_ctx->plane_res.dpp;
struct dc_plane_state *plane_state = pipe_ctx->plane_state;
+ bool viewport_changed = false;
if (pipe_ctx->update_flags.bits.dppclk)
dpp->funcs->dpp_dppclk_control(dpp, false, true);
@@ -1261,7 +1354,7 @@ static void dcn20_update_dchubp_dpp(
if (dpp->funcs->dpp_program_bias_and_scale) {
//TODO :for CNVC set scale and bias registers if necessary
- dcn10_build_prescale_params(&bns_params, plane_state);
+ build_prescale_params(&bns_params, plane_state);
dpp->funcs->dpp_program_bias_and_scale(dpp, &bns_params);
}
}
@@ -1269,19 +1362,19 @@ static void dcn20_update_dchubp_dpp(
if (pipe_ctx->update_flags.bits.mpcc
|| plane_state->update_flags.bits.global_alpha_change
|| plane_state->update_flags.bits.per_pixel_alpha_change) {
- /* Need mpcc to be idle if changing opp */
- if (pipe_ctx->update_flags.bits.opp_changed) {
- struct pipe_ctx *old_pipe_ctx = &dc->current_state->res_ctx.pipe_ctx[pipe_ctx->pipe_idx];
- int mpcc_inst;
-
- for (mpcc_inst = 0; mpcc_inst < MAX_PIPES; mpcc_inst++) {
- if (!old_pipe_ctx->stream_res.opp->mpcc_disconnect_pending[mpcc_inst])
- continue;
+ // MPCC inst is equal to pipe index in practice
+ int mpcc_inst = hubp->inst;
+ int opp_inst;
+ int opp_count = dc->res_pool->pipe_count;
+
+ for (opp_inst = 0; opp_inst < opp_count; opp_inst++) {
+ if (dc->res_pool->opps[opp_inst]->mpcc_disconnect_pending[mpcc_inst]) {
dc->res_pool->mpc->funcs->wait_for_idle(dc->res_pool->mpc, mpcc_inst);
- old_pipe_ctx->stream_res.opp->mpcc_disconnect_pending[mpcc_inst] = false;
+ dc->res_pool->opps[opp_inst]->mpcc_disconnect_pending[mpcc_inst] = false;
+ break;
}
}
- dc->hwss.update_mpcc(dc, pipe_ctx);
+ hws->funcs.update_mpcc(dc, pipe_ctx);
}
if (pipe_ctx->update_flags.bits.scaler ||
@@ -1298,14 +1391,18 @@ static void dcn20_update_dchubp_dpp(
if (pipe_ctx->update_flags.bits.viewport ||
(context == dc->current_state && plane_state->update_flags.bits.scaling_change) ||
- (context == dc->current_state && pipe_ctx->stream->update_flags.bits.scaling))
+ (context == dc->current_state && pipe_ctx->stream->update_flags.bits.scaling)) {
+
hubp->funcs->mem_program_viewport(
hubp,
&pipe_ctx->plane_res.scl_data.viewport,
&pipe_ctx->plane_res.scl_data.viewport_c);
+ viewport_changed = true;
+ }
/* Any updates are handled in dc interface, just need to apply existing for plane enable */
- if ((pipe_ctx->update_flags.bits.enable || pipe_ctx->update_flags.bits.opp_changed)
+ if ((pipe_ctx->update_flags.bits.enable || pipe_ctx->update_flags.bits.opp_changed ||
+ pipe_ctx->update_flags.bits.scaler || pipe_ctx->update_flags.bits.viewport)
&& pipe_ctx->stream->cursor_attributes.address.quad_part != 0) {
dc->hwss.set_cursor_position(pipe_ctx);
dc->hwss.set_cursor_attribute(pipe_ctx);
@@ -1355,8 +1452,13 @@ static void dcn20_update_dchubp_dpp(
hubp->power_gated = false;
}
+ if (hubp->funcs->apply_PLAT_54186_wa && viewport_changed)
+ hubp->funcs->apply_PLAT_54186_wa(hubp, &plane_state->address);
+
if (pipe_ctx->update_flags.bits.enable || plane_state->update_flags.bits.addr_update)
- dc->hwss.update_plane_addr(dc, pipe_ctx);
+ hws->funcs.update_plane_addr(dc, pipe_ctx);
+
if (pipe_ctx->update_flags.bits.enable)
hubp->funcs->set_blank(hubp, false);
@@ -1368,10 +1470,11 @@ static void dcn20_program_pipe(
struct pipe_ctx *pipe_ctx,
struct dc_state *context)
{
+ struct dce_hwseq *hws = dc->hwseq;
/* Only need to unblank on top pipe */
if ((pipe_ctx->update_flags.bits.enable || pipe_ctx->stream->update_flags.bits.abm_level)
&& !pipe_ctx->top_pipe && !pipe_ctx->prev_odm_pipe)
- dc->hwss.blank_pixel_data(dc, pipe_ctx, !pipe_ctx->plane_state->visible);
+ hws->funcs.blank_pixel_data(dc, pipe_ctx, !pipe_ctx->plane_state->visible);
if (pipe_ctx->update_flags.bits.global_sync) {
pipe_ctx->stream_res.tg->funcs->program_global_sync(
@@ -1384,12 +1487,12 @@ static void dcn20_program_pipe(
pipe_ctx->stream_res.tg->funcs->set_vtg_params(
pipe_ctx->stream_res.tg, &pipe_ctx->stream->timing);
- if (dc->hwss.setup_vupdate_interrupt)
- dc->hwss.setup_vupdate_interrupt(pipe_ctx);
+ if (hws->funcs.setup_vupdate_interrupt)
+ hws->funcs.setup_vupdate_interrupt(dc, pipe_ctx);
}
if (pipe_ctx->update_flags.bits.odm)
- dc->hwss.update_odm(dc, context, pipe_ctx);
+ hws->funcs.update_odm(dc, context, pipe_ctx);
if (pipe_ctx->update_flags.bits.enable)
dcn20_enable_plane(dc, pipe_ctx, context);
@@ -1398,20 +1501,20 @@ static void dcn20_program_pipe(
dcn20_update_dchubp_dpp(dc, pipe_ctx, context);
if (pipe_ctx->update_flags.bits.enable
- || pipe_ctx->plane_state->update_flags.bits.sdr_white_level)
- set_hdr_multiplier(pipe_ctx);
+ || pipe_ctx->plane_state->update_flags.bits.hdr_mult)
+ hws->funcs.set_hdr_multiplier(pipe_ctx);
if (pipe_ctx->update_flags.bits.enable ||
pipe_ctx->plane_state->update_flags.bits.in_transfer_func_change ||
pipe_ctx->plane_state->update_flags.bits.gamma_change)
- dc->hwss.set_input_transfer_func(pipe_ctx, pipe_ctx->plane_state);
+ hws->funcs.set_input_transfer_func(dc, pipe_ctx, pipe_ctx->plane_state);
/* dcn10_translate_regamma_to_hw_format takes 750us to finish
* only do gamma programming for powering on, internal memcmp to avoid
* updating on slave planes
*/
if (pipe_ctx->update_flags.bits.enable || pipe_ctx->stream->update_flags.bits.out_tf)
- dc->hwss.set_output_transfer_func(pipe_ctx, pipe_ctx->stream);
+ hws->funcs.set_output_transfer_func(dc, pipe_ctx, pipe_ctx->stream);
/* If the pipe has been enabled or has a different opp, we
* should reprogram the fmt. This deals with cases where
@@ -1445,12 +1548,13 @@ static bool does_pipe_need_lock(struct pipe_ctx *pipe)
return false;
}
-static void dcn20_program_front_end_for_ctx(
+void dcn20_program_front_end_for_ctx(
struct dc *dc,
struct dc_state *context)
{
const unsigned int TIMEOUT_FOR_PIPE_ENABLE_MS = 100;
int i;
+ struct dce_hwseq *hws = dc->hwseq;
bool pipe_locked[MAX_PIPES] = {false};
DC_LOGGER_INIT(dc->ctx->logger);
@@ -1482,13 +1586,13 @@ static void dcn20_program_front_end_for_ctx(
&& !context->res_ctx.pipe_ctx[i].top_pipe
&& !context->res_ctx.pipe_ctx[i].prev_odm_pipe
&& context->res_ctx.pipe_ctx[i].stream)
- dc->hwss.blank_pixel_data(dc, &context->res_ctx.pipe_ctx[i], true);
+ hws->funcs.blank_pixel_data(dc, &context->res_ctx.pipe_ctx[i], true);
/* Disconnect mpcc */
for (i = 0; i < dc->res_pool->pipe_count; i++)
if (context->res_ctx.pipe_ctx[i].update_flags.bits.disable
|| context->res_ctx.pipe_ctx[i].update_flags.bits.opp_changed) {
- dc->hwss.plane_atomic_disconnect(dc, &dc->current_state->res_ctx.pipe_ctx[i]);
+ hws->funcs.plane_atomic_disconnect(dc, &dc->current_state->res_ctx.pipe_ctx[i]);
DC_LOG_DC("Reset mpcc for pipe %d\n", dc->current_state->res_ctx.pipe_ctx[i].pipe_idx);
}
@@ -1508,8 +1612,8 @@ static void dcn20_program_front_end_for_ctx(
pipe = &context->res_ctx.pipe_ctx[i];
if (!pipe->prev_odm_pipe && pipe->stream->num_wb_info > 0
&& (pipe->update_flags.raw || pipe->plane_state->update_flags.raw || pipe->stream->update_flags.raw)
- && dc->hwss.program_all_writeback_pipes_in_tree)
- dc->hwss.program_all_writeback_pipes_in_tree(dc, pipe->stream, context);
+ && hws->funcs.program_all_writeback_pipes_in_tree)
+ hws->funcs.program_all_writeback_pipes_in_tree(dc, pipe->stream, context);
}
}
@@ -1541,9 +1645,9 @@ static void dcn20_program_front_end_for_ctx(
struct hubp *hubp = pipe->plane_res.hubp;
int j = 0;
- for (j = 0; j < TIMEOUT_FOR_PIPE_ENABLE_MS
+ for (j = 0; j < TIMEOUT_FOR_PIPE_ENABLE_MS*1000
&& hubp->funcs->hubp_is_flip_pending(hubp); j++)
- msleep(1);
+ mdelay(1);
}
}
@@ -1594,6 +1698,7 @@ bool dcn20_update_bandwidth(
struct dc_state *context)
{
int i;
+ struct dce_hwseq *hws = dc->hwseq;
/* recalculate DML parameters */
if (!dc->res_pool->funcs->validate_bandwidth(dc, context, false))
@@ -1623,10 +1728,10 @@ bool dcn20_update_bandwidth(
pipe_ctx->stream_res.tg, &pipe_ctx->stream->timing);
if (pipe_ctx->prev_odm_pipe == NULL)
- dc->hwss.blank_pixel_data(dc, pipe_ctx, blank);
+ hws->funcs.blank_pixel_data(dc, pipe_ctx, blank);
- if (dc->hwss.setup_vupdate_interrupt)
- dc->hwss.setup_vupdate_interrupt(pipe_ctx);
+ if (hws->funcs.setup_vupdate_interrupt)
+ hws->funcs.setup_vupdate_interrupt(dc, pipe_ctx);
}
pipe_ctx->plane_res.hubp->funcs->hubp_setup(
@@ -1640,9 +1745,8 @@ bool dcn20_update_bandwidth(
return true;
}
-static void dcn20_enable_writeback(
+void dcn20_enable_writeback(
struct dc *dc,
- const struct dc_stream_status *stream_status,
struct dc_writeback_info *wb_info,
struct dc_state *context)
{
@@ -1656,8 +1760,7 @@ static void dcn20_enable_writeback(
mcif_wb = dc->res_pool->mcif_wb[wb_info->dwb_pipe_inst];
/* set the OPTC source mux */
- ASSERT(stream_status->primary_otg_inst < MAX_PIPES);
- optc = dc->res_pool->timing_generators[stream_status->primary_otg_inst];
+ optc = dc->res_pool->timing_generators[dwb->otg_inst];
optc->funcs->set_dwb_source(optc, wb_info->dwb_pipe_inst);
/* set MCIF_WB buffer and arbitration configuration */
mcif_wb->funcs->config_mcif_buf(mcif_wb, &wb_info->mcif_buf_params, wb_info->dwb_params.dest_height);
@@ -1684,7 +1787,7 @@ void dcn20_disable_writeback(
mcif_wb->funcs->disable_mcif(mcif_wb);
}
-bool dcn20_hwss_wait_for_blank_complete(
+bool dcn20_wait_for_blank_complete(
struct output_pixel_processor *opp)
{
int counter;
@@ -1713,9 +1816,8 @@ bool dcn20_dmdata_status_done(struct pipe_ctx *pipe_ctx)
return hubp->funcs->dmdata_status_done(hubp);
}
-static void dcn20_disable_stream_gating(struct dc *dc, struct pipe_ctx *pipe_ctx)
+void dcn20_disable_stream_gating(struct dc *dc, struct pipe_ctx *pipe_ctx)
{
-#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT
struct dce_hwseq *hws = dc->hwseq;
if (pipe_ctx->stream_res.dsc) {
@@ -1727,12 +1829,10 @@ static void dcn20_disable_stream_gating(struct dc *dc, struct pipe_ctx *pipe_ctx
odm_pipe = odm_pipe->next_odm_pipe;
}
}
-#endif
}
-static void dcn20_enable_stream_gating(struct dc *dc, struct pipe_ctx *pipe_ctx)
+void dcn20_enable_stream_gating(struct dc *dc, struct pipe_ctx *pipe_ctx)
{
-#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT
struct dce_hwseq *hws = dc->hwseq;
if (pipe_ctx->stream_res.dsc) {
@@ -1744,7 +1844,6 @@ static void dcn20_enable_stream_gating(struct dc *dc, struct pipe_ctx *pipe_ctx)
odm_pipe = odm_pipe->next_odm_pipe;
}
}
-#endif
}
void dcn20_set_dmdata_attributes(struct pipe_ctx *pipe_ctx)
@@ -1767,12 +1866,7 @@ void dcn20_set_dmdata_attributes(struct pipe_ctx *pipe_ctx)
hubp->funcs->dmdata_set_attributes(hubp, &attr);
}
-void dcn20_disable_stream(struct pipe_ctx *pipe_ctx)
-{
- dce110_disable_stream(pipe_ctx);
-}
-
-static void dcn20_init_vm_ctx(
+void dcn20_init_vm_ctx(
struct dce_hwseq *hws,
struct dc *dc,
struct dc_virtual_addr_space_config *va_config,
@@ -1794,7 +1888,7 @@ static void dcn20_init_vm_ctx(
dc->res_pool->hubbub->funcs->init_vm_ctx(dc->res_pool->hubbub, &config, vmid);
}
-static int dcn20_init_sys_ctx(struct dce_hwseq *hws, struct dc *dc, struct dc_phy_addr_space_config *pa_config)
+int dcn20_init_sys_ctx(struct dce_hwseq *hws, struct dc *dc, struct dc_phy_addr_space_config *pa_config)
{
struct dcn_hubbub_phys_addr_config config;
@@ -1838,8 +1932,7 @@ static bool patch_address_for_sbs_tb_stereo(
return false;
}
-
-static void dcn20_update_plane_addr(const struct dc *dc, struct pipe_ctx *pipe_ctx)
+void dcn20_update_plane_addr(const struct dc *dc, struct pipe_ctx *pipe_ctx)
{
bool addr_patched = false;
PHYSICAL_ADDRESS_LOC addr;
@@ -1873,6 +1966,7 @@ void dcn20_unblank_stream(struct pipe_ctx *pipe_ctx,
struct encoder_unblank_param params = { { 0 } };
struct dc_stream_state *stream = pipe_ctx->stream;
struct dc_link *link = stream->link;
+ struct dce_hwseq *hws = link->dc->hwseq;
struct pipe_ctx *odm_pipe;
params.opp_cnt = 1;
@@ -1885,7 +1979,7 @@ void dcn20_unblank_stream(struct pipe_ctx *pipe_ctx,
params.link_settings.link_rate = link_settings->link_rate;
if (dc_is_dp_signal(pipe_ctx->stream->signal)) {
- if (optc1_is_two_pixels_per_containter(&stream->timing) || params.opp_cnt > 1)
+ if (optc2_is_two_pixels_per_containter(&stream->timing) || params.opp_cnt > 1)
params.timing.pix_clk_100hz /= 2;
pipe_ctx->stream_res.stream_enc->funcs->dp_set_odm_combine(
pipe_ctx->stream_res.stream_enc, params.opp_cnt > 1);
@@ -1893,14 +1987,14 @@ void dcn20_unblank_stream(struct pipe_ctx *pipe_ctx,
}
if (link->local_sink && link->local_sink->sink_signal == SIGNAL_TYPE_EDP) {
- link->dc->hwss.edp_backlight_control(link, true);
+ hws->funcs.edp_backlight_control(link, true);
}
}
-void dcn20_setup_vupdate_interrupt(struct pipe_ctx *pipe_ctx)
+void dcn20_setup_vupdate_interrupt(struct dc *dc, struct pipe_ctx *pipe_ctx)
{
struct timing_generator *tg = pipe_ctx->stream_res.tg;
- int start_line = get_vupdate_offset_from_vsync(pipe_ctx);
+ int start_line = dc->hwss.get_vupdate_offset_from_vsync(pipe_ctx);
if (start_line < 0)
start_line = 0;
@@ -1915,6 +2009,7 @@ static void dcn20_reset_back_end_for_pipe(
struct dc_state *context)
{
int i;
+ struct dc_link *link;
DC_LOGGER_INIT(dc->ctx->logger);
if (pipe_ctx->stream_res.stream_enc == NULL) {
pipe_ctx->stream = NULL;
@@ -1922,8 +2017,14 @@ static void dcn20_reset_back_end_for_pipe(
}
if (!IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment)) {
- /* DPMS may already disable */
- if (!pipe_ctx->stream->dpms_off)
+ link = pipe_ctx->stream->link;
+ /* DPMS may already be disabled, or the
+ * dpms_off status may be incorrect due to the fastboot
+ * feature: when the system resumes from S4 with only the
+ * second screen active, dpms_off would be true even though
+ * the VBIOS lit up the eDP, so check the link status too.
+ */
+ if (!pipe_ctx->stream->dpms_off || link->link_status.link_active)
core_link_disable_stream(pipe_ctx);
else if (pipe_ctx->stream_res.audio)
dc->hwss.disable_audio_stream(pipe_ctx);
@@ -1943,11 +2044,9 @@ static void dcn20_reset_back_end_for_pipe(
}
}
}
-#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT
else if (pipe_ctx->stream_res.dsc) {
dp_set_dsc_enable(pipe_ctx, false);
}
-#endif
/* by upper caller loop, parent pipe: pipe0, will be reset last.
* back end share by all pipes and will be disable only when disable
@@ -1978,11 +2077,12 @@ static void dcn20_reset_back_end_for_pipe(
pipe_ctx->pipe_idx, pipe_ctx->stream_res.tg->inst);
}
-static void dcn20_reset_hw_ctx_wrap(
+void dcn20_reset_hw_ctx_wrap(
struct dc *dc,
struct dc_state *context)
{
int i;
+ struct dce_hwseq *hws = dc->hwseq;
/* Reset Back End*/
for (i = dc->res_pool->pipe_count - 1; i >= 0 ; i--) {
@@ -2001,8 +2101,8 @@ static void dcn20_reset_hw_ctx_wrap(
struct clock_source *old_clk = pipe_ctx_old->clock_source;
dcn20_reset_back_end_for_pipe(dc, pipe_ctx_old, dc->current_state);
- if (dc->hwss.enable_stream_gating)
- dc->hwss.enable_stream_gating(dc, pipe_ctx);
+ if (hws->funcs.enable_stream_gating)
+ hws->funcs.enable_stream_gating(dc, pipe_ctx);
if (old_clk)
old_clk->funcs->cs_power_down(old_clk);
}
@@ -2031,8 +2131,9 @@ void dcn20_get_mpctree_visual_confirm_color(
*color = pipe_colors[top_pipe->pipe_idx];
}
-static void dcn20_update_mpcc(struct dc *dc, struct pipe_ctx *pipe_ctx)
+void dcn20_update_mpcc(struct dc *dc, struct pipe_ctx *pipe_ctx)
{
+ struct dce_hwseq *hws = dc->hwseq;
struct hubp *hubp = pipe_ctx->plane_res.hubp;
struct mpcc_blnd_cfg blnd_cfg = { {0} };
bool per_pixel_alpha = pipe_ctx->plane_state->per_pixel_alpha;
@@ -2043,10 +2144,10 @@ static void dcn20_update_mpcc(struct dc *dc, struct pipe_ctx *pipe_ctx)
// input to MPCC is always RGB, by default leave black_color at 0
if (dc->debug.visual_confirm == VISUAL_CONFIRM_HDR) {
- dcn10_get_hdr_visual_confirm_color(
+ hws->funcs.get_hdr_visual_confirm_color(
pipe_ctx, &blnd_cfg.black_color);
} else if (dc->debug.visual_confirm == VISUAL_CONFIRM_SURFACE) {
- dcn10_get_surface_visual_confirm_color(
+ hws->funcs.get_surface_visual_confirm_color(
pipe_ctx, &blnd_cfg.black_color);
} else if (dc->debug.visual_confirm == VISUAL_CONFIRM_MPCTREE) {
dcn20_get_mpctree_visual_confirm_color(
@@ -2083,12 +2184,6 @@ static void dcn20_update_mpcc(struct dc *dc, struct pipe_ctx *pipe_ctx)
*/
mpcc_id = hubp->inst;
- /* If there is no full update, don't need to touch MPC tree*/
- if (!pipe_ctx->plane_state->update_flags.bits.full_update) {
- mpc->funcs->update_blending(mpc, &blnd_cfg, mpcc_id);
- return;
- }
-
/* check if this MPCC is already being used */
new_mpcc = mpc->funcs->get_mpcc_for_dpp(mpc_tree_params, mpcc_id);
/* remove MPCC if being used */
@@ -2113,125 +2208,7 @@ static void dcn20_update_mpcc(struct dc *dc, struct pipe_ctx *pipe_ctx)
hubp->mpcc_id = mpcc_id;
}
-static int find_free_gsl_group(const struct dc *dc)
-{
- if (dc->res_pool->gsl_groups.gsl_0 == 0)
- return 1;
- if (dc->res_pool->gsl_groups.gsl_1 == 0)
- return 2;
- if (dc->res_pool->gsl_groups.gsl_2 == 0)
- return 3;
-
- return 0;
-}
-
-/* NOTE: This is not a generic setup_gsl function (hence the suffix as_lock)
- * This is only used to lock pipes in pipe splitting case with immediate flip
- * Ordinary MPC/OTG locks suppress VUPDATE which doesn't help with immediate,
- * so we get tearing with freesync since we cannot flip multiple pipes
- * atomically.
- * We use GSL for this:
- * - immediate flip: find first available GSL group if not already assigned
- * program gsl with that group, set current OTG as master
- * and always us 0x4 = AND of flip_ready from all pipes
- * - vsync flip: disable GSL if used
- *
- * Groups in stream_res are stored as +1 from HW registers, i.e.
- * gsl_0 <=> pipe_ctx->stream_res.gsl_group == 1
- * Using a magic value like -1 would require tracking all inits/resets
- */
-void dcn20_setup_gsl_group_as_lock(
- const struct dc *dc,
- struct pipe_ctx *pipe_ctx,
- bool enable)
-{
- struct gsl_params gsl;
- int group_idx;
-
- memset(&gsl, 0, sizeof(struct gsl_params));
-
- if (enable) {
- /* return if group already assigned since GSL was set up
- * for vsync flip, we would unassign so it can't be "left over"
- */
- if (pipe_ctx->stream_res.gsl_group > 0)
- return;
-
- group_idx = find_free_gsl_group(dc);
- ASSERT(group_idx != 0);
- pipe_ctx->stream_res.gsl_group = group_idx;
-
- /* set gsl group reg field and mark resource used */
- switch (group_idx) {
- case 1:
- gsl.gsl0_en = 1;
- dc->res_pool->gsl_groups.gsl_0 = 1;
- break;
- case 2:
- gsl.gsl1_en = 1;
- dc->res_pool->gsl_groups.gsl_1 = 1;
- break;
- case 3:
- gsl.gsl2_en = 1;
- dc->res_pool->gsl_groups.gsl_2 = 1;
- break;
- default:
- BREAK_TO_DEBUGGER();
- return; // invalid case
- }
- gsl.gsl_master_en = 1;
- } else {
- group_idx = pipe_ctx->stream_res.gsl_group;
- if (group_idx == 0)
- return; // if not in use, just return
-
- pipe_ctx->stream_res.gsl_group = 0;
-
- /* unset gsl group reg field and mark resource free */
- switch (group_idx) {
- case 1:
- gsl.gsl0_en = 0;
- dc->res_pool->gsl_groups.gsl_0 = 0;
- break;
- case 2:
- gsl.gsl1_en = 0;
- dc->res_pool->gsl_groups.gsl_1 = 0;
- break;
- case 3:
- gsl.gsl2_en = 0;
- dc->res_pool->gsl_groups.gsl_2 = 0;
- break;
- default:
- BREAK_TO_DEBUGGER();
- return;
- }
- gsl.gsl_master_en = 0;
- }
-
- /* at this point we want to program whether it's to enable or disable */
- if (pipe_ctx->stream_res.tg->funcs->set_gsl != NULL &&
- pipe_ctx->stream_res.tg->funcs->set_gsl_source_select != NULL) {
- pipe_ctx->stream_res.tg->funcs->set_gsl(
- pipe_ctx->stream_res.tg,
- &gsl);
-
- pipe_ctx->stream_res.tg->funcs->set_gsl_source_select(
- pipe_ctx->stream_res.tg, group_idx, enable ? 4 : 0);
- } else
- BREAK_TO_DEBUGGER();
-}
-
-static void dcn20_set_flip_control_gsl(
- struct pipe_ctx *pipe_ctx,
- bool flip_immediate)
-{
- if (pipe_ctx && pipe_ctx->plane_res.hubp->funcs->hubp_set_flip_control_surface_gsl)
- pipe_ctx->plane_res.hubp->funcs->hubp_set_flip_control_surface_gsl(
- pipe_ctx->plane_res.hubp, flip_immediate);
-
-}
-
-static void dcn20_enable_stream(struct pipe_ctx *pipe_ctx)
+void dcn20_enable_stream(struct pipe_ctx *pipe_ctx)
{
enum dc_lane_count lane_count =
pipe_ctx->stream->link->cur_link_settings.lane_count;
@@ -2279,7 +2256,7 @@ static void dcn20_enable_stream(struct pipe_ctx *pipe_ctx)
}
}
-static void dcn20_program_dmdata_engine(struct pipe_ctx *pipe_ctx)
+void dcn20_program_dmdata_engine(struct pipe_ctx *pipe_ctx)
{
struct dc_stream_state *stream = pipe_ctx->stream;
struct hubp *hubp = pipe_ctx->plane_res.hubp;
@@ -2305,7 +2282,7 @@ static void dcn20_program_dmdata_engine(struct pipe_ctx *pipe_ctx)
hubp->inst, mode);
}
-static void dcn20_fpga_init_hw(struct dc *dc)
+void dcn20_fpga_init_hw(struct dc *dc)
{
int i, j;
struct dce_hwseq *hws = dc->hwseq;
@@ -2320,13 +2297,13 @@ static void dcn20_fpga_init_hw(struct dc *dc)
res_pool->dccg->funcs->dccg_init(res_pool->dccg);
//Enable ability to power gate / don't force power on permanently
- dc->hwss.enable_power_gating_plane(hws, true);
+ hws->funcs.enable_power_gating_plane(hws, true);
// Specific to FPGA dccg and registers
REG_WRITE(RBBMIF_TIMEOUT_DIS, 0xFFFFFFFF);
REG_WRITE(RBBMIF_TIMEOUT_DIS_2, 0xFFFFFFFF);
- dcn20_dccg_init(hws);
+ hws->funcs.dccg_init(hws);
REG_UPDATE(DCHUBBUB_GLOBAL_TIMER_CNTL, DCHUBBUB_GLOBAL_TIMER_REFDIV, 2);
REG_UPDATE(DCHUBBUB_GLOBAL_TIMER_CNTL, DCHUBBUB_GLOBAL_TIMER_ENABLE, 1);
@@ -2390,7 +2367,7 @@ static void dcn20_fpga_init_hw(struct dc *dc)
dc->res_pool->opps[i]->mpcc_disconnect_pending[pipe_ctx->plane_res.mpcc_inst] = true;
pipe_ctx->stream_res.opp = dc->res_pool->opps[i];
/*to do*/
- hwss1_plane_atomic_disconnect(dc, pipe_ctx);
+ hws->funcs.plane_atomic_disconnect(dc, pipe_ctx);
}
/* initialize DWB pointer to MCIF_WB */
@@ -2419,57 +2396,3 @@ static void dcn20_fpga_init_hw(struct dc *dc)
tg->funcs->tg_init(tg);
}
}
-
-void dcn20_hw_sequencer_construct(struct dc *dc)
-{
- dcn10_hw_sequencer_construct(dc);
- dc->hwss.unblank_stream = dcn20_unblank_stream;
- dc->hwss.update_plane_addr = dcn20_update_plane_addr;
- dc->hwss.enable_stream_timing = dcn20_enable_stream_timing;
- dc->hwss.program_triplebuffer = dcn20_program_tripleBuffer;
- dc->hwss.set_input_transfer_func = dcn20_set_input_transfer_func;
- dc->hwss.set_output_transfer_func = dcn20_set_output_transfer_func;
- dc->hwss.apply_ctx_for_surface = NULL;
- dc->hwss.program_front_end_for_ctx = dcn20_program_front_end_for_ctx;
- dc->hwss.pipe_control_lock = dcn20_pipe_control_lock;
- dc->hwss.pipe_control_lock_global = dcn20_pipe_control_lock_global;
- dc->hwss.optimize_bandwidth = dcn20_optimize_bandwidth;
- dc->hwss.prepare_bandwidth = dcn20_prepare_bandwidth;
- dc->hwss.update_bandwidth = dcn20_update_bandwidth;
- dc->hwss.enable_writeback = dcn20_enable_writeback;
- dc->hwss.disable_writeback = dcn20_disable_writeback;
- dc->hwss.program_output_csc = dcn20_program_output_csc;
- dc->hwss.update_odm = dcn20_update_odm;
- dc->hwss.blank_pixel_data = dcn20_blank_pixel_data;
- dc->hwss.dmdata_status_done = dcn20_dmdata_status_done;
- dc->hwss.program_dmdata_engine = dcn20_program_dmdata_engine;
- dc->hwss.enable_stream = dcn20_enable_stream;
- dc->hwss.disable_stream = dcn20_disable_stream;
- dc->hwss.init_sys_ctx = dcn20_init_sys_ctx;
- dc->hwss.init_vm_ctx = dcn20_init_vm_ctx;
- dc->hwss.disable_stream_gating = dcn20_disable_stream_gating;
- dc->hwss.enable_stream_gating = dcn20_enable_stream_gating;
- dc->hwss.setup_vupdate_interrupt = dcn20_setup_vupdate_interrupt;
- dc->hwss.reset_hw_ctx_wrap = dcn20_reset_hw_ctx_wrap;
- dc->hwss.update_mpcc = dcn20_update_mpcc;
- dc->hwss.set_flip_control_gsl = dcn20_set_flip_control_gsl;
- dc->hwss.init_blank = dcn20_init_blank;
- dc->hwss.disable_plane = dcn20_disable_plane;
- dc->hwss.plane_atomic_disable = dcn20_plane_atomic_disable;
- dc->hwss.enable_power_gating_plane = dcn20_enable_power_gating_plane;
- dc->hwss.dpp_pg_control = dcn20_dpp_pg_control;
- dc->hwss.hubp_pg_control = dcn20_hubp_pg_control;
-#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT
- dc->hwss.dsc_pg_control = dcn20_dsc_pg_control;
-#else
- dc->hwss.dsc_pg_control = NULL;
-#endif
- dc->hwss.disable_vga = dcn20_disable_vga;
-
- if (IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment)) {
- dc->hwss.init_hw = dcn20_fpga_init_hw;
- dc->hwss.init_pipes = NULL;
- }
-
-
-}
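The pattern running through this file — dc->hwss.<hook> calls becoming hws->funcs.<hook> — reflects the split of the hw sequencer interface into a public table and a private one (see the hw_sequencer_private.h include added to the header below). A rough sketch of the resulting shape; member names are abbreviated for illustration and the real structs carry many more hooks:

/* Rough shape only: public hooks stay on dc->hwss, while chip-internal
 * steps move behind dc->hwseq->funcs. Both structs are much larger in
 * the real headers (hw_sequencer.h / hw_sequencer_private.h).
 */
struct hwseq_private_funcs {
	void (*blank_pixel_data)(struct dc *dc,
			struct pipe_ctx *pipe_ctx, bool blank);
	void (*update_mpcc)(struct dc *dc, struct pipe_ctx *pipe_ctx);
};

struct dce_hwseq {
	struct dc_context *ctx;
	const struct dce_hwseq_registers *regs;
	const struct dce_hwseq_shift *shifts;
	const struct dce_hwseq_mask *masks;
	struct hwseq_private_funcs funcs;	/* private hooks */
};

	/* call sites then read, e.g.: */
	hws->funcs.blank_pixel_data(dc, pipe_ctx, true);

This is also why dcn20_hw_sequencer_construct() disappears above: the per-ASIC wiring of these tables moves out of the hwseq implementation file.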
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.h b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.h
index 3098f1049ed7..02c9be5ebd47 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.h
@@ -26,90 +26,112 @@
#ifndef __DC_HWSS_DCN20_H__
#define __DC_HWSS_DCN20_H__
-struct dc;
+#include "hw_sequencer_private.h"
-void dcn20_hw_sequencer_construct(struct dc *dc);
-
-enum dc_status dcn20_enable_stream_timing(
- struct pipe_ctx *pipe_ctx,
- struct dc_state *context,
- struct dc *dc);
-
-void dcn20_blank_pixel_data(
+bool dcn20_set_blend_lut(
+ struct pipe_ctx *pipe_ctx, const struct dc_plane_state *plane_state);
+bool dcn20_set_shaper_3dlut(
+ struct pipe_ctx *pipe_ctx, const struct dc_plane_state *plane_state);
+void dcn20_program_front_end_for_ctx(
struct dc *dc,
- struct pipe_ctx *pipe_ctx,
- bool blank);
-
+ struct dc_state *context);
+void dcn20_update_plane_addr(const struct dc *dc, struct pipe_ctx *pipe_ctx);
+void dcn20_update_mpcc(struct dc *dc, struct pipe_ctx *pipe_ctx);
+bool dcn20_set_input_transfer_func(struct dc *dc, struct pipe_ctx *pipe_ctx,
+ const struct dc_plane_state *plane_state);
+bool dcn20_set_output_transfer_func(struct dc *dc, struct pipe_ctx *pipe_ctx,
+ const struct dc_stream_state *stream);
void dcn20_program_output_csc(struct dc *dc,
struct pipe_ctx *pipe_ctx,
enum dc_color_space colorspace,
uint16_t *matrix,
int opp_id);
-
+void dcn20_enable_stream(struct pipe_ctx *pipe_ctx);
+void dcn20_unblank_stream(struct pipe_ctx *pipe_ctx,
+ struct dc_link_settings *link_settings);
+void dcn20_disable_plane(struct dc *dc, struct pipe_ctx *pipe_ctx);
+void dcn20_blank_pixel_data(
+ struct dc *dc,
+ struct pipe_ctx *pipe_ctx,
+ bool blank);
+void dcn20_pipe_control_lock(
+ struct dc *dc,
+ struct pipe_ctx *pipe,
+ bool lock);
+void dcn20_pipe_control_lock_global(
+ struct dc *dc,
+ struct pipe_ctx *pipe,
+ bool lock);
void dcn20_prepare_bandwidth(
struct dc *dc,
struct dc_state *context);
-
void dcn20_optimize_bandwidth(
struct dc *dc,
struct dc_state *context);
-
bool dcn20_update_bandwidth(
struct dc *dc,
struct dc_state *context);
-
+void dcn20_reset_hw_ctx_wrap(
+ struct dc *dc,
+ struct dc_state *context);
+enum dc_status dcn20_enable_stream_timing(
+ struct pipe_ctx *pipe_ctx,
+ struct dc_state *context,
+ struct dc *dc);
+void dcn20_disable_stream_gating(struct dc *dc, struct pipe_ctx *pipe_ctx);
+void dcn20_enable_stream_gating(struct dc *dc, struct pipe_ctx *pipe_ctx);
+void dcn20_setup_vupdate_interrupt(struct dc *dc, struct pipe_ctx *pipe_ctx);
+void dcn20_init_blank(
+ struct dc *dc,
+ struct timing_generator *tg);
+void dcn20_disable_vga(
+ struct dce_hwseq *hws);
+void dcn20_plane_atomic_disable(struct dc *dc, struct pipe_ctx *pipe_ctx);
+void dcn20_enable_power_gating_plane(
+ struct dce_hwseq *hws,
+ bool enable);
+void dcn20_dpp_pg_control(
+ struct dce_hwseq *hws,
+ unsigned int dpp_inst,
+ bool power_on);
+void dcn20_hubp_pg_control(
+ struct dce_hwseq *hws,
+ unsigned int hubp_inst,
+ bool power_on);
+void dcn20_program_triple_buffer(
+ const struct dc *dc,
+ struct pipe_ctx *pipe_ctx,
+ bool enable_triple_buffer);
+void dcn20_enable_writeback(
+ struct dc *dc,
+ struct dc_writeback_info *wb_info,
+ struct dc_state *context);
void dcn20_disable_writeback(
struct dc *dc,
unsigned int dwb_pipe_inst);
-
-bool dcn20_hwss_wait_for_blank_complete(
- struct output_pixel_processor *opp);
-
-bool dcn20_set_output_transfer_func(struct pipe_ctx *pipe_ctx,
- const struct dc_stream_state *stream);
-
-bool dcn20_set_input_transfer_func(struct pipe_ctx *pipe_ctx,
- const struct dc_plane_state *plane_state);
-
+void dcn20_update_odm(struct dc *dc, struct dc_state *context, struct pipe_ctx *pipe_ctx);
bool dcn20_dmdata_status_done(struct pipe_ctx *pipe_ctx);
-
+void dcn20_program_dmdata_engine(struct pipe_ctx *pipe_ctx);
void dcn20_set_dmdata_attributes(struct pipe_ctx *pipe_ctx);
-
-void dcn20_disable_stream(struct pipe_ctx *pipe_ctx);
-
-void dcn20_program_tripleBuffer(
- const struct dc *dc,
- struct pipe_ctx *pipe_ctx,
- bool enableTripleBuffer);
-
-void dcn20_setup_vupdate_interrupt(struct pipe_ctx *pipe_ctx);
-
-void dcn20_pipe_control_lock_global(
+void dcn20_init_vm_ctx(
+ struct dce_hwseq *hws,
struct dc *dc,
- struct pipe_ctx *pipe,
- bool lock);
-void dcn20_setup_gsl_group_as_lock(const struct dc *dc,
- struct pipe_ctx *pipe_ctx,
- bool enable);
-void dcn20_dccg_init(struct dce_hwseq *hws);
-void dcn20_init_blank(
- struct dc *dc,
- struct timing_generator *tg);
-void dcn20_display_init(struct dc *dc);
-void dcn20_pipe_control_lock(
- struct dc *dc,
- struct pipe_ctx *pipe,
- bool lock);
-void dcn20_disable_plane(struct dc *dc, struct pipe_ctx *pipe_ctx);
-void dcn20_enable_plane(
- struct dc *dc,
- struct pipe_ctx *pipe_ctx,
- struct dc_state *context);
-bool dcn20_set_blend_lut(
- struct pipe_ctx *pipe_ctx, const struct dc_plane_state *plane_state);
-bool dcn20_set_shaper_3dlut(
- struct pipe_ctx *pipe_ctx, const struct dc_plane_state *plane_state);
-void dcn20_get_mpctree_visual_confirm_color(
+ struct dc_virtual_addr_space_config *va_config,
+ int vmid);
+void dcn20_set_flip_control_gsl(
struct pipe_ctx *pipe_ctx,
- struct tg_color *color);
+ bool flip_immediate);
+void dcn20_dsc_pg_control(
+ struct dce_hwseq *hws,
+ unsigned int dsc_inst,
+ bool power_on);
+void dcn20_fpga_init_hw(struct dc *dc);
+bool dcn20_wait_for_blank_complete(
+ struct output_pixel_processor *opp);
+void dcn20_dccg_init(struct dce_hwseq *hws);
+int dcn20_init_sys_ctx(struct dce_hwseq *hws,
+ struct dc *dc,
+ struct dc_phy_addr_space_config *pa_config);
+
#endif /* __DC_HWSS_DCN20_H__ */
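
With the switch to hw_sequencer_private.h, most of these prototypes gained an explicit struct dc * or struct dce_hwseq * first argument, and the hooks that used to hang directly off dc->hwss are now reached through the private table. A hypothetical call site, assuming the hwseq_private_funcs layout this series introduces:

    static void blank_if_possible(struct dc *dc, struct pipe_ctx *pipe_ctx)
    {
            /* private hooks dispatch through dc->hwseq->funcs */
            if (dc->hwseq->funcs.blank_pixel_data)
                    dc->hwseq->funcs.blank_pixel_data(dc, pipe_ctx, true);
    }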
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_init.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_init.c
new file mode 100644
index 000000000000..d51e02fdab4d
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_init.c
@@ -0,0 +1,133 @@
+/*
+ * Copyright 2016 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include "dce110/dce110_hw_sequencer.h"
+#include "dcn10/dcn10_hw_sequencer.h"
+#include "dcn20_hwseq.h"
+
+static const struct hw_sequencer_funcs dcn20_funcs = {
+ .program_gamut_remap = dcn10_program_gamut_remap,
+ .init_hw = dcn10_init_hw,
+ .apply_ctx_to_hw = dce110_apply_ctx_to_hw,
+ .apply_ctx_for_surface = NULL,
+ .program_front_end_for_ctx = dcn20_program_front_end_for_ctx,
+ .update_plane_addr = dcn20_update_plane_addr,
+ .update_dchub = dcn10_update_dchub,
+ .update_pending_status = dcn10_update_pending_status,
+ .program_output_csc = dcn20_program_output_csc,
+ .enable_accelerated_mode = dce110_enable_accelerated_mode,
+ .enable_timing_synchronization = dcn10_enable_timing_synchronization,
+ .enable_per_frame_crtc_position_reset = dcn10_enable_per_frame_crtc_position_reset,
+ .update_info_frame = dce110_update_info_frame,
+ .send_immediate_sdp_message = dcn10_send_immediate_sdp_message,
+ .enable_stream = dcn20_enable_stream,
+ .disable_stream = dce110_disable_stream,
+ .unblank_stream = dcn20_unblank_stream,
+ .blank_stream = dce110_blank_stream,
+ .enable_audio_stream = dce110_enable_audio_stream,
+ .disable_audio_stream = dce110_disable_audio_stream,
+ .disable_plane = dcn20_disable_plane,
+ .pipe_control_lock = dcn20_pipe_control_lock,
+ .pipe_control_lock_global = dcn20_pipe_control_lock_global,
+ .prepare_bandwidth = dcn20_prepare_bandwidth,
+ .optimize_bandwidth = dcn20_optimize_bandwidth,
+ .update_bandwidth = dcn20_update_bandwidth,
+ .set_drr = dcn10_set_drr,
+ .get_position = dcn10_get_position,
+ .set_static_screen_control = dcn10_set_static_screen_control,
+ .setup_stereo = dcn10_setup_stereo,
+ .set_avmute = dce110_set_avmute,
+ .log_hw_state = dcn10_log_hw_state,
+ .get_hw_state = dcn10_get_hw_state,
+ .clear_status_bits = dcn10_clear_status_bits,
+ .wait_for_mpcc_disconnect = dcn10_wait_for_mpcc_disconnect,
+ .edp_power_control = dce110_edp_power_control,
+ .edp_wait_for_hpd_ready = dce110_edp_wait_for_hpd_ready,
+ .set_cursor_position = dcn10_set_cursor_position,
+ .set_cursor_attribute = dcn10_set_cursor_attribute,
+ .set_cursor_sdr_white_level = dcn10_set_cursor_sdr_white_level,
+ .setup_periodic_interrupt = dcn10_setup_periodic_interrupt,
+ .set_clock = dcn10_set_clock,
+ .get_clock = dcn10_get_clock,
+ .program_triplebuffer = dcn20_program_triple_buffer,
+ .enable_writeback = dcn20_enable_writeback,
+ .disable_writeback = dcn20_disable_writeback,
+ .dmdata_status_done = dcn20_dmdata_status_done,
+ .program_dmdata_engine = dcn20_program_dmdata_engine,
+ .set_dmdata_attributes = dcn20_set_dmdata_attributes,
+ .init_sys_ctx = dcn20_init_sys_ctx,
+ .init_vm_ctx = dcn20_init_vm_ctx,
+ .set_flip_control_gsl = dcn20_set_flip_control_gsl,
+ .get_vupdate_offset_from_vsync = dcn10_get_vupdate_offset_from_vsync,
+};
+
+static const struct hwseq_private_funcs dcn20_private_funcs = {
+ .init_pipes = dcn10_init_pipes,
+ .update_plane_addr = dcn20_update_plane_addr,
+ .plane_atomic_disconnect = dcn10_plane_atomic_disconnect,
+ .update_mpcc = dcn20_update_mpcc,
+ .set_input_transfer_func = dcn20_set_input_transfer_func,
+ .set_output_transfer_func = dcn20_set_output_transfer_func,
+ .power_down = dce110_power_down,
+ .enable_display_power_gating = dcn10_dummy_display_power_gating,
+ .blank_pixel_data = dcn20_blank_pixel_data,
+ .reset_hw_ctx_wrap = dcn20_reset_hw_ctx_wrap,
+ .enable_stream_timing = dcn20_enable_stream_timing,
+ .edp_backlight_control = dce110_edp_backlight_control,
+ .disable_stream_gating = dcn20_disable_stream_gating,
+ .enable_stream_gating = dcn20_enable_stream_gating,
+ .setup_vupdate_interrupt = dcn20_setup_vupdate_interrupt,
+ .did_underflow_occur = dcn10_did_underflow_occur,
+ .init_blank = dcn20_init_blank,
+ .disable_vga = dcn20_disable_vga,
+ .bios_golden_init = dcn10_bios_golden_init,
+ .plane_atomic_disable = dcn20_plane_atomic_disable,
+ .plane_atomic_power_down = dcn10_plane_atomic_power_down,
+ .enable_power_gating_plane = dcn20_enable_power_gating_plane,
+ .dpp_pg_control = dcn20_dpp_pg_control,
+ .hubp_pg_control = dcn20_hubp_pg_control,
+ .update_odm = dcn20_update_odm,
+ .dsc_pg_control = dcn20_dsc_pg_control,
+ .get_surface_visual_confirm_color = dcn10_get_surface_visual_confirm_color,
+ .get_hdr_visual_confirm_color = dcn10_get_hdr_visual_confirm_color,
+ .set_hdr_multiplier = dcn10_set_hdr_multiplier,
+ .verify_allow_pstate_change_high = dcn10_verify_allow_pstate_change_high,
+ .wait_for_blank_complete = dcn20_wait_for_blank_complete,
+ .dccg_init = dcn20_dccg_init,
+ .set_blend_lut = dcn20_set_blend_lut,
+ .set_shaper_3dlut = dcn20_set_shaper_3dlut,
+};
+
+void dcn20_hw_sequencer_construct(struct dc *dc)
+{
+ dc->hwss = dcn20_funcs;
+ dc->hwseq->funcs = dcn20_private_funcs;
+
+ if (IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment)) {
+ dc->hwss.init_hw = dcn20_fpga_init_hw;
+ dc->hwseq->funcs.init_pipes = NULL;
+ }
+}
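
Note that dc->hwss and dc->hwseq->funcs hold copies of the const tables rather than pointers to them, which is what lets the FPGA branch patch individual hooks per device without mutating the shared dcn20_funcs/dcn20_private_funcs. Condensed:

    dc->hwss = dcn20_funcs;        /* struct copy, not a pointer */
    if (IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment))
            dc->hwss.init_hw = dcn20_fpga_init_hw;  /* per-dc override only */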
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_init.h b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_init.h
new file mode 100644
index 000000000000..12277797cd71
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_init.h
@@ -0,0 +1,33 @@
+/*
+ * Copyright 2016 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef __DC_DCN20_INIT_H__
+#define __DC_DCN20_INIT_H__
+
+struct dc;
+
+void dcn20_hw_sequencer_construct(struct dc *dc);
+
+#endif /* __DC_DCN20_INIT_H__ */
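
The header is deliberately minimal: the constructor is the only entry point the rest of the driver needs. Presumed usage from dcn20_resource.c (the matching #include is added in the resource.c hunk below; the construct-time call itself falls outside this diff's context):

    #include "dcn20_init.h"

    /* somewhere along the dcn20 resource-pool construct path */
    dcn20_hw_sequencer_construct(dc);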
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_link_encoder.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_link_encoder.c
index e476f27aa3a9..e4ac73035c84 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_link_encoder.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_link_encoder.c
@@ -168,10 +168,8 @@ static struct mpll_cfg dcn2_mpll_cfg[] = {
void enc2_fec_set_enable(struct link_encoder *enc, bool enable)
{
struct dcn10_link_encoder *enc10 = TO_DCN10_LINK_ENC(enc);
-#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT
DC_LOG_DSC("%s FEC at link encoder inst %d",
enable ? "Enabling" : "Disabling", enc->id.enum_id);
-#endif
REG_UPDATE(DP_DPHY_CNTL, DPHY_FEC_EN, enable);
}
@@ -192,7 +190,6 @@ bool enc2_fec_is_active(struct link_encoder *enc)
return (active != 0);
}
-#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT
/* this function reads dsc related register fields to be logged later in dcn10_log_hw_state
* into a dcn_dsc_state struct.
*/
@@ -203,8 +200,8 @@ void link_enc2_read_state(struct link_encoder *enc, struct link_enc_state *s)
REG_GET(DP_DPHY_CNTL, DPHY_FEC_EN, &s->dphy_fec_en);
REG_GET(DP_DPHY_CNTL, DPHY_FEC_READY_SHADOW, &s->dphy_fec_ready_shadow);
REG_GET(DP_DPHY_CNTL, DPHY_FEC_ACTIVE_STATUS, &s->dphy_fec_active_status);
+ REG_GET(DP_LINK_CNTL, DP_LINK_TRAINING_COMPLETE, &s->dp_link_training_complete);
}
-#endif
static bool update_cfg_data(
struct dcn10_link_encoder *enc10,
@@ -315,9 +312,7 @@ void enc2_hw_init(struct link_encoder *enc)
}
static const struct link_encoder_funcs dcn20_link_enc_funcs = {
-#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT
.read_state = link_enc2_read_state,
-#endif
.validate_output_with_stream =
dcn10_link_encoder_validate_output_with_stream,
.hw_init = enc2_hw_init,
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_link_encoder.h b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_link_encoder.h
index 0c98a0bbbd14..8cab8107fd94 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_link_encoder.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_link_encoder.h
@@ -33,7 +33,142 @@
SRI(AUX_DPHY_TX_CONTROL, DP_AUX, id)
#define UNIPHY_MASK_SH_LIST(mask_sh)\
- LE_SF(UNIPHYA_CHANNEL_XBAR_CNTL, UNIPHY_LINK_ENABLE, mask_sh)
+ LE_SF(SYMCLKA_CLOCK_ENABLE, SYMCLKA_CLOCK_ENABLE, mask_sh),\
+ LE_SF(UNIPHYA_CHANNEL_XBAR_CNTL, UNIPHY_LINK_ENABLE, mask_sh),\
+ LE_SF(UNIPHYA_CHANNEL_XBAR_CNTL, UNIPHY_CHANNEL0_XBAR_SOURCE, mask_sh),\
+ LE_SF(UNIPHYA_CHANNEL_XBAR_CNTL, UNIPHY_CHANNEL1_XBAR_SOURCE, mask_sh),\
+ LE_SF(UNIPHYA_CHANNEL_XBAR_CNTL, UNIPHY_CHANNEL2_XBAR_SOURCE, mask_sh),\
+ LE_SF(UNIPHYA_CHANNEL_XBAR_CNTL, UNIPHY_CHANNEL3_XBAR_SOURCE, mask_sh)
+
+#define DPCS_MASK_SH_LIST(mask_sh)\
+ LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL3, RDPCS_PHY_DP_TX0_CLK_RDY, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL3, RDPCS_PHY_DP_TX0_DATA_EN, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL3, RDPCS_PHY_DP_TX1_CLK_RDY, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL3, RDPCS_PHY_DP_TX1_DATA_EN, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL3, RDPCS_PHY_DP_TX2_CLK_RDY, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL3, RDPCS_PHY_DP_TX2_DATA_EN, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL3, RDPCS_PHY_DP_TX3_CLK_RDY, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL3, RDPCS_PHY_DP_TX3_DATA_EN, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL4, RDPCS_PHY_DP_TX0_TERM_CTRL, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL4, RDPCS_PHY_DP_TX1_TERM_CTRL, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL4, RDPCS_PHY_DP_TX2_TERM_CTRL, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL4, RDPCS_PHY_DP_TX3_TERM_CTRL, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL11, RDPCS_PHY_DP_MPLLB_MULTIPLIER, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL5, RDPCS_PHY_DP_TX0_WIDTH, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL5, RDPCS_PHY_DP_TX0_RATE, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL5, RDPCS_PHY_DP_TX1_WIDTH, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL5, RDPCS_PHY_DP_TX1_RATE, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL6, RDPCS_PHY_DP_TX2_PSTATE, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL6, RDPCS_PHY_DP_TX3_PSTATE, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL6, RDPCS_PHY_DP_TX2_MPLL_EN, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL6, RDPCS_PHY_DP_TX3_MPLL_EN, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL7, RDPCS_PHY_DP_MPLLB_FRACN_QUOT, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL7, RDPCS_PHY_DP_MPLLB_FRACN_DEN, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL8, RDPCS_PHY_DP_MPLLB_SSC_PEAK, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL9, RDPCS_PHY_DP_MPLLB_SSC_UP_SPREAD, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL9, RDPCS_PHY_DP_MPLLB_SSC_STEPSIZE, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL10, RDPCS_PHY_DP_MPLLB_FRACN_REM, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL11, RDPCS_PHY_DP_REF_CLK_MPLLB_DIV, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL11, RDPCS_PHY_HDMI_MPLLB_HDMI_DIV, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL12, RDPCS_PHY_DP_MPLLB_SSC_EN, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL12, RDPCS_PHY_DP_MPLLB_DIV5_CLK_EN, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL12, RDPCS_PHY_DP_MPLLB_TX_CLK_DIV, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL12, RDPCS_PHY_DP_MPLLB_WORD_DIV2_EN, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL12, RDPCS_PHY_DP_MPLLB_STATE, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL13, RDPCS_PHY_DP_MPLLB_DIV_CLK_EN, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL13, RDPCS_PHY_DP_MPLLB_DIV_MULTIPLIER, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL14, RDPCS_PHY_DP_MPLLB_FRACN_EN, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL14, RDPCS_PHY_DP_MPLLB_PMIX_EN, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_CNTL, RDPCS_TX_FIFO_LANE0_EN, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_CNTL, RDPCS_TX_FIFO_LANE1_EN, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_CNTL, RDPCS_TX_FIFO_LANE2_EN, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_CNTL, RDPCS_TX_FIFO_LANE3_EN, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_CNTL, RDPCS_TX_FIFO_EN, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_CNTL, RDPCS_TX_FIFO_RD_START_DELAY, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_CLOCK_CNTL, RDPCS_EXT_REFCLK_EN, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_CLOCK_CNTL, RDPCS_SRAMCLK_BYPASS, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_CLOCK_CNTL, RDPCS_SRAMCLK_EN, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_CLOCK_CNTL, RDPCS_SRAMCLK_CLOCK_ON, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_CLOCK_CNTL, RDPCS_SYMCLK_DIV2_CLOCK_ON, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_CLOCK_CNTL, RDPCS_SYMCLK_DIV2_GATE_DIS, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_CLOCK_CNTL, RDPCS_SYMCLK_DIV2_EN, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL3, RDPCS_PHY_DP_TX0_DISABLE, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL3, RDPCS_PHY_DP_TX1_DISABLE, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL3, RDPCS_PHY_DP_TX2_DISABLE, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL3, RDPCS_PHY_DP_TX3_DISABLE, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL3, RDPCS_PHY_DP_TX0_REQ, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL3, RDPCS_PHY_DP_TX1_REQ, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL3, RDPCS_PHY_DP_TX2_REQ, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL3, RDPCS_PHY_DP_TX3_REQ, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL3, RDPCS_PHY_DP_TX0_ACK, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL3, RDPCS_PHY_DP_TX1_ACK, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL3, RDPCS_PHY_DP_TX2_ACK, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL3, RDPCS_PHY_DP_TX3_ACK, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL3, RDPCS_PHY_DP_TX0_RESET, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL3, RDPCS_PHY_DP_TX1_RESET, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL3, RDPCS_PHY_DP_TX2_RESET, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL3, RDPCS_PHY_DP_TX3_RESET, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL0, RDPCS_PHY_RESET, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL0, RDPCS_PHY_CR_MUX_SEL, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL0, RDPCS_PHY_REF_RANGE, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL0, RDPCS_SRAM_BYPASS, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL0, RDPCS_SRAM_EXT_LD_DONE, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL0, RDPCS_PHY_HDMIMODE_ENABLE, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL0, RDPCS_SRAM_INIT_DONE, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL2, RDPCS_PHY_DP4_POR, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_PLL_UPDATE_DATA, RDPCS_PLL_UPDATE_DATA, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_INTERRUPT_CONTROL, RDPCS_REG_FIFO_ERROR_MASK, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_INTERRUPT_CONTROL, RDPCS_TX_FIFO_ERROR_MASK, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_INTERRUPT_CONTROL, RDPCS_DPALT_DISABLE_TOGGLE_MASK, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_INTERRUPT_CONTROL, RDPCS_DPALT_4LANE_TOGGLE_MASK, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCS_TX_CR_ADDR, RDPCS_TX_CR_ADDR, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCS_TX_CR_DATA, RDPCS_TX_CR_DATA, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_PHY_FUSE0, RDPCS_PHY_DP_MPLLB_V2I, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_PHY_FUSE0, RDPCS_PHY_DP_TX0_EQ_MAIN, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_PHY_FUSE0, RDPCS_PHY_DP_TX0_EQ_PRE, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_PHY_FUSE0, RDPCS_PHY_DP_TX0_EQ_POST, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_PHY_FUSE0, RDPCS_PHY_DP_MPLLB_FREQ_VCO, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_PHY_FUSE1, RDPCS_PHY_DP_MPLLB_CP_INT, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_PHY_FUSE1, RDPCS_PHY_DP_MPLLB_CP_PROP, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_PHY_FUSE1, RDPCS_PHY_DP_TX1_EQ_MAIN, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_PHY_FUSE1, RDPCS_PHY_DP_TX1_EQ_PRE, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_PHY_FUSE1, RDPCS_PHY_DP_TX1_EQ_POST, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_PHY_FUSE2, RDPCS_PHY_DP_TX2_EQ_MAIN, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_PHY_FUSE2, RDPCS_PHY_DP_TX2_EQ_PRE, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_PHY_FUSE2, RDPCS_PHY_DP_TX2_EQ_POST, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_PHY_FUSE3, RDPCS_PHY_DP_TX3_EQ_MAIN, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_PHY_FUSE3, RDPCS_PHY_DCO_FINETUNE, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_PHY_FUSE3, RDPCS_PHY_DCO_RANGE, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_PHY_FUSE3, RDPCS_PHY_DP_TX3_EQ_PRE, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_PHY_FUSE3, RDPCS_PHY_DP_TX3_EQ_POST, mask_sh),\
+ LE_SF(DPCSTX0_DPCSTX_TX_CLOCK_CNTL, DPCS_SYMCLK_CLOCK_ON, mask_sh),\
+ LE_SF(DPCSTX0_DPCSTX_TX_CLOCK_CNTL, DPCS_SYMCLK_GATE_DIS, mask_sh),\
+ LE_SF(DPCSTX0_DPCSTX_TX_CLOCK_CNTL, DPCS_SYMCLK_EN, mask_sh),\
+ LE_SF(DPCSTX0_DPCSTX_TX_CNTL, DPCS_TX_DATA_SWAP, mask_sh),\
+ LE_SF(DPCSTX0_DPCSTX_TX_CNTL, DPCS_TX_DATA_ORDER_INVERT, mask_sh),\
+ LE_SF(DPCSTX0_DPCSTX_TX_CNTL, DPCS_TX_FIFO_EN, mask_sh),\
+ LE_SF(DPCSTX0_DPCSTX_TX_CNTL, DPCS_TX_FIFO_RD_START_DELAY, mask_sh),\
+ LE_SF(DPCSTX0_DPCSTX_DEBUG_CONFIG, DPCS_DBG_CBUS_DIS, mask_sh)
+
+#define DPCS_DCN2_MASK_SH_LIST(mask_sh)\
+ DPCS_MASK_SH_LIST(mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_PHY_RX_LD_VAL, RDPCS_PHY_RX_REF_LD_VAL, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_PHY_RX_LD_VAL, RDPCS_PHY_RX_VCO_LD_VAL, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL6, RDPCS_PHY_DPALT_DISABLE_ACK, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL6, RDPCS_PHY_DP_TX0_PSTATE, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL6, RDPCS_PHY_DP_TX1_PSTATE, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL6, RDPCS_PHY_DP_TX0_MPLL_EN, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL6, RDPCS_PHY_DP_TX1_MPLL_EN, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL6, RDPCS_PHY_DP_REF_CLK_EN, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL5, RDPCS_PHY_DP_TX2_WIDTH, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL5, RDPCS_PHY_DP_TX2_RATE, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL5, RDPCS_PHY_DP_TX3_WIDTH, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL5, RDPCS_PHY_DP_TX3_RATE, mask_sh),\
+ LE_SF(DCIO_SOFT_RESET, UNIPHYA_SOFT_RESET, mask_sh),\
+ LE_SF(DCIO_SOFT_RESET, UNIPHYB_SOFT_RESET, mask_sh),\
+ LE_SF(DCIO_SOFT_RESET, UNIPHYC_SOFT_RESET, mask_sh),\
+ LE_SF(DCIO_SOFT_RESET, UNIPHYD_SOFT_RESET, mask_sh),\
+ LE_SF(DCIO_SOFT_RESET, UNIPHYE_SOFT_RESET, mask_sh)
#define LINK_ENCODER_MASK_SH_LIST_DCN20(mask_sh)\
LINK_ENCODER_MASK_SH_LIST_DCN10(mask_sh),\
@@ -63,6 +198,49 @@
SRI(CLOCK_ENABLE, SYMCLK, id), \
SRI(CHANNEL_XBAR_CNTL, UNIPHY, id)
+#define DPCS_DCN2_CMN_REG_LIST(id) \
+ SRI(DIG_LANE_ENABLE, DIG, id), \
+ SRI(TMDS_CTL_BITS, DIG, id), \
+ SRI(RDPCSTX_PHY_CNTL3, RDPCSTX, id), \
+ SRI(RDPCSTX_PHY_CNTL4, RDPCSTX, id), \
+ SRI(RDPCSTX_PHY_CNTL5, RDPCSTX, id), \
+ SRI(RDPCSTX_PHY_CNTL6, RDPCSTX, id), \
+ SRI(RDPCSTX_PHY_CNTL7, RDPCSTX, id), \
+ SRI(RDPCSTX_PHY_CNTL8, RDPCSTX, id), \
+ SRI(RDPCSTX_PHY_CNTL9, RDPCSTX, id), \
+ SRI(RDPCSTX_PHY_CNTL10, RDPCSTX, id), \
+ SRI(RDPCSTX_PHY_CNTL11, RDPCSTX, id), \
+ SRI(RDPCSTX_PHY_CNTL12, RDPCSTX, id), \
+ SRI(RDPCSTX_PHY_CNTL13, RDPCSTX, id), \
+ SRI(RDPCSTX_PHY_CNTL14, RDPCSTX, id), \
+ SRI(RDPCSTX_CNTL, RDPCSTX, id), \
+ SRI(RDPCSTX_CLOCK_CNTL, RDPCSTX, id), \
+ SRI(RDPCSTX_INTERRUPT_CONTROL, RDPCSTX, id), \
+ SRI(RDPCSTX_PHY_CNTL0, RDPCSTX, id), \
+ SRI(RDPCSTX_PHY_CNTL2, RDPCSTX, id), \
+ SRI(RDPCSTX_PLL_UPDATE_DATA, RDPCSTX, id), \
+ SRI(RDPCS_TX_CR_ADDR, RDPCSTX, id), \
+ SRI(RDPCS_TX_CR_DATA, RDPCSTX, id), \
+ SRI(RDPCSTX_PHY_FUSE0, RDPCSTX, id), \
+ SRI(RDPCSTX_PHY_FUSE1, RDPCSTX, id), \
+ SRI(RDPCSTX_PHY_FUSE2, RDPCSTX, id), \
+ SRI(RDPCSTX_PHY_FUSE3, RDPCSTX, id), \
+ SRI(DPCSTX_TX_CLOCK_CNTL, DPCSTX, id), \
+ SRI(DPCSTX_TX_CNTL, DPCSTX, id), \
+ SRI(DPCSTX_DEBUG_CONFIG, DPCSTX, id), \
+ SRI(RDPCSTX_DEBUG_CONFIG, RDPCSTX, id), \
+ SR(RDPCSTX0_RDPCSTX_SCRATCH)
+
+#define DPCS_DCN2_REG_LIST(id) \
+ DPCS_DCN2_CMN_REG_LIST(id), \
+ SRI(RDPCSTX_PHY_RX_LD_VAL, RDPCSTX, id),\
+ SRI(RDPCSTX_DMCU_DPALT_DIS_BLOCK_REG, RDPCSTX, id)
+
+#define LE_DCN2_REG_LIST(id) \
+ LE_DCN10_REG_LIST(id), \
+ SR(DCIO_SOFT_RESET)
+
struct mpll_cfg {
uint32_t mpllb_ana_v2i;
uint32_t mpllb_ana_freq_vco;
@@ -158,9 +336,7 @@ void enc2_fec_set_ready(struct link_encoder *enc, bool ready);
bool enc2_fec_is_active(struct link_encoder *enc);
void enc2_hw_init(struct link_encoder *enc);
-#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT
void link_enc2_read_state(struct link_encoder *enc, struct link_enc_state *s);
-#endif
void dcn20_link_encoder_enable_dp_output(
struct link_encoder *enc,
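
The DPCS register and mask lists above follow DC's usual stamping pattern. A rough sketch of how these helper macros typically expand in this driver (the exact definitions live in the per-component headers, so treat the bodies below as assumptions):

    /* Instance-relative register: fill the struct member reg_name with
     * the address of block instance id's copy of that register. */
    #define SRI(reg_name, block, id) \
            .reg_name = BASE(mm ## block ## id ## _ ## reg_name ## _BASE_IDX) + \
                    mm ## block ## id ## _ ## reg_name

    /* Shift/mask stamp: emit either the __SHIFT or the _MASK constant
     * for one field, depending on the suffix the caller passes in. */
    #define LE_SF(reg_name, field_name, post_fix) \
            .field_name = reg_name ## __ ## field_name ## post_fix

This is why the resource-file hunks below can build le_shift and le_mask from the same list by invoking DPCS_DCN2_MASK_SH_LIST(__SHIFT) and DPCS_DCN2_MASK_SH_LIST(_MASK).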
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_mpc.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_mpc.c
index 5a188b2bc033..de9c857ab3e9 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_mpc.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_mpc.c
@@ -33,6 +33,9 @@
#define REG(reg)\
mpc20->mpc_regs->reg
+#define IND_REG(index) \
+ (index)
+
#define CTX \
mpc20->base.ctx
@@ -132,19 +135,33 @@ void mpc2_set_output_csc(
const uint16_t *regval,
enum mpc_output_csc_mode ocsc_mode)
{
+ uint32_t cur_mode;
struct dcn20_mpc *mpc20 = TO_DCN20_MPC(mpc);
struct color_matrices_reg ocsc_regs;
- REG_SET(CSC_MODE[opp_id], 0, MPC_OCSC_MODE, ocsc_mode);
-
- if (ocsc_mode == MPC_OUTPUT_CSC_DISABLE)
+ if (ocsc_mode == MPC_OUTPUT_CSC_DISABLE) {
+ REG_SET(CSC_MODE[opp_id], 0, MPC_OCSC_MODE, ocsc_mode);
return;
+ }
if (regval == NULL) {
BREAK_TO_DEBUGGER();
return;
}
+ /* Determine which CSC coefficients (A or B) are currently in use,
+ * then select the alternate set so the CSC update is double buffered
+ * and takes effect on a frame boundary.
+ */
+ IX_REG_GET(MPC_OCSC_TEST_DEBUG_INDEX, MPC_OCSC_TEST_DEBUG_DATA,
+ MPC_OCSC_TEST_DEBUG_DATA_STATUS_IDX,
+ MPC_OCSC_TEST_DEBUG_DATA_OCSC_MODE, &cur_mode);
+
+ if (cur_mode != MPC_OUTPUT_CSC_COEF_A)
+ ocsc_mode = MPC_OUTPUT_CSC_COEF_A;
+ else
+ ocsc_mode = MPC_OUTPUT_CSC_COEF_B;
+
ocsc_regs.shifts.csc_c11 = mpc20->mpc_shift->MPC_OCSC_C11_A;
ocsc_regs.masks.csc_c11 = mpc20->mpc_mask->MPC_OCSC_C11_A;
ocsc_regs.shifts.csc_c12 = mpc20->mpc_shift->MPC_OCSC_C12_A;
@@ -157,10 +174,13 @@ void mpc2_set_output_csc(
ocsc_regs.csc_c11_c12 = REG(CSC_C11_C12_B[opp_id]);
ocsc_regs.csc_c33_c34 = REG(CSC_C33_C34_B[opp_id]);
}
+
cm_helper_program_color_matrices(
mpc20->base.ctx,
regval,
&ocsc_regs);
+
+ REG_SET(CSC_MODE[opp_id], 0, MPC_OCSC_MODE, ocsc_mode);
}
void mpc2_set_ocsc_default(
@@ -169,14 +189,16 @@ void mpc2_set_ocsc_default(
enum dc_color_space color_space,
enum mpc_output_csc_mode ocsc_mode)
{
+ uint32_t cur_mode;
struct dcn20_mpc *mpc20 = TO_DCN20_MPC(mpc);
uint32_t arr_size;
struct color_matrices_reg ocsc_regs;
const uint16_t *regval = NULL;
- REG_SET(CSC_MODE[opp_id], 0, MPC_OCSC_MODE, ocsc_mode);
- if (ocsc_mode == MPC_OUTPUT_CSC_DISABLE)
+ if (ocsc_mode == MPC_OUTPUT_CSC_DISABLE) {
+ REG_SET(CSC_MODE[opp_id], 0, MPC_OCSC_MODE, ocsc_mode);
return;
+ }
regval = find_color_matrix(color_space, &arr_size);
@@ -185,6 +207,19 @@ void mpc2_set_ocsc_default(
return;
}
+ /* Determine which CSC coefficients (A or B) are currently in use,
+ * then select the alternate set so the CSC update is double buffered
+ * and takes effect on a frame boundary.
+ */
+ IX_REG_GET(MPC_OCSC_TEST_DEBUG_INDEX, MPC_OCSC_TEST_DEBUG_DATA,
+ MPC_OCSC_TEST_DEBUG_DATA_STATUS_IDX,
+ MPC_OCSC_TEST_DEBUG_DATA_OCSC_MODE, &cur_mode);
+
+ if (cur_mode != MPC_OUTPUT_CSC_COEF_A)
+ ocsc_mode = MPC_OUTPUT_CSC_COEF_A;
+ else
+ ocsc_mode = MPC_OUTPUT_CSC_COEF_B;
+
ocsc_regs.shifts.csc_c11 = mpc20->mpc_shift->MPC_OCSC_C11_A;
ocsc_regs.masks.csc_c11 = mpc20->mpc_mask->MPC_OCSC_C11_A;
ocsc_regs.shifts.csc_c12 = mpc20->mpc_shift->MPC_OCSC_C12_A;
@@ -203,6 +238,8 @@ void mpc2_set_ocsc_default(
mpc20->base.ctx,
regval,
&ocsc_regs);
+
+ REG_SET(CSC_MODE[opp_id], 0, MPC_OCSC_MODE, ocsc_mode);
}
static void mpc2_ogam_get_reg_field(
@@ -345,6 +382,9 @@ static void mpc20_program_ogam_pwl(
uint32_t i;
struct dcn20_mpc *mpc20 = TO_DCN20_MPC(mpc);
+ PERF_TRACE();
+ REG_SEQ_START();
+
for (i = 0 ; i < num; i++) {
REG_SET(MPCC_OGAM_LUT_DATA[mpcc_id], 0, MPCC_OGAM_LUT_DATA, rgb[i].red_reg);
REG_SET(MPCC_OGAM_LUT_DATA[mpcc_id], 0, MPCC_OGAM_LUT_DATA, rgb[i].green_reg);
@@ -463,6 +503,11 @@ void mpc2_assert_mpcc_idle_before_connect(struct mpc *mpc, int mpcc_id)
ASSERT(!mpc_disabled);
ASSERT(!mpc_idle);
}
+
+ REG_SEQ_SUBMIT();
+ PERF_TRACE();
+ REG_SEQ_WAIT_DONE();
+ PERF_TRACE();
}
static void mpc2_init_mpcc(struct mpcc *mpcc, int mpcc_inst)
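
mpc2_set_output_csc() and mpc2_set_ocsc_default() now share the same double-buffering flow, condensed here as a sketch:

    uint32_t cur_mode;

    /* read which coefficient bank (A or B) the hardware is using */
    IX_REG_GET(MPC_OCSC_TEST_DEBUG_INDEX, MPC_OCSC_TEST_DEBUG_DATA,
               MPC_OCSC_TEST_DEBUG_DATA_STATUS_IDX,
               MPC_OCSC_TEST_DEBUG_DATA_OCSC_MODE, &cur_mode);

    /* program the idle bank... */
    ocsc_mode = (cur_mode != MPC_OUTPUT_CSC_COEF_A) ? MPC_OUTPUT_CSC_COEF_A
                                                    : MPC_OUTPUT_CSC_COEF_B;
    cm_helper_program_color_matrices(mpc20->base.ctx, regval, &ocsc_regs);

    /* ...and only flip MPC_OCSC_MODE once the matrices are written, so
     * the switch to the new coefficients lands on a frame boundary. */
    REG_SET(CSC_MODE[opp_id], 0, MPC_OCSC_MODE, ocsc_mode);

The disable path still writes MPC_OCSC_MODE up front, since there are no coefficients to race against.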
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_mpc.h b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_mpc.h
index 9f53192da2dc..c78fd5123497 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_mpc.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_mpc.h
@@ -80,6 +80,10 @@
SRII(DENORM_CLAMP_G_Y, MPC_OUT, inst),\
SRII(DENORM_CLAMP_B_CB, MPC_OUT, inst)
+#define MPC_DBG_REG_LIST_DCN2_0() \
+ SR(MPC_OCSC_TEST_DEBUG_DATA),\
+ SR(MPC_OCSC_TEST_DEBUG_INDEX)
+
#define MPC_REG_VARIABLE_LIST_DCN2_0 \
MPC_COMMON_REG_VARIABLE_LIST \
uint32_t MPCC_TOP_GAIN[MAX_MPCC]; \
@@ -118,6 +122,8 @@
uint32_t MPCC_OGAM_LUT_RAM_CONTROL[MAX_MPCC];\
uint32_t MPCC_OGAM_LUT_DATA[MAX_MPCC];\
uint32_t MPCC_OGAM_MODE[MAX_MPCC];\
+ uint32_t MPC_OCSC_TEST_DEBUG_DATA;\
+ uint32_t MPC_OCSC_TEST_DEBUG_INDEX;\
uint32_t CSC_MODE[MAX_OPP]; \
uint32_t CSC_C11_C12_A[MAX_OPP]; \
uint32_t CSC_C33_C34_A[MAX_OPP]; \
@@ -134,6 +140,7 @@
SF(MPCC0_MPCC_TOP_GAIN, MPCC_TOP_GAIN, mask_sh),\
SF(MPCC0_MPCC_BOT_GAIN_INSIDE, MPCC_BOT_GAIN_INSIDE, mask_sh),\
SF(MPCC0_MPCC_BOT_GAIN_OUTSIDE, MPCC_BOT_GAIN_OUTSIDE, mask_sh),\
+ SF(MPC_OCSC_TEST_DEBUG_INDEX, MPC_OCSC_TEST_DEBUG_INDEX, mask_sh),\
SF(MPC_OUT0_CSC_MODE, MPC_OCSC_MODE, mask_sh),\
SF(MPC_OUT0_CSC_C11_C12_A, MPC_OCSC_C11_A, mask_sh),\
SF(MPC_OUT0_CSC_C11_C12_A, MPC_OCSC_C12_A, mask_sh),\
@@ -174,6 +181,19 @@
SF(MPC_OUT0_DENORM_CLAMP_B_CB, MPC_OUT_DENORM_CLAMP_MAX_B_CB, mask_sh),\
SF(MPC_OUT0_DENORM_CLAMP_B_CB, MPC_OUT_DENORM_CLAMP_MIN_B_CB, mask_sh)
+/*
+ * DCN2 MPC_OCSC debug status register:
+ *
+ * The status word that carries the current OCSC mode is at index 1;
+ * the mode itself occupies bits [1:0] of the returned data.
+ */
+#define MPC_OCSC_TEST_DEBUG_DATA_STATUS_IDX 1
+
+#define MPC_DEBUG_REG_LIST_SH_DCN20 \
+ .MPC_OCSC_TEST_DEBUG_DATA_OCSC_MODE = 0
+
+#define MPC_DEBUG_REG_LIST_MASK_DCN20 \
+ .MPC_OCSC_TEST_DEBUG_DATA_OCSC_MODE = 0x3
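
IX_REG_GET in dcn20_mpc.c presumably rides on this index/data pair: write the status index into MPC_OCSC_TEST_DEBUG_INDEX, then read the selected status word back through MPC_OCSC_TEST_DEBUG_DATA and extract the field. A sketch of that indirect read, assuming it decomposes into the ordinary REG_SET/REG_GET helpers:

    /* select status word 1, the one carrying the OCSC mode */
    REG_SET(MPC_OCSC_TEST_DEBUG_INDEX, 0,
            MPC_OCSC_TEST_DEBUG_INDEX, MPC_OCSC_TEST_DEBUG_DATA_STATUS_IDX);

    /* read it back; the mode occupies bits [1:0] per the mask above */
    REG_GET(MPC_OCSC_TEST_DEBUG_DATA,
            MPC_OCSC_TEST_DEBUG_DATA_OCSC_MODE, &cur_mode);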
#define MPC_REG_FIELD_LIST_DCN2_0(type) \
MPC_REG_FIELD_LIST(type)\
@@ -182,6 +202,8 @@
type MPCC_TOP_GAIN;\
type MPCC_BOT_GAIN_INSIDE;\
type MPCC_BOT_GAIN_OUTSIDE;\
+ type MPC_OCSC_TEST_DEBUG_DATA_OCSC_MODE;\
+ type MPC_OCSC_TEST_DEBUG_INDEX;\
type MPC_OCSC_MODE;\
type MPC_OCSC_C11_A;\
type MPC_OCSC_C12_A;\
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_opp.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_opp.c
index 40164ed015ea..023cc71fad0f 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_opp.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_opp.c
@@ -41,6 +41,7 @@
void opp2_set_disp_pattern_generator(
struct output_pixel_processor *opp,
enum controller_dp_test_pattern test_pattern,
+ enum controller_dp_color_space color_space,
enum dc_color_depth color_depth,
const struct tg_color *solid_color,
int width,
@@ -100,9 +101,22 @@ void opp2_set_disp_pattern_generator(
TEST_PATTERN_DYN_RANGE_CEA :
TEST_PATTERN_DYN_RANGE_VESA);
+ switch (color_space) {
+ case CONTROLLER_DP_COLOR_SPACE_YCBCR601:
+ mode = TEST_PATTERN_MODE_COLORSQUARES_YCBCR601;
+ break;
+ case CONTROLLER_DP_COLOR_SPACE_YCBCR709:
+ mode = TEST_PATTERN_MODE_COLORSQUARES_YCBCR709;
+ break;
+ case CONTROLLER_DP_COLOR_SPACE_RGB:
+ default:
+ mode = TEST_PATTERN_MODE_COLORSQUARES_RGB;
+ break;
+ }
+
REG_UPDATE_6(DPG_CONTROL,
DPG_EN, 1,
- DPG_MODE, TEST_PATTERN_MODE_COLORSQUARES_RGB,
+ DPG_MODE, mode,
DPG_DYNAMIC_RANGE, dyn_range,
DPG_BIT_DEPTH, bit_depth,
DPG_VRES, 6,
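
A hypothetical caller of the extended pattern generator, with the new color_space argument slotted between the test pattern and the colour depth (enum names as they appear in this diff; solid_color is only consumed by the solid-colour pattern):

    opp2_set_disp_pattern_generator(opp,
            CONTROLLER_DP_TEST_PATTERN_COLORSQUARES,
            CONTROLLER_DP_COLOR_SPACE_YCBCR709, /* picks the 709 squares mode */
            COLOR_DEPTH_888,
            NULL,                               /* no solid colour needed */
            1920, 1080);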
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_opp.h b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_opp.h
index abd8de9a78f8..4093bec172c1 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_opp.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_opp.h
@@ -140,6 +140,7 @@ void dcn20_opp_construct(struct dcn20_opp *oppn20,
void opp2_set_disp_pattern_generator(
struct output_pixel_processor *opp,
enum controller_dp_test_pattern test_pattern,
+ enum controller_dp_color_space color_space,
enum dc_color_depth color_depth,
const struct tg_color *solid_color,
int width,
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_optc.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_optc.c
index 3b613fb93ef8..d875b0c38fde 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_optc.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_optc.c
@@ -59,11 +59,16 @@ bool optc2_enable_crtc(struct timing_generator *optc)
REG_UPDATE(CONTROL,
VTG0_ENABLE, 1);
+ REG_SEQ_START();
+
/* Enable CRTC */
REG_UPDATE_2(OTG_CONTROL,
OTG_DISABLE_POINT_CNTL, 3,
OTG_MASTER_EN, 1);
+ REG_SEQ_SUBMIT();
+ REG_SEQ_WAIT_DONE();
+
return true;
}
@@ -167,7 +172,6 @@ void optc2_set_gsl_source_select(
}
}
-#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT
/* DSC encoder frame start controls: x = h position, line_num = # of lines from vstartup */
void optc2_set_dsc_encoder_frame_start(struct timing_generator *optc,
int x_position,
@@ -201,13 +205,12 @@ void optc2_set_dsc_config(struct timing_generator *optc,
REG_UPDATE(OPTC_WIDTH_CONTROL,
OPTC_DSC_SLICE_WIDTH, dsc_slice_width);
}
-#endif
-/**
- * PTI i think is already done somewhere else for 2ka
- * (opp?, please double check.
- * OPTC side only has 1 register to set for PTI_ENABLE)
- */
+/* TEMP: need to figure out the inheritance model here. */
+bool optc2_is_two_pixels_per_containter(const struct dc_crtc_timing *timing)
+{
+ return optc1_is_two_pixels_per_containter(timing);
+}
void optc2_set_odm_bypass(struct timing_generator *optc,
const struct dc_crtc_timing *dc_crtc_timing)
@@ -221,7 +224,7 @@ void optc2_set_odm_bypass(struct timing_generator *optc,
OPTC_SEG1_SRC_SEL, 0xf);
REG_WRITE(OTG_H_TIMING_CNTL, 0);
- h_div_2 = optc1_is_two_pixels_per_containter(dc_crtc_timing);
+ h_div_2 = optc2_is_two_pixels_per_containter(dc_crtc_timing);
REG_UPDATE(OTG_H_TIMING_CNTL,
OTG_H_TIMING_DIV_BY2, h_div_2);
REG_SET(OPTC_MEMORY_CONFIG, 0,
@@ -233,12 +236,13 @@ void optc2_set_odm_combine(struct timing_generator *optc, int *opp_id, int opp_c
struct dc_crtc_timing *timing)
{
struct optc *optc1 = DCN10TG_FROM_TG(optc);
- /* 2 pieces of memory required for up to 5120 displays, 4 for up to 8192 */
int mpcc_hactive = (timing->h_addressable + timing->h_border_left + timing->h_border_right)
/ opp_cnt;
- int memory_mask = mpcc_hactive <= 2560 ? 0x3 : 0xf;
+ uint32_t memory_mask;
uint32_t data_fmt = 0;
+ ASSERT(opp_cnt == 2);
+
/* TODO: In pseudocode but does not affect maximus, delete comment if we dont need on asic
* REG_SET(OTG_GLOBAL_CONTROL2, 0, GLOBAL_UPDATE_LOCK_EN, 1);
* Program OTG register MASTER_UPDATE_LOCK_DB_X/Y to the position before DP frame start
@@ -246,9 +250,17 @@ void optc2_set_odm_combine(struct timing_generator *optc, int *opp_id, int opp_c
* MASTER_UPDATE_LOCK_DB_X, 160,
* MASTER_UPDATE_LOCK_DB_Y, 240);
*/
+
+ /* Two pieces of memory are required for displays up to 5120 pixels
+ * wide and four for up to 8192; for ODM combine we simplify by always
+ * using four. To guarantee no overlap, each OPP instance "reserves"
+ * two memories, and the two reservations are combined into one mask here.
+ */
+ memory_mask = 0x3 << (opp_id[0] * 2) | 0x3 << (opp_id[1] * 2);
+
if (REG(OPTC_MEMORY_CONFIG))
REG_SET(OPTC_MEMORY_CONFIG, 0,
- OPTC_MEM_SEL, memory_mask << (optc->inst * 4));
+ OPTC_MEM_SEL, memory_mask);
if (timing->pixel_encoding == PIXEL_ENCODING_YCBCR422)
data_fmt = 1;
@@ -257,7 +269,6 @@ void optc2_set_odm_combine(struct timing_generator *optc, int *opp_id, int opp_c
REG_UPDATE(OPTC_DATA_FORMAT_CONTROL, OPTC_DATA_FORMAT, data_fmt);
- ASSERT(opp_cnt == 2);
REG_SET_3(OPTC_DATA_SOURCE_SELECT, 0,
OPTC_NUM_OF_INPUT_SEGMENT, 1,
OPTC_SEG0_SRC_SEL, opp_id[0],
@@ -379,14 +390,8 @@ void optc2_setup_manual_trigger(struct timing_generator *optc)
{
struct optc *optc1 = DCN10TG_FROM_TG(optc);
- REG_SET(OTG_MANUAL_FLOW_CONTROL, 0,
- MANUAL_FLOW_CONTROL, 1);
-
- REG_SET(OTG_GLOBAL_CONTROL2, 0,
- MANUAL_FLOW_CONTROL_SEL, optc->inst);
-
REG_SET_8(OTG_TRIGA_CNTL, 0,
- OTG_TRIGA_SOURCE_SELECT, 22,
+ OTG_TRIGA_SOURCE_SELECT, 21,
OTG_TRIGA_SOURCE_PIPE_SELECT, optc->inst,
OTG_TRIGA_RISING_EDGE_DETECT_CNTL, 1,
OTG_TRIGA_FALLING_EDGE_DETECT_CNTL, 0,
@@ -448,9 +453,7 @@ static struct timing_generator_funcs dcn20_tg_funcs = {
.setup_global_swap_lock = NULL,
.get_crc = optc1_get_crc,
.configure_crc = optc1_configure_crc,
-#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT
.set_dsc_config = optc2_set_dsc_config,
-#endif
.set_dwb_source = optc2_set_dwb_source,
.set_odm_bypass = optc2_set_odm_bypass,
.set_odm_combine = optc2_set_odm_combine,
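
Worked example for the new OPTC_MEM_SEL computation in optc2_set_odm_combine():

    /* memory_mask = 0x3 << (opp_id[0] * 2) | 0x3 << (opp_id[1] * 2)
     *
     * opp_id[] = {0, 1}  ->  0x03 | 0x0C = 0x0F   (memories 0-3)
     * opp_id[] = {2, 3}  ->  0x30 | 0xC0 = 0xF0   (memories 4-7)
     */

Because each OPP instance contributes its own fixed pair of memories, two ODM combines active at once can never select overlapping memories; the old memory_mask << (optc->inst * 4) formula only guaranteed that per OTG instance.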
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_optc.h b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_optc.h
index 32a58431fd09..239cc40ae474 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_optc.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_optc.h
@@ -86,12 +86,10 @@ void optc2_set_gsl_source_select(struct timing_generator *optc,
int group_idx,
uint32_t gsl_ready_signal);
-#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT
void optc2_set_dsc_config(struct timing_generator *optc,
enum optc_dsc_mode dsc_mode,
uint32_t dsc_bytes_per_pixel,
uint32_t dsc_slice_width);
-#endif
void optc2_set_odm_bypass(struct timing_generator *optc,
const struct dc_crtc_timing *dc_crtc_timing);
@@ -108,6 +106,7 @@ void optc2_triplebuffer_lock(struct timing_generator *optc);
void optc2_triplebuffer_unlock(struct timing_generator *optc);
void optc2_lock_doublebuffer_disable(struct timing_generator *optc);
void optc2_lock_doublebuffer_enable(struct timing_generator *optc);
+void optc2_setup_manual_trigger(struct timing_generator *optc);
void optc2_program_manual_trigger(struct timing_generator *optc);
-
+bool optc2_is_two_pixels_per_containter(const struct dc_crtc_timing *timing);
#endif /* __DC_OPTC_DCN20_H__ */
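
The REG_SEQ_* calls introduced in optc2_enable_crtc() (and in dcn20_mpc.c earlier in this patch) appear to batch register writes for deferred submission. The usage pattern, as a sketch:

    REG_SEQ_START();                /* begin collecting writes */
    REG_UPDATE_2(OTG_CONTROL,
                 OTG_DISABLE_POINT_CNTL, 3,
                 OTG_MASTER_EN, 1); /* queued as part of the sequence */
    REG_SEQ_SUBMIT();               /* push the batch to the hardware */
    REG_SEQ_WAIT_DONE();            /* block until the writes have landed */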
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c
index 23ff2f1c75b5..85f90f3e24cb 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c
@@ -1,5 +1,6 @@
/*
* Copyright 2016 Advanced Micro Devices, Inc.
+ * Copyright 2019 Raptor Engineering, LLC
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
@@ -28,6 +29,8 @@
#include "dm_services.h"
#include "dc.h"
+#include "dcn20_init.h"
+
#include "resource.h"
#include "include/irq_service_interface.h"
#include "dcn20/dcn20_resource.h"
@@ -45,9 +48,7 @@
#include "dcn10/dcn10_resource.h"
#include "dcn20_opp.h"
-#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT
#include "dcn20_dsc.h"
-#endif
#include "dcn20_link_encoder.h"
#include "dcn20_stream_encoder.h"
@@ -59,11 +60,14 @@
#include "dml/display_mode_vba.h"
#include "dcn20_dccg.h"
#include "dcn20_vmid.h"
+#include "dc_link_ddc.h"
#include "navi10_ip_offset.h"
#include "dcn/dcn_2_0_0_offset.h"
#include "dcn/dcn_2_0_0_sh_mask.h"
+#include "dpcs/dpcs_2_0_0_offset.h"
+#include "dpcs/dpcs_2_0_0_sh_mask.h"
#include "nbio/nbio_2_3_offset.h"
@@ -82,8 +86,6 @@
#include "amdgpu_socbb.h"
-/* NV12 SOC BB is currently in FW, mark SW bounding box invalid. */
-#define SOC_BOUNDING_BOX_VALID false
#define DC_LOGGER_INIT(logger)
struct _vcs_dpi_ip_params_st dcn2_0_ip = {
@@ -94,11 +96,7 @@ struct _vcs_dpi_ip_params_st dcn2_0_ip = {
.hostvm_max_page_table_levels = 4,
.hostvm_cached_page_table_levels = 0,
.pte_group_size_bytes = 2048,
-#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT
.num_dsc = 6,
-#else
- .num_dsc = 0,
-#endif
.rob_buffer_size_kbytes = 168,
.det_buffer_size_kbytes = 164,
.dpte_buffer_size_in_pte_reqs_luma = 84,
@@ -553,6 +551,7 @@ static const struct dcn10_link_enc_hpd_registers link_enc_hpd_regs[] = {
[id] = {\
LE_DCN10_REG_LIST(id), \
UNIPHY_DCN2_REG_LIST(phyid), \
+ DPCS_DCN2_REG_LIST(id), \
SRI(DP_DPHY_INTERNAL_CTRL, DP, id) \
}
@@ -566,11 +565,13 @@ static const struct dcn10_link_enc_registers link_enc_regs[] = {
};
static const struct dcn10_link_enc_shift le_shift = {
- LINK_ENCODER_MASK_SH_LIST_DCN20(__SHIFT)
+ LINK_ENCODER_MASK_SH_LIST_DCN20(__SHIFT),\
+ DPCS_DCN2_MASK_SH_LIST(__SHIFT)
};
static const struct dcn10_link_enc_mask le_mask = {
- LINK_ENCODER_MASK_SH_LIST_DCN20(_MASK)
+ LINK_ENCODER_MASK_SH_LIST_DCN20(_MASK),\
+ DPCS_DCN2_MASK_SH_LIST(_MASK)
};
#define ipp_regs(id)\
@@ -637,6 +638,7 @@ static const struct dce110_aux_registers aux_engine_regs[] = {
#define tf_regs(id)\
[id] = {\
TF_REG_LIST_DCN20(id),\
+ TF_REG_LIST_DCN20_COMMON_APPEND(id),\
}
static const struct dcn2_dpp_registers tf_regs[] = {
@@ -650,12 +652,12 @@ static const struct dcn2_dpp_registers tf_regs[] = {
static const struct dcn2_dpp_shift tf_shift = {
TF_REG_LIST_SH_MASK_DCN20(__SHIFT),
- TF_DEBUG_REG_LIST_SH_DCN10
+ TF_DEBUG_REG_LIST_SH_DCN20
};
static const struct dcn2_dpp_mask tf_mask = {
TF_REG_LIST_SH_MASK_DCN20(_MASK),
- TF_DEBUG_REG_LIST_MASK_DCN10
+ TF_DEBUG_REG_LIST_MASK_DCN20
};
#define dwbc_regs_dcn2(id)\
@@ -705,14 +707,17 @@ static const struct dcn20_mpc_registers mpc_regs = {
MPC_OUT_MUX_REG_LIST_DCN2_0(3),
MPC_OUT_MUX_REG_LIST_DCN2_0(4),
MPC_OUT_MUX_REG_LIST_DCN2_0(5),
+ MPC_DBG_REG_LIST_DCN2_0()
};
static const struct dcn20_mpc_shift mpc_shift = {
- MPC_COMMON_MASK_SH_LIST_DCN2_0(__SHIFT)
+ MPC_COMMON_MASK_SH_LIST_DCN2_0(__SHIFT),
+ MPC_DEBUG_REG_LIST_SH_DCN20
};
static const struct dcn20_mpc_mask mpc_mask = {
- MPC_COMMON_MASK_SH_LIST_DCN2_0(_MASK)
+ MPC_COMMON_MASK_SH_LIST_DCN2_0(_MASK),
+ MPC_DEBUG_REG_LIST_MASK_DCN20
};
#define tg_regs(id)\
@@ -838,7 +843,6 @@ static int map_transmitter_id_to_phy_instance(
}
}
-#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT
#define dsc_regsDCN20(id)\
[id] = {\
DSC_REG_LIST_DCN20(id)\
@@ -860,7 +864,6 @@ static const struct dcn20_dsc_shift dsc_shift = {
static const struct dcn20_dsc_mask dsc_mask = {
DSC_REG_LIST_SH_MASK_DCN20(_MASK)
};
-#endif
static const struct dccg_registers dccg_regs = {
DCCG_REG_LIST_DCN2()
@@ -884,9 +887,7 @@ static const struct resource_caps res_cap_nv10 = {
.num_dwb = 1,
.num_ddc = 6,
.num_vmid = 16,
-#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT
.num_dsc = 6,
-#endif
};
static const struct dc_plane_cap plane_cap = {
@@ -923,9 +924,7 @@ static const struct resource_caps res_cap_nv14 = {
.num_dwb = 1,
.num_ddc = 5,
.num_vmid = 16,
-#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT
.num_dsc = 5,
-#endif
};
static const struct dc_debug_options debug_defaults_drv = {
@@ -1284,7 +1283,6 @@ void dcn20_clock_source_destroy(struct clock_source **clk_src)
*clk_src = NULL;
}
-#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT
struct display_stream_compressor *dcn20_dsc_create(
struct dc_context *ctx, uint32_t inst)
@@ -1307,9 +1305,8 @@ void dcn20_dsc_destroy(struct display_stream_compressor **dsc)
*dsc = NULL;
}
-#endif
-static void destruct(struct dcn20_resource_pool *pool)
+static void dcn20_resource_destruct(struct dcn20_resource_pool *pool)
{
unsigned int i;
@@ -1320,12 +1317,10 @@ static void destruct(struct dcn20_resource_pool *pool)
}
}
-#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT
for (i = 0; i < pool->base.res_cap->num_dsc; i++) {
if (pool->base.dscs[i] != NULL)
dcn20_dsc_destroy(&pool->base.dscs[i]);
}
-#endif
if (pool->base.mpc != NULL) {
kfree(TO_DCN20_MPC(pool->base.mpc));
@@ -1418,6 +1413,8 @@ static void destruct(struct dcn20_resource_pool *pool)
if (pool->base.pp_smu != NULL)
dcn20_pp_smu_destroy(&pool->base.pp_smu);
+ if (pool->base.oem_device != NULL)
+ dal_ddc_service_destroy(&pool->base.oem_device);
}
struct hubp *dcn20_hubp_create(
@@ -1468,7 +1465,7 @@ static void get_pixel_clock_parameters(
if (opp_cnt == 4)
pixel_clk_params->requested_pix_clk_100hz /= 4;
- else if (optc1_is_two_pixels_per_containter(&stream->timing) || opp_cnt == 2)
+ else if (optc2_is_two_pixels_per_containter(&stream->timing) || opp_cnt == 2)
pixel_clk_params->requested_pix_clk_100hz /= 2;
if (stream->timing.timing_3d_format == TIMING_3D_FORMAT_HW_FRAME_PACKING)
@@ -1534,7 +1531,6 @@ enum dc_status dcn20_build_mapped_resource(const struct dc *dc, struct dc_state
return status;
}
-#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT
static void acquire_dsc(struct resource_context *res_ctx,
const struct resource_pool *pool,
@@ -1575,11 +1571,9 @@ static void release_dsc(struct resource_context *res_ctx,
}
}
-#endif
-#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT
-static enum dc_status add_dsc_to_stream_resource(struct dc *dc,
+enum dc_status dcn20_add_dsc_to_stream_resource(struct dc *dc,
struct dc_state *dc_ctx,
struct dc_stream_state *dc_stream)
{
@@ -1594,11 +1588,13 @@ static enum dc_status add_dsc_to_stream_resource(struct dc *dc,
if (pipe_ctx->stream != dc_stream)
continue;
+ if (pipe_ctx->stream_res.dsc)
+ continue;
+
acquire_dsc(&dc_ctx->res_ctx, pool, &pipe_ctx->stream_res.dsc, i);
/* The number of DSCs can be less than the number of pipes */
if (!pipe_ctx->stream_res.dsc) {
- dm_output_to_console("No DSCs available\n");
result = DC_NO_DSC_RESOURCE;
}
@@ -1630,7 +1626,6 @@ static enum dc_status remove_dsc_from_stream_resource(struct dc *dc,
else
return DC_OK;
}
-#endif
enum dc_status dcn20_add_stream_to_ctx(struct dc *dc, struct dc_state *new_ctx, struct dc_stream_state *dc_stream)
@@ -1642,11 +1637,9 @@ enum dc_status dcn20_add_stream_to_ctx(struct dc *dc, struct dc_state *new_ctx,
if (result == DC_OK)
result = resource_map_phy_clock_resources(dc, new_ctx, dc_stream);
-#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT
/* Get a DSC if required and available */
if (result == DC_OK && dc_stream->timing.flags.DSC)
- result = add_dsc_to_stream_resource(dc, new_ctx, dc_stream);
-#endif
+ result = dcn20_add_dsc_to_stream_resource(dc, new_ctx, dc_stream);
if (result == DC_OK)
result = dcn20_build_mapped_resource(dc, new_ctx, dc_stream);
@@ -1659,9 +1652,7 @@ enum dc_status dcn20_remove_stream_from_ctx(struct dc *dc, struct dc_state *new_
{
enum dc_status result = DC_OK;
-#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT
result = remove_dsc_from_stream_resource(dc, new_ctx, dc_stream);
-#endif
return result;
}
@@ -1744,9 +1735,7 @@ bool dcn20_split_stream_for_odm(
next_odm_pipe->plane_res.xfm = pool->transforms[next_odm_pipe->pipe_idx];
next_odm_pipe->plane_res.dpp = pool->dpps[next_odm_pipe->pipe_idx];
next_odm_pipe->plane_res.mpcc_inst = pool->dpps[next_odm_pipe->pipe_idx]->inst;
-#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT
next_odm_pipe->stream_res.dsc = NULL;
-#endif
if (prev_odm_pipe->next_odm_pipe && prev_odm_pipe->next_odm_pipe != next_odm_pipe) {
next_odm_pipe->next_odm_pipe = prev_odm_pipe->next_odm_pipe;
next_odm_pipe->next_odm_pipe->prev_odm_pipe = next_odm_pipe;
@@ -1792,14 +1781,12 @@ bool dcn20_split_stream_for_odm(
sd->recout.x = 0;
}
next_odm_pipe->stream_res.opp = pool->opps[next_odm_pipe->pipe_idx];
-#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT
if (next_odm_pipe->stream->timing.flags.DSC == 1) {
acquire_dsc(res_ctx, pool, &next_odm_pipe->stream_res.dsc, next_odm_pipe->pipe_idx);
ASSERT(next_odm_pipe->stream_res.dsc);
if (next_odm_pipe->stream_res.dsc == NULL)
return false;
}
-#endif
return true;
}
@@ -1823,9 +1810,7 @@ void dcn20_split_stream_for_mpc(
secondary_pipe->plane_res.xfm = pool->transforms[secondary_pipe->pipe_idx];
secondary_pipe->plane_res.dpp = pool->dpps[secondary_pipe->pipe_idx];
secondary_pipe->plane_res.mpcc_inst = pool->dpps[secondary_pipe->pipe_idx]->inst;
-#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT
secondary_pipe->stream_res.dsc = NULL;
-#endif
if (primary_pipe->bottom_pipe && primary_pipe->bottom_pipe != secondary_pipe) {
ASSERT(!secondary_pipe->bottom_pipe);
secondary_pipe->bottom_pipe = primary_pipe->bottom_pipe;
@@ -1876,11 +1861,28 @@ void dcn20_populate_dml_writeback_from_context(
}
+static int get_num_odm_heads(struct pipe_ctx *pipe)
+{
+ int odm_head_count = 0;
+ struct pipe_ctx *next_pipe = pipe->next_odm_pipe;
+ while (next_pipe) {
+ odm_head_count++;
+ next_pipe = next_pipe->next_odm_pipe;
+ }
+ pipe = pipe->prev_odm_pipe;
+ while (pipe) {
+ odm_head_count++;
+ pipe = pipe->prev_odm_pipe;
+ }
+ return odm_head_count ? odm_head_count + 1 : 0;
+}
+
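
get_num_odm_heads() walks the ODM chain in both directions from the given pipe: a pipe with no ODM neighbours yields 0 (not 1), while a pipe anywhere in an n-pipe chain yields n. The switch inside dcn20_populate_dml_pipes_from_context() below maps that count onto the DML mode:

    /* heads == 0 (no ODM chain)   -> dm_odm_combine_mode_disabled
     * heads == 2 (two-pipe chain) -> dm_odm_combine_mode_2to1
     */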
int dcn20_populate_dml_pipes_from_context(
- struct dc *dc, struct resource_context *res_ctx, display_e2e_pipe_params_st *pipes)
+ struct dc *dc, struct dc_state *context, display_e2e_pipe_params_st *pipes)
{
int pipe_cnt, i;
bool synchronized_vblank = true;
+ struct resource_context *res_ctx = &context->res_ctx;
for (i = 0, pipe_cnt = -1; i < dc->res_pool->pipe_count; i++) {
if (!res_ctx->pipe_ctx[i].stream)
@@ -1900,25 +1902,30 @@ int dcn20_populate_dml_pipes_from_context(
for (i = 0, pipe_cnt = 0; i < dc->res_pool->pipe_count; i++) {
struct dc_crtc_timing *timing = &res_ctx->pipe_ctx[i].stream->timing;
+ unsigned int v_total;
+ unsigned int front_porch;
int output_bpc;
if (!res_ctx->pipe_ctx[i].stream)
continue;
+
+ v_total = timing->v_total;
+ front_porch = timing->v_front_porch;
/* todo:
pipes[pipe_cnt].pipe.src.dynamic_metadata_enable = 0;
pipes[pipe_cnt].pipe.src.dcc = 0;
pipes[pipe_cnt].pipe.src.vm = 0;*/
-#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT
+ pipes[pipe_cnt].clks_cfg.refclk_mhz = dc->res_pool->ref_clocks.dchub_ref_clock_inKhz / 1000.0;
+
pipes[pipe_cnt].dout.dsc_enable = res_ctx->pipe_ctx[i].stream->timing.flags.DSC;
/* todo: rotation?*/
pipes[pipe_cnt].dout.dsc_slices = res_ctx->pipe_ctx[i].stream->timing.dsc_cfg.num_slices_h;
-#endif
if (res_ctx->pipe_ctx[i].stream->use_dynamic_meta) {
pipes[pipe_cnt].pipe.src.dynamic_metadata_enable = true;
/* 1/2 vblank */
pipes[pipe_cnt].pipe.src.dynamic_metadata_lines_before_active =
- (timing->v_total - timing->v_addressable
+ (v_total - timing->v_addressable
- timing->v_border_top - timing->v_border_bottom) / 2;
/* 36 bytes dp, 32 hdmi */
pipes[pipe_cnt].pipe.src.dynamic_metadata_xmit_bytes =
@@ -1932,13 +1939,13 @@ int dcn20_populate_dml_pipes_from_context(
- timing->h_addressable
- timing->h_border_left
- timing->h_border_right;
- pipes[pipe_cnt].pipe.dest.vblank_start = timing->v_total - timing->v_front_porch;
+ pipes[pipe_cnt].pipe.dest.vblank_start = v_total - front_porch;
pipes[pipe_cnt].pipe.dest.vblank_end = pipes[pipe_cnt].pipe.dest.vblank_start
- timing->v_addressable
- timing->v_border_top
- timing->v_border_bottom;
pipes[pipe_cnt].pipe.dest.htotal = timing->h_total;
- pipes[pipe_cnt].pipe.dest.vtotal = timing->v_total;
+ pipes[pipe_cnt].pipe.dest.vtotal = v_total;
pipes[pipe_cnt].pipe.dest.hactive = timing->h_addressable;
pipes[pipe_cnt].pipe.dest.vactive = timing->v_addressable;
pipes[pipe_cnt].pipe.dest.interlaced = timing->flags.INTERLACE;
@@ -1949,8 +1956,13 @@ int dcn20_populate_dml_pipes_from_context(
pipes[pipe_cnt].dout.dp_lanes = 4;
pipes[pipe_cnt].pipe.dest.vtotal_min = res_ctx->pipe_ctx[i].stream->adjust.v_total_min;
pipes[pipe_cnt].pipe.dest.vtotal_max = res_ctx->pipe_ctx[i].stream->adjust.v_total_max;
- pipes[pipe_cnt].pipe.dest.odm_combine = res_ctx->pipe_ctx[i].prev_odm_pipe
- || res_ctx->pipe_ctx[i].next_odm_pipe;
+ switch (get_num_odm_heads(&res_ctx->pipe_ctx[i])) {
+ case 2:
+ pipes[pipe_cnt].pipe.dest.odm_combine = dm_odm_combine_mode_2to1;
+ break;
+ default:
+ pipes[pipe_cnt].pipe.dest.odm_combine = dm_odm_combine_mode_disabled;
+ }
pipes[pipe_cnt].pipe.src.hsplit_grp = res_ctx->pipe_ctx[i].pipe_idx;
if (res_ctx->pipe_ctx[i].top_pipe && res_ctx->pipe_ctx[i].top_pipe->plane_state
== res_ctx->pipe_ctx[i].plane_state)
@@ -2001,14 +2013,12 @@ int dcn20_populate_dml_pipes_from_context(
case COLOR_DEPTH_161616:
output_bpc = 16;
break;
-#ifdef CONFIG_DRM_AMD_DC_DCN2_0
case COLOR_DEPTH_999:
output_bpc = 9;
break;
case COLOR_DEPTH_111111:
output_bpc = 11;
break;
-#endif
default:
output_bpc = 8;
break;
@@ -2036,10 +2046,8 @@ int dcn20_populate_dml_pipes_from_context(
pipes[pipe_cnt].dout.output_bpp = output_bpc * 3;
}
-#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT
if (res_ctx->pipe_ctx[i].stream->timing.flags.DSC)
pipes[pipe_cnt].dout.output_bpp = res_ctx->pipe_ctx[i].stream->timing.dsc_cfg.bits_per_pixel / 16.0;
-#endif
/* todo: default max for now, until there is logic reflecting this in dc*/
pipes[pipe_cnt].dout.output_bpc = 12;
@@ -2063,6 +2071,10 @@ int dcn20_populate_dml_pipes_from_context(
pipes[pipe_cnt].pipe.src.viewport_height = timing->v_addressable;
if (pipes[pipe_cnt].pipe.src.viewport_height > 1080)
pipes[pipe_cnt].pipe.src.viewport_height = 1080;
+ pipes[pipe_cnt].pipe.src.surface_height_y = pipes[pipe_cnt].pipe.src.viewport_height;
+ pipes[pipe_cnt].pipe.src.surface_width_y = pipes[pipe_cnt].pipe.src.viewport_width;
+ pipes[pipe_cnt].pipe.src.surface_height_c = pipes[pipe_cnt].pipe.src.viewport_height;
+ pipes[pipe_cnt].pipe.src.surface_width_c = pipes[pipe_cnt].pipe.src.viewport_width;
pipes[pipe_cnt].pipe.src.data_pitch = ((pipes[pipe_cnt].pipe.src.viewport_width + 63) / 64) * 64; /* linear sw only */
pipes[pipe_cnt].pipe.src.source_format = dm_444_32;
pipes[pipe_cnt].pipe.dest.recout_width = pipes[pipe_cnt].pipe.src.viewport_width; /*vp_width/hratio*/
@@ -2077,8 +2089,8 @@ int dcn20_populate_dml_pipes_from_context(
pipes[pipe_cnt].pipe.scale_taps.vtaps = 1;
pipes[pipe_cnt].pipe.src.is_hsplit = 0;
pipes[pipe_cnt].pipe.dest.odm_combine = 0;
- pipes[pipe_cnt].pipe.dest.vtotal_min = timing->v_total;
- pipes[pipe_cnt].pipe.dest.vtotal_max = timing->v_total;
+ pipes[pipe_cnt].pipe.dest.vtotal_min = v_total;
+ pipes[pipe_cnt].pipe.dest.vtotal_max = v_total;
} else {
struct dc_plane_state *pln = res_ctx->pipe_ctx[i].plane_state;
struct scaler_data *scl = &res_ctx->pipe_ctx[i].plane_res.scl_data;
@@ -2096,6 +2108,10 @@ int dcn20_populate_dml_pipes_from_context(
pipes[pipe_cnt].pipe.src.viewport_width_c = scl->viewport_c.width;
pipes[pipe_cnt].pipe.src.viewport_height = scl->viewport.height;
pipes[pipe_cnt].pipe.src.viewport_height_c = scl->viewport_c.height;
+ pipes[pipe_cnt].pipe.src.surface_width_y = pln->plane_size.surface_size.width;
+ pipes[pipe_cnt].pipe.src.surface_height_y = pln->plane_size.surface_size.height;
+ pipes[pipe_cnt].pipe.src.surface_width_c = pln->plane_size.chroma_size.width;
+ pipes[pipe_cnt].pipe.src.surface_height_c = pln->plane_size.chroma_size.height;
if (pln->format >= SURFACE_PIXEL_FORMAT_VIDEO_BEGIN) {
pipes[pipe_cnt].pipe.src.data_pitch = pln->plane_size.surface_pitch;
pipes[pipe_cnt].pipe.src.data_pitch_c = pln->plane_size.chroma_pitch;
@@ -2261,7 +2277,6 @@ void dcn20_set_mcif_arb_params(
}
}
-#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT
bool dcn20_validate_dsc(struct dc *dc, struct dc_state *new_ctx)
{
int i;
@@ -2295,7 +2310,6 @@ bool dcn20_validate_dsc(struct dc *dc, struct dc_state *new_ctx)
}
return true;
}
-#endif
struct pipe_ctx *dcn20_find_secondary_pipe(struct dc *dc,
struct resource_context *res_ctx,
@@ -2398,10 +2412,8 @@ void dcn20_merge_pipes_for_validate(
odm_pipe->bottom_pipe = NULL;
odm_pipe->prev_odm_pipe = NULL;
odm_pipe->next_odm_pipe = NULL;
-#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT
if (odm_pipe->stream_res.dsc)
release_dsc(&context->res_ctx, dc->res_pool, &odm_pipe->stream_res.dsc);
-#endif
/* Clear plane_res and stream_res */
memset(&odm_pipe->plane_res, 0, sizeof(odm_pipe->plane_res));
memset(&odm_pipe->stream_res, 0, sizeof(odm_pipe->stream_res));
@@ -2513,7 +2525,7 @@ int dcn20_validate_apply_pipe_split_flags(
split[i] = true;
if (dc->debug.force_odm_combine & (1 << pipe->stream_res.tg->inst)) {
split[i] = true;
- context->bw_ctx.dml.vba.ODMCombineEnablePerState[vlevel][pipe_idx] = true;
+ context->bw_ctx.dml.vba.ODMCombineEnablePerState[vlevel][pipe_idx] = dm_odm_combine_mode_2to1;
}
context->bw_ctx.dml.vba.ODMCombineEnabled[pipe_idx] =
context->bw_ctx.dml.vba.ODMCombineEnablePerState[vlevel][pipe_idx];
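
The per-state ODM flag is no longer a plain bool; forcing combine now records an explicit mode. A sketch of the DML enum this implies (the real definition lives in the DML headers, so the member list is an assumption):

    enum odm_combine_mode {
            dm_odm_combine_mode_disabled,
            dm_odm_combine_mode_2to1,
    };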
@@ -2544,7 +2556,7 @@ bool dcn20_fast_validate_bw(
dcn20_merge_pipes_for_validate(dc, context);
- pipe_cnt = dc->res_pool->funcs->populate_dml_pipes(dc, &context->res_ctx, pipes);
+ pipe_cnt = dc->res_pool->funcs->populate_dml_pipes(dc, context, pipes);
*pipe_cnt_out = pipe_cnt;
@@ -2621,14 +2633,12 @@ bool dcn20_fast_validate_bw(
ASSERT(0);
}
}
-#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT
/* Actual dsc count per stream dsc validation*/
if (!dcn20_validate_dsc(dc, context)) {
context->bw_ctx.dml.vba.ValidationStatus[context->bw_ctx.dml.vba.soc.num_states] =
DML_FAIL_DSC_VALIDATION_FAILURE;
goto validate_fail;
}
-#endif
*vlevel_out = vlevel;
@@ -2692,10 +2702,10 @@ static void dcn20_calculate_wm(
if (pipe_cnt != pipe_idx) {
if (dc->res_pool->funcs->populate_dml_pipes)
pipe_cnt = dc->res_pool->funcs->populate_dml_pipes(dc,
- &context->res_ctx, pipes);
+ context, pipes);
else
pipe_cnt = dcn20_populate_dml_pipes_from_context(dc,
- &context->res_ctx, pipes);
+ context, pipes);
}
*out_pipe_cnt = pipe_cnt;
@@ -2715,11 +2725,9 @@ static void dcn20_calculate_wm(
context->bw_ctx.bw.dcn.watermarks.b.cstate_pstate.cstate_exit_ns = get_wm_stutter_exit(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
context->bw_ctx.bw.dcn.watermarks.b.cstate_pstate.pstate_change_ns = get_wm_dram_clock_change(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
context->bw_ctx.bw.dcn.watermarks.b.pte_meta_urgent_ns = get_wm_memory_trip(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
-#if defined(CONFIG_DRM_AMD_DC_DCN2_1)
context->bw_ctx.bw.dcn.watermarks.b.frac_urg_bw_nom = get_fraction_of_urgent_bandwidth(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
context->bw_ctx.bw.dcn.watermarks.b.frac_urg_bw_flip = get_fraction_of_urgent_bandwidth_imm_flip(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
context->bw_ctx.bw.dcn.watermarks.b.urgent_latency_ns = get_urgent_latency(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
-#endif
if (vlevel < 2) {
pipes[0].clks_cfg.voltage = 2;
@@ -2731,10 +2739,8 @@ static void dcn20_calculate_wm(
context->bw_ctx.bw.dcn.watermarks.c.cstate_pstate.cstate_exit_ns = get_wm_stutter_exit(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
context->bw_ctx.bw.dcn.watermarks.c.cstate_pstate.pstate_change_ns = get_wm_dram_clock_change(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
context->bw_ctx.bw.dcn.watermarks.c.pte_meta_urgent_ns = get_wm_memory_trip(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
-#if defined(CONFIG_DRM_AMD_DC_DCN2_1)
context->bw_ctx.bw.dcn.watermarks.c.frac_urg_bw_nom = get_fraction_of_urgent_bandwidth(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
context->bw_ctx.bw.dcn.watermarks.c.frac_urg_bw_flip = get_fraction_of_urgent_bandwidth_imm_flip(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
-#endif
if (vlevel < 3) {
pipes[0].clks_cfg.voltage = 3;
@@ -2746,10 +2752,8 @@ static void dcn20_calculate_wm(
context->bw_ctx.bw.dcn.watermarks.d.cstate_pstate.cstate_exit_ns = get_wm_stutter_exit(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
context->bw_ctx.bw.dcn.watermarks.d.cstate_pstate.pstate_change_ns = get_wm_dram_clock_change(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
context->bw_ctx.bw.dcn.watermarks.d.pte_meta_urgent_ns = get_wm_memory_trip(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
-#if defined(CONFIG_DRM_AMD_DC_DCN2_1)
context->bw_ctx.bw.dcn.watermarks.d.frac_urg_bw_nom = get_fraction_of_urgent_bandwidth(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
context->bw_ctx.bw.dcn.watermarks.d.frac_urg_bw_flip = get_fraction_of_urgent_bandwidth_imm_flip(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
-#endif
pipes[0].clks_cfg.voltage = vlevel;
pipes[0].clks_cfg.dcfclk_mhz = context->bw_ctx.dml.soc.clock_limits[vlevel].dcfclk_mhz;
@@ -2759,10 +2763,8 @@ static void dcn20_calculate_wm(
context->bw_ctx.bw.dcn.watermarks.a.cstate_pstate.cstate_exit_ns = get_wm_stutter_exit(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
context->bw_ctx.bw.dcn.watermarks.a.cstate_pstate.pstate_change_ns = get_wm_dram_clock_change(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
context->bw_ctx.bw.dcn.watermarks.a.pte_meta_urgent_ns = get_wm_memory_trip(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
-#if defined(CONFIG_DRM_AMD_DC_DCN2_1)
context->bw_ctx.bw.dcn.watermarks.a.frac_urg_bw_nom = get_fraction_of_urgent_bandwidth(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
context->bw_ctx.bw.dcn.watermarks.a.frac_urg_bw_flip = get_fraction_of_urgent_bandwidth_imm_flip(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
-#endif
}
void dcn20_calculate_dlg_params(
@@ -2928,11 +2930,19 @@ bool dcn20_validate_bandwidth(struct dc *dc, struct dc_state *context,
bool voltage_supported = false;
bool full_pstate_supported = false;
bool dummy_pstate_supported = false;
- double p_state_latency_us = context->bw_ctx.dml.soc.dram_clock_change_latency_us;
+ double p_state_latency_us;
- if (fast_validate)
- return dcn20_validate_bandwidth_internal(dc, context, true);
+ DC_FP_START();
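+	/* DML below does floating point math; it must stay inside the
+	 * DC_FP_START()/DC_FP_END() region so the kernel FPU is usable. */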
+ p_state_latency_us = context->bw_ctx.dml.soc.dram_clock_change_latency_us;
+ context->bw_ctx.dml.soc.disable_dram_clock_change_vactive_support =
+ dc->debug.disable_dram_clock_change_vactive_support;
+ if (fast_validate) {
+ voltage_supported = dcn20_validate_bandwidth_internal(dc, context, true);
+
+ DC_FP_END();
+ return voltage_supported;
+ }
// Best case, we support full UCLK switch latency
voltage_supported = dcn20_validate_bandwidth_internal(dc, context, false);
@@ -2940,7 +2950,7 @@ bool dcn20_validate_bandwidth(struct dc *dc, struct dc_state *context,
if (context->bw_ctx.dml.soc.dummy_pstate_latency_us == 0 ||
(voltage_supported && full_pstate_supported)) {
- context->bw_ctx.bw.dcn.clk.p_state_change_support = true;
+ context->bw_ctx.bw.dcn.clk.p_state_change_support = full_pstate_supported;
goto restore_dml_state;
}
@@ -2961,6 +2971,7 @@ bool dcn20_validate_bandwidth(struct dc *dc, struct dc_state *context,
restore_dml_state:
context->bw_ctx.dml.soc.dram_clock_change_latency_us = p_state_latency_us;
+ DC_FP_END();
return voltage_supported;
}
@@ -3005,7 +3016,7 @@ static void dcn20_destroy_resource_pool(struct resource_pool **pool)
{
struct dcn20_resource_pool *dcn20_pool = TO_DCN20_RES_POOL(*pool);
- destruct(dcn20_pool);
+ dcn20_resource_destruct(dcn20_pool);
kfree(dcn20_pool);
*pool = NULL;
}
@@ -3252,7 +3263,6 @@ void dcn20_update_bounding_box(struct dc *dc, struct _vcs_dpi_soc_bounding_box_s
void dcn20_patch_bounding_box(struct dc *dc, struct _vcs_dpi_soc_bounding_box_st *bb)
{
- kernel_fpu_begin();
if ((int)(bb->sr_exit_time_us * 1000) != dc->bb_overrides.sr_exit_time_ns
&& dc->bb_overrides.sr_exit_time_ns) {
bb->sr_exit_time_us = dc->bb_overrides.sr_exit_time_ns / 1000.0;
@@ -3276,7 +3286,6 @@ void dcn20_patch_bounding_box(struct dc *dc, struct _vcs_dpi_soc_bounding_box_st
bb->dram_clock_change_latency_us =
dc->bb_overrides.dram_clock_change_latency_ns / 1000.0;
}
- kernel_fpu_end();
}
static struct _vcs_dpi_soc_bounding_box_st *get_asic_rev_soc_bb(
@@ -3318,12 +3327,13 @@ static bool init_soc_bounding_box(struct dc *dc,
DC_LOGGER_INIT(dc->ctx->logger);
- if (!bb && !SOC_BOUNDING_BOX_VALID) {
+	/* TODO: upstream NV12 bounding box when it's launched */
+ if (!bb && ASICREV_IS_NAVI12_P(dc->ctx->asic_id.hw_internal_rev)) {
DC_LOG_ERROR("%s: not valid soc bounding box\n", __func__);
return false;
}
- if (bb && !SOC_BOUNDING_BOX_VALID) {
+ if (bb && ASICREV_IS_NAVI12_P(dc->ctx->asic_id.hw_internal_rev)) {
int i;
dcn2_0_nv12_soc.sr_exit_time_us =
@@ -3465,7 +3475,7 @@ static bool init_soc_bounding_box(struct dc *dc,
return true;
}
-static bool construct(
+static bool dcn20_resource_construct(
uint8_t num_virtual_links,
struct dc *dc,
struct dcn20_resource_pool *pool)
@@ -3473,6 +3483,7 @@ static bool construct(
int i;
struct dc_context *ctx = dc->ctx;
struct irq_service_init_data init_data;
+ struct ddc_service_init_data ddc_init_data;
struct _vcs_dpi_soc_bounding_box_st *loaded_bb =
get_asic_rev_soc_bb(ctx->asic_id.hw_internal_rev);
struct _vcs_dpi_ip_params_st *loaded_ip =
@@ -3480,6 +3491,8 @@ static bool construct(
enum dml_project dml_project_version =
get_dml_project_version(ctx->asic_id.hw_internal_rev);
+ DC_FP_START();
+
ctx->dc_bios->regs = &bios_regs;
pool->base.funcs = &dcn20_res_pool_funcs;
@@ -3732,7 +3745,6 @@ static bool construct(
goto create_fail;
}
-#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT
for (i = 0; i < pool->base.res_cap->num_dsc; i++) {
pool->base.dscs[i] = dcn20_dsc_create(ctx, i);
if (pool->base.dscs[i] == NULL) {
@@ -3741,7 +3753,6 @@ static bool construct(
goto create_fail;
}
}
-#endif
if (!dcn20_dwbc_create(ctx, &pool->base)) {
BREAK_TO_DEBUGGER();
@@ -3768,11 +3779,24 @@ static bool construct(
dc->cap_funcs = cap_funcs;
+ if (dc->ctx->dc_bios->fw_info.oem_i2c_present) {
+ ddc_init_data.ctx = dc->ctx;
+ ddc_init_data.link = NULL;
+ ddc_init_data.id.id = dc->ctx->dc_bios->fw_info.oem_i2c_obj_id;
+ ddc_init_data.id.enum_id = 0;
+ ddc_init_data.id.type = OBJECT_TYPE_GENERIC;
+ pool->base.oem_device = dal_ddc_service_create(&ddc_init_data);
+ } else {
+ pool->base.oem_device = NULL;
+ }
+
+ DC_FP_END();
return true;
create_fail:
- destruct(pool);
+ DC_FP_END();
+ dcn20_resource_destruct(pool);
return false;
}
@@ -3787,7 +3811,7 @@ struct resource_pool *dcn20_create_resource_pool(
if (!pool)
return NULL;
- if (construct(init_data->num_virtual_links, dc, pool))
+ if (dcn20_resource_construct(init_data->num_virtual_links, dc, pool))
return &pool->base;
BREAK_TO_DEBUGGER();
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.h b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.h
index fef473d68a4a..f5893840b79b 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.h
@@ -50,7 +50,7 @@ unsigned int dcn20_calc_max_scaled_time(
enum mmhubbub_wbif_mode mode,
unsigned int urgent_watermark);
int dcn20_populate_dml_pipes_from_context(
- struct dc *dc, struct resource_context *res_ctx, display_e2e_pipe_params_st *pipes);
+ struct dc *dc, struct dc_state *context, display_e2e_pipe_params_st *pipes);
struct pipe_ctx *dcn20_acquire_idle_pipe_for_layer(
struct dc_state *state,
const struct resource_pool *pool,
@@ -127,9 +127,7 @@ int dcn20_validate_apply_pipe_split_flags(
struct dc_state *context,
int vlevel,
bool *split);
-#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT
bool dcn20_validate_dsc(struct dc *dc, struct dc_state *new_ctx);
-#endif
void dcn20_split_stream_for_mpc(
struct resource_context *res_ctx,
const struct resource_pool *pool,
@@ -159,6 +157,7 @@ void dcn20_calculate_dlg_params(
enum dc_status dcn20_build_mapped_resource(const struct dc *dc, struct dc_state *context, struct dc_stream_state *stream);
enum dc_status dcn20_add_stream_to_ctx(struct dc *dc, struct dc_state *new_ctx, struct dc_stream_state *dc_stream);
+enum dc_status dcn20_add_dsc_to_stream_resource(struct dc *dc, struct dc_state *dc_ctx, struct dc_stream_state *dc_stream);
enum dc_status dcn20_remove_stream_from_ctx(struct dc *dc, struct dc_state *new_ctx, struct dc_stream_state *dc_stream);
enum dc_status dcn20_get_default_swizzle_mode(struct dc_plane_state *plane_state);
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_stream_encoder.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_stream_encoder.c
index fcb3877b4fcb..9b70a1e7b962 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_stream_encoder.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_stream_encoder.c
@@ -205,7 +205,6 @@ static void enc2_stream_encoder_stop_hdmi_info_packets(
HDMI_GENERIC7_LINE, 0);
}
-#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT
/* Update GSP7 SDP 128 byte long */
static void enc2_update_gsp7_128_info_packet(
@@ -360,7 +359,6 @@ static void enc2_read_state(struct stream_encoder *enc, struct enc_state *s)
REG_GET(DP_SEC_CNTL, DP_SEC_STREAM_ENABLE, &s->sec_stream_enable);
}
}
-#endif
/* Set Dynamic Metadata configuration.
 * enable_dme: TRUE: enables the Dynamic Metadata Engine, FALSE: disables DME
@@ -440,10 +438,8 @@ static bool is_two_pixels_per_containter(const struct dc_crtc_timing *timing)
{
bool two_pix = timing->pixel_encoding == PIXEL_ENCODING_YCBCR420;
-#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT
two_pix = two_pix || (timing->flags.DSC && timing->pixel_encoding == PIXEL_ENCODING_YCBCR422
&& !timing->dsc_cfg.ycbcr422_simple);
-#endif
return two_pix;
}
@@ -541,11 +537,16 @@ void enc2_stream_encoder_dp_set_stream_attribute(
struct stream_encoder *enc,
struct dc_crtc_timing *crtc_timing,
enum dc_color_space output_color_space,
+ bool use_vsc_sdp_for_colorimetry,
uint32_t enable_sdp_splitting)
{
struct dcn10_stream_encoder *enc1 = DCN10STRENC_FROM_STRENC(enc);
- enc1_stream_encoder_dp_set_stream_attribute(enc, crtc_timing, output_color_space, enable_sdp_splitting);
+ enc1_stream_encoder_dp_set_stream_attribute(enc,
+ crtc_timing,
+ output_color_space,
+ use_vsc_sdp_for_colorimetry,
+ enable_sdp_splitting);
REG_UPDATE(DP_SEC_FRAMING4,
DP_SST_SDP_SPLITTING, enable_sdp_splitting);
@@ -568,6 +569,8 @@ static const struct stream_encoder_funcs dcn20_str_enc_funcs = {
enc2_stream_encoder_stop_hdmi_info_packets,
.update_dp_info_packets =
enc2_stream_encoder_update_dp_info_packets,
+ .send_immediate_sdp_message =
+ enc1_stream_encoder_send_immediate_sdp_message,
.stop_dp_info_packets =
enc1_stream_encoder_stop_dp_info_packets,
.dp_blank =
@@ -590,11 +593,9 @@ static const struct stream_encoder_funcs dcn20_str_enc_funcs = {
.dp_get_pixel_format =
enc1_stream_encoder_dp_get_pixel_format,
-#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT
.enc_read_state = enc2_read_state,
.dp_set_dsc_config = enc2_dp_set_dsc_config,
.dp_set_dsc_pps_info_packet = enc2_dp_set_dsc_pps_info_packet,
-#endif
.set_dynamic_metadata = enc2_set_dynamic_metadata,
.hdmi_reset_stream_attribute = enc1_reset_hdmi_stream_attribute,
};
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_stream_encoder.h b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_stream_encoder.h
index 3f94a9f13c4a..d2a805bd4573 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_stream_encoder.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_stream_encoder.h
@@ -98,6 +98,7 @@ void enc2_stream_encoder_dp_set_stream_attribute(
struct stream_encoder *enc,
struct dc_crtc_timing *crtc_timing,
enum dc_color_space output_color_space,
+ bool use_vsc_sdp_for_colorimetry,
uint32_t enable_sdp_splitting);
void enc2_stream_encoder_dp_unblank(
diff --git a/drivers/gpu/drm/amd/display/dc/dcn21/Makefile b/drivers/gpu/drm/amd/display/dc/dcn21/Makefile
index 5b8c17564bc1..07684d3e375a 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn21/Makefile
+++ b/drivers/gpu/drm/amd/display/dc/dcn21/Makefile
@@ -2,9 +2,16 @@
#
# Makefile for DCN21.
-DCN21 = dcn21_hubp.o dcn21_hubbub.o dcn21_resource.o dcn21_hwseq.o dcn21_link_encoder.o
+DCN21 = dcn21_init.o dcn21_hubp.o dcn21_hubbub.o dcn21_resource.o \
+ dcn21_hwseq.o dcn21_link_encoder.o
+ifdef CONFIG_X86
CFLAGS_$(AMDDALPATH)/dc/dcn21/dcn21_resource.o := -mhard-float -msse
+endif
+
+ifdef CONFIG_PPC64
+CFLAGS_$(AMDDALPATH)/dc/dcn21/dcn21_resource.o := -mhard-float -maltivec
+endif
ifdef CONFIG_CC_IS_GCC
ifeq ($(call cc-ifversion, -lt, 0701, y), y)
@@ -12,6 +19,7 @@ IS_OLD_GCC = 1
endif
endif
+ifdef CONFIG_X86
ifdef IS_OLD_GCC
# Stack alignment mismatch, proceed with caution.
# GCC < 7.1 cannot compile code using `double` and -mpreferred-stack-boundary=3
@@ -20,6 +28,7 @@ CFLAGS_$(AMDDALPATH)/dc/dcn21/dcn21_resource.o += -mpreferred-stack-boundary=4
else
CFLAGS_$(AMDDALPATH)/dc/dcn21/dcn21_resource.o += -msse2
endif
+endif
AMD_DAL_DCN21 = $(addprefix $(AMDDALPATH)/dc/dcn21/,$(DCN21))
diff --git a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hubp.c b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hubp.c
index 2f5a5867e674..da63fc53cc4a 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hubp.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hubp.c
@@ -29,6 +29,10 @@
#include "dm_services.h"
#include "reg_helper.h"
+#include "dc_dmub_srv.h"
+
+#define DC_LOGGER_INIT(logger)
+
#define REG(reg)\
hubp21->hubp_regs->reg
@@ -164,6 +168,158 @@ static void hubp21_setup(
}
+void hubp21_set_viewport(
+ struct hubp *hubp,
+ const struct rect *viewport,
+ const struct rect *viewport_c)
+{
+ struct dcn21_hubp *hubp21 = TO_DCN21_HUBP(hubp);
+
+ REG_SET_2(DCSURF_PRI_VIEWPORT_DIMENSION, 0,
+ PRI_VIEWPORT_WIDTH, viewport->width,
+ PRI_VIEWPORT_HEIGHT, viewport->height);
+
+ REG_SET_2(DCSURF_PRI_VIEWPORT_START, 0,
+ PRI_VIEWPORT_X_START, viewport->x,
+ PRI_VIEWPORT_Y_START, viewport->y);
+
+	/* for stereo */
+ REG_SET_2(DCSURF_SEC_VIEWPORT_DIMENSION, 0,
+ SEC_VIEWPORT_WIDTH, viewport->width,
+ SEC_VIEWPORT_HEIGHT, viewport->height);
+
+ REG_SET_2(DCSURF_SEC_VIEWPORT_START, 0,
+ SEC_VIEWPORT_X_START, viewport->x,
+ SEC_VIEWPORT_Y_START, viewport->y);
+
+ /* DC supports NV12 only at the moment */
+ REG_SET_2(DCSURF_PRI_VIEWPORT_DIMENSION_C, 0,
+ PRI_VIEWPORT_WIDTH_C, viewport_c->width,
+ PRI_VIEWPORT_HEIGHT_C, viewport_c->height);
+
+ REG_SET_2(DCSURF_PRI_VIEWPORT_START_C, 0,
+ PRI_VIEWPORT_X_START_C, viewport_c->x,
+ PRI_VIEWPORT_Y_START_C, viewport_c->y);
+
+ REG_SET_2(DCSURF_SEC_VIEWPORT_DIMENSION_C, 0,
+ SEC_VIEWPORT_WIDTH_C, viewport_c->width,
+ SEC_VIEWPORT_HEIGHT_C, viewport_c->height);
+
+ REG_SET_2(DCSURF_SEC_VIEWPORT_START_C, 0,
+ SEC_VIEWPORT_X_START_C, viewport_c->x,
+ SEC_VIEWPORT_Y_START_C, viewport_c->y);
+}
+
+static void hubp21_apply_PLAT_54186_wa(
+ struct hubp *hubp,
+ const struct dc_plane_address *address)
+{
+ struct dcn21_hubp *hubp21 = TO_DCN21_HUBP(hubp);
+ struct dc_debug_options *debug = &hubp->ctx->dc->debug;
+ unsigned int chroma_bpe = 2;
+ unsigned int luma_addr_high_part = 0;
+ unsigned int row_height = 0;
+ unsigned int chroma_pitch = 0;
+ unsigned int viewport_c_height = 0;
+ unsigned int viewport_c_width = 0;
+ unsigned int patched_viewport_height = 0;
+ unsigned int patched_viewport_width = 0;
+ unsigned int rotation_angle = 0;
+ unsigned int pix_format = 0;
+ unsigned int h_mirror_en = 0;
+ unsigned int tile_blk_size = 64 * 1024; /* 64KB for 64KB SW, 4KB for 4KB SW */
+
+
+ if (!debug->nv12_iflip_vm_wa)
+ return;
+
+ REG_GET(DCHUBP_REQ_SIZE_CONFIG_C,
+ PTE_ROW_HEIGHT_LINEAR_C, &row_height);
+
+ REG_GET_2(DCSURF_PRI_VIEWPORT_DIMENSION_C,
+ PRI_VIEWPORT_WIDTH_C, &viewport_c_width,
+ PRI_VIEWPORT_HEIGHT_C, &viewport_c_height);
+
+ REG_GET(DCSURF_PRIMARY_SURFACE_ADDRESS_HIGH_C,
+ PRIMARY_SURFACE_ADDRESS_HIGH_C, &luma_addr_high_part);
+
+ REG_GET(DCSURF_SURFACE_PITCH_C,
+ PITCH_C, &chroma_pitch);
+
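+	/* The PITCH_C field is programmed as (pitch - 1); convert it back */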
+ chroma_pitch += 1;
+
+ REG_GET_3(DCSURF_SURFACE_CONFIG,
+ SURFACE_PIXEL_FORMAT, &pix_format,
+ ROTATION_ANGLE, &rotation_angle,
+ H_MIRROR_EN, &h_mirror_en);
+
+ /* reset persistent cached data */
+ hubp21->PLAT_54186_wa_chroma_addr_offset = 0;
+	/*
+	 * Apply the workaround only for NV12 surfaces with scatter gather
+	 * enabled and a chroma viewport larger than 512 in the scan
+	 * direction.
+	 */
+ if (address->type != PLN_ADDR_TYPE_VIDEO_PROGRESSIVE ||
+ address->video_progressive.luma_addr.high_part == 0xf4)
+ return;
+
+	/* ROTATION_ANGLE is encoded 0..3 for 0/90/180/270 degrees */
+	if ((rotation_angle == 0 || rotation_angle == 2)
+			&& viewport_c_height <= 512)
+		return;
+
+	if ((rotation_angle == 1 || rotation_angle == 3)
+			&& viewport_c_width <= 512)
+		return;
+
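+	/*
+	 * Pad the chroma viewport by one PTE row in the scan direction;
+	 * where the pad ends up in front of the surface, shift the cached
+	 * chroma address so the visible picture does not move.
+	 */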
+ switch (rotation_angle) {
+ case 0: /* 0 degree rotation */
+ row_height = 128;
+ patched_viewport_height = (viewport_c_height / row_height + 1) * row_height + 1;
+ patched_viewport_width = viewport_c_width;
+ hubp21->PLAT_54186_wa_chroma_addr_offset = 0;
+ break;
+ case 2: /* 180 degree rotation */
+ row_height = 128;
+ patched_viewport_height = viewport_c_height + row_height;
+ patched_viewport_width = viewport_c_width;
+ hubp21->PLAT_54186_wa_chroma_addr_offset = 0 - chroma_pitch * row_height * chroma_bpe;
+ break;
+ case 1: /* 90 degree rotation */
+ row_height = 256;
+ if (h_mirror_en) {
+ patched_viewport_height = viewport_c_height;
+ patched_viewport_width = viewport_c_width + row_height;
+ hubp21->PLAT_54186_wa_chroma_addr_offset = 0;
+ } else {
+ patched_viewport_height = viewport_c_height;
+ patched_viewport_width = viewport_c_width + row_height;
+ hubp21->PLAT_54186_wa_chroma_addr_offset = 0 - tile_blk_size;
+ }
+ break;
+ case 3: /* 270 degree rotation */
+ row_height = 256;
+ if (h_mirror_en) {
+ patched_viewport_height = viewport_c_height;
+ patched_viewport_width = viewport_c_width + row_height;
+ hubp21->PLAT_54186_wa_chroma_addr_offset = 0 - tile_blk_size;
+ } else {
+ patched_viewport_height = viewport_c_height;
+ patched_viewport_width = viewport_c_width + row_height;
+ hubp21->PLAT_54186_wa_chroma_addr_offset = 0;
+ }
+ break;
+ default:
+ ASSERT(0);
+ break;
+ }
+
+	/* catch cases where the viewport keeps growing */
+ ASSERT(patched_viewport_height && patched_viewport_height < 5000);
+ ASSERT(patched_viewport_width && patched_viewport_width < 5000);
+
+ REG_UPDATE_2(DCSURF_PRI_VIEWPORT_DIMENSION_C,
+ PRI_VIEWPORT_WIDTH_C, patched_viewport_width,
+ PRI_VIEWPORT_HEIGHT_C, patched_viewport_height);
+}
+
void hubp21_set_vm_system_aperture_settings(struct hubp *hubp,
struct vm_system_aperture_param *apt)
{
@@ -191,6 +347,562 @@ void hubp21_set_vm_system_aperture_settings(struct hubp *hubp,
SYSTEM_ACCESS_MODE, 0x3);
}
+void hubp21_validate_dml_output(struct hubp *hubp,
+ struct dc_context *ctx,
+ struct _vcs_dpi_display_rq_regs_st *dml_rq_regs,
+ struct _vcs_dpi_display_dlg_regs_st *dml_dlg_attr,
+ struct _vcs_dpi_display_ttu_regs_st *dml_ttu_attr)
+{
+ struct dcn21_hubp *hubp21 = TO_DCN21_HUBP(hubp);
+ struct _vcs_dpi_display_rq_regs_st rq_regs = {0};
+ struct _vcs_dpi_display_dlg_regs_st dlg_attr = {0};
+ struct _vcs_dpi_display_ttu_regs_st ttu_attr = {0};
+ DC_LOGGER_INIT(ctx->logger);
+ DC_LOG_DEBUG("DML Validation | Running Validation");
+
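+	/*
+	 * Read back the requester, DLG and TTU registers and compare each
+	 * field with the value DML computed; any mismatch is logged as an
+	 * "Expected/Actual" pair below.
+	 */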
+ /* Requester - Per hubp */
+ REG_GET(HUBPRET_CONTROL,
+ DET_BUF_PLANE1_BASE_ADDRESS, &rq_regs.plane1_base_address);
+ REG_GET_4(DCN_EXPANSION_MODE,
+ DRQ_EXPANSION_MODE, &rq_regs.drq_expansion_mode,
+ PRQ_EXPANSION_MODE, &rq_regs.prq_expansion_mode,
+ MRQ_EXPANSION_MODE, &rq_regs.mrq_expansion_mode,
+ CRQ_EXPANSION_MODE, &rq_regs.crq_expansion_mode);
+ REG_GET_8(DCHUBP_REQ_SIZE_CONFIG,
+ CHUNK_SIZE, &rq_regs.rq_regs_l.chunk_size,
+ MIN_CHUNK_SIZE, &rq_regs.rq_regs_l.min_chunk_size,
+ META_CHUNK_SIZE, &rq_regs.rq_regs_l.meta_chunk_size,
+ MIN_META_CHUNK_SIZE, &rq_regs.rq_regs_l.min_meta_chunk_size,
+ DPTE_GROUP_SIZE, &rq_regs.rq_regs_l.dpte_group_size,
+ VM_GROUP_SIZE, &rq_regs.rq_regs_l.mpte_group_size,
+ SWATH_HEIGHT, &rq_regs.rq_regs_l.swath_height,
+ PTE_ROW_HEIGHT_LINEAR, &rq_regs.rq_regs_l.pte_row_height_linear);
+ REG_GET_7(DCHUBP_REQ_SIZE_CONFIG_C,
+ CHUNK_SIZE_C, &rq_regs.rq_regs_c.chunk_size,
+ MIN_CHUNK_SIZE_C, &rq_regs.rq_regs_c.min_chunk_size,
+ META_CHUNK_SIZE_C, &rq_regs.rq_regs_c.meta_chunk_size,
+ MIN_META_CHUNK_SIZE_C, &rq_regs.rq_regs_c.min_meta_chunk_size,
+ DPTE_GROUP_SIZE_C, &rq_regs.rq_regs_c.dpte_group_size,
+ SWATH_HEIGHT_C, &rq_regs.rq_regs_c.swath_height,
+ PTE_ROW_HEIGHT_LINEAR_C, &rq_regs.rq_regs_c.pte_row_height_linear);
+
+ if (rq_regs.plane1_base_address != dml_rq_regs->plane1_base_address)
+ DC_LOG_DEBUG("DML Validation | HUBPRET_CONTROL:DET_BUF_PLANE1_BASE_ADDRESS - Expected: %u Actual: %u\n",
+ dml_rq_regs->plane1_base_address, rq_regs.plane1_base_address);
+ if (rq_regs.drq_expansion_mode != dml_rq_regs->drq_expansion_mode)
+ DC_LOG_DEBUG("DML Validation | DCN_EXPANSION_MODE:DRQ_EXPANSION_MODE - Expected: %u Actual: %u\n",
+ dml_rq_regs->drq_expansion_mode, rq_regs.drq_expansion_mode);
+ if (rq_regs.prq_expansion_mode != dml_rq_regs->prq_expansion_mode)
+		DC_LOG_DEBUG("DML Validation | DCN_EXPANSION_MODE:PRQ_EXPANSION_MODE - Expected: %u Actual: %u\n",
+ dml_rq_regs->prq_expansion_mode, rq_regs.prq_expansion_mode);
+ if (rq_regs.mrq_expansion_mode != dml_rq_regs->mrq_expansion_mode)
+		DC_LOG_DEBUG("DML Validation | DCN_EXPANSION_MODE:MRQ_EXPANSION_MODE - Expected: %u Actual: %u\n",
+ dml_rq_regs->mrq_expansion_mode, rq_regs.mrq_expansion_mode);
+ if (rq_regs.crq_expansion_mode != dml_rq_regs->crq_expansion_mode)
+ DC_LOG_DEBUG("DML Validation | DCN_EXPANSION_MODE:CRQ_EXPANSION_MODE - Expected: %u Actual: %u\n",
+ dml_rq_regs->crq_expansion_mode, rq_regs.crq_expansion_mode);
+
+	if (rq_regs.rq_regs_l.chunk_size != dml_rq_regs->rq_regs_l.chunk_size)
+		DC_LOG_DEBUG("DML Validation | DCHUBP_REQ_SIZE_CONFIG:CHUNK_SIZE - Expected: %u Actual: %u\n",
+				dml_rq_regs->rq_regs_l.chunk_size, rq_regs.rq_regs_l.chunk_size);
+	if (rq_regs.rq_regs_l.min_chunk_size != dml_rq_regs->rq_regs_l.min_chunk_size)
+		DC_LOG_DEBUG("DML Validation | DCHUBP_REQ_SIZE_CONFIG:MIN_CHUNK_SIZE - Expected: %u Actual: %u\n",
+				dml_rq_regs->rq_regs_l.min_chunk_size, rq_regs.rq_regs_l.min_chunk_size);
+	if (rq_regs.rq_regs_l.meta_chunk_size != dml_rq_regs->rq_regs_l.meta_chunk_size)
+		DC_LOG_DEBUG("DML Validation | DCHUBP_REQ_SIZE_CONFIG:META_CHUNK_SIZE - Expected: %u Actual: %u\n",
+				dml_rq_regs->rq_regs_l.meta_chunk_size, rq_regs.rq_regs_l.meta_chunk_size);
+	if (rq_regs.rq_regs_l.min_meta_chunk_size != dml_rq_regs->rq_regs_l.min_meta_chunk_size)
+		DC_LOG_DEBUG("DML Validation | DCHUBP_REQ_SIZE_CONFIG:MIN_META_CHUNK_SIZE - Expected: %u Actual: %u\n",
+				dml_rq_regs->rq_regs_l.min_meta_chunk_size, rq_regs.rq_regs_l.min_meta_chunk_size);
+	if (rq_regs.rq_regs_l.dpte_group_size != dml_rq_regs->rq_regs_l.dpte_group_size)
+		DC_LOG_DEBUG("DML Validation | DCHUBP_REQ_SIZE_CONFIG:DPTE_GROUP_SIZE - Expected: %u Actual: %u\n",
+				dml_rq_regs->rq_regs_l.dpte_group_size, rq_regs.rq_regs_l.dpte_group_size);
+	if (rq_regs.rq_regs_l.mpte_group_size != dml_rq_regs->rq_regs_l.mpte_group_size)
+		DC_LOG_DEBUG("DML Validation | DCHUBP_REQ_SIZE_CONFIG:VM_GROUP_SIZE - Expected: %u Actual: %u\n",
+				dml_rq_regs->rq_regs_l.mpte_group_size, rq_regs.rq_regs_l.mpte_group_size);
+	if (rq_regs.rq_regs_l.swath_height != dml_rq_regs->rq_regs_l.swath_height)
+		DC_LOG_DEBUG("DML Validation | DCHUBP_REQ_SIZE_CONFIG:SWATH_HEIGHT - Expected: %u Actual: %u\n",
+				dml_rq_regs->rq_regs_l.swath_height, rq_regs.rq_regs_l.swath_height);
+	if (rq_regs.rq_regs_l.pte_row_height_linear != dml_rq_regs->rq_regs_l.pte_row_height_linear)
+		DC_LOG_DEBUG("DML Validation | DCHUBP_REQ_SIZE_CONFIG:PTE_ROW_HEIGHT_LINEAR - Expected: %u Actual: %u\n",
+				dml_rq_regs->rq_regs_l.pte_row_height_linear, rq_regs.rq_regs_l.pte_row_height_linear);
+
+	if (rq_regs.rq_regs_c.chunk_size != dml_rq_regs->rq_regs_c.chunk_size)
+		DC_LOG_DEBUG("DML Validation | DCHUBP_REQ_SIZE_CONFIG_C:CHUNK_SIZE_C - Expected: %u Actual: %u\n",
+				dml_rq_regs->rq_regs_c.chunk_size, rq_regs.rq_regs_c.chunk_size);
+	if (rq_regs.rq_regs_c.min_chunk_size != dml_rq_regs->rq_regs_c.min_chunk_size)
+		DC_LOG_DEBUG("DML Validation | DCHUBP_REQ_SIZE_CONFIG_C:MIN_CHUNK_SIZE_C - Expected: %u Actual: %u\n",
+				dml_rq_regs->rq_regs_c.min_chunk_size, rq_regs.rq_regs_c.min_chunk_size);
+	if (rq_regs.rq_regs_c.meta_chunk_size != dml_rq_regs->rq_regs_c.meta_chunk_size)
+		DC_LOG_DEBUG("DML Validation | DCHUBP_REQ_SIZE_CONFIG_C:META_CHUNK_SIZE_C - Expected: %u Actual: %u\n",
+				dml_rq_regs->rq_regs_c.meta_chunk_size, rq_regs.rq_regs_c.meta_chunk_size);
+	if (rq_regs.rq_regs_c.min_meta_chunk_size != dml_rq_regs->rq_regs_c.min_meta_chunk_size)
+		DC_LOG_DEBUG("DML Validation | DCHUBP_REQ_SIZE_CONFIG_C:MIN_META_CHUNK_SIZE_C - Expected: %u Actual: %u\n",
+				dml_rq_regs->rq_regs_c.min_meta_chunk_size, rq_regs.rq_regs_c.min_meta_chunk_size);
+	if (rq_regs.rq_regs_c.dpte_group_size != dml_rq_regs->rq_regs_c.dpte_group_size)
+		DC_LOG_DEBUG("DML Validation | DCHUBP_REQ_SIZE_CONFIG_C:DPTE_GROUP_SIZE_C - Expected: %u Actual: %u\n",
+				dml_rq_regs->rq_regs_c.dpte_group_size, rq_regs.rq_regs_c.dpte_group_size);
+	if (rq_regs.rq_regs_c.swath_height != dml_rq_regs->rq_regs_c.swath_height)
+		DC_LOG_DEBUG("DML Validation | DCHUBP_REQ_SIZE_CONFIG_C:SWATH_HEIGHT_C - Expected: %u Actual: %u\n",
+				dml_rq_regs->rq_regs_c.swath_height, rq_regs.rq_regs_c.swath_height);
+	if (rq_regs.rq_regs_c.pte_row_height_linear != dml_rq_regs->rq_regs_c.pte_row_height_linear)
+		DC_LOG_DEBUG("DML Validation | DCHUBP_REQ_SIZE_CONFIG_C:PTE_ROW_HEIGHT_LINEAR_C - Expected: %u Actual: %u\n",
+				dml_rq_regs->rq_regs_c.pte_row_height_linear, rq_regs.rq_regs_c.pte_row_height_linear);
+
+
+ /* DLG - Per hubp */
+ REG_GET_2(BLANK_OFFSET_0,
+ REFCYC_H_BLANK_END, &dlg_attr.refcyc_h_blank_end,
+ DLG_V_BLANK_END, &dlg_attr.dlg_vblank_end);
+ REG_GET(BLANK_OFFSET_1,
+ MIN_DST_Y_NEXT_START, &dlg_attr.min_dst_y_next_start);
+ REG_GET(DST_DIMENSIONS,
+ REFCYC_PER_HTOTAL, &dlg_attr.refcyc_per_htotal);
+ REG_GET_2(DST_AFTER_SCALER,
+ REFCYC_X_AFTER_SCALER, &dlg_attr.refcyc_x_after_scaler,
+ DST_Y_AFTER_SCALER, &dlg_attr.dst_y_after_scaler);
+ REG_GET(REF_FREQ_TO_PIX_FREQ,
+ REF_FREQ_TO_PIX_FREQ, &dlg_attr.ref_freq_to_pix_freq);
+
+ if (dlg_attr.refcyc_h_blank_end != dml_dlg_attr->refcyc_h_blank_end)
+ DC_LOG_DEBUG("DML Validation | BLANK_OFFSET_0:REFCYC_H_BLANK_END - Expected: %u Actual: %u\n",
+ dml_dlg_attr->refcyc_h_blank_end, dlg_attr.refcyc_h_blank_end);
+ if (dlg_attr.dlg_vblank_end != dml_dlg_attr->dlg_vblank_end)
+ DC_LOG_DEBUG("DML Validation | BLANK_OFFSET_0:DLG_V_BLANK_END - Expected: %u Actual: %u\n",
+ dml_dlg_attr->dlg_vblank_end, dlg_attr.dlg_vblank_end);
+ if (dlg_attr.min_dst_y_next_start != dml_dlg_attr->min_dst_y_next_start)
+ DC_LOG_DEBUG("DML Validation | BLANK_OFFSET_1:MIN_DST_Y_NEXT_START - Expected: %u Actual: %u\n",
+ dml_dlg_attr->min_dst_y_next_start, dlg_attr.min_dst_y_next_start);
+ if (dlg_attr.refcyc_per_htotal != dml_dlg_attr->refcyc_per_htotal)
+ DC_LOG_DEBUG("DML Validation | DST_DIMENSIONS:REFCYC_PER_HTOTAL - Expected: %u Actual: %u\n",
+ dml_dlg_attr->refcyc_per_htotal, dlg_attr.refcyc_per_htotal);
+ if (dlg_attr.refcyc_x_after_scaler != dml_dlg_attr->refcyc_x_after_scaler)
+ DC_LOG_DEBUG("DML Validation | DST_AFTER_SCALER:REFCYC_X_AFTER_SCALER - Expected: %u Actual: %u\n",
+ dml_dlg_attr->refcyc_x_after_scaler, dlg_attr.refcyc_x_after_scaler);
+ if (dlg_attr.dst_y_after_scaler != dml_dlg_attr->dst_y_after_scaler)
+ DC_LOG_DEBUG("DML Validation | DST_AFTER_SCALER:DST_Y_AFTER_SCALER - Expected: %u Actual: %u\n",
+ dml_dlg_attr->dst_y_after_scaler, dlg_attr.dst_y_after_scaler);
+ if (dlg_attr.ref_freq_to_pix_freq != dml_dlg_attr->ref_freq_to_pix_freq)
+ DC_LOG_DEBUG("DML Validation | REF_FREQ_TO_PIX_FREQ:REF_FREQ_TO_PIX_FREQ - Expected: %u Actual: %u\n",
+ dml_dlg_attr->ref_freq_to_pix_freq, dlg_attr.ref_freq_to_pix_freq);
+
+ /* DLG - Per luma/chroma */
+ REG_GET(VBLANK_PARAMETERS_1,
+ REFCYC_PER_PTE_GROUP_VBLANK_L, &dlg_attr.refcyc_per_pte_group_vblank_l);
+ if (REG(NOM_PARAMETERS_0))
+ REG_GET(NOM_PARAMETERS_0,
+ DST_Y_PER_PTE_ROW_NOM_L, &dlg_attr.dst_y_per_pte_row_nom_l);
+ if (REG(NOM_PARAMETERS_1))
+ REG_GET(NOM_PARAMETERS_1,
+ REFCYC_PER_PTE_GROUP_NOM_L, &dlg_attr.refcyc_per_pte_group_nom_l);
+ REG_GET(NOM_PARAMETERS_4,
+ DST_Y_PER_META_ROW_NOM_L, &dlg_attr.dst_y_per_meta_row_nom_l);
+ REG_GET(NOM_PARAMETERS_5,
+ REFCYC_PER_META_CHUNK_NOM_L, &dlg_attr.refcyc_per_meta_chunk_nom_l);
+ REG_GET_2(PER_LINE_DELIVERY,
+ REFCYC_PER_LINE_DELIVERY_L, &dlg_attr.refcyc_per_line_delivery_l,
+ REFCYC_PER_LINE_DELIVERY_C, &dlg_attr.refcyc_per_line_delivery_c);
+ REG_GET_2(PER_LINE_DELIVERY_PRE,
+ REFCYC_PER_LINE_DELIVERY_PRE_L, &dlg_attr.refcyc_per_line_delivery_pre_l,
+ REFCYC_PER_LINE_DELIVERY_PRE_C, &dlg_attr.refcyc_per_line_delivery_pre_c);
+ REG_GET(VBLANK_PARAMETERS_2,
+ REFCYC_PER_PTE_GROUP_VBLANK_C, &dlg_attr.refcyc_per_pte_group_vblank_c);
+ if (REG(NOM_PARAMETERS_2))
+ REG_GET(NOM_PARAMETERS_2,
+ DST_Y_PER_PTE_ROW_NOM_C, &dlg_attr.dst_y_per_pte_row_nom_c);
+ if (REG(NOM_PARAMETERS_3))
+ REG_GET(NOM_PARAMETERS_3,
+ REFCYC_PER_PTE_GROUP_NOM_C, &dlg_attr.refcyc_per_pte_group_nom_c);
+ REG_GET(NOM_PARAMETERS_6,
+ DST_Y_PER_META_ROW_NOM_C, &dlg_attr.dst_y_per_meta_row_nom_c);
+ REG_GET(NOM_PARAMETERS_7,
+ REFCYC_PER_META_CHUNK_NOM_C, &dlg_attr.refcyc_per_meta_chunk_nom_c);
+ REG_GET(VBLANK_PARAMETERS_3,
+ REFCYC_PER_META_CHUNK_VBLANK_L, &dlg_attr.refcyc_per_meta_chunk_vblank_l);
+ REG_GET(VBLANK_PARAMETERS_4,
+ REFCYC_PER_META_CHUNK_VBLANK_C, &dlg_attr.refcyc_per_meta_chunk_vblank_c);
+
+ if (dlg_attr.refcyc_per_pte_group_vblank_l != dml_dlg_attr->refcyc_per_pte_group_vblank_l)
+ DC_LOG_DEBUG("DML Validation | VBLANK_PARAMETERS_1:REFCYC_PER_PTE_GROUP_VBLANK_L - Expected: %u Actual: %u\n",
+ dml_dlg_attr->refcyc_per_pte_group_vblank_l, dlg_attr.refcyc_per_pte_group_vblank_l);
+ if (dlg_attr.dst_y_per_pte_row_nom_l != dml_dlg_attr->dst_y_per_pte_row_nom_l)
+ DC_LOG_DEBUG("DML Validation | NOM_PARAMETERS_0:DST_Y_PER_PTE_ROW_NOM_L - Expected: %u Actual: %u\n",
+ dml_dlg_attr->dst_y_per_pte_row_nom_l, dlg_attr.dst_y_per_pte_row_nom_l);
+ if (dlg_attr.refcyc_per_pte_group_nom_l != dml_dlg_attr->refcyc_per_pte_group_nom_l)
+ DC_LOG_DEBUG("DML Validation | NOM_PARAMETERS_1:REFCYC_PER_PTE_GROUP_NOM_L - Expected: %u Actual: %u\n",
+ dml_dlg_attr->refcyc_per_pte_group_nom_l, dlg_attr.refcyc_per_pte_group_nom_l);
+ if (dlg_attr.dst_y_per_meta_row_nom_l != dml_dlg_attr->dst_y_per_meta_row_nom_l)
+ DC_LOG_DEBUG("DML Validation | NOM_PARAMETERS_4:DST_Y_PER_META_ROW_NOM_L - Expected: %u Actual: %u\n",
+ dml_dlg_attr->dst_y_per_meta_row_nom_l, dlg_attr.dst_y_per_meta_row_nom_l);
+ if (dlg_attr.refcyc_per_meta_chunk_nom_l != dml_dlg_attr->refcyc_per_meta_chunk_nom_l)
+ DC_LOG_DEBUG("DML Validation | NOM_PARAMETERS_5:REFCYC_PER_META_CHUNK_NOM_L - Expected: %u Actual: %u\n",
+ dml_dlg_attr->refcyc_per_meta_chunk_nom_l, dlg_attr.refcyc_per_meta_chunk_nom_l);
+ if (dlg_attr.refcyc_per_line_delivery_l != dml_dlg_attr->refcyc_per_line_delivery_l)
+ DC_LOG_DEBUG("DML Validation | PER_LINE_DELIVERY:REFCYC_PER_LINE_DELIVERY_L - Expected: %u Actual: %u\n",
+ dml_dlg_attr->refcyc_per_line_delivery_l, dlg_attr.refcyc_per_line_delivery_l);
+ if (dlg_attr.refcyc_per_line_delivery_c != dml_dlg_attr->refcyc_per_line_delivery_c)
+ DC_LOG_DEBUG("DML Validation | PER_LINE_DELIVERY:REFCYC_PER_LINE_DELIVERY_C - Expected: %u Actual: %u\n",
+ dml_dlg_attr->refcyc_per_line_delivery_c, dlg_attr.refcyc_per_line_delivery_c);
+ if (dlg_attr.refcyc_per_pte_group_vblank_c != dml_dlg_attr->refcyc_per_pte_group_vblank_c)
+ DC_LOG_DEBUG("DML Validation | VBLANK_PARAMETERS_2:REFCYC_PER_PTE_GROUP_VBLANK_C - Expected: %u Actual: %u\n",
+ dml_dlg_attr->refcyc_per_pte_group_vblank_c, dlg_attr.refcyc_per_pte_group_vblank_c);
+ if (dlg_attr.dst_y_per_pte_row_nom_c != dml_dlg_attr->dst_y_per_pte_row_nom_c)
+ DC_LOG_DEBUG("DML Validation | NOM_PARAMETERS_2:DST_Y_PER_PTE_ROW_NOM_C - Expected: %u Actual: %u\n",
+ dml_dlg_attr->dst_y_per_pte_row_nom_c, dlg_attr.dst_y_per_pte_row_nom_c);
+ if (dlg_attr.refcyc_per_pte_group_nom_c != dml_dlg_attr->refcyc_per_pte_group_nom_c)
+ DC_LOG_DEBUG("DML Validation | NOM_PARAMETERS_3:REFCYC_PER_PTE_GROUP_NOM_C - Expected: %u Actual: %u\n",
+ dml_dlg_attr->refcyc_per_pte_group_nom_c, dlg_attr.refcyc_per_pte_group_nom_c);
+ if (dlg_attr.dst_y_per_meta_row_nom_c != dml_dlg_attr->dst_y_per_meta_row_nom_c)
+ DC_LOG_DEBUG("DML Validation | NOM_PARAMETERS_6:DST_Y_PER_META_ROW_NOM_C - Expected: %u Actual: %u\n",
+ dml_dlg_attr->dst_y_per_meta_row_nom_c, dlg_attr.dst_y_per_meta_row_nom_c);
+ if (dlg_attr.refcyc_per_meta_chunk_nom_c != dml_dlg_attr->refcyc_per_meta_chunk_nom_c)
+ DC_LOG_DEBUG("DML Validation | NOM_PARAMETERS_7:REFCYC_PER_META_CHUNK_NOM_C - Expected: %u Actual: %u\n",
+ dml_dlg_attr->refcyc_per_meta_chunk_nom_c, dlg_attr.refcyc_per_meta_chunk_nom_c);
+ if (dlg_attr.refcyc_per_line_delivery_pre_l != dml_dlg_attr->refcyc_per_line_delivery_pre_l)
+ DC_LOG_DEBUG("DML Validation | PER_LINE_DELIVERY_PRE:REFCYC_PER_LINE_DELIVERY_PRE_L - Expected: %u Actual: %u\n",
+ dml_dlg_attr->refcyc_per_line_delivery_pre_l, dlg_attr.refcyc_per_line_delivery_pre_l);
+ if (dlg_attr.refcyc_per_line_delivery_pre_c != dml_dlg_attr->refcyc_per_line_delivery_pre_c)
+ DC_LOG_DEBUG("DML Validation | PER_LINE_DELIVERY_PRE:REFCYC_PER_LINE_DELIVERY_PRE_C - Expected: %u Actual: %u\n",
+ dml_dlg_attr->refcyc_per_line_delivery_pre_c, dlg_attr.refcyc_per_line_delivery_pre_c);
+ if (dlg_attr.refcyc_per_meta_chunk_vblank_l != dml_dlg_attr->refcyc_per_meta_chunk_vblank_l)
+ DC_LOG_DEBUG("DML Validation | VBLANK_PARAMETERS_3:REFCYC_PER_META_CHUNK_VBLANK_L - Expected: %u Actual: %u\n",
+ dml_dlg_attr->refcyc_per_meta_chunk_vblank_l, dlg_attr.refcyc_per_meta_chunk_vblank_l);
+ if (dlg_attr.refcyc_per_meta_chunk_vblank_c != dml_dlg_attr->refcyc_per_meta_chunk_vblank_c)
+ DC_LOG_DEBUG("DML Validation | VBLANK_PARAMETERS_4:REFCYC_PER_META_CHUNK_VBLANK_C - Expected: %u Actual: %u\n",
+ dml_dlg_attr->refcyc_per_meta_chunk_vblank_c, dlg_attr.refcyc_per_meta_chunk_vblank_c);
+
+ /* TTU - per hubp */
+ REG_GET_2(DCN_TTU_QOS_WM,
+ QoS_LEVEL_LOW_WM, &ttu_attr.qos_level_low_wm,
+ QoS_LEVEL_HIGH_WM, &ttu_attr.qos_level_high_wm);
+
+ if (ttu_attr.qos_level_low_wm != dml_ttu_attr->qos_level_low_wm)
+ DC_LOG_DEBUG("DML Validation | DCN_TTU_QOS_WM:QoS_LEVEL_LOW_WM - Expected: %u Actual: %u\n",
+ dml_ttu_attr->qos_level_low_wm, ttu_attr.qos_level_low_wm);
+ if (ttu_attr.qos_level_high_wm != dml_ttu_attr->qos_level_high_wm)
+ DC_LOG_DEBUG("DML Validation | DCN_TTU_QOS_WM:QoS_LEVEL_HIGH_WM - Expected: %u Actual: %u\n",
+ dml_ttu_attr->qos_level_high_wm, ttu_attr.qos_level_high_wm);
+
+ /* TTU - per luma/chroma */
+	/* Assume surf0 is luma and surf1 is chroma */
+ REG_GET_3(DCN_SURF0_TTU_CNTL0,
+ REFCYC_PER_REQ_DELIVERY, &ttu_attr.refcyc_per_req_delivery_l,
+ QoS_LEVEL_FIXED, &ttu_attr.qos_level_fixed_l,
+ QoS_RAMP_DISABLE, &ttu_attr.qos_ramp_disable_l);
+ REG_GET_3(DCN_SURF1_TTU_CNTL0,
+ REFCYC_PER_REQ_DELIVERY, &ttu_attr.refcyc_per_req_delivery_c,
+ QoS_LEVEL_FIXED, &ttu_attr.qos_level_fixed_c,
+ QoS_RAMP_DISABLE, &ttu_attr.qos_ramp_disable_c);
+ REG_GET_3(DCN_CUR0_TTU_CNTL0,
+ REFCYC_PER_REQ_DELIVERY, &ttu_attr.refcyc_per_req_delivery_cur0,
+ QoS_LEVEL_FIXED, &ttu_attr.qos_level_fixed_cur0,
+ QoS_RAMP_DISABLE, &ttu_attr.qos_ramp_disable_cur0);
+ REG_GET(FLIP_PARAMETERS_1,
+ REFCYC_PER_PTE_GROUP_FLIP_L, &dlg_attr.refcyc_per_pte_group_flip_l);
+ REG_GET(DCN_CUR0_TTU_CNTL1,
+ REFCYC_PER_REQ_DELIVERY_PRE, &ttu_attr.refcyc_per_req_delivery_pre_cur0);
+ REG_GET(DCN_CUR1_TTU_CNTL1,
+ REFCYC_PER_REQ_DELIVERY_PRE, &ttu_attr.refcyc_per_req_delivery_pre_cur1);
+ REG_GET(DCN_SURF0_TTU_CNTL1,
+ REFCYC_PER_REQ_DELIVERY_PRE, &ttu_attr.refcyc_per_req_delivery_pre_l);
+ REG_GET(DCN_SURF1_TTU_CNTL1,
+ REFCYC_PER_REQ_DELIVERY_PRE, &ttu_attr.refcyc_per_req_delivery_pre_c);
+
+ if (ttu_attr.refcyc_per_req_delivery_l != dml_ttu_attr->refcyc_per_req_delivery_l)
+ DC_LOG_DEBUG("DML Validation | DCN_SURF0_TTU_CNTL0:REFCYC_PER_REQ_DELIVERY - Expected: %u Actual: %u\n",
+ dml_ttu_attr->refcyc_per_req_delivery_l, ttu_attr.refcyc_per_req_delivery_l);
+ if (ttu_attr.qos_level_fixed_l != dml_ttu_attr->qos_level_fixed_l)
+ DC_LOG_DEBUG("DML Validation | DCN_SURF0_TTU_CNTL0:QoS_LEVEL_FIXED - Expected: %u Actual: %u\n",
+ dml_ttu_attr->qos_level_fixed_l, ttu_attr.qos_level_fixed_l);
+ if (ttu_attr.qos_ramp_disable_l != dml_ttu_attr->qos_ramp_disable_l)
+ DC_LOG_DEBUG("DML Validation | DCN_SURF0_TTU_CNTL0:QoS_RAMP_DISABLE - Expected: %u Actual: %u\n",
+ dml_ttu_attr->qos_ramp_disable_l, ttu_attr.qos_ramp_disable_l);
+ if (ttu_attr.refcyc_per_req_delivery_c != dml_ttu_attr->refcyc_per_req_delivery_c)
+ DC_LOG_DEBUG("DML Validation | DCN_SURF1_TTU_CNTL0:REFCYC_PER_REQ_DELIVERY - Expected: %u Actual: %u\n",
+ dml_ttu_attr->refcyc_per_req_delivery_c, ttu_attr.refcyc_per_req_delivery_c);
+ if (ttu_attr.qos_level_fixed_c != dml_ttu_attr->qos_level_fixed_c)
+ DC_LOG_DEBUG("DML Validation | DCN_SURF1_TTU_CNTL0:QoS_LEVEL_FIXED - Expected: %u Actual: %u\n",
+ dml_ttu_attr->qos_level_fixed_c, ttu_attr.qos_level_fixed_c);
+ if (ttu_attr.qos_ramp_disable_c != dml_ttu_attr->qos_ramp_disable_c)
+ DC_LOG_DEBUG("DML Validation | DCN_SURF1_TTU_CNTL0:QoS_RAMP_DISABLE - Expected: %u Actual: %u\n",
+ dml_ttu_attr->qos_ramp_disable_c, ttu_attr.qos_ramp_disable_c);
+ if (ttu_attr.refcyc_per_req_delivery_cur0 != dml_ttu_attr->refcyc_per_req_delivery_cur0)
+ DC_LOG_DEBUG("DML Validation | DCN_CUR0_TTU_CNTL0:REFCYC_PER_REQ_DELIVERY - Expected: %u Actual: %u\n",
+ dml_ttu_attr->refcyc_per_req_delivery_cur0, ttu_attr.refcyc_per_req_delivery_cur0);
+ if (ttu_attr.qos_level_fixed_cur0 != dml_ttu_attr->qos_level_fixed_cur0)
+ DC_LOG_DEBUG("DML Validation | DCN_CUR0_TTU_CNTL0:QoS_LEVEL_FIXED - Expected: %u Actual: %u\n",
+ dml_ttu_attr->qos_level_fixed_cur0, ttu_attr.qos_level_fixed_cur0);
+ if (ttu_attr.qos_ramp_disable_cur0 != dml_ttu_attr->qos_ramp_disable_cur0)
+ DC_LOG_DEBUG("DML Validation | DCN_CUR0_TTU_CNTL0:QoS_RAMP_DISABLE - Expected: %u Actual: %u\n",
+ dml_ttu_attr->qos_ramp_disable_cur0, ttu_attr.qos_ramp_disable_cur0);
+ if (dlg_attr.refcyc_per_pte_group_flip_l != dml_dlg_attr->refcyc_per_pte_group_flip_l)
+ DC_LOG_DEBUG("DML Validation | FLIP_PARAMETERS_1:REFCYC_PER_PTE_GROUP_FLIP_L - Expected: %u Actual: %u\n",
+ dml_dlg_attr->refcyc_per_pte_group_flip_l, dlg_attr.refcyc_per_pte_group_flip_l);
+ if (ttu_attr.refcyc_per_req_delivery_pre_cur0 != dml_ttu_attr->refcyc_per_req_delivery_pre_cur0)
+ DC_LOG_DEBUG("DML Validation | DCN_CUR0_TTU_CNTL1:REFCYC_PER_REQ_DELIVERY_PRE - Expected: %u Actual: %u\n",
+ dml_ttu_attr->refcyc_per_req_delivery_pre_cur0, ttu_attr.refcyc_per_req_delivery_pre_cur0);
+ if (ttu_attr.refcyc_per_req_delivery_pre_cur1 != dml_ttu_attr->refcyc_per_req_delivery_pre_cur1)
+ DC_LOG_DEBUG("DML Validation | DCN_CUR1_TTU_CNTL1:REFCYC_PER_REQ_DELIVERY_PRE - Expected: %u Actual: %u\n",
+ dml_ttu_attr->refcyc_per_req_delivery_pre_cur1, ttu_attr.refcyc_per_req_delivery_pre_cur1);
+ if (ttu_attr.refcyc_per_req_delivery_pre_l != dml_ttu_attr->refcyc_per_req_delivery_pre_l)
+ DC_LOG_DEBUG("DML Validation | DCN_SURF0_TTU_CNTL1:REFCYC_PER_REQ_DELIVERY_PRE - Expected: %u Actual: %u\n",
+ dml_ttu_attr->refcyc_per_req_delivery_pre_l, ttu_attr.refcyc_per_req_delivery_pre_l);
+ if (ttu_attr.refcyc_per_req_delivery_pre_c != dml_ttu_attr->refcyc_per_req_delivery_pre_c)
+ DC_LOG_DEBUG("DML Validation | DCN_SURF1_TTU_CNTL1:REFCYC_PER_REQ_DELIVERY_PRE - Expected: %u Actual: %u\n",
+ dml_ttu_attr->refcyc_per_req_delivery_pre_c, ttu_attr.refcyc_per_req_delivery_pre_c);
+
+ /* Host VM deadline regs */
+ REG_GET(VBLANK_PARAMETERS_5,
+ REFCYC_PER_VM_GROUP_VBLANK, &dlg_attr.refcyc_per_vm_group_vblank);
+ REG_GET(VBLANK_PARAMETERS_6,
+ REFCYC_PER_VM_REQ_VBLANK, &dlg_attr.refcyc_per_vm_req_vblank);
+ REG_GET(FLIP_PARAMETERS_3,
+ REFCYC_PER_VM_GROUP_FLIP, &dlg_attr.refcyc_per_vm_group_flip);
+ REG_GET(FLIP_PARAMETERS_4,
+ REFCYC_PER_VM_REQ_FLIP, &dlg_attr.refcyc_per_vm_req_flip);
+ REG_GET(FLIP_PARAMETERS_5,
+ REFCYC_PER_PTE_GROUP_FLIP_C, &dlg_attr.refcyc_per_pte_group_flip_c);
+ REG_GET(FLIP_PARAMETERS_6,
+ REFCYC_PER_META_CHUNK_FLIP_C, &dlg_attr.refcyc_per_meta_chunk_flip_c);
+ REG_GET(FLIP_PARAMETERS_2,
+ REFCYC_PER_META_CHUNK_FLIP_L, &dlg_attr.refcyc_per_meta_chunk_flip_l);
+
+ if (dlg_attr.refcyc_per_vm_group_vblank != dml_dlg_attr->refcyc_per_vm_group_vblank)
+ DC_LOG_DEBUG("DML Validation | VBLANK_PARAMETERS_5:REFCYC_PER_VM_GROUP_VBLANK - Expected: %u Actual: %u\n",
+ dml_dlg_attr->refcyc_per_vm_group_vblank, dlg_attr.refcyc_per_vm_group_vblank);
+ if (dlg_attr.refcyc_per_vm_req_vblank != dml_dlg_attr->refcyc_per_vm_req_vblank)
+ DC_LOG_DEBUG("DML Validation | VBLANK_PARAMETERS_6:REFCYC_PER_VM_REQ_VBLANK - Expected: %u Actual: %u\n",
+ dml_dlg_attr->refcyc_per_vm_req_vblank, dlg_attr.refcyc_per_vm_req_vblank);
+ if (dlg_attr.refcyc_per_vm_group_flip != dml_dlg_attr->refcyc_per_vm_group_flip)
+ DC_LOG_DEBUG("DML Validation | FLIP_PARAMETERS_3:REFCYC_PER_VM_GROUP_FLIP - Expected: %u Actual: %u\n",
+ dml_dlg_attr->refcyc_per_vm_group_flip, dlg_attr.refcyc_per_vm_group_flip);
+ if (dlg_attr.refcyc_per_vm_req_flip != dml_dlg_attr->refcyc_per_vm_req_flip)
+ DC_LOG_DEBUG("DML Validation | FLIP_PARAMETERS_4:REFCYC_PER_VM_REQ_FLIP - Expected: %u Actual: %u\n",
+ dml_dlg_attr->refcyc_per_vm_req_flip, dlg_attr.refcyc_per_vm_req_flip);
+ if (dlg_attr.refcyc_per_pte_group_flip_c != dml_dlg_attr->refcyc_per_pte_group_flip_c)
+ DC_LOG_DEBUG("DML Validation | FLIP_PARAMETERS_5:REFCYC_PER_PTE_GROUP_FLIP_C - Expected: %u Actual: %u\n",
+ dml_dlg_attr->refcyc_per_pte_group_flip_c, dlg_attr.refcyc_per_pte_group_flip_c);
+ if (dlg_attr.refcyc_per_meta_chunk_flip_c != dml_dlg_attr->refcyc_per_meta_chunk_flip_c)
+ DC_LOG_DEBUG("DML Validation | FLIP_PARAMETERS_6:REFCYC_PER_META_CHUNK_FLIP_C - Expected: %u Actual: %u\n",
+ dml_dlg_attr->refcyc_per_meta_chunk_flip_c, dlg_attr.refcyc_per_meta_chunk_flip_c);
+ if (dlg_attr.refcyc_per_meta_chunk_flip_l != dml_dlg_attr->refcyc_per_meta_chunk_flip_l)
+ DC_LOG_DEBUG("DML Validation | FLIP_PARAMETERS_2:REFCYC_PER_META_CHUNK_FLIP_L - Expected: %u Actual: %u\n",
+ dml_dlg_attr->refcyc_per_meta_chunk_flip_l, dlg_attr.refcyc_per_meta_chunk_flip_l);
+}
+
+static void program_surface_flip_and_addr(struct hubp *hubp, struct surface_flip_registers *flip_regs)
+{
+ struct dcn21_hubp *hubp21 = TO_DCN21_HUBP(hubp);
+
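+	/*
+	 * Program flip type, VMID and TMZ state first; the primary surface
+	 * address is written last so every other field is in place when the
+	 * new address takes effect.
+	 */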
+ REG_UPDATE_3(DCSURF_FLIP_CONTROL,
+ SURFACE_FLIP_TYPE, flip_regs->immediate,
+ SURFACE_FLIP_MODE_FOR_STEREOSYNC, flip_regs->grph_stereo,
+ SURFACE_FLIP_IN_STEREOSYNC, flip_regs->grph_stereo);
+
+ REG_UPDATE(VMID_SETTINGS_0,
+ VMID, flip_regs->vmid);
+
+ REG_UPDATE_8(DCSURF_SURFACE_CONTROL,
+ PRIMARY_SURFACE_TMZ, flip_regs->tmz_surface,
+ PRIMARY_SURFACE_TMZ_C, flip_regs->tmz_surface,
+ PRIMARY_META_SURFACE_TMZ, flip_regs->tmz_surface,
+ PRIMARY_META_SURFACE_TMZ_C, flip_regs->tmz_surface,
+ SECONDARY_SURFACE_TMZ, flip_regs->tmz_surface,
+ SECONDARY_SURFACE_TMZ_C, flip_regs->tmz_surface,
+ SECONDARY_META_SURFACE_TMZ, flip_regs->tmz_surface,
+ SECONDARY_META_SURFACE_TMZ_C, flip_regs->tmz_surface);
+
+ REG_SET(DCSURF_PRIMARY_META_SURFACE_ADDRESS_HIGH_C, 0,
+ PRIMARY_META_SURFACE_ADDRESS_HIGH_C,
+ flip_regs->DCSURF_PRIMARY_META_SURFACE_ADDRESS_HIGH_C);
+
+ REG_SET(DCSURF_PRIMARY_META_SURFACE_ADDRESS_C, 0,
+ PRIMARY_META_SURFACE_ADDRESS_C,
+ flip_regs->DCSURF_PRIMARY_META_SURFACE_ADDRESS_C);
+
+ REG_SET(DCSURF_PRIMARY_META_SURFACE_ADDRESS_HIGH, 0,
+ PRIMARY_META_SURFACE_ADDRESS_HIGH,
+ flip_regs->DCSURF_PRIMARY_META_SURFACE_ADDRESS_HIGH);
+
+ REG_SET(DCSURF_PRIMARY_META_SURFACE_ADDRESS, 0,
+ PRIMARY_META_SURFACE_ADDRESS,
+ flip_regs->DCSURF_PRIMARY_META_SURFACE_ADDRESS);
+
+ REG_SET(DCSURF_SECONDARY_META_SURFACE_ADDRESS_HIGH, 0,
+ SECONDARY_META_SURFACE_ADDRESS_HIGH,
+ flip_regs->DCSURF_SECONDARY_META_SURFACE_ADDRESS_HIGH);
+
+ REG_SET(DCSURF_SECONDARY_META_SURFACE_ADDRESS, 0,
+ SECONDARY_META_SURFACE_ADDRESS,
+ flip_regs->DCSURF_SECONDARY_META_SURFACE_ADDRESS);
+
+
+ REG_SET(DCSURF_SECONDARY_SURFACE_ADDRESS_HIGH, 0,
+ SECONDARY_SURFACE_ADDRESS_HIGH,
+ flip_regs->DCSURF_SECONDARY_SURFACE_ADDRESS_HIGH);
+
+ REG_SET(DCSURF_SECONDARY_SURFACE_ADDRESS, 0,
+ SECONDARY_SURFACE_ADDRESS,
+ flip_regs->DCSURF_SECONDARY_SURFACE_ADDRESS);
+
+
+ REG_SET(DCSURF_PRIMARY_SURFACE_ADDRESS_HIGH_C, 0,
+ PRIMARY_SURFACE_ADDRESS_HIGH_C,
+ flip_regs->DCSURF_PRIMARY_SURFACE_ADDRESS_HIGH_C);
+
+ REG_SET(DCSURF_PRIMARY_SURFACE_ADDRESS_C, 0,
+ PRIMARY_SURFACE_ADDRESS_C,
+ flip_regs->DCSURF_PRIMARY_SURFACE_ADDRESS_C);
+
+ REG_SET(DCSURF_PRIMARY_SURFACE_ADDRESS_HIGH, 0,
+ PRIMARY_SURFACE_ADDRESS_HIGH,
+ flip_regs->DCSURF_PRIMARY_SURFACE_ADDRESS_HIGH);
+
+ REG_SET(DCSURF_PRIMARY_SURFACE_ADDRESS, 0,
+ PRIMARY_SURFACE_ADDRESS,
+ flip_regs->DCSURF_PRIMARY_SURFACE_ADDRESS);
+}
+
+void dmcub_PLAT_54186_wa(struct hubp *hubp, struct surface_flip_registers *flip_regs)
+{
+ struct dc_dmub_srv *dmcub = hubp->ctx->dmub_srv;
+ struct dmub_rb_cmd_PLAT_54186_wa PLAT_54186_wa = { 0 };
+
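+	/*
+	 * Package the flip registers into a DMUB command; the DMCUB
+	 * firmware performs the actual flip programming for this
+	 * workaround.
+	 */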
+ PLAT_54186_wa.header.type = DMUB_CMD__PLAT_54186_WA;
+ PLAT_54186_wa.flip.DCSURF_PRIMARY_SURFACE_ADDRESS = flip_regs->DCSURF_PRIMARY_SURFACE_ADDRESS;
+ PLAT_54186_wa.flip.DCSURF_PRIMARY_SURFACE_ADDRESS_C = flip_regs->DCSURF_PRIMARY_SURFACE_ADDRESS_C;
+ PLAT_54186_wa.flip.DCSURF_PRIMARY_SURFACE_ADDRESS_HIGH = flip_regs->DCSURF_PRIMARY_SURFACE_ADDRESS_HIGH;
+ PLAT_54186_wa.flip.DCSURF_PRIMARY_SURFACE_ADDRESS_HIGH_C = flip_regs->DCSURF_PRIMARY_SURFACE_ADDRESS_HIGH_C;
+ PLAT_54186_wa.flip.flip_params.grph_stereo = flip_regs->grph_stereo;
+ PLAT_54186_wa.flip.flip_params.hubp_inst = hubp->inst;
+ PLAT_54186_wa.flip.flip_params.immediate = flip_regs->immediate;
+ PLAT_54186_wa.flip.flip_params.tmz_surface = flip_regs->tmz_surface;
+ PLAT_54186_wa.flip.flip_params.vmid = flip_regs->vmid;
+
+ PERF_TRACE(); // TODO: remove after performance is stable.
+ dc_dmub_srv_cmd_queue(dmcub, &PLAT_54186_wa.header);
+ PERF_TRACE(); // TODO: remove after performance is stable.
+ dc_dmub_srv_cmd_execute(dmcub);
+ PERF_TRACE(); // TODO: remove after performance is stable.
+ dc_dmub_srv_wait_idle(dmcub);
+ PERF_TRACE(); // TODO: remove after performance is stable.
+}
+
+bool hubp21_program_surface_flip_and_addr(
+ struct hubp *hubp,
+ const struct dc_plane_address *address,
+ bool flip_immediate)
+{
+ struct dc_debug_options *debug = &hubp->ctx->dc->debug;
+ struct dcn21_hubp *hubp21 = TO_DCN21_HUBP(hubp);
+ struct surface_flip_registers flip_regs = { 0 };
+
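+	/*
+	 * Gather the per-plane addresses into flip_regs, then either hand
+	 * the flip to DMCUB or program the flip registers directly.
+	 */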
+ flip_regs.vmid = address->vmid;
+
+ switch (address->type) {
+ case PLN_ADDR_TYPE_GRAPHICS:
+ if (address->grph.addr.quad_part == 0) {
+ BREAK_TO_DEBUGGER();
+ break;
+ }
+
+ if (address->grph.meta_addr.quad_part != 0) {
+ flip_regs.DCSURF_PRIMARY_META_SURFACE_ADDRESS =
+ address->grph.meta_addr.low_part;
+ flip_regs.DCSURF_PRIMARY_META_SURFACE_ADDRESS_HIGH =
+ address->grph.meta_addr.high_part;
+ }
+
+ flip_regs.DCSURF_PRIMARY_SURFACE_ADDRESS =
+ address->grph.addr.low_part;
+ flip_regs.DCSURF_PRIMARY_SURFACE_ADDRESS_HIGH =
+ address->grph.addr.high_part;
+ break;
+ case PLN_ADDR_TYPE_VIDEO_PROGRESSIVE:
+ if (address->video_progressive.luma_addr.quad_part == 0
+ || address->video_progressive.chroma_addr.quad_part == 0)
+ break;
+
+ if (address->video_progressive.luma_meta_addr.quad_part != 0) {
+ flip_regs.DCSURF_PRIMARY_META_SURFACE_ADDRESS =
+ address->video_progressive.luma_meta_addr.low_part;
+ flip_regs.DCSURF_PRIMARY_META_SURFACE_ADDRESS_HIGH =
+ address->video_progressive.luma_meta_addr.high_part;
+
+ flip_regs.DCSURF_PRIMARY_META_SURFACE_ADDRESS_C =
+ address->video_progressive.chroma_meta_addr.low_part;
+ flip_regs.DCSURF_PRIMARY_META_SURFACE_ADDRESS_HIGH_C =
+ address->video_progressive.chroma_meta_addr.high_part;
+ }
+
+ flip_regs.DCSURF_PRIMARY_SURFACE_ADDRESS =
+ address->video_progressive.luma_addr.low_part;
+ flip_regs.DCSURF_PRIMARY_SURFACE_ADDRESS_HIGH =
+ address->video_progressive.luma_addr.high_part;
+
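+		/* Apply the chroma address offset cached by
+		 * hubp21_apply_PLAT_54186_wa(), if the workaround is active.
+		 */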
+		if (debug->nv12_iflip_vm_wa)
+			flip_regs.DCSURF_PRIMARY_SURFACE_ADDRESS_C =
+				address->video_progressive.chroma_addr.low_part +
+				hubp21->PLAT_54186_wa_chroma_addr_offset;
+		else
+			flip_regs.DCSURF_PRIMARY_SURFACE_ADDRESS_C =
+				address->video_progressive.chroma_addr.low_part;
+
+ flip_regs.DCSURF_PRIMARY_SURFACE_ADDRESS_HIGH_C =
+ address->video_progressive.chroma_addr.high_part;
+
+ break;
+ case PLN_ADDR_TYPE_GRPH_STEREO:
+ if (address->grph_stereo.left_addr.quad_part == 0)
+ break;
+ if (address->grph_stereo.right_addr.quad_part == 0)
+ break;
+
+ flip_regs.grph_stereo = true;
+
+ if (address->grph_stereo.right_meta_addr.quad_part != 0) {
+ flip_regs.DCSURF_SECONDARY_META_SURFACE_ADDRESS =
+ address->grph_stereo.right_meta_addr.low_part;
+ flip_regs.DCSURF_SECONDARY_META_SURFACE_ADDRESS_HIGH =
+ address->grph_stereo.right_meta_addr.high_part;
+ }
+
+ if (address->grph_stereo.left_meta_addr.quad_part != 0) {
+ flip_regs.DCSURF_PRIMARY_META_SURFACE_ADDRESS =
+ address->grph_stereo.left_meta_addr.low_part;
+ flip_regs.DCSURF_PRIMARY_META_SURFACE_ADDRESS_HIGH =
+ address->grph_stereo.left_meta_addr.high_part;
+ }
+
+ flip_regs.DCSURF_PRIMARY_SURFACE_ADDRESS =
+ address->grph_stereo.left_addr.low_part;
+ flip_regs.DCSURF_PRIMARY_SURFACE_ADDRESS_HIGH =
+ address->grph_stereo.left_addr.high_part;
+
+ flip_regs.DCSURF_SECONDARY_SURFACE_ADDRESS =
+ address->grph_stereo.right_addr.low_part;
+ flip_regs.DCSURF_SECONDARY_SURFACE_ADDRESS_HIGH =
+ address->grph_stereo.right_addr.high_part;
+
+ break;
+ default:
+ BREAK_TO_DEBUGGER();
+ break;
+ }
+
+ flip_regs.tmz_surface = address->tmz_surface;
+ flip_regs.immediate = flip_immediate;
+
+ if (hubp->ctx->dc->debug.enable_dmcub_surface_flip && address->type == PLN_ADDR_TYPE_VIDEO_PROGRESSIVE)
+ dmcub_PLAT_54186_wa(hubp, &flip_regs);
+ else
+ program_surface_flip_and_addr(hubp, &flip_regs);
+
+ hubp->request_address = *address;
+
+ return true;
+}
+
void hubp21_init(struct hubp *hubp)
{
// DEDCN21-133: Inconsistent row starting line for flip between DPTE and Meta
@@ -203,7 +915,7 @@ void hubp21_init(struct hubp *hubp)
static struct hubp_funcs dcn21_hubp_funcs = {
.hubp_enable_tripleBuffer = hubp2_enable_triplebuffer,
.hubp_is_triplebuffer_enabled = hubp2_is_triplebuffer_enabled,
- .hubp_program_surface_flip_and_addr = hubp2_program_surface_flip_and_addr,
+ .hubp_program_surface_flip_and_addr = hubp21_program_surface_flip_and_addr,
.hubp_program_surface_config = hubp1_program_surface_config,
.hubp_is_flip_pending = hubp1_is_flip_pending,
.hubp_setup = hubp21_setup,
@@ -211,7 +923,8 @@ static struct hubp_funcs dcn21_hubp_funcs = {
.hubp_set_vm_system_aperture_settings = hubp21_set_vm_system_aperture_settings,
.set_blank = hubp1_set_blank,
.dcc_control = hubp1_dcc_control,
- .mem_program_viewport = min_set_viewport,
+ .mem_program_viewport = hubp21_set_viewport,
+ .apply_PLAT_54186_wa = hubp21_apply_PLAT_54186_wa,
.set_cursor_attributes = hubp2_cursor_set_attributes,
.set_cursor_position = hubp1_cursor_set_position,
.hubp_clk_cntl = hubp1_clk_cntl,
@@ -223,6 +936,7 @@ static struct hubp_funcs dcn21_hubp_funcs = {
.hubp_clear_underflow = hubp1_clear_underflow,
.hubp_set_flip_control_surface_gsl = hubp2_set_flip_control_surface_gsl,
.hubp_init = hubp21_init,
+ .validate_dml_output = hubp21_validate_dml_output,
};
bool hubp21_construct(
diff --git a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hubp.h b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hubp.h
index aeda719a2a13..9873b6cbc5ba 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hubp.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hubp.h
@@ -108,6 +108,7 @@ struct dcn21_hubp {
const struct dcn_hubp2_registers *hubp_regs;
const struct dcn_hubp2_shift *hubp_shift;
const struct dcn_hubp2_mask *hubp_mask;
+ int PLAT_54186_wa_chroma_addr_offset;
};
bool hubp21_construct(
diff --git a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hwseq.c b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hwseq.c
index b25215cadf85..081ad8e43d58 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hwseq.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hwseq.c
@@ -28,7 +28,7 @@
#include "core_types.h"
#include "resource.h"
#include "dce/dce_hwseq.h"
-#include "dcn20/dcn20_hwseq.h"
+#include "dcn21_hwseq.h"
#include "vmid.h"
#include "reg_helper.h"
#include "hw/clk_mgr.h"
@@ -61,7 +61,7 @@ static void mmhub_update_page_table_config(struct dcn_hubbub_phys_addr_config *c
}
-static int dcn21_init_sys_ctx(struct dce_hwseq *hws, struct dc *dc, struct dc_phy_addr_space_config *pa_config)
+int dcn21_init_sys_ctx(struct dce_hwseq *hws, struct dc *dc, struct dc_phy_addr_space_config *pa_config)
{
struct dcn_hubbub_phys_addr_config config;
@@ -82,7 +82,7 @@ static int dcn21_init_sys_ctx(struct dce_hwseq *hws, struct dc *dc, struct dc_ph
// Workaround for Renoir s0i3: if the register is already programmed, bypass golden init.
-static bool dcn21_s0i3_golden_init_wa(struct dc *dc)
+bool dcn21_s0i3_golden_init_wa(struct dc *dc)
{
struct dce_hwseq *hws = dc->hwseq;
uint32_t value = 0;
@@ -112,11 +112,3 @@ void dcn21_optimize_pwr_state(
true);
}
-void dcn21_hw_sequencer_construct(struct dc *dc)
-{
- dcn20_hw_sequencer_construct(dc);
- dc->hwss.init_sys_ctx = dcn21_init_sys_ctx;
- dc->hwss.s0i3_golden_init_wa = dcn21_s0i3_golden_init_wa;
- dc->hwss.optimize_pwr_state = dcn21_optimize_pwr_state;
- dc->hwss.exit_optimized_pwr_state = dcn21_exit_optimized_pwr_state;
-}
diff --git a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hwseq.h b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hwseq.h
index be67b62e6fb1..182736096123 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hwseq.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hwseq.h
@@ -26,8 +26,22 @@
#ifndef __DC_HWSS_DCN21_H__
#define __DC_HWSS_DCN21_H__
+#include "hw_sequencer_private.h"
+
struct dc;
-void dcn21_hw_sequencer_construct(struct dc *dc);
+int dcn21_init_sys_ctx(struct dce_hwseq *hws,
+ struct dc *dc,
+ struct dc_phy_addr_space_config *pa_config);
+
+bool dcn21_s0i3_golden_init_wa(struct dc *dc);
+
+void dcn21_exit_optimized_pwr_state(
+ const struct dc *dc,
+ struct dc_state *context);
+
+void dcn21_optimize_pwr_state(
+ const struct dc *dc,
+ struct dc_state *context);
#endif /* __DC_HWSS_DCN21_H__ */
diff --git a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_init.c b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_init.c
new file mode 100644
index 000000000000..4861aa5c59ae
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_init.c
@@ -0,0 +1,142 @@
+/*
+ * Copyright 2016 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include "dce110/dce110_hw_sequencer.h"
+#include "dcn10/dcn10_hw_sequencer.h"
+#include "dcn20/dcn20_hwseq.h"
+#include "dcn21_hwseq.h"
+
+static const struct hw_sequencer_funcs dcn21_funcs = {
+ .program_gamut_remap = dcn10_program_gamut_remap,
+ .init_hw = dcn10_init_hw,
+ .apply_ctx_to_hw = dce110_apply_ctx_to_hw,
+ .apply_ctx_for_surface = NULL,
+ .program_front_end_for_ctx = dcn20_program_front_end_for_ctx,
+ .update_plane_addr = dcn20_update_plane_addr,
+ .update_dchub = dcn10_update_dchub,
+ .update_pending_status = dcn10_update_pending_status,
+ .program_output_csc = dcn20_program_output_csc,
+ .enable_accelerated_mode = dce110_enable_accelerated_mode,
+ .enable_timing_synchronization = dcn10_enable_timing_synchronization,
+ .enable_per_frame_crtc_position_reset = dcn10_enable_per_frame_crtc_position_reset,
+ .update_info_frame = dce110_update_info_frame,
+ .send_immediate_sdp_message = dcn10_send_immediate_sdp_message,
+ .enable_stream = dcn20_enable_stream,
+ .disable_stream = dce110_disable_stream,
+ .unblank_stream = dcn20_unblank_stream,
+ .blank_stream = dce110_blank_stream,
+ .enable_audio_stream = dce110_enable_audio_stream,
+ .disable_audio_stream = dce110_disable_audio_stream,
+ .disable_plane = dcn20_disable_plane,
+ .pipe_control_lock = dcn20_pipe_control_lock,
+ .pipe_control_lock_global = dcn20_pipe_control_lock_global,
+ .prepare_bandwidth = dcn20_prepare_bandwidth,
+ .optimize_bandwidth = dcn20_optimize_bandwidth,
+ .update_bandwidth = dcn20_update_bandwidth,
+ .set_drr = dcn10_set_drr,
+ .get_position = dcn10_get_position,
+ .set_static_screen_control = dcn10_set_static_screen_control,
+ .setup_stereo = dcn10_setup_stereo,
+ .set_avmute = dce110_set_avmute,
+ .log_hw_state = dcn10_log_hw_state,
+ .get_hw_state = dcn10_get_hw_state,
+ .clear_status_bits = dcn10_clear_status_bits,
+ .wait_for_mpcc_disconnect = dcn10_wait_for_mpcc_disconnect,
+ .edp_power_control = dce110_edp_power_control,
+ .edp_wait_for_hpd_ready = dce110_edp_wait_for_hpd_ready,
+ .set_cursor_position = dcn10_set_cursor_position,
+ .set_cursor_attribute = dcn10_set_cursor_attribute,
+ .set_cursor_sdr_white_level = dcn10_set_cursor_sdr_white_level,
+ .setup_periodic_interrupt = dcn10_setup_periodic_interrupt,
+ .set_clock = dcn10_set_clock,
+ .get_clock = dcn10_get_clock,
+ .program_triplebuffer = dcn20_program_triple_buffer,
+ .enable_writeback = dcn20_enable_writeback,
+ .disable_writeback = dcn20_disable_writeback,
+ .dmdata_status_done = dcn20_dmdata_status_done,
+ .program_dmdata_engine = dcn20_program_dmdata_engine,
+ .set_dmdata_attributes = dcn20_set_dmdata_attributes,
+ .init_sys_ctx = dcn21_init_sys_ctx,
+ .init_vm_ctx = dcn20_init_vm_ctx,
+ .set_flip_control_gsl = dcn20_set_flip_control_gsl,
+ .optimize_pwr_state = dcn21_optimize_pwr_state,
+ .exit_optimized_pwr_state = dcn21_exit_optimized_pwr_state,
+ .get_vupdate_offset_from_vsync = dcn10_get_vupdate_offset_from_vsync,
+};
+
+static const struct hwseq_private_funcs dcn21_private_funcs = {
+ .init_pipes = dcn10_init_pipes,
+ .update_plane_addr = dcn20_update_plane_addr,
+ .plane_atomic_disconnect = dcn10_plane_atomic_disconnect,
+ .update_mpcc = dcn20_update_mpcc,
+ .set_input_transfer_func = dcn20_set_input_transfer_func,
+ .set_output_transfer_func = dcn20_set_output_transfer_func,
+ .power_down = dce110_power_down,
+ .enable_display_power_gating = dcn10_dummy_display_power_gating,
+ .blank_pixel_data = dcn20_blank_pixel_data,
+ .reset_hw_ctx_wrap = dcn20_reset_hw_ctx_wrap,
+ .enable_stream_timing = dcn20_enable_stream_timing,
+ .edp_backlight_control = dce110_edp_backlight_control,
+ .disable_stream_gating = dcn20_disable_stream_gating,
+ .enable_stream_gating = dcn20_enable_stream_gating,
+ .setup_vupdate_interrupt = dcn20_setup_vupdate_interrupt,
+ .did_underflow_occur = dcn10_did_underflow_occur,
+ .init_blank = dcn20_init_blank,
+ .disable_vga = dcn20_disable_vga,
+ .bios_golden_init = dcn10_bios_golden_init,
+ .plane_atomic_disable = dcn20_plane_atomic_disable,
+ .plane_atomic_power_down = dcn10_plane_atomic_power_down,
+ .enable_power_gating_plane = dcn20_enable_power_gating_plane,
+ .dpp_pg_control = dcn20_dpp_pg_control,
+ .hubp_pg_control = dcn20_hubp_pg_control,
+ .update_odm = dcn20_update_odm,
+ .dsc_pg_control = dcn20_dsc_pg_control,
+ .get_surface_visual_confirm_color = dcn10_get_surface_visual_confirm_color,
+ .get_hdr_visual_confirm_color = dcn10_get_hdr_visual_confirm_color,
+ .set_hdr_multiplier = dcn10_set_hdr_multiplier,
+ .verify_allow_pstate_change_high = dcn10_verify_allow_pstate_change_high,
+ .s0i3_golden_init_wa = dcn21_s0i3_golden_init_wa,
+ .wait_for_blank_complete = dcn20_wait_for_blank_complete,
+ .dccg_init = dcn20_dccg_init,
+ .set_blend_lut = dcn20_set_blend_lut,
+ .set_shaper_3dlut = dcn20_set_shaper_3dlut,
+};
+
+void dcn21_hw_sequencer_construct(struct dc *dc)
+{
+ dc->hwss = dcn21_funcs;
+ dc->hwseq->funcs = dcn21_private_funcs;
+
+ if (IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment)) {
+ dc->hwss.init_hw = dcn20_fpga_init_hw;
+ dc->hwseq->funcs.init_pipes = NULL;
+ }
+}
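The new init file replaces the old field-by-field construct with the pattern the hwss refactor uses across DCN generations: keep a const template of function pointers, copy it into the dc wholesale at construct time, then patch individual entries for special environments such as FPGA. A minimal sketch of that pattern, with hypothetical demo_* names standing in for the driver's types:

	#include <stdbool.h>
	#include <stdio.h>

	struct demo_funcs {
		void (*init_hw)(void);
	};

	static void asic_init_hw(void) { puts("asic init"); }
	static void fpga_init_hw(void) { puts("fpga init"); }

	/* Per-generation template, analogous to dcn21_funcs above. */
	static const struct demo_funcs demo_template = {
		.init_hw = asic_init_hw,
	};

	struct demo_dc {
		struct demo_funcs hwss;
		bool is_fpga;
	};

	/* Copy the template by value, then override per environment. */
	static void demo_construct(struct demo_dc *dc)
	{
		dc->hwss = demo_template;
		if (dc->is_fpga)
			dc->hwss.init_hw = fpga_init_hw;
	}

	int main(void)
	{
		struct demo_dc dc = { .is_fpga = true };

		demo_construct(&dc);
		dc.hwss.init_hw();	/* prints "fpga init" */
		return 0;
	}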
diff --git a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_init.h b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_init.h
new file mode 100644
index 000000000000..3ed24292648a
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_init.h
@@ -0,0 +1,33 @@
+/*
+ * Copyright 2016 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef __DC_DCN21_INIT_H__
+#define __DC_DCN21_INIT_H__
+
+struct dc;
+
+void dcn21_hw_sequencer_construct(struct dc *dc);
+
+#endif /* __DC_DCN21_INIT_H__ */
diff --git a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_link_encoder.c b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_link_encoder.c
index e8a504ca5890..e45683ac871a 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_link_encoder.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_link_encoder.c
@@ -323,9 +323,7 @@ void dcn21_link_encoder_disable_output(
static const struct link_encoder_funcs dcn21_link_enc_funcs = {
-#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT
.read_state = link_enc2_read_state,
-#endif
.validate_output_with_stream =
dcn10_link_encoder_validate_output_with_stream,
.hw_init = enc2_hw_init,
diff --git a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_link_encoder.h b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_link_encoder.h
index 1d7a1a51f13d..033d5d76f195 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_link_encoder.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_link_encoder.h
@@ -33,6 +33,45 @@ struct dcn21_link_encoder {
struct dpcssys_phy_seq_cfg phy_seq_cfg;
};
+#define DPCS_DCN21_MASK_SH_LIST(mask_sh)\
+ DPCS_DCN2_MASK_SH_LIST(mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_PHY_FUSE3, RDPCS_PHY_TX_VBOOST_LVL, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_PHY_FUSE2, RDPCS_PHY_DP_MPLLB_CP_PROP_GS, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_PHY_FUSE0, RDPCS_PHY_RX_VREF_CTRL, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_PHY_FUSE0, RDPCS_PHY_DP_MPLLB_CP_INT_GS, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_DMCU_DPALT_DIS_BLOCK_REG, RDPCS_DMCU_DPALT_DIS_BLOCK_REG, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL15, RDPCS_PHY_SUP_PRE_HP, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL15, RDPCS_PHY_DP_TX0_VREGDRV_BYP, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL15, RDPCS_PHY_DP_TX1_VREGDRV_BYP, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL15, RDPCS_PHY_DP_TX2_VREGDRV_BYP, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL15, RDPCS_PHY_DP_TX3_VREGDRV_BYP, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL6, RDPCS_PHY_DPALT_DP4, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL6, RDPCS_PHY_DPALT_DISABLE, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_PHY_FUSE0, RDPCS_PHY_DP_TX0_EQ_MAIN, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_PHY_FUSE0, RDPCS_PHY_DP_TX0_EQ_PRE, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_PHY_FUSE0, RDPCS_PHY_DP_TX0_EQ_POST, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_PHY_FUSE1, RDPCS_PHY_DP_TX1_EQ_MAIN, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_PHY_FUSE1, RDPCS_PHY_DP_TX1_EQ_PRE, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_PHY_FUSE1, RDPCS_PHY_DP_TX1_EQ_POST, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_PHY_FUSE2, RDPCS_PHY_DP_TX2_EQ_MAIN, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_PHY_FUSE2, RDPCS_PHY_DP_TX2_EQ_PRE, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_PHY_FUSE2, RDPCS_PHY_DP_TX2_EQ_POST, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_PHY_FUSE3, RDPCS_PHY_DP_TX3_EQ_MAIN, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_PHY_FUSE3, RDPCS_PHY_DCO_FINETUNE, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_PHY_FUSE3, RDPCS_PHY_DCO_RANGE, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_PHY_FUSE3, RDPCS_PHY_DP_TX3_EQ_PRE, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_PHY_FUSE3, RDPCS_PHY_DP_TX3_EQ_POST, mask_sh),\
+ LE_SF(DCIO_SOFT_RESET, UNIPHYA_SOFT_RESET, mask_sh),\
+ LE_SF(DCIO_SOFT_RESET, UNIPHYB_SOFT_RESET, mask_sh),\
+ LE_SF(DCIO_SOFT_RESET, UNIPHYC_SOFT_RESET, mask_sh),\
+ LE_SF(DCIO_SOFT_RESET, UNIPHYD_SOFT_RESET, mask_sh),\
+ LE_SF(DCIO_SOFT_RESET, UNIPHYE_SOFT_RESET, mask_sh)
+
+#define DPCS_DCN21_REG_LIST(id) \
+ DPCS_DCN2_REG_LIST(id),\
+ SRI(RDPCSTX_PHY_CNTL15, RDPCSTX, id),\
+ SRI(RDPCSTX_DMCU_DPALT_DIS_BLOCK_REG, RDPCSTX, id)
+
#define LINK_ENCODER_MASK_SH_LIST_DCN21(mask_sh)\
LINK_ENCODER_MASK_SH_LIST_DCN20(mask_sh),\
LE_SF(UNIPHYA_CHANNEL_XBAR_CNTL, UNIPHY_CHANNEL0_XBAR_SOURCE, mask_sh),\
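The DPCS list above follows the usual DC register-macro scheme: a single field list expands twice, once with __SHIFT and once with _MASK, to populate two parallel structs from the generated register headers. A toy version of the mechanism, with the register and field names invented for illustration:

	#include <stdint.h>
	#include <stdio.h>

	/* A generated register header would normally provide these. */
	#define REGA__FIELD_X__SHIFT	0x4
	#define REGA__FIELD_X_MASK	0x00000030U

	/* Simplified LE_SF: paste register, field and suffix into one token. */
	#define SF(reg, field, post_fix) .field = reg ## __ ## field ## post_fix

	#define DEMO_MASK_SH_LIST(mask_sh) \
		SF(REGA, FIELD_X, mask_sh)

	struct demo_shift { uint8_t FIELD_X; };
	struct demo_mask { uint32_t FIELD_X; };

	static const struct demo_shift demo_shift = { DEMO_MASK_SH_LIST(__SHIFT) };
	static const struct demo_mask demo_mask = { DEMO_MASK_SH_LIST(_MASK) };

	int main(void)
	{
		printf("shift=%u mask=0x%08x\n",
		       (unsigned)demo_shift.FIELD_X, (unsigned)demo_mask.FIELD_X);
		return 0;
	}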
diff --git a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c
index b29b2c99a564..1d741bca2211 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c
@@ -1,5 +1,6 @@
/*
* Copyright 2018 Advanced Micro Devices, Inc.
+ * Copyright 2019 Raptor Engineering, LLC
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
@@ -28,6 +29,8 @@
#include "dm_services.h"
#include "dc.h"
+#include "dcn21_init.h"
+
#include "resource.h"
#include "include/irq_service_interface.h"
#include "dcn20/dcn20_resource.h"
@@ -60,6 +63,8 @@
#include "dcn20/dcn20_dwb.h"
#include "dcn20/dcn20_mmhubbub.h"
+#include "dpcs/dpcs_2_1_0_offset.h"
+#include "dpcs/dpcs_2_1_0_sh_mask.h"
#include "renoir_ip_offset.h"
#include "dcn/dcn_2_1_0_offset.h"
@@ -78,6 +83,7 @@
#include "dcn21_resource.h"
#include "vm_helper.h"
#include "dcn20/dcn20_vmid.h"
+#include "../dce/dmub_psr.h"
#define SOC_BOUNDING_BOX_VALID false
#define DC_LOGGER_INIT(logger)
@@ -90,11 +96,7 @@ struct _vcs_dpi_ip_params_st dcn2_1_ip = {
.gpuvm_max_page_table_levels = 1,
.hostvm_max_page_table_levels = 4,
.hostvm_cached_page_table_levels = 2,
-#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT
.num_dsc = 3,
-#else
- .num_dsc = 0,
-#endif
.rob_buffer_size_kbytes = 168,
.det_buffer_size_kbytes = 164,
.dpte_buffer_size_in_pte_reqs_luma = 44,
@@ -352,7 +354,7 @@ static const struct bios_registers bios_regs = {
};
static const struct dce_dmcu_registers dmcu_regs = {
- DMCU_DCN10_REG_LIST()
+ DMCU_DCN20_REG_LIST()
};
static const struct dce_dmcu_shift dmcu_shift = {
@@ -375,20 +377,6 @@ static const struct dce_abm_mask abm_mask = {
ABM_MASK_SH_LIST_DCN20(_MASK)
};
-#ifdef CONFIG_DRM_AMD_DC_DMUB
-static const struct dcn21_dmcub_registers dmcub_regs = {
- DMCUB_REG_LIST_DCN()
-};
-
-static const struct dcn21_dmcub_shift dmcub_shift = {
- DMCUB_COMMON_MASK_SH_LIST_BASE(__SHIFT)
-};
-
-static const struct dcn21_dmcub_mask dmcub_mask = {
- DMCUB_COMMON_MASK_SH_LIST_BASE(_MASK)
-};
-#endif
-
#define audio_regs(id)\
[id] = {\
AUD_COMMON_REG_LIST(id)\
@@ -478,15 +466,18 @@ static const struct dcn20_mpc_registers mpc_regs = {
MPC_OUT_MUX_REG_LIST_DCN2_0(0),
MPC_OUT_MUX_REG_LIST_DCN2_0(1),
MPC_OUT_MUX_REG_LIST_DCN2_0(2),
- MPC_OUT_MUX_REG_LIST_DCN2_0(3)
+ MPC_OUT_MUX_REG_LIST_DCN2_0(3),
+ MPC_DBG_REG_LIST_DCN2_0()
};
static const struct dcn20_mpc_shift mpc_shift = {
- MPC_COMMON_MASK_SH_LIST_DCN2_0(__SHIFT)
+ MPC_COMMON_MASK_SH_LIST_DCN2_0(__SHIFT),
+ MPC_DEBUG_REG_LIST_SH_DCN20
};
static const struct dcn20_mpc_mask mpc_mask = {
- MPC_COMMON_MASK_SH_LIST_DCN2_0(_MASK)
+ MPC_COMMON_MASK_SH_LIST_DCN2_0(_MASK),
+ MPC_DEBUG_REG_LIST_MASK_DCN20
};
#define hubp_regs(id)\
@@ -554,7 +545,6 @@ static const struct dcn20_vmid_mask vmid_masks = {
DCN20_VMID_MASK_SH_LIST(_MASK)
};
-#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT
#define dsc_regsDCN20(id)\
[id] = {\
DSC_REG_LIST_DCN20(id)\
@@ -576,7 +566,6 @@ static const struct dcn20_dsc_shift dsc_shift = {
static const struct dcn20_dsc_mask dsc_mask = {
DSC_REG_LIST_SH_MASK_DCN20(_MASK)
};
-#endif
#define ipp_regs(id)\
[id] = {\
@@ -623,6 +612,7 @@ static const struct dce110_aux_registers aux_engine_regs[] = {
#define tf_regs(id)\
[id] = {\
TF_REG_LIST_DCN20(id),\
+ TF_REG_LIST_DCN20_COMMON_APPEND(id),\
}
static const struct dcn2_dpp_registers tf_regs[] = {
@@ -633,11 +623,13 @@ static const struct dcn2_dpp_registers tf_regs[] = {
};
static const struct dcn2_dpp_shift tf_shift = {
- TF_REG_LIST_SH_MASK_DCN20(__SHIFT)
+ TF_REG_LIST_SH_MASK_DCN20(__SHIFT),
+ TF_DEBUG_REG_LIST_SH_DCN20
};
static const struct dcn2_dpp_mask tf_mask = {
- TF_REG_LIST_SH_MASK_DCN20(_MASK)
+ TF_REG_LIST_SH_MASK_DCN20(_MASK),
+ TF_DEBUG_REG_LIST_MASK_DCN20
};
#define stream_enc_regs(id)\
@@ -672,7 +664,7 @@ static const struct dcn10_stream_encoder_mask se_mask = {
static void dcn21_pp_smu_destroy(struct pp_smu_funcs **pp_smu);
static int dcn21_populate_dml_pipes_from_context(
- struct dc *dc, struct resource_context *res_ctx, display_e2e_pipe_params_st *pipes);
+ struct dc *dc, struct dc_state *context, display_e2e_pipe_params_st *pipes);
static struct input_pixel_processor *dcn21_ipp_create(
struct dc_context *ctx, uint32_t inst)
@@ -773,9 +765,7 @@ static const struct resource_caps res_cap_rn = {
.num_dwb = 1,
.num_ddc = 5,
.num_vmid = 1,
-#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT
.num_dsc = 3,
-#endif
};
#ifdef DIAGS_BUILD
@@ -800,9 +790,7 @@ static const struct resource_caps res_cap_rn_FPGA_2pipe_dsc = {
.num_pll = 4,
.num_dwb = 1,
.num_ddc = 4,
-#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT
.num_dsc = 2,
-#endif
};
#endif
@@ -847,6 +835,8 @@ static const struct dc_debug_options debug_defaults_drv = {
.scl_reset_length10 = true,
.sanity_checks = true,
.disable_48mhz_pwrdwn = false,
+ .nv12_iflip_vm_wa = true,
+ .usbc_combo_phy_reset_wa = true
};
static const struct dc_debug_options debug_defaults_diags = {
@@ -869,7 +859,7 @@ enum dcn20_clk_src_array_id {
DCN20_CLK_SRC_TOTAL_DCN21
};
-static void destruct(struct dcn21_resource_pool *pool)
+static void dcn21_resource_destruct(struct dcn21_resource_pool *pool)
{
unsigned int i;
@@ -880,12 +870,10 @@ static void destruct(struct dcn21_resource_pool *pool)
}
}
-#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT
for (i = 0; i < pool->base.res_cap->num_dsc; i++) {
if (pool->base.dscs[i] != NULL)
dcn20_dsc_destroy(&pool->base.dscs[i]);
}
-#endif
if (pool->base.mpc != NULL) {
kfree(TO_DCN20_MPC(pool->base.mpc));
@@ -972,11 +960,6 @@ static void destruct(struct dcn21_resource_pool *pool)
if (pool->base.dmcu != NULL)
dce_dmcu_destroy(&pool->base.dmcu);
-#ifdef CONFIG_DRM_AMD_DC_DMUB
- if (pool->base.dmcub != NULL)
- dcn21_dmcub_destroy(&pool->base.dmcub);
-#endif
-
if (pool->base.dccg != NULL)
dcn_dccg_destroy(&pool->base.dccg);
@@ -1010,11 +993,9 @@ static void calculate_wm_set_for_vlevel(
wm_set->cstate_pstate.cstate_exit_ns = get_wm_stutter_exit(dml, pipes, pipe_cnt) * 1000;
wm_set->cstate_pstate.pstate_change_ns = get_wm_dram_clock_change(dml, pipes, pipe_cnt) * 1000;
wm_set->pte_meta_urgent_ns = get_wm_memory_trip(dml, pipes, pipe_cnt) * 1000;
-#if defined(CONFIG_DRM_AMD_DC_DCN2_1)
wm_set->frac_urg_bw_nom = get_fraction_of_urgent_bandwidth(dml, pipes, pipe_cnt) * 1000;
wm_set->frac_urg_bw_flip = get_fraction_of_urgent_bandwidth_imm_flip(dml, pipes, pipe_cnt) * 1000;
wm_set->urgent_latency_ns = get_urgent_latency(dml, pipes, pipe_cnt) * 1000;
-#endif
dml->soc.dram_clock_change_latency_us = dram_clock_change_latency_cached;
}
@@ -1023,7 +1004,8 @@ static void patch_bounding_box(struct dc *dc, struct _vcs_dpi_soc_bounding_box_s
{
int i;
- kernel_fpu_begin();
+ DC_FP_START();
+
if (dc->bb_overrides.sr_exit_time_ns) {
for (i = 0; i < WM_SET_COUNT; i++) {
dc->clk_mgr->bw_params->wm_table.entries[i].sr_exit_time_us =
@@ -1049,7 +1031,7 @@ static void patch_bounding_box(struct dc *dc, struct _vcs_dpi_soc_bounding_box_s
}
}
- kernel_fpu_end();
+ DC_FP_END();
}
void dcn21_calculate_wm(
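kernel_fpu_begin()/kernel_fpu_end() are x86-only, so the bounding-box math above switches to the arch-neutral DC_FP_START()/DC_FP_END() wrappers, in step with the PPC64 hard-float support added to the DML Makefile later in this patch. Roughly, such a wrapper maps to per-architecture FPU entry points; this is a sketch of the shape only, and the driver's real macros also handle preemption and CPU-feature checks:

	#if defined(CONFIG_X86)
	#include <asm/fpu/api.h>
	#define DC_FP_START()	kernel_fpu_begin()
	#define DC_FP_END()	kernel_fpu_end()
	#elif defined(CONFIG_PPC64)
	#include <asm/switch_to.h>
	#define DC_FP_START()	enable_kernel_fp()
	#define DC_FP_END()	disable_kernel_fp()
	#else
	#define DC_FP_START()	do { } while (0)
	#define DC_FP_END()	do { } while (0)
	#endif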
@@ -1099,10 +1081,10 @@ void dcn21_calculate_wm(
if (pipe_cnt != pipe_idx) {
if (dc->res_pool->funcs->populate_dml_pipes)
pipe_cnt = dc->res_pool->funcs->populate_dml_pipes(dc,
- &context->res_ctx, pipes);
+ context, pipes);
else
pipe_cnt = dcn21_populate_dml_pipes_from_context(dc,
- &context->res_ctx, pipes);
+ context, pipes);
}
*out_pipe_cnt = pipe_cnt;
@@ -1192,7 +1174,7 @@ static void dcn21_destroy_resource_pool(struct resource_pool **pool)
{
struct dcn21_resource_pool *dcn21_pool = TO_DCN21_RES_POOL(*pool);
- destruct(dcn21_pool);
+ dcn21_resource_destruct(dcn21_pool);
kfree(dcn21_pool);
*pool = NULL;
}
@@ -1331,7 +1313,6 @@ static void read_dce_straps(
}
-#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT
struct display_stream_compressor *dcn21_dsc_create(
struct dc_context *ctx, uint32_t inst)
@@ -1347,16 +1328,9 @@ struct display_stream_compressor *dcn21_dsc_create(
dsc2_construct(dsc, ctx, inst, &dsc_regs[inst], &dsc_shift, &dsc_mask);
return &dsc->base;
}
-#endif
static void update_bw_bounding_box(struct dc *dc, struct clk_bw_params *bw_params)
{
- /*
- TODO: Fix this function to calcualte correct values.
- There are known issues with this function currently
- that will need to be investigated. Use hardcoded known good values for now.
-
-
struct dcn21_resource_pool *pool = TO_DCN21_RES_POOL(dc->res_pool);
struct clk_limit_table *clk_table = &bw_params->clk_table;
int i;
@@ -1371,11 +1345,14 @@ static void update_bw_bounding_box(struct dc *dc, struct clk_bw_params *bw_param
dcn2_1_soc.clock_limits[i].dcfclk_mhz = clk_table->entries[i].dcfclk_mhz;
dcn2_1_soc.clock_limits[i].fabricclk_mhz = clk_table->entries[i].fclk_mhz;
dcn2_1_soc.clock_limits[i].socclk_mhz = clk_table->entries[i].socclk_mhz;
- dcn2_1_soc.clock_limits[i].dram_speed_mts = clk_table->entries[i].memclk_mhz * 16 / 1000;
+ dcn2_1_soc.clock_limits[i].dram_speed_mts = clk_table->entries[i].memclk_mhz * 2;
}
- dcn2_1_soc.clock_limits[i] = dcn2_1_soc.clock_limits[i - i];
+ dcn2_1_soc.clock_limits[i] = dcn2_1_soc.clock_limits[i - 1];
dcn2_1_soc.num_states = i;
- */
+
+ // Diags does not retrieve proper values from the SMU, so do not update the DML instance for diags
+ if (!IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment) && !IS_DIAG_DC(dc->ctx->dce_environment))
+ dml_init_instance(&dc->dml, &dcn2_1_soc, &dcn2_1_ip, DML_PROJECT_DCN21);
}
/* Temporary placeholder until we can get them from fuse */
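With the TODO block removed, update_bw_bounding_box() above is live again: it copies each SMU-reported clock level into the DML clock limits, converts the memory clock from MHz to an effective DDR data rate in MT/s (two transfers per clock, replacing the old "* 16 / 1000" scaling), duplicates the top level, and re-initializes the DML instance outside of diags. A compact sketch of the translation with stand-in types:

	#include <stddef.h>

	/* Stand-ins for the SMU clock table and DML clock-limit entries. */
	struct clk_level { int dcfclk_mhz, fclk_mhz, socclk_mhz, memclk_mhz; };
	struct dml_limit { int dcfclk_mhz, fabricclk_mhz, socclk_mhz, dram_speed_mts; };

	/* 'out' must hold n + 1 entries: DML expects the top level repeated. */
	static void translate_clk_table(const struct clk_level *in, size_t n,
					struct dml_limit *out)
	{
		size_t i;

		for (i = 0; i < n; i++) {
			out[i].dcfclk_mhz = in[i].dcfclk_mhz;
			out[i].fabricclk_mhz = in[i].fclk_mhz;
			out[i].socclk_mhz = in[i].socclk_mhz;
			/* DDR moves two transfers per clock: MHz -> MT/s. */
			out[i].dram_speed_mts = in[i].memclk_mhz * 2;
		}
		out[n] = out[n - 1];
	}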
@@ -1529,8 +1506,9 @@ static const struct encoder_feature_support link_enc_feature = {
#define link_regs(id, phyid)\
[id] = {\
- LE_DCN10_REG_LIST(id), \
+ LE_DCN2_REG_LIST(id), \
UNIPHY_DCN2_REG_LIST(phyid), \
+ DPCS_DCN21_REG_LIST(id), \
SRI(DP_DPHY_INTERNAL_CTRL, DP, id) \
}
@@ -1569,11 +1547,13 @@ static const struct dcn10_link_enc_hpd_registers link_enc_hpd_regs[] = {
};
static const struct dcn10_link_enc_shift le_shift = {
- LINK_ENCODER_MASK_SH_LIST_DCN20(__SHIFT)
+ LINK_ENCODER_MASK_SH_LIST_DCN20(__SHIFT),\
+ DPCS_DCN21_MASK_SH_LIST(__SHIFT)
};
static const struct dcn10_link_enc_mask le_mask = {
- LINK_ENCODER_MASK_SH_LIST_DCN20(_MASK)
+ LINK_ENCODER_MASK_SH_LIST_DCN20(_MASK),\
+ DPCS_DCN21_MASK_SH_LIST(_MASK)
};
static int map_transmitter_id_to_phy_instance(
@@ -1639,10 +1619,11 @@ static uint32_t read_pipe_fuses(struct dc_context *ctx)
}
static int dcn21_populate_dml_pipes_from_context(
- struct dc *dc, struct resource_context *res_ctx, display_e2e_pipe_params_st *pipes)
+ struct dc *dc, struct dc_state *context, display_e2e_pipe_params_st *pipes)
{
- uint32_t pipe_cnt = dcn20_populate_dml_pipes_from_context(dc, res_ctx, pipes);
+ uint32_t pipe_cnt = dcn20_populate_dml_pipes_from_context(dc, context, pipes);
int i;
+ struct resource_context *res_ctx = &context->res_ctx;
for (i = 0; i < dc->res_pool->pipe_count; i++) {
@@ -1671,7 +1652,7 @@ static struct resource_funcs dcn21_res_pool_funcs = {
.update_bw_bounding_box = update_bw_bounding_box
};
-static bool construct(
+static bool dcn21_resource_construct(
uint8_t num_virtual_links,
struct dc *dc,
struct dcn21_resource_pool *pool)
@@ -1711,6 +1692,7 @@ static bool construct(
dc->caps.post_blend_color_processing = true;
dc->caps.force_dp_tps4_for_cp2520 = true;
dc->caps.extended_aux_timeout_support = true;
+ dc->caps.dmcub_support = true;
if (dc->ctx->dce_environment == DCE_ENV_PRODUCTION_DRV)
dc->debug = debug_defaults_drv;
@@ -1760,7 +1742,7 @@ static bool construct(
goto create_fail;
}
- pool->base.dmcu = dcn20_dmcu_create(ctx,
+ pool->base.dmcu = dcn21_dmcu_create(ctx,
&dmcu_regs,
&dmcu_shift,
&dmcu_mask);
@@ -1770,6 +1752,10 @@ static bool construct(
goto create_fail;
}
+ // Leave as NULL so the current DMCU PSR programming sequence is unaffected;
+ // will be hooked up once the DMUB PSR path is confirmed working
+ pool->base.psr = NULL;
+
pool->base.abm = dce_abm_create(ctx,
&abm_regs,
&abm_shift,
@@ -1780,18 +1766,6 @@ static bool construct(
goto create_fail;
}
-#ifdef CONFIG_DRM_AMD_DC_DMUB
- pool->base.dmcub = dcn21_dmcub_create(ctx,
- &dmcub_regs,
- &dmcub_shift,
- &dmcub_mask);
- if (pool->base.dmcub == NULL) {
- dm_error("DC: failed to create dmcub!\n");
- BREAK_TO_DEBUGGER();
- goto create_fail;
- }
-#endif
-
pool->base.pp_smu = dcn21_pp_smu_create(ctx);
num_pipes = dcn2_1_ip.max_num_dpp;
@@ -1818,41 +1792,41 @@ static bool construct(
if ((pipe_fuses & (1 << i)) != 0)
continue;
- pool->base.hubps[i] = dcn21_hubp_create(ctx, i);
- if (pool->base.hubps[i] == NULL) {
+ pool->base.hubps[j] = dcn21_hubp_create(ctx, i);
+ if (pool->base.hubps[j] == NULL) {
BREAK_TO_DEBUGGER();
dm_error(
"DC: failed to create memory input!\n");
goto create_fail;
}
- pool->base.ipps[i] = dcn21_ipp_create(ctx, i);
- if (pool->base.ipps[i] == NULL) {
+ pool->base.ipps[j] = dcn21_ipp_create(ctx, i);
+ if (pool->base.ipps[j] == NULL) {
BREAK_TO_DEBUGGER();
dm_error(
"DC: failed to create input pixel processor!\n");
goto create_fail;
}
- pool->base.dpps[i] = dcn21_dpp_create(ctx, i);
- if (pool->base.dpps[i] == NULL) {
+ pool->base.dpps[j] = dcn21_dpp_create(ctx, i);
+ if (pool->base.dpps[j] == NULL) {
BREAK_TO_DEBUGGER();
dm_error(
"DC: failed to create dpps!\n");
goto create_fail;
}
- pool->base.opps[i] = dcn21_opp_create(ctx, i);
- if (pool->base.opps[i] == NULL) {
+ pool->base.opps[j] = dcn21_opp_create(ctx, i);
+ if (pool->base.opps[j] == NULL) {
BREAK_TO_DEBUGGER();
dm_error(
"DC: failed to create output pixel processor!\n");
goto create_fail;
}
- pool->base.timing_generators[i] = dcn21_timing_generator_create(
+ pool->base.timing_generators[j] = dcn21_timing_generator_create(
ctx, i);
- if (pool->base.timing_generators[i] == NULL) {
+ if (pool->base.timing_generators[j] == NULL) {
BREAK_TO_DEBUGGER();
dm_error("DC: failed to create tg!\n");
goto create_fail;
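The i-to-j change in this hunk is a pipe-harvesting fix: i walks hardware pipe instances, some of which may be fused off, while j densely packs the successfully created objects so later code can iterate from 0 to pipe_count - 1 without hitting NULL holes. The pattern in isolation, with stand-in types rather than the driver's:

	#include <stddef.h>
	#include <stdint.h>

	#define MAX_PIPES 4

	struct hubp { int hw_inst; };

	static struct hubp pipes_storage[MAX_PIPES];

	static struct hubp *hubp_create(int hw_inst)
	{
		pipes_storage[hw_inst].hw_inst = hw_inst;
		return &pipes_storage[hw_inst];
	}

	static size_t create_unfused(struct hubp *out[], uint32_t pipe_fuses)
	{
		size_t j = 0;
		int i;

		for (i = 0; i < MAX_PIPES; i++) {
			if (pipe_fuses & (1u << i))
				continue;		/* pipe i fused off: skip it */
			out[j++] = hubp_create(i);	/* dense slot j keeps hw index i */
		}
		return j;				/* usable pipe count */
	}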
@@ -1896,7 +1870,6 @@ static bool construct(
goto create_fail;
}
-#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT
for (i = 0; i < pool->base.res_cap->num_dsc; i++) {
pool->base.dscs[i] = dcn21_dsc_create(ctx, i);
if (pool->base.dscs[i] == NULL) {
@@ -1905,7 +1878,6 @@ static bool construct(
goto create_fail;
}
}
-#endif
if (!dcn20_dwbc_create(ctx, &pool->base)) {
BREAK_TO_DEBUGGER();
@@ -1936,7 +1908,7 @@ static bool construct(
create_fail:
- destruct(pool);
+ dcn21_resource_destruct(pool);
return false;
}
@@ -1951,7 +1923,7 @@ struct resource_pool *dcn21_create_resource_pool(
if (!pool)
return NULL;
- if (construct(init_data->num_virtual_links, dc, pool))
+ if (dcn21_resource_construct(init_data->num_virtual_links, dc, pool))
return &pool->base;
BREAK_TO_DEBUGGER();
diff --git a/drivers/gpu/drm/amd/display/dc/dm_helpers.h b/drivers/gpu/drm/amd/display/dc/dm_helpers.h
index 94b75e942607..8bde1d688f2e 100644
--- a/drivers/gpu/drm/amd/display/dc/dm_helpers.h
+++ b/drivers/gpu/drm/amd/display/dc/dm_helpers.h
@@ -118,13 +118,11 @@ bool dm_helpers_submit_i2c(
const struct dc_link *link,
struct i2c_command *cmd);
-#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT
bool dm_helpers_dp_write_dsc_enable(
struct dc_context *ctx,
const struct dc_stream_state *stream,
bool enable
);
-#endif
bool dm_helpers_is_dp_sink_present(
struct dc_link *link);
diff --git a/drivers/gpu/drm/amd/display/dc/dm_pp_smu.h b/drivers/gpu/drm/amd/display/dc/dm_pp_smu.h
index ef7df9ef6d7e..ae608c329366 100644
--- a/drivers/gpu/drm/amd/display/dc/dm_pp_smu.h
+++ b/drivers/gpu/drm/amd/display/dc/dm_pp_smu.h
@@ -41,12 +41,8 @@ enum pp_smu_ver {
*/
PP_SMU_UNSUPPORTED,
PP_SMU_VER_RV,
-#ifndef CONFIG_TRIM_DRM_AMD_DC_DCN2_0
PP_SMU_VER_NV,
-#endif
-#if defined(CONFIG_DRM_AMD_DC_DCN2_1)
PP_SMU_VER_RN,
-#endif
PP_SMU_VER_MAX
};
@@ -143,7 +139,6 @@ struct pp_smu_funcs_rv {
void (*set_pme_wa_enable)(struct pp_smu *pp);
};
-#ifndef CONFIG_TRIM_DRM_AMD_DC_DCN2_0
/* Used by pp_smu_funcs_nv.set_voltage_by_freq
*
*/
@@ -247,7 +242,6 @@ struct pp_smu_funcs_nv {
enum pp_smu_status (*set_pstate_handshake_support)(struct pp_smu *pp,
BOOLEAN pstate_handshake_supported);
};
-#endif
#define PP_SMU_NUM_SOCCLK_DPM_LEVELS 8
#define PP_SMU_NUM_DCFCLK_DPM_LEVELS 8
@@ -291,12 +285,8 @@ struct pp_smu_funcs {
struct pp_smu ctx;
union {
struct pp_smu_funcs_rv rv_funcs;
-#ifndef CONFIG_TRIM_DRM_AMD_DC_DCN2_0
struct pp_smu_funcs_nv nv_funcs;
-#endif
-#if defined(CONFIG_DRM_AMD_DC_DCN2_1)
struct pp_smu_funcs_rn rn_funcs;
-#endif
};
};
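With the version guards gone, all three per-family callback sets coexist in the union unconditionally, and a caller selects the valid member from the version stored in the embedded pp_smu context. An illustrative dispatch, assuming the ver field of struct pp_smu identifies the family (the helper itself is hypothetical):

	#include "dm_pp_smu.h"	/* struct pp_smu_funcs, enum pp_smu_ver */

	static const char *pp_smu_family(const struct pp_smu_funcs *pp)
	{
		switch (pp->ctx.ver) {
		case PP_SMU_VER_RV: return "Raven";	/* pp->rv_funcs valid */
		case PP_SMU_VER_NV: return "Navi";	/* pp->nv_funcs valid */
		case PP_SMU_VER_RN: return "Renoir";	/* pp->rn_funcs valid */
		default: return "unsupported";
		}
	}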
diff --git a/drivers/gpu/drm/amd/display/dc/dm_services.h b/drivers/gpu/drm/amd/display/dc/dm_services.h
index 1a0429744630..968ff1fef486 100644
--- a/drivers/gpu/drm/amd/display/dc/dm_services.h
+++ b/drivers/gpu/drm/amd/display/dc/dm_services.h
@@ -40,6 +40,9 @@
#undef DEPRECATED
+struct dmub_srv;
+struct dc_dmub_srv;
+
irq_handler_idx dm_register_interrupt(
struct dc_context *ctx,
struct dc_interrupt_params *int_params,
@@ -139,6 +142,13 @@ uint32_t generic_reg_update_ex(const struct dc_context *ctx,
uint32_t addr, int n,
uint8_t shift1, uint32_t mask1, uint32_t field_value1, ...);
+struct dc_dmub_srv *dc_dmub_srv_create(struct dc *dc, struct dmub_srv *dmub);
+void dc_dmub_srv_destroy(struct dc_dmub_srv **dmub_srv);
+
+void reg_sequence_start_gather(const struct dc_context *ctx);
+void reg_sequence_start_execute(const struct dc_context *ctx);
+void reg_sequence_wait_done(const struct dc_context *ctx);
+
#define FD(reg_field) reg_field ## __SHIFT, \
reg_field ## _MASK
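The new declarations sketch the DMCUB-assisted register programming flow: writes issued between gather and execute are buffered, handed to the display firmware as one sequence, then waited on. A hypothetical caller showing only the intended call order:

	#include "dm_services.h"

	static void program_regs_via_dmub(const struct dc_context *ctx)
	{
		reg_sequence_start_gather(ctx);	/* buffer subsequent register writes */

		/* ... REG_SET/REG_UPDATE-style writes are gathered here ... */

		reg_sequence_start_execute(ctx);	/* submit the batch to DMCUB */
		reg_sequence_wait_done(ctx);	/* block until the firmware applied it */
	}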
diff --git a/drivers/gpu/drm/amd/display/dc/dm_services_types.h b/drivers/gpu/drm/amd/display/dc/dm_services_types.h
index a3d1be20dd9d..b52ba6ffabe1 100644
--- a/drivers/gpu/drm/amd/display/dc/dm_services_types.h
+++ b/drivers/gpu/drm/amd/display/dc/dm_services_types.h
@@ -220,6 +220,7 @@ struct dm_bl_data_point {
};
/* Total size of the structure should not exceed 256 bytes */
+#define BL_DATA_POINTS 99
struct dm_acpi_atif_backlight_caps {
uint16_t size; /* Bytes 0-1 (2 bytes) */
uint16_t flags; /* Bytes 2-3 (2 bytes) */
@@ -229,7 +230,7 @@ struct dm_acpi_atif_backlight_caps {
uint8_t min_input_signal; /* Byte 7 */
uint8_t max_input_signal; /* Byte 8 */
uint8_t num_data_points; /* Byte 9 */
- struct dm_bl_data_point data_points[99]; /* Bytes 10-207 (198 bytes)*/
+ struct dm_bl_data_point data_points[BL_DATA_POINTS]; /* Bytes 10-207 (198 bytes)*/
};
enum dm_acpi_display_type {
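Naming the 99 as BL_DATA_POINTS also makes the 256-byte cap from the comment checkable at compile time. A possible guard, not part of the patch, assuming the structure definition is in scope and C11's static_assert is available:

	#include <assert.h>

	/* 10 header bytes + 99 two-byte data points = 208 <= 256. */
	static_assert(sizeof(struct dm_acpi_atif_backlight_caps) <= 256,
		      "ATIF backlight caps must stay within 256 bytes");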
diff --git a/drivers/gpu/drm/amd/display/dc/dml/Makefile b/drivers/gpu/drm/amd/display/dc/dml/Makefile
index 8df251626e22..7ee8b8460a9b 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/Makefile
+++ b/drivers/gpu/drm/amd/display/dc/dml/Makefile
@@ -1,5 +1,6 @@
#
# Copyright 2017 Advanced Micro Devices, Inc.
+# Copyright 2019 Raptor Engineering, LLC
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
@@ -24,7 +25,13 @@
# It provides the general basic services required by other DAL
# subcomponents.
+ifdef CONFIG_X86
dml_ccflags := -mhard-float -msse
+endif
+
+ifdef CONFIG_PPC64
+dml_ccflags := -mhard-float -maltivec
+endif
ifdef CONFIG_CC_IS_GCC
ifeq ($(call cc-ifversion, -lt, 0701, y), y)
@@ -32,6 +39,7 @@ IS_OLD_GCC = 1
endif
endif
+ifdef CONFIG_X86
ifdef IS_OLD_GCC
# Stack alignment mismatch, proceed with caution.
# GCC < 7.1 cannot compile code using `double` and -mpreferred-stack-boundary=3
@@ -40,17 +48,16 @@ dml_ccflags += -mpreferred-stack-boundary=4
else
dml_ccflags += -msse2
endif
+endif
CFLAGS_$(AMDDALPATH)/dc/dml/display_mode_lib.o := $(dml_ccflags)
-ifdef CONFIG_DRM_AMD_DC_DCN2_0
+ifdef CONFIG_DRM_AMD_DC_DCN
CFLAGS_$(AMDDALPATH)/dc/dml/display_mode_vba.o := $(dml_ccflags)
CFLAGS_$(AMDDALPATH)/dc/dml/dcn20/display_mode_vba_20.o := $(dml_ccflags)
CFLAGS_$(AMDDALPATH)/dc/dml/dcn20/display_rq_dlg_calc_20.o := $(dml_ccflags)
CFLAGS_$(AMDDALPATH)/dc/dml/dcn20/display_mode_vba_20v2.o := $(dml_ccflags)
CFLAGS_$(AMDDALPATH)/dc/dml/dcn20/display_rq_dlg_calc_20v2.o := $(dml_ccflags)
-endif
-ifdef CONFIG_DRM_AMD_DC_DCN2_1
CFLAGS_$(AMDDALPATH)/dc/dml/dcn21/display_mode_vba_21.o := $(dml_ccflags)
CFLAGS_$(AMDDALPATH)/dc/dml/dcn21/display_rq_dlg_calc_21.o := $(dml_ccflags)
endif
@@ -61,11 +68,9 @@ CFLAGS_$(AMDDALPATH)/dc/dml/dml_common_defs.o := $(dml_ccflags)
DML = display_mode_lib.o display_rq_dlg_helpers.o dml1_display_rq_dlg_calc.o \
dml_common_defs.o
-ifdef CONFIG_DRM_AMD_DC_DCN2_0
+ifdef CONFIG_DRM_AMD_DC_DCN
DML += display_mode_vba.o dcn20/display_rq_dlg_calc_20.o dcn20/display_mode_vba_20.o
DML += dcn20/display_rq_dlg_calc_20v2.o dcn20/display_mode_vba_20v2.o
-endif
-ifdef CONFIG_DRM_AMD_DC_DCN2_1
DML += dcn21/display_rq_dlg_calc_21.o dcn21/display_mode_vba_21.o
endif
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_mode_vba_20.c b/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_mode_vba_20.c
index 6c6c486b774a..e7a8ac7a1f22 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_mode_vba_20.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_mode_vba_20.c
@@ -937,7 +937,7 @@ static unsigned int CalculateVMAndRowBytes(
*MetaRowByte = 0;
}
- if (SurfaceTiling == dm_sw_linear || SurfaceTiling == dm_sw_gfx7_2d_thin_gl || SurfaceTiling == dm_sw_gfx7_2d_thin_lvp) {
+ if (SurfaceTiling == dm_sw_linear || SurfaceTiling == dm_sw_gfx7_2d_thin_gl || SurfaceTiling == dm_sw_gfx7_2d_thin_l_vp) {
MacroTileSizeBytes = 256;
MacroTileHeight = BlockHeight256Bytes;
} else if (SurfaceTiling == dm_sw_4kb_s || SurfaceTiling == dm_sw_4kb_s_x
@@ -1335,11 +1335,11 @@ static void dml20_DISPCLKDPPCLKDCFCLKDeepSleepPrefetchParametersWatermarksAndPer
else
mode_lib->vba.SwathWidthSingleDPPY[k] = mode_lib->vba.ViewportHeight[k];
- if (mode_lib->vba.ODMCombineEnabled[k] == true)
+ if (mode_lib->vba.ODMCombineEnabled[k] == dm_odm_combine_mode_2to1)
MainPlaneDoesODMCombine = true;
for (j = 0; j < mode_lib->vba.NumberOfActivePlanes; ++j)
if (mode_lib->vba.BlendingAndTiming[k] == j
- && mode_lib->vba.ODMCombineEnabled[j] == true)
+ && mode_lib->vba.ODMCombineEnabled[j] == dm_odm_combine_mode_2to1)
MainPlaneDoesODMCombine = true;
if (MainPlaneDoesODMCombine == true)
@@ -2577,7 +2577,8 @@ static void dml20_DISPCLKDPPCLKDCFCLKDeepSleepPrefetchParametersWatermarksAndPer
mode_lib->vba.MinActiveDRAMClockChangeMargin
+ mode_lib->vba.DRAMClockChangeLatency;
- if (mode_lib->vba.MinActiveDRAMClockChangeMargin > 50) {
+ if (mode_lib->vba.DRAMClockChangeSupportsVActive &&
+ mode_lib->vba.MinActiveDRAMClockChangeMargin > 60) {
mode_lib->vba.DRAMClockChangeWatermark += 25;
mode_lib->vba.DRAMClockChangeSupport[0][0] = dm_dram_clock_change_vactive;
} else {
@@ -2847,12 +2848,12 @@ static void dml20_DisplayPipeConfiguration(struct display_mode_lib *mode_lib)
SwathWidth = mode_lib->vba.ViewportHeight[k];
}
- if (mode_lib->vba.ODMCombineEnabled[k] == true) {
+ if (mode_lib->vba.ODMCombineEnabled[k] == dm_odm_combine_mode_2to1) {
MainPlaneDoesODMCombine = true;
}
for (j = 0; j < mode_lib->vba.NumberOfActivePlanes; ++j) {
if (mode_lib->vba.BlendingAndTiming[k] == j
- && mode_lib->vba.ODMCombineEnabled[j] == true) {
+ && mode_lib->vba.ODMCombineEnabled[j] == dm_odm_combine_mode_2to1) {
MainPlaneDoesODMCombine = true;
}
}
@@ -3347,7 +3348,7 @@ void dml20_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
== dm_420_10))
|| (((mode_lib->vba.SurfaceTiling[k] == dm_sw_gfx7_2d_thin_gl
|| mode_lib->vba.SurfaceTiling[k]
- == dm_sw_gfx7_2d_thin_lvp)
+ == dm_sw_gfx7_2d_thin_l_vp)
&& !((mode_lib->vba.SourcePixelFormat[k]
== dm_444_64
|| mode_lib->vba.SourcePixelFormat[k]
@@ -3445,10 +3446,10 @@ void dml20_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
locals->FabricAndDRAMBandwidthPerState[i] * 1000)
* locals->PercentOfIdealDRAMFabricAndSDPPortBWReceivedAfterUrgLatencyPixelDataOnly / 100;
- locals->ReturnBWPerState[i] = locals->ReturnBWToDCNPerState;
+ locals->ReturnBWPerState[i][0] = locals->ReturnBWToDCNPerState;
if (locals->DCCEnabledInAnyPlane == true && locals->ReturnBWToDCNPerState > locals->DCFCLKPerState[i] * locals->ReturnBusWidth / 4) {
- locals->ReturnBWPerState[i] = dml_min(locals->ReturnBWPerState[i],
+ locals->ReturnBWPerState[i][0] = dml_min(locals->ReturnBWPerState[i][0],
locals->ReturnBWToDCNPerState * 4 * (1 - locals->UrgentLatency /
((locals->ROBBufferSizeInKByte - locals->PixelChunkSizeInKByte) * 1024
/ (locals->ReturnBWToDCNPerState - locals->DCFCLKPerState[i]
@@ -3459,7 +3460,7 @@ void dml20_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
+ (locals->ROBBufferSizeInKByte - locals->PixelChunkSizeInKByte) * 1024);
if (locals->DCCEnabledInAnyPlane && locals->CriticalPoint > 1 && locals->CriticalPoint < 4) {
- locals->ReturnBWPerState[i] = dml_min(locals->ReturnBWPerState[i],
+ locals->ReturnBWPerState[i][0] = dml_min(locals->ReturnBWPerState[i][0],
4 * locals->ReturnBWToDCNPerState *
(locals->ROBBufferSizeInKByte - locals->PixelChunkSizeInKByte) * 1024
* locals->ReturnBusWidth * locals->DCFCLKPerState[i] * locals->UrgentLatency /
@@ -3471,7 +3472,7 @@ void dml20_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
locals->DCFCLKPerState[i], locals->FabricAndDRAMBandwidthPerState[i] * 1000);
if (locals->DCCEnabledInAnyPlane == true && locals->ReturnBWToDCNPerState > locals->DCFCLKPerState[i] * locals->ReturnBusWidth / 4) {
- locals->ReturnBWPerState[i] = dml_min(locals->ReturnBWPerState[i],
+ locals->ReturnBWPerState[i][0] = dml_min(locals->ReturnBWPerState[i][0],
locals->ReturnBWToDCNPerState * 4 * (1 - locals->UrgentLatency /
((locals->ROBBufferSizeInKByte - locals->PixelChunkSizeInKByte) * 1024
/ (locals->ReturnBWToDCNPerState - locals->DCFCLKPerState[i]
@@ -3482,7 +3483,7 @@ void dml20_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
+ (locals->ROBBufferSizeInKByte - locals->PixelChunkSizeInKByte) * 1024);
if (locals->DCCEnabledInAnyPlane && locals->CriticalPoint > 1 && locals->CriticalPoint < 4) {
- locals->ReturnBWPerState[i] = dml_min(locals->ReturnBWPerState[i],
+ locals->ReturnBWPerState[i][0] = dml_min(locals->ReturnBWPerState[i][0],
4 * locals->ReturnBWToDCNPerState *
(locals->ROBBufferSizeInKByte - locals->PixelChunkSizeInKByte) * 1024
* locals->ReturnBusWidth * locals->DCFCLKPerState[i] * locals->UrgentLatency /
@@ -3520,12 +3521,12 @@ void dml20_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
for (i = 0; i <= mode_lib->vba.soc.num_states; i++) {
locals->UrgentRoundTripAndOutOfOrderLatencyPerState[i] =
(mode_lib->vba.RoundTripPingLatencyCycles + 32.0) / mode_lib->vba.DCFCLKPerState[i]
- + locals->UrgentOutOfOrderReturnPerChannel * mode_lib->vba.NumberOfChannels / locals->ReturnBWPerState[i];
- if ((mode_lib->vba.ROBBufferSizeInKByte - mode_lib->vba.PixelChunkSizeInKByte) * 1024.0 / locals->ReturnBWPerState[i]
+ + locals->UrgentOutOfOrderReturnPerChannel * mode_lib->vba.NumberOfChannels / locals->ReturnBWPerState[i][0];
+ if ((mode_lib->vba.ROBBufferSizeInKByte - mode_lib->vba.PixelChunkSizeInKByte) * 1024.0 / locals->ReturnBWPerState[i][0]
> locals->UrgentRoundTripAndOutOfOrderLatencyPerState[i]) {
- locals->ROBSupport[i] = true;
+ locals->ROBSupport[i][0] = true;
} else {
- locals->ROBSupport[i] = false;
+ locals->ROBSupport[i][0] = false;
}
}
/*Writeback Mode Support Check*/
@@ -3902,7 +3903,7 @@ void dml20_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
}
if (locals->MinDPPCLKUsingSingleDPP[k] * (1.0 + mode_lib->vba.DISPCLKDPPCLKDSCCLKDownSpreading / 100.0) <= mode_lib->vba.MaxDppclkRoundedDownToDFSGranularity
&& locals->SwathWidthYSingleDPP[k] <= locals->MaximumSwathWidth[k]
- && locals->ODMCombineEnablePerState[i][k] == false) {
+ && locals->ODMCombineEnablePerState[i][k] == dm_odm_combine_mode_disabled) {
locals->NoOfDPP[i][j][k] = 1;
locals->RequiredDPPCLK[i][j][k] =
locals->MinDPPCLKUsingSingleDPP[k] * (1.0 + mode_lib->vba.DISPCLKDPPCLKDSCCLKDownSpreading / 100.0);
@@ -3991,16 +3992,16 @@ void dml20_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
/*Viewport Size Check*/
for (i = 0; i <= mode_lib->vba.soc.num_states; i++) {
- locals->ViewportSizeSupport[i] = true;
+ locals->ViewportSizeSupport[i][0] = true;
for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) {
- if (locals->ODMCombineEnablePerState[i][k] == true) {
+ if (locals->ODMCombineEnablePerState[i][k] == dm_odm_combine_mode_2to1) {
if (dml_min(locals->SwathWidthYSingleDPP[k], dml_round(mode_lib->vba.HActive[k] / 2.0 * mode_lib->vba.HRatio[k]))
> locals->MaximumSwathWidth[k]) {
- locals->ViewportSizeSupport[i] = false;
+ locals->ViewportSizeSupport[i][0] = false;
}
} else {
if (locals->SwathWidthYSingleDPP[k] / 2.0 > locals->MaximumSwathWidth[k]) {
- locals->ViewportSizeSupport[i] = false;
+ locals->ViewportSizeSupport[i][0] = false;
}
}
}
@@ -4182,8 +4183,7 @@ void dml20_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
mode_lib->vba.DSCFormatFactor = 1;
}
if (locals->RequiresDSC[i][k] == true) {
- if (locals->ODMCombineEnablePerState[i][k]
- == true) {
+ if (locals->ODMCombineEnablePerState[i][k] == dm_odm_combine_mode_2to1) {
if (mode_lib->vba.PixelClockBackEnd[k] / 6.0 / mode_lib->vba.DSCFormatFactor
> (1.0 - mode_lib->vba.DISPCLKDPPCLKDSCCLKDownSpreading / 100.0) * mode_lib->vba.MaxDSCCLK[i]) {
locals->DSCCLKRequiredMoreThanSupported[i] =
@@ -4206,7 +4206,7 @@ void dml20_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
mode_lib->vba.TotalDSCUnitsRequired = 0.0;
for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) {
if (locals->RequiresDSC[i][k] == true) {
- if (locals->ODMCombineEnablePerState[i][k] == true) {
+ if (locals->ODMCombineEnablePerState[i][k] == dm_odm_combine_mode_2to1) {
mode_lib->vba.TotalDSCUnitsRequired =
mode_lib->vba.TotalDSCUnitsRequired + 2.0;
} else {
@@ -4248,7 +4248,7 @@ void dml20_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
mode_lib->vba.bpp = locals->OutputBppPerState[i][k];
}
if (locals->RequiresDSC[i][k] == true && mode_lib->vba.bpp != 0.0) {
- if (locals->ODMCombineEnablePerState[i][k] == false) {
+ if (locals->ODMCombineEnablePerState[i][k] == dm_odm_combine_mode_disabled) {
locals->DSCDelayPerState[i][k] =
dscceComputeDelay(
mode_lib->vba.DSCInputBitPerComponent[k],
@@ -4291,7 +4291,7 @@ void dml20_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
for (i = 0; i <= mode_lib->vba.soc.num_states; i++) {
for (j = 0; j < 2; j++) {
for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) {
- if (locals->ODMCombineEnablePerState[i][k] == true)
+ if (locals->ODMCombineEnablePerState[i][k] == dm_odm_combine_mode_2to1)
locals->SwathWidthYPerState[i][j][k] = dml_min(locals->SwathWidthYSingleDPP[k], dml_round(locals->HActive[k] / 2 * locals->HRatio[k]));
else
locals->SwathWidthYPerState[i][j][k] = locals->SwathWidthYSingleDPP[k] / locals->NoOfDPP[i][j][k];
@@ -4344,28 +4344,28 @@ void dml20_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
locals->EffectiveDETLBLinesLuma = dml_floor(locals->LinesInDETLuma + dml_min(
locals->LinesInDETLuma * locals->RequiredDISPCLK[i][j] * locals->BytePerPixelInDETY[k] *
- locals->PSCL_FACTOR[k] / locals->ReturnBWPerState[i],
+ locals->PSCL_FACTOR[k] / locals->ReturnBWPerState[i][0],
locals->EffectiveLBLatencyHidingSourceLinesLuma),
locals->SwathHeightYPerState[i][j][k]);
locals->EffectiveDETLBLinesChroma = dml_floor(locals->LinesInDETChroma + dml_min(
locals->LinesInDETChroma * locals->RequiredDISPCLK[i][j] * locals->BytePerPixelInDETC[k] *
- locals->PSCL_FACTOR_CHROMA[k] / locals->ReturnBWPerState[i],
+ locals->PSCL_FACTOR_CHROMA[k] / locals->ReturnBWPerState[i][0],
locals->EffectiveLBLatencyHidingSourceLinesChroma),
locals->SwathHeightCPerState[i][j][k]);
if (locals->BytePerPixelInDETC[k] == 0) {
locals->UrgentLatencySupportUsPerState[i][j][k] = locals->EffectiveDETLBLinesLuma * (locals->HTotal[k] / locals->PixelClock[k])
/ locals->VRatio[k] - locals->EffectiveDETLBLinesLuma * locals->SwathWidthYPerState[i][j][k] *
- dml_ceil(locals->BytePerPixelInDETY[k], 1) / (locals->ReturnBWPerState[i] / locals->NoOfDPP[i][j][k]);
+ dml_ceil(locals->BytePerPixelInDETY[k], 1) / (locals->ReturnBWPerState[i][0] / locals->NoOfDPP[i][j][k]);
} else {
locals->UrgentLatencySupportUsPerState[i][j][k] = dml_min(
locals->EffectiveDETLBLinesLuma * (locals->HTotal[k] / locals->PixelClock[k])
/ locals->VRatio[k] - locals->EffectiveDETLBLinesLuma * locals->SwathWidthYPerState[i][j][k] *
- dml_ceil(locals->BytePerPixelInDETY[k], 1) / (locals->ReturnBWPerState[i] / locals->NoOfDPP[i][j][k]),
+ dml_ceil(locals->BytePerPixelInDETY[k], 1) / (locals->ReturnBWPerState[i][0] / locals->NoOfDPP[i][j][k]),
locals->EffectiveDETLBLinesChroma * (locals->HTotal[k] / locals->PixelClock[k]) / (locals->VRatio[k] / 2) -
locals->EffectiveDETLBLinesChroma * locals->SwathWidthYPerState[i][j][k] / 2 *
- dml_ceil(locals->BytePerPixelInDETC[k], 2) / (locals->ReturnBWPerState[i] / locals->NoOfDPP[i][j][k]));
+ dml_ceil(locals->BytePerPixelInDETC[k], 2) / (locals->ReturnBWPerState[i][0] / locals->NoOfDPP[i][j][k]));
}
}
}
@@ -4405,14 +4405,14 @@ void dml20_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
locals->SwathHeightYThisState[k] = locals->SwathHeightYPerState[i][j][k];
locals->SwathHeightCThisState[k] = locals->SwathHeightCPerState[i][j][k];
locals->SwathWidthYThisState[k] = locals->SwathWidthYPerState[i][j][k];
- mode_lib->vba.ProjectedDCFCLKDeepSleep = dml_max(
- mode_lib->vba.ProjectedDCFCLKDeepSleep,
+ mode_lib->vba.ProjectedDCFCLKDeepSleep[0][0] = dml_max(
+ mode_lib->vba.ProjectedDCFCLKDeepSleep[0][0],
mode_lib->vba.PixelClock[k] / 16.0);
if (mode_lib->vba.BytePerPixelInDETC[k] == 0.0) {
if (mode_lib->vba.VRatio[k] <= 1.0) {
- mode_lib->vba.ProjectedDCFCLKDeepSleep =
+ mode_lib->vba.ProjectedDCFCLKDeepSleep[0][0] =
dml_max(
- mode_lib->vba.ProjectedDCFCLKDeepSleep,
+ mode_lib->vba.ProjectedDCFCLKDeepSleep[0][0],
1.1
* dml_ceil(
mode_lib->vba.BytePerPixelInDETY[k],
@@ -4422,9 +4422,9 @@ void dml20_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
* mode_lib->vba.PixelClock[k]
/ mode_lib->vba.NoOfDPP[i][j][k]);
} else {
- mode_lib->vba.ProjectedDCFCLKDeepSleep =
+ mode_lib->vba.ProjectedDCFCLKDeepSleep[0][0] =
dml_max(
- mode_lib->vba.ProjectedDCFCLKDeepSleep,
+ mode_lib->vba.ProjectedDCFCLKDeepSleep[0][0],
1.1
* dml_ceil(
mode_lib->vba.BytePerPixelInDETY[k],
@@ -4435,9 +4435,9 @@ void dml20_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
}
} else {
if (mode_lib->vba.VRatio[k] <= 1.0) {
- mode_lib->vba.ProjectedDCFCLKDeepSleep =
+ mode_lib->vba.ProjectedDCFCLKDeepSleep[0][0] =
dml_max(
- mode_lib->vba.ProjectedDCFCLKDeepSleep,
+ mode_lib->vba.ProjectedDCFCLKDeepSleep[0][0],
1.1
* dml_ceil(
mode_lib->vba.BytePerPixelInDETY[k],
@@ -4447,9 +4447,9 @@ void dml20_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
* mode_lib->vba.PixelClock[k]
/ mode_lib->vba.NoOfDPP[i][j][k]);
} else {
- mode_lib->vba.ProjectedDCFCLKDeepSleep =
+ mode_lib->vba.ProjectedDCFCLKDeepSleep[0][0] =
dml_max(
- mode_lib->vba.ProjectedDCFCLKDeepSleep,
+ mode_lib->vba.ProjectedDCFCLKDeepSleep[0][0],
1.1
* dml_ceil(
mode_lib->vba.BytePerPixelInDETY[k],
@@ -4459,9 +4459,9 @@ void dml20_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
* mode_lib->vba.RequiredDPPCLK[i][j][k]);
}
if (mode_lib->vba.VRatio[k] / 2.0 <= 1.0) {
- mode_lib->vba.ProjectedDCFCLKDeepSleep =
+ mode_lib->vba.ProjectedDCFCLKDeepSleep[0][0] =
dml_max(
- mode_lib->vba.ProjectedDCFCLKDeepSleep,
+ mode_lib->vba.ProjectedDCFCLKDeepSleep[0][0],
1.1
* dml_ceil(
mode_lib->vba.BytePerPixelInDETC[k],
@@ -4472,9 +4472,9 @@ void dml20_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
* mode_lib->vba.PixelClock[k]
/ mode_lib->vba.NoOfDPP[i][j][k]);
} else {
- mode_lib->vba.ProjectedDCFCLKDeepSleep =
+ mode_lib->vba.ProjectedDCFCLKDeepSleep[0][0] =
dml_max(
- mode_lib->vba.ProjectedDCFCLKDeepSleep,
+ mode_lib->vba.ProjectedDCFCLKDeepSleep[0][0],
1.1
* dml_ceil(
mode_lib->vba.BytePerPixelInDETC[k],
@@ -4510,7 +4510,7 @@ void dml20_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
&mode_lib->vba.PTEBufferSizeNotExceededY[i][j][k],
&mode_lib->vba.dpte_row_height[k],
&mode_lib->vba.meta_row_height[k]);
- mode_lib->vba.PrefetchLinesY[k] = CalculatePrefetchSourceLines(
+ mode_lib->vba.PrefetchLinesY[0][0][k] = CalculatePrefetchSourceLines(
mode_lib,
mode_lib->vba.VRatio[k],
mode_lib->vba.vtaps[k],
@@ -4549,7 +4549,7 @@ void dml20_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
&mode_lib->vba.PTEBufferSizeNotExceededC[i][j][k],
&mode_lib->vba.dpte_row_height_chroma[k],
&mode_lib->vba.meta_row_height_chroma[k]);
- mode_lib->vba.PrefetchLinesC[k] = CalculatePrefetchSourceLines(
+ mode_lib->vba.PrefetchLinesC[0][0][k] = CalculatePrefetchSourceLines(
mode_lib,
mode_lib->vba.VRatio[k] / 2.0,
mode_lib->vba.VTAPsChroma[k],
@@ -4563,14 +4563,14 @@ void dml20_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
mode_lib->vba.PDEAndMetaPTEBytesPerFrameC = 0.0;
mode_lib->vba.MetaRowBytesC = 0.0;
mode_lib->vba.DPTEBytesPerRowC = 0.0;
- locals->PrefetchLinesC[k] = 0.0;
+ locals->PrefetchLinesC[0][0][k] = 0.0;
locals->PTEBufferSizeNotExceededC[i][j][k] = true;
locals->PTEBufferSizeInRequestsForLuma = mode_lib->vba.PTEBufferSizeInRequestsLuma + mode_lib->vba.PTEBufferSizeInRequestsChroma;
}
- locals->PDEAndMetaPTEBytesPerFrame[k] =
+ locals->PDEAndMetaPTEBytesPerFrame[0][0][k] =
mode_lib->vba.PDEAndMetaPTEBytesPerFrameY + mode_lib->vba.PDEAndMetaPTEBytesPerFrameC;
- locals->MetaRowBytes[k] = mode_lib->vba.MetaRowBytesY + mode_lib->vba.MetaRowBytesC;
- locals->DPTEBytesPerRow[k] = mode_lib->vba.DPTEBytesPerRowY + mode_lib->vba.DPTEBytesPerRowC;
+ locals->MetaRowBytes[0][0][k] = mode_lib->vba.MetaRowBytesY + mode_lib->vba.MetaRowBytesC;
+ locals->DPTEBytesPerRow[0][0][k] = mode_lib->vba.DPTEBytesPerRowY + mode_lib->vba.DPTEBytesPerRowC;
CalculateActiveRowBandwidth(
mode_lib->vba.GPUVMEnable,
@@ -4597,14 +4597,14 @@ void dml20_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
+ mode_lib->vba.TotalNumberOfDCCActiveDPP[i][j]
* mode_lib->vba.MetaChunkSize)
* 1024.0
- / mode_lib->vba.ReturnBWPerState[i];
+ / mode_lib->vba.ReturnBWPerState[i][0];
if (mode_lib->vba.GPUVMEnable == true) {
mode_lib->vba.ExtraLatency = mode_lib->vba.ExtraLatency
+ mode_lib->vba.TotalNumberOfActiveDPP[i][j]
* mode_lib->vba.PTEGroupSize
- / mode_lib->vba.ReturnBWPerState[i];
+ / mode_lib->vba.ReturnBWPerState[i][0];
}
- mode_lib->vba.TimeCalc = 24.0 / mode_lib->vba.ProjectedDCFCLKDeepSleep;
+ mode_lib->vba.TimeCalc = 24.0 / mode_lib->vba.ProjectedDCFCLKDeepSleep[0][0];
for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) {
if (mode_lib->vba.BlendingAndTiming[k] == k) {
@@ -4654,7 +4654,7 @@ void dml20_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
}
for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) {
- locals->MaximumVStartup[k] = mode_lib->vba.VTotal[k] - mode_lib->vba.VActive[k]
+ locals->MaximumVStartup[0][0][k] = mode_lib->vba.VTotal[k] - mode_lib->vba.VActive[k]
- dml_max(1.0, dml_ceil(locals->WritebackDelay[i][k] / (mode_lib->vba.HTotal[k] / mode_lib->vba.PixelClock[k]), 1.0));
}
@@ -4699,7 +4699,7 @@ void dml20_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
mode_lib->vba.RequiredDPPCLK[i][j][k],
mode_lib->vba.RequiredDISPCLK[i][j],
mode_lib->vba.PixelClock[k],
- mode_lib->vba.ProjectedDCFCLKDeepSleep,
+ mode_lib->vba.ProjectedDCFCLKDeepSleep[0][0],
mode_lib->vba.DSCDelayPerState[i][k],
mode_lib->vba.NoOfDPP[i][j][k],
mode_lib->vba.ScalerEnabled[k],
@@ -4717,7 +4717,7 @@ void dml20_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
- mode_lib->vba.VActive[k],
mode_lib->vba.HTotal[k],
mode_lib->vba.MaxInterDCNTileRepeaters,
- mode_lib->vba.MaximumVStartup[k],
+ mode_lib->vba.MaximumVStartup[0][0][k],
mode_lib->vba.GPUVMMaxPageTableLevels,
mode_lib->vba.GPUVMEnable,
mode_lib->vba.DynamicMetadataEnable[k],
@@ -4727,15 +4727,15 @@ void dml20_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
mode_lib->vba.UrgentLatencyPixelDataOnly,
mode_lib->vba.ExtraLatency,
mode_lib->vba.TimeCalc,
- mode_lib->vba.PDEAndMetaPTEBytesPerFrame[k],
- mode_lib->vba.MetaRowBytes[k],
- mode_lib->vba.DPTEBytesPerRow[k],
- mode_lib->vba.PrefetchLinesY[k],
+ mode_lib->vba.PDEAndMetaPTEBytesPerFrame[0][0][k],
+ mode_lib->vba.MetaRowBytes[0][0][k],
+ mode_lib->vba.DPTEBytesPerRow[0][0][k],
+ mode_lib->vba.PrefetchLinesY[0][0][k],
mode_lib->vba.SwathWidthYPerState[i][j][k],
mode_lib->vba.BytePerPixelInDETY[k],
mode_lib->vba.PrefillY[k],
mode_lib->vba.MaxNumSwY[k],
- mode_lib->vba.PrefetchLinesC[k],
+ mode_lib->vba.PrefetchLinesC[0][0][k],
mode_lib->vba.BytePerPixelInDETC[k],
mode_lib->vba.PrefillC[k],
mode_lib->vba.MaxNumSwC[k],
@@ -4766,19 +4766,19 @@ void dml20_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
locals->prefetch_vm_bw_valid = true;
locals->prefetch_row_bw_valid = true;
for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) {
- if (locals->PDEAndMetaPTEBytesPerFrame[k] == 0)
+ if (locals->PDEAndMetaPTEBytesPerFrame[0][0][k] == 0)
locals->prefetch_vm_bw[k] = 0;
else if (locals->LinesForMetaPTE[k] > 0)
- locals->prefetch_vm_bw[k] = locals->PDEAndMetaPTEBytesPerFrame[k]
+ locals->prefetch_vm_bw[k] = locals->PDEAndMetaPTEBytesPerFrame[0][0][k]
/ (locals->LinesForMetaPTE[k] * locals->HTotal[k] / locals->PixelClock[k]);
else {
locals->prefetch_vm_bw[k] = 0;
locals->prefetch_vm_bw_valid = false;
}
- if (locals->MetaRowBytes[k] + locals->DPTEBytesPerRow[k] == 0)
+ if (locals->MetaRowBytes[0][0][k] + locals->DPTEBytesPerRow[0][0][k] == 0)
locals->prefetch_row_bw[k] = 0;
else if (locals->LinesForMetaAndDPTERow[k] > 0)
- locals->prefetch_row_bw[k] = (locals->MetaRowBytes[k] + locals->DPTEBytesPerRow[k])
+ locals->prefetch_row_bw[k] = (locals->MetaRowBytes[0][0][k] + locals->DPTEBytesPerRow[0][0][k])
/ (locals->LinesForMetaAndDPTERow[k] * locals->HTotal[k] / locals->PixelClock[k]);
else {
locals->prefetch_row_bw[k] = 0;
@@ -4797,13 +4797,13 @@ void dml20_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
mode_lib->vba.RequiredPrefetchPixelDataBWLuma[i][j][k])
+ mode_lib->vba.meta_row_bw[k] + mode_lib->vba.dpte_row_bw[k]);
}
- locals->BandwidthWithoutPrefetchSupported[i] = true;
- if (mode_lib->vba.MaximumReadBandwidthWithoutPrefetch > locals->ReturnBWPerState[i]) {
- locals->BandwidthWithoutPrefetchSupported[i] = false;
+ locals->BandwidthWithoutPrefetchSupported[i][0] = true;
+ if (mode_lib->vba.MaximumReadBandwidthWithoutPrefetch > locals->ReturnBWPerState[i][0]) {
+ locals->BandwidthWithoutPrefetchSupported[i][0] = false;
}
locals->PrefetchSupported[i][j] = true;
- if (mode_lib->vba.MaximumReadBandwidthWithPrefetch > locals->ReturnBWPerState[i]) {
+ if (mode_lib->vba.MaximumReadBandwidthWithPrefetch > locals->ReturnBWPerState[i][0]) {
locals->PrefetchSupported[i][j] = false;
}
for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) {
@@ -4828,7 +4828,7 @@ void dml20_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
if (mode_lib->vba.PrefetchSupported[i][j] == true
&& mode_lib->vba.VRatioInPrefetchSupported[i][j] == true) {
mode_lib->vba.BandwidthAvailableForImmediateFlip =
- mode_lib->vba.ReturnBWPerState[i];
+ mode_lib->vba.ReturnBWPerState[i][0];
for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) {
mode_lib->vba.BandwidthAvailableForImmediateFlip =
mode_lib->vba.BandwidthAvailableForImmediateFlip
@@ -4842,9 +4842,9 @@ void dml20_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
if ((mode_lib->vba.SourcePixelFormat[k] != dm_420_8
&& mode_lib->vba.SourcePixelFormat[k] != dm_420_10)) {
mode_lib->vba.ImmediateFlipBytes[k] =
- mode_lib->vba.PDEAndMetaPTEBytesPerFrame[k]
- + mode_lib->vba.MetaRowBytes[k]
- + mode_lib->vba.DPTEBytesPerRow[k];
+ mode_lib->vba.PDEAndMetaPTEBytesPerFrame[0][0][k]
+ + mode_lib->vba.MetaRowBytes[0][0][k]
+ + mode_lib->vba.DPTEBytesPerRow[0][0][k];
}
}
mode_lib->vba.TotImmediateFlipBytes = 0.0;
@@ -4872,9 +4872,9 @@ void dml20_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
/ mode_lib->vba.PixelClock[k],
mode_lib->vba.VRatio[k],
mode_lib->vba.Tno_bw[k],
- mode_lib->vba.PDEAndMetaPTEBytesPerFrame[k],
- mode_lib->vba.MetaRowBytes[k],
- mode_lib->vba.DPTEBytesPerRow[k],
+ mode_lib->vba.PDEAndMetaPTEBytesPerFrame[0][0][k],
+ mode_lib->vba.MetaRowBytes[0][0][k],
+ mode_lib->vba.DPTEBytesPerRow[0][0][k],
mode_lib->vba.DCCEnable[k],
mode_lib->vba.dpte_row_height[k],
mode_lib->vba.meta_row_height[k],
@@ -4899,7 +4899,7 @@ void dml20_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
}
mode_lib->vba.ImmediateFlipSupportedForState[i][j] = true;
if (mode_lib->vba.total_dcn_read_bw_with_flip
- > mode_lib->vba.ReturnBWPerState[i]) {
+ > mode_lib->vba.ReturnBWPerState[i][0]) {
mode_lib->vba.ImmediateFlipSupportedForState[i][j] = false;
}
for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) {
@@ -4918,13 +4918,13 @@ void dml20_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
for (k = 0; k < mode_lib->vba.NumberOfActivePlanes; k++)
mode_lib->vba.MaxTotalVActiveRDBandwidth = mode_lib->vba.MaxTotalVActiveRDBandwidth + mode_lib->vba.ReadBandwidth[k];
for (i = 0; i <= mode_lib->vba.soc.num_states; i++) {
- mode_lib->vba.MaxTotalVerticalActiveAvailableBandwidth[i] = dml_min(mode_lib->vba.ReturnBusWidth *
+ mode_lib->vba.MaxTotalVerticalActiveAvailableBandwidth[i][0] = dml_min(mode_lib->vba.ReturnBusWidth *
mode_lib->vba.DCFCLKPerState[i], mode_lib->vba.FabricAndDRAMBandwidthPerState[i] * 1000) *
mode_lib->vba.MaxAveragePercentOfIdealDRAMBWDisplayCanUseInNormalSystemOperation / 100;
- if (mode_lib->vba.MaxTotalVActiveRDBandwidth <= mode_lib->vba.MaxTotalVerticalActiveAvailableBandwidth[i])
- mode_lib->vba.TotalVerticalActiveBandwidthSupport[i] = true;
+ if (mode_lib->vba.MaxTotalVActiveRDBandwidth <= mode_lib->vba.MaxTotalVerticalActiveAvailableBandwidth[i][0])
+ mode_lib->vba.TotalVerticalActiveBandwidthSupport[i][0] = true;
else
- mode_lib->vba.TotalVerticalActiveBandwidthSupport[i] = false;
+ mode_lib->vba.TotalVerticalActiveBandwidthSupport[i][0] = false;
}
/*PTE Buffer Size Check*/
@@ -5012,7 +5012,7 @@ void dml20_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
status = DML_FAIL_SCALE_RATIO_TAP;
} else if (mode_lib->vba.SourceFormatPixelAndScanSupport != true) {
status = DML_FAIL_SOURCE_PIXEL_FORMAT;
- } else if (locals->ViewportSizeSupport[i] != true) {
+ } else if (locals->ViewportSizeSupport[i][0] != true) {
status = DML_FAIL_VIEWPORT_SIZE;
} else if (locals->DIOSupport[i] != true) {
status = DML_FAIL_DIO_SUPPORT;
@@ -5022,7 +5022,7 @@ void dml20_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
status = DML_FAIL_DSC_CLK_REQUIRED;
} else if (locals->UrgentLatencySupport[i][j] != true) {
status = DML_FAIL_URGENT_LATENCY;
- } else if (locals->ROBSupport[i] != true) {
+ } else if (locals->ROBSupport[i][0] != true) {
status = DML_FAIL_REORDERING_BUFFER;
} else if (locals->DISPCLK_DPPCLK_Support[i][j] != true) {
status = DML_FAIL_DISPCLK_DPPCLK;
@@ -5042,7 +5042,7 @@ void dml20_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
status = DML_FAIL_PITCH_SUPPORT;
} else if (locals->PrefetchSupported[i][j] != true) {
status = DML_FAIL_PREFETCH_SUPPORT;
- } else if (locals->TotalVerticalActiveBandwidthSupport[i] != true) {
+ } else if (locals->TotalVerticalActiveBandwidthSupport[i][0] != true) {
status = DML_FAIL_TOTAL_V_ACTIVE_BW;
} else if (locals->VRatioInPrefetchSupported[i][j] != true) {
status = DML_FAIL_V_RATIO_PREFETCH;
@@ -5088,7 +5088,7 @@ void dml20_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
mode_lib->vba.DRAMSpeed = mode_lib->vba.DRAMSpeedPerState[mode_lib->vba.VoltageLevel];
mode_lib->vba.FabricClock = mode_lib->vba.FabricClockPerState[mode_lib->vba.VoltageLevel];
mode_lib->vba.SOCCLK = mode_lib->vba.SOCCLKPerState[mode_lib->vba.VoltageLevel];
- mode_lib->vba.ReturnBW = locals->ReturnBWPerState[mode_lib->vba.VoltageLevel];
+ mode_lib->vba.ReturnBW = locals->ReturnBWPerState[mode_lib->vba.VoltageLevel][0];
mode_lib->vba.FabricAndDRAMBandwidth = locals->FabricAndDRAMBandwidthPerState[mode_lib->vba.VoltageLevel];
for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) {
if (mode_lib->vba.BlendingAndTiming[k] == k) {
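
The hunks above all follow one mechanical pattern: per-voltage-state scalars such as ReturnBWPerState[i] gain a trailing axis and are read at index 0. A sketch of the shape change; the bounds are illustrative, and the reading that the extra axis reserves room for per-state sub-configurations in later DML revisions is an assumption, not stated in the diff:

/*
 * Illustrative shape change; 9 stands in for the real voltage-state
 * bound and 2 for the new axis, neither of which is shown in the diff.
 */
double ReturnBWPerState_old[9 + 1];    /* one value per state */
double ReturnBWPerState_new[9 + 1][2]; /* read as [i][0] throughout */

Because every consumer in the patch reads element 0, behavior is unchanged; the re-dimensioning only spares another signature churn later.
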
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_mode_vba_20v2.c b/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_mode_vba_20v2.c
index 3c70dd577292..22f3b5a4b3b9 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_mode_vba_20v2.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_mode_vba_20v2.c
@@ -997,7 +997,7 @@ static unsigned int CalculateVMAndRowBytes(
*MetaRowByte = 0;
}
- if (SurfaceTiling == dm_sw_linear || SurfaceTiling == dm_sw_gfx7_2d_thin_gl || SurfaceTiling == dm_sw_gfx7_2d_thin_lvp) {
+ if (SurfaceTiling == dm_sw_linear || SurfaceTiling == dm_sw_gfx7_2d_thin_gl || SurfaceTiling == dm_sw_gfx7_2d_thin_l_vp) {
MacroTileSizeBytes = 256;
MacroTileHeight = BlockHeight256Bytes;
} else if (SurfaceTiling == dm_sw_4kb_s || SurfaceTiling == dm_sw_4kb_s_x
@@ -1395,11 +1395,11 @@ static void dml20v2_DISPCLKDPPCLKDCFCLKDeepSleepPrefetchParametersWatermarksAndP
else
mode_lib->vba.SwathWidthSingleDPPY[k] = mode_lib->vba.ViewportHeight[k];
- if (mode_lib->vba.ODMCombineEnabled[k] == true)
+ if (mode_lib->vba.ODMCombineEnabled[k] == dm_odm_combine_mode_2to1)
MainPlaneDoesODMCombine = true;
for (j = 0; j < mode_lib->vba.NumberOfActivePlanes; ++j)
if (mode_lib->vba.BlendingAndTiming[k] == j
- && mode_lib->vba.ODMCombineEnabled[j] == true)
+ && mode_lib->vba.ODMCombineEnabled[j] == dm_odm_combine_mode_2to1)
MainPlaneDoesODMCombine = true;
if (MainPlaneDoesODMCombine == true)
@@ -2611,9 +2611,13 @@ static void dml20v2_DISPCLKDPPCLKDCFCLKDeepSleepPrefetchParametersWatermarksAndP
mode_lib->vba.MinActiveDRAMClockChangeMargin
+ mode_lib->vba.DRAMClockChangeLatency;
- if (mode_lib->vba.MinActiveDRAMClockChangeMargin > 50) {
+ if (mode_lib->vba.DRAMClockChangeSupportsVActive &&
+ mode_lib->vba.MinActiveDRAMClockChangeMargin > 60) {
mode_lib->vba.DRAMClockChangeWatermark += 25;
mode_lib->vba.DRAMClockChangeSupport[0][0] = dm_dram_clock_change_vactive;
+ } else if (mode_lib->vba.DummyPStateCheck &&
+ mode_lib->vba.MinActiveDRAMClockChangeMargin > 0) {
+ mode_lib->vba.DRAMClockChangeSupport[0][0] = dm_dram_clock_change_vactive;
} else {
if (mode_lib->vba.SynchronizedVBlank || mode_lib->vba.NumberOfActivePlanes == 1) {
mode_lib->vba.DRAMClockChangeSupport[0][0] = dm_dram_clock_change_vblank;
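
The rewritten branch gates the v-active vote on a capability flag, raises the margin threshold from 50 to 60 us, and adds a dummy-p-state path. A compact restatement of the ladder; the enum spelling for the unsupported case and the helper signature are assumptions:

#include <stdbool.h>

enum dram_clock_change_support {
        dm_dram_clock_change_vactive,
        dm_dram_clock_change_vblank,
        dm_dram_clock_change_unsupported, /* assumed third state */
};

/* Sketch of the reworked decision ladder; the real first branch also
 * bumps DRAMClockChangeWatermark by 25 us. */
static enum dram_clock_change_support pstate_support(bool supports_vactive,
                bool dummy_pstate_check, double min_active_margin_us,
                bool synchronized_vblank, int active_planes)
{
        if (supports_vactive && min_active_margin_us > 60.0)
                return dm_dram_clock_change_vactive;
        if (dummy_pstate_check && min_active_margin_us > 0.0)
                return dm_dram_clock_change_vactive;
        if (synchronized_vblank || active_planes == 1)
                return dm_dram_clock_change_vblank;
        return dm_dram_clock_change_unsupported;
}
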
@@ -2881,12 +2885,12 @@ static void dml20v2_DisplayPipeConfiguration(struct display_mode_lib *mode_lib)
SwathWidth = mode_lib->vba.ViewportHeight[k];
}
- if (mode_lib->vba.ODMCombineEnabled[k] == true) {
+ if (mode_lib->vba.ODMCombineEnabled[k] == dm_odm_combine_mode_2to1) {
MainPlaneDoesODMCombine = true;
}
for (j = 0; j < mode_lib->vba.NumberOfActivePlanes; ++j) {
if (mode_lib->vba.BlendingAndTiming[k] == j
- && mode_lib->vba.ODMCombineEnabled[j] == true) {
+ && mode_lib->vba.ODMCombineEnabled[j] == dm_odm_combine_mode_2to1) {
MainPlaneDoesODMCombine = true;
}
}
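
Both ODM hunks replace boolean tests with enum comparisons, which is what lets the disabled/2-to-1 distinction grow further later. A minimal sketch with only the two enumerators the hunks actually use; any further modes would be assumption:

#include <stdbool.h>

enum odm_combine_mode {
        dm_odm_combine_mode_disabled,
        dm_odm_combine_mode_2to1,
};

static bool plane_does_odm_combine(enum odm_combine_mode m)
{
        return m == dm_odm_combine_mode_2to1; /* was: m == true */
}

An enum also makes the old `== true` tests a compile-visible smell rather than a silently narrowing comparison.
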
@@ -3381,7 +3385,7 @@ void dml20v2_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode
== dm_420_10))
|| (((mode_lib->vba.SurfaceTiling[k] == dm_sw_gfx7_2d_thin_gl
|| mode_lib->vba.SurfaceTiling[k]
- == dm_sw_gfx7_2d_thin_lvp)
+ == dm_sw_gfx7_2d_thin_l_vp)
&& !((mode_lib->vba.SourcePixelFormat[k]
== dm_444_64
|| mode_lib->vba.SourcePixelFormat[k]
@@ -3479,10 +3483,10 @@ void dml20v2_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode
locals->FabricAndDRAMBandwidthPerState[i] * 1000)
* locals->PercentOfIdealDRAMFabricAndSDPPortBWReceivedAfterUrgLatencyPixelDataOnly / 100;
- locals->ReturnBWPerState[i] = locals->ReturnBWToDCNPerState;
+ locals->ReturnBWPerState[i][0] = locals->ReturnBWToDCNPerState;
if (locals->DCCEnabledInAnyPlane == true && locals->ReturnBWToDCNPerState > locals->DCFCLKPerState[i] * locals->ReturnBusWidth / 4) {
- locals->ReturnBWPerState[i] = dml_min(locals->ReturnBWPerState[i],
+ locals->ReturnBWPerState[i][0] = dml_min(locals->ReturnBWPerState[i][0],
locals->ReturnBWToDCNPerState * 4 * (1 - locals->UrgentLatency /
((locals->ROBBufferSizeInKByte - locals->PixelChunkSizeInKByte) * 1024
/ (locals->ReturnBWToDCNPerState - locals->DCFCLKPerState[i]
@@ -3493,7 +3497,7 @@ void dml20v2_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode
+ (locals->ROBBufferSizeInKByte - locals->PixelChunkSizeInKByte) * 1024);
if (locals->DCCEnabledInAnyPlane && locals->CriticalPoint > 1 && locals->CriticalPoint < 4) {
- locals->ReturnBWPerState[i] = dml_min(locals->ReturnBWPerState[i],
+ locals->ReturnBWPerState[i][0] = dml_min(locals->ReturnBWPerState[i][0],
4 * locals->ReturnBWToDCNPerState *
(locals->ROBBufferSizeInKByte - locals->PixelChunkSizeInKByte) * 1024
* locals->ReturnBusWidth * locals->DCFCLKPerState[i] * locals->UrgentLatency /
@@ -3505,7 +3509,7 @@ void dml20v2_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode
locals->DCFCLKPerState[i], locals->FabricAndDRAMBandwidthPerState[i] * 1000);
if (locals->DCCEnabledInAnyPlane == true && locals->ReturnBWToDCNPerState > locals->DCFCLKPerState[i] * locals->ReturnBusWidth / 4) {
- locals->ReturnBWPerState[i] = dml_min(locals->ReturnBWPerState[i],
+ locals->ReturnBWPerState[i][0] = dml_min(locals->ReturnBWPerState[i][0],
locals->ReturnBWToDCNPerState * 4 * (1 - locals->UrgentLatency /
((locals->ROBBufferSizeInKByte - locals->PixelChunkSizeInKByte) * 1024
/ (locals->ReturnBWToDCNPerState - locals->DCFCLKPerState[i]
@@ -3516,7 +3520,7 @@ void dml20v2_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode
+ (locals->ROBBufferSizeInKByte - locals->PixelChunkSizeInKByte) * 1024);
if (locals->DCCEnabledInAnyPlane && locals->CriticalPoint > 1 && locals->CriticalPoint < 4) {
- locals->ReturnBWPerState[i] = dml_min(locals->ReturnBWPerState[i],
+ locals->ReturnBWPerState[i][0] = dml_min(locals->ReturnBWPerState[i][0],
4 * locals->ReturnBWToDCNPerState *
(locals->ROBBufferSizeInKByte - locals->PixelChunkSizeInKByte) * 1024
* locals->ReturnBusWidth * locals->DCFCLKPerState[i] * locals->UrgentLatency /
@@ -3554,12 +3558,12 @@ void dml20v2_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode
for (i = 0; i <= mode_lib->vba.soc.num_states; i++) {
locals->UrgentRoundTripAndOutOfOrderLatencyPerState[i] =
(mode_lib->vba.RoundTripPingLatencyCycles + 32.0) / mode_lib->vba.DCFCLKPerState[i]
- + locals->UrgentOutOfOrderReturnPerChannel * mode_lib->vba.NumberOfChannels / locals->ReturnBWPerState[i];
- if ((mode_lib->vba.ROBBufferSizeInKByte - mode_lib->vba.PixelChunkSizeInKByte) * 1024.0 / locals->ReturnBWPerState[i]
+ + locals->UrgentOutOfOrderReturnPerChannel * mode_lib->vba.NumberOfChannels / locals->ReturnBWPerState[i][0];
+ if ((mode_lib->vba.ROBBufferSizeInKByte - mode_lib->vba.PixelChunkSizeInKByte) * 1024.0 / locals->ReturnBWPerState[i][0]
> locals->UrgentRoundTripAndOutOfOrderLatencyPerState[i]) {
- locals->ROBSupport[i] = true;
+ locals->ROBSupport[i][0] = true;
} else {
- locals->ROBSupport[i] = false;
+ locals->ROBSupport[i][0] = false;
}
}
/*Writeback Mode Support Check*/
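
ROBSupport[i][0] records whether the reorder buffer above the pixel chunk drains more slowly than the urgent round trip accumulates. A sketch of the two quantities compared in the hunk above; names are flattened from the vba struct:

#include <stdbool.h>

/*
 * Sketch of the ROB gate for one state: sizes in KB, bandwidth in MB/s,
 * clock in MHz, latencies in microseconds (DML conventions).
 */
static bool rob_support(double rob_kbyte, double pixel_chunk_kbyte,
                        double return_bw, double dcfclk_mhz,
                        double round_trip_ping_cycles,
                        double ooo_return_per_channel, int channels)
{
        double urgent = (round_trip_ping_cycles + 32.0) / dcfclk_mhz
                        + ooo_return_per_channel * channels / return_bw;
        double rob_drain = (rob_kbyte - pixel_chunk_kbyte) * 1024.0
                           / return_bw;

        return rob_drain > urgent;
}
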
@@ -3942,7 +3946,7 @@ void dml20v2_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode
}
if (locals->MinDPPCLKUsingSingleDPP[k] * (1.0 + mode_lib->vba.DISPCLKDPPCLKDSCCLKDownSpreading / 100.0) <= mode_lib->vba.MaxDppclkRoundedDownToDFSGranularity
&& locals->SwathWidthYSingleDPP[k] <= locals->MaximumSwathWidth[k]
- && locals->ODMCombineEnablePerState[i][k] == false) {
+ && locals->ODMCombineEnablePerState[i][k] == dm_odm_combine_mode_disabled) {
locals->NoOfDPP[i][j][k] = 1;
locals->RequiredDPPCLK[i][j][k] =
locals->MinDPPCLKUsingSingleDPP[k] * (1.0 + mode_lib->vba.DISPCLKDPPCLKDSCCLKDownSpreading / 100.0);
@@ -4031,16 +4035,16 @@ void dml20v2_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode
/*Viewport Size Check*/
for (i = 0; i <= mode_lib->vba.soc.num_states; i++) {
- locals->ViewportSizeSupport[i] = true;
+ locals->ViewportSizeSupport[i][0] = true;
for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) {
- if (locals->ODMCombineEnablePerState[i][k] == true) {
+ if (locals->ODMCombineEnablePerState[i][k] == dm_odm_combine_mode_2to1) {
if (dml_min(locals->SwathWidthYSingleDPP[k], dml_round(mode_lib->vba.HActive[k] / 2.0 * mode_lib->vba.HRatio[k]))
> locals->MaximumSwathWidth[k]) {
- locals->ViewportSizeSupport[i] = false;
+ locals->ViewportSizeSupport[i][0] = false;
}
} else {
if (locals->SwathWidthYSingleDPP[k] / 2.0 > locals->MaximumSwathWidth[k]) {
- locals->ViewportSizeSupport[i] = false;
+ locals->ViewportSizeSupport[i][0] = false;
}
}
}
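
With 2:1 ODM combine each DPP sees at most half the active width, so the viewport check clamps against round(HActive/2 * HRatio); without it the single-DPP swath is simply halved. A sketch, using libm round/fmin for dml_round/dml_min:

#include <math.h>
#include <stdbool.h>

/* Sketch of the per-plane viewport-size check from the hunk above. */
static bool viewport_fits(bool odm_2to1, double swath_width_single_dpp,
                          double hactive, double hratio,
                          double maximum_swath_width)
{
        if (odm_2to1)
                return fmin(swath_width_single_dpp,
                            round(hactive / 2.0 * hratio))
                       <= maximum_swath_width;
        return swath_width_single_dpp / 2.0 <= maximum_swath_width;
}
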
@@ -4222,8 +4226,7 @@ void dml20v2_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode
mode_lib->vba.DSCFormatFactor = 1;
}
if (locals->RequiresDSC[i][k] == true) {
- if (locals->ODMCombineEnablePerState[i][k]
- == true) {
+ if (locals->ODMCombineEnablePerState[i][k] == dm_odm_combine_mode_2to1) {
if (mode_lib->vba.PixelClockBackEnd[k] / 6.0 / mode_lib->vba.DSCFormatFactor
> (1.0 - mode_lib->vba.DISPCLKDPPCLKDSCCLKDownSpreading / 100.0) * mode_lib->vba.MaxDSCCLK[i]) {
locals->DSCCLKRequiredMoreThanSupported[i] =
@@ -4246,7 +4249,7 @@ void dml20v2_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode
mode_lib->vba.TotalDSCUnitsRequired = 0.0;
for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) {
if (locals->RequiresDSC[i][k] == true) {
- if (locals->ODMCombineEnablePerState[i][k] == true) {
+ if (locals->ODMCombineEnablePerState[i][k] == dm_odm_combine_mode_2to1) {
mode_lib->vba.TotalDSCUnitsRequired =
mode_lib->vba.TotalDSCUnitsRequired + 2.0;
} else {
@@ -4288,7 +4291,7 @@ void dml20v2_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode
mode_lib->vba.bpp = locals->OutputBppPerState[i][k];
}
if (locals->RequiresDSC[i][k] == true && mode_lib->vba.bpp != 0.0) {
- if (locals->ODMCombineEnablePerState[i][k] == false) {
+ if (locals->ODMCombineEnablePerState[i][k] == dm_odm_combine_mode_disabled) {
locals->DSCDelayPerState[i][k] =
dscceComputeDelay(
mode_lib->vba.DSCInputBitPerComponent[k],
@@ -4331,7 +4334,7 @@ void dml20v2_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode
for (i = 0; i <= mode_lib->vba.soc.num_states; i++) {
for (j = 0; j < 2; j++) {
for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) {
- if (locals->ODMCombineEnablePerState[i][k] == true)
+ if (locals->ODMCombineEnablePerState[i][k] == dm_odm_combine_mode_2to1)
locals->SwathWidthYPerState[i][j][k] = dml_min(locals->SwathWidthYSingleDPP[k], dml_round(locals->HActive[k] / 2 * locals->HRatio[k]));
else
locals->SwathWidthYPerState[i][j][k] = locals->SwathWidthYSingleDPP[k] / locals->NoOfDPP[i][j][k];
@@ -4384,28 +4387,28 @@ void dml20v2_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode
locals->EffectiveDETLBLinesLuma = dml_floor(locals->LinesInDETLuma + dml_min(
locals->LinesInDETLuma * locals->RequiredDISPCLK[i][j] * locals->BytePerPixelInDETY[k] *
- locals->PSCL_FACTOR[k] / locals->ReturnBWPerState[i],
+ locals->PSCL_FACTOR[k] / locals->ReturnBWPerState[i][0],
locals->EffectiveLBLatencyHidingSourceLinesLuma),
locals->SwathHeightYPerState[i][j][k]);
locals->EffectiveDETLBLinesChroma = dml_floor(locals->LinesInDETChroma + dml_min(
locals->LinesInDETChroma * locals->RequiredDISPCLK[i][j] * locals->BytePerPixelInDETC[k] *
- locals->PSCL_FACTOR_CHROMA[k] / locals->ReturnBWPerState[i],
+ locals->PSCL_FACTOR_CHROMA[k] / locals->ReturnBWPerState[i][0],
locals->EffectiveLBLatencyHidingSourceLinesChroma),
locals->SwathHeightCPerState[i][j][k]);
if (locals->BytePerPixelInDETC[k] == 0) {
locals->UrgentLatencySupportUsPerState[i][j][k] = locals->EffectiveDETLBLinesLuma * (locals->HTotal[k] / locals->PixelClock[k])
/ locals->VRatio[k] - locals->EffectiveDETLBLinesLuma * locals->SwathWidthYPerState[i][j][k] *
- dml_ceil(locals->BytePerPixelInDETY[k], 1) / (locals->ReturnBWPerState[i] / locals->NoOfDPP[i][j][k]);
+ dml_ceil(locals->BytePerPixelInDETY[k], 1) / (locals->ReturnBWPerState[i][0] / locals->NoOfDPP[i][j][k]);
} else {
locals->UrgentLatencySupportUsPerState[i][j][k] = dml_min(
locals->EffectiveDETLBLinesLuma * (locals->HTotal[k] / locals->PixelClock[k])
/ locals->VRatio[k] - locals->EffectiveDETLBLinesLuma * locals->SwathWidthYPerState[i][j][k] *
- dml_ceil(locals->BytePerPixelInDETY[k], 1) / (locals->ReturnBWPerState[i] / locals->NoOfDPP[i][j][k]),
+ dml_ceil(locals->BytePerPixelInDETY[k], 1) / (locals->ReturnBWPerState[i][0] / locals->NoOfDPP[i][j][k]),
locals->EffectiveDETLBLinesChroma * (locals->HTotal[k] / locals->PixelClock[k]) / (locals->VRatio[k] / 2) -
locals->EffectiveDETLBLinesChroma * locals->SwathWidthYPerState[i][j][k] / 2 *
- dml_ceil(locals->BytePerPixelInDETC[k], 2) / (locals->ReturnBWPerState[i] / locals->NoOfDPP[i][j][k]));
+ dml_ceil(locals->BytePerPixelInDETC[k], 2) / (locals->ReturnBWPerState[i][0] / locals->NoOfDPP[i][j][k]));
}
}
}
@@ -4450,14 +4453,14 @@ void dml20v2_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode
locals->SwathHeightYThisState[k] = locals->SwathHeightYPerState[i][j][k];
locals->SwathHeightCThisState[k] = locals->SwathHeightCPerState[i][j][k];
locals->SwathWidthYThisState[k] = locals->SwathWidthYPerState[i][j][k];
- mode_lib->vba.ProjectedDCFCLKDeepSleep = dml_max(
- mode_lib->vba.ProjectedDCFCLKDeepSleep,
+ mode_lib->vba.ProjectedDCFCLKDeepSleep[0][0] = dml_max(
+ mode_lib->vba.ProjectedDCFCLKDeepSleep[0][0],
mode_lib->vba.PixelClock[k] / 16.0);
if (mode_lib->vba.BytePerPixelInDETC[k] == 0.0) {
if (mode_lib->vba.VRatio[k] <= 1.0) {
- mode_lib->vba.ProjectedDCFCLKDeepSleep =
+ mode_lib->vba.ProjectedDCFCLKDeepSleep[0][0] =
dml_max(
- mode_lib->vba.ProjectedDCFCLKDeepSleep,
+ mode_lib->vba.ProjectedDCFCLKDeepSleep[0][0],
1.1
* dml_ceil(
mode_lib->vba.BytePerPixelInDETY[k],
@@ -4467,9 +4470,9 @@ void dml20v2_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode
* mode_lib->vba.PixelClock[k]
/ mode_lib->vba.NoOfDPP[i][j][k]);
} else {
- mode_lib->vba.ProjectedDCFCLKDeepSleep =
+ mode_lib->vba.ProjectedDCFCLKDeepSleep[0][0] =
dml_max(
- mode_lib->vba.ProjectedDCFCLKDeepSleep,
+ mode_lib->vba.ProjectedDCFCLKDeepSleep[0][0],
1.1
* dml_ceil(
mode_lib->vba.BytePerPixelInDETY[k],
@@ -4480,9 +4483,9 @@ void dml20v2_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode
}
} else {
if (mode_lib->vba.VRatio[k] <= 1.0) {
- mode_lib->vba.ProjectedDCFCLKDeepSleep =
+ mode_lib->vba.ProjectedDCFCLKDeepSleep[0][0] =
dml_max(
- mode_lib->vba.ProjectedDCFCLKDeepSleep,
+ mode_lib->vba.ProjectedDCFCLKDeepSleep[0][0],
1.1
* dml_ceil(
mode_lib->vba.BytePerPixelInDETY[k],
@@ -4492,9 +4495,9 @@ void dml20v2_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode
* mode_lib->vba.PixelClock[k]
/ mode_lib->vba.NoOfDPP[i][j][k]);
} else {
- mode_lib->vba.ProjectedDCFCLKDeepSleep =
+ mode_lib->vba.ProjectedDCFCLKDeepSleep[0][0] =
dml_max(
- mode_lib->vba.ProjectedDCFCLKDeepSleep,
+ mode_lib->vba.ProjectedDCFCLKDeepSleep[0][0],
1.1
* dml_ceil(
mode_lib->vba.BytePerPixelInDETY[k],
@@ -4504,9 +4507,9 @@ void dml20v2_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode
* mode_lib->vba.RequiredDPPCLK[i][j][k]);
}
if (mode_lib->vba.VRatio[k] / 2.0 <= 1.0) {
- mode_lib->vba.ProjectedDCFCLKDeepSleep =
+ mode_lib->vba.ProjectedDCFCLKDeepSleep[0][0] =
dml_max(
- mode_lib->vba.ProjectedDCFCLKDeepSleep,
+ mode_lib->vba.ProjectedDCFCLKDeepSleep[0][0],
1.1
* dml_ceil(
mode_lib->vba.BytePerPixelInDETC[k],
@@ -4517,9 +4520,9 @@ void dml20v2_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode
* mode_lib->vba.PixelClock[k]
/ mode_lib->vba.NoOfDPP[i][j][k]);
} else {
- mode_lib->vba.ProjectedDCFCLKDeepSleep =
+ mode_lib->vba.ProjectedDCFCLKDeepSleep[0][0] =
dml_max(
- mode_lib->vba.ProjectedDCFCLKDeepSleep,
+ mode_lib->vba.ProjectedDCFCLKDeepSleep[0][0],
1.1
* dml_ceil(
mode_lib->vba.BytePerPixelInDETC[k],
@@ -4555,7 +4558,7 @@ void dml20v2_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode
&mode_lib->vba.PTEBufferSizeNotExceededY[i][j][k],
&mode_lib->vba.dpte_row_height[k],
&mode_lib->vba.meta_row_height[k]);
- mode_lib->vba.PrefetchLinesY[k] = CalculatePrefetchSourceLines(
+ mode_lib->vba.PrefetchLinesY[0][0][k] = CalculatePrefetchSourceLines(
mode_lib,
mode_lib->vba.VRatio[k],
mode_lib->vba.vtaps[k],
@@ -4594,7 +4597,7 @@ void dml20v2_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode
&mode_lib->vba.PTEBufferSizeNotExceededC[i][j][k],
&mode_lib->vba.dpte_row_height_chroma[k],
&mode_lib->vba.meta_row_height_chroma[k]);
- mode_lib->vba.PrefetchLinesC[k] = CalculatePrefetchSourceLines(
+ mode_lib->vba.PrefetchLinesC[0][0][k] = CalculatePrefetchSourceLines(
mode_lib,
mode_lib->vba.VRatio[k] / 2.0,
mode_lib->vba.VTAPsChroma[k],
@@ -4608,14 +4611,14 @@ void dml20v2_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode
mode_lib->vba.PDEAndMetaPTEBytesPerFrameC = 0.0;
mode_lib->vba.MetaRowBytesC = 0.0;
mode_lib->vba.DPTEBytesPerRowC = 0.0;
- locals->PrefetchLinesC[k] = 0.0;
+ locals->PrefetchLinesC[0][0][k] = 0.0;
locals->PTEBufferSizeNotExceededC[i][j][k] = true;
locals->PTEBufferSizeInRequestsForLuma = mode_lib->vba.PTEBufferSizeInRequestsLuma + mode_lib->vba.PTEBufferSizeInRequestsChroma;
}
- locals->PDEAndMetaPTEBytesPerFrame[k] =
+ locals->PDEAndMetaPTEBytesPerFrame[0][0][k] =
mode_lib->vba.PDEAndMetaPTEBytesPerFrameY + mode_lib->vba.PDEAndMetaPTEBytesPerFrameC;
- locals->MetaRowBytes[k] = mode_lib->vba.MetaRowBytesY + mode_lib->vba.MetaRowBytesC;
- locals->DPTEBytesPerRow[k] = mode_lib->vba.DPTEBytesPerRowY + mode_lib->vba.DPTEBytesPerRowC;
+ locals->MetaRowBytes[0][0][k] = mode_lib->vba.MetaRowBytesY + mode_lib->vba.MetaRowBytesC;
+ locals->DPTEBytesPerRow[0][0][k] = mode_lib->vba.DPTEBytesPerRowY + mode_lib->vba.DPTEBytesPerRowC;
CalculateActiveRowBandwidth(
mode_lib->vba.GPUVMEnable,
@@ -4642,14 +4645,14 @@ void dml20v2_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode
+ mode_lib->vba.TotalNumberOfDCCActiveDPP[i][j]
* mode_lib->vba.MetaChunkSize)
* 1024.0
- / mode_lib->vba.ReturnBWPerState[i];
+ / mode_lib->vba.ReturnBWPerState[i][0];
if (mode_lib->vba.GPUVMEnable == true) {
mode_lib->vba.ExtraLatency = mode_lib->vba.ExtraLatency
+ mode_lib->vba.TotalNumberOfActiveDPP[i][j]
* mode_lib->vba.PTEGroupSize
- / mode_lib->vba.ReturnBWPerState[i];
+ / mode_lib->vba.ReturnBWPerState[i][0];
}
- mode_lib->vba.TimeCalc = 24.0 / mode_lib->vba.ProjectedDCFCLKDeepSleep;
+ mode_lib->vba.TimeCalc = 24.0 / mode_lib->vba.ProjectedDCFCLKDeepSleep[0][0];
for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) {
if (mode_lib->vba.BlendingAndTiming[k] == k) {
@@ -4699,7 +4702,7 @@ void dml20v2_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode
}
for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) {
- locals->MaximumVStartup[k] = mode_lib->vba.VTotal[k] - mode_lib->vba.VActive[k]
+ locals->MaximumVStartup[0][0][k] = mode_lib->vba.VTotal[k] - mode_lib->vba.VActive[k]
- dml_max(1.0, dml_ceil(locals->WritebackDelay[i][k] / (mode_lib->vba.HTotal[k] / mode_lib->vba.PixelClock[k]), 1.0));
}
@@ -4739,7 +4742,7 @@ void dml20v2_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode
mode_lib->vba.XFCRemoteSurfaceFlipDelay = 0.0;
}
- CalculateDelayAfterScaler(mode_lib, mode_lib->vba.ReturnBWPerState[i], mode_lib->vba.ReadBandwidthLuma[k], mode_lib->vba.ReadBandwidthChroma[k], mode_lib->vba.MaxTotalVActiveRDBandwidth,
+ CalculateDelayAfterScaler(mode_lib, mode_lib->vba.ReturnBWPerState[i][0], mode_lib->vba.ReadBandwidthLuma[k], mode_lib->vba.ReadBandwidthChroma[k], mode_lib->vba.MaxTotalVActiveRDBandwidth,
mode_lib->vba.DisplayPipeLineDeliveryTimeLuma[k], mode_lib->vba.DisplayPipeLineDeliveryTimeChroma[k],
mode_lib->vba.RequiredDPPCLK[i][j][k], mode_lib->vba.RequiredDISPCLK[i][j], mode_lib->vba.PixelClock[k], mode_lib->vba.DSCDelayPerState[i][k], mode_lib->vba.NoOfDPP[i][j][k], mode_lib->vba.ScalerEnabled[k], mode_lib->vba.NumberOfCursors[k],
mode_lib->vba.DPPCLKDelaySubtotal, mode_lib->vba.DPPCLKDelaySCL, mode_lib->vba.DPPCLKDelaySCLLBOnly, mode_lib->vba.DPPCLKDelayCNVCFormater, mode_lib->vba.DPPCLKDelayCNVCCursor, mode_lib->vba.DISPCLKDelaySubtotal,
@@ -4753,14 +4756,14 @@ void dml20v2_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode
mode_lib->vba.RequiredDPPCLK[i][j][k],
mode_lib->vba.RequiredDISPCLK[i][j],
mode_lib->vba.PixelClock[k],
- mode_lib->vba.ProjectedDCFCLKDeepSleep,
+ mode_lib->vba.ProjectedDCFCLKDeepSleep[0][0],
mode_lib->vba.NoOfDPP[i][j][k],
mode_lib->vba.NumberOfCursors[k],
mode_lib->vba.VTotal[k]
- mode_lib->vba.VActive[k],
mode_lib->vba.HTotal[k],
mode_lib->vba.MaxInterDCNTileRepeaters,
- mode_lib->vba.MaximumVStartup[k],
+ mode_lib->vba.MaximumVStartup[0][0][k],
mode_lib->vba.GPUVMMaxPageTableLevels,
mode_lib->vba.GPUVMEnable,
mode_lib->vba.DynamicMetadataEnable[k],
@@ -4770,15 +4773,15 @@ void dml20v2_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode
mode_lib->vba.UrgentLatencyPixelDataOnly,
mode_lib->vba.ExtraLatency,
mode_lib->vba.TimeCalc,
- mode_lib->vba.PDEAndMetaPTEBytesPerFrame[k],
- mode_lib->vba.MetaRowBytes[k],
- mode_lib->vba.DPTEBytesPerRow[k],
- mode_lib->vba.PrefetchLinesY[k],
+ mode_lib->vba.PDEAndMetaPTEBytesPerFrame[0][0][k],
+ mode_lib->vba.MetaRowBytes[0][0][k],
+ mode_lib->vba.DPTEBytesPerRow[0][0][k],
+ mode_lib->vba.PrefetchLinesY[0][0][k],
mode_lib->vba.SwathWidthYPerState[i][j][k],
mode_lib->vba.BytePerPixelInDETY[k],
mode_lib->vba.PrefillY[k],
mode_lib->vba.MaxNumSwY[k],
- mode_lib->vba.PrefetchLinesC[k],
+ mode_lib->vba.PrefetchLinesC[0][0][k],
mode_lib->vba.BytePerPixelInDETC[k],
mode_lib->vba.PrefillC[k],
mode_lib->vba.MaxNumSwC[k],
@@ -4808,19 +4811,19 @@ void dml20v2_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode
locals->prefetch_vm_bw_valid = true;
locals->prefetch_row_bw_valid = true;
for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) {
- if (locals->PDEAndMetaPTEBytesPerFrame[k] == 0)
+ if (locals->PDEAndMetaPTEBytesPerFrame[0][0][k] == 0)
locals->prefetch_vm_bw[k] = 0;
else if (locals->LinesForMetaPTE[k] > 0)
- locals->prefetch_vm_bw[k] = locals->PDEAndMetaPTEBytesPerFrame[k]
+ locals->prefetch_vm_bw[k] = locals->PDEAndMetaPTEBytesPerFrame[0][0][k]
/ (locals->LinesForMetaPTE[k] * locals->HTotal[k] / locals->PixelClock[k]);
else {
locals->prefetch_vm_bw[k] = 0;
locals->prefetch_vm_bw_valid = false;
}
- if (locals->MetaRowBytes[k] + locals->DPTEBytesPerRow[k] == 0)
+ if (locals->MetaRowBytes[0][0][k] + locals->DPTEBytesPerRow[0][0][k] == 0)
locals->prefetch_row_bw[k] = 0;
else if (locals->LinesForMetaAndDPTERow[k] > 0)
- locals->prefetch_row_bw[k] = (locals->MetaRowBytes[k] + locals->DPTEBytesPerRow[k])
+ locals->prefetch_row_bw[k] = (locals->MetaRowBytes[0][0][k] + locals->DPTEBytesPerRow[0][0][k])
/ (locals->LinesForMetaAndDPTERow[k] * locals->HTotal[k] / locals->PixelClock[k]);
else {
locals->prefetch_row_bw[k] = 0;
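
prefetch_vm_bw and prefetch_row_bw are bytes divided by the time the allotted lines give; zero bytes legitimately yields zero bandwidth, but nonzero bytes with no lines to move them in invalidates the solution. A sketch of that three-way split:

#include <stdbool.h>

/*
 * Sketch: bandwidth to move `bytes` within `lines` of scanout. One line
 * lasts htotal / pixel_clock_mhz microseconds, so the result is MB/s in
 * DML terms.
 */
static double prefetch_bw(double bytes, double lines, double htotal,
                          double pixel_clock_mhz, bool *valid)
{
        if (bytes == 0.0)
                return 0.0;
        if (lines > 0.0)
                return bytes / (lines * htotal / pixel_clock_mhz);
        *valid = false; /* bytes to move but no lines to move them in */
        return 0.0;
}
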
@@ -4839,13 +4842,13 @@ void dml20v2_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode
mode_lib->vba.RequiredPrefetchPixelDataBWLuma[i][j][k])
+ mode_lib->vba.meta_row_bw[k] + mode_lib->vba.dpte_row_bw[k]);
}
- locals->BandwidthWithoutPrefetchSupported[i] = true;
- if (mode_lib->vba.MaximumReadBandwidthWithoutPrefetch > locals->ReturnBWPerState[i]) {
- locals->BandwidthWithoutPrefetchSupported[i] = false;
+ locals->BandwidthWithoutPrefetchSupported[i][0] = true;
+ if (mode_lib->vba.MaximumReadBandwidthWithoutPrefetch > locals->ReturnBWPerState[i][0]) {
+ locals->BandwidthWithoutPrefetchSupported[i][0] = false;
}
locals->PrefetchSupported[i][j] = true;
- if (mode_lib->vba.MaximumReadBandwidthWithPrefetch > locals->ReturnBWPerState[i]) {
+ if (mode_lib->vba.MaximumReadBandwidthWithPrefetch > locals->ReturnBWPerState[i][0]) {
locals->PrefetchSupported[i][j] = false;
}
for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) {
@@ -4870,7 +4873,7 @@ void dml20v2_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode
if (mode_lib->vba.PrefetchSupported[i][j] == true
&& mode_lib->vba.VRatioInPrefetchSupported[i][j] == true) {
mode_lib->vba.BandwidthAvailableForImmediateFlip =
- mode_lib->vba.ReturnBWPerState[i];
+ mode_lib->vba.ReturnBWPerState[i][0];
for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) {
mode_lib->vba.BandwidthAvailableForImmediateFlip =
mode_lib->vba.BandwidthAvailableForImmediateFlip
@@ -4884,9 +4887,9 @@ void dml20v2_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode
if ((mode_lib->vba.SourcePixelFormat[k] != dm_420_8
&& mode_lib->vba.SourcePixelFormat[k] != dm_420_10)) {
mode_lib->vba.ImmediateFlipBytes[k] =
- mode_lib->vba.PDEAndMetaPTEBytesPerFrame[k]
- + mode_lib->vba.MetaRowBytes[k]
- + mode_lib->vba.DPTEBytesPerRow[k];
+ mode_lib->vba.PDEAndMetaPTEBytesPerFrame[0][0][k]
+ + mode_lib->vba.MetaRowBytes[0][0][k]
+ + mode_lib->vba.DPTEBytesPerRow[0][0][k];
}
}
mode_lib->vba.TotImmediateFlipBytes = 0.0;
@@ -4914,9 +4917,9 @@ void dml20v2_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode
/ mode_lib->vba.PixelClock[k],
mode_lib->vba.VRatio[k],
mode_lib->vba.Tno_bw[k],
- mode_lib->vba.PDEAndMetaPTEBytesPerFrame[k],
- mode_lib->vba.MetaRowBytes[k],
- mode_lib->vba.DPTEBytesPerRow[k],
+ mode_lib->vba.PDEAndMetaPTEBytesPerFrame[0][0][k],
+ mode_lib->vba.MetaRowBytes[0][0][k],
+ mode_lib->vba.DPTEBytesPerRow[0][0][k],
mode_lib->vba.DCCEnable[k],
mode_lib->vba.dpte_row_height[k],
mode_lib->vba.meta_row_height[k],
@@ -4941,7 +4944,7 @@ void dml20v2_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode
}
mode_lib->vba.ImmediateFlipSupportedForState[i][j] = true;
if (mode_lib->vba.total_dcn_read_bw_with_flip
- > mode_lib->vba.ReturnBWPerState[i]) {
+ > mode_lib->vba.ReturnBWPerState[i][0]) {
mode_lib->vba.ImmediateFlipSupportedForState[i][j] = false;
}
for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) {
@@ -4957,13 +4960,13 @@ void dml20v2_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode
/*Vertical Active BW support*/
for (i = 0; i <= mode_lib->vba.soc.num_states; i++) {
- mode_lib->vba.MaxTotalVerticalActiveAvailableBandwidth[i] = dml_min(mode_lib->vba.ReturnBusWidth *
+ mode_lib->vba.MaxTotalVerticalActiveAvailableBandwidth[i][0] = dml_min(mode_lib->vba.ReturnBusWidth *
mode_lib->vba.DCFCLKPerState[i], mode_lib->vba.FabricAndDRAMBandwidthPerState[i] * 1000) *
mode_lib->vba.MaxAveragePercentOfIdealDRAMBWDisplayCanUseInNormalSystemOperation / 100;
- if (mode_lib->vba.MaxTotalVActiveRDBandwidth <= mode_lib->vba.MaxTotalVerticalActiveAvailableBandwidth[i])
- mode_lib->vba.TotalVerticalActiveBandwidthSupport[i] = true;
+ if (mode_lib->vba.MaxTotalVActiveRDBandwidth <= mode_lib->vba.MaxTotalVerticalActiveAvailableBandwidth[i][0])
+ mode_lib->vba.TotalVerticalActiveBandwidthSupport[i][0] = true;
else
- mode_lib->vba.TotalVerticalActiveBandwidthSupport[i] = false;
+ mode_lib->vba.TotalVerticalActiveBandwidthSupport[i][0] = false;
}
/*PTE Buffer Size Check*/
@@ -5051,7 +5054,7 @@ void dml20v2_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode
status = DML_FAIL_SCALE_RATIO_TAP;
} else if (mode_lib->vba.SourceFormatPixelAndScanSupport != true) {
status = DML_FAIL_SOURCE_PIXEL_FORMAT;
- } else if (locals->ViewportSizeSupport[i] != true) {
+ } else if (locals->ViewportSizeSupport[i][0] != true) {
status = DML_FAIL_VIEWPORT_SIZE;
} else if (locals->DIOSupport[i] != true) {
status = DML_FAIL_DIO_SUPPORT;
@@ -5061,7 +5064,7 @@ void dml20v2_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode
status = DML_FAIL_DSC_CLK_REQUIRED;
} else if (locals->UrgentLatencySupport[i][j] != true) {
status = DML_FAIL_URGENT_LATENCY;
- } else if (locals->ROBSupport[i] != true) {
+ } else if (locals->ROBSupport[i][0] != true) {
status = DML_FAIL_REORDERING_BUFFER;
} else if (locals->DISPCLK_DPPCLK_Support[i][j] != true) {
status = DML_FAIL_DISPCLK_DPPCLK;
@@ -5081,7 +5084,7 @@ void dml20v2_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode
status = DML_FAIL_PITCH_SUPPORT;
} else if (locals->PrefetchSupported[i][j] != true) {
status = DML_FAIL_PREFETCH_SUPPORT;
- } else if (locals->TotalVerticalActiveBandwidthSupport[i] != true) {
+ } else if (locals->TotalVerticalActiveBandwidthSupport[i][0] != true) {
status = DML_FAIL_TOTAL_V_ACTIVE_BW;
} else if (locals->VRatioInPrefetchSupported[i][j] != true) {
status = DML_FAIL_V_RATIO_PREFETCH;
@@ -5127,7 +5130,7 @@ void dml20v2_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode
mode_lib->vba.DRAMSpeed = mode_lib->vba.DRAMSpeedPerState[mode_lib->vba.VoltageLevel];
mode_lib->vba.FabricClock = mode_lib->vba.FabricClockPerState[mode_lib->vba.VoltageLevel];
mode_lib->vba.SOCCLK = mode_lib->vba.SOCCLKPerState[mode_lib->vba.VoltageLevel];
- mode_lib->vba.ReturnBW = locals->ReturnBWPerState[mode_lib->vba.VoltageLevel];
+ mode_lib->vba.ReturnBW = locals->ReturnBWPerState[mode_lib->vba.VoltageLevel][0];
mode_lib->vba.FabricAndDRAMBandwidth = locals->FabricAndDRAMBandwidthPerState[mode_lib->vba.VoltageLevel];
for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) {
if (mode_lib->vba.BlendingAndTiming[k] == k) {
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_rq_dlg_calc_20.c b/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_rq_dlg_calc_20.c
index 2c7455e22a65..ca807846032f 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_rq_dlg_calc_20.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_rq_dlg_calc_20.c
@@ -107,10 +107,10 @@ static unsigned int get_bytes_per_element(enum source_format_class source_format
static bool is_dual_plane(enum source_format_class source_format)
{
- bool ret_val = 0;
+ bool ret_val = false;
if ((source_format == dm_420_8) || (source_format == dm_420_10))
- ret_val = 1;
+ ret_val = true;
return ret_val;
}
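
These hunks are behavior-neutral: with <stdbool.h>, 0/1 and false/true convert identically, so only readability changes. The same predicate written directly; the enum here is abridged to the two formats the function cares about plus one filler:

#include <stdbool.h>

enum source_format_class { dm_444_32, dm_420_8, dm_420_10 }; /* abridged */

/* One boolean expression instead of a flag initialized to 0 and
 * conditionally set to 1. */
static bool is_dual_plane(enum source_format_class f)
{
        return f == dm_420_8 || f == dm_420_10;
}
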
@@ -240,8 +240,8 @@ static void handle_det_buf_split(struct display_mode_lib *mode_lib,
unsigned int swath_bytes_c = 0;
unsigned int full_swath_bytes_packed_l = 0;
unsigned int full_swath_bytes_packed_c = 0;
- bool req128_l = 0;
- bool req128_c = 0;
+ bool req128_l = false;
+ bool req128_c = false;
bool surf_linear = (pipe_src_param.sw_mode == dm_sw_linear);
bool surf_vert = (pipe_src_param.source_scan == dm_vert);
unsigned int log2_swath_height_l = 0;
@@ -264,13 +264,13 @@ static void handle_det_buf_split(struct display_mode_lib *mode_lib,
total_swath_bytes = 2 * full_swath_bytes_packed_l + 2 * full_swath_bytes_packed_c;
if (total_swath_bytes <= detile_buf_size_in_bytes) { //full 256b request
- req128_l = 0;
- req128_c = 0;
+ req128_l = false;
+ req128_c = false;
swath_bytes_l = full_swath_bytes_packed_l;
swath_bytes_c = full_swath_bytes_packed_c;
} else { //128b request (for luma only for yuv420 8bpc)
- req128_l = 1;
- req128_c = 0;
+ req128_l = true;
+ req128_c = false;
swath_bytes_l = full_swath_bytes_packed_l / 2;
swath_bytes_c = full_swath_bytes_packed_c;
}
@@ -280,9 +280,9 @@ static void handle_det_buf_split(struct display_mode_lib *mode_lib,
total_swath_bytes = 2 * full_swath_bytes_packed_l;
if (total_swath_bytes <= detile_buf_size_in_bytes)
- req128_l = 0;
+ req128_l = false;
else
- req128_l = 1;
+ req128_l = true;
swath_bytes_l = total_swath_bytes;
swath_bytes_c = 0;
@@ -670,7 +670,7 @@ static void get_surf_rq_param(struct display_mode_lib *mode_lib,
const display_pipe_source_params_st pipe_src_param,
bool is_chroma)
{
- bool mode_422 = 0;
+ bool mode_422 = false;
unsigned int vp_width = 0;
unsigned int vp_height = 0;
unsigned int data_pitch = 0;
@@ -929,8 +929,7 @@ static void dml20_rq_dlg_get_dlg_params(struct display_mode_lib *mode_lib,
min_dst_y_ttu_vblank = min_ttu_vblank * pclk_freq_in_mhz / (double) htotal;
dlg_vblank_start = interlaced ? (vblank_start / 2) : vblank_start;
- disp_dlg_regs->min_dst_y_next_start = (unsigned int) (((double) dlg_vblank_start
- + min_dst_y_ttu_vblank) * dml_pow(2, 2));
+ disp_dlg_regs->min_dst_y_next_start = (unsigned int) ((double) dlg_vblank_start * dml_pow(2, 2));
ASSERT(disp_dlg_regs->min_dst_y_next_start < (unsigned int) dml_pow(2, 18));
dml_print("DML_DLG: %s: min_dcfclk_mhz = %3.2f\n",
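
min_dst_y_next_start is programmed in a fixed-point format with two fractional bits (the dml_pow(2, 2) scale), and the change stops folding the TTU-derived fractional term into it. A sketch of the encoding; the 2^18 bound mirrors the ASSERT, while the exact register field width is not shown in the diff:

#include <assert.h>

/* Sketch: encode a line number in the u.2 fixed point used by
 * min_dst_y_next_start (scale by 2^2 = 4). */
static unsigned int encode_min_dst_y(double dlg_vblank_start)
{
        unsigned int reg = (unsigned int)(dlg_vblank_start * 4.0);

        assert(reg < (1u << 18));
        return reg;
}
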
@@ -959,7 +958,7 @@ static void dml20_rq_dlg_get_dlg_params(struct display_mode_lib *mode_lib,
// Source
// dcc_en = src.dcc;
dual_plane = is_dual_plane((enum source_format_class)(src->source_format));
- mode_422 = 0; // TODO
+ mode_422 = false; // TODO
access_dir = (src->source_scan == dm_vert); // vp access direction: horizontal or vertical accessed
// bytes_per_element_l = get_bytes_per_element(source_format_class(src.source_format), 0);
// bytes_per_element_c = get_bytes_per_element(source_format_class(src.source_format), 1);
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_rq_dlg_calc_20v2.c b/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_rq_dlg_calc_20v2.c
index 1e6aeb1bd2bf..287b7a0ad108 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_rq_dlg_calc_20v2.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_rq_dlg_calc_20v2.c
@@ -107,10 +107,10 @@ static unsigned int get_bytes_per_element(enum source_format_class source_format
static bool is_dual_plane(enum source_format_class source_format)
{
- bool ret_val = 0;
+ bool ret_val = false;
if ((source_format == dm_420_8) || (source_format == dm_420_10))
- ret_val = 1;
+ ret_val = true;
return ret_val;
}
@@ -240,8 +240,8 @@ static void handle_det_buf_split(struct display_mode_lib *mode_lib,
unsigned int swath_bytes_c = 0;
unsigned int full_swath_bytes_packed_l = 0;
unsigned int full_swath_bytes_packed_c = 0;
- bool req128_l = 0;
- bool req128_c = 0;
+ bool req128_l = false;
+ bool req128_c = false;
bool surf_linear = (pipe_src_param.sw_mode == dm_sw_linear);
bool surf_vert = (pipe_src_param.source_scan == dm_vert);
unsigned int log2_swath_height_l = 0;
@@ -264,13 +264,13 @@ static void handle_det_buf_split(struct display_mode_lib *mode_lib,
total_swath_bytes = 2 * full_swath_bytes_packed_l + 2 * full_swath_bytes_packed_c;
if (total_swath_bytes <= detile_buf_size_in_bytes) { //full 256b request
- req128_l = 0;
- req128_c = 0;
+ req128_l = false;
+ req128_c = false;
swath_bytes_l = full_swath_bytes_packed_l;
swath_bytes_c = full_swath_bytes_packed_c;
} else { //128b request (for luma only for yuv420 8bpc)
- req128_l = 1;
- req128_c = 0;
+ req128_l = true;
+ req128_c = false;
swath_bytes_l = full_swath_bytes_packed_l / 2;
swath_bytes_c = full_swath_bytes_packed_c;
}
@@ -280,9 +280,9 @@ static void handle_det_buf_split(struct display_mode_lib *mode_lib,
total_swath_bytes = 2 * full_swath_bytes_packed_l;
if (total_swath_bytes <= detile_buf_size_in_bytes)
- req128_l = 0;
+ req128_l = false;
else
- req128_l = 1;
+ req128_l = true;
swath_bytes_l = total_swath_bytes;
swath_bytes_c = 0;
@@ -670,7 +670,7 @@ static void get_surf_rq_param(struct display_mode_lib *mode_lib,
const display_pipe_source_params_st pipe_src_param,
bool is_chroma)
{
- bool mode_422 = 0;
+ bool mode_422 = false;
unsigned int vp_width = 0;
unsigned int vp_height = 0;
unsigned int data_pitch = 0;
@@ -959,7 +959,7 @@ static void dml20v2_rq_dlg_get_dlg_params(struct display_mode_lib *mode_lib,
// Source
// dcc_en = src.dcc;
dual_plane = is_dual_plane((enum source_format_class)(src->source_format));
- mode_422 = 0; // TODO
+ mode_422 = false; // TODO
access_dir = (src->source_scan == dm_vert); // vp access direction: horizontal or vertical accessed
// bytes_per_element_l = get_bytes_per_element(source_format_class(src.source_format), 0);
// bytes_per_element_c = get_bytes_per_element(source_format_class(src.source_format), 1);
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn21/display_mode_vba_21.c b/drivers/gpu/drm/amd/display/dc/dml/dcn21/display_mode_vba_21.c
index ba77957aefe3..af35b3bea909 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn21/display_mode_vba_21.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn21/display_mode_vba_21.c
@@ -23,7 +23,6 @@
*
*/
-#ifdef CONFIG_DRM_AMD_DC_DCN2_0
#include "../display_mode_lib.h"
#include "../dml_inline_defs.h"
@@ -198,7 +197,7 @@ static unsigned int CalculateVMAndRowBytes(
unsigned int *meta_row_width,
unsigned int *meta_row_height,
unsigned int *vm_group_bytes,
- long *dpte_group_bytes,
+ unsigned int *dpte_group_bytes,
unsigned int *PixelPTEReqWidth,
unsigned int *PixelPTEReqHeight,
unsigned int *PTERequestSize,
@@ -296,7 +295,7 @@ static void CalculateWatermarksAndDRAMSpeedChangeSupport(
double UrgentOutOfOrderReturn,
double ReturnBW,
bool GPUVMEnable,
- long dpte_group_bytes[],
+ int dpte_group_bytes[],
unsigned int MetaChunkSize,
double UrgentLatency,
double ExtraLatency,
@@ -310,13 +309,13 @@ static void CalculateWatermarksAndDRAMSpeedChangeSupport(
int DPPPerPlane[],
bool DCCEnable[],
double DPPCLK[],
- unsigned int SwathWidthSingleDPPY[],
+ double SwathWidthSingleDPPY[],
unsigned int SwathHeightY[],
double ReadBandwidthPlaneLuma[],
unsigned int SwathHeightC[],
double ReadBandwidthPlaneChroma[],
unsigned int LBBitPerPixel[],
- unsigned int SwathWidthY[],
+ double SwathWidthY[],
double HRatio[],
unsigned int vtaps[],
unsigned int VTAPsChroma[],
@@ -345,7 +344,7 @@ static void CalculateDCFCLKDeepSleep(
double BytePerPixelDETY[],
double BytePerPixelDETC[],
double VRatio[],
- unsigned int SwathWidthY[],
+ double SwathWidthY[],
int DPPPerPlane[],
double HRatio[],
double PixelClock[],
@@ -436,7 +435,7 @@ static void CalculateMetaAndPTETimes(
unsigned int meta_row_height[],
unsigned int meta_req_width[],
unsigned int meta_req_height[],
- long dpte_group_bytes[],
+ int dpte_group_bytes[],
unsigned int PTERequestSizeY[],
unsigned int PTERequestSizeC[],
unsigned int PixelPTEReqWidthY[],
@@ -478,7 +477,7 @@ static double CalculateExtraLatency(
bool HostVMEnable,
int NumberOfActivePlanes,
int NumberOfDPP[],
- long dpte_group_bytes[],
+ int dpte_group_bytes[],
double PercentOfIdealDRAMFabricAndSDPPortBWReceivedAfterUrgLatencyPixelMixedWithVMData,
double PercentOfIdealDRAMFabricAndSDPPortBWReceivedAfterUrgLatencyVMDataOnly,
int HostVMMaxPageTableLevels,
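
The prototype hunks retire `long` for dpte_group_bytes, though the diff leaves a signedness split: `unsigned int *` in CalculateVMAndRowBytes versus `int []` in the other helpers. A reduced illustration of why the width change matters on LP64 targets, where long is 8 bytes but int is 4, so an array of long handed to a callee expecting int[] would be read as interleaved element halves:

#include <stdio.h>

/* Callee matching the new CalculateVMAndRowBytes signature. */
static void fill_groups(unsigned int *dpte_group_bytes, int n)
{
        for (int i = 0; i < n; i++)
                dpte_group_bytes[i] = 2048u; /* arbitrary group size */
}

int main(void)
{
        unsigned int groups[4];

        fill_groups(groups, 4);
        printf("long: %zu bytes, int: %zu bytes\n",
               sizeof(long), sizeof(int));
        return 0;
}
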
@@ -1281,7 +1280,7 @@ static unsigned int CalculateVMAndRowBytes(
unsigned int *meta_row_width,
unsigned int *meta_row_height,
unsigned int *vm_group_bytes,
- long *dpte_group_bytes,
+ unsigned int *dpte_group_bytes,
unsigned int *PixelPTEReqWidth,
unsigned int *PixelPTEReqHeight,
unsigned int *PTERequestSize,
@@ -1339,7 +1338,7 @@ static unsigned int CalculateVMAndRowBytes(
*MetaRowByte = 0;
}
- if (SurfaceTiling == dm_sw_linear || SurfaceTiling == dm_sw_gfx7_2d_thin_gl || SurfaceTiling == dm_sw_gfx7_2d_thin_lvp) {
+ if (SurfaceTiling == dm_sw_linear || SurfaceTiling == dm_sw_gfx7_2d_thin_gl || SurfaceTiling == dm_sw_gfx7_2d_thin_l_vp) {
MacroTileSizeBytes = 256;
MacroTileHeight = BlockHeight256Bytes;
} else if (SurfaceTiling == dm_sw_4kb_s || SurfaceTiling == dm_sw_4kb_s_x
@@ -1684,11 +1683,11 @@ static void DISPCLKDPPCLKDCFCLKDeepSleepPrefetchParametersWatermarksAndPerforman
else
locals->SwathWidthSingleDPPY[k] = mode_lib->vba.ViewportHeight[k];
- if (mode_lib->vba.ODMCombineEnabled[k] == true)
+ if (mode_lib->vba.ODMCombineEnabled[k] == dm_odm_combine_mode_2to1)
MainPlaneDoesODMCombine = true;
for (j = 0; j < mode_lib->vba.NumberOfActivePlanes; ++j)
if (mode_lib->vba.BlendingAndTiming[k] == j
- && mode_lib->vba.ODMCombineEnabled[j] == true)
+ && mode_lib->vba.ODMCombineEnabled[j] == dm_odm_combine_mode_2to1)
MainPlaneDoesODMCombine = true;
if (MainPlaneDoesODMCombine == true)
@@ -2941,12 +2940,12 @@ static void DisplayPipeConfiguration(struct display_mode_lib *mode_lib)
SwathWidth = mode_lib->vba.ViewportHeight[k];
}
- if (mode_lib->vba.ODMCombineEnabled[k] == true) {
+ if (mode_lib->vba.ODMCombineEnabled[k] == dm_odm_combine_mode_2to1) {
MainPlaneDoesODMCombine = true;
}
for (j = 0; j < mode_lib->vba.NumberOfActivePlanes; ++j) {
if (mode_lib->vba.BlendingAndTiming[k] == j
- && mode_lib->vba.ODMCombineEnabled[j] == true) {
+ && mode_lib->vba.ODMCombineEnabled[j] == dm_odm_combine_mode_2to1) {
MainPlaneDoesODMCombine = true;
}
}
@@ -3454,7 +3453,7 @@ void dml21_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
== dm_420_10))
|| (((mode_lib->vba.SurfaceTiling[k] == dm_sw_gfx7_2d_thin_gl
|| mode_lib->vba.SurfaceTiling[k]
- == dm_sw_gfx7_2d_thin_lvp)
+ == dm_sw_gfx7_2d_thin_l_vp)
&& !((mode_lib->vba.SourcePixelFormat[k]
== dm_444_64
|| mode_lib->vba.SourcePixelFormat[k]
@@ -3543,17 +3542,17 @@ void dml21_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
}
}
for (i = 0; i <= mode_lib->vba.soc.num_states; i++) {
- locals->IdealSDPPortBandwidthPerState[i] = dml_min3(
+ locals->IdealSDPPortBandwidthPerState[i][0] = dml_min3(
mode_lib->vba.ReturnBusWidth * mode_lib->vba.DCFCLKPerState[i],
mode_lib->vba.DRAMSpeedPerState[i] * mode_lib->vba.NumberOfChannels
* mode_lib->vba.DRAMChannelWidth,
mode_lib->vba.FabricClockPerState[i]
* mode_lib->vba.FabricDatapathToDCNDataReturn);
if (mode_lib->vba.HostVMEnable == false) {
- locals->ReturnBWPerState[i] = locals->IdealSDPPortBandwidthPerState[i]
+ locals->ReturnBWPerState[i][0] = locals->IdealSDPPortBandwidthPerState[i][0]
* mode_lib->vba.PercentOfIdealDRAMFabricAndSDPPortBWReceivedAfterUrgLatencyPixelDataOnly / 100.0;
} else {
- locals->ReturnBWPerState[i] = locals->IdealSDPPortBandwidthPerState[i]
+ locals->ReturnBWPerState[i][0] = locals->IdealSDPPortBandwidthPerState[i][0]
* mode_lib->vba.PercentOfIdealDRAMFabricAndSDPPortBWReceivedAfterUrgLatencyPixelMixedWithVMData / 100.0;
}
}
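
dcn21 derives its return bandwidth by taking the minimum of three link throughputs and derating it by an efficiency chosen on HostVMEnable (pixel-data-only versus mixed-with-VM). A sketch with dml_min3 spelled out via fmin:

#include <math.h>

/*
 * Sketch of the dcn21 return-bandwidth derivation for one state.
 * Clocks in MHz, bus width in bytes, DRAM speed in MT/s; the caller
 * picks pct_after_urgent per HostVMEnable.
 */
static double return_bw(double return_bus_width, double dcfclk,
                        double dram_speed, int channels, double ch_width,
                        double fabric_clk, double fabric_datapath,
                        double pct_after_urgent)
{
        double sdp = return_bus_width * dcfclk;
        double dram = dram_speed * channels * ch_width;
        double fabric = fabric_clk * fabric_datapath;
        double ideal = fmin(sdp, fmin(dram, fabric)); /* dml_min3 */

        return ideal * pct_after_urgent / 100.0;
}
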
@@ -3590,12 +3589,12 @@ void dml21_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
+ dml_max3(mode_lib->vba.UrgentOutOfOrderReturnPerChannelPixelDataOnly,
mode_lib->vba.UrgentOutOfOrderReturnPerChannelPixelMixedWithVMData,
mode_lib->vba.UrgentOutOfOrderReturnPerChannelVMDataOnly)
- * mode_lib->vba.NumberOfChannels / locals->ReturnBWPerState[i];
- if ((mode_lib->vba.ROBBufferSizeInKByte - mode_lib->vba.PixelChunkSizeInKByte) * 1024.0 / locals->ReturnBWPerState[i]
+ * mode_lib->vba.NumberOfChannels / locals->ReturnBWPerState[i][0];
+ if ((mode_lib->vba.ROBBufferSizeInKByte - mode_lib->vba.PixelChunkSizeInKByte) * 1024.0 / locals->ReturnBWPerState[i][0]
> locals->UrgentRoundTripAndOutOfOrderLatencyPerState[i]) {
- locals->ROBSupport[i] = true;
+ locals->ROBSupport[i][0] = true;
} else {
- locals->ROBSupport[i] = false;
+ locals->ROBSupport[i][0] = false;
}
}
/*Writeback Mode Support Check*/
@@ -3983,7 +3982,7 @@ void dml21_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
}
if (locals->MinDPPCLKUsingSingleDPP[k] * (1.0 + mode_lib->vba.DISPCLKDPPCLKDSCCLKDownSpreading / 100.0) <= mode_lib->vba.MaxDppclkRoundedDownToDFSGranularity
&& locals->SwathWidthYSingleDPP[k] <= locals->MaximumSwathWidth[k]
- && locals->ODMCombineEnablePerState[i][k] == false) {
+ && locals->ODMCombineEnablePerState[i][k] == dm_odm_combine_mode_disabled) {
locals->NoOfDPP[i][j][k] = 1;
locals->RequiredDPPCLK[i][j][k] =
locals->MinDPPCLKUsingSingleDPP[k] * (1.0 + mode_lib->vba.DISPCLKDPPCLKDSCCLKDownSpreading / 100.0);
@@ -4072,16 +4071,16 @@ void dml21_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
/*Viewport Size Check*/
for (i = 0; i <= mode_lib->vba.soc.num_states; i++) {
- locals->ViewportSizeSupport[i] = true;
+ locals->ViewportSizeSupport[i][0] = true;
for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) {
- if (locals->ODMCombineEnablePerState[i][k] == true) {
+ if (locals->ODMCombineEnablePerState[i][k] == dm_odm_combine_mode_2to1) {
if (dml_min(locals->SwathWidthYSingleDPP[k], dml_round(mode_lib->vba.HActive[k] / 2.0 * mode_lib->vba.HRatio[k]))
> locals->MaximumSwathWidth[k]) {
- locals->ViewportSizeSupport[i] = false;
+ locals->ViewportSizeSupport[i][0] = false;
}
} else {
if (locals->SwathWidthYSingleDPP[k] / 2.0 > locals->MaximumSwathWidth[k]) {
- locals->ViewportSizeSupport[i] = false;
+ locals->ViewportSizeSupport[i][0] = false;
}
}
}
@@ -4122,11 +4121,11 @@ void dml21_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
}
for (i = 0; i <= mode_lib->vba.soc.num_states; i++) {
for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) {
- locals->RequiresDSC[i][k] = 0;
+ locals->RequiresDSC[i][k] = false;
locals->RequiresFEC[i][k] = 0;
if (mode_lib->vba.BlendingAndTiming[k] == k) {
if (mode_lib->vba.Output[k] == dm_hdmi) {
- locals->RequiresDSC[i][k] = 0;
+ locals->RequiresDSC[i][k] = false;
locals->RequiresFEC[i][k] = 0;
locals->OutputBppPerState[i][k] = TruncToValidBPP(
dml_min(600.0, mode_lib->vba.PHYCLKPerState[i]) / mode_lib->vba.PixelClockBackEnd[k] * 24,
@@ -4270,8 +4269,7 @@ void dml21_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
mode_lib->vba.DSCFormatFactor = 1;
}
if (locals->RequiresDSC[i][k] == true) {
- if (locals->ODMCombineEnablePerState[i][k]
- == true) {
+ if (locals->ODMCombineEnablePerState[i][k] == dm_odm_combine_mode_2to1) {
if (mode_lib->vba.PixelClockBackEnd[k] / 6.0 / mode_lib->vba.DSCFormatFactor
> (1.0 - mode_lib->vba.DISPCLKDPPCLKDSCCLKDownSpreading / 100.0) * mode_lib->vba.MaxDSCCLK[i]) {
locals->DSCCLKRequiredMoreThanSupported[i] =
@@ -4294,7 +4292,7 @@ void dml21_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
mode_lib->vba.TotalDSCUnitsRequired = 0.0;
for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) {
if (locals->RequiresDSC[i][k] == true) {
- if (locals->ODMCombineEnablePerState[i][k] == true) {
+ if (locals->ODMCombineEnablePerState[i][k] == dm_odm_combine_mode_2to1) {
mode_lib->vba.TotalDSCUnitsRequired =
mode_lib->vba.TotalDSCUnitsRequired + 2.0;
} else {
@@ -4336,7 +4334,7 @@ void dml21_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
mode_lib->vba.bpp = locals->OutputBppPerState[i][k];
}
if (locals->RequiresDSC[i][k] == true && mode_lib->vba.bpp != 0.0) {
- if (locals->ODMCombineEnablePerState[i][k] == false) {
+ if (locals->ODMCombineEnablePerState[i][k] == dm_odm_combine_mode_disabled) {
locals->DSCDelayPerState[i][k] =
dscceComputeDelay(
mode_lib->vba.DSCInputBitPerComponent[k],
@@ -4400,7 +4398,7 @@ void dml21_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) {
locals->RequiredDPPCLKThisState[k] = locals->RequiredDPPCLK[i][j][k];
locals->NoOfDPPThisState[k] = locals->NoOfDPP[i][j][k];
- if (locals->ODMCombineEnablePerState[i][k] == true) {
+ if (locals->ODMCombineEnablePerState[i][k] == dm_odm_combine_mode_2to1) {
locals->SwathWidthYThisState[k] =
dml_min(locals->SwathWidthYSingleDPP[k], dml_round(mode_lib->vba.HActive[k] / 2.0 * mode_lib->vba.HRatio[k]));
} else {
@@ -4452,7 +4450,7 @@ void dml21_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
locals->PSCL_FACTOR,
locals->PSCL_FACTOR_CHROMA,
locals->RequiredDPPCLKThisState,
- &mode_lib->vba.ProjectedDCFCLKDeepSleep);
+ &mode_lib->vba.ProjectedDCFCLKDeepSleep[0][0]);
for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) {
if ((mode_lib->vba.SourcePixelFormat[k] != dm_444_64
@@ -4497,7 +4495,7 @@ void dml21_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
locals->PTERequestSizeC,
locals->dpde0_bytes_per_frame_ub_c,
locals->meta_pte_bytes_per_frame_ub_c);
- locals->PrefetchLinesC[k] = CalculatePrefetchSourceLines(
+ locals->PrefetchLinesC[0][0][k] = CalculatePrefetchSourceLines(
mode_lib,
mode_lib->vba.VRatio[k]/2,
mode_lib->vba.VTAPsChroma[k],
@@ -4512,7 +4510,7 @@ void dml21_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
mode_lib->vba.PDEAndMetaPTEBytesPerFrameC = 0.0;
mode_lib->vba.MetaRowBytesC = 0.0;
mode_lib->vba.DPTEBytesPerRowC = 0.0;
- locals->PrefetchLinesC[k] = 0.0;
+ locals->PrefetchLinesC[0][0][k] = 0.0;
locals->PTEBufferSizeNotExceededC[i][j][k] = true;
locals->PTEBufferSizeInRequestsForLuma = mode_lib->vba.PTEBufferSizeInRequestsLuma + mode_lib->vba.PTEBufferSizeInRequestsChroma;
}
@@ -4553,7 +4551,7 @@ void dml21_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
locals->PTERequestSizeY,
locals->dpde0_bytes_per_frame_ub_l,
locals->meta_pte_bytes_per_frame_ub_l);
- locals->PrefetchLinesY[k] = CalculatePrefetchSourceLines(
+ locals->PrefetchLinesY[0][0][k] = CalculatePrefetchSourceLines(
mode_lib,
mode_lib->vba.VRatio[k],
mode_lib->vba.vtaps[k],
@@ -4563,10 +4561,10 @@ void dml21_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
mode_lib->vba.ViewportYStartY[k],
&locals->PrefillY[k],
&locals->MaxNumSwY[k]);
- locals->PDEAndMetaPTEBytesPerFrame[k] =
+ locals->PDEAndMetaPTEBytesPerFrame[0][0][k] =
mode_lib->vba.PDEAndMetaPTEBytesPerFrameY + mode_lib->vba.PDEAndMetaPTEBytesPerFrameC;
- locals->MetaRowBytes[k] = mode_lib->vba.MetaRowBytesY + mode_lib->vba.MetaRowBytesC;
- locals->DPTEBytesPerRow[k] = mode_lib->vba.DPTEBytesPerRowY + mode_lib->vba.DPTEBytesPerRowC;
+ locals->MetaRowBytes[0][0][k] = mode_lib->vba.MetaRowBytesY + mode_lib->vba.MetaRowBytesC;
+ locals->DPTEBytesPerRow[0][0][k] = mode_lib->vba.DPTEBytesPerRowY + mode_lib->vba.DPTEBytesPerRowC;
CalculateActiveRowBandwidth(
mode_lib->vba.GPUVMEnable,
@@ -4592,7 +4590,7 @@ void dml21_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
mode_lib->vba.PixelChunkSizeInKByte,
locals->TotalNumberOfDCCActiveDPP[i][j],
mode_lib->vba.MetaChunkSize,
- locals->ReturnBWPerState[i],
+ locals->ReturnBWPerState[i][0],
mode_lib->vba.GPUVMEnable,
mode_lib->vba.HostVMEnable,
mode_lib->vba.NumberOfActivePlanes,
@@ -4603,7 +4601,7 @@ void dml21_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
mode_lib->vba.HostVMMaxPageTableLevels,
mode_lib->vba.HostVMCachedPageTableLevels);
- mode_lib->vba.TimeCalc = 24.0 / mode_lib->vba.ProjectedDCFCLKDeepSleep;
+ mode_lib->vba.TimeCalc = 24.0 / mode_lib->vba.ProjectedDCFCLKDeepSleep[0][0];
for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) {
if (mode_lib->vba.BlendingAndTiming[k] == k) {
if (mode_lib->vba.WritebackEnable[k] == true) {
@@ -4645,15 +4643,15 @@ void dml21_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
}
}
}
- mode_lib->vba.MaxMaxVStartup = 0;
+ mode_lib->vba.MaxMaxVStartup[0][0] = 0;
for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) {
- locals->MaximumVStartup[k] = mode_lib->vba.VTotal[k] - mode_lib->vba.VActive[k]
+ locals->MaximumVStartup[0][0][k] = mode_lib->vba.VTotal[k] - mode_lib->vba.VActive[k]
- dml_max(1.0, dml_ceil(locals->WritebackDelay[i][k] / (mode_lib->vba.HTotal[k] / mode_lib->vba.PixelClock[k]), 1.0));
- mode_lib->vba.MaxMaxVStartup = dml_max(mode_lib->vba.MaxMaxVStartup, locals->MaximumVStartup[k]);
+ mode_lib->vba.MaxMaxVStartup[0][0] = dml_max(mode_lib->vba.MaxMaxVStartup[0][0], locals->MaximumVStartup[0][0][k]);
}
mode_lib->vba.NextPrefetchMode = mode_lib->vba.MinPrefetchMode;
- mode_lib->vba.NextMaxVStartup = mode_lib->vba.MaxMaxVStartup;
+ mode_lib->vba.NextMaxVStartup = mode_lib->vba.MaxMaxVStartup[0][0];
do {
mode_lib->vba.PrefetchMode[i][j] = mode_lib->vba.NextPrefetchMode;
mode_lib->vba.MaxVStartup = mode_lib->vba.NextMaxVStartup;
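
The retry loop now seeds NextMaxVStartup from the widened MaxMaxVStartup[0][0]: it walks vstartup down while prefetch fails, and once vstartup is exhausted (or at most 13) it resets vstartup and advances the prefetch mode. A control-flow sketch; prefetch_fits stands in for the PrefetchSupported/VRatioInPrefetchSupported evaluation, and the lines-too-large condition is elided into a comment:

#include <stdbool.h>

/* Control-flow sketch of the prefetch retry search. */
static void vstartup_search(int max_max_vstartup, int min_mode, int max_mode,
                            bool (*prefetch_fits)(int vstartup, int mode))
{
        int mode = min_mode;
        int vstartup = max_max_vstartup;
        bool ok;

        do {
                ok = prefetch_fits(vstartup, mode);
                if (vstartup <= 13 /* || no VM/row line was too large */) {
                        vstartup = max_max_vstartup; /* reset, next mode */
                        mode++;
                } else {
                        vstartup--;
                }
        } while (!ok && (vstartup != max_max_vstartup || mode < max_mode));
}
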
@@ -4694,7 +4692,7 @@ void dml21_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
myPipe.DPPCLK = locals->RequiredDPPCLK[i][j][k];
myPipe.DISPCLK = locals->RequiredDISPCLK[i][j];
myPipe.PixelClock = mode_lib->vba.PixelClock[k];
- myPipe.DCFCLKDeepSleep = mode_lib->vba.ProjectedDCFCLKDeepSleep;
+ myPipe.DCFCLKDeepSleep = mode_lib->vba.ProjectedDCFCLKDeepSleep[0][0];
myPipe.DPPPerPlane = locals->NoOfDPP[i][j][k];
myPipe.ScalerEnabled = mode_lib->vba.ScalerEnabled[k];
myPipe.SourceScan = mode_lib->vba.SourceScan[k];
@@ -4728,8 +4726,8 @@ void dml21_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
locals->SwathWidthYThisState[k] / mode_lib->vba.HRatio[k],
mode_lib->vba.OutputFormat[k],
mode_lib->vba.MaxInterDCNTileRepeaters,
- dml_min(mode_lib->vba.MaxVStartup, locals->MaximumVStartup[k]),
- locals->MaximumVStartup[k],
+ dml_min(mode_lib->vba.MaxVStartup, locals->MaximumVStartup[0][0][k]),
+ locals->MaximumVStartup[0][0][k],
mode_lib->vba.GPUVMMaxPageTableLevels,
mode_lib->vba.GPUVMEnable,
&myHostVM,
@@ -4740,15 +4738,15 @@ void dml21_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
mode_lib->vba.UrgentLatency,
mode_lib->vba.ExtraLatency,
mode_lib->vba.TimeCalc,
- locals->PDEAndMetaPTEBytesPerFrame[k],
- locals->MetaRowBytes[k],
- locals->DPTEBytesPerRow[k],
- locals->PrefetchLinesY[k],
+ locals->PDEAndMetaPTEBytesPerFrame[0][0][k],
+ locals->MetaRowBytes[0][0][k],
+ locals->DPTEBytesPerRow[0][0][k],
+ locals->PrefetchLinesY[0][0][k],
locals->SwathWidthYThisState[k],
locals->BytePerPixelInDETY[k],
locals->PrefillY[k],
locals->MaxNumSwY[k],
- locals->PrefetchLinesC[k],
+ locals->PrefetchLinesC[0][0][k],
locals->BytePerPixelInDETC[k],
locals->PrefillC[k],
locals->MaxNumSwC[k],
@@ -4837,14 +4835,14 @@ void dml21_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
+ locals->RequiredPrefetchPixelDataBWChroma[i][j][k] * locals->UrgentBurstFactorChromaPre[k]
+ locals->cursor_bw_pre[k] * locals->UrgentBurstFactorCursorPre[k]);
}
- locals->BandwidthWithoutPrefetchSupported[i] = true;
- if (mode_lib->vba.MaximumReadBandwidthWithoutPrefetch > locals->ReturnBWPerState[i]
+ locals->BandwidthWithoutPrefetchSupported[i][0] = true;
+ if (mode_lib->vba.MaximumReadBandwidthWithoutPrefetch > locals->ReturnBWPerState[i][0]
|| locals->NotEnoughUrgentLatencyHiding == 1) {
- locals->BandwidthWithoutPrefetchSupported[i] = false;
+ locals->BandwidthWithoutPrefetchSupported[i][0] = false;
}
locals->PrefetchSupported[i][j] = true;
- if (mode_lib->vba.MaximumReadBandwidthWithPrefetch > locals->ReturnBWPerState[i]
+ if (mode_lib->vba.MaximumReadBandwidthWithPrefetch > locals->ReturnBWPerState[i][0]
|| locals->NotEnoughUrgentLatencyHiding == 1
|| locals->NotEnoughUrgentLatencyHidingPre == 1) {
locals->PrefetchSupported[i][j] = false;
@@ -4873,17 +4871,17 @@ void dml21_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
}
if (mode_lib->vba.MaxVStartup <= 13 || mode_lib->vba.AnyLinesForVMOrRowTooLarge == false) {
- mode_lib->vba.NextMaxVStartup = mode_lib->vba.MaxMaxVStartup;
+ mode_lib->vba.NextMaxVStartup = mode_lib->vba.MaxMaxVStartup[0][0];
mode_lib->vba.NextPrefetchMode = mode_lib->vba.NextPrefetchMode + 1;
} else {
mode_lib->vba.NextMaxVStartup = mode_lib->vba.NextMaxVStartup - 1;
}
} while ((locals->PrefetchSupported[i][j] != true || locals->VRatioInPrefetchSupported[i][j] != true)
- && (mode_lib->vba.NextMaxVStartup != mode_lib->vba.MaxMaxVStartup
+ && (mode_lib->vba.NextMaxVStartup != mode_lib->vba.MaxMaxVStartup[0][0]
|| mode_lib->vba.NextPrefetchMode < mode_lib->vba.MaxPrefetchMode));
if (locals->PrefetchSupported[i][j] == true && locals->VRatioInPrefetchSupported[i][j] == true) {
- mode_lib->vba.BandwidthAvailableForImmediateFlip = locals->ReturnBWPerState[i];
+ mode_lib->vba.BandwidthAvailableForImmediateFlip = locals->ReturnBWPerState[i][0];
for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) {
mode_lib->vba.BandwidthAvailableForImmediateFlip = mode_lib->vba.BandwidthAvailableForImmediateFlip
- dml_max(locals->ReadBandwidthLuma[k] * locals->UrgentBurstFactorLuma[k]
@@ -4896,7 +4894,7 @@ void dml21_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
mode_lib->vba.TotImmediateFlipBytes = 0.0;
for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) {
mode_lib->vba.TotImmediateFlipBytes = mode_lib->vba.TotImmediateFlipBytes
- + locals->PDEAndMetaPTEBytesPerFrame[k] + locals->MetaRowBytes[k] + locals->DPTEBytesPerRow[k];
+ + locals->PDEAndMetaPTEBytesPerFrame[0][0][k] + locals->MetaRowBytes[0][0][k] + locals->DPTEBytesPerRow[0][0][k];
}
for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) {
@@ -4911,9 +4909,9 @@ void dml21_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
mode_lib->vba.HostVMMaxPageTableLevels,
mode_lib->vba.HostVMCachedPageTableLevels,
mode_lib->vba.GPUVMEnable,
- locals->PDEAndMetaPTEBytesPerFrame[k],
- locals->MetaRowBytes[k],
- locals->DPTEBytesPerRow[k],
+ locals->PDEAndMetaPTEBytesPerFrame[0][0][k],
+ locals->MetaRowBytes[0][0][k],
+ locals->DPTEBytesPerRow[0][0][k],
mode_lib->vba.BandwidthAvailableForImmediateFlip,
mode_lib->vba.TotImmediateFlipBytes,
mode_lib->vba.SourcePixelFormat[k],
@@ -4944,7 +4942,7 @@ void dml21_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
}
locals->ImmediateFlipSupportedForState[i][j] = true;
if (mode_lib->vba.total_dcn_read_bw_with_flip
- > locals->ReturnBWPerState[i]) {
+ > locals->ReturnBWPerState[i][0]) {
locals->ImmediateFlipSupportedForState[i][j] = false;
}
for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) {
@@ -4971,7 +4969,7 @@ void dml21_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
mode_lib->vba.WritebackInterfaceChromaBufferSize,
mode_lib->vba.DCFCLKPerState[i],
mode_lib->vba.UrgentOutOfOrderReturnPerChannel * mode_lib->vba.NumberOfChannels,
- locals->ReturnBWPerState[i],
+ locals->ReturnBWPerState[i][0],
mode_lib->vba.GPUVMEnable,
locals->dpte_group_bytes,
mode_lib->vba.MetaChunkSize,
@@ -4983,7 +4981,7 @@ void dml21_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
mode_lib->vba.DRAMClockChangeLatency,
mode_lib->vba.SRExitTime,
mode_lib->vba.SREnterPlusExitTime,
- mode_lib->vba.ProjectedDCFCLKDeepSleep,
+ mode_lib->vba.ProjectedDCFCLKDeepSleep[0][0],
locals->NoOfDPPThisState,
mode_lib->vba.DCCEnable,
locals->RequiredDPPCLKThisState,
@@ -5026,8 +5024,8 @@ void dml21_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
MaxTotalVActiveRDBandwidth = MaxTotalVActiveRDBandwidth + locals->ReadBandwidth[k];
}
for (i = 0; i <= mode_lib->vba.soc.num_states; ++i) {
- locals->MaxTotalVerticalActiveAvailableBandwidth[i] = dml_min(
- locals->IdealSDPPortBandwidthPerState[i] *
+ locals->MaxTotalVerticalActiveAvailableBandwidth[i][0] = dml_min(
+ locals->IdealSDPPortBandwidthPerState[i][0] *
mode_lib->vba.MaxAveragePercentOfIdealSDPPortBWDisplayCanUseInNormalSystemOperation
/ 100.0, mode_lib->vba.DRAMSpeedPerState[i] *
mode_lib->vba.NumberOfChannels *
@@ -5035,10 +5033,10 @@ void dml21_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
mode_lib->vba.MaxAveragePercentOfIdealDRAMBWDisplayCanUseInNormalSystemOperation
/ 100.0);
- if (MaxTotalVActiveRDBandwidth <= locals->MaxTotalVerticalActiveAvailableBandwidth[i]) {
- locals->TotalVerticalActiveBandwidthSupport[i] = true;
+ if (MaxTotalVActiveRDBandwidth <= locals->MaxTotalVerticalActiveAvailableBandwidth[i][0]) {
+ locals->TotalVerticalActiveBandwidthSupport[i][0] = true;
} else {
- locals->TotalVerticalActiveBandwidthSupport[i] = false;
+ locals->TotalVerticalActiveBandwidthSupport[i][0] = false;
}
}
}
@@ -5117,7 +5115,7 @@ void dml21_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
status = DML_FAIL_SCALE_RATIO_TAP;
} else if (mode_lib->vba.SourceFormatPixelAndScanSupport != true) {
status = DML_FAIL_SOURCE_PIXEL_FORMAT;
- } else if (locals->ViewportSizeSupport[i] != true) {
+ } else if (locals->ViewportSizeSupport[i][0] != true) {
status = DML_FAIL_VIEWPORT_SIZE;
} else if (locals->DIOSupport[i] != true) {
status = DML_FAIL_DIO_SUPPORT;
@@ -5125,7 +5123,7 @@ void dml21_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
status = DML_FAIL_NOT_ENOUGH_DSC;
} else if (locals->DSCCLKRequiredMoreThanSupported[i] != false) {
status = DML_FAIL_DSC_CLK_REQUIRED;
- } else if (locals->ROBSupport[i] != true) {
+ } else if (locals->ROBSupport[i][0] != true) {
status = DML_FAIL_REORDERING_BUFFER;
} else if (locals->DISPCLK_DPPCLK_Support[i][j] != true) {
status = DML_FAIL_DISPCLK_DPPCLK;
@@ -5143,7 +5141,7 @@ void dml21_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
status = DML_FAIL_CURSOR_SUPPORT;
} else if (mode_lib->vba.PitchSupport != true) {
status = DML_FAIL_PITCH_SUPPORT;
- } else if (locals->TotalVerticalActiveBandwidthSupport[i] != true) {
+ } else if (locals->TotalVerticalActiveBandwidthSupport[i][0] != true) {
status = DML_FAIL_TOTAL_V_ACTIVE_BW;
} else if (locals->PTEBufferSizeNotExceeded[i][j] != true) {
status = DML_FAIL_PTE_BUFFER_SIZE;
@@ -5199,13 +5197,13 @@ void dml21_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
mode_lib->vba.DRAMSpeed = mode_lib->vba.DRAMSpeedPerState[mode_lib->vba.VoltageLevel];
mode_lib->vba.FabricClock = mode_lib->vba.FabricClockPerState[mode_lib->vba.VoltageLevel];
mode_lib->vba.SOCCLK = mode_lib->vba.SOCCLKPerState[mode_lib->vba.VoltageLevel];
- mode_lib->vba.ReturnBW = locals->ReturnBWPerState[mode_lib->vba.VoltageLevel];
+ mode_lib->vba.ReturnBW = locals->ReturnBWPerState[mode_lib->vba.VoltageLevel][0];
for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) {
if (mode_lib->vba.BlendingAndTiming[k] == k) {
mode_lib->vba.ODMCombineEnabled[k] =
locals->ODMCombineEnablePerState[mode_lib->vba.VoltageLevel][k];
} else {
- mode_lib->vba.ODMCombineEnabled[k] = 0;
+ mode_lib->vba.ODMCombineEnabled[k] = false;
}
mode_lib->vba.DSCEnabled[k] =
locals->RequiresDSC[mode_lib->vba.VoltageLevel][k];
@@ -5228,7 +5226,7 @@ static void CalculateWatermarksAndDRAMSpeedChangeSupport(
double UrgentOutOfOrderReturn,
double ReturnBW,
bool GPUVMEnable,
- long dpte_group_bytes[],
+ int dpte_group_bytes[],
unsigned int MetaChunkSize,
double UrgentLatency,
double ExtraLatency,
@@ -5242,13 +5240,13 @@ static void CalculateWatermarksAndDRAMSpeedChangeSupport(
int DPPPerPlane[],
bool DCCEnable[],
double DPPCLK[],
- unsigned int SwathWidthSingleDPPY[],
+ double SwathWidthSingleDPPY[],
unsigned int SwathHeightY[],
double ReadBandwidthPlaneLuma[],
unsigned int SwathHeightC[],
double ReadBandwidthPlaneChroma[],
unsigned int LBBitPerPixel[],
- unsigned int SwathWidthY[],
+ double SwathWidthY[],
double HRatio[],
unsigned int vtaps[],
unsigned int VTAPsChroma[],
@@ -5504,7 +5502,7 @@ static void CalculateDCFCLKDeepSleep(
double BytePerPixelDETY[],
double BytePerPixelDETC[],
double VRatio[],
- unsigned int SwathWidthY[],
+ double SwathWidthY[],
int DPPPerPlane[],
double HRatio[],
double PixelClock[],
@@ -5832,7 +5830,7 @@ static void CalculateMetaAndPTETimes(
unsigned int meta_row_height[],
unsigned int meta_req_width[],
unsigned int meta_req_height[],
- long dpte_group_bytes[],
+ int dpte_group_bytes[],
unsigned int PTERequestSizeY[],
unsigned int PTERequestSizeC[],
unsigned int PixelPTEReqWidthY[],
@@ -6088,7 +6086,7 @@ static double CalculateExtraLatency(
bool HostVMEnable,
int NumberOfActivePlanes,
int NumberOfDPP[],
- long dpte_group_bytes[],
+ int dpte_group_bytes[],
double PercentOfIdealDRAMFabricAndSDPPortBWReceivedAfterUrgLatencyPixelMixedWithVMData,
double PercentOfIdealDRAMFabricAndSDPPortBWReceivedAfterUrgLatencyVMDataOnly,
int HostVMMaxPageTableLevels,
@@ -6126,4 +6124,3 @@ static double CalculateExtraLatency(
return CalculateExtraLatency;
}
-#endif
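
The [k] -> [0][0][k] conversions throughout the hunks above track the array retyping made later in this patch in display_mode_vba.h, where the per-plane mode-support scratch arrays gain two leading dimensions. A minimal sketch of the indexing this implies, assuming (as the declarations suggest) that the first index is the voltage state and the second the MPC-combine setting:

    /* Shapes as retyped in display_mode_vba.h by this patch; the leading
     * dimensions appear to be voltage state and MPC-combine setting. */
    double PrefetchLinesY[DC__VOLTAGE_STATES + 1][2][DC__NUM_DPP__MAX];
    double ReturnBWPerState[DC__VOLTAGE_STATES + 1][2];

    /* dml21 appears to reuse only the first slot(s), hence the fixed
     * [0][0][k] and [i][0] accesses in the hunks above. */
    lines = locals->PrefetchLinesY[0][0][k];
    bw    = locals->ReturnBWPerState[i][0];
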
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn21/display_rq_dlg_calc_21.c b/drivers/gpu/drm/amd/display/dc/dml/dcn21/display_rq_dlg_calc_21.c
index a1f207cbb966..a38baa73d484 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn21/display_rq_dlg_calc_21.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn21/display_rq_dlg_calc_21.c
@@ -23,7 +23,6 @@
*
*/
-#ifdef CONFIG_DRM_AMD_DC_DCN2_0
#include "../display_mode_lib.h"
#include "../display_mode_vba.h"
@@ -83,10 +82,10 @@ static unsigned int get_bytes_per_element(enum source_format_class source_format
static bool is_dual_plane(enum source_format_class source_format)
{
- bool ret_val = 0;
+ bool ret_val = false;
if ((source_format == dm_420_8) || (source_format == dm_420_10))
- ret_val = 1;
+ ret_val = true;
return ret_val;
}
@@ -223,8 +222,8 @@ static void handle_det_buf_split(
unsigned int swath_bytes_c = 0;
unsigned int full_swath_bytes_packed_l = 0;
unsigned int full_swath_bytes_packed_c = 0;
- bool req128_l = 0;
- bool req128_c = 0;
+ bool req128_l = false;
+ bool req128_c = false;
bool surf_linear = (pipe_src_param.sw_mode == dm_sw_linear);
bool surf_vert = (pipe_src_param.source_scan == dm_vert);
unsigned int log2_swath_height_l = 0;
@@ -249,13 +248,13 @@ static void handle_det_buf_split(
total_swath_bytes = 2 * full_swath_bytes_packed_l + 2 * full_swath_bytes_packed_c;
if (total_swath_bytes <= detile_buf_size_in_bytes) { //full 256b request
- req128_l = 0;
- req128_c = 0;
+ req128_l = false;
+ req128_c = false;
swath_bytes_l = full_swath_bytes_packed_l;
swath_bytes_c = full_swath_bytes_packed_c;
} else { //128b request (for luma only for yuv420 8bpc)
- req128_l = 1;
- req128_c = 0;
+ req128_l = true;
+ req128_c = false;
swath_bytes_l = full_swath_bytes_packed_l / 2;
swath_bytes_c = full_swath_bytes_packed_c;
}
@@ -265,9 +264,9 @@ static void handle_det_buf_split(
total_swath_bytes = 2 * full_swath_bytes_packed_l;
if (total_swath_bytes <= detile_buf_size_in_bytes)
- req128_l = 0;
+ req128_l = false;
else
- req128_l = 1;
+ req128_l = true;
swath_bytes_l = total_swath_bytes;
swath_bytes_c = 0;
@@ -680,7 +679,7 @@ static void get_surf_rq_param(
const display_pipe_params_st pipe_param,
bool is_chroma)
{
- bool mode_422 = 0;
+ bool mode_422 = false;
unsigned int vp_width = 0;
unsigned int vp_height = 0;
unsigned int data_pitch = 0;
@@ -1011,7 +1010,7 @@ static void dml_rq_dlg_get_dlg_params(
// Source
// dcc_en = src.dcc;
dual_plane = is_dual_plane((enum source_format_class) (src->source_format));
- mode_422 = 0; // FIXME
+ mode_422 = false; // FIXME
access_dir = (src->source_scan == dm_vert); // vp access direction: horizontal or vertical accessed
// bytes_per_element_l = get_bytes_per_element(source_format_class(src.source_format), 0);
// bytes_per_element_c = get_bytes_per_element(source_format_class(src.source_format), 1);
@@ -1523,8 +1522,8 @@ static void dml_rq_dlg_get_dlg_params(
disp_dlg_regs->refcyc_per_vm_group_vblank = get_refcyc_per_vm_group_vblank(mode_lib, e2e_pipe_param, num_pipes, pipe_idx) * refclk_freq_in_mhz;
disp_dlg_regs->refcyc_per_vm_group_flip = get_refcyc_per_vm_group_flip(mode_lib, e2e_pipe_param, num_pipes, pipe_idx) * refclk_freq_in_mhz;
- disp_dlg_regs->refcyc_per_vm_req_vblank = get_refcyc_per_vm_req_vblank(mode_lib, e2e_pipe_param, num_pipes, pipe_idx) * refclk_freq_in_mhz;;
- disp_dlg_regs->refcyc_per_vm_req_flip = get_refcyc_per_vm_req_flip(mode_lib, e2e_pipe_param, num_pipes, pipe_idx) * refclk_freq_in_mhz;;
+ disp_dlg_regs->refcyc_per_vm_req_vblank = get_refcyc_per_vm_req_vblank(mode_lib, e2e_pipe_param, num_pipes, pipe_idx) * refclk_freq_in_mhz;
+ disp_dlg_regs->refcyc_per_vm_req_flip = get_refcyc_per_vm_req_flip(mode_lib, e2e_pipe_param, num_pipes, pipe_idx) * refclk_freq_in_mhz;
// Clamp to max for now
if (disp_dlg_regs->refcyc_per_vm_group_vblank >= (unsigned int)dml_pow(2, 23))
@@ -1820,4 +1819,3 @@ static void calculate_ttu_cursor(
}
}
-#endif
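
The bool cleanups above do not change the decision handle_det_buf_split() makes, which picks the request size from whether both planes' swaths fit in the detile buffer. A standalone sketch of the dual-plane branch, using the names from the hunks above:

    /* Full 256B requests when two swaths per plane fit in the detile
     * buffer; otherwise fall back to 128B requests for luma only. */
    total_swath_bytes = 2 * full_swath_bytes_packed_l + 2 * full_swath_bytes_packed_c;
    if (total_swath_bytes <= detile_buf_size_in_bytes) {
            req128_l = false;
            req128_c = false;
            swath_bytes_l = full_swath_bytes_packed_l;
            swath_bytes_c = full_swath_bytes_packed_c;
    } else {
            req128_l = true;        /* halve luma request size */
            req128_c = false;
            swath_bytes_l = full_swath_bytes_packed_l / 2;
            swath_bytes_c = full_swath_bytes_packed_c;
    }
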
diff --git a/drivers/gpu/drm/amd/display/dc/dml/display_mode_enums.h b/drivers/gpu/drm/amd/display/dc/dml/display_mode_enums.h
index 1c97083b8d0b..bfc2f39bd1ef 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/display_mode_enums.h
+++ b/drivers/gpu/drm/amd/display/dc/dml/display_mode_enums.h
@@ -85,7 +85,7 @@ enum dm_swizzle_mode {
dm_sw_var_s_x = 29,
dm_sw_var_d_x = 30,
dm_sw_64kb_r_x,
- dm_sw_gfx7_2d_thin_lvp,
+ dm_sw_gfx7_2d_thin_l_vp,
dm_sw_gfx7_2d_thin_gl,
};
enum lb_depth {
@@ -119,6 +119,10 @@ enum mpc_combine_affinity {
dm_mpc_never
};
+enum RequestType {
+ REQ_256Bytes, REQ_128BytesNonContiguous, REQ_128BytesContiguous, REQ_NA
+};
+
enum self_refresh_affinity {
dm_try_to_allow_self_refresh_and_mclk_switch,
dm_allow_self_refresh_and_mclk_switch,
@@ -135,9 +139,7 @@ enum dm_validation_status {
DML_FAIL_DIO_SUPPORT,
DML_FAIL_NOT_ENOUGH_DSC,
DML_FAIL_DSC_CLK_REQUIRED,
-#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT
DML_FAIL_DSC_VALIDATION_FAILURE,
-#endif
DML_FAIL_URGENT_LATENCY,
DML_FAIL_REORDERING_BUFFER,
DML_FAIL_DISPCLK_DPPCLK,
@@ -167,4 +169,16 @@ enum odm_combine_mode {
dm_odm_combine_mode_4to1,
};
+enum odm_combine_policy {
+ dm_odm_combine_policy_dal,
+ dm_odm_combine_policy_none,
+ dm_odm_combine_policy_2to1,
+ dm_odm_combine_policy_4to1,
+};
+
+enum immediate_flip_requirement {
+ dm_immediate_flip_not_required,
+ dm_immediate_flip_required,
+};
+
#endif
diff --git a/drivers/gpu/drm/amd/display/dc/dml/display_mode_lib.c b/drivers/gpu/drm/amd/display/dc/dml/display_mode_lib.c
index 704efefdcba8..2689401a03a3 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/display_mode_lib.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/display_mode_lib.c
@@ -25,18 +25,13 @@
#include "display_mode_lib.h"
#include "dc_features.h"
-#if defined(CONFIG_DRM_AMD_DC_DCN2_0)
#include "dcn20/display_mode_vba_20.h"
#include "dcn20/display_rq_dlg_calc_20.h"
#include "dcn20/display_mode_vba_20v2.h"
#include "dcn20/display_rq_dlg_calc_20v2.h"
-#endif
-#ifdef CONFIG_DRM_AMD_DC_DCN2_1
#include "dcn21/display_mode_vba_21.h"
#include "dcn21/display_rq_dlg_calc_21.h"
-#endif
-#if defined(CONFIG_DRM_AMD_DC_DCN2_0)
const struct dml_funcs dml20_funcs = {
.validate = dml20_ModeSupportAndSystemConfigurationFull,
.recalculate = dml20_recalculate,
@@ -50,16 +45,13 @@ const struct dml_funcs dml20v2_funcs = {
.rq_dlg_get_dlg_reg = dml20v2_rq_dlg_get_dlg_reg,
.rq_dlg_get_rq_reg = dml20v2_rq_dlg_get_rq_reg
};
-#endif
-#ifdef CONFIG_DRM_AMD_DC_DCN2_1
const struct dml_funcs dml21_funcs = {
.validate = dml21_ModeSupportAndSystemConfigurationFull,
.recalculate = dml21_recalculate,
.rq_dlg_get_dlg_reg = dml21_rq_dlg_get_dlg_reg,
.rq_dlg_get_rq_reg = dml21_rq_dlg_get_rq_reg
};
-#endif
void dml_init_instance(struct display_mode_lib *lib,
const struct _vcs_dpi_soc_bounding_box_st *soc_bb,
@@ -70,19 +62,15 @@ void dml_init_instance(struct display_mode_lib *lib,
lib->ip = *ip_params;
lib->project = project;
switch (project) {
-#ifdef CONFIG_DRM_AMD_DC_DCN2_0
case DML_PROJECT_NAVI10:
lib->funcs = dml20_funcs;
break;
case DML_PROJECT_NAVI10v2:
lib->funcs = dml20v2_funcs;
break;
-#endif
-#ifdef CONFIG_DRM_AMD_DC_DCN2_1
case DML_PROJECT_DCN21:
lib->funcs = dml21_funcs;
break;
-#endif
default:
break;
diff --git a/drivers/gpu/drm/amd/display/dc/dml/display_mode_lib.h b/drivers/gpu/drm/amd/display/dc/dml/display_mode_lib.h
index d8c59aa356b6..cf2758ca5b02 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/display_mode_lib.h
+++ b/drivers/gpu/drm/amd/display/dc/dml/display_mode_lib.h
@@ -27,20 +27,14 @@
#include "dml_common_defs.h"
-#ifdef CONFIG_DRM_AMD_DC_DCN2_0
#include "display_mode_vba.h"
-#endif
enum dml_project {
DML_PROJECT_UNDEFINED,
DML_PROJECT_RAVEN1,
-#ifdef CONFIG_DRM_AMD_DC_DCN2_0
DML_PROJECT_NAVI10,
DML_PROJECT_NAVI10v2,
-#endif
-#ifdef CONFIG_DRM_AMD_DC_DCN2_1
DML_PROJECT_DCN21,
-#endif
};
struct display_mode_lib;
@@ -70,9 +64,7 @@ struct display_mode_lib {
struct _vcs_dpi_ip_params_st ip;
struct _vcs_dpi_soc_bounding_box_st soc;
enum dml_project project;
-#ifdef CONFIG_DRM_AMD_DC_DCN2_0
struct vba_vars_st vba;
-#endif
struct dal_logger *logger;
struct dml_funcs funcs;
};
diff --git a/drivers/gpu/drm/amd/display/dc/dml/display_mode_structs.h b/drivers/gpu/drm/amd/display/dc/dml/display_mode_structs.h
index cfacd6027467..658f81e757e9 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/display_mode_structs.h
+++ b/drivers/gpu/drm/amd/display/dc/dml/display_mode_structs.h
@@ -63,6 +63,7 @@ struct _vcs_dpi_voltage_scaling_st {
double dispclk_mhz;
double phyclk_mhz;
double dppclk_mhz;
+ double dtbclk_mhz;
};
struct _vcs_dpi_soc_bounding_box_st {
@@ -99,6 +100,7 @@ struct _vcs_dpi_soc_bounding_box_st {
unsigned int num_chans;
unsigned int vmm_page_size_bytes;
unsigned int hostvm_min_page_size_bytes;
+ unsigned int gpuvm_min_page_size_bytes;
double dram_clock_change_latency_us;
double dummy_pstate_latency_us;
double writeback_dram_clock_change_latency_us;
@@ -112,6 +114,7 @@ struct _vcs_dpi_soc_bounding_box_st {
bool do_urgent_latency_adjustment;
double urgent_latency_adjustment_fabric_clock_component_us;
double urgent_latency_adjustment_fabric_clock_reference_mhz;
+ bool disable_dram_clock_change_vactive_support;
};
struct _vcs_dpi_ip_params_st {
@@ -145,7 +148,6 @@ struct _vcs_dpi_ip_params_st {
unsigned int writeback_interface_buffer_size_kbytes;
unsigned int writeback_line_buffer_buffer_size;
-#ifdef CONFIG_DRM_AMD_DC_DCN2_0
unsigned int writeback_10bpc420_supported;
double writeback_max_hscl_ratio;
double writeback_max_vscl_ratio;
@@ -155,7 +157,6 @@ struct _vcs_dpi_ip_params_st {
unsigned int writeback_max_vscl_taps;
unsigned int writeback_line_buffer_luma_buffer_size;
unsigned int writeback_line_buffer_chroma_buffer_size;
-#endif
unsigned int max_page_table_levels;
unsigned int max_num_dpp;
@@ -214,6 +215,7 @@ struct _vcs_dpi_display_pipe_source_params_st {
int source_format;
unsigned char dcc;
unsigned int dcc_rate;
+ unsigned int dcc_rate_chroma;
unsigned char dcc_use_global;
unsigned char vm;
bool gpuvm; // gpuvm enabled
@@ -225,6 +227,10 @@ struct _vcs_dpi_display_pipe_source_params_st {
int source_scan;
int sw_mode;
int macro_tile_size;
+ unsigned int surface_width_y;
+ unsigned int surface_height_y;
+ unsigned int surface_width_c;
+ unsigned int surface_height_c;
unsigned int viewport_width;
unsigned int viewport_height;
unsigned int viewport_y_y;
@@ -277,6 +283,7 @@ struct _vcs_dpi_display_output_params_st {
int output_type;
int output_format;
int dsc_slices;
+ int max_audio_sample_rate;
struct writeback_st wb;
};
@@ -322,7 +329,7 @@ struct _vcs_dpi_display_pipe_dest_params_st {
double pixel_rate_mhz;
unsigned char synchronized_vblank_all_planes;
unsigned char otg_inst;
- unsigned char odm_combine;
+ unsigned int odm_combine;
unsigned char use_maximum_vstartup;
unsigned int vtotal_max;
unsigned int vtotal_min;
@@ -401,6 +408,7 @@ struct _vcs_dpi_display_rq_misc_params_st {
struct _vcs_dpi_display_rq_params_st {
unsigned char yuv420;
unsigned char yuv420_10bpc;
+ unsigned char rgbe_alpha;
display_rq_misc_params_st misc;
display_rq_sizing_params_st sizing;
display_rq_dlg_params_st dlg;
diff --git a/drivers/gpu/drm/amd/display/dc/dml/display_mode_vba.c b/drivers/gpu/drm/amd/display/dc/dml/display_mode_vba.c
index 7f9a5621922f..b3c96d9b472f 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/display_mode_vba.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/display_mode_vba.c
@@ -23,7 +23,6 @@
*
*/
-#ifdef CONFIG_DRM_AMD_DC_DCN2_0
#include "display_mode_lib.h"
#include "display_mode_vba.h"
@@ -222,13 +221,17 @@ static void fetch_socbb_params(struct display_mode_lib *mode_lib)
mode_lib->vba.SRExitTime = soc->sr_exit_time_us;
mode_lib->vba.SREnterPlusExitTime = soc->sr_enter_plus_exit_time_us;
mode_lib->vba.DRAMClockChangeLatency = soc->dram_clock_change_latency_us;
+ mode_lib->vba.DummyPStateCheck = soc->dram_clock_change_latency_us == soc->dummy_pstate_latency_us;
+ mode_lib->vba.DRAMClockChangeSupportsVActive = !soc->disable_dram_clock_change_vactive_support ||
+ mode_lib->vba.DummyPStateCheck;
+
mode_lib->vba.Downspreading = soc->downspread_percent;
mode_lib->vba.DRAMChannelWidth = soc->dram_channel_width_bytes; // new!
mode_lib->vba.FabricDatapathToDCNDataReturn = soc->fabric_datapath_to_dcn_data_return_bytes; // new!
mode_lib->vba.DISPCLKDPPCLKDSCCLKDownSpreading = soc->dcn_downspread_percent; // new
mode_lib->vba.DISPCLKDPPCLKVCOSpeed = soc->dispclk_dppclk_vco_speed_mhz; // new
mode_lib->vba.VMMPageSize = soc->vmm_page_size_bytes;
- mode_lib->vba.GPUVMMinPageSize = soc->vmm_page_size_bytes / 1024;
+ mode_lib->vba.GPUVMMinPageSize = soc->gpuvm_min_page_size_bytes / 1024;
mode_lib->vba.HostVMMinPageSize = soc->hostvm_min_page_size_bytes / 1024;
// Set the voltage scaling clocks as the defaults. Most of these will
// be set to different values by the test
@@ -261,7 +264,10 @@ static void fetch_socbb_params(struct display_mode_lib *mode_lib)
mode_lib->vba.DRAMSpeedPerState[i] = soc->clock_limits[i].dram_speed_mts;
//mode_lib->vba.DRAMSpeedPerState[i] = soc->clock_limits[i].dram_speed_mhz;
mode_lib->vba.MaxDispclk[i] = soc->clock_limits[i].dispclk_mhz;
+ mode_lib->vba.DTBCLKPerState[i] = soc->clock_limits[i].dtbclk_mhz;
}
+ mode_lib->vba.MinVoltageLevel = 0;
+ mode_lib->vba.MaxVoltageLevel = mode_lib->vba.soc.num_states;
mode_lib->vba.DoUrgentLatencyAdjustment =
soc->do_urgent_latency_adjustment;
@@ -303,8 +309,6 @@ static void fetch_ip_params(struct display_mode_lib *mode_lib)
mode_lib->vba.WritebackInterfaceBufferSize = ip->writeback_interface_buffer_size_kbytes;
mode_lib->vba.WritebackLineBufferSize = ip->writeback_line_buffer_buffer_size;
- mode_lib->vba.MinVoltageLevel = 0;
- mode_lib->vba.MaxVoltageLevel = 5;
mode_lib->vba.WritebackChromaLineBufferWidth =
ip->writeback_chroma_line_buffer_width_pixels;
@@ -420,8 +424,8 @@ static void fetch_pipe_params(struct display_mode_lib *mode_lib)
ip->dcc_supported : src->dcc && ip->dcc_supported;
mode_lib->vba.DCCRate[mode_lib->vba.NumberOfActivePlanes] = src->dcc_rate;
/* TODO: Needs to be set based on src->dcc_rate_luma/chroma */
- mode_lib->vba.DCCRateLuma[mode_lib->vba.NumberOfActivePlanes] = 0;
- mode_lib->vba.DCCRateChroma[mode_lib->vba.NumberOfActivePlanes] = 0;
+ mode_lib->vba.DCCRateLuma[mode_lib->vba.NumberOfActivePlanes] = src->dcc_rate;
+ mode_lib->vba.DCCRateChroma[mode_lib->vba.NumberOfActivePlanes] = src->dcc_rate_chroma;
mode_lib->vba.SourcePixelFormat[mode_lib->vba.NumberOfActivePlanes] =
(enum source_format_class) (src->source_format);
@@ -433,8 +437,6 @@ static void fetch_pipe_params(struct display_mode_lib *mode_lib)
dst->recout_width; // TODO: or should this be full_recout_width???...maybe only when in hsplit mode?
mode_lib->vba.ODMCombineEnabled[mode_lib->vba.NumberOfActivePlanes] =
dst->odm_combine;
- mode_lib->vba.ODMCombineTypeEnabled[mode_lib->vba.NumberOfActivePlanes] =
- dst->odm_combine;
mode_lib->vba.OutputFormat[mode_lib->vba.NumberOfActivePlanes] =
(enum output_format_class) (dout->output_format);
mode_lib->vba.OutputBpp[mode_lib->vba.NumberOfActivePlanes] =
@@ -451,7 +453,7 @@ static void fetch_pipe_params(struct display_mode_lib *mode_lib)
dout->dp_lanes;
/* TODO: Needs to be set based on dout->audio.audio_sample_rate_khz/sample_layout */
mode_lib->vba.AudioSampleRate[mode_lib->vba.NumberOfActivePlanes] =
- 44.1 * 1000;
+ dout->max_audio_sample_rate;
mode_lib->vba.AudioSampleLayout[mode_lib->vba.NumberOfActivePlanes] =
1;
mode_lib->vba.DRAMClockChangeLatencyOverride = 0.0;
@@ -587,6 +589,7 @@ static void fetch_pipe_params(struct display_mode_lib *mode_lib)
for (k = j + 1; k < mode_lib->vba.cache_num_pipes; ++k) {
display_pipe_source_params_st *src_k = &pipes[k].pipe.src;
display_pipe_dest_params_st *dst_k = &pipes[k].pipe.dest;
+ display_output_params_st *dout_k = &pipes[j].dout;
+ display_output_params_st *dout_k = &pipes[k].dout;
if (src_k->is_hsplit && !visited[k]
&& src->hsplit_grp == src_k->hsplit_grp) {
@@ -597,12 +600,18 @@ static void fetch_pipe_params(struct display_mode_lib *mode_lib)
== dm_horz) {
mode_lib->vba.ViewportWidth[mode_lib->vba.NumberOfActivePlanes] +=
src_k->viewport_width;
+ mode_lib->vba.ViewportWidthChroma[mode_lib->vba.NumberOfActivePlanes] +=
+ src_k->viewport_width;
mode_lib->vba.ScalerRecoutWidth[mode_lib->vba.NumberOfActivePlanes] +=
dst_k->recout_width;
} else {
mode_lib->vba.ViewportHeight[mode_lib->vba.NumberOfActivePlanes] +=
src_k->viewport_height;
+ mode_lib->vba.ViewportHeightChroma[mode_lib->vba.NumberOfActivePlanes] +=
+ src_k->viewport_height;
}
+ mode_lib->vba.NumberOfDSCSlices[mode_lib->vba.NumberOfActivePlanes] +=
+ dout_k->dsc_slices;
visited[k] = true;
}
@@ -808,7 +817,9 @@ void ModeSupportAndSystemConfiguration(struct display_mode_lib *mode_lib)
unsigned int total_pipes = 0;
mode_lib->vba.VoltageLevel = mode_lib->vba.cache_pipes[0].clks_cfg.voltage;
- mode_lib->vba.ReturnBW = mode_lib->vba.ReturnBWPerState[mode_lib->vba.VoltageLevel];
+ mode_lib->vba.ReturnBW = mode_lib->vba.ReturnBWPerState[mode_lib->vba.VoltageLevel][mode_lib->vba.maxMpcComb];
+ if (mode_lib->vba.ReturnBW == 0)
+ mode_lib->vba.ReturnBW = mode_lib->vba.ReturnBWPerState[mode_lib->vba.VoltageLevel][0];
mode_lib->vba.FabricAndDRAMBandwidth = mode_lib->vba.FabricAndDRAMBandwidthPerState[mode_lib->vba.VoltageLevel];
fetch_socbb_params(mode_lib);
@@ -858,4 +869,3 @@ double CalculateWriteBackDISPCLK(
return CalculateWriteBackDISPCLK;
}
-#endif
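
Two of the hunks above are easy to misread in isolation; a minimal restatement, with the assumed unit convention spelled out:

    /* GPUVMMinPageSize is kept in KB, so a 4096-byte minimum page size
     * gives GPUVMMinPageSize = 4. */
    mode_lib->vba.GPUVMMinPageSize = soc->gpuvm_min_page_size_bytes / 1024;

    /* ReturnBW fallback: prefer the entry for the chosen MPC-combine
     * setting, fall back to the no-combine entry when it was never
     * computed (still zero). */
    mode_lib->vba.ReturnBW = mode_lib->vba.ReturnBWPerState[mode_lib->vba.VoltageLevel][mode_lib->vba.maxMpcComb];
    if (mode_lib->vba.ReturnBW == 0)
            mode_lib->vba.ReturnBW = mode_lib->vba.ReturnBWPerState[mode_lib->vba.VoltageLevel][0];
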
diff --git a/drivers/gpu/drm/amd/display/dc/dml/display_mode_vba.h b/drivers/gpu/drm/amd/display/dc/dml/display_mode_vba.h
index 1540ffbe3979..e7a44df676ca 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/display_mode_vba.h
+++ b/drivers/gpu/drm/amd/display/dc/dml/display_mode_vba.h
@@ -23,7 +23,6 @@
*
*/
-#ifdef CONFIG_DRM_AMD_DC_DCN2_0
#ifndef __DML2_DISPLAY_MODE_VBA_H__
#define __DML2_DISPLAY_MODE_VBA_H__
@@ -155,7 +154,10 @@ struct vba_vars_st {
double UrgentLatencySupportUsChroma;
unsigned int DSCFormatFactor;
+ bool DummyPStateCheck;
+ bool DRAMClockChangeSupportsVActive;
bool PrefetchModeSupported;
+ bool PrefetchAndImmediateFlipSupported;
enum self_refresh_affinity AllowDRAMSelfRefreshOrDRAMClockChangeInVblank; // Mode Support only
double XFCRemoteSurfaceFlipDelay;
double TInitXFill;
@@ -317,8 +319,7 @@ struct vba_vars_st {
unsigned int DynamicMetadataTransmittedBytes[DC__NUM_DPP__MAX];
double DCCRate[DC__NUM_DPP__MAX];
double AverageDCCCompressionRate;
- bool ODMCombineEnabled[DC__NUM_DPP__MAX];
- enum odm_combine_mode ODMCombineTypeEnabled[DC__NUM_DPP__MAX];
+ enum odm_combine_mode ODMCombineEnabled[DC__NUM_DPP__MAX];
double OutputBpp[DC__NUM_DPP__MAX];
bool DSCEnabled[DC__NUM_DPP__MAX];
unsigned int DSCInputBitPerComponent[DC__NUM_DPP__MAX];
@@ -346,6 +347,7 @@ struct vba_vars_st {
unsigned int EffectiveLBLatencyHidingSourceLinesChroma;
double BandwidthAvailableForImmediateFlip;
unsigned int PrefetchMode[DC__VOLTAGE_STATES + 1][2];
+ unsigned int PrefetchModePerState[DC__VOLTAGE_STATES + 1][2];
unsigned int MinPrefetchMode;
unsigned int MaxPrefetchMode;
bool AnyLinesForVMOrRowTooLarge;
@@ -395,6 +397,7 @@ struct vba_vars_st {
bool WritebackLumaAndChromaScalingSupported;
bool Cursor64BppSupport;
double DCFCLKPerState[DC__VOLTAGE_STATES + 1];
+ double DCFCLKState[DC__VOLTAGE_STATES + 1][2];
double FabricClockPerState[DC__VOLTAGE_STATES + 1];
double SOCCLKPerState[DC__VOLTAGE_STATES + 1];
double PHYCLKPerState[DC__VOLTAGE_STATES + 1];
@@ -443,7 +446,7 @@ struct vba_vars_st {
double OutputLinkDPLanes[DC__NUM_DPP__MAX];
double ForcedOutputLinkBPP[DC__NUM_DPP__MAX]; // Mode Support only
double ImmediateFlipBW[DC__NUM_DPP__MAX];
- double MaxMaxVStartup;
+ double MaxMaxVStartup[DC__VOLTAGE_STATES + 1][2];
double WritebackLumaVExtra;
double WritebackChromaVExtra;
@@ -470,7 +473,7 @@ struct vba_vars_st {
double RoundedUpMaxSwathSizeBytesC;
double EffectiveDETLBLinesLuma;
double EffectiveDETLBLinesChroma;
- double ProjectedDCFCLKDeepSleep;
+ double ProjectedDCFCLKDeepSleep[DC__VOLTAGE_STATES + 1][2];
double PDEAndMetaPTEBytesPerFrameY;
double PDEAndMetaPTEBytesPerFrameC;
unsigned int MetaRowBytesY;
@@ -488,12 +491,11 @@ struct vba_vars_st {
double FractionOfUrgentBandwidthImmediateFlip; // Mode Support debugging output
/* ms locals */
- double IdealSDPPortBandwidthPerState[DC__VOLTAGE_STATES + 1];
+ double IdealSDPPortBandwidthPerState[DC__VOLTAGE_STATES + 1][2];
unsigned int NoOfDPP[DC__VOLTAGE_STATES + 1][2][DC__NUM_DPP__MAX];
int NoOfDPPThisState[DC__NUM_DPP__MAX];
- bool ODMCombineEnablePerState[DC__VOLTAGE_STATES + 1][DC__NUM_DPP__MAX];
- enum odm_combine_mode ODMCombineTypeEnablePerState[DC__VOLTAGE_STATES + 1][DC__NUM_DPP__MAX];
- unsigned int SwathWidthYThisState[DC__NUM_DPP__MAX];
+ enum odm_combine_mode ODMCombineEnablePerState[DC__VOLTAGE_STATES + 1][DC__NUM_DPP__MAX];
+ double SwathWidthYThisState[DC__NUM_DPP__MAX];
unsigned int SwathHeightCPerState[DC__VOLTAGE_STATES + 1][2][DC__NUM_DPP__MAX];
unsigned int SwathHeightYThisState[DC__NUM_DPP__MAX];
unsigned int SwathHeightCThisState[DC__NUM_DPP__MAX];
@@ -505,7 +507,7 @@ struct vba_vars_st {
double RequiredDPPCLKThisState[DC__NUM_DPP__MAX];
bool PTEBufferSizeNotExceededY[DC__VOLTAGE_STATES + 1][2][DC__NUM_DPP__MAX];
bool PTEBufferSizeNotExceededC[DC__VOLTAGE_STATES + 1][2][DC__NUM_DPP__MAX];
- bool BandwidthWithoutPrefetchSupported[DC__VOLTAGE_STATES + 1];
+ bool BandwidthWithoutPrefetchSupported[DC__VOLTAGE_STATES + 1][2];
bool PrefetchSupported[DC__VOLTAGE_STATES + 1][2];
bool VRatioInPrefetchSupported[DC__VOLTAGE_STATES + 1][2];
double RequiredDISPCLK[DC__VOLTAGE_STATES + 1][2];
@@ -514,22 +516,22 @@ struct vba_vars_st {
unsigned int TotalNumberOfActiveDPP[DC__VOLTAGE_STATES + 1][2];
unsigned int TotalNumberOfDCCActiveDPP[DC__VOLTAGE_STATES + 1][2];
bool ModeSupport[DC__VOLTAGE_STATES + 1][2];
- double ReturnBWPerState[DC__VOLTAGE_STATES + 1];
+ double ReturnBWPerState[DC__VOLTAGE_STATES + 1][2];
bool DIOSupport[DC__VOLTAGE_STATES + 1];
bool NotEnoughDSCUnits[DC__VOLTAGE_STATES + 1];
bool DSCCLKRequiredMoreThanSupported[DC__VOLTAGE_STATES + 1];
bool DTBCLKRequiredMoreThanSupported[DC__VOLTAGE_STATES + 1];
double UrgentRoundTripAndOutOfOrderLatencyPerState[DC__VOLTAGE_STATES + 1];
- bool ROBSupport[DC__VOLTAGE_STATES + 1];
+ bool ROBSupport[DC__VOLTAGE_STATES + 1][2];
bool PTEBufferSizeNotExceeded[DC__VOLTAGE_STATES + 1][2];
- bool TotalVerticalActiveBandwidthSupport[DC__VOLTAGE_STATES + 1];
- double MaxTotalVerticalActiveAvailableBandwidth[DC__VOLTAGE_STATES + 1];
+ bool TotalVerticalActiveBandwidthSupport[DC__VOLTAGE_STATES + 1][2];
+ double MaxTotalVerticalActiveAvailableBandwidth[DC__VOLTAGE_STATES + 1][2];
double PrefetchBW[DC__NUM_DPP__MAX];
- double PDEAndMetaPTEBytesPerFrame[DC__NUM_DPP__MAX];
- double MetaRowBytes[DC__NUM_DPP__MAX];
- double DPTEBytesPerRow[DC__NUM_DPP__MAX];
- double PrefetchLinesY[DC__NUM_DPP__MAX];
- double PrefetchLinesC[DC__NUM_DPP__MAX];
+ double PDEAndMetaPTEBytesPerFrame[DC__VOLTAGE_STATES + 1][2][DC__NUM_DPP__MAX];
+ double MetaRowBytes[DC__VOLTAGE_STATES + 1][2][DC__NUM_DPP__MAX];
+ double DPTEBytesPerRow[DC__VOLTAGE_STATES + 1][2][DC__NUM_DPP__MAX];
+ double PrefetchLinesY[DC__VOLTAGE_STATES + 1][2][DC__NUM_DPP__MAX];
+ double PrefetchLinesC[DC__VOLTAGE_STATES + 1][2][DC__NUM_DPP__MAX];
unsigned int MaxNumSwY[DC__NUM_DPP__MAX];
unsigned int MaxNumSwC[DC__NUM_DPP__MAX];
double PrefillY[DC__NUM_DPP__MAX];
@@ -538,7 +540,7 @@ struct vba_vars_st {
double LinesForMetaPTE[DC__NUM_DPP__MAX];
double LinesForMetaAndDPTERow[DC__NUM_DPP__MAX];
double MinDPPCLKUsingSingleDPP[DC__NUM_DPP__MAX];
- unsigned int SwathWidthYSingleDPP[DC__NUM_DPP__MAX];
+ double SwathWidthYSingleDPP[DC__NUM_DPP__MAX];
double BytePerPixelInDETY[DC__NUM_DPP__MAX];
double BytePerPixelInDETC[DC__NUM_DPP__MAX];
bool RequiresDSC[DC__VOLTAGE_STATES + 1][DC__NUM_DPP__MAX];
@@ -546,7 +548,7 @@ struct vba_vars_st {
double RequiresFEC[DC__VOLTAGE_STATES + 1][DC__NUM_DPP__MAX];
double OutputBppPerState[DC__VOLTAGE_STATES + 1][DC__NUM_DPP__MAX];
double DSCDelayPerState[DC__VOLTAGE_STATES + 1][DC__NUM_DPP__MAX];
- bool ViewportSizeSupport[DC__VOLTAGE_STATES + 1];
+ bool ViewportSizeSupport[DC__VOLTAGE_STATES + 1][2];
unsigned int Read256BlockHeightY[DC__NUM_DPP__MAX];
unsigned int Read256BlockWidthY[DC__NUM_DPP__MAX];
unsigned int Read256BlockHeightC[DC__NUM_DPP__MAX];
@@ -561,7 +563,7 @@ struct vba_vars_st {
double WriteBandwidth[DC__NUM_DPP__MAX];
double PSCL_FACTOR[DC__NUM_DPP__MAX];
double PSCL_FACTOR_CHROMA[DC__NUM_DPP__MAX];
- double MaximumVStartup[DC__NUM_DPP__MAX];
+ double MaximumVStartup[DC__VOLTAGE_STATES + 1][2][DC__NUM_DPP__MAX];
unsigned int MacroTileWidthY[DC__NUM_DPP__MAX];
unsigned int MacroTileWidthC[DC__NUM_DPP__MAX];
double AlignedDCCMetaPitch[DC__NUM_DPP__MAX];
@@ -578,7 +580,7 @@ struct vba_vars_st {
bool ImmediateFlipSupportedForState[DC__VOLTAGE_STATES + 1][2];
double WritebackDelay[DC__VOLTAGE_STATES + 1][DC__NUM_DPP__MAX];
unsigned int vm_group_bytes[DC__NUM_DPP__MAX];
- long dpte_group_bytes[DC__NUM_DPP__MAX];
+ unsigned int dpte_group_bytes[DC__NUM_DPP__MAX];
unsigned int dpte_row_height[DC__NUM_DPP__MAX];
unsigned int meta_req_height[DC__NUM_DPP__MAX];
unsigned int meta_req_width[DC__NUM_DPP__MAX];
@@ -604,14 +606,14 @@ struct vba_vars_st {
double UrgentBurstFactorChroma[DC__NUM_DPP__MAX];
double UrgentBurstFactorChromaPre[DC__NUM_DPP__MAX];
+
bool MPCCombine[DC__VOLTAGE_STATES + 1][2][DC__NUM_DPP__MAX];
double SwathWidthCSingleDPP[DC__NUM_DPP__MAX];
double MaximumSwathWidthInLineBufferLuma;
double MaximumSwathWidthInLineBufferChroma;
double MaximumSwathWidthLuma[DC__NUM_DPP__MAX];
double MaximumSwathWidthChroma[DC__NUM_DPP__MAX];
- bool odm_combine_dummy[DC__NUM_DPP__MAX];
- enum odm_combine_mode odm_combine_mode_dummy[DC__NUM_DPP__MAX];
+ enum odm_combine_mode odm_combine_dummy[DC__NUM_DPP__MAX];
double dummy1[DC__NUM_DPP__MAX];
double dummy2[DC__NUM_DPP__MAX];
double dummy3[DC__NUM_DPP__MAX];
@@ -621,9 +623,9 @@ struct vba_vars_st {
double dummy7[DC__NUM_DPP__MAX];
double dummy8[DC__NUM_DPP__MAX];
unsigned int dummyinteger1ms[DC__NUM_DPP__MAX];
- unsigned int dummyinteger2ms[DC__NUM_DPP__MAX];
+ double dummyinteger2ms[DC__NUM_DPP__MAX];
unsigned int dummyinteger3[DC__NUM_DPP__MAX];
- unsigned int dummyinteger4;
+ unsigned int dummyinteger4[DC__NUM_DPP__MAX];
unsigned int dummyinteger5;
unsigned int dummyinteger6;
unsigned int dummyinteger7;
@@ -636,7 +638,6 @@ struct vba_vars_st {
unsigned int dummyintegerarr2[DC__NUM_DPP__MAX];
unsigned int dummyintegerarr3[DC__NUM_DPP__MAX];
unsigned int dummyintegerarr4[DC__NUM_DPP__MAX];
- long dummylongarr1[DC__NUM_DPP__MAX];
bool dummysinglestring;
bool SingleDPPViewportSizeSupportPerPlane[DC__NUM_DPP__MAX];
double PlaneRequiredDISPCLKWithODMCombine2To1;
@@ -644,20 +645,19 @@ struct vba_vars_st {
unsigned int TotalNumberOfSingleDPPPlanes[DC__VOLTAGE_STATES + 1][2];
bool LinkDSCEnable;
bool ODMCombine4To1SupportCheckOK[DC__VOLTAGE_STATES + 1];
- bool ODMCombineEnableThisState[DC__NUM_DPP__MAX];
- enum odm_combine_mode ODMCombineEnableTypeThisState[DC__NUM_DPP__MAX];
- unsigned int SwathWidthCThisState[DC__NUM_DPP__MAX];
+ enum odm_combine_mode ODMCombineEnableThisState[DC__NUM_DPP__MAX];
+ double SwathWidthCThisState[DC__NUM_DPP__MAX];
bool ViewportSizeSupportPerPlane[DC__NUM_DPP__MAX];
double AlignedDCCMetaPitchY[DC__NUM_DPP__MAX];
double AlignedDCCMetaPitchC[DC__NUM_DPP__MAX];
unsigned int NotEnoughUrgentLatencyHiding;
unsigned int NotEnoughUrgentLatencyHidingPre;
- long PTEBufferSizeInRequestsForLuma;
- long PTEBufferSizeInRequestsForChroma;
+ int PTEBufferSizeInRequestsForLuma;
+ int PTEBufferSizeInRequestsForChroma;
// Missing from VBA
- long dpte_group_bytes_chroma;
+ int dpte_group_bytes_chroma;
unsigned int vm_group_bytes_chroma;
double dst_x_after_scaler;
double dst_y_after_scaler;
@@ -682,8 +682,8 @@ struct vba_vars_st {
double MinTTUVBlank[DC__NUM_DPP__MAX];
double BytePerPixelDETY[DC__NUM_DPP__MAX];
double BytePerPixelDETC[DC__NUM_DPP__MAX];
- unsigned int SwathWidthY[DC__NUM_DPP__MAX];
- unsigned int SwathWidthSingleDPPY[DC__NUM_DPP__MAX];
+ double SwathWidthY[DC__NUM_DPP__MAX];
+ double SwathWidthSingleDPPY[DC__NUM_DPP__MAX];
double CursorRequestDeliveryTime[DC__NUM_DPP__MAX];
double CursorRequestDeliveryTimePrefetch[DC__NUM_DPP__MAX];
double ReadBandwidthPlaneLuma[DC__NUM_DPP__MAX];
@@ -759,8 +759,8 @@ struct vba_vars_st {
double LinesInDETY[DC__NUM_DPP__MAX];
double LinesInDETYRoundedDownToSwath[DC__NUM_DPP__MAX];
- unsigned int SwathWidthSingleDPPC[DC__NUM_DPP__MAX];
- unsigned int SwathWidthC[DC__NUM_DPP__MAX];
+ double SwathWidthSingleDPPC[DC__NUM_DPP__MAX];
+ double SwathWidthC[DC__NUM_DPP__MAX];
unsigned int BytePerPixelY[DC__NUM_DPP__MAX];
unsigned int BytePerPixelC[DC__NUM_DPP__MAX];
long dummyinteger1;
@@ -778,6 +778,7 @@ struct vba_vars_st {
unsigned int DCCCMaxCompressedBlock[DC__NUM_DPP__MAX];
unsigned int DCCCIndependent64ByteBlock[DC__NUM_DPP__MAX];
double VStartupMargin;
+ bool NotEnoughTimeForDynamicMetadata;
/* Missing from VBA */
unsigned int MaximumMaxVStartupLines;
@@ -813,7 +814,7 @@ struct vba_vars_st {
unsigned int ViewportHeightChroma[DC__NUM_DPP__MAX];
double HRatioChroma[DC__NUM_DPP__MAX];
double VRatioChroma[DC__NUM_DPP__MAX];
- long WritebackSourceWidth[DC__NUM_DPP__MAX];
+ int WritebackSourceWidth[DC__NUM_DPP__MAX];
bool ModeIsSupported;
bool ODMCombine4To1Supported;
@@ -849,6 +850,58 @@ struct vba_vars_st {
unsigned int MaxNumHDMIFRLOutputs;
int AudioSampleRate[DC__NUM_DPP__MAX];
int AudioSampleLayout[DC__NUM_DPP__MAX];
+
+ int PercentMarginOverMinimumRequiredDCFCLK;
+ bool DynamicMetadataSupported[DC__VOLTAGE_STATES + 1][2];
+ enum immediate_flip_requirement ImmediateFlipRequirement;
+ double DETBufferSizeYThisState[DC__NUM_DPP__MAX];
+ double DETBufferSizeCThisState[DC__NUM_DPP__MAX];
+ bool NoUrgentLatencyHiding[DC__NUM_DPP__MAX];
+ bool NoUrgentLatencyHidingPre[DC__NUM_DPP__MAX];
+ int swath_width_luma_ub_this_state[DC__NUM_DPP__MAX];
+ int swath_width_chroma_ub_this_state[DC__NUM_DPP__MAX];
+ double UrgLatency[DC__VOLTAGE_STATES + 1];
+ double VActiveCursorBandwidth[DC__VOLTAGE_STATES + 1][2][DC__NUM_DPP__MAX];
+ double VActivePixelBandwidth[DC__VOLTAGE_STATES + 1][2][DC__NUM_DPP__MAX];
+ bool NoTimeForPrefetch[DC__VOLTAGE_STATES + 1][2][DC__NUM_DPP__MAX];
+ bool NoTimeForDynamicMetadata[DC__VOLTAGE_STATES + 1][2][DC__NUM_DPP__MAX];
+ double dpte_row_bandwidth[DC__VOLTAGE_STATES + 1][2][DC__NUM_DPP__MAX];
+ double meta_row_bandwidth[DC__VOLTAGE_STATES + 1][2][DC__NUM_DPP__MAX];
+ double DETBufferSizeYAllStates[DC__VOLTAGE_STATES + 1][2][DC__NUM_DPP__MAX];
+ double DETBufferSizeCAllStates[DC__VOLTAGE_STATES + 1][2][DC__NUM_DPP__MAX];
+ int swath_width_luma_ub_all_states[DC__VOLTAGE_STATES + 1][2][DC__NUM_DPP__MAX];
+ int swath_width_chroma_ub_all_states[DC__VOLTAGE_STATES + 1][2][DC__NUM_DPP__MAX];
+ bool NotUrgentLatencyHiding[DC__VOLTAGE_STATES + 1][2];
+ unsigned int SwathHeightYAllStates[DC__VOLTAGE_STATES + 1][2][DC__NUM_DPP__MAX];
+ unsigned int SwathHeightCAllStates[DC__VOLTAGE_STATES + 1][2][DC__NUM_DPP__MAX];
+ unsigned int SwathWidthYAllStates[DC__VOLTAGE_STATES + 1][2][DC__NUM_DPP__MAX];
+ unsigned int SwathWidthCAllStates[DC__VOLTAGE_STATES + 1][2][DC__NUM_DPP__MAX];
+ double TotalDPTERowBandwidth[DC__VOLTAGE_STATES + 1][2];
+ double TotalMetaRowBandwidth[DC__VOLTAGE_STATES + 1][2];
+ double TotalVActiveCursorBandwidth[DC__VOLTAGE_STATES + 1][2];
+ double TotalVActivePixelBandwidth[DC__VOLTAGE_STATES + 1][2];
+ bool UseMinimumRequiredDCFCLK;
+ double WritebackDelayTime[DC__NUM_DPP__MAX];
+ unsigned int DCCYIndependentBlock[DC__NUM_DPP__MAX];
+ unsigned int DCCCIndependentBlock[DC__NUM_DPP__MAX];
+ unsigned int dummyinteger15;
+ unsigned int dummyinteger16;
+ unsigned int dummyinteger17;
+ unsigned int dummyinteger18;
+ unsigned int dummyinteger19;
+ unsigned int dummyinteger20;
+ unsigned int dummyinteger21;
+ unsigned int dummyinteger22;
+ unsigned int dummyinteger23;
+ unsigned int dummyinteger24;
+ unsigned int dummyinteger25;
+ unsigned int dummyinteger26;
+ unsigned int dummyinteger27;
+ unsigned int dummyinteger28;
+ unsigned int dummyinteger29;
+ bool dummystring[DC__NUM_DPP__MAX];
+ double BPP;
+ enum odm_combine_policy ODMCombinePolicy;
};
bool CalculateMinAndMaxPrefetchMode(
@@ -870,4 +923,3 @@ double CalculateWriteBackDISPCLK(
unsigned int WritebackChromaLineBufferWidth);
#endif /* _DML2_DISPLAY_MODE_VBA_H_ */
-#endif
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dml_common_defs.c b/drivers/gpu/drm/amd/display/dc/dml/dml_common_defs.c
index b953b02a1512..723af0b2dda0 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dml_common_defs.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dml_common_defs.c
@@ -24,7 +24,7 @@
*/
#include "dml_common_defs.h"
-#include "../calcs/dcn_calc_math.h"
+#include "dcn_calc_math.h"
#include "dml_inline_defs.h"
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dml_inline_defs.h b/drivers/gpu/drm/amd/display/dc/dml/dml_inline_defs.h
index eca140da13d8..ded71ea82413 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dml_inline_defs.h
+++ b/drivers/gpu/drm/amd/display/dc/dml/dml_inline_defs.h
@@ -27,7 +27,7 @@
#define __DML_INLINE_DEFS_H__
#include "dml_common_defs.h"
-#include "../calcs/dcn_calc_math.h"
+#include "dcn_calc_math.h"
#include "dml_logger.h"
static inline double dml_min(double a, double b)
diff --git a/drivers/gpu/drm/amd/display/dc/dsc/Makefile b/drivers/gpu/drm/amd/display/dc/dsc/Makefile
index 641ffb7cfaed..3f66868df171 100644
--- a/drivers/gpu/drm/amd/display/dc/dsc/Makefile
+++ b/drivers/gpu/drm/amd/display/dc/dsc/Makefile
@@ -2,7 +2,13 @@
#
# Makefile for the 'dsc' sub-component of DAL.
+ifdef CONFIG_X86
dsc_ccflags := -mhard-float -msse
+endif
+
+ifdef CONFIG_PPC64
+dsc_ccflags := -mhard-float -maltivec
+endif
ifdef CONFIG_CC_IS_GCC
ifeq ($(call cc-ifversion, -lt, 0701, y), y)
@@ -10,6 +16,7 @@ IS_OLD_GCC = 1
endif
endif
+ifdef CONFIG_X86
ifdef IS_OLD_GCC
# Stack alignment mismatch, proceed with caution.
# GCC < 7.1 cannot compile code using `double` and -mpreferred-stack-boundary=3
@@ -18,6 +25,7 @@ dsc_ccflags += -mpreferred-stack-boundary=4
else
dsc_ccflags += -msse2
endif
+endif
CFLAGS_$(AMDDALPATH)/dc/dsc/rc_calc.o := $(dsc_ccflags)
CFLAGS_$(AMDDALPATH)/dc/dsc/rc_calc_dpi.o := $(dsc_ccflags)
diff --git a/drivers/gpu/drm/amd/display/dc/dsc/dc_dsc.c b/drivers/gpu/drm/amd/display/dc/dsc/dc_dsc.c
index e60f760585e4..8b78fcbfe746 100644
--- a/drivers/gpu/drm/amd/display/dc/dsc/dc_dsc.c
+++ b/drivers/gpu/drm/amd/display/dc/dsc/dc_dsc.c
@@ -22,30 +22,16 @@
* Author: AMD
*/
-#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT
#include "dc_hw_types.h"
#include "dsc.h"
#include <drm/drm_dp_helper.h>
-
-struct dc_dsc_policy {
- bool use_min_slices_h;
- int max_slices_h; // Maximum available if 0
- int min_sice_height; // Must not be less than 8
- int max_target_bpp;
- int min_target_bpp; // Minimum target bits per pixel
-};
-
-const struct dc_dsc_policy dsc_policy = {
- .use_min_slices_h = true, // DSC Policy: Use minimum number of slices that fits the pixel clock
- .max_slices_h = 0, // DSC Policy: Use max available slices (in our case 4 for or 8, depending on the mode)
- .min_sice_height = 108, // DSC Policy: Use slice height recommended by VESA DSC Spreadsheet user guide
- .max_target_bpp = 16,
- .min_target_bpp = 8,
-};
-
+#include "dc.h"
/* This module's internal functions */
+/* default DSC policy target bitrate limit is 16bpp */
+static uint32_t dsc_policy_max_target_bpp_limit = 16;
+
static uint32_t dc_dsc_bandwidth_in_kbps_from_timing(
const struct dc_crtc_timing *timing)
{
@@ -237,8 +223,11 @@ static void get_dsc_enc_caps(
// This is a static HW query, so we can use any DSC
memset(dsc_enc_caps, 0, sizeof(struct dsc_enc_caps));
- if (dsc)
+ if (dsc) {
dsc->funcs->dsc_get_enc_caps(dsc_enc_caps, pixel_clock_100Hz);
+ if (dsc->ctx->dc->debug.native422_support)
+ dsc_enc_caps->color_formats.bits.YCBCR_NATIVE_422 = 1;
+ }
}
/* Returns 'false' if no intersection was found for at least one capability.
@@ -578,9 +567,11 @@ static bool setup_dsc_config(
bool is_dsc_possible = false;
int pic_height;
int slice_height;
+ struct dc_dsc_policy policy;
memset(dsc_cfg, 0, sizeof(struct dc_dsc_config));
+ dc_dsc_get_policy_for_timing(timing, &policy);
pic_width = timing->h_addressable + timing->h_border_left + timing->h_border_right;
pic_height = timing->v_addressable + timing->v_border_top + timing->v_border_bottom;
@@ -596,7 +587,12 @@ static bool setup_dsc_config(
goto done;
if (target_bandwidth_kbps > 0) {
- is_dsc_possible = decide_dsc_target_bpp_x16(&dsc_policy, &dsc_common_caps, target_bandwidth_kbps, timing, &target_bpp);
+ is_dsc_possible = decide_dsc_target_bpp_x16(
+ &policy,
+ &dsc_common_caps,
+ target_bandwidth_kbps,
+ timing,
+ &target_bpp);
dsc_cfg->bits_per_pixel = target_bpp;
}
if (!is_dsc_possible)
@@ -698,20 +694,20 @@ static bool setup_dsc_config(
if (!is_dsc_possible)
goto done;
- if (dsc_policy.use_min_slices_h) {
+ if (policy.use_min_slices_h) {
if (min_slices_h > 0)
num_slices_h = min_slices_h;
else if (max_slices_h > 0) { // Fall back to max slices if min slices is not working out
- if (dsc_policy.max_slices_h)
- num_slices_h = min(dsc_policy.max_slices_h, max_slices_h);
+ if (policy.max_slices_h)
+ num_slices_h = min(policy.max_slices_h, max_slices_h);
else
num_slices_h = max_slices_h;
} else
is_dsc_possible = false;
} else {
if (max_slices_h > 0) {
- if (dsc_policy.max_slices_h)
- num_slices_h = min(dsc_policy.max_slices_h, max_slices_h);
+ if (policy.max_slices_h)
+ num_slices_h = min(policy.max_slices_h, max_slices_h);
else
num_slices_h = max_slices_h;
} else if (min_slices_h > 0) // Fall back to min slices if max slices is not possible
@@ -733,7 +729,7 @@ static bool setup_dsc_config(
// Slice height (i.e. number of slices per column): start with policy and pick the first one that height is divisible by.
// For 4:2:0 make sure the slice height is divisible by 2 as well.
if (min_slice_height_override == 0)
- slice_height = min(dsc_policy.min_sice_height, pic_height);
+ slice_height = min(policy.min_slice_height, pic_height);
else
slice_height = min(min_slice_height_override, pic_height);
@@ -764,7 +760,7 @@ done:
return is_dsc_possible;
}
-bool dc_dsc_parse_dsc_dpcd(const uint8_t *dpcd_dsc_basic_data, const uint8_t *dpcd_dsc_ext_data, struct dsc_dec_dpcd_caps *dsc_sink_caps)
+bool dc_dsc_parse_dsc_dpcd(const struct dc *dc, const uint8_t *dpcd_dsc_basic_data, const uint8_t *dpcd_dsc_ext_data, struct dsc_dec_dpcd_caps *dsc_sink_caps)
{
if (!dpcd_dsc_basic_data)
return false;
@@ -817,6 +813,23 @@ bool dc_dsc_parse_dsc_dpcd(const uint8_t *dpcd_dsc_basic_data, const uint8_t *dp
if (!dsc_bpp_increment_div_from_dpcd(dpcd_dsc_basic_data[DP_DSC_BITS_PER_PIXEL_INC - DP_DSC_SUPPORT], &dsc_sink_caps->bpp_increment_div))
return false;
+ if (dc->debug.dsc_bpp_increment_div) {
+ /* dsc_bpp_increment_div should only be 1, 2, 4, 8 or 16, but rather than rejecting invalid values,
+ * we'll accept all of them and clamp the result into range. This also makes the above check against 0 redundant,
+ * but that check stresses that the override is only used when it is not 0.
+ */
+ */
+ if (dc->debug.dsc_bpp_increment_div >= 1)
+ dsc_sink_caps->bpp_increment_div = 1;
+ if (dc->debug.dsc_bpp_increment_div >= 2)
+ dsc_sink_caps->bpp_increment_div = 2;
+ if (dc->debug.dsc_bpp_increment_div >= 4)
+ dsc_sink_caps->bpp_increment_div = 4;
+ if (dc->debug.dsc_bpp_increment_div >= 8)
+ dsc_sink_caps->bpp_increment_div = 8;
+ if (dc->debug.dsc_bpp_increment_div >= 16)
+ dsc_sink_caps->bpp_increment_div = 16;
+ }
+
/* Extended caps */
if (dpcd_dsc_ext_data == NULL) { // Extended DPCD DSC data can be null, e.g. because it doesn't apply to SST
dsc_sink_caps->branch_overall_throughput_0_mps = 0;
@@ -903,4 +916,67 @@ bool dc_dsc_compute_config(
timing, dsc_min_slice_height_override, dsc_cfg);
return is_dsc_possible;
}
-#endif /* CONFIG_DRM_AMD_DC_DSC_SUPPORT */
+
+void dc_dsc_get_policy_for_timing(const struct dc_crtc_timing *timing, struct dc_dsc_policy *policy)
+{
+ uint32_t bpc = 0;
+
+ policy->min_target_bpp = 0;
+ policy->max_target_bpp = 0;
+
+ /* DSC Policy: Use minimum number of slices that fits the pixel clock */
+ policy->use_min_slices_h = true;
+
+ /* DSC Policy: Use max available slices
+ * (in our case 4 or 8, depending on the mode)
+ */
+ policy->max_slices_h = 0;
+
+ /* DSC Policy: Use slice height recommended
+ * by VESA DSC Spreadsheet user guide
+ */
+ policy->min_slice_height = 108;
+
+ /* DSC Policy: follow DP specs with an internal upper limit to 16 bpp
+ * for better interoperability
+ */
+ switch (timing->display_color_depth) {
+ case COLOR_DEPTH_888:
+ bpc = 8;
+ break;
+ case COLOR_DEPTH_101010:
+ bpc = 10;
+ break;
+ case COLOR_DEPTH_121212:
+ bpc = 12;
+ break;
+ default:
+ return;
+ }
+ switch (timing->pixel_encoding) {
+ case PIXEL_ENCODING_RGB:
+ case PIXEL_ENCODING_YCBCR444:
+ case PIXEL_ENCODING_YCBCR422: /* assume no YCbCr422 native support */
+ /* DP specs limits to 8 */
+ policy->min_target_bpp = 8;
+ /* DP specs limits to 3 x bpc */
+ policy->max_target_bpp = 3 * bpc;
+ break;
+ case PIXEL_ENCODING_YCBCR420:
+ /* DP specs limits to 6 */
+ policy->min_target_bpp = 6;
+ /* DP specs limits to 1.5 x bpc assume bpc is an even number */
+ policy->max_target_bpp = bpc * 3 / 2;
+ break;
+ default:
+ return;
+ }
+ /* internal upper limit, default 16 bpp */
+ if (policy->max_target_bpp > dsc_policy_max_target_bpp_limit)
+ policy->max_target_bpp = dsc_policy_max_target_bpp_limit;
+}
+
+void dc_dsc_policy_set_max_target_bpp_limit(uint32_t limit)
+{
+ dsc_policy_max_target_bpp_limit = limit;
+}
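
Plugging numbers into dc_dsc_get_policy_for_timing() above makes the limits concrete, and the debug override earlier in this file reduces to a floor onto {1, 2, 4, 8, 16}. A hypothetical helper equivalent to that cascade (clamp_increment_div is not part of the patch):

    /* Worked policy examples from the switch statements above:
     *   RGB/444/422 at 10 bpc: min = 8, max = 3 * 10 = 30, capped to 16
     *   YCbCr420    at 10 bpc: min = 6, max = 10 * 3 / 2 = 15, under the cap
     *
     * Hypothetical equivalent of the dsc_bpp_increment_div override:
     * round the debug value down to the largest of {1, 2, 4, 8, 16} it
     * reaches, e.g. 5 -> 4 and 20 -> 16. */
    static uint32_t clamp_increment_div(uint32_t v)
    {
            uint32_t out = 1;

            if (v >= 2)
                    out = 2;
            if (v >= 4)
                    out = 4;
            if (v >= 8)
                    out = 8;
            if (v >= 16)
                    out = 16;
            return out;
    }
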
diff --git a/drivers/gpu/drm/amd/display/dc/dsc/dscc_types.h b/drivers/gpu/drm/amd/display/dc/dsc/dscc_types.h
index 020ad8f685ea..9f70e87b3ecb 100644
--- a/drivers/gpu/drm/amd/display/dc/dsc/dscc_types.h
+++ b/drivers/gpu/drm/amd/display/dc/dsc/dscc_types.h
@@ -1,4 +1,3 @@
-#if defined(CONFIG_DRM_AMD_DC_DCN2_0)
/*
* Copyright 2017 Advanced Micro Devices, Inc.
@@ -51,4 +50,3 @@ int dscc_compute_dsc_parameters(const struct drm_dsc_config *pps, struct dsc_par
#endif
-#endif
diff --git a/drivers/gpu/drm/amd/display/dc/dsc/qp_tables.h b/drivers/gpu/drm/amd/display/dc/dsc/qp_tables.h
index f66d006eac5d..e5fac9f4181d 100644
--- a/drivers/gpu/drm/amd/display/dc/dsc/qp_tables.h
+++ b/drivers/gpu/drm/amd/display/dc/dsc/qp_tables.h
@@ -1,4 +1,3 @@
-#if defined(CONFIG_DRM_AMD_DC_DCN2_0)
/*
* Copyright 2017 Advanced Micro Devices, Inc.
@@ -703,4 +702,3 @@ const qp_table qp_table_422_8bpc_max = {
{ 16, { 0, 0, 0, 0, 1, 1, 1, 1, 2, 2, 2, 2, 2, 3, 4} }
};
-#endif
diff --git a/drivers/gpu/drm/amd/display/dc/dsc/rc_calc.c b/drivers/gpu/drm/amd/display/dc/dsc/rc_calc.c
index 76c4b12d6824..03ae15946c6d 100644
--- a/drivers/gpu/drm/amd/display/dc/dsc/rc_calc.c
+++ b/drivers/gpu/drm/amd/display/dc/dsc/rc_calc.c
@@ -1,4 +1,3 @@
-#if defined(CONFIG_DRM_AMD_DC_DSC_SUPPORT)
/*
* Copyright 2017 Advanced Micro Devices, Inc.
@@ -252,4 +251,3 @@ void calc_rc_params(struct rc_params *rc, enum colour_mode cm, enum bits_per_com
rc->rc_buf_thresh[13] = 8064;
}
-#endif
diff --git a/drivers/gpu/drm/amd/display/dc/dsc/rc_calc.h b/drivers/gpu/drm/amd/display/dc/dsc/rc_calc.h
index f1d6e793bc61..b6b1f09c2009 100644
--- a/drivers/gpu/drm/amd/display/dc/dsc/rc_calc.h
+++ b/drivers/gpu/drm/amd/display/dc/dsc/rc_calc.h
@@ -1,4 +1,3 @@
-#if defined(CONFIG_DRM_AMD_DC_DCN2_0)
/*
* Copyright 2017 Advanced Micro Devices, Inc.
@@ -82,4 +81,3 @@ void calc_rc_params(struct rc_params *rc, enum colour_mode cm, enum bits_per_com
#endif
-#endif
diff --git a/drivers/gpu/drm/amd/display/dc/dsc/rc_calc_dpi.c b/drivers/gpu/drm/amd/display/dc/dsc/rc_calc_dpi.c
index 73172fd0b529..1f6e63b71456 100644
--- a/drivers/gpu/drm/amd/display/dc/dsc/rc_calc_dpi.c
+++ b/drivers/gpu/drm/amd/display/dc/dsc/rc_calc_dpi.c
@@ -1,4 +1,3 @@
-#if defined(CONFIG_DRM_AMD_DC_DSC_SUPPORT)
/*
* Copyright 2012-17 Advanced Micro Devices, Inc.
*
@@ -144,4 +143,3 @@ int dscc_compute_dsc_parameters(const struct drm_dsc_config *pps, struct dsc_par
return ret;
}
-#endif
diff --git a/drivers/gpu/drm/amd/display/dc/gpio/Makefile b/drivers/gpu/drm/amd/display/dc/gpio/Makefile
index b3062275711e..202baa210cc8 100644
--- a/drivers/gpu/drm/amd/display/dc/gpio/Makefile
+++ b/drivers/gpu/drm/amd/display/dc/gpio/Makefile
@@ -61,26 +61,25 @@ AMD_DISPLAY_FILES += $(AMD_DAL_GPIO_DCE120)
###############################################################################
# DCN 1x
###############################################################################
-ifdef CONFIG_DRM_AMD_DC_DCN1_0
+ifdef CONFIG_DRM_AMD_DC_DCN
GPIO_DCN10 = hw_translate_dcn10.o hw_factory_dcn10.o
AMD_DAL_GPIO_DCN10 = $(addprefix $(AMDDALPATH)/dc/gpio/dcn10/,$(GPIO_DCN10))
AMD_DISPLAY_FILES += $(AMD_DAL_GPIO_DCN10)
-endif
###############################################################################
# DCN 2
###############################################################################
-ifdef CONFIG_DRM_AMD_DC_DCN2_0
GPIO_DCN20 = hw_translate_dcn20.o hw_factory_dcn20.o
AMD_DAL_GPIO_DCN20 = $(addprefix $(AMDDALPATH)/dc/gpio/dcn20/,$(GPIO_DCN20))
AMD_DISPLAY_FILES += $(AMD_DAL_GPIO_DCN20)
-endif
-ifdef CONFIG_DRM_AMD_DC_DCN2_1
+###############################################################################
+# DCN 21
+###############################################################################
GPIO_DCN21 = hw_translate_dcn21.o hw_factory_dcn21.o
AMD_DAL_GPIO_DCN21 = $(addprefix $(AMDDALPATH)/dc/gpio/dcn21/,$(GPIO_DCN21))
diff --git a/drivers/gpu/drm/amd/display/dc/gpio/dcn20/hw_factory_dcn20.c b/drivers/gpu/drm/amd/display/dc/gpio/dcn20/hw_factory_dcn20.c
index 43a440385b43..83f798cb8b21 100644
--- a/drivers/gpu/drm/amd/display/dc/gpio/dcn20/hw_factory_dcn20.c
+++ b/drivers/gpu/drm/amd/display/dc/gpio/dcn20/hw_factory_dcn20.c
@@ -22,7 +22,6 @@
* Authors: AMD
*
*/
-#if defined(CONFIG_DRM_AMD_DC_DCN2_0)
#include "dm_services.h"
#include "include/gpio_types.h"
#include "../hw_factory.h"
@@ -110,6 +109,12 @@ static const struct ddc_registers ddc_data_regs_dcn[] = {
ddc_data_regs_dcn2(4),
ddc_data_regs_dcn2(5),
ddc_data_regs_dcn2(6),
+ {
+ DDC_GPIO_VGA_REG_LIST(DATA),
+ .ddc_setup = 0,
+ .phy_aux_cntl = 0,
+ .dc_gpio_aux_ctrl_5 = 0
+ }
};
static const struct ddc_registers ddc_clk_regs_dcn[] = {
@@ -119,6 +124,12 @@ static const struct ddc_registers ddc_clk_regs_dcn[] = {
ddc_clk_regs_dcn2(4),
ddc_clk_regs_dcn2(5),
ddc_clk_regs_dcn2(6),
+ {
+ DDC_GPIO_VGA_REG_LIST(CLK),
+ .ddc_setup = 0,
+ .phy_aux_cntl = 0,
+ .dc_gpio_aux_ctrl_5 = 0
+ }
};
static const struct ddc_sh_mask ddc_shift[] = {
@@ -246,4 +257,3 @@ void dal_hw_factory_dcn20_init(struct hw_factory *factory)
factory->funcs = &funcs;
}
-#endif
diff --git a/drivers/gpu/drm/amd/display/dc/gpio/dcn20/hw_factory_dcn20.h b/drivers/gpu/drm/amd/display/dc/gpio/dcn20/hw_factory_dcn20.h
index 43a4ce7aa3bf..0fd9b315bd7a 100644
--- a/drivers/gpu/drm/amd/display/dc/gpio/dcn20/hw_factory_dcn20.h
+++ b/drivers/gpu/drm/amd/display/dc/gpio/dcn20/hw_factory_dcn20.h
@@ -22,7 +22,6 @@
* Authors: AMD
*
*/
-#if defined(CONFIG_DRM_AMD_DC_DCN2_0)
#ifndef __DAL_HW_FACTORY_DCN20_H__
#define __DAL_HW_FACTORY_DCN20_H__
@@ -30,4 +29,3 @@
void dal_hw_factory_dcn20_init(struct hw_factory *factory);
#endif /* __DAL_HW_FACTORY_DCN20_H__ */
-#endif
diff --git a/drivers/gpu/drm/amd/display/dc/gpio/dcn20/hw_translate_dcn20.c b/drivers/gpu/drm/amd/display/dc/gpio/dcn20/hw_translate_dcn20.c
index 915e896e0e91..52ba62b3b5e4 100644
--- a/drivers/gpu/drm/amd/display/dc/gpio/dcn20/hw_translate_dcn20.c
+++ b/drivers/gpu/drm/amd/display/dc/gpio/dcn20/hw_translate_dcn20.c
@@ -26,7 +26,6 @@
/*
* Pre-requisites: headers required by header of this unit
*/
-#if defined(CONFIG_DRM_AMD_DC_DCN2_0)
#include "hw_translate_dcn20.h"
#include "dm_services.h"
@@ -379,4 +378,3 @@ void dal_hw_translate_dcn20_init(struct hw_translate *tr)
tr->funcs = &funcs;
}
-#endif
diff --git a/drivers/gpu/drm/amd/display/dc/gpio/dcn20/hw_translate_dcn20.h b/drivers/gpu/drm/amd/display/dc/gpio/dcn20/hw_translate_dcn20.h
index 01f52c7bed86..5f7a35530e26 100644
--- a/drivers/gpu/drm/amd/display/dc/gpio/dcn20/hw_translate_dcn20.h
+++ b/drivers/gpu/drm/amd/display/dc/gpio/dcn20/hw_translate_dcn20.h
@@ -22,7 +22,6 @@
* Authors: AMD
*
*/
-#if defined(CONFIG_DRM_AMD_DC_DCN2_0)
#ifndef __DAL_HW_TRANSLATE_DCN20_H__
#define __DAL_HW_TRANSLATE_DCN20_H__
@@ -32,4 +31,3 @@ struct hw_translate;
void dal_hw_translate_dcn20_init(struct hw_translate *tr);
#endif /* __DAL_HW_TRANSLATE_DCN20_H__ */
-#endif
diff --git a/drivers/gpu/drm/amd/display/dc/gpio/dcn21/hw_factory_dcn21.c b/drivers/gpu/drm/amd/display/dc/gpio/dcn21/hw_factory_dcn21.c
index 8572678f8d4f..907c5911eb9e 100644
--- a/drivers/gpu/drm/amd/display/dc/gpio/dcn21/hw_factory_dcn21.c
+++ b/drivers/gpu/drm/amd/display/dc/gpio/dcn21/hw_factory_dcn21.c
@@ -22,7 +22,6 @@
* Authors: AMD
*
*/
-#if defined(CONFIG_DRM_AMD_DC_DCN2_0)
#include "dm_services.h"
#include "include/gpio_types.h"
#include "../hw_factory.h"
@@ -239,4 +238,3 @@ void dal_hw_factory_dcn21_init(struct hw_factory *factory)
factory->funcs = &funcs;
}
-#endif
diff --git a/drivers/gpu/drm/amd/display/dc/gpio/dcn21/hw_factory_dcn21.h b/drivers/gpu/drm/amd/display/dc/gpio/dcn21/hw_factory_dcn21.h
index 2443f9e7afbf..4949e0c7fa06 100644
--- a/drivers/gpu/drm/amd/display/dc/gpio/dcn21/hw_factory_dcn21.h
+++ b/drivers/gpu/drm/amd/display/dc/gpio/dcn21/hw_factory_dcn21.h
@@ -22,7 +22,6 @@
* Authors: AMD
*
*/
-#if defined(CONFIG_DRM_AMD_DC_DCN2_1)
#ifndef __DAL_HW_FACTORY_DCN21_H__
#define __DAL_HW_FACTORY_DCN21_H__
@@ -30,4 +29,3 @@
void dal_hw_factory_dcn21_init(struct hw_factory *factory);
#endif /* __DAL_HW_FACTORY_DCN21_H__ */
-#endif
diff --git a/drivers/gpu/drm/amd/display/dc/gpio/dcn21/hw_translate_dcn21.c b/drivers/gpu/drm/amd/display/dc/gpio/dcn21/hw_translate_dcn21.c
index fbb58fb8c318..291966efe63d 100644
--- a/drivers/gpu/drm/amd/display/dc/gpio/dcn21/hw_translate_dcn21.c
+++ b/drivers/gpu/drm/amd/display/dc/gpio/dcn21/hw_translate_dcn21.c
@@ -26,7 +26,6 @@
/*
* Pre-requisites: headers required by header of this unit
*/
-#if defined(CONFIG_DRM_AMD_DC_DCN2_0)
#include "hw_translate_dcn21.h"
#include "dm_services.h"
@@ -382,4 +381,3 @@ void dal_hw_translate_dcn21_init(struct hw_translate *tr)
tr->funcs = &funcs;
}
-#endif
diff --git a/drivers/gpu/drm/amd/display/dc/gpio/dcn21/hw_translate_dcn21.h b/drivers/gpu/drm/amd/display/dc/gpio/dcn21/hw_translate_dcn21.h
index 2bfaac24c574..9462b0a65200 100644
--- a/drivers/gpu/drm/amd/display/dc/gpio/dcn21/hw_translate_dcn21.h
+++ b/drivers/gpu/drm/amd/display/dc/gpio/dcn21/hw_translate_dcn21.h
@@ -22,7 +22,6 @@
* Authors: AMD
*
*/
-#if defined(CONFIG_DRM_AMD_DC_DCN2_1)
#ifndef __DAL_HW_TRANSLATE_DCN21_H__
#define __DAL_HW_TRANSLATE_DCN21_H__
@@ -32,4 +31,3 @@ struct hw_translate;
void dal_hw_translate_dcn21_init(struct hw_translate *tr);
#endif /* __DAL_HW_TRANSLATE_DCN21_H__ */
-#endif
diff --git a/drivers/gpu/drm/amd/display/dc/gpio/ddc_regs.h b/drivers/gpu/drm/amd/display/dc/gpio/ddc_regs.h
index f91e85b04956..308a543178a5 100644
--- a/drivers/gpu/drm/amd/display/dc/gpio/ddc_regs.h
+++ b/drivers/gpu/drm/amd/display/dc/gpio/ddc_regs.h
@@ -48,13 +48,11 @@
DDC_GPIO_REG_LIST(cd, id),\
.ddc_setup = REG(DC_I2C_DDC ## id ## _SETUP)
-#if defined(CONFIG_DRM_AMD_DC_DCN2_0)
#define DDC_REG_LIST_DCN2(cd, id) \
DDC_GPIO_REG_LIST(cd, id),\
.ddc_setup = REG(DC_I2C_DDC ## id ## _SETUP),\
.phy_aux_cntl = REG(PHY_AUX_CNTL), \
.dc_gpio_aux_ctrl_5 = REG(DC_GPIO_AUX_CTRL_5)
-#endif
#define DDC_GPIO_VGA_REG_LIST_ENTRY(type,cd)\
.type ## _reg = REG(DC_GPIO_DDCVGA_ ## type),\
@@ -90,13 +88,11 @@
DDC_GPIO_I2C_REG_LIST(cd),\
.ddc_setup = 0
-#if defined(CONFIG_DRM_AMD_DC_DCN2_0)
#define DDC_I2C_REG_LIST_DCN2(cd) \
DDC_GPIO_I2C_REG_LIST(cd),\
.ddc_setup = 0,\
.phy_aux_cntl = REG(PHY_AUX_CNTL), \
.dc_gpio_aux_ctrl_5 = REG(DC_GPIO_AUX_CTRL_5)
-#endif
#define DDC_MASK_SH_LIST_COMMON(mask_sh) \
SF_DDC(DC_I2C_DDC1_SETUP, DC_I2C_DDC1_ENABLE, mask_sh),\
SF_DDC(DC_I2C_DDC1_SETUP, DC_I2C_DDC1_EDID_DETECT_ENABLE, mask_sh),\
@@ -110,22 +106,18 @@
SF_DDC(DC_GPIO_I2CPAD_MASK, DC_GPIO_SDA_PD_DIS, mask_sh),\
SF_DDC(DC_GPIO_I2CPAD_MASK, DC_GPIO_SCL_PD_DIS, mask_sh)
-#if defined(CONFIG_DRM_AMD_DC_DCN2_0)
#define DDC_MASK_SH_LIST_DCN2(mask_sh, cd) \
{DDC_MASK_SH_LIST_COMMON(mask_sh),\
0,\
0,\
(PHY_AUX_CNTL__AUX## cd ##_PAD_RXSEL## mask_sh),\
(DC_GPIO_AUX_CTRL_5__DDC_PAD## cd ##_I2CMODE## mask_sh)}
-#endif
struct ddc_registers {
struct gpio_registers gpio;
uint32_t ddc_setup;
-#if defined(CONFIG_DRM_AMD_DC_DCN2_0)
uint32_t phy_aux_cntl;
uint32_t dc_gpio_aux_ctrl_5;
-#endif
};
struct ddc_sh_mask {
@@ -140,11 +132,9 @@ struct ddc_sh_mask {
/* i2cpad_mask */
uint32_t DC_GPIO_SDA_PD_DIS;
uint32_t DC_GPIO_SCL_PD_DIS;
-#if defined(CONFIG_DRM_AMD_DC_DCN2_0)
//phy_aux_cntl
uint32_t AUX_PAD_RXSEL;
uint32_t DDC_PAD_I2CMODE;
-#endif
};
@@ -180,7 +170,6 @@ struct ddc_sh_mask {
{\
DDC_I2C_REG_LIST(SCL)\
}
-#if defined(CONFIG_DRM_AMD_DC_DCN2_0)
#define ddc_data_regs_dcn2(id) \
{\
DDC_REG_LIST_DCN2(DATA, id)\
@@ -200,7 +189,6 @@ struct ddc_sh_mask {
{\
DDC_I2C_REG_LIST_DCN2(SCL)\
}
-#endif
#endif /* DRIVERS_GPU_DRM_AMD_DC_DEV_DC_GPIO_DDC_REGS_H_ */
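
A note on the macro machinery above: DDC_REG_LIST_DCN2() and friends use ## token pasting so one definition can stamp out a designated-initializer fragment per DDC line, and the new VGA entries simply leave the DCN2-only fields at zero. A minimal, self-contained sketch of the same idiom; all names and offsets below are illustrative stand-ins, not the real REG() plumbing:

    #include <stdint.h>
    #include <stdio.h>

    /* Stand-ins for the real register-header offsets. */
    #define mmDC_I2C_DDC1_SETUP 0x100
    #define mmDC_I2C_DDC2_SETUP 0x110

    struct regs { uint32_t ddc_setup; };

    /* Like DDC_REG_LIST_DCN2(cd, id): paste the id into a register name. */
    #define EXAMPLE_DDC_REG_LIST(id) \
        .ddc_setup = mmDC_I2C_DDC ## id ## _SETUP

    static const struct regs ddc_regs[] = {
        { EXAMPLE_DDC_REG_LIST(1) },
        { EXAMPLE_DDC_REG_LIST(2) },
        { .ddc_setup = 0 },  /* like the new VGA entry: no setup register */
    };

    int main(void)
    {
        for (unsigned i = 0; i < 3; i++)
            printf("entry %u: setup reg %#x\n", i,
                   (unsigned)ddc_regs[i].ddc_setup);
        return 0;
    }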
diff --git a/drivers/gpu/drm/amd/display/dc/gpio/hw_ddc.c b/drivers/gpu/drm/amd/display/dc/gpio/hw_ddc.c
index 1c12961f6472..1ae153eab31d 100644
--- a/drivers/gpu/drm/amd/display/dc/gpio/hw_ddc.c
+++ b/drivers/gpu/drm/amd/display/dc/gpio/hw_ddc.c
@@ -48,18 +48,18 @@
struct gpio;
-static void destruct(
+static void dal_hw_ddc_destruct(
struct hw_ddc *pin)
{
dal_hw_gpio_destruct(&pin->base);
}
-static void destroy(
+static void dal_hw_ddc_destroy(
struct hw_gpio_pin **ptr)
{
struct hw_ddc *pin = HW_DDC_FROM_BASE(*ptr);
- destruct(pin);
+ dal_hw_ddc_destruct(pin);
kfree(pin);
@@ -150,7 +150,6 @@ static enum gpio_result set_config(
AUX_PAD1_MODE, 0);
}
-#if defined(CONFIG_DRM_AMD_DC_DCN2_0)
if (ddc->regs->dc_gpio_aux_ctrl_5 != 0) {
REG_UPDATE(dc_gpio_aux_ctrl_5, DDC_PAD_I2CMODE, 1);
}
@@ -158,7 +157,6 @@ static enum gpio_result set_config(
if (ddc->regs->phy_aux_cntl != 0) {
REG_UPDATE(phy_aux_cntl, AUX_PAD_RXSEL, 1);
}
-#endif
return GPIO_RESULT_OK;
case GPIO_DDC_CONFIG_TYPE_MODE_AUX:
/* set the AUX pad mode */
@@ -166,12 +164,10 @@ static enum gpio_result set_config(
REG_SET(gpio.MASK_reg, regval,
AUX_PAD1_MODE, 1);
}
-#if defined(CONFIG_DRM_AMD_DC_DCN2_0)
if (ddc->regs->dc_gpio_aux_ctrl_5 != 0) {
REG_UPDATE(dc_gpio_aux_ctrl_5,
DDC_PAD_I2CMODE, 0);
}
-#endif
return GPIO_RESULT_OK;
case GPIO_DDC_CONFIG_TYPE_POLL_FOR_CONNECT:
@@ -211,7 +207,7 @@ static enum gpio_result set_config(
}
static const struct hw_gpio_pin_funcs funcs = {
- .destroy = destroy,
+ .destroy = dal_hw_ddc_destroy,
.open = dal_hw_gpio_open,
.get_value = dal_hw_gpio_get_value,
.set_value = dal_hw_gpio_set_value,
@@ -220,7 +216,7 @@ static const struct hw_gpio_pin_funcs funcs = {
.close = dal_hw_gpio_close,
};
-static void construct(
+static void dal_hw_ddc_construct(
struct hw_ddc *ddc,
enum gpio_id id,
uint32_t en,
@@ -247,7 +243,7 @@ void dal_hw_ddc_init(
return;
}
- construct(*hw_ddc, id, en, ctx);
+ dal_hw_ddc_construct(*hw_ddc, id, en, ctx);
}
struct hw_gpio_pin *dal_hw_ddc_get_pin(struct gpio *gpio)
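
The set_config() hunks above trade compile-time #ifdefs for a runtime test: register-list entries built without the DCN2 macros leave phy_aux_cntl and dc_gpio_aux_ctrl_5 at 0, so a nonzero offset doubles as a feature flag. A hedged, standalone model of that idiom; REG_UPDATE is simulated with printf:

    #include <stdint.h>
    #include <stdio.h>

    struct example_ddc_regs {
        uint32_t phy_aux_cntl;        /* 0 => not present on this ASIC */
        uint32_t dc_gpio_aux_ctrl_5;  /* 0 => not present on this ASIC */
    };

    static void example_set_i2c_mode(const struct example_ddc_regs *regs)
    {
        /* Runtime guard standing in for the removed CONFIG_DRM_AMD_DC_DCN2_0 */
        if (regs->dc_gpio_aux_ctrl_5 != 0)
            printf("REG_UPDATE(dc_gpio_aux_ctrl_5, DDC_PAD_I2CMODE, 1)\n");
        if (regs->phy_aux_cntl != 0)
            printf("REG_UPDATE(phy_aux_cntl, AUX_PAD_RXSEL, 1)\n");
    }

    int main(void)
    {
        struct example_ddc_regs dcn2 = { 0x20, 0x30 }, vga = { 0, 0 };
        example_set_i2c_mode(&dcn2);  /* programs both fields */
        example_set_i2c_mode(&vga);   /* silently skips both */
        return 0;
    }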
diff --git a/drivers/gpu/drm/amd/display/dc/gpio/hw_factory.c b/drivers/gpu/drm/amd/display/dc/gpio/hw_factory.c
index fa9f1d055ec8..d2d36d48caaa 100644
--- a/drivers/gpu/drm/amd/display/dc/gpio/hw_factory.c
+++ b/drivers/gpu/drm/amd/display/dc/gpio/hw_factory.c
@@ -45,15 +45,11 @@
#include "dce80/hw_factory_dce80.h"
#include "dce110/hw_factory_dce110.h"
#include "dce120/hw_factory_dce120.h"
-#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
+#if defined(CONFIG_DRM_AMD_DC_DCN)
#include "dcn10/hw_factory_dcn10.h"
#endif
-#if defined(CONFIG_DRM_AMD_DC_DCN2_0)
#include "dcn20/hw_factory_dcn20.h"
-#endif
-#if defined(CONFIG_DRM_AMD_DC_DCN2_1)
#include "dcn21/hw_factory_dcn21.h"
-#endif
#include "diagnostics/hw_factory_diag.h"
@@ -90,19 +86,15 @@ bool dal_hw_factory_init(
case DCE_VERSION_12_1:
dal_hw_factory_dce120_init(factory);
return true;
-#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
+#if defined(CONFIG_DRM_AMD_DC_DCN)
case DCN_VERSION_1_0:
case DCN_VERSION_1_01:
dal_hw_factory_dcn10_init(factory);
return true;
-#endif
-#if defined(CONFIG_DRM_AMD_DC_DCN2_0)
case DCN_VERSION_2_0:
dal_hw_factory_dcn20_init(factory);
return true;
-#endif
-#if defined(CONFIG_DRM_AMD_DC_DCN2_1)
case DCN_VERSION_2_1:
dal_hw_factory_dcn21_init(factory);
return true;
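
With the per-generation guards collapsed into a single CONFIG_DRM_AMD_DC_DCN block, dal_hw_factory_init() reads as a plain version dispatch over DCE/DCN versions. A reduced model of that shape; the enum values and table contents are stand-ins:

    #include <stdbool.h>
    #include <stdio.h>

    enum dce_version { DCN_VERSION_1_0, DCN_VERSION_2_0,
                       DCN_VERSION_2_1, DCE_UNKNOWN };
    struct hw_factory { const char *name; };

    static bool example_factory_init(struct hw_factory *f, enum dce_version v)
    {
        switch (v) {
        case DCN_VERSION_1_0: f->name = "dcn10"; return true;
        case DCN_VERSION_2_0: f->name = "dcn20"; return true;
        case DCN_VERSION_2_1: f->name = "dcn21"; return true;
        default:              return false;  /* unsupported ASIC */
        }
    }

    int main(void)
    {
        struct hw_factory f;
        if (example_factory_init(&f, DCN_VERSION_2_1))
            printf("factory: %s\n", f.name);
        return 0;
    }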
diff --git a/drivers/gpu/drm/amd/display/dc/gpio/hw_generic.c b/drivers/gpu/drm/amd/display/dc/gpio/hw_generic.c
index 69b899741f6d..f9e847e6555d 100644
--- a/drivers/gpu/drm/amd/display/dc/gpio/hw_generic.c
+++ b/drivers/gpu/drm/amd/display/dc/gpio/hw_generic.c
@@ -46,22 +46,13 @@
struct gpio;
-static void dal_hw_generic_construct(
- struct hw_generic *pin,
- enum gpio_id id,
- uint32_t en,
- struct dc_context *ctx)
-{
- dal_hw_gpio_construct(&pin->base, id, en, ctx);
-}
-
static void dal_hw_generic_destruct(
struct hw_generic *pin)
{
dal_hw_gpio_destruct(&pin->base);
}
-static void destroy(
+static void dal_hw_generic_destroy(
struct hw_gpio_pin **ptr)
{
struct hw_generic *generic = HW_GENERIC_FROM_BASE(*ptr);
@@ -90,7 +81,7 @@ static enum gpio_result set_config(
}
static const struct hw_gpio_pin_funcs funcs = {
- .destroy = destroy,
+ .destroy = dal_hw_generic_destroy,
.open = dal_hw_gpio_open,
.get_value = dal_hw_gpio_get_value,
.set_value = dal_hw_gpio_set_value,
@@ -99,14 +90,14 @@ static const struct hw_gpio_pin_funcs funcs = {
.close = dal_hw_gpio_close,
};
-static void construct(
- struct hw_generic *generic,
+static void dal_hw_generic_construct(
+ struct hw_generic *pin,
enum gpio_id id,
uint32_t en,
struct dc_context *ctx)
{
- dal_hw_generic_construct(generic, id, en, ctx);
- generic->base.base.funcs = &funcs;
+ dal_hw_gpio_construct(&pin->base, id, en, ctx);
+ pin->base.base.funcs = &funcs;
}
void dal_hw_generic_init(
@@ -126,7 +117,7 @@ void dal_hw_generic_init(
return;
}
- construct(*hw_generic, id, en, ctx);
+ dal_hw_generic_construct(*hw_generic, id, en, ctx);
}
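
hw_generic.c (and hw_hpd.c below) also collapse the anonymous construct()/destroy() wrappers into single, prefix-named functions that wire the funcs vtable directly; besides removing a pointless indirection, the prefixed names disambiguate stack traces. The pattern, modeled standalone:

    #include <stdio.h>

    struct pin_funcs  { void (*destroy)(void); };
    struct hw_pin     { const struct pin_funcs *funcs; };
    struct hw_generic { struct hw_pin base; };

    static void dal_hw_generic_destroy(void) { puts("destroy"); }

    static const struct pin_funcs funcs = {
        .destroy = dal_hw_generic_destroy,
    };

    /* One constructor: initialize the base, then hook up the vtable. */
    static void dal_hw_generic_construct(struct hw_generic *pin)
    {
        pin->base.funcs = &funcs;
    }

    int main(void)
    {
        struct hw_generic g;
        dal_hw_generic_construct(&g);
        g.base.funcs->destroy();
        return 0;
    }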
diff --git a/drivers/gpu/drm/amd/display/dc/gpio/hw_hpd.c b/drivers/gpu/drm/amd/display/dc/gpio/hw_hpd.c
index 00c9bcf660a3..692f29de7797 100644
--- a/drivers/gpu/drm/amd/display/dc/gpio/hw_hpd.c
+++ b/drivers/gpu/drm/amd/display/dc/gpio/hw_hpd.c
@@ -46,34 +46,18 @@
struct gpio;
-static void dal_hw_hpd_construct(
- struct hw_hpd *pin,
- enum gpio_id id,
- uint32_t en,
- struct dc_context *ctx)
-{
- dal_hw_gpio_construct(&pin->base, id, en, ctx);
-}
-
static void dal_hw_hpd_destruct(
struct hw_hpd *pin)
{
dal_hw_gpio_destruct(&pin->base);
}
-
-static void destruct(
- struct hw_hpd *hpd)
-{
- dal_hw_hpd_destruct(hpd);
-}
-
-static void destroy(
+static void dal_hw_hpd_destroy(
struct hw_gpio_pin **ptr)
{
struct hw_hpd *hpd = HW_HPD_FROM_BASE(*ptr);
- destruct(hpd);
+ dal_hw_hpd_destruct(hpd);
kfree(hpd);
@@ -120,7 +104,7 @@ static enum gpio_result set_config(
}
static const struct hw_gpio_pin_funcs funcs = {
- .destroy = destroy,
+ .destroy = dal_hw_hpd_destroy,
.open = dal_hw_gpio_open,
.get_value = get_value,
.set_value = dal_hw_gpio_set_value,
@@ -129,14 +113,14 @@ static const struct hw_gpio_pin_funcs funcs = {
.close = dal_hw_gpio_close,
};
-static void construct(
- struct hw_hpd *hpd,
+static void dal_hw_hpd_construct(
+ struct hw_hpd *pin,
enum gpio_id id,
uint32_t en,
struct dc_context *ctx)
{
- dal_hw_hpd_construct(hpd, id, en, ctx);
- hpd->base.base.funcs = &funcs;
+ dal_hw_gpio_construct(&pin->base, id, en, ctx);
+ pin->base.base.funcs = &funcs;
}
void dal_hw_hpd_init(
@@ -156,7 +140,7 @@ void dal_hw_hpd_init(
return;
}
- construct(*hw_hpd, id, en, ctx);
+ dal_hw_hpd_construct(*hw_hpd, id, en, ctx);
}
struct hw_gpio_pin *dal_hw_hpd_get_pin(struct gpio *gpio)
diff --git a/drivers/gpu/drm/amd/display/dc/gpio/hw_translate.c b/drivers/gpu/drm/amd/display/dc/gpio/hw_translate.c
index f2046f55d6a8..5d396657a1ee 100644
--- a/drivers/gpu/drm/amd/display/dc/gpio/hw_translate.c
+++ b/drivers/gpu/drm/amd/display/dc/gpio/hw_translate.c
@@ -43,15 +43,11 @@
#include "dce80/hw_translate_dce80.h"
#include "dce110/hw_translate_dce110.h"
#include "dce120/hw_translate_dce120.h"
-#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
+#if defined(CONFIG_DRM_AMD_DC_DCN)
#include "dcn10/hw_translate_dcn10.h"
#endif
-#if defined(CONFIG_DRM_AMD_DC_DCN2_0)
#include "dcn20/hw_translate_dcn20.h"
-#endif
-#if defined(CONFIG_DRM_AMD_DC_DCN2_1)
#include "dcn21/hw_translate_dcn21.h"
-#endif
#include "diagnostics/hw_translate_diag.h"
@@ -85,19 +81,15 @@ bool dal_hw_translate_init(
case DCE_VERSION_12_1:
dal_hw_translate_dce120_init(translate);
return true;
-#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
+#if defined(CONFIG_DRM_AMD_DC_DCN)
case DCN_VERSION_1_0:
case DCN_VERSION_1_01:
dal_hw_translate_dcn10_init(translate);
return true;
-#endif
-#if defined(CONFIG_DRM_AMD_DC_DCN2_0)
case DCN_VERSION_2_0:
dal_hw_translate_dcn20_init(translate);
return true;
-#endif
-#if defined(CONFIG_DRM_AMD_DC_DCN2_1)
case DCN_VERSION_2_1:
dal_hw_translate_dcn21_init(translate);
return true;
diff --git a/drivers/gpu/drm/amd/display/dc/inc/core_status.h b/drivers/gpu/drm/amd/display/dc/inc/core_status.h
index fd39e2abe2ed..4ead89dd7c41 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/core_status.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/core_status.h
@@ -43,10 +43,8 @@ enum dc_status {
DC_FAIL_BANDWIDTH_VALIDATE = 13, /* BW and Watermark validation */
DC_FAIL_SCALING = 14,
DC_FAIL_DP_LINK_TRAINING = 15,
-#ifdef CONFIG_DRM_AMD_DC_DCN2_0
DC_FAIL_DSC_VALIDATE = 16,
DC_NO_DSC_RESOURCE = 17,
-#endif
DC_FAIL_UNSUPPORTED_1 = 18,
DC_FAIL_CLK_EXCEED_MAX = 21,
DC_FAIL_CLK_BELOW_MIN = 22, /*THIS IS MIN PER IP*/
diff --git a/drivers/gpu/drm/amd/display/dc/inc/core_types.h b/drivers/gpu/drm/amd/display/dc/inc/core_types.h
index a831079607cd..f285b76888fb 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/core_types.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/core_types.h
@@ -33,13 +33,11 @@
#include "dc_bios_types.h"
#include "mem_input.h"
#include "hubp.h"
-#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
+#if defined(CONFIG_DRM_AMD_DC_DCN)
#include "mpc.h"
#endif
-#if defined(CONFIG_DRM_AMD_DC_DCN2_0)
#include "dwb.h"
#include "mcif_wb.h"
-#endif
#define MAX_CLOCK_SOURCES 7
@@ -89,9 +87,7 @@ void core_link_set_avmute(struct pipe_ctx *pipe_ctx, bool enable);
struct resource_pool;
struct dc_state;
struct resource_context;
-#if defined(CONFIG_DRM_AMD_DC_DCN2_1)
struct clk_bw_params;
-#endif
struct resource_funcs {
void (*destroy)(struct resource_pool **pool);
@@ -105,7 +101,7 @@ struct resource_funcs {
int (*populate_dml_pipes)(
struct dc *dc,
- struct resource_context *res_ctx,
+ struct dc_state *context,
display_e2e_pipe_params_st *pipes);
enum dc_status (*validate_global)(
@@ -135,7 +131,6 @@ struct resource_funcs {
struct resource_context *res_ctx,
const struct resource_pool *pool,
struct dc_stream_state *stream);
-#if defined(CONFIG_DRM_AMD_DC_DCN2_0)
void (*populate_dml_writeback_from_context)(
struct dc *dc,
struct resource_context *res_ctx,
@@ -146,12 +141,9 @@ struct resource_funcs {
struct dc_state *context,
display_e2e_pipe_params_st *pipes,
int pipe_cnt);
-#endif
-#if defined(CONFIG_DRM_AMD_DC_DCN2_1)
void (*update_bw_bounding_box)(
struct dc *dc,
struct clk_bw_params *bw_params);
-#endif
};
@@ -180,7 +172,6 @@ struct resource_pool {
struct dce_i2c_sw *sw_i2cs[MAX_PIPES];
bool i2c_hw_buffer_in_use;
-#if defined(CONFIG_DRM_AMD_DC_DCN2_0)
struct dwbc *dwbc[MAX_DWB_PIPES];
struct mcif_wb *mcif_wb[MAX_DWB_PIPES];
struct {
@@ -188,11 +179,8 @@ struct resource_pool {
unsigned int gsl_1:1;
unsigned int gsl_2:1;
} gsl_groups;
-#endif
-#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT
struct display_stream_compressor *dscs[MAX_PIPES];
-#endif
unsigned int pipe_count;
unsigned int underlay_pipe_index;
@@ -206,9 +194,7 @@ struct resource_pool {
unsigned int timing_generator_count;
unsigned int mpcc_count;
-#if defined(CONFIG_DRM_AMD_DC_DCN2_0)
unsigned int writeback_pipe_count;
-#endif
/*
* reserved clock source for DP
*/
@@ -226,9 +212,12 @@ struct resource_pool {
struct abm *abm;
struct dmcu *dmcu;
+ struct dmub_psr *psr;
const struct resource_funcs *funcs;
const struct resource_caps *res_cap;
+
+ struct ddc_service *oem_device;
};
struct dcn_fe_bandwidth {
@@ -238,9 +227,7 @@ struct dcn_fe_bandwidth {
struct stream_resource {
struct output_pixel_processor *opp;
-#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT
struct display_stream_compressor *dsc;
-#endif
struct timing_generator *tg;
struct stream_encoder *stream_enc;
struct audio *audio;
@@ -249,12 +236,10 @@ struct stream_resource {
struct encoder_info_frame encoder_info_frame;
struct abm *abm;
-#if defined(CONFIG_DRM_AMD_DC_DCN2_0)
/* There are only (num_pipes+1)/2 groups. 0 means unassigned,
* otherwise it's using group number 'gsl_group-1'
*/
uint8_t gsl_group;
-#endif
};
struct plane_resource {
@@ -306,17 +291,15 @@ struct pipe_ctx {
struct pipe_ctx *next_odm_pipe;
struct pipe_ctx *prev_odm_pipe;
-#ifdef CONFIG_DRM_AMD_DC_DCN1_0
+#ifdef CONFIG_DRM_AMD_DC_DCN
struct _vcs_dpi_display_dlg_regs_st dlg_regs;
struct _vcs_dpi_display_ttu_regs_st ttu_regs;
struct _vcs_dpi_display_rq_regs_st rq_regs;
struct _vcs_dpi_display_pipe_dest_params_st pipe_dlg_param;
#endif
union pipe_update_flags update_flags;
-#ifdef CONFIG_DRM_AMD_DC_DCN2_0
struct dwbc *dwbc;
struct mcif_wb *mcif_wb;
-#endif
};
struct resource_context {
@@ -325,9 +308,7 @@ struct resource_context {
bool is_audio_acquired[MAX_PIPES];
uint8_t clock_source_ref_count[MAX_CLOCK_SOURCES];
uint8_t dp_clock_source_ref_count;
-#ifdef CONFIG_DRM_AMD_DC_DCN2_0
bool is_dsc_acquired[MAX_PIPES];
-#endif
};
struct dce_bw_output {
@@ -347,18 +328,14 @@ struct dce_bw_output {
int blackout_recovery_time_us;
};
-#if defined(CONFIG_DRM_AMD_DC_DCN2_0)
struct dcn_bw_writeback {
struct mcif_arb_params mcif_wb_arb[MAX_DWB_PIPES];
};
-#endif
struct dcn_bw_output {
struct dc_clocks clk;
struct dcn_watermark_set watermarks;
-#if defined(CONFIG_DRM_AMD_DC_DCN2_0)
struct dcn_bw_writeback bw_writeback;
-#endif
};
union bw_output {
@@ -392,7 +369,7 @@ struct dc_state {
/* Note: these are big structures, do *not* put on stack! */
struct dm_pp_display_configuration pp_display_cfg;
-#ifdef CONFIG_DRM_AMD_DC_DCN1_0
+#ifdef CONFIG_DRM_AMD_DC_DCN
struct dcn_bw_internal_vars dcn_bw_vars;
#endif
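
One behavioural change hiding in core_types.h: populate_dml_pipes() now takes the whole dc_state rather than just the resource_context, presumably so implementations can also reach stream and plane state (the motivation is not stated in this hunk). A sketch of a per-ASIC table matching the new signature, with every type reduced to a stub:

    #include <stdio.h>

    struct dc       { int dummy; };
    struct dc_state { int pipe_count; };  /* stub for the real state */
    typedef struct { int unused; } display_e2e_pipe_params_st;

    struct example_resource_funcs {
        int (*populate_dml_pipes)(struct dc *dc,
                struct dc_state *context,  /* was resource_context */
                display_e2e_pipe_params_st *pipes);
    };

    static int dcn20_populate_dml_pipes(struct dc *dc,
            struct dc_state *context,
            display_e2e_pipe_params_st *pipes)
    {
        (void)dc; (void)pipes;
        return context->pipe_count;  /* illustrative only */
    }

    static const struct example_resource_funcs funcs = {
        .populate_dml_pipes = dcn20_populate_dml_pipes,
    };

    int main(void)
    {
        struct dc dc; struct dc_state ctx = { .pipe_count = 4 };
        printf("%d pipes\n", funcs.populate_dml_pipes(&dc, &ctx, NULL));
        return 0;
    }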
diff --git a/drivers/gpu/drm/amd/display/dc/inc/dc_link_ddc.h b/drivers/gpu/drm/amd/display/dc/inc/dc_link_ddc.h
index 14716ba35662..de2d160114db 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/dc_link_ddc.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/dc_link_ddc.h
@@ -105,7 +105,7 @@ int dc_link_aux_transfer_raw(struct ddc_service *ddc,
bool dc_link_aux_transfer_with_retries(struct ddc_service *ddc,
struct aux_payload *payload);
-enum dc_status dc_link_aux_configure_timeout(struct ddc_service *ddc,
+uint32_t dc_link_aux_configure_timeout(struct ddc_service *ddc,
uint32_t timeout);
void dal_ddc_service_write_scdc_data(
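
dc_link_aux_configure_timeout() now returns a uint32_t instead of an enum dc_status. This hunk does not show what the value means; a plausible reading is that the setter reports the timeout it actually programmed, and the sketch below assumes exactly that (the clamp value is borrowed from the 3200 us constant in the next file):

    #include <stdint.h>
    #include <stdio.h>

    /* Assumption: the setter clamps and returns the value it applied. */
    static uint32_t example_configure_timeout(uint32_t *hw_timeout_us,
                                              uint32_t requested_us)
    {
        uint32_t applied = requested_us > 3200 ? 3200 : requested_us;
        *hw_timeout_us = applied;
        return applied;
    }

    int main(void)
    {
        uint32_t hw = 0;
        printf("applied %u us\n",
               (unsigned)example_configure_timeout(&hw, 32000));
        return 0;
    }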
diff --git a/drivers/gpu/drm/amd/display/dc/inc/dc_link_dp.h b/drivers/gpu/drm/amd/display/dc/inc/dc_link_dp.h
index 045138dbdccb..8b1f0ce6c2a7 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/dc_link_dp.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/dc_link_dp.h
@@ -28,8 +28,8 @@
#define LINK_TRAINING_ATTEMPTS 4
#define LINK_TRAINING_RETRY_DELAY 50 /* ms */
-#define LINK_AUX_DEFAULT_EXTENDED_TIMEOUT_PERIOD 32000 /*us*/
-#define LINK_AUX_DEFAULT_TIMEOUT_PERIOD 400 /*us*/
+#define LINK_AUX_DEFAULT_EXTENDED_TIMEOUT_PERIOD 3200 /*us*/
+#define LINK_AUX_DEFAULT_TIMEOUT_PERIOD 552 /*us*/
struct dc_link;
struct dc_stream_state;
@@ -57,10 +57,11 @@ void decide_link_settings(
struct dc_link_settings *link_setting);
bool perform_link_training_with_retries(
- struct dc_link *link,
const struct dc_link_settings *link_setting,
bool skip_video_pattern,
- int attempts);
+ int attempts,
+ struct pipe_ctx *pipe_ctx,
+ enum signal_type signal);
bool is_mst_supported(struct dc_link *link);
@@ -75,13 +76,13 @@ void dp_enable_mst_on_sink(struct dc_link *link, bool enable);
enum dp_panel_mode dp_get_panel_mode(struct dc_link *link);
void dp_set_panel_mode(struct dc_link *link, enum dp_panel_mode panel_mode);
-#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT
+bool dp_overwrite_extended_receiver_cap(struct dc_link *link);
+
void dp_set_fec_ready(struct dc_link *link, bool ready);
void dp_set_fec_enable(struct dc_link *link, bool enable);
bool dp_set_dsc_enable(struct pipe_ctx *pipe_ctx, bool enable);
bool dp_set_dsc_pps_sdp(struct pipe_ctx *pipe_ctx, bool enable);
void dp_set_dsc_on_stream(struct pipe_ctx *pipe_ctx, bool enable);
bool dp_update_dsc_config(struct pipe_ctx *pipe_ctx);
-#endif
#endif /* __DC_LINK_DP_H__ */
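
perform_link_training_with_retries() drops the explicit dc_link argument in favour of a pipe_ctx plus signal type, presumably because the link is reachable through the pipe context. Independent of the signature, the retry discipline itself, LINK_TRAINING_ATTEMPTS tries with LINK_TRAINING_RETRY_DELAY between them, is worth seeing in isolation; a standalone model:

    #include <stdbool.h>
    #include <stdio.h>

    #define LINK_TRAINING_ATTEMPTS    4
    #define LINK_TRAINING_RETRY_DELAY 50 /* ms */

    static bool try_link_training(int attempt)
    {
        return attempt == 2;  /* pretend the third try succeeds */
    }

    static bool perform_with_retries(int attempts)
    {
        for (int i = 0; i < attempts; i++) {
            if (try_link_training(i))
                return true;
            printf("attempt %d failed, sleeping %d ms\n",
                   i, LINK_TRAINING_RETRY_DELAY);
        }
        return false;
    }

    int main(void)
    {
        printf("trained: %d\n", perform_with_retries(LINK_TRAINING_ATTEMPTS));
        return 0;
    }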
diff --git a/drivers/gpu/drm/amd/display/dc/calcs/dcn_calc_math.h b/drivers/gpu/drm/amd/display/dc/inc/dcn_calc_math.h
index 45a07eeffbb6..45a07eeffbb6 100644
--- a/drivers/gpu/drm/amd/display/dc/calcs/dcn_calc_math.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/dcn_calc_math.h
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr.h b/drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr.h
index 026e6a2a2c44..ac530c057ddd 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr.h
@@ -31,7 +31,6 @@
#define DCN_MINIMUM_DISPCLK_Khz 100000
#define DCN_MINIMUM_DPPCLK_Khz 100000
-#ifdef CONFIG_DRM_AMD_DC_DCN2_1
/* Constants */
#define DDR4_DRAM_WIDTH 64
#define WM_A 0
@@ -39,12 +38,10 @@
#define WM_C 2
#define WM_D 3
#define WM_SET_COUNT 4
-#endif
#define DCN_MINIMUM_DISPCLK_Khz 100000
#define DCN_MINIMUM_DPPCLK_Khz 100000
-#ifdef CONFIG_DRM_AMD_DC_DCN2_1
/* Will these bw structures be ASIC specific? */
#define MAX_NUM_DPM_LVL 8
@@ -154,7 +151,6 @@ struct clk_bw_params {
struct clk_limit_table clk_table;
struct wm_table wm_table;
};
-#endif
/* Public interfaces */
struct clk_states {
@@ -195,9 +191,8 @@ struct clk_mgr {
bool psr_allow_active_cache;
int dprefclk_khz; // Used by program pixel clock in clock source funcs, need to figure out where this goes
int dentist_vco_freq_khz;
-#ifdef CONFIG_DRM_AMD_DC_DCN2_1
+ struct clk_state_registers_and_bypass boot_snapshot;
struct clk_bw_params *bw_params;
-#endif
};
/* forward declarations */
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr_internal.h b/drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr_internal.h
index a17a77192690..862952c0286a 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr_internal.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr_internal.h
@@ -96,12 +96,10 @@ enum dentist_divider_range {
.MP1_SMN_C2PMSG_83 = mmMP1_SMN_C2PMSG_83, \
.MP1_SMN_C2PMSG_67 = mmMP1_SMN_C2PMSG_67
-#ifdef CONFIG_DRM_AMD_DC_DCN2_0
#define CLK_REG_LIST_NV10() \
SR(DENTIST_DISPCLK_CNTL), \
CLK_SRI(CLK3_CLK_PLL_REQ, CLK3, 0), \
CLK_SRI(CLK3_CLK2_DFS_CNTL, CLK3, 0)
-#endif
#define CLK_SF(reg_name, field_name, post_fix)\
.field_name = reg_name ## __ ## field_name ## post_fix
@@ -120,7 +118,6 @@ enum dentist_divider_range {
CLK_SF(MP1_SMN_C2PMSG_83, CONTENT, mask_sh),\
CLK_SF(MP1_SMN_C2PMSG_91, CONTENT, mask_sh),
-#ifdef CONFIG_DRM_AMD_DC_DCN2_0
#define CLK_COMMON_MASK_SH_LIST_DCN20_BASE(mask_sh) \
CLK_COMMON_MASK_SH_LIST_DCN_COMMON_BASE(mask_sh),\
CLK_SF(DENTIST_DISPCLK_CNTL, DENTIST_DPPCLK_WDIVIDER, mask_sh),\
@@ -130,7 +127,6 @@ enum dentist_divider_range {
CLK_COMMON_MASK_SH_LIST_DCN20_BASE(mask_sh),\
CLK_SF(CLK3_0_CLK3_CLK_PLL_REQ, FbMult_int, mask_sh),\
CLK_SF(CLK3_0_CLK3_CLK_PLL_REQ, FbMult_frac, mask_sh)
-#endif
#define CLK_REG_FIELD_LIST(type) \
type DPREFCLK_SRC_SEL; \
@@ -143,30 +139,24 @@ enum dentist_divider_range {
****************** Clock Manager Private Structures ***********************************
***************************************************************************************
*/
-#ifdef CONFIG_DRM_AMD_DC_DCN2_0
#define CLK20_REG_FIELD_LIST(type) \
type DENTIST_DPPCLK_WDIVIDER; \
type DENTIST_DPPCLK_CHG_DONE; \
type FbMult_int; \
type FbMult_frac;
-#endif
#define VBIOS_SMU_REG_FIELD_LIST(type) \
type CONTENT;
struct clk_mgr_shift {
CLK_REG_FIELD_LIST(uint8_t)
-#ifdef CONFIG_DRM_AMD_DC_DCN2_0
CLK20_REG_FIELD_LIST(uint8_t)
-#endif
VBIOS_SMU_REG_FIELD_LIST(uint32_t)
};
struct clk_mgr_mask {
CLK_REG_FIELD_LIST(uint32_t)
-#ifdef CONFIG_DRM_AMD_DC_DCN2_0
CLK20_REG_FIELD_LIST(uint32_t)
-#endif
VBIOS_SMU_REG_FIELD_LIST(uint32_t)
};
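
CLK_REG_FIELD_LIST(type) and CLK20_REG_FIELD_LIST(type) are X-macros: the same field list is instantiated once with uint8_t inside clk_mgr_shift and once with uint32_t inside clk_mgr_mask, so the two structs cannot drift apart. A reduced demonstration; only DPREFCLK_SRC_SEL is taken from the real list, the second field is a placeholder:

    #include <stdint.h>
    #include <stdio.h>

    #define EXAMPLE_REG_FIELD_LIST(type) \
        type DPREFCLK_SRC_SEL;           \
        type EXAMPLE_PLACEHOLDER_FIELD;

    struct example_shift { EXAMPLE_REG_FIELD_LIST(uint8_t) };
    struct example_mask  { EXAMPLE_REG_FIELD_LIST(uint32_t) };

    int main(void)
    {
        struct example_shift s = { .DPREFCLK_SRC_SEL = 4 };
        struct example_mask  m = { .DPREFCLK_SRC_SEL = 0x70 };
        /* Extracting a field uses the mask and shift together. */
        printf("field = (reg & %#x) >> %u\n",
               (unsigned)m.DPREFCLK_SRC_SEL, (unsigned)s.DPREFCLK_SRC_SEL);
        return 0;
    }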
@@ -174,10 +164,8 @@ struct clk_mgr_registers {
uint32_t DPREFCLK_CNTL;
uint32_t DENTIST_DISPCLK_CNTL;
-#ifdef CONFIG_DRM_AMD_DC_DCN2_0
uint32_t CLK3_CLK2_DFS_CNTL;
uint32_t CLK3_CLK_PLL_REQ;
-#endif
uint32_t MP1_SMN_C2PMSG_67;
uint32_t MP1_SMN_C2PMSG_83;
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/dchubbub.h b/drivers/gpu/drm/amd/display/dc/inc/hw/dchubbub.h
index c81a17aeaa25..c0dc1d0f5cae 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/dchubbub.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/dchubbub.h
@@ -52,7 +52,6 @@ struct dcn_hubbub_wm {
struct dcn_hubbub_wm_set sets[4];
};
-#ifdef CONFIG_DRM_AMD_DC_DCN2_0
enum dcn_hubbub_page_table_depth {
DCN_PAGE_TABLE_DEPTH_1_LEVEL,
DCN_PAGE_TABLE_DEPTH_2_LEVEL,
@@ -101,13 +100,11 @@ struct hubbub_addr_config {
} default_addrs;
};
-#endif
struct hubbub_funcs {
void (*update_dchub)(
struct hubbub *hubbub,
struct dchub_init_data *dh_data);
-#ifdef CONFIG_DRM_AMD_DC_DCN2_0
int (*init_dchub_sys_ctx)(
struct hubbub *hubbub,
struct dcn_hubbub_phys_addr_config *pa_config);
@@ -116,7 +113,6 @@ struct hubbub_funcs {
struct dcn_hubbub_virt_addr_config *va_config,
int vmid);
-#endif
bool (*get_dcc_compression_cap)(struct hubbub *hubbub,
const struct dc_dcc_surface_param *input,
struct dc_surface_dcc_cap *output);
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/dmcu.h b/drivers/gpu/drm/amd/display/dc/inc/hw/dmcu.h
index c68f0ce346c7..5315f1f86b21 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/dmcu.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/dmcu.h
@@ -52,6 +52,8 @@ struct dmcu {
enum dmcu_state dmcu_state;
struct dmcu_version dmcu_version;
unsigned int cached_wait_loop_number;
+ uint32_t psp_version;
+ bool auto_load_dmcu;
};
struct dmcu_funcs {
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/dpp.h b/drivers/gpu/drm/amd/display/dc/inc/hw/dpp.h
index 474c7194a9f8..45ef390ae052 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/dpp.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/dpp.h
@@ -36,14 +36,10 @@ struct dpp {
struct dpp_caps *caps;
struct pwl_params regamma_params;
struct pwl_params degamma_params;
-#if defined(CONFIG_DRM_AMD_DC_DCN2_0)
struct dpp_cursor_attributes cur_attr;
-#endif
-#if defined(CONFIG_DRM_AMD_DC_DCN2_0)
struct pwl_params shaper_params;
bool cm_bypass_mode;
-#endif
};
struct dpp_input_csc_matrix {
@@ -51,12 +47,31 @@ struct dpp_input_csc_matrix {
uint16_t regval[12];
};
+static const struct dpp_input_csc_matrix dpp_input_csc_matrix[] = {
+ {COLOR_SPACE_SRGB,
+ {0x2000, 0, 0, 0, 0, 0x2000, 0, 0, 0, 0, 0x2000, 0} },
+ {COLOR_SPACE_SRGB_LIMITED,
+ {0x2000, 0, 0, 0, 0, 0x2000, 0, 0, 0, 0, 0x2000, 0} },
+ {COLOR_SPACE_YCBCR601,
+ {0x2cdd, 0x2000, 0, 0xe991, 0xe926, 0x2000, 0xf4fd, 0x10ef,
+ 0, 0x2000, 0x38b4, 0xe3a6} },
+ {COLOR_SPACE_YCBCR601_LIMITED,
+ {0x3353, 0x2568, 0, 0xe400, 0xe5dc, 0x2568, 0xf367, 0x1108,
+ 0, 0x2568, 0x40de, 0xdd3a} },
+ {COLOR_SPACE_YCBCR709,
+ {0x3265, 0x2000, 0, 0xe6ce, 0xf105, 0x2000, 0xfa01, 0xa7d, 0,
+ 0x2000, 0x3b61, 0xe24f} },
+
+ {COLOR_SPACE_YCBCR709_LIMITED,
+ {0x39a6, 0x2568, 0, 0xe0d6, 0xeedd, 0x2568, 0xf925, 0x9a8, 0,
+ 0x2568, 0x43ee, 0xdbb2} }
+};
+
struct dpp_grph_csc_adjustment {
struct fixed31_32 temperature_matrix[CSC_TEMPERATURE_MATRIX_SIZE];
enum graphics_gamut_adjust_type gamut_adjust_type;
};
-#ifdef CONFIG_DRM_AMD_DC_DCN2_0
struct cnv_color_keyer_params {
int color_keyer_en;
int color_keyer_mode;
@@ -82,7 +97,6 @@ struct cnv_alpha_2bit_lut {
int lut2;
int lut3;
};
-#endif
struct dcn_dpp_state {
uint32_t is_enabled;
@@ -190,12 +204,8 @@ struct dpp_funcs {
enum surface_pixel_format format,
enum expansion_mode mode,
struct dc_csc_transform input_csc_color_matrix,
-#ifdef CONFIG_DRM_AMD_DC_DCN2_0
enum dc_color_space input_color_space,
struct cnv_alpha_2bit_lut *alpha_2bit_lut);
-#else
- enum dc_color_space input_color_space);
-#endif
void (*dpp_full_bypass)(struct dpp *dpp_base);
@@ -224,7 +234,6 @@ struct dpp_funcs {
bool dppclk_div,
bool enable);
-#if defined(CONFIG_DRM_AMD_DC_DCN2_0)
bool (*dpp_program_blnd_lut)(
struct dpp *dpp,
const struct pwl_params *params);
@@ -237,7 +246,6 @@ struct dpp_funcs {
void (*dpp_cnv_set_alpha_keyer)(
struct dpp *dpp_base,
struct cnv_color_keyer_params *color_keyer);
-#endif
};
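
The new dpp_input_csc_matrix[] table earlier in this file pairs a color space with its 12 fixed-point CSC coefficients; a consumer would presumably scan it for a match, though no lookup code appears in this hunk. A self-contained model of such a lookup, reusing two rows of the real coefficient data:

    #include <stdint.h>
    #include <stddef.h>
    #include <stdio.h>

    enum cs { CS_SRGB, CS_YCBCR709 };

    struct csc_entry {
        enum cs color_space;
        uint16_t regval[12];
    };

    static const struct csc_entry table[] = {
        { CS_SRGB,     {0x2000, 0, 0, 0, 0, 0x2000, 0, 0, 0, 0, 0x2000, 0} },
        { CS_YCBCR709, {0x3265, 0x2000, 0, 0xe6ce, 0xf105, 0x2000, 0xfa01,
                        0x0a7d, 0, 0x2000, 0x3b61, 0xe24f} },
    };

    static const uint16_t *find_csc(enum cs cs)
    {
        for (size_t i = 0; i < sizeof(table) / sizeof(table[0]); i++)
            if (table[i].color_space == cs)
                return table[i].regval;
        return NULL;  /* caller falls back to identity */
    }

    int main(void)
    {
        const uint16_t *m = find_csc(CS_YCBCR709);
        printf("first coefficient: %#x\n", m ? (unsigned)m[0] : 0u);
        return 0;
    }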
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/dsc.h b/drivers/gpu/drm/amd/display/dc/inc/hw/dsc.h
index c6ff3d78b435..c59740084ebc 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/dsc.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/dsc.h
@@ -22,7 +22,6 @@
* Authors: AMD
*
*/
-#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT
#ifndef __DAL_DSC_H__
#define __DAL_DSC_H__
@@ -98,4 +97,3 @@ struct dsc_funcs {
};
#endif
-#endif
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/dwb.h b/drivers/gpu/drm/amd/display/dc/inc/hw/dwb.h
index ff1a07b35c85..459f95f52486 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/dwb.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/dwb.h
@@ -51,20 +51,15 @@ enum dwb_source {
dwb_src_otg3, /* for DCN1.x/DCN2.x */
};
-#if defined(CONFIG_DRM_AMD_DC_DCN2_0)
/* DCN1.x, DCN2.x support 2 pipes */
-#else
-/* DCN1.x supports 2 pipes */
-#endif
enum dwb_pipe {
dwb_pipe0 = 0,
-#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
+#if defined(CONFIG_DRM_AMD_DC_DCN)
dwb_pipe1,
#endif
dwb_pipe_max_num,
};
-#if defined(CONFIG_DRM_AMD_DC_DCN2_0)
enum dwb_frame_capture_enable {
DWB_FRAME_CAPTURE_DISABLE = 0,
DWB_FRAME_CAPTURE_ENABLE = 1,
@@ -77,9 +72,7 @@ enum wbscl_coef_filter_type_sel {
WBSCL_COEF_CHROMA_HORZ_FILTER = 3
};
-#endif
-#if defined(CONFIG_DRM_AMD_DC_DCN2_0)
struct dwb_warmup_params {
bool warmup_en; /* false: normal mode, true: enable pattern generator */
bool warmup_mode; /* false: 420, true: 444 */
@@ -88,7 +81,6 @@ struct dwb_warmup_params {
int warmup_width; /* Pattern width (pixels) */
int warmup_height; /* Pattern height (lines) */
};
-#endif
struct dwb_caps {
enum dce_version hw_version; /* DCN engine version. */
@@ -121,7 +113,8 @@ struct dwbc {
int wb_src_plane_inst;/*hubp, mpcc, inst*/
bool update_privacymask;
uint32_t mask_id;
-
+ int otg_inst;
+ bool mvc_cfg;
};
struct dwbc_funcs {
@@ -150,13 +143,11 @@ struct dwbc_funcs {
struct dwbc *dwbc,
bool is_new_content);
-#if defined(CONFIG_DRM_AMD_DC_DCN2_0)
void (*set_warmup)(
struct dwbc *dwbc,
struct dwb_warmup_params *warmup_params);
-#endif
bool (*get_dwb_status)(
struct dwbc *dwbc);
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/hubp.h b/drivers/gpu/drm/amd/display/dc/inc/hw/hubp.h
index 809b62b51a43..2cb8466e657b 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/hubp.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/hubp.h
@@ -38,9 +38,7 @@ enum cursor_pitch {
};
enum cursor_lines_per_chunk {
-#if defined(CONFIG_DRM_AMD_DC_DCN2_0)
CURSOR_LINE_PER_CHUNK_1 = 0, /* new for DCN2 */
-#endif
CURSOR_LINE_PER_CHUNK_2 = 1,
CURSOR_LINE_PER_CHUNK_4,
CURSOR_LINE_PER_CHUNK_8,
@@ -65,6 +63,26 @@ struct hubp {
bool power_gated;
};
+struct surface_flip_registers {
+ uint32_t DCSURF_SURFACE_CONTROL;
+ uint32_t DCSURF_PRIMARY_META_SURFACE_ADDRESS_HIGH;
+ uint32_t DCSURF_PRIMARY_META_SURFACE_ADDRESS;
+ uint32_t DCSURF_PRIMARY_SURFACE_ADDRESS_HIGH;
+ uint32_t DCSURF_PRIMARY_SURFACE_ADDRESS;
+ uint32_t DCSURF_PRIMARY_META_SURFACE_ADDRESS_HIGH_C;
+ uint32_t DCSURF_PRIMARY_META_SURFACE_ADDRESS_C;
+ uint32_t DCSURF_PRIMARY_SURFACE_ADDRESS_HIGH_C;
+ uint32_t DCSURF_PRIMARY_SURFACE_ADDRESS_C;
+ uint32_t DCSURF_SECONDARY_META_SURFACE_ADDRESS_HIGH;
+ uint32_t DCSURF_SECONDARY_META_SURFACE_ADDRESS;
+ uint32_t DCSURF_SECONDARY_SURFACE_ADDRESS_HIGH;
+ uint32_t DCSURF_SECONDARY_SURFACE_ADDRESS;
+ bool tmz_surface;
+ bool immediate;
+ uint8_t vmid;
+ bool grph_stereo;
+};
+
struct hubp_funcs {
void (*hubp_setup)(
struct hubp *hubp,
@@ -86,6 +104,9 @@ struct hubp_funcs {
const struct rect *viewport,
const struct rect *viewport_c);
+ void (*apply_PLAT_54186_wa)(struct hubp *hubp,
+ const struct dc_plane_address *address);
+
bool (*hubp_program_surface_flip_and_addr)(
struct hubp *hubp,
const struct dc_plane_address *address,
@@ -139,7 +160,6 @@ struct hubp_funcs {
unsigned int (*hubp_get_underflow_status)(struct hubp *hubp);
void (*hubp_init)(struct hubp *hubp);
-#if defined(CONFIG_DRM_AMD_DC_DCN2_0)
void (*dmdata_set_attributes)(
struct hubp *hubp,
const struct dc_dmdata_attributes *attr);
@@ -159,7 +179,13 @@ struct hubp_funcs {
void (*hubp_set_flip_control_surface_gsl)(
struct hubp *hubp,
bool enable);
-#endif
+
+ void (*validate_dml_output)(
+ struct hubp *hubp,
+ struct dc_context *ctx,
+ struct _vcs_dpi_display_rq_regs_st *dml_rq_regs,
+ struct _vcs_dpi_display_dlg_regs_st *dml_dlg_attr,
+ struct _vcs_dpi_display_ttu_regs_st *dml_ttu_attr);
};
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/hw_shared.h b/drivers/gpu/drm/amd/display/dc/inc/hw/hw_shared.h
index f82365e2d03c..75d419081e76 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/hw_shared.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/hw_shared.h
@@ -36,9 +36,7 @@
#define MAX_AUDIOS 7
#define MAX_PIPES 6
-#if defined(CONFIG_DRM_AMD_DC_DCN2_0)
#define MAX_DWB_PIPES 1
-#endif
struct gamma_curve {
uint32_t offset;
@@ -81,7 +79,6 @@ struct pwl_result_data {
uint32_t delta_blue_reg;
};
-#if defined(CONFIG_DRM_AMD_DC_DCN2_0)
struct dc_rgb {
uint32_t red;
uint32_t green;
@@ -110,7 +107,6 @@ struct tetrahedral_params {
bool use_12bits;
};
-#endif
/* arr_curve_points - regamma regions/segments specification
* arr_points - beginning and end point specified separately (only one on DCE)
@@ -195,13 +191,11 @@ enum opp_regamma {
OPP_REGAMMA_USER
};
-#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT
enum optc_dsc_mode {
OPTC_DSC_DISABLED = 0,
OPTC_DSC_ENABLED_444 = 1, /* 'RGB 444' or 'Simple YCbCr 4:2:2' (4:2:2 upsampled to 4:4:4) */
OPTC_DSC_ENABLED_NATIVE_SUBSAMPLED = 2 /* Native 4:2:2 or 4:2:0 */
};
-#endif
struct dc_bias_and_scale {
uint16_t scale_red;
@@ -224,12 +218,8 @@ enum test_pattern_mode {
TEST_PATTERN_MODE_VERTICALBARS,
TEST_PATTERN_MODE_HORIZONTALBARS,
TEST_PATTERN_MODE_SINGLERAMP_RGB,
-#if defined(CONFIG_DRM_AMD_DC_DCN2_0)
TEST_PATTERN_MODE_DUALRAMP_RGB,
TEST_PATTERN_MODE_XR_BIAS_RGB
-#else
- TEST_PATTERN_MODE_DUALRAMP_RGB
-#endif
};
enum test_pattern_color_format {
@@ -255,6 +245,13 @@ enum controller_dp_test_pattern {
CONTROLLER_DP_TEST_PATTERN_SOLID_COLOR
};
+enum controller_dp_color_space {
+ CONTROLLER_DP_COLOR_SPACE_RGB,
+ CONTROLLER_DP_COLOR_SPACE_YCBCR601,
+ CONTROLLER_DP_COLOR_SPACE_YCBCR709,
+ CONTROLLER_DP_COLOR_SPACE_UDEFINED
+};
+
enum dc_lut_mode {
LUT_BYPASS,
LUT_RAM_A,
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/link_encoder.h b/drivers/gpu/drm/amd/display/dc/inc/hw/link_encoder.h
index b21909216fb6..fb748f082c56 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/link_encoder.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/link_encoder.h
@@ -113,26 +113,21 @@ struct link_encoder {
struct encoder_feature_support features;
enum transmitter transmitter;
enum hpd_source_id hpd_source;
-#ifdef CONFIG_DRM_AMD_DC_DCN2_0
bool usbc_combo_phy;
-#endif
};
-#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT
struct link_enc_state {
uint32_t dphy_fec_en;
uint32_t dphy_fec_ready_shadow;
uint32_t dphy_fec_active_status;
+ uint32_t dp_link_training_complete;
};
-#endif
struct link_encoder_funcs {
-#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT
void (*read_state)(
struct link_encoder *enc, struct link_enc_state *s);
-#endif
bool (*validate_output_with_stream)(
struct link_encoder *enc, const struct dc_stream_state *stream);
void (*hw_init)(struct link_encoder *enc);
@@ -174,7 +169,6 @@ struct link_encoder_funcs {
unsigned int (*get_dig_frontend)(struct link_encoder *enc);
void (*destroy)(struct link_encoder **enc);
-#if defined(CONFIG_DRM_AMD_DC_DCN2_0)
void (*fec_set_enable)(struct link_encoder *enc,
bool enable);
@@ -182,7 +176,6 @@ struct link_encoder_funcs {
bool ready);
bool (*fec_is_active)(struct link_encoder *enc);
-#endif
bool (*is_in_alt_mode) (struct link_encoder *enc);
void (*get_max_link_cap)(struct link_encoder *enc,
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/mem_input.h b/drivers/gpu/drm/amd/display/dc/inc/hw/mem_input.h
index 67b610d6d91f..2e2310f1901a 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/mem_input.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/mem_input.h
@@ -40,11 +40,9 @@ struct cstate_pstate_watermarks_st {
struct dcn_watermarks {
uint32_t pte_meta_urgent_ns;
uint32_t urgent_ns;
-#if defined(CONFIG_DRM_AMD_DC_DCN2_1)
uint32_t frac_urg_bw_nom;
uint32_t frac_urg_bw_flip;
int32_t urgent_latency_ns;
-#endif
struct cstate_pstate_watermarks_st cstate_pstate;
};
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/mpc.h b/drivers/gpu/drm/amd/display/dc/inc/hw/mpc.h
index 58826be81395..094afc4c8173 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/mpc.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/mpc.h
@@ -31,9 +31,7 @@
#define MAX_MPCC 6
#define MAX_OPP 6
-#if defined(CONFIG_DRM_AMD_DC_DCN2_0)
#define MAX_DWB 1
-#endif
enum mpc_output_csc_mode {
MPC_OUTPUT_CSC_DISABLE = 0,
@@ -66,14 +64,12 @@ struct mpcc_blnd_cfg {
int global_alpha;
bool overlap_only;
-#if defined(CONFIG_DRM_AMD_DC_DCN2_0)
/* MPCC top/bottom gain settings */
int bottom_gain_mode;
int background_color_bpc;
int top_gain;
int bottom_inside_gain;
int bottom_outside_gain;
-#endif
};
struct mpcc_sm_cfg {
@@ -90,7 +86,6 @@ struct mpcc_sm_cfg {
int force_next_field_polarity;
};
-#if defined(CONFIG_DRM_AMD_DC_DCN2_0)
struct mpc_denorm_clamp {
int clamp_max_r_cr;
int clamp_min_r_cr;
@@ -99,7 +94,6 @@ struct mpc_denorm_clamp {
int clamp_max_b_cb;
int clamp_min_b_cb;
};
-#endif
/*
* MPCC connection and blending configuration for a single MPCC instance.
@@ -126,10 +120,8 @@ struct mpc {
struct dc_context *ctx;
struct mpcc mpcc_array[MAX_MPCC];
-#if defined(CONFIG_DRM_AMD_DC_DCN2_0)
struct pwl_params blender_params;
bool cm_bypass_mode;
-#endif
};
struct mpcc_state {
@@ -230,7 +222,6 @@ struct mpc_funcs {
struct mpc *mpc,
struct mpc_tree *tree);
-#if defined(CONFIG_DRM_AMD_DC_DCN2_0)
void (*set_denorm)(struct mpc *mpc,
int opp_id,
enum dc_color_depth output_depth);
@@ -258,7 +249,6 @@ struct mpc_funcs {
struct mpc *mpc,
int mpcc_id,
bool power_on);
-#endif
};
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/opp.h b/drivers/gpu/drm/amd/display/dc/inc/hw/opp.h
index 18def2b6fafe..7575564b2265 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/opp.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/opp.h
@@ -263,9 +263,7 @@ struct oppbuf_params {
enum oppbuf_display_segmentation mso_segmentation;
uint32_t mso_overlap_pixel_num;
uint32_t pixel_repetition;
-#if defined(CONFIG_DRM_AMD_DC_DCN2_0)
uint32_t num_segment_padded_pixels;
-#endif
};
struct opp_funcs {
@@ -305,10 +303,10 @@ struct opp_funcs {
struct output_pixel_processor *opp,
bool enable);
-#if defined(CONFIG_DRM_AMD_DC_DCN2_0)
void (*opp_set_disp_pattern_generator)(
struct output_pixel_processor *opp,
enum controller_dp_test_pattern test_pattern,
+ enum controller_dp_color_space color_space,
enum dc_color_depth color_depth,
const struct tg_color *solid_color,
int width,
@@ -324,7 +322,6 @@ struct opp_funcs {
void (*opp_program_left_edge_extra_pixel)(
struct output_pixel_processor *opp,
bool count);
-#endif
};
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/stream_encoder.h b/drivers/gpu/drm/amd/display/dc/inc/hw/stream_encoder.h
index 6305e388612a..351b387ad606 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/stream_encoder.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/stream_encoder.h
@@ -65,13 +65,11 @@ struct audio_clock_info {
uint32_t cts_48khz;
};
-#if defined(CONFIG_DRM_AMD_DC_DCN2_0)
enum dynamic_metadata_mode {
dmdata_dp,
dmdata_hdmi,
dmdata_dolby_vision
};
-#endif
struct encoder_info_frame {
/* auxiliary video information */
@@ -90,9 +88,7 @@ struct encoder_info_frame {
struct encoder_unblank_param {
struct dc_link_settings link_settings;
struct dc_crtc_timing timing;
-#ifdef CONFIG_DRM_AMD_DC_DCN2_0
int opp_cnt;
-#endif
};
struct encoder_set_dp_phy_pattern_param {
@@ -109,7 +105,6 @@ struct stream_encoder {
enum engine_id id;
};
-#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT
struct enc_state {
uint32_t dsc_mode; // DISABLED 0; 1 or 2 indicate enabled state.
uint32_t dsc_slice_width;
@@ -119,13 +114,13 @@ struct enc_state {
uint32_t sec_gsp_pps_enable;
uint32_t sec_stream_enable;
};
-#endif
struct stream_encoder_funcs {
void (*dp_set_stream_attribute)(
struct stream_encoder *enc,
struct dc_crtc_timing *crtc_timing,
enum dc_color_space output_color_space,
+ bool use_vsc_sdp_for_colorimetry,
uint32_t enable_sdp_splitting);
void (*hdmi_set_stream_attribute)(
@@ -219,8 +214,6 @@ struct stream_encoder_funcs {
enum dc_pixel_encoding *encoding,
enum dc_color_depth *depth);
-#if defined(CONFIG_DRM_AMD_DC_DCN2_0)
-#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT
void (*enc_read_state)(struct stream_encoder *enc, struct enc_state *s);
void (*dp_set_dsc_config)(
@@ -232,7 +225,6 @@ struct stream_encoder_funcs {
void (*dp_set_dsc_pps_info_packet)(struct stream_encoder *enc,
bool enable,
uint8_t *dsc_packed_pps);
-#endif
void (*set_dynamic_metadata)(struct stream_encoder *enc,
bool enable,
@@ -242,7 +234,6 @@ struct stream_encoder_funcs {
void (*dp_set_odm_combine)(
struct stream_encoder *enc,
bool odm_combine);
-#endif
};
#endif /* STREAM_ENCODER_H_ */
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/timing_generator.h b/drivers/gpu/drm/amd/display/dc/inc/hw/timing_generator.h
index 27c73caf74ee..e5e7d94026fc 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/timing_generator.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/timing_generator.h
@@ -195,10 +195,8 @@ struct timing_generator_funcs {
void (*lock)(struct timing_generator *tg);
void (*lock_doublebuffer_disable)(struct timing_generator *tg);
void (*lock_doublebuffer_enable)(struct timing_generator *tg);
-#if defined(CONFIG_DRM_AMD_DC_DCN2_0)
void(*triplebuffer_unlock)(struct timing_generator *tg);
void(*triplebuffer_lock)(struct timing_generator *tg);
-#endif
void (*enable_reset_trigger)(struct timing_generator *tg,
int source_tg_inst);
void (*enable_crtc_reset)(struct timing_generator *tg,
@@ -210,7 +208,8 @@ struct timing_generator_funcs {
bool enable, const struct dc_crtc_timing *timing);
void (*set_drr)(struct timing_generator *tg, const struct drr_params *params);
void (*set_static_screen_control)(struct timing_generator *tg,
- uint32_t value);
+ uint32_t event_triggers,
+ uint32_t num_frames);
void (*set_test_pattern)(
struct timing_generator *tg,
enum controller_dp_test_pattern test_pattern,
@@ -235,7 +234,6 @@ struct timing_generator_funcs {
bool (*is_optc_underflow_occurred)(struct timing_generator *tg);
void (*clear_optc_underflow)(struct timing_generator *tg);
-#if defined(CONFIG_DRM_AMD_DC_DCN2_0)
void (*set_dwb_source)(struct timing_generator *optc,
uint32_t dwb_pipe_inst);
@@ -243,7 +241,6 @@ struct timing_generator_funcs {
uint32_t *num_of_input_segments,
uint32_t *seg0_src_sel,
uint32_t *seg1_src_sel);
-#endif
/**
* Configure CRCs for the given timing generator. Return false if TG is
@@ -267,13 +264,10 @@ struct timing_generator_funcs {
void (*set_vtg_params)(struct timing_generator *optc,
const struct dc_crtc_timing *dc_crtc_timing);
-#ifdef CONFIG_DRM_AMD_DC_DCN2_0
-#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT
void (*set_dsc_config)(struct timing_generator *optc,
enum optc_dsc_mode dsc_mode,
uint32_t dsc_bytes_per_pixel,
uint32_t dsc_slice_width);
-#endif
void (*set_odm_bypass)(struct timing_generator *optc, const struct dc_crtc_timing *dc_crtc_timing);
void (*set_odm_combine)(struct timing_generator *optc, int *opp_id, int opp_cnt,
struct dc_crtc_timing *timing);
@@ -281,7 +275,6 @@ struct timing_generator_funcs {
void (*set_gsl_source_select)(struct timing_generator *optc,
int group_idx,
uint32_t gsl_ready_signal);
-#endif
};
#endif
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h b/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h
index d39c1e11def5..209118f9f193 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h
@@ -32,326 +32,160 @@
#include "inc/hw/link_encoder.h"
#include "core_status.h"
-enum pipe_gating_control {
- PIPE_GATING_CONTROL_DISABLE = 0,
- PIPE_GATING_CONTROL_ENABLE,
- PIPE_GATING_CONTROL_INIT
-};
-
enum vline_select {
VLINE0,
VLINE1
};
-struct dce_hwseq_wa {
- bool blnd_crtc_trigger;
- bool DEGVIDCN10_253;
- bool false_optc_underflow;
- bool DEGVIDCN10_254;
- bool DEGVIDCN21;
-};
-
-struct hwseq_wa_state {
- bool DEGVIDCN10_253_applied;
-};
-
-struct dce_hwseq {
- struct dc_context *ctx;
- const struct dce_hwseq_registers *regs;
- const struct dce_hwseq_shift *shifts;
- const struct dce_hwseq_mask *masks;
- struct dce_hwseq_wa wa;
- struct hwseq_wa_state wa_state;
-};
-
struct pipe_ctx;
struct dc_state;
-#if defined(CONFIG_DRM_AMD_DC_DCN2_0)
struct dc_stream_status;
struct dc_writeback_info;
-#endif
struct dchub_init_data;
-struct dc_static_screen_events;
+struct dc_static_screen_params;
struct resource_pool;
-struct resource_context;
-struct stream_resource;
-#ifdef CONFIG_DRM_AMD_DC_DCN2_0
struct dc_phy_addr_space_config;
struct dc_virtual_addr_space_config;
-#endif
-struct hubp;
struct dpp;
+struct dce_hwseq;
struct hw_sequencer_funcs {
+ /* Embedded Display Related */
+ void (*edp_power_control)(struct dc_link *link, bool enable);
+ void (*edp_wait_for_hpd_ready)(struct dc_link *link, bool power_up);
- void (*disable_stream_gating)(struct dc *dc, struct pipe_ctx *pipe_ctx);
-
- void (*enable_stream_gating)(struct dc *dc, struct pipe_ctx *pipe_ctx);
-
+ /* Pipe Programming Related */
void (*init_hw)(struct dc *dc);
-
- void (*init_pipes)(struct dc *dc, struct dc_state *context);
-
- enum dc_status (*apply_ctx_to_hw)(
- struct dc *dc, struct dc_state *context);
-
- void (*reset_hw_ctx_wrap)(
- struct dc *dc, struct dc_state *context);
-
- void (*apply_ctx_for_surface)(
- struct dc *dc,
+ void (*enable_accelerated_mode)(struct dc *dc,
+ struct dc_state *context);
+ enum dc_status (*apply_ctx_to_hw)(struct dc *dc,
+ struct dc_state *context);
+ void (*disable_plane)(struct dc *dc, struct pipe_ctx *pipe_ctx);
+ void (*apply_ctx_for_surface)(struct dc *dc,
const struct dc_stream_state *stream,
- int num_planes,
+ int num_planes, struct dc_state *context);
+ void (*program_front_end_for_ctx)(struct dc *dc,
struct dc_state *context);
-
- void (*program_gamut_remap)(
+ void (*update_plane_addr)(const struct dc *dc,
struct pipe_ctx *pipe_ctx);
-
- void (*program_output_csc)(struct dc *dc,
- struct pipe_ctx *pipe_ctx,
- enum dc_color_space colorspace,
- uint16_t *matrix,
- int opp_id);
-
-#if defined(CONFIG_DRM_AMD_DC_DCN2_0)
- void (*program_front_end_for_ctx)(
- struct dc *dc,
- struct dc_state *context);
- void (*program_triplebuffer)(
- const struct dc *dc,
- struct pipe_ctx *pipe_ctx,
- bool enableTripleBuffer);
- void (*set_flip_control_gsl)(
- struct pipe_ctx *pipe_ctx,
- bool flip_immediate);
-#endif
-
- void (*update_plane_addr)(
- const struct dc *dc,
- struct pipe_ctx *pipe_ctx);
-
- void (*plane_atomic_disconnect)(
- struct dc *dc,
- struct pipe_ctx *pipe_ctx);
-
- void (*update_dchub)(
- struct dce_hwseq *hws,
- struct dchub_init_data *dh_data);
-
-#ifdef CONFIG_DRM_AMD_DC_DCN2_0
- int (*init_sys_ctx)(
- struct dce_hwseq *hws,
- struct dc *dc,
- struct dc_phy_addr_space_config *pa_config);
- void (*init_vm_ctx)(
- struct dce_hwseq *hws,
- struct dc *dc,
- struct dc_virtual_addr_space_config *va_config,
- int vmid);
-#endif
- void (*update_mpcc)(
- struct dc *dc,
- struct pipe_ctx *pipe_ctx);
-
- void (*update_pending_status)(
+ void (*update_dchub)(struct dce_hwseq *hws,
+ struct dchub_init_data *dh_data);
+ void (*wait_for_mpcc_disconnect)(struct dc *dc,
+ struct resource_pool *res_pool,
struct pipe_ctx *pipe_ctx);
-
- bool (*set_input_transfer_func)(
- struct pipe_ctx *pipe_ctx,
- const struct dc_plane_state *plane_state);
-
- bool (*set_output_transfer_func)(
- struct pipe_ctx *pipe_ctx,
- const struct dc_stream_state *stream);
-
- void (*power_down)(struct dc *dc);
-
- void (*enable_accelerated_mode)(struct dc *dc, struct dc_state *context);
-
- void (*enable_timing_synchronization)(
- struct dc *dc,
- int group_index,
- int group_size,
- struct pipe_ctx *grouped_pipes[]);
-
- void (*enable_per_frame_crtc_position_reset)(
- struct dc *dc,
- int group_size,
+ void (*program_triplebuffer)(const struct dc *dc,
+ struct pipe_ctx *pipe_ctx, bool enableTripleBuffer);
+ void (*update_pending_status)(struct pipe_ctx *pipe_ctx);
+
+ /* Pipe Lock Related */
+ void (*pipe_control_lock_global)(struct dc *dc,
+ struct pipe_ctx *pipe, bool lock);
+ void (*pipe_control_lock)(struct dc *dc,
+ struct pipe_ctx *pipe, bool lock);
+ void (*set_flip_control_gsl)(struct pipe_ctx *pipe_ctx,
+ bool flip_immediate);
+
+ /* Timing Related */
+ void (*get_position)(struct pipe_ctx **pipe_ctx, int num_pipes,
+ struct crtc_position *position);
+ int (*get_vupdate_offset_from_vsync)(struct pipe_ctx *pipe_ctx);
+ void (*enable_per_frame_crtc_position_reset)(struct dc *dc,
+ int group_size, struct pipe_ctx *grouped_pipes[]);
+ void (*enable_timing_synchronization)(struct dc *dc,
+ int group_index, int group_size,
struct pipe_ctx *grouped_pipes[]);
+ void (*setup_periodic_interrupt)(struct dc *dc,
+ struct pipe_ctx *pipe_ctx,
+ enum vline_select vline);
+ void (*set_drr)(struct pipe_ctx **pipe_ctx, int num_pipes,
+ unsigned int vmin, unsigned int vmax,
+ unsigned int vmid, unsigned int vmid_frame_number);
+ void (*set_static_screen_control)(struct pipe_ctx **pipe_ctx,
+ int num_pipes,
+ const struct dc_static_screen_params *events);
- void (*enable_display_pipe_clock_gating)(
- struct dc_context *ctx,
- bool clock_gating);
-
- bool (*enable_display_power_gating)(
- struct dc *dc,
- uint8_t controller_id,
- struct dc_bios *dcb,
- enum pipe_gating_control power_gating);
-
- void (*disable_plane)(struct dc *dc, struct pipe_ctx *pipe_ctx);
-
- void (*update_info_frame)(struct pipe_ctx *pipe_ctx);
-
- void (*send_immediate_sdp_message)(
- struct pipe_ctx *pipe_ctx,
- const uint8_t *custom_sdp_message,
- unsigned int sdp_message_size);
-
+ /* Stream Related */
void (*enable_stream)(struct pipe_ctx *pipe_ctx);
-
void (*disable_stream)(struct pipe_ctx *pipe_ctx);
-
+ void (*blank_stream)(struct pipe_ctx *pipe_ctx);
void (*unblank_stream)(struct pipe_ctx *pipe_ctx,
struct dc_link_settings *link_settings);
- void (*blank_stream)(struct pipe_ctx *pipe_ctx);
-
- void (*enable_audio_stream)(struct pipe_ctx *pipe_ctx);
+ /* Bandwidth Related */
+ void (*prepare_bandwidth)(struct dc *dc, struct dc_state *context);
+ bool (*update_bandwidth)(struct dc *dc, struct dc_state *context);
+ void (*optimize_bandwidth)(struct dc *dc, struct dc_state *context);
- void (*disable_audio_stream)(struct pipe_ctx *pipe_ctx);
-
- void (*pipe_control_lock)(
- struct dc *dc,
- struct pipe_ctx *pipe,
- bool lock);
-
- void (*pipe_control_lock_global)(
- struct dc *dc,
- struct pipe_ctx *pipe,
- bool lock);
- void (*blank_pixel_data)(
- struct dc *dc,
+ /* Infopacket Related */
+ void (*set_avmute)(struct pipe_ctx *pipe_ctx, bool enable);
+ void (*send_immediate_sdp_message)(
struct pipe_ctx *pipe_ctx,
- bool blank);
-
- void (*prepare_bandwidth)(
- struct dc *dc,
- struct dc_state *context);
- void (*optimize_bandwidth)(
- struct dc *dc,
- struct dc_state *context);
-
- void (*exit_optimized_pwr_state)(
- const struct dc *dc,
- struct dc_state *context);
- void (*optimize_pwr_state)(
- const struct dc *dc,
- struct dc_state *context);
-
-#if defined(CONFIG_DRM_AMD_DC_DCN2_0)
- bool (*update_bandwidth)(
- struct dc *dc,
- struct dc_state *context);
+ const uint8_t *custom_sdp_message,
+ unsigned int sdp_message_size);
+ void (*update_info_frame)(struct pipe_ctx *pipe_ctx);
+ void (*set_dmdata_attributes)(struct pipe_ctx *pipe);
void (*program_dmdata_engine)(struct pipe_ctx *pipe_ctx);
bool (*dmdata_status_done)(struct pipe_ctx *pipe_ctx);
-#endif
-
- void (*set_drr)(struct pipe_ctx **pipe_ctx, int num_pipes,
- unsigned int vmin, unsigned int vmax,
- unsigned int vmid, unsigned int vmid_frame_number);
-
- void (*get_position)(struct pipe_ctx **pipe_ctx, int num_pipes,
- struct crtc_position *position);
-
- void (*set_static_screen_control)(struct pipe_ctx **pipe_ctx,
- int num_pipes, const struct dc_static_screen_events *events);
-
- enum dc_status (*enable_stream_timing)(
- struct pipe_ctx *pipe_ctx,
- struct dc_state *context,
- struct dc *dc);
-
- void (*setup_stereo)(
- struct pipe_ctx *pipe_ctx,
- struct dc *dc);
-
- void (*set_avmute)(struct pipe_ctx *pipe_ctx, bool enable);
-
- void (*log_hw_state)(struct dc *dc,
- struct dc_log_buffer_ctx *log_ctx);
- void (*get_hw_state)(struct dc *dc, char *pBuf, unsigned int bufSize, unsigned int mask);
- void (*clear_status_bits)(struct dc *dc, unsigned int mask);
-
- void (*wait_for_mpcc_disconnect)(struct dc *dc,
- struct resource_pool *res_pool,
- struct pipe_ctx *pipe_ctx);
-
- void (*edp_power_control)(
- struct dc_link *link,
- bool enable);
- void (*edp_backlight_control)(
- struct dc_link *link,
- bool enable);
- void (*edp_wait_for_hpd_ready)(struct dc_link *link, bool power_up);
+ /* Cursor Related */
void (*set_cursor_position)(struct pipe_ctx *pipe);
void (*set_cursor_attribute)(struct pipe_ctx *pipe);
void (*set_cursor_sdr_white_level)(struct pipe_ctx *pipe);
- void (*setup_periodic_interrupt)(struct pipe_ctx *pipe_ctx, enum vline_select vline);
- void (*setup_vupdate_interrupt)(struct pipe_ctx *pipe_ctx);
- bool (*did_underflow_occur)(struct dc *dc, struct pipe_ctx *pipe_ctx);
-
- void (*init_blank)(struct dc *dc, struct timing_generator *tg);
- void (*disable_vga)(struct dce_hwseq *hws);
- void (*bios_golden_init)(struct dc *dc);
- void (*plane_atomic_power_down)(struct dc *dc,
- struct dpp *dpp,
- struct hubp *hubp);
-
- void (*plane_atomic_disable)(
- struct dc *dc, struct pipe_ctx *pipe_ctx);
-
- void (*enable_power_gating_plane)(
- struct dce_hwseq *hws,
- bool enable);
-
- void (*dpp_pg_control)(
- struct dce_hwseq *hws,
- unsigned int dpp_inst,
- bool power_on);
-
- void (*hubp_pg_control)(
- struct dce_hwseq *hws,
- unsigned int hubp_inst,
- bool power_on);
-
- void (*dsc_pg_control)(
- struct dce_hwseq *hws,
- unsigned int dsc_inst,
- bool power_on);
-
+ /* Colour Related */
+ void (*program_gamut_remap)(struct pipe_ctx *pipe_ctx);
+ void (*program_output_csc)(struct dc *dc, struct pipe_ctx *pipe_ctx,
+ enum dc_color_space colorspace,
+ uint16_t *matrix, int opp_id);
-#if defined(CONFIG_DRM_AMD_DC_DCN2_0)
- void (*update_odm)(struct dc *dc, struct dc_state *context, struct pipe_ctx *pipe_ctx);
- void (*program_all_writeback_pipes_in_tree)(
+ /* VM Related */
+ int (*init_sys_ctx)(struct dce_hwseq *hws,
struct dc *dc,
- const struct dc_stream_state *stream,
- struct dc_state *context);
+ struct dc_phy_addr_space_config *pa_config);
+ void (*init_vm_ctx)(struct dce_hwseq *hws,
+ struct dc *dc,
+ struct dc_virtual_addr_space_config *va_config,
+ int vmid);
+
+ /* Writeback Related */
void (*update_writeback)(struct dc *dc,
- const struct dc_stream_status *stream_status,
struct dc_writeback_info *wb_info,
struct dc_state *context);
void (*enable_writeback)(struct dc *dc,
- const struct dc_stream_status *stream_status,
struct dc_writeback_info *wb_info,
struct dc_state *context);
void (*disable_writeback)(struct dc *dc,
unsigned int dwb_pipe_inst);
-#endif
- enum dc_status (*set_clock)(struct dc *dc,
- enum dc_clock_type clock_type,
- uint32_t clk_khz,
- uint32_t stepping);
- void (*get_clock)(struct dc *dc,
+ bool (*mmhubbub_warmup)(struct dc *dc,
+ unsigned int num_dwb,
+ struct dc_writeback_info *wb_info);
+
+ /* Clock Related */
+ enum dc_status (*set_clock)(struct dc *dc,
enum dc_clock_type clock_type,
+ uint32_t clk_khz, uint32_t stepping);
+ void (*get_clock)(struct dc *dc, enum dc_clock_type clock_type,
struct dc_clock_config *clock_cfg);
+ void (*optimize_pwr_state)(const struct dc *dc,
+ struct dc_state *context);
+ void (*exit_optimized_pwr_state)(const struct dc *dc,
+ struct dc_state *context);
+
+ /* Audio Related */
+ void (*enable_audio_stream)(struct pipe_ctx *pipe_ctx);
+ void (*disable_audio_stream)(struct pipe_ctx *pipe_ctx);
+
+ /* Stereo 3D Related */
+ void (*setup_stereo)(struct pipe_ctx *pipe_ctx, struct dc *dc);
+
+ /* HW State Logging Related */
+ void (*log_hw_state)(struct dc *dc, struct dc_log_buffer_ctx *log_ctx);
+ void (*get_hw_state)(struct dc *dc, char *pBuf,
+ unsigned int bufSize, unsigned int mask);
+ void (*clear_status_bits)(struct dc *dc, unsigned int mask);
+
-#if defined(CONFIG_DRM_AMD_DC_DCN2_1)
- bool (*s0i3_golden_init_wa)(struct dc *dc);
-#endif
};
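
The reorganised hw_sequencer_funcs is still the same mechanism, one vtable filled in per DCE/DCN generation, just grouped by category, with everything not meant for dc core relocated to hwseq_private_funcs in the new header below. A reduced model of how a generation wires such a table; the two entries shown are a tiny subset, and the bodies are stubs:

    #include <stdio.h>

    struct dc; struct dc_state;

    struct example_hwseq_funcs {
        /* Pipe Programming Related */
        void (*init_hw)(struct dc *dc);
        /* Bandwidth Related */
        void (*prepare_bandwidth)(struct dc *dc, struct dc_state *ctx);
    };

    static void dcn20_init_hw(struct dc *dc) { (void)dc; puts("init_hw"); }
    static void dcn20_prepare_bw(struct dc *dc, struct dc_state *ctx)
    {
        (void)dc; (void)ctx; puts("prepare_bandwidth");
    }

    static const struct example_hwseq_funcs dcn20_funcs = {
        .init_hw           = dcn20_init_hw,
        .prepare_bandwidth = dcn20_prepare_bw,
    };

    int main(void)
    {
        dcn20_funcs.init_hw(NULL);
        dcn20_funcs.prepare_bandwidth(NULL, NULL);
        return 0;
    }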
void color_space_to_black_color(
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer_private.h b/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer_private.h
new file mode 100644
index 000000000000..ecf566378ccd
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer_private.h
@@ -0,0 +1,156 @@
+/*
+ * Copyright 2015 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef __DC_HW_SEQUENCER_PRIVATE_H__
+#define __DC_HW_SEQUENCER_PRIVATE_H__
+
+#include "dc_types.h"
+
+enum pipe_gating_control {
+ PIPE_GATING_CONTROL_DISABLE = 0,
+ PIPE_GATING_CONTROL_ENABLE,
+ PIPE_GATING_CONTROL_INIT
+};
+
+struct dce_hwseq_wa {
+ bool blnd_crtc_trigger;
+ bool DEGVIDCN10_253;
+ bool false_optc_underflow;
+ bool DEGVIDCN10_254;
+ bool DEGVIDCN21;
+};
+
+struct hwseq_wa_state {
+ bool DEGVIDCN10_253_applied;
+};
+
+struct pipe_ctx;
+struct dc_state;
+struct dc_stream_status;
+struct dc_writeback_info;
+struct dchub_init_data;
+struct dc_static_screen_params;
+struct resource_pool;
+struct resource_context;
+struct stream_resource;
+struct dc_phy_addr_space_config;
+struct dc_virtual_addr_space_config;
+struct hubp;
+struct dpp;
+struct dce_hwseq;
+struct timing_generator;
+struct tg_color;
+struct output_pixel_processor;
+
+struct hwseq_private_funcs {
+
+ void (*disable_stream_gating)(struct dc *dc, struct pipe_ctx *pipe_ctx);
+ void (*enable_stream_gating)(struct dc *dc, struct pipe_ctx *pipe_ctx);
+ void (*init_pipes)(struct dc *dc, struct dc_state *context);
+ void (*reset_hw_ctx_wrap)(struct dc *dc, struct dc_state *context);
+ void (*update_plane_addr)(const struct dc *dc,
+ struct pipe_ctx *pipe_ctx);
+ void (*plane_atomic_disconnect)(struct dc *dc,
+ struct pipe_ctx *pipe_ctx);
+ void (*update_mpcc)(struct dc *dc, struct pipe_ctx *pipe_ctx);
+ bool (*set_input_transfer_func)(struct dc *dc,
+ struct pipe_ctx *pipe_ctx,
+ const struct dc_plane_state *plane_state);
+ bool (*set_output_transfer_func)(struct dc *dc,
+ struct pipe_ctx *pipe_ctx,
+ const struct dc_stream_state *stream);
+ void (*power_down)(struct dc *dc);
+ void (*enable_display_pipe_clock_gating)(struct dc_context *ctx,
+ bool clock_gating);
+ bool (*enable_display_power_gating)(struct dc *dc,
+ uint8_t controller_id,
+ struct dc_bios *dcb,
+ enum pipe_gating_control power_gating);
+ void (*blank_pixel_data)(struct dc *dc,
+ struct pipe_ctx *pipe_ctx,
+ bool blank);
+ enum dc_status (*enable_stream_timing)(
+ struct pipe_ctx *pipe_ctx,
+ struct dc_state *context,
+ struct dc *dc);
+ void (*edp_backlight_control)(struct dc_link *link,
+ bool enable);
+ void (*setup_vupdate_interrupt)(struct dc *dc,
+ struct pipe_ctx *pipe_ctx);
+ bool (*did_underflow_occur)(struct dc *dc, struct pipe_ctx *pipe_ctx);
+ void (*init_blank)(struct dc *dc, struct timing_generator *tg);
+ void (*disable_vga)(struct dce_hwseq *hws);
+ void (*bios_golden_init)(struct dc *dc);
+ void (*plane_atomic_power_down)(struct dc *dc,
+ struct dpp *dpp,
+ struct hubp *hubp);
+ void (*plane_atomic_disable)(struct dc *dc, struct pipe_ctx *pipe_ctx);
+ void (*enable_power_gating_plane)(struct dce_hwseq *hws,
+ bool enable);
+ void (*dpp_pg_control)(struct dce_hwseq *hws,
+ unsigned int dpp_inst,
+ bool power_on);
+ void (*hubp_pg_control)(struct dce_hwseq *hws,
+ unsigned int hubp_inst,
+ bool power_on);
+ void (*dsc_pg_control)(struct dce_hwseq *hws,
+ unsigned int dsc_inst,
+ bool power_on);
+ void (*update_odm)(struct dc *dc, struct dc_state *context,
+ struct pipe_ctx *pipe_ctx);
+ void (*program_all_writeback_pipes_in_tree)(struct dc *dc,
+ const struct dc_stream_state *stream,
+ struct dc_state *context);
+ bool (*s0i3_golden_init_wa)(struct dc *dc);
+ void (*get_surface_visual_confirm_color)(
+ const struct pipe_ctx *pipe_ctx,
+ struct tg_color *color);
+ void (*get_hdr_visual_confirm_color)(struct pipe_ctx *pipe_ctx,
+ struct tg_color *color);
+ void (*set_hdr_multiplier)(struct pipe_ctx *pipe_ctx);
+ void (*verify_allow_pstate_change_high)(struct dc *dc);
+ void (*program_pipe)(struct dc *dc,
+ struct pipe_ctx *pipe_ctx,
+ struct dc_state *context);
+ bool (*wait_for_blank_complete)(struct output_pixel_processor *opp);
+ void (*dccg_init)(struct dce_hwseq *hws);
+ bool (*set_blend_lut)(struct pipe_ctx *pipe_ctx,
+ const struct dc_plane_state *plane_state);
+ bool (*set_shaper_3dlut)(struct pipe_ctx *pipe_ctx,
+ const struct dc_plane_state *plane_state);
+};
+
+struct dce_hwseq {
+ struct dc_context *ctx;
+ const struct dce_hwseq_registers *regs;
+ const struct dce_hwseq_shift *shifts;
+ const struct dce_hwseq_mask *masks;
+ struct dce_hwseq_wa wa;
+ struct hwseq_wa_state wa_state;
+ struct hwseq_private_funcs funcs;
+
+};
+
+#endif /* __DC_HW_SEQUENCER_PRIVATE_H__ */
diff --git a/drivers/gpu/drm/amd/display/dc/inc/link_hwss.h b/drivers/gpu/drm/amd/display/dc/inc/link_hwss.h
index 4eff5d38a2f9..9af7ee5bc8ee 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/link_hwss.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/link_hwss.h
@@ -60,11 +60,13 @@ void dp_disable_link_phy_mst(struct dc_link *link, enum signal_type signal);
bool dp_set_hw_training_pattern(
struct dc_link *link,
- enum dc_dp_training_pattern pattern);
+ enum dc_dp_training_pattern pattern,
+ uint32_t offset);
void dp_set_hw_lane_settings(
struct dc_link *link,
- const struct link_training_settings *link_settings);
+ const struct link_training_settings *link_settings,
+ uint32_t offset);
void dp_set_hw_test_pattern(
struct dc_link *link,
diff --git a/drivers/gpu/drm/amd/display/dc/inc/reg_helper.h b/drivers/gpu/drm/amd/display/dc/inc/reg_helper.h
index 8503d9cc4763..2470405e996b 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/reg_helper.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/reg_helper.h
@@ -458,7 +458,14 @@ uint32_t generic_reg_get8(const struct dc_context *ctx, uint32_t addr,
#define IX_REG_READ(index_reg_name, data_reg_name, index) \
generic_read_indirect_reg(CTX, REG(index_reg_name), REG(data_reg_name), IND_REG(index))
+#define IX_REG_GET_N(index_reg_name, data_reg_name, index, n, ...) \
+ generic_indirect_reg_get(CTX, REG(index_reg_name), REG(data_reg_name), \
+ IND_REG(index), \
+ n, __VA_ARGS__)
+#define IX_REG_GET(index_reg_name, data_reg_name, index, field, val) \
+ IX_REG_GET_N(index_reg_name, data_reg_name, index, 1, \
+ FN(data_reg_name, field), val)
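+
+/* Usage sketch (illustrative only; the register, field and index names
+ * here are hypothetical):
+ *
+ *	uint32_t val;
+ *
+ *	IX_REG_GET(DCIO_INDEX, DCIO_DATA, ix, SOME_FIELD, &val);
+ *
+ * This reads the indirect register selected by 'ix' and extracts
+ * SOME_FIELD into val via the field's shift/mask pair from FN().
+ */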
#define IX_REG_UPDATE_N(index_reg_name, data_reg_name, index, n, ...) \
generic_indirect_reg_update_ex(CTX, \
@@ -479,10 +486,35 @@ uint32_t generic_read_indirect_reg(const struct dc_context *ctx,
uint32_t addr_index, uint32_t addr_data,
uint32_t index);
+uint32_t generic_indirect_reg_get(const struct dc_context *ctx,
+ uint32_t addr_index, uint32_t addr_data,
+ uint32_t index, int n,
+ uint8_t shift1, uint32_t mask1, uint32_t *field_value1,
+ ...);
+
uint32_t generic_indirect_reg_update_ex(const struct dc_context *ctx,
uint32_t addr_index, uint32_t addr_data,
uint32_t index, uint32_t reg_val, int n,
uint8_t shift1, uint32_t mask1, uint32_t field_value1,
...);
+/* register offload macros
+ *
+ * Instead of doing MMIO to a register directly, in some cases we want
+ * to gather a register sequence and execute it from another thread,
+ * optimizing the time required for lengthy operations.
+ */
+
+/* start gathering register sequence */
+#define REG_SEQ_START() \
+ reg_sequence_start_gather(CTX)
+
+/* start execution of register sequence gathered since REG_SEQ_START */
+#define REG_SEQ_SUBMIT() \
+ reg_sequence_start_execute(CTX)
+
+/* wait for the last REG_SEQ_SUBMIT to finish */
+#define REG_SEQ_WAIT_DONE() \
+ reg_sequence_wait_done(CTX)
+
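+/* A minimal usage sketch (illustrative only; the offloaded calls are
+ * whatever register writes the caller wants batched):
+ *
+ *	REG_SEQ_START();
+ *	... REG_UPDATE()/REG_SET() calls to offload ...
+ *	REG_SEQ_SUBMIT();
+ *	... other work while the sequence executes ...
+ *	REG_SEQ_WAIT_DONE();
+ */
+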
#endif /* DRIVERS_GPU_DRM_AMD_DC_DEV_DC_INC_REG_HELPER_H_ */
diff --git a/drivers/gpu/drm/amd/display/dc/inc/resource.h b/drivers/gpu/drm/amd/display/dc/inc/resource.h
index bef224bf803e..5ae8ada154ef 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/resource.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/resource.h
@@ -46,12 +46,8 @@ struct resource_caps {
int num_pll;
int num_dwb;
int num_ddc;
-#ifdef CONFIG_DRM_AMD_DC_DCN2_0
int num_vmid;
-#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT
int num_dsc;
-#endif
-#endif
};
struct resource_straps {
@@ -181,4 +177,6 @@ void update_audio_usage(
unsigned int resource_pixel_format_to_bpp(enum surface_pixel_format format);
+void get_audio_check(struct audio_info *aud_modes,
+ struct audio_check *aud_chk);
#endif /* DRIVERS_GPU_DRM_AMD_DC_DEV_DC_INC_RESOURCE_H_ */
diff --git a/drivers/gpu/drm/amd/display/dc/irq/Makefile b/drivers/gpu/drm/amd/display/dc/irq/Makefile
index ea75420fc876..0f682ac53bb2 100644
--- a/drivers/gpu/drm/amd/display/dc/irq/Makefile
+++ b/drivers/gpu/drm/amd/display/dc/irq/Makefile
@@ -60,27 +60,23 @@ AMD_DISPLAY_FILES += $(AMD_DAL_IRQ_DCE12)
###############################################################################
# DCN 1x
###############################################################################
-ifdef CONFIG_DRM_AMD_DC_DCN1_0
+ifdef CONFIG_DRM_AMD_DC_DCN
IRQ_DCN1 = irq_service_dcn10.o
AMD_DAL_IRQ_DCN1 = $(addprefix $(AMDDALPATH)/dc/irq/dcn10/,$(IRQ_DCN1))
AMD_DISPLAY_FILES += $(AMD_DAL_IRQ_DCN1)
-endif
###############################################################################
# DCN 20
###############################################################################
-ifdef CONFIG_DRM_AMD_DC_DCN2_0
IRQ_DCN2 = irq_service_dcn20.o
AMD_DAL_IRQ_DCN2 = $(addprefix $(AMDDALPATH)/dc/irq/dcn20/,$(IRQ_DCN2))
AMD_DISPLAY_FILES += $(AMD_DAL_IRQ_DCN2)
-endif
###############################################################################
# DCN 21
###############################################################################
-ifdef CONFIG_DRM_AMD_DC_DCN2_1
IRQ_DCN21 = irq_service_dcn21.o
AMD_DAL_IRQ_DCN21= $(addprefix $(AMDDALPATH)/dc/irq/dcn21/,$(IRQ_DCN21))
diff --git a/drivers/gpu/drm/amd/display/dc/irq/dce110/irq_service_dce110.c b/drivers/gpu/drm/amd/display/dc/irq/dce110/irq_service_dce110.c
index 1a581c464345..378cc11aa047 100644
--- a/drivers/gpu/drm/amd/display/dc/irq/dce110/irq_service_dce110.c
+++ b/drivers/gpu/drm/amd/display/dc/irq/dce110/irq_service_dce110.c
@@ -204,7 +204,7 @@ bool dce110_vblank_set(struct irq_service *irq_service,
bool enable)
{
struct dc_context *dc_ctx = irq_service->ctx;
- struct dc *core_dc = irq_service->ctx->dc;
+ struct dc *dc = irq_service->ctx->dc;
enum dc_irq_source dal_irq_src =
dc_interrupt_to_irq_source(irq_service->ctx->dc,
info->src_id,
@@ -212,7 +212,7 @@ bool dce110_vblank_set(struct irq_service *irq_service,
uint8_t pipe_offset = dal_irq_src - IRQ_TYPE_VBLANK;
struct timing_generator *tg =
- core_dc->current_state->res_ctx.pipe_ctx[pipe_offset].stream_res.tg;
+ dc->current_state->res_ctx.pipe_ctx[pipe_offset].stream_res.tg;
if (enable) {
if (!tg || !tg->funcs->arm_vert_intr(tg, 2)) {
@@ -403,7 +403,7 @@ static const struct irq_service_funcs irq_service_funcs_dce110 = {
.to_dal_irq_source = to_dal_irq_source_dce110
};
-static void construct(struct irq_service *irq_service,
+static void dce110_irq_construct(struct irq_service *irq_service,
struct irq_service_init_data *init_data)
{
dal_irq_service_construct(irq_service, init_data);
@@ -421,6 +421,6 @@ dal_irq_service_dce110_create(struct irq_service_init_data *init_data)
if (!irq_service)
return NULL;
- construct(irq_service, init_data);
+ dce110_irq_construct(irq_service, init_data);
return irq_service;
}
diff --git a/drivers/gpu/drm/amd/display/dc/irq/dce120/irq_service_dce120.c b/drivers/gpu/drm/amd/display/dc/irq/dce120/irq_service_dce120.c
index 15380336cb51..2fe4703395f3 100644
--- a/drivers/gpu/drm/amd/display/dc/irq/dce120/irq_service_dce120.c
+++ b/drivers/gpu/drm/amd/display/dc/irq/dce120/irq_service_dce120.c
@@ -273,7 +273,7 @@ static const struct irq_service_funcs irq_service_funcs_dce120 = {
.to_dal_irq_source = to_dal_irq_source_dce110
};
-static void construct(
+static void dce120_irq_construct(
struct irq_service *irq_service,
struct irq_service_init_data *init_data)
{
@@ -292,6 +292,6 @@ struct irq_service *dal_irq_service_dce120_create(
if (!irq_service)
return NULL;
- construct(irq_service, init_data);
+ dce120_irq_construct(irq_service, init_data);
return irq_service;
}
diff --git a/drivers/gpu/drm/amd/display/dc/irq/dce80/irq_service_dce80.c b/drivers/gpu/drm/amd/display/dc/irq/dce80/irq_service_dce80.c
index 281fee8ad1e5..17e426b80a00 100644
--- a/drivers/gpu/drm/amd/display/dc/irq/dce80/irq_service_dce80.c
+++ b/drivers/gpu/drm/amd/display/dc/irq/dce80/irq_service_dce80.c
@@ -283,7 +283,7 @@ static const struct irq_service_funcs irq_service_funcs_dce80 = {
.to_dal_irq_source = to_dal_irq_source_dce110
};
-static void construct(
+static void dce80_irq_construct(
struct irq_service *irq_service,
struct irq_service_init_data *init_data)
{
@@ -302,7 +302,7 @@ struct irq_service *dal_irq_service_dce80_create(
if (!irq_service)
return NULL;
- construct(irq_service, init_data);
+ dce80_irq_construct(irq_service, init_data);
return irq_service;
}
diff --git a/drivers/gpu/drm/amd/display/dc/irq/dcn10/irq_service_dcn10.c b/drivers/gpu/drm/amd/display/dc/irq/dcn10/irq_service_dcn10.c
index cc8e7dedccce..f956b3bde680 100644
--- a/drivers/gpu/drm/amd/display/dc/irq/dcn10/irq_service_dcn10.c
+++ b/drivers/gpu/drm/amd/display/dc/irq/dcn10/irq_service_dcn10.c
@@ -355,7 +355,7 @@ static const struct irq_service_funcs irq_service_funcs_dcn10 = {
.to_dal_irq_source = to_dal_irq_source_dcn10
};
-static void construct(
+static void dcn10_irq_construct(
struct irq_service *irq_service,
struct irq_service_init_data *init_data)
{
@@ -374,6 +374,6 @@ struct irq_service *dal_irq_service_dcn10_create(
if (!irq_service)
return NULL;
- construct(irq_service, init_data);
+ dcn10_irq_construct(irq_service, init_data);
return irq_service;
}
diff --git a/drivers/gpu/drm/amd/display/dc/irq/dcn20/irq_service_dcn20.c b/drivers/gpu/drm/amd/display/dc/irq/dcn20/irq_service_dcn20.c
index 5db29bf582d3..2a1fea501f8c 100644
--- a/drivers/gpu/drm/amd/display/dc/irq/dcn20/irq_service_dcn20.c
+++ b/drivers/gpu/drm/amd/display/dc/irq/dcn20/irq_service_dcn20.c
@@ -359,7 +359,7 @@ static const struct irq_service_funcs irq_service_funcs_dcn20 = {
.to_dal_irq_source = to_dal_irq_source_dcn20
};
-static void construct(
+static void dcn20_irq_construct(
struct irq_service *irq_service,
struct irq_service_init_data *init_data)
{
@@ -378,6 +378,6 @@ struct irq_service *dal_irq_service_dcn20_create(
if (!irq_service)
return NULL;
- construct(irq_service, init_data);
+ dcn20_irq_construct(irq_service, init_data);
return irq_service;
}
diff --git a/drivers/gpu/drm/amd/display/dc/irq/dcn21/irq_service_dcn21.c b/drivers/gpu/drm/amd/display/dc/irq/dcn21/irq_service_dcn21.c
index cbe7818529bb..1b971265418b 100644
--- a/drivers/gpu/drm/amd/display/dc/irq/dcn21/irq_service_dcn21.c
+++ b/drivers/gpu/drm/amd/display/dc/irq/dcn21/irq_service_dcn21.c
@@ -350,7 +350,7 @@ static const struct irq_service_funcs irq_service_funcs_dcn21 = {
.to_dal_irq_source = to_dal_irq_source_dcn21
};
-static void construct(
+static void dcn21_irq_construct(
struct irq_service *irq_service,
struct irq_service_init_data *init_data)
{
@@ -369,6 +369,6 @@ struct irq_service *dal_irq_service_dcn21_create(
if (!irq_service)
return NULL;
- construct(irq_service, init_data);
+ dcn21_irq_construct(irq_service, init_data);
return irq_service;
}
diff --git a/drivers/gpu/drm/amd/display/dc/irq/irq_service.c b/drivers/gpu/drm/amd/display/dc/irq/irq_service.c
index 0878550a8178..33053b9fe6bd 100644
--- a/drivers/gpu/drm/amd/display/dc/irq/irq_service.c
+++ b/drivers/gpu/drm/amd/display/dc/irq/irq_service.c
@@ -38,7 +38,7 @@
#include "dce120/irq_service_dce120.h"
-#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
+#if defined(CONFIG_DRM_AMD_DC_DCN)
#include "dcn10/irq_service_dcn10.h"
#endif
diff --git a/drivers/gpu/drm/amd/display/dc/os_types.h b/drivers/gpu/drm/amd/display/dc/os_types.h
index 30ec80ac6fc8..c34eba19860a 100644
--- a/drivers/gpu/drm/amd/display/dc/os_types.h
+++ b/drivers/gpu/drm/amd/display/dc/os_types.h
@@ -1,5 +1,6 @@
/*
* Copyright 2012-16 Advanced Micro Devices, Inc.
+ * Copyright 2019 Raptor Engineering, LLC
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
@@ -29,6 +30,7 @@
#include <linux/kgdb.h>
#include <linux/kref.h>
#include <linux/types.h>
+#include <linux/slab.h>
#include <asm/byteorder.h>
@@ -48,8 +50,39 @@
#define dm_error(fmt, ...) DRM_ERROR(fmt, ##__VA_ARGS__)
-#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+#if defined(CONFIG_X86)
#include <asm/fpu/api.h>
+#define DC_FP_START() kernel_fpu_begin()
+#define DC_FP_END() kernel_fpu_end()
+#elif defined(CONFIG_PPC64)
+#include <asm/switch_to.h>
+#include <asm/cputable.h>
+#define DC_FP_START() { \
+ if (cpu_has_feature(CPU_FTR_VSX_COMP)) { \
+ preempt_disable(); \
+ enable_kernel_vsx(); \
+ } else if (cpu_has_feature(CPU_FTR_ALTIVEC_COMP)) { \
+ preempt_disable(); \
+ enable_kernel_altivec(); \
+ } else if (!cpu_has_feature(CPU_FTR_FPU_UNAVAILABLE)) { \
+ preempt_disable(); \
+ enable_kernel_fp(); \
+ } \
+}
+#define DC_FP_END() { \
+ if (cpu_has_feature(CPU_FTR_VSX_COMP)) { \
+ disable_kernel_vsx(); \
+ preempt_enable(); \
+ } else if (cpu_has_feature(CPU_FTR_ALTIVEC_COMP)) { \
+ disable_kernel_altivec(); \
+ preempt_enable(); \
+ } else if (!cpu_has_feature(CPU_FTR_FPU_UNAVAILABLE)) { \
+ disable_kernel_fp(); \
+ preempt_enable(); \
+ } \
+}
+#endif
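+
+/* Any floating-point math in DC must be bracketed by these macros, e.g.
+ * (a sketch; the computation itself is whatever the caller needs):
+ *
+ *	DC_FP_START();
+ *	... FPU/VSX/AltiVec computation, no sleeping calls ...
+ *	DC_FP_END();
+ */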
#endif
/*
diff --git a/drivers/gpu/drm/amd/display/dc/virtual/virtual_stream_encoder.c b/drivers/gpu/drm/amd/display/dc/virtual/virtual_stream_encoder.c
index ff664bdb1482..b8040da94b9d 100644
--- a/drivers/gpu/drm/amd/display/dc/virtual/virtual_stream_encoder.c
+++ b/drivers/gpu/drm/amd/display/dc/virtual/virtual_stream_encoder.c
@@ -32,6 +32,7 @@ static void virtual_stream_encoder_dp_set_stream_attribute(
struct stream_encoder *enc,
struct dc_crtc_timing *crtc_timing,
enum dc_color_space output_color_space,
+ bool use_vsc_sdp_for_colorimetry,
uint32_t enable_sdp_splitting) {}
static void virtual_stream_encoder_hdmi_set_stream_attribute(
@@ -81,22 +82,14 @@ static void virtual_stream_encoder_reset_hdmi_stream_attribute(
struct stream_encoder *enc)
{}
-#ifdef CONFIG_DRM_AMD_DC_DCN2_0
-#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT
static void virtual_enc_dp_set_odm_combine(
struct stream_encoder *enc,
bool odm_combine)
{}
-#endif
-#endif
static const struct stream_encoder_funcs virtual_str_enc_funcs = {
-#ifdef CONFIG_DRM_AMD_DC_DCN2_0
-#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT
.dp_set_odm_combine =
virtual_enc_dp_set_odm_combine,
-#endif
-#endif
.dp_set_stream_attribute =
virtual_stream_encoder_dp_set_stream_attribute,
.hdmi_set_stream_attribute =
diff --git a/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h b/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h
new file mode 100644
index 000000000000..cd9532b4f14d
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h
@@ -0,0 +1,289 @@
+/*
+ * Copyright 2019 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef _DMUB_CMD_H_
+#define _DMUB_CMD_H_
+
+#include "dmub_types.h"
+#include "dmub_cmd_dal.h"
+#include "dmub_cmd_vbios.h"
+#include "atomfirmware.h"
+
+#define DMUB_RB_CMD_SIZE 64
+#define DMUB_RB_MAX_ENTRY 128
+#define DMUB_RB_SIZE (DMUB_RB_CMD_SIZE * DMUB_RB_MAX_ENTRY)
+#define REG_SET_MASK 0xFFFF
+
+
+/*
+ * Command IDs should be treated as stable ABI.
+ * Do not reuse or modify IDs.
+ */
+
+enum dmub_cmd_type {
+ DMUB_CMD__NULL = 0,
+ DMUB_CMD__REG_SEQ_READ_MODIFY_WRITE = 1,
+ DMUB_CMD__REG_SEQ_FIELD_UPDATE_SEQ = 2,
+ DMUB_CMD__REG_SEQ_BURST_WRITE = 3,
+ DMUB_CMD__REG_REG_WAIT = 4,
+ DMUB_CMD__PLAT_54186_WA = 5,
+ DMUB_CMD__PSR = 64,
+ DMUB_CMD__VBIOS = 128,
+};
+
+#pragma pack(push, 1)
+
+struct dmub_cmd_header {
+ unsigned int type : 8;
+ unsigned int sub_type : 8;
+ unsigned int reserved0 : 8;
+ unsigned int payload_bytes : 6; /* up to 60 bytes */
+ unsigned int reserved1 : 2;
+};
+
+/*
+ * Read modify write
+ *
+ * 60 payload bytes can hold up to 5 sets of read modify writes,
+ * each taking 3 dwords.
+ *
+ * number of sequences = header.payload_bytes / sizeof(struct dmub_cmd_read_modify_write_sequence)
+ *
+ * modify_mask = 0xffffffff means all fields are going to be updated. In this
+ * case the command parser will skip the read, so modify_mask = 0xffffffff can
+ * be used for a plain register write.
+ */
+struct dmub_cmd_read_modify_write_sequence {
+ uint32_t addr;
+ uint32_t modify_mask;
+ uint32_t modify_value;
+};
+
+#define DMUB_READ_MODIFY_WRITE_SEQ__MAX 5
+struct dmub_rb_cmd_read_modify_write {
+ struct dmub_cmd_header header; // type = DMUB_CMD__REG_SEQ_READ_MODIFY_WRITE
+ struct dmub_cmd_read_modify_write_sequence seq[DMUB_READ_MODIFY_WRITE_SEQ__MAX];
+};
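+
+/* A minimal sketch of building a single-entry read modify write command
+ * (illustrative only; reg_addr and the mask/value are placeholders):
+ *
+ *	struct dmub_rb_cmd_read_modify_write cmd = { 0 };
+ *
+ *	cmd.header.type = DMUB_CMD__REG_SEQ_READ_MODIFY_WRITE;
+ *	cmd.header.payload_bytes = sizeof(cmd.seq[0]);
+ *	cmd.seq[0].addr = reg_addr;
+ *	cmd.seq[0].modify_mask = 0x00000003;
+ *	cmd.seq[0].modify_value = 0x00000001;
+ */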
+
+/*
+ * Update a register with a sequence of specified masks and values
+ *
+ * 60 payload bytes can hold the address plus up to 7 mask/value pairs,
+ * each taking 2 dwords.
+ *
+ * number of field update sequences = (header.payload_bytes - sizeof(addr)) / sizeof(struct dmub_cmd_reg_field_update_sequence)
+ *
+ * USE CASES:
+ * 1. an auto-increment register where an additional read would advance the pointer and produce a wrong result
+ * 2. toggling a bit without a read in the middle
+ */
+
+struct dmub_cmd_reg_field_update_sequence {
+ uint32_t modify_mask; // 0xffffffff to skip the initial read
+ uint32_t modify_value;
+};
+
+#define DMUB_REG_FIELD_UPDATE_SEQ__MAX 7
+
+struct dmub_rb_cmd_reg_field_update_sequence {
+ struct dmub_cmd_header header;
+ uint32_t addr;
+ struct dmub_cmd_reg_field_update_sequence seq[DMUB_REG_FIELD_UPDATE_SEQ__MAX];
+};
+
+
+/*
+ * Burst write
+ *
+ * Supports use cases such as writing out LUTs.
+ *
+ * 60 payload bytes can hold the address plus up to 14 values to write to
+ * the given address.
+ *
+ * number of values = (header.payload_bytes - sizeof(addr)) / sizeof(uint32_t)
+ */
+#define DMUB_BURST_WRITE_VALUES__MAX 14
+struct dmub_rb_cmd_burst_write {
+ struct dmub_cmd_header header; // type = DMUB_CMD__REG_SEQ_BURST_WRITE
+ uint32_t addr;
+ uint32_t write_values[DMUB_BURST_WRITE_VALUES__MAX];
+};
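+
+/* Sketch: a two-value burst write (illustrative; lut_base is a placeholder):
+ *
+ *	struct dmub_rb_cmd_burst_write bw = { 0 };
+ *
+ *	bw.header.type = DMUB_CMD__REG_SEQ_BURST_WRITE;
+ *	bw.header.payload_bytes = sizeof(bw.addr) + 2 * sizeof(uint32_t);
+ *	bw.addr = lut_base;
+ *	bw.write_values[0] = 0x100;
+ *	bw.write_values[1] = 0x200;
+ */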
+
+
+struct dmub_rb_cmd_common {
+ struct dmub_cmd_header header;
+ uint8_t cmd_buffer[DMUB_RB_CMD_SIZE - sizeof(struct dmub_cmd_header)];
+};
+
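+/* Waits on a register: poll addr until (value & mask) == condition_field_value,
+ * giving up after time_out_us microseconds (as the field names suggest; the
+ * exact firmware-side behavior is an assumption).
+ */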
+struct dmub_cmd_reg_wait_data {
+ uint32_t addr;
+ uint32_t mask;
+ uint32_t condition_field_value;
+ uint32_t time_out_us;
+};
+
+struct dmub_rb_cmd_reg_wait {
+ struct dmub_cmd_header header;
+ struct dmub_cmd_reg_wait_data reg_wait;
+};
+
+#ifndef PHYSICAL_ADDRESS_LOC
+#define PHYSICAL_ADDRESS_LOC union large_integer
+#endif
+
+struct dmub_cmd_PLAT_54186_wa {
+ uint32_t DCSURF_SURFACE_CONTROL;
+ uint32_t DCSURF_PRIMARY_SURFACE_ADDRESS_HIGH;
+ uint32_t DCSURF_PRIMARY_SURFACE_ADDRESS;
+ uint32_t DCSURF_PRIMARY_SURFACE_ADDRESS_HIGH_C;
+ uint32_t DCSURF_PRIMARY_SURFACE_ADDRESS_C;
+ struct {
+ uint8_t hubp_inst : 4;
+ uint8_t tmz_surface : 1;
+ uint8_t immediate : 1;
+ uint8_t vmid : 4;
+ uint8_t grph_stereo : 1;
+ uint32_t reserved : 21;
+ } flip_params;
+ uint32_t reserved[9];
+};
+
+struct dmub_rb_cmd_PLAT_54186_wa {
+ struct dmub_cmd_header header;
+ struct dmub_cmd_PLAT_54186_wa flip;
+};
+
+struct dmub_cmd_digx_encoder_control_data {
+ union dig_encoder_control_parameters_v1_5 dig;
+};
+
+struct dmub_rb_cmd_digx_encoder_control {
+ struct dmub_cmd_header header;
+ struct dmub_cmd_digx_encoder_control_data encoder_control;
+};
+
+struct dmub_cmd_set_pixel_clock_data {
+ struct set_pixel_clock_parameter_v1_7 clk;
+};
+
+struct dmub_rb_cmd_set_pixel_clock {
+ struct dmub_cmd_header header;
+ struct dmub_cmd_set_pixel_clock_data pixel_clock;
+};
+
+struct dmub_cmd_enable_disp_power_gating_data {
+ struct enable_disp_power_gating_parameters_v2_1 pwr;
+};
+
+struct dmub_rb_cmd_enable_disp_power_gating {
+ struct dmub_cmd_header header;
+ struct dmub_cmd_enable_disp_power_gating_data power_gating;
+};
+
+struct dmub_cmd_dig1_transmitter_control_data {
+ struct dig_transmitter_control_parameters_v1_6 dig;
+};
+
+struct dmub_rb_cmd_dig1_transmitter_control {
+ struct dmub_cmd_header header;
+ struct dmub_cmd_dig1_transmitter_control_data transmitter_control;
+};
+
+struct dmub_rb_cmd_dpphy_init {
+ struct dmub_cmd_header header;
+ uint8_t reserved[60];
+};
+
+struct dmub_cmd_psr_copy_settings_data {
+ uint16_t psr_level;
+ uint8_t hubp_inst;
+ uint8_t dpp_inst;
+ uint8_t mpcc_inst;
+ uint8_t opp_inst;
+ uint8_t otg_inst;
+ uint8_t digfe_inst;
+ uint8_t digbe_inst;
+ uint8_t dpphy_inst;
+ uint8_t aux_inst;
+ uint8_t hyst_frames;
+ uint8_t hyst_lines;
+ uint8_t phy_num;
+ uint8_t phy_type;
+ uint8_t aux_repeat;
+ uint8_t smu_optimizations_en;
+ uint8_t skip_wait_for_pll_lock;
+ uint8_t frame_delay;
+ uint8_t smu_phy_id;
+ uint8_t num_of_controllers;
+ uint8_t link_rate;
+ uint8_t frame_cap_ind;
+};
+
+struct dmub_rb_cmd_psr_copy_settings {
+ struct dmub_cmd_header header;
+ struct dmub_cmd_psr_copy_settings_data psr_copy_settings_data;
+};
+
+struct dmub_cmd_psr_set_level_data {
+ uint16_t psr_level;
+};
+
+struct dmub_rb_cmd_psr_set_level {
+ struct dmub_cmd_header header;
+ struct dmub_cmd_psr_set_level_data psr_set_level_data;
+};
+
+struct dmub_rb_cmd_psr_enable {
+ struct dmub_cmd_header header;
+};
+
+struct dmub_cmd_psr_setup_data {
+ enum psr_version version; // requested PSR version
+};
+
+struct dmub_rb_cmd_psr_setup {
+ struct dmub_cmd_header header;
+ struct dmub_cmd_psr_setup_data psr_setup_data;
+};
+
+union dmub_rb_cmd {
+ struct dmub_rb_cmd_read_modify_write read_modify_write;
+ struct dmub_rb_cmd_reg_field_update_sequence reg_field_update_seq;
+ struct dmub_rb_cmd_burst_write burst_write;
+ struct dmub_rb_cmd_reg_wait reg_wait;
+ struct dmub_rb_cmd_common cmd_common;
+ struct dmub_rb_cmd_digx_encoder_control digx_encoder_control;
+ struct dmub_rb_cmd_set_pixel_clock set_pixel_clock;
+ struct dmub_rb_cmd_enable_disp_power_gating enable_disp_power_gating;
+ struct dmub_rb_cmd_dpphy_init dpphy_init;
+ struct dmub_rb_cmd_dig1_transmitter_control dig1_transmitter_control;
+ struct dmub_rb_cmd_psr_enable psr_enable;
+ struct dmub_rb_cmd_psr_copy_settings psr_copy_settings;
+ struct dmub_rb_cmd_psr_set_level psr_set_level;
+ struct dmub_rb_cmd_PLAT_54186_wa PLAT_54186_wa;
+ struct dmub_rb_cmd_psr_setup psr_setup;
+};
+
+#pragma pack(pop)
+
+#endif /* _DMUB_CMD_H_ */
diff --git a/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd_dal.h b/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd_dal.h
new file mode 100644
index 000000000000..7b69eb37f762
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd_dal.h
@@ -0,0 +1,48 @@
+/*
+ * Copyright 2019 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef _DMUB_CMD_DAL_H_
+#define _DMUB_CMD_DAL_H_
+
+/*
+ * Command IDs should be treated as stable ABI.
+ * Do not reuse or modify IDs.
+ */
+
+enum dmub_cmd_psr_type {
+ DMUB_CMD__PSR_SETUP = 0,
+ DMUB_CMD__PSR_COPY_SETTINGS = 1,
+ DMUB_CMD__PSR_ENABLE = 2,
+ DMUB_CMD__PSR_DISABLE = 3,
+ DMUB_CMD__PSR_SET_LEVEL = 4,
+};
+
+enum psr_version {
+ PSR_VERSION_1 = 0x10, // PSR Version 1
+ PSR_VERSION_2 = 0x20, // PSR Version 2, includes selective update
+ PSR_VERSION_2_Y_COORD = 0x21, // PSR Version 2, includes Y-coordinate support for SU
+};
+
+#endif /* _DMUB_CMD_DAL_H_ */
diff --git a/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd_vbios.h b/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd_vbios.h
new file mode 100644
index 000000000000..b6deb8e2590f
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd_vbios.h
@@ -0,0 +1,41 @@
+/*
+ * Copyright 2019 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef _DMUB_CMD_VBIOS_H_
+#define _DMUB_CMD_VBIOS_H_
+
+/*
+ * Command IDs should be treated as stable ABI.
+ * Do not reuse or modify IDs.
+ */
+
+enum dmub_cmd_vbios_type {
+ DMUB_CMD__VBIOS_DIGX_ENCODER_CONTROL = 0,
+ DMUB_CMD__VBIOS_DIG1_TRANSMITTER_CONTROL = 1,
+ DMUB_CMD__VBIOS_SET_PIXEL_CLOCK = 2,
+ DMUB_CMD__VBIOS_ENABLE_DISP_POWER_GATING = 3,
+};
+
+#endif /* _DMUB_CMD_VBIOS_H_ */
diff --git a/drivers/gpu/drm/amd/display/dmub/inc/dmub_fw_meta.h b/drivers/gpu/drm/amd/display/dmub/inc/dmub_fw_meta.h
new file mode 100644
index 000000000000..242ec257998c
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dmub/inc/dmub_fw_meta.h
@@ -0,0 +1,63 @@
+/*
+ * Copyright 2019 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+#ifndef _DMUB_META_H_
+#define _DMUB_META_H_
+
+#include "dmub_types.h"
+
+#pragma pack(push, 1)
+
+/* Magic value for identifying dmub_fw_meta_info */
+#define DMUB_FW_META_MAGIC 0x444D5542
+
+/* Offset from the end of the file to the dmub_fw_meta_info */
+#define DMUB_FW_META_OFFSET 0x24
+
+/**
+ * struct dmub_fw_meta_info - metadata associated with fw binary
+ *
+ * NOTE: This should be considered a stable API. Fields should
+ * not be repurposed or reordered. New fields should be
+ * added instead to extend the structure.
+ *
+ * @magic_value: magic value identifying DMUB firmware meta info
+ * @fw_region_size: size of the firmware state region
+ * @trace_buffer_size: size of the tracebuffer region
+ */
+struct dmub_fw_meta_info {
+ uint32_t magic_value;
+ uint32_t fw_region_size;
+ uint32_t trace_buffer_size;
+};
+
+/* Ensure that the structure remains 64 bytes. */
+union dmub_fw_meta {
+ struct dmub_fw_meta_info info;
+ uint8_t reserved[64];
+};
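+
+/* A loader can locate the meta info relative to the end of the firmware
+ * blob (a sketch, assuming fw_end points one byte past the blob):
+ *
+ *	const union dmub_fw_meta *meta =
+ *		(const union dmub_fw_meta *)(fw_end - DMUB_FW_META_OFFSET -
+ *					     sizeof(union dmub_fw_meta));
+ *
+ *	if (meta->info.magic_value == DMUB_FW_META_MAGIC)
+ *		... meta->info is valid ...
+ */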
+
+#pragma pack(pop)
+
+#endif /* _DMUB_META_H_ */
diff --git a/drivers/gpu/drm/amd/display/dmub/inc/dmub_rb.h b/drivers/gpu/drm/amd/display/dmub/inc/dmub_rb.h
new file mode 100644
index 000000000000..df875fdd2ab0
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dmub/inc/dmub_rb.h
@@ -0,0 +1,154 @@
+/*
+ * Copyright 2019 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef _DMUB_RB_H_
+#define _DMUB_RB_H_
+
+#include "dmub_types.h"
+#include "dmub_cmd.h"
+
+#if defined(__cplusplus)
+extern "C" {
+#endif
+
+struct dmub_cmd_header;
+
+struct dmub_rb_init_params {
+ void *ctx;
+ void *base_address;
+ uint32_t capacity;
+};
+
+struct dmub_rb {
+ void *base_address;
+ uint32_t data_count;
+ uint32_t rptr;
+ uint32_t wrpt;
+ uint32_t capacity;
+
+ void *ctx;
+ void *dmub;
+};
+
+
+static inline bool dmub_rb_empty(struct dmub_rb *rb)
+{
+ return (rb->wrpt == rb->rptr);
+}
+
+static inline bool dmub_rb_full(struct dmub_rb *rb)
+{
+ uint32_t data_count;
+
+ if (rb->wrpt >= rb->rptr)
+ data_count = rb->wrpt - rb->rptr;
+ else
+ data_count = rb->capacity - (rb->rptr - rb->wrpt);
+
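+ /* one DMUB_RB_CMD_SIZE slot is left unused so a full ring is distinguishable from an empty one */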
+ return (data_count == (rb->capacity - DMUB_RB_CMD_SIZE));
+}
+
+static inline bool dmub_rb_push_front(struct dmub_rb *rb,
+ const struct dmub_cmd_header *cmd)
+{
+ uint64_t volatile *dst = (uint64_t volatile *)(rb->base_address) + rb->wrpt / sizeof(uint64_t);
+ const uint64_t *src = (const uint64_t *)cmd;
+ int i;
+
+ if (dmub_rb_full(rb))
+ return false;
+
+ // copying data
+ for (i = 0; i < DMUB_RB_CMD_SIZE / sizeof(uint64_t); i++)
+ *dst++ = *src++;
+
+ rb->wrpt += DMUB_RB_CMD_SIZE;
+
+ if (rb->wrpt >= rb->capacity)
+ rb->wrpt %= rb->capacity;
+
+ return true;
+}
+
+static inline bool dmub_rb_front(struct dmub_rb *rb,
+ struct dmub_cmd_header *cmd)
+{
+ uint8_t *rd_ptr = (uint8_t *)rb->base_address + rb->rptr;
+
+ if (dmub_rb_empty(rb))
+ return false;
+
+ dmub_memcpy(cmd, rd_ptr, DMUB_RB_CMD_SIZE);
+
+ return true;
+}
+
+static inline bool dmub_rb_pop_front(struct dmub_rb *rb)
+{
+ if (dmub_rb_empty(rb))
+ return false;
+
+ rb->rptr += DMUB_RB_CMD_SIZE;
+
+ if (rb->rptr >= rb->capacity)
+ rb->rptr %= rb->capacity;
+
+ return true;
+}
+
+static inline void dmub_rb_flush_pending(const struct dmub_rb *rb)
+{
+ uint32_t rptr = rb->rptr;
+ uint32_t wptr = rb->wrpt;
+
+ while (rptr != wptr) {
+ uint64_t volatile *data = (uint64_t volatile *)rb->base_address + rptr / sizeof(uint64_t);
+ uint64_t temp;
+ int i;
+
+ for (i = 0; i < DMUB_RB_CMD_SIZE / sizeof(uint64_t); i++)
+ temp = *data++;
+
+ rptr += DMUB_RB_CMD_SIZE;
+ if (rptr >= rb->capacity)
+ rptr %= rb->capacity;
+ }
+}
+
+static inline void dmub_rb_init(struct dmub_rb *rb,
+ struct dmub_rb_init_params *init_params)
+{
+ rb->base_address = init_params->base_address;
+ rb->capacity = init_params->capacity;
+ rb->rptr = 0;
+ rb->wrpt = 0;
+}
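+
+/* A minimal usage sketch (illustrative only; ring_mem is a caller-provided
+ * buffer of DMUB_RB_SIZE bytes and cmd a filled-in command):
+ *
+ *	struct dmub_rb_init_params p = { .base_address = ring_mem,
+ *					 .capacity = DMUB_RB_SIZE };
+ *	struct dmub_rb rb;
+ *
+ *	dmub_rb_init(&rb, &p);
+ *	if (!dmub_rb_push_front(&rb, &cmd.header))
+ *		... ring is full, drain or retry ...
+ */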
+
+#if defined(__cplusplus)
+}
+#endif
+
+#endif /* _DMUB_RB_H_ */
diff --git a/drivers/gpu/drm/amd/display/dmub/inc/dmub_srv.h b/drivers/gpu/drm/amd/display/dmub/inc/dmub_srv.h
new file mode 100644
index 000000000000..8e23a7017588
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dmub/inc/dmub_srv.h
@@ -0,0 +1,506 @@
+/*
+ * Copyright 2019 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef _DMUB_SRV_H_
+#define _DMUB_SRV_H_
+
+/**
+ * DOC: DMUB interface and operation
+ *
+ * DMUB is the interface to the display DMCUB microcontroller on DCN hardware.
+ * It delegates hardware initialization and command submission to the
+ * microcontroller. DMUB is the shortname for DMCUB.
+ *
+ * This interface is not thread-safe. Ensure that all access to the interface
+ * is properly synchronized by the caller.
+ *
+ * Initialization and usage of the DMUB service should be done in the
+ * steps given below:
+ *
+ * 1. dmub_srv_create()
+ * 2. dmub_srv_has_hw_support()
+ * 3. dmub_srv_calc_region_info()
+ * 4. dmub_srv_hw_init()
+ *
+ * The call to dmub_srv_create() is required to use the server.
+ *
+ * The calls to dmub_srv_has_hw_support() and dmub_srv_calc_region_info()
+ * are helpers to query cache window size and allocate framebuffer(s)
+ * for the cache windows.
+ *
+ * The call to dmub_srv_hw_init() programs the DMCUB registers to prepare
+ * for command submission. Commands can be queued via dmub_srv_cmd_queue()
+ * and executed via dmub_srv_cmd_execute().
+ *
+ * If the queue is full the dmub_srv_wait_for_idle() call can be used to
+ * wait until the queue has been cleared.
+ *
+ * Destroying the DMUB service can be done by calling dmub_srv_destroy().
+ * This does not clear DMUB hardware state, only software state.
+ *
+ * The interface is intended to be standalone and should not depend on any
+ * other component within DAL.
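+ *
+ * A typical bring-up looks roughly like this (a sketch; error handling,
+ * the parameter structures and framebuffer allocation are driver-specific
+ * and not shown):
+ *
+ *	struct dmub_srv dmub;
+ *	struct dmub_srv_region_info region_info;
+ *	bool is_supported;
+ *
+ *	dmub_srv_create(&dmub, &create_params);
+ *	dmub_srv_has_hw_support(&dmub, &is_supported);
+ *	dmub_srv_calc_region_info(&dmub, &region_params, &region_info);
+ *	... allocate framebuffer(s) of at least region_info.fb_size bytes ...
+ *	dmub_srv_hw_init(&dmub, &hw_params);
+ *	dmub_srv_cmd_queue(&dmub, &cmd.header);
+ *	dmub_srv_cmd_execute(&dmub);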
+ */
+
+#include "dmub_types.h"
+#include "dmub_cmd.h"
+#include "dmub_rb.h"
+
+#if defined(__cplusplus)
+extern "C" {
+#endif
+
+/* Forward declarations */
+struct dmub_srv;
+struct dmub_cmd_header;
+struct dmub_srv_common_regs;
+
+/* enum dmub_status - return code for dmcub functions */
+enum dmub_status {
+ DMUB_STATUS_OK = 0,
+ DMUB_STATUS_NO_CTX,
+ DMUB_STATUS_QUEUE_FULL,
+ DMUB_STATUS_TIMEOUT,
+ DMUB_STATUS_INVALID,
+};
+
+/* enum dmub_asic - dmub asic identifier */
+enum dmub_asic {
+ DMUB_ASIC_NONE = 0,
+ DMUB_ASIC_DCN20,
+ DMUB_ASIC_DCN21,
+ DMUB_ASIC_MAX,
+};
+
+/* enum dmub_window_id - dmub window identifier */
+enum dmub_window_id {
+ DMUB_WINDOW_0_INST_CONST = 0,
+ DMUB_WINDOW_1_STACK,
+ DMUB_WINDOW_2_BSS_DATA,
+ DMUB_WINDOW_3_VBIOS,
+ DMUB_WINDOW_4_MAILBOX,
+ DMUB_WINDOW_5_TRACEBUFF,
+ DMUB_WINDOW_6_FW_STATE,
+ DMUB_WINDOW_7_RESERVED,
+ DMUB_WINDOW_TOTAL,
+};
+
+/**
+ * struct dmub_region - dmub hw memory region
+ * @base: base address for region, must be 256 byte aligned
+ * @top: top address for region
+ */
+struct dmub_region {
+ uint32_t base;
+ uint32_t top;
+};
+
+/**
+ * struct dmub_window - dmub hw cache window
+ * @offset: offset to the fb memory in gpu address space
+ * @region: region in uc address space for cache window
+ */
+struct dmub_window {
+ union dmub_addr offset;
+ struct dmub_region region;
+};
+
+/**
+ * struct dmub_fb - defines a dmub framebuffer memory region
+ * @cpu_addr: cpu virtual address for the region, NULL if invalid
+ * @gpu_addr: gpu virtual address for the region, 0 if invalid
+ * @size: size of the region in bytes, zero if invalid
+ */
+struct dmub_fb {
+ void *cpu_addr;
+ uint64_t gpu_addr;
+ uint32_t size;
+};
+
+/**
+ * struct dmub_srv_region_params - params used for calculating dmub regions
+ * @inst_const_size: size of the fw inst const section
+ * @bss_data_size: size of the fw bss data section
+ * @vbios_size: size of the vbios data
+ * @fw_bss_data: raw firmware bss data section
+ */
+struct dmub_srv_region_params {
+ uint32_t inst_const_size;
+ uint32_t bss_data_size;
+ uint32_t vbios_size;
+ const uint8_t *fw_bss_data;
+};
+
+/**
+ * struct dmub_srv_region_info - output region info from the dmub service
+ * @fb_size: required minimum fb size for all regions, aligned to 4096 bytes
+ * @num_regions: number of regions used by the dmub service
+ * @regions: region info
+ *
+ * The regions are aligned such that they can be all placed within the
+ * same framebuffer but they can also be placed into different framebuffers.
+ *
+ * The size of each region can be calculated by the caller:
+ * size = reg.top - reg.base
+ *
+ * Care must be taken when performing custom allocations to ensure that each
+ * region base address is 256 byte aligned.
+ */
+struct dmub_srv_region_info {
+ uint32_t fb_size;
+ uint8_t num_regions;
+ struct dmub_region regions[DMUB_WINDOW_TOTAL];
+};
+
+/**
+ * struct dmub_srv_fb_params - parameters used for driver fb setup
+ * @region_info: region info calculated by dmub service
+ * @cpu_addr: base cpu address for the framebuffer
+ * @gpu_addr: base gpu virtual address for the framebuffer
+ */
+struct dmub_srv_fb_params {
+ const struct dmub_srv_region_info *region_info;
+ void *cpu_addr;
+ uint64_t gpu_addr;
+};
+
+/**
+ * struct dmub_srv_fb_info - output fb info from the dmub service
+ * @num_fb: number of required dmub framebuffers
+ * @fb: fb data for each region
+ *
+ * Output from the dmub service helper that can be used by the
+ * driver to prepare dmub_fb that can be passed into the dmub
+ * hw init service.
+ *
+ * Assumes that all regions are within the same framebuffer
+ * and have been set up according to the region_info generated
+ * by the dmub service.
+ */
+struct dmub_srv_fb_info {
+ uint8_t num_fb;
+ struct dmub_fb fb[DMUB_WINDOW_TOTAL];
+};
+
+/**
+ * struct dmub_srv_base_funcs - Driver specific base callbacks
+ */
+struct dmub_srv_base_funcs {
+ /**
+ * @reg_read:
+ *
+ * Hook for reading a register.
+ *
+ * Return: The 32-bit register value from the given address.
+ */
+ uint32_t (*reg_read)(void *ctx, uint32_t address);
+
+ /**
+ * @reg_write:
+ *
+ * Hook for writing a value to the register specified by address.
+ */
+ void (*reg_write)(void *ctx, uint32_t address, uint32_t value);
+};
+
+/**
+ * struct dmub_srv_hw_funcs - hardware sequencer funcs for dmub
+ */
+struct dmub_srv_hw_funcs {
+ /* private: internal use only */
+
+ void (*reset)(struct dmub_srv *dmub);
+
+ void (*reset_release)(struct dmub_srv *dmub);
+
+ void (*backdoor_load)(struct dmub_srv *dmub,
+ const struct dmub_window *cw0,
+ const struct dmub_window *cw1);
+
+ void (*setup_windows)(struct dmub_srv *dmub,
+ const struct dmub_window *cw2,
+ const struct dmub_window *cw3,
+ const struct dmub_window *cw4,
+ const struct dmub_window *cw5,
+ const struct dmub_window *cw6);
+
+ void (*setup_mailbox)(struct dmub_srv *dmub,
+ const struct dmub_region *inbox1);
+
+ uint32_t (*get_inbox1_rptr)(struct dmub_srv *dmub);
+
+ void (*set_inbox1_wptr)(struct dmub_srv *dmub, uint32_t wptr_offset);
+
+ bool (*is_supported)(struct dmub_srv *dmub);
+
+ bool (*is_hw_init)(struct dmub_srv *dmub);
+
+ bool (*is_phy_init)(struct dmub_srv *dmub);
+
+ bool (*is_auto_load_done)(struct dmub_srv *dmub);
+};
+
+/**
+ * struct dmub_srv_create_params - params for dmub service creation
+ * @funcs: driver supplied base routines
+ * @hw_funcs: optional overrides for hw funcs
+ * @user_ctx: context data for callback funcs
+ * @asic: driver supplied asic
+ * @is_virtual: false for hw support only
+ */
+struct dmub_srv_create_params {
+ struct dmub_srv_base_funcs funcs;
+ struct dmub_srv_hw_funcs *hw_funcs;
+ void *user_ctx;
+ enum dmub_asic asic;
+ bool is_virtual;
+};
+
+/**
+ * struct dmub_srv_hw_params - params for dmub hardware initialization
+ * @fb: framebuffer info for each region
+ * @fb_base: base of the framebuffer aperture
+ * @fb_offset: offset of the framebuffer aperture
+ * @psp_version: psp version to pass for DMCU init
+ * @load_inst_const: true if DMUB should load inst const fw
+ */
+struct dmub_srv_hw_params {
+ struct dmub_fb *fb[DMUB_WINDOW_TOTAL];
+ uint64_t fb_base;
+ uint64_t fb_offset;
+ uint32_t psp_version;
+ bool load_inst_const;
+};
+
+/**
+ * struct dmub_srv - software state for dmcub
+ * @asic: dmub asic identifier
+ * @user_ctx: user provided context for the dmub_srv
+ * @is_virtual: false if hardware support only
+ * @fw_state: dmub firmware state pointer
+ */
+struct dmub_srv {
+ enum dmub_asic asic;
+ void *user_ctx;
+ bool is_virtual;
+ volatile const struct dmub_fw_state *fw_state;
+
+ /* private: internal use only */
+ const struct dmub_srv_common_regs *regs;
+
+ struct dmub_srv_base_funcs funcs;
+ struct dmub_srv_hw_funcs hw_funcs;
+ struct dmub_rb inbox1_rb;
+
+ bool sw_init;
+ bool hw_init;
+
+ uint64_t fb_base;
+ uint64_t fb_offset;
+ uint32_t psp_version;
+};
+
+/**
+ * dmub_srv_create() - creates the DMUB service.
+ * @dmub: the dmub service
+ * @params: creation parameters for the service
+ *
+ * Return:
+ * DMUB_STATUS_OK - success
+ * DMUB_STATUS_INVALID - unspecified error
+ */
+enum dmub_status dmub_srv_create(struct dmub_srv *dmub,
+ const struct dmub_srv_create_params *params);
+
+/**
+ * dmub_srv_destroy() - destroys the DMUB service.
+ * @dmub: the dmub service
+ */
+void dmub_srv_destroy(struct dmub_srv *dmub);
+
+/**
+ * dmub_srv_calc_region_info() - retrieves region info from the dmub service
+ * @dmub: the dmub service
+ * @params: parameters used to calculate region locations
+ * @out: the output region info from dmub
+ *
+ * Calculates the base and top address for all relevant dmub regions
+ * using the parameters given (if any).
+ *
+ * Return:
+ * DMUB_STATUS_OK - success
+ * DMUB_STATUS_INVALID - unspecified error
+ */
+enum dmub_status
+dmub_srv_calc_region_info(struct dmub_srv *dmub,
+ const struct dmub_srv_region_params *params,
+ struct dmub_srv_region_info *out);
+
+/**
+ * dmub_srv_calc_fb_info() - retrieves fb info from the dmub service
+ * @dmub: the dmub service
+ * @params: parameters used to calculate fb locations
+ * @out: the output fb info from dmub
+ *
+ * Calculates the base addresses for all relevant dmub framebuffers
+ * using the parameters given (if any).
+ *
+ * Return:
+ * DMUB_STATUS_OK - success
+ * DMUB_STATUS_INVALID - unspecified error
+ */
+enum dmub_status dmub_srv_calc_fb_info(struct dmub_srv *dmub,
+ const struct dmub_srv_fb_params *params,
+ struct dmub_srv_fb_info *out);
+
+/**
+ * dmub_srv_has_hw_support() - returns hw support state for dmcub
+ * @dmub: the dmub service
+ * @is_supported: hw support state
+ *
+ * Queries the hardware for DMCUB support and returns the result.
+ *
+ * Can be called before dmub_srv_hw_init().
+ *
+ * Return:
+ * DMUB_STATUS_OK - success
+ * DMUB_STATUS_INVALID - unspecified error
+ */
+enum dmub_status dmub_srv_has_hw_support(struct dmub_srv *dmub,
+ bool *is_supported);
+
+/**
+ * dmub_srv_is_hw_init() - returns hardware init state
+ * @dmub: the dmub service
+ * @is_hw_init: hardware init state
+ *
+ * Return:
+ * DMUB_STATUS_OK - success
+ * DMUB_STATUS_INVALID - unspecified error
+ */
+enum dmub_status dmub_srv_is_hw_init(struct dmub_srv *dmub, bool *is_hw_init);
+
+/**
+ * dmub_srv_hw_init() - initializes the underlying DMUB hardware
+ * @dmub: the dmub service
+ * @params: params for hardware initialization
+ *
+ * Resets the DMUB hardware and performs backdoor loading of the
+ * required cache regions based on the input framebuffer regions.
+ *
+ * Return:
+ * DMUB_STATUS_OK - success
+ * DMUB_STATUS_NO_CTX - dmcub context not initialized
+ * DMUB_STATUS_INVALID - unspecified error
+ */
+enum dmub_status dmub_srv_hw_init(struct dmub_srv *dmub,
+ const struct dmub_srv_hw_params *params);
+
+/**
+ * dmub_srv_cmd_queue() - queues a command to the DMUB
+ * @dmub: the dmub service
+ * @cmd: the command to queue
+ *
+ * Queues a command to the DMUB service but does not begin execution
+ * immediately.
+ *
+ * Return:
+ * DMUB_STATUS_OK - success
+ * DMUB_STATUS_QUEUE_FULL - no remaining room in queue
+ * DMUB_STATUS_INVALID - unspecified error
+ */
+enum dmub_status dmub_srv_cmd_queue(struct dmub_srv *dmub,
+ const struct dmub_cmd_header *cmd);
+
+/**
+ * dmub_srv_cmd_execute() - Executes a queued sequence to the dmub
+ * @dmub: the dmub service
+ *
+ * Begins execution of queued commands on the dmub.
+ *
+ * Return:
+ * DMUB_STATUS_OK - success
+ * DMUB_STATUS_INVALID - unspecified error
+ */
+enum dmub_status dmub_srv_cmd_execute(struct dmub_srv *dmub);
+
+/**
+ * dmub_srv_wait_for_auto_load() - Waits for firmware auto load to complete
+ * @dmub: the dmub service
+ * @timeout_us: the maximum number of microseconds to wait
+ *
+ * Waits until firmware has been autoloaded by the DMCUB. The maximum
+ * wait time is given in microseconds to prevent spinning forever.
+ *
+ * On ASICs without firmware autoload support this function will return
+ * immediately.
+ *
+ * Return:
+ * DMUB_STATUS_OK - success
+ * DMUB_STATUS_TIMEOUT - wait for auto load timed out
+ * DMUB_STATUS_INVALID - unspecified error
+ */
+enum dmub_status dmub_srv_wait_for_auto_load(struct dmub_srv *dmub,
+ uint32_t timeout_us);
+
+/**
+ * dmub_srv_wait_for_phy_init() - Waits for DMUB PHY init to complete
+ * @dmub: the dmub service
+ * @timeout_us: the maximum number of microseconds to wait
+ *
+ * Waits until the PHY has been initialized by the DMUB. The maximum
+ * wait time is given in microseconds to prevent spinning forever.
+ *
+ * On ASICs without PHY init support this function will return
+ * immediately.
+ *
+ * Return:
+ * DMUB_STATUS_OK - success
+ * DMUB_STATUS_TIMEOUT - wait for phy init timed out
+ * DMUB_STATUS_INVALID - unspecified error
+ */
+enum dmub_status dmub_srv_wait_for_phy_init(struct dmub_srv *dmub,
+ uint32_t timeout_us);
+
+/**
+ * dmub_srv_wait_for_idle() - Waits for the DMUB to be idle
+ * @dmub: the dmub service
+ * @timeout_us: the maximum number of microseconds to wait
+ *
+ * Waits until the DMUB buffer is empty and all commands have
+ * finished processing. The maximum wait time is given in
+ * microseconds to prevent spinning forever.
+ *
+ * Return:
+ * DMUB_STATUS_OK - success
+ * DMUB_STATUS_TIMEOUT - wait for buffer to flush timed out
+ * DMUB_STATUS_INVALID - unspecified error
+ */
+enum dmub_status dmub_srv_wait_for_idle(struct dmub_srv *dmub,
+ uint32_t timeout_us);
+
+#if defined(__cplusplus)
+}
+#endif
+
+#endif /* _DMUB_SRV_H_ */
diff --git a/drivers/gpu/drm/amd/display/dmub/inc/dmub_trace_buffer.h b/drivers/gpu/drm/amd/display/dmub/inc/dmub_trace_buffer.h
new file mode 100644
index 000000000000..6b3ee42db350
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dmub/inc/dmub_trace_buffer.h
@@ -0,0 +1,69 @@
+/*
+ * Copyright 2019 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+#ifndef _DMUB_TRACE_BUFFER_H_
+#define _DMUB_TRACE_BUFFER_H_
+
+#include "dmub_types.h"
+
+#define LOAD_DMCU_FW 1
+#define LOAD_PHY_FW 2
+
+
+enum dmcub_trace_code {
+ DMCUB__UNKNOWN,
+ DMCUB__MAIN_BEGIN,
+ DMCUB__PHY_INIT_BEGIN,
+ DMCUB__PHY_FW_SRAM_LOAD_BEGIN,
+ DMCUB__PHY_FW_SRAM_LOAD_END,
+ DMCUB__PHY_INIT_POLL_DONE,
+ DMCUB__PHY_INIT_END,
+ DMCUB__DMCU_ERAM_LOAD_BEGIN,
+ DMCUB__DMCU_ERAM_LOAD_END,
+ DMCUB__DMCU_ISR_LOAD_BEGIN,
+ DMCUB__DMCU_ISR_LOAD_END,
+ DMCUB__MAIN_IDLE,
+ DMCUB__PERF_TRACE,
+ DMCUB__PG_DONE,
+};
+
+struct dmcub_trace_buf_entry {
+ enum dmcub_trace_code trace_code;
+ uint32_t tick_count;
+ uint32_t param0;
+ uint32_t param1;
+};
+
+#define TRACE_BUF_SIZE (1024) // 1 kB
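+/* 8 bytes account for the entry_count and clk_freq header fields below */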
+#define PERF_TRACE_MAX_ENTRY ((TRACE_BUF_SIZE - 8)/sizeof(struct dmcub_trace_buf_entry))
+
+
+struct dmcub_trace_buf {
+ uint32_t entry_count;
+ uint32_t clk_freq;
+ struct dmcub_trace_buf_entry entries[PERF_TRACE_MAX_ENTRY];
+};
+
+
+#endif /* _DMUB_TRACE_BUFFER_H_ */
diff --git a/drivers/gpu/drm/amd/display/dmub/inc/dmub_types.h b/drivers/gpu/drm/amd/display/dmub/inc/dmub_types.h
new file mode 100644
index 000000000000..41d524b0db2f
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dmub/inc/dmub_types.h
@@ -0,0 +1,64 @@
+/*
+ * Copyright 2019 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef _DMUB_TYPES_H_
+#define _DMUB_TYPES_H_
+
+/* Basic type definitions. */
+#include <asm/byteorder.h>
+#include <linux/types.h>
+#include <linux/string.h>
+#include <linux/delay.h>
+#include <stdarg.h>
+
+#if defined(__cplusplus)
+extern "C" {
+#endif
+
+#ifndef dmub_memcpy
+#define dmub_memcpy(dest, source, bytes) memcpy((dest), (source), (bytes))
+#endif
+
+#ifndef dmub_memset
+#define dmub_memset(dest, val, bytes) memset((dest), (val), (bytes))
+#endif
+
+#ifndef dmub_udelay
+#define dmub_udelay(microseconds) udelay(microseconds)
+#endif
+
+union dmub_addr {
+ struct {
+ uint32_t low_part;
+ uint32_t high_part;
+ } u;
+ uint64_t quad_part;
+};
+
+#if defined(__cplusplus)
+}
+#endif
+
+#endif /* _DMUB_TYPES_H_ */
diff --git a/drivers/gpu/drm/amd/display/dmub/src/Makefile b/drivers/gpu/drm/amd/display/dmub/src/Makefile
new file mode 100644
index 000000000000..e08dfeea24b0
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dmub/src/Makefile
@@ -0,0 +1,27 @@
+#
+# Copyright 2019 Advanced Micro Devices, Inc.
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the "Software"),
+# to deal in the Software without restriction, including without limitation
+# the rights to use, copy, modify, merge, publish, distribute, sublicense,
+# and/or sell copies of the Software, and to permit persons to whom the
+# Software is furnished to do so, subject to the following conditions:
+#
+# The above copyright notice and this permission notice shall be included in
+# all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+# THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+# ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+# OTHER DEALINGS IN THE SOFTWARE.
+#
+
+DMUB = dmub_srv.o dmub_reg.o dmub_dcn20.o dmub_dcn21.o
+
+AMD_DAL_DMUB = $(addprefix $(AMDDALPATH)/dmub/src/,$(DMUB))
+
+AMD_DISPLAY_FILES += $(AMD_DAL_DMUB)
diff --git a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn20.c b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn20.c
new file mode 100644
index 000000000000..cd51c6138894
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn20.c
@@ -0,0 +1,202 @@
+/*
+ * Copyright 2019 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include "../inc/dmub_srv.h"
+#include "dmub_reg.h"
+#include "dmub_dcn20.h"
+
+#include "dcn/dcn_2_0_0_offset.h"
+#include "dcn/dcn_2_0_0_sh_mask.h"
+#include "soc15_hw_ip.h"
+#include "vega10_ip_offset.h"
+
+#define BASE_INNER(seg) DCN_BASE__INST0_SEG##seg
+#define CTX dmub
+#define REGS dmub->regs
+
+/* Registers. */
+
+const struct dmub_srv_common_regs dmub_srv_dcn20_regs = {
+#define DMUB_SR(reg) REG_OFFSET(reg),
+ { DMUB_COMMON_REGS() },
+#undef DMUB_SR
+
+#define DMUB_SF(reg, field) FD_MASK(reg, field),
+ { DMUB_COMMON_FIELDS() },
+#undef DMUB_SF
+
+#define DMUB_SF(reg, field) FD_SHIFT(reg, field),
+ { DMUB_COMMON_FIELDS() },
+#undef DMUB_SF
+};
+
+/* Shared functions. */
+
+static inline void dmub_dcn20_translate_addr(const union dmub_addr *addr_in,
+ uint64_t fb_base,
+ uint64_t fb_offset,
+ union dmub_addr *addr_out)
+{
+ addr_out->quad_part = addr_in->quad_part - fb_base + fb_offset;
+}
+
+void dmub_dcn20_reset(struct dmub_srv *dmub)
+{
+ REG_UPDATE(DMCUB_CNTL, DMCUB_SOFT_RESET, 1);
+ REG_UPDATE(DMCUB_CNTL, DMCUB_ENABLE, 0);
+ REG_UPDATE(MMHUBBUB_SOFT_RESET, DMUIF_SOFT_RESET, 1);
+}
+
+void dmub_dcn20_reset_release(struct dmub_srv *dmub)
+{
+ REG_UPDATE(MMHUBBUB_SOFT_RESET, DMUIF_SOFT_RESET, 0);
+ REG_WRITE(DMCUB_SCRATCH15, dmub->psp_version & 0x001100FF);
+ REG_UPDATE_2(DMCUB_CNTL, DMCUB_ENABLE, 1, DMCUB_TRACEPORT_EN, 1);
+ REG_UPDATE(DMCUB_CNTL, DMCUB_SOFT_RESET, 0);
+}
+
+void dmub_dcn20_backdoor_load(struct dmub_srv *dmub,
+ const struct dmub_window *cw0,
+ const struct dmub_window *cw1)
+{
+ union dmub_addr offset;
+ uint64_t fb_base = dmub->fb_base, fb_offset = dmub->fb_offset;
+
+ REG_UPDATE(DMCUB_SEC_CNTL, DMCUB_SEC_RESET, 1);
+ REG_UPDATE_2(DMCUB_MEM_CNTL, DMCUB_MEM_READ_SPACE, 0x3,
+ DMCUB_MEM_WRITE_SPACE, 0x3);
+
+ dmub_dcn20_translate_addr(&cw0->offset, fb_base, fb_offset, &offset);
+
+ REG_WRITE(DMCUB_REGION3_CW0_OFFSET, offset.u.low_part);
+ REG_WRITE(DMCUB_REGION3_CW0_OFFSET_HIGH, offset.u.high_part);
+ REG_WRITE(DMCUB_REGION3_CW0_BASE_ADDRESS, cw0->region.base);
+ REG_SET_2(DMCUB_REGION3_CW0_TOP_ADDRESS, 0,
+ DMCUB_REGION3_CW0_TOP_ADDRESS, cw0->region.top,
+ DMCUB_REGION3_CW0_ENABLE, 1);
+
+ dmub_dcn20_translate_addr(&cw1->offset, fb_base, fb_offset, &offset);
+
+ REG_WRITE(DMCUB_REGION3_CW1_OFFSET, offset.u.low_part);
+ REG_WRITE(DMCUB_REGION3_CW1_OFFSET_HIGH, offset.u.high_part);
+ REG_WRITE(DMCUB_REGION3_CW1_BASE_ADDRESS, cw1->region.base);
+ REG_SET_2(DMCUB_REGION3_CW1_TOP_ADDRESS, 0,
+ DMCUB_REGION3_CW1_TOP_ADDRESS, cw1->region.top,
+ DMCUB_REGION3_CW1_ENABLE, 1);
+
+ REG_UPDATE_2(DMCUB_SEC_CNTL, DMCUB_SEC_RESET, 0, DMCUB_MEM_UNIT_ID,
+ 0x20);
+}
+
+void dmub_dcn20_setup_windows(struct dmub_srv *dmub,
+ const struct dmub_window *cw2,
+ const struct dmub_window *cw3,
+ const struct dmub_window *cw4,
+ const struct dmub_window *cw5,
+ const struct dmub_window *cw6)
+{
+ union dmub_addr offset;
+ uint64_t fb_base = dmub->fb_base, fb_offset = dmub->fb_offset;
+
+ dmub_dcn20_translate_addr(&cw2->offset, fb_base, fb_offset, &offset);
+
+ REG_WRITE(DMCUB_REGION3_CW2_OFFSET, offset.u.low_part);
+ REG_WRITE(DMCUB_REGION3_CW2_OFFSET_HIGH, offset.u.high_part);
+ REG_WRITE(DMCUB_REGION3_CW2_BASE_ADDRESS, cw2->region.base);
+ REG_SET_2(DMCUB_REGION3_CW2_TOP_ADDRESS, 0,
+ DMCUB_REGION3_CW2_TOP_ADDRESS, cw2->region.top,
+ DMCUB_REGION3_CW2_ENABLE, 1);
+
+ dmub_dcn20_translate_addr(&cw3->offset, fb_base, fb_offset, &offset);
+
+ REG_WRITE(DMCUB_REGION3_CW3_OFFSET, offset.u.low_part);
+ REG_WRITE(DMCUB_REGION3_CW3_OFFSET_HIGH, offset.u.high_part);
+ REG_WRITE(DMCUB_REGION3_CW3_BASE_ADDRESS, cw3->region.base);
+ REG_SET_2(DMCUB_REGION3_CW3_TOP_ADDRESS, 0,
+ DMCUB_REGION3_CW3_TOP_ADDRESS, cw3->region.top,
+ DMCUB_REGION3_CW3_ENABLE, 1);
+
+ /* TODO: Move this to CW4. */
+ dmub_dcn20_translate_addr(&cw4->offset, fb_base, fb_offset, &offset);
+
+ REG_WRITE(DMCUB_REGION4_OFFSET, offset.u.low_part);
+ REG_WRITE(DMCUB_REGION4_OFFSET_HIGH, offset.u.high_part);
+ REG_SET_2(DMCUB_REGION4_TOP_ADDRESS, 0, DMCUB_REGION4_TOP_ADDRESS,
+ cw4->region.top - cw4->region.base - 1, DMCUB_REGION4_ENABLE,
+ 1);
+
+ dmub_dcn20_translate_addr(&cw5->offset, fb_base, fb_offset, &offset);
+
+ REG_WRITE(DMCUB_REGION3_CW5_OFFSET, offset.u.low_part);
+ REG_WRITE(DMCUB_REGION3_CW5_OFFSET_HIGH, offset.u.high_part);
+ REG_WRITE(DMCUB_REGION3_CW5_BASE_ADDRESS, cw5->region.base);
+ REG_SET_2(DMCUB_REGION3_CW5_TOP_ADDRESS, 0,
+ DMCUB_REGION3_CW5_TOP_ADDRESS, cw5->region.top,
+ DMCUB_REGION3_CW5_ENABLE, 1);
+
+ dmub_dcn20_translate_addr(&cw6->offset, fb_base, fb_offset, &offset);
+
+ REG_WRITE(DMCUB_REGION3_CW6_OFFSET, offset.u.low_part);
+ REG_WRITE(DMCUB_REGION3_CW6_OFFSET_HIGH, offset.u.high_part);
+ REG_WRITE(DMCUB_REGION3_CW6_BASE_ADDRESS, cw6->region.base);
+ REG_SET_2(DMCUB_REGION3_CW6_TOP_ADDRESS, 0,
+ DMCUB_REGION3_CW6_TOP_ADDRESS, cw6->region.top,
+ DMCUB_REGION3_CW6_ENABLE, 1);
+}
+
+void dmub_dcn20_setup_mailbox(struct dmub_srv *dmub,
+ const struct dmub_region *inbox1)
+{
+ /* TODO: Use CW4 instead of region 4. */
+
+ REG_WRITE(DMCUB_INBOX1_BASE_ADDRESS, 0x80000000);
+ REG_WRITE(DMCUB_INBOX1_SIZE, inbox1->top - inbox1->base);
+ REG_WRITE(DMCUB_INBOX1_RPTR, 0);
+ REG_WRITE(DMCUB_INBOX1_WPTR, 0);
+}
+
+uint32_t dmub_dcn20_get_inbox1_rptr(struct dmub_srv *dmub)
+{
+ return REG_READ(DMCUB_INBOX1_RPTR);
+}
+
+void dmub_dcn20_set_inbox1_wptr(struct dmub_srv *dmub, uint32_t wptr_offset)
+{
+ REG_WRITE(DMCUB_INBOX1_WPTR, wptr_offset);
+}
+
+bool dmub_dcn20_is_hw_init(struct dmub_srv *dmub)
+{
+ return REG_READ(DMCUB_REGION3_CW2_BASE_ADDRESS) != 0;
+}
+
+bool dmub_dcn20_is_supported(struct dmub_srv *dmub)
+{
+ uint32_t supported = 0;
+
+ REG_GET(CC_DC_PIPE_DIS, DC_DMCUB_ENABLE, &supported);
+
+ return supported;
+}
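
dmub_dcn20_translate_addr() rebases a driver-side framebuffer address into
the DMCUB's view of the same aperture. A worked sketch with made-up values
(the numbers are illustrative, not real hardware addresses):

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
            uint64_t fb_base   = 0xF400000000ULL; /* driver GPU VA of FB */
            uint64_t fb_offset = 0x0ULL;          /* DMCUB view of FB    */
            uint64_t addr_in   = fb_base + 0x2000;

            /* addr_out = addr_in - fb_base + fb_offset = 0x2000 */
            printf("dmub addr = 0x%llx\n",
                   (unsigned long long)(addr_in - fb_base + fb_offset));
            return 0;
    }
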
diff --git a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn20.h b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn20.h
new file mode 100644
index 000000000000..53bfd4da69ad
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn20.h
@@ -0,0 +1,182 @@
+/*
+ * Copyright 2019 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef _DMUB_DCN20_H_
+#define _DMUB_DCN20_H_
+
+#include "../inc/dmub_types.h"
+
+struct dmub_srv;
+
+/* DCN20 register definitions. */
+
+#define DMUB_COMMON_REGS() \
+ DMUB_SR(DMCUB_CNTL) \
+ DMUB_SR(DMCUB_MEM_CNTL) \
+ DMUB_SR(DMCUB_SEC_CNTL) \
+ DMUB_SR(DMCUB_INBOX1_BASE_ADDRESS) \
+ DMUB_SR(DMCUB_INBOX1_SIZE) \
+ DMUB_SR(DMCUB_INBOX1_RPTR) \
+ DMUB_SR(DMCUB_INBOX1_WPTR) \
+ DMUB_SR(DMCUB_REGION3_CW0_OFFSET) \
+ DMUB_SR(DMCUB_REGION3_CW1_OFFSET) \
+ DMUB_SR(DMCUB_REGION3_CW2_OFFSET) \
+ DMUB_SR(DMCUB_REGION3_CW3_OFFSET) \
+ DMUB_SR(DMCUB_REGION3_CW4_OFFSET) \
+ DMUB_SR(DMCUB_REGION3_CW5_OFFSET) \
+ DMUB_SR(DMCUB_REGION3_CW6_OFFSET) \
+ DMUB_SR(DMCUB_REGION3_CW7_OFFSET) \
+ DMUB_SR(DMCUB_REGION3_CW0_OFFSET_HIGH) \
+ DMUB_SR(DMCUB_REGION3_CW1_OFFSET_HIGH) \
+ DMUB_SR(DMCUB_REGION3_CW2_OFFSET_HIGH) \
+ DMUB_SR(DMCUB_REGION3_CW3_OFFSET_HIGH) \
+ DMUB_SR(DMCUB_REGION3_CW4_OFFSET_HIGH) \
+ DMUB_SR(DMCUB_REGION3_CW5_OFFSET_HIGH) \
+ DMUB_SR(DMCUB_REGION3_CW6_OFFSET_HIGH) \
+ DMUB_SR(DMCUB_REGION3_CW7_OFFSET_HIGH) \
+ DMUB_SR(DMCUB_REGION3_CW0_BASE_ADDRESS) \
+ DMUB_SR(DMCUB_REGION3_CW1_BASE_ADDRESS) \
+ DMUB_SR(DMCUB_REGION3_CW2_BASE_ADDRESS) \
+ DMUB_SR(DMCUB_REGION3_CW3_BASE_ADDRESS) \
+ DMUB_SR(DMCUB_REGION3_CW4_BASE_ADDRESS) \
+ DMUB_SR(DMCUB_REGION3_CW5_BASE_ADDRESS) \
+ DMUB_SR(DMCUB_REGION3_CW6_BASE_ADDRESS) \
+ DMUB_SR(DMCUB_REGION3_CW7_BASE_ADDRESS) \
+ DMUB_SR(DMCUB_REGION3_CW0_TOP_ADDRESS) \
+ DMUB_SR(DMCUB_REGION3_CW1_TOP_ADDRESS) \
+ DMUB_SR(DMCUB_REGION3_CW2_TOP_ADDRESS) \
+ DMUB_SR(DMCUB_REGION3_CW3_TOP_ADDRESS) \
+ DMUB_SR(DMCUB_REGION3_CW4_TOP_ADDRESS) \
+ DMUB_SR(DMCUB_REGION3_CW5_TOP_ADDRESS) \
+ DMUB_SR(DMCUB_REGION3_CW6_TOP_ADDRESS) \
+ DMUB_SR(DMCUB_REGION3_CW7_TOP_ADDRESS) \
+ DMUB_SR(DMCUB_REGION4_OFFSET) \
+ DMUB_SR(DMCUB_REGION4_OFFSET_HIGH) \
+ DMUB_SR(DMCUB_REGION4_TOP_ADDRESS) \
+ DMUB_SR(DMCUB_SCRATCH0) \
+ DMUB_SR(DMCUB_SCRATCH1) \
+ DMUB_SR(DMCUB_SCRATCH2) \
+ DMUB_SR(DMCUB_SCRATCH3) \
+ DMUB_SR(DMCUB_SCRATCH4) \
+ DMUB_SR(DMCUB_SCRATCH5) \
+ DMUB_SR(DMCUB_SCRATCH6) \
+ DMUB_SR(DMCUB_SCRATCH7) \
+ DMUB_SR(DMCUB_SCRATCH8) \
+ DMUB_SR(DMCUB_SCRATCH9) \
+ DMUB_SR(DMCUB_SCRATCH10) \
+ DMUB_SR(DMCUB_SCRATCH11) \
+ DMUB_SR(DMCUB_SCRATCH12) \
+ DMUB_SR(DMCUB_SCRATCH13) \
+ DMUB_SR(DMCUB_SCRATCH14) \
+ DMUB_SR(DMCUB_SCRATCH15) \
+ DMUB_SR(CC_DC_PIPE_DIS) \
+ DMUB_SR(MMHUBBUB_SOFT_RESET)
+
+#define DMUB_COMMON_FIELDS() \
+ DMUB_SF(DMCUB_CNTL, DMCUB_ENABLE) \
+ DMUB_SF(DMCUB_CNTL, DMCUB_SOFT_RESET) \
+ DMUB_SF(DMCUB_CNTL, DMCUB_TRACEPORT_EN) \
+ DMUB_SF(DMCUB_MEM_CNTL, DMCUB_MEM_READ_SPACE) \
+ DMUB_SF(DMCUB_MEM_CNTL, DMCUB_MEM_WRITE_SPACE) \
+ DMUB_SF(DMCUB_SEC_CNTL, DMCUB_SEC_RESET) \
+ DMUB_SF(DMCUB_SEC_CNTL, DMCUB_MEM_UNIT_ID) \
+ DMUB_SF(DMCUB_REGION3_CW0_TOP_ADDRESS, DMCUB_REGION3_CW0_TOP_ADDRESS) \
+ DMUB_SF(DMCUB_REGION3_CW0_TOP_ADDRESS, DMCUB_REGION3_CW0_ENABLE) \
+ DMUB_SF(DMCUB_REGION3_CW1_TOP_ADDRESS, DMCUB_REGION3_CW1_TOP_ADDRESS) \
+ DMUB_SF(DMCUB_REGION3_CW1_TOP_ADDRESS, DMCUB_REGION3_CW1_ENABLE) \
+ DMUB_SF(DMCUB_REGION3_CW2_TOP_ADDRESS, DMCUB_REGION3_CW2_TOP_ADDRESS) \
+ DMUB_SF(DMCUB_REGION3_CW2_TOP_ADDRESS, DMCUB_REGION3_CW2_ENABLE) \
+ DMUB_SF(DMCUB_REGION3_CW3_TOP_ADDRESS, DMCUB_REGION3_CW3_TOP_ADDRESS) \
+ DMUB_SF(DMCUB_REGION3_CW3_TOP_ADDRESS, DMCUB_REGION3_CW3_ENABLE) \
+ DMUB_SF(DMCUB_REGION3_CW4_TOP_ADDRESS, DMCUB_REGION3_CW4_TOP_ADDRESS) \
+ DMUB_SF(DMCUB_REGION3_CW4_TOP_ADDRESS, DMCUB_REGION3_CW4_ENABLE) \
+ DMUB_SF(DMCUB_REGION3_CW5_TOP_ADDRESS, DMCUB_REGION3_CW5_TOP_ADDRESS) \
+ DMUB_SF(DMCUB_REGION3_CW5_TOP_ADDRESS, DMCUB_REGION3_CW5_ENABLE) \
+ DMUB_SF(DMCUB_REGION3_CW6_TOP_ADDRESS, DMCUB_REGION3_CW6_TOP_ADDRESS) \
+ DMUB_SF(DMCUB_REGION3_CW6_TOP_ADDRESS, DMCUB_REGION3_CW6_ENABLE) \
+ DMUB_SF(DMCUB_REGION3_CW7_TOP_ADDRESS, DMCUB_REGION3_CW7_TOP_ADDRESS) \
+ DMUB_SF(DMCUB_REGION3_CW7_TOP_ADDRESS, DMCUB_REGION3_CW7_ENABLE) \
+ DMUB_SF(DMCUB_REGION4_TOP_ADDRESS, DMCUB_REGION4_TOP_ADDRESS) \
+ DMUB_SF(DMCUB_REGION4_TOP_ADDRESS, DMCUB_REGION4_ENABLE) \
+ DMUB_SF(CC_DC_PIPE_DIS, DC_DMCUB_ENABLE) \
+ DMUB_SF(MMHUBBUB_SOFT_RESET, DMUIF_SOFT_RESET)
+
+struct dmub_srv_common_reg_offset {
+#define DMUB_SR(reg) uint32_t reg;
+ DMUB_COMMON_REGS()
+#undef DMUB_SR
+};
+
+struct dmub_srv_common_reg_shift {
+#define DMUB_SF(reg, field) uint8_t reg##__##field;
+ DMUB_COMMON_FIELDS()
+#undef DMUB_SF
+};
+
+struct dmub_srv_common_reg_mask {
+#define DMUB_SF(reg, field) uint32_t reg##__##field;
+ DMUB_COMMON_FIELDS()
+#undef DMUB_SF
+};
+
+struct dmub_srv_common_regs {
+ const struct dmub_srv_common_reg_offset offset;
+ const struct dmub_srv_common_reg_mask mask;
+ const struct dmub_srv_common_reg_shift shift;
+};
+
+extern const struct dmub_srv_common_regs dmub_srv_dcn20_regs;
+
+/* Hardware functions. */
+
+void dmub_dcn20_init(struct dmub_srv *dmub);
+
+void dmub_dcn20_reset(struct dmub_srv *dmub);
+
+void dmub_dcn20_reset_release(struct dmub_srv *dmub);
+
+void dmub_dcn20_backdoor_load(struct dmub_srv *dmub,
+ const struct dmub_window *cw0,
+ const struct dmub_window *cw1);
+
+void dmub_dcn20_setup_windows(struct dmub_srv *dmub,
+ const struct dmub_window *cw2,
+ const struct dmub_window *cw3,
+ const struct dmub_window *cw4,
+ const struct dmub_window *cw5,
+ const struct dmub_window *cw6);
+
+void dmub_dcn20_setup_mailbox(struct dmub_srv *dmub,
+ const struct dmub_region *inbox1);
+
+uint32_t dmub_dcn20_get_inbox1_rptr(struct dmub_srv *dmub);
+
+void dmub_dcn20_set_inbox1_wptr(struct dmub_srv *dmub, uint32_t wptr_offset);
+
+bool dmub_dcn20_is_hw_init(struct dmub_srv *dmub);
+
+bool dmub_dcn20_is_supported(struct dmub_srv *dmub);
+
+#endif /* _DMUB_DCN20_H_ */
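
DMUB_COMMON_REGS()/DMUB_COMMON_FIELDS() are X-macro lists: dmub_dcn20.c
redefines DMUB_SR/DMUB_SF three times around the same list so the offset,
mask, and shift tables are generated from a single source and can never
drift apart. A minimal standalone sketch of the pattern (REG_LIST, mmFOO,
and the struct names here are invented for illustration):

    #include <stdint.h>
    #include <stdio.h>

    #define mmFOO 0x10
    #define mmBAR 0x14

    #define REG_LIST() \
            X(FOO)     \
            X(BAR)

    struct reg_offsets {
    #define X(name) uint32_t name;
            REG_LIST()
    #undef X
    };

    static const struct reg_offsets offsets = {
    #define X(name) mm##name, /* same pasting trick as REG_OFFSET() */
            REG_LIST()
    #undef X
    };

    int main(void)
    {
            printf("FOO=0x%x BAR=0x%x\n", offsets.FOO, offsets.BAR);
            return 0;
    }
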
diff --git a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn21.c b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn21.c
new file mode 100644
index 000000000000..5bed9fcd6b5c
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn21.c
@@ -0,0 +1,64 @@
+/*
+ * Copyright 2019 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include "../inc/dmub_srv.h"
+#include "dmub_reg.h"
+#include "dmub_dcn21.h"
+
+#include "dcn/dcn_2_1_0_offset.h"
+#include "dcn/dcn_2_1_0_sh_mask.h"
+#include "renoir_ip_offset.h"
+
+#define BASE_INNER(seg) DMU_BASE__INST0_SEG##seg
+#define CTX dmub
+#define REGS dmub->regs
+
+/* Registers. */
+
+const struct dmub_srv_common_regs dmub_srv_dcn21_regs = {
+#define DMUB_SR(reg) REG_OFFSET(reg),
+ { DMUB_COMMON_REGS() },
+#undef DMUB_SR
+
+#define DMUB_SF(reg, field) FD_MASK(reg, field),
+ { DMUB_COMMON_FIELDS() },
+#undef DMUB_SF
+
+#define DMUB_SF(reg, field) FD_SHIFT(reg, field),
+ { DMUB_COMMON_FIELDS() },
+#undef DMUB_SF
+};
+
+/* Shared functions. */
+
+bool dmub_dcn21_is_auto_load_done(struct dmub_srv *dmub)
+{
+ return (REG_READ(DMCUB_SCRATCH0) == 3);
+}
+
+bool dmub_dcn21_is_phy_init(struct dmub_srv *dmub)
+{
+ return REG_READ(DMCUB_SCRATCH10) == 0;
+}
diff --git a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn21.h b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn21.h
new file mode 100644
index 000000000000..2bbea237137b
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn21.h
@@ -0,0 +1,41 @@
+/*
+ * Copyright 2019 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef _DMUB_DCN21_H_
+#define _DMUB_DCN21_H_
+
+#include "dmub_dcn20.h"
+
+/* Registers. */
+
+extern const struct dmub_srv_common_regs dmub_srv_dcn21_regs;
+
+/* Hardware functions. */
+
+bool dmub_dcn21_is_auto_load_done(struct dmub_srv *dmub);
+
+bool dmub_dcn21_is_phy_init(struct dmub_srv *dmub);
+
+#endif /* _DMUB_DCN21_H_ */
diff --git a/drivers/gpu/drm/amd/display/dmub/src/dmub_reg.c b/drivers/gpu/drm/amd/display/dmub/src/dmub_reg.c
new file mode 100644
index 000000000000..4094eca212f0
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dmub/src/dmub_reg.c
@@ -0,0 +1,109 @@
+/*
+ * Copyright 2019 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include "dmub_reg.h"
+#include "../inc/dmub_srv.h"
+
+struct dmub_reg_value_masks {
+ uint32_t value;
+ uint32_t mask;
+};
+
+static inline void
+set_reg_field_value_masks(struct dmub_reg_value_masks *field_value_mask,
+ uint32_t value, uint32_t mask, uint8_t shift)
+{
+ field_value_mask->value =
+ (field_value_mask->value & ~mask) | (mask & (value << shift));
+ field_value_mask->mask = field_value_mask->mask | mask;
+}
+
+static void set_reg_field_values(struct dmub_reg_value_masks *field_value_mask,
+ uint32_t addr, int n, uint8_t shift1,
+ uint32_t mask1, uint32_t field_value1,
+ va_list ap)
+{
+ uint32_t shift, mask, field_value;
+ int i = 1;
+
+ /* gather all bits value/mask getting updated in this register */
+ set_reg_field_value_masks(field_value_mask, field_value1, mask1,
+ shift1);
+
+ while (i < n) {
+ shift = va_arg(ap, uint32_t);
+ mask = va_arg(ap, uint32_t);
+ field_value = va_arg(ap, uint32_t);
+
+ set_reg_field_value_masks(field_value_mask, field_value, mask,
+ shift);
+ i++;
+ }
+}
+
+static inline uint32_t get_reg_field_value_ex(uint32_t reg_value, uint32_t mask,
+ uint8_t shift)
+{
+ return (mask & reg_value) >> shift;
+}
+
+void dmub_reg_update(struct dmub_srv *srv, uint32_t addr, int n, uint8_t shift1,
+ uint32_t mask1, uint32_t field_value1, ...)
+{
+ struct dmub_reg_value_masks field_value_mask = { 0 };
+ uint32_t reg_val;
+ va_list ap;
+
+ va_start(ap, field_value1);
+ set_reg_field_values(&field_value_mask, addr, n, shift1, mask1,
+ field_value1, ap);
+ va_end(ap);
+
+ reg_val = srv->funcs.reg_read(srv->user_ctx, addr);
+ reg_val = (reg_val & ~field_value_mask.mask) | field_value_mask.value;
+ srv->funcs.reg_write(srv->user_ctx, addr, reg_val);
+}
+
+void dmub_reg_set(struct dmub_srv *srv, uint32_t addr, uint32_t reg_val, int n,
+ uint8_t shift1, uint32_t mask1, uint32_t field_value1, ...)
+{
+ struct dmub_reg_value_masks field_value_mask = { 0 };
+ va_list ap;
+
+ va_start(ap, field_value1);
+ set_reg_field_values(&field_value_mask, addr, n, shift1, mask1,
+ field_value1, ap);
+ va_end(ap);
+
+ reg_val = (reg_val & ~field_value_mask.mask) | field_value_mask.value;
+ srv->funcs.reg_write(srv->user_ctx, addr, reg_val);
+}
+
+void dmub_reg_get(struct dmub_srv *srv, uint32_t addr, uint8_t shift,
+ uint32_t mask, uint32_t *field_value)
+{
+ uint32_t reg_val = srv->funcs.reg_read(srv->user_ctx, addr);
+ *field_value = get_reg_field_value_ex(reg_val, mask, shift);
+}
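
The helpers above implement a classic read-modify-write: each field
contributes (mask & (value << shift)) and the register keeps every bit
outside the accumulated mask. A worked standalone example of the same
arithmetic:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
            uint32_t reg_val = 0x0000FFFF;
            uint32_t mask    = 0x000000F0; /* field occupies bits [7:4] */
            uint8_t  shift   = 4;
            uint32_t field   = 0xA;
            uint32_t value   = mask & (field << shift);

            /* only bits [7:4] change: prints 0x0000FFAF */
            reg_val = (reg_val & ~mask) | value;
            printf("0x%08X\n", reg_val);
            return 0;
    }
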
diff --git a/drivers/gpu/drm/amd/display/dmub/src/dmub_reg.h b/drivers/gpu/drm/amd/display/dmub/src/dmub_reg.h
new file mode 100644
index 000000000000..c1f4030929a4
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dmub/src/dmub_reg.h
@@ -0,0 +1,124 @@
+/*
+ * Copyright 2019 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef _DMUB_REG_H_
+#define _DMUB_REG_H_
+
+#include "../inc/dmub_types.h"
+
+struct dmub_srv;
+
+/* Register offset and field lookup. */
+
+#define BASE(seg) BASE_INNER(seg)
+
+#define REG_OFFSET(reg_name) (BASE(mm##reg_name##_BASE_IDX) + mm##reg_name)
+
+#define FD_SHIFT(reg_name, field) reg_name##__##field##__SHIFT
+
+#define FD_MASK(reg_name, field) reg_name##__##field##_MASK
+
+#define REG(reg) (REGS)->offset.reg
+
+#define FD(reg_field) (REGS)->shift.reg_field, (REGS)->mask.reg_field
+
+#define FN(reg_name, field) FD(reg_name##__##field)
+
+/* Register reads and writes. */
+
+#define REG_READ(reg) ((CTX)->funcs.reg_read((CTX)->user_ctx, REG(reg)))
+
+#define REG_WRITE(reg, val) \
+ ((CTX)->funcs.reg_write((CTX)->user_ctx, REG(reg), (val)))
+
+/* Register field setting. */
+
+#define REG_SET_N(reg_name, n, initial_val, ...) \
+ dmub_reg_set(CTX, REG(reg_name), initial_val, n, __VA_ARGS__)
+
+#define REG_SET(reg_name, initial_val, field, val) \
+ REG_SET_N(reg_name, 1, initial_val, \
+ FN(reg_name, field), val)
+
+#define REG_SET_2(reg, init_value, f1, v1, f2, v2) \
+ REG_SET_N(reg, 2, init_value, \
+ FN(reg, f1), v1, \
+ FN(reg, f2), v2)
+
+#define REG_SET_3(reg, init_value, f1, v1, f2, v2, f3, v3) \
+ REG_SET_N(reg, 3, init_value, \
+ FN(reg, f1), v1, \
+ FN(reg, f2), v2, \
+ FN(reg, f3), v3)
+
+#define REG_SET_4(reg, init_value, f1, v1, f2, v2, f3, v3, f4, v4) \
+ REG_SET_N(reg, 4, init_value, \
+ FN(reg, f1), v1, \
+ FN(reg, f2), v2, \
+ FN(reg, f3), v3, \
+ FN(reg, f4), v4)
+
+/* Register field updating. */
+
+#define REG_UPDATE_N(reg_name, n, ...)\
+ dmub_reg_update(CTX, REG(reg_name), n, __VA_ARGS__)
+
+#define REG_UPDATE(reg_name, field, val) \
+ REG_UPDATE_N(reg_name, 1, \
+ FN(reg_name, field), val)
+
+#define REG_UPDATE_2(reg, f1, v1, f2, v2) \
+ REG_UPDATE_N(reg, 2,\
+ FN(reg, f1), v1,\
+ FN(reg, f2), v2)
+
+#define REG_UPDATE_3(reg, f1, v1, f2, v2, f3, v3) \
+ REG_UPDATE_N(reg, 3, \
+ FN(reg, f1), v1, \
+ FN(reg, f2), v2, \
+ FN(reg, f3), v3)
+
+#define REG_UPDATE_4(reg, f1, v1, f2, v2, f3, v3, f4, v4) \
+ REG_UPDATE_N(reg, 4, \
+ FN(reg, f1), v1, \
+ FN(reg, f2), v2, \
+ FN(reg, f3), v3, \
+ FN(reg, f4), v4)
+
+/* Register field getting. */
+
+#define REG_GET(reg_name, field, val) \
+ dmub_reg_get(CTX, REG(reg_name), FN(reg_name, field), val)
+
+void dmub_reg_set(struct dmub_srv *srv, uint32_t addr, uint32_t reg_val, int n,
+ uint8_t shift1, uint32_t mask1, uint32_t field_value1, ...);
+
+void dmub_reg_update(struct dmub_srv *srv, uint32_t addr, int n, uint8_t shift1,
+ uint32_t mask1, uint32_t field_value1, ...);
+
+void dmub_reg_get(struct dmub_srv *srv, uint32_t addr, uint8_t shift,
+ uint32_t mask, uint32_t *field_value);
+
+#endif /* _DMUB_REG_H_ */
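
To see how these layers compose: REG_UPDATE() resolves the register offset
and the field's (shift, mask) pair through the per-ASIC tables before
calling into dmub_reg.c. A conceptual expansion, spelled out by hand from
the macros above rather than taken from compiler output:

    /*
     * REG_UPDATE(DMCUB_CNTL, DMCUB_ENABLE, 1);
     *
     * expands, with CTX == dmub and REGS == dmub->regs, to roughly:
     *
     * dmub_reg_update(dmub,
     *                 dmub->regs->offset.DMCUB_CNTL,  // REG(DMCUB_CNTL)
     *                 1,                              // one field follows
     *                 dmub->regs->shift.DMCUB_CNTL__DMCUB_ENABLE,
     *                 dmub->regs->mask.DMCUB_CNTL__DMCUB_ENABLE,
     *                 1);                             // field value
     */
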
diff --git a/drivers/gpu/drm/amd/display/dmub/src/dmub_srv.c b/drivers/gpu/drm/amd/display/dmub/src/dmub_srv.c
new file mode 100644
index 000000000000..dee676335d73
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dmub/src/dmub_srv.c
@@ -0,0 +1,505 @@
+/*
+ * Copyright 2019 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include "../inc/dmub_srv.h"
+#include "dmub_dcn20.h"
+#include "dmub_dcn21.h"
+#include "dmub_fw_meta.h"
+#include "os_types.h"
+/*
+ * Note: the DMUB service is standalone. No additional headers should be
+ * added below or above this line unless they reside within the DMUB
+ * folder.
+ */
+
+/* Alignment for framebuffer memory. */
+#define DMUB_FB_ALIGNMENT (1024 * 1024)
+
+/* Stack size. */
+#define DMUB_STACK_SIZE (128 * 1024)
+
+/* Context size. */
+#define DMUB_CONTEXT_SIZE (512 * 1024)
+
+/* Mailbox size */
+#define DMUB_MAILBOX_SIZE (DMUB_RB_SIZE)
+
+/* Default state size if meta is absent. */
+#define DMUB_FW_STATE_SIZE (1024)
+
+/* Default tracebuffer size if meta is absent. */
+#define DMUB_TRACE_BUFFER_SIZE (1024)
+
+/* Number of windows in use. */
+#define DMUB_NUM_WINDOWS (DMUB_WINDOW_6_FW_STATE + 1)
+/* Base addresses. */
+
+#define DMUB_CW0_BASE (0x60000000)
+#define DMUB_CW1_BASE (0x61000000)
+#define DMUB_CW3_BASE (0x63000000)
+#define DMUB_CW5_BASE (0x65000000)
+#define DMUB_CW6_BASE (0x66000000)
+
+static inline uint32_t dmub_align(uint32_t val, uint32_t factor)
+{
+ return (val + factor - 1) / factor * factor;
+}
+
+static void dmub_flush_buffer_mem(const struct dmub_fb *fb)
+{
+ const uint8_t *base = (const uint8_t *)fb->cpu_addr;
+ uint8_t buf[64];
+ uint32_t pos, end;
+
+	/*
+	 * Read 64-byte chunks since we don't want to store a
+	 * large temporary buffer for this purpose.
+	 */
+ end = fb->size / sizeof(buf) * sizeof(buf);
+
+ for (pos = 0; pos < end; pos += sizeof(buf))
+ dmub_memcpy(buf, base + pos, sizeof(buf));
+
+ /* Read anything leftover into the buffer. */
+ if (end < fb->size)
+ dmub_memcpy(buf, base + pos, fb->size - end);
+}
+
+static const struct dmub_fw_meta_info *
+dmub_get_fw_meta_info(const uint8_t *fw_bss_data, uint32_t fw_bss_data_size)
+{
+ const union dmub_fw_meta *meta;
+
+ if (fw_bss_data == NULL)
+ return NULL;
+
+ if (fw_bss_data_size < sizeof(union dmub_fw_meta) + DMUB_FW_META_OFFSET)
+ return NULL;
+
+ meta = (const union dmub_fw_meta *)(fw_bss_data + fw_bss_data_size -
+ DMUB_FW_META_OFFSET -
+ sizeof(union dmub_fw_meta));
+
+ if (meta->info.magic_value != DMUB_FW_META_MAGIC)
+ return NULL;
+
+ return &meta->info;
+}
+
+static bool dmub_srv_hw_setup(struct dmub_srv *dmub, enum dmub_asic asic)
+{
+ struct dmub_srv_hw_funcs *funcs = &dmub->hw_funcs;
+
+ switch (asic) {
+ case DMUB_ASIC_DCN20:
+ case DMUB_ASIC_DCN21:
+ dmub->regs = &dmub_srv_dcn20_regs;
+
+ funcs->reset = dmub_dcn20_reset;
+ funcs->reset_release = dmub_dcn20_reset_release;
+ funcs->backdoor_load = dmub_dcn20_backdoor_load;
+ funcs->setup_windows = dmub_dcn20_setup_windows;
+ funcs->setup_mailbox = dmub_dcn20_setup_mailbox;
+ funcs->get_inbox1_rptr = dmub_dcn20_get_inbox1_rptr;
+ funcs->set_inbox1_wptr = dmub_dcn20_set_inbox1_wptr;
+ funcs->is_supported = dmub_dcn20_is_supported;
+ funcs->is_hw_init = dmub_dcn20_is_hw_init;
+
+ if (asic == DMUB_ASIC_DCN21) {
+ dmub->regs = &dmub_srv_dcn21_regs;
+
+ funcs->is_auto_load_done = dmub_dcn21_is_auto_load_done;
+ funcs->is_phy_init = dmub_dcn21_is_phy_init;
+ }
+ break;
+
+ default:
+ return false;
+ }
+
+ return true;
+}
+
+enum dmub_status dmub_srv_create(struct dmub_srv *dmub,
+ const struct dmub_srv_create_params *params)
+{
+ enum dmub_status status = DMUB_STATUS_OK;
+
+ dmub_memset(dmub, 0, sizeof(*dmub));
+
+ dmub->funcs = params->funcs;
+ dmub->user_ctx = params->user_ctx;
+ dmub->asic = params->asic;
+ dmub->is_virtual = params->is_virtual;
+
+ /* Setup asic dependent hardware funcs. */
+ if (!dmub_srv_hw_setup(dmub, params->asic)) {
+ status = DMUB_STATUS_INVALID;
+ goto cleanup;
+ }
+
+ /* Override (some) hardware funcs based on user params. */
+ if (params->hw_funcs) {
+ if (params->hw_funcs->get_inbox1_rptr)
+ dmub->hw_funcs.get_inbox1_rptr =
+ params->hw_funcs->get_inbox1_rptr;
+
+ if (params->hw_funcs->set_inbox1_wptr)
+ dmub->hw_funcs.set_inbox1_wptr =
+ params->hw_funcs->set_inbox1_wptr;
+
+ if (params->hw_funcs->is_supported)
+ dmub->hw_funcs.is_supported =
+ params->hw_funcs->is_supported;
+ }
+
+ /* Sanity checks for required hw func pointers. */
+ if (!dmub->hw_funcs.get_inbox1_rptr ||
+ !dmub->hw_funcs.set_inbox1_wptr) {
+ status = DMUB_STATUS_INVALID;
+ goto cleanup;
+ }
+
+cleanup:
+ if (status == DMUB_STATUS_OK)
+ dmub->sw_init = true;
+ else
+ dmub_srv_destroy(dmub);
+
+ return status;
+}
+
+void dmub_srv_destroy(struct dmub_srv *dmub)
+{
+ dmub_memset(dmub, 0, sizeof(*dmub));
+}
+
+enum dmub_status
+dmub_srv_calc_region_info(struct dmub_srv *dmub,
+ const struct dmub_srv_region_params *params,
+ struct dmub_srv_region_info *out)
+{
+ struct dmub_region *inst = &out->regions[DMUB_WINDOW_0_INST_CONST];
+ struct dmub_region *stack = &out->regions[DMUB_WINDOW_1_STACK];
+ struct dmub_region *data = &out->regions[DMUB_WINDOW_2_BSS_DATA];
+ struct dmub_region *bios = &out->regions[DMUB_WINDOW_3_VBIOS];
+ struct dmub_region *mail = &out->regions[DMUB_WINDOW_4_MAILBOX];
+ struct dmub_region *trace_buff = &out->regions[DMUB_WINDOW_5_TRACEBUFF];
+ struct dmub_region *fw_state = &out->regions[DMUB_WINDOW_6_FW_STATE];
+ const struct dmub_fw_meta_info *fw_info;
+ uint32_t fw_state_size = DMUB_FW_STATE_SIZE;
+ uint32_t trace_buffer_size = DMUB_TRACE_BUFFER_SIZE;
+
+ if (!dmub->sw_init)
+ return DMUB_STATUS_INVALID;
+
+ memset(out, 0, sizeof(*out));
+
+ out->num_regions = DMUB_NUM_WINDOWS;
+
+ inst->base = 0x0;
+ inst->top = inst->base + params->inst_const_size;
+
+ data->base = dmub_align(inst->top, 256);
+ data->top = data->base + params->bss_data_size;
+
+ /*
+ * All cache windows below should be aligned to the size
+ * of the DMCUB cache line, 64 bytes.
+ */
+
+ stack->base = dmub_align(data->top, 256);
+ stack->top = stack->base + DMUB_STACK_SIZE + DMUB_CONTEXT_SIZE;
+
+ bios->base = dmub_align(stack->top, 256);
+ bios->top = bios->base + params->vbios_size;
+
+ mail->base = dmub_align(bios->top, 256);
+ mail->top = mail->base + DMUB_MAILBOX_SIZE;
+
+ fw_info = dmub_get_fw_meta_info(params->fw_bss_data,
+ params->bss_data_size);
+
+ if (fw_info) {
+ fw_state_size = fw_info->fw_region_size;
+ trace_buffer_size = fw_info->trace_buffer_size;
+ }
+
+ trace_buff->base = dmub_align(mail->top, 256);
+ trace_buff->top = trace_buff->base + dmub_align(trace_buffer_size, 64);
+
+ fw_state->base = dmub_align(trace_buff->top, 256);
+ fw_state->top = fw_state->base + dmub_align(fw_state_size, 64);
+
+ out->fb_size = dmub_align(fw_state->top, 4096);
+
+ return DMUB_STATUS_OK;
+}
+
+enum dmub_status dmub_srv_calc_fb_info(struct dmub_srv *dmub,
+ const struct dmub_srv_fb_params *params,
+ struct dmub_srv_fb_info *out)
+{
+ uint8_t *cpu_base;
+ uint64_t gpu_base;
+ uint32_t i;
+
+ if (!dmub->sw_init)
+ return DMUB_STATUS_INVALID;
+
+ memset(out, 0, sizeof(*out));
+
+ if (params->region_info->num_regions != DMUB_NUM_WINDOWS)
+ return DMUB_STATUS_INVALID;
+
+ cpu_base = (uint8_t *)params->cpu_addr;
+ gpu_base = params->gpu_addr;
+
+ for (i = 0; i < DMUB_NUM_WINDOWS; ++i) {
+ const struct dmub_region *reg =
+ &params->region_info->regions[i];
+
+ out->fb[i].cpu_addr = cpu_base + reg->base;
+ out->fb[i].gpu_addr = gpu_base + reg->base;
+ out->fb[i].size = reg->top - reg->base;
+ }
+
+ out->num_fb = DMUB_NUM_WINDOWS;
+
+ return DMUB_STATUS_OK;
+}
+
+enum dmub_status dmub_srv_has_hw_support(struct dmub_srv *dmub,
+ bool *is_supported)
+{
+ *is_supported = false;
+
+ if (!dmub->sw_init)
+ return DMUB_STATUS_INVALID;
+
+ if (dmub->hw_funcs.is_supported)
+ *is_supported = dmub->hw_funcs.is_supported(dmub);
+
+ return DMUB_STATUS_OK;
+}
+
+enum dmub_status dmub_srv_is_hw_init(struct dmub_srv *dmub, bool *is_hw_init)
+{
+ *is_hw_init = false;
+
+ if (!dmub->sw_init)
+ return DMUB_STATUS_INVALID;
+
+ if (dmub->hw_funcs.is_hw_init)
+ *is_hw_init = dmub->hw_funcs.is_hw_init(dmub);
+
+ return DMUB_STATUS_OK;
+}
+
+enum dmub_status dmub_srv_hw_init(struct dmub_srv *dmub,
+ const struct dmub_srv_hw_params *params)
+{
+ struct dmub_fb *inst_fb = params->fb[DMUB_WINDOW_0_INST_CONST];
+ struct dmub_fb *stack_fb = params->fb[DMUB_WINDOW_1_STACK];
+ struct dmub_fb *data_fb = params->fb[DMUB_WINDOW_2_BSS_DATA];
+ struct dmub_fb *bios_fb = params->fb[DMUB_WINDOW_3_VBIOS];
+ struct dmub_fb *mail_fb = params->fb[DMUB_WINDOW_4_MAILBOX];
+ struct dmub_fb *tracebuff_fb = params->fb[DMUB_WINDOW_5_TRACEBUFF];
+ struct dmub_fb *fw_state_fb = params->fb[DMUB_WINDOW_6_FW_STATE];
+
+ struct dmub_rb_init_params rb_params;
+ struct dmub_window cw0, cw1, cw2, cw3, cw4, cw5, cw6;
+ struct dmub_region inbox1;
+
+ if (!dmub->sw_init)
+ return DMUB_STATUS_INVALID;
+
+ dmub->fb_base = params->fb_base;
+ dmub->fb_offset = params->fb_offset;
+ dmub->psp_version = params->psp_version;
+
+	if (inst_fb && stack_fb && data_fb) {
+ cw0.offset.quad_part = inst_fb->gpu_addr;
+ cw0.region.base = DMUB_CW0_BASE;
+ cw0.region.top = cw0.region.base + inst_fb->size - 1;
+
+ cw1.offset.quad_part = stack_fb->gpu_addr;
+ cw1.region.base = DMUB_CW1_BASE;
+ cw1.region.top = cw1.region.base + stack_fb->size - 1;
+
+		/*
+		 * Read back all of the instruction memory so that the DMCUB
+		 * doesn't hang during backdoor loading if the writes from x86
+		 * haven't been flushed to framebuffer memory yet.
+		 */
+ dmub_flush_buffer_mem(inst_fb);
+
+ if (params->load_inst_const && dmub->hw_funcs.backdoor_load)
+ dmub->hw_funcs.backdoor_load(dmub, &cw0, &cw1);
+ }
+
+ if (dmub->hw_funcs.reset)
+ dmub->hw_funcs.reset(dmub);
+
+ if (inst_fb && data_fb && bios_fb && mail_fb && tracebuff_fb &&
+ fw_state_fb) {
+ cw2.offset.quad_part = data_fb->gpu_addr;
+ cw2.region.base = DMUB_CW0_BASE + inst_fb->size;
+ cw2.region.top = cw2.region.base + data_fb->size;
+
+ cw3.offset.quad_part = bios_fb->gpu_addr;
+ cw3.region.base = DMUB_CW3_BASE;
+ cw3.region.top = cw3.region.base + bios_fb->size;
+
+ cw4.offset.quad_part = mail_fb->gpu_addr;
+ cw4.region.base = cw3.region.top + 1;
+ cw4.region.top = cw4.region.base + mail_fb->size;
+
+ inbox1.base = cw4.region.base;
+ inbox1.top = cw4.region.top;
+
+ cw5.offset.quad_part = tracebuff_fb->gpu_addr;
+ cw5.region.base = DMUB_CW5_BASE;
+ cw5.region.top = cw5.region.base + tracebuff_fb->size;
+
+ cw6.offset.quad_part = fw_state_fb->gpu_addr;
+ cw6.region.base = DMUB_CW6_BASE;
+ cw6.region.top = cw6.region.base + fw_state_fb->size;
+
+ dmub->fw_state = fw_state_fb->cpu_addr;
+
+ if (dmub->hw_funcs.setup_windows)
+ dmub->hw_funcs.setup_windows(dmub, &cw2, &cw3, &cw4,
+ &cw5, &cw6);
+
+ if (dmub->hw_funcs.setup_mailbox)
+ dmub->hw_funcs.setup_mailbox(dmub, &inbox1);
+ }
+
+ if (mail_fb) {
+ dmub_memset(&rb_params, 0, sizeof(rb_params));
+ rb_params.ctx = dmub;
+ rb_params.base_address = mail_fb->cpu_addr;
+ rb_params.capacity = DMUB_RB_SIZE;
+
+ dmub_rb_init(&dmub->inbox1_rb, &rb_params);
+ }
+
+ if (dmub->hw_funcs.reset_release)
+ dmub->hw_funcs.reset_release(dmub);
+
+ dmub->hw_init = true;
+
+ return DMUB_STATUS_OK;
+}
+
+enum dmub_status dmub_srv_cmd_queue(struct dmub_srv *dmub,
+ const struct dmub_cmd_header *cmd)
+{
+ if (!dmub->hw_init)
+ return DMUB_STATUS_INVALID;
+
+ if (dmub_rb_push_front(&dmub->inbox1_rb, cmd))
+ return DMUB_STATUS_OK;
+
+ return DMUB_STATUS_QUEUE_FULL;
+}
+
+enum dmub_status dmub_srv_cmd_execute(struct dmub_srv *dmub)
+{
+ if (!dmub->hw_init)
+ return DMUB_STATUS_INVALID;
+
+	/*
+	 * Read back all the queued commands to ensure that they've
+	 * been flushed to framebuffer memory. Otherwise the DMCUB might
+	 * read back stale, fully invalid, or partially invalid data.
+	 */
+ dmub_rb_flush_pending(&dmub->inbox1_rb);
+
+ dmub->hw_funcs.set_inbox1_wptr(dmub, dmub->inbox1_rb.wrpt);
+ return DMUB_STATUS_OK;
+}
+
+enum dmub_status dmub_srv_wait_for_auto_load(struct dmub_srv *dmub,
+ uint32_t timeout_us)
+{
+ uint32_t i;
+
+ if (!dmub->hw_init)
+ return DMUB_STATUS_INVALID;
+
+ if (!dmub->hw_funcs.is_auto_load_done)
+ return DMUB_STATUS_OK;
+
+ for (i = 0; i <= timeout_us; i += 100) {
+ if (dmub->hw_funcs.is_auto_load_done(dmub))
+ return DMUB_STATUS_OK;
+
+ udelay(100);
+ }
+
+ return DMUB_STATUS_TIMEOUT;
+}
+
+enum dmub_status dmub_srv_wait_for_phy_init(struct dmub_srv *dmub,
+ uint32_t timeout_us)
+{
+ uint32_t i = 0;
+
+ if (!dmub->hw_init)
+ return DMUB_STATUS_INVALID;
+
+ if (!dmub->hw_funcs.is_phy_init)
+ return DMUB_STATUS_OK;
+
+ for (i = 0; i <= timeout_us; i += 10) {
+ if (dmub->hw_funcs.is_phy_init(dmub))
+ return DMUB_STATUS_OK;
+
+ udelay(10);
+ }
+
+ return DMUB_STATUS_TIMEOUT;
+}
+
+enum dmub_status dmub_srv_wait_for_idle(struct dmub_srv *dmub,
+ uint32_t timeout_us)
+{
+ uint32_t i;
+
+ if (!dmub->hw_init)
+ return DMUB_STATUS_INVALID;
+
+ for (i = 0; i <= timeout_us; ++i) {
+ dmub->inbox1_rb.rptr = dmub->hw_funcs.get_inbox1_rptr(dmub);
+ if (dmub_rb_empty(&dmub->inbox1_rb))
+ return DMUB_STATUS_OK;
+
+ udelay(1);
+ }
+
+ return DMUB_STATUS_TIMEOUT;
+}
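
Putting the service API together, a hedged sketch of the bring-up sequence
a driver would follow (error paths trimmed; allocating region_info.fb_size
bytes of framebuffer memory is the caller's job, and the parameter-struct
field names are the ones used by the code above):

    static enum dmub_status example_dmub_bringup(
            struct dmub_srv *dmub,
            const struct dmub_srv_create_params *cp,
            struct dmub_srv_region_params *rp,
            struct dmub_srv_fb_params *fp,
            struct dmub_srv_hw_params *hp)
    {
            struct dmub_srv_region_info region_info;
            struct dmub_srv_fb_info fb_info;
            enum dmub_status status;
            uint32_t i;

            status = dmub_srv_create(dmub, cp);
            if (status != DMUB_STATUS_OK)
                    return status;

            status = dmub_srv_calc_region_info(dmub, rp, &region_info);
            if (status != DMUB_STATUS_OK)
                    return status;

            /* ...caller allocates region_info.fb_size bytes and fills in
             * fp->cpu_addr / fp->gpu_addr before continuing... */
            fp->region_info = &region_info;

            status = dmub_srv_calc_fb_info(dmub, fp, &fb_info);
            if (status != DMUB_STATUS_OK)
                    return status;

            for (i = 0; i < fb_info.num_fb; i++)
                    hp->fb[i] = &fb_info.fb[i];

            status = dmub_srv_hw_init(dmub, hp);
            if (status != DMUB_STATUS_OK)
                    return status;

            /* DCN21 parts also autoload PHY/DMCU firmware; 100 ms budget. */
            return dmub_srv_wait_for_auto_load(dmub, 100 * 1000);
    }
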
diff --git a/drivers/gpu/drm/amd/display/include/dal_asic_id.h b/drivers/gpu/drm/amd/display/include/dal_asic_id.h
index 1be6c44fd32f..a2903985b9e8 100644
--- a/drivers/gpu/drm/amd/display/include/dal_asic_id.h
+++ b/drivers/gpu/drm/amd/display/include/dal_asic_id.h
@@ -134,23 +134,34 @@
#define PICASSO_A0 0x41
/* DCN1_01 */
#define RAVEN2_A0 0x81
+#define RAVEN2_15D8_REV_94 0x94
+#define RAVEN2_15D8_REV_95 0x95
+#define RAVEN2_15D8_REV_E3 0xE3
+#define RAVEN2_15D8_REV_E4 0xE4
+#define RAVEN2_15D8_REV_E9 0xE9
+#define RAVEN2_15D8_REV_EA 0xEA
+#define RAVEN2_15D8_REV_EB 0xEB
#define RAVEN1_F0 0xF0
#define RAVEN_UNKNOWN 0xFF
-
-#define PICASSO_15D8_REV_E3 0xE3
-#define PICASSO_15D8_REV_E4 0xE4
-
+#ifndef ASICREV_IS_RAVEN
#define ASICREV_IS_RAVEN(eChipRev) ((eChipRev >= RAVEN_A0) && eChipRev < RAVEN_UNKNOWN)
-#define ASICREV_IS_PICASSO(eChipRev) ((eChipRev >= PICASSO_A0) && (eChipRev < RAVEN2_A0))
-#define ASICREV_IS_RAVEN2(eChipRev) ((eChipRev >= RAVEN2_A0) && (eChipRev < PICASSO_15D8_REV_E3))
-#define ASICREV_IS_DALI(eChipRev) ((eChipRev >= PICASSO_15D8_REV_E3) && (eChipRev < RAVEN1_F0))
+#endif
+#define ASICREV_IS_PICASSO(eChipRev) ((eChipRev >= PICASSO_A0) && (eChipRev < RAVEN2_A0))
+#ifndef ASICREV_IS_RAVEN2
+#define ASICREV_IS_RAVEN2(eChipRev) ((eChipRev >= RAVEN2_A0) && (eChipRev < RAVEN1_F0))
+#endif
#define ASICREV_IS_RV1_F0(eChipRev) ((eChipRev >= RAVEN1_F0) && (eChipRev < RAVEN_UNKNOWN))
-
+#define ASICREV_IS_DALI(eChipRev) ((eChipRev == RAVEN2_15D8_REV_E3) \
+ || (eChipRev == RAVEN2_15D8_REV_E4))
+#define ASICREV_IS_POLLOCK(eChipRev) ((eChipRev == RAVEN2_15D8_REV_94) \
+		|| (eChipRev == RAVEN2_15D8_REV_95) \
+		|| (eChipRev == RAVEN2_15D8_REV_E9) \
+		|| (eChipRev == RAVEN2_15D8_REV_EA) \
+		|| (eChipRev == RAVEN2_15D8_REV_EB))
#define FAMILY_RV 142 /* DCN 1*/
-#if defined(CONFIG_DRM_AMD_DC_DCN2_0)
#define FAMILY_NV 143 /* DCN 2*/
@@ -164,12 +175,9 @@ enum {
#define ASICREV_IS_NAVI10_P(eChipRev) (eChipRev < NV_NAVI12_P_A0)
#define ASICREV_IS_NAVI12_P(eChipRev) ((eChipRev >= NV_NAVI12_P_A0) && (eChipRev < NV_NAVI14_M_A0))
#define ASICREV_IS_NAVI14_M(eChipRev) ((eChipRev >= NV_NAVI14_M_A0) && (eChipRev < NV_UNKNOWN))
-#endif
-#if defined(CONFIG_DRM_AMD_DC_DCN2_1)
#define RENOIR_A0 0x91
#define DEVICE_ID_RENOIR_1636 0x1636 // Renoir
#define ASICREV_IS_RENOIR(eChipRev) ((eChipRev >= RENOIR_A0) && (eChipRev < 0xFF))
-#endif
/*
* ASIC chip ID
diff --git a/drivers/gpu/drm/amd/display/include/dal_types.h b/drivers/gpu/drm/amd/display/include/dal_types.h
index fcc42372b6cf..0b6859189ca7 100644
--- a/drivers/gpu/drm/amd/display/include/dal_types.h
+++ b/drivers/gpu/drm/amd/display/include/dal_types.h
@@ -46,12 +46,8 @@ enum dce_version {
DCE_VERSION_MAX,
DCN_VERSION_1_0,
DCN_VERSION_1_01,
-#if defined(CONFIG_DRM_AMD_DC_DCN2_0)
DCN_VERSION_2_0,
-#endif
-#if defined(CONFIG_DRM_AMD_DC_DCN2_1)
DCN_VERSION_2_1,
-#endif
DCN_VERSION_MAX
};
diff --git a/drivers/gpu/drm/amd/display/include/grph_object_ctrl_defs.h b/drivers/gpu/drm/amd/display/include/grph_object_ctrl_defs.h
index f312834fef50..d51de94e4bc3 100644
--- a/drivers/gpu/drm/amd/display/include/grph_object_ctrl_defs.h
+++ b/drivers/gpu/drm/amd/display/include/grph_object_ctrl_defs.h
@@ -178,7 +178,8 @@ struct dc_firmware_info {
uint32_t default_engine_clk; /* in KHz */
uint32_t dp_phy_ref_clk; /* in KHz - DCE12 only */
uint32_t i2c_engine_ref_clk; /* in KHz - DCE12 only */
-
+ bool oem_i2c_present;
+ uint8_t oem_i2c_obj_id;
};
diff --git a/drivers/gpu/drm/amd/display/include/link_service_types.h b/drivers/gpu/drm/amd/display/include/link_service_types.h
index 876b0b3e1a9c..4869d4562e4d 100644
--- a/drivers/gpu/drm/amd/display/include/link_service_types.h
+++ b/drivers/gpu/drm/amd/display/include/link_service_types.h
@@ -123,6 +123,13 @@ enum dp_test_pattern {
DP_TEST_PATTERN_UNSUPPORTED
};
+enum dp_test_pattern_color_space {
+ DP_TEST_PATTERN_COLOR_SPACE_RGB,
+ DP_TEST_PATTERN_COLOR_SPACE_YCBCR601,
+ DP_TEST_PATTERN_COLOR_SPACE_YCBCR709,
+ DP_TEST_PATTERN_COLOR_SPACE_UNDEFINED
+};
+
enum dp_panel_mode {
/* not required */
DP_PANEL_MODE_DEFAULT,
diff --git a/drivers/gpu/drm/amd/display/include/logger_types.h b/drivers/gpu/drm/amd/display/include/logger_types.h
index 2b219cdb13ad..89a709267019 100644
--- a/drivers/gpu/drm/amd/display/include/logger_types.h
+++ b/drivers/gpu/drm/amd/display/include/logger_types.h
@@ -66,12 +66,8 @@
#define DC_LOG_GAMMA(...) pr_debug("[GAMMA]:"__VA_ARGS__)
#define DC_LOG_ALL_GAMMA(...) pr_debug("[GAMMA]:"__VA_ARGS__)
#define DC_LOG_ALL_TF_CHANNELS(...) pr_debug("[GAMMA]:"__VA_ARGS__)
-#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT
#define DC_LOG_DSC(...) DRM_DEBUG_KMS(__VA_ARGS__)
-#endif
-#if defined(CONFIG_DRM_AMD_DC_DCN3_0) || defined(CONFIG_DRM_AMD_DC_DCN2_0)
#define DC_LOG_DWB(...) DRM_DEBUG_KMS(__VA_ARGS__)
-#endif
struct dal_logger;
@@ -116,9 +112,7 @@ enum dc_log_type {
LOG_PERF_TRACE,
LOG_DISPLAYSTATS,
LOG_HDMI_RETIMER_REDRIVER,
-#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT
LOG_DSC,
-#endif
LOG_DWB,
LOG_GAMMA_DEBUG,
LOG_MAX_HW_POINTS,
diff --git a/drivers/gpu/drm/amd/display/modules/color/color_gamma.c b/drivers/gpu/drm/amd/display/modules/color/color_gamma.c
index 1de4805cb8c7..1b278c42809a 100644
--- a/drivers/gpu/drm/amd/display/modules/color/color_gamma.c
+++ b/drivers/gpu/drm/amd/display/modules/color/color_gamma.c
@@ -154,6 +154,7 @@ static void compute_de_pq(struct fixed31_32 in_x, struct fixed31_32 *out_y)
struct fixed31_32 l_pow_m1;
struct fixed31_32 base, div;
+ struct fixed31_32 base2;
if (dc_fixpt_lt(in_x, dc_fixpt_zero))
@@ -163,13 +164,15 @@ static void compute_de_pq(struct fixed31_32 in_x, struct fixed31_32 *out_y)
dc_fixpt_div(dc_fixpt_one, m2));
base = dc_fixpt_sub(l_pow_m1, c1);
- if (dc_fixpt_lt(base, dc_fixpt_zero))
- base = dc_fixpt_zero;
-
div = dc_fixpt_sub(c2, dc_fixpt_mul(c3, l_pow_m1));
- *out_y = dc_fixpt_pow(dc_fixpt_div(base, div),
- dc_fixpt_div(dc_fixpt_one, m1));
+ base2 = dc_fixpt_div(base, div);
+	// take the absolute value to avoid a negative pow() base,
+	// which would yield a complex result
+	if (dc_fixpt_lt(base2, dc_fixpt_zero))
+		base2 = dc_fixpt_sub(dc_fixpt_zero, base2);
+
+ *out_y = dc_fixpt_pow(base2, dc_fixpt_div(dc_fixpt_one, m1));
}
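
For reference, compute_de_pq() evaluates the SMPTE ST 2084 (PQ) inverse
EOTF, which with this file's constants reads

    y = \left( \frac{x^{1/m_2} - c_1}{c_2 - c_3\, x^{1/m_2}} \right)^{1/m_1}

The old code clamped the numerator at zero; the new code instead takes the
absolute value of the quotient, so dc_fixpt_pow() is never handed a
negative base (which has no real result for a fractional exponent).
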
@@ -361,8 +364,10 @@ static struct fixed31_32 translate_from_linear_space(
scratch_2 = dc_fixpt_mul(gamma_of_2,
pow_buffer[pow_buffer_ptr%16]);
- pow_buffer[pow_buffer_ptr%16] = scratch_2;
- pow_buffer_ptr++;
+ if (pow_buffer_ptr != -1) {
+ pow_buffer[pow_buffer_ptr%16] = scratch_2;
+ pow_buffer_ptr++;
+ }
scratch_1 = dc_fixpt_mul(scratch_1, scratch_2);
scratch_1 = dc_fixpt_sub(scratch_1, args->a2);
@@ -937,7 +942,6 @@ static bool build_freesync_hdr(struct pwl_float_data_ex *rgb_regamma,
struct fixed31_32 max_display;
struct fixed31_32 min_display;
struct fixed31_32 max_content;
- struct fixed31_32 min_content;
struct fixed31_32 clip = dc_fixpt_one;
struct fixed31_32 output;
bool use_eetf = false;
@@ -951,7 +955,6 @@ static bool build_freesync_hdr(struct pwl_float_data_ex *rgb_regamma,
max_display = dc_fixpt_from_int(fs_params->max_display);
min_display = dc_fixpt_from_fraction(fs_params->min_display, 10000);
max_content = dc_fixpt_from_int(fs_params->max_content);
- min_content = dc_fixpt_from_fraction(fs_params->min_content, 10000);
sdr_white_level = dc_fixpt_from_int(fs_params->sdr_white_level);
if (fs_params->min_display > 1000) // cap at 0.1 at the bottom
@@ -2000,10 +2003,28 @@ bool mod_color_calculate_degamma_params(struct dc_transfer_func *input_tf,
tf_pts->x_point_at_y1_green = 1;
tf_pts->x_point_at_y1_blue = 1;
- map_regamma_hw_to_x_user(ramp, coeff, rgb_user,
- coordinates_x, axis_x, curve,
- MAX_HW_POINTS, tf_pts,
- mapUserRamp && ramp && ramp->type == GAMMA_RGB_256);
+ if (input_tf->tf == TRANSFER_FUNCTION_PQ) {
+ /* just copy current rgb_regamma into tf_pts */
+ struct pwl_float_data_ex *curvePt = curve;
+ int i = 0;
+
+ while (i <= MAX_HW_POINTS) {
+ tf_pts->red[i] = curvePt->r;
+ tf_pts->green[i] = curvePt->g;
+ tf_pts->blue[i] = curvePt->b;
+ ++curvePt;
+ ++i;
+ }
+ } else {
+		// clamps to 0-1
+ map_regamma_hw_to_x_user(ramp, coeff, rgb_user,
+ coordinates_x, axis_x, curve,
+ MAX_HW_POINTS, tf_pts,
+ mapUserRamp && ramp && ramp->type == GAMMA_RGB_256);
+ }
+
if (ramp->type == GAMMA_CUSTOM)
apply_lut_1d(ramp, MAX_HW_POINTS, tf_pts);
diff --git a/drivers/gpu/drm/amd/display/modules/freesync/freesync.c b/drivers/gpu/drm/amd/display/modules/freesync/freesync.c
index 5437b50e9f90..6e5ecefe7d9d 100644
--- a/drivers/gpu/drm/amd/display/modules/freesync/freesync.c
+++ b/drivers/gpu/drm/amd/display/modules/freesync/freesync.c
@@ -37,8 +37,8 @@
#define STATIC_SCREEN_RAMP_DELTA_REFRESH_RATE_PER_FRAME ((1000 / 60) * 65)
/* Number of elements in the render times cache array */
#define RENDER_TIMES_MAX_COUNT 10
-/* Threshold to exit BTR (to avoid frequent enter-exits at the lower limit) */
-#define BTR_EXIT_MARGIN 2000
+/* Threshold to enter/exit BTR (to avoid frequent enter-exits at the lower limit) */
+#define BTR_MAX_MARGIN 2500
/* Threshold to change BTR multiplier (to avoid frequent changes) */
#define BTR_DRIFT_MARGIN 2000
/*Threshold to exit fixed refresh rate*/
@@ -122,7 +122,7 @@ static unsigned int calc_v_total_from_refresh(
const struct dc_stream_state *stream,
unsigned int refresh_in_uhz)
{
- unsigned int v_total = stream->timing.v_total;
+ unsigned int v_total;
unsigned int frame_duration_in_ns;
frame_duration_in_ns =
@@ -254,24 +254,22 @@ static void apply_below_the_range(struct core_freesync *core_freesync,
unsigned int delta_from_mid_point_in_us_1 = 0xFFFFFFFF;
unsigned int delta_from_mid_point_in_us_2 = 0xFFFFFFFF;
unsigned int frames_to_insert = 0;
- unsigned int min_frame_duration_in_ns = 0;
- unsigned int max_render_time_in_us = in_out_vrr->max_duration_in_us;
unsigned int delta_from_mid_point_delta_in_us;
-
- min_frame_duration_in_ns = ((unsigned int) (div64_u64(
- (1000000000ULL * 1000000),
- in_out_vrr->max_refresh_in_uhz)));
+ unsigned int max_render_time_in_us =
+ in_out_vrr->max_duration_in_us - in_out_vrr->btr.margin_in_us;
/* Program BTR */
- if (last_render_time_in_us + BTR_EXIT_MARGIN < max_render_time_in_us) {
+ if ((last_render_time_in_us + in_out_vrr->btr.margin_in_us / 2) < max_render_time_in_us) {
/* Exit Below the Range */
if (in_out_vrr->btr.btr_active) {
in_out_vrr->btr.frame_counter = 0;
in_out_vrr->btr.btr_active = false;
}
- } else if (last_render_time_in_us > max_render_time_in_us) {
+ } else if (last_render_time_in_us > (max_render_time_in_us + in_out_vrr->btr.margin_in_us / 2)) {
/* Enter Below the Range */
- in_out_vrr->btr.btr_active = true;
+ if (!in_out_vrr->btr.btr_active) {
+ in_out_vrr->btr.btr_active = true;
+ }
}
/* BTR set to "not active" so disengage */
@@ -327,7 +325,9 @@ static void apply_below_the_range(struct core_freesync *core_freesync,
/* Choose number of frames to insert based on how close it
* can get to the mid point of the variable range.
*/
- if (delta_from_mid_point_in_us_1 < delta_from_mid_point_in_us_2) {
+ if ((frame_time_in_us / mid_point_frames_ceil) > in_out_vrr->min_duration_in_us &&
+ (delta_from_mid_point_in_us_1 < delta_from_mid_point_in_us_2 ||
+ mid_point_frames_floor < 2)) {
frames_to_insert = mid_point_frames_ceil;
delta_from_mid_point_delta_in_us = delta_from_mid_point_in_us_2 -
delta_from_mid_point_in_us_1;
@@ -343,7 +343,7 @@ static void apply_below_the_range(struct core_freesync *core_freesync,
if (in_out_vrr->btr.frames_to_insert != 0 &&
delta_from_mid_point_delta_in_us < BTR_DRIFT_MARGIN) {
if (((last_render_time_in_us / in_out_vrr->btr.frames_to_insert) <
- in_out_vrr->max_duration_in_us) &&
+ max_render_time_in_us) &&
((last_render_time_in_us / in_out_vrr->btr.frames_to_insert) >
in_out_vrr->min_duration_in_us))
frames_to_insert = in_out_vrr->btr.frames_to_insert;
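
A worked example of the mid-point selection with assumed panel numbers
(40-144 Hz, so min_duration ~= 6944 us, max_duration = 25000 us,
mid_point ~= 15972 us): for last_render_time_in_us = 42000,

    2 inserted frames -> 42000 / 2 = 21000 us/frame, |21000 - 15972| = 5028
    3 inserted frames -> 42000 / 3 = 14000 us/frame, |14000 - 15972| = 1972

14000 us per frame is both closer to the mid point and above min_duration,
so the ceiling (3 frames) is chosen.
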
@@ -796,6 +796,11 @@ void mod_freesync_build_vrr_params(struct mod_freesync *mod_freesync,
refresh_range = in_out_vrr->max_refresh_in_uhz -
in_out_vrr->min_refresh_in_uhz;
+ in_out_vrr->btr.margin_in_us = in_out_vrr->max_duration_in_us -
+ 2 * in_out_vrr->min_duration_in_us;
+ if (in_out_vrr->btr.margin_in_us > BTR_MAX_MARGIN)
+ in_out_vrr->btr.margin_in_us = BTR_MAX_MARGIN;
+
in_out_vrr->supported = true;
}
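
The new margin gives BTR entry/exit a hysteresis band. With the same
assumed 40-144 Hz panel: margin = 25000 - 2 * 6944 = 11112 us, capped to
BTR_MAX_MARGIN = 2500 us, so max_render_time = 25000 - 2500 = 22500 us.
BTR then exits only below 22500 - 1250 = 21250 us of render time and
enters only above 22500 + 1250 = 23750 us; render times inside the
2500 us band keep the current state instead of toggling every frame.
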
@@ -811,6 +816,9 @@ void mod_freesync_build_vrr_params(struct mod_freesync *mod_freesync,
in_out_vrr->btr.inserted_duration_in_us = 0;
in_out_vrr->btr.frames_to_insert = 0;
in_out_vrr->btr.frame_counter = 0;
+ in_out_vrr->fixed.fixed_active = false;
+ in_out_vrr->fixed.target_refresh_in_uhz = 0;
+
in_out_vrr->btr.mid_point_in_us =
(in_out_vrr->min_duration_in_us +
in_out_vrr->max_duration_in_us) / 2;
@@ -826,6 +834,7 @@ void mod_freesync_build_vrr_params(struct mod_freesync *mod_freesync,
in_out_vrr->adjust.v_total_max = stream->timing.v_total;
} else if (in_out_vrr->state == VRR_STATE_ACTIVE_VARIABLE &&
refresh_range >= MIN_REFRESH_RANGE_IN_US) {
in_out_vrr->adjust.v_total_min =
calc_v_total_from_refresh(stream,
in_out_vrr->max_refresh_in_uhz);
diff --git a/drivers/gpu/drm/amd/display/modules/hdcp/Makefile b/drivers/gpu/drm/amd/display/modules/hdcp/Makefile
index 1c3c6d47973a..904424da01b5 100644
--- a/drivers/gpu/drm/amd/display/modules/hdcp/Makefile
+++ b/drivers/gpu/drm/amd/display/modules/hdcp/Makefile
@@ -24,7 +24,8 @@
#
HDCP = hdcp_ddc.o hdcp_log.o hdcp_psp.o hdcp.o \
- hdcp1_execution.o hdcp1_transition.o
+ hdcp1_execution.o hdcp1_transition.o \
+ hdcp2_execution.o hdcp2_transition.o
AMD_DAL_HDCP = $(addprefix $(AMDDALPATH)/modules/hdcp/,$(HDCP))
#$(info ************ DAL-HDCP_MAKEFILE ************)
diff --git a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp.c b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp.c
index d7ac445dec6f..8aa528e874c4 100644
--- a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp.c
+++ b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp.c
@@ -37,24 +37,52 @@ static void push_error_status(struct mod_hdcp *hdcp,
HDCP_ERROR_TRACE(hdcp, status);
}
- hdcp->connection.hdcp1_retry_count++;
+ if (is_hdcp1(hdcp)) {
+ hdcp->connection.hdcp1_retry_count++;
+ } else if (is_hdcp2(hdcp)) {
+ hdcp->connection.hdcp2_retry_count++;
+ }
}
static uint8_t is_cp_desired_hdcp1(struct mod_hdcp *hdcp)
{
- int i, display_enabled = 0;
+ int i, is_auth_needed = 0;
- /* if all displays on the link are disabled, hdcp is not desired */
+	/* if no display on the link needs authentication,
+	 * hdcp is not desired
+	 */
for (i = 0; i < MAX_NUM_OF_DISPLAYS; i++) {
if (hdcp->connection.displays[i].state != MOD_HDCP_DISPLAY_INACTIVE &&
!hdcp->connection.displays[i].adjust.disable) {
- display_enabled = 1;
+ is_auth_needed = 1;
break;
}
}
return (hdcp->connection.hdcp1_retry_count < MAX_NUM_OF_ATTEMPTS) &&
- display_enabled && !hdcp->connection.link.adjust.hdcp1.disable;
+ is_auth_needed &&
+ !hdcp->connection.link.adjust.hdcp1.disable;
+}
+
+static uint8_t is_cp_desired_hdcp2(struct mod_hdcp *hdcp)
+{
+ int i, is_auth_needed = 0;
+
+	/* if no display on the link needs authentication,
+	 * hdcp is not desired
+	 */
+ for (i = 0; i < MAX_NUM_OF_DISPLAYS; i++) {
+ if (hdcp->connection.displays[i].state != MOD_HDCP_DISPLAY_INACTIVE &&
+ !hdcp->connection.displays[i].adjust.disable) {
+ is_auth_needed = 1;
+ break;
+ }
+ }
+
+ return (hdcp->connection.hdcp2_retry_count < MAX_NUM_OF_ATTEMPTS) &&
+ is_auth_needed &&
+ !hdcp->connection.link.adjust.hdcp2.disable &&
+ !hdcp->connection.is_hdcp2_revoked;
}
static enum mod_hdcp_status execution(struct mod_hdcp *hdcp,
@@ -82,6 +110,11 @@ static enum mod_hdcp_status execution(struct mod_hdcp *hdcp,
} else if (is_in_hdcp1_dp_states(hdcp)) {
status = mod_hdcp_hdcp1_dp_execution(hdcp,
event_ctx, &input->hdcp1);
+ } else if (is_in_hdcp2_states(hdcp)) {
+ status = mod_hdcp_hdcp2_execution(hdcp, event_ctx, &input->hdcp2);
+ } else if (is_in_hdcp2_dp_states(hdcp)) {
+ status = mod_hdcp_hdcp2_dp_execution(hdcp,
+ event_ctx, &input->hdcp2);
}
out:
return status;
@@ -99,7 +132,10 @@ static enum mod_hdcp_status transition(struct mod_hdcp *hdcp,
if (is_in_initialized_state(hdcp)) {
if (is_dp_hdcp(hdcp))
- if (is_cp_desired_hdcp1(hdcp)) {
+ if (is_cp_desired_hdcp2(hdcp)) {
+ callback_in_ms(0, output);
+ set_state_id(hdcp, output, D2_A0_DETERMINE_RX_HDCP_CAPABLE);
+ } else if (is_cp_desired_hdcp1(hdcp)) {
callback_in_ms(0, output);
set_state_id(hdcp, output, D1_A0_DETERMINE_RX_HDCP_CAPABLE);
} else {
@@ -107,7 +143,10 @@ static enum mod_hdcp_status transition(struct mod_hdcp *hdcp,
set_state_id(hdcp, output, HDCP_CP_NOT_DESIRED);
}
else if (is_hdmi_dvi_sl_hdcp(hdcp))
- if (is_cp_desired_hdcp1(hdcp)) {
+ if (is_cp_desired_hdcp2(hdcp)) {
+ callback_in_ms(0, output);
+ set_state_id(hdcp, output, H2_A0_KNOWN_HDCP2_CAPABLE_RX);
+ } else if (is_cp_desired_hdcp1(hdcp)) {
callback_in_ms(0, output);
set_state_id(hdcp, output, H1_A0_WAIT_FOR_ACTIVE_RX);
} else {
@@ -126,6 +165,12 @@ static enum mod_hdcp_status transition(struct mod_hdcp *hdcp,
} else if (is_in_hdcp1_dp_states(hdcp)) {
status = mod_hdcp_hdcp1_dp_transition(hdcp,
event_ctx, &input->hdcp1, output);
+ } else if (is_in_hdcp2_states(hdcp)) {
+ status = mod_hdcp_hdcp2_transition(hdcp,
+ event_ctx, &input->hdcp2, output);
+ } else if (is_in_hdcp2_dp_states(hdcp)) {
+ status = mod_hdcp_hdcp2_dp_transition(hdcp,
+ event_ctx, &input->hdcp2, output);
} else {
status = MOD_HDCP_STATUS_INVALID_STATE;
}
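
Note the ordering in both branches above: HDCP 2.x is tried first and HDCP 1.x is only attempted as the fallback, with CP_NOT_DESIRED as the final default. A minimal sketch of that selection order, using stand-in types (the real decision also folds in retry counts, disable adjustments, and SRM revocation via is_cp_desired_hdcp1/2):

    /* hypothetical reduction of the selection above */
    enum hdcp_choice { CHOOSE_NONE, CHOOSE_HDCP1, CHOOSE_HDCP2 };

    static enum hdcp_choice choose_hdcp(int hdcp2_desired, int hdcp1_desired)
    {
        if (hdcp2_desired)
            return CHOOSE_HDCP2; /* H2_A0/D2_A0 entry states */
        if (hdcp1_desired)
            return CHOOSE_HDCP1; /* H1_A0/D1_A0 entry states */
        return CHOOSE_NONE;      /* HDCP_CP_NOT_DESIRED */
    }
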
@@ -139,9 +184,13 @@ static enum mod_hdcp_status reset_authentication(struct mod_hdcp *hdcp,
enum mod_hdcp_status status = MOD_HDCP_STATUS_SUCCESS;
if (is_hdcp1(hdcp)) {
- if (hdcp->auth.trans_input.hdcp1.create_session != UNKNOWN)
+ if (hdcp->auth.trans_input.hdcp1.create_session != UNKNOWN) {
+ /* TODO - update psp to unify create session failure
+ * recovery between hdcp1 and 2.
+ */
mod_hdcp_hdcp1_destroy_session(hdcp);
+ }
if (hdcp->auth.trans_input.hdcp1.add_topology == PASS) {
status = mod_hdcp_remove_display_topology(hdcp);
if (status != MOD_HDCP_STATUS_SUCCESS) {
@@ -154,6 +203,27 @@ static enum mod_hdcp_status reset_authentication(struct mod_hdcp *hdcp,
memset(&hdcp->auth, 0, sizeof(struct mod_hdcp_authentication));
memset(&hdcp->state, 0, sizeof(struct mod_hdcp_state));
set_state_id(hdcp, output, HDCP_INITIALIZED);
+ } else if (is_hdcp2(hdcp)) {
+ if (hdcp->auth.trans_input.hdcp2.create_session == PASS) {
+ status = mod_hdcp_hdcp2_destroy_session(hdcp);
+ if (status != MOD_HDCP_STATUS_SUCCESS) {
+ output->callback_needed = 0;
+ output->watchdog_timer_needed = 0;
+ goto out;
+ }
+ }
+ if (hdcp->auth.trans_input.hdcp2.add_topology == PASS) {
+ status = mod_hdcp_remove_display_topology(hdcp);
+ if (status != MOD_HDCP_STATUS_SUCCESS) {
+ output->callback_needed = 0;
+ output->watchdog_timer_needed = 0;
+ goto out;
+ }
+ }
+ HDCP_TOP_RESET_AUTH_TRACE(hdcp);
+ memset(&hdcp->auth, 0, sizeof(struct mod_hdcp_authentication));
+ memset(&hdcp->state, 0, sizeof(struct mod_hdcp_state));
+ set_state_id(hdcp, output, HDCP_INITIALIZED);
} else if (is_in_cp_not_desired_state(hdcp)) {
status = mod_hdcp_remove_display_topology(hdcp);
if (status != MOD_HDCP_STATUS_SUCCESS) {
@@ -347,7 +417,20 @@ enum mod_hdcp_status mod_hdcp_query_display(struct mod_hdcp *hdcp,
query->trace = &hdcp->connection.trace;
query->encryption_status = MOD_HDCP_ENCRYPTION_STATUS_HDCP_OFF;
- mod_hdcp_hdcp1_get_link_encryption_status(hdcp, &query->encryption_status);
+ if (is_display_encryption_enabled(display)) {
+ if (is_hdcp1(hdcp)) {
+ query->encryption_status = MOD_HDCP_ENCRYPTION_STATUS_HDCP1_ON;
+ } else if (is_hdcp2(hdcp)) {
+ if (query->link->adjust.hdcp2.force_type == MOD_HDCP_FORCE_TYPE_0)
+ query->encryption_status = MOD_HDCP_ENCRYPTION_STATUS_HDCP2_TYPE0_ON;
+ else if (query->link->adjust.hdcp2.force_type == MOD_HDCP_FORCE_TYPE_1)
+ query->encryption_status = MOD_HDCP_ENCRYPTION_STATUS_HDCP2_TYPE1_ON;
+ else
+ query->encryption_status = MOD_HDCP_ENCRYPTION_STATUS_HDCP2_ON;
+ }
+ } else {
+ query->encryption_status = MOD_HDCP_ENCRYPTION_STATUS_HDCP_OFF;
+ }
out:
return status;
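
The reworked query above reports the encryption level per display rather than assuming HDCP 1.x: off unless the display's encryption is enabled, HDCP1_ON for 1.x, and for 2.x a type-0/type-1 variant when a content type is forced. A compact sketch of the same mapping with stand-in enums (the real names are the MOD_HDCP_ENCRYPTION_STATUS_* values used in the hunk):

    enum force_type { FORCE_NONE, FORCE_TYPE_0, FORCE_TYPE_1 };
    enum enc_status { ENC_OFF, ENC_HDCP1_ON, ENC_HDCP2_ON,
                      ENC_HDCP2_TYPE0_ON, ENC_HDCP2_TYPE1_ON };

    static enum enc_status encryption_status(int enabled, int hdcp1, int hdcp2,
                                             enum force_type force)
    {
        if (!enabled)
            return ENC_OFF;
        if (hdcp1)
            return ENC_HDCP1_ON;
        if (hdcp2)
            return force == FORCE_TYPE_0 ? ENC_HDCP2_TYPE0_ON :
                   force == FORCE_TYPE_1 ? ENC_HDCP2_TYPE1_ON :
                                           ENC_HDCP2_ON;
        return ENC_OFF;
    }
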
@@ -420,7 +503,7 @@ enum mod_hdcp_operation_mode mod_hdcp_signal_type_to_operation_mode(
break;
default:
break;
- };
+ }
return mode;
}
diff --git a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp.h b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp.h
index 5664bc0b5bd0..f98d3d9ecb6d 100644
--- a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp.h
+++ b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp.h
@@ -29,32 +29,8 @@
#include "mod_hdcp.h"
#include "hdcp_log.h"
-#define BCAPS_READY_MASK 0x20
-#define BCAPS_REPEATER_MASK 0x40
-#define BSTATUS_DEVICE_COUNT_MASK 0X007F
-#define BSTATUS_MAX_DEVS_EXCEEDED_MASK 0x0080
-#define BSTATUS_MAX_CASCADE_EXCEEDED_MASK 0x0800
-#define BCAPS_HDCP_CAPABLE_MASK_DP 0x01
-#define BCAPS_REPEATER_MASK_DP 0x02
-#define BSTATUS_READY_MASK_DP 0x01
-#define BSTATUS_R0_P_AVAILABLE_MASK_DP 0x02
-#define BSTATUS_LINK_INTEGRITY_FAILURE_MASK_DP 0x04
-#define BSTATUS_REAUTH_REQUEST_MASK_DP 0x08
-#define BINFO_DEVICE_COUNT_MASK_DP 0X007F
-#define BINFO_MAX_DEVS_EXCEEDED_MASK_DP 0x0080
-#define BINFO_MAX_CASCADE_EXCEEDED_MASK_DP 0x0800
-
-#define RXSTATUS_MSG_SIZE_MASK 0x03FF
-#define RXSTATUS_READY_MASK 0x0400
-#define RXSTATUS_REAUTH_REQUEST_MASK 0x0800
-#define RXIDLIST_DEVICE_COUNT_LOWER_MASK 0xf0
-#define RXIDLIST_DEVICE_COUNT_UPPER_MASK 0x01
-#define RXCAPS_BYTE0_HDCP_CAPABLE_MASK_DP 0x02
-#define RXSTATUS_READY_MASK_DP 0x0001
-#define RXSTATUS_H_P_AVAILABLE_MASK_DP 0x0002
-#define RXSTATUS_PAIRING_AVAILABLE_MASK_DP 0x0004
-#define RXSTATUS_REAUTH_REQUEST_MASK_DP 0x0008
-#define RXSTATUS_LINK_INTEGRITY_FAILURE_MASK_DP 0x0010
+#include <drm/drm_hdcp.h>
+#include <drm/drm_dp_helper.h>
enum mod_hdcp_trans_input_result {
UNKNOWN = 0,
@@ -92,8 +68,52 @@ struct mod_hdcp_transition_input_hdcp1 {
uint8_t stream_encryption_dp;
};
+struct mod_hdcp_transition_input_hdcp2 {
+ uint8_t hdcp2version_read;
+ uint8_t hdcp2_capable_check;
+ uint8_t add_topology;
+ uint8_t create_session;
+ uint8_t ake_init_prepare;
+ uint8_t ake_init_write;
+ uint8_t rxstatus_read;
+ uint8_t ake_cert_available;
+ uint8_t ake_cert_read;
+ uint8_t ake_cert_validation;
+ uint8_t stored_km_write;
+ uint8_t no_stored_km_write;
+ uint8_t h_prime_available;
+ uint8_t h_prime_read;
+ uint8_t pairing_available;
+ uint8_t pairing_info_read;
+ uint8_t h_prime_validation;
+ uint8_t lc_init_prepare;
+ uint8_t lc_init_write;
+ uint8_t l_prime_available_poll;
+ uint8_t l_prime_read;
+ uint8_t l_prime_validation;
+ uint8_t eks_prepare;
+ uint8_t eks_write;
+ uint8_t enable_encryption;
+ uint8_t reauth_request_check;
+ uint8_t rx_id_list_read;
+ uint8_t device_count_check;
+ uint8_t rx_id_list_validation;
+ uint8_t repeater_auth_ack_write;
+ uint8_t prepare_stream_manage;
+ uint8_t stream_manage_write;
+ uint8_t stream_ready_available;
+ uint8_t stream_ready_read;
+ uint8_t stream_ready_validation;
+
+ uint8_t rx_caps_read_dp;
+ uint8_t content_stream_type_write;
+ uint8_t link_integrity_check_dp;
+ uint8_t stream_encryption_dp;
+};
+
union mod_hdcp_transition_input {
struct mod_hdcp_transition_input_hdcp1 hdcp1;
+ struct mod_hdcp_transition_input_hdcp2 hdcp2;
};
struct mod_hdcp_message_hdcp1 {
@@ -111,8 +131,33 @@ struct mod_hdcp_message_hdcp1 {
uint16_t binfo_dp;
};
+struct mod_hdcp_message_hdcp2 {
+ uint8_t hdcp2version_hdmi;
+ uint8_t rxcaps_dp[3];
+ uint8_t rxstatus[2];
+
+ uint8_t ake_init[12];
+ uint8_t ake_cert[534];
+ uint8_t ake_no_stored_km[129];
+ uint8_t ake_stored_km[33];
+ uint8_t ake_h_prime[33];
+ uint8_t ake_pairing_info[17];
+ uint8_t lc_init[9];
+ uint8_t lc_l_prime[33];
+ uint8_t ske_eks[25];
+ uint8_t rx_id_list[177]; // 22 + 5 * 31
+ uint16_t rx_id_list_size;
+ uint8_t repeater_auth_ack[17];
+ uint8_t repeater_auth_stream_manage[68]; // 6 + 2 * 31
+ uint16_t stream_manage_size;
+ uint8_t repeater_auth_stream_ready[33];
+ uint8_t rxstatus_dp;
+ uint8_t content_stream_type_dp[2];
+};
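
The fixed buffer sizes above follow from the HDCP 2.2 wire format and its 31-device repeater limit, and the two commented sizes check out. A sketch of the arithmetic (the per-field byte counts are our reading of the spec, not part of this patch):

    #include <assert.h>

    #define HDCP_2_2_MAX_DEVICE_COUNT 31
    #define RX_ID_LIST_HDR   22 /* msg_id + RxInfo + seq_num_V + V' (128 bits) */
    #define RX_ID_LEN         5 /* one receiver ID */
    #define STREAM_MANAGE_HDR 6 /* msg_id + seq_num_M + k */
    #define STREAM_ID_TYPE    2 /* one streamID_Type pair */

    static_assert(RX_ID_LIST_HDR +
                  RX_ID_LEN * HDCP_2_2_MAX_DEVICE_COUNT == 177,
                  "matches rx_id_list[177]");
    static_assert(STREAM_MANAGE_HDR +
                  STREAM_ID_TYPE * HDCP_2_2_MAX_DEVICE_COUNT == 68,
                  "matches repeater_auth_stream_manage[68]");
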
+
union mod_hdcp_message {
struct mod_hdcp_message_hdcp1 hdcp1;
+ struct mod_hdcp_message_hdcp2 hdcp2;
};
struct mod_hdcp_auth_counters {
@@ -125,8 +170,10 @@ struct mod_hdcp_connection {
struct mod_hdcp_display displays[MAX_NUM_OF_DISPLAYS];
uint8_t is_repeater;
uint8_t is_km_stored;
+ uint8_t is_hdcp2_revoked;
struct mod_hdcp_trace trace;
uint8_t hdcp1_retry_count;
+ uint8_t hdcp2_retry_count;
};
/* contains values per authentication cycle */
@@ -194,6 +241,50 @@ enum mod_hdcp_hdcp1_dp_state_id {
HDCP1_DP_STATE_END = D1_A7_READ_KSV_LIST,
};
+enum mod_hdcp_hdcp2_state_id {
+ HDCP2_STATE_START = HDCP1_DP_STATE_END,
+ H2_A0_KNOWN_HDCP2_CAPABLE_RX,
+ H2_A1_SEND_AKE_INIT,
+ H2_A1_VALIDATE_AKE_CERT,
+ H2_A1_SEND_NO_STORED_KM,
+ H2_A1_READ_H_PRIME,
+ H2_A1_READ_PAIRING_INFO_AND_VALIDATE_H_PRIME,
+ H2_A1_SEND_STORED_KM,
+ H2_A1_VALIDATE_H_PRIME,
+ H2_A2_LOCALITY_CHECK,
+ H2_A3_EXCHANGE_KS_AND_TEST_FOR_REPEATER,
+ H2_ENABLE_ENCRYPTION,
+ H2_A5_AUTHENTICATED,
+ H2_A6_WAIT_FOR_RX_ID_LIST,
+ H2_A78_VERIFY_RX_ID_LIST_AND_SEND_ACK,
+ H2_A9_SEND_STREAM_MANAGEMENT,
+ H2_A9_VALIDATE_STREAM_READY,
+ HDCP2_STATE_END = H2_A9_VALIDATE_STREAM_READY,
+};
+
+enum mod_hdcp_hdcp2_dp_state_id {
+ HDCP2_DP_STATE_START = HDCP2_STATE_END,
+ D2_A0_DETERMINE_RX_HDCP_CAPABLE,
+ D2_A1_SEND_AKE_INIT,
+ D2_A1_VALIDATE_AKE_CERT,
+ D2_A1_SEND_NO_STORED_KM,
+ D2_A1_READ_H_PRIME,
+ D2_A1_READ_PAIRING_INFO_AND_VALIDATE_H_PRIME,
+ D2_A1_SEND_STORED_KM,
+ D2_A1_VALIDATE_H_PRIME,
+ D2_A2_LOCALITY_CHECK,
+ D2_A34_EXCHANGE_KS_AND_TEST_FOR_REPEATER,
+ D2_SEND_CONTENT_STREAM_TYPE,
+ D2_ENABLE_ENCRYPTION,
+ D2_A5_AUTHENTICATED,
+ D2_A6_WAIT_FOR_RX_ID_LIST,
+ D2_A78_VERIFY_RX_ID_LIST_AND_SEND_ACK,
+ D2_A9_SEND_STREAM_MANAGEMENT,
+ D2_A9_VALIDATE_STREAM_READY,
+ HDCP2_DP_STATE_END = D2_A9_VALIDATE_STREAM_READY,
+ HDCP_STATE_END = HDCP2_DP_STATE_END,
+};
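
Because each enum block above starts where the previous one ends (HDCP2_STATE_START = HDCP1_DP_STATE_END, and so on), every state across all four protocols gets a unique integer, and a state can be classified with a simple range test using an exclusive lower bound. A toy illustration of the pattern:

    enum { A_START, A_1, A_2, A_END = A_2 };
    enum { B_START = A_END, B_1, B_2, B_END = B_2 };

    static const char *classify(int state)
    {
        /* exclusive start keeps the shared boundary value out of block B */
        if (state > A_START && state <= A_END)
            return "block A";
        if (state > B_START && state <= B_END)
            return "block B";
        return "unknown";
    }
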
+
/* hdcp1 executions and transitions */
typedef enum mod_hdcp_status (*mod_hdcp_action)(struct mod_hdcp *hdcp);
uint8_t mod_hdcp_execute_and_set(
@@ -214,6 +305,22 @@ enum mod_hdcp_status mod_hdcp_hdcp1_dp_transition(struct mod_hdcp *hdcp,
struct mod_hdcp_transition_input_hdcp1 *input,
struct mod_hdcp_output *output);
+/* hdcp2 executions and transitions */
+enum mod_hdcp_status mod_hdcp_hdcp2_execution(struct mod_hdcp *hdcp,
+ struct mod_hdcp_event_context *event_ctx,
+ struct mod_hdcp_transition_input_hdcp2 *input);
+enum mod_hdcp_status mod_hdcp_hdcp2_dp_execution(struct mod_hdcp *hdcp,
+ struct mod_hdcp_event_context *event_ctx,
+ struct mod_hdcp_transition_input_hdcp2 *input);
+enum mod_hdcp_status mod_hdcp_hdcp2_transition(struct mod_hdcp *hdcp,
+ struct mod_hdcp_event_context *event_ctx,
+ struct mod_hdcp_transition_input_hdcp2 *input,
+ struct mod_hdcp_output *output);
+enum mod_hdcp_status mod_hdcp_hdcp2_dp_transition(struct mod_hdcp *hdcp,
+ struct mod_hdcp_event_context *event_ctx,
+ struct mod_hdcp_transition_input_hdcp2 *input,
+ struct mod_hdcp_output *output);
+
/* log functions */
void mod_hdcp_dump_binary_message(uint8_t *msg, uint32_t msg_size,
uint8_t *buf, uint32_t buf_size);
@@ -234,6 +341,25 @@ enum mod_hdcp_status mod_hdcp_hdcp1_enable_dp_stream_encryption(
enum mod_hdcp_status mod_hdcp_hdcp1_link_maintenance(struct mod_hdcp *hdcp);
enum mod_hdcp_status mod_hdcp_hdcp1_get_link_encryption_status(struct mod_hdcp *hdcp,
enum mod_hdcp_encryption_status *encryption_status);
+enum mod_hdcp_status mod_hdcp_hdcp2_create_session(struct mod_hdcp *hdcp);
+enum mod_hdcp_status mod_hdcp_hdcp2_destroy_session(struct mod_hdcp *hdcp);
+enum mod_hdcp_status mod_hdcp_hdcp2_prepare_ake_init(struct mod_hdcp *hdcp);
+enum mod_hdcp_status mod_hdcp_hdcp2_validate_ake_cert(struct mod_hdcp *hdcp);
+enum mod_hdcp_status mod_hdcp_hdcp2_validate_h_prime(struct mod_hdcp *hdcp);
+enum mod_hdcp_status mod_hdcp_hdcp2_prepare_lc_init(struct mod_hdcp *hdcp);
+enum mod_hdcp_status mod_hdcp_hdcp2_validate_l_prime(struct mod_hdcp *hdcp);
+enum mod_hdcp_status mod_hdcp_hdcp2_prepare_eks(struct mod_hdcp *hdcp);
+enum mod_hdcp_status mod_hdcp_hdcp2_enable_encryption(struct mod_hdcp *hdcp);
+enum mod_hdcp_status mod_hdcp_hdcp2_validate_rx_id_list(struct mod_hdcp *hdcp);
+enum mod_hdcp_status mod_hdcp_hdcp2_enable_dp_stream_encryption(
+ struct mod_hdcp *hdcp);
+enum mod_hdcp_status mod_hdcp_hdcp2_prepare_stream_management(
+ struct mod_hdcp *hdcp);
+enum mod_hdcp_status mod_hdcp_hdcp2_validate_stream_ready(
+ struct mod_hdcp *hdcp);
+enum mod_hdcp_status mod_hdcp_hdcp2_get_link_encryption_status(struct mod_hdcp *hdcp,
+ enum mod_hdcp_encryption_status *encryption_status);
+
/* ddc functions */
enum mod_hdcp_status mod_hdcp_read_bksv(struct mod_hdcp *hdcp);
enum mod_hdcp_status mod_hdcp_read_bcaps(struct mod_hdcp *hdcp);
@@ -245,6 +371,7 @@ enum mod_hdcp_status mod_hdcp_read_binfo(struct mod_hdcp *hdcp);
enum mod_hdcp_status mod_hdcp_write_aksv(struct mod_hdcp *hdcp);
enum mod_hdcp_status mod_hdcp_write_ainfo(struct mod_hdcp *hdcp);
enum mod_hdcp_status mod_hdcp_write_an(struct mod_hdcp *hdcp);
+enum mod_hdcp_status mod_hdcp_read_hdcp2version(struct mod_hdcp *hdcp);
enum mod_hdcp_status mod_hdcp_read_rxcaps(struct mod_hdcp *hdcp);
enum mod_hdcp_status mod_hdcp_read_rxstatus(struct mod_hdcp *hdcp);
enum mod_hdcp_status mod_hdcp_read_ake_cert(struct mod_hdcp *hdcp);
@@ -308,11 +435,28 @@ static inline uint8_t is_in_hdcp1_dp_states(struct mod_hdcp *hdcp)
current_state(hdcp) <= HDCP1_DP_STATE_END);
}
+static inline uint8_t is_in_hdcp2_states(struct mod_hdcp *hdcp)
+{
+ return (current_state(hdcp) > HDCP2_STATE_START &&
+ current_state(hdcp) <= HDCP2_STATE_END);
+}
+
+static inline uint8_t is_in_hdcp2_dp_states(struct mod_hdcp *hdcp)
+{
+ return (current_state(hdcp) > HDCP2_DP_STATE_START &&
+ current_state(hdcp) <= HDCP2_DP_STATE_END);
+}
+
static inline uint8_t is_hdcp1(struct mod_hdcp *hdcp)
{
return (is_in_hdcp1_states(hdcp) || is_in_hdcp1_dp_states(hdcp));
}
+static inline uint8_t is_hdcp2(struct mod_hdcp *hdcp)
+{
+ return (is_in_hdcp2_states(hdcp) || is_in_hdcp2_dp_states(hdcp));
+}
+
static inline uint8_t is_in_cp_not_desired_state(struct mod_hdcp *hdcp)
{
return current_state(hdcp) == HDCP_CP_NOT_DESIRED;
@@ -437,6 +581,7 @@ static inline struct mod_hdcp_display *get_empty_display_container(
static inline void reset_retry_counts(struct mod_hdcp *hdcp)
{
hdcp->connection.hdcp1_retry_count = 0;
+ hdcp->connection.hdcp2_retry_count = 0;
}
#endif /* HDCP_H_ */
diff --git a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp1_execution.c b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp1_execution.c
index 3db4a7da414f..04845e43df15 100644
--- a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp1_execution.c
+++ b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp1_execution.c
@@ -27,9 +27,11 @@
static inline enum mod_hdcp_status validate_bksv(struct mod_hdcp *hdcp)
{
- uint64_t n = *(uint64_t *)hdcp->auth.msg.hdcp1.bksv;
+ uint64_t n = 0;
uint8_t count = 0;
+ memcpy(&n, hdcp->auth.msg.hdcp1.bksv, sizeof(uint64_t));
+
while (n) {
count++;
n &= (n - 1);
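
Two things happen in this hunk: the KSV is now loaded with memcpy because bksv is a byte array with no alignment guarantee (the old cast to uint64_t * can fault on strict-alignment machines and violates strict aliasing), and the loop counts set bits with the n &= (n - 1) trick, which clears the lowest set bit per iteration. HDCP 1.x requires a valid KSV to contain exactly 20 ones. A standalone sketch of the check:

    #include <stdint.h>
    #include <string.h>

    /* a valid HDCP 1.x KSV has exactly 20 of its 40 bits set */
    static int bksv_is_valid(const uint8_t bksv[5])
    {
        uint64_t n = 0;
        int count = 0;

        memcpy(&n, bksv, 5); /* safe regardless of bksv's alignment */

        while (n) {          /* clear one set bit per iteration */
            count++;
            n &= n - 1;
        }
        return count == 20;
    }
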
@@ -41,17 +43,17 @@ static inline enum mod_hdcp_status validate_bksv(struct mod_hdcp *hdcp)
static inline enum mod_hdcp_status check_ksv_ready(struct mod_hdcp *hdcp)
{
if (is_dp_hdcp(hdcp))
- return (hdcp->auth.msg.hdcp1.bstatus & BSTATUS_READY_MASK_DP) ?
+ return (hdcp->auth.msg.hdcp1.bstatus & DP_BSTATUS_READY) ?
MOD_HDCP_STATUS_SUCCESS :
MOD_HDCP_STATUS_HDCP1_KSV_LIST_NOT_READY;
- return (hdcp->auth.msg.hdcp1.bcaps & BCAPS_READY_MASK) ?
+ return (hdcp->auth.msg.hdcp1.bcaps & DRM_HDCP_DDC_BCAPS_KSV_FIFO_READY) ?
MOD_HDCP_STATUS_SUCCESS :
MOD_HDCP_STATUS_HDCP1_KSV_LIST_NOT_READY;
}
static inline enum mod_hdcp_status check_hdcp_capable_dp(struct mod_hdcp *hdcp)
{
- return (hdcp->auth.msg.hdcp1.bcaps & BCAPS_HDCP_CAPABLE_MASK_DP) ?
+ return (hdcp->auth.msg.hdcp1.bcaps & DP_BCAPS_HDCP_CAPABLE) ?
MOD_HDCP_STATUS_SUCCESS :
MOD_HDCP_STATUS_HDCP1_NOT_CAPABLE;
}
@@ -61,7 +63,7 @@ static inline enum mod_hdcp_status check_r0p_available_dp(struct mod_hdcp *hdcp)
enum mod_hdcp_status status;
if (is_dp_hdcp(hdcp)) {
status = (hdcp->auth.msg.hdcp1.bstatus &
- BSTATUS_R0_P_AVAILABLE_MASK_DP) ?
+ DP_BSTATUS_R0_PRIME_READY) ?
MOD_HDCP_STATUS_SUCCESS :
MOD_HDCP_STATUS_HDCP1_R0_PRIME_PENDING;
} else {
@@ -74,7 +76,7 @@ static inline enum mod_hdcp_status check_link_integrity_dp(
struct mod_hdcp *hdcp)
{
return (hdcp->auth.msg.hdcp1.bstatus &
- BSTATUS_LINK_INTEGRITY_FAILURE_MASK_DP) ?
+ DP_BSTATUS_LINK_FAILURE) ?
MOD_HDCP_STATUS_HDCP1_LINK_INTEGRITY_FAILURE :
MOD_HDCP_STATUS_SUCCESS;
}
@@ -82,7 +84,7 @@ static inline enum mod_hdcp_status check_link_integrity_dp(
static inline enum mod_hdcp_status check_no_reauthentication_request_dp(
struct mod_hdcp *hdcp)
{
- return (hdcp->auth.msg.hdcp1.bstatus & BSTATUS_REAUTH_REQUEST_MASK_DP) ?
+ return (hdcp->auth.msg.hdcp1.bstatus & DP_BSTATUS_REAUTH_REQ) ?
MOD_HDCP_STATUS_HDCP1_REAUTH_REQUEST_ISSUED :
MOD_HDCP_STATUS_SUCCESS;
}
@@ -92,15 +94,13 @@ static inline enum mod_hdcp_status check_no_max_cascade(struct mod_hdcp *hdcp)
enum mod_hdcp_status status;
if (is_dp_hdcp(hdcp))
- status = (hdcp->auth.msg.hdcp1.binfo_dp &
- BINFO_MAX_CASCADE_EXCEEDED_MASK_DP) ?
- MOD_HDCP_STATUS_HDCP1_MAX_CASCADE_EXCEEDED_FAILURE :
- MOD_HDCP_STATUS_SUCCESS;
+ status = DRM_HDCP_MAX_CASCADE_EXCEEDED(hdcp->auth.msg.hdcp1.binfo_dp >> 8)
+ ? MOD_HDCP_STATUS_HDCP1_MAX_CASCADE_EXCEEDED_FAILURE
+ : MOD_HDCP_STATUS_SUCCESS;
else
- status = (hdcp->auth.msg.hdcp1.bstatus &
- BSTATUS_MAX_CASCADE_EXCEEDED_MASK) ?
- MOD_HDCP_STATUS_HDCP1_MAX_CASCADE_EXCEEDED_FAILURE :
- MOD_HDCP_STATUS_SUCCESS;
+ status = DRM_HDCP_MAX_CASCADE_EXCEEDED(hdcp->auth.msg.hdcp1.bstatus >> 8)
+ ? MOD_HDCP_STATUS_HDCP1_MAX_CASCADE_EXCEEDED_FAILURE
+ : MOD_HDCP_STATUS_SUCCESS;
return status;
}
@@ -109,13 +109,11 @@ static inline enum mod_hdcp_status check_no_max_devs(struct mod_hdcp *hdcp)
enum mod_hdcp_status status;
if (is_dp_hdcp(hdcp))
- status = (hdcp->auth.msg.hdcp1.binfo_dp &
- BINFO_MAX_DEVS_EXCEEDED_MASK_DP) ?
+ status = DRM_HDCP_MAX_DEVICE_EXCEEDED(hdcp->auth.msg.hdcp1.binfo_dp) ?
MOD_HDCP_STATUS_HDCP1_MAX_DEVS_EXCEEDED_FAILURE :
MOD_HDCP_STATUS_SUCCESS;
else
- status = (hdcp->auth.msg.hdcp1.bstatus &
- BSTATUS_MAX_DEVS_EXCEEDED_MASK) ?
+ status = DRM_HDCP_MAX_DEVICE_EXCEEDED(hdcp->auth.msg.hdcp1.bstatus) ?
MOD_HDCP_STATUS_HDCP1_MAX_DEVS_EXCEEDED_FAILURE :
MOD_HDCP_STATUS_SUCCESS;
return status;
@@ -124,8 +122,8 @@ static inline enum mod_hdcp_status check_no_max_devs(struct mod_hdcp *hdcp)
static inline uint8_t get_device_count(struct mod_hdcp *hdcp)
{
return is_dp_hdcp(hdcp) ?
- (hdcp->auth.msg.hdcp1.binfo_dp & BINFO_DEVICE_COUNT_MASK_DP) :
- (hdcp->auth.msg.hdcp1.bstatus & BSTATUS_DEVICE_COUNT_MASK);
+ DRM_HDCP_NUM_DOWNSTREAM(hdcp->auth.msg.hdcp1.binfo_dp) :
+ DRM_HDCP_NUM_DOWNSTREAM(hdcp->auth.msg.hdcp1.bstatus);
}
static inline enum mod_hdcp_status check_device_count(struct mod_hdcp *hdcp)
diff --git a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp1_transition.c b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp1_transition.c
index 136b8011ff3f..21ebc62bb9d9 100644
--- a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp1_transition.c
+++ b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp1_transition.c
@@ -67,11 +67,19 @@ enum mod_hdcp_status mod_hdcp_hdcp1_transition(struct mod_hdcp *hdcp,
break;
case H1_A2_COMPUTATIONS_A3_VALIDATE_RX_A6_TEST_FOR_REPEATER:
if (input->bcaps_read != PASS ||
- input->r0p_read != PASS ||
- input->rx_validation != PASS ||
- (!conn->is_repeater && input->encryption != PASS)) {
+ input->r0p_read != PASS) {
+ fail_and_restart_in_ms(0, &status, output);
+ break;
+ } else if (input->rx_validation != PASS) {
/* 1A-06: consider invalid r0' a failure */
/* 1A-08: consider bksv listed in SRM a failure */
+ /*
+ * Some slow RX will fail rx validation when it is
+ * not yet ready; give it more time to react before
+ * retrying.
+ */
+ fail_and_restart_in_ms(1000, &status, output);
+ break;
+ } else if (!conn->is_repeater && input->encryption != PASS) {
fail_and_restart_in_ms(0, &status, output);
break;
}
@@ -212,7 +220,11 @@ enum mod_hdcp_status mod_hdcp_hdcp1_dp_transition(struct mod_hdcp *hdcp,
* after 3 attempts.
* 1A-08: consider bksv listed in SRM a failure
*/
- fail_and_restart_in_ms(0, &status, output);
+ /*
+ * Some slow RX will fail rx validation when it is
+ * not yet ready; give it more time to react before
+ * retrying.
+ */
+ fail_and_restart_in_ms(1000, &status, output);
}
break;
} else if ((!conn->is_repeater && input->encryption != PASS) ||
diff --git a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp2_execution.c b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp2_execution.c
new file mode 100644
index 000000000000..f730b94ac3c0
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp2_execution.c
@@ -0,0 +1,886 @@
+/*
+ * Copyright 2018 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include <linux/delay.h>
+
+#include "hdcp.h"
+
+static inline enum mod_hdcp_status check_receiver_id_list_ready(struct mod_hdcp *hdcp)
+{
+ uint8_t is_ready = 0;
+
+ if (is_dp_hdcp(hdcp))
+ is_ready = HDCP_2_2_DP_RXSTATUS_READY(hdcp->auth.msg.hdcp2.rxstatus_dp) ? 1 : 0;
+ else
+ is_ready = (HDCP_2_2_HDMI_RXSTATUS_READY(hdcp->auth.msg.hdcp2.rxstatus[1]) &&
+ (HDCP_2_2_HDMI_RXSTATUS_MSG_SZ_HI(hdcp->auth.msg.hdcp2.rxstatus[1]) << 8 |
+ hdcp->auth.msg.hdcp2.rxstatus[0])) ? 1 : 0;
+ return is_ready ? MOD_HDCP_STATUS_SUCCESS :
+ MOD_HDCP_STATUS_HDCP2_RX_ID_LIST_NOT_READY;
+}
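
Everything in this file that touches rxstatus leans on the two-byte HDMI RxStatus layout from drm_hdcp.h: byte 0 holds message-size bits 7:0, and byte 1 holds size bits 9:8 plus the READY and REAUTH_REQ flags. A small sketch of the field extraction (macro values mirror drm_hdcp.h):

    #include <stdint.h>

    #define RXSTATUS_MSG_SZ_HI(x)  ((x) & 0x3)      /* size bits 9:8 */
    #define RXSTATUS_READY(x)      ((x) & (1 << 2))
    #define RXSTATUS_REAUTH_REQ(x) ((x) & (1 << 3))

    static uint16_t rxstatus_msg_size(const uint8_t rxstatus[2])
    {
        /* a 10-bit size: keep it in a 16-bit variable, not uint8_t */
        return (uint16_t)(RXSTATUS_MSG_SZ_HI(rxstatus[1]) << 8) | rxstatus[0];
    }
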
+
+static inline enum mod_hdcp_status check_hdcp2_capable(struct mod_hdcp *hdcp)
+{
+ enum mod_hdcp_status status;
+
+ if (is_dp_hdcp(hdcp))
+ status = (hdcp->auth.msg.hdcp2.rxcaps_dp[2] & HDCP_2_2_RX_CAPS_VERSION_VAL) &&
+ HDCP_2_2_DP_HDCP_CAPABLE(hdcp->auth.msg.hdcp2.rxcaps_dp[0]) ?
+ MOD_HDCP_STATUS_SUCCESS :
+ MOD_HDCP_STATUS_HDCP2_NOT_CAPABLE;
+ else
+ status = (hdcp->auth.msg.hdcp2.hdcp2version_hdmi & HDCP_2_2_HDMI_SUPPORT_MASK) ?
+ MOD_HDCP_STATUS_SUCCESS :
+ MOD_HDCP_STATUS_HDCP2_NOT_CAPABLE;
+ return status;
+}
+
+static inline enum mod_hdcp_status check_reauthentication_request(
+ struct mod_hdcp *hdcp)
+{
+ enum mod_hdcp_status ret = MOD_HDCP_STATUS_SUCCESS;
+
+ if (is_dp_hdcp(hdcp))
+ ret = HDCP_2_2_DP_RXSTATUS_REAUTH_REQ(hdcp->auth.msg.hdcp2.rxstatus_dp) ?
+ MOD_HDCP_STATUS_HDCP2_REAUTH_REQUEST :
+ MOD_HDCP_STATUS_SUCCESS;
+ else
+ ret = HDCP_2_2_HDMI_RXSTATUS_REAUTH_REQ(hdcp->auth.msg.hdcp2.rxstatus[1]) ?
+ MOD_HDCP_STATUS_HDCP2_REAUTH_REQUEST :
+ MOD_HDCP_STATUS_SUCCESS;
+ return ret;
+}
+
+static inline enum mod_hdcp_status check_link_integrity_failure_dp(
+ struct mod_hdcp *hdcp)
+{
+ return HDCP_2_2_DP_RXSTATUS_LINK_FAILED(hdcp->auth.msg.hdcp2.rxstatus_dp) ?
+ MOD_HDCP_STATUS_HDCP2_REAUTH_LINK_INTEGRITY_FAILURE :
+ MOD_HDCP_STATUS_SUCCESS;
+}
+
+static enum mod_hdcp_status check_ake_cert_available(struct mod_hdcp *hdcp)
+{
+ enum mod_hdcp_status status;
+ uint16_t size;
+
+ if (is_dp_hdcp(hdcp)) {
+ status = MOD_HDCP_STATUS_SUCCESS;
+ } else {
+ status = mod_hdcp_read_rxstatus(hdcp);
+ if (status == MOD_HDCP_STATUS_SUCCESS) {
+ size = HDCP_2_2_HDMI_RXSTATUS_MSG_SZ_HI(hdcp->auth.msg.hdcp2.rxstatus[1]) << 8 |
+ hdcp->auth.msg.hdcp2.rxstatus[0];
+ status = (size == sizeof(hdcp->auth.msg.hdcp2.ake_cert)) ?
+ MOD_HDCP_STATUS_SUCCESS :
+ MOD_HDCP_STATUS_HDCP2_AKE_CERT_PENDING;
+ }
+ }
+ return status;
+}
+
+static enum mod_hdcp_status check_h_prime_available(struct mod_hdcp *hdcp)
+{
+ enum mod_hdcp_status status;
+ uint16_t size; /* rxstatus message size is 10 bits */
+
+ status = mod_hdcp_read_rxstatus(hdcp);
+ if (status != MOD_HDCP_STATUS_SUCCESS)
+ goto out;
+
+ if (is_dp_hdcp(hdcp)) {
+ status = HDCP_2_2_DP_RXSTATUS_H_PRIME(hdcp->auth.msg.hdcp2.rxstatus_dp) ?
+ MOD_HDCP_STATUS_SUCCESS :
+ MOD_HDCP_STATUS_HDCP2_H_PRIME_PENDING;
+ } else {
+ size = HDCP_2_2_HDMI_RXSTATUS_MSG_SZ_HI(hdcp->auth.msg.hdcp2.rxstatus[1]) << 8 |
+ hdcp->auth.msg.hdcp2.rxstatus[0];
+ status = (size == sizeof(hdcp->auth.msg.hdcp2.ake_h_prime)) ?
+ MOD_HDCP_STATUS_SUCCESS :
+ MOD_HDCP_STATUS_HDCP2_H_PRIME_PENDING;
+ }
+out:
+ return status;
+}
+
+static enum mod_hdcp_status check_pairing_info_available(struct mod_hdcp *hdcp)
+{
+ enum mod_hdcp_status status;
+ uint16_t size; /* rxstatus message size is 10 bits */
+
+ status = mod_hdcp_read_rxstatus(hdcp);
+ if (status != MOD_HDCP_STATUS_SUCCESS)
+ goto out;
+
+ if (is_dp_hdcp(hdcp)) {
+ status = HDCP_2_2_DP_RXSTATUS_PAIRING(hdcp->auth.msg.hdcp2.rxstatus_dp) ?
+ MOD_HDCP_STATUS_SUCCESS :
+ MOD_HDCP_STATUS_HDCP2_PAIRING_INFO_PENDING;
+ } else {
+ size = HDCP_2_2_HDMI_RXSTATUS_MSG_SZ_HI(hdcp->auth.msg.hdcp2.rxstatus[1]) << 8 |
+ hdcp->auth.msg.hdcp2.rxstatus[0];
+ status = (size == sizeof(hdcp->auth.msg.hdcp2.ake_pairing_info)) ?
+ MOD_HDCP_STATUS_SUCCESS :
+ MOD_HDCP_STATUS_HDCP2_PAIRING_INFO_PENDING;
+ }
+out:
+ return status;
+}
+
+static enum mod_hdcp_status poll_l_prime_available(struct mod_hdcp *hdcp)
+{
+ enum mod_hdcp_status status;
+ uint16_t size; /* rxstatus message size is 10 bits */
+ uint16_t max_wait = 20; // units of ms
+ uint16_t num_polls = 5;
+ uint16_t wait_time = max_wait / num_polls;
+
+ if (is_dp_hdcp(hdcp))
+ status = MOD_HDCP_STATUS_INVALID_OPERATION;
+ else
+ for (; num_polls; num_polls--) {
+ msleep(wait_time);
+
+ status = mod_hdcp_read_rxstatus(hdcp);
+ if (status != MOD_HDCP_STATUS_SUCCESS)
+ break;
+
+ size = HDCP_2_2_HDMI_RXSTATUS_MSG_SZ_HI(hdcp->auth.msg.hdcp2.rxstatus[1]) << 8 |
+ hdcp->auth.msg.hdcp2.rxstatus[0];
+ status = (size == sizeof(hdcp->auth.msg.hdcp2.lc_l_prime)) ?
+ MOD_HDCP_STATUS_SUCCESS :
+ MOD_HDCP_STATUS_HDCP2_L_PRIME_PENDING;
+ if (status == MOD_HDCP_STATUS_SUCCESS)
+ break;
+ }
+ return status;
+}
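
poll_l_prime_available splits a 20 ms budget into five sleeps and bails out early once the expected message size appears. The same pattern, generalized into a user-space sketch with a hypothetical poll callback:

    #include <unistd.h>

    /* split a wait budget into num_polls sleeps; stop early on success */
    static int poll_with_budget(int (*poll_fn)(void *), void *ctx,
                                unsigned int budget_ms, unsigned int num_polls)
    {
        unsigned int wait_ms = budget_ms / num_polls;

        while (num_polls--) {
            usleep(wait_ms * 1000);
            if (poll_fn(ctx))
                return 1; /* condition met */
        }
        return 0;         /* budget exhausted */
    }
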
+
+static enum mod_hdcp_status check_stream_ready_available(struct mod_hdcp *hdcp)
+{
+ enum mod_hdcp_status status;
+ uint16_t size; /* rxstatus message size is 10 bits */
+
+ if (is_dp_hdcp(hdcp)) {
+ status = MOD_HDCP_STATUS_INVALID_OPERATION;
+ } else {
+ status = mod_hdcp_read_rxstatus(hdcp);
+ if (status != MOD_HDCP_STATUS_SUCCESS)
+ goto out;
+ size = HDCP_2_2_HDMI_RXSTATUS_MSG_SZ_HI(hdcp->auth.msg.hdcp2.rxstatus[1]) << 8 |
+ hdcp->auth.msg.hdcp2.rxstatus[0];
+ status = (size == sizeof(hdcp->auth.msg.hdcp2.repeater_auth_stream_ready)) ?
+ MOD_HDCP_STATUS_SUCCESS :
+ MOD_HDCP_STATUS_HDCP2_STREAM_READY_PENDING;
+ }
+out:
+ return status;
+}
+
+static inline uint8_t get_device_count(struct mod_hdcp *hdcp)
+{
+ return HDCP_2_2_DEV_COUNT_LO(hdcp->auth.msg.hdcp2.rx_id_list[2]) +
+ (HDCP_2_2_DEV_COUNT_HI(hdcp->auth.msg.hdcp2.rx_id_list[1]) << 4);
+}
+
+static enum mod_hdcp_status check_device_count(struct mod_hdcp *hdcp)
+{
+ /* device count must be greater than or equal to tracked hdcp displays */
+ return (get_device_count(hdcp) < get_added_display_count(hdcp)) ?
+ MOD_HDCP_STATUS_HDCP2_DEVICE_COUNT_MISMATCH_FAILURE :
+ MOD_HDCP_STATUS_SUCCESS;
+}
+
+static uint8_t process_rxstatus(struct mod_hdcp *hdcp,
+ struct mod_hdcp_event_context *event_ctx,
+ struct mod_hdcp_transition_input_hdcp2 *input,
+ enum mod_hdcp_status *status)
+{
+ if (!mod_hdcp_execute_and_set(mod_hdcp_read_rxstatus,
+ &input->rxstatus_read, status,
+ hdcp, "rxstatus_read"))
+ goto out;
+ if (!mod_hdcp_execute_and_set(check_reauthentication_request,
+ &input->reauth_request_check, status,
+ hdcp, "reauth_request_check"))
+ goto out;
+ if (is_dp_hdcp(hdcp)) {
+ if (!mod_hdcp_execute_and_set(check_link_integrity_failure_dp,
+ &input->link_integrity_check_dp, status,
+ hdcp, "link_integrity_check_dp"))
+ goto out;
+ }
+ if (hdcp->connection.is_repeater)
+ if (check_receiver_id_list_ready(hdcp) ==
+ MOD_HDCP_STATUS_SUCCESS) {
+ HDCP_INPUT_PASS_TRACE(hdcp, "rx_id_list_ready");
+ event_ctx->rx_id_list_ready = 1;
+ if (is_dp_hdcp(hdcp))
+ hdcp->auth.msg.hdcp2.rx_id_list_size =
+ sizeof(hdcp->auth.msg.hdcp2.rx_id_list);
+ else
+ hdcp->auth.msg.hdcp2.rx_id_list_size =
+ HDCP_2_2_HDMI_RXSTATUS_MSG_SZ_HI(hdcp->auth.msg.hdcp2.rxstatus[1]) << 8 |
+ hdcp->auth.msg.hdcp2.rxstatus[0];
+ }
+out:
+ return (*status == MOD_HDCP_STATUS_SUCCESS);
+}
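
Every step in these execution routines funnels through mod_hdcp_execute_and_set: it runs the action, records PASS or FAIL into the per-step byte of the transition input (which the transition tables inspect afterwards), and traces the first change of each flag. Its body is declared in hdcp.h above but not shown in this section; a plausible implementation consistent with the declaration and the call sites (an assumption, not a quote of the patch) would be:

    uint8_t mod_hdcp_execute_and_set(mod_hdcp_action action, uint8_t *flag,
            enum mod_hdcp_status *status, struct mod_hdcp *hdcp, char *str)
    {
        *status = action(hdcp);
        if (*status == MOD_HDCP_STATUS_SUCCESS && *flag != PASS) {
            HDCP_INPUT_PASS_TRACE(hdcp, str);
            *flag = PASS;
        } else if (*status != MOD_HDCP_STATUS_SUCCESS && *flag != FAIL) {
            HDCP_INPUT_FAIL_TRACE(hdcp, str);
            *flag = FAIL;
        }
        return *status == MOD_HDCP_STATUS_SUCCESS;
    }
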
+
+static enum mod_hdcp_status known_hdcp2_capable_rx(struct mod_hdcp *hdcp,
+ struct mod_hdcp_event_context *event_ctx,
+ struct mod_hdcp_transition_input_hdcp2 *input)
+{
+ enum mod_hdcp_status status = MOD_HDCP_STATUS_SUCCESS;
+
+ if (event_ctx->event != MOD_HDCP_EVENT_CALLBACK) {
+ event_ctx->unexpected_event = 1;
+ goto out;
+ }
+ if (!mod_hdcp_execute_and_set(mod_hdcp_read_hdcp2version,
+ &input->hdcp2version_read, &status,
+ hdcp, "hdcp2version_read"))
+ goto out;
+ if (!mod_hdcp_execute_and_set(check_hdcp2_capable,
+ &input->hdcp2_capable_check, &status,
+ hdcp, "hdcp2_capable"))
+ goto out;
+out:
+ return status;
+}
+
+static enum mod_hdcp_status send_ake_init(struct mod_hdcp *hdcp,
+ struct mod_hdcp_event_context *event_ctx,
+ struct mod_hdcp_transition_input_hdcp2 *input)
+{
+ enum mod_hdcp_status status = MOD_HDCP_STATUS_SUCCESS;
+
+ if (event_ctx->event != MOD_HDCP_EVENT_CALLBACK) {
+ event_ctx->unexpected_event = 1;
+ goto out;
+ }
+ if (!mod_hdcp_execute_and_set(mod_hdcp_add_display_topology,
+ &input->add_topology, &status,
+ hdcp, "add_topology"))
+ goto out;
+ if (!mod_hdcp_execute_and_set(mod_hdcp_hdcp2_create_session,
+ &input->create_session, &status,
+ hdcp, "create_session"))
+ goto out;
+ if (!mod_hdcp_execute_and_set(mod_hdcp_hdcp2_prepare_ake_init,
+ &input->ake_init_prepare, &status,
+ hdcp, "ake_init_prepare"))
+ goto out;
+ if (!mod_hdcp_execute_and_set(mod_hdcp_write_ake_init,
+ &input->ake_init_write, &status,
+ hdcp, "ake_init_write"))
+ goto out;
+out:
+ return status;
+}
+
+static enum mod_hdcp_status validate_ake_cert(struct mod_hdcp *hdcp,
+ struct mod_hdcp_event_context *event_ctx,
+ struct mod_hdcp_transition_input_hdcp2 *input)
+{
+ enum mod_hdcp_status status = MOD_HDCP_STATUS_SUCCESS;
+
+ if (event_ctx->event != MOD_HDCP_EVENT_CALLBACK &&
+ event_ctx->event != MOD_HDCP_EVENT_WATCHDOG_TIMEOUT) {
+ event_ctx->unexpected_event = 1;
+ goto out;
+ }
+
+ if (is_hdmi_dvi_sl_hdcp(hdcp))
+ if (!mod_hdcp_execute_and_set(check_ake_cert_available,
+ &input->ake_cert_available, &status,
+ hdcp, "ake_cert_available"))
+ goto out;
+ if (!mod_hdcp_execute_and_set(mod_hdcp_read_ake_cert,
+ &input->ake_cert_read, &status,
+ hdcp, "ake_cert_read"))
+ goto out;
+ if (!mod_hdcp_execute_and_set(mod_hdcp_hdcp2_validate_ake_cert,
+ &input->ake_cert_validation, &status,
+ hdcp, "ake_cert_validation"))
+ goto out;
+out:
+ return status;
+}
+
+static enum mod_hdcp_status send_no_stored_km(struct mod_hdcp *hdcp,
+ struct mod_hdcp_event_context *event_ctx,
+ struct mod_hdcp_transition_input_hdcp2 *input)
+{
+ enum mod_hdcp_status status = MOD_HDCP_STATUS_SUCCESS;
+
+ if (event_ctx->event != MOD_HDCP_EVENT_CALLBACK) {
+ event_ctx->unexpected_event = 1;
+ goto out;
+ }
+
+ if (!mod_hdcp_execute_and_set(mod_hdcp_write_no_stored_km,
+ &input->no_stored_km_write, &status,
+ hdcp, "no_stored_km_write"))
+ goto out;
+out:
+ return status;
+}
+
+static enum mod_hdcp_status read_h_prime(struct mod_hdcp *hdcp,
+ struct mod_hdcp_event_context *event_ctx,
+ struct mod_hdcp_transition_input_hdcp2 *input)
+{
+ enum mod_hdcp_status status = MOD_HDCP_STATUS_SUCCESS;
+
+ if (event_ctx->event != MOD_HDCP_EVENT_CALLBACK &&
+ event_ctx->event != MOD_HDCP_EVENT_CPIRQ &&
+ event_ctx->event != MOD_HDCP_EVENT_WATCHDOG_TIMEOUT) {
+ event_ctx->unexpected_event = 1;
+ goto out;
+ }
+
+ if (!mod_hdcp_execute_and_set(check_h_prime_available,
+ &input->h_prime_available, &status,
+ hdcp, "h_prime_available"))
+ goto out;
+
+ if (!mod_hdcp_execute_and_set(mod_hdcp_read_h_prime,
+ &input->h_prime_read, &status,
+ hdcp, "h_prime_read"))
+ goto out;
+out:
+ return status;
+}
+
+static enum mod_hdcp_status read_pairing_info_and_validate_h_prime(
+ struct mod_hdcp *hdcp,
+ struct mod_hdcp_event_context *event_ctx,
+ struct mod_hdcp_transition_input_hdcp2 *input)
+{
+ enum mod_hdcp_status status = MOD_HDCP_STATUS_SUCCESS;
+
+ if (event_ctx->event != MOD_HDCP_EVENT_CALLBACK &&
+ event_ctx->event != MOD_HDCP_EVENT_CPIRQ &&
+ event_ctx->event != MOD_HDCP_EVENT_WATCHDOG_TIMEOUT) {
+ event_ctx->unexpected_event = 1;
+ goto out;
+ }
+
+ if (!mod_hdcp_execute_and_set(check_pairing_info_available,
+ &input->pairing_available, &status,
+ hdcp, "pairing_available"))
+ goto out;
+ if (!mod_hdcp_execute_and_set(mod_hdcp_read_pairing_info,
+ &input->pairing_info_read, &status,
+ hdcp, "pairing_info_read"))
+ goto out;
+ if (!mod_hdcp_execute_and_set(mod_hdcp_hdcp2_validate_h_prime,
+ &input->h_prime_validation, &status,
+ hdcp, "h_prime_validation"))
+ goto out;
+out:
+ return status;
+}
+
+static enum mod_hdcp_status send_stored_km(struct mod_hdcp *hdcp,
+ struct mod_hdcp_event_context *event_ctx,
+ struct mod_hdcp_transition_input_hdcp2 *input)
+{
+ enum mod_hdcp_status status = MOD_HDCP_STATUS_SUCCESS;
+
+ if (event_ctx->event != MOD_HDCP_EVENT_CALLBACK) {
+ event_ctx->unexpected_event = 1;
+ goto out;
+ }
+
+ if (!mod_hdcp_execute_and_set(mod_hdcp_write_stored_km,
+ &input->stored_km_write, &status,
+ hdcp, "stored_km_write"))
+ goto out;
+out:
+ return status;
+}
+
+static enum mod_hdcp_status validate_h_prime(struct mod_hdcp *hdcp,
+ struct mod_hdcp_event_context *event_ctx,
+ struct mod_hdcp_transition_input_hdcp2 *input)
+{
+ enum mod_hdcp_status status = MOD_HDCP_STATUS_SUCCESS;
+
+ if (event_ctx->event != MOD_HDCP_EVENT_CALLBACK &&
+ event_ctx->event != MOD_HDCP_EVENT_CPIRQ &&
+ event_ctx->event != MOD_HDCP_EVENT_WATCHDOG_TIMEOUT) {
+ event_ctx->unexpected_event = 1;
+ goto out;
+ }
+
+ if (!mod_hdcp_execute_and_set(check_h_prime_available,
+ &input->h_prime_available, &status,
+ hdcp, "h_prime_available"))
+ goto out;
+ if (!mod_hdcp_execute_and_set(mod_hdcp_read_h_prime,
+ &input->h_prime_read, &status,
+ hdcp, "h_prime_read"))
+ goto out;
+ if (!mod_hdcp_execute_and_set(mod_hdcp_hdcp2_validate_h_prime,
+ &input->h_prime_validation, &status,
+ hdcp, "h_prime_validation"))
+ goto out;
+out:
+ return status;
+}
+
+static enum mod_hdcp_status locality_check(struct mod_hdcp *hdcp,
+ struct mod_hdcp_event_context *event_ctx,
+ struct mod_hdcp_transition_input_hdcp2 *input)
+{
+ enum mod_hdcp_status status = MOD_HDCP_STATUS_SUCCESS;
+
+ if (event_ctx->event != MOD_HDCP_EVENT_CALLBACK) {
+ event_ctx->unexpected_event = 1;
+ goto out;
+ }
+
+ if (!mod_hdcp_execute_and_set(mod_hdcp_hdcp2_prepare_lc_init,
+ &input->lc_init_prepare, &status,
+ hdcp, "lc_init_prepare"))
+ goto out;
+ if (!mod_hdcp_execute_and_set(mod_hdcp_write_lc_init,
+ &input->lc_init_write, &status,
+ hdcp, "lc_init_write"))
+ goto out;
+ if (is_dp_hdcp(hdcp))
+ msleep(16);
+ else
+ if (!mod_hdcp_execute_and_set(poll_l_prime_available,
+ &input->l_prime_available_poll, &status,
+ hdcp, "l_prime_available_poll"))
+ goto out;
+ if (!mod_hdcp_execute_and_set(mod_hdcp_read_l_prime,
+ &input->l_prime_read, &status,
+ hdcp, "l_prime_read"))
+ goto out;
+ if (!mod_hdcp_execute_and_set(mod_hdcp_hdcp2_validate_l_prime,
+ &input->l_prime_validation, &status,
+ hdcp, "l_prime_validation"))
+ goto out;
+out:
+ return status;
+}
+
+static enum mod_hdcp_status exchange_ks_and_test_for_repeater(struct mod_hdcp *hdcp,
+ struct mod_hdcp_event_context *event_ctx,
+ struct mod_hdcp_transition_input_hdcp2 *input)
+{
+ enum mod_hdcp_status status = MOD_HDCP_STATUS_SUCCESS;
+
+ if (event_ctx->event != MOD_HDCP_EVENT_CALLBACK) {
+ event_ctx->unexpected_event = 1;
+ goto out;
+ }
+
+ if (!mod_hdcp_execute_and_set(mod_hdcp_hdcp2_prepare_eks,
+ &input->eks_prepare, &status,
+ hdcp, "eks_prepare"))
+ goto out;
+ if (!mod_hdcp_execute_and_set(mod_hdcp_write_eks,
+ &input->eks_write, &status,
+ hdcp, "eks_write"))
+ goto out;
+out:
+ return status;
+}
+
+static enum mod_hdcp_status enable_encryption(struct mod_hdcp *hdcp,
+ struct mod_hdcp_event_context *event_ctx,
+ struct mod_hdcp_transition_input_hdcp2 *input)
+{
+ enum mod_hdcp_status status = MOD_HDCP_STATUS_SUCCESS;
+
+ if (event_ctx->event != MOD_HDCP_EVENT_CALLBACK &&
+ event_ctx->event != MOD_HDCP_EVENT_CPIRQ) {
+ event_ctx->unexpected_event = 1;
+ goto out;
+ }
+ if (event_ctx->event == MOD_HDCP_EVENT_CPIRQ) {
+ process_rxstatus(hdcp, event_ctx, input, &status);
+ goto out;
+ }
+
+ if (is_hdmi_dvi_sl_hdcp(hdcp)) {
+ if (!process_rxstatus(hdcp, event_ctx, input, &status))
+ goto out;
+ if (event_ctx->rx_id_list_ready)
+ goto out;
+ }
+ if (!mod_hdcp_execute_and_set(mod_hdcp_hdcp2_enable_encryption,
+ &input->enable_encryption, &status,
+ hdcp, "enable_encryption"))
+ goto out;
+ if (is_dp_mst_hdcp(hdcp)) {
+ if (!mod_hdcp_execute_and_set(
+ mod_hdcp_hdcp2_enable_dp_stream_encryption,
+ &input->stream_encryption_dp, &status,
+ hdcp, "stream_encryption_dp"))
+ goto out;
+ }
+out:
+ return status;
+}
+
+static enum mod_hdcp_status authenticated(struct mod_hdcp *hdcp,
+ struct mod_hdcp_event_context *event_ctx,
+ struct mod_hdcp_transition_input_hdcp2 *input)
+{
+ enum mod_hdcp_status status = MOD_HDCP_STATUS_SUCCESS;
+
+ if (event_ctx->event != MOD_HDCP_EVENT_CALLBACK &&
+ event_ctx->event != MOD_HDCP_EVENT_CPIRQ) {
+ event_ctx->unexpected_event = 1;
+ goto out;
+ }
+
+ if (!process_rxstatus(hdcp, event_ctx, input, &status))
+ goto out;
+ if (event_ctx->rx_id_list_ready)
+ goto out;
+out:
+ return status;
+}
+
+static enum mod_hdcp_status wait_for_rx_id_list(struct mod_hdcp *hdcp,
+ struct mod_hdcp_event_context *event_ctx,
+ struct mod_hdcp_transition_input_hdcp2 *input)
+{
+ enum mod_hdcp_status status = MOD_HDCP_STATUS_SUCCESS;
+
+ if (event_ctx->event != MOD_HDCP_EVENT_CALLBACK &&
+ event_ctx->event != MOD_HDCP_EVENT_CPIRQ &&
+ event_ctx->event != MOD_HDCP_EVENT_WATCHDOG_TIMEOUT) {
+ event_ctx->unexpected_event = 1;
+ goto out;
+ }
+
+ if (!process_rxstatus(hdcp, event_ctx, input, &status))
+ goto out;
+ if (!event_ctx->rx_id_list_ready) {
+ status = MOD_HDCP_STATUS_HDCP2_RX_ID_LIST_NOT_READY;
+ goto out;
+ }
+out:
+ return status;
+}
+
+static enum mod_hdcp_status verify_rx_id_list_and_send_ack(struct mod_hdcp *hdcp,
+ struct mod_hdcp_event_context *event_ctx,
+ struct mod_hdcp_transition_input_hdcp2 *input)
+{
+ enum mod_hdcp_status status = MOD_HDCP_STATUS_SUCCESS;
+
+ if (event_ctx->event != MOD_HDCP_EVENT_CALLBACK &&
+ event_ctx->event != MOD_HDCP_EVENT_CPIRQ) {
+ event_ctx->unexpected_event = 1;
+ goto out;
+ }
+ if (event_ctx->event == MOD_HDCP_EVENT_CPIRQ) {
+ process_rxstatus(hdcp, event_ctx, input, &status);
+ goto out;
+ }
+
+ if (!mod_hdcp_execute_and_set(mod_hdcp_read_rx_id_list,
+ &input->rx_id_list_read,
+ &status, hdcp, "receiver_id_list_read"))
+ goto out;
+ if (!mod_hdcp_execute_and_set(check_device_count,
+ &input->device_count_check,
+ &status, hdcp, "device_count_check"))
+ goto out;
+ if (!mod_hdcp_execute_and_set(mod_hdcp_hdcp2_validate_rx_id_list,
+ &input->rx_id_list_validation,
+ &status, hdcp, "rx_id_list_validation"))
+ goto out;
+ if (!mod_hdcp_execute_and_set(mod_hdcp_write_repeater_auth_ack,
+ &input->repeater_auth_ack_write,
+ &status, hdcp, "repeater_auth_ack_write"))
+ goto out;
+out:
+ return status;
+}
+
+static enum mod_hdcp_status send_stream_management(struct mod_hdcp *hdcp,
+ struct mod_hdcp_event_context *event_ctx,
+ struct mod_hdcp_transition_input_hdcp2 *input)
+{
+ enum mod_hdcp_status status = MOD_HDCP_STATUS_SUCCESS;
+
+ if (event_ctx->event != MOD_HDCP_EVENT_CALLBACK &&
+ event_ctx->event != MOD_HDCP_EVENT_CPIRQ) {
+ event_ctx->unexpected_event = 1;
+ goto out;
+ }
+ if (event_ctx->event == MOD_HDCP_EVENT_CPIRQ) {
+ process_rxstatus(hdcp, event_ctx, input, &status);
+ goto out;
+ }
+
+ if (is_hdmi_dvi_sl_hdcp(hdcp)) {
+ if (!process_rxstatus(hdcp, event_ctx, input, &status))
+ goto out;
+ if (event_ctx->rx_id_list_ready)
+ goto out;
+ }
+ if (!mod_hdcp_execute_and_set(mod_hdcp_hdcp2_prepare_stream_management,
+ &input->prepare_stream_manage,
+ &status, hdcp, "prepare_stream_manage"))
+ goto out;
+
+ if (!mod_hdcp_execute_and_set(mod_hdcp_write_stream_manage,
+ &input->stream_manage_write,
+ &status, hdcp, "stream_manage_write"))
+ goto out;
+out:
+ return status;
+}
+
+static enum mod_hdcp_status validate_stream_ready(struct mod_hdcp *hdcp,
+ struct mod_hdcp_event_context *event_ctx,
+ struct mod_hdcp_transition_input_hdcp2 *input)
+{
+ enum mod_hdcp_status status = MOD_HDCP_STATUS_SUCCESS;
+
+ if (event_ctx->event != MOD_HDCP_EVENT_CALLBACK &&
+ event_ctx->event != MOD_HDCP_EVENT_CPIRQ &&
+ event_ctx->event != MOD_HDCP_EVENT_WATCHDOG_TIMEOUT) {
+ event_ctx->unexpected_event = 1;
+ goto out;
+ }
+ if (event_ctx->event == MOD_HDCP_EVENT_CPIRQ) {
+ process_rxstatus(hdcp, event_ctx, input, &status);
+ goto out;
+ }
+
+ if (is_hdmi_dvi_sl_hdcp(hdcp)) {
+ if (!process_rxstatus(hdcp, event_ctx, input, &status))
+ goto out;
+ if (event_ctx->rx_id_list_ready)
+ goto out;
+ if (!mod_hdcp_execute_and_set(check_stream_ready_available,
+ &input->stream_ready_available,
+ &status, hdcp, "stream_ready_available"))
+ goto out;
+ }
+ if (!mod_hdcp_execute_and_set(mod_hdcp_read_stream_ready,
+ &input->stream_ready_read,
+ &status, hdcp, "stream_ready_read"))
+ goto out;
+ if (!mod_hdcp_execute_and_set(mod_hdcp_hdcp2_validate_stream_ready,
+ &input->stream_ready_validation,
+ &status, hdcp, "stream_ready_validation"))
+ goto out;
+out:
+ return status;
+}
+
+static enum mod_hdcp_status determine_rx_hdcp_capable_dp(struct mod_hdcp *hdcp,
+ struct mod_hdcp_event_context *event_ctx,
+ struct mod_hdcp_transition_input_hdcp2 *input)
+{
+ enum mod_hdcp_status status = MOD_HDCP_STATUS_SUCCESS;
+
+ if (event_ctx->event != MOD_HDCP_EVENT_CALLBACK) {
+ event_ctx->unexpected_event = 1;
+ goto out;
+ }
+
+ if (!mod_hdcp_execute_and_set(mod_hdcp_read_rxcaps,
+ &input->rx_caps_read_dp,
+ &status, hdcp, "rx_caps_read_dp"))
+ goto out;
+ if (!mod_hdcp_execute_and_set(check_hdcp2_capable,
+ &input->hdcp2_capable_check, &status,
+ hdcp, "hdcp2_capable_check"))
+ goto out;
+out:
+ return status;
+}
+
+static enum mod_hdcp_status send_content_stream_type_dp(struct mod_hdcp *hdcp,
+ struct mod_hdcp_event_context *event_ctx,
+ struct mod_hdcp_transition_input_hdcp2 *input)
+{
+ enum mod_hdcp_status status = MOD_HDCP_STATUS_SUCCESS;
+
+ if (event_ctx->event != MOD_HDCP_EVENT_CALLBACK &&
+ event_ctx->event != MOD_HDCP_EVENT_CPIRQ) {
+ event_ctx->unexpected_event = 1;
+ goto out;
+ }
+
+ if (!process_rxstatus(hdcp, event_ctx, input, &status))
+ goto out;
+ if (!mod_hdcp_execute_and_set(mod_hdcp_write_content_type,
+ &input->content_stream_type_write, &status,
+ hdcp, "content_stream_type_write"))
+ goto out;
+out:
+ return status;
+}
+
+enum mod_hdcp_status mod_hdcp_hdcp2_execution(struct mod_hdcp *hdcp,
+ struct mod_hdcp_event_context *event_ctx,
+ struct mod_hdcp_transition_input_hdcp2 *input)
+{
+ enum mod_hdcp_status status = MOD_HDCP_STATUS_SUCCESS;
+
+ switch (current_state(hdcp)) {
+ case H2_A0_KNOWN_HDCP2_CAPABLE_RX:
+ status = known_hdcp2_capable_rx(hdcp, event_ctx, input);
+ break;
+ case H2_A1_SEND_AKE_INIT:
+ status = send_ake_init(hdcp, event_ctx, input);
+ break;
+ case H2_A1_VALIDATE_AKE_CERT:
+ status = validate_ake_cert(hdcp, event_ctx, input);
+ break;
+ case H2_A1_SEND_NO_STORED_KM:
+ status = send_no_stored_km(hdcp, event_ctx, input);
+ break;
+ case H2_A1_READ_H_PRIME:
+ status = read_h_prime(hdcp, event_ctx, input);
+ break;
+ case H2_A1_READ_PAIRING_INFO_AND_VALIDATE_H_PRIME:
+ status = read_pairing_info_and_validate_h_prime(hdcp,
+ event_ctx, input);
+ break;
+ case H2_A1_SEND_STORED_KM:
+ status = send_stored_km(hdcp, event_ctx, input);
+ break;
+ case H2_A1_VALIDATE_H_PRIME:
+ status = validate_h_prime(hdcp, event_ctx, input);
+ break;
+ case H2_A2_LOCALITY_CHECK:
+ status = locality_check(hdcp, event_ctx, input);
+ break;
+ case H2_A3_EXCHANGE_KS_AND_TEST_FOR_REPEATER:
+ status = exchange_ks_and_test_for_repeater(hdcp, event_ctx, input);
+ break;
+ case H2_ENABLE_ENCRYPTION:
+ status = enable_encryption(hdcp, event_ctx, input);
+ break;
+ case H2_A5_AUTHENTICATED:
+ status = authenticated(hdcp, event_ctx, input);
+ break;
+ case H2_A6_WAIT_FOR_RX_ID_LIST:
+ status = wait_for_rx_id_list(hdcp, event_ctx, input);
+ break;
+ case H2_A78_VERIFY_RX_ID_LIST_AND_SEND_ACK:
+ status = verify_rx_id_list_and_send_ack(hdcp, event_ctx, input);
+ break;
+ case H2_A9_SEND_STREAM_MANAGEMENT:
+ status = send_stream_management(hdcp, event_ctx, input);
+ break;
+ case H2_A9_VALIDATE_STREAM_READY:
+ status = validate_stream_ready(hdcp, event_ctx, input);
+ break;
+ default:
+ status = MOD_HDCP_STATUS_INVALID_STATE;
+ break;
+ }
+
+ return status;
+}
+
+enum mod_hdcp_status mod_hdcp_hdcp2_dp_execution(struct mod_hdcp *hdcp,
+ struct mod_hdcp_event_context *event_ctx,
+ struct mod_hdcp_transition_input_hdcp2 *input)
+{
+ enum mod_hdcp_status status = MOD_HDCP_STATUS_SUCCESS;
+
+ switch (current_state(hdcp)) {
+ case D2_A0_DETERMINE_RX_HDCP_CAPABLE:
+ status = determine_rx_hdcp_capable_dp(hdcp, event_ctx, input);
+ break;
+ case D2_A1_SEND_AKE_INIT:
+ status = send_ake_init(hdcp, event_ctx, input);
+ break;
+ case D2_A1_VALIDATE_AKE_CERT:
+ status = validate_ake_cert(hdcp, event_ctx, input);
+ break;
+ case D2_A1_SEND_NO_STORED_KM:
+ status = send_no_stored_km(hdcp, event_ctx, input);
+ break;
+ case D2_A1_READ_H_PRIME:
+ status = read_h_prime(hdcp, event_ctx, input);
+ break;
+ case D2_A1_READ_PAIRING_INFO_AND_VALIDATE_H_PRIME:
+ status = read_pairing_info_and_validate_h_prime(hdcp,
+ event_ctx, input);
+ break;
+ case D2_A1_SEND_STORED_KM:
+ status = send_stored_km(hdcp, event_ctx, input);
+ break;
+ case D2_A1_VALIDATE_H_PRIME:
+ status = validate_h_prime(hdcp, event_ctx, input);
+ break;
+ case D2_A2_LOCALITY_CHECK:
+ status = locality_check(hdcp, event_ctx, input);
+ break;
+ case D2_A34_EXCHANGE_KS_AND_TEST_FOR_REPEATER:
+ status = exchange_ks_and_test_for_repeater(hdcp,
+ event_ctx, input);
+ break;
+ case D2_SEND_CONTENT_STREAM_TYPE:
+ status = send_content_stream_type_dp(hdcp, event_ctx, input);
+ break;
+ case D2_ENABLE_ENCRYPTION:
+ status = enable_encryption(hdcp, event_ctx, input);
+ break;
+ case D2_A5_AUTHENTICATED:
+ status = authenticated(hdcp, event_ctx, input);
+ break;
+ case D2_A6_WAIT_FOR_RX_ID_LIST:
+ status = wait_for_rx_id_list(hdcp, event_ctx, input);
+ break;
+ case D2_A78_VERIFY_RX_ID_LIST_AND_SEND_ACK:
+ status = verify_rx_id_list_and_send_ack(hdcp, event_ctx, input);
+ break;
+ case D2_A9_SEND_STREAM_MANAGEMENT:
+ status = send_stream_management(hdcp, event_ctx, input);
+ break;
+ case D2_A9_VALIDATE_STREAM_READY:
+ status = validate_stream_ready(hdcp, event_ctx, input);
+ break;
+ default:
+ status = MOD_HDCP_STATUS_INVALID_STATE;
+ break;
+ }
+
+ return status;
+}
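
The state machine is deliberately split in two: the *_execution routines in this file perform the DDC and PSP work for the current state and record per-step results, while the *_transition routines in hdcp2_transition.c (added below) branch on those results, pick the next state, and schedule the next callback or watchdog. A sketch of how the two halves compose; this is a hypothetical reduction, the real dispatch lives in hdcp.c's execution()/transition() shown earlier:

    static enum mod_hdcp_status handle_hdcp2_event(struct mod_hdcp *hdcp,
            struct mod_hdcp_event_context *event_ctx,
            union mod_hdcp_transition_input *input,
            struct mod_hdcp_output *output)
    {
        /* phase 1: do the DDC reads/writes and PSP calls, record PASS/FAIL */
        mod_hdcp_hdcp2_execution(hdcp, event_ctx, &input->hdcp2);

        /* phase 2: branch on the recorded flags, set the next state and
         * request a callback or watchdog through *output */
        return mod_hdcp_hdcp2_transition(hdcp, event_ctx, &input->hdcp2,
                output);
    }
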
diff --git a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp2_transition.c b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp2_transition.c
new file mode 100644
index 000000000000..8cae3e3aacd5
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp2_transition.c
@@ -0,0 +1,679 @@
+/*
+ * Copyright 2018 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include "hdcp.h"
+
+enum mod_hdcp_status mod_hdcp_hdcp2_transition(struct mod_hdcp *hdcp,
+ struct mod_hdcp_event_context *event_ctx,
+ struct mod_hdcp_transition_input_hdcp2 *input,
+ struct mod_hdcp_output *output)
+{
+ enum mod_hdcp_status status = MOD_HDCP_STATUS_SUCCESS;
+ struct mod_hdcp_connection *conn = &hdcp->connection;
+ struct mod_hdcp_link_adjustment *adjust = &hdcp->connection.link.adjust;
+
+ switch (current_state(hdcp)) {
+ case H2_A0_KNOWN_HDCP2_CAPABLE_RX:
+ if (input->hdcp2version_read != PASS ||
+ input->hdcp2_capable_check != PASS) {
+ adjust->hdcp2.disable = 1;
+ callback_in_ms(0, output);
+ set_state_id(hdcp, output, HDCP_INITIALIZED);
+ } else {
+ callback_in_ms(0, output);
+ set_state_id(hdcp, output, H2_A1_SEND_AKE_INIT);
+ }
+ break;
+ case H2_A1_SEND_AKE_INIT:
+ if (input->add_topology != PASS ||
+ input->create_session != PASS ||
+ input->ake_init_prepare != PASS) {
+ /* out of sync with psp state */
+ adjust->hdcp2.disable = 1;
+ fail_and_restart_in_ms(0, &status, output);
+ break;
+ } else if (input->ake_init_write != PASS) {
+ fail_and_restart_in_ms(0, &status, output);
+ break;
+ }
+ set_watchdog_in_ms(hdcp, 100, output);
+ callback_in_ms(0, output);
+ set_state_id(hdcp, output, H2_A1_VALIDATE_AKE_CERT);
+ break;
+ case H2_A1_VALIDATE_AKE_CERT:
+ if (input->ake_cert_available != PASS) {
+ if (event_ctx->event ==
+ MOD_HDCP_EVENT_WATCHDOG_TIMEOUT) {
+ /* 1A-08: consider ake timeout a failure */
+ /* some hdmi receivers are not ready for HDCP
+ * immediately after video becomes active, so
+ * delay 1 s before retrying on the first HDCP
+ * message timeout.
+ */
+ fail_and_restart_in_ms(1000, &status, output);
+ } else {
+ /* continue ake cert polling */
+ callback_in_ms(10, output);
+ increment_stay_counter(hdcp);
+ }
+ break;
+ } else if (input->ake_cert_read != PASS ||
+ input->ake_cert_validation != PASS) {
+ /*
+ * 1A-09: consider invalid ake cert a failure
+ * 1A-10: consider receiver id listed in SRM a failure
+ */
+ fail_and_restart_in_ms(0, &status, output);
+ break;
+ }
+ if (conn->is_km_stored &&
+ !adjust->hdcp2.force_no_stored_km) {
+ callback_in_ms(0, output);
+ set_state_id(hdcp, output, H2_A1_SEND_STORED_KM);
+ } else {
+ callback_in_ms(0, output);
+ set_state_id(hdcp, output, H2_A1_SEND_NO_STORED_KM);
+ }
+ break;
+ case H2_A1_SEND_NO_STORED_KM:
+ if (input->no_stored_km_write != PASS) {
+ fail_and_restart_in_ms(0, &status, output);
+ break;
+ }
+ if (adjust->hdcp2.increase_h_prime_timeout)
+ set_watchdog_in_ms(hdcp, 2000, output);
+ else
+ set_watchdog_in_ms(hdcp, 1000, output);
+ callback_in_ms(0, output);
+ set_state_id(hdcp, output, H2_A1_READ_H_PRIME);
+ break;
+ case H2_A1_READ_H_PRIME:
+ if (input->h_prime_available != PASS) {
+ if (event_ctx->event ==
+ MOD_HDCP_EVENT_WATCHDOG_TIMEOUT) {
+ /* 1A-11-3: consider h' timeout a failure */
+ fail_and_restart_in_ms(1000, &status, output);
+ } else {
+ /* continue h' polling */
+ callback_in_ms(100, output);
+ increment_stay_counter(hdcp);
+ }
+ break;
+ } else if (input->h_prime_read != PASS) {
+ fail_and_restart_in_ms(0, &status, output);
+ break;
+ }
+ set_watchdog_in_ms(hdcp, 200, output);
+ callback_in_ms(0, output);
+ set_state_id(hdcp, output, H2_A1_READ_PAIRING_INFO_AND_VALIDATE_H_PRIME);
+ break;
+ case H2_A1_READ_PAIRING_INFO_AND_VALIDATE_H_PRIME:
+ if (input->pairing_available != PASS) {
+ if (event_ctx->event ==
+ MOD_HDCP_EVENT_WATCHDOG_TIMEOUT) {
+ /* 1A-12: consider pairing info timeout
+ * a failure
+ */
+ fail_and_restart_in_ms(0, &status, output);
+ } else {
+ /* continue pairing info polling */
+ callback_in_ms(20, output);
+ increment_stay_counter(hdcp);
+ }
+ break;
+ } else if (input->pairing_info_read != PASS ||
+ input->h_prime_validation != PASS) {
+ /* 1A-11-1: consider invalid h' a failure */
+ fail_and_restart_in_ms(0, &status, output);
+ break;
+ }
+ callback_in_ms(0, output);
+ set_state_id(hdcp, output, H2_A2_LOCALITY_CHECK);
+ break;
+ case H2_A1_SEND_STORED_KM:
+ if (input->stored_km_write != PASS) {
+ fail_and_restart_in_ms(0, &status, output);
+ break;
+ }
+ set_watchdog_in_ms(hdcp, 200, output);
+ callback_in_ms(0, output);
+ set_state_id(hdcp, output, H2_A1_VALIDATE_H_PRIME);
+ break;
+ case H2_A1_VALIDATE_H_PRIME:
+ if (input->h_prime_available != PASS) {
+ if (event_ctx->event ==
+ MOD_HDCP_EVENT_WATCHDOG_TIMEOUT) {
+ /* 1A-11-2: consider h' timeout a failure */
+ fail_and_restart_in_ms(1000, &status, output);
+ } else {
+ /* continue h' polling */
+ callback_in_ms(20, output);
+ increment_stay_counter(hdcp);
+ }
+ break;
+ } else if (input->h_prime_read != PASS) {
+ fail_and_restart_in_ms(0, &status, output);
+ break;
+ } else if (input->h_prime_validation != PASS) {
+ /* 1A-11-1: consider invalid h' a failure */
+ adjust->hdcp2.force_no_stored_km = 1;
+ fail_and_restart_in_ms(0, &status, output);
+ break;
+ }
+ callback_in_ms(0, output);
+ set_state_id(hdcp, output, H2_A2_LOCALITY_CHECK);
+ break;
+ case H2_A2_LOCALITY_CHECK:
+ if (hdcp->state.stay_count > 10 ||
+ input->lc_init_prepare != PASS ||
+ input->lc_init_write != PASS ||
+ input->l_prime_available_poll != PASS ||
+ input->l_prime_read != PASS) {
+ /*
+ * 1A-05: consider disconnection after LC init a failure
+ * 1A-13-1: consider invalid l' a failure
+ * 1A-13-2: consider l' timeout a failure
+ */
+ fail_and_restart_in_ms(0, &status, output);
+ break;
+ } else if (input->l_prime_validation != PASS) {
+ callback_in_ms(0, output);
+ increment_stay_counter(hdcp);
+ break;
+ }
+ callback_in_ms(0, output);
+ set_state_id(hdcp, output, H2_A3_EXCHANGE_KS_AND_TEST_FOR_REPEATER);
+ break;
+ case H2_A3_EXCHANGE_KS_AND_TEST_FOR_REPEATER:
+ if (input->eks_prepare != PASS ||
+ input->eks_write != PASS) {
+ fail_and_restart_in_ms(0, &status, output);
+ break;
+ }
+ if (conn->is_repeater) {
+ set_watchdog_in_ms(hdcp, 3000, output);
+ callback_in_ms(0, output);
+ set_state_id(hdcp, output, H2_A6_WAIT_FOR_RX_ID_LIST);
+ } else {
+ /* some CTS equipment requires a delay GREATER than
+ * 200 ms, so delay 210 ms instead of 200 ms
+ */
+ callback_in_ms(210, output);
+ set_state_id(hdcp, output, H2_ENABLE_ENCRYPTION);
+ }
+ break;
+ case H2_ENABLE_ENCRYPTION:
+ if (input->rxstatus_read != PASS ||
+ input->reauth_request_check != PASS) {
+ /*
+ * 1A-07: restart hdcp on REAUTH_REQ
+ * 1B-08: restart hdcp on REAUTH_REQ
+ */
+ fail_and_restart_in_ms(0, &status, output);
+ break;
+ } else if (event_ctx->rx_id_list_ready && conn->is_repeater) {
+ callback_in_ms(0, output);
+ set_state_id(hdcp, output, H2_A78_VERIFY_RX_ID_LIST_AND_SEND_ACK);
+ break;
+ } else if (input->enable_encryption != PASS) {
+ fail_and_restart_in_ms(0, &status, output);
+ break;
+ }
+ callback_in_ms(0, output);
+ set_state_id(hdcp, output, H2_A5_AUTHENTICATED);
+ HDCP_FULL_DDC_TRACE(hdcp);
+ break;
+ case H2_A5_AUTHENTICATED:
+ if (input->rxstatus_read != PASS ||
+ input->reauth_request_check != PASS) {
+ fail_and_restart_in_ms(0, &status, output);
+ break;
+ } else if (event_ctx->rx_id_list_ready && conn->is_repeater) {
+ callback_in_ms(0, output);
+ set_state_id(hdcp, output, H2_A78_VERIFY_RX_ID_LIST_AND_SEND_ACK);
+ break;
+ }
+ callback_in_ms(500, output);
+ increment_stay_counter(hdcp);
+ break;
+ case H2_A6_WAIT_FOR_RX_ID_LIST:
+ if (input->rxstatus_read != PASS ||
+ input->reauth_request_check != PASS) {
+ fail_and_restart_in_ms(0, &status, output);
+ break;
+ } else if (!event_ctx->rx_id_list_ready) {
+ if (event_ctx->event == MOD_HDCP_EVENT_WATCHDOG_TIMEOUT) {
+ /* 1B-02: consider rx id list timeout a failure */
+ /* some CTS equipment measures the timeout as
+ * slightly greater than 3000 ms. Delay 100 ms
+ * to ensure the timeout has fully elapsed
+ * before re-authenticating.
+ */
+ fail_and_restart_in_ms(100, &status, output);
+ } else {
+ callback_in_ms(300, output);
+ increment_stay_counter(hdcp);
+ }
+ break;
+ }
+ callback_in_ms(0, output);
+ set_state_id(hdcp, output, H2_A78_VERIFY_RX_ID_LIST_AND_SEND_ACK);
+ break;
+ case H2_A78_VERIFY_RX_ID_LIST_AND_SEND_ACK:
+ if (input->rxstatus_read != PASS ||
+ input->reauth_request_check != PASS ||
+ input->rx_id_list_read != PASS ||
+ input->device_count_check != PASS ||
+ input->rx_id_list_validation != PASS ||
+ input->repeater_auth_ack_write != PASS) {
+ /* 1B-03: consider invalid v' a failure
+ * 1B-04: consider MAX_DEVS_EXCEEDED a failure
+ * 1B-05: consider MAX_CASCADE_EXCEEDED a failure
+ * 1B-06: consider invalid seq_num_V a failure
+ * 1B-09: consider seq_num_V rollover a failure
+ */
+ fail_and_restart_in_ms(0, &status, output);
+ break;
+ }
+ callback_in_ms(0, output);
+ set_state_id(hdcp, output, H2_A9_SEND_STREAM_MANAGEMENT);
+ break;
+ case H2_A9_SEND_STREAM_MANAGEMENT:
+ if (input->rxstatus_read != PASS ||
+ input->reauth_request_check != PASS) {
+ fail_and_restart_in_ms(0, &status, output);
+ break;
+ } else if (event_ctx->rx_id_list_ready && conn->is_repeater) {
+ callback_in_ms(0, output);
+ set_state_id(hdcp, output, H2_A78_VERIFY_RX_ID_LIST_AND_SEND_ACK);
+ break;
+ } else if (input->prepare_stream_manage != PASS ||
+ input->stream_manage_write != PASS) {
+ fail_and_restart_in_ms(0, &status, output);
+ break;
+ }
+ set_watchdog_in_ms(hdcp, 100, output);
+ callback_in_ms(0, output);
+ set_state_id(hdcp, output, H2_A9_VALIDATE_STREAM_READY);
+ break;
+ case H2_A9_VALIDATE_STREAM_READY:
+ if (input->rxstatus_read != PASS ||
+ input->reauth_request_check != PASS) {
+ fail_and_restart_in_ms(0, &status, output);
+ break;
+ } else if (event_ctx->rx_id_list_ready && conn->is_repeater) {
+ callback_in_ms(0, output);
+ set_state_id(hdcp, output, H2_A78_VERIFY_RX_ID_LIST_AND_SEND_ACK);
+ break;
+ } else if (input->stream_ready_available != PASS) {
+ if (event_ctx->event == MOD_HDCP_EVENT_WATCHDOG_TIMEOUT) {
+ /* 1B-10-2: restart content stream management on
+ * stream ready timeout
+ */
+ hdcp->auth.count.stream_management_retry_count++;
+ callback_in_ms(0, output);
+ set_state_id(hdcp, output, H2_A9_SEND_STREAM_MANAGEMENT);
+ } else {
+ callback_in_ms(10, output);
+ increment_stay_counter(hdcp);
+ }
+ break;
+ } else if (input->stream_ready_read != PASS ||
+ input->stream_ready_validation != PASS) {
+ /*
+ * 1B-10-1: restart content stream management
+ * on invalid M'
+ */
+ if (hdcp->auth.count.stream_management_retry_count > 10) {
+ fail_and_restart_in_ms(0, &status, output);
+ } else {
+ hdcp->auth.count.stream_management_retry_count++;
+ callback_in_ms(0, output);
+ set_state_id(hdcp, output, H2_A9_SEND_STREAM_MANAGEMENT);
+ }
+ break;
+ }
+ callback_in_ms(200, output);
+ set_state_id(hdcp, output, H2_ENABLE_ENCRYPTION);
+ break;
+ default:
+ status = MOD_HDCP_STATUS_INVALID_STATE;
+ fail_and_restart_in_ms(0, &status, output);
+ break;
+ }
+
+ return status;
+}
+
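+/*
+ * DP flavour of the HDCP 2.2 state machine: it mirrors the HDMI flow
+ * above, but starts from a DP capability check on the Rx caps DPCD
+ * registers, folds a link integrity check into the RxStatus handling
+ * of most states, and writes the content stream type as an explicit
+ * step.
+ */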
+enum mod_hdcp_status mod_hdcp_hdcp2_dp_transition(struct mod_hdcp *hdcp,
+ struct mod_hdcp_event_context *event_ctx,
+ struct mod_hdcp_transition_input_hdcp2 *input,
+ struct mod_hdcp_output *output)
+{
+ enum mod_hdcp_status status = MOD_HDCP_STATUS_SUCCESS;
+ struct mod_hdcp_connection *conn = &hdcp->connection;
+ struct mod_hdcp_link_adjustment *adjust = &hdcp->connection.link.adjust;
+
+ switch (current_state(hdcp)) {
+ case D2_A0_DETERMINE_RX_HDCP_CAPABLE:
+ if (input->rx_caps_read_dp != PASS ||
+ input->hdcp2_capable_check != PASS) {
+ adjust->hdcp2.disable = 1;
+ callback_in_ms(0, output);
+ set_state_id(hdcp, output, HDCP_INITIALIZED);
+ } else {
+ callback_in_ms(0, output);
+ set_state_id(hdcp, output, D2_A1_SEND_AKE_INIT);
+ }
+ break;
+ case D2_A1_SEND_AKE_INIT:
+ if (input->add_topology != PASS ||
+ input->create_session != PASS ||
+ input->ake_init_prepare != PASS) {
+ /* out of sync with psp state */
+ adjust->hdcp2.disable = 1;
+ fail_and_restart_in_ms(0, &status, output);
+ break;
+ } else if (input->ake_init_write != PASS) {
+ /* possibly display not ready */
+ fail_and_restart_in_ms(0, &status, output);
+ break;
+ }
+ callback_in_ms(100, output);
+ set_state_id(hdcp, output, D2_A1_VALIDATE_AKE_CERT);
+ break;
+ case D2_A1_VALIDATE_AKE_CERT:
+ if (input->ake_cert_read != PASS ||
+ input->ake_cert_validation != PASS) {
+ /*
+ * 1A-08: consider invalid ake cert a failure
+ * 1A-09: consider receiver id listed in SRM a failure
+ */
+ fail_and_restart_in_ms(0, &status, output);
+ break;
+ }
+ if (conn->is_km_stored &&
+ !adjust->hdcp2.force_no_stored_km) {
+ callback_in_ms(0, output);
+ set_state_id(hdcp, output, D2_A1_SEND_STORED_KM);
+ } else {
+ callback_in_ms(0, output);
+ set_state_id(hdcp, output, D2_A1_SEND_NO_STORED_KM);
+ }
+ break;
+ case D2_A1_SEND_NO_STORED_KM:
+ if (input->no_stored_km_write != PASS) {
+ fail_and_restart_in_ms(0, &status, output);
+ break;
+ }
+ if (adjust->hdcp2.increase_h_prime_timeout)
+ set_watchdog_in_ms(hdcp, 2000, output);
+ else
+ set_watchdog_in_ms(hdcp, 1000, output);
+ set_state_id(hdcp, output, D2_A1_READ_H_PRIME);
+ break;
+ case D2_A1_READ_H_PRIME:
+ if (input->h_prime_available != PASS) {
+ if (event_ctx->event ==
+ MOD_HDCP_EVENT_WATCHDOG_TIMEOUT)
+ /* 1A-10-3: consider h' timeout a failure */
+ fail_and_restart_in_ms(1000, &status, output);
+ else
+ increment_stay_counter(hdcp);
+ break;
+ } else if (input->h_prime_read != PASS) {
+ fail_and_restart_in_ms(0, &status, output);
+ break;
+ }
+ set_watchdog_in_ms(hdcp, 200, output);
+ set_state_id(hdcp, output, D2_A1_READ_PAIRING_INFO_AND_VALIDATE_H_PRIME);
+ break;
+ case D2_A1_READ_PAIRING_INFO_AND_VALIDATE_H_PRIME:
+ if (input->pairing_available != PASS) {
+ if (event_ctx->event ==
+ MOD_HDCP_EVENT_WATCHDOG_TIMEOUT)
+ /*
+ * 1A-11: consider pairing info timeout
+ * a failure
+ */
+ fail_and_restart_in_ms(0, &status, output);
+ else
+ increment_stay_counter(hdcp);
+ break;
+ } else if (input->pairing_info_read != PASS ||
+ input->h_prime_validation != PASS) {
+ /* 1A-10-1: consider invalid h' a failure */
+ fail_and_restart_in_ms(0, &status, output);
+ break;
+ }
+ callback_in_ms(0, output);
+ set_state_id(hdcp, output, D2_A2_LOCALITY_CHECK);
+ break;
+ case D2_A1_SEND_STORED_KM:
+ if (input->stored_km_write != PASS) {
+ fail_and_restart_in_ms(0, &status, output);
+ break;
+ }
+ set_watchdog_in_ms(hdcp, 200, output);
+ set_state_id(hdcp, output, D2_A1_VALIDATE_H_PRIME);
+ break;
+ case D2_A1_VALIDATE_H_PRIME:
+ if (input->h_prime_available != PASS) {
+ if (event_ctx->event ==
+ MOD_HDCP_EVENT_WATCHDOG_TIMEOUT)
+ /* 1A-10-2: consider h' timeout a failure */
+ fail_and_restart_in_ms(1000, &status, output);
+ else
+ increment_stay_counter(hdcp);
+ break;
+ } else if (input->h_prime_read != PASS) {
+ fail_and_restart_in_ms(0, &status, output);
+ break;
+ } else if (input->h_prime_validation != PASS) {
+ /* 1A-10-1: consider invalid h' a failure */
+ adjust->hdcp2.force_no_stored_km = 1;
+ fail_and_restart_in_ms(0, &status, output);
+ break;
+ }
+ callback_in_ms(0, output);
+ set_state_id(hdcp, output, D2_A2_LOCALITY_CHECK);
+ break;
+ case D2_A2_LOCALITY_CHECK:
+ if (hdcp->state.stay_count > 10 ||
+ input->lc_init_prepare != PASS ||
+ input->lc_init_write != PASS ||
+ input->l_prime_read != PASS) {
+ /* 1A-12: consider invalid l' a failure */
+ fail_and_restart_in_ms(0, &status, output);
+ break;
+ } else if (input->l_prime_validation != PASS) {
+ callback_in_ms(0, output);
+ increment_stay_counter(hdcp);
+ break;
+ }
+ callback_in_ms(0, output);
+ set_state_id(hdcp, output, D2_A34_EXCHANGE_KS_AND_TEST_FOR_REPEATER);
+ break;
+ case D2_A34_EXCHANGE_KS_AND_TEST_FOR_REPEATER:
+ if (input->eks_prepare != PASS ||
+ input->eks_write != PASS) {
+ fail_and_restart_in_ms(0, &status, output);
+ break;
+ }
+ if (conn->is_repeater) {
+ set_watchdog_in_ms(hdcp, 3000, output);
+ set_state_id(hdcp, output, D2_A6_WAIT_FOR_RX_ID_LIST);
+ } else {
+ callback_in_ms(0, output);
+ set_state_id(hdcp, output, D2_SEND_CONTENT_STREAM_TYPE);
+ }
+ break;
+ case D2_SEND_CONTENT_STREAM_TYPE:
+ if (input->rxstatus_read != PASS ||
+ input->reauth_request_check != PASS ||
+ input->link_integrity_check_dp != PASS ||
+ input->content_stream_type_write != PASS) {
+ fail_and_restart_in_ms(0, &status, output);
+ break;
+ }
+ callback_in_ms(210, output);
+ set_state_id(hdcp, output, D2_ENABLE_ENCRYPTION);
+ break;
+ case D2_ENABLE_ENCRYPTION:
+ if (input->rxstatus_read != PASS ||
+ input->reauth_request_check != PASS ||
+ input->link_integrity_check_dp != PASS) {
+ /*
+ * 1A-07: restart hdcp on REAUTH_REQ
+ * 1B-08: restart hdcp on REAUTH_REQ
+ */
+ fail_and_restart_in_ms(0, &status, output);
+ break;
+ } else if (event_ctx->rx_id_list_ready && conn->is_repeater) {
+ callback_in_ms(0, output);
+ set_state_id(hdcp, output, D2_A78_VERIFY_RX_ID_LIST_AND_SEND_ACK);
+ break;
+ } else if (input->enable_encryption != PASS ||
+ (is_dp_mst_hdcp(hdcp) && input->stream_encryption_dp != PASS)) {
+ fail_and_restart_in_ms(0, &status, output);
+ break;
+ }
+ set_state_id(hdcp, output, D2_A5_AUTHENTICATED);
+ HDCP_FULL_DDC_TRACE(hdcp);
+ break;
+ case D2_A5_AUTHENTICATED:
+ if (input->rxstatus_read != PASS ||
+ input->reauth_request_check != PASS) {
+ fail_and_restart_in_ms(0, &status, output);
+ break;
+ } else if (input->link_integrity_check_dp != PASS) {
+ if (hdcp->connection.hdcp2_retry_count >= 1)
+ adjust->hdcp2.force_type = MOD_HDCP_FORCE_TYPE_0;
+ fail_and_restart_in_ms(0, &status, output);
+ break;
+ } else if (event_ctx->rx_id_list_ready && conn->is_repeater) {
+ callback_in_ms(0, output);
+ set_state_id(hdcp, output, D2_A78_VERIFY_RX_ID_LIST_AND_SEND_ACK);
+ break;
+ }
+ increment_stay_counter(hdcp);
+ break;
+ case D2_A6_WAIT_FOR_RX_ID_LIST:
+ if (input->rxstatus_read != PASS ||
+ input->reauth_request_check != PASS ||
+ input->link_integrity_check_dp != PASS) {
+ fail_and_restart_in_ms(0, &status, output);
+ break;
+ } else if (!event_ctx->rx_id_list_ready) {
+ if (event_ctx->event == MOD_HDCP_EVENT_WATCHDOG_TIMEOUT)
+ /* 1B-02: consider rx id list timeout a failure */
+ fail_and_restart_in_ms(0, &status, output);
+ else
+ increment_stay_counter(hdcp);
+ break;
+ }
+ callback_in_ms(0, output);
+ set_state_id(hdcp, output, D2_A78_VERIFY_RX_ID_LIST_AND_SEND_ACK);
+ break;
+ case D2_A78_VERIFY_RX_ID_LIST_AND_SEND_ACK:
+ if (input->rxstatus_read != PASS ||
+ input->reauth_request_check != PASS ||
+ input->link_integrity_check_dp != PASS ||
+ input->rx_id_list_read != PASS ||
+ input->device_count_check != PASS ||
+ input->rx_id_list_validation != PASS ||
+ input->repeater_auth_ack_write != PASS) {
+ /*
+ * 1B-03: consider invalid v' a failure
+ * 1B-04: consider MAX_DEVS_EXCEEDED a failure
+ * 1B-05: consider MAX_CASCADE_EXCEEDED a failure
+ * 1B-06: consider invalid seq_num_V a failure
+ * 1B-09: consider seq_num_V rollover a failure
+ */
+ fail_and_restart_in_ms(0, &status, output);
+ break;
+ }
+ callback_in_ms(0, output);
+ set_state_id(hdcp, output, D2_A9_SEND_STREAM_MANAGEMENT);
+ break;
+ case D2_A9_SEND_STREAM_MANAGEMENT:
+ if (input->rxstatus_read != PASS ||
+ input->reauth_request_check != PASS ||
+ input->link_integrity_check_dp != PASS) {
+ fail_and_restart_in_ms(0, &status, output);
+ break;
+ } else if (event_ctx->rx_id_list_ready) {
+ callback_in_ms(0, output);
+ set_state_id(hdcp, output, D2_A78_VERIFY_RX_ID_LIST_AND_SEND_ACK);
+ break;
+ } else if (input->prepare_stream_manage != PASS ||
+ input->stream_manage_write != PASS) {
+ if (event_ctx->event == MOD_HDCP_EVENT_CALLBACK)
+ fail_and_restart_in_ms(0, &status, output);
+ else
+ increment_stay_counter(hdcp);
+ break;
+ }
+ callback_in_ms(100, output);
+ set_state_id(hdcp, output, D2_A9_VALIDATE_STREAM_READY);
+ break;
+ case D2_A9_VALIDATE_STREAM_READY:
+ if (input->rxstatus_read != PASS ||
+ input->reauth_request_check != PASS ||
+ input->link_integrity_check_dp != PASS) {
+ fail_and_restart_in_ms(0, &status, output);
+ break;
+ } else if (event_ctx->rx_id_list_ready) {
+ callback_in_ms(0, output);
+ set_state_id(hdcp, output, D2_A78_VERIFY_RX_ID_LIST_AND_SEND_ACK);
+ break;
+ } else if (input->stream_ready_read != PASS ||
+ input->stream_ready_validation != PASS) {
+ /*
+ * 1B-10-1: restart content stream management
+ * on invalid M'
+ * 1B-10-2: consider stream ready timeout a failure
+ */
+ if (hdcp->auth.count.stream_management_retry_count > 10) {
+ fail_and_restart_in_ms(0, &status, output);
+ } else if (event_ctx->event == MOD_HDCP_EVENT_CALLBACK) {
+ hdcp->auth.count.stream_management_retry_count++;
+ callback_in_ms(0, output);
+ set_state_id(hdcp, output, D2_A9_SEND_STREAM_MANAGEMENT);
+ } else {
+ increment_stay_counter(hdcp);
+ }
+ break;
+ }
+ callback_in_ms(200, output);
+ set_state_id(hdcp, output, D2_ENABLE_ENCRYPTION);
+ break;
+ default:
+ status = MOD_HDCP_STATUS_INVALID_STATE;
+ fail_and_restart_in_ms(0, &status, output);
+ break;
+ }
+ return status;
+}
diff --git a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_ddc.c b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_ddc.c
index e7baae059b85..ff9d54812e62 100644
--- a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_ddc.c
+++ b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_ddc.c
@@ -51,6 +51,26 @@ enum mod_hdcp_ddc_message_id {
MOD_HDCP_MESSAGE_ID_READ_KSV_FIFO,
MOD_HDCP_MESSAGE_ID_READ_BINFO,
+ /* HDCP 2.2 */
+
+ MOD_HDCP_MESSAGE_ID_HDCP2VERSION,
+ MOD_HDCP_MESSAGE_ID_RX_CAPS,
+ MOD_HDCP_MESSAGE_ID_WRITE_AKE_INIT,
+ MOD_HDCP_MESSAGE_ID_READ_AKE_SEND_CERT,
+ MOD_HDCP_MESSAGE_ID_WRITE_AKE_NO_STORED_KM,
+ MOD_HDCP_MESSAGE_ID_WRITE_AKE_STORED_KM,
+ MOD_HDCP_MESSAGE_ID_READ_AKE_SEND_H_PRIME,
+ MOD_HDCP_MESSAGE_ID_READ_AKE_SEND_PAIRING_INFO,
+ MOD_HDCP_MESSAGE_ID_WRITE_LC_INIT,
+ MOD_HDCP_MESSAGE_ID_READ_LC_SEND_L_PRIME,
+ MOD_HDCP_MESSAGE_ID_WRITE_SKE_SEND_EKS,
+ MOD_HDCP_MESSAGE_ID_READ_REPEATER_AUTH_SEND_RECEIVERID_LIST,
+ MOD_HDCP_MESSAGE_ID_WRITE_REPEATER_AUTH_SEND_ACK,
+ MOD_HDCP_MESSAGE_ID_WRITE_REPEATER_AUTH_STREAM_MANAGE,
+ MOD_HDCP_MESSAGE_ID_READ_REPEATER_AUTH_STREAM_READY,
+ MOD_HDCP_MESSAGE_ID_READ_RXSTATUS,
+ MOD_HDCP_MESSAGE_ID_WRITE_CONTENT_STREAM_TYPE,
+
MOD_HDCP_MESSAGE_ID_MAX
};
@@ -70,6 +90,22 @@ static const uint8_t hdcp_i2c_offsets[] = {
[MOD_HDCP_MESSAGE_ID_READ_BSTATUS] = 0x41,
[MOD_HDCP_MESSAGE_ID_READ_KSV_FIFO] = 0x43,
[MOD_HDCP_MESSAGE_ID_READ_BINFO] = 0xFF,
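+ /* HDCP 2.2 over HDMI: 0x50 = HDCP2Version, 0x60 = Write_Message,
+ * 0x70 = RxStatus, 0x80 = Read_Message
+ */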
+ [MOD_HDCP_MESSAGE_ID_HDCP2VERSION] = 0x50,
+ [MOD_HDCP_MESSAGE_ID_WRITE_AKE_INIT] = 0x60,
+ [MOD_HDCP_MESSAGE_ID_READ_AKE_SEND_CERT] = 0x80,
+ [MOD_HDCP_MESSAGE_ID_WRITE_AKE_NO_STORED_KM] = 0x60,
+ [MOD_HDCP_MESSAGE_ID_WRITE_AKE_STORED_KM] = 0x60,
+ [MOD_HDCP_MESSAGE_ID_READ_AKE_SEND_H_PRIME] = 0x80,
+ [MOD_HDCP_MESSAGE_ID_READ_AKE_SEND_PAIRING_INFO] = 0x80,
+ [MOD_HDCP_MESSAGE_ID_WRITE_LC_INIT] = 0x60,
+ [MOD_HDCP_MESSAGE_ID_READ_LC_SEND_L_PRIME] = 0x80,
+ [MOD_HDCP_MESSAGE_ID_WRITE_SKE_SEND_EKS] = 0x60,
+ [MOD_HDCP_MESSAGE_ID_READ_REPEATER_AUTH_SEND_RECEIVERID_LIST] = 0x80,
+ [MOD_HDCP_MESSAGE_ID_WRITE_REPEATER_AUTH_SEND_ACK] = 0x60,
+ [MOD_HDCP_MESSAGE_ID_WRITE_REPEATER_AUTH_STREAM_MANAGE] = 0x60,
+ [MOD_HDCP_MESSAGE_ID_READ_REPEATER_AUTH_STREAM_READY] = 0x80,
+ [MOD_HDCP_MESSAGE_ID_READ_RXSTATUS] = 0x70,
+ [MOD_HDCP_MESSAGE_ID_WRITE_CONTENT_STREAM_TYPE] = 0x0
};
static const uint32_t hdcp_dpcd_addrs[] = {
@@ -88,6 +124,22 @@ static const uint32_t hdcp_dpcd_addrs[] = {
[MOD_HDCP_MESSAGE_ID_READ_BSTATUS] = 0x68029,
[MOD_HDCP_MESSAGE_ID_READ_KSV_FIFO] = 0x6802c,
[MOD_HDCP_MESSAGE_ID_READ_BINFO] = 0x6802a,
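+ /* HDCP 2.2 over DP: message addresses in the 0x69000 DPCD region */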
+ [MOD_HDCP_MESSAGE_ID_RX_CAPS] = 0x6921d,
+ [MOD_HDCP_MESSAGE_ID_WRITE_AKE_INIT] = 0x69000,
+ [MOD_HDCP_MESSAGE_ID_READ_AKE_SEND_CERT] = 0x6900b,
+ [MOD_HDCP_MESSAGE_ID_WRITE_AKE_NO_STORED_KM] = 0x69220,
+ [MOD_HDCP_MESSAGE_ID_WRITE_AKE_STORED_KM] = 0x692a0,
+ [MOD_HDCP_MESSAGE_ID_READ_AKE_SEND_H_PRIME] = 0x692c0,
+ [MOD_HDCP_MESSAGE_ID_READ_AKE_SEND_PAIRING_INFO] = 0x692e0,
+ [MOD_HDCP_MESSAGE_ID_WRITE_LC_INIT] = 0x692f0,
+ [MOD_HDCP_MESSAGE_ID_READ_LC_SEND_L_PRIME] = 0x692f8,
+ [MOD_HDCP_MESSAGE_ID_WRITE_SKE_SEND_EKS] = 0x69318,
+ [MOD_HDCP_MESSAGE_ID_READ_REPEATER_AUTH_SEND_RECEIVERID_LIST] = 0x69330,
+ [MOD_HDCP_MESSAGE_ID_WRITE_REPEATER_AUTH_SEND_ACK] = 0x693e0,
+ [MOD_HDCP_MESSAGE_ID_WRITE_REPEATER_AUTH_STREAM_MANAGE] = 0x693f0,
+ [MOD_HDCP_MESSAGE_ID_READ_REPEATER_AUTH_STREAM_READY] = 0x69473,
+ [MOD_HDCP_MESSAGE_ID_READ_RXSTATUS] = 0x69493,
+ [MOD_HDCP_MESSAGE_ID_WRITE_CONTENT_STREAM_TYPE] = 0x69494
};
static enum mod_hdcp_status read(struct mod_hdcp *hdcp,
@@ -303,3 +355,277 @@ enum mod_hdcp_status mod_hdcp_write_an(struct mod_hdcp *hdcp)
hdcp->auth.msg.hdcp1.an,
sizeof(hdcp->auth.msg.hdcp1.an));
}
+
+enum mod_hdcp_status mod_hdcp_read_hdcp2version(struct mod_hdcp *hdcp)
+{
+ enum mod_hdcp_status status;
+
+ if (is_dp_hdcp(hdcp))
+ status = MOD_HDCP_STATUS_INVALID_OPERATION;
+ else
+ status = read(hdcp, MOD_HDCP_MESSAGE_ID_HDCP2VERSION,
+ &hdcp->auth.msg.hdcp2.hdcp2version_hdmi,
+ sizeof(hdcp->auth.msg.hdcp2.hdcp2version_hdmi));
+
+ return status;
+}
+
+enum mod_hdcp_status mod_hdcp_read_rxcaps(struct mod_hdcp *hdcp)
+{
+ enum mod_hdcp_status status;
+
+ if (!is_dp_hdcp(hdcp))
+ status = MOD_HDCP_STATUS_INVALID_OPERATION;
+ else
+ status = read(hdcp, MOD_HDCP_MESSAGE_ID_RX_CAPS,
+ hdcp->auth.msg.hdcp2.rxcaps_dp,
+ sizeof(hdcp->auth.msg.hdcp2.rxcaps_dp));
+
+ return status;
+}
+
+enum mod_hdcp_status mod_hdcp_read_rxstatus(struct mod_hdcp *hdcp)
+{
+ enum mod_hdcp_status status;
+
+ if (is_dp_hdcp(hdcp)) {
+ status = read(hdcp, MOD_HDCP_MESSAGE_ID_READ_RXSTATUS,
+ &hdcp->auth.msg.hdcp2.rxstatus_dp,
+ 1);
+ } else {
+ status = read(hdcp, MOD_HDCP_MESSAGE_ID_READ_RXSTATUS,
+ (uint8_t *)&hdcp->auth.msg.hdcp2.rxstatus,
+ sizeof(hdcp->auth.msg.hdcp2.rxstatus));
+ }
+ return status;
+}
+
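+/*
+ * On DP, HDCP 2.2 messages carry no msg_id byte on the wire; the DPCD
+ * address identifies the message. The DP branches of the readers below
+ * therefore store the HDCP 2.2 msg_id at byte 0 themselves (e.g. 3 for
+ * AKE_Send_Cert) and read the payload into the rest of the buffer, so
+ * both transports end up with identically framed buffers.
+ */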
+enum mod_hdcp_status mod_hdcp_read_ake_cert(struct mod_hdcp *hdcp)
+{
+ enum mod_hdcp_status status;
+
+ if (is_dp_hdcp(hdcp)) {
+ hdcp->auth.msg.hdcp2.ake_cert[0] = 3;
+ status = read(hdcp, MOD_HDCP_MESSAGE_ID_READ_AKE_SEND_CERT,
+ hdcp->auth.msg.hdcp2.ake_cert+1,
+ sizeof(hdcp->auth.msg.hdcp2.ake_cert)-1);
+ } else {
+ status = read(hdcp, MOD_HDCP_MESSAGE_ID_READ_AKE_SEND_CERT,
+ hdcp->auth.msg.hdcp2.ake_cert,
+ sizeof(hdcp->auth.msg.hdcp2.ake_cert));
+ }
+ return status;
+}
+
+enum mod_hdcp_status mod_hdcp_read_h_prime(struct mod_hdcp *hdcp)
+{
+ enum mod_hdcp_status status;
+
+ if (is_dp_hdcp(hdcp)) {
+ hdcp->auth.msg.hdcp2.ake_h_prime[0] = 7;
+ status = read(hdcp, MOD_HDCP_MESSAGE_ID_READ_AKE_SEND_H_PRIME,
+ hdcp->auth.msg.hdcp2.ake_h_prime+1,
+ sizeof(hdcp->auth.msg.hdcp2.ake_h_prime)-1);
+ } else {
+ status = read(hdcp, MOD_HDCP_MESSAGE_ID_READ_AKE_SEND_H_PRIME,
+ hdcp->auth.msg.hdcp2.ake_h_prime,
+ sizeof(hdcp->auth.msg.hdcp2.ake_h_prime));
+ }
+ return status;
+}
+
+enum mod_hdcp_status mod_hdcp_read_pairing_info(struct mod_hdcp *hdcp)
+{
+ enum mod_hdcp_status status;
+
+ if (is_dp_hdcp(hdcp)) {
+ hdcp->auth.msg.hdcp2.ake_pairing_info[0] = 8;
+ status = read(hdcp, MOD_HDCP_MESSAGE_ID_READ_AKE_SEND_PAIRING_INFO,
+ hdcp->auth.msg.hdcp2.ake_pairing_info+1,
+ sizeof(hdcp->auth.msg.hdcp2.ake_pairing_info)-1);
+ } else {
+ status = read(hdcp, MOD_HDCP_MESSAGE_ID_READ_AKE_SEND_PAIRING_INFO,
+ hdcp->auth.msg.hdcp2.ake_pairing_info,
+ sizeof(hdcp->auth.msg.hdcp2.ake_pairing_info));
+ }
+ return status;
+}
+
+enum mod_hdcp_status mod_hdcp_read_l_prime(struct mod_hdcp *hdcp)
+{
+ enum mod_hdcp_status status;
+
+ if (is_dp_hdcp(hdcp)) {
+ hdcp->auth.msg.hdcp2.lc_l_prime[0] = 10;
+ status = read(hdcp, MOD_HDCP_MESSAGE_ID_READ_LC_SEND_L_PRIME,
+ hdcp->auth.msg.hdcp2.lc_l_prime+1,
+ sizeof(hdcp->auth.msg.hdcp2.lc_l_prime)-1);
+ } else {
+ status = read(hdcp, MOD_HDCP_MESSAGE_ID_READ_LC_SEND_L_PRIME,
+ hdcp->auth.msg.hdcp2.lc_l_prime,
+ sizeof(hdcp->auth.msg.hdcp2.lc_l_prime));
+ }
+ return status;
+}
+
+enum mod_hdcp_status mod_hdcp_read_rx_id_list(struct mod_hdcp *hdcp)
+{
+ enum mod_hdcp_status status;
+
+ if (is_dp_hdcp(hdcp)) {
+ hdcp->auth.msg.hdcp2.rx_id_list[0] = 12;
+ status = read(hdcp, MOD_HDCP_MESSAGE_ID_READ_REPEATER_AUTH_SEND_RECEIVERID_LIST,
+ hdcp->auth.msg.hdcp2.rx_id_list+1,
+ sizeof(hdcp->auth.msg.hdcp2.rx_id_list)-1);
+ } else {
+ status = read(hdcp, MOD_HDCP_MESSAGE_ID_READ_REPEATER_AUTH_SEND_RECEIVERID_LIST,
+ hdcp->auth.msg.hdcp2.rx_id_list,
+ hdcp->auth.msg.hdcp2.rx_id_list_size);
+ }
+ return status;
+}
+
+enum mod_hdcp_status mod_hdcp_read_stream_ready(struct mod_hdcp *hdcp)
+{
+ enum mod_hdcp_status status;
+
+ if (is_dp_hdcp(hdcp)) {
+ hdcp->auth.msg.hdcp2.repeater_auth_stream_ready[0] = 17;
+ status = read(hdcp, MOD_HDCP_MESSAGE_ID_READ_REPEATER_AUTH_STREAM_READY,
+ hdcp->auth.msg.hdcp2.repeater_auth_stream_ready+1,
+ sizeof(hdcp->auth.msg.hdcp2.repeater_auth_stream_ready)-1);
+ } else {
+ status = read(hdcp, MOD_HDCP_MESSAGE_ID_READ_REPEATER_AUTH_STREAM_READY,
+ hdcp->auth.msg.hdcp2.repeater_auth_stream_ready,
+ sizeof(hdcp->auth.msg.hdcp2.repeater_auth_stream_ready));
+ }
+ return status;
+}
+
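+/*
+ * The writers are the mirror image: on DP the leading msg_id byte of
+ * each prepared message is skipped (buffer+1, size-1) because the DPCD
+ * write address implies it; on HDMI the full message is sent.
+ */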
+enum mod_hdcp_status mod_hdcp_write_ake_init(struct mod_hdcp *hdcp)
+{
+ enum mod_hdcp_status status;
+
+ if (is_dp_hdcp(hdcp))
+ status = write(hdcp, MOD_HDCP_MESSAGE_ID_WRITE_AKE_INIT,
+ hdcp->auth.msg.hdcp2.ake_init+1,
+ sizeof(hdcp->auth.msg.hdcp2.ake_init)-1);
+ else
+ status = write(hdcp, MOD_HDCP_MESSAGE_ID_WRITE_AKE_INIT,
+ hdcp->auth.msg.hdcp2.ake_init,
+ sizeof(hdcp->auth.msg.hdcp2.ake_init));
+ return status;
+}
+
+enum mod_hdcp_status mod_hdcp_write_no_stored_km(struct mod_hdcp *hdcp)
+{
+ enum mod_hdcp_status status;
+
+ if (is_dp_hdcp(hdcp))
+ status = write(hdcp, MOD_HDCP_MESSAGE_ID_WRITE_AKE_NO_STORED_KM,
+ hdcp->auth.msg.hdcp2.ake_no_stored_km+1,
+ sizeof(hdcp->auth.msg.hdcp2.ake_no_stored_km)-1);
+ else
+ status = write(hdcp, MOD_HDCP_MESSAGE_ID_WRITE_AKE_NO_STORED_KM,
+ hdcp->auth.msg.hdcp2.ake_no_stored_km,
+ sizeof(hdcp->auth.msg.hdcp2.ake_no_stored_km));
+ return status;
+}
+
+enum mod_hdcp_status mod_hdcp_write_stored_km(struct mod_hdcp *hdcp)
+{
+ enum mod_hdcp_status status;
+
+ if (is_dp_hdcp(hdcp))
+ status = write(hdcp, MOD_HDCP_MESSAGE_ID_WRITE_AKE_STORED_KM,
+ hdcp->auth.msg.hdcp2.ake_stored_km+1,
+ sizeof(hdcp->auth.msg.hdcp2.ake_stored_km)-1);
+ else
+ status = write(hdcp, MOD_HDCP_MESSAGE_ID_WRITE_AKE_STORED_KM,
+ hdcp->auth.msg.hdcp2.ake_stored_km,
+ sizeof(hdcp->auth.msg.hdcp2.ake_stored_km));
+ return status;
+}
+
+enum mod_hdcp_status mod_hdcp_write_lc_init(struct mod_hdcp *hdcp)
+{
+ enum mod_hdcp_status status;
+
+ if (is_dp_hdcp(hdcp))
+ status = write(hdcp, MOD_HDCP_MESSAGE_ID_WRITE_LC_INIT,
+ hdcp->auth.msg.hdcp2.lc_init+1,
+ sizeof(hdcp->auth.msg.hdcp2.lc_init)-1);
+ else
+ status = write(hdcp, MOD_HDCP_MESSAGE_ID_WRITE_LC_INIT,
+ hdcp->auth.msg.hdcp2.lc_init,
+ sizeof(hdcp->auth.msg.hdcp2.lc_init));
+ return status;
+}
+
+enum mod_hdcp_status mod_hdcp_write_eks(struct mod_hdcp *hdcp)
+{
+ enum mod_hdcp_status status;
+
+ if (is_dp_hdcp(hdcp))
+ status = write(hdcp,
+ MOD_HDCP_MESSAGE_ID_WRITE_SKE_SEND_EKS,
+ hdcp->auth.msg.hdcp2.ske_eks+1,
+ sizeof(hdcp->auth.msg.hdcp2.ske_eks)-1);
+ else
+ status = write(hdcp,
+ MOD_HDCP_MESSAGE_ID_WRITE_SKE_SEND_EKS,
+ hdcp->auth.msg.hdcp2.ske_eks,
+ sizeof(hdcp->auth.msg.hdcp2.ske_eks));
+ return status;
+}
+
+enum mod_hdcp_status mod_hdcp_write_repeater_auth_ack(struct mod_hdcp *hdcp)
+{
+ enum mod_hdcp_status status;
+
+ if (is_dp_hdcp(hdcp))
+ status = write(hdcp, MOD_HDCP_MESSAGE_ID_WRITE_REPEATER_AUTH_SEND_ACK,
+ hdcp->auth.msg.hdcp2.repeater_auth_ack+1,
+ sizeof(hdcp->auth.msg.hdcp2.repeater_auth_ack)-1);
+ else
+ status = write(hdcp, MOD_HDCP_MESSAGE_ID_WRITE_REPEATER_AUTH_SEND_ACK,
+ hdcp->auth.msg.hdcp2.repeater_auth_ack,
+ sizeof(hdcp->auth.msg.hdcp2.repeater_auth_ack));
+ return status;
+}
+
+enum mod_hdcp_status mod_hdcp_write_stream_manage(struct mod_hdcp *hdcp)
+{
+ enum mod_hdcp_status status;
+
+ if (is_dp_hdcp(hdcp))
+ status = write(hdcp,
+ MOD_HDCP_MESSAGE_ID_WRITE_REPEATER_AUTH_STREAM_MANAGE,
+ hdcp->auth.msg.hdcp2.repeater_auth_stream_manage+1,
+ hdcp->auth.msg.hdcp2.stream_manage_size-1);
+ else
+ status = write(hdcp,
+ MOD_HDCP_MESSAGE_ID_WRITE_REPEATER_AUTH_STREAM_MANAGE,
+ hdcp->auth.msg.hdcp2.repeater_auth_stream_manage,
+ hdcp->auth.msg.hdcp2.stream_manage_size);
+ return status;
+}
+
+enum mod_hdcp_status mod_hdcp_write_content_type(struct mod_hdcp *hdcp)
+{
+ enum mod_hdcp_status status;
+
+ if (is_dp_hdcp(hdcp))
+ status = write(hdcp, MOD_HDCP_MESSAGE_ID_WRITE_CONTENT_STREAM_TYPE,
+ hdcp->auth.msg.hdcp2.content_stream_type_dp+1,
+ sizeof(hdcp->auth.msg.hdcp2.content_stream_type_dp)-1);
+ else
+ status = MOD_HDCP_STATUS_INVALID_OPERATION;
+ return status;
+}
diff --git a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_log.c b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_log.c
index 3982ced5f969..724ebcee9a19 100644
--- a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_log.c
+++ b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_log.c
@@ -116,6 +116,58 @@ char *mod_hdcp_status_to_str(int32_t status)
return "MOD_HDCP_STATUS_DDC_FAILURE";
case MOD_HDCP_STATUS_INVALID_OPERATION:
return "MOD_HDCP_STATUS_INVALID_OPERATION";
+ case MOD_HDCP_STATUS_HDCP2_NOT_CAPABLE:
+ return "MOD_HDCP_STATUS_HDCP2_NOT_CAPABLE";
+ case MOD_HDCP_STATUS_HDCP2_CREATE_SESSION_FAILURE:
+ return "MOD_HDCP_STATUS_HDCP2_CREATE_SESSION_FAILURE";
+ case MOD_HDCP_STATUS_HDCP2_DESTROY_SESSION_FAILURE:
+ return "MOD_HDCP_STATUS_HDCP2_DESTROY_SESSION_FAILURE";
+ case MOD_HDCP_STATUS_HDCP2_PREP_AKE_INIT_FAILURE:
+ return "MOD_HDCP_STATUS_HDCP2_PREP_AKE_INIT_FAILURE";
+ case MOD_HDCP_STATUS_HDCP2_AKE_CERT_PENDING:
+ return "MOD_HDCP_STATUS_HDCP2_AKE_CERT_PENDING";
+ case MOD_HDCP_STATUS_HDCP2_H_PRIME_PENDING:
+ return "MOD_HDCP_STATUS_HDCP2_H_PRIME_PENDING";
+ case MOD_HDCP_STATUS_HDCP2_PAIRING_INFO_PENDING:
+ return "MOD_HDCP_STATUS_HDCP2_PAIRING_INFO_PENDING";
+ case MOD_HDCP_STATUS_HDCP2_VALIDATE_AKE_CERT_FAILURE:
+ return "MOD_HDCP_STATUS_HDCP2_VALIDATE_AKE_CERT_FAILURE";
+ case MOD_HDCP_STATUS_HDCP2_AKE_CERT_REVOKED:
+ return "MOD_HDCP_STATUS_HDCP2_AKE_CERT_REVOKED";
+ case MOD_HDCP_STATUS_HDCP2_VALIDATE_H_PRIME_FAILURE:
+ return "MOD_HDCP_STATUS_HDCP2_VALIDATE_H_PRIME_FAILURE";
+ case MOD_HDCP_STATUS_HDCP2_VALIDATE_PAIRING_INFO_FAILURE:
+ return "MOD_HDCP_STATUS_HDCP2_VALIDATE_PAIRING_INFO_FAILURE";
+ case MOD_HDCP_STATUS_HDCP2_PREP_LC_INIT_FAILURE:
+ return "MOD_HDCP_STATUS_HDCP2_PREP_LC_INIT_FAILURE";
+ case MOD_HDCP_STATUS_HDCP2_L_PRIME_PENDING:
+ return "MOD_HDCP_STATUS_HDCP2_L_PRIME_PENDING";
+ case MOD_HDCP_STATUS_HDCP2_VALIDATE_L_PRIME_FAILURE:
+ return "MOD_HDCP_STATUS_HDCP2_VALIDATE_L_PRIME_FAILURE";
+ case MOD_HDCP_STATUS_HDCP2_PREP_EKS_FAILURE:
+ return "MOD_HDCP_STATUS_HDCP2_PREP_EKS_FAILURE";
+ case MOD_HDCP_STATUS_HDCP2_ENABLE_ENCRYPTION_FAILURE:
+ return "MOD_HDCP_STATUS_HDCP2_ENABLE_ENCRYPTION_FAILURE";
+ case MOD_HDCP_STATUS_HDCP2_VALIDATE_RX_ID_LIST_FAILURE:
+ return "MOD_HDCP_STATUS_HDCP2_VALIDATE_RX_ID_LIST_FAILURE";
+ case MOD_HDCP_STATUS_HDCP2_RX_ID_LIST_REVOKED:
+ return "MOD_HDCP_STATUS_HDCP2_RX_ID_LIST_REVOKED";
+ case MOD_HDCP_STATUS_HDCP2_RX_ID_LIST_NOT_READY:
+ return "MOD_HDCP_STATUS_HDCP2_RX_ID_LIST_NOT_READY";
+ case MOD_HDCP_STATUS_HDCP2_ENABLE_STREAM_ENCRYPTION:
+ return "MOD_HDCP_STATUS_HDCP2_ENABLE_STREAM_ENCRYPTION";
+ case MOD_HDCP_STATUS_HDCP2_STREAM_READY_PENDING:
+ return "MOD_HDCP_STATUS_HDCP2_STREAM_READY_PENDING";
+ case MOD_HDCP_STATUS_HDCP2_VALIDATE_STREAM_READY_FAILURE:
+ return "MOD_HDCP_STATUS_HDCP2_VALIDATE_STREAM_READY_FAILURE";
+ case MOD_HDCP_STATUS_HDCP2_PREPARE_STREAM_MANAGEMENT_FAILURE:
+ return "MOD_HDCP_STATUS_HDCP2_PREPARE_STREAM_MANAGEMENT_FAILURE";
+ case MOD_HDCP_STATUS_HDCP2_REAUTH_REQUEST:
+ return "MOD_HDCP_STATUS_HDCP2_REAUTH_REQUEST";
+ case MOD_HDCP_STATUS_HDCP2_REAUTH_LINK_INTEGRITY_FAILURE:
+ return "MOD_HDCP_STATUS_HDCP2_REAUTH_LINK_INTEGRITY_FAILURE";
+ case MOD_HDCP_STATUS_HDCP2_DEVICE_COUNT_MISMATCH_FAILURE:
+ return "MOD_HDCP_STATUS_HDCP2_DEVICE_COUNT_MISMATCH_FAILURE";
default:
return "MOD_HDCP_STATUS_UNKNOWN";
}
@@ -156,6 +208,72 @@ char *mod_hdcp_state_id_to_str(int32_t id)
return "D1_A6_WAIT_FOR_READY";
case D1_A7_READ_KSV_LIST:
return "D1_A7_READ_KSV_LIST";
+ case H2_A0_KNOWN_HDCP2_CAPABLE_RX:
+ return "H2_A0_KNOWN_HDCP2_CAPABLE_RX";
+ case H2_A1_SEND_AKE_INIT:
+ return "H2_A1_SEND_AKE_INIT";
+ case H2_A1_VALIDATE_AKE_CERT:
+ return "H2_A1_VALIDATE_AKE_CERT";
+ case H2_A1_SEND_NO_STORED_KM:
+ return "H2_A1_SEND_NO_STORED_KM";
+ case H2_A1_READ_H_PRIME:
+ return "H2_A1_READ_H_PRIME";
+ case H2_A1_READ_PAIRING_INFO_AND_VALIDATE_H_PRIME:
+ return "H2_A1_READ_PAIRING_INFO_AND_VALIDATE_H_PRIME";
+ case H2_A1_SEND_STORED_KM:
+ return "H2_A1_SEND_STORED_KM";
+ case H2_A1_VALIDATE_H_PRIME:
+ return "H2_A1_VALIDATE_H_PRIME";
+ case H2_A2_LOCALITY_CHECK:
+ return "H2_A2_LOCALITY_CHECK";
+ case H2_A3_EXCHANGE_KS_AND_TEST_FOR_REPEATER:
+ return "H2_A3_EXCHANGE_KS_AND_TEST_FOR_REPEATER";
+ case H2_ENABLE_ENCRYPTION:
+ return "H2_ENABLE_ENCRYPTION";
+ case H2_A5_AUTHENTICATED:
+ return "H2_A5_AUTHENTICATED";
+ case H2_A6_WAIT_FOR_RX_ID_LIST:
+ return "H2_A6_WAIT_FOR_RX_ID_LIST";
+ case H2_A78_VERIFY_RX_ID_LIST_AND_SEND_ACK:
+ return "H2_A78_VERIFY_RX_ID_LIST_AND_SEND_ACK";
+ case H2_A9_SEND_STREAM_MANAGEMENT:
+ return "H2_A9_SEND_STREAM_MANAGEMENT";
+ case H2_A9_VALIDATE_STREAM_READY:
+ return "H2_A9_VALIDATE_STREAM_READY";
+ case D2_A0_DETERMINE_RX_HDCP_CAPABLE:
+ return "D2_A0_DETERMINE_RX_HDCP_CAPABLE";
+ case D2_A1_SEND_AKE_INIT:
+ return "D2_A1_SEND_AKE_INIT";
+ case D2_A1_VALIDATE_AKE_CERT:
+ return "D2_A1_VALIDATE_AKE_CERT";
+ case D2_A1_SEND_NO_STORED_KM:
+ return "D2_A1_SEND_NO_STORED_KM";
+ case D2_A1_READ_H_PRIME:
+ return "D2_A1_READ_H_PRIME";
+ case D2_A1_READ_PAIRING_INFO_AND_VALIDATE_H_PRIME:
+ return "D2_A1_READ_PAIRING_INFO_AND_VALIDATE_H_PRIME";
+ case D2_A1_SEND_STORED_KM:
+ return "D2_A1_SEND_STORED_KM";
+ case D2_A1_VALIDATE_H_PRIME:
+ return "D2_A1_VALIDATE_H_PRIME";
+ case D2_A2_LOCALITY_CHECK:
+ return "D2_A2_LOCALITY_CHECK";
+ case D2_A34_EXCHANGE_KS_AND_TEST_FOR_REPEATER:
+ return "D2_A34_EXCHANGE_KS_AND_TEST_FOR_REPEATER";
+ case D2_SEND_CONTENT_STREAM_TYPE:
+ return "D2_SEND_CONTENT_STREAM_TYPE";
+ case D2_ENABLE_ENCRYPTION:
+ return "D2_ENABLE_ENCRYPTION";
+ case D2_A5_AUTHENTICATED:
+ return "D2_A5_AUTHENTICATED";
+ case D2_A6_WAIT_FOR_RX_ID_LIST:
+ return "D2_A6_WAIT_FOR_RX_ID_LIST";
+ case D2_A78_VERIFY_RX_ID_LIST_AND_SEND_ACK:
+ return "D2_A78_VERIFY_RX_ID_LIST_AND_SEND_ACK";
+ case D2_A9_SEND_STREAM_MANAGEMENT:
+ return "D2_A9_SEND_STREAM_MANAGEMENT";
+ case D2_A9_VALIDATE_STREAM_READY:
+ return "D2_A9_VALIDATE_STREAM_READY";
default:
return "UNKNOWN_STATE_ID";
};
diff --git a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_log.h b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_log.h
index 2fd0e0a893ef..ff91373ebada 100644
--- a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_log.h
+++ b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_log.h
@@ -27,7 +27,7 @@
#define MOD_HDCP_LOG_H_
#ifdef CONFIG_DRM_AMD_DC_HDCP
-#define HDCP_LOG_ERR(hdcp, ...) DRM_ERROR(__VA_ARGS__)
+#define HDCP_LOG_ERR(hdcp, ...) DRM_WARN(__VA_ARGS__)
#define HDCP_LOG_VER(hdcp, ...) DRM_DEBUG_KMS(__VA_ARGS__)
#define HDCP_LOG_FSM(hdcp, ...) DRM_DEBUG_KMS(__VA_ARGS__)
#define HDCP_LOG_TOP(hdcp, ...) pr_debug("[HDCP_TOP]:"__VA_ARGS__)
@@ -37,7 +37,7 @@
/* default logs */
#define HDCP_ERROR_TRACE(hdcp, status) \
HDCP_LOG_ERR(hdcp, \
- "[Link %d] ERROR %s IN STATE %s", \
+ "[Link %d] WARNING %s IN STATE %s", \
hdcp->config.index, \
mod_hdcp_status_to_str(status), \
mod_hdcp_state_id_to_str(hdcp->state.id))
@@ -45,6 +45,10 @@
HDCP_LOG_VER(hdcp, \
"[Link %d] HDCP 1.4 enabled on display %d", \
hdcp->config.index, displayIndex)
+#define HDCP_HDCP2_ENABLED_TRACE(hdcp, displayIndex) \
+ HDCP_LOG_VER(hdcp, \
+ "[Link %d] HDCP 2.2 enabled on display %d", \
+ hdcp->config.index, displayIndex)
/* state machine logs */
#define HDCP_REMOVE_DISPLAY_TRACE(hdcp, displayIndex) \
HDCP_LOG_FSM(hdcp, \
@@ -93,26 +97,73 @@
hdcp->buf); \
} while (0)
#define HDCP_FULL_DDC_TRACE(hdcp) do { \
- HDCP_DDC_READ_TRACE(hdcp, "BKSV", hdcp->auth.msg.hdcp1.bksv, \
- sizeof(hdcp->auth.msg.hdcp1.bksv)); \
- HDCP_DDC_READ_TRACE(hdcp, "BCAPS", &hdcp->auth.msg.hdcp1.bcaps, \
- sizeof(hdcp->auth.msg.hdcp1.bcaps)); \
- HDCP_DDC_WRITE_TRACE(hdcp, "AN", hdcp->auth.msg.hdcp1.an, \
- sizeof(hdcp->auth.msg.hdcp1.an)); \
- HDCP_DDC_WRITE_TRACE(hdcp, "AKSV", hdcp->auth.msg.hdcp1.aksv, \
- sizeof(hdcp->auth.msg.hdcp1.aksv)); \
- HDCP_DDC_WRITE_TRACE(hdcp, "AINFO", &hdcp->auth.msg.hdcp1.ainfo, \
- sizeof(hdcp->auth.msg.hdcp1.ainfo)); \
- HDCP_DDC_READ_TRACE(hdcp, "RI' / R0'", \
- (uint8_t *)&hdcp->auth.msg.hdcp1.r0p, \
- sizeof(hdcp->auth.msg.hdcp1.r0p)); \
- HDCP_DDC_READ_TRACE(hdcp, "BINFO", \
- (uint8_t *)&hdcp->auth.msg.hdcp1.binfo_dp, \
- sizeof(hdcp->auth.msg.hdcp1.binfo_dp)); \
- HDCP_DDC_READ_TRACE(hdcp, "KSVLIST", hdcp->auth.msg.hdcp1.ksvlist, \
- hdcp->auth.msg.hdcp1.ksvlist_size); \
- HDCP_DDC_READ_TRACE(hdcp, "V'", hdcp->auth.msg.hdcp1.vp, \
- sizeof(hdcp->auth.msg.hdcp1.vp)); \
+ if (is_hdcp1(hdcp)) { \
+ HDCP_DDC_READ_TRACE(hdcp, "BKSV", hdcp->auth.msg.hdcp1.bksv, \
+ sizeof(hdcp->auth.msg.hdcp1.bksv)); \
+ HDCP_DDC_READ_TRACE(hdcp, "BCAPS", &hdcp->auth.msg.hdcp1.bcaps, \
+ sizeof(hdcp->auth.msg.hdcp1.bcaps)); \
+ HDCP_DDC_WRITE_TRACE(hdcp, "AN", hdcp->auth.msg.hdcp1.an, \
+ sizeof(hdcp->auth.msg.hdcp1.an)); \
+ HDCP_DDC_WRITE_TRACE(hdcp, "AKSV", hdcp->auth.msg.hdcp1.aksv, \
+ sizeof(hdcp->auth.msg.hdcp1.aksv)); \
+ HDCP_DDC_WRITE_TRACE(hdcp, "AINFO", &hdcp->auth.msg.hdcp1.ainfo, \
+ sizeof(hdcp->auth.msg.hdcp1.ainfo)); \
+ HDCP_DDC_READ_TRACE(hdcp, "RI' / R0'", \
+ (uint8_t *)&hdcp->auth.msg.hdcp1.r0p, \
+ sizeof(hdcp->auth.msg.hdcp1.r0p)); \
+ HDCP_DDC_READ_TRACE(hdcp, "BINFO", \
+ (uint8_t *)&hdcp->auth.msg.hdcp1.binfo_dp, \
+ sizeof(hdcp->auth.msg.hdcp1.binfo_dp)); \
+ HDCP_DDC_READ_TRACE(hdcp, "KSVLIST", hdcp->auth.msg.hdcp1.ksvlist, \
+ hdcp->auth.msg.hdcp1.ksvlist_size); \
+ HDCP_DDC_READ_TRACE(hdcp, "V'", hdcp->auth.msg.hdcp1.vp, \
+ sizeof(hdcp->auth.msg.hdcp1.vp)); \
+ } else { \
+ HDCP_DDC_READ_TRACE(hdcp, "HDCP2Version", \
+ &hdcp->auth.msg.hdcp2.hdcp2version_hdmi, \
+ sizeof(hdcp->auth.msg.hdcp2.hdcp2version_hdmi)); \
+ HDCP_DDC_READ_TRACE(hdcp, "Rx Caps", hdcp->auth.msg.hdcp2.rxcaps_dp, \
+ sizeof(hdcp->auth.msg.hdcp2.rxcaps_dp)); \
+ HDCP_DDC_WRITE_TRACE(hdcp, "AKE Init", hdcp->auth.msg.hdcp2.ake_init, \
+ sizeof(hdcp->auth.msg.hdcp2.ake_init)); \
+ HDCP_DDC_READ_TRACE(hdcp, "AKE Cert", hdcp->auth.msg.hdcp2.ake_cert, \
+ sizeof(hdcp->auth.msg.hdcp2.ake_cert)); \
+ HDCP_DDC_WRITE_TRACE(hdcp, "Stored KM", \
+ hdcp->auth.msg.hdcp2.ake_stored_km, \
+ sizeof(hdcp->auth.msg.hdcp2.ake_stored_km)); \
+ HDCP_DDC_WRITE_TRACE(hdcp, "No Stored KM", \
+ hdcp->auth.msg.hdcp2.ake_no_stored_km, \
+ sizeof(hdcp->auth.msg.hdcp2.ake_no_stored_km)); \
+ HDCP_DDC_READ_TRACE(hdcp, "H'", hdcp->auth.msg.hdcp2.ake_h_prime, \
+ sizeof(hdcp->auth.msg.hdcp2.ake_h_prime)); \
+ HDCP_DDC_READ_TRACE(hdcp, "Pairing Info", \
+ hdcp->auth.msg.hdcp2.ake_pairing_info, \
+ sizeof(hdcp->auth.msg.hdcp2.ake_pairing_info)); \
+ HDCP_DDC_WRITE_TRACE(hdcp, "LC Init", hdcp->auth.msg.hdcp2.lc_init, \
+ sizeof(hdcp->auth.msg.hdcp2.lc_init)); \
+ HDCP_DDC_READ_TRACE(hdcp, "L'", hdcp->auth.msg.hdcp2.lc_l_prime, \
+ sizeof(hdcp->auth.msg.hdcp2.lc_l_prime)); \
+ HDCP_DDC_WRITE_TRACE(hdcp, "Exchange KS", hdcp->auth.msg.hdcp2.ske_eks, \
+ sizeof(hdcp->auth.msg.hdcp2.ske_eks)); \
+ HDCP_DDC_READ_TRACE(hdcp, "Rx Status", \
+ (uint8_t *)&hdcp->auth.msg.hdcp2.rxstatus, \
+ sizeof(hdcp->auth.msg.hdcp2.rxstatus)); \
+ HDCP_DDC_READ_TRACE(hdcp, "Rx Id List", \
+ hdcp->auth.msg.hdcp2.rx_id_list, \
+ hdcp->auth.msg.hdcp2.rx_id_list_size); \
+ HDCP_DDC_WRITE_TRACE(hdcp, "Rx Id List Ack", \
+ hdcp->auth.msg.hdcp2.repeater_auth_ack, \
+ sizeof(hdcp->auth.msg.hdcp2.repeater_auth_ack)); \
+ HDCP_DDC_WRITE_TRACE(hdcp, "Content Stream Management", \
+ hdcp->auth.msg.hdcp2.repeater_auth_stream_manage, \
+ hdcp->auth.msg.hdcp2.stream_manage_size); \
+ HDCP_DDC_READ_TRACE(hdcp, "Stream Ready", \
+ hdcp->auth.msg.hdcp2.repeater_auth_stream_ready, \
+ sizeof(hdcp->auth.msg.hdcp2.repeater_auth_stream_ready)); \
+ HDCP_DDC_WRITE_TRACE(hdcp, "Content Stream Type", \
+ hdcp->auth.msg.hdcp2.content_stream_type_dp, \
+ sizeof(hdcp->auth.msg.hdcp2.content_stream_type_dp)); \
+ } \
} while (0)
#define HDCP_TOP_ADD_DISPLAY_TRACE(hdcp, i) \
HDCP_LOG_TOP(hdcp, "[Link %d]\tadd display %d", \
@@ -123,6 +174,9 @@
#define HDCP_TOP_HDCP1_DESTROY_SESSION_TRACE(hdcp) \
HDCP_LOG_TOP(hdcp, "[Link %d]\tdestroy hdcp1 session", \
hdcp->config.index)
+#define HDCP_TOP_HDCP2_DESTROY_SESSION_TRACE(hdcp) \
+ HDCP_LOG_TOP(hdcp, "[Link %d]\tdestroy hdcp2 session", \
+ hdcp->config.index)
#define HDCP_TOP_RESET_AUTH_TRACE(hdcp) \
HDCP_LOG_TOP(hdcp, "[Link %d]\treset authentication", hdcp->config.index)
#define HDCP_TOP_RESET_CONN_TRACE(hdcp) \
diff --git a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_psp.c b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_psp.c
index 646d909bbc37..7911dc157d5a 100644
--- a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_psp.c
+++ b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_psp.c
@@ -31,6 +31,19 @@
#include "amdgpu.h"
#include "hdcp_psp.h"
+static void hdcp2_message_init(struct mod_hdcp *hdcp,
+ struct ta_hdcp_cmd_hdcp2_process_prepare_authentication_message_input_v2 *in)
+{
+ in->session_handle = hdcp->auth.id;
+ in->prepare.msg1_id = TA_HDCP_HDCP2_MSG_ID__NULL_MESSAGE;
+ in->prepare.msg2_id = TA_HDCP_HDCP2_MSG_ID__NULL_MESSAGE;
+ in->process.msg1_desc.msg_id = TA_HDCP_HDCP2_MSG_ID__NULL_MESSAGE;
+ in->process.msg1_desc.msg_size = 0;
+ in->process.msg2_desc.msg_id = TA_HDCP_HDCP2_MSG_ID__NULL_MESSAGE;
+ in->process.msg2_desc.msg_size = 0;
+ in->process.msg3_desc.msg_id = TA_HDCP_HDCP2_MSG_ID__NULL_MESSAGE;
+ in->process.msg3_desc.msg_size = 0;
+}
enum mod_hdcp_status mod_hdcp_remove_display_topology(struct mod_hdcp *hdcp)
{
@@ -42,7 +55,7 @@ enum mod_hdcp_status mod_hdcp_remove_display_topology(struct mod_hdcp *hdcp)
dtm_cmd = (struct ta_dtm_shared_memory *)psp->dtm_context.dtm_shared_buf;
for (i = 0; i < MAX_NUM_OF_DISPLAYS; i++) {
- if (hdcp->connection.displays[i].state == MOD_HDCP_DISPLAY_ACTIVE_AND_ADDED) {
+ if (is_display_added(&(hdcp->connection.displays[i]))) {
memset(dtm_cmd, 0, sizeof(struct ta_dtm_shared_memory));
@@ -96,7 +109,7 @@ enum mod_hdcp_status mod_hdcp_add_display_topology(struct mod_hdcp *hdcp)
dtm_cmd->dtm_in_message.topology_update_v2.dig_fe = display->dig_fe;
dtm_cmd->dtm_in_message.topology_update_v2.dp_mst_vcid = display->vc_id;
dtm_cmd->dtm_in_message.topology_update_v2.max_hdcp_supported_version =
- TA_DTM_HDCP_VERSION_MAX_SUPPORTED__1_x;
+ TA_DTM_HDCP_VERSION_MAX_SUPPORTED__2_2;
dtm_cmd->dtm_status = TA_DTM_STATUS__GENERIC_FAILURE;
psp_dtm_invoke(psp, dtm_cmd->cmd_id);
@@ -132,10 +145,11 @@ enum mod_hdcp_status mod_hdcp_hdcp1_create_session(struct mod_hdcp *hdcp)
psp_hdcp_invoke(psp, hdcp_cmd->cmd_id);
+ hdcp->auth.id = hdcp_cmd->out_msg.hdcp1_create_session.session_handle;
+
if (hdcp_cmd->hdcp_status != TA_HDCP_STATUS__SUCCESS)
return MOD_HDCP_STATUS_HDCP1_CREATE_SESSION_FAILURE;
- hdcp->auth.id = hdcp_cmd->out_msg.hdcp1_create_session.session_handle;
hdcp->auth.msg.hdcp1.ainfo = hdcp_cmd->out_msg.hdcp1_create_session.ainfo_primary;
memcpy(hdcp->auth.msg.hdcp1.aksv, hdcp_cmd->out_msg.hdcp1_create_session.aksv_primary,
sizeof(hdcp->auth.msg.hdcp1.aksv));
@@ -326,3 +340,493 @@ enum mod_hdcp_status mod_hdcp_hdcp1_get_link_encryption_status(struct mod_hdcp *
return MOD_HDCP_STATUS_SUCCESS;
}
+enum mod_hdcp_status mod_hdcp_hdcp2_create_session(struct mod_hdcp *hdcp)
+{
+ struct psp_context *psp = hdcp->config.psp.handle;
+ struct ta_hdcp_shared_memory *hdcp_cmd;
+ struct mod_hdcp_display *display = get_first_added_display(hdcp);
+
+ if (!psp->hdcp_context.hdcp_initialized) {
+ DRM_ERROR("Failed to create hdcp session, HDCP TA is not initialized");
+ return MOD_HDCP_STATUS_FAILURE;
+ }
+
+ hdcp_cmd = (struct ta_hdcp_shared_memory *)psp->hdcp_context.hdcp_shared_buf;
+ memset(hdcp_cmd, 0, sizeof(struct ta_hdcp_shared_memory));
+
+ if (!display)
+ return MOD_HDCP_STATUS_DISPLAY_NOT_FOUND;
+
+ hdcp_cmd->in_msg.hdcp2_create_session_v2.display_handle = display->index;
+
+ if (hdcp->connection.link.adjust.hdcp2.force_type == MOD_HDCP_FORCE_TYPE_0)
+ hdcp_cmd->in_msg.hdcp2_create_session_v2.negotiate_content_type =
+ TA_HDCP2_CONTENT_TYPE_NEGOTIATION_TYPE__FORCE_TYPE0;
+ else if (hdcp->connection.link.adjust.hdcp2.force_type == MOD_HDCP_FORCE_TYPE_1)
+ hdcp_cmd->in_msg.hdcp2_create_session_v2.negotiate_content_type =
+ TA_HDCP2_CONTENT_TYPE_NEGOTIATION_TYPE__FORCE_TYPE1;
+ else if (hdcp->connection.link.adjust.hdcp2.force_type == MOD_HDCP_FORCE_TYPE_MAX)
+ hdcp_cmd->in_msg.hdcp2_create_session_v2.negotiate_content_type =
+ TA_HDCP2_CONTENT_TYPE_NEGOTIATION_TYPE__MAX_SUPPORTED;
+
+ hdcp_cmd->cmd_id = TA_HDCP_COMMAND__HDCP2_CREATE_SESSION_V2;
+
+ psp_hdcp_invoke(psp, hdcp_cmd->cmd_id);
+
+ if (hdcp_cmd->hdcp_status != TA_HDCP_STATUS__SUCCESS)
+ return MOD_HDCP_STATUS_HDCP2_CREATE_SESSION_FAILURE;
+
+ hdcp->auth.id = hdcp_cmd->out_msg.hdcp2_create_session_v2.session_handle;
+
+ return MOD_HDCP_STATUS_SUCCESS;
+}
+
+enum mod_hdcp_status mod_hdcp_hdcp2_destroy_session(struct mod_hdcp *hdcp)
+{
+ struct psp_context *psp = hdcp->config.psp.handle;
+ struct ta_hdcp_shared_memory *hdcp_cmd;
+
+ hdcp_cmd = (struct ta_hdcp_shared_memory *)psp->hdcp_context.hdcp_shared_buf;
+ memset(hdcp_cmd, 0, sizeof(struct ta_hdcp_shared_memory));
+
+ hdcp_cmd->in_msg.hdcp2_destroy_session.session_handle = hdcp->auth.id;
+ hdcp_cmd->cmd_id = TA_HDCP_COMMAND__HDCP2_DESTROY_SESSION;
+
+ psp_hdcp_invoke(psp, hdcp_cmd->cmd_id);
+
+ if (hdcp_cmd->hdcp_status != TA_HDCP_STATUS__SUCCESS)
+ return MOD_HDCP_STATUS_HDCP2_DESTROY_SESSION_FAILURE;
+
+ HDCP_TOP_HDCP2_DESTROY_SESSION_TRACE(hdcp);
+
+ return MOD_HDCP_STATUS_SUCCESS;
+}
+
+enum mod_hdcp_status mod_hdcp_hdcp2_prepare_ake_init(struct mod_hdcp *hdcp)
+{
+ struct psp_context *psp = hdcp->config.psp.handle;
+ struct ta_hdcp_shared_memory *hdcp_cmd;
+ struct ta_hdcp_cmd_hdcp2_process_prepare_authentication_message_input_v2 *msg_in;
+ struct ta_hdcp_cmd_hdcp2_process_prepare_authentication_message_output_v2 *msg_out;
+
+ hdcp_cmd = (struct ta_hdcp_shared_memory *)psp->hdcp_context.hdcp_shared_buf;
+ memset(hdcp_cmd, 0, sizeof(struct ta_hdcp_shared_memory));
+
+ msg_in = &hdcp_cmd->in_msg.hdcp2_prepare_process_authentication_message_v2;
+ msg_out = &hdcp_cmd->out_msg.hdcp2_prepare_process_authentication_message_v2;
+
+ hdcp2_message_init(hdcp, msg_in);
+
+ hdcp_cmd->cmd_id = TA_HDCP_COMMAND__HDCP2_PREPARE_PROCESS_AUTHENTICATION_MSG_V2;
+ msg_in->prepare.msg1_id = TA_HDCP_HDCP2_MSG_ID__AKE_INIT;
+
+ psp_hdcp_invoke(psp, hdcp_cmd->cmd_id);
+
+ if (hdcp_cmd->hdcp_status != TA_HDCP_STATUS__SUCCESS)
+ return MOD_HDCP_STATUS_HDCP2_PREP_AKE_INIT_FAILURE;
+
+ memcpy(&hdcp->auth.msg.hdcp2.ake_init[0], &msg_out->prepare.transmitter_message[0],
+ sizeof(hdcp->auth.msg.hdcp2.ake_init));
+
+ return MOD_HDCP_STATUS_SUCCESS;
+}
+
+enum mod_hdcp_status mod_hdcp_hdcp2_validate_ake_cert(struct mod_hdcp *hdcp)
+{
+ struct psp_context *psp = hdcp->config.psp.handle;
+ struct ta_hdcp_shared_memory *hdcp_cmd;
+ struct ta_hdcp_cmd_hdcp2_process_prepare_authentication_message_input_v2 *msg_in;
+ struct ta_hdcp_cmd_hdcp2_process_prepare_authentication_message_output_v2 *msg_out;
+
+ hdcp_cmd = (struct ta_hdcp_shared_memory *)psp->hdcp_context.hdcp_shared_buf;
+ memset(hdcp_cmd, 0, sizeof(struct ta_hdcp_shared_memory));
+
+ msg_in = &hdcp_cmd->in_msg.hdcp2_prepare_process_authentication_message_v2;
+ msg_out = &hdcp_cmd->out_msg.hdcp2_prepare_process_authentication_message_v2;
+
+ hdcp2_message_init(hdcp, msg_in);
+
+ msg_in->process.msg1_desc.msg_id = TA_HDCP_HDCP2_MSG_ID__AKE_SEND_CERT;
+ msg_in->process.msg1_desc.msg_size = TA_HDCP_HDCP2_MSG_ID_MAX_SIZE__AKE_SEND_CERT;
+
+ memcpy(&msg_in->process.receiver_message[0], hdcp->auth.msg.hdcp2.ake_cert,
+ sizeof(hdcp->auth.msg.hdcp2.ake_cert));
+
+ msg_in->prepare.msg1_id = TA_HDCP_HDCP2_MSG_ID__AKE_NO_STORED_KM;
+ msg_in->prepare.msg2_id = TA_HDCP_HDCP2_MSG_ID__AKE_STORED_KM;
+
+ hdcp_cmd->cmd_id = TA_HDCP_COMMAND__HDCP2_PREPARE_PROCESS_AUTHENTICATION_MSG_V2;
+
+ psp_hdcp_invoke(psp, hdcp_cmd->cmd_id);
+
+ if (hdcp_cmd->hdcp_status != TA_HDCP_STATUS__SUCCESS)
+ return MOD_HDCP_STATUS_HDCP2_VALIDATE_AKE_CERT_FAILURE;
+
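+ /* the TA prepares both KM replies back to back in one buffer:
+ * AKE_No_Stored_km first, AKE_Stored_km immediately after it
+ */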
+ memcpy(hdcp->auth.msg.hdcp2.ake_no_stored_km, &msg_out->prepare.transmitter_message[0],
+ sizeof(hdcp->auth.msg.hdcp2.ake_no_stored_km));
+
+ memcpy(hdcp->auth.msg.hdcp2.ake_stored_km,
+ &msg_out->prepare.transmitter_message[sizeof(hdcp->auth.msg.hdcp2.ake_no_stored_km)],
+ sizeof(hdcp->auth.msg.hdcp2.ake_stored_km));
+
+ if (msg_out->process.msg1_status == TA_HDCP2_MSG_AUTHENTICATION_STATUS__SUCCESS) {
+ hdcp->connection.is_km_stored = msg_out->process.is_km_stored ? 1 : 0;
+ hdcp->connection.is_repeater = msg_out->process.is_repeater ? 1 : 0;
+ return MOD_HDCP_STATUS_SUCCESS;
+ }
+
+ return MOD_HDCP_STATUS_FAILURE;
+}
+
+enum mod_hdcp_status mod_hdcp_hdcp2_validate_h_prime(struct mod_hdcp *hdcp)
+{
+ struct psp_context *psp = hdcp->config.psp.handle;
+ struct ta_hdcp_shared_memory *hdcp_cmd;
+ struct ta_hdcp_cmd_hdcp2_process_prepare_authentication_message_input_v2 *msg_in;
+ struct ta_hdcp_cmd_hdcp2_process_prepare_authentication_message_output_v2 *msg_out;
+
+ hdcp_cmd = (struct ta_hdcp_shared_memory *)psp->hdcp_context.hdcp_shared_buf;
+ memset(hdcp_cmd, 0, sizeof(struct ta_hdcp_shared_memory));
+
+ msg_in = &hdcp_cmd->in_msg.hdcp2_prepare_process_authentication_message_v2;
+ msg_out = &hdcp_cmd->out_msg.hdcp2_prepare_process_authentication_message_v2;
+
+ hdcp2_message_init(hdcp, msg_in);
+
+ msg_in->process.msg1_desc.msg_id = TA_HDCP_HDCP2_MSG_ID__AKE_SEND_H_PRIME;
+ msg_in->process.msg1_desc.msg_size = TA_HDCP_HDCP2_MSG_ID_MAX_SIZE__AKE_SEND_H_PRIME;
+
+ memcpy(&msg_in->process.receiver_message[0], hdcp->auth.msg.hdcp2.ake_h_prime,
+ sizeof(hdcp->auth.msg.hdcp2.ake_h_prime));
+
+ if (!hdcp->connection.is_km_stored) {
+ msg_in->process.msg2_desc.msg_id = TA_HDCP_HDCP2_MSG_ID__AKE_SEND_PAIRING_INFO;
+ msg_in->process.msg2_desc.msg_size = TA_HDCP_HDCP2_MSG_ID_MAX_SIZE__AKE_SEND_PAIRING_INFO;
+ memcpy(&msg_in->process.receiver_message[sizeof(hdcp->auth.msg.hdcp2.ake_h_prime)],
+ hdcp->auth.msg.hdcp2.ake_pairing_info, sizeof(hdcp->auth.msg.hdcp2.ake_pairing_info));
+ }
+
+ hdcp_cmd->cmd_id = TA_HDCP_COMMAND__HDCP2_PREPARE_PROCESS_AUTHENTICATION_MSG_V2;
+
+ psp_hdcp_invoke(psp, hdcp_cmd->cmd_id);
+
+ if (hdcp_cmd->hdcp_status != TA_HDCP_STATUS__SUCCESS)
+ return MOD_HDCP_STATUS_HDCP2_VALIDATE_H_PRIME_FAILURE;
+
+ if (msg_out->process.msg1_status != TA_HDCP2_MSG_AUTHENTICATION_STATUS__SUCCESS)
+ return MOD_HDCP_STATUS_HDCP2_VALIDATE_H_PRIME_FAILURE;
+ else if (!hdcp->connection.is_km_stored &&
+ msg_out->process.msg2_status != TA_HDCP2_MSG_AUTHENTICATION_STATUS__SUCCESS)
+ return MOD_HDCP_STATUS_HDCP2_VALIDATE_PAIRING_INFO_FAILURE;
+
+ return MOD_HDCP_STATUS_SUCCESS;
+}
+
+enum mod_hdcp_status mod_hdcp_hdcp2_prepare_lc_init(struct mod_hdcp *hdcp)
+{
+ struct psp_context *psp = hdcp->config.psp.handle;
+ struct ta_hdcp_shared_memory *hdcp_cmd;
+ struct ta_hdcp_cmd_hdcp2_process_prepare_authentication_message_input_v2 *msg_in;
+ struct ta_hdcp_cmd_hdcp2_process_prepare_authentication_message_output_v2 *msg_out;
+
+ hdcp_cmd = (struct ta_hdcp_shared_memory *)psp->hdcp_context.hdcp_shared_buf;
+ memset(hdcp_cmd, 0, sizeof(struct ta_hdcp_shared_memory));
+
+ msg_in = &hdcp_cmd->in_msg.hdcp2_prepare_process_authentication_message_v2;
+ msg_out = &hdcp_cmd->out_msg.hdcp2_prepare_process_authentication_message_v2;
+
+ hdcp2_message_init(hdcp, msg_in);
+
+ msg_in->prepare.msg1_id = TA_HDCP_HDCP2_MSG_ID__LC_INIT;
+
+ hdcp_cmd->cmd_id = TA_HDCP_COMMAND__HDCP2_PREPARE_PROCESS_AUTHENTICATION_MSG_V2;
+
+ psp_hdcp_invoke(psp, hdcp_cmd->cmd_id);
+
+ if (hdcp_cmd->hdcp_status != TA_HDCP_STATUS__SUCCESS)
+ return MOD_HDCP_STATUS_HDCP2_PREP_LC_INIT_FAILURE;
+
+ memcpy(hdcp->auth.msg.hdcp2.lc_init, &msg_out->prepare.transmitter_message[0],
+ sizeof(hdcp->auth.msg.hdcp2.lc_init));
+
+ return MOD_HDCP_STATUS_SUCCESS;
+}
+
+enum mod_hdcp_status mod_hdcp_hdcp2_validate_l_prime(struct mod_hdcp *hdcp)
+{
+ struct psp_context *psp = hdcp->config.psp.handle;
+ struct ta_hdcp_shared_memory *hdcp_cmd;
+ struct ta_hdcp_cmd_hdcp2_process_prepare_authentication_message_input_v2 *msg_in;
+ struct ta_hdcp_cmd_hdcp2_process_prepare_authentication_message_output_v2 *msg_out;
+
+ hdcp_cmd = (struct ta_hdcp_shared_memory *)psp->hdcp_context.hdcp_shared_buf;
+ memset(hdcp_cmd, 0, sizeof(struct ta_hdcp_shared_memory));
+
+ msg_in = &hdcp_cmd->in_msg.hdcp2_prepare_process_authentication_message_v2;
+ msg_out = &hdcp_cmd->out_msg.hdcp2_prepare_process_authentication_message_v2;
+
+ hdcp2_message_init(hdcp, msg_in);
+
+ msg_in->process.msg1_desc.msg_id = TA_HDCP_HDCP2_MSG_ID__LC_SEND_L_PRIME;
+ msg_in->process.msg1_desc.msg_size = TA_HDCP_HDCP2_MSG_ID_MAX_SIZE__LC_SEND_L_PRIME;
+
+ memcpy(&msg_in->process.receiver_message[0], hdcp->auth.msg.hdcp2.lc_l_prime,
+ sizeof(hdcp->auth.msg.hdcp2.lc_l_prime));
+
+ hdcp_cmd->cmd_id = TA_HDCP_COMMAND__HDCP2_PREPARE_PROCESS_AUTHENTICATION_MSG_V2;
+
+ psp_hdcp_invoke(psp, hdcp_cmd->cmd_id);
+
+ if (hdcp_cmd->hdcp_status != TA_HDCP_STATUS__SUCCESS)
+ return MOD_HDCP_STATUS_HDCP2_VALIDATE_L_PRIME_FAILURE;
+
+ if (msg_out->process.msg1_status != TA_HDCP2_MSG_AUTHENTICATION_STATUS__SUCCESS)
+ return MOD_HDCP_STATUS_HDCP2_VALIDATE_L_PRIME_FAILURE;
+
+ return MOD_HDCP_STATUS_SUCCESS;
+}
+
+enum mod_hdcp_status mod_hdcp_hdcp2_prepare_eks(struct mod_hdcp *hdcp)
+{
+ struct psp_context *psp = hdcp->config.psp.handle;
+ struct ta_hdcp_shared_memory *hdcp_cmd;
+ struct ta_hdcp_cmd_hdcp2_process_prepare_authentication_message_input_v2 *msg_in;
+ struct ta_hdcp_cmd_hdcp2_process_prepare_authentication_message_output_v2 *msg_out;
+
+ hdcp_cmd = (struct ta_hdcp_shared_memory *)psp->hdcp_context.hdcp_shared_buf;
+ memset(hdcp_cmd, 0, sizeof(struct ta_hdcp_shared_memory));
+
+ msg_in = &hdcp_cmd->in_msg.hdcp2_prepare_process_authentication_message_v2;
+ msg_out = &hdcp_cmd->out_msg.hdcp2_prepare_process_authentication_message_v2;
+
+ hdcp2_message_init(hdcp, msg_in);
+
+ msg_in->prepare.msg1_id = TA_HDCP_HDCP2_MSG_ID__SKE_SEND_EKS;
+
+ if (is_dp_hdcp(hdcp))
+ msg_in->prepare.msg2_id = TA_HDCP_HDCP2_MSG_ID__SIGNAL_CONTENT_STREAM_TYPE_DP;
+
+ hdcp_cmd->cmd_id = TA_HDCP_COMMAND__HDCP2_PREPARE_PROCESS_AUTHENTICATION_MSG_V2;
+ psp_hdcp_invoke(psp, hdcp_cmd->cmd_id);
+
+ if (hdcp_cmd->hdcp_status != TA_HDCP_STATUS__SUCCESS)
+ return MOD_HDCP_STATUS_HDCP2_PREP_EKS_FAILURE;
+
+ memcpy(hdcp->auth.msg.hdcp2.ske_eks, &msg_out->prepare.transmitter_message[0],
+ sizeof(hdcp->auth.msg.hdcp2.ske_eks));
+ msg_out->prepare.msg1_desc.msg_size = sizeof(hdcp->auth.msg.hdcp2.ske_eks);
+
+ if (is_dp_hdcp(hdcp)) {
+ memcpy(hdcp->auth.msg.hdcp2.content_stream_type_dp,
+ &msg_out->prepare.transmitter_message[sizeof(hdcp->auth.msg.hdcp2.ske_eks)],
+ sizeof(hdcp->auth.msg.hdcp2.content_stream_type_dp));
+ }
+
+ return MOD_HDCP_STATUS_SUCCESS;
+}
+
+enum mod_hdcp_status mod_hdcp_hdcp2_enable_encryption(struct mod_hdcp *hdcp)
+{
+ struct psp_context *psp = hdcp->config.psp.handle;
+ struct ta_hdcp_shared_memory *hdcp_cmd;
+ struct ta_hdcp_cmd_hdcp2_process_prepare_authentication_message_input_v2 *msg_in;
+ struct mod_hdcp_display *display = get_first_added_display(hdcp);
+
+ hdcp_cmd = (struct ta_hdcp_shared_memory *)psp->hdcp_context.hdcp_shared_buf;
+ memset(hdcp_cmd, 0, sizeof(struct ta_hdcp_shared_memory));
+
+ msg_in = &hdcp_cmd->in_msg.hdcp2_prepare_process_authentication_message_v2;
+
+ hdcp2_message_init(hdcp, msg_in);
+
+ if (!display)
+ return MOD_HDCP_STATUS_DISPLAY_NOT_FOUND;
+
+ hdcp_cmd->in_msg.hdcp1_enable_encryption.session_handle = hdcp->auth.id;
+
+ hdcp_cmd->cmd_id = TA_HDCP_COMMAND__HDCP2_SET_ENCRYPTION;
+ psp_hdcp_invoke(psp, hdcp_cmd->cmd_id);
+
+ if (hdcp_cmd->hdcp_status != TA_HDCP_STATUS__SUCCESS)
+ return MOD_HDCP_STATUS_HDCP2_ENABLE_ENCRYPTION_FAILURE;
+
+ if (!is_dp_mst_hdcp(hdcp)) {
+ display->state = MOD_HDCP_DISPLAY_ENCRYPTION_ENABLED;
+ HDCP_HDCP2_ENABLED_TRACE(hdcp, display->index);
+ }
+
+ return MOD_HDCP_STATUS_SUCCESS;
+}
+
+enum mod_hdcp_status mod_hdcp_hdcp2_validate_rx_id_list(struct mod_hdcp *hdcp)
+{
+ struct psp_context *psp = hdcp->config.psp.handle;
+ struct ta_hdcp_shared_memory *hdcp_cmd;
+ struct ta_hdcp_cmd_hdcp2_process_prepare_authentication_message_input_v2 *msg_in;
+ struct ta_hdcp_cmd_hdcp2_process_prepare_authentication_message_output_v2 *msg_out;
+
+ hdcp_cmd = (struct ta_hdcp_shared_memory *)psp->hdcp_context.hdcp_shared_buf;
+ memset(hdcp_cmd, 0, sizeof(struct ta_hdcp_shared_memory));
+
+ msg_in = &hdcp_cmd->in_msg.hdcp2_prepare_process_authentication_message_v2;
+ msg_out = &hdcp_cmd->out_msg.hdcp2_prepare_process_authentication_message_v2;
+
+ hdcp2_message_init(hdcp, msg_in);
+
+ msg_in->process.msg1_desc.msg_id = TA_HDCP_HDCP2_MSG_ID__REPEATERAUTH_SEND_RECEIVERID_LIST;
+ msg_in->process.msg1_desc.msg_size = sizeof(hdcp->auth.msg.hdcp2.rx_id_list);
+ memcpy(&msg_in->process.receiver_message[0], hdcp->auth.msg.hdcp2.rx_id_list,
+ sizeof(hdcp->auth.msg.hdcp2.rx_id_list));
+
+ msg_in->prepare.msg1_id = TA_HDCP_HDCP2_MSG_ID__REPEATERAUTH_SEND_ACK;
+
+ hdcp_cmd->cmd_id = TA_HDCP_COMMAND__HDCP2_PREPARE_PROCESS_AUTHENTICATION_MSG_V2;
+
+ psp_hdcp_invoke(psp, hdcp_cmd->cmd_id);
+
+ if (hdcp_cmd->hdcp_status != TA_HDCP_STATUS__SUCCESS)
+ return MOD_HDCP_STATUS_HDCP2_VALIDATE_RX_ID_LIST_FAILURE;
+
+ memcpy(hdcp->auth.msg.hdcp2.repeater_auth_ack, &msg_out->prepare.transmitter_message[0],
+ sizeof(hdcp->auth.msg.hdcp2.repeater_auth_ack));
+
+ if (msg_out->process.msg1_status == TA_HDCP2_MSG_AUTHENTICATION_STATUS__SUCCESS) {
+ hdcp->connection.is_km_stored = msg_out->process.is_km_stored ? 1 : 0;
+ hdcp->connection.is_repeater = msg_out->process.is_repeater ? 1 : 0;
+ return MOD_HDCP_STATUS_SUCCESS;
+ }
+
+ return MOD_HDCP_STATUS_HDCP2_VALIDATE_RX_ID_LIST_FAILURE;
+}
+
+enum mod_hdcp_status mod_hdcp_hdcp2_enable_dp_stream_encryption(struct mod_hdcp *hdcp)
+{
+ struct psp_context *psp = hdcp->config.psp.handle;
+ struct ta_hdcp_shared_memory *hdcp_cmd;
+ struct ta_hdcp_cmd_hdcp2_process_prepare_authentication_message_input_v2 *msg_in;
+ uint8_t i;
+
+ hdcp_cmd = (struct ta_hdcp_shared_memory *)psp->hdcp_context.hdcp_shared_buf;
+ memset(hdcp_cmd, 0, sizeof(struct ta_hdcp_shared_memory));
+
+ msg_in = &hdcp_cmd->in_msg.hdcp2_prepare_process_authentication_message_v2;
+
+ hdcp2_message_init(hdcp, msg_in);
+
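+ /* request stream encryption for each active, added display that is
+ * not disabled by adjustment; stop on the first TA failure
+ */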
+ for (i = 0; i < MAX_NUM_OF_DISPLAYS; i++) {
+ if (hdcp->connection.displays[i].state != MOD_HDCP_DISPLAY_ACTIVE_AND_ADDED ||
+ hdcp->connection.displays[i].adjust.disable)
+ continue;
+ hdcp_cmd->in_msg.hdcp2_enable_dp_stream_encryption.display_handle = hdcp->connection.displays[i].index;
+ hdcp_cmd->in_msg.hdcp2_enable_dp_stream_encryption.session_handle = hdcp->auth.id;
+
+ hdcp_cmd->cmd_id = TA_HDCP_COMMAND__HDCP2_ENABLE_DP_STREAM_ENCRYPTION;
+ psp_hdcp_invoke(psp, hdcp_cmd->cmd_id);
+
+ if (hdcp_cmd->hdcp_status != TA_HDCP_STATUS__SUCCESS)
+ break;
+
+ hdcp->connection.displays[i].state = MOD_HDCP_DISPLAY_ENCRYPTION_ENABLED;
+ HDCP_HDCP2_ENABLED_TRACE(hdcp, hdcp->connection.displays[i].index);
+ }
+
+ return (hdcp_cmd->hdcp_status == TA_HDCP_STATUS__SUCCESS) ? MOD_HDCP_STATUS_SUCCESS
+ : MOD_HDCP_STATUS_HDCP2_ENABLE_STREAM_ENCRYPTION;
+}
+
+enum mod_hdcp_status mod_hdcp_hdcp2_prepare_stream_management(struct mod_hdcp *hdcp)
+{
+ struct psp_context *psp = hdcp->config.psp.handle;
+ struct ta_hdcp_shared_memory *hdcp_cmd;
+ struct ta_hdcp_cmd_hdcp2_process_prepare_authentication_message_input_v2 *msg_in;
+ struct ta_hdcp_cmd_hdcp2_process_prepare_authentication_message_output_v2 *msg_out;
+
+ hdcp_cmd = (struct ta_hdcp_shared_memory *)psp->hdcp_context.hdcp_shared_buf;
+ memset(hdcp_cmd, 0, sizeof(struct ta_hdcp_shared_memory));
+
+ msg_in = &hdcp_cmd->in_msg.hdcp2_prepare_process_authentication_message_v2;
+ msg_out = &hdcp_cmd->out_msg.hdcp2_prepare_process_authentication_message_v2;
+
+ hdcp2_message_init(hdcp, msg_in);
+
+ msg_in->prepare.msg1_id = TA_HDCP_HDCP2_MSG_ID__REPEATERAUTH_STREAM_MANAGE;
+
+ hdcp_cmd->cmd_id = TA_HDCP_COMMAND__HDCP2_PREPARE_PROCESS_AUTHENTICATION_MSG_V2;
+ psp_hdcp_invoke(psp, hdcp_cmd->cmd_id);
+
+ if (hdcp_cmd->hdcp_status != TA_HDCP_STATUS__SUCCESS)
+ return MOD_HDCP_STATUS_HDCP2_PREPARE_STREAM_MANAGEMENT_FAILURE;
+
+ hdcp->auth.msg.hdcp2.stream_manage_size = msg_out->prepare.msg1_desc.msg_size;
+
+ memcpy(hdcp->auth.msg.hdcp2.repeater_auth_stream_manage, &msg_out->prepare.transmitter_message[0],
+ sizeof(hdcp->auth.msg.hdcp2.repeater_auth_stream_manage));
+
+ return MOD_HDCP_STATUS_SUCCESS;
+}
+
+enum mod_hdcp_status mod_hdcp_hdcp2_validate_stream_ready(struct mod_hdcp *hdcp)
+{
+ struct psp_context *psp = hdcp->config.psp.handle;
+ struct ta_hdcp_shared_memory *hdcp_cmd;
+ struct ta_hdcp_cmd_hdcp2_process_prepare_authentication_message_input_v2 *msg_in;
+ struct ta_hdcp_cmd_hdcp2_process_prepare_authentication_message_output_v2 *msg_out;
+
+ hdcp_cmd = (struct ta_hdcp_shared_memory *)psp->hdcp_context.hdcp_shared_buf;
+ memset(hdcp_cmd, 0, sizeof(struct ta_hdcp_shared_memory));
+
+ msg_in = &hdcp_cmd->in_msg.hdcp2_prepare_process_authentication_message_v2;
+ msg_out = &hdcp_cmd->out_msg.hdcp2_prepare_process_authentication_message_v2;
+
+ hdcp2_message_init(hdcp, msg_in);
+
+ msg_in->process.msg1_desc.msg_id = TA_HDCP_HDCP2_MSG_ID__REPEATERAUTH_STREAM_READY;
+
+ msg_in->process.msg1_desc.msg_size = sizeof(hdcp->auth.msg.hdcp2.repeater_auth_stream_ready);
+
+ memcpy(&msg_in->process.receiver_message[0], hdcp->auth.msg.hdcp2.repeater_auth_stream_ready,
+ sizeof(hdcp->auth.msg.hdcp2.repeater_auth_stream_ready));
+
+ hdcp_cmd->cmd_id = TA_HDCP_COMMAND__HDCP2_PREPARE_PROCESS_AUTHENTICATION_MSG_V2;
+ psp_hdcp_invoke(psp, hdcp_cmd->cmd_id);
+
+ return (hdcp_cmd->hdcp_status == TA_HDCP_STATUS__SUCCESS) &&
+ (msg_out->process.msg1_status == TA_HDCP2_MSG_AUTHENTICATION_STATUS__SUCCESS)
+ ? MOD_HDCP_STATUS_SUCCESS
+ : MOD_HDCP_STATUS_HDCP2_VALIDATE_STREAM_READY_FAILURE;
+}
+
+enum mod_hdcp_status mod_hdcp_hdcp2_get_link_encryption_status(struct mod_hdcp *hdcp,
+ enum mod_hdcp_encryption_status *encryption_status)
+{
+ struct psp_context *psp = hdcp->config.psp.handle;
+ struct ta_hdcp_shared_memory *hdcp_cmd;
+
+ hdcp_cmd = (struct ta_hdcp_shared_memory *)psp->hdcp_context.hdcp_shared_buf;
+
+ memset(hdcp_cmd, 0, sizeof(struct ta_hdcp_shared_memory));
+
+ hdcp_cmd->in_msg.hdcp2_get_encryption_status.session_handle = hdcp->auth.id;
+ hdcp_cmd->out_msg.hdcp2_get_encryption_status.protection_level = 0;
+ hdcp_cmd->cmd_id = TA_HDCP_COMMAND__HDCP2_GET_ENCRYPTION_STATUS;
+ *encryption_status = MOD_HDCP_ENCRYPTION_STATUS_HDCP_OFF;
+
+ psp_hdcp_invoke(psp, hdcp_cmd->cmd_id);
+
+ if (hdcp_cmd->hdcp_status != TA_HDCP_STATUS__SUCCESS)
+ return MOD_HDCP_STATUS_FAILURE;
+
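+ /* A protection_level of 1 means link encryption is active; hdcp2_type then distinguishes Type 1 from Type 0 content. */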
+ if (hdcp_cmd->out_msg.hdcp2_get_encryption_status.protection_level == 1) {
+ if (hdcp_cmd->out_msg.hdcp2_get_encryption_status.hdcp2_type == TA_HDCP2_CONTENT_TYPE__TYPE1)
+ *encryption_status = MOD_HDCP_ENCRYPTION_STATUS_HDCP2_TYPE1_ON;
+ else
+ *encryption_status = MOD_HDCP_ENCRYPTION_STATUS_HDCP2_TYPE0_ON;
+ }
+
+ return MOD_HDCP_STATUS_SUCCESS;
+}
diff --git a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_psp.h b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_psp.h
index 986fc07ea9ea..82a5e997d573 100644
--- a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_psp.h
+++ b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_psp.h
@@ -36,6 +36,11 @@ enum bgd_security_hdcp_encryption_level {
HDCP_ENCRYPTION_LEVEL__ON
};
+enum bgd_security_hdcp2_content_type {
+ HDCP2_CONTENT_TYPE__INVALID = 0,
+ HDCP2_CONTENT_TYPE__TYPE0,
+ HDCP2_CONTENT_TYPE__TYPE1
+};
enum ta_dtm_command {
TA_DTM_COMMAND__UNUSED_1 = 1,
TA_DTM_COMMAND__TOPOLOGY_UPDATE_V2,
@@ -121,8 +126,64 @@ enum ta_hdcp_command {
TA_HDCP_COMMAND__HDCP1_ENABLE_ENCRYPTION,
TA_HDCP_COMMAND__HDCP1_ENABLE_DP_STREAM_ENCRYPTION,
TA_HDCP_COMMAND__HDCP1_GET_ENCRYPTION_STATUS,
+ TA_HDCP_COMMAND__UNUSED_1,
+ TA_HDCP_COMMAND__HDCP2_DESTROY_SESSION,
+ TA_HDCP_COMMAND__UNUSED_2,
+ TA_HDCP_COMMAND__HDCP2_SET_ENCRYPTION,
+ TA_HDCP_COMMAND__HDCP2_GET_ENCRYPTION_STATUS,
+ TA_HDCP_COMMAND__UNUSED_3,
+ TA_HDCP_COMMAND__HDCP2_CREATE_SESSION_V2,
+ TA_HDCP_COMMAND__HDCP2_PREPARE_PROCESS_AUTHENTICATION_MSG_V2,
+ TA_HDCP_COMMAND__HDCP2_ENABLE_DP_STREAM_ENCRYPTION
+};
+
+enum ta_hdcp2_msg_id {
+ TA_HDCP_HDCP2_MSG_ID__NULL_MESSAGE = 1,
+ TA_HDCP_HDCP2_MSG_ID__AKE_INIT = 2,
+ TA_HDCP_HDCP2_MSG_ID__AKE_SEND_CERT = 3,
+ TA_HDCP_HDCP2_MSG_ID__AKE_NO_STORED_KM = 4,
+ TA_HDCP_HDCP2_MSG_ID__AKE_STORED_KM = 5,
+ TA_HDCP_HDCP2_MSG_ID__AKE_SEND_RRX = 6,
+ TA_HDCP_HDCP2_MSG_ID__AKE_SEND_H_PRIME = 7,
+ TA_HDCP_HDCP2_MSG_ID__AKE_SEND_PAIRING_INFO = 8,
+ TA_HDCP_HDCP2_MSG_ID__LC_INIT = 9,
+ TA_HDCP_HDCP2_MSG_ID__LC_SEND_L_PRIME = 10,
+ TA_HDCP_HDCP2_MSG_ID__SKE_SEND_EKS = 11,
+ TA_HDCP_HDCP2_MSG_ID__REPEATERAUTH_SEND_RECEIVERID_LIST = 12,
+ TA_HDCP_HDCP2_MSG_ID__RTT_READY = 13,
+ TA_HDCP_HDCP2_MSG_ID__RTT_CHALLENGE = 14,
+ TA_HDCP_HDCP2_MSG_ID__REPEATERAUTH_SEND_ACK = 15,
+ TA_HDCP_HDCP2_MSG_ID__REPEATERAUTH_STREAM_MANAGE = 16,
+ TA_HDCP_HDCP2_MSG_ID__REPEATERAUTH_STREAM_READY = 17,
+ TA_HDCP_HDCP2_MSG_ID__RECEIVER_AUTH_STATUS = 18,
+ TA_HDCP_HDCP2_MSG_ID__AKE_TRANSMITTER_INFO = 19,
+ TA_HDCP_HDCP2_MSG_ID__AKE_RECEIVER_INFO = 20,
+ TA_HDCP_HDCP2_MSG_ID__SIGNAL_CONTENT_STREAM_TYPE_DP = 129
};
+enum ta_hdcp2_hdcp2_msg_id_max_size {
+ TA_HDCP_HDCP2_MSG_ID_MAX_SIZE__NULL_MESSAGE = 0,
+ TA_HDCP_HDCP2_MSG_ID_MAX_SIZE__AKE_INIT = 12,
+ TA_HDCP_HDCP2_MSG_ID_MAX_SIZE__AKE_SEND_CERT = 534,
+ TA_HDCP_HDCP2_MSG_ID_MAX_SIZE__AKE_NO_STORED_KM = 129,
+ TA_HDCP_HDCP2_MSG_ID_MAX_SIZE__AKE_STORED_KM = 33,
+ TA_HDCP_HDCP2_MSG_ID_MAX_SIZE__AKE_SEND_RRX = 9,
+ TA_HDCP_HDCP2_MSG_ID_MAX_SIZE__AKE_SEND_H_PRIME = 33,
+ TA_HDCP_HDCP2_MSG_ID_MAX_SIZE__AKE_SEND_PAIRING_INFO = 17,
+ TA_HDCP_HDCP2_MSG_ID_MAX_SIZE__LC_INIT = 9,
+ TA_HDCP_HDCP2_MSG_ID_MAX_SIZE__LC_SEND_L_PRIME = 33,
+ TA_HDCP_HDCP2_MSG_ID_MAX_SIZE__SKE_SEND_EKS = 25,
+ TA_HDCP_HDCP2_MSG_ID_MAX_SIZE__REPEATERAUTH_SEND_RECEIVERID_LIST = 181,
+ TA_HDCP_HDCP2_MSG_ID_MAX_SIZE__RTT_READY = 1,
+ TA_HDCP_HDCP2_MSG_ID_MAX_SIZE__RTT_CHALLENGE = 17,
+ TA_HDCP_HDCP2_MSG_ID_MAX_SIZE__REPEATERAUTH_SEND_ACK = 17,
+ TA_HDCP_HDCP2_MSG_ID_MAX_SIZE__REPEATERAUTH_STREAM_MANAGE = 13,
+ TA_HDCP_HDCP2_MSG_ID_MAX_SIZE__REPEATERAUTH_STREAM_READY = 33,
+ TA_HDCP_HDCP2_MSG_ID_MAX_SIZE__RECEIVER_AUTH_STATUS = 4,
+ TA_HDCP_HDCP2_MSG_ID_MAX_SIZE__AKE_TRANSMITTER_INFO = 6,
+ TA_HDCP_HDCP2_MSG_ID_MAX_SIZE__AKE_RECEIVER_INFO = 6,
+ TA_HDCP_HDCP2_MSG_ID_MAX_SIZE__SIGNAL_CONTENT_STREAM_TYPE_DP = 1
+};
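+
+// These maxima appear to track the HDCP 2.x message definitions plus the
+// 1-byte msg_id, e.g. AKE_SEND_CERT = 1 + 522 (cert_rx) + 8 (r_rx) + 3 (RxCaps) = 534.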
/* HDCP related enumerations */
/**********************************************************/
@@ -131,6 +192,12 @@ enum ta_hdcp_command {
#define TA_HDCP__HDCP1_KSV_SIZE 5
#define TA_HDCP__HDCP1_KSV_LIST_MAX_ENTRIES 127
#define TA_HDCP__HDCP1_V_PRIME_SIZE 20
+#define TA_HDCP__HDCP2_TX_BUF_MAX_SIZE \
+ TA_HDCP_HDCP2_MSG_ID_MAX_SIZE__AKE_NO_STORED_KM + TA_HDCP_HDCP2_MSG_ID_MAX_SIZE__AKE_STORED_KM + 6
+
+// both buffer sizes are padded to 64-bit boundaries
+#define TA_HDCP__HDCP2_RX_BUF_MAX_SIZE \
+ TA_HDCP_HDCP2_MSG_ID_MAX_SIZE__AKE_SEND_CERT + TA_HDCP_HDCP2_MSG_ID_MAX_SIZE__AKE_RECEIVER_INFO + 4
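+// With the extra 6 and 4 pad bytes both totals land on a 64-bit boundary:
+// TX: 129 + 33 + 6 = 168 bytes; RX: 534 + 6 + 4 = 544 bytes.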
enum ta_hdcp_status {
TA_HDCP_STATUS__SUCCESS = 0x00,
@@ -165,9 +232,47 @@ enum ta_hdcp_authentication_status {
TA_HDCP_AUTHENTICATION_STATUS__HDCP1_FIRST_PART_COMPLETE = 0x02,
TA_HDCP_AUTHENTICATION_STATUS__HDCP1_SECOND_PART_FAILED = 0x03,
TA_HDCP_AUTHENTICATION_STATUS__HDCP1_AUTHENTICATED = 0x04,
+ TA_HDCP_AUTHENTICATION_STATUS__HDCP22_AUTHENTICATION_PENDING = 0x06,
+ TA_HDCP_AUTHENTICATION_STATUS__HDCP22_AUTHENTICATION_FAILED = 0x07,
+ TA_HDCP_AUTHENTICATION_STATUS__HDCP22_AUTHENTICATED = 0x08,
TA_HDCP_AUTHENTICATION_STATUS__HDCP1_KSV_VALIDATION_FAILED = 0x09
};
+enum ta_hdcp2_msg_authentication_status {
+ TA_HDCP2_MSG_AUTHENTICATION_STATUS__SUCCESS = 0,
+ TA_HDCP2_MSG_AUTHENTICATION_STATUS__KM_NOT_AVAILABLE,
+ TA_HDCP2_MSG_AUTHENTICATION_STATUS__UNUSED,
+ TA_HDCP2_MSG_AUTHENTICATION_STATUS__INVALID = 100, // values below 100 do not fail the request
+ TA_HDCP2_MSG_AUTHENTICATION_STATUS__NOT_ENOUGH_MEMORY,
+ TA_HDCP2_MSG_AUTHENTICATION_STATUS__NOT_EXPECTED_MSG,
+ TA_HDCP2_MSG_AUTHENTICATION_STATUS__SIGNATURE_CERTIFICAT_ERROR,
+ TA_HDCP2_MSG_AUTHENTICATION_STATUS__INCORRECT_HDCP_VERSION,
+ TA_HDCP2_MSG_AUTHENTICATION_STATUS__UNKNOWN_MESSAGE,
+ TA_HDCP2_MSG_AUTHENTICATION_STATUS__INVALID_HMAC,
+ TA_HDCP2_MSG_AUTHENTICATION_STATUS__INVALID_TOPOLOGY,
+ TA_HDCP2_MSG_AUTHENTICATION_STATUS__INVALID_SEQ_NUM,
+ TA_HDCP2_MSG_AUTHENTICATION_STATUS__INVALID_SIZE,
+ TA_HDCP2_MSG_AUTHENTICATION_STATUS__INVALID_LENGTH,
+ TA_HDCP2_MSG_AUTHENTICATION_STATUS__REAUTH_REQUEST
+};
+
+enum ta_hdcp_content_type {
+ TA_HDCP2_CONTENT_TYPE__TYPE0 = 1,
+ TA_HDCP2_CONTENT_TYPE__TYPE1,
+};
+
+enum ta_hdcp_content_type_negotiation_type {
+ TA_HDCP2_CONTENT_TYPE_NEGOTIATION_TYPE__FORCE_TYPE0 = 1,
+ TA_HDCP2_CONTENT_TYPE_NEGOTIATION_TYPE__FORCE_TYPE1,
+ TA_HDCP2_CONTENT_TYPE_NEGOTIATION_TYPE__MAX_SUPPORTED
+};
+
+enum ta_hdcp2_version {
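+ /* encoded as major * 10 + minor */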
+ TA_HDCP2_VERSION_UNKNOWN = 0,
+ TA_HDCP2_VERSION_2_0 = 20,
+ TA_HDCP2_VERSION_2_1 = 21,
+ TA_HDCP2_VERSION_2_2 = 22
+};
/* input/output structures for HDCP commands */
/**********************************************************/
@@ -232,6 +337,84 @@ struct ta_hdcp_cmd_hdcp1_get_encryption_status_output {
uint32_t protection_level;
};
+struct ta_hdcp_cmd_hdcp2_create_session_input_v2 {
+ uint32_t display_handle;
+ enum ta_hdcp_content_type_negotiation_type negotiate_content_type;
+};
+
+struct ta_hdcp_cmd_hdcp2_create_session_output_v2 {
+ uint32_t session_handle;
+};
+
+struct ta_hdcp_cmd_hdcp2_destroy_session_input {
+ uint32_t session_handle;
+};
+
+struct ta_hdcp_cmd_hdcp2_authentication_message_v2 {
+ enum ta_hdcp2_msg_id msg_id;
+ uint32_t msg_size;
+};
+
+struct ta_hdcp_cmd_hdcp2_process_authentication_message_input_v2 {
+ struct ta_hdcp_cmd_hdcp2_authentication_message_v2 msg1_desc;
+ struct ta_hdcp_cmd_hdcp2_authentication_message_v2 msg2_desc;
+ struct ta_hdcp_cmd_hdcp2_authentication_message_v2 msg3_desc;
+ uint8_t receiver_message[TA_HDCP__HDCP2_RX_BUF_MAX_SIZE];
+};
+
+struct ta_hdcp_cmd_hdcp2_process_authentication_message_output_v2 {
+ uint32_t hdcp_version;
+ uint32_t is_km_stored;
+ uint32_t is_locality_precompute_support;
+ uint32_t is_repeater;
+ enum ta_hdcp2_msg_authentication_status msg1_status;
+ enum ta_hdcp2_msg_authentication_status msg2_status;
+ enum ta_hdcp2_msg_authentication_status msg3_status;
+};
+
+struct ta_hdcp_cmd_hdcp2_prepare_authentication_message_input_v2 {
+ enum ta_hdcp2_msg_id msg1_id;
+ enum ta_hdcp2_msg_id msg2_id;
+};
+
+struct ta_hdcp_cmd_hdcp2_prepare_authentication_message_output_v2 {
+ enum ta_hdcp2_msg_authentication_status msg1_status;
+ enum ta_hdcp2_msg_authentication_status msg2_status;
+ struct ta_hdcp_cmd_hdcp2_authentication_message_v2 msg1_desc;
+ struct ta_hdcp_cmd_hdcp2_authentication_message_v2 msg2_desc;
+ uint8_t transmitter_message[TA_HDCP__HDCP2_TX_BUF_MAX_SIZE];
+};
+
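+/* The V2 message command is bidirectional: 'process' carries the message just
+ * received from the receiver, while 'prepare' names the next message(s) the TA
+ * should generate, so a single invocation covers both directions. */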
+struct ta_hdcp_cmd_hdcp2_process_prepare_authentication_message_input_v2 {
+ uint32_t session_handle;
+ struct ta_hdcp_cmd_hdcp2_process_authentication_message_input_v2 process;
+ struct ta_hdcp_cmd_hdcp2_prepare_authentication_message_input_v2 prepare;
+};
+
+struct ta_hdcp_cmd_hdcp2_process_prepare_authentication_message_output_v2 {
+ uint32_t authentication_status;
+ struct ta_hdcp_cmd_hdcp2_process_authentication_message_output_v2 process;
+ struct ta_hdcp_cmd_hdcp2_prepare_authentication_message_output_v2 prepare;
+};
+
+struct ta_hdcp_cmd_hdcp2_set_encryption_input {
+ uint32_t session_handle;
+};
+
+struct ta_hdcp_cmd_hdcp2_get_encryption_status_input {
+ uint32_t session_handle;
+};
+
+struct ta_hdcp_cmd_hdcp2_get_encryption_status_output {
+ enum ta_hdcp_content_type hdcp2_type;
+ uint32_t protection_level;
+};
+
+struct ta_hdcp_cmd_hdcp2_enable_dp_stream_encryption_input {
+ uint32_t session_handle;
+ uint32_t display_handle;
+};
+
/**********************************************************/
/* Common input structure for HDCP callbacks */
union ta_hdcp_cmd_input {
@@ -242,6 +425,13 @@ union ta_hdcp_cmd_input {
struct ta_hdcp_cmd_hdcp1_enable_encryption_input hdcp1_enable_encryption;
struct ta_hdcp_cmd_hdcp1_enable_dp_stream_encryption_input hdcp1_enable_dp_stream_encryption;
struct ta_hdcp_cmd_hdcp1_get_encryption_status_input hdcp1_get_encryption_status;
+ struct ta_hdcp_cmd_hdcp2_destroy_session_input hdcp2_destroy_session;
+ struct ta_hdcp_cmd_hdcp2_set_encryption_input hdcp2_set_encryption;
+ struct ta_hdcp_cmd_hdcp2_get_encryption_status_input hdcp2_get_encryption_status;
+ struct ta_hdcp_cmd_hdcp2_create_session_input_v2 hdcp2_create_session_v2;
+ struct ta_hdcp_cmd_hdcp2_process_prepare_authentication_message_input_v2
+ hdcp2_prepare_process_authentication_message_v2;
+ struct ta_hdcp_cmd_hdcp2_enable_dp_stream_encryption_input hdcp2_enable_dp_stream_encryption;
};
/* Common output structure for HDCP callbacks */
@@ -250,6 +440,10 @@ union ta_hdcp_cmd_output {
struct ta_hdcp_cmd_hdcp1_first_part_authentication_output hdcp1_first_part_authentication;
struct ta_hdcp_cmd_hdcp1_second_part_authentication_output hdcp1_second_part_authentication;
struct ta_hdcp_cmd_hdcp1_get_encryption_status_output hdcp1_get_encryption_status;
+ struct ta_hdcp_cmd_hdcp2_get_encryption_status_output hdcp2_get_encryption_status;
+ struct ta_hdcp_cmd_hdcp2_create_session_output_v2 hdcp2_create_session_v2;
+ struct ta_hdcp_cmd_hdcp2_process_prepare_authentication_message_output_v2
+ hdcp2_prepare_process_authentication_message_v2;
};
/**********************************************************/
diff --git a/drivers/gpu/drm/amd/display/modules/inc/mod_freesync.h b/drivers/gpu/drm/amd/display/modules/inc/mod_freesync.h
index dc187844d10b..dbe7835aabcf 100644
--- a/drivers/gpu/drm/amd/display/modules/inc/mod_freesync.h
+++ b/drivers/gpu/drm/amd/display/modules/inc/mod_freesync.h
@@ -92,6 +92,7 @@ struct mod_vrr_params_btr {
uint32_t inserted_duration_in_us;
uint32_t frames_to_insert;
uint32_t frame_counter;
+ uint32_t margin_in_us;
};
struct mod_vrr_params_fixed_refresh {
diff --git a/drivers/gpu/drm/amd/display/modules/inc/mod_hdcp.h b/drivers/gpu/drm/amd/display/modules/inc/mod_hdcp.h
index dea21702edff..f2a0e1a064da 100644
--- a/drivers/gpu/drm/amd/display/modules/inc/mod_hdcp.h
+++ b/drivers/gpu/drm/amd/display/modules/inc/mod_hdcp.h
@@ -77,6 +77,7 @@ enum mod_hdcp_status {
MOD_HDCP_STATUS_HDCP2_H_PRIME_PENDING,
MOD_HDCP_STATUS_HDCP2_PAIRING_INFO_PENDING,
MOD_HDCP_STATUS_HDCP2_VALIDATE_AKE_CERT_FAILURE,
+ MOD_HDCP_STATUS_HDCP2_AKE_CERT_REVOKED,
MOD_HDCP_STATUS_HDCP2_VALIDATE_H_PRIME_FAILURE,
MOD_HDCP_STATUS_HDCP2_VALIDATE_PAIRING_INFO_FAILURE,
MOD_HDCP_STATUS_HDCP2_PREP_LC_INIT_FAILURE,
@@ -86,6 +87,7 @@ enum mod_hdcp_status {
MOD_HDCP_STATUS_HDCP2_ENABLE_ENCRYPTION_FAILURE,
MOD_HDCP_STATUS_HDCP2_RX_ID_LIST_NOT_READY,
MOD_HDCP_STATUS_HDCP2_VALIDATE_RX_ID_LIST_FAILURE,
+ MOD_HDCP_STATUS_HDCP2_RX_ID_LIST_REVOKED,
MOD_HDCP_STATUS_HDCP2_ENABLE_STREAM_ENCRYPTION,
MOD_HDCP_STATUS_HDCP2_STREAM_READY_PENDING,
MOD_HDCP_STATUS_HDCP2_VALIDATE_STREAM_READY_FAILURE,
@@ -156,12 +158,18 @@ struct mod_hdcp_link_adjustment_hdcp1 {
uint8_t reserved : 6;
};
+enum mod_hdcp_force_hdcp_type {
+ MOD_HDCP_FORCE_TYPE_MAX = 0,
+ MOD_HDCP_FORCE_TYPE_0,
+ MOD_HDCP_FORCE_TYPE_1
+};
+
struct mod_hdcp_link_adjustment_hdcp2 {
uint8_t disable : 1;
- uint8_t disable_type1 : 1;
+ uint8_t force_type : 2;
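+ /* see mod_hdcp_force_hdcp_type: 0 = use max supported, 1 = force Type 0, 2 = force Type 1 */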
uint8_t force_no_stored_km : 1;
uint8_t increase_h_prime_timeout: 1;
- uint8_t reserved : 4;
+ uint8_t reserved : 3;
};
struct mod_hdcp_link_adjustment {
@@ -184,7 +192,8 @@ enum mod_hdcp_encryption_status {
MOD_HDCP_ENCRYPTION_STATUS_HDCP_OFF = 0,
MOD_HDCP_ENCRYPTION_STATUS_HDCP1_ON,
MOD_HDCP_ENCRYPTION_STATUS_HDCP2_TYPE0_ON,
- MOD_HDCP_ENCRYPTION_STATUS_HDCP2_TYPE1_ON
+ MOD_HDCP_ENCRYPTION_STATUS_HDCP2_TYPE1_ON,
+ MOD_HDCP_ENCRYPTION_STATUS_HDCP2_ON
};
/* per link events dm has to notify to hdcp module */
diff --git a/drivers/gpu/drm/amd/display/modules/inc/mod_info_packet.h b/drivers/gpu/drm/amd/display/modules/inc/mod_info_packet.h
index ca8ce3c55337..42cbeffac640 100644
--- a/drivers/gpu/drm/amd/display/modules/inc/mod_info_packet.h
+++ b/drivers/gpu/drm/amd/display/modules/inc/mod_info_packet.h
@@ -26,6 +26,7 @@
#ifndef MOD_INFO_PACKET_H_
#define MOD_INFO_PACKET_H_
+#include "dm_services.h"
#include "mod_shared.h"
//Forward Declarations
struct dc_stream_state;
@@ -33,7 +34,8 @@ struct dc_info_packet;
struct mod_vrr_params;
void mod_build_vsc_infopacket(const struct dc_stream_state *stream,
- struct dc_info_packet *info_packet);
+ struct dc_info_packet *info_packet,
+ bool *use_vsc_sdp_for_colorimetry);
void mod_build_hf_vsif_infopacket(const struct dc_stream_state *stream,
struct dc_info_packet *info_packet, int ALLMEnabled, int ALLMValue);
diff --git a/drivers/gpu/drm/amd/display/modules/inc/mod_shared.h b/drivers/gpu/drm/amd/display/modules/inc/mod_shared.h
index b45f7d65e76a..fe2117904329 100644
--- a/drivers/gpu/drm/amd/display/modules/inc/mod_shared.h
+++ b/drivers/gpu/drm/amd/display/modules/inc/mod_shared.h
@@ -45,7 +45,6 @@ enum vrr_packet_type {
PACKET_TYPE_VTEM
};
-#if defined(CONFIG_DRM_AMD_DC_DCN2_0)
union lut3d_control_flags {
unsigned int raw;
struct {
@@ -104,6 +103,5 @@ struct lut3d_settings {
enum lut3d_control_gamut_map map2;
enum lut3d_control_rotation_mode rotation2;
};
-#endif
#endif /* MOD_SHARED_H_ */
diff --git a/drivers/gpu/drm/amd/display/modules/info_packet/info_packet.c b/drivers/gpu/drm/amd/display/modules/info_packet/info_packet.c
index db6b08f6d093..6a8a056424b8 100644
--- a/drivers/gpu/drm/amd/display/modules/info_packet/info_packet.c
+++ b/drivers/gpu/drm/amd/display/modules/info_packet/info_packet.c
@@ -30,6 +30,20 @@
#include "mod_freesync.h"
#include "dc.h"
+enum vsc_packet_revision {
+ vsc_packet_undefined = 0,
+ //01h = VSC SDP supports only 3D stereo.
+ vsc_packet_rev1 = 1,
+ //02h = 3D stereo + PSR.
+ vsc_packet_rev2 = 2,
+ //03h = 3D stereo + PSR2.
+ vsc_packet_rev3 = 3,
+ //04h = 3D stereo + PSR/PSR2 + Y-coordinate.
+ vsc_packet_rev4 = 4,
+ //05h = 3D stereo + PSR/PSR2 + Y-coordinate + Pixel Encoding/Colorimetry Format
+ vsc_packet_rev5 = 5,
+};
+
#define HDMI_INFOFRAME_TYPE_VENDOR 0x81
#define HF_VSIF_VERSION 1
@@ -116,35 +130,41 @@ enum ColorimetryYCCDP {
};
void mod_build_vsc_infopacket(const struct dc_stream_state *stream,
- struct dc_info_packet *info_packet)
+ struct dc_info_packet *info_packet,
+ bool *use_vsc_sdp_for_colorimetry)
{
- unsigned int vscPacketRevision = 0;
+ unsigned int vsc_packet_revision = vsc_packet_undefined;
unsigned int i;
unsigned int pixelEncoding = 0;
unsigned int colorimetryFormat = 0;
bool stereo3dSupport = false;
+ /* Initialize first; later, if the infopacket is valid, determine whether
+  * the VSC SDP should be used to signal colorimetry format and pixel
+  * encoding.
+  */
+ *use_vsc_sdp_for_colorimetry = false;
+
if (stream->timing.timing_3d_format != TIMING_3D_FORMAT_NONE && stream->view_format != VIEW_3D_FORMAT_NONE) {
- vscPacketRevision = 1;
+ vsc_packet_revision = vsc_packet_rev1;
stereo3dSupport = true;
}
/*VSC packet set to 2 when DP revision >= 1.2*/
if (stream->psr_version != 0)
- vscPacketRevision = 2;
+ vsc_packet_revision = vsc_packet_rev2;
/* Update to revision 5 for extended colorimetry support for DPCD 1.4+ */
if (stream->link->dpcd_caps.dpcd_rev.raw >= 0x14 &&
stream->link->dpcd_caps.dprx_feature.bits.VSC_SDP_COLORIMETRY_SUPPORTED)
- vscPacketRevision = 5;
+ vsc_packet_revision = vsc_packet_rev5;
/* VSC packet not needed based on the features
* supported by this DP display
*/
- if (vscPacketRevision == 0)
+ if (vsc_packet_revision == vsc_packet_undefined)
return;
- if (vscPacketRevision == 0x2) {
+ if (vsc_packet_revision == vsc_packet_rev2) {
/* Secondary-data Packet ID = 0*/
info_packet->hb0 = 0x00;
/* 07h - Packet Type Value indicating Video
@@ -166,7 +186,7 @@ void mod_build_vsc_infopacket(const struct dc_stream_state *stream,
info_packet->valid = true;
}
- if (vscPacketRevision == 0x1) {
+ if (vsc_packet_revision == vsc_packet_rev1) {
info_packet->hb0 = 0x00; // Secondary-data Packet ID = 0
info_packet->hb1 = 0x07; // 07h = Packet Type Value indicating Video Stream Configuration packet
@@ -237,7 +257,7 @@ void mod_build_vsc_infopacket(const struct dc_stream_state *stream,
* the Pixel Encoding/Colorimetry Format and that a Sink device must ignore MISC1, bit 7, and
* MISC0, bits 7:1 (MISC1, bit 7. and MISC0, bits 7:1 become "don't care").)
*/
- if (vscPacketRevision == 0x5) {
+ if (vsc_packet_revision == vsc_packet_rev5) {
/* Secondary-data Packet ID = 0 */
info_packet->hb0 = 0x00;
/* 07h - Packet Type Value indicating Video Stream Configuration packet */
@@ -249,6 +269,13 @@ void mod_build_vsc_infopacket(const struct dc_stream_state *stream,
info_packet->valid = true;
+ /* If we are using VSC SDP revision 05h, use this to signal for
+ * colorimetry format and pixel encoding. HW should later be
+ * programmed to set MSA MISC1 bit 6 to indicate ignore
+ * colorimetry format and pixel encoding in the MSA.
+ */
+ *use_vsc_sdp_for_colorimetry = true;
+
/* Set VSC SDP fields for pixel encoding and colorimetry format from DP 1.3 specs
* Data Bytes DB 18~16
* Bits 3:0 (Colorimetry Format) | Bits 7:4 (Pixel Encoding)
@@ -393,7 +420,6 @@ void mod_build_vsc_infopacket(const struct dc_stream_state *stream,
*/
info_packet->sb[18] = 0;
}
-
}
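/* A hypothetical caller sketch (the real call sites are presumably updated
 * elsewhere in this patch): the new out-parameter tells the caller whether the
 * stream encoder should ignore the MSA colorimetry/pixel-encoding fields in
 * favor of the VSC SDP:
 *
 *	bool use_vsc_sdp_for_colorimetry = false;
 *	struct dc_info_packet vsc_packet = {0};
 *
 *	mod_build_vsc_infopacket(stream, &vsc_packet, &use_vsc_sdp_for_colorimetry);
 *	if (vsc_packet.valid && use_vsc_sdp_for_colorimetry) {
 *		// program MSA MISC1 bit 6 so the sink takes pixel encoding and
 *		// colorimetry from the SDP instead of the MSA
 *	}
 */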
/**
diff --git a/drivers/gpu/drm/amd/display/modules/power/power_helpers.c b/drivers/gpu/drm/amd/display/modules/power/power_helpers.c
index 4e2f615c3566..e75a4bb94488 100644
--- a/drivers/gpu/drm/amd/display/modules/power/power_helpers.c
+++ b/drivers/gpu/drm/amd/display/modules/power/power_helpers.c
@@ -662,7 +662,11 @@ bool dmcu_load_iram(struct dmcu *dmcu,
memset(&ram_table, 0, sizeof(ram_table));
- if (dmcu->dmcu_version.abm_version == 0x23) {
+ if (dmcu->dmcu_version.abm_version == 0x24) {
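+ /* ABM 2.4 reuses the 2.3 iRAM table layout */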
+ fill_iram_v_2_3((struct iram_table_v_2_2 *)ram_table, params);
+ result = dmcu->funcs->load_iram(
+ dmcu, 0, (char *)(&ram_table), IRAM_RESERVE_AREA_START_V2_2);
+ } else if (dmcu->dmcu_version.abm_version == 0x23) {
fill_iram_v_2_3((struct iram_table_v_2_2 *)ram_table, params);
result = dmcu->funcs->load_iram(
@@ -687,3 +691,4 @@ bool dmcu_load_iram(struct dmcu *dmcu,
return result;
}
diff --git a/drivers/gpu/drm/amd/include/amd_shared.h b/drivers/gpu/drm/amd/include/amd_shared.h
index dc7eb28f0296..d655a76bedc6 100644
--- a/drivers/gpu/drm/amd/include/amd_shared.h
+++ b/drivers/gpu/drm/amd/include/amd_shared.h
@@ -53,7 +53,8 @@ enum amd_ip_block_type {
AMD_IP_BLOCK_TYPE_VCE,
AMD_IP_BLOCK_TYPE_ACP,
AMD_IP_BLOCK_TYPE_VCN,
- AMD_IP_BLOCK_TYPE_MES
+ AMD_IP_BLOCK_TYPE_MES,
+ AMD_IP_BLOCK_TYPE_JPEG
};
enum amd_clockgating_state {
@@ -99,6 +100,7 @@ enum amd_powergating_state {
#define AMD_CG_SUPPORT_IH_CG (1 << 27)
#define AMD_CG_SUPPORT_ATHUB_LS (1 << 28)
#define AMD_CG_SUPPORT_ATHUB_MGCG (1 << 29)
+#define AMD_CG_SUPPORT_JPEG_MGCG (1 << 30)
/* PG flags */
#define AMD_PG_SUPPORT_GFX_PG (1 << 0)
#define AMD_PG_SUPPORT_GFX_SMG (1 << 1)
@@ -117,6 +119,7 @@ enum amd_powergating_state {
#define AMD_PG_SUPPORT_VCN (1 << 14)
#define AMD_PG_SUPPORT_VCN_DPG (1 << 15)
#define AMD_PG_SUPPORT_ATHUB (1 << 16)
+#define AMD_PG_SUPPORT_JPEG (1 << 17)
enum PP_FEATURE_MASK {
PP_SCLK_DPM_MASK = 0x1,
diff --git a/drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_2_0_0_offset.h b/drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_2_0_0_offset.h
index cff8f91555d3..e9b2bd84cfed 100644
--- a/drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_2_0_0_offset.h
+++ b/drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_2_0_0_offset.h
@@ -8134,6 +8134,10 @@
#define mmMPC_OUT5_CSC_C33_C34_B 0x1604
#define mmMPC_OUT5_CSC_C33_C34_B_BASE_IDX 2
+#define mmMPC_OCSC_TEST_DEBUG_INDEX 0x163b
+#define mmMPC_OCSC_TEST_DEBUG_INDEX_BASE_IDX 2
+#define mmMPC_OCSC_TEST_DEBUG_DATA_BASE_IDX 2
+#define mmMPC_OCSC_TEST_DEBUG_DATA 0x163c
// addressBlock: dce_dc_mpc_mpc_dcperfmon_dc_perfmon_dispdec
// base address: 0x5964
diff --git a/drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_2_0_0_sh_mask.h b/drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_2_0_0_sh_mask.h
index 10c83fecd147..dc8ce7aaa0cf 100644
--- a/drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_2_0_0_sh_mask.h
+++ b/drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_2_0_0_sh_mask.h
@@ -28263,7 +28263,14 @@
#define MPC_OUT5_CSC_C33_C34_B__MPC_OCSC_C34_B__SHIFT 0x10
#define MPC_OUT5_CSC_C33_C34_B__MPC_OCSC_C33_B_MASK 0x0000FFFFL
#define MPC_OUT5_CSC_C33_C34_B__MPC_OCSC_C34_B_MASK 0xFFFF0000L
-
+//MPC_OCSC_TEST_DEBUG_INDEX
+#define MPC_OCSC_TEST_DEBUG_INDEX__MPC_OCSC_TEST_DEBUG_INDEX__SHIFT 0x0
+#define MPC_OCSC_TEST_DEBUG_INDEX__MPC_OCSC_TEST_DEBUG_WRITE_EN__SHIFT 0x8
+#define MPC_OCSC_TEST_DEBUG_INDEX__MPC_OCSC_TEST_DEBUG_INDEX_MASK 0x000000FFL
+#define MPC_OCSC_TEST_DEBUG_INDEX__MPC_OCSC_TEST_DEBUG_WRITE_EN_MASK 0x00000100L
+//MPC_OCSC_TEST_DEBUG_DATA
+#define MPC_OCSC_TEST_DEBUG_DATA__MPC_OCSC_TEST_DEBUG_DATA__SHIFT 0x0
+#define MPC_OCSC_TEST_DEBUG_DATA__MPC_OCSC_TEST_DEBUG_DATA_MASK 0xFFFFFFFFL
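+// INDEX/DATA follow the usual indexed debug-bus convention; a minimal sketch,
+// assuming generic 32-bit MMIO helpers rd32()/wr32() (hypothetical names):
+//
+//	wr32(mmMPC_OCSC_TEST_DEBUG_INDEX,
+//	     5 & MPC_OCSC_TEST_DEBUG_INDEX__MPC_OCSC_TEST_DEBUG_INDEX_MASK);
+//	val = rd32(mmMPC_OCSC_TEST_DEBUG_DATA);	// read back debug slot 5
+//
+// MPC_OCSC_TEST_DEBUG_WRITE_EN apparently turns the DATA access into a write.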
// addressBlock: dce_dc_mpc_mpc_dcperfmon_dc_perfmon_dispdec
//DC_PERFMON17_PERFCOUNTER_CNTL
diff --git a/drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_2_1_0_offset.h b/drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_2_1_0_offset.h
index eddf83ec1c39..7cd0ee61c030 100644
--- a/drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_2_1_0_offset.h
+++ b/drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_2_1_0_offset.h
@@ -7103,7 +7103,10 @@
#define mmMPC_OUT3_CSC_C31_C32_B_BASE_IDX 2
#define mmMPC_OUT3_CSC_C33_C34_B 0x15ea
#define mmMPC_OUT3_CSC_C33_C34_B_BASE_IDX 2
-
+#define mmMPC_OCSC_TEST_DEBUG_INDEX 0x163b
+#define mmMPC_OCSC_TEST_DEBUG_INDEX_BASE_IDX 2
+#define mmMPC_OCSC_TEST_DEBUG_DATA_BASE_IDX 2
+#define mmMPC_OCSC_TEST_DEBUG_DATA 0x163c
// addressBlock: dce_dc_mpc_mpc_dcperfmon_dc_perfmon_dispdec
// base address: 0x5964
diff --git a/drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_2_1_0_sh_mask.h b/drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_2_1_0_sh_mask.h
index faa0e76e32b4..2f780aefc722 100644
--- a/drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_2_1_0_sh_mask.h
+++ b/drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_2_1_0_sh_mask.h
@@ -56634,5 +56634,13 @@
#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INFOFRAME__INFOFRAME_BYTE_5_MASK 0x00FF0000L
#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INFOFRAME__INFOFRAME_VALID_MASK 0x80000000L
+//MPC_OCSC_TEST_DEBUG_INDEX
+#define MPC_OCSC_TEST_DEBUG_INDEX__MPC_OCSC_TEST_DEBUG_INDEX__SHIFT 0x0
+#define MPC_OCSC_TEST_DEBUG_INDEX__MPC_OCSC_TEST_DEBUG_WRITE_EN__SHIFT 0x8
+#define MPC_OCSC_TEST_DEBUG_INDEX__MPC_OCSC_TEST_DEBUG_INDEX_MASK 0x000000FFL
+#define MPC_OCSC_TEST_DEBUG_INDEX__MPC_OCSC_TEST_DEBUG_WRITE_EN_MASK 0x00000100L
+//MPC_OCSC_TEST_DEBUG_DATA
+#define MPC_OCSC_TEST_DEBUG_DATA__MPC_OCSC_TEST_DEBUG_DATA__SHIFT 0x0
+#define MPC_OCSC_TEST_DEBUG_DATA__MPC_OCSC_TEST_DEBUG_DATA_MASK 0xFFFFFFFFL
#endif
diff --git a/drivers/gpu/drm/amd/include/asic_reg/df/df_3_6_offset.h b/drivers/gpu/drm/amd/include/asic_reg/df/df_3_6_offset.h
index c2bd25589e84..87c84691b5be 100644
--- a/drivers/gpu/drm/amd/include/asic_reg/df/df_3_6_offset.h
+++ b/drivers/gpu/drm/amd/include/asic_reg/df/df_3_6_offset.h
@@ -27,6 +27,9 @@
#define mmDF_PIE_AON0_DfGlobalClkGater 0x00fc
#define mmDF_PIE_AON0_DfGlobalClkGater_BASE_IDX 0
+#define mmDF_CS_UMC_AON0_DfGlobalCtrl 0x00fe
+#define mmDF_CS_UMC_AON0_DfGlobalCtrl_BASE_IDX 0
+
#define mmDF_CS_UMC_AON0_DramBaseAddress0 0x0044
#define mmDF_CS_UMC_AON0_DramBaseAddress0_BASE_IDX 0
@@ -38,6 +41,14 @@
#define smnPerfMonCtlHi2 0x01d464UL
#define smnPerfMonCtlLo3 0x01d470UL
#define smnPerfMonCtlHi3 0x01d474UL
+#define smnPerfMonCtlLo4 0x01d880UL
+#define smnPerfMonCtlHi4 0x01d884UL
+#define smnPerfMonCtlLo5 0x01d888UL
+#define smnPerfMonCtlHi5 0x01d88cUL
+#define smnPerfMonCtlLo6 0x01d890UL
+#define smnPerfMonCtlHi6 0x01d894UL
+#define smnPerfMonCtlLo7 0x01d898UL
+#define smnPerfMonCtlHi7 0x01d89cUL
#define smnPerfMonCtrLo0 0x01d448UL
#define smnPerfMonCtrHi0 0x01d44cUL
@@ -47,6 +58,14 @@
#define smnPerfMonCtrHi2 0x01d46cUL
#define smnPerfMonCtrLo3 0x01d478UL
#define smnPerfMonCtrHi3 0x01d47cUL
+#define smnPerfMonCtrLo4 0x01d790UL
+#define smnPerfMonCtrHi4 0x01d794UL
+#define smnPerfMonCtrLo5 0x01d798UL
+#define smnPerfMonCtrHi5 0x01d79cUL
+#define smnPerfMonCtrLo6 0x01d7a0UL
+#define smnPerfMonCtrHi6 0x01d7a4UL
+#define smnPerfMonCtrLo7 0x01d7a8UL
+#define smnPerfMonCtrHi7 0x01d7acUL
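+// Ctl/Ctr pairs 4-7 extend the DF perfmon counters from four to eight slots.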
#define smnDF_PIE_AON_FabricIndirectConfigAccessAddress3 0x1d05cUL
#define smnDF_PIE_AON_FabricIndirectConfigAccessDataLo3 0x1d098UL
diff --git a/drivers/gpu/drm/amd/include/asic_reg/df/df_3_6_sh_mask.h b/drivers/gpu/drm/amd/include/asic_reg/df/df_3_6_sh_mask.h
index 06fac509e987..65e9f756e86e 100644
--- a/drivers/gpu/drm/amd/include/asic_reg/df/df_3_6_sh_mask.h
+++ b/drivers/gpu/drm/amd/include/asic_reg/df/df_3_6_sh_mask.h
@@ -33,6 +33,14 @@
#define DF_PIE_AON0_DfGlobalClkGater__MGCGMode__SHIFT 0x0
#define DF_PIE_AON0_DfGlobalClkGater__MGCGMode_MASK 0x0000000FL
+/* DF_CS_UMC_AON0_DfGlobalCtrl */
+#define DF_CS_UMC_AON0_DfGlobalCtrl__GlbHashIntlvCtl64K__SHIFT 0x14
+#define DF_CS_UMC_AON0_DfGlobalCtrl__GlbHashIntlvCtl2M__SHIFT 0x15
+#define DF_CS_UMC_AON0_DfGlobalCtrl__GlbHashIntlvCtl1G__SHIFT 0x16
+#define DF_CS_UMC_AON0_DfGlobalCtrl__GlbHashIntlvCtl64K_MASK 0x00100000L
+#define DF_CS_UMC_AON0_DfGlobalCtrl__GlbHashIntlvCtl2M_MASK 0x00200000L
+#define DF_CS_UMC_AON0_DfGlobalCtrl__GlbHashIntlvCtl1G_MASK 0x00400000L
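+/* The GlbHashIntlvCtl bits apparently report whether DF address hashing is
+ * enabled at 64K/2M/1G interleave granularity. */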
+
/* DF_CS_AON0_DramBaseAddress0 */
#define DF_CS_UMC_AON0_DramBaseAddress0__AddrRngVal__SHIFT 0x0
#define DF_CS_UMC_AON0_DramBaseAddress0__LgcyMmioHoleEn__SHIFT 0x1
diff --git a/drivers/gpu/drm/amd/include/asic_reg/dpcs/dpcs_2_0_0_offset.h b/drivers/gpu/drm/amd/include/asic_reg/dpcs/dpcs_2_0_0_offset.h
new file mode 100644
index 000000000000..36ae5b7fe88e
--- /dev/null
+++ b/drivers/gpu/drm/amd/include/asic_reg/dpcs/dpcs_2_0_0_offset.h
@@ -0,0 +1,647 @@
+/*
+ * Copyright (C) 2019 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included
+ * in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN
+ * AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+#ifndef _dpcs_2_0_0_OFFSET_HEADER
+#define _dpcs_2_0_0_OFFSET_HEADER
+
+
+
+// addressBlock: dpcssys_dpcs0_dpcstx0_dispdec
+// base address: 0x0
+#define mmDPCSTX0_DPCSTX_TX_CLOCK_CNTL 0x2928
+#define mmDPCSTX0_DPCSTX_TX_CLOCK_CNTL_BASE_IDX 2
+#define mmDPCSTX0_DPCSTX_TX_CNTL 0x2929
+#define mmDPCSTX0_DPCSTX_TX_CNTL_BASE_IDX 2
+#define mmDPCSTX0_DPCSTX_CBUS_CNTL 0x292a
+#define mmDPCSTX0_DPCSTX_CBUS_CNTL_BASE_IDX 2
+#define mmDPCSTX0_DPCSTX_INTERRUPT_CNTL 0x292b
+#define mmDPCSTX0_DPCSTX_INTERRUPT_CNTL_BASE_IDX 2
+#define mmDPCSTX0_DPCSTX_PLL_UPDATE_ADDR 0x292c
+#define mmDPCSTX0_DPCSTX_PLL_UPDATE_ADDR_BASE_IDX 2
+#define mmDPCSTX0_DPCSTX_PLL_UPDATE_DATA 0x292d
+#define mmDPCSTX0_DPCSTX_PLL_UPDATE_DATA_BASE_IDX 2
+#define mmDPCSTX0_DPCSTX_DEBUG_CONFIG 0x292e
+#define mmDPCSTX0_DPCSTX_DEBUG_CONFIG_BASE_IDX 2
+
+
+// addressBlock: dpcssys_dpcs0_rdpcstx0_dispdec
+// base address: 0x0
+#define mmRDPCSTX0_RDPCSTX_CNTL 0x2930
+#define mmRDPCSTX0_RDPCSTX_CNTL_BASE_IDX 2
+#define mmRDPCSTX0_RDPCSTX_CLOCK_CNTL 0x2931
+#define mmRDPCSTX0_RDPCSTX_CLOCK_CNTL_BASE_IDX 2
+#define mmRDPCSTX0_RDPCSTX_INTERRUPT_CONTROL 0x2932
+#define mmRDPCSTX0_RDPCSTX_INTERRUPT_CONTROL_BASE_IDX 2
+#define mmRDPCSTX0_RDPCSTX_PLL_UPDATE_DATA 0x2933
+#define mmRDPCSTX0_RDPCSTX_PLL_UPDATE_DATA_BASE_IDX 2
+#define mmRDPCSTX0_RDPCS_TX_CR_ADDR 0x2934
+#define mmRDPCSTX0_RDPCS_TX_CR_ADDR_BASE_IDX 2
+#define mmRDPCSTX0_RDPCS_TX_CR_DATA 0x2935
+#define mmRDPCSTX0_RDPCS_TX_CR_DATA_BASE_IDX 2
+#define mmRDPCSTX0_RDPCS_TX_SRAM_CNTL 0x2936
+#define mmRDPCSTX0_RDPCS_TX_SRAM_CNTL_BASE_IDX 2
+#define mmRDPCSTX0_RDPCSTX_MEM_POWER_CTRL 0x2937
+#define mmRDPCSTX0_RDPCSTX_MEM_POWER_CTRL_BASE_IDX 2
+#define mmRDPCSTX0_RDPCSTX_MEM_POWER_CTRL2 0x2938
+#define mmRDPCSTX0_RDPCSTX_MEM_POWER_CTRL2_BASE_IDX 2
+#define mmRDPCSTX0_RDPCSTX_SCRATCH 0x2939
+#define mmRDPCSTX0_RDPCSTX_SCRATCH_BASE_IDX 2
+#define mmRDPCSTX0_RDPCSTX_DMCU_DPALT_DIS_BLOCK_REG 0x293c
+#define mmRDPCSTX0_RDPCSTX_DMCU_DPALT_DIS_BLOCK_REG_BASE_IDX 2
+#define mmRDPCSTX0_RDPCSTX_DEBUG_CONFIG 0x293d
+#define mmRDPCSTX0_RDPCSTX_DEBUG_CONFIG_BASE_IDX 2
+#define mmRDPCSTX0_RDPCSTX_PHY_CNTL0 0x2940
+#define mmRDPCSTX0_RDPCSTX_PHY_CNTL0_BASE_IDX 2
+#define mmRDPCSTX0_RDPCSTX_PHY_CNTL1 0x2941
+#define mmRDPCSTX0_RDPCSTX_PHY_CNTL1_BASE_IDX 2
+#define mmRDPCSTX0_RDPCSTX_PHY_CNTL2 0x2942
+#define mmRDPCSTX0_RDPCSTX_PHY_CNTL2_BASE_IDX 2
+#define mmRDPCSTX0_RDPCSTX_PHY_CNTL3 0x2943
+#define mmRDPCSTX0_RDPCSTX_PHY_CNTL3_BASE_IDX 2
+#define mmRDPCSTX0_RDPCSTX_PHY_CNTL4 0x2944
+#define mmRDPCSTX0_RDPCSTX_PHY_CNTL4_BASE_IDX 2
+#define mmRDPCSTX0_RDPCSTX_PHY_CNTL5 0x2945
+#define mmRDPCSTX0_RDPCSTX_PHY_CNTL5_BASE_IDX 2
+#define mmRDPCSTX0_RDPCSTX_PHY_CNTL6 0x2946
+#define mmRDPCSTX0_RDPCSTX_PHY_CNTL6_BASE_IDX 2
+#define mmRDPCSTX0_RDPCSTX_PHY_CNTL7 0x2947
+#define mmRDPCSTX0_RDPCSTX_PHY_CNTL7_BASE_IDX 2
+#define mmRDPCSTX0_RDPCSTX_PHY_CNTL8 0x2948
+#define mmRDPCSTX0_RDPCSTX_PHY_CNTL8_BASE_IDX 2
+#define mmRDPCSTX0_RDPCSTX_PHY_CNTL9 0x2949
+#define mmRDPCSTX0_RDPCSTX_PHY_CNTL9_BASE_IDX 2
+#define mmRDPCSTX0_RDPCSTX_PHY_CNTL10 0x294a
+#define mmRDPCSTX0_RDPCSTX_PHY_CNTL10_BASE_IDX 2
+#define mmRDPCSTX0_RDPCSTX_PHY_CNTL11 0x294b
+#define mmRDPCSTX0_RDPCSTX_PHY_CNTL11_BASE_IDX 2
+#define mmRDPCSTX0_RDPCSTX_PHY_CNTL12 0x294c
+#define mmRDPCSTX0_RDPCSTX_PHY_CNTL12_BASE_IDX 2
+#define mmRDPCSTX0_RDPCSTX_PHY_CNTL13 0x294d
+#define mmRDPCSTX0_RDPCSTX_PHY_CNTL13_BASE_IDX 2
+#define mmRDPCSTX0_RDPCSTX_PHY_CNTL14 0x294e
+#define mmRDPCSTX0_RDPCSTX_PHY_CNTL14_BASE_IDX 2
+#define mmRDPCSTX0_RDPCSTX_PHY_FUSE0 0x294f
+#define mmRDPCSTX0_RDPCSTX_PHY_FUSE0_BASE_IDX 2
+#define mmRDPCSTX0_RDPCSTX_PHY_FUSE1 0x2950
+#define mmRDPCSTX0_RDPCSTX_PHY_FUSE1_BASE_IDX 2
+#define mmRDPCSTX0_RDPCSTX_PHY_FUSE2 0x2951
+#define mmRDPCSTX0_RDPCSTX_PHY_FUSE2_BASE_IDX 2
+#define mmRDPCSTX0_RDPCSTX_PHY_FUSE3 0x2952
+#define mmRDPCSTX0_RDPCSTX_PHY_FUSE3_BASE_IDX 2
+#define mmRDPCSTX0_RDPCSTX_PHY_RX_LD_VAL 0x2953
+#define mmRDPCSTX0_RDPCSTX_PHY_RX_LD_VAL_BASE_IDX 2
+#define mmRDPCSTX0_RDPCSTX_DMCU_DPALT_PHY_CNTL3 0x2954
+#define mmRDPCSTX0_RDPCSTX_DMCU_DPALT_PHY_CNTL3_BASE_IDX 2
+#define mmRDPCSTX0_RDPCSTX_DMCU_DPALT_PHY_CNTL6 0x2955
+#define mmRDPCSTX0_RDPCSTX_DMCU_DPALT_PHY_CNTL6_BASE_IDX 2
+#define mmRDPCSTX0_RDPCSTX_DPALT_CONTROL_REG 0x2956
+#define mmRDPCSTX0_RDPCSTX_DPALT_CONTROL_REG_BASE_IDX 2
+
+
+// addressBlock: dpcssys_dpcssys_cr0_dispdec
+// base address: 0x0
+#define mmDPCSSYS_CR0_DPCSSYS_CR_ADDR 0x2934
+#define mmDPCSSYS_CR0_DPCSSYS_CR_ADDR_BASE_IDX 2
+#define mmDPCSSYS_CR0_DPCSSYS_CR_DATA 0x2935
+#define mmDPCSSYS_CR0_DPCSSYS_CR_DATA_BASE_IDX 2
+
+
+// addressBlock: dpcssys_dpcs0_dpcstx1_dispdec
+// base address: 0x360
+#define mmDPCSTX1_DPCSTX_TX_CLOCK_CNTL 0x2a00
+#define mmDPCSTX1_DPCSTX_TX_CLOCK_CNTL_BASE_IDX 2
+#define mmDPCSTX1_DPCSTX_TX_CNTL 0x2a01
+#define mmDPCSTX1_DPCSTX_TX_CNTL_BASE_IDX 2
+#define mmDPCSTX1_DPCSTX_CBUS_CNTL 0x2a02
+#define mmDPCSTX1_DPCSTX_CBUS_CNTL_BASE_IDX 2
+#define mmDPCSTX1_DPCSTX_INTERRUPT_CNTL 0x2a03
+#define mmDPCSTX1_DPCSTX_INTERRUPT_CNTL_BASE_IDX 2
+#define mmDPCSTX1_DPCSTX_PLL_UPDATE_ADDR 0x2a04
+#define mmDPCSTX1_DPCSTX_PLL_UPDATE_ADDR_BASE_IDX 2
+#define mmDPCSTX1_DPCSTX_PLL_UPDATE_DATA 0x2a05
+#define mmDPCSTX1_DPCSTX_PLL_UPDATE_DATA_BASE_IDX 2
+#define mmDPCSTX1_DPCSTX_DEBUG_CONFIG 0x2a06
+#define mmDPCSTX1_DPCSTX_DEBUG_CONFIG_BASE_IDX 2
+
+
+// addressBlock: dpcssys_dpcs0_rdpcstx1_dispdec
+// base address: 0x360
+#define mmRDPCSTX1_RDPCSTX_CNTL 0x2a08
+#define mmRDPCSTX1_RDPCSTX_CNTL_BASE_IDX 2
+#define mmRDPCSTX1_RDPCSTX_CLOCK_CNTL 0x2a09
+#define mmRDPCSTX1_RDPCSTX_CLOCK_CNTL_BASE_IDX 2
+#define mmRDPCSTX1_RDPCSTX_INTERRUPT_CONTROL 0x2a0a
+#define mmRDPCSTX1_RDPCSTX_INTERRUPT_CONTROL_BASE_IDX 2
+#define mmRDPCSTX1_RDPCSTX_PLL_UPDATE_DATA 0x2a0b
+#define mmRDPCSTX1_RDPCSTX_PLL_UPDATE_DATA_BASE_IDX 2
+#define mmRDPCSTX1_RDPCS_TX_CR_ADDR 0x2a0c
+#define mmRDPCSTX1_RDPCS_TX_CR_ADDR_BASE_IDX 2
+#define mmRDPCSTX1_RDPCS_TX_CR_DATA 0x2a0d
+#define mmRDPCSTX1_RDPCS_TX_CR_DATA_BASE_IDX 2
+#define mmRDPCSTX1_RDPCS_TX_SRAM_CNTL 0x2a0e
+#define mmRDPCSTX1_RDPCS_TX_SRAM_CNTL_BASE_IDX 2
+#define mmRDPCSTX1_RDPCSTX_MEM_POWER_CTRL 0x2a0f
+#define mmRDPCSTX1_RDPCSTX_MEM_POWER_CTRL_BASE_IDX 2
+#define mmRDPCSTX1_RDPCSTX_MEM_POWER_CTRL2 0x2a10
+#define mmRDPCSTX1_RDPCSTX_MEM_POWER_CTRL2_BASE_IDX 2
+#define mmRDPCSTX1_RDPCSTX_SCRATCH 0x2a11
+#define mmRDPCSTX1_RDPCSTX_SCRATCH_BASE_IDX 2
+#define mmRDPCSTX1_RDPCSTX_DMCU_DPALT_DIS_BLOCK_REG 0x2a14
+#define mmRDPCSTX1_RDPCSTX_DMCU_DPALT_DIS_BLOCK_REG_BASE_IDX 2
+#define mmRDPCSTX1_RDPCSTX_DEBUG_CONFIG 0x2a15
+#define mmRDPCSTX1_RDPCSTX_DEBUG_CONFIG_BASE_IDX 2
+#define mmRDPCSTX1_RDPCSTX_PHY_CNTL0 0x2a18
+#define mmRDPCSTX1_RDPCSTX_PHY_CNTL0_BASE_IDX 2
+#define mmRDPCSTX1_RDPCSTX_PHY_CNTL1 0x2a19
+#define mmRDPCSTX1_RDPCSTX_PHY_CNTL1_BASE_IDX 2
+#define mmRDPCSTX1_RDPCSTX_PHY_CNTL2 0x2a1a
+#define mmRDPCSTX1_RDPCSTX_PHY_CNTL2_BASE_IDX 2
+#define mmRDPCSTX1_RDPCSTX_PHY_CNTL3 0x2a1b
+#define mmRDPCSTX1_RDPCSTX_PHY_CNTL3_BASE_IDX 2
+#define mmRDPCSTX1_RDPCSTX_PHY_CNTL4 0x2a1c
+#define mmRDPCSTX1_RDPCSTX_PHY_CNTL4_BASE_IDX 2
+#define mmRDPCSTX1_RDPCSTX_PHY_CNTL5 0x2a1d
+#define mmRDPCSTX1_RDPCSTX_PHY_CNTL5_BASE_IDX 2
+#define mmRDPCSTX1_RDPCSTX_PHY_CNTL6 0x2a1e
+#define mmRDPCSTX1_RDPCSTX_PHY_CNTL6_BASE_IDX 2
+#define mmRDPCSTX1_RDPCSTX_PHY_CNTL7 0x2a1f
+#define mmRDPCSTX1_RDPCSTX_PHY_CNTL7_BASE_IDX 2
+#define mmRDPCSTX1_RDPCSTX_PHY_CNTL8 0x2a20
+#define mmRDPCSTX1_RDPCSTX_PHY_CNTL8_BASE_IDX 2
+#define mmRDPCSTX1_RDPCSTX_PHY_CNTL9 0x2a21
+#define mmRDPCSTX1_RDPCSTX_PHY_CNTL9_BASE_IDX 2
+#define mmRDPCSTX1_RDPCSTX_PHY_CNTL10 0x2a22
+#define mmRDPCSTX1_RDPCSTX_PHY_CNTL10_BASE_IDX 2
+#define mmRDPCSTX1_RDPCSTX_PHY_CNTL11 0x2a23
+#define mmRDPCSTX1_RDPCSTX_PHY_CNTL11_BASE_IDX 2
+#define mmRDPCSTX1_RDPCSTX_PHY_CNTL12 0x2a24
+#define mmRDPCSTX1_RDPCSTX_PHY_CNTL12_BASE_IDX 2
+#define mmRDPCSTX1_RDPCSTX_PHY_CNTL13 0x2a25
+#define mmRDPCSTX1_RDPCSTX_PHY_CNTL13_BASE_IDX 2
+#define mmRDPCSTX1_RDPCSTX_PHY_CNTL14 0x2a26
+#define mmRDPCSTX1_RDPCSTX_PHY_CNTL14_BASE_IDX 2
+#define mmRDPCSTX1_RDPCSTX_PHY_FUSE0 0x2a27
+#define mmRDPCSTX1_RDPCSTX_PHY_FUSE0_BASE_IDX 2
+#define mmRDPCSTX1_RDPCSTX_PHY_FUSE1 0x2a28
+#define mmRDPCSTX1_RDPCSTX_PHY_FUSE1_BASE_IDX 2
+#define mmRDPCSTX1_RDPCSTX_PHY_FUSE2 0x2a29
+#define mmRDPCSTX1_RDPCSTX_PHY_FUSE2_BASE_IDX 2
+#define mmRDPCSTX1_RDPCSTX_PHY_FUSE3 0x2a2a
+#define mmRDPCSTX1_RDPCSTX_PHY_FUSE3_BASE_IDX 2
+#define mmRDPCSTX1_RDPCSTX_PHY_RX_LD_VAL 0x2a2b
+#define mmRDPCSTX1_RDPCSTX_PHY_RX_LD_VAL_BASE_IDX 2
+#define mmRDPCSTX1_RDPCSTX_DMCU_DPALT_PHY_CNTL3 0x2a2c
+#define mmRDPCSTX1_RDPCSTX_DMCU_DPALT_PHY_CNTL3_BASE_IDX 2
+#define mmRDPCSTX1_RDPCSTX_DMCU_DPALT_PHY_CNTL6 0x2a2d
+#define mmRDPCSTX1_RDPCSTX_DMCU_DPALT_PHY_CNTL6_BASE_IDX 2
+#define mmRDPCSTX1_RDPCSTX_DPALT_CONTROL_REG 0x2a2e
+#define mmRDPCSTX1_RDPCSTX_DPALT_CONTROL_REG_BASE_IDX 2
+
+
+// addressBlock: dpcssys_dpcssys_cr1_dispdec
+// base address: 0x360
+#define mmDPCSSYS_CR1_DPCSSYS_CR_ADDR 0x2a0c
+#define mmDPCSSYS_CR1_DPCSSYS_CR_ADDR_BASE_IDX 2
+#define mmDPCSSYS_CR1_DPCSSYS_CR_DATA 0x2a0d
+#define mmDPCSSYS_CR1_DPCSSYS_CR_DATA_BASE_IDX 2
+
+
+// addressBlock: dpcssys_dpcs0_dpcstx2_dispdec
+// base address: 0x6c0
+#define mmDPCSTX2_DPCSTX_TX_CLOCK_CNTL 0x2ad8
+#define mmDPCSTX2_DPCSTX_TX_CLOCK_CNTL_BASE_IDX 2
+#define mmDPCSTX2_DPCSTX_TX_CNTL 0x2ad9
+#define mmDPCSTX2_DPCSTX_TX_CNTL_BASE_IDX 2
+#define mmDPCSTX2_DPCSTX_CBUS_CNTL 0x2ada
+#define mmDPCSTX2_DPCSTX_CBUS_CNTL_BASE_IDX 2
+#define mmDPCSTX2_DPCSTX_INTERRUPT_CNTL 0x2adb
+#define mmDPCSTX2_DPCSTX_INTERRUPT_CNTL_BASE_IDX 2
+#define mmDPCSTX2_DPCSTX_PLL_UPDATE_ADDR 0x2adc
+#define mmDPCSTX2_DPCSTX_PLL_UPDATE_ADDR_BASE_IDX 2
+#define mmDPCSTX2_DPCSTX_PLL_UPDATE_DATA 0x2add
+#define mmDPCSTX2_DPCSTX_PLL_UPDATE_DATA_BASE_IDX 2
+#define mmDPCSTX2_DPCSTX_DEBUG_CONFIG 0x2ade
+#define mmDPCSTX2_DPCSTX_DEBUG_CONFIG_BASE_IDX 2
+
+
+// addressBlock: dpcssys_dpcs0_rdpcstx2_dispdec
+// base address: 0x6c0
+#define mmRDPCSTX2_RDPCSTX_CNTL 0x2ae0
+#define mmRDPCSTX2_RDPCSTX_CNTL_BASE_IDX 2
+#define mmRDPCSTX2_RDPCSTX_CLOCK_CNTL 0x2ae1
+#define mmRDPCSTX2_RDPCSTX_CLOCK_CNTL_BASE_IDX 2
+#define mmRDPCSTX2_RDPCSTX_INTERRUPT_CONTROL 0x2ae2
+#define mmRDPCSTX2_RDPCSTX_INTERRUPT_CONTROL_BASE_IDX 2
+#define mmRDPCSTX2_RDPCSTX_PLL_UPDATE_DATA 0x2ae3
+#define mmRDPCSTX2_RDPCSTX_PLL_UPDATE_DATA_BASE_IDX 2
+#define mmRDPCSTX2_RDPCS_TX_CR_ADDR 0x2ae4
+#define mmRDPCSTX2_RDPCS_TX_CR_ADDR_BASE_IDX 2
+#define mmRDPCSTX2_RDPCS_TX_CR_DATA 0x2ae5
+#define mmRDPCSTX2_RDPCS_TX_CR_DATA_BASE_IDX 2
+#define mmRDPCSTX2_RDPCS_TX_SRAM_CNTL 0x2ae6
+#define mmRDPCSTX2_RDPCS_TX_SRAM_CNTL_BASE_IDX 2
+#define mmRDPCSTX2_RDPCSTX_MEM_POWER_CTRL 0x2ae7
+#define mmRDPCSTX2_RDPCSTX_MEM_POWER_CTRL_BASE_IDX 2
+#define mmRDPCSTX2_RDPCSTX_MEM_POWER_CTRL2 0x2ae8
+#define mmRDPCSTX2_RDPCSTX_MEM_POWER_CTRL2_BASE_IDX 2
+#define mmRDPCSTX2_RDPCSTX_SCRATCH 0x2ae9
+#define mmRDPCSTX2_RDPCSTX_SCRATCH_BASE_IDX 2
+#define mmRDPCSTX2_RDPCSTX_DMCU_DPALT_DIS_BLOCK_REG 0x2aec
+#define mmRDPCSTX2_RDPCSTX_DMCU_DPALT_DIS_BLOCK_REG_BASE_IDX 2
+#define mmRDPCSTX2_RDPCSTX_DEBUG_CONFIG 0x2aed
+#define mmRDPCSTX2_RDPCSTX_DEBUG_CONFIG_BASE_IDX 2
+#define mmRDPCSTX2_RDPCSTX_PHY_CNTL0 0x2af0
+#define mmRDPCSTX2_RDPCSTX_PHY_CNTL0_BASE_IDX 2
+#define mmRDPCSTX2_RDPCSTX_PHY_CNTL1 0x2af1
+#define mmRDPCSTX2_RDPCSTX_PHY_CNTL1_BASE_IDX 2
+#define mmRDPCSTX2_RDPCSTX_PHY_CNTL2 0x2af2
+#define mmRDPCSTX2_RDPCSTX_PHY_CNTL2_BASE_IDX 2
+#define mmRDPCSTX2_RDPCSTX_PHY_CNTL3 0x2af3
+#define mmRDPCSTX2_RDPCSTX_PHY_CNTL3_BASE_IDX 2
+#define mmRDPCSTX2_RDPCSTX_PHY_CNTL4 0x2af4
+#define mmRDPCSTX2_RDPCSTX_PHY_CNTL4_BASE_IDX 2
+#define mmRDPCSTX2_RDPCSTX_PHY_CNTL5 0x2af5
+#define mmRDPCSTX2_RDPCSTX_PHY_CNTL5_BASE_IDX 2
+#define mmRDPCSTX2_RDPCSTX_PHY_CNTL6 0x2af6
+#define mmRDPCSTX2_RDPCSTX_PHY_CNTL6_BASE_IDX 2
+#define mmRDPCSTX2_RDPCSTX_PHY_CNTL7 0x2af7
+#define mmRDPCSTX2_RDPCSTX_PHY_CNTL7_BASE_IDX 2
+#define mmRDPCSTX2_RDPCSTX_PHY_CNTL8 0x2af8
+#define mmRDPCSTX2_RDPCSTX_PHY_CNTL8_BASE_IDX 2
+#define mmRDPCSTX2_RDPCSTX_PHY_CNTL9 0x2af9
+#define mmRDPCSTX2_RDPCSTX_PHY_CNTL9_BASE_IDX 2
+#define mmRDPCSTX2_RDPCSTX_PHY_CNTL10 0x2afa
+#define mmRDPCSTX2_RDPCSTX_PHY_CNTL10_BASE_IDX 2
+#define mmRDPCSTX2_RDPCSTX_PHY_CNTL11 0x2afb
+#define mmRDPCSTX2_RDPCSTX_PHY_CNTL11_BASE_IDX 2
+#define mmRDPCSTX2_RDPCSTX_PHY_CNTL12 0x2afc
+#define mmRDPCSTX2_RDPCSTX_PHY_CNTL12_BASE_IDX 2
+#define mmRDPCSTX2_RDPCSTX_PHY_CNTL13 0x2afd
+#define mmRDPCSTX2_RDPCSTX_PHY_CNTL13_BASE_IDX 2
+#define mmRDPCSTX2_RDPCSTX_PHY_CNTL14 0x2afe
+#define mmRDPCSTX2_RDPCSTX_PHY_CNTL14_BASE_IDX 2
+#define mmRDPCSTX2_RDPCSTX_PHY_FUSE0 0x2aff
+#define mmRDPCSTX2_RDPCSTX_PHY_FUSE0_BASE_IDX 2
+#define mmRDPCSTX2_RDPCSTX_PHY_FUSE1 0x2b00
+#define mmRDPCSTX2_RDPCSTX_PHY_FUSE1_BASE_IDX 2
+#define mmRDPCSTX2_RDPCSTX_PHY_FUSE2 0x2b01
+#define mmRDPCSTX2_RDPCSTX_PHY_FUSE2_BASE_IDX 2
+#define mmRDPCSTX2_RDPCSTX_PHY_FUSE3 0x2b02
+#define mmRDPCSTX2_RDPCSTX_PHY_FUSE3_BASE_IDX 2
+#define mmRDPCSTX2_RDPCSTX_PHY_RX_LD_VAL 0x2b03
+#define mmRDPCSTX2_RDPCSTX_PHY_RX_LD_VAL_BASE_IDX 2
+#define mmRDPCSTX2_RDPCSTX_DMCU_DPALT_PHY_CNTL3 0x2b04
+#define mmRDPCSTX2_RDPCSTX_DMCU_DPALT_PHY_CNTL3_BASE_IDX 2
+#define mmRDPCSTX2_RDPCSTX_DMCU_DPALT_PHY_CNTL6 0x2b05
+#define mmRDPCSTX2_RDPCSTX_DMCU_DPALT_PHY_CNTL6_BASE_IDX 2
+#define mmRDPCSTX2_RDPCSTX_DPALT_CONTROL_REG 0x2b06
+#define mmRDPCSTX2_RDPCSTX_DPALT_CONTROL_REG_BASE_IDX 2
+
+
+// addressBlock: dpcssys_dpcssys_cr2_dispdec
+// base address: 0x6c0
+#define mmDPCSSYS_CR2_DPCSSYS_CR_ADDR 0x2ae4
+#define mmDPCSSYS_CR2_DPCSSYS_CR_ADDR_BASE_IDX 2
+#define mmDPCSSYS_CR2_DPCSSYS_CR_DATA 0x2ae5
+#define mmDPCSSYS_CR2_DPCSSYS_CR_DATA_BASE_IDX 2
+
+
+// addressBlock: dpcssys_dpcs0_dpcstx3_dispdec
+// base address: 0xa20
+#define mmDPCSTX3_DPCSTX_TX_CLOCK_CNTL 0x2bb0
+#define mmDPCSTX3_DPCSTX_TX_CLOCK_CNTL_BASE_IDX 2
+#define mmDPCSTX3_DPCSTX_TX_CNTL 0x2bb1
+#define mmDPCSTX3_DPCSTX_TX_CNTL_BASE_IDX 2
+#define mmDPCSTX3_DPCSTX_CBUS_CNTL 0x2bb2
+#define mmDPCSTX3_DPCSTX_CBUS_CNTL_BASE_IDX 2
+#define mmDPCSTX3_DPCSTX_INTERRUPT_CNTL 0x2bb3
+#define mmDPCSTX3_DPCSTX_INTERRUPT_CNTL_BASE_IDX 2
+#define mmDPCSTX3_DPCSTX_PLL_UPDATE_ADDR 0x2bb4
+#define mmDPCSTX3_DPCSTX_PLL_UPDATE_ADDR_BASE_IDX 2
+#define mmDPCSTX3_DPCSTX_PLL_UPDATE_DATA 0x2bb5
+#define mmDPCSTX3_DPCSTX_PLL_UPDATE_DATA_BASE_IDX 2
+#define mmDPCSTX3_DPCSTX_DEBUG_CONFIG 0x2bb6
+#define mmDPCSTX3_DPCSTX_DEBUG_CONFIG_BASE_IDX 2
+
+
+// addressBlock: dpcssys_dpcs0_rdpcstx3_dispdec
+// base address: 0xa20
+#define mmRDPCSTX3_RDPCSTX_CNTL 0x2bb8
+#define mmRDPCSTX3_RDPCSTX_CNTL_BASE_IDX 2
+#define mmRDPCSTX3_RDPCSTX_CLOCK_CNTL 0x2bb9
+#define mmRDPCSTX3_RDPCSTX_CLOCK_CNTL_BASE_IDX 2
+#define mmRDPCSTX3_RDPCSTX_INTERRUPT_CONTROL 0x2bba
+#define mmRDPCSTX3_RDPCSTX_INTERRUPT_CONTROL_BASE_IDX 2
+#define mmRDPCSTX3_RDPCSTX_PLL_UPDATE_DATA 0x2bbb
+#define mmRDPCSTX3_RDPCSTX_PLL_UPDATE_DATA_BASE_IDX 2
+#define mmRDPCSTX3_RDPCS_TX_CR_ADDR 0x2bbc
+#define mmRDPCSTX3_RDPCS_TX_CR_ADDR_BASE_IDX 2
+#define mmRDPCSTX3_RDPCS_TX_CR_DATA 0x2bbd
+#define mmRDPCSTX3_RDPCS_TX_CR_DATA_BASE_IDX 2
+#define mmRDPCSTX3_RDPCS_TX_SRAM_CNTL 0x2bbe
+#define mmRDPCSTX3_RDPCS_TX_SRAM_CNTL_BASE_IDX 2
+#define mmRDPCSTX3_RDPCSTX_MEM_POWER_CTRL 0x2bbf
+#define mmRDPCSTX3_RDPCSTX_MEM_POWER_CTRL_BASE_IDX 2
+#define mmRDPCSTX3_RDPCSTX_MEM_POWER_CTRL2 0x2bc0
+#define mmRDPCSTX3_RDPCSTX_MEM_POWER_CTRL2_BASE_IDX 2
+#define mmRDPCSTX3_RDPCSTX_SCRATCH 0x2bc1
+#define mmRDPCSTX3_RDPCSTX_SCRATCH_BASE_IDX 2
+#define mmRDPCSTX3_RDPCSTX_DMCU_DPALT_DIS_BLOCK_REG 0x2bc4
+#define mmRDPCSTX3_RDPCSTX_DMCU_DPALT_DIS_BLOCK_REG_BASE_IDX 2
+#define mmRDPCSTX3_RDPCSTX_DEBUG_CONFIG 0x2bc5
+#define mmRDPCSTX3_RDPCSTX_DEBUG_CONFIG_BASE_IDX 2
+#define mmRDPCSTX3_RDPCSTX_PHY_CNTL0 0x2bc8
+#define mmRDPCSTX3_RDPCSTX_PHY_CNTL0_BASE_IDX 2
+#define mmRDPCSTX3_RDPCSTX_PHY_CNTL1 0x2bc9
+#define mmRDPCSTX3_RDPCSTX_PHY_CNTL1_BASE_IDX 2
+#define mmRDPCSTX3_RDPCSTX_PHY_CNTL2 0x2bca
+#define mmRDPCSTX3_RDPCSTX_PHY_CNTL2_BASE_IDX 2
+#define mmRDPCSTX3_RDPCSTX_PHY_CNTL3 0x2bcb
+#define mmRDPCSTX3_RDPCSTX_PHY_CNTL3_BASE_IDX 2
+#define mmRDPCSTX3_RDPCSTX_PHY_CNTL4 0x2bcc
+#define mmRDPCSTX3_RDPCSTX_PHY_CNTL4_BASE_IDX 2
+#define mmRDPCSTX3_RDPCSTX_PHY_CNTL5 0x2bcd
+#define mmRDPCSTX3_RDPCSTX_PHY_CNTL5_BASE_IDX 2
+#define mmRDPCSTX3_RDPCSTX_PHY_CNTL6 0x2bce
+#define mmRDPCSTX3_RDPCSTX_PHY_CNTL6_BASE_IDX 2
+#define mmRDPCSTX3_RDPCSTX_PHY_CNTL7 0x2bcf
+#define mmRDPCSTX3_RDPCSTX_PHY_CNTL7_BASE_IDX 2
+#define mmRDPCSTX3_RDPCSTX_PHY_CNTL8 0x2bd0
+#define mmRDPCSTX3_RDPCSTX_PHY_CNTL8_BASE_IDX 2
+#define mmRDPCSTX3_RDPCSTX_PHY_CNTL9 0x2bd1
+#define mmRDPCSTX3_RDPCSTX_PHY_CNTL9_BASE_IDX 2
+#define mmRDPCSTX3_RDPCSTX_PHY_CNTL10 0x2bd2
+#define mmRDPCSTX3_RDPCSTX_PHY_CNTL10_BASE_IDX 2
+#define mmRDPCSTX3_RDPCSTX_PHY_CNTL11 0x2bd3
+#define mmRDPCSTX3_RDPCSTX_PHY_CNTL11_BASE_IDX 2
+#define mmRDPCSTX3_RDPCSTX_PHY_CNTL12 0x2bd4
+#define mmRDPCSTX3_RDPCSTX_PHY_CNTL12_BASE_IDX 2
+#define mmRDPCSTX3_RDPCSTX_PHY_CNTL13 0x2bd5
+#define mmRDPCSTX3_RDPCSTX_PHY_CNTL13_BASE_IDX 2
+#define mmRDPCSTX3_RDPCSTX_PHY_CNTL14 0x2bd6
+#define mmRDPCSTX3_RDPCSTX_PHY_CNTL14_BASE_IDX 2
+#define mmRDPCSTX3_RDPCSTX_PHY_FUSE0 0x2bd7
+#define mmRDPCSTX3_RDPCSTX_PHY_FUSE0_BASE_IDX 2
+#define mmRDPCSTX3_RDPCSTX_PHY_FUSE1 0x2bd8
+#define mmRDPCSTX3_RDPCSTX_PHY_FUSE1_BASE_IDX 2
+#define mmRDPCSTX3_RDPCSTX_PHY_FUSE2 0x2bd9
+#define mmRDPCSTX3_RDPCSTX_PHY_FUSE2_BASE_IDX 2
+#define mmRDPCSTX3_RDPCSTX_PHY_FUSE3 0x2bda
+#define mmRDPCSTX3_RDPCSTX_PHY_FUSE3_BASE_IDX 2
+#define mmRDPCSTX3_RDPCSTX_PHY_RX_LD_VAL 0x2bdb
+#define mmRDPCSTX3_RDPCSTX_PHY_RX_LD_VAL_BASE_IDX 2
+#define mmRDPCSTX3_RDPCSTX_DMCU_DPALT_PHY_CNTL3 0x2bdc
+#define mmRDPCSTX3_RDPCSTX_DMCU_DPALT_PHY_CNTL3_BASE_IDX 2
+#define mmRDPCSTX3_RDPCSTX_DMCU_DPALT_PHY_CNTL6 0x2bdd
+#define mmRDPCSTX3_RDPCSTX_DMCU_DPALT_PHY_CNTL6_BASE_IDX 2
+#define mmRDPCSTX3_RDPCSTX_DPALT_CONTROL_REG 0x2bde
+#define mmRDPCSTX3_RDPCSTX_DPALT_CONTROL_REG_BASE_IDX 2
+
+
+// addressBlock: dpcssys_dpcssys_cr3_dispdec
+// base address: 0xa20
+#define mmDPCSSYS_CR3_DPCSSYS_CR_ADDR 0x2bbc
+#define mmDPCSSYS_CR3_DPCSSYS_CR_ADDR_BASE_IDX 2
+#define mmDPCSSYS_CR3_DPCSSYS_CR_DATA 0x2bbd
+#define mmDPCSSYS_CR3_DPCSSYS_CR_DATA_BASE_IDX 2
+
+
+// addressBlock: dpcssys_dpcs0_dpcsrx_dispdec
+// base address: 0x0
+#define mmDPCSRX_PHY_CNTL 0x2c76
+#define mmDPCSRX_PHY_CNTL_BASE_IDX 2
+#define mmDPCSRX_RX_CLOCK_CNTL 0x2c78
+#define mmDPCSRX_RX_CLOCK_CNTL_BASE_IDX 2
+#define mmDPCSRX_RX_CNTL 0x2c7a
+#define mmDPCSRX_RX_CNTL_BASE_IDX 2
+#define mmDPCSRX_CBUS_CNTL 0x2c7b
+#define mmDPCSRX_CBUS_CNTL_BASE_IDX 2
+#define mmDPCSRX_REG_ERROR_STATUS 0x2c7c
+#define mmDPCSRX_REG_ERROR_STATUS_BASE_IDX 2
+#define mmDPCSRX_RX_ERROR_STATUS 0x2c7d
+#define mmDPCSRX_RX_ERROR_STATUS_BASE_IDX 2
+#define mmDPCSRX_INDEX_MODE_ADDR 0x2c80
+#define mmDPCSRX_INDEX_MODE_ADDR_BASE_IDX 2
+#define mmDPCSRX_INDEX_MODE_DATA 0x2c81
+#define mmDPCSRX_INDEX_MODE_DATA_BASE_IDX 2
+#define mmDPCSRX_DEBUG_CONFIG 0x2c82
+#define mmDPCSRX_DEBUG_CONFIG_BASE_IDX 2
+
+
+// addressBlock: dpcssys_dpcs0_dpcstx4_dispdec
+// base address: 0xd80
+#define mmDPCSTX4_DPCSTX_TX_CLOCK_CNTL 0x2c88
+#define mmDPCSTX4_DPCSTX_TX_CLOCK_CNTL_BASE_IDX 2
+#define mmDPCSTX4_DPCSTX_TX_CNTL 0x2c89
+#define mmDPCSTX4_DPCSTX_TX_CNTL_BASE_IDX 2
+#define mmDPCSTX4_DPCSTX_CBUS_CNTL 0x2c8a
+#define mmDPCSTX4_DPCSTX_CBUS_CNTL_BASE_IDX 2
+#define mmDPCSTX4_DPCSTX_INTERRUPT_CNTL 0x2c8b
+#define mmDPCSTX4_DPCSTX_INTERRUPT_CNTL_BASE_IDX 2
+#define mmDPCSTX4_DPCSTX_PLL_UPDATE_ADDR 0x2c8c
+#define mmDPCSTX4_DPCSTX_PLL_UPDATE_ADDR_BASE_IDX 2
+#define mmDPCSTX4_DPCSTX_PLL_UPDATE_DATA 0x2c8d
+#define mmDPCSTX4_DPCSTX_PLL_UPDATE_DATA_BASE_IDX 2
+#define mmDPCSTX4_DPCSTX_DEBUG_CONFIG 0x2c8e
+#define mmDPCSTX4_DPCSTX_DEBUG_CONFIG_BASE_IDX 2
+
+
+// addressBlock: dpcssys_dpcs0_rdpcstx4_dispdec
+// base address: 0xd80
+#define mmRDPCSTX4_RDPCSTX_CNTL 0x2c90
+#define mmRDPCSTX4_RDPCSTX_CNTL_BASE_IDX 2
+#define mmRDPCSTX4_RDPCSTX_CLOCK_CNTL 0x2c91
+#define mmRDPCSTX4_RDPCSTX_CLOCK_CNTL_BASE_IDX 2
+#define mmRDPCSTX4_RDPCSTX_INTERRUPT_CONTROL 0x2c92
+#define mmRDPCSTX4_RDPCSTX_INTERRUPT_CONTROL_BASE_IDX 2
+#define mmRDPCSTX4_RDPCSTX_PLL_UPDATE_DATA 0x2c93
+#define mmRDPCSTX4_RDPCSTX_PLL_UPDATE_DATA_BASE_IDX 2
+#define mmRDPCSTX4_RDPCS_TX_CR_ADDR 0x2c94
+#define mmRDPCSTX4_RDPCS_TX_CR_ADDR_BASE_IDX 2
+#define mmRDPCSTX4_RDPCS_TX_CR_DATA 0x2c95
+#define mmRDPCSTX4_RDPCS_TX_CR_DATA_BASE_IDX 2
+#define mmRDPCSTX4_RDPCS_TX_SRAM_CNTL 0x2c96
+#define mmRDPCSTX4_RDPCS_TX_SRAM_CNTL_BASE_IDX 2
+#define mmRDPCSTX4_RDPCSTX_MEM_POWER_CTRL 0x2c97
+#define mmRDPCSTX4_RDPCSTX_MEM_POWER_CTRL_BASE_IDX 2
+#define mmRDPCSTX4_RDPCSTX_MEM_POWER_CTRL2 0x2c98
+#define mmRDPCSTX4_RDPCSTX_MEM_POWER_CTRL2_BASE_IDX 2
+#define mmRDPCSTX4_RDPCSTX_SCRATCH 0x2c99
+#define mmRDPCSTX4_RDPCSTX_SCRATCH_BASE_IDX 2
+#define mmRDPCSTX4_RDPCSTX_DMCU_DPALT_DIS_BLOCK_REG 0x2c9c
+#define mmRDPCSTX4_RDPCSTX_DMCU_DPALT_DIS_BLOCK_REG_BASE_IDX 2
+#define mmRDPCSTX4_RDPCSTX_DEBUG_CONFIG 0x2c9d
+#define mmRDPCSTX4_RDPCSTX_DEBUG_CONFIG_BASE_IDX 2
+#define mmRDPCSTX4_RDPCSTX_PHY_CNTL0 0x2ca0
+#define mmRDPCSTX4_RDPCSTX_PHY_CNTL0_BASE_IDX 2
+#define mmRDPCSTX4_RDPCSTX_PHY_CNTL1 0x2ca1
+#define mmRDPCSTX4_RDPCSTX_PHY_CNTL1_BASE_IDX 2
+#define mmRDPCSTX4_RDPCSTX_PHY_CNTL2 0x2ca2
+#define mmRDPCSTX4_RDPCSTX_PHY_CNTL2_BASE_IDX 2
+#define mmRDPCSTX4_RDPCSTX_PHY_CNTL3 0x2ca3
+#define mmRDPCSTX4_RDPCSTX_PHY_CNTL3_BASE_IDX 2
+#define mmRDPCSTX4_RDPCSTX_PHY_CNTL4 0x2ca4
+#define mmRDPCSTX4_RDPCSTX_PHY_CNTL4_BASE_IDX 2
+#define mmRDPCSTX4_RDPCSTX_PHY_CNTL5 0x2ca5
+#define mmRDPCSTX4_RDPCSTX_PHY_CNTL5_BASE_IDX 2
+#define mmRDPCSTX4_RDPCSTX_PHY_CNTL6 0x2ca6
+#define mmRDPCSTX4_RDPCSTX_PHY_CNTL6_BASE_IDX 2
+#define mmRDPCSTX4_RDPCSTX_PHY_CNTL7 0x2ca7
+#define mmRDPCSTX4_RDPCSTX_PHY_CNTL7_BASE_IDX 2
+#define mmRDPCSTX4_RDPCSTX_PHY_CNTL8 0x2ca8
+#define mmRDPCSTX4_RDPCSTX_PHY_CNTL8_BASE_IDX 2
+#define mmRDPCSTX4_RDPCSTX_PHY_CNTL9 0x2ca9
+#define mmRDPCSTX4_RDPCSTX_PHY_CNTL9_BASE_IDX 2
+#define mmRDPCSTX4_RDPCSTX_PHY_CNTL10 0x2caa
+#define mmRDPCSTX4_RDPCSTX_PHY_CNTL10_BASE_IDX 2
+#define mmRDPCSTX4_RDPCSTX_PHY_CNTL11 0x2cab
+#define mmRDPCSTX4_RDPCSTX_PHY_CNTL11_BASE_IDX 2
+#define mmRDPCSTX4_RDPCSTX_PHY_CNTL12 0x2cac
+#define mmRDPCSTX4_RDPCSTX_PHY_CNTL12_BASE_IDX 2
+#define mmRDPCSTX4_RDPCSTX_PHY_CNTL13 0x2cad
+#define mmRDPCSTX4_RDPCSTX_PHY_CNTL13_BASE_IDX 2
+#define mmRDPCSTX4_RDPCSTX_PHY_CNTL14 0x2cae
+#define mmRDPCSTX4_RDPCSTX_PHY_CNTL14_BASE_IDX 2
+#define mmRDPCSTX4_RDPCSTX_PHY_FUSE0 0x2caf
+#define mmRDPCSTX4_RDPCSTX_PHY_FUSE0_BASE_IDX 2
+#define mmRDPCSTX4_RDPCSTX_PHY_FUSE1 0x2cb0
+#define mmRDPCSTX4_RDPCSTX_PHY_FUSE1_BASE_IDX 2
+#define mmRDPCSTX4_RDPCSTX_PHY_FUSE2 0x2cb1
+#define mmRDPCSTX4_RDPCSTX_PHY_FUSE2_BASE_IDX 2
+#define mmRDPCSTX4_RDPCSTX_PHY_FUSE3 0x2cb2
+#define mmRDPCSTX4_RDPCSTX_PHY_FUSE3_BASE_IDX 2
+#define mmRDPCSTX4_RDPCSTX_PHY_RX_LD_VAL 0x2cb3
+#define mmRDPCSTX4_RDPCSTX_PHY_RX_LD_VAL_BASE_IDX 2
+#define mmRDPCSTX4_RDPCSTX_DMCU_DPALT_PHY_CNTL3 0x2cb4
+#define mmRDPCSTX4_RDPCSTX_DMCU_DPALT_PHY_CNTL3_BASE_IDX 2
+#define mmRDPCSTX4_RDPCSTX_DMCU_DPALT_PHY_CNTL6 0x2cb5
+#define mmRDPCSTX4_RDPCSTX_DMCU_DPALT_PHY_CNTL6_BASE_IDX 2
+#define mmRDPCSTX4_RDPCSTX_DPALT_CONTROL_REG 0x2cb6
+#define mmRDPCSTX4_RDPCSTX_DPALT_CONTROL_REG_BASE_IDX 2
+
+
+// addressBlock: dpcssys_dpcssys_cr4_dispdec
+// base address: 0xd80
+#define mmDPCSSYS_CR4_DPCSSYS_CR_ADDR 0x2c94
+#define mmDPCSSYS_CR4_DPCSSYS_CR_ADDR_BASE_IDX 2
+#define mmDPCSSYS_CR4_DPCSSYS_CR_DATA 0x2c95
+#define mmDPCSSYS_CR4_DPCSSYS_CR_DATA_BASE_IDX 2
+
+
+// addressBlock: dpcssys_dpcs0_dpcstx5_dispdec
+// base address: 0x10e0
+#define mmDPCSTX5_DPCSTX_TX_CLOCK_CNTL 0x2d60
+#define mmDPCSTX5_DPCSTX_TX_CLOCK_CNTL_BASE_IDX 2
+#define mmDPCSTX5_DPCSTX_TX_CNTL 0x2d61
+#define mmDPCSTX5_DPCSTX_TX_CNTL_BASE_IDX 2
+#define mmDPCSTX5_DPCSTX_CBUS_CNTL 0x2d62
+#define mmDPCSTX5_DPCSTX_CBUS_CNTL_BASE_IDX 2
+#define mmDPCSTX5_DPCSTX_INTERRUPT_CNTL 0x2d63
+#define mmDPCSTX5_DPCSTX_INTERRUPT_CNTL_BASE_IDX 2
+#define mmDPCSTX5_DPCSTX_PLL_UPDATE_ADDR 0x2d64
+#define mmDPCSTX5_DPCSTX_PLL_UPDATE_ADDR_BASE_IDX 2
+#define mmDPCSTX5_DPCSTX_PLL_UPDATE_DATA 0x2d65
+#define mmDPCSTX5_DPCSTX_PLL_UPDATE_DATA_BASE_IDX 2
+#define mmDPCSTX5_DPCSTX_DEBUG_CONFIG 0x2d66
+#define mmDPCSTX5_DPCSTX_DEBUG_CONFIG_BASE_IDX 2
+
+
+// addressBlock: dpcssys_dpcs0_rdpcstx5_dispdec
+// base address: 0x10e0
+#define mmRDPCSTX5_RDPCSTX_CNTL 0x2d68
+#define mmRDPCSTX5_RDPCSTX_CNTL_BASE_IDX 2
+#define mmRDPCSTX5_RDPCSTX_CLOCK_CNTL 0x2d69
+#define mmRDPCSTX5_RDPCSTX_CLOCK_CNTL_BASE_IDX 2
+#define mmRDPCSTX5_RDPCSTX_INTERRUPT_CONTROL 0x2d6a
+#define mmRDPCSTX5_RDPCSTX_INTERRUPT_CONTROL_BASE_IDX 2
+#define mmRDPCSTX5_RDPCSTX_PLL_UPDATE_DATA 0x2d6b
+#define mmRDPCSTX5_RDPCSTX_PLL_UPDATE_DATA_BASE_IDX 2
+#define mmRDPCSTX5_RDPCS_TX_CR_ADDR 0x2d6c
+#define mmRDPCSTX5_RDPCS_TX_CR_ADDR_BASE_IDX 2
+#define mmRDPCSTX5_RDPCS_TX_CR_DATA 0x2d6d
+#define mmRDPCSTX5_RDPCS_TX_CR_DATA_BASE_IDX 2
+#define mmRDPCSTX5_RDPCS_TX_SRAM_CNTL 0x2d6e
+#define mmRDPCSTX5_RDPCS_TX_SRAM_CNTL_BASE_IDX 2
+#define mmRDPCSTX5_RDPCSTX_MEM_POWER_CTRL 0x2d6f
+#define mmRDPCSTX5_RDPCSTX_MEM_POWER_CTRL_BASE_IDX 2
+#define mmRDPCSTX5_RDPCSTX_MEM_POWER_CTRL2 0x2d70
+#define mmRDPCSTX5_RDPCSTX_MEM_POWER_CTRL2_BASE_IDX 2
+#define mmRDPCSTX5_RDPCSTX_SCRATCH 0x2d71
+#define mmRDPCSTX5_RDPCSTX_SCRATCH_BASE_IDX 2
+#define mmRDPCSTX5_RDPCSTX_DMCU_DPALT_DIS_BLOCK_REG 0x2d74
+#define mmRDPCSTX5_RDPCSTX_DMCU_DPALT_DIS_BLOCK_REG_BASE_IDX 2
+#define mmRDPCSTX5_RDPCSTX_DEBUG_CONFIG 0x2d75
+#define mmRDPCSTX5_RDPCSTX_DEBUG_CONFIG_BASE_IDX 2
+#define mmRDPCSTX5_RDPCSTX_PHY_CNTL0 0x2d78
+#define mmRDPCSTX5_RDPCSTX_PHY_CNTL0_BASE_IDX 2
+#define mmRDPCSTX5_RDPCSTX_PHY_CNTL1 0x2d79
+#define mmRDPCSTX5_RDPCSTX_PHY_CNTL1_BASE_IDX 2
+#define mmRDPCSTX5_RDPCSTX_PHY_CNTL2 0x2d7a
+#define mmRDPCSTX5_RDPCSTX_PHY_CNTL2_BASE_IDX 2
+#define mmRDPCSTX5_RDPCSTX_PHY_CNTL3 0x2d7b
+#define mmRDPCSTX5_RDPCSTX_PHY_CNTL3_BASE_IDX 2
+#define mmRDPCSTX5_RDPCSTX_PHY_CNTL4 0x2d7c
+#define mmRDPCSTX5_RDPCSTX_PHY_CNTL4_BASE_IDX 2
+#define mmRDPCSTX5_RDPCSTX_PHY_CNTL5 0x2d7d
+#define mmRDPCSTX5_RDPCSTX_PHY_CNTL5_BASE_IDX 2
+#define mmRDPCSTX5_RDPCSTX_PHY_CNTL6 0x2d7e
+#define mmRDPCSTX5_RDPCSTX_PHY_CNTL6_BASE_IDX 2
+#define mmRDPCSTX5_RDPCSTX_PHY_CNTL7 0x2d7f
+#define mmRDPCSTX5_RDPCSTX_PHY_CNTL7_BASE_IDX 2
+#define mmRDPCSTX5_RDPCSTX_PHY_CNTL8 0x2d80
+#define mmRDPCSTX5_RDPCSTX_PHY_CNTL8_BASE_IDX 2
+#define mmRDPCSTX5_RDPCSTX_PHY_CNTL9 0x2d81
+#define mmRDPCSTX5_RDPCSTX_PHY_CNTL9_BASE_IDX 2
+#define mmRDPCSTX5_RDPCSTX_PHY_CNTL10 0x2d82
+#define mmRDPCSTX5_RDPCSTX_PHY_CNTL10_BASE_IDX 2
+#define mmRDPCSTX5_RDPCSTX_PHY_CNTL11 0x2d83
+#define mmRDPCSTX5_RDPCSTX_PHY_CNTL11_BASE_IDX 2
+#define mmRDPCSTX5_RDPCSTX_PHY_CNTL12 0x2d84
+#define mmRDPCSTX5_RDPCSTX_PHY_CNTL12_BASE_IDX 2
+#define mmRDPCSTX5_RDPCSTX_PHY_CNTL13 0x2d85
+#define mmRDPCSTX5_RDPCSTX_PHY_CNTL13_BASE_IDX 2
+#define mmRDPCSTX5_RDPCSTX_PHY_CNTL14 0x2d86
+#define mmRDPCSTX5_RDPCSTX_PHY_CNTL14_BASE_IDX 2
+#define mmRDPCSTX5_RDPCSTX_PHY_FUSE0 0x2d87
+#define mmRDPCSTX5_RDPCSTX_PHY_FUSE0_BASE_IDX 2
+#define mmRDPCSTX5_RDPCSTX_PHY_FUSE1 0x2d88
+#define mmRDPCSTX5_RDPCSTX_PHY_FUSE1_BASE_IDX 2
+#define mmRDPCSTX5_RDPCSTX_PHY_FUSE2 0x2d89
+#define mmRDPCSTX5_RDPCSTX_PHY_FUSE2_BASE_IDX 2
+#define mmRDPCSTX5_RDPCSTX_PHY_FUSE3 0x2d8a
+#define mmRDPCSTX5_RDPCSTX_PHY_FUSE3_BASE_IDX 2
+#define mmRDPCSTX5_RDPCSTX_PHY_RX_LD_VAL 0x2d8b
+#define mmRDPCSTX5_RDPCSTX_PHY_RX_LD_VAL_BASE_IDX 2
+#define mmRDPCSTX5_RDPCSTX_DMCU_DPALT_PHY_CNTL3 0x2d8c
+#define mmRDPCSTX5_RDPCSTX_DMCU_DPALT_PHY_CNTL3_BASE_IDX 2
+#define mmRDPCSTX5_RDPCSTX_DMCU_DPALT_PHY_CNTL6 0x2d8d
+#define mmRDPCSTX5_RDPCSTX_DMCU_DPALT_PHY_CNTL6_BASE_IDX 2
+#define mmRDPCSTX5_RDPCSTX_DPALT_CONTROL_REG 0x2d8e
+#define mmRDPCSTX5_RDPCSTX_DPALT_CONTROL_REG_BASE_IDX 2
+
+
+// addressBlock: dpcssys_dpcssys_cr5_dispdec
+// base address: 0x10e0
+#define mmDPCSSYS_CR5_DPCSSYS_CR_ADDR 0x2d6c
+#define mmDPCSSYS_CR5_DPCSSYS_CR_ADDR_BASE_IDX 2
+#define mmDPCSSYS_CR5_DPCSSYS_CR_DATA 0x2d6d
+#define mmDPCSSYS_CR5_DPCSSYS_CR_DATA_BASE_IDX 2
+
+#endif
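
Every mm* offset in the header above is paired with a _BASE_IDX constant. The offset alone is not the final register address: at runtime the driver adds it to the base of the address segment that the index selects (in amdgpu this composition is what the SOC15_REG_OFFSET()-style macros do against the per-ASIC reg_offset tables). A minimal sketch of that composition, with a hypothetical segment table -- the real per-ASIC segment bases come from the matching *_ip_offset.h header, not from this file:

#include <stdint.h>

/*
 * Illustrative segment table: entry N holds the dword base of address
 * segment N for this DPCS block.  Values here are made up for the
 * example; the real ones are ASIC-specific.
 */
static const uint32_t dpcs_seg_base[] = { 0x00000000, 0x000000c0, 0x000034c0 };

/* Dword offset of a register: segment base selected by BASE_IDX, plus mm*. */
static inline uint32_t dpcs_reg(uint32_t mm_reg, unsigned int base_idx)
{
	return dpcs_seg_base[base_idx] + mm_reg;
}

/*
 * Usage, with offsets from the header above:
 *   dpcs_reg(mmRDPCSTX5_RDPCSTX_CNTL, mmRDPCSTX5_RDPCSTX_CNTL_BASE_IDX);
 */
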
diff --git a/drivers/gpu/drm/amd/include/asic_reg/dpcs/dpcs_2_0_0_sh_mask.h b/drivers/gpu/drm/amd/include/asic_reg/dpcs/dpcs_2_0_0_sh_mask.h
new file mode 100644
index 000000000000..25e0569e05c8
--- /dev/null
+++ b/drivers/gpu/drm/amd/include/asic_reg/dpcs/dpcs_2_0_0_sh_mask.h
@@ -0,0 +1,3912 @@
+/*
+ * Copyright (C) 2019 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included
+ * in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN
+ * AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+#ifndef _dpcs_2_0_0_SH_MASK_HEADER
+#define _dpcs_2_0_0_SH_MASK_HEADER
+
+
+// addressBlock: dpcssys_dpcs0_dpcstx0_dispdec
+//DPCSTX0_DPCSTX_TX_CLOCK_CNTL
+#define DPCSTX0_DPCSTX_TX_CLOCK_CNTL__DPCS_SYMCLK_GATE_DIS__SHIFT 0x0
+#define DPCSTX0_DPCSTX_TX_CLOCK_CNTL__DPCS_SYMCLK_EN__SHIFT 0x1
+#define DPCSTX0_DPCSTX_TX_CLOCK_CNTL__DPCS_SYMCLK_CLOCK_ON__SHIFT 0x2
+#define DPCSTX0_DPCSTX_TX_CLOCK_CNTL__DPCS_SYMCLK_DIV2_CLOCK_ON__SHIFT 0x3
+#define DPCSTX0_DPCSTX_TX_CLOCK_CNTL__DPCS_SYMCLK_GATE_DIS_MASK 0x00000001L
+#define DPCSTX0_DPCSTX_TX_CLOCK_CNTL__DPCS_SYMCLK_EN_MASK 0x00000002L
+#define DPCSTX0_DPCSTX_TX_CLOCK_CNTL__DPCS_SYMCLK_CLOCK_ON_MASK 0x00000004L
+#define DPCSTX0_DPCSTX_TX_CLOCK_CNTL__DPCS_SYMCLK_DIV2_CLOCK_ON_MASK 0x00000008L
+//DPCSTX0_DPCSTX_TX_CNTL
+#define DPCSTX0_DPCSTX_TX_CNTL__DPCS_TX_PLL_UPDATE_REQ__SHIFT 0xc
+#define DPCSTX0_DPCSTX_TX_CNTL__DPCS_TX_PLL_UPDATE_PENDING__SHIFT 0xd
+#define DPCSTX0_DPCSTX_TX_CNTL__DPCS_TX_DATA_SWAP__SHIFT 0xe
+#define DPCSTX0_DPCSTX_TX_CNTL__DPCS_TX_DATA_ORDER_INVERT__SHIFT 0xf
+#define DPCSTX0_DPCSTX_TX_CNTL__DPCS_TX_FIFO_EN__SHIFT 0x10
+#define DPCSTX0_DPCSTX_TX_CNTL__DPCS_TX_FIFO_START__SHIFT 0x11
+#define DPCSTX0_DPCSTX_TX_CNTL__DPCS_TX_FIFO_RD_START_DELAY__SHIFT 0x14
+#define DPCSTX0_DPCSTX_TX_CNTL__DPCS_TX_SOFT_RESET__SHIFT 0x1f
+#define DPCSTX0_DPCSTX_TX_CNTL__DPCS_TX_PLL_UPDATE_REQ_MASK 0x00001000L
+#define DPCSTX0_DPCSTX_TX_CNTL__DPCS_TX_PLL_UPDATE_PENDING_MASK 0x00002000L
+#define DPCSTX0_DPCSTX_TX_CNTL__DPCS_TX_DATA_SWAP_MASK 0x00004000L
+#define DPCSTX0_DPCSTX_TX_CNTL__DPCS_TX_DATA_ORDER_INVERT_MASK 0x00008000L
+#define DPCSTX0_DPCSTX_TX_CNTL__DPCS_TX_FIFO_EN_MASK 0x00010000L
+#define DPCSTX0_DPCSTX_TX_CNTL__DPCS_TX_FIFO_START_MASK 0x00020000L
+#define DPCSTX0_DPCSTX_TX_CNTL__DPCS_TX_FIFO_RD_START_DELAY_MASK 0x00F00000L
+#define DPCSTX0_DPCSTX_TX_CNTL__DPCS_TX_SOFT_RESET_MASK 0x80000000L
+//DPCSTX0_DPCSTX_CBUS_CNTL
+#define DPCSTX0_DPCSTX_CBUS_CNTL__DPCS_CBUS_WR_CMD_DELAY__SHIFT 0x0
+#define DPCSTX0_DPCSTX_CBUS_CNTL__DPCS_CBUS_SOFT_RESET__SHIFT 0x1f
+#define DPCSTX0_DPCSTX_CBUS_CNTL__DPCS_CBUS_WR_CMD_DELAY_MASK 0x000000FFL
+#define DPCSTX0_DPCSTX_CBUS_CNTL__DPCS_CBUS_SOFT_RESET_MASK 0x80000000L
+//DPCSTX0_DPCSTX_INTERRUPT_CNTL
+#define DPCSTX0_DPCSTX_INTERRUPT_CNTL__DPCS_REG_FIFO_OVERFLOW__SHIFT 0x0
+#define DPCSTX0_DPCSTX_INTERRUPT_CNTL__DPCS_REG_ERROR_CLR__SHIFT 0x1
+#define DPCSTX0_DPCSTX_INTERRUPT_CNTL__DPCS_REG_FIFO_ERROR_MASK__SHIFT 0x4
+#define DPCSTX0_DPCSTX_INTERRUPT_CNTL__DPCS_TX0_FIFO_ERROR__SHIFT 0x8
+#define DPCSTX0_DPCSTX_INTERRUPT_CNTL__DPCS_TX1_FIFO_ERROR__SHIFT 0x9
+#define DPCSTX0_DPCSTX_INTERRUPT_CNTL__DPCS_TX2_FIFO_ERROR__SHIFT 0xa
+#define DPCSTX0_DPCSTX_INTERRUPT_CNTL__DPCS_TX3_FIFO_ERROR__SHIFT 0xb
+#define DPCSTX0_DPCSTX_INTERRUPT_CNTL__DPCS_TX_ERROR_CLR__SHIFT 0xc
+#define DPCSTX0_DPCSTX_INTERRUPT_CNTL__DPCS_TX_FIFO_ERROR_MASK__SHIFT 0x10
+#define DPCSTX0_DPCSTX_INTERRUPT_CNTL__DPCS_INTERRUPT_MASK__SHIFT 0x14
+#define DPCSTX0_DPCSTX_INTERRUPT_CNTL__DPCS_REG_FIFO_OVERFLOW_MASK 0x00000001L
+#define DPCSTX0_DPCSTX_INTERRUPT_CNTL__DPCS_REG_ERROR_CLR_MASK 0x00000002L
+#define DPCSTX0_DPCSTX_INTERRUPT_CNTL__DPCS_REG_FIFO_ERROR_MASK_MASK 0x00000010L
+#define DPCSTX0_DPCSTX_INTERRUPT_CNTL__DPCS_TX0_FIFO_ERROR_MASK 0x00000100L
+#define DPCSTX0_DPCSTX_INTERRUPT_CNTL__DPCS_TX1_FIFO_ERROR_MASK 0x00000200L
+#define DPCSTX0_DPCSTX_INTERRUPT_CNTL__DPCS_TX2_FIFO_ERROR_MASK 0x00000400L
+#define DPCSTX0_DPCSTX_INTERRUPT_CNTL__DPCS_TX3_FIFO_ERROR_MASK 0x00000800L
+#define DPCSTX0_DPCSTX_INTERRUPT_CNTL__DPCS_TX_ERROR_CLR_MASK 0x00001000L
+#define DPCSTX0_DPCSTX_INTERRUPT_CNTL__DPCS_TX_FIFO_ERROR_MASK_MASK 0x00010000L
+#define DPCSTX0_DPCSTX_INTERRUPT_CNTL__DPCS_INTERRUPT_MASK_MASK 0x00100000L
+//DPCSTX0_DPCSTX_PLL_UPDATE_ADDR
+#define DPCSTX0_DPCSTX_PLL_UPDATE_ADDR__DPCS_PLL_UPDATE_ADDR__SHIFT 0x0
+#define DPCSTX0_DPCSTX_PLL_UPDATE_ADDR__DPCS_PLL_UPDATE_ADDR_MASK 0x0003FFFFL
+//DPCSTX0_DPCSTX_PLL_UPDATE_DATA
+#define DPCSTX0_DPCSTX_PLL_UPDATE_DATA__DPCS_PLL_UPDATE_DATA__SHIFT 0x0
+#define DPCSTX0_DPCSTX_PLL_UPDATE_DATA__DPCS_PLL_UPDATE_DATA_MASK 0xFFFFFFFFL
+//DPCSTX0_DPCSTX_DEBUG_CONFIG
+#define DPCSTX0_DPCSTX_DEBUG_CONFIG__DPCS_DBG_EN__SHIFT 0x0
+#define DPCSTX0_DPCSTX_DEBUG_CONFIG__DPCS_DBG_CFGCLK_SEL__SHIFT 0x1
+#define DPCSTX0_DPCSTX_DEBUG_CONFIG__DPCS_DBG_TX_SYMCLK_SEL__SHIFT 0x4
+#define DPCSTX0_DPCSTX_DEBUG_CONFIG__DPCS_DBG_TX_SYMCLK_DIV2_SEL__SHIFT 0x8
+#define DPCSTX0_DPCSTX_DEBUG_CONFIG__DPCS_DBG_CBUS_DIS__SHIFT 0xe
+#define DPCSTX0_DPCSTX_DEBUG_CONFIG__DPCS_TEST_DEBUG_WRITE_EN__SHIFT 0x10
+#define DPCSTX0_DPCSTX_DEBUG_CONFIG__DPCS_DBG_EN_MASK 0x00000001L
+#define DPCSTX0_DPCSTX_DEBUG_CONFIG__DPCS_DBG_CFGCLK_SEL_MASK 0x0000000EL
+#define DPCSTX0_DPCSTX_DEBUG_CONFIG__DPCS_DBG_TX_SYMCLK_SEL_MASK 0x00000070L
+#define DPCSTX0_DPCSTX_DEBUG_CONFIG__DPCS_DBG_TX_SYMCLK_DIV2_SEL_MASK 0x00000700L
+#define DPCSTX0_DPCSTX_DEBUG_CONFIG__DPCS_DBG_CBUS_DIS_MASK 0x00004000L
+#define DPCSTX0_DPCSTX_DEBUG_CONFIG__DPCS_TEST_DEBUG_WRITE_EN_MASK 0x00010000L
+
+
+// addressBlock: dpcssys_dpcs0_rdpcstx0_dispdec
+//RDPCSTX0_RDPCSTX_CNTL
+#define RDPCSTX0_RDPCSTX_CNTL__RDPCS_CBUS_SOFT_RESET__SHIFT 0x0
+#define RDPCSTX0_RDPCSTX_CNTL__RDPCS_SRAM_SOFT_RESET__SHIFT 0x4
+#define RDPCSTX0_RDPCSTX_CNTL__RDPCS_TX_FIFO_LANE0_EN__SHIFT 0xc
+#define RDPCSTX0_RDPCSTX_CNTL__RDPCS_TX_FIFO_LANE1_EN__SHIFT 0xd
+#define RDPCSTX0_RDPCSTX_CNTL__RDPCS_TX_FIFO_LANE2_EN__SHIFT 0xe
+#define RDPCSTX0_RDPCSTX_CNTL__RDPCS_TX_FIFO_LANE3_EN__SHIFT 0xf
+#define RDPCSTX0_RDPCSTX_CNTL__RDPCS_TX_FIFO_EN__SHIFT 0x10
+#define RDPCSTX0_RDPCSTX_CNTL__RDPCS_TX_FIFO_START__SHIFT 0x11
+#define RDPCSTX0_RDPCSTX_CNTL__RDPCS_TX_FIFO_RD_START_DELAY__SHIFT 0x14
+#define RDPCSTX0_RDPCSTX_CNTL__RDPCS_CR_REGISTER_BLOCK_EN__SHIFT 0x18
+#define RDPCSTX0_RDPCSTX_CNTL__RDPCS_NON_DPALT_REGISTER_BLOCK_EN__SHIFT 0x19
+#define RDPCSTX0_RDPCSTX_CNTL__RDPCS_DPALT_BLOCK_STATUS__SHIFT 0x1a
+#define RDPCSTX0_RDPCSTX_CNTL__RDPCS_TX_SOFT_RESET__SHIFT 0x1f
+#define RDPCSTX0_RDPCSTX_CNTL__RDPCS_CBUS_SOFT_RESET_MASK 0x00000001L
+#define RDPCSTX0_RDPCSTX_CNTL__RDPCS_SRAM_SOFT_RESET_MASK 0x00000010L
+#define RDPCSTX0_RDPCSTX_CNTL__RDPCS_TX_FIFO_LANE0_EN_MASK 0x00001000L
+#define RDPCSTX0_RDPCSTX_CNTL__RDPCS_TX_FIFO_LANE1_EN_MASK 0x00002000L
+#define RDPCSTX0_RDPCSTX_CNTL__RDPCS_TX_FIFO_LANE2_EN_MASK 0x00004000L
+#define RDPCSTX0_RDPCSTX_CNTL__RDPCS_TX_FIFO_LANE3_EN_MASK 0x00008000L
+#define RDPCSTX0_RDPCSTX_CNTL__RDPCS_TX_FIFO_EN_MASK 0x00010000L
+#define RDPCSTX0_RDPCSTX_CNTL__RDPCS_TX_FIFO_START_MASK 0x00020000L
+#define RDPCSTX0_RDPCSTX_CNTL__RDPCS_TX_FIFO_RD_START_DELAY_MASK 0x00F00000L
+#define RDPCSTX0_RDPCSTX_CNTL__RDPCS_CR_REGISTER_BLOCK_EN_MASK 0x01000000L
+#define RDPCSTX0_RDPCSTX_CNTL__RDPCS_NON_DPALT_REGISTER_BLOCK_EN_MASK 0x02000000L
+#define RDPCSTX0_RDPCSTX_CNTL__RDPCS_DPALT_BLOCK_STATUS_MASK 0x04000000L
+#define RDPCSTX0_RDPCSTX_CNTL__RDPCS_TX_SOFT_RESET_MASK 0x80000000L
+//RDPCSTX0_RDPCSTX_CLOCK_CNTL
+#define RDPCSTX0_RDPCSTX_CLOCK_CNTL__RDPCS_EXT_REFCLK_EN__SHIFT 0x0
+#define RDPCSTX0_RDPCSTX_CLOCK_CNTL__RDPCS_SYMCLK_DIV2_TX0_EN__SHIFT 0x4
+#define RDPCSTX0_RDPCSTX_CLOCK_CNTL__RDPCS_SYMCLK_DIV2_TX1_EN__SHIFT 0x5
+#define RDPCSTX0_RDPCSTX_CLOCK_CNTL__RDPCS_SYMCLK_DIV2_TX2_EN__SHIFT 0x6
+#define RDPCSTX0_RDPCSTX_CLOCK_CNTL__RDPCS_SYMCLK_DIV2_TX3_EN__SHIFT 0x7
+#define RDPCSTX0_RDPCSTX_CLOCK_CNTL__RDPCS_SYMCLK_DIV2_GATE_DIS__SHIFT 0x8
+#define RDPCSTX0_RDPCSTX_CLOCK_CNTL__RDPCS_SYMCLK_DIV2_EN__SHIFT 0x9
+#define RDPCSTX0_RDPCSTX_CLOCK_CNTL__RDPCS_SYMCLK_DIV2_CLOCK_ON__SHIFT 0xa
+#define RDPCSTX0_RDPCSTX_CLOCK_CNTL__RDPCS_SRAMCLK_GATE_DIS__SHIFT 0xc
+#define RDPCSTX0_RDPCSTX_CLOCK_CNTL__RDPCS_SRAMCLK_EN__SHIFT 0xd
+#define RDPCSTX0_RDPCSTX_CLOCK_CNTL__RDPCS_SRAMCLK_CLOCK_ON__SHIFT 0xe
+#define RDPCSTX0_RDPCSTX_CLOCK_CNTL__RDPCS_SRAMCLK_BYPASS__SHIFT 0x10
+#define RDPCSTX0_RDPCSTX_CLOCK_CNTL__RDPCS_EXT_REFCLK_EN_MASK 0x00000001L
+#define RDPCSTX0_RDPCSTX_CLOCK_CNTL__RDPCS_SYMCLK_DIV2_TX0_EN_MASK 0x00000010L
+#define RDPCSTX0_RDPCSTX_CLOCK_CNTL__RDPCS_SYMCLK_DIV2_TX1_EN_MASK 0x00000020L
+#define RDPCSTX0_RDPCSTX_CLOCK_CNTL__RDPCS_SYMCLK_DIV2_TX2_EN_MASK 0x00000040L
+#define RDPCSTX0_RDPCSTX_CLOCK_CNTL__RDPCS_SYMCLK_DIV2_TX3_EN_MASK 0x00000080L
+#define RDPCSTX0_RDPCSTX_CLOCK_CNTL__RDPCS_SYMCLK_DIV2_GATE_DIS_MASK 0x00000100L
+#define RDPCSTX0_RDPCSTX_CLOCK_CNTL__RDPCS_SYMCLK_DIV2_EN_MASK 0x00000200L
+#define RDPCSTX0_RDPCSTX_CLOCK_CNTL__RDPCS_SYMCLK_DIV2_CLOCK_ON_MASK 0x00000400L
+#define RDPCSTX0_RDPCSTX_CLOCK_CNTL__RDPCS_SRAMCLK_GATE_DIS_MASK 0x00001000L
+#define RDPCSTX0_RDPCSTX_CLOCK_CNTL__RDPCS_SRAMCLK_EN_MASK 0x00002000L
+#define RDPCSTX0_RDPCSTX_CLOCK_CNTL__RDPCS_SRAMCLK_CLOCK_ON_MASK 0x00004000L
+#define RDPCSTX0_RDPCSTX_CLOCK_CNTL__RDPCS_SRAMCLK_BYPASS_MASK 0x00010000L
+//RDPCSTX0_RDPCSTX_INTERRUPT_CONTROL
+#define RDPCSTX0_RDPCSTX_INTERRUPT_CONTROL__RDPCS_REG_FIFO_OVERFLOW__SHIFT 0x0
+#define RDPCSTX0_RDPCSTX_INTERRUPT_CONTROL__RDPCS_DPALT_DISABLE_TOGGLE__SHIFT 0x1
+#define RDPCSTX0_RDPCSTX_INTERRUPT_CONTROL__RDPCS_DPALT_4LANE_TOGGLE__SHIFT 0x2
+#define RDPCSTX0_RDPCSTX_INTERRUPT_CONTROL__RDPCS_TX0_FIFO_ERROR__SHIFT 0x4
+#define RDPCSTX0_RDPCSTX_INTERRUPT_CONTROL__RDPCS_TX1_FIFO_ERROR__SHIFT 0x5
+#define RDPCSTX0_RDPCSTX_INTERRUPT_CONTROL__RDPCS_TX2_FIFO_ERROR__SHIFT 0x6
+#define RDPCSTX0_RDPCSTX_INTERRUPT_CONTROL__RDPCS_TX3_FIFO_ERROR__SHIFT 0x7
+#define RDPCSTX0_RDPCSTX_INTERRUPT_CONTROL__RDPCS_REG_ERROR_CLR__SHIFT 0x8
+#define RDPCSTX0_RDPCSTX_INTERRUPT_CONTROL__RDPCS_DPALT_DISABLE_TOGGLE_CLR__SHIFT 0x9
+#define RDPCSTX0_RDPCSTX_INTERRUPT_CONTROL__RDPCS_DPALT_4LANE_TOGGLE_CLR__SHIFT 0xa
+#define RDPCSTX0_RDPCSTX_INTERRUPT_CONTROL__RDPCS_TX_ERROR_CLR__SHIFT 0xc
+#define RDPCSTX0_RDPCSTX_INTERRUPT_CONTROL__RDPCS_REG_FIFO_ERROR_MASK__SHIFT 0x10
+#define RDPCSTX0_RDPCSTX_INTERRUPT_CONTROL__RDPCS_DPALT_DISABLE_TOGGLE_MASK__SHIFT 0x11
+#define RDPCSTX0_RDPCSTX_INTERRUPT_CONTROL__RDPCS_DPALT_4LANE_TOGGLE_MASK__SHIFT 0x12
+#define RDPCSTX0_RDPCSTX_INTERRUPT_CONTROL__RDPCS_TX_FIFO_ERROR_MASK__SHIFT 0x14
+#define RDPCSTX0_RDPCSTX_INTERRUPT_CONTROL__RDPCS_REG_FIFO_OVERFLOW_MASK 0x00000001L
+#define RDPCSTX0_RDPCSTX_INTERRUPT_CONTROL__RDPCS_DPALT_DISABLE_TOGGLE_MASK 0x00000002L
+#define RDPCSTX0_RDPCSTX_INTERRUPT_CONTROL__RDPCS_DPALT_4LANE_TOGGLE_MASK 0x00000004L
+#define RDPCSTX0_RDPCSTX_INTERRUPT_CONTROL__RDPCS_TX0_FIFO_ERROR_MASK 0x00000010L
+#define RDPCSTX0_RDPCSTX_INTERRUPT_CONTROL__RDPCS_TX1_FIFO_ERROR_MASK 0x00000020L
+#define RDPCSTX0_RDPCSTX_INTERRUPT_CONTROL__RDPCS_TX2_FIFO_ERROR_MASK 0x00000040L
+#define RDPCSTX0_RDPCSTX_INTERRUPT_CONTROL__RDPCS_TX3_FIFO_ERROR_MASK 0x00000080L
+#define RDPCSTX0_RDPCSTX_INTERRUPT_CONTROL__RDPCS_REG_ERROR_CLR_MASK 0x00000100L
+#define RDPCSTX0_RDPCSTX_INTERRUPT_CONTROL__RDPCS_DPALT_DISABLE_TOGGLE_CLR_MASK 0x00000200L
+#define RDPCSTX0_RDPCSTX_INTERRUPT_CONTROL__RDPCS_DPALT_4LANE_TOGGLE_CLR_MASK 0x00000400L
+#define RDPCSTX0_RDPCSTX_INTERRUPT_CONTROL__RDPCS_TX_ERROR_CLR_MASK 0x00001000L
+#define RDPCSTX0_RDPCSTX_INTERRUPT_CONTROL__RDPCS_REG_FIFO_ERROR_MASK_MASK 0x00010000L
+#define RDPCSTX0_RDPCSTX_INTERRUPT_CONTROL__RDPCS_DPALT_DISABLE_TOGGLE_MASK_MASK 0x00020000L
+#define RDPCSTX0_RDPCSTX_INTERRUPT_CONTROL__RDPCS_DPALT_4LANE_TOGGLE_MASK_MASK 0x00040000L
+#define RDPCSTX0_RDPCSTX_INTERRUPT_CONTROL__RDPCS_TX_FIFO_ERROR_MASK_MASK 0x00100000L
+//RDPCSTX0_RDPCSTX_PLL_UPDATE_DATA
+#define RDPCSTX0_RDPCSTX_PLL_UPDATE_DATA__RDPCS_PLL_UPDATE_DATA__SHIFT 0x0
+#define RDPCSTX0_RDPCSTX_PLL_UPDATE_DATA__RDPCS_PLL_UPDATE_DATA_MASK 0x00000001L
+//RDPCSTX0_RDPCS_TX_CR_ADDR
+#define RDPCSTX0_RDPCS_TX_CR_ADDR__RDPCS_TX_CR_ADDR__SHIFT 0x0
+#define RDPCSTX0_RDPCS_TX_CR_ADDR__RDPCS_TX_CR_ADDR_MASK 0x0000FFFFL
+//RDPCSTX0_RDPCS_TX_CR_DATA
+#define RDPCSTX0_RDPCS_TX_CR_DATA__RDPCS_TX_CR_DATA__SHIFT 0x0
+#define RDPCSTX0_RDPCS_TX_CR_DATA__RDPCS_TX_CR_DATA_MASK 0x0000FFFFL
+//RDPCSTX0_RDPCS_TX_SRAM_CNTL
+#define RDPCSTX0_RDPCS_TX_SRAM_CNTL__RDPCS_MEM_PWR_DIS__SHIFT 0x14
+#define RDPCSTX0_RDPCS_TX_SRAM_CNTL__RDPCS_MEM_PWR_FORCE__SHIFT 0x18
+#define RDPCSTX0_RDPCS_TX_SRAM_CNTL__RDPCS_MEM_PWR_PWR_STATE__SHIFT 0x1c
+#define RDPCSTX0_RDPCS_TX_SRAM_CNTL__RDPCS_MEM_PWR_DIS_MASK 0x00100000L
+#define RDPCSTX0_RDPCS_TX_SRAM_CNTL__RDPCS_MEM_PWR_FORCE_MASK 0x03000000L
+#define RDPCSTX0_RDPCS_TX_SRAM_CNTL__RDPCS_MEM_PWR_PWR_STATE_MASK 0x30000000L
+//RDPCSTX0_RDPCSTX_MEM_POWER_CTRL
+#define RDPCSTX0_RDPCSTX_MEM_POWER_CTRL__RDPCS_FUSE_RM_FUSES__SHIFT 0x0
+#define RDPCSTX0_RDPCSTX_MEM_POWER_CTRL__RDPCS_FUSE_CUSTOM_RM_FUSES__SHIFT 0xc
+#define RDPCSTX0_RDPCSTX_MEM_POWER_CTRL__RDPCS_MEM_POWER_CTRL_PDP_BC1__SHIFT 0x1a
+#define RDPCSTX0_RDPCSTX_MEM_POWER_CTRL__RDPCS_MEM_POWER_CTRL_PDP_BC2__SHIFT 0x1b
+#define RDPCSTX0_RDPCSTX_MEM_POWER_CTRL__RDPCS_MEM_POWER_CTRL_HD_BC1__SHIFT 0x1c
+#define RDPCSTX0_RDPCSTX_MEM_POWER_CTRL__RDPCS_MEM_POWER_CTRL_HD_BC2__SHIFT 0x1d
+#define RDPCSTX0_RDPCSTX_MEM_POWER_CTRL__RDPCS_LIVMIN_DIS_SRAM__SHIFT 0x1e
+#define RDPCSTX0_RDPCSTX_MEM_POWER_CTRL__RDPCS_FUSE_RM_FUSES_MASK 0x00000FFFL
+#define RDPCSTX0_RDPCSTX_MEM_POWER_CTRL__RDPCS_FUSE_CUSTOM_RM_FUSES_MASK 0x03FFF000L
+#define RDPCSTX0_RDPCSTX_MEM_POWER_CTRL__RDPCS_MEM_POWER_CTRL_PDP_BC1_MASK 0x04000000L
+#define RDPCSTX0_RDPCSTX_MEM_POWER_CTRL__RDPCS_MEM_POWER_CTRL_PDP_BC2_MASK 0x08000000L
+#define RDPCSTX0_RDPCSTX_MEM_POWER_CTRL__RDPCS_MEM_POWER_CTRL_HD_BC1_MASK 0x10000000L
+#define RDPCSTX0_RDPCSTX_MEM_POWER_CTRL__RDPCS_MEM_POWER_CTRL_HD_BC2_MASK 0x20000000L
+#define RDPCSTX0_RDPCSTX_MEM_POWER_CTRL__RDPCS_LIVMIN_DIS_SRAM_MASK 0x40000000L
+//RDPCSTX0_RDPCSTX_MEM_POWER_CTRL2
+#define RDPCSTX0_RDPCSTX_MEM_POWER_CTRL2__RDPCS_MEM_POWER_CTRL_POFF__SHIFT 0x0
+#define RDPCSTX0_RDPCSTX_MEM_POWER_CTRL2__RDPCS_MEM_POWER_CTRL_FISO__SHIFT 0x2
+#define RDPCSTX0_RDPCSTX_MEM_POWER_CTRL2__RDPCS_MEM_POWER_CTRL_POFF_MASK 0x00000003L
+#define RDPCSTX0_RDPCSTX_MEM_POWER_CTRL2__RDPCS_MEM_POWER_CTRL_FISO_MASK 0x00000004L
+//RDPCSTX0_RDPCSTX_SCRATCH
+#define RDPCSTX0_RDPCSTX_SCRATCH__RDPCSTX_SCRATCH__SHIFT 0x0
+#define RDPCSTX0_RDPCSTX_SCRATCH__RDPCSTX_SCRATCH_MASK 0xFFFFFFFFL
+//RDPCSTX0_RDPCSTX_DMCU_DPALT_DIS_BLOCK_REG
+#define RDPCSTX0_RDPCSTX_DMCU_DPALT_DIS_BLOCK_REG__RDPCS_DMCU_DPALT_DIS_BLOCK_REG__SHIFT 0x0
+#define RDPCSTX0_RDPCSTX_DMCU_DPALT_DIS_BLOCK_REG__RDPCS_DMCU_DPALT_FORCE_SYMCLK_DIV2_DIS__SHIFT 0x4
+#define RDPCSTX0_RDPCSTX_DMCU_DPALT_DIS_BLOCK_REG__RDPCS_DMCU_DPALT_CONTROL_SPARE__SHIFT 0x8
+#define RDPCSTX0_RDPCSTX_DMCU_DPALT_DIS_BLOCK_REG__RDPCS_DMCU_DPALT_DIS_BLOCK_REG_MASK 0x00000001L
+#define RDPCSTX0_RDPCSTX_DMCU_DPALT_DIS_BLOCK_REG__RDPCS_DMCU_DPALT_FORCE_SYMCLK_DIV2_DIS_MASK 0x00000010L
+#define RDPCSTX0_RDPCSTX_DMCU_DPALT_DIS_BLOCK_REG__RDPCS_DMCU_DPALT_CONTROL_SPARE_MASK 0x0000FF00L
+//RDPCSTX0_RDPCSTX_DEBUG_CONFIG
+#define RDPCSTX0_RDPCSTX_DEBUG_CONFIG__RDPCS_DBG_EN__SHIFT 0x0
+#define RDPCSTX0_RDPCSTX_DEBUG_CONFIG__RDPCS_DBG_SEL_ASYNC_8BIT__SHIFT 0x4
+#define RDPCSTX0_RDPCSTX_DEBUG_CONFIG__RDPCS_DBG_SEL_ASYNC_SWAP__SHIFT 0x7
+#define RDPCSTX0_RDPCSTX_DEBUG_CONFIG__RDPCS_DBG_SEL_TEST_CLK__SHIFT 0x8
+#define RDPCSTX0_RDPCSTX_DEBUG_CONFIG__RDPCS_DBG_CR_COUNT_EXPIRE__SHIFT 0xf
+#define RDPCSTX0_RDPCSTX_DEBUG_CONFIG__RDPCS_DBG_CR_COUNT_MAX__SHIFT 0x10
+#define RDPCSTX0_RDPCSTX_DEBUG_CONFIG__RDPCS_DBG_CR_COUNT__SHIFT 0x18
+#define RDPCSTX0_RDPCSTX_DEBUG_CONFIG__RDPCS_DBG_EN_MASK 0x00000001L
+#define RDPCSTX0_RDPCSTX_DEBUG_CONFIG__RDPCS_DBG_SEL_ASYNC_8BIT_MASK 0x00000070L
+#define RDPCSTX0_RDPCSTX_DEBUG_CONFIG__RDPCS_DBG_SEL_ASYNC_SWAP_MASK 0x00000080L
+#define RDPCSTX0_RDPCSTX_DEBUG_CONFIG__RDPCS_DBG_SEL_TEST_CLK_MASK 0x00001F00L
+#define RDPCSTX0_RDPCSTX_DEBUG_CONFIG__RDPCS_DBG_CR_COUNT_EXPIRE_MASK 0x00008000L
+#define RDPCSTX0_RDPCSTX_DEBUG_CONFIG__RDPCS_DBG_CR_COUNT_MAX_MASK 0x00FF0000L
+#define RDPCSTX0_RDPCSTX_DEBUG_CONFIG__RDPCS_DBG_CR_COUNT_MASK 0xFF000000L
+//RDPCSTX0_RDPCSTX_PHY_CNTL0
+#define RDPCSTX0_RDPCSTX_PHY_CNTL0__RDPCS_PHY_RESET__SHIFT 0x0
+#define RDPCSTX0_RDPCSTX_PHY_CNTL0__RDPCS_PHY_TCA_PHY_RESET__SHIFT 0x1
+#define RDPCSTX0_RDPCSTX_PHY_CNTL0__RDPCS_PHY_TCA_APB_RESET_N__SHIFT 0x2
+#define RDPCSTX0_RDPCSTX_PHY_CNTL0__RDPCS_PHY_TEST_POWERDOWN__SHIFT 0x3
+#define RDPCSTX0_RDPCSTX_PHY_CNTL0__RDPCS_PHY_DTB_OUT__SHIFT 0x4
+#define RDPCSTX0_RDPCSTX_PHY_CNTL0__RDPCS_PHY_HDMIMODE_ENABLE__SHIFT 0x8
+#define RDPCSTX0_RDPCSTX_PHY_CNTL0__RDPCS_PHY_REF_RANGE__SHIFT 0x9
+#define RDPCSTX0_RDPCSTX_PHY_CNTL0__RDPCS_PHY_TX_VBOOST_LVL__SHIFT 0xe
+#define RDPCSTX0_RDPCSTX_PHY_CNTL0__RDPCS_PHY_RTUNE_REQ__SHIFT 0x11
+#define RDPCSTX0_RDPCSTX_PHY_CNTL0__RDPCS_PHY_RTUNE_ACK__SHIFT 0x12
+#define RDPCSTX0_RDPCSTX_PHY_CNTL0__RDPCS_PHY_CR_PARA_SEL__SHIFT 0x14
+#define RDPCSTX0_RDPCSTX_PHY_CNTL0__RDPCS_PHY_CR_MUX_SEL__SHIFT 0x15
+#define RDPCSTX0_RDPCSTX_PHY_CNTL0__RDPCS_PHY_REF_CLKDET_EN__SHIFT 0x18
+#define RDPCSTX0_RDPCSTX_PHY_CNTL0__RDPCS_PHY_REF_CLKDET_RESULT__SHIFT 0x19
+#define RDPCSTX0_RDPCSTX_PHY_CNTL0__RDPCS_SRAM_INIT_DONE__SHIFT 0x1c
+#define RDPCSTX0_RDPCSTX_PHY_CNTL0__RDPCS_SRAM_EXT_LD_DONE__SHIFT 0x1d
+#define RDPCSTX0_RDPCSTX_PHY_CNTL0__RDPCS_SRAM_BYPASS__SHIFT 0x1f
+#define RDPCSTX0_RDPCSTX_PHY_CNTL0__RDPCS_PHY_RESET_MASK 0x00000001L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL0__RDPCS_PHY_TCA_PHY_RESET_MASK 0x00000002L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL0__RDPCS_PHY_TCA_APB_RESET_N_MASK 0x00000004L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL0__RDPCS_PHY_TEST_POWERDOWN_MASK 0x00000008L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL0__RDPCS_PHY_DTB_OUT_MASK 0x00000030L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL0__RDPCS_PHY_HDMIMODE_ENABLE_MASK 0x00000100L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL0__RDPCS_PHY_REF_RANGE_MASK 0x00003E00L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL0__RDPCS_PHY_TX_VBOOST_LVL_MASK 0x0001C000L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL0__RDPCS_PHY_RTUNE_REQ_MASK 0x00020000L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL0__RDPCS_PHY_RTUNE_ACK_MASK 0x00040000L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL0__RDPCS_PHY_CR_PARA_SEL_MASK 0x00100000L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL0__RDPCS_PHY_CR_MUX_SEL_MASK 0x00200000L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL0__RDPCS_PHY_REF_CLKDET_EN_MASK 0x01000000L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL0__RDPCS_PHY_REF_CLKDET_RESULT_MASK 0x02000000L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL0__RDPCS_SRAM_INIT_DONE_MASK 0x10000000L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL0__RDPCS_SRAM_EXT_LD_DONE_MASK 0x20000000L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL0__RDPCS_SRAM_BYPASS_MASK 0x80000000L
+//RDPCSTX0_RDPCSTX_PHY_CNTL1
+#define RDPCSTX0_RDPCSTX_PHY_CNTL1__RDPCS_PHY_PG_MODE_EN__SHIFT 0x0
+#define RDPCSTX0_RDPCSTX_PHY_CNTL1__RDPCS_PHY_PCS_PWR_EN__SHIFT 0x1
+#define RDPCSTX0_RDPCSTX_PHY_CNTL1__RDPCS_PHY_PCS_PWR_STABLE__SHIFT 0x2
+#define RDPCSTX0_RDPCSTX_PHY_CNTL1__RDPCS_PHY_PMA_PWR_EN__SHIFT 0x3
+#define RDPCSTX0_RDPCSTX_PHY_CNTL1__RDPCS_PHY_PMA_PWR_STABLE__SHIFT 0x4
+#define RDPCSTX0_RDPCSTX_PHY_CNTL1__RDPCS_PHY_DP_PG_RESET__SHIFT 0x5
+#define RDPCSTX0_RDPCSTX_PHY_CNTL1__RDPCS_PHY_ANA_PWR_EN__SHIFT 0x6
+#define RDPCSTX0_RDPCSTX_PHY_CNTL1__RDPCS_PHY_ANA_PWR_STABLE__SHIFT 0x7
+#define RDPCSTX0_RDPCSTX_PHY_CNTL1__RDPCS_PHY_PG_MODE_EN_MASK 0x00000001L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL1__RDPCS_PHY_PCS_PWR_EN_MASK 0x00000002L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL1__RDPCS_PHY_PCS_PWR_STABLE_MASK 0x00000004L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL1__RDPCS_PHY_PMA_PWR_EN_MASK 0x00000008L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL1__RDPCS_PHY_PMA_PWR_STABLE_MASK 0x00000010L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL1__RDPCS_PHY_DP_PG_RESET_MASK 0x00000020L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL1__RDPCS_PHY_ANA_PWR_EN_MASK 0x00000040L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL1__RDPCS_PHY_ANA_PWR_STABLE_MASK 0x00000080L
+//RDPCSTX0_RDPCSTX_PHY_CNTL2
+#define RDPCSTX0_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP4_POR__SHIFT 0x3
+#define RDPCSTX0_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP_LANE0_RX2TX_PAR_LB_EN__SHIFT 0x4
+#define RDPCSTX0_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP_LANE1_RX2TX_PAR_LB_EN__SHIFT 0x5
+#define RDPCSTX0_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP_LANE2_RX2TX_PAR_LB_EN__SHIFT 0x6
+#define RDPCSTX0_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP_LANE3_RX2TX_PAR_LB_EN__SHIFT 0x7
+#define RDPCSTX0_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP_LANE0_TX2RX_SER_LB_EN__SHIFT 0x8
+#define RDPCSTX0_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP_LANE1_TX2RX_SER_LB_EN__SHIFT 0x9
+#define RDPCSTX0_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP_LANE2_TX2RX_SER_LB_EN__SHIFT 0xa
+#define RDPCSTX0_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP_LANE3_TX2RX_SER_LB_EN__SHIFT 0xb
+#define RDPCSTX0_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP4_POR_MASK 0x00000008L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP_LANE0_RX2TX_PAR_LB_EN_MASK 0x00000010L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP_LANE1_RX2TX_PAR_LB_EN_MASK 0x00000020L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP_LANE2_RX2TX_PAR_LB_EN_MASK 0x00000040L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP_LANE3_RX2TX_PAR_LB_EN_MASK 0x00000080L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP_LANE0_TX2RX_SER_LB_EN_MASK 0x00000100L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP_LANE1_TX2RX_SER_LB_EN_MASK 0x00000200L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP_LANE2_TX2RX_SER_LB_EN_MASK 0x00000400L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP_LANE3_TX2RX_SER_LB_EN_MASK 0x00000800L
+//RDPCSTX0_RDPCSTX_PHY_CNTL3
+#define RDPCSTX0_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX0_RESET__SHIFT 0x0
+#define RDPCSTX0_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX0_DISABLE__SHIFT 0x1
+#define RDPCSTX0_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX0_CLK_RDY__SHIFT 0x2
+#define RDPCSTX0_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX0_DATA_EN__SHIFT 0x3
+#define RDPCSTX0_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX0_REQ__SHIFT 0x4
+#define RDPCSTX0_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX0_ACK__SHIFT 0x5
+#define RDPCSTX0_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX1_RESET__SHIFT 0x8
+#define RDPCSTX0_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX1_DISABLE__SHIFT 0x9
+#define RDPCSTX0_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX1_CLK_RDY__SHIFT 0xa
+#define RDPCSTX0_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX1_DATA_EN__SHIFT 0xb
+#define RDPCSTX0_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX1_REQ__SHIFT 0xc
+#define RDPCSTX0_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX1_ACK__SHIFT 0xd
+#define RDPCSTX0_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX2_RESET__SHIFT 0x10
+#define RDPCSTX0_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX2_DISABLE__SHIFT 0x11
+#define RDPCSTX0_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX2_CLK_RDY__SHIFT 0x12
+#define RDPCSTX0_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX2_DATA_EN__SHIFT 0x13
+#define RDPCSTX0_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX2_REQ__SHIFT 0x14
+#define RDPCSTX0_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX2_ACK__SHIFT 0x15
+#define RDPCSTX0_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX3_RESET__SHIFT 0x18
+#define RDPCSTX0_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX3_DISABLE__SHIFT 0x19
+#define RDPCSTX0_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX3_CLK_RDY__SHIFT 0x1a
+#define RDPCSTX0_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX3_DATA_EN__SHIFT 0x1b
+#define RDPCSTX0_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX3_REQ__SHIFT 0x1c
+#define RDPCSTX0_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX3_ACK__SHIFT 0x1d
+#define RDPCSTX0_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX0_RESET_MASK 0x00000001L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX0_DISABLE_MASK 0x00000002L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX0_CLK_RDY_MASK 0x00000004L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX0_DATA_EN_MASK 0x00000008L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX0_REQ_MASK 0x00000010L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX0_ACK_MASK 0x00000020L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX1_RESET_MASK 0x00000100L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX1_DISABLE_MASK 0x00000200L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX1_CLK_RDY_MASK 0x00000400L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX1_DATA_EN_MASK 0x00000800L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX1_REQ_MASK 0x00001000L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX1_ACK_MASK 0x00002000L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX2_RESET_MASK 0x00010000L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX2_DISABLE_MASK 0x00020000L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX2_CLK_RDY_MASK 0x00040000L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX2_DATA_EN_MASK 0x00080000L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX2_REQ_MASK 0x00100000L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX2_ACK_MASK 0x00200000L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX3_RESET_MASK 0x01000000L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX3_DISABLE_MASK 0x02000000L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX3_CLK_RDY_MASK 0x04000000L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX3_DATA_EN_MASK 0x08000000L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX3_REQ_MASK 0x10000000L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX3_ACK_MASK 0x20000000L
+//RDPCSTX0_RDPCSTX_PHY_CNTL4
+#define RDPCSTX0_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX0_TERM_CTRL__SHIFT 0x0
+#define RDPCSTX0_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX0_INVERT__SHIFT 0x4
+#define RDPCSTX0_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX0_BYPASS_EQ_CALC__SHIFT 0x6
+#define RDPCSTX0_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX0_HP_PROT_EN__SHIFT 0x7
+#define RDPCSTX0_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX1_TERM_CTRL__SHIFT 0x8
+#define RDPCSTX0_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX1_INVERT__SHIFT 0xc
+#define RDPCSTX0_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX1_BYPASS_EQ_CALC__SHIFT 0xe
+#define RDPCSTX0_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX1_HP_PROT_EN__SHIFT 0xf
+#define RDPCSTX0_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX2_TERM_CTRL__SHIFT 0x10
+#define RDPCSTX0_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX2_INVERT__SHIFT 0x14
+#define RDPCSTX0_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX2_BYPASS_EQ_CALC__SHIFT 0x16
+#define RDPCSTX0_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX2_HP_PROT_EN__SHIFT 0x17
+#define RDPCSTX0_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX3_TERM_CTRL__SHIFT 0x18
+#define RDPCSTX0_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX3_INVERT__SHIFT 0x1c
+#define RDPCSTX0_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX3_BYPASS_EQ_CALC__SHIFT 0x1e
+#define RDPCSTX0_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX3_HP_PROT_EN__SHIFT 0x1f
+#define RDPCSTX0_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX0_TERM_CTRL_MASK 0x00000007L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX0_INVERT_MASK 0x00000010L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX0_BYPASS_EQ_CALC_MASK 0x00000040L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX0_HP_PROT_EN_MASK 0x00000080L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX1_TERM_CTRL_MASK 0x00000700L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX1_INVERT_MASK 0x00001000L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX1_BYPASS_EQ_CALC_MASK 0x00004000L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX1_HP_PROT_EN_MASK 0x00008000L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX2_TERM_CTRL_MASK 0x00070000L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX2_INVERT_MASK 0x00100000L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX2_BYPASS_EQ_CALC_MASK 0x00400000L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX2_HP_PROT_EN_MASK 0x00800000L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX3_TERM_CTRL_MASK 0x07000000L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX3_INVERT_MASK 0x10000000L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX3_BYPASS_EQ_CALC_MASK 0x40000000L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX3_HP_PROT_EN_MASK 0x80000000L
+//RDPCSTX0_RDPCSTX_PHY_CNTL5
+#define RDPCSTX0_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX0_LPD__SHIFT 0x0
+#define RDPCSTX0_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX0_RATE__SHIFT 0x1
+#define RDPCSTX0_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX0_WIDTH__SHIFT 0x4
+#define RDPCSTX0_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX0_DETRX_REQ__SHIFT 0x6
+#define RDPCSTX0_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX0_DETRX_RESULT__SHIFT 0x7
+#define RDPCSTX0_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX1_LPD__SHIFT 0x8
+#define RDPCSTX0_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX1_RATE__SHIFT 0x9
+#define RDPCSTX0_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX1_WIDTH__SHIFT 0xc
+#define RDPCSTX0_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX1_DETRX_REQ__SHIFT 0xe
+#define RDPCSTX0_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX1_DETRX_RESULT__SHIFT 0xf
+#define RDPCSTX0_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX2_LPD__SHIFT 0x10
+#define RDPCSTX0_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX2_RATE__SHIFT 0x11
+#define RDPCSTX0_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX2_WIDTH__SHIFT 0x14
+#define RDPCSTX0_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX2_DETRX_REQ__SHIFT 0x16
+#define RDPCSTX0_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX2_DETRX_RESULT__SHIFT 0x17
+#define RDPCSTX0_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX3_LPD__SHIFT 0x18
+#define RDPCSTX0_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX3_RATE__SHIFT 0x19
+#define RDPCSTX0_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX3_WIDTH__SHIFT 0x1c
+#define RDPCSTX0_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX3_DETRX_REQ__SHIFT 0x1e
+#define RDPCSTX0_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX3_DETRX_RESULT__SHIFT 0x1f
+#define RDPCSTX0_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX0_LPD_MASK 0x00000001L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX0_RATE_MASK 0x0000000EL
+#define RDPCSTX0_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX0_WIDTH_MASK 0x00000030L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX0_DETRX_REQ_MASK 0x00000040L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX0_DETRX_RESULT_MASK 0x00000080L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX1_LPD_MASK 0x00000100L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX1_RATE_MASK 0x00000E00L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX1_WIDTH_MASK 0x00003000L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX1_DETRX_REQ_MASK 0x00004000L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX1_DETRX_RESULT_MASK 0x00008000L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX2_LPD_MASK 0x00010000L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX2_RATE_MASK 0x000E0000L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX2_WIDTH_MASK 0x00300000L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX2_DETRX_REQ_MASK 0x00400000L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX2_DETRX_RESULT_MASK 0x00800000L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX3_LPD_MASK 0x01000000L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX3_RATE_MASK 0x0E000000L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX3_WIDTH_MASK 0x30000000L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX3_DETRX_REQ_MASK 0x40000000L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX3_DETRX_RESULT_MASK 0x80000000L
+//RDPCSTX0_RDPCSTX_PHY_CNTL6
+#define RDPCSTX0_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX0_PSTATE__SHIFT 0x0
+#define RDPCSTX0_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX0_MPLL_EN__SHIFT 0x2
+#define RDPCSTX0_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX1_PSTATE__SHIFT 0x4
+#define RDPCSTX0_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX1_MPLL_EN__SHIFT 0x6
+#define RDPCSTX0_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX2_PSTATE__SHIFT 0x8
+#define RDPCSTX0_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX2_MPLL_EN__SHIFT 0xa
+#define RDPCSTX0_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX3_PSTATE__SHIFT 0xc
+#define RDPCSTX0_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX3_MPLL_EN__SHIFT 0xe
+#define RDPCSTX0_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DPALT_DP4__SHIFT 0x10
+#define RDPCSTX0_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DPALT_DISABLE__SHIFT 0x11
+#define RDPCSTX0_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DPALT_DISABLE_ACK__SHIFT 0x12
+#define RDPCSTX0_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_REF_CLK_EN__SHIFT 0x13
+#define RDPCSTX0_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_REF_CLK_REQ__SHIFT 0x14
+#define RDPCSTX0_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX0_PSTATE_MASK 0x00000003L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX0_MPLL_EN_MASK 0x00000004L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX1_PSTATE_MASK 0x00000030L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX1_MPLL_EN_MASK 0x00000040L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX2_PSTATE_MASK 0x00000300L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX2_MPLL_EN_MASK 0x00000400L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX3_PSTATE_MASK 0x00003000L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX3_MPLL_EN_MASK 0x00004000L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DPALT_DP4_MASK 0x00010000L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DPALT_DISABLE_MASK 0x00020000L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DPALT_DISABLE_ACK_MASK 0x00040000L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_REF_CLK_EN_MASK 0x00080000L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_REF_CLK_REQ_MASK 0x00100000L
+//RDPCSTX0_RDPCSTX_PHY_CNTL7
+#define RDPCSTX0_RDPCSTX_PHY_CNTL7__RDPCS_PHY_DP_MPLLB_FRACN_DEN__SHIFT 0x0
+#define RDPCSTX0_RDPCSTX_PHY_CNTL7__RDPCS_PHY_DP_MPLLB_FRACN_QUOT__SHIFT 0x10
+#define RDPCSTX0_RDPCSTX_PHY_CNTL7__RDPCS_PHY_DP_MPLLB_FRACN_DEN_MASK 0x0000FFFFL
+#define RDPCSTX0_RDPCSTX_PHY_CNTL7__RDPCS_PHY_DP_MPLLB_FRACN_QUOT_MASK 0xFFFF0000L
+//RDPCSTX0_RDPCSTX_PHY_CNTL8
+#define RDPCSTX0_RDPCSTX_PHY_CNTL8__RDPCS_PHY_DP_MPLLB_SSC_PEAK__SHIFT 0x0
+#define RDPCSTX0_RDPCSTX_PHY_CNTL8__RDPCS_PHY_DP_MPLLB_SSC_PEAK_MASK 0x000FFFFFL
+//RDPCSTX0_RDPCSTX_PHY_CNTL9
+#define RDPCSTX0_RDPCSTX_PHY_CNTL9__RDPCS_PHY_DP_MPLLB_SSC_STEPSIZE__SHIFT 0x0
+#define RDPCSTX0_RDPCSTX_PHY_CNTL9__RDPCS_PHY_DP_MPLLB_SSC_UP_SPREAD__SHIFT 0x18
+#define RDPCSTX0_RDPCSTX_PHY_CNTL9__RDPCS_PHY_DP_MPLLB_SSC_STEPSIZE_MASK 0x001FFFFFL
+#define RDPCSTX0_RDPCSTX_PHY_CNTL9__RDPCS_PHY_DP_MPLLB_SSC_UP_SPREAD_MASK 0x01000000L
+//RDPCSTX0_RDPCSTX_PHY_CNTL10
+#define RDPCSTX0_RDPCSTX_PHY_CNTL10__RDPCS_PHY_DP_MPLLB_FRACN_REM__SHIFT 0x0
+#define RDPCSTX0_RDPCSTX_PHY_CNTL10__RDPCS_PHY_DP_MPLLB_FRACN_REM_MASK 0x0000FFFFL
+//RDPCSTX0_RDPCSTX_PHY_CNTL11
+#define RDPCSTX0_RDPCSTX_PHY_CNTL11__RDPCS_PHY_DP_MPLLB_MULTIPLIER__SHIFT 0x4
+#define RDPCSTX0_RDPCSTX_PHY_CNTL11__RDPCS_PHY_HDMI_MPLLB_HDMI_DIV__SHIFT 0x10
+#define RDPCSTX0_RDPCSTX_PHY_CNTL11__RDPCS_PHY_DP_REF_CLK_MPLLB_DIV__SHIFT 0x14
+#define RDPCSTX0_RDPCSTX_PHY_CNTL11__RDPCS_PHY_HDMI_MPLLB_HDMI_PIXEL_CLK_DIV__SHIFT 0x18
+#define RDPCSTX0_RDPCSTX_PHY_CNTL11__RDPCS_PHY_DP_MPLLB_MULTIPLIER_MASK 0x0000FFF0L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL11__RDPCS_PHY_HDMI_MPLLB_HDMI_DIV_MASK 0x00070000L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL11__RDPCS_PHY_DP_REF_CLK_MPLLB_DIV_MASK 0x00700000L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL11__RDPCS_PHY_HDMI_MPLLB_HDMI_PIXEL_CLK_DIV_MASK 0x03000000L
+//RDPCSTX0_RDPCSTX_PHY_CNTL12
+#define RDPCSTX0_RDPCSTX_PHY_CNTL12__RDPCS_PHY_DP_MPLLB_DIV5_CLK_EN__SHIFT 0x0
+#define RDPCSTX0_RDPCSTX_PHY_CNTL12__RDPCS_PHY_DP_MPLLB_WORD_DIV2_EN__SHIFT 0x2
+#define RDPCSTX0_RDPCSTX_PHY_CNTL12__RDPCS_PHY_DP_MPLLB_TX_CLK_DIV__SHIFT 0x4
+#define RDPCSTX0_RDPCSTX_PHY_CNTL12__RDPCS_PHY_DP_MPLLB_STATE__SHIFT 0x7
+#define RDPCSTX0_RDPCSTX_PHY_CNTL12__RDPCS_PHY_DP_MPLLB_SSC_EN__SHIFT 0x8
+#define RDPCSTX0_RDPCSTX_PHY_CNTL12__RDPCS_PHY_DP_MPLLB_DIV5_CLK_EN_MASK 0x00000001L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL12__RDPCS_PHY_DP_MPLLB_WORD_DIV2_EN_MASK 0x00000004L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL12__RDPCS_PHY_DP_MPLLB_TX_CLK_DIV_MASK 0x00000070L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL12__RDPCS_PHY_DP_MPLLB_STATE_MASK 0x00000080L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL12__RDPCS_PHY_DP_MPLLB_SSC_EN_MASK 0x00000100L
+//RDPCSTX0_RDPCSTX_PHY_CNTL13
+#define RDPCSTX0_RDPCSTX_PHY_CNTL13__RDPCS_PHY_DP_MPLLB_DIV_MULTIPLIER__SHIFT 0x14
+#define RDPCSTX0_RDPCSTX_PHY_CNTL13__RDPCS_PHY_DP_MPLLB_DIV_CLK_EN__SHIFT 0x1c
+#define RDPCSTX0_RDPCSTX_PHY_CNTL13__RDPCS_PHY_DP_MPLLB_FORCE_EN__SHIFT 0x1d
+#define RDPCSTX0_RDPCSTX_PHY_CNTL13__RDPCS_PHY_DP_MPLLB_INIT_CAL_DISABLE__SHIFT 0x1e
+#define RDPCSTX0_RDPCSTX_PHY_CNTL13__RDPCS_PHY_DP_MPLLB_DIV_MULTIPLIER_MASK 0x0FF00000L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL13__RDPCS_PHY_DP_MPLLB_DIV_CLK_EN_MASK 0x10000000L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL13__RDPCS_PHY_DP_MPLLB_FORCE_EN_MASK 0x20000000L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL13__RDPCS_PHY_DP_MPLLB_INIT_CAL_DISABLE_MASK 0x40000000L
+//RDPCSTX0_RDPCSTX_PHY_CNTL14
+#define RDPCSTX0_RDPCSTX_PHY_CNTL14__RDPCS_PHY_DP_MPLLB_CAL_FORCE__SHIFT 0x0
+#define RDPCSTX0_RDPCSTX_PHY_CNTL14__RDPCS_PHY_DP_MPLLB_FRACN_EN__SHIFT 0x18
+#define RDPCSTX0_RDPCSTX_PHY_CNTL14__RDPCS_PHY_DP_MPLLB_PMIX_EN__SHIFT 0x1c
+#define RDPCSTX0_RDPCSTX_PHY_CNTL14__RDPCS_PHY_DP_MPLLB_CAL_FORCE_MASK 0x00000001L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL14__RDPCS_PHY_DP_MPLLB_FRACN_EN_MASK 0x01000000L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL14__RDPCS_PHY_DP_MPLLB_PMIX_EN_MASK 0x10000000L
+//RDPCSTX0_RDPCSTX_PHY_FUSE0
+#define RDPCSTX0_RDPCSTX_PHY_FUSE0__RDPCS_PHY_DP_TX0_EQ_MAIN__SHIFT 0x0
+#define RDPCSTX0_RDPCSTX_PHY_FUSE0__RDPCS_PHY_DP_TX0_EQ_PRE__SHIFT 0x6
+#define RDPCSTX0_RDPCSTX_PHY_FUSE0__RDPCS_PHY_DP_TX0_EQ_POST__SHIFT 0xc
+#define RDPCSTX0_RDPCSTX_PHY_FUSE0__RDPCS_PHY_DP_MPLLB_V2I__SHIFT 0x12
+#define RDPCSTX0_RDPCSTX_PHY_FUSE0__RDPCS_PHY_DP_MPLLB_FREQ_VCO__SHIFT 0x14
+#define RDPCSTX0_RDPCSTX_PHY_FUSE0__RDPCS_PHY_DP_TX0_EQ_MAIN_MASK 0x0000003FL
+#define RDPCSTX0_RDPCSTX_PHY_FUSE0__RDPCS_PHY_DP_TX0_EQ_PRE_MASK 0x00000FC0L
+#define RDPCSTX0_RDPCSTX_PHY_FUSE0__RDPCS_PHY_DP_TX0_EQ_POST_MASK 0x0003F000L
+#define RDPCSTX0_RDPCSTX_PHY_FUSE0__RDPCS_PHY_DP_MPLLB_V2I_MASK 0x000C0000L
+#define RDPCSTX0_RDPCSTX_PHY_FUSE0__RDPCS_PHY_DP_MPLLB_FREQ_VCO_MASK 0x00300000L
+//RDPCSTX0_RDPCSTX_PHY_FUSE1
+#define RDPCSTX0_RDPCSTX_PHY_FUSE1__RDPCS_PHY_DP_TX1_EQ_MAIN__SHIFT 0x0
+#define RDPCSTX0_RDPCSTX_PHY_FUSE1__RDPCS_PHY_DP_TX1_EQ_PRE__SHIFT 0x6
+#define RDPCSTX0_RDPCSTX_PHY_FUSE1__RDPCS_PHY_DP_TX1_EQ_POST__SHIFT 0xc
+#define RDPCSTX0_RDPCSTX_PHY_FUSE1__RDPCS_PHY_DP_MPLLB_CP_INT__SHIFT 0x12
+#define RDPCSTX0_RDPCSTX_PHY_FUSE1__RDPCS_PHY_DP_MPLLB_CP_PROP__SHIFT 0x19
+#define RDPCSTX0_RDPCSTX_PHY_FUSE1__RDPCS_PHY_DP_TX1_EQ_MAIN_MASK 0x0000003FL
+#define RDPCSTX0_RDPCSTX_PHY_FUSE1__RDPCS_PHY_DP_TX1_EQ_PRE_MASK 0x00000FC0L
+#define RDPCSTX0_RDPCSTX_PHY_FUSE1__RDPCS_PHY_DP_TX1_EQ_POST_MASK 0x0003F000L
+#define RDPCSTX0_RDPCSTX_PHY_FUSE1__RDPCS_PHY_DP_MPLLB_CP_INT_MASK 0x01FC0000L
+#define RDPCSTX0_RDPCSTX_PHY_FUSE1__RDPCS_PHY_DP_MPLLB_CP_PROP_MASK 0xFE000000L
+//RDPCSTX0_RDPCSTX_PHY_FUSE2
+#define RDPCSTX0_RDPCSTX_PHY_FUSE2__RDPCS_PHY_DP_TX2_EQ_MAIN__SHIFT 0x0
+#define RDPCSTX0_RDPCSTX_PHY_FUSE2__RDPCS_PHY_DP_TX2_EQ_PRE__SHIFT 0x6
+#define RDPCSTX0_RDPCSTX_PHY_FUSE2__RDPCS_PHY_DP_TX2_EQ_POST__SHIFT 0xc
+#define RDPCSTX0_RDPCSTX_PHY_FUSE2__RDPCS_PHY_DP_TX2_EQ_MAIN_MASK 0x0000003FL
+#define RDPCSTX0_RDPCSTX_PHY_FUSE2__RDPCS_PHY_DP_TX2_EQ_PRE_MASK 0x00000FC0L
+#define RDPCSTX0_RDPCSTX_PHY_FUSE2__RDPCS_PHY_DP_TX2_EQ_POST_MASK 0x0003F000L
+//RDPCSTX0_RDPCSTX_PHY_FUSE3
+#define RDPCSTX0_RDPCSTX_PHY_FUSE3__RDPCS_PHY_DP_TX3_EQ_MAIN__SHIFT 0x0
+#define RDPCSTX0_RDPCSTX_PHY_FUSE3__RDPCS_PHY_DP_TX3_EQ_PRE__SHIFT 0x6
+#define RDPCSTX0_RDPCSTX_PHY_FUSE3__RDPCS_PHY_DP_TX3_EQ_POST__SHIFT 0xc
+#define RDPCSTX0_RDPCSTX_PHY_FUSE3__RDPCS_PHY_DCO_FINETUNE__SHIFT 0x12
+#define RDPCSTX0_RDPCSTX_PHY_FUSE3__RDPCS_PHY_DCO_RANGE__SHIFT 0x18
+#define RDPCSTX0_RDPCSTX_PHY_FUSE3__RDPCS_PHY_DP_TX3_EQ_MAIN_MASK 0x0000003FL
+#define RDPCSTX0_RDPCSTX_PHY_FUSE3__RDPCS_PHY_DP_TX3_EQ_PRE_MASK 0x00000FC0L
+#define RDPCSTX0_RDPCSTX_PHY_FUSE3__RDPCS_PHY_DP_TX3_EQ_POST_MASK 0x0003F000L
+#define RDPCSTX0_RDPCSTX_PHY_FUSE3__RDPCS_PHY_DCO_FINETUNE_MASK 0x00FC0000L
+#define RDPCSTX0_RDPCSTX_PHY_FUSE3__RDPCS_PHY_DCO_RANGE_MASK 0x03000000L
+//RDPCSTX0_RDPCSTX_PHY_RX_LD_VAL
+#define RDPCSTX0_RDPCSTX_PHY_RX_LD_VAL__RDPCS_PHY_RX_REF_LD_VAL__SHIFT 0x0
+#define RDPCSTX0_RDPCSTX_PHY_RX_LD_VAL__RDPCS_PHY_RX_VCO_LD_VAL__SHIFT 0x8
+#define RDPCSTX0_RDPCSTX_PHY_RX_LD_VAL__RDPCS_PHY_RX_REF_LD_VAL_MASK 0x0000007FL
+#define RDPCSTX0_RDPCSTX_PHY_RX_LD_VAL__RDPCS_PHY_RX_VCO_LD_VAL_MASK 0x001FFF00L
+//RDPCSTX0_RDPCSTX_DMCU_DPALT_PHY_CNTL3
+#define RDPCSTX0_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX0_RESET_RESERVED__SHIFT 0x0
+#define RDPCSTX0_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX0_DISABLE_RESERVED__SHIFT 0x1
+#define RDPCSTX0_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX0_CLK_RDY_RESERVED__SHIFT 0x2
+#define RDPCSTX0_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX0_DATA_EN_RESERVED__SHIFT 0x3
+#define RDPCSTX0_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX0_REQ_RESERVED__SHIFT 0x4
+#define RDPCSTX0_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX0_ACK_RESERVED__SHIFT 0x5
+#define RDPCSTX0_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX1_RESET_RESERVED__SHIFT 0x8
+#define RDPCSTX0_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX1_DISABLE_RESERVED__SHIFT 0x9
+#define RDPCSTX0_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX1_CLK_RDY_RESERVED__SHIFT 0xa
+#define RDPCSTX0_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX1_DATA_EN_RESERVED__SHIFT 0xb
+#define RDPCSTX0_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX1_REQ_RESERVED__SHIFT 0xc
+#define RDPCSTX0_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX1_ACK_RESERVED__SHIFT 0xd
+#define RDPCSTX0_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX2_RESET_RESERVED__SHIFT 0x10
+#define RDPCSTX0_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX2_DISABLE_RESERVED__SHIFT 0x11
+#define RDPCSTX0_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX2_CLK_RDY_RESERVED__SHIFT 0x12
+#define RDPCSTX0_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX2_DATA_EN_RESERVED__SHIFT 0x13
+#define RDPCSTX0_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX2_REQ_RESERVED__SHIFT 0x14
+#define RDPCSTX0_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX2_ACK_RESERVED__SHIFT 0x15
+#define RDPCSTX0_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX3_RESET_RESERVED__SHIFT 0x18
+#define RDPCSTX0_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX3_DISABLE_RESERVED__SHIFT 0x19
+#define RDPCSTX0_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX3_CLK_RDY_RESERVED__SHIFT 0x1a
+#define RDPCSTX0_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX3_DATA_EN_RESERVED__SHIFT 0x1b
+#define RDPCSTX0_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX3_REQ_RESERVED__SHIFT 0x1c
+#define RDPCSTX0_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX3_ACK_RESERVED__SHIFT 0x1d
+#define RDPCSTX0_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX0_RESET_RESERVED_MASK 0x00000001L
+#define RDPCSTX0_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX0_DISABLE_RESERVED_MASK 0x00000002L
+#define RDPCSTX0_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX0_CLK_RDY_RESERVED_MASK 0x00000004L
+#define RDPCSTX0_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX0_DATA_EN_RESERVED_MASK 0x00000008L
+#define RDPCSTX0_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX0_REQ_RESERVED_MASK 0x00000010L
+#define RDPCSTX0_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX0_ACK_RESERVED_MASK 0x00000020L
+#define RDPCSTX0_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX1_RESET_RESERVED_MASK 0x00000100L
+#define RDPCSTX0_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX1_DISABLE_RESERVED_MASK 0x00000200L
+#define RDPCSTX0_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX1_CLK_RDY_RESERVED_MASK 0x00000400L
+#define RDPCSTX0_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX1_DATA_EN_RESERVED_MASK 0x00000800L
+#define RDPCSTX0_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX1_REQ_RESERVED_MASK 0x00001000L
+#define RDPCSTX0_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX1_ACK_RESERVED_MASK 0x00002000L
+#define RDPCSTX0_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX2_RESET_RESERVED_MASK 0x00010000L
+#define RDPCSTX0_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX2_DISABLE_RESERVED_MASK 0x00020000L
+#define RDPCSTX0_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX2_CLK_RDY_RESERVED_MASK 0x00040000L
+#define RDPCSTX0_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX2_DATA_EN_RESERVED_MASK 0x00080000L
+#define RDPCSTX0_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX2_REQ_RESERVED_MASK 0x00100000L
+#define RDPCSTX0_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX2_ACK_RESERVED_MASK 0x00200000L
+#define RDPCSTX0_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX3_RESET_RESERVED_MASK 0x01000000L
+#define RDPCSTX0_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX3_DISABLE_RESERVED_MASK 0x02000000L
+#define RDPCSTX0_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX3_CLK_RDY_RESERVED_MASK 0x04000000L
+#define RDPCSTX0_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX3_DATA_EN_RESERVED_MASK 0x08000000L
+#define RDPCSTX0_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX3_REQ_RESERVED_MASK 0x10000000L
+#define RDPCSTX0_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX3_ACK_RESERVED_MASK 0x20000000L
+//RDPCSTX0_RDPCSTX_DMCU_DPALT_PHY_CNTL6
+#define RDPCSTX0_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_TX0_PSTATE_RESERVED__SHIFT 0x0
+#define RDPCSTX0_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_TX0_MPLL_EN_RESERVED__SHIFT 0x2
+#define RDPCSTX0_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_TX1_PSTATE_RESERVED__SHIFT 0x4
+#define RDPCSTX0_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_TX1_MPLL_EN_RESERVED__SHIFT 0x6
+#define RDPCSTX0_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_TX2_PSTATE_RESERVED__SHIFT 0x8
+#define RDPCSTX0_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_TX2_MPLL_EN_RESERVED__SHIFT 0xa
+#define RDPCSTX0_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_TX3_PSTATE_RESERVED__SHIFT 0xc
+#define RDPCSTX0_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_TX3_MPLL_EN_RESERVED__SHIFT 0xe
+#define RDPCSTX0_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DPALT_DP4_RESERVED__SHIFT 0x10
+#define RDPCSTX0_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DPALT_DISABLE_RESERVED__SHIFT 0x11
+#define RDPCSTX0_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DPALT_DISABLE_ACK_RESERVED__SHIFT 0x12
+#define RDPCSTX0_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_REF_CLK_EN_RESERVED__SHIFT 0x13
+#define RDPCSTX0_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_REF_CLK_REQ_RESERVED__SHIFT 0x14
+#define RDPCSTX0_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_TX0_PSTATE_RESERVED_MASK 0x00000003L
+#define RDPCSTX0_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_TX0_MPLL_EN_RESERVED_MASK 0x00000004L
+#define RDPCSTX0_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_TX1_PSTATE_RESERVED_MASK 0x00000030L
+#define RDPCSTX0_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_TX1_MPLL_EN_RESERVED_MASK 0x00000040L
+#define RDPCSTX0_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_TX2_PSTATE_RESERVED_MASK 0x00000300L
+#define RDPCSTX0_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_TX2_MPLL_EN_RESERVED_MASK 0x00000400L
+#define RDPCSTX0_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_TX3_PSTATE_RESERVED_MASK 0x00003000L
+#define RDPCSTX0_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_TX3_MPLL_EN_RESERVED_MASK 0x00004000L
+#define RDPCSTX0_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DPALT_DP4_RESERVED_MASK 0x00010000L
+#define RDPCSTX0_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DPALT_DISABLE_RESERVED_MASK 0x00020000L
+#define RDPCSTX0_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DPALT_DISABLE_ACK_RESERVED_MASK 0x00040000L
+#define RDPCSTX0_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_REF_CLK_EN_RESERVED_MASK 0x00080000L
+#define RDPCSTX0_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_REF_CLK_REQ_RESERVED_MASK 0x00100000L
+//RDPCSTX0_RDPCSTX_DPALT_CONTROL_REG
+#define RDPCSTX0_RDPCSTX_DPALT_CONTROL_REG__RDPCS_ALLOW_DRIVER_ACCESS__SHIFT 0x0
+#define RDPCSTX0_RDPCSTX_DPALT_CONTROL_REG__RDPCS_DRIVER_ACCESS_BLOCKED__SHIFT 0x4
+#define RDPCSTX0_RDPCSTX_DPALT_CONTROL_REG__RDPCS_DPALT_CONTROL_SPARE__SHIFT 0x8
+#define RDPCSTX0_RDPCSTX_DPALT_CONTROL_REG__RDPCS_ALLOW_DRIVER_ACCESS_MASK 0x00000001L
+#define RDPCSTX0_RDPCSTX_DPALT_CONTROL_REG__RDPCS_DRIVER_ACCESS_BLOCKED_MASK 0x00000010L
+#define RDPCSTX0_RDPCSTX_DPALT_CONTROL_REG__RDPCS_DPALT_CONTROL_SPARE_MASK 0x0000FF00L
+
+
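+
+Each field in this sh_mask header comes as a __SHIFT/_MASK pair, and the driver consumes them through token-pasting helpers rather than by spelling the long names out (amdgpu's REG_GET_FIELD()/REG_SET_FIELD() follow this shape). A minimal sketch, assuming the definitions above are in scope and using hypothetical reg_rd32()/reg_wr32() MMIO accessors:
+
+#include <stdint.h>
+
+uint32_t reg_rd32(uint32_t reg);           /* hypothetical MMIO accessors */
+void reg_wr32(uint32_t reg, uint32_t val);
+
+/* Token-paste a register/field pair into the __SHIFT and _MASK names. */
+#define FIELD_SHIFT(reg, field)	reg##__##field##__SHIFT
+#define FIELD_MASK(reg, field)	reg##__##field##_MASK
+
+#define GET_FIELD(val, reg, field) \
+	(((val) & FIELD_MASK(reg, field)) >> FIELD_SHIFT(reg, field))
+
+#define SET_FIELD(val, reg, field, fv) \
+	(((val) & ~FIELD_MASK(reg, field)) | \
+	 (((uint32_t)(fv) << FIELD_SHIFT(reg, field)) & FIELD_MASK(reg, field)))
+
+/* Read-modify-write of the 4-bit FIFO read-start delay defined above. */
+static inline void rdpcstx0_set_rd_start_delay(uint32_t reg, uint32_t delay)
+{
+	uint32_t v = reg_rd32(reg);
+
+	v = SET_FIELD(v, RDPCSTX0_RDPCSTX_CNTL, RDPCS_TX_FIFO_RD_START_DELAY, delay);
+	reg_wr32(reg, v);
+}
+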
+// addressBlock: dpcssys_dpcssys_cr0_dispdec
+//DPCSSYS_CR0_DPCSSYS_CR_ADDR
+#define DPCSSYS_CR0_DPCSSYS_CR_ADDR__RDPCS_TX_CR_ADDR__SHIFT 0x0
+#define DPCSSYS_CR0_DPCSSYS_CR_ADDR__RDPCS_TX_CR_ADDR_MASK 0x0000FFFFL
+//DPCSSYS_CR0_DPCSSYS_CR_DATA
+#define DPCSSYS_CR0_DPCSSYS_CR_DATA__RDPCS_TX_CR_DATA__SHIFT 0x0
+#define DPCSSYS_CR0_DPCSSYS_CR_DATA__RDPCS_TX_CR_DATA_MASK 0x0000FFFFL
+
+
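+
+The DPCSSYS_CR_ADDR/DPCSSYS_CR_DATA pair above (which the offset header maps to the same dword offsets as RDPCS_TX_CR_ADDR/RDPCS_TX_CR_DATA) has the classic shape of an indirect address/data window into the PHY's configuration-register space: both fields are 16 bits wide, suggesting a CR address is latched first and the payload then moves through the data register. A sketch assuming exactly those write-address-then-access-data semantics -- an inference from the register layout, not something this header states -- reusing the hypothetical accessors from the previous sketch:
+
+#include <stdint.h>
+
+uint32_t reg_rd32(uint32_t reg);           /* hypothetical MMIO accessors */
+void reg_wr32(uint32_t reg, uint32_t val);
+
+static inline uint16_t dpcs_cr_read(uint32_t cr_addr_reg, uint32_t cr_data_reg,
+				    uint16_t phy_addr)
+{
+	reg_wr32(cr_addr_reg, phy_addr);        /* latch the PHY register address */
+	return (uint16_t)reg_rd32(cr_data_reg); /* value appears in the data window */
+}
+
+static inline void dpcs_cr_write(uint32_t cr_addr_reg, uint32_t cr_data_reg,
+				 uint16_t phy_addr, uint16_t val)
+{
+	reg_wr32(cr_addr_reg, phy_addr);
+	reg_wr32(cr_data_reg, val);
+}
+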
+// addressBlock: dpcssys_dpcs0_dpcstx1_dispdec
+//DPCSTX1_DPCSTX_TX_CLOCK_CNTL
+#define DPCSTX1_DPCSTX_TX_CLOCK_CNTL__DPCS_SYMCLK_GATE_DIS__SHIFT 0x0
+#define DPCSTX1_DPCSTX_TX_CLOCK_CNTL__DPCS_SYMCLK_EN__SHIFT 0x1
+#define DPCSTX1_DPCSTX_TX_CLOCK_CNTL__DPCS_SYMCLK_CLOCK_ON__SHIFT 0x2
+#define DPCSTX1_DPCSTX_TX_CLOCK_CNTL__DPCS_SYMCLK_DIV2_CLOCK_ON__SHIFT 0x3
+#define DPCSTX1_DPCSTX_TX_CLOCK_CNTL__DPCS_SYMCLK_GATE_DIS_MASK 0x00000001L
+#define DPCSTX1_DPCSTX_TX_CLOCK_CNTL__DPCS_SYMCLK_EN_MASK 0x00000002L
+#define DPCSTX1_DPCSTX_TX_CLOCK_CNTL__DPCS_SYMCLK_CLOCK_ON_MASK 0x00000004L
+#define DPCSTX1_DPCSTX_TX_CLOCK_CNTL__DPCS_SYMCLK_DIV2_CLOCK_ON_MASK 0x00000008L
+//DPCSTX1_DPCSTX_TX_CNTL
+#define DPCSTX1_DPCSTX_TX_CNTL__DPCS_TX_PLL_UPDATE_REQ__SHIFT 0xc
+#define DPCSTX1_DPCSTX_TX_CNTL__DPCS_TX_PLL_UPDATE_PENDING__SHIFT 0xd
+#define DPCSTX1_DPCSTX_TX_CNTL__DPCS_TX_DATA_SWAP__SHIFT 0xe
+#define DPCSTX1_DPCSTX_TX_CNTL__DPCS_TX_DATA_ORDER_INVERT__SHIFT 0xf
+#define DPCSTX1_DPCSTX_TX_CNTL__DPCS_TX_FIFO_EN__SHIFT 0x10
+#define DPCSTX1_DPCSTX_TX_CNTL__DPCS_TX_FIFO_START__SHIFT 0x11
+#define DPCSTX1_DPCSTX_TX_CNTL__DPCS_TX_FIFO_RD_START_DELAY__SHIFT 0x14
+#define DPCSTX1_DPCSTX_TX_CNTL__DPCS_TX_SOFT_RESET__SHIFT 0x1f
+#define DPCSTX1_DPCSTX_TX_CNTL__DPCS_TX_PLL_UPDATE_REQ_MASK 0x00001000L
+#define DPCSTX1_DPCSTX_TX_CNTL__DPCS_TX_PLL_UPDATE_PENDING_MASK 0x00002000L
+#define DPCSTX1_DPCSTX_TX_CNTL__DPCS_TX_DATA_SWAP_MASK 0x00004000L
+#define DPCSTX1_DPCSTX_TX_CNTL__DPCS_TX_DATA_ORDER_INVERT_MASK 0x00008000L
+#define DPCSTX1_DPCSTX_TX_CNTL__DPCS_TX_FIFO_EN_MASK 0x00010000L
+#define DPCSTX1_DPCSTX_TX_CNTL__DPCS_TX_FIFO_START_MASK 0x00020000L
+#define DPCSTX1_DPCSTX_TX_CNTL__DPCS_TX_FIFO_RD_START_DELAY_MASK 0x00F00000L
+#define DPCSTX1_DPCSTX_TX_CNTL__DPCS_TX_SOFT_RESET_MASK 0x80000000L
+//DPCSTX1_DPCSTX_CBUS_CNTL
+#define DPCSTX1_DPCSTX_CBUS_CNTL__DPCS_CBUS_WR_CMD_DELAY__SHIFT 0x0
+#define DPCSTX1_DPCSTX_CBUS_CNTL__DPCS_CBUS_SOFT_RESET__SHIFT 0x1f
+#define DPCSTX1_DPCSTX_CBUS_CNTL__DPCS_CBUS_WR_CMD_DELAY_MASK 0x000000FFL
+#define DPCSTX1_DPCSTX_CBUS_CNTL__DPCS_CBUS_SOFT_RESET_MASK 0x80000000L
+//DPCSTX1_DPCSTX_INTERRUPT_CNTL
+#define DPCSTX1_DPCSTX_INTERRUPT_CNTL__DPCS_REG_FIFO_OVERFLOW__SHIFT 0x0
+#define DPCSTX1_DPCSTX_INTERRUPT_CNTL__DPCS_REG_ERROR_CLR__SHIFT 0x1
+#define DPCSTX1_DPCSTX_INTERRUPT_CNTL__DPCS_REG_FIFO_ERROR_MASK__SHIFT 0x4
+#define DPCSTX1_DPCSTX_INTERRUPT_CNTL__DPCS_TX0_FIFO_ERROR__SHIFT 0x8
+#define DPCSTX1_DPCSTX_INTERRUPT_CNTL__DPCS_TX1_FIFO_ERROR__SHIFT 0x9
+#define DPCSTX1_DPCSTX_INTERRUPT_CNTL__DPCS_TX2_FIFO_ERROR__SHIFT 0xa
+#define DPCSTX1_DPCSTX_INTERRUPT_CNTL__DPCS_TX3_FIFO_ERROR__SHIFT 0xb
+#define DPCSTX1_DPCSTX_INTERRUPT_CNTL__DPCS_TX_ERROR_CLR__SHIFT 0xc
+#define DPCSTX1_DPCSTX_INTERRUPT_CNTL__DPCS_TX_FIFO_ERROR_MASK__SHIFT 0x10
+#define DPCSTX1_DPCSTX_INTERRUPT_CNTL__DPCS_INTERRUPT_MASK__SHIFT 0x14
+#define DPCSTX1_DPCSTX_INTERRUPT_CNTL__DPCS_REG_FIFO_OVERFLOW_MASK 0x00000001L
+#define DPCSTX1_DPCSTX_INTERRUPT_CNTL__DPCS_REG_ERROR_CLR_MASK 0x00000002L
+#define DPCSTX1_DPCSTX_INTERRUPT_CNTL__DPCS_REG_FIFO_ERROR_MASK_MASK 0x00000010L
+#define DPCSTX1_DPCSTX_INTERRUPT_CNTL__DPCS_TX0_FIFO_ERROR_MASK 0x00000100L
+#define DPCSTX1_DPCSTX_INTERRUPT_CNTL__DPCS_TX1_FIFO_ERROR_MASK 0x00000200L
+#define DPCSTX1_DPCSTX_INTERRUPT_CNTL__DPCS_TX2_FIFO_ERROR_MASK 0x00000400L
+#define DPCSTX1_DPCSTX_INTERRUPT_CNTL__DPCS_TX3_FIFO_ERROR_MASK 0x00000800L
+#define DPCSTX1_DPCSTX_INTERRUPT_CNTL__DPCS_TX_ERROR_CLR_MASK 0x00001000L
+#define DPCSTX1_DPCSTX_INTERRUPT_CNTL__DPCS_TX_FIFO_ERROR_MASK_MASK 0x00010000L
+#define DPCSTX1_DPCSTX_INTERRUPT_CNTL__DPCS_INTERRUPT_MASK_MASK 0x00100000L
+//DPCSTX1_DPCSTX_PLL_UPDATE_ADDR
+#define DPCSTX1_DPCSTX_PLL_UPDATE_ADDR__DPCS_PLL_UPDATE_ADDR__SHIFT 0x0
+#define DPCSTX1_DPCSTX_PLL_UPDATE_ADDR__DPCS_PLL_UPDATE_ADDR_MASK 0x0003FFFFL
+//DPCSTX1_DPCSTX_PLL_UPDATE_DATA
+#define DPCSTX1_DPCSTX_PLL_UPDATE_DATA__DPCS_PLL_UPDATE_DATA__SHIFT 0x0
+#define DPCSTX1_DPCSTX_PLL_UPDATE_DATA__DPCS_PLL_UPDATE_DATA_MASK 0xFFFFFFFFL
+//DPCSTX1_DPCSTX_DEBUG_CONFIG
+#define DPCSTX1_DPCSTX_DEBUG_CONFIG__DPCS_DBG_EN__SHIFT 0x0
+#define DPCSTX1_DPCSTX_DEBUG_CONFIG__DPCS_DBG_CFGCLK_SEL__SHIFT 0x1
+#define DPCSTX1_DPCSTX_DEBUG_CONFIG__DPCS_DBG_TX_SYMCLK_SEL__SHIFT 0x4
+#define DPCSTX1_DPCSTX_DEBUG_CONFIG__DPCS_DBG_TX_SYMCLK_DIV2_SEL__SHIFT 0x8
+#define DPCSTX1_DPCSTX_DEBUG_CONFIG__DPCS_DBG_CBUS_DIS__SHIFT 0xe
+#define DPCSTX1_DPCSTX_DEBUG_CONFIG__DPCS_TEST_DEBUG_WRITE_EN__SHIFT 0x10
+#define DPCSTX1_DPCSTX_DEBUG_CONFIG__DPCS_DBG_EN_MASK 0x00000001L
+#define DPCSTX1_DPCSTX_DEBUG_CONFIG__DPCS_DBG_CFGCLK_SEL_MASK 0x0000000EL
+#define DPCSTX1_DPCSTX_DEBUG_CONFIG__DPCS_DBG_TX_SYMCLK_SEL_MASK 0x00000070L
+#define DPCSTX1_DPCSTX_DEBUG_CONFIG__DPCS_DBG_TX_SYMCLK_DIV2_SEL_MASK 0x00000700L
+#define DPCSTX1_DPCSTX_DEBUG_CONFIG__DPCS_DBG_CBUS_DIS_MASK 0x00004000L
+#define DPCSTX1_DPCSTX_DEBUG_CONFIG__DPCS_TEST_DEBUG_WRITE_EN_MASK 0x00010000L
+
+
+// addressBlock: dpcssys_dpcs0_rdpcstx1_dispdec
+//RDPCSTX1_RDPCSTX_CNTL
+#define RDPCSTX1_RDPCSTX_CNTL__RDPCS_CBUS_SOFT_RESET__SHIFT 0x0
+#define RDPCSTX1_RDPCSTX_CNTL__RDPCS_SRAM_SOFT_RESET__SHIFT 0x4
+#define RDPCSTX1_RDPCSTX_CNTL__RDPCS_TX_FIFO_LANE0_EN__SHIFT 0xc
+#define RDPCSTX1_RDPCSTX_CNTL__RDPCS_TX_FIFO_LANE1_EN__SHIFT 0xd
+#define RDPCSTX1_RDPCSTX_CNTL__RDPCS_TX_FIFO_LANE2_EN__SHIFT 0xe
+#define RDPCSTX1_RDPCSTX_CNTL__RDPCS_TX_FIFO_LANE3_EN__SHIFT 0xf
+#define RDPCSTX1_RDPCSTX_CNTL__RDPCS_TX_FIFO_EN__SHIFT 0x10
+#define RDPCSTX1_RDPCSTX_CNTL__RDPCS_TX_FIFO_START__SHIFT 0x11
+#define RDPCSTX1_RDPCSTX_CNTL__RDPCS_TX_FIFO_RD_START_DELAY__SHIFT 0x14
+#define RDPCSTX1_RDPCSTX_CNTL__RDPCS_CR_REGISTER_BLOCK_EN__SHIFT 0x18
+#define RDPCSTX1_RDPCSTX_CNTL__RDPCS_NON_DPALT_REGISTER_BLOCK_EN__SHIFT 0x19
+#define RDPCSTX1_RDPCSTX_CNTL__RDPCS_DPALT_BLOCK_STATUS__SHIFT 0x1a
+#define RDPCSTX1_RDPCSTX_CNTL__RDPCS_TX_SOFT_RESET__SHIFT 0x1f
+#define RDPCSTX1_RDPCSTX_CNTL__RDPCS_CBUS_SOFT_RESET_MASK 0x00000001L
+#define RDPCSTX1_RDPCSTX_CNTL__RDPCS_SRAM_SOFT_RESET_MASK 0x00000010L
+#define RDPCSTX1_RDPCSTX_CNTL__RDPCS_TX_FIFO_LANE0_EN_MASK 0x00001000L
+#define RDPCSTX1_RDPCSTX_CNTL__RDPCS_TX_FIFO_LANE1_EN_MASK 0x00002000L
+#define RDPCSTX1_RDPCSTX_CNTL__RDPCS_TX_FIFO_LANE2_EN_MASK 0x00004000L
+#define RDPCSTX1_RDPCSTX_CNTL__RDPCS_TX_FIFO_LANE3_EN_MASK 0x00008000L
+#define RDPCSTX1_RDPCSTX_CNTL__RDPCS_TX_FIFO_EN_MASK 0x00010000L
+#define RDPCSTX1_RDPCSTX_CNTL__RDPCS_TX_FIFO_START_MASK 0x00020000L
+#define RDPCSTX1_RDPCSTX_CNTL__RDPCS_TX_FIFO_RD_START_DELAY_MASK 0x00F00000L
+#define RDPCSTX1_RDPCSTX_CNTL__RDPCS_CR_REGISTER_BLOCK_EN_MASK 0x01000000L
+#define RDPCSTX1_RDPCSTX_CNTL__RDPCS_NON_DPALT_REGISTER_BLOCK_EN_MASK 0x02000000L
+#define RDPCSTX1_RDPCSTX_CNTL__RDPCS_DPALT_BLOCK_STATUS_MASK 0x04000000L
+#define RDPCSTX1_RDPCSTX_CNTL__RDPCS_TX_SOFT_RESET_MASK 0x80000000L
+//RDPCSTX1_RDPCSTX_CLOCK_CNTL
+#define RDPCSTX1_RDPCSTX_CLOCK_CNTL__RDPCS_EXT_REFCLK_EN__SHIFT 0x0
+#define RDPCSTX1_RDPCSTX_CLOCK_CNTL__RDPCS_SYMCLK_DIV2_TX0_EN__SHIFT 0x4
+#define RDPCSTX1_RDPCSTX_CLOCK_CNTL__RDPCS_SYMCLK_DIV2_TX1_EN__SHIFT 0x5
+#define RDPCSTX1_RDPCSTX_CLOCK_CNTL__RDPCS_SYMCLK_DIV2_TX2_EN__SHIFT 0x6
+#define RDPCSTX1_RDPCSTX_CLOCK_CNTL__RDPCS_SYMCLK_DIV2_TX3_EN__SHIFT 0x7
+#define RDPCSTX1_RDPCSTX_CLOCK_CNTL__RDPCS_SYMCLK_DIV2_GATE_DIS__SHIFT 0x8
+#define RDPCSTX1_RDPCSTX_CLOCK_CNTL__RDPCS_SYMCLK_DIV2_EN__SHIFT 0x9
+#define RDPCSTX1_RDPCSTX_CLOCK_CNTL__RDPCS_SYMCLK_DIV2_CLOCK_ON__SHIFT 0xa
+#define RDPCSTX1_RDPCSTX_CLOCK_CNTL__RDPCS_SRAMCLK_GATE_DIS__SHIFT 0xc
+#define RDPCSTX1_RDPCSTX_CLOCK_CNTL__RDPCS_SRAMCLK_EN__SHIFT 0xd
+#define RDPCSTX1_RDPCSTX_CLOCK_CNTL__RDPCS_SRAMCLK_CLOCK_ON__SHIFT 0xe
+#define RDPCSTX1_RDPCSTX_CLOCK_CNTL__RDPCS_SRAMCLK_BYPASS__SHIFT 0x10
+#define RDPCSTX1_RDPCSTX_CLOCK_CNTL__RDPCS_EXT_REFCLK_EN_MASK 0x00000001L
+#define RDPCSTX1_RDPCSTX_CLOCK_CNTL__RDPCS_SYMCLK_DIV2_TX0_EN_MASK 0x00000010L
+#define RDPCSTX1_RDPCSTX_CLOCK_CNTL__RDPCS_SYMCLK_DIV2_TX1_EN_MASK 0x00000020L
+#define RDPCSTX1_RDPCSTX_CLOCK_CNTL__RDPCS_SYMCLK_DIV2_TX2_EN_MASK 0x00000040L
+#define RDPCSTX1_RDPCSTX_CLOCK_CNTL__RDPCS_SYMCLK_DIV2_TX3_EN_MASK 0x00000080L
+#define RDPCSTX1_RDPCSTX_CLOCK_CNTL__RDPCS_SYMCLK_DIV2_GATE_DIS_MASK 0x00000100L
+#define RDPCSTX1_RDPCSTX_CLOCK_CNTL__RDPCS_SYMCLK_DIV2_EN_MASK 0x00000200L
+#define RDPCSTX1_RDPCSTX_CLOCK_CNTL__RDPCS_SYMCLK_DIV2_CLOCK_ON_MASK 0x00000400L
+#define RDPCSTX1_RDPCSTX_CLOCK_CNTL__RDPCS_SRAMCLK_GATE_DIS_MASK 0x00001000L
+#define RDPCSTX1_RDPCSTX_CLOCK_CNTL__RDPCS_SRAMCLK_EN_MASK 0x00002000L
+#define RDPCSTX1_RDPCSTX_CLOCK_CNTL__RDPCS_SRAMCLK_CLOCK_ON_MASK 0x00004000L
+#define RDPCSTX1_RDPCSTX_CLOCK_CNTL__RDPCS_SRAMCLK_BYPASS_MASK 0x00010000L
+//RDPCSTX1_RDPCSTX_INTERRUPT_CONTROL
+#define RDPCSTX1_RDPCSTX_INTERRUPT_CONTROL__RDPCS_REG_FIFO_OVERFLOW__SHIFT 0x0
+#define RDPCSTX1_RDPCSTX_INTERRUPT_CONTROL__RDPCS_DPALT_DISABLE_TOGGLE__SHIFT 0x1
+#define RDPCSTX1_RDPCSTX_INTERRUPT_CONTROL__RDPCS_DPALT_4LANE_TOGGLE__SHIFT 0x2
+#define RDPCSTX1_RDPCSTX_INTERRUPT_CONTROL__RDPCS_TX0_FIFO_ERROR__SHIFT 0x4
+#define RDPCSTX1_RDPCSTX_INTERRUPT_CONTROL__RDPCS_TX1_FIFO_ERROR__SHIFT 0x5
+#define RDPCSTX1_RDPCSTX_INTERRUPT_CONTROL__RDPCS_TX2_FIFO_ERROR__SHIFT 0x6
+#define RDPCSTX1_RDPCSTX_INTERRUPT_CONTROL__RDPCS_TX3_FIFO_ERROR__SHIFT 0x7
+#define RDPCSTX1_RDPCSTX_INTERRUPT_CONTROL__RDPCS_REG_ERROR_CLR__SHIFT 0x8
+#define RDPCSTX1_RDPCSTX_INTERRUPT_CONTROL__RDPCS_DPALT_DISABLE_TOGGLE_CLR__SHIFT 0x9
+#define RDPCSTX1_RDPCSTX_INTERRUPT_CONTROL__RDPCS_DPALT_4LANE_TOGGLE_CLR__SHIFT 0xa
+#define RDPCSTX1_RDPCSTX_INTERRUPT_CONTROL__RDPCS_TX_ERROR_CLR__SHIFT 0xc
+#define RDPCSTX1_RDPCSTX_INTERRUPT_CONTROL__RDPCS_REG_FIFO_ERROR_MASK__SHIFT 0x10
+#define RDPCSTX1_RDPCSTX_INTERRUPT_CONTROL__RDPCS_DPALT_DISABLE_TOGGLE_MASK__SHIFT 0x11
+#define RDPCSTX1_RDPCSTX_INTERRUPT_CONTROL__RDPCS_DPALT_4LANE_TOGGLE_MASK__SHIFT 0x12
+#define RDPCSTX1_RDPCSTX_INTERRUPT_CONTROL__RDPCS_TX_FIFO_ERROR_MASK__SHIFT 0x14
+#define RDPCSTX1_RDPCSTX_INTERRUPT_CONTROL__RDPCS_REG_FIFO_OVERFLOW_MASK 0x00000001L
+#define RDPCSTX1_RDPCSTX_INTERRUPT_CONTROL__RDPCS_DPALT_DISABLE_TOGGLE_MASK 0x00000002L
+#define RDPCSTX1_RDPCSTX_INTERRUPT_CONTROL__RDPCS_DPALT_4LANE_TOGGLE_MASK 0x00000004L
+#define RDPCSTX1_RDPCSTX_INTERRUPT_CONTROL__RDPCS_TX0_FIFO_ERROR_MASK 0x00000010L
+#define RDPCSTX1_RDPCSTX_INTERRUPT_CONTROL__RDPCS_TX1_FIFO_ERROR_MASK 0x00000020L
+#define RDPCSTX1_RDPCSTX_INTERRUPT_CONTROL__RDPCS_TX2_FIFO_ERROR_MASK 0x00000040L
+#define RDPCSTX1_RDPCSTX_INTERRUPT_CONTROL__RDPCS_TX3_FIFO_ERROR_MASK 0x00000080L
+#define RDPCSTX1_RDPCSTX_INTERRUPT_CONTROL__RDPCS_REG_ERROR_CLR_MASK 0x00000100L
+#define RDPCSTX1_RDPCSTX_INTERRUPT_CONTROL__RDPCS_DPALT_DISABLE_TOGGLE_CLR_MASK 0x00000200L
+#define RDPCSTX1_RDPCSTX_INTERRUPT_CONTROL__RDPCS_DPALT_4LANE_TOGGLE_CLR_MASK 0x00000400L
+#define RDPCSTX1_RDPCSTX_INTERRUPT_CONTROL__RDPCS_TX_ERROR_CLR_MASK 0x00001000L
+#define RDPCSTX1_RDPCSTX_INTERRUPT_CONTROL__RDPCS_REG_FIFO_ERROR_MASK_MASK 0x00010000L
+#define RDPCSTX1_RDPCSTX_INTERRUPT_CONTROL__RDPCS_DPALT_DISABLE_TOGGLE_MASK_MASK 0x00020000L
+#define RDPCSTX1_RDPCSTX_INTERRUPT_CONTROL__RDPCS_DPALT_4LANE_TOGGLE_MASK_MASK 0x00040000L
+#define RDPCSTX1_RDPCSTX_INTERRUPT_CONTROL__RDPCS_TX_FIFO_ERROR_MASK_MASK 0x00100000L
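+
+/*
+ * Illustrative sketch only, not part of the generated header: this interrupt
+ * register pairs sticky status bits with clear and mask bits; note that fields
+ * which are themselves interrupt masks (e.g. RDPCS_REG_FIFO_ERROR_MASK) yield
+ * macro names ending in _MASK_MASK. The write-to-clear behaviour of
+ * RDPCS_REG_ERROR_CLR and the read_reg()/write_reg() MMIO callbacks are
+ * assumptions of this sketch.
+ */
+#ifdef DPCS_SH_MASK_EXAMPLE
+static inline void rdpcstx1_clear_reg_fifo_error(u32 (*read_reg)(void),
+						 void (*write_reg)(u32))
+{
+	u32 val = read_reg(); /* current RDPCSTX1_RDPCSTX_INTERRUPT_CONTROL value */
+
+	if (val & RDPCSTX1_RDPCSTX_INTERRUPT_CONTROL__RDPCS_REG_FIFO_OVERFLOW_MASK)
+		write_reg(val | RDPCSTX1_RDPCSTX_INTERRUPT_CONTROL__RDPCS_REG_ERROR_CLR_MASK);
+}
+#endif /* DPCS_SH_MASK_EXAMPLE */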
+//RDPCSTX1_RDPCSTX_PLL_UPDATE_DATA
+#define RDPCSTX1_RDPCSTX_PLL_UPDATE_DATA__RDPCS_PLL_UPDATE_DATA__SHIFT 0x0
+#define RDPCSTX1_RDPCSTX_PLL_UPDATE_DATA__RDPCS_PLL_UPDATE_DATA_MASK 0x00000001L
+//RDPCSTX1_RDPCS_TX_CR_ADDR
+#define RDPCSTX1_RDPCS_TX_CR_ADDR__RDPCS_TX_CR_ADDR__SHIFT 0x0
+#define RDPCSTX1_RDPCS_TX_CR_ADDR__RDPCS_TX_CR_ADDR_MASK 0x0000FFFFL
+//RDPCSTX1_RDPCS_TX_CR_DATA
+#define RDPCSTX1_RDPCS_TX_CR_DATA__RDPCS_TX_CR_DATA__SHIFT 0x0
+#define RDPCSTX1_RDPCS_TX_CR_DATA__RDPCS_TX_CR_DATA_MASK 0x0000FFFFL
+//RDPCSTX1_RDPCS_TX_SRAM_CNTL
+#define RDPCSTX1_RDPCS_TX_SRAM_CNTL__RDPCS_MEM_PWR_DIS__SHIFT 0x14
+#define RDPCSTX1_RDPCS_TX_SRAM_CNTL__RDPCS_MEM_PWR_FORCE__SHIFT 0x18
+#define RDPCSTX1_RDPCS_TX_SRAM_CNTL__RDPCS_MEM_PWR_PWR_STATE__SHIFT 0x1c
+#define RDPCSTX1_RDPCS_TX_SRAM_CNTL__RDPCS_MEM_PWR_DIS_MASK 0x00100000L
+#define RDPCSTX1_RDPCS_TX_SRAM_CNTL__RDPCS_MEM_PWR_FORCE_MASK 0x03000000L
+#define RDPCSTX1_RDPCS_TX_SRAM_CNTL__RDPCS_MEM_PWR_PWR_STATE_MASK 0x30000000L
+//RDPCSTX1_RDPCSTX_MEM_POWER_CTRL
+#define RDPCSTX1_RDPCSTX_MEM_POWER_CTRL__RDPCS_FUSE_RM_FUSES__SHIFT 0x0
+#define RDPCSTX1_RDPCSTX_MEM_POWER_CTRL__RDPCS_FUSE_CUSTOM_RM_FUSES__SHIFT 0xc
+#define RDPCSTX1_RDPCSTX_MEM_POWER_CTRL__RDPCS_MEM_POWER_CTRL_PDP_BC1__SHIFT 0x1a
+#define RDPCSTX1_RDPCSTX_MEM_POWER_CTRL__RDPCS_MEM_POWER_CTRL_PDP_BC2__SHIFT 0x1b
+#define RDPCSTX1_RDPCSTX_MEM_POWER_CTRL__RDPCS_MEM_POWER_CTRL_HD_BC1__SHIFT 0x1c
+#define RDPCSTX1_RDPCSTX_MEM_POWER_CTRL__RDPCS_MEM_POWER_CTRL_HD_BC2__SHIFT 0x1d
+#define RDPCSTX1_RDPCSTX_MEM_POWER_CTRL__RDPCS_LIVMIN_DIS_SRAM__SHIFT 0x1e
+#define RDPCSTX1_RDPCSTX_MEM_POWER_CTRL__RDPCS_FUSE_RM_FUSES_MASK 0x00000FFFL
+#define RDPCSTX1_RDPCSTX_MEM_POWER_CTRL__RDPCS_FUSE_CUSTOM_RM_FUSES_MASK 0x03FFF000L
+#define RDPCSTX1_RDPCSTX_MEM_POWER_CTRL__RDPCS_MEM_POWER_CTRL_PDP_BC1_MASK 0x04000000L
+#define RDPCSTX1_RDPCSTX_MEM_POWER_CTRL__RDPCS_MEM_POWER_CTRL_PDP_BC2_MASK 0x08000000L
+#define RDPCSTX1_RDPCSTX_MEM_POWER_CTRL__RDPCS_MEM_POWER_CTRL_HD_BC1_MASK 0x10000000L
+#define RDPCSTX1_RDPCSTX_MEM_POWER_CTRL__RDPCS_MEM_POWER_CTRL_HD_BC2_MASK 0x20000000L
+#define RDPCSTX1_RDPCSTX_MEM_POWER_CTRL__RDPCS_LIVMIN_DIS_SRAM_MASK 0x40000000L
+//RDPCSTX1_RDPCSTX_MEM_POWER_CTRL2
+#define RDPCSTX1_RDPCSTX_MEM_POWER_CTRL2__RDPCS_MEM_POWER_CTRL_POFF__SHIFT 0x0
+#define RDPCSTX1_RDPCSTX_MEM_POWER_CTRL2__RDPCS_MEM_POWER_CTRL_FISO__SHIFT 0x2
+#define RDPCSTX1_RDPCSTX_MEM_POWER_CTRL2__RDPCS_MEM_POWER_CTRL_POFF_MASK 0x00000003L
+#define RDPCSTX1_RDPCSTX_MEM_POWER_CTRL2__RDPCS_MEM_POWER_CTRL_FISO_MASK 0x00000004L
+//RDPCSTX1_RDPCSTX_SCRATCH
+#define RDPCSTX1_RDPCSTX_SCRATCH__RDPCSTX_SCRATCH__SHIFT 0x0
+#define RDPCSTX1_RDPCSTX_SCRATCH__RDPCSTX_SCRATCH_MASK 0xFFFFFFFFL
+//RDPCSTX1_RDPCSTX_DMCU_DPALT_DIS_BLOCK_REG
+#define RDPCSTX1_RDPCSTX_DMCU_DPALT_DIS_BLOCK_REG__RDPCS_DMCU_DPALT_DIS_BLOCK_REG__SHIFT 0x0
+#define RDPCSTX1_RDPCSTX_DMCU_DPALT_DIS_BLOCK_REG__RDPCS_DMCU_DPALT_FORCE_SYMCLK_DIV2_DIS__SHIFT 0x4
+#define RDPCSTX1_RDPCSTX_DMCU_DPALT_DIS_BLOCK_REG__RDPCS_DMCU_DPALT_CONTROL_SPARE__SHIFT 0x8
+#define RDPCSTX1_RDPCSTX_DMCU_DPALT_DIS_BLOCK_REG__RDPCS_DMCU_DPALT_DIS_BLOCK_REG_MASK 0x00000001L
+#define RDPCSTX1_RDPCSTX_DMCU_DPALT_DIS_BLOCK_REG__RDPCS_DMCU_DPALT_FORCE_SYMCLK_DIV2_DIS_MASK 0x00000010L
+#define RDPCSTX1_RDPCSTX_DMCU_DPALT_DIS_BLOCK_REG__RDPCS_DMCU_DPALT_CONTROL_SPARE_MASK 0x0000FF00L
+//RDPCSTX1_RDPCSTX_DEBUG_CONFIG
+#define RDPCSTX1_RDPCSTX_DEBUG_CONFIG__RDPCS_DBG_EN__SHIFT 0x0
+#define RDPCSTX1_RDPCSTX_DEBUG_CONFIG__RDPCS_DBG_SEL_ASYNC_8BIT__SHIFT 0x4
+#define RDPCSTX1_RDPCSTX_DEBUG_CONFIG__RDPCS_DBG_SEL_ASYNC_SWAP__SHIFT 0x7
+#define RDPCSTX1_RDPCSTX_DEBUG_CONFIG__RDPCS_DBG_SEL_TEST_CLK__SHIFT 0x8
+#define RDPCSTX1_RDPCSTX_DEBUG_CONFIG__RDPCS_DBG_CR_COUNT_EXPIRE__SHIFT 0xf
+#define RDPCSTX1_RDPCSTX_DEBUG_CONFIG__RDPCS_DBG_CR_COUNT_MAX__SHIFT 0x10
+#define RDPCSTX1_RDPCSTX_DEBUG_CONFIG__RDPCS_DBG_CR_COUNT__SHIFT 0x18
+#define RDPCSTX1_RDPCSTX_DEBUG_CONFIG__RDPCS_DBG_EN_MASK 0x00000001L
+#define RDPCSTX1_RDPCSTX_DEBUG_CONFIG__RDPCS_DBG_SEL_ASYNC_8BIT_MASK 0x00000070L
+#define RDPCSTX1_RDPCSTX_DEBUG_CONFIG__RDPCS_DBG_SEL_ASYNC_SWAP_MASK 0x00000080L
+#define RDPCSTX1_RDPCSTX_DEBUG_CONFIG__RDPCS_DBG_SEL_TEST_CLK_MASK 0x00001F00L
+#define RDPCSTX1_RDPCSTX_DEBUG_CONFIG__RDPCS_DBG_CR_COUNT_EXPIRE_MASK 0x00008000L
+#define RDPCSTX1_RDPCSTX_DEBUG_CONFIG__RDPCS_DBG_CR_COUNT_MAX_MASK 0x00FF0000L
+#define RDPCSTX1_RDPCSTX_DEBUG_CONFIG__RDPCS_DBG_CR_COUNT_MASK 0xFF000000L
+//RDPCSTX1_RDPCSTX_PHY_CNTL0
+#define RDPCSTX1_RDPCSTX_PHY_CNTL0__RDPCS_PHY_RESET__SHIFT 0x0
+#define RDPCSTX1_RDPCSTX_PHY_CNTL0__RDPCS_PHY_TCA_PHY_RESET__SHIFT 0x1
+#define RDPCSTX1_RDPCSTX_PHY_CNTL0__RDPCS_PHY_TCA_APB_RESET_N__SHIFT 0x2
+#define RDPCSTX1_RDPCSTX_PHY_CNTL0__RDPCS_PHY_TEST_POWERDOWN__SHIFT 0x3
+#define RDPCSTX1_RDPCSTX_PHY_CNTL0__RDPCS_PHY_DTB_OUT__SHIFT 0x4
+#define RDPCSTX1_RDPCSTX_PHY_CNTL0__RDPCS_PHY_HDMIMODE_ENABLE__SHIFT 0x8
+#define RDPCSTX1_RDPCSTX_PHY_CNTL0__RDPCS_PHY_REF_RANGE__SHIFT 0x9
+#define RDPCSTX1_RDPCSTX_PHY_CNTL0__RDPCS_PHY_TX_VBOOST_LVL__SHIFT 0xe
+#define RDPCSTX1_RDPCSTX_PHY_CNTL0__RDPCS_PHY_RTUNE_REQ__SHIFT 0x11
+#define RDPCSTX1_RDPCSTX_PHY_CNTL0__RDPCS_PHY_RTUNE_ACK__SHIFT 0x12
+#define RDPCSTX1_RDPCSTX_PHY_CNTL0__RDPCS_PHY_CR_PARA_SEL__SHIFT 0x14
+#define RDPCSTX1_RDPCSTX_PHY_CNTL0__RDPCS_PHY_CR_MUX_SEL__SHIFT 0x15
+#define RDPCSTX1_RDPCSTX_PHY_CNTL0__RDPCS_PHY_REF_CLKDET_EN__SHIFT 0x18
+#define RDPCSTX1_RDPCSTX_PHY_CNTL0__RDPCS_PHY_REF_CLKDET_RESULT__SHIFT 0x19
+#define RDPCSTX1_RDPCSTX_PHY_CNTL0__RDPCS_SRAM_INIT_DONE__SHIFT 0x1c
+#define RDPCSTX1_RDPCSTX_PHY_CNTL0__RDPCS_SRAM_EXT_LD_DONE__SHIFT 0x1d
+#define RDPCSTX1_RDPCSTX_PHY_CNTL0__RDPCS_SRAM_BYPASS__SHIFT 0x1f
+#define RDPCSTX1_RDPCSTX_PHY_CNTL0__RDPCS_PHY_RESET_MASK 0x00000001L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL0__RDPCS_PHY_TCA_PHY_RESET_MASK 0x00000002L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL0__RDPCS_PHY_TCA_APB_RESET_N_MASK 0x00000004L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL0__RDPCS_PHY_TEST_POWERDOWN_MASK 0x00000008L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL0__RDPCS_PHY_DTB_OUT_MASK 0x00000030L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL0__RDPCS_PHY_HDMIMODE_ENABLE_MASK 0x00000100L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL0__RDPCS_PHY_REF_RANGE_MASK 0x00003E00L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL0__RDPCS_PHY_TX_VBOOST_LVL_MASK 0x0001C000L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL0__RDPCS_PHY_RTUNE_REQ_MASK 0x00020000L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL0__RDPCS_PHY_RTUNE_ACK_MASK 0x00040000L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL0__RDPCS_PHY_CR_PARA_SEL_MASK 0x00100000L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL0__RDPCS_PHY_CR_MUX_SEL_MASK 0x00200000L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL0__RDPCS_PHY_REF_CLKDET_EN_MASK 0x01000000L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL0__RDPCS_PHY_REF_CLKDET_RESULT_MASK 0x02000000L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL0__RDPCS_SRAM_INIT_DONE_MASK 0x10000000L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL0__RDPCS_SRAM_EXT_LD_DONE_MASK 0x20000000L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL0__RDPCS_SRAM_BYPASS_MASK 0x80000000L
+//RDPCSTX1_RDPCSTX_PHY_CNTL1
+#define RDPCSTX1_RDPCSTX_PHY_CNTL1__RDPCS_PHY_PG_MODE_EN__SHIFT 0x0
+#define RDPCSTX1_RDPCSTX_PHY_CNTL1__RDPCS_PHY_PCS_PWR_EN__SHIFT 0x1
+#define RDPCSTX1_RDPCSTX_PHY_CNTL1__RDPCS_PHY_PCS_PWR_STABLE__SHIFT 0x2
+#define RDPCSTX1_RDPCSTX_PHY_CNTL1__RDPCS_PHY_PMA_PWR_EN__SHIFT 0x3
+#define RDPCSTX1_RDPCSTX_PHY_CNTL1__RDPCS_PHY_PMA_PWR_STABLE__SHIFT 0x4
+#define RDPCSTX1_RDPCSTX_PHY_CNTL1__RDPCS_PHY_DP_PG_RESET__SHIFT 0x5
+#define RDPCSTX1_RDPCSTX_PHY_CNTL1__RDPCS_PHY_ANA_PWR_EN__SHIFT 0x6
+#define RDPCSTX1_RDPCSTX_PHY_CNTL1__RDPCS_PHY_ANA_PWR_STABLE__SHIFT 0x7
+#define RDPCSTX1_RDPCSTX_PHY_CNTL1__RDPCS_PHY_PG_MODE_EN_MASK 0x00000001L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL1__RDPCS_PHY_PCS_PWR_EN_MASK 0x00000002L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL1__RDPCS_PHY_PCS_PWR_STABLE_MASK 0x00000004L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL1__RDPCS_PHY_PMA_PWR_EN_MASK 0x00000008L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL1__RDPCS_PHY_PMA_PWR_STABLE_MASK 0x00000010L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL1__RDPCS_PHY_DP_PG_RESET_MASK 0x00000020L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL1__RDPCS_PHY_ANA_PWR_EN_MASK 0x00000040L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL1__RDPCS_PHY_ANA_PWR_STABLE_MASK 0x00000080L
+//RDPCSTX1_RDPCSTX_PHY_CNTL2
+#define RDPCSTX1_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP4_POR__SHIFT 0x3
+#define RDPCSTX1_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP_LANE0_RX2TX_PAR_LB_EN__SHIFT 0x4
+#define RDPCSTX1_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP_LANE1_RX2TX_PAR_LB_EN__SHIFT 0x5
+#define RDPCSTX1_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP_LANE2_RX2TX_PAR_LB_EN__SHIFT 0x6
+#define RDPCSTX1_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP_LANE3_RX2TX_PAR_LB_EN__SHIFT 0x7
+#define RDPCSTX1_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP_LANE0_TX2RX_SER_LB_EN__SHIFT 0x8
+#define RDPCSTX1_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP_LANE1_TX2RX_SER_LB_EN__SHIFT 0x9
+#define RDPCSTX1_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP_LANE2_TX2RX_SER_LB_EN__SHIFT 0xa
+#define RDPCSTX1_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP_LANE3_TX2RX_SER_LB_EN__SHIFT 0xb
+#define RDPCSTX1_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP4_POR_MASK 0x00000008L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP_LANE0_RX2TX_PAR_LB_EN_MASK 0x00000010L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP_LANE1_RX2TX_PAR_LB_EN_MASK 0x00000020L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP_LANE2_RX2TX_PAR_LB_EN_MASK 0x00000040L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP_LANE3_RX2TX_PAR_LB_EN_MASK 0x00000080L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP_LANE0_TX2RX_SER_LB_EN_MASK 0x00000100L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP_LANE1_TX2RX_SER_LB_EN_MASK 0x00000200L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP_LANE2_TX2RX_SER_LB_EN_MASK 0x00000400L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP_LANE3_TX2RX_SER_LB_EN_MASK 0x00000800L
+//RDPCSTX1_RDPCSTX_PHY_CNTL3
+#define RDPCSTX1_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX0_RESET__SHIFT 0x0
+#define RDPCSTX1_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX0_DISABLE__SHIFT 0x1
+#define RDPCSTX1_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX0_CLK_RDY__SHIFT 0x2
+#define RDPCSTX1_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX0_DATA_EN__SHIFT 0x3
+#define RDPCSTX1_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX0_REQ__SHIFT 0x4
+#define RDPCSTX1_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX0_ACK__SHIFT 0x5
+#define RDPCSTX1_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX1_RESET__SHIFT 0x8
+#define RDPCSTX1_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX1_DISABLE__SHIFT 0x9
+#define RDPCSTX1_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX1_CLK_RDY__SHIFT 0xa
+#define RDPCSTX1_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX1_DATA_EN__SHIFT 0xb
+#define RDPCSTX1_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX1_REQ__SHIFT 0xc
+#define RDPCSTX1_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX1_ACK__SHIFT 0xd
+#define RDPCSTX1_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX2_RESET__SHIFT 0x10
+#define RDPCSTX1_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX2_DISABLE__SHIFT 0x11
+#define RDPCSTX1_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX2_CLK_RDY__SHIFT 0x12
+#define RDPCSTX1_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX2_DATA_EN__SHIFT 0x13
+#define RDPCSTX1_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX2_REQ__SHIFT 0x14
+#define RDPCSTX1_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX2_ACK__SHIFT 0x15
+#define RDPCSTX1_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX3_RESET__SHIFT 0x18
+#define RDPCSTX1_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX3_DISABLE__SHIFT 0x19
+#define RDPCSTX1_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX3_CLK_RDY__SHIFT 0x1a
+#define RDPCSTX1_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX3_DATA_EN__SHIFT 0x1b
+#define RDPCSTX1_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX3_REQ__SHIFT 0x1c
+#define RDPCSTX1_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX3_ACK__SHIFT 0x1d
+#define RDPCSTX1_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX0_RESET_MASK 0x00000001L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX0_DISABLE_MASK 0x00000002L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX0_CLK_RDY_MASK 0x00000004L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX0_DATA_EN_MASK 0x00000008L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX0_REQ_MASK 0x00000010L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX0_ACK_MASK 0x00000020L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX1_RESET_MASK 0x00000100L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX1_DISABLE_MASK 0x00000200L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX1_CLK_RDY_MASK 0x00000400L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX1_DATA_EN_MASK 0x00000800L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX1_REQ_MASK 0x00001000L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX1_ACK_MASK 0x00002000L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX2_RESET_MASK 0x00010000L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX2_DISABLE_MASK 0x00020000L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX2_CLK_RDY_MASK 0x00040000L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX2_DATA_EN_MASK 0x00080000L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX2_REQ_MASK 0x00100000L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX2_ACK_MASK 0x00200000L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX3_RESET_MASK 0x01000000L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX3_DISABLE_MASK 0x02000000L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX3_CLK_RDY_MASK 0x04000000L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX3_DATA_EN_MASK 0x08000000L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX3_REQ_MASK 0x10000000L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX3_ACK_MASK 0x20000000L
+//RDPCSTX1_RDPCSTX_PHY_CNTL4
+#define RDPCSTX1_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX0_TERM_CTRL__SHIFT 0x0
+#define RDPCSTX1_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX0_INVERT__SHIFT 0x4
+#define RDPCSTX1_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX0_BYPASS_EQ_CALC__SHIFT 0x6
+#define RDPCSTX1_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX0_HP_PROT_EN__SHIFT 0x7
+#define RDPCSTX1_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX1_TERM_CTRL__SHIFT 0x8
+#define RDPCSTX1_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX1_INVERT__SHIFT 0xc
+#define RDPCSTX1_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX1_BYPASS_EQ_CALC__SHIFT 0xe
+#define RDPCSTX1_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX1_HP_PROT_EN__SHIFT 0xf
+#define RDPCSTX1_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX2_TERM_CTRL__SHIFT 0x10
+#define RDPCSTX1_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX2_INVERT__SHIFT 0x14
+#define RDPCSTX1_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX2_BYPASS_EQ_CALC__SHIFT 0x16
+#define RDPCSTX1_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX2_HP_PROT_EN__SHIFT 0x17
+#define RDPCSTX1_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX3_TERM_CTRL__SHIFT 0x18
+#define RDPCSTX1_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX3_INVERT__SHIFT 0x1c
+#define RDPCSTX1_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX3_BYPASS_EQ_CALC__SHIFT 0x1e
+#define RDPCSTX1_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX3_HP_PROT_EN__SHIFT 0x1f
+#define RDPCSTX1_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX0_TERM_CTRL_MASK 0x00000007L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX0_INVERT_MASK 0x00000010L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX0_BYPASS_EQ_CALC_MASK 0x00000040L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX0_HP_PROT_EN_MASK 0x00000080L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX1_TERM_CTRL_MASK 0x00000700L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX1_INVERT_MASK 0x00001000L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX1_BYPASS_EQ_CALC_MASK 0x00004000L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX1_HP_PROT_EN_MASK 0x00008000L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX2_TERM_CTRL_MASK 0x00070000L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX2_INVERT_MASK 0x00100000L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX2_BYPASS_EQ_CALC_MASK 0x00400000L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX2_HP_PROT_EN_MASK 0x00800000L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX3_TERM_CTRL_MASK 0x07000000L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX3_INVERT_MASK 0x10000000L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX3_BYPASS_EQ_CALC_MASK 0x40000000L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX3_HP_PROT_EN_MASK 0x80000000L
+//RDPCSTX1_RDPCSTX_PHY_CNTL5
+#define RDPCSTX1_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX0_LPD__SHIFT 0x0
+#define RDPCSTX1_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX0_RATE__SHIFT 0x1
+#define RDPCSTX1_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX0_WIDTH__SHIFT 0x4
+#define RDPCSTX1_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX0_DETRX_REQ__SHIFT 0x6
+#define RDPCSTX1_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX0_DETRX_RESULT__SHIFT 0x7
+#define RDPCSTX1_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX1_LPD__SHIFT 0x8
+#define RDPCSTX1_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX1_RATE__SHIFT 0x9
+#define RDPCSTX1_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX1_WIDTH__SHIFT 0xc
+#define RDPCSTX1_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX1_DETRX_REQ__SHIFT 0xe
+#define RDPCSTX1_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX1_DETRX_RESULT__SHIFT 0xf
+#define RDPCSTX1_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX2_LPD__SHIFT 0x10
+#define RDPCSTX1_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX2_RATE__SHIFT 0x11
+#define RDPCSTX1_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX2_WIDTH__SHIFT 0x14
+#define RDPCSTX1_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX2_DETRX_REQ__SHIFT 0x16
+#define RDPCSTX1_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX2_DETRX_RESULT__SHIFT 0x17
+#define RDPCSTX1_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX3_LPD__SHIFT 0x18
+#define RDPCSTX1_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX3_RATE__SHIFT 0x19
+#define RDPCSTX1_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX3_WIDTH__SHIFT 0x1c
+#define RDPCSTX1_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX3_DETRX_REQ__SHIFT 0x1e
+#define RDPCSTX1_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX3_DETRX_RESULT__SHIFT 0x1f
+#define RDPCSTX1_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX0_LPD_MASK 0x00000001L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX0_RATE_MASK 0x0000000EL
+#define RDPCSTX1_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX0_WIDTH_MASK 0x00000030L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX0_DETRX_REQ_MASK 0x00000040L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX0_DETRX_RESULT_MASK 0x00000080L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX1_LPD_MASK 0x00000100L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX1_RATE_MASK 0x00000E00L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX1_WIDTH_MASK 0x00003000L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX1_DETRX_REQ_MASK 0x00004000L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX1_DETRX_RESULT_MASK 0x00008000L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX2_LPD_MASK 0x00010000L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX2_RATE_MASK 0x000E0000L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX2_WIDTH_MASK 0x00300000L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX2_DETRX_REQ_MASK 0x00400000L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX2_DETRX_RESULT_MASK 0x00800000L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX3_LPD_MASK 0x01000000L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX3_RATE_MASK 0x0E000000L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX3_WIDTH_MASK 0x30000000L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX3_DETRX_REQ_MASK 0x40000000L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX3_DETRX_RESULT_MASK 0x80000000L
+//RDPCSTX1_RDPCSTX_PHY_CNTL6
+#define RDPCSTX1_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX0_PSTATE__SHIFT 0x0
+#define RDPCSTX1_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX0_MPLL_EN__SHIFT 0x2
+#define RDPCSTX1_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX1_PSTATE__SHIFT 0x4
+#define RDPCSTX1_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX1_MPLL_EN__SHIFT 0x6
+#define RDPCSTX1_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX2_PSTATE__SHIFT 0x8
+#define RDPCSTX1_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX2_MPLL_EN__SHIFT 0xa
+#define RDPCSTX1_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX3_PSTATE__SHIFT 0xc
+#define RDPCSTX1_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX3_MPLL_EN__SHIFT 0xe
+#define RDPCSTX1_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DPALT_DP4__SHIFT 0x10
+#define RDPCSTX1_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DPALT_DISABLE__SHIFT 0x11
+#define RDPCSTX1_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DPALT_DISABLE_ACK__SHIFT 0x12
+#define RDPCSTX1_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_REF_CLK_EN__SHIFT 0x13
+#define RDPCSTX1_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_REF_CLK_REQ__SHIFT 0x14
+#define RDPCSTX1_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX0_PSTATE_MASK 0x00000003L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX0_MPLL_EN_MASK 0x00000004L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX1_PSTATE_MASK 0x00000030L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX1_MPLL_EN_MASK 0x00000040L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX2_PSTATE_MASK 0x00000300L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX2_MPLL_EN_MASK 0x00000400L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX3_PSTATE_MASK 0x00003000L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX3_MPLL_EN_MASK 0x00004000L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DPALT_DP4_MASK 0x00010000L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DPALT_DISABLE_MASK 0x00020000L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DPALT_DISABLE_ACK_MASK 0x00040000L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_REF_CLK_EN_MASK 0x00080000L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_REF_CLK_REQ_MASK 0x00100000L
+//RDPCSTX1_RDPCSTX_PHY_CNTL7
+#define RDPCSTX1_RDPCSTX_PHY_CNTL7__RDPCS_PHY_DP_MPLLB_FRACN_DEN__SHIFT 0x0
+#define RDPCSTX1_RDPCSTX_PHY_CNTL7__RDPCS_PHY_DP_MPLLB_FRACN_QUOT__SHIFT 0x10
+#define RDPCSTX1_RDPCSTX_PHY_CNTL7__RDPCS_PHY_DP_MPLLB_FRACN_DEN_MASK 0x0000FFFFL
+#define RDPCSTX1_RDPCSTX_PHY_CNTL7__RDPCS_PHY_DP_MPLLB_FRACN_QUOT_MASK 0xFFFF0000L
+//RDPCSTX1_RDPCSTX_PHY_CNTL8
+#define RDPCSTX1_RDPCSTX_PHY_CNTL8__RDPCS_PHY_DP_MPLLB_SSC_PEAK__SHIFT 0x0
+#define RDPCSTX1_RDPCSTX_PHY_CNTL8__RDPCS_PHY_DP_MPLLB_SSC_PEAK_MASK 0x000FFFFFL
+//RDPCSTX1_RDPCSTX_PHY_CNTL9
+#define RDPCSTX1_RDPCSTX_PHY_CNTL9__RDPCS_PHY_DP_MPLLB_SSC_STEPSIZE__SHIFT 0x0
+#define RDPCSTX1_RDPCSTX_PHY_CNTL9__RDPCS_PHY_DP_MPLLB_SSC_UP_SPREAD__SHIFT 0x18
+#define RDPCSTX1_RDPCSTX_PHY_CNTL9__RDPCS_PHY_DP_MPLLB_SSC_STEPSIZE_MASK 0x001FFFFFL
+#define RDPCSTX1_RDPCSTX_PHY_CNTL9__RDPCS_PHY_DP_MPLLB_SSC_UP_SPREAD_MASK 0x01000000L
+//RDPCSTX1_RDPCSTX_PHY_CNTL10
+#define RDPCSTX1_RDPCSTX_PHY_CNTL10__RDPCS_PHY_DP_MPLLB_FRACN_REM__SHIFT 0x0
+#define RDPCSTX1_RDPCSTX_PHY_CNTL10__RDPCS_PHY_DP_MPLLB_FRACN_REM_MASK 0x0000FFFFL
+//RDPCSTX1_RDPCSTX_PHY_CNTL11
+#define RDPCSTX1_RDPCSTX_PHY_CNTL11__RDPCS_PHY_DP_MPLLB_MULTIPLIER__SHIFT 0x4
+#define RDPCSTX1_RDPCSTX_PHY_CNTL11__RDPCS_PHY_HDMI_MPLLB_HDMI_DIV__SHIFT 0x10
+#define RDPCSTX1_RDPCSTX_PHY_CNTL11__RDPCS_PHY_DP_REF_CLK_MPLLB_DIV__SHIFT 0x14
+#define RDPCSTX1_RDPCSTX_PHY_CNTL11__RDPCS_PHY_HDMI_MPLLB_HDMI_PIXEL_CLK_DIV__SHIFT 0x18
+#define RDPCSTX1_RDPCSTX_PHY_CNTL11__RDPCS_PHY_DP_MPLLB_MULTIPLIER_MASK 0x0000FFF0L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL11__RDPCS_PHY_HDMI_MPLLB_HDMI_DIV_MASK 0x00070000L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL11__RDPCS_PHY_DP_REF_CLK_MPLLB_DIV_MASK 0x00700000L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL11__RDPCS_PHY_HDMI_MPLLB_HDMI_PIXEL_CLK_DIV_MASK 0x03000000L
+//RDPCSTX1_RDPCSTX_PHY_CNTL12
+#define RDPCSTX1_RDPCSTX_PHY_CNTL12__RDPCS_PHY_DP_MPLLB_DIV5_CLK_EN__SHIFT 0x0
+#define RDPCSTX1_RDPCSTX_PHY_CNTL12__RDPCS_PHY_DP_MPLLB_WORD_DIV2_EN__SHIFT 0x2
+#define RDPCSTX1_RDPCSTX_PHY_CNTL12__RDPCS_PHY_DP_MPLLB_TX_CLK_DIV__SHIFT 0x4
+#define RDPCSTX1_RDPCSTX_PHY_CNTL12__RDPCS_PHY_DP_MPLLB_STATE__SHIFT 0x7
+#define RDPCSTX1_RDPCSTX_PHY_CNTL12__RDPCS_PHY_DP_MPLLB_SSC_EN__SHIFT 0x8
+#define RDPCSTX1_RDPCSTX_PHY_CNTL12__RDPCS_PHY_DP_MPLLB_DIV5_CLK_EN_MASK 0x00000001L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL12__RDPCS_PHY_DP_MPLLB_WORD_DIV2_EN_MASK 0x00000004L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL12__RDPCS_PHY_DP_MPLLB_TX_CLK_DIV_MASK 0x00000070L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL12__RDPCS_PHY_DP_MPLLB_STATE_MASK 0x00000080L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL12__RDPCS_PHY_DP_MPLLB_SSC_EN_MASK 0x00000100L
+//RDPCSTX1_RDPCSTX_PHY_CNTL13
+#define RDPCSTX1_RDPCSTX_PHY_CNTL13__RDPCS_PHY_DP_MPLLB_DIV_MULTIPLIER__SHIFT 0x14
+#define RDPCSTX1_RDPCSTX_PHY_CNTL13__RDPCS_PHY_DP_MPLLB_DIV_CLK_EN__SHIFT 0x1c
+#define RDPCSTX1_RDPCSTX_PHY_CNTL13__RDPCS_PHY_DP_MPLLB_FORCE_EN__SHIFT 0x1d
+#define RDPCSTX1_RDPCSTX_PHY_CNTL13__RDPCS_PHY_DP_MPLLB_INIT_CAL_DISABLE__SHIFT 0x1e
+#define RDPCSTX1_RDPCSTX_PHY_CNTL13__RDPCS_PHY_DP_MPLLB_DIV_MULTIPLIER_MASK 0x0FF00000L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL13__RDPCS_PHY_DP_MPLLB_DIV_CLK_EN_MASK 0x10000000L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL13__RDPCS_PHY_DP_MPLLB_FORCE_EN_MASK 0x20000000L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL13__RDPCS_PHY_DP_MPLLB_INIT_CAL_DISABLE_MASK 0x40000000L
+//RDPCSTX1_RDPCSTX_PHY_CNTL14
+#define RDPCSTX1_RDPCSTX_PHY_CNTL14__RDPCS_PHY_DP_MPLLB_CAL_FORCE__SHIFT 0x0
+#define RDPCSTX1_RDPCSTX_PHY_CNTL14__RDPCS_PHY_DP_MPLLB_FRACN_EN__SHIFT 0x18
+#define RDPCSTX1_RDPCSTX_PHY_CNTL14__RDPCS_PHY_DP_MPLLB_PMIX_EN__SHIFT 0x1c
+#define RDPCSTX1_RDPCSTX_PHY_CNTL14__RDPCS_PHY_DP_MPLLB_CAL_FORCE_MASK 0x00000001L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL14__RDPCS_PHY_DP_MPLLB_FRACN_EN_MASK 0x01000000L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL14__RDPCS_PHY_DP_MPLLB_PMIX_EN_MASK 0x10000000L
+//RDPCSTX1_RDPCSTX_PHY_FUSE0
+#define RDPCSTX1_RDPCSTX_PHY_FUSE0__RDPCS_PHY_DP_TX0_EQ_MAIN__SHIFT 0x0
+#define RDPCSTX1_RDPCSTX_PHY_FUSE0__RDPCS_PHY_DP_TX0_EQ_PRE__SHIFT 0x6
+#define RDPCSTX1_RDPCSTX_PHY_FUSE0__RDPCS_PHY_DP_TX0_EQ_POST__SHIFT 0xc
+#define RDPCSTX1_RDPCSTX_PHY_FUSE0__RDPCS_PHY_DP_MPLLB_V2I__SHIFT 0x12
+#define RDPCSTX1_RDPCSTX_PHY_FUSE0__RDPCS_PHY_DP_MPLLB_FREQ_VCO__SHIFT 0x14
+#define RDPCSTX1_RDPCSTX_PHY_FUSE0__RDPCS_PHY_DP_TX0_EQ_MAIN_MASK 0x0000003FL
+#define RDPCSTX1_RDPCSTX_PHY_FUSE0__RDPCS_PHY_DP_TX0_EQ_PRE_MASK 0x00000FC0L
+#define RDPCSTX1_RDPCSTX_PHY_FUSE0__RDPCS_PHY_DP_TX0_EQ_POST_MASK 0x0003F000L
+#define RDPCSTX1_RDPCSTX_PHY_FUSE0__RDPCS_PHY_DP_MPLLB_V2I_MASK 0x000C0000L
+#define RDPCSTX1_RDPCSTX_PHY_FUSE0__RDPCS_PHY_DP_MPLLB_FREQ_VCO_MASK 0x00300000L
+//RDPCSTX1_RDPCSTX_PHY_FUSE1
+#define RDPCSTX1_RDPCSTX_PHY_FUSE1__RDPCS_PHY_DP_TX1_EQ_MAIN__SHIFT 0x0
+#define RDPCSTX1_RDPCSTX_PHY_FUSE1__RDPCS_PHY_DP_TX1_EQ_PRE__SHIFT 0x6
+#define RDPCSTX1_RDPCSTX_PHY_FUSE1__RDPCS_PHY_DP_TX1_EQ_POST__SHIFT 0xc
+#define RDPCSTX1_RDPCSTX_PHY_FUSE1__RDPCS_PHY_DP_MPLLB_CP_INT__SHIFT 0x12
+#define RDPCSTX1_RDPCSTX_PHY_FUSE1__RDPCS_PHY_DP_MPLLB_CP_PROP__SHIFT 0x19
+#define RDPCSTX1_RDPCSTX_PHY_FUSE1__RDPCS_PHY_DP_TX1_EQ_MAIN_MASK 0x0000003FL
+#define RDPCSTX1_RDPCSTX_PHY_FUSE1__RDPCS_PHY_DP_TX1_EQ_PRE_MASK 0x00000FC0L
+#define RDPCSTX1_RDPCSTX_PHY_FUSE1__RDPCS_PHY_DP_TX1_EQ_POST_MASK 0x0003F000L
+#define RDPCSTX1_RDPCSTX_PHY_FUSE1__RDPCS_PHY_DP_MPLLB_CP_INT_MASK 0x01FC0000L
+#define RDPCSTX1_RDPCSTX_PHY_FUSE1__RDPCS_PHY_DP_MPLLB_CP_PROP_MASK 0xFE000000L
+//RDPCSTX1_RDPCSTX_PHY_FUSE2
+#define RDPCSTX1_RDPCSTX_PHY_FUSE2__RDPCS_PHY_DP_TX2_EQ_MAIN__SHIFT 0x0
+#define RDPCSTX1_RDPCSTX_PHY_FUSE2__RDPCS_PHY_DP_TX2_EQ_PRE__SHIFT 0x6
+#define RDPCSTX1_RDPCSTX_PHY_FUSE2__RDPCS_PHY_DP_TX2_EQ_POST__SHIFT 0xc
+#define RDPCSTX1_RDPCSTX_PHY_FUSE2__RDPCS_PHY_DP_TX2_EQ_MAIN_MASK 0x0000003FL
+#define RDPCSTX1_RDPCSTX_PHY_FUSE2__RDPCS_PHY_DP_TX2_EQ_PRE_MASK 0x00000FC0L
+#define RDPCSTX1_RDPCSTX_PHY_FUSE2__RDPCS_PHY_DP_TX2_EQ_POST_MASK 0x0003F000L
+//RDPCSTX1_RDPCSTX_PHY_FUSE3
+#define RDPCSTX1_RDPCSTX_PHY_FUSE3__RDPCS_PHY_DP_TX3_EQ_MAIN__SHIFT 0x0
+#define RDPCSTX1_RDPCSTX_PHY_FUSE3__RDPCS_PHY_DP_TX3_EQ_PRE__SHIFT 0x6
+#define RDPCSTX1_RDPCSTX_PHY_FUSE3__RDPCS_PHY_DP_TX3_EQ_POST__SHIFT 0xc
+#define RDPCSTX1_RDPCSTX_PHY_FUSE3__RDPCS_PHY_DCO_FINETUNE__SHIFT 0x12
+#define RDPCSTX1_RDPCSTX_PHY_FUSE3__RDPCS_PHY_DCO_RANGE__SHIFT 0x18
+#define RDPCSTX1_RDPCSTX_PHY_FUSE3__RDPCS_PHY_DP_TX3_EQ_MAIN_MASK 0x0000003FL
+#define RDPCSTX1_RDPCSTX_PHY_FUSE3__RDPCS_PHY_DP_TX3_EQ_PRE_MASK 0x00000FC0L
+#define RDPCSTX1_RDPCSTX_PHY_FUSE3__RDPCS_PHY_DP_TX3_EQ_POST_MASK 0x0003F000L
+#define RDPCSTX1_RDPCSTX_PHY_FUSE3__RDPCS_PHY_DCO_FINETUNE_MASK 0x00FC0000L
+#define RDPCSTX1_RDPCSTX_PHY_FUSE3__RDPCS_PHY_DCO_RANGE_MASK 0x03000000L
+//RDPCSTX1_RDPCSTX_PHY_RX_LD_VAL
+#define RDPCSTX1_RDPCSTX_PHY_RX_LD_VAL__RDPCS_PHY_RX_REF_LD_VAL__SHIFT 0x0
+#define RDPCSTX1_RDPCSTX_PHY_RX_LD_VAL__RDPCS_PHY_RX_VCO_LD_VAL__SHIFT 0x8
+#define RDPCSTX1_RDPCSTX_PHY_RX_LD_VAL__RDPCS_PHY_RX_REF_LD_VAL_MASK 0x0000007FL
+#define RDPCSTX1_RDPCSTX_PHY_RX_LD_VAL__RDPCS_PHY_RX_VCO_LD_VAL_MASK 0x001FFF00L
+//RDPCSTX1_RDPCSTX_DMCU_DPALT_PHY_CNTL3
+#define RDPCSTX1_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX0_RESET_RESERVED__SHIFT 0x0
+#define RDPCSTX1_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX0_DISABLE_RESERVED__SHIFT 0x1
+#define RDPCSTX1_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX0_CLK_RDY_RESERVED__SHIFT 0x2
+#define RDPCSTX1_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX0_DATA_EN_RESERVED__SHIFT 0x3
+#define RDPCSTX1_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX0_REQ_RESERVED__SHIFT 0x4
+#define RDPCSTX1_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX0_ACK_RESERVED__SHIFT 0x5
+#define RDPCSTX1_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX1_RESET_RESERVED__SHIFT 0x8
+#define RDPCSTX1_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX1_DISABLE_RESERVED__SHIFT 0x9
+#define RDPCSTX1_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX1_CLK_RDY_RESERVED__SHIFT 0xa
+#define RDPCSTX1_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX1_DATA_EN_RESERVED__SHIFT 0xb
+#define RDPCSTX1_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX1_REQ_RESERVED__SHIFT 0xc
+#define RDPCSTX1_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX1_ACK_RESERVED__SHIFT 0xd
+#define RDPCSTX1_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX2_RESET_RESERVED__SHIFT 0x10
+#define RDPCSTX1_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX2_DISABLE_RESERVED__SHIFT 0x11
+#define RDPCSTX1_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX2_CLK_RDY_RESERVED__SHIFT 0x12
+#define RDPCSTX1_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX2_DATA_EN_RESERVED__SHIFT 0x13
+#define RDPCSTX1_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX2_REQ_RESERVED__SHIFT 0x14
+#define RDPCSTX1_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX2_ACK_RESERVED__SHIFT 0x15
+#define RDPCSTX1_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX3_RESET_RESERVED__SHIFT 0x18
+#define RDPCSTX1_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX3_DISABLE_RESERVED__SHIFT 0x19
+#define RDPCSTX1_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX3_CLK_RDY_RESERVED__SHIFT 0x1a
+#define RDPCSTX1_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX3_DATA_EN_RESERVED__SHIFT 0x1b
+#define RDPCSTX1_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX3_REQ_RESERVED__SHIFT 0x1c
+#define RDPCSTX1_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX3_ACK_RESERVED__SHIFT 0x1d
+#define RDPCSTX1_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX0_RESET_RESERVED_MASK 0x00000001L
+#define RDPCSTX1_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX0_DISABLE_RESERVED_MASK 0x00000002L
+#define RDPCSTX1_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX0_CLK_RDY_RESERVED_MASK 0x00000004L
+#define RDPCSTX1_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX0_DATA_EN_RESERVED_MASK 0x00000008L
+#define RDPCSTX1_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX0_REQ_RESERVED_MASK 0x00000010L
+#define RDPCSTX1_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX0_ACK_RESERVED_MASK 0x00000020L
+#define RDPCSTX1_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX1_RESET_RESERVED_MASK 0x00000100L
+#define RDPCSTX1_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX1_DISABLE_RESERVED_MASK 0x00000200L
+#define RDPCSTX1_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX1_CLK_RDY_RESERVED_MASK 0x00000400L
+#define RDPCSTX1_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX1_DATA_EN_RESERVED_MASK 0x00000800L
+#define RDPCSTX1_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX1_REQ_RESERVED_MASK 0x00001000L
+#define RDPCSTX1_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX1_ACK_RESERVED_MASK 0x00002000L
+#define RDPCSTX1_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX2_RESET_RESERVED_MASK 0x00010000L
+#define RDPCSTX1_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX2_DISABLE_RESERVED_MASK 0x00020000L
+#define RDPCSTX1_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX2_CLK_RDY_RESERVED_MASK 0x00040000L
+#define RDPCSTX1_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX2_DATA_EN_RESERVED_MASK 0x00080000L
+#define RDPCSTX1_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX2_REQ_RESERVED_MASK 0x00100000L
+#define RDPCSTX1_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX2_ACK_RESERVED_MASK 0x00200000L
+#define RDPCSTX1_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX3_RESET_RESERVED_MASK 0x01000000L
+#define RDPCSTX1_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX3_DISABLE_RESERVED_MASK 0x02000000L
+#define RDPCSTX1_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX3_CLK_RDY_RESERVED_MASK 0x04000000L
+#define RDPCSTX1_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX3_DATA_EN_RESERVED_MASK 0x08000000L
+#define RDPCSTX1_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX3_REQ_RESERVED_MASK 0x10000000L
+#define RDPCSTX1_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX3_ACK_RESERVED_MASK 0x20000000L
+//RDPCSTX1_RDPCSTX_DMCU_DPALT_PHY_CNTL6
+#define RDPCSTX1_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_TX0_PSTATE_RESERVED__SHIFT 0x0
+#define RDPCSTX1_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_TX0_MPLL_EN_RESERVED__SHIFT 0x2
+#define RDPCSTX1_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_TX1_PSTATE_RESERVED__SHIFT 0x4
+#define RDPCSTX1_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_TX1_MPLL_EN_RESERVED__SHIFT 0x6
+#define RDPCSTX1_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_TX2_PSTATE_RESERVED__SHIFT 0x8
+#define RDPCSTX1_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_TX2_MPLL_EN_RESERVED__SHIFT 0xa
+#define RDPCSTX1_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_TX3_PSTATE_RESERVED__SHIFT 0xc
+#define RDPCSTX1_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_TX3_MPLL_EN_RESERVED__SHIFT 0xe
+#define RDPCSTX1_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DPALT_DP4_RESERVED__SHIFT 0x10
+#define RDPCSTX1_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DPALT_DISABLE_RESERVED__SHIFT 0x11
+#define RDPCSTX1_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DPALT_DISABLE_ACK_RESERVED__SHIFT 0x12
+#define RDPCSTX1_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_REF_CLK_EN_RESERVED__SHIFT 0x13
+#define RDPCSTX1_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_REF_CLK_REQ_RESERVED__SHIFT 0x14
+#define RDPCSTX1_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_TX0_PSTATE_RESERVED_MASK 0x00000003L
+#define RDPCSTX1_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_TX0_MPLL_EN_RESERVED_MASK 0x00000004L
+#define RDPCSTX1_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_TX1_PSTATE_RESERVED_MASK 0x00000030L
+#define RDPCSTX1_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_TX1_MPLL_EN_RESERVED_MASK 0x00000040L
+#define RDPCSTX1_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_TX2_PSTATE_RESERVED_MASK 0x00000300L
+#define RDPCSTX1_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_TX2_MPLL_EN_RESERVED_MASK 0x00000400L
+#define RDPCSTX1_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_TX3_PSTATE_RESERVED_MASK 0x00003000L
+#define RDPCSTX1_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_TX3_MPLL_EN_RESERVED_MASK 0x00004000L
+#define RDPCSTX1_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DPALT_DP4_RESERVED_MASK 0x00010000L
+#define RDPCSTX1_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DPALT_DISABLE_RESERVED_MASK 0x00020000L
+#define RDPCSTX1_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DPALT_DISABLE_ACK_RESERVED_MASK 0x00040000L
+#define RDPCSTX1_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_REF_CLK_EN_RESERVED_MASK 0x00080000L
+#define RDPCSTX1_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_REF_CLK_REQ_RESERVED_MASK 0x00100000L
+//RDPCSTX1_RDPCSTX_DPALT_CONTROL_REG
+#define RDPCSTX1_RDPCSTX_DPALT_CONTROL_REG__RDPCS_ALLOW_DRIVER_ACCESS__SHIFT 0x0
+#define RDPCSTX1_RDPCSTX_DPALT_CONTROL_REG__RDPCS_DRIVER_ACCESS_BLOCKED__SHIFT 0x4
+#define RDPCSTX1_RDPCSTX_DPALT_CONTROL_REG__RDPCS_DPALT_CONTROL_SPARE__SHIFT 0x8
+#define RDPCSTX1_RDPCSTX_DPALT_CONTROL_REG__RDPCS_ALLOW_DRIVER_ACCESS_MASK 0x00000001L
+#define RDPCSTX1_RDPCSTX_DPALT_CONTROL_REG__RDPCS_DRIVER_ACCESS_BLOCKED_MASK 0x00000010L
+#define RDPCSTX1_RDPCSTX_DPALT_CONTROL_REG__RDPCS_DPALT_CONTROL_SPARE_MASK 0x0000FF00L
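+
+/*
+ * Illustrative sketch only, not part of the generated header: several RDPCSTX1
+ * fields form request/acknowledge pairs (e.g. RDPCS_PHY_DPALT_DISABLE and
+ * RDPCS_PHY_DPALT_DISABLE_ACK in RDPCSTX_PHY_CNTL6), where a caller sets the
+ * request bit and then polls the ack bit. The read_reg() callback below is an
+ * assumption, standing in for the driver's MMIO read of RDPCSTX_PHY_CNTL6.
+ */
+#ifdef DPCS_SH_MASK_EXAMPLE
+static inline bool rdpcstx1_dpalt_disable_acked(u32 (*read_reg)(void))
+{
+	u32 val = read_reg(); /* current RDPCSTX1_RDPCSTX_PHY_CNTL6 value */
+
+	return (val & RDPCSTX1_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DPALT_DISABLE_ACK_MASK) != 0;
+}
+#endif /* DPCS_SH_MASK_EXAMPLE */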
+
+
+// addressBlock: dpcssys_dpcssys_cr1_dispdec
+//DPCSSYS_CR1_DPCSSYS_CR_ADDR
+#define DPCSSYS_CR1_DPCSSYS_CR_ADDR__RDPCS_TX_CR_ADDR__SHIFT 0x0
+#define DPCSSYS_CR1_DPCSSYS_CR_ADDR__RDPCS_TX_CR_ADDR_MASK 0x0000FFFFL
+//DPCSSYS_CR1_DPCSSYS_CR_DATA
+#define DPCSSYS_CR1_DPCSSYS_CR_DATA__RDPCS_TX_CR_DATA__SHIFT 0x0
+#define DPCSSYS_CR1_DPCSSYS_CR_DATA__RDPCS_TX_CR_DATA_MASK 0x0000FFFFL
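+
+/*
+ * Illustrative sketch only, not part of the generated header: the CR1
+ * address/data pair follows the usual indirect-access pattern for the PHY
+ * control-register (CR) space: write a 16-bit CR address through the ADDR
+ * register, then transfer the 16-bit payload through the DATA register. The
+ * write_addr()/read_data() callbacks are assumptions, standing in for the
+ * driver's MMIO accessors for DPCSSYS_CR1_DPCSSYS_CR_ADDR/_DATA.
+ */
+#ifdef DPCS_SH_MASK_EXAMPLE
+static inline u32 dpcssys_cr1_read(void (*write_addr)(u32), u32 (*read_data)(void),
+				   u32 cr_addr)
+{
+	write_addr(cr_addr & DPCSSYS_CR1_DPCSSYS_CR_ADDR__RDPCS_TX_CR_ADDR_MASK);
+	return read_data() & DPCSSYS_CR1_DPCSSYS_CR_DATA__RDPCS_TX_CR_DATA_MASK;
+}
+#endif /* DPCS_SH_MASK_EXAMPLE */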
+
+
+// addressBlock: dpcssys_dpcs0_dpcstx2_dispdec
+//DPCSTX2_DPCSTX_TX_CLOCK_CNTL
+#define DPCSTX2_DPCSTX_TX_CLOCK_CNTL__DPCS_SYMCLK_GATE_DIS__SHIFT 0x0
+#define DPCSTX2_DPCSTX_TX_CLOCK_CNTL__DPCS_SYMCLK_EN__SHIFT 0x1
+#define DPCSTX2_DPCSTX_TX_CLOCK_CNTL__DPCS_SYMCLK_CLOCK_ON__SHIFT 0x2
+#define DPCSTX2_DPCSTX_TX_CLOCK_CNTL__DPCS_SYMCLK_DIV2_CLOCK_ON__SHIFT 0x3
+#define DPCSTX2_DPCSTX_TX_CLOCK_CNTL__DPCS_SYMCLK_GATE_DIS_MASK 0x00000001L
+#define DPCSTX2_DPCSTX_TX_CLOCK_CNTL__DPCS_SYMCLK_EN_MASK 0x00000002L
+#define DPCSTX2_DPCSTX_TX_CLOCK_CNTL__DPCS_SYMCLK_CLOCK_ON_MASK 0x00000004L
+#define DPCSTX2_DPCSTX_TX_CLOCK_CNTL__DPCS_SYMCLK_DIV2_CLOCK_ON_MASK 0x00000008L
+//DPCSTX2_DPCSTX_TX_CNTL
+#define DPCSTX2_DPCSTX_TX_CNTL__DPCS_TX_PLL_UPDATE_REQ__SHIFT 0xc
+#define DPCSTX2_DPCSTX_TX_CNTL__DPCS_TX_PLL_UPDATE_PENDING__SHIFT 0xd
+#define DPCSTX2_DPCSTX_TX_CNTL__DPCS_TX_DATA_SWAP__SHIFT 0xe
+#define DPCSTX2_DPCSTX_TX_CNTL__DPCS_TX_DATA_ORDER_INVERT__SHIFT 0xf
+#define DPCSTX2_DPCSTX_TX_CNTL__DPCS_TX_FIFO_EN__SHIFT 0x10
+#define DPCSTX2_DPCSTX_TX_CNTL__DPCS_TX_FIFO_START__SHIFT 0x11
+#define DPCSTX2_DPCSTX_TX_CNTL__DPCS_TX_FIFO_RD_START_DELAY__SHIFT 0x14
+#define DPCSTX2_DPCSTX_TX_CNTL__DPCS_TX_SOFT_RESET__SHIFT 0x1f
+#define DPCSTX2_DPCSTX_TX_CNTL__DPCS_TX_PLL_UPDATE_REQ_MASK 0x00001000L
+#define DPCSTX2_DPCSTX_TX_CNTL__DPCS_TX_PLL_UPDATE_PENDING_MASK 0x00002000L
+#define DPCSTX2_DPCSTX_TX_CNTL__DPCS_TX_DATA_SWAP_MASK 0x00004000L
+#define DPCSTX2_DPCSTX_TX_CNTL__DPCS_TX_DATA_ORDER_INVERT_MASK 0x00008000L
+#define DPCSTX2_DPCSTX_TX_CNTL__DPCS_TX_FIFO_EN_MASK 0x00010000L
+#define DPCSTX2_DPCSTX_TX_CNTL__DPCS_TX_FIFO_START_MASK 0x00020000L
+#define DPCSTX2_DPCSTX_TX_CNTL__DPCS_TX_FIFO_RD_START_DELAY_MASK 0x00F00000L
+#define DPCSTX2_DPCSTX_TX_CNTL__DPCS_TX_SOFT_RESET_MASK 0x80000000L
+//DPCSTX2_DPCSTX_CBUS_CNTL
+#define DPCSTX2_DPCSTX_CBUS_CNTL__DPCS_CBUS_WR_CMD_DELAY__SHIFT 0x0
+#define DPCSTX2_DPCSTX_CBUS_CNTL__DPCS_CBUS_SOFT_RESET__SHIFT 0x1f
+#define DPCSTX2_DPCSTX_CBUS_CNTL__DPCS_CBUS_WR_CMD_DELAY_MASK 0x000000FFL
+#define DPCSTX2_DPCSTX_CBUS_CNTL__DPCS_CBUS_SOFT_RESET_MASK 0x80000000L
+//DPCSTX2_DPCSTX_INTERRUPT_CNTL
+#define DPCSTX2_DPCSTX_INTERRUPT_CNTL__DPCS_REG_FIFO_OVERFLOW__SHIFT 0x0
+#define DPCSTX2_DPCSTX_INTERRUPT_CNTL__DPCS_REG_ERROR_CLR__SHIFT 0x1
+#define DPCSTX2_DPCSTX_INTERRUPT_CNTL__DPCS_REG_FIFO_ERROR_MASK__SHIFT 0x4
+#define DPCSTX2_DPCSTX_INTERRUPT_CNTL__DPCS_TX0_FIFO_ERROR__SHIFT 0x8
+#define DPCSTX2_DPCSTX_INTERRUPT_CNTL__DPCS_TX1_FIFO_ERROR__SHIFT 0x9
+#define DPCSTX2_DPCSTX_INTERRUPT_CNTL__DPCS_TX2_FIFO_ERROR__SHIFT 0xa
+#define DPCSTX2_DPCSTX_INTERRUPT_CNTL__DPCS_TX3_FIFO_ERROR__SHIFT 0xb
+#define DPCSTX2_DPCSTX_INTERRUPT_CNTL__DPCS_TX_ERROR_CLR__SHIFT 0xc
+#define DPCSTX2_DPCSTX_INTERRUPT_CNTL__DPCS_TX_FIFO_ERROR_MASK__SHIFT 0x10
+#define DPCSTX2_DPCSTX_INTERRUPT_CNTL__DPCS_INTERRUPT_MASK__SHIFT 0x14
+#define DPCSTX2_DPCSTX_INTERRUPT_CNTL__DPCS_REG_FIFO_OVERFLOW_MASK 0x00000001L
+#define DPCSTX2_DPCSTX_INTERRUPT_CNTL__DPCS_REG_ERROR_CLR_MASK 0x00000002L
+#define DPCSTX2_DPCSTX_INTERRUPT_CNTL__DPCS_REG_FIFO_ERROR_MASK_MASK 0x00000010L
+#define DPCSTX2_DPCSTX_INTERRUPT_CNTL__DPCS_TX0_FIFO_ERROR_MASK 0x00000100L
+#define DPCSTX2_DPCSTX_INTERRUPT_CNTL__DPCS_TX1_FIFO_ERROR_MASK 0x00000200L
+#define DPCSTX2_DPCSTX_INTERRUPT_CNTL__DPCS_TX2_FIFO_ERROR_MASK 0x00000400L
+#define DPCSTX2_DPCSTX_INTERRUPT_CNTL__DPCS_TX3_FIFO_ERROR_MASK 0x00000800L
+#define DPCSTX2_DPCSTX_INTERRUPT_CNTL__DPCS_TX_ERROR_CLR_MASK 0x00001000L
+#define DPCSTX2_DPCSTX_INTERRUPT_CNTL__DPCS_TX_FIFO_ERROR_MASK_MASK 0x00010000L
+#define DPCSTX2_DPCSTX_INTERRUPT_CNTL__DPCS_INTERRUPT_MASK_MASK 0x00100000L
+//DPCSTX2_DPCSTX_PLL_UPDATE_ADDR
+#define DPCSTX2_DPCSTX_PLL_UPDATE_ADDR__DPCS_PLL_UPDATE_ADDR__SHIFT 0x0
+#define DPCSTX2_DPCSTX_PLL_UPDATE_ADDR__DPCS_PLL_UPDATE_ADDR_MASK 0x0003FFFFL
+//DPCSTX2_DPCSTX_PLL_UPDATE_DATA
+#define DPCSTX2_DPCSTX_PLL_UPDATE_DATA__DPCS_PLL_UPDATE_DATA__SHIFT 0x0
+#define DPCSTX2_DPCSTX_PLL_UPDATE_DATA__DPCS_PLL_UPDATE_DATA_MASK 0xFFFFFFFFL
+//DPCSTX2_DPCSTX_DEBUG_CONFIG
+#define DPCSTX2_DPCSTX_DEBUG_CONFIG__DPCS_DBG_EN__SHIFT 0x0
+#define DPCSTX2_DPCSTX_DEBUG_CONFIG__DPCS_DBG_CFGCLK_SEL__SHIFT 0x1
+#define DPCSTX2_DPCSTX_DEBUG_CONFIG__DPCS_DBG_TX_SYMCLK_SEL__SHIFT 0x4
+#define DPCSTX2_DPCSTX_DEBUG_CONFIG__DPCS_DBG_TX_SYMCLK_DIV2_SEL__SHIFT 0x8
+#define DPCSTX2_DPCSTX_DEBUG_CONFIG__DPCS_DBG_CBUS_DIS__SHIFT 0xe
+#define DPCSTX2_DPCSTX_DEBUG_CONFIG__DPCS_TEST_DEBUG_WRITE_EN__SHIFT 0x10
+#define DPCSTX2_DPCSTX_DEBUG_CONFIG__DPCS_DBG_EN_MASK 0x00000001L
+#define DPCSTX2_DPCSTX_DEBUG_CONFIG__DPCS_DBG_CFGCLK_SEL_MASK 0x0000000EL
+#define DPCSTX2_DPCSTX_DEBUG_CONFIG__DPCS_DBG_TX_SYMCLK_SEL_MASK 0x00000070L
+#define DPCSTX2_DPCSTX_DEBUG_CONFIG__DPCS_DBG_TX_SYMCLK_DIV2_SEL_MASK 0x00000700L
+#define DPCSTX2_DPCSTX_DEBUG_CONFIG__DPCS_DBG_CBUS_DIS_MASK 0x00004000L
+#define DPCSTX2_DPCSTX_DEBUG_CONFIG__DPCS_TEST_DEBUG_WRITE_EN_MASK 0x00010000L
+
+
+// addressBlock: dpcssys_dpcs0_rdpcstx2_dispdec
+//RDPCSTX2_RDPCSTX_CNTL
+#define RDPCSTX2_RDPCSTX_CNTL__RDPCS_CBUS_SOFT_RESET__SHIFT 0x0
+#define RDPCSTX2_RDPCSTX_CNTL__RDPCS_SRAM_SOFT_RESET__SHIFT 0x4
+#define RDPCSTX2_RDPCSTX_CNTL__RDPCS_TX_FIFO_LANE0_EN__SHIFT 0xc
+#define RDPCSTX2_RDPCSTX_CNTL__RDPCS_TX_FIFO_LANE1_EN__SHIFT 0xd
+#define RDPCSTX2_RDPCSTX_CNTL__RDPCS_TX_FIFO_LANE2_EN__SHIFT 0xe
+#define RDPCSTX2_RDPCSTX_CNTL__RDPCS_TX_FIFO_LANE3_EN__SHIFT 0xf
+#define RDPCSTX2_RDPCSTX_CNTL__RDPCS_TX_FIFO_EN__SHIFT 0x10
+#define RDPCSTX2_RDPCSTX_CNTL__RDPCS_TX_FIFO_START__SHIFT 0x11
+#define RDPCSTX2_RDPCSTX_CNTL__RDPCS_TX_FIFO_RD_START_DELAY__SHIFT 0x14
+#define RDPCSTX2_RDPCSTX_CNTL__RDPCS_CR_REGISTER_BLOCK_EN__SHIFT 0x18
+#define RDPCSTX2_RDPCSTX_CNTL__RDPCS_NON_DPALT_REGISTER_BLOCK_EN__SHIFT 0x19
+#define RDPCSTX2_RDPCSTX_CNTL__RDPCS_DPALT_BLOCK_STATUS__SHIFT 0x1a
+#define RDPCSTX2_RDPCSTX_CNTL__RDPCS_TX_SOFT_RESET__SHIFT 0x1f
+#define RDPCSTX2_RDPCSTX_CNTL__RDPCS_CBUS_SOFT_RESET_MASK 0x00000001L
+#define RDPCSTX2_RDPCSTX_CNTL__RDPCS_SRAM_SOFT_RESET_MASK 0x00000010L
+#define RDPCSTX2_RDPCSTX_CNTL__RDPCS_TX_FIFO_LANE0_EN_MASK 0x00001000L
+#define RDPCSTX2_RDPCSTX_CNTL__RDPCS_TX_FIFO_LANE1_EN_MASK 0x00002000L
+#define RDPCSTX2_RDPCSTX_CNTL__RDPCS_TX_FIFO_LANE2_EN_MASK 0x00004000L
+#define RDPCSTX2_RDPCSTX_CNTL__RDPCS_TX_FIFO_LANE3_EN_MASK 0x00008000L
+#define RDPCSTX2_RDPCSTX_CNTL__RDPCS_TX_FIFO_EN_MASK 0x00010000L
+#define RDPCSTX2_RDPCSTX_CNTL__RDPCS_TX_FIFO_START_MASK 0x00020000L
+#define RDPCSTX2_RDPCSTX_CNTL__RDPCS_TX_FIFO_RD_START_DELAY_MASK 0x00F00000L
+#define RDPCSTX2_RDPCSTX_CNTL__RDPCS_CR_REGISTER_BLOCK_EN_MASK 0x01000000L
+#define RDPCSTX2_RDPCSTX_CNTL__RDPCS_NON_DPALT_REGISTER_BLOCK_EN_MASK 0x02000000L
+#define RDPCSTX2_RDPCSTX_CNTL__RDPCS_DPALT_BLOCK_STATUS_MASK 0x04000000L
+#define RDPCSTX2_RDPCSTX_CNTL__RDPCS_TX_SOFT_RESET_MASK 0x80000000L
+//RDPCSTX2_RDPCSTX_CLOCK_CNTL
+#define RDPCSTX2_RDPCSTX_CLOCK_CNTL__RDPCS_EXT_REFCLK_EN__SHIFT 0x0
+#define RDPCSTX2_RDPCSTX_CLOCK_CNTL__RDPCS_SYMCLK_DIV2_TX0_EN__SHIFT 0x4
+#define RDPCSTX2_RDPCSTX_CLOCK_CNTL__RDPCS_SYMCLK_DIV2_TX1_EN__SHIFT 0x5
+#define RDPCSTX2_RDPCSTX_CLOCK_CNTL__RDPCS_SYMCLK_DIV2_TX2_EN__SHIFT 0x6
+#define RDPCSTX2_RDPCSTX_CLOCK_CNTL__RDPCS_SYMCLK_DIV2_TX3_EN__SHIFT 0x7
+#define RDPCSTX2_RDPCSTX_CLOCK_CNTL__RDPCS_SYMCLK_DIV2_GATE_DIS__SHIFT 0x8
+#define RDPCSTX2_RDPCSTX_CLOCK_CNTL__RDPCS_SYMCLK_DIV2_EN__SHIFT 0x9
+#define RDPCSTX2_RDPCSTX_CLOCK_CNTL__RDPCS_SYMCLK_DIV2_CLOCK_ON__SHIFT 0xa
+#define RDPCSTX2_RDPCSTX_CLOCK_CNTL__RDPCS_SRAMCLK_GATE_DIS__SHIFT 0xc
+#define RDPCSTX2_RDPCSTX_CLOCK_CNTL__RDPCS_SRAMCLK_EN__SHIFT 0xd
+#define RDPCSTX2_RDPCSTX_CLOCK_CNTL__RDPCS_SRAMCLK_CLOCK_ON__SHIFT 0xe
+#define RDPCSTX2_RDPCSTX_CLOCK_CNTL__RDPCS_SRAMCLK_BYPASS__SHIFT 0x10
+#define RDPCSTX2_RDPCSTX_CLOCK_CNTL__RDPCS_EXT_REFCLK_EN_MASK 0x00000001L
+#define RDPCSTX2_RDPCSTX_CLOCK_CNTL__RDPCS_SYMCLK_DIV2_TX0_EN_MASK 0x00000010L
+#define RDPCSTX2_RDPCSTX_CLOCK_CNTL__RDPCS_SYMCLK_DIV2_TX1_EN_MASK 0x00000020L
+#define RDPCSTX2_RDPCSTX_CLOCK_CNTL__RDPCS_SYMCLK_DIV2_TX2_EN_MASK 0x00000040L
+#define RDPCSTX2_RDPCSTX_CLOCK_CNTL__RDPCS_SYMCLK_DIV2_TX3_EN_MASK 0x00000080L
+#define RDPCSTX2_RDPCSTX_CLOCK_CNTL__RDPCS_SYMCLK_DIV2_GATE_DIS_MASK 0x00000100L
+#define RDPCSTX2_RDPCSTX_CLOCK_CNTL__RDPCS_SYMCLK_DIV2_EN_MASK 0x00000200L
+#define RDPCSTX2_RDPCSTX_CLOCK_CNTL__RDPCS_SYMCLK_DIV2_CLOCK_ON_MASK 0x00000400L
+#define RDPCSTX2_RDPCSTX_CLOCK_CNTL__RDPCS_SRAMCLK_GATE_DIS_MASK 0x00001000L
+#define RDPCSTX2_RDPCSTX_CLOCK_CNTL__RDPCS_SRAMCLK_EN_MASK 0x00002000L
+#define RDPCSTX2_RDPCSTX_CLOCK_CNTL__RDPCS_SRAMCLK_CLOCK_ON_MASK 0x00004000L
+#define RDPCSTX2_RDPCSTX_CLOCK_CNTL__RDPCS_SRAMCLK_BYPASS_MASK 0x00010000L
+//RDPCSTX2_RDPCSTX_INTERRUPT_CONTROL
+#define RDPCSTX2_RDPCSTX_INTERRUPT_CONTROL__RDPCS_REG_FIFO_OVERFLOW__SHIFT 0x0
+#define RDPCSTX2_RDPCSTX_INTERRUPT_CONTROL__RDPCS_DPALT_DISABLE_TOGGLE__SHIFT 0x1
+#define RDPCSTX2_RDPCSTX_INTERRUPT_CONTROL__RDPCS_DPALT_4LANE_TOGGLE__SHIFT 0x2
+#define RDPCSTX2_RDPCSTX_INTERRUPT_CONTROL__RDPCS_TX0_FIFO_ERROR__SHIFT 0x4
+#define RDPCSTX2_RDPCSTX_INTERRUPT_CONTROL__RDPCS_TX1_FIFO_ERROR__SHIFT 0x5
+#define RDPCSTX2_RDPCSTX_INTERRUPT_CONTROL__RDPCS_TX2_FIFO_ERROR__SHIFT 0x6
+#define RDPCSTX2_RDPCSTX_INTERRUPT_CONTROL__RDPCS_TX3_FIFO_ERROR__SHIFT 0x7
+#define RDPCSTX2_RDPCSTX_INTERRUPT_CONTROL__RDPCS_REG_ERROR_CLR__SHIFT 0x8
+#define RDPCSTX2_RDPCSTX_INTERRUPT_CONTROL__RDPCS_DPALT_DISABLE_TOGGLE_CLR__SHIFT 0x9
+#define RDPCSTX2_RDPCSTX_INTERRUPT_CONTROL__RDPCS_DPALT_4LANE_TOGGLE_CLR__SHIFT 0xa
+#define RDPCSTX2_RDPCSTX_INTERRUPT_CONTROL__RDPCS_TX_ERROR_CLR__SHIFT 0xc
+#define RDPCSTX2_RDPCSTX_INTERRUPT_CONTROL__RDPCS_REG_FIFO_ERROR_MASK__SHIFT 0x10
+#define RDPCSTX2_RDPCSTX_INTERRUPT_CONTROL__RDPCS_DPALT_DISABLE_TOGGLE_MASK__SHIFT 0x11
+#define RDPCSTX2_RDPCSTX_INTERRUPT_CONTROL__RDPCS_DPALT_4LANE_TOGGLE_MASK__SHIFT 0x12
+#define RDPCSTX2_RDPCSTX_INTERRUPT_CONTROL__RDPCS_TX_FIFO_ERROR_MASK__SHIFT 0x14
+#define RDPCSTX2_RDPCSTX_INTERRUPT_CONTROL__RDPCS_REG_FIFO_OVERFLOW_MASK 0x00000001L
+#define RDPCSTX2_RDPCSTX_INTERRUPT_CONTROL__RDPCS_DPALT_DISABLE_TOGGLE_MASK 0x00000002L
+#define RDPCSTX2_RDPCSTX_INTERRUPT_CONTROL__RDPCS_DPALT_4LANE_TOGGLE_MASK 0x00000004L
+#define RDPCSTX2_RDPCSTX_INTERRUPT_CONTROL__RDPCS_TX0_FIFO_ERROR_MASK 0x00000010L
+#define RDPCSTX2_RDPCSTX_INTERRUPT_CONTROL__RDPCS_TX1_FIFO_ERROR_MASK 0x00000020L
+#define RDPCSTX2_RDPCSTX_INTERRUPT_CONTROL__RDPCS_TX2_FIFO_ERROR_MASK 0x00000040L
+#define RDPCSTX2_RDPCSTX_INTERRUPT_CONTROL__RDPCS_TX3_FIFO_ERROR_MASK 0x00000080L
+#define RDPCSTX2_RDPCSTX_INTERRUPT_CONTROL__RDPCS_REG_ERROR_CLR_MASK 0x00000100L
+#define RDPCSTX2_RDPCSTX_INTERRUPT_CONTROL__RDPCS_DPALT_DISABLE_TOGGLE_CLR_MASK 0x00000200L
+#define RDPCSTX2_RDPCSTX_INTERRUPT_CONTROL__RDPCS_DPALT_4LANE_TOGGLE_CLR_MASK 0x00000400L
+#define RDPCSTX2_RDPCSTX_INTERRUPT_CONTROL__RDPCS_TX_ERROR_CLR_MASK 0x00001000L
+#define RDPCSTX2_RDPCSTX_INTERRUPT_CONTROL__RDPCS_REG_FIFO_ERROR_MASK_MASK 0x00010000L
+#define RDPCSTX2_RDPCSTX_INTERRUPT_CONTROL__RDPCS_DPALT_DISABLE_TOGGLE_MASK_MASK 0x00020000L
+#define RDPCSTX2_RDPCSTX_INTERRUPT_CONTROL__RDPCS_DPALT_4LANE_TOGGLE_MASK_MASK 0x00040000L
+#define RDPCSTX2_RDPCSTX_INTERRUPT_CONTROL__RDPCS_TX_FIFO_ERROR_MASK_MASK 0x00100000L
+//RDPCSTX2_RDPCSTX_PLL_UPDATE_DATA
+#define RDPCSTX2_RDPCSTX_PLL_UPDATE_DATA__RDPCS_PLL_UPDATE_DATA__SHIFT 0x0
+#define RDPCSTX2_RDPCSTX_PLL_UPDATE_DATA__RDPCS_PLL_UPDATE_DATA_MASK 0x00000001L
+//RDPCSTX2_RDPCS_TX_CR_ADDR
+#define RDPCSTX2_RDPCS_TX_CR_ADDR__RDPCS_TX_CR_ADDR__SHIFT 0x0
+#define RDPCSTX2_RDPCS_TX_CR_ADDR__RDPCS_TX_CR_ADDR_MASK 0x0000FFFFL
+//RDPCSTX2_RDPCS_TX_CR_DATA
+#define RDPCSTX2_RDPCS_TX_CR_DATA__RDPCS_TX_CR_DATA__SHIFT 0x0
+#define RDPCSTX2_RDPCS_TX_CR_DATA__RDPCS_TX_CR_DATA_MASK 0x0000FFFFL
+//RDPCSTX2_RDPCS_TX_SRAM_CNTL
+#define RDPCSTX2_RDPCS_TX_SRAM_CNTL__RDPCS_MEM_PWR_DIS__SHIFT 0x14
+#define RDPCSTX2_RDPCS_TX_SRAM_CNTL__RDPCS_MEM_PWR_FORCE__SHIFT 0x18
+#define RDPCSTX2_RDPCS_TX_SRAM_CNTL__RDPCS_MEM_PWR_PWR_STATE__SHIFT 0x1c
+#define RDPCSTX2_RDPCS_TX_SRAM_CNTL__RDPCS_MEM_PWR_DIS_MASK 0x00100000L
+#define RDPCSTX2_RDPCS_TX_SRAM_CNTL__RDPCS_MEM_PWR_FORCE_MASK 0x03000000L
+#define RDPCSTX2_RDPCS_TX_SRAM_CNTL__RDPCS_MEM_PWR_PWR_STATE_MASK 0x30000000L
+//RDPCSTX2_RDPCSTX_MEM_POWER_CTRL
+#define RDPCSTX2_RDPCSTX_MEM_POWER_CTRL__RDPCS_FUSE_RM_FUSES__SHIFT 0x0
+#define RDPCSTX2_RDPCSTX_MEM_POWER_CTRL__RDPCS_FUSE_CUSTOM_RM_FUSES__SHIFT 0xc
+#define RDPCSTX2_RDPCSTX_MEM_POWER_CTRL__RDPCS_MEM_POWER_CTRL_PDP_BC1__SHIFT 0x1a
+#define RDPCSTX2_RDPCSTX_MEM_POWER_CTRL__RDPCS_MEM_POWER_CTRL_PDP_BC2__SHIFT 0x1b
+#define RDPCSTX2_RDPCSTX_MEM_POWER_CTRL__RDPCS_MEM_POWER_CTRL_HD_BC1__SHIFT 0x1c
+#define RDPCSTX2_RDPCSTX_MEM_POWER_CTRL__RDPCS_MEM_POWER_CTRL_HD_BC2__SHIFT 0x1d
+#define RDPCSTX2_RDPCSTX_MEM_POWER_CTRL__RDPCS_LIVMIN_DIS_SRAM__SHIFT 0x1e
+#define RDPCSTX2_RDPCSTX_MEM_POWER_CTRL__RDPCS_FUSE_RM_FUSES_MASK 0x00000FFFL
+#define RDPCSTX2_RDPCSTX_MEM_POWER_CTRL__RDPCS_FUSE_CUSTOM_RM_FUSES_MASK 0x03FFF000L
+#define RDPCSTX2_RDPCSTX_MEM_POWER_CTRL__RDPCS_MEM_POWER_CTRL_PDP_BC1_MASK 0x04000000L
+#define RDPCSTX2_RDPCSTX_MEM_POWER_CTRL__RDPCS_MEM_POWER_CTRL_PDP_BC2_MASK 0x08000000L
+#define RDPCSTX2_RDPCSTX_MEM_POWER_CTRL__RDPCS_MEM_POWER_CTRL_HD_BC1_MASK 0x10000000L
+#define RDPCSTX2_RDPCSTX_MEM_POWER_CTRL__RDPCS_MEM_POWER_CTRL_HD_BC2_MASK 0x20000000L
+#define RDPCSTX2_RDPCSTX_MEM_POWER_CTRL__RDPCS_LIVMIN_DIS_SRAM_MASK 0x40000000L
+//RDPCSTX2_RDPCSTX_MEM_POWER_CTRL2
+#define RDPCSTX2_RDPCSTX_MEM_POWER_CTRL2__RDPCS_MEM_POWER_CTRL_POFF__SHIFT 0x0
+#define RDPCSTX2_RDPCSTX_MEM_POWER_CTRL2__RDPCS_MEM_POWER_CTRL_FISO__SHIFT 0x2
+#define RDPCSTX2_RDPCSTX_MEM_POWER_CTRL2__RDPCS_MEM_POWER_CTRL_POFF_MASK 0x00000003L
+#define RDPCSTX2_RDPCSTX_MEM_POWER_CTRL2__RDPCS_MEM_POWER_CTRL_FISO_MASK 0x00000004L
+//RDPCSTX2_RDPCSTX_SCRATCH
+#define RDPCSTX2_RDPCSTX_SCRATCH__RDPCSTX_SCRATCH__SHIFT 0x0
+#define RDPCSTX2_RDPCSTX_SCRATCH__RDPCSTX_SCRATCH_MASK 0xFFFFFFFFL
+//RDPCSTX2_RDPCSTX_DMCU_DPALT_DIS_BLOCK_REG
+#define RDPCSTX2_RDPCSTX_DMCU_DPALT_DIS_BLOCK_REG__RDPCS_DMCU_DPALT_DIS_BLOCK_REG__SHIFT 0x0
+#define RDPCSTX2_RDPCSTX_DMCU_DPALT_DIS_BLOCK_REG__RDPCS_DMCU_DPALT_FORCE_SYMCLK_DIV2_DIS__SHIFT 0x4
+#define RDPCSTX2_RDPCSTX_DMCU_DPALT_DIS_BLOCK_REG__RDPCS_DMCU_DPALT_CONTROL_SPARE__SHIFT 0x8
+#define RDPCSTX2_RDPCSTX_DMCU_DPALT_DIS_BLOCK_REG__RDPCS_DMCU_DPALT_DIS_BLOCK_REG_MASK 0x00000001L
+#define RDPCSTX2_RDPCSTX_DMCU_DPALT_DIS_BLOCK_REG__RDPCS_DMCU_DPALT_FORCE_SYMCLK_DIV2_DIS_MASK 0x00000010L
+#define RDPCSTX2_RDPCSTX_DMCU_DPALT_DIS_BLOCK_REG__RDPCS_DMCU_DPALT_CONTROL_SPARE_MASK 0x0000FF00L
+//RDPCSTX2_RDPCSTX_DEBUG_CONFIG
+#define RDPCSTX2_RDPCSTX_DEBUG_CONFIG__RDPCS_DBG_EN__SHIFT 0x0
+#define RDPCSTX2_RDPCSTX_DEBUG_CONFIG__RDPCS_DBG_SEL_ASYNC_8BIT__SHIFT 0x4
+#define RDPCSTX2_RDPCSTX_DEBUG_CONFIG__RDPCS_DBG_SEL_ASYNC_SWAP__SHIFT 0x7
+#define RDPCSTX2_RDPCSTX_DEBUG_CONFIG__RDPCS_DBG_SEL_TEST_CLK__SHIFT 0x8
+#define RDPCSTX2_RDPCSTX_DEBUG_CONFIG__RDPCS_DBG_CR_COUNT_EXPIRE__SHIFT 0xf
+#define RDPCSTX2_RDPCSTX_DEBUG_CONFIG__RDPCS_DBG_CR_COUNT_MAX__SHIFT 0x10
+#define RDPCSTX2_RDPCSTX_DEBUG_CONFIG__RDPCS_DBG_CR_COUNT__SHIFT 0x18
+#define RDPCSTX2_RDPCSTX_DEBUG_CONFIG__RDPCS_DBG_EN_MASK 0x00000001L
+#define RDPCSTX2_RDPCSTX_DEBUG_CONFIG__RDPCS_DBG_SEL_ASYNC_8BIT_MASK 0x00000070L
+#define RDPCSTX2_RDPCSTX_DEBUG_CONFIG__RDPCS_DBG_SEL_ASYNC_SWAP_MASK 0x00000080L
+#define RDPCSTX2_RDPCSTX_DEBUG_CONFIG__RDPCS_DBG_SEL_TEST_CLK_MASK 0x00001F00L
+#define RDPCSTX2_RDPCSTX_DEBUG_CONFIG__RDPCS_DBG_CR_COUNT_EXPIRE_MASK 0x00008000L
+#define RDPCSTX2_RDPCSTX_DEBUG_CONFIG__RDPCS_DBG_CR_COUNT_MAX_MASK 0x00FF0000L
+#define RDPCSTX2_RDPCSTX_DEBUG_CONFIG__RDPCS_DBG_CR_COUNT_MASK 0xFF000000L
+//RDPCSTX2_RDPCSTX_PHY_CNTL0
+#define RDPCSTX2_RDPCSTX_PHY_CNTL0__RDPCS_PHY_RESET__SHIFT 0x0
+#define RDPCSTX2_RDPCSTX_PHY_CNTL0__RDPCS_PHY_TCA_PHY_RESET__SHIFT 0x1
+#define RDPCSTX2_RDPCSTX_PHY_CNTL0__RDPCS_PHY_TCA_APB_RESET_N__SHIFT 0x2
+#define RDPCSTX2_RDPCSTX_PHY_CNTL0__RDPCS_PHY_TEST_POWERDOWN__SHIFT 0x3
+#define RDPCSTX2_RDPCSTX_PHY_CNTL0__RDPCS_PHY_DTB_OUT__SHIFT 0x4
+#define RDPCSTX2_RDPCSTX_PHY_CNTL0__RDPCS_PHY_HDMIMODE_ENABLE__SHIFT 0x8
+#define RDPCSTX2_RDPCSTX_PHY_CNTL0__RDPCS_PHY_REF_RANGE__SHIFT 0x9
+#define RDPCSTX2_RDPCSTX_PHY_CNTL0__RDPCS_PHY_TX_VBOOST_LVL__SHIFT 0xe
+#define RDPCSTX2_RDPCSTX_PHY_CNTL0__RDPCS_PHY_RTUNE_REQ__SHIFT 0x11
+#define RDPCSTX2_RDPCSTX_PHY_CNTL0__RDPCS_PHY_RTUNE_ACK__SHIFT 0x12
+#define RDPCSTX2_RDPCSTX_PHY_CNTL0__RDPCS_PHY_CR_PARA_SEL__SHIFT 0x14
+#define RDPCSTX2_RDPCSTX_PHY_CNTL0__RDPCS_PHY_CR_MUX_SEL__SHIFT 0x15
+#define RDPCSTX2_RDPCSTX_PHY_CNTL0__RDPCS_PHY_REF_CLKDET_EN__SHIFT 0x18
+#define RDPCSTX2_RDPCSTX_PHY_CNTL0__RDPCS_PHY_REF_CLKDET_RESULT__SHIFT 0x19
+#define RDPCSTX2_RDPCSTX_PHY_CNTL0__RDPCS_SRAM_INIT_DONE__SHIFT 0x1c
+#define RDPCSTX2_RDPCSTX_PHY_CNTL0__RDPCS_SRAM_EXT_LD_DONE__SHIFT 0x1d
+#define RDPCSTX2_RDPCSTX_PHY_CNTL0__RDPCS_SRAM_BYPASS__SHIFT 0x1f
+#define RDPCSTX2_RDPCSTX_PHY_CNTL0__RDPCS_PHY_RESET_MASK 0x00000001L
+#define RDPCSTX2_RDPCSTX_PHY_CNTL0__RDPCS_PHY_TCA_PHY_RESET_MASK 0x00000002L
+#define RDPCSTX2_RDPCSTX_PHY_CNTL0__RDPCS_PHY_TCA_APB_RESET_N_MASK 0x00000004L
+#define RDPCSTX2_RDPCSTX_PHY_CNTL0__RDPCS_PHY_TEST_POWERDOWN_MASK 0x00000008L
+#define RDPCSTX2_RDPCSTX_PHY_CNTL0__RDPCS_PHY_DTB_OUT_MASK 0x00000030L
+#define RDPCSTX2_RDPCSTX_PHY_CNTL0__RDPCS_PHY_HDMIMODE_ENABLE_MASK 0x00000100L
+#define RDPCSTX2_RDPCSTX_PHY_CNTL0__RDPCS_PHY_REF_RANGE_MASK 0x00003E00L
+#define RDPCSTX2_RDPCSTX_PHY_CNTL0__RDPCS_PHY_TX_VBOOST_LVL_MASK 0x0001C000L
+#define RDPCSTX2_RDPCSTX_PHY_CNTL0__RDPCS_PHY_RTUNE_REQ_MASK 0x00020000L
+#define RDPCSTX2_RDPCSTX_PHY_CNTL0__RDPCS_PHY_RTUNE_ACK_MASK 0x00040000L
+#define RDPCSTX2_RDPCSTX_PHY_CNTL0__RDPCS_PHY_CR_PARA_SEL_MASK 0x00100000L
+#define RDPCSTX2_RDPCSTX_PHY_CNTL0__RDPCS_PHY_CR_MUX_SEL_MASK 0x00200000L
+#define RDPCSTX2_RDPCSTX_PHY_CNTL0__RDPCS_PHY_REF_CLKDET_EN_MASK 0x01000000L
+#define RDPCSTX2_RDPCSTX_PHY_CNTL0__RDPCS_PHY_REF_CLKDET_RESULT_MASK 0x02000000L
+#define RDPCSTX2_RDPCSTX_PHY_CNTL0__RDPCS_SRAM_INIT_DONE_MASK 0x10000000L
+#define RDPCSTX2_RDPCSTX_PHY_CNTL0__RDPCS_SRAM_EXT_LD_DONE_MASK 0x20000000L
+#define RDPCSTX2_RDPCSTX_PHY_CNTL0__RDPCS_SRAM_BYPASS_MASK 0x80000000L
+//RDPCSTX2_RDPCSTX_PHY_CNTL1
+#define RDPCSTX2_RDPCSTX_PHY_CNTL1__RDPCS_PHY_PG_MODE_EN__SHIFT 0x0
+#define RDPCSTX2_RDPCSTX_PHY_CNTL1__RDPCS_PHY_PCS_PWR_EN__SHIFT 0x1
+#define RDPCSTX2_RDPCSTX_PHY_CNTL1__RDPCS_PHY_PCS_PWR_STABLE__SHIFT 0x2
+#define RDPCSTX2_RDPCSTX_PHY_CNTL1__RDPCS_PHY_PMA_PWR_EN__SHIFT 0x3
+#define RDPCSTX2_RDPCSTX_PHY_CNTL1__RDPCS_PHY_PMA_PWR_STABLE__SHIFT 0x4
+#define RDPCSTX2_RDPCSTX_PHY_CNTL1__RDPCS_PHY_DP_PG_RESET__SHIFT 0x5
+#define RDPCSTX2_RDPCSTX_PHY_CNTL1__RDPCS_PHY_ANA_PWR_EN__SHIFT 0x6
+#define RDPCSTX2_RDPCSTX_PHY_CNTL1__RDPCS_PHY_ANA_PWR_STABLE__SHIFT 0x7
+#define RDPCSTX2_RDPCSTX_PHY_CNTL1__RDPCS_PHY_PG_MODE_EN_MASK 0x00000001L
+#define RDPCSTX2_RDPCSTX_PHY_CNTL1__RDPCS_PHY_PCS_PWR_EN_MASK 0x00000002L
+#define RDPCSTX2_RDPCSTX_PHY_CNTL1__RDPCS_PHY_PCS_PWR_STABLE_MASK 0x00000004L
+#define RDPCSTX2_RDPCSTX_PHY_CNTL1__RDPCS_PHY_PMA_PWR_EN_MASK 0x00000008L
+#define RDPCSTX2_RDPCSTX_PHY_CNTL1__RDPCS_PHY_PMA_PWR_STABLE_MASK 0x00000010L
+#define RDPCSTX2_RDPCSTX_PHY_CNTL1__RDPCS_PHY_DP_PG_RESET_MASK 0x00000020L
+#define RDPCSTX2_RDPCSTX_PHY_CNTL1__RDPCS_PHY_ANA_PWR_EN_MASK 0x00000040L
+#define RDPCSTX2_RDPCSTX_PHY_CNTL1__RDPCS_PHY_ANA_PWR_STABLE_MASK 0x00000080L
+//RDPCSTX2_RDPCSTX_PHY_CNTL2
+#define RDPCSTX2_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP4_POR__SHIFT 0x3
+#define RDPCSTX2_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP_LANE0_RX2TX_PAR_LB_EN__SHIFT 0x4
+#define RDPCSTX2_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP_LANE1_RX2TX_PAR_LB_EN__SHIFT 0x5
+#define RDPCSTX2_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP_LANE2_RX2TX_PAR_LB_EN__SHIFT 0x6
+#define RDPCSTX2_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP_LANE3_RX2TX_PAR_LB_EN__SHIFT 0x7
+#define RDPCSTX2_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP_LANE0_TX2RX_SER_LB_EN__SHIFT 0x8
+#define RDPCSTX2_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP_LANE1_TX2RX_SER_LB_EN__SHIFT 0x9
+#define RDPCSTX2_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP_LANE2_TX2RX_SER_LB_EN__SHIFT 0xa
+#define RDPCSTX2_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP_LANE3_TX2RX_SER_LB_EN__SHIFT 0xb
+#define RDPCSTX2_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP4_POR_MASK 0x00000008L
+#define RDPCSTX2_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP_LANE0_RX2TX_PAR_LB_EN_MASK 0x00000010L
+#define RDPCSTX2_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP_LANE1_RX2TX_PAR_LB_EN_MASK 0x00000020L
+#define RDPCSTX2_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP_LANE2_RX2TX_PAR_LB_EN_MASK 0x00000040L
+#define RDPCSTX2_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP_LANE3_RX2TX_PAR_LB_EN_MASK 0x00000080L
+#define RDPCSTX2_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP_LANE0_TX2RX_SER_LB_EN_MASK 0x00000100L
+#define RDPCSTX2_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP_LANE1_TX2RX_SER_LB_EN_MASK 0x00000200L
+#define RDPCSTX2_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP_LANE2_TX2RX_SER_LB_EN_MASK 0x00000400L
+#define RDPCSTX2_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP_LANE3_TX2RX_SER_LB_EN_MASK 0x00000800L
+//RDPCSTX2_RDPCSTX_PHY_CNTL3
+#define RDPCSTX2_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX0_RESET__SHIFT 0x0
+#define RDPCSTX2_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX0_DISABLE__SHIFT 0x1
+#define RDPCSTX2_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX0_CLK_RDY__SHIFT 0x2
+#define RDPCSTX2_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX0_DATA_EN__SHIFT 0x3
+#define RDPCSTX2_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX0_REQ__SHIFT 0x4
+#define RDPCSTX2_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX0_ACK__SHIFT 0x5
+#define RDPCSTX2_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX1_RESET__SHIFT 0x8
+#define RDPCSTX2_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX1_DISABLE__SHIFT 0x9
+#define RDPCSTX2_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX1_CLK_RDY__SHIFT 0xa
+#define RDPCSTX2_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX1_DATA_EN__SHIFT 0xb
+#define RDPCSTX2_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX1_REQ__SHIFT 0xc
+#define RDPCSTX2_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX1_ACK__SHIFT 0xd
+#define RDPCSTX2_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX2_RESET__SHIFT 0x10
+#define RDPCSTX2_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX2_DISABLE__SHIFT 0x11
+#define RDPCSTX2_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX2_CLK_RDY__SHIFT 0x12
+#define RDPCSTX2_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX2_DATA_EN__SHIFT 0x13
+#define RDPCSTX2_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX2_REQ__SHIFT 0x14
+#define RDPCSTX2_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX2_ACK__SHIFT 0x15
+#define RDPCSTX2_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX3_RESET__SHIFT 0x18
+#define RDPCSTX2_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX3_DISABLE__SHIFT 0x19
+#define RDPCSTX2_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX3_CLK_RDY__SHIFT 0x1a
+#define RDPCSTX2_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX3_DATA_EN__SHIFT 0x1b
+#define RDPCSTX2_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX3_REQ__SHIFT 0x1c
+#define RDPCSTX2_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX3_ACK__SHIFT 0x1d
+#define RDPCSTX2_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX0_RESET_MASK 0x00000001L
+#define RDPCSTX2_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX0_DISABLE_MASK 0x00000002L
+#define RDPCSTX2_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX0_CLK_RDY_MASK 0x00000004L
+#define RDPCSTX2_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX0_DATA_EN_MASK 0x00000008L
+#define RDPCSTX2_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX0_REQ_MASK 0x00000010L
+#define RDPCSTX2_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX0_ACK_MASK 0x00000020L
+#define RDPCSTX2_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX1_RESET_MASK 0x00000100L
+#define RDPCSTX2_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX1_DISABLE_MASK 0x00000200L
+#define RDPCSTX2_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX1_CLK_RDY_MASK 0x00000400L
+#define RDPCSTX2_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX1_DATA_EN_MASK 0x00000800L
+#define RDPCSTX2_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX1_REQ_MASK 0x00001000L
+#define RDPCSTX2_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX1_ACK_MASK 0x00002000L
+#define RDPCSTX2_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX2_RESET_MASK 0x00010000L
+#define RDPCSTX2_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX2_DISABLE_MASK 0x00020000L
+#define RDPCSTX2_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX2_CLK_RDY_MASK 0x00040000L
+#define RDPCSTX2_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX2_DATA_EN_MASK 0x00080000L
+#define RDPCSTX2_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX2_REQ_MASK 0x00100000L
+#define RDPCSTX2_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX2_ACK_MASK 0x00200000L
+#define RDPCSTX2_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX3_RESET_MASK 0x01000000L
+#define RDPCSTX2_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX3_DISABLE_MASK 0x02000000L
+#define RDPCSTX2_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX3_CLK_RDY_MASK 0x04000000L
+#define RDPCSTX2_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX3_DATA_EN_MASK 0x08000000L
+#define RDPCSTX2_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX3_REQ_MASK 0x10000000L
+#define RDPCSTX2_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX3_ACK_MASK 0x20000000L
+//RDPCSTX2_RDPCSTX_PHY_CNTL4
+#define RDPCSTX2_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX0_TERM_CTRL__SHIFT 0x0
+#define RDPCSTX2_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX0_INVERT__SHIFT 0x4
+#define RDPCSTX2_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX0_BYPASS_EQ_CALC__SHIFT 0x6
+#define RDPCSTX2_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX0_HP_PROT_EN__SHIFT 0x7
+#define RDPCSTX2_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX1_TERM_CTRL__SHIFT 0x8
+#define RDPCSTX2_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX1_INVERT__SHIFT 0xc
+#define RDPCSTX2_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX1_BYPASS_EQ_CALC__SHIFT 0xe
+#define RDPCSTX2_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX1_HP_PROT_EN__SHIFT 0xf
+#define RDPCSTX2_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX2_TERM_CTRL__SHIFT 0x10
+#define RDPCSTX2_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX2_INVERT__SHIFT 0x14
+#define RDPCSTX2_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX2_BYPASS_EQ_CALC__SHIFT 0x16
+#define RDPCSTX2_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX2_HP_PROT_EN__SHIFT 0x17
+#define RDPCSTX2_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX3_TERM_CTRL__SHIFT 0x18
+#define RDPCSTX2_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX3_INVERT__SHIFT 0x1c
+#define RDPCSTX2_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX3_BYPASS_EQ_CALC__SHIFT 0x1e
+#define RDPCSTX2_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX3_HP_PROT_EN__SHIFT 0x1f
+#define RDPCSTX2_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX0_TERM_CTRL_MASK 0x00000007L
+#define RDPCSTX2_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX0_INVERT_MASK 0x00000010L
+#define RDPCSTX2_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX0_BYPASS_EQ_CALC_MASK 0x00000040L
+#define RDPCSTX2_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX0_HP_PROT_EN_MASK 0x00000080L
+#define RDPCSTX2_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX1_TERM_CTRL_MASK 0x00000700L
+#define RDPCSTX2_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX1_INVERT_MASK 0x00001000L
+#define RDPCSTX2_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX1_BYPASS_EQ_CALC_MASK 0x00004000L
+#define RDPCSTX2_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX1_HP_PROT_EN_MASK 0x00008000L
+#define RDPCSTX2_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX2_TERM_CTRL_MASK 0x00070000L
+#define RDPCSTX2_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX2_INVERT_MASK 0x00100000L
+#define RDPCSTX2_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX2_BYPASS_EQ_CALC_MASK 0x00400000L
+#define RDPCSTX2_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX2_HP_PROT_EN_MASK 0x00800000L
+#define RDPCSTX2_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX3_TERM_CTRL_MASK 0x07000000L
+#define RDPCSTX2_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX3_INVERT_MASK 0x10000000L
+#define RDPCSTX2_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX3_BYPASS_EQ_CALC_MASK 0x40000000L
+#define RDPCSTX2_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX3_HP_PROT_EN_MASK 0x80000000L
+//RDPCSTX2_RDPCSTX_PHY_CNTL5
+#define RDPCSTX2_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX0_LPD__SHIFT 0x0
+#define RDPCSTX2_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX0_RATE__SHIFT 0x1
+#define RDPCSTX2_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX0_WIDTH__SHIFT 0x4
+#define RDPCSTX2_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX0_DETRX_REQ__SHIFT 0x6
+#define RDPCSTX2_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX0_DETRX_RESULT__SHIFT 0x7
+#define RDPCSTX2_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX1_LPD__SHIFT 0x8
+#define RDPCSTX2_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX1_RATE__SHIFT 0x9
+#define RDPCSTX2_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX1_WIDTH__SHIFT 0xc
+#define RDPCSTX2_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX1_DETRX_REQ__SHIFT 0xe
+#define RDPCSTX2_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX1_DETRX_RESULT__SHIFT 0xf
+#define RDPCSTX2_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX2_LPD__SHIFT 0x10
+#define RDPCSTX2_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX2_RATE__SHIFT 0x11
+#define RDPCSTX2_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX2_WIDTH__SHIFT 0x14
+#define RDPCSTX2_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX2_DETRX_REQ__SHIFT 0x16
+#define RDPCSTX2_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX2_DETRX_RESULT__SHIFT 0x17
+#define RDPCSTX2_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX3_LPD__SHIFT 0x18
+#define RDPCSTX2_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX3_RATE__SHIFT 0x19
+#define RDPCSTX2_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX3_WIDTH__SHIFT 0x1c
+#define RDPCSTX2_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX3_DETRX_REQ__SHIFT 0x1e
+#define RDPCSTX2_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX3_DETRX_RESULT__SHIFT 0x1f
+#define RDPCSTX2_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX0_LPD_MASK 0x00000001L
+#define RDPCSTX2_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX0_RATE_MASK 0x0000000EL
+#define RDPCSTX2_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX0_WIDTH_MASK 0x00000030L
+#define RDPCSTX2_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX0_DETRX_REQ_MASK 0x00000040L
+#define RDPCSTX2_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX0_DETRX_RESULT_MASK 0x00000080L
+#define RDPCSTX2_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX1_LPD_MASK 0x00000100L
+#define RDPCSTX2_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX1_RATE_MASK 0x00000E00L
+#define RDPCSTX2_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX1_WIDTH_MASK 0x00003000L
+#define RDPCSTX2_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX1_DETRX_REQ_MASK 0x00004000L
+#define RDPCSTX2_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX1_DETRX_RESULT_MASK 0x00008000L
+#define RDPCSTX2_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX2_LPD_MASK 0x00010000L
+#define RDPCSTX2_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX2_RATE_MASK 0x000E0000L
+#define RDPCSTX2_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX2_WIDTH_MASK 0x00300000L
+#define RDPCSTX2_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX2_DETRX_REQ_MASK 0x00400000L
+#define RDPCSTX2_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX2_DETRX_RESULT_MASK 0x00800000L
+#define RDPCSTX2_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX3_LPD_MASK 0x01000000L
+#define RDPCSTX2_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX3_RATE_MASK 0x0E000000L
+#define RDPCSTX2_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX3_WIDTH_MASK 0x30000000L
+#define RDPCSTX2_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX3_DETRX_REQ_MASK 0x40000000L
+#define RDPCSTX2_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX3_DETRX_RESULT_MASK 0x80000000L
+//RDPCSTX2_RDPCSTX_PHY_CNTL6
+#define RDPCSTX2_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX0_PSTATE__SHIFT 0x0
+#define RDPCSTX2_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX0_MPLL_EN__SHIFT 0x2
+#define RDPCSTX2_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX1_PSTATE__SHIFT 0x4
+#define RDPCSTX2_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX1_MPLL_EN__SHIFT 0x6
+#define RDPCSTX2_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX2_PSTATE__SHIFT 0x8
+#define RDPCSTX2_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX2_MPLL_EN__SHIFT 0xa
+#define RDPCSTX2_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX3_PSTATE__SHIFT 0xc
+#define RDPCSTX2_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX3_MPLL_EN__SHIFT 0xe
+#define RDPCSTX2_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DPALT_DP4__SHIFT 0x10
+#define RDPCSTX2_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DPALT_DISABLE__SHIFT 0x11
+#define RDPCSTX2_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DPALT_DISABLE_ACK__SHIFT 0x12
+#define RDPCSTX2_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_REF_CLK_EN__SHIFT 0x13
+#define RDPCSTX2_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_REF_CLK_REQ__SHIFT 0x14
+#define RDPCSTX2_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX0_PSTATE_MASK 0x00000003L
+#define RDPCSTX2_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX0_MPLL_EN_MASK 0x00000004L
+#define RDPCSTX2_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX1_PSTATE_MASK 0x00000030L
+#define RDPCSTX2_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX1_MPLL_EN_MASK 0x00000040L
+#define RDPCSTX2_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX2_PSTATE_MASK 0x00000300L
+#define RDPCSTX2_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX2_MPLL_EN_MASK 0x00000400L
+#define RDPCSTX2_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX3_PSTATE_MASK 0x00003000L
+#define RDPCSTX2_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX3_MPLL_EN_MASK 0x00004000L
+#define RDPCSTX2_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DPALT_DP4_MASK 0x00010000L
+#define RDPCSTX2_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DPALT_DISABLE_MASK 0x00020000L
+#define RDPCSTX2_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DPALT_DISABLE_ACK_MASK 0x00040000L
+#define RDPCSTX2_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_REF_CLK_EN_MASK 0x00080000L
+#define RDPCSTX2_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_REF_CLK_REQ_MASK 0x00100000L
+//RDPCSTX2_RDPCSTX_PHY_CNTL7
+#define RDPCSTX2_RDPCSTX_PHY_CNTL7__RDPCS_PHY_DP_MPLLB_FRACN_DEN__SHIFT 0x0
+#define RDPCSTX2_RDPCSTX_PHY_CNTL7__RDPCS_PHY_DP_MPLLB_FRACN_QUOT__SHIFT 0x10
+#define RDPCSTX2_RDPCSTX_PHY_CNTL7__RDPCS_PHY_DP_MPLLB_FRACN_DEN_MASK 0x0000FFFFL
+#define RDPCSTX2_RDPCSTX_PHY_CNTL7__RDPCS_PHY_DP_MPLLB_FRACN_QUOT_MASK 0xFFFF0000L
+//RDPCSTX2_RDPCSTX_PHY_CNTL8
+#define RDPCSTX2_RDPCSTX_PHY_CNTL8__RDPCS_PHY_DP_MPLLB_SSC_PEAK__SHIFT 0x0
+#define RDPCSTX2_RDPCSTX_PHY_CNTL8__RDPCS_PHY_DP_MPLLB_SSC_PEAK_MASK 0x000FFFFFL
+//RDPCSTX2_RDPCSTX_PHY_CNTL9
+#define RDPCSTX2_RDPCSTX_PHY_CNTL9__RDPCS_PHY_DP_MPLLB_SSC_STEPSIZE__SHIFT 0x0
+#define RDPCSTX2_RDPCSTX_PHY_CNTL9__RDPCS_PHY_DP_MPLLB_SSC_UP_SPREAD__SHIFT 0x18
+#define RDPCSTX2_RDPCSTX_PHY_CNTL9__RDPCS_PHY_DP_MPLLB_SSC_STEPSIZE_MASK 0x001FFFFFL
+#define RDPCSTX2_RDPCSTX_PHY_CNTL9__RDPCS_PHY_DP_MPLLB_SSC_UP_SPREAD_MASK 0x01000000L
+//RDPCSTX2_RDPCSTX_PHY_CNTL10
+#define RDPCSTX2_RDPCSTX_PHY_CNTL10__RDPCS_PHY_DP_MPLLB_FRACN_REM__SHIFT 0x0
+#define RDPCSTX2_RDPCSTX_PHY_CNTL10__RDPCS_PHY_DP_MPLLB_FRACN_REM_MASK 0x0000FFFFL
+//RDPCSTX2_RDPCSTX_PHY_CNTL11
+#define RDPCSTX2_RDPCSTX_PHY_CNTL11__RDPCS_PHY_DP_MPLLB_MULTIPLIER__SHIFT 0x4
+#define RDPCSTX2_RDPCSTX_PHY_CNTL11__RDPCS_PHY_HDMI_MPLLB_HDMI_DIV__SHIFT 0x10
+#define RDPCSTX2_RDPCSTX_PHY_CNTL11__RDPCS_PHY_DP_REF_CLK_MPLLB_DIV__SHIFT 0x14
+#define RDPCSTX2_RDPCSTX_PHY_CNTL11__RDPCS_PHY_HDMI_MPLLB_HDMI_PIXEL_CLK_DIV__SHIFT 0x18
+#define RDPCSTX2_RDPCSTX_PHY_CNTL11__RDPCS_PHY_DP_MPLLB_MULTIPLIER_MASK 0x0000FFF0L
+#define RDPCSTX2_RDPCSTX_PHY_CNTL11__RDPCS_PHY_HDMI_MPLLB_HDMI_DIV_MASK 0x00070000L
+#define RDPCSTX2_RDPCSTX_PHY_CNTL11__RDPCS_PHY_DP_REF_CLK_MPLLB_DIV_MASK 0x00700000L
+#define RDPCSTX2_RDPCSTX_PHY_CNTL11__RDPCS_PHY_HDMI_MPLLB_HDMI_PIXEL_CLK_DIV_MASK 0x03000000L
+//RDPCSTX2_RDPCSTX_PHY_CNTL12
+#define RDPCSTX2_RDPCSTX_PHY_CNTL12__RDPCS_PHY_DP_MPLLB_DIV5_CLK_EN__SHIFT 0x0
+#define RDPCSTX2_RDPCSTX_PHY_CNTL12__RDPCS_PHY_DP_MPLLB_WORD_DIV2_EN__SHIFT 0x2
+#define RDPCSTX2_RDPCSTX_PHY_CNTL12__RDPCS_PHY_DP_MPLLB_TX_CLK_DIV__SHIFT 0x4
+#define RDPCSTX2_RDPCSTX_PHY_CNTL12__RDPCS_PHY_DP_MPLLB_STATE__SHIFT 0x7
+#define RDPCSTX2_RDPCSTX_PHY_CNTL12__RDPCS_PHY_DP_MPLLB_SSC_EN__SHIFT 0x8
+#define RDPCSTX2_RDPCSTX_PHY_CNTL12__RDPCS_PHY_DP_MPLLB_DIV5_CLK_EN_MASK 0x00000001L
+#define RDPCSTX2_RDPCSTX_PHY_CNTL12__RDPCS_PHY_DP_MPLLB_WORD_DIV2_EN_MASK 0x00000004L
+#define RDPCSTX2_RDPCSTX_PHY_CNTL12__RDPCS_PHY_DP_MPLLB_TX_CLK_DIV_MASK 0x00000070L
+#define RDPCSTX2_RDPCSTX_PHY_CNTL12__RDPCS_PHY_DP_MPLLB_STATE_MASK 0x00000080L
+#define RDPCSTX2_RDPCSTX_PHY_CNTL12__RDPCS_PHY_DP_MPLLB_SSC_EN_MASK 0x00000100L
+//RDPCSTX2_RDPCSTX_PHY_CNTL13
+#define RDPCSTX2_RDPCSTX_PHY_CNTL13__RDPCS_PHY_DP_MPLLB_DIV_MULTIPLIER__SHIFT 0x14
+#define RDPCSTX2_RDPCSTX_PHY_CNTL13__RDPCS_PHY_DP_MPLLB_DIV_CLK_EN__SHIFT 0x1c
+#define RDPCSTX2_RDPCSTX_PHY_CNTL13__RDPCS_PHY_DP_MPLLB_FORCE_EN__SHIFT 0x1d
+#define RDPCSTX2_RDPCSTX_PHY_CNTL13__RDPCS_PHY_DP_MPLLB_INIT_CAL_DISABLE__SHIFT 0x1e
+#define RDPCSTX2_RDPCSTX_PHY_CNTL13__RDPCS_PHY_DP_MPLLB_DIV_MULTIPLIER_MASK 0x0FF00000L
+#define RDPCSTX2_RDPCSTX_PHY_CNTL13__RDPCS_PHY_DP_MPLLB_DIV_CLK_EN_MASK 0x10000000L
+#define RDPCSTX2_RDPCSTX_PHY_CNTL13__RDPCS_PHY_DP_MPLLB_FORCE_EN_MASK 0x20000000L
+#define RDPCSTX2_RDPCSTX_PHY_CNTL13__RDPCS_PHY_DP_MPLLB_INIT_CAL_DISABLE_MASK 0x40000000L
+//RDPCSTX2_RDPCSTX_PHY_CNTL14
+#define RDPCSTX2_RDPCSTX_PHY_CNTL14__RDPCS_PHY_DP_MPLLB_CAL_FORCE__SHIFT 0x0
+#define RDPCSTX2_RDPCSTX_PHY_CNTL14__RDPCS_PHY_DP_MPLLB_FRACN_EN__SHIFT 0x18
+#define RDPCSTX2_RDPCSTX_PHY_CNTL14__RDPCS_PHY_DP_MPLLB_PMIX_EN__SHIFT 0x1c
+#define RDPCSTX2_RDPCSTX_PHY_CNTL14__RDPCS_PHY_DP_MPLLB_CAL_FORCE_MASK 0x00000001L
+#define RDPCSTX2_RDPCSTX_PHY_CNTL14__RDPCS_PHY_DP_MPLLB_FRACN_EN_MASK 0x01000000L
+#define RDPCSTX2_RDPCSTX_PHY_CNTL14__RDPCS_PHY_DP_MPLLB_PMIX_EN_MASK 0x10000000L
+//RDPCSTX2_RDPCSTX_PHY_FUSE0
+#define RDPCSTX2_RDPCSTX_PHY_FUSE0__RDPCS_PHY_DP_TX0_EQ_MAIN__SHIFT 0x0
+#define RDPCSTX2_RDPCSTX_PHY_FUSE0__RDPCS_PHY_DP_TX0_EQ_PRE__SHIFT 0x6
+#define RDPCSTX2_RDPCSTX_PHY_FUSE0__RDPCS_PHY_DP_TX0_EQ_POST__SHIFT 0xc
+#define RDPCSTX2_RDPCSTX_PHY_FUSE0__RDPCS_PHY_DP_MPLLB_V2I__SHIFT 0x12
+#define RDPCSTX2_RDPCSTX_PHY_FUSE0__RDPCS_PHY_DP_MPLLB_FREQ_VCO__SHIFT 0x14
+#define RDPCSTX2_RDPCSTX_PHY_FUSE0__RDPCS_PHY_DP_TX0_EQ_MAIN_MASK 0x0000003FL
+#define RDPCSTX2_RDPCSTX_PHY_FUSE0__RDPCS_PHY_DP_TX0_EQ_PRE_MASK 0x00000FC0L
+#define RDPCSTX2_RDPCSTX_PHY_FUSE0__RDPCS_PHY_DP_TX0_EQ_POST_MASK 0x0003F000L
+#define RDPCSTX2_RDPCSTX_PHY_FUSE0__RDPCS_PHY_DP_MPLLB_V2I_MASK 0x000C0000L
+#define RDPCSTX2_RDPCSTX_PHY_FUSE0__RDPCS_PHY_DP_MPLLB_FREQ_VCO_MASK 0x00300000L
+//RDPCSTX2_RDPCSTX_PHY_FUSE1
+#define RDPCSTX2_RDPCSTX_PHY_FUSE1__RDPCS_PHY_DP_TX1_EQ_MAIN__SHIFT 0x0
+#define RDPCSTX2_RDPCSTX_PHY_FUSE1__RDPCS_PHY_DP_TX1_EQ_PRE__SHIFT 0x6
+#define RDPCSTX2_RDPCSTX_PHY_FUSE1__RDPCS_PHY_DP_TX1_EQ_POST__SHIFT 0xc
+#define RDPCSTX2_RDPCSTX_PHY_FUSE1__RDPCS_PHY_DP_MPLLB_CP_INT__SHIFT 0x12
+#define RDPCSTX2_RDPCSTX_PHY_FUSE1__RDPCS_PHY_DP_MPLLB_CP_PROP__SHIFT 0x19
+#define RDPCSTX2_RDPCSTX_PHY_FUSE1__RDPCS_PHY_DP_TX1_EQ_MAIN_MASK 0x0000003FL
+#define RDPCSTX2_RDPCSTX_PHY_FUSE1__RDPCS_PHY_DP_TX1_EQ_PRE_MASK 0x00000FC0L
+#define RDPCSTX2_RDPCSTX_PHY_FUSE1__RDPCS_PHY_DP_TX1_EQ_POST_MASK 0x0003F000L
+#define RDPCSTX2_RDPCSTX_PHY_FUSE1__RDPCS_PHY_DP_MPLLB_CP_INT_MASK 0x01FC0000L
+#define RDPCSTX2_RDPCSTX_PHY_FUSE1__RDPCS_PHY_DP_MPLLB_CP_PROP_MASK 0xFE000000L
+//RDPCSTX2_RDPCSTX_PHY_FUSE2
+#define RDPCSTX2_RDPCSTX_PHY_FUSE2__RDPCS_PHY_DP_TX2_EQ_MAIN__SHIFT 0x0
+#define RDPCSTX2_RDPCSTX_PHY_FUSE2__RDPCS_PHY_DP_TX2_EQ_PRE__SHIFT 0x6
+#define RDPCSTX2_RDPCSTX_PHY_FUSE2__RDPCS_PHY_DP_TX2_EQ_POST__SHIFT 0xc
+#define RDPCSTX2_RDPCSTX_PHY_FUSE2__RDPCS_PHY_DP_TX2_EQ_MAIN_MASK 0x0000003FL
+#define RDPCSTX2_RDPCSTX_PHY_FUSE2__RDPCS_PHY_DP_TX2_EQ_PRE_MASK 0x00000FC0L
+#define RDPCSTX2_RDPCSTX_PHY_FUSE2__RDPCS_PHY_DP_TX2_EQ_POST_MASK 0x0003F000L
+//RDPCSTX2_RDPCSTX_PHY_FUSE3
+#define RDPCSTX2_RDPCSTX_PHY_FUSE3__RDPCS_PHY_DP_TX3_EQ_MAIN__SHIFT 0x0
+#define RDPCSTX2_RDPCSTX_PHY_FUSE3__RDPCS_PHY_DP_TX3_EQ_PRE__SHIFT 0x6
+#define RDPCSTX2_RDPCSTX_PHY_FUSE3__RDPCS_PHY_DP_TX3_EQ_POST__SHIFT 0xc
+#define RDPCSTX2_RDPCSTX_PHY_FUSE3__RDPCS_PHY_DCO_FINETUNE__SHIFT 0x12
+#define RDPCSTX2_RDPCSTX_PHY_FUSE3__RDPCS_PHY_DCO_RANGE__SHIFT 0x18
+#define RDPCSTX2_RDPCSTX_PHY_FUSE3__RDPCS_PHY_DP_TX3_EQ_MAIN_MASK 0x0000003FL
+#define RDPCSTX2_RDPCSTX_PHY_FUSE3__RDPCS_PHY_DP_TX3_EQ_PRE_MASK 0x00000FC0L
+#define RDPCSTX2_RDPCSTX_PHY_FUSE3__RDPCS_PHY_DP_TX3_EQ_POST_MASK 0x0003F000L
+#define RDPCSTX2_RDPCSTX_PHY_FUSE3__RDPCS_PHY_DCO_FINETUNE_MASK 0x00FC0000L
+#define RDPCSTX2_RDPCSTX_PHY_FUSE3__RDPCS_PHY_DCO_RANGE_MASK 0x03000000L
+//RDPCSTX2_RDPCSTX_PHY_RX_LD_VAL
+#define RDPCSTX2_RDPCSTX_PHY_RX_LD_VAL__RDPCS_PHY_RX_REF_LD_VAL__SHIFT 0x0
+#define RDPCSTX2_RDPCSTX_PHY_RX_LD_VAL__RDPCS_PHY_RX_VCO_LD_VAL__SHIFT 0x8
+#define RDPCSTX2_RDPCSTX_PHY_RX_LD_VAL__RDPCS_PHY_RX_REF_LD_VAL_MASK 0x0000007FL
+#define RDPCSTX2_RDPCSTX_PHY_RX_LD_VAL__RDPCS_PHY_RX_VCO_LD_VAL_MASK 0x001FFF00L
+//RDPCSTX2_RDPCSTX_DMCU_DPALT_PHY_CNTL3
+#define RDPCSTX2_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX0_RESET_RESERVED__SHIFT 0x0
+#define RDPCSTX2_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX0_DISABLE_RESERVED__SHIFT 0x1
+#define RDPCSTX2_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX0_CLK_RDY_RESERVED__SHIFT 0x2
+#define RDPCSTX2_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX0_DATA_EN_RESERVED__SHIFT 0x3
+#define RDPCSTX2_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX0_REQ_RESERVED__SHIFT 0x4
+#define RDPCSTX2_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX0_ACK_RESERVED__SHIFT 0x5
+#define RDPCSTX2_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX1_RESET_RESERVED__SHIFT 0x8
+#define RDPCSTX2_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX1_DISABLE_RESERVED__SHIFT 0x9
+#define RDPCSTX2_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX1_CLK_RDY_RESERVED__SHIFT 0xa
+#define RDPCSTX2_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX1_DATA_EN_RESERVED__SHIFT 0xb
+#define RDPCSTX2_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX1_REQ_RESERVED__SHIFT 0xc
+#define RDPCSTX2_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX1_ACK_RESERVED__SHIFT 0xd
+#define RDPCSTX2_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX2_RESET_RESERVED__SHIFT 0x10
+#define RDPCSTX2_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX2_DISABLE_RESERVED__SHIFT 0x11
+#define RDPCSTX2_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX2_CLK_RDY_RESERVED__SHIFT 0x12
+#define RDPCSTX2_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX2_DATA_EN_RESERVED__SHIFT 0x13
+#define RDPCSTX2_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX2_REQ_RESERVED__SHIFT 0x14
+#define RDPCSTX2_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX2_ACK_RESERVED__SHIFT 0x15
+#define RDPCSTX2_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX3_RESET_RESERVED__SHIFT 0x18
+#define RDPCSTX2_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX3_DISABLE_RESERVED__SHIFT 0x19
+#define RDPCSTX2_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX3_CLK_RDY_RESERVED__SHIFT 0x1a
+#define RDPCSTX2_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX3_DATA_EN_RESERVED__SHIFT 0x1b
+#define RDPCSTX2_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX3_REQ_RESERVED__SHIFT 0x1c
+#define RDPCSTX2_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX3_ACK_RESERVED__SHIFT 0x1d
+#define RDPCSTX2_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX0_RESET_RESERVED_MASK 0x00000001L
+#define RDPCSTX2_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX0_DISABLE_RESERVED_MASK 0x00000002L
+#define RDPCSTX2_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX0_CLK_RDY_RESERVED_MASK 0x00000004L
+#define RDPCSTX2_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX0_DATA_EN_RESERVED_MASK 0x00000008L
+#define RDPCSTX2_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX0_REQ_RESERVED_MASK 0x00000010L
+#define RDPCSTX2_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX0_ACK_RESERVED_MASK 0x00000020L
+#define RDPCSTX2_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX1_RESET_RESERVED_MASK 0x00000100L
+#define RDPCSTX2_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX1_DISABLE_RESERVED_MASK 0x00000200L
+#define RDPCSTX2_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX1_CLK_RDY_RESERVED_MASK 0x00000400L
+#define RDPCSTX2_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX1_DATA_EN_RESERVED_MASK 0x00000800L
+#define RDPCSTX2_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX1_REQ_RESERVED_MASK 0x00001000L
+#define RDPCSTX2_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX1_ACK_RESERVED_MASK 0x00002000L
+#define RDPCSTX2_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX2_RESET_RESERVED_MASK 0x00010000L
+#define RDPCSTX2_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX2_DISABLE_RESERVED_MASK 0x00020000L
+#define RDPCSTX2_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX2_CLK_RDY_RESERVED_MASK 0x00040000L
+#define RDPCSTX2_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX2_DATA_EN_RESERVED_MASK 0x00080000L
+#define RDPCSTX2_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX2_REQ_RESERVED_MASK 0x00100000L
+#define RDPCSTX2_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX2_ACK_RESERVED_MASK 0x00200000L
+#define RDPCSTX2_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX3_RESET_RESERVED_MASK 0x01000000L
+#define RDPCSTX2_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX3_DISABLE_RESERVED_MASK 0x02000000L
+#define RDPCSTX2_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX3_CLK_RDY_RESERVED_MASK 0x04000000L
+#define RDPCSTX2_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX3_DATA_EN_RESERVED_MASK 0x08000000L
+#define RDPCSTX2_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX3_REQ_RESERVED_MASK 0x10000000L
+#define RDPCSTX2_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX3_ACK_RESERVED_MASK 0x20000000L
+//RDPCSTX2_RDPCSTX_DMCU_DPALT_PHY_CNTL6
+#define RDPCSTX2_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_TX0_PSTATE_RESERVED__SHIFT 0x0
+#define RDPCSTX2_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_TX0_MPLL_EN_RESERVED__SHIFT 0x2
+#define RDPCSTX2_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_TX1_PSTATE_RESERVED__SHIFT 0x4
+#define RDPCSTX2_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_TX1_MPLL_EN_RESERVED__SHIFT 0x6
+#define RDPCSTX2_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_TX2_PSTATE_RESERVED__SHIFT 0x8
+#define RDPCSTX2_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_TX2_MPLL_EN_RESERVED__SHIFT 0xa
+#define RDPCSTX2_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_TX3_PSTATE_RESERVED__SHIFT 0xc
+#define RDPCSTX2_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_TX3_MPLL_EN_RESERVED__SHIFT 0xe
+#define RDPCSTX2_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DPALT_DP4_RESERVED__SHIFT 0x10
+#define RDPCSTX2_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DPALT_DISABLE_RESERVED__SHIFT 0x11
+#define RDPCSTX2_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DPALT_DISABLE_ACK_RESERVED__SHIFT 0x12
+#define RDPCSTX2_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_REF_CLK_EN_RESERVED__SHIFT 0x13
+#define RDPCSTX2_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_REF_CLK_REQ_RESERVED__SHIFT 0x14
+#define RDPCSTX2_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_TX0_PSTATE_RESERVED_MASK 0x00000003L
+#define RDPCSTX2_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_TX0_MPLL_EN_RESERVED_MASK 0x00000004L
+#define RDPCSTX2_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_TX1_PSTATE_RESERVED_MASK 0x00000030L
+#define RDPCSTX2_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_TX1_MPLL_EN_RESERVED_MASK 0x00000040L
+#define RDPCSTX2_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_TX2_PSTATE_RESERVED_MASK 0x00000300L
+#define RDPCSTX2_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_TX2_MPLL_EN_RESERVED_MASK 0x00000400L
+#define RDPCSTX2_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_TX3_PSTATE_RESERVED_MASK 0x00003000L
+#define RDPCSTX2_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_TX3_MPLL_EN_RESERVED_MASK 0x00004000L
+#define RDPCSTX2_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DPALT_DP4_RESERVED_MASK 0x00010000L
+#define RDPCSTX2_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DPALT_DISABLE_RESERVED_MASK 0x00020000L
+#define RDPCSTX2_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DPALT_DISABLE_ACK_RESERVED_MASK 0x00040000L
+#define RDPCSTX2_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_REF_CLK_EN_RESERVED_MASK 0x00080000L
+#define RDPCSTX2_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_REF_CLK_REQ_RESERVED_MASK 0x00100000L
+//RDPCSTX2_RDPCSTX_DPALT_CONTROL_REG
+#define RDPCSTX2_RDPCSTX_DPALT_CONTROL_REG__RDPCS_ALLOW_DRIVER_ACCESS__SHIFT 0x0
+#define RDPCSTX2_RDPCSTX_DPALT_CONTROL_REG__RDPCS_DRIVER_ACCESS_BLOCKED__SHIFT 0x4
+#define RDPCSTX2_RDPCSTX_DPALT_CONTROL_REG__RDPCS_DPALT_CONTROL_SPARE__SHIFT 0x8
+#define RDPCSTX2_RDPCSTX_DPALT_CONTROL_REG__RDPCS_ALLOW_DRIVER_ACCESS_MASK 0x00000001L
+#define RDPCSTX2_RDPCSTX_DPALT_CONTROL_REG__RDPCS_DRIVER_ACCESS_BLOCKED_MASK 0x00000010L
+#define RDPCSTX2_RDPCSTX_DPALT_CONTROL_REG__RDPCS_DPALT_CONTROL_SPARE_MASK 0x0000FF00L
+
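+
+/*
+ * Illustrative sketch (editorial addition, not part of the generated
+ * header): each register field above is described by a __SHIFT/_MASK
+ * pair, used together to extract or update that field in a 32-bit
+ * register value. uint32_t assumes <stdint.h>; the helper names are
+ * hypothetical.
+ */
+static inline uint32_t rdpcstx2_get_phy_ref_range(uint32_t cntl0)
+{
+	/* Isolate bits 13:9 (REF_RANGE) and shift them down to bit 0. */
+	return (cntl0 & RDPCSTX2_RDPCSTX_PHY_CNTL0__RDPCS_PHY_REF_RANGE_MASK) >>
+	       RDPCSTX2_RDPCSTX_PHY_CNTL0__RDPCS_PHY_REF_RANGE__SHIFT;
+}
+
+static inline uint32_t rdpcstx2_set_phy_ref_range(uint32_t cntl0, uint32_t range)
+{
+	/* Clear the field, then merge the new value at its shift position. */
+	cntl0 &= ~RDPCSTX2_RDPCSTX_PHY_CNTL0__RDPCS_PHY_REF_RANGE_MASK;
+	cntl0 |= (range << RDPCSTX2_RDPCSTX_PHY_CNTL0__RDPCS_PHY_REF_RANGE__SHIFT) &
+		 RDPCSTX2_RDPCSTX_PHY_CNTL0__RDPCS_PHY_REF_RANGE_MASK;
+	return cntl0;
+}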
+
+// addressBlock: dpcssys_dpcssys_cr2_dispdec
+//DPCSSYS_CR2_DPCSSYS_CR_ADDR
+#define DPCSSYS_CR2_DPCSSYS_CR_ADDR__RDPCS_TX_CR_ADDR__SHIFT 0x0
+#define DPCSSYS_CR2_DPCSSYS_CR_ADDR__RDPCS_TX_CR_ADDR_MASK 0x0000FFFFL
+//DPCSSYS_CR2_DPCSSYS_CR_DATA
+#define DPCSSYS_CR2_DPCSSYS_CR_DATA__RDPCS_TX_CR_DATA__SHIFT 0x0
+#define DPCSSYS_CR2_DPCSSYS_CR_DATA__RDPCS_TX_CR_DATA_MASK 0x0000FFFFL
+
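+
+/*
+ * Illustrative sketch (editorial addition, not part of the generated
+ * header): the CR_ADDR/CR_DATA pair above suggests an indirect
+ * address/data interface, where the 16-bit CR address is programmed
+ * first and the data register is accessed afterwards. The dpcs_*
+ * accessors and the mmDPCSSYS_CR2_* offsets are hypothetical stand-ins
+ * for the driver's real MMIO helpers and offset definitions.
+ */
+extern void dpcs_write32(uint32_t reg, uint32_t val);	/* hypothetical */
+extern uint32_t dpcs_read32(uint32_t reg);		/* hypothetical */
+
+static inline uint16_t dpcssys_cr2_read(uint32_t cr_addr)
+{
+	dpcs_write32(mmDPCSSYS_CR2_DPCSSYS_CR_ADDR,
+		     (cr_addr << DPCSSYS_CR2_DPCSSYS_CR_ADDR__RDPCS_TX_CR_ADDR__SHIFT) &
+		     DPCSSYS_CR2_DPCSSYS_CR_ADDR__RDPCS_TX_CR_ADDR_MASK);
+	return dpcs_read32(mmDPCSSYS_CR2_DPCSSYS_CR_DATA) &
+	       DPCSSYS_CR2_DPCSSYS_CR_DATA__RDPCS_TX_CR_DATA_MASK;
+}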
+
+// addressBlock: dpcssys_dpcs0_dpcstx3_dispdec
+//DPCSTX3_DPCSTX_TX_CLOCK_CNTL
+#define DPCSTX3_DPCSTX_TX_CLOCK_CNTL__DPCS_SYMCLK_GATE_DIS__SHIFT 0x0
+#define DPCSTX3_DPCSTX_TX_CLOCK_CNTL__DPCS_SYMCLK_EN__SHIFT 0x1
+#define DPCSTX3_DPCSTX_TX_CLOCK_CNTL__DPCS_SYMCLK_CLOCK_ON__SHIFT 0x2
+#define DPCSTX3_DPCSTX_TX_CLOCK_CNTL__DPCS_SYMCLK_DIV2_CLOCK_ON__SHIFT 0x3
+#define DPCSTX3_DPCSTX_TX_CLOCK_CNTL__DPCS_SYMCLK_GATE_DIS_MASK 0x00000001L
+#define DPCSTX3_DPCSTX_TX_CLOCK_CNTL__DPCS_SYMCLK_EN_MASK 0x00000002L
+#define DPCSTX3_DPCSTX_TX_CLOCK_CNTL__DPCS_SYMCLK_CLOCK_ON_MASK 0x00000004L
+#define DPCSTX3_DPCSTX_TX_CLOCK_CNTL__DPCS_SYMCLK_DIV2_CLOCK_ON_MASK 0x00000008L
+//DPCSTX3_DPCSTX_TX_CNTL
+#define DPCSTX3_DPCSTX_TX_CNTL__DPCS_TX_PLL_UPDATE_REQ__SHIFT 0xc
+#define DPCSTX3_DPCSTX_TX_CNTL__DPCS_TX_PLL_UPDATE_PENDING__SHIFT 0xd
+#define DPCSTX3_DPCSTX_TX_CNTL__DPCS_TX_DATA_SWAP__SHIFT 0xe
+#define DPCSTX3_DPCSTX_TX_CNTL__DPCS_TX_DATA_ORDER_INVERT__SHIFT 0xf
+#define DPCSTX3_DPCSTX_TX_CNTL__DPCS_TX_FIFO_EN__SHIFT 0x10
+#define DPCSTX3_DPCSTX_TX_CNTL__DPCS_TX_FIFO_START__SHIFT 0x11
+#define DPCSTX3_DPCSTX_TX_CNTL__DPCS_TX_FIFO_RD_START_DELAY__SHIFT 0x14
+#define DPCSTX3_DPCSTX_TX_CNTL__DPCS_TX_SOFT_RESET__SHIFT 0x1f
+#define DPCSTX3_DPCSTX_TX_CNTL__DPCS_TX_PLL_UPDATE_REQ_MASK 0x00001000L
+#define DPCSTX3_DPCSTX_TX_CNTL__DPCS_TX_PLL_UPDATE_PENDING_MASK 0x00002000L
+#define DPCSTX3_DPCSTX_TX_CNTL__DPCS_TX_DATA_SWAP_MASK 0x00004000L
+#define DPCSTX3_DPCSTX_TX_CNTL__DPCS_TX_DATA_ORDER_INVERT_MASK 0x00008000L
+#define DPCSTX3_DPCSTX_TX_CNTL__DPCS_TX_FIFO_EN_MASK 0x00010000L
+#define DPCSTX3_DPCSTX_TX_CNTL__DPCS_TX_FIFO_START_MASK 0x00020000L
+#define DPCSTX3_DPCSTX_TX_CNTL__DPCS_TX_FIFO_RD_START_DELAY_MASK 0x00F00000L
+#define DPCSTX3_DPCSTX_TX_CNTL__DPCS_TX_SOFT_RESET_MASK 0x80000000L
+//DPCSTX3_DPCSTX_CBUS_CNTL
+#define DPCSTX3_DPCSTX_CBUS_CNTL__DPCS_CBUS_WR_CMD_DELAY__SHIFT 0x0
+#define DPCSTX3_DPCSTX_CBUS_CNTL__DPCS_CBUS_SOFT_RESET__SHIFT 0x1f
+#define DPCSTX3_DPCSTX_CBUS_CNTL__DPCS_CBUS_WR_CMD_DELAY_MASK 0x000000FFL
+#define DPCSTX3_DPCSTX_CBUS_CNTL__DPCS_CBUS_SOFT_RESET_MASK 0x80000000L
+//DPCSTX3_DPCSTX_INTERRUPT_CNTL
+#define DPCSTX3_DPCSTX_INTERRUPT_CNTL__DPCS_REG_FIFO_OVERFLOW__SHIFT 0x0
+#define DPCSTX3_DPCSTX_INTERRUPT_CNTL__DPCS_REG_ERROR_CLR__SHIFT 0x1
+#define DPCSTX3_DPCSTX_INTERRUPT_CNTL__DPCS_REG_FIFO_ERROR_MASK__SHIFT 0x4
+#define DPCSTX3_DPCSTX_INTERRUPT_CNTL__DPCS_TX0_FIFO_ERROR__SHIFT 0x8
+#define DPCSTX3_DPCSTX_INTERRUPT_CNTL__DPCS_TX1_FIFO_ERROR__SHIFT 0x9
+#define DPCSTX3_DPCSTX_INTERRUPT_CNTL__DPCS_TX2_FIFO_ERROR__SHIFT 0xa
+#define DPCSTX3_DPCSTX_INTERRUPT_CNTL__DPCS_TX3_FIFO_ERROR__SHIFT 0xb
+#define DPCSTX3_DPCSTX_INTERRUPT_CNTL__DPCS_TX_ERROR_CLR__SHIFT 0xc
+#define DPCSTX3_DPCSTX_INTERRUPT_CNTL__DPCS_TX_FIFO_ERROR_MASK__SHIFT 0x10
+#define DPCSTX3_DPCSTX_INTERRUPT_CNTL__DPCS_INTERRUPT_MASK__SHIFT 0x14
+#define DPCSTX3_DPCSTX_INTERRUPT_CNTL__DPCS_REG_FIFO_OVERFLOW_MASK 0x00000001L
+#define DPCSTX3_DPCSTX_INTERRUPT_CNTL__DPCS_REG_ERROR_CLR_MASK 0x00000002L
+#define DPCSTX3_DPCSTX_INTERRUPT_CNTL__DPCS_REG_FIFO_ERROR_MASK_MASK 0x00000010L
+#define DPCSTX3_DPCSTX_INTERRUPT_CNTL__DPCS_TX0_FIFO_ERROR_MASK 0x00000100L
+#define DPCSTX3_DPCSTX_INTERRUPT_CNTL__DPCS_TX1_FIFO_ERROR_MASK 0x00000200L
+#define DPCSTX3_DPCSTX_INTERRUPT_CNTL__DPCS_TX2_FIFO_ERROR_MASK 0x00000400L
+#define DPCSTX3_DPCSTX_INTERRUPT_CNTL__DPCS_TX3_FIFO_ERROR_MASK 0x00000800L
+#define DPCSTX3_DPCSTX_INTERRUPT_CNTL__DPCS_TX_ERROR_CLR_MASK 0x00001000L
+#define DPCSTX3_DPCSTX_INTERRUPT_CNTL__DPCS_TX_FIFO_ERROR_MASK_MASK 0x00010000L
+#define DPCSTX3_DPCSTX_INTERRUPT_CNTL__DPCS_INTERRUPT_MASK_MASK 0x00100000L
+//DPCSTX3_DPCSTX_PLL_UPDATE_ADDR
+#define DPCSTX3_DPCSTX_PLL_UPDATE_ADDR__DPCS_PLL_UPDATE_ADDR__SHIFT 0x0
+#define DPCSTX3_DPCSTX_PLL_UPDATE_ADDR__DPCS_PLL_UPDATE_ADDR_MASK 0x0003FFFFL
+//DPCSTX3_DPCSTX_PLL_UPDATE_DATA
+#define DPCSTX3_DPCSTX_PLL_UPDATE_DATA__DPCS_PLL_UPDATE_DATA__SHIFT 0x0
+#define DPCSTX3_DPCSTX_PLL_UPDATE_DATA__DPCS_PLL_UPDATE_DATA_MASK 0xFFFFFFFFL
+//DPCSTX3_DPCSTX_DEBUG_CONFIG
+#define DPCSTX3_DPCSTX_DEBUG_CONFIG__DPCS_DBG_EN__SHIFT 0x0
+#define DPCSTX3_DPCSTX_DEBUG_CONFIG__DPCS_DBG_CFGCLK_SEL__SHIFT 0x1
+#define DPCSTX3_DPCSTX_DEBUG_CONFIG__DPCS_DBG_TX_SYMCLK_SEL__SHIFT 0x4
+#define DPCSTX3_DPCSTX_DEBUG_CONFIG__DPCS_DBG_TX_SYMCLK_DIV2_SEL__SHIFT 0x8
+#define DPCSTX3_DPCSTX_DEBUG_CONFIG__DPCS_DBG_CBUS_DIS__SHIFT 0xe
+#define DPCSTX3_DPCSTX_DEBUG_CONFIG__DPCS_TEST_DEBUG_WRITE_EN__SHIFT 0x10
+#define DPCSTX3_DPCSTX_DEBUG_CONFIG__DPCS_DBG_EN_MASK 0x00000001L
+#define DPCSTX3_DPCSTX_DEBUG_CONFIG__DPCS_DBG_CFGCLK_SEL_MASK 0x0000000EL
+#define DPCSTX3_DPCSTX_DEBUG_CONFIG__DPCS_DBG_TX_SYMCLK_SEL_MASK 0x00000070L
+#define DPCSTX3_DPCSTX_DEBUG_CONFIG__DPCS_DBG_TX_SYMCLK_DIV2_SEL_MASK 0x00000700L
+#define DPCSTX3_DPCSTX_DEBUG_CONFIG__DPCS_DBG_CBUS_DIS_MASK 0x00004000L
+#define DPCSTX3_DPCSTX_DEBUG_CONFIG__DPCS_TEST_DEBUG_WRITE_EN_MASK 0x00010000L
+
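+
+/*
+ * Illustrative sketch (editorial addition, not part of the generated
+ * header): DPCS_TX_PLL_UPDATE_REQ/_PENDING read like a request/ack
+ * pair, so a PLL update would raise REQ and then poll PENDING until
+ * the hardware deasserts it. That handshake semantics is assumed from
+ * the field names alone; dpcs_read32()/dpcs_write32() (declared above)
+ * and the mmDPCSTX3_DPCSTX_TX_CNTL offset are hypothetical.
+ */
+static inline void dpcstx3_request_pll_update(void)
+{
+	uint32_t val = dpcs_read32(mmDPCSTX3_DPCSTX_TX_CNTL);
+
+	dpcs_write32(mmDPCSTX3_DPCSTX_TX_CNTL,
+		     val | DPCSTX3_DPCSTX_TX_CNTL__DPCS_TX_PLL_UPDATE_REQ_MASK);
+	/* Busy-wait until the update is no longer pending (no timeout here). */
+	while (dpcs_read32(mmDPCSTX3_DPCSTX_TX_CNTL) &
+	       DPCSTX3_DPCSTX_TX_CNTL__DPCS_TX_PLL_UPDATE_PENDING_MASK)
+		;
+}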
+
+// addressBlock: dpcssys_dpcs0_rdpcstx3_dispdec
+//RDPCSTX3_RDPCSTX_CNTL
+#define RDPCSTX3_RDPCSTX_CNTL__RDPCS_CBUS_SOFT_RESET__SHIFT 0x0
+#define RDPCSTX3_RDPCSTX_CNTL__RDPCS_SRAM_SOFT_RESET__SHIFT 0x4
+#define RDPCSTX3_RDPCSTX_CNTL__RDPCS_TX_FIFO_LANE0_EN__SHIFT 0xc
+#define RDPCSTX3_RDPCSTX_CNTL__RDPCS_TX_FIFO_LANE1_EN__SHIFT 0xd
+#define RDPCSTX3_RDPCSTX_CNTL__RDPCS_TX_FIFO_LANE2_EN__SHIFT 0xe
+#define RDPCSTX3_RDPCSTX_CNTL__RDPCS_TX_FIFO_LANE3_EN__SHIFT 0xf
+#define RDPCSTX3_RDPCSTX_CNTL__RDPCS_TX_FIFO_EN__SHIFT 0x10
+#define RDPCSTX3_RDPCSTX_CNTL__RDPCS_TX_FIFO_START__SHIFT 0x11
+#define RDPCSTX3_RDPCSTX_CNTL__RDPCS_TX_FIFO_RD_START_DELAY__SHIFT 0x14
+#define RDPCSTX3_RDPCSTX_CNTL__RDPCS_CR_REGISTER_BLOCK_EN__SHIFT 0x18
+#define RDPCSTX3_RDPCSTX_CNTL__RDPCS_NON_DPALT_REGISTER_BLOCK_EN__SHIFT 0x19
+#define RDPCSTX3_RDPCSTX_CNTL__RDPCS_DPALT_BLOCK_STATUS__SHIFT 0x1a
+#define RDPCSTX3_RDPCSTX_CNTL__RDPCS_TX_SOFT_RESET__SHIFT 0x1f
+#define RDPCSTX3_RDPCSTX_CNTL__RDPCS_CBUS_SOFT_RESET_MASK 0x00000001L
+#define RDPCSTX3_RDPCSTX_CNTL__RDPCS_SRAM_SOFT_RESET_MASK 0x00000010L
+#define RDPCSTX3_RDPCSTX_CNTL__RDPCS_TX_FIFO_LANE0_EN_MASK 0x00001000L
+#define RDPCSTX3_RDPCSTX_CNTL__RDPCS_TX_FIFO_LANE1_EN_MASK 0x00002000L
+#define RDPCSTX3_RDPCSTX_CNTL__RDPCS_TX_FIFO_LANE2_EN_MASK 0x00004000L
+#define RDPCSTX3_RDPCSTX_CNTL__RDPCS_TX_FIFO_LANE3_EN_MASK 0x00008000L
+#define RDPCSTX3_RDPCSTX_CNTL__RDPCS_TX_FIFO_EN_MASK 0x00010000L
+#define RDPCSTX3_RDPCSTX_CNTL__RDPCS_TX_FIFO_START_MASK 0x00020000L
+#define RDPCSTX3_RDPCSTX_CNTL__RDPCS_TX_FIFO_RD_START_DELAY_MASK 0x00F00000L
+#define RDPCSTX3_RDPCSTX_CNTL__RDPCS_CR_REGISTER_BLOCK_EN_MASK 0x01000000L
+#define RDPCSTX3_RDPCSTX_CNTL__RDPCS_NON_DPALT_REGISTER_BLOCK_EN_MASK 0x02000000L
+#define RDPCSTX3_RDPCSTX_CNTL__RDPCS_DPALT_BLOCK_STATUS_MASK 0x04000000L
+#define RDPCSTX3_RDPCSTX_CNTL__RDPCS_TX_SOFT_RESET_MASK 0x80000000L
+//RDPCSTX3_RDPCSTX_CLOCK_CNTL
+#define RDPCSTX3_RDPCSTX_CLOCK_CNTL__RDPCS_EXT_REFCLK_EN__SHIFT 0x0
+#define RDPCSTX3_RDPCSTX_CLOCK_CNTL__RDPCS_SYMCLK_DIV2_TX0_EN__SHIFT 0x4
+#define RDPCSTX3_RDPCSTX_CLOCK_CNTL__RDPCS_SYMCLK_DIV2_TX1_EN__SHIFT 0x5
+#define RDPCSTX3_RDPCSTX_CLOCK_CNTL__RDPCS_SYMCLK_DIV2_TX2_EN__SHIFT 0x6
+#define RDPCSTX3_RDPCSTX_CLOCK_CNTL__RDPCS_SYMCLK_DIV2_TX3_EN__SHIFT 0x7
+#define RDPCSTX3_RDPCSTX_CLOCK_CNTL__RDPCS_SYMCLK_DIV2_GATE_DIS__SHIFT 0x8
+#define RDPCSTX3_RDPCSTX_CLOCK_CNTL__RDPCS_SYMCLK_DIV2_EN__SHIFT 0x9
+#define RDPCSTX3_RDPCSTX_CLOCK_CNTL__RDPCS_SYMCLK_DIV2_CLOCK_ON__SHIFT 0xa
+#define RDPCSTX3_RDPCSTX_CLOCK_CNTL__RDPCS_SRAMCLK_GATE_DIS__SHIFT 0xc
+#define RDPCSTX3_RDPCSTX_CLOCK_CNTL__RDPCS_SRAMCLK_EN__SHIFT 0xd
+#define RDPCSTX3_RDPCSTX_CLOCK_CNTL__RDPCS_SRAMCLK_CLOCK_ON__SHIFT 0xe
+#define RDPCSTX3_RDPCSTX_CLOCK_CNTL__RDPCS_SRAMCLK_BYPASS__SHIFT 0x10
+#define RDPCSTX3_RDPCSTX_CLOCK_CNTL__RDPCS_EXT_REFCLK_EN_MASK 0x00000001L
+#define RDPCSTX3_RDPCSTX_CLOCK_CNTL__RDPCS_SYMCLK_DIV2_TX0_EN_MASK 0x00000010L
+#define RDPCSTX3_RDPCSTX_CLOCK_CNTL__RDPCS_SYMCLK_DIV2_TX1_EN_MASK 0x00000020L
+#define RDPCSTX3_RDPCSTX_CLOCK_CNTL__RDPCS_SYMCLK_DIV2_TX2_EN_MASK 0x00000040L
+#define RDPCSTX3_RDPCSTX_CLOCK_CNTL__RDPCS_SYMCLK_DIV2_TX3_EN_MASK 0x00000080L
+#define RDPCSTX3_RDPCSTX_CLOCK_CNTL__RDPCS_SYMCLK_DIV2_GATE_DIS_MASK 0x00000100L
+#define RDPCSTX3_RDPCSTX_CLOCK_CNTL__RDPCS_SYMCLK_DIV2_EN_MASK 0x00000200L
+#define RDPCSTX3_RDPCSTX_CLOCK_CNTL__RDPCS_SYMCLK_DIV2_CLOCK_ON_MASK 0x00000400L
+#define RDPCSTX3_RDPCSTX_CLOCK_CNTL__RDPCS_SRAMCLK_GATE_DIS_MASK 0x00001000L
+#define RDPCSTX3_RDPCSTX_CLOCK_CNTL__RDPCS_SRAMCLK_EN_MASK 0x00002000L
+#define RDPCSTX3_RDPCSTX_CLOCK_CNTL__RDPCS_SRAMCLK_CLOCK_ON_MASK 0x00004000L
+#define RDPCSTX3_RDPCSTX_CLOCK_CNTL__RDPCS_SRAMCLK_BYPASS_MASK 0x00010000L
+//RDPCSTX3_RDPCSTX_INTERRUPT_CONTROL
+#define RDPCSTX3_RDPCSTX_INTERRUPT_CONTROL__RDPCS_REG_FIFO_OVERFLOW__SHIFT 0x0
+#define RDPCSTX3_RDPCSTX_INTERRUPT_CONTROL__RDPCS_DPALT_DISABLE_TOGGLE__SHIFT 0x1
+#define RDPCSTX3_RDPCSTX_INTERRUPT_CONTROL__RDPCS_DPALT_4LANE_TOGGLE__SHIFT 0x2
+#define RDPCSTX3_RDPCSTX_INTERRUPT_CONTROL__RDPCS_TX0_FIFO_ERROR__SHIFT 0x4
+#define RDPCSTX3_RDPCSTX_INTERRUPT_CONTROL__RDPCS_TX1_FIFO_ERROR__SHIFT 0x5
+#define RDPCSTX3_RDPCSTX_INTERRUPT_CONTROL__RDPCS_TX2_FIFO_ERROR__SHIFT 0x6
+#define RDPCSTX3_RDPCSTX_INTERRUPT_CONTROL__RDPCS_TX3_FIFO_ERROR__SHIFT 0x7
+#define RDPCSTX3_RDPCSTX_INTERRUPT_CONTROL__RDPCS_REG_ERROR_CLR__SHIFT 0x8
+#define RDPCSTX3_RDPCSTX_INTERRUPT_CONTROL__RDPCS_DPALT_DISABLE_TOGGLE_CLR__SHIFT 0x9
+#define RDPCSTX3_RDPCSTX_INTERRUPT_CONTROL__RDPCS_DPALT_4LANE_TOGGLE_CLR__SHIFT 0xa
+#define RDPCSTX3_RDPCSTX_INTERRUPT_CONTROL__RDPCS_TX_ERROR_CLR__SHIFT 0xc
+#define RDPCSTX3_RDPCSTX_INTERRUPT_CONTROL__RDPCS_REG_FIFO_ERROR_MASK__SHIFT 0x10
+#define RDPCSTX3_RDPCSTX_INTERRUPT_CONTROL__RDPCS_DPALT_DISABLE_TOGGLE_MASK__SHIFT 0x11
+#define RDPCSTX3_RDPCSTX_INTERRUPT_CONTROL__RDPCS_DPALT_4LANE_TOGGLE_MASK__SHIFT 0x12
+#define RDPCSTX3_RDPCSTX_INTERRUPT_CONTROL__RDPCS_TX_FIFO_ERROR_MASK__SHIFT 0x14
+#define RDPCSTX3_RDPCSTX_INTERRUPT_CONTROL__RDPCS_REG_FIFO_OVERFLOW_MASK 0x00000001L
+#define RDPCSTX3_RDPCSTX_INTERRUPT_CONTROL__RDPCS_DPALT_DISABLE_TOGGLE_MASK 0x00000002L
+#define RDPCSTX3_RDPCSTX_INTERRUPT_CONTROL__RDPCS_DPALT_4LANE_TOGGLE_MASK 0x00000004L
+#define RDPCSTX3_RDPCSTX_INTERRUPT_CONTROL__RDPCS_TX0_FIFO_ERROR_MASK 0x00000010L
+#define RDPCSTX3_RDPCSTX_INTERRUPT_CONTROL__RDPCS_TX1_FIFO_ERROR_MASK 0x00000020L
+#define RDPCSTX3_RDPCSTX_INTERRUPT_CONTROL__RDPCS_TX2_FIFO_ERROR_MASK 0x00000040L
+#define RDPCSTX3_RDPCSTX_INTERRUPT_CONTROL__RDPCS_TX3_FIFO_ERROR_MASK 0x00000080L
+#define RDPCSTX3_RDPCSTX_INTERRUPT_CONTROL__RDPCS_REG_ERROR_CLR_MASK 0x00000100L
+#define RDPCSTX3_RDPCSTX_INTERRUPT_CONTROL__RDPCS_DPALT_DISABLE_TOGGLE_CLR_MASK 0x00000200L
+#define RDPCSTX3_RDPCSTX_INTERRUPT_CONTROL__RDPCS_DPALT_4LANE_TOGGLE_CLR_MASK 0x00000400L
+#define RDPCSTX3_RDPCSTX_INTERRUPT_CONTROL__RDPCS_TX_ERROR_CLR_MASK 0x00001000L
+#define RDPCSTX3_RDPCSTX_INTERRUPT_CONTROL__RDPCS_REG_FIFO_ERROR_MASK_MASK 0x00010000L
+#define RDPCSTX3_RDPCSTX_INTERRUPT_CONTROL__RDPCS_DPALT_DISABLE_TOGGLE_MASK_MASK 0x00020000L
+#define RDPCSTX3_RDPCSTX_INTERRUPT_CONTROL__RDPCS_DPALT_4LANE_TOGGLE_MASK_MASK 0x00040000L
+#define RDPCSTX3_RDPCSTX_INTERRUPT_CONTROL__RDPCS_TX_FIFO_ERROR_MASK_MASK 0x00100000L
+//RDPCSTX3_RDPCSTX_PLL_UPDATE_DATA
+#define RDPCSTX3_RDPCSTX_PLL_UPDATE_DATA__RDPCS_PLL_UPDATE_DATA__SHIFT 0x0
+#define RDPCSTX3_RDPCSTX_PLL_UPDATE_DATA__RDPCS_PLL_UPDATE_DATA_MASK 0x00000001L
+//RDPCSTX3_RDPCS_TX_CR_ADDR
+#define RDPCSTX3_RDPCS_TX_CR_ADDR__RDPCS_TX_CR_ADDR__SHIFT 0x0
+#define RDPCSTX3_RDPCS_TX_CR_ADDR__RDPCS_TX_CR_ADDR_MASK 0x0000FFFFL
+//RDPCSTX3_RDPCS_TX_CR_DATA
+#define RDPCSTX3_RDPCS_TX_CR_DATA__RDPCS_TX_CR_DATA__SHIFT 0x0
+#define RDPCSTX3_RDPCS_TX_CR_DATA__RDPCS_TX_CR_DATA_MASK 0x0000FFFFL
+//RDPCSTX3_RDPCS_TX_SRAM_CNTL
+#define RDPCSTX3_RDPCS_TX_SRAM_CNTL__RDPCS_MEM_PWR_DIS__SHIFT 0x14
+#define RDPCSTX3_RDPCS_TX_SRAM_CNTL__RDPCS_MEM_PWR_FORCE__SHIFT 0x18
+#define RDPCSTX3_RDPCS_TX_SRAM_CNTL__RDPCS_MEM_PWR_PWR_STATE__SHIFT 0x1c
+#define RDPCSTX3_RDPCS_TX_SRAM_CNTL__RDPCS_MEM_PWR_DIS_MASK 0x00100000L
+#define RDPCSTX3_RDPCS_TX_SRAM_CNTL__RDPCS_MEM_PWR_FORCE_MASK 0x03000000L
+#define RDPCSTX3_RDPCS_TX_SRAM_CNTL__RDPCS_MEM_PWR_PWR_STATE_MASK 0x30000000L
+//RDPCSTX3_RDPCSTX_MEM_POWER_CTRL
+#define RDPCSTX3_RDPCSTX_MEM_POWER_CTRL__RDPCS_FUSE_RM_FUSES__SHIFT 0x0
+#define RDPCSTX3_RDPCSTX_MEM_POWER_CTRL__RDPCS_FUSE_CUSTOM_RM_FUSES__SHIFT 0xc
+#define RDPCSTX3_RDPCSTX_MEM_POWER_CTRL__RDPCS_MEM_POWER_CTRL_PDP_BC1__SHIFT 0x1a
+#define RDPCSTX3_RDPCSTX_MEM_POWER_CTRL__RDPCS_MEM_POWER_CTRL_PDP_BC2__SHIFT 0x1b
+#define RDPCSTX3_RDPCSTX_MEM_POWER_CTRL__RDPCS_MEM_POWER_CTRL_HD_BC1__SHIFT 0x1c
+#define RDPCSTX3_RDPCSTX_MEM_POWER_CTRL__RDPCS_MEM_POWER_CTRL_HD_BC2__SHIFT 0x1d
+#define RDPCSTX3_RDPCSTX_MEM_POWER_CTRL__RDPCS_LIVMIN_DIS_SRAM__SHIFT 0x1e
+#define RDPCSTX3_RDPCSTX_MEM_POWER_CTRL__RDPCS_FUSE_RM_FUSES_MASK 0x00000FFFL
+#define RDPCSTX3_RDPCSTX_MEM_POWER_CTRL__RDPCS_FUSE_CUSTOM_RM_FUSES_MASK 0x03FFF000L
+#define RDPCSTX3_RDPCSTX_MEM_POWER_CTRL__RDPCS_MEM_POWER_CTRL_PDP_BC1_MASK 0x04000000L
+#define RDPCSTX3_RDPCSTX_MEM_POWER_CTRL__RDPCS_MEM_POWER_CTRL_PDP_BC2_MASK 0x08000000L
+#define RDPCSTX3_RDPCSTX_MEM_POWER_CTRL__RDPCS_MEM_POWER_CTRL_HD_BC1_MASK 0x10000000L
+#define RDPCSTX3_RDPCSTX_MEM_POWER_CTRL__RDPCS_MEM_POWER_CTRL_HD_BC2_MASK 0x20000000L
+#define RDPCSTX3_RDPCSTX_MEM_POWER_CTRL__RDPCS_LIVMIN_DIS_SRAM_MASK 0x40000000L
+//RDPCSTX3_RDPCSTX_MEM_POWER_CTRL2
+#define RDPCSTX3_RDPCSTX_MEM_POWER_CTRL2__RDPCS_MEM_POWER_CTRL_POFF__SHIFT 0x0
+#define RDPCSTX3_RDPCSTX_MEM_POWER_CTRL2__RDPCS_MEM_POWER_CTRL_FISO__SHIFT 0x2
+#define RDPCSTX3_RDPCSTX_MEM_POWER_CTRL2__RDPCS_MEM_POWER_CTRL_POFF_MASK 0x00000003L
+#define RDPCSTX3_RDPCSTX_MEM_POWER_CTRL2__RDPCS_MEM_POWER_CTRL_FISO_MASK 0x00000004L
+//RDPCSTX3_RDPCSTX_SCRATCH
+#define RDPCSTX3_RDPCSTX_SCRATCH__RDPCSTX_SCRATCH__SHIFT 0x0
+#define RDPCSTX3_RDPCSTX_SCRATCH__RDPCSTX_SCRATCH_MASK 0xFFFFFFFFL
+//RDPCSTX3_RDPCSTX_DMCU_DPALT_DIS_BLOCK_REG
+#define RDPCSTX3_RDPCSTX_DMCU_DPALT_DIS_BLOCK_REG__RDPCS_DMCU_DPALT_DIS_BLOCK_REG__SHIFT 0x0
+#define RDPCSTX3_RDPCSTX_DMCU_DPALT_DIS_BLOCK_REG__RDPCS_DMCU_DPALT_FORCE_SYMCLK_DIV2_DIS__SHIFT 0x4
+#define RDPCSTX3_RDPCSTX_DMCU_DPALT_DIS_BLOCK_REG__RDPCS_DMCU_DPALT_CONTROL_SPARE__SHIFT 0x8
+#define RDPCSTX3_RDPCSTX_DMCU_DPALT_DIS_BLOCK_REG__RDPCS_DMCU_DPALT_DIS_BLOCK_REG_MASK 0x00000001L
+#define RDPCSTX3_RDPCSTX_DMCU_DPALT_DIS_BLOCK_REG__RDPCS_DMCU_DPALT_FORCE_SYMCLK_DIV2_DIS_MASK 0x00000010L
+#define RDPCSTX3_RDPCSTX_DMCU_DPALT_DIS_BLOCK_REG__RDPCS_DMCU_DPALT_CONTROL_SPARE_MASK 0x0000FF00L
+//RDPCSTX3_RDPCSTX_DEBUG_CONFIG
+#define RDPCSTX3_RDPCSTX_DEBUG_CONFIG__RDPCS_DBG_EN__SHIFT 0x0
+#define RDPCSTX3_RDPCSTX_DEBUG_CONFIG__RDPCS_DBG_SEL_ASYNC_8BIT__SHIFT 0x4
+#define RDPCSTX3_RDPCSTX_DEBUG_CONFIG__RDPCS_DBG_SEL_ASYNC_SWAP__SHIFT 0x7
+#define RDPCSTX3_RDPCSTX_DEBUG_CONFIG__RDPCS_DBG_SEL_TEST_CLK__SHIFT 0x8
+#define RDPCSTX3_RDPCSTX_DEBUG_CONFIG__RDPCS_DBG_CR_COUNT_EXPIRE__SHIFT 0xf
+#define RDPCSTX3_RDPCSTX_DEBUG_CONFIG__RDPCS_DBG_CR_COUNT_MAX__SHIFT 0x10
+#define RDPCSTX3_RDPCSTX_DEBUG_CONFIG__RDPCS_DBG_CR_COUNT__SHIFT 0x18
+#define RDPCSTX3_RDPCSTX_DEBUG_CONFIG__RDPCS_DBG_EN_MASK 0x00000001L
+#define RDPCSTX3_RDPCSTX_DEBUG_CONFIG__RDPCS_DBG_SEL_ASYNC_8BIT_MASK 0x00000070L
+#define RDPCSTX3_RDPCSTX_DEBUG_CONFIG__RDPCS_DBG_SEL_ASYNC_SWAP_MASK 0x00000080L
+#define RDPCSTX3_RDPCSTX_DEBUG_CONFIG__RDPCS_DBG_SEL_TEST_CLK_MASK 0x00001F00L
+#define RDPCSTX3_RDPCSTX_DEBUG_CONFIG__RDPCS_DBG_CR_COUNT_EXPIRE_MASK 0x00008000L
+#define RDPCSTX3_RDPCSTX_DEBUG_CONFIG__RDPCS_DBG_CR_COUNT_MAX_MASK 0x00FF0000L
+#define RDPCSTX3_RDPCSTX_DEBUG_CONFIG__RDPCS_DBG_CR_COUNT_MASK 0xFF000000L
+//RDPCSTX3_RDPCSTX_PHY_CNTL0
+#define RDPCSTX3_RDPCSTX_PHY_CNTL0__RDPCS_PHY_RESET__SHIFT 0x0
+#define RDPCSTX3_RDPCSTX_PHY_CNTL0__RDPCS_PHY_TCA_PHY_RESET__SHIFT 0x1
+#define RDPCSTX3_RDPCSTX_PHY_CNTL0__RDPCS_PHY_TCA_APB_RESET_N__SHIFT 0x2
+#define RDPCSTX3_RDPCSTX_PHY_CNTL0__RDPCS_PHY_TEST_POWERDOWN__SHIFT 0x3
+#define RDPCSTX3_RDPCSTX_PHY_CNTL0__RDPCS_PHY_DTB_OUT__SHIFT 0x4
+#define RDPCSTX3_RDPCSTX_PHY_CNTL0__RDPCS_PHY_HDMIMODE_ENABLE__SHIFT 0x8
+#define RDPCSTX3_RDPCSTX_PHY_CNTL0__RDPCS_PHY_REF_RANGE__SHIFT 0x9
+#define RDPCSTX3_RDPCSTX_PHY_CNTL0__RDPCS_PHY_TX_VBOOST_LVL__SHIFT 0xe
+#define RDPCSTX3_RDPCSTX_PHY_CNTL0__RDPCS_PHY_RTUNE_REQ__SHIFT 0x11
+#define RDPCSTX3_RDPCSTX_PHY_CNTL0__RDPCS_PHY_RTUNE_ACK__SHIFT 0x12
+#define RDPCSTX3_RDPCSTX_PHY_CNTL0__RDPCS_PHY_CR_PARA_SEL__SHIFT 0x14
+#define RDPCSTX3_RDPCSTX_PHY_CNTL0__RDPCS_PHY_CR_MUX_SEL__SHIFT 0x15
+#define RDPCSTX3_RDPCSTX_PHY_CNTL0__RDPCS_PHY_REF_CLKDET_EN__SHIFT 0x18
+#define RDPCSTX3_RDPCSTX_PHY_CNTL0__RDPCS_PHY_REF_CLKDET_RESULT__SHIFT 0x19
+#define RDPCSTX3_RDPCSTX_PHY_CNTL0__RDPCS_SRAM_INIT_DONE__SHIFT 0x1c
+#define RDPCSTX3_RDPCSTX_PHY_CNTL0__RDPCS_SRAM_EXT_LD_DONE__SHIFT 0x1d
+#define RDPCSTX3_RDPCSTX_PHY_CNTL0__RDPCS_SRAM_BYPASS__SHIFT 0x1f
+#define RDPCSTX3_RDPCSTX_PHY_CNTL0__RDPCS_PHY_RESET_MASK 0x00000001L
+#define RDPCSTX3_RDPCSTX_PHY_CNTL0__RDPCS_PHY_TCA_PHY_RESET_MASK 0x00000002L
+#define RDPCSTX3_RDPCSTX_PHY_CNTL0__RDPCS_PHY_TCA_APB_RESET_N_MASK 0x00000004L
+#define RDPCSTX3_RDPCSTX_PHY_CNTL0__RDPCS_PHY_TEST_POWERDOWN_MASK 0x00000008L
+#define RDPCSTX3_RDPCSTX_PHY_CNTL0__RDPCS_PHY_DTB_OUT_MASK 0x00000030L
+#define RDPCSTX3_RDPCSTX_PHY_CNTL0__RDPCS_PHY_HDMIMODE_ENABLE_MASK 0x00000100L
+#define RDPCSTX3_RDPCSTX_PHY_CNTL0__RDPCS_PHY_REF_RANGE_MASK 0x00003E00L
+#define RDPCSTX3_RDPCSTX_PHY_CNTL0__RDPCS_PHY_TX_VBOOST_LVL_MASK 0x0001C000L
+#define RDPCSTX3_RDPCSTX_PHY_CNTL0__RDPCS_PHY_RTUNE_REQ_MASK 0x00020000L
+#define RDPCSTX3_RDPCSTX_PHY_CNTL0__RDPCS_PHY_RTUNE_ACK_MASK 0x00040000L
+#define RDPCSTX3_RDPCSTX_PHY_CNTL0__RDPCS_PHY_CR_PARA_SEL_MASK 0x00100000L
+#define RDPCSTX3_RDPCSTX_PHY_CNTL0__RDPCS_PHY_CR_MUX_SEL_MASK 0x00200000L
+#define RDPCSTX3_RDPCSTX_PHY_CNTL0__RDPCS_PHY_REF_CLKDET_EN_MASK 0x01000000L
+#define RDPCSTX3_RDPCSTX_PHY_CNTL0__RDPCS_PHY_REF_CLKDET_RESULT_MASK 0x02000000L
+#define RDPCSTX3_RDPCSTX_PHY_CNTL0__RDPCS_SRAM_INIT_DONE_MASK 0x10000000L
+#define RDPCSTX3_RDPCSTX_PHY_CNTL0__RDPCS_SRAM_EXT_LD_DONE_MASK 0x20000000L
+#define RDPCSTX3_RDPCSTX_PHY_CNTL0__RDPCS_SRAM_BYPASS_MASK 0x80000000L
+//RDPCSTX3_RDPCSTX_PHY_CNTL1
+#define RDPCSTX3_RDPCSTX_PHY_CNTL1__RDPCS_PHY_PG_MODE_EN__SHIFT 0x0
+#define RDPCSTX3_RDPCSTX_PHY_CNTL1__RDPCS_PHY_PCS_PWR_EN__SHIFT 0x1
+#define RDPCSTX3_RDPCSTX_PHY_CNTL1__RDPCS_PHY_PCS_PWR_STABLE__SHIFT 0x2
+#define RDPCSTX3_RDPCSTX_PHY_CNTL1__RDPCS_PHY_PMA_PWR_EN__SHIFT 0x3
+#define RDPCSTX3_RDPCSTX_PHY_CNTL1__RDPCS_PHY_PMA_PWR_STABLE__SHIFT 0x4
+#define RDPCSTX3_RDPCSTX_PHY_CNTL1__RDPCS_PHY_DP_PG_RESET__SHIFT 0x5
+#define RDPCSTX3_RDPCSTX_PHY_CNTL1__RDPCS_PHY_ANA_PWR_EN__SHIFT 0x6
+#define RDPCSTX3_RDPCSTX_PHY_CNTL1__RDPCS_PHY_ANA_PWR_STABLE__SHIFT 0x7
+#define RDPCSTX3_RDPCSTX_PHY_CNTL1__RDPCS_PHY_PG_MODE_EN_MASK 0x00000001L
+#define RDPCSTX3_RDPCSTX_PHY_CNTL1__RDPCS_PHY_PCS_PWR_EN_MASK 0x00000002L
+#define RDPCSTX3_RDPCSTX_PHY_CNTL1__RDPCS_PHY_PCS_PWR_STABLE_MASK 0x00000004L
+#define RDPCSTX3_RDPCSTX_PHY_CNTL1__RDPCS_PHY_PMA_PWR_EN_MASK 0x00000008L
+#define RDPCSTX3_RDPCSTX_PHY_CNTL1__RDPCS_PHY_PMA_PWR_STABLE_MASK 0x00000010L
+#define RDPCSTX3_RDPCSTX_PHY_CNTL1__RDPCS_PHY_DP_PG_RESET_MASK 0x00000020L
+#define RDPCSTX3_RDPCSTX_PHY_CNTL1__RDPCS_PHY_ANA_PWR_EN_MASK 0x00000040L
+#define RDPCSTX3_RDPCSTX_PHY_CNTL1__RDPCS_PHY_ANA_PWR_STABLE_MASK 0x00000080L
+//RDPCSTX3_RDPCSTX_PHY_CNTL2
+#define RDPCSTX3_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP4_POR__SHIFT 0x3
+#define RDPCSTX3_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP_LANE0_RX2TX_PAR_LB_EN__SHIFT 0x4
+#define RDPCSTX3_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP_LANE1_RX2TX_PAR_LB_EN__SHIFT 0x5
+#define RDPCSTX3_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP_LANE2_RX2TX_PAR_LB_EN__SHIFT 0x6
+#define RDPCSTX3_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP_LANE3_RX2TX_PAR_LB_EN__SHIFT 0x7
+#define RDPCSTX3_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP_LANE0_TX2RX_SER_LB_EN__SHIFT 0x8
+#define RDPCSTX3_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP_LANE1_TX2RX_SER_LB_EN__SHIFT 0x9
+#define RDPCSTX3_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP_LANE2_TX2RX_SER_LB_EN__SHIFT 0xa
+#define RDPCSTX3_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP_LANE3_TX2RX_SER_LB_EN__SHIFT 0xb
+#define RDPCSTX3_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP4_POR_MASK 0x00000008L
+#define RDPCSTX3_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP_LANE0_RX2TX_PAR_LB_EN_MASK 0x00000010L
+#define RDPCSTX3_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP_LANE1_RX2TX_PAR_LB_EN_MASK 0x00000020L
+#define RDPCSTX3_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP_LANE2_RX2TX_PAR_LB_EN_MASK 0x00000040L
+#define RDPCSTX3_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP_LANE3_RX2TX_PAR_LB_EN_MASK 0x00000080L
+#define RDPCSTX3_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP_LANE0_TX2RX_SER_LB_EN_MASK 0x00000100L
+#define RDPCSTX3_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP_LANE1_TX2RX_SER_LB_EN_MASK 0x00000200L
+#define RDPCSTX3_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP_LANE2_TX2RX_SER_LB_EN_MASK 0x00000400L
+#define RDPCSTX3_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP_LANE3_TX2RX_SER_LB_EN_MASK 0x00000800L
+//RDPCSTX3_RDPCSTX_PHY_CNTL3
+#define RDPCSTX3_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX0_RESET__SHIFT 0x0
+#define RDPCSTX3_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX0_DISABLE__SHIFT 0x1
+#define RDPCSTX3_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX0_CLK_RDY__SHIFT 0x2
+#define RDPCSTX3_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX0_DATA_EN__SHIFT 0x3
+#define RDPCSTX3_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX0_REQ__SHIFT 0x4
+#define RDPCSTX3_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX0_ACK__SHIFT 0x5
+#define RDPCSTX3_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX1_RESET__SHIFT 0x8
+#define RDPCSTX3_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX1_DISABLE__SHIFT 0x9
+#define RDPCSTX3_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX1_CLK_RDY__SHIFT 0xa
+#define RDPCSTX3_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX1_DATA_EN__SHIFT 0xb
+#define RDPCSTX3_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX1_REQ__SHIFT 0xc
+#define RDPCSTX3_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX1_ACK__SHIFT 0xd
+#define RDPCSTX3_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX2_RESET__SHIFT 0x10
+#define RDPCSTX3_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX2_DISABLE__SHIFT 0x11
+#define RDPCSTX3_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX2_CLK_RDY__SHIFT 0x12
+#define RDPCSTX3_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX2_DATA_EN__SHIFT 0x13
+#define RDPCSTX3_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX2_REQ__SHIFT 0x14
+#define RDPCSTX3_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX2_ACK__SHIFT 0x15
+#define RDPCSTX3_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX3_RESET__SHIFT 0x18
+#define RDPCSTX3_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX3_DISABLE__SHIFT 0x19
+#define RDPCSTX3_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX3_CLK_RDY__SHIFT 0x1a
+#define RDPCSTX3_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX3_DATA_EN__SHIFT 0x1b
+#define RDPCSTX3_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX3_REQ__SHIFT 0x1c
+#define RDPCSTX3_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX3_ACK__SHIFT 0x1d
+#define RDPCSTX3_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX0_RESET_MASK 0x00000001L
+#define RDPCSTX3_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX0_DISABLE_MASK 0x00000002L
+#define RDPCSTX3_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX0_CLK_RDY_MASK 0x00000004L
+#define RDPCSTX3_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX0_DATA_EN_MASK 0x00000008L
+#define RDPCSTX3_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX0_REQ_MASK 0x00000010L
+#define RDPCSTX3_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX0_ACK_MASK 0x00000020L
+#define RDPCSTX3_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX1_RESET_MASK 0x00000100L
+#define RDPCSTX3_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX1_DISABLE_MASK 0x00000200L
+#define RDPCSTX3_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX1_CLK_RDY_MASK 0x00000400L
+#define RDPCSTX3_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX1_DATA_EN_MASK 0x00000800L
+#define RDPCSTX3_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX1_REQ_MASK 0x00001000L
+#define RDPCSTX3_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX1_ACK_MASK 0x00002000L
+#define RDPCSTX3_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX2_RESET_MASK 0x00010000L
+#define RDPCSTX3_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX2_DISABLE_MASK 0x00020000L
+#define RDPCSTX3_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX2_CLK_RDY_MASK 0x00040000L
+#define RDPCSTX3_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX2_DATA_EN_MASK 0x00080000L
+#define RDPCSTX3_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX2_REQ_MASK 0x00100000L
+#define RDPCSTX3_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX2_ACK_MASK 0x00200000L
+#define RDPCSTX3_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX3_RESET_MASK 0x01000000L
+#define RDPCSTX3_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX3_DISABLE_MASK 0x02000000L
+#define RDPCSTX3_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX3_CLK_RDY_MASK 0x04000000L
+#define RDPCSTX3_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX3_DATA_EN_MASK 0x08000000L
+#define RDPCSTX3_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX3_REQ_MASK 0x10000000L
+#define RDPCSTX3_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX3_ACK_MASK 0x20000000L
+//RDPCSTX3_RDPCSTX_PHY_CNTL4
+#define RDPCSTX3_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX0_TERM_CTRL__SHIFT 0x0
+#define RDPCSTX3_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX0_INVERT__SHIFT 0x4
+#define RDPCSTX3_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX0_BYPASS_EQ_CALC__SHIFT 0x6
+#define RDPCSTX3_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX0_HP_PROT_EN__SHIFT 0x7
+#define RDPCSTX3_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX1_TERM_CTRL__SHIFT 0x8
+#define RDPCSTX3_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX1_INVERT__SHIFT 0xc
+#define RDPCSTX3_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX1_BYPASS_EQ_CALC__SHIFT 0xe
+#define RDPCSTX3_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX1_HP_PROT_EN__SHIFT 0xf
+#define RDPCSTX3_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX2_TERM_CTRL__SHIFT 0x10
+#define RDPCSTX3_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX2_INVERT__SHIFT 0x14
+#define RDPCSTX3_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX2_BYPASS_EQ_CALC__SHIFT 0x16
+#define RDPCSTX3_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX2_HP_PROT_EN__SHIFT 0x17
+#define RDPCSTX3_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX3_TERM_CTRL__SHIFT 0x18
+#define RDPCSTX3_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX3_INVERT__SHIFT 0x1c
+#define RDPCSTX3_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX3_BYPASS_EQ_CALC__SHIFT 0x1e
+#define RDPCSTX3_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX3_HP_PROT_EN__SHIFT 0x1f
+#define RDPCSTX3_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX0_TERM_CTRL_MASK 0x00000007L
+#define RDPCSTX3_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX0_INVERT_MASK 0x00000010L
+#define RDPCSTX3_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX0_BYPASS_EQ_CALC_MASK 0x00000040L
+#define RDPCSTX3_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX0_HP_PROT_EN_MASK 0x00000080L
+#define RDPCSTX3_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX1_TERM_CTRL_MASK 0x00000700L
+#define RDPCSTX3_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX1_INVERT_MASK 0x00001000L
+#define RDPCSTX3_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX1_BYPASS_EQ_CALC_MASK 0x00004000L
+#define RDPCSTX3_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX1_HP_PROT_EN_MASK 0x00008000L
+#define RDPCSTX3_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX2_TERM_CTRL_MASK 0x00070000L
+#define RDPCSTX3_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX2_INVERT_MASK 0x00100000L
+#define RDPCSTX3_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX2_BYPASS_EQ_CALC_MASK 0x00400000L
+#define RDPCSTX3_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX2_HP_PROT_EN_MASK 0x00800000L
+#define RDPCSTX3_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX3_TERM_CTRL_MASK 0x07000000L
+#define RDPCSTX3_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX3_INVERT_MASK 0x10000000L
+#define RDPCSTX3_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX3_BYPASS_EQ_CALC_MASK 0x40000000L
+#define RDPCSTX3_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX3_HP_PROT_EN_MASK 0x80000000L
+//RDPCSTX3_RDPCSTX_PHY_CNTL5
+#define RDPCSTX3_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX0_LPD__SHIFT 0x0
+#define RDPCSTX3_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX0_RATE__SHIFT 0x1
+#define RDPCSTX3_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX0_WIDTH__SHIFT 0x4
+#define RDPCSTX3_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX0_DETRX_REQ__SHIFT 0x6
+#define RDPCSTX3_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX0_DETRX_RESULT__SHIFT 0x7
+#define RDPCSTX3_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX1_LPD__SHIFT 0x8
+#define RDPCSTX3_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX1_RATE__SHIFT 0x9
+#define RDPCSTX3_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX1_WIDTH__SHIFT 0xc
+#define RDPCSTX3_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX1_DETRX_REQ__SHIFT 0xe
+#define RDPCSTX3_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX1_DETRX_RESULT__SHIFT 0xf
+#define RDPCSTX3_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX2_LPD__SHIFT 0x10
+#define RDPCSTX3_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX2_RATE__SHIFT 0x11
+#define RDPCSTX3_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX2_WIDTH__SHIFT 0x14
+#define RDPCSTX3_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX2_DETRX_REQ__SHIFT 0x16
+#define RDPCSTX3_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX2_DETRX_RESULT__SHIFT 0x17
+#define RDPCSTX3_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX3_LPD__SHIFT 0x18
+#define RDPCSTX3_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX3_RATE__SHIFT 0x19
+#define RDPCSTX3_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX3_WIDTH__SHIFT 0x1c
+#define RDPCSTX3_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX3_DETRX_REQ__SHIFT 0x1e
+#define RDPCSTX3_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX3_DETRX_RESULT__SHIFT 0x1f
+#define RDPCSTX3_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX0_LPD_MASK 0x00000001L
+#define RDPCSTX3_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX0_RATE_MASK 0x0000000EL
+#define RDPCSTX3_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX0_WIDTH_MASK 0x00000030L
+#define RDPCSTX3_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX0_DETRX_REQ_MASK 0x00000040L
+#define RDPCSTX3_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX0_DETRX_RESULT_MASK 0x00000080L
+#define RDPCSTX3_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX1_LPD_MASK 0x00000100L
+#define RDPCSTX3_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX1_RATE_MASK 0x00000E00L
+#define RDPCSTX3_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX1_WIDTH_MASK 0x00003000L
+#define RDPCSTX3_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX1_DETRX_REQ_MASK 0x00004000L
+#define RDPCSTX3_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX1_DETRX_RESULT_MASK 0x00008000L
+#define RDPCSTX3_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX2_LPD_MASK 0x00010000L
+#define RDPCSTX3_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX2_RATE_MASK 0x000E0000L
+#define RDPCSTX3_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX2_WIDTH_MASK 0x00300000L
+#define RDPCSTX3_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX2_DETRX_REQ_MASK 0x00400000L
+#define RDPCSTX3_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX2_DETRX_RESULT_MASK 0x00800000L
+#define RDPCSTX3_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX3_LPD_MASK 0x01000000L
+#define RDPCSTX3_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX3_RATE_MASK 0x0E000000L
+#define RDPCSTX3_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX3_WIDTH_MASK 0x30000000L
+#define RDPCSTX3_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX3_DETRX_REQ_MASK 0x40000000L
+#define RDPCSTX3_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX3_DETRX_RESULT_MASK 0x80000000L
+//RDPCSTX3_RDPCSTX_PHY_CNTL6
+#define RDPCSTX3_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX0_PSTATE__SHIFT 0x0
+#define RDPCSTX3_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX0_MPLL_EN__SHIFT 0x2
+#define RDPCSTX3_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX1_PSTATE__SHIFT 0x4
+#define RDPCSTX3_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX1_MPLL_EN__SHIFT 0x6
+#define RDPCSTX3_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX2_PSTATE__SHIFT 0x8
+#define RDPCSTX3_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX2_MPLL_EN__SHIFT 0xa
+#define RDPCSTX3_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX3_PSTATE__SHIFT 0xc
+#define RDPCSTX3_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX3_MPLL_EN__SHIFT 0xe
+#define RDPCSTX3_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DPALT_DP4__SHIFT 0x10
+#define RDPCSTX3_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DPALT_DISABLE__SHIFT 0x11
+#define RDPCSTX3_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DPALT_DISABLE_ACK__SHIFT 0x12
+#define RDPCSTX3_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_REF_CLK_EN__SHIFT 0x13
+#define RDPCSTX3_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_REF_CLK_REQ__SHIFT 0x14
+#define RDPCSTX3_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX0_PSTATE_MASK 0x00000003L
+#define RDPCSTX3_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX0_MPLL_EN_MASK 0x00000004L
+#define RDPCSTX3_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX1_PSTATE_MASK 0x00000030L
+#define RDPCSTX3_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX1_MPLL_EN_MASK 0x00000040L
+#define RDPCSTX3_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX2_PSTATE_MASK 0x00000300L
+#define RDPCSTX3_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX2_MPLL_EN_MASK 0x00000400L
+#define RDPCSTX3_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX3_PSTATE_MASK 0x00003000L
+#define RDPCSTX3_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX3_MPLL_EN_MASK 0x00004000L
+#define RDPCSTX3_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DPALT_DP4_MASK 0x00010000L
+#define RDPCSTX3_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DPALT_DISABLE_MASK 0x00020000L
+#define RDPCSTX3_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DPALT_DISABLE_ACK_MASK 0x00040000L
+#define RDPCSTX3_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_REF_CLK_EN_MASK 0x00080000L
+#define RDPCSTX3_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_REF_CLK_REQ_MASK 0x00100000L
+//RDPCSTX3_RDPCSTX_PHY_CNTL7
+#define RDPCSTX3_RDPCSTX_PHY_CNTL7__RDPCS_PHY_DP_MPLLB_FRACN_DEN__SHIFT 0x0
+#define RDPCSTX3_RDPCSTX_PHY_CNTL7__RDPCS_PHY_DP_MPLLB_FRACN_QUOT__SHIFT 0x10
+#define RDPCSTX3_RDPCSTX_PHY_CNTL7__RDPCS_PHY_DP_MPLLB_FRACN_DEN_MASK 0x0000FFFFL
+#define RDPCSTX3_RDPCSTX_PHY_CNTL7__RDPCS_PHY_DP_MPLLB_FRACN_QUOT_MASK 0xFFFF0000L
+//RDPCSTX3_RDPCSTX_PHY_CNTL8
+#define RDPCSTX3_RDPCSTX_PHY_CNTL8__RDPCS_PHY_DP_MPLLB_SSC_PEAK__SHIFT 0x0
+#define RDPCSTX3_RDPCSTX_PHY_CNTL8__RDPCS_PHY_DP_MPLLB_SSC_PEAK_MASK 0x000FFFFFL
+//RDPCSTX3_RDPCSTX_PHY_CNTL9
+#define RDPCSTX3_RDPCSTX_PHY_CNTL9__RDPCS_PHY_DP_MPLLB_SSC_STEPSIZE__SHIFT 0x0
+#define RDPCSTX3_RDPCSTX_PHY_CNTL9__RDPCS_PHY_DP_MPLLB_SSC_UP_SPREAD__SHIFT 0x18
+#define RDPCSTX3_RDPCSTX_PHY_CNTL9__RDPCS_PHY_DP_MPLLB_SSC_STEPSIZE_MASK 0x001FFFFFL
+#define RDPCSTX3_RDPCSTX_PHY_CNTL9__RDPCS_PHY_DP_MPLLB_SSC_UP_SPREAD_MASK 0x01000000L
+//RDPCSTX3_RDPCSTX_PHY_CNTL10
+#define RDPCSTX3_RDPCSTX_PHY_CNTL10__RDPCS_PHY_DP_MPLLB_FRACN_REM__SHIFT 0x0
+#define RDPCSTX3_RDPCSTX_PHY_CNTL10__RDPCS_PHY_DP_MPLLB_FRACN_REM_MASK 0x0000FFFFL
+//RDPCSTX3_RDPCSTX_PHY_CNTL11
+#define RDPCSTX3_RDPCSTX_PHY_CNTL11__RDPCS_PHY_DP_MPLLB_MULTIPLIER__SHIFT 0x4
+#define RDPCSTX3_RDPCSTX_PHY_CNTL11__RDPCS_PHY_HDMI_MPLLB_HDMI_DIV__SHIFT 0x10
+#define RDPCSTX3_RDPCSTX_PHY_CNTL11__RDPCS_PHY_DP_REF_CLK_MPLLB_DIV__SHIFT 0x14
+#define RDPCSTX3_RDPCSTX_PHY_CNTL11__RDPCS_PHY_HDMI_MPLLB_HDMI_PIXEL_CLK_DIV__SHIFT 0x18
+#define RDPCSTX3_RDPCSTX_PHY_CNTL11__RDPCS_PHY_DP_MPLLB_MULTIPLIER_MASK 0x0000FFF0L
+#define RDPCSTX3_RDPCSTX_PHY_CNTL11__RDPCS_PHY_HDMI_MPLLB_HDMI_DIV_MASK 0x00070000L
+#define RDPCSTX3_RDPCSTX_PHY_CNTL11__RDPCS_PHY_DP_REF_CLK_MPLLB_DIV_MASK 0x00700000L
+#define RDPCSTX3_RDPCSTX_PHY_CNTL11__RDPCS_PHY_HDMI_MPLLB_HDMI_PIXEL_CLK_DIV_MASK 0x03000000L
+//RDPCSTX3_RDPCSTX_PHY_CNTL12
+#define RDPCSTX3_RDPCSTX_PHY_CNTL12__RDPCS_PHY_DP_MPLLB_DIV5_CLK_EN__SHIFT 0x0
+#define RDPCSTX3_RDPCSTX_PHY_CNTL12__RDPCS_PHY_DP_MPLLB_WORD_DIV2_EN__SHIFT 0x2
+#define RDPCSTX3_RDPCSTX_PHY_CNTL12__RDPCS_PHY_DP_MPLLB_TX_CLK_DIV__SHIFT 0x4
+#define RDPCSTX3_RDPCSTX_PHY_CNTL12__RDPCS_PHY_DP_MPLLB_STATE__SHIFT 0x7
+#define RDPCSTX3_RDPCSTX_PHY_CNTL12__RDPCS_PHY_DP_MPLLB_SSC_EN__SHIFT 0x8
+#define RDPCSTX3_RDPCSTX_PHY_CNTL12__RDPCS_PHY_DP_MPLLB_DIV5_CLK_EN_MASK 0x00000001L
+#define RDPCSTX3_RDPCSTX_PHY_CNTL12__RDPCS_PHY_DP_MPLLB_WORD_DIV2_EN_MASK 0x00000004L
+#define RDPCSTX3_RDPCSTX_PHY_CNTL12__RDPCS_PHY_DP_MPLLB_TX_CLK_DIV_MASK 0x00000070L
+#define RDPCSTX3_RDPCSTX_PHY_CNTL12__RDPCS_PHY_DP_MPLLB_STATE_MASK 0x00000080L
+#define RDPCSTX3_RDPCSTX_PHY_CNTL12__RDPCS_PHY_DP_MPLLB_SSC_EN_MASK 0x00000100L
+//RDPCSTX3_RDPCSTX_PHY_CNTL13
+#define RDPCSTX3_RDPCSTX_PHY_CNTL13__RDPCS_PHY_DP_MPLLB_DIV_MULTIPLIER__SHIFT 0x14
+#define RDPCSTX3_RDPCSTX_PHY_CNTL13__RDPCS_PHY_DP_MPLLB_DIV_CLK_EN__SHIFT 0x1c
+#define RDPCSTX3_RDPCSTX_PHY_CNTL13__RDPCS_PHY_DP_MPLLB_FORCE_EN__SHIFT 0x1d
+#define RDPCSTX3_RDPCSTX_PHY_CNTL13__RDPCS_PHY_DP_MPLLB_INIT_CAL_DISABLE__SHIFT 0x1e
+#define RDPCSTX3_RDPCSTX_PHY_CNTL13__RDPCS_PHY_DP_MPLLB_DIV_MULTIPLIER_MASK 0x0FF00000L
+#define RDPCSTX3_RDPCSTX_PHY_CNTL13__RDPCS_PHY_DP_MPLLB_DIV_CLK_EN_MASK 0x10000000L
+#define RDPCSTX3_RDPCSTX_PHY_CNTL13__RDPCS_PHY_DP_MPLLB_FORCE_EN_MASK 0x20000000L
+#define RDPCSTX3_RDPCSTX_PHY_CNTL13__RDPCS_PHY_DP_MPLLB_INIT_CAL_DISABLE_MASK 0x40000000L
+//RDPCSTX3_RDPCSTX_PHY_CNTL14
+#define RDPCSTX3_RDPCSTX_PHY_CNTL14__RDPCS_PHY_DP_MPLLB_CAL_FORCE__SHIFT 0x0
+#define RDPCSTX3_RDPCSTX_PHY_CNTL14__RDPCS_PHY_DP_MPLLB_FRACN_EN__SHIFT 0x18
+#define RDPCSTX3_RDPCSTX_PHY_CNTL14__RDPCS_PHY_DP_MPLLB_PMIX_EN__SHIFT 0x1c
+#define RDPCSTX3_RDPCSTX_PHY_CNTL14__RDPCS_PHY_DP_MPLLB_CAL_FORCE_MASK 0x00000001L
+#define RDPCSTX3_RDPCSTX_PHY_CNTL14__RDPCS_PHY_DP_MPLLB_FRACN_EN_MASK 0x01000000L
+#define RDPCSTX3_RDPCSTX_PHY_CNTL14__RDPCS_PHY_DP_MPLLB_PMIX_EN_MASK 0x10000000L
+//RDPCSTX3_RDPCSTX_PHY_FUSE0
+#define RDPCSTX3_RDPCSTX_PHY_FUSE0__RDPCS_PHY_DP_TX0_EQ_MAIN__SHIFT 0x0
+#define RDPCSTX3_RDPCSTX_PHY_FUSE0__RDPCS_PHY_DP_TX0_EQ_PRE__SHIFT 0x6
+#define RDPCSTX3_RDPCSTX_PHY_FUSE0__RDPCS_PHY_DP_TX0_EQ_POST__SHIFT 0xc
+#define RDPCSTX3_RDPCSTX_PHY_FUSE0__RDPCS_PHY_DP_MPLLB_V2I__SHIFT 0x12
+#define RDPCSTX3_RDPCSTX_PHY_FUSE0__RDPCS_PHY_DP_MPLLB_FREQ_VCO__SHIFT 0x14
+#define RDPCSTX3_RDPCSTX_PHY_FUSE0__RDPCS_PHY_DP_TX0_EQ_MAIN_MASK 0x0000003FL
+#define RDPCSTX3_RDPCSTX_PHY_FUSE0__RDPCS_PHY_DP_TX0_EQ_PRE_MASK 0x00000FC0L
+#define RDPCSTX3_RDPCSTX_PHY_FUSE0__RDPCS_PHY_DP_TX0_EQ_POST_MASK 0x0003F000L
+#define RDPCSTX3_RDPCSTX_PHY_FUSE0__RDPCS_PHY_DP_MPLLB_V2I_MASK 0x000C0000L
+#define RDPCSTX3_RDPCSTX_PHY_FUSE0__RDPCS_PHY_DP_MPLLB_FREQ_VCO_MASK 0x00300000L
+//RDPCSTX3_RDPCSTX_PHY_FUSE1
+#define RDPCSTX3_RDPCSTX_PHY_FUSE1__RDPCS_PHY_DP_TX1_EQ_MAIN__SHIFT 0x0
+#define RDPCSTX3_RDPCSTX_PHY_FUSE1__RDPCS_PHY_DP_TX1_EQ_PRE__SHIFT 0x6
+#define RDPCSTX3_RDPCSTX_PHY_FUSE1__RDPCS_PHY_DP_TX1_EQ_POST__SHIFT 0xc
+#define RDPCSTX3_RDPCSTX_PHY_FUSE1__RDPCS_PHY_DP_MPLLB_CP_INT__SHIFT 0x12
+#define RDPCSTX3_RDPCSTX_PHY_FUSE1__RDPCS_PHY_DP_MPLLB_CP_PROP__SHIFT 0x19
+#define RDPCSTX3_RDPCSTX_PHY_FUSE1__RDPCS_PHY_DP_TX1_EQ_MAIN_MASK 0x0000003FL
+#define RDPCSTX3_RDPCSTX_PHY_FUSE1__RDPCS_PHY_DP_TX1_EQ_PRE_MASK 0x00000FC0L
+#define RDPCSTX3_RDPCSTX_PHY_FUSE1__RDPCS_PHY_DP_TX1_EQ_POST_MASK 0x0003F000L
+#define RDPCSTX3_RDPCSTX_PHY_FUSE1__RDPCS_PHY_DP_MPLLB_CP_INT_MASK 0x01FC0000L
+#define RDPCSTX3_RDPCSTX_PHY_FUSE1__RDPCS_PHY_DP_MPLLB_CP_PROP_MASK 0xFE000000L
+//RDPCSTX3_RDPCSTX_PHY_FUSE2
+#define RDPCSTX3_RDPCSTX_PHY_FUSE2__RDPCS_PHY_DP_TX2_EQ_MAIN__SHIFT 0x0
+#define RDPCSTX3_RDPCSTX_PHY_FUSE2__RDPCS_PHY_DP_TX2_EQ_PRE__SHIFT 0x6
+#define RDPCSTX3_RDPCSTX_PHY_FUSE2__RDPCS_PHY_DP_TX2_EQ_POST__SHIFT 0xc
+#define RDPCSTX3_RDPCSTX_PHY_FUSE2__RDPCS_PHY_DP_TX2_EQ_MAIN_MASK 0x0000003FL
+#define RDPCSTX3_RDPCSTX_PHY_FUSE2__RDPCS_PHY_DP_TX2_EQ_PRE_MASK 0x00000FC0L
+#define RDPCSTX3_RDPCSTX_PHY_FUSE2__RDPCS_PHY_DP_TX2_EQ_POST_MASK 0x0003F000L
+//RDPCSTX3_RDPCSTX_PHY_FUSE3
+#define RDPCSTX3_RDPCSTX_PHY_FUSE3__RDPCS_PHY_DP_TX3_EQ_MAIN__SHIFT 0x0
+#define RDPCSTX3_RDPCSTX_PHY_FUSE3__RDPCS_PHY_DP_TX3_EQ_PRE__SHIFT 0x6
+#define RDPCSTX3_RDPCSTX_PHY_FUSE3__RDPCS_PHY_DP_TX3_EQ_POST__SHIFT 0xc
+#define RDPCSTX3_RDPCSTX_PHY_FUSE3__RDPCS_PHY_DCO_FINETUNE__SHIFT 0x12
+#define RDPCSTX3_RDPCSTX_PHY_FUSE3__RDPCS_PHY_DCO_RANGE__SHIFT 0x18
+#define RDPCSTX3_RDPCSTX_PHY_FUSE3__RDPCS_PHY_DP_TX3_EQ_MAIN_MASK 0x0000003FL
+#define RDPCSTX3_RDPCSTX_PHY_FUSE3__RDPCS_PHY_DP_TX3_EQ_PRE_MASK 0x00000FC0L
+#define RDPCSTX3_RDPCSTX_PHY_FUSE3__RDPCS_PHY_DP_TX3_EQ_POST_MASK 0x0003F000L
+#define RDPCSTX3_RDPCSTX_PHY_FUSE3__RDPCS_PHY_DCO_FINETUNE_MASK 0x00FC0000L
+#define RDPCSTX3_RDPCSTX_PHY_FUSE3__RDPCS_PHY_DCO_RANGE_MASK 0x03000000L
+//RDPCSTX3_RDPCSTX_PHY_RX_LD_VAL
+#define RDPCSTX3_RDPCSTX_PHY_RX_LD_VAL__RDPCS_PHY_RX_REF_LD_VAL__SHIFT 0x0
+#define RDPCSTX3_RDPCSTX_PHY_RX_LD_VAL__RDPCS_PHY_RX_VCO_LD_VAL__SHIFT 0x8
+#define RDPCSTX3_RDPCSTX_PHY_RX_LD_VAL__RDPCS_PHY_RX_REF_LD_VAL_MASK 0x0000007FL
+#define RDPCSTX3_RDPCSTX_PHY_RX_LD_VAL__RDPCS_PHY_RX_VCO_LD_VAL_MASK 0x001FFF00L
+//RDPCSTX3_RDPCSTX_DMCU_DPALT_PHY_CNTL3
+#define RDPCSTX3_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX0_RESET_RESERVED__SHIFT 0x0
+#define RDPCSTX3_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX0_DISABLE_RESERVED__SHIFT 0x1
+#define RDPCSTX3_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX0_CLK_RDY_RESERVED__SHIFT 0x2
+#define RDPCSTX3_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX0_DATA_EN_RESERVED__SHIFT 0x3
+#define RDPCSTX3_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX0_REQ_RESERVED__SHIFT 0x4
+#define RDPCSTX3_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX0_ACK_RESERVED__SHIFT 0x5
+#define RDPCSTX3_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX1_RESET_RESERVED__SHIFT 0x8
+#define RDPCSTX3_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX1_DISABLE_RESERVED__SHIFT 0x9
+#define RDPCSTX3_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX1_CLK_RDY_RESERVED__SHIFT 0xa
+#define RDPCSTX3_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX1_DATA_EN_RESERVED__SHIFT 0xb
+#define RDPCSTX3_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX1_REQ_RESERVED__SHIFT 0xc
+#define RDPCSTX3_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX1_ACK_RESERVED__SHIFT 0xd
+#define RDPCSTX3_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX2_RESET_RESERVED__SHIFT 0x10
+#define RDPCSTX3_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX2_DISABLE_RESERVED__SHIFT 0x11
+#define RDPCSTX3_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX2_CLK_RDY_RESERVED__SHIFT 0x12
+#define RDPCSTX3_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX2_DATA_EN_RESERVED__SHIFT 0x13
+#define RDPCSTX3_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX2_REQ_RESERVED__SHIFT 0x14
+#define RDPCSTX3_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX2_ACK_RESERVED__SHIFT 0x15
+#define RDPCSTX3_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX3_RESET_RESERVED__SHIFT 0x18
+#define RDPCSTX3_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX3_DISABLE_RESERVED__SHIFT 0x19
+#define RDPCSTX3_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX3_CLK_RDY_RESERVED__SHIFT 0x1a
+#define RDPCSTX3_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX3_DATA_EN_RESERVED__SHIFT 0x1b
+#define RDPCSTX3_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX3_REQ_RESERVED__SHIFT 0x1c
+#define RDPCSTX3_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX3_ACK_RESERVED__SHIFT 0x1d
+#define RDPCSTX3_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX0_RESET_RESERVED_MASK 0x00000001L
+#define RDPCSTX3_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX0_DISABLE_RESERVED_MASK 0x00000002L
+#define RDPCSTX3_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX0_CLK_RDY_RESERVED_MASK 0x00000004L
+#define RDPCSTX3_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX0_DATA_EN_RESERVED_MASK 0x00000008L
+#define RDPCSTX3_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX0_REQ_RESERVED_MASK 0x00000010L
+#define RDPCSTX3_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX0_ACK_RESERVED_MASK 0x00000020L
+#define RDPCSTX3_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX1_RESET_RESERVED_MASK 0x00000100L
+#define RDPCSTX3_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX1_DISABLE_RESERVED_MASK 0x00000200L
+#define RDPCSTX3_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX1_CLK_RDY_RESERVED_MASK 0x00000400L
+#define RDPCSTX3_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX1_DATA_EN_RESERVED_MASK 0x00000800L
+#define RDPCSTX3_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX1_REQ_RESERVED_MASK 0x00001000L
+#define RDPCSTX3_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX1_ACK_RESERVED_MASK 0x00002000L
+#define RDPCSTX3_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX2_RESET_RESERVED_MASK 0x00010000L
+#define RDPCSTX3_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX2_DISABLE_RESERVED_MASK 0x00020000L
+#define RDPCSTX3_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX2_CLK_RDY_RESERVED_MASK 0x00040000L
+#define RDPCSTX3_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX2_DATA_EN_RESERVED_MASK 0x00080000L
+#define RDPCSTX3_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX2_REQ_RESERVED_MASK 0x00100000L
+#define RDPCSTX3_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX2_ACK_RESERVED_MASK 0x00200000L
+#define RDPCSTX3_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX3_RESET_RESERVED_MASK 0x01000000L
+#define RDPCSTX3_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX3_DISABLE_RESERVED_MASK 0x02000000L
+#define RDPCSTX3_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX3_CLK_RDY_RESERVED_MASK 0x04000000L
+#define RDPCSTX3_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX3_DATA_EN_RESERVED_MASK 0x08000000L
+#define RDPCSTX3_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX3_REQ_RESERVED_MASK 0x10000000L
+#define RDPCSTX3_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX3_ACK_RESERVED_MASK 0x20000000L
+//RDPCSTX3_RDPCSTX_DMCU_DPALT_PHY_CNTL6
+#define RDPCSTX3_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_TX0_PSTATE_RESERVED__SHIFT 0x0
+#define RDPCSTX3_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_TX0_MPLL_EN_RESERVED__SHIFT 0x2
+#define RDPCSTX3_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_TX1_PSTATE_RESERVED__SHIFT 0x4
+#define RDPCSTX3_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_TX1_MPLL_EN_RESERVED__SHIFT 0x6
+#define RDPCSTX3_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_TX2_PSTATE_RESERVED__SHIFT 0x8
+#define RDPCSTX3_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_TX2_MPLL_EN_RESERVED__SHIFT 0xa
+#define RDPCSTX3_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_TX3_PSTATE_RESERVED__SHIFT 0xc
+#define RDPCSTX3_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_TX3_MPLL_EN_RESERVED__SHIFT 0xe
+#define RDPCSTX3_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DPALT_DP4_RESERVED__SHIFT 0x10
+#define RDPCSTX3_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DPALT_DISABLE_RESERVED__SHIFT 0x11
+#define RDPCSTX3_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DPALT_DISABLE_ACK_RESERVED__SHIFT 0x12
+#define RDPCSTX3_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_REF_CLK_EN_RESERVED__SHIFT 0x13
+#define RDPCSTX3_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_REF_CLK_REQ_RESERVED__SHIFT 0x14
+#define RDPCSTX3_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_TX0_PSTATE_RESERVED_MASK 0x00000003L
+#define RDPCSTX3_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_TX0_MPLL_EN_RESERVED_MASK 0x00000004L
+#define RDPCSTX3_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_TX1_PSTATE_RESERVED_MASK 0x00000030L
+#define RDPCSTX3_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_TX1_MPLL_EN_RESERVED_MASK 0x00000040L
+#define RDPCSTX3_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_TX2_PSTATE_RESERVED_MASK 0x00000300L
+#define RDPCSTX3_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_TX2_MPLL_EN_RESERVED_MASK 0x00000400L
+#define RDPCSTX3_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_TX3_PSTATE_RESERVED_MASK 0x00003000L
+#define RDPCSTX3_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_TX3_MPLL_EN_RESERVED_MASK 0x00004000L
+#define RDPCSTX3_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DPALT_DP4_RESERVED_MASK 0x00010000L
+#define RDPCSTX3_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DPALT_DISABLE_RESERVED_MASK 0x00020000L
+#define RDPCSTX3_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DPALT_DISABLE_ACK_RESERVED_MASK 0x00040000L
+#define RDPCSTX3_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_REF_CLK_EN_RESERVED_MASK 0x00080000L
+#define RDPCSTX3_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_REF_CLK_REQ_RESERVED_MASK 0x00100000L
+//RDPCSTX3_RDPCSTX_DPALT_CONTROL_REG
+#define RDPCSTX3_RDPCSTX_DPALT_CONTROL_REG__RDPCS_ALLOW_DRIVER_ACCESS__SHIFT 0x0
+#define RDPCSTX3_RDPCSTX_DPALT_CONTROL_REG__RDPCS_DRIVER_ACCESS_BLOCKED__SHIFT 0x4
+#define RDPCSTX3_RDPCSTX_DPALT_CONTROL_REG__RDPCS_DPALT_CONTROL_SPARE__SHIFT 0x8
+#define RDPCSTX3_RDPCSTX_DPALT_CONTROL_REG__RDPCS_ALLOW_DRIVER_ACCESS_MASK 0x00000001L
+#define RDPCSTX3_RDPCSTX_DPALT_CONTROL_REG__RDPCS_DRIVER_ACCESS_BLOCKED_MASK 0x00000010L
+#define RDPCSTX3_RDPCSTX_DPALT_CONTROL_REG__RDPCS_DPALT_CONTROL_SPARE_MASK 0x0000FF00L
+
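Every register in these generated headers follows the same pattern: a REG__FIELD__SHIFT macro giving the field's bit offset, and a matching REG__FIELD_MASK macro giving its contiguous bit mask. As a minimal sketch of how such pairs are normally consumed, the helpers below implement generic read-modify-write field access; get_reg_field() and set_reg_field() are illustrative names, not part of this header.

#include <stdint.h>

/* Illustrative generic accessors over the generated SHIFT/MASK pairs. */
static inline uint32_t get_reg_field(uint32_t reg, uint32_t mask, uint32_t shift)
{
	return (reg & mask) >> shift;
}

static inline uint32_t set_reg_field(uint32_t reg, uint32_t mask, uint32_t shift,
				     uint32_t val)
{
	/* Clear the field, then insert the new value, clipped to the mask. */
	return (reg & ~mask) | ((val << shift) & mask);
}

/*
 * Example: program the TX0 termination control field of PHY_CNTL4 above.
 * cntl4 = set_reg_field(cntl4,
 *		RDPCSTX3_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX0_TERM_CTRL_MASK,
 *		RDPCSTX3_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX0_TERM_CTRL__SHIFT,
 *		0x2);
 */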
+
+// addressBlock: dpcssys_dpcssys_cr3_dispdec
+//DPCSSYS_CR3_DPCSSYS_CR_ADDR
+#define DPCSSYS_CR3_DPCSSYS_CR_ADDR__RDPCS_TX_CR_ADDR__SHIFT 0x0
+#define DPCSSYS_CR3_DPCSSYS_CR_ADDR__RDPCS_TX_CR_ADDR_MASK 0x0000FFFFL
+//DPCSSYS_CR3_DPCSSYS_CR_DATA
+#define DPCSSYS_CR3_DPCSSYS_CR_DATA__RDPCS_TX_CR_DATA__SHIFT 0x0
+#define DPCSSYS_CR3_DPCSSYS_CR_DATA__RDPCS_TX_CR_DATA_MASK 0x0000FFFFL
+
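The CR ADDR/DATA pair above is an indirect (address/data window) interface onto the PHY's internal control-register space: software latches a 16-bit CR address, then moves 16-bit payloads through the DATA register. A hedged sketch follows, assuming hypothetical mmio_read32()/mmio_write32() accessors and mm* register offsets (offsets live in the companion *_offset.h header, not in this sh_mask file):

/* Hypothetical accessors and offsets, for illustration only. */
extern uint32_t mmio_read32(uint32_t offset);
extern void mmio_write32(uint32_t offset, uint32_t value);

static uint16_t dpcssys_cr3_read(uint16_t cr_addr)
{
	/* Latch the target address; only the low 16 bits are implemented. */
	mmio_write32(mmDPCSSYS_CR3_DPCSSYS_CR_ADDR,
		     cr_addr & DPCSSYS_CR3_DPCSSYS_CR_ADDR__RDPCS_TX_CR_ADDR_MASK);
	/* The DATA register now windows the selected internal CR word. */
	return mmio_read32(mmDPCSSYS_CR3_DPCSSYS_CR_DATA) &
	       DPCSSYS_CR3_DPCSSYS_CR_DATA__RDPCS_TX_CR_DATA_MASK;
}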
+
+// addressBlock: dpcssys_dpcs0_dpcsrx_dispdec
+//DPCSRX_PHY_CNTL
+#define DPCSRX_PHY_CNTL__DPCS_PHY_RESET__SHIFT 0x0
+#define DPCSRX_PHY_CNTL__DPCS_PHY_RESET_MASK 0x00000001L
+//DPCSRX_RX_CLOCK_CNTL
+#define DPCSRX_RX_CLOCK_CNTL__DPCS_SYMCLK_RX_GATE_DIS__SHIFT 0x0
+#define DPCSRX_RX_CLOCK_CNTL__DPCS_SYMCLK_RX_EN__SHIFT 0x1
+#define DPCSRX_RX_CLOCK_CNTL__DPCS_SYMCLK_RX_SEL__SHIFT 0x2
+#define DPCSRX_RX_CLOCK_CNTL__DPCS_SYMCLK_RX_CLOCK_ON__SHIFT 0x4
+#define DPCSRX_RX_CLOCK_CNTL__DPCS_SYMCLK_RX0_GATE_DIS__SHIFT 0x10
+#define DPCSRX_RX_CLOCK_CNTL__DPCS_SYMCLK_RX0_EN__SHIFT 0x11
+#define DPCSRX_RX_CLOCK_CNTL__DPCS_SYMCLK_RX0_CLOCK_ON__SHIFT 0x12
+#define DPCSRX_RX_CLOCK_CNTL__DPCS_SYMCLK_RX1_GATE_DIS__SHIFT 0x14
+#define DPCSRX_RX_CLOCK_CNTL__DPCS_SYMCLK_RX1_EN__SHIFT 0x15
+#define DPCSRX_RX_CLOCK_CNTL__DPCS_SYMCLK_RX1_CLOCK_ON__SHIFT 0x16
+#define DPCSRX_RX_CLOCK_CNTL__DPCS_SYMCLK_RX2_GATE_DIS__SHIFT 0x18
+#define DPCSRX_RX_CLOCK_CNTL__DPCS_SYMCLK_RX2_EN__SHIFT 0x19
+#define DPCSRX_RX_CLOCK_CNTL__DPCS_SYMCLK_RX2_CLOCK_ON__SHIFT 0x1a
+#define DPCSRX_RX_CLOCK_CNTL__DPCS_SYMCLK_RX3_GATE_DIS__SHIFT 0x1c
+#define DPCSRX_RX_CLOCK_CNTL__DPCS_SYMCLK_RX3_EN__SHIFT 0x1d
+#define DPCSRX_RX_CLOCK_CNTL__DPCS_SYMCLK_RX3_CLOCK_ON__SHIFT 0x1e
+#define DPCSRX_RX_CLOCK_CNTL__DPCS_SYMCLK_RX_GATE_DIS_MASK 0x00000001L
+#define DPCSRX_RX_CLOCK_CNTL__DPCS_SYMCLK_RX_EN_MASK 0x00000002L
+#define DPCSRX_RX_CLOCK_CNTL__DPCS_SYMCLK_RX_SEL_MASK 0x0000000CL
+#define DPCSRX_RX_CLOCK_CNTL__DPCS_SYMCLK_RX_CLOCK_ON_MASK 0x00000010L
+#define DPCSRX_RX_CLOCK_CNTL__DPCS_SYMCLK_RX0_GATE_DIS_MASK 0x00010000L
+#define DPCSRX_RX_CLOCK_CNTL__DPCS_SYMCLK_RX0_EN_MASK 0x00020000L
+#define DPCSRX_RX_CLOCK_CNTL__DPCS_SYMCLK_RX0_CLOCK_ON_MASK 0x00040000L
+#define DPCSRX_RX_CLOCK_CNTL__DPCS_SYMCLK_RX1_GATE_DIS_MASK 0x00100000L
+#define DPCSRX_RX_CLOCK_CNTL__DPCS_SYMCLK_RX1_EN_MASK 0x00200000L
+#define DPCSRX_RX_CLOCK_CNTL__DPCS_SYMCLK_RX1_CLOCK_ON_MASK 0x00400000L
+#define DPCSRX_RX_CLOCK_CNTL__DPCS_SYMCLK_RX2_GATE_DIS_MASK 0x01000000L
+#define DPCSRX_RX_CLOCK_CNTL__DPCS_SYMCLK_RX2_EN_MASK 0x02000000L
+#define DPCSRX_RX_CLOCK_CNTL__DPCS_SYMCLK_RX2_CLOCK_ON_MASK 0x04000000L
+#define DPCSRX_RX_CLOCK_CNTL__DPCS_SYMCLK_RX3_GATE_DIS_MASK 0x10000000L
+#define DPCSRX_RX_CLOCK_CNTL__DPCS_SYMCLK_RX3_EN_MASK 0x20000000L
+#define DPCSRX_RX_CLOCK_CNTL__DPCS_SYMCLK_RX3_CLOCK_ON_MASK 0x40000000L
+//DPCSRX_RX_CNTL
+#define DPCSRX_RX_CNTL__DPCS_RX_LANE0_EN__SHIFT 0x0
+#define DPCSRX_RX_CNTL__DPCS_RX_LANE1_EN__SHIFT 0x1
+#define DPCSRX_RX_CNTL__DPCS_RX_LANE2_EN__SHIFT 0x2
+#define DPCSRX_RX_CNTL__DPCS_RX_LANE3_EN__SHIFT 0x3
+#define DPCSRX_RX_CNTL__DPCS_RX_FIFO_EN__SHIFT 0x4
+#define DPCSRX_RX_CNTL__DPCS_RX_FIFO_START__SHIFT 0x5
+#define DPCSRX_RX_CNTL__DPCS_RX_FIFO_RD_START_DELAY__SHIFT 0x8
+#define DPCSRX_RX_CNTL__DPCS_RX_SOFT_RESET__SHIFT 0x1f
+#define DPCSRX_RX_CNTL__DPCS_RX_LANE0_EN_MASK 0x00000001L
+#define DPCSRX_RX_CNTL__DPCS_RX_LANE1_EN_MASK 0x00000002L
+#define DPCSRX_RX_CNTL__DPCS_RX_LANE2_EN_MASK 0x00000004L
+#define DPCSRX_RX_CNTL__DPCS_RX_LANE3_EN_MASK 0x00000008L
+#define DPCSRX_RX_CNTL__DPCS_RX_FIFO_EN_MASK 0x00000010L
+#define DPCSRX_RX_CNTL__DPCS_RX_FIFO_START_MASK 0x00000020L
+#define DPCSRX_RX_CNTL__DPCS_RX_FIFO_RD_START_DELAY_MASK 0x00000F00L
+#define DPCSRX_RX_CNTL__DPCS_RX_SOFT_RESET_MASK 0x80000000L
+//DPCSRX_CBUS_CNTL
+#define DPCSRX_CBUS_CNTL__DPCS_CBUS_WR_CMD_DELAY__SHIFT 0x0
+#define DPCSRX_CBUS_CNTL__DPCS_PHY_MASTER_REQ_DELAY__SHIFT 0x8
+#define DPCSRX_CBUS_CNTL__DPCS_CBUS_SOFT_RESET__SHIFT 0x1f
+#define DPCSRX_CBUS_CNTL__DPCS_CBUS_WR_CMD_DELAY_MASK 0x0000000FL
+#define DPCSRX_CBUS_CNTL__DPCS_PHY_MASTER_REQ_DELAY_MASK 0x0000FF00L
+#define DPCSRX_CBUS_CNTL__DPCS_CBUS_SOFT_RESET_MASK 0x80000000L
+//DPCSRX_REG_ERROR_STATUS
+#define DPCSRX_REG_ERROR_STATUS__DPCS_REG_FIFO_OVERFLOW__SHIFT 0x0
+#define DPCSRX_REG_ERROR_STATUS__DPCS_REG_ERROR_CLR__SHIFT 0x1
+#define DPCSRX_REG_ERROR_STATUS__DPCS_REG_FIFO_ERROR_MASK__SHIFT 0x4
+#define DPCSRX_REG_ERROR_STATUS__DPCS_REG_FIFO_OVERFLOW_MASK 0x00000001L
+#define DPCSRX_REG_ERROR_STATUS__DPCS_REG_ERROR_CLR_MASK 0x00000002L
+#define DPCSRX_REG_ERROR_STATUS__DPCS_REG_FIFO_ERROR_MASK_MASK 0x00000010L
+//DPCSRX_RX_ERROR_STATUS
+#define DPCSRX_RX_ERROR_STATUS__DPCS_RX0_FIFO_ERROR__SHIFT 0x0
+#define DPCSRX_RX_ERROR_STATUS__DPCS_RX1_FIFO_ERROR__SHIFT 0x1
+#define DPCSRX_RX_ERROR_STATUS__DPCS_RX2_FIFO_ERROR__SHIFT 0x2
+#define DPCSRX_RX_ERROR_STATUS__DPCS_RX3_FIFO_ERROR__SHIFT 0x3
+#define DPCSRX_RX_ERROR_STATUS__DPCS_RX_ERROR_CLR__SHIFT 0x8
+#define DPCSRX_RX_ERROR_STATUS__DPCS_RX_FIFO_ERROR_MASK__SHIFT 0xc
+#define DPCSRX_RX_ERROR_STATUS__DPCS_RX0_FIFO_ERROR_MASK 0x00000001L
+#define DPCSRX_RX_ERROR_STATUS__DPCS_RX1_FIFO_ERROR_MASK 0x00000002L
+#define DPCSRX_RX_ERROR_STATUS__DPCS_RX2_FIFO_ERROR_MASK 0x00000004L
+#define DPCSRX_RX_ERROR_STATUS__DPCS_RX3_FIFO_ERROR_MASK 0x00000008L
+#define DPCSRX_RX_ERROR_STATUS__DPCS_RX_ERROR_CLR_MASK 0x00000100L
+#define DPCSRX_RX_ERROR_STATUS__DPCS_RX_FIFO_ERROR_MASK_MASK 0x00001000L
+//DPCSRX_INDEX_MODE_ADDR
+#define DPCSRX_INDEX_MODE_ADDR__DPCS_INDEX_MODE_ADDR__SHIFT 0x0
+#define DPCSRX_INDEX_MODE_ADDR__DPCS_INDEX_MODE_ADDR_MASK 0x0003FFFFL
+//DPCSRX_INDEX_MODE_DATA
+#define DPCSRX_INDEX_MODE_DATA__DPCS_INDEX_MODE_DATA__SHIFT 0x0
+#define DPCSRX_INDEX_MODE_DATA__DPCS_INDEX_MODE_DATA_MASK 0xFFFFFFFFL
+//DPCSRX_DEBUG_CONFIG
+#define DPCSRX_DEBUG_CONFIG__DPCS_DBG_EN__SHIFT 0x0
+#define DPCSRX_DEBUG_CONFIG__DPCS_DBG_CFGCLK_SEL__SHIFT 0x1
+#define DPCSRX_DEBUG_CONFIG__DPCS_DBG_RX_SYMCLK_SEL__SHIFT 0x6
+#define DPCSRX_DEBUG_CONFIG__DPCS_DBG_BLOCK_SEL__SHIFT 0xb
+#define DPCSRX_DEBUG_CONFIG__DPCS_DBG_CBUS_DIS__SHIFT 0xe
+#define DPCSRX_DEBUG_CONFIG__DPCS_TEST_DEBUG_WRITE_EN__SHIFT 0x10
+#define DPCSRX_DEBUG_CONFIG__DPCS_DBG_EN_MASK 0x00000001L
+#define DPCSRX_DEBUG_CONFIG__DPCS_DBG_CFGCLK_SEL_MASK 0x0000000EL
+#define DPCSRX_DEBUG_CONFIG__DPCS_DBG_RX_SYMCLK_SEL_MASK 0x000000C0L
+#define DPCSRX_DEBUG_CONFIG__DPCS_DBG_BLOCK_SEL_MASK 0x00003800L
+#define DPCSRX_DEBUG_CONFIG__DPCS_DBG_CBUS_DIS_MASK 0x00004000L
+#define DPCSRX_DEBUG_CONFIG__DPCS_TEST_DEBUG_WRITE_EN_MASK 0x00010000L
+
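DPCSRX_RX_ERROR_STATUS above packs one FIFO-error flag per RX lane into bits 0-3, next to a clear strobe (DPCS_RX_ERROR_CLR) and an error mask bit. Since the four lane flags are consecutive single bits, they can be scanned by shifting the lane-0 mask; a sketch under the same assumptions as before (hypothetical mmio helpers and mm* offset):

/* Illustrative: count and clear per-lane RX FIFO errors. */
static int dpcsrx_handle_fifo_errors(void)
{
	uint32_t status = mmio_read32(mmDPCSRX_RX_ERROR_STATUS);
	int lane, errors = 0;

	for (lane = 0; lane < 4; lane++) {
		/* Lane N's flag is the lane-0 mask shifted left by N. */
		if (status & (DPCSRX_RX_ERROR_STATUS__DPCS_RX0_FIFO_ERROR_MASK << lane))
			errors++;
	}

	/* Write the clear bit to acknowledge the sticky flags. */
	mmio_write32(mmDPCSRX_RX_ERROR_STATUS,
		     status | DPCSRX_RX_ERROR_STATUS__DPCS_RX_ERROR_CLR_MASK);
	return errors;
}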
+
+// addressBlock: dpcssys_dpcs0_dpcstx4_dispdec
+//DPCSTX4_DPCSTX_TX_CLOCK_CNTL
+#define DPCSTX4_DPCSTX_TX_CLOCK_CNTL__DPCS_SYMCLK_GATE_DIS__SHIFT 0x0
+#define DPCSTX4_DPCSTX_TX_CLOCK_CNTL__DPCS_SYMCLK_EN__SHIFT 0x1
+#define DPCSTX4_DPCSTX_TX_CLOCK_CNTL__DPCS_SYMCLK_CLOCK_ON__SHIFT 0x2
+#define DPCSTX4_DPCSTX_TX_CLOCK_CNTL__DPCS_SYMCLK_DIV2_CLOCK_ON__SHIFT 0x3
+#define DPCSTX4_DPCSTX_TX_CLOCK_CNTL__DPCS_SYMCLK_GATE_DIS_MASK 0x00000001L
+#define DPCSTX4_DPCSTX_TX_CLOCK_CNTL__DPCS_SYMCLK_EN_MASK 0x00000002L
+#define DPCSTX4_DPCSTX_TX_CLOCK_CNTL__DPCS_SYMCLK_CLOCK_ON_MASK 0x00000004L
+#define DPCSTX4_DPCSTX_TX_CLOCK_CNTL__DPCS_SYMCLK_DIV2_CLOCK_ON_MASK 0x00000008L
+//DPCSTX4_DPCSTX_TX_CNTL
+#define DPCSTX4_DPCSTX_TX_CNTL__DPCS_TX_PLL_UPDATE_REQ__SHIFT 0xc
+#define DPCSTX4_DPCSTX_TX_CNTL__DPCS_TX_PLL_UPDATE_PENDING__SHIFT 0xd
+#define DPCSTX4_DPCSTX_TX_CNTL__DPCS_TX_DATA_SWAP__SHIFT 0xe
+#define DPCSTX4_DPCSTX_TX_CNTL__DPCS_TX_DATA_ORDER_INVERT__SHIFT 0xf
+#define DPCSTX4_DPCSTX_TX_CNTL__DPCS_TX_FIFO_EN__SHIFT 0x10
+#define DPCSTX4_DPCSTX_TX_CNTL__DPCS_TX_FIFO_START__SHIFT 0x11
+#define DPCSTX4_DPCSTX_TX_CNTL__DPCS_TX_FIFO_RD_START_DELAY__SHIFT 0x14
+#define DPCSTX4_DPCSTX_TX_CNTL__DPCS_TX_SOFT_RESET__SHIFT 0x1f
+#define DPCSTX4_DPCSTX_TX_CNTL__DPCS_TX_PLL_UPDATE_REQ_MASK 0x00001000L
+#define DPCSTX4_DPCSTX_TX_CNTL__DPCS_TX_PLL_UPDATE_PENDING_MASK 0x00002000L
+#define DPCSTX4_DPCSTX_TX_CNTL__DPCS_TX_DATA_SWAP_MASK 0x00004000L
+#define DPCSTX4_DPCSTX_TX_CNTL__DPCS_TX_DATA_ORDER_INVERT_MASK 0x00008000L
+#define DPCSTX4_DPCSTX_TX_CNTL__DPCS_TX_FIFO_EN_MASK 0x00010000L
+#define DPCSTX4_DPCSTX_TX_CNTL__DPCS_TX_FIFO_START_MASK 0x00020000L
+#define DPCSTX4_DPCSTX_TX_CNTL__DPCS_TX_FIFO_RD_START_DELAY_MASK 0x00F00000L
+#define DPCSTX4_DPCSTX_TX_CNTL__DPCS_TX_SOFT_RESET_MASK 0x80000000L
+//DPCSTX4_DPCSTX_CBUS_CNTL
+#define DPCSTX4_DPCSTX_CBUS_CNTL__DPCS_CBUS_WR_CMD_DELAY__SHIFT 0x0
+#define DPCSTX4_DPCSTX_CBUS_CNTL__DPCS_CBUS_SOFT_RESET__SHIFT 0x1f
+#define DPCSTX4_DPCSTX_CBUS_CNTL__DPCS_CBUS_WR_CMD_DELAY_MASK 0x000000FFL
+#define DPCSTX4_DPCSTX_CBUS_CNTL__DPCS_CBUS_SOFT_RESET_MASK 0x80000000L
+//DPCSTX4_DPCSTX_INTERRUPT_CNTL
+#define DPCSTX4_DPCSTX_INTERRUPT_CNTL__DPCS_REG_FIFO_OVERFLOW__SHIFT 0x0
+#define DPCSTX4_DPCSTX_INTERRUPT_CNTL__DPCS_REG_ERROR_CLR__SHIFT 0x1
+#define DPCSTX4_DPCSTX_INTERRUPT_CNTL__DPCS_REG_FIFO_ERROR_MASK__SHIFT 0x4
+#define DPCSTX4_DPCSTX_INTERRUPT_CNTL__DPCS_TX0_FIFO_ERROR__SHIFT 0x8
+#define DPCSTX4_DPCSTX_INTERRUPT_CNTL__DPCS_TX1_FIFO_ERROR__SHIFT 0x9
+#define DPCSTX4_DPCSTX_INTERRUPT_CNTL__DPCS_TX2_FIFO_ERROR__SHIFT 0xa
+#define DPCSTX4_DPCSTX_INTERRUPT_CNTL__DPCS_TX3_FIFO_ERROR__SHIFT 0xb
+#define DPCSTX4_DPCSTX_INTERRUPT_CNTL__DPCS_TX_ERROR_CLR__SHIFT 0xc
+#define DPCSTX4_DPCSTX_INTERRUPT_CNTL__DPCS_TX_FIFO_ERROR_MASK__SHIFT 0x10
+#define DPCSTX4_DPCSTX_INTERRUPT_CNTL__DPCS_INTERRUPT_MASK__SHIFT 0x14
+#define DPCSTX4_DPCSTX_INTERRUPT_CNTL__DPCS_REG_FIFO_OVERFLOW_MASK 0x00000001L
+#define DPCSTX4_DPCSTX_INTERRUPT_CNTL__DPCS_REG_ERROR_CLR_MASK 0x00000002L
+#define DPCSTX4_DPCSTX_INTERRUPT_CNTL__DPCS_REG_FIFO_ERROR_MASK_MASK 0x00000010L
+#define DPCSTX4_DPCSTX_INTERRUPT_CNTL__DPCS_TX0_FIFO_ERROR_MASK 0x00000100L
+#define DPCSTX4_DPCSTX_INTERRUPT_CNTL__DPCS_TX1_FIFO_ERROR_MASK 0x00000200L
+#define DPCSTX4_DPCSTX_INTERRUPT_CNTL__DPCS_TX2_FIFO_ERROR_MASK 0x00000400L
+#define DPCSTX4_DPCSTX_INTERRUPT_CNTL__DPCS_TX3_FIFO_ERROR_MASK 0x00000800L
+#define DPCSTX4_DPCSTX_INTERRUPT_CNTL__DPCS_TX_ERROR_CLR_MASK 0x00001000L
+#define DPCSTX4_DPCSTX_INTERRUPT_CNTL__DPCS_TX_FIFO_ERROR_MASK_MASK 0x00010000L
+#define DPCSTX4_DPCSTX_INTERRUPT_CNTL__DPCS_INTERRUPT_MASK_MASK 0x00100000L
+//DPCSTX4_DPCSTX_PLL_UPDATE_ADDR
+#define DPCSTX4_DPCSTX_PLL_UPDATE_ADDR__DPCS_PLL_UPDATE_ADDR__SHIFT 0x0
+#define DPCSTX4_DPCSTX_PLL_UPDATE_ADDR__DPCS_PLL_UPDATE_ADDR_MASK 0x0003FFFFL
+//DPCSTX4_DPCSTX_PLL_UPDATE_DATA
+#define DPCSTX4_DPCSTX_PLL_UPDATE_DATA__DPCS_PLL_UPDATE_DATA__SHIFT 0x0
+#define DPCSTX4_DPCSTX_PLL_UPDATE_DATA__DPCS_PLL_UPDATE_DATA_MASK 0xFFFFFFFFL
+//DPCSTX4_DPCSTX_DEBUG_CONFIG
+#define DPCSTX4_DPCSTX_DEBUG_CONFIG__DPCS_DBG_EN__SHIFT 0x0
+#define DPCSTX4_DPCSTX_DEBUG_CONFIG__DPCS_DBG_CFGCLK_SEL__SHIFT 0x1
+#define DPCSTX4_DPCSTX_DEBUG_CONFIG__DPCS_DBG_TX_SYMCLK_SEL__SHIFT 0x4
+#define DPCSTX4_DPCSTX_DEBUG_CONFIG__DPCS_DBG_TX_SYMCLK_DIV2_SEL__SHIFT 0x8
+#define DPCSTX4_DPCSTX_DEBUG_CONFIG__DPCS_DBG_CBUS_DIS__SHIFT 0xe
+#define DPCSTX4_DPCSTX_DEBUG_CONFIG__DPCS_TEST_DEBUG_WRITE_EN__SHIFT 0x10
+#define DPCSTX4_DPCSTX_DEBUG_CONFIG__DPCS_DBG_EN_MASK 0x00000001L
+#define DPCSTX4_DPCSTX_DEBUG_CONFIG__DPCS_DBG_CFGCLK_SEL_MASK 0x0000000EL
+#define DPCSTX4_DPCSTX_DEBUG_CONFIG__DPCS_DBG_TX_SYMCLK_SEL_MASK 0x00000070L
+#define DPCSTX4_DPCSTX_DEBUG_CONFIG__DPCS_DBG_TX_SYMCLK_DIV2_SEL_MASK 0x00000700L
+#define DPCSTX4_DPCSTX_DEBUG_CONFIG__DPCS_DBG_CBUS_DIS_MASK 0x00004000L
+#define DPCSTX4_DPCSTX_DEBUG_CONFIG__DPCS_TEST_DEBUG_WRITE_EN_MASK 0x00010000L
+
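DPCSTX4's TX_CNTL carries a PLL update request/pending pair (bits 12 and 13) that works together with the PLL_UPDATE_ADDR/DATA registers above. The sketch below assumes the conventional semantics suggested by the field names only, i.e. program address and data, raise the request, then poll until the update is no longer pending; the mmio helpers and mm* offsets remain hypothetical:

/* Hedged sketch of the PLL update handshake; semantics inferred
 * from the field names, not from hardware documentation. */
static void dpcstx4_pll_update(uint32_t addr, uint32_t data)
{
	uint32_t cntl;

	mmio_write32(mmDPCSTX4_DPCSTX_PLL_UPDATE_ADDR,
		     addr & DPCSTX4_DPCSTX_PLL_UPDATE_ADDR__DPCS_PLL_UPDATE_ADDR_MASK);
	mmio_write32(mmDPCSTX4_DPCSTX_PLL_UPDATE_DATA, data);

	/* Raise the update request... */
	cntl = mmio_read32(mmDPCSTX4_DPCSTX_TX_CNTL);
	mmio_write32(mmDPCSTX4_DPCSTX_TX_CNTL,
		     cntl | DPCSTX4_DPCSTX_TX_CNTL__DPCS_TX_PLL_UPDATE_REQ_MASK);

	/* ...and wait for the pending flag to drop. */
	while (mmio_read32(mmDPCSTX4_DPCSTX_TX_CNTL) &
	       DPCSTX4_DPCSTX_TX_CNTL__DPCS_TX_PLL_UPDATE_PENDING_MASK)
		;
}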
+
+// addressBlock: dpcssys_dpcs0_rdpcstx4_dispdec
+//RDPCSTX4_RDPCSTX_CNTL
+#define RDPCSTX4_RDPCSTX_CNTL__RDPCS_CBUS_SOFT_RESET__SHIFT 0x0
+#define RDPCSTX4_RDPCSTX_CNTL__RDPCS_SRAM_SOFT_RESET__SHIFT 0x4
+#define RDPCSTX4_RDPCSTX_CNTL__RDPCS_TX_FIFO_LANE0_EN__SHIFT 0xc
+#define RDPCSTX4_RDPCSTX_CNTL__RDPCS_TX_FIFO_LANE1_EN__SHIFT 0xd
+#define RDPCSTX4_RDPCSTX_CNTL__RDPCS_TX_FIFO_LANE2_EN__SHIFT 0xe
+#define RDPCSTX4_RDPCSTX_CNTL__RDPCS_TX_FIFO_LANE3_EN__SHIFT 0xf
+#define RDPCSTX4_RDPCSTX_CNTL__RDPCS_TX_FIFO_EN__SHIFT 0x10
+#define RDPCSTX4_RDPCSTX_CNTL__RDPCS_TX_FIFO_START__SHIFT 0x11
+#define RDPCSTX4_RDPCSTX_CNTL__RDPCS_TX_FIFO_RD_START_DELAY__SHIFT 0x14
+#define RDPCSTX4_RDPCSTX_CNTL__RDPCS_CR_REGISTER_BLOCK_EN__SHIFT 0x18
+#define RDPCSTX4_RDPCSTX_CNTL__RDPCS_NON_DPALT_REGISTER_BLOCK_EN__SHIFT 0x19
+#define RDPCSTX4_RDPCSTX_CNTL__RDPCS_DPALT_BLOCK_STATUS__SHIFT 0x1a
+#define RDPCSTX4_RDPCSTX_CNTL__RDPCS_TX_SOFT_RESET__SHIFT 0x1f
+#define RDPCSTX4_RDPCSTX_CNTL__RDPCS_CBUS_SOFT_RESET_MASK 0x00000001L
+#define RDPCSTX4_RDPCSTX_CNTL__RDPCS_SRAM_SOFT_RESET_MASK 0x00000010L
+#define RDPCSTX4_RDPCSTX_CNTL__RDPCS_TX_FIFO_LANE0_EN_MASK 0x00001000L
+#define RDPCSTX4_RDPCSTX_CNTL__RDPCS_TX_FIFO_LANE1_EN_MASK 0x00002000L
+#define RDPCSTX4_RDPCSTX_CNTL__RDPCS_TX_FIFO_LANE2_EN_MASK 0x00004000L
+#define RDPCSTX4_RDPCSTX_CNTL__RDPCS_TX_FIFO_LANE3_EN_MASK 0x00008000L
+#define RDPCSTX4_RDPCSTX_CNTL__RDPCS_TX_FIFO_EN_MASK 0x00010000L
+#define RDPCSTX4_RDPCSTX_CNTL__RDPCS_TX_FIFO_START_MASK 0x00020000L
+#define RDPCSTX4_RDPCSTX_CNTL__RDPCS_TX_FIFO_RD_START_DELAY_MASK 0x00F00000L
+#define RDPCSTX4_RDPCSTX_CNTL__RDPCS_CR_REGISTER_BLOCK_EN_MASK 0x01000000L
+#define RDPCSTX4_RDPCSTX_CNTL__RDPCS_NON_DPALT_REGISTER_BLOCK_EN_MASK 0x02000000L
+#define RDPCSTX4_RDPCSTX_CNTL__RDPCS_DPALT_BLOCK_STATUS_MASK 0x04000000L
+#define RDPCSTX4_RDPCSTX_CNTL__RDPCS_TX_SOFT_RESET_MASK 0x80000000L
+//RDPCSTX4_RDPCSTX_CLOCK_CNTL
+#define RDPCSTX4_RDPCSTX_CLOCK_CNTL__RDPCS_EXT_REFCLK_EN__SHIFT 0x0
+#define RDPCSTX4_RDPCSTX_CLOCK_CNTL__RDPCS_SYMCLK_DIV2_TX0_EN__SHIFT 0x4
+#define RDPCSTX4_RDPCSTX_CLOCK_CNTL__RDPCS_SYMCLK_DIV2_TX1_EN__SHIFT 0x5
+#define RDPCSTX4_RDPCSTX_CLOCK_CNTL__RDPCS_SYMCLK_DIV2_TX2_EN__SHIFT 0x6
+#define RDPCSTX4_RDPCSTX_CLOCK_CNTL__RDPCS_SYMCLK_DIV2_TX3_EN__SHIFT 0x7
+#define RDPCSTX4_RDPCSTX_CLOCK_CNTL__RDPCS_SYMCLK_DIV2_GATE_DIS__SHIFT 0x8
+#define RDPCSTX4_RDPCSTX_CLOCK_CNTL__RDPCS_SYMCLK_DIV2_EN__SHIFT 0x9
+#define RDPCSTX4_RDPCSTX_CLOCK_CNTL__RDPCS_SYMCLK_DIV2_CLOCK_ON__SHIFT 0xa
+#define RDPCSTX4_RDPCSTX_CLOCK_CNTL__RDPCS_SRAMCLK_GATE_DIS__SHIFT 0xc
+#define RDPCSTX4_RDPCSTX_CLOCK_CNTL__RDPCS_SRAMCLK_EN__SHIFT 0xd
+#define RDPCSTX4_RDPCSTX_CLOCK_CNTL__RDPCS_SRAMCLK_CLOCK_ON__SHIFT 0xe
+#define RDPCSTX4_RDPCSTX_CLOCK_CNTL__RDPCS_SRAMCLK_BYPASS__SHIFT 0x10
+#define RDPCSTX4_RDPCSTX_CLOCK_CNTL__RDPCS_EXT_REFCLK_EN_MASK 0x00000001L
+#define RDPCSTX4_RDPCSTX_CLOCK_CNTL__RDPCS_SYMCLK_DIV2_TX0_EN_MASK 0x00000010L
+#define RDPCSTX4_RDPCSTX_CLOCK_CNTL__RDPCS_SYMCLK_DIV2_TX1_EN_MASK 0x00000020L
+#define RDPCSTX4_RDPCSTX_CLOCK_CNTL__RDPCS_SYMCLK_DIV2_TX2_EN_MASK 0x00000040L
+#define RDPCSTX4_RDPCSTX_CLOCK_CNTL__RDPCS_SYMCLK_DIV2_TX3_EN_MASK 0x00000080L
+#define RDPCSTX4_RDPCSTX_CLOCK_CNTL__RDPCS_SYMCLK_DIV2_GATE_DIS_MASK 0x00000100L
+#define RDPCSTX4_RDPCSTX_CLOCK_CNTL__RDPCS_SYMCLK_DIV2_EN_MASK 0x00000200L
+#define RDPCSTX4_RDPCSTX_CLOCK_CNTL__RDPCS_SYMCLK_DIV2_CLOCK_ON_MASK 0x00000400L
+#define RDPCSTX4_RDPCSTX_CLOCK_CNTL__RDPCS_SRAMCLK_GATE_DIS_MASK 0x00001000L
+#define RDPCSTX4_RDPCSTX_CLOCK_CNTL__RDPCS_SRAMCLK_EN_MASK 0x00002000L
+#define RDPCSTX4_RDPCSTX_CLOCK_CNTL__RDPCS_SRAMCLK_CLOCK_ON_MASK 0x00004000L
+#define RDPCSTX4_RDPCSTX_CLOCK_CNTL__RDPCS_SRAMCLK_BYPASS_MASK 0x00010000L
+//RDPCSTX4_RDPCSTX_INTERRUPT_CONTROL
+#define RDPCSTX4_RDPCSTX_INTERRUPT_CONTROL__RDPCS_REG_FIFO_OVERFLOW__SHIFT 0x0
+#define RDPCSTX4_RDPCSTX_INTERRUPT_CONTROL__RDPCS_DPALT_DISABLE_TOGGLE__SHIFT 0x1
+#define RDPCSTX4_RDPCSTX_INTERRUPT_CONTROL__RDPCS_DPALT_4LANE_TOGGLE__SHIFT 0x2
+#define RDPCSTX4_RDPCSTX_INTERRUPT_CONTROL__RDPCS_TX0_FIFO_ERROR__SHIFT 0x4
+#define RDPCSTX4_RDPCSTX_INTERRUPT_CONTROL__RDPCS_TX1_FIFO_ERROR__SHIFT 0x5
+#define RDPCSTX4_RDPCSTX_INTERRUPT_CONTROL__RDPCS_TX2_FIFO_ERROR__SHIFT 0x6
+#define RDPCSTX4_RDPCSTX_INTERRUPT_CONTROL__RDPCS_TX3_FIFO_ERROR__SHIFT 0x7
+#define RDPCSTX4_RDPCSTX_INTERRUPT_CONTROL__RDPCS_REG_ERROR_CLR__SHIFT 0x8
+#define RDPCSTX4_RDPCSTX_INTERRUPT_CONTROL__RDPCS_DPALT_DISABLE_TOGGLE_CLR__SHIFT 0x9
+#define RDPCSTX4_RDPCSTX_INTERRUPT_CONTROL__RDPCS_DPALT_4LANE_TOGGLE_CLR__SHIFT 0xa
+#define RDPCSTX4_RDPCSTX_INTERRUPT_CONTROL__RDPCS_TX_ERROR_CLR__SHIFT 0xc
+#define RDPCSTX4_RDPCSTX_INTERRUPT_CONTROL__RDPCS_REG_FIFO_ERROR_MASK__SHIFT 0x10
+#define RDPCSTX4_RDPCSTX_INTERRUPT_CONTROL__RDPCS_DPALT_DISABLE_TOGGLE_MASK__SHIFT 0x11
+#define RDPCSTX4_RDPCSTX_INTERRUPT_CONTROL__RDPCS_DPALT_4LANE_TOGGLE_MASK__SHIFT 0x12
+#define RDPCSTX4_RDPCSTX_INTERRUPT_CONTROL__RDPCS_TX_FIFO_ERROR_MASK__SHIFT 0x14
+#define RDPCSTX4_RDPCSTX_INTERRUPT_CONTROL__RDPCS_REG_FIFO_OVERFLOW_MASK 0x00000001L
+#define RDPCSTX4_RDPCSTX_INTERRUPT_CONTROL__RDPCS_DPALT_DISABLE_TOGGLE_MASK 0x00000002L
+#define RDPCSTX4_RDPCSTX_INTERRUPT_CONTROL__RDPCS_DPALT_4LANE_TOGGLE_MASK 0x00000004L
+#define RDPCSTX4_RDPCSTX_INTERRUPT_CONTROL__RDPCS_TX0_FIFO_ERROR_MASK 0x00000010L
+#define RDPCSTX4_RDPCSTX_INTERRUPT_CONTROL__RDPCS_TX1_FIFO_ERROR_MASK 0x00000020L
+#define RDPCSTX4_RDPCSTX_INTERRUPT_CONTROL__RDPCS_TX2_FIFO_ERROR_MASK 0x00000040L
+#define RDPCSTX4_RDPCSTX_INTERRUPT_CONTROL__RDPCS_TX3_FIFO_ERROR_MASK 0x00000080L
+#define RDPCSTX4_RDPCSTX_INTERRUPT_CONTROL__RDPCS_REG_ERROR_CLR_MASK 0x00000100L
+#define RDPCSTX4_RDPCSTX_INTERRUPT_CONTROL__RDPCS_DPALT_DISABLE_TOGGLE_CLR_MASK 0x00000200L
+#define RDPCSTX4_RDPCSTX_INTERRUPT_CONTROL__RDPCS_DPALT_4LANE_TOGGLE_CLR_MASK 0x00000400L
+#define RDPCSTX4_RDPCSTX_INTERRUPT_CONTROL__RDPCS_TX_ERROR_CLR_MASK 0x00001000L
+#define RDPCSTX4_RDPCSTX_INTERRUPT_CONTROL__RDPCS_REG_FIFO_ERROR_MASK_MASK 0x00010000L
+#define RDPCSTX4_RDPCSTX_INTERRUPT_CONTROL__RDPCS_DPALT_DISABLE_TOGGLE_MASK_MASK 0x00020000L
+#define RDPCSTX4_RDPCSTX_INTERRUPT_CONTROL__RDPCS_DPALT_4LANE_TOGGLE_MASK_MASK 0x00040000L
+#define RDPCSTX4_RDPCSTX_INTERRUPT_CONTROL__RDPCS_TX_FIFO_ERROR_MASK_MASK 0x00100000L
+//RDPCSTX4_RDPCSTX_PLL_UPDATE_DATA
+#define RDPCSTX4_RDPCSTX_PLL_UPDATE_DATA__RDPCS_PLL_UPDATE_DATA__SHIFT 0x0
+#define RDPCSTX4_RDPCSTX_PLL_UPDATE_DATA__RDPCS_PLL_UPDATE_DATA_MASK 0x00000001L
+//RDPCSTX4_RDPCS_TX_CR_ADDR
+#define RDPCSTX4_RDPCS_TX_CR_ADDR__RDPCS_TX_CR_ADDR__SHIFT 0x0
+#define RDPCSTX4_RDPCS_TX_CR_ADDR__RDPCS_TX_CR_ADDR_MASK 0x0000FFFFL
+//RDPCSTX4_RDPCS_TX_CR_DATA
+#define RDPCSTX4_RDPCS_TX_CR_DATA__RDPCS_TX_CR_DATA__SHIFT 0x0
+#define RDPCSTX4_RDPCS_TX_CR_DATA__RDPCS_TX_CR_DATA_MASK 0x0000FFFFL
+//RDPCSTX4_RDPCS_TX_SRAM_CNTL
+#define RDPCSTX4_RDPCS_TX_SRAM_CNTL__RDPCS_MEM_PWR_DIS__SHIFT 0x14
+#define RDPCSTX4_RDPCS_TX_SRAM_CNTL__RDPCS_MEM_PWR_FORCE__SHIFT 0x18
+#define RDPCSTX4_RDPCS_TX_SRAM_CNTL__RDPCS_MEM_PWR_PWR_STATE__SHIFT 0x1c
+#define RDPCSTX4_RDPCS_TX_SRAM_CNTL__RDPCS_MEM_PWR_DIS_MASK 0x00100000L
+#define RDPCSTX4_RDPCS_TX_SRAM_CNTL__RDPCS_MEM_PWR_FORCE_MASK 0x03000000L
+#define RDPCSTX4_RDPCS_TX_SRAM_CNTL__RDPCS_MEM_PWR_PWR_STATE_MASK 0x30000000L
+//RDPCSTX4_RDPCSTX_MEM_POWER_CTRL
+#define RDPCSTX4_RDPCSTX_MEM_POWER_CTRL__RDPCS_FUSE_RM_FUSES__SHIFT 0x0
+#define RDPCSTX4_RDPCSTX_MEM_POWER_CTRL__RDPCS_FUSE_CUSTOM_RM_FUSES__SHIFT 0xc
+#define RDPCSTX4_RDPCSTX_MEM_POWER_CTRL__RDPCS_MEM_POWER_CTRL_PDP_BC1__SHIFT 0x1a
+#define RDPCSTX4_RDPCSTX_MEM_POWER_CTRL__RDPCS_MEM_POWER_CTRL_PDP_BC2__SHIFT 0x1b
+#define RDPCSTX4_RDPCSTX_MEM_POWER_CTRL__RDPCS_MEM_POWER_CTRL_HD_BC1__SHIFT 0x1c
+#define RDPCSTX4_RDPCSTX_MEM_POWER_CTRL__RDPCS_MEM_POWER_CTRL_HD_BC2__SHIFT 0x1d
+#define RDPCSTX4_RDPCSTX_MEM_POWER_CTRL__RDPCS_LIVMIN_DIS_SRAM__SHIFT 0x1e
+#define RDPCSTX4_RDPCSTX_MEM_POWER_CTRL__RDPCS_FUSE_RM_FUSES_MASK 0x00000FFFL
+#define RDPCSTX4_RDPCSTX_MEM_POWER_CTRL__RDPCS_FUSE_CUSTOM_RM_FUSES_MASK 0x03FFF000L
+#define RDPCSTX4_RDPCSTX_MEM_POWER_CTRL__RDPCS_MEM_POWER_CTRL_PDP_BC1_MASK 0x04000000L
+#define RDPCSTX4_RDPCSTX_MEM_POWER_CTRL__RDPCS_MEM_POWER_CTRL_PDP_BC2_MASK 0x08000000L
+#define RDPCSTX4_RDPCSTX_MEM_POWER_CTRL__RDPCS_MEM_POWER_CTRL_HD_BC1_MASK 0x10000000L
+#define RDPCSTX4_RDPCSTX_MEM_POWER_CTRL__RDPCS_MEM_POWER_CTRL_HD_BC2_MASK 0x20000000L
+#define RDPCSTX4_RDPCSTX_MEM_POWER_CTRL__RDPCS_LIVMIN_DIS_SRAM_MASK 0x40000000L
+//RDPCSTX4_RDPCSTX_MEM_POWER_CTRL2
+#define RDPCSTX4_RDPCSTX_MEM_POWER_CTRL2__RDPCS_MEM_POWER_CTRL_POFF__SHIFT 0x0
+#define RDPCSTX4_RDPCSTX_MEM_POWER_CTRL2__RDPCS_MEM_POWER_CTRL_FISO__SHIFT 0x2
+#define RDPCSTX4_RDPCSTX_MEM_POWER_CTRL2__RDPCS_MEM_POWER_CTRL_POFF_MASK 0x00000003L
+#define RDPCSTX4_RDPCSTX_MEM_POWER_CTRL2__RDPCS_MEM_POWER_CTRL_FISO_MASK 0x00000004L
+//RDPCSTX4_RDPCSTX_SCRATCH
+#define RDPCSTX4_RDPCSTX_SCRATCH__RDPCSTX_SCRATCH__SHIFT 0x0
+#define RDPCSTX4_RDPCSTX_SCRATCH__RDPCSTX_SCRATCH_MASK 0xFFFFFFFFL
+//RDPCSTX4_RDPCSTX_DMCU_DPALT_DIS_BLOCK_REG
+#define RDPCSTX4_RDPCSTX_DMCU_DPALT_DIS_BLOCK_REG__RDPCS_DMCU_DPALT_DIS_BLOCK_REG__SHIFT 0x0
+#define RDPCSTX4_RDPCSTX_DMCU_DPALT_DIS_BLOCK_REG__RDPCS_DMCU_DPALT_FORCE_SYMCLK_DIV2_DIS__SHIFT 0x4
+#define RDPCSTX4_RDPCSTX_DMCU_DPALT_DIS_BLOCK_REG__RDPCS_DMCU_DPALT_CONTROL_SPARE__SHIFT 0x8
+#define RDPCSTX4_RDPCSTX_DMCU_DPALT_DIS_BLOCK_REG__RDPCS_DMCU_DPALT_DIS_BLOCK_REG_MASK 0x00000001L
+#define RDPCSTX4_RDPCSTX_DMCU_DPALT_DIS_BLOCK_REG__RDPCS_DMCU_DPALT_FORCE_SYMCLK_DIV2_DIS_MASK 0x00000010L
+#define RDPCSTX4_RDPCSTX_DMCU_DPALT_DIS_BLOCK_REG__RDPCS_DMCU_DPALT_CONTROL_SPARE_MASK 0x0000FF00L
+//RDPCSTX4_RDPCSTX_DEBUG_CONFIG
+#define RDPCSTX4_RDPCSTX_DEBUG_CONFIG__RDPCS_DBG_EN__SHIFT 0x0
+#define RDPCSTX4_RDPCSTX_DEBUG_CONFIG__RDPCS_DBG_SEL_ASYNC_8BIT__SHIFT 0x4
+#define RDPCSTX4_RDPCSTX_DEBUG_CONFIG__RDPCS_DBG_SEL_ASYNC_SWAP__SHIFT 0x7
+#define RDPCSTX4_RDPCSTX_DEBUG_CONFIG__RDPCS_DBG_SEL_TEST_CLK__SHIFT 0x8
+#define RDPCSTX4_RDPCSTX_DEBUG_CONFIG__RDPCS_DBG_CR_COUNT_EXPIRE__SHIFT 0xf
+#define RDPCSTX4_RDPCSTX_DEBUG_CONFIG__RDPCS_DBG_CR_COUNT_MAX__SHIFT 0x10
+#define RDPCSTX4_RDPCSTX_DEBUG_CONFIG__RDPCS_DBG_CR_COUNT__SHIFT 0x18
+#define RDPCSTX4_RDPCSTX_DEBUG_CONFIG__RDPCS_DBG_EN_MASK 0x00000001L
+#define RDPCSTX4_RDPCSTX_DEBUG_CONFIG__RDPCS_DBG_SEL_ASYNC_8BIT_MASK 0x00000070L
+#define RDPCSTX4_RDPCSTX_DEBUG_CONFIG__RDPCS_DBG_SEL_ASYNC_SWAP_MASK 0x00000080L
+#define RDPCSTX4_RDPCSTX_DEBUG_CONFIG__RDPCS_DBG_SEL_TEST_CLK_MASK 0x00001F00L
+#define RDPCSTX4_RDPCSTX_DEBUG_CONFIG__RDPCS_DBG_CR_COUNT_EXPIRE_MASK 0x00008000L
+#define RDPCSTX4_RDPCSTX_DEBUG_CONFIG__RDPCS_DBG_CR_COUNT_MAX_MASK 0x00FF0000L
+#define RDPCSTX4_RDPCSTX_DEBUG_CONFIG__RDPCS_DBG_CR_COUNT_MASK 0xFF000000L
+//RDPCSTX4_RDPCSTX_PHY_CNTL0
+#define RDPCSTX4_RDPCSTX_PHY_CNTL0__RDPCS_PHY_RESET__SHIFT 0x0
+#define RDPCSTX4_RDPCSTX_PHY_CNTL0__RDPCS_PHY_TCA_PHY_RESET__SHIFT 0x1
+#define RDPCSTX4_RDPCSTX_PHY_CNTL0__RDPCS_PHY_TCA_APB_RESET_N__SHIFT 0x2
+#define RDPCSTX4_RDPCSTX_PHY_CNTL0__RDPCS_PHY_TEST_POWERDOWN__SHIFT 0x3
+#define RDPCSTX4_RDPCSTX_PHY_CNTL0__RDPCS_PHY_DTB_OUT__SHIFT 0x4
+#define RDPCSTX4_RDPCSTX_PHY_CNTL0__RDPCS_PHY_HDMIMODE_ENABLE__SHIFT 0x8
+#define RDPCSTX4_RDPCSTX_PHY_CNTL0__RDPCS_PHY_REF_RANGE__SHIFT 0x9
+#define RDPCSTX4_RDPCSTX_PHY_CNTL0__RDPCS_PHY_TX_VBOOST_LVL__SHIFT 0xe
+#define RDPCSTX4_RDPCSTX_PHY_CNTL0__RDPCS_PHY_RTUNE_REQ__SHIFT 0x11
+#define RDPCSTX4_RDPCSTX_PHY_CNTL0__RDPCS_PHY_RTUNE_ACK__SHIFT 0x12
+#define RDPCSTX4_RDPCSTX_PHY_CNTL0__RDPCS_PHY_CR_PARA_SEL__SHIFT 0x14
+#define RDPCSTX4_RDPCSTX_PHY_CNTL0__RDPCS_PHY_CR_MUX_SEL__SHIFT 0x15
+#define RDPCSTX4_RDPCSTX_PHY_CNTL0__RDPCS_PHY_REF_CLKDET_EN__SHIFT 0x18
+#define RDPCSTX4_RDPCSTX_PHY_CNTL0__RDPCS_PHY_REF_CLKDET_RESULT__SHIFT 0x19
+#define RDPCSTX4_RDPCSTX_PHY_CNTL0__RDPCS_SRAM_INIT_DONE__SHIFT 0x1c
+#define RDPCSTX4_RDPCSTX_PHY_CNTL0__RDPCS_SRAM_EXT_LD_DONE__SHIFT 0x1d
+#define RDPCSTX4_RDPCSTX_PHY_CNTL0__RDPCS_SRAM_BYPASS__SHIFT 0x1f
+#define RDPCSTX4_RDPCSTX_PHY_CNTL0__RDPCS_PHY_RESET_MASK 0x00000001L
+#define RDPCSTX4_RDPCSTX_PHY_CNTL0__RDPCS_PHY_TCA_PHY_RESET_MASK 0x00000002L
+#define RDPCSTX4_RDPCSTX_PHY_CNTL0__RDPCS_PHY_TCA_APB_RESET_N_MASK 0x00000004L
+#define RDPCSTX4_RDPCSTX_PHY_CNTL0__RDPCS_PHY_TEST_POWERDOWN_MASK 0x00000008L
+#define RDPCSTX4_RDPCSTX_PHY_CNTL0__RDPCS_PHY_DTB_OUT_MASK 0x00000030L
+#define RDPCSTX4_RDPCSTX_PHY_CNTL0__RDPCS_PHY_HDMIMODE_ENABLE_MASK 0x00000100L
+#define RDPCSTX4_RDPCSTX_PHY_CNTL0__RDPCS_PHY_REF_RANGE_MASK 0x00003E00L
+#define RDPCSTX4_RDPCSTX_PHY_CNTL0__RDPCS_PHY_TX_VBOOST_LVL_MASK 0x0001C000L
+#define RDPCSTX4_RDPCSTX_PHY_CNTL0__RDPCS_PHY_RTUNE_REQ_MASK 0x00020000L
+#define RDPCSTX4_RDPCSTX_PHY_CNTL0__RDPCS_PHY_RTUNE_ACK_MASK 0x00040000L
+#define RDPCSTX4_RDPCSTX_PHY_CNTL0__RDPCS_PHY_CR_PARA_SEL_MASK 0x00100000L
+#define RDPCSTX4_RDPCSTX_PHY_CNTL0__RDPCS_PHY_CR_MUX_SEL_MASK 0x00200000L
+#define RDPCSTX4_RDPCSTX_PHY_CNTL0__RDPCS_PHY_REF_CLKDET_EN_MASK 0x01000000L
+#define RDPCSTX4_RDPCSTX_PHY_CNTL0__RDPCS_PHY_REF_CLKDET_RESULT_MASK 0x02000000L
+#define RDPCSTX4_RDPCSTX_PHY_CNTL0__RDPCS_SRAM_INIT_DONE_MASK 0x10000000L
+#define RDPCSTX4_RDPCSTX_PHY_CNTL0__RDPCS_SRAM_EXT_LD_DONE_MASK 0x20000000L
+#define RDPCSTX4_RDPCSTX_PHY_CNTL0__RDPCS_SRAM_BYPASS_MASK 0x80000000L
+//RDPCSTX4_RDPCSTX_PHY_CNTL1
+#define RDPCSTX4_RDPCSTX_PHY_CNTL1__RDPCS_PHY_PG_MODE_EN__SHIFT 0x0
+#define RDPCSTX4_RDPCSTX_PHY_CNTL1__RDPCS_PHY_PCS_PWR_EN__SHIFT 0x1
+#define RDPCSTX4_RDPCSTX_PHY_CNTL1__RDPCS_PHY_PCS_PWR_STABLE__SHIFT 0x2
+#define RDPCSTX4_RDPCSTX_PHY_CNTL1__RDPCS_PHY_PMA_PWR_EN__SHIFT 0x3
+#define RDPCSTX4_RDPCSTX_PHY_CNTL1__RDPCS_PHY_PMA_PWR_STABLE__SHIFT 0x4
+#define RDPCSTX4_RDPCSTX_PHY_CNTL1__RDPCS_PHY_DP_PG_RESET__SHIFT 0x5
+#define RDPCSTX4_RDPCSTX_PHY_CNTL1__RDPCS_PHY_ANA_PWR_EN__SHIFT 0x6
+#define RDPCSTX4_RDPCSTX_PHY_CNTL1__RDPCS_PHY_ANA_PWR_STABLE__SHIFT 0x7
+#define RDPCSTX4_RDPCSTX_PHY_CNTL1__RDPCS_PHY_PG_MODE_EN_MASK 0x00000001L
+#define RDPCSTX4_RDPCSTX_PHY_CNTL1__RDPCS_PHY_PCS_PWR_EN_MASK 0x00000002L
+#define RDPCSTX4_RDPCSTX_PHY_CNTL1__RDPCS_PHY_PCS_PWR_STABLE_MASK 0x00000004L
+#define RDPCSTX4_RDPCSTX_PHY_CNTL1__RDPCS_PHY_PMA_PWR_EN_MASK 0x00000008L
+#define RDPCSTX4_RDPCSTX_PHY_CNTL1__RDPCS_PHY_PMA_PWR_STABLE_MASK 0x00000010L
+#define RDPCSTX4_RDPCSTX_PHY_CNTL1__RDPCS_PHY_DP_PG_RESET_MASK 0x00000020L
+#define RDPCSTX4_RDPCSTX_PHY_CNTL1__RDPCS_PHY_ANA_PWR_EN_MASK 0x00000040L
+#define RDPCSTX4_RDPCSTX_PHY_CNTL1__RDPCS_PHY_ANA_PWR_STABLE_MASK 0x00000080L
+//RDPCSTX4_RDPCSTX_PHY_CNTL2
+#define RDPCSTX4_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP4_POR__SHIFT 0x3
+#define RDPCSTX4_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP_LANE0_RX2TX_PAR_LB_EN__SHIFT 0x4
+#define RDPCSTX4_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP_LANE1_RX2TX_PAR_LB_EN__SHIFT 0x5
+#define RDPCSTX4_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP_LANE2_RX2TX_PAR_LB_EN__SHIFT 0x6
+#define RDPCSTX4_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP_LANE3_RX2TX_PAR_LB_EN__SHIFT 0x7
+#define RDPCSTX4_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP_LANE0_TX2RX_SER_LB_EN__SHIFT 0x8
+#define RDPCSTX4_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP_LANE1_TX2RX_SER_LB_EN__SHIFT 0x9
+#define RDPCSTX4_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP_LANE2_TX2RX_SER_LB_EN__SHIFT 0xa
+#define RDPCSTX4_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP_LANE3_TX2RX_SER_LB_EN__SHIFT 0xb
+#define RDPCSTX4_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP4_POR_MASK 0x00000008L
+#define RDPCSTX4_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP_LANE0_RX2TX_PAR_LB_EN_MASK 0x00000010L
+#define RDPCSTX4_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP_LANE1_RX2TX_PAR_LB_EN_MASK 0x00000020L
+#define RDPCSTX4_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP_LANE2_RX2TX_PAR_LB_EN_MASK 0x00000040L
+#define RDPCSTX4_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP_LANE3_RX2TX_PAR_LB_EN_MASK 0x00000080L
+#define RDPCSTX4_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP_LANE0_TX2RX_SER_LB_EN_MASK 0x00000100L
+#define RDPCSTX4_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP_LANE1_TX2RX_SER_LB_EN_MASK 0x00000200L
+#define RDPCSTX4_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP_LANE2_TX2RX_SER_LB_EN_MASK 0x00000400L
+#define RDPCSTX4_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP_LANE3_TX2RX_SER_LB_EN_MASK 0x00000800L
+//RDPCSTX4_RDPCSTX_PHY_CNTL3
+#define RDPCSTX4_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX0_RESET__SHIFT 0x0
+#define RDPCSTX4_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX0_DISABLE__SHIFT 0x1
+#define RDPCSTX4_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX0_CLK_RDY__SHIFT 0x2
+#define RDPCSTX4_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX0_DATA_EN__SHIFT 0x3
+#define RDPCSTX4_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX0_REQ__SHIFT 0x4
+#define RDPCSTX4_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX0_ACK__SHIFT 0x5
+#define RDPCSTX4_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX1_RESET__SHIFT 0x8
+#define RDPCSTX4_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX1_DISABLE__SHIFT 0x9
+#define RDPCSTX4_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX1_CLK_RDY__SHIFT 0xa
+#define RDPCSTX4_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX1_DATA_EN__SHIFT 0xb
+#define RDPCSTX4_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX1_REQ__SHIFT 0xc
+#define RDPCSTX4_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX1_ACK__SHIFT 0xd
+#define RDPCSTX4_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX2_RESET__SHIFT 0x10
+#define RDPCSTX4_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX2_DISABLE__SHIFT 0x11
+#define RDPCSTX4_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX2_CLK_RDY__SHIFT 0x12
+#define RDPCSTX4_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX2_DATA_EN__SHIFT 0x13
+#define RDPCSTX4_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX2_REQ__SHIFT 0x14
+#define RDPCSTX4_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX2_ACK__SHIFT 0x15
+#define RDPCSTX4_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX3_RESET__SHIFT 0x18
+#define RDPCSTX4_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX3_DISABLE__SHIFT 0x19
+#define RDPCSTX4_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX3_CLK_RDY__SHIFT 0x1a
+#define RDPCSTX4_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX3_DATA_EN__SHIFT 0x1b
+#define RDPCSTX4_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX3_REQ__SHIFT 0x1c
+#define RDPCSTX4_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX3_ACK__SHIFT 0x1d
+#define RDPCSTX4_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX0_RESET_MASK 0x00000001L
+#define RDPCSTX4_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX0_DISABLE_MASK 0x00000002L
+#define RDPCSTX4_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX0_CLK_RDY_MASK 0x00000004L
+#define RDPCSTX4_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX0_DATA_EN_MASK 0x00000008L
+#define RDPCSTX4_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX0_REQ_MASK 0x00000010L
+#define RDPCSTX4_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX0_ACK_MASK 0x00000020L
+#define RDPCSTX4_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX1_RESET_MASK 0x00000100L
+#define RDPCSTX4_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX1_DISABLE_MASK 0x00000200L
+#define RDPCSTX4_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX1_CLK_RDY_MASK 0x00000400L
+#define RDPCSTX4_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX1_DATA_EN_MASK 0x00000800L
+#define RDPCSTX4_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX1_REQ_MASK 0x00001000L
+#define RDPCSTX4_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX1_ACK_MASK 0x00002000L
+#define RDPCSTX4_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX2_RESET_MASK 0x00010000L
+#define RDPCSTX4_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX2_DISABLE_MASK 0x00020000L
+#define RDPCSTX4_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX2_CLK_RDY_MASK 0x00040000L
+#define RDPCSTX4_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX2_DATA_EN_MASK 0x00080000L
+#define RDPCSTX4_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX2_REQ_MASK 0x00100000L
+#define RDPCSTX4_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX2_ACK_MASK 0x00200000L
+#define RDPCSTX4_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX3_RESET_MASK 0x01000000L
+#define RDPCSTX4_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX3_DISABLE_MASK 0x02000000L
+#define RDPCSTX4_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX3_CLK_RDY_MASK 0x04000000L
+#define RDPCSTX4_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX3_DATA_EN_MASK 0x08000000L
+#define RDPCSTX4_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX3_REQ_MASK 0x10000000L
+#define RDPCSTX4_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX3_ACK_MASK 0x20000000L
+//RDPCSTX4_RDPCSTX_PHY_CNTL4
+#define RDPCSTX4_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX0_TERM_CTRL__SHIFT 0x0
+#define RDPCSTX4_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX0_INVERT__SHIFT 0x4
+#define RDPCSTX4_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX0_BYPASS_EQ_CALC__SHIFT 0x6
+#define RDPCSTX4_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX0_HP_PROT_EN__SHIFT 0x7
+#define RDPCSTX4_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX1_TERM_CTRL__SHIFT 0x8
+#define RDPCSTX4_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX1_INVERT__SHIFT 0xc
+#define RDPCSTX4_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX1_BYPASS_EQ_CALC__SHIFT 0xe
+#define RDPCSTX4_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX1_HP_PROT_EN__SHIFT 0xf
+#define RDPCSTX4_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX2_TERM_CTRL__SHIFT 0x10
+#define RDPCSTX4_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX2_INVERT__SHIFT 0x14
+#define RDPCSTX4_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX2_BYPASS_EQ_CALC__SHIFT 0x16
+#define RDPCSTX4_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX2_HP_PROT_EN__SHIFT 0x17
+#define RDPCSTX4_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX3_TERM_CTRL__SHIFT 0x18
+#define RDPCSTX4_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX3_INVERT__SHIFT 0x1c
+#define RDPCSTX4_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX3_BYPASS_EQ_CALC__SHIFT 0x1e
+#define RDPCSTX4_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX3_HP_PROT_EN__SHIFT 0x1f
+#define RDPCSTX4_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX0_TERM_CTRL_MASK 0x00000007L
+#define RDPCSTX4_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX0_INVERT_MASK 0x00000010L
+#define RDPCSTX4_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX0_BYPASS_EQ_CALC_MASK 0x00000040L
+#define RDPCSTX4_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX0_HP_PROT_EN_MASK 0x00000080L
+#define RDPCSTX4_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX1_TERM_CTRL_MASK 0x00000700L
+#define RDPCSTX4_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX1_INVERT_MASK 0x00001000L
+#define RDPCSTX4_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX1_BYPASS_EQ_CALC_MASK 0x00004000L
+#define RDPCSTX4_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX1_HP_PROT_EN_MASK 0x00008000L
+#define RDPCSTX4_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX2_TERM_CTRL_MASK 0x00070000L
+#define RDPCSTX4_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX2_INVERT_MASK 0x00100000L
+#define RDPCSTX4_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX2_BYPASS_EQ_CALC_MASK 0x00400000L
+#define RDPCSTX4_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX2_HP_PROT_EN_MASK 0x00800000L
+#define RDPCSTX4_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX3_TERM_CTRL_MASK 0x07000000L
+#define RDPCSTX4_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX3_INVERT_MASK 0x10000000L
+#define RDPCSTX4_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX3_BYPASS_EQ_CALC_MASK 0x40000000L
+#define RDPCSTX4_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX3_HP_PROT_EN_MASK 0x80000000L
+//RDPCSTX4_RDPCSTX_PHY_CNTL5
+#define RDPCSTX4_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX0_LPD__SHIFT 0x0
+#define RDPCSTX4_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX0_RATE__SHIFT 0x1
+#define RDPCSTX4_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX0_WIDTH__SHIFT 0x4
+#define RDPCSTX4_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX0_DETRX_REQ__SHIFT 0x6
+#define RDPCSTX4_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX0_DETRX_RESULT__SHIFT 0x7
+#define RDPCSTX4_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX1_LPD__SHIFT 0x8
+#define RDPCSTX4_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX1_RATE__SHIFT 0x9
+#define RDPCSTX4_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX1_WIDTH__SHIFT 0xc
+#define RDPCSTX4_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX1_DETRX_REQ__SHIFT 0xe
+#define RDPCSTX4_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX1_DETRX_RESULT__SHIFT 0xf
+#define RDPCSTX4_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX2_LPD__SHIFT 0x10
+#define RDPCSTX4_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX2_RATE__SHIFT 0x11
+#define RDPCSTX4_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX2_WIDTH__SHIFT 0x14
+#define RDPCSTX4_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX2_DETRX_REQ__SHIFT 0x16
+#define RDPCSTX4_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX2_DETRX_RESULT__SHIFT 0x17
+#define RDPCSTX4_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX3_LPD__SHIFT 0x18
+#define RDPCSTX4_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX3_RATE__SHIFT 0x19
+#define RDPCSTX4_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX3_WIDTH__SHIFT 0x1c
+#define RDPCSTX4_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX3_DETRX_REQ__SHIFT 0x1e
+#define RDPCSTX4_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX3_DETRX_RESULT__SHIFT 0x1f
+#define RDPCSTX4_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX0_LPD_MASK 0x00000001L
+#define RDPCSTX4_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX0_RATE_MASK 0x0000000EL
+#define RDPCSTX4_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX0_WIDTH_MASK 0x00000030L
+#define RDPCSTX4_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX0_DETRX_REQ_MASK 0x00000040L
+#define RDPCSTX4_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX0_DETRX_RESULT_MASK 0x00000080L
+#define RDPCSTX4_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX1_LPD_MASK 0x00000100L
+#define RDPCSTX4_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX1_RATE_MASK 0x00000E00L
+#define RDPCSTX4_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX1_WIDTH_MASK 0x00003000L
+#define RDPCSTX4_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX1_DETRX_REQ_MASK 0x00004000L
+#define RDPCSTX4_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX1_DETRX_RESULT_MASK 0x00008000L
+#define RDPCSTX4_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX2_LPD_MASK 0x00010000L
+#define RDPCSTX4_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX2_RATE_MASK 0x000E0000L
+#define RDPCSTX4_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX2_WIDTH_MASK 0x00300000L
+#define RDPCSTX4_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX2_DETRX_REQ_MASK 0x00400000L
+#define RDPCSTX4_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX2_DETRX_RESULT_MASK 0x00800000L
+#define RDPCSTX4_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX3_LPD_MASK 0x01000000L
+#define RDPCSTX4_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX3_RATE_MASK 0x0E000000L
+#define RDPCSTX4_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX3_WIDTH_MASK 0x30000000L
+#define RDPCSTX4_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX3_DETRX_REQ_MASK 0x40000000L
+#define RDPCSTX4_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX3_DETRX_RESULT_MASK 0x80000000L
+//RDPCSTX4_RDPCSTX_PHY_CNTL6
+#define RDPCSTX4_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX0_PSTATE__SHIFT 0x0
+#define RDPCSTX4_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX0_MPLL_EN__SHIFT 0x2
+#define RDPCSTX4_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX1_PSTATE__SHIFT 0x4
+#define RDPCSTX4_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX1_MPLL_EN__SHIFT 0x6
+#define RDPCSTX4_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX2_PSTATE__SHIFT 0x8
+#define RDPCSTX4_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX2_MPLL_EN__SHIFT 0xa
+#define RDPCSTX4_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX3_PSTATE__SHIFT 0xc
+#define RDPCSTX4_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX3_MPLL_EN__SHIFT 0xe
+#define RDPCSTX4_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DPALT_DP4__SHIFT 0x10
+#define RDPCSTX4_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DPALT_DISABLE__SHIFT 0x11
+#define RDPCSTX4_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DPALT_DISABLE_ACK__SHIFT 0x12
+#define RDPCSTX4_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_REF_CLK_EN__SHIFT 0x13
+#define RDPCSTX4_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_REF_CLK_REQ__SHIFT 0x14
+#define RDPCSTX4_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX0_PSTATE_MASK 0x00000003L
+#define RDPCSTX4_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX0_MPLL_EN_MASK 0x00000004L
+#define RDPCSTX4_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX1_PSTATE_MASK 0x00000030L
+#define RDPCSTX4_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX1_MPLL_EN_MASK 0x00000040L
+#define RDPCSTX4_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX2_PSTATE_MASK 0x00000300L
+#define RDPCSTX4_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX2_MPLL_EN_MASK 0x00000400L
+#define RDPCSTX4_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX3_PSTATE_MASK 0x00003000L
+#define RDPCSTX4_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX3_MPLL_EN_MASK 0x00004000L
+#define RDPCSTX4_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DPALT_DP4_MASK 0x00010000L
+#define RDPCSTX4_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DPALT_DISABLE_MASK 0x00020000L
+#define RDPCSTX4_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DPALT_DISABLE_ACK_MASK 0x00040000L
+#define RDPCSTX4_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_REF_CLK_EN_MASK 0x00080000L
+#define RDPCSTX4_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_REF_CLK_REQ_MASK 0x00100000L
+//RDPCSTX4_RDPCSTX_PHY_CNTL7
+#define RDPCSTX4_RDPCSTX_PHY_CNTL7__RDPCS_PHY_DP_MPLLB_FRACN_DEN__SHIFT 0x0
+#define RDPCSTX4_RDPCSTX_PHY_CNTL7__RDPCS_PHY_DP_MPLLB_FRACN_QUOT__SHIFT 0x10
+#define RDPCSTX4_RDPCSTX_PHY_CNTL7__RDPCS_PHY_DP_MPLLB_FRACN_DEN_MASK 0x0000FFFFL
+#define RDPCSTX4_RDPCSTX_PHY_CNTL7__RDPCS_PHY_DP_MPLLB_FRACN_QUOT_MASK 0xFFFF0000L
+//RDPCSTX4_RDPCSTX_PHY_CNTL8
+#define RDPCSTX4_RDPCSTX_PHY_CNTL8__RDPCS_PHY_DP_MPLLB_SSC_PEAK__SHIFT 0x0
+#define RDPCSTX4_RDPCSTX_PHY_CNTL8__RDPCS_PHY_DP_MPLLB_SSC_PEAK_MASK 0x000FFFFFL
+//RDPCSTX4_RDPCSTX_PHY_CNTL9
+#define RDPCSTX4_RDPCSTX_PHY_CNTL9__RDPCS_PHY_DP_MPLLB_SSC_STEPSIZE__SHIFT 0x0
+#define RDPCSTX4_RDPCSTX_PHY_CNTL9__RDPCS_PHY_DP_MPLLB_SSC_UP_SPREAD__SHIFT 0x18
+#define RDPCSTX4_RDPCSTX_PHY_CNTL9__RDPCS_PHY_DP_MPLLB_SSC_STEPSIZE_MASK 0x001FFFFFL
+#define RDPCSTX4_RDPCSTX_PHY_CNTL9__RDPCS_PHY_DP_MPLLB_SSC_UP_SPREAD_MASK 0x01000000L
+//RDPCSTX4_RDPCSTX_PHY_CNTL10
+#define RDPCSTX4_RDPCSTX_PHY_CNTL10__RDPCS_PHY_DP_MPLLB_FRACN_REM__SHIFT 0x0
+#define RDPCSTX4_RDPCSTX_PHY_CNTL10__RDPCS_PHY_DP_MPLLB_FRACN_REM_MASK 0x0000FFFFL
+//RDPCSTX4_RDPCSTX_PHY_CNTL11
+#define RDPCSTX4_RDPCSTX_PHY_CNTL11__RDPCS_PHY_DP_MPLLB_MULTIPLIER__SHIFT 0x4
+#define RDPCSTX4_RDPCSTX_PHY_CNTL11__RDPCS_PHY_HDMI_MPLLB_HDMI_DIV__SHIFT 0x10
+#define RDPCSTX4_RDPCSTX_PHY_CNTL11__RDPCS_PHY_DP_REF_CLK_MPLLB_DIV__SHIFT 0x14
+#define RDPCSTX4_RDPCSTX_PHY_CNTL11__RDPCS_PHY_HDMI_MPLLB_HDMI_PIXEL_CLK_DIV__SHIFT 0x18
+#define RDPCSTX4_RDPCSTX_PHY_CNTL11__RDPCS_PHY_DP_MPLLB_MULTIPLIER_MASK 0x0000FFF0L
+#define RDPCSTX4_RDPCSTX_PHY_CNTL11__RDPCS_PHY_HDMI_MPLLB_HDMI_DIV_MASK 0x00070000L
+#define RDPCSTX4_RDPCSTX_PHY_CNTL11__RDPCS_PHY_DP_REF_CLK_MPLLB_DIV_MASK 0x00700000L
+#define RDPCSTX4_RDPCSTX_PHY_CNTL11__RDPCS_PHY_HDMI_MPLLB_HDMI_PIXEL_CLK_DIV_MASK 0x03000000L
+//RDPCSTX4_RDPCSTX_PHY_CNTL12
+#define RDPCSTX4_RDPCSTX_PHY_CNTL12__RDPCS_PHY_DP_MPLLB_DIV5_CLK_EN__SHIFT 0x0
+#define RDPCSTX4_RDPCSTX_PHY_CNTL12__RDPCS_PHY_DP_MPLLB_WORD_DIV2_EN__SHIFT 0x2
+#define RDPCSTX4_RDPCSTX_PHY_CNTL12__RDPCS_PHY_DP_MPLLB_TX_CLK_DIV__SHIFT 0x4
+#define RDPCSTX4_RDPCSTX_PHY_CNTL12__RDPCS_PHY_DP_MPLLB_STATE__SHIFT 0x7
+#define RDPCSTX4_RDPCSTX_PHY_CNTL12__RDPCS_PHY_DP_MPLLB_SSC_EN__SHIFT 0x8
+#define RDPCSTX4_RDPCSTX_PHY_CNTL12__RDPCS_PHY_DP_MPLLB_DIV5_CLK_EN_MASK 0x00000001L
+#define RDPCSTX4_RDPCSTX_PHY_CNTL12__RDPCS_PHY_DP_MPLLB_WORD_DIV2_EN_MASK 0x00000004L
+#define RDPCSTX4_RDPCSTX_PHY_CNTL12__RDPCS_PHY_DP_MPLLB_TX_CLK_DIV_MASK 0x00000070L
+#define RDPCSTX4_RDPCSTX_PHY_CNTL12__RDPCS_PHY_DP_MPLLB_STATE_MASK 0x00000080L
+#define RDPCSTX4_RDPCSTX_PHY_CNTL12__RDPCS_PHY_DP_MPLLB_SSC_EN_MASK 0x00000100L
+//RDPCSTX4_RDPCSTX_PHY_CNTL13
+#define RDPCSTX4_RDPCSTX_PHY_CNTL13__RDPCS_PHY_DP_MPLLB_DIV_MULTIPLIER__SHIFT 0x14
+#define RDPCSTX4_RDPCSTX_PHY_CNTL13__RDPCS_PHY_DP_MPLLB_DIV_CLK_EN__SHIFT 0x1c
+#define RDPCSTX4_RDPCSTX_PHY_CNTL13__RDPCS_PHY_DP_MPLLB_FORCE_EN__SHIFT 0x1d
+#define RDPCSTX4_RDPCSTX_PHY_CNTL13__RDPCS_PHY_DP_MPLLB_INIT_CAL_DISABLE__SHIFT 0x1e
+#define RDPCSTX4_RDPCSTX_PHY_CNTL13__RDPCS_PHY_DP_MPLLB_DIV_MULTIPLIER_MASK 0x0FF00000L
+#define RDPCSTX4_RDPCSTX_PHY_CNTL13__RDPCS_PHY_DP_MPLLB_DIV_CLK_EN_MASK 0x10000000L
+#define RDPCSTX4_RDPCSTX_PHY_CNTL13__RDPCS_PHY_DP_MPLLB_FORCE_EN_MASK 0x20000000L
+#define RDPCSTX4_RDPCSTX_PHY_CNTL13__RDPCS_PHY_DP_MPLLB_INIT_CAL_DISABLE_MASK 0x40000000L
+//RDPCSTX4_RDPCSTX_PHY_CNTL14
+#define RDPCSTX4_RDPCSTX_PHY_CNTL14__RDPCS_PHY_DP_MPLLB_CAL_FORCE__SHIFT 0x0
+#define RDPCSTX4_RDPCSTX_PHY_CNTL14__RDPCS_PHY_DP_MPLLB_FRACN_EN__SHIFT 0x18
+#define RDPCSTX4_RDPCSTX_PHY_CNTL14__RDPCS_PHY_DP_MPLLB_PMIX_EN__SHIFT 0x1c
+#define RDPCSTX4_RDPCSTX_PHY_CNTL14__RDPCS_PHY_DP_MPLLB_CAL_FORCE_MASK 0x00000001L
+#define RDPCSTX4_RDPCSTX_PHY_CNTL14__RDPCS_PHY_DP_MPLLB_FRACN_EN_MASK 0x01000000L
+#define RDPCSTX4_RDPCSTX_PHY_CNTL14__RDPCS_PHY_DP_MPLLB_PMIX_EN_MASK 0x10000000L
+//RDPCSTX4_RDPCSTX_PHY_FUSE0
+#define RDPCSTX4_RDPCSTX_PHY_FUSE0__RDPCS_PHY_DP_TX0_EQ_MAIN__SHIFT 0x0
+#define RDPCSTX4_RDPCSTX_PHY_FUSE0__RDPCS_PHY_DP_TX0_EQ_PRE__SHIFT 0x6
+#define RDPCSTX4_RDPCSTX_PHY_FUSE0__RDPCS_PHY_DP_TX0_EQ_POST__SHIFT 0xc
+#define RDPCSTX4_RDPCSTX_PHY_FUSE0__RDPCS_PHY_DP_MPLLB_V2I__SHIFT 0x12
+#define RDPCSTX4_RDPCSTX_PHY_FUSE0__RDPCS_PHY_DP_MPLLB_FREQ_VCO__SHIFT 0x14
+#define RDPCSTX4_RDPCSTX_PHY_FUSE0__RDPCS_PHY_DP_TX0_EQ_MAIN_MASK 0x0000003FL
+#define RDPCSTX4_RDPCSTX_PHY_FUSE0__RDPCS_PHY_DP_TX0_EQ_PRE_MASK 0x00000FC0L
+#define RDPCSTX4_RDPCSTX_PHY_FUSE0__RDPCS_PHY_DP_TX0_EQ_POST_MASK 0x0003F000L
+#define RDPCSTX4_RDPCSTX_PHY_FUSE0__RDPCS_PHY_DP_MPLLB_V2I_MASK 0x000C0000L
+#define RDPCSTX4_RDPCSTX_PHY_FUSE0__RDPCS_PHY_DP_MPLLB_FREQ_VCO_MASK 0x00300000L
+//RDPCSTX4_RDPCSTX_PHY_FUSE1
+#define RDPCSTX4_RDPCSTX_PHY_FUSE1__RDPCS_PHY_DP_TX1_EQ_MAIN__SHIFT 0x0
+#define RDPCSTX4_RDPCSTX_PHY_FUSE1__RDPCS_PHY_DP_TX1_EQ_PRE__SHIFT 0x6
+#define RDPCSTX4_RDPCSTX_PHY_FUSE1__RDPCS_PHY_DP_TX1_EQ_POST__SHIFT 0xc
+#define RDPCSTX4_RDPCSTX_PHY_FUSE1__RDPCS_PHY_DP_MPLLB_CP_INT__SHIFT 0x12
+#define RDPCSTX4_RDPCSTX_PHY_FUSE1__RDPCS_PHY_DP_MPLLB_CP_PROP__SHIFT 0x19
+#define RDPCSTX4_RDPCSTX_PHY_FUSE1__RDPCS_PHY_DP_TX1_EQ_MAIN_MASK 0x0000003FL
+#define RDPCSTX4_RDPCSTX_PHY_FUSE1__RDPCS_PHY_DP_TX1_EQ_PRE_MASK 0x00000FC0L
+#define RDPCSTX4_RDPCSTX_PHY_FUSE1__RDPCS_PHY_DP_TX1_EQ_POST_MASK 0x0003F000L
+#define RDPCSTX4_RDPCSTX_PHY_FUSE1__RDPCS_PHY_DP_MPLLB_CP_INT_MASK 0x01FC0000L
+#define RDPCSTX4_RDPCSTX_PHY_FUSE1__RDPCS_PHY_DP_MPLLB_CP_PROP_MASK 0xFE000000L
+//RDPCSTX4_RDPCSTX_PHY_FUSE2
+#define RDPCSTX4_RDPCSTX_PHY_FUSE2__RDPCS_PHY_DP_TX2_EQ_MAIN__SHIFT 0x0
+#define RDPCSTX4_RDPCSTX_PHY_FUSE2__RDPCS_PHY_DP_TX2_EQ_PRE__SHIFT 0x6
+#define RDPCSTX4_RDPCSTX_PHY_FUSE2__RDPCS_PHY_DP_TX2_EQ_POST__SHIFT 0xc
+#define RDPCSTX4_RDPCSTX_PHY_FUSE2__RDPCS_PHY_DP_TX2_EQ_MAIN_MASK 0x0000003FL
+#define RDPCSTX4_RDPCSTX_PHY_FUSE2__RDPCS_PHY_DP_TX2_EQ_PRE_MASK 0x00000FC0L
+#define RDPCSTX4_RDPCSTX_PHY_FUSE2__RDPCS_PHY_DP_TX2_EQ_POST_MASK 0x0003F000L
+//RDPCSTX4_RDPCSTX_PHY_FUSE3
+#define RDPCSTX4_RDPCSTX_PHY_FUSE3__RDPCS_PHY_DP_TX3_EQ_MAIN__SHIFT 0x0
+#define RDPCSTX4_RDPCSTX_PHY_FUSE3__RDPCS_PHY_DP_TX3_EQ_PRE__SHIFT 0x6
+#define RDPCSTX4_RDPCSTX_PHY_FUSE3__RDPCS_PHY_DP_TX3_EQ_POST__SHIFT 0xc
+#define RDPCSTX4_RDPCSTX_PHY_FUSE3__RDPCS_PHY_DCO_FINETUNE__SHIFT 0x12
+#define RDPCSTX4_RDPCSTX_PHY_FUSE3__RDPCS_PHY_DCO_RANGE__SHIFT 0x18
+#define RDPCSTX4_RDPCSTX_PHY_FUSE3__RDPCS_PHY_DP_TX3_EQ_MAIN_MASK 0x0000003FL
+#define RDPCSTX4_RDPCSTX_PHY_FUSE3__RDPCS_PHY_DP_TX3_EQ_PRE_MASK 0x00000FC0L
+#define RDPCSTX4_RDPCSTX_PHY_FUSE3__RDPCS_PHY_DP_TX3_EQ_POST_MASK 0x0003F000L
+#define RDPCSTX4_RDPCSTX_PHY_FUSE3__RDPCS_PHY_DCO_FINETUNE_MASK 0x00FC0000L
+#define RDPCSTX4_RDPCSTX_PHY_FUSE3__RDPCS_PHY_DCO_RANGE_MASK 0x03000000L
+//RDPCSTX4_RDPCSTX_PHY_RX_LD_VAL
+#define RDPCSTX4_RDPCSTX_PHY_RX_LD_VAL__RDPCS_PHY_RX_REF_LD_VAL__SHIFT 0x0
+#define RDPCSTX4_RDPCSTX_PHY_RX_LD_VAL__RDPCS_PHY_RX_VCO_LD_VAL__SHIFT 0x8
+#define RDPCSTX4_RDPCSTX_PHY_RX_LD_VAL__RDPCS_PHY_RX_REF_LD_VAL_MASK 0x0000007FL
+#define RDPCSTX4_RDPCSTX_PHY_RX_LD_VAL__RDPCS_PHY_RX_VCO_LD_VAL_MASK 0x001FFF00L
+//RDPCSTX4_RDPCSTX_DMCU_DPALT_PHY_CNTL3
+#define RDPCSTX4_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX0_RESET_RESERVED__SHIFT 0x0
+#define RDPCSTX4_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX0_DISABLE_RESERVED__SHIFT 0x1
+#define RDPCSTX4_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX0_CLK_RDY_RESERVED__SHIFT 0x2
+#define RDPCSTX4_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX0_DATA_EN_RESERVED__SHIFT 0x3
+#define RDPCSTX4_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX0_REQ_RESERVED__SHIFT 0x4
+#define RDPCSTX4_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX0_ACK_RESERVED__SHIFT 0x5
+#define RDPCSTX4_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX1_RESET_RESERVED__SHIFT 0x8
+#define RDPCSTX4_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX1_DISABLE_RESERVED__SHIFT 0x9
+#define RDPCSTX4_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX1_CLK_RDY_RESERVED__SHIFT 0xa
+#define RDPCSTX4_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX1_DATA_EN_RESERVED__SHIFT 0xb
+#define RDPCSTX4_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX1_REQ_RESERVED__SHIFT 0xc
+#define RDPCSTX4_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX1_ACK_RESERVED__SHIFT 0xd
+#define RDPCSTX4_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX2_RESET_RESERVED__SHIFT 0x10
+#define RDPCSTX4_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX2_DISABLE_RESERVED__SHIFT 0x11
+#define RDPCSTX4_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX2_CLK_RDY_RESERVED__SHIFT 0x12
+#define RDPCSTX4_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX2_DATA_EN_RESERVED__SHIFT 0x13
+#define RDPCSTX4_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX2_REQ_RESERVED__SHIFT 0x14
+#define RDPCSTX4_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX2_ACK_RESERVED__SHIFT 0x15
+#define RDPCSTX4_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX3_RESET_RESERVED__SHIFT 0x18
+#define RDPCSTX4_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX3_DISABLE_RESERVED__SHIFT 0x19
+#define RDPCSTX4_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX3_CLK_RDY_RESERVED__SHIFT 0x1a
+#define RDPCSTX4_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX3_DATA_EN_RESERVED__SHIFT 0x1b
+#define RDPCSTX4_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX3_REQ_RESERVED__SHIFT 0x1c
+#define RDPCSTX4_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX3_ACK_RESERVED__SHIFT 0x1d
+#define RDPCSTX4_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX0_RESET_RESERVED_MASK 0x00000001L
+#define RDPCSTX4_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX0_DISABLE_RESERVED_MASK 0x00000002L
+#define RDPCSTX4_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX0_CLK_RDY_RESERVED_MASK 0x00000004L
+#define RDPCSTX4_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX0_DATA_EN_RESERVED_MASK 0x00000008L
+#define RDPCSTX4_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX0_REQ_RESERVED_MASK 0x00000010L
+#define RDPCSTX4_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX0_ACK_RESERVED_MASK 0x00000020L
+#define RDPCSTX4_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX1_RESET_RESERVED_MASK 0x00000100L
+#define RDPCSTX4_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX1_DISABLE_RESERVED_MASK 0x00000200L
+#define RDPCSTX4_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX1_CLK_RDY_RESERVED_MASK 0x00000400L
+#define RDPCSTX4_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX1_DATA_EN_RESERVED_MASK 0x00000800L
+#define RDPCSTX4_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX1_REQ_RESERVED_MASK 0x00001000L
+#define RDPCSTX4_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX1_ACK_RESERVED_MASK 0x00002000L
+#define RDPCSTX4_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX2_RESET_RESERVED_MASK 0x00010000L
+#define RDPCSTX4_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX2_DISABLE_RESERVED_MASK 0x00020000L
+#define RDPCSTX4_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX2_CLK_RDY_RESERVED_MASK 0x00040000L
+#define RDPCSTX4_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX2_DATA_EN_RESERVED_MASK 0x00080000L
+#define RDPCSTX4_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX2_REQ_RESERVED_MASK 0x00100000L
+#define RDPCSTX4_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX2_ACK_RESERVED_MASK 0x00200000L
+#define RDPCSTX4_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX3_RESET_RESERVED_MASK 0x01000000L
+#define RDPCSTX4_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX3_DISABLE_RESERVED_MASK 0x02000000L
+#define RDPCSTX4_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX3_CLK_RDY_RESERVED_MASK 0x04000000L
+#define RDPCSTX4_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX3_DATA_EN_RESERVED_MASK 0x08000000L
+#define RDPCSTX4_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX3_REQ_RESERVED_MASK 0x10000000L
+#define RDPCSTX4_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX3_ACK_RESERVED_MASK 0x20000000L
+//RDPCSTX4_RDPCSTX_DMCU_DPALT_PHY_CNTL6
+#define RDPCSTX4_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_TX0_PSTATE_RESERVED__SHIFT 0x0
+#define RDPCSTX4_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_TX0_MPLL_EN_RESERVED__SHIFT 0x2
+#define RDPCSTX4_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_TX1_PSTATE_RESERVED__SHIFT 0x4
+#define RDPCSTX4_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_TX1_MPLL_EN_RESERVED__SHIFT 0x6
+#define RDPCSTX4_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_TX2_PSTATE_RESERVED__SHIFT 0x8
+#define RDPCSTX4_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_TX2_MPLL_EN_RESERVED__SHIFT 0xa
+#define RDPCSTX4_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_TX3_PSTATE_RESERVED__SHIFT 0xc
+#define RDPCSTX4_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_TX3_MPLL_EN_RESERVED__SHIFT 0xe
+#define RDPCSTX4_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DPALT_DP4_RESERVED__SHIFT 0x10
+#define RDPCSTX4_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DPALT_DISABLE_RESERVED__SHIFT 0x11
+#define RDPCSTX4_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DPALT_DISABLE_ACK_RESERVED__SHIFT 0x12
+#define RDPCSTX4_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_REF_CLK_EN_RESERVED__SHIFT 0x13
+#define RDPCSTX4_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_REF_CLK_REQ_RESERVED__SHIFT 0x14
+#define RDPCSTX4_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_TX0_PSTATE_RESERVED_MASK 0x00000003L
+#define RDPCSTX4_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_TX0_MPLL_EN_RESERVED_MASK 0x00000004L
+#define RDPCSTX4_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_TX1_PSTATE_RESERVED_MASK 0x00000030L
+#define RDPCSTX4_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_TX1_MPLL_EN_RESERVED_MASK 0x00000040L
+#define RDPCSTX4_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_TX2_PSTATE_RESERVED_MASK 0x00000300L
+#define RDPCSTX4_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_TX2_MPLL_EN_RESERVED_MASK 0x00000400L
+#define RDPCSTX4_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_TX3_PSTATE_RESERVED_MASK 0x00003000L
+#define RDPCSTX4_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_TX3_MPLL_EN_RESERVED_MASK 0x00004000L
+#define RDPCSTX4_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DPALT_DP4_RESERVED_MASK 0x00010000L
+#define RDPCSTX4_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DPALT_DISABLE_RESERVED_MASK 0x00020000L
+#define RDPCSTX4_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DPALT_DISABLE_ACK_RESERVED_MASK 0x00040000L
+#define RDPCSTX4_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_REF_CLK_EN_RESERVED_MASK 0x00080000L
+#define RDPCSTX4_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_REF_CLK_REQ_RESERVED_MASK 0x00100000L
+//RDPCSTX4_RDPCSTX_DPALT_CONTROL_REG
+#define RDPCSTX4_RDPCSTX_DPALT_CONTROL_REG__RDPCS_ALLOW_DRIVER_ACCESS__SHIFT 0x0
+#define RDPCSTX4_RDPCSTX_DPALT_CONTROL_REG__RDPCS_DRIVER_ACCESS_BLOCKED__SHIFT 0x4
+#define RDPCSTX4_RDPCSTX_DPALT_CONTROL_REG__RDPCS_DPALT_CONTROL_SPARE__SHIFT 0x8
+#define RDPCSTX4_RDPCSTX_DPALT_CONTROL_REG__RDPCS_ALLOW_DRIVER_ACCESS_MASK 0x00000001L
+#define RDPCSTX4_RDPCSTX_DPALT_CONTROL_REG__RDPCS_DRIVER_ACCESS_BLOCKED_MASK 0x00000010L
+#define RDPCSTX4_RDPCSTX_DPALT_CONTROL_REG__RDPCS_DPALT_CONTROL_SPARE_MASK 0x0000FF00L
+
+
+// addressBlock: dpcssys_dpcssys_cr4_dispdec
+//DPCSSYS_CR4_DPCSSYS_CR_ADDR
+#define DPCSSYS_CR4_DPCSSYS_CR_ADDR__RDPCS_TX_CR_ADDR__SHIFT 0x0
+#define DPCSSYS_CR4_DPCSSYS_CR_ADDR__RDPCS_TX_CR_ADDR_MASK 0x0000FFFFL
+//DPCSSYS_CR4_DPCSSYS_CR_DATA
+#define DPCSSYS_CR4_DPCSSYS_CR_DATA__RDPCS_TX_CR_DATA__SHIFT 0x0
+#define DPCSSYS_CR4_DPCSSYS_CR_DATA__RDPCS_TX_CR_DATA_MASK 0x0000FFFFL
+
+
+// addressBlock: dpcssys_dpcs0_dpcstx5_dispdec
+//DPCSTX5_DPCSTX_TX_CLOCK_CNTL
+#define DPCSTX5_DPCSTX_TX_CLOCK_CNTL__DPCS_SYMCLK_GATE_DIS__SHIFT 0x0
+#define DPCSTX5_DPCSTX_TX_CLOCK_CNTL__DPCS_SYMCLK_EN__SHIFT 0x1
+#define DPCSTX5_DPCSTX_TX_CLOCK_CNTL__DPCS_SYMCLK_CLOCK_ON__SHIFT 0x2
+#define DPCSTX5_DPCSTX_TX_CLOCK_CNTL__DPCS_SYMCLK_DIV2_CLOCK_ON__SHIFT 0x3
+#define DPCSTX5_DPCSTX_TX_CLOCK_CNTL__DPCS_SYMCLK_GATE_DIS_MASK 0x00000001L
+#define DPCSTX5_DPCSTX_TX_CLOCK_CNTL__DPCS_SYMCLK_EN_MASK 0x00000002L
+#define DPCSTX5_DPCSTX_TX_CLOCK_CNTL__DPCS_SYMCLK_CLOCK_ON_MASK 0x00000004L
+#define DPCSTX5_DPCSTX_TX_CLOCK_CNTL__DPCS_SYMCLK_DIV2_CLOCK_ON_MASK 0x00000008L
+//DPCSTX5_DPCSTX_TX_CNTL
+#define DPCSTX5_DPCSTX_TX_CNTL__DPCS_TX_PLL_UPDATE_REQ__SHIFT 0xc
+#define DPCSTX5_DPCSTX_TX_CNTL__DPCS_TX_PLL_UPDATE_PENDING__SHIFT 0xd
+#define DPCSTX5_DPCSTX_TX_CNTL__DPCS_TX_DATA_SWAP__SHIFT 0xe
+#define DPCSTX5_DPCSTX_TX_CNTL__DPCS_TX_DATA_ORDER_INVERT__SHIFT 0xf
+#define DPCSTX5_DPCSTX_TX_CNTL__DPCS_TX_FIFO_EN__SHIFT 0x10
+#define DPCSTX5_DPCSTX_TX_CNTL__DPCS_TX_FIFO_START__SHIFT 0x11
+#define DPCSTX5_DPCSTX_TX_CNTL__DPCS_TX_FIFO_RD_START_DELAY__SHIFT 0x14
+#define DPCSTX5_DPCSTX_TX_CNTL__DPCS_TX_SOFT_RESET__SHIFT 0x1f
+#define DPCSTX5_DPCSTX_TX_CNTL__DPCS_TX_PLL_UPDATE_REQ_MASK 0x00001000L
+#define DPCSTX5_DPCSTX_TX_CNTL__DPCS_TX_PLL_UPDATE_PENDING_MASK 0x00002000L
+#define DPCSTX5_DPCSTX_TX_CNTL__DPCS_TX_DATA_SWAP_MASK 0x00004000L
+#define DPCSTX5_DPCSTX_TX_CNTL__DPCS_TX_DATA_ORDER_INVERT_MASK 0x00008000L
+#define DPCSTX5_DPCSTX_TX_CNTL__DPCS_TX_FIFO_EN_MASK 0x00010000L
+#define DPCSTX5_DPCSTX_TX_CNTL__DPCS_TX_FIFO_START_MASK 0x00020000L
+#define DPCSTX5_DPCSTX_TX_CNTL__DPCS_TX_FIFO_RD_START_DELAY_MASK 0x00F00000L
+#define DPCSTX5_DPCSTX_TX_CNTL__DPCS_TX_SOFT_RESET_MASK 0x80000000L
+//DPCSTX5_DPCSTX_CBUS_CNTL
+#define DPCSTX5_DPCSTX_CBUS_CNTL__DPCS_CBUS_WR_CMD_DELAY__SHIFT 0x0
+#define DPCSTX5_DPCSTX_CBUS_CNTL__DPCS_CBUS_SOFT_RESET__SHIFT 0x1f
+#define DPCSTX5_DPCSTX_CBUS_CNTL__DPCS_CBUS_WR_CMD_DELAY_MASK 0x000000FFL
+#define DPCSTX5_DPCSTX_CBUS_CNTL__DPCS_CBUS_SOFT_RESET_MASK 0x80000000L
+//DPCSTX5_DPCSTX_INTERRUPT_CNTL
+#define DPCSTX5_DPCSTX_INTERRUPT_CNTL__DPCS_REG_FIFO_OVERFLOW__SHIFT 0x0
+#define DPCSTX5_DPCSTX_INTERRUPT_CNTL__DPCS_REG_ERROR_CLR__SHIFT 0x1
+#define DPCSTX5_DPCSTX_INTERRUPT_CNTL__DPCS_REG_FIFO_ERROR_MASK__SHIFT 0x4
+#define DPCSTX5_DPCSTX_INTERRUPT_CNTL__DPCS_TX0_FIFO_ERROR__SHIFT 0x8
+#define DPCSTX5_DPCSTX_INTERRUPT_CNTL__DPCS_TX1_FIFO_ERROR__SHIFT 0x9
+#define DPCSTX5_DPCSTX_INTERRUPT_CNTL__DPCS_TX2_FIFO_ERROR__SHIFT 0xa
+#define DPCSTX5_DPCSTX_INTERRUPT_CNTL__DPCS_TX3_FIFO_ERROR__SHIFT 0xb
+#define DPCSTX5_DPCSTX_INTERRUPT_CNTL__DPCS_TX_ERROR_CLR__SHIFT 0xc
+#define DPCSTX5_DPCSTX_INTERRUPT_CNTL__DPCS_TX_FIFO_ERROR_MASK__SHIFT 0x10
+#define DPCSTX5_DPCSTX_INTERRUPT_CNTL__DPCS_INTERRUPT_MASK__SHIFT 0x14
+#define DPCSTX5_DPCSTX_INTERRUPT_CNTL__DPCS_REG_FIFO_OVERFLOW_MASK 0x00000001L
+#define DPCSTX5_DPCSTX_INTERRUPT_CNTL__DPCS_REG_ERROR_CLR_MASK 0x00000002L
+#define DPCSTX5_DPCSTX_INTERRUPT_CNTL__DPCS_REG_FIFO_ERROR_MASK_MASK 0x00000010L
+#define DPCSTX5_DPCSTX_INTERRUPT_CNTL__DPCS_TX0_FIFO_ERROR_MASK 0x00000100L
+#define DPCSTX5_DPCSTX_INTERRUPT_CNTL__DPCS_TX1_FIFO_ERROR_MASK 0x00000200L
+#define DPCSTX5_DPCSTX_INTERRUPT_CNTL__DPCS_TX2_FIFO_ERROR_MASK 0x00000400L
+#define DPCSTX5_DPCSTX_INTERRUPT_CNTL__DPCS_TX3_FIFO_ERROR_MASK 0x00000800L
+#define DPCSTX5_DPCSTX_INTERRUPT_CNTL__DPCS_TX_ERROR_CLR_MASK 0x00001000L
+#define DPCSTX5_DPCSTX_INTERRUPT_CNTL__DPCS_TX_FIFO_ERROR_MASK_MASK 0x00010000L
+#define DPCSTX5_DPCSTX_INTERRUPT_CNTL__DPCS_INTERRUPT_MASK_MASK 0x00100000L
+//DPCSTX5_DPCSTX_PLL_UPDATE_ADDR
+#define DPCSTX5_DPCSTX_PLL_UPDATE_ADDR__DPCS_PLL_UPDATE_ADDR__SHIFT 0x0
+#define DPCSTX5_DPCSTX_PLL_UPDATE_ADDR__DPCS_PLL_UPDATE_ADDR_MASK 0x0003FFFFL
+//DPCSTX5_DPCSTX_PLL_UPDATE_DATA
+#define DPCSTX5_DPCSTX_PLL_UPDATE_DATA__DPCS_PLL_UPDATE_DATA__SHIFT 0x0
+#define DPCSTX5_DPCSTX_PLL_UPDATE_DATA__DPCS_PLL_UPDATE_DATA_MASK 0xFFFFFFFFL
+//DPCSTX5_DPCSTX_DEBUG_CONFIG
+#define DPCSTX5_DPCSTX_DEBUG_CONFIG__DPCS_DBG_EN__SHIFT 0x0
+#define DPCSTX5_DPCSTX_DEBUG_CONFIG__DPCS_DBG_CFGCLK_SEL__SHIFT 0x1
+#define DPCSTX5_DPCSTX_DEBUG_CONFIG__DPCS_DBG_TX_SYMCLK_SEL__SHIFT 0x4
+#define DPCSTX5_DPCSTX_DEBUG_CONFIG__DPCS_DBG_TX_SYMCLK_DIV2_SEL__SHIFT 0x8
+#define DPCSTX5_DPCSTX_DEBUG_CONFIG__DPCS_DBG_CBUS_DIS__SHIFT 0xe
+#define DPCSTX5_DPCSTX_DEBUG_CONFIG__DPCS_TEST_DEBUG_WRITE_EN__SHIFT 0x10
+#define DPCSTX5_DPCSTX_DEBUG_CONFIG__DPCS_DBG_EN_MASK 0x00000001L
+#define DPCSTX5_DPCSTX_DEBUG_CONFIG__DPCS_DBG_CFGCLK_SEL_MASK 0x0000000EL
+#define DPCSTX5_DPCSTX_DEBUG_CONFIG__DPCS_DBG_TX_SYMCLK_SEL_MASK 0x00000070L
+#define DPCSTX5_DPCSTX_DEBUG_CONFIG__DPCS_DBG_TX_SYMCLK_DIV2_SEL_MASK 0x00000700L
+#define DPCSTX5_DPCSTX_DEBUG_CONFIG__DPCS_DBG_CBUS_DIS_MASK 0x00004000L
+#define DPCSTX5_DPCSTX_DEBUG_CONFIG__DPCS_TEST_DEBUG_WRITE_EN_MASK 0x00010000L
+
+
+// addressBlock: dpcssys_dpcs0_rdpcstx5_dispdec
+//RDPCSTX5_RDPCSTX_CNTL
+#define RDPCSTX5_RDPCSTX_CNTL__RDPCS_CBUS_SOFT_RESET__SHIFT 0x0
+#define RDPCSTX5_RDPCSTX_CNTL__RDPCS_SRAM_SOFT_RESET__SHIFT 0x4
+#define RDPCSTX5_RDPCSTX_CNTL__RDPCS_TX_FIFO_LANE0_EN__SHIFT 0xc
+#define RDPCSTX5_RDPCSTX_CNTL__RDPCS_TX_FIFO_LANE1_EN__SHIFT 0xd
+#define RDPCSTX5_RDPCSTX_CNTL__RDPCS_TX_FIFO_LANE2_EN__SHIFT 0xe
+#define RDPCSTX5_RDPCSTX_CNTL__RDPCS_TX_FIFO_LANE3_EN__SHIFT 0xf
+#define RDPCSTX5_RDPCSTX_CNTL__RDPCS_TX_FIFO_EN__SHIFT 0x10
+#define RDPCSTX5_RDPCSTX_CNTL__RDPCS_TX_FIFO_START__SHIFT 0x11
+#define RDPCSTX5_RDPCSTX_CNTL__RDPCS_TX_FIFO_RD_START_DELAY__SHIFT 0x14
+#define RDPCSTX5_RDPCSTX_CNTL__RDPCS_CR_REGISTER_BLOCK_EN__SHIFT 0x18
+#define RDPCSTX5_RDPCSTX_CNTL__RDPCS_NON_DPALT_REGISTER_BLOCK_EN__SHIFT 0x19
+#define RDPCSTX5_RDPCSTX_CNTL__RDPCS_DPALT_BLOCK_STATUS__SHIFT 0x1a
+#define RDPCSTX5_RDPCSTX_CNTL__RDPCS_TX_SOFT_RESET__SHIFT 0x1f
+#define RDPCSTX5_RDPCSTX_CNTL__RDPCS_CBUS_SOFT_RESET_MASK 0x00000001L
+#define RDPCSTX5_RDPCSTX_CNTL__RDPCS_SRAM_SOFT_RESET_MASK 0x00000010L
+#define RDPCSTX5_RDPCSTX_CNTL__RDPCS_TX_FIFO_LANE0_EN_MASK 0x00001000L
+#define RDPCSTX5_RDPCSTX_CNTL__RDPCS_TX_FIFO_LANE1_EN_MASK 0x00002000L
+#define RDPCSTX5_RDPCSTX_CNTL__RDPCS_TX_FIFO_LANE2_EN_MASK 0x00004000L
+#define RDPCSTX5_RDPCSTX_CNTL__RDPCS_TX_FIFO_LANE3_EN_MASK 0x00008000L
+#define RDPCSTX5_RDPCSTX_CNTL__RDPCS_TX_FIFO_EN_MASK 0x00010000L
+#define RDPCSTX5_RDPCSTX_CNTL__RDPCS_TX_FIFO_START_MASK 0x00020000L
+#define RDPCSTX5_RDPCSTX_CNTL__RDPCS_TX_FIFO_RD_START_DELAY_MASK 0x00F00000L
+#define RDPCSTX5_RDPCSTX_CNTL__RDPCS_CR_REGISTER_BLOCK_EN_MASK 0x01000000L
+#define RDPCSTX5_RDPCSTX_CNTL__RDPCS_NON_DPALT_REGISTER_BLOCK_EN_MASK 0x02000000L
+#define RDPCSTX5_RDPCSTX_CNTL__RDPCS_DPALT_BLOCK_STATUS_MASK 0x04000000L
+#define RDPCSTX5_RDPCSTX_CNTL__RDPCS_TX_SOFT_RESET_MASK 0x80000000L
+//RDPCSTX5_RDPCSTX_CLOCK_CNTL
+#define RDPCSTX5_RDPCSTX_CLOCK_CNTL__RDPCS_EXT_REFCLK_EN__SHIFT 0x0
+#define RDPCSTX5_RDPCSTX_CLOCK_CNTL__RDPCS_SYMCLK_DIV2_TX0_EN__SHIFT 0x4
+#define RDPCSTX5_RDPCSTX_CLOCK_CNTL__RDPCS_SYMCLK_DIV2_TX1_EN__SHIFT 0x5
+#define RDPCSTX5_RDPCSTX_CLOCK_CNTL__RDPCS_SYMCLK_DIV2_TX2_EN__SHIFT 0x6
+#define RDPCSTX5_RDPCSTX_CLOCK_CNTL__RDPCS_SYMCLK_DIV2_TX3_EN__SHIFT 0x7
+#define RDPCSTX5_RDPCSTX_CLOCK_CNTL__RDPCS_SYMCLK_DIV2_GATE_DIS__SHIFT 0x8
+#define RDPCSTX5_RDPCSTX_CLOCK_CNTL__RDPCS_SYMCLK_DIV2_EN__SHIFT 0x9
+#define RDPCSTX5_RDPCSTX_CLOCK_CNTL__RDPCS_SYMCLK_DIV2_CLOCK_ON__SHIFT 0xa
+#define RDPCSTX5_RDPCSTX_CLOCK_CNTL__RDPCS_SRAMCLK_GATE_DIS__SHIFT 0xc
+#define RDPCSTX5_RDPCSTX_CLOCK_CNTL__RDPCS_SRAMCLK_EN__SHIFT 0xd
+#define RDPCSTX5_RDPCSTX_CLOCK_CNTL__RDPCS_SRAMCLK_CLOCK_ON__SHIFT 0xe
+#define RDPCSTX5_RDPCSTX_CLOCK_CNTL__RDPCS_SRAMCLK_BYPASS__SHIFT 0x10
+#define RDPCSTX5_RDPCSTX_CLOCK_CNTL__RDPCS_EXT_REFCLK_EN_MASK 0x00000001L
+#define RDPCSTX5_RDPCSTX_CLOCK_CNTL__RDPCS_SYMCLK_DIV2_TX0_EN_MASK 0x00000010L
+#define RDPCSTX5_RDPCSTX_CLOCK_CNTL__RDPCS_SYMCLK_DIV2_TX1_EN_MASK 0x00000020L
+#define RDPCSTX5_RDPCSTX_CLOCK_CNTL__RDPCS_SYMCLK_DIV2_TX2_EN_MASK 0x00000040L
+#define RDPCSTX5_RDPCSTX_CLOCK_CNTL__RDPCS_SYMCLK_DIV2_TX3_EN_MASK 0x00000080L
+#define RDPCSTX5_RDPCSTX_CLOCK_CNTL__RDPCS_SYMCLK_DIV2_GATE_DIS_MASK 0x00000100L
+#define RDPCSTX5_RDPCSTX_CLOCK_CNTL__RDPCS_SYMCLK_DIV2_EN_MASK 0x00000200L
+#define RDPCSTX5_RDPCSTX_CLOCK_CNTL__RDPCS_SYMCLK_DIV2_CLOCK_ON_MASK 0x00000400L
+#define RDPCSTX5_RDPCSTX_CLOCK_CNTL__RDPCS_SRAMCLK_GATE_DIS_MASK 0x00001000L
+#define RDPCSTX5_RDPCSTX_CLOCK_CNTL__RDPCS_SRAMCLK_EN_MASK 0x00002000L
+#define RDPCSTX5_RDPCSTX_CLOCK_CNTL__RDPCS_SRAMCLK_CLOCK_ON_MASK 0x00004000L
+#define RDPCSTX5_RDPCSTX_CLOCK_CNTL__RDPCS_SRAMCLK_BYPASS_MASK 0x00010000L
+//RDPCSTX5_RDPCSTX_INTERRUPT_CONTROL
+#define RDPCSTX5_RDPCSTX_INTERRUPT_CONTROL__RDPCS_REG_FIFO_OVERFLOW__SHIFT 0x0
+#define RDPCSTX5_RDPCSTX_INTERRUPT_CONTROL__RDPCS_DPALT_DISABLE_TOGGLE__SHIFT 0x1
+#define RDPCSTX5_RDPCSTX_INTERRUPT_CONTROL__RDPCS_DPALT_4LANE_TOGGLE__SHIFT 0x2
+#define RDPCSTX5_RDPCSTX_INTERRUPT_CONTROL__RDPCS_TX0_FIFO_ERROR__SHIFT 0x4
+#define RDPCSTX5_RDPCSTX_INTERRUPT_CONTROL__RDPCS_TX1_FIFO_ERROR__SHIFT 0x5
+#define RDPCSTX5_RDPCSTX_INTERRUPT_CONTROL__RDPCS_TX2_FIFO_ERROR__SHIFT 0x6
+#define RDPCSTX5_RDPCSTX_INTERRUPT_CONTROL__RDPCS_TX3_FIFO_ERROR__SHIFT 0x7
+#define RDPCSTX5_RDPCSTX_INTERRUPT_CONTROL__RDPCS_REG_ERROR_CLR__SHIFT 0x8
+#define RDPCSTX5_RDPCSTX_INTERRUPT_CONTROL__RDPCS_DPALT_DISABLE_TOGGLE_CLR__SHIFT 0x9
+#define RDPCSTX5_RDPCSTX_INTERRUPT_CONTROL__RDPCS_DPALT_4LANE_TOGGLE_CLR__SHIFT 0xa
+#define RDPCSTX5_RDPCSTX_INTERRUPT_CONTROL__RDPCS_TX_ERROR_CLR__SHIFT 0xc
+#define RDPCSTX5_RDPCSTX_INTERRUPT_CONTROL__RDPCS_REG_FIFO_ERROR_MASK__SHIFT 0x10
+#define RDPCSTX5_RDPCSTX_INTERRUPT_CONTROL__RDPCS_DPALT_DISABLE_TOGGLE_MASK__SHIFT 0x11
+#define RDPCSTX5_RDPCSTX_INTERRUPT_CONTROL__RDPCS_DPALT_4LANE_TOGGLE_MASK__SHIFT 0x12
+#define RDPCSTX5_RDPCSTX_INTERRUPT_CONTROL__RDPCS_TX_FIFO_ERROR_MASK__SHIFT 0x14
+#define RDPCSTX5_RDPCSTX_INTERRUPT_CONTROL__RDPCS_REG_FIFO_OVERFLOW_MASK 0x00000001L
+#define RDPCSTX5_RDPCSTX_INTERRUPT_CONTROL__RDPCS_DPALT_DISABLE_TOGGLE_MASK 0x00000002L
+#define RDPCSTX5_RDPCSTX_INTERRUPT_CONTROL__RDPCS_DPALT_4LANE_TOGGLE_MASK 0x00000004L
+#define RDPCSTX5_RDPCSTX_INTERRUPT_CONTROL__RDPCS_TX0_FIFO_ERROR_MASK 0x00000010L
+#define RDPCSTX5_RDPCSTX_INTERRUPT_CONTROL__RDPCS_TX1_FIFO_ERROR_MASK 0x00000020L
+#define RDPCSTX5_RDPCSTX_INTERRUPT_CONTROL__RDPCS_TX2_FIFO_ERROR_MASK 0x00000040L
+#define RDPCSTX5_RDPCSTX_INTERRUPT_CONTROL__RDPCS_TX3_FIFO_ERROR_MASK 0x00000080L
+#define RDPCSTX5_RDPCSTX_INTERRUPT_CONTROL__RDPCS_REG_ERROR_CLR_MASK 0x00000100L
+#define RDPCSTX5_RDPCSTX_INTERRUPT_CONTROL__RDPCS_DPALT_DISABLE_TOGGLE_CLR_MASK 0x00000200L
+#define RDPCSTX5_RDPCSTX_INTERRUPT_CONTROL__RDPCS_DPALT_4LANE_TOGGLE_CLR_MASK 0x00000400L
+#define RDPCSTX5_RDPCSTX_INTERRUPT_CONTROL__RDPCS_TX_ERROR_CLR_MASK 0x00001000L
+#define RDPCSTX5_RDPCSTX_INTERRUPT_CONTROL__RDPCS_REG_FIFO_ERROR_MASK_MASK 0x00010000L
+#define RDPCSTX5_RDPCSTX_INTERRUPT_CONTROL__RDPCS_DPALT_DISABLE_TOGGLE_MASK_MASK 0x00020000L
+#define RDPCSTX5_RDPCSTX_INTERRUPT_CONTROL__RDPCS_DPALT_4LANE_TOGGLE_MASK_MASK 0x00040000L
+#define RDPCSTX5_RDPCSTX_INTERRUPT_CONTROL__RDPCS_TX_FIFO_ERROR_MASK_MASK 0x00100000L
+//RDPCSTX5_RDPCSTX_PLL_UPDATE_DATA
+#define RDPCSTX5_RDPCSTX_PLL_UPDATE_DATA__RDPCS_PLL_UPDATE_DATA__SHIFT 0x0
+#define RDPCSTX5_RDPCSTX_PLL_UPDATE_DATA__RDPCS_PLL_UPDATE_DATA_MASK 0x00000001L
+//RDPCSTX5_RDPCS_TX_CR_ADDR
+#define RDPCSTX5_RDPCS_TX_CR_ADDR__RDPCS_TX_CR_ADDR__SHIFT 0x0
+#define RDPCSTX5_RDPCS_TX_CR_ADDR__RDPCS_TX_CR_ADDR_MASK 0x0000FFFFL
+//RDPCSTX5_RDPCS_TX_CR_DATA
+#define RDPCSTX5_RDPCS_TX_CR_DATA__RDPCS_TX_CR_DATA__SHIFT 0x0
+#define RDPCSTX5_RDPCS_TX_CR_DATA__RDPCS_TX_CR_DATA_MASK 0x0000FFFFL
+//RDPCSTX5_RDPCS_TX_SRAM_CNTL
+#define RDPCSTX5_RDPCS_TX_SRAM_CNTL__RDPCS_MEM_PWR_DIS__SHIFT 0x14
+#define RDPCSTX5_RDPCS_TX_SRAM_CNTL__RDPCS_MEM_PWR_FORCE__SHIFT 0x18
+#define RDPCSTX5_RDPCS_TX_SRAM_CNTL__RDPCS_MEM_PWR_PWR_STATE__SHIFT 0x1c
+#define RDPCSTX5_RDPCS_TX_SRAM_CNTL__RDPCS_MEM_PWR_DIS_MASK 0x00100000L
+#define RDPCSTX5_RDPCS_TX_SRAM_CNTL__RDPCS_MEM_PWR_FORCE_MASK 0x03000000L
+#define RDPCSTX5_RDPCS_TX_SRAM_CNTL__RDPCS_MEM_PWR_PWR_STATE_MASK 0x30000000L
+//RDPCSTX5_RDPCSTX_MEM_POWER_CTRL
+#define RDPCSTX5_RDPCSTX_MEM_POWER_CTRL__RDPCS_FUSE_RM_FUSES__SHIFT 0x0
+#define RDPCSTX5_RDPCSTX_MEM_POWER_CTRL__RDPCS_FUSE_CUSTOM_RM_FUSES__SHIFT 0xc
+#define RDPCSTX5_RDPCSTX_MEM_POWER_CTRL__RDPCS_MEM_POWER_CTRL_PDP_BC1__SHIFT 0x1a
+#define RDPCSTX5_RDPCSTX_MEM_POWER_CTRL__RDPCS_MEM_POWER_CTRL_PDP_BC2__SHIFT 0x1b
+#define RDPCSTX5_RDPCSTX_MEM_POWER_CTRL__RDPCS_MEM_POWER_CTRL_HD_BC1__SHIFT 0x1c
+#define RDPCSTX5_RDPCSTX_MEM_POWER_CTRL__RDPCS_MEM_POWER_CTRL_HD_BC2__SHIFT 0x1d
+#define RDPCSTX5_RDPCSTX_MEM_POWER_CTRL__RDPCS_LIVMIN_DIS_SRAM__SHIFT 0x1e
+#define RDPCSTX5_RDPCSTX_MEM_POWER_CTRL__RDPCS_FUSE_RM_FUSES_MASK 0x00000FFFL
+#define RDPCSTX5_RDPCSTX_MEM_POWER_CTRL__RDPCS_FUSE_CUSTOM_RM_FUSES_MASK 0x03FFF000L
+#define RDPCSTX5_RDPCSTX_MEM_POWER_CTRL__RDPCS_MEM_POWER_CTRL_PDP_BC1_MASK 0x04000000L
+#define RDPCSTX5_RDPCSTX_MEM_POWER_CTRL__RDPCS_MEM_POWER_CTRL_PDP_BC2_MASK 0x08000000L
+#define RDPCSTX5_RDPCSTX_MEM_POWER_CTRL__RDPCS_MEM_POWER_CTRL_HD_BC1_MASK 0x10000000L
+#define RDPCSTX5_RDPCSTX_MEM_POWER_CTRL__RDPCS_MEM_POWER_CTRL_HD_BC2_MASK 0x20000000L
+#define RDPCSTX5_RDPCSTX_MEM_POWER_CTRL__RDPCS_LIVMIN_DIS_SRAM_MASK 0x40000000L
+//RDPCSTX5_RDPCSTX_MEM_POWER_CTRL2
+#define RDPCSTX5_RDPCSTX_MEM_POWER_CTRL2__RDPCS_MEM_POWER_CTRL_POFF__SHIFT 0x0
+#define RDPCSTX5_RDPCSTX_MEM_POWER_CTRL2__RDPCS_MEM_POWER_CTRL_FISO__SHIFT 0x2
+#define RDPCSTX5_RDPCSTX_MEM_POWER_CTRL2__RDPCS_MEM_POWER_CTRL_POFF_MASK 0x00000003L
+#define RDPCSTX5_RDPCSTX_MEM_POWER_CTRL2__RDPCS_MEM_POWER_CTRL_FISO_MASK 0x00000004L
+//RDPCSTX5_RDPCSTX_SCRATCH
+#define RDPCSTX5_RDPCSTX_SCRATCH__RDPCSTX_SCRATCH__SHIFT 0x0
+#define RDPCSTX5_RDPCSTX_SCRATCH__RDPCSTX_SCRATCH_MASK 0xFFFFFFFFL
+//RDPCSTX5_RDPCSTX_DMCU_DPALT_DIS_BLOCK_REG
+#define RDPCSTX5_RDPCSTX_DMCU_DPALT_DIS_BLOCK_REG__RDPCS_DMCU_DPALT_DIS_BLOCK_REG__SHIFT 0x0
+#define RDPCSTX5_RDPCSTX_DMCU_DPALT_DIS_BLOCK_REG__RDPCS_DMCU_DPALT_FORCE_SYMCLK_DIV2_DIS__SHIFT 0x4
+#define RDPCSTX5_RDPCSTX_DMCU_DPALT_DIS_BLOCK_REG__RDPCS_DMCU_DPALT_CONTROL_SPARE__SHIFT 0x8
+#define RDPCSTX5_RDPCSTX_DMCU_DPALT_DIS_BLOCK_REG__RDPCS_DMCU_DPALT_DIS_BLOCK_REG_MASK 0x00000001L
+#define RDPCSTX5_RDPCSTX_DMCU_DPALT_DIS_BLOCK_REG__RDPCS_DMCU_DPALT_FORCE_SYMCLK_DIV2_DIS_MASK 0x00000010L
+#define RDPCSTX5_RDPCSTX_DMCU_DPALT_DIS_BLOCK_REG__RDPCS_DMCU_DPALT_CONTROL_SPARE_MASK 0x0000FF00L
+//RDPCSTX5_RDPCSTX_DEBUG_CONFIG
+#define RDPCSTX5_RDPCSTX_DEBUG_CONFIG__RDPCS_DBG_EN__SHIFT 0x0
+#define RDPCSTX5_RDPCSTX_DEBUG_CONFIG__RDPCS_DBG_SEL_ASYNC_8BIT__SHIFT 0x4
+#define RDPCSTX5_RDPCSTX_DEBUG_CONFIG__RDPCS_DBG_SEL_ASYNC_SWAP__SHIFT 0x7
+#define RDPCSTX5_RDPCSTX_DEBUG_CONFIG__RDPCS_DBG_SEL_TEST_CLK__SHIFT 0x8
+#define RDPCSTX5_RDPCSTX_DEBUG_CONFIG__RDPCS_DBG_CR_COUNT_EXPIRE__SHIFT 0xf
+#define RDPCSTX5_RDPCSTX_DEBUG_CONFIG__RDPCS_DBG_CR_COUNT_MAX__SHIFT 0x10
+#define RDPCSTX5_RDPCSTX_DEBUG_CONFIG__RDPCS_DBG_CR_COUNT__SHIFT 0x18
+#define RDPCSTX5_RDPCSTX_DEBUG_CONFIG__RDPCS_DBG_EN_MASK 0x00000001L
+#define RDPCSTX5_RDPCSTX_DEBUG_CONFIG__RDPCS_DBG_SEL_ASYNC_8BIT_MASK 0x00000070L
+#define RDPCSTX5_RDPCSTX_DEBUG_CONFIG__RDPCS_DBG_SEL_ASYNC_SWAP_MASK 0x00000080L
+#define RDPCSTX5_RDPCSTX_DEBUG_CONFIG__RDPCS_DBG_SEL_TEST_CLK_MASK 0x00001F00L
+#define RDPCSTX5_RDPCSTX_DEBUG_CONFIG__RDPCS_DBG_CR_COUNT_EXPIRE_MASK 0x00008000L
+#define RDPCSTX5_RDPCSTX_DEBUG_CONFIG__RDPCS_DBG_CR_COUNT_MAX_MASK 0x00FF0000L
+#define RDPCSTX5_RDPCSTX_DEBUG_CONFIG__RDPCS_DBG_CR_COUNT_MASK 0xFF000000L
+//RDPCSTX5_RDPCSTX_PHY_CNTL0
+#define RDPCSTX5_RDPCSTX_PHY_CNTL0__RDPCS_PHY_RESET__SHIFT 0x0
+#define RDPCSTX5_RDPCSTX_PHY_CNTL0__RDPCS_PHY_TCA_PHY_RESET__SHIFT 0x1
+#define RDPCSTX5_RDPCSTX_PHY_CNTL0__RDPCS_PHY_TCA_APB_RESET_N__SHIFT 0x2
+#define RDPCSTX5_RDPCSTX_PHY_CNTL0__RDPCS_PHY_TEST_POWERDOWN__SHIFT 0x3
+#define RDPCSTX5_RDPCSTX_PHY_CNTL0__RDPCS_PHY_DTB_OUT__SHIFT 0x4
+#define RDPCSTX5_RDPCSTX_PHY_CNTL0__RDPCS_PHY_HDMIMODE_ENABLE__SHIFT 0x8
+#define RDPCSTX5_RDPCSTX_PHY_CNTL0__RDPCS_PHY_REF_RANGE__SHIFT 0x9
+#define RDPCSTX5_RDPCSTX_PHY_CNTL0__RDPCS_PHY_TX_VBOOST_LVL__SHIFT 0xe
+#define RDPCSTX5_RDPCSTX_PHY_CNTL0__RDPCS_PHY_RTUNE_REQ__SHIFT 0x11
+#define RDPCSTX5_RDPCSTX_PHY_CNTL0__RDPCS_PHY_RTUNE_ACK__SHIFT 0x12
+#define RDPCSTX5_RDPCSTX_PHY_CNTL0__RDPCS_PHY_CR_PARA_SEL__SHIFT 0x14
+#define RDPCSTX5_RDPCSTX_PHY_CNTL0__RDPCS_PHY_CR_MUX_SEL__SHIFT 0x15
+#define RDPCSTX5_RDPCSTX_PHY_CNTL0__RDPCS_PHY_REF_CLKDET_EN__SHIFT 0x18
+#define RDPCSTX5_RDPCSTX_PHY_CNTL0__RDPCS_PHY_REF_CLKDET_RESULT__SHIFT 0x19
+#define RDPCSTX5_RDPCSTX_PHY_CNTL0__RDPCS_SRAM_INIT_DONE__SHIFT 0x1c
+#define RDPCSTX5_RDPCSTX_PHY_CNTL0__RDPCS_SRAM_EXT_LD_DONE__SHIFT 0x1d
+#define RDPCSTX5_RDPCSTX_PHY_CNTL0__RDPCS_SRAM_BYPASS__SHIFT 0x1f
+#define RDPCSTX5_RDPCSTX_PHY_CNTL0__RDPCS_PHY_RESET_MASK 0x00000001L
+#define RDPCSTX5_RDPCSTX_PHY_CNTL0__RDPCS_PHY_TCA_PHY_RESET_MASK 0x00000002L
+#define RDPCSTX5_RDPCSTX_PHY_CNTL0__RDPCS_PHY_TCA_APB_RESET_N_MASK 0x00000004L
+#define RDPCSTX5_RDPCSTX_PHY_CNTL0__RDPCS_PHY_TEST_POWERDOWN_MASK 0x00000008L
+#define RDPCSTX5_RDPCSTX_PHY_CNTL0__RDPCS_PHY_DTB_OUT_MASK 0x00000030L
+#define RDPCSTX5_RDPCSTX_PHY_CNTL0__RDPCS_PHY_HDMIMODE_ENABLE_MASK 0x00000100L
+#define RDPCSTX5_RDPCSTX_PHY_CNTL0__RDPCS_PHY_REF_RANGE_MASK 0x00003E00L
+#define RDPCSTX5_RDPCSTX_PHY_CNTL0__RDPCS_PHY_TX_VBOOST_LVL_MASK 0x0001C000L
+#define RDPCSTX5_RDPCSTX_PHY_CNTL0__RDPCS_PHY_RTUNE_REQ_MASK 0x00020000L
+#define RDPCSTX5_RDPCSTX_PHY_CNTL0__RDPCS_PHY_RTUNE_ACK_MASK 0x00040000L
+#define RDPCSTX5_RDPCSTX_PHY_CNTL0__RDPCS_PHY_CR_PARA_SEL_MASK 0x00100000L
+#define RDPCSTX5_RDPCSTX_PHY_CNTL0__RDPCS_PHY_CR_MUX_SEL_MASK 0x00200000L
+#define RDPCSTX5_RDPCSTX_PHY_CNTL0__RDPCS_PHY_REF_CLKDET_EN_MASK 0x01000000L
+#define RDPCSTX5_RDPCSTX_PHY_CNTL0__RDPCS_PHY_REF_CLKDET_RESULT_MASK 0x02000000L
+#define RDPCSTX5_RDPCSTX_PHY_CNTL0__RDPCS_SRAM_INIT_DONE_MASK 0x10000000L
+#define RDPCSTX5_RDPCSTX_PHY_CNTL0__RDPCS_SRAM_EXT_LD_DONE_MASK 0x20000000L
+#define RDPCSTX5_RDPCSTX_PHY_CNTL0__RDPCS_SRAM_BYPASS_MASK 0x80000000L
+//RDPCSTX5_RDPCSTX_PHY_CNTL1
+#define RDPCSTX5_RDPCSTX_PHY_CNTL1__RDPCS_PHY_PG_MODE_EN__SHIFT 0x0
+#define RDPCSTX5_RDPCSTX_PHY_CNTL1__RDPCS_PHY_PCS_PWR_EN__SHIFT 0x1
+#define RDPCSTX5_RDPCSTX_PHY_CNTL1__RDPCS_PHY_PCS_PWR_STABLE__SHIFT 0x2
+#define RDPCSTX5_RDPCSTX_PHY_CNTL1__RDPCS_PHY_PMA_PWR_EN__SHIFT 0x3
+#define RDPCSTX5_RDPCSTX_PHY_CNTL1__RDPCS_PHY_PMA_PWR_STABLE__SHIFT 0x4
+#define RDPCSTX5_RDPCSTX_PHY_CNTL1__RDPCS_PHY_DP_PG_RESET__SHIFT 0x5
+#define RDPCSTX5_RDPCSTX_PHY_CNTL1__RDPCS_PHY_ANA_PWR_EN__SHIFT 0x6
+#define RDPCSTX5_RDPCSTX_PHY_CNTL1__RDPCS_PHY_ANA_PWR_STABLE__SHIFT 0x7
+#define RDPCSTX5_RDPCSTX_PHY_CNTL1__RDPCS_PHY_PG_MODE_EN_MASK 0x00000001L
+#define RDPCSTX5_RDPCSTX_PHY_CNTL1__RDPCS_PHY_PCS_PWR_EN_MASK 0x00000002L
+#define RDPCSTX5_RDPCSTX_PHY_CNTL1__RDPCS_PHY_PCS_PWR_STABLE_MASK 0x00000004L
+#define RDPCSTX5_RDPCSTX_PHY_CNTL1__RDPCS_PHY_PMA_PWR_EN_MASK 0x00000008L
+#define RDPCSTX5_RDPCSTX_PHY_CNTL1__RDPCS_PHY_PMA_PWR_STABLE_MASK 0x00000010L
+#define RDPCSTX5_RDPCSTX_PHY_CNTL1__RDPCS_PHY_DP_PG_RESET_MASK 0x00000020L
+#define RDPCSTX5_RDPCSTX_PHY_CNTL1__RDPCS_PHY_ANA_PWR_EN_MASK 0x00000040L
+#define RDPCSTX5_RDPCSTX_PHY_CNTL1__RDPCS_PHY_ANA_PWR_STABLE_MASK 0x00000080L
+//RDPCSTX5_RDPCSTX_PHY_CNTL2
+#define RDPCSTX5_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP4_POR__SHIFT 0x3
+#define RDPCSTX5_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP_LANE0_RX2TX_PAR_LB_EN__SHIFT 0x4
+#define RDPCSTX5_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP_LANE1_RX2TX_PAR_LB_EN__SHIFT 0x5
+#define RDPCSTX5_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP_LANE2_RX2TX_PAR_LB_EN__SHIFT 0x6
+#define RDPCSTX5_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP_LANE3_RX2TX_PAR_LB_EN__SHIFT 0x7
+#define RDPCSTX5_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP_LANE0_TX2RX_SER_LB_EN__SHIFT 0x8
+#define RDPCSTX5_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP_LANE1_TX2RX_SER_LB_EN__SHIFT 0x9
+#define RDPCSTX5_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP_LANE2_TX2RX_SER_LB_EN__SHIFT 0xa
+#define RDPCSTX5_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP_LANE3_TX2RX_SER_LB_EN__SHIFT 0xb
+#define RDPCSTX5_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP4_POR_MASK 0x00000008L
+#define RDPCSTX5_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP_LANE0_RX2TX_PAR_LB_EN_MASK 0x00000010L
+#define RDPCSTX5_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP_LANE1_RX2TX_PAR_LB_EN_MASK 0x00000020L
+#define RDPCSTX5_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP_LANE2_RX2TX_PAR_LB_EN_MASK 0x00000040L
+#define RDPCSTX5_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP_LANE3_RX2TX_PAR_LB_EN_MASK 0x00000080L
+#define RDPCSTX5_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP_LANE0_TX2RX_SER_LB_EN_MASK 0x00000100L
+#define RDPCSTX5_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP_LANE1_TX2RX_SER_LB_EN_MASK 0x00000200L
+#define RDPCSTX5_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP_LANE2_TX2RX_SER_LB_EN_MASK 0x00000400L
+#define RDPCSTX5_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP_LANE3_TX2RX_SER_LB_EN_MASK 0x00000800L
+//RDPCSTX5_RDPCSTX_PHY_CNTL3
+#define RDPCSTX5_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX0_RESET__SHIFT 0x0
+#define RDPCSTX5_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX0_DISABLE__SHIFT 0x1
+#define RDPCSTX5_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX0_CLK_RDY__SHIFT 0x2
+#define RDPCSTX5_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX0_DATA_EN__SHIFT 0x3
+#define RDPCSTX5_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX0_REQ__SHIFT 0x4
+#define RDPCSTX5_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX0_ACK__SHIFT 0x5
+#define RDPCSTX5_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX1_RESET__SHIFT 0x8
+#define RDPCSTX5_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX1_DISABLE__SHIFT 0x9
+#define RDPCSTX5_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX1_CLK_RDY__SHIFT 0xa
+#define RDPCSTX5_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX1_DATA_EN__SHIFT 0xb
+#define RDPCSTX5_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX1_REQ__SHIFT 0xc
+#define RDPCSTX5_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX1_ACK__SHIFT 0xd
+#define RDPCSTX5_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX2_RESET__SHIFT 0x10
+#define RDPCSTX5_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX2_DISABLE__SHIFT 0x11
+#define RDPCSTX5_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX2_CLK_RDY__SHIFT 0x12
+#define RDPCSTX5_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX2_DATA_EN__SHIFT 0x13
+#define RDPCSTX5_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX2_REQ__SHIFT 0x14
+#define RDPCSTX5_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX2_ACK__SHIFT 0x15
+#define RDPCSTX5_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX3_RESET__SHIFT 0x18
+#define RDPCSTX5_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX3_DISABLE__SHIFT 0x19
+#define RDPCSTX5_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX3_CLK_RDY__SHIFT 0x1a
+#define RDPCSTX5_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX3_DATA_EN__SHIFT 0x1b
+#define RDPCSTX5_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX3_REQ__SHIFT 0x1c
+#define RDPCSTX5_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX3_ACK__SHIFT 0x1d
+#define RDPCSTX5_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX0_RESET_MASK 0x00000001L
+#define RDPCSTX5_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX0_DISABLE_MASK 0x00000002L
+#define RDPCSTX5_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX0_CLK_RDY_MASK 0x00000004L
+#define RDPCSTX5_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX0_DATA_EN_MASK 0x00000008L
+#define RDPCSTX5_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX0_REQ_MASK 0x00000010L
+#define RDPCSTX5_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX0_ACK_MASK 0x00000020L
+#define RDPCSTX5_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX1_RESET_MASK 0x00000100L
+#define RDPCSTX5_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX1_DISABLE_MASK 0x00000200L
+#define RDPCSTX5_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX1_CLK_RDY_MASK 0x00000400L
+#define RDPCSTX5_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX1_DATA_EN_MASK 0x00000800L
+#define RDPCSTX5_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX1_REQ_MASK 0x00001000L
+#define RDPCSTX5_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX1_ACK_MASK 0x00002000L
+#define RDPCSTX5_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX2_RESET_MASK 0x00010000L
+#define RDPCSTX5_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX2_DISABLE_MASK 0x00020000L
+#define RDPCSTX5_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX2_CLK_RDY_MASK 0x00040000L
+#define RDPCSTX5_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX2_DATA_EN_MASK 0x00080000L
+#define RDPCSTX5_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX2_REQ_MASK 0x00100000L
+#define RDPCSTX5_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX2_ACK_MASK 0x00200000L
+#define RDPCSTX5_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX3_RESET_MASK 0x01000000L
+#define RDPCSTX5_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX3_DISABLE_MASK 0x02000000L
+#define RDPCSTX5_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX3_CLK_RDY_MASK 0x04000000L
+#define RDPCSTX5_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX3_DATA_EN_MASK 0x08000000L
+#define RDPCSTX5_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX3_REQ_MASK 0x10000000L
+#define RDPCSTX5_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX3_ACK_MASK 0x20000000L
+//RDPCSTX5_RDPCSTX_PHY_CNTL4
+#define RDPCSTX5_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX0_TERM_CTRL__SHIFT 0x0
+#define RDPCSTX5_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX0_INVERT__SHIFT 0x4
+#define RDPCSTX5_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX0_BYPASS_EQ_CALC__SHIFT 0x6
+#define RDPCSTX5_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX0_HP_PROT_EN__SHIFT 0x7
+#define RDPCSTX5_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX1_TERM_CTRL__SHIFT 0x8
+#define RDPCSTX5_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX1_INVERT__SHIFT 0xc
+#define RDPCSTX5_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX1_BYPASS_EQ_CALC__SHIFT 0xe
+#define RDPCSTX5_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX1_HP_PROT_EN__SHIFT 0xf
+#define RDPCSTX5_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX2_TERM_CTRL__SHIFT 0x10
+#define RDPCSTX5_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX2_INVERT__SHIFT 0x14
+#define RDPCSTX5_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX2_BYPASS_EQ_CALC__SHIFT 0x16
+#define RDPCSTX5_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX2_HP_PROT_EN__SHIFT 0x17
+#define RDPCSTX5_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX3_TERM_CTRL__SHIFT 0x18
+#define RDPCSTX5_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX3_INVERT__SHIFT 0x1c
+#define RDPCSTX5_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX3_BYPASS_EQ_CALC__SHIFT 0x1e
+#define RDPCSTX5_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX3_HP_PROT_EN__SHIFT 0x1f
+#define RDPCSTX5_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX0_TERM_CTRL_MASK 0x00000007L
+#define RDPCSTX5_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX0_INVERT_MASK 0x00000010L
+#define RDPCSTX5_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX0_BYPASS_EQ_CALC_MASK 0x00000040L
+#define RDPCSTX5_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX0_HP_PROT_EN_MASK 0x00000080L
+#define RDPCSTX5_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX1_TERM_CTRL_MASK 0x00000700L
+#define RDPCSTX5_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX1_INVERT_MASK 0x00001000L
+#define RDPCSTX5_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX1_BYPASS_EQ_CALC_MASK 0x00004000L
+#define RDPCSTX5_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX1_HP_PROT_EN_MASK 0x00008000L
+#define RDPCSTX5_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX2_TERM_CTRL_MASK 0x00070000L
+#define RDPCSTX5_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX2_INVERT_MASK 0x00100000L
+#define RDPCSTX5_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX2_BYPASS_EQ_CALC_MASK 0x00400000L
+#define RDPCSTX5_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX2_HP_PROT_EN_MASK 0x00800000L
+#define RDPCSTX5_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX3_TERM_CTRL_MASK 0x07000000L
+#define RDPCSTX5_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX3_INVERT_MASK 0x10000000L
+#define RDPCSTX5_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX3_BYPASS_EQ_CALC_MASK 0x40000000L
+#define RDPCSTX5_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX3_HP_PROT_EN_MASK 0x80000000L
+//RDPCSTX5_RDPCSTX_PHY_CNTL5
+#define RDPCSTX5_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX0_LPD__SHIFT 0x0
+#define RDPCSTX5_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX0_RATE__SHIFT 0x1
+#define RDPCSTX5_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX0_WIDTH__SHIFT 0x4
+#define RDPCSTX5_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX0_DETRX_REQ__SHIFT 0x6
+#define RDPCSTX5_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX0_DETRX_RESULT__SHIFT 0x7
+#define RDPCSTX5_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX1_LPD__SHIFT 0x8
+#define RDPCSTX5_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX1_RATE__SHIFT 0x9
+#define RDPCSTX5_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX1_WIDTH__SHIFT 0xc
+#define RDPCSTX5_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX1_DETRX_REQ__SHIFT 0xe
+#define RDPCSTX5_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX1_DETRX_RESULT__SHIFT 0xf
+#define RDPCSTX5_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX2_LPD__SHIFT 0x10
+#define RDPCSTX5_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX2_RATE__SHIFT 0x11
+#define RDPCSTX5_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX2_WIDTH__SHIFT 0x14
+#define RDPCSTX5_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX2_DETRX_REQ__SHIFT 0x16
+#define RDPCSTX5_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX2_DETRX_RESULT__SHIFT 0x17
+#define RDPCSTX5_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX3_LPD__SHIFT 0x18
+#define RDPCSTX5_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX3_RATE__SHIFT 0x19
+#define RDPCSTX5_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX3_WIDTH__SHIFT 0x1c
+#define RDPCSTX5_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX3_DETRX_REQ__SHIFT 0x1e
+#define RDPCSTX5_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX3_DETRX_RESULT__SHIFT 0x1f
+#define RDPCSTX5_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX0_LPD_MASK 0x00000001L
+#define RDPCSTX5_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX0_RATE_MASK 0x0000000EL
+#define RDPCSTX5_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX0_WIDTH_MASK 0x00000030L
+#define RDPCSTX5_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX0_DETRX_REQ_MASK 0x00000040L
+#define RDPCSTX5_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX0_DETRX_RESULT_MASK 0x00000080L
+#define RDPCSTX5_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX1_LPD_MASK 0x00000100L
+#define RDPCSTX5_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX1_RATE_MASK 0x00000E00L
+#define RDPCSTX5_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX1_WIDTH_MASK 0x00003000L
+#define RDPCSTX5_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX1_DETRX_REQ_MASK 0x00004000L
+#define RDPCSTX5_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX1_DETRX_RESULT_MASK 0x00008000L
+#define RDPCSTX5_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX2_LPD_MASK 0x00010000L
+#define RDPCSTX5_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX2_RATE_MASK 0x000E0000L
+#define RDPCSTX5_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX2_WIDTH_MASK 0x00300000L
+#define RDPCSTX5_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX2_DETRX_REQ_MASK 0x00400000L
+#define RDPCSTX5_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX2_DETRX_RESULT_MASK 0x00800000L
+#define RDPCSTX5_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX3_LPD_MASK 0x01000000L
+#define RDPCSTX5_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX3_RATE_MASK 0x0E000000L
+#define RDPCSTX5_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX3_WIDTH_MASK 0x30000000L
+#define RDPCSTX5_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX3_DETRX_REQ_MASK 0x40000000L
+#define RDPCSTX5_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX3_DETRX_RESULT_MASK 0x80000000L
+//RDPCSTX5_RDPCSTX_PHY_CNTL6
+#define RDPCSTX5_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX0_PSTATE__SHIFT 0x0
+#define RDPCSTX5_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX0_MPLL_EN__SHIFT 0x2
+#define RDPCSTX5_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX1_PSTATE__SHIFT 0x4
+#define RDPCSTX5_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX1_MPLL_EN__SHIFT 0x6
+#define RDPCSTX5_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX2_PSTATE__SHIFT 0x8
+#define RDPCSTX5_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX2_MPLL_EN__SHIFT 0xa
+#define RDPCSTX5_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX3_PSTATE__SHIFT 0xc
+#define RDPCSTX5_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX3_MPLL_EN__SHIFT 0xe
+#define RDPCSTX5_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DPALT_DP4__SHIFT 0x10
+#define RDPCSTX5_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DPALT_DISABLE__SHIFT 0x11
+#define RDPCSTX5_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DPALT_DISABLE_ACK__SHIFT 0x12
+#define RDPCSTX5_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_REF_CLK_EN__SHIFT 0x13
+#define RDPCSTX5_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_REF_CLK_REQ__SHIFT 0x14
+#define RDPCSTX5_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX0_PSTATE_MASK 0x00000003L
+#define RDPCSTX5_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX0_MPLL_EN_MASK 0x00000004L
+#define RDPCSTX5_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX1_PSTATE_MASK 0x00000030L
+#define RDPCSTX5_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX1_MPLL_EN_MASK 0x00000040L
+#define RDPCSTX5_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX2_PSTATE_MASK 0x00000300L
+#define RDPCSTX5_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX2_MPLL_EN_MASK 0x00000400L
+#define RDPCSTX5_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX3_PSTATE_MASK 0x00003000L
+#define RDPCSTX5_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX3_MPLL_EN_MASK 0x00004000L
+#define RDPCSTX5_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DPALT_DP4_MASK 0x00010000L
+#define RDPCSTX5_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DPALT_DISABLE_MASK 0x00020000L
+#define RDPCSTX5_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DPALT_DISABLE_ACK_MASK 0x00040000L
+#define RDPCSTX5_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_REF_CLK_EN_MASK 0x00080000L
+#define RDPCSTX5_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_REF_CLK_REQ_MASK 0x00100000L
+//RDPCSTX5_RDPCSTX_PHY_CNTL7
+#define RDPCSTX5_RDPCSTX_PHY_CNTL7__RDPCS_PHY_DP_MPLLB_FRACN_DEN__SHIFT 0x0
+#define RDPCSTX5_RDPCSTX_PHY_CNTL7__RDPCS_PHY_DP_MPLLB_FRACN_QUOT__SHIFT 0x10
+#define RDPCSTX5_RDPCSTX_PHY_CNTL7__RDPCS_PHY_DP_MPLLB_FRACN_DEN_MASK 0x0000FFFFL
+#define RDPCSTX5_RDPCSTX_PHY_CNTL7__RDPCS_PHY_DP_MPLLB_FRACN_QUOT_MASK 0xFFFF0000L
+//RDPCSTX5_RDPCSTX_PHY_CNTL8
+#define RDPCSTX5_RDPCSTX_PHY_CNTL8__RDPCS_PHY_DP_MPLLB_SSC_PEAK__SHIFT 0x0
+#define RDPCSTX5_RDPCSTX_PHY_CNTL8__RDPCS_PHY_DP_MPLLB_SSC_PEAK_MASK 0x000FFFFFL
+//RDPCSTX5_RDPCSTX_PHY_CNTL9
+#define RDPCSTX5_RDPCSTX_PHY_CNTL9__RDPCS_PHY_DP_MPLLB_SSC_STEPSIZE__SHIFT 0x0
+#define RDPCSTX5_RDPCSTX_PHY_CNTL9__RDPCS_PHY_DP_MPLLB_SSC_UP_SPREAD__SHIFT 0x18
+#define RDPCSTX5_RDPCSTX_PHY_CNTL9__RDPCS_PHY_DP_MPLLB_SSC_STEPSIZE_MASK 0x001FFFFFL
+#define RDPCSTX5_RDPCSTX_PHY_CNTL9__RDPCS_PHY_DP_MPLLB_SSC_UP_SPREAD_MASK 0x01000000L
+//RDPCSTX5_RDPCSTX_PHY_CNTL10
+#define RDPCSTX5_RDPCSTX_PHY_CNTL10__RDPCS_PHY_DP_MPLLB_FRACN_REM__SHIFT 0x0
+#define RDPCSTX5_RDPCSTX_PHY_CNTL10__RDPCS_PHY_DP_MPLLB_FRACN_REM_MASK 0x0000FFFFL
+//RDPCSTX5_RDPCSTX_PHY_CNTL11
+#define RDPCSTX5_RDPCSTX_PHY_CNTL11__RDPCS_PHY_DP_MPLLB_MULTIPLIER__SHIFT 0x4
+#define RDPCSTX5_RDPCSTX_PHY_CNTL11__RDPCS_PHY_HDMI_MPLLB_HDMI_DIV__SHIFT 0x10
+#define RDPCSTX5_RDPCSTX_PHY_CNTL11__RDPCS_PHY_DP_REF_CLK_MPLLB_DIV__SHIFT 0x14
+#define RDPCSTX5_RDPCSTX_PHY_CNTL11__RDPCS_PHY_HDMI_MPLLB_HDMI_PIXEL_CLK_DIV__SHIFT 0x18
+#define RDPCSTX5_RDPCSTX_PHY_CNTL11__RDPCS_PHY_DP_MPLLB_MULTIPLIER_MASK 0x0000FFF0L
+#define RDPCSTX5_RDPCSTX_PHY_CNTL11__RDPCS_PHY_HDMI_MPLLB_HDMI_DIV_MASK 0x00070000L
+#define RDPCSTX5_RDPCSTX_PHY_CNTL11__RDPCS_PHY_DP_REF_CLK_MPLLB_DIV_MASK 0x00700000L
+#define RDPCSTX5_RDPCSTX_PHY_CNTL11__RDPCS_PHY_HDMI_MPLLB_HDMI_PIXEL_CLK_DIV_MASK 0x03000000L
+//RDPCSTX5_RDPCSTX_PHY_CNTL12
+#define RDPCSTX5_RDPCSTX_PHY_CNTL12__RDPCS_PHY_DP_MPLLB_DIV5_CLK_EN__SHIFT 0x0
+#define RDPCSTX5_RDPCSTX_PHY_CNTL12__RDPCS_PHY_DP_MPLLB_WORD_DIV2_EN__SHIFT 0x2
+#define RDPCSTX5_RDPCSTX_PHY_CNTL12__RDPCS_PHY_DP_MPLLB_TX_CLK_DIV__SHIFT 0x4
+#define RDPCSTX5_RDPCSTX_PHY_CNTL12__RDPCS_PHY_DP_MPLLB_STATE__SHIFT 0x7
+#define RDPCSTX5_RDPCSTX_PHY_CNTL12__RDPCS_PHY_DP_MPLLB_SSC_EN__SHIFT 0x8
+#define RDPCSTX5_RDPCSTX_PHY_CNTL12__RDPCS_PHY_DP_MPLLB_DIV5_CLK_EN_MASK 0x00000001L
+#define RDPCSTX5_RDPCSTX_PHY_CNTL12__RDPCS_PHY_DP_MPLLB_WORD_DIV2_EN_MASK 0x00000004L
+#define RDPCSTX5_RDPCSTX_PHY_CNTL12__RDPCS_PHY_DP_MPLLB_TX_CLK_DIV_MASK 0x00000070L
+#define RDPCSTX5_RDPCSTX_PHY_CNTL12__RDPCS_PHY_DP_MPLLB_STATE_MASK 0x00000080L
+#define RDPCSTX5_RDPCSTX_PHY_CNTL12__RDPCS_PHY_DP_MPLLB_SSC_EN_MASK 0x00000100L
+//RDPCSTX5_RDPCSTX_PHY_CNTL13
+#define RDPCSTX5_RDPCSTX_PHY_CNTL13__RDPCS_PHY_DP_MPLLB_DIV_MULTIPLIER__SHIFT 0x14
+#define RDPCSTX5_RDPCSTX_PHY_CNTL13__RDPCS_PHY_DP_MPLLB_DIV_CLK_EN__SHIFT 0x1c
+#define RDPCSTX5_RDPCSTX_PHY_CNTL13__RDPCS_PHY_DP_MPLLB_FORCE_EN__SHIFT 0x1d
+#define RDPCSTX5_RDPCSTX_PHY_CNTL13__RDPCS_PHY_DP_MPLLB_INIT_CAL_DISABLE__SHIFT 0x1e
+#define RDPCSTX5_RDPCSTX_PHY_CNTL13__RDPCS_PHY_DP_MPLLB_DIV_MULTIPLIER_MASK 0x0FF00000L
+#define RDPCSTX5_RDPCSTX_PHY_CNTL13__RDPCS_PHY_DP_MPLLB_DIV_CLK_EN_MASK 0x10000000L
+#define RDPCSTX5_RDPCSTX_PHY_CNTL13__RDPCS_PHY_DP_MPLLB_FORCE_EN_MASK 0x20000000L
+#define RDPCSTX5_RDPCSTX_PHY_CNTL13__RDPCS_PHY_DP_MPLLB_INIT_CAL_DISABLE_MASK 0x40000000L
+//RDPCSTX5_RDPCSTX_PHY_CNTL14
+#define RDPCSTX5_RDPCSTX_PHY_CNTL14__RDPCS_PHY_DP_MPLLB_CAL_FORCE__SHIFT 0x0
+#define RDPCSTX5_RDPCSTX_PHY_CNTL14__RDPCS_PHY_DP_MPLLB_FRACN_EN__SHIFT 0x18
+#define RDPCSTX5_RDPCSTX_PHY_CNTL14__RDPCS_PHY_DP_MPLLB_PMIX_EN__SHIFT 0x1c
+#define RDPCSTX5_RDPCSTX_PHY_CNTL14__RDPCS_PHY_DP_MPLLB_CAL_FORCE_MASK 0x00000001L
+#define RDPCSTX5_RDPCSTX_PHY_CNTL14__RDPCS_PHY_DP_MPLLB_FRACN_EN_MASK 0x01000000L
+#define RDPCSTX5_RDPCSTX_PHY_CNTL14__RDPCS_PHY_DP_MPLLB_PMIX_EN_MASK 0x10000000L
+//RDPCSTX5_RDPCSTX_PHY_FUSE0
+#define RDPCSTX5_RDPCSTX_PHY_FUSE0__RDPCS_PHY_DP_TX0_EQ_MAIN__SHIFT 0x0
+#define RDPCSTX5_RDPCSTX_PHY_FUSE0__RDPCS_PHY_DP_TX0_EQ_PRE__SHIFT 0x6
+#define RDPCSTX5_RDPCSTX_PHY_FUSE0__RDPCS_PHY_DP_TX0_EQ_POST__SHIFT 0xc
+#define RDPCSTX5_RDPCSTX_PHY_FUSE0__RDPCS_PHY_DP_MPLLB_V2I__SHIFT 0x12
+#define RDPCSTX5_RDPCSTX_PHY_FUSE0__RDPCS_PHY_DP_MPLLB_FREQ_VCO__SHIFT 0x14
+#define RDPCSTX5_RDPCSTX_PHY_FUSE0__RDPCS_PHY_DP_TX0_EQ_MAIN_MASK 0x0000003FL
+#define RDPCSTX5_RDPCSTX_PHY_FUSE0__RDPCS_PHY_DP_TX0_EQ_PRE_MASK 0x00000FC0L
+#define RDPCSTX5_RDPCSTX_PHY_FUSE0__RDPCS_PHY_DP_TX0_EQ_POST_MASK 0x0003F000L
+#define RDPCSTX5_RDPCSTX_PHY_FUSE0__RDPCS_PHY_DP_MPLLB_V2I_MASK 0x000C0000L
+#define RDPCSTX5_RDPCSTX_PHY_FUSE0__RDPCS_PHY_DP_MPLLB_FREQ_VCO_MASK 0x00300000L
+//RDPCSTX5_RDPCSTX_PHY_FUSE1
+#define RDPCSTX5_RDPCSTX_PHY_FUSE1__RDPCS_PHY_DP_TX1_EQ_MAIN__SHIFT 0x0
+#define RDPCSTX5_RDPCSTX_PHY_FUSE1__RDPCS_PHY_DP_TX1_EQ_PRE__SHIFT 0x6
+#define RDPCSTX5_RDPCSTX_PHY_FUSE1__RDPCS_PHY_DP_TX1_EQ_POST__SHIFT 0xc
+#define RDPCSTX5_RDPCSTX_PHY_FUSE1__RDPCS_PHY_DP_MPLLB_CP_INT__SHIFT 0x12
+#define RDPCSTX5_RDPCSTX_PHY_FUSE1__RDPCS_PHY_DP_MPLLB_CP_PROP__SHIFT 0x19
+#define RDPCSTX5_RDPCSTX_PHY_FUSE1__RDPCS_PHY_DP_TX1_EQ_MAIN_MASK 0x0000003FL
+#define RDPCSTX5_RDPCSTX_PHY_FUSE1__RDPCS_PHY_DP_TX1_EQ_PRE_MASK 0x00000FC0L
+#define RDPCSTX5_RDPCSTX_PHY_FUSE1__RDPCS_PHY_DP_TX1_EQ_POST_MASK 0x0003F000L
+#define RDPCSTX5_RDPCSTX_PHY_FUSE1__RDPCS_PHY_DP_MPLLB_CP_INT_MASK 0x01FC0000L
+#define RDPCSTX5_RDPCSTX_PHY_FUSE1__RDPCS_PHY_DP_MPLLB_CP_PROP_MASK 0xFE000000L
+//RDPCSTX5_RDPCSTX_PHY_FUSE2
+#define RDPCSTX5_RDPCSTX_PHY_FUSE2__RDPCS_PHY_DP_TX2_EQ_MAIN__SHIFT 0x0
+#define RDPCSTX5_RDPCSTX_PHY_FUSE2__RDPCS_PHY_DP_TX2_EQ_PRE__SHIFT 0x6
+#define RDPCSTX5_RDPCSTX_PHY_FUSE2__RDPCS_PHY_DP_TX2_EQ_POST__SHIFT 0xc
+#define RDPCSTX5_RDPCSTX_PHY_FUSE2__RDPCS_PHY_DP_TX2_EQ_MAIN_MASK 0x0000003FL
+#define RDPCSTX5_RDPCSTX_PHY_FUSE2__RDPCS_PHY_DP_TX2_EQ_PRE_MASK 0x00000FC0L
+#define RDPCSTX5_RDPCSTX_PHY_FUSE2__RDPCS_PHY_DP_TX2_EQ_POST_MASK 0x0003F000L
+//RDPCSTX5_RDPCSTX_PHY_FUSE3
+#define RDPCSTX5_RDPCSTX_PHY_FUSE3__RDPCS_PHY_DP_TX3_EQ_MAIN__SHIFT 0x0
+#define RDPCSTX5_RDPCSTX_PHY_FUSE3__RDPCS_PHY_DP_TX3_EQ_PRE__SHIFT 0x6
+#define RDPCSTX5_RDPCSTX_PHY_FUSE3__RDPCS_PHY_DP_TX3_EQ_POST__SHIFT 0xc
+#define RDPCSTX5_RDPCSTX_PHY_FUSE3__RDPCS_PHY_DCO_FINETUNE__SHIFT 0x12
+#define RDPCSTX5_RDPCSTX_PHY_FUSE3__RDPCS_PHY_DCO_RANGE__SHIFT 0x18
+#define RDPCSTX5_RDPCSTX_PHY_FUSE3__RDPCS_PHY_DP_TX3_EQ_MAIN_MASK 0x0000003FL
+#define RDPCSTX5_RDPCSTX_PHY_FUSE3__RDPCS_PHY_DP_TX3_EQ_PRE_MASK 0x00000FC0L
+#define RDPCSTX5_RDPCSTX_PHY_FUSE3__RDPCS_PHY_DP_TX3_EQ_POST_MASK 0x0003F000L
+#define RDPCSTX5_RDPCSTX_PHY_FUSE3__RDPCS_PHY_DCO_FINETUNE_MASK 0x00FC0000L
+#define RDPCSTX5_RDPCSTX_PHY_FUSE3__RDPCS_PHY_DCO_RANGE_MASK 0x03000000L
+//RDPCSTX5_RDPCSTX_PHY_RX_LD_VAL
+#define RDPCSTX5_RDPCSTX_PHY_RX_LD_VAL__RDPCS_PHY_RX_REF_LD_VAL__SHIFT 0x0
+#define RDPCSTX5_RDPCSTX_PHY_RX_LD_VAL__RDPCS_PHY_RX_VCO_LD_VAL__SHIFT 0x8
+#define RDPCSTX5_RDPCSTX_PHY_RX_LD_VAL__RDPCS_PHY_RX_REF_LD_VAL_MASK 0x0000007FL
+#define RDPCSTX5_RDPCSTX_PHY_RX_LD_VAL__RDPCS_PHY_RX_VCO_LD_VAL_MASK 0x001FFF00L
+//RDPCSTX5_RDPCSTX_DMCU_DPALT_PHY_CNTL3
+#define RDPCSTX5_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX0_RESET_RESERVED__SHIFT 0x0
+#define RDPCSTX5_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX0_DISABLE_RESERVED__SHIFT 0x1
+#define RDPCSTX5_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX0_CLK_RDY_RESERVED__SHIFT 0x2
+#define RDPCSTX5_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX0_DATA_EN_RESERVED__SHIFT 0x3
+#define RDPCSTX5_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX0_REQ_RESERVED__SHIFT 0x4
+#define RDPCSTX5_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX0_ACK_RESERVED__SHIFT 0x5
+#define RDPCSTX5_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX1_RESET_RESERVED__SHIFT 0x8
+#define RDPCSTX5_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX1_DISABLE_RESERVED__SHIFT 0x9
+#define RDPCSTX5_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX1_CLK_RDY_RESERVED__SHIFT 0xa
+#define RDPCSTX5_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX1_DATA_EN_RESERVED__SHIFT 0xb
+#define RDPCSTX5_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX1_REQ_RESERVED__SHIFT 0xc
+#define RDPCSTX5_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX1_ACK_RESERVED__SHIFT 0xd
+#define RDPCSTX5_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX2_RESET_RESERVED__SHIFT 0x10
+#define RDPCSTX5_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX2_DISABLE_RESERVED__SHIFT 0x11
+#define RDPCSTX5_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX2_CLK_RDY_RESERVED__SHIFT 0x12
+#define RDPCSTX5_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX2_DATA_EN_RESERVED__SHIFT 0x13
+#define RDPCSTX5_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX2_REQ_RESERVED__SHIFT 0x14
+#define RDPCSTX5_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX2_ACK_RESERVED__SHIFT 0x15
+#define RDPCSTX5_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX3_RESET_RESERVED__SHIFT 0x18
+#define RDPCSTX5_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX3_DISABLE_RESERVED__SHIFT 0x19
+#define RDPCSTX5_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX3_CLK_RDY_RESERVED__SHIFT 0x1a
+#define RDPCSTX5_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX3_DATA_EN_RESERVED__SHIFT 0x1b
+#define RDPCSTX5_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX3_REQ_RESERVED__SHIFT 0x1c
+#define RDPCSTX5_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX3_ACK_RESERVED__SHIFT 0x1d
+#define RDPCSTX5_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX0_RESET_RESERVED_MASK 0x00000001L
+#define RDPCSTX5_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX0_DISABLE_RESERVED_MASK 0x00000002L
+#define RDPCSTX5_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX0_CLK_RDY_RESERVED_MASK 0x00000004L
+#define RDPCSTX5_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX0_DATA_EN_RESERVED_MASK 0x00000008L
+#define RDPCSTX5_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX0_REQ_RESERVED_MASK 0x00000010L
+#define RDPCSTX5_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX0_ACK_RESERVED_MASK 0x00000020L
+#define RDPCSTX5_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX1_RESET_RESERVED_MASK 0x00000100L
+#define RDPCSTX5_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX1_DISABLE_RESERVED_MASK 0x00000200L
+#define RDPCSTX5_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX1_CLK_RDY_RESERVED_MASK 0x00000400L
+#define RDPCSTX5_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX1_DATA_EN_RESERVED_MASK 0x00000800L
+#define RDPCSTX5_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX1_REQ_RESERVED_MASK 0x00001000L
+#define RDPCSTX5_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX1_ACK_RESERVED_MASK 0x00002000L
+#define RDPCSTX5_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX2_RESET_RESERVED_MASK 0x00010000L
+#define RDPCSTX5_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX2_DISABLE_RESERVED_MASK 0x00020000L
+#define RDPCSTX5_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX2_CLK_RDY_RESERVED_MASK 0x00040000L
+#define RDPCSTX5_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX2_DATA_EN_RESERVED_MASK 0x00080000L
+#define RDPCSTX5_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX2_REQ_RESERVED_MASK 0x00100000L
+#define RDPCSTX5_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX2_ACK_RESERVED_MASK 0x00200000L
+#define RDPCSTX5_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX3_RESET_RESERVED_MASK 0x01000000L
+#define RDPCSTX5_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX3_DISABLE_RESERVED_MASK 0x02000000L
+#define RDPCSTX5_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX3_CLK_RDY_RESERVED_MASK 0x04000000L
+#define RDPCSTX5_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX3_DATA_EN_RESERVED_MASK 0x08000000L
+#define RDPCSTX5_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX3_REQ_RESERVED_MASK 0x10000000L
+#define RDPCSTX5_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX3_ACK_RESERVED_MASK 0x20000000L
+//RDPCSTX5_RDPCSTX_DMCU_DPALT_PHY_CNTL6
+#define RDPCSTX5_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_TX0_PSTATE_RESERVED__SHIFT 0x0
+#define RDPCSTX5_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_TX0_MPLL_EN_RESERVED__SHIFT 0x2
+#define RDPCSTX5_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_TX1_PSTATE_RESERVED__SHIFT 0x4
+#define RDPCSTX5_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_TX1_MPLL_EN_RESERVED__SHIFT 0x6
+#define RDPCSTX5_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_TX2_PSTATE_RESERVED__SHIFT 0x8
+#define RDPCSTX5_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_TX2_MPLL_EN_RESERVED__SHIFT 0xa
+#define RDPCSTX5_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_TX3_PSTATE_RESERVED__SHIFT 0xc
+#define RDPCSTX5_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_TX3_MPLL_EN_RESERVED__SHIFT 0xe
+#define RDPCSTX5_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DPALT_DP4_RESERVED__SHIFT 0x10
+#define RDPCSTX5_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DPALT_DISABLE_RESERVED__SHIFT 0x11
+#define RDPCSTX5_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DPALT_DISABLE_ACK_RESERVED__SHIFT 0x12
+#define RDPCSTX5_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_REF_CLK_EN_RESERVED__SHIFT 0x13
+#define RDPCSTX5_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_REF_CLK_REQ_RESERVED__SHIFT 0x14
+#define RDPCSTX5_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_TX0_PSTATE_RESERVED_MASK 0x00000003L
+#define RDPCSTX5_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_TX0_MPLL_EN_RESERVED_MASK 0x00000004L
+#define RDPCSTX5_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_TX1_PSTATE_RESERVED_MASK 0x00000030L
+#define RDPCSTX5_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_TX1_MPLL_EN_RESERVED_MASK 0x00000040L
+#define RDPCSTX5_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_TX2_PSTATE_RESERVED_MASK 0x00000300L
+#define RDPCSTX5_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_TX2_MPLL_EN_RESERVED_MASK 0x00000400L
+#define RDPCSTX5_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_TX3_PSTATE_RESERVED_MASK 0x00003000L
+#define RDPCSTX5_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_TX3_MPLL_EN_RESERVED_MASK 0x00004000L
+#define RDPCSTX5_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DPALT_DP4_RESERVED_MASK 0x00010000L
+#define RDPCSTX5_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DPALT_DISABLE_RESERVED_MASK 0x00020000L
+#define RDPCSTX5_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DPALT_DISABLE_ACK_RESERVED_MASK 0x00040000L
+#define RDPCSTX5_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_REF_CLK_EN_RESERVED_MASK 0x00080000L
+#define RDPCSTX5_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_REF_CLK_REQ_RESERVED_MASK 0x00100000L
+//RDPCSTX5_RDPCSTX_DPALT_CONTROL_REG
+#define RDPCSTX5_RDPCSTX_DPALT_CONTROL_REG__RDPCS_ALLOW_DRIVER_ACCESS__SHIFT 0x0
+#define RDPCSTX5_RDPCSTX_DPALT_CONTROL_REG__RDPCS_DRIVER_ACCESS_BLOCKED__SHIFT 0x4
+#define RDPCSTX5_RDPCSTX_DPALT_CONTROL_REG__RDPCS_DPALT_CONTROL_SPARE__SHIFT 0x8
+#define RDPCSTX5_RDPCSTX_DPALT_CONTROL_REG__RDPCS_ALLOW_DRIVER_ACCESS_MASK 0x00000001L
+#define RDPCSTX5_RDPCSTX_DPALT_CONTROL_REG__RDPCS_DRIVER_ACCESS_BLOCKED_MASK 0x00000010L
+#define RDPCSTX5_RDPCSTX_DPALT_CONTROL_REG__RDPCS_DPALT_CONTROL_SPARE_MASK 0x0000FF00L
+
+
+// addressBlock: dpcssys_dpcssys_cr5_dispdec
+//DPCSSYS_CR5_DPCSSYS_CR_ADDR
+#define DPCSSYS_CR5_DPCSSYS_CR_ADDR__RDPCS_TX_CR_ADDR__SHIFT 0x0
+#define DPCSSYS_CR5_DPCSSYS_CR_ADDR__RDPCS_TX_CR_ADDR_MASK 0x0000FFFFL
+//DPCSSYS_CR5_DPCSSYS_CR_DATA
+#define DPCSSYS_CR5_DPCSSYS_CR_DATA__RDPCS_TX_CR_DATA__SHIFT 0x0
+#define DPCSSYS_CR5_DPCSSYS_CR_DATA__RDPCS_TX_CR_DATA_MASK 0x0000FFFFL
+
+#endif
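
Every field in the register headers above is described by a matched __SHIFT/_MASK pair. As a reading aid only (not part of the patch), here is a minimal C sketch of the shift-and-mask idiom these defines exist to support; fuse0 stands in for a hypothetical raw readout of RDPCSTX5_RDPCSTX_PHY_FUSE0.

#include <stdint.h>

/* Generic field helpers over a 32-bit register value. */
static inline uint32_t field_get(uint32_t reg, uint32_t mask, uint32_t shift)
{
	return (reg & mask) >> shift;
}

static inline uint32_t field_set(uint32_t reg, uint32_t mask, uint32_t shift,
				 uint32_t val)
{
	return (reg & ~mask) | ((val << shift) & mask);
}

/* Example: pull the TX0 equalization main tap out of a PHY_FUSE0 readout. */
static uint32_t fuse0_eq_main(uint32_t fuse0)
{
	return field_get(fuse0,
			 RDPCSTX5_RDPCSTX_PHY_FUSE0__RDPCS_PHY_DP_TX0_EQ_MAIN_MASK,
			 RDPCSTX5_RDPCSTX_PHY_FUSE0__RDPCS_PHY_DP_TX0_EQ_MAIN__SHIFT);
}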
diff --git a/drivers/gpu/drm/amd/include/asic_reg/dcn/dpcs_2_1_0_offset.h b/drivers/gpu/drm/amd/include/asic_reg/dpcs/dpcs_2_1_0_offset.h
index 945bb6101a9d..945bb6101a9d 100644
--- a/drivers/gpu/drm/amd/include/asic_reg/dcn/dpcs_2_1_0_offset.h
+++ b/drivers/gpu/drm/amd/include/asic_reg/dpcs/dpcs_2_1_0_offset.h
diff --git a/drivers/gpu/drm/amd/include/asic_reg/dcn/dpcs_2_1_0_sh_mask.h b/drivers/gpu/drm/amd/include/asic_reg/dpcs/dpcs_2_1_0_sh_mask.h
index 6e039f2208e1..6e039f2208e1 100644
--- a/drivers/gpu/drm/amd/include/asic_reg/dcn/dpcs_2_1_0_sh_mask.h
+++ b/drivers/gpu/drm/amd/include/asic_reg/dpcs/dpcs_2_1_0_sh_mask.h
diff --git a/drivers/gpu/drm/amd/include/asic_reg/gc/gc_9_0_offset.h b/drivers/gpu/drm/amd/include/asic_reg/gc/gc_9_0_offset.h
index 2bfaaa8157d0..d984c916df80 100644
--- a/drivers/gpu/drm/amd/include/asic_reg/gc/gc_9_0_offset.h
+++ b/drivers/gpu/drm/amd/include/asic_reg/gc/gc_9_0_offset.h
@@ -2224,6 +2224,14 @@
#define mmCOMPUTE_STATIC_THREAD_MGMT_SE2_BASE_IDX 0
#define mmCOMPUTE_STATIC_THREAD_MGMT_SE3 0x0e1a
#define mmCOMPUTE_STATIC_THREAD_MGMT_SE3_BASE_IDX 0
+#define mmCOMPUTE_STATIC_THREAD_MGMT_SE4 0x0e25
+#define mmCOMPUTE_STATIC_THREAD_MGMT_SE4_BASE_IDX 0
+#define mmCOMPUTE_STATIC_THREAD_MGMT_SE5 0x0e26
+#define mmCOMPUTE_STATIC_THREAD_MGMT_SE5_BASE_IDX 0
+#define mmCOMPUTE_STATIC_THREAD_MGMT_SE6 0x0e27
+#define mmCOMPUTE_STATIC_THREAD_MGMT_SE6_BASE_IDX 0
+#define mmCOMPUTE_STATIC_THREAD_MGMT_SE7 0x0e28
+#define mmCOMPUTE_STATIC_THREAD_MGMT_SE7_BASE_IDX 0
#define mmCOMPUTE_RESTART_X 0x0e1b
#define mmCOMPUTE_RESTART_X_BASE_IDX 0
#define mmCOMPUTE_RESTART_Y 0x0e1c
diff --git a/drivers/gpu/drm/amd/include/asic_reg/gc/gc_9_0_sh_mask.h b/drivers/gpu/drm/amd/include/asic_reg/gc/gc_9_0_sh_mask.h
index d4c613a85352..c9e3f6d849a8 100644
--- a/drivers/gpu/drm/amd/include/asic_reg/gc/gc_9_0_sh_mask.h
+++ b/drivers/gpu/drm/amd/include/asic_reg/gc/gc_9_0_sh_mask.h
@@ -8739,10 +8739,16 @@
#define TCP_ADDR_CONFIG__NUM_BANKS__SHIFT 0x4
#define TCP_ADDR_CONFIG__COLHI_WIDTH__SHIFT 0x6
#define TCP_ADDR_CONFIG__RB_SPLIT_COLHI__SHIFT 0x9
+#define TCP_ADDR_CONFIG__ENABLE64KHASH__SHIFT 0xb
+#define TCP_ADDR_CONFIG__ENABLE2MHASH__SHIFT 0xc
+#define TCP_ADDR_CONFIG__ENABLE1GHASH__SHIFT 0xd
#define TCP_ADDR_CONFIG__NUM_TCC_BANKS_MASK 0x0000000FL
#define TCP_ADDR_CONFIG__NUM_BANKS_MASK 0x00000030L
#define TCP_ADDR_CONFIG__COLHI_WIDTH_MASK 0x000001C0L
#define TCP_ADDR_CONFIG__RB_SPLIT_COLHI_MASK 0x00000200L
+#define TCP_ADDR_CONFIG__ENABLE64KHASH_MASK 0x00000800L
+#define TCP_ADDR_CONFIG__ENABLE2MHASH_MASK 0x00001000L
+#define TCP_ADDR_CONFIG__ENABLE1GHASH_MASK 0x00002000L
//TCP_CREDIT
#define TCP_CREDIT__LFIFO_CREDIT__SHIFT 0x0
#define TCP_CREDIT__REQ_FIFO_CREDIT__SHIFT 0x10
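
The gc_9_0 hunk above exposes three new page-hash enable bits in TCP_ADDR_CONFIG. A hedged sketch of how such bits would be set with a read-modify-write follows; read_reg()/write_reg() are hypothetical MMIO stand-ins, not amdgpu API, and the mmTCP_ADDR_CONFIG offset is assumed to come from the companion offset header.

/* Hypothetical MMIO helpers -- stand-ins, not real driver accessors. */
extern uint32_t read_reg(uint32_t offset);
extern void write_reg(uint32_t offset, uint32_t value);

static void tcp_enable_address_hashes(void)
{
	uint32_t v = read_reg(mmTCP_ADDR_CONFIG);

	/* Turn on hashing for all three page sizes added above. */
	v |= TCP_ADDR_CONFIG__ENABLE64KHASH_MASK |
	     TCP_ADDR_CONFIG__ENABLE2MHASH_MASK |
	     TCP_ADDR_CONFIG__ENABLE1GHASH_MASK;
	write_reg(mmTCP_ADDR_CONFIG, v);
}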
diff --git a/drivers/gpu/drm/amd/include/asic_reg/mmhub/mmhub_1_0_offset.h b/drivers/gpu/drm/amd/include/asic_reg/mmhub/mmhub_1_0_offset.h
index 352ffae7a7ca..2c3ce243861a 100644
--- a/drivers/gpu/drm/amd/include/asic_reg/mmhub/mmhub_1_0_offset.h
+++ b/drivers/gpu/drm/amd/include/asic_reg/mmhub/mmhub_1_0_offset.h
@@ -1964,4 +1964,20 @@
#define mmATC_L2_PERFCOUNTER_RSLT_CNTL 0x084a
#define mmATC_L2_PERFCOUNTER_RSLT_CNTL_BASE_IDX 0
+/* MMEA */
+#define mmMMEA0_EDC_CNT_VG20 0x0206
+#define mmMMEA0_EDC_CNT_VG20_BASE_IDX 0
+#define mmMMEA0_EDC_CNT2_VG20 0x0207
+#define mmMMEA0_EDC_CNT2_VG20_BASE_IDX 0
+#define mmMMEA1_EDC_CNT_VG20 0x0346
+#define mmMMEA1_EDC_CNT_VG20_BASE_IDX 0
+#define mmMMEA1_EDC_CNT2_VG20 0x0347
+#define mmMMEA1_EDC_CNT2_VG20_BASE_IDX 0
+
+// addressBlock: mmhub_utcl2_vmsharedpfdec
+// base address: 0x6a040
+#define mmMC_VM_XGMI_LFB_CNTL 0x0823
+#define mmMC_VM_XGMI_LFB_CNTL_BASE_IDX 0
+#define mmMC_VM_XGMI_LFB_SIZE 0x0824
+#define mmMC_VM_XGMI_LFB_SIZE_BASE_IDX 0
#endif
diff --git a/drivers/gpu/drm/amd/include/asic_reg/mmhub/mmhub_1_0_sh_mask.h b/drivers/gpu/drm/amd/include/asic_reg/mmhub/mmhub_1_0_sh_mask.h
index 34278ef2aa1b..198f5f93ed1a 100644
--- a/drivers/gpu/drm/amd/include/asic_reg/mmhub/mmhub_1_0_sh_mask.h
+++ b/drivers/gpu/drm/amd/include/asic_reg/mmhub/mmhub_1_0_sh_mask.h
@@ -10124,4 +10124,126 @@
#define ATC_L2_PERFCOUNTER_RSLT_CNTL__CLEAR_ALL_MASK 0x02000000L
#define ATC_L2_PERFCOUNTER_RSLT_CNTL__STOP_ALL_ON_SATURATE_MASK 0x04000000L
+//MMEA0_EDC_CNT
+#define MMEA0_EDC_CNT_VG20__DRAMRD_CMDMEM_SEC_COUNT__SHIFT 0x0
+#define MMEA0_EDC_CNT_VG20__DRAMRD_CMDMEM_DED_COUNT__SHIFT 0x2
+#define MMEA0_EDC_CNT_VG20__DRAMWR_CMDMEM_SEC_COUNT__SHIFT 0x4
+#define MMEA0_EDC_CNT_VG20__DRAMWR_CMDMEM_DED_COUNT__SHIFT 0x6
+#define MMEA0_EDC_CNT_VG20__DRAMWR_DATAMEM_SEC_COUNT__SHIFT 0x8
+#define MMEA0_EDC_CNT_VG20__DRAMWR_DATAMEM_DED_COUNT__SHIFT 0xa
+#define MMEA0_EDC_CNT_VG20__RRET_TAGMEM_SEC_COUNT__SHIFT 0xc
+#define MMEA0_EDC_CNT_VG20__RRET_TAGMEM_DED_COUNT__SHIFT 0xe
+#define MMEA0_EDC_CNT_VG20__WRET_TAGMEM_SEC_COUNT__SHIFT 0x10
+#define MMEA0_EDC_CNT_VG20__WRET_TAGMEM_DED_COUNT__SHIFT 0x12
+#define MMEA0_EDC_CNT_VG20__DRAMRD_PAGEMEM_SED_COUNT__SHIFT 0x14
+#define MMEA0_EDC_CNT_VG20__DRAMWR_PAGEMEM_SED_COUNT__SHIFT 0x16
+#define MMEA0_EDC_CNT_VG20__IORD_CMDMEM_SED_COUNT__SHIFT 0x18
+#define MMEA0_EDC_CNT_VG20__IOWR_CMDMEM_SED_COUNT__SHIFT 0x1a
+#define MMEA0_EDC_CNT_VG20__IOWR_DATAMEM_SED_COUNT__SHIFT 0x1c
+#define MMEA0_EDC_CNT_VG20__DRAMRD_CMDMEM_SEC_COUNT_MASK 0x00000003L
+#define MMEA0_EDC_CNT_VG20__DRAMRD_CMDMEM_DED_COUNT_MASK 0x0000000CL
+#define MMEA0_EDC_CNT_VG20__DRAMWR_CMDMEM_SEC_COUNT_MASK 0x00000030L
+#define MMEA0_EDC_CNT_VG20__DRAMWR_CMDMEM_DED_COUNT_MASK 0x000000C0L
+#define MMEA0_EDC_CNT_VG20__DRAMWR_DATAMEM_SEC_COUNT_MASK 0x00000300L
+#define MMEA0_EDC_CNT_VG20__DRAMWR_DATAMEM_DED_COUNT_MASK 0x00000C00L
+#define MMEA0_EDC_CNT_VG20__RRET_TAGMEM_SEC_COUNT_MASK 0x00003000L
+#define MMEA0_EDC_CNT_VG20__RRET_TAGMEM_DED_COUNT_MASK 0x0000C000L
+#define MMEA0_EDC_CNT_VG20__WRET_TAGMEM_SEC_COUNT_MASK 0x00030000L
+#define MMEA0_EDC_CNT_VG20__WRET_TAGMEM_DED_COUNT_MASK 0x000C0000L
+#define MMEA0_EDC_CNT_VG20__DRAMRD_PAGEMEM_SED_COUNT_MASK 0x00300000L
+#define MMEA0_EDC_CNT_VG20__DRAMWR_PAGEMEM_SED_COUNT_MASK 0x00C00000L
+#define MMEA0_EDC_CNT_VG20__IORD_CMDMEM_SED_COUNT_MASK 0x03000000L
+#define MMEA0_EDC_CNT_VG20__IOWR_CMDMEM_SED_COUNT_MASK 0x0C000000L
+#define MMEA0_EDC_CNT_VG20__IOWR_DATAMEM_SED_COUNT_MASK 0x30000000L
+//MMEA0_EDC_CNT2
+#define MMEA0_EDC_CNT2_VG20__GMIRD_CMDMEM_SEC_COUNT__SHIFT 0x0
+#define MMEA0_EDC_CNT2_VG20__GMIRD_CMDMEM_DED_COUNT__SHIFT 0x2
+#define MMEA0_EDC_CNT2_VG20__GMIWR_CMDMEM_SEC_COUNT__SHIFT 0x4
+#define MMEA0_EDC_CNT2_VG20__GMIWR_CMDMEM_DED_COUNT__SHIFT 0x6
+#define MMEA0_EDC_CNT2_VG20__GMIWR_DATAMEM_SEC_COUNT__SHIFT 0x8
+#define MMEA0_EDC_CNT2_VG20__GMIWR_DATAMEM_DED_COUNT__SHIFT 0xa
+#define MMEA0_EDC_CNT2_VG20__GMIRD_PAGEMEM_SED_COUNT__SHIFT 0xc
+#define MMEA0_EDC_CNT2_VG20__GMIWR_PAGEMEM_SED_COUNT__SHIFT 0xe
+#define MMEA0_EDC_CNT2_VG20__MAM_D0MEM_SED_COUNT__SHIFT 0x10
+#define MMEA0_EDC_CNT2_VG20__MAM_D1MEM_SED_COUNT__SHIFT 0x12
+#define MMEA0_EDC_CNT2_VG20__MAM_D2MEM_SED_COUNT__SHIFT 0x14
+#define MMEA0_EDC_CNT2_VG20__MAM_D3MEM_SED_COUNT__SHIFT 0x16
+#define MMEA0_EDC_CNT2_VG20__GMIRD_CMDMEM_SEC_COUNT_MASK 0x00000003L
+#define MMEA0_EDC_CNT2_VG20__GMIRD_CMDMEM_DED_COUNT_MASK 0x0000000CL
+#define MMEA0_EDC_CNT2_VG20__GMIWR_CMDMEM_SEC_COUNT_MASK 0x00000030L
+#define MMEA0_EDC_CNT2_VG20__GMIWR_CMDMEM_DED_COUNT_MASK 0x000000C0L
+#define MMEA0_EDC_CNT2_VG20__GMIWR_DATAMEM_SEC_COUNT_MASK 0x00000300L
+#define MMEA0_EDC_CNT2_VG20__GMIWR_DATAMEM_DED_COUNT_MASK 0x00000C00L
+#define MMEA0_EDC_CNT2_VG20__GMIRD_PAGEMEM_SED_COUNT_MASK 0x00003000L
+#define MMEA0_EDC_CNT2_VG20__GMIWR_PAGEMEM_SED_COUNT_MASK 0x0000C000L
+#define MMEA0_EDC_CNT2_VG20__MAM_D0MEM_SED_COUNT_MASK 0x00030000L
+#define MMEA0_EDC_CNT2_VG20__MAM_D1MEM_SED_COUNT_MASK 0x000C0000L
+#define MMEA0_EDC_CNT2_VG20__MAM_D2MEM_SED_COUNT_MASK 0x00300000L
+#define MMEA0_EDC_CNT2_VG20__MAM_D3MEM_SED_COUNT_MASK 0x00C00000L
+//MMEA1_EDC_CNT
+#define MMEA1_EDC_CNT_VG20__DRAMRD_CMDMEM_SEC_COUNT__SHIFT 0x0
+#define MMEA1_EDC_CNT_VG20__DRAMRD_CMDMEM_DED_COUNT__SHIFT 0x2
+#define MMEA1_EDC_CNT_VG20__DRAMWR_CMDMEM_SEC_COUNT__SHIFT 0x4
+#define MMEA1_EDC_CNT_VG20__DRAMWR_CMDMEM_DED_COUNT__SHIFT 0x6
+#define MMEA1_EDC_CNT_VG20__DRAMWR_DATAMEM_SEC_COUNT__SHIFT 0x8
+#define MMEA1_EDC_CNT_VG20__DRAMWR_DATAMEM_DED_COUNT__SHIFT 0xa
+#define MMEA1_EDC_CNT_VG20__RRET_TAGMEM_SEC_COUNT__SHIFT 0xc
+#define MMEA1_EDC_CNT_VG20__RRET_TAGMEM_DED_COUNT__SHIFT 0xe
+#define MMEA1_EDC_CNT_VG20__WRET_TAGMEM_SEC_COUNT__SHIFT 0x10
+#define MMEA1_EDC_CNT_VG20__WRET_TAGMEM_DED_COUNT__SHIFT 0x12
+#define MMEA1_EDC_CNT_VG20__DRAMRD_PAGEMEM_SED_COUNT__SHIFT 0x14
+#define MMEA1_EDC_CNT_VG20__DRAMWR_PAGEMEM_SED_COUNT__SHIFT 0x16
+#define MMEA1_EDC_CNT_VG20__IORD_CMDMEM_SED_COUNT__SHIFT 0x18
+#define MMEA1_EDC_CNT_VG20__IOWR_CMDMEM_SED_COUNT__SHIFT 0x1a
+#define MMEA1_EDC_CNT_VG20__IOWR_DATAMEM_SED_COUNT__SHIFT 0x1c
+#define MMEA1_EDC_CNT_VG20__DRAMRD_CMDMEM_SEC_COUNT_MASK 0x00000003L
+#define MMEA1_EDC_CNT_VG20__DRAMRD_CMDMEM_DED_COUNT_MASK 0x0000000CL
+#define MMEA1_EDC_CNT_VG20__DRAMWR_CMDMEM_SEC_COUNT_MASK 0x00000030L
+#define MMEA1_EDC_CNT_VG20__DRAMWR_CMDMEM_DED_COUNT_MASK 0x000000C0L
+#define MMEA1_EDC_CNT_VG20__DRAMWR_DATAMEM_SEC_COUNT_MASK 0x00000300L
+#define MMEA1_EDC_CNT_VG20__DRAMWR_DATAMEM_DED_COUNT_MASK 0x00000C00L
+#define MMEA1_EDC_CNT_VG20__RRET_TAGMEM_SEC_COUNT_MASK 0x00003000L
+#define MMEA1_EDC_CNT_VG20__RRET_TAGMEM_DED_COUNT_MASK 0x0000C000L
+#define MMEA1_EDC_CNT_VG20__WRET_TAGMEM_SEC_COUNT_MASK 0x00030000L
+#define MMEA1_EDC_CNT_VG20__WRET_TAGMEM_DED_COUNT_MASK 0x000C0000L
+#define MMEA1_EDC_CNT_VG20__DRAMRD_PAGEMEM_SED_COUNT_MASK 0x00300000L
+#define MMEA1_EDC_CNT_VG20__DRAMWR_PAGEMEM_SED_COUNT_MASK 0x00C00000L
+#define MMEA1_EDC_CNT_VG20__IORD_CMDMEM_SED_COUNT_MASK 0x03000000L
+#define MMEA1_EDC_CNT_VG20__IOWR_CMDMEM_SED_COUNT_MASK 0x0C000000L
+#define MMEA1_EDC_CNT_VG20__IOWR_DATAMEM_SED_COUNT_MASK 0x30000000L
+//MMEA1_EDC_CNT2
+#define MMEA1_EDC_CNT2_VG20__GMIRD_CMDMEM_SEC_COUNT__SHIFT 0x0
+#define MMEA1_EDC_CNT2_VG20__GMIRD_CMDMEM_DED_COUNT__SHIFT 0x2
+#define MMEA1_EDC_CNT2_VG20__GMIWR_CMDMEM_SEC_COUNT__SHIFT 0x4
+#define MMEA1_EDC_CNT2_VG20__GMIWR_CMDMEM_DED_COUNT__SHIFT 0x6
+#define MMEA1_EDC_CNT2_VG20__GMIWR_DATAMEM_SEC_COUNT__SHIFT 0x8
+#define MMEA1_EDC_CNT2_VG20__GMIWR_DATAMEM_DED_COUNT__SHIFT 0xa
+#define MMEA1_EDC_CNT2_VG20__GMIRD_PAGEMEM_SED_COUNT__SHIFT 0xc
+#define MMEA1_EDC_CNT2_VG20__GMIWR_PAGEMEM_SED_COUNT__SHIFT 0xe
+#define MMEA1_EDC_CNT2_VG20__MAM_D0MEM_SED_COUNT__SHIFT 0x10
+#define MMEA1_EDC_CNT2_VG20__MAM_D1MEM_SED_COUNT__SHIFT 0x12
+#define MMEA1_EDC_CNT2_VG20__MAM_D2MEM_SED_COUNT__SHIFT 0x14
+#define MMEA1_EDC_CNT2_VG20__MAM_D3MEM_SED_COUNT__SHIFT 0x16
+#define MMEA1_EDC_CNT2_VG20__GMIRD_CMDMEM_SEC_COUNT_MASK 0x00000003L
+#define MMEA1_EDC_CNT2_VG20__GMIRD_CMDMEM_DED_COUNT_MASK 0x0000000CL
+#define MMEA1_EDC_CNT2_VG20__GMIWR_CMDMEM_SEC_COUNT_MASK 0x00000030L
+#define MMEA1_EDC_CNT2_VG20__GMIWR_CMDMEM_DED_COUNT_MASK 0x000000C0L
+#define MMEA1_EDC_CNT2_VG20__GMIWR_DATAMEM_SEC_COUNT_MASK 0x00000300L
+#define MMEA1_EDC_CNT2_VG20__GMIWR_DATAMEM_DED_COUNT_MASK 0x00000C00L
+#define MMEA1_EDC_CNT2_VG20__GMIRD_PAGEMEM_SED_COUNT_MASK 0x00003000L
+#define MMEA1_EDC_CNT2_VG20__GMIWR_PAGEMEM_SED_COUNT_MASK 0x0000C000L
+#define MMEA1_EDC_CNT2_VG20__MAM_D0MEM_SED_COUNT_MASK 0x00030000L
+#define MMEA1_EDC_CNT2_VG20__MAM_D1MEM_SED_COUNT_MASK 0x000C0000L
+#define MMEA1_EDC_CNT2_VG20__MAM_D2MEM_SED_COUNT_MASK 0x00300000L
+#define MMEA1_EDC_CNT2_VG20__MAM_D3MEM_SED_COUNT_MASK 0x00C00000L
+
+// addressBlock: mmhub_utcl2_vmsharedpfdec
+//MC_VM_XGMI_LFB_CNTL
+#define MC_VM_XGMI_LFB_CNTL__PF_LFB_REGION__SHIFT 0x0
+#define MC_VM_XGMI_LFB_CNTL__PF_MAX_REGION__SHIFT 0x4
+#define MC_VM_XGMI_LFB_CNTL__PF_LFB_REGION_MASK 0x00000007L
+#define MC_VM_XGMI_LFB_CNTL__PF_MAX_REGION_MASK 0x00000070L
+//MC_VM_XGMI_LFB_SIZE
+#define MC_VM_XGMI_LFB_SIZE__PF_LFB_SIZE__SHIFT 0x0
+#define MC_VM_XGMI_LFB_SIZE__PF_LFB_SIZE_MASK 0x0000FFFFL
#endif
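
In the MMEA EDC counters added above, every field is two bits wide, per the masks. Going by the field names, SEC counts single-bit errors corrected, DED double-bit errors detected, and SED single-bit errors detected; the exact counter semantics are an assumption here. A minimal decode sketch for one raw MMEA0_EDC_CNT_VG20 readout:

/* Sketch: split a raw MMEA0_EDC_CNT_VG20 readout into corrected (SEC)
 * and uncorrectable (DED) DRAM-path error tallies using the masks above. */
static void mmea0_edc_cnt_decode(uint32_t cnt, uint32_t *sec, uint32_t *ded)
{
	*sec = ((cnt & MMEA0_EDC_CNT_VG20__DRAMRD_CMDMEM_SEC_COUNT_MASK) >>
		MMEA0_EDC_CNT_VG20__DRAMRD_CMDMEM_SEC_COUNT__SHIFT) +
	       ((cnt & MMEA0_EDC_CNT_VG20__DRAMWR_CMDMEM_SEC_COUNT_MASK) >>
		MMEA0_EDC_CNT_VG20__DRAMWR_CMDMEM_SEC_COUNT__SHIFT) +
	       ((cnt & MMEA0_EDC_CNT_VG20__DRAMWR_DATAMEM_SEC_COUNT_MASK) >>
		MMEA0_EDC_CNT_VG20__DRAMWR_DATAMEM_SEC_COUNT__SHIFT);

	*ded = ((cnt & MMEA0_EDC_CNT_VG20__DRAMRD_CMDMEM_DED_COUNT_MASK) >>
		MMEA0_EDC_CNT_VG20__DRAMRD_CMDMEM_DED_COUNT__SHIFT) +
	       ((cnt & MMEA0_EDC_CNT_VG20__DRAMWR_CMDMEM_DED_COUNT_MASK) >>
		MMEA0_EDC_CNT_VG20__DRAMWR_CMDMEM_DED_COUNT__SHIFT) +
	       ((cnt & MMEA0_EDC_CNT_VG20__DRAMWR_DATAMEM_DED_COUNT_MASK) >>
		MMEA0_EDC_CNT_VG20__DRAMWR_DATAMEM_DED_COUNT__SHIFT);
}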
diff --git a/drivers/gpu/drm/amd/include/asic_reg/mmhub/mmhub_9_4_0_offset.h b/drivers/gpu/drm/amd/include/asic_reg/mmhub/mmhub_9_4_0_offset.h
deleted file mode 100644
index f2ae3a58949e..000000000000
--- a/drivers/gpu/drm/amd/include/asic_reg/mmhub/mmhub_9_4_0_offset.h
+++ /dev/null
@@ -1,53 +0,0 @@
-/*
- * Copyright (C) 2018 Advanced Micro Devices, Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included
- * in all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
- * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN
- * AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
- */
-#ifndef _mmhub_9_4_0_OFFSET_HEADER
-#define _mmhub_9_4_0_OFFSET_HEADER
-
-/* MMEA */
-#define mmMMEA0_SDP_ARB_FINAL_VG20 0x01ee
-#define mmMMEA0_SDP_ARB_FINAL_VG20_BASE_IDX 0
-#define mmMMEA0_EDC_CNT_VG20 0x0206
-#define mmMMEA0_EDC_CNT_VG20_BASE_IDX 0
-#define mmMMEA0_EDC_CNT2_VG20 0x0207
-#define mmMMEA0_EDC_CNT2_VG20_BASE_IDX 0
-#define mmMMEA0_EDC_MODE_VG20 0x0210
-#define mmMMEA0_EDC_MODE_VG20_BASE_IDX 0
-#define mmMMEA0_ERR_STATUS_VG20 0x0211
-#define mmMMEA0_ERR_STATUS_VG20_BASE_IDX 0
-#define mmMMEA1_SDP_ARB_FINAL_VG20 0x032e
-#define mmMMEA1_SDP_ARB_FINAL_VG20_BASE_IDX 0
-#define mmMMEA1_EDC_CNT_VG20 0x0346
-#define mmMMEA1_EDC_CNT_VG20_BASE_IDX 0
-#define mmMMEA1_EDC_CNT2_VG20 0x0347
-#define mmMMEA1_EDC_CNT2_VG20_BASE_IDX 0
-#define mmMMEA1_EDC_MODE_VG20 0x0350
-#define mmMMEA1_EDC_MODE_VG20_BASE_IDX 0
-#define mmMMEA1_ERR_STATUS_VG20 0x0351
-#define mmMMEA1_ERR_STATUS_VG20_BASE_IDX 0
-
-// addressBlock: mmhub_utcl2_vmsharedpfdec
-// base address: 0x6a040
-#define mmMC_VM_XGMI_LFB_CNTL 0x0823
-#define mmMC_VM_XGMI_LFB_CNTL_BASE_IDX 0
-#define mmMC_VM_XGMI_LFB_SIZE 0x0824
-#define mmMC_VM_XGMI_LFB_SIZE_BASE_IDX 0
-
-#endif
diff --git a/drivers/gpu/drm/amd/include/asic_reg/mmhub/mmhub_9_4_0_sh_mask.h b/drivers/gpu/drm/amd/include/asic_reg/mmhub/mmhub_9_4_0_sh_mask.h
deleted file mode 100644
index c24259ed12a1..000000000000
--- a/drivers/gpu/drm/amd/include/asic_reg/mmhub/mmhub_9_4_0_sh_mask.h
+++ /dev/null
@@ -1,257 +0,0 @@
-/*
- * Copyright (C) 2018 Advanced Micro Devices, Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included
- * in all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
- * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN
- * AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
- */
-#ifndef _mmhub_9_4_0_SH_MASK_HEADER
-#define _mmhub_9_4_0_SH_MASK_HEADER
-
-//MMEA0_SDP_ARB_FINAL
-#define MMEA0_SDP_ARB_FINAL__DRAM_BURST_LIMIT__SHIFT 0x0
-#define MMEA0_SDP_ARB_FINAL__GMI_BURST_LIMIT__SHIFT 0x5
-#define MMEA0_SDP_ARB_FINAL__IO_BURST_LIMIT__SHIFT 0xa
-#define MMEA0_SDP_ARB_FINAL__BURST_LIMIT_MULTIPLIER__SHIFT 0xf
-#define MMEA0_SDP_ARB_FINAL__RDONLY_VC0__SHIFT 0x11
-#define MMEA0_SDP_ARB_FINAL__RDONLY_VC1__SHIFT 0x12
-#define MMEA0_SDP_ARB_FINAL__RDONLY_VC2__SHIFT 0x13
-#define MMEA0_SDP_ARB_FINAL__RDONLY_VC3__SHIFT 0x14
-#define MMEA0_SDP_ARB_FINAL__RDONLY_VC4__SHIFT 0x15
-#define MMEA0_SDP_ARB_FINAL__RDONLY_VC5__SHIFT 0x16
-#define MMEA0_SDP_ARB_FINAL__RDONLY_VC6__SHIFT 0x17
-#define MMEA0_SDP_ARB_FINAL__RDONLY_VC7__SHIFT 0x18
-#define MMEA0_SDP_ARB_FINAL__ERREVENT_ON_ERROR__SHIFT 0x19
-#define MMEA0_SDP_ARB_FINAL__HALTREQ_ON_ERROR__SHIFT 0x1a
-#define MMEA0_SDP_ARB_FINAL__DRAM_BURST_LIMIT_MASK 0x0000001FL
-#define MMEA0_SDP_ARB_FINAL__GMI_BURST_LIMIT_MASK 0x000003E0L
-#define MMEA0_SDP_ARB_FINAL__IO_BURST_LIMIT_MASK 0x00007C00L
-#define MMEA0_SDP_ARB_FINAL__BURST_LIMIT_MULTIPLIER_MASK 0x00018000L
-#define MMEA0_SDP_ARB_FINAL__RDONLY_VC0_MASK 0x00020000L
-#define MMEA0_SDP_ARB_FINAL__RDONLY_VC1_MASK 0x00040000L
-#define MMEA0_SDP_ARB_FINAL__RDONLY_VC2_MASK 0x00080000L
-#define MMEA0_SDP_ARB_FINAL__RDONLY_VC3_MASK 0x00100000L
-#define MMEA0_SDP_ARB_FINAL__RDONLY_VC4_MASK 0x00200000L
-#define MMEA0_SDP_ARB_FINAL__RDONLY_VC5_MASK 0x00400000L
-#define MMEA0_SDP_ARB_FINAL__RDONLY_VC6_MASK 0x00800000L
-#define MMEA0_SDP_ARB_FINAL__RDONLY_VC7_MASK 0x01000000L
-#define MMEA0_SDP_ARB_FINAL__ERREVENT_ON_ERROR_MASK 0x02000000L
-#define MMEA0_SDP_ARB_FINAL__HALTREQ_ON_ERROR_MASK 0x04000000L
-//MMEA0_EDC_CNT
-#define MMEA0_EDC_CNT__DRAMRD_CMDMEM_SEC_COUNT__SHIFT 0x0
-#define MMEA0_EDC_CNT__DRAMRD_CMDMEM_DED_COUNT__SHIFT 0x2
-#define MMEA0_EDC_CNT__DRAMWR_CMDMEM_SEC_COUNT__SHIFT 0x4
-#define MMEA0_EDC_CNT__DRAMWR_CMDMEM_DED_COUNT__SHIFT 0x6
-#define MMEA0_EDC_CNT__DRAMWR_DATAMEM_SEC_COUNT__SHIFT 0x8
-#define MMEA0_EDC_CNT__DRAMWR_DATAMEM_DED_COUNT__SHIFT 0xa
-#define MMEA0_EDC_CNT__RRET_TAGMEM_SEC_COUNT__SHIFT 0xc
-#define MMEA0_EDC_CNT__RRET_TAGMEM_DED_COUNT__SHIFT 0xe
-#define MMEA0_EDC_CNT__WRET_TAGMEM_SEC_COUNT__SHIFT 0x10
-#define MMEA0_EDC_CNT__WRET_TAGMEM_DED_COUNT__SHIFT 0x12
-#define MMEA0_EDC_CNT__DRAMRD_PAGEMEM_SED_COUNT__SHIFT 0x14
-#define MMEA0_EDC_CNT__DRAMWR_PAGEMEM_SED_COUNT__SHIFT 0x16
-#define MMEA0_EDC_CNT__IORD_CMDMEM_SED_COUNT__SHIFT 0x18
-#define MMEA0_EDC_CNT__IOWR_CMDMEM_SED_COUNT__SHIFT 0x1a
-#define MMEA0_EDC_CNT__IOWR_DATAMEM_SED_COUNT__SHIFT 0x1c
-#define MMEA0_EDC_CNT__DRAMRD_CMDMEM_SEC_COUNT_MASK 0x00000003L
-#define MMEA0_EDC_CNT__DRAMRD_CMDMEM_DED_COUNT_MASK 0x0000000CL
-#define MMEA0_EDC_CNT__DRAMWR_CMDMEM_SEC_COUNT_MASK 0x00000030L
-#define MMEA0_EDC_CNT__DRAMWR_CMDMEM_DED_COUNT_MASK 0x000000C0L
-#define MMEA0_EDC_CNT__DRAMWR_DATAMEM_SEC_COUNT_MASK 0x00000300L
-#define MMEA0_EDC_CNT__DRAMWR_DATAMEM_DED_COUNT_MASK 0x00000C00L
-#define MMEA0_EDC_CNT__RRET_TAGMEM_SEC_COUNT_MASK 0x00003000L
-#define MMEA0_EDC_CNT__RRET_TAGMEM_DED_COUNT_MASK 0x0000C000L
-#define MMEA0_EDC_CNT__WRET_TAGMEM_SEC_COUNT_MASK 0x00030000L
-#define MMEA0_EDC_CNT__WRET_TAGMEM_DED_COUNT_MASK 0x000C0000L
-#define MMEA0_EDC_CNT__DRAMRD_PAGEMEM_SED_COUNT_MASK 0x00300000L
-#define MMEA0_EDC_CNT__DRAMWR_PAGEMEM_SED_COUNT_MASK 0x00C00000L
-#define MMEA0_EDC_CNT__IORD_CMDMEM_SED_COUNT_MASK 0x03000000L
-#define MMEA0_EDC_CNT__IOWR_CMDMEM_SED_COUNT_MASK 0x0C000000L
-#define MMEA0_EDC_CNT__IOWR_DATAMEM_SED_COUNT_MASK 0x30000000L
-//MMEA0_EDC_CNT2
-#define MMEA0_EDC_CNT2__GMIRD_CMDMEM_SEC_COUNT__SHIFT 0x0
-#define MMEA0_EDC_CNT2__GMIRD_CMDMEM_DED_COUNT__SHIFT 0x2
-#define MMEA0_EDC_CNT2__GMIWR_CMDMEM_SEC_COUNT__SHIFT 0x4
-#define MMEA0_EDC_CNT2__GMIWR_CMDMEM_DED_COUNT__SHIFT 0x6
-#define MMEA0_EDC_CNT2__GMIWR_DATAMEM_SEC_COUNT__SHIFT 0x8
-#define MMEA0_EDC_CNT2__GMIWR_DATAMEM_DED_COUNT__SHIFT 0xa
-#define MMEA0_EDC_CNT2__GMIRD_PAGEMEM_SED_COUNT__SHIFT 0xc
-#define MMEA0_EDC_CNT2__GMIWR_PAGEMEM_SED_COUNT__SHIFT 0xe
-#define MMEA0_EDC_CNT2__MAM_D0MEM_SED_COUNT__SHIFT 0x10
-#define MMEA0_EDC_CNT2__MAM_D1MEM_SED_COUNT__SHIFT 0x12
-#define MMEA0_EDC_CNT2__MAM_D2MEM_SED_COUNT__SHIFT 0x14
-#define MMEA0_EDC_CNT2__MAM_D3MEM_SED_COUNT__SHIFT 0x16
-#define MMEA0_EDC_CNT2__GMIRD_CMDMEM_SEC_COUNT_MASK 0x00000003L
-#define MMEA0_EDC_CNT2__GMIRD_CMDMEM_DED_COUNT_MASK 0x0000000CL
-#define MMEA0_EDC_CNT2__GMIWR_CMDMEM_SEC_COUNT_MASK 0x00000030L
-#define MMEA0_EDC_CNT2__GMIWR_CMDMEM_DED_COUNT_MASK 0x000000C0L
-#define MMEA0_EDC_CNT2__GMIWR_DATAMEM_SEC_COUNT_MASK 0x00000300L
-#define MMEA0_EDC_CNT2__GMIWR_DATAMEM_DED_COUNT_MASK 0x00000C00L
-#define MMEA0_EDC_CNT2__GMIRD_PAGEMEM_SED_COUNT_MASK 0x00003000L
-#define MMEA0_EDC_CNT2__GMIWR_PAGEMEM_SED_COUNT_MASK 0x0000C000L
-#define MMEA0_EDC_CNT2__MAM_D0MEM_SED_COUNT_MASK 0x00030000L
-#define MMEA0_EDC_CNT2__MAM_D1MEM_SED_COUNT_MASK 0x000C0000L
-#define MMEA0_EDC_CNT2__MAM_D2MEM_SED_COUNT_MASK 0x00300000L
-#define MMEA0_EDC_CNT2__MAM_D3MEM_SED_COUNT_MASK 0x00C00000L
-//MMEA0_EDC_MODE
-#define MMEA0_EDC_MODE__COUNT_FED_OUT__SHIFT 0x10
-#define MMEA0_EDC_MODE__GATE_FUE__SHIFT 0x11
-#define MMEA0_EDC_MODE__DED_MODE__SHIFT 0x14
-#define MMEA0_EDC_MODE__PROP_FED__SHIFT 0x1d
-#define MMEA0_EDC_MODE__BYPASS__SHIFT 0x1f
-#define MMEA0_EDC_MODE__COUNT_FED_OUT_MASK 0x00010000L
-#define MMEA0_EDC_MODE__GATE_FUE_MASK 0x00020000L
-#define MMEA0_EDC_MODE__DED_MODE_MASK 0x00300000L
-#define MMEA0_EDC_MODE__PROP_FED_MASK 0x20000000L
-#define MMEA0_EDC_MODE__BYPASS_MASK 0x80000000L
-//MMEA0_ERR_STATUS
-#define MMEA0_ERR_STATUS__SDP_RDRSP_STATUS__SHIFT 0x0
-#define MMEA0_ERR_STATUS__SDP_WRRSP_STATUS__SHIFT 0x4
-#define MMEA0_ERR_STATUS__SDP_RDRSP_DATASTATUS__SHIFT 0x8
-#define MMEA0_ERR_STATUS__SDP_RDRSP_DATAPARITY_ERROR__SHIFT 0xa
-#define MMEA0_ERR_STATUS__CLEAR_ERROR_STATUS__SHIFT 0xb
-#define MMEA0_ERR_STATUS__BUSY_ON_ERROR__SHIFT 0xc
-#define MMEA0_ERR_STATUS__FUE_FLAG__SHIFT 0xd
-#define MMEA0_ERR_STATUS__SDP_RDRSP_STATUS_MASK 0x0000000FL
-#define MMEA0_ERR_STATUS__SDP_WRRSP_STATUS_MASK 0x000000F0L
-#define MMEA0_ERR_STATUS__SDP_RDRSP_DATASTATUS_MASK 0x00000300L
-#define MMEA0_ERR_STATUS__SDP_RDRSP_DATAPARITY_ERROR_MASK 0x00000400L
-#define MMEA0_ERR_STATUS__CLEAR_ERROR_STATUS_MASK 0x00000800L
-#define MMEA0_ERR_STATUS__BUSY_ON_ERROR_MASK 0x00001000L
-#define MMEA0_ERR_STATUS__FUE_FLAG_MASK 0x00002000L
-//MMEA1_SDP_ARB_FINAL
-#define MMEA1_SDP_ARB_FINAL__DRAM_BURST_LIMIT__SHIFT 0x0
-#define MMEA1_SDP_ARB_FINAL__GMI_BURST_LIMIT__SHIFT 0x5
-#define MMEA1_SDP_ARB_FINAL__IO_BURST_LIMIT__SHIFT 0xa
-#define MMEA1_SDP_ARB_FINAL__BURST_LIMIT_MULTIPLIER__SHIFT 0xf
-#define MMEA1_SDP_ARB_FINAL__RDONLY_VC0__SHIFT 0x11
-#define MMEA1_SDP_ARB_FINAL__RDONLY_VC1__SHIFT 0x12
-#define MMEA1_SDP_ARB_FINAL__RDONLY_VC2__SHIFT 0x13
-#define MMEA1_SDP_ARB_FINAL__RDONLY_VC3__SHIFT 0x14
-#define MMEA1_SDP_ARB_FINAL__RDONLY_VC4__SHIFT 0x15
-#define MMEA1_SDP_ARB_FINAL__RDONLY_VC5__SHIFT 0x16
-#define MMEA1_SDP_ARB_FINAL__RDONLY_VC6__SHIFT 0x17
-#define MMEA1_SDP_ARB_FINAL__RDONLY_VC7__SHIFT 0x18
-#define MMEA1_SDP_ARB_FINAL__ERREVENT_ON_ERROR__SHIFT 0x19
-#define MMEA1_SDP_ARB_FINAL__HALTREQ_ON_ERROR__SHIFT 0x1a
-#define MMEA1_SDP_ARB_FINAL__DRAM_BURST_LIMIT_MASK 0x0000001FL
-#define MMEA1_SDP_ARB_FINAL__GMI_BURST_LIMIT_MASK 0x000003E0L
-#define MMEA1_SDP_ARB_FINAL__IO_BURST_LIMIT_MASK 0x00007C00L
-#define MMEA1_SDP_ARB_FINAL__BURST_LIMIT_MULTIPLIER_MASK 0x00018000L
-#define MMEA1_SDP_ARB_FINAL__RDONLY_VC0_MASK 0x00020000L
-#define MMEA1_SDP_ARB_FINAL__RDONLY_VC1_MASK 0x00040000L
-#define MMEA1_SDP_ARB_FINAL__RDONLY_VC2_MASK 0x00080000L
-#define MMEA1_SDP_ARB_FINAL__RDONLY_VC3_MASK 0x00100000L
-#define MMEA1_SDP_ARB_FINAL__RDONLY_VC4_MASK 0x00200000L
-#define MMEA1_SDP_ARB_FINAL__RDONLY_VC5_MASK 0x00400000L
-#define MMEA1_SDP_ARB_FINAL__RDONLY_VC6_MASK 0x00800000L
-#define MMEA1_SDP_ARB_FINAL__RDONLY_VC7_MASK 0x01000000L
-#define MMEA1_SDP_ARB_FINAL__ERREVENT_ON_ERROR_MASK 0x02000000L
-#define MMEA1_SDP_ARB_FINAL__HALTREQ_ON_ERROR_MASK 0x04000000L
-//MMEA1_EDC_CNT
-#define MMEA1_EDC_CNT__DRAMRD_CMDMEM_SEC_COUNT__SHIFT 0x0
-#define MMEA1_EDC_CNT__DRAMRD_CMDMEM_DED_COUNT__SHIFT 0x2
-#define MMEA1_EDC_CNT__DRAMWR_CMDMEM_SEC_COUNT__SHIFT 0x4
-#define MMEA1_EDC_CNT__DRAMWR_CMDMEM_DED_COUNT__SHIFT 0x6
-#define MMEA1_EDC_CNT__DRAMWR_DATAMEM_SEC_COUNT__SHIFT 0x8
-#define MMEA1_EDC_CNT__DRAMWR_DATAMEM_DED_COUNT__SHIFT 0xa
-#define MMEA1_EDC_CNT__RRET_TAGMEM_SEC_COUNT__SHIFT 0xc
-#define MMEA1_EDC_CNT__RRET_TAGMEM_DED_COUNT__SHIFT 0xe
-#define MMEA1_EDC_CNT__WRET_TAGMEM_SEC_COUNT__SHIFT 0x10
-#define MMEA1_EDC_CNT__WRET_TAGMEM_DED_COUNT__SHIFT 0x12
-#define MMEA1_EDC_CNT__DRAMRD_PAGEMEM_SED_COUNT__SHIFT 0x14
-#define MMEA1_EDC_CNT__DRAMWR_PAGEMEM_SED_COUNT__SHIFT 0x16
-#define MMEA1_EDC_CNT__IORD_CMDMEM_SED_COUNT__SHIFT 0x18
-#define MMEA1_EDC_CNT__IOWR_CMDMEM_SED_COUNT__SHIFT 0x1a
-#define MMEA1_EDC_CNT__IOWR_DATAMEM_SED_COUNT__SHIFT 0x1c
-#define MMEA1_EDC_CNT__DRAMRD_CMDMEM_SEC_COUNT_MASK 0x00000003L
-#define MMEA1_EDC_CNT__DRAMRD_CMDMEM_DED_COUNT_MASK 0x0000000CL
-#define MMEA1_EDC_CNT__DRAMWR_CMDMEM_SEC_COUNT_MASK 0x00000030L
-#define MMEA1_EDC_CNT__DRAMWR_CMDMEM_DED_COUNT_MASK 0x000000C0L
-#define MMEA1_EDC_CNT__DRAMWR_DATAMEM_SEC_COUNT_MASK 0x00000300L
-#define MMEA1_EDC_CNT__DRAMWR_DATAMEM_DED_COUNT_MASK 0x00000C00L
-#define MMEA1_EDC_CNT__RRET_TAGMEM_SEC_COUNT_MASK 0x00003000L
-#define MMEA1_EDC_CNT__RRET_TAGMEM_DED_COUNT_MASK 0x0000C000L
-#define MMEA1_EDC_CNT__WRET_TAGMEM_SEC_COUNT_MASK 0x00030000L
-#define MMEA1_EDC_CNT__WRET_TAGMEM_DED_COUNT_MASK 0x000C0000L
-#define MMEA1_EDC_CNT__DRAMRD_PAGEMEM_SED_COUNT_MASK 0x00300000L
-#define MMEA1_EDC_CNT__DRAMWR_PAGEMEM_SED_COUNT_MASK 0x00C00000L
-#define MMEA1_EDC_CNT__IORD_CMDMEM_SED_COUNT_MASK 0x03000000L
-#define MMEA1_EDC_CNT__IOWR_CMDMEM_SED_COUNT_MASK 0x0C000000L
-#define MMEA1_EDC_CNT__IOWR_DATAMEM_SED_COUNT_MASK 0x30000000L
-//MMEA1_EDC_CNT2
-#define MMEA1_EDC_CNT2__GMIRD_CMDMEM_SEC_COUNT__SHIFT 0x0
-#define MMEA1_EDC_CNT2__GMIRD_CMDMEM_DED_COUNT__SHIFT 0x2
-#define MMEA1_EDC_CNT2__GMIWR_CMDMEM_SEC_COUNT__SHIFT 0x4
-#define MMEA1_EDC_CNT2__GMIWR_CMDMEM_DED_COUNT__SHIFT 0x6
-#define MMEA1_EDC_CNT2__GMIWR_DATAMEM_SEC_COUNT__SHIFT 0x8
-#define MMEA1_EDC_CNT2__GMIWR_DATAMEM_DED_COUNT__SHIFT 0xa
-#define MMEA1_EDC_CNT2__GMIRD_PAGEMEM_SED_COUNT__SHIFT 0xc
-#define MMEA1_EDC_CNT2__GMIWR_PAGEMEM_SED_COUNT__SHIFT 0xe
-#define MMEA1_EDC_CNT2__MAM_D0MEM_SED_COUNT__SHIFT 0x10
-#define MMEA1_EDC_CNT2__MAM_D1MEM_SED_COUNT__SHIFT 0x12
-#define MMEA1_EDC_CNT2__MAM_D2MEM_SED_COUNT__SHIFT 0x14
-#define MMEA1_EDC_CNT2__MAM_D3MEM_SED_COUNT__SHIFT 0x16
-#define MMEA1_EDC_CNT2__GMIRD_CMDMEM_SEC_COUNT_MASK 0x00000003L
-#define MMEA1_EDC_CNT2__GMIRD_CMDMEM_DED_COUNT_MASK 0x0000000CL
-#define MMEA1_EDC_CNT2__GMIWR_CMDMEM_SEC_COUNT_MASK 0x00000030L
-#define MMEA1_EDC_CNT2__GMIWR_CMDMEM_DED_COUNT_MASK 0x000000C0L
-#define MMEA1_EDC_CNT2__GMIWR_DATAMEM_SEC_COUNT_MASK 0x00000300L
-#define MMEA1_EDC_CNT2__GMIWR_DATAMEM_DED_COUNT_MASK 0x00000C00L
-#define MMEA1_EDC_CNT2__GMIRD_PAGEMEM_SED_COUNT_MASK 0x00003000L
-#define MMEA1_EDC_CNT2__GMIWR_PAGEMEM_SED_COUNT_MASK 0x0000C000L
-#define MMEA1_EDC_CNT2__MAM_D0MEM_SED_COUNT_MASK 0x00030000L
-#define MMEA1_EDC_CNT2__MAM_D1MEM_SED_COUNT_MASK 0x000C0000L
-#define MMEA1_EDC_CNT2__MAM_D2MEM_SED_COUNT_MASK 0x00300000L
-#define MMEA1_EDC_CNT2__MAM_D3MEM_SED_COUNT_MASK 0x00C00000L
-//MMEA1_EDC_MODE
-#define MMEA1_EDC_MODE__COUNT_FED_OUT__SHIFT 0x10
-#define MMEA1_EDC_MODE__GATE_FUE__SHIFT 0x11
-#define MMEA1_EDC_MODE__DED_MODE__SHIFT 0x14
-#define MMEA1_EDC_MODE__PROP_FED__SHIFT 0x1d
-#define MMEA1_EDC_MODE__BYPASS__SHIFT 0x1f
-#define MMEA1_EDC_MODE__COUNT_FED_OUT_MASK 0x00010000L
-#define MMEA1_EDC_MODE__GATE_FUE_MASK 0x00020000L
-#define MMEA1_EDC_MODE__DED_MODE_MASK 0x00300000L
-#define MMEA1_EDC_MODE__PROP_FED_MASK 0x20000000L
-#define MMEA1_EDC_MODE__BYPASS_MASK 0x80000000L
-//MMEA1_ERR_STATUS
-#define MMEA1_ERR_STATUS__SDP_RDRSP_STATUS__SHIFT 0x0
-#define MMEA1_ERR_STATUS__SDP_WRRSP_STATUS__SHIFT 0x4
-#define MMEA1_ERR_STATUS__SDP_RDRSP_DATASTATUS__SHIFT 0x8
-#define MMEA1_ERR_STATUS__SDP_RDRSP_DATAPARITY_ERROR__SHIFT 0xa
-#define MMEA1_ERR_STATUS__CLEAR_ERROR_STATUS__SHIFT 0xb
-#define MMEA1_ERR_STATUS__BUSY_ON_ERROR__SHIFT 0xc
-#define MMEA1_ERR_STATUS__FUE_FLAG__SHIFT 0xd
-#define MMEA1_ERR_STATUS__SDP_RDRSP_STATUS_MASK 0x0000000FL
-#define MMEA1_ERR_STATUS__SDP_WRRSP_STATUS_MASK 0x000000F0L
-#define MMEA1_ERR_STATUS__SDP_RDRSP_DATASTATUS_MASK 0x00000300L
-#define MMEA1_ERR_STATUS__SDP_RDRSP_DATAPARITY_ERROR_MASK 0x00000400L
-#define MMEA1_ERR_STATUS__CLEAR_ERROR_STATUS_MASK 0x00000800L
-#define MMEA1_ERR_STATUS__BUSY_ON_ERROR_MASK 0x00001000L
-#define MMEA1_ERR_STATUS__FUE_FLAG_MASK 0x00002000L
-
-// addressBlock: mmhub_utcl2_vmsharedpfdec
-//MC_VM_XGMI_LFB_CNTL
-#define MC_VM_XGMI_LFB_CNTL__PF_LFB_REGION__SHIFT 0x0
-#define MC_VM_XGMI_LFB_CNTL__PF_MAX_REGION__SHIFT 0x4
-#define MC_VM_XGMI_LFB_CNTL__PF_LFB_REGION_MASK 0x00000007L
-#define MC_VM_XGMI_LFB_CNTL__PF_MAX_REGION_MASK 0x00000070L
-//MC_VM_XGMI_LFB_SIZE
-#define MC_VM_XGMI_LFB_SIZE__PF_LFB_SIZE__SHIFT 0x0
-#define MC_VM_XGMI_LFB_SIZE__PF_LFB_SIZE_MASK 0x0000FFFFL
-
-#endif
diff --git a/drivers/gpu/drm/amd/include/asic_reg/umc/umc_6_1_1_offset.h b/drivers/gpu/drm/amd/include/asic_reg/umc/umc_6_1_1_offset.h
index 043aa695d63f..0d6b594be775 100644
--- a/drivers/gpu/drm/amd/include/asic_reg/umc/umc_6_1_1_offset.h
+++ b/drivers/gpu/drm/amd/include/asic_reg/umc/umc_6_1_1_offset.h
@@ -27,5 +27,7 @@
#define mmUMCCH0_0_EccErrCnt_BASE_IDX 0
#define mmMCA_UMC_UMC0_MCUMC_STATUST0 0x03c2
#define mmMCA_UMC_UMC0_MCUMC_STATUST0_BASE_IDX 0
+#define mmMCA_UMC_UMC0_MCUMC_ADDRT0 0x03c4
+#define mmMCA_UMC_UMC0_MCUMC_ADDRT0_BASE_IDX 0
#endif
diff --git a/drivers/gpu/drm/amd/include/asic_reg/umc/umc_6_1_2_offset.h b/drivers/gpu/drm/amd/include/asic_reg/umc/umc_6_1_2_offset.h
new file mode 100644
index 000000000000..ce005c674a18
--- /dev/null
+++ b/drivers/gpu/drm/amd/include/asic_reg/umc/umc_6_1_2_offset.h
@@ -0,0 +1,33 @@
+/*
+ * Copyright (C) 2019 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included
+ * in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN
+ * AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+#ifndef _umc_6_1_2_OFFSET_HEADER
+#define _umc_6_1_2_OFFSET_HEADER
+
+#define mmUMCCH0_0_EccErrCntSel_ARCT 0x0360
+#define mmUMCCH0_0_EccErrCntSel_ARCT_BASE_IDX 1
+#define mmUMCCH0_0_EccErrCnt_ARCT 0x0361
+#define mmUMCCH0_0_EccErrCnt_ARCT_BASE_IDX 1
+#define mmMCA_UMC_UMC0_MCUMC_STATUST0_ARCT 0x03c2
+#define mmMCA_UMC_UMC0_MCUMC_STATUST0_ARCT_BASE_IDX 1
+#define mmMCA_UMC_UMC0_MCUMC_ADDRT0_ARCT 0x03c4
+#define mmMCA_UMC_UMC0_MCUMC_ADDRT0_ARCT_BASE_IDX 1
+
+#endif
diff --git a/drivers/gpu/drm/amd/include/asic_reg/umc/umc_6_1_2_sh_mask.h b/drivers/gpu/drm/amd/include/asic_reg/umc/umc_6_1_2_sh_mask.h
new file mode 100644
index 000000000000..a5a8c993ec3a
--- /dev/null
+++ b/drivers/gpu/drm/amd/include/asic_reg/umc/umc_6_1_2_sh_mask.h
@@ -0,0 +1,91 @@
+/*
+ * Copyright (C) 2020 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included
+ * in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN
+ * AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+#ifndef _umc_6_1_2_SH_MASK_HEADER
+#define _umc_6_1_2_SH_MASK_HEADER
+
+//UMCCH0_0_EccErrCntSel_ARCT
+#define UMCCH0_0_EccErrCntSel_ARCT__EccErrCntCsSel__SHIFT 0x0
+#define UMCCH0_0_EccErrCntSel_ARCT__EccErrInt__SHIFT 0xc
+#define UMCCH0_0_EccErrCntSel_ARCT__EccErrCntEn__SHIFT 0xf
+#define UMCCH0_0_EccErrCntSel_ARCT__EccErrCntCsSel_MASK 0x0000000FL
+#define UMCCH0_0_EccErrCntSel_ARCT__EccErrInt_MASK 0x00003000L
+#define UMCCH0_0_EccErrCntSel_ARCT__EccErrCntEn_MASK 0x00008000L
+//UMCCH0_0_EccErrCnt_ARCT
+#define UMCCH0_0_EccErrCnt_ARCT__EccErrCnt__SHIFT 0x0
+#define UMCCH0_0_EccErrCnt_ARCT__EccErrCnt_MASK 0x0000FFFFL
+//MCA_UMC_UMC0_MCUMC_STATUST0_ARCT
+#define MCA_UMC_UMC0_MCUMC_STATUST0_ARCT__ErrorCode__SHIFT 0x0
+#define MCA_UMC_UMC0_MCUMC_STATUST0_ARCT__ErrorCodeExt__SHIFT 0x10
+#define MCA_UMC_UMC0_MCUMC_STATUST0_ARCT__RESERV0__SHIFT 0x16
+#define MCA_UMC_UMC0_MCUMC_STATUST0_ARCT__ErrCoreId__SHIFT 0x20
+#define MCA_UMC_UMC0_MCUMC_STATUST0_ARCT__RESERV1__SHIFT 0x26
+#define MCA_UMC_UMC0_MCUMC_STATUST0_ARCT__Scrub__SHIFT 0x28
+#define MCA_UMC_UMC0_MCUMC_STATUST0_ARCT__RESERV2__SHIFT 0x29
+#define MCA_UMC_UMC0_MCUMC_STATUST0_ARCT__Poison__SHIFT 0x2b
+#define MCA_UMC_UMC0_MCUMC_STATUST0_ARCT__Deferred__SHIFT 0x2c
+#define MCA_UMC_UMC0_MCUMC_STATUST0_ARCT__UECC__SHIFT 0x2d
+#define MCA_UMC_UMC0_MCUMC_STATUST0_ARCT__CECC__SHIFT 0x2e
+#define MCA_UMC_UMC0_MCUMC_STATUST0_ARCT__RESERV3__SHIFT 0x2f
+#define MCA_UMC_UMC0_MCUMC_STATUST0_ARCT__Transparent__SHIFT 0x34
+#define MCA_UMC_UMC0_MCUMC_STATUST0_ARCT__SyndV__SHIFT 0x35
+#define MCA_UMC_UMC0_MCUMC_STATUST0_ARCT__RESERV4__SHIFT 0x36
+#define MCA_UMC_UMC0_MCUMC_STATUST0_ARCT__TCC__SHIFT 0x37
+#define MCA_UMC_UMC0_MCUMC_STATUST0_ARCT__ErrCoreIdVal__SHIFT 0x38
+#define MCA_UMC_UMC0_MCUMC_STATUST0_ARCT__PCC__SHIFT 0x39
+#define MCA_UMC_UMC0_MCUMC_STATUST0_ARCT__AddrV__SHIFT 0x3a
+#define MCA_UMC_UMC0_MCUMC_STATUST0_ARCT__MiscV__SHIFT 0x3b
+#define MCA_UMC_UMC0_MCUMC_STATUST0_ARCT__En__SHIFT 0x3c
+#define MCA_UMC_UMC0_MCUMC_STATUST0_ARCT__UC__SHIFT 0x3d
+#define MCA_UMC_UMC0_MCUMC_STATUST0_ARCT__Overflow__SHIFT 0x3e
+#define MCA_UMC_UMC0_MCUMC_STATUST0_ARCT__Val__SHIFT 0x3f
+#define MCA_UMC_UMC0_MCUMC_STATUST0_ARCT__ErrorCode_MASK 0x000000000000FFFFL
+#define MCA_UMC_UMC0_MCUMC_STATUST0_ARCT__ErrorCodeExt_MASK 0x00000000003F0000L
+#define MCA_UMC_UMC0_MCUMC_STATUST0_ARCT__RESERV0_MASK 0x00000000FFC00000L
+#define MCA_UMC_UMC0_MCUMC_STATUST0_ARCT__ErrCoreId_MASK 0x0000003F00000000L
+#define MCA_UMC_UMC0_MCUMC_STATUST0_ARCT__RESERV1_MASK 0x000000C000000000L
+#define MCA_UMC_UMC0_MCUMC_STATUST0_ARCT__Scrub_MASK 0x0000010000000000L
+#define MCA_UMC_UMC0_MCUMC_STATUST0_ARCT__RESERV2_MASK 0x0000060000000000L
+#define MCA_UMC_UMC0_MCUMC_STATUST0_ARCT__Poison_MASK 0x0000080000000000L
+#define MCA_UMC_UMC0_MCUMC_STATUST0_ARCT__Deferred_MASK 0x0000100000000000L
+#define MCA_UMC_UMC0_MCUMC_STATUST0_ARCT__UECC_MASK 0x0000200000000000L
+#define MCA_UMC_UMC0_MCUMC_STATUST0_ARCT__CECC_MASK 0x0000400000000000L
+#define MCA_UMC_UMC0_MCUMC_STATUST0_ARCT__RESERV3_MASK 0x000F800000000000L
+#define MCA_UMC_UMC0_MCUMC_STATUST0_ARCT__Transparent_MASK 0x0010000000000000L
+#define MCA_UMC_UMC0_MCUMC_STATUST0_ARCT__SyndV_MASK 0x0020000000000000L
+#define MCA_UMC_UMC0_MCUMC_STATUST0_ARCT__RESERV4_MASK 0x0040000000000000L
+#define MCA_UMC_UMC0_MCUMC_STATUST0_ARCT__TCC_MASK 0x0080000000000000L
+#define MCA_UMC_UMC0_MCUMC_STATUST0_ARCT__ErrCoreIdVal_MASK 0x0100000000000000L
+#define MCA_UMC_UMC0_MCUMC_STATUST0_ARCT__PCC_MASK 0x0200000000000000L
+#define MCA_UMC_UMC0_MCUMC_STATUST0_ARCT__AddrV_MASK 0x0400000000000000L
+#define MCA_UMC_UMC0_MCUMC_STATUST0_ARCT__MiscV_MASK 0x0800000000000000L
+#define MCA_UMC_UMC0_MCUMC_STATUST0_ARCT__En_MASK 0x1000000000000000L
+#define MCA_UMC_UMC0_MCUMC_STATUST0_ARCT__UC_MASK 0x2000000000000000L
+#define MCA_UMC_UMC0_MCUMC_STATUST0_ARCT__Overflow_MASK 0x4000000000000000L
+#define MCA_UMC_UMC0_MCUMC_STATUST0_ARCT__Val_MASK 0x8000000000000000L
+//MCA_UMC_UMC0_MCUMC_ADDRT0_ARCT
+#define MCA_UMC_UMC0_MCUMC_ADDRT0_ARCT__ErrorAddr__SHIFT 0x0
+#define MCA_UMC_UMC0_MCUMC_ADDRT0_ARCT__LSB__SHIFT 0x38
+#define MCA_UMC_UMC0_MCUMC_ADDRT0_ARCT__Reserved__SHIFT 0x3e
+#define MCA_UMC_UMC0_MCUMC_ADDRT0_ARCT__ErrorAddr_MASK 0x00FFFFFFFFFFFFFFL
+#define MCA_UMC_UMC0_MCUMC_ADDRT0_ARCT__LSB_MASK 0x3F00000000000000L
+#define MCA_UMC_UMC0_MCUMC_ADDRT0_ARCT__Reserved_MASK 0xC000000000000000L
+
+#endif
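
The ARCT MCA status register above is 64 bits wide, so its masks span the full 64-bit range. A hedged sketch of the usual decode order -- check Val first, classify, and read the address only when AddrV is set; the helper and its caller are illustrative, not driver code:

#include <stdbool.h>
#include <stdint.h>

/* Sketch: classify one logged UMC error. Returns true for an
 * uncorrectable (UECC) error; *err_addr is filled only when valid. */
static bool umc_mca_decode(uint64_t status, uint64_t addr, uint64_t *err_addr)
{
	/* Val gates everything else: bit clear means no logged error. */
	if (!(status & MCA_UMC_UMC0_MCUMC_STATUST0_ARCT__Val_MASK))
		return false;

	if (status & MCA_UMC_UMC0_MCUMC_STATUST0_ARCT__AddrV_MASK)
		*err_addr = (addr &
			     MCA_UMC_UMC0_MCUMC_ADDRT0_ARCT__ErrorAddr_MASK) >>
			    MCA_UMC_UMC0_MCUMC_ADDRT0_ARCT__ErrorAddr__SHIFT;

	/* UECC flags an uncorrectable error, CECC a corrected one. */
	return !!(status & MCA_UMC_UMC0_MCUMC_STATUST0_ARCT__UECC_MASK);
}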
diff --git a/drivers/gpu/drm/amd/include/atomfirmware.h b/drivers/gpu/drm/amd/include/atomfirmware.h
index dd7cbc00a0aa..70146518174c 100644
--- a/drivers/gpu/drm/amd/include/atomfirmware.h
+++ b/drivers/gpu/drm/amd/include/atomfirmware.h
@@ -672,20 +672,6 @@ struct vram_usagebyfirmware_v2_1
uint16_t used_by_driver_in_kb;
};
-/* This is part of vram_usagebyfirmware_v2_1 */
-struct vram_reserve_block
-{
- uint32_t start_address_in_kb;
- uint16_t used_by_firmware_in_kb;
- uint16_t used_by_driver_in_kb;
-};
-
-/* Definitions for constance */
-enum atomfirmware_internal_constants
-{
- ONE_KiB = 0x400,
- ONE_MiB = 0x100000,
-};
/*
***************************************************************************
diff --git a/drivers/gpu/drm/amd/include/kgd_kfd_interface.h b/drivers/gpu/drm/amd/include/kgd_kfd_interface.h
index 2cd217e60125..a607b1034962 100644
--- a/drivers/gpu/drm/amd/include/kgd_kfd_interface.h
+++ b/drivers/gpu/drm/amd/include/kgd_kfd_interface.h
@@ -256,6 +256,10 @@ struct kfd2kgd_calls {
uint32_t wptr_shift, uint32_t wptr_mask,
struct mm_struct *mm);
+ int (*hiq_mqd_load)(struct kgd_dev *kgd, void *mqd,
+ uint32_t pipe_id, uint32_t queue_id,
+ uint32_t doorbell_off);
+
int (*hqd_sdma_load)(struct kgd_dev *kgd, void *mqd,
uint32_t __user *wptr, struct mm_struct *mm);
@@ -307,8 +311,6 @@ struct kfd2kgd_calls {
void (*set_vm_context_page_table_base)(struct kgd_dev *kgd,
uint32_t vmid, uint64_t page_table_base);
- int (*invalidate_tlbs)(struct kgd_dev *kgd, uint16_t pasid);
- int (*invalidate_tlbs_vmid)(struct kgd_dev *kgd, uint16_t vmid);
uint32_t (*read_vmid_from_vmfault_reg)(struct kgd_dev *kgd);
uint64_t (*get_hive_id)(struct kgd_dev *kgd);
diff --git a/drivers/gpu/drm/amd/powerplay/amd_powerplay.c b/drivers/gpu/drm/amd/powerplay/amd_powerplay.c
index 7932eb163a00..c195575366a3 100644
--- a/drivers/gpu/drm/amd/powerplay/amd_powerplay.c
+++ b/drivers/gpu/drm/amd/powerplay/amd_powerplay.c
@@ -48,7 +48,6 @@ static int amd_powerplay_create(struct amdgpu_device *adev)
hwmgr->adev = adev;
hwmgr->not_vf = !amdgpu_sriov_vf(adev);
- hwmgr->pm_en = (amdgpu_dpm && hwmgr->not_vf) ? true : false;
hwmgr->device = amdgpu_cgs_create_device(adev);
mutex_init(&hwmgr->smu_lock);
hwmgr->chip_family = adev->family;
@@ -928,9 +927,12 @@ static int pp_dpm_set_mp1_state(void *handle, enum pp_mp1_state mp1_state)
{
struct pp_hwmgr *hwmgr = handle;
- if (!hwmgr || !hwmgr->pm_en)
+ if (!hwmgr)
return -EINVAL;
+ if (!hwmgr->pm_en)
+ return 0;
+
if (hwmgr->hwmgr_func->set_mp1_state)
return hwmgr->hwmgr_func->set_mp1_state(hwmgr, mp1_state);
diff --git a/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c b/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c
index a23729d3174b..99469479e277 100644
--- a/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c
+++ b/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c
@@ -356,6 +356,35 @@ int smu_get_dpm_level_count(struct smu_context *smu, enum smu_clk_type clk_type,
return smu_get_dpm_freq_by_index(smu, clk_type, 0xff, value);
}
+int smu_get_dpm_level_range(struct smu_context *smu, enum smu_clk_type clk_type,
+ uint32_t *min_value, uint32_t *max_value)
+{
+ int ret = 0;
+ uint32_t level_count = 0;
+
+ if (!min_value && !max_value)
+ return -EINVAL;
+
+ if (min_value) {
+ /* By default, the level-0 clock value serves as the minimum. */
+ ret = smu_get_dpm_freq_by_index(smu, clk_type, 0, min_value);
+ if (ret)
+ return ret;
+ }
+
+ if (max_value) {
+ ret = smu_get_dpm_level_count(smu, clk_type, &level_count);
+ if (ret)
+ return ret;
+
+ ret = smu_get_dpm_freq_by_index(smu, clk_type, level_count - 1, max_value);
+ if (ret)
+ return ret;
+ }
+
+ return ret;
+}
+
bool smu_clk_dpm_is_enabled(struct smu_context *smu, enum smu_clk_type clk_type)
{
enum smu_feature_mask feature_id = 0;
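
The smu_get_dpm_level_range() helper added above treats each bound independently, so callers may pass NULL for the one they do not need. A usage sketch; the caller function is hypothetical, SMU_SCLK is the existing clock-type enumerator:

/* Hypothetical caller: fetch only the top SCLK DPM level. */
static void report_sclk_ceiling(struct smu_context *smu)
{
	uint32_t max_sclk = 0;

	/* min_value is NULL, so only the highest level is queried. */
	if (!smu_get_dpm_level_range(smu, SMU_SCLK, NULL, &max_sclk))
		pr_debug("SCLK DPM ceiling: %u\n", max_sclk);
}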
@@ -404,10 +433,10 @@ int smu_dpm_set_power_gate(struct smu_context *smu, uint32_t block_type,
switch (block_type) {
case AMD_IP_BLOCK_TYPE_UVD:
- ret = smu_dpm_set_uvd_enable(smu, gate);
+ ret = smu_dpm_set_uvd_enable(smu, !gate);
break;
case AMD_IP_BLOCK_TYPE_VCE:
- ret = smu_dpm_set_vce_enable(smu, gate);
+ ret = smu_dpm_set_vce_enable(smu, !gate);
break;
case AMD_IP_BLOCK_TYPE_GFX:
ret = smu_gfx_off_control(smu, gate);
@@ -415,6 +444,9 @@ int smu_dpm_set_power_gate(struct smu_context *smu, uint32_t block_type,
case AMD_IP_BLOCK_TYPE_SDMA:
ret = smu_powergate_sdma(smu, gate);
break;
+ case AMD_IP_BLOCK_TYPE_JPEG:
+ ret = smu_dpm_set_jpeg_enable(smu, !gate);
+ break;
default:
break;
}
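
The polarity flip above matters: smu_dpm_set_power_gate() takes gate == true to mean "power the block down", while the per-IP helpers (smu_dpm_set_uvd_enable() and friends) take an enable flag, hence the !gate. An illustrative call, not from the patch, with the third parameter assumed to be a bool:

/* Gate (power down) the JPEG block; internally this resolves to
 * smu_dpm_set_jpeg_enable(smu, false). */
static int power_down_jpeg(struct smu_context *smu)
{
	return smu_dpm_set_power_gate(smu, AMD_IP_BLOCK_TYPE_JPEG, true);
}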
@@ -487,26 +519,25 @@ int smu_update_table(struct smu_context *smu, enum smu_table_id table_index, int
{
struct smu_table_context *smu_table = &smu->smu_table;
struct amdgpu_device *adev = smu->adev;
- struct smu_table *table = NULL;
- int ret = 0;
+ struct smu_table *table = &smu_table->driver_table;
int table_id = smu_table_get_index(smu, table_index);
+ uint32_t table_size;
+ int ret = 0;
if (!table_data || table_id >= SMU_TABLE_COUNT || table_id < 0)
return -EINVAL;
- table = &smu_table->tables[table_index];
+ table_size = smu_table->tables[table_index].size;
- if (drv2smu)
- memcpy(table->cpu_addr, table_data, table->size);
+ if (drv2smu) {
+ memcpy(table->cpu_addr, table_data, table_size);
+ /*
+ * Flush the HDP cache to ensure that the content seen
+ * by the GPU is consistent with the CPU.
+ */
+ amdgpu_asic_flush_hdp(adev, NULL);
+ }
- ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetDriverDramAddrHigh,
- upper_32_bits(table->mc_address));
- if (ret)
- return ret;
- ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetDriverDramAddrLow,
- lower_32_bits(table->mc_address));
- if (ret)
- return ret;
ret = smu_send_smc_msg_with_param(smu, drv2smu ?
SMU_MSG_TransferTableDram2Smu :
SMU_MSG_TransferTableSmu2Dram,
@@ -514,11 +545,10 @@ int smu_update_table(struct smu_context *smu, enum smu_table_id table_index, int
if (ret)
return ret;
- /* flush hdp cache */
- adev->nbio.funcs->hdp_flush(adev, NULL);
-
- if (!drv2smu)
- memcpy(table_data, table->cpu_addr, table->size);
+ if (!drv2smu) {
+ amdgpu_asic_flush_hdp(adev, NULL);
+ memcpy(table_data, table->cpu_addr, table_size);
+ }
return ret;
}
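
The rewrite above funnels every table transfer through one preallocated driver_table staging buffer and makes the HDP cache flushes explicit in both directions. A condensed sketch of the resulting driver-to-SMU protocol; the message parameter encoding is elided as 0 here, only the ordering is the point:

/* Sketch of the staging order the rewritten smu_update_table() relies on. */
static int stage_table_to_smu(struct smu_context *smu, const void *data,
			      uint32_t size)
{
	struct smu_table *t = &smu->smu_table.driver_table;

	memcpy(t->cpu_addr, data, size);	/* 1. CPU fills staging buffer */
	amdgpu_asic_flush_hdp(smu->adev, NULL);	/* 2. make it GPU-visible */
	return smu_send_smc_msg_with_param(smu,	/* 3. SMU pulls from DRAM */
					   SMU_MSG_TransferTableDram2Smu, 0);
}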
@@ -527,9 +557,12 @@ bool is_support_sw_smu(struct amdgpu_device *adev)
{
if (adev->asic_type == CHIP_VEGA20)
return (amdgpu_dpm == 2) ? true : false;
- else if (adev->asic_type >= CHIP_ARCTURUS)
- return true;
- else
+ else if (adev->asic_type >= CHIP_ARCTURUS) {
+ if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev))
+ return false;
+ else
+ return true;
+ } else
return false;
}
@@ -637,12 +670,11 @@ int smu_feature_init_dpm(struct smu_context *smu)
int smu_feature_is_enabled(struct smu_context *smu, enum smu_feature_mask mask)
{
- struct amdgpu_device *adev = smu->adev;
struct smu_feature *feature = &smu->smu_feature;
int feature_id;
int ret = 0;
- if (adev->flags & AMD_IS_APU)
+ if (smu->is_apu)
return 1;
feature_id = smu_feature_get_index(smu, mask);
@@ -942,32 +974,56 @@ static int smu_init_fb_allocations(struct smu_context *smu)
struct amdgpu_device *adev = smu->adev;
struct smu_table_context *smu_table = &smu->smu_table;
struct smu_table *tables = smu_table->tables;
+ struct smu_table *driver_table = &(smu_table->driver_table);
+ uint32_t max_table_size = 0;
int ret, i;
- for (i = 0; i < SMU_TABLE_COUNT; i++) {
- if (tables[i].size == 0)
- continue;
+ /* VRAM allocation for tool table */
+ if (tables[SMU_TABLE_PMSTATUSLOG].size) {
ret = amdgpu_bo_create_kernel(adev,
- tables[i].size,
- tables[i].align,
- tables[i].domain,
- &tables[i].bo,
- &tables[i].mc_address,
- &tables[i].cpu_addr);
- if (ret)
- goto failed;
+ tables[SMU_TABLE_PMSTATUSLOG].size,
+ tables[SMU_TABLE_PMSTATUSLOG].align,
+ tables[SMU_TABLE_PMSTATUSLOG].domain,
+ &tables[SMU_TABLE_PMSTATUSLOG].bo,
+ &tables[SMU_TABLE_PMSTATUSLOG].mc_address,
+ &tables[SMU_TABLE_PMSTATUSLOG].cpu_addr);
+ if (ret) {
+ pr_err("VRAM allocation for tool table failed!\n");
+ return ret;
+ }
}
- return 0;
-failed:
- while (--i >= 0) {
+ /* VRAM allocation for driver table */
+ for (i = 0; i < SMU_TABLE_COUNT; i++) {
if (tables[i].size == 0)
continue;
- amdgpu_bo_free_kernel(&tables[i].bo,
- &tables[i].mc_address,
- &tables[i].cpu_addr);
+ if (i == SMU_TABLE_PMSTATUSLOG)
+ continue;
+
+ if (max_table_size < tables[i].size)
+ max_table_size = tables[i].size;
+ }
+
+ driver_table->size = max_table_size;
+ driver_table->align = PAGE_SIZE;
+ driver_table->domain = AMDGPU_GEM_DOMAIN_VRAM;
+
+ ret = amdgpu_bo_create_kernel(adev,
+ driver_table->size,
+ driver_table->align,
+ driver_table->domain,
+ &driver_table->bo,
+ &driver_table->mc_address,
+ &driver_table->cpu_addr);
+ if (ret) {
+ pr_err("VRAM allocation for driver table failed!\n");
+ if (tables[SMU_TABLE_PMSTATUSLOG].mc_address)
+ amdgpu_bo_free_kernel(&tables[SMU_TABLE_PMSTATUSLOG].bo,
+ &tables[SMU_TABLE_PMSTATUSLOG].mc_address,
+ &tables[SMU_TABLE_PMSTATUSLOG].cpu_addr);
}
+
return ret;
}
@@ -975,18 +1031,19 @@ static int smu_fini_fb_allocations(struct smu_context *smu)
{
struct smu_table_context *smu_table = &smu->smu_table;
struct smu_table *tables = smu_table->tables;
- uint32_t i = 0;
+ struct smu_table *driver_table = &(smu_table->driver_table);
if (!tables)
return 0;
- for (i = 0; i < SMU_TABLE_COUNT; i++) {
- if (tables[i].size == 0)
- continue;
- amdgpu_bo_free_kernel(&tables[i].bo,
- &tables[i].mc_address,
- &tables[i].cpu_addr);
- }
+ if (tables[SMU_TABLE_PMSTATUSLOG].mc_address)
+ amdgpu_bo_free_kernel(&tables[SMU_TABLE_PMSTATUSLOG].bo,
+ &tables[SMU_TABLE_PMSTATUSLOG].mc_address,
+ &tables[SMU_TABLE_PMSTATUSLOG].cpu_addr);
+
+ amdgpu_bo_free_kernel(&driver_table->bo,
+ &driver_table->mc_address,
+ &driver_table->cpu_addr);
return 0;
}
@@ -1056,28 +1113,31 @@ static int smu_smc_table_hw_init(struct smu_context *smu,
}
/* smu_dump_pptable(smu); */
+ if (!amdgpu_sriov_vf(adev)) {
+ ret = smu_set_driver_table_location(smu);
+ if (ret)
+ return ret;
- /*
- * Copy pptable bo in the vram to smc with SMU MSGs such as
- * SetDriverDramAddr and TransferTableDram2Smu.
- */
- ret = smu_write_pptable(smu);
- if (ret)
- return ret;
-
- /* issue Run*Btc msg */
- ret = smu_run_btc(smu);
- if (ret)
- return ret;
-
- ret = smu_feature_set_allowed_mask(smu);
- if (ret)
- return ret;
+ /*
+ * Copy pptable bo in the vram to smc with SMU MSGs such as
+ * SetDriverDramAddr and TransferTableDram2Smu.
+ */
+ ret = smu_write_pptable(smu);
+ if (ret)
+ return ret;
- ret = smu_system_features_control(smu, true);
- if (ret)
- return ret;
+ /* issue Run*Btc msg */
+ ret = smu_run_btc(smu);
+ if (ret)
+ return ret;
+ ret = smu_feature_set_allowed_mask(smu);
+ if (ret)
+ return ret;
+ ret = smu_system_features_control(smu, true);
+ if (ret)
+ return ret;
+ }
if (adev->asic_type != CHIP_ARCTURUS) {
ret = smu_notify_display_change(smu);
if (ret)
@@ -1130,8 +1190,9 @@ static int smu_smc_table_hw_init(struct smu_context *smu,
/*
* Set PMSTATUSLOG table bo address with SetToolsDramAddr MSG for tools.
*/
- ret = smu_set_tool_table_location(smu);
-
+ if (!amdgpu_sriov_vf(adev)) {
+ ret = smu_set_tool_table_location(smu);
+ }
if (!smu_is_dpm_running(smu))
pr_info("dpm has been disabled\n");
@@ -1187,10 +1248,9 @@ static int smu_free_memory_pool(struct smu_context *smu)
{
struct smu_table_context *smu_table = &smu->smu_table;
struct smu_table *memory_pool = &smu_table->memory_pool;
- int ret = 0;
if (memory_pool->size == SMU_MEMORY_POOL_SIZE_ZERO)
- return ret;
+ return 0;
amdgpu_bo_free_kernel(&memory_pool->bo,
&memory_pool->mc_address,
@@ -1198,7 +1258,7 @@ static int smu_free_memory_pool(struct smu_context *smu)
memset(memory_pool, 0, sizeof(struct smu_table));
- return ret;
+ return 0;
}
static int smu_start_smc_engine(struct smu_context *smu)
@@ -1237,12 +1297,16 @@ static int smu_hw_init(void *handle)
return ret;
}
- if (adev->flags & AMD_IS_APU) {
+ if (smu->is_apu) {
smu_powergate_sdma(&adev->smu, false);
smu_powergate_vcn(&adev->smu, false);
+ smu_powergate_jpeg(&adev->smu, false);
smu_set_gfx_cgpg(&adev->smu, true);
}
+ if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev))
+ return 0;
+
if (!smu->pm_enabled)
return 0;
@@ -1285,7 +1349,7 @@ failed:
static int smu_stop_dpms(struct smu_context *smu)
{
- return smu_send_smc_msg(smu, SMU_MSG_DisableAllSmuFeatures);
+ return smu_system_features_control(smu, false);
}
static int smu_hw_fini(void *handle)
@@ -1295,36 +1359,45 @@ static int smu_hw_fini(void *handle)
struct smu_table_context *table_context = &smu->smu_table;
int ret = 0;
- if (adev->flags & AMD_IS_APU) {
+ if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev))
+ return 0;
+
+ if (smu->is_apu) {
smu_powergate_sdma(&adev->smu, true);
smu_powergate_vcn(&adev->smu, true);
+ smu_powergate_jpeg(&adev->smu, true);
}
- ret = smu_stop_thermal_control(smu);
- if (ret) {
- pr_warn("Fail to stop thermal control!\n");
- return ret;
- }
+ if (!smu->pm_enabled)
+ return 0;
- /*
- * For custom pptable uploading, skip the DPM features
- * disable process on Navi1x ASICs.
- * - As the gfx related features are under control of
- * RLC on those ASICs. RLC reinitialization will be
- * needed to reenable them. That will cost much more
- * efforts.
- *
- * - SMU firmware can handle the DPM reenablement
- * properly.
- */
- if (!smu->uploading_custom_pp_table ||
- !((adev->asic_type >= CHIP_NAVI10) &&
- (adev->asic_type <= CHIP_NAVI12))) {
- ret = smu_stop_dpms(smu);
+ if (!amdgpu_sriov_vf(adev)) {
+ ret = smu_stop_thermal_control(smu);
if (ret) {
- pr_warn("Fail to stop Dpms!\n");
+ pr_warn("Fail to stop thermal control!\n");
return ret;
}
+
+ /*
+ * For custom pptable uploading, skip the DPM features
+ * disable process on Navi1x ASICs:
+ * - The gfx related features are under control of
+ * RLC on those ASICs. RLC reinitialization would be
+ * needed to reenable them, which would take
+ * considerably more effort.
+ *
+ * - SMU firmware can handle the DPM reenablement
+ * properly.
+ */
+ if (!smu->uploading_custom_pp_table ||
+ !((adev->asic_type >= CHIP_NAVI10) &&
+ (adev->asic_type <= CHIP_NAVI12))) {
+ ret = smu_stop_dpms(smu);
+ if (ret) {
+ pr_warn("Fail to stop Dpms!\n");
+ return ret;
+ }
+ }
}
kfree(table_context->driver_pptable);
@@ -1370,14 +1443,17 @@ static int smu_suspend(void *handle)
struct smu_context *smu = &adev->smu;
bool baco_feature_is_enabled = false;
- if(!(adev->flags & AMD_IS_APU))
+ if (!smu->pm_enabled)
+ return 0;
+
if (!smu->is_apu)
baco_feature_is_enabled = smu_feature_is_enabled(smu, SMU_FEATURE_BACO_BIT);
ret = smu_system_features_control(smu, false);
if (ret)
return ret;
- if (adev->in_gpu_reset && baco_feature_is_enabled) {
+ if (baco_feature_is_enabled) {
ret = smu_feature_set_enabled(smu, SMU_FEATURE_BACO_BIT, true);
if (ret) {
pr_warn("set BACO feature enabled failed, return %d\n", ret);
@@ -1402,6 +1478,12 @@ static int smu_resume(void *handle)
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
struct smu_context *smu = &adev->smu;
+ if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev))
+ return 0;
+
+ if (!smu->pm_enabled)
+ return 0;
+
pr_info("SMU is resuming...\n");
ret = smu_start_smc_engine(smu);
@@ -1600,43 +1682,6 @@ static int smu_enable_umd_pstate(void *handle,
return 0;
}
-static int smu_default_set_performance_level(struct smu_context *smu, enum amd_dpm_forced_level level)
-{
- int ret = 0;
- uint32_t sclk_mask, mclk_mask, soc_mask;
-
- switch (level) {
- case AMD_DPM_FORCED_LEVEL_HIGH:
- ret = smu_force_dpm_limit_value(smu, true);
- break;
- case AMD_DPM_FORCED_LEVEL_LOW:
- ret = smu_force_dpm_limit_value(smu, false);
- break;
- case AMD_DPM_FORCED_LEVEL_AUTO:
- case AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD:
- ret = smu_unforce_dpm_levels(smu);
- break;
- case AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK:
- case AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK:
- case AMD_DPM_FORCED_LEVEL_PROFILE_PEAK:
- ret = smu_get_profiling_clk_mask(smu, level,
- &sclk_mask,
- &mclk_mask,
- &soc_mask);
- if (ret)
- return ret;
- smu_force_clk_levels(smu, SMU_SCLK, 1 << sclk_mask, false);
- smu_force_clk_levels(smu, SMU_MCLK, 1 << mclk_mask, false);
- smu_force_clk_levels(smu, SMU_SOCCLK, 1 << soc_mask, false);
- break;
- case AMD_DPM_FORCED_LEVEL_MANUAL:
- case AMD_DPM_FORCED_LEVEL_PROFILE_EXIT:
- default:
- break;
- }
- return ret;
-}
-
int smu_adjust_power_state_dynamic(struct smu_context *smu,
enum amd_dpm_forced_level level,
bool skip_display_settings)
@@ -1664,7 +1709,7 @@ int smu_adjust_power_state_dynamic(struct smu_context *smu,
}
if (!skip_display_settings) {
- ret = smu_notify_smc_dispaly_config(smu);
+ ret = smu_notify_smc_display_config(smu);
if (ret) {
pr_err("Failed to notify smc display config!");
return ret;
@@ -1674,11 +1719,8 @@ int smu_adjust_power_state_dynamic(struct smu_context *smu,
if (smu_dpm_ctx->dpm_level != level) {
ret = smu_asic_set_performance_level(smu, level);
if (ret) {
- ret = smu_default_set_performance_level(smu, level);
- if (ret) {
- pr_err("Failed to set performance level!");
- return ret;
- }
+ pr_err("Failed to set performance level!");
+ return ret;
}
/* update the saved copy */
@@ -1920,27 +1962,25 @@ int smu_set_df_cstate(struct smu_context *smu,
int smu_write_watermarks_table(struct smu_context *smu)
{
- int ret = 0;
- struct smu_table_context *smu_table = &smu->smu_table;
- struct smu_table *table = NULL;
-
- table = &smu_table->tables[SMU_TABLE_WATERMARKS];
+ void *watermarks_table = smu->smu_table.watermarks_table;
- if (!table->cpu_addr)
+ if (!watermarks_table)
return -EINVAL;
- ret = smu_update_table(smu, SMU_TABLE_WATERMARKS, 0, table->cpu_addr,
+ return smu_update_table(smu,
+ SMU_TABLE_WATERMARKS,
+ 0,
+ watermarks_table,
true);
-
- return ret;
}
int smu_set_watermarks_for_clock_ranges(struct smu_context *smu,
struct dm_pp_wm_sets_with_clock_ranges_soc15 *clock_ranges)
{
- int ret = 0;
- struct smu_table *watermarks = &smu->smu_table.tables[SMU_TABLE_WATERMARKS];
- void *table = watermarks->cpu_addr;
+ void *table = smu->smu_table.watermarks_table;
+
+ if (!table)
+ return -EINVAL;
mutex_lock(&smu->mutex);
@@ -1954,7 +1994,7 @@ int smu_set_watermarks_for_clock_ranges(struct smu_context *smu,
mutex_unlock(&smu->mutex);
- return ret;
+ return 0;
}
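
Since the watermarks now live in a kernel-allocated staging copy (watermarks_table) rather than in a per-table VRAM mapping, a display-side caller is expected to update the staged copy first and then push it to the SMU — a hedged usage sketch built from the two helpers above:

	/* Hedged usage sketch: update the staged copy, then upload it. */
	ret = smu_set_watermarks_for_clock_ranges(smu, clock_ranges);
	if (!ret)
		ret = smu_write_watermarks_table(smu);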
const struct amd_ip_funcs smu_ip_funcs = {
@@ -2279,13 +2319,9 @@ int smu_set_active_display_count(struct smu_context *smu, uint32_t count)
{
int ret = 0;
- mutex_lock(&smu->mutex);
-
if (smu->ppt_funcs->set_active_display_count)
ret = smu->ppt_funcs->set_active_display_count(smu, count);
- mutex_unlock(&smu->mutex);
-
return ret;
}
@@ -2432,7 +2468,7 @@ bool smu_baco_is_support(struct smu_context *smu)
mutex_lock(&smu->mutex);
- if (smu->ppt_funcs->baco_is_support)
+ if (smu->ppt_funcs && smu->ppt_funcs->baco_is_support)
ret = smu->ppt_funcs->baco_is_support(smu);
mutex_unlock(&smu->mutex);
@@ -2452,14 +2488,28 @@ int smu_baco_get_state(struct smu_context *smu, enum smu_baco_state *state)
return 0;
}
-int smu_baco_reset(struct smu_context *smu)
+int smu_baco_enter(struct smu_context *smu)
+{
+ int ret = 0;
+
+ mutex_lock(&smu->mutex);
+
+ if (smu->ppt_funcs->baco_enter)
+ ret = smu->ppt_funcs->baco_enter(smu);
+
+ mutex_unlock(&smu->mutex);
+
+ return ret;
+}
+
+int smu_baco_exit(struct smu_context *smu)
{
int ret = 0;
mutex_lock(&smu->mutex);
- if (smu->ppt_funcs->baco_reset)
- ret = smu->ppt_funcs->baco_reset(smu);
+ if (smu->ppt_funcs->baco_exit)
+ ret = smu->ppt_funcs->baco_exit(smu);
mutex_unlock(&smu->mutex);
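
With baco_reset split into explicit enter/exit entry points, a full BACO reset becomes a two-step sequence driven by the caller. A minimal sketch, assuming the caller performs any ASIC-specific work between the two steps:

	/* Sketch of a BACO reset using the new split entry points. */
	static int smu_do_baco_reset(struct smu_context *smu)
	{
		int ret;

		ret = smu_baco_enter(smu);
		if (ret)
			return ret;

		/* ... ASIC-specific handling while in the BACO state ... */

		return smu_baco_exit(smu);
	}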
diff --git a/drivers/gpu/drm/amd/powerplay/arcturus_ppt.c b/drivers/gpu/drm/amd/powerplay/arcturus_ppt.c
index 472e9fed411a..14ba6aa876e2 100644
--- a/drivers/gpu/drm/amd/powerplay/arcturus_ppt.c
+++ b/drivers/gpu/drm/amd/powerplay/arcturus_ppt.c
@@ -179,6 +179,7 @@ static struct smu_11_0_cmn2aisc_mapping arcturus_table_map[SMU_TABLE_COUNT] = {
TAB_MAP(DRIVER_SMU_CONFIG),
TAB_MAP(OVERDRIVE),
TAB_MAP(I2C_COMMANDS),
+ TAB_MAP(ACTIVITY_MONITOR_COEFF),
};
static struct smu_11_0_cmn2aisc_mapping arcturus_pwr_src_map[SMU_POWER_SOURCE_COUNT] = {
@@ -280,10 +281,8 @@ static int arcturus_get_workload_type(struct smu_context *smu, enum PP_SMC_POWER
return -EINVAL;
mapping = arcturus_workload_map[profile];
- if (!(mapping.valid_mapping)) {
- pr_warn("Unsupported SMU power source: %d\n", profile);
+ if (!(mapping.valid_mapping))
return -EINVAL;
- }
return mapping.map_to;
}
@@ -304,6 +303,10 @@ static int arcturus_tables_init(struct smu_context *smu, struct smu_table *table
SMU_TABLE_INIT(tables, SMU_TABLE_I2C_COMMANDS, sizeof(SwI2cRequest_t),
PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM);
+ SMU_TABLE_INIT(tables, SMU_TABLE_ACTIVITY_MONITOR_COEFF,
+ sizeof(DpmActivityMonitorCoeffInt_t), PAGE_SIZE,
+ AMDGPU_GEM_DOMAIN_VRAM);
+
smu_table->metrics_table = kzalloc(sizeof(SmuMetrics_t), GFP_KERNEL);
if (!smu_table->metrics_table)
return -ENOMEM;
@@ -495,6 +498,7 @@ static int arcturus_store_powerplay_table(struct smu_context *smu)
{
struct smu_11_0_powerplay_table *powerplay_table = NULL;
struct smu_table_context *table_context = &smu->smu_table;
+ struct smu_baco_context *smu_baco = &smu->smu_baco;
int ret = 0;
if (!table_context->power_play_table)
@@ -507,6 +511,12 @@ static int arcturus_store_powerplay_table(struct smu_context *smu)
table_context->thermal_controller_type = powerplay_table->thermal_controller_type;
+ mutex_lock(&smu_baco->mutex);
+ if (powerplay_table->platform_caps & SMU_11_0_PP_PLATFORM_CAP_BACO ||
+ powerplay_table->platform_caps & SMU_11_0_PP_PLATFORM_CAP_MACO)
+ smu_baco->platform_support = true;
+ mutex_unlock(&smu_baco->mutex);
+
return ret;
}
@@ -1308,6 +1318,8 @@ static int arcturus_get_power_limit(struct smu_context *smu,
static int arcturus_get_power_profile_mode(struct smu_context *smu,
char *buf)
{
+ struct amdgpu_device *adev = smu->adev;
+ DpmActivityMonitorCoeffInt_t activity_monitor;
static const char *profile_name[] = {
"BOOTUP_DEFAULT",
"3D_FULL_SCREEN",
@@ -1317,14 +1329,35 @@ static int arcturus_get_power_profile_mode(struct smu_context *smu,
"COMPUTE",
"CUSTOM"};
static const char *title[] = {
- "PROFILE_INDEX(NAME)"};
+ "PROFILE_INDEX(NAME)",
+ "CLOCK_TYPE(NAME)",
+ "FPS",
+ "UseRlcBusy",
+ "MinActiveFreqType",
+ "MinActiveFreq",
+ "BoosterFreqType",
+ "BoosterFreq",
+ "PD_Data_limit_c",
+ "PD_Data_error_coeff",
+ "PD_Data_error_rate_coeff"};
uint32_t i, size = 0;
int16_t workload_type = 0;
+ int result = 0;
+ uint32_t smu_version;
- if (!smu->pm_enabled || !buf)
+ if (!buf)
return -EINVAL;
- size += sprintf(buf + size, "%16s\n",
+ result = smu_get_smc_version(smu, NULL, &smu_version);
+ if (result)
+ return result;
+
+ if (smu_version >= 0x360d00 && !amdgpu_sriov_vf(adev))
+ size += sprintf(buf + size, "%16s %s %s %s %s %s %s %s %s %s %s\n",
+ title[0], title[1], title[2], title[3], title[4], title[5],
+ title[6], title[7], title[8], title[9], title[10]);
+ else
+ size += sprintf(buf + size, "%16s\n",
title[0]);
for (i = 0; i <= PP_SMC_POWER_PROFILE_CUSTOM; i++) {
@@ -1336,8 +1369,50 @@ static int arcturus_get_power_profile_mode(struct smu_context *smu,
if (workload_type < 0)
continue;
+ if (smu_version >= 0x360d00 && !amdgpu_sriov_vf(adev)) {
+ result = smu_update_table(smu,
+ SMU_TABLE_ACTIVITY_MONITOR_COEFF,
+ workload_type,
+ (void *)(&activity_monitor),
+ false);
+ if (result) {
+ pr_err("[%s] Failed to get activity monitor!", __func__);
+ return result;
+ }
+ }
+
size += sprintf(buf + size, "%2d %14s%s\n",
i, profile_name[i], (i == smu->power_profile_mode) ? "*" : " ");
+
+ if (smu_version >= 0x360d00 && !amdgpu_sriov_vf(adev)) {
+ size += sprintf(buf + size, "%19s %d(%13s) %7d %7d %7d %7d %7d %7d %7d %7d %7d\n",
+ " ",
+ 0,
+ "GFXCLK",
+ activity_monitor.Gfx_FPS,
+ activity_monitor.Gfx_UseRlcBusy,
+ activity_monitor.Gfx_MinActiveFreqType,
+ activity_monitor.Gfx_MinActiveFreq,
+ activity_monitor.Gfx_BoosterFreqType,
+ activity_monitor.Gfx_BoosterFreq,
+ activity_monitor.Gfx_PD_Data_limit_c,
+ activity_monitor.Gfx_PD_Data_error_coeff,
+ activity_monitor.Gfx_PD_Data_error_rate_coeff);
+
+ size += sprintf(buf + size, "%19s %d(%13s) %7d %7d %7d %7d %7d %7d %7d %7d %7d\n",
+ " ",
+ 1,
+ "UCLK",
+ activity_monitor.Mem_FPS,
+ activity_monitor.Mem_UseRlcBusy,
+ activity_monitor.Mem_MinActiveFreqType,
+ activity_monitor.Mem_MinActiveFreq,
+ activity_monitor.Mem_BoosterFreqType,
+ activity_monitor.Mem_BoosterFreq,
+ activity_monitor.Mem_PD_Data_limit_c,
+ activity_monitor.Mem_PD_Data_error_coeff,
+ activity_monitor.Mem_PD_Data_error_rate_coeff);
+ }
}
return size;
@@ -1347,18 +1422,69 @@ static int arcturus_set_power_profile_mode(struct smu_context *smu,
long *input,
uint32_t size)
{
+ DpmActivityMonitorCoeffInt_t activity_monitor;
int workload_type = 0;
uint32_t profile_mode = input[size];
int ret = 0;
-
- if (!smu->pm_enabled)
- return -EINVAL;
+ uint32_t smu_version;
if (profile_mode > PP_SMC_POWER_PROFILE_CUSTOM) {
pr_err("Invalid power profile mode %d\n", profile_mode);
return -EINVAL;
}
+ ret = smu_get_smc_version(smu, NULL, &smu_version);
+ if (ret)
+ return ret;
+
+ if ((profile_mode == PP_SMC_POWER_PROFILE_CUSTOM) &&
+ (smu_version >= 0x360d00)) {
+ ret = smu_update_table(smu,
+ SMU_TABLE_ACTIVITY_MONITOR_COEFF,
+ WORKLOAD_PPLIB_CUSTOM_BIT,
+ (void *)(&activity_monitor),
+ false);
+ if (ret) {
+ pr_err("[%s] Failed to get activity monitor!", __func__);
+ return ret;
+ }
+
+ switch (input[0]) {
+ case 0: /* Gfxclk */
+ activity_monitor.Gfx_FPS = input[1];
+ activity_monitor.Gfx_UseRlcBusy = input[2];
+ activity_monitor.Gfx_MinActiveFreqType = input[3];
+ activity_monitor.Gfx_MinActiveFreq = input[4];
+ activity_monitor.Gfx_BoosterFreqType = input[5];
+ activity_monitor.Gfx_BoosterFreq = input[6];
+ activity_monitor.Gfx_PD_Data_limit_c = input[7];
+ activity_monitor.Gfx_PD_Data_error_coeff = input[8];
+ activity_monitor.Gfx_PD_Data_error_rate_coeff = input[9];
+ break;
+ case 1: /* Uclk */
+ activity_monitor.Mem_FPS = input[1];
+ activity_monitor.Mem_UseRlcBusy = input[2];
+ activity_monitor.Mem_MinActiveFreqType = input[3];
+ activity_monitor.Mem_MinActiveFreq = input[4];
+ activity_monitor.Mem_BoosterFreqType = input[5];
+ activity_monitor.Mem_BoosterFreq = input[6];
+ activity_monitor.Mem_PD_Data_limit_c = input[7];
+ activity_monitor.Mem_PD_Data_error_coeff = input[8];
+ activity_monitor.Mem_PD_Data_error_rate_coeff = input[9];
+ break;
+ }
+
+ ret = smu_update_table(smu,
+ SMU_TABLE_ACTIVITY_MONITOR_COEFF,
+ WORKLOAD_PPLIB_CUSTOM_BIT,
+ (void *)(&activity_monitor),
+ true);
+ if (ret) {
+ pr_err("[%s] Failed to set activity monitor!", __func__);
+ return ret;
+ }
+ }
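
The CUSTOM path above consumes a clock selector in input[0], nine coefficients in input[1..9], and the profile mode itself at input[size]. A hedged worked example with hypothetical coefficient values:

	/* Hypothetical example: program the CUSTOM profile's GFXCLK
	 * coefficients. The coefficient values are illustrative only. */
	long input[] = { 0 /* Gfxclk */, 60, 1, 0, 300, 0, 0, 100, 120, 4800,
			 PP_SMC_POWER_PROFILE_CUSTOM };

	ret = arcturus_set_power_profile_mode(smu, input, ARRAY_SIZE(input) - 1);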
+
/*
* Conv PP_SMC_POWER_PROFILE* to WORKLOAD_PPLIB_*_BIT
* Not all profile modes are supported on arcturus.
@@ -1897,7 +2023,7 @@ static int arcturus_i2c_eeprom_read_data(struct i2c_adapter *control,
SwI2cRequest_t req;
struct amdgpu_device *adev = to_amdgpu_device(control);
struct smu_table_context *smu_table = &adev->smu.smu_table;
- struct smu_table *table = &smu_table->tables[SMU_TABLE_I2C_COMMANDS];
+ struct smu_table *table = &smu_table->driver_table;
memset(&req, 0, sizeof(req));
arcturus_fill_eeprom_i2c_req(&req, false, address, numbytes, data);
@@ -2051,8 +2177,12 @@ static const struct i2c_algorithm arcturus_i2c_eeprom_i2c_algo = {
static int arcturus_i2c_eeprom_control_init(struct i2c_adapter *control)
{
struct amdgpu_device *adev = to_amdgpu_device(control);
+ struct smu_context *smu = &adev->smu;
int res;
+ if (!smu->pm_enabled)
+ return -EOPNOTSUPP;
+
control->owner = THIS_MODULE;
control->class = I2C_CLASS_SPD;
control->dev.parent = &adev->pdev->dev;
@@ -2068,6 +2198,12 @@ static int arcturus_i2c_eeprom_control_init(struct i2c_adapter *control)
static void arcturus_i2c_eeprom_control_fini(struct i2c_adapter *control)
{
+ struct amdgpu_device *adev = to_amdgpu_device(control);
+ struct smu_context *smu = &adev->smu;
+
+ if (!smu->pm_enabled)
+ return;
+
i2c_del_adapter(control);
}
@@ -2112,6 +2248,7 @@ static const struct pptable_funcs arcturus_ppt_funcs = {
.get_profiling_clk_mask = arcturus_get_profiling_clk_mask,
.get_power_profile_mode = arcturus_get_power_profile_mode,
.set_power_profile_mode = arcturus_set_power_profile_mode,
+ .set_performance_level = smu_v11_0_set_performance_level,
/* debug (internal used) */
.dump_pptable = arcturus_dump_pptable,
.get_power_limit = arcturus_get_power_limit,
@@ -2135,6 +2272,7 @@ static const struct pptable_funcs arcturus_ppt_funcs = {
.check_fw_version = smu_v11_0_check_fw_version,
.write_pptable = smu_v11_0_write_pptable,
.set_min_dcef_deep_sleep = smu_v11_0_set_min_dcef_deep_sleep,
+ .set_driver_table_location = smu_v11_0_set_driver_table_location,
.set_tool_table_location = smu_v11_0_set_tool_table_location,
.notify_memory_pool_location = smu_v11_0_notify_memory_pool_location,
.system_features_control = smu_v11_0_system_features_control,
@@ -2163,7 +2301,8 @@ static const struct pptable_funcs arcturus_ppt_funcs = {
.baco_is_support= smu_v11_0_baco_is_support,
.baco_get_state = smu_v11_0_baco_get_state,
.baco_set_state = smu_v11_0_baco_set_state,
- .baco_reset = smu_v11_0_baco_reset,
+ .baco_enter = smu_v11_0_baco_enter,
+ .baco_exit = smu_v11_0_baco_exit,
.get_dpm_ultimate_freq = smu_v11_0_get_dpm_ultimate_freq,
.set_soft_freq_limited_range = smu_v11_0_set_soft_freq_limited_range,
.override_pcie_parameters = smu_v11_0_override_pcie_parameters,
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/hardwaremanager.c b/drivers/gpu/drm/amd/powerplay/hwmgr/hardwaremanager.c
index cc57fb953e62..9454ab50f9a1 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/hardwaremanager.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/hardwaremanager.c
@@ -81,8 +81,8 @@ int phm_enable_dynamic_state_management(struct pp_hwmgr *hwmgr)
adev = hwmgr->adev;
/* Skip for suspend/resume case */
- if (smum_is_dpm_running(hwmgr) && !amdgpu_passthrough(adev)
- && adev->in_suspend) {
+ if (!hwmgr->pp_one_vf && smum_is_dpm_running(hwmgr)
+ && !amdgpu_passthrough(adev) && adev->in_suspend) {
pr_info("dpm has been enabled\n");
return 0;
}
@@ -99,6 +99,9 @@ int phm_disable_dynamic_state_management(struct pp_hwmgr *hwmgr)
PHM_FUNC_CHECK(hwmgr);
+ if (!hwmgr->not_vf)
+ return 0;
+
if (!smum_is_dpm_running(hwmgr)) {
pr_info("dpm has been disabled\n");
return 0;
@@ -200,6 +203,9 @@ int phm_stop_thermal_controller(struct pp_hwmgr *hwmgr)
{
PHM_FUNC_CHECK(hwmgr);
+ if (!hwmgr->not_vf)
+ return 0;
+
if (hwmgr->hwmgr_func->stop_thermal_controller == NULL)
return -EINVAL;
@@ -237,6 +243,9 @@ int phm_start_thermal_controller(struct pp_hwmgr *hwmgr)
TEMP_RANGE_MAX};
struct amdgpu_device *adev = hwmgr->adev;
+ if (!hwmgr->not_vf)
+ return 0;
+
if (hwmgr->hwmgr_func->get_thermal_temperature_range)
hwmgr->hwmgr_func->get_thermal_temperature_range(
hwmgr, &range);
@@ -263,6 +272,8 @@ int phm_start_thermal_controller(struct pp_hwmgr *hwmgr)
bool phm_check_smc_update_required_for_display_configuration(struct pp_hwmgr *hwmgr)
{
PHM_FUNC_CHECK(hwmgr);
+ if (hwmgr->pp_one_vf)
+ return false;
if (hwmgr->hwmgr_func->check_smc_update_required_for_display_configuration == NULL)
return false;
@@ -482,6 +493,9 @@ int phm_disable_smc_firmware_ctf(struct pp_hwmgr *hwmgr)
{
PHM_FUNC_CHECK(hwmgr);
+ if (!hwmgr->not_vf)
+ return 0;
+
if (hwmgr->hwmgr_func->disable_smc_firmware_ctf == NULL)
return -EINVAL;
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c
index d2909c91d65b..f48fdc7f0382 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c
@@ -221,6 +221,9 @@ int hwmgr_hw_init(struct pp_hwmgr *hwmgr)
{
int ret = 0;
+ hwmgr->pp_one_vf = amdgpu_sriov_is_pp_one_vf((struct amdgpu_device *)hwmgr->adev);
+ hwmgr->pm_en = amdgpu_dpm && (hwmgr->not_vf || hwmgr->pp_one_vf);
if (!hwmgr->pm_en)
return 0;
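
The pm_en computation above reduces to a small truth table; for reference:

	/* pm_en for the expression above, assuming amdgpu_dpm != 0:
	 *   bare metal:       not_vf=1, pp_one_vf=0  ->  pm_en = true
	 *   one-VF SR-IOV:    not_vf=0, pp_one_vf=1  ->  pm_en = true
	 *   multi-VF SR-IOV:  not_vf=0, pp_one_vf=0  ->  pm_en = false
	 */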
@@ -279,7 +282,7 @@ err:
int hwmgr_hw_fini(struct pp_hwmgr *hwmgr)
{
- if (!hwmgr || !hwmgr->pm_en)
+ if (!hwmgr || !hwmgr->pm_en || !hwmgr->not_vf)
return 0;
phm_stop_thermal_controller(hwmgr);
@@ -299,7 +302,7 @@ int hwmgr_suspend(struct pp_hwmgr *hwmgr)
{
int ret = 0;
- if (!hwmgr || !hwmgr->pm_en)
+ if (!hwmgr || !hwmgr->pm_en || !hwmgr->not_vf)
return 0;
phm_disable_smc_firmware_ctf(hwmgr);
@@ -321,7 +324,7 @@ int hwmgr_resume(struct pp_hwmgr *hwmgr)
if (!hwmgr)
return -EINVAL;
- if (!hwmgr->pm_en)
+ if (!hwmgr->not_vf || !hwmgr->pm_en)
return 0;
ret = phm_setup_asic(hwmgr);
@@ -365,6 +368,8 @@ int hwmgr_handle_task(struct pp_hwmgr *hwmgr, enum amd_pp_task task_id,
switch (task_id) {
case AMD_PP_TASK_DISPLAY_CONFIG_CHANGE:
+ if (!hwmgr->not_vf)
+ return ret;
ret = phm_pre_display_configuration_changed(hwmgr);
if (ret)
return ret;
@@ -381,6 +386,8 @@ int hwmgr_handle_task(struct pp_hwmgr *hwmgr, enum amd_pp_task task_id,
enum PP_StateUILabel requested_ui_label;
struct pp_power_state *requested_ps = NULL;
+ if (!hwmgr->not_vf)
+ return ret;
if (user_state == NULL) {
ret = -EINVAL;
break;
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/pp_psm.c b/drivers/gpu/drm/amd/powerplay/hwmgr/pp_psm.c
index 6bf48934fdc4..31a32a79cfc2 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/pp_psm.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/pp_psm.c
@@ -262,20 +262,22 @@ int psm_adjust_power_state_dynamic(struct pp_hwmgr *hwmgr, bool skip_display_set
uint32_t index;
long workload;
- if (!skip_display_settings)
- phm_display_configuration_changed(hwmgr);
-
- if (hwmgr->ps)
- power_state_management(hwmgr, new_ps);
- else
- /*
- * for vega12/vega20 which does not support power state manager
- * DAL clock limits should also be honoured
- */
- phm_apply_clock_adjust_rules(hwmgr);
-
- if (!skip_display_settings)
- phm_notify_smc_display_config_after_ps_adjustment(hwmgr);
+ if (hwmgr->not_vf) {
+ if (!skip_display_settings)
+ phm_display_configuration_changed(hwmgr);
+
+ if (hwmgr->ps)
+ power_state_management(hwmgr, new_ps);
+ else
+ /*
+ * for vega12/vega20, which do not support the power state
+ * manager, DAL clock limits should also be honoured
+ */
+ phm_apply_clock_adjust_rules(hwmgr);
+
+ if (!skip_display_settings)
+ phm_notify_smc_display_config_after_ps_adjustment(hwmgr);
+ }
if (!phm_force_dpm_levels(hwmgr, hwmgr->request_dpm_level))
hwmgr->dpm_level = hwmgr->request_dpm_level;
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.c
index 1115761982a7..4e8ab139bb3b 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.c
@@ -1151,12 +1151,11 @@ static int smu10_set_watermarks_for_clocks_ranges(struct pp_hwmgr *hwmgr,
struct smu10_hwmgr *data = hwmgr->backend;
struct dm_pp_wm_sets_with_clock_ranges_soc15 *wm_with_clock_ranges = clock_ranges;
Watermarks_t *table = &(data->water_marks_table);
- int result = 0;
smu_set_watermarks_for_clocks_ranges(table,wm_with_clock_ranges);
smum_smc_table_manager(hwmgr, (uint8_t *)table, (uint16_t)SMU10_WMTABLE, false);
data->water_marks_exist = true;
- return result;
+ return 0;
}
static int smu10_smus_notify_pwe(struct pp_hwmgr *hwmgr)
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c
index f73dff68e799..d70abada66bf 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c
@@ -4238,7 +4238,6 @@ static int smu7_check_mc_firmware(struct pp_hwmgr *hwmgr)
{
struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
- uint32_t vbios_version;
uint32_t tmp;
/* Read MC indirect register offset 0x9F bits [3:0] to see
@@ -4247,7 +4246,6 @@ static int smu7_check_mc_firmware(struct pp_hwmgr *hwmgr)
*/
smu7_get_mc_microcode_version(hwmgr);
- vbios_version = hwmgr->microcode_version_info.MC & 0xf;
data->need_long_memory_training = false;
@@ -4945,7 +4943,7 @@ static int smu7_get_power_profile_mode(struct pp_hwmgr *hwmgr, char *buf)
title[0], title[1], title[2], title[3],
title[4], title[5], title[6], title[7]);
- len = sizeof(smu7_profiling) / sizeof(struct profile_mode_setting);
+ len = ARRAY_SIZE(smu7_profiling);
for (i = 0; i < len; i++) {
if (i == hwmgr->power_profile_mode) {
@@ -5077,13 +5075,11 @@ static int smu7_get_performance_level(struct pp_hwmgr *hwmgr, const struct pp_hw
PHM_PerformanceLevel *level)
{
const struct smu7_power_state *ps;
- struct smu7_hwmgr *data;
uint32_t i;
if (level == NULL || hwmgr == NULL || state == NULL)
return -EINVAL;
- data = hwmgr->backend;
ps = cast_const_phw_smu7_power_state(state);
i = index > ps->performance_level_count - 1 ?
@@ -5189,13 +5185,11 @@ uint8_t smu7_get_sleep_divider_id_from_clock(uint32_t clock,
int smu7_init_function_pointers(struct pp_hwmgr *hwmgr)
{
- int ret = 0;
-
hwmgr->hwmgr_func = &smu7_hwmgr_funcs;
if (hwmgr->pp_table_version == PP_TABLE_V0)
hwmgr->pptable_func = &pptable_funcs;
else if (hwmgr->pp_table_version == PP_TABLE_V1)
hwmgr->pptable_func = &pptable_v1_0_funcs;
- return ret;
+ return 0;
}
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c
index d71a492c87a3..92a65e3daff4 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c
@@ -912,6 +912,9 @@ static int vega10_hwmgr_backend_init(struct pp_hwmgr *hwmgr)
hwmgr->platform_descriptor.clockStep.memoryClock = 500;
data->total_active_cus = adev->gfx.cu_info.number;
+ if (!hwmgr->not_vf)
+ return result;
+
/* Setup default Overdrive Fan control settings */
data->odn_fan_table.target_fan_speed =
hwmgr->thermal_controller.advanceFanControlParameters.usMaxFanRPM;
@@ -979,6 +982,9 @@ static int vega10_setup_dpm_led_config(struct pp_hwmgr *hwmgr)
static int vega10_setup_asic_task(struct pp_hwmgr *hwmgr)
{
+ if (!hwmgr->not_vf)
+ return 0;
+
PP_ASSERT_WITH_CODE(!vega10_init_sclk_threshold(hwmgr),
"Failed to init sclk threshold!",
return -EINVAL);
@@ -2503,6 +2509,9 @@ static int vega10_init_smc_table(struct pp_hwmgr *hwmgr)
"Failed to setup default DPM tables!",
return result);
+ if (!hwmgr->not_vf)
+ return 0;
+
/* initialize ODN table */
if (hwmgr->od_enabled) {
if (odn_table->max_vddc) {
@@ -2826,6 +2835,8 @@ static int vega10_stop_dpm(struct pp_hwmgr *hwmgr, uint32_t bitmap)
struct vega10_hwmgr *data = hwmgr->backend;
uint32_t i, feature_mask = 0;
+ if (!hwmgr->not_vf)
+ return 0;
if(data->smu_features[GNLD_LED_DISPLAY].supported == true){
PP_ASSERT_WITH_CODE(!vega10_enable_smc_features(hwmgr,
@@ -2932,61 +2943,73 @@ static int vega10_enable_dpm_tasks(struct pp_hwmgr *hwmgr)
struct vega10_hwmgr *data = hwmgr->backend;
int tmp_result, result = 0;
- vega10_enable_disable_PCC_limit_feature(hwmgr, true);
-
- smum_send_msg_to_smc_with_parameter(hwmgr,
- PPSMC_MSG_ConfigureTelemetry, data->config_telemetry);
+ if (hwmgr->not_vf) {
+ vega10_enable_disable_PCC_limit_feature(hwmgr, true);
- tmp_result = vega10_construct_voltage_tables(hwmgr);
- PP_ASSERT_WITH_CODE(!tmp_result,
- "Failed to construct voltage tables!",
- result = tmp_result);
+ smum_send_msg_to_smc_with_parameter(hwmgr,
+ PPSMC_MSG_ConfigureTelemetry, data->config_telemetry);
- tmp_result = vega10_init_smc_table(hwmgr);
- PP_ASSERT_WITH_CODE(!tmp_result,
- "Failed to initialize SMC table!",
- result = tmp_result);
+ tmp_result = vega10_construct_voltage_tables(hwmgr);
+ PP_ASSERT_WITH_CODE(!tmp_result,
+ "Failed to construct voltage tables!",
+ result = tmp_result);
+ }
- if (PP_CAP(PHM_PlatformCaps_ThermalController)) {
- tmp_result = vega10_enable_thermal_protection(hwmgr);
+ if (hwmgr->not_vf || hwmgr->pp_one_vf) {
+ tmp_result = vega10_init_smc_table(hwmgr);
PP_ASSERT_WITH_CODE(!tmp_result,
- "Failed to enable thermal protection!",
- result = tmp_result);
+ "Failed to initialize SMC table!",
+ result = tmp_result);
}
- tmp_result = vega10_enable_vrhot_feature(hwmgr);
- PP_ASSERT_WITH_CODE(!tmp_result,
- "Failed to enable VR hot feature!",
- result = tmp_result);
+ if (hwmgr->not_vf) {
+ if (PP_CAP(PHM_PlatformCaps_ThermalController)) {
+ tmp_result = vega10_enable_thermal_protection(hwmgr);
+ PP_ASSERT_WITH_CODE(!tmp_result,
+ "Failed to enable thermal protection!",
+ result = tmp_result);
+ }
- tmp_result = vega10_enable_deep_sleep_master_switch(hwmgr);
- PP_ASSERT_WITH_CODE(!tmp_result,
- "Failed to enable deep sleep master switch!",
- result = tmp_result);
+ tmp_result = vega10_enable_vrhot_feature(hwmgr);
+ PP_ASSERT_WITH_CODE(!tmp_result,
+ "Failed to enable VR hot feature!",
+ result = tmp_result);
- tmp_result = vega10_start_dpm(hwmgr, SMC_DPM_FEATURES);
- PP_ASSERT_WITH_CODE(!tmp_result,
- "Failed to start DPM!", result = tmp_result);
+ tmp_result = vega10_enable_deep_sleep_master_switch(hwmgr);
+ PP_ASSERT_WITH_CODE(!tmp_result,
+ "Failed to enable deep sleep master switch!",
+ result = tmp_result);
+ }
- /* enable didt, do not abort if failed didt */
- tmp_result = vega10_enable_didt_config(hwmgr);
- PP_ASSERT(!tmp_result,
- "Failed to enable didt config!");
+ if (hwmgr->not_vf) {
+ tmp_result = vega10_start_dpm(hwmgr, SMC_DPM_FEATURES);
+ PP_ASSERT_WITH_CODE(!tmp_result,
+ "Failed to start DPM!", result = tmp_result);
+
+ /* enable didt, do not abort if failed didt */
+ tmp_result = vega10_enable_didt_config(hwmgr);
+ PP_ASSERT(!tmp_result,
+ "Failed to enable didt config!");
+ }
tmp_result = vega10_enable_power_containment(hwmgr);
PP_ASSERT_WITH_CODE(!tmp_result,
- "Failed to enable power containment!",
- result = tmp_result);
+ "Failed to enable power containment!",
+ result = tmp_result);
- tmp_result = vega10_power_control_set_level(hwmgr);
- PP_ASSERT_WITH_CODE(!tmp_result,
- "Failed to power control set level!",
- result = tmp_result);
+ if (hwmgr->not_vf) {
+ tmp_result = vega10_power_control_set_level(hwmgr);
+ PP_ASSERT_WITH_CODE(!tmp_result,
+ "Failed to power control set level!",
+ result = tmp_result);
- tmp_result = vega10_enable_ulv(hwmgr);
- PP_ASSERT_WITH_CODE(!tmp_result,
- "Failed to enable ULV!",
- result = tmp_result);
+ tmp_result = vega10_enable_ulv(hwmgr);
+ PP_ASSERT_WITH_CODE(!tmp_result,
+ "Failed to enable ULV!",
+ result = tmp_result);
+ }
return result;
}
@@ -3080,11 +3103,22 @@ static int vega10_get_pp_table_entry_callback_func(struct pp_hwmgr *hwmgr,
performance_level->soc_clock = socclk_dep_table->entries
[state_entry->ucSocClockIndexHigh].ulClk;
if (gfxclk_dep_table->ucRevId == 0) {
- performance_level->gfx_clock = gfxclk_dep_table->entries
- [state_entry->ucGfxClockIndexHigh].ulClk;
+ /* under vega10 pp one vf mode, the gfx clk dpm needs to be
+ * lowered to level 4 due to the limited 110 W power budget
+ */
+ if (hwmgr->pp_one_vf && (state_entry->ucGfxClockIndexHigh > 0))
+ performance_level->gfx_clock =
+ gfxclk_dep_table->entries[4].ulClk;
+ else
+ performance_level->gfx_clock = gfxclk_dep_table->entries
+ [state_entry->ucGfxClockIndexHigh].ulClk;
} else if (gfxclk_dep_table->ucRevId == 1) {
patom_record_V2 = (ATOM_Vega10_GFXCLK_Dependency_Record_V2 *)gfxclk_dep_table->entries;
- performance_level->gfx_clock = patom_record_V2[state_entry->ucGfxClockIndexHigh].ulClk;
+ if (hwmgr->pp_one_vf && (state_entry->ucGfxClockIndexHigh > 0))
+ performance_level->gfx_clock = patom_record_V2[4].ulClk;
+ else
+ performance_level->gfx_clock =
+ patom_record_V2[state_entry->ucGfxClockIndexHigh].ulClk;
}
performance_level->mem_clock = mclk_dep_table->entries
@@ -3495,6 +3529,7 @@ static int vega10_upload_dpm_bootup_level(struct pp_hwmgr *hwmgr)
smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetSoftMinGfxclkByIndex,
data->smc_state_table.gfx_boot_level);
+
data->dpm_table.gfx_table.dpm_state.soft_min_level =
data->smc_state_table.gfx_boot_level;
}
@@ -3503,7 +3538,8 @@ static int vega10_upload_dpm_bootup_level(struct pp_hwmgr *hwmgr)
if (!data->registry_data.mclk_dpm_key_disabled) {
if (data->smc_state_table.mem_boot_level !=
data->dpm_table.mem_table.dpm_state.soft_min_level) {
- if (data->smc_state_table.mem_boot_level == NUM_UCLK_DPM_LEVELS - 1) {
+ if ((data->smc_state_table.mem_boot_level == NUM_UCLK_DPM_LEVELS - 1)
+ && hwmgr->not_vf) {
socclk_idx = vega10_get_soc_index_for_max_uclk(hwmgr);
smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetSoftMinSocclkByIndex,
@@ -3518,6 +3554,9 @@ static int vega10_upload_dpm_bootup_level(struct pp_hwmgr *hwmgr)
}
}
+ if (!hwmgr->not_vf)
+ return 0;
+
if (!data->registry_data.socclk_dpm_key_disabled) {
if (data->smc_state_table.soc_boot_level !=
data->dpm_table.soc_table.dpm_state.soft_min_level) {
@@ -3560,6 +3599,9 @@ static int vega10_upload_dpm_max_level(struct pp_hwmgr *hwmgr)
}
}
+ if (!hwmgr->not_vf)
+ return 0;
+
if (!data->registry_data.socclk_dpm_key_disabled) {
if (data->smc_state_table.soc_max_level !=
data->dpm_table.soc_table.dpm_state.soft_max_level) {
@@ -4054,15 +4096,25 @@ static int vega10_get_profiling_clk_mask(struct pp_hwmgr *hwmgr, enum amd_dpm_fo
} else if (level == AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK) {
*mclk_mask = 0;
} else if (level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK) {
- *sclk_mask = table_info->vdd_dep_on_sclk->count - 1;
+ /* under vega10 pp one vf mode, the gfx clk dpm needs to be
+ * lowered to level 4 due to the limited power budget
+ */
+ if (hwmgr->pp_one_vf)
+ *sclk_mask = 4;
+ else
+ *sclk_mask = table_info->vdd_dep_on_sclk->count - 1;
*soc_mask = table_info->vdd_dep_on_socclk->count - 1;
*mclk_mask = table_info->vdd_dep_on_mclk->count - 1;
}
+
return 0;
}
static void vega10_set_fan_control_mode(struct pp_hwmgr *hwmgr, uint32_t mode)
{
+ if (!hwmgr->not_vf)
+ return;
+
switch (mode) {
case AMD_FAN_CTRL_NONE:
vega10_fan_ctrl_set_fan_speed_percent(hwmgr, 100);
@@ -4176,6 +4228,9 @@ static int vega10_dpm_force_dpm_level(struct pp_hwmgr *hwmgr,
break;
}
+ if (!hwmgr->not_vf)
+ return ret;
+
if (!ret) {
if (level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK && hwmgr->dpm_level != AMD_DPM_FORCED_LEVEL_PROFILE_PEAK)
vega10_set_fan_control_mode(hwmgr, AMD_FAN_CTRL_NONE);
@@ -4360,14 +4415,13 @@ static int vega10_set_watermarks_for_clocks_ranges(struct pp_hwmgr *hwmgr,
struct vega10_hwmgr *data = hwmgr->backend;
struct dm_pp_wm_sets_with_clock_ranges_soc15 *wm_with_clock_ranges = clock_range;
Watermarks_t *table = &(data->smc_state_table.water_marks_table);
- int result = 0;
if (!data->registry_data.disable_water_mark) {
smu_set_watermarks_for_clocks_ranges(table, wm_with_clock_ranges);
data->water_marks_bitmap = WaterMarksExist;
}
- return result;
+ return 0;
}
static int vega10_get_ppfeature_status(struct pp_hwmgr *hwmgr, char *buf)
@@ -4480,7 +4534,7 @@ static int vega10_print_clock_levels(struct pp_hwmgr *hwmgr,
struct vega10_pcie_table *pcie_table = &(data->dpm_table.pcie_table);
struct vega10_odn_clock_voltage_dependency_table *podn_vdd_dep = NULL;
- int i, now, size = 0;
+ int i, now, size = 0, count = 0;
switch (type) {
case PP_SCLK:
@@ -4490,7 +4544,12 @@ static int vega10_print_clock_levels(struct pp_hwmgr *hwmgr,
smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetCurrentGfxclkIndex);
now = smum_get_argument(hwmgr);
- for (i = 0; i < sclk_table->count; i++)
+ if (hwmgr->pp_one_vf &&
+ (hwmgr->dpm_level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK))
+ count = 5;
+ else
+ count = sclk_table->count;
+ for (i = 0; i < count; i++)
size += sprintf(buf + size, "%d: %uMhz %s\n",
i, sclk_table->dpm_levels[i].value / 100,
(i == now) ? "*" : "");
@@ -4701,6 +4760,9 @@ static int vega10_disable_dpm_tasks(struct pp_hwmgr *hwmgr)
{
int tmp_result, result = 0;
+ if (!hwmgr->not_vf)
+ return 0;
+
if (PP_CAP(PHM_PlatformCaps_ThermalController))
vega10_disable_thermal_protection(hwmgr);
@@ -5252,13 +5314,11 @@ static int vega10_get_performance_level(struct pp_hwmgr *hwmgr, const struct pp_
PHM_PerformanceLevel *level)
{
const struct vega10_power_state *ps;
- struct vega10_hwmgr *data;
uint32_t i;
if (level == NULL || hwmgr == NULL || state == NULL)
return -EINVAL;
- data = hwmgr->backend;
ps = cast_const_phw_vega10_power_state(state);
i = index > ps->performance_level_count - 1 ?
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_powertune.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_powertune.c
index 6f26cb241ecc..0a677d4bc87b 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_powertune.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_powertune.c
@@ -1343,6 +1343,9 @@ int vega10_enable_power_containment(struct pp_hwmgr *hwmgr)
hwmgr->default_power_limit = hwmgr->power_limit =
(uint32_t)(tdp_table->usMaximumPowerDeliveryLimit);
+ if (!hwmgr->not_vf)
+ return 0;
+
if (PP_CAP(PHM_PlatformCaps_PowerContainment)) {
if (data->smu_features[GNLD_PPT].supported)
PP_ASSERT_WITH_CODE(!vega10_enable_smc_features(hwmgr,
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.c
index 7af9ad450ac4..aca61d1ff3c2 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.c
@@ -499,8 +499,6 @@ static int vega12_get_number_of_dpm_level(struct pp_hwmgr *hwmgr,
static int vega12_get_dpm_frequency_by_index(struct pp_hwmgr *hwmgr,
PPCLK_e clkID, uint32_t index, uint32_t *clock)
{
- int result = 0;
-
/*
*SMU expects the Clock ID to be in the top 16 bits.
*Lower 16 bits specify the level
@@ -512,7 +510,7 @@ static int vega12_get_dpm_frequency_by_index(struct pp_hwmgr *hwmgr,
*clock = smum_get_argument(hwmgr);
- return result;
+ return 0;
}
static int vega12_setup_single_dpm_table(struct pp_hwmgr *hwmgr,
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c
index 5bcf0d684151..3b3ec5666051 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c
@@ -872,7 +872,7 @@ static int vega20_override_pcie_parameters(struct pp_hwmgr *hwmgr)
"[OverridePcieParameters] Attempt to override pcie params failed!",
return ret);
- data->pcie_parameters_override = 1;
+ data->pcie_parameters_override = true;
data->pcie_gen_level1 = pcie_gen;
data->pcie_width_level1 = pcie_width;
diff --git a/drivers/gpu/drm/amd/powerplay/inc/amdgpu_smu.h b/drivers/gpu/drm/amd/powerplay/inc/amdgpu_smu.h
index 41fce75b263f..b0591a8dda41 100644
--- a/drivers/gpu/drm/amd/powerplay/inc/amdgpu_smu.h
+++ b/drivers/gpu/drm/amd/powerplay/inc/amdgpu_smu.h
@@ -254,11 +254,21 @@ struct smu_table_context
unsigned long metrics_time;
void *metrics_table;
void *clocks_table;
+ void *watermarks_table;
void *max_sustainable_clocks;
struct smu_bios_boot_up_values boot_values;
void *driver_pptable;
struct smu_table *tables;
+ /*
+ * The driver table is just a staging buffer for
+ * uploading/downloading content from the SMU.
+ *
+ * The table_id passed with SMU_MSG_TransferTableSmu2Dram/
+ * SMU_MSG_TransferTableDram2Smu tells the SMU which
+ * content the driver is interested in.
+ */
+ struct smu_table driver_table;
struct smu_table memory_pool;
uint8_t thermal_controller_type;
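
The comment above describes driver_table as a staging buffer keyed by table_id. A hedged worked example of the two transfer directions, using calls that appear elsewhere in this patch (the boolean direction argument — true for driver-to-SMU, false for SMU-to-driver — is inferred from those callers):

	/* Download (SMU -> driver): last argument false. */
	ret = smu_update_table(smu, SMU_TABLE_ACTIVITY_MONITOR_COEFF,
			       WORKLOAD_PPLIB_CUSTOM_BIT,
			       (void *)(&activity_monitor), false);

	/* Upload (driver -> SMU): last argument true. */
	ret = smu_update_table(smu, SMU_TABLE_WATERMARKS, 0,
			       watermarks_table, true);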
@@ -282,6 +292,7 @@ struct smu_power_gate {
bool uvd_gated;
bool vce_gated;
bool vcn_gated;
+ bool jpeg_gated;
};
struct smu_power_context {
@@ -437,12 +448,13 @@ struct pptable_funcs {
int (*set_power_profile_mode)(struct smu_context *smu, long *input, uint32_t size);
int (*dpm_set_uvd_enable)(struct smu_context *smu, bool enable);
int (*dpm_set_vce_enable)(struct smu_context *smu, bool enable);
+ int (*dpm_set_jpeg_enable)(struct smu_context *smu, bool enable);
int (*read_sensor)(struct smu_context *smu, enum amd_pp_sensors sensor,
void *data, uint32_t *size);
int (*pre_display_config_changed)(struct smu_context *smu);
int (*display_config_changed)(struct smu_context *smu);
int (*apply_clocks_adjust_rules)(struct smu_context *smu);
- int (*notify_smc_dispaly_config)(struct smu_context *smu);
+ int (*notify_smc_display_config)(struct smu_context *smu);
int (*force_dpm_limit_value)(struct smu_context *smu, bool highest);
int (*unforce_dpm_levels)(struct smu_context *smu);
int (*get_profiling_clk_mask)(struct smu_context *smu,
@@ -491,9 +503,11 @@ struct pptable_funcs {
int (*check_fw_version)(struct smu_context *smu);
int (*powergate_sdma)(struct smu_context *smu, bool gate);
int (*powergate_vcn)(struct smu_context *smu, bool gate);
+ int (*powergate_jpeg)(struct smu_context *smu, bool gate);
int (*set_gfx_cgpg)(struct smu_context *smu, bool enable);
int (*write_pptable)(struct smu_context *smu);
int (*set_min_dcef_deep_sleep)(struct smu_context *smu);
+ int (*set_driver_table_location)(struct smu_context *smu);
int (*set_tool_table_location)(struct smu_context *smu);
int (*notify_memory_pool_location)(struct smu_context *smu);
int (*set_last_dcef_min_deep_sleep_clk)(struct smu_context *smu);
@@ -544,7 +558,8 @@ struct pptable_funcs {
bool (*baco_is_support)(struct smu_context *smu);
enum smu_baco_state (*baco_get_state)(struct smu_context *smu);
int (*baco_set_state)(struct smu_context *smu, enum smu_baco_state state);
- int (*baco_reset)(struct smu_context *smu);
+ int (*baco_enter)(struct smu_context *smu);
+ int (*baco_exit)(struct smu_context *smu);
int (*mode2_reset)(struct smu_context *smu);
int (*get_dpm_ultimate_freq)(struct smu_context *smu, enum smu_clk_type clk_type, uint32_t *min, uint32_t *max);
int (*set_soft_freq_limited_range)(struct smu_context *smu, enum smu_clk_type clk_type, uint32_t min, uint32_t max);
@@ -626,7 +641,8 @@ bool smu_baco_is_support(struct smu_context *smu);
int smu_baco_get_state(struct smu_context *smu, enum smu_baco_state *state);
-int smu_baco_reset(struct smu_context *smu);
+int smu_baco_enter(struct smu_context *smu);
+int smu_baco_exit(struct smu_context *smu);
int smu_mode2_reset(struct smu_context *smu);
@@ -692,6 +708,8 @@ int smu_set_soft_freq_range(struct smu_context *smu, enum smu_clk_type clk_type,
uint32_t min, uint32_t max);
int smu_set_hard_freq_range(struct smu_context *smu, enum smu_clk_type clk_type,
uint32_t min, uint32_t max);
+int smu_get_dpm_level_range(struct smu_context *smu, enum smu_clk_type clk_type,
+ uint32_t *min_value, uint32_t *max_value);
enum amd_dpm_forced_level smu_get_performance_level(struct smu_context *smu);
int smu_force_performance_level(struct smu_context *smu, enum amd_dpm_forced_level level);
int smu_set_display_count(struct smu_context *smu, uint32_t count);
diff --git a/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h b/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h
index af977675fd33..2ffb666b97e6 100644
--- a/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h
+++ b/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h
@@ -741,6 +741,7 @@ struct pp_hwmgr {
uint32_t smu_version;
bool not_vf;
bool pm_en;
+ bool pp_one_vf;
struct mutex smu_lock;
uint32_t pp_table_version;
diff --git a/drivers/gpu/drm/amd/powerplay/inc/smu11_driver_if_arcturus.h b/drivers/gpu/drm/amd/powerplay/inc/smu11_driver_if_arcturus.h
index a886f0644d24..ce5b5011c122 100644
--- a/drivers/gpu/drm/amd/powerplay/inc/smu11_driver_if_arcturus.h
+++ b/drivers/gpu/drm/amd/powerplay/inc/smu11_driver_if_arcturus.h
@@ -622,8 +622,14 @@ typedef struct {
uint16_t PccThresholdHigh;
uint32_t PaddingAPCC[6]; //FIXME pending SPEC
+ // OOB Settings
+ uint16_t BasePerformanceCardPower;
+ uint16_t MaxPerformanceCardPower;
+ uint16_t BasePerformanceFrequencyCap; // In MHz
+ uint16_t MaxPerformanceFrequencyCap; // In MHz
+
// SECTION: Reserved
- uint32_t Reserved[11];
+ uint32_t Reserved[9];
// SECTION: BOARD PARAMETERS
@@ -823,7 +829,6 @@ typedef struct {
uint32_t MmHubPadding[8]; // SMU internal use
} AvfsFuseOverride_t;
-/* NOT CURRENTLY USED
typedef struct {
uint8_t Gfx_ActiveHystLimit;
uint8_t Gfx_IdleHystLimit;
@@ -866,7 +871,6 @@ typedef struct {
uint32_t MmHubPadding[8]; // SMU internal use
} DpmActivityMonitorCoeffInt_t;
-*/
// These defines are used with the following messages:
// SMC_MSG_TransferTableDram2Smu
@@ -878,11 +882,11 @@ typedef struct {
#define TABLE_PMSTATUSLOG 4
#define TABLE_SMU_METRICS 5
#define TABLE_DRIVER_SMU_CONFIG 6
-//#define TABLE_ACTIVITY_MONITOR_COEFF 7
#define TABLE_OVERDRIVE 7
#define TABLE_WAFL_XGMI_TOPOLOGY 8
#define TABLE_I2C_COMMANDS 9
-#define TABLE_COUNT 10
+#define TABLE_ACTIVITY_MONITOR_COEFF 10
+#define TABLE_COUNT 11
// These defines are used with the SMC_MSG_SetUclkFastSwitch message.
typedef enum {
diff --git a/drivers/gpu/drm/amd/powerplay/inc/smu12_driver_if.h b/drivers/gpu/drm/amd/powerplay/inc/smu12_driver_if.h
index c27c82851468..2f85a34c0591 100644
--- a/drivers/gpu/drm/amd/powerplay/inc/smu12_driver_if.h
+++ b/drivers/gpu/drm/amd/powerplay/inc/smu12_driver_if.h
@@ -27,7 +27,7 @@
// *** IMPORTANT ***
// SMU TEAM: Always increment the interface version if
// any structure is changed in this file
-#define SMU12_DRIVER_IF_VERSION 10
+#define SMU12_DRIVER_IF_VERSION 11
typedef struct {
int32_t value;
@@ -192,6 +192,11 @@ typedef struct {
uint16_t SocTemperature; //[centi-Celsius]
uint16_t ThrottlerStatus;
uint16_t spare;
+
+ uint16_t StapmOriginalLimit; //[mW]
+ uint16_t StapmCurrentLimit; //[mW]
+ uint16_t ApuPower; //[mW]
+ uint16_t dGpuPower; //[mW]
} SmuMetrics_t;
diff --git a/drivers/gpu/drm/amd/powerplay/inc/smu_v11_0.h b/drivers/gpu/drm/amd/powerplay/inc/smu_v11_0.h
index 719844257713..d5314d12628a 100644
--- a/drivers/gpu/drm/amd/powerplay/inc/smu_v11_0.h
+++ b/drivers/gpu/drm/amd/powerplay/inc/smu_v11_0.h
@@ -27,7 +27,7 @@
#define SMU11_DRIVER_IF_VERSION_INV 0xFFFFFFFF
#define SMU11_DRIVER_IF_VERSION_VG20 0x13
-#define SMU11_DRIVER_IF_VERSION_ARCT 0x10
+#define SMU11_DRIVER_IF_VERSION_ARCT 0x12
#define SMU11_DRIVER_IF_VERSION_NV10 0x33
#define SMU11_DRIVER_IF_VERSION_NV14 0x34
@@ -170,6 +170,8 @@ int smu_v11_0_write_pptable(struct smu_context *smu);
int smu_v11_0_set_min_dcef_deep_sleep(struct smu_context *smu);
+int smu_v11_0_set_driver_table_location(struct smu_context *smu);
+
int smu_v11_0_set_tool_table_location(struct smu_context *smu);
int smu_v11_0_notify_memory_pool_location(struct smu_context *smu);
@@ -247,7 +249,8 @@ enum smu_baco_state smu_v11_0_baco_get_state(struct smu_context *smu);
int smu_v11_0_baco_set_state(struct smu_context *smu, enum smu_baco_state state);
-int smu_v11_0_baco_reset(struct smu_context *smu);
+int smu_v11_0_baco_enter(struct smu_context *smu);
+int smu_v11_0_baco_exit(struct smu_context *smu);
int smu_v11_0_get_dpm_ultimate_freq(struct smu_context *smu, enum smu_clk_type clk_type,
uint32_t *min, uint32_t *max);
@@ -261,4 +264,7 @@ int smu_v11_0_set_default_od_settings(struct smu_context *smu, bool initialize,
uint32_t smu_v11_0_get_max_power_limit(struct smu_context *smu);
+int smu_v11_0_set_performance_level(struct smu_context *smu,
+ enum amd_dpm_forced_level level);
+
#endif
diff --git a/drivers/gpu/drm/amd/powerplay/inc/smu_v12_0.h b/drivers/gpu/drm/amd/powerplay/inc/smu_v12_0.h
index 9d81d789c713..d79e54b5ebf6 100644
--- a/drivers/gpu/drm/amd/powerplay/inc/smu_v12_0.h
+++ b/drivers/gpu/drm/amd/powerplay/inc/smu_v12_0.h
@@ -57,8 +57,14 @@ int smu_v12_0_powergate_sdma(struct smu_context *smu, bool gate);
int smu_v12_0_powergate_vcn(struct smu_context *smu, bool gate);
+int smu_v12_0_powergate_jpeg(struct smu_context *smu, bool gate);
+
int smu_v12_0_set_gfx_cgpg(struct smu_context *smu, bool enable);
+int smu_v12_0_read_sensor(struct smu_context *smu,
+ enum amd_pp_sensors sensor,
+ void *data, uint32_t *size);
+
uint32_t smu_v12_0_get_gfxoff_status(struct smu_context *smu);
int smu_v12_0_gfx_off_control(struct smu_context *smu, bool enable);
@@ -69,6 +75,13 @@ int smu_v12_0_fini_smc_tables(struct smu_context *smu);
int smu_v12_0_populate_smc_tables(struct smu_context *smu);
+int smu_v12_0_get_enabled_mask(struct smu_context *smu,
+ uint32_t *feature_mask, uint32_t num);
+
+int smu_v12_0_get_current_clk_freq(struct smu_context *smu,
+ enum smu_clk_type clk_id,
+ uint32_t *value);
+
int smu_v12_0_get_dpm_ultimate_freq(struct smu_context *smu, enum smu_clk_type clk_type,
uint32_t *min, uint32_t *max);
@@ -77,4 +90,6 @@ int smu_v12_0_mode2_reset(struct smu_context *smu);
int smu_v12_0_set_soft_freq_limited_range(struct smu_context *smu, enum smu_clk_type clk_type,
uint32_t min, uint32_t max);
+int smu_v12_0_set_driver_table_location(struct smu_context *smu);
+
#endif
diff --git a/drivers/gpu/drm/amd/powerplay/navi10_ppt.c b/drivers/gpu/drm/amd/powerplay/navi10_ppt.c
index ca62e92e5a4f..93c66c69ca28 100644
--- a/drivers/gpu/drm/amd/powerplay/navi10_ppt.c
+++ b/drivers/gpu/drm/amd/powerplay/navi10_ppt.c
@@ -384,8 +384,10 @@ navi10_get_allowed_feature_mask(struct smu_context *smu,
*(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_ATHUB_PG_BIT);
if (smu->adev->pg_flags & AMD_PG_SUPPORT_VCN)
- *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_VCN_PG_BIT)
- | FEATURE_MASK(FEATURE_JPEG_PG_BIT);
+ *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_VCN_PG_BIT);
+
+ if (smu->adev->pg_flags & AMD_PG_SUPPORT_JPEG)
+ *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_JPEG_PG_BIT);
/* disable DPM UCLK and DS SOCCLK on navi10 A0 secure board */
if (is_asic_secure(smu)) {
@@ -553,6 +555,10 @@ static int navi10_tables_init(struct smu_context *smu, struct smu_table *tables)
return -ENOMEM;
smu_table->metrics_time = 0;
+ smu_table->watermarks_table = kzalloc(sizeof(Watermarks_t), GFP_KERNEL);
+ if (!smu_table->watermarks_table)
+ return -ENOMEM;
+
return 0;
}
@@ -668,6 +674,31 @@ static int navi10_dpm_set_uvd_enable(struct smu_context *smu, bool enable)
return ret;
}
+static int navi10_dpm_set_jpeg_enable(struct smu_context *smu, bool enable)
+{
+ struct smu_power_context *smu_power = &smu->smu_power;
+ struct smu_power_gate *power_gate = &smu_power->power_gate;
+ int ret = 0;
+
+ if (enable) {
+ if (smu_feature_is_enabled(smu, SMU_FEATURE_JPEG_PG_BIT)) {
+ ret = smu_send_smc_msg(smu, SMU_MSG_PowerUpJpeg);
+ if (ret)
+ return ret;
+ }
+ power_gate->jpeg_gated = false;
+ } else {
+ if (smu_feature_is_enabled(smu, SMU_FEATURE_JPEG_PG_BIT)) {
+ ret = smu_send_smc_msg(smu, SMU_MSG_PowerDownJpeg);
+ if (ret)
+ return ret;
+ }
+ power_gate->jpeg_gated = true;
+ }
+
+ return ret;
+}
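
The hook above is reached from the APU paths in smu_hw_init()/smu_hw_fini() via smu_powergate_jpeg(); the mapping of the gate flag onto the enable argument is assumed here to be an inversion, as with the existing VCN hook:

	/* Assumed call path (sketch): gate == true powers the block down. */
	smu_powergate_jpeg(&adev->smu, false);	/* init: ungate, PowerUpJpeg   */
	smu_powergate_jpeg(&adev->smu, true);	/* fini: gate,   PowerDownJpeg */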
+
static int navi10_get_current_clk_freq_by_table(struct smu_context *smu,
enum smu_clk_type clk_type,
uint32_t *value)
@@ -1350,7 +1381,7 @@ static int navi10_get_profiling_clk_mask(struct smu_context *smu,
return ret;
}
-static int navi10_notify_smc_dispaly_config(struct smu_context *smu)
+static int navi10_notify_smc_display_config(struct smu_context *smu)
{
struct smu_clocks min_clocks = {0};
struct pp_display_clock_request clock_req;
@@ -1555,12 +1586,44 @@ static int navi10_get_uclk_dpm_states(struct smu_context *smu, uint32_t *clocks_
return 0;
}
-static int navi10_set_peak_clock_by_device(struct smu_context *smu)
+static int navi10_set_performance_level(struct smu_context *smu,
+ enum amd_dpm_forced_level level);
+
+static int navi10_set_standard_performance_level(struct smu_context *smu)
+{
+ struct amdgpu_device *adev = smu->adev;
+ int ret = 0;
+ uint32_t sclk_freq = 0, uclk_freq = 0;
+
+ switch (adev->asic_type) {
+ case CHIP_NAVI10:
+ sclk_freq = NAVI10_UMD_PSTATE_PROFILING_GFXCLK;
+ uclk_freq = NAVI10_UMD_PSTATE_PROFILING_MEMCLK;
+ break;
+ case CHIP_NAVI14:
+ sclk_freq = NAVI14_UMD_PSTATE_PROFILING_GFXCLK;
+ uclk_freq = NAVI14_UMD_PSTATE_PROFILING_MEMCLK;
+ break;
+ default:
+ /* by default, this is same as auto performance level */
+ return navi10_set_performance_level(smu, AMD_DPM_FORCED_LEVEL_AUTO);
+ }
+
+ ret = smu_set_soft_freq_range(smu, SMU_SCLK, sclk_freq, sclk_freq);
+ if (ret)
+ return ret;
+ ret = smu_set_soft_freq_range(smu, SMU_UCLK, uclk_freq, uclk_freq);
+ if (ret)
+ return ret;
+
+ return 0;
+}
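
The STANDARD level pins a clock by collapsing the soft min/max range to a single frequency. A worked example for CHIP_NAVI10 using the profiling defines added in navi10_ppt.h (values presumably in MHz):

	/* Pin GFXCLK to the Navi10 profiling frequency by setting
	 * min == max in the soft range. */
	ret = smu_set_soft_freq_range(smu, SMU_SCLK,
				      NAVI10_UMD_PSTATE_PROFILING_GFXCLK,
				      NAVI10_UMD_PSTATE_PROFILING_GFXCLK);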
+
+static int navi10_set_peak_performance_level(struct smu_context *smu)
{
struct amdgpu_device *adev = smu->adev;
int ret = 0;
uint32_t sclk_freq = 0, uclk_freq = 0;
- uint32_t uclk_level = 0;
switch (adev->asic_type) {
case CHIP_NAVI10:
@@ -1601,14 +1664,16 @@ static int navi10_set_peak_clock_by_device(struct smu_context *smu)
break;
}
break;
+ case CHIP_NAVI12:
+ sclk_freq = NAVI12_UMD_PSTATE_PEAK_GFXCLK;
+ break;
default:
- return -EINVAL;
+ ret = smu_get_dpm_level_range(smu, SMU_SCLK, NULL, &sclk_freq);
+ if (ret)
+ return ret;
}
- ret = smu_get_dpm_level_count(smu, SMU_UCLK, &uclk_level);
- if (ret)
- return ret;
- ret = smu_get_dpm_freq_by_index(smu, SMU_UCLK, uclk_level - 1, &uclk_freq);
+ ret = smu_get_dpm_level_range(smu, SMU_UCLK, NULL, &uclk_freq);
if (ret)
return ret;
@@ -1622,19 +1687,45 @@ static int navi10_set_peak_clock_by_device(struct smu_context *smu)
return ret;
}
-static int navi10_set_performance_level(struct smu_context *smu, enum amd_dpm_forced_level level)
+static int navi10_set_performance_level(struct smu_context *smu,
+ enum amd_dpm_forced_level level)
{
int ret = 0;
+ uint32_t sclk_mask, mclk_mask, soc_mask;
switch (level) {
+ case AMD_DPM_FORCED_LEVEL_HIGH:
+ ret = smu_force_dpm_limit_value(smu, true);
+ break;
+ case AMD_DPM_FORCED_LEVEL_LOW:
+ ret = smu_force_dpm_limit_value(smu, false);
+ break;
+ case AMD_DPM_FORCED_LEVEL_AUTO:
+ ret = smu_unforce_dpm_levels(smu);
+ break;
+ case AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD:
+ ret = navi10_set_standard_performance_level(smu);
+ break;
+ case AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK:
+ case AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK:
+ ret = smu_get_profiling_clk_mask(smu, level,
+ &sclk_mask,
+ &mclk_mask,
+ &soc_mask);
+ if (ret)
+ return ret;
+ smu_force_clk_levels(smu, SMU_SCLK, 1 << sclk_mask, false);
+ smu_force_clk_levels(smu, SMU_MCLK, 1 << mclk_mask, false);
+ smu_force_clk_levels(smu, SMU_SOCCLK, 1 << soc_mask, false);
+ break;
case AMD_DPM_FORCED_LEVEL_PROFILE_PEAK:
- ret = navi10_set_peak_clock_by_device(smu);
+ ret = navi10_set_peak_performance_level(smu);
break;
+ case AMD_DPM_FORCED_LEVEL_MANUAL:
+ case AMD_DPM_FORCED_LEVEL_PROFILE_EXIT:
default:
- ret = -EINVAL;
break;
}
-
return ret;
}
@@ -2015,6 +2106,7 @@ static const struct pptable_funcs navi10_ppt_funcs = {
.get_allowed_feature_mask = navi10_get_allowed_feature_mask,
.set_default_dpm_table = navi10_set_default_dpm_table,
.dpm_set_uvd_enable = navi10_dpm_set_uvd_enable,
+ .dpm_set_jpeg_enable = navi10_dpm_set_jpeg_enable,
.get_current_clk_freq_by_table = navi10_get_current_clk_freq_by_table,
.print_clk_levels = navi10_print_clk_levels,
.force_clk_levels = navi10_force_clk_levels,
@@ -2022,7 +2114,7 @@ static const struct pptable_funcs navi10_ppt_funcs = {
.get_clock_by_type_with_latency = navi10_get_clock_by_type_with_latency,
.pre_display_config_changed = navi10_pre_display_config_changed,
.display_config_changed = navi10_display_config_changed,
- .notify_smc_dispaly_config = navi10_notify_smc_dispaly_config,
+ .notify_smc_display_config = navi10_notify_smc_display_config,
.force_dpm_limit_value = navi10_force_dpm_limit_value,
.unforce_dpm_levels = navi10_unforce_dpm_levels,
.is_dpm_running = navi10_is_dpm_running,
@@ -2055,6 +2147,7 @@ static const struct pptable_funcs navi10_ppt_funcs = {
.check_fw_version = smu_v11_0_check_fw_version,
.write_pptable = smu_v11_0_write_pptable,
.set_min_dcef_deep_sleep = smu_v11_0_set_min_dcef_deep_sleep,
+ .set_driver_table_location = smu_v11_0_set_driver_table_location,
.set_tool_table_location = smu_v11_0_set_tool_table_location,
.notify_memory_pool_location = smu_v11_0_notify_memory_pool_location,
.system_features_control = smu_v11_0_system_features_control,
@@ -2083,7 +2176,8 @@ static const struct pptable_funcs navi10_ppt_funcs = {
.baco_is_support= smu_v11_0_baco_is_support,
.baco_get_state = smu_v11_0_baco_get_state,
.baco_set_state = smu_v11_0_baco_set_state,
- .baco_reset = smu_v11_0_baco_reset,
+ .baco_enter = smu_v11_0_baco_enter,
+ .baco_exit = smu_v11_0_baco_exit,
.get_dpm_ultimate_freq = smu_v11_0_get_dpm_ultimate_freq,
.set_soft_freq_limited_range = smu_v11_0_set_soft_freq_limited_range,
.override_pcie_parameters = smu_v11_0_override_pcie_parameters,
diff --git a/drivers/gpu/drm/amd/powerplay/navi10_ppt.h b/drivers/gpu/drm/amd/powerplay/navi10_ppt.h
index ec03c7992f6d..2abb4ba01db1 100644
--- a/drivers/gpu/drm/amd/powerplay/navi10_ppt.h
+++ b/drivers/gpu/drm/amd/powerplay/navi10_ppt.h
@@ -27,12 +27,26 @@
#define NAVI10_PEAK_SCLK_XT (1755)
#define NAVI10_PEAK_SCLK_XL (1625)
+#define NAVI10_UMD_PSTATE_PROFILING_GFXCLK (1300)
+#define NAVI10_UMD_PSTATE_PROFILING_SOCCLK (980)
+#define NAVI10_UMD_PSTATE_PROFILING_MEMCLK (625)
+#define NAVI10_UMD_PSTATE_PROFILING_VCLK (980)
+#define NAVI10_UMD_PSTATE_PROFILING_DCLK (850)
+
#define NAVI14_UMD_PSTATE_PEAK_XT_GFXCLK (1670)
#define NAVI14_UMD_PSTATE_PEAK_XTM_GFXCLK (1448)
#define NAVI14_UMD_PSTATE_PEAK_XLM_GFXCLK (1181)
#define NAVI14_UMD_PSTATE_PEAK_XTX_GFXCLK (1717)
#define NAVI14_UMD_PSTATE_PEAK_XL_GFXCLK (1448)
+#define NAVI14_UMD_PSTATE_PROFILING_GFXCLK (1200)
+#define NAVI14_UMD_PSTATE_PROFILING_SOCCLK (900)
+#define NAVI14_UMD_PSTATE_PROFILING_MEMCLK (600)
+#define NAVI14_UMD_PSTATE_PROFILING_VCLK (900)
+#define NAVI14_UMD_PSTATE_PROFILING_DCLK (800)
+
+#define NAVI12_UMD_PSTATE_PEAK_GFXCLK (1100)
+
#define NAVI10_VOLTAGE_SCALE (4)
#define smnPCIE_LC_SPEED_CNTL 0x11140290
diff --git a/drivers/gpu/drm/amd/powerplay/renoir_ppt.c b/drivers/gpu/drm/amd/powerplay/renoir_ppt.c
index 977bdd962e98..861e6410363b 100644
--- a/drivers/gpu/drm/amd/powerplay/renoir_ppt.c
+++ b/drivers/gpu/drm/amd/powerplay/renoir_ppt.c
@@ -31,6 +31,9 @@
#include "renoir_ppt.h"
+#define CLK_MAP(clk, index) \
+ [SMU_##clk] = {1, (index)}
+
#define MSG_MAP(msg, index) \
[SMU_MSG_##msg] = {1, (index)}
@@ -104,6 +107,14 @@ static struct smu_12_0_cmn2aisc_mapping renoir_message_map[SMU_MSG_MAX_COUNT] =
MSG_MAP(SetHardMinFclkByFreq, PPSMC_MSG_SetHardMinFclkByFreq),
};
+static struct smu_12_0_cmn2aisc_mapping renoir_clk_map[SMU_CLK_COUNT] = {
+ CLK_MAP(GFXCLK, CLOCK_GFXCLK),
+ CLK_MAP(SCLK, CLOCK_GFXCLK),
+ CLK_MAP(SOCCLK, CLOCK_SOCCLK),
+ CLK_MAP(UCLK, CLOCK_UMCCLK),
+ CLK_MAP(MCLK, CLOCK_UMCCLK),
+};
+
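The CLK_MAP macro above uses C99 designated array initializers to build a sparse lookup table keyed by the generic SMU clock enum; entries that are never listed stay zero-initialized, so their valid_mapping flag is false and the index lookup rejects them. A minimal userspace sketch of the same pattern (enum values and indices are illustrative, not the driver's):

#include <stdio.h>

enum clk { CLK_GFXCLK, CLK_SOCCLK, CLK_UCLK, CLK_COUNT };

struct mapping { int valid; int map_to; };

#define CLK_MAP(clk, index) [CLK_##clk] = { 1, (index) }

static const struct mapping clk_map[CLK_COUNT] = {
	CLK_MAP(GFXCLK, 10),
	CLK_MAP(UCLK, 12),
	/* CLK_SOCCLK intentionally unmapped: stays { 0, 0 } */
};

static int get_clk_index(unsigned int idx)
{
	if (idx >= CLK_COUNT || !clk_map[idx].valid)
		return -1;
	return clk_map[idx].map_to;
}

int main(void)
{
	printf("GFXCLK -> %d\n", get_clk_index(CLK_GFXCLK)); /* 10 */
	printf("SOCCLK -> %d\n", get_clk_index(CLK_SOCCLK)); /* -1 */
	return 0;
}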
static struct smu_12_0_cmn2aisc_mapping renoir_table_map[SMU_TABLE_COUNT] = {
TAB_MAP_VALID(WATERMARKS),
TAB_MAP_INVALID(CUSTOM_DPM),
@@ -125,6 +136,21 @@ static int renoir_get_smu_msg_index(struct smu_context *smc, uint32_t index)
return mapping.map_to;
}
+static int renoir_get_smu_clk_index(struct smu_context *smc, uint32_t index)
+{
+ struct smu_12_0_cmn2aisc_mapping mapping;
+
+ if (index >= SMU_CLK_COUNT)
+ return -EINVAL;
+
+ mapping = renoir_clk_map[index];
+ if (!mapping.valid_mapping)
+ return -EINVAL;
+
+ return mapping.map_to;
+}
+
static int renoir_get_smu_table_index(struct smu_context *smc, uint32_t index)
{
struct smu_12_0_cmn2aisc_mapping mapping;
@@ -139,6 +165,30 @@ static int renoir_get_smu_table_index(struct smu_context *smc, uint32_t index)
return mapping.map_to;
}
+static int renoir_get_metrics_table(struct smu_context *smu,
+ SmuMetrics_t *metrics_table)
+{
+ struct smu_table_context *smu_table = &smu->smu_table;
+ int ret = 0;
+
+ mutex_lock(&smu->metrics_lock);
+ if (!smu_table->metrics_time ||
+     time_after(jiffies, smu_table->metrics_time + msecs_to_jiffies(100))) {
+ ret = smu_update_table(smu, SMU_TABLE_SMU_METRICS, 0,
+ (void *)smu_table->metrics_table, false);
+ if (ret) {
+ pr_info("Failed to export SMU metrics table!\n");
+ mutex_unlock(&smu->metrics_lock);
+ return ret;
+ }
+ smu_table->metrics_time = jiffies;
+ }
+
+ memcpy(metrics_table, smu_table->metrics_table, sizeof(SmuMetrics_t));
+ mutex_unlock(&smu->metrics_lock);
+
+ return ret;
+}
+
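renoir_get_metrics_table() is a small read-through cache: the expensive SMU table export is re-issued only when the cached copy is older than 100 ms (tracked in jiffies), and metrics_lock serializes concurrent readers so every caller sees a consistent snapshot. The shape of that pattern, sketched in portable C with clock_gettime() standing in for jiffies and a stub standing in for the SMU call:

#include <pthread.h>
#include <stdio.h>
#include <string.h>
#include <time.h>

struct metrics { int gfx_activity; };

static struct metrics cached;
static long cached_ms;                 /* 0 means "never fetched" */
static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

static long now_ms(void)
{
	struct timespec ts;

	clock_gettime(CLOCK_MONOTONIC, &ts);
	return ts.tv_sec * 1000L + ts.tv_nsec / 1000000L;
}

static int fetch_from_hw(struct metrics *m)   /* stand-in for smu_update_table() */
{
	m->gfx_activity = 42;
	return 0;
}

static int get_metrics(struct metrics *out)
{
	int ret = 0;

	pthread_mutex_lock(&lock);
	if (!cached_ms || now_ms() - cached_ms > 100) {
		ret = fetch_from_hw(&cached);  /* refresh at most every 100 ms */
		if (ret) {
			pthread_mutex_unlock(&lock);
			return ret;
		}
		cached_ms = now_ms();
	}
	memcpy(out, &cached, sizeof(*out));    /* always serve the cached copy */
	pthread_mutex_unlock(&lock);
	return ret;
}

int main(void)
{
	struct metrics m;

	get_metrics(&m);
	printf("activity=%d\n", m.gfx_activity);
	return 0;
}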
static int renoir_tables_init(struct smu_context *smu, struct smu_table *tables)
{
struct smu_table_context *smu_table = &smu->smu_table;
@@ -154,6 +204,15 @@ static int renoir_tables_init(struct smu_context *smu, struct smu_table *tables)
if (!smu_table->clocks_table)
return -ENOMEM;
+ smu_table->metrics_table = kzalloc(sizeof(SmuMetrics_t), GFP_KERNEL);
+ if (!smu_table->metrics_table)
+ return -ENOMEM;
+ smu_table->metrics_time = 0;
+
+ smu_table->watermarks_table = kzalloc(sizeof(Watermarks_t), GFP_KERNEL);
+ if (!smu_table->watermarks_table)
+ return -ENOMEM;
+
return 0;
}
@@ -187,8 +246,7 @@ static int renoir_print_clk_levels(struct smu_context *smu,
memset(&metrics, 0, sizeof(metrics));
- ret = smu_update_table(smu, SMU_TABLE_SMU_METRICS, 0,
- (void *)&metrics, false);
+ ret = renoir_get_metrics_table(smu, &metrics);
if (ret)
return ret;
@@ -301,6 +359,51 @@ static int renoir_dpm_set_uvd_enable(struct smu_context *smu, bool enable)
return ret;
}
+static int renoir_dpm_set_jpeg_enable(struct smu_context *smu, bool enable)
+{
+ struct smu_power_context *smu_power = &smu->smu_power;
+ struct smu_power_gate *power_gate = &smu_power->power_gate;
+ int ret = 0;
+
+ if (enable) {
+ if (smu_feature_is_enabled(smu, SMU_FEATURE_JPEG_PG_BIT)) {
+ ret = smu_send_smc_msg_with_param(smu, SMU_MSG_PowerUpJpeg, 0);
+ if (ret)
+ return ret;
+ }
+ power_gate->jpeg_gated = false;
+ } else {
+ if (smu_feature_is_enabled(smu, SMU_FEATURE_JPEG_PG_BIT)) {
+ ret = smu_send_smc_msg_with_param(smu, SMU_MSG_PowerDownJpeg, 0);
+ if (ret)
+ return ret;
+ }
+ power_gate->jpeg_gated = true;
+ }
+
+ return ret;
+}
+
+static int renoir_get_current_clk_freq_by_table(struct smu_context *smu,
+ enum smu_clk_type clk_type,
+ uint32_t *value)
+{
+ int ret = 0, clk_id = 0;
+ SmuMetrics_t metrics;
+
+ ret = renoir_get_metrics_table(smu, &metrics);
+ if (ret)
+ return ret;
+
+ clk_id = smu_clk_get_index(smu, clk_type);
+ if (clk_id < 0)
+ return clk_id;
+
+ *value = metrics.ClockFrequency[clk_id];
+
+ return ret;
+}
+
static int renoir_force_dpm_limit_value(struct smu_context *smu, bool highest)
{
int ret = 0, i = 0;
@@ -361,6 +464,50 @@ static int renoir_unforce_dpm_levels(struct smu_context *smu) {
return ret;
}
+static int renoir_get_gpu_temperature(struct smu_context *smu, uint32_t *value)
+{
+ int ret = 0;
+ SmuMetrics_t metrics;
+
+ if (!value)
+ return -EINVAL;
+
+ ret = renoir_get_metrics_table(smu, &metrics);
+ if (ret)
+ return ret;
+
+ *value = (metrics.GfxTemperature / 100) *
+ SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;
+
+ return 0;
+}
+
+static int renoir_get_current_activity_percent(struct smu_context *smu,
+ enum amd_pp_sensors sensor,
+ uint32_t *value)
+{
+ int ret = 0;
+ SmuMetrics_t metrics;
+
+ if (!value)
+ return -EINVAL;
+
+ ret = renoir_get_metrics_table(smu, &metrics);
+ if (ret)
+ return ret;
+
+ switch (sensor) {
+ case AMDGPU_PP_SENSOR_GPU_LOAD:
+ *value = metrics.AverageGfxActivity / 100;
+ break;
+ default:
+ pr_err("Invalid sensor for retrieving clock activity\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
static int renoir_get_workload_type(struct smu_context *smu, uint32_t profile)
{
@@ -565,19 +712,43 @@ static int renoir_set_peak_clock_by_device(struct smu_context *smu)
return ret;
}
-static int renoir_set_performance_level(struct smu_context *smu, enum amd_dpm_forced_level level)
+static int renoir_set_performance_level(struct smu_context *smu,
+ enum amd_dpm_forced_level level)
{
int ret = 0;
+ uint32_t sclk_mask, mclk_mask, soc_mask;
switch (level) {
+ case AMD_DPM_FORCED_LEVEL_HIGH:
+ ret = smu_force_dpm_limit_value(smu, true);
+ break;
+ case AMD_DPM_FORCED_LEVEL_LOW:
+ ret = smu_force_dpm_limit_value(smu, false);
+ break;
+ case AMD_DPM_FORCED_LEVEL_AUTO:
+ case AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD:
+ ret = smu_unforce_dpm_levels(smu);
+ break;
+ case AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK:
+ case AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK:
+ ret = smu_get_profiling_clk_mask(smu, level,
+ &sclk_mask,
+ &mclk_mask,
+ &soc_mask);
+ if (ret)
+ return ret;
+ smu_force_clk_levels(smu, SMU_SCLK, 1 << sclk_mask, false);
+ smu_force_clk_levels(smu, SMU_MCLK, 1 << mclk_mask, false);
+ smu_force_clk_levels(smu, SMU_SOCCLK, 1 << soc_mask, false);
+ break;
case AMD_DPM_FORCED_LEVEL_PROFILE_PEAK:
ret = renoir_set_peak_clock_by_device(smu);
break;
+ case AMD_DPM_FORCED_LEVEL_MANUAL:
+ case AMD_DPM_FORCED_LEVEL_PROFILE_EXIT:
default:
- ret = -EINVAL;
break;
}
-
return ret;
}
@@ -636,9 +807,17 @@ static int renoir_set_watermarks_table(
}
/* pass data to smu controller */
- ret = smu_write_watermarks_table(smu);
+ if ((smu->watermarks_bitmap & WATERMARKS_EXIST) &&
+ !(smu->watermarks_bitmap & WATERMARKS_LOADED)) {
+ ret = smu_write_watermarks_table(smu);
+ if (ret) {
+ pr_err("Failed to update WMTABLE!");
+ return ret;
+ }
+ smu->watermarks_bitmap |= WATERMARKS_LOADED;
+ }
- return ret;
+ return 0;
}
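The rewritten tail of renoir_set_watermarks_table() gates the SMU write on two flag bits: the table is pushed only once valid data exists (WATERMARKS_EXIST) and has not yet been uploaded (WATERMARKS_LOADED), after which LOADED is latched so repeat calls become no-ops. A compact sketch of that bit-flag state machine (flag encodings assumed for illustration):

#include <stdio.h>

#define WM_EXIST  (1u << 0)   /* assumed encoding, for illustration only */
#define WM_LOADED (1u << 1)

static unsigned int wm_bitmap;

static int write_to_smu(void) { return 0; }  /* stand-in for the table upload */

static int commit_watermarks(void)
{
	if ((wm_bitmap & WM_EXIST) && !(wm_bitmap & WM_LOADED)) {
		if (write_to_smu())
			return -1;
		wm_bitmap |= WM_LOADED;   /* never re-upload until invalidated */
	}
	return 0;
}

int main(void)
{
	wm_bitmap |= WM_EXIST;      /* display code filled the table */
	commit_watermarks();        /* uploads, latches WM_LOADED */
	commit_watermarks();        /* no-op: already loaded */
	printf("bitmap=0x%x\n", wm_bitmap);
	return 0;
}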
static int renoir_get_power_profile_mode(struct smu_context *smu,
@@ -674,8 +853,36 @@ static int renoir_get_power_profile_mode(struct smu_context *smu,
return size;
}
+static int renoir_read_sensor(struct smu_context *smu,
+ enum amd_pp_sensors sensor,
+ void *data, uint32_t *size)
+{
+ int ret = 0;
+
+ if (!data || !size)
+ return -EINVAL;
+
+ mutex_lock(&smu->sensor_lock);
+ switch (sensor) {
+ case AMDGPU_PP_SENSOR_GPU_LOAD:
+ ret = renoir_get_current_activity_percent(smu, sensor, (uint32_t *)data);
+ *size = 4;
+ break;
+ case AMDGPU_PP_SENSOR_GPU_TEMP:
+ ret = renoir_get_gpu_temperature(smu, (uint32_t *)data);
+ *size = 4;
+ break;
+ default:
+ ret = smu_v12_0_read_sensor(smu, sensor, data, size);
+ }
+ mutex_unlock(&smu->sensor_lock);
+
+ return ret;
+}
+
static const struct pptable_funcs renoir_ppt_funcs = {
.get_smu_msg_index = renoir_get_smu_msg_index,
+ .get_smu_clk_index = renoir_get_smu_clk_index,
.get_smu_table_index = renoir_get_smu_table_index,
.tables_init = renoir_tables_init,
.set_power_state = NULL,
@@ -683,6 +890,8 @@ static const struct pptable_funcs renoir_ppt_funcs = {
.print_clk_levels = renoir_print_clk_levels,
.get_current_power_state = renoir_get_current_power_state,
.dpm_set_uvd_enable = renoir_dpm_set_uvd_enable,
+ .dpm_set_jpeg_enable = renoir_dpm_set_jpeg_enable,
+ .get_current_clk_freq_by_table = renoir_get_current_clk_freq_by_table,
.force_dpm_limit_value = renoir_force_dpm_limit_value,
.unforce_dpm_levels = renoir_unforce_dpm_levels,
.get_workload_type = renoir_get_workload_type,
@@ -693,10 +902,12 @@ static const struct pptable_funcs renoir_ppt_funcs = {
.get_dpm_clock_table = renoir_get_dpm_clock_table,
.set_watermarks_table = renoir_set_watermarks_table,
.get_power_profile_mode = renoir_get_power_profile_mode,
+ .read_sensor = renoir_read_sensor,
.check_fw_status = smu_v12_0_check_fw_status,
.check_fw_version = smu_v12_0_check_fw_version,
.powergate_sdma = smu_v12_0_powergate_sdma,
.powergate_vcn = smu_v12_0_powergate_vcn,
+ .powergate_jpeg = smu_v12_0_powergate_jpeg,
.send_smc_msg_with_param = smu_v12_0_send_msg_with_param,
.read_smc_arg = smu_v12_0_read_arg,
.set_gfx_cgpg = smu_v12_0_set_gfx_cgpg,
@@ -704,9 +915,12 @@ static const struct pptable_funcs renoir_ppt_funcs = {
.init_smc_tables = smu_v12_0_init_smc_tables,
.fini_smc_tables = smu_v12_0_fini_smc_tables,
.populate_smc_tables = smu_v12_0_populate_smc_tables,
+ .get_enabled_mask = smu_v12_0_get_enabled_mask,
+ .get_current_clk_freq = smu_v12_0_get_current_clk_freq,
.get_dpm_ultimate_freq = smu_v12_0_get_dpm_ultimate_freq,
.mode2_reset = smu_v12_0_mode2_reset,
.set_soft_freq_limited_range = smu_v12_0_set_soft_freq_limited_range,
+ .set_driver_table_location = smu_v12_0_set_driver_table_location,
};
void renoir_set_ppt_funcs(struct smu_context *smu)
diff --git a/drivers/gpu/drm/amd/powerplay/smu_internal.h b/drivers/gpu/drm/amd/powerplay/smu_internal.h
index 8872f8b2d502..783319ec8bf9 100644
--- a/drivers/gpu/drm/amd/powerplay/smu_internal.h
+++ b/drivers/gpu/drm/amd/powerplay/smu_internal.h
@@ -42,6 +42,8 @@
((smu)->ppt_funcs->powergate_sdma ? (smu)->ppt_funcs->powergate_sdma((smu), (gate)) : 0)
#define smu_powergate_vcn(smu, gate) \
((smu)->ppt_funcs->powergate_vcn ? (smu)->ppt_funcs->powergate_vcn((smu), (gate)) : 0)
+#define smu_powergate_jpeg(smu, gate) \
+ ((smu)->ppt_funcs->powergate_jpeg ? (smu)->ppt_funcs->powergate_jpeg((smu), (gate)) : 0)
#define smu_get_vbios_bootup_values(smu) \
((smu)->ppt_funcs->get_vbios_bootup_values ? (smu)->ppt_funcs->get_vbios_bootup_values((smu)) : 0)
@@ -59,6 +61,8 @@
((smu)->ppt_funcs->write_pptable ? (smu)->ppt_funcs->write_pptable((smu)) : 0)
#define smu_set_min_dcef_deep_sleep(smu) \
((smu)->ppt_funcs->set_min_dcef_deep_sleep ? (smu)->ppt_funcs->set_min_dcef_deep_sleep((smu)) : 0)
+#define smu_set_driver_table_location(smu) \
+ ((smu)->ppt_funcs->set_driver_table_location ? (smu)->ppt_funcs->set_driver_table_location((smu)) : 0)
#define smu_set_tool_table_location(smu) \
((smu)->ppt_funcs->set_tool_table_location ? (smu)->ppt_funcs->set_tool_table_location((smu)) : 0)
#define smu_notify_memory_pool_location(smu) \
@@ -127,8 +131,8 @@ int smu_send_smc_msg(struct smu_context *smu, enum smu_message_type msg);
((smu)->ppt_funcs->display_config_changed ? (smu)->ppt_funcs->display_config_changed((smu)) : 0)
#define smu_apply_clocks_adjust_rules(smu) \
((smu)->ppt_funcs->apply_clocks_adjust_rules ? (smu)->ppt_funcs->apply_clocks_adjust_rules((smu)) : 0)
-#define smu_notify_smc_dispaly_config(smu) \
- ((smu)->ppt_funcs->notify_smc_dispaly_config ? (smu)->ppt_funcs->notify_smc_dispaly_config((smu)) : 0)
+#define smu_notify_smc_display_config(smu) \
+ ((smu)->ppt_funcs->notify_smc_display_config ? (smu)->ppt_funcs->notify_smc_display_config((smu)) : 0)
#define smu_force_dpm_limit_value(smu, highest) \
((smu)->ppt_funcs->force_dpm_limit_value ? (smu)->ppt_funcs->force_dpm_limit_value((smu), (highest)) : 0)
#define smu_unforce_dpm_levels(smu) \
@@ -170,6 +174,8 @@ int smu_send_smc_msg(struct smu_context *smu, enum smu_message_type msg);
((smu)->ppt_funcs->dpm_set_uvd_enable ? (smu)->ppt_funcs->dpm_set_uvd_enable((smu), (enable)) : 0)
#define smu_dpm_set_vce_enable(smu, enable) \
((smu)->ppt_funcs->dpm_set_vce_enable ? (smu)->ppt_funcs->dpm_set_vce_enable((smu), (enable)) : 0)
+#define smu_dpm_set_jpeg_enable(smu, enable) \
+ ((smu)->ppt_funcs->dpm_set_jpeg_enable ? (smu)->ppt_funcs->dpm_set_jpeg_enable((smu), (enable)) : 0)
#define smu_set_watermarks_table(smu, tab, clock_ranges) \
((smu)->ppt_funcs->set_watermarks_table ? (smu)->ppt_funcs->set_watermarks_table((smu), (tab), (clock_ranges)) : 0)
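All of these smu_internal.h wrappers follow one idiom: test the ops-table pointer and fall back to a benign default (0) when the backend leaves the hook unimplemented, so callers never need per-ASIC NULL checks. The idiom in isolation, with illustrative names:

#include <stdio.h>

struct dev;

struct dev_funcs {
	int (*powergate_jpeg)(struct dev *d, int gate);   /* optional hook */
};

struct dev {
	const struct dev_funcs *funcs;
};

/* Dispatch to the hook if present; otherwise report success (0). */
#define dev_powergate_jpeg(d, gate) \
	((d)->funcs->powergate_jpeg ? (d)->funcs->powergate_jpeg((d), (gate)) : 0)

static int real_powergate(struct dev *d, int gate)
{
	printf("gating: %d\n", gate);
	return 0;
}

int main(void)
{
	struct dev_funcs with = { .powergate_jpeg = real_powergate };
	struct dev_funcs without = { 0 };
	struct dev a = { &with }, b = { &without };

	dev_powergate_jpeg(&a, 1);                 /* calls the hook */
	printf("%d\n", dev_powergate_jpeg(&b, 1)); /* 0, silently skipped */
	return 0;
}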
diff --git a/drivers/gpu/drm/amd/powerplay/smu_v11_0.c b/drivers/gpu/drm/amd/powerplay/smu_v11_0.c
index e4268a627eff..02f8c9cb89d9 100644
--- a/drivers/gpu/drm/amd/powerplay/smu_v11_0.c
+++ b/drivers/gpu/drm/amd/powerplay/smu_v11_0.c
@@ -37,6 +37,7 @@
#include "soc15_common.h"
#include "atom.h"
#include "amd_pcie.h"
+#include "amdgpu_ras.h"
#include "asic_reg/thm/thm_11_0_2_offset.h"
#include "asic_reg/thm/thm_11_0_2_sh_mask.h"
@@ -79,15 +80,13 @@ static int smu_v11_0_wait_for_response(struct smu_context *smu)
for (i = 0; i < timeout; i++) {
cur_value = RREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_90);
if ((cur_value & MP1_C2PMSG_90__CONTENT_MASK) != 0)
- break;
+ return cur_value == 0x1 ? 0 : -EIO;
+
udelay(1);
}
/* timeout means wrong logic */
- if (i == timeout)
- return -ETIME;
-
- return RREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_90) == 0x1 ? 0 : -EIO;
+ return -ETIME;
}
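The reworked wait loop returns as soon as the response register reads back non-zero (0x1 meaning success, anything else -EIO), so falling out of the loop now unambiguously means timeout; the old version re-read the register after the loop, which could misreport a response that landed between the final poll and the re-read. The control-flow shape, modeled with a fake register:

#include <stdio.h>

#define EIO   5
#define ETIME 62

static unsigned int fake_reg;                 /* stand-in for the response register */
static unsigned int read_reg(void) { return fake_reg; }

static int wait_for_response(int timeout)
{
	int i;

	for (i = 0; i < timeout; i++) {
		unsigned int cur = read_reg();

		if (cur != 0)                 /* response register populated */
			return cur == 0x1 ? 0 : -EIO;
		/* udelay(1) in the real driver */
	}
	return -ETIME;                        /* loop exhausted: timeout */
}

int main(void)
{
	fake_reg = 0x1;
	printf("%d\n", wait_for_response(10)); /* 0: success */
	fake_reg = 0xff;
	printf("%d\n", wait_for_response(10)); /* -5: SMU reported an error */
	fake_reg = 0;
	printf("%d\n", wait_for_response(10)); /* -62: timed out */
	return 0;
}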
int
@@ -103,9 +102,11 @@ smu_v11_0_send_msg_with_param(struct smu_context *smu,
return index;
ret = smu_v11_0_wait_for_response(smu);
- if (ret)
- pr_err("failed send message: %10s (%d) \tparam: 0x%08x response %#x\n",
- smu_get_message_name(smu, msg), index, param, ret);
+ if (ret) {
+ pr_err("Msg issuing pre-check failed; SMU may not be in the right state!\n");
+ return ret;
+ }
WREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_90, 0);
@@ -449,8 +450,10 @@ int smu_v11_0_fini_smc_tables(struct smu_context *smu)
kfree(smu_table->tables);
kfree(smu_table->metrics_table);
+ kfree(smu_table->watermarks_table);
smu_table->tables = NULL;
smu_table->metrics_table = NULL;
+ smu_table->watermarks_table = NULL;
smu_table->metrics_time = 0;
ret = smu_v11_0_fini_dpm_context(smu);
@@ -773,6 +776,24 @@ int smu_v11_0_set_min_dcef_deep_sleep(struct smu_context *smu)
return smu_v11_0_set_deep_sleep_dcefclk(smu, table_context->boot_values.dcefclk / 100);
}
+int smu_v11_0_set_driver_table_location(struct smu_context *smu)
+{
+ struct smu_table *driver_table = &smu->smu_table.driver_table;
+ int ret = 0;
+
+ if (driver_table->mc_address) {
+ ret = smu_send_smc_msg_with_param(smu,
+ SMU_MSG_SetDriverDramAddrHigh,
+ upper_32_bits(driver_table->mc_address));
+ if (!ret)
+ ret = smu_send_smc_msg_with_param(smu,
+ SMU_MSG_SetDriverDramAddrLow,
+ lower_32_bits(driver_table->mc_address));
+ }
+
+ return ret;
+}
+
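smu_v11_0_set_driver_table_location() hands the firmware a 64-bit DMA address through two 32-bit mailbox messages, high half first, and skips the handshake entirely if the table was never allocated. A sketch of the split-and-send pattern with a stub message sender (message IDs are placeholders):

#include <stdint.h>
#include <stdio.h>

#define upper_32_bits(n) ((uint32_t)((uint64_t)(n) >> 32))
#define lower_32_bits(n) ((uint32_t)(n))

enum msg { MSG_SET_ADDR_HIGH, MSG_SET_ADDR_LOW };

static int send_msg(enum msg m, uint32_t param)  /* stand-in for the mailbox call */
{
	printf("msg %d param 0x%08x\n", m, param);
	return 0;
}

static int set_table_location(uint64_t mc_address)
{
	int ret = 0;

	if (mc_address) {              /* skip if the table was never allocated */
		ret = send_msg(MSG_SET_ADDR_HIGH, upper_32_bits(mc_address));
		if (!ret)
			ret = send_msg(MSG_SET_ADDR_LOW, lower_32_bits(mc_address));
	}
	return ret;
}

int main(void)
{
	return set_table_location(0x0000000123456000ULL);
}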
int smu_v11_0_set_tool_table_location(struct smu_context *smu)
{
int ret = 0;
@@ -834,27 +855,33 @@ int smu_v11_0_get_enabled_mask(struct smu_context *smu,
uint32_t *feature_mask, uint32_t num)
{
uint32_t feature_mask_high = 0, feature_mask_low = 0;
+ struct smu_feature *feature = &smu->smu_feature;
int ret = 0;
if (!feature_mask || num < 2)
return -EINVAL;
- ret = smu_send_smc_msg(smu, SMU_MSG_GetEnabledSmuFeaturesHigh);
- if (ret)
- return ret;
- ret = smu_read_smc_arg(smu, &feature_mask_high);
- if (ret)
- return ret;
+ if (bitmap_empty(feature->enabled, feature->feature_num)) {
+ ret = smu_send_smc_msg(smu, SMU_MSG_GetEnabledSmuFeaturesHigh);
+ if (ret)
+ return ret;
+ ret = smu_read_smc_arg(smu, &feature_mask_high);
+ if (ret)
+ return ret;
- ret = smu_send_smc_msg(smu, SMU_MSG_GetEnabledSmuFeaturesLow);
- if (ret)
- return ret;
- ret = smu_read_smc_arg(smu, &feature_mask_low);
- if (ret)
- return ret;
+ ret = smu_send_smc_msg(smu, SMU_MSG_GetEnabledSmuFeaturesLow);
+ if (ret)
+ return ret;
+ ret = smu_read_smc_arg(smu, &feature_mask_low);
+ if (ret)
+ return ret;
- feature_mask[0] = feature_mask_low;
- feature_mask[1] = feature_mask_high;
+ feature_mask[0] = feature_mask_low;
+ feature_mask[1] = feature_mask_high;
+ } else {
+ bitmap_copy((unsigned long *)feature_mask, feature->enabled,
+ feature->feature_num);
+ }
return ret;
}
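smu_v11_0_get_enabled_mask() now consults the driver-side bitmap first: only when the cached feature bitmap is empty does it round-trip two mailbox messages to the firmware; otherwise the cached bits are copied straight out. A sketch of that fast-path/slow-path split over a plain 64-bit mask (the cache itself is populated elsewhere, as in the driver):

#include <stdint.h>
#include <stdio.h>

static uint64_t cached_features;              /* 0 == "not yet populated" */

static int query_firmware(uint32_t *hi, uint32_t *lo)  /* two-message round trip */
{
	*hi = 0x0000beef;
	*lo = 0xdeadf00d;
	return 0;
}

static int get_enabled_mask(uint32_t mask[2])
{
	uint32_t hi, lo;
	int ret;

	if (!cached_features) {               /* slow path: ask the firmware */
		ret = query_firmware(&hi, &lo);
		if (ret)
			return ret;
		mask[0] = lo;
		mask[1] = hi;
	} else {                              /* fast path: serve the cache */
		mask[0] = (uint32_t)cached_features;
		mask[1] = (uint32_t)(cached_features >> 32);
	}
	return 0;
}

int main(void)
{
	uint32_t m[2];

	get_enabled_mask(m);
	printf("low 0x%08x high 0x%08x\n", m[0], m[1]);
	cached_features = 0x1122334455667788ULL;
	get_enabled_mask(m);
	printf("low 0x%08x high 0x%08x\n", m[0], m[1]);
	return 0;
}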
@@ -866,21 +893,24 @@ int smu_v11_0_system_features_control(struct smu_context *smu,
uint32_t feature_mask[2];
int ret = 0;
- if (smu->pm_enabled) {
- ret = smu_send_smc_msg(smu, (en ? SMU_MSG_EnableAllSmuFeatures :
- SMU_MSG_DisableAllSmuFeatures));
- if (ret)
- return ret;
- }
-
- ret = smu_feature_get_enabled_mask(smu, feature_mask, 2);
+ ret = smu_send_smc_msg(smu, (en ? SMU_MSG_EnableAllSmuFeatures :
+ SMU_MSG_DisableAllSmuFeatures));
if (ret)
return ret;
- bitmap_copy(feature->enabled, (unsigned long *)&feature_mask,
- feature->feature_num);
- bitmap_copy(feature->supported, (unsigned long *)&feature_mask,
- feature->feature_num);
+ if (en) {
+ ret = smu_feature_get_enabled_mask(smu, feature_mask, 2);
+ if (ret)
+ return ret;
+
+ bitmap_copy(feature->enabled, (unsigned long *)&feature_mask,
+ feature->feature_num);
+ bitmap_copy(feature->supported, (unsigned long *)&feature_mask,
+ feature->feature_num);
+ } else {
+ bitmap_zero(feature->enabled, feature->feature_num);
+ bitmap_zero(feature->supported, feature->feature_num);
+ }
return ret;
}
@@ -1124,11 +1154,12 @@ static int smu_v11_0_set_thermal_range(struct smu_context *smu,
int low = SMU_THERMAL_MINIMUM_ALERT_TEMP;
int high = SMU_THERMAL_MAXIMUM_ALERT_TEMP;
uint32_t val;
+ struct smu_table_context *table_context = &smu->smu_table;
+ struct smu_11_0_powerplay_table *powerplay_table = table_context->power_play_table;
low = max(SMU_THERMAL_MINIMUM_ALERT_TEMP,
range.min / SMU_TEMPERATURE_UNITS_PER_CENTIGRADES);
- high = min(SMU_THERMAL_MAXIMUM_ALERT_TEMP,
- range.max / SMU_TEMPERATURE_UNITS_PER_CENTIGRADES);
+ high = min((uint16_t)SMU_THERMAL_MAXIMUM_ALERT_TEMP, powerplay_table->software_shutdown_temp);
if (low > high)
return -EINVAL;
@@ -1617,7 +1648,9 @@ bool smu_v11_0_baco_is_support(struct smu_context *smu)
if (!baco_support)
return false;
- if (!smu_feature_is_enabled(smu, SMU_FEATURE_BACO_BIT))
+ /* Arcturus does not support this bit mask */
+ if (smu_feature_is_supported(smu, SMU_FEATURE_BACO_BIT) &&
+ !smu_feature_is_enabled(smu, SMU_FEATURE_BACO_BIT))
return false;
val = RREG32_SOC15(NBIO, 0, mmRCC_BIF_STRAP0);
@@ -1643,6 +1676,10 @@ int smu_v11_0_baco_set_state(struct smu_context *smu, enum smu_baco_state state)
{
struct smu_baco_context *smu_baco = &smu->smu_baco;
+ struct amdgpu_device *adev = smu->adev;
+ struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);
+ uint32_t bif_doorbell_intr_cntl;
+ uint32_t data;
int ret = 0;
if (smu_v11_0_baco_get_state(smu) == state)
@@ -1650,10 +1687,37 @@ int smu_v11_0_baco_set_state(struct smu_context *smu, enum smu_baco_state state)
mutex_lock(&smu_baco->mutex);
- if (state == SMU_BACO_STATE_ENTER)
- ret = smu_send_smc_msg_with_param(smu, SMU_MSG_EnterBaco, BACO_SEQ_BACO);
- else
+ bif_doorbell_intr_cntl = RREG32_SOC15(NBIO, 0, mmBIF_DOORBELL_INT_CNTL);
+
+ if (state == SMU_BACO_STATE_ENTER) {
+ bif_doorbell_intr_cntl = REG_SET_FIELD(bif_doorbell_intr_cntl,
+ BIF_DOORBELL_INT_CNTL,
+ DOORBELL_INTERRUPT_DISABLE, 1);
+ WREG32_SOC15(NBIO, 0, mmBIF_DOORBELL_INT_CNTL, bif_doorbell_intr_cntl);
+
+ if (!ras || !ras->supported) {
+ data = RREG32_SOC15(THM, 0, mmTHM_BACO_CNTL);
+ data |= 0x80000000;
+ WREG32_SOC15(THM, 0, mmTHM_BACO_CNTL, data);
+
+ ret = smu_send_smc_msg_with_param(smu, SMU_MSG_EnterBaco, 0);
+ } else {
+ ret = smu_send_smc_msg_with_param(smu, SMU_MSG_EnterBaco, 1);
+ }
+ } else {
ret = smu_send_smc_msg(smu, SMU_MSG_ExitBaco);
+ if (ret)
+ goto out;
+
+ bif_doorbell_intr_cntl = REG_SET_FIELD(bif_doorbell_intr_cntl,
+ BIF_DOORBELL_INT_CNTL,
+ DOORBELL_INTERRUPT_DISABLE, 0);
+ WREG32_SOC15(NBIO, 0, mmBIF_DOORBELL_INT_CNTL, bif_doorbell_intr_cntl);
+
+ /* clear vbios scratch 6 and 7 for coming asic reinit */
+ WREG32(adev->bios_scratch_reg_offset + 6, 0);
+ WREG32(adev->bios_scratch_reg_offset + 7, 0);
+ }
if (ret)
goto out;
@@ -1663,13 +1727,17 @@ out:
return ret;
}
-int smu_v11_0_baco_reset(struct smu_context *smu)
+int smu_v11_0_baco_enter(struct smu_context *smu)
{
+ struct amdgpu_device *adev = smu->adev;
int ret = 0;
- ret = smu_v11_0_baco_set_armd3_sequence(smu, BACO_SEQ_BACO);
- if (ret)
- return ret;
+ /* Arcturus does not need this audio workaround */
+ if (adev->asic_type != CHIP_ARCTURUS) {
+ ret = smu_v11_0_baco_set_armd3_sequence(smu, BACO_SEQ_BACO);
+ if (ret)
+ return ret;
+ }
ret = smu_v11_0_baco_set_state(smu, SMU_BACO_STATE_ENTER);
if (ret)
@@ -1677,6 +1745,13 @@ int smu_v11_0_baco_reset(struct smu_context *smu)
msleep(10);
+ return ret;
+}
+
+int smu_v11_0_baco_exit(struct smu_context *smu)
+{
+ int ret = 0;
+
ret = smu_v11_0_baco_set_state(smu, SMU_BACO_STATE_EXIT);
if (ret)
return ret;
@@ -1815,3 +1890,42 @@ int smu_v11_0_set_default_od_settings(struct smu_context *smu, bool initialize,
}
return ret;
}
+
+int smu_v11_0_set_performance_level(struct smu_context *smu,
+ enum amd_dpm_forced_level level)
+{
+ int ret = 0;
+ uint32_t sclk_mask, mclk_mask, soc_mask;
+
+ switch (level) {
+ case AMD_DPM_FORCED_LEVEL_HIGH:
+ ret = smu_force_dpm_limit_value(smu, true);
+ break;
+ case AMD_DPM_FORCED_LEVEL_LOW:
+ ret = smu_force_dpm_limit_value(smu, false);
+ break;
+ case AMD_DPM_FORCED_LEVEL_AUTO:
+ case AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD:
+ ret = smu_unforce_dpm_levels(smu);
+ break;
+ case AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK:
+ case AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK:
+ case AMD_DPM_FORCED_LEVEL_PROFILE_PEAK:
+ ret = smu_get_profiling_clk_mask(smu, level,
+ &sclk_mask,
+ &mclk_mask,
+ &soc_mask);
+ if (ret)
+ return ret;
+ smu_force_clk_levels(smu, SMU_SCLK, 1 << sclk_mask, false);
+ smu_force_clk_levels(smu, SMU_MCLK, 1 << mclk_mask, false);
+ smu_force_clk_levels(smu, SMU_SOCCLK, 1 << soc_mask, false);
+ break;
+ case AMD_DPM_FORCED_LEVEL_MANUAL:
+ case AMD_DPM_FORCED_LEVEL_PROFILE_EXIT:
+ default:
+ break;
+ }
+ return ret;
+}
+
diff --git a/drivers/gpu/drm/amd/powerplay/smu_v12_0.c b/drivers/gpu/drm/amd/powerplay/smu_v12_0.c
index 094cfc46adac..870e6db2907e 100644
--- a/drivers/gpu/drm/amd/powerplay/smu_v12_0.c
+++ b/drivers/gpu/drm/amd/powerplay/smu_v12_0.c
@@ -66,15 +66,13 @@ int smu_v12_0_wait_for_response(struct smu_context *smu)
for (i = 0; i < adev->usec_timeout; i++) {
cur_value = RREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_90);
if ((cur_value & MP1_C2PMSG_90__CONTENT_MASK) != 0)
- break;
+ return cur_value == 0x1 ? 0 : -EIO;
+
udelay(1);
}
/* timeout means wrong logic */
- if (i == adev->usec_timeout)
- return -ETIME;
-
- return RREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_90) == 0x1 ? 0 : -EIO;
+ return -ETIME;
}
int
@@ -90,9 +88,11 @@ smu_v12_0_send_msg_with_param(struct smu_context *smu,
return index;
ret = smu_v12_0_wait_for_response(smu);
- if (ret)
- pr_err("Failed to send message 0x%x, response 0x%x, param 0x%x\n",
- index, ret, param);
+ if (ret) {
+ pr_err("Msg issuing pre-check failed; SMU may not be in the right state!\n");
+ return ret;
+ }
WREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_90, 0);
@@ -159,7 +159,7 @@ int smu_v12_0_check_fw_version(struct smu_context *smu)
int smu_v12_0_powergate_sdma(struct smu_context *smu, bool gate)
{
- if (!(smu->adev->flags & AMD_IS_APU))
+ if (!smu->is_apu)
return 0;
if (gate)
@@ -170,7 +170,7 @@ int smu_v12_0_powergate_sdma(struct smu_context *smu, bool gate)
int smu_v12_0_powergate_vcn(struct smu_context *smu, bool gate)
{
- if (!(smu->adev->flags & AMD_IS_APU))
+ if (!smu->is_apu)
return 0;
if (gate)
@@ -179,6 +179,17 @@ int smu_v12_0_powergate_vcn(struct smu_context *smu, bool gate)
return smu_send_smc_msg(smu, SMU_MSG_PowerUpVcn);
}
+int smu_v12_0_powergate_jpeg(struct smu_context *smu, bool gate)
+{
+ if (!smu->is_apu)
+ return 0;
+
+ if (gate)
+ return smu_send_smc_msg_with_param(smu, SMU_MSG_PowerDownJpeg, 0);
+ else
+ return smu_send_smc_msg_with_param(smu, SMU_MSG_PowerUpJpeg, 0);
+}
+
int smu_v12_0_set_gfx_cgpg(struct smu_context *smu, bool enable)
{
if (!(smu->adev->pg_flags & AMD_PG_SUPPORT_GFX_PG))
@@ -188,6 +199,39 @@ int smu_v12_0_set_gfx_cgpg(struct smu_context *smu, bool enable)
SMU_MSG_SetGfxCGPG, enable ? 1 : 0);
}
+int smu_v12_0_read_sensor(struct smu_context *smu,
+ enum amd_pp_sensors sensor,
+ void *data, uint32_t *size)
+{
+ int ret = 0;
+
+ if (!data || !size)
+ return -EINVAL;
+
+ switch (sensor) {
+ case AMDGPU_PP_SENSOR_GFX_MCLK:
+ ret = smu_get_current_clk_freq(smu, SMU_UCLK, (uint32_t *)data);
+ *size = 4;
+ break;
+ case AMDGPU_PP_SENSOR_GFX_SCLK:
+ ret = smu_get_current_clk_freq(smu, SMU_GFXCLK, (uint32_t *)data);
+ *size = 4;
+ break;
+ case AMDGPU_PP_SENSOR_MIN_FAN_RPM:
+ *(uint32_t *)data = 0;
+ *size = 4;
+ break;
+ default:
+ ret = smu_common_read_sensor(smu, sensor, data, size);
+ break;
+ }
+
+ if (ret)
+ *size = 0;
+
+ return ret;
+}
+
/**
* smu_v12_0_get_gfxoff_status - get gfxoff status
*
@@ -274,16 +318,57 @@ int smu_v12_0_fini_smc_tables(struct smu_context *smu)
int smu_v12_0_populate_smc_tables(struct smu_context *smu)
{
struct smu_table_context *smu_table = &smu->smu_table;
- struct smu_table *table = NULL;
- table = &smu_table->tables[SMU_TABLE_DPMCLOCKS];
- if (!table)
+ return smu_update_table(smu, SMU_TABLE_DPMCLOCKS, 0, smu_table->clocks_table, false);
+}
+
+int smu_v12_0_get_enabled_mask(struct smu_context *smu,
+ uint32_t *feature_mask, uint32_t num)
+{
+ uint32_t feature_mask_high = 0, feature_mask_low = 0;
+ int ret = 0;
+
+ if (!feature_mask || num < 2)
return -EINVAL;
- if (!table->cpu_addr)
+ ret = smu_send_smc_msg(smu, SMU_MSG_GetEnabledSmuFeaturesHigh);
+ if (ret)
+ return ret;
+ ret = smu_read_smc_arg(smu, &feature_mask_high);
+ if (ret)
+ return ret;
+
+ ret = smu_send_smc_msg(smu, SMU_MSG_GetEnabledSmuFeaturesLow);
+ if (ret)
+ return ret;
+ ret = smu_read_smc_arg(smu, &feature_mask_low);
+ if (ret)
+ return ret;
+
+ feature_mask[0] = feature_mask_low;
+ feature_mask[1] = feature_mask_high;
+
+ return ret;
+}
+
+int smu_v12_0_get_current_clk_freq(struct smu_context *smu,
+ enum smu_clk_type clk_id,
+ uint32_t *value)
+{
+ int ret = 0;
+ uint32_t freq = 0;
+
+ if (clk_id >= SMU_CLK_COUNT || !value)
return -EINVAL;
- return smu_update_table(smu, SMU_TABLE_DPMCLOCKS, 0, smu_table->clocks_table, false);
+ ret = smu_get_current_clk_freq_by_table(smu, clk_id, &freq);
+ if (ret)
+ return ret;
+
+ freq *= 100;
+ *value = freq;
+
+ return ret;
}
int smu_v12_0_get_dpm_ultimate_freq(struct smu_context *smu, enum smu_clk_type clk_type,
@@ -421,3 +506,21 @@ int smu_v12_0_set_soft_freq_limited_range(struct smu_context *smu, enum smu_clk_
return ret;
}
+
+int smu_v12_0_set_driver_table_location(struct smu_context *smu)
+{
+ struct smu_table *driver_table = &smu->smu_table.driver_table;
+ int ret = 0;
+
+ if (driver_table->mc_address) {
+ ret = smu_send_smc_msg_with_param(smu,
+ SMU_MSG_SetDriverDramAddrHigh,
+ upper_32_bits(driver_table->mc_address));
+ if (!ret)
+ ret = smu_send_smc_msg_with_param(smu,
+ SMU_MSG_SetDriverDramAddrLow,
+ lower_32_bits(driver_table->mc_address));
+ }
+
+ return ret;
+}
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/ci_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/ci_smumgr.c
index 15590fd86ef4..868e2d5f6e62 100644
--- a/drivers/gpu/drm/amd/powerplay/smumgr/ci_smumgr.c
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/ci_smumgr.c
@@ -653,8 +653,8 @@ static int ci_min_max_v_gnbl_pm_lid_from_bapm_vddc(struct pp_hwmgr *hwmgr)
static int ci_populate_bapm_vddc_base_leakage_sidd(struct pp_hwmgr *hwmgr)
{
struct ci_smumgr *smu_data = (struct ci_smumgr *)(hwmgr->smu_backend);
- uint16_t HiSidd = smu_data->power_tune_table.BapmVddCBaseLeakageHiSidd;
- uint16_t LoSidd = smu_data->power_tune_table.BapmVddCBaseLeakageLoSidd;
+ uint16_t HiSidd;
+ uint16_t LoSidd;
struct phm_cac_tdp_table *cac_table = hwmgr->dyn_state.cac_dtp_table;
HiSidd = (uint16_t)(cac_table->usHighCACLeakage / 100 * 256);
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.c
index da025b1d302d..32ebb383c456 100644
--- a/drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.c
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.c
@@ -940,7 +940,7 @@ static int fiji_populate_single_graphic_level(struct pp_hwmgr *hwmgr,
{
int result;
/* PP_Clocks minClocks; */
- uint32_t threshold, mvdd;
+ uint32_t mvdd;
struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
struct phm_ppt_v1_information *table_info =
(struct phm_ppt_v1_information *)(hwmgr->pptable);
@@ -973,8 +973,6 @@ static int fiji_populate_single_graphic_level(struct pp_hwmgr *hwmgr,
level->VoltageDownHyst = 0;
level->PowerThrottle = 0;
- threshold = clock * data->fast_watermark_threshold / 100;
-
data->display_timing.min_clock_in_sr = hwmgr->display_config->min_core_set_clock_in_sr;
if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_SclkDeepSleep))
@@ -1501,7 +1499,7 @@ static int fiji_populate_memory_timing_parameters(struct pp_hwmgr *hwmgr,
uint32_t dram_timing;
uint32_t dram_timing2;
uint32_t burstTime;
- ULONG state, trrds, trrdl;
+ ULONG trrds, trrdl;
int result;
result = atomctrl_set_engine_dram_timings_rv770(hwmgr,
@@ -1513,7 +1511,6 @@ static int fiji_populate_memory_timing_parameters(struct pp_hwmgr *hwmgr,
dram_timing2 = cgs_read_register(hwmgr->device, mmMC_ARB_DRAM_TIMING2);
burstTime = cgs_read_register(hwmgr->device, mmMC_ARB_BURST_TIME);
- state = PHM_GET_FIELD(burstTime, MC_ARB_BURST_TIME, STATE0);
trrds = PHM_GET_FIELD(burstTime, MC_ARB_BURST_TIME, TRRDS0);
trrdl = PHM_GET_FIELD(burstTime, MC_ARB_BURST_TIME, TRRDL0);
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/smu10_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/smu10_smumgr.c
index aa0ee2b46135..2319400a3fcb 100644
--- a/drivers/gpu/drm/amd/powerplay/smumgr/smu10_smumgr.c
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/smu10_smumgr.c
@@ -137,7 +137,7 @@ static int smu10_copy_table_from_smc(struct pp_hwmgr *hwmgr,
priv->smu_tables.entry[table_id].table_id);
/* flush hdp cache */
- adev->nbio.funcs->hdp_flush(adev, NULL);
+ amdgpu_asic_flush_hdp(adev, NULL);
memcpy(table, (uint8_t *)priv->smu_tables.entry[table_id].table,
priv->smu_tables.entry[table_id].size);
@@ -150,6 +150,7 @@ static int smu10_copy_table_to_smc(struct pp_hwmgr *hwmgr,
{
struct smu10_smumgr *priv =
(struct smu10_smumgr *)(hwmgr->smu_backend);
+ struct amdgpu_device *adev = hwmgr->adev;
PP_ASSERT_WITH_CODE(table_id < MAX_SMU_TABLE,
"Invalid SMU Table ID!", return -EINVAL;);
@@ -161,6 +162,8 @@ static int smu10_copy_table_to_smc(struct pp_hwmgr *hwmgr,
memcpy(priv->smu_tables.entry[table_id].table, table,
priv->smu_tables.entry[table_id].size);
+ amdgpu_asic_flush_hdp(adev, NULL);
+
smu10_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetDriverDramAddrHigh,
upper_32_bits(priv->smu_tables.entry[table_id].mc_addr));
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/smu9_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/smu9_smumgr.c
index 742b3dc1f6cb..adfbcbe5d113 100644
--- a/drivers/gpu/drm/amd/powerplay/smumgr/smu9_smumgr.c
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/smu9_smumgr.c
@@ -61,15 +61,29 @@ static uint32_t smu9_wait_for_response(struct pp_hwmgr *hwmgr)
uint32_t reg;
uint32_t ret;
- reg = SOC15_REG_OFFSET(MP1, 0, mmMP1_SMN_C2PMSG_90);
+ /* Due to the L1 policy problem under SRIOV, we have to use
+ * mmMP1_SMN_C2PMSG_103 as the driver response register
+ */
+ if (hwmgr->pp_one_vf) {
+ reg = SOC15_REG_OFFSET(MP1, 0, mmMP1_SMN_C2PMSG_103);
- ret = phm_wait_for_register_unequal(hwmgr, reg,
- 0, MP1_C2PMSG_90__CONTENT_MASK);
+ ret = phm_wait_for_register_unequal(hwmgr, reg,
+ 0, MP1_C2PMSG_103__CONTENT_MASK);
- if (ret)
- pr_err("No response from smu\n");
+ if (ret)
+ pr_err("No response from smu\n");
- return RREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_90);
+ return RREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_103);
+ } else {
+ reg = SOC15_REG_OFFSET(MP1, 0, mmMP1_SMN_C2PMSG_90);
+
+ ret = phm_wait_for_register_unequal(hwmgr, reg,
+ 0, MP1_C2PMSG_90__CONTENT_MASK);
+
+ if (ret)
+ pr_err("No response from smu\n");
+ return RREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_90);
+ }
}
/*
@@ -83,7 +97,11 @@ static int smu9_send_msg_to_smc_without_waiting(struct pp_hwmgr *hwmgr,
{
struct amdgpu_device *adev = hwmgr->adev;
- WREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_66, msg);
+ if (hwmgr->pp_one_vf) {
+ WREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_101, msg);
+ } else {
+ WREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_66, msg);
+ }
return 0;
}
@@ -101,7 +119,10 @@ int smu9_send_msg_to_smc(struct pp_hwmgr *hwmgr, uint16_t msg)
smu9_wait_for_response(hwmgr);
- WREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_90, 0);
+ if (hwmgr->pp_one_vf)
+ WREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_103, 0);
+ else
+ WREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_90, 0);
smu9_send_msg_to_smc_without_waiting(hwmgr, msg);
@@ -127,9 +148,17 @@ int smu9_send_msg_to_smc_with_parameter(struct pp_hwmgr *hwmgr,
smu9_wait_for_response(hwmgr);
- WREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_90, 0);
-
- WREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_82, parameter);
+ /* Due to the L1 policy problem under SRIOV, we have to use
+ * mmMP1_SMN_C2PMSG_101 as the driver message register and
+ * mmMP1_SMN_C2PMSG_102 as the driver parameter register.
+ */
+ if (hwmgr->pp_one_vf) {
+ WREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_103, 0);
+ WREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_102, parameter);
+ } else {
+ WREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_90, 0);
+ WREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_82, parameter);
+ }
smu9_send_msg_to_smc_without_waiting(hwmgr, msg);
@@ -144,5 +173,8 @@ uint32_t smu9_get_argument(struct pp_hwmgr *hwmgr)
{
struct amdgpu_device *adev = hwmgr->adev;
- return RREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_82);
+ if (hwmgr->pp_one_vf)
+ return RREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_102);
+ else
+ return RREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_82);
}
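Every smu9 mailbox access now forks on hwmgr->pp_one_vf: under one-VF SRIOV the driver must use the C2PMSG_101/102/103 register triple (message/parameter/response), because L1 policy blocks the usual 66/82/90 set. One way to make that symmetry visible is to factor the selection into a single helper; this is a design sketch, not the driver's code, and the offsets below are placeholders:

#include <stdio.h>

struct mbox_regs {
	unsigned int msg, param, resp;
};

/* Placeholder offsets standing in for the mmMP1_SMN_C2PMSG_* registers */
static const struct mbox_regs bare_metal = { .msg = 66,  .param = 82,  .resp = 90 };
static const struct mbox_regs one_vf     = { .msg = 101, .param = 102, .resp = 103 };

static const struct mbox_regs *pick_regs(int pp_one_vf)
{
	/* One-VF SRIOV must avoid the registers filtered by L1 policy */
	return pp_one_vf ? &one_vf : &bare_metal;
}

int main(void)
{
	const struct mbox_regs *r = pick_regs(1);

	printf("msg=%u param=%u resp=%u\n", r->msg, r->param, r->resp);
	return 0;
}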
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/vega10_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/vega10_smumgr.c
index 0f3836fd9666..715564009089 100644
--- a/drivers/gpu/drm/amd/powerplay/smumgr/vega10_smumgr.c
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/vega10_smumgr.c
@@ -58,7 +58,7 @@ static int vega10_copy_table_from_smc(struct pp_hwmgr *hwmgr,
priv->smu_tables.entry[table_id].table_id);
/* flush hdp cache */
- adev->nbio.funcs->hdp_flush(adev, NULL);
+ amdgpu_asic_flush_hdp(adev, NULL);
memcpy(table, priv->smu_tables.entry[table_id].table,
priv->smu_tables.entry[table_id].size);
@@ -70,6 +70,13 @@ static int vega10_copy_table_to_smc(struct pp_hwmgr *hwmgr,
uint8_t *table, int16_t table_id)
{
struct vega10_smumgr *priv = hwmgr->smu_backend;
+ struct amdgpu_device *adev = hwmgr->adev;
+
+ /* Under SRIOV, the VBIOS or hypervisor driver has already
+ * copied the table to the SMC, so just skip it here.
+ */
+ if (!hwmgr->not_vf)
+ return 0;
PP_ASSERT_WITH_CODE(table_id < MAX_SMU_TABLE,
"Invalid SMU Table ID!", return -EINVAL);
@@ -81,6 +88,8 @@ static int vega10_copy_table_to_smc(struct pp_hwmgr *hwmgr,
memcpy(priv->smu_tables.entry[table_id].table, table,
priv->smu_tables.entry[table_id].size);
+ amdgpu_asic_flush_hdp(adev, NULL);
+
smu9_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetDriverDramAddrHigh,
upper_32_bits(priv->smu_tables.entry[table_id].mc_addr));
@@ -100,6 +109,14 @@ int vega10_enable_smc_features(struct pp_hwmgr *hwmgr,
int msg = enable ? PPSMC_MSG_EnableSmuFeatures :
PPSMC_MSG_DisableSmuFeatures;
+ /* The VF has no permission to change SMU features due to
+ * security concerns; even under pp one vf mode it still
+ * can't do it. For vega10, the SMU in the VBIOS will enable
+ * the appropriate features.
+ */
+ if (!hwmgr->not_vf)
+ return 0;
+
return smum_send_msg_to_smc_with_parameter(hwmgr,
msg, feature_mask);
}
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/vega12_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/vega12_smumgr.c
index 90c782c132d2..a3915bfcce81 100644
--- a/drivers/gpu/drm/amd/powerplay/smumgr/vega12_smumgr.c
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/vega12_smumgr.c
@@ -66,7 +66,7 @@ static int vega12_copy_table_from_smc(struct pp_hwmgr *hwmgr,
return -EINVAL);
/* flush hdp cache */
- adev->nbio.funcs->hdp_flush(adev, NULL);
+ amdgpu_asic_flush_hdp(adev, NULL);
memcpy(table, priv->smu_tables.entry[table_id].table,
priv->smu_tables.entry[table_id].size);
@@ -84,6 +84,7 @@ static int vega12_copy_table_to_smc(struct pp_hwmgr *hwmgr,
{
struct vega12_smumgr *priv =
(struct vega12_smumgr *)(hwmgr->smu_backend);
+ struct amdgpu_device *adev = hwmgr->adev;
PP_ASSERT_WITH_CODE(table_id < TABLE_COUNT,
"Invalid SMU Table ID!", return -EINVAL);
@@ -95,6 +96,8 @@ static int vega12_copy_table_to_smc(struct pp_hwmgr *hwmgr,
memcpy(priv->smu_tables.entry[table_id].table, table,
priv->smu_tables.entry[table_id].size);
+ amdgpu_asic_flush_hdp(adev, NULL);
+
PP_ASSERT_WITH_CODE(smu9_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetDriverDramAddrHigh,
upper_32_bits(priv->smu_tables.entry[table_id].mc_addr)) == 0,
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/vega20_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/vega20_smumgr.c
index f604612f411f..0db57fb83d30 100644
--- a/drivers/gpu/drm/amd/powerplay/smumgr/vega20_smumgr.c
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/vega20_smumgr.c
@@ -189,7 +189,7 @@ static int vega20_copy_table_from_smc(struct pp_hwmgr *hwmgr,
return ret);
/* flush hdp cache */
- adev->nbio.funcs->hdp_flush(adev, NULL);
+ amdgpu_asic_flush_hdp(adev, NULL);
memcpy(table, priv->smu_tables.entry[table_id].table,
priv->smu_tables.entry[table_id].size);
@@ -207,6 +207,7 @@ static int vega20_copy_table_to_smc(struct pp_hwmgr *hwmgr,
{
struct vega20_smumgr *priv =
(struct vega20_smumgr *)(hwmgr->smu_backend);
+ struct amdgpu_device *adev = hwmgr->adev;
int ret = 0;
PP_ASSERT_WITH_CODE(table_id < TABLE_COUNT,
@@ -219,6 +220,8 @@ static int vega20_copy_table_to_smc(struct pp_hwmgr *hwmgr,
memcpy(priv->smu_tables.entry[table_id].table, table,
priv->smu_tables.entry[table_id].size);
+ amdgpu_asic_flush_hdp(adev, NULL);
+
PP_ASSERT_WITH_CODE((ret = vega20_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetDriverDramAddrHigh,
upper_32_bits(priv->smu_tables.entry[table_id].mc_addr))) == 0,
@@ -242,11 +245,14 @@ int vega20_set_activity_monitor_coeff(struct pp_hwmgr *hwmgr,
{
struct vega20_smumgr *priv =
(struct vega20_smumgr *)(hwmgr->smu_backend);
+ struct amdgpu_device *adev = hwmgr->adev;
int ret = 0;
memcpy(priv->smu_tables.entry[TABLE_ACTIVITY_MONITOR_COEFF].table, table,
priv->smu_tables.entry[TABLE_ACTIVITY_MONITOR_COEFF].size);
+ amdgpu_asic_flush_hdp(adev, NULL);
+
PP_ASSERT_WITH_CODE((ret = vega20_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetDriverDramAddrHigh,
upper_32_bits(priv->smu_tables.entry[TABLE_ACTIVITY_MONITOR_COEFF].mc_addr))) == 0,
@@ -290,7 +296,7 @@ int vega20_get_activity_monitor_coeff(struct pp_hwmgr *hwmgr,
return ret);
/* flush hdp cache */
- adev->nbio.funcs->hdp_flush(adev, NULL);
+ amdgpu_asic_flush_hdp(adev, NULL);
memcpy(table, priv->smu_tables.entry[TABLE_ACTIVITY_MONITOR_COEFF].table,
priv->smu_tables.entry[TABLE_ACTIVITY_MONITOR_COEFF].size);
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/vegam_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/vegam_smumgr.c
index ae18fbcb26fb..b0e0d67cd54b 100644
--- a/drivers/gpu/drm/amd/powerplay/smumgr/vegam_smumgr.c
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/vegam_smumgr.c
@@ -1114,7 +1114,6 @@ static int vegam_populate_smc_acpi_level(struct pp_hwmgr *hwmgr,
(struct phm_ppt_v1_information *)(hwmgr->pptable);
SMIO_Pattern vol_level;
uint32_t mvdd;
- uint16_t us_mvdd;
table->ACPILevel.Flags &= ~PPSMC_SWSTATE_FLAG_DC;
@@ -1168,17 +1167,6 @@ static int vegam_populate_smc_acpi_level(struct pp_hwmgr *hwmgr,
"in Clock Dependency Table",
);
- us_mvdd = 0;
- if ((SMU7_VOLTAGE_CONTROL_NONE == data->mvdd_control) ||
- (data->mclk_dpm_key_disabled))
- us_mvdd = data->vbios_boot_state.mvdd_bootup_value;
- else {
- if (!vegam_populate_mvdd_value(hwmgr,
- data->dpm_table.mclk_table.dpm_levels[0].value,
- &vol_level))
- us_mvdd = vol_level.Voltage;
- }
-
if (!vegam_populate_mvdd_value(hwmgr, 0, &vol_level))
table->MemoryACPILevel.MinMvdd = PP_HOST_TO_SMC_UL(vol_level.Voltage);
else
@@ -1383,11 +1371,16 @@ static int vegam_populate_smc_boot_level(struct pp_hwmgr *hwmgr,
result = phm_find_boot_level(&(data->dpm_table.sclk_table),
data->vbios_boot_state.sclk_bootup_value,
(uint32_t *)&(table->GraphicsBootLevel));
+ if (result)
+ return result;
result = phm_find_boot_level(&(data->dpm_table.mclk_table),
data->vbios_boot_state.mclk_bootup_value,
(uint32_t *)&(table->MemoryBootLevel));
+ if (result)
+ return result;
+
table->BootVddc = data->vbios_boot_state.vddc_bootup_value *
VOLTAGE_SCALE;
table->BootVddci = data->vbios_boot_state.vddci_bootup_value *
@@ -1493,7 +1486,7 @@ static int vegam_populate_clock_stretcher_data_table(struct pp_hwmgr *hwmgr)
struct vegam_smumgr *smu_data =
(struct vegam_smumgr *)(hwmgr->smu_backend);
- uint8_t i, stretch_amount, stretch_amount2, volt_offset = 0;
+ uint8_t i, stretch_amount, volt_offset = 0;
struct phm_ppt_v1_information *table_info =
(struct phm_ppt_v1_information *)(hwmgr->pptable);
struct phm_ppt_v1_clock_voltage_dependency_table *sclk_table =
@@ -1532,11 +1525,9 @@ static int vegam_populate_clock_stretcher_data_table(struct pp_hwmgr *hwmgr)
(table_info->cac_dtp_table->ucCKS_LDO_REFSEL != 0) ?
table_info->cac_dtp_table->ucCKS_LDO_REFSEL : 5;
/* Populate CKS Lookup Table */
- if (stretch_amount == 1 || stretch_amount == 2 || stretch_amount == 5)
- stretch_amount2 = 0;
- else if (stretch_amount == 3 || stretch_amount == 4)
- stretch_amount2 = 1;
- else {
+ if (!(stretch_amount == 1 || stretch_amount == 2 ||
+ stretch_amount == 5 || stretch_amount == 3 ||
+ stretch_amount == 4)) {
phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
PHM_PlatformCaps_ClockStretcher);
PP_ASSERT_WITH_CODE(false,
diff --git a/drivers/gpu/drm/amd/powerplay/vega20_ppt.c b/drivers/gpu/drm/amd/powerplay/vega20_ppt.c
index 0d3a3b0a934e..38febd5ca4da 100644
--- a/drivers/gpu/drm/amd/powerplay/vega20_ppt.c
+++ b/drivers/gpu/drm/amd/powerplay/vega20_ppt.c
@@ -338,6 +338,10 @@ static int vega20_tables_init(struct smu_context *smu, struct smu_table *tables)
return -ENOMEM;
smu_table->metrics_time = 0;
+ smu_table->watermarks_table = kzalloc(sizeof(Watermarks_t), GFP_KERNEL);
+ if (!smu_table->watermarks_table)
+ return -ENOMEM;
+
return 0;
}
@@ -2235,7 +2239,7 @@ static int vega20_apply_clocks_adjust_rules(struct smu_context *smu)
}
static int
-vega20_notify_smc_dispaly_config(struct smu_context *smu)
+vega20_notify_smc_display_config(struct smu_context *smu)
{
struct vega20_dpm_table *dpm_table = smu->smu_dpm.dpm_context;
struct vega20_single_dpm_table *memtable = &dpm_table->mem_table;
@@ -3194,6 +3198,7 @@ static const struct pptable_funcs vega20_ppt_funcs = {
.get_od_percentage = vega20_get_od_percentage,
.get_power_profile_mode = vega20_get_power_profile_mode,
.set_power_profile_mode = vega20_set_power_profile_mode,
+ .set_performance_level = smu_v11_0_set_performance_level,
.set_od_percentage = vega20_set_od_percentage,
.set_default_od_settings = vega20_set_default_od_settings,
.od_edit_dpm_table = vega20_odn_edit_dpm_table,
@@ -3203,7 +3208,7 @@ static const struct pptable_funcs vega20_ppt_funcs = {
.pre_display_config_changed = vega20_pre_display_config_changed,
.display_config_changed = vega20_display_config_changed,
.apply_clocks_adjust_rules = vega20_apply_clocks_adjust_rules,
- .notify_smc_dispaly_config = vega20_notify_smc_dispaly_config,
+ .notify_smc_display_config = vega20_notify_smc_display_config,
.force_dpm_limit_value = vega20_force_dpm_limit_value,
.unforce_dpm_levels = vega20_unforce_dpm_levels,
.get_profiling_clk_mask = vega20_get_profiling_clk_mask,
@@ -3231,6 +3236,7 @@ static const struct pptable_funcs vega20_ppt_funcs = {
.check_fw_version = smu_v11_0_check_fw_version,
.write_pptable = smu_v11_0_write_pptable,
.set_min_dcef_deep_sleep = smu_v11_0_set_min_dcef_deep_sleep,
+ .set_driver_table_location = smu_v11_0_set_driver_table_location,
.set_tool_table_location = smu_v11_0_set_tool_table_location,
.notify_memory_pool_location = smu_v11_0_notify_memory_pool_location,
.system_features_control = smu_v11_0_system_features_control,
@@ -3259,7 +3265,8 @@ static const struct pptable_funcs vega20_ppt_funcs = {
.baco_is_support= smu_v11_0_baco_is_support,
.baco_get_state = smu_v11_0_baco_get_state,
.baco_set_state = smu_v11_0_baco_set_state,
- .baco_reset = smu_v11_0_baco_reset,
+ .baco_enter = smu_v11_0_baco_enter,
+ .baco_exit = smu_v11_0_baco_exit,
.get_dpm_ultimate_freq = smu_v11_0_get_dpm_ultimate_freq,
.set_soft_freq_limited_range = smu_v11_0_set_soft_freq_limited_range,
.override_pcie_parameters = smu_v11_0_override_pcie_parameters,
diff --git a/drivers/gpu/drm/arc/arcpgu_crtc.c b/drivers/gpu/drm/arc/arcpgu_crtc.c
index dfaddbb7da0d..8ae1e1f97a73 100644
--- a/drivers/gpu/drm/arc/arcpgu_crtc.c
+++ b/drivers/gpu/drm/arc/arcpgu_crtc.c
@@ -20,9 +20,10 @@
#define ENCODE_PGU_XY(x, y) ((((x) - 1) << 16) | ((y) - 1))
-static struct simplefb_format supported_formats[] = {
- { "r5g6b5", 16, {11, 5}, {5, 6}, {0, 5}, {0, 0}, DRM_FORMAT_RGB565 },
- { "r8g8b8", 24, {16, 8}, {8, 8}, {0, 8}, {0, 0}, DRM_FORMAT_RGB888 },
+static const u32 arc_pgu_supported_formats[] = {
+ DRM_FORMAT_RGB565,
+ DRM_FORMAT_XRGB8888,
+ DRM_FORMAT_ARGB8888,
};
static void arc_pgu_set_pxl_fmt(struct drm_crtc *crtc)
@@ -30,22 +31,24 @@ static void arc_pgu_set_pxl_fmt(struct drm_crtc *crtc)
struct arcpgu_drm_private *arcpgu = crtc_to_arcpgu_priv(crtc);
const struct drm_framebuffer *fb = crtc->primary->state->fb;
uint32_t pixel_format = fb->format->format;
- struct simplefb_format *format = NULL;
+ u32 format = DRM_FORMAT_INVALID;
int i;
+ u32 reg_ctrl;
- for (i = 0; i < ARRAY_SIZE(supported_formats); i++) {
- if (supported_formats[i].fourcc == pixel_format)
- format = &supported_formats[i];
+ for (i = 0; i < ARRAY_SIZE(arc_pgu_supported_formats); i++) {
+ if (arc_pgu_supported_formats[i] == pixel_format)
+ format = arc_pgu_supported_formats[i];
}
- if (WARN_ON(!format))
+ if (WARN_ON(format == DRM_FORMAT_INVALID))
return;
- if (format->fourcc == DRM_FORMAT_RGB888)
- arc_pgu_write(arcpgu, ARCPGU_REG_CTRL,
- arc_pgu_read(arcpgu, ARCPGU_REG_CTRL) |
- ARCPGU_MODE_RGB888_MASK);
-
+ reg_ctrl = arc_pgu_read(arcpgu, ARCPGU_REG_CTRL);
+ if (format == DRM_FORMAT_RGB565)
+ reg_ctrl &= ~ARCPGU_MODE_XRGB8888;
+ else
+ reg_ctrl |= ARCPGU_MODE_XRGB8888;
+ arc_pgu_write(arcpgu, ARCPGU_REG_CTRL, reg_ctrl);
}
static const struct drm_crtc_funcs arc_pgu_crtc_funcs = {
@@ -193,18 +196,15 @@ static struct drm_plane *arc_pgu_plane_init(struct drm_device *drm)
{
struct arcpgu_drm_private *arcpgu = drm->dev_private;
struct drm_plane *plane = NULL;
- u32 formats[ARRAY_SIZE(supported_formats)], i;
int ret;
plane = devm_kzalloc(drm->dev, sizeof(*plane), GFP_KERNEL);
if (!plane)
return ERR_PTR(-ENOMEM);
- for (i = 0; i < ARRAY_SIZE(supported_formats); i++)
- formats[i] = supported_formats[i].fourcc;
-
ret = drm_universal_plane_init(drm, plane, 0xff, &arc_pgu_plane_funcs,
- formats, ARRAY_SIZE(formats),
+ arc_pgu_supported_formats,
+ ARRAY_SIZE(arc_pgu_supported_formats),
NULL,
DRM_PLANE_TYPE_PRIMARY, NULL);
if (ret)
diff --git a/drivers/gpu/drm/arc/arcpgu_regs.h b/drivers/gpu/drm/arc/arcpgu_regs.h
index dab2c380f7f3..b689a382d556 100644
--- a/drivers/gpu/drm/arc/arcpgu_regs.h
+++ b/drivers/gpu/drm/arc/arcpgu_regs.h
@@ -25,7 +25,7 @@
#define ARCPGU_CTRL_VS_POL_OFST 0x3
#define ARCPGU_CTRL_HS_POL_MASK 0x1
#define ARCPGU_CTRL_HS_POL_OFST 0x4
-#define ARCPGU_MODE_RGB888_MASK 0x04
+#define ARCPGU_MODE_XRGB8888 BIT(2)
#define ARCPGU_STAT_BUSY_MASK 0x02
#endif
diff --git a/drivers/gpu/drm/arm/display/Kconfig b/drivers/gpu/drm/arm/display/Kconfig
index e87ff8623076..cec0639e3aa1 100644
--- a/drivers/gpu/drm/arm/display/Kconfig
+++ b/drivers/gpu/drm/arm/display/Kconfig
@@ -12,9 +12,3 @@ config DRM_KOMEDA
Processor driver. It supports the D71 variants of the hardware.
If compiled as a module it will be called komeda.
-
-config DRM_KOMEDA_ERROR_PRINT
- bool "Enable komeda error print"
- depends on DRM_KOMEDA
- help
- Choose this option to enable error printing.
diff --git a/drivers/gpu/drm/arm/display/include/malidp_product.h b/drivers/gpu/drm/arm/display/include/malidp_product.h
index 1053b11352eb..16a8a2c22c42 100644
--- a/drivers/gpu/drm/arm/display/include/malidp_product.h
+++ b/drivers/gpu/drm/arm/display/include/malidp_product.h
@@ -18,7 +18,8 @@
#define MALIDP_CORE_ID_STATUS(__core_id) (((__u32)(__core_id)) & 0xFF)
/* Mali-display product IDs */
-#define MALIDP_D71_PRODUCT_ID 0x0071
+#define MALIDP_D71_PRODUCT_ID 0x0071
+#define MALIDP_D32_PRODUCT_ID 0x0032
union komeda_config_id {
struct {
diff --git a/drivers/gpu/drm/arm/display/komeda/Makefile b/drivers/gpu/drm/arm/display/komeda/Makefile
index f095a1c68ac7..1931a7fa1a14 100644
--- a/drivers/gpu/drm/arm/display/komeda/Makefile
+++ b/drivers/gpu/drm/arm/display/komeda/Makefile
@@ -16,12 +16,11 @@ komeda-y := \
komeda_crtc.o \
komeda_plane.o \
komeda_wb_connector.o \
- komeda_private_obj.o
+ komeda_private_obj.o \
+ komeda_event.o
komeda-y += \
d71/d71_dev.o \
d71/d71_component.o
-komeda-$(CONFIG_DRM_KOMEDA_ERROR_PRINT) += komeda_event.o
-
obj-$(CONFIG_DRM_KOMEDA) += komeda.o
diff --git a/drivers/gpu/drm/arm/display/komeda/d71/d71_component.c b/drivers/gpu/drm/arm/display/komeda/d71/d71_component.c
index f0ba26e282c3..8a02ade369db 100644
--- a/drivers/gpu/drm/arm/display/komeda/d71/d71_component.c
+++ b/drivers/gpu/drm/arm/display/komeda/d71/d71_component.c
@@ -1044,7 +1044,9 @@ static int d71_merger_init(struct d71_dev *d71,
static void d71_improc_update(struct komeda_component *c,
struct komeda_component_state *state)
{
+ struct drm_crtc_state *crtc_st = state->crtc->state;
struct komeda_improc_state *st = to_improc_st(state);
+ struct d71_pipeline *pipe = to_d71_pipeline(c->pipeline);
u32 __iomem *reg = c->reg;
u32 index, mask = 0, ctrl = 0;
@@ -1055,6 +1057,24 @@ static void d71_improc_update(struct komeda_component *c,
malidp_write32(reg, BLK_SIZE, HV_SIZE(st->hsize, st->vsize));
malidp_write32(reg, IPS_DEPTH, st->color_depth);
+ if (crtc_st->color_mgmt_changed) {
+ mask |= IPS_CTRL_FT | IPS_CTRL_RGB;
+
+ if (crtc_st->gamma_lut) {
+ malidp_write_group(pipe->dou_ft_coeff_addr, FT_COEFF0,
+ KOMEDA_N_GAMMA_COEFFS,
+ st->fgamma_coeffs);
+ ctrl |= IPS_CTRL_FT; /* enable gamma */
+ }
+
+ if (crtc_st->ctm) {
+ malidp_write_group(reg, IPS_RGB_RGB_COEFF0,
+ KOMEDA_N_CTM_COEFFS,
+ st->ctm_coeffs);
+ ctrl |= IPS_CTRL_RGB; /* enable gamut */
+ }
+ }
+
mask |= IPS_CTRL_YUV | IPS_CTRL_CHD422 | IPS_CTRL_CHD420;
/* config color format */
@@ -1250,7 +1270,7 @@ static int d71_timing_ctrlr_init(struct d71_dev *d71,
ctrlr = to_ctrlr(c);
- ctrlr->supports_dual_link = true;
+ ctrlr->supports_dual_link = d71->supports_dual_link;
return 0;
}
diff --git a/drivers/gpu/drm/arm/display/komeda/d71/d71_dev.c b/drivers/gpu/drm/arm/display/komeda/d71/d71_dev.c
index 822b23a1ce75..00fa56c29b3e 100644
--- a/drivers/gpu/drm/arm/display/komeda/d71/d71_dev.c
+++ b/drivers/gpu/drm/arm/display/komeda/d71/d71_dev.c
@@ -20,8 +20,10 @@ static u64 get_lpu_event(struct d71_pipeline *d71_pipeline)
evts |= KOMEDA_EVENT_IBSY;
if (raw_status & LPU_IRQ_EOW)
evts |= KOMEDA_EVENT_EOW;
+ if (raw_status & LPU_IRQ_OVR)
+ evts |= KOMEDA_EVENT_OVR;
- if (raw_status & (LPU_IRQ_ERR | LPU_IRQ_IBSY)) {
+ if (raw_status & (LPU_IRQ_ERR | LPU_IRQ_IBSY | LPU_IRQ_OVR)) {
u32 restore = 0, tbu_status;
/* Check error of LPU status */
status = malidp_read32(reg, BLK_STATUS);
@@ -45,6 +47,15 @@ static u64 get_lpu_event(struct d71_pipeline *d71_pipeline)
restore |= LPU_STATUS_ACE3;
evts |= KOMEDA_ERR_ACE3;
}
+ if (status & LPU_STATUS_FEMPTY) {
+ restore |= LPU_STATUS_FEMPTY;
+ evts |= KOMEDA_EVENT_EMPTY;
+ }
+ if (status & LPU_STATUS_FFULL) {
+ restore |= LPU_STATUS_FFULL;
+ evts |= KOMEDA_EVENT_FULL;
+ }
+
if (restore != 0)
malidp_write32_mask(reg, BLK_STATUS, restore, 0);
@@ -371,23 +382,33 @@ static int d71_enum_resources(struct komeda_dev *mdev)
goto err_cleanup;
}
- /* probe PERIPH */
+ /* Only the legacy HW has the periph block; newer HW merges
+ * the periph functionality into the GCU.
+ */
value = malidp_read32(d71->periph_addr, BLK_BLOCK_INFO);
- if (BLOCK_INFO_BLK_TYPE(value) != D71_BLK_TYPE_PERIPH) {
- DRM_ERROR("access blk periph but got blk: %d.\n",
- BLOCK_INFO_BLK_TYPE(value));
- err = -EINVAL;
- goto err_cleanup;
+ if (BLOCK_INFO_BLK_TYPE(value) != D71_BLK_TYPE_PERIPH)
+ d71->periph_addr = NULL;
+
+ if (d71->periph_addr) {
+ /* probe PERIPHERAL in legacy HW */
+ value = malidp_read32(d71->periph_addr, PERIPH_CONFIGURATION_ID);
+
+ d71->max_line_size = value & PERIPH_MAX_LINE_SIZE ? 4096 : 2048;
+ d71->max_vsize = 4096;
+ d71->num_rich_layers = value & PERIPH_NUM_RICH_LAYERS ? 2 : 1;
+ d71->supports_dual_link = !!(value & PERIPH_SPLIT_EN);
+ d71->integrates_tbu = !!(value & PERIPH_TBU_EN);
+ } else {
+ value = malidp_read32(d71->gcu_addr, GCU_CONFIGURATION_ID0);
+ d71->max_line_size = GCU_MAX_LINE_SIZE(value);
+ d71->max_vsize = GCU_MAX_NUM_LINES(value);
+
+ value = malidp_read32(d71->gcu_addr, GCU_CONFIGURATION_ID1);
+ d71->num_rich_layers = GCU_NUM_RICH_LAYERS(value);
+ d71->supports_dual_link = GCU_DISPLAY_SPLIT_EN(value);
+ d71->integrates_tbu = GCU_DISPLAY_TBU_EN(value);
}
- value = malidp_read32(d71->periph_addr, PERIPH_CONFIGURATION_ID);
-
- d71->max_line_size = value & PERIPH_MAX_LINE_SIZE ? 4096 : 2048;
- d71->max_vsize = 4096;
- d71->num_rich_layers = value & PERIPH_NUM_RICH_LAYERS ? 2 : 1;
- d71->supports_dual_link = value & PERIPH_SPLIT_EN ? true : false;
- d71->integrates_tbu = value & PERIPH_TBU_EN ? true : false;
-
for (i = 0; i < d71->num_pipelines; i++) {
pipe = komeda_pipeline_add(mdev, sizeof(struct d71_pipeline),
&d71_pipeline_funcs);
@@ -414,8 +435,11 @@ static int d71_enum_resources(struct komeda_dev *mdev)
d71->pipes[i] = to_d71_pipeline(pipe);
}
- /* loop the register blks and probe */
- i = 2; /* exclude GCU and PERIPH */
+ /* Loop over the register blocks and probe them.
+ * NOTE: d71->num_blocks includes reserved blocks:
+ * d71->num_blocks = GCU + valid blocks + reserved blocks
+ */
+ i = 1; /* exclude GCU */
offset = D71_BLOCK_SIZE; /* skip GCU */
while (i < d71->num_blocks) {
blk_base = mdev->reg_base + (offset >> 2);
@@ -425,9 +449,9 @@ static int d71_enum_resources(struct komeda_dev *mdev)
err = d71_probe_block(d71, &blk, blk_base);
if (err)
goto err_cleanup;
- i++;
}
+ i++;
offset += D71_BLOCK_SIZE;
}
@@ -594,10 +618,26 @@ static const struct komeda_dev_funcs d71_chip_funcs = {
const struct komeda_dev_funcs *
d71_identify(u32 __iomem *reg_base, struct komeda_chip_info *chip)
{
+ const struct komeda_dev_funcs *funcs;
+ u32 product_id;
+
+ chip->core_id = malidp_read32(reg_base, GLB_CORE_ID);
+
+ product_id = MALIDP_CORE_ID_PRODUCT_ID(chip->core_id);
+
+ switch (product_id) {
+ case MALIDP_D71_PRODUCT_ID:
+ case MALIDP_D32_PRODUCT_ID:
+ funcs = &d71_chip_funcs;
+ break;
+ default:
+ DRM_ERROR("Unsupported product: 0x%x\n", product_id);
+ return NULL;
+ }
+
chip->arch_id = malidp_read32(reg_base, GLB_ARCH_ID);
- chip->core_id = malidp_read32(reg_base, GLB_CORE_ID);
chip->core_info = malidp_read32(reg_base, GLB_CORE_INFO);
chip->bus_width = D71_BUS_WIDTH_16_BYTES;
- return &d71_chip_funcs;
+ return funcs;
}
diff --git a/drivers/gpu/drm/arm/display/komeda/d71/d71_regs.h b/drivers/gpu/drm/arm/display/komeda/d71/d71_regs.h
index 1727dc993909..e80172a0b320 100644
--- a/drivers/gpu/drm/arm/display/komeda/d71/d71_regs.h
+++ b/drivers/gpu/drm/arm/display/komeda/d71/d71_regs.h
@@ -72,6 +72,19 @@
#define GCU_CONTROL_MODE(x) ((x) & 0x7)
#define GCU_CONTROL_SRST BIT(16)
+/* GCU_CONFIGURATION registers */
+#define GCU_CONFIGURATION_ID0 0x100
+#define GCU_CONFIGURATION_ID1 0x104
+
+/* GCU configuration */
+#define GCU_MAX_LINE_SIZE(x) ((x) & 0xFFFF)
+#define GCU_MAX_NUM_LINES(x) ((x) >> 16)
+#define GCU_NUM_RICH_LAYERS(x) ((x) & 0x7)
+#define GCU_NUM_PIPELINES(x) (((x) >> 3) & 0x7)
+#define GCU_NUM_SCALERS(x) (((x) >> 6) & 0x7)
+#define GCU_DISPLAY_SPLIT_EN(x) (((x) >> 16) & 0x1)
+#define GCU_DISPLAY_TBU_EN(x) (((x) >> 17) & 0x1)
+
/* GCU opmode */
#define INACTIVE_MODE 0
#define TBU_CONNECT_MODE 1
@@ -162,6 +175,7 @@
#define TBU_DOUTSTDCAPB_MASK 0x3F
/* LPU_IRQ_BITS */
+#define LPU_IRQ_OVR BIT(9)
#define LPU_IRQ_IBSY BIT(10)
#define LPU_IRQ_ERR BIT(11)
#define LPU_IRQ_EOW BIT(12)
@@ -172,6 +186,8 @@
#define LPU_STATUS_AXIE BIT(4)
#define LPU_STATUS_AXIRP BIT(5)
#define LPU_STATUS_AXIWP BIT(6)
+#define LPU_STATUS_FEMPTY BIT(11)
+#define LPU_STATUS_FFULL BIT(14)
#define LPU_STATUS_ACE0 BIT(16)
#define LPU_STATUS_ACE1 BIT(17)
#define LPU_STATUS_ACE2 BIT(18)
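The new GCU_CONFIGURATION_ID0/ID1 macros above slice capability fields out of two packed 32-bit ID registers with shift-and-mask accessors, replacing the single-bit probing the PERIPH block used. Decoding a sample value with the same masks (the register contents here are made up, not dumped from hardware):

#include <stdio.h>

#define GCU_MAX_LINE_SIZE(x)    ((x) & 0xFFFF)
#define GCU_MAX_NUM_LINES(x)    ((x) >> 16)
#define GCU_NUM_RICH_LAYERS(x)  ((x) & 0x7)
#define GCU_DISPLAY_SPLIT_EN(x) (((x) >> 16) & 0x1)
#define GCU_DISPLAY_TBU_EN(x)   (((x) >> 17) & 0x1)

int main(void)
{
	/* Example register contents for illustration */
	unsigned int id0 = (4096u << 16) | 4096u;        /* 4096 lines x 4096 px */
	unsigned int id1 = (1u << 17) | (1u << 16) | 2u; /* TBU, split, 2 rich layers */

	printf("line size %u, lines %u\n",
	       GCU_MAX_LINE_SIZE(id0), GCU_MAX_NUM_LINES(id0));
	printf("rich layers %u, split %u, tbu %u\n",
	       GCU_NUM_RICH_LAYERS(id1),
	       GCU_DISPLAY_SPLIT_EN(id1), GCU_DISPLAY_TBU_EN(id1));
	return 0;
}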
diff --git a/drivers/gpu/drm/arm/display/komeda/komeda_color_mgmt.c b/drivers/gpu/drm/arm/display/komeda/komeda_color_mgmt.c
index 9d14a92dbb17..d8e449e6ebda 100644
--- a/drivers/gpu/drm/arm/display/komeda/komeda_color_mgmt.c
+++ b/drivers/gpu/drm/arm/display/komeda/komeda_color_mgmt.c
@@ -65,3 +65,69 @@ const s32 *komeda_select_yuv2rgb_coeffs(u32 color_encoding, u32 color_range)
return coeffs;
}
+
+struct gamma_curve_sector {
+ u32 boundary_start;
+ u32 num_of_segments;
+ u32 segment_width;
+};
+
+struct gamma_curve_segment {
+ u32 start;
+ u32 end;
+};
+
+static struct gamma_curve_sector sector_tbl[] = {
+ { 0, 4, 4 },
+ { 16, 4, 4 },
+ { 32, 4, 8 },
+ { 64, 4, 16 },
+ { 128, 4, 32 },
+ { 256, 4, 64 },
+ { 512, 16, 32 },
+ { 1024, 24, 128 },
+};
+
+static void
+drm_lut_to_coeffs(struct drm_property_blob *lut_blob, u32 *coeffs,
+ struct gamma_curve_sector *sector_tbl, u32 num_sectors)
+{
+ struct drm_color_lut *lut;
+ u32 i, j, in, num = 0;
+
+ if (!lut_blob)
+ return;
+
+ lut = lut_blob->data;
+
+ for (i = 0; i < num_sectors; i++) {
+ for (j = 0; j < sector_tbl[i].num_of_segments; j++) {
+ in = sector_tbl[i].boundary_start +
+ j * sector_tbl[i].segment_width;
+
+ coeffs[num++] = drm_color_lut_extract(lut[in].red,
+ KOMEDA_COLOR_PRECISION);
+ }
+ }
+
+ coeffs[num] = BIT(KOMEDA_COLOR_PRECISION);
+}
+
+void drm_lut_to_fgamma_coeffs(struct drm_property_blob *lut_blob, u32 *coeffs)
+{
+ drm_lut_to_coeffs(lut_blob, coeffs, sector_tbl, ARRAY_SIZE(sector_tbl));
+}
+
+void drm_ctm_to_coeffs(struct drm_property_blob *ctm_blob, u32 *coeffs)
+{
+ struct drm_color_ctm *ctm;
+ u32 i;
+
+ if (!ctm_blob)
+ return;
+
+ ctm = ctm_blob->data;
+
+ for (i = 0; i < KOMEDA_N_CTM_COEFFS; i++)
+ coeffs[i] = drm_color_ctm_s31_32_to_qm_n(ctm->matrix[i], 3, 12);
+}
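Two properties of the conversion above are worth noting. First, the sector table samples 4+4+4+4+4+4+16+24 = 64 LUT entries, and the last sector ends at 1024 + 24*128 = 4096 = KOMEDA_COLOR_LUT_SIZE; together with the trailing full-scale entry this yields the 65 coefficients of KOMEDA_N_GAMMA_COEFFS. Second, drm_color_ctm_s31_32_to_qm_n(v, 3, 12) converts the S31.32 CTM values to Q3.12 fixed point, so an identity coefficient of 1.0 (1ULL << 32) becomes 0x1000. A standalone sketch verifying the coefficient count (the struct and table are local copies for the example):

    #include <stdint.h>
    #include <stdio.h>

    struct gamma_curve_sector { uint32_t boundary_start, num_of_segments, segment_width; };

    static const struct gamma_curve_sector tbl[] = {
            { 0, 4, 4 },    { 16, 4, 4 },   { 32, 4, 8 },    { 64, 4, 16 },
            { 128, 4, 32 }, { 256, 4, 64 }, { 512, 16, 32 }, { 1024, 24, 128 },
    };

    int main(void)
    {
            unsigned int i, samples = 0;

            for (i = 0; i < sizeof(tbl) / sizeof(tbl[0]); i++)
                    samples += tbl[i].num_of_segments;

            /* 64 sampled entries + 1 trailing full-scale coefficient */
            printf("coeffs: %u\n", samples + 1);	/* prints 65 */
            return 0;
    }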
diff --git a/drivers/gpu/drm/arm/display/komeda/komeda_color_mgmt.h b/drivers/gpu/drm/arm/display/komeda/komeda_color_mgmt.h
index a2df218f58e7..2f4668466112 100644
--- a/drivers/gpu/drm/arm/display/komeda/komeda_color_mgmt.h
+++ b/drivers/gpu/drm/arm/display/komeda/komeda_color_mgmt.h
@@ -11,7 +11,15 @@
#include <drm/drm_color_mgmt.h>
#define KOMEDA_N_YUV2RGB_COEFFS 12
+#define KOMEDA_N_RGB2YUV_COEFFS 12
+#define KOMEDA_COLOR_PRECISION 12
+#define KOMEDA_N_GAMMA_COEFFS 65
+#define KOMEDA_COLOR_LUT_SIZE BIT(KOMEDA_COLOR_PRECISION)
+#define KOMEDA_N_CTM_COEFFS 9
+
+void drm_lut_to_fgamma_coeffs(struct drm_property_blob *lut_blob, u32 *coeffs);
+void drm_ctm_to_coeffs(struct drm_property_blob *ctm_blob, u32 *coeffs);
const s32 *komeda_select_yuv2rgb_coeffs(u32 color_encoding, u32 color_range);
-#endif
+#endif /*_KOMEDA_COLOR_MGMT_H_*/
diff --git a/drivers/gpu/drm/arm/display/komeda/komeda_crtc.c b/drivers/gpu/drm/arm/display/komeda/komeda_crtc.c
index 252015210fbc..56bd938961ee 100644
--- a/drivers/gpu/drm/arm/display/komeda/komeda_crtc.c
+++ b/drivers/gpu/drm/arm/display/komeda/komeda_crtc.c
@@ -5,6 +5,7 @@
*
*/
#include <linux/clk.h>
+#include <linux/pm_runtime.h>
#include <linux/spinlock.h>
#include <drm/drm_atomic.h>
@@ -274,6 +275,7 @@ static void
komeda_crtc_atomic_enable(struct drm_crtc *crtc,
struct drm_crtc_state *old)
{
+ pm_runtime_get_sync(crtc->dev->dev);
komeda_crtc_prepare(to_kcrtc(crtc));
drm_crtc_vblank_on(crtc);
WARN_ON(drm_crtc_vblank_get(crtc));
@@ -372,6 +374,7 @@ komeda_crtc_atomic_disable(struct drm_crtc *crtc,
drm_crtc_vblank_put(crtc);
drm_crtc_vblank_off(crtc);
komeda_crtc_unprepare(kcrtc);
+ pm_runtime_put(crtc->dev->dev);
}
static void
@@ -617,6 +620,8 @@ static int komeda_crtc_add(struct komeda_kms_dev *kms,
crtc->port = kcrtc->master->of_output_port;
+ drm_crtc_enable_color_mgmt(crtc, 0, true, KOMEDA_COLOR_LUT_SIZE);
+
return err;
}
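The pm_runtime_get_sync()/pm_runtime_put() calls added to the CRTC enable/disable paths above follow the usual runtime-PM pairing: every atomic_enable takes a reference so the device cannot runtime-suspend while any CRTC is active, and the matching atomic_disable drops it. A minimal sketch of the pattern (the example_* function names are illustrative, not part of the driver):

    #include <linux/pm_runtime.h>
    #include <drm/drm_crtc.h>

    static void example_crtc_enable(struct drm_crtc *crtc)
    {
            /* Resumes the device if it was runtime-suspended. */
            pm_runtime_get_sync(crtc->dev->dev);
            /* ... program the hardware ... */
    }

    static void example_crtc_disable(struct drm_crtc *crtc)
    {
            /* ... stop the hardware ... */
            /* May trigger runtime suspend once the count drops to zero. */
            pm_runtime_put(crtc->dev->dev);
    }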
diff --git a/drivers/gpu/drm/arm/display/komeda/komeda_dev.c b/drivers/gpu/drm/arm/display/komeda/komeda_dev.c
index 937a6d4c4865..1d767473ba8a 100644
--- a/drivers/gpu/drm/arm/display/komeda/komeda_dev.c
+++ b/drivers/gpu/drm/arm/display/komeda/komeda_dev.c
@@ -10,6 +10,7 @@
#include <linux/of_graph.h>
#include <linux/of_reserved_mem.h>
#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
#include <linux/dma-mapping.h>
#ifdef CONFIG_DEBUG_FS
#include <linux/debugfs.h>
@@ -27,12 +28,16 @@ static int komeda_register_show(struct seq_file *sf, void *x)
seq_puts(sf, "\n====== Komeda register dump =========\n");
+ pm_runtime_get_sync(mdev->dev);
+
if (mdev->funcs->dump_register)
mdev->funcs->dump_register(mdev, sf);
for (i = 0; i < mdev->n_pipelines; i++)
komeda_pipeline_dump_register(mdev->pipelines[i], sf);
+ pm_runtime_put(mdev->dev);
+
return 0;
}
@@ -58,6 +63,8 @@ static void komeda_debugfs_init(struct komeda_dev *mdev)
mdev->debugfs_root = debugfs_create_dir("komeda", NULL);
debugfs_create_file("register", 0444, mdev->debugfs_root,
mdev, &komeda_register_fops);
+ debugfs_create_x16("err_verbosity", 0664, mdev->debugfs_root,
+ &mdev->err_verbosity);
}
#endif
@@ -113,22 +120,14 @@ static struct attribute_group komeda_sysfs_attr_group = {
.attrs = komeda_sysfs_entries,
};
-static int komeda_parse_pipe_dt(struct komeda_dev *mdev, struct device_node *np)
+static int komeda_parse_pipe_dt(struct komeda_pipeline *pipe)
{
- struct komeda_pipeline *pipe;
+ struct device_node *np = pipe->of_node;
struct clk *clk;
- u32 pipe_id;
- int ret = 0;
-
- ret = of_property_read_u32(np, "reg", &pipe_id);
- if (ret != 0 || pipe_id >= mdev->n_pipelines)
- return -EINVAL;
-
- pipe = mdev->pipelines[pipe_id];
clk = of_clk_get_by_name(np, "pxclk");
if (IS_ERR(clk)) {
- DRM_ERROR("get pxclk for pipeline %d failed!\n", pipe_id);
+ DRM_ERROR("get pxclk for pipeline %d failed!\n", pipe->id);
return PTR_ERR(clk);
}
pipe->pxlclk = clk;
@@ -142,7 +141,6 @@ static int komeda_parse_pipe_dt(struct komeda_dev *mdev, struct device_node *np)
of_graph_get_port_by_id(np, KOMEDA_OF_PORT_OUTPUT);
pipe->dual_link = pipe->of_output_links[0] && pipe->of_output_links[1];
- pipe->of_node = of_node_get(np);
return 0;
}
@@ -151,7 +149,9 @@ static int komeda_parse_dt(struct device *dev, struct komeda_dev *mdev)
{
struct platform_device *pdev = to_platform_device(dev);
struct device_node *child, *np = dev->of_node;
- int ret;
+ struct komeda_pipeline *pipe;
+ u32 pipe_id = U32_MAX;
+ int ret = -1;
mdev->irq = platform_get_irq(pdev, 0);
if (mdev->irq < 0) {
@@ -166,37 +166,44 @@ static int komeda_parse_dt(struct device *dev, struct komeda_dev *mdev)
ret = 0;
for_each_available_child_of_node(np, child) {
- if (of_node_cmp(child->name, "pipeline") == 0) {
- ret = komeda_parse_pipe_dt(mdev, child);
- if (ret) {
- DRM_ERROR("parse pipeline dt error!\n");
- of_node_put(child);
- break;
+ if (of_node_name_eq(child, "pipeline")) {
+ of_property_read_u32(child, "reg", &pipe_id);
+ if (pipe_id >= mdev->n_pipelines) {
+ DRM_WARN("Skip the redundant DT node: pipeline-%u.\n",
+ pipe_id);
+ continue;
}
+ mdev->pipelines[pipe_id]->of_node = of_node_get(child);
}
}
- return ret;
+ for (pipe_id = 0; pipe_id < mdev->n_pipelines; pipe_id++) {
+ pipe = mdev->pipelines[pipe_id];
+
+ if (!pipe->of_node) {
+ DRM_ERROR("Pipeline-%d doesn't have a DT node.\n",
+ pipe->id);
+ return -EINVAL;
+ }
+ ret = komeda_parse_pipe_dt(pipe);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
}
struct komeda_dev *komeda_dev_create(struct device *dev)
{
struct platform_device *pdev = to_platform_device(dev);
- const struct komeda_product_data *product;
+ komeda_identify_func komeda_identify;
struct komeda_dev *mdev;
- struct resource *io_res;
int err = 0;
- product = of_device_get_match_data(dev);
- if (!product)
+ komeda_identify = of_device_get_match_data(dev);
+ if (!komeda_identify)
return ERR_PTR(-ENODEV);
- io_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!io_res) {
- DRM_ERROR("No registers defined.\n");
- return ERR_PTR(-ENODEV);
- }
-
mdev = devm_kzalloc(dev, sizeof(*mdev), GFP_KERNEL);
if (!mdev)
return ERR_PTR(-ENOMEM);
@@ -204,7 +211,7 @@ struct komeda_dev *komeda_dev_create(struct device *dev)
mutex_init(&mdev->lock);
mdev->dev = dev;
- mdev->reg_base = devm_ioremap_resource(dev, io_res);
+ mdev->reg_base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(mdev->reg_base)) {
DRM_ERROR("Map register space failed.\n");
err = PTR_ERR(mdev->reg_base);
@@ -222,11 +229,9 @@ struct komeda_dev *komeda_dev_create(struct device *dev)
clk_prepare_enable(mdev->aclk);
- mdev->funcs = product->identify(mdev->reg_base, &mdev->chip);
- if (!komeda_product_match(mdev, product->product_id)) {
- DRM_ERROR("DT configured %x mismatch with real HW %x.\n",
- product->product_id,
- MALIDP_CORE_ID_PRODUCT_ID(mdev->chip.core_id));
+ mdev->funcs = komeda_identify(mdev->reg_base, &mdev->chip);
+ if (!mdev->funcs) {
+ DRM_ERROR("Failed to identify the HW.\n");
err = -ENODEV;
goto disable_clk;
}
@@ -263,15 +268,6 @@ struct komeda_dev *komeda_dev_create(struct device *dev)
if (!mdev->iommu)
DRM_INFO("continue without IOMMU support!\n");
- if (mdev->iommu && mdev->funcs->connect_iommu) {
- err = mdev->funcs->connect_iommu(mdev);
- if (err) {
- DRM_ERROR("connect iommu failed.\n");
- mdev->iommu = NULL;
- goto disable_clk;
- }
- }
-
clk_disable_unprepare(mdev->aclk);
err = sysfs_create_group(&dev->kobj, &komeda_sysfs_attr_group);
@@ -280,6 +276,8 @@ struct komeda_dev *komeda_dev_create(struct device *dev)
goto err_cleanup;
}
+ mdev->err_verbosity = KOMEDA_DEV_PRINT_ERR_EVENTS;
+
#ifdef CONFIG_DEBUG_FS
komeda_debugfs_init(mdev);
#endif
@@ -308,11 +306,6 @@ void komeda_dev_destroy(struct komeda_dev *mdev)
if (mdev->aclk)
clk_prepare_enable(mdev->aclk);
- if (mdev->iommu && mdev->funcs->disconnect_iommu)
- if (mdev->funcs->disconnect_iommu(mdev))
- DRM_ERROR("disconnect iommu failed.\n");
- mdev->iommu = NULL;
-
for (i = 0; i < mdev->n_pipelines; i++) {
komeda_pipeline_destroy(mdev, mdev->pipelines[i]);
mdev->pipelines[i] = NULL;
@@ -341,44 +334,26 @@ void komeda_dev_destroy(struct komeda_dev *mdev)
int komeda_dev_resume(struct komeda_dev *mdev)
{
- int ret = 0;
-
clk_prepare_enable(mdev->aclk);
- if (mdev->iommu && mdev->funcs->connect_iommu) {
- ret = mdev->funcs->connect_iommu(mdev);
- if (ret < 0) {
- DRM_ERROR("connect iommu failed.\n");
- goto disable_clk;
- }
- }
-
- ret = mdev->funcs->enable_irq(mdev);
+ mdev->funcs->enable_irq(mdev);
-disable_clk:
- clk_disable_unprepare(mdev->aclk);
+ if (mdev->iommu && mdev->funcs->connect_iommu)
+ if (mdev->funcs->connect_iommu(mdev))
+ DRM_ERROR("connect iommu failed.\n");
- return ret;
+ return 0;
}
int komeda_dev_suspend(struct komeda_dev *mdev)
{
- int ret = 0;
-
- clk_prepare_enable(mdev->aclk);
-
- if (mdev->iommu && mdev->funcs->disconnect_iommu) {
- ret = mdev->funcs->disconnect_iommu(mdev);
- if (ret < 0) {
+ if (mdev->iommu && mdev->funcs->disconnect_iommu)
+ if (mdev->funcs->disconnect_iommu(mdev))
DRM_ERROR("disconnect iommu failed.\n");
- goto disable_clk;
- }
- }
- ret = mdev->funcs->disable_irq(mdev);
+ mdev->funcs->disable_irq(mdev);
-disable_clk:
clk_disable_unprepare(mdev->aclk);
- return ret;
+ return 0;
}
diff --git a/drivers/gpu/drm/arm/display/komeda/komeda_dev.h b/drivers/gpu/drm/arm/display/komeda/komeda_dev.h
index 414200233b64..ce27f2f27c24 100644
--- a/drivers/gpu/drm/arm/display/komeda/komeda_dev.h
+++ b/drivers/gpu/drm/arm/display/komeda/komeda_dev.h
@@ -20,6 +20,8 @@
#define KOMEDA_EVENT_OVR BIT_ULL(4)
#define KOMEDA_EVENT_EOW BIT_ULL(5)
#define KOMEDA_EVENT_MODE BIT_ULL(6)
+#define KOMEDA_EVENT_FULL BIT_ULL(7)
+#define KOMEDA_EVENT_EMPTY BIT_ULL(8)
#define KOMEDA_ERR_TETO BIT_ULL(14)
#define KOMEDA_ERR_TEMR BIT_ULL(15)
@@ -49,12 +51,15 @@
KOMEDA_ERR_ZME | KOMEDA_ERR_MERR | KOMEDA_ERR_TCF |\
KOMEDA_ERR_TTNG | KOMEDA_ERR_TTF)
-#define KOMEDA_WARN_EVENTS KOMEDA_ERR_CSCE
+#define KOMEDA_WARN_EVENTS \
+ (KOMEDA_ERR_CSCE | KOMEDA_EVENT_FULL | KOMEDA_EVENT_EMPTY)
-/* malidp device id */
-enum {
- MALI_D71 = 0,
-};
+#define KOMEDA_INFO_EVENTS (0 \
+ | KOMEDA_EVENT_VSYNC \
+ | KOMEDA_EVENT_FLIP \
+ | KOMEDA_EVENT_EOW \
+ | KOMEDA_EVENT_MODE \
+ )
/* pipeline DT ports */
enum {
@@ -69,12 +74,6 @@ struct komeda_chip_info {
u32 bus_width;
};
-struct komeda_product_data {
- u32 product_id;
- const struct komeda_dev_funcs *(*identify)(u32 __iomem *reg,
- struct komeda_chip_info *info);
-};
-
struct komeda_dev;
struct komeda_events {
@@ -202,6 +201,23 @@ struct komeda_dev {
/** @debugfs_root: root directory of komeda debugfs */
struct dentry *debugfs_root;
+ /**
+ * @err_verbosity: bitmask for how much extra info to print on error
+ *
+ * See KOMEDA_DEV_* macros for details. Low byte contains the debug
+ * level categories, the high byte contains extra debug options.
+ */
+ u16 err_verbosity;
+ /* Print a single line per error per frame with error events. */
+#define KOMEDA_DEV_PRINT_ERR_EVENTS BIT(0)
+ /* Print a single line per warning per frame with error events. */
+#define KOMEDA_DEV_PRINT_WARN_EVENTS BIT(1)
+ /* Print a single line per info event per frame with error events. */
+#define KOMEDA_DEV_PRINT_INFO_EVENTS BIT(2)
+ /* Dump DRM state on an error or warning event. */
+#define KOMEDA_DEV_PRINT_DUMP_STATE_ON_EVENT BIT(8)
+ /* Disable rate limiting of event prints (normally one per commit) */
+#define KOMEDA_DEV_PRINT_DISABLE_RATELIMIT BIT(12)
};
static inline bool
@@ -210,6 +226,9 @@ komeda_product_match(struct komeda_dev *mdev, u32 target)
return MALIDP_CORE_ID_PRODUCT_ID(mdev->chip.core_id) == target;
}
+typedef const struct komeda_dev_funcs *
+(*komeda_identify_func)(u32 __iomem *reg, struct komeda_chip_info *chip);
+
const struct komeda_dev_funcs *
d71_identify(u32 __iomem *reg, struct komeda_chip_info *chip);
@@ -218,11 +237,7 @@ void komeda_dev_destroy(struct komeda_dev *mdev);
struct komeda_dev *dev_to_mdev(struct device *dev);
-#ifdef CONFIG_DRM_KOMEDA_ERROR_PRINT
-void komeda_print_events(struct komeda_events *evts);
-#else
-static inline void komeda_print_events(struct komeda_events *evts) {}
-#endif
+void komeda_print_events(struct komeda_events *evts, struct drm_device *dev);
int komeda_dev_resume(struct komeda_dev *mdev);
int komeda_dev_suspend(struct komeda_dev *mdev);
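With the debugfs entry added above (debugfs_create_x16), the verbosity mask can be changed at runtime from userspace. A hypothetical example, assuming debugfs is mounted at the usual /sys/kernel/debug and mirroring the KOMEDA_DEV_PRINT_* bits locally:

    #include <fcntl.h>
    #include <stdio.h>
    #include <unistd.h>

    /* Local mirrors of the KOMEDA_DEV_PRINT_* bits defined above. */
    #define PRINT_ERR_EVENTS	(1u << 0)
    #define PRINT_WARN_EVENTS	(1u << 1)
    #define PRINT_DUMP_STATE	(1u << 8)

    int main(void)
    {
            unsigned int mask = PRINT_ERR_EVENTS | PRINT_WARN_EVENTS |
                                PRINT_DUMP_STATE;	/* 0x103 */
            int fd = open("/sys/kernel/debug/komeda/err_verbosity", O_WRONLY);

            if (fd < 0)
                    return 1;
            dprintf(fd, "0x%x\n", mask);	/* the x16 attribute accepts hex */
            close(fd);
            return 0;
    }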
diff --git a/drivers/gpu/drm/arm/display/komeda/komeda_drv.c b/drivers/gpu/drm/arm/display/komeda/komeda_drv.c
index d6c2222c5d33..ea5cd1e17304 100644
--- a/drivers/gpu/drm/arm/display/komeda/komeda_drv.c
+++ b/drivers/gpu/drm/arm/display/komeda/komeda_drv.c
@@ -33,6 +33,12 @@ static void komeda_unbind(struct device *dev)
return;
komeda_kms_detach(mdrv->kms);
+
+ if (pm_runtime_enabled(dev))
+ pm_runtime_disable(dev);
+ else
+ komeda_dev_suspend(mdrv->mdev);
+
komeda_dev_destroy(mdrv->mdev);
dev_set_drvdata(dev, NULL);
@@ -54,6 +60,10 @@ static int komeda_bind(struct device *dev)
goto free_mdrv;
}
+ pm_runtime_enable(dev);
+ if (!pm_runtime_enabled(dev))
+ komeda_dev_resume(mdrv->mdev);
+
mdrv->kms = komeda_kms_attach(mdrv->mdev);
if (IS_ERR(mdrv->kms)) {
err = PTR_ERR(mdrv->kms);
@@ -65,6 +75,11 @@ static int komeda_bind(struct device *dev)
return 0;
destroy_mdev:
+ if (pm_runtime_enabled(dev))
+ pm_runtime_disable(dev);
+ else
+ komeda_dev_suspend(mdrv->mdev);
+
komeda_dev_destroy(mdrv->mdev);
free_mdrv:
@@ -123,29 +138,37 @@ static int komeda_platform_remove(struct platform_device *pdev)
return 0;
}
-static const struct komeda_product_data komeda_products[] = {
- [MALI_D71] = {
- .product_id = MALIDP_D71_PRODUCT_ID,
- .identify = d71_identify,
- },
-};
-
static const struct of_device_id komeda_of_match[] = {
- { .compatible = "arm,mali-d71", .data = &komeda_products[MALI_D71], },
+ { .compatible = "arm,mali-d71", .data = d71_identify, },
+ { .compatible = "arm,mali-d32", .data = d71_identify, },
{},
};
MODULE_DEVICE_TABLE(of, komeda_of_match);
+static int komeda_rt_pm_suspend(struct device *dev)
+{
+ struct komeda_drv *mdrv = dev_get_drvdata(dev);
+
+ return komeda_dev_suspend(mdrv->mdev);
+}
+
+static int komeda_rt_pm_resume(struct device *dev)
+{
+ struct komeda_drv *mdrv = dev_get_drvdata(dev);
+
+ return komeda_dev_resume(mdrv->mdev);
+}
+
static int __maybe_unused komeda_pm_suspend(struct device *dev)
{
struct komeda_drv *mdrv = dev_get_drvdata(dev);
- struct drm_device *drm = &mdrv->kms->base;
int res;
- res = drm_mode_config_helper_suspend(drm);
+ res = drm_mode_config_helper_suspend(&mdrv->kms->base);
- komeda_dev_suspend(mdrv->mdev);
+ if (!pm_runtime_status_suspended(dev))
+ komeda_dev_suspend(mdrv->mdev);
return res;
}
@@ -153,15 +176,16 @@ static int __maybe_unused komeda_pm_suspend(struct device *dev)
static int __maybe_unused komeda_pm_resume(struct device *dev)
{
struct komeda_drv *mdrv = dev_get_drvdata(dev);
- struct drm_device *drm = &mdrv->kms->base;
- komeda_dev_resume(mdrv->mdev);
+ if (!pm_runtime_status_suspended(dev))
+ komeda_dev_resume(mdrv->mdev);
- return drm_mode_config_helper_resume(drm);
+ return drm_mode_config_helper_resume(&mdrv->kms->base);
}
static const struct dev_pm_ops komeda_pm_ops = {
SET_SYSTEM_SLEEP_PM_OPS(komeda_pm_suspend, komeda_pm_resume)
+ SET_RUNTIME_PM_OPS(komeda_rt_pm_suspend, komeda_rt_pm_resume, NULL)
};
static struct platform_driver komeda_platform_driver = {
diff --git a/drivers/gpu/drm/arm/display/komeda/komeda_event.c b/drivers/gpu/drm/arm/display/komeda/komeda_event.c
index a36fb86cc054..53f944e66dfc 100644
--- a/drivers/gpu/drm/arm/display/komeda/komeda_event.c
+++ b/drivers/gpu/drm/arm/display/komeda/komeda_event.c
@@ -4,6 +4,7 @@
* Author: James.Qian.Wang <james.qian.wang@arm.com>
*
*/
+#include <drm/drm_atomic.h>
#include <drm/drm_print.h>
#include "komeda_dev.h"
@@ -16,6 +17,7 @@ struct komeda_str {
/* return 0 on success, < 0 on no space.
*/
+__printf(2, 3)
static int komeda_sprintf(struct komeda_str *str, const char *fmt, ...)
{
va_list args;
@@ -76,6 +78,8 @@ static void evt_str(struct komeda_str *str, u64 events)
/* LPU errors or events */
evt_sprintf(str, events & KOMEDA_EVENT_IBSY, "IBSY|");
+ evt_sprintf(str, events & KOMEDA_EVENT_EMPTY, "EMPTY|");
+ evt_sprintf(str, events & KOMEDA_EVENT_FULL, "FULL|");
evt_sprintf(str, events & KOMEDA_ERR_AXIE, "AXIE|");
evt_sprintf(str, events & KOMEDA_ERR_ACE0, "ACE0|");
evt_sprintf(str, events & KOMEDA_ERR_ACE1, "ACE1|");
@@ -107,20 +111,31 @@ static bool is_new_frame(struct komeda_events *a)
(KOMEDA_EVENT_FLIP | KOMEDA_EVENT_EOW);
}
-void komeda_print_events(struct komeda_events *evts)
+void komeda_print_events(struct komeda_events *evts, struct drm_device *dev)
{
- u64 print_evts = KOMEDA_ERR_EVENTS;
+ u64 print_evts = 0;
static bool en_print = true;
+ struct komeda_dev *mdev = dev->dev_private;
+ u16 const err_verbosity = mdev->err_verbosity;
+ u64 evts_mask = evts->global | evts->pipes[0] | evts->pipes[1];
/* reduce the same msg print, only print the first evt for one frame */
if (evts->global || is_new_frame(evts))
en_print = true;
- if (!en_print)
+ if (!(err_verbosity & KOMEDA_DEV_PRINT_DISABLE_RATELIMIT) && !en_print)
return;
- if ((evts->global | evts->pipes[0] | evts->pipes[1]) & print_evts) {
+ if (err_verbosity & KOMEDA_DEV_PRINT_ERR_EVENTS)
+ print_evts |= KOMEDA_ERR_EVENTS;
+ if (err_verbosity & KOMEDA_DEV_PRINT_WARN_EVENTS)
+ print_evts |= KOMEDA_WARN_EVENTS;
+ if (err_verbosity & KOMEDA_DEV_PRINT_INFO_EVENTS)
+ print_evts |= KOMEDA_INFO_EVENTS;
+
+ if (evts_mask & print_evts) {
char msg[256];
struct komeda_str str;
+ struct drm_printer p = drm_info_printer(dev->dev);
str.str = msg;
str.sz = sizeof(msg);
@@ -134,6 +149,9 @@ void komeda_print_events(struct komeda_events *evts)
evt_str(&str, evts->pipes[1]);
DRM_ERROR("err detect: %s\n", msg);
+ if ((err_verbosity & KOMEDA_DEV_PRINT_DUMP_STATE_ON_EVENT) &&
+ (evts_mask & (KOMEDA_ERR_EVENTS | KOMEDA_WARN_EVENTS)))
+ drm_state_dump(dev, &p);
en_print = false;
}
diff --git a/drivers/gpu/drm/arm/display/komeda/komeda_kms.c b/drivers/gpu/drm/arm/display/komeda/komeda_kms.c
index 52648b4008bc..442d4656150a 100644
--- a/drivers/gpu/drm/arm/display/komeda/komeda_kms.c
+++ b/drivers/gpu/drm/arm/display/komeda/komeda_kms.c
@@ -48,7 +48,7 @@ static irqreturn_t komeda_kms_irq_handler(int irq, void *data)
memset(&evts, 0, sizeof(evts));
status = mdev->funcs->irq_handler(mdev, &evts);
- komeda_print_events(&evts);
+ komeda_print_events(&evts, drm);
/* Notify the crtc to handle the events */
for (i = 0; i < kms->n_crtcs; i++)
@@ -308,10 +308,6 @@ struct komeda_kms_dev *komeda_kms_attach(struct komeda_dev *mdev)
if (err)
goto free_component_binding;
- err = mdev->funcs->enable_irq(mdev);
- if (err)
- goto free_component_binding;
-
drm->irq_enabled = true;
drm_kms_helper_poll_init(drm);
@@ -325,7 +321,6 @@ struct komeda_kms_dev *komeda_kms_attach(struct komeda_dev *mdev)
free_interrupts:
drm_kms_helper_poll_fini(drm);
drm->irq_enabled = false;
- mdev->funcs->disable_irq(mdev);
free_component_binding:
component_unbind_all(mdev->dev, drm);
cleanup_mode_config:
@@ -347,7 +342,6 @@ void komeda_kms_detach(struct komeda_kms_dev *kms)
drm_kms_helper_poll_fini(drm);
drm_atomic_helper_shutdown(drm);
drm->irq_enabled = false;
- mdev->funcs->disable_irq(mdev);
component_unbind_all(mdev->dev, drm);
drm_mode_config_cleanup(drm);
komeda_kms_cleanup_private_objs(kms);
diff --git a/drivers/gpu/drm/arm/display/komeda/komeda_pipeline.h b/drivers/gpu/drm/arm/display/komeda/komeda_pipeline.h
index bd6ca7c87037..ac8725e24853 100644
--- a/drivers/gpu/drm/arm/display/komeda/komeda_pipeline.h
+++ b/drivers/gpu/drm/arm/display/komeda/komeda_pipeline.h
@@ -11,6 +11,7 @@
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include "malidp_utils.h"
+#include "komeda_color_mgmt.h"
#define KOMEDA_MAX_PIPELINES 2
#define KOMEDA_PIPELINE_MAX_LAYERS 4
@@ -327,6 +328,8 @@ struct komeda_improc_state {
struct komeda_component_state base;
u8 color_format, color_depth;
u16 hsize, vsize;
+ u32 fgamma_coeffs[KOMEDA_N_GAMMA_COEFFS];
+ u32 ctm_coeffs[KOMEDA_N_CTM_COEFFS];
};
/* display timing controller */
diff --git a/drivers/gpu/drm/arm/display/komeda/komeda_pipeline_state.c b/drivers/gpu/drm/arm/display/komeda/komeda_pipeline_state.c
index 52750116aa19..8f32ae7c25d0 100644
--- a/drivers/gpu/drm/arm/display/komeda/komeda_pipeline_state.c
+++ b/drivers/gpu/drm/arm/display/komeda/komeda_pipeline_state.c
@@ -802,6 +802,12 @@ komeda_improc_validate(struct komeda_improc *improc,
st->color_format = BIT(__ffs(avail_formats));
}
+ if (kcrtc_st->base.color_mgmt_changed) {
+ drm_lut_to_fgamma_coeffs(kcrtc_st->base.gamma_lut,
+ st->fgamma_coeffs);
+ drm_ctm_to_coeffs(kcrtc_st->base.ctm, st->ctm_coeffs);
+ }
+
komeda_component_add_input(&st->base, &dflow->input, 0);
komeda_component_set_output(&dflow->input, &improc->base, 0);
diff --git a/drivers/gpu/drm/arm/malidp_planes.c b/drivers/gpu/drm/arm/malidp_planes.c
index 3c70a53813bf..37715cc6064e 100644
--- a/drivers/gpu/drm/arm/malidp_planes.c
+++ b/drivers/gpu/drm/arm/malidp_planes.c
@@ -512,7 +512,7 @@ static int malidp_de_plane_check(struct drm_plane *plane,
int i, ret;
unsigned int block_w, block_h;
- if (!state->crtc || !state->fb)
+ if (!state->crtc || WARN_ON(!state->fb))
return 0;
fb = state->fb;
diff --git a/drivers/gpu/drm/armada/armada_fbdev.c b/drivers/gpu/drm/armada/armada_fbdev.c
index 090cc0d699ae..ac8a78bfda03 100644
--- a/drivers/gpu/drm/armada/armada_fbdev.c
+++ b/drivers/gpu/drm/armada/armada_fbdev.c
@@ -16,7 +16,7 @@
#include "armada_fb.h"
#include "armada_gem.h"
-static /*const*/ struct fb_ops armada_fb_ops = {
+static const struct fb_ops armada_fb_ops = {
.owner = THIS_MODULE,
DRM_FB_HELPER_DEFAULT_OPS,
.fb_fillrect = drm_fb_helper_cfb_fillrect,
diff --git a/drivers/gpu/drm/armada/armada_gem.c b/drivers/gpu/drm/armada/armada_gem.c
index 93cf8b8bfcff..976685f2939e 100644
--- a/drivers/gpu/drm/armada/armada_gem.c
+++ b/drivers/gpu/drm/armada/armada_gem.c
@@ -461,16 +461,6 @@ static void armada_gem_prime_unmap_dma_buf(struct dma_buf_attachment *attach,
kfree(sgt);
}
-static void *armada_gem_dmabuf_no_kmap(struct dma_buf *buf, unsigned long n)
-{
- return NULL;
-}
-
-static void
-armada_gem_dmabuf_no_kunmap(struct dma_buf *buf, unsigned long n, void *addr)
-{
-}
-
static int
armada_gem_dmabuf_mmap(struct dma_buf *buf, struct vm_area_struct *vma)
{
@@ -481,8 +471,6 @@ static const struct dma_buf_ops armada_gem_prime_dmabuf_ops = {
.map_dma_buf = armada_gem_prime_map_dma_buf,
.unmap_dma_buf = armada_gem_prime_unmap_dma_buf,
.release = drm_gem_dmabuf_release,
- .map = armada_gem_dmabuf_no_kmap,
- .unmap = armada_gem_dmabuf_no_kunmap,
.mmap = armada_gem_dmabuf_mmap,
};
diff --git a/drivers/gpu/drm/ast/ast_drv.c b/drivers/gpu/drm/ast/ast_drv.c
index 1f17794b0890..30aa73a5d9b7 100644
--- a/drivers/gpu/drm/ast/ast_drv.c
+++ b/drivers/gpu/drm/ast/ast_drv.c
@@ -33,7 +33,6 @@
#include <drm/drm_crtc_helper.h>
#include <drm/drm_drv.h>
#include <drm/drm_gem_vram_helper.h>
-#include <drm/drm_pci.h>
#include <drm/drm_probe_helper.h>
#include "ast_drv.h"
@@ -86,9 +85,42 @@ static void ast_kick_out_firmware_fb(struct pci_dev *pdev)
static int ast_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
+ struct drm_device *dev;
+ int ret;
+
ast_kick_out_firmware_fb(pdev);
- return drm_get_pci_dev(pdev, ent, &driver);
+ ret = pci_enable_device(pdev);
+ if (ret)
+ return ret;
+
+ dev = drm_dev_alloc(&driver, &pdev->dev);
+ if (IS_ERR(dev)) {
+ ret = PTR_ERR(dev);
+ goto err_pci_disable_device;
+ }
+
+ dev->pdev = pdev;
+ pci_set_drvdata(pdev, dev);
+
+ ret = ast_driver_load(dev, ent->driver_data);
+ if (ret)
+ goto err_drm_dev_put;
+
+ ret = drm_dev_register(dev, ent->driver_data);
+ if (ret)
+ goto err_ast_driver_unload;
+
+ return 0;
+
+err_ast_driver_unload:
+ ast_driver_unload(dev);
+err_drm_dev_put:
+ drm_dev_put(dev);
+err_pci_disable_device:
+ pci_disable_device(pdev);
+ return ret;
}
static void
@@ -96,17 +128,19 @@ ast_pci_remove(struct pci_dev *pdev)
{
struct drm_device *dev = pci_get_drvdata(pdev);
- drm_put_dev(dev);
+ drm_dev_unregister(dev);
+ ast_driver_unload(dev);
+ drm_dev_put(dev);
}
-
-
static int ast_drm_freeze(struct drm_device *dev)
{
- drm_kms_helper_poll_disable(dev);
- pci_save_state(dev->pdev);
- drm_fb_helper_set_suspend_unlocked(dev->fb_helper, true);
+ int error;
+ error = drm_mode_config_helper_suspend(dev);
+ if (error)
+ return error;
+ pci_save_state(dev->pdev);
return 0;
}
@@ -114,11 +148,7 @@ static int ast_drm_thaw(struct drm_device *dev)
{
ast_post_gpu(dev);
- drm_mode_config_reset(dev);
- drm_helper_resume_force_mode(dev);
- drm_fb_helper_set_suspend_unlocked(dev->fb_helper, false);
-
- return 0;
+ return drm_mode_config_helper_resume(dev);
}
static int ast_drm_resume(struct drm_device *dev)
@@ -131,8 +161,6 @@ static int ast_drm_resume(struct drm_device *dev)
ret = ast_drm_thaw(dev);
if (ret)
return ret;
-
- drm_kms_helper_poll_enable(dev);
return 0;
}
@@ -150,6 +178,7 @@ static int ast_pm_suspend(struct device *dev)
pci_set_power_state(pdev, PCI_D3hot);
return 0;
}
+
static int ast_pm_resume(struct device *dev)
{
struct pci_dev *pdev = to_pci_dev(dev);
@@ -165,7 +194,6 @@ static int ast_pm_freeze(struct device *dev)
if (!ddev || !ddev->dev_private)
return -ENODEV;
return ast_drm_freeze(ddev);
-
}
static int ast_pm_thaw(struct device *dev)
@@ -203,10 +231,9 @@ static struct pci_driver ast_pci_driver = {
DEFINE_DRM_GEM_FOPS(ast_fops);
static struct drm_driver driver = {
- .driver_features = DRIVER_MODESET | DRIVER_GEM,
-
- .load = ast_driver_load,
- .unload = ast_driver_unload,
+ .driver_features = DRIVER_ATOMIC |
+ DRIVER_GEM |
+ DRIVER_MODESET,
.fops = &ast_fops,
.name = DRIVER_NAME,
diff --git a/drivers/gpu/drm/ast/ast_drv.h b/drivers/gpu/drm/ast/ast_drv.h
index ff161bd622f3..f5d8780776ae 100644
--- a/drivers/gpu/drm/ast/ast_drv.h
+++ b/drivers/gpu/drm/ast/ast_drv.h
@@ -121,6 +121,9 @@ struct ast_private {
unsigned int next_index;
} cursor;
+ struct drm_plane primary_plane;
+ struct drm_plane cursor_plane;
+
bool support_wide_screen;
enum {
ast_use_p2a,
@@ -137,8 +140,6 @@ struct ast_private {
int ast_driver_load(struct drm_device *dev, unsigned long flags);
void ast_driver_unload(struct drm_device *dev);
-struct ast_gem_object;
-
#define AST_IO_AR_PORT_WRITE (0x40)
#define AST_IO_MISC_PORT_WRITE (0x42)
#define AST_IO_VGA_ENABLE_PORT (0x43)
@@ -280,6 +281,17 @@ struct ast_vbios_mode_info {
const struct ast_vbios_enhtable *enh_table;
};
+struct ast_crtc_state {
+ struct drm_crtc_state base;
+
+ /* Last known format of primary plane */
+ const struct drm_format_info *format;
+
+ struct ast_vbios_mode_info vbios_mode_info;
+};
+
+#define to_ast_crtc_state(state) container_of(state, struct ast_crtc_state, base)
+
extern int ast_mode_init(struct drm_device *dev);
extern void ast_mode_fini(struct drm_device *dev);
@@ -289,10 +301,6 @@ extern void ast_mode_fini(struct drm_device *dev);
int ast_mm_init(struct ast_private *ast);
void ast_mm_fini(struct ast_private *ast);
-int ast_gem_create(struct drm_device *dev,
- u32 size, bool iskernel,
- struct drm_gem_object **obj);
-
/* ast post */
void ast_enable_vga(struct drm_device *dev);
void ast_enable_mmio(struct drm_device *dev);
diff --git a/drivers/gpu/drm/ast/ast_main.c b/drivers/gpu/drm/ast/ast_main.c
index 21715d6a9b56..b79f484e9bd2 100644
--- a/drivers/gpu/drm/ast/ast_main.c
+++ b/drivers/gpu/drm/ast/ast_main.c
@@ -28,6 +28,7 @@
#include <linux/pci.h>
+#include <drm/drm_atomic_helper.h>
#include <drm/drm_crtc_helper.h>
#include <drm/drm_fb_helper.h>
#include <drm/drm_gem.h>
@@ -387,8 +388,33 @@ static int ast_get_dram_info(struct drm_device *dev)
return 0;
}
+static enum drm_mode_status
+ast_mode_config_mode_valid(struct drm_device *dev,
+ const struct drm_display_mode *mode)
+{
+ static const unsigned long max_bpp = 4; /* DRM_FORMAT_XRGB8888 */
+
+ struct ast_private *ast = dev->dev_private;
+ unsigned long fbsize, fbpages, max_fbpages;
+
+ /* To support double buffering, a framebuffer may not
+ * consume more than half of the available VRAM.
+ */
+ max_fbpages = (ast->vram_size / 2) >> PAGE_SHIFT;
+
+ fbsize = mode->hdisplay * mode->vdisplay * max_bpp;
+ fbpages = DIV_ROUND_UP(fbsize, PAGE_SIZE);
+
+ if (fbpages > max_fbpages)
+ return MODE_MEM;
+
+ return MODE_OK;
+}
+
static const struct drm_mode_config_funcs ast_mode_funcs = {
- .fb_create = drm_gem_fb_create
+ .fb_create = drm_gem_fb_create,
+ .mode_valid = ast_mode_config_mode_valid,
+ .atomic_check = drm_atomic_helper_check,
+ .atomic_commit = drm_atomic_helper_commit,
};
static u32 ast_get_vram_info(struct drm_device *dev)
@@ -506,6 +532,8 @@ int ast_driver_load(struct drm_device *dev, unsigned long flags)
if (ret)
goto out_free;
+ drm_mode_config_reset(dev);
+
ret = drm_fbdev_generic_setup(dev, 32);
if (ret)
goto out_free;
@@ -535,27 +563,3 @@ void ast_driver_unload(struct drm_device *dev)
pci_iounmap(dev->pdev, ast->regs);
kfree(ast);
}
-
-int ast_gem_create(struct drm_device *dev,
- u32 size, bool iskernel,
- struct drm_gem_object **obj)
-{
- struct drm_gem_vram_object *gbo;
- int ret;
-
- *obj = NULL;
-
- size = roundup(size, PAGE_SIZE);
- if (size == 0)
- return -EINVAL;
-
- gbo = drm_gem_vram_create(dev, &dev->vram_mm->bdev, size, 0, false);
- if (IS_ERR(gbo)) {
- ret = PTR_ERR(gbo);
- if (ret != -ERESTARTSYS)
- DRM_ERROR("failed to allocate GEM object\n");
- return ret;
- }
- *obj = &gbo->bo.base;
- return 0;
-}
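To see the numbers behind ast_mode_config_mode_valid(): with 16 MiB of VRAM (an assumed figure), half of it is 2048 pages; a 1920x1080 XRGB8888 framebuffer needs 2025 pages and passes, while 2560x1440 needs 3600 pages and is rejected with MODE_MEM. A standalone re-implementation of the check, for illustration only:

    #include <stdio.h>

    #define PAGE_SZ 4096UL

    /* Mirrors the check in ast_mode_config_mode_valid(); 16 MiB VRAM assumed. */
    static int mode_fits(unsigned long hdisplay, unsigned long vdisplay)
    {
            const unsigned long max_bpp = 4;	/* DRM_FORMAT_XRGB8888 */
            unsigned long max_fbpages = ((16UL << 20) / 2) / PAGE_SZ;	/* 2048 */
            unsigned long fbsize = hdisplay * vdisplay * max_bpp;
            unsigned long fbpages = (fbsize + PAGE_SZ - 1) / PAGE_SZ;

            return fbpages <= max_fbpages;
    }

    int main(void)
    {
            printf("1920x1080: %s\n", mode_fits(1920, 1080) ? "OK" : "MODE_MEM");
            printf("2560x1440: %s\n", mode_fits(2560, 1440) ? "OK" : "MODE_MEM");
            return 0;
    }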
diff --git a/drivers/gpu/drm/ast/ast_mode.c b/drivers/gpu/drm/ast/ast_mode.c
index b13eaa2619ab..34608f0499eb 100644
--- a/drivers/gpu/drm/ast/ast_mode.c
+++ b/drivers/gpu/drm/ast/ast_mode.c
@@ -31,6 +31,9 @@
#include <linux/export.h>
#include <linux/pci.h>
+#include <drm/drm_atomic.h>
+#include <drm/drm_atomic_helper.h>
+#include <drm/drm_atomic_state_helper.h>
#include <drm/drm_crtc.h>
#include <drm/drm_crtc_helper.h>
#include <drm/drm_fourcc.h>
@@ -43,11 +46,14 @@
static struct ast_i2c_chan *ast_i2c_create(struct drm_device *dev);
static void ast_i2c_destroy(struct ast_i2c_chan *i2c);
-static int ast_cursor_set(struct drm_crtc *crtc,
- struct drm_file *file_priv,
- uint32_t handle,
- uint32_t width,
- uint32_t height);
+
+static u32 copy_cursor_image(u8 *src, u8 *dst, int width, int height);
+static int ast_cursor_update(void *dst, void *src, unsigned int width,
+ unsigned int height);
+static void ast_cursor_set_base(struct ast_private *ast, u64 address);
static int ast_cursor_move(struct drm_crtc *crtc,
int x, int y);
@@ -65,9 +71,8 @@ static inline void ast_load_palette_index(struct ast_private *ast,
ast_io_read8(ast, AST_IO_SEQ_PORT);
}
-static void ast_crtc_load_lut(struct drm_crtc *crtc)
+static void ast_crtc_load_lut(struct ast_private *ast, struct drm_crtc *crtc)
{
- struct ast_private *ast = crtc->dev->dev_private;
u16 *r, *g, *b;
int i;
@@ -82,36 +87,32 @@ static void ast_crtc_load_lut(struct drm_crtc *crtc)
ast_load_palette_index(ast, i, *r++ >> 8, *g++ >> 8, *b++ >> 8);
}
-static bool ast_get_vbios_mode_info(struct drm_crtc *crtc, struct drm_display_mode *mode,
+static bool ast_get_vbios_mode_info(const struct drm_format_info *format,
+ const struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode,
struct ast_vbios_mode_info *vbios_mode)
{
- struct ast_private *ast = crtc->dev->dev_private;
- const struct drm_framebuffer *fb = crtc->primary->fb;
- u32 refresh_rate_index = 0, mode_id, color_index, refresh_rate;
+ u32 refresh_rate_index = 0, refresh_rate;
const struct ast_vbios_enhtable *best = NULL;
u32 hborder, vborder;
bool check_sync;
- switch (fb->format->cpp[0] * 8) {
+ switch (format->cpp[0] * 8) {
case 8:
vbios_mode->std_table = &vbios_stdtable[VGAModeIndex];
- color_index = VGAModeIndex - 1;
break;
case 16:
vbios_mode->std_table = &vbios_stdtable[HiCModeIndex];
- color_index = HiCModeIndex;
break;
case 24:
case 32:
vbios_mode->std_table = &vbios_stdtable[TrueCModeIndex];
- color_index = TrueCModeIndex;
break;
default:
return false;
}
- switch (crtc->mode.crtc_hdisplay) {
+ switch (mode->crtc_hdisplay) {
case 640:
vbios_mode->enh_table = &res_640x480[refresh_rate_index];
break;
@@ -122,7 +123,7 @@ static bool ast_get_vbios_mode_info(struct drm_crtc *crtc, struct drm_display_mo
vbios_mode->enh_table = &res_1024x768[refresh_rate_index];
break;
case 1280:
- if (crtc->mode.crtc_vdisplay == 800)
+ if (mode->crtc_vdisplay == 800)
vbios_mode->enh_table = &res_1280x800[refresh_rate_index];
else
vbios_mode->enh_table = &res_1280x1024[refresh_rate_index];
@@ -134,7 +135,7 @@ static bool ast_get_vbios_mode_info(struct drm_crtc *crtc, struct drm_display_mo
vbios_mode->enh_table = &res_1440x900[refresh_rate_index];
break;
case 1600:
- if (crtc->mode.crtc_vdisplay == 900)
+ if (mode->crtc_vdisplay == 900)
vbios_mode->enh_table = &res_1600x900[refresh_rate_index];
else
vbios_mode->enh_table = &res_1600x1200[refresh_rate_index];
@@ -143,7 +144,7 @@ static bool ast_get_vbios_mode_info(struct drm_crtc *crtc, struct drm_display_mo
vbios_mode->enh_table = &res_1680x1050[refresh_rate_index];
break;
case 1920:
- if (crtc->mode.crtc_vdisplay == 1080)
+ if (mode->crtc_vdisplay == 1080)
vbios_mode->enh_table = &res_1920x1080[refresh_rate_index];
else
vbios_mode->enh_table = &res_1920x1200[refresh_rate_index];
@@ -154,7 +155,8 @@ static bool ast_get_vbios_mode_info(struct drm_crtc *crtc, struct drm_display_mo
refresh_rate = drm_mode_vrefresh(mode);
check_sync = vbios_mode->enh_table->flags & WideScreenMode;
- do {
+
+ while (1) {
const struct ast_vbios_enhtable *loop = vbios_mode->enh_table;
while (loop->refresh_rate != 0xff) {
@@ -178,7 +180,8 @@ static bool ast_get_vbios_mode_info(struct drm_crtc *crtc, struct drm_display_mo
if (best || !check_sync)
break;
check_sync = 0;
- } while (1);
+ }
+
if (best)
vbios_mode->enh_table = best;
@@ -203,38 +206,67 @@ static bool ast_get_vbios_mode_info(struct drm_crtc *crtc, struct drm_display_mo
vbios_mode->enh_table->vfp +
vbios_mode->enh_table->vsync);
- refresh_rate_index = vbios_mode->enh_table->refresh_rate_index;
- mode_id = vbios_mode->enh_table->mode_id;
+ return true;
+}
- if (ast->chip == AST1180) {
- /* TODO 1180 */
- } else {
- ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0x8c, (u8)((color_index & 0xf) << 4));
- ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0x8d, refresh_rate_index & 0xff);
- ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0x8e, mode_id & 0xff);
-
- ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0x91, 0x00);
- if (vbios_mode->enh_table->flags & NewModeInfo) {
- ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0x91, 0xa8);
- ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0x92,
- fb->format->cpp[0] * 8);
- ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0x93, adjusted_mode->clock / 1000);
- ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0x94, adjusted_mode->crtc_hdisplay);
- ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0x95, adjusted_mode->crtc_hdisplay >> 8);
-
- ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0x96, adjusted_mode->crtc_vdisplay);
- ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0x97, adjusted_mode->crtc_vdisplay >> 8);
- }
+static void ast_set_vbios_color_reg(struct ast_private *ast,
+ const struct drm_format_info *format,
+ const struct ast_vbios_mode_info *vbios_mode)
+{
+ u32 color_index;
+
+ switch (format->cpp[0]) {
+ case 1:
+ color_index = VGAModeIndex - 1;
+ break;
+ case 2:
+ color_index = HiCModeIndex;
+ break;
+ case 3:
+ case 4:
+ color_index = TrueCModeIndex;
+ break;
+ default:
+ return;
}
- return true;
+ ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0x8c, (u8)((color_index & 0x0f) << 4));
+
+ ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0x91, 0x00);
+
+ if (vbios_mode->enh_table->flags & NewModeInfo) {
+ ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0x91, 0xa8);
+ ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0x92, format->cpp[0] * 8);
+ }
+}
+
+static void ast_set_vbios_mode_reg(struct ast_private *ast,
+ const struct drm_display_mode *adjusted_mode,
+ const struct ast_vbios_mode_info *vbios_mode)
+{
+ u32 refresh_rate_index, mode_id;
+
+ refresh_rate_index = vbios_mode->enh_table->refresh_rate_index;
+ mode_id = vbios_mode->enh_table->mode_id;
+ ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0x8d, refresh_rate_index & 0xff);
+ ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0x8e, mode_id & 0xff);
+ ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0x91, 0x00);
+
+ if (vbios_mode->enh_table->flags & NewModeInfo) {
+ ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0x91, 0xa8);
+ ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0x93, adjusted_mode->clock / 1000);
+ ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0x94, adjusted_mode->crtc_hdisplay);
+ ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0x95, adjusted_mode->crtc_hdisplay >> 8);
+ ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0x96, adjusted_mode->crtc_vdisplay);
+ ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0x97, adjusted_mode->crtc_vdisplay >> 8);
+ }
}
-static void ast_set_std_reg(struct drm_crtc *crtc, struct drm_display_mode *mode,
+
+static void ast_set_std_reg(struct ast_private *ast,
+ struct drm_display_mode *mode,
struct ast_vbios_mode_info *vbios_mode)
{
- struct ast_private *ast = crtc->dev->dev_private;
const struct ast_vbios_stdtable *stdtable;
u32 i;
u8 jreg;
@@ -244,18 +276,21 @@ static void ast_set_std_reg(struct drm_crtc *crtc, struct drm_display_mode *mode
jreg = stdtable->misc;
ast_io_write8(ast, AST_IO_MISC_PORT_WRITE, jreg);
- /* Set SEQ */
+ /* Set SEQ; except Screen Disable field */
ast_set_index_reg(ast, AST_IO_SEQ_PORT, 0x00, 0x03);
- for (i = 0; i < 4; i++) {
+ ast_set_index_reg_mask(ast, AST_IO_SEQ_PORT, 0x01, 0xdf, stdtable->seq[0]);
+ for (i = 1; i < 4; i++) {
jreg = stdtable->seq[i];
- if (!i)
- jreg |= 0x20;
ast_set_index_reg(ast, AST_IO_SEQ_PORT, (i + 1) , jreg);
}
- /* Set CRTC */
+ /* Set CRTC; except base address and offset */
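+ /* CR0C/CR0D (start address) and CR13 (offset) are skipped below
+ * (indices 12, 13 and 19); they are programmed from the plane
+ * update paths instead.
+ */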
ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0x11, 0x7f, 0x00);
- for (i = 0; i < 25; i++)
+ for (i = 0; i < 12; i++)
+ ast_set_index_reg(ast, AST_IO_CRTC_PORT, i, stdtable->crtc[i]);
+ for (i = 14; i < 19; i++)
+ ast_set_index_reg(ast, AST_IO_CRTC_PORT, i, stdtable->crtc[i]);
+ for (i = 20; i < 25; i++)
ast_set_index_reg(ast, AST_IO_CRTC_PORT, i, stdtable->crtc[i]);
/* set AR */
@@ -276,10 +311,10 @@ static void ast_set_std_reg(struct drm_crtc *crtc, struct drm_display_mode *mode
ast_set_index_reg(ast, AST_IO_GR_PORT, i, stdtable->gr[i]);
}
-static void ast_set_crtc_reg(struct drm_crtc *crtc, struct drm_display_mode *mode,
+static void ast_set_crtc_reg(struct ast_private *ast,
+ struct drm_display_mode *mode,
struct ast_vbios_mode_info *vbios_mode)
{
- struct ast_private *ast = crtc->dev->dev_private;
u8 jreg05 = 0, jreg07 = 0, jreg09 = 0, jregAC = 0, jregAD = 0, jregAE = 0;
u16 temp, precache = 0;
@@ -385,11 +420,9 @@ static void ast_set_crtc_reg(struct drm_crtc *crtc, struct drm_display_mode *mod
ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0x11, 0x7f, 0x80);
}
-static void ast_set_offset_reg(struct drm_crtc *crtc)
+static void ast_set_offset_reg(struct ast_private *ast,
+ struct drm_framebuffer *fb)
{
- struct ast_private *ast = crtc->dev->dev_private;
- const struct drm_framebuffer *fb = crtc->primary->fb;
-
u16 offset;
offset = fb->pitches[0] >> 3;
@@ -397,10 +430,10 @@ static void ast_set_offset_reg(struct drm_crtc *crtc)
ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0xb0, (offset >> 8) & 0x3f);
}
-static void ast_set_dclk_reg(struct drm_device *dev, struct drm_display_mode *mode,
+static void ast_set_dclk_reg(struct ast_private *ast,
+ struct drm_display_mode *mode,
struct ast_vbios_mode_info *vbios_mode)
{
- struct ast_private *ast = dev->dev_private;
const struct ast_vbios_dclk_info *clk_info;
if (ast->chip == AST2500)
@@ -415,14 +448,12 @@ static void ast_set_dclk_reg(struct drm_device *dev, struct drm_display_mode *mo
((clk_info->param3 & 0x3) << 4));
}
-static void ast_set_ext_reg(struct drm_crtc *crtc, struct drm_display_mode *mode,
- struct ast_vbios_mode_info *vbios_mode)
+static void ast_set_color_reg(struct ast_private *ast,
+ const struct drm_format_info *format)
{
- struct ast_private *ast = crtc->dev->dev_private;
- const struct drm_framebuffer *fb = crtc->primary->fb;
u8 jregA0 = 0, jregA3 = 0, jregA8 = 0;
- switch (fb->format->cpp[0] * 8) {
+ switch (format->cpp[0] * 8) {
case 8:
jregA0 = 0x70;
jregA3 = 0x01;
@@ -444,7 +475,10 @@ static void ast_set_ext_reg(struct drm_crtc *crtc, struct drm_display_mode *mode
ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xa0, 0x8f, jregA0);
ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xa3, 0xf0, jregA3);
ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xa8, 0xfd, jregA8);
+}
+static void ast_set_crtthd_reg(struct ast_private *ast)
+{
/* Set Threshold */
if (ast->chip == AST2300 || ast->chip == AST2400 ||
ast->chip == AST2500) {
@@ -462,10 +496,10 @@ static void ast_set_ext_reg(struct drm_crtc *crtc, struct drm_display_mode *mode
}
}
-static void ast_set_sync_reg(struct drm_device *dev, struct drm_display_mode *mode,
- struct ast_vbios_mode_info *vbios_mode)
+static void ast_set_sync_reg(struct ast_private *ast,
+ struct drm_display_mode *mode,
+ struct ast_vbios_mode_info *vbios_mode)
{
- struct ast_private *ast = dev->dev_private;
u8 jreg;
jreg = ast_io_read8(ast, AST_IO_MISC_PORT_READ);
@@ -475,23 +509,9 @@ static void ast_set_sync_reg(struct drm_device *dev, struct drm_display_mode *mo
ast_io_write8(ast, AST_IO_MISC_PORT_WRITE, jreg);
}
-static bool ast_set_dac_reg(struct drm_crtc *crtc, struct drm_display_mode *mode,
- struct ast_vbios_mode_info *vbios_mode)
-{
- const struct drm_framebuffer *fb = crtc->primary->fb;
-
- switch (fb->format->cpp[0] * 8) {
- case 8:
- break;
- default:
- return false;
- }
- return true;
-}
-
-static void ast_set_start_address_crt1(struct drm_crtc *crtc, unsigned offset)
+static void ast_set_start_address_crt1(struct ast_private *ast,
+ unsigned offset)
{
- struct ast_private *ast = crtc->dev->dev_private;
u32 addr;
addr = offset >> 2;
@@ -501,6 +521,247 @@ static void ast_set_start_address_crt1(struct drm_crtc *crtc, unsigned offset)
}
+/*
+ * Primary plane
+ */
+
+static const uint32_t ast_primary_plane_formats[] = {
+ DRM_FORMAT_XRGB8888,
+ DRM_FORMAT_RGB565,
+ DRM_FORMAT_C8,
+};
+
+static int ast_primary_plane_helper_atomic_check(struct drm_plane *plane,
+ struct drm_plane_state *state)
+{
+ struct drm_crtc_state *crtc_state;
+ struct ast_crtc_state *ast_crtc_state;
+ int ret;
+
+ if (!state->crtc)
+ return 0;
+
+ crtc_state = drm_atomic_get_new_crtc_state(state->state, state->crtc);
+
+ ret = drm_atomic_helper_check_plane_state(state, crtc_state,
+ DRM_PLANE_HELPER_NO_SCALING,
+ DRM_PLANE_HELPER_NO_SCALING,
+ false, true);
+ if (ret)
+ return ret;
+
+ if (!state->visible)
+ return 0;
+
+ ast_crtc_state = to_ast_crtc_state(crtc_state);
+
+ ast_crtc_state->format = state->fb->format;
+
+ return 0;
+}
+
+static void
+ast_primary_plane_helper_atomic_update(struct drm_plane *plane,
+ struct drm_plane_state *old_state)
+{
+ struct ast_private *ast = plane->dev->dev_private;
+ struct drm_plane_state *state = plane->state;
+ struct drm_gem_vram_object *gbo;
+ s64 gpu_addr;
+
+ gbo = drm_gem_vram_of_gem(state->fb->obj[0]);
+ gpu_addr = drm_gem_vram_offset(gbo);
+ if (WARN_ON_ONCE(gpu_addr < 0))
+ return; /* Bug: we didn't pin the BO to VRAM in prepare_fb. */
+
+ ast_set_offset_reg(ast, state->fb);
+ ast_set_start_address_crt1(ast, (u32)gpu_addr);
+
+ ast_set_index_reg_mask(ast, AST_IO_SEQ_PORT, 0x1, 0xdf, 0x00);
+}
+
+static void
+ast_primary_plane_helper_atomic_disable(struct drm_plane *plane,
+ struct drm_plane_state *old_state)
+{
+ struct ast_private *ast = plane->dev->dev_private;
+
+ ast_set_index_reg_mask(ast, AST_IO_SEQ_PORT, 0x1, 0xdf, 0x20);
+}
+
+static const struct drm_plane_helper_funcs ast_primary_plane_helper_funcs = {
+ .prepare_fb = drm_gem_vram_plane_helper_prepare_fb,
+ .cleanup_fb = drm_gem_vram_plane_helper_cleanup_fb,
+ .atomic_check = ast_primary_plane_helper_atomic_check,
+ .atomic_update = ast_primary_plane_helper_atomic_update,
+ .atomic_disable = ast_primary_plane_helper_atomic_disable,
+};
+
+static const struct drm_plane_funcs ast_primary_plane_funcs = {
+ .update_plane = drm_atomic_helper_update_plane,
+ .disable_plane = drm_atomic_helper_disable_plane,
+ .destroy = drm_plane_cleanup,
+ .reset = drm_atomic_helper_plane_reset,
+ .atomic_duplicate_state = drm_atomic_helper_plane_duplicate_state,
+ .atomic_destroy_state = drm_atomic_helper_plane_destroy_state,
+};
+
+/*
+ * Cursor plane
+ */
+
+static const uint32_t ast_cursor_plane_formats[] = {
+ DRM_FORMAT_ARGB8888,
+};
+
+static int
+ast_cursor_plane_helper_prepare_fb(struct drm_plane *plane,
+ struct drm_plane_state *new_state)
+{
+ struct drm_framebuffer *fb = new_state->fb;
+ struct drm_crtc *crtc = new_state->crtc;
+ struct drm_gem_vram_object *gbo;
+ struct ast_private *ast;
+ int ret;
+ void *src, *dst;
+
+ if (!crtc || !fb)
+ return 0;
+
+ if (WARN_ON_ONCE(fb->width > AST_MAX_HWC_WIDTH) ||
+ WARN_ON_ONCE(fb->height > AST_MAX_HWC_HEIGHT))
+ return -EINVAL; /* BUG: didn't test in atomic_check() */
+
+ ast = crtc->dev->dev_private;
+
+ gbo = drm_gem_vram_of_gem(fb->obj[0]);
+ src = drm_gem_vram_vmap(gbo);
+ if (IS_ERR(src)) {
+ ret = PTR_ERR(src);
+ goto err_drm_gem_vram_unpin;
+ }
+
+ dst = drm_gem_vram_vmap(ast->cursor.gbo[ast->cursor.next_index]);
+ if (IS_ERR(dst)) {
+ ret = PTR_ERR(dst);
+ goto err_drm_gem_vram_vunmap_src;
+ }
+
+ ret = ast_cursor_update(dst, src, fb->width, fb->height);
+ if (ret)
+ goto err_drm_gem_vram_vunmap_dst;
+
+ /* Always unmap buffers here. Destination buffers are
+ * perma-pinned while the driver is active. We're only
+ * changing ref-counters here.
+ */
+ drm_gem_vram_vunmap(ast->cursor.gbo[ast->cursor.next_index], dst);
+ drm_gem_vram_vunmap(gbo, src);
+
+ return 0;
+
+err_drm_gem_vram_vunmap_dst:
+ drm_gem_vram_vunmap(ast->cursor.gbo[ast->cursor.next_index], dst);
+err_drm_gem_vram_vunmap_src:
+ drm_gem_vram_vunmap(gbo, src);
+err_drm_gem_vram_unpin:
+ drm_gem_vram_unpin(gbo);
+ return ret;
+}
+
+static int ast_cursor_plane_helper_atomic_check(struct drm_plane *plane,
+ struct drm_plane_state *state)
+{
+ struct drm_framebuffer *fb = state->fb;
+ struct drm_crtc_state *crtc_state;
+ int ret;
+
+ if (!state->crtc)
+ return 0;
+
+ crtc_state = drm_atomic_get_new_crtc_state(state->state, state->crtc);
+
+ ret = drm_atomic_helper_check_plane_state(state, crtc_state,
+ DRM_PLANE_HELPER_NO_SCALING,
+ DRM_PLANE_HELPER_NO_SCALING,
+ true, true);
+ if (ret)
+ return ret;
+
+ if (!state->visible)
+ return 0;
+
+ if (fb->width > AST_MAX_HWC_WIDTH || fb->height > AST_MAX_HWC_HEIGHT)
+ return -EINVAL;
+
+ return 0;
+}
+
+static void
+ast_cursor_plane_helper_atomic_update(struct drm_plane *plane,
+ struct drm_plane_state *old_state)
+{
+ struct drm_plane_state *state = plane->state;
+ struct drm_crtc *crtc = state->crtc;
+ struct drm_framebuffer *fb = state->fb;
+ struct ast_private *ast = plane->dev->dev_private;
+ struct ast_crtc *ast_crtc = to_ast_crtc(crtc);
+ struct drm_gem_vram_object *gbo;
+ s64 off;
+ u8 jreg;
+
+ ast_crtc->offset_x = AST_MAX_HWC_WIDTH - fb->width;
+ ast_crtc->offset_y = AST_MAX_HWC_HEIGHT - fb->height;
+
+ if (state->fb != old_state->fb) {
+ /* A new cursor image was installed. */
+ gbo = ast->cursor.gbo[ast->cursor.next_index];
+ off = drm_gem_vram_offset(gbo);
+ if (WARN_ON_ONCE(off < 0))
+ return; /* Bug: we didn't pin cursor HW BO to VRAM. */
+ ast_cursor_set_base(ast, off);
+
+ ++ast->cursor.next_index;
+ ast->cursor.next_index %= ARRAY_SIZE(ast->cursor.gbo);
+ }
+
+ ast_cursor_move(crtc, state->crtc_x, state->crtc_y);
+
+ jreg = 0x2;
+ /* enable ARGB cursor */
+ jreg |= 1;
+ ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xcb, 0xfc, jreg);
+}
+
+static void
+ast_cursor_plane_helper_atomic_disable(struct drm_plane *plane,
+ struct drm_plane_state *old_state)
+{
+ struct ast_private *ast = plane->dev->dev_private;
+
+ ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xcb, 0xfc, 0x00);
+}
+
+static const struct drm_plane_helper_funcs ast_cursor_plane_helper_funcs = {
+ .prepare_fb = ast_cursor_plane_helper_prepare_fb,
+ .cleanup_fb = NULL, /* not required for cursor plane */
+ .atomic_check = ast_cursor_plane_helper_atomic_check,
+ .atomic_update = ast_cursor_plane_helper_atomic_update,
+ .atomic_disable = ast_cursor_plane_helper_atomic_disable,
+};
+
+static const struct drm_plane_funcs ast_cursor_plane_funcs = {
+ .update_plane = drm_atomic_helper_update_plane,
+ .disable_plane = drm_atomic_helper_disable_plane,
+ .destroy = drm_plane_cleanup,
+ .reset = drm_atomic_helper_plane_reset,
+ .atomic_duplicate_state = drm_atomic_helper_plane_duplicate_state,
+ .atomic_destroy_state = drm_atomic_helper_plane_destroy_state,
+};
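The cursor plane above double-buffers the cursor image: prepare_fb copies the new image into ast->cursor.gbo[next_index] while the hardware still scans out the old buffer, and atomic_update re-points the hardware base address and then advances the index, so an in-use buffer is never overwritten. A trivial standalone sketch of the index cycling (the array size of two is assumed to match ARRAY_SIZE(ast->cursor.gbo)):

    #include <stdio.h>

    #define NUM_CURSOR_BO 2	/* assumed ARRAY_SIZE(ast->cursor.gbo) */

    int main(void)
    {
            unsigned int next_index = 0;
            int update;

            for (update = 0; update < 4; update++) {
                    printf("update %d writes BO %u\n", update, next_index);
                    next_index = (next_index + 1) % NUM_CURSOR_BO;	/* ping-pong */
            }
            return 0;
    }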
+
+/*
+ * CRTC
+ */
+
static void ast_crtc_dpms(struct drm_crtc *crtc, int mode)
{
struct ast_private *ast = crtc->dev->dev_private;
@@ -508,179 +769,196 @@ static void ast_crtc_dpms(struct drm_crtc *crtc, int mode)
if (ast->chip == AST1180)
return;
+ /* TODO: Maybe control display signal generation with
+ * Sync Enable (bit CR17.7).
+ */
switch (mode) {
case DRM_MODE_DPMS_ON:
case DRM_MODE_DPMS_STANDBY:
case DRM_MODE_DPMS_SUSPEND:
- ast_set_index_reg_mask(ast, AST_IO_SEQ_PORT, 0x1, 0xdf, 0);
if (ast->tx_chip_type == AST_TX_DP501)
ast_set_dp501_video_output(crtc->dev, 1);
- ast_crtc_load_lut(crtc);
+ ast_crtc_load_lut(ast, crtc);
break;
case DRM_MODE_DPMS_OFF:
if (ast->tx_chip_type == AST_TX_DP501)
ast_set_dp501_video_output(crtc->dev, 0);
- ast_set_index_reg_mask(ast, AST_IO_SEQ_PORT, 0x1, 0xdf, 0x20);
break;
}
}
-static int ast_crtc_do_set_base(struct drm_crtc *crtc,
- struct drm_framebuffer *fb,
- int x, int y, int atomic)
+static int ast_crtc_helper_atomic_check(struct drm_crtc *crtc,
+ struct drm_crtc_state *state)
{
- struct drm_gem_vram_object *gbo;
- int ret;
- s64 gpu_addr;
+ struct ast_private *ast = crtc->dev->dev_private;
+ struct ast_crtc_state *ast_state;
+ const struct drm_format_info *format;
+ bool succ;
- if (!atomic && fb) {
- gbo = drm_gem_vram_of_gem(fb->obj[0]);
- drm_gem_vram_unpin(gbo);
+ if (ast->chip == AST1180) {
+ DRM_ERROR("AST 1180 modesetting not supported\n");
+ return -EINVAL;
}
- gbo = drm_gem_vram_of_gem(crtc->primary->fb->obj[0]);
+ ast_state = to_ast_crtc_state(state);
- ret = drm_gem_vram_pin(gbo, DRM_GEM_VRAM_PL_FLAG_VRAM);
- if (ret)
- return ret;
- gpu_addr = drm_gem_vram_offset(gbo);
- if (gpu_addr < 0) {
- ret = (int)gpu_addr;
- goto err_drm_gem_vram_unpin;
- }
+ format = ast_state->format;
+ if (!format)
+ return 0;
- ast_set_offset_reg(crtc);
- ast_set_start_address_crt1(crtc, (u32)gpu_addr);
+ succ = ast_get_vbios_mode_info(format, &state->mode,
+ &state->adjusted_mode,
+ &ast_state->vbios_mode_info);
+ if (!succ)
+ return -EINVAL;
return 0;
-
-err_drm_gem_vram_unpin:
- drm_gem_vram_unpin(gbo);
- return ret;
}
-static int ast_crtc_mode_set_base(struct drm_crtc *crtc, int x, int y,
- struct drm_framebuffer *old_fb)
+static void ast_crtc_helper_atomic_begin(struct drm_crtc *crtc,
+ struct drm_crtc_state *old_crtc_state)
{
- return ast_crtc_do_set_base(crtc, old_fb, x, y, 0);
+ struct ast_private *ast = crtc->dev->dev_private;
+
+ ast_open_key(ast);
}
-static int ast_crtc_mode_set(struct drm_crtc *crtc,
- struct drm_display_mode *mode,
- struct drm_display_mode *adjusted_mode,
- int x, int y,
- struct drm_framebuffer *old_fb)
+static void ast_crtc_helper_atomic_flush(struct drm_crtc *crtc,
+ struct drm_crtc_state *old_crtc_state)
{
struct drm_device *dev = crtc->dev;
- struct ast_private *ast = crtc->dev->dev_private;
- struct ast_vbios_mode_info vbios_mode;
- bool ret;
- if (ast->chip == AST1180) {
- DRM_ERROR("AST 1180 modesetting not supported\n");
- return -EINVAL;
- }
+ struct ast_private *ast = dev->dev_private;
+ struct ast_crtc_state *ast_state;
+ const struct drm_format_info *format;
+ struct ast_vbios_mode_info *vbios_mode_info;
+ struct drm_display_mode *adjusted_mode;
- ret = ast_get_vbios_mode_info(crtc, mode, adjusted_mode, &vbios_mode);
- if (ret == false)
- return -EINVAL;
- ast_open_key(ast);
+ crtc->state->no_vblank = true;
- ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0xa1, 0x06);
+ ast_state = to_ast_crtc_state(crtc->state);
- ast_set_std_reg(crtc, adjusted_mode, &vbios_mode);
- ast_set_crtc_reg(crtc, adjusted_mode, &vbios_mode);
- ast_set_offset_reg(crtc);
- ast_set_dclk_reg(dev, adjusted_mode, &vbios_mode);
- ast_set_ext_reg(crtc, adjusted_mode, &vbios_mode);
- ast_set_sync_reg(dev, adjusted_mode, &vbios_mode);
- ast_set_dac_reg(crtc, adjusted_mode, &vbios_mode);
+ format = ast_state->format;
+ if (!format)
+ return;
- ast_crtc_mode_set_base(crtc, x, y, old_fb);
+ vbios_mode_info = &ast_state->vbios_mode_info;
- return 0;
-}
+ ast_set_color_reg(ast, format);
+ ast_set_vbios_color_reg(ast, format, vbios_mode_info);
-static void ast_crtc_disable(struct drm_crtc *crtc)
-{
- DRM_DEBUG_KMS("\n");
- ast_crtc_dpms(crtc, DRM_MODE_DPMS_OFF);
- if (crtc->primary->fb) {
- struct drm_framebuffer *fb = crtc->primary->fb;
- struct drm_gem_vram_object *gbo =
- drm_gem_vram_of_gem(fb->obj[0]);
+ if (!crtc->state->mode_changed)
+ return;
- drm_gem_vram_unpin(gbo);
- }
- crtc->primary->fb = NULL;
+ adjusted_mode = &crtc->state->adjusted_mode;
+
+ ast_set_vbios_mode_reg(ast, adjusted_mode, vbios_mode_info);
+ ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0xa1, 0x06);
+ ast_set_std_reg(ast, adjusted_mode, vbios_mode_info);
+ ast_set_crtc_reg(ast, adjusted_mode, vbios_mode_info);
+ ast_set_dclk_reg(ast, adjusted_mode, vbios_mode_info);
+ ast_set_crtthd_reg(ast);
+ ast_set_sync_reg(ast, adjusted_mode, vbios_mode_info);
}
-static void ast_crtc_prepare(struct drm_crtc *crtc)
+static void
+ast_crtc_helper_atomic_enable(struct drm_crtc *crtc,
+ struct drm_crtc_state *old_crtc_state)
{
-
+ ast_crtc_dpms(crtc, DRM_MODE_DPMS_ON);
}
-static void ast_crtc_commit(struct drm_crtc *crtc)
+static void
+ast_crtc_helper_atomic_disable(struct drm_crtc *crtc,
+ struct drm_crtc_state *old_crtc_state)
{
- struct ast_private *ast = crtc->dev->dev_private;
- ast_set_index_reg_mask(ast, AST_IO_SEQ_PORT, 0x1, 0xdf, 0);
- ast_crtc_load_lut(crtc);
+ ast_crtc_dpms(crtc, DRM_MODE_DPMS_OFF);
}
-
static const struct drm_crtc_helper_funcs ast_crtc_helper_funcs = {
- .dpms = ast_crtc_dpms,
- .mode_set = ast_crtc_mode_set,
- .mode_set_base = ast_crtc_mode_set_base,
- .disable = ast_crtc_disable,
- .prepare = ast_crtc_prepare,
- .commit = ast_crtc_commit,
-
+ .atomic_check = ast_crtc_helper_atomic_check,
+ .atomic_begin = ast_crtc_helper_atomic_begin,
+ .atomic_flush = ast_crtc_helper_atomic_flush,
+ .atomic_enable = ast_crtc_helper_atomic_enable,
+ .atomic_disable = ast_crtc_helper_atomic_disable,
};
-static void ast_crtc_reset(struct drm_crtc *crtc)
+static void ast_crtc_destroy(struct drm_crtc *crtc)
{
-
+ drm_crtc_cleanup(crtc);
+ kfree(crtc);
}
-static int ast_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, u16 *green,
- u16 *blue, uint32_t size,
- struct drm_modeset_acquire_ctx *ctx)
+static struct drm_crtc_state *
+ast_crtc_atomic_duplicate_state(struct drm_crtc *crtc)
{
- ast_crtc_load_lut(crtc);
+ struct ast_crtc_state *new_ast_state, *ast_state;
- return 0;
-}
+ if (WARN_ON(!crtc->state))
+ return NULL;
+ new_ast_state = kmalloc(sizeof(*new_ast_state), GFP_KERNEL);
+ if (!new_ast_state)
+ return NULL;
+ __drm_atomic_helper_crtc_duplicate_state(crtc, &new_ast_state->base);
-static void ast_crtc_destroy(struct drm_crtc *crtc)
+ ast_state = to_ast_crtc_state(crtc->state);
+
+ new_ast_state->format = ast_state->format;
+ memcpy(&new_ast_state->vbios_mode_info, &ast_state->vbios_mode_info,
+ sizeof(new_ast_state->vbios_mode_info));
+
+ return &new_ast_state->base;
+}
+
+static void ast_crtc_atomic_destroy_state(struct drm_crtc *crtc,
+ struct drm_crtc_state *state)
{
- drm_crtc_cleanup(crtc);
- kfree(crtc);
+ struct ast_crtc_state *ast_state = to_ast_crtc_state(state);
+
+ __drm_atomic_helper_crtc_destroy_state(&ast_state->base);
+ kfree(ast_state);
}
static const struct drm_crtc_funcs ast_crtc_funcs = {
- .cursor_set = ast_cursor_set,
- .cursor_move = ast_cursor_move,
- .reset = ast_crtc_reset,
+ .reset = drm_atomic_helper_crtc_reset,
- .set_config = drm_crtc_helper_set_config,
- .gamma_set = ast_crtc_gamma_set,
+ .gamma_set = drm_atomic_helper_legacy_gamma_set,
.destroy = ast_crtc_destroy,
+ .set_config = drm_atomic_helper_set_config,
+ .page_flip = drm_atomic_helper_page_flip,
+ .atomic_duplicate_state = ast_crtc_atomic_duplicate_state,
+ .atomic_destroy_state = ast_crtc_atomic_destroy_state,
};
static int ast_crtc_init(struct drm_device *dev)
{
+ struct ast_private *ast = dev->dev_private;
struct ast_crtc *crtc;
+ int ret;
crtc = kzalloc(sizeof(struct ast_crtc), GFP_KERNEL);
if (!crtc)
return -ENOMEM;
- drm_crtc_init(dev, &crtc->base, &ast_crtc_funcs);
+ ret = drm_crtc_init_with_planes(dev, &crtc->base, &ast->primary_plane,
+ &ast->cursor_plane, &ast_crtc_funcs,
+ NULL);
+ if (ret)
+ goto err_kfree;
+
drm_mode_crtc_set_gamma_size(&crtc->base, 256);
drm_crtc_helper_add(&crtc->base, &ast_crtc_helper_funcs);
return 0;
+
+err_kfree:
+ kfree(crtc);
+ return ret;
}
+/*
+ * Encoder
+ */
+
static void ast_encoder_destroy(struct drm_encoder *encoder)
{
drm_encoder_cleanup(encoder);
@@ -691,35 +969,6 @@ static const struct drm_encoder_funcs ast_enc_funcs = {
.destroy = ast_encoder_destroy,
};
-static void ast_encoder_dpms(struct drm_encoder *encoder, int mode)
-{
-
-}
-
-static void ast_encoder_mode_set(struct drm_encoder *encoder,
- struct drm_display_mode *mode,
- struct drm_display_mode *adjusted_mode)
-{
-}
-
-static void ast_encoder_prepare(struct drm_encoder *encoder)
-{
-
-}
-
-static void ast_encoder_commit(struct drm_encoder *encoder)
-{
-
-}
-
-
-static const struct drm_encoder_helper_funcs ast_enc_helper_funcs = {
- .dpms = ast_encoder_dpms,
- .prepare = ast_encoder_prepare,
- .commit = ast_encoder_commit,
- .mode_set = ast_encoder_mode_set,
-};
-
static int ast_encoder_init(struct drm_device *dev)
{
struct ast_encoder *ast_encoder;
@@ -730,12 +979,15 @@ static int ast_encoder_init(struct drm_device *dev)
drm_encoder_init(dev, &ast_encoder->base, &ast_enc_funcs,
DRM_MODE_ENCODER_DAC, NULL);
- drm_encoder_helper_add(&ast_encoder->base, &ast_enc_helper_funcs);
ast_encoder->base.possible_crtcs = 1;
return 0;
}
+/*
+ * Connector
+ */
+
static int ast_get_modes(struct drm_connector *connector)
{
struct ast_connector *ast_connector = to_ast_connector(connector);
@@ -834,14 +1086,16 @@ static void ast_connector_destroy(struct drm_connector *connector)
}
static const struct drm_connector_helper_funcs ast_connector_helper_funcs = {
- .mode_valid = ast_mode_valid,
.get_modes = ast_get_modes,
+ .mode_valid = ast_mode_valid,
};
static const struct drm_connector_funcs ast_connector_funcs = {
- .dpms = drm_helper_connector_dpms,
+ .reset = drm_atomic_helper_connector_reset,
.fill_modes = drm_helper_probe_single_connector_modes,
.destroy = ast_connector_destroy,
+ .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
+ .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
};
static int ast_connector_init(struct drm_device *dev)
@@ -890,8 +1144,7 @@ static int ast_cursor_init(struct drm_device *dev)
size = roundup(AST_HWC_SIZE + AST_HWC_SIGNATURE_SIZE, PAGE_SIZE);
for (i = 0; i < ARRAY_SIZE(ast->cursor.gbo); ++i) {
- gbo = drm_gem_vram_create(dev, &dev->vram_mm->bdev,
- size, 0, false);
+ gbo = drm_gem_vram_create(dev, size, 0);
if (IS_ERR(gbo)) {
ret = PTR_ERR(gbo);
goto err_drm_gem_vram_put;
@@ -934,10 +1187,39 @@ static void ast_cursor_fini(struct drm_device *dev)
int ast_mode_init(struct drm_device *dev)
{
+ struct ast_private *ast = dev->dev_private;
+ int ret;
+
+ memset(&ast->primary_plane, 0, sizeof(ast->primary_plane));
+ ret = drm_universal_plane_init(dev, &ast->primary_plane, 0x01,
+ &ast_primary_plane_funcs,
+ ast_primary_plane_formats,
+ ARRAY_SIZE(ast_primary_plane_formats),
+ NULL, DRM_PLANE_TYPE_PRIMARY, NULL);
+ if (ret) {
+ DRM_ERROR("ast: drm_universal_plane_init() failed: %d\n", ret);
+ return ret;
+ }
+ drm_plane_helper_add(&ast->primary_plane,
+ &ast_primary_plane_helper_funcs);
+
+ ret = drm_universal_plane_init(dev, &ast->cursor_plane, 0x01,
+ &ast_cursor_plane_funcs,
+ ast_cursor_plane_formats,
+ ARRAY_SIZE(ast_cursor_plane_formats),
+ NULL, DRM_PLANE_TYPE_CURSOR, NULL);
+ if (ret) {
+ DRM_ERROR("drm_universal_plane_failed(): %d\n", ret);
+ return ret;
+ }
+ drm_plane_helper_add(&ast->cursor_plane,
+ &ast_cursor_plane_helper_funcs);
+
ast_cursor_init(dev);
ast_crtc_init(dev);
ast_encoder_init(dev);
ast_connector_init(dev);
+
return 0;
}
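
ast_mode_init() now registers both planes before the CRTC, so that drm_crtc_init_with_planes() further up can link them; drm_universal_plane_init() takes a possible-CRTCs bitmask (0x01 selects CRTC 0), a format array, and the plane type. As a rough sketch, a format table for such a primary plane could look like the following (the entries are assumptions for illustration; the driver's real tables are defined in an earlier part of the patch not shown here):

#include <drm/drm_fourcc.h>

/* Illustrative sketch only: the shape of a format table handed to
 * drm_universal_plane_init(); these entries are assumed, not the
 * driver's actual list.
 */
static const uint32_t example_primary_plane_formats[] = {
	DRM_FORMAT_XRGB8888,
	DRM_FORMAT_RGB565,
	DRM_FORMAT_C8,
};
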
@@ -1153,106 +1435,6 @@ static void ast_cursor_set_base(struct ast_private *ast, u64 address)
ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0xca, addr2);
}
-static int ast_show_cursor(struct drm_crtc *crtc, void *src,
- unsigned int width, unsigned int height)
-{
- struct ast_private *ast = crtc->dev->dev_private;
- struct ast_crtc *ast_crtc = to_ast_crtc(crtc);
- struct drm_gem_vram_object *gbo;
- void *dst;
- s64 off;
- int ret;
- u8 jreg;
-
- gbo = ast->cursor.gbo[ast->cursor.next_index];
- dst = drm_gem_vram_vmap(gbo);
- if (IS_ERR(dst))
- return PTR_ERR(dst);
- off = drm_gem_vram_offset(gbo);
- if (off < 0) {
- ret = (int)off;
- goto err_drm_gem_vram_vunmap;
- }
-
- ret = ast_cursor_update(dst, src, width, height);
- if (ret)
- goto err_drm_gem_vram_vunmap;
- ast_cursor_set_base(ast, off);
-
- ast_crtc->offset_x = AST_MAX_HWC_WIDTH - width;
- ast_crtc->offset_y = AST_MAX_HWC_WIDTH - height;
-
- jreg = 0x2;
- /* enable ARGB cursor */
- jreg |= 1;
- ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xcb, 0xfc, jreg);
-
- ++ast->cursor.next_index;
- ast->cursor.next_index %= ARRAY_SIZE(ast->cursor.gbo);
-
- drm_gem_vram_vunmap(gbo, dst);
-
- return 0;
-
-err_drm_gem_vram_vunmap:
- drm_gem_vram_vunmap(gbo, dst);
- return ret;
-}
-
-static void ast_hide_cursor(struct drm_crtc *crtc)
-{
- struct ast_private *ast = crtc->dev->dev_private;
-
- ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xcb, 0xfc, 0x00);
-}
-
-static int ast_cursor_set(struct drm_crtc *crtc,
- struct drm_file *file_priv,
- uint32_t handle,
- uint32_t width,
- uint32_t height)
-{
- struct drm_gem_object *obj;
- struct drm_gem_vram_object *gbo;
- u8 *src;
- int ret;
-
- if (!handle) {
- ast_hide_cursor(crtc);
- return 0;
- }
-
- if (width > AST_MAX_HWC_WIDTH || height > AST_MAX_HWC_HEIGHT)
- return -EINVAL;
-
- obj = drm_gem_object_lookup(file_priv, handle);
- if (!obj) {
- DRM_ERROR("Cannot find cursor object %x for crtc\n", handle);
- return -ENOENT;
- }
- gbo = drm_gem_vram_of_gem(obj);
- src = drm_gem_vram_vmap(gbo);
- if (IS_ERR(src)) {
- ret = PTR_ERR(src);
- goto err_drm_gem_object_put_unlocked;
- }
-
- ret = ast_show_cursor(crtc, src, width, height);
- if (ret)
- goto err_drm_gem_vram_vunmap;
-
- drm_gem_vram_vunmap(gbo, src);
- drm_gem_object_put_unlocked(obj);
-
- return 0;
-
-err_drm_gem_vram_vunmap:
- drm_gem_vram_vunmap(gbo, src);
-err_drm_gem_object_put_unlocked:
- drm_gem_object_put_unlocked(obj);
- return ret;
-}
-
static int ast_cursor_move(struct drm_crtc *crtc,
int x, int y)
{
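
A caveat in the CRTC conversion above: .reset points at drm_atomic_helper_crtc_reset(), which allocates a bare struct drm_crtc_state, while ast_crtc_atomic_duplicate_state() casts crtc->state to the larger struct ast_crtc_state and copies format and vbios_mode_info out of it, so the very first duplicate reads past the end of the initial allocation. A driver-specific reset sized for the subclass avoids that; a minimal sketch, assuming the ast_crtc_state layout introduced above:

#include <drm/drm_atomic_state_helper.h>

/* Sketch only: allocate the initial CRTC state with the size of the
 * subclass instead of the bare base struct.
 */
static void ast_crtc_reset_sized(struct drm_crtc *crtc)
{
	struct ast_crtc_state *ast_state =
		kzalloc(sizeof(*ast_state), GFP_KERNEL);

	if (crtc->state)
		crtc->funcs->atomic_destroy_state(crtc, crtc->state);

	if (ast_state)
		__drm_atomic_helper_crtc_reset(crtc, &ast_state->base);
	else
		__drm_atomic_helper_crtc_reset(crtc, NULL);
}
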
diff --git a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_crtc.c b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_crtc.c
index f2e73e6d46b8..10985134ce0b 100644
--- a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_crtc.c
+++ b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_crtc.c
@@ -73,7 +73,11 @@ static void atmel_hlcdc_crtc_mode_set_nofb(struct drm_crtc *c)
unsigned long prate;
unsigned int mask = ATMEL_HLCDC_CLKDIV_MASK | ATMEL_HLCDC_CLKPOL;
unsigned int cfg = 0;
- int div;
+ int div, ret;
+
+ ret = clk_prepare_enable(crtc->dc->hlcdc->sys_clk);
+ if (ret)
+ return;
vm.vfront_porch = adj->crtc_vsync_start - adj->crtc_vdisplay;
vm.vback_porch = adj->crtc_vtotal - adj->crtc_vsync_end;
@@ -95,14 +99,14 @@ static void atmel_hlcdc_crtc_mode_set_nofb(struct drm_crtc *c)
(adj->crtc_hdisplay - 1) |
((adj->crtc_vdisplay - 1) << 16));
+ prate = clk_get_rate(crtc->dc->hlcdc->sys_clk);
+ mode_rate = adj->crtc_clock * 1000;
if (!crtc->dc->desc->fixed_clksrc) {
+ prate *= 2;
cfg |= ATMEL_HLCDC_CLKSEL;
mask |= ATMEL_HLCDC_CLKSEL;
}
- prate = 2 * clk_get_rate(crtc->dc->hlcdc->sys_clk);
- mode_rate = adj->crtc_clock * 1000;
-
div = DIV_ROUND_UP(prate, mode_rate);
if (div < 2) {
div = 2;
@@ -117,8 +121,8 @@ static void atmel_hlcdc_crtc_mode_set_nofb(struct drm_crtc *c)
int div_low = prate / mode_rate;
if (div_low >= 2 &&
- ((prate / div_low - mode_rate) <
- 10 * (mode_rate - prate / div)))
+ (10 * (prate / div_low - mode_rate) <
+ (mode_rate - prate / div)))
/*
* At least 10 times better when using a higher
* frequency than requested, instead of a lower.
@@ -147,6 +151,8 @@ static void atmel_hlcdc_crtc_mode_set_nofb(struct drm_crtc *c)
ATMEL_HLCDC_VSPSU | ATMEL_HLCDC_VSPHO |
ATMEL_HLCDC_GUARDTIME_MASK | ATMEL_HLCDC_MODE_MASK,
cfg);
+
+ clk_disable_unprepare(crtc->dc->hlcdc->sys_clk);
}
static enum drm_mode_status
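
The rewritten divider check above inverts the old bias toward overclocking the pixel clock: the lower divider (faster clock) is now chosen only when its overshoot is more than ten times smaller than the higher divider's undershoot. A standalone sketch with made-up rates shows the selection (plain userspace C, for illustration only):

#include <stdio.h>

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

int main(void)
{
	unsigned long prate = 132000000;    /* assumed 2x sys_clk rate */
	unsigned long mode_rate = 25175000; /* assumed 640x480@60 pixel clock */
	int div = DIV_ROUND_UP(prate, mode_rate); /* 6 -> 22 MHz, undershoots */
	int div_low = prate / mode_rate;          /* 5 -> 26.4 MHz, overshoots */

	/* Same test as the patched driver: keep the slower clock unless
	 * the faster one is at least 10x closer to the requested rate.
	 */
	if (div_low >= 2 &&
	    10 * (prate / div_low - mode_rate) < (mode_rate - prate / div))
		div = div_low;

	printf("chosen div=%d -> %lu Hz\n", div, prate / div);
	return 0;
}
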
diff --git a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.c b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.c
index 92640298ad41..112aa5066cee 100644
--- a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.c
+++ b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.c
@@ -557,12 +557,6 @@ static irqreturn_t atmel_hlcdc_dc_irq_handler(int irq, void *data)
return IRQ_HANDLED;
}
-static struct drm_framebuffer *atmel_hlcdc_fb_create(struct drm_device *dev,
- struct drm_file *file_priv, const struct drm_mode_fb_cmd2 *mode_cmd)
-{
- return drm_gem_fb_create(dev, file_priv, mode_cmd);
-}
-
struct atmel_hlcdc_dc_commit {
struct work_struct work;
struct drm_device *dev;
@@ -657,7 +651,7 @@ error:
}
static const struct drm_mode_config_funcs mode_config_funcs = {
- .fb_create = atmel_hlcdc_fb_create,
+ .fb_create = drm_gem_fb_create,
.atomic_check = drm_atomic_helper_check,
.atomic_commit = atmel_hlcdc_dc_atomic_commit,
};
@@ -727,18 +721,10 @@ static int atmel_hlcdc_dc_load(struct drm_device *dev)
dc->hlcdc = dev_get_drvdata(dev->dev->parent);
dev->dev_private = dc;
- if (dc->desc->fixed_clksrc) {
- ret = clk_prepare_enable(dc->hlcdc->sys_clk);
- if (ret) {
- dev_err(dev->dev, "failed to enable sys_clk\n");
- goto err_destroy_wq;
- }
- }
-
ret = clk_prepare_enable(dc->hlcdc->periph_clk);
if (ret) {
dev_err(dev->dev, "failed to enable periph_clk\n");
- goto err_sys_clk_disable;
+ goto err_destroy_wq;
}
pm_runtime_enable(dev->dev);
@@ -774,9 +760,6 @@ static int atmel_hlcdc_dc_load(struct drm_device *dev)
err_periph_clk_disable:
pm_runtime_disable(dev->dev);
clk_disable_unprepare(dc->hlcdc->periph_clk);
-err_sys_clk_disable:
- if (dc->desc->fixed_clksrc)
- clk_disable_unprepare(dc->hlcdc->sys_clk);
err_destroy_wq:
destroy_workqueue(dc->wq);
@@ -801,8 +784,6 @@ static void atmel_hlcdc_dc_unload(struct drm_device *dev)
pm_runtime_disable(dev->dev);
clk_disable_unprepare(dc->hlcdc->periph_clk);
- if (dc->desc->fixed_clksrc)
- clk_disable_unprepare(dc->hlcdc->sys_clk);
destroy_workqueue(dc->wq);
}
@@ -916,8 +897,6 @@ static int atmel_hlcdc_dc_drm_suspend(struct device *dev)
regmap_read(regmap, ATMEL_HLCDC_IMR, &dc->suspend.imr);
regmap_write(regmap, ATMEL_HLCDC_IDR, dc->suspend.imr);
clk_disable_unprepare(dc->hlcdc->periph_clk);
- if (dc->desc->fixed_clksrc)
- clk_disable_unprepare(dc->hlcdc->sys_clk);
return 0;
}
@@ -927,8 +906,6 @@ static int atmel_hlcdc_dc_drm_resume(struct device *dev)
struct drm_device *drm_dev = dev_get_drvdata(dev);
struct atmel_hlcdc_dc *dc = drm_dev->dev_private;
- if (dc->desc->fixed_clksrc)
- clk_prepare_enable(dc->hlcdc->sys_clk);
clk_prepare_enable(dc->hlcdc->periph_clk);
regmap_write(dc->hlcdc->regmap, ATMEL_HLCDC_IER, dc->suspend.imr);
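
With atmel_hlcdc_crtc_mode_set_nofb() now taking its own sys_clk reference around the register writes, the fixed_clksrc special cases in load, unload, suspend and resume become redundant and are dropped above. The underlying idiom is simply to bracket the register access with a prepare/enable pair; a minimal sketch (function name and parameters are illustrative):

#include <linux/clk.h>
#include <linux/regmap.h>

/* Sketch only: hold a clock reference for the duration of one register
 * update instead of for the lifetime of the driver.
 */
static int hlcdc_cfg_update(struct clk *sys_clk, struct regmap *regmap,
			    unsigned int reg, unsigned int mask,
			    unsigned int cfg)
{
	int ret;

	ret = clk_prepare_enable(sys_clk);	/* may sleep */
	if (ret)
		return ret;

	ret = regmap_update_bits(regmap, reg, mask, cfg);

	clk_disable_unprepare(sys_clk);		/* balances the enable */
	return ret;
}
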
diff --git a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_plane.c b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_plane.c
index 034f202dfe8f..40800ec5700a 100644
--- a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_plane.c
+++ b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_plane.c
@@ -604,7 +604,7 @@ static int atmel_hlcdc_plane_atomic_check(struct drm_plane *p,
int ret;
int i;
- if (!state->base.crtc || !fb)
+ if (!state->base.crtc || WARN_ON(!fb))
return 0;
crtc_state = drm_atomic_get_existing_crtc_state(s->state, s->crtc);
diff --git a/drivers/gpu/drm/bochs/bochs_hw.c b/drivers/gpu/drm/bochs/bochs_hw.c
index e567bdfa2ab8..b615b7dfdd9d 100644
--- a/drivers/gpu/drm/bochs/bochs_hw.c
+++ b/drivers/gpu/drm/bochs/bochs_hw.c
@@ -255,7 +255,7 @@ void bochs_hw_setformat(struct bochs_device *bochs,
DRM_ERROR("%s: Huh? Got framebuffer format 0x%x",
__func__, format->format);
break;
- };
+ }
}
void bochs_hw_setbase(struct bochs_device *bochs,
diff --git a/drivers/gpu/drm/bridge/Kconfig b/drivers/gpu/drm/bridge/Kconfig
index 34362976cd6f..0b9ca5862455 100644
--- a/drivers/gpu/drm/bridge/Kconfig
+++ b/drivers/gpu/drm/bridge/Kconfig
@@ -16,16 +16,6 @@ config DRM_PANEL_BRIDGE
menu "Display Interface Bridges"
depends on DRM && DRM_BRIDGE
-config DRM_ANALOGIX_ANX78XX
- tristate "Analogix ANX78XX bridge"
- select DRM_KMS_HELPER
- select REGMAP_I2C
- ---help---
- ANX78XX is an ultra-low power Full-HD SlimPort transmitter
- designed for portable devices. The ANX78XX transforms
- the HDMI output of an application processor to MyDP
- or DisplayPort.
-
config DRM_CDNS_DSI
tristate "Cadence DPI/DSI bridge"
select DRM_KMS_HELPER
@@ -45,14 +35,14 @@ config DRM_DUMB_VGA_DAC
Support for non-programmable RGB to VGA DAC bridges, such as ADI
ADV7123, TI THS8134 and THS8135 or passive resistor ladder DACs.
-config DRM_LVDS_ENCODER
- tristate "Transparent parallel to LVDS encoder support"
+config DRM_LVDS_CODEC
+ tristate "Transparent LVDS encoders and decoders support"
depends on OF
select DRM_KMS_HELPER
select DRM_PANEL_BRIDGE
help
- Support for transparent parallel to LVDS encoders that don't require
- any configuration.
+ Support for transparent LVDS encoders and decoders that don't
+ require any configuration.
config DRM_MEGACHIPS_STDPXXXX_GE_B850V3_FW
tristate "MegaChips stdp4028-ge-b850v3-fw and stdp2690-ge-b850v3-fw"
@@ -60,10 +50,10 @@ config DRM_MEGACHIPS_STDPXXXX_GE_B850V3_FW
select DRM_KMS_HELPER
select DRM_PANEL
---help---
- This is a driver for the display bridges of
- GE B850v3 that convert dual channel LVDS
- to DP++. This is used with the i.MX6 imx-ldb
- driver. You are likely to say N here.
+ This is a driver for the display bridges of
+ GE B850v3 that convert dual channel LVDS
+ to DP++. This is used with the i.MX6 imx-ldb
+ driver. You are likely to say N here.
config DRM_NXP_PTN3460
tristate "NXP PTN3460 DP/LVDS bridge"
diff --git a/drivers/gpu/drm/bridge/Makefile b/drivers/gpu/drm/bridge/Makefile
index 4934fcf5a6f8..cd16ce830270 100644
--- a/drivers/gpu/drm/bridge/Makefile
+++ b/drivers/gpu/drm/bridge/Makefile
@@ -1,8 +1,7 @@
# SPDX-License-Identifier: GPL-2.0
-obj-$(CONFIG_DRM_ANALOGIX_ANX78XX) += analogix-anx78xx.o
obj-$(CONFIG_DRM_CDNS_DSI) += cdns-dsi.o
obj-$(CONFIG_DRM_DUMB_VGA_DAC) += dumb-vga-dac.o
-obj-$(CONFIG_DRM_LVDS_ENCODER) += lvds-encoder.o
+obj-$(CONFIG_DRM_LVDS_CODEC) += lvds-codec.o
obj-$(CONFIG_DRM_MEGACHIPS_STDPXXXX_GE_B850V3_FW) += megachips-stdpxxxx-ge-b850v3-fw.o
obj-$(CONFIG_DRM_NXP_PTN3460) += nxp-ptn3460.o
obj-$(CONFIG_DRM_PARADE_PS8622) += parade-ps8622.o
@@ -12,8 +11,9 @@ obj-$(CONFIG_DRM_SII9234) += sii9234.o
obj-$(CONFIG_DRM_THINE_THC63LVD1024) += thc63lvd1024.o
obj-$(CONFIG_DRM_TOSHIBA_TC358764) += tc358764.o
obj-$(CONFIG_DRM_TOSHIBA_TC358767) += tc358767.o
-obj-$(CONFIG_DRM_ANALOGIX_DP) += analogix/
obj-$(CONFIG_DRM_I2C_ADV7511) += adv7511/
obj-$(CONFIG_DRM_TI_SN65DSI86) += ti-sn65dsi86.o
obj-$(CONFIG_DRM_TI_TFP410) += ti-tfp410.o
+
+obj-y += analogix/
obj-y += synopsys/
diff --git a/drivers/gpu/drm/bridge/analogix-anx78xx.h b/drivers/gpu/drm/bridge/analogix-anx78xx.h
deleted file mode 100644
index 55d6c2109740..000000000000
--- a/drivers/gpu/drm/bridge/analogix-anx78xx.h
+++ /dev/null
@@ -1,703 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * Copyright(c) 2016, Analogix Semiconductor. All rights reserved.
- */
-
-#ifndef __ANX78xx_H
-#define __ANX78xx_H
-
-/***************************************************************/
-/* Register definitions for RX_PO */
-/***************************************************************/
-
-/*
- * System Control and Status
- */
-
-/* Software Reset Register 1 */
-#define SP_SOFTWARE_RESET1_REG 0x11
-#define SP_VIDEO_RST BIT(4)
-#define SP_HDCP_MAN_RST BIT(2)
-#define SP_TMDS_RST BIT(1)
-#define SP_SW_MAN_RST BIT(0)
-
-/* System Status Register */
-#define SP_SYSTEM_STATUS_REG 0x14
-#define SP_TMDS_CLOCK_DET BIT(1)
-#define SP_TMDS_DE_DET BIT(0)
-
-/* HDMI Status Register */
-#define SP_HDMI_STATUS_REG 0x15
-#define SP_HDMI_AUD_LAYOUT BIT(3)
-#define SP_HDMI_DET BIT(0)
-# define SP_DVI_MODE 0
-# define SP_HDMI_MODE 1
-
-/* HDMI Mute Control Register */
-#define SP_HDMI_MUTE_CTRL_REG 0x16
-#define SP_AUD_MUTE BIT(1)
-#define SP_VID_MUTE BIT(0)
-
-/* System Power Down Register 1 */
-#define SP_SYSTEM_POWER_DOWN1_REG 0x18
-#define SP_PWDN_CTRL BIT(0)
-
-/*
- * Audio and Video Auto Control
- */
-
-/* Auto Audio and Video Control register */
-#define SP_AUDVID_CTRL_REG 0x20
-#define SP_AVC_OE BIT(7)
-#define SP_AAC_OE BIT(6)
-#define SP_AVC_EN BIT(1)
-#define SP_AAC_EN BIT(0)
-
-/* Audio Exception Enable Registers */
-#define SP_AUD_EXCEPTION_ENABLE_BASE (0x24 - 1)
-/* Bits for Audio Exception Enable Register 3 */
-#define SP_AEC_EN21 BIT(5)
-
-/*
- * Interrupt
- */
-
-/* Interrupt Status Register 1 */
-#define SP_INT_STATUS1_REG 0x31
-/* Bits for Interrupt Status Register 1 */
-#define SP_HDMI_DVI BIT(7)
-#define SP_CKDT_CHG BIT(6)
-#define SP_SCDT_CHG BIT(5)
-#define SP_PCLK_CHG BIT(4)
-#define SP_PLL_UNLOCK BIT(3)
-#define SP_CABLE_PLUG_CHG BIT(2)
-#define SP_SET_MUTE BIT(1)
-#define SP_SW_INTR BIT(0)
-/* Bits for Interrupt Status Register 2 */
-#define SP_HDCP_ERR BIT(5)
-#define SP_AUDIO_SAMPLE_CHG BIT(0) /* undocumented */
-/* Bits for Interrupt Status Register 3 */
-#define SP_AUD_MODE_CHG BIT(0)
-/* Bits for Interrupt Status Register 5 */
-#define SP_AUDIO_RCV BIT(0)
-/* Bits for Interrupt Status Register 6 */
-#define SP_INT_STATUS6_REG 0x36
-#define SP_CTS_RCV BIT(7)
-#define SP_NEW_AUD_PKT BIT(4)
-#define SP_NEW_AVI_PKT BIT(1)
-#define SP_NEW_CP_PKT BIT(0)
-/* Bits for Interrupt Status Register 7 */
-#define SP_NO_VSI BIT(7)
-#define SP_NEW_VS BIT(4)
-
-/* Interrupt Mask 1 Status Registers */
-#define SP_INT_MASK1_REG 0x41
-
-/* HDMI US TIMER Control Register */
-#define SP_HDMI_US_TIMER_CTRL_REG 0x49
-#define SP_MS_TIMER_MARGIN_10_8_MASK 0x07
-
-/*
- * TMDS Control
- */
-
-/* TMDS Control Registers */
-#define SP_TMDS_CTRL_BASE (0x50 - 1)
-/* Bits for TMDS Control Register 7 */
-#define SP_PD_RT BIT(0)
-
-/*
- * Video Control
- */
-
-/* Video Status Register */
-#define SP_VIDEO_STATUS_REG 0x70
-#define SP_COLOR_DEPTH_MASK 0xf0
-#define SP_COLOR_DEPTH_SHIFT 4
-# define SP_COLOR_DEPTH_MODE_LEGACY 0x00
-# define SP_COLOR_DEPTH_MODE_24BIT 0x04
-# define SP_COLOR_DEPTH_MODE_30BIT 0x05
-# define SP_COLOR_DEPTH_MODE_36BIT 0x06
-# define SP_COLOR_DEPTH_MODE_48BIT 0x07
-
-/* Video Data Range Control Register */
-#define SP_VID_DATA_RANGE_CTRL_REG 0x83
-#define SP_R2Y_INPUT_LIMIT BIT(1)
-
-/* Pixel Clock High Resolution Counter Registers */
-#define SP_PCLK_HIGHRES_CNT_BASE (0x8c - 1)
-
-/*
- * Audio Control
- */
-
-/* Number of Audio Channels Status Registers */
-#define SP_AUD_CH_STATUS_REG_NUM 6
-
-/* Audio IN S/PDIF Channel Status Registers */
-#define SP_AUD_SPDIF_CH_STATUS_BASE 0xc7
-
-/* Audio IN S/PDIF Channel Status Register 4 */
-#define SP_FS_FREQ_MASK 0x0f
-# define SP_FS_FREQ_44100HZ 0x00
-# define SP_FS_FREQ_48000HZ 0x02
-# define SP_FS_FREQ_32000HZ 0x03
-# define SP_FS_FREQ_88200HZ 0x08
-# define SP_FS_FREQ_96000HZ 0x0a
-# define SP_FS_FREQ_176400HZ 0x0c
-# define SP_FS_FREQ_192000HZ 0x0e
-
-/*
- * Micellaneous Control Block
- */
-
-/* CHIP Control Register */
-#define SP_CHIP_CTRL_REG 0xe3
-#define SP_MAN_HDMI5V_DET BIT(3)
-#define SP_PLLLOCK_CKDT_EN BIT(2)
-#define SP_ANALOG_CKDT_EN BIT(1)
-#define SP_DIGITAL_CKDT_EN BIT(0)
-
-/* Packet Receiving Status Register */
-#define SP_PACKET_RECEIVING_STATUS_REG 0xf3
-#define SP_AVI_RCVD BIT(5)
-#define SP_VSI_RCVD BIT(1)
-
-/***************************************************************/
-/* Register definitions for RX_P1 */
-/***************************************************************/
-
-/* HDCP BCAPS Shadow Register */
-#define SP_HDCP_BCAPS_SHADOW_REG 0x2a
-#define SP_BCAPS_REPEATER BIT(5)
-
-/* HDCP Status Register */
-#define SP_RX_HDCP_STATUS_REG 0x3f
-#define SP_AUTH_EN BIT(4)
-
-/*
- * InfoFrame and Control Packet Registers
- */
-
-/* AVI InfoFrame packet checksum */
-#define SP_AVI_INFOFRAME_CHECKSUM 0xa3
-
-/* AVI InfoFrame Registers */
-#define SP_AVI_INFOFRAME_DATA_BASE 0xa4
-
-#define SP_AVI_COLOR_F_MASK 0x60
-#define SP_AVI_COLOR_F_SHIFT 5
-
-/* Audio InfoFrame Registers */
-#define SP_AUD_INFOFRAME_DATA_BASE 0xc4
-#define SP_AUD_INFOFRAME_LAYOUT_MASK 0x0f
-
-/* MPEG/HDMI Vendor Specific InfoFrame Packet type code */
-#define SP_MPEG_VS_INFOFRAME_TYPE_REG 0xe0
-
-/* MPEG/HDMI Vendor Specific InfoFrame Packet length */
-#define SP_MPEG_VS_INFOFRAME_LEN_REG 0xe2
-
-/* MPEG/HDMI Vendor Specific InfoFrame Packet version number */
-#define SP_MPEG_VS_INFOFRAME_VER_REG 0xe1
-
-/* MPEG/HDMI Vendor Specific InfoFrame Packet content */
-#define SP_MPEG_VS_INFOFRAME_DATA_BASE 0xe4
-
-/* General Control Packet Register */
-#define SP_GENERAL_CTRL_PACKET_REG 0x9f
-#define SP_CLEAR_AVMUTE BIT(4)
-#define SP_SET_AVMUTE BIT(0)
-
-/***************************************************************/
-/* Register definitions for TX_P0 */
-/***************************************************************/
-
-/* HDCP Status Register */
-#define SP_TX_HDCP_STATUS_REG 0x00
-#define SP_AUTH_FAIL BIT(5)
-#define SP_AUTHEN_PASS BIT(1)
-
-/* HDCP Control Register 0 */
-#define SP_HDCP_CTRL0_REG 0x01
-#define SP_RX_REPEATER BIT(6)
-#define SP_RE_AUTH BIT(5)
-#define SP_SW_AUTH_OK BIT(4)
-#define SP_HARD_AUTH_EN BIT(3)
-#define SP_HDCP_ENC_EN BIT(2)
-#define SP_BKSV_SRM_PASS BIT(1)
-#define SP_KSVLIST_VLD BIT(0)
-/* HDCP Function Enabled */
-#define SP_HDCP_FUNCTION_ENABLED (BIT(0) | BIT(1) | BIT(2) | BIT(3))
-
-/* HDCP Receiver BSTATUS Register 0 */
-#define SP_HDCP_RX_BSTATUS0_REG 0x1b
-/* HDCP Receiver BSTATUS Register 1 */
-#define SP_HDCP_RX_BSTATUS1_REG 0x1c
-
-/* HDCP Embedded "Blue Screen" Content Registers */
-#define SP_HDCP_VID0_BLUE_SCREEN_REG 0x2c
-#define SP_HDCP_VID1_BLUE_SCREEN_REG 0x2d
-#define SP_HDCP_VID2_BLUE_SCREEN_REG 0x2e
-
-/* HDCP Wait R0 Timing Register */
-#define SP_HDCP_WAIT_R0_TIME_REG 0x40
-
-/* HDCP Link Integrity Check Timer Register */
-#define SP_HDCP_LINK_CHECK_TIMER_REG 0x41
-
-/* HDCP Repeater Ready Wait Timer Register */
-#define SP_HDCP_RPTR_RDY_WAIT_TIME_REG 0x42
-
-/* HDCP Auto Timer Register */
-#define SP_HDCP_AUTO_TIMER_REG 0x51
-
-/* HDCP Key Status Register */
-#define SP_HDCP_KEY_STATUS_REG 0x5e
-
-/* HDCP Key Command Register */
-#define SP_HDCP_KEY_COMMAND_REG 0x5f
-#define SP_DISABLE_SYNC_HDCP BIT(2)
-
-/* OTP Memory Key Protection Registers */
-#define SP_OTP_KEY_PROTECT1_REG 0x60
-#define SP_OTP_KEY_PROTECT2_REG 0x61
-#define SP_OTP_KEY_PROTECT3_REG 0x62
-#define SP_OTP_PSW1 0xa2
-#define SP_OTP_PSW2 0x7e
-#define SP_OTP_PSW3 0xc6
-
-/* DP System Control Registers */
-#define SP_DP_SYSTEM_CTRL_BASE (0x80 - 1)
-/* Bits for DP System Control Register 2 */
-#define SP_CHA_STA BIT(2)
-/* Bits for DP System Control Register 3 */
-#define SP_HPD_STATUS BIT(6)
-#define SP_STRM_VALID BIT(2)
-/* Bits for DP System Control Register 4 */
-#define SP_ENHANCED_MODE BIT(3)
-
-/* DP Video Control Register */
-#define SP_DP_VIDEO_CTRL_REG 0x84
-#define SP_COLOR_F_MASK 0x06
-#define SP_COLOR_F_SHIFT 1
-#define SP_BPC_MASK 0xe0
-#define SP_BPC_SHIFT 5
-# define SP_BPC_6BITS 0x00
-# define SP_BPC_8BITS 0x01
-# define SP_BPC_10BITS 0x02
-# define SP_BPC_12BITS 0x03
-
-/* DP Audio Control Register */
-#define SP_DP_AUDIO_CTRL_REG 0x87
-#define SP_AUD_EN BIT(0)
-
-/* 10us Pulse Generate Timer Registers */
-#define SP_I2C_GEN_10US_TIMER0_REG 0x88
-#define SP_I2C_GEN_10US_TIMER1_REG 0x89
-
-/* Packet Send Control Register */
-#define SP_PACKET_SEND_CTRL_REG 0x90
-#define SP_AUD_IF_UP BIT(7)
-#define SP_AVI_IF_UD BIT(6)
-#define SP_MPEG_IF_UD BIT(5)
-#define SP_SPD_IF_UD BIT(4)
-#define SP_AUD_IF_EN BIT(3)
-#define SP_AVI_IF_EN BIT(2)
-#define SP_MPEG_IF_EN BIT(1)
-#define SP_SPD_IF_EN BIT(0)
-
-/* DP HDCP Control Register */
-#define SP_DP_HDCP_CTRL_REG 0x92
-#define SP_AUTO_EN BIT(7)
-#define SP_AUTO_START BIT(5)
-#define SP_LINK_POLLING BIT(1)
-
-/* DP Main Link Bandwidth Setting Register */
-#define SP_DP_MAIN_LINK_BW_SET_REG 0xa0
-#define SP_LINK_BW_SET_MASK 0x1f
-#define SP_INITIAL_SLIM_M_AUD_SEL BIT(5)
-
-/* DP Training Pattern Set Register */
-#define SP_DP_TRAINING_PATTERN_SET_REG 0xa2
-
-/* DP Lane 0 Link Training Control Register */
-#define SP_DP_LANE0_LT_CTRL_REG 0xa3
-#define SP_TX_SW_SET_MASK 0x1b
-#define SP_MAX_PRE_REACH BIT(5)
-#define SP_MAX_DRIVE_REACH BIT(4)
-#define SP_PRE_EMP_LEVEL1 BIT(3)
-#define SP_DRVIE_CURRENT_LEVEL1 BIT(0)
-
-/* DP Link Training Control Register */
-#define SP_DP_LT_CTRL_REG 0xa8
-#define SP_LT_ERROR_TYPE_MASK 0x70
-# define SP_LT_NO_ERROR 0x00
-# define SP_LT_AUX_WRITE_ERROR 0x01
-# define SP_LT_MAX_DRIVE_REACHED 0x02
-# define SP_LT_WRONG_LANE_COUNT_SET 0x03
-# define SP_LT_LOOP_SAME_5_TIME 0x04
-# define SP_LT_CR_FAIL_IN_EQ 0x05
-# define SP_LT_EQ_LOOP_5_TIME 0x06
-#define SP_LT_EN BIT(0)
-
-/* DP CEP Training Control Registers */
-#define SP_DP_CEP_TRAINING_CTRL0_REG 0xa9
-#define SP_DP_CEP_TRAINING_CTRL1_REG 0xaa
-
-/* DP Debug Register 1 */
-#define SP_DP_DEBUG1_REG 0xb0
-#define SP_DEBUG_PLL_LOCK BIT(4)
-#define SP_POLLING_EN BIT(1)
-
-/* DP Polling Control Register */
-#define SP_DP_POLLING_CTRL_REG 0xb4
-#define SP_AUTO_POLLING_DISABLE BIT(0)
-
-/* DP Link Debug Control Register */
-#define SP_DP_LINK_DEBUG_CTRL_REG 0xb8
-#define SP_M_VID_DEBUG BIT(5)
-#define SP_NEW_PRBS7 BIT(4)
-#define SP_INSERT_ER BIT(1)
-#define SP_PRBS31_EN BIT(0)
-
-/* AUX Misc control Register */
-#define SP_AUX_MISC_CTRL_REG 0xbf
-
-/* DP PLL control Register */
-#define SP_DP_PLL_CTRL_REG 0xc7
-#define SP_PLL_RST BIT(6)
-
-/* DP Analog Power Down Register */
-#define SP_DP_ANALOG_POWER_DOWN_REG 0xc8
-#define SP_CH0_PD BIT(0)
-
-/* DP Misc Control Register */
-#define SP_DP_MISC_CTRL_REG 0xcd
-#define SP_EQ_TRAINING_LOOP BIT(6)
-
-/* DP Extra I2C Device Address Register */
-#define SP_DP_EXTRA_I2C_DEV_ADDR_REG 0xce
-#define SP_I2C_STRETCH_DISABLE BIT(7)
-
-#define SP_I2C_EXTRA_ADDR 0x50
-
-/* DP Downspread Control Register 1 */
-#define SP_DP_DOWNSPREAD_CTRL1_REG 0xd0
-
-/* DP M Value Calculation Control Register */
-#define SP_DP_M_CALCULATION_CTRL_REG 0xd9
-#define SP_M_GEN_CLK_SEL BIT(0)
-
-/* AUX Channel Access Status Register */
-#define SP_AUX_CH_STATUS_REG 0xe0
-#define SP_AUX_STATUS 0x0f
-
-/* AUX Channel DEFER Control Register */
-#define SP_AUX_DEFER_CTRL_REG 0xe2
-#define SP_DEFER_CTRL_EN BIT(7)
-
-/* DP Buffer Data Count Register */
-#define SP_BUF_DATA_COUNT_REG 0xe4
-#define SP_BUF_DATA_COUNT_MASK 0x1f
-#define SP_BUF_CLR BIT(7)
-
-/* DP AUX Channel Control Register 1 */
-#define SP_DP_AUX_CH_CTRL1_REG 0xe5
-#define SP_AUX_TX_COMM_MASK 0x0f
-#define SP_AUX_LENGTH_MASK 0xf0
-#define SP_AUX_LENGTH_SHIFT 4
-
-/* DP AUX CH Address Register 0 */
-#define SP_AUX_ADDR_7_0_REG 0xe6
-
-/* DP AUX CH Address Register 1 */
-#define SP_AUX_ADDR_15_8_REG 0xe7
-
-/* DP AUX CH Address Register 2 */
-#define SP_AUX_ADDR_19_16_REG 0xe8
-#define SP_AUX_ADDR_19_16_MASK 0x0f
-
-/* DP AUX Channel Control Register 2 */
-#define SP_DP_AUX_CH_CTRL2_REG 0xe9
-#define SP_AUX_SEL_RXCM BIT(6)
-#define SP_AUX_CHSEL BIT(3)
-#define SP_AUX_PN_INV BIT(2)
-#define SP_ADDR_ONLY BIT(1)
-#define SP_AUX_EN BIT(0)
-
-/* DP Video Stream Control InfoFrame Register */
-#define SP_DP_3D_VSC_CTRL_REG 0xea
-#define SP_INFO_FRAME_VSC_EN BIT(0)
-
-/* DP Video Stream Data Byte 1 Register */
-#define SP_DP_VSC_DB1_REG 0xeb
-
-/* DP AUX Channel Control Register 3 */
-#define SP_DP_AUX_CH_CTRL3_REG 0xec
-#define SP_WAIT_COUNTER_7_0_MASK 0xff
-
-/* DP AUX Channel Control Register 4 */
-#define SP_DP_AUX_CH_CTRL4_REG 0xed
-
-/* DP AUX Buffer Data Registers */
-#define SP_DP_BUF_DATA0_REG 0xf0
-
-/***************************************************************/
-/* Register definitions for TX_P2 */
-/***************************************************************/
-
-/*
- * Core Register Definitions
- */
-
-/* Device ID Low Byte Register */
-#define SP_DEVICE_IDL_REG 0x02
-
-/* Device ID High Byte Register */
-#define SP_DEVICE_IDH_REG 0x03
-
-/* Device version register */
-#define SP_DEVICE_VERSION_REG 0x04
-
-/* Power Down Control Register */
-#define SP_POWERDOWN_CTRL_REG 0x05
-#define SP_REGISTER_PD BIT(7)
-#define SP_HDCP_PD BIT(5)
-#define SP_AUDIO_PD BIT(4)
-#define SP_VIDEO_PD BIT(3)
-#define SP_LINK_PD BIT(2)
-#define SP_TOTAL_PD BIT(1)
-
-/* Reset Control Register 1 */
-#define SP_RESET_CTRL1_REG 0x06
-#define SP_MISC_RST BIT(7)
-#define SP_VIDCAP_RST BIT(6)
-#define SP_VIDFIF_RST BIT(5)
-#define SP_AUDFIF_RST BIT(4)
-#define SP_AUDCAP_RST BIT(3)
-#define SP_HDCP_RST BIT(2)
-#define SP_SW_RST BIT(1)
-#define SP_HW_RST BIT(0)
-
-/* Reset Control Register 2 */
-#define SP_RESET_CTRL2_REG 0x07
-#define SP_AUX_RST BIT(2)
-#define SP_SERDES_FIFO_RST BIT(1)
-#define SP_I2C_REG_RST BIT(0)
-
-/* Video Control Register 1 */
-#define SP_VID_CTRL1_REG 0x08
-#define SP_VIDEO_EN BIT(7)
-#define SP_VIDEO_MUTE BIT(2)
-#define SP_DE_GEN BIT(1)
-#define SP_DEMUX BIT(0)
-
-/* Video Control Register 2 */
-#define SP_VID_CTRL2_REG 0x09
-#define SP_IN_COLOR_F_MASK 0x03
-#define SP_IN_YC_BIT_SEL BIT(2)
-#define SP_IN_BPC_MASK 0x70
-#define SP_IN_BPC_SHIFT 4
-# define SP_IN_BPC_12BIT 0x03
-# define SP_IN_BPC_10BIT 0x02
-# define SP_IN_BPC_8BIT 0x01
-# define SP_IN_BPC_6BIT 0x00
-#define SP_IN_D_RANGE BIT(7)
-
-/* Video Control Register 3 */
-#define SP_VID_CTRL3_REG 0x0a
-#define SP_HPD_OUT BIT(6)
-
-/* Video Control Register 5 */
-#define SP_VID_CTRL5_REG 0x0c
-#define SP_CSC_STD_SEL BIT(7)
-#define SP_XVYCC_RNG_LMT BIT(6)
-#define SP_RANGE_Y2R BIT(5)
-#define SP_CSPACE_Y2R BIT(4)
-#define SP_RGB_RNG_LMT BIT(3)
-#define SP_Y_RNG_LMT BIT(2)
-#define SP_RANGE_R2Y BIT(1)
-#define SP_CSPACE_R2Y BIT(0)
-
-/* Video Control Register 6 */
-#define SP_VID_CTRL6_REG 0x0d
-#define SP_TEST_PATTERN_EN BIT(7)
-#define SP_VIDEO_PROCESS_EN BIT(6)
-#define SP_VID_US_MODE BIT(3)
-#define SP_VID_DS_MODE BIT(2)
-#define SP_UP_SAMPLE BIT(1)
-#define SP_DOWN_SAMPLE BIT(0)
-
-/* Video Control Register 8 */
-#define SP_VID_CTRL8_REG 0x0f
-#define SP_VID_VRES_TH BIT(0)
-
-/* Total Line Status Low Byte Register */
-#define SP_TOTAL_LINE_STAL_REG 0x24
-
-/* Total Line Status High Byte Register */
-#define SP_TOTAL_LINE_STAH_REG 0x25
-
-/* Active Line Status Low Byte Register */
-#define SP_ACT_LINE_STAL_REG 0x26
-
-/* Active Line Status High Byte Register */
-#define SP_ACT_LINE_STAH_REG 0x27
-
-/* Vertical Front Porch Status Register */
-#define SP_V_F_PORCH_STA_REG 0x28
-
-/* Vertical SYNC Width Status Register */
-#define SP_V_SYNC_STA_REG 0x29
-
-/* Vertical Back Porch Status Register */
-#define SP_V_B_PORCH_STA_REG 0x2a
-
-/* Total Pixel Status Low Byte Register */
-#define SP_TOTAL_PIXEL_STAL_REG 0x2b
-
-/* Total Pixel Status High Byte Register */
-#define SP_TOTAL_PIXEL_STAH_REG 0x2c
-
-/* Active Pixel Status Low Byte Register */
-#define SP_ACT_PIXEL_STAL_REG 0x2d
-
-/* Active Pixel Status High Byte Register */
-#define SP_ACT_PIXEL_STAH_REG 0x2e
-
-/* Horizontal Front Porch Status Low Byte Register */
-#define SP_H_F_PORCH_STAL_REG 0x2f
-
-/* Horizontal Front Porch Statys High Byte Register */
-#define SP_H_F_PORCH_STAH_REG 0x30
-
-/* Horizontal SYNC Width Status Low Byte Register */
-#define SP_H_SYNC_STAL_REG 0x31
-
-/* Horizontal SYNC Width Status High Byte Register */
-#define SP_H_SYNC_STAH_REG 0x32
-
-/* Horizontal Back Porch Status Low Byte Register */
-#define SP_H_B_PORCH_STAL_REG 0x33
-
-/* Horizontal Back Porch Status High Byte Register */
-#define SP_H_B_PORCH_STAH_REG 0x34
-
-/* InfoFrame AVI Packet DB1 Register */
-#define SP_INFOFRAME_AVI_DB1_REG 0x70
-
-/* Bit Control Specific Register */
-#define SP_BIT_CTRL_SPECIFIC_REG 0x80
-#define SP_BIT_CTRL_SELECT_SHIFT 1
-#define SP_ENABLE_BIT_CTRL BIT(0)
-
-/* InfoFrame Audio Packet DB1 Register */
-#define SP_INFOFRAME_AUD_DB1_REG 0x83
-
-/* InfoFrame MPEG Packet DB1 Register */
-#define SP_INFOFRAME_MPEG_DB1_REG 0xb0
-
-/* Audio Channel Status Registers */
-#define SP_AUD_CH_STATUS_BASE 0xd0
-
-/* Audio Channel Num Register 5 */
-#define SP_I2S_CHANNEL_NUM_MASK 0xe0
-# define SP_I2S_CH_NUM_1 (0x00 << 5)
-# define SP_I2S_CH_NUM_2 (0x01 << 5)
-# define SP_I2S_CH_NUM_3 (0x02 << 5)
-# define SP_I2S_CH_NUM_4 (0x03 << 5)
-# define SP_I2S_CH_NUM_5 (0x04 << 5)
-# define SP_I2S_CH_NUM_6 (0x05 << 5)
-# define SP_I2S_CH_NUM_7 (0x06 << 5)
-# define SP_I2S_CH_NUM_8 (0x07 << 5)
-#define SP_EXT_VUCP BIT(2)
-#define SP_VBIT BIT(1)
-#define SP_AUDIO_LAYOUT BIT(0)
-
-/* Analog Debug Register 2 */
-#define SP_ANALOG_DEBUG2_REG 0xdd
-#define SP_FORCE_SW_OFF_BYPASS 0x20
-#define SP_XTAL_FRQ 0x1c
-# define SP_XTAL_FRQ_19M2 (0x00 << 2)
-# define SP_XTAL_FRQ_24M (0x01 << 2)
-# define SP_XTAL_FRQ_25M (0x02 << 2)
-# define SP_XTAL_FRQ_26M (0x03 << 2)
-# define SP_XTAL_FRQ_27M (0x04 << 2)
-# define SP_XTAL_FRQ_38M4 (0x05 << 2)
-# define SP_XTAL_FRQ_52M (0x06 << 2)
-#define SP_POWERON_TIME_1P5MS 0x03
-
-/* Analog Control 0 Register */
-#define SP_ANALOG_CTRL0_REG 0xe1
-
-/* Common Interrupt Status Register 1 */
-#define SP_COMMON_INT_STATUS_BASE (0xf1 - 1)
-#define SP_PLL_LOCK_CHG 0x40
-
-/* Common Interrupt Status Register 2 */
-#define SP_COMMON_INT_STATUS2 0xf2
-#define SP_HDCP_AUTH_CHG BIT(1)
-#define SP_HDCP_AUTH_DONE BIT(0)
-
-#define SP_HDCP_LINK_CHECK_FAIL BIT(0)
-
-/* Common Interrupt Status Register 4 */
-#define SP_COMMON_INT_STATUS4_REG 0xf4
-#define SP_HPD_IRQ BIT(6)
-#define SP_HPD_ESYNC_ERR BIT(4)
-#define SP_HPD_CHG BIT(2)
-#define SP_HPD_LOST BIT(1)
-#define SP_HPD_PLUG BIT(0)
-
-/* DP Interrupt Status Register */
-#define SP_DP_INT_STATUS1_REG 0xf7
-#define SP_TRAINING_FINISH BIT(5)
-#define SP_POLLING_ERR BIT(4)
-
-/* Common Interrupt Mask Register */
-#define SP_COMMON_INT_MASK_BASE (0xf8 - 1)
-
-#define SP_COMMON_INT_MASK4_REG 0xfb
-
-/* DP Interrupts Mask Register */
-#define SP_DP_INT_MASK1_REG 0xfe
-
-/* Interrupt Control Register */
-#define SP_INT_CTRL_REG 0xff
-
-/***************************************************************/
-/* Register definitions for TX_P1 */
-/***************************************************************/
-
-/* DP TX Link Training Control Register */
-#define SP_DP_TX_LT_CTRL0_REG 0x30
-
-/* PD 1.2 Lint Training 80bit Pattern Register */
-#define SP_DP_LT_80BIT_PATTERN0_REG 0x80
-#define SP_DP_LT_80BIT_PATTERN_REG_NUM 10
-
-/* Audio Interface Control Register 0 */
-#define SP_AUD_INTERFACE_CTRL0_REG 0x5f
-#define SP_AUD_INTERFACE_DISABLE 0x80
-
-/* Audio Interface Control Register 2 */
-#define SP_AUD_INTERFACE_CTRL2_REG 0x60
-#define SP_M_AUD_ADJUST_ST 0x04
-
-/* Audio Interface Control Register 3 */
-#define SP_AUD_INTERFACE_CTRL3_REG 0x62
-
-/* Audio Interface Control Register 4 */
-#define SP_AUD_INTERFACE_CTRL4_REG 0x67
-
-/* Audio Interface Control Register 5 */
-#define SP_AUD_INTERFACE_CTRL5_REG 0x68
-
-/* Audio Interface Control Register 6 */
-#define SP_AUD_INTERFACE_CTRL6_REG 0x69
-
-/* Firmware Version Register */
-#define SP_FW_VER_REG 0xb7
-
-#endif
diff --git a/drivers/gpu/drm/bridge/analogix/Kconfig b/drivers/gpu/drm/bridge/analogix/Kconfig
index e930ff9b5cd4..e1fa7d820373 100644
--- a/drivers/gpu/drm/bridge/analogix/Kconfig
+++ b/drivers/gpu/drm/bridge/analogix/Kconfig
@@ -1,4 +1,27 @@
# SPDX-License-Identifier: GPL-2.0-only
+config DRM_ANALOGIX_ANX6345
+ tristate "Analogix ANX6345 bridge"
+ depends on OF
+ select DRM_ANALOGIX_DP
+ select DRM_KMS_HELPER
+ select REGMAP_I2C
+ help
+ ANX6345 is an ultra-low power Full-HD DisplayPort/eDP
+ transmitter designed for portable devices. The
+ ANX6345 transforms the LVTTL RGB output of an
+ application processor to eDP or DisplayPort.
+
+config DRM_ANALOGIX_ANX78XX
+ tristate "Analogix ANX78XX bridge"
+ select DRM_ANALOGIX_DP
+ select DRM_KMS_HELPER
+ select REGMAP_I2C
+ help
+ ANX78XX is an ultra-low power Full-HD SlimPort transmitter
+ designed for portable devices. The ANX78XX transforms
+ the HDMI output of an application processor to MyDP
+ or DisplayPort.
+
config DRM_ANALOGIX_DP
tristate
depends on DRM
diff --git a/drivers/gpu/drm/bridge/analogix/Makefile b/drivers/gpu/drm/bridge/analogix/Makefile
index fdbf3fd2f087..97669b374098 100644
--- a/drivers/gpu/drm/bridge/analogix/Makefile
+++ b/drivers/gpu/drm/bridge/analogix/Makefile
@@ -1,3 +1,5 @@
# SPDX-License-Identifier: GPL-2.0-only
-analogix_dp-objs := analogix_dp_core.o analogix_dp_reg.o
+analogix_dp-objs := analogix_dp_core.o analogix_dp_reg.o analogix-i2c-dptx.o
+obj-$(CONFIG_DRM_ANALOGIX_ANX6345) += analogix-anx6345.o
+obj-$(CONFIG_DRM_ANALOGIX_ANX78XX) += analogix-anx78xx.o
obj-$(CONFIG_DRM_ANALOGIX_DP) += analogix_dp.o
diff --git a/drivers/gpu/drm/bridge/analogix/analogix-anx6345.c b/drivers/gpu/drm/bridge/analogix/analogix-anx6345.c
new file mode 100644
index 000000000000..56f55c53abfd
--- /dev/null
+++ b/drivers/gpu/drm/bridge/analogix/analogix-anx6345.c
@@ -0,0 +1,817 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright(c) 2016, Analogix Semiconductor.
+ * Copyright(c) 2017, Icenowy Zheng <icenowy@aosc.io>
+ *
+ * Based on anx7808 driver obtained from chromeos with copyright:
+ * Copyright(c) 2013, Google Inc.
+ */
+#include <linux/delay.h>
+#include <linux/err.h>
+#include <linux/gpio/consumer.h>
+#include <linux/i2c.h>
+#include <linux/interrupt.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of_platform.h>
+#include <linux/regmap.h>
+#include <linux/regulator/consumer.h>
+#include <linux/types.h>
+
+#include <drm/drm_atomic_helper.h>
+#include <drm/drm_bridge.h>
+#include <drm/drm_crtc.h>
+#include <drm/drm_crtc_helper.h>
+#include <drm/drm_dp_helper.h>
+#include <drm/drm_edid.h>
+#include <drm/drm_of.h>
+#include <drm/drm_panel.h>
+#include <drm/drm_print.h>
+#include <drm/drm_probe_helper.h>
+
+#include "analogix-i2c-dptx.h"
+#include "analogix-i2c-txcommon.h"
+
+#define POLL_DELAY 50000 /* us */
+#define POLL_TIMEOUT 5000000 /* us */
+
+#define I2C_IDX_DPTX 0
+#define I2C_IDX_TXCOM 1
+
+static const u8 anx6345_i2c_addresses[] = {
+ [I2C_IDX_DPTX] = 0x70,
+ [I2C_IDX_TXCOM] = 0x72,
+};
+#define I2C_NUM_ADDRESSES ARRAY_SIZE(anx6345_i2c_addresses)
+
+struct anx6345 {
+ struct drm_dp_aux aux;
+ struct drm_bridge bridge;
+ struct i2c_client *client;
+ struct edid *edid;
+ struct drm_connector connector;
+ struct drm_panel *panel;
+ struct regulator *dvdd12;
+ struct regulator *dvdd25;
+ struct gpio_desc *gpiod_reset;
+ struct mutex lock; /* protect EDID access */
+
+ /* I2C slave addresses of the ANX6345 are mapped as DPTX and TXCOMMON */
+ struct i2c_client *i2c_clients[I2C_NUM_ADDRESSES];
+ struct regmap *map[I2C_NUM_ADDRESSES];
+
+ u16 chipid;
+ u8 dpcd[DP_RECEIVER_CAP_SIZE];
+
+ bool powered;
+};
+
+static inline struct anx6345 *connector_to_anx6345(struct drm_connector *c)
+{
+ return container_of(c, struct anx6345, connector);
+}
+
+static inline struct anx6345 *bridge_to_anx6345(struct drm_bridge *bridge)
+{
+ return container_of(bridge, struct anx6345, bridge);
+}
+
+static int anx6345_set_bits(struct regmap *map, u8 reg, u8 mask)
+{
+ return regmap_update_bits(map, reg, mask, mask);
+}
+
+static int anx6345_clear_bits(struct regmap *map, u8 reg, u8 mask)
+{
+ return regmap_update_bits(map, reg, mask, 0);
+}
+
+static ssize_t anx6345_aux_transfer(struct drm_dp_aux *aux,
+ struct drm_dp_aux_msg *msg)
+{
+ struct anx6345 *anx6345 = container_of(aux, struct anx6345, aux);
+
+ return anx_dp_aux_transfer(anx6345->map[I2C_IDX_DPTX], msg);
+}
+
+static int anx6345_dp_link_training(struct anx6345 *anx6345)
+{
+ unsigned int value;
+ u8 dp_bw, dpcd[2];
+ int err;
+
+ err = anx6345_clear_bits(anx6345->map[I2C_IDX_TXCOM],
+ SP_POWERDOWN_CTRL_REG,
+ SP_TOTAL_PD);
+ if (err)
+ return err;
+
+ err = drm_dp_dpcd_readb(&anx6345->aux, DP_MAX_LINK_RATE, &dp_bw);
+ if (err < 0)
+ return err;
+
+ switch (dp_bw) {
+ case DP_LINK_BW_1_62:
+ case DP_LINK_BW_2_7:
+ break;
+
+ default:
+ DRM_DEBUG_KMS("DP bandwidth (%#02x) not supported\n", dp_bw);
+ return -EINVAL;
+ }
+
+ err = anx6345_set_bits(anx6345->map[I2C_IDX_TXCOM], SP_VID_CTRL1_REG,
+ SP_VIDEO_MUTE);
+ if (err)
+ return err;
+
+ err = anx6345_clear_bits(anx6345->map[I2C_IDX_TXCOM],
+ SP_VID_CTRL1_REG, SP_VIDEO_EN);
+ if (err)
+ return err;
+
+ /* Get DPCD info */
+ err = drm_dp_dpcd_read(&anx6345->aux, DP_DPCD_REV,
+ &anx6345->dpcd, DP_RECEIVER_CAP_SIZE);
+ if (err < 0) {
+ DRM_ERROR("Failed to read DPCD: %d\n", err);
+ return err;
+ }
+
+ /* Clear channel x SERDES power down */
+ err = anx6345_clear_bits(anx6345->map[I2C_IDX_DPTX],
+ SP_DP_ANALOG_POWER_DOWN_REG, SP_CH0_PD);
+ if (err)
+ return err;
+
+ /*
+ * Power up the sink (DP_SET_POWER register is only available on DPCD
+ * v1.1 and later).
+ */
+ if (anx6345->dpcd[DP_DPCD_REV] >= 0x11) {
+ err = drm_dp_dpcd_readb(&anx6345->aux, DP_SET_POWER, &dpcd[0]);
+ if (err < 0) {
+ DRM_ERROR("Failed to read DP_SET_POWER register: %d\n",
+ err);
+ return err;
+ }
+
+ dpcd[0] &= ~DP_SET_POWER_MASK;
+ dpcd[0] |= DP_SET_POWER_D0;
+
+ err = drm_dp_dpcd_writeb(&anx6345->aux, DP_SET_POWER, dpcd[0]);
+ if (err < 0) {
+ DRM_ERROR("Failed to power up DisplayPort link: %d\n",
+ err);
+ return err;
+ }
+
+ /*
+ * According to the DP 1.1 specification, a "Sink Device must
+ * exit the power saving state within 1 ms" (Section 2.5.3.1,
+ * Table 5-52, "Sink Control Field", register 0x600).
+ */
+ usleep_range(1000, 2000);
+ }
+
+ /* Possibly enable downspread on the sink */
+ err = regmap_write(anx6345->map[I2C_IDX_DPTX],
+ SP_DP_DOWNSPREAD_CTRL1_REG, 0);
+ if (err)
+ return err;
+
+ if (anx6345->dpcd[DP_MAX_DOWNSPREAD] & DP_MAX_DOWNSPREAD_0_5) {
+ DRM_DEBUG("Enable downspread on the sink\n");
+ /* 4000PPM */
+ err = regmap_write(anx6345->map[I2C_IDX_DPTX],
+ SP_DP_DOWNSPREAD_CTRL1_REG, 8);
+ if (err)
+ return err;
+
+ err = drm_dp_dpcd_writeb(&anx6345->aux, DP_DOWNSPREAD_CTRL,
+ DP_SPREAD_AMP_0_5);
+ if (err < 0)
+ return err;
+ } else {
+ err = drm_dp_dpcd_writeb(&anx6345->aux, DP_DOWNSPREAD_CTRL, 0);
+ if (err < 0)
+ return err;
+ }
+
+ /* Set the lane count and the link rate on the sink */
+ if (drm_dp_enhanced_frame_cap(anx6345->dpcd))
+ err = anx6345_set_bits(anx6345->map[I2C_IDX_DPTX],
+ SP_DP_SYSTEM_CTRL_BASE + 4,
+ SP_ENHANCED_MODE);
+ else
+ err = anx6345_clear_bits(anx6345->map[I2C_IDX_DPTX],
+ SP_DP_SYSTEM_CTRL_BASE + 4,
+ SP_ENHANCED_MODE);
+ if (err)
+ return err;
+
+ dpcd[0] = drm_dp_max_link_rate(anx6345->dpcd);
+ dpcd[0] = drm_dp_link_rate_to_bw_code(dpcd[0]);
+ err = regmap_write(anx6345->map[I2C_IDX_DPTX],
+ SP_DP_MAIN_LINK_BW_SET_REG, dpcd[0]);
+ if (err)
+ return err;
+
+ dpcd[1] = drm_dp_max_lane_count(anx6345->dpcd);
+
+ err = regmap_write(anx6345->map[I2C_IDX_DPTX],
+ SP_DP_LANE_COUNT_SET_REG, dpcd[1]);
+ if (err)
+ return err;
+
+ if (drm_dp_enhanced_frame_cap(anx6345->dpcd))
+ dpcd[1] |= DP_LANE_COUNT_ENHANCED_FRAME_EN;
+
+ err = drm_dp_dpcd_write(&anx6345->aux, DP_LINK_BW_SET, dpcd,
+ sizeof(dpcd));
+
+ if (err < 0) {
+ DRM_ERROR("Failed to configure link: %d\n", err);
+ return err;
+ }
+
+ /* Start training on the source */
+ err = regmap_write(anx6345->map[I2C_IDX_DPTX], SP_DP_LT_CTRL_REG,
+ SP_LT_EN);
+ if (err)
+ return err;
+
+ return regmap_read_poll_timeout(anx6345->map[I2C_IDX_DPTX],
+ SP_DP_LT_CTRL_REG,
+ value, !(value & SP_DP_LT_INPROGRESS),
+ POLL_DELAY, POLL_TIMEOUT);
+}
+
+static int anx6345_tx_initialization(struct anx6345 *anx6345)
+{
+ int err, i;
+
+ /* FIXME: colordepth is hardcoded for now */
+ err = regmap_write(anx6345->map[I2C_IDX_TXCOM], SP_VID_CTRL2_REG,
+ SP_IN_BPC_6BIT << SP_IN_BPC_SHIFT);
+ if (err)
+ return err;
+
+ err = regmap_write(anx6345->map[I2C_IDX_DPTX], SP_DP_PLL_CTRL_REG, 0);
+ if (err)
+ return err;
+
+ err = regmap_write(anx6345->map[I2C_IDX_TXCOM],
+ SP_ANALOG_DEBUG1_REG, 0);
+ if (err)
+ return err;
+
+ err = regmap_write(anx6345->map[I2C_IDX_DPTX],
+ SP_DP_LINK_DEBUG_CTRL_REG,
+ SP_NEW_PRBS7 | SP_M_VID_DEBUG);
+ if (err)
+ return err;
+
+ err = regmap_write(anx6345->map[I2C_IDX_DPTX],
+ SP_DP_ANALOG_POWER_DOWN_REG, 0);
+ if (err)
+ return err;
+
+ /* Force HPD */
+ err = anx6345_set_bits(anx6345->map[I2C_IDX_DPTX],
+ SP_DP_SYSTEM_CTRL_BASE + 3,
+ SP_HPD_FORCE | SP_HPD_CTRL);
+ if (err)
+ return err;
+
+ for (i = 0; i < 4; i++) {
+ /* 4 lanes */
+ err = regmap_write(anx6345->map[I2C_IDX_DPTX],
+ SP_DP_LANE0_LT_CTRL_REG + i, 0);
+ if (err)
+ return err;
+ }
+
+ /* Reset AUX */
+ err = anx6345_set_bits(anx6345->map[I2C_IDX_TXCOM],
+ SP_RESET_CTRL2_REG, SP_AUX_RST);
+ if (err)
+ return err;
+
+ return anx6345_clear_bits(anx6345->map[I2C_IDX_TXCOM],
+ SP_RESET_CTRL2_REG, SP_AUX_RST);
+}
+
+static void anx6345_poweron(struct anx6345 *anx6345)
+{
+ int err;
+
+ /* Ensure reset is asserted before starting power on sequence */
+ gpiod_set_value_cansleep(anx6345->gpiod_reset, 1);
+ usleep_range(1000, 2000);
+
+ err = regulator_enable(anx6345->dvdd12);
+ if (err) {
+ DRM_ERROR("Failed to enable dvdd12 regulator: %d\n",
+ err);
+ return;
+ }
+
+ /* T1 - delay between VDD12 and VDD25 should be 0-2ms */
+ usleep_range(1000, 2000);
+
+ err = regulator_enable(anx6345->dvdd25);
+ if (err) {
+ DRM_ERROR("Failed to enable dvdd25 regulator: %d\n",
+ err);
+ return;
+ }
+
+ /* T2 - delay between RESETN and all power rail stable,
+ * should be 2-5ms
+ */
+ usleep_range(2000, 5000);
+
+ gpiod_set_value_cansleep(anx6345->gpiod_reset, 0);
+
+ /* Power on registers module */
+ anx6345_set_bits(anx6345->map[I2C_IDX_TXCOM], SP_POWERDOWN_CTRL_REG,
+ SP_HDCP_PD | SP_AUDIO_PD | SP_VIDEO_PD | SP_LINK_PD);
+ anx6345_clear_bits(anx6345->map[I2C_IDX_TXCOM], SP_POWERDOWN_CTRL_REG,
+ SP_REGISTER_PD | SP_TOTAL_PD);
+
+ if (anx6345->panel)
+ drm_panel_prepare(anx6345->panel);
+
+ anx6345->powered = true;
+}
+
+static void anx6345_poweroff(struct anx6345 *anx6345)
+{
+ int err;
+
+ gpiod_set_value_cansleep(anx6345->gpiod_reset, 1);
+ usleep_range(1000, 2000);
+
+ if (anx6345->panel)
+ drm_panel_unprepare(anx6345->panel);
+
+ err = regulator_disable(anx6345->dvdd25);
+ if (err) {
+ DRM_ERROR("Failed to disable dvdd25 regulator: %d\n",
+ err);
+ return;
+ }
+
+ usleep_range(5000, 10000);
+
+ err = regulator_disable(anx6345->dvdd12);
+ if (err) {
+ DRM_ERROR("Failed to disable dvdd12 regulator: %d\n",
+ err);
+ return;
+ }
+
+ usleep_range(1000, 2000);
+
+ anx6345->powered = false;
+}
+
+static int anx6345_start(struct anx6345 *anx6345)
+{
+ int err;
+
+ if (!anx6345->powered)
+ anx6345_poweron(anx6345);
+
+ /* Power on needed modules */
+ err = anx6345_clear_bits(anx6345->map[I2C_IDX_TXCOM],
+ SP_POWERDOWN_CTRL_REG,
+ SP_VIDEO_PD | SP_LINK_PD);
+ if (err) {
+ anx6345_poweroff(anx6345);
+ return err;
+ }
+
+ err = anx6345_tx_initialization(anx6345);
+ if (err) {
+ DRM_ERROR("Failed eDP transmitter initialization: %d\n", err);
+ anx6345_poweroff(anx6345);
+ return err;
+ }
+
+ err = anx6345_dp_link_training(anx6345);
+ if (err) {
+ DRM_ERROR("Failed link training: %d\n", err);
+ anx6345_poweroff(anx6345);
+ return err;
+ }
+
+ /*
+ * This delay seems to help keep the hardware in a good state. Without
+ * it, there are times where it fails silently.
+ */
+ usleep_range(10000, 15000);
+
+ return 0;
+}
+
+static int anx6345_config_dp_output(struct anx6345 *anx6345)
+{
+ int err;
+
+ err = anx6345_clear_bits(anx6345->map[I2C_IDX_TXCOM], SP_VID_CTRL1_REG,
+ SP_VIDEO_MUTE);
+ if (err)
+ return err;
+
+ /* Enable DP output */
+ err = anx6345_set_bits(anx6345->map[I2C_IDX_TXCOM], SP_VID_CTRL1_REG,
+ SP_VIDEO_EN);
+ if (err)
+ return err;
+
+ /* Force stream valid */
+ return anx6345_set_bits(anx6345->map[I2C_IDX_DPTX],
+ SP_DP_SYSTEM_CTRL_BASE + 3,
+ SP_STRM_FORCE | SP_STRM_CTRL);
+}
+
+static int anx6345_get_downstream_info(struct anx6345 *anx6345)
+{
+ u8 value;
+ int err;
+
+ err = drm_dp_dpcd_readb(&anx6345->aux, DP_SINK_COUNT, &value);
+ if (err < 0) {
+ DRM_ERROR("Get sink count failed %d\n", err);
+ return err;
+ }
+
+ if (!DP_GET_SINK_COUNT(value)) {
+ DRM_ERROR("Downstream disconnected\n");
+ return -EIO;
+ }
+
+ return 0;
+}
+
+static int anx6345_get_modes(struct drm_connector *connector)
+{
+ struct anx6345 *anx6345 = connector_to_anx6345(connector);
+ int err, num_modes = 0;
+ bool power_off = false;
+
+ mutex_lock(&anx6345->lock);
+
+ if (!anx6345->edid) {
+ if (!anx6345->powered) {
+ anx6345_poweron(anx6345);
+ power_off = true;
+ }
+
+ err = anx6345_get_downstream_info(anx6345);
+ if (err) {
+ DRM_ERROR("Failed to get downstream info: %d\n", err);
+ goto unlock;
+ }
+
+ anx6345->edid = drm_get_edid(connector, &anx6345->aux.ddc);
+ if (!anx6345->edid)
+ DRM_ERROR("Failed to read EDID from panel\n");
+
+ err = drm_connector_update_edid_property(connector,
+ anx6345->edid);
+ if (err) {
+ DRM_ERROR("Failed to update EDID property: %d\n", err);
+ goto unlock;
+ }
+ }
+
+ num_modes += drm_add_edid_modes(connector, anx6345->edid);
+
+unlock:
+ if (power_off)
+ anx6345_poweroff(anx6345);
+
+ mutex_unlock(&anx6345->lock);
+
+ if (!num_modes && anx6345->panel)
+ num_modes += drm_panel_get_modes(anx6345->panel, connector);
+
+ return num_modes;
+}
+
+static const struct drm_connector_helper_funcs anx6345_connector_helper_funcs = {
+ .get_modes = anx6345_get_modes,
+};
+
+static void
+anx6345_connector_destroy(struct drm_connector *connector)
+{
+ struct anx6345 *anx6345 = connector_to_anx6345(connector);
+
+ if (anx6345->panel)
+ drm_panel_detach(anx6345->panel);
+ drm_connector_cleanup(connector);
+}
+
+static const struct drm_connector_funcs anx6345_connector_funcs = {
+ .fill_modes = drm_helper_probe_single_connector_modes,
+ .destroy = anx6345_connector_destroy,
+ .reset = drm_atomic_helper_connector_reset,
+ .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
+ .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
+};
+
+static int anx6345_bridge_attach(struct drm_bridge *bridge)
+{
+ struct anx6345 *anx6345 = bridge_to_anx6345(bridge);
+ int err;
+
+ if (!bridge->encoder) {
+ DRM_ERROR("Parent encoder object not found");
+ return -ENODEV;
+ }
+
+ /* Register aux channel */
+ anx6345->aux.name = "DP-AUX";
+ anx6345->aux.dev = &anx6345->client->dev;
+ anx6345->aux.transfer = anx6345_aux_transfer;
+
+ err = drm_dp_aux_register(&anx6345->aux);
+ if (err < 0) {
+ DRM_ERROR("Failed to register aux channel: %d\n", err);
+ return err;
+ }
+
+ err = drm_connector_init(bridge->dev, &anx6345->connector,
+ &anx6345_connector_funcs,
+ DRM_MODE_CONNECTOR_eDP);
+ if (err) {
+ DRM_ERROR("Failed to initialize connector: %d\n", err);
+ return err;
+ }
+
+ drm_connector_helper_add(&anx6345->connector,
+ &anx6345_connector_helper_funcs);
+
+ err = drm_connector_register(&anx6345->connector);
+ if (err) {
+ DRM_ERROR("Failed to register connector: %d\n", err);
+ return err;
+ }
+
+ anx6345->connector.polled = DRM_CONNECTOR_POLL_HPD;
+
+ err = drm_connector_attach_encoder(&anx6345->connector,
+ bridge->encoder);
+ if (err) {
+ DRM_ERROR("Failed to link up connector to encoder: %d\n", err);
+ return err;
+ }
+
+ if (anx6345->panel) {
+ err = drm_panel_attach(anx6345->panel, &anx6345->connector);
+ if (err) {
+ DRM_ERROR("Failed to attach panel: %d\n", err);
+ return err;
+ }
+ }
+
+ return 0;
+}
+
+static enum drm_mode_status
+anx6345_bridge_mode_valid(struct drm_bridge *bridge,
+ const struct drm_display_mode *mode)
+{
+ if (mode->flags & DRM_MODE_FLAG_INTERLACE)
+ return MODE_NO_INTERLACE;
+
+ /* Max 1200p at 5.4 GHz, one lane */
+ if (mode->clock > 154000)
+ return MODE_CLOCK_HIGH;
+
+ return MODE_OK;
+}
+
+static void anx6345_bridge_disable(struct drm_bridge *bridge)
+{
+ struct anx6345 *anx6345 = bridge_to_anx6345(bridge);
+
+ /* Power off all modules except configuration registers access */
+ anx6345_set_bits(anx6345->map[I2C_IDX_TXCOM], SP_POWERDOWN_CTRL_REG,
+ SP_HDCP_PD | SP_AUDIO_PD | SP_VIDEO_PD | SP_LINK_PD);
+ if (anx6345->panel)
+ drm_panel_disable(anx6345->panel);
+
+ if (anx6345->powered)
+ anx6345_poweroff(anx6345);
+}
+
+static void anx6345_bridge_enable(struct drm_bridge *bridge)
+{
+ struct anx6345 *anx6345 = bridge_to_anx6345(bridge);
+ int err;
+
+ if (anx6345->panel)
+ drm_panel_enable(anx6345->panel);
+
+ err = anx6345_start(anx6345);
+ if (err) {
+ DRM_ERROR("Failed to initialize: %d\n", err);
+ return;
+ }
+
+ err = anx6345_config_dp_output(anx6345);
+ if (err)
+ DRM_ERROR("Failed to enable DP output: %d\n", err);
+}
+
+static const struct drm_bridge_funcs anx6345_bridge_funcs = {
+ .attach = anx6345_bridge_attach,
+ .mode_valid = anx6345_bridge_mode_valid,
+ .disable = anx6345_bridge_disable,
+ .enable = anx6345_bridge_enable,
+};
+
+static void unregister_i2c_dummy_clients(struct anx6345 *anx6345)
+{
+ unsigned int i;
+
+ for (i = 1; i < ARRAY_SIZE(anx6345->i2c_clients); i++)
+ if (anx6345->i2c_clients[i] &&
+ anx6345->i2c_clients[i]->addr != anx6345->client->addr)
+ i2c_unregister_device(anx6345->i2c_clients[i]);
+}
+
+static const struct regmap_config anx6345_regmap_config = {
+ .reg_bits = 8,
+ .val_bits = 8,
+ .max_register = 0xff,
+ .cache_type = REGCACHE_NONE,
+};
+
+static const u16 anx6345_chipid_list[] = {
+ 0x6345,
+};
+
+static bool anx6345_get_chip_id(struct anx6345 *anx6345)
+{
+ unsigned int i, idl, idh, version;
+
+ if (regmap_read(anx6345->map[I2C_IDX_TXCOM], SP_DEVICE_IDL_REG, &idl))
+ return false;
+
+ if (regmap_read(anx6345->map[I2C_IDX_TXCOM], SP_DEVICE_IDH_REG, &idh))
+ return false;
+
+ anx6345->chipid = (u8)idl | ((u8)idh << 8);
+
+ if (regmap_read(anx6345->map[I2C_IDX_TXCOM], SP_DEVICE_VERSION_REG,
+ &version))
+ return false;
+
+ for (i = 0; i < ARRAY_SIZE(anx6345_chipid_list); i++) {
+ if (anx6345->chipid == anx6345_chipid_list[i]) {
+ DRM_INFO("Found ANX%x (ver. %d) eDP Transmitter\n",
+ anx6345->chipid, version);
+ return true;
+ }
+ }
+
+ DRM_ERROR("ANX%x (ver. %d) not supported by this driver\n",
+ anx6345->chipid, version);
+
+ return false;
+}
+
+static int anx6345_i2c_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ struct anx6345 *anx6345;
+ struct device *dev;
+ int i, err;
+
+ anx6345 = devm_kzalloc(&client->dev, sizeof(*anx6345), GFP_KERNEL);
+ if (!anx6345)
+ return -ENOMEM;
+
+ mutex_init(&anx6345->lock);
+
+ anx6345->bridge.of_node = client->dev.of_node;
+
+ anx6345->client = client;
+ i2c_set_clientdata(client, anx6345);
+
+ dev = &anx6345->client->dev;
+
+ err = drm_of_find_panel_or_bridge(client->dev.of_node, 1, 0,
+ &anx6345->panel, NULL);
+ if (err == -EPROBE_DEFER)
+ return err;
+
+ if (err)
+ DRM_DEBUG("No panel found\n");
+
+ /* 1.2V digital core power regulator */
+ anx6345->dvdd12 = devm_regulator_get(dev, "dvdd12");
+ if (IS_ERR(anx6345->dvdd12)) {
+ DRM_ERROR("dvdd12-supply not found\n");
+ return PTR_ERR(anx6345->dvdd12);
+ }
+
+ /* 2.5V digital core power regulator */
+ anx6345->dvdd25 = devm_regulator_get(dev, "dvdd25");
+ if (IS_ERR(anx6345->dvdd25)) {
+ DRM_ERROR("dvdd25-supply not found\n");
+ return PTR_ERR(anx6345->dvdd25);
+ }
+
+ /* GPIO for chip reset */
+ anx6345->gpiod_reset = devm_gpiod_get(dev, "reset", GPIOD_OUT_LOW);
+ if (IS_ERR(anx6345->gpiod_reset)) {
+ DRM_ERROR("Reset gpio not found\n");
+ return PTR_ERR(anx6345->gpiod_reset);
+ }
+
+ /* Map slave addresses of ANX6345 */
+ for (i = 0; i < I2C_NUM_ADDRESSES; i++) {
+ if (anx6345_i2c_addresses[i] >> 1 != client->addr)
+ anx6345->i2c_clients[i] = i2c_new_dummy_device(client->adapter,
+ anx6345_i2c_addresses[i] >> 1);
+ else
+ anx6345->i2c_clients[i] = client;
+
+ if (IS_ERR(anx6345->i2c_clients[i])) {
+ err = PTR_ERR(anx6345->i2c_clients[i]);
+ DRM_ERROR("Failed to reserve I2C bus %02x\n",
+ anx6345_i2c_addresses[i]);
+ goto err_unregister_i2c;
+ }
+
+ anx6345->map[i] = devm_regmap_init_i2c(anx6345->i2c_clients[i],
+ &anx6345_regmap_config);
+ if (IS_ERR(anx6345->map[i])) {
+ err = PTR_ERR(anx6345->map[i]);
+ DRM_ERROR("Failed regmap initialization %02x\n",
+ anx6345_i2c_addresses[i]);
+ goto err_unregister_i2c;
+ }
+ }
+
+ /* Look for supported chip ID */
+ anx6345_poweron(anx6345);
+ if (anx6345_get_chip_id(anx6345)) {
+ anx6345->bridge.funcs = &anx6345_bridge_funcs;
+ drm_bridge_add(&anx6345->bridge);
+
+ return 0;
+ } else {
+ anx6345_poweroff(anx6345);
+ err = -ENODEV;
+ }
+
+err_unregister_i2c:
+ unregister_i2c_dummy_clients(anx6345);
+ return err;
+}
+
+static int anx6345_i2c_remove(struct i2c_client *client)
+{
+ struct anx6345 *anx6345 = i2c_get_clientdata(client);
+
+ drm_bridge_remove(&anx6345->bridge);
+
+ unregister_i2c_dummy_clients(anx6345);
+
+ kfree(anx6345->edid);
+
+ mutex_destroy(&anx6345->lock);
+
+ return 0;
+}
+
+static const struct i2c_device_id anx6345_id[] = {
+ { "anx6345", 0 },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(i2c, anx6345_id);
+
+static const struct of_device_id anx6345_match_table[] = {
+ { .compatible = "analogix,anx6345", },
+ { /* sentinel */ },
+};
+MODULE_DEVICE_TABLE(of, anx6345_match_table);
+
+static struct i2c_driver anx6345_driver = {
+ .driver = {
+ .name = "anx6345",
+ .of_match_table = of_match_ptr(anx6345_match_table),
+ },
+ .probe = anx6345_i2c_probe,
+ .remove = anx6345_i2c_remove,
+ .id_table = anx6345_id,
+};
+module_i2c_driver(anx6345_driver);
+
+MODULE_DESCRIPTION("ANX6345 eDP Transmitter driver");
+MODULE_AUTHOR("Icenowy Zheng <icenowy@aosc.io>");
+MODULE_LICENSE("GPL v2");
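
The probe error path above relies on unregister_i2c_dummy_clients(), whose definition sits earlier in this new file, outside the hunk quoted here. A plausible shape for it, releasing everything except the primary client that probe() was bound to; treat the body as an assumption rather than literal patch content:

static void unregister_i2c_dummy_clients(struct anx6345 *anx6345)
{
	unsigned int i;

	/* Only the clients created with i2c_new_dummy_device() are
	 * unregistered; the I2C core owns the primary client's lifetime. */
	for (i = 0; i < I2C_NUM_ADDRESSES; i++)
		if (anx6345->i2c_clients[i] &&
		    anx6345->i2c_clients[i] != anx6345->client)
			i2c_unregister_device(anx6345->i2c_clients[i]);
}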
diff --git a/drivers/gpu/drm/bridge/analogix-anx78xx.c b/drivers/gpu/drm/bridge/analogix/analogix-anx78xx.c
index 274989f96a91..41867be03751 100644
--- a/drivers/gpu/drm/bridge/analogix-anx78xx.c
+++ b/drivers/gpu/drm/bridge/analogix/analogix-anx78xx.c
@@ -36,8 +36,6 @@
#define I2C_IDX_RX_P1 4
#define XTAL_CLK 270 /* 27M */
-#define AUX_CH_BUFFER_SIZE 16
-#define AUX_WAIT_TIMEOUT_MS 15
static const u8 anx7808_i2c_addresses[] = {
[I2C_IDX_TX_P0] = 0x78,
@@ -107,153 +105,11 @@ static int anx78xx_clear_bits(struct regmap *map, u8 reg, u8 mask)
return regmap_update_bits(map, reg, mask, 0);
}
-static bool anx78xx_aux_op_finished(struct anx78xx *anx78xx)
-{
- unsigned int value;
- int err;
-
- err = regmap_read(anx78xx->map[I2C_IDX_TX_P0], SP_DP_AUX_CH_CTRL2_REG,
- &value);
- if (err < 0)
- return false;
-
- return (value & SP_AUX_EN) == 0;
-}
-
-static int anx78xx_aux_wait(struct anx78xx *anx78xx)
-{
- unsigned long timeout;
- unsigned int status;
- int err;
-
- timeout = jiffies + msecs_to_jiffies(AUX_WAIT_TIMEOUT_MS) + 1;
-
- while (!anx78xx_aux_op_finished(anx78xx)) {
- if (time_after(jiffies, timeout)) {
- if (!anx78xx_aux_op_finished(anx78xx)) {
- DRM_ERROR("Timed out waiting AUX to finish\n");
- return -ETIMEDOUT;
- }
-
- break;
- }
-
- usleep_range(1000, 2000);
- }
-
- /* Read the AUX channel access status */
- err = regmap_read(anx78xx->map[I2C_IDX_TX_P0], SP_AUX_CH_STATUS_REG,
- &status);
- if (err < 0) {
- DRM_ERROR("Failed to read from AUX channel: %d\n", err);
- return err;
- }
-
- if (status & SP_AUX_STATUS) {
- DRM_ERROR("Failed to wait for AUX channel (status: %02x)\n",
- status);
- return -ETIMEDOUT;
- }
-
- return 0;
-}
-
-static int anx78xx_aux_address(struct anx78xx *anx78xx, unsigned int addr)
-{
- int err;
-
- err = regmap_write(anx78xx->map[I2C_IDX_TX_P0], SP_AUX_ADDR_7_0_REG,
- addr & 0xff);
- if (err)
- return err;
-
- err = regmap_write(anx78xx->map[I2C_IDX_TX_P0], SP_AUX_ADDR_15_8_REG,
- (addr & 0xff00) >> 8);
- if (err)
- return err;
-
- /*
- * DP AUX CH Address Register #2, only update bits[3:0]
- * [7:4] RESERVED
- * [3:0] AUX_ADDR[19:16], Register control AUX CH address.
- */
- err = regmap_update_bits(anx78xx->map[I2C_IDX_TX_P0],
- SP_AUX_ADDR_19_16_REG,
- SP_AUX_ADDR_19_16_MASK,
- (addr & 0xf0000) >> 16);
-
- if (err)
- return err;
-
- return 0;
-}
-
static ssize_t anx78xx_aux_transfer(struct drm_dp_aux *aux,
struct drm_dp_aux_msg *msg)
{
struct anx78xx *anx78xx = container_of(aux, struct anx78xx, aux);
- u8 ctrl1 = msg->request;
- u8 ctrl2 = SP_AUX_EN;
- u8 *buffer = msg->buffer;
- int err;
-
- /* The DP AUX transmit and receive buffer has 16 bytes. */
- if (WARN_ON(msg->size > AUX_CH_BUFFER_SIZE))
- return -E2BIG;
-
- /* Zero-sized messages specify address-only transactions. */
- if (msg->size < 1)
- ctrl2 |= SP_ADDR_ONLY;
- else /* For non-zero-sized set the length field. */
- ctrl1 |= (msg->size - 1) << SP_AUX_LENGTH_SHIFT;
-
- if ((msg->request & DP_AUX_I2C_READ) == 0) {
- /* When WRITE | MOT write values to data buffer */
- err = regmap_bulk_write(anx78xx->map[I2C_IDX_TX_P0],
- SP_DP_BUF_DATA0_REG, buffer,
- msg->size);
- if (err)
- return err;
- }
-
- /* Write address and request */
- err = anx78xx_aux_address(anx78xx, msg->address);
- if (err)
- return err;
-
- err = regmap_write(anx78xx->map[I2C_IDX_TX_P0], SP_DP_AUX_CH_CTRL1_REG,
- ctrl1);
- if (err)
- return err;
-
- /* Start transaction */
- err = regmap_update_bits(anx78xx->map[I2C_IDX_TX_P0],
- SP_DP_AUX_CH_CTRL2_REG, SP_ADDR_ONLY |
- SP_AUX_EN, ctrl2);
- if (err)
- return err;
-
- err = anx78xx_aux_wait(anx78xx);
- if (err)
- return err;
-
- msg->reply = DP_AUX_I2C_REPLY_ACK;
-
- if ((msg->size > 0) && (msg->request & DP_AUX_I2C_READ)) {
- /* Read values from data buffer */
- err = regmap_bulk_read(anx78xx->map[I2C_IDX_TX_P0],
- SP_DP_BUF_DATA0_REG, buffer,
- msg->size);
- if (err)
- return err;
- }
-
- err = anx78xx_clear_bits(anx78xx->map[I2C_IDX_TX_P0],
- SP_DP_AUX_CH_CTRL2_REG, SP_ADDR_ONLY);
- if (err)
- return err;
-
- return msg->size;
+ return anx_dp_aux_transfer(anx78xx->map[I2C_IDX_TX_P0], msg);
}
static int anx78xx_set_hpd(struct anx78xx *anx78xx)
diff --git a/drivers/gpu/drm/bridge/analogix/analogix-anx78xx.h b/drivers/gpu/drm/bridge/analogix/analogix-anx78xx.h
new file mode 100644
index 000000000000..db2a2725acb2
--- /dev/null
+++ b/drivers/gpu/drm/bridge/analogix/analogix-anx78xx.h
@@ -0,0 +1,249 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright(c) 2016, Analogix Semiconductor. All rights reserved.
+ */
+
+#ifndef __ANX78xx_H
+#define __ANX78xx_H
+
+#include "analogix-i2c-dptx.h"
+#include "analogix-i2c-txcommon.h"
+
+/***************************************************************/
+/* Register definitions for RX_P0 */
+/***************************************************************/
+
+/*
+ * System Control and Status
+ */
+
+/* Software Reset Register 1 */
+#define SP_SOFTWARE_RESET1_REG 0x11
+#define SP_VIDEO_RST BIT(4)
+#define SP_HDCP_MAN_RST BIT(2)
+#define SP_TMDS_RST BIT(1)
+#define SP_SW_MAN_RST BIT(0)
+
+/* System Status Register */
+#define SP_SYSTEM_STATUS_REG 0x14
+#define SP_TMDS_CLOCK_DET BIT(1)
+#define SP_TMDS_DE_DET BIT(0)
+
+/* HDMI Status Register */
+#define SP_HDMI_STATUS_REG 0x15
+#define SP_HDMI_AUD_LAYOUT BIT(3)
+#define SP_HDMI_DET BIT(0)
+# define SP_DVI_MODE 0
+# define SP_HDMI_MODE 1
+
+/* HDMI Mute Control Register */
+#define SP_HDMI_MUTE_CTRL_REG 0x16
+#define SP_AUD_MUTE BIT(1)
+#define SP_VID_MUTE BIT(0)
+
+/* System Power Down Register 1 */
+#define SP_SYSTEM_POWER_DOWN1_REG 0x18
+#define SP_PWDN_CTRL BIT(0)
+
+/*
+ * Audio and Video Auto Control
+ */
+
+/* Auto Audio and Video Control register */
+#define SP_AUDVID_CTRL_REG 0x20
+#define SP_AVC_OE BIT(7)
+#define SP_AAC_OE BIT(6)
+#define SP_AVC_EN BIT(1)
+#define SP_AAC_EN BIT(0)
+
+/* Audio Exception Enable Registers */
+#define SP_AUD_EXCEPTION_ENABLE_BASE (0x24 - 1)
+/* Bits for Audio Exception Enable Register 3 */
+#define SP_AEC_EN21 BIT(5)
+
+/*
+ * Interrupt
+ */
+
+/* Interrupt Status Register 1 */
+#define SP_INT_STATUS1_REG 0x31
+/* Bits for Interrupt Status Register 1 */
+#define SP_HDMI_DVI BIT(7)
+#define SP_CKDT_CHG BIT(6)
+#define SP_SCDT_CHG BIT(5)
+#define SP_PCLK_CHG BIT(4)
+#define SP_PLL_UNLOCK BIT(3)
+#define SP_CABLE_PLUG_CHG BIT(2)
+#define SP_SET_MUTE BIT(1)
+#define SP_SW_INTR BIT(0)
+/* Bits for Interrupt Status Register 2 */
+#define SP_HDCP_ERR BIT(5)
+#define SP_AUDIO_SAMPLE_CHG BIT(0) /* undocumented */
+/* Bits for Interrupt Status Register 3 */
+#define SP_AUD_MODE_CHG BIT(0)
+/* Bits for Interrupt Status Register 5 */
+#define SP_AUDIO_RCV BIT(0)
+/* Bits for Interrupt Status Register 6 */
+#define SP_INT_STATUS6_REG 0x36
+#define SP_CTS_RCV BIT(7)
+#define SP_NEW_AUD_PKT BIT(4)
+#define SP_NEW_AVI_PKT BIT(1)
+#define SP_NEW_CP_PKT BIT(0)
+/* Bits for Interrupt Status Register 7 */
+#define SP_NO_VSI BIT(7)
+#define SP_NEW_VS BIT(4)
+
+/* Interrupt Mask 1 Status Registers */
+#define SP_INT_MASK1_REG 0x41
+
+/* HDMI US TIMER Control Register */
+#define SP_HDMI_US_TIMER_CTRL_REG 0x49
+#define SP_MS_TIMER_MARGIN_10_8_MASK 0x07
+
+/*
+ * TMDS Control
+ */
+
+/* TMDS Control Registers */
+#define SP_TMDS_CTRL_BASE (0x50 - 1)
+/* Bits for TMDS Control Register 7 */
+#define SP_PD_RT BIT(0)
+
+/*
+ * Video Control
+ */
+
+/* Video Status Register */
+#define SP_VIDEO_STATUS_REG 0x70
+#define SP_COLOR_DEPTH_MASK 0xf0
+#define SP_COLOR_DEPTH_SHIFT 4
+# define SP_COLOR_DEPTH_MODE_LEGACY 0x00
+# define SP_COLOR_DEPTH_MODE_24BIT 0x04
+# define SP_COLOR_DEPTH_MODE_30BIT 0x05
+# define SP_COLOR_DEPTH_MODE_36BIT 0x06
+# define SP_COLOR_DEPTH_MODE_48BIT 0x07
+
+/* Video Data Range Control Register */
+#define SP_VID_DATA_RANGE_CTRL_REG 0x83
+#define SP_R2Y_INPUT_LIMIT BIT(1)
+
+/* Pixel Clock High Resolution Counter Registers */
+#define SP_PCLK_HIGHRES_CNT_BASE (0x8c - 1)
+
+/*
+ * Audio Control
+ */
+
+/* Number of Audio Channels Status Registers */
+#define SP_AUD_CH_STATUS_REG_NUM 6
+
+/* Audio IN S/PDIF Channel Status Registers */
+#define SP_AUD_SPDIF_CH_STATUS_BASE 0xc7
+
+/* Audio IN S/PDIF Channel Status Register 4 */
+#define SP_FS_FREQ_MASK 0x0f
+# define SP_FS_FREQ_44100HZ 0x00
+# define SP_FS_FREQ_48000HZ 0x02
+# define SP_FS_FREQ_32000HZ 0x03
+# define SP_FS_FREQ_88200HZ 0x08
+# define SP_FS_FREQ_96000HZ 0x0a
+# define SP_FS_FREQ_176400HZ 0x0c
+# define SP_FS_FREQ_192000HZ 0x0e
+
+/*
+ * Miscellaneous Control Block
+ */
+
+/* CHIP Control Register */
+#define SP_CHIP_CTRL_REG 0xe3
+#define SP_MAN_HDMI5V_DET BIT(3)
+#define SP_PLLLOCK_CKDT_EN BIT(2)
+#define SP_ANALOG_CKDT_EN BIT(1)
+#define SP_DIGITAL_CKDT_EN BIT(0)
+
+/* Packet Receiving Status Register */
+#define SP_PACKET_RECEIVING_STATUS_REG 0xf3
+#define SP_AVI_RCVD BIT(5)
+#define SP_VSI_RCVD BIT(1)
+
+/***************************************************************/
+/* Register definitions for RX_P1 */
+/***************************************************************/
+
+/* HDCP BCAPS Shadow Register */
+#define SP_HDCP_BCAPS_SHADOW_REG 0x2a
+#define SP_BCAPS_REPEATER BIT(5)
+
+/* HDCP Status Register */
+#define SP_RX_HDCP_STATUS_REG 0x3f
+#define SP_AUTH_EN BIT(4)
+
+/*
+ * InfoFrame and Control Packet Registers
+ */
+
+/* AVI InfoFrame packet checksum */
+#define SP_AVI_INFOFRAME_CHECKSUM 0xa3
+
+/* AVI InfoFrame Registers */
+#define SP_AVI_INFOFRAME_DATA_BASE 0xa4
+
+#define SP_AVI_COLOR_F_MASK 0x60
+#define SP_AVI_COLOR_F_SHIFT 5
+
+/* Audio InfoFrame Registers */
+#define SP_AUD_INFOFRAME_DATA_BASE 0xc4
+#define SP_AUD_INFOFRAME_LAYOUT_MASK 0x0f
+
+/* MPEG/HDMI Vendor Specific InfoFrame Packet type code */
+#define SP_MPEG_VS_INFOFRAME_TYPE_REG 0xe0
+
+/* MPEG/HDMI Vendor Specific InfoFrame Packet length */
+#define SP_MPEG_VS_INFOFRAME_LEN_REG 0xe2
+
+/* MPEG/HDMI Vendor Specific InfoFrame Packet version number */
+#define SP_MPEG_VS_INFOFRAME_VER_REG 0xe1
+
+/* MPEG/HDMI Vendor Specific InfoFrame Packet content */
+#define SP_MPEG_VS_INFOFRAME_DATA_BASE 0xe4
+
+/* General Control Packet Register */
+#define SP_GENERAL_CTRL_PACKET_REG 0x9f
+#define SP_CLEAR_AVMUTE BIT(4)
+#define SP_SET_AVMUTE BIT(0)
+
+/***************************************************************/
+/* Register definitions for TX_P1 */
+/***************************************************************/
+
+/* DP TX Link Training Control Register */
+#define SP_DP_TX_LT_CTRL0_REG 0x30
+
+/* DP 1.2 Link Training 80bit Pattern Register */
+#define SP_DP_LT_80BIT_PATTERN0_REG 0x80
+#define SP_DP_LT_80BIT_PATTERN_REG_NUM 10
+
+/* Audio Interface Control Register 0 */
+#define SP_AUD_INTERFACE_CTRL0_REG 0x5f
+#define SP_AUD_INTERFACE_DISABLE 0x80
+
+/* Audio Interface Control Register 2 */
+#define SP_AUD_INTERFACE_CTRL2_REG 0x60
+#define SP_M_AUD_ADJUST_ST 0x04
+
+/* Audio Interface Control Register 3 */
+#define SP_AUD_INTERFACE_CTRL3_REG 0x62
+
+/* Audio Interface Control Register 4 */
+#define SP_AUD_INTERFACE_CTRL4_REG 0x67
+
+/* Audio Interface Control Register 5 */
+#define SP_AUD_INTERFACE_CTRL5_REG 0x68
+
+/* Audio Interface Control Register 6 */
+#define SP_AUD_INTERFACE_CTRL6_REG 0x69
+
+/* Firmware Version Register */
+#define SP_FW_VER_REG 0xb7
+
+#endif
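
The mask/shift pairs in this header follow the driver's usual decode pattern. A minimal sketch of reading the input color depth, where map stands for the device's RX_P0 regmap and error handling is elided:

unsigned int status;
u8 depth;

regmap_read(map, SP_VIDEO_STATUS_REG, &status);
depth = (status & SP_COLOR_DEPTH_MASK) >> SP_COLOR_DEPTH_SHIFT;
/* depth now holds one of the SP_COLOR_DEPTH_MODE_* values */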
diff --git a/drivers/gpu/drm/bridge/analogix/analogix-i2c-dptx.c b/drivers/gpu/drm/bridge/analogix/analogix-i2c-dptx.c
new file mode 100644
index 000000000000..fe40bab21530
--- /dev/null
+++ b/drivers/gpu/drm/bridge/analogix/analogix-i2c-dptx.c
@@ -0,0 +1,165 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright(c) 2016, Analogix Semiconductor.
+ *
+ * Based on anx7808 driver obtained from chromeos with copyright:
+ * Copyright(c) 2013, Google Inc.
+ */
+#include <linux/regmap.h>
+
+#include <drm/drm.h>
+#include <drm/drm_dp_helper.h>
+#include <drm/drm_print.h>
+
+#include "analogix-i2c-dptx.h"
+
+#define AUX_WAIT_TIMEOUT_MS 15
+#define AUX_CH_BUFFER_SIZE 16
+
+static int anx_i2c_dp_clear_bits(struct regmap *map, u8 reg, u8 mask)
+{
+ return regmap_update_bits(map, reg, mask, 0);
+}
+
+static bool anx_dp_aux_op_finished(struct regmap *map_dptx)
+{
+ unsigned int value;
+ int err;
+
+ err = regmap_read(map_dptx, SP_DP_AUX_CH_CTRL2_REG, &value);
+ if (err < 0)
+ return false;
+
+ return (value & SP_AUX_EN) == 0;
+}
+
+static int anx_dp_aux_wait(struct regmap *map_dptx)
+{
+ unsigned long timeout;
+ unsigned int status;
+ int err;
+
+ timeout = jiffies + msecs_to_jiffies(AUX_WAIT_TIMEOUT_MS) + 1;
+
+ while (!anx_dp_aux_op_finished(map_dptx)) {
+ if (time_after(jiffies, timeout)) {
+ if (!anx_dp_aux_op_finished(map_dptx)) {
+ DRM_ERROR("Timed out waiting AUX to finish\n");
+ return -ETIMEDOUT;
+ }
+
+ break;
+ }
+
+ usleep_range(1000, 2000);
+ }
+
+ /* Read the AUX channel access status */
+ err = regmap_read(map_dptx, SP_AUX_CH_STATUS_REG, &status);
+ if (err < 0) {
+ DRM_ERROR("Failed to read from AUX channel: %d\n", err);
+ return err;
+ }
+
+ if (status & SP_AUX_STATUS) {
+ DRM_ERROR("Failed to wait for AUX channel (status: %02x)\n",
+ status);
+ return -ETIMEDOUT;
+ }
+
+ return 0;
+}
+
+static int anx_dp_aux_address(struct regmap *map_dptx, unsigned int addr)
+{
+ int err;
+
+ err = regmap_write(map_dptx, SP_AUX_ADDR_7_0_REG, addr & 0xff);
+ if (err)
+ return err;
+
+ err = regmap_write(map_dptx, SP_AUX_ADDR_15_8_REG,
+ (addr & 0xff00) >> 8);
+ if (err)
+ return err;
+
+ /*
+ * DP AUX CH Address Register #2, only update bits[3:0]
+ * [7:4] RESERVED
+ * [3:0] AUX_ADDR[19:16], Register control AUX CH address.
+ */
+ err = regmap_update_bits(map_dptx, SP_AUX_ADDR_19_16_REG,
+ SP_AUX_ADDR_19_16_MASK,
+ (addr & 0xf0000) >> 16);
+
+ if (err)
+ return err;
+
+ return 0;
+}
+
+ssize_t anx_dp_aux_transfer(struct regmap *map_dptx,
+ struct drm_dp_aux_msg *msg)
+{
+ u8 ctrl1 = msg->request;
+ u8 ctrl2 = SP_AUX_EN;
+ u8 *buffer = msg->buffer;
+ int err;
+
+ /* The DP AUX transmit and receive buffer has 16 bytes. */
+ if (WARN_ON(msg->size > AUX_CH_BUFFER_SIZE))
+ return -E2BIG;
+
+ /* Zero-sized messages specify address-only transactions. */
+ if (msg->size < 1)
+ ctrl2 |= SP_ADDR_ONLY;
+ else /* For non-zero-sized set the length field. */
+ ctrl1 |= (msg->size - 1) << SP_AUX_LENGTH_SHIFT;
+
+ if ((msg->size > 0) && ((msg->request & DP_AUX_I2C_READ) == 0)) {
+ /* When WRITE | MOT write values to data buffer */
+ err = regmap_bulk_write(map_dptx,
+ SP_DP_BUF_DATA0_REG, buffer,
+ msg->size);
+ if (err)
+ return err;
+ }
+
+ /* Write address and request */
+ err = anx_dp_aux_address(map_dptx, msg->address);
+ if (err)
+ return err;
+
+ err = regmap_write(map_dptx, SP_DP_AUX_CH_CTRL1_REG, ctrl1);
+ if (err)
+ return err;
+
+ /* Start transaction */
+ err = regmap_update_bits(map_dptx, SP_DP_AUX_CH_CTRL2_REG,
+ SP_ADDR_ONLY | SP_AUX_EN, ctrl2);
+ if (err)
+ return err;
+
+ err = anx_dp_aux_wait(map_dptx);
+ if (err)
+ return err;
+
+ msg->reply = DP_AUX_I2C_REPLY_ACK;
+
+ if ((msg->size > 0) && (msg->request & DP_AUX_I2C_READ)) {
+ /* Read values from data buffer */
+ err = regmap_bulk_read(map_dptx,
+ SP_DP_BUF_DATA0_REG, buffer,
+ msg->size);
+ if (err)
+ return err;
+ }
+
+ err = anx_i2c_dp_clear_bits(map_dptx, SP_DP_AUX_CH_CTRL2_REG,
+ SP_ADDR_ONLY);
+ if (err)
+ return err;
+
+ return msg->size;
+}
+EXPORT_SYMBOL_GPL(anx_dp_aux_transfer);
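
With the helper exported, any Analogix bridge driver can reduce its drm_dp_aux .transfer hook to a one-line delegation, exactly as the anx78xx conversion above does. A minimal sketch; struct my_bridge and its fields are illustrative, not part of this patch:

#include <linux/regmap.h>
#include <drm/drm_dp_helper.h>
#include "analogix-i2c-dptx.h"

struct my_bridge {
	struct drm_dp_aux aux;
	struct regmap *map_dptx;	/* regmap of the TX_P0 i2c client */
};

static ssize_t my_bridge_aux_transfer(struct drm_dp_aux *aux,
				      struct drm_dp_aux_msg *msg)
{
	struct my_bridge *ctx = container_of(aux, struct my_bridge, aux);

	/* Buffering, addressing and completion polling are all shared */
	return anx_dp_aux_transfer(ctx->map_dptx, msg);
}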
diff --git a/drivers/gpu/drm/bridge/analogix/analogix-i2c-dptx.h b/drivers/gpu/drm/bridge/analogix/analogix-i2c-dptx.h
new file mode 100644
index 000000000000..663c4bea6e70
--- /dev/null
+++ b/drivers/gpu/drm/bridge/analogix/analogix-i2c-dptx.h
@@ -0,0 +1,256 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright(c) 2016, Analogix Semiconductor.
+ *
+ * Based on anx7808 driver obtained from chromeos with copyright:
+ * Copyright(c) 2013, Google Inc.
+ */
+#ifndef _ANALOGIX_I2C_DPTX_H_
+#define _ANALOGIX_I2C_DPTX_H_
+
+/***************************************************************/
+/* Register definitions for TX_P0 */
+/***************************************************************/
+
+/* HDCP Status Register */
+#define SP_TX_HDCP_STATUS_REG 0x00
+#define SP_AUTH_FAIL BIT(5)
+#define SP_AUTHEN_PASS BIT(1)
+
+/* HDCP Control Register 0 */
+#define SP_HDCP_CTRL0_REG 0x01
+#define SP_RX_REPEATER BIT(6)
+#define SP_RE_AUTH BIT(5)
+#define SP_SW_AUTH_OK BIT(4)
+#define SP_HARD_AUTH_EN BIT(3)
+#define SP_HDCP_ENC_EN BIT(2)
+#define SP_BKSV_SRM_PASS BIT(1)
+#define SP_KSVLIST_VLD BIT(0)
+/* HDCP Function Enabled */
+#define SP_HDCP_FUNCTION_ENABLED (BIT(0) | BIT(1) | BIT(2) | BIT(3))
+
+/* HDCP Receiver BSTATUS Register 0 */
+#define SP_HDCP_RX_BSTATUS0_REG 0x1b
+/* HDCP Receiver BSTATUS Register 1 */
+#define SP_HDCP_RX_BSTATUS1_REG 0x1c
+
+/* HDCP Embedded "Blue Screen" Content Registers */
+#define SP_HDCP_VID0_BLUE_SCREEN_REG 0x2c
+#define SP_HDCP_VID1_BLUE_SCREEN_REG 0x2d
+#define SP_HDCP_VID2_BLUE_SCREEN_REG 0x2e
+
+/* HDCP Wait R0 Timing Register */
+#define SP_HDCP_WAIT_R0_TIME_REG 0x40
+
+/* HDCP Link Integrity Check Timer Register */
+#define SP_HDCP_LINK_CHECK_TIMER_REG 0x41
+
+/* HDCP Repeater Ready Wait Timer Register */
+#define SP_HDCP_RPTR_RDY_WAIT_TIME_REG 0x42
+
+/* HDCP Auto Timer Register */
+#define SP_HDCP_AUTO_TIMER_REG 0x51
+
+/* HDCP Key Status Register */
+#define SP_HDCP_KEY_STATUS_REG 0x5e
+
+/* HDCP Key Command Register */
+#define SP_HDCP_KEY_COMMAND_REG 0x5f
+#define SP_DISABLE_SYNC_HDCP BIT(2)
+
+/* OTP Memory Key Protection Registers */
+#define SP_OTP_KEY_PROTECT1_REG 0x60
+#define SP_OTP_KEY_PROTECT2_REG 0x61
+#define SP_OTP_KEY_PROTECT3_REG 0x62
+#define SP_OTP_PSW1 0xa2
+#define SP_OTP_PSW2 0x7e
+#define SP_OTP_PSW3 0xc6
+
+/* DP System Control Registers */
+#define SP_DP_SYSTEM_CTRL_BASE (0x80 - 1)
+/* Bits for DP System Control Register 2 */
+#define SP_CHA_STA BIT(2)
+/* Bits for DP System Control Register 3 */
+#define SP_HPD_STATUS BIT(6)
+#define SP_HPD_FORCE BIT(5)
+#define SP_HPD_CTRL BIT(4)
+#define SP_STRM_VALID BIT(2)
+#define SP_STRM_FORCE BIT(1)
+#define SP_STRM_CTRL BIT(0)
+/* Bits for DP System Control Register 4 */
+#define SP_ENHANCED_MODE BIT(3)
+
+/* DP Video Control Register */
+#define SP_DP_VIDEO_CTRL_REG 0x84
+#define SP_COLOR_F_MASK 0x06
+#define SP_COLOR_F_SHIFT 1
+#define SP_BPC_MASK 0xe0
+#define SP_BPC_SHIFT 5
+# define SP_BPC_6BITS 0x00
+# define SP_BPC_8BITS 0x01
+# define SP_BPC_10BITS 0x02
+# define SP_BPC_12BITS 0x03
+
+/* DP Audio Control Register */
+#define SP_DP_AUDIO_CTRL_REG 0x87
+#define SP_AUD_EN BIT(0)
+
+/* 10us Pulse Generate Timer Registers */
+#define SP_I2C_GEN_10US_TIMER0_REG 0x88
+#define SP_I2C_GEN_10US_TIMER1_REG 0x89
+
+/* Packet Send Control Register */
+#define SP_PACKET_SEND_CTRL_REG 0x90
+#define SP_AUD_IF_UP BIT(7)
+#define SP_AVI_IF_UD BIT(6)
+#define SP_MPEG_IF_UD BIT(5)
+#define SP_SPD_IF_UD BIT(4)
+#define SP_AUD_IF_EN BIT(3)
+#define SP_AVI_IF_EN BIT(2)
+#define SP_MPEG_IF_EN BIT(1)
+#define SP_SPD_IF_EN BIT(0)
+
+/* DP HDCP Control Register */
+#define SP_DP_HDCP_CTRL_REG 0x92
+#define SP_AUTO_EN BIT(7)
+#define SP_AUTO_START BIT(5)
+#define SP_LINK_POLLING BIT(1)
+
+/* DP Main Link Bandwidth Setting Register */
+#define SP_DP_MAIN_LINK_BW_SET_REG 0xa0
+#define SP_LINK_BW_SET_MASK 0x1f
+#define SP_INITIAL_SLIM_M_AUD_SEL BIT(5)
+
+/* DP Lane Count Setting Register */
+#define SP_DP_LANE_COUNT_SET_REG 0xa1
+
+/* DP Training Pattern Set Register */
+#define SP_DP_TRAINING_PATTERN_SET_REG 0xa2
+
+/* DP Lane 0 Link Training Control Register */
+#define SP_DP_LANE0_LT_CTRL_REG 0xa3
+#define SP_TX_SW_SET_MASK 0x1b
+#define SP_MAX_PRE_REACH BIT(5)
+#define SP_MAX_DRIVE_REACH BIT(4)
+#define SP_PRE_EMP_LEVEL1 BIT(3)
+#define SP_DRVIE_CURRENT_LEVEL1 BIT(0)
+
+/* DP Link Training Control Register */
+#define SP_DP_LT_CTRL_REG 0xa8
+#define SP_DP_LT_INPROGRESS 0x80
+#define SP_LT_ERROR_TYPE_MASK 0x70
+# define SP_LT_NO_ERROR 0x00
+# define SP_LT_AUX_WRITE_ERROR 0x01
+# define SP_LT_MAX_DRIVE_REACHED 0x02
+# define SP_LT_WRONG_LANE_COUNT_SET 0x03
+# define SP_LT_LOOP_SAME_5_TIME 0x04
+# define SP_LT_CR_FAIL_IN_EQ 0x05
+# define SP_LT_EQ_LOOP_5_TIME 0x06
+#define SP_LT_EN BIT(0)
+
+/* DP CEP Training Control Registers */
+#define SP_DP_CEP_TRAINING_CTRL0_REG 0xa9
+#define SP_DP_CEP_TRAINING_CTRL1_REG 0xaa
+
+/* DP Debug Register 1 */
+#define SP_DP_DEBUG1_REG 0xb0
+#define SP_DEBUG_PLL_LOCK BIT(4)
+#define SP_POLLING_EN BIT(1)
+
+/* DP Polling Control Register */
+#define SP_DP_POLLING_CTRL_REG 0xb4
+#define SP_AUTO_POLLING_DISABLE BIT(0)
+
+/* DP Link Debug Control Register */
+#define SP_DP_LINK_DEBUG_CTRL_REG 0xb8
+#define SP_M_VID_DEBUG BIT(5)
+#define SP_NEW_PRBS7 BIT(4)
+#define SP_INSERT_ER BIT(1)
+#define SP_PRBS31_EN BIT(0)
+
+/* AUX Misc control Register */
+#define SP_AUX_MISC_CTRL_REG 0xbf
+
+/* DP PLL control Register */
+#define SP_DP_PLL_CTRL_REG 0xc7
+#define SP_PLL_RST BIT(6)
+
+/* DP Analog Power Down Register */
+#define SP_DP_ANALOG_POWER_DOWN_REG 0xc8
+#define SP_CH0_PD BIT(0)
+
+/* DP Misc Control Register */
+#define SP_DP_MISC_CTRL_REG 0xcd
+#define SP_EQ_TRAINING_LOOP BIT(6)
+
+/* DP Extra I2C Device Address Register */
+#define SP_DP_EXTRA_I2C_DEV_ADDR_REG 0xce
+#define SP_I2C_STRETCH_DISABLE BIT(7)
+
+#define SP_I2C_EXTRA_ADDR 0x50
+
+/* DP Downspread Control Register 1 */
+#define SP_DP_DOWNSPREAD_CTRL1_REG 0xd0
+
+/* DP M Value Calculation Control Register */
+#define SP_DP_M_CALCULATION_CTRL_REG 0xd9
+#define SP_M_GEN_CLK_SEL BIT(0)
+
+/* AUX Channel Access Status Register */
+#define SP_AUX_CH_STATUS_REG 0xe0
+#define SP_AUX_STATUS 0x0f
+
+/* AUX Channel DEFER Control Register */
+#define SP_AUX_DEFER_CTRL_REG 0xe2
+#define SP_DEFER_CTRL_EN BIT(7)
+
+/* DP Buffer Data Count Register */
+#define SP_BUF_DATA_COUNT_REG 0xe4
+#define SP_BUF_DATA_COUNT_MASK 0x1f
+#define SP_BUF_CLR BIT(7)
+
+/* DP AUX Channel Control Register 1 */
+#define SP_DP_AUX_CH_CTRL1_REG 0xe5
+#define SP_AUX_TX_COMM_MASK 0x0f
+#define SP_AUX_LENGTH_MASK 0xf0
+#define SP_AUX_LENGTH_SHIFT 4
+
+/* DP AUX CH Address Register 0 */
+#define SP_AUX_ADDR_7_0_REG 0xe6
+
+/* DP AUX CH Address Register 1 */
+#define SP_AUX_ADDR_15_8_REG 0xe7
+
+/* DP AUX CH Address Register 2 */
+#define SP_AUX_ADDR_19_16_REG 0xe8
+#define SP_AUX_ADDR_19_16_MASK 0x0f
+
+/* DP AUX Channel Control Register 2 */
+#define SP_DP_AUX_CH_CTRL2_REG 0xe9
+#define SP_AUX_SEL_RXCM BIT(6)
+#define SP_AUX_CHSEL BIT(3)
+#define SP_AUX_PN_INV BIT(2)
+#define SP_ADDR_ONLY BIT(1)
+#define SP_AUX_EN BIT(0)
+
+/* DP Video Stream Control InfoFrame Register */
+#define SP_DP_3D_VSC_CTRL_REG 0xea
+#define SP_INFO_FRAME_VSC_EN BIT(0)
+
+/* DP Video Stream Data Byte 1 Register */
+#define SP_DP_VSC_DB1_REG 0xeb
+
+/* DP AUX Channel Control Register 3 */
+#define SP_DP_AUX_CH_CTRL3_REG 0xec
+#define SP_WAIT_COUNTER_7_0_MASK 0xff
+
+/* DP AUX Channel Control Register 4 */
+#define SP_DP_AUX_CH_CTRL4_REG 0xed
+
+/* DP AUX Buffer Data Registers */
+#define SP_DP_BUF_DATA0_REG 0xf0
+
+ssize_t anx_dp_aux_transfer(struct regmap *map_dptx,
+ struct drm_dp_aux_msg *msg);
+
+#endif
diff --git a/drivers/gpu/drm/bridge/analogix/analogix-i2c-txcommon.h b/drivers/gpu/drm/bridge/analogix/analogix-i2c-txcommon.h
new file mode 100644
index 000000000000..3c843497d835
--- /dev/null
+++ b/drivers/gpu/drm/bridge/analogix/analogix-i2c-txcommon.h
@@ -0,0 +1,234 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright(c) 2016, Analogix Semiconductor. All rights reserved.
+ */
+#ifndef _ANALOGIX_I2C_TXCOMMON_H_
+#define _ANALOGIX_I2C_TXCOMMON_H_
+
+/***************************************************************/
+/* Register definitions for TX_P2 */
+/***************************************************************/
+
+/*
+ * Core Register Definitions
+ */
+
+/* Device ID Low Byte Register */
+#define SP_DEVICE_IDL_REG 0x02
+
+/* Device ID High Byte Register */
+#define SP_DEVICE_IDH_REG 0x03
+
+/* Device version register */
+#define SP_DEVICE_VERSION_REG 0x04
+
+/* Power Down Control Register */
+#define SP_POWERDOWN_CTRL_REG 0x05
+#define SP_REGISTER_PD BIT(7)
+#define SP_HDCP_PD BIT(5)
+#define SP_AUDIO_PD BIT(4)
+#define SP_VIDEO_PD BIT(3)
+#define SP_LINK_PD BIT(2)
+#define SP_TOTAL_PD BIT(1)
+
+/* Reset Control Register 1 */
+#define SP_RESET_CTRL1_REG 0x06
+#define SP_MISC_RST BIT(7)
+#define SP_VIDCAP_RST BIT(6)
+#define SP_VIDFIF_RST BIT(5)
+#define SP_AUDFIF_RST BIT(4)
+#define SP_AUDCAP_RST BIT(3)
+#define SP_HDCP_RST BIT(2)
+#define SP_SW_RST BIT(1)
+#define SP_HW_RST BIT(0)
+
+/* Reset Control Register 2 */
+#define SP_RESET_CTRL2_REG 0x07
+#define SP_AUX_RST BIT(2)
+#define SP_SERDES_FIFO_RST BIT(1)
+#define SP_I2C_REG_RST BIT(0)
+
+/* Video Control Register 1 */
+#define SP_VID_CTRL1_REG 0x08
+#define SP_VIDEO_EN BIT(7)
+#define SP_VIDEO_MUTE BIT(2)
+#define SP_DE_GEN BIT(1)
+#define SP_DEMUX BIT(0)
+
+/* Video Control Register 2 */
+#define SP_VID_CTRL2_REG 0x09
+#define SP_IN_COLOR_F_MASK 0x03
+#define SP_IN_YC_BIT_SEL BIT(2)
+#define SP_IN_BPC_MASK 0x70
+#define SP_IN_BPC_SHIFT 4
+# define SP_IN_BPC_12BIT 0x03
+# define SP_IN_BPC_10BIT 0x02
+# define SP_IN_BPC_8BIT 0x01
+# define SP_IN_BPC_6BIT 0x00
+#define SP_IN_D_RANGE BIT(7)
+
+/* Video Control Register 3 */
+#define SP_VID_CTRL3_REG 0x0a
+#define SP_HPD_OUT BIT(6)
+
+/* Video Control Register 5 */
+#define SP_VID_CTRL5_REG 0x0c
+#define SP_CSC_STD_SEL BIT(7)
+#define SP_XVYCC_RNG_LMT BIT(6)
+#define SP_RANGE_Y2R BIT(5)
+#define SP_CSPACE_Y2R BIT(4)
+#define SP_RGB_RNG_LMT BIT(3)
+#define SP_Y_RNG_LMT BIT(2)
+#define SP_RANGE_R2Y BIT(1)
+#define SP_CSPACE_R2Y BIT(0)
+
+/* Video Control Register 6 */
+#define SP_VID_CTRL6_REG 0x0d
+#define SP_TEST_PATTERN_EN BIT(7)
+#define SP_VIDEO_PROCESS_EN BIT(6)
+#define SP_VID_US_MODE BIT(3)
+#define SP_VID_DS_MODE BIT(2)
+#define SP_UP_SAMPLE BIT(1)
+#define SP_DOWN_SAMPLE BIT(0)
+
+/* Video Control Register 8 */
+#define SP_VID_CTRL8_REG 0x0f
+#define SP_VID_VRES_TH BIT(0)
+
+/* Total Line Status Low Byte Register */
+#define SP_TOTAL_LINE_STAL_REG 0x24
+
+/* Total Line Status High Byte Register */
+#define SP_TOTAL_LINE_STAH_REG 0x25
+
+/* Active Line Status Low Byte Register */
+#define SP_ACT_LINE_STAL_REG 0x26
+
+/* Active Line Status High Byte Register */
+#define SP_ACT_LINE_STAH_REG 0x27
+
+/* Vertical Front Porch Status Register */
+#define SP_V_F_PORCH_STA_REG 0x28
+
+/* Vertical SYNC Width Status Register */
+#define SP_V_SYNC_STA_REG 0x29
+
+/* Vertical Back Porch Status Register */
+#define SP_V_B_PORCH_STA_REG 0x2a
+
+/* Total Pixel Status Low Byte Register */
+#define SP_TOTAL_PIXEL_STAL_REG 0x2b
+
+/* Total Pixel Status High Byte Register */
+#define SP_TOTAL_PIXEL_STAH_REG 0x2c
+
+/* Active Pixel Status Low Byte Register */
+#define SP_ACT_PIXEL_STAL_REG 0x2d
+
+/* Active Pixel Status High Byte Register */
+#define SP_ACT_PIXEL_STAH_REG 0x2e
+
+/* Horizontal Front Porch Status Low Byte Register */
+#define SP_H_F_PORCH_STAL_REG 0x2f
+
+/* Horizontal Front Porch Status High Byte Register */
+#define SP_H_F_PORCH_STAH_REG 0x30
+
+/* Horizontal SYNC Width Status Low Byte Register */
+#define SP_H_SYNC_STAL_REG 0x31
+
+/* Horizontal SYNC Width Status High Byte Register */
+#define SP_H_SYNC_STAH_REG 0x32
+
+/* Horizontal Back Porch Status Low Byte Register */
+#define SP_H_B_PORCH_STAL_REG 0x33
+
+/* Horizontal Back Porch Status High Byte Register */
+#define SP_H_B_PORCH_STAH_REG 0x34
+
+/* InfoFrame AVI Packet DB1 Register */
+#define SP_INFOFRAME_AVI_DB1_REG 0x70
+
+/* Bit Control Specific Register */
+#define SP_BIT_CTRL_SPECIFIC_REG 0x80
+#define SP_BIT_CTRL_SELECT_SHIFT 1
+#define SP_ENABLE_BIT_CTRL BIT(0)
+
+/* InfoFrame Audio Packet DB1 Register */
+#define SP_INFOFRAME_AUD_DB1_REG 0x83
+
+/* InfoFrame MPEG Packet DB1 Register */
+#define SP_INFOFRAME_MPEG_DB1_REG 0xb0
+
+/* Audio Channel Status Registers */
+#define SP_AUD_CH_STATUS_BASE 0xd0
+
+/* Audio Channel Num Register 5 */
+#define SP_I2S_CHANNEL_NUM_MASK 0xe0
+# define SP_I2S_CH_NUM_1 (0x00 << 5)
+# define SP_I2S_CH_NUM_2 (0x01 << 5)
+# define SP_I2S_CH_NUM_3 (0x02 << 5)
+# define SP_I2S_CH_NUM_4 (0x03 << 5)
+# define SP_I2S_CH_NUM_5 (0x04 << 5)
+# define SP_I2S_CH_NUM_6 (0x05 << 5)
+# define SP_I2S_CH_NUM_7 (0x06 << 5)
+# define SP_I2S_CH_NUM_8 (0x07 << 5)
+#define SP_EXT_VUCP BIT(2)
+#define SP_VBIT BIT(1)
+#define SP_AUDIO_LAYOUT BIT(0)
+
+/* Analog Debug Register 1 */
+#define SP_ANALOG_DEBUG1_REG 0xdc
+
+/* Analog Debug Register 2 */
+#define SP_ANALOG_DEBUG2_REG 0xdd
+#define SP_FORCE_SW_OFF_BYPASS 0x20
+#define SP_XTAL_FRQ 0x1c
+# define SP_XTAL_FRQ_19M2 (0x00 << 2)
+# define SP_XTAL_FRQ_24M (0x01 << 2)
+# define SP_XTAL_FRQ_25M (0x02 << 2)
+# define SP_XTAL_FRQ_26M (0x03 << 2)
+# define SP_XTAL_FRQ_27M (0x04 << 2)
+# define SP_XTAL_FRQ_38M4 (0x05 << 2)
+# define SP_XTAL_FRQ_52M (0x06 << 2)
+#define SP_POWERON_TIME_1P5MS 0x03
+
+/* Analog Control 0 Register */
+#define SP_ANALOG_CTRL0_REG 0xe1
+
+/* Common Interrupt Status Register 1 */
+#define SP_COMMON_INT_STATUS_BASE (0xf1 - 1)
+#define SP_PLL_LOCK_CHG 0x40
+
+/* Common Interrupt Status Register 2 */
+#define SP_COMMON_INT_STATUS2 0xf2
+#define SP_HDCP_AUTH_CHG BIT(1)
+#define SP_HDCP_AUTH_DONE BIT(0)
+
+#define SP_HDCP_LINK_CHECK_FAIL BIT(0)
+
+/* Common Interrupt Status Register 4 */
+#define SP_COMMON_INT_STATUS4_REG 0xf4
+#define SP_HPD_IRQ BIT(6)
+#define SP_HPD_ESYNC_ERR BIT(4)
+#define SP_HPD_CHG BIT(2)
+#define SP_HPD_LOST BIT(1)
+#define SP_HPD_PLUG BIT(0)
+
+/* DP Interrupt Status Register */
+#define SP_DP_INT_STATUS1_REG 0xf7
+#define SP_TRAINING_FINISH BIT(5)
+#define SP_POLLING_ERR BIT(4)
+
+/* Common Interrupt Mask Register */
+#define SP_COMMON_INT_MASK_BASE (0xf8 - 1)
+
+#define SP_COMMON_INT_MASK4_REG 0xfb
+
+/* DP Interrupts Mask Register */
+#define SP_DP_INT_MASK1_REG 0xfe
+
+/* Interrupt Control Register */
+#define SP_INT_CTRL_REG 0xff
+
+#endif /* _ANALOGIX_I2C_TXCOMMON_H_ */
diff --git a/drivers/gpu/drm/bridge/analogix/analogix_dp_core.c b/drivers/gpu/drm/bridge/analogix/analogix_dp_core.c
index bb411fe52ae8..6effe532f820 100644
--- a/drivers/gpu/drm/bridge/analogix/analogix_dp_core.c
+++ b/drivers/gpu/drm/bridge/analogix/analogix_dp_core.c
@@ -1111,7 +1111,7 @@ static int analogix_dp_get_modes(struct drm_connector *connector)
int ret, num_modes = 0;
if (dp->plat_data->panel) {
- num_modes += drm_panel_get_modes(dp->plat_data->panel);
+ num_modes += drm_panel_get_modes(dp->plat_data->panel, connector);
} else {
ret = analogix_dp_prepare_panel(dp, true, false);
if (ret) {
diff --git a/drivers/gpu/drm/bridge/cdns-dsi.c b/drivers/gpu/drm/bridge/cdns-dsi.c
index 3a5bd4e7fd1e..b7c97f060241 100644
--- a/drivers/gpu/drm/bridge/cdns-dsi.c
+++ b/drivers/gpu/drm/bridge/cdns-dsi.c
@@ -512,7 +512,7 @@ static int cdns_dsi_mode2cfg(struct cdns_dsi *dsi,
struct cdns_dsi_output *output = &dsi->output;
unsigned int tmp;
bool sync_pulse = false;
- int bpp, nlanes;
+ int bpp;
memset(dsi_cfg, 0, sizeof(*dsi_cfg));
@@ -520,7 +520,6 @@ static int cdns_dsi_mode2cfg(struct cdns_dsi *dsi,
sync_pulse = true;
bpp = mipi_dsi_pixel_format_to_bpp(output->dev->format);
- nlanes = output->dev->lanes;
if (mode_valid_check)
tmp = mode->htotal -
@@ -785,13 +784,12 @@ static void cdns_dsi_bridge_enable(struct drm_bridge *bridge)
unsigned long tx_byte_period;
struct cdns_dsi_cfg dsi_cfg;
u32 tmp, reg_wakeup, div;
- int bpp, nlanes;
+ int nlanes;
if (WARN_ON(pm_runtime_get_sync(dsi->base.dev) < 0))
return;
mode = &bridge->encoder->crtc->state->adjusted_mode;
- bpp = mipi_dsi_pixel_format_to_bpp(output->dev->format);
nlanes = output->dev->lanes;
WARN_ON_ONCE(cdns_dsi_check_conf(dsi, mode, &dsi_cfg, false));
diff --git a/drivers/gpu/drm/bridge/lvds-codec.c b/drivers/gpu/drm/bridge/lvds-codec.c
new file mode 100644
index 000000000000..5f04cc11227e
--- /dev/null
+++ b/drivers/gpu/drm/bridge/lvds-codec.c
@@ -0,0 +1,151 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Copyright (C) 2019 Renesas Electronics Corporation
+ * Copyright (C) 2016 Laurent Pinchart <laurent.pinchart@ideasonboard.com>
+ */
+
+#include <linux/gpio/consumer.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/of_graph.h>
+#include <linux/platform_device.h>
+
+#include <drm/drm_bridge.h>
+#include <drm/drm_panel.h>
+
+struct lvds_codec {
+ struct drm_bridge bridge;
+ struct drm_bridge *panel_bridge;
+ struct gpio_desc *powerdown_gpio;
+ u32 connector_type;
+};
+
+static int lvds_codec_attach(struct drm_bridge *bridge)
+{
+ struct lvds_codec *lvds_codec = container_of(bridge,
+ struct lvds_codec, bridge);
+
+ return drm_bridge_attach(bridge->encoder, lvds_codec->panel_bridge,
+ bridge);
+}
+
+static void lvds_codec_enable(struct drm_bridge *bridge)
+{
+ struct lvds_codec *lvds_codec = container_of(bridge,
+ struct lvds_codec, bridge);
+
+ if (lvds_codec->powerdown_gpio)
+ gpiod_set_value_cansleep(lvds_codec->powerdown_gpio, 0);
+}
+
+static void lvds_codec_disable(struct drm_bridge *bridge)
+{
+ struct lvds_codec *lvds_codec = container_of(bridge,
+ struct lvds_codec, bridge);
+
+ if (lvds_codec->powerdown_gpio)
+ gpiod_set_value_cansleep(lvds_codec->powerdown_gpio, 1);
+}
+
+static struct drm_bridge_funcs funcs = {
+ .attach = lvds_codec_attach,
+ .enable = lvds_codec_enable,
+ .disable = lvds_codec_disable,
+};
+
+static int lvds_codec_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct device_node *panel_node;
+ struct drm_panel *panel;
+ struct lvds_codec *lvds_codec;
+
+ lvds_codec = devm_kzalloc(dev, sizeof(*lvds_codec), GFP_KERNEL);
+ if (!lvds_codec)
+ return -ENOMEM;
+
+ lvds_codec->connector_type = (uintptr_t)of_device_get_match_data(dev);
+ lvds_codec->powerdown_gpio = devm_gpiod_get_optional(dev, "powerdown",
+ GPIOD_OUT_HIGH);
+ if (IS_ERR(lvds_codec->powerdown_gpio)) {
+ int err = PTR_ERR(lvds_codec->powerdown_gpio);
+
+ if (err != -EPROBE_DEFER)
+ dev_err(dev, "powerdown GPIO failure: %d\n", err);
+ return err;
+ }
+
+ /* Locate the panel DT node. */
+ panel_node = of_graph_get_remote_node(dev->of_node, 1, 0);
+ if (!panel_node) {
+ dev_dbg(dev, "panel DT node not found\n");
+ return -ENXIO;
+ }
+
+ panel = of_drm_find_panel(panel_node);
+ of_node_put(panel_node);
+ if (IS_ERR(panel)) {
+ dev_dbg(dev, "panel not found, deferring probe\n");
+ return PTR_ERR(panel);
+ }
+
+ lvds_codec->panel_bridge =
+ devm_drm_panel_bridge_add_typed(dev, panel,
+ lvds_codec->connector_type);
+ if (IS_ERR(lvds_codec->panel_bridge))
+ return PTR_ERR(lvds_codec->panel_bridge);
+
+ /*
+ * The panel_bridge bridge is attached to the panel's of_node,
+ * but we need a bridge attached to our of_node for our user
+ * to look up.
+ */
+ lvds_codec->bridge.of_node = dev->of_node;
+ lvds_codec->bridge.funcs = &funcs;
+ drm_bridge_add(&lvds_codec->bridge);
+
+ platform_set_drvdata(pdev, lvds_codec);
+
+ return 0;
+}
+
+static int lvds_codec_remove(struct platform_device *pdev)
+{
+ struct lvds_codec *lvds_codec = platform_get_drvdata(pdev);
+
+ drm_bridge_remove(&lvds_codec->bridge);
+
+ return 0;
+}
+
+static const struct of_device_id lvds_codec_match[] = {
+ {
+ .compatible = "lvds-decoder",
+ .data = (void *)DRM_MODE_CONNECTOR_DPI,
+ },
+ {
+ .compatible = "lvds-encoder",
+ .data = (void *)DRM_MODE_CONNECTOR_LVDS,
+ },
+ {
+ .compatible = "thine,thc63lvdm83d",
+ .data = (void *)DRM_MODE_CONNECTOR_LVDS,
+ },
+ {},
+};
+MODULE_DEVICE_TABLE(of, lvds_codec_match);
+
+static struct platform_driver lvds_codec_driver = {
+ .probe = lvds_codec_probe,
+ .remove = lvds_codec_remove,
+ .driver = {
+ .name = "lvds-codec",
+ .of_match_table = lvds_codec_match,
+ },
+};
+module_platform_driver(lvds_codec_driver);
+
+MODULE_AUTHOR("Laurent Pinchart <laurent.pinchart@ideasonboard.com>");
+MODULE_DESCRIPTION("LVDS encoders and decoders");
+MODULE_LICENSE("GPL");
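
The comment in lvds_codec_probe() about bridge.of_node points at the consumer side: the upstream display controller finds this bridge through the OF graph and attaches it. A hedged sketch of that lookup, where remote, encoder and the error handling are illustrative:

#include <drm/drm_bridge.h>

/* remote: the codec's DT node, resolved via the encoder's OF graph */
struct drm_bridge *bridge = of_drm_find_bridge(remote);

if (!bridge)
	return -EPROBE_DEFER;	/* the codec has not probed yet */

return drm_bridge_attach(encoder, bridge, NULL);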
diff --git a/drivers/gpu/drm/bridge/lvds-encoder.c b/drivers/gpu/drm/bridge/lvds-encoder.c
deleted file mode 100644
index e2132a8d5106..000000000000
--- a/drivers/gpu/drm/bridge/lvds-encoder.c
+++ /dev/null
@@ -1,155 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-/*
- * Copyright (C) 2016 Laurent Pinchart <laurent.pinchart@ideasonboard.com>
- */
-
-#include <linux/gpio/consumer.h>
-#include <linux/module.h>
-#include <linux/of.h>
-#include <linux/of_graph.h>
-#include <linux/platform_device.h>
-
-#include <drm/drm_bridge.h>
-#include <drm/drm_panel.h>
-
-struct lvds_encoder {
- struct drm_bridge bridge;
- struct drm_bridge *panel_bridge;
- struct gpio_desc *powerdown_gpio;
-};
-
-static int lvds_encoder_attach(struct drm_bridge *bridge)
-{
- struct lvds_encoder *lvds_encoder = container_of(bridge,
- struct lvds_encoder,
- bridge);
-
- return drm_bridge_attach(bridge->encoder, lvds_encoder->panel_bridge,
- bridge);
-}
-
-static void lvds_encoder_enable(struct drm_bridge *bridge)
-{
- struct lvds_encoder *lvds_encoder = container_of(bridge,
- struct lvds_encoder,
- bridge);
-
- if (lvds_encoder->powerdown_gpio)
- gpiod_set_value_cansleep(lvds_encoder->powerdown_gpio, 0);
-}
-
-static void lvds_encoder_disable(struct drm_bridge *bridge)
-{
- struct lvds_encoder *lvds_encoder = container_of(bridge,
- struct lvds_encoder,
- bridge);
-
- if (lvds_encoder->powerdown_gpio)
- gpiod_set_value_cansleep(lvds_encoder->powerdown_gpio, 1);
-}
-
-static struct drm_bridge_funcs funcs = {
- .attach = lvds_encoder_attach,
- .enable = lvds_encoder_enable,
- .disable = lvds_encoder_disable,
-};
-
-static int lvds_encoder_probe(struct platform_device *pdev)
-{
- struct device *dev = &pdev->dev;
- struct device_node *port;
- struct device_node *endpoint;
- struct device_node *panel_node;
- struct drm_panel *panel;
- struct lvds_encoder *lvds_encoder;
-
- lvds_encoder = devm_kzalloc(dev, sizeof(*lvds_encoder), GFP_KERNEL);
- if (!lvds_encoder)
- return -ENOMEM;
-
- lvds_encoder->powerdown_gpio = devm_gpiod_get_optional(dev, "powerdown",
- GPIOD_OUT_HIGH);
- if (IS_ERR(lvds_encoder->powerdown_gpio)) {
- int err = PTR_ERR(lvds_encoder->powerdown_gpio);
-
- if (err != -EPROBE_DEFER)
- dev_err(dev, "powerdown GPIO failure: %d\n", err);
- return err;
- }
-
- /* Locate the panel DT node. */
- port = of_graph_get_port_by_id(dev->of_node, 1);
- if (!port) {
- dev_dbg(dev, "port 1 not found\n");
- return -ENXIO;
- }
-
- endpoint = of_get_child_by_name(port, "endpoint");
- of_node_put(port);
- if (!endpoint) {
- dev_dbg(dev, "no endpoint for port 1\n");
- return -ENXIO;
- }
-
- panel_node = of_graph_get_remote_port_parent(endpoint);
- of_node_put(endpoint);
- if (!panel_node) {
- dev_dbg(dev, "no remote endpoint for port 1\n");
- return -ENXIO;
- }
-
- panel = of_drm_find_panel(panel_node);
- of_node_put(panel_node);
- if (IS_ERR(panel)) {
- dev_dbg(dev, "panel not found, deferring probe\n");
- return PTR_ERR(panel);
- }
-
- lvds_encoder->panel_bridge =
- devm_drm_panel_bridge_add_typed(dev, panel,
- DRM_MODE_CONNECTOR_LVDS);
- if (IS_ERR(lvds_encoder->panel_bridge))
- return PTR_ERR(lvds_encoder->panel_bridge);
-
- /* The panel_bridge bridge is attached to the panel's of_node,
- * but we need a bridge attached to our of_node for our user
- * to look up.
- */
- lvds_encoder->bridge.of_node = dev->of_node;
- lvds_encoder->bridge.funcs = &funcs;
- drm_bridge_add(&lvds_encoder->bridge);
-
- platform_set_drvdata(pdev, lvds_encoder);
-
- return 0;
-}
-
-static int lvds_encoder_remove(struct platform_device *pdev)
-{
- struct lvds_encoder *lvds_encoder = platform_get_drvdata(pdev);
-
- drm_bridge_remove(&lvds_encoder->bridge);
-
- return 0;
-}
-
-static const struct of_device_id lvds_encoder_match[] = {
- { .compatible = "lvds-encoder" },
- { .compatible = "thine,thc63lvdm83d" },
- {},
-};
-MODULE_DEVICE_TABLE(of, lvds_encoder_match);
-
-static struct platform_driver lvds_encoder_driver = {
- .probe = lvds_encoder_probe,
- .remove = lvds_encoder_remove,
- .driver = {
- .name = "lvds-encoder",
- .of_match_table = lvds_encoder_match,
- },
-};
-module_platform_driver(lvds_encoder_driver);
-
-MODULE_AUTHOR("Laurent Pinchart <laurent.pinchart@ideasonboard.com>");
-MODULE_DESCRIPTION("Transparent parallel to LVDS encoder");
-MODULE_LICENSE("GPL");
diff --git a/drivers/gpu/drm/bridge/panel.c b/drivers/gpu/drm/bridge/panel.c
index f4e293e7cf64..f66777e24968 100644
--- a/drivers/gpu/drm/bridge/panel.c
+++ b/drivers/gpu/drm/bridge/panel.c
@@ -37,7 +37,7 @@ static int panel_bridge_connector_get_modes(struct drm_connector *connector)
struct panel_bridge *panel_bridge =
drm_connector_to_panel_bridge(connector);
- return drm_panel_get_modes(panel_bridge->panel);
+ return drm_panel_get_modes(panel_bridge->panel, connector);
}
static const struct drm_connector_helper_funcs
@@ -289,3 +289,21 @@ struct drm_bridge *devm_drm_panel_bridge_add_typed(struct device *dev,
return bridge;
}
EXPORT_SYMBOL(devm_drm_panel_bridge_add_typed);
+
+/**
+ * drm_panel_bridge_connector - return the connector for the panel bridge
+ * @bridge: the panel bridge returned by drm_panel_bridge_add()
+ *
+ * drm_panel_bridge creates the connector.
+ * This function gives external access to the connector.
+ *
+ * Returns: Pointer to drm_connector
+ */
+struct drm_connector *drm_panel_bridge_connector(struct drm_bridge *bridge)
+{
+ struct panel_bridge *panel_bridge;
+
+ panel_bridge = drm_bridge_to_panel_bridge(bridge);
+
+ return &panel_bridge->connector;
+}
+EXPORT_SYMBOL(drm_panel_bridge_connector);
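
A short usage sketch for the new export: a driver that built its output with drm_panel_bridge_add() can now retrieve the connector, for example to attach a driver-specific property (prop and val are placeholders):

struct drm_connector *connector;

connector = drm_panel_bridge_connector(panel_bridge);
drm_object_attach_property(&connector->base, prop, val);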
diff --git a/drivers/gpu/drm/bridge/parade-ps8622.c b/drivers/gpu/drm/bridge/parade-ps8622.c
index b7a72dfdcac3..10c47c008b40 100644
--- a/drivers/gpu/drm/bridge/parade-ps8622.c
+++ b/drivers/gpu/drm/bridge/parade-ps8622.c
@@ -461,7 +461,7 @@ static int ps8622_get_modes(struct drm_connector *connector)
ps8622 = connector_to_ps8622(connector);
- return drm_panel_get_modes(ps8622->panel);
+ return drm_panel_get_modes(ps8622->panel, connector);
}
static const struct drm_connector_helper_funcs ps8622_connector_helper_funcs = {
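
The same mechanical change, passing the connector to drm_panel_get_modes() instead of letting the panel infer it, repeats in every connector ->get_modes() hook this series touches. The pattern, sketched with illustrative names:

static int my_connector_get_modes(struct drm_connector *connector)
{
	struct my_ctx *ctx = connector_to_my_ctx(connector);	/* illustrative */

	return drm_panel_get_modes(ctx->panel, connector);
}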
diff --git a/drivers/gpu/drm/bridge/synopsys/dw-hdmi-ahb-audio.c b/drivers/gpu/drm/bridge/synopsys/dw-hdmi-ahb-audio.c
index 2b7539701b42..dd56996fe9c7 100644
--- a/drivers/gpu/drm/bridge/synopsys/dw-hdmi-ahb-audio.c
+++ b/drivers/gpu/drm/bridge/synopsys/dw-hdmi-ahb-audio.c
@@ -291,7 +291,7 @@ static irqreturn_t snd_dw_hdmi_irq(int irq, void *data)
return IRQ_HANDLED;
}
-static struct snd_pcm_hardware dw_hdmi_hw = {
+static const struct snd_pcm_hardware dw_hdmi_hw = {
.info = SNDRV_PCM_INFO_INTERLEAVED |
SNDRV_PCM_INFO_BLOCK_TRANSFER |
SNDRV_PCM_INFO_MMAP |
diff --git a/drivers/gpu/drm/bridge/synopsys/dw-mipi-dsi.c b/drivers/gpu/drm/bridge/synopsys/dw-mipi-dsi.c
index b6e793bb653c..b18351b6760a 100644
--- a/drivers/gpu/drm/bridge/synopsys/dw-mipi-dsi.c
+++ b/drivers/gpu/drm/bridge/synopsys/dw-mipi-dsi.c
@@ -719,7 +719,15 @@ static void dw_mipi_dsi_vertical_timing_config(struct dw_mipi_dsi *dsi,
static void dw_mipi_dsi_dphy_timing_config(struct dw_mipi_dsi *dsi)
{
+ const struct dw_mipi_dsi_phy_ops *phy_ops = dsi->plat_data->phy_ops;
+ struct dw_mipi_dsi_dphy_timing timing;
u32 hw_version;
+ int ret;
+
+ ret = phy_ops->get_timing(dsi->plat_data->priv_data,
+ dsi->lane_mbps, &timing);
+ if (ret)
+ DRM_DEV_ERROR(dsi->dev, "Retrieving phy timings failed\n");
/*
* TODO dw drv improvements
@@ -732,16 +740,20 @@ static void dw_mipi_dsi_dphy_timing_config(struct dw_mipi_dsi *dsi)
hw_version = dsi_read(dsi, DSI_VERSION) & VERSION;
if (hw_version >= HWVER_131) {
- dsi_write(dsi, DSI_PHY_TMR_CFG, PHY_HS2LP_TIME_V131(0x40) |
- PHY_LP2HS_TIME_V131(0x40));
+ dsi_write(dsi, DSI_PHY_TMR_CFG,
+ PHY_HS2LP_TIME_V131(timing.data_hs2lp) |
+ PHY_LP2HS_TIME_V131(timing.data_lp2hs));
dsi_write(dsi, DSI_PHY_TMR_RD_CFG, MAX_RD_TIME_V131(10000));
} else {
- dsi_write(dsi, DSI_PHY_TMR_CFG, PHY_HS2LP_TIME(0x40) |
- PHY_LP2HS_TIME(0x40) | MAX_RD_TIME(10000));
+ dsi_write(dsi, DSI_PHY_TMR_CFG,
+ PHY_HS2LP_TIME(timing.data_hs2lp) |
+ PHY_LP2HS_TIME(timing.data_lp2hs) |
+ MAX_RD_TIME(10000));
}
- dsi_write(dsi, DSI_PHY_TMR_LPCLK_CFG, PHY_CLKHS2LP_TIME(0x40)
- | PHY_CLKLP2HS_TIME(0x40));
+ dsi_write(dsi, DSI_PHY_TMR_LPCLK_CFG,
+ PHY_CLKHS2LP_TIME(timing.clk_hs2lp) |
+ PHY_CLKLP2HS_TIME(timing.clk_lp2hs));
}
static void dw_mipi_dsi_dphy_interface_config(struct dw_mipi_dsi *dsi)
@@ -798,9 +810,6 @@ static void dw_mipi_dsi_bridge_post_disable(struct drm_bridge *bridge)
struct dw_mipi_dsi *dsi = bridge_to_dsi(bridge);
const struct dw_mipi_dsi_phy_ops *phy_ops = dsi->plat_data->phy_ops;
- if (phy_ops->power_off)
- phy_ops->power_off(dsi->plat_data->priv_data);
-
/*
* Switch to command mode before panel-bridge post_disable &
* panel unprepare.
@@ -817,6 +826,9 @@ static void dw_mipi_dsi_bridge_post_disable(struct drm_bridge *bridge)
*/
dsi->panel_bridge->funcs->post_disable(dsi->panel_bridge);
+ if (phy_ops->power_off)
+ phy_ops->power_off(dsi->plat_data->priv_data);
+
if (dsi->slave) {
dw_mipi_dsi_disable(dsi->slave);
clk_disable_unprepare(dsi->slave->pclk);
@@ -883,6 +895,9 @@ static void dw_mipi_dsi_mode_set(struct dw_mipi_dsi *dsi,
/* Switch to cmd mode for panel-bridge pre_enable & panel prepare */
dw_mipi_dsi_set_mode(dsi, 0);
+
+ if (phy_ops->power_on)
+ phy_ops->power_on(dsi->plat_data->priv_data);
}
static void dw_mipi_dsi_bridge_mode_set(struct drm_bridge *bridge,
@@ -899,15 +914,11 @@ static void dw_mipi_dsi_bridge_mode_set(struct drm_bridge *bridge,
static void dw_mipi_dsi_bridge_enable(struct drm_bridge *bridge)
{
struct dw_mipi_dsi *dsi = bridge_to_dsi(bridge);
- const struct dw_mipi_dsi_phy_ops *phy_ops = dsi->plat_data->phy_ops;
/* Switch to video mode for panel-bridge enable & panel enable */
dw_mipi_dsi_set_mode(dsi, MIPI_DSI_MODE_VIDEO);
if (dsi->slave)
dw_mipi_dsi_set_mode(dsi->slave, MIPI_DSI_MODE_VIDEO);
-
- if (phy_ops->power_on)
- phy_ops->power_on(dsi->plat_data->priv_data);
}
static enum drm_mode_status
@@ -991,7 +1002,8 @@ __dw_mipi_dsi_probe(struct platform_device *pdev,
dsi->dev = dev;
dsi->plat_data = plat_data;
- if (!plat_data->phy_ops->init || !plat_data->phy_ops->get_lane_mbps) {
+ if (!plat_data->phy_ops->init || !plat_data->phy_ops->get_lane_mbps ||
+ !plat_data->phy_ops->get_timing) {
DRM_ERROR("Phy not properly configured\n");
return ERR_PTR(-ENODEV);
}
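
Since .get_timing is now mandatory alongside .init and .get_lane_mbps, platform glue must provide it. A minimal sketch that simply returns the 0x40 values the core previously hardcoded; placeholder numbers, not tuned for any real PHY:

static int my_dsi_phy_get_timing(void *priv_data, unsigned int lane_mbps,
				 struct dw_mipi_dsi_dphy_timing *timing)
{
	timing->clk_hs2lp = 0x40;
	timing->clk_lp2hs = 0x40;
	timing->data_hs2lp = 0x40;
	timing->data_lp2hs = 0x40;

	return 0;
}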
diff --git a/drivers/gpu/drm/bridge/tc358764.c b/drivers/gpu/drm/bridge/tc358764.c
index db298f550a5a..96207fcfde19 100644
--- a/drivers/gpu/drm/bridge/tc358764.c
+++ b/drivers/gpu/drm/bridge/tc358764.c
@@ -282,7 +282,7 @@ static int tc358764_get_modes(struct drm_connector *connector)
{
struct tc358764 *ctx = connector_to_tc358764(connector);
- return drm_panel_get_modes(ctx->panel);
+ return drm_panel_get_modes(ctx->panel, connector);
}
static const
diff --git a/drivers/gpu/drm/bridge/tc358767.c b/drivers/gpu/drm/bridge/tc358767.c
index 8029478ffebb..3709e5ace724 100644
--- a/drivers/gpu/drm/bridge/tc358767.c
+++ b/drivers/gpu/drm/bridge/tc358767.c
@@ -1346,7 +1346,7 @@ static int tc_connector_get_modes(struct drm_connector *connector)
return 0;
}
- count = drm_panel_get_modes(tc->panel);
+ count = drm_panel_get_modes(tc->panel, connector);
if (count > 0)
return count;
diff --git a/drivers/gpu/drm/bridge/ti-sn65dsi86.c b/drivers/gpu/drm/bridge/ti-sn65dsi86.c
index 43abf01ebd4c..9a2dd986afa5 100644
--- a/drivers/gpu/drm/bridge/ti-sn65dsi86.c
+++ b/drivers/gpu/drm/bridge/ti-sn65dsi86.c
@@ -206,7 +206,7 @@ static int ti_sn_bridge_connector_get_modes(struct drm_connector *connector)
{
struct ti_sn_bridge *pdata = connector_to_ti_sn_bridge(connector);
- return drm_panel_get_modes(pdata->panel);
+ return drm_panel_get_modes(pdata->panel, connector);
}
static enum drm_mode_status
diff --git a/drivers/gpu/drm/drm_agpsupport.c b/drivers/gpu/drm/drm_agpsupport.c
index 6e09f27fd9d6..4c7ad46fdd21 100644
--- a/drivers/gpu/drm/drm_agpsupport.c
+++ b/drivers/gpu/drm/drm_agpsupport.c
@@ -212,7 +212,7 @@ int drm_agp_alloc(struct drm_device *dev, struct drm_agp_buffer *request)
if (!entry)
return -ENOMEM;
- pages = (request->size + PAGE_SIZE - 1) / PAGE_SIZE;
+ pages = DIV_ROUND_UP(request->size, PAGE_SIZE);
type = (u32) request->type;
memory = agp_allocate_memory(dev->agp->bridge, pages, type);
if (!memory) {
@@ -325,7 +325,7 @@ int drm_agp_bind(struct drm_device *dev, struct drm_agp_binding *request)
entry = drm_agp_lookup_entry(dev, request->handle);
if (!entry || entry->bound)
return -EINVAL;
- page = (request->offset + PAGE_SIZE - 1) / PAGE_SIZE;
+ page = DIV_ROUND_UP(request->offset, PAGE_SIZE);
retcode = drm_bind_agp(entry->memory, page);
if (retcode)
return retcode;
diff --git a/drivers/gpu/drm/drm_atomic.c b/drivers/gpu/drm/drm_atomic.c
index 14aeaf736321..d33691512a8e 100644
--- a/drivers/gpu/drm/drm_atomic.c
+++ b/drivers/gpu/drm/drm_atomic.c
@@ -251,7 +251,7 @@ EXPORT_SYMBOL(drm_atomic_state_clear);
* @ref: This atomic state to deallocate
*
* This frees all memory associated with an atomic state, including all the
- * per-object state for planes, crtcs and connectors.
+ * per-object state for planes, CRTCs and connectors.
*/
void __drm_atomic_state_free(struct kref *ref)
{
@@ -272,12 +272,12 @@ void __drm_atomic_state_free(struct kref *ref)
EXPORT_SYMBOL(__drm_atomic_state_free);
/**
- * drm_atomic_get_crtc_state - get crtc state
+ * drm_atomic_get_crtc_state - get CRTC state
* @state: global atomic state object
- * @crtc: crtc to get state object for
+ * @crtc: CRTC to get state object for
*
- * This function returns the crtc state for the given crtc, allocating it if
- * needed. It will also grab the relevant crtc lock to make sure that the state
+ * This function returns the CRTC state for the given CRTC, allocating it if
+ * needed. It will also grab the relevant CRTC lock to make sure that the state
* is consistent.
*
* Returns:
@@ -688,10 +688,12 @@ static void drm_atomic_plane_print_state(struct drm_printer *p,
* associated state struct &drm_private_state.
*
* Similar to userspace-exposed objects, private state structures can be
- * acquired by calling drm_atomic_get_private_obj_state(). Since this function
- * does not take care of locking, drivers should wrap it for each type of
- * private state object they have with the required call to drm_modeset_lock()
- * for the corresponding &drm_modeset_lock.
+ * acquired by calling drm_atomic_get_private_obj_state(). This also takes care
+ * of locking, hence drivers should not have a need to call drm_modeset_lock()
+ * directly. Sequence of the actual hardware state commit is not handled,
+ * drivers might need to keep track of struct drm_crtc_commit within subclassed
+ * structure of &drm_private_state as necessary, e.g. similar to
+ * &drm_plane_state.commit. See also &drm_atomic_state.fake_commit.
*
* All private state structures contained in a &drm_atomic_state update can be
* iterated using for_each_oldnew_private_obj_in_state(),
@@ -1016,14 +1018,14 @@ static void drm_atomic_connector_print_state(struct drm_printer *p,
}
/**
- * drm_atomic_add_affected_connectors - add connectors for crtc
+ * drm_atomic_add_affected_connectors - add connectors for CRTC
* @state: atomic state
- * @crtc: DRM crtc
+ * @crtc: DRM CRTC
*
* This function walks the current configuration and adds all connectors
* currently using @crtc to the atomic configuration @state. Note that this
* function must acquire the connection mutex. This can potentially cause
- * unneeded seralization if the update is just for the planes on one crtc. Hence
+ * unneeded serialization if the update is just for the planes on one CRTC. Hence
* drivers and helpers should only call this when really needed (e.g. when a
* full modeset needs to happen due to some change).
*
@@ -1076,9 +1078,9 @@ drm_atomic_add_affected_connectors(struct drm_atomic_state *state,
EXPORT_SYMBOL(drm_atomic_add_affected_connectors);
/**
- * drm_atomic_add_affected_planes - add planes for crtc
+ * drm_atomic_add_affected_planes - add planes for CRTC
* @state: atomic state
- * @crtc: DRM crtc
+ * @crtc: DRM CRTC
*
* This function walks the current configuration and adds all planes
* currently used by @crtc to the atomic configuration @state. This is useful
diff --git a/drivers/gpu/drm/drm_atomic_helper.c b/drivers/gpu/drm/drm_atomic_helper.c
index b191d39c071d..4511c2e07bb9 100644
--- a/drivers/gpu/drm/drm_atomic_helper.c
+++ b/drivers/gpu/drm/drm_atomic_helper.c
@@ -150,8 +150,8 @@ static int handle_conflicting_encoders(struct drm_atomic_state *state,
* is not set, an error is returned. Userspace can provide a solution
* through the atomic ioctl.
*
- * If the flag is set conflicting connectors are removed from the crtc
- * and the crtc is disabled if no encoder is left. This preserves
+ * If the flag is set conflicting connectors are removed from the CRTC
+ * and the CRTC is disabled if no encoder is left. This preserves
* compatibility with the legacy set_config behavior.
*/
drm_connector_list_iter_begin(state->dev, &conn_iter);
@@ -220,7 +220,7 @@ set_best_encoder(struct drm_atomic_state *state,
crtc = conn_state->connector->state->crtc;
/* A NULL crtc is an error here because we should have
- * duplicated a NULL best_encoder when crtc was NULL.
+ * duplicated a NULL best_encoder when crtc was NULL.
* As an exception restoring duplicated atomic state
* during resume is allowed, so don't warn when
* best_encoder is equal to encoder we intend to set.
@@ -419,6 +419,7 @@ mode_fixup(struct drm_atomic_state *state)
for_each_new_connector_in_state(state, connector, new_conn_state, i) {
const struct drm_encoder_helper_funcs *funcs;
struct drm_encoder *encoder;
+ struct drm_bridge *bridge;
WARN_ON(!!new_conn_state->best_encoder != !!new_conn_state->crtc);
@@ -435,8 +436,10 @@ mode_fixup(struct drm_atomic_state *state)
encoder = new_conn_state->best_encoder;
funcs = encoder->helper_private;
- ret = drm_bridge_mode_fixup(encoder->bridge, &new_crtc_state->mode,
- &new_crtc_state->adjusted_mode);
+ bridge = drm_bridge_chain_get_first_bridge(encoder);
+ ret = drm_bridge_chain_mode_fixup(bridge,
+ &new_crtc_state->mode,
+ &new_crtc_state->adjusted_mode);
if (!ret) {
DRM_DEBUG_ATOMIC("Bridge fixup failed\n");
return -EINVAL;
@@ -492,6 +495,7 @@ static enum drm_mode_status mode_valid_path(struct drm_connector *connector,
struct drm_crtc *crtc,
const struct drm_display_mode *mode)
{
+ struct drm_bridge *bridge;
enum drm_mode_status ret;
ret = drm_encoder_mode_valid(encoder, mode);
@@ -501,7 +505,8 @@ static enum drm_mode_status mode_valid_path(struct drm_connector *connector,
return ret;
}
- ret = drm_bridge_mode_valid(encoder->bridge, mode);
+ bridge = drm_bridge_chain_get_first_bridge(encoder);
+ ret = drm_bridge_chain_mode_valid(bridge, mode);
if (ret != MODE_OK) {
DRM_DEBUG_ATOMIC("[BRIDGE] mode_valid() failed\n");
return ret;
@@ -556,27 +561,27 @@ mode_valid(struct drm_atomic_state *state)
* @state: the driver state object
*
* Check the state object to see if the requested state is physically possible.
- * This does all the crtc and connector related computations for an atomic
+ * This does all the CRTC and connector related computations for an atomic
* update and adds any additional connectors needed for full modesets. It calls
* the various per-object callbacks in the follow order:
*
* 1. &drm_connector_helper_funcs.atomic_best_encoder for determining the new encoder.
* 2. &drm_connector_helper_funcs.atomic_check to validate the connector state.
- * 3. If it's determined a modeset is needed then all connectors on the affected crtc
- * crtc are added and &drm_connector_helper_funcs.atomic_check is run on them.
+ * 3. If it's determined a modeset is needed then all connectors on the affected
+ * CRTC are added and &drm_connector_helper_funcs.atomic_check is run on them.
* 4. &drm_encoder_helper_funcs.mode_valid, &drm_bridge_funcs.mode_valid and
* &drm_crtc_helper_funcs.mode_valid are called on the affected components.
* 5. &drm_bridge_funcs.mode_fixup is called on all encoder bridges.
* 6. &drm_encoder_helper_funcs.atomic_check is called to validate any encoder state.
- * This function is only called when the encoder will be part of a configured crtc,
+ * This function is only called when the encoder will be part of a configured CRTC,
* it must not be used for implementing connector property validation.
* If this function is NULL, &drm_atomic_encoder_helper_funcs.mode_fixup is called
* instead.
- * 7. &drm_crtc_helper_funcs.mode_fixup is called last, to fix up the mode with crtc constraints.
+ * 7. &drm_crtc_helper_funcs.mode_fixup is called last, to fix up the mode with CRTC constraints.
*
* &drm_crtc_state.mode_changed is set when the input mode is changed.
* &drm_crtc_state.connectors_changed is set when a connector is added or
- * removed from the crtc. &drm_crtc_state.active_changed is set when
+ * removed from the CRTC. &drm_crtc_state.active_changed is set when
* &drm_crtc_state.active changes, which is used for DPMS.
* See also: drm_atomic_crtc_needs_modeset()
*
@@ -687,7 +692,7 @@ drm_atomic_helper_check_modeset(struct drm_device *dev,
/*
* After all the routing has been prepared we need to add in any
- * connector which is itself unchanged, but whose crtc changes its
+ * connector which is itself unchanged, but whose CRTC changes its
* configuration. This must be done before calling mode_fixup in case a
* crtc only changed its mode but has the same set of connectors.
*/
@@ -736,13 +741,13 @@ EXPORT_SYMBOL(drm_atomic_helper_check_modeset);
/**
* drm_atomic_helper_check_plane_state() - Check plane state for validity
* @plane_state: plane state to check
- * @crtc_state: crtc state to check
+ * @crtc_state: CRTC state to check
* @min_scale: minimum @src:@dest scaling factor in 16.16 fixed point
* @max_scale: maximum @src:@dest scaling factor in 16.16 fixed point
* @can_position: is it legal to position the plane such that it
- * doesn't cover the entire crtc? This will generally
+ * doesn't cover the entire CRTC? This will generally
* only be false for primary planes.
- * @can_update_disabled: can the plane be updated while the crtc
+ * @can_update_disabled: can the plane be updated while the CRTC
* is disabled?
*
* Checks that a desired plane update is valid, and updates various
@@ -839,7 +844,7 @@ EXPORT_SYMBOL(drm_atomic_helper_check_plane_state);
* &drm_crtc_helper_funcs.atomic_check and &drm_plane_helper_funcs.atomic_check
* hooks provided by the driver.
*
- * It also sets &drm_crtc_state.planes_changed to indicate that a crtc has
+ * It also sets &drm_crtc_state.planes_changed to indicate that a CRTC has
* updated planes.
*
* RETURNS:
@@ -903,7 +908,7 @@ EXPORT_SYMBOL(drm_atomic_helper_check_planes);
* @state: the driver state object
*
* Check the state object to see if the requested state is physically possible.
- * Only crtcs and planes have check callbacks, so for any additional (global)
+ * Only CRTCs and planes have check callbacks, so for any additional (global)
* checking that a driver needs it can simply wrap that around this function.
* Drivers without such needs can directly use this as their
* &drm_mode_config_funcs.atomic_check callback.
@@ -956,14 +961,14 @@ crtc_needs_disable(struct drm_crtc_state *old_state,
struct drm_crtc_state *new_state)
{
/*
- * No new_state means the crtc is off, so the only criteria is whether
+ * No new_state means the CRTC is off, so the only criterion is whether
* it's currently active or in self refresh mode.
*/
if (!new_state)
return drm_atomic_crtc_effectively_active(old_state);
/*
- * We need to run through the crtc_funcs->disable() function if the crtc
+ * We need to run through the crtc_funcs->disable() function if the CRTC
* is currently on, if it's transitioning to self refresh mode, or if
* it's in self refresh mode and needs to be fully disabled.
*/
@@ -984,6 +989,7 @@ disable_outputs(struct drm_device *dev, struct drm_atomic_state *old_state)
for_each_oldnew_connector_in_state(old_state, connector, old_conn_state, new_conn_state, i) {
const struct drm_encoder_helper_funcs *funcs;
struct drm_encoder *encoder;
+ struct drm_bridge *bridge;
/* Shut down everything that's in the changeset and currently
* still on. So need to check the old, saved state. */
@@ -1020,7 +1026,8 @@ disable_outputs(struct drm_device *dev, struct drm_atomic_state *old_state)
* Each encoder has at most one connector (since we always steal
* it away), so we won't call disable hooks twice.
*/
- drm_atomic_bridge_disable(encoder->bridge, old_state);
+ bridge = drm_bridge_chain_get_first_bridge(encoder);
+ drm_atomic_bridge_chain_disable(bridge, old_state);
/* Right function depends upon target state. */
if (funcs) {
@@ -1034,7 +1041,7 @@ disable_outputs(struct drm_device *dev, struct drm_atomic_state *old_state)
funcs->dpms(encoder, DRM_MODE_DPMS_OFF);
}
- drm_atomic_bridge_post_disable(encoder->bridge, old_state);
+ drm_atomic_bridge_chain_post_disable(bridge, old_state);
}
for_each_oldnew_crtc_in_state(old_state, crtc, old_crtc_state, new_crtc_state, i) {
@@ -1080,7 +1087,7 @@ disable_outputs(struct drm_device *dev, struct drm_atomic_state *old_state)
* @old_state: atomic state object with old state structures
*
* This function updates all the various legacy modeset state pointers in
- * connectors, encoders and crtcs. It also updates the timestamping constants
+ * connectors, encoders and CRTCs. It also updates the timestamping constants
* used for precise vblank timestamps by calling
* drm_calc_timestamping_constants().
*
@@ -1188,6 +1195,7 @@ crtc_set_mode(struct drm_device *dev, struct drm_atomic_state *old_state)
const struct drm_encoder_helper_funcs *funcs;
struct drm_encoder *encoder;
struct drm_display_mode *mode, *adjusted_mode;
+ struct drm_bridge *bridge;
if (!new_conn_state->best_encoder)
continue;
@@ -1215,7 +1223,8 @@ crtc_set_mode(struct drm_device *dev, struct drm_atomic_state *old_state)
funcs->mode_set(encoder, mode, adjusted_mode);
}
- drm_bridge_mode_set(encoder->bridge, mode, adjusted_mode);
+ bridge = drm_bridge_chain_get_first_bridge(encoder);
+ drm_bridge_chain_mode_set(bridge, mode, adjusted_mode);
}
}
@@ -1227,7 +1236,7 @@ crtc_set_mode(struct drm_device *dev, struct drm_atomic_state *old_state)
* This function shuts down all the outputs that need to be shut down and
* prepares them (if required) with the new mode.
*
- * For compatibility with legacy crtc helpers this should be called before
+ * For compatibility with legacy CRTC helpers this should be called before
* drm_atomic_helper_commit_planes(), which is what the default commit function
* does. But drivers with different needs can group the modeset commits together
* and do the plane commits at the end. This is useful for drivers doing runtime
@@ -1273,7 +1282,7 @@ static void drm_atomic_helper_commit_writebacks(struct drm_device *dev,
* This function enables all the outputs with the new configuration which had to
* be turned off for the update.
*
- * For compatibility with legacy crtc helpers this should be called after
+ * For compatibility with legacy CRTC helpers this should be called after
* drm_atomic_helper_commit_planes(), which is what the default commit function
* does. But drivers with different needs can group the modeset commits together
* and do the plane commits at the end. This is useful for drivers doing runtime
@@ -1314,6 +1323,7 @@ void drm_atomic_helper_commit_modeset_enables(struct drm_device *dev,
for_each_new_connector_in_state(old_state, connector, new_conn_state, i) {
const struct drm_encoder_helper_funcs *funcs;
struct drm_encoder *encoder;
+ struct drm_bridge *bridge;
if (!new_conn_state->best_encoder)
continue;
@@ -1332,7 +1342,8 @@ void drm_atomic_helper_commit_modeset_enables(struct drm_device *dev,
* Each encoder has at most one connector (since we always steal
* it away), so we won't call enable hooks twice.
*/
- drm_atomic_bridge_pre_enable(encoder->bridge, old_state);
+ bridge = drm_bridge_chain_get_first_bridge(encoder);
+ drm_atomic_bridge_chain_pre_enable(bridge, old_state);
if (funcs) {
if (funcs->atomic_enable)
@@ -1343,7 +1354,7 @@ void drm_atomic_helper_commit_modeset_enables(struct drm_device *dev,
funcs->commit(encoder);
}
- drm_atomic_bridge_enable(encoder->bridge, old_state);
+ drm_atomic_bridge_chain_enable(bridge, old_state);
}
drm_atomic_helper_commit_writebacks(dev, old_state);
@@ -1403,12 +1414,12 @@ int drm_atomic_helper_wait_for_fences(struct drm_device *dev,
EXPORT_SYMBOL(drm_atomic_helper_wait_for_fences);
/**
- * drm_atomic_helper_wait_for_vblanks - wait for vblank on crtcs
+ * drm_atomic_helper_wait_for_vblanks - wait for vblank on CRTCs
* @dev: DRM device
* @old_state: atomic state object with old state structures
*
- * Helper to, after atomic commit, wait for vblanks on all effected
- * crtcs (ie. before cleaning up old framebuffers using
+ * Helper to, after atomic commit, wait for vblanks on all affected
+ * CRTCs (ie. before cleaning up old framebuffers using
* drm_atomic_helper_cleanup_planes()). It will only wait on CRTCs where the
* framebuffers have actually changed to optimize for the legacy cursor and
* plane update use-case.
@@ -1467,10 +1478,10 @@ EXPORT_SYMBOL(drm_atomic_helper_wait_for_vblanks);
* @dev: DRM device
* @old_state: atomic state object with old state structures
*
- * Helper to, after atomic commit, wait for page flips on all effected
+ * Helper to, after atomic commit, wait for page flips on all affected
* crtcs (ie. before cleaning up old framebuffers using
* drm_atomic_helper_cleanup_planes()). Compared to
- * drm_atomic_helper_wait_for_vblanks() this waits for the completion of on all
+ * drm_atomic_helper_wait_for_vblanks() this waits for the completion on all
* CRTCs, assuming that cursors-only updates are signalling their completion
* immediately (or using a different path).
*
@@ -1834,17 +1845,21 @@ EXPORT_SYMBOL(drm_atomic_helper_commit);
/**
* DOC: implementing nonblocking commit
*
- * Nonblocking atomic commits have to be implemented in the following sequence:
+ * Nonblocking atomic commits should use struct &drm_crtc_commit to sequence
+ * different operations against each other. Locks, especially struct
+ * &drm_modeset_lock, should not be held in worker threads or any other
+ * asynchronous context used to commit the hardware state.
+ *
+ * drm_atomic_helper_commit() implements the recommended sequence for
+ * nonblocking commits, using drm_atomic_helper_setup_commit() internally:
*
- * 1. Run drm_atomic_helper_prepare_planes() first. This is the only function
- * which commit needs to call which can fail, so we want to run it first and
+ * 1. Run drm_atomic_helper_prepare_planes(). Since this can fail and we
+ * need to propagate out of memory/VRAM errors to userspace, it must be called
* synchronously.
*
* 2. Synchronize with any outstanding nonblocking commit worker threads which
- * might be affected the new state update. This can be done by either cancelling
- * or flushing the work items, depending upon whether the driver can deal with
- * cancelled updates. Note that it is important to ensure that the framebuffer
- * cleanup is still done when cancelling.
+ * might be affected by the new state update. This is handled by
+ * drm_atomic_helper_setup_commit().
*
* Asynchronous workers need to have sufficient parallelism to be able to run
* different atomic commits on different CRTCs in parallel. The simplest way to
@@ -1855,21 +1870,29 @@ EXPORT_SYMBOL(drm_atomic_helper_commit);
* must be done as one global operation, and enabling or disabling a CRTC can
* take a long time. But even that is not required.
*
+ * IMPORTANT: A &drm_atomic_state update for multiple CRTCs is sequenced
+ * against all CRTCs therein. Therefore for atomic state updates which only flip
+ * planes the driver must not get the struct &drm_crtc_state of unrelated CRTCs
+ * in its atomic check code: this would prevent committing atomic updates to
+ * multiple CRTCs in parallel. In general, adding additional state structures
+ * should be avoided as much as possible, because this reduces parallelism in
+ * (nonblocking) commits, both due to locking and due to commit sequencing
+ * requirements.
+ *
* 3. The software state is updated synchronously with
* drm_atomic_helper_swap_state(). Doing this under the protection of all modeset
- * locks means concurrent callers never see inconsistent state. And doing this
- * while it's guaranteed that no relevant nonblocking worker runs means that
- * nonblocking workers do not need grab any locks. Actually they must not grab
- * locks, for otherwise the work flushing will deadlock.
+ * locks means concurrent callers never see inconsistent state. Note that commit
+ * workers do not hold any locks; their access is only coordinated through
+ * ordering. If workers accessed state only through the pointers in the
+ * free-standing state objects (currently not the case for any driver), even
+ * multiple pending commits could be in flight at the same time.
*
* 4. Schedule a work item to do all subsequent steps, using the split-out
* commit helpers: a) pre-plane commit b) plane commit c) post-plane commit and
* then cleaning up the framebuffers after the old framebuffer is no longer
- * being displayed.
- *
- * The above scheme is implemented in the atomic helper libraries in
- * drm_atomic_helper_commit() using a bunch of helper functions. See
- * drm_atomic_helper_setup_commit() for a starting point.
+ * being displayed. The scheduled work should synchronize against other workers
+ * using the &drm_crtc_commit infrastructure as needed. See
+ * drm_atomic_helper_setup_commit() for more details.
*/
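As a minimal sketch of the sequence above (the foo_ prefix is hypothetical), a driver can override the commit tail while keeping the helper's worker and stall handling; the body below mirrors the default drm_atomic_helper_commit_tail():

	static void foo_commit_tail(struct drm_atomic_state *old_state)
	{
		struct drm_device *dev = old_state->dev;

		/* Step 4: pre-plane, plane and post-plane commit. */
		drm_atomic_helper_commit_modeset_disables(dev, old_state);
		drm_atomic_helper_commit_planes(dev, old_state, 0);
		drm_atomic_helper_commit_modeset_enables(dev, old_state);

		drm_atomic_helper_fake_vblank(old_state);
		drm_atomic_helper_commit_hw_done(old_state);
		drm_atomic_helper_wait_for_vblanks(dev, old_state);

		/* Clean up only once the old framebuffers are off the screen. */
		drm_atomic_helper_cleanup_planes(dev, old_state);
	}

	static const struct drm_mode_config_helper_funcs foo_mode_config_helpers = {
		.atomic_commit_tail = foo_commit_tail,
	};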
static int stall_checks(struct drm_crtc *crtc, bool nonblock)
@@ -2098,7 +2121,7 @@ EXPORT_SYMBOL(drm_atomic_helper_setup_commit);
*
* This function waits for all preceding commits that touch the same CRTC as
* @old_state to both be committed to the hardware (as signalled by
- * drm_atomic_helper_commit_hw_done) and executed by the hardware (as signalled
+ * drm_atomic_helper_commit_hw_done()) and executed by the hardware (as signalled
* by calling drm_crtc_send_vblank_event() on the &drm_crtc_state.event).
*
* This is part of the atomic helper support for nonblocking commits, see
@@ -2185,7 +2208,7 @@ EXPORT_SYMBOL(drm_atomic_helper_wait_for_dependencies);
* drm_atomic_helper_fake_vblank - fake VBLANK events if needed
* @old_state: atomic state object with old state structures
*
- * This function walks all CRTCs and fake VBLANK events on those with
+ * This function walks all CRTCs and fakes VBLANK events on those with
* &drm_crtc_state.no_vblank set to true and &drm_crtc_state.event != NULL.
* The primary use of this function is writeback connectors working in oneshot
* mode and faking VBLANK events. In this case they only fake the VBLANK event
@@ -2381,7 +2404,7 @@ static bool plane_crtc_active(const struct drm_plane_state *state)
* @flags: flags for committing plane state
*
* This function commits the new plane state using the plane and atomic helper
- * functions for planes and crtcs. It assumes that the atomic state has already
+ * functions for planes and CRTCs. It assumes that the atomic state has already
* been pushed into the relevant object state pointers, since this step can no
* longer fail.
*
@@ -2502,15 +2525,15 @@ void drm_atomic_helper_commit_planes(struct drm_device *dev,
EXPORT_SYMBOL(drm_atomic_helper_commit_planes);
/**
- * drm_atomic_helper_commit_planes_on_crtc - commit plane state for a crtc
- * @old_crtc_state: atomic state object with the old crtc state
+ * drm_atomic_helper_commit_planes_on_crtc - commit plane state for a CRTC
+ * @old_crtc_state: atomic state object with the old CRTC state
*
* This function commits the new plane state using the plane and atomic helper
- * functions for planes on the specific crtc. It assumes that the atomic state
+ * functions for planes on the specific CRTC. It assumes that the atomic state
* has already been pushed into the relevant object state pointers, since this
* step can no longer fail.
*
- * This function is useful when plane updates should be done crtc-by-crtc
+ * This function is useful when plane updates should be done CRTC-by-CRTC
* instead of one global step like drm_atomic_helper_commit_planes() does.
*
* This function can only be safely used when planes are not allowed to move
@@ -2800,10 +2823,10 @@ EXPORT_SYMBOL(drm_atomic_helper_swap_state);
* @plane: plane object to update
* @crtc: owning CRTC of owning plane
* @fb: framebuffer to flip onto plane
- * @crtc_x: x offset of primary plane on crtc
- * @crtc_y: y offset of primary plane on crtc
- * @crtc_w: width of primary plane rectangle on crtc
- * @crtc_h: height of primary plane rectangle on crtc
+ * @crtc_x: x offset of primary plane on @crtc
+ * @crtc_y: y offset of primary plane on @crtc
+ * @crtc_w: width of primary plane rectangle on @crtc
+ * @crtc_h: height of primary plane rectangle on @crtc
* @src_x: x offset of @fb for panning
* @src_y: y offset of @fb for panning
* @src_w: width of source rectangle in @fb
@@ -2909,7 +2932,7 @@ EXPORT_SYMBOL(drm_atomic_helper_disable_plane);
* @set: mode set configuration
* @ctx: lock acquisition context
*
- * Provides a default crtc set_config handler using the atomic driver interface.
+ * Provides a default CRTC set_config handler using the atomic driver interface.
*
* NOTE: For backwards compatibility with old userspace this automatically
* resets the "link-status" property to GOOD, to force any link
@@ -3322,7 +3345,7 @@ static int page_flip_common(struct drm_atomic_state *state,
/**
* drm_atomic_helper_page_flip - execute a legacy page flip
- * @crtc: DRM crtc
+ * @crtc: DRM CRTC
* @fb: DRM framebuffer
* @event: optional DRM event to signal upon completion
* @flags: flip flags for non-vblank sync'ed updates
@@ -3366,7 +3389,7 @@ EXPORT_SYMBOL(drm_atomic_helper_page_flip);
/**
* drm_atomic_helper_page_flip_target - do page flip on target vblank period.
- * @crtc: DRM crtc
+ * @crtc: DRM CRTC
* @fb: DRM framebuffer
* @event: optional DRM event to signal upon completion
* @flags: flip flags for non-vblank sync'ed updates
diff --git a/drivers/gpu/drm/drm_atomic_state_helper.c b/drivers/gpu/drm/drm_atomic_state_helper.c
index d0a937fb0c56..7cf3cf936547 100644
--- a/drivers/gpu/drm/drm_atomic_state_helper.c
+++ b/drivers/gpu/drm/drm_atomic_state_helper.c
@@ -58,6 +58,22 @@
*/
/**
+ * __drm_atomic_helper_crtc_state_reset - reset the CRTC state
+ * @crtc_state: atomic CRTC state, must not be NULL
+ * @crtc: CRTC object, must not be NULL
+ *
+ * Initializes the newly allocated @crtc_state with default
+ * values. This is useful for drivers that subclass the CRTC state.
+ */
+void
+__drm_atomic_helper_crtc_state_reset(struct drm_crtc_state *crtc_state,
+ struct drm_crtc *crtc)
+{
+ crtc_state->crtc = crtc;
+}
+EXPORT_SYMBOL(__drm_atomic_helper_crtc_state_reset);
+
+/**
* __drm_atomic_helper_crtc_reset - reset state on CRTC
* @crtc: drm CRTC
* @crtc_state: CRTC state to assign
@@ -74,7 +90,7 @@ __drm_atomic_helper_crtc_reset(struct drm_crtc *crtc,
struct drm_crtc_state *crtc_state)
{
if (crtc_state)
- crtc_state->crtc = crtc;
+ __drm_atomic_helper_crtc_state_reset(crtc_state, crtc);
crtc->state = crtc_state;
}
@@ -212,23 +228,43 @@ void drm_atomic_helper_crtc_destroy_state(struct drm_crtc *crtc,
EXPORT_SYMBOL(drm_atomic_helper_crtc_destroy_state);
/**
- * __drm_atomic_helper_plane_reset - resets planes state to default values
+ * __drm_atomic_helper_plane_state_reset - resets plane state to default values
+ * @plane_state: atomic plane state, must not be NULL
* @plane: plane object, must not be NULL
- * @state: atomic plane state, must not be NULL
*
- * Initializes plane state to default. This is useful for drivers that subclass
- * the plane state.
+ * Initializes the newly allocated @plane_state with default
+ * values. This is useful for drivers that subclass the plane state.
*/
-void __drm_atomic_helper_plane_reset(struct drm_plane *plane,
- struct drm_plane_state *state)
+void __drm_atomic_helper_plane_state_reset(struct drm_plane_state *plane_state,
+ struct drm_plane *plane)
{
- state->plane = plane;
- state->rotation = DRM_MODE_ROTATE_0;
+ plane_state->plane = plane;
+ plane_state->rotation = DRM_MODE_ROTATE_0;
- state->alpha = DRM_BLEND_ALPHA_OPAQUE;
- state->pixel_blend_mode = DRM_MODE_BLEND_PREMULTI;
+ plane_state->alpha = DRM_BLEND_ALPHA_OPAQUE;
+ plane_state->pixel_blend_mode = DRM_MODE_BLEND_PREMULTI;
+}
+EXPORT_SYMBOL(__drm_atomic_helper_plane_state_reset);
- plane->state = state;
+/**
+ * __drm_atomic_helper_plane_reset - reset state on plane
+ * @plane: drm plane
+ * @plane_state: plane state to assign
+ *
+ * Initializes the newly allocated @plane_state and assigns it to
+ * the &drm_plane.state pointer of @plane, usually required when
+ * initializing the driver or when called from the &drm_plane_funcs.reset
+ * hook.
+ *
+ * This is useful for drivers that subclass the plane state.
+ */
+void __drm_atomic_helper_plane_reset(struct drm_plane *plane,
+ struct drm_plane_state *plane_state)
+{
+ if (plane_state)
+ __drm_atomic_helper_plane_state_reset(plane_state, plane);
+
+ plane->state = plane_state;
}
EXPORT_SYMBOL(__drm_atomic_helper_plane_reset);
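As a hedged sketch of the intended use, a driver that subclasses the plane state could wire up its &drm_plane_funcs.reset hook as below (struct foo_plane_state and its extra member are hypothetical); the CRTC and connector variants follow the same pattern:

	struct foo_plane_state {
		struct drm_plane_state base;
		u32 foo_hw_format;	/* hypothetical driver-private field */
	};

	static void foo_plane_reset(struct drm_plane *plane)
	{
		struct foo_plane_state *state;

		if (plane->state)
			__drm_atomic_helper_plane_destroy_state(plane->state);
		/* base is the first member, so this frees the whole subclass. */
		kfree(plane->state);
		plane->state = NULL;

		state = kzalloc(sizeof(*state), GFP_KERNEL);
		if (state)
			__drm_atomic_helper_plane_reset(plane, &state->base);
	}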
@@ -336,6 +372,22 @@ void drm_atomic_helper_plane_destroy_state(struct drm_plane *plane,
EXPORT_SYMBOL(drm_atomic_helper_plane_destroy_state);
/**
+ * __drm_atomic_helper_connector_state_reset - reset the connector state
+ * @conn_state: atomic connector state, must not be NULL
+ * @connector: connector object, must not be NULL
+ *
+ * Initializes the newly allocated @conn_state with default
+ * values. This is useful for drivers that subclass the connector state.
+ */
+void
+__drm_atomic_helper_connector_state_reset(struct drm_connector_state *conn_state,
+ struct drm_connector *connector)
+{
+ conn_state->connector = connector;
+}
+EXPORT_SYMBOL(__drm_atomic_helper_connector_state_reset);
+
+/**
* __drm_atomic_helper_connector_reset - reset state on connector
* @connector: drm connector
* @conn_state: connector state to assign
@@ -352,7 +404,7 @@ __drm_atomic_helper_connector_reset(struct drm_connector *connector,
struct drm_connector_state *conn_state)
{
if (conn_state)
- conn_state->connector = connector;
+ __drm_atomic_helper_connector_state_reset(conn_state, connector);
connector->state = conn_state;
}
diff --git a/drivers/gpu/drm/drm_atomic_uapi.c b/drivers/gpu/drm/drm_atomic_uapi.c
index 0d466d3b0809..a1e5e262bae2 100644
--- a/drivers/gpu/drm/drm_atomic_uapi.c
+++ b/drivers/gpu/drm/drm_atomic_uapi.c
@@ -160,12 +160,12 @@ int drm_atomic_set_mode_prop_for_crtc(struct drm_crtc_state *state,
EXPORT_SYMBOL(drm_atomic_set_mode_prop_for_crtc);
/**
- * drm_atomic_set_crtc_for_plane - set crtc for plane
+ * drm_atomic_set_crtc_for_plane - set CRTC for plane
* @plane_state: the plane whose incoming state to update
- * @crtc: crtc to use for the plane
+ * @crtc: CRTC to use for the plane
*
- * Changing the assigned crtc for a plane requires us to grab the lock and state
- * for the new crtc, as needed. This function takes care of all these details
+ * Changing the assigned CRTC for a plane requires us to grab the lock and state
+ * for the new CRTC, as needed. This function takes care of all these details
* besides updating the pointer in the state object itself.
*
* Returns:
@@ -279,12 +279,12 @@ drm_atomic_set_fence_for_plane(struct drm_plane_state *plane_state,
EXPORT_SYMBOL(drm_atomic_set_fence_for_plane);
/**
- * drm_atomic_set_crtc_for_connector - set crtc for connector
+ * drm_atomic_set_crtc_for_connector - set CRTC for connector
* @conn_state: atomic state object for the connector
- * @crtc: crtc to use for the connector
+ * @crtc: CRTC to use for the connector
*
- * Changing the assigned crtc for a connector requires us to grab the lock and
- * state for the new crtc, as needed. This function takes care of all these
+ * Changing the assigned CRTC for a connector requires us to grab the lock and
+ * state for the new CRTC, as needed. This function takes care of all these
* details besides updating the pointer in the state object itself.
*
* Returns:
diff --git a/drivers/gpu/drm/drm_bridge.c b/drivers/gpu/drm/drm_bridge.c
index cba537c99e43..c2cf0c90fa26 100644
--- a/drivers/gpu/drm/drm_bridge.c
+++ b/drivers/gpu/drm/drm_bridge.c
@@ -55,7 +55,7 @@
* just provide additional hooks to get the desired output at the end of the
* encoder chain.
*
- * Bridges can also be chained up using the &drm_bridge.next pointer.
+ * Bridges can also be chained up using the &drm_bridge.chain_node field.
*
* Both legacy CRTC helpers and the new atomic modeset helpers support bridges.
*/
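For illustration, a minimal sketch of building a two-bridge chain behind an encoder (the foo_ function and its arguments are hypothetical, only drm_bridge_attach() is real); the first call attaches the bridge closest to the encoder, the second appends to it via the previous-bridge argument:

	static int foo_attach_bridges(struct drm_encoder *encoder,
				      struct drm_bridge *first,
				      struct drm_bridge *second)
	{
		int ret;

		ret = drm_bridge_attach(encoder, first, NULL);
		if (ret)
			return ret;

		/* 'first' becomes the previous bridge of 'second'. */
		return drm_bridge_attach(encoder, second, first);
	}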
@@ -128,20 +128,21 @@ int drm_bridge_attach(struct drm_encoder *encoder, struct drm_bridge *bridge,
bridge->dev = encoder->dev;
bridge->encoder = encoder;
+ if (previous)
+ list_add(&bridge->chain_node, &previous->chain_node);
+ else
+ list_add(&bridge->chain_node, &encoder->bridge_chain);
+
if (bridge->funcs->attach) {
ret = bridge->funcs->attach(bridge);
if (ret < 0) {
+ list_del(&bridge->chain_node);
bridge->dev = NULL;
bridge->encoder = NULL;
return ret;
}
}
- if (previous)
- previous->next = bridge;
- else
- encoder->bridge = bridge;
-
return 0;
}
EXPORT_SYMBOL(drm_bridge_attach);
@@ -157,6 +158,7 @@ void drm_bridge_detach(struct drm_bridge *bridge)
if (bridge->funcs->detach)
bridge->funcs->detach(bridge);
+ list_del(&bridge->chain_node);
bridge->dev = NULL;
}
@@ -172,8 +174,8 @@ void drm_bridge_detach(struct drm_bridge *bridge)
*/
/**
- * drm_bridge_mode_fixup - fixup proposed mode for all bridges in the
- * encoder chain
+ * drm_bridge_chain_mode_fixup - fixup proposed mode for all bridges in the
+ * encoder chain
* @bridge: bridge control structure
* @mode: desired mode to be set for the bridge
* @adjusted_mode: updated mode that works for this bridge
@@ -186,27 +188,31 @@ void drm_bridge_detach(struct drm_bridge *bridge)
* RETURNS:
* true on success, false on failure
*/
-bool drm_bridge_mode_fixup(struct drm_bridge *bridge,
- const struct drm_display_mode *mode,
- struct drm_display_mode *adjusted_mode)
+bool drm_bridge_chain_mode_fixup(struct drm_bridge *bridge,
+ const struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
{
- bool ret = true;
+ struct drm_encoder *encoder;
if (!bridge)
return true;
- if (bridge->funcs->mode_fixup)
- ret = bridge->funcs->mode_fixup(bridge, mode, adjusted_mode);
+ encoder = bridge->encoder;
+ list_for_each_entry_from(bridge, &encoder->bridge_chain, chain_node) {
+ if (!bridge->funcs->mode_fixup)
+ continue;
- ret = ret && drm_bridge_mode_fixup(bridge->next, mode, adjusted_mode);
+ if (!bridge->funcs->mode_fixup(bridge, mode, adjusted_mode))
+ return false;
+ }
- return ret;
+ return true;
}
-EXPORT_SYMBOL(drm_bridge_mode_fixup);
+EXPORT_SYMBOL(drm_bridge_chain_mode_fixup);
/**
- * drm_bridge_mode_valid - validate the mode against all bridges in the
- * encoder chain.
+ * drm_bridge_chain_mode_valid - validate the mode against all bridges in the
+ * encoder chain.
* @bridge: bridge control structure
* @mode: desired mode to be validated
*
@@ -219,26 +225,33 @@ EXPORT_SYMBOL(drm_bridge_mode_fixup);
* RETURNS:
* MODE_OK on success, drm_mode_status Enum error code on failure
*/
-enum drm_mode_status drm_bridge_mode_valid(struct drm_bridge *bridge,
- const struct drm_display_mode *mode)
+enum drm_mode_status
+drm_bridge_chain_mode_valid(struct drm_bridge *bridge,
+ const struct drm_display_mode *mode)
{
- enum drm_mode_status ret = MODE_OK;
+ struct drm_encoder *encoder;
if (!bridge)
- return ret;
+ return MODE_OK;
- if (bridge->funcs->mode_valid)
- ret = bridge->funcs->mode_valid(bridge, mode);
+ encoder = bridge->encoder;
+ list_for_each_entry_from(bridge, &encoder->bridge_chain, chain_node) {
+ enum drm_mode_status ret;
- if (ret != MODE_OK)
- return ret;
+ if (!bridge->funcs->mode_valid)
+ continue;
- return drm_bridge_mode_valid(bridge->next, mode);
+ ret = bridge->funcs->mode_valid(bridge, mode);
+ if (ret != MODE_OK)
+ return ret;
+ }
+
+ return MODE_OK;
}
-EXPORT_SYMBOL(drm_bridge_mode_valid);
+EXPORT_SYMBOL(drm_bridge_chain_mode_valid);
/**
- * drm_bridge_disable - disables all bridges in the encoder chain
+ * drm_bridge_chain_disable - disables all bridges in the encoder chain
* @bridge: bridge control structure
*
* Calls &drm_bridge_funcs.disable op for all the bridges in the encoder
@@ -247,20 +260,28 @@ EXPORT_SYMBOL(drm_bridge_mode_valid);
*
* Note: the bridge passed should be the one closest to the encoder
*/
-void drm_bridge_disable(struct drm_bridge *bridge)
+void drm_bridge_chain_disable(struct drm_bridge *bridge)
{
+ struct drm_encoder *encoder;
+ struct drm_bridge *iter;
+
if (!bridge)
return;
- drm_bridge_disable(bridge->next);
+ encoder = bridge->encoder;
+ list_for_each_entry_reverse(iter, &encoder->bridge_chain, chain_node) {
+ if (iter->funcs->disable)
+ iter->funcs->disable(iter);
- if (bridge->funcs->disable)
- bridge->funcs->disable(bridge);
+ if (iter == bridge)
+ break;
+ }
}
-EXPORT_SYMBOL(drm_bridge_disable);
+EXPORT_SYMBOL(drm_bridge_chain_disable);
/**
- * drm_bridge_post_disable - cleans up after disabling all bridges in the encoder chain
+ * drm_bridge_chain_post_disable - cleans up after disabling all bridges in the
+ * encoder chain
* @bridge: bridge control structure
*
* Calls &drm_bridge_funcs.post_disable op for all the bridges in the
@@ -269,47 +290,53 @@ EXPORT_SYMBOL(drm_bridge_disable);
*
* Note: the bridge passed should be the one closest to the encoder
*/
-void drm_bridge_post_disable(struct drm_bridge *bridge)
+void drm_bridge_chain_post_disable(struct drm_bridge *bridge)
{
+ struct drm_encoder *encoder;
+
if (!bridge)
return;
- if (bridge->funcs->post_disable)
- bridge->funcs->post_disable(bridge);
-
- drm_bridge_post_disable(bridge->next);
+ encoder = bridge->encoder;
+ list_for_each_entry_from(bridge, &encoder->bridge_chain, chain_node) {
+ if (bridge->funcs->post_disable)
+ bridge->funcs->post_disable(bridge);
+ }
}
-EXPORT_SYMBOL(drm_bridge_post_disable);
+EXPORT_SYMBOL(drm_bridge_chain_post_disable);
/**
- * drm_bridge_mode_set - set proposed mode for all bridges in the
- * encoder chain
+ * drm_bridge_chain_mode_set - set proposed mode for all bridges in the
+ * encoder chain
* @bridge: bridge control structure
- * @mode: desired mode to be set for the bridge
- * @adjusted_mode: updated mode that works for this bridge
+ * @mode: desired mode to be set for the encoder chain
+ * @adjusted_mode: updated mode that works for this encoder chain
*
* Calls &drm_bridge_funcs.mode_set op for all the bridges in the
* encoder chain, starting from the first bridge to the last.
*
* Note: the bridge passed should be the one closest to the encoder
*/
-void drm_bridge_mode_set(struct drm_bridge *bridge,
- const struct drm_display_mode *mode,
- const struct drm_display_mode *adjusted_mode)
+void drm_bridge_chain_mode_set(struct drm_bridge *bridge,
+ const struct drm_display_mode *mode,
+ const struct drm_display_mode *adjusted_mode)
{
+ struct drm_encoder *encoder;
+
if (!bridge)
return;
- if (bridge->funcs->mode_set)
- bridge->funcs->mode_set(bridge, mode, adjusted_mode);
-
- drm_bridge_mode_set(bridge->next, mode, adjusted_mode);
+ encoder = bridge->encoder;
+ list_for_each_entry_from(bridge, &encoder->bridge_chain, chain_node) {
+ if (bridge->funcs->mode_set)
+ bridge->funcs->mode_set(bridge, mode, adjusted_mode);
+ }
}
-EXPORT_SYMBOL(drm_bridge_mode_set);
+EXPORT_SYMBOL(drm_bridge_chain_mode_set);
/**
- * drm_bridge_pre_enable - prepares for enabling all
- * bridges in the encoder chain
+ * drm_bridge_chain_pre_enable - prepares for enabling all bridges in the
+ * encoder chain
* @bridge: bridge control structure
*
* Calls &drm_bridge_funcs.pre_enable op for all the bridges in the encoder
@@ -318,20 +345,24 @@ EXPORT_SYMBOL(drm_bridge_mode_set);
*
* Note: the bridge passed should be the one closest to the encoder
*/
-void drm_bridge_pre_enable(struct drm_bridge *bridge)
+void drm_bridge_chain_pre_enable(struct drm_bridge *bridge)
{
+ struct drm_encoder *encoder;
+ struct drm_bridge *iter;
+
if (!bridge)
return;
- drm_bridge_pre_enable(bridge->next);
-
- if (bridge->funcs->pre_enable)
- bridge->funcs->pre_enable(bridge);
+ encoder = bridge->encoder;
+ list_for_each_entry_reverse(iter, &encoder->bridge_chain, chain_node) {
+ if (iter->funcs->pre_enable)
+ iter->funcs->pre_enable(iter);
+ }
}
-EXPORT_SYMBOL(drm_bridge_pre_enable);
+EXPORT_SYMBOL(drm_bridge_chain_pre_enable);
/**
- * drm_bridge_enable - enables all bridges in the encoder chain
+ * drm_bridge_chain_enable - enables all bridges in the encoder chain
* @bridge: bridge control structure
*
* Calls &drm_bridge_funcs.enable op for all the bridges in the encoder
@@ -340,22 +371,25 @@ EXPORT_SYMBOL(drm_bridge_pre_enable);
*
* Note that the bridge passed should be the one closest to the encoder
*/
-void drm_bridge_enable(struct drm_bridge *bridge)
+void drm_bridge_chain_enable(struct drm_bridge *bridge)
{
+ struct drm_encoder *encoder;
+
if (!bridge)
return;
- if (bridge->funcs->enable)
- bridge->funcs->enable(bridge);
-
- drm_bridge_enable(bridge->next);
+ encoder = bridge->encoder;
+ list_for_each_entry_from(bridge, &encoder->bridge_chain, chain_node) {
+ if (bridge->funcs->enable)
+ bridge->funcs->enable(bridge);
+ }
}
-EXPORT_SYMBOL(drm_bridge_enable);
+EXPORT_SYMBOL(drm_bridge_chain_enable);
/**
- * drm_atomic_bridge_disable - disables all bridges in the encoder chain
+ * drm_atomic_bridge_chain_disable - disables all bridges in the encoder chain
* @bridge: bridge control structure
- * @state: atomic state being committed
+ * @old_state: old atomic state
*
* Calls &drm_bridge_funcs.atomic_disable (falls back on
* &drm_bridge_funcs.disable) op for all the bridges in the encoder chain,
@@ -364,26 +398,33 @@ EXPORT_SYMBOL(drm_bridge_enable);
*
* Note: the bridge passed should be the one closest to the encoder
*/
-void drm_atomic_bridge_disable(struct drm_bridge *bridge,
- struct drm_atomic_state *state)
+void drm_atomic_bridge_chain_disable(struct drm_bridge *bridge,
+ struct drm_atomic_state *old_state)
{
+ struct drm_encoder *encoder;
+ struct drm_bridge *iter;
+
if (!bridge)
return;
- drm_atomic_bridge_disable(bridge->next, state);
+ encoder = bridge->encoder;
+ list_for_each_entry_reverse(iter, &encoder->bridge_chain, chain_node) {
+ if (iter->funcs->atomic_disable)
+ iter->funcs->atomic_disable(iter, old_state);
+ else if (iter->funcs->disable)
+ iter->funcs->disable(iter);
- if (bridge->funcs->atomic_disable)
- bridge->funcs->atomic_disable(bridge, state);
- else if (bridge->funcs->disable)
- bridge->funcs->disable(bridge);
+ if (iter == bridge)
+ break;
+ }
}
-EXPORT_SYMBOL(drm_atomic_bridge_disable);
+EXPORT_SYMBOL(drm_atomic_bridge_chain_disable);
/**
- * drm_atomic_bridge_post_disable - cleans up after disabling all bridges in the
- * encoder chain
+ * drm_atomic_bridge_chain_post_disable - cleans up after disabling all bridges
+ * in the encoder chain
* @bridge: bridge control structure
- * @state: atomic state being committed
+ * @old_state: old atomic state
*
* Calls &drm_bridge_funcs.atomic_post_disable (falls back on
* &drm_bridge_funcs.post_disable) op for all the bridges in the encoder chain,
@@ -392,26 +433,29 @@ EXPORT_SYMBOL(drm_atomic_bridge_disable);
*
* Note: the bridge passed should be the one closest to the encoder
*/
-void drm_atomic_bridge_post_disable(struct drm_bridge *bridge,
- struct drm_atomic_state *state)
+void drm_atomic_bridge_chain_post_disable(struct drm_bridge *bridge,
+ struct drm_atomic_state *old_state)
{
+ struct drm_encoder *encoder;
+
if (!bridge)
return;
- if (bridge->funcs->atomic_post_disable)
- bridge->funcs->atomic_post_disable(bridge, state);
- else if (bridge->funcs->post_disable)
- bridge->funcs->post_disable(bridge);
-
- drm_atomic_bridge_post_disable(bridge->next, state);
+ encoder = bridge->encoder;
+ list_for_each_entry_from(bridge, &encoder->bridge_chain, chain_node) {
+ if (bridge->funcs->atomic_post_disable)
+ bridge->funcs->atomic_post_disable(bridge, old_state);
+ else if (bridge->funcs->post_disable)
+ bridge->funcs->post_disable(bridge);
+ }
}
-EXPORT_SYMBOL(drm_atomic_bridge_post_disable);
+EXPORT_SYMBOL(drm_atomic_bridge_chain_post_disable);
/**
- * drm_atomic_bridge_pre_enable - prepares for enabling all bridges in the
- * encoder chain
+ * drm_atomic_bridge_chain_pre_enable - prepares for enabling all bridges in
+ * the encoder chain
* @bridge: bridge control structure
- * @state: atomic state being committed
+ * @old_state: old atomic state
*
* Calls &drm_bridge_funcs.atomic_pre_enable (falls back on
* &drm_bridge_funcs.pre_enable) op for all the bridges in the encoder chain,
@@ -420,25 +464,32 @@ EXPORT_SYMBOL(drm_atomic_bridge_post_disable);
*
* Note: the bridge passed should be the one closest to the encoder
*/
-void drm_atomic_bridge_pre_enable(struct drm_bridge *bridge,
- struct drm_atomic_state *state)
+void drm_atomic_bridge_chain_pre_enable(struct drm_bridge *bridge,
+ struct drm_atomic_state *old_state)
{
+ struct drm_encoder *encoder;
+ struct drm_bridge *iter;
+
if (!bridge)
return;
- drm_atomic_bridge_pre_enable(bridge->next, state);
+ encoder = bridge->encoder;
+ list_for_each_entry_reverse(iter, &encoder->bridge_chain, chain_node) {
+ if (iter->funcs->atomic_pre_enable)
+ iter->funcs->atomic_pre_enable(iter, old_state);
+ else if (iter->funcs->pre_enable)
+ iter->funcs->pre_enable(iter);
- if (bridge->funcs->atomic_pre_enable)
- bridge->funcs->atomic_pre_enable(bridge, state);
- else if (bridge->funcs->pre_enable)
- bridge->funcs->pre_enable(bridge);
+ if (iter == bridge)
+ break;
+ }
}
-EXPORT_SYMBOL(drm_atomic_bridge_pre_enable);
+EXPORT_SYMBOL(drm_atomic_bridge_chain_pre_enable);
/**
- * drm_atomic_bridge_enable - enables all bridges in the encoder chain
+ * drm_atomic_bridge_chain_enable - enables all bridges in the encoder chain
* @bridge: bridge control structure
- * @state: atomic state being committed
+ * @old_state: old atomic state
*
* Calls &drm_bridge_funcs.atomic_enable (falls back on
* &drm_bridge_funcs.enable) op for all the bridges in the encoder chain,
@@ -447,20 +498,23 @@ EXPORT_SYMBOL(drm_atomic_bridge_pre_enable);
*
* Note: the bridge passed should be the one closest to the encoder
*/
-void drm_atomic_bridge_enable(struct drm_bridge *bridge,
- struct drm_atomic_state *state)
+void drm_atomic_bridge_chain_enable(struct drm_bridge *bridge,
+ struct drm_atomic_state *old_state)
{
+ struct drm_encoder *encoder;
+
if (!bridge)
return;
- if (bridge->funcs->atomic_enable)
- bridge->funcs->atomic_enable(bridge, state);
- else if (bridge->funcs->enable)
- bridge->funcs->enable(bridge);
-
- drm_atomic_bridge_enable(bridge->next, state);
+ encoder = bridge->encoder;
+ list_for_each_entry_from(bridge, &encoder->bridge_chain, chain_node) {
+ if (bridge->funcs->atomic_enable)
+ bridge->funcs->atomic_enable(bridge, old_state);
+ else if (bridge->funcs->enable)
+ bridge->funcs->enable(bridge);
+ }
}
-EXPORT_SYMBOL(drm_atomic_bridge_enable);
+EXPORT_SYMBOL(drm_atomic_bridge_chain_enable);
#ifdef CONFIG_OF
/**
diff --git a/drivers/gpu/drm/drm_client.c b/drivers/gpu/drm/drm_client.c
index d9a2e3695525..b031b45aa8ef 100644
--- a/drivers/gpu/drm/drm_client.c
+++ b/drivers/gpu/drm/drm_client.c
@@ -150,7 +150,7 @@ void drm_client_release(struct drm_client_dev *client)
{
struct drm_device *dev = client->dev;
- DRM_DEV_DEBUG_KMS(dev->dev, "%s\n", client->name);
+ drm_dbg_kms(dev, "%s\n", client->name);
drm_client_modeset_free(client);
drm_client_close(client);
@@ -203,7 +203,7 @@ void drm_client_dev_hotplug(struct drm_device *dev)
continue;
ret = client->funcs->hotplug(client);
- DRM_DEV_DEBUG_KMS(dev->dev, "%s: ret=%d\n", client->name, ret);
+ drm_dbg_kms(dev, "%s: ret=%d\n", client->name, ret);
}
mutex_unlock(&dev->clientlist_mutex);
}
@@ -223,7 +223,7 @@ void drm_client_dev_restore(struct drm_device *dev)
continue;
ret = client->funcs->restore(client);
- DRM_DEV_DEBUG_KMS(dev->dev, "%s: ret=%d\n", client->name, ret);
+ drm_dbg_kms(dev, "%s: ret=%d\n", client->name, ret);
if (!ret) /* The first one to return zero gets the privilege to restore */
break;
}
@@ -351,8 +351,8 @@ static void drm_client_buffer_rmfb(struct drm_client_buffer *buffer)
ret = drm_mode_rmfb(buffer->client->dev, buffer->fb->base.id, buffer->client->file);
if (ret)
- DRM_DEV_ERROR(buffer->client->dev->dev,
- "Error removing FB:%u (%d)\n", buffer->fb->base.id, ret);
+ drm_err(buffer->client->dev,
+ "Error removing FB:%u (%d)\n", buffer->fb->base.id, ret);
buffer->fb = NULL;
}
diff --git a/drivers/gpu/drm/drm_client_modeset.c b/drivers/gpu/drm/drm_client_modeset.c
index 895b73f23079..6d4a29e99ae2 100644
--- a/drivers/gpu/drm/drm_client_modeset.c
+++ b/drivers/gpu/drm/drm_client_modeset.c
@@ -115,6 +115,33 @@ drm_client_find_modeset(struct drm_client_dev *client, struct drm_crtc *crtc)
}
static struct drm_display_mode *
+drm_connector_get_tiled_mode(struct drm_connector *connector)
+{
+ struct drm_display_mode *mode;
+
+ list_for_each_entry(mode, &connector->modes, head) {
+ if (mode->hdisplay == connector->tile_h_size &&
+ mode->vdisplay == connector->tile_v_size)
+ return mode;
+ }
+ return NULL;
+}
+
+static struct drm_display_mode *
+drm_connector_fallback_non_tiled_mode(struct drm_connector *connector)
+{
+ struct drm_display_mode *mode;
+
+ list_for_each_entry(mode, &connector->modes, head) {
+ if (mode->hdisplay == connector->tile_h_size &&
+ mode->vdisplay == connector->tile_v_size)
+ continue;
+ return mode;
+ }
+ return NULL;
+}
+
+static struct drm_display_mode *
drm_connector_has_preferred_mode(struct drm_connector *connector, int width, int height)
{
struct drm_display_mode *mode;
@@ -348,8 +375,15 @@ static bool drm_client_target_preferred(struct drm_connector **connectors,
struct drm_connector *connector;
u64 conn_configured = 0;
int tile_pass = 0;
+ int num_tiled_conns = 0;
int i;
+ for (i = 0; i < connector_count; i++) {
+ if (connectors[i]->has_tile &&
+ connectors[i]->status == connector_status_connected)
+ num_tiled_conns++;
+ }
+
retry:
for (i = 0; i < connector_count; i++) {
connector = connectors[i];
@@ -399,6 +433,28 @@ retry:
list_for_each_entry(modes[i], &connector->modes, head)
break;
}
+ /*
+ * In case of tiled modes, if not all tiles are present yet, fall back
+ * to the first available non-tiled mode. Once all tiles are present,
+ * try to find the tiled mode for all of them. If the tiled mode is
+ * unavailable due to fbcon size limitations, use the first non-tiled
+ * mode only for tile 0,0 and set no mode for all other tiles.
+ */
+ if (connector->has_tile) {
+ if (num_tiled_conns <
+ connector->num_h_tile * connector->num_v_tile ||
+ (connector->tile_h_loc == 0 &&
+ connector->tile_v_loc == 0 &&
+ !drm_connector_get_tiled_mode(connector))) {
+ DRM_DEBUG_KMS("Falling back to non tiled mode on Connector %d\n",
+ connector->base.id);
+ modes[i] = drm_connector_fallback_non_tiled_mode(connector);
+ } else {
+ modes[i] = drm_connector_get_tiled_mode(connector);
+ }
+ }
+
DRM_DEBUG_KMS("found mode %s\n", modes[i] ? modes[i]->name :
"none");
conn_configured |= BIT_ULL(i);
@@ -515,6 +571,7 @@ static bool drm_client_firmware_config(struct drm_client_dev *client,
bool fallback = true, ret = true;
int num_connectors_enabled = 0;
int num_connectors_detected = 0;
+ int num_tiled_conns = 0;
struct drm_modeset_acquire_ctx ctx;
if (!drm_drv_uses_atomic_modeset(dev))
@@ -532,6 +589,11 @@ static bool drm_client_firmware_config(struct drm_client_dev *client,
memcpy(save_enabled, enabled, count);
mask = GENMASK(count - 1, 0);
conn_configured = 0;
+ for (i = 0; i < count; i++) {
+ if (connectors[i]->has_tile &&
+ connectors[i]->status == connector_status_connected)
+ num_tiled_conns++;
+ }
retry:
conn_seq = conn_configured;
for (i = 0; i < count; i++) {
@@ -631,6 +693,16 @@ retry:
connector->name);
modes[i] = &connector->state->crtc->mode;
}
+ /*
+ * In case of tiled modes, if not all tiles are present,
+ * fall back to a non-tiled mode.
+ */
+ if (connector->has_tile &&
+ num_tiled_conns < connector->num_h_tile * connector->num_v_tile) {
+ DRM_DEBUG_KMS("Falling back to non tiled mode on Connector %d\n",
+ connector->base.id);
+ modes[i] = drm_connector_fallback_non_tiled_mode(connector);
+ }
crtcs[i] = new_crtc;
DRM_DEBUG_KMS("connector %s on [CRTC:%d:%s]: %dx%d%s\n",
diff --git a/drivers/gpu/drm/drm_color_mgmt.c b/drivers/gpu/drm/drm_color_mgmt.c
index 4ce5c6d8de99..c93123ff7c21 100644
--- a/drivers/gpu/drm/drm_color_mgmt.c
+++ b/drivers/gpu/drm/drm_color_mgmt.c
@@ -109,28 +109,38 @@
*/
/**
- * drm_color_lut_extract - clamp and round LUT entries
+ * drm_color_ctm_s31_32_to_qm_n - convert and clamp S31.32 sign-magnitude to Qm.n
+ *
* @user_input: input value
- * @bit_precision: number of bits the hw LUT supports
+ * @m: number of integer bits, including the sign bit; only m <= 32 is supported
+ * @n: number of fractional bits; only n <= 32 is supported
+ *
+ * Convert and clamp S31.32 sign-magnitude to Qm.n (signed 2's complement).
+ * The sign bit BIT(m+n-1) and above are 0 for positive values and 1 for
+ * negative ones; the range of representable values is [-2^(m-1), 2^(m-1) - 2^-n].
+ *
+ * For example, a Q3.12 format number:
+ * - required bits: 3 + 12 = 15 bits
+ * - range: [-2^2, 2^2 - 2^-12]
*
- * Extract a degamma/gamma LUT value provided by user (in the form of
- * &drm_color_lut entries) and round it to the precision supported by the
- * hardware.
+ * NOTE: m can be zero if all bits are used to represent the fractional
+ * part, as in Q0.32.
*/
-uint32_t drm_color_lut_extract(uint32_t user_input, uint32_t bit_precision)
+u64 drm_color_ctm_s31_32_to_qm_n(u64 user_input, u32 m, u32 n)
{
- uint32_t val = user_input;
- uint32_t max = 0xffff >> (16 - bit_precision);
+ u64 mag = (user_input & ~BIT_ULL(63)) >> (32 - n);
+ bool negative = !!(user_input & BIT_ULL(63));
+ s64 val;
- /* Round only if we're not using full precision. */
- if (bit_precision < 16) {
- val += 1UL << (16 - bit_precision - 1);
- val >>= 16 - bit_precision;
- }
+ WARN_ON(m > 32 || n > 32);
+
+ val = clamp_val(mag, 0, negative ?
+ BIT_ULL(n + m - 1) : BIT_ULL(n + m - 1) - 1);
- return clamp_val(val, 0, max);
+ return negative ? -val : val;
}
-EXPORT_SYMBOL(drm_color_lut_extract);
+EXPORT_SYMBOL(drm_color_ctm_s31_32_to_qm_n);
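For illustration, a hedged sketch of a driver using this helper to program a colour transformation matrix whose hardware coefficients are assumed to be Q3.12; the register layout and the foo_ name are hypothetical, while struct drm_color_ctm (nine S31.32 values) is the standard uapi type:

	static void foo_load_ctm(u32 __iomem *regs, const struct drm_color_ctm *ctm)
	{
		int i;

		for (i = 0; i < ARRAY_SIZE(ctm->matrix); i++) {
			/* S31.32 sign-magnitude from userspace -> Q3.12. */
			s64 coeff = drm_color_ctm_s31_32_to_qm_n(ctm->matrix[i], 3, 12);

			/* Q3.12 needs 15 bits; write it as a 16-bit two's complement value. */
			writel(coeff & 0xffff, regs + i);
		}
	}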
/**
* drm_crtc_enable_color_mgmt - enable color management properties
diff --git a/drivers/gpu/drm/drm_crtc_helper.c b/drivers/gpu/drm/drm_crtc_helper.c
index 499b05aaccfc..93a4eec429e8 100644
--- a/drivers/gpu/drm/drm_crtc_helper.c
+++ b/drivers/gpu/drm/drm_crtc_helper.c
@@ -48,6 +48,8 @@
#include <drm/drm_print.h>
#include <drm/drm_vblank.h>
+#include "drm_crtc_helper_internal.h"
+
/**
* DOC: overview
*
diff --git a/drivers/gpu/drm/drm_debugfs_crc.c b/drivers/gpu/drm/drm_debugfs_crc.c
index ca3c55c6b815..e22b812c4b80 100644
--- a/drivers/gpu/drm/drm_debugfs_crc.c
+++ b/drivers/gpu/drm/drm_debugfs_crc.c
@@ -140,8 +140,8 @@ static ssize_t crc_control_write(struct file *file, const char __user *ubuf,
if (IS_ERR(source))
return PTR_ERR(source);
- if (source[len] == '\n')
- source[len] = '\0';
+ if (source[len - 1] == '\n')
+ source[len - 1] = '\0';
ret = crtc->funcs->verify_crc_source(crtc, source, &values_cnt);
if (ret)
@@ -258,6 +258,11 @@ static int crtc_crc_release(struct inode *inode, struct file *filep)
struct drm_crtc *crtc = filep->f_inode->i_private;
struct drm_crtc_crc *crc = &crtc->crc;
+ /* terminate the infinite while loop in 'drm_dp_aux_crc_work' if it is running */
+ spin_lock_irq(&crc->lock);
+ crc->opened = false;
+ spin_unlock_irq(&crc->lock);
+
crtc->funcs->set_crc_source(crtc, NULL);
spin_lock_irq(&crc->lock);
diff --git a/drivers/gpu/drm/drm_dp_aux_dev.c b/drivers/gpu/drm/drm_dp_aux_dev.c
index 0cfb386754c3..2510717d5a08 100644
--- a/drivers/gpu/drm/drm_dp_aux_dev.c
+++ b/drivers/gpu/drm/drm_dp_aux_dev.c
@@ -163,11 +163,7 @@ static ssize_t auxdev_read_iter(struct kiocb *iocb, struct iov_iter *to)
break;
}
- if (aux_dev->aux->is_remote)
- res = drm_dp_mst_dpcd_read(aux_dev->aux, pos, buf,
- todo);
- else
- res = drm_dp_dpcd_read(aux_dev->aux, pos, buf, todo);
+ res = drm_dp_dpcd_read(aux_dev->aux, pos, buf, todo);
if (res <= 0)
break;
@@ -215,11 +211,7 @@ static ssize_t auxdev_write_iter(struct kiocb *iocb, struct iov_iter *from)
break;
}
- if (aux_dev->aux->is_remote)
- res = drm_dp_mst_dpcd_write(aux_dev->aux, pos, buf,
- todo);
- else
- res = drm_dp_dpcd_write(aux_dev->aux, pos, buf, todo);
+ res = drm_dp_dpcd_write(aux_dev->aux, pos, buf, todo);
if (res <= 0)
break;
diff --git a/drivers/gpu/drm/drm_dp_helper.c b/drivers/gpu/drm/drm_dp_helper.c
index 2c7870aef469..a5364b5192b8 100644
--- a/drivers/gpu/drm/drm_dp_helper.c
+++ b/drivers/gpu/drm/drm_dp_helper.c
@@ -32,6 +32,7 @@
#include <drm/drm_dp_helper.h>
#include <drm/drm_print.h>
#include <drm/drm_vblank.h>
+#include <drm/drm_dp_mst_helper.h>
#include "drm_crtc_helper_internal.h"
@@ -266,7 +267,7 @@ unlock:
/**
* drm_dp_dpcd_read() - read a series of bytes from the DPCD
- * @aux: DisplayPort AUX channel
+ * @aux: DisplayPort AUX channel (SST or MST)
* @offset: address of the (first) register to read
* @buffer: buffer to store the register values
* @size: number of bytes in @buffer
@@ -295,13 +296,18 @@ ssize_t drm_dp_dpcd_read(struct drm_dp_aux *aux, unsigned int offset,
* We just have to do it before any DPCD access and hope that the
* monitor doesn't power down exactly after the throw away read.
*/
- ret = drm_dp_dpcd_access(aux, DP_AUX_NATIVE_READ, DP_DPCD_REV, buffer,
- 1);
- if (ret != 1)
- goto out;
+ if (!aux->is_remote) {
+ ret = drm_dp_dpcd_access(aux, DP_AUX_NATIVE_READ, DP_DPCD_REV,
+ buffer, 1);
+ if (ret != 1)
+ goto out;
+ }
- ret = drm_dp_dpcd_access(aux, DP_AUX_NATIVE_READ, offset, buffer,
- size);
+ if (aux->is_remote)
+ ret = drm_dp_mst_dpcd_read(aux, offset, buffer, size);
+ else
+ ret = drm_dp_dpcd_access(aux, DP_AUX_NATIVE_READ, offset,
+ buffer, size);
out:
drm_dp_dump_access(aux, DP_AUX_NATIVE_READ, offset, buffer, ret);
@@ -311,7 +317,7 @@ EXPORT_SYMBOL(drm_dp_dpcd_read);
/**
* drm_dp_dpcd_write() - write a series of bytes to the DPCD
- * @aux: DisplayPort AUX channel
+ * @aux: DisplayPort AUX channel (SST or MST)
* @offset: address of the (first) register to write
* @buffer: buffer containing the values to write
* @size: number of bytes in @buffer
@@ -328,8 +334,12 @@ ssize_t drm_dp_dpcd_write(struct drm_dp_aux *aux, unsigned int offset,
{
int ret;
- ret = drm_dp_dpcd_access(aux, DP_AUX_NATIVE_WRITE, offset, buffer,
- size);
+ if (aux->is_remote)
+ ret = drm_dp_mst_dpcd_write(aux, offset, buffer, size);
+ else
+ ret = drm_dp_dpcd_access(aux, DP_AUX_NATIVE_WRITE, offset,
+ buffer, size);
+
drm_dp_dump_access(aux, DP_AUX_NATIVE_WRITE, offset, buffer, ret);
return ret;
}
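With this change, callers no longer need to check aux->is_remote themselves; as a hedged illustration (the foo_ helper is hypothetical), the same code now serves both a direct AUX channel and an MST remote one:

	static int foo_read_dpcd_rev(struct drm_dp_aux *aux, u8 *rev)
	{
		ssize_t ret = drm_dp_dpcd_read(aux, DP_DPCD_REV, rev, 1);

		if (ret < 0)
			return ret;

		return ret == 1 ? 0 : -EIO;
	}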
@@ -969,6 +979,19 @@ static void drm_dp_aux_crc_work(struct work_struct *work)
}
/**
+ * drm_dp_remote_aux_init() - minimally initialise a remote aux channel
+ * @aux: DisplayPort AUX channel
+ *
+ * Used for remote AUX channels in general. Merely initializes the CRC work
+ * struct.
+ */
+void drm_dp_remote_aux_init(struct drm_dp_aux *aux)
+{
+ INIT_WORK(&aux->crc_work, drm_dp_aux_crc_work);
+}
+EXPORT_SYMBOL(drm_dp_remote_aux_init);
+
+/**
* drm_dp_aux_init() - minimally initialise an aux channel
* @aux: DisplayPort AUX channel
*
@@ -1155,6 +1178,8 @@ static const struct dpcd_quirk dpcd_quirk_list[] = {
{ OUI(0x00, 0x10, 0xfa), DEVICE_ID_ANY, false, BIT(DP_DPCD_QUIRK_NO_PSR) },
/* CH7511 seems to leave SINK_COUNT zeroed */
{ OUI(0x00, 0x00, 0x00), DEVICE_ID('C', 'H', '7', '5', '1', '1'), false, BIT(DP_DPCD_QUIRK_NO_SINK_COUNT) },
+ /* Synaptics DP1.4 MST hubs can support DSC without virtual DPCD */
+ { OUI(0x90, 0xCC, 0x24), DEVICE_ID_ANY, true, BIT(DP_DPCD_QUIRK_DSC_WITHOUT_VIRTUAL_DPCD) },
};
#undef OUI
diff --git a/drivers/gpu/drm/drm_dp_mst_topology.c b/drivers/gpu/drm/drm_dp_mst_topology.c
index 644c72f9c594..20cdaf3146b8 100644
--- a/drivers/gpu/drm/drm_dp_mst_topology.c
+++ b/drivers/gpu/drm/drm_dp_mst_topology.c
@@ -76,6 +76,11 @@ static int drm_dp_send_dpcd_write(struct drm_dp_mst_topology_mgr *mgr,
static int drm_dp_send_link_address(struct drm_dp_mst_topology_mgr *mgr,
struct drm_dp_mst_branch *mstb);
+
+static void
+drm_dp_send_clear_payload_id_table(struct drm_dp_mst_topology_mgr *mgr,
+ struct drm_dp_mst_branch *mstb);
+
static int drm_dp_send_enum_path_resources(struct drm_dp_mst_topology_mgr *mgr,
struct drm_dp_mst_branch *mstb,
struct drm_dp_mst_port *port);
@@ -517,8 +522,10 @@ drm_dp_decode_sideband_req(const struct drm_dp_sideband_msg_tx *raw,
}
if (failed) {
- for (i = 0; i < r->num_transactions; i++)
+ for (i = 0; i < r->num_transactions; i++) {
+ tx = &r->transactions[i];
kfree(tx->bytes);
+ }
return -ENOMEM;
}
@@ -846,6 +853,7 @@ static bool drm_dp_sideband_parse_enum_path_resources_ack(struct drm_dp_sideband
{
int idx = 1;
repmsg->u.path_resources.port_number = (raw->msg[idx] >> 4) & 0xf;
+ repmsg->u.path_resources.fec_capable = raw->msg[idx] & 0x1;
idx++;
if (idx > raw->curlen)
goto fail_len;
@@ -950,6 +958,8 @@ static bool drm_dp_sideband_parse_reply(struct drm_dp_sideband_msg_rx *raw,
case DP_POWER_DOWN_PHY:
case DP_POWER_UP_PHY:
return drm_dp_sideband_parse_power_updown_phy_ack(raw, msg);
+ case DP_CLEAR_PAYLOAD_ID_TABLE:
+ return true; /* since there's nothing to parse */
default:
DRM_ERROR("Got unknown reply 0x%02x (%s)\n", msg->req_type,
drm_dp_mst_req_type_str(msg->req_type));
@@ -1048,6 +1058,15 @@ static int build_link_address(struct drm_dp_sideband_msg_tx *msg)
return 0;
}
+static int build_clear_payload_id_table(struct drm_dp_sideband_msg_tx *msg)
+{
+ struct drm_dp_sideband_msg_req_body req;
+
+ req.req_type = DP_CLEAR_PAYLOAD_ID_TABLE;
+ drm_dp_encode_sideband_req(&req, msg);
+ return 0;
+}
+
static int build_enum_path_resources(struct drm_dp_sideband_msg_tx *msg, int port_num)
{
struct drm_dp_sideband_msg_req_body req;
@@ -1190,6 +1209,8 @@ static int drm_dp_mst_wait_tx_reply(struct drm_dp_mst_branch *mstb,
txmsg->state == DRM_DP_SIDEBAND_TX_SENT) {
mstb->tx_slots[txmsg->seqno] = NULL;
}
+ mgr->is_waiting_for_dwn_reply = false;
+
}
out:
if (unlikely(ret == -EIO) && drm_debug_enabled(DRM_UT_DP)) {
@@ -1199,6 +1220,7 @@ out:
}
mutex_unlock(&mgr->qlock);
+ drm_dp_mst_kick_tx(mgr);
return ret;
}
@@ -1913,73 +1935,90 @@ static u8 drm_dp_calculate_rad(struct drm_dp_mst_port *port,
return parent_lct + 1;
}
-static int drm_dp_port_set_pdt(struct drm_dp_mst_port *port, u8 new_pdt)
+static bool drm_dp_mst_is_dp_mst_end_device(u8 pdt, bool mcs)
+{
+ switch (pdt) {
+ case DP_PEER_DEVICE_DP_LEGACY_CONV:
+ case DP_PEER_DEVICE_SST_SINK:
+ return true;
+ case DP_PEER_DEVICE_MST_BRANCHING:
+ /* For sst branch device */
+ if (!mcs)
+ return true;
+
+ return false;
+ }
+ return true;
+}
+
+static int
+drm_dp_port_set_pdt(struct drm_dp_mst_port *port, u8 new_pdt,
+ bool new_mcs)
{
struct drm_dp_mst_topology_mgr *mgr = port->mgr;
struct drm_dp_mst_branch *mstb;
u8 rad[8], lct;
int ret = 0;
- if (port->pdt == new_pdt)
+ if (port->pdt == new_pdt && port->mcs == new_mcs)
return 0;
/* Teardown the old pdt, if there is one */
- switch (port->pdt) {
- case DP_PEER_DEVICE_DP_LEGACY_CONV:
- case DP_PEER_DEVICE_SST_SINK:
- /*
- * If the new PDT would also have an i2c bus, don't bother
- * with reregistering it
- */
- if (new_pdt == DP_PEER_DEVICE_DP_LEGACY_CONV ||
- new_pdt == DP_PEER_DEVICE_SST_SINK) {
- port->pdt = new_pdt;
- return 0;
- }
+ if (port->pdt != DP_PEER_DEVICE_NONE) {
+ if (drm_dp_mst_is_dp_mst_end_device(port->pdt, port->mcs)) {
+ /*
+ * If the new PDT would also have an i2c bus,
+ * don't bother with reregistering it
+ */
+ if (new_pdt != DP_PEER_DEVICE_NONE &&
+ drm_dp_mst_is_dp_mst_end_device(new_pdt, new_mcs)) {
+ port->pdt = new_pdt;
+ port->mcs = new_mcs;
+ return 0;
+ }
- /* remove i2c over sideband */
- drm_dp_mst_unregister_i2c_bus(&port->aux);
- break;
- case DP_PEER_DEVICE_MST_BRANCHING:
- mutex_lock(&mgr->lock);
- drm_dp_mst_topology_put_mstb(port->mstb);
- port->mstb = NULL;
- mutex_unlock(&mgr->lock);
- break;
+ /* remove i2c over sideband */
+ drm_dp_mst_unregister_i2c_bus(&port->aux);
+ } else {
+ mutex_lock(&mgr->lock);
+ drm_dp_mst_topology_put_mstb(port->mstb);
+ port->mstb = NULL;
+ mutex_unlock(&mgr->lock);
+ }
}
port->pdt = new_pdt;
- switch (port->pdt) {
- case DP_PEER_DEVICE_DP_LEGACY_CONV:
- case DP_PEER_DEVICE_SST_SINK:
- /* add i2c over sideband */
- ret = drm_dp_mst_register_i2c_bus(&port->aux);
- break;
+ port->mcs = new_mcs;
- case DP_PEER_DEVICE_MST_BRANCHING:
- lct = drm_dp_calculate_rad(port, rad);
- mstb = drm_dp_add_mst_branch_device(lct, rad);
- if (!mstb) {
- ret = -ENOMEM;
- DRM_ERROR("Failed to create MSTB for port %p", port);
- goto out;
- }
+ if (port->pdt != DP_PEER_DEVICE_NONE) {
+ if (drm_dp_mst_is_dp_mst_end_device(port->pdt, port->mcs)) {
+ /* add i2c over sideband */
+ ret = drm_dp_mst_register_i2c_bus(&port->aux);
+ } else {
+ lct = drm_dp_calculate_rad(port, rad);
+ mstb = drm_dp_add_mst_branch_device(lct, rad);
+ if (!mstb) {
+ ret = -ENOMEM;
+ DRM_ERROR("Failed to create MSTB for port %p",
+ port);
+ goto out;
+ }
- mutex_lock(&mgr->lock);
- port->mstb = mstb;
- mstb->mgr = port->mgr;
- mstb->port_parent = port;
+ mutex_lock(&mgr->lock);
+ port->mstb = mstb;
+ mstb->mgr = port->mgr;
+ mstb->port_parent = port;
- /*
- * Make sure this port's memory allocation stays
- * around until its child MSTB releases it
- */
- drm_dp_mst_get_port_malloc(port);
- mutex_unlock(&mgr->lock);
+ /*
+ * Make sure this port's memory allocation stays
+ * around until its child MSTB releases it
+ */
+ drm_dp_mst_get_port_malloc(port);
+ mutex_unlock(&mgr->lock);
- /* And make sure we send a link address for this */
- ret = 1;
- break;
+ /* And make sure we send a link address for this */
+ ret = 1;
+ }
}
out:
@@ -2132,9 +2171,8 @@ drm_dp_mst_port_add_connector(struct drm_dp_mst_branch *mstb,
goto error;
}
- if ((port->pdt == DP_PEER_DEVICE_DP_LEGACY_CONV ||
- port->pdt == DP_PEER_DEVICE_SST_SINK) &&
- port->port_num >= DP_MST_LOGICAL_PORT_0) {
+ if (port->pdt != DP_PEER_DEVICE_NONE &&
+ drm_dp_mst_is_dp_mst_end_device(port->pdt, port->mcs)) {
port->cached_edid = drm_get_edid(port->connector,
&port->aux.ddc);
drm_connector_set_tile_property(port->connector);
@@ -2156,6 +2194,7 @@ drm_dp_mst_topology_unlink_port(struct drm_dp_mst_topology_mgr *mgr,
struct drm_dp_mst_port *port)
{
mutex_lock(&mgr->lock);
+ port->parent->num_ports--;
list_del(&port->next);
mutex_unlock(&mgr->lock);
drm_dp_mst_topology_put_port(port);
@@ -2180,6 +2219,9 @@ drm_dp_mst_add_port(struct drm_device *dev,
port->aux.dev = dev->dev;
port->aux.is_remote = true;
+ /* initialize the MST downstream port's AUX crc work queue */
+ drm_dp_remote_aux_init(&port->aux);
+
/*
* Make sure the memory allocation for our parent branch stays
* around until our own memory allocation is released
@@ -2198,6 +2240,7 @@ drm_dp_mst_handle_link_address_port(struct drm_dp_mst_branch *mstb,
struct drm_dp_mst_port *port;
int old_ddps = 0, ret;
u8 new_pdt = DP_PEER_DEVICE_NONE;
+ bool new_mcs = 0;
bool created = false, send_link_addr = false, changed = false;
port = drm_dp_get_port(mstb, port_msg->port_number);
@@ -2242,7 +2285,7 @@ drm_dp_mst_handle_link_address_port(struct drm_dp_mst_branch *mstb,
port->input = port_msg->input_port;
if (!port->input)
new_pdt = port_msg->peer_device_type;
- port->mcs = port_msg->mcs;
+ new_mcs = port_msg->mcs;
port->ddps = port_msg->ddps;
port->ldps = port_msg->legacy_device_plug_status;
port->dpcd_rev = port_msg->dpcd_revision;
@@ -2255,6 +2298,7 @@ drm_dp_mst_handle_link_address_port(struct drm_dp_mst_branch *mstb,
mutex_lock(&mgr->lock);
drm_dp_mst_topology_get_port(port);
list_add(&port->next, &mstb->ports);
+ mstb->num_ports++;
mutex_unlock(&mgr->lock);
}
@@ -2269,7 +2313,7 @@ drm_dp_mst_handle_link_address_port(struct drm_dp_mst_branch *mstb,
}
}
- ret = drm_dp_port_set_pdt(port, new_pdt);
+ ret = drm_dp_port_set_pdt(port, new_pdt, new_mcs);
if (ret == 1) {
send_link_addr = true;
} else if (ret < 0) {
@@ -2283,7 +2327,8 @@ drm_dp_mst_handle_link_address_port(struct drm_dp_mst_branch *mstb,
* we're coming out of suspend. In this case, always resend the link
* address if there's an MSTB on this port
*/
- if (!created && port->pdt == DP_PEER_DEVICE_MST_BRANCHING)
+ if (!created && port->pdt == DP_PEER_DEVICE_MST_BRANCHING &&
+ port->mcs)
send_link_addr = true;
if (port->connector)
@@ -2318,8 +2363,9 @@ drm_dp_mst_handle_conn_stat(struct drm_dp_mst_branch *mstb,
{
struct drm_dp_mst_topology_mgr *mgr = mstb->mgr;
struct drm_dp_mst_port *port;
- int old_ddps, ret;
+ int old_ddps, old_input, ret, i;
u8 new_pdt;
+ bool new_mcs;
bool dowork = false, create_connector = false;
port = drm_dp_get_port(mstb, conn_stat->port_number);
@@ -2349,8 +2395,8 @@ drm_dp_mst_handle_conn_stat(struct drm_dp_mst_branch *mstb,
}
old_ddps = port->ddps;
+ old_input = port->input;
port->input = conn_stat->input_port;
- port->mcs = conn_stat->message_capability_status;
port->ldps = conn_stat->legacy_device_plug_status;
port->ddps = conn_stat->displayport_device_plug_status;
@@ -2363,8 +2409,8 @@ drm_dp_mst_handle_conn_stat(struct drm_dp_mst_branch *mstb,
}
new_pdt = port->input ? DP_PEER_DEVICE_NONE : conn_stat->peer_device_type;
-
- ret = drm_dp_port_set_pdt(port, new_pdt);
+ new_mcs = conn_stat->message_capability_status;
+ ret = drm_dp_port_set_pdt(port, new_pdt, new_mcs);
if (ret == 1) {
dowork = true;
} else if (ret < 0) {
@@ -2373,6 +2419,28 @@ drm_dp_mst_handle_conn_stat(struct drm_dp_mst_branch *mstb,
dowork = false;
}
+ if (!old_input && old_ddps != port->ddps && !port->ddps) {
+ for (i = 0; i < mgr->max_payloads; i++) {
+ struct drm_dp_vcpi *vcpi = mgr->proposed_vcpis[i];
+ struct drm_dp_mst_port *port_validated;
+
+ if (!vcpi)
+ continue;
+
+ port_validated =
+ container_of(vcpi, struct drm_dp_mst_port, vcpi);
+ port_validated =
+ drm_dp_mst_topology_get_port_validated(mgr, port_validated);
+ if (!port_validated) {
+ mutex_lock(&mgr->payload_lock);
+ vcpi->num_slots = 0;
+ mutex_unlock(&mgr->payload_lock);
+ } else {
+ drm_dp_mst_topology_put_port(port_validated);
+ }
+ }
+ }
+
if (port->connector)
drm_modeset_unlock(&mgr->base.lock);
else if (create_connector)
@@ -2520,10 +2588,14 @@ static void drm_dp_mst_link_probe_work(struct work_struct *work)
struct drm_device *dev = mgr->dev;
struct drm_dp_mst_branch *mstb;
int ret;
+ bool clear_payload_id_table;
mutex_lock(&mgr->probe_lock);
mutex_lock(&mgr->lock);
+ clear_payload_id_table = !mgr->payload_id_table_cleared;
+ mgr->payload_id_table_cleared = true;
+
mstb = mgr->mst_primary;
if (mstb) {
ret = drm_dp_mst_topology_try_get_mstb(mstb);
@@ -2536,6 +2608,19 @@ static void drm_dp_mst_link_probe_work(struct work_struct *work)
return;
}
+ /*
+ * Certain branch devices seem to incorrectly report an available_pbn
+ * of 0 on downstream sinks, even after clearing the
+ * DP_PAYLOAD_ALLOCATE_* registers in
+ * drm_dp_mst_topology_mgr_set_mst(). Namely, the CableMatters USB-C
+ * 2x DP hub. Sending a CLEAR_PAYLOAD_ID_TABLE message seems to make
+ * things work again.
+ */
+ if (clear_payload_id_table) {
+ DRM_DEBUG_KMS("Clearing payload ID table\n");
+ drm_dp_send_clear_payload_id_table(mgr, mstb);
+ }
+
ret = drm_dp_check_and_send_link_address(mgr, mstb);
drm_dp_mst_topology_put_mstb(mstb);
@@ -2718,9 +2803,11 @@ static void process_single_down_tx_qlock(struct drm_dp_mst_topology_mgr *mgr)
ret = process_single_tx_qlock(mgr, txmsg, false);
if (ret == 1) {
		/* txmsg is sent; it should be in the slots now */
+ mgr->is_waiting_for_dwn_reply = true;
list_del(&txmsg->next);
} else if (ret) {
DRM_DEBUG_KMS("failed to send msg in q %d\n", ret);
+ mgr->is_waiting_for_dwn_reply = false;
list_del(&txmsg->next);
if (txmsg->seqno != -1)
txmsg->dst->tx_slots[txmsg->seqno] = NULL;
@@ -2760,7 +2847,8 @@ static void drm_dp_queue_down_tx(struct drm_dp_mst_topology_mgr *mgr,
drm_dp_mst_dump_sideband_msg_tx(&p, txmsg);
}
- if (list_is_singular(&mgr->tx_msg_downq))
+ if (list_is_singular(&mgr->tx_msg_downq) &&
+ !mgr->is_waiting_for_dwn_reply)
process_single_down_tx_qlock(mgr);
mutex_unlock(&mgr->qlock);
}
@@ -2859,6 +2947,28 @@ out:
return ret < 0 ? ret : changed;
}
+void drm_dp_send_clear_payload_id_table(struct drm_dp_mst_topology_mgr *mgr,
+ struct drm_dp_mst_branch *mstb)
+{
+ struct drm_dp_sideband_msg_tx *txmsg;
+ int len, ret;
+
+ txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL);
+ if (!txmsg)
+ return;
+
+ txmsg->dst = mstb;
+ len = build_clear_payload_id_table(txmsg);
+
+ drm_dp_queue_down_tx(mgr, txmsg);
+
+ ret = drm_dp_mst_wait_tx_reply(mstb, txmsg);
+ if (ret > 0 && txmsg->reply.reply_type == DP_SIDEBAND_REPLY_NAK)
+ DRM_DEBUG_KMS("clear payload table id nak received\n");
+
+ kfree(txmsg);
+}
+
static int
drm_dp_send_enum_path_resources(struct drm_dp_mst_topology_mgr *mgr,
struct drm_dp_mst_branch *mstb,
@@ -2894,6 +3004,7 @@ drm_dp_send_enum_path_resources(struct drm_dp_mst_topology_mgr *mgr,
path_res->avail_payload_bw_number);
port->available_pbn =
path_res->avail_payload_bw_number;
+ port->fec_capable = path_res->fec_capable;
}
}
@@ -3388,6 +3499,7 @@ static int drm_dp_get_vc_payload_bw(u8 dp_link_bw, u8 dp_link_count)
int drm_dp_mst_topology_mgr_set_mst(struct drm_dp_mst_topology_mgr *mgr, bool mst_state)
{
int ret = 0;
+ int i = 0;
struct drm_dp_mst_branch *mstb = NULL;
mutex_lock(&mgr->lock);
@@ -3448,10 +3560,23 @@ int drm_dp_mst_topology_mgr_set_mst(struct drm_dp_mst_topology_mgr *mgr, bool ms
/* this can fail if the device is gone */
drm_dp_dpcd_writeb(mgr->aux, DP_MSTM_CTRL, 0);
ret = 0;
+ mutex_lock(&mgr->payload_lock);
memset(mgr->payloads, 0, mgr->max_payloads * sizeof(struct drm_dp_payload));
mgr->payload_mask = 0;
set_bit(0, &mgr->payload_mask);
+ for (i = 0; i < mgr->max_payloads; i++) {
+ struct drm_dp_vcpi *vcpi = mgr->proposed_vcpis[i];
+
+ if (vcpi) {
+ vcpi->vcpi = 0;
+ vcpi->num_slots = 0;
+ }
+ mgr->proposed_vcpis[i] = NULL;
+ }
mgr->vcpi_mask = 0;
+ mutex_unlock(&mgr->payload_lock);
+
+ mgr->payload_id_table_cleared = false;
}
out_unlock:
@@ -3678,6 +3803,7 @@ static int drm_dp_mst_handle_down_rep(struct drm_dp_mst_topology_mgr *mgr)
mutex_lock(&mgr->qlock);
txmsg->state = DRM_DP_SIDEBAND_TX_RX;
mstb->tx_slots[slot] = NULL;
+ mgr->is_waiting_for_dwn_reply = false;
mutex_unlock(&mgr->qlock);
wake_up_all(&mgr->tx_waitq);
@@ -3687,6 +3813,9 @@ static int drm_dp_mst_handle_down_rep(struct drm_dp_mst_topology_mgr *mgr)
no_msg:
drm_dp_mst_topology_put_mstb(mstb);
clear_down_rep_recv:
+ mutex_lock(&mgr->qlock);
+ mgr->is_waiting_for_dwn_reply = false;
+ mutex_unlock(&mgr->qlock);
memset(&mgr->down_rep_recv, 0, sizeof(struct drm_dp_sideband_msg_rx));
return 0;
@@ -3896,6 +4025,8 @@ drm_dp_mst_detect_port(struct drm_connector *connector,
switch (port->pdt) {
case DP_PEER_DEVICE_NONE:
case DP_PEER_DEVICE_MST_BRANCHING:
+ if (!port->mcs)
+ ret = connector_status_connected;
break;
case DP_PEER_DEVICE_SST_SINK:
@@ -4018,6 +4149,7 @@ static int drm_dp_init_vcpi(struct drm_dp_mst_topology_mgr *mgr,
* @mgr: MST topology manager for the port
* @port: port to find vcpi slots for
* @pbn: bandwidth required for the mode in PBN
+ * @pbn_div: divider for DSC mode that takes FEC into account
*
* Allocates VCPI slots to @port, replacing any previous VCPI allocations it
* may have had. Any atomic drivers which support MST must call this function
@@ -4044,11 +4176,12 @@ static int drm_dp_init_vcpi(struct drm_dp_mst_topology_mgr *mgr,
*/
int drm_dp_atomic_find_vcpi_slots(struct drm_atomic_state *state,
struct drm_dp_mst_topology_mgr *mgr,
- struct drm_dp_mst_port *port, int pbn)
+ struct drm_dp_mst_port *port, int pbn,
+ int pbn_div)
{
struct drm_dp_mst_topology_state *topology_state;
struct drm_dp_vcpi_allocation *pos, *vcpi = NULL;
- int prev_slots, req_slots;
+ int prev_slots, prev_bw, req_slots;
topology_state = drm_atomic_get_mst_topology_state(state, mgr);
if (IS_ERR(topology_state))
@@ -4059,6 +4192,7 @@ int drm_dp_atomic_find_vcpi_slots(struct drm_atomic_state *state,
if (pos->port == port) {
vcpi = pos;
prev_slots = vcpi->vcpi;
+ prev_bw = vcpi->pbn;
/*
* This should never happen, unless the driver tries
@@ -4074,14 +4208,22 @@ int drm_dp_atomic_find_vcpi_slots(struct drm_atomic_state *state,
break;
}
}
- if (!vcpi)
+ if (!vcpi) {
prev_slots = 0;
+ prev_bw = 0;
+ }
+
+ if (pbn_div <= 0)
+ pbn_div = mgr->pbn_div;
- req_slots = DIV_ROUND_UP(pbn, mgr->pbn_div);
+ req_slots = DIV_ROUND_UP(pbn, pbn_div);
DRM_DEBUG_ATOMIC("[CONNECTOR:%d:%s] [MST PORT:%p] VCPI %d -> %d\n",
port->connector->base.id, port->connector->name,
port, prev_slots, req_slots);
+ DRM_DEBUG_ATOMIC("[CONNECTOR:%d:%s] [MST PORT:%p] PBN %d -> %d\n",
+ port->connector->base.id, port->connector->name,
+ port, prev_bw, pbn);
/* Add the new allocation to the state */
if (!vcpi) {
@@ -4094,6 +4236,7 @@ int drm_dp_atomic_find_vcpi_slots(struct drm_atomic_state *state,
list_add(&vcpi->next, &topology_state->vcpis);
}
vcpi->vcpi = req_slots;
+ vcpi->pbn = pbn;
return req_slots;
}
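/*
 * Illustrative aside, not part of this patch: a sketch of how a driver's
 * encoder ->atomic_check() might use the new @pbn_div argument together
 * with the DSC-aware PBN calculation. The names mst_mgr, mst_port and
 * dsc_bpp_x16 are hypothetical stand-ins for driver-private state.
 */
static int example_mst_atomic_check(struct drm_atomic_state *state,
				    struct drm_crtc_state *crtc_state,
				    struct drm_dp_mst_topology_mgr *mst_mgr,
				    struct drm_dp_mst_port *mst_port,
				    int dsc_bpp_x16)
{
	int pbn, slots;

	/* With DSC, bpp is passed in units of 1/16th of a bit per pixel */
	pbn = drm_dp_calc_pbn_mode(crtc_state->adjusted_mode.crtc_clock,
				   dsc_bpp_x16, true);

	/* A pbn_div of 0 (or less) falls back to mgr->pbn_div */
	slots = drm_dp_atomic_find_vcpi_slots(state, mst_mgr, mst_port,
					      pbn, 0);
	return slots < 0 ? slots : 0;
}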
@@ -4344,10 +4487,11 @@ EXPORT_SYMBOL(drm_dp_check_act_status);
* drm_dp_calc_pbn_mode() - Calculate the PBN for a mode.
* @clock: dot clock for the mode
* @bpp: bpp for the mode.
+ * @dsc: DSC mode. If true, bpp has units of 1/16 of a bit per pixel
*
* This uses the formula in the spec to calculate the PBN value for a mode.
*/
-int drm_dp_calc_pbn_mode(int clock, int bpp)
+int drm_dp_calc_pbn_mode(int clock, int bpp, bool dsc)
{
/*
* margin 5300ppm + 300ppm ~ 0.6% as per spec, factor is 1.006
@@ -4358,7 +4502,16 @@ int drm_dp_calc_pbn_mode(int clock, int bpp)
* peak_kbps *= (1006/1000)
* peak_kbps *= (64/54)
* peak_kbps *= 8 convert to bytes
+ *
+ * If the bpp is in units of 1/16, further divide by 16. Put this
+ * factor in the numerator rather than the denominator to avoid
+ * integer overflow
*/
+
+ if (dsc)
+ return DIV_ROUND_UP_ULL(mul_u32_u32(clock * (bpp / 16), 64 * 1006),
+ 8 * 54 * 1000 * 1000);
+
return DIV_ROUND_UP_ULL(mul_u32_u32(clock * bpp, 64 * 1006),
8 * 54 * 1000 * 1000);
}
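/*
 * Illustrative aside, not part of this patch: a worked example of the
 * formula above. For a 3840x2160@60 mode (clock = 594000 kHz) at 24 bpp
 * without DSC:
 *
 *	594000 * 24 * 64 * 1006 = 917858304000
 *	8 * 54 * 1000 * 1000    =    432000000
 *	917858304000 / 432000000 ~= 2124.67  ->  DIV_ROUND_UP -> 2125 PBN
 *
 * With DSC at 12 bpp (i.e. bpp = 192 in 1/16th units), the (bpp / 16)
 * term brings the value back to 12 before the multiplication, roughly
 * halving the result to 1063 PBN.
 */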
@@ -4497,7 +4650,7 @@ static void drm_dp_tx_work(struct work_struct *work)
struct drm_dp_mst_topology_mgr *mgr = container_of(work, struct drm_dp_mst_topology_mgr, tx_work);
mutex_lock(&mgr->qlock);
- if (!list_empty(&mgr->tx_msg_downq))
+ if (!list_empty(&mgr->tx_msg_downq) && !mgr->is_waiting_for_dwn_reply)
process_single_down_tx_qlock(mgr);
mutex_unlock(&mgr->qlock);
}
@@ -4508,7 +4661,7 @@ drm_dp_delayed_destroy_port(struct drm_dp_mst_port *port)
if (port->connector)
port->mgr->cbs->destroy_connector(port->mgr, port->connector);
- drm_dp_port_set_pdt(port, DP_PEER_DEVICE_NONE);
+ drm_dp_port_set_pdt(port, DP_PEER_DEVICE_NONE, port->mcs);
drm_dp_mst_put_port_malloc(port);
}
@@ -4660,9 +4813,61 @@ static void drm_dp_mst_destroy_state(struct drm_private_obj *obj,
kfree(mst_state);
}
+static bool drm_dp_mst_port_downstream_of_branch(struct drm_dp_mst_port *port,
+ struct drm_dp_mst_branch *branch)
+{
+ while (port->parent) {
+ if (port->parent == branch)
+ return true;
+
+ if (port->parent->port_parent)
+ port = port->parent->port_parent;
+ else
+ break;
+ }
+ return false;
+}
+
+static inline
+int drm_dp_mst_atomic_check_bw_limit(struct drm_dp_mst_branch *branch,
+ struct drm_dp_mst_topology_state *mst_state)
+{
+ struct drm_dp_mst_port *port;
+ struct drm_dp_vcpi_allocation *vcpi;
+ int pbn_limit = 0, pbn_used = 0;
+
+ list_for_each_entry(port, &branch->ports, next) {
+ if (port->mstb)
+ if (drm_dp_mst_atomic_check_bw_limit(port->mstb, mst_state))
+ return -ENOSPC;
+
+ if (port->available_pbn > 0)
+ pbn_limit = port->available_pbn;
+ }
+ DRM_DEBUG_ATOMIC("[MST BRANCH:%p] branch has %d PBN available\n",
+ branch, pbn_limit);
+
+ list_for_each_entry(vcpi, &mst_state->vcpis, next) {
+ if (!vcpi->pbn)
+ continue;
+
+ if (drm_dp_mst_port_downstream_of_branch(vcpi->port, branch))
+ pbn_used += vcpi->pbn;
+ }
+ DRM_DEBUG_ATOMIC("[MST BRANCH:%p] branch used %d PBN\n",
+ branch, pbn_used);
+
+ if (pbn_used > pbn_limit) {
+ DRM_DEBUG_ATOMIC("[MST BRANCH:%p] No available bandwidth\n",
+ branch);
+ return -ENOSPC;
+ }
+ return 0;
+}
+
static inline int
-drm_dp_mst_atomic_check_topology_state(struct drm_dp_mst_topology_mgr *mgr,
- struct drm_dp_mst_topology_state *mst_state)
+drm_dp_mst_atomic_check_vcpi_alloc_limit(struct drm_dp_mst_topology_mgr *mgr,
+ struct drm_dp_mst_topology_state *mst_state)
{
struct drm_dp_vcpi_allocation *vcpi;
int avail_slots = 63, payload_count = 0;
@@ -4700,6 +4905,128 @@ drm_dp_mst_atomic_check_topology_state(struct drm_dp_mst_topology_mgr *mgr,
}
/**
+ * drm_dp_mst_add_affected_dsc_crtcs - Add affected CRTCs to the atomic state
+ * @state: Pointer to the new &struct drm_atomic_state
+ * @mgr: MST topology manager
+ *
+ * Whenever there is a change in the MST topology, the DSC configuration has
+ * to be recalculated, so we need to trigger a modeset on all affected CRTCs
+ * in that topology.
+ *
+ * See also:
+ * drm_dp_mst_atomic_enable_dsc()
+ */
+int drm_dp_mst_add_affected_dsc_crtcs(struct drm_atomic_state *state, struct drm_dp_mst_topology_mgr *mgr)
+{
+ struct drm_dp_mst_topology_state *mst_state;
+ struct drm_dp_vcpi_allocation *pos;
+ struct drm_connector *connector;
+ struct drm_connector_state *conn_state;
+ struct drm_crtc *crtc;
+ struct drm_crtc_state *crtc_state;
+
+ mst_state = drm_atomic_get_mst_topology_state(state, mgr);
+
+ if (IS_ERR(mst_state))
+ return -EINVAL;
+
+ list_for_each_entry(pos, &mst_state->vcpis, next) {
+
+ connector = pos->port->connector;
+
+ if (!connector)
+ return -EINVAL;
+
+ conn_state = drm_atomic_get_connector_state(state, connector);
+
+ if (IS_ERR(conn_state))
+ return PTR_ERR(conn_state);
+
+ crtc = conn_state->crtc;
+
+ if (WARN_ON(!crtc))
+ return -EINVAL;
+
+ if (!drm_dp_mst_dsc_aux_for_port(pos->port))
+ continue;
+
+ crtc_state = drm_atomic_get_crtc_state(mst_state->base.state, crtc);
+
+ if (IS_ERR(crtc_state))
+ return PTR_ERR(crtc_state);
+
+ DRM_DEBUG_ATOMIC("[MST MGR:%p] Setting mode_changed flag on CRTC %p\n",
+ mgr, crtc);
+
+ crtc_state->mode_changed = true;
+ }
+ return 0;
+}
+EXPORT_SYMBOL(drm_dp_mst_add_affected_dsc_crtcs);
+
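/*
 * Illustrative aside, not part of this patch: a driver that supports DSC
 * would typically call this early in its atomic check whenever the MST
 * topology may have changed, e.g. (the mst_mgr pointer is hypothetical):
 *
 *	ret = drm_dp_mst_add_affected_dsc_crtcs(state, mst_mgr);
 *	if (ret)
 *		return ret;
 *
 * so that every CRTC whose DSC configuration depends on the shared link
 * is forced through a full modeset.
 */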
+/**
+ * drm_dp_mst_atomic_enable_dsc - Set DSC Enable Flag to On/Off
+ * @state: Pointer to the new drm_atomic_state
+ * @port: Pointer to the affected MST Port
+ * @pbn: Newly recalculated bandwidth required for the link with DSC enabled
+ * @pbn_div: Divider to calculate the correct number of PBN per slot
+ * @enable: Boolean flag to enable or disable DSC on the port
+ *
+ * This function enables or disables DSC on the given port by recalculating
+ * its VCPI from the PBN provided and sets the dsc_enabled flag to keep track
+ * of which ports have DSC enabled.
+ *
+ */
+int drm_dp_mst_atomic_enable_dsc(struct drm_atomic_state *state,
+ struct drm_dp_mst_port *port,
+ int pbn, int pbn_div,
+ bool enable)
+{
+ struct drm_dp_mst_topology_state *mst_state;
+ struct drm_dp_vcpi_allocation *pos;
+ bool found = false;
+ int vcpi = 0;
+
+ mst_state = drm_atomic_get_mst_topology_state(state, port->mgr);
+
+ if (IS_ERR(mst_state))
+ return PTR_ERR(mst_state);
+
+ list_for_each_entry(pos, &mst_state->vcpis, next) {
+ if (pos->port == port) {
+ found = true;
+ break;
+ }
+ }
+
+ if (!found) {
+ DRM_DEBUG_ATOMIC("[MST PORT:%p] Couldn't find VCPI allocation in mst state %p\n",
+ port, mst_state);
+ return -EINVAL;
+ }
+
+ if (pos->dsc_enabled == enable) {
+ DRM_DEBUG_ATOMIC("[MST PORT:%p] DSC flag is already set to %d, returning %d VCPI slots\n",
+ port, enable, pos->vcpi);
+ vcpi = pos->vcpi;
+ }
+
+ if (enable) {
+ vcpi = drm_dp_atomic_find_vcpi_slots(state, port->mgr, port, pbn, pbn_div);
+ DRM_DEBUG_ATOMIC("[MST PORT:%p] Enabling DSC flag, reallocating %d VCPI slots on the port\n",
+ port, vcpi);
+ if (vcpi < 0)
+ return -EINVAL;
+ }
+
+ pos->dsc_enabled = enable;
+
+ return vcpi;
+}
+EXPORT_SYMBOL(drm_dp_mst_atomic_enable_dsc);
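/*
 * Illustrative aside, not part of this patch: a sketch of toggling DSC
 * from a driver's atomic check. All arguments come from the caller; on
 * success the return value is the port's (re)allocated VCPI slot count.
 */
static int example_set_dsc(struct drm_atomic_state *state,
			   struct drm_dp_mst_port *port,
			   int pbn, int pbn_div, bool enable)
{
	int slots;

	slots = drm_dp_mst_atomic_enable_dsc(state, port, pbn, pbn_div,
					     enable);
	if (slots < 0)
		return slots;

	/* slots now reflects the VCPI allocation for this port */
	return 0;
}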
+/**
* drm_dp_mst_atomic_check - Check that the new state of an MST topology in an
* atomic update is valid
* @state: Pointer to the new &struct drm_dp_mst_topology_state
@@ -4727,7 +5054,13 @@ int drm_dp_mst_atomic_check(struct drm_atomic_state *state)
int i, ret = 0;
for_each_new_mst_mgr_in_state(state, mgr, mst_state, i) {
- ret = drm_dp_mst_atomic_check_topology_state(mgr, mst_state);
+ if (!mgr->mst_state)
+ continue;
+
+ ret = drm_dp_mst_atomic_check_vcpi_alloc_limit(mgr, mst_state);
+ if (ret)
+ break;
+ ret = drm_dp_mst_atomic_check_bw_limit(mgr->mst_primary, mst_state);
if (ret)
break;
}
@@ -4991,3 +5324,173 @@ static void drm_dp_mst_unregister_i2c_bus(struct drm_dp_aux *aux)
{
i2c_del_adapter(&aux->ddc);
}
+
+/**
+ * drm_dp_mst_is_virtual_dpcd() - Is the given port a virtual DP Peer Device
+ * @port: The port to check
+ *
+ * A single physical MST hub object can be represented in the topology
+ * by multiple branches, with virtual ports between those branches.
+ *
+ * As of DP1.4, an MST hub with internal (virtual) ports must expose
+ * certain DPCD registers over those ports. See sections 2.6.1.1.1
+ * and 2.6.1.1.2 of the DisplayPort v1.4 specification for details.
+ *
+ * May acquire mgr->lock
+ *
+ * Returns:
+ * true if the port is a virtual DP peer device, false otherwise
+ */
+static bool drm_dp_mst_is_virtual_dpcd(struct drm_dp_mst_port *port)
+{
+ struct drm_dp_mst_port *downstream_port;
+
+ if (!port || port->dpcd_rev < DP_DPCD_REV_14)
+ return false;
+
+ /* Virtual DP Sink (Internal Display Panel) */
+ if (port->port_num >= 8)
+ return true;
+
+ /* DP-to-HDMI Protocol Converter */
+ if (port->pdt == DP_PEER_DEVICE_DP_LEGACY_CONV &&
+ !port->mcs &&
+ port->ldps)
+ return true;
+
+ /* DP-to-DP */
+ mutex_lock(&port->mgr->lock);
+ if (port->pdt == DP_PEER_DEVICE_MST_BRANCHING &&
+ port->mstb &&
+ port->mstb->num_ports == 2) {
+ list_for_each_entry(downstream_port, &port->mstb->ports, next) {
+ if (downstream_port->pdt == DP_PEER_DEVICE_SST_SINK &&
+ !downstream_port->input) {
+ mutex_unlock(&port->mgr->lock);
+ return true;
+ }
+ }
+ }
+ mutex_unlock(&port->mgr->lock);
+
+ return false;
+}
+
+/**
+ * drm_dp_mst_dsc_aux_for_port() - Find the correct aux for DSC
+ * @port: The port to check. A leaf of the MST tree with an attached display.
+ *
+ * Depending on the situation, DSC may be enabled via the endpoint aux,
+ * the immediately upstream aux, or the connector's physical aux.
+ *
+ * This is both the correct aux to read the DSC capability (DP_DSC_SUPPORT)
+ * and the correct aux to write the DSC state (DP_DSC_ENABLE).
+ *
+ * This operation can be expensive (up to four aux reads), so
+ * the caller should cache the return.
+ *
+ * Returns:
+ * NULL if DSC cannot be enabled on this port, otherwise the aux device
+ */
+struct drm_dp_aux *drm_dp_mst_dsc_aux_for_port(struct drm_dp_mst_port *port)
+{
+ struct drm_dp_mst_port *immediate_upstream_port;
+ struct drm_dp_mst_port *fec_port;
+ struct drm_dp_desc desc = { 0 };
+ u8 endpoint_fec;
+ u8 endpoint_dsc;
+
+ if (!port)
+ return NULL;
+
+ if (port->parent->port_parent)
+ immediate_upstream_port = port->parent->port_parent;
+ else
+ immediate_upstream_port = NULL;
+
+ fec_port = immediate_upstream_port;
+ while (fec_port) {
+ /*
+ * Each physical link (i.e. not a virtual port) between the
+ * output and the primary device must support FEC
+ */
+ if (!drm_dp_mst_is_virtual_dpcd(fec_port) &&
+ !fec_port->fec_capable)
+ return NULL;
+
+ fec_port = fec_port->parent->port_parent;
+ }
+
+ /* DP-to-DP peer device */
+ if (drm_dp_mst_is_virtual_dpcd(immediate_upstream_port)) {
+ u8 upstream_dsc;
+
+ if (drm_dp_dpcd_read(&port->aux,
+ DP_DSC_SUPPORT, &endpoint_dsc, 1) != 1)
+ return NULL;
+ if (drm_dp_dpcd_read(&port->aux,
+ DP_FEC_CAPABILITY, &endpoint_fec, 1) != 1)
+ return NULL;
+ if (drm_dp_dpcd_read(&immediate_upstream_port->aux,
+ DP_DSC_SUPPORT, &upstream_dsc, 1) != 1)
+ return NULL;
+
+		/* Endpoint decompression with DP-to-DP peer device */
+ if ((endpoint_dsc & DP_DSC_DECOMPRESSION_IS_SUPPORTED) &&
+ (endpoint_fec & DP_FEC_CAPABLE) &&
+ (upstream_dsc & 0x2) /* DSC passthrough */)
+ return &port->aux;
+
+ /* Virtual DPCD decompression with DP-to-DP peer device */
+ return &immediate_upstream_port->aux;
+ }
+
+ /* Virtual DPCD decompression with DP-to-HDMI or Virtual DP Sink */
+ if (drm_dp_mst_is_virtual_dpcd(port))
+ return &port->aux;
+
+ /*
+ * Synaptics quirk
+ * Applies to ports for which:
+ * - Physical aux has Synaptics OUI
+ * - DPv1.4 or higher
+ * - Port is on primary branch device
+ * - Not a VGA adapter (DP_DWN_STRM_PORT_TYPE_ANALOG)
+ */
+ if (drm_dp_read_desc(port->mgr->aux, &desc, true))
+ return NULL;
+
+ if (drm_dp_has_quirk(&desc, DP_DPCD_QUIRK_DSC_WITHOUT_VIRTUAL_DPCD) &&
+ port->mgr->dpcd[DP_DPCD_REV] >= DP_DPCD_REV_14 &&
+ port->parent == port->mgr->mst_primary) {
+ u8 downstreamport;
+
+ if (drm_dp_dpcd_read(&port->aux, DP_DOWNSTREAMPORT_PRESENT,
+ &downstreamport, 1) < 0)
+ return NULL;
+
+ if ((downstreamport & DP_DWN_STRM_PORT_PRESENT) &&
+ ((downstreamport & DP_DWN_STRM_PORT_TYPE_MASK)
+ != DP_DWN_STRM_PORT_TYPE_ANALOG))
+ return port->mgr->aux;
+ }
+
+ /*
+	 * The check below verifies whether the MST sink
+	 * connected to the GPU is capable of DSC;
+	 * the endpoint therefore needs to be
+	 * both DSC and FEC capable.
+ */
+ if (drm_dp_dpcd_read(&port->aux,
+ DP_DSC_SUPPORT, &endpoint_dsc, 1) != 1)
+ return NULL;
+ if (drm_dp_dpcd_read(&port->aux,
+ DP_FEC_CAPABILITY, &endpoint_fec, 1) != 1)
+ return NULL;
+ if ((endpoint_dsc & DP_DSC_DECOMPRESSION_IS_SUPPORTED) &&
+ (endpoint_fec & DP_FEC_CAPABLE))
+ return &port->aux;
+
+ return NULL;
+}
+EXPORT_SYMBOL(drm_dp_mst_dsc_aux_for_port);
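/*
 * Illustrative aside, not part of this patch: because this lookup can
 * cost several AUX reads, a driver would typically resolve it once at
 * detect/modeset time and cache the result. The structure and function
 * below are hypothetical.
 */
struct example_mst_connector {
	struct drm_dp_mst_port *port;
	struct drm_dp_aux *dsc_aux;	/* NULL if DSC can't be enabled */
};

static void example_cache_dsc_aux(struct example_mst_connector *conn)
{
	conn->dsc_aux = drm_dp_mst_dsc_aux_for_port(conn->port);
}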
diff --git a/drivers/gpu/drm/drm_drv.c b/drivers/gpu/drm/drm_drv.c
index 1b9b40a1c7c9..7c18a980cd4b 100644
--- a/drivers/gpu/drm/drm_drv.c
+++ b/drivers/gpu/drm/drm_drv.c
@@ -622,7 +622,8 @@ int drm_dev_init(struct drm_device *dev,
return -ENODEV;
}
- BUG_ON(!parent);
+ if (WARN_ON(!parent))
+ return -EINVAL;
kref_init(&dev->ref);
dev->dev = get_device(parent);
@@ -725,7 +726,7 @@ int devm_drm_dev_init(struct device *parent,
{
int ret;
- if (WARN_ON(!parent || !driver->release))
+ if (WARN_ON(!driver->release))
return -EINVAL;
ret = drm_dev_init(dev, driver, parent);
diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c
index 474ac04d5600..99769d6c9f84 100644
--- a/drivers/gpu/drm/drm_edid.c
+++ b/drivers/gpu/drm/drm_edid.c
@@ -710,14 +710,11 @@ static const struct minimode extra_modes[] = {
};
/*
- * Probably taken from CEA-861 spec.
- * This table is converted from xorg's hw/xfree86/modes/xf86EdidModes.c.
+ * From CEA/CTA-861 spec.
*
- * Index using the VIC.
+ * Do not access directly, instead always use cea_mode_for_vic().
*/
-static const struct drm_display_mode edid_cea_modes[] = {
- /* 0 - dummy, VICs start at 1 */
- { },
+static const struct drm_display_mode edid_cea_modes_1[] = {
/* 1 - 640x480@60Hz 4:3 */
{ DRM_MODE("640x480", DRM_MODE_TYPE_DRIVER, 25175, 640, 656,
752, 800, 0, 480, 490, 492, 525, 0,
@@ -1381,6 +1378,149 @@ static const struct drm_display_mode edid_cea_modes[] = {
};
/*
+ * From CEA/CTA-861 spec.
+ *
+ * Do not access directly, instead always use cea_mode_for_vic().
+ */
+static const struct drm_display_mode edid_cea_modes_193[] = {
+ /* 193 - 5120x2160@120Hz 64:27 */
+ { DRM_MODE("5120x2160", DRM_MODE_TYPE_DRIVER, 1485000, 5120, 5284,
+ 5372, 5500, 0, 2160, 2168, 2178, 2250, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
+ .vrefresh = 120, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
+ /* 194 - 7680x4320@24Hz 16:9 */
+ { DRM_MODE("7680x4320", DRM_MODE_TYPE_DRIVER, 1188000, 7680, 10232,
+ 10408, 11000, 0, 4320, 4336, 4356, 4500, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
+ .vrefresh = 24, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
+ /* 195 - 7680x4320@25Hz 16:9 */
+ { DRM_MODE("7680x4320", DRM_MODE_TYPE_DRIVER, 1188000, 7680, 10032,
+ 10208, 10800, 0, 4320, 4336, 4356, 4400, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
+ .vrefresh = 25, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
+ /* 196 - 7680x4320@30Hz 16:9 */
+ { DRM_MODE("7680x4320", DRM_MODE_TYPE_DRIVER, 1188000, 7680, 8232,
+ 8408, 9000, 0, 4320, 4336, 4356, 4400, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
+ .vrefresh = 30, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
+ /* 197 - 7680x4320@48Hz 16:9 */
+ { DRM_MODE("7680x4320", DRM_MODE_TYPE_DRIVER, 2376000, 7680, 10232,
+ 10408, 11000, 0, 4320, 4336, 4356, 4500, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
+ .vrefresh = 48, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
+ /* 198 - 7680x4320@50Hz 16:9 */
+ { DRM_MODE("7680x4320", DRM_MODE_TYPE_DRIVER, 2376000, 7680, 10032,
+ 10208, 10800, 0, 4320, 4336, 4356, 4400, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
+ .vrefresh = 50, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
+ /* 199 - 7680x4320@60Hz 16:9 */
+ { DRM_MODE("7680x4320", DRM_MODE_TYPE_DRIVER, 2376000, 7680, 8232,
+ 8408, 9000, 0, 4320, 4336, 4356, 4400, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
+ .vrefresh = 60, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
+ /* 200 - 7680x4320@100Hz 16:9 */
+ { DRM_MODE("7680x4320", DRM_MODE_TYPE_DRIVER, 4752000, 7680, 9792,
+ 9968, 10560, 0, 4320, 4336, 4356, 4500, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
+ .vrefresh = 100, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
+ /* 201 - 7680x4320@120Hz 16:9 */
+ { DRM_MODE("7680x4320", DRM_MODE_TYPE_DRIVER, 4752000, 7680, 8032,
+ 8208, 8800, 0, 4320, 4336, 4356, 4500, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
+ .vrefresh = 120, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
+ /* 202 - 7680x4320@24Hz 64:27 */
+ { DRM_MODE("7680x4320", DRM_MODE_TYPE_DRIVER, 1188000, 7680, 10232,
+ 10408, 11000, 0, 4320, 4336, 4356, 4500, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
+ .vrefresh = 24, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
+ /* 203 - 7680x4320@25Hz 64:27 */
+ { DRM_MODE("7680x4320", DRM_MODE_TYPE_DRIVER, 1188000, 7680, 10032,
+ 10208, 10800, 0, 4320, 4336, 4356, 4400, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
+ .vrefresh = 25, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
+ /* 204 - 7680x4320@30Hz 64:27 */
+ { DRM_MODE("7680x4320", DRM_MODE_TYPE_DRIVER, 1188000, 7680, 8232,
+ 8408, 9000, 0, 4320, 4336, 4356, 4400, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
+ .vrefresh = 30, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
+ /* 205 - 7680x4320@48Hz 64:27 */
+ { DRM_MODE("7680x4320", DRM_MODE_TYPE_DRIVER, 2376000, 7680, 10232,
+ 10408, 11000, 0, 4320, 4336, 4356, 4500, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
+ .vrefresh = 48, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
+ /* 206 - 7680x4320@50Hz 64:27 */
+ { DRM_MODE("7680x4320", DRM_MODE_TYPE_DRIVER, 2376000, 7680, 10032,
+ 10208, 10800, 0, 4320, 4336, 4356, 4400, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
+ .vrefresh = 50, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
+ /* 207 - 7680x4320@60Hz 64:27 */
+ { DRM_MODE("7680x4320", DRM_MODE_TYPE_DRIVER, 2376000, 7680, 8232,
+ 8408, 9000, 0, 4320, 4336, 4356, 4400, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
+ .vrefresh = 60, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
+ /* 208 - 7680x4320@100Hz 64:27 */
+ { DRM_MODE("7680x4320", DRM_MODE_TYPE_DRIVER, 4752000, 7680, 9792,
+ 9968, 10560, 0, 4320, 4336, 4356, 4500, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
+ .vrefresh = 100, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
+ /* 209 - 7680x4320@120Hz 64:27 */
+ { DRM_MODE("7680x4320", DRM_MODE_TYPE_DRIVER, 4752000, 7680, 8032,
+ 8208, 8800, 0, 4320, 4336, 4356, 4500, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
+ .vrefresh = 120, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
+ /* 210 - 10240x4320@24Hz 64:27 */
+ { DRM_MODE("10240x4320", DRM_MODE_TYPE_DRIVER, 1485000, 10240, 11732,
+ 11908, 12500, 0, 4320, 4336, 4356, 4950, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
+ .vrefresh = 24, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
+ /* 211 - 10240x4320@25Hz 64:27 */
+ { DRM_MODE("10240x4320", DRM_MODE_TYPE_DRIVER, 1485000, 10240, 12732,
+ 12908, 13500, 0, 4320, 4336, 4356, 4400, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
+ .vrefresh = 25, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
+ /* 212 - 10240x4320@30Hz 64:27 */
+ { DRM_MODE("10240x4320", DRM_MODE_TYPE_DRIVER, 1485000, 10240, 10528,
+ 10704, 11000, 0, 4320, 4336, 4356, 4500, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
+ .vrefresh = 30, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
+ /* 213 - 10240x4320@48Hz 64:27 */
+ { DRM_MODE("10240x4320", DRM_MODE_TYPE_DRIVER, 2970000, 10240, 11732,
+ 11908, 12500, 0, 4320, 4336, 4356, 4950, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
+ .vrefresh = 48, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
+ /* 214 - 10240x4320@50Hz 64:27 */
+ { DRM_MODE("10240x4320", DRM_MODE_TYPE_DRIVER, 2970000, 10240, 12732,
+ 12908, 13500, 0, 4320, 4336, 4356, 4400, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
+ .vrefresh = 50, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
+ /* 215 - 10240x4320@60Hz 64:27 */
+ { DRM_MODE("10240x4320", DRM_MODE_TYPE_DRIVER, 2970000, 10240, 10528,
+ 10704, 11000, 0, 4320, 4336, 4356, 4500, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
+ .vrefresh = 60, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
+ /* 216 - 10240x4320@100Hz 64:27 */
+ { DRM_MODE("10240x4320", DRM_MODE_TYPE_DRIVER, 5940000, 10240, 12432,
+ 12608, 13200, 0, 4320, 4336, 4356, 4500, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
+ .vrefresh = 100, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
+ /* 217 - 10240x4320@120Hz 64:27 */
+ { DRM_MODE("10240x4320", DRM_MODE_TYPE_DRIVER, 5940000, 10240, 10528,
+ 10704, 11000, 0, 4320, 4336, 4356, 4500, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
+ .vrefresh = 120, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
+ /* 218 - 4096x2160@100Hz 256:135 */
+ { DRM_MODE("4096x2160", DRM_MODE_TYPE_DRIVER, 1188000, 4096, 4896,
+ 4984, 5280, 0, 2160, 2168, 2178, 2250, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
+ .vrefresh = 100, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_256_135, },
+ /* 219 - 4096x2160@120Hz 256:135 */
+ { DRM_MODE("4096x2160", DRM_MODE_TYPE_DRIVER, 1188000, 4096, 4184,
+ 4272, 4400, 0, 2160, 2168, 2178, 2250, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
+ .vrefresh = 120, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_256_135, },
+};
+
+/*
* HDMI 1.4 4k modes. Index using the VIC.
*/
static const struct drm_display_mode edid_4k_modes[] = {
@@ -1391,25 +1531,25 @@ static const struct drm_display_mode edid_4k_modes[] = {
3840, 4016, 4104, 4400, 0,
2160, 2168, 2178, 2250, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
- .vrefresh = 30, },
+ .vrefresh = 30, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
/* 2 - 3840x2160@25Hz */
{ DRM_MODE("3840x2160", DRM_MODE_TYPE_DRIVER, 297000,
3840, 4896, 4984, 5280, 0,
2160, 2168, 2178, 2250, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
- .vrefresh = 25, },
+ .vrefresh = 25, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
/* 3 - 3840x2160@24Hz */
{ DRM_MODE("3840x2160", DRM_MODE_TYPE_DRIVER, 297000,
3840, 5116, 5204, 5500, 0,
2160, 2168, 2178, 2250, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
- .vrefresh = 24, },
+ .vrefresh = 24, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
/* 4 - 4096x2160@24Hz (SMPTE) */
{ DRM_MODE("4096x2160", DRM_MODE_TYPE_DRIVER, 297000,
4096, 5116, 5204, 5500, 0,
2160, 2168, 2178, 2250, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
- .vrefresh = 24, },
+ .vrefresh = 24, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_256_135, },
};
/*** DDC fetch and block validation ***/
@@ -3071,6 +3211,30 @@ static u8 *drm_find_cea_extension(const struct edid *edid)
return cea;
}
+static const struct drm_display_mode *cea_mode_for_vic(u8 vic)
+{
+ BUILD_BUG_ON(1 + ARRAY_SIZE(edid_cea_modes_1) - 1 != 127);
+ BUILD_BUG_ON(193 + ARRAY_SIZE(edid_cea_modes_193) - 1 != 219);
+
+ if (vic >= 1 && vic < 1 + ARRAY_SIZE(edid_cea_modes_1))
+ return &edid_cea_modes_1[vic - 1];
+ if (vic >= 193 && vic < 193 + ARRAY_SIZE(edid_cea_modes_193))
+ return &edid_cea_modes_193[vic - 193];
+ return NULL;
+}
+
+static u8 cea_num_vics(void)
+{
+ return 193 + ARRAY_SIZE(edid_cea_modes_193);
+}
+
+static u8 cea_next_vic(u8 vic)
+{
+ if (++vic == 1 + ARRAY_SIZE(edid_cea_modes_1))
+ vic = 193;
+ return vic;
+}
+
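/*
 * Illustrative aside, not part of this patch: VICs now live in two
 * discontiguous ranges (1..127 and 193..219), so callers iterate with
 * the helpers above instead of indexing a single array:
 *
 *	for (vic = 1; vic < cea_num_vics(); vic = cea_next_vic(vic)) {
 *		const struct drm_display_mode *mode = cea_mode_for_vic(vic);
 *		...
 *	}
 */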
/*
* Calculate the alternate clock for the CEA mode
* (60Hz vs. 59.94Hz etc.)
@@ -3108,14 +3272,14 @@ cea_mode_alternate_timings(u8 vic, struct drm_display_mode *mode)
* get the other variants by simply increasing the
* vertical front porch length.
*/
- BUILD_BUG_ON(edid_cea_modes[8].vtotal != 262 ||
- edid_cea_modes[9].vtotal != 262 ||
- edid_cea_modes[12].vtotal != 262 ||
- edid_cea_modes[13].vtotal != 262 ||
- edid_cea_modes[23].vtotal != 312 ||
- edid_cea_modes[24].vtotal != 312 ||
- edid_cea_modes[27].vtotal != 312 ||
- edid_cea_modes[28].vtotal != 312);
+ BUILD_BUG_ON(cea_mode_for_vic(8)->vtotal != 262 ||
+ cea_mode_for_vic(9)->vtotal != 262 ||
+ cea_mode_for_vic(12)->vtotal != 262 ||
+ cea_mode_for_vic(13)->vtotal != 262 ||
+ cea_mode_for_vic(23)->vtotal != 312 ||
+ cea_mode_for_vic(24)->vtotal != 312 ||
+ cea_mode_for_vic(27)->vtotal != 312 ||
+ cea_mode_for_vic(28)->vtotal != 312);
if (((vic == 8 || vic == 9 ||
vic == 12 || vic == 13) && mode->vtotal < 263) ||
@@ -3143,8 +3307,8 @@ static u8 drm_match_cea_mode_clock_tolerance(const struct drm_display_mode *to_m
if (to_match->picture_aspect_ratio)
match_flags |= DRM_MODE_MATCH_ASPECT_RATIO;
- for (vic = 1; vic < ARRAY_SIZE(edid_cea_modes); vic++) {
- struct drm_display_mode cea_mode = edid_cea_modes[vic];
+ for (vic = 1; vic < cea_num_vics(); vic = cea_next_vic(vic)) {
+ struct drm_display_mode cea_mode = *cea_mode_for_vic(vic);
unsigned int clock1, clock2;
/* Check both 60Hz and 59.94Hz */
@@ -3182,8 +3346,8 @@ u8 drm_match_cea_mode(const struct drm_display_mode *to_match)
if (to_match->picture_aspect_ratio)
match_flags |= DRM_MODE_MATCH_ASPECT_RATIO;
- for (vic = 1; vic < ARRAY_SIZE(edid_cea_modes); vic++) {
- struct drm_display_mode cea_mode = edid_cea_modes[vic];
+ for (vic = 1; vic < cea_num_vics(); vic = cea_next_vic(vic)) {
+ struct drm_display_mode cea_mode = *cea_mode_for_vic(vic);
unsigned int clock1, clock2;
/* Check both 60Hz and 59.94Hz */
@@ -3206,28 +3370,31 @@ EXPORT_SYMBOL(drm_match_cea_mode);
static bool drm_valid_cea_vic(u8 vic)
{
- return vic > 0 && vic < ARRAY_SIZE(edid_cea_modes);
+ return cea_mode_for_vic(vic) != NULL;
}
static enum hdmi_picture_aspect drm_get_cea_aspect_ratio(const u8 video_code)
{
- return edid_cea_modes[video_code].picture_aspect_ratio;
+ const struct drm_display_mode *mode = cea_mode_for_vic(video_code);
+
+ if (mode)
+ return mode->picture_aspect_ratio;
+
+ return HDMI_PICTURE_ASPECT_NONE;
+}
+
+static enum hdmi_picture_aspect drm_get_hdmi_aspect_ratio(const u8 video_code)
+{
+ return edid_4k_modes[video_code].picture_aspect_ratio;
}
/*
* Calculate the alternate clock for HDMI modes (those from the HDMI vendor
* specific block).
- *
- * It's almost like cea_mode_alternate_clock(), we just need to add an
- * exception for the VIC 4 mode (4096x2160@24Hz): no alternate clock for this
- * one.
*/
static unsigned int
hdmi_mode_alternate_clock(const struct drm_display_mode *hdmi_mode)
{
- if (hdmi_mode->vdisplay == 4096 && hdmi_mode->hdisplay == 2160)
- return hdmi_mode->clock;
-
return cea_mode_alternate_clock(hdmi_mode);
}
@@ -3240,6 +3407,9 @@ static u8 drm_match_hdmi_mode_clock_tolerance(const struct drm_display_mode *to_
if (!to_match->clock)
return 0;
+ if (to_match->picture_aspect_ratio)
+ match_flags |= DRM_MODE_MATCH_ASPECT_RATIO;
+
for (vic = 1; vic < ARRAY_SIZE(edid_4k_modes); vic++) {
const struct drm_display_mode *hdmi_mode = &edid_4k_modes[vic];
unsigned int clock1, clock2;
@@ -3275,6 +3445,9 @@ static u8 drm_match_hdmi_mode(const struct drm_display_mode *to_match)
if (!to_match->clock)
return 0;
+ if (to_match->picture_aspect_ratio)
+ match_flags |= DRM_MODE_MATCH_ASPECT_RATIO;
+
for (vic = 1; vic < ARRAY_SIZE(edid_4k_modes); vic++) {
const struct drm_display_mode *hdmi_mode = &edid_4k_modes[vic];
unsigned int clock1, clock2;
@@ -3319,7 +3492,7 @@ add_alternate_cea_modes(struct drm_connector *connector, struct edid *edid)
unsigned int clock1, clock2;
if (drm_valid_cea_vic(vic)) {
- cea_mode = &edid_cea_modes[vic];
+ cea_mode = cea_mode_for_vic(vic);
clock2 = cea_mode_alternate_clock(cea_mode);
} else {
vic = drm_match_hdmi_mode(mode);
@@ -3394,7 +3567,7 @@ drm_display_mode_from_vic_index(struct drm_connector *connector,
if (!drm_valid_cea_vic(vic))
return NULL;
- newmode = drm_mode_duplicate(dev, &edid_cea_modes[vic]);
+ newmode = drm_mode_duplicate(dev, cea_mode_for_vic(vic));
if (!newmode)
return NULL;
@@ -3428,7 +3601,7 @@ static int do_y420vdb_modes(struct drm_connector *connector,
if (!drm_valid_cea_vic(vic))
continue;
- newmode = drm_mode_duplicate(dev, &edid_cea_modes[vic]);
+ newmode = drm_mode_duplicate(dev, cea_mode_for_vic(vic));
if (!newmode)
break;
bitmap_set(hdmi->y420_vdb_modes, vic, 1);
@@ -3997,7 +4170,7 @@ static void fixup_detailed_cea_mode_clock(struct drm_display_mode *mode)
vic = drm_match_cea_mode_clock_tolerance(mode, 5);
if (drm_valid_cea_vic(vic)) {
type = "CEA";
- cea_mode = &edid_cea_modes[vic];
+ cea_mode = cea_mode_for_vic(vic);
clock1 = cea_mode->clock;
clock2 = cea_mode_alternate_clock(cea_mode);
} else {
@@ -4279,12 +4452,12 @@ int drm_edid_to_sad(struct edid *edid, struct cea_sad **sads)
cea = drm_find_cea_extension(edid);
if (!cea) {
DRM_DEBUG_KMS("SAD: no CEA Extension found\n");
- return -ENOENT;
+ return 0;
}
if (cea_revision(cea) < 3) {
DRM_DEBUG_KMS("SAD: wrong CEA revision\n");
- return -EOPNOTSUPP;
+ return 0;
}
if (cea_db_offsets(cea, &start, &end)) {
@@ -4340,12 +4513,12 @@ int drm_edid_to_speaker_allocation(struct edid *edid, u8 **sadb)
cea = drm_find_cea_extension(edid);
if (!cea) {
DRM_DEBUG_KMS("SAD: no CEA Extension found\n");
- return -ENOENT;
+ return 0;
}
if (cea_revision(cea) < 3) {
DRM_DEBUG_KMS("SAD: wrong CEA revision\n");
- return -EOPNOTSUPP;
+ return 0;
}
if (cea_db_offsets(cea, &start, &end)) {
@@ -4573,7 +4746,7 @@ static void drm_parse_hdmi_forum_vsdb(struct drm_connector *connector,
if (scdc->supported) {
scdc->scrambling.supported = true;
- /* Few sinks support scrambling for cloks < 340M */
+ /* Few sinks support scrambling for clocks < 340M */
if ((hf_vsdb[6] & 0x8))
scdc->scrambling.low_rates = true;
}
@@ -5222,6 +5395,7 @@ drm_hdmi_avi_infoframe_from_display_mode(struct hdmi_avi_infoframe *frame,
const struct drm_display_mode *mode)
{
enum hdmi_picture_aspect picture_aspect;
+ u8 vic, hdmi_vic;
int err;
if (!frame || !mode)
@@ -5234,7 +5408,8 @@ drm_hdmi_avi_infoframe_from_display_mode(struct hdmi_avi_infoframe *frame,
if (mode->flags & DRM_MODE_FLAG_DBLCLK)
frame->pixel_repeat = 1;
- frame->video_code = drm_mode_cea_vic(connector, mode);
+ vic = drm_mode_cea_vic(connector, mode);
+ hdmi_vic = drm_mode_hdmi_vic(connector, mode);
frame->picture_aspect = HDMI_PICTURE_ASPECT_NONE;
@@ -5248,11 +5423,15 @@ drm_hdmi_avi_infoframe_from_display_mode(struct hdmi_avi_infoframe *frame,
/*
* Populate picture aspect ratio from either
- * user input (if specified) or from the CEA mode list.
+ * user input (if specified) or from the CEA/HDMI mode lists.
*/
picture_aspect = mode->picture_aspect_ratio;
- if (picture_aspect == HDMI_PICTURE_ASPECT_NONE)
- picture_aspect = drm_get_cea_aspect_ratio(frame->video_code);
+ if (picture_aspect == HDMI_PICTURE_ASPECT_NONE) {
+ if (vic)
+ picture_aspect = drm_get_cea_aspect_ratio(vic);
+ else if (hdmi_vic)
+ picture_aspect = drm_get_hdmi_aspect_ratio(hdmi_vic);
+ }
/*
* The infoframe can't convey anything but none, 4:3
@@ -5260,12 +5439,20 @@ drm_hdmi_avi_infoframe_from_display_mode(struct hdmi_avi_infoframe *frame,
* we can only satisfy it by specifying the right VIC.
*/
if (picture_aspect > HDMI_PICTURE_ASPECT_16_9) {
- if (picture_aspect !=
- drm_get_cea_aspect_ratio(frame->video_code))
+ if (vic) {
+ if (picture_aspect != drm_get_cea_aspect_ratio(vic))
+ return -EINVAL;
+ } else if (hdmi_vic) {
+ if (picture_aspect != drm_get_hdmi_aspect_ratio(hdmi_vic))
+ return -EINVAL;
+ } else {
return -EINVAL;
+ }
+
picture_aspect = HDMI_PICTURE_ASPECT_NONE;
}
+ frame->video_code = vic;
frame->picture_aspect = picture_aspect;
frame->active_aspect = HDMI_ACTIVE_ASPECT_PICTURE;
frame->scan_mode = HDMI_SCAN_MODE_UNDERSCAN;
diff --git a/drivers/gpu/drm/drm_encoder.c b/drivers/gpu/drm/drm_encoder.c
index 80d88a55302e..e555281f43d4 100644
--- a/drivers/gpu/drm/drm_encoder.c
+++ b/drivers/gpu/drm/drm_encoder.c
@@ -140,6 +140,7 @@ int drm_encoder_init(struct drm_device *dev,
goto out_put;
}
+ INIT_LIST_HEAD(&encoder->bridge_chain);
list_add_tail(&encoder->head, &dev->mode_config.encoder_list);
encoder->index = dev->mode_config.num_encoder++;
@@ -160,22 +161,16 @@ EXPORT_SYMBOL(drm_encoder_init);
void drm_encoder_cleanup(struct drm_encoder *encoder)
{
struct drm_device *dev = encoder->dev;
+ struct drm_bridge *bridge, *next;
/* Note that the encoder_list is considered to be static; should we
* remove the drm_encoder at runtime we would have to decrement all
* the indices on the drm_encoder after us in the encoder_list.
*/
- if (encoder->bridge) {
- struct drm_bridge *bridge = encoder->bridge;
- struct drm_bridge *next;
-
- while (bridge) {
- next = bridge->next;
- drm_bridge_detach(bridge);
- bridge = next;
- }
- }
+ list_for_each_entry_safe(bridge, next, &encoder->bridge_chain,
+ chain_node)
+ drm_bridge_detach(bridge);
drm_mode_object_unregister(dev, &encoder->base);
kfree(encoder->name);
diff --git a/drivers/gpu/drm/drm_fb_cma_helper.c b/drivers/gpu/drm/drm_fb_cma_helper.c
index c0b0f603af63..9801c0333eca 100644
--- a/drivers/gpu/drm/drm_fb_cma_helper.c
+++ b/drivers/gpu/drm/drm_fb_cma_helper.c
@@ -9,6 +9,7 @@
* Copyright (C) 2012 Red Hat
*/
+#include <drm/drm_fb_cma_helper.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_framebuffer.h>
#include <drm/drm_gem_cma_helper.h>
diff --git a/drivers/gpu/drm/drm_fb_helper.c b/drivers/gpu/drm/drm_fb_helper.c
index d8e8f3960f4d..4c7cbce7bae7 100644
--- a/drivers/gpu/drm/drm_fb_helper.c
+++ b/drivers/gpu/drm/drm_fb_helper.c
@@ -95,10 +95,6 @@ static DEFINE_MUTEX(kernel_fb_helper_lock);
* It will automatically set up deferred I/O if the driver requires a shadow
* buffer.
*
- * For other drivers, setup fbdev emulation by calling
- * drm_fb_helper_fbdev_setup() and tear it down by calling
- * drm_fb_helper_fbdev_teardown().
- *
* At runtime drivers should restore the fbdev console by using
* drm_fb_helper_lastclose() as their &drm_driver.lastclose callback.
* They should also notify the fb helper code from updates to the output
@@ -195,6 +191,7 @@ int drm_fb_helper_debug_leave(struct fb_info *info)
{
struct drm_fb_helper *helper = info->par;
struct drm_client_dev *client = &helper->client;
+ struct drm_device *dev = helper->dev;
struct drm_crtc *crtc;
const struct drm_crtc_helper_funcs *funcs;
struct drm_mode_set *mode_set;
@@ -213,7 +210,7 @@ int drm_fb_helper_debug_leave(struct fb_info *info)
continue;
if (!fb) {
- DRM_ERROR("no fb to restore??\n");
+ drm_err(dev, "no fb to restore?\n");
continue;
}
@@ -567,8 +564,7 @@ EXPORT_SYMBOL(drm_fb_helper_unregister_fbi);
 * drm_fb_helper_fini - finalize a &struct drm_fb_helper
* @fb_helper: driver-allocated fbdev helper, can be NULL
*
- * This cleans up all remaining resources associated with @fb_helper. Must be
- * called after drm_fb_helper_unlink_fbi() was called.
+ * This cleans up all remaining resources associated with @fb_helper.
*/
void drm_fb_helper_fini(struct drm_fb_helper *fb_helper)
{
@@ -608,19 +604,6 @@ void drm_fb_helper_fini(struct drm_fb_helper *fb_helper)
}
EXPORT_SYMBOL(drm_fb_helper_fini);
-/**
- * drm_fb_helper_unlink_fbi - wrapper around unlink_framebuffer
- * @fb_helper: driver-allocated fbdev helper, can be NULL
- *
- * A wrapper around unlink_framebuffer implemented by fbdev core
- */
-void drm_fb_helper_unlink_fbi(struct drm_fb_helper *fb_helper)
-{
- if (fb_helper && fb_helper->fbdev)
- unlink_framebuffer(fb_helper->fbdev);
-}
-EXPORT_SYMBOL(drm_fb_helper_unlink_fbi);
-
static bool drm_fbdev_use_shadow_fb(struct drm_fb_helper *fb_helper)
{
struct drm_device *dev = fb_helper->dev;
@@ -1266,12 +1249,13 @@ int drm_fb_helper_check_var(struct fb_var_screeninfo *var,
{
struct drm_fb_helper *fb_helper = info->par;
struct drm_framebuffer *fb = fb_helper->fb;
+ struct drm_device *dev = fb_helper->dev;
if (in_dbg_master())
return -EINVAL;
if (var->pixclock != 0) {
- DRM_DEBUG("fbdev emulation doesn't support changing the pixel clock, value of pixclock is ignored\n");
+ drm_dbg_kms(dev, "fbdev emulation doesn't support changing the pixel clock, value of pixclock is ignored\n");
var->pixclock = 0;
}
@@ -1286,7 +1270,7 @@ int drm_fb_helper_check_var(struct fb_var_screeninfo *var,
if (var->bits_per_pixel > fb->format->cpp[0] * 8 ||
var->xres > fb->width || var->yres > fb->height ||
var->xres_virtual > fb->width || var->yres_virtual > fb->height) {
- DRM_DEBUG("fb requested width/height/bpp can't fit in current fb "
+ drm_dbg_kms(dev, "fb requested width/height/bpp can't fit in current fb "
"request %dx%d-%d (virtual %dx%d) > %dx%d-%d\n",
var->xres, var->yres, var->bits_per_pixel,
var->xres_virtual, var->yres_virtual,
@@ -1318,7 +1302,7 @@ int drm_fb_helper_check_var(struct fb_var_screeninfo *var,
* so reject all pixel format changing requests.
*/
if (!drm_fb_pixel_format_equal(var, &info->var)) {
- DRM_DEBUG("fbdev emulation doesn't support changing the pixel format\n");
+ drm_dbg_kms(dev, "fbdev emulation doesn't support changing the pixel format\n");
return -EINVAL;
}
@@ -1343,7 +1327,7 @@ int drm_fb_helper_set_par(struct fb_info *info)
return -EBUSY;
if (var->pixclock != 0) {
- DRM_ERROR("PIXEL CLOCK SET\n");
+ drm_err(fb_helper->dev, "PIXEL CLOCK SET\n");
return -EINVAL;
}
@@ -1453,6 +1437,7 @@ static int drm_fb_helper_single_fb_probe(struct drm_fb_helper *fb_helper,
int preferred_bpp)
{
struct drm_client_dev *client = &fb_helper->client;
+ struct drm_device *dev = fb_helper->dev;
int ret = 0;
int crtc_count = 0;
struct drm_connector_list_iter conn_iter;
@@ -1516,7 +1501,7 @@ static int drm_fb_helper_single_fb_probe(struct drm_fb_helper *fb_helper,
struct drm_plane *plane = crtc->primary;
int j;
- DRM_DEBUG("test CRTC %u primary plane\n", drm_crtc_index(crtc));
+ drm_dbg_kms(dev, "test CRTC %u primary plane\n", drm_crtc_index(crtc));
for (j = 0; j < plane->format_count; j++) {
const struct drm_format_info *fmt;
@@ -1549,7 +1534,7 @@ static int drm_fb_helper_single_fb_probe(struct drm_fb_helper *fb_helper,
}
}
if (sizes.surface_depth != best_depth && best_depth) {
- DRM_INFO("requested bpp %d, scaled depth down to %d",
+ drm_info(dev, "requested bpp %d, scaled depth down to %d",
sizes.surface_bpp, best_depth);
sizes.surface_depth = best_depth;
}
@@ -1581,7 +1566,9 @@ static int drm_fb_helper_single_fb_probe(struct drm_fb_helper *fb_helper,
for (j = 0; j < mode_set->num_connectors; j++) {
struct drm_connector *connector = mode_set->connectors[j];
- if (connector->has_tile) {
+ if (connector->has_tile &&
+ desired_mode->hdisplay == connector->tile_h_size &&
+ desired_mode->vdisplay == connector->tile_v_size) {
lasth = (connector->tile_h_loc == (connector->num_h_tile - 1));
lastv = (connector->tile_v_loc == (connector->num_v_tile - 1));
/* cloning to multiple tiles is just crazy-talk, so: */
@@ -1597,7 +1584,7 @@ static int drm_fb_helper_single_fb_probe(struct drm_fb_helper *fb_helper,
mutex_unlock(&client->modeset_mutex);
if (crtc_count == 0 || sizes.fb_width == -1 || sizes.fb_height == -1) {
- DRM_INFO("Cannot find any crtc or sizes\n");
+ drm_info(dev, "Cannot find any crtc or sizes\n");
/* First time: disable all crtc's.. */
if (!fb_helper->deferred_setup)
@@ -1912,7 +1899,7 @@ int drm_fb_helper_hotplug_event(struct drm_fb_helper *fb_helper)
drm_master_internal_release(fb_helper->dev);
- DRM_DEBUG_KMS("\n");
+ drm_dbg_kms(fb_helper->dev, "\n");
drm_client_modeset_probe(&fb_helper->client, fb_helper->fb->width, fb_helper->fb->height);
drm_setup_crtcs_fb(fb_helper);
@@ -1925,108 +1912,6 @@ int drm_fb_helper_hotplug_event(struct drm_fb_helper *fb_helper)
EXPORT_SYMBOL(drm_fb_helper_hotplug_event);
/**
- * drm_fb_helper_fbdev_setup() - Setup fbdev emulation
- * @dev: DRM device
- * @fb_helper: fbdev helper structure to set up
- * @funcs: fbdev helper functions
- * @preferred_bpp: Preferred bits per pixel for the device.
- * @dev->mode_config.preferred_depth is used if this is zero.
- * @max_conn_count: Maximum number of connectors (not used)
- *
- * This function sets up fbdev emulation and registers fbdev for access by
- * userspace. If all connectors are disconnected, setup is deferred to the next
- * time drm_fb_helper_hotplug_event() is called.
- * The caller must to provide a &drm_fb_helper_funcs->fb_probe callback
- * function.
- *
- * Use drm_fb_helper_fbdev_teardown() to destroy the fbdev.
- *
- * See also: drm_fb_helper_initial_config(), drm_fbdev_generic_setup().
- *
- * Returns:
- * Zero on success or negative error code on failure.
- */
-int drm_fb_helper_fbdev_setup(struct drm_device *dev,
- struct drm_fb_helper *fb_helper,
- const struct drm_fb_helper_funcs *funcs,
- unsigned int preferred_bpp,
- unsigned int max_conn_count)
-{
- int ret;
-
- if (!preferred_bpp)
- preferred_bpp = dev->mode_config.preferred_depth;
- if (!preferred_bpp)
- preferred_bpp = 32;
-
- drm_fb_helper_prepare(dev, fb_helper, funcs);
-
- ret = drm_fb_helper_init(dev, fb_helper, 0);
- if (ret < 0) {
- DRM_DEV_ERROR(dev->dev, "fbdev: Failed to initialize (ret=%d)\n", ret);
- return ret;
- }
-
- if (!drm_drv_uses_atomic_modeset(dev))
- drm_helper_disable_unused_functions(dev);
-
- ret = drm_fb_helper_initial_config(fb_helper, preferred_bpp);
- if (ret < 0) {
- DRM_DEV_ERROR(dev->dev, "fbdev: Failed to set configuration (ret=%d)\n", ret);
- goto err_drm_fb_helper_fini;
- }
-
- return 0;
-
-err_drm_fb_helper_fini:
- drm_fb_helper_fbdev_teardown(dev);
-
- return ret;
-}
-EXPORT_SYMBOL(drm_fb_helper_fbdev_setup);
-
-/**
- * drm_fb_helper_fbdev_teardown - Tear down fbdev emulation
- * @dev: DRM device
- *
- * This function unregisters fbdev if not already done and cleans up the
- * associated resources including the &drm_framebuffer.
- * The driver is responsible for freeing the &drm_fb_helper structure which is
- * stored in &drm_device->fb_helper. Do note that this pointer has been cleared
- * when this function returns.
- *
- * In order to support device removal/unplug while file handles are still open,
- * drm_fb_helper_unregister_fbi() should be called on device removal and
- * drm_fb_helper_fbdev_teardown() in the &drm_driver->release callback when
- * file handles are closed.
- */
-void drm_fb_helper_fbdev_teardown(struct drm_device *dev)
-{
- struct drm_fb_helper *fb_helper = dev->fb_helper;
- struct fb_ops *fbops = NULL;
-
- if (!fb_helper)
- return;
-
- /* Unregister if it hasn't been done already */
- if (fb_helper->fbdev && fb_helper->fbdev->dev)
- drm_fb_helper_unregister_fbi(fb_helper);
-
- if (fb_helper->fbdev && fb_helper->fbdev->fbdefio) {
- fb_deferred_io_cleanup(fb_helper->fbdev);
- kfree(fb_helper->fbdev->fbdefio);
- fbops = fb_helper->fbdev->fbops;
- }
-
- drm_fb_helper_fini(fb_helper);
- kfree(fbops);
-
- if (fb_helper->fb)
- drm_framebuffer_remove(fb_helper->fb);
-}
-EXPORT_SYMBOL(drm_fb_helper_fbdev_teardown);
-
-/**
* drm_fb_helper_lastclose - DRM driver lastclose helper for fbdev emulation
* @dev: DRM device
*
@@ -2079,7 +1964,6 @@ static int drm_fbdev_fb_release(struct fb_info *info, int user)
static void drm_fbdev_cleanup(struct drm_fb_helper *fb_helper)
{
struct fb_info *fbi = fb_helper->fbdev;
- struct fb_ops *fbops = NULL;
void *shadow = NULL;
if (!fb_helper->dev)
@@ -2088,15 +1972,11 @@ static void drm_fbdev_cleanup(struct drm_fb_helper *fb_helper)
if (fbi && fbi->fbdefio) {
fb_deferred_io_cleanup(fbi);
shadow = fbi->screen_buffer;
- fbops = fbi->fbops;
}
drm_fb_helper_fini(fb_helper);
- if (shadow) {
- vfree(shadow);
- kfree(fbops);
- }
+ vfree(shadow);
drm_client_framebuffer_delete(fb_helper->buffer);
}
@@ -2127,7 +2007,7 @@ static int drm_fbdev_fb_mmap(struct fb_info *info, struct vm_area_struct *vma)
return -ENODEV;
}
-static struct fb_ops drm_fbdev_fb_ops = {
+static const struct fb_ops drm_fbdev_fb_ops = {
.owner = THIS_MODULE,
DRM_FB_HELPER_DEFAULT_OPS,
.fb_open = drm_fbdev_fb_open,
@@ -2146,32 +2026,26 @@ static struct fb_deferred_io drm_fbdev_defio = {
.deferred_io = drm_fb_helper_deferred_io,
};
-/**
- * drm_fb_helper_generic_probe - Generic fbdev emulation probe helper
- * @fb_helper: fbdev helper structure
- * @sizes: describes fbdev size and scanout surface size
- *
+/*
* This function uses the client API to create a framebuffer backed by a dumb buffer.
*
* The _sys_ versions are used for &fb_ops.fb_read, fb_write, fb_fillrect,
* fb_copyarea, fb_imageblit.
- *
- * Returns:
- * Zero on success or negative error code on failure.
*/
-int drm_fb_helper_generic_probe(struct drm_fb_helper *fb_helper,
- struct drm_fb_helper_surface_size *sizes)
+static int drm_fb_helper_generic_probe(struct drm_fb_helper *fb_helper,
+ struct drm_fb_helper_surface_size *sizes)
{
struct drm_client_dev *client = &fb_helper->client;
+ struct drm_device *dev = fb_helper->dev;
struct drm_client_buffer *buffer;
struct drm_framebuffer *fb;
struct fb_info *fbi;
u32 format;
void *vaddr;
- DRM_DEBUG_KMS("surface width(%d), height(%d) and bpp(%d)\n",
- sizes->surface_width, sizes->surface_height,
- sizes->surface_bpp);
+ drm_dbg_kms(dev, "surface width(%d), height(%d) and bpp(%d)\n",
+ sizes->surface_width, sizes->surface_height,
+ sizes->surface_bpp);
format = drm_mode_legacy_fb_format(sizes->surface_bpp, sizes->surface_depth);
buffer = drm_client_framebuffer_create(client, sizes->surface_width,
@@ -2194,24 +2068,10 @@ int drm_fb_helper_generic_probe(struct drm_fb_helper *fb_helper,
drm_fb_helper_fill_info(fbi, fb_helper, sizes);
if (drm_fbdev_use_shadow_fb(fb_helper)) {
- struct fb_ops *fbops;
- void *shadow;
-
- /*
- * fb_deferred_io_cleanup() clears &fbops->fb_mmap so a per
- * instance version is necessary.
- */
- fbops = kzalloc(sizeof(*fbops), GFP_KERNEL);
- shadow = vzalloc(fbi->screen_size);
- if (!fbops || !shadow) {
- kfree(fbops);
- vfree(shadow);
+ fbi->screen_buffer = vzalloc(fbi->screen_size);
+ if (!fbi->screen_buffer)
return -ENOMEM;
- }
- *fbops = *fbi->fbops;
- fbi->fbops = fbops;
- fbi->screen_buffer = shadow;
fbi->fbdefio = &drm_fbdev_defio;
fb_deferred_io_init(fbi);
@@ -2232,7 +2092,6 @@ int drm_fb_helper_generic_probe(struct drm_fb_helper *fb_helper,
return 0;
}
-EXPORT_SYMBOL(drm_fb_helper_generic_probe);
static const struct drm_fb_helper_funcs drm_fb_helper_generic_funcs = {
.fb_probe = drm_fb_helper_generic_probe,
@@ -2270,7 +2129,7 @@ static int drm_fbdev_client_hotplug(struct drm_client_dev *client)
return drm_fb_helper_hotplug_event(dev->fb_helper);
if (!dev->mode_config.num_connector) {
- DRM_DEV_DEBUG(dev->dev, "No connectors found, will not create framebuffer!\n");
+ drm_dbg_kms(dev, "No connectors found, will not create framebuffer!\n");
return 0;
}
@@ -2295,7 +2154,7 @@ err:
fb_helper->dev = NULL;
fb_helper->fbdev = NULL;
- DRM_DEV_ERROR(dev->dev, "fbdev: Failed to setup generic emulation (ret=%d)\n", ret);
+ drm_err(dev, "fbdev: Failed to setup generic emulation (ret=%d)\n", ret);
return ret;
}
@@ -2314,8 +2173,7 @@ static const struct drm_client_funcs drm_fbdev_client_funcs = {
* @dev->mode_config.preferred_depth is used if this is zero.
*
* This function sets up generic fbdev emulation for drivers that supports
- * dumb buffers with a virtual address and that can be mmap'ed. If the driver
- * does not support these functions, it could use drm_fb_helper_fbdev_setup().
+ * dumb buffers with a virtual address and that can be mmap'ed.
*
* Restore, hotplug events and teardown are all taken care of. Drivers that do
* suspend/resume need to call drm_fb_helper_set_suspend_unlocked() themselves.
@@ -2353,7 +2211,7 @@ int drm_fbdev_generic_setup(struct drm_device *dev, unsigned int preferred_bpp)
ret = drm_client_init(dev, &fb_helper->client, "fbdev", &drm_fbdev_client_funcs);
if (ret) {
kfree(fb_helper);
- DRM_DEV_ERROR(dev->dev, "Failed to register client: %d\n", ret);
+ drm_err(dev, "Failed to register client: %d\n", ret);
return ret;
}
@@ -2365,7 +2223,7 @@ int drm_fbdev_generic_setup(struct drm_device *dev, unsigned int preferred_bpp)
ret = drm_fbdev_client_hotplug(&fb_helper->client);
if (ret)
- DRM_DEV_DEBUG(dev->dev, "client hotplug ret=%d\n", ret);
+ drm_dbg_kms(dev, "client hotplug ret=%d\n", ret);
drm_client_register(&fb_helper->client);
diff --git a/drivers/gpu/drm/drm_file.c b/drivers/gpu/drm/drm_file.c
index ea34bc991858..92d16724f949 100644
--- a/drivers/gpu/drm/drm_file.c
+++ b/drivers/gpu/drm/drm_file.c
@@ -31,7 +31,9 @@
* OTHER DEALINGS IN THE SOFTWARE.
*/
+#include <linux/anon_inodes.h>
#include <linux/dma-fence.h>
+#include <linux/file.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/poll.h>
@@ -285,7 +287,7 @@ static int drm_cpu_valid(void)
}
/*
- * Called whenever a process opens /dev/drm.
+ * Called whenever a process opens a drm node
*
* \param filp file pointer.
* \param minor acquired minor-object.
@@ -754,3 +756,43 @@ void drm_send_event(struct drm_device *dev, struct drm_pending_event *e)
spin_unlock_irqrestore(&dev->event_lock, irqflags);
}
EXPORT_SYMBOL(drm_send_event);
+
+/**
+ * mock_drm_getfile - Create a new struct file for the drm device
+ * @minor: drm minor to wrap (e.g. #drm_device.primary)
+ * @flags: file creation mode (O_RDWR etc)
+ *
+ * This create a new struct file that wraps a DRM file context around a
+ * DRM minor. This mimicks userspace opening e.g. /dev/dri/card0, but without
+ * invoking userspace. The struct file may be operated on using its f_op
+ * (the drm_device.driver.fops) to mimick userspace operations, or be supplied
+ * to userspace facing functions as an internal/anonymous client.
+ *
+ * RETURNS:
+ * Pointer to newly created struct file, ERR_PTR on failure.
+ */
+struct file *mock_drm_getfile(struct drm_minor *minor, unsigned int flags)
+{
+ struct drm_device *dev = minor->dev;
+ struct drm_file *priv;
+ struct file *file;
+
+ priv = drm_file_alloc(minor);
+ if (IS_ERR(priv))
+ return ERR_CAST(priv);
+
+ file = anon_inode_getfile("drm", dev->driver->fops, priv, flags);
+ if (IS_ERR(file)) {
+ drm_file_free(priv);
+ return file;
+ }
+
+ /* Everyone shares a single global address space */
+ file->f_mapping = dev->anon_inode->i_mapping;
+
+ drm_dev_get(dev);
+ priv->filp = file;
+
+ return file;
+}
+EXPORT_SYMBOL_FOR_TESTS_ONLY(mock_drm_getfile);
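A minimal sketch of how a selftest might consume this helper; the function name and teardown are illustrative, not part of this series. fput() releases the file, which in turn drops the drm_file and the device reference taken above:

	/* hypothetical selftest usage of mock_drm_getfile() */
	static int igt_mock_open(struct drm_device *dev)
	{
		struct file *file;

		file = mock_drm_getfile(dev->primary, O_RDWR);
		if (IS_ERR(file))
			return PTR_ERR(file);

		/* drive file->f_op as if userspace had opened the node */

		fput(file);
		return 0;
	}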
diff --git a/drivers/gpu/drm/drm_fourcc.c b/drivers/gpu/drm/drm_fourcc.c
index c630064ccf41..b234bfaeda06 100644
--- a/drivers/gpu/drm/drm_fourcc.c
+++ b/drivers/gpu/drm/drm_fourcc.c
@@ -253,17 +253,17 @@ const struct drm_format_info *__drm_format_info(u32 format)
.char_per_block = { 8, 0, 0 }, .block_w = { 2, 0, 0 }, .block_h = { 2, 0, 0 },
.hsub = 2, .vsub = 2, .is_yuv = true },
{ .format = DRM_FORMAT_P010, .depth = 0, .num_planes = 2,
- .char_per_block = { 2, 4, 0 }, .block_w = { 1, 0, 0 }, .block_h = { 1, 0, 0 },
+ .char_per_block = { 2, 4, 0 }, .block_w = { 1, 1, 0 }, .block_h = { 1, 1, 0 },
.hsub = 2, .vsub = 2, .is_yuv = true},
{ .format = DRM_FORMAT_P012, .depth = 0, .num_planes = 2,
- .char_per_block = { 2, 4, 0 }, .block_w = { 1, 0, 0 }, .block_h = { 1, 0, 0 },
+ .char_per_block = { 2, 4, 0 }, .block_w = { 1, 1, 0 }, .block_h = { 1, 1, 0 },
.hsub = 2, .vsub = 2, .is_yuv = true},
{ .format = DRM_FORMAT_P016, .depth = 0, .num_planes = 2,
- .char_per_block = { 2, 4, 0 }, .block_w = { 1, 0, 0 }, .block_h = { 1, 0, 0 },
+ .char_per_block = { 2, 4, 0 }, .block_w = { 1, 1, 0 }, .block_h = { 1, 1, 0 },
.hsub = 2, .vsub = 2, .is_yuv = true},
{ .format = DRM_FORMAT_P210, .depth = 0,
.num_planes = 2, .char_per_block = { 2, 4, 0 },
- .block_w = { 1, 0, 0 }, .block_h = { 1, 0, 0 }, .hsub = 2,
+ .block_w = { 1, 1, 0 }, .block_h = { 1, 1, 0 }, .hsub = 2,
.vsub = 1, .is_yuv = true },
{ .format = DRM_FORMAT_VUY101010, .depth = 0,
.num_planes = 1, .cpp = { 0, 0, 0 }, .hsub = 1, .vsub = 1,
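For context: the format-info accessors already map a zero block_w/block_h to 1, so this is chiefly a consistency fix for code that reads the table fields directly. A sketch of the per-plane pitch math these entries feed, using the drm_fourcc helpers (values for a hypothetical 1920-wide P010 buffer):

	const struct drm_format_info *info = drm_format_info(DRM_FORMAT_P010);
	/* plane 0 (Y): 2 bytes per pixel -> 3840-byte pitch */
	u64 y_pitch  = drm_format_info_min_pitch(info, 0, 1920);
	/* plane 1 (CbCr): 4 bytes per 2x-subsampled sample -> 3840 as well */
	u64 uv_pitch = drm_format_info_min_pitch(info, 1, 1920 / 2);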
diff --git a/drivers/gpu/drm/drm_gem.c b/drivers/gpu/drm/drm_gem.c
index 000fa4a1899f..a9e4a610445a 100644
--- a/drivers/gpu/drm/drm_gem.c
+++ b/drivers/gpu/drm/drm_gem.c
@@ -1114,9 +1114,6 @@ int drm_gem_mmap_obj(struct drm_gem_object *obj, unsigned long obj_size,
drm_gem_object_get(obj);
if (obj->funcs && obj->funcs->mmap) {
- /* Remove the fake offset */
- vma->vm_pgoff -= drm_vma_node_start(&obj->vma_node);
-
ret = obj->funcs->mmap(obj, vma);
if (ret) {
drm_gem_object_put_unlocked(obj);
diff --git a/drivers/gpu/drm/drm_gem_framebuffer_helper.c b/drivers/gpu/drm/drm_gem_framebuffer_helper.c
index b9bcd310ca2d..3a7ace19a902 100644
--- a/drivers/gpu/drm/drm_gem_framebuffer_helper.c
+++ b/drivers/gpu/drm/drm_gem_framebuffer_helper.c
@@ -74,8 +74,7 @@ drm_gem_fb_alloc(struct drm_device *dev,
ret = drm_framebuffer_init(dev, fb, funcs);
if (ret) {
- DRM_DEV_ERROR(dev->dev, "Failed to init framebuffer: %d\n",
- ret);
+ drm_err(dev, "Failed to init framebuffer: %d\n", ret);
kfree(fb);
return ERR_PTR(ret);
}
@@ -160,7 +159,7 @@ drm_gem_fb_create_with_funcs(struct drm_device *dev, struct drm_file *file,
objs[i] = drm_gem_object_lookup(file, mode_cmd->handles[i]);
if (!objs[i]) {
- DRM_DEBUG_KMS("Failed to lookup GEM object\n");
+ drm_dbg_kms(dev, "Failed to lookup GEM object\n");
ret = -ENOENT;
goto err_gem_object_put;
}
diff --git a/drivers/gpu/drm/drm_gem_shmem_helper.c b/drivers/gpu/drm/drm_gem_shmem_helper.c
index 0810d3ef6961..a421a2eed48a 100644
--- a/drivers/gpu/drm/drm_gem_shmem_helper.c
+++ b/drivers/gpu/drm/drm_gem_shmem_helper.c
@@ -528,6 +528,9 @@ int drm_gem_shmem_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma)
struct drm_gem_shmem_object *shmem;
int ret;
+ /* Remove the fake offset */
+ vma->vm_pgoff -= drm_vma_node_start(&obj->vma_node);
+
shmem = to_drm_gem_shmem_obj(obj);
ret = drm_gem_shmem_get_pages(shmem);
diff --git a/drivers/gpu/drm/drm_gem_vram_helper.c b/drivers/gpu/drm/drm_gem_vram_helper.c
index 666cb4c22bb9..a4863326061a 100644
--- a/drivers/gpu/drm/drm_gem_vram_helper.c
+++ b/drivers/gpu/drm/drm_gem_vram_helper.c
@@ -2,6 +2,7 @@
#include <drm/drm_debugfs.h>
#include <drm/drm_device.h>
+#include <drm/drm_drv.h>
#include <drm/drm_file.h>
#include <drm/drm_framebuffer.h>
#include <drm/drm_gem_ttm_helper.h>
@@ -92,14 +93,18 @@ static void drm_gem_vram_placement(struct drm_gem_vram_object *gbo,
}
static int drm_gem_vram_init(struct drm_device *dev,
- struct ttm_bo_device *bdev,
struct drm_gem_vram_object *gbo,
- size_t size, unsigned long pg_align,
- bool interruptible)
+ size_t size, unsigned long pg_align)
{
+ struct drm_vram_mm *vmm = dev->vram_mm;
+ struct ttm_bo_device *bdev;
int ret;
size_t acc_size;
+ if (WARN_ONCE(!vmm, "VRAM MM not initialized"))
+ return -EINVAL;
+ bdev = &vmm->bdev;
+
gbo->bo.base.funcs = &drm_gem_vram_object_funcs;
ret = drm_gem_object_init(dev, &gbo->bo.base, size);
@@ -112,7 +117,7 @@ static int drm_gem_vram_init(struct drm_device *dev,
drm_gem_vram_placement(gbo, TTM_PL_FLAG_VRAM | TTM_PL_FLAG_SYSTEM);
ret = ttm_bo_init(bdev, &gbo->bo, size, ttm_bo_type_device,
- &gbo->placement, pg_align, interruptible, acc_size,
+ &gbo->placement, pg_align, false, acc_size,
NULL, NULL, ttm_buffer_object_destroy);
if (ret)
goto err_drm_gem_object_release;
@@ -127,29 +132,33 @@ err_drm_gem_object_release:
/**
* drm_gem_vram_create() - Creates a VRAM-backed GEM object
* @dev: the DRM device
- * @bdev: the TTM BO device backing the object
* @size: the buffer size in bytes
* @pg_align: the buffer's alignment in multiples of the page size
- * @interruptible: sleep interruptible if waiting for memory
*
* Returns:
* A new instance of &struct drm_gem_vram_object on success, or
* an ERR_PTR()-encoded error code otherwise.
*/
struct drm_gem_vram_object *drm_gem_vram_create(struct drm_device *dev,
- struct ttm_bo_device *bdev,
size_t size,
- unsigned long pg_align,
- bool interruptible)
+ unsigned long pg_align)
{
struct drm_gem_vram_object *gbo;
int ret;
- gbo = kzalloc(sizeof(*gbo), GFP_KERNEL);
- if (!gbo)
- return ERR_PTR(-ENOMEM);
+ if (dev->driver->gem_create_object) {
+ struct drm_gem_object *gem =
+ dev->driver->gem_create_object(dev, size);
+ if (!gem)
+ return ERR_PTR(-ENOMEM);
+ gbo = drm_gem_vram_of_gem(gem);
+ } else {
+ gbo = kzalloc(sizeof(*gbo), GFP_KERNEL);
+ if (!gbo)
+ return ERR_PTR(-ENOMEM);
+ }
- ret = drm_gem_vram_init(dev, bdev, gbo, size, pg_align, interruptible);
+ ret = drm_gem_vram_init(dev, gbo, size, pg_align);
if (ret < 0)
goto err_kfree;
@@ -483,9 +492,8 @@ EXPORT_SYMBOL(drm_gem_vram_vunmap);
Helper for implementing &struct drm_driver.dumb_create
* @file: the DRM file
* @dev: the DRM device
- * @bdev: the TTM BO device managing the buffer object
* @pg_align: the buffer's alignment in multiples of the page size
- * @interruptible: sleep interruptible if waiting for memory
+ * @pitch_align: the scanline alignment in bytes; must be a power of two
* @args: the arguments as provided to \
&struct drm_driver.dumb_create
*
@@ -500,9 +508,8 @@ EXPORT_SYMBOL(drm_gem_vram_vunmap);
*/
int drm_gem_vram_fill_create_dumb(struct drm_file *file,
struct drm_device *dev,
- struct ttm_bo_device *bdev,
unsigned long pg_align,
- bool interruptible,
+ unsigned long pitch_align,
struct drm_mode_create_dumb *args)
{
size_t pitch, size;
@@ -510,14 +517,19 @@ int drm_gem_vram_fill_create_dumb(struct drm_file *file,
int ret;
u32 handle;
- pitch = args->width * ((args->bpp + 7) / 8);
+ pitch = args->width * DIV_ROUND_UP(args->bpp, 8);
+ if (pitch_align) {
+ if (WARN_ON_ONCE(!is_power_of_2(pitch_align)))
+ return -EINVAL;
+ pitch = ALIGN(pitch, pitch_align);
+ }
size = pitch * args->height;
size = roundup(size, PAGE_SIZE);
if (!size)
return -EINVAL;
- gbo = drm_gem_vram_create(dev, bdev, size, pg_align, interruptible);
+ gbo = drm_gem_vram_create(dev, size, pg_align);
if (IS_ERR(gbo))
return PTR_ERR(gbo);
@@ -612,8 +624,7 @@ int drm_gem_vram_driver_dumb_create(struct drm_file *file,
if (WARN_ONCE(!dev->vram_mm, "VRAM MM not initialized"))
return -EINVAL;
- return drm_gem_vram_fill_create_dumb(file, dev, &dev->vram_mm->bdev, 0,
- false, args);
+ return drm_gem_vram_fill_create_dumb(file, dev, 0, 0, args);
}
EXPORT_SYMBOL(drm_gem_vram_driver_dumb_create);
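A worked example of the new pitch/size math, with illustrative numbers (1366x768 at 24 bpp, with a pitch_align of 256 requested by a hypothetical driver):

	pitch = 1366 * DIV_ROUND_UP(24, 8);	/* 1366 * 3 = 4098 bytes */
	pitch = ALIGN(pitch, 256);		/* rounds up to 4352 bytes */
	size  = roundup(4352 * 768, PAGE_SIZE);	/* 3342336, already page-aligned */

The previous open-coded (bpp + 7) / 8 computed the same bytes-per-pixel value; DIV_ROUND_UP just states the intent.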
diff --git a/drivers/gpu/drm/drm_internal.h b/drivers/gpu/drm/drm_internal.h
index 51a2055c8f18..6937bf923f05 100644
--- a/drivers/gpu/drm/drm_internal.h
+++ b/drivers/gpu/drm/drm_internal.h
@@ -45,12 +45,34 @@ struct drm_file *drm_file_alloc(struct drm_minor *minor);
void drm_file_free(struct drm_file *file);
void drm_lastclose(struct drm_device *dev);
+#ifdef CONFIG_PCI
+
/* drm_pci.c */
int drm_irq_by_busid(struct drm_device *dev, void *data,
struct drm_file *file_priv);
void drm_pci_agp_destroy(struct drm_device *dev);
int drm_pci_set_busid(struct drm_device *dev, struct drm_master *master);
+#else
+
+static inline int drm_irq_by_busid(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
+{
+ return -EINVAL;
+}
+
+static inline void drm_pci_agp_destroy(struct drm_device *dev)
+{
+}
+
+static inline int drm_pci_set_busid(struct drm_device *dev,
+ struct drm_master *master)
+{
+ return -EINVAL;
+}
+
+#endif
+
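The static inline stubs keep !CONFIG_PCI callers free of #ifdef guards; e.g. generic teardown code can stay unconditional:

	/* compiles to a no-op on !CONFIG_PCI kernels */
	if (drm_core_check_feature(dev, DRIVER_LEGACY))
		drm_pci_agp_destroy(dev);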
/* drm_prime.c */
int drm_prime_handle_to_fd_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
diff --git a/drivers/gpu/drm/drm_ioctl.c b/drivers/gpu/drm/drm_ioctl.c
index fcd728d7cf72..5afb39688b55 100644
--- a/drivers/gpu/drm/drm_ioctl.c
+++ b/drivers/gpu/drm/drm_ioctl.c
@@ -652,8 +652,8 @@ static const struct drm_ioctl_desc drm_ioctls[] = {
DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETRESOURCES, drm_mode_getresources, 0),
- DRM_IOCTL_DEF(DRM_IOCTL_PRIME_HANDLE_TO_FD, drm_prime_handle_to_fd_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
- DRM_IOCTL_DEF(DRM_IOCTL_PRIME_FD_TO_HANDLE, drm_prime_fd_to_handle_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
+ DRM_IOCTL_DEF(DRM_IOCTL_PRIME_HANDLE_TO_FD, drm_prime_handle_to_fd_ioctl, DRM_RENDER_ALLOW),
+ DRM_IOCTL_DEF(DRM_IOCTL_PRIME_FD_TO_HANDLE, drm_prime_fd_to_handle_ioctl, DRM_RENDER_ALLOW),
DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETPLANERESOURCES, drm_mode_getplane_res, 0),
DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETCRTC, drm_mode_getcrtc, 0),
diff --git a/drivers/gpu/drm/drm_lock.c b/drivers/gpu/drm/drm_lock.c
index 2e8ce99d0baa..2c79e8199e3c 100644
--- a/drivers/gpu/drm/drm_lock.c
+++ b/drivers/gpu/drm/drm_lock.c
@@ -360,7 +360,8 @@ void drm_legacy_lock_master_cleanup(struct drm_device *dev, struct drm_master *m
/*
* Since the master is disappearing, so is the
* possibility to lock.
- */ mutex_lock(&dev->struct_mutex);
+ */
+ mutex_lock(&dev->struct_mutex);
if (master->lock.hw_lock) {
if (dev->sigdata.lock == master->lock.hw_lock)
dev->sigdata.lock = NULL;
diff --git a/drivers/gpu/drm/drm_mipi_dbi.c b/drivers/gpu/drm/drm_mipi_dbi.c
index e34058c721be..16bff1be4b8a 100644
--- a/drivers/gpu/drm/drm_mipi_dbi.c
+++ b/drivers/gpu/drm/drm_mipi_dbi.c
@@ -367,9 +367,9 @@ static void mipi_dbi_blank(struct mipi_dbi_dev *dbidev)
memset(dbidev->tx_buf, 0, len);
mipi_dbi_command(dbi, MIPI_DCS_SET_COLUMN_ADDRESS, 0, 0,
- (width >> 8) & 0xFF, (width - 1) & 0xFF);
+ ((width - 1) >> 8) & 0xFF, (width - 1) & 0xFF);
mipi_dbi_command(dbi, MIPI_DCS_SET_PAGE_ADDRESS, 0, 0,
- (height >> 8) & 0xFF, (height - 1) & 0xFF);
+ ((height - 1) >> 8) & 0xFF, (height - 1) & 0xFF);
mipi_dbi_command_buf(dbi, MIPI_DCS_WRITE_MEMORY_START,
(u8 *)dbidev->tx_buf, len);
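The point of the fix: the DCS end address is inclusive, so both bytes of the end coordinate must derive from width - 1 (resp. height - 1). A worked case where the old code went wrong, width == 256:

	/* old: high byte from width, low byte from width - 1 */
	hi = (256 >> 8) & 0xff;		/* 0x01 */
	lo = (256 - 1) & 0xff;		/* 0xff => end column 0x01ff = 511 */
	/* new: both bytes from width - 1 */
	hi = (255 >> 8) & 0xff;		/* 0x00 */
	lo = 255 & 0xff;		/* 0xff => end column 255, as intended */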
diff --git a/drivers/gpu/drm/drm_mipi_dsi.c b/drivers/gpu/drm/drm_mipi_dsi.c
index bd2498bbd74a..55531895dde6 100644
--- a/drivers/gpu/drm/drm_mipi_dsi.c
+++ b/drivers/gpu/drm/drm_mipi_dsi.c
@@ -33,6 +33,7 @@
#include <linux/pm_runtime.h>
#include <linux/slab.h>
+#include <drm/drm_dsc.h>
#include <video/mipi_display.h>
/**
@@ -373,6 +374,7 @@ bool mipi_dsi_packet_format_is_short(u8 type)
case MIPI_DSI_V_SYNC_END:
case MIPI_DSI_H_SYNC_START:
case MIPI_DSI_H_SYNC_END:
+ case MIPI_DSI_COMPRESSION_MODE:
case MIPI_DSI_END_OF_TRANSMISSION:
case MIPI_DSI_COLOR_MODE_OFF:
case MIPI_DSI_COLOR_MODE_ON:
@@ -387,7 +389,7 @@ bool mipi_dsi_packet_format_is_short(u8 type)
case MIPI_DSI_DCS_SHORT_WRITE:
case MIPI_DSI_DCS_SHORT_WRITE_PARAM:
case MIPI_DSI_DCS_READ:
- case MIPI_DSI_DCS_COMPRESSION_MODE:
+ case MIPI_DSI_EXECUTE_QUEUE:
case MIPI_DSI_SET_MAXIMUM_RETURN_PACKET_SIZE:
return true;
}
@@ -406,11 +408,12 @@ EXPORT_SYMBOL(mipi_dsi_packet_format_is_short);
bool mipi_dsi_packet_format_is_long(u8 type)
{
switch (type) {
- case MIPI_DSI_PPS_LONG_WRITE:
case MIPI_DSI_NULL_PACKET:
case MIPI_DSI_BLANKING_PACKET:
case MIPI_DSI_GENERIC_LONG_WRITE:
case MIPI_DSI_DCS_LONG_WRITE:
+ case MIPI_DSI_PICTURE_PARAMETER_SET:
+ case MIPI_DSI_COMPRESSED_PIXEL_STREAM:
case MIPI_DSI_LOOSELY_PACKED_PIXEL_STREAM_YCBCR20:
case MIPI_DSI_PACKED_PIXEL_STREAM_YCBCR24:
case MIPI_DSI_PACKED_PIXEL_STREAM_YCBCR16:
@@ -547,6 +550,56 @@ int mipi_dsi_set_maximum_return_packet_size(struct mipi_dsi_device *dsi,
EXPORT_SYMBOL(mipi_dsi_set_maximum_return_packet_size);
/**
+ * mipi_dsi_compression_mode() - enable/disable DSC on the peripheral
+ * @dsi: DSI peripheral device
+ * @enable: Whether to enable or disable the DSC
+ *
+ * Enable or disable Display Stream Compression on the peripheral using the
+ * default Picture Parameter Set and VESA DSC 1.1 algorithm.
+ *
+ * Return: 0 on success or a negative error code on failure.
+ */
+ssize_t mipi_dsi_compression_mode(struct mipi_dsi_device *dsi, bool enable)
+{
+ /* Note: Needs updating for non-default PPS or algorithm */
+ u8 tx[2] = { enable << 0, 0 };
+ struct mipi_dsi_msg msg = {
+ .channel = dsi->channel,
+ .type = MIPI_DSI_COMPRESSION_MODE,
+ .tx_len = sizeof(tx),
+ .tx_buf = tx,
+ };
+ int ret = mipi_dsi_device_transfer(dsi, &msg);
+
+ return (ret < 0) ? ret : 0;
+}
+EXPORT_SYMBOL(mipi_dsi_compression_mode);
+
+/**
+ * mipi_dsi_picture_parameter_set() - transmit the DSC PPS to the peripheral
+ * @dsi: DSI peripheral device
+ * @pps: VESA DSC 1.1 Picture Parameter Set
+ *
+ * Transmit the VESA DSC 1.1 Picture Parameter Set to the peripheral.
+ *
+ * Return: 0 on success or a negative error code on failure.
+ */
+ssize_t mipi_dsi_picture_parameter_set(struct mipi_dsi_device *dsi,
+ const struct drm_dsc_picture_parameter_set *pps)
+{
+ struct mipi_dsi_msg msg = {
+ .channel = dsi->channel,
+ .type = MIPI_DSI_PICTURE_PARAMETER_SET,
+ .tx_len = sizeof(*pps),
+ .tx_buf = pps,
+ };
+ int ret = mipi_dsi_device_transfer(dsi, &msg);
+
+ return (ret < 0) ? ret : 0;
+}
+EXPORT_SYMBOL(mipi_dsi_picture_parameter_set);
+
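A hedged sketch of the enable sequence a panel driver would build from these two helpers, assuming a DSC configuration packed with drm_dsc_pps_payload_pack() from drm_dsc.c; the dsc_cfg variable is hypothetical:

	struct drm_dsc_picture_parameter_set pps;
	int ret;

	drm_dsc_pps_payload_pack(&pps, dsc_cfg);

	ret = mipi_dsi_picture_parameter_set(dsi, &pps);
	if (ret < 0)
		return ret;

	return mipi_dsi_compression_mode(dsi, true);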
+/**
* mipi_dsi_generic_write() - transmit data using a generic write packet
* @dsi: DSI peripheral device
* @payload: buffer containing the payload
diff --git a/drivers/gpu/drm/drm_mode_config.c b/drivers/gpu/drm/drm_mode_config.c
index 3b570a404933..08e6eff6a179 100644
--- a/drivers/gpu/drm/drm_mode_config.c
+++ b/drivers/gpu/drm/drm_mode_config.c
@@ -27,6 +27,7 @@
#include <drm/drm_file.h>
#include <drm/drm_mode_config.h>
#include <drm/drm_print.h>
+#include <linux/dma-resv.h>
#include "drm_crtc_internal.h"
#include "drm_internal.h"
@@ -415,6 +416,33 @@ void drm_mode_config_init(struct drm_device *dev)
dev->mode_config.num_crtc = 0;
dev->mode_config.num_encoder = 0;
dev->mode_config.num_total_plane = 0;
+
+ /*
+ * Prime lockdep with the modeset-lock -> dma_resv ordering once at
+ * init time, so drivers acquiring the locks in the opposite order
+ * get flagged even when the inversion never actually races.
+ */
+ if (IS_ENABLED(CONFIG_LOCKDEP)) {
+ struct drm_modeset_acquire_ctx modeset_ctx;
+ struct ww_acquire_ctx resv_ctx;
+ struct dma_resv resv;
+ int ret;
+
+ dma_resv_init(&resv);
+
+ drm_modeset_acquire_init(&modeset_ctx, 0);
+ ret = drm_modeset_lock(&dev->mode_config.connection_mutex,
+ &modeset_ctx);
+ if (ret == -EDEADLK)
+ ret = drm_modeset_backoff(&modeset_ctx);
+
+ ww_acquire_init(&resv_ctx, &reservation_ww_class);
+ ret = dma_resv_lock(&resv, &resv_ctx);
+ if (ret == -EDEADLK)
+ dma_resv_lock_slow(&resv, &resv_ctx);
+
+ dma_resv_unlock(&resv);
+ ww_acquire_fini(&resv_ctx);
+
+ drm_modeset_drop_locks(&modeset_ctx);
+ drm_modeset_acquire_fini(&modeset_ctx);
+ dma_resv_fini(&resv);
+ }
}
EXPORT_SYMBOL(drm_mode_config_init);
diff --git a/drivers/gpu/drm/drm_mode_object.c b/drivers/gpu/drm/drm_mode_object.c
index 6a23e36ed4fe..35c2719407a8 100644
--- a/drivers/gpu/drm/drm_mode_object.c
+++ b/drivers/gpu/drm/drm_mode_object.c
@@ -224,12 +224,26 @@ EXPORT_SYMBOL(drm_mode_object_get);
* This attaches the given property to the modeset object with the given initial
* value. Currently this function cannot fail since the properties are stored in
* a statically sized array.
+ *
+ * Note that all properties must be attached before the object itself is
+ * registered and accessible from userspace.
*/
void drm_object_attach_property(struct drm_mode_object *obj,
struct drm_property *property,
uint64_t init_val)
{
int count = obj->properties->count;
+ struct drm_device *dev = property->dev;
+
+ if (obj->type == DRM_MODE_OBJECT_CONNECTOR) {
+ struct drm_connector *connector = obj_to_connector(obj);
+
+ WARN_ON(!dev->driver->load &&
+ connector->registration_state == DRM_CONNECTOR_REGISTERED);
+ } else {
+ WARN_ON(!dev->driver->load && dev->registered);
+ }
if (count == DRM_OBJECT_MAX_PROPERTY) {
WARN(1, "Failed to attach object property (type: 0x%x). Please "
diff --git a/drivers/gpu/drm/drm_modes.c b/drivers/gpu/drm/drm_modes.c
index 88232698d7a0..10336b144c72 100644
--- a/drivers/gpu/drm/drm_modes.c
+++ b/drivers/gpu/drm/drm_modes.c
@@ -233,7 +233,7 @@ struct drm_display_mode *drm_cvt_mode(struct drm_device *dev, int hdisplay,
/* 3) Nominal HSync width (% of line period) - default 8 */
#define CVT_HSYNC_PERCENTAGE 8
unsigned int hblank_percentage;
- int vsyncandback_porch, vback_porch, hblank;
+ int vsyncandback_porch, __maybe_unused vback_porch, hblank;
/* estimated the horizontal period */
tmp1 = HV_FACTOR * 1000000 -
@@ -386,9 +386,10 @@ drm_gtf_mode_complex(struct drm_device *dev, int hdisplay, int vdisplay,
int top_margin, bottom_margin;
int interlace;
unsigned int hfreq_est;
- int vsync_plus_bp, vback_porch;
- unsigned int vtotal_lines, vfieldrate_est, hperiod;
- unsigned int vfield_rate, vframe_rate;
+ int vsync_plus_bp, __maybe_unused vback_porch;
+ unsigned int vtotal_lines, __maybe_unused vfieldrate_est;
+ unsigned int __maybe_unused hperiod;
+ unsigned int vfield_rate, __maybe_unused vframe_rate;
int left_margin, right_margin;
unsigned int total_active_pixels, ideal_duty_cycle;
unsigned int hblank, total_pixels, pixel_freq;
@@ -1568,33 +1569,76 @@ static int drm_mode_parse_cmdline_res_mode(const char *str, unsigned int length,
return 0;
}
-static int drm_mode_parse_cmdline_options(char *str, size_t len,
+static int drm_mode_parse_cmdline_int(const char *delim, unsigned int *int_ret)
+{
+ const char *value;
+ char *endp;
+
+ /*
+ * delim must point to the '='; otherwise it is a syntax error, and
+ * if delim pointed to the terminating zero, delim + 1 would point
+ * past the end of the string.
+ */
+ if (*delim != '=')
+ return -EINVAL;
+
+ value = delim + 1;
+ *int_ret = simple_strtol(value, &endp, 10);
+
+ /* Make sure we have parsed something */
+ if (endp == value)
+ return -EINVAL;
+
+ return 0;
+}
+
+static int drm_mode_parse_panel_orientation(const char *delim,
+ struct drm_cmdline_mode *mode)
+{
+ const char *value;
+
+ if (*delim != '=')
+ return -EINVAL;
+
+ value = delim + 1;
+ delim = strchr(value, ',');
+ if (!delim)
+ delim = value + strlen(value);
+
+ if (!strncmp(value, "normal", delim - value))
+ mode->panel_orientation = DRM_MODE_PANEL_ORIENTATION_NORMAL;
+ else if (!strncmp(value, "upside_down", delim - value))
+ mode->panel_orientation = DRM_MODE_PANEL_ORIENTATION_BOTTOM_UP;
+ else if (!strncmp(value, "left_side_up", delim - value))
+ mode->panel_orientation = DRM_MODE_PANEL_ORIENTATION_LEFT_UP;
+ else if (!strncmp(value, "right_side_up", delim - value))
+ mode->panel_orientation = DRM_MODE_PANEL_ORIENTATION_RIGHT_UP;
+ else
+ return -EINVAL;
+
+ return 0;
+}
+
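Illustrative command lines this parser now accepts (the connector prefix is stripped before parsing); note that rotate remains invalid without a mode, while panel_orientation may stand alone:

	video=DSI-1:720x1280@60,panel_orientation=right_side_up
	video=HDMI-A-1:1024x768-24,rotate=180,margin_right=14
	video=DSI-1:panel_orientation=upside_down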
+static int drm_mode_parse_cmdline_options(const char *str,
+ bool freestanding,
const struct drm_connector *connector,
struct drm_cmdline_mode *mode)
{
- unsigned int rotation = 0;
- char *sep = str;
+ unsigned int deg, margin, rotation = 0;
+ const char *delim, *option, *sep;
- while ((sep = strchr(sep, ','))) {
- char *delim, *option;
-
- option = sep + 1;
+ option = str;
+ do {
delim = strchr(option, '=');
if (!delim) {
delim = strchr(option, ',');
if (!delim)
- delim = str + len;
+ delim = option + strlen(option);
}
if (!strncmp(option, "rotate", delim - option)) {
- const char *value = delim + 1;
- unsigned int deg;
-
- deg = simple_strtol(value, &sep, 10);
-
- /* Make sure we have parsed something */
- if (sep == value)
+ if (drm_mode_parse_cmdline_int(delim, &deg))
return -EINVAL;
switch (deg) {
@@ -1619,58 +1663,40 @@ static int drm_mode_parse_cmdline_options(char *str, size_t len,
}
} else if (!strncmp(option, "reflect_x", delim - option)) {
rotation |= DRM_MODE_REFLECT_X;
- sep = delim;
} else if (!strncmp(option, "reflect_y", delim - option)) {
rotation |= DRM_MODE_REFLECT_Y;
- sep = delim;
} else if (!strncmp(option, "margin_right", delim - option)) {
- const char *value = delim + 1;
- unsigned int margin;
-
- margin = simple_strtol(value, &sep, 10);
-
- /* Make sure we have parsed something */
- if (sep == value)
+ if (drm_mode_parse_cmdline_int(delim, &margin))
return -EINVAL;
mode->tv_margins.right = margin;
} else if (!strncmp(option, "margin_left", delim - option)) {
- const char *value = delim + 1;
- unsigned int margin;
-
- margin = simple_strtol(value, &sep, 10);
-
- /* Make sure we have parsed something */
- if (sep == value)
+ if (drm_mode_parse_cmdline_int(delim, &margin))
return -EINVAL;
mode->tv_margins.left = margin;
} else if (!strncmp(option, "margin_top", delim - option)) {
- const char *value = delim + 1;
- unsigned int margin;
-
- margin = simple_strtol(value, &sep, 10);
-
- /* Make sure we have parsed something */
- if (sep == value)
+ if (drm_mode_parse_cmdline_int(delim, &margin))
return -EINVAL;
mode->tv_margins.top = margin;
} else if (!strncmp(option, "margin_bottom", delim - option)) {
- const char *value = delim + 1;
- unsigned int margin;
-
- margin = simple_strtol(value, &sep, 10);
-
- /* Make sure we have parsed something */
- if (sep == value)
+ if (drm_mode_parse_cmdline_int(delim, &margin))
return -EINVAL;
mode->tv_margins.bottom = margin;
+ } else if (!strncmp(option, "panel_orientation", delim - option)) {
+ if (drm_mode_parse_panel_orientation(delim, mode))
+ return -EINVAL;
} else {
return -EINVAL;
}
- }
+ sep = strchr(delim, ',');
+ option = sep + 1;
+ } while (sep);
+
+ if (rotation && freestanding)
+ return -EINVAL;
mode->rotation_reflection = rotation;
@@ -1682,17 +1708,6 @@ static const char * const drm_named_modes_whitelist[] = {
"PAL",
};
-static bool drm_named_mode_is_in_whitelist(const char *mode, unsigned int size)
-{
- int i;
-
- for (i = 0; i < ARRAY_SIZE(drm_named_modes_whitelist); i++)
- if (!strncmp(mode, drm_named_modes_whitelist[i], size))
- return true;
-
- return false;
-}
-
/**
* drm_mode_parse_command_line_for_connector - parse command line modeline for connector
* @mode_option: optional per connector mode option
@@ -1723,72 +1738,30 @@ bool drm_mode_parse_command_line_for_connector(const char *mode_option,
struct drm_cmdline_mode *mode)
{
const char *name;
- bool named_mode = false, parse_extras = false;
+ bool freestanding = false, parse_extras = false;
unsigned int bpp_off = 0, refresh_off = 0, options_off = 0;
unsigned int mode_end = 0;
- char *bpp_ptr = NULL, *refresh_ptr = NULL, *extra_ptr = NULL;
- char *options_ptr = NULL;
+ const char *bpp_ptr = NULL, *refresh_ptr = NULL, *extra_ptr = NULL;
+ const char *options_ptr = NULL;
char *bpp_end_ptr = NULL, *refresh_end_ptr = NULL;
- int ret;
+ int i, len, ret;
-#ifdef CONFIG_FB
- if (!mode_option)
- mode_option = fb_mode_option;
-#endif
+ memset(mode, 0, sizeof(*mode));
+ mode->panel_orientation = DRM_MODE_PANEL_ORIENTATION_UNKNOWN;
- if (!mode_option) {
- mode->specified = false;
+ if (!mode_option)
return false;
- }
name = mode_option;
- /*
- * This is a bit convoluted. To differentiate between the
- * named modes and poorly formatted resolutions, we need a
- * bunch of things:
- * - We need to make sure that the first character (which
- * would be our resolution in X) is a digit.
- * - If not, then it's either a named mode or a force on/off.
- * To distinguish between the two, we need to run the
- * extra parsing function, and if not, then we consider it
- * a named mode.
- *
- * If this isn't enough, we should add more heuristics here,
- * and matching unit-tests.
- */
- if (!isdigit(name[0]) && name[0] != 'x') {
- unsigned int namelen = strlen(name);
-
- /*
- * Only the force on/off options can be in that case,
- * and they all take a single character.
- */
- if (namelen == 1) {
- ret = drm_mode_parse_cmdline_extra(name, namelen, true,
- connector, mode);
- if (!ret)
- return true;
- }
-
- named_mode = true;
- }
-
/* Try to locate the bpp and refresh specifiers, if any */
bpp_ptr = strchr(name, '-');
- if (bpp_ptr) {
+ if (bpp_ptr)
bpp_off = bpp_ptr - name;
- mode->bpp_specified = true;
- }
refresh_ptr = strchr(name, '@');
- if (refresh_ptr) {
- if (named_mode)
- return false;
-
+ if (refresh_ptr)
refresh_off = refresh_ptr - name;
- mode->refresh_specified = true;
- }
/* Locate the start of named options */
options_ptr = strchr(name, ',');
@@ -1802,33 +1775,58 @@ bool drm_mode_parse_command_line_for_connector(const char *mode_option,
mode_end = refresh_off;
} else if (options_ptr) {
mode_end = options_off;
+ parse_extras = true;
} else {
mode_end = strlen(name);
parse_extras = true;
}
- if (named_mode) {
- if (mode_end + 1 > DRM_DISPLAY_MODE_LEN)
- return false;
+ /* First check for a named mode */
+ for (i = 0; i < ARRAY_SIZE(drm_named_modes_whitelist); i++) {
+ ret = str_has_prefix(name, drm_named_modes_whitelist[i]);
+ if (ret == mode_end) {
+ if (refresh_ptr)
+ return false; /* named + refresh is invalid */
- if (!drm_named_mode_is_in_whitelist(name, mode_end))
- return false;
+ strcpy(mode->name, drm_named_modes_whitelist[i]);
+ mode->specified = true;
+ break;
+ }
+ }
- strscpy(mode->name, name, mode_end + 1);
- } else {
+ /* No named mode? Check for a normal mode argument, e.g. 1024x768 */
+ if (!mode->specified && isdigit(name[0])) {
ret = drm_mode_parse_cmdline_res_mode(name, mode_end,
parse_extras,
connector,
mode);
if (ret)
return false;
+
+ mode->specified = true;
+ }
+
+ /* No mode? Check for freestanding extras and/or options */
+ if (!mode->specified) {
+ unsigned int len = strlen(mode_option);
+
+ if (bpp_ptr || refresh_ptr)
+ return false; /* syntax error */
+
+ if (len == 1 || (len >= 2 && mode_option[1] == ','))
+ extra_ptr = mode_option;
+ else
+ options_ptr = mode_option - 1;
+
+ freestanding = true;
}
- mode->specified = true;
if (bpp_ptr) {
ret = drm_mode_parse_cmdline_bpp(bpp_ptr, &bpp_end_ptr, mode);
if (ret)
return false;
+
+ mode->bpp_specified = true;
}
if (refresh_ptr) {
@@ -1836,6 +1834,8 @@ bool drm_mode_parse_command_line_for_connector(const char *mode_option,
&refresh_end_ptr, mode);
if (ret)
return false;
+
+ mode->refresh_specified = true;
}
/*
@@ -1849,20 +1849,21 @@ bool drm_mode_parse_command_line_for_connector(const char *mode_option,
else if (refresh_ptr)
extra_ptr = refresh_end_ptr;
- if (extra_ptr &&
- extra_ptr != options_ptr) {
- int len = strlen(name) - (extra_ptr - name);
+ if (extra_ptr) {
+ if (options_ptr)
+ len = options_ptr - extra_ptr;
+ else
+ len = strlen(extra_ptr);
- ret = drm_mode_parse_cmdline_extra(extra_ptr, len, false,
+ ret = drm_mode_parse_cmdline_extra(extra_ptr, len, freestanding,
connector, mode);
if (ret)
return false;
}
if (options_ptr) {
- int len = strlen(name) - (options_ptr - name);
-
- ret = drm_mode_parse_cmdline_options(options_ptr, len,
+ ret = drm_mode_parse_cmdline_options(options_ptr + 1,
+ freestanding,
connector, mode);
if (ret)
return false;
diff --git a/drivers/gpu/drm/drm_of.c b/drivers/gpu/drm/drm_of.c
index 0ca58803ba46..b50b44e76279 100644
--- a/drivers/gpu/drm/drm_of.c
+++ b/drivers/gpu/drm/drm_of.c
@@ -274,3 +274,119 @@ int drm_of_find_panel_or_bridge(const struct device_node *np,
return ret;
}
EXPORT_SYMBOL_GPL(drm_of_find_panel_or_bridge);
+
+enum drm_of_lvds_pixels {
+ DRM_OF_LVDS_EVEN = BIT(0),
+ DRM_OF_LVDS_ODD = BIT(1),
+};
+
+static int drm_of_lvds_get_port_pixels_type(struct device_node *port_node)
+{
+ bool even_pixels =
+ of_property_read_bool(port_node, "dual-lvds-even-pixels");
+ bool odd_pixels =
+ of_property_read_bool(port_node, "dual-lvds-odd-pixels");
+
+ return (even_pixels ? DRM_OF_LVDS_EVEN : 0) |
+ (odd_pixels ? DRM_OF_LVDS_ODD : 0);
+}
+
+static int drm_of_lvds_get_remote_pixels_type(
+ const struct device_node *port_node)
+{
+ struct device_node *endpoint = NULL;
+ int pixels_type = -EPIPE;
+
+ for_each_child_of_node(port_node, endpoint) {
+ struct device_node *remote_port;
+ int current_pt;
+
+ if (!of_node_name_eq(endpoint, "endpoint"))
+ continue;
+
+ remote_port = of_graph_get_remote_port(endpoint);
+ if (!remote_port) {
+ of_node_put(endpoint); /* drop the for_each_child_of_node() ref */
+ return -EPIPE;
+ }
+
+ current_pt = drm_of_lvds_get_port_pixels_type(remote_port);
+ of_node_put(remote_port);
+ if (pixels_type < 0)
+ pixels_type = current_pt;
+
+ /*
+ * Sanity check, ensure that all remote endpoints have the same
+ * pixel type. We may lift this restriction later if we need to
+ * support multiple sinks with different dual-link
+ * configurations by passing the endpoints explicitly to
+ * drm_of_lvds_get_dual_link_pixel_order().
+ */
+ if (!current_pt || pixels_type != current_pt) {
+ of_node_put(endpoint); /* remote_port was already put above */
+ return -EINVAL;
+ }
+ }
+
+ return pixels_type;
+}
+
+/**
+ * drm_of_lvds_get_dual_link_pixel_order - Get LVDS dual-link pixel order
+ * @port1: First DT port node of the Dual-link LVDS source
+ * @port2: Second DT port node of the Dual-link LVDS source
+ *
+ * An LVDS dual-link connection is made of two links, with even pixels
+ * transmitted on one link, and odd pixels on the other link. This function
+ * returns, for two ports of an LVDS dual-link source, which port shall transmit
+ * the even and odd pixels, based on the requirements of the connected sink.
+ *
+ * The pixel order is determined from the dual-lvds-even-pixels and
+ * dual-lvds-odd-pixels properties in the sink's DT port nodes. If those
+ * properties are not present, or if their usage is not valid, this function
+ * returns -EINVAL.
+ *
+ * If either port is not connected, this function returns -EPIPE.
+ *
+ * @port1 and @port2 are typically DT sibling nodes, but may have different
+ * parents when, for instance, two separate LVDS encoders carry the even and odd
+ * pixels.
+ *
+ * Return:
+ * * DRM_LVDS_DUAL_LINK_EVEN_ODD_PIXELS - @port1 carries even pixels and @port2
+ * carries odd pixels
+ * * DRM_LVDS_DUAL_LINK_ODD_EVEN_PIXELS - @port1 carries odd pixels and @port2
+ * carries even pixels
+ * * -EINVAL - @port1 and @port2 are not connected to a dual-link LVDS sink, or
+ * the sink configuration is invalid
+ * * -EPIPE - when @port1 or @port2 are not connected
+ */
+int drm_of_lvds_get_dual_link_pixel_order(const struct device_node *port1,
+ const struct device_node *port2)
+{
+ int remote_p1_pt, remote_p2_pt;
+
+ if (!port1 || !port2)
+ return -EINVAL;
+
+ remote_p1_pt = drm_of_lvds_get_remote_pixels_type(port1);
+ if (remote_p1_pt < 0)
+ return remote_p1_pt;
+
+ remote_p2_pt = drm_of_lvds_get_remote_pixels_type(port2);
+ if (remote_p2_pt < 0)
+ return remote_p2_pt;
+
+ /*
+ * A valid dual-LVDS bus is found when one remote port is marked with
+ * "dual-lvds-even-pixels", and the other remote port is marked with
+ * "dual-lvds-odd-pixels", bail out if the markers are not right.
+ */
+ if (remote_p1_pt + remote_p2_pt != DRM_OF_LVDS_EVEN + DRM_OF_LVDS_ODD)
+ return -EINVAL;
+
+ return remote_p1_pt == DRM_OF_LVDS_EVEN ?
+ DRM_LVDS_DUAL_LINK_EVEN_ODD_PIXELS :
+ DRM_LVDS_DUAL_LINK_ODD_EVEN_PIXELS;
+}
+EXPORT_SYMBOL_GPL(drm_of_lvds_get_dual_link_pixel_order);
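A hedged caller sketch for a dual-link LVDS encoder driver; the port lookup by id is illustrative:

	struct device_node *port0, *port1;
	int order;

	port0 = of_graph_get_port_by_id(dev->of_node, 0);
	port1 = of_graph_get_port_by_id(dev->of_node, 1);
	order = drm_of_lvds_get_dual_link_pixel_order(port0, port1);
	of_node_put(port0);
	of_node_put(port1);

	if (order == DRM_LVDS_DUAL_LINK_EVEN_ODD_PIXELS)
		/* port 0 must transmit the even pixels */;
	else if (order == DRM_LVDS_DUAL_LINK_ODD_EVEN_PIXELS)
		/* port 0 must transmit the odd pixels */;
	else
		/* order < 0: single link, or a mis-described sink */;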
diff --git a/drivers/gpu/drm/drm_panel.c b/drivers/gpu/drm/drm_panel.c
index ed7985c0535a..8c7bac85a793 100644
--- a/drivers/gpu/drm/drm_panel.c
+++ b/drivers/gpu/drm/drm_panel.c
@@ -21,11 +21,13 @@
* DEALINGS IN THE SOFTWARE.
*/
+#include <linux/backlight.h>
#include <linux/err.h>
#include <linux/module.h>
#include <drm/drm_crtc.h>
#include <drm/drm_panel.h>
+#include <drm/drm_print.h>
static DEFINE_MUTEX(panel_lock);
static LIST_HEAD(panel_list);
@@ -112,12 +114,6 @@ EXPORT_SYMBOL(drm_panel_remove);
*/
int drm_panel_attach(struct drm_panel *panel, struct drm_connector *connector)
{
- if (panel->connector)
- return -EBUSY;
-
- panel->connector = connector;
- panel->drm = connector->dev;
-
return 0;
}
EXPORT_SYMBOL(drm_panel_attach);
@@ -134,8 +130,6 @@ EXPORT_SYMBOL(drm_panel_attach);
*/
void drm_panel_detach(struct drm_panel *panel)
{
- panel->connector = NULL;
- panel->drm = NULL;
}
EXPORT_SYMBOL(drm_panel_detach);
@@ -151,10 +145,13 @@ EXPORT_SYMBOL(drm_panel_detach);
*/
int drm_panel_prepare(struct drm_panel *panel)
{
- if (panel && panel->funcs && panel->funcs->prepare)
+ if (!panel)
+ return -EINVAL;
+
+ if (panel->funcs && panel->funcs->prepare)
return panel->funcs->prepare(panel);
- return panel ? -ENOSYS : -EINVAL;
+ return 0;
}
EXPORT_SYMBOL(drm_panel_prepare);
@@ -171,10 +168,13 @@ EXPORT_SYMBOL(drm_panel_prepare);
*/
int drm_panel_unprepare(struct drm_panel *panel)
{
- if (panel && panel->funcs && panel->funcs->unprepare)
+ if (!panel)
+ return -EINVAL;
+
+ if (panel->funcs && panel->funcs->unprepare)
return panel->funcs->unprepare(panel);
- return panel ? -ENOSYS : -EINVAL;
+ return 0;
}
EXPORT_SYMBOL(drm_panel_unprepare);
@@ -190,10 +190,23 @@ EXPORT_SYMBOL(drm_panel_unprepare);
*/
int drm_panel_enable(struct drm_panel *panel)
{
- if (panel && panel->funcs && panel->funcs->enable)
- return panel->funcs->enable(panel);
+ int ret;
+
+ if (!panel)
+ return -EINVAL;
+
+ if (panel->funcs && panel->funcs->enable) {
+ ret = panel->funcs->enable(panel);
+ if (ret < 0)
+ return ret;
+ }
- return panel ? -ENOSYS : -EINVAL;
+ ret = backlight_enable(panel->backlight);
+ if (ret < 0)
+ DRM_DEV_INFO(panel->dev, "failed to enable backlight: %d\n",
+ ret);
+
+ return 0;
}
EXPORT_SYMBOL(drm_panel_enable);
@@ -209,16 +222,27 @@ EXPORT_SYMBOL(drm_panel_enable);
*/
int drm_panel_disable(struct drm_panel *panel)
{
- if (panel && panel->funcs && panel->funcs->disable)
+ int ret;
+
+ if (!panel)
+ return -EINVAL;
+
+ ret = backlight_disable(panel->backlight);
+ if (ret < 0)
+ DRM_DEV_INFO(panel->dev, "failed to disable backlight: %d\n",
+ ret);
+
+ if (panel->funcs && panel->funcs->disable)
return panel->funcs->disable(panel);
- return panel ? -ENOSYS : -EINVAL;
+ return 0;
}
EXPORT_SYMBOL(drm_panel_disable);
/**
* drm_panel_get_modes - probe the available display modes of a panel
* @panel: DRM panel
+ * @connector: DRM connector
*
* The modes probed from the panel are automatically added to the connector
* that the panel is attached to.
@@ -226,12 +250,16 @@ EXPORT_SYMBOL(drm_panel_disable);
* Return: The number of modes available from the panel on success or a
* negative error code on failure.
*/
-int drm_panel_get_modes(struct drm_panel *panel)
+int drm_panel_get_modes(struct drm_panel *panel,
+ struct drm_connector *connector)
{
- if (panel && panel->funcs && panel->funcs->get_modes)
- return panel->funcs->get_modes(panel);
+ if (!panel)
+ return -EINVAL;
- return panel ? -ENOSYS : -EINVAL;
+ if (panel->funcs && panel->funcs->get_modes)
+ return panel->funcs->get_modes(panel, connector);
+
+ return -EOPNOTSUPP;
}
EXPORT_SYMBOL(drm_panel_get_modes);
@@ -274,6 +302,45 @@ struct drm_panel *of_drm_find_panel(const struct device_node *np)
EXPORT_SYMBOL(of_drm_find_panel);
#endif
+#if IS_REACHABLE(CONFIG_BACKLIGHT_CLASS_DEVICE)
+/**
+ * drm_panel_of_backlight - use backlight device node for backlight
+ * @panel: DRM panel
+ *
+ * Use this function to enable backlight handling if your panel
+ * uses device tree and has a backlight phandle.
+ *
+ * When the panel is enabled, the backlight will be enabled after a
+ * successful call to &drm_panel_funcs.enable().
+ *
+ * When the panel is disabled, the backlight will be disabled before the
+ * call to &drm_panel_funcs.disable().
+ *
+ * A typical implementation for a panel driver supporting device tree
+ * will call this function at probe time. Backlight will then be handled
+ * transparently without requiring any intervention from the driver.
+ * drm_panel_of_backlight() must be called after the call to drm_panel_init().
+ *
+ * Return: 0 on success or a negative error code on failure.
+ */
+int drm_panel_of_backlight(struct drm_panel *panel)
+{
+ struct backlight_device *backlight;
+
+ if (!panel || !panel->dev)
+ return -EINVAL;
+
+ backlight = devm_of_find_backlight(panel->dev);
+
+ if (IS_ERR(backlight))
+ return PTR_ERR(backlight);
+
+ panel->backlight = backlight;
+ return 0;
+}
+EXPORT_SYMBOL(drm_panel_of_backlight);
+#endif
+
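A sketch of the probe-time pattern the kerneldoc above describes, assuming a DSI panel driver; all foo_* names are illustrative:

	static int foo_panel_probe(struct mipi_dsi_device *dsi)
	{
		struct foo_panel *ctx;
		int ret;

		ctx = devm_kzalloc(&dsi->dev, sizeof(*ctx), GFP_KERNEL);
		if (!ctx)
			return -ENOMEM;

		drm_panel_init(&ctx->panel, &dsi->dev, &foo_panel_funcs,
			       DRM_MODE_CONNECTOR_DSI);

		/* must come after drm_panel_init() */
		ret = drm_panel_of_backlight(&ctx->panel);
		if (ret)
			return ret;

		return drm_panel_add(&ctx->panel);
	}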
MODULE_AUTHOR("Thierry Reding <treding@nvidia.com>");
MODULE_DESCRIPTION("DRM panel infrastructure");
MODULE_LICENSE("GPL and additional rights");
diff --git a/drivers/gpu/drm/drm_pci.c b/drivers/gpu/drm/drm_pci.c
index a86a3ab2771c..f2e43d341980 100644
--- a/drivers/gpu/drm/drm_pci.c
+++ b/drivers/gpu/drm/drm_pci.c
@@ -125,8 +125,6 @@ void drm_pci_free(struct drm_device * dev, drm_dma_handle_t * dmah)
EXPORT_SYMBOL(drm_pci_free);
-#ifdef CONFIG_PCI
-
static int drm_get_pci_domain(struct drm_device *dev)
{
#ifndef __alpha__
@@ -284,6 +282,8 @@ err_free:
}
EXPORT_SYMBOL(drm_get_pci_dev);
+#ifdef CONFIG_DRM_LEGACY
+
/**
* drm_legacy_pci_init - shadow-attach a legacy DRM PCI driver
* @driver: DRM device driver
@@ -331,17 +331,6 @@ int drm_legacy_pci_init(struct drm_driver *driver, struct pci_driver *pdriver)
}
EXPORT_SYMBOL(drm_legacy_pci_init);
-#else
-
-void drm_pci_agp_destroy(struct drm_device *dev) {}
-
-int drm_irq_by_busid(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
-{
- return -EINVAL;
-}
-#endif
-
/**
* drm_legacy_pci_exit - unregister shadow-attach legacy DRM driver
* @driver: DRM device driver
@@ -367,3 +356,5 @@ void drm_legacy_pci_exit(struct drm_driver *driver, struct pci_driver *pdriver)
DRM_INFO("Module unloaded\n");
}
EXPORT_SYMBOL(drm_legacy_pci_exit);
+
+#endif
diff --git a/drivers/gpu/drm/drm_prime.c b/drivers/gpu/drm/drm_prime.c
index 0814211b0f3f..86d9b0e45c8c 100644
--- a/drivers/gpu/drm/drm_prime.c
+++ b/drivers/gpu/drm/drm_prime.c
@@ -240,6 +240,7 @@ void drm_prime_destroy_file_private(struct drm_prime_file_private *prime_fpriv)
struct dma_buf *drm_gem_dmabuf_export(struct drm_device *dev,
struct dma_buf_export_info *exp_info)
{
+ struct drm_gem_object *obj = exp_info->priv;
struct dma_buf *dma_buf;
dma_buf = dma_buf_export(exp_info);
@@ -247,7 +248,8 @@ struct dma_buf *drm_gem_dmabuf_export(struct drm_device *dev,
return dma_buf;
drm_dev_get(dev);
- drm_gem_object_get(exp_info->priv);
+ drm_gem_object_get(obj);
+ dma_buf->file->f_mapping = obj->dev->anon_inode->i_mapping;
return dma_buf;
}
@@ -713,6 +715,9 @@ int drm_gem_prime_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma)
struct file *fil;
int ret;
+ /* Add the fake offset */
+ vma->vm_pgoff += drm_vma_node_start(&obj->vma_node);
+
if (obj->funcs && obj->funcs->mmap) {
ret = obj->funcs->mmap(obj, vma);
if (ret)
@@ -737,8 +742,6 @@ int drm_gem_prime_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma)
if (ret)
goto out;
- vma->vm_pgoff += drm_vma_node_start(&obj->vma_node);
-
ret = obj->dev->driver->fops->mmap(fil, vma);
drm_vma_node_revoke(&obj->vma_node, priv);
diff --git a/drivers/gpu/drm/drm_print.c b/drivers/gpu/drm/drm_print.c
index 9a25d73c155c..111b932cf2a9 100644
--- a/drivers/gpu/drm/drm_print.c
+++ b/drivers/gpu/drm/drm_print.c
@@ -37,11 +37,11 @@
#include <drm/drm_print.h>
/*
- * drm_debug: Enable debug output.
+ * __drm_debug: Enable debug output.
* Bitmask of DRM_UT_x. See include/drm/drm_print.h for details.
*/
-unsigned int drm_debug;
-EXPORT_SYMBOL(drm_debug);
+unsigned int __drm_debug;
+EXPORT_SYMBOL(__drm_debug);
MODULE_PARM_DESC(debug, "Enable debug output, where each bit enables a debug category.\n"
"\t\tBit 0 (0x01) will enable CORE messages (drm core code)\n"
@@ -52,7 +52,7 @@ MODULE_PARM_DESC(debug, "Enable debug output, where each bit enables a debug cat
"\t\tBit 5 (0x20) will enable VBL messages (vblank code)\n"
"\t\tBit 7 (0x80) will enable LEASE messages (leasing code)\n"
"\t\tBit 8 (0x100) will enable DP messages (displayport code)");
-module_param_named(debug, drm_debug, int, 0600);
+module_param_named(debug, __drm_debug, int, 0600);
void __drm_puts_coredump(struct drm_printer *p, const char *str)
{
@@ -256,7 +256,7 @@ void drm_dev_printk(const struct device *dev, const char *level,
}
EXPORT_SYMBOL(drm_dev_printk);
-void drm_dev_dbg(const struct device *dev, unsigned int category,
+void drm_dev_dbg(const struct device *dev, enum drm_debug_category category,
const char *format, ...)
{
struct va_format vaf;
@@ -280,7 +280,7 @@ void drm_dev_dbg(const struct device *dev, unsigned int category,
}
EXPORT_SYMBOL(drm_dev_dbg);
-void drm_dbg(unsigned int category, const char *format, ...)
+void __drm_dbg(enum drm_debug_category category, const char *format, ...)
{
struct va_format vaf;
va_list args;
@@ -297,9 +297,9 @@ void drm_dbg(unsigned int category, const char *format, ...)
va_end(args);
}
-EXPORT_SYMBOL(drm_dbg);
+EXPORT_SYMBOL(__drm_dbg);
-void drm_err(const char *format, ...)
+void __drm_err(const char *format, ...)
{
struct va_format vaf;
va_list args;
@@ -313,7 +313,7 @@ void drm_err(const char *format, ...)
va_end(args);
}
-EXPORT_SYMBOL(drm_err);
+EXPORT_SYMBOL(__drm_err);
/**
* drm_print_regset32 - print the contents of registers to a
diff --git a/drivers/gpu/drm/drm_probe_helper.c b/drivers/gpu/drm/drm_probe_helper.c
index a7c87abe88d0..576b4b7dcd89 100644
--- a/drivers/gpu/drm/drm_probe_helper.c
+++ b/drivers/gpu/drm/drm_probe_helper.c
@@ -101,6 +101,7 @@ drm_mode_validate_pipeline(struct drm_display_mode *mode,
/* Step 2: Validate against encoders and crtcs */
drm_connector_for_each_possible_encoder(connector, encoder) {
+ struct drm_bridge *bridge;
struct drm_crtc *crtc;
ret = drm_encoder_mode_valid(encoder, mode);
@@ -112,7 +113,8 @@ drm_mode_validate_pipeline(struct drm_display_mode *mode,
continue;
}
- ret = drm_bridge_mode_valid(encoder->bridge, mode);
+ bridge = drm_bridge_chain_get_first_bridge(encoder);
+ ret = drm_bridge_chain_mode_valid(bridge, mode);
if (ret != MODE_OK) {
/* There is also no point in continuing for crtc check
* here. */
diff --git a/drivers/gpu/drm/drm_rect.c b/drivers/gpu/drm/drm_rect.c
index b8363aaa9032..0460e874896e 100644
--- a/drivers/gpu/drm/drm_rect.c
+++ b/drivers/gpu/drm/drm_rect.c
@@ -52,9 +52,17 @@ bool drm_rect_intersect(struct drm_rect *r1, const struct drm_rect *r2)
}
EXPORT_SYMBOL(drm_rect_intersect);
-static u32 clip_scaled(u32 src, u32 dst, u32 clip)
+static u32 clip_scaled(int src, int dst, int *clip)
{
- u64 tmp = mul_u32_u32(src, dst - clip);
+ u64 tmp;
+
+ if (dst == 0)
+ return 0;
+
+ /* Only clip what we have. Keeps the result bounded. */
+ *clip = min(*clip, dst);
+
+ tmp = mul_u32_u32(src, dst - *clip);
/*
* Round toward 1.0 when clipping so that we don't accidentally
@@ -73,11 +81,13 @@ static u32 clip_scaled(u32 src, u32 dst, u32 clip)
* @clip: clip rectangle
*
* Clip rectangle @dst by rectangle @clip. Clip rectangle @src by the
- * same amounts multiplied by @hscale and @vscale.
+ * the corresponding amounts, retaining the vertical and horizontal scaling
+ * factors from @src to @dst.
*
* RETURNS:
+ *
* %true if rectangle @dst is still visible after being clipped,
- * %false otherwise
+ * %false otherwise.
*/
bool drm_rect_clip_scaled(struct drm_rect *src, struct drm_rect *dst,
const struct drm_rect *clip)
@@ -87,34 +97,34 @@ bool drm_rect_clip_scaled(struct drm_rect *src, struct drm_rect *dst,
diff = clip->x1 - dst->x1;
if (diff > 0) {
u32 new_src_w = clip_scaled(drm_rect_width(src),
- drm_rect_width(dst), diff);
+ drm_rect_width(dst), &diff);
- src->x1 = clamp_t(int64_t, src->x2 - new_src_w, INT_MIN, INT_MAX);
- dst->x1 = clip->x1;
+ src->x1 = src->x2 - new_src_w;
+ dst->x1 += diff;
}
diff = clip->y1 - dst->y1;
if (diff > 0) {
u32 new_src_h = clip_scaled(drm_rect_height(src),
- drm_rect_height(dst), diff);
+ drm_rect_height(dst), &diff);
- src->y1 = clamp_t(int64_t, src->y2 - new_src_h, INT_MIN, INT_MAX);
- dst->y1 = clip->y1;
+ src->y1 = src->y2 - new_src_h;
+ dst->y1 += diff;
}
diff = dst->x2 - clip->x2;
if (diff > 0) {
u32 new_src_w = clip_scaled(drm_rect_width(src),
- drm_rect_width(dst), diff);
+ drm_rect_width(dst), &diff);
- src->x2 = clamp_t(int64_t, src->x1 + new_src_w, INT_MIN, INT_MAX);
- dst->x2 = clip->x2;
+ src->x2 = src->x1 + new_src_w;
+ dst->x2 -= diff;
}
diff = dst->y2 - clip->y2;
if (diff > 0) {
u32 new_src_h = clip_scaled(drm_rect_height(src),
- drm_rect_height(dst), diff);
+ drm_rect_height(dst), &diff);
- src->y2 = clamp_t(int64_t, src->y1 + new_src_h, INT_MIN, INT_MAX);
- dst->y2 = clip->y2;
+ src->y2 = src->y1 + new_src_h;
+ dst->y2 -= diff;
}
return drm_rect_visible(dst);
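A worked example of the revised clipping, with illustrative numbers: src spans 0..200, dst spans 0..100 (a 2:1 downscale), and clip->x1 = 25. Then diff = 25 and:

	new_src_w = 200 * (100 - 25) / 100;	/* = 150 */
	src->x1   = 200 - 150;			/* = 50 */
	dst->x1  += 25;				/* == clip->x1, diff unreduced */

The 2:1 src:dst ratio is preserved on both sides of the clip, and because diff is now clamped to the visible dst extent, pathological clip rectangles can no longer push the src coordinates past INT_MAX, which is why the clamp_t() calls could go.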
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_drv.c b/drivers/gpu/drm/etnaviv/etnaviv_drv.c
index 1f9c01be40d7..6b43c1c94e8f 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_drv.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_drv.c
@@ -65,12 +65,13 @@ static int etnaviv_open(struct drm_device *dev, struct drm_file *file)
for (i = 0; i < ETNA_MAX_PIPES; i++) {
struct etnaviv_gpu *gpu = priv->gpu[i];
- struct drm_sched_rq *rq;
+ struct drm_gpu_scheduler *sched;
if (gpu) {
- rq = &gpu->sched.sched_rq[DRM_SCHED_PRIORITY_NORMAL];
+ sched = &gpu->sched;
drm_sched_entity_init(&ctx->sched_entity[i],
- &rq, 1, NULL);
+ DRM_SCHED_PRIORITY_NORMAL, &sched,
+ 1, NULL);
}
}
@@ -282,11 +283,6 @@ static int etnaviv_ioctl_gem_new(struct drm_device *dev, void *data,
args->flags, &args->handle);
}
-#define TS(t) ((struct timespec){ \
- .tv_sec = (t).tv_sec, \
- .tv_nsec = (t).tv_nsec \
-})
-
static int etnaviv_ioctl_gem_cpu_prep(struct drm_device *dev, void *data,
struct drm_file *file)
{
@@ -301,7 +297,7 @@ static int etnaviv_ioctl_gem_cpu_prep(struct drm_device *dev, void *data,
if (!obj)
return -ENOENT;
- ret = etnaviv_gem_cpu_prep(obj, args->op, &TS(args->timeout));
+ ret = etnaviv_gem_cpu_prep(obj, args->op, &args->timeout);
drm_gem_object_put_unlocked(obj);
@@ -354,7 +350,7 @@ static int etnaviv_ioctl_wait_fence(struct drm_device *dev, void *data,
{
struct drm_etnaviv_wait_fence *args = data;
struct etnaviv_drm_private *priv = dev->dev_private;
- struct timespec *timeout = &TS(args->timeout);
+ struct drm_etnaviv_timespec *timeout = &args->timeout;
struct etnaviv_gpu *gpu;
if (args->flags & ~(ETNA_WAIT_NONBLOCK))
@@ -403,7 +399,7 @@ static int etnaviv_ioctl_gem_wait(struct drm_device *dev, void *data,
{
struct etnaviv_drm_private *priv = dev->dev_private;
struct drm_etnaviv_gem_wait *args = data;
- struct timespec *timeout = &TS(args->timeout);
+ struct drm_etnaviv_timespec *timeout = &args->timeout;
struct drm_gem_object *obj;
struct etnaviv_gpu *gpu;
int ret;
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_drv.h b/drivers/gpu/drm/etnaviv/etnaviv_drv.h
index 32cfa5a48d42..efc656efeb0f 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_drv.h
+++ b/drivers/gpu/drm/etnaviv/etnaviv_drv.h
@@ -61,7 +61,7 @@ int etnaviv_gem_prime_pin(struct drm_gem_object *obj);
void etnaviv_gem_prime_unpin(struct drm_gem_object *obj);
void *etnaviv_gem_vmap(struct drm_gem_object *obj);
int etnaviv_gem_cpu_prep(struct drm_gem_object *obj, u32 op,
- struct timespec *timeout);
+ struct drm_etnaviv_timespec *timeout);
int etnaviv_gem_cpu_fini(struct drm_gem_object *obj);
void etnaviv_gem_free_object(struct drm_gem_object *obj);
int etnaviv_gem_new_handle(struct drm_device *dev, struct drm_file *file,
@@ -107,11 +107,12 @@ static inline size_t size_vstruct(size_t nelem, size_t elem_size, size_t base)
* between the specified timeout and the current CLOCK_MONOTONIC time.
*/
static inline unsigned long etnaviv_timeout_to_jiffies(
- const struct timespec *timeout)
+ const struct drm_etnaviv_timespec *timeout)
{
- struct timespec64 ts, to;
-
- to = timespec_to_timespec64(*timeout);
+ struct timespec64 ts, to = {
+ .tv_sec = timeout->tv_sec,
+ .tv_nsec = timeout->tv_nsec,
+ };
ktime_get_ts64(&ts);
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gem.c b/drivers/gpu/drm/etnaviv/etnaviv_gem.c
index cb1faaac380a..6adea180d629 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_gem.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_gem.c
@@ -373,7 +373,7 @@ static inline enum dma_data_direction etnaviv_op_to_dma_dir(u32 op)
}
int etnaviv_gem_cpu_prep(struct drm_gem_object *obj, u32 op,
- struct timespec *timeout)
+ struct drm_etnaviv_timespec *timeout)
{
struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj);
struct drm_device *dev = obj->dev;
@@ -431,7 +431,7 @@ int etnaviv_gem_cpu_fini(struct drm_gem_object *obj)
}
int etnaviv_gem_wait_bo(struct etnaviv_gpu *gpu, struct drm_gem_object *obj,
- struct timespec *timeout)
+ struct drm_etnaviv_timespec *timeout)
{
struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj);
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gem.h b/drivers/gpu/drm/etnaviv/etnaviv_gem.h
index d6270acce619..6b68fe16041b 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_gem.h
+++ b/drivers/gpu/drm/etnaviv/etnaviv_gem.h
@@ -112,7 +112,7 @@ struct etnaviv_gem_submit {
void etnaviv_submit_put(struct etnaviv_gem_submit * submit);
int etnaviv_gem_wait_bo(struct etnaviv_gpu *gpu, struct drm_gem_object *obj,
- struct timespec *timeout);
+ struct drm_etnaviv_timespec *timeout);
int etnaviv_gem_new_private(struct drm_device *dev, size_t size, u32 flags,
const struct etnaviv_gem_ops *ops, struct etnaviv_gem_object **res);
void etnaviv_gem_obj_add(struct drm_device *dev, struct drm_gem_object *obj);
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c b/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c
index aa3e4c3b063a..3b0afa156d92 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c
@@ -113,7 +113,7 @@ static void submit_unlock_object(struct etnaviv_gem_submit *submit, int i)
if (submit->bos[i].flags & BO_LOCKED) {
struct drm_gem_object *obj = &submit->bos[i].obj->base;
- ww_mutex_unlock(&obj->resv->lock);
+ dma_resv_unlock(obj->resv);
submit->bos[i].flags &= ~BO_LOCKED;
}
}
@@ -133,8 +133,7 @@ retry:
contended = i;
if (!(submit->bos[i].flags & BO_LOCKED)) {
- ret = ww_mutex_lock_interruptible(&obj->resv->lock,
- ticket);
+ ret = dma_resv_lock_interruptible(obj->resv, ticket);
if (ret == -EALREADY)
DRM_ERROR("BO at index %u already on submit list\n",
i);
@@ -161,8 +160,7 @@ fail:
obj = &submit->bos[contended].obj->base;
/* we lost out in a seqno race, lock and retry.. */
- ret = ww_mutex_lock_slow_interruptible(&obj->resv->lock,
- ticket);
+ ret = dma_resv_lock_slow_interruptible(obj->resv, ticket);
if (!ret) {
submit->bos[contended].flags |= BO_LOCKED;
slow_locked = contended;
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gpu.c b/drivers/gpu/drm/etnaviv/etnaviv_gpu.c
index d47d1a8e0219..799ec20b267d 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_gpu.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_gpu.c
@@ -1132,7 +1132,7 @@ static void event_free(struct etnaviv_gpu *gpu, unsigned int event)
* Cmdstream submission/retirement:
*/
int etnaviv_gpu_wait_fence_interruptible(struct etnaviv_gpu *gpu,
- u32 id, struct timespec *timeout)
+ u32 id, struct drm_etnaviv_timespec *timeout)
{
struct dma_fence *fence;
int ret;
@@ -1179,7 +1179,8 @@ int etnaviv_gpu_wait_fence_interruptible(struct etnaviv_gpu *gpu,
* that lock in this function while waiting.
*/
int etnaviv_gpu_wait_obj_inactive(struct etnaviv_gpu *gpu,
- struct etnaviv_gem_object *etnaviv_obj, struct timespec *timeout)
+ struct etnaviv_gem_object *etnaviv_obj,
+ struct drm_etnaviv_timespec *timeout)
{
unsigned long remaining;
long ret;
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gpu.h b/drivers/gpu/drm/etnaviv/etnaviv_gpu.h
index 8f9bd4edc96a..97bb48042b4d 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_gpu.h
+++ b/drivers/gpu/drm/etnaviv/etnaviv_gpu.h
@@ -169,9 +169,10 @@ int etnaviv_gpu_debugfs(struct etnaviv_gpu *gpu, struct seq_file *m);
void etnaviv_gpu_recover_hang(struct etnaviv_gpu *gpu);
void etnaviv_gpu_retire(struct etnaviv_gpu *gpu);
int etnaviv_gpu_wait_fence_interruptible(struct etnaviv_gpu *gpu,
- u32 fence, struct timespec *timeout);
+ u32 fence, struct drm_etnaviv_timespec *timeout);
int etnaviv_gpu_wait_obj_inactive(struct etnaviv_gpu *gpu,
- struct etnaviv_gem_object *etnaviv_obj, struct timespec *timeout);
+ struct etnaviv_gem_object *etnaviv_obj,
+ struct drm_etnaviv_timespec *timeout);
struct dma_fence *etnaviv_gpu_submit(struct etnaviv_gem_submit *submit);
int etnaviv_gpu_pm_get_sync(struct etnaviv_gpu *gpu);
void etnaviv_gpu_pm_put(struct etnaviv_gpu *gpu);
diff --git a/drivers/gpu/drm/exynos/Kconfig b/drivers/gpu/drm/exynos/Kconfig
index 6f7d3b3b3628..6417f374b923 100644
--- a/drivers/gpu/drm/exynos/Kconfig
+++ b/drivers/gpu/drm/exynos/Kconfig
@@ -1,13 +1,13 @@
# SPDX-License-Identifier: GPL-2.0-only
config DRM_EXYNOS
- tristate "DRM Support for Samsung SoC EXYNOS Series"
+ tristate "DRM Support for Samsung SoC Exynos Series"
depends on OF && DRM && (ARCH_S3C64XX || ARCH_S5PV210 || ARCH_EXYNOS || ARCH_MULTIPLATFORM || COMPILE_TEST)
depends on MMU
select DRM_KMS_HELPER
select VIDEOMODE_HELPERS
select SND_SOC_HDMI_CODEC if SND_SOC
help
- Choose this option if you have a Samsung SoC EXYNOS chipset.
+ Choose this option if you have a Samsung SoC Exynos chipset.
If M is selected the module will be called exynosdrm.
if DRM_EXYNOS
@@ -62,7 +62,7 @@ config DRM_EXYNOS_DSI
This enables support for Exynos MIPI-DSI device.
config DRM_EXYNOS_DP
- bool "EXYNOS specific extensions for Analogix DP driver"
+ bool "Exynos specific extensions for Analogix DP driver"
depends on DRM_EXYNOS_FIMD || DRM_EXYNOS7_DECON
select DRM_ANALOGIX_DP
default DRM_EXYNOS
diff --git a/drivers/gpu/drm/exynos/exynos5433_drm_decon.c b/drivers/gpu/drm/exynos/exynos5433_drm_decon.c
index 2d5cbfda3ca7..8428ae12dfa5 100644
--- a/drivers/gpu/drm/exynos/exynos5433_drm_decon.c
+++ b/drivers/gpu/drm/exynos/exynos5433_drm_decon.c
@@ -510,7 +510,7 @@ static void decon_swreset(struct decon_context *ctx)
ctx->addr + DECON_CRCCTRL);
}
-static void decon_enable(struct exynos_drm_crtc *crtc)
+static void decon_atomic_enable(struct exynos_drm_crtc *crtc)
{
struct decon_context *ctx = crtc->ctx;
@@ -523,7 +523,7 @@ static void decon_enable(struct exynos_drm_crtc *crtc)
decon_commit(ctx->crtc);
}
-static void decon_disable(struct exynos_drm_crtc *crtc)
+static void decon_atomic_disable(struct exynos_drm_crtc *crtc)
{
struct decon_context *ctx = crtc->ctx;
int i;
@@ -599,8 +599,8 @@ static enum drm_mode_status decon_mode_valid(struct exynos_drm_crtc *crtc,
}
static const struct exynos_drm_crtc_ops decon_crtc_ops = {
- .enable = decon_enable,
- .disable = decon_disable,
+ .atomic_enable = decon_atomic_enable,
+ .atomic_disable = decon_atomic_disable,
.enable_vblank = decon_enable_vblank,
.disable_vblank = decon_disable_vblank,
.atomic_begin = decon_atomic_begin,
@@ -651,7 +651,7 @@ static void decon_unbind(struct device *dev, struct device *master, void *data)
{
struct decon_context *ctx = dev_get_drvdata(dev);
- decon_disable(ctx->crtc);
+ decon_atomic_disable(ctx->crtc);
/* detach this sub driver from iommu mapping if supported. */
exynos_drm_unregister_dma(ctx->drm_dev, ctx->dev);
diff --git a/drivers/gpu/drm/exynos/exynos7_drm_decon.c b/drivers/gpu/drm/exynos/exynos7_drm_decon.c
index f0640950bd46..ff59c641fa80 100644
--- a/drivers/gpu/drm/exynos/exynos7_drm_decon.c
+++ b/drivers/gpu/drm/exynos/exynos7_drm_decon.c
@@ -526,7 +526,7 @@ static void decon_init(struct decon_context *ctx)
writel(VIDCON1_VCLK_HOLD, ctx->regs + VIDCON1(0));
}
-static void decon_enable(struct exynos_drm_crtc *crtc)
+static void decon_atomic_enable(struct exynos_drm_crtc *crtc)
{
struct decon_context *ctx = crtc->ctx;
@@ -546,7 +546,7 @@ static void decon_enable(struct exynos_drm_crtc *crtc)
ctx->suspended = false;
}
-static void decon_disable(struct exynos_drm_crtc *crtc)
+static void decon_atomic_disable(struct exynos_drm_crtc *crtc)
{
struct decon_context *ctx = crtc->ctx;
int i;
@@ -568,8 +568,8 @@ static void decon_disable(struct exynos_drm_crtc *crtc)
}
static const struct exynos_drm_crtc_ops decon_crtc_ops = {
- .enable = decon_enable,
- .disable = decon_disable,
+ .atomic_enable = decon_atomic_enable,
+ .atomic_disable = decon_atomic_disable,
.enable_vblank = decon_enable_vblank,
.disable_vblank = decon_disable_vblank,
.atomic_begin = decon_atomic_begin,
@@ -653,7 +653,7 @@ static void decon_unbind(struct device *dev, struct device *master,
{
struct decon_context *ctx = dev_get_drvdata(dev);
- decon_disable(ctx->crtc);
+ decon_atomic_disable(ctx->crtc);
if (ctx->encoder)
exynos_dpi_remove(ctx->encoder);
diff --git a/drivers/gpu/drm/exynos/exynos_dp.c b/drivers/gpu/drm/exynos/exynos_dp.c
index 1e6aa24bf45e..4785885c0f4f 100644
--- a/drivers/gpu/drm/exynos/exynos_dp.c
+++ b/drivers/gpu/drm/exynos/exynos_dp.c
@@ -110,7 +110,6 @@ static int exynos_dp_bridge_attach(struct analogix_dp_plat_data *plat_data,
if (ret) {
DRM_DEV_ERROR(dp->dev,
"Failed to attach bridge to drm\n");
- bridge->next = NULL;
return ret;
}
}
diff --git a/drivers/gpu/drm/exynos/exynos_drm_crtc.c b/drivers/gpu/drm/exynos/exynos_drm_crtc.c
index 77ce78986408..1c03485676ef 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_crtc.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_crtc.c
@@ -23,8 +23,8 @@ static void exynos_drm_crtc_atomic_enable(struct drm_crtc *crtc,
{
struct exynos_drm_crtc *exynos_crtc = to_exynos_crtc(crtc);
- if (exynos_crtc->ops->enable)
- exynos_crtc->ops->enable(exynos_crtc);
+ if (exynos_crtc->ops->atomic_enable)
+ exynos_crtc->ops->atomic_enable(exynos_crtc);
drm_crtc_vblank_on(crtc);
}
@@ -36,8 +36,8 @@ static void exynos_drm_crtc_atomic_disable(struct drm_crtc *crtc,
drm_crtc_vblank_off(crtc);
- if (exynos_crtc->ops->disable)
- exynos_crtc->ops->disable(exynos_crtc);
+ if (exynos_crtc->ops->atomic_disable)
+ exynos_crtc->ops->atomic_disable(exynos_crtc);
if (crtc->state->event && !crtc->state->active) {
spin_lock_irq(&crtc->dev->event_lock);
diff --git a/drivers/gpu/drm/exynos/exynos_drm_dpi.c b/drivers/gpu/drm/exynos/exynos_drm_dpi.c
index 3cebb19ec1c4..43fa0f26c052 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_dpi.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_dpi.c
@@ -43,7 +43,7 @@ exynos_dpi_detect(struct drm_connector *connector, bool force)
{
struct exynos_dpi *ctx = connector_to_dpi(connector);
- if (ctx->panel && !ctx->panel->connector)
+ if (ctx->panel)
drm_panel_attach(ctx->panel, &ctx->connector);
return connector_status_connected;
@@ -85,7 +85,7 @@ static int exynos_dpi_get_modes(struct drm_connector *connector)
}
if (ctx->panel)
- return ctx->panel->funcs->get_modes(ctx->panel);
+ return drm_panel_get_modes(ctx->panel, connector);
return 0;
}
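The get_modes hunk here, and the matching ones in exynos_drm_dsi.c and fsl_dcu_drm_rgb.c below, all track the same core change: drm_panel_get_modes() now takes the connector the modes should be added to, so drivers stop reaching into panel->funcs directly. A minimal sketch of the resulting callback shape (the demo_* name is ours):

#include <drm/drm_panel.h>

static int demo_connector_get_modes(struct drm_connector *connector,
				    struct drm_panel *panel)
{
	/* The panel adds its modes to @connector and returns the count. */
	if (panel)
		return drm_panel_get_modes(panel, connector);
	return 0;
}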
diff --git a/drivers/gpu/drm/exynos/exynos_drm_drv.h b/drivers/gpu/drm/exynos/exynos_drm_drv.h
index d4014ba592fd..d4d21d8cfb90 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_drv.h
+++ b/drivers/gpu/drm/exynos/exynos_drm_drv.h
@@ -118,8 +118,8 @@ struct exynos_drm_plane_config {
/*
* Exynos drm crtc ops
*
- * @enable: enable the device
- * @disable: disable the device
+ * @atomic_enable: enable the device
+ * @atomic_disable: disable the device
* @enable_vblank: specific driver callback for enabling vblank interrupt.
* @disable_vblank: specific driver callback for disabling vblank interrupt.
* @mode_valid: specific driver callback for mode validation
@@ -133,8 +133,8 @@ struct exynos_drm_plane_config {
*/
struct exynos_drm_crtc;
struct exynos_drm_crtc_ops {
- void (*enable)(struct exynos_drm_crtc *crtc);
- void (*disable)(struct exynos_drm_crtc *crtc);
+ void (*atomic_enable)(struct exynos_drm_crtc *crtc);
+ void (*atomic_disable)(struct exynos_drm_crtc *crtc);
int (*enable_vblank)(struct exynos_drm_crtc *crtc);
void (*disable_vblank)(struct exynos_drm_crtc *crtc);
enum drm_mode_status (*mode_valid)(struct exynos_drm_crtc *crtc,
diff --git a/drivers/gpu/drm/exynos/exynos_drm_dsi.c b/drivers/gpu/drm/exynos/exynos_drm_dsi.c
index 72726f2c7a9f..33628d85edad 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_dsi.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_dsi.c
@@ -255,6 +255,7 @@ struct exynos_dsi {
struct mipi_dsi_host dsi_host;
struct drm_connector connector;
struct drm_panel *panel;
+ struct list_head bridge_chain;
struct drm_bridge *out_bridge;
struct device *dev;
@@ -1377,6 +1378,7 @@ static void exynos_dsi_unregister_te_irq(struct exynos_dsi *dsi)
static void exynos_dsi_enable(struct drm_encoder *encoder)
{
struct exynos_dsi *dsi = encoder_to_dsi(encoder);
+ struct drm_bridge *iter;
int ret;
if (dsi->state & DSIM_STATE_ENABLED)
@@ -1390,7 +1392,11 @@ static void exynos_dsi_enable(struct drm_encoder *encoder)
if (ret < 0)
goto err_put_sync;
} else {
- drm_bridge_pre_enable(dsi->out_bridge);
+ list_for_each_entry_reverse(iter, &dsi->bridge_chain,
+ chain_node) {
+ if (iter->funcs->pre_enable)
+ iter->funcs->pre_enable(iter);
+ }
}
exynos_dsi_set_display_mode(dsi);
@@ -1401,7 +1407,10 @@ static void exynos_dsi_enable(struct drm_encoder *encoder)
if (ret < 0)
goto err_display_disable;
} else {
- drm_bridge_enable(dsi->out_bridge);
+ list_for_each_entry(iter, &dsi->bridge_chain, chain_node) {
+ if (iter->funcs->enable)
+ iter->funcs->enable(iter);
+ }
}
dsi->state |= DSIM_STATE_VIDOUT_AVAILABLE;
@@ -1419,6 +1428,7 @@ err_put_sync:
static void exynos_dsi_disable(struct drm_encoder *encoder)
{
struct exynos_dsi *dsi = encoder_to_dsi(encoder);
+ struct drm_bridge *iter;
if (!(dsi->state & DSIM_STATE_ENABLED))
return;
@@ -1426,10 +1436,20 @@ static void exynos_dsi_disable(struct drm_encoder *encoder)
dsi->state &= ~DSIM_STATE_VIDOUT_AVAILABLE;
drm_panel_disable(dsi->panel);
- drm_bridge_disable(dsi->out_bridge);
+
+ list_for_each_entry_reverse(iter, &dsi->bridge_chain, chain_node) {
+ if (iter->funcs->disable)
+ iter->funcs->disable(iter);
+ }
+
exynos_dsi_set_display_enable(dsi, false);
drm_panel_unprepare(dsi->panel);
- drm_bridge_post_disable(dsi->out_bridge);
+
+ list_for_each_entry(iter, &dsi->bridge_chain, chain_node) {
+ if (iter->funcs->post_disable)
+ iter->funcs->post_disable(iter);
+ }
+
dsi->state &= ~DSIM_STATE_ENABLED;
pm_runtime_put_sync(dsi->dev);
}
@@ -1461,7 +1481,7 @@ static int exynos_dsi_get_modes(struct drm_connector *connector)
struct exynos_dsi *dsi = connector_to_dsi(connector);
if (dsi->panel)
- return dsi->panel->funcs->get_modes(dsi->panel);
+ return drm_panel_get_modes(dsi->panel, connector);
return 0;
}
@@ -1522,7 +1542,7 @@ static int exynos_dsi_host_attach(struct mipi_dsi_host *host,
if (out_bridge) {
drm_bridge_attach(encoder, out_bridge, NULL);
dsi->out_bridge = out_bridge;
- encoder->bridge = NULL;
+ list_splice_init(&encoder->bridge_chain, &dsi->bridge_chain);
} else {
int ret = exynos_dsi_create_connector(encoder);
@@ -1588,6 +1608,7 @@ static int exynos_dsi_host_detach(struct mipi_dsi_host *host,
if (dsi->out_bridge->funcs->detach)
dsi->out_bridge->funcs->detach(dsi->out_bridge);
dsi->out_bridge = NULL;
+ INIT_LIST_HEAD(&dsi->bridge_chain);
}
if (drm->mode_config.poll_enabled)
@@ -1735,6 +1756,7 @@ static int exynos_dsi_probe(struct platform_device *pdev)
init_completion(&dsi->completed);
spin_lock_init(&dsi->transfer_lock);
INIT_LIST_HEAD(&dsi->transfer_list);
+ INIT_LIST_HEAD(&dsi->bridge_chain);
dsi->dsi_host.ops = &exynos_dsi_ops;
dsi->dsi_host.dev = dev;
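With the drm_bridge_pre_enable()/drm_bridge_enable() helpers dropped above, the driver walks its private copy of the encoder's bridge chain by hand, and the ordering is the important part: pre_enable runs from the sink end back toward the encoder, enable runs forward again. A condensed sketch of the two walks, with chain standing in for dsi->bridge_chain:

#include <linux/list.h>
#include <drm/drm_bridge.h>

static void chain_pre_enable(struct list_head *chain)
{
	struct drm_bridge *iter;

	/* Sink to source, mirroring the list_for_each_entry_reverse()
	 * loop in exynos_dsi_enable() above. */
	list_for_each_entry_reverse(iter, chain, chain_node)
		if (iter->funcs->pre_enable)
			iter->funcs->pre_enable(iter);
}

static void chain_enable(struct list_head *chain)
{
	struct drm_bridge *iter;

	/* Source to sink. */
	list_for_each_entry(iter, chain, chain_node)
		if (iter->funcs->enable)
			iter->funcs->enable(iter);
}

disable and post_disable invert the two directions again, as the exynos_dsi_disable() hunk shows.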
diff --git a/drivers/gpu/drm/exynos/exynos_drm_fbdev.c b/drivers/gpu/drm/exynos/exynos_drm_fbdev.c
index b0877b97291c..647a1fd1d815 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_fbdev.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_fbdev.c
@@ -60,7 +60,7 @@ static int exynos_drm_fb_mmap(struct fb_info *info,
return 0;
}
-static struct fb_ops exynos_drm_fb_ops = {
+static const struct fb_ops exynos_drm_fb_ops = {
.owner = THIS_MODULE,
DRM_FB_HELPER_DEFAULT_OPS,
.fb_mmap = exynos_drm_fb_mmap,
diff --git a/drivers/gpu/drm/exynos/exynos_drm_fimd.c b/drivers/gpu/drm/exynos/exynos_drm_fimd.c
index 8d0a929104e5..21aec38702fc 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_fimd.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_fimd.c
@@ -894,7 +894,7 @@ static void fimd_disable_plane(struct exynos_drm_crtc *crtc,
fimd_enable_shadow_channel_path(ctx, win, false);
}
-static void fimd_enable(struct exynos_drm_crtc *crtc)
+static void fimd_atomic_enable(struct exynos_drm_crtc *crtc)
{
struct fimd_context *ctx = crtc->ctx;
@@ -912,7 +912,7 @@ static void fimd_enable(struct exynos_drm_crtc *crtc)
fimd_commit(ctx->crtc);
}
-static void fimd_disable(struct exynos_drm_crtc *crtc)
+static void fimd_atomic_disable(struct exynos_drm_crtc *crtc)
{
struct fimd_context *ctx = crtc->ctx;
int i;
@@ -1006,8 +1006,8 @@ static void fimd_dp_clock_enable(struct exynos_drm_clk *clk, bool enable)
}
static const struct exynos_drm_crtc_ops fimd_crtc_ops = {
- .enable = fimd_enable,
- .disable = fimd_disable,
+ .atomic_enable = fimd_atomic_enable,
+ .atomic_disable = fimd_atomic_disable,
.enable_vblank = fimd_enable_vblank,
.disable_vblank = fimd_disable_vblank,
.atomic_begin = fimd_atomic_begin,
@@ -1098,7 +1098,7 @@ static void fimd_unbind(struct device *dev, struct device *master,
{
struct fimd_context *ctx = dev_get_drvdata(dev);
- fimd_disable(ctx->crtc);
+ fimd_atomic_disable(ctx->crtc);
exynos_drm_unregister_dma(ctx->drm_dev, ctx->dev);
diff --git a/drivers/gpu/drm/exynos/exynos_drm_vidi.c b/drivers/gpu/drm/exynos/exynos_drm_vidi.c
index 65b891cb9c50..b320b3a21ad4 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_vidi.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_vidi.c
@@ -120,7 +120,7 @@ static void vidi_update_plane(struct exynos_drm_crtc *crtc,
DRM_DEV_DEBUG_KMS(ctx->dev, "dma_addr = %pad\n", &addr);
}
-static void vidi_enable(struct exynos_drm_crtc *crtc)
+static void vidi_atomic_enable(struct exynos_drm_crtc *crtc)
{
struct vidi_context *ctx = crtc->ctx;
@@ -133,7 +133,7 @@ static void vidi_enable(struct exynos_drm_crtc *crtc)
drm_crtc_vblank_on(&crtc->base);
}
-static void vidi_disable(struct exynos_drm_crtc *crtc)
+static void vidi_atomic_disable(struct exynos_drm_crtc *crtc)
{
struct vidi_context *ctx = crtc->ctx;
@@ -147,8 +147,8 @@ static void vidi_disable(struct exynos_drm_crtc *crtc)
}
static const struct exynos_drm_crtc_ops vidi_crtc_ops = {
- .enable = vidi_enable,
- .disable = vidi_disable,
+ .atomic_enable = vidi_atomic_enable,
+ .atomic_disable = vidi_atomic_disable,
.enable_vblank = vidi_enable_vblank,
.disable_vblank = vidi_disable_vblank,
.update_plane = vidi_update_plane,
diff --git a/drivers/gpu/drm/exynos/exynos_hdmi.c b/drivers/gpu/drm/exynos/exynos_hdmi.c
index 48159d5d2214..9ff921f43a93 100644
--- a/drivers/gpu/drm/exynos/exynos_hdmi.c
+++ b/drivers/gpu/drm/exynos/exynos_hdmi.c
@@ -946,8 +946,10 @@ static int hdmi_create_connector(struct drm_encoder *encoder)
connector->interlace_allowed = true;
connector->polled = DRM_CONNECTOR_POLL_HPD;
- ret = drm_connector_init(hdata->drm_dev, connector,
- &hdmi_connector_funcs, DRM_MODE_CONNECTOR_HDMIA);
+ ret = drm_connector_init_with_ddc(hdata->drm_dev, connector,
+ &hdmi_connector_funcs,
+ DRM_MODE_CONNECTOR_HDMIA,
+ hdata->ddc_adpt);
if (ret) {
DRM_DEV_ERROR(hdata->dev,
"Failed to initialize connector with drm\n");
diff --git a/drivers/gpu/drm/exynos/exynos_mixer.c b/drivers/gpu/drm/exynos/exynos_mixer.c
index 6cfdb95fef2f..38ae9c32feef 100644
--- a/drivers/gpu/drm/exynos/exynos_mixer.c
+++ b/drivers/gpu/drm/exynos/exynos_mixer.c
@@ -986,7 +986,7 @@ static void mixer_atomic_flush(struct exynos_drm_crtc *crtc)
exynos_crtc_handle_event(crtc);
}
-static void mixer_enable(struct exynos_drm_crtc *crtc)
+static void mixer_atomic_enable(struct exynos_drm_crtc *crtc)
{
struct mixer_context *ctx = crtc->ctx;
@@ -1015,7 +1015,7 @@ static void mixer_enable(struct exynos_drm_crtc *crtc)
set_bit(MXR_BIT_POWERED, &ctx->flags);
}
-static void mixer_disable(struct exynos_drm_crtc *crtc)
+static void mixer_atomic_disable(struct exynos_drm_crtc *crtc)
{
struct mixer_context *ctx = crtc->ctx;
int i;
@@ -1109,8 +1109,8 @@ static bool mixer_mode_fixup(struct exynos_drm_crtc *crtc,
}
static const struct exynos_drm_crtc_ops mixer_crtc_ops = {
- .enable = mixer_enable,
- .disable = mixer_disable,
+ .atomic_enable = mixer_atomic_enable,
+ .atomic_disable = mixer_atomic_disable,
.enable_vblank = mixer_enable_vblank,
.disable_vblank = mixer_disable_vblank,
.atomic_begin = mixer_atomic_begin,
diff --git a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_rgb.c b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_rgb.c
index 82c972e9c024..9598ee3cc4d2 100644
--- a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_rgb.c
+++ b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_rgb.c
@@ -68,7 +68,7 @@ static int fsl_dcu_drm_connector_get_modes(struct drm_connector *connector)
struct fsl_dcu_drm_connector *fsl_connector;
fsl_connector = to_fsl_dcu_connector(connector);
- return drm_panel_get_modes(fsl_connector->panel);
+ return drm_panel_get_modes(fsl_connector->panel, connector);
}
static int fsl_dcu_drm_connector_mode_valid(struct drm_connector *connector,
diff --git a/drivers/gpu/drm/gma500/accel_2d.c b/drivers/gpu/drm/gma500/accel_2d.c
index 45ad5ffedc93..adc0507545bf 100644
--- a/drivers/gpu/drm/gma500/accel_2d.c
+++ b/drivers/gpu/drm/gma500/accel_2d.c
@@ -21,9 +21,9 @@
#include <drm/drm.h>
#include <drm/drm_crtc.h>
+#include <drm/drm_fb_helper.h>
#include <drm/drm_fourcc.h>
-#include "framebuffer.h"
#include "psb_drv.h"
#include "psb_reg.h"
@@ -226,11 +226,10 @@ static int psb_accel_2d_copy(struct drm_psb_private *dev_priv,
static void psbfb_copyarea_accel(struct fb_info *info,
const struct fb_copyarea *a)
{
- struct psb_fbdev *fbdev = info->par;
- struct psb_framebuffer *psbfb = &fbdev->pfb;
- struct drm_device *dev = psbfb->base.dev;
- struct drm_framebuffer *fb = fbdev->psb_fb_helper.fb;
- struct drm_psb_private *dev_priv = dev->dev_private;
+ struct drm_fb_helper *fb_helper = info->par;
+ struct drm_framebuffer *fb = fb_helper->fb;
+ struct drm_device *dev;
+ struct drm_psb_private *dev_priv;
uint32_t offset;
uint32_t stride;
uint32_t src_format;
@@ -239,6 +238,8 @@ static void psbfb_copyarea_accel(struct fb_info *info,
if (!fb)
return;
+ dev = fb->dev;
+ dev_priv = dev->dev_private;
offset = to_gtt_range(fb->obj[0])->offset;
stride = fb->pitches[0];
@@ -309,9 +310,9 @@ void psbfb_copyarea(struct fb_info *info,
*/
int psbfb_sync(struct fb_info *info)
{
- struct psb_fbdev *fbdev = info->par;
- struct psb_framebuffer *psbfb = &fbdev->pfb;
- struct drm_device *dev = psbfb->base.dev;
+ struct drm_fb_helper *fb_helper = info->par;
+ struct drm_framebuffer *fb = fb_helper->fb;
+ struct drm_device *dev = fb->dev;
struct drm_psb_private *dev_priv = dev->dev_private;
unsigned long _end = jiffies + HZ;
int busy = 0;
diff --git a/drivers/gpu/drm/gma500/cdv_intel_display.c b/drivers/gpu/drm/gma500/cdv_intel_display.c
index 8b784947ed3b..1ed854f498b7 100644
--- a/drivers/gpu/drm/gma500/cdv_intel_display.c
+++ b/drivers/gpu/drm/gma500/cdv_intel_display.c
@@ -582,8 +582,8 @@ static int cdv_intel_crtc_mode_set(struct drm_crtc *crtc,
struct gma_clock_t clock;
u32 dpll = 0, dspcntr, pipeconf;
bool ok;
- bool is_crt = false, is_lvds = false, is_tv = false;
- bool is_hdmi = false, is_dp = false;
+ bool is_lvds = false, is_tv = false;
+ bool is_dp = false;
struct drm_mode_config *mode_config = &dev->mode_config;
struct drm_connector *connector;
const struct gma_limit_t *limit;
@@ -607,10 +607,7 @@ static int cdv_intel_crtc_mode_set(struct drm_crtc *crtc,
is_tv = true;
break;
case INTEL_OUTPUT_ANALOG:
- is_crt = true;
- break;
case INTEL_OUTPUT_HDMI:
- is_hdmi = true;
break;
case INTEL_OUTPUT_DISPLAYPORT:
is_dp = true;
@@ -979,6 +976,7 @@ const struct drm_crtc_funcs cdv_intel_crtc_funcs = {
.gamma_set = gma_crtc_gamma_set,
.set_config = gma_crtc_set_config,
.destroy = gma_crtc_destroy,
+ .page_flip = gma_crtc_page_flip,
};
const struct gma_clock_funcs cdv_clock_funcs = {
diff --git a/drivers/gpu/drm/gma500/cdv_intel_dp.c b/drivers/gpu/drm/gma500/cdv_intel_dp.c
index 570b59520fd1..5772b2dce0d6 100644
--- a/drivers/gpu/drm/gma500/cdv_intel_dp.c
+++ b/drivers/gpu/drm/gma500/cdv_intel_dp.c
@@ -1594,7 +1594,6 @@ cdv_intel_dp_complete_link_train(struct gma_encoder *encoder)
{
struct drm_device *dev = encoder->base.dev;
struct cdv_intel_dp *intel_dp = encoder->dev_priv;
- bool channel_eq = false;
int tries, cr_tries;
u32 reg;
uint32_t DP = intel_dp->DP;
@@ -1602,7 +1601,6 @@ cdv_intel_dp_complete_link_train(struct gma_encoder *encoder)
/* channel equalization */
tries = 0;
cr_tries = 0;
- channel_eq = false;
DRM_DEBUG_KMS("\n");
reg = DP | DP_LINK_TRAIN_PAT_2;
@@ -1648,7 +1646,6 @@ cdv_intel_dp_complete_link_train(struct gma_encoder *encoder)
if (cdv_intel_channel_eq_ok(encoder)) {
DRM_DEBUG_KMS("PT2 train is done\n");
- channel_eq = true;
break;
}
diff --git a/drivers/gpu/drm/gma500/framebuffer.c b/drivers/gpu/drm/gma500/framebuffer.c
index 218f3bb15276..1459076d1980 100644
--- a/drivers/gpu/drm/gma500/framebuffer.c
+++ b/drivers/gpu/drm/gma500/framebuffer.c
@@ -40,8 +40,8 @@ static int psbfb_setcolreg(unsigned regno, unsigned red, unsigned green,
unsigned blue, unsigned transp,
struct fb_info *info)
{
- struct psb_fbdev *fbdev = info->par;
- struct drm_framebuffer *fb = fbdev->psb_fb_helper.fb;
+ struct drm_fb_helper *fb_helper = info->par;
+ struct drm_framebuffer *fb = fb_helper->fb;
uint32_t v;
if (!fb)
@@ -77,10 +77,10 @@ static int psbfb_setcolreg(unsigned regno, unsigned red, unsigned green,
static int psbfb_pan(struct fb_var_screeninfo *var, struct fb_info *info)
{
- struct psb_fbdev *fbdev = info->par;
- struct psb_framebuffer *psbfb = &fbdev->pfb;
- struct drm_device *dev = psbfb->base.dev;
- struct gtt_range *gtt = to_gtt_range(psbfb->base.obj[0]);
+ struct drm_fb_helper *fb_helper = info->par;
+ struct drm_framebuffer *fb = fb_helper->fb;
+ struct drm_device *dev = fb->dev;
+ struct gtt_range *gtt = to_gtt_range(fb->obj[0]);
/*
* We have to poke our nose in here. The core fb code assumes
@@ -99,10 +99,10 @@ static int psbfb_pan(struct fb_var_screeninfo *var, struct fb_info *info)
static vm_fault_t psbfb_vm_fault(struct vm_fault *vmf)
{
struct vm_area_struct *vma = vmf->vma;
- struct psb_framebuffer *psbfb = vma->vm_private_data;
- struct drm_device *dev = psbfb->base.dev;
+ struct drm_framebuffer *fb = vma->vm_private_data;
+ struct drm_device *dev = fb->dev;
struct drm_psb_private *dev_priv = dev->dev_private;
- struct gtt_range *gtt = to_gtt_range(psbfb->base.obj[0]);
+ struct gtt_range *gtt = to_gtt_range(fb->obj[0]);
int page_num;
int i;
unsigned long address;
@@ -145,23 +145,21 @@ static const struct vm_operations_struct psbfb_vm_ops = {
static int psbfb_mmap(struct fb_info *info, struct vm_area_struct *vma)
{
- struct psb_fbdev *fbdev = info->par;
- struct psb_framebuffer *psbfb = &fbdev->pfb;
+ struct drm_fb_helper *fb_helper = info->par;
+ struct drm_framebuffer *fb = fb_helper->fb;
if (vma->vm_pgoff != 0)
return -EINVAL;
if (vma->vm_pgoff > (~0UL >> PAGE_SHIFT))
return -EINVAL;
- if (!psbfb->addr_space)
- psbfb->addr_space = vma->vm_file->f_mapping;
/*
* If this is a GEM object then info->screen_base is the virtual
* kernel remapping of the object. FIXME: Review if this is
* suitable for our mmap work
*/
vma->vm_ops = &psbfb_vm_ops;
- vma->vm_private_data = (void *)psbfb;
+ vma->vm_private_data = (void *)fb;
vma->vm_flags |= VM_IO | VM_MIXEDMAP | VM_DONTEXPAND | VM_DONTDUMP;
return 0;
}
@@ -209,9 +207,9 @@ static struct fb_ops psbfb_unaccel_ops = {
* 0 on success or an error code if we fail.
*/
static int psb_framebuffer_init(struct drm_device *dev,
- struct psb_framebuffer *fb,
+ struct drm_framebuffer *fb,
const struct drm_mode_fb_cmd2 *mode_cmd,
- struct gtt_range *gt)
+ struct drm_gem_object *obj)
{
const struct drm_format_info *info;
int ret;
@@ -227,9 +225,9 @@ static int psb_framebuffer_init(struct drm_device *dev,
if (mode_cmd->pitches[0] & 63)
return -EINVAL;
- drm_helper_mode_fill_fb_struct(dev, &fb->base, mode_cmd);
- fb->base.obj[0] = &gt->gem;
- ret = drm_framebuffer_init(dev, &fb->base, &psb_fb_funcs);
+ drm_helper_mode_fill_fb_struct(dev, fb, mode_cmd);
+ fb->obj[0] = obj;
+ ret = drm_framebuffer_init(dev, fb, &psb_fb_funcs);
if (ret) {
dev_err(dev->dev, "framebuffer init failed: %d\n", ret);
return ret;
@@ -252,21 +250,21 @@ static int psb_framebuffer_init(struct drm_device *dev,
static struct drm_framebuffer *psb_framebuffer_create
(struct drm_device *dev,
const struct drm_mode_fb_cmd2 *mode_cmd,
- struct gtt_range *gt)
+ struct drm_gem_object *obj)
{
- struct psb_framebuffer *fb;
+ struct drm_framebuffer *fb;
int ret;
fb = kzalloc(sizeof(*fb), GFP_KERNEL);
if (!fb)
return ERR_PTR(-ENOMEM);
- ret = psb_framebuffer_init(dev, fb, mode_cmd, gt);
+ ret = psb_framebuffer_init(dev, fb, mode_cmd, obj);
if (ret) {
kfree(fb);
return ERR_PTR(ret);
}
- return &fb->base;
+ return fb;
}
/**
@@ -300,14 +298,13 @@ static struct gtt_range *psbfb_alloc(struct drm_device *dev, int aligned_size)
*
* Create a framebuffer to the specifications provided
*/
-static int psbfb_create(struct psb_fbdev *fbdev,
+static int psbfb_create(struct drm_fb_helper *fb_helper,
struct drm_fb_helper_surface_size *sizes)
{
- struct drm_device *dev = fbdev->psb_fb_helper.dev;
+ struct drm_device *dev = fb_helper->dev;
struct drm_psb_private *dev_priv = dev->dev_private;
struct fb_info *info;
struct drm_framebuffer *fb;
- struct psb_framebuffer *psbfb = &fbdev->pfb;
struct drm_mode_fb_cmd2 mode_cmd;
int size;
int ret;
@@ -372,7 +369,7 @@ static int psbfb_create(struct psb_fbdev *fbdev,
memset(dev_priv->vram_addr + backing->offset, 0, size);
- info = drm_fb_helper_alloc_fbi(&fbdev->psb_fb_helper);
+ info = drm_fb_helper_alloc_fbi(fb_helper);
if (IS_ERR(info)) {
ret = PTR_ERR(info);
goto out;
@@ -380,14 +377,13 @@ static int psbfb_create(struct psb_fbdev *fbdev,
mode_cmd.pixel_format = drm_mode_legacy_fb_format(bpp, depth);
- ret = psb_framebuffer_init(dev, psbfb, &mode_cmd, backing);
- if (ret)
+ fb = psb_framebuffer_create(dev, &mode_cmd, &backing->gem);
+ if (IS_ERR(fb)) {
+ ret = PTR_ERR(fb);
goto out;
+ }
- fb = &psbfb->base;
- psbfb->fbdev = info;
-
- fbdev->psb_fb_helper.fb = fb;
+ fb_helper->fb = fb;
if (dev_priv->ops->accel_2d && pitch_lines > 8) /* 2D engine */
info->fbops = &psbfb_ops;
@@ -411,15 +407,14 @@ static int psbfb_create(struct psb_fbdev *fbdev,
info->apertures->ranges[0].size = dev_priv->gtt.stolen_size;
}
- drm_fb_helper_fill_info(info, &fbdev->psb_fb_helper, sizes);
+ drm_fb_helper_fill_info(info, fb_helper, sizes);
info->fix.mmio_start = pci_resource_start(dev->pdev, 0);
info->fix.mmio_len = pci_resource_len(dev->pdev, 0);
/* Use default scratch pixmap (info->pixmap.flags = FB_PIXMAP_SYSTEM) */
- dev_dbg(dev->dev, "allocated %dx%d fb\n",
- psbfb->base.width, psbfb->base.height);
+ dev_dbg(dev->dev, "allocated %dx%d fb\n", fb->width, fb->height);
return 0;
out:
@@ -439,7 +434,6 @@ static struct drm_framebuffer *psb_user_framebuffer_create
(struct drm_device *dev, struct drm_file *filp,
const struct drm_mode_fb_cmd2 *cmd)
{
- struct gtt_range *r;
struct drm_gem_object *obj;
/*
@@ -451,17 +445,15 @@ static struct drm_framebuffer *psb_user_framebuffer_create
return ERR_PTR(-ENOENT);
/* Let the core code do all the work */
- r = container_of(obj, struct gtt_range, gem);
- return psb_framebuffer_create(dev, cmd, r);
+ return psb_framebuffer_create(dev, cmd, obj);
}
-static int psbfb_probe(struct drm_fb_helper *helper,
+static int psbfb_probe(struct drm_fb_helper *fb_helper,
struct drm_fb_helper_surface_size *sizes)
{
- struct psb_fbdev *psb_fbdev =
- container_of(helper, struct psb_fbdev, psb_fb_helper);
- struct drm_device *dev = psb_fbdev->psb_fb_helper.dev;
+ struct drm_device *dev = fb_helper->dev;
struct drm_psb_private *dev_priv = dev->dev_private;
+ unsigned int fb_size;
int bytespp;
bytespp = sizes->surface_bpp / 8;
@@ -471,72 +463,77 @@ static int psbfb_probe(struct drm_fb_helper *helper,
/* If the mode will not fit in 32bit then switch to 16bit to get
a console on full resolution. The X mode setting server will
allocate its own 32bit GEM framebuffer */
- if (ALIGN(sizes->fb_width * bytespp, 64) * sizes->fb_height >
- dev_priv->vram_stolen_size) {
+ fb_size = ALIGN(sizes->surface_width * bytespp, 64) *
+ sizes->surface_height;
+ fb_size = ALIGN(fb_size, PAGE_SIZE);
+
+ if (fb_size > dev_priv->vram_stolen_size) {
sizes->surface_bpp = 16;
sizes->surface_depth = 16;
}
- return psbfb_create(psb_fbdev, sizes);
+ return psbfb_create(fb_helper, sizes);
}
static const struct drm_fb_helper_funcs psb_fb_helper_funcs = {
.fb_probe = psbfb_probe,
};
-static int psb_fbdev_destroy(struct drm_device *dev, struct psb_fbdev *fbdev)
+static int psb_fbdev_destroy(struct drm_device *dev,
+ struct drm_fb_helper *fb_helper)
{
- struct psb_framebuffer *psbfb = &fbdev->pfb;
+ struct drm_framebuffer *fb = fb_helper->fb;
+
+ drm_fb_helper_unregister_fbi(fb_helper);
- drm_fb_helper_unregister_fbi(&fbdev->psb_fb_helper);
+ drm_fb_helper_fini(fb_helper);
+ drm_framebuffer_unregister_private(fb);
+ drm_framebuffer_cleanup(fb);
- drm_fb_helper_fini(&fbdev->psb_fb_helper);
- drm_framebuffer_unregister_private(&psbfb->base);
- drm_framebuffer_cleanup(&psbfb->base);
+ if (fb->obj[0])
+ drm_gem_object_put_unlocked(fb->obj[0]);
+ kfree(fb);
- if (psbfb->base.obj[0])
- drm_gem_object_put_unlocked(psbfb->base.obj[0]);
return 0;
}
int psb_fbdev_init(struct drm_device *dev)
{
- struct psb_fbdev *fbdev;
+ struct drm_fb_helper *fb_helper;
struct drm_psb_private *dev_priv = dev->dev_private;
int ret;
- fbdev = kzalloc(sizeof(struct psb_fbdev), GFP_KERNEL);
- if (!fbdev) {
+ fb_helper = kzalloc(sizeof(*fb_helper), GFP_KERNEL);
+ if (!fb_helper) {
dev_err(dev->dev, "no memory\n");
return -ENOMEM;
}
- dev_priv->fbdev = fbdev;
+ dev_priv->fb_helper = fb_helper;
- drm_fb_helper_prepare(dev, &fbdev->psb_fb_helper, &psb_fb_helper_funcs);
+ drm_fb_helper_prepare(dev, fb_helper, &psb_fb_helper_funcs);
- ret = drm_fb_helper_init(dev, &fbdev->psb_fb_helper,
- INTELFB_CONN_LIMIT);
+ ret = drm_fb_helper_init(dev, fb_helper, INTELFB_CONN_LIMIT);
if (ret)
goto free;
- ret = drm_fb_helper_single_add_all_connectors(&fbdev->psb_fb_helper);
+ ret = drm_fb_helper_single_add_all_connectors(fb_helper);
if (ret)
goto fini;
/* disable all the possible outputs/crtcs before entering KMS mode */
drm_helper_disable_unused_functions(dev);
- ret = drm_fb_helper_initial_config(&fbdev->psb_fb_helper, 32);
+ ret = drm_fb_helper_initial_config(fb_helper, 32);
if (ret)
goto fini;
return 0;
fini:
- drm_fb_helper_fini(&fbdev->psb_fb_helper);
+ drm_fb_helper_fini(fb_helper);
free:
- kfree(fbdev);
+ kfree(fb_helper);
return ret;
}
@@ -544,12 +541,12 @@ static void psb_fbdev_fini(struct drm_device *dev)
{
struct drm_psb_private *dev_priv = dev->dev_private;
- if (!dev_priv->fbdev)
+ if (!dev_priv->fb_helper)
return;
- psb_fbdev_destroy(dev, dev_priv->fbdev);
- kfree(dev_priv->fbdev);
- dev_priv->fbdev = NULL;
+ psb_fbdev_destroy(dev, dev_priv->fb_helper);
+ kfree(dev_priv->fb_helper);
+ dev_priv->fb_helper = NULL;
}
static const struct drm_mode_config_funcs psb_mode_funcs = {
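The reworked check in psbfb_probe() above now mirrors the real allocation: the pitch is 64-byte aligned and the total is page-rounded before being compared against the stolen-VRAM pool. Worked through for a 1024x768 console at 32 bpp:

	/* bytespp  = 32 / 8                     = 4
	 * pitch    = ALIGN(1024 * 4, 64)        = 4096 bytes
	 * fb_size  = 4096 * 768                 = 3145728 bytes (3 MiB)
	 * fb_size  = ALIGN(3145728, PAGE_SIZE)  = 3145728 (page multiple)
	 *
	 * Only when this exceeds dev_priv->vram_stolen_size does the
	 * probe fall back to a 16 bpp console. */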
diff --git a/drivers/gpu/drm/gma500/framebuffer.h b/drivers/gpu/drm/gma500/framebuffer.h
index ae8a02639fd9..2fbba4b48841 100644
--- a/drivers/gpu/drm/gma500/framebuffer.h
+++ b/drivers/gpu/drm/gma500/framebuffer.h
@@ -9,23 +9,8 @@
#ifndef _FRAMEBUFFER_H_
#define _FRAMEBUFFER_H_
-#include <drm/drm_fb_helper.h>
-
#include "psb_drv.h"
-struct psb_framebuffer {
- struct drm_framebuffer base;
- struct address_space *addr_space;
- struct fb_info *fbdev;
-};
-
-struct psb_fbdev {
- struct drm_fb_helper psb_fb_helper; /* must be first */
- struct psb_framebuffer pfb;
-};
-
-#define to_psb_fb(x) container_of(x, struct psb_framebuffer, base)
-
extern int gma_connector_clones(struct drm_device *dev, int type_mask);
#endif
diff --git a/drivers/gpu/drm/gma500/gma_display.c b/drivers/gpu/drm/gma500/gma_display.c
index e20ccb5d10fd..17f136985d21 100644
--- a/drivers/gpu/drm/gma500/gma_display.c
+++ b/drivers/gpu/drm/gma500/gma_display.c
@@ -255,6 +255,8 @@ void gma_crtc_dpms(struct drm_crtc *crtc, int mode)
/* Give the overlay scaler a chance to enable
* if it's on this pipe */
/* psb_intel_crtc_dpms_video(crtc, true); TODO */
+
+ drm_crtc_vblank_on(crtc);
break;
case DRM_MODE_DPMS_OFF:
if (!gma_crtc->active)
@@ -501,6 +503,52 @@ void gma_crtc_destroy(struct drm_crtc *crtc)
kfree(gma_crtc);
}
+int gma_crtc_page_flip(struct drm_crtc *crtc,
+ struct drm_framebuffer *fb,
+ struct drm_pending_vblank_event *event,
+ uint32_t page_flip_flags,
+ struct drm_modeset_acquire_ctx *ctx)
+{
+ struct gma_crtc *gma_crtc = to_gma_crtc(crtc);
+ struct drm_framebuffer *current_fb = crtc->primary->fb;
+ struct drm_framebuffer *old_fb = crtc->primary->old_fb;
+ const struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;
+ struct drm_device *dev = crtc->dev;
+ unsigned long flags;
+ int ret;
+
+ if (!crtc_funcs->mode_set_base)
+ return -EINVAL;
+
+ /* Using mode_set_base requires the new fb to be set already. */
+ crtc->primary->fb = fb;
+
+ if (event) {
+ spin_lock_irqsave(&dev->event_lock, flags);
+
+ WARN_ON(drm_crtc_vblank_get(crtc) != 0);
+
+ gma_crtc->page_flip_event = event;
+
+ /* Call this locked if we want an event at vblank interrupt. */
+ ret = crtc_funcs->mode_set_base(crtc, crtc->x, crtc->y, old_fb);
+ if (ret) {
+ gma_crtc->page_flip_event = NULL;
+ drm_crtc_vblank_put(crtc);
+ }
+
+ spin_unlock_irqrestore(&dev->event_lock, flags);
+ } else {
+ ret = crtc_funcs->mode_set_base(crtc, crtc->x, crtc->y, old_fb);
+ }
+
+ /* Restore previous fb in case of failure. */
+ if (ret)
+ crtc->primary->fb = current_fb;
+
+ return ret;
+}
+
int gma_crtc_set_config(struct drm_mode_set *set,
struct drm_modeset_acquire_ctx *ctx)
{
diff --git a/drivers/gpu/drm/gma500/gma_display.h b/drivers/gpu/drm/gma500/gma_display.h
index fdbd7ecaa59c..7bd6c1ee8b21 100644
--- a/drivers/gpu/drm/gma500/gma_display.h
+++ b/drivers/gpu/drm/gma500/gma_display.h
@@ -11,6 +11,7 @@
#define _GMA_DISPLAY_H_
#include <linux/pm_runtime.h>
+#include <drm/drm_vblank.h>
struct drm_encoder;
struct drm_mode_set;
@@ -71,6 +72,11 @@ extern void gma_crtc_prepare(struct drm_crtc *crtc);
extern void gma_crtc_commit(struct drm_crtc *crtc);
extern void gma_crtc_disable(struct drm_crtc *crtc);
extern void gma_crtc_destroy(struct drm_crtc *crtc);
+extern int gma_crtc_page_flip(struct drm_crtc *crtc,
+ struct drm_framebuffer *fb,
+ struct drm_pending_vblank_event *event,
+ uint32_t page_flip_flags,
+ struct drm_modeset_acquire_ctx *ctx);
extern int gma_crtc_set_config(struct drm_mode_set *set,
struct drm_modeset_acquire_ctx *ctx);
diff --git a/drivers/gpu/drm/gma500/gtt.c b/drivers/gpu/drm/gma500/gtt.c
index afaf4bea21cf..9278bcfad1bf 100644
--- a/drivers/gpu/drm/gma500/gtt.c
+++ b/drivers/gpu/drm/gma500/gtt.c
@@ -503,7 +503,7 @@ int psb_gtt_init(struct drm_device *dev, int resume)
* Map the GTT and the stolen memory area
*/
if (!resume)
- dev_priv->gtt_map = ioremap_nocache(pg->gtt_phys_start,
+ dev_priv->gtt_map = ioremap(pg->gtt_phys_start,
gtt_pages << PAGE_SHIFT);
if (!dev_priv->gtt_map) {
dev_err(dev->dev, "Failure to map gtt.\n");
diff --git a/drivers/gpu/drm/gma500/mdfld_intel_display.c b/drivers/gpu/drm/gma500/mdfld_intel_display.c
index b8bfb96008b8..4fff110c4921 100644
--- a/drivers/gpu/drm/gma500/mdfld_intel_display.c
+++ b/drivers/gpu/drm/gma500/mdfld_intel_display.c
@@ -113,27 +113,6 @@ static int psb_intel_panel_fitter_pipe(struct drm_device *dev)
return (pfit_control >> 29) & 0x3;
}
-static struct drm_device globle_dev;
-
-void mdfld__intel_plane_set_alpha(int enable)
-{
- struct drm_device *dev = &globle_dev;
- int dspcntr_reg = DSPACNTR;
- u32 dspcntr;
-
- dspcntr = REG_READ(dspcntr_reg);
-
- if (enable) {
- dspcntr &= ~DISPPLANE_32BPP_NO_ALPHA;
- dspcntr |= DISPPLANE_32BPP;
- } else {
- dspcntr &= ~DISPPLANE_32BPP;
- dspcntr |= DISPPLANE_32BPP_NO_ALPHA;
- }
-
- REG_WRITE(dspcntr_reg, dspcntr);
-}
-
static int check_fb(struct drm_framebuffer *fb)
{
if (!fb)
@@ -164,8 +143,6 @@ static int mdfld__intel_pipe_set_base(struct drm_crtc *crtc, int x, int y,
u32 dspcntr;
int ret;
- memcpy(&globle_dev, dev, sizeof(struct drm_device));
-
dev_dbg(dev->dev, "pipe = 0x%x.\n", pipe);
/* no fb bound */
diff --git a/drivers/gpu/drm/gma500/oaktrail_hdmi.c b/drivers/gpu/drm/gma500/oaktrail_hdmi.c
index f4c520893ceb..f4370232767d 100644
--- a/drivers/gpu/drm/gma500/oaktrail_hdmi.c
+++ b/drivers/gpu/drm/gma500/oaktrail_hdmi.c
@@ -159,9 +159,7 @@ static void oaktrail_hdmi_audio_disable(struct drm_device *dev)
static unsigned int htotal_calculate(struct drm_display_mode *mode)
{
- u32 htotal, new_crtc_htotal;
-
- htotal = (mode->crtc_hdisplay - 1) | ((mode->crtc_htotal - 1) << 16);
+ u32 new_crtc_htotal;
/*
* 1024 x 768 new_crtc_htotal = 0x1024;
diff --git a/drivers/gpu/drm/gma500/oaktrail_lvds.c b/drivers/gpu/drm/gma500/oaktrail_lvds.c
index 7390403ea1b7..582e09597500 100644
--- a/drivers/gpu/drm/gma500/oaktrail_lvds.c
+++ b/drivers/gpu/drm/gma500/oaktrail_lvds.c
@@ -117,6 +117,7 @@ static void oaktrail_lvds_mode_set(struct drm_encoder *encoder,
if (!connector) {
DRM_ERROR("Couldn't find connector when setting mode");
+ gma_power_end(dev);
return;
}
diff --git a/drivers/gpu/drm/gma500/psb_drv.c b/drivers/gpu/drm/gma500/psb_drv.c
index 7005f8f69c68..6956c8e7501c 100644
--- a/drivers/gpu/drm/gma500/psb_drv.c
+++ b/drivers/gpu/drm/gma500/psb_drv.c
@@ -19,10 +19,10 @@
#include <drm/drm.h>
#include <drm/drm_drv.h>
+#include <drm/drm_fb_helper.h>
#include <drm/drm_file.h>
#include <drm/drm_ioctl.h>
#include <drm/drm_irq.h>
-#include <drm/drm_pci.h>
#include <drm/drm_pciids.h>
#include <drm/drm_vblank.h>
@@ -256,7 +256,7 @@ static int psb_driver_load(struct drm_device *dev, unsigned long flags)
PSB_AUX_RESOURCE);
resource_len = pci_resource_len(dev_priv->aux_pdev,
PSB_AUX_RESOURCE);
- dev_priv->aux_reg = ioremap_nocache(resource_start,
+ dev_priv->aux_reg = ioremap(resource_start,
resource_len);
if (!dev_priv->aux_reg)
goto out_err;
@@ -426,14 +426,48 @@ static long psb_unlocked_ioctl(struct file *filp, unsigned int cmd,
static int psb_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
- return drm_get_pci_dev(pdev, ent, &driver);
-}
+ struct drm_device *dev;
+ int ret;
+
+ ret = pci_enable_device(pdev);
+ if (ret)
+ return ret;
+
+ dev = drm_dev_alloc(&driver, &pdev->dev);
+ if (IS_ERR(dev)) {
+ ret = PTR_ERR(dev);
+ goto err_pci_disable_device;
+ }
+
+ dev->pdev = pdev;
+ pci_set_drvdata(pdev, dev);
+
+ ret = psb_driver_load(dev, ent->driver_data);
+ if (ret)
+ goto err_drm_dev_put;
+ ret = drm_dev_register(dev, ent->driver_data);
+ if (ret)
+ goto err_psb_driver_unload;
+
+ return 0;
+
+err_psb_driver_unload:
+ psb_driver_unload(dev);
+err_drm_dev_put:
+ drm_dev_put(dev);
+err_pci_disable_device:
+ pci_disable_device(pdev);
+ return ret;
+}
static void psb_pci_remove(struct pci_dev *pdev)
{
struct drm_device *dev = pci_get_drvdata(pdev);
- drm_put_dev(dev);
+
+ drm_dev_unregister(dev);
+ psb_driver_unload(dev);
+ drm_dev_put(dev);
}
static const struct dev_pm_ops psb_pm_ops = {
@@ -466,8 +500,6 @@ static const struct file_operations psb_gem_fops = {
static struct drm_driver driver = {
.driver_features = DRIVER_MODESET | DRIVER_GEM,
- .load = psb_driver_load,
- .unload = psb_driver_unload,
.lastclose = drm_fb_helper_lastclose,
.num_ioctls = ARRAY_SIZE(psb_ioctls),
diff --git a/drivers/gpu/drm/gma500/psb_drv.h b/drivers/gpu/drm/gma500/psb_drv.h
index 9b3c03f4a38d..3d4ef3071d45 100644
--- a/drivers/gpu/drm/gma500/psb_drv.h
+++ b/drivers/gpu/drm/gma500/psb_drv.h
@@ -229,6 +229,8 @@ enum {
#define KSEL_BYPASS_25 6
#define KSEL_BYPASS_83_100 7
+struct drm_fb_helper;
+
struct opregion_header;
struct opregion_acpi;
struct opregion_swsci;
@@ -432,7 +434,7 @@ struct drm_psb_private {
struct pci_dev *lpc_pdev; /* Currently only used by mrst */
const struct psb_ops *ops;
const struct psb_offset *regmap;
-
+
struct child_device_config *child_dev;
int child_dev_num;
@@ -540,7 +542,7 @@ struct drm_psb_private {
/* Oaktrail HDMI state */
struct oaktrail_hdmi_dev *hdmi_priv;
-
+
/* Register state */
struct psb_save_area regs;
@@ -572,7 +574,7 @@ struct drm_psb_private {
uint32_t blc_adj1;
uint32_t blc_adj2;
- void *fbdev;
+ struct drm_fb_helper *fb_helper;
/* 2D acceleration */
spinlock_t lock_2d;
diff --git a/drivers/gpu/drm/gma500/psb_intel_display.c b/drivers/gpu/drm/gma500/psb_intel_display.c
index 4256410535f0..fed3b563e62e 100644
--- a/drivers/gpu/drm/gma500/psb_intel_display.c
+++ b/drivers/gpu/drm/gma500/psb_intel_display.c
@@ -432,6 +432,7 @@ const struct drm_crtc_funcs psb_intel_crtc_funcs = {
.gamma_set = gma_crtc_gamma_set,
.set_config = gma_crtc_set_config,
.destroy = gma_crtc_destroy,
+ .page_flip = gma_crtc_page_flip,
};
const struct gma_clock_funcs psb_clock_funcs = {
diff --git a/drivers/gpu/drm/gma500/psb_intel_drv.h b/drivers/gpu/drm/gma500/psb_intel_drv.h
index cdf10333d1c2..16c6136f778b 100644
--- a/drivers/gpu/drm/gma500/psb_intel_drv.h
+++ b/drivers/gpu/drm/gma500/psb_intel_drv.h
@@ -12,6 +12,7 @@
#include <drm/drm_crtc_helper.h>
#include <drm/drm_encoder.h>
#include <drm/drm_probe_helper.h>
+#include <drm/drm_vblank.h>
#include <linux/gpio.h>
#include "gma_display.h"
@@ -182,6 +183,8 @@ struct gma_crtc {
struct psb_intel_crtc_state *crtc_state;
const struct gma_clock_funcs *clock_funcs;
+
+ struct drm_pending_vblank_event *page_flip_event;
};
#define to_gma_crtc(x) \
diff --git a/drivers/gpu/drm/gma500/psb_irq.c b/drivers/gpu/drm/gma500/psb_irq.c
index e6265fb85626..91f90016dba9 100644
--- a/drivers/gpu/drm/gma500/psb_irq.c
+++ b/drivers/gpu/drm/gma500/psb_irq.c
@@ -165,11 +165,23 @@ static void mid_pipe_event_handler(struct drm_device *dev, int pipe)
"%s, can't clear status bits for pipe %d, its value = 0x%x.\n",
__func__, pipe, PSB_RVDC32(pipe_stat_reg));
- if (pipe_stat_val & PIPE_VBLANK_STATUS)
- drm_handle_vblank(dev, pipe);
+ if (pipe_stat_val & PIPE_VBLANK_STATUS ||
+ (IS_MFLD(dev) && pipe_stat_val & PIPE_TE_STATUS)) {
+ struct drm_crtc *crtc = drm_crtc_from_index(dev, pipe);
+ struct gma_crtc *gma_crtc = to_gma_crtc(crtc);
+ unsigned long flags;
- if (pipe_stat_val & PIPE_TE_STATUS)
drm_handle_vblank(dev, pipe);
+
+ spin_lock_irqsave(&dev->event_lock, flags);
+ if (gma_crtc->page_flip_event) {
+ drm_crtc_send_vblank_event(crtc,
+ gma_crtc->page_flip_event);
+ gma_crtc->page_flip_event = NULL;
+ drm_crtc_vblank_put(crtc);
+ }
+ spin_unlock_irqrestore(&dev->event_lock, flags);
+ }
}
/*
@@ -194,7 +206,6 @@ static void psb_sgx_interrupt(struct drm_device *dev, u32 stat_1, u32 stat_2)
{
struct drm_psb_private *dev_priv = dev->dev_private;
u32 val, addr;
- int error = false;
if (stat_1 & _PSB_CE_TWOD_COMPLETE)
val = PSB_RSGX32(PSB_CR_2D_BLIT_STATUS);
@@ -229,7 +240,6 @@ static void psb_sgx_interrupt(struct drm_device *dev, u32 stat_1, u32 stat_2)
DRM_ERROR("\tMMU failing address is 0x%08x.\n",
(unsigned int)addr);
- error = true;
}
}
@@ -460,12 +470,11 @@ void psb_irq_turn_off_dpst(struct drm_device *dev)
{
struct drm_psb_private *dev_priv =
(struct drm_psb_private *) dev->dev_private;
- u32 hist_reg;
u32 pwm_reg;
if (gma_power_begin(dev, false)) {
PSB_WVDC32(0x00000000, HISTOGRAM_INT_CONTROL);
- hist_reg = PSB_RVDC32(HISTOGRAM_INT_CONTROL);
+ PSB_RVDC32(HISTOGRAM_INT_CONTROL);
psb_disable_pipestat(dev_priv, 0, PIPE_DPST_EVENT_ENABLE);
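This IRQ hunk is the second half of the page-flip support added in gma_display.c earlier: the flip ioctl parks the event and takes a vblank reference, and the next vblank interrupt delivers the event and drops the reference, both under dev->event_lock. A boiled-down sketch of the handshake, where flip_state is a hypothetical stand-in for the gma_crtc fields:

#include <drm/drm_crtc.h>
#include <drm/drm_vblank.h>

struct flip_state {
	struct drm_pending_vblank_event *event;
};

static void arm_flip_event(struct drm_crtc *crtc, struct flip_state *fs,
			   struct drm_pending_vblank_event *event)
{
	unsigned long flags;

	spin_lock_irqsave(&crtc->dev->event_lock, flags);
	WARN_ON(drm_crtc_vblank_get(crtc) != 0);	/* hold a vblank ref */
	fs->event = event;
	spin_unlock_irqrestore(&crtc->dev->event_lock, flags);
}

static void flip_vblank_irq(struct drm_crtc *crtc, struct flip_state *fs)
{
	unsigned long flags;

	drm_handle_vblank(crtc->dev, drm_crtc_index(crtc));

	spin_lock_irqsave(&crtc->dev->event_lock, flags);
	if (fs->event) {
		drm_crtc_send_vblank_event(crtc, fs->event);
		fs->event = NULL;
		drm_crtc_vblank_put(crtc);	/* drop the flip's ref */
	}
	spin_unlock_irqrestore(&crtc->dev->event_lock, flags);
}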
diff --git a/drivers/gpu/drm/gma500/tc35876x-dsi-lvds.c b/drivers/gpu/drm/gma500/tc35876x-dsi-lvds.c
index 7de3ce637c7f..9e8224456ea2 100644
--- a/drivers/gpu/drm/gma500/tc35876x-dsi-lvds.c
+++ b/drivers/gpu/drm/gma500/tc35876x-dsi-lvds.c
@@ -25,7 +25,7 @@
#include <linux/delay.h>
#include <linux/kernel.h>
#include <linux/module.h>
-#include <linux/platform_data/tc35876x.h>
+#include <linux/gpio/consumer.h>
#include <asm/intel_scu_ipc.h>
@@ -36,6 +36,11 @@
static struct i2c_client *tc35876x_client;
static struct i2c_client *cmi_lcd_i2c_client;
+/* Panel GPIOs */
+static struct gpio_desc *bridge_reset;
+static struct gpio_desc *bridge_bl_enable;
+static struct gpio_desc *backlight_voltage;
+
#define FLD_MASK(start, end) (((1 << ((start) - (end) + 1)) - 1) << (end))
#define FLD_VAL(val, start, end) (((val) << (end)) & FLD_MASK(start, end))
@@ -316,27 +321,23 @@ static int tc35876x_regr(struct i2c_client *client, u16 reg, u32 *value)
void tc35876x_set_bridge_reset_state(struct drm_device *dev, int state)
{
- struct tc35876x_platform_data *pdata;
-
if (WARN(!tc35876x_client, "%s called before probe", __func__))
return;
dev_dbg(&tc35876x_client->dev, "%s: state %d\n", __func__, state);
- pdata = dev_get_platdata(&tc35876x_client->dev);
-
- if (pdata->gpio_bridge_reset == -1)
+ if (!bridge_reset)
return;
if (state) {
- gpio_set_value_cansleep(pdata->gpio_bridge_reset, 0);
+ gpiod_set_value_cansleep(bridge_reset, 0);
mdelay(10);
} else {
/* Pull MIPI Bridge reset pin to Low */
- gpio_set_value_cansleep(pdata->gpio_bridge_reset, 0);
+ gpiod_set_value_cansleep(bridge_reset, 0);
mdelay(20);
/* Pull MIPI Bridge reset pin to High */
- gpio_set_value_cansleep(pdata->gpio_bridge_reset, 1);
+ gpiod_set_value_cansleep(bridge_reset, 1);
mdelay(40);
}
}
@@ -510,25 +511,20 @@ void tc35876x_brightness_control(struct drm_device *dev, int level)
void tc35876x_toshiba_bridge_panel_off(struct drm_device *dev)
{
- struct tc35876x_platform_data *pdata;
-
if (WARN(!tc35876x_client, "%s called before probe", __func__))
return;
dev_dbg(&tc35876x_client->dev, "%s\n", __func__);
- pdata = dev_get_platdata(&tc35876x_client->dev);
-
- if (pdata->gpio_panel_bl_en != -1)
- gpio_set_value_cansleep(pdata->gpio_panel_bl_en, 0);
+ if (bridge_bl_enable)
+ gpiod_set_value_cansleep(bridge_bl_enable, 0);
- if (pdata->gpio_panel_vadd != -1)
- gpio_set_value_cansleep(pdata->gpio_panel_vadd, 0);
+ if (backlight_voltage)
+ gpiod_set_value_cansleep(backlight_voltage, 0);
}
void tc35876x_toshiba_bridge_panel_on(struct drm_device *dev)
{
- struct tc35876x_platform_data *pdata;
struct drm_psb_private *dev_priv = dev->dev_private;
if (WARN(!tc35876x_client, "%s called before probe", __func__))
@@ -536,10 +532,8 @@ void tc35876x_toshiba_bridge_panel_on(struct drm_device *dev)
dev_dbg(&tc35876x_client->dev, "%s\n", __func__);
- pdata = dev_get_platdata(&tc35876x_client->dev);
-
- if (pdata->gpio_panel_vadd != -1) {
- gpio_set_value_cansleep(pdata->gpio_panel_vadd, 1);
+ if (backlight_voltage) {
+ gpiod_set_value_cansleep(backlight_voltage, 1);
msleep(260);
}
@@ -571,8 +565,8 @@ void tc35876x_toshiba_bridge_panel_on(struct drm_device *dev)
"i2c write failed (%d)\n", ret);
}
- if (pdata->gpio_panel_bl_en != -1)
- gpio_set_value_cansleep(pdata->gpio_panel_bl_en, 1);
+ if (bridge_bl_enable)
+ gpiod_set_value_cansleep(bridge_bl_enable, 1);
tc35876x_brightness_control(dev, dev_priv->brightness_adjusted);
}
@@ -635,8 +629,6 @@ static int tc35876x_get_panel_info(struct drm_device *dev, int pipe,
static int tc35876x_bridge_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
- struct tc35876x_platform_data *pdata;
-
dev_info(&client->dev, "%s\n", __func__);
if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C)) {
@@ -645,26 +637,23 @@ static int tc35876x_bridge_probe(struct i2c_client *client,
return -ENODEV;
}
- pdata = dev_get_platdata(&client->dev);
- if (!pdata) {
- dev_err(&client->dev, "%s: no platform data\n", __func__);
- return -ENODEV;
- }
+ bridge_reset = devm_gpiod_get_optional(&client->dev, "bridge-reset", GPIOD_OUT_LOW);
+ if (IS_ERR(bridge_reset))
+ return PTR_ERR(bridge_reset);
+ if (bridge_reset)
+ gpiod_set_consumer_name(bridge_reset, "tc35876x bridge reset");
- if (pdata->gpio_bridge_reset != -1) {
- gpio_request(pdata->gpio_bridge_reset, "tc35876x bridge reset");
- gpio_direction_output(pdata->gpio_bridge_reset, 0);
- }
-
- if (pdata->gpio_panel_bl_en != -1) {
- gpio_request(pdata->gpio_panel_bl_en, "tc35876x panel bl en");
- gpio_direction_output(pdata->gpio_panel_bl_en, 0);
- }
+ bridge_bl_enable = devm_gpiod_get_optional(&client->dev, "bl-en", GPIOD_OUT_LOW);
+ if (IS_ERR(bridge_bl_enable))
+ return PTR_ERR(bridge_bl_enable);
+ if (bridge_bl_enable)
+ gpiod_set_consumer_name(bridge_bl_enable, "tc35876x panel bl en");
- if (pdata->gpio_panel_vadd != -1) {
- gpio_request(pdata->gpio_panel_vadd, "tc35876x panel vadd");
- gpio_direction_output(pdata->gpio_panel_vadd, 0);
- }
+ backlight_voltage = devm_gpiod_get_optional(&client->dev, "vadd", GPIOD_OUT_LOW);
+ if (IS_ERR(backlight_voltage))
+ return PTR_ERR(backlight_voltage);
+ if (backlight_voltage)
+ gpiod_set_consumer_name(backlight_voltage, "tc35876x panel vadd");
tc35876x_client = client;
@@ -673,19 +662,8 @@ static int tc35876x_bridge_probe(struct i2c_client *client,
static int tc35876x_bridge_remove(struct i2c_client *client)
{
- struct tc35876x_platform_data *pdata = dev_get_platdata(&client->dev);
-
dev_dbg(&client->dev, "%s\n", __func__);
- if (pdata->gpio_bridge_reset != -1)
- gpio_free(pdata->gpio_bridge_reset);
-
- if (pdata->gpio_panel_bl_en != -1)
- gpio_free(pdata->gpio_panel_bl_en);
-
- if (pdata->gpio_panel_vadd != -1)
- gpio_free(pdata->gpio_panel_vadd);
-
tc35876x_client = NULL;
return 0;
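The tc35876x conversion above is the standard legacy-GPIO to gpiod migration: descriptors come from devm_gpiod_get_optional(), which returns NULL rather than an error when the line simply is not described for the board, so the -1 sentinel checks disappear along with the manual gpio_free() calls in remove. A minimal sketch of the pattern with demo names; the real con_id strings must match a lookup table registered elsewhere:

#include <linux/device.h>
#include <linux/err.h>
#include <linux/gpio/consumer.h>

static int demo_probe(struct device *dev)
{
	struct gpio_desc *reset;

	/* NULL (not an error) when the GPIO is absent; request it
	 * driven low, matching GPIOD_OUT_LOW above. */
	reset = devm_gpiod_get_optional(dev, "bridge-reset", GPIOD_OUT_LOW);
	if (IS_ERR(reset))
		return PTR_ERR(reset);

	if (reset)
		gpiod_set_value_cansleep(reset, 1);	/* assert the line */

	return 0;	/* devm_* releases the descriptor automatically */
}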
diff --git a/drivers/gpu/drm/hisilicon/hibmc/Makefile b/drivers/gpu/drm/hisilicon/hibmc/Makefile
index 0c2d4296bccd..f99132715597 100644
--- a/drivers/gpu/drm/hisilicon/hibmc/Makefile
+++ b/drivers/gpu/drm/hisilicon/hibmc/Makefile
@@ -1,4 +1,4 @@
# SPDX-License-Identifier: GPL-2.0-only
-hibmc-drm-y := hibmc_drm_drv.o hibmc_drm_de.o hibmc_drm_vdac.o hibmc_drm_fbdev.o hibmc_ttm.o
+hibmc-drm-y := hibmc_drm_drv.o hibmc_drm_de.o hibmc_drm_vdac.o hibmc_ttm.o
obj-$(CONFIG_DRM_HISI_HIBMC) += hibmc-drm.o
diff --git a/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_de.c b/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_de.c
index 6527a97f68a3..7fa7d4933f60 100644
--- a/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_de.c
+++ b/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_de.c
@@ -99,14 +99,12 @@ static void hibmc_plane_atomic_update(struct drm_plane *plane,
s64 gpu_addr = 0;
unsigned int line_l;
struct hibmc_drm_private *priv = plane->dev->dev_private;
- struct hibmc_framebuffer *hibmc_fb;
struct drm_gem_vram_object *gbo;
if (!state->fb)
return;
- hibmc_fb = to_hibmc_framebuffer(state->fb);
- gbo = drm_gem_vram_of_gem(hibmc_fb->obj);
+ gbo = drm_gem_vram_of_gem(state->fb->obj[0]);
gpu_addr = drm_gem_vram_offset(gbo);
if (WARN_ON_ONCE(gpu_addr < 0))
diff --git a/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_drv.c b/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_drv.c
index 2fd4ca91a62d..4a8a4cfb4b75 100644
--- a/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_drv.c
+++ b/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_drv.c
@@ -17,6 +17,7 @@
#include <drm/drm_atomic_helper.h>
#include <drm/drm_drv.h>
+#include <drm/drm_fb_helper.h>
#include <drm/drm_gem_vram_helper.h>
#include <drm/drm_irq.h>
#include <drm/drm_print.h>
@@ -54,6 +55,7 @@ static struct drm_driver hibmc_driver = {
.desc = "hibmc drm driver",
.major = 1,
.minor = 0,
+ .debugfs_init = drm_vram_mm_debugfs_init,
.dumb_create = hibmc_dumb_create,
.dumb_map_offset = drm_gem_vram_driver_dumb_mmap_offset,
.gem_prime_mmap = drm_gem_prime_mmap,
@@ -211,7 +213,7 @@ static int hibmc_hw_map(struct hibmc_drm_private *priv)
ioaddr = pci_resource_start(pdev, 1);
iosize = pci_resource_len(pdev, 1);
- priv->mmio = devm_ioremap_nocache(dev->dev, ioaddr, iosize);
+ priv->mmio = devm_ioremap(dev->dev, ioaddr, iosize);
if (!priv->mmio) {
DRM_ERROR("Cannot map mmio region\n");
return -ENOMEM;
@@ -247,8 +249,6 @@ static int hibmc_unload(struct drm_device *dev)
{
struct hibmc_drm_private *priv = dev->dev_private;
- hibmc_fbdev_fini(priv);
-
drm_atomic_helper_shutdown(dev);
if (dev->irq_enabled)
@@ -307,7 +307,7 @@ static int hibmc_load(struct drm_device *dev)
/* reset all the states of crtc/plane/encoder/connector */
drm_mode_config_reset(dev);
- ret = hibmc_fbdev_init(priv);
+ ret = drm_fbdev_generic_setup(dev, 16);
if (ret) {
DRM_ERROR("failed to initialize fbdev: %d\n", ret);
goto err;
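Everything the deleted hibmc_drm_fbdev.c did by hand (BO allocation, pinning, kmap, fb_ops, fb_info wiring) collapses into the single drm_fbdev_generic_setup() call above, which also tears itself down on unload. Sketched against the API as used in this hunk, where the call still returns an error code and 16 is the preferred console bpp:

#include <drm/drm_fb_helper.h>
#include <drm/drm_print.h>

static int demo_setup_fbdev(struct drm_device *dev)
{
	/* Generic fbdev emulation on top of the driver's VRAM helpers;
	 * no driver-private fbdev state needed. */
	int ret = drm_fbdev_generic_setup(dev, 16);

	if (ret)
		DRM_ERROR("failed to initialize fbdev: %d\n", ret);
	return ret;
}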
diff --git a/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_drv.h b/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_drv.h
index e58ecd7edcf8..50a0c1f9d211 100644
--- a/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_drv.h
+++ b/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_drv.h
@@ -18,18 +18,6 @@
#include <drm/drm_framebuffer.h>
struct drm_device;
-struct drm_gem_object;
-
-struct hibmc_framebuffer {
- struct drm_framebuffer fb;
- struct drm_gem_object *obj;
-};
-
-struct hibmc_fbdev {
- struct drm_fb_helper helper; /* must be first */
- struct hibmc_framebuffer *fb;
- int size;
-};
struct hibmc_drm_private {
/* hw */
@@ -42,13 +30,8 @@ struct hibmc_drm_private {
/* drm */
struct drm_device *dev;
bool mode_config_initialized;
-
- /* fbdev */
- struct hibmc_fbdev *fbdev;
};
-#define to_hibmc_framebuffer(x) container_of(x, struct hibmc_framebuffer, fb)
-
void hibmc_set_power_mode(struct hibmc_drm_private *priv,
unsigned int power_mode);
void hibmc_set_current_gate(struct hibmc_drm_private *priv,
@@ -56,15 +39,6 @@ void hibmc_set_current_gate(struct hibmc_drm_private *priv,
int hibmc_de_init(struct hibmc_drm_private *priv);
int hibmc_vdac_init(struct hibmc_drm_private *priv);
-int hibmc_fbdev_init(struct hibmc_drm_private *priv);
-void hibmc_fbdev_fini(struct hibmc_drm_private *priv);
-
-int hibmc_gem_create(struct drm_device *dev, u32 size, bool iskernel,
- struct drm_gem_object **obj);
-struct hibmc_framebuffer *
-hibmc_framebuffer_init(struct drm_device *dev,
- const struct drm_mode_fb_cmd2 *mode_cmd,
- struct drm_gem_object *obj);
int hibmc_mm_init(struct hibmc_drm_private *hibmc);
void hibmc_mm_fini(struct hibmc_drm_private *hibmc);
diff --git a/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_fbdev.c b/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_fbdev.c
deleted file mode 100644
index b4c1cea051e8..000000000000
--- a/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_fbdev.c
+++ /dev/null
@@ -1,240 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-/* Hisilicon Hibmc SoC drm driver
- *
- * Based on the bochs drm driver.
- *
- * Copyright (c) 2016 Huawei Limited.
- *
- * Author:
- * Rongrong Zou <zourongrong@huawei.com>
- * Rongrong Zou <zourongrong@gmail.com>
- * Jianhua Li <lijianhua@huawei.com>
- */
-
-#include <drm/drm_crtc.h>
-#include <drm/drm_fb_helper.h>
-#include <drm/drm_fourcc.h>
-#include <drm/drm_gem_vram_helper.h>
-#include <drm/drm_probe_helper.h>
-
-#include "hibmc_drm_drv.h"
-
-static int hibmcfb_create_object(
- struct hibmc_drm_private *priv,
- const struct drm_mode_fb_cmd2 *mode_cmd,
- struct drm_gem_object **gobj_p)
-{
- struct drm_gem_object *gobj;
- struct drm_device *dev = priv->dev;
- u32 size;
- int ret = 0;
-
- size = mode_cmd->pitches[0] * mode_cmd->height;
- ret = hibmc_gem_create(dev, size, true, &gobj);
- if (ret)
- return ret;
-
- *gobj_p = gobj;
- return ret;
-}
-
-static struct fb_ops hibmc_drm_fb_ops = {
- .owner = THIS_MODULE,
- .fb_check_var = drm_fb_helper_check_var,
- .fb_set_par = drm_fb_helper_set_par,
- .fb_fillrect = drm_fb_helper_sys_fillrect,
- .fb_copyarea = drm_fb_helper_sys_copyarea,
- .fb_imageblit = drm_fb_helper_sys_imageblit,
- .fb_pan_display = drm_fb_helper_pan_display,
- .fb_blank = drm_fb_helper_blank,
- .fb_setcmap = drm_fb_helper_setcmap,
-};
-
-static int hibmc_drm_fb_create(struct drm_fb_helper *helper,
- struct drm_fb_helper_surface_size *sizes)
-{
- struct hibmc_fbdev *hi_fbdev =
- container_of(helper, struct hibmc_fbdev, helper);
- struct hibmc_drm_private *priv = helper->dev->dev_private;
- struct fb_info *info;
- struct drm_mode_fb_cmd2 mode_cmd;
- struct drm_gem_object *gobj = NULL;
- int ret = 0;
- size_t size;
- unsigned int bytes_per_pixel;
- struct drm_gem_vram_object *gbo = NULL;
- void *base;
-
- DRM_DEBUG_DRIVER("surface width(%d), height(%d) and bpp(%d)\n",
- sizes->surface_width, sizes->surface_height,
- sizes->surface_bpp);
-
- bytes_per_pixel = DIV_ROUND_UP(sizes->surface_bpp, 8);
-
- mode_cmd.width = sizes->surface_width;
- mode_cmd.height = sizes->surface_height;
- mode_cmd.pitches[0] = mode_cmd.width * bytes_per_pixel;
- mode_cmd.pixel_format = drm_mode_legacy_fb_format(sizes->surface_bpp,
- sizes->surface_depth);
-
- size = PAGE_ALIGN(mode_cmd.pitches[0] * mode_cmd.height);
-
- ret = hibmcfb_create_object(priv, &mode_cmd, &gobj);
- if (ret) {
- DRM_ERROR("failed to create fbcon backing object: %d\n", ret);
- return -ENOMEM;
- }
-
- gbo = drm_gem_vram_of_gem(gobj);
-
- ret = drm_gem_vram_pin(gbo, DRM_GEM_VRAM_PL_FLAG_VRAM);
- if (ret) {
- DRM_ERROR("failed to pin fbcon: %d\n", ret);
- goto out_unref_gem;
- }
-
- base = drm_gem_vram_kmap(gbo, true, NULL);
- if (IS_ERR(base)) {
- ret = PTR_ERR(base);
- DRM_ERROR("failed to kmap fbcon: %d\n", ret);
- goto out_unpin_bo;
- }
-
- info = drm_fb_helper_alloc_fbi(helper);
- if (IS_ERR(info)) {
- ret = PTR_ERR(info);
- DRM_ERROR("failed to allocate fbi: %d\n", ret);
- goto out_release_fbi;
- }
-
- hi_fbdev->fb = hibmc_framebuffer_init(priv->dev, &mode_cmd, gobj);
- if (IS_ERR(hi_fbdev->fb)) {
- ret = PTR_ERR(hi_fbdev->fb);
- hi_fbdev->fb = NULL;
- DRM_ERROR("failed to initialize framebuffer: %d\n", ret);
- goto out_release_fbi;
- }
-
- priv->fbdev->size = size;
- hi_fbdev->helper.fb = &hi_fbdev->fb->fb;
-
- info->fbops = &hibmc_drm_fb_ops;
-
- drm_fb_helper_fill_info(info, &priv->fbdev->helper, sizes);
-
- info->screen_base = base;
- info->screen_size = size;
-
- info->fix.smem_start = gbo->bo.mem.bus.offset + gbo->bo.mem.bus.base;
- info->fix.smem_len = size;
- return 0;
-
-out_release_fbi:
- drm_gem_vram_kunmap(gbo);
-out_unpin_bo:
- drm_gem_vram_unpin(gbo);
-out_unref_gem:
- drm_gem_object_put_unlocked(gobj);
-
- return ret;
-}
-
-static void hibmc_fbdev_destroy(struct hibmc_fbdev *fbdev)
-{
- struct hibmc_framebuffer *gfb = fbdev->fb;
- struct drm_fb_helper *fbh = &fbdev->helper;
-
- drm_fb_helper_unregister_fbi(fbh);
-
- drm_fb_helper_fini(fbh);
-
- if (gfb)
- drm_framebuffer_put(&gfb->fb);
-}
-
-static const struct drm_fb_helper_funcs hibmc_fbdev_helper_funcs = {
- .fb_probe = hibmc_drm_fb_create,
-};
-
-int hibmc_fbdev_init(struct hibmc_drm_private *priv)
-{
- int ret;
- struct fb_var_screeninfo *var;
- struct fb_fix_screeninfo *fix;
- struct hibmc_fbdev *hifbdev;
-
- hifbdev = devm_kzalloc(priv->dev->dev, sizeof(*hifbdev), GFP_KERNEL);
- if (!hifbdev) {
- DRM_ERROR("failed to allocate hibmc_fbdev\n");
- return -ENOMEM;
- }
-
- priv->fbdev = hifbdev;
- drm_fb_helper_prepare(priv->dev, &hifbdev->helper,
- &hibmc_fbdev_helper_funcs);
-
- /* Now just one crtc and one channel */
- ret = drm_fb_helper_init(priv->dev, &hifbdev->helper, 1);
- if (ret) {
- DRM_ERROR("failed to initialize fb helper: %d\n", ret);
- return ret;
- }
-
- ret = drm_fb_helper_single_add_all_connectors(&hifbdev->helper);
- if (ret) {
- DRM_ERROR("failed to add all connectors: %d\n", ret);
- goto fini;
- }
-
- ret = drm_fb_helper_initial_config(&hifbdev->helper, 16);
- if (ret) {
- DRM_ERROR("failed to setup initial conn config: %d\n", ret);
- goto fini;
- }
-
- var = &hifbdev->helper.fbdev->var;
- fix = &hifbdev->helper.fbdev->fix;
-
- DRM_DEBUG_DRIVER("Member of info->var is :\n"
- "xres=%d\n"
- "yres=%d\n"
- "xres_virtual=%d\n"
- "yres_virtual=%d\n"
- "xoffset=%d\n"
- "yoffset=%d\n"
- "bits_per_pixel=%d\n"
- "...\n", var->xres, var->yres, var->xres_virtual,
- var->yres_virtual, var->xoffset, var->yoffset,
- var->bits_per_pixel);
- DRM_DEBUG_DRIVER("Member of info->fix is :\n"
- "smem_start=%lx\n"
- "smem_len=%d\n"
- "type=%d\n"
- "type_aux=%d\n"
- "visual=%d\n"
- "xpanstep=%d\n"
- "ypanstep=%d\n"
- "ywrapstep=%d\n"
- "line_length=%d\n"
- "accel=%d\n"
- "capabilities=%d\n"
- "...\n", fix->smem_start, fix->smem_len, fix->type,
- fix->type_aux, fix->visual, fix->xpanstep,
- fix->ypanstep, fix->ywrapstep, fix->line_length,
- fix->accel, fix->capabilities);
-
- return 0;
-
-fini:
- drm_fb_helper_fini(&hifbdev->helper);
- return ret;
-}
-
-void hibmc_fbdev_fini(struct hibmc_drm_private *priv)
-{
- if (!priv->fbdev)
- return;
-
- hibmc_fbdev_destroy(priv->fbdev);
- priv->fbdev = NULL;
-}
diff --git a/drivers/gpu/drm/hisilicon/hibmc/hibmc_ttm.c b/drivers/gpu/drm/hisilicon/hibmc/hibmc_ttm.c
index 21b684eab5c9..50b988fdd5cc 100644
--- a/drivers/gpu/drm/hisilicon/hibmc/hibmc_ttm.c
+++ b/drivers/gpu/drm/hisilicon/hibmc/hibmc_ttm.c
@@ -15,6 +15,7 @@
#include <drm/drm_atomic_helper.h>
#include <drm/drm_gem.h>
+#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/drm_gem_vram_helper.h>
#include <drm/drm_print.h>
@@ -46,125 +47,14 @@ void hibmc_mm_fini(struct hibmc_drm_private *hibmc)
drm_vram_helper_release_mm(hibmc->dev);
}
-int hibmc_gem_create(struct drm_device *dev, u32 size, bool iskernel,
- struct drm_gem_object **obj)
-{
- struct drm_gem_vram_object *gbo;
- int ret;
-
- *obj = NULL;
-
- size = roundup(size, PAGE_SIZE);
- if (size == 0)
- return -EINVAL;
-
- gbo = drm_gem_vram_create(dev, &dev->vram_mm->bdev, size, 0, false);
- if (IS_ERR(gbo)) {
- ret = PTR_ERR(gbo);
- if (ret != -ERESTARTSYS)
- DRM_ERROR("failed to allocate GEM object: %d\n", ret);
- return ret;
- }
- *obj = &gbo->bo.base;
- return 0;
-}
-
int hibmc_dumb_create(struct drm_file *file, struct drm_device *dev,
struct drm_mode_create_dumb *args)
{
- struct drm_gem_object *gobj;
- u32 handle;
- int ret;
-
- args->pitch = ALIGN(args->width * DIV_ROUND_UP(args->bpp, 8), 16);
- args->size = args->pitch * args->height;
-
- ret = hibmc_gem_create(dev, args->size, false,
- &gobj);
- if (ret) {
- DRM_ERROR("failed to create GEM object: %d\n", ret);
- return ret;
- }
-
- ret = drm_gem_handle_create(file, gobj, &handle);
- drm_gem_object_put_unlocked(gobj);
- if (ret) {
- DRM_ERROR("failed to unreference GEM object: %d\n", ret);
- return ret;
- }
-
- args->handle = handle;
- return 0;
-}
-
-static void hibmc_user_framebuffer_destroy(struct drm_framebuffer *fb)
-{
- struct hibmc_framebuffer *hibmc_fb = to_hibmc_framebuffer(fb);
-
- drm_gem_object_put_unlocked(hibmc_fb->obj);
- drm_framebuffer_cleanup(fb);
- kfree(hibmc_fb);
-}
-
-static const struct drm_framebuffer_funcs hibmc_fb_funcs = {
- .destroy = hibmc_user_framebuffer_destroy,
-};
-
-struct hibmc_framebuffer *
-hibmc_framebuffer_init(struct drm_device *dev,
- const struct drm_mode_fb_cmd2 *mode_cmd,
- struct drm_gem_object *obj)
-{
- struct hibmc_framebuffer *hibmc_fb;
- int ret;
-
- hibmc_fb = kzalloc(sizeof(*hibmc_fb), GFP_KERNEL);
- if (!hibmc_fb) {
- DRM_ERROR("failed to allocate hibmc_fb\n");
- return ERR_PTR(-ENOMEM);
- }
-
- drm_helper_mode_fill_fb_struct(dev, &hibmc_fb->fb, mode_cmd);
- hibmc_fb->obj = obj;
- ret = drm_framebuffer_init(dev, &hibmc_fb->fb, &hibmc_fb_funcs);
- if (ret) {
- DRM_ERROR("drm_framebuffer_init failed: %d\n", ret);
- kfree(hibmc_fb);
- return ERR_PTR(ret);
- }
-
- return hibmc_fb;
-}
-
-static struct drm_framebuffer *
-hibmc_user_framebuffer_create(struct drm_device *dev,
- struct drm_file *filp,
- const struct drm_mode_fb_cmd2 *mode_cmd)
-{
- struct drm_gem_object *obj;
- struct hibmc_framebuffer *hibmc_fb;
-
- DRM_DEBUG_DRIVER("%dx%d, format %c%c%c%c\n",
- mode_cmd->width, mode_cmd->height,
- (mode_cmd->pixel_format) & 0xff,
- (mode_cmd->pixel_format >> 8) & 0xff,
- (mode_cmd->pixel_format >> 16) & 0xff,
- (mode_cmd->pixel_format >> 24) & 0xff);
-
- obj = drm_gem_object_lookup(filp, mode_cmd->handles[0]);
- if (!obj)
- return ERR_PTR(-ENOENT);
-
- hibmc_fb = hibmc_framebuffer_init(dev, mode_cmd, obj);
- if (IS_ERR(hibmc_fb)) {
- drm_gem_object_put_unlocked(obj);
- return ERR_PTR((long)hibmc_fb);
- }
- return &hibmc_fb->fb;
+ return drm_gem_vram_fill_create_dumb(file, dev, 0, 16, args);
}
const struct drm_mode_config_funcs hibmc_mode_funcs = {
.atomic_check = drm_atomic_helper_check,
.atomic_commit = drm_atomic_helper_commit,
- .fb_create = hibmc_user_framebuffer_create,
+ .fb_create = drm_gem_fb_create,
};
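
A note on the two hibmc_ttm.c hunks above: the driver's hand-rolled GEM-create, dumb-create and framebuffer paths collapse into the shared VRAM and GEM-framebuffer helpers. A minimal sketch of the resulting driver-side wiring follows; the "example_" names are hypothetical, only the helper calls are real, and the deleted fbdev emulation is presumably replaced elsewhere in the series (e.g. by drm_fbdev_generic_setup(), in a hunk not shown here):

#include <drm/drm_atomic_helper.h>
#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/drm_gem_vram_helper.h>

static int example_dumb_create(struct drm_file *file, struct drm_device *dev,
			       struct drm_mode_create_dumb *args)
{
	/* pg_align = 0, pitch alignment = 16 bytes, mirroring the hunk above */
	return drm_gem_vram_fill_create_dumb(file, dev, 0, 16, args);
}

static const struct drm_mode_config_funcs example_mode_funcs = {
	.fb_create = drm_gem_fb_create,	/* generic GEM framebuffer path */
	.atomic_check = drm_atomic_helper_check,
	.atomic_commit = drm_atomic_helper_commit,
};

The dumb-create helper rounds up the pitch and size and creates the VRAM-backed GEM object internally, which is what makes hibmc_gem_create() and the open-coded pitch arithmetic deleted above redundant.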
diff --git a/drivers/gpu/drm/i810/i810_dma.c b/drivers/gpu/drm/i810/i810_dma.c
index e66c38332df4..b88c3d5f92b4 100644
--- a/drivers/gpu/drm/i810/i810_dma.c
+++ b/drivers/gpu/drm/i810/i810_dma.c
@@ -32,6 +32,7 @@
#include <linux/delay.h>
#include <linux/mman.h>
+#include <linux/pci.h>
#include <drm/drm_agpsupport.h>
#include <drm/drm_device.h>
@@ -39,7 +40,6 @@
#include <drm/drm_file.h>
#include <drm/drm_ioctl.h>
#include <drm/drm_irq.h>
-#include <drm/drm_pci.h>
#include <drm/drm_print.h>
#include <drm/i810_drm.h>
diff --git a/drivers/gpu/drm/i810/i810_drv.c b/drivers/gpu/drm/i810/i810_drv.c
index 5dd26a06ee0e..0e53a066d4db 100644
--- a/drivers/gpu/drm/i810/i810_drv.c
+++ b/drivers/gpu/drm/i810/i810_drv.c
@@ -31,11 +31,12 @@
*/
#include "i810_drv.h"
+
#include <linux/module.h>
+#include <linux/pci.h>
#include <drm/drm_drv.h>
#include <drm/drm_file.h>
-#include <drm/drm_pci.h>
#include <drm/drm_pciids.h>
#include <drm/i810_drm.h>
diff --git a/drivers/gpu/drm/i915/.gitignore b/drivers/gpu/drm/i915/.gitignore
new file mode 100644
index 000000000000..d9a77f3b59b2
--- /dev/null
+++ b/drivers/gpu/drm/i915/.gitignore
@@ -0,0 +1 @@
+*.hdrtest
diff --git a/drivers/gpu/drm/i915/Kconfig.debug b/drivers/gpu/drm/i915/Kconfig.debug
index 438040ff0179..1cb28c20807c 100644
--- a/drivers/gpu/drm/i915/Kconfig.debug
+++ b/drivers/gpu/drm/i915/Kconfig.debug
@@ -27,6 +27,7 @@ config DRM_I915_DEBUG
select X86_MSR # used by igt/pm_rpm
select DRM_VGEM # used by igt/prime_vgem (dmabuf interop checks)
select DRM_DEBUG_MM if DRM=y
+ select DRM_EXPORT_FOR_TESTS if m
select DRM_DEBUG_SELFTEST
select DMABUF_SELFTESTS
select SW_SYNC # signaling validation framework (igt/syncobj*)
@@ -149,6 +150,7 @@ config DRM_I915_SELFTEST
bool "Enable selftests upon driver load"
depends on DRM_I915
default n
+ select DRM_EXPORT_FOR_TESTS if m
select FAULT_INJECTION
select PRIME_NUMBERS
help
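
Context for the two "select DRM_EXPORT_FOR_TESTS if m" additions above: core DRM exports a handful of symbols purely for test use, gated roughly as sketched below (paraphrased from include/drm/drm_util.h, not part of this diff), so a modular i915 built with debug or selftest options can still link against them:

/* Paraphrased gate, for illustration only */
#if defined(CONFIG_DRM_EXPORT_FOR_TESTS)
#define EXPORT_SYMBOL_FOR_TESTS_ONLY(x) EXPORT_SYMBOL(x)
#else
#define EXPORT_SYMBOL_FOR_TESTS_ONLY(x)
#endif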
diff --git a/drivers/gpu/drm/i915/Makefile b/drivers/gpu/drm/i915/Makefile
index 90dcf09f52cc..b8c5f8934dbd 100644
--- a/drivers/gpu/drm/i915/Makefile
+++ b/drivers/gpu/drm/i915/Makefile
@@ -31,9 +31,6 @@ CFLAGS_display/intel_fbdev.o = $(call cc-disable-warning, override-init)
subdir-ccflags-y += \
$(call as-instr,movntdqa (%eax)$(comma)%xmm0,-DCONFIG_AS_MOVNTDQA)
-# Extra header tests
-header-test-pattern-$(CONFIG_DRM_I915_WERROR) := *.h
-
subdir-ccflags-y += -I$(srctree)/$(src)
# Please keep these build lists sorted!
@@ -73,8 +70,12 @@ i915-$(CONFIG_DEBUG_FS) += i915_debugfs.o display/intel_pipe_crc.o
i915-$(CONFIG_PERF_EVENTS) += i915_pmu.o
# "Graphics Technology" (aka we talk to the gpu)
-obj-y += gt/
gt-y += \
+ gt/debugfs_engines.o \
+ gt/debugfs_gt.o \
+ gt/debugfs_gt_pm.o \
+ gt/gen6_ppgtt.o \
+ gt/gen8_ppgtt.o \
gt/intel_breadcrumbs.o \
gt/intel_context.o \
gt/intel_engine_cs.o \
@@ -82,14 +83,17 @@ gt-y += \
gt/intel_engine_pm.o \
gt/intel_engine_pool.o \
gt/intel_engine_user.o \
+ gt/intel_ggtt.o \
gt/intel_gt.o \
gt/intel_gt_irq.o \
gt/intel_gt_pm.o \
gt/intel_gt_pm_irq.o \
gt/intel_gt_requests.o \
+ gt/intel_gtt.o \
gt/intel_llc.o \
gt/intel_lrc.o \
gt/intel_mocs.o \
+ gt/intel_ppgtt.o \
gt/intel_rc6.o \
gt/intel_renderstate.o \
gt/intel_reset.o \
@@ -108,7 +112,6 @@ gt-y += \
i915-y += $(gt-y)
# GEM (Graphics Execution Management) code
-obj-y += gem/
gem-y += \
gem/i915_gem_busy.o \
gem/i915_gem_clflush.o \
@@ -154,7 +157,6 @@ i915-y += \
intel_wopcm.o
# general-purpose microcontroller (GuC) support
-obj-y += gt/uc/
i915-y += gt/uc/intel_uc.o \
gt/uc/intel_uc_fw.o \
gt/uc/intel_guc.o \
@@ -167,7 +169,6 @@ i915-y += gt/uc/intel_uc.o \
gt/uc/intel_huc_fw.o
# modesetting core code
-obj-y += display/
i915-y += \
display/intel_atomic.o \
display/intel_atomic_plane.o \
@@ -232,7 +233,6 @@ i915-y += \
display/vlv_dsi_pll.o
# perf code
-obj-y += oa/
i915-y += \
oa/i915_oa_hsw.o \
oa/i915_oa_bdw.o \
@@ -257,8 +257,10 @@ i915-$(CONFIG_DRM_I915_SELFTEST) += \
gem/selftests/igt_gem_utils.o \
selftests/i915_random.o \
selftests/i915_selftest.o \
+ selftests/igt_atomic.o \
selftests/igt_flush_test.o \
selftests/igt_live_test.o \
+ selftests/igt_mmap.o \
selftests/igt_reset.o \
selftests/igt_spinner.o
@@ -272,3 +274,27 @@ endif
obj-$(CONFIG_DRM_I915) += i915.o
obj-$(CONFIG_DRM_I915_GVT_KVMGT) += gvt/kvmgt.o
+
+# header test
+
+# exclude some broken headers from the test coverage
+no-header-test := \
+ display/intel_vbt_defs.h \
+ gvt/execlist.h \
+ gvt/fb_decoder.h \
+ gvt/gtt.h \
+ gvt/gvt.h \
+ gvt/interrupt.h \
+ gvt/mmio_context.h \
+ gvt/mpt.h \
+ gvt/scheduler.h
+
+extra-$(CONFIG_DRM_I915_WERROR) += \
+ $(patsubst %.h,%.hdrtest, $(filter-out $(no-header-test), \
+ $(shell cd $(srctree)/$(src) && find * -name '*.h')))
+
+quiet_cmd_hdrtest = HDRTEST $(patsubst %.hdrtest,%.h,$@)
+ cmd_hdrtest = $(CC) $(c_flags) -S -o /dev/null -x c /dev/null -include $<; touch $@
+
+$(obj)/%.hdrtest: $(src)/%.h FORCE
+ $(call if_changed_dep,hdrtest)
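
A note on the hdrtest rule above: for every header not on the exclusion list it compiles an empty translation unit with the header force-included, then touches a *.hdrtest stamp file (which the new .gitignore above keeps out of git). Expanded by hand for one header, the command is roughly:

	$(CC) $(c_flags) -S -o /dev/null -x c /dev/null -include gt/intel_gt.h
	touch gt/intel_gt.hdrtest

so any header that fails to pull in its own dependencies breaks the build. A minimal illustration of the failure mode this catches, with hypothetical header contents:

/* example_broken.h -- would fail HDRTEST: */
struct example_state {
	spinlock_t lock;	/* error: unknown type name 'spinlock_t' */
};

/* example_fixed.h -- passes, because the header is self-contained: */
#include <linux/spinlock.h>

struct example_state {
	spinlock_t lock;
};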
diff --git a/drivers/gpu/drm/i915/display/Makefile b/drivers/gpu/drm/i915/display/Makefile
deleted file mode 100644
index 173c305d7866..000000000000
--- a/drivers/gpu/drm/i915/display/Makefile
+++ /dev/null
@@ -1,6 +0,0 @@
-# For building individual subdir files on the command line
-subdir-ccflags-y += -I$(srctree)/$(src)/..
-
-# Extra header tests
-header-test-pattern-$(CONFIG_DRM_I915_WERROR) := *.h
-header-test- := intel_vbt_defs.h
diff --git a/drivers/gpu/drm/i915/display/icl_dsi.c b/drivers/gpu/drm/i915/display/icl_dsi.c
index 325df29b0447..f8e882101396 100644
--- a/drivers/gpu/drm/i915/display/icl_dsi.c
+++ b/drivers/gpu/drm/i915/display/icl_dsi.c
@@ -34,6 +34,7 @@
#include "intel_ddi.h"
#include "intel_dsi.h"
#include "intel_panel.h"
+#include "intel_vdsc.h"
static inline int header_credits_available(struct drm_i915_private *dev_priv,
enum transcoder dsi_trans)
@@ -76,7 +77,7 @@ static enum transcoder dsi_port_to_transcoder(enum port port)
static void wait_for_cmds_dispatched_to_panel(struct intel_encoder *encoder)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
+ struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
struct mipi_dsi_device *dsi;
enum port port;
enum transcoder dsi_trans;
@@ -201,7 +202,7 @@ static int dsi_send_pkt_payld(struct intel_dsi_host *host,
static void dsi_program_swing_and_deemphasis(struct intel_encoder *encoder)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
+ struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
enum phy phy;
u32 tmp;
int lane;
@@ -266,7 +267,7 @@ static void configure_dual_link_mode(struct intel_encoder *encoder,
const struct intel_crtc_state *pipe_config)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
+ struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
u32 dss_ctl1;
dss_ctl1 = I915_READ(DSS_CTL1);
@@ -276,7 +277,7 @@ static void configure_dual_link_mode(struct intel_encoder *encoder,
if (intel_dsi->dual_link == DSI_DUAL_LINK_FRONT_BACK) {
const struct drm_display_mode *adjusted_mode =
- &pipe_config->base.adjusted_mode;
+ &pipe_config->hw.adjusted_mode;
u32 dss_ctl2;
u16 hactive = adjusted_mode->crtc_hdisplay;
u16 dl_buffer_depth;
@@ -301,18 +302,31 @@ static void configure_dual_link_mode(struct intel_encoder *encoder,
I915_WRITE(DSS_CTL1, dss_ctl1);
}
-static void gen11_dsi_program_esc_clk_div(struct intel_encoder *encoder)
+/* aka DSI 8X clock */
+static int afe_clk(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state)
+{
+ struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
+ int bpp;
+
+ if (crtc_state->dsc.compression_enable)
+ bpp = crtc_state->dsc.compressed_bpp;
+ else
+ bpp = mipi_dsi_pixel_format_to_bpp(intel_dsi->pixel_format);
+
+ return DIV_ROUND_CLOSEST(intel_dsi->pclk * bpp, intel_dsi->lane_count);
+}
+
+static void gen11_dsi_program_esc_clk_div(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
+ struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
enum port port;
- u32 bpp = mipi_dsi_pixel_format_to_bpp(intel_dsi->pixel_format);
- u32 afe_clk_khz; /* 8X Clock */
+ int afe_clk_khz;
u32 esc_clk_div_m;
- afe_clk_khz = DIV_ROUND_CLOSEST(intel_dsi->pclk * bpp,
- intel_dsi->lane_count);
-
+ afe_clk_khz = afe_clk(encoder, crtc_state);
esc_clk_div_m = DIV_ROUND_UP(afe_clk_khz, DSI_MAX_ESC_CLK);
for_each_dsi_port(port, intel_dsi->ports) {
@@ -346,7 +360,7 @@ static void get_dsi_io_power_domains(struct drm_i915_private *dev_priv,
static void gen11_dsi_enable_io_power(struct intel_encoder *encoder)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
+ struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
enum port port;
u32 tmp;
@@ -362,7 +376,7 @@ static void gen11_dsi_enable_io_power(struct intel_encoder *encoder)
static void gen11_dsi_power_up_lanes(struct intel_encoder *encoder)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
+ struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
enum phy phy;
for_each_dsi_phy(phy, intel_dsi->phys)
@@ -373,7 +387,7 @@ static void gen11_dsi_power_up_lanes(struct intel_encoder *encoder)
static void gen11_dsi_config_phy_lanes_sequence(struct intel_encoder *encoder)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
+ struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
enum phy phy;
u32 tmp;
int lane;
@@ -422,7 +436,7 @@ static void gen11_dsi_config_phy_lanes_sequence(struct intel_encoder *encoder)
static void gen11_dsi_voltage_swing_program_seq(struct intel_encoder *encoder)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
+ struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
u32 tmp;
enum phy phy;
@@ -474,7 +488,7 @@ static void gen11_dsi_voltage_swing_program_seq(struct intel_encoder *encoder)
static void gen11_dsi_enable_ddi_buffer(struct intel_encoder *encoder)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
+ struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
u32 tmp;
enum port port;
@@ -490,10 +504,12 @@ static void gen11_dsi_enable_ddi_buffer(struct intel_encoder *encoder)
}
}
-static void gen11_dsi_setup_dphy_timings(struct intel_encoder *encoder)
+static void
+gen11_dsi_setup_dphy_timings(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
+ struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
u32 tmp;
enum port port;
enum phy phy;
@@ -531,7 +547,7 @@ static void gen11_dsi_setup_dphy_timings(struct intel_encoder *encoder)
* leave all fields at HW default values.
*/
if (IS_GEN(dev_priv, 11)) {
- if (intel_dsi_bitrate(intel_dsi) <= 800000) {
+ if (afe_clk(encoder, crtc_state) <= 800000) {
for_each_dsi_port(port, intel_dsi->ports) {
tmp = I915_READ(DPHY_TA_TIMING_PARAM(port));
tmp &= ~TA_SURE_MASK;
@@ -559,7 +575,7 @@ static void gen11_dsi_setup_dphy_timings(struct intel_encoder *encoder)
static void gen11_dsi_gate_clocks(struct intel_encoder *encoder)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
+ struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
u32 tmp;
enum phy phy;
@@ -575,7 +591,7 @@ static void gen11_dsi_gate_clocks(struct intel_encoder *encoder)
static void gen11_dsi_ungate_clocks(struct intel_encoder *encoder)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
+ struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
u32 tmp;
enum phy phy;
@@ -592,7 +608,7 @@ static void gen11_dsi_map_pll(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
+ struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
struct intel_shared_dpll *pll = crtc_state->shared_dpll;
enum phy phy;
u32 val;
@@ -624,8 +640,8 @@ gen11_dsi_configure_transcoder(struct intel_encoder *encoder,
const struct intel_crtc_state *pipe_config)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
- struct intel_crtc *intel_crtc = to_intel_crtc(pipe_config->base.crtc);
+ struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
+ struct intel_crtc *intel_crtc = to_intel_crtc(pipe_config->uapi.crtc);
enum pipe pipe = intel_crtc->pipe;
u32 tmp;
enum port port;
@@ -641,7 +657,7 @@ gen11_dsi_configure_transcoder(struct intel_encoder *encoder,
tmp |= EOTP_DISABLED;
/* enable link calibration if freq > 1.5Gbps */
- if (intel_dsi_bitrate(intel_dsi) >= 1500 * 1000) {
+ if (afe_clk(encoder, pipe_config) >= 1500 * 1000) {
tmp &= ~LINK_CALIBRATION_MASK;
tmp |= CALIBRATION_ENABLED_INITIAL_ONLY;
}
@@ -667,22 +683,26 @@ gen11_dsi_configure_transcoder(struct intel_encoder *encoder,
/* select pixel format */
tmp &= ~PIX_FMT_MASK;
- switch (intel_dsi->pixel_format) {
- default:
- MISSING_CASE(intel_dsi->pixel_format);
- /* fallthrough */
- case MIPI_DSI_FMT_RGB565:
- tmp |= PIX_FMT_RGB565;
- break;
- case MIPI_DSI_FMT_RGB666_PACKED:
- tmp |= PIX_FMT_RGB666_PACKED;
- break;
- case MIPI_DSI_FMT_RGB666:
- tmp |= PIX_FMT_RGB666_LOOSE;
- break;
- case MIPI_DSI_FMT_RGB888:
- tmp |= PIX_FMT_RGB888;
- break;
+ if (pipe_config->dsc.compression_enable) {
+ tmp |= PIX_FMT_COMPRESSED;
+ } else {
+ switch (intel_dsi->pixel_format) {
+ default:
+ MISSING_CASE(intel_dsi->pixel_format);
+ /* fallthrough */
+ case MIPI_DSI_FMT_RGB565:
+ tmp |= PIX_FMT_RGB565;
+ break;
+ case MIPI_DSI_FMT_RGB666_PACKED:
+ tmp |= PIX_FMT_RGB666_PACKED;
+ break;
+ case MIPI_DSI_FMT_RGB666:
+ tmp |= PIX_FMT_RGB666_LOOSE;
+ break;
+ case MIPI_DSI_FMT_RGB888:
+ tmp |= PIX_FMT_RGB888;
+ break;
+ }
}
if (INTEL_GEN(dev_priv) >= 12) {
@@ -745,6 +765,9 @@ gen11_dsi_configure_transcoder(struct intel_encoder *encoder,
case PIPE_C:
tmp |= TRANS_DDI_EDP_INPUT_C_ONOFF;
break;
+ case PIPE_D:
+ tmp |= TRANS_DDI_EDP_INPUT_D_ONOFF;
+ break;
}
/* enable DDI buffer */
@@ -763,12 +786,12 @@ gen11_dsi_configure_transcoder(struct intel_encoder *encoder,
static void
gen11_dsi_set_transcoder_timings(struct intel_encoder *encoder,
- const struct intel_crtc_state *pipe_config)
+ const struct intel_crtc_state *crtc_state)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
+ struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
const struct drm_display_mode *adjusted_mode =
- &pipe_config->base.adjusted_mode;
+ &crtc_state->hw.adjusted_mode;
enum port port;
enum transcoder dsi_trans;
/* horizontal timings */
@@ -776,11 +799,25 @@ gen11_dsi_set_transcoder_timings(struct intel_encoder *encoder,
u16 hback_porch;
/* vertical timings */
u16 vtotal, vactive, vsync_start, vsync_end, vsync_shift;
+ int mul = 1, div = 1;
+
+ /*
+ * Adjust horizontal timings (htotal, hsync_start, hsync_end) to account
+ * for slower link speed if DSC is enabled.
+ *
+ * The compression frequency ratio is the ratio between compressed and
+ * non-compressed link speeds, and simplifies down to the ratio between
+ * compressed and non-compressed bpp.
+ */
+ if (crtc_state->dsc.compression_enable) {
+ mul = crtc_state->dsc.compressed_bpp;
+ div = mipi_dsi_pixel_format_to_bpp(intel_dsi->pixel_format);
+ }
hactive = adjusted_mode->crtc_hdisplay;
- htotal = adjusted_mode->crtc_htotal;
- hsync_start = adjusted_mode->crtc_hsync_start;
- hsync_end = adjusted_mode->crtc_hsync_end;
+ htotal = DIV_ROUND_UP(adjusted_mode->crtc_htotal * mul, div);
+ hsync_start = DIV_ROUND_UP(adjusted_mode->crtc_hsync_start * mul, div);
+ hsync_end = DIV_ROUND_UP(adjusted_mode->crtc_hsync_end * mul, div);
hsync_size = hsync_end - hsync_start;
hback_porch = (adjusted_mode->crtc_htotal -
adjusted_mode->crtc_hsync_end);
@@ -886,7 +923,7 @@ gen11_dsi_set_transcoder_timings(struct intel_encoder *encoder,
static void gen11_dsi_enable_transcoder(struct intel_encoder *encoder)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
+ struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
enum port port;
enum transcoder dsi_trans;
u32 tmp;
@@ -904,10 +941,11 @@ static void gen11_dsi_enable_transcoder(struct intel_encoder *encoder)
}
}
-static void gen11_dsi_setup_timeouts(struct intel_encoder *encoder)
+static void gen11_dsi_setup_timeouts(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
+ struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
enum port port;
enum transcoder dsi_trans;
u32 tmp, hs_tx_timeout, lp_rx_timeout, ta_timeout, divisor, mul;
@@ -919,7 +957,7 @@ static void gen11_dsi_setup_timeouts(struct intel_encoder *encoder)
* TIME_NS = (BYTE_CLK_COUNT * 8 * 10^6)/ Bitrate
* ESCAPE_CLK_COUNT = TIME_NS/ESC_CLK_NS
*/
- divisor = intel_dsi_tlpx_ns(intel_dsi) * intel_dsi_bitrate(intel_dsi) * 1000;
+ divisor = intel_dsi_tlpx_ns(intel_dsi) * afe_clk(encoder, crtc_state) * 1000;
mul = 8 * 1000000;
hs_tx_timeout = DIV_ROUND_UP(intel_dsi->hs_tx_timeout * mul,
divisor);
@@ -955,7 +993,7 @@ static void gen11_dsi_setup_timeouts(struct intel_encoder *encoder)
static void
gen11_dsi_enable_port_and_phy(struct intel_encoder *encoder,
- const struct intel_crtc_state *pipe_config)
+ const struct intel_crtc_state *crtc_state)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
@@ -972,13 +1010,13 @@ gen11_dsi_enable_port_and_phy(struct intel_encoder *encoder,
gen11_dsi_enable_ddi_buffer(encoder);
/* setup D-PHY timings */
- gen11_dsi_setup_dphy_timings(encoder);
+ gen11_dsi_setup_dphy_timings(encoder, crtc_state);
/* step 4h: setup DSI protocol timeouts */
- gen11_dsi_setup_timeouts(encoder);
+ gen11_dsi_setup_timeouts(encoder, crtc_state);
/* Step (4h, 4i, 4j, 4k): Configure transcoder */
- gen11_dsi_configure_transcoder(encoder, pipe_config);
+ gen11_dsi_configure_transcoder(encoder, crtc_state);
/* Step 4l: Gate DDI clocks */
if (IS_GEN(dev_priv, 11))
@@ -988,7 +1026,7 @@ gen11_dsi_enable_port_and_phy(struct intel_encoder *encoder,
static void gen11_dsi_powerup_panel(struct intel_encoder *encoder)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
+ struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
struct mipi_dsi_device *dsi;
enum port port;
enum transcoder dsi_trans;
@@ -1025,21 +1063,21 @@ static void gen11_dsi_powerup_panel(struct intel_encoder *encoder)
}
static void gen11_dsi_pre_pll_enable(struct intel_encoder *encoder,
- const struct intel_crtc_state *pipe_config,
+ const struct intel_crtc_state *crtc_state,
const struct drm_connector_state *conn_state)
{
/* step2: enable IO power */
gen11_dsi_enable_io_power(encoder);
/* step3: enable DSI PLL */
- gen11_dsi_program_esc_clk_div(encoder);
+ gen11_dsi_program_esc_clk_div(encoder, crtc_state);
}
static void gen11_dsi_pre_enable(struct intel_encoder *encoder,
const struct intel_crtc_state *pipe_config,
const struct drm_connector_state *conn_state)
{
- struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
+ struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
/* step3b */
gen11_dsi_map_pll(encoder, pipe_config);
@@ -1050,6 +1088,8 @@ static void gen11_dsi_pre_enable(struct intel_encoder *encoder,
/* step5: program and powerup panel */
gen11_dsi_powerup_panel(encoder);
+ intel_dsc_enable(encoder, pipe_config);
+
/* step6c: configure transcoder timings */
gen11_dsi_set_transcoder_timings(encoder, pipe_config);
@@ -1064,7 +1104,7 @@ static void gen11_dsi_pre_enable(struct intel_encoder *encoder,
static void gen11_dsi_disable_transcoder(struct intel_encoder *encoder)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
+ struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
enum port port;
enum transcoder dsi_trans;
u32 tmp;
@@ -1086,7 +1126,7 @@ static void gen11_dsi_disable_transcoder(struct intel_encoder *encoder)
static void gen11_dsi_powerdown_panel(struct intel_encoder *encoder)
{
- struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
+ struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
intel_dsi_vbt_exec_sequence(intel_dsi, MIPI_SEQ_DISPLAY_OFF);
intel_dsi_vbt_exec_sequence(intel_dsi, MIPI_SEQ_ASSERT_RESET);
@@ -1099,7 +1139,7 @@ static void gen11_dsi_powerdown_panel(struct intel_encoder *encoder)
static void gen11_dsi_deconfigure_trancoder(struct intel_encoder *encoder)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
+ struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
enum port port;
enum transcoder dsi_trans;
u32 tmp;
@@ -1140,7 +1180,7 @@ static void gen11_dsi_deconfigure_trancoder(struct intel_encoder *encoder)
static void gen11_dsi_disable_port(struct intel_encoder *encoder)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
+ struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
u32 tmp;
enum port port;
@@ -1162,7 +1202,7 @@ static void gen11_dsi_disable_port(struct intel_encoder *encoder)
static void gen11_dsi_disable_io_power(struct intel_encoder *encoder)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
+ struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
enum port port;
u32 tmp;
@@ -1189,7 +1229,7 @@ static void gen11_dsi_disable(struct intel_encoder *encoder,
const struct intel_crtc_state *old_crtc_state,
const struct drm_connector_state *old_conn_state)
{
- struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
+ struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
/* step1: turn off backlight */
intel_dsi_vbt_exec_sequence(intel_dsi, MIPI_SEQ_BACKLIGHT_OFF);
@@ -1211,12 +1251,42 @@ static void gen11_dsi_disable(struct intel_encoder *encoder,
gen11_dsi_disable_io_power(encoder);
}
+static void gen11_dsi_post_disable(struct intel_encoder *encoder,
+ const struct intel_crtc_state *old_crtc_state,
+ const struct drm_connector_state *old_conn_state)
+{
+ intel_crtc_vblank_off(old_crtc_state);
+
+ intel_dsc_disable(old_crtc_state);
+
+ skl_scaler_disable(old_crtc_state);
+}
+
+static enum drm_mode_status gen11_dsi_mode_valid(struct drm_connector *connector,
+ struct drm_display_mode *mode)
+{
+ /* FIXME: DSC? */
+ return intel_dsi_mode_valid(connector, mode);
+}
+
static void gen11_dsi_get_timings(struct intel_encoder *encoder,
struct intel_crtc_state *pipe_config)
{
- struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
+ struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
struct drm_display_mode *adjusted_mode =
- &pipe_config->base.adjusted_mode;
+ &pipe_config->hw.adjusted_mode;
+
+ if (pipe_config->dsc.compressed_bpp) {
+ int div = pipe_config->dsc.compressed_bpp;
+ int mul = mipi_dsi_pixel_format_to_bpp(intel_dsi->pixel_format);
+
+ adjusted_mode->crtc_htotal =
+ DIV_ROUND_UP(adjusted_mode->crtc_htotal * mul, div);
+ adjusted_mode->crtc_hsync_start =
+ DIV_ROUND_UP(adjusted_mode->crtc_hsync_start * mul, div);
+ adjusted_mode->crtc_hsync_end =
+ DIV_ROUND_UP(adjusted_mode->crtc_hsync_end * mul, div);
+ }
if (intel_dsi->dual_link) {
adjusted_mode->crtc_hdisplay *= 2;
@@ -1242,22 +1312,66 @@ static void gen11_dsi_get_config(struct intel_encoder *encoder,
struct intel_crtc_state *pipe_config)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- struct intel_crtc *crtc = to_intel_crtc(pipe_config->base.crtc);
- struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
+ struct intel_crtc *crtc = to_intel_crtc(pipe_config->uapi.crtc);
+ struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
+
+ intel_dsc_get_config(encoder, pipe_config);
/* FIXME: adapt icl_ddi_clock_get() for DSI and use that? */
pipe_config->port_clock =
cnl_calc_wrpll_link(dev_priv, &pipe_config->dpll_hw_state);
- pipe_config->base.adjusted_mode.crtc_clock = intel_dsi->pclk;
+ pipe_config->hw.adjusted_mode.crtc_clock = intel_dsi->pclk;
if (intel_dsi->dual_link)
- pipe_config->base.adjusted_mode.crtc_clock *= 2;
+ pipe_config->hw.adjusted_mode.crtc_clock *= 2;
gen11_dsi_get_timings(encoder, pipe_config);
pipe_config->output_types |= BIT(INTEL_OUTPUT_DSI);
pipe_config->pipe_bpp = bdw_get_pipemisc_bpp(crtc);
}
+static int gen11_dsi_dsc_compute_config(struct intel_encoder *encoder,
+ struct intel_crtc_state *crtc_state)
+{
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ struct drm_dsc_config *vdsc_cfg = &crtc_state->dsc.config;
+ int dsc_max_bpc = INTEL_GEN(dev_priv) >= 12 ? 12 : 10;
+ bool use_dsc;
+ int ret;
+
+ use_dsc = intel_bios_get_dsc_params(encoder, crtc_state, dsc_max_bpc);
+ if (!use_dsc)
+ return 0;
+
+ if (crtc_state->pipe_bpp < 8 * 3)
+ return -EINVAL;
+
+ /* FIXME: split only when necessary */
+ if (crtc_state->dsc.slice_count > 1)
+ crtc_state->dsc.dsc_split = true;
+
+ vdsc_cfg->convert_rgb = true;
+
+ ret = intel_dsc_compute_params(encoder, crtc_state);
+ if (ret)
+ return ret;
+
+ /* DSI specific sanity checks on the common code */
+ WARN_ON(vdsc_cfg->vbr_enable);
+ WARN_ON(vdsc_cfg->simple_422);
+ WARN_ON(vdsc_cfg->pic_width % vdsc_cfg->slice_width);
+ WARN_ON(vdsc_cfg->slice_height < 8);
+ WARN_ON(vdsc_cfg->pic_height % vdsc_cfg->slice_height);
+
+ ret = drm_dsc_compute_rc_parameters(vdsc_cfg);
+ if (ret)
+ return ret;
+
+ crtc_state->dsc.compression_enable = true;
+
+ return 0;
+}
+
static int gen11_dsi_compute_config(struct intel_encoder *encoder,
struct intel_crtc_state *pipe_config,
struct drm_connector_state *conn_state)
@@ -1265,11 +1379,11 @@ static int gen11_dsi_compute_config(struct intel_encoder *encoder,
struct intel_dsi *intel_dsi = container_of(encoder, struct intel_dsi,
base);
struct intel_connector *intel_connector = intel_dsi->attached_connector;
- struct intel_crtc *crtc = to_intel_crtc(pipe_config->base.crtc);
+ struct intel_crtc *crtc = to_intel_crtc(pipe_config->uapi.crtc);
const struct drm_display_mode *fixed_mode =
intel_connector->panel.fixed_mode;
struct drm_display_mode *adjusted_mode =
- &pipe_config->base.adjusted_mode;
+ &pipe_config->hw.adjusted_mode;
pipe_config->output_format = INTEL_OUTPUT_FORMAT_RGB;
intel_fixed_panel_mode(fixed_mode, adjusted_mode);
@@ -1283,8 +1397,17 @@ static int gen11_dsi_compute_config(struct intel_encoder *encoder,
else
pipe_config->cpu_transcoder = TRANSCODER_DSI_0;
+ if (intel_dsi->pixel_format == MIPI_DSI_FMT_RGB888)
+ pipe_config->pipe_bpp = 24;
+ else
+ pipe_config->pipe_bpp = 18;
+
pipe_config->clock_set = true;
- pipe_config->port_clock = intel_dsi_bitrate(intel_dsi) / 5;
+
+ if (gen11_dsi_dsc_compute_config(encoder, pipe_config))
+ DRM_DEBUG_KMS("Attempting to use DSC failed\n");
+
+ pipe_config->port_clock = afe_clk(encoder, pipe_config) / 5;
return 0;
}
@@ -1292,15 +1415,21 @@ static int gen11_dsi_compute_config(struct intel_encoder *encoder,
static void gen11_dsi_get_power_domains(struct intel_encoder *encoder,
struct intel_crtc_state *crtc_state)
{
- get_dsi_io_power_domains(to_i915(encoder->base.dev),
- enc_to_intel_dsi(&encoder->base));
+ struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+
+ get_dsi_io_power_domains(i915,
+ enc_to_intel_dsi(encoder));
+
+ if (crtc_state->dsc.compression_enable)
+ intel_display_power_get(i915,
+ intel_dsc_power_domain(crtc_state));
}
static bool gen11_dsi_get_hw_state(struct intel_encoder *encoder,
enum pipe *pipe)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
+ struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
enum transcoder dsi_trans;
intel_wakeref_t wakeref;
enum port port;
@@ -1325,6 +1454,9 @@ static bool gen11_dsi_get_hw_state(struct intel_encoder *encoder,
case TRANS_DDI_EDP_INPUT_C_ONOFF:
*pipe = PIPE_C;
break;
+ case TRANS_DDI_EDP_INPUT_D_ONOFF:
+ *pipe = PIPE_D;
+ break;
default:
DRM_ERROR("Invalid PIPE input\n");
goto out;
@@ -1360,7 +1492,7 @@ static const struct drm_connector_funcs gen11_dsi_connector_funcs = {
static const struct drm_connector_helper_funcs gen11_dsi_connector_helper_funcs = {
.get_modes = intel_dsi_get_modes,
- .mode_valid = intel_dsi_mode_valid,
+ .mode_valid = gen11_dsi_mode_valid,
.atomic_check = intel_digital_connector_atomic_check,
};
@@ -1577,6 +1709,7 @@ void icl_dsi_init(struct drm_i915_private *dev_priv)
encoder->pre_pll_enable = gen11_dsi_pre_pll_enable;
encoder->pre_enable = gen11_dsi_pre_enable;
encoder->disable = gen11_dsi_disable;
+ encoder->post_disable = gen11_dsi_post_disable;
encoder->port = port;
encoder->get_config = gen11_dsi_get_config;
encoder->update_pipe = intel_panel_update_backlight;
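
Two recurring changes in the icl_dsi.c diff above merit a worked example: afe_clk() now derives the DSI 8X clock from the (possibly DSC-compressed) bpp, and the horizontal timings are scaled by the compressed/uncompressed bpp ratio. A standalone sketch with assumed panel values (none of these numbers come from the patch):

#include <stdio.h>

#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))
#define DIV_ROUND_CLOSEST(n, d)	(((n) + (d) / 2) / (d))

int main(void)
{
	int pclk = 148500;	/* kHz, assumed pixel clock */
	int bpp = 24;		/* RGB888 */
	int dsc_bpp = 12;	/* assumed DSC target bpp */
	int lanes = 4;
	int htotal = 2200;	/* assumed */

	/* afe_clk(): pclk * bpp / lane_count */
	printf("afe_clk, uncompressed: %d kHz\n",
	       DIV_ROUND_CLOSEST(pclk * bpp, lanes));		/* 891000 */
	printf("afe_clk, DSC:          %d kHz\n",
	       DIV_ROUND_CLOSEST(pclk * dsc_bpp, lanes));	/* 445500 */

	/* gen11_dsi_set_transcoder_timings(): htotal * mul / div */
	printf("htotal, DSC:           %d\n",
	       DIV_ROUND_UP(htotal * dsc_bpp, bpp));		/* 1100 */
	return 0;
}

gen11_dsi_get_timings() applies the inverse scaling when reading hardware state back, which is why it multiplies by the uncompressed bpp and divides by the compressed bpp.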
diff --git a/drivers/gpu/drm/i915/display/intel_atomic.c b/drivers/gpu/drm/i915/display/intel_atomic.c
index c2875b10adf9..c362eecdd414 100644
--- a/drivers/gpu/drm/i915/display/intel_atomic.c
+++ b/drivers/gpu/drm/i915/display/intel_atomic.c
@@ -37,6 +37,7 @@
#include "intel_atomic.h"
#include "intel_display_types.h"
#include "intel_hdcp.h"
+#include "intel_psr.h"
#include "intel_sprite.h"
/**
@@ -129,6 +130,7 @@ int intel_digital_connector_atomic_check(struct drm_connector *conn,
struct drm_crtc_state *crtc_state;
intel_hdcp_atomic_check(conn, old_state, new_state);
+ intel_psr_atomic_check(conn, old_state, new_state);
if (!new_state->crtc)
return 0;
@@ -175,6 +177,38 @@ intel_digital_connector_duplicate_state(struct drm_connector *connector)
}
/**
+ * intel_connector_needs_modeset - check if connector needs a modeset
+ */
+bool
+intel_connector_needs_modeset(struct intel_atomic_state *state,
+ struct drm_connector *connector)
+{
+ const struct drm_connector_state *old_conn_state, *new_conn_state;
+
+ old_conn_state = drm_atomic_get_old_connector_state(&state->base, connector);
+ new_conn_state = drm_atomic_get_new_connector_state(&state->base, connector);
+
+ return old_conn_state->crtc != new_conn_state->crtc ||
+ (new_conn_state->crtc &&
+ drm_atomic_crtc_needs_modeset(drm_atomic_get_new_crtc_state(&state->base,
+ new_conn_state->crtc)));
+}
+
+struct intel_digital_connector_state *
+intel_atomic_get_digital_connector_state(struct intel_atomic_state *state,
+ struct intel_connector *connector)
+{
+ struct drm_connector_state *conn_state;
+
+ conn_state = drm_atomic_get_connector_state(&state->base,
+ &connector->base);
+ if (IS_ERR(conn_state))
+ return ERR_CAST(conn_state);
+
+ return to_intel_digital_connector_state(conn_state);
+}
+
+/**
* intel_crtc_duplicate_state - duplicate crtc state
* @crtc: drm crtc
*
@@ -186,13 +220,22 @@ intel_digital_connector_duplicate_state(struct drm_connector *connector)
struct drm_crtc_state *
intel_crtc_duplicate_state(struct drm_crtc *crtc)
{
+ const struct intel_crtc_state *old_crtc_state = to_intel_crtc_state(crtc->state);
struct intel_crtc_state *crtc_state;
- crtc_state = kmemdup(crtc->state, sizeof(*crtc_state), GFP_KERNEL);
+ crtc_state = kmemdup(old_crtc_state, sizeof(*crtc_state), GFP_KERNEL);
if (!crtc_state)
return NULL;
- __drm_atomic_helper_crtc_duplicate_state(crtc, &crtc_state->base);
+ __drm_atomic_helper_crtc_duplicate_state(crtc, &crtc_state->uapi);
+
+ /* copy color blobs */
+ if (crtc_state->hw.degamma_lut)
+ drm_property_blob_get(crtc_state->hw.degamma_lut);
+ if (crtc_state->hw.ctm)
+ drm_property_blob_get(crtc_state->hw.ctm);
+ if (crtc_state->hw.gamma_lut)
+ drm_property_blob_get(crtc_state->hw.gamma_lut);
crtc_state->update_pipe = false;
crtc_state->disable_lp_wm = false;
@@ -205,7 +248,29 @@ intel_crtc_duplicate_state(struct drm_crtc *crtc)
crtc_state->fb_bits = 0;
crtc_state->update_planes = 0;
- return &crtc_state->base;
+ return &crtc_state->uapi;
+}
+
+static void intel_crtc_put_color_blobs(struct intel_crtc_state *crtc_state)
+{
+ drm_property_blob_put(crtc_state->hw.degamma_lut);
+ drm_property_blob_put(crtc_state->hw.gamma_lut);
+ drm_property_blob_put(crtc_state->hw.ctm);
+}
+
+void intel_crtc_free_hw_state(struct intel_crtc_state *crtc_state)
+{
+ intel_crtc_put_color_blobs(crtc_state);
+}
+
+void intel_crtc_copy_color_blobs(struct intel_crtc_state *crtc_state)
+{
+ drm_property_replace_blob(&crtc_state->hw.degamma_lut,
+ crtc_state->uapi.degamma_lut);
+ drm_property_replace_blob(&crtc_state->hw.gamma_lut,
+ crtc_state->uapi.gamma_lut);
+ drm_property_replace_blob(&crtc_state->hw.ctm,
+ crtc_state->uapi.ctm);
}
/**
@@ -220,7 +285,11 @@ void
intel_crtc_destroy_state(struct drm_crtc *crtc,
struct drm_crtc_state *state)
{
- drm_atomic_helper_crtc_destroy_state(crtc, state);
+ struct intel_crtc_state *crtc_state = to_intel_crtc_state(state);
+
+ __drm_atomic_helper_crtc_destroy_state(&crtc_state->uapi);
+ intel_crtc_free_hw_state(crtc_state);
+ kfree(crtc_state);
}
static void intel_atomic_setup_scaler(struct intel_crtc_scaler_state *scaler_state,
@@ -249,10 +318,10 @@ static void intel_atomic_setup_scaler(struct intel_crtc_scaler_state *scaler_sta
return;
/* set scaler mode */
- if (plane_state && plane_state->base.fb &&
- plane_state->base.fb->format->is_yuv &&
- plane_state->base.fb->format->num_planes > 1) {
- struct intel_plane *plane = to_intel_plane(plane_state->base.plane);
+ if (plane_state && plane_state->hw.fb &&
+ plane_state->hw.fb->format->is_yuv &&
+ plane_state->hw.fb->format->num_planes > 1) {
+ struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
if (IS_GEN(dev_priv, 9) &&
!IS_GEMINILAKE(dev_priv)) {
mode = SKL_PS_SCALER_MODE_NV12;
@@ -319,7 +388,7 @@ int intel_atomic_setup_scalers(struct drm_i915_private *dev_priv,
struct intel_plane_state *plane_state = NULL;
struct intel_crtc_scaler_state *scaler_state =
&crtc_state->scaler_state;
- struct drm_atomic_state *drm_state = crtc_state->base.state;
+ struct drm_atomic_state *drm_state = crtc_state->uapi.state;
struct intel_atomic_state *intel_state = to_intel_atomic_state(drm_state);
int num_scalers_need;
int i;
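
The duplicate/destroy rework above follows from the crtc state growing a hw-side copy of the color-management blobs: drm_property_blob objects are reference counted, so pointers copied by kmemdup() must gain references and intel_crtc_destroy_state() must drop exactly those. A minimal sketch of the discipline, with hypothetical "example_" wrappers around the real calls:

static void example_get_blobs(struct intel_crtc_state *crtc_state)
{
	/* after kmemdup(): take our own reference on each copied blob */
	if (crtc_state->hw.degamma_lut)
		drm_property_blob_get(crtc_state->hw.degamma_lut);
	if (crtc_state->hw.ctm)
		drm_property_blob_get(crtc_state->hw.ctm);
	if (crtc_state->hw.gamma_lut)
		drm_property_blob_get(crtc_state->hw.gamma_lut);
}

static void example_put_blobs(struct intel_crtc_state *crtc_state)
{
	/* drm_property_blob_put() accepts NULL, so no checks needed */
	drm_property_blob_put(crtc_state->hw.degamma_lut);
	drm_property_blob_put(crtc_state->hw.gamma_lut);
	drm_property_blob_put(crtc_state->hw.ctm);
}

intel_crtc_copy_color_blobs() then syncs uapi to hw through drm_property_replace_blob(), which itself handles the get/put pairing for the blobs being swapped in and out.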
diff --git a/drivers/gpu/drm/i915/display/intel_atomic.h b/drivers/gpu/drm/i915/display/intel_atomic.h
index 49d5cb1b9e0a..74c749dbfb4f 100644
--- a/drivers/gpu/drm/i915/display/intel_atomic.h
+++ b/drivers/gpu/drm/i915/display/intel_atomic.h
@@ -17,6 +17,7 @@ struct drm_device;
struct drm_i915_private;
struct drm_property;
struct intel_atomic_state;
+struct intel_connector;
struct intel_crtc;
struct intel_crtc_state;
@@ -32,10 +33,17 @@ int intel_digital_connector_atomic_check(struct drm_connector *conn,
struct drm_atomic_state *state);
struct drm_connector_state *
intel_digital_connector_duplicate_state(struct drm_connector *connector);
+bool intel_connector_needs_modeset(struct intel_atomic_state *state,
+ struct drm_connector *connector);
+struct intel_digital_connector_state *
+intel_atomic_get_digital_connector_state(struct intel_atomic_state *state,
+ struct intel_connector *connector);
struct drm_crtc_state *intel_crtc_duplicate_state(struct drm_crtc *crtc);
void intel_crtc_destroy_state(struct drm_crtc *crtc,
struct drm_crtc_state *state);
+void intel_crtc_free_hw_state(struct intel_crtc_state *crtc_state);
+void intel_crtc_copy_color_blobs(struct intel_crtc_state *crtc_state);
struct drm_atomic_state *intel_atomic_state_alloc(struct drm_device *dev);
void intel_atomic_state_clear(struct drm_atomic_state *state);
diff --git a/drivers/gpu/drm/i915/display/intel_atomic_plane.c b/drivers/gpu/drm/i915/display/intel_atomic_plane.c
index 98f557a9f8ee..3e97af682b1b 100644
--- a/drivers/gpu/drm/i915/display/intel_atomic_plane.c
+++ b/drivers/gpu/drm/i915/display/intel_atomic_plane.c
@@ -41,6 +41,16 @@
#include "intel_pm.h"
#include "intel_sprite.h"
+static void intel_plane_state_reset(struct intel_plane_state *plane_state,
+ struct intel_plane *plane)
+{
+ memset(plane_state, 0, sizeof(*plane_state));
+
+ __drm_atomic_helper_plane_state_reset(&plane_state->uapi, &plane->base);
+
+ plane_state->scaler_id = -1;
+}
+
struct intel_plane *intel_plane_alloc(void)
{
struct intel_plane_state *plane_state;
@@ -56,8 +66,9 @@ struct intel_plane *intel_plane_alloc(void)
return ERR_PTR(-ENOMEM);
}
- __drm_atomic_helper_plane_reset(&plane->base, &plane_state->base);
- plane_state->scaler_id = -1;
+ intel_plane_state_reset(plane_state, plane);
+
+ plane->base.state = &plane_state->uapi;
return plane;
}
@@ -80,22 +91,24 @@ void intel_plane_free(struct intel_plane *plane)
struct drm_plane_state *
intel_plane_duplicate_state(struct drm_plane *plane)
{
- struct drm_plane_state *state;
struct intel_plane_state *intel_state;
- intel_state = kmemdup(plane->state, sizeof(*intel_state), GFP_KERNEL);
+ intel_state = to_intel_plane_state(plane->state);
+ intel_state = kmemdup(intel_state, sizeof(*intel_state), GFP_KERNEL);
if (!intel_state)
return NULL;
- state = &intel_state->base;
-
- __drm_atomic_helper_plane_duplicate_state(plane, state);
+ __drm_atomic_helper_plane_duplicate_state(plane, &intel_state->uapi);
intel_state->vma = NULL;
intel_state->flags = 0;
- return state;
+ /* add reference to fb */
+ if (intel_state->hw.fb)
+ drm_framebuffer_get(intel_state->hw.fb);
+
+ return &intel_state->uapi;
}
/**
@@ -110,18 +123,22 @@ void
intel_plane_destroy_state(struct drm_plane *plane,
struct drm_plane_state *state)
{
- WARN_ON(to_intel_plane_state(state)->vma);
+ struct intel_plane_state *plane_state = to_intel_plane_state(state);
+ WARN_ON(plane_state->vma);
- drm_atomic_helper_plane_destroy_state(plane, state);
+ __drm_atomic_helper_plane_destroy_state(&plane_state->uapi);
+ if (plane_state->hw.fb)
+ drm_framebuffer_put(plane_state->hw.fb);
+ kfree(plane_state);
}
unsigned int intel_plane_data_rate(const struct intel_crtc_state *crtc_state,
const struct intel_plane_state *plane_state)
{
- const struct drm_framebuffer *fb = plane_state->base.fb;
+ const struct drm_framebuffer *fb = plane_state->hw.fb;
unsigned int cpp;
- if (!plane_state->base.visible)
+ if (!plane_state->uapi.visible)
return 0;
cpp = fb->format->cpp[0];
@@ -144,10 +161,10 @@ bool intel_plane_calc_min_cdclk(struct intel_atomic_state *state,
struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
const struct intel_plane_state *plane_state =
intel_atomic_get_new_plane_state(state, plane);
- struct intel_crtc *crtc = to_intel_crtc(plane_state->base.crtc);
+ struct intel_crtc *crtc = to_intel_crtc(plane_state->hw.crtc);
struct intel_crtc_state *crtc_state;
- if (!plane_state->base.visible || !plane->min_cdclk)
+ if (!plane_state->uapi.visible || !plane->min_cdclk)
return false;
crtc_state = intel_atomic_get_new_crtc_state(state, crtc);
@@ -176,23 +193,52 @@ bool intel_plane_calc_min_cdclk(struct intel_atomic_state *state,
return false;
}
+static void intel_plane_clear_hw_state(struct intel_plane_state *plane_state)
+{
+ if (plane_state->hw.fb)
+ drm_framebuffer_put(plane_state->hw.fb);
+
+ memset(&plane_state->hw, 0, sizeof(plane_state->hw));
+}
+
+void intel_plane_copy_uapi_to_hw_state(struct intel_plane_state *plane_state,
+ const struct intel_plane_state *from_plane_state)
+{
+ intel_plane_clear_hw_state(plane_state);
+
+ plane_state->hw.crtc = from_plane_state->uapi.crtc;
+ plane_state->hw.fb = from_plane_state->uapi.fb;
+ if (plane_state->hw.fb)
+ drm_framebuffer_get(plane_state->hw.fb);
+
+ plane_state->hw.alpha = from_plane_state->uapi.alpha;
+ plane_state->hw.pixel_blend_mode =
+ from_plane_state->uapi.pixel_blend_mode;
+ plane_state->hw.rotation = from_plane_state->uapi.rotation;
+ plane_state->hw.color_encoding = from_plane_state->uapi.color_encoding;
+ plane_state->hw.color_range = from_plane_state->uapi.color_range;
+}
+
int intel_plane_atomic_check_with_state(const struct intel_crtc_state *old_crtc_state,
struct intel_crtc_state *new_crtc_state,
const struct intel_plane_state *old_plane_state,
struct intel_plane_state *new_plane_state)
{
- struct intel_plane *plane = to_intel_plane(new_plane_state->base.plane);
- const struct drm_framebuffer *fb = new_plane_state->base.fb;
+ struct intel_plane *plane = to_intel_plane(new_plane_state->uapi.plane);
+ const struct drm_framebuffer *fb;
int ret;
+ intel_plane_copy_uapi_to_hw_state(new_plane_state, new_plane_state);
+ fb = new_plane_state->hw.fb;
+
new_crtc_state->active_planes &= ~BIT(plane->id);
new_crtc_state->nv12_planes &= ~BIT(plane->id);
new_crtc_state->c8_planes &= ~BIT(plane->id);
new_crtc_state->data_rate[plane->id] = 0;
new_crtc_state->min_cdclk[plane->id] = 0;
- new_plane_state->base.visible = false;
+ new_plane_state->uapi.visible = false;
- if (!new_plane_state->base.crtc && !old_plane_state->base.crtc)
+ if (!new_plane_state->hw.crtc && !old_plane_state->hw.crtc)
return 0;
ret = plane->check_plane(new_crtc_state, new_plane_state);
@@ -200,18 +246,18 @@ int intel_plane_atomic_check_with_state(const struct intel_crtc_state *old_crtc_
return ret;
/* FIXME pre-g4x don't work like this */
- if (new_plane_state->base.visible)
+ if (new_plane_state->uapi.visible)
new_crtc_state->active_planes |= BIT(plane->id);
- if (new_plane_state->base.visible &&
- drm_format_info_is_yuv_semiplanar(fb->format))
+ if (new_plane_state->uapi.visible &&
+ intel_format_info_is_yuv_semiplanar(fb->format, fb->modifier))
new_crtc_state->nv12_planes |= BIT(plane->id);
- if (new_plane_state->base.visible &&
+ if (new_plane_state->uapi.visible &&
fb->format->format == DRM_FORMAT_C8)
new_crtc_state->c8_planes |= BIT(plane->id);
- if (new_plane_state->base.visible || old_plane_state->base.visible)
+ if (new_plane_state->uapi.visible || old_plane_state->uapi.visible)
new_crtc_state->update_planes |= BIT(plane->id);
new_crtc_state->data_rate[plane->id] =
@@ -225,11 +271,11 @@ static struct intel_crtc *
get_crtc_from_states(const struct intel_plane_state *old_plane_state,
const struct intel_plane_state *new_plane_state)
{
- if (new_plane_state->base.crtc)
- return to_intel_crtc(new_plane_state->base.crtc);
+ if (new_plane_state->uapi.crtc)
+ return to_intel_crtc(new_plane_state->uapi.crtc);
- if (old_plane_state->base.crtc)
- return to_intel_crtc(old_plane_state->base.crtc);
+ if (old_plane_state->uapi.crtc)
+ return to_intel_crtc(old_plane_state->uapi.crtc);
return NULL;
}
@@ -246,7 +292,7 @@ int intel_plane_atomic_check(struct intel_atomic_state *state,
const struct intel_crtc_state *old_crtc_state;
struct intel_crtc_state *new_crtc_state;
- new_plane_state->base.visible = false;
+ new_plane_state->uapi.visible = false;
if (!crtc)
return 0;
@@ -307,26 +353,16 @@ void intel_update_plane(struct intel_plane *plane,
const struct intel_crtc_state *crtc_state,
const struct intel_plane_state *plane_state)
{
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
trace_intel_update_plane(&plane->base, crtc);
plane->update_plane(plane, crtc_state, plane_state);
}
-void intel_update_slave(struct intel_plane *plane,
- const struct intel_crtc_state *crtc_state,
- const struct intel_plane_state *plane_state)
-{
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
-
- trace_intel_update_plane(&plane->base, crtc);
- plane->update_slave(plane, crtc_state, plane_state);
-}
-
void intel_disable_plane(struct intel_plane *plane,
const struct intel_crtc_state *crtc_state)
{
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
trace_intel_disable_plane(&plane->base, crtc);
plane->disable_plane(plane, crtc_state);
@@ -355,25 +391,9 @@ void skl_update_planes_on_crtc(struct intel_atomic_state *state,
struct intel_plane_state *new_plane_state =
intel_atomic_get_new_plane_state(state, plane);
- if (new_plane_state->base.visible) {
+ if (new_plane_state->uapi.visible ||
+ new_plane_state->planar_slave) {
intel_update_plane(plane, new_crtc_state, new_plane_state);
- } else if (new_plane_state->planar_slave) {
- struct intel_plane *master =
- new_plane_state->planar_linked_plane;
-
- /*
- * We update the slave plane from this function because
- * programming it from the master plane's update_plane
- * callback runs into issues when the Y plane is
- * reassigned, disabled or used by a different plane.
- *
- * The slave plane is updated with the master plane's
- * plane_state.
- */
- new_plane_state =
- intel_atomic_get_new_plane_state(state, master);
-
- intel_update_slave(plane, new_crtc_state, new_plane_state);
} else {
intel_disable_plane(plane, new_crtc_state);
}
@@ -395,7 +415,7 @@ void i9xx_update_planes_on_crtc(struct intel_atomic_state *state,
!(update_mask & BIT(plane->id)))
continue;
- if (new_plane_state->base.visible)
+ if (new_plane_state->uapi.visible)
intel_update_plane(plane, new_crtc_state, new_plane_state);
else
intel_disable_plane(plane, new_crtc_state);
diff --git a/drivers/gpu/drm/i915/display/intel_atomic_plane.h b/drivers/gpu/drm/i915/display/intel_atomic_plane.h
index e61e9a82aadf..5cedafdddb55 100644
--- a/drivers/gpu/drm/i915/display/intel_atomic_plane.h
+++ b/drivers/gpu/drm/i915/display/intel_atomic_plane.h
@@ -20,12 +20,11 @@ extern const struct drm_plane_helper_funcs intel_plane_helper_funcs;
unsigned int intel_plane_data_rate(const struct intel_crtc_state *crtc_state,
const struct intel_plane_state *plane_state);
+void intel_plane_copy_uapi_to_hw_state(struct intel_plane_state *plane_state,
+ const struct intel_plane_state *from_plane_state);
void intel_update_plane(struct intel_plane *plane,
const struct intel_crtc_state *crtc_state,
const struct intel_plane_state *plane_state);
-void intel_update_slave(struct intel_plane *plane,
- const struct intel_crtc_state *crtc_state,
- const struct intel_plane_state *plane_state);
void intel_disable_plane(struct intel_plane *plane,
const struct intel_crtc_state *crtc_state);
struct intel_plane *intel_plane_alloc(void);
diff --git a/drivers/gpu/drm/i915/display/intel_audio.c b/drivers/gpu/drm/i915/display/intel_audio.c
index 3a5ac13d5801..b18040793d9e 100644
--- a/drivers/gpu/drm/i915/display/intel_audio.c
+++ b/drivers/gpu/drm/i915/display/intel_audio.c
@@ -234,7 +234,7 @@ static const struct hdmi_aud_ncts hdmi_aud_ncts_36bpp[] = {
static u32 audio_config_hdmi_pixel_clock(const struct intel_crtc_state *crtc_state)
{
const struct drm_display_mode *adjusted_mode =
- &crtc_state->base.adjusted_mode;
+ &crtc_state->hw.adjusted_mode;
int i;
for (i = 0; i < ARRAY_SIZE(hdmi_audio_clock); i++) {
@@ -555,7 +555,7 @@ static void ilk_audio_codec_disable(struct intel_encoder *encoder,
const struct drm_connector_state *old_conn_state)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->base.crtc);
+ struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->uapi.crtc);
enum pipe pipe = crtc->pipe;
enum port port = encoder->port;
u32 tmp, eldv;
@@ -602,7 +602,7 @@ static void ilk_audio_codec_enable(struct intel_encoder *encoder,
const struct drm_connector_state *conn_state)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
struct drm_connector *connector = conn_state->connector;
enum pipe pipe = crtc->pipe;
enum port port = encoder->port;
@@ -692,10 +692,10 @@ void intel_audio_codec_enable(struct intel_encoder *encoder,
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct i915_audio_component *acomp = dev_priv->audio_component;
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
struct drm_connector *connector = conn_state->connector;
const struct drm_display_mode *adjusted_mode =
- &crtc_state->base.adjusted_mode;
+ &crtc_state->hw.adjusted_mode;
enum port port = encoder->port;
enum pipe pipe = crtc->pipe;
@@ -707,8 +707,8 @@ void intel_audio_codec_enable(struct intel_encoder *encoder,
DRM_DEBUG_DRIVER("ELD on [CONNECTOR:%d:%s], [ENCODER:%d:%s]\n",
connector->base.id,
connector->name,
- connector->encoder->base.id,
- connector->encoder->name);
+ encoder->base.base.id,
+ encoder->base.name);
connector->eld[6] = drm_av_sync_delay(connector, adjusted_mode) / 2;
@@ -753,7 +753,7 @@ void intel_audio_codec_disable(struct intel_encoder *encoder,
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct i915_audio_component *acomp = dev_priv->audio_component;
- struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->base.crtc);
+ struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->uapi.crtc);
enum port port = encoder->port;
enum pipe pipe = crtc->pipe;
diff --git a/drivers/gpu/drm/i915/display/intel_bios.c b/drivers/gpu/drm/i915/display/intel_bios.c
index 63c1bd4c2954..8beac06e3f10 100644
--- a/drivers/gpu/drm/i915/display/intel_bios.c
+++ b/drivers/gpu/drm/i915/display/intel_bios.c
@@ -29,6 +29,7 @@
#include <drm/i915_drm.h>
#include "display/intel_display.h"
+#include "display/intel_display_types.h"
#include "display/intel_gmbus.h"
#include "i915_drv.h"
@@ -58,6 +59,13 @@
* that.
*/
+/* Wrapper for VBT child device config */
+struct display_device_data {
+ struct child_device_config child;
+ struct dsc_compression_parameters_entry *dsc;
+ struct list_head node;
+};
+
#define SLAVE_ADDR1 0x70
#define SLAVE_ADDR2 0x72
@@ -202,17 +210,12 @@ get_lvds_fp_timing(const struct bdb_header *bdb,
return (const struct lvds_fp_timing *)((const u8 *)bdb + ofs);
}
-/* Try to find integrated panel data */
+/* Parse general panel options */
static void
-parse_lfp_panel_data(struct drm_i915_private *dev_priv,
- const struct bdb_header *bdb)
+parse_panel_options(struct drm_i915_private *dev_priv,
+ const struct bdb_header *bdb)
{
const struct bdb_lvds_options *lvds_options;
- const struct bdb_lvds_lfp_data *lvds_lfp_data;
- const struct bdb_lvds_lfp_data_ptrs *lvds_lfp_data_ptrs;
- const struct lvds_dvo_timing *panel_dvo_timing;
- const struct lvds_fp_timing *fp_timing;
- struct drm_display_mode *panel_fixed_mode;
int panel_type;
int drrs_mode;
int ret;
@@ -261,6 +264,19 @@ parse_lfp_panel_data(struct drm_i915_private *dev_priv,
DRM_DEBUG_KMS("DRRS not supported (VBT input)\n");
break;
}
+}
+
+/* Try to find integrated panel timing data */
+static void
+parse_lfp_panel_dtd(struct drm_i915_private *dev_priv,
+ const struct bdb_header *bdb)
+{
+ const struct bdb_lvds_lfp_data *lvds_lfp_data;
+ const struct bdb_lvds_lfp_data_ptrs *lvds_lfp_data_ptrs;
+ const struct lvds_dvo_timing *panel_dvo_timing;
+ const struct lvds_fp_timing *fp_timing;
+ struct drm_display_mode *panel_fixed_mode;
+ int panel_type = dev_priv->vbt.panel_type;
lvds_lfp_data = find_section(bdb, BDB_LVDS_LFP_DATA);
if (!lvds_lfp_data)
@@ -282,7 +298,7 @@ parse_lfp_panel_data(struct drm_i915_private *dev_priv,
dev_priv->vbt.lfp_lvds_vbt_mode = panel_fixed_mode;
- DRM_DEBUG_KMS("Found panel mode in BIOS VBT tables:\n");
+ DRM_DEBUG_KMS("Found panel mode in BIOS VBT legacy lfp table:\n");
drm_mode_debug_printmodeline(panel_fixed_mode);
fp_timing = get_lvds_fp_timing(bdb, lvds_lfp_data,
@@ -300,6 +316,98 @@ parse_lfp_panel_data(struct drm_i915_private *dev_priv,
}
static void
+parse_generic_dtd(struct drm_i915_private *dev_priv,
+ const struct bdb_header *bdb)
+{
+ const struct bdb_generic_dtd *generic_dtd;
+ const struct generic_dtd_entry *dtd;
+ struct drm_display_mode *panel_fixed_mode;
+ int num_dtd;
+
+ generic_dtd = find_section(bdb, BDB_GENERIC_DTD);
+ if (!generic_dtd)
+ return;
+
+ if (generic_dtd->gdtd_size < sizeof(struct generic_dtd_entry)) {
+ DRM_ERROR("GDTD size %u is too small.\n",
+ generic_dtd->gdtd_size);
+ return;
+ } else if (generic_dtd->gdtd_size !=
+ sizeof(struct generic_dtd_entry)) {
+ DRM_ERROR("Unexpected GDTD size %u\n", generic_dtd->gdtd_size);
+ /* DTD has unknown fields, but keep going */
+ }
+
+ num_dtd = (get_blocksize(generic_dtd) -
+ sizeof(struct bdb_generic_dtd)) / generic_dtd->gdtd_size;
+ if (dev_priv->vbt.panel_type >= num_dtd) {
+ DRM_ERROR("Panel type %d not found in table of %d DTD's\n",
+ dev_priv->vbt.panel_type, num_dtd);
+ return;
+ }
+
+ dtd = &generic_dtd->dtd[dev_priv->vbt.panel_type];
+
+ panel_fixed_mode = kzalloc(sizeof(*panel_fixed_mode), GFP_KERNEL);
+ if (!panel_fixed_mode)
+ return;
+
+ panel_fixed_mode->hdisplay = dtd->hactive;
+ panel_fixed_mode->hsync_start =
+ panel_fixed_mode->hdisplay + dtd->hfront_porch;
+ panel_fixed_mode->hsync_end =
+ panel_fixed_mode->hsync_start + dtd->hsync;
+ panel_fixed_mode->htotal = panel_fixed_mode->hsync_end;
+
+ panel_fixed_mode->vdisplay = dtd->vactive;
+ panel_fixed_mode->vsync_start =
+ panel_fixed_mode->vdisplay + dtd->vfront_porch;
+ panel_fixed_mode->vsync_end =
+ panel_fixed_mode->vsync_start + dtd->vsync;
+ panel_fixed_mode->vtotal = panel_fixed_mode->vsync_end;
+
+ panel_fixed_mode->clock = dtd->pixel_clock;
+ panel_fixed_mode->width_mm = dtd->width_mm;
+ panel_fixed_mode->height_mm = dtd->height_mm;
+
+ panel_fixed_mode->type = DRM_MODE_TYPE_PREFERRED;
+ drm_mode_set_name(panel_fixed_mode);
+
+ if (dtd->hsync_positive_polarity)
+ panel_fixed_mode->flags |= DRM_MODE_FLAG_PHSYNC;
+ else
+ panel_fixed_mode->flags |= DRM_MODE_FLAG_NHSYNC;
+
+ if (dtd->vsync_positive_polarity)
+ panel_fixed_mode->flags |= DRM_MODE_FLAG_PVSYNC;
+ else
+ panel_fixed_mode->flags |= DRM_MODE_FLAG_NVSYNC;
+
+ DRM_DEBUG_KMS("Found panel mode in BIOS VBT generic dtd table:\n");
+ drm_mode_debug_printmodeline(panel_fixed_mode);
+
+ dev_priv->vbt.lfp_lvds_vbt_mode = panel_fixed_mode;
+}
+
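For illustration of the timing arithmetic in parse_generic_dtd() above, here is a standalone sketch with made-up DTD values (field names loosely mirror struct generic_dtd_entry; the numbers are hypothetical): hactive 1920 plus a 48-pixel front porch gives hsync_start 1968, a 32-pixel sync pulse gives hsync_end 2000, and htotal lands on hsync_end because no back porch term is added.

#include <stdio.h>

/* Hypothetical DTD fields, loosely mirroring struct generic_dtd_entry. */
struct dtd_example {
	unsigned int hactive, hfront_porch, hsync;
};

int main(void)
{
	struct dtd_example dtd = { .hactive = 1920, .hfront_porch = 48, .hsync = 32 };
	unsigned int hsync_start = dtd.hactive + dtd.hfront_porch; /* 1968 */
	unsigned int hsync_end = hsync_start + dtd.hsync;          /* 2000 */
	unsigned int htotal = hsync_end; /* no back porch term, as in the patch */

	printf("hsync_start=%u hsync_end=%u htotal=%u\n",
	       hsync_start, hsync_end, htotal);
	return 0;
}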
+static void
+parse_panel_dtd(struct drm_i915_private *dev_priv,
+ const struct bdb_header *bdb)
+{
+ /*
+ * Older VBTs provided DTD information for internal displays
+ * through the "LFP panel DTD" block (42). As of VBT revision 229,
+ * that block is now deprecated and DTD information should be provided
+ * via a newer "generic DTD" block (58). Just to be safe, we'll
+ * try the new generic DTD block first on VBT >= 229, but still fall
+ * back to trying the old LFP block if that fails.
+ */
+ if (bdb->version >= 229)
+ parse_generic_dtd(dev_priv, bdb);
+ if (!dev_priv->vbt.lfp_lvds_vbt_mode)
+ parse_lfp_panel_dtd(dev_priv, bdb);
+}
+
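The fallback in parse_panel_dtd() is version-gated dispatch: try the newer block on VBT >= 229, and fall through to the legacy parser whenever nothing was found. A minimal sketch of the control flow, with hypothetical stub parsers standing in for the real block readers:

#include <stdio.h>

/* Stubs standing in for the generic-DTD and legacy-LFP parsers. */
static int parse_new_block(int version) { return version >= 230; }
static int parse_legacy_block(void) { return 1; }

int main(void)
{
	int version = 229; /* hypothetical VBT revision */
	int found = 0;

	if (version >= 229)
		found = parse_new_block(version); /* may still find nothing */
	if (!found)
		found = parse_legacy_block();     /* legacy fallback */

	printf("found=%d\n", found);
	return 0;
}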
+static void
parse_lfp_backlight(struct drm_i915_private *dev_priv,
const struct bdb_header *bdb)
{
@@ -449,8 +557,9 @@ static void
parse_sdvo_device_mapping(struct drm_i915_private *dev_priv, u8 bdb_version)
{
struct sdvo_device_mapping *mapping;
+ const struct display_device_data *devdata;
const struct child_device_config *child;
- int i, count = 0;
+ int count = 0;
/*
* Only parse SDVO mappings on gens that could have SDVO. This isn't
@@ -461,8 +570,8 @@ parse_sdvo_device_mapping(struct drm_i915_private *dev_priv, u8 bdb_version)
return;
}
- for (i = 0, count = 0; i < dev_priv->vbt.child_dev_num; i++) {
- child = dev_priv->vbt.child_dev + i;
+ list_for_each_entry(devdata, &dev_priv->vbt.display_devices, node) {
+ child = &devdata->child;
if (child->slave_addr != SLAVE_ADDR1 &&
child->slave_addr != SLAVE_ADDR2) {
@@ -552,16 +661,45 @@ parse_driver_features(struct drm_i915_private *dev_priv,
dev_priv->vbt.int_lvds_support = 0;
}
- DRM_DEBUG_KMS("DRRS State Enabled:%d\n", driver->drrs_enabled);
+ if (bdb->version < 228) {
+ DRM_DEBUG_KMS("DRRS State Enabled:%d\n", driver->drrs_enabled);
+ /*
+ * If DRRS is not supported, drrs_type has to be set to 0.
+ * This is because the VBT is configured in such a way that
+ * static DRRS is 0 and DRRS not supported is represented by
+ * driver->drrs_enabled=false
+ */
+ if (!driver->drrs_enabled)
+ dev_priv->vbt.drrs_type = DRRS_NOT_SUPPORTED;
+
+ dev_priv->vbt.psr.enable = driver->psr_enabled;
+ }
+}
+
+static void
+parse_power_conservation_features(struct drm_i915_private *dev_priv,
+ const struct bdb_header *bdb)
+{
+ const struct bdb_lfp_power *power;
+ u8 panel_type = dev_priv->vbt.panel_type;
+
+ if (bdb->version < 228)
+ return;
+
+ power = find_section(bdb, BDB_LVDS_POWER);
+ if (!power)
+ return;
+
+ dev_priv->vbt.psr.enable = power->psr & BIT(panel_type);
+
/*
* If DRRS is not supported, drrs_type has to be set to 0.
* This is because the VBT is configured in such a way that
* static DRRS is 0 and DRRS not supported is represented by
- * driver->drrs_enabled=false
+ * power->drrs & BIT(panel_type)=false
*/
- if (!driver->drrs_enabled)
+ if (!(power->drrs & BIT(panel_type)))
dev_priv->vbt.drrs_type = DRRS_NOT_SUPPORTED;
- dev_priv->vbt.psr.enable = driver->psr_enabled;
}
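The decode above is a per-panel-type bitmask test: bit N of power->psr (and of power->drrs) carries the setting for panel type N. A small standalone sketch with made-up field values:

#include <stdbool.h>
#include <stdio.h>

#define BIT(n) (1u << (n))

int main(void)
{
	unsigned char psr = 0x05;  /* hypothetical: PSR on for panel types 0 and 2 */
	unsigned char drrs = 0x02; /* hypothetical: DRRS on for panel type 1 only */
	int panel_type = 2;

	bool psr_enable = psr & BIT(panel_type);      /* true */
	bool drrs_supported = drrs & BIT(panel_type); /* false -> DRRS_NOT_SUPPORTED */

	printf("psr=%d drrs=%d\n", psr_enable, drrs_supported);
	return 0;
}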
static void
@@ -1230,6 +1368,57 @@ err:
memset(dev_priv->vbt.dsi.sequence, 0, sizeof(dev_priv->vbt.dsi.sequence));
}
+static void
+parse_compression_parameters(struct drm_i915_private *i915,
+ const struct bdb_header *bdb)
+{
+ const struct bdb_compression_parameters *params;
+ struct display_device_data *devdata;
+ const struct child_device_config *child;
+ u16 block_size;
+ int index;
+
+ if (bdb->version < 198)
+ return;
+
+ params = find_section(bdb, BDB_COMPRESSION_PARAMETERS);
+ if (params) {
+ /* Sanity checks */
+ if (params->entry_size != sizeof(params->data[0])) {
+ DRM_DEBUG_KMS("VBT: unsupported compression param entry size\n");
+ return;
+ }
+
+ block_size = get_blocksize(params);
+ if (block_size < sizeof(*params)) {
+ DRM_DEBUG_KMS("VBT: expected 16 compression param entries\n");
+ return;
+ }
+ }
+
+ list_for_each_entry(devdata, &i915->vbt.display_devices, node) {
+ child = &devdata->child;
+
+ if (!child->compression_enable)
+ continue;
+
+ if (!params) {
+ DRM_DEBUG_KMS("VBT: compression params not available\n");
+ continue;
+ }
+
+ if (child->compression_method_cps) {
+ DRM_DEBUG_KMS("VBT: CPS compression not supported\n");
+ continue;
+ }
+
+ index = child->compression_structure_index;
+
+ devdata->dsc = kmemdup(&params->data[index],
+ sizeof(*devdata->dsc), GFP_KERNEL);
+ }
+}
+
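The sanity checks in parse_compression_parameters() reduce to size arithmetic: the per-entry size must match the struct the driver was built against, and the block must be big enough for the header plus its fixed entry array. The same checks in isolation, with an assumed layout that only loosely mirrors struct bdb_compression_parameters:

#include <stdint.h>
#include <stdio.h>

struct fake_entry { uint8_t bytes[10]; };
struct fake_params {
	uint16_t entry_size;
	struct fake_entry data[16];
};

int main(void)
{
	struct fake_params params = { .entry_size = sizeof(struct fake_entry) };
	size_t block_size = sizeof(params); /* as get_blocksize() would report */

	if (params.entry_size != sizeof(params.data[0]))
		printf("unsupported entry size\n");
	else if (block_size < sizeof(params))
		printf("block too small for 16 entries\n");
	else
		printf("ok: %zu entries\n", sizeof(params.data) / params.entry_size);
	return 0;
}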
static u8 translate_iboost(u8 val)
{
static const u8 mapping[] = { 1, 3, 7 }; /* See VBT spec */
@@ -1246,7 +1435,7 @@ static enum port get_port_by_ddc_pin(struct drm_i915_private *i915, u8 ddc_pin)
const struct ddi_vbt_port_info *info;
enum port port;
- for (port = PORT_A; port < I915_MAX_PORTS; port++) {
+ for_each_port(port) {
info = &i915->vbt.ddi_port_info[port];
if (info->child && ddc_pin == info->alternate_ddc_pin)
@@ -1297,7 +1486,7 @@ static enum port get_port_by_aux_ch(struct drm_i915_private *i915, u8 aux_ch)
const struct ddi_vbt_port_info *info;
enum port port;
- for (port = PORT_A; port < I915_MAX_PORTS; port++) {
+ for_each_port(port) {
info = &i915->vbt.ddi_port_info[port];
if (info->child && aux_ch == info->alternate_aux_channel)
@@ -1418,9 +1607,10 @@ static enum port dvo_port_to_port(u8 dvo_port)
}
static void parse_ddi_port(struct drm_i915_private *dev_priv,
- const struct child_device_config *child,
+ struct display_device_data *devdata,
u8 bdb_version)
{
+ const struct child_device_config *child = &devdata->child;
struct ddi_vbt_port_info *info;
bool is_dvi, is_hdmi, is_dp, is_edp, is_crt;
enum port port;
@@ -1443,7 +1633,7 @@ static void parse_ddi_port(struct drm_i915_private *dev_priv,
is_hdmi = is_dvi && (child->device_type & DEVICE_TYPE_NOT_HDMI_OUTPUT) == 0;
is_edp = is_dp && (child->device_type & DEVICE_TYPE_INTERNAL_CONNECTOR);
- if (port == PORT_A && is_dvi) {
+ if (port == PORT_A && is_dvi && INTEL_GEN(dev_priv) < 12) {
DRM_DEBUG_KMS("VBT claims port A supports DVI%s, ignoring\n",
is_hdmi ? "/HDMI" : "");
is_dvi = false;
@@ -1461,26 +1651,11 @@ static void parse_ddi_port(struct drm_i915_private *dev_priv,
if (bdb_version >= 209)
info->supports_tbt = child->tbt;
- DRM_DEBUG_KMS("Port %c VBT info: CRT:%d DVI:%d HDMI:%d DP:%d eDP:%d LSPCON:%d USB-Type-C:%d TBT:%d\n",
+ DRM_DEBUG_KMS("Port %c VBT info: CRT:%d DVI:%d HDMI:%d DP:%d eDP:%d LSPCON:%d USB-Type-C:%d TBT:%d DSC:%d\n",
port_name(port), is_crt, is_dvi, is_hdmi, is_dp, is_edp,
HAS_LSPCON(dev_priv) && child->lspcon,
- info->supports_typec_usb, info->supports_tbt);
-
- if (is_edp && is_dvi)
- DRM_DEBUG_KMS("Internal DP port %c is TMDS compatible\n",
- port_name(port));
- if (is_crt && port != PORT_E)
- DRM_DEBUG_KMS("Port %c is analog\n", port_name(port));
- if (is_crt && (is_dvi || is_dp))
- DRM_DEBUG_KMS("Analog port %c is also DP or TMDS compatible\n",
- port_name(port));
- if (is_dvi && (port == PORT_A || port == PORT_E))
- DRM_DEBUG_KMS("Port %c is TMDS compatible\n", port_name(port));
- if (!is_dvi && !is_dp && !is_crt)
- DRM_DEBUG_KMS("Port %c is not DP/TMDS/CRT compatible\n",
- port_name(port));
- if (is_edp && (port == PORT_B || port == PORT_C || port == PORT_E))
- DRM_DEBUG_KMS("Port %c is internal DP\n", port_name(port));
+ info->supports_typec_usb, info->supports_tbt,
+ devdata->dsc != NULL);
if (is_dvi) {
u8 ddc_pin;
@@ -1509,6 +1684,7 @@ static void parse_ddi_port(struct drm_i915_private *dev_priv,
port_name(port),
hdmi_level_shift);
info->hdmi_level_shift = hdmi_level_shift;
+ info->hdmi_level_shift_set = true;
}
if (bdb_version >= 204) {
@@ -1571,8 +1747,7 @@ static void parse_ddi_port(struct drm_i915_private *dev_priv,
static void parse_ddi_ports(struct drm_i915_private *dev_priv, u8 bdb_version)
{
- const struct child_device_config *child;
- int i;
+ struct display_device_data *devdata;
if (!HAS_DDI(dev_priv) && !IS_CHERRYVIEW(dev_priv))
return;
@@ -1580,11 +1755,8 @@ static void parse_ddi_ports(struct drm_i915_private *dev_priv, u8 bdb_version)
if (bdb_version < 155)
return;
- for (i = 0; i < dev_priv->vbt.child_dev_num; i++) {
- child = dev_priv->vbt.child_dev + i;
-
- parse_ddi_port(dev_priv, child, bdb_version);
- }
+ list_for_each_entry(devdata, &dev_priv->vbt.display_devices, node)
+ parse_ddi_port(dev_priv, devdata, bdb_version);
}
static void
@@ -1592,8 +1764,9 @@ parse_general_definitions(struct drm_i915_private *dev_priv,
const struct bdb_header *bdb)
{
const struct bdb_general_definitions *defs;
+ struct display_device_data *devdata;
const struct child_device_config *child;
- int i, child_device_num, count;
+ int i, child_device_num;
u8 expected_size;
u16 block_size;
int bus_pin;
@@ -1649,26 +1822,7 @@ parse_general_definitions(struct drm_i915_private *dev_priv,
/* get the number of child device */
child_device_num = (block_size - sizeof(*defs)) / defs->child_dev_size;
- count = 0;
- /* get the number of child device that is present */
- for (i = 0; i < child_device_num; i++) {
- child = child_device_ptr(defs, i);
- if (!child->device_type)
- continue;
- count++;
- }
- if (!count) {
- DRM_DEBUG_KMS("no child dev is parsed from VBT\n");
- return;
- }
- dev_priv->vbt.child_dev = kcalloc(count, sizeof(*child), GFP_KERNEL);
- if (!dev_priv->vbt.child_dev) {
- DRM_DEBUG_KMS("No memory space for child device\n");
- return;
- }
- dev_priv->vbt.child_dev_num = count;
- count = 0;
for (i = 0; i < child_device_num; i++) {
child = child_device_ptr(defs, i);
if (!child->device_type)
@@ -1677,23 +1831,29 @@ parse_general_definitions(struct drm_i915_private *dev_priv,
DRM_DEBUG_KMS("Found VBT child device with type 0x%x\n",
child->device_type);
+ devdata = kzalloc(sizeof(*devdata), GFP_KERNEL);
+ if (!devdata)
+ break;
+
/*
* Copy as much as we know (sizeof) and is available
- * (child_dev_size) of the child device. Accessing the data must
- * depend on VBT version.
+ * (child_dev_size) of the child device config. Accessing the
+ * data must depend on VBT version.
*/
- memcpy(dev_priv->vbt.child_dev + count, child,
+ memcpy(&devdata->child, child,
min_t(size_t, defs->child_dev_size, sizeof(*child)));
- count++;
+
+ list_add_tail(&devdata->node, &dev_priv->vbt.display_devices);
}
+
+ if (list_empty(&dev_priv->vbt.display_devices))
+ DRM_DEBUG_KMS("no child dev is parsed from VBT\n");
}
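The memcpy above copies the smaller of the VBT's per-child size and the driver's struct size, so an old VBT never overreads its data and a new VBT never overflows the destination; fields the VBT does not supply stay zeroed from kzalloc(). A standalone sketch of the same versioned-copy pattern (types and values are illustrative):

#include <stdio.h>
#include <string.h>

#define MIN(a, b) ((a) < (b) ? (a) : (b))

struct child_v2 { int type; int flags; }; /* the "driver" view */

int main(void)
{
	/* Pretend an older VBT only provides the first field. */
	int vbt_child = 42;
	size_t vbt_child_size = sizeof(vbt_child);

	struct child_v2 dst = {0}; /* zeroed, like kzalloc() */
	memcpy(&dst, &vbt_child, MIN(vbt_child_size, sizeof(dst)));

	printf("type=%d flags=%d\n", dst.type, dst.flags); /* flags stays 0 */
	return 0;
}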
/* Common defaults which may be overridden by VBT. */
static void
init_vbt_defaults(struct drm_i915_private *dev_priv)
{
- enum port port;
-
dev_priv->vbt.crt_ddc_pin = GMBUS_PIN_VGADDC;
/* Default to having backlight */
@@ -1721,13 +1881,6 @@ init_vbt_defaults(struct drm_i915_private *dev_priv)
dev_priv->vbt.lvds_ssc_freq = intel_bios_ssc_frequency(dev_priv,
!HAS_PCH_SPLIT(dev_priv));
DRM_DEBUG_KMS("Set default to SSC at %d kHz\n", dev_priv->vbt.lvds_ssc_freq);
-
- for (port = PORT_A; port < I915_MAX_PORTS; port++) {
- struct ddi_vbt_port_info *info =
- &dev_priv->vbt.ddi_port_info[port];
-
- info->hdmi_level_shift = HDMI_LEVEL_SHIFT_UNKNOWN;
- }
}
/* Defaults to initialize only if there is no VBT. */
@@ -1736,7 +1889,7 @@ init_vbt_missing_defaults(struct drm_i915_private *dev_priv)
{
enum port port;
- for (port = PORT_A; port < I915_MAX_PORTS; port++) {
+ for_each_port(port) {
struct ddi_vbt_port_info *info =
&dev_priv->vbt.ddi_port_info[port];
enum phy phy = intel_port_to_phy(dev_priv, port);
@@ -1787,6 +1940,13 @@ bool intel_bios_is_valid_vbt(const void *buf, size_t size)
return false;
}
+ if (vbt->vbt_size > size) {
+ DRM_DEBUG_DRIVER("VBT incomplete (vbt_size overflows)\n");
+ return false;
+ }
+
+ size = vbt->vbt_size;
+
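The new check first verifies that the self-reported vbt_size fits inside the mapped region, then shrinks all further validation to exactly that many bytes, so later offset checks cannot be satisfied by trailing garbage beyond the blob. The clamp in isolation:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Returns false if the blob claims to be bigger than what was mapped. */
static bool clamp_to_reported_size(uint16_t reported, size_t *mapped)
{
	if (reported > *mapped)
		return false;
	*mapped = reported; /* validate only the bytes the blob claims */
	return true;
}

int main(void)
{
	size_t mapped = 65536; /* hypothetical mapped ROM size */
	if (clamp_to_reported_size(8192, &mapped))
		printf("validating %zu bytes\n", mapped); /* 8192 */
	return 0;
}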
if (range_overflows_t(size_t,
vbt->bdb_offset,
sizeof(struct bdb_header),
@@ -1804,28 +1964,61 @@ bool intel_bios_is_valid_vbt(const void *buf, size_t size)
return vbt;
}
-static const struct vbt_header *find_vbt(void __iomem *bios, size_t size)
+static struct vbt_header *oprom_get_vbt(struct drm_i915_private *dev_priv)
{
- size_t i;
+ struct pci_dev *pdev = dev_priv->drm.pdev;
+ void __iomem *p = NULL, *oprom;
+ struct vbt_header *vbt;
+ u16 vbt_size;
+ size_t i, size;
- /* Scour memory looking for the VBT signature. */
- for (i = 0; i + 4 < size; i++) {
- void *vbt;
+ oprom = pci_map_rom(pdev, &size);
+ if (!oprom)
+ return NULL;
- if (ioread32(bios + i) != *((const u32 *) "$VBT"))
+ /* Scour memory looking for the VBT signature. */
+ for (i = 0; i + 4 < size; i += 4) {
+ if (ioread32(oprom + i) != *((const u32 *)"$VBT"))
continue;
- /*
- * This is the one place where we explicitly discard the address
- * space (__iomem) of the BIOS/VBT.
- */
- vbt = (void __force *) bios + i;
- if (intel_bios_is_valid_vbt(vbt, size - i))
- return vbt;
-
+ p = oprom + i;
+ size -= i;
break;
}
+ if (!p)
+ goto err_unmap_oprom;
+
+ if (sizeof(struct vbt_header) > size) {
+ DRM_DEBUG_DRIVER("VBT header incomplete\n");
+ goto err_unmap_oprom;
+ }
+
+ vbt_size = ioread16(p + offsetof(struct vbt_header, vbt_size));
+ if (vbt_size > size) {
+ DRM_DEBUG_DRIVER("VBT incomplete (vbt_size overflows)\n");
+ goto err_unmap_oprom;
+ }
+
+ /* The rest will be validated by intel_bios_is_valid_vbt() */
+ vbt = kmalloc(vbt_size, GFP_KERNEL);
+ if (!vbt)
+ goto err_unmap_oprom;
+
+ memcpy_fromio(vbt, p, vbt_size);
+
+ if (!intel_bios_is_valid_vbt(vbt, vbt_size))
+ goto err_free_vbt;
+
+ pci_unmap_rom(pdev, oprom);
+
+ return vbt;
+
+err_free_vbt:
+ kfree(vbt);
+err_unmap_oprom:
+ pci_unmap_rom(pdev, oprom);
+
return NULL;
}
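oprom_get_vbt() scans for the literal bytes "$VBT" at 4-byte strides via ioread32(), then copies vbt_size bytes out of the mapped ROM before validating the copy. The same scan over an ordinary memory buffer, as a userspace sketch with a hypothetical ROM image:

#include <stdio.h>
#include <string.h>

/* Scan buf for "$VBT" at 4-byte granularity; returns offset or -1. */
static long find_vbt_sig(const unsigned char *buf, size_t size)
{
	for (size_t i = 0; i + 4 < size; i += 4) {
		if (memcmp(buf + i, "$VBT", 4) == 0)
			return (long)i;
	}
	return -1;
}

int main(void)
{
	unsigned char rom[32] = {0};
	memcpy(rom + 8, "$VBT", 4); /* plant a signature at a 4-byte offset */
	printf("found at %ld\n", find_vbt_sig(rom, sizeof(rom))); /* 8 */
	return 0;
}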
@@ -1839,10 +2032,11 @@ static const struct vbt_header *find_vbt(void __iomem *bios, size_t size)
*/
void intel_bios_init(struct drm_i915_private *dev_priv)
{
- struct pci_dev *pdev = dev_priv->drm.pdev;
const struct vbt_header *vbt = dev_priv->opregion.vbt;
+ struct vbt_header *oprom_vbt = NULL;
const struct bdb_header *bdb;
- u8 __iomem *bios = NULL;
+
+ INIT_LIST_HEAD(&dev_priv->vbt.display_devices);
if (!HAS_DISPLAY(dev_priv) || !INTEL_DISPLAY_ENABLED(dev_priv)) {
DRM_DEBUG_KMS("Skipping VBT init due to disabled display.\n");
@@ -1853,15 +2047,11 @@ void intel_bios_init(struct drm_i915_private *dev_priv)
/* If the OpRegion does not have VBT, look in PCI ROM. */
if (!vbt) {
- size_t size;
-
- bios = pci_map_rom(pdev, &size);
- if (!bios)
+ oprom_vbt = oprom_get_vbt(dev_priv);
+ if (!oprom_vbt)
goto out;
- vbt = find_vbt(bios, size);
- if (!vbt)
- goto out;
+ vbt = oprom_vbt;
DRM_DEBUG_KMS("Found valid VBT in PCI ROM\n");
}
@@ -1874,15 +2064,20 @@ void intel_bios_init(struct drm_i915_private *dev_priv)
/* Grab useful general definitions */
parse_general_features(dev_priv, bdb);
parse_general_definitions(dev_priv, bdb);
- parse_lfp_panel_data(dev_priv, bdb);
+ parse_panel_options(dev_priv, bdb);
+ parse_panel_dtd(dev_priv, bdb);
parse_lfp_backlight(dev_priv, bdb);
parse_sdvo_panel_data(dev_priv, bdb);
parse_driver_features(dev_priv, bdb);
+ parse_power_conservation_features(dev_priv, bdb);
parse_edp(dev_priv, bdb);
parse_psr(dev_priv, bdb);
parse_mipi_config(dev_priv, bdb);
parse_mipi_sequence(dev_priv, bdb);
+ /* Depends on child device list */
+ parse_compression_parameters(dev_priv, bdb);
+
/* Further processing on pre-parsed data */
parse_sdvo_device_mapping(dev_priv, bdb->version);
parse_ddi_ports(dev_priv, bdb->version);
@@ -1893,8 +2088,7 @@ out:
init_vbt_missing_defaults(dev_priv);
}
- if (bios)
- pci_unmap_rom(pdev, bios);
+ kfree(oprom_vbt);
}
/**
@@ -1903,9 +2097,14 @@ out:
*/
void intel_bios_driver_remove(struct drm_i915_private *dev_priv)
{
- kfree(dev_priv->vbt.child_dev);
- dev_priv->vbt.child_dev = NULL;
- dev_priv->vbt.child_dev_num = 0;
+ struct display_device_data *devdata, *n;
+
+ list_for_each_entry_safe(devdata, n, &dev_priv->vbt.display_devices, node) {
+ list_del(&devdata->node);
+ kfree(devdata->dsc);
+ kfree(devdata);
+ }
+
kfree(dev_priv->vbt.sdvo_lvds_vbt_mode);
dev_priv->vbt.sdvo_lvds_vbt_mode = NULL;
kfree(dev_priv->vbt.lfp_lvds_vbt_mode);
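The teardown loop needs the _safe iterator because each node is freed inside the loop body; a plain list_for_each_entry() would read node->next after kfree(). The same delete-while-iterating pattern on a hand-rolled list, purely for illustration:

#include <stdlib.h>

struct node { int val; struct node *next; };

int main(void)
{
	/* Build a three-element list. */
	struct node *head = NULL;
	for (int i = 0; i < 3; i++) {
		struct node *n = malloc(sizeof(*n));
		n->val = i;
		n->next = head;
		head = n;
	}

	/* Save ->next before freeing, as list_for_each_entry_safe()
	 * does with its extra cursor argument. */
	for (struct node *n = head, *tmp; n; n = tmp) {
		tmp = n->next;
		free(n);
	}
	return 0;
}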
@@ -1929,17 +2128,18 @@ void intel_bios_driver_remove(struct drm_i915_private *dev_priv)
*/
bool intel_bios_is_tv_present(struct drm_i915_private *dev_priv)
{
+ const struct display_device_data *devdata;
const struct child_device_config *child;
- int i;
if (!dev_priv->vbt.int_tv_support)
return false;
- if (!dev_priv->vbt.child_dev_num)
+ if (list_empty(&dev_priv->vbt.display_devices))
return true;
- for (i = 0; i < dev_priv->vbt.child_dev_num; i++) {
- child = dev_priv->vbt.child_dev + i;
+ list_for_each_entry(devdata, &dev_priv->vbt.display_devices, node) {
+ child = &devdata->child;
+
/*
* If the device type is not TV, continue.
*/
@@ -1971,14 +2171,14 @@ bool intel_bios_is_tv_present(struct drm_i915_private *dev_priv)
*/
bool intel_bios_is_lvds_present(struct drm_i915_private *dev_priv, u8 *i2c_pin)
{
+ const struct display_device_data *devdata;
const struct child_device_config *child;
- int i;
- if (!dev_priv->vbt.child_dev_num)
+ if (list_empty(&dev_priv->vbt.display_devices))
return true;
- for (i = 0; i < dev_priv->vbt.child_dev_num; i++) {
- child = dev_priv->vbt.child_dev + i;
+ list_for_each_entry(devdata, &dev_priv->vbt.display_devices, node) {
+ child = &devdata->child;
/* If the device type is not LFP, continue.
* We have to check both the new identifiers as well as the
@@ -2020,6 +2220,7 @@ bool intel_bios_is_lvds_present(struct drm_i915_private *dev_priv, u8 *i2c_pin)
*/
bool intel_bios_is_port_present(struct drm_i915_private *dev_priv, enum port port)
{
+ const struct display_device_data *devdata;
const struct child_device_config *child;
static const struct {
u16 dp, hdmi;
@@ -2030,7 +2231,6 @@ bool intel_bios_is_port_present(struct drm_i915_private *dev_priv, enum port por
[PORT_E] = { DVO_PORT_DPE, DVO_PORT_HDMIE, },
[PORT_F] = { DVO_PORT_DPF, DVO_PORT_HDMIF, },
};
- int i;
if (HAS_DDI(dev_priv)) {
const struct ddi_vbt_port_info *port_info =
@@ -2045,11 +2245,8 @@ bool intel_bios_is_port_present(struct drm_i915_private *dev_priv, enum port por
if (WARN_ON(port == PORT_A) || port >= ARRAY_SIZE(port_mapping))
return false;
- if (!dev_priv->vbt.child_dev_num)
- return false;
-
- for (i = 0; i < dev_priv->vbt.child_dev_num; i++) {
- child = dev_priv->vbt.child_dev + i;
+ list_for_each_entry(devdata, &dev_priv->vbt.display_devices, node) {
+ child = &devdata->child;
if ((child->dvo_port == port_mapping[port].dp ||
child->dvo_port == port_mapping[port].hdmi) &&
@@ -2070,6 +2267,7 @@ bool intel_bios_is_port_present(struct drm_i915_private *dev_priv, enum port por
*/
bool intel_bios_is_port_edp(struct drm_i915_private *dev_priv, enum port port)
{
+ const struct display_device_data *devdata;
const struct child_device_config *child;
static const short port_mapping[] = {
[PORT_B] = DVO_PORT_DPB,
@@ -2078,16 +2276,12 @@ bool intel_bios_is_port_edp(struct drm_i915_private *dev_priv, enum port port)
[PORT_E] = DVO_PORT_DPE,
[PORT_F] = DVO_PORT_DPF,
};
- int i;
if (HAS_DDI(dev_priv))
return dev_priv->vbt.ddi_port_info[port].supports_edp;
- if (!dev_priv->vbt.child_dev_num)
- return false;
-
- for (i = 0; i < dev_priv->vbt.child_dev_num; i++) {
- child = dev_priv->vbt.child_dev + i;
+ list_for_each_entry(devdata, &dev_priv->vbt.display_devices, node) {
+ child = &devdata->child;
if (child->dvo_port == port_mapping[port] &&
(child->device_type & DEVICE_TYPE_eDP_BITS) ==
@@ -2136,13 +2330,10 @@ static bool child_dev_is_dp_dual_mode(const struct child_device_config *child,
bool intel_bios_is_port_dp_dual_mode(struct drm_i915_private *dev_priv,
enum port port)
{
- const struct child_device_config *child;
- int i;
-
- for (i = 0; i < dev_priv->vbt.child_dev_num; i++) {
- child = dev_priv->vbt.child_dev + i;
+ const struct display_device_data *devdata;
- if (child_dev_is_dp_dual_mode(child, port))
+ list_for_each_entry(devdata, &dev_priv->vbt.display_devices, node) {
+ if (child_dev_is_dp_dual_mode(&devdata->child, port))
return true;
}
@@ -2159,12 +2350,12 @@ bool intel_bios_is_port_dp_dual_mode(struct drm_i915_private *dev_priv,
bool intel_bios_is_dsi_present(struct drm_i915_private *dev_priv,
enum port *port)
{
+ const struct display_device_data *devdata;
const struct child_device_config *child;
u8 dvo_port;
- int i;
- for (i = 0; i < dev_priv->vbt.child_dev_num; i++) {
- child = dev_priv->vbt.child_dev + i;
+ list_for_each_entry(devdata, &dev_priv->vbt.display_devices, node) {
+ child = &devdata->child;
if (!(child->device_type & DEVICE_TYPE_MIPI_OUTPUT))
continue;
@@ -2188,6 +2379,104 @@ bool intel_bios_is_dsi_present(struct drm_i915_private *dev_priv,
return false;
}
+static void fill_dsc(struct intel_crtc_state *crtc_state,
+ struct dsc_compression_parameters_entry *dsc,
+ int dsc_max_bpc)
+{
+ struct drm_dsc_config *vdsc_cfg = &crtc_state->dsc.config;
+ int bpc = 8;
+
+ vdsc_cfg->dsc_version_major = dsc->version_major;
+ vdsc_cfg->dsc_version_minor = dsc->version_minor;
+
+ if (dsc->support_12bpc && dsc_max_bpc >= 12)
+ bpc = 12;
+ else if (dsc->support_10bpc && dsc_max_bpc >= 10)
+ bpc = 10;
+ else if (dsc->support_8bpc && dsc_max_bpc >= 8)
+ bpc = 8;
+ else
+ DRM_DEBUG_KMS("VBT: Unsupported BPC %d for DCS\n",
+ dsc_max_bpc);
+
+ crtc_state->pipe_bpp = bpc * 3;
+
+ crtc_state->dsc.compressed_bpp = min(crtc_state->pipe_bpp,
+ VBT_DSC_MAX_BPP(dsc->max_bpp));
+
+ /*
+ * FIXME: This is ugly, and slice count should take DSC engine
+ * throughput etc. into account.
+ *
+ * Also, per spec DSI supports 1, 2, 3 or 4 horizontal slices.
+ */
+ if (dsc->slices_per_line & BIT(2)) {
+ crtc_state->dsc.slice_count = 4;
+ } else if (dsc->slices_per_line & BIT(1)) {
+ crtc_state->dsc.slice_count = 2;
+ } else {
+ /* FIXME */
+ if (!(dsc->slices_per_line & BIT(0)))
+ DRM_DEBUG_KMS("VBT: Unsupported DSC slice count for DSI\n");
+
+ crtc_state->dsc.slice_count = 1;
+ }
+
+ if (crtc_state->hw.adjusted_mode.crtc_hdisplay %
+ crtc_state->dsc.slice_count != 0)
+ DRM_DEBUG_KMS("VBT: DSC hdisplay %d not divisible by slice count %d\n",
+ crtc_state->hw.adjusted_mode.crtc_hdisplay,
+ crtc_state->dsc.slice_count);
+
+ /*
+ * FIXME: Use VBT rc_buffer_block_size and rc_buffer_size for the
+ * implementation specific physical rate buffer size. Currently we use
+ * the required rate buffer model size calculated in
+ * drm_dsc_compute_rc_parameters() according to VESA DSC Annex E.
+ *
+ * The VBT rc_buffer_block_size and rc_buffer_size definitions
+ * correspond to DP 1.4 DPCD offsets 0x62 and 0x63. The DP DSC
+ * implementation should also use the DPCD (or perhaps VBT for eDP)
+ * provided value for the buffer size.
+ */
+
+ /* FIXME: DSI spec says bpc + 1 for this one */
+ vdsc_cfg->line_buf_depth = VBT_DSC_LINE_BUFFER_DEPTH(dsc->line_buffer_depth);
+
+ vdsc_cfg->block_pred_enable = dsc->block_prediction_enable;
+
+ vdsc_cfg->slice_height = dsc->slice_height;
+}
+
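slices_per_line is a capability bitmask rather than a count: BIT(2), BIT(1) and BIT(0) advertise support for 4, 2 and 1 slices, and fill_dsc() picks the largest supported value, falling back to 1. A small decode sketch with a hypothetical mask:

#include <stdio.h>

#define BIT(n) (1u << (n))

static int pick_slice_count(unsigned int slices_per_line)
{
	if (slices_per_line & BIT(2))
		return 4;
	if (slices_per_line & BIT(1))
		return 2;
	/* Falls back to 1 even if BIT(0) is clear, as the driver does. */
	return 1;
}

int main(void)
{
	printf("%d\n", pick_slice_count(BIT(1) | BIT(0))); /* prints 2 */
	return 0;
}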
+/* FIXME: initially DSI specific */
+bool intel_bios_get_dsc_params(struct intel_encoder *encoder,
+ struct intel_crtc_state *crtc_state,
+ int dsc_max_bpc)
+{
+ struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ const struct display_device_data *devdata;
+ const struct child_device_config *child;
+
+ list_for_each_entry(devdata, &i915->vbt.display_devices, node) {
+ child = &devdata->child;
+
+ if (!(child->device_type & DEVICE_TYPE_MIPI_OUTPUT))
+ continue;
+
+ if (child->dvo_port - DVO_PORT_MIPIA == encoder->port) {
+ if (!devdata->dsc)
+ return false;
+
+ if (crtc_state)
+ fill_dsc(crtc_state, devdata->dsc, dsc_max_bpc);
+
+ return true;
+ }
+ }
+
+ return false;
+}
+
/**
* intel_bios_is_port_hpd_inverted - is HPD inverted for %port
* @i915: i915 device instance
diff --git a/drivers/gpu/drm/i915/display/intel_bios.h b/drivers/gpu/drm/i915/display/intel_bios.h
index 98f064828a57..d6a0c29d37ac 100644
--- a/drivers/gpu/drm/i915/display/intel_bios.h
+++ b/drivers/gpu/drm/i915/display/intel_bios.h
@@ -35,6 +35,8 @@
#include <drm/i915_drm.h>
struct drm_i915_private;
+struct intel_crtc_state;
+struct intel_encoder;
enum port;
enum intel_backlight_type {
@@ -242,5 +244,8 @@ bool intel_bios_is_port_hpd_inverted(const struct drm_i915_private *i915,
bool intel_bios_is_lspcon_present(const struct drm_i915_private *i915,
enum port port);
enum aux_ch intel_bios_port_aux_ch(struct drm_i915_private *dev_priv, enum port port);
+bool intel_bios_get_dsc_params(struct intel_encoder *encoder,
+ struct intel_crtc_state *crtc_state,
+ int dsc_max_bpc);
#endif /* _INTEL_BIOS_H_ */
diff --git a/drivers/gpu/drm/i915/display/intel_bw.c b/drivers/gpu/drm/i915/display/intel_bw.c
index 22e83f857de8..b228671d5a5d 100644
--- a/drivers/gpu/drm/i915/display/intel_bw.c
+++ b/drivers/gpu/drm/i915/display/intel_bw.c
@@ -15,7 +15,7 @@ struct intel_qgv_point {
};
struct intel_qgv_info {
- struct intel_qgv_point points[3];
+ struct intel_qgv_point points[I915_NUM_QGV_POINTS];
u8 num_points;
u8 num_channels;
u8 t_bl;
@@ -264,6 +264,9 @@ static unsigned int icl_max_bw(struct drm_i915_private *dev_priv,
void intel_bw_init_hw(struct drm_i915_private *dev_priv)
{
+ if (!HAS_DISPLAY(dev_priv))
+ return;
+
if (IS_GEN(dev_priv, 12))
icl_get_bw_info(dev_priv, &tgl_sa_info);
else if (IS_GEN(dev_priv, 11))
@@ -273,17 +276,29 @@ void intel_bw_init_hw(struct drm_i915_private *dev_priv)
static unsigned int intel_max_data_rate(struct drm_i915_private *dev_priv,
int num_planes)
{
- if (INTEL_GEN(dev_priv) >= 11)
+ if (INTEL_GEN(dev_priv) >= 11) {
+ /*
+ * All bw groups have the same number of QGV points
+ */
+ const struct intel_bw_info *bi =
+ &dev_priv->max_bw[0];
+ unsigned int min_bw = UINT_MAX;
+ int i;
+
/*
* FIXME with SAGV disabled maybe we can assume
* point 1 will always be used? Seems to match
* the behaviour observed in the wild.
*/
- return min3(icl_max_bw(dev_priv, num_planes, 0),
- icl_max_bw(dev_priv, num_planes, 1),
- icl_max_bw(dev_priv, num_planes, 2));
- else
+ for (i = 0; i < bi->num_qgv_points; i++) {
+ unsigned int bw = icl_max_bw(dev_priv, num_planes, i);
+
+ min_bw = min(bw, min_bw);
+ }
+ return min_bw;
+ } else {
return UINT_MAX;
+ }
}
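Instead of a hard-coded min3() over exactly three QGV points, the loop takes the minimum across however many points the platform reports. The same reduction in isolation, with made-up per-point bandwidth limits:

#include <limits.h>
#include <stdio.h>

int main(void)
{
	/* Hypothetical per-QGV-point bandwidth limits. */
	unsigned int bw[] = { 38400, 25600, 51200, 30720 };
	unsigned int min_bw = UINT_MAX;

	for (size_t i = 0; i < sizeof(bw) / sizeof(bw[0]); i++)
		if (bw[i] < min_bw)
			min_bw = bw[i];

	printf("min_bw=%u\n", min_bw); /* 25600 */
	return 0;
}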
static unsigned int intel_bw_crtc_num_active_planes(const struct intel_crtc_state *crtc_state)
@@ -297,7 +312,7 @@ static unsigned int intel_bw_crtc_num_active_planes(const struct intel_crtc_stat
static unsigned int intel_bw_crtc_data_rate(const struct intel_crtc_state *crtc_state)
{
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
unsigned int data_rate = 0;
enum plane_id plane_id;
@@ -318,7 +333,7 @@ static unsigned int intel_bw_crtc_data_rate(const struct intel_crtc_state *crtc_
void intel_bw_crtc_update(struct intel_bw_state *bw_state,
const struct intel_crtc_state *crtc_state)
{
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
bw_state->data_rate[crtc->pipe] =
intel_bw_crtc_data_rate(crtc_state);
@@ -471,3 +486,8 @@ int intel_bw_init(struct drm_i915_private *dev_priv)
return 0;
}
+
+void intel_bw_cleanup(struct drm_i915_private *dev_priv)
+{
+ drm_atomic_private_obj_fini(&dev_priv->bw_obj);
+}
diff --git a/drivers/gpu/drm/i915/display/intel_bw.h b/drivers/gpu/drm/i915/display/intel_bw.h
index 9db10af012f4..20b9ad241802 100644
--- a/drivers/gpu/drm/i915/display/intel_bw.h
+++ b/drivers/gpu/drm/i915/display/intel_bw.h
@@ -25,6 +25,7 @@ struct intel_bw_state {
void intel_bw_init_hw(struct drm_i915_private *dev_priv);
int intel_bw_init(struct drm_i915_private *dev_priv);
+void intel_bw_cleanup(struct drm_i915_private *dev_priv);
int intel_bw_atomic_check(struct intel_atomic_state *state);
void intel_bw_crtc_update(struct intel_bw_state *bw_state,
const struct intel_crtc_state *crtc_state);
diff --git a/drivers/gpu/drm/i915/display/intel_cdclk.c b/drivers/gpu/drm/i915/display/intel_cdclk.c
index ed8c7ce62119..0ce5926006ca 100644
--- a/drivers/gpu/drm/i915/display/intel_cdclk.c
+++ b/drivers/gpu/drm/i915/display/intel_cdclk.c
@@ -1904,7 +1904,7 @@ intel_set_cdclk_post_plane_update(struct drm_i915_private *dev_priv,
static int intel_pixel_rate_to_cdclk(const struct intel_crtc_state *crtc_state)
{
- struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev);
+ struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
int pixel_rate = crtc_state->pixel_rate;
if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv))
@@ -1922,7 +1922,7 @@ static int intel_pixel_rate_to_cdclk(const struct intel_crtc_state *crtc_state)
static int intel_planes_min_cdclk(const struct intel_crtc_state *crtc_state)
{
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
struct intel_plane *plane;
int min_cdclk = 0;
@@ -1936,10 +1936,10 @@ static int intel_planes_min_cdclk(const struct intel_crtc_state *crtc_state)
int intel_crtc_compute_min_cdclk(const struct intel_crtc_state *crtc_state)
{
struct drm_i915_private *dev_priv =
- to_i915(crtc_state->base.crtc->dev);
+ to_i915(crtc_state->uapi.crtc->dev);
int min_cdclk;
- if (!crtc_state->base.enable)
+ if (!crtc_state->hw.enable)
return 0;
min_cdclk = intel_pixel_rate_to_cdclk(crtc_state);
@@ -2004,6 +2004,18 @@ int intel_crtc_compute_min_cdclk(const struct intel_crtc_state *crtc_state)
/* Account for additional needs from the planes */
min_cdclk = max(intel_planes_min_cdclk(crtc_state), min_cdclk);
+ /*
+ * HACK. On TGL platforms we currently calculate
+ * min_cdclk based on pixel_rate divided by 2,
+ * also accounting for plane requirements. However,
+ * in some cases the lowest possible CDCLK doesn't
+ * work and causes underruns.
+ * To be explicit: this is currently a hack rather
+ * than a final solution.
+ */
+ if (IS_TIGERLAKE(dev_priv))
+ min_cdclk = max(min_cdclk, (int)crtc_state->pixel_rate);
+
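Numerically, with a hypothetical pixel_rate of 600000 kHz, the generic path may settle on a min_cdclk around 300000 kHz; the clamp above raises it back to the full 600000 kHz on Tiger Lake:

#include <stdio.h>

#define MAX(a, b) ((a) > (b) ? (a) : (b))

int main(void)
{
	int pixel_rate = 600000;        /* kHz, hypothetical */
	int min_cdclk = pixel_rate / 2; /* pre-clamp estimate */

	min_cdclk = MAX(min_cdclk, pixel_rate); /* the TGL workaround */
	printf("min_cdclk=%d kHz\n", min_cdclk); /* 600000 */
	return 0;
}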
if (min_cdclk > dev_priv->max_cdclk_freq) {
DRM_DEBUG_KMS("required cdclk (%d kHz) exceeds max (%d kHz)\n",
min_cdclk, dev_priv->max_cdclk_freq);
@@ -2076,7 +2088,7 @@ static int bxt_compute_min_voltage_level(struct intel_atomic_state *state)
for_each_new_intel_crtc_in_state(state, crtc, crtc_state, i) {
int ret;
- if (crtc_state->base.enable)
+ if (crtc_state->hw.enable)
min_voltage_level = crtc_state->min_voltage_level;
else
min_voltage_level = 0;
@@ -2170,7 +2182,7 @@ static int skl_dpll0_vco(struct intel_atomic_state *state)
vco = dev_priv->skl_preferred_vco_freq;
for_each_new_intel_crtc_in_state(state, crtc, crtc_state, i) {
- if (!crtc_state->base.enable)
+ if (!crtc_state->hw.enable)
continue;
if (!intel_crtc_has_type(crtc_state, INTEL_OUTPUT_EDP))
@@ -2283,11 +2295,11 @@ static int intel_modeset_all_pipes(struct intel_atomic_state *state)
if (IS_ERR(crtc_state))
return PTR_ERR(crtc_state);
- if (!crtc_state->base.active ||
- drm_atomic_crtc_needs_modeset(&crtc_state->base))
+ if (!crtc_state->hw.active ||
+ drm_atomic_crtc_needs_modeset(&crtc_state->uapi))
continue;
- crtc_state->base.mode_changed = true;
+ crtc_state->uapi.mode_changed = true;
ret = drm_atomic_add_affected_connectors(&state->base,
&crtc->base);
@@ -2368,7 +2380,7 @@ int intel_modeset_calc_cdclk(struct intel_atomic_state *state)
if (IS_ERR(crtc_state))
return PTR_ERR(crtc_state);
- if (drm_atomic_crtc_needs_modeset(&crtc_state->base))
+ if (drm_atomic_crtc_needs_modeset(&crtc_state->uapi))
pipe = INVALID_PIPE;
} else {
pipe = INVALID_PIPE;
diff --git a/drivers/gpu/drm/i915/display/intel_color.c b/drivers/gpu/drm/i915/display/intel_color.c
index aa3a063549c3..3980e8b50c28 100644
--- a/drivers/gpu/drm/i915/display/intel_color.c
+++ b/drivers/gpu/drm/i915/display/intel_color.c
@@ -117,10 +117,10 @@ static bool lut_is_legacy(const struct drm_property_blob *lut)
static bool crtc_state_is_legacy_gamma(const struct intel_crtc_state *crtc_state)
{
- return !crtc_state->base.degamma_lut &&
- !crtc_state->base.ctm &&
- crtc_state->base.gamma_lut &&
- lut_is_legacy(crtc_state->base.gamma_lut);
+ return !crtc_state->hw.degamma_lut &&
+ !crtc_state->hw.ctm &&
+ crtc_state->hw.gamma_lut &&
+ lut_is_legacy(crtc_state->hw.gamma_lut);
}
/*
@@ -205,7 +205,7 @@ static void icl_update_output_csc(struct intel_crtc *crtc,
static bool ilk_csc_limited_range(const struct intel_crtc_state *crtc_state)
{
- struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev);
+ struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
/*
* FIXME if there's a gamma LUT after the CSC, we should
@@ -219,7 +219,7 @@ static bool ilk_csc_limited_range(const struct intel_crtc_state *crtc_state)
static void ilk_csc_convert_ctm(const struct intel_crtc_state *crtc_state,
u16 coeffs[9])
{
- const struct drm_color_ctm *ctm = crtc_state->base.ctm->data;
+ const struct drm_color_ctm *ctm = crtc_state->hw.ctm->data;
const u64 *input;
u64 temp[9];
int i;
@@ -270,11 +270,11 @@ static void ilk_csc_convert_ctm(const struct intel_crtc_state *crtc_state,
static void ilk_load_csc_matrix(const struct intel_crtc_state *crtc_state)
{
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
bool limited_color_range = ilk_csc_limited_range(crtc_state);
- if (crtc_state->base.ctm) {
+ if (crtc_state->hw.ctm) {
u16 coeff[9];
ilk_csc_convert_ctm(crtc_state, coeff);
@@ -309,10 +309,10 @@ static void ilk_load_csc_matrix(const struct intel_crtc_state *crtc_state)
static void icl_load_csc_matrix(const struct intel_crtc_state *crtc_state)
{
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
- if (crtc_state->base.ctm) {
+ if (crtc_state->hw.ctm) {
u16 coeff[9];
ilk_csc_convert_ctm(crtc_state, coeff);
@@ -338,12 +338,12 @@ static void icl_load_csc_matrix(const struct intel_crtc_state *crtc_state)
*/
static void cherryview_load_csc_matrix(const struct intel_crtc_state *crtc_state)
{
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
enum pipe pipe = crtc->pipe;
- if (crtc_state->base.ctm) {
- const struct drm_color_ctm *ctm = crtc_state->base.ctm->data;
+ if (crtc_state->hw.ctm) {
+ const struct drm_color_ctm *ctm = crtc_state->hw.ctm->data;
u16 coeffs[9] = {};
int i;
@@ -404,7 +404,7 @@ static u32 ilk_lut_10(const struct drm_color_lut *color)
static void i9xx_load_luts_internal(const struct intel_crtc_state *crtc_state,
const struct drm_property_blob *blob)
{
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
enum pipe pipe = crtc->pipe;
int i;
@@ -435,12 +435,12 @@ static void i9xx_load_luts_internal(const struct intel_crtc_state *crtc_state,
static void i9xx_load_luts(const struct intel_crtc_state *crtc_state)
{
- i9xx_load_luts_internal(crtc_state, crtc_state->base.gamma_lut);
+ i9xx_load_luts_internal(crtc_state, crtc_state->hw.gamma_lut);
}
static void i9xx_color_commit(const struct intel_crtc_state *crtc_state)
{
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
enum pipe pipe = crtc->pipe;
u32 val;
@@ -453,7 +453,7 @@ static void i9xx_color_commit(const struct intel_crtc_state *crtc_state)
static void ilk_color_commit(const struct intel_crtc_state *crtc_state)
{
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
enum pipe pipe = crtc->pipe;
u32 val;
@@ -468,7 +468,7 @@ static void ilk_color_commit(const struct intel_crtc_state *crtc_state)
static void hsw_color_commit(const struct intel_crtc_state *crtc_state)
{
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
I915_WRITE(GAMMA_MODE(crtc->pipe), crtc_state->gamma_mode);
@@ -478,7 +478,7 @@ static void hsw_color_commit(const struct intel_crtc_state *crtc_state)
static void skl_color_commit(const struct intel_crtc_state *crtc_state)
{
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
enum pipe pipe = crtc->pipe;
u32 val = 0;
@@ -524,8 +524,8 @@ static void i965_load_lut_10p6(struct intel_crtc *crtc,
static void i965_load_luts(const struct intel_crtc_state *crtc_state)
{
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
- const struct drm_property_blob *gamma_lut = crtc_state->base.gamma_lut;
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+ const struct drm_property_blob *gamma_lut = crtc_state->hw.gamma_lut;
if (crtc_state->gamma_mode == GAMMA_MODE_MODE_8BIT)
i9xx_load_luts(crtc_state);
@@ -547,8 +547,8 @@ static void ilk_load_lut_10(struct intel_crtc *crtc,
static void ilk_load_luts(const struct intel_crtc_state *crtc_state)
{
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
- const struct drm_property_blob *gamma_lut = crtc_state->base.gamma_lut;
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+ const struct drm_property_blob *gamma_lut = crtc_state->hw.gamma_lut;
if (crtc_state->gamma_mode == GAMMA_MODE_MODE_8BIT)
i9xx_load_luts(crtc_state);
@@ -654,9 +654,9 @@ static void ivb_load_lut_ext_max(struct intel_crtc *crtc)
static void ivb_load_luts(const struct intel_crtc_state *crtc_state)
{
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
- const struct drm_property_blob *gamma_lut = crtc_state->base.gamma_lut;
- const struct drm_property_blob *degamma_lut = crtc_state->base.degamma_lut;
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+ const struct drm_property_blob *gamma_lut = crtc_state->hw.gamma_lut;
+ const struct drm_property_blob *degamma_lut = crtc_state->hw.degamma_lut;
if (crtc_state->gamma_mode == GAMMA_MODE_MODE_8BIT) {
i9xx_load_luts(crtc_state);
@@ -677,9 +677,9 @@ static void ivb_load_luts(const struct intel_crtc_state *crtc_state)
static void bdw_load_luts(const struct intel_crtc_state *crtc_state)
{
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
- const struct drm_property_blob *gamma_lut = crtc_state->base.gamma_lut;
- const struct drm_property_blob *degamma_lut = crtc_state->base.degamma_lut;
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+ const struct drm_property_blob *gamma_lut = crtc_state->hw.gamma_lut;
+ const struct drm_property_blob *degamma_lut = crtc_state->hw.degamma_lut;
if (crtc_state->gamma_mode == GAMMA_MODE_MODE_8BIT) {
i9xx_load_luts(crtc_state);
@@ -700,11 +700,11 @@ static void bdw_load_luts(const struct intel_crtc_state *crtc_state)
static void glk_load_degamma_lut(const struct intel_crtc_state *crtc_state)
{
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
enum pipe pipe = crtc->pipe;
const u32 lut_size = INTEL_INFO(dev_priv)->color.degamma_lut_size;
- const struct drm_color_lut *lut = crtc_state->base.degamma_lut->data;
+ const struct drm_color_lut *lut = crtc_state->hw.degamma_lut->data;
u32 i;
/*
@@ -739,7 +739,7 @@ static void glk_load_degamma_lut(const struct intel_crtc_state *crtc_state)
static void glk_load_degamma_lut_linear(const struct intel_crtc_state *crtc_state)
{
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
enum pipe pipe = crtc->pipe;
const u32 lut_size = INTEL_INFO(dev_priv)->color.degamma_lut_size;
@@ -766,8 +766,8 @@ static void glk_load_degamma_lut_linear(const struct intel_crtc_state *crtc_stat
static void glk_load_luts(const struct intel_crtc_state *crtc_state)
{
- const struct drm_property_blob *gamma_lut = crtc_state->base.gamma_lut;
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+ const struct drm_property_blob *gamma_lut = crtc_state->hw.gamma_lut;
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
/*
* On GLK+ both pipe CSC and degamma LUT are controlled
@@ -777,7 +777,7 @@ static void glk_load_luts(const struct intel_crtc_state *crtc_state)
* the degamma LUT so that we don't have to reload
* it every time the pipe CSC is being enabled.
*/
- if (crtc_state->base.degamma_lut)
+ if (crtc_state->hw.degamma_lut)
glk_load_degamma_lut(crtc_state);
else
glk_load_degamma_lut_linear(crtc_state);
@@ -808,7 +808,7 @@ static void
icl_load_gcmax(const struct intel_crtc_state *crtc_state,
const struct drm_color_lut *color)
{
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
struct intel_dsb *dsb = intel_dsb_get(crtc);
enum pipe pipe = crtc->pipe;
@@ -822,8 +822,8 @@ icl_load_gcmax(const struct intel_crtc_state *crtc_state,
static void
icl_program_gamma_superfine_segment(const struct intel_crtc_state *crtc_state)
{
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
- const struct drm_property_blob *blob = crtc_state->base.gamma_lut;
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+ const struct drm_property_blob *blob = crtc_state->hw.gamma_lut;
const struct drm_color_lut *lut = blob->data;
struct intel_dsb *dsb = intel_dsb_get(crtc);
enum pipe pipe = crtc->pipe;
@@ -854,8 +854,8 @@ icl_program_gamma_superfine_segment(const struct intel_crtc_state *crtc_state)
static void
icl_program_gamma_multi_segment(const struct intel_crtc_state *crtc_state)
{
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
- const struct drm_property_blob *blob = crtc_state->base.gamma_lut;
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+ const struct drm_property_blob *blob = crtc_state->hw.gamma_lut;
const struct drm_color_lut *lut = blob->data;
const struct drm_color_lut *entry;
struct intel_dsb *dsb = intel_dsb_get(crtc);
@@ -910,11 +910,11 @@ icl_program_gamma_multi_segment(const struct intel_crtc_state *crtc_state)
static void icl_load_luts(const struct intel_crtc_state *crtc_state)
{
- const struct drm_property_blob *gamma_lut = crtc_state->base.gamma_lut;
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+ const struct drm_property_blob *gamma_lut = crtc_state->hw.gamma_lut;
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
struct intel_dsb *dsb = intel_dsb_get(crtc);
- if (crtc_state->base.degamma_lut)
+ if (crtc_state->hw.degamma_lut)
glk_load_degamma_lut(crtc_state);
switch (crtc_state->gamma_mode & GAMMA_MODE_MODE_MASK) {
@@ -990,9 +990,9 @@ static void chv_load_cgm_gamma(struct intel_crtc *crtc,
static void chv_load_luts(const struct intel_crtc_state *crtc_state)
{
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
- const struct drm_property_blob *gamma_lut = crtc_state->base.gamma_lut;
- const struct drm_property_blob *degamma_lut = crtc_state->base.degamma_lut;
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+ const struct drm_property_blob *gamma_lut = crtc_state->hw.gamma_lut;
+ const struct drm_property_blob *degamma_lut = crtc_state->hw.degamma_lut;
cherryview_load_csc_matrix(crtc_state);
@@ -1010,35 +1010,35 @@ static void chv_load_luts(const struct intel_crtc_state *crtc_state)
void intel_color_load_luts(const struct intel_crtc_state *crtc_state)
{
- struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev);
+ struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
dev_priv->display.load_luts(crtc_state);
}
void intel_color_commit(const struct intel_crtc_state *crtc_state)
{
- struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev);
+ struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
dev_priv->display.color_commit(crtc_state);
}
static bool intel_can_preload_luts(const struct intel_crtc_state *new_crtc_state)
{
- struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->base.crtc);
+ struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->uapi.crtc);
struct intel_atomic_state *state =
- to_intel_atomic_state(new_crtc_state->base.state);
+ to_intel_atomic_state(new_crtc_state->uapi.state);
const struct intel_crtc_state *old_crtc_state =
intel_atomic_get_old_crtc_state(state, crtc);
- return !old_crtc_state->base.gamma_lut &&
- !old_crtc_state->base.degamma_lut;
+ return !old_crtc_state->hw.gamma_lut &&
+ !old_crtc_state->hw.degamma_lut;
}
static bool chv_can_preload_luts(const struct intel_crtc_state *new_crtc_state)
{
- struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->base.crtc);
+ struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->uapi.crtc);
struct intel_atomic_state *state =
- to_intel_atomic_state(new_crtc_state->base.state);
+ to_intel_atomic_state(new_crtc_state->uapi.state);
const struct intel_crtc_state *old_crtc_state =
intel_atomic_get_old_crtc_state(state, crtc);
@@ -1050,14 +1050,14 @@ static bool chv_can_preload_luts(const struct intel_crtc_state *new_crtc_state)
if (old_crtc_state->cgm_mode || new_crtc_state->cgm_mode)
return false;
- return !old_crtc_state->base.gamma_lut;
+ return !old_crtc_state->hw.gamma_lut;
}
static bool glk_can_preload_luts(const struct intel_crtc_state *new_crtc_state)
{
- struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->base.crtc);
+ struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->uapi.crtc);
struct intel_atomic_state *state =
- to_intel_atomic_state(new_crtc_state->base.state);
+ to_intel_atomic_state(new_crtc_state->uapi.state);
const struct intel_crtc_state *old_crtc_state =
intel_atomic_get_old_crtc_state(state, crtc);
@@ -1068,19 +1068,19 @@ static bool glk_can_preload_luts(const struct intel_crtc_state *new_crtc_state)
* linear hardware degamma mid scanout.
*/
return !old_crtc_state->csc_enable &&
- !old_crtc_state->base.gamma_lut;
+ !old_crtc_state->hw.gamma_lut;
}
int intel_color_check(struct intel_crtc_state *crtc_state)
{
- struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev);
+ struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
return dev_priv->display.color_check(crtc_state);
}
void intel_color_get_config(struct intel_crtc_state *crtc_state)
{
- struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev);
+ struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
if (dev_priv->display.read_luts)
dev_priv->display.read_luts(crtc_state);
@@ -1104,16 +1104,16 @@ static bool need_plane_update(struct intel_plane *plane,
static int
intel_color_add_affected_planes(struct intel_crtc_state *new_crtc_state)
{
- struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->base.crtc);
+ struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->uapi.crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
struct intel_atomic_state *state =
- to_intel_atomic_state(new_crtc_state->base.state);
+ to_intel_atomic_state(new_crtc_state->uapi.state);
const struct intel_crtc_state *old_crtc_state =
intel_atomic_get_old_crtc_state(state, crtc);
struct intel_plane *plane;
- if (!new_crtc_state->base.active ||
- drm_atomic_crtc_needs_modeset(&new_crtc_state->base))
+ if (!new_crtc_state->hw.active ||
+ drm_atomic_crtc_needs_modeset(&new_crtc_state->uapi))
return 0;
if (new_crtc_state->gamma_enable == old_crtc_state->gamma_enable &&
@@ -1155,9 +1155,9 @@ static int check_lut_size(const struct drm_property_blob *lut, int expected)
static int check_luts(const struct intel_crtc_state *crtc_state)
{
- struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev);
- const struct drm_property_blob *gamma_lut = crtc_state->base.gamma_lut;
- const struct drm_property_blob *degamma_lut = crtc_state->base.degamma_lut;
+ struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
+ const struct drm_property_blob *gamma_lut = crtc_state->hw.gamma_lut;
+ const struct drm_property_blob *degamma_lut = crtc_state->hw.degamma_lut;
int gamma_length, degamma_length;
u32 gamma_tests, degamma_tests;
@@ -1205,7 +1205,7 @@ static int i9xx_color_check(struct intel_crtc_state *crtc_state)
return ret;
crtc_state->gamma_enable =
- crtc_state->base.gamma_lut &&
+ crtc_state->hw.gamma_lut &&
!crtc_state->c8_planes;
crtc_state->gamma_mode = i9xx_gamma_mode(crtc_state);
@@ -1226,11 +1226,11 @@ static u32 chv_cgm_mode(const struct intel_crtc_state *crtc_state)
if (crtc_state_is_legacy_gamma(crtc_state))
return 0;
- if (crtc_state->base.degamma_lut)
+ if (crtc_state->hw.degamma_lut)
cgm_mode |= CGM_PIPE_MODE_DEGAMMA;
- if (crtc_state->base.ctm)
+ if (crtc_state->hw.ctm)
cgm_mode |= CGM_PIPE_MODE_CSC;
- if (crtc_state->base.gamma_lut)
+ if (crtc_state->hw.gamma_lut)
cgm_mode |= CGM_PIPE_MODE_GAMMA;
return cgm_mode;
@@ -1306,7 +1306,7 @@ static int ilk_color_check(struct intel_crtc_state *crtc_state)
return ret;
crtc_state->gamma_enable =
- crtc_state->base.gamma_lut &&
+ crtc_state->hw.gamma_lut &&
!crtc_state->c8_planes;
/*
@@ -1334,8 +1334,8 @@ static u32 ivb_gamma_mode(const struct intel_crtc_state *crtc_state)
if (!crtc_state->gamma_enable ||
crtc_state_is_legacy_gamma(crtc_state))
return GAMMA_MODE_MODE_8BIT;
- else if (crtc_state->base.gamma_lut &&
- crtc_state->base.degamma_lut)
+ else if (crtc_state->hw.gamma_lut &&
+ crtc_state->hw.degamma_lut)
return GAMMA_MODE_MODE_SPLIT;
else
return GAMMA_MODE_MODE_10BIT;
@@ -1349,7 +1349,7 @@ static u32 ivb_csc_mode(const struct intel_crtc_state *crtc_state)
* CSC comes after the LUT in degamma, RGB->YCbCr,
* and RGB full->limited range mode.
*/
- if (crtc_state->base.degamma_lut ||
+ if (crtc_state->hw.degamma_lut ||
crtc_state->output_format != INTEL_OUTPUT_FORMAT_RGB ||
limited_color_range)
return 0;
@@ -1367,13 +1367,13 @@ static int ivb_color_check(struct intel_crtc_state *crtc_state)
return ret;
crtc_state->gamma_enable =
- (crtc_state->base.gamma_lut ||
- crtc_state->base.degamma_lut) &&
+ (crtc_state->hw.gamma_lut ||
+ crtc_state->hw.degamma_lut) &&
!crtc_state->c8_planes;
crtc_state->csc_enable =
crtc_state->output_format != INTEL_OUTPUT_FORMAT_RGB ||
- crtc_state->base.ctm || limited_color_range;
+ crtc_state->hw.ctm || limited_color_range;
crtc_state->gamma_mode = ivb_gamma_mode(crtc_state);
@@ -1406,14 +1406,14 @@ static int glk_color_check(struct intel_crtc_state *crtc_state)
return ret;
crtc_state->gamma_enable =
- crtc_state->base.gamma_lut &&
+ crtc_state->hw.gamma_lut &&
!crtc_state->c8_planes;
/* On GLK+ degamma LUT is controlled by csc_enable */
crtc_state->csc_enable =
- crtc_state->base.degamma_lut ||
+ crtc_state->hw.degamma_lut ||
crtc_state->output_format != INTEL_OUTPUT_FORMAT_RGB ||
- crtc_state->base.ctm || crtc_state->limited_color_range;
+ crtc_state->hw.ctm || crtc_state->limited_color_range;
crtc_state->gamma_mode = glk_gamma_mode(crtc_state);
@@ -1432,14 +1432,14 @@ static u32 icl_gamma_mode(const struct intel_crtc_state *crtc_state)
{
u32 gamma_mode = 0;
- if (crtc_state->base.degamma_lut)
+ if (crtc_state->hw.degamma_lut)
gamma_mode |= PRE_CSC_GAMMA_ENABLE;
- if (crtc_state->base.gamma_lut &&
+ if (crtc_state->hw.gamma_lut &&
!crtc_state->c8_planes)
gamma_mode |= POST_CSC_GAMMA_ENABLE;
- if (!crtc_state->base.gamma_lut ||
+ if (!crtc_state->hw.gamma_lut ||
crtc_state_is_legacy_gamma(crtc_state))
gamma_mode |= GAMMA_MODE_MODE_8BIT;
else
@@ -1452,7 +1452,7 @@ static u32 icl_csc_mode(const struct intel_crtc_state *crtc_state)
{
u32 csc_mode = 0;
- if (crtc_state->base.ctm)
+ if (crtc_state->hw.ctm)
csc_mode |= ICL_CSC_ENABLE;
if (crtc_state->output_format != INTEL_OUTPUT_FORMAT_RGB ||
@@ -1540,7 +1540,7 @@ static int glk_gamma_precision(const struct intel_crtc_state *crtc_state)
int intel_color_get_gamma_bit_precision(const struct intel_crtc_state *crtc_state)
{
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
if (HAS_GMCH(dev_priv)) {
@@ -1646,7 +1646,7 @@ static u32 intel_color_lut_pack(u32 val, u32 bit_precision)
static struct drm_property_blob *
i9xx_read_lut_8(const struct intel_crtc_state *crtc_state)
{
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
enum pipe pipe = crtc->pipe;
struct drm_property_blob *blob;
@@ -1683,13 +1683,13 @@ static void i9xx_read_luts(struct intel_crtc_state *crtc_state)
if (!crtc_state->gamma_enable)
return;
- crtc_state->base.gamma_lut = i9xx_read_lut_8(crtc_state);
+ crtc_state->hw.gamma_lut = i9xx_read_lut_8(crtc_state);
}
static struct drm_property_blob *
i965_read_lut_10p6(const struct intel_crtc_state *crtc_state)
{
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
u32 lut_size = INTEL_INFO(dev_priv)->color.gamma_lut_size;
enum pipe pipe = crtc->pipe;
@@ -1733,15 +1733,15 @@ static void i965_read_luts(struct intel_crtc_state *crtc_state)
return;
if (crtc_state->gamma_mode == GAMMA_MODE_MODE_8BIT)
- crtc_state->base.gamma_lut = i9xx_read_lut_8(crtc_state);
+ crtc_state->hw.gamma_lut = i9xx_read_lut_8(crtc_state);
else
- crtc_state->base.gamma_lut = i965_read_lut_10p6(crtc_state);
+ crtc_state->hw.gamma_lut = i965_read_lut_10p6(crtc_state);
}
static struct drm_property_blob *
chv_read_cgm_lut(const struct intel_crtc_state *crtc_state)
{
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
u32 lut_size = INTEL_INFO(dev_priv)->color.gamma_lut_size;
enum pipe pipe = crtc->pipe;
@@ -1775,7 +1775,7 @@ chv_read_cgm_lut(const struct intel_crtc_state *crtc_state)
static void chv_read_luts(struct intel_crtc_state *crtc_state)
{
if (crtc_state->cgm_mode & CGM_PIPE_MODE_GAMMA)
- crtc_state->base.gamma_lut = chv_read_cgm_lut(crtc_state);
+ crtc_state->hw.gamma_lut = chv_read_cgm_lut(crtc_state);
else
i965_read_luts(crtc_state);
}
@@ -1783,7 +1783,7 @@ static void chv_read_luts(struct intel_crtc_state *crtc_state)
static struct drm_property_blob *
ilk_read_lut_10(const struct intel_crtc_state *crtc_state)
{
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
u32 lut_size = INTEL_INFO(dev_priv)->color.gamma_lut_size;
enum pipe pipe = crtc->pipe;
@@ -1822,15 +1822,15 @@ static void ilk_read_luts(struct intel_crtc_state *crtc_state)
return;
if (crtc_state->gamma_mode == GAMMA_MODE_MODE_8BIT)
- crtc_state->base.gamma_lut = i9xx_read_lut_8(crtc_state);
+ crtc_state->hw.gamma_lut = i9xx_read_lut_8(crtc_state);
else
- crtc_state->base.gamma_lut = ilk_read_lut_10(crtc_state);
+ crtc_state->hw.gamma_lut = ilk_read_lut_10(crtc_state);
}
static struct drm_property_blob *
glk_read_lut_10(const struct intel_crtc_state *crtc_state, u32 prec_index)
{
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
int hw_lut_size = ivb_lut_10_size(prec_index);
enum pipe pipe = crtc->pipe;
@@ -1871,9 +1871,9 @@ static void glk_read_luts(struct intel_crtc_state *crtc_state)
return;
if (crtc_state->gamma_mode == GAMMA_MODE_MODE_8BIT)
- crtc_state->base.gamma_lut = i9xx_read_lut_8(crtc_state);
+ crtc_state->hw.gamma_lut = i9xx_read_lut_8(crtc_state);
else
- crtc_state->base.gamma_lut = glk_read_lut_10(crtc_state, PAL_PREC_INDEX_VALUE(0));
+ crtc_state->hw.gamma_lut = glk_read_lut_10(crtc_state, PAL_PREC_INDEX_VALUE(0));
}
void intel_color_init(struct intel_crtc *crtc)
diff --git a/drivers/gpu/drm/i915/display/intel_crt.c b/drivers/gpu/drm/i915/display/intel_crt.c
index 39cc6d79dc85..f976b800b245 100644
--- a/drivers/gpu/drm/i915/display/intel_crt.c
+++ b/drivers/gpu/drm/i915/display/intel_crt.c
@@ -65,7 +65,7 @@ static struct intel_crt *intel_encoder_to_crt(struct intel_encoder *encoder)
return container_of(encoder, struct intel_crt, base);
}
-static struct intel_crt *intel_attached_crt(struct drm_connector *connector)
+static struct intel_crt *intel_attached_crt(struct intel_connector *connector)
{
return intel_encoder_to_crt(intel_attached_encoder(connector));
}
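
The connector helpers in this file now take intel_connector instead of drm_connector, with callers doing the to_intel_connector() upcast explicitly. The conversion leans on the usual embedded-base idiom; a sketch of the pattern, with invented names mirroring the driver's to_intel_connector():

#include <linux/kernel.h>	/* container_of() */
#include <drm/drm_connector.h>

struct sketch_connector {
	struct drm_connector base;	/* must be embedded, not pointed to */
	/* driver-private fields follow */
};

static inline struct sketch_connector *
to_sketch_connector(struct drm_connector *connector)
{
	return container_of(connector, struct sketch_connector, base);
}
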
@@ -132,9 +132,9 @@ static void intel_crt_get_config(struct intel_encoder *encoder,
{
pipe_config->output_types |= BIT(INTEL_OUTPUT_ANALOG);
- pipe_config->base.adjusted_mode.flags |= intel_crt_get_flags(encoder);
+ pipe_config->hw.adjusted_mode.flags |= intel_crt_get_flags(encoder);
- pipe_config->base.adjusted_mode.crtc_clock = pipe_config->port_clock;
+ pipe_config->hw.adjusted_mode.crtc_clock = pipe_config->port_clock;
}
static void hsw_crt_get_config(struct intel_encoder *encoder,
@@ -144,13 +144,13 @@ static void hsw_crt_get_config(struct intel_encoder *encoder,
intel_ddi_get_config(encoder, pipe_config);
- pipe_config->base.adjusted_mode.flags &= ~(DRM_MODE_FLAG_PHSYNC |
+ pipe_config->hw.adjusted_mode.flags &= ~(DRM_MODE_FLAG_PHSYNC |
DRM_MODE_FLAG_NHSYNC |
DRM_MODE_FLAG_PVSYNC |
DRM_MODE_FLAG_NVSYNC);
- pipe_config->base.adjusted_mode.flags |= intel_crt_get_flags(encoder);
+ pipe_config->hw.adjusted_mode.flags |= intel_crt_get_flags(encoder);
- pipe_config->base.adjusted_mode.crtc_clock = lpt_get_iclkip(dev_priv);
+ pipe_config->hw.adjusted_mode.crtc_clock = lpt_get_iclkip(dev_priv);
}
/* Note: The caller is required to filter out dpms modes not supported by the
@@ -161,8 +161,8 @@ static void intel_crt_set_dpms(struct intel_encoder *encoder,
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_crt *crt = intel_encoder_to_crt(encoder);
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
- const struct drm_display_mode *adjusted_mode = &crtc_state->base.adjusted_mode;
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+ const struct drm_display_mode *adjusted_mode = &crtc_state->hw.adjusted_mode;
u32 adpa;
if (INTEL_GEN(dev_priv) >= 5)
@@ -241,6 +241,14 @@ static void hsw_post_disable_crt(struct intel_encoder *encoder,
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ intel_crtc_vblank_off(old_crtc_state);
+
+ intel_disable_pipe(old_crtc_state);
+
+ intel_ddi_disable_transcoder_func(old_crtc_state);
+
+ ilk_pfit_disable(old_crtc_state);
+
intel_ddi_disable_pipe_clock(old_crtc_state);
pch_post_disable_crt(encoder, old_crtc_state, old_conn_state);
@@ -271,14 +279,14 @@ static void hsw_pre_enable_crt(struct intel_encoder *encoder,
const struct drm_connector_state *conn_state)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
enum pipe pipe = crtc->pipe;
WARN_ON(!crtc_state->has_pch_encoder);
intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);
- dev_priv->display.fdi_link_train(crtc, crtc_state);
+ hsw_fdi_link_train(encoder, crtc_state);
intel_ddi_enable_pipe_clock(crtc_state);
}
@@ -288,7 +296,7 @@ static void hsw_enable_crt(struct intel_encoder *encoder,
const struct drm_connector_state *conn_state)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
enum pipe pipe = crtc->pipe;
WARN_ON(!crtc_state->has_pch_encoder);
@@ -343,7 +351,7 @@ intel_crt_mode_valid(struct drm_connector *connector,
/* The FDI receiver on LPT only supports 8bpc and only has 2 lanes. */
if (HAS_PCH_LPT(dev_priv) &&
- (ironlake_get_lanes_required(mode->clock, 270000, 24) > 2))
+ ilk_get_lanes_required(mode->clock, 270000, 24) > 2)
return MODE_CLOCK_HIGH;
/* HSW/BDW FDI limited to 4k */
@@ -358,7 +366,7 @@ static int intel_crt_compute_config(struct intel_encoder *encoder,
struct drm_connector_state *conn_state)
{
struct drm_display_mode *adjusted_mode =
- &pipe_config->base.adjusted_mode;
+ &pipe_config->hw.adjusted_mode;
if (adjusted_mode->flags & DRM_MODE_FLAG_DBLSCAN)
return -EINVAL;
@@ -373,7 +381,7 @@ static int pch_crt_compute_config(struct intel_encoder *encoder,
struct drm_connector_state *conn_state)
{
struct drm_display_mode *adjusted_mode =
- &pipe_config->base.adjusted_mode;
+ &pipe_config->hw.adjusted_mode;
if (adjusted_mode->flags & DRM_MODE_FLAG_DBLSCAN)
return -EINVAL;
@@ -390,7 +398,7 @@ static int hsw_crt_compute_config(struct intel_encoder *encoder,
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct drm_display_mode *adjusted_mode =
- &pipe_config->base.adjusted_mode;
+ &pipe_config->hw.adjusted_mode;
if (adjusted_mode->flags & DRM_MODE_FLAG_DBLSCAN)
return -EINVAL;
@@ -419,10 +427,10 @@ static int hsw_crt_compute_config(struct intel_encoder *encoder,
return 0;
}
-static bool intel_ironlake_crt_detect_hotplug(struct drm_connector *connector)
+static bool ilk_crt_detect_hotplug(struct drm_connector *connector)
{
struct drm_device *dev = connector->dev;
- struct intel_crt *crt = intel_attached_crt(connector);
+ struct intel_crt *crt = intel_attached_crt(to_intel_connector(connector));
struct drm_i915_private *dev_priv = to_i915(dev);
u32 adpa;
bool ret;
@@ -432,7 +440,7 @@ static bool intel_ironlake_crt_detect_hotplug(struct drm_connector *connector)
bool turn_off_dac = HAS_PCH_SPLIT(dev_priv);
u32 save_adpa;
- crt->force_hotplug_required = 0;
+ crt->force_hotplug_required = false;
save_adpa = adpa = I915_READ(crt->adpa_reg);
DRM_DEBUG_KMS("trigger hotplug detect cycle: adpa=0x%x\n", adpa);
@@ -469,7 +477,7 @@ static bool intel_ironlake_crt_detect_hotplug(struct drm_connector *connector)
static bool valleyview_crt_detect_hotplug(struct drm_connector *connector)
{
struct drm_device *dev = connector->dev;
- struct intel_crt *crt = intel_attached_crt(connector);
+ struct intel_crt *crt = intel_attached_crt(to_intel_connector(connector));
struct drm_i915_private *dev_priv = to_i915(dev);
bool reenable_hpd;
u32 adpa;
@@ -527,7 +535,7 @@ static bool intel_crt_detect_hotplug(struct drm_connector *connector)
int i, tries = 0;
if (HAS_PCH_SPLIT(dev_priv))
- return intel_ironlake_crt_detect_hotplug(connector);
+ return ilk_crt_detect_hotplug(connector);
if (IS_VALLEYVIEW(dev_priv))
return valleyview_crt_detect_hotplug(connector);
@@ -601,7 +609,7 @@ static int intel_crt_ddc_get_modes(struct drm_connector *connector,
static bool intel_crt_detect_ddc(struct drm_connector *connector)
{
- struct intel_crt *crt = intel_attached_crt(connector);
+ struct intel_crt *crt = intel_attached_crt(to_intel_connector(connector));
struct drm_i915_private *dev_priv = to_i915(crt->base.base.dev);
struct edid *edid;
struct i2c_adapter *i2c;
@@ -787,7 +795,7 @@ intel_crt_detect(struct drm_connector *connector,
bool force)
{
struct drm_i915_private *dev_priv = to_i915(connector->dev);
- struct intel_crt *crt = intel_attached_crt(connector);
+ struct intel_crt *crt = intel_attached_crt(to_intel_connector(connector));
struct intel_encoder *intel_encoder = &crt->base;
intel_wakeref_t wakeref;
int status, ret;
@@ -878,7 +886,7 @@ static int intel_crt_get_modes(struct drm_connector *connector)
{
struct drm_device *dev = connector->dev;
struct drm_i915_private *dev_priv = to_i915(dev);
- struct intel_crt *crt = intel_attached_crt(connector);
+ struct intel_crt *crt = intel_attached_crt(to_intel_connector(connector));
struct intel_encoder *intel_encoder = &crt->base;
intel_wakeref_t wakeref;
struct i2c_adapter *i2c;
@@ -917,7 +925,7 @@ void intel_crt_reset(struct drm_encoder *encoder)
POSTING_READ(crt->adpa_reg);
DRM_DEBUG_KMS("crt adpa set to 0x%x\n", adpa);
- crt->force_hotplug_required = 1;
+ crt->force_hotplug_required = true;
}
}
@@ -1055,7 +1063,7 @@ void intel_crt_init(struct drm_i915_private *dev_priv)
/*
* Configure the automatic hotplug detection stuff
*/
- crt->force_hotplug_required = 0;
+ crt->force_hotplug_required = false;
/*
* TODO: find a proper way to discover whether we need to set the
diff --git a/drivers/gpu/drm/i915/display/intel_ddi.c b/drivers/gpu/drm/i915/display/intel_ddi.c
index 2a27fb5d7dc6..33f1dc3d7c1a 100644
--- a/drivers/gpu/drm/i915/display/intel_ddi.c
+++ b/drivers/gpu/drm/i915/display/intel_ddi.c
@@ -34,6 +34,7 @@
#include "intel_ddi.h"
#include "intel_display_types.h"
#include "intel_dp.h"
+#include "intel_dp_mst.h"
#include "intel_dp_link_training.h"
#include "intel_dpio_phy.h"
#include "intel_dsi.h"
@@ -902,11 +903,10 @@ icl_get_combo_buf_trans(struct drm_i915_private *dev_priv, int type, int rate,
static int intel_ddi_hdmi_level(struct drm_i915_private *dev_priv, enum port port)
{
+ struct ddi_vbt_port_info *port_info = &dev_priv->vbt.ddi_port_info[port];
int n_entries, level, default_entry;
enum phy phy = intel_port_to_phy(dev_priv, port);
- level = dev_priv->vbt.ddi_port_info[port].hdmi_level_shift;
-
if (INTEL_GEN(dev_priv) >= 12) {
if (intel_phy_is_combo(dev_priv, phy))
icl_get_combo_buf_trans(dev_priv, INTEL_OUTPUT_HDMI,
@@ -941,12 +941,14 @@ static int intel_ddi_hdmi_level(struct drm_i915_private *dev_priv, enum port por
return 0;
}
- /* Choose a good default if VBT is badly populated */
- if (level == HDMI_LEVEL_SHIFT_UNKNOWN || level >= n_entries)
- level = default_entry;
-
if (WARN_ON_ONCE(n_entries == 0))
return 0;
+
+ if (port_info->hdmi_level_shift_set)
+ level = port_info->hdmi_level_shift;
+ else
+ level = default_entry;
+
if (WARN_ON_ONCE(level >= n_entries))
level = n_entries - 1;
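
The rework above replaces the HDMI_LEVEL_SHIFT_UNKNOWN sentinel with an explicit hdmi_level_shift_set flag in the VBT port info, and moves the n_entries == 0 bail-out ahead of the selection. The resulting policy, reduced to a standalone sketch with illustrative names:

/* Pick the HDMI vswing level: trust the VBT only when it actually set
 * one, fall back to the platform default otherwise, and clamp to the
 * buf-trans table size in either case. */
static int sketch_pick_hdmi_level(bool vbt_level_set, int vbt_level,
				  int default_entry, int n_entries)
{
	int level = vbt_level_set ? vbt_level : default_entry;

	if (level >= n_entries)		/* badly populated VBT */
		level = n_entries - 1;

	return level;
}
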
@@ -1106,18 +1108,14 @@ static u32 icl_pll_to_ddi_clk_sel(struct intel_encoder *encoder,
* DDI A (which is used for eDP)
*/
-void hsw_fdi_link_train(struct intel_crtc *crtc,
+void hsw_fdi_link_train(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state)
{
- struct drm_device *dev = crtc->base.dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
- struct intel_encoder *encoder;
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
u32 temp, i, rx_ctl_val, ddi_pll_sel;
- for_each_encoder_on_crtc(dev, &crtc->base, encoder) {
- WARN_ON(encoder->type != INTEL_OUTPUT_ANALOG);
- intel_prepare_dp_ddi_buffers(encoder, crtc_state);
- }
+ intel_prepare_dp_ddi_buffers(encoder, crtc_state);
/* Set the FDI_RX_MISC pwrdn lanes and the 2 workarounds listed at the
* mode set "sequence for CRT port" document:
@@ -1240,9 +1238,9 @@ void hsw_fdi_link_train(struct intel_crtc *crtc,
static void intel_ddi_init_dp_buf_reg(struct intel_encoder *encoder)
{
- struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
+ struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
struct intel_digital_port *intel_dig_port =
- enc_to_dig_port(&encoder->base);
+ enc_to_dig_port(encoder);
intel_dp->DP = intel_dig_port->saved_port_bits |
DDI_BUF_CTL_ENABLE | DDI_BUF_TRANS_SELECT(0);
@@ -1542,7 +1540,7 @@ static void ddi_dotclock_get(struct intel_crtc_state *pipe_config)
if (pipe_config->pixel_multiplier)
dotclock /= pipe_config->pixel_multiplier;
- pipe_config->base.adjusted_mode.crtc_clock = dotclock;
+ pipe_config->hw.adjusted_mode.crtc_clock = dotclock;
}
static void icl_ddi_clock_get(struct intel_encoder *encoder,
@@ -1758,7 +1756,7 @@ static void intel_ddi_clock_get(struct intel_encoder *encoder,
void intel_ddi_set_dp_msa(const struct intel_crtc_state *crtc_state,
const struct drm_connector_state *conn_state)
{
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
u32 temp;
@@ -1815,22 +1813,6 @@ void intel_ddi_set_dp_msa(const struct intel_crtc_state *crtc_state,
I915_WRITE(TRANS_MSA_MISC(cpu_transcoder), temp);
}
-void intel_ddi_set_vc_payload_alloc(const struct intel_crtc_state *crtc_state,
- bool state)
-{
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
- enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
- u32 temp;
-
- temp = I915_READ(TRANS_DDI_FUNC_CTL(cpu_transcoder));
- if (state == true)
- temp |= TRANS_DDI_DP_VC_PAYLOAD_ALLOC;
- else
- temp &= ~TRANS_DDI_DP_VC_PAYLOAD_ALLOC;
- I915_WRITE(TRANS_DDI_FUNC_CTL(cpu_transcoder), temp);
-}
-
/*
* Returns the TRANS_DDI_FUNC_CTL value based on CRTC state.
*
@@ -1840,7 +1822,7 @@ void intel_ddi_set_vc_payload_alloc(const struct intel_crtc_state *crtc_state,
static u32
intel_ddi_transcoder_func_reg_val_get(const struct intel_crtc_state *crtc_state)
{
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
struct intel_encoder *encoder = intel_ddi_get_crtc_encoder(crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
enum pipe pipe = crtc->pipe;
@@ -1872,9 +1854,9 @@ intel_ddi_transcoder_func_reg_val_get(const struct intel_crtc_state *crtc_state)
BUG();
}
- if (crtc_state->base.adjusted_mode.flags & DRM_MODE_FLAG_PVSYNC)
+ if (crtc_state->hw.adjusted_mode.flags & DRM_MODE_FLAG_PVSYNC)
temp |= TRANS_DDI_PVSYNC;
- if (crtc_state->base.adjusted_mode.flags & DRM_MODE_FLAG_PHSYNC)
+ if (crtc_state->hw.adjusted_mode.flags & DRM_MODE_FLAG_PHSYNC)
temp |= TRANS_DDI_PHSYNC;
if (cpu_transcoder == TRANSCODER_EDP) {
@@ -1918,8 +1900,13 @@ intel_ddi_transcoder_func_reg_val_get(const struct intel_crtc_state *crtc_state)
temp |= TRANS_DDI_MODE_SELECT_DP_MST;
temp |= DDI_PORT_WIDTH(crtc_state->lane_count);
- if (INTEL_GEN(dev_priv) >= 12)
- temp |= TRANS_DDI_MST_TRANSPORT_SELECT(crtc_state->cpu_transcoder);
+ if (INTEL_GEN(dev_priv) >= 12) {
+ enum transcoder master;
+
+ master = crtc_state->mst_master_transcoder;
+ WARN_ON(master == INVALID_TRANSCODER);
+ temp |= TRANS_DDI_MST_TRANSPORT_SELECT(master);
+ }
} else {
temp |= TRANS_DDI_MODE_SELECT_DP_SST;
temp |= DDI_PORT_WIDTH(crtc_state->lane_count);
@@ -1930,12 +1917,14 @@ intel_ddi_transcoder_func_reg_val_get(const struct intel_crtc_state *crtc_state)
void intel_ddi_enable_transcoder_func(const struct intel_crtc_state *crtc_state)
{
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
u32 temp;
temp = intel_ddi_transcoder_func_reg_val_get(crtc_state);
+ if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DP_MST))
+ temp |= TRANS_DDI_DP_VC_PAYLOAD_ALLOC;
I915_WRITE(TRANS_DDI_FUNC_CTL(cpu_transcoder), temp);
}
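
With intel_ddi_set_vc_payload_alloc() deleted above, the VC payload bit is no longer toggled with a separate read-modify-write; it is OR'ed into the freshly computed TRANS_DDI_FUNC_CTL value and lands in the same single enabling write. A sketch of the resulting value computation; TRANS_DDI_DP_VC_PAYLOAD_ALLOC is the real register bit, the helper is invented:

static u32 sketch_func_ctl_enable_val(u32 func_ctl_val, bool is_mst)
{
	/* MST payload allocation rides along in the one enabling write */
	if (is_mst)
		func_ctl_val |= TRANS_DDI_DP_VC_PAYLOAD_ALLOC;

	return func_ctl_val;
}
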
@@ -1946,7 +1935,7 @@ void intel_ddi_enable_transcoder_func(const struct intel_crtc_state *crtc_state)
static void
intel_ddi_config_transcoder_func(const struct intel_crtc_state *crtc_state)
{
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
u32 temp;
@@ -1958,20 +1947,21 @@ intel_ddi_config_transcoder_func(const struct intel_crtc_state *crtc_state)
void intel_ddi_disable_transcoder_func(const struct intel_crtc_state *crtc_state)
{
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
- i915_reg_t reg = TRANS_DDI_FUNC_CTL(cpu_transcoder);
- u32 val = I915_READ(reg);
+ u32 val;
+
+ val = I915_READ(TRANS_DDI_FUNC_CTL(cpu_transcoder));
+ val &= ~TRANS_DDI_FUNC_ENABLE;
if (INTEL_GEN(dev_priv) >= 12) {
- val &= ~(TRANS_DDI_FUNC_ENABLE | TGL_TRANS_DDI_PORT_MASK |
- TRANS_DDI_DP_VC_PAYLOAD_ALLOC);
+ if (!intel_dp_mst_is_master_trans(crtc_state))
+ val &= ~TGL_TRANS_DDI_PORT_MASK;
} else {
- val &= ~(TRANS_DDI_FUNC_ENABLE | TRANS_DDI_PORT_MASK |
- TRANS_DDI_DP_VC_PAYLOAD_ALLOC);
+ val &= ~TRANS_DDI_PORT_MASK;
}
- I915_WRITE(reg, val);
+ I915_WRITE(TRANS_DDI_FUNC_CTL(cpu_transcoder), val);
if (dev_priv->quirks & QUIRK_INCREASE_DDI_DISABLED_TIME &&
intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI)) {
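
The rewritten disable path always clears the function-enable bit but, on gen12+, leaves the DDI port selection programmed when the transcoder is the MST master, since the master keeps routing for the remaining streams. The branch, reduced to a pure function; the mask macros are the real register definitions, everything else is illustrative:

static u32 sketch_func_ctl_disable(u32 val, bool gen12_plus,
				   bool is_mst_master)
{
	val &= ~TRANS_DDI_FUNC_ENABLE;

	/* the TGL+ MST master transcoder keeps its port selection */
	if (gen12_plus) {
		if (!is_mst_master)
			val &= ~TGL_TRANS_DDI_PORT_MASK;
	} else {
		val &= ~TRANS_DDI_PORT_MASK;
	}

	return val;
}
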
@@ -2234,7 +2224,7 @@ static void intel_ddi_get_power_domains(struct intel_encoder *encoder,
if (WARN_ON(intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DP_MST)))
return;
- dig_port = enc_to_dig_port(&encoder->base);
+ dig_port = enc_to_dig_port(encoder);
intel_display_power_get(dev_priv, dig_port->ddi_io_power_domain);
/*
@@ -2256,7 +2246,7 @@ static void intel_ddi_get_power_domains(struct intel_encoder *encoder,
void intel_ddi_enable_pipe_clock(const struct intel_crtc_state *crtc_state)
{
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
struct intel_encoder *encoder = intel_ddi_get_crtc_encoder(crtc);
enum port port = encoder->port;
@@ -2274,7 +2264,7 @@ void intel_ddi_enable_pipe_clock(const struct intel_crtc_state *crtc_state)
void intel_ddi_disable_pipe_clock(const struct intel_crtc_state *crtc_state)
{
- struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev);
+ struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
if (cpu_transcoder != TRANSCODER_EDP) {
@@ -2304,7 +2294,7 @@ static void _skl_ddi_set_iboost(struct drm_i915_private *dev_priv,
static void skl_ddi_set_iboost(struct intel_encoder *encoder,
int level, enum intel_output_type type)
{
- struct intel_digital_port *intel_dig_port = enc_to_dig_port(&encoder->base);
+ struct intel_digital_port *intel_dig_port = enc_to_dig_port(encoder);
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
enum port port = encoder->port;
u8 iboost;
@@ -2375,7 +2365,7 @@ static void bxt_ddi_vswing_sequence(struct intel_encoder *encoder,
u8 intel_ddi_dp_voltage_max(struct intel_encoder *encoder)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
+ struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
enum port port = encoder->port;
enum phy phy = intel_port_to_phy(dev_priv, port);
int n_entries;
@@ -2514,7 +2504,7 @@ static void cnl_ddi_vswing_sequence(struct intel_encoder *encoder,
width = 4;
rate = 0; /* Rate is always below 6 GHz for HDMI */
} else {
- struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
+ struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
width = intel_dp->lane_count;
rate = intel_dp->link_rate;
@@ -2640,7 +2630,7 @@ static void icl_combo_phy_ddi_vswing_sequence(struct intel_encoder *encoder,
width = 4;
/* Rate is always below 6 GHz for HDMI */
} else {
- struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
+ struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
width = intel_dp->lane_count;
rate = intel_dp->link_rate;
@@ -3016,11 +3006,38 @@ static void icl_unmap_plls_to_ports(struct intel_encoder *encoder)
mutex_unlock(&dev_priv->dpll_lock);
}
+static void icl_sanitize_port_clk_off(struct drm_i915_private *dev_priv,
+ u32 port_mask, bool ddi_clk_needed)
+{
+ enum port port;
+ u32 val;
+
+ val = I915_READ(ICL_DPCLKA_CFGCR0);
+ for_each_port_masked(port, port_mask) {
+ enum phy phy = intel_port_to_phy(dev_priv, port);
+ bool ddi_clk_off = val & icl_dpclka_cfgcr0_clk_off(dev_priv,
+ phy);
+
+ if (ddi_clk_needed == !ddi_clk_off)
+ continue;
+
+ /*
+ * Punt for now on the case where the clock is gated but would
+ * be needed by the port: something else is really broken then.
+ */
+ if (WARN_ON(ddi_clk_needed))
+ continue;
+
+ DRM_NOTE("PHY %c is disabled/in DSI mode with an ungated DDI clock, gate it\n",
+ phy_name(phy));
+ val |= icl_dpclka_cfgcr0_clk_off(dev_priv, phy);
+ I915_WRITE(ICL_DPCLKA_CFGCR0, val);
+ }
+}
+
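
icl_sanitize_port_clk_off() is a plain read-modify-write pass over the ports in the mask: a port is already consistent when its clock is ungated exactly when it is needed, a needed-but-gated clock is punted with a WARN, and only the stray ungated case is fixed up. A simplified standalone model of the loop; the real code maps each port to a PHY and uses per-PHY clk-off bits, so BIT(port) here is only a placeholder:

#include <linux/bitops.h>
#include <linux/bits.h>
#include <linux/bug.h>

static void sketch_sanitize_port_clk_off(unsigned long port_mask,
					 bool ddi_clk_needed, u32 *cfgcr0)
{
	unsigned long port;

	for_each_set_bit(port, &port_mask, 32) {
		bool ddi_clk_off = *cfgcr0 & BIT(port);

		if (ddi_clk_needed == !ddi_clk_off)
			continue;	/* already consistent */

		if (WARN_ON(ddi_clk_needed))
			continue;	/* gated but needed: deeper breakage */

		*cfgcr0 |= BIT(port);	/* gate the stray ungated clock */
	}
}
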
void icl_sanitize_encoder_pll_mapping(struct intel_encoder *encoder)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- u32 val;
- enum port port;
u32 port_mask;
bool ddi_clk_needed;
@@ -3069,29 +3086,7 @@ void icl_sanitize_encoder_pll_mapping(struct intel_encoder *encoder)
ddi_clk_needed = false;
}
- val = I915_READ(ICL_DPCLKA_CFGCR0);
- for_each_port_masked(port, port_mask) {
- enum phy phy = intel_port_to_phy(dev_priv, port);
-
- bool ddi_clk_ungated = !(val &
- icl_dpclka_cfgcr0_clk_off(dev_priv,
- phy));
-
- if (ddi_clk_needed == ddi_clk_ungated)
- continue;
-
- /*
- * Punt on the case now where clock is gated, but it would
- * be needed by the port. Something else is really broken then.
- */
- if (WARN_ON(ddi_clk_needed))
- continue;
-
- DRM_NOTE("PHY %c is disabled/in DSI mode with an ungated DDI clock, gate it\n",
- phy_name(port));
- val |= icl_dpclka_cfgcr0_clk_off(dev_priv, phy);
- I915_WRITE(ICL_DPCLKA_CFGCR0, val);
- }
+ icl_sanitize_port_clk_off(dev_priv, port_mask, ddi_clk_needed);
}
static void intel_ddi_clk_select(struct intel_encoder *encoder,
@@ -3173,57 +3168,6 @@ static void intel_ddi_clk_disable(struct intel_encoder *encoder)
}
static void
-icl_phy_set_clock_gating(struct intel_digital_port *dig_port, bool enable)
-{
- struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
- enum tc_port tc_port = intel_port_to_tc(dev_priv, dig_port->base.port);
- u32 val, bits;
- int ln;
-
- if (tc_port == PORT_TC_NONE)
- return;
-
- bits = MG_DP_MODE_CFG_TR2PWR_GATING | MG_DP_MODE_CFG_TRPWR_GATING |
- MG_DP_MODE_CFG_CLNPWR_GATING | MG_DP_MODE_CFG_DIGPWR_GATING |
- MG_DP_MODE_CFG_GAONPWR_GATING;
-
- for (ln = 0; ln < 2; ln++) {
- if (INTEL_GEN(dev_priv) >= 12) {
- I915_WRITE(HIP_INDEX_REG(tc_port), HIP_INDEX_VAL(tc_port, ln));
- val = I915_READ(DKL_DP_MODE(tc_port));
- } else {
- val = I915_READ(MG_DP_MODE(ln, tc_port));
- }
-
- if (enable)
- val |= bits;
- else
- val &= ~bits;
-
- if (INTEL_GEN(dev_priv) >= 12)
- I915_WRITE(DKL_DP_MODE(tc_port), val);
- else
- I915_WRITE(MG_DP_MODE(ln, tc_port), val);
- }
-
- if (INTEL_GEN(dev_priv) == 11) {
- bits = MG_MISC_SUS0_CFG_TR2PWR_GATING |
- MG_MISC_SUS0_CFG_CL2PWR_GATING |
- MG_MISC_SUS0_CFG_GAONPWR_GATING |
- MG_MISC_SUS0_CFG_TRPWR_GATING |
- MG_MISC_SUS0_CFG_CL1PWR_GATING |
- MG_MISC_SUS0_CFG_DGPWR_GATING;
-
- val = I915_READ(MG_MISC_SUS0(tc_port));
- if (enable)
- val |= (bits | MG_MISC_SUS0_SUSCLK_DYNCLKGATE_MODE(3));
- else
- val &= ~(bits | MG_MISC_SUS0_SUSCLK_DYNCLKGATE_MODE_MASK);
- I915_WRITE(MG_MISC_SUS0(tc_port), val);
- }
-}
-
-static void
icl_program_mg_dp_mode(struct intel_digital_port *intel_dig_port,
const struct intel_crtc_state *crtc_state)
{
@@ -3329,7 +3273,7 @@ static void intel_ddi_enable_fec(struct intel_encoder *encoder,
if (!crtc_state->fec_enable)
return;
- intel_dp = enc_to_intel_dp(&encoder->base);
+ intel_dp = enc_to_intel_dp(encoder);
val = I915_READ(intel_dp->regs.dp_tp_ctl);
val |= DP_TP_CTL_FEC_ENABLE;
I915_WRITE(intel_dp->regs.dp_tp_ctl, val);
@@ -3349,7 +3293,7 @@ static void intel_ddi_disable_fec_state(struct intel_encoder *encoder,
if (!crtc_state->fec_enable)
return;
- intel_dp = enc_to_intel_dp(&encoder->base);
+ intel_dp = enc_to_intel_dp(encoder);
val = I915_READ(intel_dp->regs.dp_tp_ctl);
val &= ~DP_TP_CTL_FEC_ENABLE;
I915_WRITE(intel_dp->regs.dp_tp_ctl, val);
@@ -3359,7 +3303,7 @@ static void intel_ddi_disable_fec_state(struct intel_encoder *encoder,
static void
tgl_clear_psr2_transcoder_exitline(const struct intel_crtc_state *cstate)
{
- struct drm_i915_private *dev_priv = to_i915(cstate->base.crtc->dev);
+ struct drm_i915_private *dev_priv = to_i915(cstate->uapi.crtc->dev);
u32 val;
if (!cstate->dc3co_exitline)
@@ -3374,7 +3318,7 @@ static void
tgl_set_psr2_transcoder_exitline(const struct intel_crtc_state *cstate)
{
u32 val, exit_scanlines;
- struct drm_i915_private *dev_priv = to_i915(cstate->base.crtc->dev);
+ struct drm_i915_private *dev_priv = to_i915(cstate->uapi.crtc->dev);
if (!cstate->dc3co_exitline)
return;
@@ -3392,8 +3336,8 @@ static void tgl_dc3co_exitline_compute_config(struct intel_encoder *encoder,
struct intel_crtc_state *cstate)
{
u32 exit_scanlines;
- struct drm_i915_private *dev_priv = to_i915(cstate->base.crtc->dev);
- u32 crtc_vdisplay = cstate->base.adjusted_mode.crtc_vdisplay;
+ struct drm_i915_private *dev_priv = to_i915(cstate->uapi.crtc->dev);
+ u32 crtc_vdisplay = cstate->hw.adjusted_mode.crtc_vdisplay;
cstate->dc3co_exitline = 0;
@@ -3401,11 +3345,11 @@ static void tgl_dc3co_exitline_compute_config(struct intel_encoder *encoder,
return;
/* Bspec: 49196 DC3CO only works with pipe A and DDI A. */
- if (to_intel_crtc(cstate->base.crtc)->pipe != PIPE_A ||
+ if (to_intel_crtc(cstate->uapi.crtc)->pipe != PIPE_A ||
encoder->port != PORT_A)
return;
- if (!cstate->has_psr2 || !cstate->base.active)
+ if (!cstate->has_psr2 || !cstate->hw.active)
return;
/*
@@ -3413,7 +3357,7 @@ static void tgl_dc3co_exitline_compute_config(struct intel_encoder *encoder,
* PSR2 transcoder Early Exit scanlines = ROUNDUP(200 / line time) + 1
*/
exit_scanlines =
- intel_usecs_to_scanlines(&cstate->base.adjusted_mode, 200) + 1;
+ intel_usecs_to_scanlines(&cstate->hw.adjusted_mode, 200) + 1;
if (WARN_ON(exit_scanlines > crtc_vdisplay))
return;
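
For a concrete feel of the formula in the comment above: with a hypothetical CEA 1920x1080@60 mode (148.5 MHz pixel clock, htotal 2200), one scanline takes 2200 / 148500 kHz ~ 14.8 us, so covering 200 us needs ROUNDUP(200 / 14.8) + 1 = 15 scanlines. The arithmetic as a standalone helper; the real code goes through intel_usecs_to_scanlines() on the adjusted mode:

#include <linux/kernel.h>	/* DIV_ROUND_UP() */
#include <linux/types.h>

/* scanlines needed to span 200 us of a mode, rounded up, plus one */
static u32 sketch_dc3co_exit_scanlines(u32 crtc_htotal, u32 crtc_clock_khz)
{
	return DIV_ROUND_UP(200 * crtc_clock_khz, 1000 * crtc_htotal) + 1;
}
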
@@ -3425,7 +3369,7 @@ static void tgl_dc3co_exitline_compute_config(struct intel_encoder *encoder,
static void tgl_dc3co_exitline_get_config(struct intel_crtc_state *crtc_state)
{
u32 val;
- struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev);
+ struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
if (INTEL_GEN(dev_priv) < 12)
return;
@@ -3440,10 +3384,10 @@ static void tgl_ddi_pre_enable_dp(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state,
const struct drm_connector_state *conn_state)
{
- struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
+ struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
enum phy phy = intel_port_to_phy(dev_priv, encoder->port);
- struct intel_digital_port *dig_port = enc_to_dig_port(&encoder->base);
+ struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
bool is_mst = intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DP_MST);
int level = intel_ddi_dp_level(intel_dp);
enum transcoder transcoder = crtc_state->cpu_transcoder;
@@ -3455,47 +3399,80 @@ static void tgl_ddi_pre_enable_dp(struct intel_encoder *encoder,
intel_dp->regs.dp_tp_ctl = TGL_DP_TP_CTL(transcoder);
intel_dp->regs.dp_tp_status = TGL_DP_TP_STATUS(transcoder);
- /* 1.a got on intel_atomic_commit_tail() */
+ /*
+ * 1. Enable Power Wells
+ *
+ * This was handled at the beginning of intel_atomic_commit_tail(),
+ * before we called down into this function.
+ */
- /* 2. */
+ /* 2. Enable Panel Power if PPS is required */
intel_edp_panel_on(intel_dp);
/*
- * 1.b, 3. and 4.a is done before tgl_ddi_pre_enable_dp() by:
- * haswell_crtc_enable()->intel_encoders_pre_pll_enable() and
- * haswell_crtc_enable()->intel_enable_shared_dpll()
+ * 3. For non-TBT Type-C ports, set FIA lane count
+ * (DFLEXDPSP.DPX4TXLATC)
+ *
+ * This was done before tgl_ddi_pre_enable_dp() by
+ * hsw_crtc_enable()->intel_encoders_pre_pll_enable().
*/
- /* 4.b */
+ /*
+ * 4. Enable the port PLL.
+ *
+ * The PLL enabling itself was already done before this function by
+ * hsw_crtc_enable()->intel_enable_shared_dpll(). We need only
+ * configure the PLL to port mapping here.
+ */
intel_ddi_clk_select(encoder, crtc_state);
- /* 5. */
+ /* 5. If IO power is controlled through PWR_WELL_CTL, Enable IO Power */
if (!intel_phy_is_tc(dev_priv, phy) ||
dig_port->tc_mode != TC_PORT_TBT_ALT)
intel_display_power_get(dev_priv,
dig_port->ddi_io_power_domain);
- /* 6. */
+ /* 6. Program DP_MODE */
icl_program_mg_dp_mode(dig_port, crtc_state);
/*
- * 7.a - Steps in this function should only be executed over MST
- * master, what will be taken in care by MST hook
- * intel_mst_pre_enable_dp()
+ * 7. The steps below are substeps of the bspec's "Enable and
+ * Train Display Port" step. Steps specific to MST are handled
+ * by intel_mst_pre_enable_dp() before/after it calls into this
+ * function. Also, intel_mst_pre_enable_dp() only calls us when
+ * active_mst_links==0, so any steps designated for "single
+ * stream or multi-stream master transcoder" can simply be
+ * performed unconditionally here.
+ */
+
+ /*
+ * 7.a Configure Transcoder Clock Select to direct the Port clock to the
+ * Transcoder.
*/
intel_ddi_enable_pipe_clock(crtc_state);
- /* 7.b */
+ /*
+ * 7.b Configure TRANS_DDI_FUNC_CTL DDI Select, DDI Mode Select & MST
+ * Transport Select
+ */
intel_ddi_config_transcoder_func(crtc_state);
- /* 7.d */
- icl_phy_set_clock_gating(dig_port, false);
+ /*
+ * 7.c Configure & enable DP_TP_CTL with link training pattern 1
+ * selected
+ *
+ * This is handled by intel_dp_start_link_train() farther down
+ * in this function.
+ */
- /* 7.e */
+ /* 7.e Configure voltage swing and related IO settings */
tgl_ddi_vswing_sequence(encoder, crtc_state->port_clock, level,
encoder->type);
- /* 7.f */
+ /*
+ * 7.f Combo PHY: Configure PORT_CL_DW10 Static Power Down to power up
+ * the used lanes of the DDI.
+ */
if (intel_phy_is_combo(dev_priv, phy)) {
bool lane_reversal =
dig_port->saved_port_bits & DDI_BUF_PORT_REVERSAL;
@@ -3505,7 +3482,14 @@ static void tgl_ddi_pre_enable_dp(struct intel_encoder *encoder,
lane_reversal);
}
- /* 7.g */
+ /*
+ * 7.g Configure and enable DDI_BUF_CTL
+ * 7.h Wait for DDI_BUF_CTL DDI Idle Status = 0b (Not Idle), timeout
+ * after 500 us.
+ *
+ * We only configure what the register value will be here. Actual
+ * enabling happens during link training farther down.
+ */
intel_ddi_init_dp_buf_reg(encoder);
if (!is_mst)
@@ -3518,23 +3502,21 @@ static void tgl_ddi_pre_enable_dp(struct intel_encoder *encoder,
* training
*/
intel_dp_sink_set_fec_ready(intel_dp, crtc_state);
- /* 7.c, 7.h, 7.i, 7.j */
+
+ /*
+ * 7.i Follow DisplayPort specification training sequence (see notes for
+ * failure handling)
+ * 7.j If DisplayPort multi-stream - Set DP_TP_CTL link training to Idle
+ * Pattern, wait for 5 idle patterns (DP_TP_STATUS Min_Idles_Sent)
+ * (timeout after 800 us)
+ */
intel_dp_start_link_train(intel_dp);
- /* 7.k */
+ /* 7.k Set DP_TP_CTL link training to Normal */
if (!is_trans_port_sync_mode(crtc_state))
intel_dp_stop_link_train(intel_dp);
- /*
- * TODO: enable clock gating
- *
- * It is not written in DP enabling sequence but "PHY Clockgating
- * programming" states that clock gating should be enabled after the
- * link training but doing so causes all the following trainings to fail
- * so not enabling it for now.
- */
-
- /* 7.l */
+ /* 7.l Configure and enable FEC if needed */
intel_ddi_enable_fec(encoder, crtc_state);
intel_dsc_enable(encoder, crtc_state);
}
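
The expanded comments above rely on one calling convention: the MST hook funnels into this function only for the first stream on the port, so the bspec steps tagged "single stream or multi-stream master transcoder" run exactly once. A sketch of that contract; struct and function names are invented, the real entry point being intel_mst_pre_enable_dp():

struct sketch_mst_port {
	int active_mst_links;
};

static void sketch_ddi_pre_enable_dp(struct sketch_mst_port *port)
{
	/* full port bring-up: PLL mapping, vswing, link training, ... */
	(void)port;
}

static void sketch_mst_pre_enable(struct sketch_mst_port *port)
{
	/* only the first stream brings up the port and trains the link */
	if (port->active_mst_links == 0)
		sketch_ddi_pre_enable_dp(port);

	port->active_mst_links++;
	/* per-stream work (VC payload, transcoder enable) follows */
}
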
@@ -3543,15 +3525,18 @@ static void hsw_ddi_pre_enable_dp(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state,
const struct drm_connector_state *conn_state)
{
- struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
+ struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
enum port port = encoder->port;
enum phy phy = intel_port_to_phy(dev_priv, port);
- struct intel_digital_port *dig_port = enc_to_dig_port(&encoder->base);
+ struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
bool is_mst = intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DP_MST);
int level = intel_ddi_dp_level(intel_dp);
- WARN_ON(is_mst && (port == PORT_A || port == PORT_E));
+ if (INTEL_GEN(dev_priv) < 11)
+ WARN_ON(is_mst && (port == PORT_A || port == PORT_E));
+ else
+ WARN_ON(is_mst && port == PORT_A);
intel_dp_set_link_params(intel_dp, crtc_state->port_clock,
crtc_state->lane_count, is_mst);
@@ -3569,7 +3554,6 @@ static void hsw_ddi_pre_enable_dp(struct intel_encoder *encoder,
dig_port->ddi_io_power_domain);
icl_program_mg_dp_mode(dig_port, crtc_state);
- icl_phy_set_clock_gating(dig_port, false);
if (INTEL_GEN(dev_priv) >= 11)
icl_ddi_vswing_sequence(encoder, crtc_state->port_clock,
@@ -3603,8 +3587,6 @@ static void hsw_ddi_pre_enable_dp(struct intel_encoder *encoder,
intel_ddi_enable_fec(encoder, crtc_state);
- icl_phy_set_clock_gating(dig_port, true);
-
if (!is_mst)
intel_ddi_enable_pipe_clock(crtc_state);
@@ -3633,12 +3615,12 @@ static void intel_ddi_pre_enable_hdmi(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state,
const struct drm_connector_state *conn_state)
{
- struct intel_digital_port *intel_dig_port = enc_to_dig_port(&encoder->base);
+ struct intel_digital_port *intel_dig_port = enc_to_dig_port(encoder);
struct intel_hdmi *intel_hdmi = &intel_dig_port->hdmi;
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
enum port port = encoder->port;
int level = intel_ddi_hdmi_level(dev_priv, port);
- struct intel_digital_port *dig_port = enc_to_dig_port(&encoder->base);
+ struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
intel_dp_dual_mode_set_tmds_output(intel_hdmi, true);
intel_ddi_clk_select(encoder, crtc_state);
@@ -3646,7 +3628,6 @@ static void intel_ddi_pre_enable_hdmi(struct intel_encoder *encoder,
intel_display_power_get(dev_priv, dig_port->ddi_io_power_domain);
icl_program_mg_dp_mode(dig_port, crtc_state);
- icl_phy_set_clock_gating(dig_port, false);
if (INTEL_GEN(dev_priv) >= 12)
tgl_ddi_vswing_sequence(encoder, crtc_state->port_clock,
@@ -3661,8 +3642,6 @@ static void intel_ddi_pre_enable_hdmi(struct intel_encoder *encoder,
else
intel_prepare_hdmi_ddi_buffers(encoder, level);
- icl_phy_set_clock_gating(dig_port, true);
-
if (IS_GEN9_BC(dev_priv))
skl_ddi_set_iboost(encoder, level, INTEL_OUTPUT_HDMI);
@@ -3677,7 +3656,7 @@ static void intel_ddi_pre_enable(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state,
const struct drm_connector_state *conn_state)
{
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
enum pipe pipe = crtc->pipe;
@@ -3705,12 +3684,12 @@ static void intel_ddi_pre_enable(struct intel_encoder *encoder,
intel_ddi_pre_enable_hdmi(encoder, crtc_state, conn_state);
} else {
struct intel_lspcon *lspcon =
- enc_to_intel_lspcon(&encoder->base);
+ enc_to_intel_lspcon(encoder);
intel_ddi_pre_enable_dp(encoder, crtc_state, conn_state);
if (lspcon->active) {
struct intel_digital_port *dig_port =
- enc_to_dig_port(&encoder->base);
+ enc_to_dig_port(encoder);
dig_port->set_infoframes(encoder,
crtc_state->has_infoframe,
@@ -3735,7 +3714,7 @@ static void intel_disable_ddi_buf(struct intel_encoder *encoder,
}
if (intel_crtc_has_dp_encoder(crtc_state)) {
- struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
+ struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
val = I915_READ(intel_dp->regs.dp_tp_ctl);
val &= ~(DP_TP_CTL_ENABLE | DP_TP_CTL_LINK_TRAIN_MASK);
@@ -3755,23 +3734,42 @@ static void intel_ddi_post_disable_dp(struct intel_encoder *encoder,
const struct drm_connector_state *old_conn_state)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- struct intel_digital_port *dig_port = enc_to_dig_port(&encoder->base);
+ struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
struct intel_dp *intel_dp = &dig_port->dp;
bool is_mst = intel_crtc_has_type(old_crtc_state,
INTEL_OUTPUT_DP_MST);
enum phy phy = intel_port_to_phy(dev_priv, encoder->port);
- if (!is_mst) {
- intel_ddi_disable_pipe_clock(old_crtc_state);
- /*
- * Power down sink before disabling the port, otherwise we end
- * up getting interrupts from the sink on detecting link loss.
- */
- intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_OFF);
+ /*
+ * Power down sink before disabling the port, otherwise we end
+ * up getting interrupts from the sink on detecting link loss.
+ */
+ intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_OFF);
+
+ if (INTEL_GEN(dev_priv) >= 12) {
+ if (is_mst) {
+ enum transcoder cpu_transcoder = old_crtc_state->cpu_transcoder;
+ u32 val;
+
+ val = I915_READ(TRANS_DDI_FUNC_CTL(cpu_transcoder));
+ val &= ~TGL_TRANS_DDI_PORT_MASK;
+ I915_WRITE(TRANS_DDI_FUNC_CTL(cpu_transcoder), val);
+ }
+ } else {
+ if (!is_mst)
+ intel_ddi_disable_pipe_clock(old_crtc_state);
}
intel_disable_ddi_buf(encoder, old_crtc_state);
+ /*
+ * From TGL spec: "If single stream or multi-stream master transcoder:
+ * Configure Transcoder Clock select to direct no clock to the
+ * transcoder"
+ */
+ if (INTEL_GEN(dev_priv) >= 12)
+ intel_ddi_disable_pipe_clock(old_crtc_state);
+
intel_edp_panel_vdd_on(intel_dp);
intel_edp_panel_off(intel_dp);
@@ -3789,7 +3787,7 @@ static void intel_ddi_post_disable_hdmi(struct intel_encoder *encoder,
const struct drm_connector_state *old_conn_state)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- struct intel_digital_port *dig_port = enc_to_dig_port(&encoder->base);
+ struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
struct intel_hdmi *intel_hdmi = &dig_port->hdmi;
dig_port->set_infoframes(encoder, false,
@@ -3807,11 +3805,46 @@ static void intel_ddi_post_disable_hdmi(struct intel_encoder *encoder,
intel_dp_dual_mode_set_tmds_output(intel_hdmi, false);
}
+static void icl_disable_transcoder_port_sync(const struct intel_crtc_state *old_crtc_state)
+{
+ struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->uapi.crtc);
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+
+ if (old_crtc_state->master_transcoder == INVALID_TRANSCODER)
+ return;
+
+ DRM_DEBUG_KMS("Disabling Transcoder Port Sync on Slave Transcoder %s\n",
+ transcoder_name(old_crtc_state->cpu_transcoder));
+
+ I915_WRITE(TRANS_DDI_FUNC_CTL2(old_crtc_state->cpu_transcoder), 0);
+}
+
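
The early return above encodes the transcoder port-sync convention this series settles on: a CRTC is a sync slave if and only if a master transcoder is assigned (see the matching is_trans_port_sync_slave() rework in intel_display.c further down). Made explicit as a hypothetical helper; INVALID_TRANSCODER is the driver's real sentinel:

static bool sketch_is_port_sync_slave(enum transcoder master_transcoder)
{
	return master_transcoder != INVALID_TRANSCODER;
}
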
static void intel_ddi_post_disable(struct intel_encoder *encoder,
const struct intel_crtc_state *old_crtc_state,
const struct drm_connector_state *old_conn_state)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
+ enum phy phy = intel_port_to_phy(dev_priv, encoder->port);
+ bool is_tc_port = intel_phy_is_tc(dev_priv, phy);
+
+ if (!intel_crtc_has_type(old_crtc_state, INTEL_OUTPUT_DP_MST)) {
+ intel_crtc_vblank_off(old_crtc_state);
+
+ intel_disable_pipe(old_crtc_state);
+
+ if (INTEL_GEN(dev_priv) >= 11)
+ icl_disable_transcoder_port_sync(old_crtc_state);
+
+ intel_ddi_disable_transcoder_func(old_crtc_state);
+
+ intel_dsc_disable(old_crtc_state);
+
+ if (INTEL_GEN(dev_priv) >= 9)
+ skl_scaler_disable(old_crtc_state);
+ else
+ ilk_pfit_disable(old_crtc_state);
+ }
/*
* When called from DP MST code:
@@ -3835,6 +3868,13 @@ static void intel_ddi_post_disable(struct intel_encoder *encoder,
if (INTEL_GEN(dev_priv) >= 11)
icl_unmap_plls_to_ports(encoder);
+
+ if (intel_crtc_has_dp_encoder(old_crtc_state) || is_tc_port)
+ intel_display_power_put_unchecked(dev_priv,
+ intel_ddi_main_link_aux_domain(dig_port));
+
+ if (is_tc_port)
+ intel_tc_port_put_link(dig_port);
}
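
With intel_ddi_post_pll_disable() removed further down, the type-C link reference and the main-link AUX power are now released here, pairing with the acquisitions done in intel_ddi_pre_pll_enable() (and, for the update path, in update_prepare/update_complete). A sketch of the refcount balance, using the real tc helpers inside invented wrappers:

static void sketch_pre_pll_enable(struct intel_digital_port *dig_port,
				  bool is_tc_port, int required_lanes)
{
	if (is_tc_port)
		intel_tc_port_get_link(dig_port, required_lanes);
}

static void sketch_post_disable(struct intel_digital_port *dig_port,
				bool is_tc_port)
{
	/* drops the reference taken in sketch_pre_pll_enable() */
	if (is_tc_port)
		intel_tc_port_put_link(dig_port);
}
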
void intel_ddi_fdi_post_disable(struct intel_encoder *encoder,
@@ -3876,7 +3916,7 @@ static void intel_enable_ddi_dp(struct intel_encoder *encoder,
const struct drm_connector_state *conn_state)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
+ struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
enum port port = encoder->port;
if (port == PORT_A && INTEL_GEN(dev_priv) < 9)
@@ -3917,7 +3957,7 @@ static void intel_enable_ddi_hdmi(struct intel_encoder *encoder,
const struct drm_connector_state *conn_state)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- struct intel_digital_port *dig_port = enc_to_dig_port(&encoder->base);
+ struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
struct drm_connector *connector = conn_state->connector;
enum port port = encoder->port;
@@ -3994,7 +4034,7 @@ static void intel_disable_ddi_dp(struct intel_encoder *encoder,
const struct intel_crtc_state *old_crtc_state,
const struct drm_connector_state *old_conn_state)
{
- struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
+ struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
intel_dp->link_trained = false;
@@ -4042,7 +4082,7 @@ static void intel_ddi_update_pipe_dp(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state,
const struct drm_connector_state *conn_state)
{
- struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
+ struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
intel_ddi_set_dp_msa(crtc_state, conn_state);
@@ -4106,8 +4146,9 @@ intel_ddi_update_prepare(struct intel_atomic_state *state,
WARN_ON(crtc && crtc->active);
- intel_tc_port_get_link(enc_to_dig_port(&encoder->base), required_lanes);
- if (crtc_state && crtc_state->base.active)
+ intel_tc_port_get_link(enc_to_dig_port(encoder),
+ required_lanes);
+ if (crtc_state && crtc_state->hw.active)
intel_update_active_dpll(state, crtc, encoder);
}
@@ -4116,7 +4157,7 @@ intel_ddi_update_complete(struct intel_atomic_state *state,
struct intel_encoder *encoder,
struct intel_crtc *crtc)
{
- intel_tc_port_put_link(enc_to_dig_port(&encoder->base));
+ intel_tc_port_put_link(enc_to_dig_port(encoder));
}
static void
@@ -4125,7 +4166,7 @@ intel_ddi_pre_pll_enable(struct intel_encoder *encoder,
const struct drm_connector_state *conn_state)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- struct intel_digital_port *dig_port = enc_to_dig_port(&encoder->base);
+ struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
enum phy phy = intel_port_to_phy(dev_priv, encoder->port);
bool is_tc_port = intel_phy_is_tc(dev_priv, phy);
@@ -4147,61 +4188,44 @@ intel_ddi_pre_pll_enable(struct intel_encoder *encoder,
crtc_state->lane_lat_optim_mask);
}
-static void
-intel_ddi_post_pll_disable(struct intel_encoder *encoder,
- const struct intel_crtc_state *crtc_state,
- const struct drm_connector_state *conn_state)
-{
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- struct intel_digital_port *dig_port = enc_to_dig_port(&encoder->base);
- enum phy phy = intel_port_to_phy(dev_priv, encoder->port);
- bool is_tc_port = intel_phy_is_tc(dev_priv, phy);
-
- if (intel_crtc_has_dp_encoder(crtc_state) || is_tc_port)
- intel_display_power_put_unchecked(dev_priv,
- intel_ddi_main_link_aux_domain(dig_port));
-
- if (is_tc_port)
- intel_tc_port_put_link(dig_port);
-}
-
static void intel_ddi_prepare_link_retrain(struct intel_dp *intel_dp)
{
struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
struct drm_i915_private *dev_priv =
to_i915(intel_dig_port->base.base.dev);
enum port port = intel_dig_port->base.port;
- u32 val;
+ u32 dp_tp_ctl, ddi_buf_ctl;
bool wait = false;
- if (I915_READ(intel_dp->regs.dp_tp_ctl) & DP_TP_CTL_ENABLE) {
- val = I915_READ(DDI_BUF_CTL(port));
- if (val & DDI_BUF_CTL_ENABLE) {
- val &= ~DDI_BUF_CTL_ENABLE;
- I915_WRITE(DDI_BUF_CTL(port), val);
+ dp_tp_ctl = I915_READ(intel_dp->regs.dp_tp_ctl);
+
+ if (dp_tp_ctl & DP_TP_CTL_ENABLE) {
+ ddi_buf_ctl = I915_READ(DDI_BUF_CTL(port));
+ if (ddi_buf_ctl & DDI_BUF_CTL_ENABLE) {
+ I915_WRITE(DDI_BUF_CTL(port),
+ ddi_buf_ctl & ~DDI_BUF_CTL_ENABLE);
wait = true;
}
- val = I915_READ(intel_dp->regs.dp_tp_ctl);
- val &= ~(DP_TP_CTL_ENABLE | DP_TP_CTL_LINK_TRAIN_MASK);
- val |= DP_TP_CTL_LINK_TRAIN_PAT1;
- I915_WRITE(intel_dp->regs.dp_tp_ctl, val);
+ dp_tp_ctl &= ~(DP_TP_CTL_ENABLE | DP_TP_CTL_LINK_TRAIN_MASK);
+ dp_tp_ctl |= DP_TP_CTL_LINK_TRAIN_PAT1;
+ I915_WRITE(intel_dp->regs.dp_tp_ctl, dp_tp_ctl);
POSTING_READ(intel_dp->regs.dp_tp_ctl);
if (wait)
intel_wait_ddi_buf_idle(dev_priv, port);
}
- val = DP_TP_CTL_ENABLE |
- DP_TP_CTL_LINK_TRAIN_PAT1 | DP_TP_CTL_SCRAMBLE_DISABLE;
+ dp_tp_ctl = DP_TP_CTL_ENABLE |
+ DP_TP_CTL_LINK_TRAIN_PAT1 | DP_TP_CTL_SCRAMBLE_DISABLE;
if (intel_dp->link_mst)
- val |= DP_TP_CTL_MODE_MST;
+ dp_tp_ctl |= DP_TP_CTL_MODE_MST;
else {
- val |= DP_TP_CTL_MODE_SST;
+ dp_tp_ctl |= DP_TP_CTL_MODE_SST;
if (drm_dp_enhanced_frame_cap(intel_dp->dpcd))
- val |= DP_TP_CTL_ENHANCED_FRAME_ENABLE;
+ dp_tp_ctl |= DP_TP_CTL_ENHANCED_FRAME_ENABLE;
}
- I915_WRITE(intel_dp->regs.dp_tp_ctl, val);
+ I915_WRITE(intel_dp->regs.dp_tp_ctl, dp_tp_ctl);
POSTING_READ(intel_dp->regs.dp_tp_ctl);
intel_dp->DP |= DDI_BUF_CTL_ENABLE;
@@ -4237,7 +4261,7 @@ void intel_ddi_get_config(struct intel_encoder *encoder,
struct intel_crtc_state *pipe_config)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- struct intel_crtc *intel_crtc = to_intel_crtc(pipe_config->base.crtc);
+ struct intel_crtc *intel_crtc = to_intel_crtc(pipe_config->uapi.crtc);
enum transcoder cpu_transcoder = pipe_config->cpu_transcoder;
u32 temp, flags = 0;
@@ -4245,6 +4269,8 @@ void intel_ddi_get_config(struct intel_encoder *encoder,
if (WARN_ON(transcoder_is_dsi(cpu_transcoder)))
return;
+ intel_dsc_get_config(encoder, pipe_config);
+
temp = I915_READ(TRANS_DDI_FUNC_CTL(cpu_transcoder));
if (temp & TRANS_DDI_PHSYNC)
flags |= DRM_MODE_FLAG_PHSYNC;
@@ -4255,7 +4281,7 @@ void intel_ddi_get_config(struct intel_encoder *encoder,
else
flags |= DRM_MODE_FLAG_NVSYNC;
- pipe_config->base.adjusted_mode.flags |= flags;
+ pipe_config->hw.adjusted_mode.flags |= flags;
switch (temp & TRANS_DDI_BPC_MASK) {
case TRANS_DDI_BPC_6:
@@ -4326,6 +4352,11 @@ void intel_ddi_get_config(struct intel_encoder *encoder,
pipe_config->output_types |= BIT(INTEL_OUTPUT_DP_MST);
pipe_config->lane_count =
((temp & DDI_PORT_WIDTH_MASK) >> DDI_PORT_WIDTH_SHIFT) + 1;
+
+ if (INTEL_GEN(dev_priv) >= 12)
+ pipe_config->mst_master_transcoder =
+ REG_FIELD_GET(TRANS_DDI_MST_TRANSPORT_SELECT_MASK, temp);
+
intel_dp_get_m_n(intel_crtc, pipe_config);
break;
default:
@@ -4404,7 +4435,7 @@ static int intel_ddi_compute_config(struct intel_encoder *encoder,
struct intel_crtc_state *pipe_config,
struct drm_connector_state *conn_state)
{
- struct intel_crtc *crtc = to_intel_crtc(pipe_config->base.crtc);
+ struct intel_crtc *crtc = to_intel_crtc(pipe_config->uapi.crtc);
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
enum port port = encoder->port;
int ret;
@@ -4439,7 +4470,7 @@ static int intel_ddi_compute_config(struct intel_encoder *encoder,
static void intel_ddi_encoder_destroy(struct drm_encoder *encoder)
{
- struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
+ struct intel_digital_port *dig_port = enc_to_dig_port(to_intel_encoder(encoder));
intel_dp_encoder_flush_work(encoder);
@@ -4506,7 +4537,7 @@ static int intel_hdmi_reset_link(struct intel_encoder *encoder,
struct drm_modeset_acquire_ctx *ctx)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- struct intel_hdmi *hdmi = enc_to_intel_hdmi(&encoder->base);
+ struct intel_hdmi *hdmi = enc_to_intel_hdmi(encoder);
struct intel_connector *connector = hdmi->attached_connector;
struct i2c_adapter *adapter =
intel_gmbus_get_adapter(dev_priv, hdmi->ddc_bus);
@@ -4538,7 +4569,7 @@ static int intel_hdmi_reset_link(struct intel_encoder *encoder,
WARN_ON(!intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI));
- if (!crtc_state->base.active)
+ if (!crtc_state->hw.active)
return 0;
if (!crtc_state->hdmi_high_tmds_clock_ratio &&
@@ -4578,7 +4609,7 @@ intel_ddi_hotplug(struct intel_encoder *encoder,
struct intel_connector *connector,
bool irq_received)
{
- struct intel_digital_port *dig_port = enc_to_dig_port(&encoder->base);
+ struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
struct drm_modeset_acquire_ctx ctx;
enum intel_hotplug_state state;
int ret;
@@ -4709,8 +4740,7 @@ void intel_ddi_init(struct drm_i915_private *dev_priv, enum port port)
struct ddi_vbt_port_info *port_info =
&dev_priv->vbt.ddi_port_info[port];
struct intel_digital_port *intel_dig_port;
- struct intel_encoder *intel_encoder;
- struct drm_encoder *encoder;
+ struct intel_encoder *encoder;
bool init_hdmi, init_dp, init_lspcon = false;
enum phy phy = intel_port_to_phy(dev_priv, port);
@@ -4739,31 +4769,30 @@ void intel_ddi_init(struct drm_i915_private *dev_priv, enum port port)
if (!intel_dig_port)
return;
- intel_encoder = &intel_dig_port->base;
- encoder = &intel_encoder->base;
+ encoder = &intel_dig_port->base;
- drm_encoder_init(&dev_priv->drm, encoder, &intel_ddi_funcs,
+ drm_encoder_init(&dev_priv->drm, &encoder->base, &intel_ddi_funcs,
DRM_MODE_ENCODER_TMDS, "DDI %c", port_name(port));
- intel_encoder->hotplug = intel_ddi_hotplug;
- intel_encoder->compute_output_type = intel_ddi_compute_output_type;
- intel_encoder->compute_config = intel_ddi_compute_config;
- intel_encoder->enable = intel_enable_ddi;
- intel_encoder->pre_pll_enable = intel_ddi_pre_pll_enable;
- intel_encoder->post_pll_disable = intel_ddi_post_pll_disable;
- intel_encoder->pre_enable = intel_ddi_pre_enable;
- intel_encoder->disable = intel_disable_ddi;
- intel_encoder->post_disable = intel_ddi_post_disable;
- intel_encoder->update_pipe = intel_ddi_update_pipe;
- intel_encoder->get_hw_state = intel_ddi_get_hw_state;
- intel_encoder->get_config = intel_ddi_get_config;
- intel_encoder->suspend = intel_dp_encoder_suspend;
- intel_encoder->get_power_domains = intel_ddi_get_power_domains;
- intel_encoder->type = INTEL_OUTPUT_DDI;
- intel_encoder->power_domain = intel_port_to_power_domain(port);
- intel_encoder->port = port;
- intel_encoder->cloneable = 0;
- intel_encoder->pipe_mask = ~0;
+ encoder->hotplug = intel_ddi_hotplug;
+ encoder->compute_output_type = intel_ddi_compute_output_type;
+ encoder->compute_config = intel_ddi_compute_config;
+ encoder->enable = intel_enable_ddi;
+ encoder->pre_pll_enable = intel_ddi_pre_pll_enable;
+ encoder->pre_enable = intel_ddi_pre_enable;
+ encoder->disable = intel_disable_ddi;
+ encoder->post_disable = intel_ddi_post_disable;
+ encoder->update_pipe = intel_ddi_update_pipe;
+ encoder->get_hw_state = intel_ddi_get_hw_state;
+ encoder->get_config = intel_ddi_get_config;
+ encoder->suspend = intel_dp_encoder_suspend;
+ encoder->get_power_domains = intel_ddi_get_power_domains;
+
+ encoder->type = INTEL_OUTPUT_DDI;
+ encoder->power_domain = intel_port_to_power_domain(port);
+ encoder->port = port;
+ encoder->cloneable = 0;
+ encoder->pipe_mask = ~0;
if (INTEL_GEN(dev_priv) >= 11)
intel_dig_port->saved_port_bits = I915_READ(DDI_BUF_CTL(port)) &
@@ -4771,6 +4800,7 @@ void intel_ddi_init(struct drm_i915_private *dev_priv, enum port port)
else
intel_dig_port->saved_port_bits = I915_READ(DDI_BUF_CTL(port)) &
(DDI_BUF_PORT_REVERSAL | DDI_A_4_LANES);
+
intel_dig_port->dp.output_reg = INVALID_MMIO_REG;
intel_dig_port->max_lanes = intel_ddi_max_lanes(intel_dig_port);
intel_dig_port->aux_ch = intel_bios_port_aux_ch(dev_priv, port);
@@ -4781,8 +4811,8 @@ void intel_ddi_init(struct drm_i915_private *dev_priv, enum port port)
intel_tc_port_init(intel_dig_port, is_legacy);
- intel_encoder->update_prepare = intel_ddi_update_prepare;
- intel_encoder->update_complete = intel_ddi_update_complete;
+ encoder->update_prepare = intel_ddi_update_prepare;
+ encoder->update_complete = intel_ddi_update_complete;
}
WARN_ON(port > PORT_I);
@@ -4798,7 +4828,7 @@ void intel_ddi_init(struct drm_i915_private *dev_priv, enum port port)
/* In theory we don't need the encoder->type check, but leave it just in
* case we have some really bad VBTs... */
- if (intel_encoder->type != INTEL_OUTPUT_EDP && init_hdmi) {
+ if (encoder->type != INTEL_OUTPUT_EDP && init_hdmi) {
if (!intel_ddi_init_hdmi_connector(intel_dig_port))
goto err;
}
@@ -4822,6 +4852,6 @@ void intel_ddi_init(struct drm_i915_private *dev_priv, enum port port)
return;
err:
- drm_encoder_cleanup(encoder);
+ drm_encoder_cleanup(&encoder->base);
kfree(intel_dig_port);
}
diff --git a/drivers/gpu/drm/i915/display/intel_ddi.h b/drivers/gpu/drm/i915/display/intel_ddi.h
index 19aeab1246ee..167c6579d972 100644
--- a/drivers/gpu/drm/i915/display/intel_ddi.h
+++ b/drivers/gpu/drm/i915/display/intel_ddi.h
@@ -22,7 +22,7 @@ struct intel_encoder;
void intel_ddi_fdi_post_disable(struct intel_encoder *intel_encoder,
const struct intel_crtc_state *old_crtc_state,
const struct drm_connector_state *old_conn_state);
-void hsw_fdi_link_train(struct intel_crtc *crtc,
+void hsw_fdi_link_train(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state);
void intel_ddi_init(struct drm_i915_private *dev_priv, enum port port);
bool intel_ddi_get_hw_state(struct intel_encoder *encoder, enum pipe *pipe);
diff --git a/drivers/gpu/drm/i915/display/intel_display.c b/drivers/gpu/drm/i915/display/intel_display.c
index 301897791627..19ea842cfd84 100644
--- a/drivers/gpu/drm/i915/display/intel_display.c
+++ b/drivers/gpu/drm/i915/display/intel_display.c
@@ -46,6 +46,7 @@
#include "display/intel_crt.h"
#include "display/intel_ddi.h"
#include "display/intel_dp.h"
+#include "display/intel_dp_mst.h"
#include "display/intel_dsi.h"
#include "display/intel_dvo.h"
#include "display/intel_gmbus.h"
@@ -86,8 +87,8 @@
/* Primary plane formats for gen <= 3 */
static const u32 i8xx_primary_formats[] = {
DRM_FORMAT_C8,
- DRM_FORMAT_RGB565,
DRM_FORMAT_XRGB1555,
+ DRM_FORMAT_RGB565,
DRM_FORMAT_XRGB8888,
};
@@ -112,6 +113,21 @@ static const u32 i965_primary_formats[] = {
DRM_FORMAT_XBGR16161616F,
};
+/* Primary plane formats for vlv/chv */
+static const u32 vlv_primary_formats[] = {
+ DRM_FORMAT_C8,
+ DRM_FORMAT_RGB565,
+ DRM_FORMAT_XRGB8888,
+ DRM_FORMAT_XBGR8888,
+ DRM_FORMAT_ARGB8888,
+ DRM_FORMAT_ABGR8888,
+ DRM_FORMAT_XRGB2101010,
+ DRM_FORMAT_XBGR2101010,
+ DRM_FORMAT_ARGB2101010,
+ DRM_FORMAT_ABGR2101010,
+ DRM_FORMAT_XBGR16161616F,
+};
+
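
The new vlv_primary_formats table adds the per-pixel-alpha variants (ARGB/ABGR at 8888 and 2101010) that the vlv/chv primary planes can scan out but the shared i965 table never advertised. The hookup would presumably look roughly like the sketch below at plane-creation time; the selection logic shown is illustrative, with the real wiring living in intel_primary_plane_create(), outside this hunk:

#include <linux/kernel.h>	/* ARRAY_SIZE() */

static void sketch_pick_primary_formats(bool is_vlv_or_chv,
					const u32 **formats,
					unsigned int *count)
{
	if (is_vlv_or_chv) {
		*formats = vlv_primary_formats;
		*count = ARRAY_SIZE(vlv_primary_formats);
	} else {
		*formats = i965_primary_formats;
		*count = ARRAY_SIZE(i965_primary_formats);
	}
}
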
static const u64 i9xx_format_modifiers[] = {
I915_FORMAT_MOD_X_TILED,
DRM_FORMAT_MOD_LINEAR,
@@ -130,8 +146,8 @@ static const u64 cursor_format_modifiers[] = {
static void i9xx_crtc_clock_get(struct intel_crtc *crtc,
struct intel_crtc_state *pipe_config);
-static void ironlake_pch_clock_get(struct intel_crtc *crtc,
- struct intel_crtc_state *pipe_config);
+static void ilk_pch_clock_get(struct intel_crtc *crtc,
+ struct intel_crtc_state *pipe_config);
static int intel_framebuffer_init(struct intel_framebuffer *ifb,
struct drm_i915_gem_object *obj,
@@ -142,21 +158,18 @@ static void intel_cpu_transcoder_set_m_n(const struct intel_crtc_state *crtc_sta
const struct intel_link_m_n *m_n,
const struct intel_link_m_n *m2_n2);
static void i9xx_set_pipeconf(const struct intel_crtc_state *crtc_state);
-static void ironlake_set_pipeconf(const struct intel_crtc_state *crtc_state);
-static void haswell_set_pipeconf(const struct intel_crtc_state *crtc_state);
+static void ilk_set_pipeconf(const struct intel_crtc_state *crtc_state);
+static void hsw_set_pipeconf(const struct intel_crtc_state *crtc_state);
static void bdw_set_pipemisc(const struct intel_crtc_state *crtc_state);
static void vlv_prepare_pll(struct intel_crtc *crtc,
const struct intel_crtc_state *pipe_config);
static void chv_prepare_pll(struct intel_crtc *crtc,
const struct intel_crtc_state *pipe_config);
-static void intel_crtc_init_scalers(struct intel_crtc *crtc,
- struct intel_crtc_state *crtc_state);
-static void skylake_pfit_enable(const struct intel_crtc_state *crtc_state);
-static void ironlake_pfit_disable(const struct intel_crtc_state *old_crtc_state);
-static void ironlake_pfit_enable(const struct intel_crtc_state *crtc_state);
+static void skl_pfit_enable(const struct intel_crtc_state *crtc_state);
+static void ilk_pfit_enable(const struct intel_crtc_state *crtc_state);
static void intel_modeset_setup_hw_state(struct drm_device *dev,
struct drm_modeset_acquire_ctx *ctx);
-static void intel_pre_disable_primary_noatomic(struct drm_crtc *crtc);
+static struct intel_crtc_state *intel_crtc_state_alloc(struct intel_crtc *crtc);
struct intel_limit {
struct {
@@ -357,7 +370,7 @@ static const struct intel_limit intel_limits_g4x_dual_channel_lvds = {
},
};
-static const struct intel_limit intel_limits_pineview_sdvo = {
+static const struct intel_limit pnv_limits_sdvo = {
.dot = { .min = 20000, .max = 400000},
.vco = { .min = 1700000, .max = 3500000 },
/* Pineview's Ncounter is a ring counter */
@@ -372,7 +385,7 @@ static const struct intel_limit intel_limits_pineview_sdvo = {
.p2_slow = 10, .p2_fast = 5 },
};
-static const struct intel_limit intel_limits_pineview_lvds = {
+static const struct intel_limit pnv_limits_lvds = {
.dot = { .min = 20000, .max = 400000 },
.vco = { .min = 1700000, .max = 3500000 },
.n = { .min = 3, .max = 6 },
@@ -390,7 +403,7 @@ static const struct intel_limit intel_limits_pineview_lvds = {
* We calculate clock using (register_value + 2) for N/M1/M2, so here
* the range value for them is (actual_value - 2).
*/
-static const struct intel_limit intel_limits_ironlake_dac = {
+static const struct intel_limit ilk_limits_dac = {
.dot = { .min = 25000, .max = 350000 },
.vco = { .min = 1760000, .max = 3510000 },
.n = { .min = 1, .max = 5 },
@@ -403,7 +416,7 @@ static const struct intel_limit intel_limits_ironlake_dac = {
.p2_slow = 10, .p2_fast = 5 },
};
-static const struct intel_limit intel_limits_ironlake_single_lvds = {
+static const struct intel_limit ilk_limits_single_lvds = {
.dot = { .min = 25000, .max = 350000 },
.vco = { .min = 1760000, .max = 3510000 },
.n = { .min = 1, .max = 3 },
@@ -416,7 +429,7 @@ static const struct intel_limit intel_limits_ironlake_single_lvds = {
.p2_slow = 14, .p2_fast = 14 },
};
-static const struct intel_limit intel_limits_ironlake_dual_lvds = {
+static const struct intel_limit ilk_limits_dual_lvds = {
.dot = { .min = 25000, .max = 350000 },
.vco = { .min = 1760000, .max = 3510000 },
.n = { .min = 1, .max = 3 },
@@ -430,7 +443,7 @@ static const struct intel_limit intel_limits_ironlake_dual_lvds = {
};
/* LVDS 100mhz refclk limits. */
-static const struct intel_limit intel_limits_ironlake_single_lvds_100m = {
+static const struct intel_limit ilk_limits_single_lvds_100m = {
.dot = { .min = 25000, .max = 350000 },
.vco = { .min = 1760000, .max = 3510000 },
.n = { .min = 1, .max = 2 },
@@ -443,7 +456,7 @@ static const struct intel_limit intel_limits_ironlake_single_lvds_100m = {
.p2_slow = 14, .p2_fast = 14 },
};
-static const struct intel_limit intel_limits_ironlake_dual_lvds_100m = {
+static const struct intel_limit ilk_limits_dual_lvds_100m = {
.dot = { .min = 25000, .max = 350000 },
.vco = { .min = 1760000, .max = 3510000 },
.n = { .min = 1, .max = 3 },
@@ -530,7 +543,7 @@ icl_wa_scalerclkgating(struct drm_i915_private *dev_priv, enum pipe pipe,
static bool
needs_modeset(const struct intel_crtc_state *state)
{
- return drm_atomic_crtc_needs_modeset(&state->base);
+ return drm_atomic_crtc_needs_modeset(&state->uapi);
}
bool
@@ -541,10 +554,9 @@ is_trans_port_sync_mode(const struct intel_crtc_state *crtc_state)
}
static bool
-is_trans_port_sync_master(const struct intel_crtc_state *crtc_state)
+is_trans_port_sync_slave(const struct intel_crtc_state *crtc_state)
{
- return (crtc_state->master_transcoder == INVALID_TRANSCODER &&
- crtc_state->sync_mode_slaves_mask);
+ return crtc_state->master_transcoder != INVALID_TRANSCODER;
}
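
(For reference, the master-side check implied by the removed condition above would look like the sketch below; this is inferred from the old lines and is not part of the patch itself.)

static bool
is_trans_port_sync_master(const struct intel_crtc_state *crtc_state)
{
	/* a master has sync slaves attached but no master of its own */
	return crtc_state->master_transcoder == INVALID_TRANSCODER &&
		crtc_state->sync_mode_slaves_mask;
}
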
/*
@@ -658,7 +670,7 @@ i9xx_select_p2_div(const struct intel_limit *limit,
const struct intel_crtc_state *crtc_state,
int target)
{
- struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev);
+ struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
/*
@@ -694,7 +706,7 @@ i9xx_find_best_dpll(const struct intel_limit *limit,
int target, int refclk, struct dpll *match_clock,
struct dpll *best_clock)
{
- struct drm_device *dev = crtc_state->base.crtc->dev;
+ struct drm_device *dev = crtc_state->uapi.crtc->dev;
struct dpll clock;
int err = target;
@@ -752,7 +764,7 @@ pnv_find_best_dpll(const struct intel_limit *limit,
int target, int refclk, struct dpll *match_clock,
struct dpll *best_clock)
{
- struct drm_device *dev = crtc_state->base.crtc->dev;
+ struct drm_device *dev = crtc_state->uapi.crtc->dev;
struct dpll clock;
int err = target;
@@ -808,7 +820,7 @@ g4x_find_best_dpll(const struct intel_limit *limit,
int target, int refclk, struct dpll *match_clock,
struct dpll *best_clock)
{
- struct drm_device *dev = crtc_state->base.crtc->dev;
+ struct drm_device *dev = crtc_state->uapi.crtc->dev;
struct dpll clock;
int max_n;
bool found = false;
@@ -902,7 +914,7 @@ vlv_find_best_dpll(const struct intel_limit *limit,
int target, int refclk, struct dpll *match_clock,
struct dpll *best_clock)
{
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
struct drm_device *dev = crtc->base.dev;
struct dpll clock;
unsigned int bestppm = 1000000;
@@ -962,7 +974,7 @@ chv_find_best_dpll(const struct intel_limit *limit,
int target, int refclk, struct dpll *match_clock,
struct dpll *best_clock)
{
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
struct drm_device *dev = crtc->base.dev;
unsigned int best_error_ppm;
struct dpll clock;
@@ -1025,33 +1037,6 @@ bool bxt_find_best_dpll(struct intel_crtc_state *crtc_state,
NULL, best_clock);
}
-bool intel_crtc_active(struct intel_crtc *crtc)
-{
- /* Be paranoid as we can arrive here with only partial
- * state retrieved from the hardware during setup.
- *
- * We can ditch the adjusted_mode.crtc_clock check as soon
- * as Haswell has gained clock readout/fastboot support.
- *
- * We can ditch the crtc->primary->state->fb check as soon as we can
- * properly reconstruct framebuffers.
- *
- * FIXME: The intel_crtc->active here should be switched to
- * crtc->state->active once we have proper CRTC states wired up
- * for atomic.
- */
- return crtc->active && crtc->base.primary->state->fb &&
- crtc->config->base.adjusted_mode.crtc_clock;
-}
-
-enum transcoder intel_pipe_to_cpu_transcoder(struct drm_i915_private *dev_priv,
- enum pipe pipe)
-{
- struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
-
- return crtc->config->cpu_transcoder;
-}
-
static bool pipe_scanline_is_moving(struct drm_i915_private *dev_priv,
enum pipe pipe)
{
@@ -1095,7 +1080,7 @@ static void intel_wait_for_pipe_scanline_moving(struct intel_crtc *crtc)
static void
intel_wait_for_pipe_off(const struct intel_crtc_state *old_crtc_state)
{
- struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->base.crtc);
+ struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->uapi.crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
if (INTEL_GEN(dev_priv) >= 4) {
@@ -1145,11 +1130,15 @@ static void assert_fdi_tx(struct drm_i915_private *dev_priv,
enum pipe pipe, bool state)
{
bool cur_state;
- enum transcoder cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv,
- pipe);
if (HAS_DDI(dev_priv)) {
- /* DDI does not have a specific FDI_TX register */
+ /*
+ * DDI does not have a specific FDI_TX register.
+ *
+	 * FDI is never fed from the EDP transcoder,
+	 * so the pipe -> transcoder cast is fine here.
+ */
+ enum transcoder cpu_transcoder = (enum transcoder)pipe;
u32 val = I915_READ(TRANS_DDI_FUNC_CTL(cpu_transcoder));
cur_state = !!(val & TRANS_DDI_FUNC_ENABLE);
} else {
@@ -1266,11 +1255,9 @@ void assert_panel_unlocked(struct drm_i915_private *dev_priv, enum pipe pipe)
}
void assert_pipe(struct drm_i915_private *dev_priv,
- enum pipe pipe, bool state)
+ enum transcoder cpu_transcoder, bool state)
{
bool cur_state;
- enum transcoder cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv,
- pipe);
enum intel_display_power_domain power_domain;
intel_wakeref_t wakeref;
@@ -1290,8 +1277,9 @@ void assert_pipe(struct drm_i915_private *dev_priv,
}
I915_STATE_WARN(cur_state != state,
- "pipe %c assertion failure (expected %s, current %s)\n",
- pipe_name(pipe), onoff(state), onoff(cur_state));
+ "transcoder %s assertion failure (expected %s, current %s)\n",
+ transcoder_name(cpu_transcoder),
+ onoff(state), onoff(cur_state));
}
static void assert_plane(struct intel_plane *plane, bool state)
@@ -1418,7 +1406,7 @@ static void vlv_enable_pll(struct intel_crtc *crtc,
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
enum pipe pipe = crtc->pipe;
- assert_pipe_disabled(dev_priv, pipe);
+ assert_pipe_disabled(dev_priv, pipe_config->cpu_transcoder);
/* PLL is protected by panel, make sure we can write it */
assert_panel_unlocked(dev_priv, pipe);
@@ -1467,7 +1455,7 @@ static void chv_enable_pll(struct intel_crtc *crtc,
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
enum pipe pipe = crtc->pipe;
- assert_pipe_disabled(dev_priv, pipe);
+ assert_pipe_disabled(dev_priv, pipe_config->cpu_transcoder);
/* PLL is protected by panel, make sure we can write it */
assert_panel_unlocked(dev_priv, pipe);
@@ -1514,7 +1502,7 @@ static void i9xx_enable_pll(struct intel_crtc *crtc,
u32 dpll = crtc_state->dpll_hw_state.dpll;
int i;
- assert_pipe_disabled(dev_priv, crtc->pipe);
+ assert_pipe_disabled(dev_priv, crtc_state->cpu_transcoder);
/* PLL is protected by panel, make sure we can write it */
if (i9xx_has_pps(dev_priv))
@@ -1554,7 +1542,7 @@ static void i9xx_enable_pll(struct intel_crtc *crtc,
static void i9xx_disable_pll(const struct intel_crtc_state *crtc_state)
{
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
enum pipe pipe = crtc->pipe;
@@ -1563,7 +1551,7 @@ static void i9xx_disable_pll(const struct intel_crtc_state *crtc_state)
return;
/* Make sure the pipe isn't still relying on us */
- assert_pipe_disabled(dev_priv, pipe);
+ assert_pipe_disabled(dev_priv, crtc_state->cpu_transcoder);
I915_WRITE(DPLL(pipe), DPLL_VGA_MODE_DIS);
POSTING_READ(DPLL(pipe));
@@ -1574,7 +1562,7 @@ static void vlv_disable_pll(struct drm_i915_private *dev_priv, enum pipe pipe)
u32 val;
/* Make sure the pipe isn't still relying on us */
- assert_pipe_disabled(dev_priv, pipe);
+ assert_pipe_disabled(dev_priv, (enum transcoder)pipe);
val = DPLL_INTEGRATED_REF_CLK_VLV |
DPLL_REF_CLK_ENABLE_VLV | DPLL_VGA_MODE_DIS;
@@ -1591,7 +1579,7 @@ static void chv_disable_pll(struct drm_i915_private *dev_priv, enum pipe pipe)
u32 val;
/* Make sure the pipe isn't still relying on us */
- assert_pipe_disabled(dev_priv, pipe);
+ assert_pipe_disabled(dev_priv, (enum transcoder)pipe);
val = DPLL_SSC_REF_CLK_CHV |
DPLL_REF_CLK_ENABLE_VLV | DPLL_VGA_MODE_DIS;
@@ -1643,9 +1631,9 @@ void vlv_wait_port_ready(struct drm_i915_private *dev_priv,
I915_READ(dpll_reg) & port_mask, expected_mask);
}
-static void ironlake_enable_pch_transcoder(const struct intel_crtc_state *crtc_state)
+static void ilk_enable_pch_transcoder(const struct intel_crtc_state *crtc_state)
{
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
enum pipe pipe = crtc->pipe;
i915_reg_t reg;
@@ -1659,11 +1647,16 @@ static void ironlake_enable_pch_transcoder(const struct intel_crtc_state *crtc_s
assert_fdi_rx_enabled(dev_priv, pipe);
if (HAS_PCH_CPT(dev_priv)) {
- /* Workaround: Set the timing override bit before enabling the
- * pch transcoder. */
reg = TRANS_CHICKEN2(pipe);
val = I915_READ(reg);
+ /*
+ * Workaround: Set the timing override bit
+ * before enabling the pch transcoder.
+ */
val |= TRANS_CHICKEN2_TIMING_OVERRIDE;
+ /* Configure frame start delay to match the CPU */
+ val &= ~TRANS_CHICKEN2_FRAME_START_DELAY_MASK;
+ val |= TRANS_CHICKEN2_FRAME_START_DELAY(0);
I915_WRITE(reg, val);
}
@@ -1672,6 +1665,10 @@ static void ironlake_enable_pch_transcoder(const struct intel_crtc_state *crtc_s
pipeconf_val = I915_READ(PIPECONF(pipe));
if (HAS_PCH_IBX(dev_priv)) {
+ /* Configure frame start delay to match the CPU */
+ val &= ~TRANS_FRAME_START_DELAY_MASK;
+ val |= TRANS_FRAME_START_DELAY(0);
+
/*
* Make the BPC in transcoder be consistent with
* that in pipeconf reg. For HDMI we must use 8bpc
@@ -1709,9 +1706,12 @@ static void lpt_enable_pch_transcoder(struct drm_i915_private *dev_priv,
assert_fdi_tx_enabled(dev_priv, (enum pipe) cpu_transcoder);
assert_fdi_rx_enabled(dev_priv, PIPE_A);
- /* Workaround: set timing override bit. */
val = I915_READ(TRANS_CHICKEN2(PIPE_A));
+ /* Workaround: set timing override bit. */
val |= TRANS_CHICKEN2_TIMING_OVERRIDE;
+ /* Configure frame start delay to match the CPU */
+ val &= ~TRANS_CHICKEN2_FRAME_START_DELAY_MASK;
+ val |= TRANS_CHICKEN2_FRAME_START_DELAY(0);
I915_WRITE(TRANS_CHICKEN2(PIPE_A), val);
val = TRANS_ENABLE;
@@ -1729,8 +1729,8 @@ static void lpt_enable_pch_transcoder(struct drm_i915_private *dev_priv,
DRM_ERROR("Failed to enable PCH transcoder\n");
}
-static void ironlake_disable_pch_transcoder(struct drm_i915_private *dev_priv,
- enum pipe pipe)
+static void ilk_disable_pch_transcoder(struct drm_i915_private *dev_priv,
+ enum pipe pipe)
{
i915_reg_t reg;
u32 val;
@@ -1789,7 +1789,7 @@ enum pipe intel_crtc_pch_transcoder(struct intel_crtc *crtc)
static u32 intel_crtc_max_vblank_count(const struct intel_crtc_state *crtc_state)
{
- struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev);
+ struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
/*
* On i965gm the hardware frame counter reads
@@ -1809,16 +1809,25 @@ static u32 intel_crtc_max_vblank_count(const struct intel_crtc_state *crtc_state
static void intel_crtc_vblank_on(const struct intel_crtc_state *crtc_state)
{
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+ assert_vblank_disabled(&crtc->base);
drm_crtc_set_max_vblank_count(&crtc->base,
intel_crtc_max_vblank_count(crtc_state));
drm_crtc_vblank_on(&crtc->base);
}
+void intel_crtc_vblank_off(const struct intel_crtc_state *crtc_state)
+{
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+
+ drm_crtc_vblank_off(&crtc->base);
+ assert_vblank_disabled(&crtc->base);
+}
+
static void intel_enable_pipe(const struct intel_crtc_state *new_crtc_state)
{
- struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->base.crtc);
+ struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->uapi.crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
enum transcoder cpu_transcoder = new_crtc_state->cpu_transcoder;
enum pipe pipe = crtc->pipe;
@@ -1874,9 +1883,9 @@ static void intel_enable_pipe(const struct intel_crtc_state *new_crtc_state)
intel_wait_for_pipe_scanline_moving(crtc);
}
-static void intel_disable_pipe(const struct intel_crtc_state *old_crtc_state)
+void intel_disable_pipe(const struct intel_crtc_state *old_crtc_state)
{
- struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->base.crtc);
+ struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->uapi.crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
enum transcoder cpu_transcoder = old_crtc_state->cpu_transcoder;
enum pipe pipe = crtc->pipe;
@@ -1919,6 +1928,74 @@ static unsigned int intel_tile_size(const struct drm_i915_private *dev_priv)
return IS_GEN(dev_priv, 2) ? 2048 : 4096;
}
+static bool is_ccs_plane(const struct drm_framebuffer *fb, int plane)
+{
+ if (!is_ccs_modifier(fb->modifier))
+ return false;
+
+ return plane >= fb->format->num_planes / 2;
+}
+
+static bool is_gen12_ccs_modifier(u64 modifier)
+{
+ return modifier == I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS ||
+ modifier == I915_FORMAT_MOD_Y_TILED_GEN12_MC_CCS;
+}
+
+static bool is_gen12_ccs_plane(const struct drm_framebuffer *fb, int plane)
+{
+ return is_gen12_ccs_modifier(fb->modifier) && is_ccs_plane(fb, plane);
+}
+
+static bool is_aux_plane(const struct drm_framebuffer *fb, int plane)
+{
+ if (is_ccs_modifier(fb->modifier))
+ return is_ccs_plane(fb, plane);
+
+ return plane == 1;
+}
+
+static int main_to_ccs_plane(const struct drm_framebuffer *fb, int main_plane)
+{
+ WARN_ON(!is_ccs_modifier(fb->modifier) ||
+ (main_plane && main_plane >= fb->format->num_planes / 2));
+
+ return fb->format->num_planes / 2 + main_plane;
+}
+
+static int ccs_to_main_plane(const struct drm_framebuffer *fb, int ccs_plane)
+{
+ WARN_ON(!is_ccs_modifier(fb->modifier) ||
+ ccs_plane < fb->format->num_planes / 2);
+
+ return ccs_plane - fb->format->num_planes / 2;
+}
+
+/* Return either the main plane's CCS plane or, for a non-CCS FB, the UV plane */
+int intel_main_to_aux_plane(const struct drm_framebuffer *fb, int main_plane)
+{
+ if (is_ccs_modifier(fb->modifier))
+ return main_to_ccs_plane(fb, main_plane);
+
+ return 1;
+}
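
A minimal standalone sketch of the plane-index arithmetic used by the helpers above (plain ints, not kernel code; the NV12-with-CCS plane layout follows the gen12 format table added further below):

#include <assert.h>

static int main_to_ccs(int num_planes, int main_plane)
{
	return num_planes / 2 + main_plane;
}

int main(void)
{
	/* XRGB8888 + CCS: plane 0 = color, plane 1 = CCS */
	assert(main_to_ccs(2, 0) == 1);
	/* NV12 + gen12 CCS: 0 = Y, 1 = UV, 2 = Y CCS, 3 = UV CCS */
	assert(main_to_ccs(4, 0) == 2);
	assert(main_to_ccs(4, 1) == 3);
	return 0;
}
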
+
+bool
+intel_format_info_is_yuv_semiplanar(const struct drm_format_info *info,
+ uint64_t modifier)
+{
+ return info->is_yuv &&
+ info->num_planes == (is_ccs_modifier(modifier) ? 4 : 2);
+}
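
The semiplanar test boils down to a plane-count check; a small stand-in mirroring it (assumed values, not kernel code):

#include <stdbool.h>
#include <assert.h>

static bool semiplanar(bool is_yuv, int num_planes, bool ccs)
{
	return is_yuv && num_planes == (ccs ? 4 : 2);
}

int main(void)
{
	assert(semiplanar(true, 2, false));	/* NV12, linear: Y + UV */
	assert(semiplanar(true, 4, true));	/* NV12 + gen12 CCS */
	assert(!semiplanar(true, 2, true));	/* YUYV + CCS: packed format */
	return 0;
}
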
+
+static bool is_semiplanar_uv_plane(const struct drm_framebuffer *fb,
+ int color_plane)
+{
+ return intel_format_info_is_yuv_semiplanar(fb->format, fb->modifier) &&
+ color_plane == 1;
+}
+
static unsigned int
intel_tile_width_bytes(const struct drm_framebuffer *fb, int color_plane)
{
@@ -1934,16 +2011,21 @@ intel_tile_width_bytes(const struct drm_framebuffer *fb, int color_plane)
else
return 512;
case I915_FORMAT_MOD_Y_TILED_CCS:
- if (color_plane == 1)
+ if (is_ccs_plane(fb, color_plane))
return 128;
/* fall through */
+ case I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS:
+ case I915_FORMAT_MOD_Y_TILED_GEN12_MC_CCS:
+ if (is_ccs_plane(fb, color_plane))
+ return 64;
+ /* fall through */
case I915_FORMAT_MOD_Y_TILED:
if (IS_GEN(dev_priv, 2) || HAS_128_BYTE_Y_TILING(dev_priv))
return 128;
else
return 512;
case I915_FORMAT_MOD_Yf_TILED_CCS:
- if (color_plane == 1)
+ if (is_ccs_plane(fb, color_plane))
return 128;
/* fall through */
case I915_FORMAT_MOD_Yf_TILED:
@@ -1970,6 +2052,9 @@ intel_tile_width_bytes(const struct drm_framebuffer *fb, int color_plane)
static unsigned int
intel_tile_height(const struct drm_framebuffer *fb, int color_plane)
{
+ if (is_gen12_ccs_plane(fb, color_plane))
+ return 1;
+
return intel_tile_size(to_i915(fb->dev)) /
intel_tile_width_bytes(fb, color_plane);
}
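
Taken together, a gen12 CCS plane ends up with a 64B x 1 "tile" while a regular Y-tile is 128B x 32; a quick sketch with the assumed values:

#include <stdio.h>

int main(void)
{
	int tile_size = 4096;		/* intel_tile_size(), gen >= 3 */
	int ccs_width_bytes = 64;	/* intel_tile_width_bytes(), gen12 CCS */
	int ccs_height = 1;		/* intel_tile_height() special case */

	printf("gen12 CCS tile: %dB x %d\n", ccs_width_bytes, ccs_height);
	printf("Y tile: 128B x %d\n", tile_size / 128);
	return 0;
}
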
@@ -1983,7 +2068,17 @@ static void intel_tile_dims(const struct drm_framebuffer *fb, int color_plane,
unsigned int cpp = fb->format->cpp[color_plane];
*tile_width = tile_width_bytes / cpp;
- *tile_height = intel_tile_size(to_i915(fb->dev)) / tile_width_bytes;
+ *tile_height = intel_tile_height(fb, color_plane);
+}
+
+static unsigned int intel_tile_row_size(const struct drm_framebuffer *fb,
+ int color_plane)
+{
+ unsigned int tile_width, tile_height;
+
+ intel_tile_dims(fb, color_plane, &tile_width, &tile_height);
+
+ return fb->pitches[color_plane] * tile_height;
}
unsigned int
@@ -2060,7 +2155,8 @@ static unsigned int intel_surf_alignment(const struct drm_framebuffer *fb,
struct drm_i915_private *dev_priv = to_i915(fb->dev);
/* AUX_DIST needs only 4K alignment */
- if (color_plane == 1)
+ if ((INTEL_GEN(dev_priv) < 12 && is_aux_plane(fb, color_plane)) ||
+ is_ccs_plane(fb, color_plane))
return 4096;
switch (fb->modifier) {
@@ -2070,9 +2166,19 @@ static unsigned int intel_surf_alignment(const struct drm_framebuffer *fb,
if (INTEL_GEN(dev_priv) >= 9)
return 256 * 1024;
return 0;
+ case I915_FORMAT_MOD_Y_TILED_GEN12_MC_CCS:
+ if (is_semiplanar_uv_plane(fb, color_plane))
+ return intel_tile_row_size(fb, color_plane);
+ /* Fall-through */
+ case I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS:
+ return 16 * 1024;
case I915_FORMAT_MOD_Y_TILED_CCS:
case I915_FORMAT_MOD_Yf_TILED_CCS:
case I915_FORMAT_MOD_Y_TILED:
+ if (INTEL_GEN(dev_priv) >= 12 &&
+ is_semiplanar_uv_plane(fb, color_plane))
+ return intel_tile_row_size(fb, color_plane);
+ /* Fall-through */
case I915_FORMAT_MOD_Yf_TILED:
return 1 * 1024 * 1024;
default:
@@ -2083,7 +2189,7 @@ static unsigned int intel_surf_alignment(const struct drm_framebuffer *fb,
static bool intel_plane_uses_fence(const struct intel_plane_state *plane_state)
{
- struct intel_plane *plane = to_intel_plane(plane_state->base.plane);
+ struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
return INTEL_GEN(dev_priv) < 4 ||
@@ -2109,6 +2215,8 @@ intel_pin_and_fence_fb_obj(struct drm_framebuffer *fb,
return ERR_PTR(-EINVAL);
alignment = intel_surf_alignment(fb, 0);
+ if (WARN_ON(alignment && !is_power_of_2(alignment)))
+ return ERR_PTR(-EINVAL);
/* Note that the w/a also requires 64 PTE of padding following the
* bo. We currently fill all unused PTE with the shadow page and so
@@ -2126,19 +2234,18 @@ intel_pin_and_fence_fb_obj(struct drm_framebuffer *fb,
* pin/unpin/fence and not more.
*/
wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
- i915_gem_object_lock(obj);
atomic_inc(&dev_priv->gpu_error.pending_fb_pin);
- pinctl = 0;
-
- /* Valleyview is definitely limited to scanning out the first
+ /*
+ * Valleyview is definitely limited to scanning out the first
* 512MiB. Lets presume this behaviour was inherited from the
* g4x display engine and that all earlier gen are similarly
* limited. Testing suggests that it is a little more
* complicated than this. For example, Cherryview appears quite
* happy to scanout from anywhere within its global aperture.
*/
+ pinctl = 0;
if (HAS_GMCH(dev_priv))
pinctl |= PIN_MAPPABLE;
@@ -2150,7 +2257,8 @@ intel_pin_and_fence_fb_obj(struct drm_framebuffer *fb,
if (uses_fence && i915_vma_is_map_and_fenceable(vma)) {
int ret;
- /* Install a fence for tiled scan-out. Pre-i965 always needs a
+ /*
+ * Install a fence for tiled scan-out. Pre-i965 always needs a
* fence, whereas 965+ only requires a fence if using
* framebuffer compression. For simplicity, we always, when
* possible, install a fence as the cost is not that onerous.
@@ -2180,8 +2288,6 @@ intel_pin_and_fence_fb_obj(struct drm_framebuffer *fb,
i915_vma_get(vma);
err:
atomic_dec(&dev_priv->gpu_error.pending_fb_pin);
-
- i915_gem_object_unlock(obj);
intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
return vma;
}
@@ -2216,7 +2322,7 @@ u32 intel_fb_xy_to_linear(int x, int y,
const struct intel_plane_state *state,
int color_plane)
{
- const struct drm_framebuffer *fb = state->base.fb;
+ const struct drm_framebuffer *fb = state->hw.fb;
unsigned int cpp = fb->format->cpp[color_plane];
unsigned int pitch = state->color_plane[color_plane].stride;
@@ -2264,9 +2370,10 @@ static u32 intel_adjust_tile_offset(int *x, int *y,
return new_offset;
}
-static bool is_surface_linear(u64 modifier, int color_plane)
+static bool is_surface_linear(const struct drm_framebuffer *fb, int color_plane)
{
- return modifier == DRM_FORMAT_MOD_LINEAR;
+ return fb->modifier == DRM_FORMAT_MOD_LINEAR ||
+ is_gen12_ccs_plane(fb, color_plane);
}
static u32 intel_adjust_aligned_offset(int *x, int *y,
@@ -2281,7 +2388,7 @@ static u32 intel_adjust_aligned_offset(int *x, int *y,
WARN_ON(new_offset > old_offset);
- if (!is_surface_linear(fb->modifier, color_plane)) {
+ if (!is_surface_linear(fb, color_plane)) {
unsigned int tile_size, tile_width, tile_height;
unsigned int pitch_tiles;
@@ -2317,8 +2424,8 @@ static u32 intel_plane_adjust_aligned_offset(int *x, int *y,
int color_plane,
u32 old_offset, u32 new_offset)
{
- return intel_adjust_aligned_offset(x, y, state->base.fb, color_plane,
- state->base.rotation,
+ return intel_adjust_aligned_offset(x, y, state->hw.fb, color_plane,
+ state->hw.rotation,
state->color_plane[color_plane].stride,
old_offset, new_offset);
}
@@ -2348,10 +2455,7 @@ static u32 intel_compute_aligned_offset(struct drm_i915_private *dev_priv,
unsigned int cpp = fb->format->cpp[color_plane];
u32 offset, offset_aligned;
- if (alignment)
- alignment--;
-
- if (!is_surface_linear(fb->modifier, color_plane)) {
+ if (!is_surface_linear(fb, color_plane)) {
unsigned int tile_size, tile_width, tile_height;
unsigned int tile_rows, tiles, pitch_tiles;
@@ -2372,17 +2476,24 @@ static u32 intel_compute_aligned_offset(struct drm_i915_private *dev_priv,
*x %= tile_width;
offset = (tile_rows * pitch_tiles + tiles) * tile_size;
- offset_aligned = offset & ~alignment;
+
+ offset_aligned = offset;
+ if (alignment)
+ offset_aligned = rounddown(offset_aligned, alignment);
intel_adjust_tile_offset(x, y, tile_width, tile_height,
tile_size, pitch_tiles,
offset, offset_aligned);
} else {
offset = *y * pitch + *x * cpp;
- offset_aligned = offset & ~alignment;
-
- *y = (offset & alignment) / pitch;
- *x = ((offset & alignment) - *y * pitch) / cpp;
+ offset_aligned = offset;
+ if (alignment) {
+ offset_aligned = rounddown(offset_aligned, alignment);
+ *y = (offset % alignment) / pitch;
+ *x = ((offset % alignment) - *y * pitch) / cpp;
+ } else {
+ *y = *x = 0;
+ }
}
return offset_aligned;
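
The switch from the old mask idiom to rounddown() matters because the alignment may now be a tile row size that is not a power of two (see the gen12 semiplanar UV cases in intel_surf_alignment() above). A standalone sketch, with a local stand-in assumed to match the kernel macro:

#include <assert.h>

#define rounddown(x, y) ((x) - ((x) % (y)))	/* local stand-in */

int main(void)
{
	/* power-of-two alignment: identical to the old mask idiom */
	assert(rounddown(5000u, 4096u) == (5000u & ~4095u));
	/* non-power-of-two alignment, e.g. a tile row size */
	assert(rounddown(5000u, 1536u) == 4608u);
	return 0;
}
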
@@ -2392,10 +2503,10 @@ static u32 intel_plane_compute_aligned_offset(int *x, int *y,
const struct intel_plane_state *state,
int color_plane)
{
- struct intel_plane *intel_plane = to_intel_plane(state->base.plane);
+ struct intel_plane *intel_plane = to_intel_plane(state->uapi.plane);
struct drm_i915_private *dev_priv = to_i915(intel_plane->base.dev);
- const struct drm_framebuffer *fb = state->base.fb;
- unsigned int rotation = state->base.rotation;
+ const struct drm_framebuffer *fb = state->hw.fb;
+ unsigned int rotation = state->hw.rotation;
int pitch = state->color_plane[color_plane].stride;
u32 alignment;
@@ -2415,9 +2526,17 @@ static int intel_fb_offset_to_xy(int *x, int *y,
{
struct drm_i915_private *dev_priv = to_i915(fb->dev);
unsigned int height;
+ u32 alignment;
+
+ if (INTEL_GEN(dev_priv) >= 12 &&
+ is_semiplanar_uv_plane(fb, color_plane))
+ alignment = intel_tile_row_size(fb, color_plane);
+ else if (fb->modifier != DRM_FORMAT_MOD_LINEAR)
+ alignment = intel_tile_size(dev_priv);
+ else
+ alignment = 0;
- if (fb->modifier != DRM_FORMAT_MOD_LINEAR &&
- fb->offsets[color_plane] % intel_tile_size(dev_priv)) {
+ if (alignment != 0 && fb->offsets[color_plane] % alignment) {
DRM_DEBUG_KMS("Misaligned offset 0x%08x for color plane %d\n",
fb->offsets[color_plane], color_plane);
return -EINVAL;
@@ -2453,6 +2572,8 @@ static unsigned int intel_fb_modifier_to_tiling(u64 fb_modifier)
return I915_TILING_X;
case I915_FORMAT_MOD_Y_TILED:
case I915_FORMAT_MOD_Y_TILED_CCS:
+ case I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS:
+ case I915_FORMAT_MOD_Y_TILED_GEN12_MC_CCS:
return I915_TILING_Y;
default:
return I915_TILING_NONE;
@@ -2473,7 +2594,7 @@ static unsigned int intel_fb_modifier_to_tiling(u64 fb_modifier)
* us a ratio of one byte in the CCS for each 8x16 pixels in the
* main surface.
*/
-static const struct drm_format_info ccs_formats[] = {
+static const struct drm_format_info skl_ccs_formats[] = {
{ .format = DRM_FORMAT_XRGB8888, .depth = 24, .num_planes = 2,
.cpp = { 4, 1, }, .hsub = 8, .vsub = 16, },
{ .format = DRM_FORMAT_XBGR8888, .depth = 24, .num_planes = 2,
@@ -2484,6 +2605,52 @@ static const struct drm_format_info ccs_formats[] = {
.cpp = { 4, 1, }, .hsub = 8, .vsub = 16, .has_alpha = true, },
};
+/*
+ * Gen-12 compression uses 4 bits of CCS data for each cache line pair in the
+ * main surface. Each 64B CCS cache line represents an area of 4x1 Y-tiles
+ * in the main surface. With 4-byte pixels and each Y-tile having dimensions of
+ * 32x32 pixels, the ratio turns out to be 1B in the CCS for every 2x32 pixels in
+ * the main surface.
+ */
+static const struct drm_format_info gen12_ccs_formats[] = {
+ { .format = DRM_FORMAT_XRGB8888, .depth = 24, .num_planes = 2,
+ .char_per_block = { 4, 1 }, .block_w = { 1, 2 }, .block_h = { 1, 1 },
+ .hsub = 1, .vsub = 1, },
+ { .format = DRM_FORMAT_XBGR8888, .depth = 24, .num_planes = 2,
+ .char_per_block = { 4, 1 }, .block_w = { 1, 2 }, .block_h = { 1, 1 },
+ .hsub = 1, .vsub = 1, },
+ { .format = DRM_FORMAT_ARGB8888, .depth = 32, .num_planes = 2,
+ .char_per_block = { 4, 1 }, .block_w = { 1, 2 }, .block_h = { 1, 1 },
+ .hsub = 1, .vsub = 1, .has_alpha = true },
+ { .format = DRM_FORMAT_ABGR8888, .depth = 32, .num_planes = 2,
+ .char_per_block = { 4, 1 }, .block_w = { 1, 2 }, .block_h = { 1, 1 },
+ .hsub = 1, .vsub = 1, .has_alpha = true },
+ { .format = DRM_FORMAT_YUYV, .num_planes = 2,
+ .char_per_block = { 2, 1 }, .block_w = { 1, 2 }, .block_h = { 1, 1 },
+ .hsub = 2, .vsub = 1, .is_yuv = true },
+ { .format = DRM_FORMAT_YVYU, .num_planes = 2,
+ .char_per_block = { 2, 1 }, .block_w = { 1, 2 }, .block_h = { 1, 1 },
+ .hsub = 2, .vsub = 1, .is_yuv = true },
+ { .format = DRM_FORMAT_UYVY, .num_planes = 2,
+ .char_per_block = { 2, 1 }, .block_w = { 1, 2 }, .block_h = { 1, 1 },
+ .hsub = 2, .vsub = 1, .is_yuv = true },
+ { .format = DRM_FORMAT_VYUY, .num_planes = 2,
+ .char_per_block = { 2, 1 }, .block_w = { 1, 2 }, .block_h = { 1, 1 },
+ .hsub = 2, .vsub = 1, .is_yuv = true },
+ { .format = DRM_FORMAT_NV12, .num_planes = 4,
+ .char_per_block = { 1, 2, 1, 1 }, .block_w = { 1, 1, 4, 4 }, .block_h = { 1, 1, 1, 1 },
+ .hsub = 2, .vsub = 2, .is_yuv = true },
+ { .format = DRM_FORMAT_P010, .num_planes = 4,
+ .char_per_block = { 2, 4, 1, 1 }, .block_w = { 1, 1, 2, 2 }, .block_h = { 1, 1, 1, 1 },
+ .hsub = 2, .vsub = 2, .is_yuv = true },
+ { .format = DRM_FORMAT_P012, .num_planes = 4,
+ .char_per_block = { 2, 4, 1, 1 }, .block_w = { 1, 1, 2, 2 }, .block_h = { 1, 1, 1, 1 },
+ .hsub = 2, .vsub = 2, .is_yuv = true },
+ { .format = DRM_FORMAT_P016, .num_planes = 4,
+ .char_per_block = { 2, 4, 1, 1 }, .block_w = { 1, 1, 2, 2 }, .block_h = { 1, 1, 1, 1 },
+ .hsub = 2, .vsub = 2, .is_yuv = true },
+};
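
A worked check of the ratio stated above, assuming a 4096x4096 XRGB8888 framebuffer; 4 bits of CCS per 128B cache-line pair works out to the same 1:256 as the 2x32-pixel rule:

#include <stdio.h>

int main(void)
{
	unsigned long w = 4096, h = 4096, cpp = 4;
	unsigned long main_bytes = w * h * cpp;		/* 64 MiB */
	unsigned long ccs_bytes = (w / 2) * (h / 32);	/* 1B per 2x32 px */

	printf("main:ccs = %lu:1\n", main_bytes / ccs_bytes);	/* 256:1 */
	return 0;
}
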
+
static const struct drm_format_info *
lookup_format_info(const struct drm_format_info formats[],
int num_formats, u32 format)
@@ -2504,8 +2671,13 @@ intel_get_format_info(const struct drm_mode_fb_cmd2 *cmd)
switch (cmd->modifier[0]) {
case I915_FORMAT_MOD_Y_TILED_CCS:
case I915_FORMAT_MOD_Yf_TILED_CCS:
- return lookup_format_info(ccs_formats,
- ARRAY_SIZE(ccs_formats),
+ return lookup_format_info(skl_ccs_formats,
+ ARRAY_SIZE(skl_ccs_formats),
+ cmd->pixel_format);
+ case I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS:
+ case I915_FORMAT_MOD_Y_TILED_GEN12_MC_CCS:
+ return lookup_format_info(gen12_ccs_formats,
+ ARRAY_SIZE(gen12_ccs_formats),
cmd->pixel_format);
default:
return NULL;
@@ -2514,10 +2686,18 @@ intel_get_format_info(const struct drm_mode_fb_cmd2 *cmd)
bool is_ccs_modifier(u64 modifier)
{
- return modifier == I915_FORMAT_MOD_Y_TILED_CCS ||
+ return modifier == I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS ||
+ modifier == I915_FORMAT_MOD_Y_TILED_GEN12_MC_CCS ||
+ modifier == I915_FORMAT_MOD_Y_TILED_CCS ||
modifier == I915_FORMAT_MOD_Yf_TILED_CCS;
}
+static int gen12_ccs_aux_stride(struct drm_framebuffer *fb, int ccs_plane)
+{
+ return DIV_ROUND_UP(fb->pitches[ccs_to_main_plane(fb, ccs_plane)],
+ 512) * 64;
+}
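
The 512 -> 64 constants encode 64 CCS bytes per 512 bytes of main-surface pitch, i.e. 1:8 per row; the remaining factor of 32 comes from each CCS row covering 32 main-surface rows. A sketch with assumed values:

#include <assert.h>

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))	/* local stand-in */

int main(void)
{
	unsigned int main_pitch = 4096 * 4;	/* 4096px XRGB8888 */
	unsigned int aux_pitch = DIV_ROUND_UP(main_pitch, 512) * 64;

	assert(aux_pitch == main_pitch / 8);	/* 2048B */
	return 0;
}
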
+
u32 intel_plane_fb_max_stride(struct drm_i915_private *dev_priv,
u32 pixel_format, u64 modifier)
{
@@ -2562,8 +2742,9 @@ static u32
intel_fb_stride_alignment(const struct drm_framebuffer *fb, int color_plane)
{
struct drm_i915_private *dev_priv = to_i915(fb->dev);
+ u32 tile_width;
- if (fb->modifier == DRM_FORMAT_MOD_LINEAR) {
+ if (is_surface_linear(fb, color_plane)) {
u32 max_stride = intel_plane_fb_max_stride(dev_priv,
fb->format->format,
fb->modifier);
@@ -2572,20 +2753,41 @@ intel_fb_stride_alignment(const struct drm_framebuffer *fb, int color_plane)
* To make remapping with linear generally feasible
* we need the stride to be page aligned.
*/
- if (fb->pitches[color_plane] > max_stride)
+ if (fb->pitches[color_plane] > max_stride &&
+ !is_ccs_modifier(fb->modifier))
return intel_tile_size(dev_priv);
else
return 64;
- } else {
- return intel_tile_width_bytes(fb, color_plane);
}
+
+ tile_width = intel_tile_width_bytes(fb, color_plane);
+ if (is_ccs_modifier(fb->modifier)) {
+ /*
+ * Display WA #0531: skl,bxt,kbl,glk
+ *
+ * Render decompression and plane width > 3840
+ * combined with horizontal panning requires the
+ * plane stride to be a multiple of 4. We'll just
+ * require the entire fb to accommodate that to avoid
+ * potential runtime errors at plane configuration time.
+ */
+ if (IS_GEN(dev_priv, 9) && color_plane == 0 && fb->width > 3840)
+ tile_width *= 4;
+ /*
+ * The main surface pitch must be padded to a multiple of four
+ * tile widths.
+ */
+ else if (INTEL_GEN(dev_priv) >= 12)
+ tile_width *= 4;
+ }
+ return tile_width;
}
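
Both CCS branches above pad the main-surface pitch to four tile widths, i.e. a 512B multiple for Y-tiled surfaces. A trivial check with assumed numbers:

#include <assert.h>

int main(void)
{
	int tile_width = 128;		/* Y-tile row width in bytes */
	int align = tile_width * 4;	/* gen9 WA #0531 / gen12 rule */
	int pitch = 5120 * 4;		/* 5120px XRGB8888 main plane */

	assert(align == 512 && pitch % align == 0);
	return 0;
}
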
bool intel_plane_can_remap(const struct intel_plane_state *plane_state)
{
- struct intel_plane *plane = to_intel_plane(plane_state->base.plane);
+ struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
- const struct drm_framebuffer *fb = plane_state->base.fb;
+ const struct drm_framebuffer *fb = plane_state->hw.fb;
int i;
/* We don't want to deal with remapping with cursors */
@@ -2623,16 +2825,16 @@ bool intel_plane_can_remap(const struct intel_plane_state *plane_state)
static bool intel_plane_needs_remap(const struct intel_plane_state *plane_state)
{
- struct intel_plane *plane = to_intel_plane(plane_state->base.plane);
- const struct drm_framebuffer *fb = plane_state->base.fb;
- unsigned int rotation = plane_state->base.rotation;
+ struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
+ const struct drm_framebuffer *fb = plane_state->hw.fb;
+ unsigned int rotation = plane_state->hw.rotation;
u32 stride, max_stride;
/*
* No remapping for invisible planes since we don't have
* an actual source viewport to remap.
*/
- if (!plane_state->base.visible)
+ if (!plane_state->uapi.visible)
return false;
if (!intel_plane_can_remap(plane_state))
@@ -2649,12 +2851,171 @@ static bool intel_plane_needs_remap(const struct intel_plane_state *plane_state)
return stride > max_stride;
}
+static void
+intel_fb_plane_get_subsampling(int *hsub, int *vsub,
+ const struct drm_framebuffer *fb,
+ int color_plane)
+{
+ int main_plane;
+
+ if (color_plane == 0) {
+ *hsub = 1;
+ *vsub = 1;
+
+ return;
+ }
+
+ /*
+	 * TODO: Deduce the subsampling from the char block for all CCS
+ * formats and planes.
+ */
+ if (!is_gen12_ccs_plane(fb, color_plane)) {
+ *hsub = fb->format->hsub;
+ *vsub = fb->format->vsub;
+
+ return;
+ }
+
+ main_plane = ccs_to_main_plane(fb, color_plane);
+ *hsub = drm_format_info_block_width(fb->format, color_plane) /
+ drm_format_info_block_width(fb->format, main_plane);
+
+ /*
+ * The min stride check in the core framebuffer_check() function
+ * assumes that format->hsub applies to every plane except for the
+ * first plane. That's incorrect for the CCS AUX plane of the first
+ * plane, but for the above check to pass we must define the block
+ * width with that subsampling applied to it. Adjust the width here
+ * accordingly, so we can calculate the actual subsampling factor.
+ */
+ if (main_plane == 0)
+ *hsub *= fb->format->hsub;
+
+ *vsub = 32;
+}
+
+static int
+intel_fb_check_ccs_xy(struct drm_framebuffer *fb, int ccs_plane, int x, int y)
+{
+ struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
+ int main_plane;
+ int hsub, vsub;
+ int tile_width, tile_height;
+ int ccs_x, ccs_y;
+ int main_x, main_y;
+
+ if (!is_ccs_plane(fb, ccs_plane))
+ return 0;
+
+ intel_tile_dims(fb, ccs_plane, &tile_width, &tile_height);
+ intel_fb_plane_get_subsampling(&hsub, &vsub, fb, ccs_plane);
+
+ tile_width *= hsub;
+ tile_height *= vsub;
+
+ ccs_x = (x * hsub) % tile_width;
+ ccs_y = (y * vsub) % tile_height;
+
+ main_plane = ccs_to_main_plane(fb, ccs_plane);
+ main_x = intel_fb->normal[main_plane].x % tile_width;
+ main_y = intel_fb->normal[main_plane].y % tile_height;
+
+ /*
+ * CCS doesn't have its own x/y offset register, so the intra CCS tile
+ * x/y offsets must match between CCS and the main surface.
+ */
+ if (main_x != ccs_x || main_y != ccs_y) {
+ DRM_DEBUG_KMS("Bad CCS x/y (main %d,%d ccs %d,%d) full (main %d,%d ccs %d,%d)\n",
+ main_x, main_y,
+ ccs_x, ccs_y,
+ intel_fb->normal[main_plane].x,
+ intel_fb->normal[main_plane].y,
+ x, y);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static void
+intel_fb_plane_dims(int *w, int *h, struct drm_framebuffer *fb, int color_plane)
+{
+ int main_plane = is_ccs_plane(fb, color_plane) ?
+ ccs_to_main_plane(fb, color_plane) : 0;
+ int main_hsub, main_vsub;
+ int hsub, vsub;
+
+ intel_fb_plane_get_subsampling(&main_hsub, &main_vsub, fb, main_plane);
+ intel_fb_plane_get_subsampling(&hsub, &vsub, fb, color_plane);
+ *w = fb->width / main_hsub / hsub;
+ *h = fb->height / main_vsub / vsub;
+}
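
Combining the two subsampling lookups, the per-plane dimensions of an assumed 4096x4096 NV12 + gen12 CCS framebuffer come out as below (factors read off the helper above and the gen12 format table; a sketch, not kernel code):

#include <stdio.h>

struct sub { int hsub, vsub; };

int main(void)
{
	/* planes: Y, UV, Y CCS, UV CCS */
	const struct sub main_sub[4] = { {1, 1}, {1, 1}, {1, 1}, {2, 2} };
	const struct sub plane_sub[4] = { {1, 1}, {2, 2}, {8, 32}, {4, 32} };
	int w = 4096, h = 4096;

	for (int i = 0; i < 4; i++)
		printf("plane %d: %d x %d\n", i,
		       w / main_sub[i].hsub / plane_sub[i].hsub,
		       h / main_sub[i].vsub / plane_sub[i].vsub);
	/* -> 4096x4096, 2048x2048, 512x128, 512x64 */
	return 0;
}
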
+
+/*
+ * Setup the rotated view for an FB plane and return the size the GTT mapping
+ * requires for this view.
+ */
+static u32
+setup_fb_rotation(int plane, const struct intel_remapped_plane_info *plane_info,
+ u32 gtt_offset_rotated, int x, int y,
+ unsigned int width, unsigned int height,
+ unsigned int tile_size,
+ unsigned int tile_width, unsigned int tile_height,
+ struct drm_framebuffer *fb)
+{
+ struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
+ struct intel_rotation_info *rot_info = &intel_fb->rot_info;
+ unsigned int pitch_tiles;
+ struct drm_rect r;
+
+ /* Y or Yf modifiers required for 90/270 rotation */
+ if (fb->modifier != I915_FORMAT_MOD_Y_TILED &&
+ fb->modifier != I915_FORMAT_MOD_Yf_TILED)
+ return 0;
+
+ if (WARN_ON(plane >= ARRAY_SIZE(rot_info->plane)))
+ return 0;
+
+ rot_info->plane[plane] = *plane_info;
+
+ intel_fb->rotated[plane].pitch = plane_info->height * tile_height;
+
+ /* rotate the x/y offsets to match the GTT view */
+ drm_rect_init(&r, x, y, width, height);
+ drm_rect_rotate(&r,
+ plane_info->width * tile_width,
+ plane_info->height * tile_height,
+ DRM_MODE_ROTATE_270);
+ x = r.x1;
+ y = r.y1;
+
+ /* rotate the tile dimensions to match the GTT view */
+ pitch_tiles = intel_fb->rotated[plane].pitch / tile_height;
+ swap(tile_width, tile_height);
+
+ /*
+ * We only keep the x/y offsets, so push all of the
+ * gtt offset into the x/y offsets.
+ */
+ intel_adjust_tile_offset(&x, &y,
+ tile_width, tile_height,
+ tile_size, pitch_tiles,
+ gtt_offset_rotated * tile_size, 0);
+
+ /*
+ * First pixel of the framebuffer from
+ * the start of the rotated gtt mapping.
+ */
+ intel_fb->rotated[plane].x = x;
+ intel_fb->rotated[plane].y = y;
+
+ return plane_info->width * plane_info->height;
+}
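
Callers are expected to accumulate the returned tile count into gtt_offset_rotated, as the reworked intel_fill_fb_info() below does; a return value of 0 (linear plane or non-Y/Yf modifier) leaves the running offset untouched.
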
+
static int
intel_fill_fb_info(struct drm_i915_private *dev_priv,
struct drm_framebuffer *fb)
{
struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
- struct intel_rotation_info *rot_info = &intel_fb->rot_info;
struct drm_i915_gem_object *obj = intel_fb_obj(fb);
u32 gtt_offset_rotated = 0;
unsigned int max_size = 0;
@@ -2669,8 +3030,7 @@ intel_fill_fb_info(struct drm_i915_private *dev_priv,
int ret;
cpp = fb->format->cpp[i];
- width = drm_framebuffer_plane_width(fb->width, fb, i);
- height = drm_framebuffer_plane_height(fb->height, fb, i);
+ intel_fb_plane_dims(&width, &height, fb, i);
ret = intel_fb_offset_to_xy(&x, &y, fb, i);
if (ret) {
@@ -2679,36 +3039,9 @@ intel_fill_fb_info(struct drm_i915_private *dev_priv,
return ret;
}
- if (is_ccs_modifier(fb->modifier) && i == 1) {
- int hsub = fb->format->hsub;
- int vsub = fb->format->vsub;
- int tile_width, tile_height;
- int main_x, main_y;
- int ccs_x, ccs_y;
-
- intel_tile_dims(fb, i, &tile_width, &tile_height);
- tile_width *= hsub;
- tile_height *= vsub;
-
- ccs_x = (x * hsub) % tile_width;
- ccs_y = (y * vsub) % tile_height;
- main_x = intel_fb->normal[0].x % tile_width;
- main_y = intel_fb->normal[0].y % tile_height;
-
- /*
- * CCS doesn't have its own x/y offset register, so the intra CCS tile
- * x/y offsets must match between CCS and the main surface.
- */
- if (main_x != ccs_x || main_y != ccs_y) {
- DRM_DEBUG_KMS("Bad CCS x/y (main %d,%d ccs %d,%d) full (main %d,%d ccs %d,%d)\n",
- main_x, main_y,
- ccs_x, ccs_y,
- intel_fb->normal[0].x,
- intel_fb->normal[0].y,
- x, y);
- return -EINVAL;
- }
- }
+ ret = intel_fb_check_ccs_xy(fb, i, x, y);
+ if (ret)
+ return ret;
/*
* The fence (if used) is aligned to the start of the object
@@ -2739,23 +3072,21 @@ intel_fill_fb_info(struct drm_i915_private *dev_priv,
tile_size);
offset /= tile_size;
- if (!is_surface_linear(fb->modifier, i)) {
+ if (!is_surface_linear(fb, i)) {
+ struct intel_remapped_plane_info plane_info;
unsigned int tile_width, tile_height;
- unsigned int pitch_tiles;
- struct drm_rect r;
intel_tile_dims(fb, i, &tile_width, &tile_height);
- rot_info->plane[i].offset = offset;
- rot_info->plane[i].stride = DIV_ROUND_UP(fb->pitches[i], tile_width * cpp);
- rot_info->plane[i].width = DIV_ROUND_UP(x + width, tile_width);
- rot_info->plane[i].height = DIV_ROUND_UP(y + height, tile_height);
-
- intel_fb->rotated[i].pitch =
- rot_info->plane[i].height * tile_height;
+ plane_info.offset = offset;
+ plane_info.stride = DIV_ROUND_UP(fb->pitches[i],
+ tile_width * cpp);
+ plane_info.width = DIV_ROUND_UP(x + width, tile_width);
+ plane_info.height = DIV_ROUND_UP(y + height,
+ tile_height);
/* how many tiles does this plane need */
- size = rot_info->plane[i].stride * rot_info->plane[i].height;
+ size = plane_info.stride * plane_info.height;
/*
* If the plane isn't horizontally tile aligned,
* we need one more tile.
@@ -2763,36 +3094,13 @@ intel_fill_fb_info(struct drm_i915_private *dev_priv,
if (x != 0)
size++;
- /* rotate the x/y offsets to match the GTT view */
- drm_rect_init(&r, x, y, width, height);
- drm_rect_rotate(&r,
- rot_info->plane[i].width * tile_width,
- rot_info->plane[i].height * tile_height,
- DRM_MODE_ROTATE_270);
- x = r.x1;
- y = r.y1;
-
- /* rotate the tile dimensions to match the GTT view */
- pitch_tiles = intel_fb->rotated[i].pitch / tile_height;
- swap(tile_width, tile_height);
-
- /*
- * We only keep the x/y offsets, so push all of the
- * gtt offset into the x/y offsets.
- */
- intel_adjust_tile_offset(&x, &y,
- tile_width, tile_height,
- tile_size, pitch_tiles,
- gtt_offset_rotated * tile_size, 0);
-
- gtt_offset_rotated += rot_info->plane[i].width * rot_info->plane[i].height;
-
- /*
- * First pixel of the framebuffer from
- * the start of the rotated gtt mapping.
- */
- intel_fb->rotated[i].x = x;
- intel_fb->rotated[i].y = y;
+ gtt_offset_rotated +=
+ setup_fb_rotation(i, &plane_info,
+ gtt_offset_rotated,
+ x, y, width, height,
+ tile_size,
+ tile_width, tile_height,
+ fb);
} else {
size = DIV_ROUND_UP((y + height) * fb->pitches[i] +
x * cpp, tile_size);
@@ -2815,11 +3123,11 @@ static void
intel_plane_remap_gtt(struct intel_plane_state *plane_state)
{
struct drm_i915_private *dev_priv =
- to_i915(plane_state->base.plane->dev);
- struct drm_framebuffer *fb = plane_state->base.fb;
+ to_i915(plane_state->uapi.plane->dev);
+ struct drm_framebuffer *fb = plane_state->hw.fb;
struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
struct intel_rotation_info *info = &plane_state->view.rotated;
- unsigned int rotation = plane_state->base.rotation;
+ unsigned int rotation = plane_state->hw.rotation;
int i, num_planes = fb->format->num_planes;
unsigned int tile_size = intel_tile_size(dev_priv);
unsigned int src_x, src_y;
@@ -2830,20 +3138,20 @@ intel_plane_remap_gtt(struct intel_plane_state *plane_state)
plane_state->view.type = drm_rotation_90_or_270(rotation) ?
I915_GGTT_VIEW_ROTATED : I915_GGTT_VIEW_REMAPPED;
- src_x = plane_state->base.src.x1 >> 16;
- src_y = plane_state->base.src.y1 >> 16;
- src_w = drm_rect_width(&plane_state->base.src) >> 16;
- src_h = drm_rect_height(&plane_state->base.src) >> 16;
+ src_x = plane_state->uapi.src.x1 >> 16;
+ src_y = plane_state->uapi.src.y1 >> 16;
+ src_w = drm_rect_width(&plane_state->uapi.src) >> 16;
+ src_h = drm_rect_height(&plane_state->uapi.src) >> 16;
WARN_ON(is_ccs_modifier(fb->modifier));
/* Make src coordinates relative to the viewport */
- drm_rect_translate(&plane_state->base.src,
+ drm_rect_translate(&plane_state->uapi.src,
-(src_x << 16), -(src_y << 16));
/* Rotate src coordinates to match rotated GTT view */
if (drm_rotation_90_or_270(rotation))
- drm_rect_rotate(&plane_state->base.src,
+ drm_rect_rotate(&plane_state->uapi.src,
src_w << 16, src_h << 16,
DRM_MODE_ROTATE_270);
@@ -2876,6 +3184,7 @@ intel_plane_remap_gtt(struct intel_plane_state *plane_state)
DRM_MODE_ROTATE_0, tile_size);
offset /= tile_size;
+ WARN_ON(i >= ARRAY_SIZE(info->plane));
info->plane[i].offset = offset;
info->plane[i].stride = DIV_ROUND_UP(fb->pitches[i],
tile_width * cpp);
@@ -2925,8 +3234,8 @@ static int
intel_plane_compute_gtt(struct intel_plane_state *plane_state)
{
const struct intel_framebuffer *fb =
- to_intel_framebuffer(plane_state->base.fb);
- unsigned int rotation = plane_state->base.rotation;
+ to_intel_framebuffer(plane_state->hw.fb);
+ unsigned int rotation = plane_state->hw.rotation;
int i, num_planes;
if (!fb)
@@ -2963,7 +3272,7 @@ intel_plane_compute_gtt(struct intel_plane_state *plane_state)
/* Rotate src coordinates to match rotated GTT view */
if (drm_rotation_90_or_270(rotation))
- drm_rect_rotate(&plane_state->base.src,
+ drm_rect_rotate(&plane_state->uapi.src,
fb->base.width << 16, fb->base.height << 16,
DRM_MODE_ROTATE_270);
@@ -2975,6 +3284,8 @@ static int i9xx_format_to_fourcc(int format)
switch (format) {
case DISPPLANE_8BPP:
return DRM_FORMAT_C8;
+ case DISPPLANE_BGRA555:
+ return DRM_FORMAT_ARGB1555;
case DISPPLANE_BGRX555:
return DRM_FORMAT_XRGB1555;
case DISPPLANE_BGRX565:
@@ -2984,10 +3295,18 @@ static int i9xx_format_to_fourcc(int format)
return DRM_FORMAT_XRGB8888;
case DISPPLANE_RGBX888:
return DRM_FORMAT_XBGR8888;
+ case DISPPLANE_BGRA888:
+ return DRM_FORMAT_ARGB8888;
+ case DISPPLANE_RGBA888:
+ return DRM_FORMAT_ABGR8888;
case DISPPLANE_BGRX101010:
return DRM_FORMAT_XRGB2101010;
case DISPPLANE_RGBX101010:
return DRM_FORMAT_XBGR2101010;
+ case DISPPLANE_BGRA101010:
+ return DRM_FORMAT_ARGB2101010;
+ case DISPPLANE_RGBA101010:
+ return DRM_FORMAT_ABGR2101010;
case DISPPLANE_RGBX161616:
return DRM_FORMAT_XBGR16161616F;
}
@@ -3032,10 +3351,17 @@ int skl_format_to_fourcc(int format, bool rgb_order, bool alpha)
return DRM_FORMAT_XRGB8888;
}
case PLANE_CTL_FORMAT_XRGB_2101010:
- if (rgb_order)
- return DRM_FORMAT_XBGR2101010;
- else
- return DRM_FORMAT_XRGB2101010;
+ if (rgb_order) {
+ if (alpha)
+ return DRM_FORMAT_ABGR2101010;
+ else
+ return DRM_FORMAT_XBGR2101010;
+ } else {
+ if (alpha)
+ return DRM_FORMAT_ARGB2101010;
+ else
+ return DRM_FORMAT_XRGB2101010;
+ }
case PLANE_CTL_FORMAT_XRGB_16161616F:
if (rgb_order) {
if (alpha)
@@ -3131,19 +3457,19 @@ intel_set_plane_visible(struct intel_crtc_state *crtc_state,
struct intel_plane_state *plane_state,
bool visible)
{
- struct intel_plane *plane = to_intel_plane(plane_state->base.plane);
+ struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
- plane_state->base.visible = visible;
+ plane_state->uapi.visible = visible;
if (visible)
- crtc_state->base.plane_mask |= drm_plane_mask(&plane->base);
+ crtc_state->uapi.plane_mask |= drm_plane_mask(&plane->base);
else
- crtc_state->base.plane_mask &= ~drm_plane_mask(&plane->base);
+ crtc_state->uapi.plane_mask &= ~drm_plane_mask(&plane->base);
}
static void fixup_active_planes(struct intel_crtc_state *crtc_state)
{
- struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev);
+ struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
struct drm_plane *plane;
/*
@@ -3154,13 +3480,14 @@ static void fixup_active_planes(struct intel_crtc_state *crtc_state)
crtc_state->active_planes = 0;
drm_for_each_plane_mask(plane, &dev_priv->drm,
- crtc_state->base.plane_mask)
+ crtc_state->uapi.plane_mask)
crtc_state->active_planes |= BIT(to_intel_plane(plane)->id);
}
static void intel_plane_disable_noatomic(struct intel_crtc *crtc,
struct intel_plane *plane)
{
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
struct intel_crtc_state *crtc_state =
to_intel_crtc_state(crtc->base.state);
struct intel_plane_state *plane_state =
@@ -3176,7 +3503,27 @@ static void intel_plane_disable_noatomic(struct intel_crtc *crtc,
crtc_state->min_cdclk[plane->id] = 0;
if (plane->id == PLANE_PRIMARY)
- intel_pre_disable_primary_noatomic(&crtc->base);
+ hsw_disable_ips(crtc_state);
+
+ /*
+ * Vblank time updates from the shadow to live plane control register
+ * are blocked if the memory self-refresh mode is active at that
+	 * moment. So to make sure the plane gets truly disabled, first
+	 * disable the self-refresh mode. The self-refresh enable bit in turn
+ * will be checked/applied by the HW only at the next frame start
+ * event which is after the vblank start event, so we need to have a
+ * wait-for-vblank between disabling the plane and the pipe.
+ */
+ if (HAS_GMCH(dev_priv) &&
+ intel_set_memory_cxsr(dev_priv, false))
+ intel_wait_for_vblank(dev_priv, crtc->pipe);
+
+ /*
+ * Gen2 reports pipe underruns whenever all planes are disabled.
+ * So disable underrun reporting before all the planes get disabled.
+ */
+ if (IS_GEN(dev_priv, 2) && !crtc_state->active_planes)
+ intel_set_cpu_fifo_underrun_reporting(dev_priv, crtc->pipe, false);
intel_disable_plane(plane, crtc_state);
}
@@ -3229,7 +3576,7 @@ intel_find_initial_plane_obj(struct intel_crtc *intel_crtc,
continue;
if (intel_plane_ggtt_offset(state) == plane_config->base) {
- fb = state->base.fb;
+ fb = state->hw.fb;
drm_framebuffer_get(fb);
goto valid_fb;
}
@@ -3247,11 +3594,11 @@ intel_find_initial_plane_obj(struct intel_crtc *intel_crtc,
return;
valid_fb:
- intel_state->base.rotation = plane_config->rotation;
+ intel_state->hw.rotation = plane_config->rotation;
intel_fill_fb_ggtt_view(&intel_state->view, fb,
- intel_state->base.rotation);
+ intel_state->hw.rotation);
intel_state->color_plane[0].stride =
- intel_fb_pitch(fb, 0, intel_state->base.rotation);
+ intel_fb_pitch(fb, 0, intel_state->hw.rotation);
intel_state->vma =
intel_pin_and_fence_fb_obj(fb,
@@ -3279,14 +3626,15 @@ valid_fb:
plane_state->crtc_w = fb->width;
plane_state->crtc_h = fb->height;
- intel_state->base.src = drm_plane_state_src(plane_state);
- intel_state->base.dst = drm_plane_state_dest(plane_state);
+ intel_state->uapi.src = drm_plane_state_src(plane_state);
+ intel_state->uapi.dst = drm_plane_state_dest(plane_state);
if (plane_config->tiling)
dev_priv->preserve_bios_swizzle = true;
plane_state->fb = fb;
plane_state->crtc = &intel_crtc->base;
+ intel_plane_copy_uapi_to_hw_state(intel_state, intel_state);
atomic_or(to_intel_plane(primary)->frontbuffer_bit,
&to_intel_frontbuffer(fb)->bits);
@@ -3317,6 +3665,7 @@ static int skl_max_plane_width(const struct drm_framebuffer *fb,
return 5120;
case I915_FORMAT_MOD_Y_TILED_CCS:
case I915_FORMAT_MOD_Yf_TILED_CCS:
+ case I915_FORMAT_MOD_Y_TILED_GEN12_MC_CCS:
/* FIXME AUX plane? */
case I915_FORMAT_MOD_Y_TILED:
case I915_FORMAT_MOD_Yf_TILED:
@@ -3375,17 +3724,20 @@ static int icl_max_plane_height(void)
return 4320;
}
-static bool skl_check_main_ccs_coordinates(struct intel_plane_state *plane_state,
- int main_x, int main_y, u32 main_offset)
-{
- const struct drm_framebuffer *fb = plane_state->base.fb;
- int hsub = fb->format->hsub;
- int vsub = fb->format->vsub;
- int aux_x = plane_state->color_plane[1].x;
- int aux_y = plane_state->color_plane[1].y;
- u32 aux_offset = plane_state->color_plane[1].offset;
- u32 alignment = intel_surf_alignment(fb, 1);
-
+static bool
+skl_check_main_ccs_coordinates(struct intel_plane_state *plane_state,
+ int main_x, int main_y, u32 main_offset,
+ int ccs_plane)
+{
+ const struct drm_framebuffer *fb = plane_state->hw.fb;
+ int aux_x = plane_state->color_plane[ccs_plane].x;
+ int aux_y = plane_state->color_plane[ccs_plane].y;
+ u32 aux_offset = plane_state->color_plane[ccs_plane].offset;
+ u32 alignment = intel_surf_alignment(fb, ccs_plane);
+ int hsub;
+ int vsub;
+
+ intel_fb_plane_get_subsampling(&hsub, &vsub, fb, ccs_plane);
while (aux_offset >= main_offset && aux_y <= main_y) {
int x, y;
@@ -3397,8 +3749,12 @@ static bool skl_check_main_ccs_coordinates(struct intel_plane_state *plane_state
x = aux_x / hsub;
y = aux_y / vsub;
- aux_offset = intel_plane_adjust_aligned_offset(&x, &y, plane_state, 1,
- aux_offset, aux_offset - alignment);
+ aux_offset = intel_plane_adjust_aligned_offset(&x, &y,
+ plane_state,
+ ccs_plane,
+ aux_offset,
+ aux_offset -
+ alignment);
aux_x = x * hsub + aux_x % hsub;
aux_y = y * vsub + aux_y % vsub;
}
@@ -3406,25 +3762,28 @@ static bool skl_check_main_ccs_coordinates(struct intel_plane_state *plane_state
if (aux_x != main_x || aux_y != main_y)
return false;
- plane_state->color_plane[1].offset = aux_offset;
- plane_state->color_plane[1].x = aux_x;
- plane_state->color_plane[1].y = aux_y;
+ plane_state->color_plane[ccs_plane].offset = aux_offset;
+ plane_state->color_plane[ccs_plane].x = aux_x;
+ plane_state->color_plane[ccs_plane].y = aux_y;
return true;
}
static int skl_check_main_surface(struct intel_plane_state *plane_state)
{
- struct drm_i915_private *dev_priv = to_i915(plane_state->base.plane->dev);
- const struct drm_framebuffer *fb = plane_state->base.fb;
- unsigned int rotation = plane_state->base.rotation;
- int x = plane_state->base.src.x1 >> 16;
- int y = plane_state->base.src.y1 >> 16;
- int w = drm_rect_width(&plane_state->base.src) >> 16;
- int h = drm_rect_height(&plane_state->base.src) >> 16;
+ struct drm_i915_private *dev_priv = to_i915(plane_state->uapi.plane->dev);
+ const struct drm_framebuffer *fb = plane_state->hw.fb;
+ unsigned int rotation = plane_state->hw.rotation;
+ int x = plane_state->uapi.src.x1 >> 16;
+ int y = plane_state->uapi.src.y1 >> 16;
+ int w = drm_rect_width(&plane_state->uapi.src) >> 16;
+ int h = drm_rect_height(&plane_state->uapi.src) >> 16;
int max_width;
int max_height;
- u32 alignment, offset, aux_offset = plane_state->color_plane[1].offset;
+ u32 alignment;
+ u32 offset;
+ int aux_plane = intel_main_to_aux_plane(fb, 0);
+ u32 aux_offset = plane_state->color_plane[aux_plane].offset;
if (INTEL_GEN(dev_priv) >= 11)
max_width = icl_max_plane_width(fb, 0, rotation);
@@ -3447,6 +3806,8 @@ static int skl_check_main_surface(struct intel_plane_state *plane_state)
intel_add_fb_offsets(&x, &y, plane_state, 0);
offset = intel_plane_compute_aligned_offset(&x, &y, plane_state, 0);
alignment = intel_surf_alignment(fb, 0);
+ if (WARN_ON(alignment && !is_power_of_2(alignment)))
+ return -EINVAL;
/*
* AUX surface offset is specified as the distance from the
@@ -3482,7 +3843,8 @@ static int skl_check_main_surface(struct intel_plane_state *plane_state)
* they match with the main surface x/y offsets.
*/
if (is_ccs_modifier(fb->modifier)) {
- while (!skl_check_main_ccs_coordinates(plane_state, x, y, offset)) {
+ while (!skl_check_main_ccs_coordinates(plane_state, x, y,
+ offset, aux_plane)) {
if (offset == 0)
break;
@@ -3490,7 +3852,8 @@ static int skl_check_main_surface(struct intel_plane_state *plane_state)
offset, offset - alignment);
}
- if (x != plane_state->color_plane[1].x || y != plane_state->color_plane[1].y) {
+ if (x != plane_state->color_plane[aux_plane].x ||
+ y != plane_state->color_plane[aux_plane].y) {
DRM_DEBUG_KMS("Unable to find suitable display surface offset due to CCS\n");
return -EINVAL;
}
@@ -3504,7 +3867,7 @@ static int skl_check_main_surface(struct intel_plane_state *plane_state)
* Put the final coordinates back so that the src
* coordinate checks will see the right values.
*/
- drm_rect_translate_to(&plane_state->base.src,
+ drm_rect_translate_to(&plane_state->uapi.src,
x << 16, y << 16);
return 0;
@@ -3512,18 +3875,20 @@ static int skl_check_main_surface(struct intel_plane_state *plane_state)
static int skl_check_nv12_aux_surface(struct intel_plane_state *plane_state)
{
- const struct drm_framebuffer *fb = plane_state->base.fb;
- unsigned int rotation = plane_state->base.rotation;
- int max_width = skl_max_plane_width(fb, 1, rotation);
+ const struct drm_framebuffer *fb = plane_state->hw.fb;
+ unsigned int rotation = plane_state->hw.rotation;
+ int uv_plane = 1;
+ int max_width = skl_max_plane_width(fb, uv_plane, rotation);
int max_height = 4096;
- int x = plane_state->base.src.x1 >> 17;
- int y = plane_state->base.src.y1 >> 17;
- int w = drm_rect_width(&plane_state->base.src) >> 17;
- int h = drm_rect_height(&plane_state->base.src) >> 17;
+ int x = plane_state->uapi.src.x1 >> 17;
+ int y = plane_state->uapi.src.y1 >> 17;
+ int w = drm_rect_width(&plane_state->uapi.src) >> 17;
+ int h = drm_rect_height(&plane_state->uapi.src) >> 17;
u32 offset;
- intel_add_fb_offsets(&x, &y, plane_state, 1);
- offset = intel_plane_compute_aligned_offset(&x, &y, plane_state, 1);
+ intel_add_fb_offsets(&x, &y, plane_state, uv_plane);
+ offset = intel_plane_compute_aligned_offset(&x, &y,
+ plane_state, uv_plane);
/* FIXME not quite sure how/if these apply to the chroma plane */
if (w > max_width || h > max_height) {
@@ -3532,62 +3897,126 @@ static int skl_check_nv12_aux_surface(struct intel_plane_state *plane_state)
return -EINVAL;
}
- plane_state->color_plane[1].offset = offset;
- plane_state->color_plane[1].x = x;
- plane_state->color_plane[1].y = y;
+ if (is_ccs_modifier(fb->modifier)) {
+ int ccs_plane = main_to_ccs_plane(fb, uv_plane);
+ int aux_offset = plane_state->color_plane[ccs_plane].offset;
+ int alignment = intel_surf_alignment(fb, uv_plane);
+
+ if (offset > aux_offset)
+ offset = intel_plane_adjust_aligned_offset(&x, &y,
+ plane_state,
+ uv_plane,
+ offset,
+ aux_offset & ~(alignment - 1));
+
+ while (!skl_check_main_ccs_coordinates(plane_state, x, y,
+ offset, ccs_plane)) {
+ if (offset == 0)
+ break;
+
+ offset = intel_plane_adjust_aligned_offset(&x, &y,
+ plane_state,
+ uv_plane,
+ offset, offset - alignment);
+ }
+
+ if (x != plane_state->color_plane[ccs_plane].x ||
+ y != plane_state->color_plane[ccs_plane].y) {
+ DRM_DEBUG_KMS("Unable to find suitable display surface offset due to CCS\n");
+ return -EINVAL;
+ }
+ }
+
+ plane_state->color_plane[uv_plane].offset = offset;
+ plane_state->color_plane[uv_plane].x = x;
+ plane_state->color_plane[uv_plane].y = y;
return 0;
}
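The >> 17 shifts above fold two operations into one: src coordinates are 16.16 fixed point, so >> 16 drops the fractional part, and the extra >> 1 halves the result for the 2x2-subsampled NV12 chroma plane. A worked example:

int src_w_fp = 1920 << 16;     /* 1920 px in 16.16 fixed point */
int luma_w = src_w_fp >> 16;   /* 1920: integer luma width */
int uv_w = src_w_fp >> 17;     /*  960: chroma width is luma / 2 */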
static int skl_check_ccs_aux_surface(struct intel_plane_state *plane_state)
{
- const struct drm_framebuffer *fb = plane_state->base.fb;
- int src_x = plane_state->base.src.x1 >> 16;
- int src_y = plane_state->base.src.y1 >> 16;
- int hsub = fb->format->hsub;
- int vsub = fb->format->vsub;
- int x = src_x / hsub;
- int y = src_y / vsub;
+ const struct drm_framebuffer *fb = plane_state->hw.fb;
+ int src_x = plane_state->uapi.src.x1 >> 16;
+ int src_y = plane_state->uapi.src.y1 >> 16;
u32 offset;
+ int ccs_plane;
+
+ for (ccs_plane = 0; ccs_plane < fb->format->num_planes; ccs_plane++) {
+ int main_hsub, main_vsub;
+ int hsub, vsub;
+ int x, y;
+
+ if (!is_ccs_plane(fb, ccs_plane))
+ continue;
+
+ intel_fb_plane_get_subsampling(&main_hsub, &main_vsub, fb,
+ ccs_to_main_plane(fb, ccs_plane));
+ intel_fb_plane_get_subsampling(&hsub, &vsub, fb, ccs_plane);
+
+ hsub *= main_hsub;
+ vsub *= main_vsub;
+ x = src_x / hsub;
+ y = src_y / vsub;
- intel_add_fb_offsets(&x, &y, plane_state, 1);
- offset = intel_plane_compute_aligned_offset(&x, &y, plane_state, 1);
+ intel_add_fb_offsets(&x, &y, plane_state, ccs_plane);
- plane_state->color_plane[1].offset = offset;
- plane_state->color_plane[1].x = x * hsub + src_x % hsub;
- plane_state->color_plane[1].y = y * vsub + src_y % vsub;
+ offset = intel_plane_compute_aligned_offset(&x, &y,
+ plane_state,
+ ccs_plane);
+
+ plane_state->color_plane[ccs_plane].offset = offset;
+ plane_state->color_plane[ccs_plane].x = (x * hsub +
+ src_x % hsub) /
+ main_hsub;
+ plane_state->color_plane[ccs_plane].y = (y * vsub +
+ src_y % vsub) /
+ main_vsub;
+ }
return 0;
}
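The subsampling math in the loop chains two ratios: the CCS plane's factor relative to its parent plane, multiplied by the parent's factor relative to the full frame. A self-contained illustration; the numeric ratios here are invented for the example, not the hardware's real CCS geometry:

int main_hsub = 2, main_vsub = 2; /* e.g. an NV12 UV plane vs. luma */
int ccs_hsub = 8, ccs_vsub = 8;   /* hypothetical CCS-vs-parent ratio */
int hsub = ccs_hsub * main_hsub;  /* 16: total factor vs. the frame */
int vsub = ccs_vsub * main_vsub;  /* 16 */
int src_x = 640, src_y = 360;
int x = src_x / hsub;             /* 40: CCS element column */
int y = src_y / vsub;             /* 22: CCS element row */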
int skl_check_plane_surface(struct intel_plane_state *plane_state)
{
- const struct drm_framebuffer *fb = plane_state->base.fb;
+ const struct drm_framebuffer *fb = plane_state->hw.fb;
int ret;
+ bool needs_aux = false;
ret = intel_plane_compute_gtt(plane_state);
if (ret)
return ret;
- if (!plane_state->base.visible)
+ if (!plane_state->uapi.visible)
return 0;
/*
- * Handle the AUX surface first since
- * the main surface setup depends on it.
+ * Handle the AUX surface first since the main surface setup depends on
+ * it.
*/
- if (drm_format_info_is_yuv_semiplanar(fb->format)) {
- ret = skl_check_nv12_aux_surface(plane_state);
+ if (is_ccs_modifier(fb->modifier)) {
+ needs_aux = true;
+ ret = skl_check_ccs_aux_surface(plane_state);
if (ret)
return ret;
- } else if (is_ccs_modifier(fb->modifier)) {
- ret = skl_check_ccs_aux_surface(plane_state);
+ }
+
+ if (intel_format_info_is_yuv_semiplanar(fb->format,
+ fb->modifier)) {
+ needs_aux = true;
+ ret = skl_check_nv12_aux_surface(plane_state);
if (ret)
return ret;
- } else {
- plane_state->color_plane[1].offset = ~0xfff;
- plane_state->color_plane[1].x = 0;
- plane_state->color_plane[1].y = 0;
+ }
+
+ if (!needs_aux) {
+ int i;
+
+ for (i = 1; i < fb->format->num_planes; i++) {
+ plane_state->color_plane[i].offset = ~0xfff;
+ plane_state->color_plane[i].x = 0;
+ plane_state->color_plane[i].y = 0;
+ }
}
ret = skl_check_main_surface(plane_state);
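The resulting control flow of skl_check_plane_surface(), sketched standalone below: with the gen12 media-compression modifier a framebuffer can be both CCS-compressed and YUV semiplanar, so the two AUX checks are no longer mutually exclusive. The helpers are stubs standing in for the static functions above:

static int check_ccs_aux(void), check_nv12_aux(void), check_main(void);
static void poison_unused_aux_planes(void); /* offset = ~0xfff, x = y = 0 */

static int check_plane_surface_sketch(int has_ccs, int is_semiplanar)
{
        int ret;

        if (has_ccs && (ret = check_ccs_aux()))
                return ret;
        if (is_semiplanar && (ret = check_nv12_aux()))
                return ret;
        if (!has_ccs && !is_semiplanar)
                poison_unused_aux_planes();
        return check_main(); /* aligns itself against the AUX offsets */
}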
@@ -3601,7 +4030,7 @@ static void i9xx_plane_ratio(const struct intel_crtc_state *crtc_state,
const struct intel_plane_state *plane_state,
unsigned int *num, unsigned int *den)
{
- const struct drm_framebuffer *fb = plane_state->base.fb;
+ const struct drm_framebuffer *fb = plane_state->hw.fb;
unsigned int cpp = fb->format->cpp[0];
/*
@@ -3673,7 +4102,7 @@ i9xx_plane_max_stride(struct intel_plane *plane,
static u32 i9xx_plane_ctl_crtc(const struct intel_crtc_state *crtc_state)
{
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
u32 dspcntr = 0;
@@ -3693,9 +4122,9 @@ static u32 i9xx_plane_ctl(const struct intel_crtc_state *crtc_state,
const struct intel_plane_state *plane_state)
{
struct drm_i915_private *dev_priv =
- to_i915(plane_state->base.plane->dev);
- const struct drm_framebuffer *fb = plane_state->base.fb;
- unsigned int rotation = plane_state->base.rotation;
+ to_i915(plane_state->uapi.plane->dev);
+ const struct drm_framebuffer *fb = plane_state->hw.fb;
+ unsigned int rotation = plane_state->hw.rotation;
u32 dspcntr;
dspcntr = DISPLAY_PLANE_ENABLE;
@@ -3711,6 +4140,9 @@ static u32 i9xx_plane_ctl(const struct intel_crtc_state *crtc_state,
case DRM_FORMAT_XRGB1555:
dspcntr |= DISPPLANE_BGRX555;
break;
+ case DRM_FORMAT_ARGB1555:
+ dspcntr |= DISPPLANE_BGRA555;
+ break;
case DRM_FORMAT_RGB565:
dspcntr |= DISPPLANE_BGRX565;
break;
@@ -3720,12 +4152,24 @@ static u32 i9xx_plane_ctl(const struct intel_crtc_state *crtc_state,
case DRM_FORMAT_XBGR8888:
dspcntr |= DISPPLANE_RGBX888;
break;
+ case DRM_FORMAT_ARGB8888:
+ dspcntr |= DISPPLANE_BGRA888;
+ break;
+ case DRM_FORMAT_ABGR8888:
+ dspcntr |= DISPPLANE_RGBA888;
+ break;
case DRM_FORMAT_XRGB2101010:
dspcntr |= DISPPLANE_BGRX101010;
break;
case DRM_FORMAT_XBGR2101010:
dspcntr |= DISPPLANE_RGBX101010;
break;
+ case DRM_FORMAT_ARGB2101010:
+ dspcntr |= DISPPLANE_BGRA101010;
+ break;
+ case DRM_FORMAT_ABGR2101010:
+ dspcntr |= DISPPLANE_RGBA101010;
+ break;
case DRM_FORMAT_XBGR16161616F:
dspcntr |= DISPPLANE_RGBX161616;
break;
@@ -3750,8 +4194,8 @@ static u32 i9xx_plane_ctl(const struct intel_crtc_state *crtc_state,
int i9xx_check_plane_surface(struct intel_plane_state *plane_state)
{
struct drm_i915_private *dev_priv =
- to_i915(plane_state->base.plane->dev);
- const struct drm_framebuffer *fb = plane_state->base.fb;
+ to_i915(plane_state->uapi.plane->dev);
+ const struct drm_framebuffer *fb = plane_state->hw.fb;
int src_x, src_y, src_w;
u32 offset;
int ret;
@@ -3760,12 +4204,12 @@ int i9xx_check_plane_surface(struct intel_plane_state *plane_state)
if (ret)
return ret;
- if (!plane_state->base.visible)
+ if (!plane_state->uapi.visible)
return 0;
- src_w = drm_rect_width(&plane_state->base.src) >> 16;
- src_x = plane_state->base.src.x1 >> 16;
- src_y = plane_state->base.src.y1 >> 16;
+ src_w = drm_rect_width(&plane_state->uapi.src) >> 16;
+ src_x = plane_state->uapi.src.x1 >> 16;
+ src_y = plane_state->uapi.src.y1 >> 16;
/* Undocumented hardware limit on i965/g4x/vlv/chv */
if (HAS_GMCH(dev_priv) && fb->format->cpp[0] == 8 && src_w > 2048)
@@ -3783,14 +4227,14 @@ int i9xx_check_plane_surface(struct intel_plane_state *plane_state)
* Put the final coordinates back so that the src
* coordinate checks will see the right values.
*/
- drm_rect_translate_to(&plane_state->base.src,
+ drm_rect_translate_to(&plane_state->uapi.src,
src_x << 16, src_y << 16);
/* HSW/BDW do this automagically in hardware */
if (!IS_HASWELL(dev_priv) && !IS_BROADWELL(dev_priv)) {
- unsigned int rotation = plane_state->base.rotation;
- int src_w = drm_rect_width(&plane_state->base.src) >> 16;
- int src_h = drm_rect_height(&plane_state->base.src) >> 16;
+ unsigned int rotation = plane_state->hw.rotation;
+ int src_w = drm_rect_width(&plane_state->uapi.src) >> 16;
+ int src_h = drm_rect_height(&plane_state->uapi.src) >> 16;
if (rotation & DRM_MODE_ROTATE_180) {
src_x += src_w - 1;
@@ -3827,15 +4271,15 @@ static int
i9xx_plane_check(struct intel_crtc_state *crtc_state,
struct intel_plane_state *plane_state)
{
- struct intel_plane *plane = to_intel_plane(plane_state->base.plane);
+ struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
int ret;
ret = chv_plane_check_rotation(plane_state);
if (ret)
return ret;
- ret = drm_atomic_helper_check_plane_state(&plane_state->base,
- &crtc_state->base,
+ ret = drm_atomic_helper_check_plane_state(&plane_state->uapi,
+ &crtc_state->uapi,
DRM_PLANE_HELPER_NO_SCALING,
DRM_PLANE_HELPER_NO_SCALING,
i9xx_plane_has_windowing(plane),
@@ -3847,7 +4291,7 @@ i9xx_plane_check(struct intel_crtc_state *crtc_state,
if (ret)
return ret;
- if (!plane_state->base.visible)
+ if (!plane_state->uapi.visible)
return 0;
ret = intel_plane_check_src_coordinates(plane_state);
@@ -3868,10 +4312,10 @@ static void i9xx_update_plane(struct intel_plane *plane,
u32 linear_offset;
int x = plane_state->color_plane[0].x;
int y = plane_state->color_plane[0].y;
- int crtc_x = plane_state->base.dst.x1;
- int crtc_y = plane_state->base.dst.y1;
- int crtc_w = drm_rect_width(&plane_state->base.dst);
- int crtc_h = drm_rect_height(&plane_state->base.dst);
+ int crtc_x = plane_state->uapi.dst.x1;
+ int crtc_y = plane_state->uapi.dst.y1;
+ int crtc_w = drm_rect_width(&plane_state->uapi.dst);
+ int crtc_h = drm_rect_height(&plane_state->uapi.dst);
unsigned long irqflags;
u32 dspaddr_offset;
u32 dspcntr;
@@ -4011,7 +4455,7 @@ static void skl_detach_scaler(struct intel_crtc *intel_crtc, int id)
*/
static void skl_detach_scalers(const struct intel_crtc_state *crtc_state)
{
- struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->base.crtc);
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->uapi.crtc);
const struct intel_crtc_scaler_state *scaler_state =
&crtc_state->scaler_state;
int i;
@@ -4030,7 +4474,7 @@ static unsigned int skl_plane_stride_mult(const struct drm_framebuffer *fb,
* The stride is either expressed as a multiple of 64 bytes chunks for
* linear buffers or in number of tiles for tiled buffers.
*/
- if (fb->modifier == DRM_FORMAT_MOD_LINEAR)
+ if (is_surface_linear(fb, color_plane))
return 64;
else if (drm_rotation_90_or_270(rotation))
return intel_tile_height(fb, color_plane);
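is_surface_linear() makes the unit choice per color plane rather than per framebuffer; the point is that with the gen12 CCS modifiers an individual plane can be laid out linearly while the rest of the framebuffer is tiled. The units themselves are unchanged. For a 1920-pixel, 4-byte-per-pixel scanline:

unsigned int stride_bytes = 1920 * 4;          /* XRGB8888 scanline */
unsigned int linear_units = stride_bytes / 64; /* 120 64-byte chunks */
unsigned int ytile_units = stride_bytes / 128; /* 60 tiles; Y-tiles are
                                                  128 bytes wide */
/* with 90/270 rotation the divisor is the tile height instead */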
@@ -4041,8 +4485,8 @@ static unsigned int skl_plane_stride_mult(const struct drm_framebuffer *fb,
u32 skl_plane_stride(const struct intel_plane_state *plane_state,
int color_plane)
{
- const struct drm_framebuffer *fb = plane_state->base.fb;
- unsigned int rotation = plane_state->base.rotation;
+ const struct drm_framebuffer *fb = plane_state->hw.fb;
+ unsigned int rotation = plane_state->hw.rotation;
u32 stride = plane_state->color_plane[color_plane].stride;
if (color_plane >= fb->format->num_planes)
@@ -4065,8 +4509,10 @@ static u32 skl_plane_ctl_format(u32 pixel_format)
case DRM_FORMAT_ARGB8888:
return PLANE_CTL_FORMAT_XRGB_8888;
case DRM_FORMAT_XBGR2101010:
+ case DRM_FORMAT_ABGR2101010:
return PLANE_CTL_FORMAT_XRGB_2101010 | PLANE_CTL_ORDER_RGBX;
case DRM_FORMAT_XRGB2101010:
+ case DRM_FORMAT_ARGB2101010:
return PLANE_CTL_FORMAT_XRGB_2101010;
case DRM_FORMAT_XBGR16161616F:
case DRM_FORMAT_ABGR16161616F:
@@ -4111,10 +4557,10 @@ static u32 skl_plane_ctl_format(u32 pixel_format)
static u32 skl_plane_ctl_alpha(const struct intel_plane_state *plane_state)
{
- if (!plane_state->base.fb->format->has_alpha)
+ if (!plane_state->hw.fb->format->has_alpha)
return PLANE_CTL_ALPHA_DISABLE;
- switch (plane_state->base.pixel_blend_mode) {
+ switch (plane_state->hw.pixel_blend_mode) {
case DRM_MODE_BLEND_PIXEL_NONE:
return PLANE_CTL_ALPHA_DISABLE;
case DRM_MODE_BLEND_PREMULTI:
@@ -4122,17 +4568,17 @@ static u32 skl_plane_ctl_alpha(const struct intel_plane_state *plane_state)
case DRM_MODE_BLEND_COVERAGE:
return PLANE_CTL_ALPHA_HW_PREMULTIPLY;
default:
- MISSING_CASE(plane_state->base.pixel_blend_mode);
+ MISSING_CASE(plane_state->hw.pixel_blend_mode);
return PLANE_CTL_ALPHA_DISABLE;
}
}
static u32 glk_plane_color_ctl_alpha(const struct intel_plane_state *plane_state)
{
- if (!plane_state->base.fb->format->has_alpha)
+ if (!plane_state->hw.fb->format->has_alpha)
return PLANE_COLOR_ALPHA_DISABLE;
- switch (plane_state->base.pixel_blend_mode) {
+ switch (plane_state->hw.pixel_blend_mode) {
case DRM_MODE_BLEND_PIXEL_NONE:
return PLANE_COLOR_ALPHA_DISABLE;
case DRM_MODE_BLEND_PREMULTI:
@@ -4140,7 +4586,7 @@ static u32 glk_plane_color_ctl_alpha(const struct intel_plane_state *plane_state
case DRM_MODE_BLEND_COVERAGE:
return PLANE_COLOR_ALPHA_HW_PREMULTIPLY;
default:
- MISSING_CASE(plane_state->base.pixel_blend_mode);
+ MISSING_CASE(plane_state->hw.pixel_blend_mode);
return PLANE_COLOR_ALPHA_DISABLE;
}
}
@@ -4156,6 +4602,12 @@ static u32 skl_plane_ctl_tiling(u64 fb_modifier)
return PLANE_CTL_TILED_Y;
case I915_FORMAT_MOD_Y_TILED_CCS:
return PLANE_CTL_TILED_Y | PLANE_CTL_RENDER_DECOMPRESSION_ENABLE;
+ case I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS:
+ return PLANE_CTL_TILED_Y |
+ PLANE_CTL_RENDER_DECOMPRESSION_ENABLE |
+ PLANE_CTL_CLEAR_COLOR_DISABLE;
+ case I915_FORMAT_MOD_Y_TILED_GEN12_MC_CCS:
+ return PLANE_CTL_TILED_Y | PLANE_CTL_MEDIA_DECOMPRESSION_ENABLE;
case I915_FORMAT_MOD_Yf_TILED:
return PLANE_CTL_TILED_YF;
case I915_FORMAT_MOD_Yf_TILED_CCS:
@@ -4206,7 +4658,7 @@ static u32 cnl_plane_ctl_flip(unsigned int reflect)
u32 skl_plane_ctl_crtc(const struct intel_crtc_state *crtc_state)
{
- struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev);
+ struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
u32 plane_ctl = 0;
if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv))
@@ -4225,9 +4677,9 @@ u32 skl_plane_ctl(const struct intel_crtc_state *crtc_state,
const struct intel_plane_state *plane_state)
{
struct drm_i915_private *dev_priv =
- to_i915(plane_state->base.plane->dev);
- const struct drm_framebuffer *fb = plane_state->base.fb;
- unsigned int rotation = plane_state->base.rotation;
+ to_i915(plane_state->uapi.plane->dev);
+ const struct drm_framebuffer *fb = plane_state->hw.fb;
+ unsigned int rotation = plane_state->hw.rotation;
const struct drm_intel_sprite_colorkey *key = &plane_state->ckey;
u32 plane_ctl;
@@ -4237,10 +4689,10 @@ u32 skl_plane_ctl(const struct intel_crtc_state *crtc_state,
plane_ctl |= skl_plane_ctl_alpha(plane_state);
plane_ctl |= PLANE_CTL_PLANE_GAMMA_DISABLE;
- if (plane_state->base.color_encoding == DRM_COLOR_YCBCR_BT709)
+ if (plane_state->hw.color_encoding == DRM_COLOR_YCBCR_BT709)
plane_ctl |= PLANE_CTL_YUV_TO_RGB_CSC_FORMAT_BT709;
- if (plane_state->base.color_range == DRM_COLOR_YCBCR_FULL_RANGE)
+ if (plane_state->hw.color_range == DRM_COLOR_YCBCR_FULL_RANGE)
plane_ctl |= PLANE_CTL_YUV_RANGE_CORRECTION_DISABLE;
}
@@ -4262,7 +4714,7 @@ u32 skl_plane_ctl(const struct intel_crtc_state *crtc_state,
u32 glk_plane_color_ctl_crtc(const struct intel_crtc_state *crtc_state)
{
- struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev);
+ struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
u32 plane_color_ctl = 0;
if (INTEL_GEN(dev_priv) >= 11)
@@ -4281,21 +4733,21 @@ u32 glk_plane_color_ctl(const struct intel_crtc_state *crtc_state,
const struct intel_plane_state *plane_state)
{
struct drm_i915_private *dev_priv =
- to_i915(plane_state->base.plane->dev);
- const struct drm_framebuffer *fb = plane_state->base.fb;
- struct intel_plane *plane = to_intel_plane(plane_state->base.plane);
+ to_i915(plane_state->uapi.plane->dev);
+ const struct drm_framebuffer *fb = plane_state->hw.fb;
+ struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
u32 plane_color_ctl = 0;
plane_color_ctl |= PLANE_COLOR_PLANE_GAMMA_DISABLE;
plane_color_ctl |= glk_plane_color_ctl_alpha(plane_state);
if (fb->format->is_yuv && !icl_is_hdr_plane(dev_priv, plane->id)) {
- if (plane_state->base.color_encoding == DRM_COLOR_YCBCR_BT709)
+ if (plane_state->hw.color_encoding == DRM_COLOR_YCBCR_BT709)
plane_color_ctl |= PLANE_COLOR_CSC_MODE_YUV709_TO_RGB709;
else
plane_color_ctl |= PLANE_COLOR_CSC_MODE_YUV601_TO_RGB709;
- if (plane_state->base.color_range == DRM_COLOR_YCBCR_FULL_RANGE)
+ if (plane_state->hw.color_range == DRM_COLOR_YCBCR_FULL_RANGE)
plane_color_ctl |= PLANE_COLOR_YUV_RANGE_CORRECTION_DISABLE;
} else if (fb->format->is_yuv) {
plane_color_ctl |= PLANE_COLOR_INPUT_CSC_ENABLE;
@@ -4483,7 +4935,7 @@ static void icl_set_pipe_chicken(struct intel_crtc *crtc)
static void icl_enable_trans_port_sync(const struct intel_crtc_state *crtc_state)
{
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
u32 trans_ddi_func_ctl2_val;
u8 master_select;
@@ -4511,20 +4963,6 @@ static void icl_enable_trans_port_sync(const struct intel_crtc_state *crtc_state
trans_ddi_func_ctl2_val);
}
-static void icl_disable_transcoder_port_sync(const struct intel_crtc_state *old_crtc_state)
-{
- struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->base.crtc);
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
-
- if (old_crtc_state->master_transcoder == INVALID_TRANSCODER)
- return;
-
- DRM_DEBUG_KMS("Disabling Transcoder Port Sync on Slave Transcoder %s\n",
- transcoder_name(old_crtc_state->cpu_transcoder));
-
- I915_WRITE(TRANS_DDI_FUNC_CTL2(old_crtc_state->cpu_transcoder), 0);
-}
-
static void intel_fdi_normal_train(struct intel_crtc *crtc)
{
struct drm_device *dev = crtc->base.dev;
@@ -4567,8 +5005,8 @@ static void intel_fdi_normal_train(struct intel_crtc *crtc)
}
/* The FDI link training functions for ILK/Ibex Peak. */
-static void ironlake_fdi_link_train(struct intel_crtc *crtc,
- const struct intel_crtc_state *crtc_state)
+static void ilk_fdi_link_train(struct intel_crtc *crtc,
+ const struct intel_crtc_state *crtc_state)
{
struct drm_device *dev = crtc->base.dev;
struct drm_i915_private *dev_priv = to_i915(dev);
@@ -4577,7 +5015,7 @@ static void ironlake_fdi_link_train(struct intel_crtc *crtc,
u32 temp, tries;
/* FDI needs bits from pipe first */
- assert_pipe_enabled(dev_priv, pipe);
+ assert_pipe_enabled(dev_priv, crtc_state->cpu_transcoder);
/* Train 1: unmask FDI RX Interrupt symbol_lock and bit_lock bits
for train result */
@@ -4920,9 +5358,9 @@ train_done:
DRM_DEBUG_KMS("FDI train done.\n");
}
-static void ironlake_fdi_pll_enable(const struct intel_crtc_state *crtc_state)
+static void ilk_fdi_pll_enable(const struct intel_crtc_state *crtc_state)
{
- struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->base.crtc);
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->uapi.crtc);
struct drm_i915_private *dev_priv = to_i915(intel_crtc->base.dev);
enum pipe pipe = intel_crtc->pipe;
i915_reg_t reg;
@@ -4957,7 +5395,7 @@ static void ironlake_fdi_pll_enable(const struct intel_crtc_state *crtc_state)
}
}
-static void ironlake_fdi_pll_disable(struct intel_crtc *intel_crtc)
+static void ilk_fdi_pll_disable(struct intel_crtc *intel_crtc)
{
struct drm_device *dev = intel_crtc->base.dev;
struct drm_i915_private *dev_priv = to_i915(dev);
@@ -4987,12 +5425,10 @@ static void ironlake_fdi_pll_disable(struct intel_crtc *intel_crtc)
udelay(100);
}
-static void ironlake_fdi_disable(struct drm_crtc *crtc)
+static void ilk_fdi_disable(struct intel_crtc *crtc)
{
- struct drm_device *dev = crtc->dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
- struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
- enum pipe pipe = intel_crtc->pipe;
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ enum pipe pipe = crtc->pipe;
i915_reg_t reg;
u32 temp;
@@ -5083,9 +5519,9 @@ void lpt_disable_iclkip(struct drm_i915_private *dev_priv)
/* Program iCLKIP clock to the desired frequency */
static void lpt_program_iclkip(const struct intel_crtc_state *crtc_state)
{
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
- int clock = crtc_state->base.adjusted_mode.crtc_clock;
+ int clock = crtc_state->hw.adjusted_mode.crtc_clock;
u32 divsel, phaseinc, auxdiv, phasedir = 0;
u32 temp;
@@ -5196,10 +5632,10 @@ int lpt_get_iclkip(struct drm_i915_private *dev_priv)
desired_divisor << auxdiv);
}
-static void ironlake_pch_transcoder_set_timings(const struct intel_crtc_state *crtc_state,
- enum pipe pch_transcoder)
+static void ilk_pch_transcoder_set_timings(const struct intel_crtc_state *crtc_state,
+ enum pipe pch_transcoder)
{
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
@@ -5240,9 +5676,9 @@ static void cpt_set_fdi_bc_bifurcation(struct drm_i915_private *dev_priv, bool e
POSTING_READ(SOUTH_CHICKEN1);
}
-static void ivybridge_update_fdi_bc_bifurcation(const struct intel_crtc_state *crtc_state)
+static void ivb_update_fdi_bc_bifurcation(const struct intel_crtc_state *crtc_state)
{
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
switch (crtc->pipe) {
@@ -5272,7 +5708,7 @@ static struct intel_encoder *
intel_get_crtc_new_encoder(const struct intel_atomic_state *state,
const struct intel_crtc_state *crtc_state)
{
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
const struct drm_connector_state *connector_state;
const struct drm_connector *connector;
struct intel_encoder *encoder = NULL;
@@ -5301,10 +5737,10 @@ intel_get_crtc_new_encoder(const struct intel_atomic_state *state,
* - DP transcoding bits
* - transcoder
*/
-static void ironlake_pch_enable(const struct intel_atomic_state *state,
- const struct intel_crtc_state *crtc_state)
+static void ilk_pch_enable(const struct intel_atomic_state *state,
+ const struct intel_crtc_state *crtc_state)
{
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
struct drm_device *dev = crtc->base.dev;
struct drm_i915_private *dev_priv = to_i915(dev);
enum pipe pipe = crtc->pipe;
@@ -5313,7 +5749,7 @@ static void ironlake_pch_enable(const struct intel_atomic_state *state,
assert_pch_transcoder_disabled(dev_priv, pipe);
if (IS_IVYBRIDGE(dev_priv))
- ivybridge_update_fdi_bc_bifurcation(crtc_state);
+ ivb_update_fdi_bc_bifurcation(crtc_state);
/* Write the TU size bits before fdi link training, so that error
* detection works. */
@@ -5350,7 +5786,7 @@ static void ironlake_pch_enable(const struct intel_atomic_state *state,
/* set transcoder timing, panel must allow it */
assert_panel_unlocked(dev_priv, pipe);
- ironlake_pch_transcoder_set_timings(crtc_state, pipe);
+ ilk_pch_transcoder_set_timings(crtc_state, pipe);
intel_fdi_normal_train(crtc);
@@ -5358,7 +5794,7 @@ static void ironlake_pch_enable(const struct intel_atomic_state *state,
if (HAS_PCH_CPT(dev_priv) &&
intel_crtc_has_dp_encoder(crtc_state)) {
const struct drm_display_mode *adjusted_mode =
- &crtc_state->base.adjusted_mode;
+ &crtc_state->hw.adjusted_mode;
u32 bpc = (I915_READ(PIPECONF(pipe)) & PIPECONF_BPC_MASK) >> 5;
i915_reg_t reg = TRANS_DP_CTL(pipe);
enum port port;
@@ -5382,13 +5818,13 @@ static void ironlake_pch_enable(const struct intel_atomic_state *state,
I915_WRITE(reg, temp);
}
- ironlake_enable_pch_transcoder(crtc_state);
+ ilk_enable_pch_transcoder(crtc_state);
}
static void lpt_pch_enable(const struct intel_atomic_state *state,
const struct intel_crtc_state *crtc_state)
{
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
@@ -5397,14 +5833,14 @@ static void lpt_pch_enable(const struct intel_atomic_state *state,
lpt_program_iclkip(crtc_state);
/* Set transcoder timing. */
- ironlake_pch_transcoder_set_timings(crtc_state, PIPE_A);
+ ilk_pch_transcoder_set_timings(crtc_state, PIPE_A);
lpt_enable_pch_transcoder(dev_priv, cpu_transcoder);
}
-static void cpt_verify_modeset(struct drm_device *dev, enum pipe pipe)
+static void cpt_verify_modeset(struct drm_i915_private *dev_priv,
+ enum pipe pipe)
{
- struct drm_i915_private *dev_priv = to_i915(dev);
i915_reg_t dslreg = PIPEDSL(pipe);
u32 temp;
@@ -5500,15 +5936,16 @@ static int
skl_update_scaler(struct intel_crtc_state *crtc_state, bool force_detach,
unsigned int scaler_user, int *scaler_id,
int src_w, int src_h, int dst_w, int dst_h,
- const struct drm_format_info *format, bool need_scaler)
+ const struct drm_format_info *format,
+ u64 modifier, bool need_scaler)
{
struct intel_crtc_scaler_state *scaler_state =
&crtc_state->scaler_state;
struct intel_crtc *intel_crtc =
- to_intel_crtc(crtc_state->base.crtc);
+ to_intel_crtc(crtc_state->uapi.crtc);
struct drm_i915_private *dev_priv = to_i915(intel_crtc->base.dev);
const struct drm_display_mode *adjusted_mode =
- &crtc_state->base.adjusted_mode;
+ &crtc_state->hw.adjusted_mode;
/*
* Src coordinates are already rotated by 270 degrees for
@@ -5524,7 +5961,7 @@ skl_update_scaler(struct intel_crtc_state *crtc_state, bool force_detach,
* Once NV12 is enabled, handle it here while allocating scaler
* for NV12.
*/
- if (INTEL_GEN(dev_priv) >= 9 && crtc_state->base.enable &&
+ if (INTEL_GEN(dev_priv) >= 9 && crtc_state->hw.enable &&
need_scaler && adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) {
DRM_DEBUG_KMS("Pipe/Plane scaling not supported with IF-ID mode\n");
return -EINVAL;
@@ -5554,7 +5991,7 @@ skl_update_scaler(struct intel_crtc_state *crtc_state, bool force_detach,
return 0;
}
- if (format && drm_format_info_is_yuv_semiplanar(format) &&
+ if (format && intel_format_info_is_yuv_semiplanar(format, modifier) &&
(src_h < SKL_MIN_YUV_420_SRC_H || src_w < SKL_MIN_YUV_420_SRC_W)) {
DRM_DEBUG_KMS("Planar YUV: src dimensions not met\n");
return -EINVAL;
@@ -5596,17 +6033,18 @@ skl_update_scaler(struct intel_crtc_state *crtc_state, bool force_detach,
*/
int skl_update_scaler_crtc(struct intel_crtc_state *state)
{
- const struct drm_display_mode *adjusted_mode = &state->base.adjusted_mode;
+ const struct drm_display_mode *adjusted_mode = &state->hw.adjusted_mode;
bool need_scaler = false;
if (state->output_format == INTEL_OUTPUT_FORMAT_YCBCR420)
need_scaler = true;
- return skl_update_scaler(state, !state->base.active, SKL_CRTC_INDEX,
+ return skl_update_scaler(state, !state->hw.active, SKL_CRTC_INDEX,
&state->scaler_state.scaler_id,
state->pipe_src_w, state->pipe_src_h,
adjusted_mode->crtc_hdisplay,
- adjusted_mode->crtc_vdisplay, NULL, need_scaler);
+ adjusted_mode->crtc_vdisplay, NULL, 0,
+ need_scaler);
}
/**
@@ -5622,26 +6060,28 @@ static int skl_update_scaler_plane(struct intel_crtc_state *crtc_state,
struct intel_plane_state *plane_state)
{
struct intel_plane *intel_plane =
- to_intel_plane(plane_state->base.plane);
+ to_intel_plane(plane_state->uapi.plane);
struct drm_i915_private *dev_priv = to_i915(intel_plane->base.dev);
- struct drm_framebuffer *fb = plane_state->base.fb;
+ struct drm_framebuffer *fb = plane_state->hw.fb;
int ret;
- bool force_detach = !fb || !plane_state->base.visible;
+ bool force_detach = !fb || !plane_state->uapi.visible;
bool need_scaler = false;
/* Pre-gen11 and SDR planes always need a scaler for planar formats. */
if (!icl_is_hdr_plane(dev_priv, intel_plane->id) &&
- fb && drm_format_info_is_yuv_semiplanar(fb->format))
+ fb && intel_format_info_is_yuv_semiplanar(fb->format, fb->modifier))
need_scaler = true;
ret = skl_update_scaler(crtc_state, force_detach,
drm_plane_index(&intel_plane->base),
&plane_state->scaler_id,
- drm_rect_width(&plane_state->base.src) >> 16,
- drm_rect_height(&plane_state->base.src) >> 16,
- drm_rect_width(&plane_state->base.dst),
- drm_rect_height(&plane_state->base.dst),
- fb ? fb->format : NULL, need_scaler);
+ drm_rect_width(&plane_state->uapi.src) >> 16,
+ drm_rect_height(&plane_state->uapi.src) >> 16,
+ drm_rect_width(&plane_state->uapi.dst),
+ drm_rect_height(&plane_state->uapi.dst),
+ fb ? fb->format : NULL,
+ fb ? fb->modifier : 0,
+ need_scaler);
if (ret || plane_state->scaler_id < 0)
return ret;
@@ -5663,6 +6103,8 @@ static int skl_update_scaler_plane(struct intel_crtc_state *crtc_state,
case DRM_FORMAT_ARGB8888:
case DRM_FORMAT_XRGB2101010:
case DRM_FORMAT_XBGR2101010:
+ case DRM_FORMAT_ARGB2101010:
+ case DRM_FORMAT_ABGR2101010:
case DRM_FORMAT_YUYV:
case DRM_FORMAT_YVYU:
case DRM_FORMAT_UYVY:
@@ -5695,17 +6137,18 @@ static int skl_update_scaler_plane(struct intel_crtc_state *crtc_state,
return 0;
}
-static void skylake_scaler_disable(struct intel_crtc *crtc)
+void skl_scaler_disable(const struct intel_crtc_state *old_crtc_state)
{
+ struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->uapi.crtc);
int i;
for (i = 0; i < crtc->num_scalers; i++)
skl_detach_scaler(crtc, i);
}
-static void skylake_pfit_enable(const struct intel_crtc_state *crtc_state)
+static void skl_pfit_enable(const struct intel_crtc_state *crtc_state)
{
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
enum pipe pipe = crtc->pipe;
const struct intel_crtc_scaler_state *scaler_state =
@@ -5740,9 +6183,9 @@ static void skylake_pfit_enable(const struct intel_crtc_state *crtc_state)
}
}
-static void ironlake_pfit_enable(const struct intel_crtc_state *crtc_state)
+static void ilk_pfit_enable(const struct intel_crtc_state *crtc_state)
{
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
enum pipe pipe = crtc->pipe;
@@ -5763,7 +6206,7 @@ static void ironlake_pfit_enable(const struct intel_crtc_state *crtc_state)
void hsw_enable_ips(const struct intel_crtc_state *crtc_state)
{
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
struct drm_device *dev = crtc->base.dev;
struct drm_i915_private *dev_priv = to_i915(dev);
@@ -5799,7 +6242,7 @@ void hsw_enable_ips(const struct intel_crtc_state *crtc_state)
void hsw_disable_ips(const struct intel_crtc_state *crtc_state)
{
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
struct drm_device *dev = crtc->base.dev;
struct drm_i915_private *dev_priv = to_i915(dev);
@@ -5834,77 +6277,10 @@ static void intel_crtc_dpms_overlay_disable(struct intel_crtc *intel_crtc)
*/
}
-/**
- * intel_post_enable_primary - Perform operations after enabling primary plane
- * @crtc: the CRTC whose primary plane was just enabled
- * @new_crtc_state: the enabling state
- *
- * Performs potentially sleeping operations that must be done after the primary
- * plane is enabled, such as updating FBC and IPS. Note that this may be
- * called due to an explicit primary plane update, or due to an implicit
- * re-enable that is caused when a sprite plane is updated to no longer
- * completely hide the primary plane.
- */
-static void
-intel_post_enable_primary(struct drm_crtc *crtc,
- const struct intel_crtc_state *new_crtc_state)
-{
- struct drm_device *dev = crtc->dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
- struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
- enum pipe pipe = intel_crtc->pipe;
-
- /*
- * Gen2 reports pipe underruns whenever all planes are disabled.
- * So don't enable underrun reporting before at least some planes
- * are enabled.
- * FIXME: Need to fix the logic to work when we turn off all planes
- * but leave the pipe running.
- */
- if (IS_GEN(dev_priv, 2))
- intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
-
- /* Underruns don't always raise interrupts, so check manually. */
- intel_check_cpu_fifo_underruns(dev_priv);
- intel_check_pch_fifo_underruns(dev_priv);
-}
-
-/* FIXME get rid of this and use pre_plane_update */
-static void
-intel_pre_disable_primary_noatomic(struct drm_crtc *crtc)
-{
- struct drm_device *dev = crtc->dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
- struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
- enum pipe pipe = intel_crtc->pipe;
-
- /*
- * Gen2 reports pipe underruns whenever all planes are disabled.
- * So disable underrun reporting before all the planes get disabled.
- */
- if (IS_GEN(dev_priv, 2))
- intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);
-
- hsw_disable_ips(to_intel_crtc_state(crtc->state));
-
- /*
- * Vblank time updates from the shadow to live plane control register
- * are blocked if the memory self-refresh mode is active at that
- * moment. So to make sure the plane gets truly disabled, disable
- * first the self-refresh mode. The self-refresh enable bit in turn
- * will be checked/applied by the HW only at the next frame start
- * event which is after the vblank start event, so we need to have a
- * wait-for-vblank between disabling the plane and the pipe.
- */
- if (HAS_GMCH(dev_priv) &&
- intel_set_memory_cxsr(dev_priv, false))
- intel_wait_for_vblank(dev_priv, pipe);
-}
-
static bool hsw_pre_update_disable_ips(const struct intel_crtc_state *old_crtc_state,
const struct intel_crtc_state *new_crtc_state)
{
- struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->base.crtc);
+ struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->uapi.crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
if (!old_crtc_state->ips_enabled)
@@ -5920,7 +6296,7 @@ static bool hsw_pre_update_disable_ips(const struct intel_crtc_state *old_crtc_s
* Disable IPS before we program the LUT.
*/
if (IS_HASWELL(dev_priv) &&
- (new_crtc_state->base.color_mgmt_changed ||
+ (new_crtc_state->uapi.color_mgmt_changed ||
new_crtc_state->update_pipe) &&
new_crtc_state->gamma_mode == GAMMA_MODE_MODE_SPLIT)
return true;
@@ -5931,7 +6307,7 @@ static bool hsw_pre_update_disable_ips(const struct intel_crtc_state *old_crtc_s
static bool hsw_post_update_enable_ips(const struct intel_crtc_state *old_crtc_state,
const struct intel_crtc_state *new_crtc_state)
{
- struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->base.crtc);
+ struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->uapi.crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
if (!new_crtc_state->ips_enabled)
@@ -5947,7 +6323,7 @@ static bool hsw_post_update_enable_ips(const struct intel_crtc_state *old_crtc_s
* Re-enable IPS after the LUT has been programmed.
*/
if (IS_HASWELL(dev_priv) &&
- (new_crtc_state->base.color_mgmt_changed ||
+ (new_crtc_state->uapi.color_mgmt_changed ||
new_crtc_state->update_pipe) &&
new_crtc_state->gamma_mode == GAMMA_MODE_MODE_SPLIT)
return true;
@@ -5957,15 +6333,16 @@ static bool hsw_post_update_enable_ips(const struct intel_crtc_state *old_crtc_s
* forcibly enable IPS on the first fastset.
*/
if (new_crtc_state->update_pipe &&
- old_crtc_state->base.adjusted_mode.private_flags & I915_MODE_FLAG_INHERITED)
+ old_crtc_state->hw.adjusted_mode.private_flags & I915_MODE_FLAG_INHERITED)
return true;
return !old_crtc_state->ips_enabled;
}
-static bool needs_nv12_wa(struct drm_i915_private *dev_priv,
- const struct intel_crtc_state *crtc_state)
+static bool needs_nv12_wa(const struct intel_crtc_state *crtc_state)
{
+ struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
+
if (!crtc_state->nv12_planes)
return false;
@@ -5976,9 +6353,10 @@ static bool needs_nv12_wa(struct drm_i915_private *dev_priv,
return false;
}
-static bool needs_scalerclk_wa(struct drm_i915_private *dev_priv,
- const struct intel_crtc_state *crtc_state)
+static bool needs_scalerclk_wa(const struct intel_crtc_state *crtc_state)
{
+ struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
+
/* Wa_2006604312:icl */
if (crtc_state->scaler_state.scaler_users > 0 && IS_ICELAKE(dev_priv))
return true;
@@ -5986,89 +6364,82 @@ static bool needs_scalerclk_wa(struct drm_i915_private *dev_priv,
return false;
}
-static void intel_post_plane_update(struct intel_crtc_state *old_crtc_state)
+static bool planes_enabling(const struct intel_crtc_state *old_crtc_state,
+ const struct intel_crtc_state *new_crtc_state)
{
- struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->base.crtc);
- struct drm_device *dev = crtc->base.dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
- struct drm_atomic_state *state = old_crtc_state->base.state;
- struct intel_crtc_state *pipe_config =
- intel_atomic_get_new_crtc_state(to_intel_atomic_state(state),
- crtc);
- struct drm_plane *primary = crtc->base.primary;
- struct drm_plane_state *old_primary_state =
- drm_atomic_get_old_plane_state(state, primary);
+ return (!old_crtc_state->active_planes || needs_modeset(new_crtc_state)) &&
+ new_crtc_state->active_planes;
+}
- intel_frontbuffer_flip(to_i915(crtc->base.dev), pipe_config->fb_bits);
+static bool planes_disabling(const struct intel_crtc_state *old_crtc_state,
+ const struct intel_crtc_state *new_crtc_state)
+{
+ return old_crtc_state->active_planes &&
+ (!new_crtc_state->active_planes || needs_modeset(new_crtc_state));
+}
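planes_enabling()/planes_disabling() name the two edge conditions the old open-coded checks were testing for. Restated as a truth table; needs_modeset() forces the edge whenever the corresponding side actually has planes:

/*
 *  old active_planes | new active_planes | enabling   | disabling
 *  ------------------+-------------------+------------+------------
 *          0         |         0         |    no      |    no
 *          0         |        !0         |   yes      |    no
 *         !0         |         0         |    no      |   yes
 *         !0         |        !0         | if modeset | if modeset
 */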
- if (pipe_config->update_wm_post && pipe_config->base.active)
- intel_update_watermarks(crtc);
+static void intel_post_plane_update(struct intel_atomic_state *state,
+ struct intel_crtc *crtc)
+{
+ struct drm_i915_private *dev_priv = to_i915(state->base.dev);
+ struct intel_plane *primary = to_intel_plane(crtc->base.primary);
+ const struct intel_crtc_state *old_crtc_state =
+ intel_atomic_get_old_crtc_state(state, crtc);
+ const struct intel_crtc_state *new_crtc_state =
+ intel_atomic_get_new_crtc_state(state, crtc);
+ const struct intel_plane_state *new_primary_state =
+ intel_atomic_get_new_plane_state(state, primary);
+ enum pipe pipe = crtc->pipe;
- if (hsw_post_update_enable_ips(old_crtc_state, pipe_config))
- hsw_enable_ips(pipe_config);
+ intel_frontbuffer_flip(dev_priv, new_crtc_state->fb_bits);
- if (old_primary_state) {
- struct drm_plane_state *new_primary_state =
- drm_atomic_get_new_plane_state(state, primary);
+ if (new_crtc_state->update_wm_post && new_crtc_state->hw.active)
+ intel_update_watermarks(crtc);
- intel_fbc_post_update(crtc);
+ if (hsw_post_update_enable_ips(old_crtc_state, new_crtc_state))
+ hsw_enable_ips(new_crtc_state);
- if (new_primary_state->visible &&
- (needs_modeset(pipe_config) ||
- !old_primary_state->visible))
- intel_post_enable_primary(&crtc->base, pipe_config);
- }
+ if (new_primary_state)
+ intel_fbc_post_update(crtc);
- if (needs_nv12_wa(dev_priv, old_crtc_state) &&
- !needs_nv12_wa(dev_priv, pipe_config))
- skl_wa_827(dev_priv, crtc->pipe, false);
+ if (needs_nv12_wa(old_crtc_state) &&
+ !needs_nv12_wa(new_crtc_state))
+ skl_wa_827(dev_priv, pipe, false);
- if (needs_scalerclk_wa(dev_priv, old_crtc_state) &&
- !needs_scalerclk_wa(dev_priv, pipe_config))
- icl_wa_scalerclkgating(dev_priv, crtc->pipe, false);
+ if (needs_scalerclk_wa(old_crtc_state) &&
+ !needs_scalerclk_wa(new_crtc_state))
+ icl_wa_scalerclkgating(dev_priv, pipe, false);
}
-static void intel_pre_plane_update(struct intel_crtc_state *old_crtc_state,
- struct intel_crtc_state *pipe_config)
+static void intel_pre_plane_update(struct intel_atomic_state *state,
+ struct intel_crtc *crtc)
{
- struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->base.crtc);
- struct drm_device *dev = crtc->base.dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
- struct drm_atomic_state *state = old_crtc_state->base.state;
- struct drm_plane *primary = crtc->base.primary;
- struct drm_plane_state *old_primary_state =
- drm_atomic_get_old_plane_state(state, primary);
- bool modeset = needs_modeset(pipe_config);
- struct intel_atomic_state *intel_state =
- to_intel_atomic_state(state);
+ struct drm_i915_private *dev_priv = to_i915(state->base.dev);
+ struct intel_plane *primary = to_intel_plane(crtc->base.primary);
+ const struct intel_crtc_state *old_crtc_state =
+ intel_atomic_get_old_crtc_state(state, crtc);
+ const struct intel_crtc_state *new_crtc_state =
+ intel_atomic_get_new_crtc_state(state, crtc);
+ const struct intel_plane_state *new_primary_state =
+ intel_atomic_get_new_plane_state(state, primary);
+ enum pipe pipe = crtc->pipe;
- if (hsw_pre_update_disable_ips(old_crtc_state, pipe_config))
+ if (hsw_pre_update_disable_ips(old_crtc_state, new_crtc_state))
hsw_disable_ips(old_crtc_state);
- if (old_primary_state) {
- struct intel_plane_state *new_primary_state =
- intel_atomic_get_new_plane_state(intel_state,
- to_intel_plane(primary));
-
- intel_fbc_pre_update(crtc, pipe_config, new_primary_state);
- /*
- * Gen2 reports pipe underruns whenever all planes are disabled.
- * So disable underrun reporting before all the planes get disabled.
- */
- if (IS_GEN(dev_priv, 2) && old_primary_state->visible &&
- (modeset || !new_primary_state->base.visible))
- intel_set_cpu_fifo_underrun_reporting(dev_priv, crtc->pipe, false);
- }
+ if (new_primary_state &&
+ intel_fbc_pre_update(crtc, new_crtc_state, new_primary_state))
+ intel_wait_for_vblank(dev_priv, pipe);
/* Display WA 827 */
- if (!needs_nv12_wa(dev_priv, old_crtc_state) &&
- needs_nv12_wa(dev_priv, pipe_config))
- skl_wa_827(dev_priv, crtc->pipe, true);
+ if (!needs_nv12_wa(old_crtc_state) &&
+ needs_nv12_wa(new_crtc_state))
+ skl_wa_827(dev_priv, pipe, true);
/* Wa_2006604312:icl */
- if (!needs_scalerclk_wa(dev_priv, old_crtc_state) &&
- needs_scalerclk_wa(dev_priv, pipe_config))
- icl_wa_scalerclkgating(dev_priv, crtc->pipe, true);
+ if (!needs_scalerclk_wa(old_crtc_state) &&
+ needs_scalerclk_wa(new_crtc_state))
+ icl_wa_scalerclkgating(dev_priv, pipe, true);
/*
* Vblank time updates from the shadow to live plane control register
@@ -6079,9 +6450,9 @@ static void intel_pre_plane_update(struct intel_crtc_state *old_crtc_state,
* event which is after the vblank start event, so we need to have a
* wait-for-vblank between disabling the plane and the pipe.
*/
- if (HAS_GMCH(dev_priv) && old_crtc_state->base.active &&
- pipe_config->disable_cxsr && intel_set_memory_cxsr(dev_priv, false))
- intel_wait_for_vblank(dev_priv, crtc->pipe);
+ if (HAS_GMCH(dev_priv) && old_crtc_state->hw.active &&
+ new_crtc_state->disable_cxsr && intel_set_memory_cxsr(dev_priv, false))
+ intel_wait_for_vblank(dev_priv, pipe);
/*
* IVB workaround: must disable low power watermarks for at least
@@ -6090,36 +6461,45 @@ static void intel_pre_plane_update(struct intel_crtc_state *old_crtc_state,
*
* WaCxSRDisabledForSpriteScaling:ivb
*/
- if (pipe_config->disable_lp_wm && ilk_disable_lp_wm(dev) &&
- old_crtc_state->base.active)
- intel_wait_for_vblank(dev_priv, crtc->pipe);
+ if (old_crtc_state->hw.active &&
+ new_crtc_state->disable_lp_wm && ilk_disable_lp_wm(dev_priv))
+ intel_wait_for_vblank(dev_priv, pipe);
/*
- * If we're doing a modeset, we're done. No need to do any pre-vblank
- * watermark programming here.
+ * If we're doing a modeset we don't need to do any
+ * pre-vblank watermark programming here.
*/
- if (needs_modeset(pipe_config))
- return;
+ if (!needs_modeset(new_crtc_state)) {
+ /*
+ * For platforms that support atomic watermarks, program the
+ * 'intermediate' watermarks immediately. On pre-gen9 platforms, these
+ * will be the intermediate values that are safe for both pre- and
+ * post- vblank; when vblank happens, the 'active' values will be set
+ * to the final 'target' values and we'll do this again to get the
+ * optimal watermarks. For gen9+ platforms, the values we program here
+ * will be the final target values which will get automatically latched
+ * at vblank time; no further programming will be necessary.
+ *
+ * If a platform hasn't been transitioned to atomic watermarks yet,
+ * we'll continue to update watermarks the old way, if flags tell
+ * us to.
+ */
+ if (dev_priv->display.initial_watermarks)
+ dev_priv->display.initial_watermarks(state, crtc);
+ else if (new_crtc_state->update_wm_pre)
+ intel_update_watermarks(crtc);
+ }
/*
- * For platforms that support atomic watermarks, program the
- * 'intermediate' watermarks immediately. On pre-gen9 platforms, these
- * will be the intermediate values that are safe for both pre- and
- * post- vblank; when vblank happens, the 'active' values will be set
- * to the final 'target' values and we'll do this again to get the
- * optimal watermarks. For gen9+ platforms, the values we program here
- * will be the final target values which will get automatically latched
- * at vblank time; no further programming will be necessary.
+ * Gen2 reports pipe underruns whenever all planes are disabled.
+ * So disable underrun reporting before all the planes get disabled.
*
- * If a platform hasn't been transitioned to atomic watermarks yet,
- * we'll continue to update watermarks the old way, if flags tell
- * us to.
+ * We do this after .initial_watermarks() so that we have a
+ * chance of catching underruns with the intermediate watermarks
+ * vs. the old plane configuration.
*/
- if (dev_priv->display.initial_watermarks != NULL)
- dev_priv->display.initial_watermarks(intel_state,
- pipe_config);
- else if (pipe_config->update_wm_pre)
- intel_update_watermarks(crtc);
+ if (IS_GEN(dev_priv, 2) && planes_disabling(old_crtc_state, new_crtc_state))
+ intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);
}
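The tail of intel_pre_plane_update() now runs in a deliberate order, condensed below with placeholder helpers (not i915 API): intermediate watermarks are programmed while underrun detection is still armed, and only then is gen2 underrun reporting muted, since gen2 reports an underrun whenever all planes are disabled.

static int needs_modeset_sketch(void);   /* placeholders, not i915 API */
static int gen2_planes_disabling(void);
static void program_intermediate_wm(void);
static void mute_underrun_reporting(void);

static void pre_plane_update_tail(void)
{
        /* step 1: watermarks first, underrun detection still armed, so
         * underruns vs. the old plane layout are still caught */
        if (!needs_modeset_sketch())
                program_intermediate_wm();
        /* step 2: only now silence gen2 underrun reporting, right
         * before every plane is turned off */
        if (gen2_planes_disabling())
                mute_underrun_reporting();
}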
static void intel_crtc_disable_planes(struct intel_atomic_state *state,
@@ -6143,7 +6523,7 @@ static void intel_crtc_disable_planes(struct intel_atomic_state *state,
intel_disable_plane(plane, new_crtc_state);
- if (old_plane_state->base.visible)
+ if (old_plane_state->uapi.visible)
fb_bits |= plane->frontbuffer_bit;
}
@@ -6167,45 +6547,29 @@ intel_connector_primary_encoder(struct intel_connector *connector)
if (connector->mst_port)
return &dp_to_dig_port(connector->mst_port)->base;
- encoder = intel_attached_encoder(&connector->base);
+ encoder = intel_attached_encoder(connector);
WARN_ON(!encoder);
return encoder;
}
-static bool
-intel_connector_needs_modeset(struct intel_atomic_state *state,
- const struct drm_connector_state *old_conn_state,
- const struct drm_connector_state *new_conn_state)
-{
- struct intel_crtc *old_crtc = old_conn_state->crtc ?
- to_intel_crtc(old_conn_state->crtc) : NULL;
- struct intel_crtc *new_crtc = new_conn_state->crtc ?
- to_intel_crtc(new_conn_state->crtc) : NULL;
-
- return new_crtc != old_crtc ||
- (new_crtc &&
- needs_modeset(intel_atomic_get_new_crtc_state(state, new_crtc)));
-}
-
static void intel_encoders_update_prepare(struct intel_atomic_state *state)
{
- struct drm_connector_state *old_conn_state;
struct drm_connector_state *new_conn_state;
- struct drm_connector *conn;
+ struct drm_connector *connector;
int i;
- for_each_oldnew_connector_in_state(&state->base, conn,
- old_conn_state, new_conn_state, i) {
+ for_each_new_connector_in_state(&state->base, connector, new_conn_state,
+ i) {
+ struct intel_connector *intel_connector;
struct intel_encoder *encoder;
struct intel_crtc *crtc;
- if (!intel_connector_needs_modeset(state,
- old_conn_state,
- new_conn_state))
+ if (!intel_connector_needs_modeset(state, connector))
continue;
- encoder = intel_connector_primary_encoder(to_intel_connector(conn));
+ intel_connector = to_intel_connector(connector);
+ encoder = intel_connector_primary_encoder(intel_connector);
if (!encoder->update_prepare)
continue;
@@ -6217,22 +6581,21 @@ static void intel_encoders_update_prepare(struct intel_atomic_state *state)
static void intel_encoders_update_complete(struct intel_atomic_state *state)
{
- struct drm_connector_state *old_conn_state;
struct drm_connector_state *new_conn_state;
- struct drm_connector *conn;
+ struct drm_connector *connector;
int i;
- for_each_oldnew_connector_in_state(&state->base, conn,
- old_conn_state, new_conn_state, i) {
+ for_each_new_connector_in_state(&state->base, connector, new_conn_state,
+ i) {
+ struct intel_connector *intel_connector;
struct intel_encoder *encoder;
struct intel_crtc *crtc;
- if (!intel_connector_needs_modeset(state,
- old_conn_state,
- new_conn_state))
+ if (!intel_connector_needs_modeset(state, connector))
continue;
- encoder = intel_connector_primary_encoder(to_intel_connector(conn));
+ intel_connector = to_intel_connector(connector);
+ encoder = intel_connector_primary_encoder(intel_connector);
if (!encoder->update_complete)
continue;
@@ -6242,11 +6605,12 @@ static void intel_encoders_update_complete(struct intel_atomic_state *state)
}
}
-static void intel_encoders_pre_pll_enable(struct intel_crtc *crtc,
- struct intel_crtc_state *crtc_state,
- struct intel_atomic_state *state)
+static void intel_encoders_pre_pll_enable(struct intel_atomic_state *state,
+ struct intel_crtc *crtc)
{
- struct drm_connector_state *conn_state;
+ const struct intel_crtc_state *crtc_state =
+ intel_atomic_get_new_crtc_state(state, crtc);
+ const struct drm_connector_state *conn_state;
struct drm_connector *conn;
int i;
@@ -6262,11 +6626,12 @@ static void intel_encoders_pre_pll_enable(struct intel_crtc *crtc,
}
}
-static void intel_encoders_pre_enable(struct intel_crtc *crtc,
- struct intel_crtc_state *crtc_state,
- struct intel_atomic_state *state)
+static void intel_encoders_pre_enable(struct intel_atomic_state *state,
+ struct intel_crtc *crtc)
{
- struct drm_connector_state *conn_state;
+ const struct intel_crtc_state *crtc_state =
+ intel_atomic_get_new_crtc_state(state, crtc);
+ const struct drm_connector_state *conn_state;
struct drm_connector *conn;
int i;
@@ -6282,11 +6647,12 @@ static void intel_encoders_pre_enable(struct intel_crtc *crtc,
}
}
-static void intel_encoders_enable(struct intel_crtc *crtc,
- struct intel_crtc_state *crtc_state,
- struct intel_atomic_state *state)
+static void intel_encoders_enable(struct intel_atomic_state *state,
+ struct intel_crtc *crtc)
{
- struct drm_connector_state *conn_state;
+ const struct intel_crtc_state *crtc_state =
+ intel_atomic_get_new_crtc_state(state, crtc);
+ const struct drm_connector_state *conn_state;
struct drm_connector *conn;
int i;
@@ -6303,11 +6669,12 @@ static void intel_encoders_enable(struct intel_crtc *crtc,
}
}
-static void intel_encoders_disable(struct intel_crtc *crtc,
- struct intel_crtc_state *old_crtc_state,
- struct intel_atomic_state *state)
+static void intel_encoders_disable(struct intel_atomic_state *state,
+ struct intel_crtc *crtc)
{
- struct drm_connector_state *old_conn_state;
+ const struct intel_crtc_state *old_crtc_state =
+ intel_atomic_get_old_crtc_state(state, crtc);
+ const struct drm_connector_state *old_conn_state;
struct drm_connector *conn;
int i;
@@ -6324,11 +6691,12 @@ static void intel_encoders_disable(struct intel_crtc *crtc,
}
}
-static void intel_encoders_post_disable(struct intel_crtc *crtc,
- struct intel_crtc_state *old_crtc_state,
- struct intel_atomic_state *state)
+static void intel_encoders_post_disable(struct intel_atomic_state *state,
+ struct intel_crtc *crtc)
{
- struct drm_connector_state *old_conn_state;
+ const struct intel_crtc_state *old_crtc_state =
+ intel_atomic_get_old_crtc_state(state, crtc);
+ const struct drm_connector_state *old_conn_state;
struct drm_connector *conn;
int i;
@@ -6344,11 +6712,12 @@ static void intel_encoders_post_disable(struct intel_crtc *crtc,
}
}
-static void intel_encoders_post_pll_disable(struct intel_crtc *crtc,
- struct intel_crtc_state *old_crtc_state,
- struct intel_atomic_state *state)
+static void intel_encoders_post_pll_disable(struct intel_atomic_state *state,
+ struct intel_crtc *crtc)
{
- struct drm_connector_state *old_conn_state;
+ const struct intel_crtc_state *old_crtc_state =
+ intel_atomic_get_old_crtc_state(state, crtc);
+ const struct drm_connector_state *old_conn_state;
struct drm_connector *conn;
int i;
@@ -6364,11 +6733,12 @@ static void intel_encoders_post_pll_disable(struct intel_crtc *crtc,
}
}
-static void intel_encoders_update_pipe(struct intel_crtc *crtc,
- struct intel_crtc_state *crtc_state,
- struct intel_atomic_state *state)
+static void intel_encoders_update_pipe(struct intel_atomic_state *state,
+ struct intel_crtc *crtc)
{
- struct drm_connector_state *conn_state;
+ const struct intel_crtc_state *crtc_state =
+ intel_atomic_get_new_crtc_state(state, crtc);
+ const struct drm_connector_state *conn_state;
struct drm_connector *conn;
int i;
@@ -6386,22 +6756,21 @@ static void intel_encoders_update_pipe(struct intel_crtc *crtc,
static void intel_disable_primary_plane(const struct intel_crtc_state *crtc_state)
{
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
struct intel_plane *plane = to_intel_plane(crtc->base.primary);
plane->disable_plane(plane, crtc_state);
}
-static void ironlake_crtc_enable(struct intel_crtc_state *pipe_config,
- struct intel_atomic_state *state)
+static void ilk_crtc_enable(struct intel_atomic_state *state,
+ struct intel_crtc *crtc)
{
- struct drm_crtc *crtc = pipe_config->base.crtc;
- struct drm_device *dev = crtc->dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
- struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
- enum pipe pipe = intel_crtc->pipe;
+ const struct intel_crtc_state *new_crtc_state =
+ intel_atomic_get_new_crtc_state(state, crtc);
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ enum pipe pipe = crtc->pipe;
- if (WARN_ON(intel_crtc->active))
+ if (WARN_ON(crtc->active))
return;
/*
@@ -6417,61 +6786,59 @@ static void ironlake_crtc_enable(struct intel_crtc_state *pipe_config,
intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);
intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, false);
- if (pipe_config->has_pch_encoder)
- intel_prepare_shared_dpll(pipe_config);
+ if (new_crtc_state->has_pch_encoder)
+ intel_prepare_shared_dpll(new_crtc_state);
- if (intel_crtc_has_dp_encoder(pipe_config))
- intel_dp_set_m_n(pipe_config, M1_N1);
+ if (intel_crtc_has_dp_encoder(new_crtc_state))
+ intel_dp_set_m_n(new_crtc_state, M1_N1);
- intel_set_pipe_timings(pipe_config);
- intel_set_pipe_src_size(pipe_config);
+ intel_set_pipe_timings(new_crtc_state);
+ intel_set_pipe_src_size(new_crtc_state);
- if (pipe_config->has_pch_encoder) {
- intel_cpu_transcoder_set_m_n(pipe_config,
- &pipe_config->fdi_m_n, NULL);
- }
+ if (new_crtc_state->has_pch_encoder)
+ intel_cpu_transcoder_set_m_n(new_crtc_state,
+ &new_crtc_state->fdi_m_n, NULL);
- ironlake_set_pipeconf(pipe_config);
+ ilk_set_pipeconf(new_crtc_state);
- intel_crtc->active = true;
+ crtc->active = true;
- intel_encoders_pre_enable(intel_crtc, pipe_config, state);
+ intel_encoders_pre_enable(state, crtc);
- if (pipe_config->has_pch_encoder) {
+ if (new_crtc_state->has_pch_encoder) {
/* Note: FDI PLL enabling _must_ be done before we enable the
* cpu pipes, hence this is separate from all the other fdi/pch
* enabling. */
- ironlake_fdi_pll_enable(pipe_config);
+ ilk_fdi_pll_enable(new_crtc_state);
} else {
assert_fdi_tx_disabled(dev_priv, pipe);
assert_fdi_rx_disabled(dev_priv, pipe);
}
- ironlake_pfit_enable(pipe_config);
+ ilk_pfit_enable(new_crtc_state);
/*
* On ILK+ LUT must be loaded before the pipe is running but with
* clocks enabled
*/
- intel_color_load_luts(pipe_config);
- intel_color_commit(pipe_config);
+ intel_color_load_luts(new_crtc_state);
+ intel_color_commit(new_crtc_state);
/* update DSPCNTR to configure gamma for pipe bottom color */
- intel_disable_primary_plane(pipe_config);
+ intel_disable_primary_plane(new_crtc_state);
- if (dev_priv->display.initial_watermarks != NULL)
- dev_priv->display.initial_watermarks(state, pipe_config);
- intel_enable_pipe(pipe_config);
+ if (dev_priv->display.initial_watermarks)
+ dev_priv->display.initial_watermarks(state, crtc);
+ intel_enable_pipe(new_crtc_state);
- if (pipe_config->has_pch_encoder)
- ironlake_pch_enable(state, pipe_config);
+ if (new_crtc_state->has_pch_encoder)
+ ilk_pch_enable(state, new_crtc_state);
- assert_vblank_disabled(crtc);
- intel_crtc_vblank_on(pipe_config);
+ intel_crtc_vblank_on(new_crtc_state);
- intel_encoders_enable(intel_crtc, pipe_config, state);
+ intel_encoders_enable(state, crtc);
if (HAS_PCH_CPT(dev_priv))
- cpt_verify_modeset(dev, intel_crtc->pipe);
+ cpt_verify_modeset(dev_priv, pipe);
/*
* Must wait for vblank to avoid spurious PCH FIFO underruns.
@@ -6479,7 +6846,7 @@ static void ironlake_crtc_enable(struct intel_crtc_state *pipe_config,
* some interlaced HDMI modes. Let's do the double wait always
* in case there are more corner cases we don't know about.
*/
- if (pipe_config->has_pch_encoder) {
+ if (new_crtc_state->has_pch_encoder) {
intel_wait_for_vblank(dev_priv, pipe);
intel_wait_for_vblank(dev_priv, pipe);
}
@@ -6526,103 +6893,112 @@ static void icl_pipe_mbus_enable(struct intel_crtc *crtc)
I915_WRITE(PIPE_MBUS_DBOX_CTL(pipe), val);
}
-static void haswell_crtc_enable(struct intel_crtc_state *pipe_config,
- struct intel_atomic_state *state)
+static void hsw_set_frame_start_delay(const struct intel_crtc_state *crtc_state)
{
- struct drm_crtc *crtc = pipe_config->base.crtc;
- struct drm_i915_private *dev_priv = to_i915(crtc->dev);
- struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
- enum pipe pipe = intel_crtc->pipe, hsw_workaround_pipe;
- enum transcoder cpu_transcoder = pipe_config->cpu_transcoder;
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ i915_reg_t reg = CHICKEN_TRANS(crtc_state->cpu_transcoder);
+ u32 val;
+
+ val = I915_READ(reg);
+ val &= ~HSW_FRAME_START_DELAY_MASK;
+ val |= HSW_FRAME_START_DELAY(0);
+ I915_WRITE(reg, val);
+}
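/*
 * hsw_set_frame_start_delay() above is the usual read-modify-write
 * idiom for a chicken register.  The same pattern as a generic sketch
 * (helper name and parameters are illustrative, not from this patch):
 */
static inline void chicken_rmw(struct drm_i915_private *dev_priv,
			       i915_reg_t reg, u32 mask, u32 bits)
{
	u32 val = I915_READ(reg);

	val &= ~mask;	/* clear the whole field */
	val |= bits;	/* install the new value */
	I915_WRITE(reg, val);
}
/* e.g. chicken_rmw(dev_priv, reg, HSW_FRAME_START_DELAY_MASK,
 *                  HSW_FRAME_START_DELAY(0)); */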
+
+static void hsw_crtc_enable(struct intel_atomic_state *state,
+ struct intel_crtc *crtc)
+{
+ const struct intel_crtc_state *new_crtc_state =
+ intel_atomic_get_new_crtc_state(state, crtc);
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ enum pipe pipe = crtc->pipe, hsw_workaround_pipe;
+ enum transcoder cpu_transcoder = new_crtc_state->cpu_transcoder;
bool psl_clkgate_wa;
- if (WARN_ON(intel_crtc->active))
+ if (WARN_ON(crtc->active))
return;
- intel_encoders_pre_pll_enable(intel_crtc, pipe_config, state);
+ intel_encoders_pre_pll_enable(state, crtc);
- if (pipe_config->shared_dpll)
- intel_enable_shared_dpll(pipe_config);
+ if (new_crtc_state->shared_dpll)
+ intel_enable_shared_dpll(new_crtc_state);
- intel_encoders_pre_enable(intel_crtc, pipe_config, state);
+ intel_encoders_pre_enable(state, crtc);
- if (intel_crtc_has_dp_encoder(pipe_config))
- intel_dp_set_m_n(pipe_config, M1_N1);
+ if (intel_crtc_has_dp_encoder(new_crtc_state))
+ intel_dp_set_m_n(new_crtc_state, M1_N1);
if (!transcoder_is_dsi(cpu_transcoder))
- intel_set_pipe_timings(pipe_config);
+ intel_set_pipe_timings(new_crtc_state);
if (INTEL_GEN(dev_priv) >= 11)
- icl_enable_trans_port_sync(pipe_config);
+ icl_enable_trans_port_sync(new_crtc_state);
- intel_set_pipe_src_size(pipe_config);
+ intel_set_pipe_src_size(new_crtc_state);
if (cpu_transcoder != TRANSCODER_EDP &&
- !transcoder_is_dsi(cpu_transcoder)) {
+ !transcoder_is_dsi(cpu_transcoder))
I915_WRITE(PIPE_MULT(cpu_transcoder),
- pipe_config->pixel_multiplier - 1);
- }
+ new_crtc_state->pixel_multiplier - 1);
- if (pipe_config->has_pch_encoder) {
- intel_cpu_transcoder_set_m_n(pipe_config,
- &pipe_config->fdi_m_n, NULL);
- }
+ if (new_crtc_state->has_pch_encoder)
+ intel_cpu_transcoder_set_m_n(new_crtc_state,
+ &new_crtc_state->fdi_m_n, NULL);
- if (!transcoder_is_dsi(cpu_transcoder))
- haswell_set_pipeconf(pipe_config);
+ if (!transcoder_is_dsi(cpu_transcoder)) {
+ hsw_set_frame_start_delay(new_crtc_state);
+ hsw_set_pipeconf(new_crtc_state);
+ }
if (INTEL_GEN(dev_priv) >= 9 || IS_BROADWELL(dev_priv))
- bdw_set_pipemisc(pipe_config);
+ bdw_set_pipemisc(new_crtc_state);
- intel_crtc->active = true;
+ crtc->active = true;
/* Display WA #1180: WaDisableScalarClockGating: glk, cnl */
psl_clkgate_wa = (IS_GEMINILAKE(dev_priv) || IS_CANNONLAKE(dev_priv)) &&
- pipe_config->pch_pfit.enabled;
+ new_crtc_state->pch_pfit.enabled;
if (psl_clkgate_wa)
glk_pipe_scaler_clock_gating_wa(dev_priv, pipe, true);
if (INTEL_GEN(dev_priv) >= 9)
- skylake_pfit_enable(pipe_config);
+ skl_pfit_enable(new_crtc_state);
else
- ironlake_pfit_enable(pipe_config);
+ ilk_pfit_enable(new_crtc_state);
/*
* On ILK+ LUT must be loaded before the pipe is running but with
* clocks enabled
*/
- intel_color_load_luts(pipe_config);
- intel_color_commit(pipe_config);
+ intel_color_load_luts(new_crtc_state);
+ intel_color_commit(new_crtc_state);
/* update DSPCNTR to configure gamma/csc for pipe bottom color */
if (INTEL_GEN(dev_priv) < 9)
- intel_disable_primary_plane(pipe_config);
+ intel_disable_primary_plane(new_crtc_state);
if (INTEL_GEN(dev_priv) >= 11)
- icl_set_pipe_chicken(intel_crtc);
+ icl_set_pipe_chicken(crtc);
if (!transcoder_is_dsi(cpu_transcoder))
- intel_ddi_enable_transcoder_func(pipe_config);
+ intel_ddi_enable_transcoder_func(new_crtc_state);
- if (dev_priv->display.initial_watermarks != NULL)
- dev_priv->display.initial_watermarks(state, pipe_config);
+ if (dev_priv->display.initial_watermarks)
+ dev_priv->display.initial_watermarks(state, crtc);
if (INTEL_GEN(dev_priv) >= 11)
- icl_pipe_mbus_enable(intel_crtc);
+ icl_pipe_mbus_enable(crtc);
/* XXX: Do the pipe assertions at the right place for BXT DSI. */
if (!transcoder_is_dsi(cpu_transcoder))
- intel_enable_pipe(pipe_config);
+ intel_enable_pipe(new_crtc_state);
- if (pipe_config->has_pch_encoder)
- lpt_pch_enable(state, pipe_config);
-
- if (intel_crtc_has_type(pipe_config, INTEL_OUTPUT_DP_MST))
- intel_ddi_set_vc_payload_alloc(pipe_config, true);
+ if (new_crtc_state->has_pch_encoder)
+ lpt_pch_enable(state, new_crtc_state);
- assert_vblank_disabled(crtc);
- intel_crtc_vblank_on(pipe_config);
+ intel_crtc_vblank_on(new_crtc_state);
- intel_encoders_enable(intel_crtc, pipe_config, state);
+ intel_encoders_enable(state, crtc);
if (psl_clkgate_wa) {
intel_wait_for_vblank(dev_priv, pipe);
@@ -6631,16 +7007,16 @@ static void haswell_crtc_enable(struct intel_crtc_state *pipe_config,
/* If we change the relative order between pipe/planes enabling, we need
* to change the workaround. */
- hsw_workaround_pipe = pipe_config->hsw_workaround_pipe;
+ hsw_workaround_pipe = new_crtc_state->hsw_workaround_pipe;
if (IS_HASWELL(dev_priv) && hsw_workaround_pipe != INVALID_PIPE) {
intel_wait_for_vblank(dev_priv, hsw_workaround_pipe);
intel_wait_for_vblank(dev_priv, hsw_workaround_pipe);
}
}
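/*
 * A minimal sketch of the hook shape this series converges on: every
 * enable/disable hook now takes (state, crtc) and derives its own crtc
 * state, instead of being handed a pipe_config.  Helper names follow
 * the calls used above; the body is illustrative, not a copy of any
 * one upstream hook.
 */
static void example_crtc_enable(struct intel_atomic_state *state,
				struct intel_crtc *crtc)
{
	const struct intel_crtc_state *new_crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);

	if (WARN_ON(crtc->active))
		return;

	intel_encoders_pre_enable(state, crtc);
	/* ... program the pipe from new_crtc_state ... */
	intel_enable_pipe(new_crtc_state);
	intel_crtc_vblank_on(new_crtc_state);
	intel_encoders_enable(state, crtc);
}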
-static void ironlake_pfit_disable(const struct intel_crtc_state *old_crtc_state)
+void ilk_pfit_disable(const struct intel_crtc_state *old_crtc_state)
{
- struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->base.crtc);
+ struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->uapi.crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
enum pipe pipe = crtc->pipe;
@@ -6653,14 +7029,13 @@ static void ironlake_pfit_disable(const struct intel_crtc_state *old_crtc_state)
}
}
-static void ironlake_crtc_disable(struct intel_crtc_state *old_crtc_state,
- struct intel_atomic_state *state)
+static void ilk_crtc_disable(struct intel_atomic_state *state,
+ struct intel_crtc *crtc)
{
- struct drm_crtc *crtc = old_crtc_state->base.crtc;
- struct drm_device *dev = crtc->dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
- struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
- enum pipe pipe = intel_crtc->pipe;
+ const struct intel_crtc_state *old_crtc_state =
+ intel_atomic_get_old_crtc_state(state, crtc);
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ enum pipe pipe = crtc->pipe;
/*
* Sometimes spurious CPU pipe underruns happen when the
@@ -6670,22 +7045,21 @@ static void ironlake_crtc_disable(struct intel_crtc_state *old_crtc_state,
intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);
intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, false);
- intel_encoders_disable(intel_crtc, old_crtc_state, state);
+ intel_encoders_disable(state, crtc);
- drm_crtc_vblank_off(crtc);
- assert_vblank_disabled(crtc);
+ intel_crtc_vblank_off(old_crtc_state);
intel_disable_pipe(old_crtc_state);
- ironlake_pfit_disable(old_crtc_state);
+ ilk_pfit_disable(old_crtc_state);
if (old_crtc_state->has_pch_encoder)
- ironlake_fdi_disable(crtc);
+ ilk_fdi_disable(crtc);
- intel_encoders_post_disable(intel_crtc, old_crtc_state, state);
+ intel_encoders_post_disable(state, crtc);
if (old_crtc_state->has_pch_encoder) {
- ironlake_disable_pch_transcoder(dev_priv, pipe);
+ ilk_disable_pch_transcoder(dev_priv, pipe);
if (HAS_PCH_CPT(dev_priv)) {
i915_reg_t reg;
@@ -6705,54 +7079,27 @@ static void ironlake_crtc_disable(struct intel_crtc_state *old_crtc_state,
I915_WRITE(PCH_DPLL_SEL, temp);
}
- ironlake_fdi_pll_disable(intel_crtc);
+ ilk_fdi_pll_disable(crtc);
}
intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, true);
}
-static void haswell_crtc_disable(struct intel_crtc_state *old_crtc_state,
- struct intel_atomic_state *state)
+static void hsw_crtc_disable(struct intel_atomic_state *state,
+ struct intel_crtc *crtc)
{
- struct drm_crtc *crtc = old_crtc_state->base.crtc;
- struct drm_i915_private *dev_priv = to_i915(crtc->dev);
- struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
- enum transcoder cpu_transcoder = old_crtc_state->cpu_transcoder;
-
- intel_encoders_disable(intel_crtc, old_crtc_state, state);
-
- drm_crtc_vblank_off(crtc);
- assert_vblank_disabled(crtc);
-
- /* XXX: Do the pipe assertions at the right place for BXT DSI. */
- if (!transcoder_is_dsi(cpu_transcoder))
- intel_disable_pipe(old_crtc_state);
-
- if (intel_crtc_has_type(old_crtc_state, INTEL_OUTPUT_DP_MST))
- intel_ddi_set_vc_payload_alloc(old_crtc_state, false);
-
- if (INTEL_GEN(dev_priv) >= 11)
- icl_disable_transcoder_port_sync(old_crtc_state);
-
- if (!transcoder_is_dsi(cpu_transcoder))
- intel_ddi_disable_transcoder_func(old_crtc_state);
-
- intel_dsc_disable(old_crtc_state);
-
- if (INTEL_GEN(dev_priv) >= 9)
- skylake_scaler_disable(intel_crtc);
- else
- ironlake_pfit_disable(old_crtc_state);
-
- intel_encoders_post_disable(intel_crtc, old_crtc_state, state);
-
- intel_encoders_post_pll_disable(intel_crtc, old_crtc_state, state);
+ /*
+ * FIXME collapse everything to one hook.
+ * Need care with mst->ddi interactions.
+ */
+ intel_encoders_disable(state, crtc);
+ intel_encoders_post_disable(state, crtc);
}
static void i9xx_pfit_enable(const struct intel_crtc_state *crtc_state)
{
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
if (!crtc_state->gmch_pfit.control)
@@ -6763,7 +7110,7 @@ static void i9xx_pfit_enable(const struct intel_crtc_state *crtc_state)
* according to register description and PRM.
*/
WARN_ON(I915_READ(PFIT_CONTROL) & PFIT_ENABLE);
- assert_pipe_disabled(dev_priv, crtc->pipe);
+ assert_pipe_disabled(dev_priv, crtc_state->cpu_transcoder);
I915_WRITE(PFIT_PGM_RATIOS, crtc_state->gmch_pfit.pgm_ratios);
I915_WRITE(PFIT_CONTROL, crtc_state->gmch_pfit.control);
@@ -6888,14 +7235,14 @@ intel_aux_power_domain(struct intel_digital_port *dig_port)
static u64 get_crtc_power_domains(struct intel_crtc_state *crtc_state)
{
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
struct drm_encoder *encoder;
enum pipe pipe = crtc->pipe;
u64 mask;
enum transcoder transcoder = crtc_state->cpu_transcoder;
- if (!crtc_state->base.active)
+ if (!crtc_state->hw.active)
return 0;
mask = BIT_ULL(POWER_DOMAIN_PIPE(pipe));
@@ -6905,7 +7252,7 @@ static u64 get_crtc_power_domains(struct intel_crtc_state *crtc_state)
mask |= BIT_ULL(POWER_DOMAIN_PIPE_PANEL_FITTER(pipe));
drm_for_each_encoder_mask(encoder, &dev_priv->drm,
- crtc_state->base.encoder_mask) {
+ crtc_state->uapi.encoder_mask) {
struct intel_encoder *intel_encoder = to_intel_encoder(encoder);
mask |= BIT_ULL(intel_encoder->power_domain);
@@ -6923,7 +7270,7 @@ static u64 get_crtc_power_domains(struct intel_crtc_state *crtc_state)
static u64
modeset_get_crtc_power_domains(struct intel_crtc_state *crtc_state)
{
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
enum intel_display_power_domain domain;
u64 domains, new_domains, old_domains;
@@ -6949,146 +7296,140 @@ static void modeset_put_power_domains(struct drm_i915_private *dev_priv,
intel_display_power_put_unchecked(dev_priv, domain);
}
-static void valleyview_crtc_enable(struct intel_crtc_state *pipe_config,
- struct intel_atomic_state *state)
+static void valleyview_crtc_enable(struct intel_atomic_state *state,
+ struct intel_crtc *crtc)
{
- struct drm_crtc *crtc = pipe_config->base.crtc;
- struct drm_device *dev = crtc->dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
- struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
- enum pipe pipe = intel_crtc->pipe;
+ const struct intel_crtc_state *new_crtc_state =
+ intel_atomic_get_new_crtc_state(state, crtc);
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ enum pipe pipe = crtc->pipe;
- if (WARN_ON(intel_crtc->active))
+ if (WARN_ON(crtc->active))
return;
- if (intel_crtc_has_dp_encoder(pipe_config))
- intel_dp_set_m_n(pipe_config, M1_N1);
+ if (intel_crtc_has_dp_encoder(new_crtc_state))
+ intel_dp_set_m_n(new_crtc_state, M1_N1);
- intel_set_pipe_timings(pipe_config);
- intel_set_pipe_src_size(pipe_config);
+ intel_set_pipe_timings(new_crtc_state);
+ intel_set_pipe_src_size(new_crtc_state);
if (IS_CHERRYVIEW(dev_priv) && pipe == PIPE_B) {
I915_WRITE(CHV_BLEND(pipe), CHV_BLEND_LEGACY);
I915_WRITE(CHV_CANVAS(pipe), 0);
}
- i9xx_set_pipeconf(pipe_config);
+ i9xx_set_pipeconf(new_crtc_state);
- intel_crtc->active = true;
+ crtc->active = true;
intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
- intel_encoders_pre_pll_enable(intel_crtc, pipe_config, state);
+ intel_encoders_pre_pll_enable(state, crtc);
if (IS_CHERRYVIEW(dev_priv)) {
- chv_prepare_pll(intel_crtc, pipe_config);
- chv_enable_pll(intel_crtc, pipe_config);
+ chv_prepare_pll(crtc, new_crtc_state);
+ chv_enable_pll(crtc, new_crtc_state);
} else {
- vlv_prepare_pll(intel_crtc, pipe_config);
- vlv_enable_pll(intel_crtc, pipe_config);
+ vlv_prepare_pll(crtc, new_crtc_state);
+ vlv_enable_pll(crtc, new_crtc_state);
}
- intel_encoders_pre_enable(intel_crtc, pipe_config, state);
+ intel_encoders_pre_enable(state, crtc);
- i9xx_pfit_enable(pipe_config);
+ i9xx_pfit_enable(new_crtc_state);
- intel_color_load_luts(pipe_config);
- intel_color_commit(pipe_config);
+ intel_color_load_luts(new_crtc_state);
+ intel_color_commit(new_crtc_state);
/* update DSPCNTR to configure gamma for pipe bottom color */
- intel_disable_primary_plane(pipe_config);
+ intel_disable_primary_plane(new_crtc_state);
- dev_priv->display.initial_watermarks(state, pipe_config);
- intel_enable_pipe(pipe_config);
+ dev_priv->display.initial_watermarks(state, crtc);
+ intel_enable_pipe(new_crtc_state);
- assert_vblank_disabled(crtc);
- intel_crtc_vblank_on(pipe_config);
+ intel_crtc_vblank_on(new_crtc_state);
- intel_encoders_enable(intel_crtc, pipe_config, state);
+ intel_encoders_enable(state, crtc);
}
static void i9xx_set_pll_dividers(const struct intel_crtc_state *crtc_state)
{
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
I915_WRITE(FP0(crtc->pipe), crtc_state->dpll_hw_state.fp0);
I915_WRITE(FP1(crtc->pipe), crtc_state->dpll_hw_state.fp1);
}
-static void i9xx_crtc_enable(struct intel_crtc_state *pipe_config,
- struct intel_atomic_state *state)
+static void i9xx_crtc_enable(struct intel_atomic_state *state,
+ struct intel_crtc *crtc)
{
- struct drm_crtc *crtc = pipe_config->base.crtc;
- struct drm_device *dev = crtc->dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
- struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
- enum pipe pipe = intel_crtc->pipe;
+ const struct intel_crtc_state *new_crtc_state =
+ intel_atomic_get_new_crtc_state(state, crtc);
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ enum pipe pipe = crtc->pipe;
- if (WARN_ON(intel_crtc->active))
+ if (WARN_ON(crtc->active))
return;
- i9xx_set_pll_dividers(pipe_config);
+ i9xx_set_pll_dividers(new_crtc_state);
- if (intel_crtc_has_dp_encoder(pipe_config))
- intel_dp_set_m_n(pipe_config, M1_N1);
+ if (intel_crtc_has_dp_encoder(new_crtc_state))
+ intel_dp_set_m_n(new_crtc_state, M1_N1);
- intel_set_pipe_timings(pipe_config);
- intel_set_pipe_src_size(pipe_config);
+ intel_set_pipe_timings(new_crtc_state);
+ intel_set_pipe_src_size(new_crtc_state);
- i9xx_set_pipeconf(pipe_config);
+ i9xx_set_pipeconf(new_crtc_state);
- intel_crtc->active = true;
+ crtc->active = true;
if (!IS_GEN(dev_priv, 2))
intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
- intel_encoders_pre_enable(intel_crtc, pipe_config, state);
+ intel_encoders_pre_enable(state, crtc);
- i9xx_enable_pll(intel_crtc, pipe_config);
+ i9xx_enable_pll(crtc, new_crtc_state);
- i9xx_pfit_enable(pipe_config);
+ i9xx_pfit_enable(new_crtc_state);
- intel_color_load_luts(pipe_config);
- intel_color_commit(pipe_config);
+ intel_color_load_luts(new_crtc_state);
+ intel_color_commit(new_crtc_state);
/* update DSPCNTR to configure gamma for pipe bottom color */
- intel_disable_primary_plane(pipe_config);
+ intel_disable_primary_plane(new_crtc_state);
- if (dev_priv->display.initial_watermarks != NULL)
- dev_priv->display.initial_watermarks(state,
- pipe_config);
+ if (dev_priv->display.initial_watermarks)
+ dev_priv->display.initial_watermarks(state, crtc);
else
- intel_update_watermarks(intel_crtc);
- intel_enable_pipe(pipe_config);
+ intel_update_watermarks(crtc);
+ intel_enable_pipe(new_crtc_state);
- assert_vblank_disabled(crtc);
- intel_crtc_vblank_on(pipe_config);
+ intel_crtc_vblank_on(new_crtc_state);
- intel_encoders_enable(intel_crtc, pipe_config, state);
+ intel_encoders_enable(state, crtc);
}
static void i9xx_pfit_disable(const struct intel_crtc_state *old_crtc_state)
{
- struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->base.crtc);
+ struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->uapi.crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
if (!old_crtc_state->gmch_pfit.control)
return;
- assert_pipe_disabled(dev_priv, crtc->pipe);
+ assert_pipe_disabled(dev_priv, old_crtc_state->cpu_transcoder);
DRM_DEBUG_KMS("disabling pfit, current: 0x%08x\n",
I915_READ(PFIT_CONTROL));
I915_WRITE(PFIT_CONTROL, 0);
}
-static void i9xx_crtc_disable(struct intel_crtc_state *old_crtc_state,
- struct intel_atomic_state *state)
+static void i9xx_crtc_disable(struct intel_atomic_state *state,
+ struct intel_crtc *crtc)
{
- struct drm_crtc *crtc = old_crtc_state->base.crtc;
- struct drm_device *dev = crtc->dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
- struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
- enum pipe pipe = intel_crtc->pipe;
+ struct intel_crtc_state *old_crtc_state =
+ intel_atomic_get_old_crtc_state(state, crtc);
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ enum pipe pipe = crtc->pipe;
/*
* On gen2 planes are double buffered but the pipe isn't, so we must
@@ -7097,16 +7438,15 @@ static void i9xx_crtc_disable(struct intel_crtc_state *old_crtc_state,
if (IS_GEN(dev_priv, 2))
intel_wait_for_vblank(dev_priv, pipe);
- intel_encoders_disable(intel_crtc, old_crtc_state, state);
+ intel_encoders_disable(state, crtc);
- drm_crtc_vblank_off(crtc);
- assert_vblank_disabled(crtc);
+ intel_crtc_vblank_off(old_crtc_state);
intel_disable_pipe(old_crtc_state);
i9xx_pfit_disable(old_crtc_state);
- intel_encoders_post_disable(intel_crtc, old_crtc_state, state);
+ intel_encoders_post_disable(state, crtc);
if (!intel_crtc_has_type(old_crtc_state, INTEL_OUTPUT_DSI)) {
if (IS_CHERRYVIEW(dev_priv))
@@ -7117,92 +7457,97 @@ static void i9xx_crtc_disable(struct intel_crtc_state *old_crtc_state,
i9xx_disable_pll(old_crtc_state);
}
- intel_encoders_post_pll_disable(intel_crtc, old_crtc_state, state);
+ intel_encoders_post_pll_disable(state, crtc);
if (!IS_GEN(dev_priv, 2))
intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);
if (!dev_priv->display.initial_watermarks)
- intel_update_watermarks(intel_crtc);
+ intel_update_watermarks(crtc);
/* clock the pipe down to 640x480@60 to potentially save power */
if (IS_I830(dev_priv))
i830_enable_pipe(dev_priv, pipe);
}
-static void intel_crtc_disable_noatomic(struct drm_crtc *crtc,
+static void intel_crtc_disable_noatomic(struct intel_crtc *crtc,
struct drm_modeset_acquire_ctx *ctx)
{
struct intel_encoder *encoder;
- struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
- struct drm_i915_private *dev_priv = to_i915(crtc->dev);
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
struct intel_bw_state *bw_state =
to_intel_bw_state(dev_priv->bw_obj.state);
+ struct intel_crtc_state *crtc_state =
+ to_intel_crtc_state(crtc->base.state);
enum intel_display_power_domain domain;
struct intel_plane *plane;
- u64 domains;
struct drm_atomic_state *state;
- struct intel_crtc_state *crtc_state;
+ struct intel_crtc_state *temp_crtc_state;
+ enum pipe pipe = crtc->pipe;
+ u64 domains;
int ret;
- if (!intel_crtc->active)
+ if (!crtc_state->hw.active)
return;
- for_each_intel_plane_on_crtc(&dev_priv->drm, intel_crtc, plane) {
+ for_each_intel_plane_on_crtc(&dev_priv->drm, crtc, plane) {
const struct intel_plane_state *plane_state =
to_intel_plane_state(plane->base.state);
- if (plane_state->base.visible)
- intel_plane_disable_noatomic(intel_crtc, plane);
+ if (plane_state->uapi.visible)
+ intel_plane_disable_noatomic(crtc, plane);
}
- state = drm_atomic_state_alloc(crtc->dev);
+ state = drm_atomic_state_alloc(&dev_priv->drm);
if (!state) {
DRM_DEBUG_KMS("failed to disable [CRTC:%d:%s], out of memory",
- crtc->base.id, crtc->name);
+ crtc->base.base.id, crtc->base.name);
return;
}
state->acquire_ctx = ctx;
/* Everything's already locked, -EDEADLK can't happen. */
- crtc_state = intel_atomic_get_crtc_state(state, intel_crtc);
- ret = drm_atomic_add_affected_connectors(state, crtc);
+ temp_crtc_state = intel_atomic_get_crtc_state(state, crtc);
+ ret = drm_atomic_add_affected_connectors(state, &crtc->base);
- WARN_ON(IS_ERR(crtc_state) || ret);
+ WARN_ON(IS_ERR(temp_crtc_state) || ret);
- dev_priv->display.crtc_disable(crtc_state, to_intel_atomic_state(state));
+ dev_priv->display.crtc_disable(to_intel_atomic_state(state), crtc);
drm_atomic_state_put(state);
DRM_DEBUG_KMS("[CRTC:%d:%s] hw state adjusted, was enabled, now disabled\n",
- crtc->base.id, crtc->name);
+ crtc->base.base.id, crtc->base.name);
- WARN_ON(drm_atomic_set_mode_for_crtc(crtc->state, NULL) < 0);
- crtc->state->active = false;
- intel_crtc->active = false;
- crtc->enabled = false;
- crtc->state->connector_mask = 0;
- crtc->state->encoder_mask = 0;
+ crtc->active = false;
+ crtc->base.enabled = false;
+
+ WARN_ON(drm_atomic_set_mode_for_crtc(&crtc_state->uapi, NULL) < 0);
+ crtc_state->uapi.active = false;
+ crtc_state->uapi.connector_mask = 0;
+ crtc_state->uapi.encoder_mask = 0;
+ intel_crtc_free_hw_state(crtc_state);
+ memset(&crtc_state->hw, 0, sizeof(crtc_state->hw));
- for_each_encoder_on_crtc(crtc->dev, crtc, encoder)
+ for_each_encoder_on_crtc(&dev_priv->drm, &crtc->base, encoder)
encoder->base.crtc = NULL;
- intel_fbc_disable(intel_crtc);
- intel_update_watermarks(intel_crtc);
- intel_disable_shared_dpll(to_intel_crtc_state(crtc->state));
+ intel_fbc_disable(crtc);
+ intel_update_watermarks(crtc);
+ intel_disable_shared_dpll(crtc_state);
- domains = intel_crtc->enabled_power_domains;
+ domains = crtc->enabled_power_domains;
for_each_power_domain(domain, domains)
intel_display_power_put_unchecked(dev_priv, domain);
- intel_crtc->enabled_power_domains = 0;
+ crtc->enabled_power_domains = 0;
- dev_priv->active_pipes &= ~BIT(intel_crtc->pipe);
- dev_priv->min_cdclk[intel_crtc->pipe] = 0;
- dev_priv->min_voltage_level[intel_crtc->pipe] = 0;
+ dev_priv->active_pipes &= ~BIT(pipe);
+ dev_priv->min_cdclk[pipe] = 0;
+ dev_priv->min_voltage_level[pipe] = 0;
- bw_state->data_rate[intel_crtc->pipe] = 0;
- bw_state->num_active_planes[intel_crtc->pipe] = 0;
+ bw_state->data_rate[pipe] = 0;
+ bw_state->num_active_planes[pipe] = 0;
}
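/*
 * The base -> uapi/hw churn throughout this function comes from
 * splitting intel_crtc_state into a userspace-facing half and a
 * hardware-facing half: uapi is what the last atomic commit asked for,
 * hw mirrors what is actually programmed (and is what gets freed and
 * zeroed above).  An illustrative subset of the layout -- a sketch,
 * not the full upstream struct:
 */
struct example_crtc_state {
	struct drm_crtc_state uapi;	/* atomic core / userspace view */
	struct {
		bool active, enable;
		struct drm_display_mode mode, adjusted_mode;
	} hw;				/* readout / programmed state */
};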
/*
@@ -7252,8 +7597,8 @@ static void intel_connector_verify_state(struct intel_crtc_state *crtc_state,
if (!crtc_state)
return;
- I915_STATE_WARN(!crtc_state->base.active,
- "connector is active, but attached crtc isn't\n");
+ I915_STATE_WARN(!crtc_state->hw.active,
+ "connector is active, but attached crtc isn't\n");
if (!encoder || encoder->type == INTEL_OUTPUT_DP_MST)
return;
@@ -7264,8 +7609,8 @@ static void intel_connector_verify_state(struct intel_crtc_state *crtc_state,
I915_STATE_WARN(conn_state->crtc != encoder->base.crtc,
"attached encoder crtc differs from connector crtc\n");
} else {
- I915_STATE_WARN(crtc_state && crtc_state->base.active,
- "attached crtc is active, but connector isn't\n");
+ I915_STATE_WARN(crtc_state && crtc_state->hw.active,
+ "attached crtc is active, but connector isn't\n");
I915_STATE_WARN(!crtc_state && conn_state->best_encoder,
"best encoder set without crtc!\n");
}
@@ -7273,17 +7618,17 @@ static void intel_connector_verify_state(struct intel_crtc_state *crtc_state,
static int pipe_required_fdi_lanes(struct intel_crtc_state *crtc_state)
{
- if (crtc_state->base.enable && crtc_state->has_pch_encoder)
+ if (crtc_state->hw.enable && crtc_state->has_pch_encoder)
return crtc_state->fdi_lanes;
return 0;
}
-static int ironlake_check_fdi_lanes(struct drm_device *dev, enum pipe pipe,
- struct intel_crtc_state *pipe_config)
+static int ilk_check_fdi_lanes(struct drm_device *dev, enum pipe pipe,
+ struct intel_crtc_state *pipe_config)
{
struct drm_i915_private *dev_priv = to_i915(dev);
- struct drm_atomic_state *state = pipe_config->base.state;
+ struct drm_atomic_state *state = pipe_config->uapi.state;
struct intel_crtc *other_crtc;
struct intel_crtc_state *other_crtc_state;
@@ -7352,11 +7697,11 @@ static int ironlake_check_fdi_lanes(struct drm_device *dev, enum pipe pipe,
}
#define RETRY 1
-static int ironlake_fdi_compute_config(struct intel_crtc *intel_crtc,
- struct intel_crtc_state *pipe_config)
+static int ilk_fdi_compute_config(struct intel_crtc *intel_crtc,
+ struct intel_crtc_state *pipe_config)
{
struct drm_device *dev = intel_crtc->base.dev;
- const struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;
+ const struct drm_display_mode *adjusted_mode = &pipe_config->hw.adjusted_mode;
int lane, link_bw, fdi_dotclock, ret;
bool needs_recompute = false;
@@ -7372,15 +7717,15 @@ retry:
fdi_dotclock = adjusted_mode->crtc_clock;
- lane = ironlake_get_lanes_required(fdi_dotclock, link_bw,
- pipe_config->pipe_bpp);
+ lane = ilk_get_lanes_required(fdi_dotclock, link_bw,
+ pipe_config->pipe_bpp);
pipe_config->fdi_lanes = lane;
intel_link_compute_m_n(pipe_config->pipe_bpp, lane, fdi_dotclock,
link_bw, &pipe_config->fdi_m_n, false, false);
- ret = ironlake_check_fdi_lanes(dev, intel_crtc->pipe, pipe_config);
+ ret = ilk_check_fdi_lanes(dev, intel_crtc->pipe, pipe_config);
if (ret == -EDEADLK)
return ret;
@@ -7402,7 +7747,7 @@ retry:
bool hsw_crtc_state_ips_capable(const struct intel_crtc_state *crtc_state)
{
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
/* IPS only exists on ULT machines and is tied to pipe A. */
@@ -7432,9 +7777,9 @@ bool hsw_crtc_state_ips_capable(const struct intel_crtc_state *crtc_state)
static bool hsw_compute_ips_config(struct intel_crtc_state *crtc_state)
{
struct drm_i915_private *dev_priv =
- to_i915(crtc_state->base.crtc->dev);
+ to_i915(crtc_state->uapi.crtc->dev);
struct intel_atomic_state *intel_state =
- to_intel_atomic_state(crtc_state->base.state);
+ to_intel_atomic_state(crtc_state->uapi.state);
if (!hsw_crtc_state_ips_capable(crtc_state))
return false;
@@ -7473,7 +7818,7 @@ static u32 ilk_pipe_pixel_rate(const struct intel_crtc_state *pipe_config)
{
u32 pixel_rate;
- pixel_rate = pipe_config->base.adjusted_mode.crtc_clock;
+ pixel_rate = pipe_config->hw.adjusted_mode.crtc_clock;
/*
* We only use IF-ID interlacing. If we ever use
@@ -7506,12 +7851,12 @@ static u32 ilk_pipe_pixel_rate(const struct intel_crtc_state *pipe_config)
static void intel_crtc_compute_pixel_rate(struct intel_crtc_state *crtc_state)
{
- struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev);
+ struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
if (HAS_GMCH(dev_priv))
/* FIXME calculate proper pipe pixel rate for GMCH pfit */
crtc_state->pixel_rate =
- crtc_state->base.adjusted_mode.crtc_clock;
+ crtc_state->hw.adjusted_mode.crtc_clock;
else
crtc_state->pixel_rate =
ilk_pipe_pixel_rate(crtc_state);
@@ -7521,7 +7866,7 @@ static int intel_crtc_compute_config(struct intel_crtc *crtc,
struct intel_crtc_state *pipe_config)
{
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
- const struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;
+ const struct drm_display_mode *adjusted_mode = &pipe_config->hw.adjusted_mode;
int clock_limit = dev_priv->max_dotclk_freq;
if (INTEL_GEN(dev_priv) < 4) {
@@ -7547,7 +7892,7 @@ static int intel_crtc_compute_config(struct intel_crtc *crtc,
if ((pipe_config->output_format == INTEL_OUTPUT_FORMAT_YCBCR420 ||
pipe_config->output_format == INTEL_OUTPUT_FORMAT_YCBCR444) &&
- pipe_config->base.ctm) {
+ pipe_config->hw.ctm) {
/*
* There is only one pipe CSC unit per pipe, and we need that
* for output conversion from RGB->YCBCR. So if CTM is already
@@ -7586,7 +7931,7 @@ static int intel_crtc_compute_config(struct intel_crtc *crtc,
intel_crtc_compute_pixel_rate(pipe_config);
if (pipe_config->has_pch_encoder)
- return ironlake_fdi_compute_config(crtc, pipe_config);
+ return ilk_fdi_compute_config(crtc, pipe_config);
return 0;
}
@@ -7741,7 +8086,7 @@ static void vlv_pllb_recal_opamp(struct drm_i915_private *dev_priv, enum pipe
static void intel_pch_transcoder_set_m_n(const struct intel_crtc_state *crtc_state,
const struct intel_link_m_n *m_n)
{
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
enum pipe pipe = crtc->pipe;
@@ -7768,7 +8113,7 @@ static void intel_cpu_transcoder_set_m_n(const struct intel_crtc_state *crtc_state,
const struct intel_link_m_n *m_n,
const struct intel_link_m_n *m2_n2)
{
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
enum pipe pipe = crtc->pipe;
enum transcoder transcoder = crtc_state->cpu_transcoder;
@@ -8077,11 +8422,11 @@ int vlv_force_pll_on(struct drm_i915_private *dev_priv, enum pipe pipe,
struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
struct intel_crtc_state *pipe_config;
- pipe_config = kzalloc(sizeof(*pipe_config), GFP_KERNEL);
+ pipe_config = intel_crtc_state_alloc(crtc);
if (!pipe_config)
return -ENOMEM;
- pipe_config->base.crtc = &crtc->base;
+ pipe_config->cpu_transcoder = (enum transcoder)pipe;
pipe_config->pixel_multiplier = 1;
pipe_config->dpll = *dpll;
@@ -8241,11 +8586,11 @@ static void i8xx_compute_dpll(struct intel_crtc *crtc,
static void intel_set_pipe_timings(const struct intel_crtc_state *crtc_state)
{
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
enum pipe pipe = crtc->pipe;
enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
- const struct drm_display_mode *adjusted_mode = &crtc_state->base.adjusted_mode;
+ const struct drm_display_mode *adjusted_mode = &crtc_state->hw.adjusted_mode;
u32 crtc_vtotal, crtc_vblank_end;
int vsyncshift = 0;
@@ -8303,7 +8648,7 @@ static void intel_set_pipe_timings(const struct intel_crtc_state *crtc_state)
static void intel_set_pipe_src_size(const struct intel_crtc_state *crtc_state)
{
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
enum pipe pipe = crtc->pipe;
@@ -8317,7 +8662,7 @@ static void intel_set_pipe_src_size(const struct intel_crtc_state *crtc_state)
static bool intel_pipe_is_interlaced(const struct intel_crtc_state *crtc_state)
{
- struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev);
+ struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
if (IS_GEN(dev_priv, 2))
@@ -8339,39 +8684,39 @@ static void intel_get_pipe_timings(struct intel_crtc *crtc,
u32 tmp;
tmp = I915_READ(HTOTAL(cpu_transcoder));
- pipe_config->base.adjusted_mode.crtc_hdisplay = (tmp & 0xffff) + 1;
- pipe_config->base.adjusted_mode.crtc_htotal = ((tmp >> 16) & 0xffff) + 1;
+ pipe_config->hw.adjusted_mode.crtc_hdisplay = (tmp & 0xffff) + 1;
+ pipe_config->hw.adjusted_mode.crtc_htotal = ((tmp >> 16) & 0xffff) + 1;
if (!transcoder_is_dsi(cpu_transcoder)) {
tmp = I915_READ(HBLANK(cpu_transcoder));
- pipe_config->base.adjusted_mode.crtc_hblank_start =
+ pipe_config->hw.adjusted_mode.crtc_hblank_start =
(tmp & 0xffff) + 1;
- pipe_config->base.adjusted_mode.crtc_hblank_end =
+ pipe_config->hw.adjusted_mode.crtc_hblank_end =
((tmp >> 16) & 0xffff) + 1;
}
tmp = I915_READ(HSYNC(cpu_transcoder));
- pipe_config->base.adjusted_mode.crtc_hsync_start = (tmp & 0xffff) + 1;
- pipe_config->base.adjusted_mode.crtc_hsync_end = ((tmp >> 16) & 0xffff) + 1;
+ pipe_config->hw.adjusted_mode.crtc_hsync_start = (tmp & 0xffff) + 1;
+ pipe_config->hw.adjusted_mode.crtc_hsync_end = ((tmp >> 16) & 0xffff) + 1;
tmp = I915_READ(VTOTAL(cpu_transcoder));
- pipe_config->base.adjusted_mode.crtc_vdisplay = (tmp & 0xffff) + 1;
- pipe_config->base.adjusted_mode.crtc_vtotal = ((tmp >> 16) & 0xffff) + 1;
+ pipe_config->hw.adjusted_mode.crtc_vdisplay = (tmp & 0xffff) + 1;
+ pipe_config->hw.adjusted_mode.crtc_vtotal = ((tmp >> 16) & 0xffff) + 1;
if (!transcoder_is_dsi(cpu_transcoder)) {
tmp = I915_READ(VBLANK(cpu_transcoder));
- pipe_config->base.adjusted_mode.crtc_vblank_start =
+ pipe_config->hw.adjusted_mode.crtc_vblank_start =
(tmp & 0xffff) + 1;
- pipe_config->base.adjusted_mode.crtc_vblank_end =
+ pipe_config->hw.adjusted_mode.crtc_vblank_end =
((tmp >> 16) & 0xffff) + 1;
}
tmp = I915_READ(VSYNC(cpu_transcoder));
- pipe_config->base.adjusted_mode.crtc_vsync_start = (tmp & 0xffff) + 1;
- pipe_config->base.adjusted_mode.crtc_vsync_end = ((tmp >> 16) & 0xffff) + 1;
+ pipe_config->hw.adjusted_mode.crtc_vsync_start = (tmp & 0xffff) + 1;
+ pipe_config->hw.adjusted_mode.crtc_vsync_end = ((tmp >> 16) & 0xffff) + 1;
if (intel_pipe_is_interlaced(pipe_config)) {
- pipe_config->base.adjusted_mode.flags |= DRM_MODE_FLAG_INTERLACE;
- pipe_config->base.adjusted_mode.crtc_vtotal += 1;
- pipe_config->base.adjusted_mode.crtc_vblank_end += 1;
+ pipe_config->hw.adjusted_mode.flags |= DRM_MODE_FLAG_INTERLACE;
+ pipe_config->hw.adjusted_mode.crtc_vtotal += 1;
+ pipe_config->hw.adjusted_mode.crtc_vblank_end += 1;
}
}
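/*
 * The "+ 1" on every field above reflects the timing-register encoding:
 * hardware stores each value minus one, with the active/start count in
 * the low 16 bits and the total/end count in the high 16 bits.  Decoding
 * is simply:
 */
static inline int decode_timing_lo(u32 tmp)
{
	return (tmp & 0xffff) + 1;	/* e.g. crtc_hdisplay, crtc_vsync_start */
}

static inline int decode_timing_hi(u32 tmp)
{
	return ((tmp >> 16) & 0xffff) + 1;	/* e.g. crtc_htotal, crtc_vsync_end */
}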
@@ -8386,27 +8731,27 @@ static void intel_get_pipe_src_size(struct intel_crtc *crtc,
pipe_config->pipe_src_h = (tmp & 0xffff) + 1;
pipe_config->pipe_src_w = ((tmp >> 16) & 0xffff) + 1;
- pipe_config->base.mode.vdisplay = pipe_config->pipe_src_h;
- pipe_config->base.mode.hdisplay = pipe_config->pipe_src_w;
+ pipe_config->hw.mode.vdisplay = pipe_config->pipe_src_h;
+ pipe_config->hw.mode.hdisplay = pipe_config->pipe_src_w;
}
void intel_mode_from_pipe_config(struct drm_display_mode *mode,
struct intel_crtc_state *pipe_config)
{
- mode->hdisplay = pipe_config->base.adjusted_mode.crtc_hdisplay;
- mode->htotal = pipe_config->base.adjusted_mode.crtc_htotal;
- mode->hsync_start = pipe_config->base.adjusted_mode.crtc_hsync_start;
- mode->hsync_end = pipe_config->base.adjusted_mode.crtc_hsync_end;
+ mode->hdisplay = pipe_config->hw.adjusted_mode.crtc_hdisplay;
+ mode->htotal = pipe_config->hw.adjusted_mode.crtc_htotal;
+ mode->hsync_start = pipe_config->hw.adjusted_mode.crtc_hsync_start;
+ mode->hsync_end = pipe_config->hw.adjusted_mode.crtc_hsync_end;
- mode->vdisplay = pipe_config->base.adjusted_mode.crtc_vdisplay;
- mode->vtotal = pipe_config->base.adjusted_mode.crtc_vtotal;
- mode->vsync_start = pipe_config->base.adjusted_mode.crtc_vsync_start;
- mode->vsync_end = pipe_config->base.adjusted_mode.crtc_vsync_end;
+ mode->vdisplay = pipe_config->hw.adjusted_mode.crtc_vdisplay;
+ mode->vtotal = pipe_config->hw.adjusted_mode.crtc_vtotal;
+ mode->vsync_start = pipe_config->hw.adjusted_mode.crtc_vsync_start;
+ mode->vsync_end = pipe_config->hw.adjusted_mode.crtc_vsync_end;
- mode->flags = pipe_config->base.adjusted_mode.flags;
+ mode->flags = pipe_config->hw.adjusted_mode.flags;
mode->type = DRM_MODE_TYPE_DRIVER;
- mode->clock = pipe_config->base.adjusted_mode.crtc_clock;
+ mode->clock = pipe_config->hw.adjusted_mode.crtc_clock;
mode->hsync = drm_mode_hsync(mode);
mode->vrefresh = drm_mode_vrefresh(mode);
@@ -8415,7 +8760,7 @@ void intel_mode_from_pipe_config(struct drm_display_mode *mode,
static void i9xx_set_pipeconf(const struct intel_crtc_state *crtc_state)
{
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
u32 pipeconf;
@@ -8452,7 +8797,7 @@ static void i9xx_set_pipeconf(const struct intel_crtc_state *crtc_state)
}
}
- if (crtc_state->base.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE) {
+ if (crtc_state->hw.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE) {
if (INTEL_GEN(dev_priv) < 4 ||
intel_crtc_has_type(crtc_state, INTEL_OUTPUT_SDVO))
pipeconf |= PIPECONF_INTERLACE_W_FIELD_INDICATION;
@@ -8468,6 +8813,8 @@ static void i9xx_set_pipeconf(const struct intel_crtc_state *crtc_state)
pipeconf |= PIPECONF_GAMMA_MODE(crtc_state->gamma_mode);
+ pipeconf |= PIPECONF_FRAME_START_DELAY(0);
+
I915_WRITE(PIPECONF(crtc->pipe), pipeconf);
POSTING_READ(PIPECONF(crtc->pipe));
}
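/*
 * The POSTING_READ() above is a plain MMIO posting flush: reading
 * PIPECONF back forces the preceding write out of any write buffers
 * before the function returns.
 */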
@@ -8567,9 +8914,9 @@ static int pnv_crtc_compute_clock(struct intel_crtc *crtc,
DRM_DEBUG_KMS("using SSC reference clock of %d kHz\n", refclk);
}
- limit = &intel_limits_pineview_lvds;
+ limit = &pnv_limits_lvds;
} else {
- limit = &intel_limits_pineview_sdvo;
+ limit = &pnv_limits_sdvo;
}
if (!crtc_state->clock_set &&
@@ -8861,7 +9208,7 @@ bdw_get_pipemisc_output_format(struct intel_crtc *crtc)
static void i9xx_get_pipe_color_config(struct intel_crtc_state *crtc_state)
{
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
struct intel_plane *plane = to_intel_plane(crtc->base.primary);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
enum i9xx_plane_id i9xx_plane = plane->i9xx_plane;
@@ -8985,7 +9332,7 @@ static bool i9xx_get_pipe_config(struct intel_crtc *crtc,
* but in case the pipe is enabled w/o any ports we need a sane
* default.
*/
- pipe_config->base.adjusted_mode.crtc_clock =
+ pipe_config->hw.adjusted_mode.crtc_clock =
pipe_config->port_clock / pipe_config->pixel_multiplier;
ret = true;
@@ -8996,7 +9343,7 @@ out:
return ret;
}
-static void ironlake_init_pch_refclk(struct drm_i915_private *dev_priv)
+static void ilk_init_pch_refclk(struct drm_i915_private *dev_priv)
{
struct intel_encoder *encoder;
int i;
@@ -9494,14 +9841,14 @@ static void lpt_init_pch_refclk(struct drm_i915_private *dev_priv)
void intel_init_pch_refclk(struct drm_i915_private *dev_priv)
{
if (HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv))
- ironlake_init_pch_refclk(dev_priv);
+ ilk_init_pch_refclk(dev_priv);
else if (HAS_PCH_LPT(dev_priv))
lpt_init_pch_refclk(dev_priv);
}
-static void ironlake_set_pipeconf(const struct intel_crtc_state *crtc_state)
+static void ilk_set_pipeconf(const struct intel_crtc_state *crtc_state)
{
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
enum pipe pipe = crtc->pipe;
u32 val;
@@ -9529,7 +9876,7 @@ static void ironlake_set_pipeconf(const struct intel_crtc_state *crtc_state)
if (crtc_state->dither)
val |= (PIPECONF_DITHER_EN | PIPECONF_DITHER_TYPE_SP);
- if (crtc_state->base.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE)
+ if (crtc_state->hw.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE)
val |= PIPECONF_INTERLACED_ILK;
else
val |= PIPECONF_PROGRESSIVE;
@@ -9549,13 +9896,15 @@ static void ironlake_set_pipeconf(const struct intel_crtc_state *crtc_state)
val |= PIPECONF_GAMMA_MODE(crtc_state->gamma_mode);
+ val |= PIPECONF_FRAME_START_DELAY(0);
+
I915_WRITE(PIPECONF(pipe), val);
POSTING_READ(PIPECONF(pipe));
}
-static void haswell_set_pipeconf(const struct intel_crtc_state *crtc_state)
+static void hsw_set_pipeconf(const struct intel_crtc_state *crtc_state)
{
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
u32 val = 0;
@@ -9563,7 +9912,7 @@ static void haswell_set_pipeconf(const struct intel_crtc_state *crtc_state)
if (IS_HASWELL(dev_priv) && crtc_state->dither)
val |= (PIPECONF_DITHER_EN | PIPECONF_DITHER_TYPE_SP);
- if (crtc_state->base.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE)
+ if (crtc_state->hw.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE)
val |= PIPECONF_INTERLACED_ILK;
else
val |= PIPECONF_PROGRESSIVE;
@@ -9578,7 +9927,7 @@ static void haswell_set_pipeconf(const struct intel_crtc_state *crtc_state)
static void bdw_set_pipemisc(const struct intel_crtc_state *crtc_state)
{
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
u32 val = 0;
@@ -9641,7 +9990,7 @@ int bdw_get_pipemisc_bpp(struct intel_crtc *crtc)
}
}
-int ironlake_get_lanes_required(int target_clock, int link_bw, int bpp)
+int ilk_get_lanes_required(int target_clock, int link_bw, int bpp)
{
/*
* Account for spread spectrum to avoid
@@ -9652,14 +10001,14 @@ int ironlake_get_lanes_required(int target_clock, int link_bw, int bpp)
return DIV_ROUND_UP(bps, link_bw * 8);
}
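/*
 * A worked example for ilk_get_lanes_required() (numbers illustrative,
 * and assuming the elided body applies the 5% spread-spectrum margin as
 * bps = target_clock * bpp * 21 / 20): FDI runs at 2.7 GHz per lane,
 * so with target_clock = 148500 kHz, bpp = 24, link_bw = 270000 kHz:
 *   bps   = 148500 * 24 * 21 / 20 = 3742200
 *   lanes = DIV_ROUND_UP(3742200, 270000 * 8) = 2
 */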
-static bool ironlake_needs_fb_cb_tune(struct dpll *dpll, int factor)
+static bool ilk_needs_fb_cb_tune(struct dpll *dpll, int factor)
{
return i9xx_dpll_compute_m(dpll) < factor * dpll->n;
}
-static void ironlake_compute_dpll(struct intel_crtc *crtc,
- struct intel_crtc_state *crtc_state,
- struct dpll *reduced_clock)
+static void ilk_compute_dpll(struct intel_crtc *crtc,
+ struct intel_crtc_state *crtc_state,
+ struct dpll *reduced_clock)
{
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
u32 dpll, fp, fp2;
@@ -9679,7 +10028,7 @@ static void ironlake_compute_dpll(struct intel_crtc *crtc,
fp = i9xx_dpll_compute_fp(&crtc_state->dpll);
- if (ironlake_needs_fb_cb_tune(&crtc_state->dpll, factor))
+ if (ilk_needs_fb_cb_tune(&crtc_state->dpll, factor))
fp |= FP_CB_TUNE;
if (reduced_clock) {
@@ -9759,12 +10108,12 @@ static void ironlake_compute_dpll(struct intel_crtc *crtc,
crtc_state->dpll_hw_state.fp1 = fp2;
}
-static int ironlake_crtc_compute_clock(struct intel_crtc *crtc,
- struct intel_crtc_state *crtc_state)
+static int ilk_crtc_compute_clock(struct intel_crtc *crtc,
+ struct intel_crtc_state *crtc_state)
{
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
struct intel_atomic_state *state =
- to_intel_atomic_state(crtc_state->base.state);
+ to_intel_atomic_state(crtc_state->uapi.state);
const struct intel_limit *limit;
int refclk = 120000;
@@ -9784,17 +10133,17 @@ static int ironlake_crtc_compute_clock(struct intel_crtc *crtc,
if (intel_is_dual_link_lvds(dev_priv)) {
if (refclk == 100000)
- limit = &intel_limits_ironlake_dual_lvds_100m;
+ limit = &ilk_limits_dual_lvds_100m;
else
- limit = &intel_limits_ironlake_dual_lvds;
+ limit = &ilk_limits_dual_lvds;
} else {
if (refclk == 100000)
- limit = &intel_limits_ironlake_single_lvds_100m;
+ limit = &ilk_limits_single_lvds_100m;
else
- limit = &intel_limits_ironlake_single_lvds;
+ limit = &ilk_limits_single_lvds;
}
} else {
- limit = &intel_limits_ironlake_dac;
+ limit = &ilk_limits_dac;
}
if (!crtc_state->clock_set &&
@@ -9804,7 +10153,7 @@ static int ironlake_crtc_compute_clock(struct intel_crtc *crtc,
return -EINVAL;
}
- ironlake_compute_dpll(crtc, crtc_state, NULL);
+ ilk_compute_dpll(crtc, crtc_state, NULL);
if (!intel_reserve_shared_dplls(state, crtc, NULL)) {
DRM_DEBUG_KMS("failed to find PLL for pipe %c\n",
@@ -9879,15 +10228,15 @@ void intel_dp_get_m_n(struct intel_crtc *crtc,
&pipe_config->dp_m2_n2);
}
-static void ironlake_get_fdi_m_n_config(struct intel_crtc *crtc,
- struct intel_crtc_state *pipe_config)
+static void ilk_get_fdi_m_n_config(struct intel_crtc *crtc,
+ struct intel_crtc_state *pipe_config)
{
intel_cpu_transcoder_get_m_n(crtc, pipe_config->cpu_transcoder,
&pipe_config->fdi_m_n, NULL);
}
-static void skylake_get_pfit_config(struct intel_crtc *crtc,
- struct intel_crtc_state *pipe_config)
+static void skl_get_pfit_config(struct intel_crtc *crtc,
+ struct intel_crtc_state *pipe_config)
{
struct drm_device *dev = crtc->base.dev;
struct drm_i915_private *dev_priv = to_i915(dev);
@@ -9918,8 +10267,8 @@ static void skylake_get_pfit_config(struct intel_crtc *crtc,
}
static void
-skylake_get_initial_plane_config(struct intel_crtc *crtc,
- struct intel_initial_plane_config *plane_config)
+skl_get_initial_plane_config(struct intel_crtc *crtc,
+ struct intel_initial_plane_config *plane_config)
{
struct drm_device *dev = crtc->base.dev;
struct drm_i915_private *dev_priv = to_i915(dev);
@@ -9977,7 +10326,11 @@ skylake_get_initial_plane_config(struct intel_crtc *crtc,
case PLANE_CTL_TILED_Y:
plane_config->tiling = I915_TILING_Y;
if (val & PLANE_CTL_RENDER_DECOMPRESSION_ENABLE)
- fb->modifier = I915_FORMAT_MOD_Y_TILED_CCS;
+ fb->modifier = INTEL_GEN(dev_priv) >= 12 ?
+ I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS :
+ I915_FORMAT_MOD_Y_TILED_CCS;
+ else if (val & PLANE_CTL_MEDIA_DECOMPRESSION_ENABLE)
+ fb->modifier = I915_FORMAT_MOD_Y_TILED_GEN12_MC_CCS;
else
fb->modifier = I915_FORMAT_MOD_Y_TILED;
break;
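/*
 * The Y-tiled case above now decodes to four modifiers:
 *   render decompression, gen12+  -> I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS
 *   render decompression, gen9-11 -> I915_FORMAT_MOD_Y_TILED_CCS
 *   media decompression           -> I915_FORMAT_MOD_Y_TILED_GEN12_MC_CCS
 *   neither                       -> I915_FORMAT_MOD_Y_TILED
 */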
@@ -10044,8 +10397,8 @@ error:
kfree(intel_fb);
}
-static void ironlake_get_pfit_config(struct intel_crtc *crtc,
- struct intel_crtc_state *pipe_config)
+static void ilk_get_pfit_config(struct intel_crtc *crtc,
+ struct intel_crtc_state *pipe_config)
{
struct drm_device *dev = crtc->base.dev;
struct drm_i915_private *dev_priv = to_i915(dev);
@@ -10068,8 +10421,8 @@ static void ironlake_get_pfit_config(struct intel_crtc *crtc,
}
}
-static bool ironlake_get_pipe_config(struct intel_crtc *crtc,
- struct intel_crtc_state *pipe_config)
+static bool ilk_get_pipe_config(struct intel_crtc *crtc,
+ struct intel_crtc_state *pipe_config)
{
struct drm_device *dev = crtc->base.dev;
struct drm_i915_private *dev_priv = to_i915(dev);
@@ -10140,7 +10493,7 @@ static bool ironlake_get_pipe_config(struct intel_crtc *crtc,
pipe_config->fdi_lanes = ((FDI_DP_PORT_WIDTH_MASK & tmp) >>
FDI_DP_PORT_WIDTH_SHIFT) + 1;
- ironlake_get_fdi_m_n_config(crtc, pipe_config);
+ ilk_get_fdi_m_n_config(crtc, pipe_config);
if (HAS_PCH_IBX(dev_priv)) {
/*
@@ -10168,7 +10521,7 @@ static bool ironlake_get_pipe_config(struct intel_crtc *crtc,
((tmp & PLL_REF_SDVO_HDMI_MULTIPLIER_MASK)
>> PLL_REF_SDVO_HDMI_MULTIPLIER_SHIFT) + 1;
- ironlake_pch_clock_get(crtc, pipe_config);
+ ilk_pch_clock_get(crtc, pipe_config);
} else {
pipe_config->pixel_multiplier = 1;
}
@@ -10176,7 +10529,7 @@ static bool ironlake_get_pipe_config(struct intel_crtc *crtc,
intel_get_pipe_timings(crtc, pipe_config);
intel_get_pipe_src_size(crtc, pipe_config);
- ironlake_get_pfit_config(crtc, pipe_config);
+ ilk_get_pfit_config(crtc, pipe_config);
ret = true;
@@ -10185,12 +10538,13 @@ out:
return ret;
}
-static int haswell_crtc_compute_clock(struct intel_crtc *crtc,
- struct intel_crtc_state *crtc_state)
+
+static int hsw_crtc_compute_clock(struct intel_crtc *crtc,
+ struct intel_crtc_state *crtc_state)
{
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
struct intel_atomic_state *state =
- to_intel_atomic_state(crtc_state->base.state);
+ to_intel_atomic_state(crtc_state->uapi.state);
if (!intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DSI) ||
INTEL_GEN(dev_priv) >= 11) {
@@ -10207,9 +10561,8 @@ static int haswell_crtc_compute_clock(struct intel_crtc *crtc,
return 0;
}
-static void cannonlake_get_ddi_pll(struct drm_i915_private *dev_priv,
- enum port port,
- struct intel_crtc_state *pipe_config)
+static void cnl_get_ddi_pll(struct drm_i915_private *dev_priv, enum port port,
+ struct intel_crtc_state *pipe_config)
{
enum intel_dpll_id id;
u32 temp;
@@ -10223,9 +10576,8 @@ static void cannonlake_get_ddi_pll(struct drm_i915_private *dev_priv,
pipe_config->shared_dpll = intel_get_shared_dpll_by_id(dev_priv, id);
}
-static void icelake_get_ddi_pll(struct drm_i915_private *dev_priv,
- enum port port,
- struct intel_crtc_state *pipe_config)
+static void icl_get_ddi_pll(struct drm_i915_private *dev_priv, enum port port,
+ struct intel_crtc_state *pipe_config)
{
enum phy phy = intel_port_to_phy(dev_priv, port);
enum icl_port_dpll_id port_dpll_id;
@@ -10284,9 +10636,8 @@ static void bxt_get_ddi_pll(struct drm_i915_private *dev_priv,
pipe_config->shared_dpll = intel_get_shared_dpll_by_id(dev_priv, id);
}
-static void skylake_get_ddi_pll(struct drm_i915_private *dev_priv,
- enum port port,
- struct intel_crtc_state *pipe_config)
+static void skl_get_ddi_pll(struct drm_i915_private *dev_priv, enum port port,
+ struct intel_crtc_state *pipe_config)
{
enum intel_dpll_id id;
u32 temp;
@@ -10300,9 +10651,8 @@ static void skylake_get_ddi_pll(struct drm_i915_private *dev_priv,
pipe_config->shared_dpll = intel_get_shared_dpll_by_id(dev_priv, id);
}
-static void haswell_get_ddi_pll(struct drm_i915_private *dev_priv,
- enum port port,
- struct intel_crtc_state *pipe_config)
+static void hsw_get_ddi_pll(struct drm_i915_private *dev_priv, enum port port,
+ struct intel_crtc_state *pipe_config)
{
enum intel_dpll_id id;
u32 ddi_pll_sel = I915_READ(PORT_CLK_SEL(port));
@@ -10403,6 +10753,9 @@ static bool hsw_get_transcoder_state(struct intel_crtc *crtc,
case TRANS_DDI_EDP_INPUT_C_ONOFF:
trans_pipe = PIPE_C;
break;
+ case TRANS_DDI_EDP_INPUT_D_ONOFF:
+ trans_pipe = PIPE_D;
+ break;
}
if (trans_pipe == crtc->pipe) {
@@ -10487,31 +10840,36 @@ static bool bxt_get_dsi_transcoder_state(struct intel_crtc *crtc,
return transcoder_is_dsi(pipe_config->cpu_transcoder);
}
-static void haswell_get_ddi_port_state(struct intel_crtc *crtc,
- struct intel_crtc_state *pipe_config)
+static void hsw_get_ddi_port_state(struct intel_crtc *crtc,
+ struct intel_crtc_state *pipe_config)
{
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ enum transcoder cpu_transcoder = pipe_config->cpu_transcoder;
struct intel_shared_dpll *pll;
enum port port;
u32 tmp;
- tmp = I915_READ(TRANS_DDI_FUNC_CTL(pipe_config->cpu_transcoder));
-
- if (INTEL_GEN(dev_priv) >= 12)
- port = TGL_TRANS_DDI_FUNC_CTL_VAL_TO_PORT(tmp);
- else
- port = TRANS_DDI_FUNC_CTL_VAL_TO_PORT(tmp);
+ if (transcoder_is_dsi(cpu_transcoder)) {
+ port = (cpu_transcoder == TRANSCODER_DSI_A) ?
+ PORT_A : PORT_B;
+ } else {
+ tmp = I915_READ(TRANS_DDI_FUNC_CTL(cpu_transcoder));
+ if (INTEL_GEN(dev_priv) >= 12)
+ port = TGL_TRANS_DDI_FUNC_CTL_VAL_TO_PORT(tmp);
+ else
+ port = TRANS_DDI_FUNC_CTL_VAL_TO_PORT(tmp);
+ }
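/*
 * DSI transcoders have no TRANS_DDI_FUNC_CTL to read a port from, so
 * the port is inferred from the transcoder identity itself above:
 * TRANSCODER_DSI_A maps to PORT_A, anything else to PORT_B.
 */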
if (INTEL_GEN(dev_priv) >= 11)
- icelake_get_ddi_pll(dev_priv, port, pipe_config);
+ icl_get_ddi_pll(dev_priv, port, pipe_config);
else if (IS_CANNONLAKE(dev_priv))
- cannonlake_get_ddi_pll(dev_priv, port, pipe_config);
+ cnl_get_ddi_pll(dev_priv, port, pipe_config);
else if (IS_GEN9_BC(dev_priv))
- skylake_get_ddi_pll(dev_priv, port, pipe_config);
+ skl_get_ddi_pll(dev_priv, port, pipe_config);
else if (IS_GEN9_LP(dev_priv))
bxt_get_ddi_pll(dev_priv, port, pipe_config);
else
- haswell_get_ddi_pll(dev_priv, port, pipe_config);
+ hsw_get_ddi_pll(dev_priv, port, pipe_config);
pll = pipe_config->shared_dpll;
if (pll) {
@@ -10532,7 +10890,7 @@ static void haswell_get_ddi_port_state(struct intel_crtc *crtc,
pipe_config->fdi_lanes = ((FDI_DP_PORT_WIDTH_MASK & tmp) >>
FDI_DP_PORT_WIDTH_SHIFT) + 1;
- ironlake_get_fdi_m_n_config(crtc, pipe_config);
+ ilk_get_fdi_m_n_config(crtc, pipe_config);
}
}
@@ -10554,9 +10912,9 @@ static enum transcoder transcoder_master_readout(struct drm_i915_private *dev_priv,
return master_select - 1;
}
-static void icelake_get_trans_port_sync_config(struct intel_crtc_state *crtc_state)
+static void icl_get_trans_port_sync_config(struct intel_crtc_state *crtc_state)
{
- struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev);
+ struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
u32 transcoders;
enum transcoder cpu_transcoder;
@@ -10589,8 +10947,8 @@ static void icelake_get_trans_port_sync_config(struct intel_crtc_state *crtc_state)
crtc_state->sync_mode_slaves_mask);
}
-static bool haswell_get_pipe_config(struct intel_crtc *crtc,
- struct intel_crtc_state *pipe_config)
+static bool hsw_get_pipe_config(struct intel_crtc *crtc,
+ struct intel_crtc_state *pipe_config)
{
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
intel_wakeref_t wakerefs[POWER_DOMAIN_NUM], wf;
@@ -10598,8 +10956,6 @@ static bool haswell_get_pipe_config(struct intel_crtc *crtc,
u64 power_domain_mask;
bool active;
- intel_crtc_init_scalers(crtc, pipe_config);
-
pipe_config->master_transcoder = INVALID_TRANSCODER;
power_domain = POWER_DOMAIN_PIPE(crtc->pipe);
@@ -10627,7 +10983,7 @@ static bool haswell_get_pipe_config(struct intel_crtc *crtc,
if (!transcoder_is_dsi(pipe_config->cpu_transcoder) ||
INTEL_GEN(dev_priv) >= 11) {
- haswell_get_ddi_port_state(crtc, pipe_config);
+ hsw_get_ddi_port_state(crtc, pipe_config);
intel_get_pipe_timings(crtc, pipe_config);
}
@@ -10684,9 +11040,9 @@ static bool haswell_get_pipe_config(struct intel_crtc *crtc,
power_domain_mask |= BIT_ULL(power_domain);
if (INTEL_GEN(dev_priv) >= 9)
- skylake_get_pfit_config(crtc, pipe_config);
+ skl_get_pfit_config(crtc, pipe_config);
else
- ironlake_get_pfit_config(crtc, pipe_config);
+ ilk_get_pfit_config(crtc, pipe_config);
}
if (hsw_crtc_supports_ips(crtc)) {
@@ -10712,7 +11068,7 @@ static bool haswell_get_pipe_config(struct intel_crtc *crtc,
if (INTEL_GEN(dev_priv) >= 11 &&
!transcoder_is_dsi(pipe_config->cpu_transcoder))
- icelake_get_trans_port_sync_config(pipe_config);
+ icl_get_trans_port_sync_config(pipe_config);
out:
for_each_power_domain(power_domain, power_domain_mask)
@@ -10725,8 +11081,8 @@ out:
static u32 intel_cursor_base(const struct intel_plane_state *plane_state)
{
struct drm_i915_private *dev_priv =
- to_i915(plane_state->base.plane->dev);
- const struct drm_framebuffer *fb = plane_state->base.fb;
+ to_i915(plane_state->uapi.plane->dev);
+ const struct drm_framebuffer *fb = plane_state->hw.fb;
const struct drm_i915_gem_object *obj = intel_fb_obj(fb);
u32 base;
@@ -10740,8 +11096,8 @@ static u32 intel_cursor_base(const struct intel_plane_state *plane_state)
static u32 intel_cursor_position(const struct intel_plane_state *plane_state)
{
- int x = plane_state->base.dst.x1;
- int y = plane_state->base.dst.y1;
+ int x = plane_state->uapi.dst.x1;
+ int y = plane_state->uapi.dst.y1;
u32 pos = 0;
if (x < 0) {
@@ -10762,9 +11118,9 @@ static u32 intel_cursor_position(const struct intel_plane_state *plane_state)
static bool intel_cursor_size_ok(const struct intel_plane_state *plane_state)
{
const struct drm_mode_config *config =
- &plane_state->base.plane->dev->mode_config;
- int width = drm_rect_width(&plane_state->base.dst);
- int height = drm_rect_height(&plane_state->base.dst);
+ &plane_state->uapi.plane->dev->mode_config;
+ int width = drm_rect_width(&plane_state->uapi.dst);
+ int height = drm_rect_height(&plane_state->uapi.dst);
return width > 0 && width <= config->cursor_width &&
height > 0 && height <= config->cursor_height;
@@ -10773,8 +11129,8 @@ static bool intel_cursor_size_ok(const struct intel_plane_state *plane_state)
static int intel_cursor_check_surface(struct intel_plane_state *plane_state)
{
struct drm_i915_private *dev_priv =
- to_i915(plane_state->base.plane->dev);
- unsigned int rotation = plane_state->base.rotation;
+ to_i915(plane_state->uapi.plane->dev);
+ unsigned int rotation = plane_state->hw.rotation;
int src_x, src_y;
u32 offset;
int ret;
@@ -10783,11 +11139,11 @@ static int intel_cursor_check_surface(struct intel_plane_state *plane_state)
if (ret)
return ret;
- if (!plane_state->base.visible)
+ if (!plane_state->uapi.visible)
return 0;
- src_x = plane_state->base.src.x1 >> 16;
- src_y = plane_state->base.src.y1 >> 16;
+ src_x = plane_state->uapi.src.x1 >> 16;
+ src_y = plane_state->uapi.src.y1 >> 16;
intel_add_fb_offsets(&src_x, &src_y, plane_state, 0);
offset = intel_plane_compute_aligned_offset(&src_x, &src_y,
@@ -10802,14 +11158,14 @@ static int intel_cursor_check_surface(struct intel_plane_state *plane_state)
* Put the final coordinates back so that the src
* coordinate checks will see the right values.
*/
- drm_rect_translate_to(&plane_state->base.src,
+ drm_rect_translate_to(&plane_state->uapi.src,
src_x << 16, src_y << 16);
/* ILK+ do this automagically in hardware */
if (HAS_GMCH(dev_priv) && rotation & DRM_MODE_ROTATE_180) {
- const struct drm_framebuffer *fb = plane_state->base.fb;
- int src_w = drm_rect_width(&plane_state->base.src) >> 16;
- int src_h = drm_rect_height(&plane_state->base.src) >> 16;
+ const struct drm_framebuffer *fb = plane_state->hw.fb;
+ int src_w = drm_rect_width(&plane_state->uapi.src) >> 16;
+ int src_h = drm_rect_height(&plane_state->uapi.src) >> 16;
offset += (src_h * src_w - 1) * fb->format->cpp[0];
}
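/*
 * A plausible reading of the GMCH-only offset bump above: with
 * DRM_MODE_ROTATE_180 these parts scan the cursor buffer backwards
 * from the programmed base, so the base must point at the final pixel
 * ((src_h * src_w - 1) * cpp past the origin); ILK+ handle the
 * rotation in hardware, as the in-line comment notes.
 */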
@@ -10824,7 +11180,7 @@ static int intel_cursor_check_surface(struct intel_plane_state *plane_state)
static int intel_check_cursor(struct intel_crtc_state *crtc_state,
struct intel_plane_state *plane_state)
{
- const struct drm_framebuffer *fb = plane_state->base.fb;
+ const struct drm_framebuffer *fb = plane_state->hw.fb;
int ret;
if (fb && fb->modifier != DRM_FORMAT_MOD_LINEAR) {
@@ -10832,8 +11188,8 @@ static int intel_check_cursor(struct intel_crtc_state *crtc_state,
return -EINVAL;
}
- ret = drm_atomic_helper_check_plane_state(&plane_state->base,
- &crtc_state->base,
+ ret = drm_atomic_helper_check_plane_state(&plane_state->uapi,
+ &crtc_state->uapi,
DRM_PLANE_HELPER_NO_SCALING,
DRM_PLANE_HELPER_NO_SCALING,
true, true);
@@ -10841,14 +11197,14 @@ static int intel_check_cursor(struct intel_crtc_state *crtc_state,
return ret;
/* Use the unclipped src/dst rectangles, which we program to hw */
- plane_state->base.src = drm_plane_state_src(&plane_state->base);
- plane_state->base.dst = drm_plane_state_dest(&plane_state->base);
+ plane_state->uapi.src = drm_plane_state_src(&plane_state->uapi);
+ plane_state->uapi.dst = drm_plane_state_dest(&plane_state->uapi);
ret = intel_cursor_check_surface(plane_state);
if (ret)
return ret;
- if (!plane_state->base.visible)
+ if (!plane_state->uapi.visible)
return 0;
ret = intel_plane_check_src_coordinates(plane_state);
@@ -10886,7 +11242,7 @@ static u32 i845_cursor_ctl(const struct intel_crtc_state *crtc_state,
static bool i845_cursor_size_ok(const struct intel_plane_state *plane_state)
{
- int width = drm_rect_width(&plane_state->base.dst);
+ int width = drm_rect_width(&plane_state->uapi.dst);
/*
* 845g/865g are only limited by the width of their cursors,
@@ -10898,7 +11254,7 @@ static bool i845_cursor_size_ok(const struct intel_plane_state *plane_state)
static int i845_check_cursor(struct intel_crtc_state *crtc_state,
struct intel_plane_state *plane_state)
{
- const struct drm_framebuffer *fb = plane_state->base.fb;
+ const struct drm_framebuffer *fb = plane_state->hw.fb;
int ret;
ret = intel_check_cursor(crtc_state, plane_state);
@@ -10912,12 +11268,12 @@ static int i845_check_cursor(struct intel_crtc_state *crtc_state,
/* Check for which cursor types we support */
if (!i845_cursor_size_ok(plane_state)) {
DRM_DEBUG("Cursor dimension %dx%d not supported\n",
- drm_rect_width(&plane_state->base.dst),
- drm_rect_height(&plane_state->base.dst));
+ drm_rect_width(&plane_state->uapi.dst),
+ drm_rect_height(&plane_state->uapi.dst));
return -EINVAL;
}
- WARN_ON(plane_state->base.visible &&
+ WARN_ON(plane_state->uapi.visible &&
plane_state->color_plane[0].stride != fb->pitches[0]);
switch (fb->pitches[0]) {
@@ -10945,9 +11301,9 @@ static void i845_update_cursor(struct intel_plane *plane,
u32 cntl = 0, base = 0, pos = 0, size = 0;
unsigned long irqflags;
- if (plane_state && plane_state->base.visible) {
- unsigned int width = drm_rect_width(&plane_state->base.dst);
- unsigned int height = drm_rect_height(&plane_state->base.dst);
+ if (plane_state && plane_state->uapi.visible) {
+ unsigned int width = drm_rect_width(&plane_state->uapi.dst);
+ unsigned int height = drm_rect_height(&plane_state->uapi.dst);
cntl = plane_state->ctl |
i845_cursor_ctl_crtc(crtc_state);
@@ -11020,7 +11376,7 @@ i9xx_cursor_max_stride(struct intel_plane *plane,
static u32 i9xx_cursor_ctl_crtc(const struct intel_crtc_state *crtc_state)
{
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
u32 cntl = 0;
@@ -11043,13 +11399,13 @@ static u32 i9xx_cursor_ctl(const struct intel_crtc_state *crtc_state,
const struct intel_plane_state *plane_state)
{
struct drm_i915_private *dev_priv =
- to_i915(plane_state->base.plane->dev);
+ to_i915(plane_state->uapi.plane->dev);
u32 cntl = 0;
if (IS_GEN(dev_priv, 6) || IS_IVYBRIDGE(dev_priv))
cntl |= MCURSOR_TRICKLE_FEED_DISABLE;
- switch (drm_rect_width(&plane_state->base.dst)) {
+ switch (drm_rect_width(&plane_state->uapi.dst)) {
case 64:
cntl |= MCURSOR_MODE_64_ARGB_AX;
break;
@@ -11060,11 +11416,11 @@ static u32 i9xx_cursor_ctl(const struct intel_crtc_state *crtc_state,
cntl |= MCURSOR_MODE_256_ARGB_AX;
break;
default:
- MISSING_CASE(drm_rect_width(&plane_state->base.dst));
+ MISSING_CASE(drm_rect_width(&plane_state->uapi.dst));
return 0;
}
- if (plane_state->base.rotation & DRM_MODE_ROTATE_180)
+ if (plane_state->hw.rotation & DRM_MODE_ROTATE_180)
cntl |= MCURSOR_ROTATE_180;
return cntl;
@@ -11073,9 +11429,9 @@ static u32 i9xx_cursor_ctl(const struct intel_crtc_state *crtc_state,
static bool i9xx_cursor_size_ok(const struct intel_plane_state *plane_state)
{
struct drm_i915_private *dev_priv =
- to_i915(plane_state->base.plane->dev);
- int width = drm_rect_width(&plane_state->base.dst);
- int height = drm_rect_height(&plane_state->base.dst);
+ to_i915(plane_state->uapi.plane->dev);
+ int width = drm_rect_width(&plane_state->uapi.dst);
+ int height = drm_rect_height(&plane_state->uapi.dst);
if (!intel_cursor_size_ok(plane_state))
return false;
@@ -11097,7 +11453,7 @@ static bool i9xx_cursor_size_ok(const struct intel_plane_state *plane_state)
* cursors.
*/
if (HAS_CUR_FBC(dev_priv) &&
- plane_state->base.rotation & DRM_MODE_ROTATE_0) {
+ plane_state->hw.rotation & DRM_MODE_ROTATE_0) {
if (height < 8 || height > width)
return false;
} else {
@@ -11111,9 +11467,9 @@ static bool i9xx_cursor_size_ok(const struct intel_plane_state *plane_state)
static int i9xx_check_cursor(struct intel_crtc_state *crtc_state,
struct intel_plane_state *plane_state)
{
- struct intel_plane *plane = to_intel_plane(plane_state->base.plane);
+ struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
- const struct drm_framebuffer *fb = plane_state->base.fb;
+ const struct drm_framebuffer *fb = plane_state->hw.fb;
enum pipe pipe = plane->pipe;
int ret;
@@ -11128,19 +11484,19 @@ static int i9xx_check_cursor(struct intel_crtc_state *crtc_state,
/* Check for which cursor types we support */
if (!i9xx_cursor_size_ok(plane_state)) {
DRM_DEBUG("Cursor dimension %dx%d not supported\n",
- drm_rect_width(&plane_state->base.dst),
- drm_rect_height(&plane_state->base.dst));
+ drm_rect_width(&plane_state->uapi.dst),
+ drm_rect_height(&plane_state->uapi.dst));
return -EINVAL;
}
- WARN_ON(plane_state->base.visible &&
+ WARN_ON(plane_state->uapi.visible &&
plane_state->color_plane[0].stride != fb->pitches[0]);
if (fb->pitches[0] !=
- drm_rect_width(&plane_state->base.dst) * fb->format->cpp[0]) {
+ drm_rect_width(&plane_state->uapi.dst) * fb->format->cpp[0]) {
DRM_DEBUG_KMS("Invalid cursor stride (%u) (cursor width %d)\n",
fb->pitches[0],
- drm_rect_width(&plane_state->base.dst));
+ drm_rect_width(&plane_state->uapi.dst));
return -EINVAL;
}
@@ -11155,7 +11511,7 @@ static int i9xx_check_cursor(struct intel_crtc_state *crtc_state,
	 * Refuse to put the cursor into that compromised position.
*/
if (IS_CHERRYVIEW(dev_priv) && pipe == PIPE_C &&
- plane_state->base.visible && plane_state->base.dst.x1 < 0) {
+ plane_state->uapi.visible && plane_state->uapi.dst.x1 < 0) {
DRM_DEBUG_KMS("CHV cursor C not allowed to straddle the left screen edge\n");
return -EINVAL;
}
@@ -11174,9 +11530,9 @@ static void i9xx_update_cursor(struct intel_plane *plane,
u32 cntl = 0, base = 0, pos = 0, fbc_ctl = 0;
unsigned long irqflags;
- if (plane_state && plane_state->base.visible) {
- unsigned width = drm_rect_width(&plane_state->base.dst);
- unsigned height = drm_rect_height(&plane_state->base.dst);
+ if (plane_state && plane_state->uapi.visible) {
+ unsigned width = drm_rect_width(&plane_state->uapi.dst);
+ unsigned height = drm_rect_height(&plane_state->uapi.dst);
cntl = plane_state->ctl |
i9xx_cursor_ctl_crtc(crtc_state);
@@ -11332,7 +11688,7 @@ int intel_get_load_detect_pipe(struct drm_connector *connector,
{
struct intel_crtc *intel_crtc;
struct intel_encoder *intel_encoder =
- intel_attached_encoder(connector);
+ intel_attached_encoder(to_intel_connector(connector));
struct drm_crtc *possible_crtc;
struct drm_encoder *encoder = &intel_encoder->base;
struct drm_crtc *crtc = NULL;
@@ -11431,9 +11787,9 @@ found:
goto fail;
}
- crtc_state->base.active = crtc_state->base.enable = true;
+ crtc_state->uapi.active = true;
- ret = drm_atomic_set_mode_for_crtc(&crtc_state->base,
+ ret = drm_atomic_set_mode_for_crtc(&crtc_state->uapi,
&load_detect_mode);
if (ret)
goto fail;
@@ -11486,7 +11842,7 @@ void intel_release_load_detect_pipe(struct drm_connector *connector,
struct drm_modeset_acquire_ctx *ctx)
{
struct intel_encoder *intel_encoder =
- intel_attached_encoder(connector);
+ intel_attached_encoder(to_intel_connector(connector));
struct drm_encoder *encoder = &intel_encoder->base;
struct drm_atomic_state *state = old->restore_state;
int ret;
@@ -11629,8 +11985,8 @@ int intel_dotclock_calculate(int link_freq,
return div_u64(mul_u32_u32(m_n->link_m, link_freq), m_n->link_n);
}
-static void ironlake_pch_clock_get(struct intel_crtc *crtc,
- struct intel_crtc_state *pipe_config)
+static void ilk_pch_clock_get(struct intel_crtc *crtc,
+ struct intel_crtc_state *pipe_config)
{
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
@@ -11642,11 +11998,38 @@ static void ironlake_pch_clock_get(struct intel_crtc *crtc,
* we may need some idea for the dotclock anyway.
* Calculate one based on the FDI configuration.
*/
- pipe_config->base.adjusted_mode.crtc_clock =
+ pipe_config->hw.adjusted_mode.crtc_clock =
intel_dotclock_calculate(intel_fdi_link_freq(dev_priv, pipe_config),
&pipe_config->fdi_m_n);
}
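
The recovered dotclock is simply the link M/N ratio scaled by the link
frequency, crtc_clock = link_freq * link_m / link_n, which is what
intel_dotclock_calculate() computes via div_u64(mul_u32_u32(...)) so the
intermediate product stays in 64 bits. With illustrative numbers that are not
from the patch: a link at link_freq = 270000 kHz programmed with
link_m = 110000 and link_n = 200000 yields 270000 * 110000 / 200000 =
148500 kHz, the dotclock of a standard 1920x1080@60 mode.
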
+static void intel_crtc_state_reset(struct intel_crtc_state *crtc_state,
+ struct intel_crtc *crtc)
+{
+ memset(crtc_state, 0, sizeof(*crtc_state));
+
+ __drm_atomic_helper_crtc_state_reset(&crtc_state->uapi, &crtc->base);
+
+ crtc_state->cpu_transcoder = INVALID_TRANSCODER;
+ crtc_state->master_transcoder = INVALID_TRANSCODER;
+ crtc_state->hsw_workaround_pipe = INVALID_PIPE;
+ crtc_state->output_format = INTEL_OUTPUT_FORMAT_INVALID;
+ crtc_state->scaler_state.scaler_id = -1;
+ crtc_state->mst_master_transcoder = INVALID_TRANSCODER;
+}
+
+static struct intel_crtc_state *intel_crtc_state_alloc(struct intel_crtc *crtc)
+{
+ struct intel_crtc_state *crtc_state;
+
+ crtc_state = kmalloc(sizeof(*crtc_state), GFP_KERNEL);
+
+ if (crtc_state)
+ intel_crtc_state_reset(crtc_state, crtc);
+
+ return crtc_state;
+}
+
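
Centralizing the defaults in intel_crtc_state_reset() means every allocation
path picks up the non-zero sentinels (INVALID_TRANSCODER, INVALID_PIPE,
scaler_id = -1); the switch from kzalloc() to kmalloc() is safe because the
reset helper starts with a full memset(). A minimal usage sketch, assuming
only the two helpers above:

	struct intel_crtc_state *tmp = intel_crtc_state_alloc(crtc);

	if (!tmp)
		return -ENOMEM;
	/* tmp->cpu_transcoder etc. now hold the INVALID_* sentinels */
	kfree(tmp);
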
/* Returns the currently programmed mode of the given encoder. */
struct drm_display_mode *
intel_encoder_current_mode(struct intel_encoder *encoder)
@@ -11666,14 +12049,12 @@ intel_encoder_current_mode(struct intel_encoder *encoder)
if (!mode)
return NULL;
- crtc_state = kzalloc(sizeof(*crtc_state), GFP_KERNEL);
+ crtc_state = intel_crtc_state_alloc(crtc);
if (!crtc_state) {
kfree(mode);
return NULL;
}
- crtc_state->base.crtc = &crtc->base;
-
if (!dev_priv->display.get_pipe_config(crtc, crtc_state)) {
kfree(crtc_state);
kfree(mode);
@@ -11711,18 +12092,18 @@ static bool intel_wm_need_update(const struct intel_plane_state *cur,
struct intel_plane_state *new)
{
/* Update watermarks on tiling or size changes. */
- if (new->base.visible != cur->base.visible)
+ if (new->uapi.visible != cur->uapi.visible)
return true;
- if (!cur->base.fb || !new->base.fb)
+ if (!cur->hw.fb || !new->hw.fb)
return false;
- if (cur->base.fb->modifier != new->base.fb->modifier ||
- cur->base.rotation != new->base.rotation ||
- drm_rect_width(&new->base.src) != drm_rect_width(&cur->base.src) ||
- drm_rect_height(&new->base.src) != drm_rect_height(&cur->base.src) ||
- drm_rect_width(&new->base.dst) != drm_rect_width(&cur->base.dst) ||
- drm_rect_height(&new->base.dst) != drm_rect_height(&cur->base.dst))
+ if (cur->hw.fb->modifier != new->hw.fb->modifier ||
+ cur->hw.rotation != new->hw.rotation ||
+ drm_rect_width(&new->uapi.src) != drm_rect_width(&cur->uapi.src) ||
+ drm_rect_height(&new->uapi.src) != drm_rect_height(&cur->uapi.src) ||
+ drm_rect_width(&new->uapi.dst) != drm_rect_width(&cur->uapi.dst) ||
+ drm_rect_height(&new->uapi.dst) != drm_rect_height(&cur->uapi.dst))
return true;
return false;
@@ -11730,10 +12111,10 @@ static bool intel_wm_need_update(const struct intel_plane_state *cur,
static bool needs_scaling(const struct intel_plane_state *state)
{
- int src_w = drm_rect_width(&state->base.src) >> 16;
- int src_h = drm_rect_height(&state->base.src) >> 16;
- int dst_w = drm_rect_width(&state->base.dst);
- int dst_h = drm_rect_height(&state->base.dst);
+ int src_w = drm_rect_width(&state->uapi.src) >> 16;
+ int src_h = drm_rect_height(&state->uapi.src) >> 16;
+ int dst_w = drm_rect_width(&state->uapi.dst);
+ int dst_h = drm_rect_height(&state->uapi.dst);
return (src_w != dst_w || src_h != dst_h);
}
@@ -11743,12 +12124,12 @@ int intel_plane_atomic_calc_changes(const struct intel_crtc_state *old_crtc_stat
const struct intel_plane_state *old_plane_state,
struct intel_plane_state *plane_state)
{
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
- struct intel_plane *plane = to_intel_plane(plane_state->base.plane);
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+ struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
bool mode_changed = needs_modeset(crtc_state);
- bool was_crtc_enabled = old_crtc_state->base.active;
- bool is_crtc_enabled = crtc_state->base.active;
+ bool was_crtc_enabled = old_crtc_state->hw.active;
+ bool is_crtc_enabled = crtc_state->hw.active;
bool turn_off, turn_on, visible, was_visible;
int ret;
@@ -11758,8 +12139,8 @@ int intel_plane_atomic_calc_changes(const struct intel_crtc_state *old_crtc_stat
return ret;
}
- was_visible = old_plane_state->base.visible;
- visible = plane_state->base.visible;
+ was_visible = old_plane_state->uapi.visible;
+ visible = plane_state->uapi.visible;
if (!was_crtc_enabled && WARN_ON(was_visible))
was_visible = false;
@@ -11775,7 +12156,7 @@ int intel_plane_atomic_calc_changes(const struct intel_crtc_state *old_crtc_stat
* only combine the results from all planes in the current place?
*/
if (!is_crtc_enabled) {
- plane_state->base.visible = visible = false;
+ plane_state->uapi.visible = visible = false;
crtc_state->active_planes &= ~BIT(plane->id);
crtc_state->data_rate[plane->id] = 0;
crtc_state->min_cdclk[plane->id] = 0;
@@ -11916,9 +12297,9 @@ static int icl_add_linked_planes(struct intel_atomic_state *state)
static int icl_check_nv12_planes(struct intel_crtc_state *crtc_state)
{
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
- struct intel_atomic_state *state = to_intel_atomic_state(crtc_state->base.state);
+ struct intel_atomic_state *state = to_intel_atomic_state(crtc_state->uapi.state);
struct intel_plane *plane, *linked;
struct intel_plane_state *plane_state;
int i;
@@ -11935,7 +12316,7 @@ static int icl_check_nv12_planes(struct intel_crtc_state *crtc_state)
continue;
plane_state->planar_linked_plane = NULL;
- if (plane_state->planar_slave && !plane_state->base.visible) {
+ if (plane_state->planar_slave && !plane_state->uapi.visible) {
crtc_state->active_planes &= ~BIT(plane->id);
crtc_state->update_planes |= BIT(plane->id);
}
@@ -11981,6 +12362,25 @@ static int icl_check_nv12_planes(struct intel_crtc_state *crtc_state)
crtc_state->active_planes |= BIT(linked->id);
crtc_state->update_planes |= BIT(linked->id);
DRM_DEBUG_KMS("Using %s as Y plane for %s\n", linked->base.name, plane->base.name);
+
+ /* Copy parameters to slave plane */
+ linked_state->ctl = plane_state->ctl | PLANE_CTL_YUV420_Y_PLANE;
+ linked_state->color_ctl = plane_state->color_ctl;
+ memcpy(linked_state->color_plane, plane_state->color_plane,
+ sizeof(linked_state->color_plane));
+
+ intel_plane_copy_uapi_to_hw_state(linked_state, plane_state);
+ linked_state->uapi.src = plane_state->uapi.src;
+ linked_state->uapi.dst = plane_state->uapi.dst;
+
+ if (icl_is_hdr_plane(dev_priv, plane->id)) {
+ if (linked->id == PLANE_SPRITE5)
+ plane_state->cus_ctl |= PLANE_CUS_PLANE_7;
+ else if (linked->id == PLANE_SPRITE4)
+ plane_state->cus_ctl |= PLANE_CUS_PLANE_6;
+ else
+ MISSING_CASE(linked->id);
+ }
}
return 0;
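
For context on the hunk above: on gen11 hardware NV12 is scanned out by
pairing a UV master plane with a Y slave plane, so the master's computed
register state (ctl, color_ctl, color_plane[]) is mirrored into the slave
here, and for HDR-capable planes the chroma upsampler control (cus_ctl) must
name which plane carries Y; per the switch above, PLANE_CUS_PLANE_6 and
PLANE_CUS_PLANE_7 select sprites 4 and 5 respectively.
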
@@ -11988,97 +12388,130 @@ static int icl_check_nv12_planes(struct intel_crtc_state *crtc_state)
static bool c8_planes_changed(const struct intel_crtc_state *new_crtc_state)
{
- struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->base.crtc);
+ struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->uapi.crtc);
struct intel_atomic_state *state =
- to_intel_atomic_state(new_crtc_state->base.state);
+ to_intel_atomic_state(new_crtc_state->uapi.state);
const struct intel_crtc_state *old_crtc_state =
intel_atomic_get_old_crtc_state(state, crtc);
return !old_crtc_state->c8_planes != !new_crtc_state->c8_planes;
}
-static int icl_add_sync_mode_crtcs(struct intel_crtc_state *crtc_state)
+static bool
+intel_atomic_is_master_connector(struct intel_crtc_state *crtc_state)
{
- struct drm_crtc *crtc = crtc_state->base.crtc;
- struct intel_atomic_state *state = to_intel_atomic_state(crtc_state->base.state);
- struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev);
- struct drm_connector *master_connector, *connector;
+ struct drm_crtc *crtc = crtc_state->uapi.crtc;
+ struct drm_atomic_state *state = crtc_state->uapi.state;
+ struct drm_connector *connector;
struct drm_connector_state *connector_state;
+ int i;
+
+ for_each_new_connector_in_state(state, connector, connector_state, i) {
+ if (connector_state->crtc != crtc)
+ continue;
+ if (connector->has_tile &&
+ connector->tile_h_loc == connector->num_h_tile - 1 &&
+ connector->tile_v_loc == connector->num_v_tile - 1)
+ return true;
+ }
+
+ return false;
+}
+
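
Put differently, the master is the connector occupying the bottom-right tile
of its group: for a hypothetical 2x2 tiled panel (num_h_tile = num_v_tile = 2)
only the connector with tile_h_loc == 1 and tile_v_loc == 1 qualifies. The
test reduces to a per-connector predicate, sketched below (illustrative
helper, not part of the patch) with the same fields the function above uses:

	#include <drm/drm_connector.h>

	static bool connector_is_tile_master(const struct drm_connector *c)
	{
		return c->has_tile &&
		       c->tile_h_loc == c->num_h_tile - 1 &&
		       c->tile_v_loc == c->num_v_tile - 1;
	}
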
+static void reset_port_sync_mode_state(struct intel_crtc_state *crtc_state)
+{
+ crtc_state->master_transcoder = INVALID_TRANSCODER;
+ crtc_state->sync_mode_slaves_mask = 0;
+}
+
+static int icl_compute_port_sync_crtc_state(struct drm_connector *connector,
+ struct intel_crtc_state *crtc_state,
+ int num_tiled_conns)
+{
+ struct drm_crtc *crtc = crtc_state->uapi.crtc;
+ struct intel_atomic_state *state = to_intel_atomic_state(crtc_state->uapi.state);
+ struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
+ struct drm_connector *master_connector;
struct drm_connector_list_iter conn_iter;
struct drm_crtc *master_crtc = NULL;
struct drm_crtc_state *master_crtc_state;
struct intel_crtc_state *master_pipe_config;
- int i, tile_group_id;
if (INTEL_GEN(dev_priv) < 11)
return 0;
+ if (!intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DP))
+ return 0;
+
/*
 * In case of tiled displays there could be one or more slaves but there is
 * only one master. Let's make the CRTC used by the connector corresponding
 * to the last horizontal and last vertical tile a master/genlock CRTC.
 * All the other CRTCs corresponding to other tiles of the same tile group
 * are the slave CRTCs and hold a pointer to their genlock CRTC.
+	 * If not all tiles are present, do not make master/slave assignments.
*/
- for_each_new_connector_in_state(&state->base, connector, connector_state, i) {
- if (connector_state->crtc != crtc)
- continue;
- if (!connector->has_tile)
+ if (!connector->has_tile ||
+ crtc_state->hw.mode.hdisplay != connector->tile_h_size ||
+ crtc_state->hw.mode.vdisplay != connector->tile_v_size ||
+ num_tiled_conns < connector->num_h_tile * connector->num_v_tile) {
+ reset_port_sync_mode_state(crtc_state);
+ return 0;
+ }
+	/* The connector at the last horizontal and last vertical tile is the
+	 * master; its crtc state is already populated in the slave for port sync.
+	 */
+ if (connector->tile_h_loc == connector->num_h_tile - 1 &&
+ connector->tile_v_loc == connector->num_v_tile - 1)
+ return 0;
+
+ /* Loop through all connectors and configure the Slave crtc_state
+ * to point to the correct master.
+ */
+ drm_connector_list_iter_begin(&dev_priv->drm, &conn_iter);
+ drm_for_each_connector_iter(master_connector, &conn_iter) {
+ struct drm_connector_state *master_conn_state = NULL;
+
+ if (!(master_connector->has_tile &&
+ master_connector->tile_group->id == connector->tile_group->id))
continue;
- if (crtc_state->base.mode.hdisplay != connector->tile_h_size ||
- crtc_state->base.mode.vdisplay != connector->tile_v_size)
- return 0;
- if (connector->tile_h_loc == connector->num_h_tile - 1 &&
- connector->tile_v_loc == connector->num_v_tile - 1)
+ if (master_connector->tile_h_loc != master_connector->num_h_tile - 1 ||
+ master_connector->tile_v_loc != master_connector->num_v_tile - 1)
continue;
- crtc_state->sync_mode_slaves_mask = 0;
- tile_group_id = connector->tile_group->id;
- drm_connector_list_iter_begin(&dev_priv->drm, &conn_iter);
- drm_for_each_connector_iter(master_connector, &conn_iter) {
- struct drm_connector_state *master_conn_state = NULL;
-
- if (!master_connector->has_tile)
- continue;
- if (master_connector->tile_h_loc != master_connector->num_h_tile - 1 ||
- master_connector->tile_v_loc != master_connector->num_v_tile - 1)
- continue;
- if (master_connector->tile_group->id != tile_group_id)
- continue;
- master_conn_state = drm_atomic_get_connector_state(&state->base,
- master_connector);
- if (IS_ERR(master_conn_state)) {
- drm_connector_list_iter_end(&conn_iter);
- return PTR_ERR(master_conn_state);
- }
- if (master_conn_state->crtc) {
- master_crtc = master_conn_state->crtc;
- break;
- }
+ master_conn_state = drm_atomic_get_connector_state(&state->base,
+ master_connector);
+ if (IS_ERR(master_conn_state)) {
+ drm_connector_list_iter_end(&conn_iter);
+ return PTR_ERR(master_conn_state);
}
- drm_connector_list_iter_end(&conn_iter);
-
- if (!master_crtc) {
- DRM_DEBUG_KMS("Could not find Master CRTC for Slave CRTC %d\n",
- connector_state->crtc->base.id);
- return -EINVAL;
+ if (master_conn_state->crtc) {
+ master_crtc = master_conn_state->crtc;
+ break;
}
+ }
+ drm_connector_list_iter_end(&conn_iter);
- master_crtc_state = drm_atomic_get_crtc_state(&state->base,
- master_crtc);
- if (IS_ERR(master_crtc_state))
- return PTR_ERR(master_crtc_state);
-
- master_pipe_config = to_intel_crtc_state(master_crtc_state);
- crtc_state->master_transcoder = master_pipe_config->cpu_transcoder;
- master_pipe_config->sync_mode_slaves_mask |=
- BIT(crtc_state->cpu_transcoder);
- DRM_DEBUG_KMS("Master Transcoder = %s added for Slave CRTC = %d, slave transcoder bitmask = %d\n",
- transcoder_name(crtc_state->master_transcoder),
- crtc_state->base.crtc->base.id,
- master_pipe_config->sync_mode_slaves_mask);
+ if (!master_crtc) {
+ DRM_DEBUG_KMS("Could not find Master CRTC for Slave CRTC %d\n",
+ crtc->base.id);
+ return -EINVAL;
}
+ master_crtc_state = drm_atomic_get_crtc_state(&state->base,
+ master_crtc);
+ if (IS_ERR(master_crtc_state))
+ return PTR_ERR(master_crtc_state);
+
+ master_pipe_config = to_intel_crtc_state(master_crtc_state);
+ crtc_state->master_transcoder = master_pipe_config->cpu_transcoder;
+ master_pipe_config->sync_mode_slaves_mask |=
+ BIT(crtc_state->cpu_transcoder);
+ DRM_DEBUG_KMS("Master Transcoder = %s added for Slave CRTC = %d, slave transcoder bitmask = %d\n",
+ transcoder_name(crtc_state->master_transcoder),
+ crtc->base.id,
+ master_pipe_config->sync_mode_slaves_mask);
+
return 0;
}
@@ -12092,10 +12525,10 @@ static int intel_crtc_atomic_check(struct intel_atomic_state *state,
int ret;
if (INTEL_GEN(dev_priv) < 5 && !IS_G4X(dev_priv) &&
- mode_changed && !crtc_state->base.active)
+ mode_changed && !crtc_state->hw.active)
crtc_state->update_wm_post = true;
- if (mode_changed && crtc_state->base.enable &&
+ if (mode_changed && crtc_state->hw.enable &&
dev_priv->display.crtc_compute_clock &&
!WARN_ON(crtc_state->shared_dpll)) {
ret = dev_priv->display.crtc_compute_clock(crtc, crtc_state);
@@ -12108,10 +12541,10 @@ static int intel_crtc_atomic_check(struct intel_atomic_state *state,
* when C8 planes are getting enabled/disabled.
*/
if (c8_planes_changed(crtc_state))
- crtc_state->base.color_mgmt_changed = true;
+ crtc_state->uapi.color_mgmt_changed = true;
if (mode_changed || crtc_state->update_pipe ||
- crtc_state->base.color_mgmt_changed) {
+ crtc_state->uapi.color_mgmt_changed) {
ret = intel_color_check(crtc_state);
if (ret)
return ret;
@@ -12224,7 +12657,7 @@ compute_baseline_pipe_bpp(struct intel_crtc *crtc,
struct intel_crtc_state *pipe_config)
{
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
- struct drm_atomic_state *state = pipe_config->base.state;
+ struct drm_atomic_state *state = pipe_config->uapi.state;
struct drm_connector *connector;
struct drm_connector_state *connector_state;
int bpp, i;
@@ -12281,7 +12714,7 @@ static void
intel_dump_infoframe(struct drm_i915_private *dev_priv,
const union hdmi_infoframe *frame)
{
- if ((drm_debug & DRM_UT_KMS) == 0)
+ if (!drm_debug_enabled(DRM_UT_KMS))
return;
hdmi_infoframe_log(KERN_DEBUG, dev_priv->drm.dev, frame);
@@ -12349,14 +12782,14 @@ static const char *output_formats(enum intel_output_format format)
static void intel_dump_plane_state(const struct intel_plane_state *plane_state)
{
- struct intel_plane *plane = to_intel_plane(plane_state->base.plane);
- const struct drm_framebuffer *fb = plane_state->base.fb;
+ struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
+ const struct drm_framebuffer *fb = plane_state->hw.fb;
struct drm_format_name_buf format_name;
if (!fb) {
DRM_DEBUG_KMS("[PLANE:%d:%s] fb: [NOFB], visible: %s\n",
plane->base.base.id, plane->base.name,
- yesno(plane_state->base.visible));
+ yesno(plane_state->uapi.visible));
return;
}
@@ -12364,20 +12797,20 @@ static void intel_dump_plane_state(const struct intel_plane_state *plane_state)
plane->base.base.id, plane->base.name,
fb->base.id, fb->width, fb->height,
drm_get_format_name(fb->format->format, &format_name),
- yesno(plane_state->base.visible));
+ yesno(plane_state->uapi.visible));
DRM_DEBUG_KMS("\trotation: 0x%x, scaler: %d\n",
- plane_state->base.rotation, plane_state->scaler_id);
- if (plane_state->base.visible)
+ plane_state->hw.rotation, plane_state->scaler_id);
+ if (plane_state->uapi.visible)
DRM_DEBUG_KMS("\tsrc: " DRM_RECT_FP_FMT " dst: " DRM_RECT_FMT "\n",
- DRM_RECT_FP_ARG(&plane_state->base.src),
- DRM_RECT_ARG(&plane_state->base.dst));
+ DRM_RECT_FP_ARG(&plane_state->uapi.src),
+ DRM_RECT_ARG(&plane_state->uapi.dst));
}
static void intel_dump_pipe_config(const struct intel_crtc_state *pipe_config,
struct intel_atomic_state *state,
const char *context)
{
- struct intel_crtc *crtc = to_intel_crtc(pipe_config->base.crtc);
+ struct intel_crtc *crtc = to_intel_crtc(pipe_config->uapi.crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
const struct intel_plane_state *plane_state;
struct intel_plane *plane;
@@ -12386,14 +12819,14 @@ static void intel_dump_pipe_config(const struct intel_crtc_state *pipe_config,
DRM_DEBUG_KMS("[CRTC:%d:%s] enable: %s %s\n",
crtc->base.base.id, crtc->base.name,
- yesno(pipe_config->base.enable), context);
+ yesno(pipe_config->hw.enable), context);
- if (!pipe_config->base.enable)
+ if (!pipe_config->hw.enable)
goto dump_planes;
snprintf_output_types(buf, sizeof(buf), pipe_config->output_types);
DRM_DEBUG_KMS("active: %s, output_types: %s (0x%x), output format: %s\n",
- yesno(pipe_config->base.active),
+ yesno(pipe_config->hw.active),
buf, pipe_config->output_types,
output_formats(pipe_config->output_format));
@@ -12433,10 +12866,10 @@ static void intel_dump_pipe_config(const struct intel_crtc_state *pipe_config,
intel_dump_infoframe(dev_priv, &pipe_config->infoframes.hdmi);
DRM_DEBUG_KMS("requested mode:\n");
- drm_mode_debug_printmodeline(&pipe_config->base.mode);
+ drm_mode_debug_printmodeline(&pipe_config->hw.mode);
DRM_DEBUG_KMS("adjusted mode:\n");
- drm_mode_debug_printmodeline(&pipe_config->base.adjusted_mode);
- intel_dump_crtc_timings(&pipe_config->base.adjusted_mode);
+ drm_mode_debug_printmodeline(&pipe_config->hw.adjusted_mode);
+ intel_dump_crtc_timings(&pipe_config->hw.adjusted_mode);
DRM_DEBUG_KMS("port clock: %d, pipe src size: %dx%d, pixel rate %d\n",
pipe_config->port_clock,
pipe_config->pipe_src_w, pipe_config->pipe_src_h,
@@ -12474,6 +12907,9 @@ static void intel_dump_pipe_config(const struct intel_crtc_state *pipe_config,
pipe_config->csc_mode, pipe_config->gamma_mode,
pipe_config->gamma_enable, pipe_config->csc_enable);
+ DRM_DEBUG_KMS("MST master transcoder: %s\n",
+ transcoder_name(pipe_config->mst_master_transcoder));
+
dump_planes:
if (!state)
return;
@@ -12556,22 +12992,59 @@ static bool check_digital_port_conflicts(struct intel_atomic_state *state)
return ret;
}
+static void
+intel_crtc_copy_uapi_to_hw_state_nomodeset(struct intel_crtc_state *crtc_state)
+{
+ intel_crtc_copy_color_blobs(crtc_state);
+}
+
+static void
+intel_crtc_copy_uapi_to_hw_state(struct intel_crtc_state *crtc_state)
+{
+ crtc_state->hw.enable = crtc_state->uapi.enable;
+ crtc_state->hw.active = crtc_state->uapi.active;
+ crtc_state->hw.mode = crtc_state->uapi.mode;
+ crtc_state->hw.adjusted_mode = crtc_state->uapi.adjusted_mode;
+ intel_crtc_copy_uapi_to_hw_state_nomodeset(crtc_state);
+}
+
+static void intel_crtc_copy_hw_to_uapi_state(struct intel_crtc_state *crtc_state)
+{
+ crtc_state->uapi.enable = crtc_state->hw.enable;
+ crtc_state->uapi.active = crtc_state->hw.active;
+ WARN_ON(drm_atomic_set_mode_for_crtc(&crtc_state->uapi, &crtc_state->hw.mode) < 0);
+
+ crtc_state->uapi.adjusted_mode = crtc_state->hw.adjusted_mode;
+
+ /* copy color blobs to uapi */
+ drm_property_replace_blob(&crtc_state->uapi.degamma_lut,
+ crtc_state->hw.degamma_lut);
+ drm_property_replace_blob(&crtc_state->uapi.gamma_lut,
+ crtc_state->hw.gamma_lut);
+ drm_property_replace_blob(&crtc_state->uapi.ctm,
+ crtc_state->hw.ctm);
+}
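
These two copy helpers are the crux of the base to uapi/hw split running
through this patch: intel_crtc_state now carries the state userspace requested
(uapi) and the state to be programmed into the hardware (hw) as separate
views, synchronized explicitly at well-defined points. A rough sketch of the
assumed layout (abridged; the real definition lives in
intel_display_types.h):

	struct intel_crtc_state {
		/* uapi: state requested by userspace, checked and committed */
		struct drm_crtc_state uapi;

		/*
		 * hw: state to be programmed into the hardware; may diverge
		 * from uapi, e.g. on pipes slaved to another pipe
		 */
		struct {
			bool active, enable;
			struct drm_property_blob *degamma_lut, *gamma_lut, *ctm;
			struct drm_display_mode mode, adjusted_mode;
		} hw;

		/* ... the intel-specific members (cpu_transcoder, wm, ...) */
	};
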
+
static int
-clear_intel_crtc_state(struct intel_crtc_state *crtc_state)
+intel_crtc_prepare_cleared_state(struct intel_crtc_state *crtc_state)
{
- struct drm_i915_private *dev_priv =
- to_i915(crtc_state->base.crtc->dev);
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
struct intel_crtc_state *saved_state;
- saved_state = kzalloc(sizeof(*saved_state), GFP_KERNEL);
+ saved_state = intel_crtc_state_alloc(crtc);
if (!saved_state)
return -ENOMEM;
+ /* free the old crtc_state->hw members */
+ intel_crtc_free_hw_state(crtc_state);
+
/* FIXME: before the switch to atomic started, a new pipe_config was
* kzalloc'd. Code that depends on any field being zero should be
* fixed, so that the crtc_state can be safely duplicated. For now,
	 * only fields that are known to not cause problems are preserved. */
+ saved_state->uapi = crtc_state->uapi;
saved_state->scaler_state = crtc_state->scaler_state;
saved_state->shared_dpll = crtc_state->shared_dpll;
saved_state->dpll_hw_state = crtc_state->dpll_hw_state;
@@ -12583,37 +13056,34 @@ clear_intel_crtc_state(struct intel_crtc_state *crtc_state)
saved_state->wm = crtc_state->wm;
/*
* Save the slave bitmask which gets filled for master crtc state during
- * slave atomic check call.
+	 * slave atomic check call. For all other CRTCs, reset the port sync
+	 * variables; crtc_state->master_transcoder needs to be set to
+	 * INVALID_TRANSCODER.
*/
- if (is_trans_port_sync_master(crtc_state))
+ reset_port_sync_mode_state(saved_state);
+ if (intel_atomic_is_master_connector(crtc_state))
saved_state->sync_mode_slaves_mask =
crtc_state->sync_mode_slaves_mask;
- /* Keep base drm_crtc_state intact, only clear our extended struct */
- BUILD_BUG_ON(offsetof(struct intel_crtc_state, base));
- memcpy(&crtc_state->base + 1, &saved_state->base + 1,
- sizeof(*crtc_state) - sizeof(crtc_state->base));
-
+ memcpy(crtc_state, saved_state, sizeof(*crtc_state));
kfree(saved_state);
+
+ intel_crtc_copy_uapi_to_hw_state(crtc_state);
+
return 0;
}
static int
intel_modeset_pipe_config(struct intel_crtc_state *pipe_config)
{
- struct drm_crtc *crtc = pipe_config->base.crtc;
- struct drm_atomic_state *state = pipe_config->base.state;
+ struct drm_crtc *crtc = pipe_config->uapi.crtc;
+ struct drm_atomic_state *state = pipe_config->uapi.state;
struct intel_encoder *encoder;
struct drm_connector *connector;
struct drm_connector_state *connector_state;
int base_bpp, ret;
- int i;
+ int i, tile_group_id = -1, num_tiled_conns = 0;
bool retry = true;
- ret = clear_intel_crtc_state(pipe_config);
- if (ret)
- return ret;
-
pipe_config->cpu_transcoder =
(enum transcoder) to_intel_crtc(crtc)->pipe;
@@ -12622,13 +13092,13 @@ intel_modeset_pipe_config(struct intel_crtc_state *pipe_config)
* positive or negative polarity is requested, treat this as meaning
* negative polarity.
*/
- if (!(pipe_config->base.adjusted_mode.flags &
+ if (!(pipe_config->hw.adjusted_mode.flags &
(DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NHSYNC)))
- pipe_config->base.adjusted_mode.flags |= DRM_MODE_FLAG_NHSYNC;
+ pipe_config->hw.adjusted_mode.flags |= DRM_MODE_FLAG_NHSYNC;
- if (!(pipe_config->base.adjusted_mode.flags &
+ if (!(pipe_config->hw.adjusted_mode.flags &
(DRM_MODE_FLAG_PVSYNC | DRM_MODE_FLAG_NVSYNC)))
- pipe_config->base.adjusted_mode.flags |= DRM_MODE_FLAG_NVSYNC;
+ pipe_config->hw.adjusted_mode.flags |= DRM_MODE_FLAG_NVSYNC;
ret = compute_baseline_pipe_bpp(to_intel_crtc(crtc),
pipe_config);
@@ -12645,7 +13115,7 @@ intel_modeset_pipe_config(struct intel_crtc_state *pipe_config)
* computation to clearly distinguish it from the adjusted mode, which
* can be changed by the connectors in the below retry loop.
*/
- drm_mode_get_hv_timing(&pipe_config->base.mode,
+ drm_mode_get_hv_timing(&pipe_config->hw.mode,
&pipe_config->pipe_src_w,
&pipe_config->pipe_src_h);
@@ -12678,16 +13148,25 @@ encoder_retry:
pipe_config->pixel_multiplier = 1;
/* Fill in default crtc timings, allow encoders to overwrite them. */
- drm_mode_set_crtcinfo(&pipe_config->base.adjusted_mode,
+ drm_mode_set_crtcinfo(&pipe_config->hw.adjusted_mode,
CRTC_STEREO_DOUBLE);
- /* Set the crtc_state defaults for trans_port_sync */
- pipe_config->master_transcoder = INVALID_TRANSCODER;
- ret = icl_add_sync_mode_crtcs(pipe_config);
- if (ret) {
- DRM_DEBUG_KMS("Cannot assign Sync Mode CRTCs: %d\n",
- ret);
- return ret;
+ /* Get tile_group_id of tiled connector */
+ for_each_new_connector_in_state(state, connector, connector_state, i) {
+ if (connector_state->crtc == crtc &&
+ connector->has_tile) {
+ tile_group_id = connector->tile_group->id;
+ break;
+ }
+ }
+
+	/* Get the total number of tiled connectors in the state that belong
+	 * to this tile group.
+	 */
+ for_each_new_connector_in_state(state, connector, connector_state, i) {
+ if (connector->has_tile &&
+ connector->tile_group->id == tile_group_id)
+ num_tiled_conns++;
}
/* Pass our mode to the connectors and the CRTC to give them a chance to
@@ -12698,6 +13177,14 @@ encoder_retry:
if (connector_state->crtc != crtc)
continue;
+ ret = icl_compute_port_sync_crtc_state(connector, pipe_config,
+ num_tiled_conns);
+ if (ret) {
+ DRM_DEBUG_KMS("Cannot assign Sync Mode CRTCs: %d\n",
+ ret);
+ return ret;
+ }
+
encoder = to_intel_encoder(connector_state->best_encoder);
ret = encoder->compute_config(encoder, pipe_config,
connector_state);
@@ -12712,7 +13199,7 @@ encoder_retry:
/* Set default port clock if not overwritten by the encoder. Needs to be
* done afterwards in case the encoder adjusts the mode. */
if (!pipe_config->port_clock)
- pipe_config->port_clock = pipe_config->base.adjusted_mode.crtc_clock
+ pipe_config->port_clock = pipe_config->hw.adjusted_mode.crtc_clock
* pipe_config->pixel_multiplier;
ret = intel_crtc_compute_config(to_intel_crtc(crtc), pipe_config);
@@ -12741,6 +13228,12 @@ encoder_retry:
DRM_DEBUG_KMS("hw max bpp: %i, pipe bpp: %i, dithering: %i\n",
base_bpp, pipe_config->pipe_bpp, pipe_config->dither);
+ /*
+ * Make drm_calc_timestamping_constants in
+ * drm_atomic_helper_update_legacy_modeset_state() happy
+ */
+ pipe_config->uapi.adjusted_mode = pipe_config->hw.adjusted_mode;
+
return 0;
}
@@ -12819,7 +13312,7 @@ pipe_config_infoframe_mismatch(struct drm_i915_private *dev_priv,
const union hdmi_infoframe *b)
{
if (fastset) {
- if ((drm_debug & DRM_UT_KMS) == 0)
+ if (!drm_debug_enabled(DRM_UT_KMS))
return;
DRM_DEBUG_KMS("fastset mismatch in %s infoframe\n", name);
@@ -12879,13 +13372,13 @@ intel_pipe_config_compare(const struct intel_crtc_state *current_config,
const struct intel_crtc_state *pipe_config,
bool fastset)
{
- struct drm_i915_private *dev_priv = to_i915(current_config->base.crtc->dev);
- struct intel_crtc *crtc = to_intel_crtc(pipe_config->base.crtc);
+ struct drm_i915_private *dev_priv = to_i915(current_config->uapi.crtc->dev);
+ struct intel_crtc *crtc = to_intel_crtc(pipe_config->uapi.crtc);
bool ret = true;
u32 bp_gamma = 0;
bool fixup_inherited = fastset &&
- (current_config->base.mode.private_flags & I915_MODE_FLAG_INHERITED) &&
- !(pipe_config->base.mode.private_flags & I915_MODE_FLAG_INHERITED);
+ (current_config->hw.mode.private_flags & I915_MODE_FLAG_INHERITED) &&
+ !(pipe_config->hw.mode.private_flags & I915_MODE_FLAG_INHERITED);
if (fixup_inherited && !fastboot_enabled(dev_priv)) {
DRM_DEBUG_KMS("initial modeset and fastboot not set\n");
@@ -13074,19 +13567,19 @@ intel_pipe_config_compare(const struct intel_crtc_state *current_config,
PIPE_CONF_CHECK_X(output_types);
- PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_hdisplay);
- PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_htotal);
- PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_hblank_start);
- PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_hblank_end);
- PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_hsync_start);
- PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_hsync_end);
+ PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_hdisplay);
+ PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_htotal);
+ PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_hblank_start);
+ PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_hblank_end);
+ PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_hsync_start);
+ PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_hsync_end);
- PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_vdisplay);
- PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_vtotal);
- PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_vblank_start);
- PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_vblank_end);
- PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_vsync_start);
- PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_vsync_end);
+ PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_vdisplay);
+ PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_vtotal);
+ PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_vblank_start);
+ PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_vblank_end);
+ PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_vsync_start);
+ PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_vsync_end);
PIPE_CONF_CHECK_I(pixel_multiplier);
PIPE_CONF_CHECK_I(output_format);
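
For readers new to this function: each PIPE_CONF_CHECK_* macro compares one
member of the read-back configuration against the freshly computed one and
reports any mismatch. PIPE_CONF_CHECK_I is assumed to expand to roughly the
following (the real definition, with its fastset-aware logging, appears
earlier in intel_pipe_config_compare()):

	#define PIPE_CONF_CHECK_I(name) do { \
		if (current_config->name != pipe_config->name) { \
			pipe_config_mismatch(fastset, __stringify(name), \
					     "(expected %i, found %i)", \
					     current_config->name, \
					     pipe_config->name); \
			ret = false; \
		} \
	} while (0)
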
@@ -13103,17 +13596,17 @@ intel_pipe_config_compare(const struct intel_crtc_state *current_config,
PIPE_CONF_CHECK_BOOL_INCOMPLETE(has_audio);
- PIPE_CONF_CHECK_FLAGS(base.adjusted_mode.flags,
+ PIPE_CONF_CHECK_FLAGS(hw.adjusted_mode.flags,
DRM_MODE_FLAG_INTERLACE);
if (!PIPE_CONF_QUIRK(PIPE_CONFIG_QUIRK_MODE_SYNC_FLAGS)) {
- PIPE_CONF_CHECK_FLAGS(base.adjusted_mode.flags,
+ PIPE_CONF_CHECK_FLAGS(hw.adjusted_mode.flags,
DRM_MODE_FLAG_PHSYNC);
- PIPE_CONF_CHECK_FLAGS(base.adjusted_mode.flags,
+ PIPE_CONF_CHECK_FLAGS(hw.adjusted_mode.flags,
DRM_MODE_FLAG_NHSYNC);
- PIPE_CONF_CHECK_FLAGS(base.adjusted_mode.flags,
+ PIPE_CONF_CHECK_FLAGS(hw.adjusted_mode.flags,
DRM_MODE_FLAG_PVSYNC);
- PIPE_CONF_CHECK_FLAGS(base.adjusted_mode.flags,
+ PIPE_CONF_CHECK_FLAGS(hw.adjusted_mode.flags,
DRM_MODE_FLAG_NVSYNC);
}
@@ -13152,7 +13645,7 @@ intel_pipe_config_compare(const struct intel_crtc_state *current_config,
bp_gamma = intel_color_get_gamma_bit_precision(pipe_config);
if (bp_gamma)
- PIPE_CONF_CHECK_COLOR_LUT(gamma_mode, base.gamma_lut, bp_gamma);
+ PIPE_CONF_CHECK_COLOR_LUT(gamma_mode, hw.gamma_lut, bp_gamma);
}
@@ -13197,7 +13690,7 @@ intel_pipe_config_compare(const struct intel_crtc_state *current_config,
if (IS_G4X(dev_priv) || INTEL_GEN(dev_priv) >= 5)
PIPE_CONF_CHECK_I(pipe_bpp);
- PIPE_CONF_CHECK_CLOCK_FUZZY(base.adjusted_mode.crtc_clock);
+ PIPE_CONF_CHECK_CLOCK_FUZZY(hw.adjusted_mode.crtc_clock);
PIPE_CONF_CHECK_CLOCK_FUZZY(port_clock);
PIPE_CONF_CHECK_I(min_voltage_level);
@@ -13212,6 +13705,12 @@ intel_pipe_config_compare(const struct intel_crtc_state *current_config,
PIPE_CONF_CHECK_I(sync_mode_slaves_mask);
PIPE_CONF_CHECK_I(master_transcoder);
+ PIPE_CONF_CHECK_I(dsc.compression_enable);
+ PIPE_CONF_CHECK_I(dsc.dsc_split);
+ PIPE_CONF_CHECK_I(dsc.compressed_bpp);
+
+ PIPE_CONF_CHECK_I(mst_master_transcoder);
+
#undef PIPE_CONF_CHECK_X
#undef PIPE_CONF_CHECK_I
#undef PIPE_CONF_CHECK_BOOL
@@ -13231,7 +13730,7 @@ static void intel_pipe_config_sanity_check(struct drm_i915_private *dev_priv,
if (pipe_config->has_pch_encoder) {
int fdi_dotclock = intel_dotclock_calculate(intel_fdi_link_freq(dev_priv, pipe_config),
&pipe_config->fdi_m_n);
- int dotclock = pipe_config->base.adjusted_mode.crtc_clock;
+ int dotclock = pipe_config->hw.adjusted_mode.crtc_clock;
/*
* FDI already provided one idea for the dotclock.
@@ -13259,7 +13758,7 @@ static void verify_wm_state(struct intel_crtc *crtc,
const enum pipe pipe = crtc->pipe;
int plane, level, max_level = ilk_wm_max_level(dev_priv);
- if (INTEL_GEN(dev_priv) < 9 || !new_crtc_state->base.active)
+ if (INTEL_GEN(dev_priv) < 9 || !new_crtc_state->hw.active)
return;
hw = kzalloc(sizeof(*hw), GFP_KERNEL);
@@ -13464,16 +13963,14 @@ verify_crtc_state(struct intel_crtc *crtc,
struct drm_device *dev = crtc->base.dev;
struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_encoder *encoder;
- struct intel_crtc_state *pipe_config;
- struct drm_atomic_state *state;
+ struct intel_crtc_state *pipe_config = old_crtc_state;
+ struct drm_atomic_state *state = old_crtc_state->uapi.state;
bool active;
- state = old_crtc_state->base.state;
- __drm_atomic_helper_crtc_destroy_state(&old_crtc_state->base);
- pipe_config = old_crtc_state;
- memset(pipe_config, 0, sizeof(*pipe_config));
- pipe_config->base.crtc = &crtc->base;
- pipe_config->base.state = state;
+ __drm_atomic_helper_crtc_destroy_state(&old_crtc_state->uapi);
+ intel_crtc_free_hw_state(old_crtc_state);
+ intel_crtc_state_reset(old_crtc_state, crtc);
+ old_crtc_state->uapi.state = state;
DRM_DEBUG_KMS("[CRTC:%d:%s]\n", crtc->base.base.id, crtc->base.name);
@@ -13481,23 +13978,26 @@ verify_crtc_state(struct intel_crtc *crtc,
/* we keep both pipes enabled on 830 */
if (IS_I830(dev_priv))
- active = new_crtc_state->base.active;
+ active = new_crtc_state->hw.active;
- I915_STATE_WARN(new_crtc_state->base.active != active,
- "crtc active state doesn't match with hw state "
- "(expected %i, found %i)\n", new_crtc_state->base.active, active);
+ I915_STATE_WARN(new_crtc_state->hw.active != active,
+ "crtc active state doesn't match with hw state "
+ "(expected %i, found %i)\n",
+ new_crtc_state->hw.active, active);
- I915_STATE_WARN(crtc->active != new_crtc_state->base.active,
- "transitional active state does not match atomic hw state "
- "(expected %i, found %i)\n", new_crtc_state->base.active, crtc->active);
+ I915_STATE_WARN(crtc->active != new_crtc_state->hw.active,
+ "transitional active state does not match atomic hw state "
+ "(expected %i, found %i)\n",
+ new_crtc_state->hw.active, crtc->active);
for_each_encoder_on_crtc(dev, &crtc->base, encoder) {
enum pipe pipe;
active = encoder->get_hw_state(encoder, &pipe);
- I915_STATE_WARN(active != new_crtc_state->base.active,
- "[ENCODER:%i] active %i with crtc active %i\n",
- encoder->base.base.id, active, new_crtc_state->base.active);
+ I915_STATE_WARN(active != new_crtc_state->hw.active,
+ "[ENCODER:%i] active %i with crtc active %i\n",
+ encoder->base.base.id, active,
+ new_crtc_state->hw.active);
I915_STATE_WARN(active && crtc->pipe != pipe,
"Encoder connected to wrong pipe %c\n",
@@ -13509,7 +14009,7 @@ verify_crtc_state(struct intel_crtc *crtc,
intel_crtc_compute_pixel_rate(pipe_config);
- if (!new_crtc_state->base.active)
+ if (!new_crtc_state->hw.active)
return;
intel_pipe_config_sanity_check(dev_priv, pipe_config);
@@ -13532,7 +14032,7 @@ intel_verify_planes(struct intel_atomic_state *state)
for_each_new_intel_plane_in_state(state, plane,
plane_state, i)
assert_plane(plane, plane_state->planar_slave ||
- plane_state->base.visible);
+ plane_state->uapi.visible);
}
static void
@@ -13571,7 +14071,7 @@ verify_single_dpll_state(struct drm_i915_private *dev_priv,
crtc_mask = drm_crtc_mask(&crtc->base);
- if (new_crtc_state->base.active)
+ if (new_crtc_state->hw.active)
I915_STATE_WARN(!(pll->active_mask & crtc_mask),
"pll active mismatch (expected pipe %c in active mask 0x%02x)\n",
pipe_name(drm_crtc_index(&crtc->base)), pll->active_mask);
@@ -13650,10 +14150,10 @@ intel_modeset_verify_disabled(struct drm_i915_private *dev_priv,
static void
intel_crtc_update_active_timings(const struct intel_crtc_state *crtc_state)
{
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
const struct drm_display_mode *adjusted_mode =
- &crtc_state->base.adjusted_mode;
+ &crtc_state->hw.adjusted_mode;
drm_calc_timestamping_constants(&crtc->base, adjusted_mode);
@@ -13724,7 +14224,7 @@ static void intel_modeset_clear_plls(struct intel_atomic_state *state)
* multiple pipes, and planes are enabled after the pipe, we need to wait at
* least 2 vblanks on the first pipe before enabling planes on the second pipe.
*/
-static int haswell_mode_set_planes_workaround(struct intel_atomic_state *state)
+static int hsw_mode_set_planes_workaround(struct intel_atomic_state *state)
{
struct intel_crtc_state *crtc_state;
struct intel_crtc *crtc;
@@ -13735,7 +14235,7 @@ static int haswell_mode_set_planes_workaround(struct intel_atomic_state *state)
/* look at all crtc's that are going to be enabled in during modeset */
for_each_new_intel_crtc_in_state(state, crtc, crtc_state, i) {
- if (!crtc_state->base.active ||
+ if (!crtc_state->hw.active ||
!needs_modeset(crtc_state))
continue;
@@ -13760,7 +14260,7 @@ static int haswell_mode_set_planes_workaround(struct intel_atomic_state *state)
crtc_state->hsw_workaround_pipe = INVALID_PIPE;
- if (!crtc_state->base.active ||
+ if (!crtc_state->hw.active ||
needs_modeset(crtc_state))
continue;
@@ -13797,12 +14297,12 @@ static int intel_modeset_checks(struct intel_atomic_state *state)
for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
new_crtc_state, i) {
- if (new_crtc_state->base.active)
+ if (new_crtc_state->hw.active)
state->active_pipes |= BIT(crtc->pipe);
else
state->active_pipes &= ~BIT(crtc->pipe);
- if (old_crtc_state->base.active != new_crtc_state->base.active)
+ if (old_crtc_state->hw.active != new_crtc_state->hw.active)
state->active_pipe_changes |= BIT(crtc->pipe);
}
@@ -13819,7 +14319,7 @@ static int intel_modeset_checks(struct intel_atomic_state *state)
intel_modeset_clear_plls(state);
if (IS_HASWELL(dev_priv))
- return haswell_mode_set_planes_workaround(state);
+ return hsw_mode_set_planes_workaround(state);
return 0;
}
@@ -13847,9 +14347,13 @@ static void intel_crtc_check_fastset(const struct intel_crtc_state *old_crtc_sta
if (!intel_pipe_config_compare(old_crtc_state, new_crtc_state, true))
return;
- new_crtc_state->base.mode_changed = false;
+ new_crtc_state->uapi.mode_changed = false;
new_crtc_state->update_pipe = true;
+}
+static void intel_crtc_copy_fastset(const struct intel_crtc_state *old_crtc_state,
+ struct intel_crtc_state *new_crtc_state)
+{
/*
* If we're not doing the full modeset we want to
* keep the current M/N values as they may be
@@ -13972,6 +14476,107 @@ static int intel_atomic_check_crtcs(struct intel_atomic_state *state)
return 0;
}
+static bool intel_cpu_transcoder_needs_modeset(struct intel_atomic_state *state,
+ enum transcoder transcoder)
+{
+ struct intel_crtc_state *new_crtc_state;
+ struct intel_crtc *crtc;
+ int i;
+
+ for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i)
+ if (new_crtc_state->cpu_transcoder == transcoder)
+ return needs_modeset(new_crtc_state);
+
+ return false;
+}
+
+static void
+intel_modeset_synced_crtcs(struct intel_atomic_state *state,
+ u8 transcoders)
+{
+ struct intel_crtc_state *new_crtc_state;
+ struct intel_crtc *crtc;
+ int i;
+
+ for_each_new_intel_crtc_in_state(state, crtc,
+ new_crtc_state, i) {
+ if (transcoders & BIT(new_crtc_state->cpu_transcoder)) {
+ new_crtc_state->uapi.mode_changed = true;
+ new_crtc_state->update_pipe = false;
+ }
+ }
+}
+
+static int
+intel_modeset_all_tiles(struct intel_atomic_state *state, int tile_grp_id)
+{
+ struct drm_i915_private *dev_priv = to_i915(state->base.dev);
+ struct drm_connector *connector;
+ struct drm_connector_list_iter conn_iter;
+ int ret = 0;
+
+ drm_connector_list_iter_begin(&dev_priv->drm, &conn_iter);
+ drm_for_each_connector_iter(connector, &conn_iter) {
+ struct drm_connector_state *conn_state;
+ struct drm_crtc_state *crtc_state;
+
+ if (!connector->has_tile ||
+ connector->tile_group->id != tile_grp_id)
+ continue;
+ conn_state = drm_atomic_get_connector_state(&state->base,
+ connector);
+ if (IS_ERR(conn_state)) {
+ ret = PTR_ERR(conn_state);
+ break;
+ }
+
+ if (!conn_state->crtc)
+ continue;
+
+ crtc_state = drm_atomic_get_crtc_state(&state->base,
+ conn_state->crtc);
+ if (IS_ERR(crtc_state)) {
+ ret = PTR_ERR(crtc_state);
+ break;
+ }
+ crtc_state->mode_changed = true;
+ ret = drm_atomic_add_affected_connectors(&state->base,
+ conn_state->crtc);
+ if (ret)
+ break;
+ }
+ drm_connector_list_iter_end(&conn_iter);
+
+ return ret;
+}
+
+static int
+intel_atomic_check_tiled_conns(struct intel_atomic_state *state)
+{
+ struct drm_i915_private *dev_priv = to_i915(state->base.dev);
+ struct drm_connector *connector;
+ struct drm_connector_state *old_conn_state, *new_conn_state;
+ int i, ret;
+
+ if (INTEL_GEN(dev_priv) < 11)
+ return 0;
+
+	/* If the connector is tiled, mark all other tiled CRTCs as needing a modeset */
+ for_each_oldnew_connector_in_state(&state->base, connector,
+ old_conn_state, new_conn_state, i) {
+ if (!connector->has_tile)
+ continue;
+ if (!intel_connector_needs_modeset(state, connector))
+ continue;
+
+ ret = intel_modeset_all_tiles(state, connector->tile_group->id);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
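
To make the tiled-connector flow concrete: for a hypothetical two-tile DP
display where only tile 0's connector changes in a commit,
intel_connector_needs_modeset() fires for that connector,
intel_atomic_check_tiled_conns() then calls intel_modeset_all_tiles() with the
tile group id, and that pulls tile 1's connector and its CRTC into the atomic
state with mode_changed set, so both halves of the panel are always brought up
and torn down together.
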
/**
* intel_atomic_check - validate state object
* @dev: drm device
@@ -13990,33 +14595,96 @@ static int intel_atomic_check(struct drm_device *dev,
/* Catch I915_MODE_FLAG_INHERITED */
for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
new_crtc_state, i) {
- if (new_crtc_state->base.mode.private_flags !=
- old_crtc_state->base.mode.private_flags)
- new_crtc_state->base.mode_changed = true;
+ if (new_crtc_state->hw.mode.private_flags !=
+ old_crtc_state->hw.mode.private_flags)
+ new_crtc_state->uapi.mode_changed = true;
}
ret = drm_atomic_helper_check_modeset(dev, &state->base);
if (ret)
goto fail;
+	/*
+	 * This check adds all the connectors in the current state that belong
+	 * to the same tile group to a full modeset.
+	 * It sets mode_changed to true directly and also calls
+	 * drm_atomic_add_affected_connectors(), hence we do not explicitly
+	 * call drm_atomic_helper_check_modeset() again after this.
+	 *
+	 * FIXME: Handle the corner case where one of the tiled connectors is
+	 * disconnected and its tile info is lost, but, since it was previously
+	 * synced to another connector, it still needs to be added to the modeset.
+	 */
+ ret = intel_atomic_check_tiled_conns(state);
+ if (ret)
+ goto fail;
+
for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
new_crtc_state, i) {
- if (!needs_modeset(new_crtc_state))
+ if (!needs_modeset(new_crtc_state)) {
+ /* Light copy */
+ intel_crtc_copy_uapi_to_hw_state_nomodeset(new_crtc_state);
+
continue;
+ }
- if (!new_crtc_state->base.enable) {
- any_ms = true;
+ if (!new_crtc_state->uapi.enable) {
+ intel_crtc_copy_uapi_to_hw_state(new_crtc_state);
continue;
}
+ ret = intel_crtc_prepare_cleared_state(new_crtc_state);
+ if (ret)
+ goto fail;
+
ret = intel_modeset_pipe_config(new_crtc_state);
if (ret)
goto fail;
intel_crtc_check_fastset(old_crtc_state, new_crtc_state);
+ }
+
+	/*
+	 * Check if fastset is allowed by external dependencies like other
+	 * pipes and transcoders.
+	 *
+	 * Right now it only forces a full modeset when the MST master
+	 * transcoder did not change but the pipe of the master transcoder
+	 * needs a full modeset, in which case all slaves also need to do a
+	 * full modeset. Likewise, in the case of port-synced crtcs, if one of
+	 * the synced crtcs needs a full modeset, all the other synced crtcs
+	 * are forced to do a full modeset as well.
+	 */
+ for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
+ if (!new_crtc_state->hw.enable || needs_modeset(new_crtc_state))
+ continue;
+
+ if (intel_dp_mst_is_slave_trans(new_crtc_state)) {
+ enum transcoder master = new_crtc_state->mst_master_transcoder;
+
+ if (intel_cpu_transcoder_needs_modeset(state, master)) {
+ new_crtc_state->uapi.mode_changed = true;
+ new_crtc_state->update_pipe = false;
+ }
+ } else if (is_trans_port_sync_mode(new_crtc_state)) {
+ u8 trans = new_crtc_state->sync_mode_slaves_mask |
+ BIT(new_crtc_state->master_transcoder);
- if (needs_modeset(new_crtc_state))
+ intel_modeset_synced_crtcs(state, trans);
+ }
+ }
+
+ for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
+ new_crtc_state, i) {
+ if (needs_modeset(new_crtc_state)) {
any_ms = true;
+ continue;
+ }
+
+ if (!new_crtc_state->update_pipe)
+ continue;
+
+ intel_crtc_copy_fastset(old_crtc_state, new_crtc_state);
}
if (any_ms && !check_digital_port_conflicts(state)) {
@@ -14106,7 +14774,7 @@ void intel_crtc_arm_fifo_underrun(struct intel_crtc *crtc,
{
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
- if (!IS_GEN(dev_priv, 2))
+ if (!IS_GEN(dev_priv, 2) || crtc_state->active_planes)
intel_set_cpu_fifo_underrun_reporting(dev_priv, crtc->pipe, true);
if (crtc_state->has_pch_encoder) {
@@ -14120,7 +14788,7 @@ void intel_crtc_arm_fifo_underrun(struct intel_crtc *crtc,
static void intel_pipe_fastset(const struct intel_crtc_state *old_crtc_state,
const struct intel_crtc_state *new_crtc_state)
{
- struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->base.crtc);
+ struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->uapi.crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
/*
@@ -14138,12 +14806,12 @@ static void intel_pipe_fastset(const struct intel_crtc_state *old_crtc_state,
skl_detach_scalers(new_crtc_state);
if (new_crtc_state->pch_pfit.enabled)
- skylake_pfit_enable(new_crtc_state);
+ skl_pfit_enable(new_crtc_state);
} else if (HAS_PCH_SPLIT(dev_priv)) {
if (new_crtc_state->pch_pfit.enabled)
- ironlake_pfit_enable(new_crtc_state);
+ ilk_pfit_enable(new_crtc_state);
else if (old_crtc_state->pch_pfit.enabled)
- ironlake_pfit_disable(old_crtc_state);
+ ilk_pfit_disable(old_crtc_state);
}
if (INTEL_GEN(dev_priv) >= 11)
@@ -14154,6 +14822,7 @@ static void commit_pipe_config(struct intel_atomic_state *state,
struct intel_crtc_state *old_crtc_state,
struct intel_crtc_state *new_crtc_state)
{
+ struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->uapi.crtc);
struct drm_i915_private *dev_priv = to_i915(state->base.dev);
bool modeset = needs_modeset(new_crtc_state);
@@ -14162,7 +14831,7 @@ static void commit_pipe_config(struct intel_atomic_state *state,
* CRTC was enabled.
*/
if (!modeset) {
- if (new_crtc_state->base.color_mgmt_changed ||
+ if (new_crtc_state->uapi.color_mgmt_changed ||
new_crtc_state->update_pipe)
intel_color_commit(new_crtc_state);
@@ -14177,8 +14846,7 @@ static void commit_pipe_config(struct intel_atomic_state *state,
}
if (dev_priv->display.atomic_update_watermarks)
- dev_priv->display.atomic_update_watermarks(state,
- new_crtc_state);
+ dev_priv->display.atomic_update_watermarks(state, crtc);
}
static void intel_update_crtc(struct intel_crtc *crtc,
@@ -14195,20 +14863,20 @@ static void intel_update_crtc(struct intel_crtc *crtc,
if (modeset) {
intel_crtc_update_active_timings(new_crtc_state);
- dev_priv->display.crtc_enable(new_crtc_state, state);
+ dev_priv->display.crtc_enable(state, crtc);
/* vblanks work again, re-enable pipe CRC. */
intel_crtc_enable_pipe_crc(crtc);
} else {
if (new_crtc_state->preload_luts &&
- (new_crtc_state->base.color_mgmt_changed ||
+ (new_crtc_state->uapi.color_mgmt_changed ||
new_crtc_state->update_pipe))
intel_color_load_luts(new_crtc_state);
- intel_pre_plane_update(old_crtc_state, new_crtc_state);
+ intel_pre_plane_update(state, crtc);
if (new_crtc_state->update_pipe)
- intel_encoders_update_pipe(crtc, new_crtc_state, state);
+ intel_encoders_update_pipe(state, crtc);
}
if (new_crtc_state->update_pipe && !new_crtc_state->enable_fbc)
@@ -14235,13 +14903,13 @@ static void intel_update_crtc(struct intel_crtc *crtc,
* of enabling them on the CRTC's first fastset.
*/
if (new_crtc_state->update_pipe && !modeset &&
- old_crtc_state->base.mode.private_flags & I915_MODE_FLAG_INHERITED)
+ old_crtc_state->hw.mode.private_flags & I915_MODE_FLAG_INHERITED)
intel_crtc_arm_fifo_underrun(crtc, new_crtc_state);
}
static struct intel_crtc *intel_get_slave_crtc(const struct intel_crtc_state *new_crtc_state)
{
- struct drm_i915_private *dev_priv = to_i915(new_crtc_state->base.crtc->dev);
+ struct drm_i915_private *dev_priv = to_i915(new_crtc_state->uapi.crtc->dev);
enum transcoder slave_transcoder;
WARN_ON(!is_power_of_2(new_crtc_state->sync_mode_slaves_mask));
@@ -14266,97 +14934,60 @@ static void intel_old_crtc_state_disables(struct intel_atomic_state *state,
*/
intel_crtc_disable_pipe_crc(crtc);
- dev_priv->display.crtc_disable(old_crtc_state, state);
+ dev_priv->display.crtc_disable(state, crtc);
crtc->active = false;
intel_fbc_disable(crtc);
intel_disable_shared_dpll(old_crtc_state);
- /*
- * Underruns don't always raise interrupts,
- * so check manually.
- */
- intel_check_cpu_fifo_underruns(dev_priv);
- intel_check_pch_fifo_underruns(dev_priv);
-
/* FIXME unify this for all platforms */
- if (!new_crtc_state->base.active &&
+ if (!new_crtc_state->hw.active &&
!HAS_GMCH(dev_priv) &&
dev_priv->display.initial_watermarks)
- dev_priv->display.initial_watermarks(state,
- new_crtc_state);
-}
-
-static void intel_trans_port_sync_modeset_disables(struct intel_atomic_state *state,
- struct intel_crtc *crtc,
- struct intel_crtc_state *old_crtc_state,
- struct intel_crtc_state *new_crtc_state)
-{
- struct intel_crtc *slave_crtc = intel_get_slave_crtc(new_crtc_state);
- struct intel_crtc_state *new_slave_crtc_state =
- intel_atomic_get_new_crtc_state(state, slave_crtc);
- struct intel_crtc_state *old_slave_crtc_state =
- intel_atomic_get_old_crtc_state(state, slave_crtc);
-
- WARN_ON(!slave_crtc || !new_slave_crtc_state ||
- !old_slave_crtc_state);
-
- /* Disable Slave first */
- intel_pre_plane_update(old_slave_crtc_state, new_slave_crtc_state);
- if (old_slave_crtc_state->base.active)
- intel_old_crtc_state_disables(state,
- old_slave_crtc_state,
- new_slave_crtc_state,
- slave_crtc);
-
- /* Disable Master */
- intel_pre_plane_update(old_crtc_state, new_crtc_state);
- if (old_crtc_state->base.active)
- intel_old_crtc_state_disables(state,
- old_crtc_state,
- new_crtc_state,
- crtc);
+ dev_priv->display.initial_watermarks(state, crtc);
}
static void intel_commit_modeset_disables(struct intel_atomic_state *state)
{
struct intel_crtc_state *new_crtc_state, *old_crtc_state;
struct intel_crtc *crtc;
+ u32 handled = 0;
int i;
- /*
- * Disable CRTC/pipes in reverse order because some features(MST in
- * TGL+) requires master and slave relationship between pipes, so it
- * should always pick the lowest pipe as master as it will be enabled
- * first and disable in the reverse order so the master will be the
- * last one to be disabled.
- */
- for_each_oldnew_intel_crtc_in_state_reverse(state, crtc, old_crtc_state,
- new_crtc_state, i) {
+ /* Only disable port sync and MST slaves */
+ for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
+ new_crtc_state, i) {
if (!needs_modeset(new_crtc_state))
continue;
+ if (!old_crtc_state->hw.active)
+ continue;
+
	/* In case of Transcoder port Sync, master/slave CRTCs can be
* assigned in any order and we need to make sure that
* slave CRTCs are disabled first and then master CRTC since
* Slave vblanks are masked till Master Vblanks.
*/
- if (is_trans_port_sync_mode(new_crtc_state)) {
- if (is_trans_port_sync_master(new_crtc_state))
- intel_trans_port_sync_modeset_disables(state,
- crtc,
- old_crtc_state,
- new_crtc_state);
- else
- continue;
- } else {
- intel_pre_plane_update(old_crtc_state, new_crtc_state);
+ if (!is_trans_port_sync_slave(old_crtc_state) &&
+ !intel_dp_mst_is_slave_trans(old_crtc_state))
+ continue;
- if (old_crtc_state->base.active)
- intel_old_crtc_state_disables(state,
- old_crtc_state,
- new_crtc_state,
- crtc);
- }
+ intel_pre_plane_update(state, crtc);
+ intel_old_crtc_state_disables(state, old_crtc_state,
+ new_crtc_state, crtc);
+ handled |= BIT(crtc->pipe);
+ }
+
+ /* Disable everything else left on */
+ for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
+ new_crtc_state, i) {
+ if (!needs_modeset(new_crtc_state) ||
+ (handled & BIT(crtc->pipe)))
+ continue;
+
+ intel_pre_plane_update(state, crtc);
+ if (old_crtc_state->hw.active)
+ intel_old_crtc_state_disables(state, old_crtc_state,
+ new_crtc_state, crtc);
}
}
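
The rewritten disable path above replaces the old recursive master/slave helper with two flat passes over the same CRTC list, using a `handled` bitmask so no pipe is disabled twice. A minimal, self-contained C sketch of that ordering pattern — is_slave() and disable() are hypothetical stand-ins for the driver's real predicates and actions:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define MAX_PIPES 4

/* Hypothetical stand-ins for the driver's predicates and actions. */
static bool is_slave(int pipe) { return pipe == 2; }
static void disable(int pipe) { printf("disable pipe %d\n", pipe); }

static void disable_all(uint32_t active_mask)
{
	uint32_t handled = 0;
	int pipe;

	/* Pass 1: slaves only, while their masters are still running. */
	for (pipe = 0; pipe < MAX_PIPES; pipe++) {
		if (!(active_mask & (1u << pipe)) || !is_slave(pipe))
			continue;
		disable(pipe);
		handled |= 1u << pipe;
	}

	/* Pass 2: everything active that pass 1 did not handle. */
	for (pipe = 0; pipe < MAX_PIPES; pipe++) {
		if (!(active_mask & (1u << pipe)) || (handled & (1u << pipe)))
			continue;
		disable(pipe);
	}
}

int main(void)
{
	disable_all(0x7); /* pipes 0..2 active; slave pipe 2 goes down first */
	return 0;
}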
@@ -14367,7 +14998,7 @@ static void intel_commit_modeset_enables(struct intel_atomic_state *state)
int i;
for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
- if (!new_crtc_state->base.active)
+ if (!new_crtc_state->hw.active)
continue;
intel_update_crtc(crtc, state, old_crtc_state,
@@ -14382,7 +15013,7 @@ static void intel_crtc_enable_trans_port_sync(struct intel_crtc *crtc,
struct drm_i915_private *dev_priv = to_i915(state->base.dev);
intel_crtc_update_active_timings(new_crtc_state);
- dev_priv->display.crtc_enable(new_crtc_state, state);
+ dev_priv->display.crtc_enable(state, crtc);
intel_crtc_enable_pipe_crc(crtc);
}
@@ -14398,10 +15029,14 @@ static void intel_set_dp_tp_ctl_normal(struct intel_crtc *crtc,
if (conn_state->crtc == &crtc->base)
break;
}
- intel_dp = enc_to_intel_dp(&intel_attached_encoder(conn)->base);
+ intel_dp = enc_to_intel_dp(intel_attached_encoder(to_intel_connector(conn)));
intel_dp_stop_link_train(intel_dp);
}
+/*
+ * TODO: This is only called from port sync and it is identical to what will be
+ * executed again in intel_update_crtc() for port sync pipes
+ */
static void intel_post_crtc_enable_updates(struct intel_crtc *crtc,
struct intel_atomic_state *state)
{
@@ -14432,7 +15067,7 @@ static void intel_post_crtc_enable_updates(struct intel_crtc *crtc,
* of enabling them on the CRTC's first fastset.
*/
if (new_crtc_state->update_pipe && !modeset &&
- old_crtc_state->base.mode.private_flags & I915_MODE_FLAG_INHERITED)
+ old_crtc_state->hw.mode.private_flags & I915_MODE_FLAG_INHERITED)
intel_crtc_arm_fifo_underrun(crtc, new_crtc_state);
}
@@ -14487,17 +15122,25 @@ static void skl_commit_modeset_enables(struct intel_atomic_state *state)
struct drm_i915_private *dev_priv = to_i915(state->base.dev);
struct intel_crtc *crtc;
struct intel_crtc_state *old_crtc_state, *new_crtc_state;
- unsigned int updated = 0;
- bool progress;
- int i;
u8 hw_enabled_slices = dev_priv->wm.skl_hw.ddb.enabled_slices;
u8 required_slices = state->wm_results.ddb.enabled_slices;
struct skl_ddb_entry entries[I915_MAX_PIPES] = {};
+ const u8 num_pipes = INTEL_NUM_PIPES(dev_priv);
+ u8 update_pipes = 0, modeset_pipes = 0;
+ int i;
+
+ for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
+ if (!new_crtc_state->hw.active)
+ continue;
- for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i)
/* ignore allocations for crtc's that have been turned off. */
- if (new_crtc_state->base.active)
+ if (!needs_modeset(new_crtc_state)) {
entries[i] = old_crtc_state->wm.skl.ddb;
+ update_pipes |= BIT(crtc->pipe);
+ } else {
+ modeset_pipes |= BIT(crtc->pipe);
+ }
+ }
/* If 2nd DBuf slice required, enable it here */
if (INTEL_GEN(dev_priv) >= 11 && required_slices > hw_enabled_slices)
@@ -14506,27 +15149,29 @@ static void skl_commit_modeset_enables(struct intel_atomic_state *state)
/*
* Whenever the number of active pipes changes, we need to make sure we
* update the pipes in the right order so that their ddb allocations
- * never overlap with eachother inbetween CRTC updates. Otherwise we'll
+ * never overlap with each other between CRTC updates. Otherwise we'll
* cause pipe underruns and other bad stuff.
+ *
+	 * So first let's enable all pipes that do not need a full modeset as
+ * those don't have any external dependency.
*/
- do {
- progress = false;
-
- for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
+ while (update_pipes) {
+ for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
+ new_crtc_state, i) {
enum pipe pipe = crtc->pipe;
- bool vbl_wait = false;
- bool modeset = needs_modeset(new_crtc_state);
- if (updated & BIT(crtc->pipe) || !new_crtc_state->base.active)
+ if ((update_pipes & BIT(pipe)) == 0)
continue;
if (skl_ddb_allocation_overlaps(&new_crtc_state->wm.skl.ddb,
- entries,
- INTEL_NUM_PIPES(dev_priv), i))
+ entries, num_pipes, i))
continue;
- updated |= BIT(pipe);
entries[i] = new_crtc_state->wm.skl.ddb;
+ update_pipes &= ~BIT(pipe);
+
+ intel_update_crtc(crtc, state, old_crtc_state,
+ new_crtc_state);
/*
		 * If this is an already active pipe, its DDB changed,
@@ -14536,29 +15181,71 @@ static void skl_commit_modeset_enables(struct intel_atomic_state *state)
*/
if (!skl_ddb_entry_equal(&new_crtc_state->wm.skl.ddb,
&old_crtc_state->wm.skl.ddb) &&
- !modeset &&
- state->wm_results.dirty_pipes != updated)
- vbl_wait = true;
-
- if (modeset && is_trans_port_sync_mode(new_crtc_state)) {
- if (is_trans_port_sync_master(new_crtc_state))
- intel_update_trans_port_sync_crtcs(crtc,
- state,
- old_crtc_state,
- new_crtc_state);
- else
- continue;
- } else {
- intel_update_crtc(crtc, state, old_crtc_state,
- new_crtc_state);
- }
-
- if (vbl_wait)
+ (update_pipes | modeset_pipes))
intel_wait_for_vblank(dev_priv, pipe);
+ }
+ }
+
+ /*
+	 * Enable all pipes that need a modeset and do not depend on
+	 * other pipes
+ */
+ for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
+ new_crtc_state, i) {
+ enum pipe pipe = crtc->pipe;
- progress = true;
+ if ((modeset_pipes & BIT(pipe)) == 0)
+ continue;
+
+ if (intel_dp_mst_is_slave_trans(new_crtc_state) ||
+ is_trans_port_sync_slave(new_crtc_state))
+ continue;
+
+ WARN_ON(skl_ddb_allocation_overlaps(&new_crtc_state->wm.skl.ddb,
+ entries, num_pipes, i));
+
+ entries[i] = new_crtc_state->wm.skl.ddb;
+ modeset_pipes &= ~BIT(pipe);
+
+ if (is_trans_port_sync_mode(new_crtc_state)) {
+ struct intel_crtc *slave_crtc;
+
+ intel_update_trans_port_sync_crtcs(crtc, state,
+ old_crtc_state,
+ new_crtc_state);
+
+ slave_crtc = intel_get_slave_crtc(new_crtc_state);
+ /* TODO: update entries[] of slave */
+ modeset_pipes &= ~BIT(slave_crtc->pipe);
+
+ } else {
+ intel_update_crtc(crtc, state, old_crtc_state,
+ new_crtc_state);
}
- } while (progress);
+ }
+
+ /*
+	 * Finally enable all pipes that need a modeset and depend on
+	 * other pipes; right now these are only MST slaves, as both port
+	 * sync slave and master are enabled together
+ */
+ for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
+ new_crtc_state, i) {
+ enum pipe pipe = crtc->pipe;
+
+ if ((modeset_pipes & BIT(pipe)) == 0)
+ continue;
+
+ WARN_ON(skl_ddb_allocation_overlaps(&new_crtc_state->wm.skl.ddb,
+ entries, num_pipes, i));
+
+ entries[i] = new_crtc_state->wm.skl.ddb;
+ modeset_pipes &= ~BIT(pipe);
+
+ intel_update_crtc(crtc, state, old_crtc_state, new_crtc_state);
+ }
+
+ WARN_ON(modeset_pipes);
	/* If the 2nd DBuf slice is no longer required, disable it */
if (INTEL_GEN(dev_priv) >= 11 && required_slices < hw_enabled_slices)
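
All three enable phases above rest on the same invariant: a pipe is only committed once its new DDB window no longer clashes with any other pipe's currently-committed window. A standalone C sketch of that core ordering loop, with a simple interval test standing in for skl_ddb_allocation_overlaps(); it assumes the target allocations are mutually non-overlapping (as the DDB allocator guarantees), so at least one pipe can always make progress per pass:

#include <stdbool.h>
#include <stdint.h>

struct ddb_entry { uint16_t start, end; }; /* half-open [start, end) */

static bool overlaps(const struct ddb_entry *a, const struct ddb_entry *b)
{
	return a->start < b->end && b->start < a->end;
}

static void commit_in_safe_order(struct ddb_entry *cur,
				 const struct ddb_entry *target,
				 int num_pipes, uint32_t todo)
{
	while (todo) {
		for (int i = 0; i < num_pipes; i++) {
			bool clash = false;

			if (!(todo & (1u << i)))
				continue;
			for (int j = 0; j < num_pipes; j++)
				if (j != i && overlaps(&target[i], &cur[j]))
					clash = true;
			if (!clash) {
				cur[i] = target[i]; /* update this pipe */
				todo &= ~(1u << i);
			}
		}
	}
}

int main(void)
{
	struct ddb_entry cur[2] = { { 0, 50 }, { 50, 100 } };
	struct ddb_entry target[2] = { { 0, 80 }, { 80, 100 } };

	/* pipe 1 must shrink before pipe 0 can grow into its space */
	commit_in_safe_order(cur, target, 2, 0x3);
	return 0;
}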
@@ -14679,12 +15366,13 @@ static void intel_atomic_commit_tail(struct intel_atomic_state *state)
bool modeset = needs_modeset(new_crtc_state);
/* Complete events for now disable pipes here. */
- if (modeset && !new_crtc_state->base.active && new_crtc_state->base.event) {
+ if (modeset && !new_crtc_state->hw.active && new_crtc_state->uapi.event) {
spin_lock_irq(&dev->event_lock);
- drm_crtc_send_vblank_event(&crtc->base, new_crtc_state->base.event);
+ drm_crtc_send_vblank_event(&crtc->base,
+ new_crtc_state->uapi.event);
spin_unlock_irq(&dev->event_lock);
- new_crtc_state->base.event = NULL;
+ new_crtc_state->uapi.event = NULL;
}
}
@@ -14715,10 +15403,10 @@ static void intel_atomic_commit_tail(struct intel_atomic_state *state)
drm_atomic_helper_wait_for_flip_done(dev, &state->base);
for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
- if (new_crtc_state->base.active &&
+ if (new_crtc_state->hw.active &&
!needs_modeset(new_crtc_state) &&
!new_crtc_state->preload_luts &&
- (new_crtc_state->base.color_mgmt_changed ||
+ (new_crtc_state->uapi.color_mgmt_changed ||
new_crtc_state->update_pipe))
intel_color_load_luts(new_crtc_state);
}
@@ -14730,14 +15418,25 @@ static void intel_atomic_commit_tail(struct intel_atomic_state *state)
*
* TODO: Move this (and other cleanup) to an async worker eventually.
*/
- for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
+ for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
+ new_crtc_state, i) {
+ /*
+ * Gen2 reports pipe underruns whenever all planes are disabled.
+ * So re-enable underrun reporting after some planes get enabled.
+ *
+ * We do this before .optimize_watermarks() so that we have a
+ * chance of catching underruns with the intermediate watermarks
+ * vs. the new plane configuration.
+ */
+ if (IS_GEN(dev_priv, 2) && planes_enabling(old_crtc_state, new_crtc_state))
+ intel_set_cpu_fifo_underrun_reporting(dev_priv, crtc->pipe, true);
+
if (dev_priv->display.optimize_watermarks)
- dev_priv->display.optimize_watermarks(state,
- new_crtc_state);
+ dev_priv->display.optimize_watermarks(state, crtc);
}
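
planes_enabling() itself is not part of this hunk; going by the comment above, a plausible shape is a transition from zero active planes to at least one. A hypothetical sketch (the driver's version presumably takes the old/new crtc states rather than raw masks):

#include <stdbool.h>

/*
 * Hypothetical sketch, not the driver's exact definition: "planes are
 * enabling" when a CRTC goes from zero active planes to at least one.
 */
static bool planes_enabling(unsigned int old_active_planes,
			    unsigned int new_active_planes)
{
	return !old_active_planes && new_active_planes;
}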
for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
- intel_post_plane_update(old_crtc_state);
+ intel_post_plane_update(state, crtc);
if (put_domains[i])
modeset_put_power_domains(dev_priv, put_domains[i]);
@@ -14745,6 +15444,10 @@ static void intel_atomic_commit_tail(struct intel_atomic_state *state)
intel_modeset_verify_crtc(crtc, state, old_crtc_state, new_crtc_state);
}
+ /* Underruns don't always raise interrupts, so check manually */
+ intel_check_cpu_fifo_underruns(dev_priv);
+ intel_check_pch_fifo_underruns(dev_priv);
+
if (state->modeset)
intel_verify_planes(state);
@@ -14818,8 +15521,8 @@ static void intel_atomic_track_fbs(struct intel_atomic_state *state)
for_each_oldnew_intel_plane_in_state(state, plane, old_plane_state,
new_plane_state, i)
- intel_frontbuffer_track(to_intel_frontbuffer(old_plane_state->base.fb),
- to_intel_frontbuffer(new_plane_state->base.fb),
+ intel_frontbuffer_track(to_intel_frontbuffer(old_plane_state->hw.fb),
+ to_intel_frontbuffer(new_plane_state->hw.fb),
plane->frontbuffer_bit);
}
@@ -14986,9 +15689,9 @@ static void add_rps_boost_after_vblank(struct drm_crtc *crtc,
static int intel_plane_pin_fb(struct intel_plane_state *plane_state)
{
- struct intel_plane *plane = to_intel_plane(plane_state->base.plane);
+ struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
- struct drm_framebuffer *fb = plane_state->base.fb;
+ struct drm_framebuffer *fb = plane_state->hw.fb;
struct i915_vma *vma;
if (plane->id == PLANE_CURSOR &&
@@ -15051,9 +15754,9 @@ intel_prepare_plane_fb(struct drm_plane *plane,
struct intel_plane_state *new_plane_state =
to_intel_plane_state(_new_plane_state);
struct intel_atomic_state *intel_state =
- to_intel_atomic_state(new_plane_state->base.state);
+ to_intel_atomic_state(new_plane_state->uapi.state);
struct drm_i915_private *dev_priv = to_i915(plane->dev);
- struct drm_framebuffer *fb = new_plane_state->base.fb;
+ struct drm_framebuffer *fb = new_plane_state->hw.fb;
struct drm_i915_gem_object *obj = intel_fb_obj(fb);
struct drm_i915_gem_object *old_obj = intel_fb_obj(plane->state->fb);
int ret;
@@ -15084,9 +15787,9 @@ intel_prepare_plane_fb(struct drm_plane *plane,
}
}
- if (new_plane_state->base.fence) { /* explicit fencing */
+ if (new_plane_state->uapi.fence) { /* explicit fencing */
ret = i915_sw_fence_await_dma_fence(&intel_state->commit_ready,
- new_plane_state->base.fence,
+ new_plane_state->uapi.fence,
I915_FENCE_TIMEOUT,
GFP_KERNEL);
if (ret < 0)
@@ -15109,7 +15812,7 @@ intel_prepare_plane_fb(struct drm_plane *plane,
fb_obj_bump_render_priority(obj);
i915_gem_object_flush_frontbuffer(obj, ORIGIN_DIRTYFB);
- if (!new_plane_state->base.fence) { /* implicit fencing */
+ if (!new_plane_state->uapi.fence) { /* implicit fencing */
struct dma_fence *fence;
ret = i915_sw_fence_await_reservation(&intel_state->commit_ready,
@@ -15121,13 +15824,13 @@ intel_prepare_plane_fb(struct drm_plane *plane,
fence = dma_resv_get_excl_rcu(obj->base.resv);
if (fence) {
- add_rps_boost_after_vblank(new_plane_state->base.crtc,
+ add_rps_boost_after_vblank(new_plane_state->hw.crtc,
fence);
dma_fence_put(fence);
}
} else {
- add_rps_boost_after_vblank(new_plane_state->base.crtc,
- new_plane_state->base.fence);
+ add_rps_boost_after_vblank(new_plane_state->hw.crtc,
+ new_plane_state->uapi.fence);
}
/*
@@ -15160,7 +15863,7 @@ intel_cleanup_plane_fb(struct drm_plane *plane,
struct intel_plane_state *old_plane_state =
to_intel_plane_state(_old_plane_state);
struct intel_atomic_state *intel_state =
- to_intel_atomic_state(old_plane_state->base.state);
+ to_intel_atomic_state(old_plane_state->uapi.state);
struct drm_i915_private *dev_priv = to_i915(plane->dev);
if (intel_state->rps_interactive) {
@@ -15224,8 +15927,12 @@ static bool i965_plane_format_mod_supported(struct drm_plane *_plane,
case DRM_FORMAT_RGB565:
case DRM_FORMAT_XRGB8888:
case DRM_FORMAT_XBGR8888:
+ case DRM_FORMAT_ARGB8888:
+ case DRM_FORMAT_ABGR8888:
case DRM_FORMAT_XRGB2101010:
case DRM_FORMAT_XBGR2101010:
+ case DRM_FORMAT_ARGB2101010:
+ case DRM_FORMAT_ABGR2101010:
case DRM_FORMAT_XBGR16161616F:
return modifier == DRM_FORMAT_MOD_LINEAR ||
modifier == I915_FORMAT_MOD_X_TILED;
@@ -15283,7 +15990,7 @@ intel_legacy_cursor_update(struct drm_plane *_plane,
* When crtc is inactive or there is a modeset pending,
* wait for it to complete in the slowpath
*/
- if (!crtc_state->base.active || needs_modeset(crtc_state) ||
+ if (!crtc_state->hw.active || needs_modeset(crtc_state) ||
crtc_state->update_pipe)
goto slow;
@@ -15292,8 +15999,8 @@ intel_legacy_cursor_update(struct drm_plane *_plane,
* the plane. This prevents our async update's changes from getting
* overridden by a previous synchronous update's state.
*/
- if (old_plane_state->base.commit &&
- !try_wait_for_completion(&old_plane_state->base.commit->hw_done))
+ if (old_plane_state->uapi.commit &&
+ !try_wait_for_completion(&old_plane_state->uapi.commit->hw_done))
goto slow;
/*
@@ -15301,12 +16008,12 @@ intel_legacy_cursor_update(struct drm_plane *_plane,
* take the slowpath. Only changing fb or position should be
* in the fastpath.
*/
- if (old_plane_state->base.crtc != &crtc->base ||
- old_plane_state->base.src_w != src_w ||
- old_plane_state->base.src_h != src_h ||
- old_plane_state->base.crtc_w != crtc_w ||
- old_plane_state->base.crtc_h != crtc_h ||
- !old_plane_state->base.fb != !fb)
+ if (old_plane_state->uapi.crtc != &crtc->base ||
+ old_plane_state->uapi.src_w != src_w ||
+ old_plane_state->uapi.src_h != src_h ||
+ old_plane_state->uapi.crtc_w != crtc_w ||
+ old_plane_state->uapi.crtc_h != crtc_h ||
+ !old_plane_state->uapi.fb != !fb)
goto slow;
new_plane_state = to_intel_plane_state(intel_plane_duplicate_state(&plane->base));
@@ -15319,16 +16026,16 @@ intel_legacy_cursor_update(struct drm_plane *_plane,
goto out_free;
}
- drm_atomic_set_fb_for_plane(&new_plane_state->base, fb);
+ drm_atomic_set_fb_for_plane(&new_plane_state->uapi, fb);
- new_plane_state->base.src_x = src_x;
- new_plane_state->base.src_y = src_y;
- new_plane_state->base.src_w = src_w;
- new_plane_state->base.src_h = src_h;
- new_plane_state->base.crtc_x = crtc_x;
- new_plane_state->base.crtc_y = crtc_y;
- new_plane_state->base.crtc_w = crtc_w;
- new_plane_state->base.crtc_h = crtc_h;
+ new_plane_state->uapi.src_x = src_x;
+ new_plane_state->uapi.src_y = src_y;
+ new_plane_state->uapi.src_w = src_w;
+ new_plane_state->uapi.src_h = src_h;
+ new_plane_state->uapi.crtc_x = crtc_x;
+ new_plane_state->uapi.crtc_y = crtc_y;
+ new_plane_state->uapi.crtc_w = crtc_w;
+ new_plane_state->uapi.crtc_h = crtc_h;
ret = intel_plane_atomic_check_with_state(crtc_state, new_crtc_state,
old_plane_state, new_plane_state);
@@ -15339,13 +16046,14 @@ intel_legacy_cursor_update(struct drm_plane *_plane,
if (ret)
goto out_free;
- intel_frontbuffer_flush(to_intel_frontbuffer(new_plane_state->base.fb), ORIGIN_FLIP);
- intel_frontbuffer_track(to_intel_frontbuffer(old_plane_state->base.fb),
- to_intel_frontbuffer(new_plane_state->base.fb),
+ intel_frontbuffer_flush(to_intel_frontbuffer(new_plane_state->hw.fb),
+ ORIGIN_FLIP);
+ intel_frontbuffer_track(to_intel_frontbuffer(old_plane_state->hw.fb),
+ to_intel_frontbuffer(new_plane_state->hw.fb),
plane->frontbuffer_bit);
/* Swap plane state */
- plane->base.state = &new_plane_state->base;
+ plane->base.state = &new_plane_state->uapi;
/*
* We cannot swap crtc_state as it may be in use by an atomic commit or
@@ -15359,7 +16067,7 @@ intel_legacy_cursor_update(struct drm_plane *_plane,
*/
crtc_state->active_planes = new_crtc_state->active_planes;
- if (new_plane_state->base.visible)
+ if (new_plane_state->uapi.visible)
intel_update_plane(plane, crtc_state, new_plane_state);
else
intel_disable_plane(plane, crtc_state);
@@ -15368,11 +16076,11 @@ intel_legacy_cursor_update(struct drm_plane *_plane,
out_free:
if (new_crtc_state)
- intel_crtc_destroy_state(&crtc->base, &new_crtc_state->base);
+ intel_crtc_destroy_state(&crtc->base, &new_crtc_state->uapi);
if (ret)
- intel_plane_destroy_state(&plane->base, &new_plane_state->base);
+ intel_plane_destroy_state(&plane->base, &new_plane_state->uapi);
else
- intel_plane_destroy_state(&plane->base, &old_plane_state->base);
+ intel_plane_destroy_state(&plane->base, &old_plane_state->uapi);
return ret;
slow:
@@ -15414,7 +16122,6 @@ intel_primary_plane_create(struct drm_i915_private *dev_priv, enum pipe pipe)
const struct drm_plane_funcs *plane_funcs;
unsigned int supported_rotations;
unsigned int possible_crtcs;
- const u64 *modifiers;
const u32 *formats;
int num_formats;
int ret, zpos;
@@ -15446,7 +16153,10 @@ intel_primary_plane_create(struct drm_i915_private *dev_priv, enum pipe pipe)
fbc->possible_framebuffer_bits |= plane->frontbuffer_bit;
}
- if (INTEL_GEN(dev_priv) >= 4) {
+ if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
+ formats = vlv_primary_formats;
+ num_formats = ARRAY_SIZE(vlv_primary_formats);
+ } else if (INTEL_GEN(dev_priv) >= 4) {
/*
* WaFP16GammaEnabling:ivb
* "Workaround : When using the 64-bit format, the plane
@@ -15467,51 +16177,45 @@ intel_primary_plane_create(struct drm_i915_private *dev_priv, enum pipe pipe)
formats = i965_primary_formats;
num_formats = ARRAY_SIZE(i965_primary_formats);
}
- modifiers = i9xx_format_modifiers;
-
- plane->max_stride = i9xx_plane_max_stride;
- plane->update_plane = i9xx_update_plane;
- plane->disable_plane = i9xx_disable_plane;
- plane->get_hw_state = i9xx_plane_get_hw_state;
- plane->check_plane = i9xx_plane_check;
-
- if (IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv))
- plane->min_cdclk = hsw_plane_min_cdclk;
- else if (IS_IVYBRIDGE(dev_priv))
- plane->min_cdclk = ivb_plane_min_cdclk;
- else if (IS_CHERRYVIEW(dev_priv) || IS_VALLEYVIEW(dev_priv))
- plane->min_cdclk = vlv_plane_min_cdclk;
- else
- plane->min_cdclk = i9xx_plane_min_cdclk;
-
- plane_funcs = &i965_plane_funcs;
} else {
formats = i8xx_primary_formats;
num_formats = ARRAY_SIZE(i8xx_primary_formats);
- modifiers = i9xx_format_modifiers;
+ }
- plane->max_stride = i9xx_plane_max_stride;
- plane->update_plane = i9xx_update_plane;
- plane->disable_plane = i9xx_disable_plane;
- plane->get_hw_state = i9xx_plane_get_hw_state;
- plane->check_plane = i9xx_plane_check;
+ if (INTEL_GEN(dev_priv) >= 4)
+ plane_funcs = &i965_plane_funcs;
+ else
+ plane_funcs = &i8xx_plane_funcs;
+
+ if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
+ plane->min_cdclk = vlv_plane_min_cdclk;
+ else if (IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv))
+ plane->min_cdclk = hsw_plane_min_cdclk;
+ else if (IS_IVYBRIDGE(dev_priv))
+ plane->min_cdclk = ivb_plane_min_cdclk;
+ else
plane->min_cdclk = i9xx_plane_min_cdclk;
- plane_funcs = &i8xx_plane_funcs;
- }
+ plane->max_stride = i9xx_plane_max_stride;
+ plane->update_plane = i9xx_update_plane;
+ plane->disable_plane = i9xx_disable_plane;
+ plane->get_hw_state = i9xx_plane_get_hw_state;
+ plane->check_plane = i9xx_plane_check;
possible_crtcs = BIT(pipe);
if (INTEL_GEN(dev_priv) >= 5 || IS_G4X(dev_priv))
ret = drm_universal_plane_init(&dev_priv->drm, &plane->base,
possible_crtcs, plane_funcs,
- formats, num_formats, modifiers,
+ formats, num_formats,
+ i9xx_format_modifiers,
DRM_PLANE_TYPE_PRIMARY,
"primary %c", pipe_name(pipe));
else
ret = drm_universal_plane_init(&dev_priv->drm, &plane->base,
possible_crtcs, plane_funcs,
- formats, num_formats, modifiers,
+ formats, num_formats,
+ i9xx_format_modifiers,
DRM_PLANE_TYPE_PRIMARY,
"plane %c",
plane_name(plane->i9xx_plane));
@@ -15615,28 +16319,6 @@ fail:
return ERR_PTR(ret);
}
-static void intel_crtc_init_scalers(struct intel_crtc *crtc,
- struct intel_crtc_state *crtc_state)
-{
- struct intel_crtc_scaler_state *scaler_state =
- &crtc_state->scaler_state;
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
- int i;
-
- crtc->num_scalers = RUNTIME_INFO(dev_priv)->num_scalers[crtc->pipe];
- if (!crtc->num_scalers)
- return;
-
- for (i = 0; i < crtc->num_scalers; i++) {
- struct intel_scaler *scaler = &scaler_state->scalers[i];
-
- scaler->in_use = 0;
- scaler->mode = 0;
- }
-
- scaler_state->scaler_id = -1;
-}
-
#define INTEL_CRTC_FUNCS \
.gamma_set = drm_atomic_helper_legacy_gamma_set, \
.set_config = drm_atomic_helper_set_config, \
@@ -15704,33 +16386,53 @@ static const struct drm_crtc_funcs i8xx_crtc_funcs = {
.disable_vblank = i8xx_disable_vblank,
};
+static struct intel_crtc *intel_crtc_alloc(void)
+{
+ struct intel_crtc_state *crtc_state;
+ struct intel_crtc *crtc;
+
+ crtc = kzalloc(sizeof(*crtc), GFP_KERNEL);
+ if (!crtc)
+ return ERR_PTR(-ENOMEM);
+
+ crtc_state = intel_crtc_state_alloc(crtc);
+ if (!crtc_state) {
+ kfree(crtc);
+ return ERR_PTR(-ENOMEM);
+ }
+
+ crtc->base.state = &crtc_state->uapi;
+ crtc->config = crtc_state;
+
+ return crtc;
+}
+
+static void intel_crtc_free(struct intel_crtc *crtc)
+{
+ intel_crtc_destroy_state(&crtc->base, crtc->base.state);
+ kfree(crtc);
+}
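
intel_crtc_alloc()/intel_crtc_free() above pair allocation with unwinding via the kernel's ERR_PTR convention: a negative errno encoded in the returned pointer. A self-contained userspace sketch of that convention — err_ptr() and is_err() are simplified stand-ins for the kernel helpers:

#include <errno.h>
#include <stdlib.h>

/* Simplified stand-ins for the kernel's ERR_PTR()/IS_ERR() helpers. */
static inline void *err_ptr(long err) { return (void *)err; }
static inline int is_err(const void *p)
{
	return (unsigned long)p >= (unsigned long)-4095;
}

struct crtc_state { int placeholder; };
struct crtc { struct crtc_state *state; };

static struct crtc *crtc_alloc(void)
{
	struct crtc *crtc = calloc(1, sizeof(*crtc));
	struct crtc_state *state;

	if (!crtc)
		return err_ptr(-ENOMEM);

	state = calloc(1, sizeof(*state));
	if (!state) {
		free(crtc); /* unwind the partial construction */
		return err_ptr(-ENOMEM);
	}

	crtc->state = state;
	return crtc; /* caller checks is_err() before use */
}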
+
static int intel_crtc_init(struct drm_i915_private *dev_priv, enum pipe pipe)
{
+ struct intel_plane *primary, *cursor;
const struct drm_crtc_funcs *funcs;
- struct intel_crtc *intel_crtc;
- struct intel_crtc_state *crtc_state = NULL;
- struct intel_plane *primary = NULL;
- struct intel_plane *cursor = NULL;
+ struct intel_crtc *crtc;
int sprite, ret;
- intel_crtc = kzalloc(sizeof(*intel_crtc), GFP_KERNEL);
- if (!intel_crtc)
- return -ENOMEM;
+ crtc = intel_crtc_alloc();
+ if (IS_ERR(crtc))
+ return PTR_ERR(crtc);
- crtc_state = kzalloc(sizeof(*crtc_state), GFP_KERNEL);
- if (!crtc_state) {
- ret = -ENOMEM;
- goto fail;
- }
- __drm_atomic_helper_crtc_reset(&intel_crtc->base, &crtc_state->base);
- intel_crtc->config = crtc_state;
+ crtc->pipe = pipe;
+ crtc->num_scalers = RUNTIME_INFO(dev_priv)->num_scalers[pipe];
primary = intel_primary_plane_create(dev_priv, pipe);
if (IS_ERR(primary)) {
ret = PTR_ERR(primary);
goto fail;
}
- intel_crtc->plane_ids_mask |= BIT(primary->id);
+ crtc->plane_ids_mask |= BIT(primary->id);
for_each_sprite(dev_priv, pipe, sprite) {
struct intel_plane *plane;
@@ -15740,7 +16442,7 @@ static int intel_crtc_init(struct drm_i915_private *dev_priv, enum pipe pipe)
ret = PTR_ERR(plane);
goto fail;
}
- intel_crtc->plane_ids_mask |= BIT(plane->id);
+ crtc->plane_ids_mask |= BIT(plane->id);
}
cursor = intel_cursor_plane_create(dev_priv, pipe);
@@ -15748,7 +16450,7 @@ static int intel_crtc_init(struct drm_i915_private *dev_priv, enum pipe pipe)
ret = PTR_ERR(cursor);
goto fail;
}
- intel_crtc->plane_ids_mask |= BIT(cursor->id);
+ crtc->plane_ids_mask |= BIT(cursor->id);
if (HAS_GMCH(dev_priv)) {
if (IS_CHERRYVIEW(dev_priv) ||
@@ -15769,42 +16471,32 @@ static int intel_crtc_init(struct drm_i915_private *dev_priv, enum pipe pipe)
funcs = &ilk_crtc_funcs;
}
- ret = drm_crtc_init_with_planes(&dev_priv->drm, &intel_crtc->base,
+ ret = drm_crtc_init_with_planes(&dev_priv->drm, &crtc->base,
&primary->base, &cursor->base,
funcs, "pipe %c", pipe_name(pipe));
if (ret)
goto fail;
- intel_crtc->pipe = pipe;
-
- /* initialize shared scalers */
- intel_crtc_init_scalers(intel_crtc, crtc_state);
-
BUG_ON(pipe >= ARRAY_SIZE(dev_priv->pipe_to_crtc_mapping) ||
dev_priv->pipe_to_crtc_mapping[pipe] != NULL);
- dev_priv->pipe_to_crtc_mapping[pipe] = intel_crtc;
+ dev_priv->pipe_to_crtc_mapping[pipe] = crtc;
if (INTEL_GEN(dev_priv) < 9) {
enum i9xx_plane_id i9xx_plane = primary->i9xx_plane;
BUG_ON(i9xx_plane >= ARRAY_SIZE(dev_priv->plane_to_crtc_mapping) ||
dev_priv->plane_to_crtc_mapping[i9xx_plane] != NULL);
- dev_priv->plane_to_crtc_mapping[i9xx_plane] = intel_crtc;
+ dev_priv->plane_to_crtc_mapping[i9xx_plane] = crtc;
}
- intel_color_init(intel_crtc);
+ intel_color_init(crtc);
- WARN_ON(drm_crtc_index(&intel_crtc->base) != intel_crtc->pipe);
+ WARN_ON(drm_crtc_index(&crtc->base) != crtc->pipe);
return 0;
fail:
- /*
- * drm_mode_config_cleanup() will free up any
- * crtcs/planes already initialized.
- */
- kfree(crtc_state);
- kfree(intel_crtc);
+ intel_crtc_free(crtc);
return ret;
}
@@ -16291,8 +16983,11 @@ static int intel_framebuffer_init(struct intel_framebuffer *intel_fb,
}
/* FIXME need to adjust LINOFF/TILEOFF accordingly. */
- if (mode_cmd->offsets[0] != 0)
+ if (mode_cmd->offsets[0] != 0) {
+ DRM_DEBUG_KMS("plane 0 offset (0x%08x) must be 0\n",
+ mode_cmd->offsets[0]);
goto err;
+ }
drm_helper_mode_fill_fb_struct(&dev_priv->drm, fb, mode_cmd);
@@ -16305,26 +17000,23 @@ static int intel_framebuffer_init(struct intel_framebuffer *intel_fb,
}
stride_alignment = intel_fb_stride_alignment(fb, i);
-
- /*
- * Display WA #0531: skl,bxt,kbl,glk
- *
- * Render decompression and plane width > 3840
- * combined with horizontal panning requires the
- * plane stride to be a multiple of 4. We'll just
- * require the entire fb to accommodate that to avoid
- * potential runtime errors at plane configuration time.
- */
- if (IS_GEN(dev_priv, 9) && i == 0 && fb->width > 3840 &&
- is_ccs_modifier(fb->modifier))
- stride_alignment *= 4;
-
if (fb->pitches[i] & (stride_alignment - 1)) {
DRM_DEBUG_KMS("plane %d pitch (%d) must be at least %u byte aligned\n",
i, fb->pitches[i], stride_alignment);
goto err;
}
+ if (is_gen12_ccs_plane(fb, i)) {
+ int ccs_aux_stride = gen12_ccs_aux_stride(fb, i);
+
+ if (fb->pitches[i] != ccs_aux_stride) {
+ DRM_DEBUG_KMS("ccs aux plane %d pitch (%d) must be %d\n",
+ i,
+ fb->pitches[i], ccs_aux_stride);
+ goto err;
+ }
+ }
+
fb->obj[i] = &obj->base;
}
@@ -16522,29 +17214,28 @@ void intel_init_display_hooks(struct drm_i915_private *dev_priv)
intel_init_cdclk_hooks(dev_priv);
if (INTEL_GEN(dev_priv) >= 9) {
- dev_priv->display.get_pipe_config = haswell_get_pipe_config;
+ dev_priv->display.get_pipe_config = hsw_get_pipe_config;
dev_priv->display.get_initial_plane_config =
- skylake_get_initial_plane_config;
- dev_priv->display.crtc_compute_clock =
- haswell_crtc_compute_clock;
- dev_priv->display.crtc_enable = haswell_crtc_enable;
- dev_priv->display.crtc_disable = haswell_crtc_disable;
+ skl_get_initial_plane_config;
+ dev_priv->display.crtc_compute_clock = hsw_crtc_compute_clock;
+ dev_priv->display.crtc_enable = hsw_crtc_enable;
+ dev_priv->display.crtc_disable = hsw_crtc_disable;
} else if (HAS_DDI(dev_priv)) {
- dev_priv->display.get_pipe_config = haswell_get_pipe_config;
+ dev_priv->display.get_pipe_config = hsw_get_pipe_config;
dev_priv->display.get_initial_plane_config =
i9xx_get_initial_plane_config;
dev_priv->display.crtc_compute_clock =
- haswell_crtc_compute_clock;
- dev_priv->display.crtc_enable = haswell_crtc_enable;
- dev_priv->display.crtc_disable = haswell_crtc_disable;
+ hsw_crtc_compute_clock;
+ dev_priv->display.crtc_enable = hsw_crtc_enable;
+ dev_priv->display.crtc_disable = hsw_crtc_disable;
} else if (HAS_PCH_SPLIT(dev_priv)) {
- dev_priv->display.get_pipe_config = ironlake_get_pipe_config;
+ dev_priv->display.get_pipe_config = ilk_get_pipe_config;
dev_priv->display.get_initial_plane_config =
i9xx_get_initial_plane_config;
dev_priv->display.crtc_compute_clock =
- ironlake_crtc_compute_clock;
- dev_priv->display.crtc_enable = ironlake_crtc_enable;
- dev_priv->display.crtc_disable = ironlake_crtc_disable;
+ ilk_crtc_compute_clock;
+ dev_priv->display.crtc_enable = ilk_crtc_enable;
+ dev_priv->display.crtc_disable = ilk_crtc_disable;
} else if (IS_CHERRYVIEW(dev_priv)) {
dev_priv->display.get_pipe_config = i9xx_get_pipe_config;
dev_priv->display.get_initial_plane_config =
@@ -16590,14 +17281,12 @@ void intel_init_display_hooks(struct drm_i915_private *dev_priv)
}
if (IS_GEN(dev_priv, 5)) {
- dev_priv->display.fdi_link_train = ironlake_fdi_link_train;
+ dev_priv->display.fdi_link_train = ilk_fdi_link_train;
} else if (IS_GEN(dev_priv, 6)) {
dev_priv->display.fdi_link_train = gen6_fdi_link_train;
} else if (IS_IVYBRIDGE(dev_priv)) {
/* FIXME: detect B0+ stepping and use auto training */
dev_priv->display.fdi_link_train = ivb_manual_fdi_link_train;
- } else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
- dev_priv->display.fdi_link_train = hsw_fdi_link_train;
}
if (INTEL_GEN(dev_priv) >= 9)
@@ -16687,7 +17376,7 @@ retry:
/* Write calculated watermark values back */
for_each_new_intel_crtc_in_state(intel_state, crtc, crtc_state, i) {
crtc_state->wm.need_postvbl_update = true;
- dev_priv->display.optimize_watermarks(intel_state, crtc_state);
+ dev_priv->display.optimize_watermarks(intel_state, crtc);
to_intel_crtc_state(crtc->base.state)->wm = crtc_state->wm;
}
@@ -16719,8 +17408,7 @@ static int intel_initial_commit(struct drm_device *dev)
{
struct drm_atomic_state *state = NULL;
struct drm_modeset_acquire_ctx ctx;
- struct drm_crtc *crtc;
- struct drm_crtc_state *crtc_state;
+ struct intel_crtc *crtc;
int ret = 0;
state = drm_atomic_state_alloc(dev);
@@ -16732,15 +17420,17 @@ static int intel_initial_commit(struct drm_device *dev)
retry:
state->acquire_ctx = &ctx;
- drm_for_each_crtc(crtc, dev) {
- crtc_state = drm_atomic_get_crtc_state(state, crtc);
+ for_each_intel_crtc(dev, crtc) {
+ struct intel_crtc_state *crtc_state =
+ intel_atomic_get_crtc_state(state, crtc);
+
if (IS_ERR(crtc_state)) {
ret = PTR_ERR(crtc_state);
goto out;
}
- if (crtc_state->active) {
- ret = drm_atomic_add_affected_planes(state, crtc);
+ if (crtc_state->hw.active) {
+ ret = drm_atomic_add_affected_planes(state, &crtc->base);
if (ret)
goto out;
@@ -16750,7 +17440,7 @@ retry:
* having a proper LUT loaded. Remove once we
* have readout for pipe gamma enable.
*/
- crtc_state->color_mgmt_changed = true;
+ crtc_state->uapi.color_mgmt_changed = true;
}
}
@@ -17073,31 +17763,76 @@ static bool has_pch_trancoder(struct drm_i915_private *dev_priv,
(HAS_PCH_LPT_H(dev_priv) && pch_transcoder == PIPE_A);
}
-static void intel_sanitize_crtc(struct intel_crtc *crtc,
- struct drm_modeset_acquire_ctx *ctx)
+static void intel_sanitize_frame_start_delay(const struct intel_crtc_state *crtc_state)
{
- struct drm_device *dev = crtc->base.dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
- struct intel_crtc_state *crtc_state = to_intel_crtc_state(crtc->base.state);
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
- /* Clear any frame start delays used for debugging left by the BIOS */
- if (crtc->active && !transcoder_is_dsi(cpu_transcoder)) {
+ if (INTEL_GEN(dev_priv) >= 9 ||
+ IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv)) {
+ i915_reg_t reg = CHICKEN_TRANS(cpu_transcoder);
+ u32 val;
+
+ if (transcoder_is_dsi(cpu_transcoder))
+ return;
+
+ val = I915_READ(reg);
+ val &= ~HSW_FRAME_START_DELAY_MASK;
+ val |= HSW_FRAME_START_DELAY(0);
+ I915_WRITE(reg, val);
+ } else {
i915_reg_t reg = PIPECONF(cpu_transcoder);
+ u32 val;
- I915_WRITE(reg,
- I915_READ(reg) & ~PIPECONF_FRAME_START_DELAY_MASK);
+ val = I915_READ(reg);
+ val &= ~PIPECONF_FRAME_START_DELAY_MASK;
+ val |= PIPECONF_FRAME_START_DELAY(0);
+ I915_WRITE(reg, val);
}
- if (crtc_state->base.active) {
+ if (!crtc_state->has_pch_encoder)
+ return;
+
+ if (HAS_PCH_IBX(dev_priv)) {
+ i915_reg_t reg = PCH_TRANSCONF(crtc->pipe);
+ u32 val;
+
+ val = I915_READ(reg);
+ val &= ~TRANS_FRAME_START_DELAY_MASK;
+ val |= TRANS_FRAME_START_DELAY(0);
+ I915_WRITE(reg, val);
+ } else {
+ enum pipe pch_transcoder = intel_crtc_pch_transcoder(crtc);
+ i915_reg_t reg = TRANS_CHICKEN2(pch_transcoder);
+ u32 val;
+
+ val = I915_READ(reg);
+ val &= ~TRANS_CHICKEN2_FRAME_START_DELAY_MASK;
+ val |= TRANS_CHICKEN2_FRAME_START_DELAY(0);
+ I915_WRITE(reg, val);
+ }
+}
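
Every branch of the new helper repeats the same read-modify-write shape: read the register, clear the field under its mask, OR in the new value (zero here), write back. As a generic sketch — read_reg()/write_reg() are hypothetical accessors, not driver API:

#include <stdint.h>

/* Clear the bits under @mask and install @field in their place. */
static inline uint32_t rmw_field(uint32_t old, uint32_t mask, uint32_t field)
{
	return (old & ~mask) | (field & mask);
}

/*
 * Usage shape, mirroring each branch above:
 *	val = read_reg(reg);                         // hypothetical accessor
 *	val = rmw_field(val, DELAY_MASK, DELAY(0));
 *	write_reg(reg, val);                         // hypothetical accessor
 */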
+
+static void intel_sanitize_crtc(struct intel_crtc *crtc,
+ struct drm_modeset_acquire_ctx *ctx)
+{
+ struct drm_device *dev = crtc->base.dev;
+ struct drm_i915_private *dev_priv = to_i915(dev);
+ struct intel_crtc_state *crtc_state = to_intel_crtc_state(crtc->base.state);
+
+ if (crtc_state->hw.active) {
struct intel_plane *plane;
+ /* Clear any frame start delays used for debugging left by the BIOS */
+ intel_sanitize_frame_start_delay(crtc_state);
+
/* Disable everything but the primary plane */
for_each_intel_plane_on_crtc(dev, crtc, plane) {
const struct intel_plane_state *plane_state =
to_intel_plane_state(plane->base.state);
- if (plane_state->base.visible &&
+ if (plane_state->uapi.visible &&
plane->base.type != DRM_PLANE_TYPE_PRIMARY)
intel_plane_disable_noatomic(crtc, plane);
}
@@ -17114,10 +17849,10 @@ static void intel_sanitize_crtc(struct intel_crtc *crtc,
/* Adjust the state of the output pipe according to whether we
* have active connectors/encoders. */
- if (crtc_state->base.active && !intel_crtc_has_encoders(crtc))
- intel_crtc_disable_noatomic(&crtc->base, ctx);
+ if (crtc_state->hw.active && !intel_crtc_has_encoders(crtc))
+ intel_crtc_disable_noatomic(crtc, ctx);
- if (crtc_state->base.active || HAS_GMCH(dev_priv)) {
+ if (crtc_state->hw.active || HAS_GMCH(dev_priv)) {
/*
* We start out with underrun reporting disabled to avoid races.
* For correct bookkeeping mark this on active crtcs.
@@ -17148,7 +17883,7 @@ static void intel_sanitize_crtc(struct intel_crtc *crtc,
static bool has_bogus_dpll_config(const struct intel_crtc_state *crtc_state)
{
- struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev);
+ struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
/*
* Some SNB BIOSen (eg. ASUS K53SV) are known to misprogram
@@ -17161,7 +17896,7 @@ static bool has_bogus_dpll_config(const struct intel_crtc_state *crtc_state)
* road.
*/
return IS_GEN(dev_priv, 6) &&
- crtc_state->base.active &&
+ crtc_state->hw.active &&
crtc_state->shared_dpll &&
crtc_state->port_clock == 0;
}
@@ -17178,7 +17913,7 @@ static void intel_sanitize_encoder(struct intel_encoder *encoder)
* encoder is active and trying to read from a pipe) and the
* pipe itself being active. */
bool has_active_crtc = crtc_state &&
- crtc_state->base.active;
+ crtc_state->hw.active;
if (crtc_state && has_bogus_dpll_config(crtc_state)) {
DRM_DEBUG_KMS("BIOS has misprogrammed the hardware. Disabling pipe %c\n",
@@ -17282,22 +18017,22 @@ static void intel_modeset_readout_hw_state(struct drm_device *dev)
struct intel_crtc_state *crtc_state =
to_intel_crtc_state(crtc->base.state);
- __drm_atomic_helper_crtc_destroy_state(&crtc_state->base);
- memset(crtc_state, 0, sizeof(*crtc_state));
- __drm_atomic_helper_crtc_reset(&crtc->base, &crtc_state->base);
+ __drm_atomic_helper_crtc_destroy_state(&crtc_state->uapi);
+ intel_crtc_free_hw_state(crtc_state);
+ intel_crtc_state_reset(crtc_state, crtc);
- crtc_state->base.active = crtc_state->base.enable =
+ crtc_state->hw.active = crtc_state->hw.enable =
dev_priv->display.get_pipe_config(crtc, crtc_state);
- crtc->base.enabled = crtc_state->base.enable;
- crtc->active = crtc_state->base.active;
+ crtc->base.enabled = crtc_state->hw.enable;
+ crtc->active = crtc_state->hw.active;
- if (crtc_state->base.active)
+ if (crtc_state->hw.active)
dev_priv->active_pipes |= BIT(crtc->pipe);
DRM_DEBUG_KMS("[CRTC:%d:%s] hw state readout: %s\n",
crtc->base.base.id, crtc->base.name,
- enableddisabled(crtc_state->base.active));
+ enableddisabled(crtc_state->hw.active));
}
readout_plane_state(dev_priv);
@@ -17319,7 +18054,7 @@ static void intel_modeset_readout_hw_state(struct drm_device *dev)
struct intel_crtc_state *crtc_state =
to_intel_crtc_state(crtc->base.state);
- if (crtc_state->base.active &&
+ if (crtc_state->hw.active &&
crtc_state->shared_dpll == pll)
pll->state.crtc_mask |= 1 << crtc->pipe;
}
@@ -17364,15 +18099,15 @@ static void intel_modeset_readout_hw_state(struct drm_device *dev)
crtc = to_intel_crtc(encoder->base.crtc);
crtc_state = crtc ? to_intel_crtc_state(crtc->base.state) : NULL;
- if (crtc_state && crtc_state->base.active) {
+ if (crtc_state && crtc_state->hw.active) {
/*
* This has to be done during hardware readout
* because anything calling .crtc_disable may
* rely on the connector_mask being accurate.
*/
- crtc_state->base.connector_mask |=
+ crtc_state->uapi.connector_mask |=
drm_connector_mask(&connector->base);
- crtc_state->base.encoder_mask |=
+ crtc_state->uapi.encoder_mask |=
drm_encoder_mask(&encoder->base);
}
} else {
@@ -17393,16 +18128,15 @@ static void intel_modeset_readout_hw_state(struct drm_device *dev)
struct intel_plane *plane;
int min_cdclk = 0;
- if (crtc_state->base.active) {
- struct drm_display_mode mode;
+ if (crtc_state->hw.active) {
+ struct drm_display_mode *mode = &crtc_state->hw.mode;
- intel_mode_from_pipe_config(&crtc_state->base.adjusted_mode,
+ intel_mode_from_pipe_config(&crtc_state->hw.adjusted_mode,
crtc_state);
- mode = crtc_state->base.adjusted_mode;
- mode.hdisplay = crtc_state->pipe_src_w;
- mode.vdisplay = crtc_state->pipe_src_h;
- WARN_ON(drm_atomic_set_mode_for_crtc(&crtc_state->base, &mode));
+ *mode = crtc_state->hw.adjusted_mode;
+ mode->hdisplay = crtc_state->pipe_src_w;
+ mode->vdisplay = crtc_state->pipe_src_h;
/*
* The initial mode needs to be set in order to keep
@@ -17413,11 +18147,13 @@ static void intel_modeset_readout_hw_state(struct drm_device *dev)
* set a flag to indicate that a full recalculation is
* needed on the next commit.
*/
- crtc_state->base.mode.private_flags = I915_MODE_FLAG_INHERITED;
+ mode->private_flags = I915_MODE_FLAG_INHERITED;
intel_crtc_compute_pixel_rate(crtc_state);
intel_crtc_update_active_timings(crtc_state);
+
+ intel_crtc_copy_hw_to_uapi_state(crtc_state);
}
for_each_intel_plane_on_crtc(&dev_priv->drm, crtc, plane) {
@@ -17428,14 +18164,14 @@ static void intel_modeset_readout_hw_state(struct drm_device *dev)
* FIXME don't have the fb yet, so can't
* use intel_plane_data_rate() :(
*/
- if (plane_state->base.visible)
+ if (plane_state->uapi.visible)
crtc_state->data_rate[plane->id] =
4 * crtc_state->pixel_rate;
/*
* FIXME don't have the fb yet, so can't
* use plane->min_cdclk() :(
*/
- if (plane_state->base.visible && plane->min_cdclk) {
+ if (plane_state->uapi.visible && plane->min_cdclk) {
if (crtc_state->double_wide ||
INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv))
crtc_state->min_cdclk[plane->id] =
@@ -17449,7 +18185,7 @@ static void intel_modeset_readout_hw_state(struct drm_device *dev)
crtc_state->min_cdclk[plane->id]);
}
- if (crtc_state->base.active) {
+ if (crtc_state->hw.active) {
min_cdclk = intel_crtc_compute_min_cdclk(crtc_state);
if (WARN_ON(min_cdclk < 0))
min_cdclk = 0;
@@ -17490,8 +18226,11 @@ get_encoder_power_domains(struct drm_i915_private *dev_priv)
static void intel_early_display_was(struct drm_i915_private *dev_priv)
{
- /* Display WA #1185 WaDisableDARBFClkGating:cnl,glk */
- if (IS_CANNONLAKE(dev_priv) || IS_GEMINILAKE(dev_priv))
+ /*
+ * Display WA #1185 WaDisableDARBFClkGating:cnl,glk,icl,ehl,tgl
+ * Also known as Wa_14010480278.
+ */
+ if (IS_GEN_RANGE(dev_priv, 10, 12) || IS_GEMINILAKE(dev_priv))
I915_WRITE(GEN9_CLKGATE_DIS_0, I915_READ(GEN9_CLKGATE_DIS_0) |
DARBF_GATING_DIS);
@@ -17572,7 +18311,6 @@ intel_modeset_setup_hw_state(struct drm_device *dev,
struct drm_modeset_acquire_ctx *ctx)
{
struct drm_i915_private *dev_priv = to_i915(dev);
- struct intel_crtc_state *crtc_state;
struct intel_encoder *encoder;
struct intel_crtc *crtc;
intel_wakeref_t wakeref;
@@ -17592,7 +18330,7 @@ intel_modeset_setup_hw_state(struct drm_device *dev,
/* We need to sanitize only the MST primary port. */
if (encoder->type != INTEL_OUTPUT_DP_MST &&
intel_phy_is_tc(dev_priv, phy))
- intel_tc_port_sanitize(enc_to_dig_port(&encoder->base));
+ intel_tc_port_sanitize(enc_to_dig_port(encoder));
}
get_encoder_power_domains(dev_priv);
@@ -17605,11 +18343,12 @@ intel_modeset_setup_hw_state(struct drm_device *dev,
* waits, so we need vblank interrupts restored beforehand.
*/
for_each_intel_crtc(&dev_priv->drm, crtc) {
- crtc_state = to_intel_crtc_state(crtc->base.state);
+ struct intel_crtc_state *crtc_state =
+ to_intel_crtc_state(crtc->base.state);
drm_crtc_vblank_reset(&crtc->base);
- if (crtc_state->base.active)
+ if (crtc_state->hw.active)
intel_crtc_vblank_on(crtc_state);
}
@@ -17619,7 +18358,9 @@ intel_modeset_setup_hw_state(struct drm_device *dev,
intel_sanitize_encoder(encoder);
for_each_intel_crtc(&dev_priv->drm, crtc) {
- crtc_state = to_intel_crtc_state(crtc->base.state);
+ struct intel_crtc_state *crtc_state =
+ to_intel_crtc_state(crtc->base.state);
+
intel_sanitize_crtc(crtc, ctx);
intel_dump_pipe_config(crtc_state, NULL, "[setup_hw_state]");
}
@@ -17652,17 +18393,16 @@ intel_modeset_setup_hw_state(struct drm_device *dev,
}
for_each_intel_crtc(dev, crtc) {
+ struct intel_crtc_state *crtc_state =
+ to_intel_crtc_state(crtc->base.state);
u64 put_domains;
- crtc_state = to_intel_crtc_state(crtc->base.state);
put_domains = modeset_get_crtc_power_domains(crtc_state);
if (WARN_ON(put_domains))
modeset_put_power_domains(dev_priv, put_domains);
}
intel_display_power_put(dev_priv, POWER_DOMAIN_INIT, wakeref);
-
- intel_fbc_init_pipe_state(dev_priv);
}
void intel_display_resume(struct drm_device *dev)
@@ -17738,6 +18478,13 @@ void intel_modeset_driver_remove(struct drm_i915_private *i915)
*/
intel_hpd_poll_fini(i915);
+ /*
+ * MST topology needs to be suspended so we don't have any calls to
+ * fbdev after it's finalized. MST will be destroyed later as part of
+ * drm_mode_config_cleanup()
+ */
+ intel_dp_mst_suspend(i915);
+
/* poll work can call into fbdev, hence clean that up afterwards */
intel_fbdev_fini(i915);
@@ -17756,6 +18503,8 @@ void intel_modeset_driver_remove(struct drm_i915_private *i915)
intel_gmbus_teardown(i915);
+ intel_bw_cleanup(i915);
+
destroy_workqueue(i915->flip_wq);
destroy_workqueue(i915->modeset_wq);
diff --git a/drivers/gpu/drm/i915/display/intel_display.h b/drivers/gpu/drm/i915/display/intel_display.h
index f417e0948001..028aab728514 100644
--- a/drivers/gpu/drm/i915/display/intel_display.h
+++ b/drivers/gpu/drm/i915/display/intel_display.h
@@ -332,8 +332,11 @@ enum phy_fia {
(__s) < RUNTIME_INFO(__dev_priv)->num_sprites[(__p)]; \
(__s)++)
-#define for_each_port_masked(__port, __ports_mask) \
- for ((__port) = PORT_A; (__port) < I915_MAX_PORTS; (__port)++) \
+#define for_each_port(__port) \
+ for ((__port) = PORT_A; (__port) < I915_MAX_PORTS; (__port)++)
+
+#define for_each_port_masked(__port, __ports_mask) \
+ for_each_port(__port) \
for_each_if((__ports_mask) & BIT(__port))
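
The new for_each_port() base iterator composes with the mask filter through for_each_if(). A runnable restatement of the two macros — the enum values and the mask are illustrative, and for_each_if() is re-stated here so the sketch compiles outside the driver:

#include <stdio.h>

enum port { PORT_A, PORT_B, PORT_C, I915_MAX_PORTS };
#define BIT(x) (1u << (x))
#define for_each_if(condition) if (!(condition)) {} else

#define for_each_port(__port) \
	for ((__port) = PORT_A; (__port) < I915_MAX_PORTS; (__port)++)
#define for_each_port_masked(__port, __ports_mask) \
	for_each_port(__port) \
		for_each_if((__ports_mask) & BIT(__port))

int main(void)
{
	enum port port;
	unsigned int present = BIT(PORT_A) | BIT(PORT_C); /* illustrative */

	for_each_port_masked(port, present)
		printf("port %d present\n", port);
	return 0;
}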
#define for_each_phy_masked(__phy, __phys_mask) \
@@ -377,6 +380,13 @@ enum phy_fia {
&(dev)->mode_config.encoder_list, \
base.head)
+#define for_each_intel_encoder_mask(dev, intel_encoder, encoder_mask) \
+ list_for_each_entry(intel_encoder, \
+ &(dev)->mode_config.encoder_list, \
+ base.head) \
+ for_each_if((encoder_mask) & \
+ drm_encoder_mask(&intel_encoder->base))
+
#define for_each_intel_dp(dev, intel_encoder) \
for_each_intel_encoder(dev, intel_encoder) \
for_each_if(intel_encoder_is_dp(intel_encoder))
@@ -446,16 +456,25 @@ enum phy_fia {
#define intel_atomic_crtc_state_for_each_plane_state( \
plane, plane_state, \
crtc_state) \
- for_each_intel_plane_mask(((crtc_state)->base.state->dev), (plane), \
- ((crtc_state)->base.plane_mask)) \
+ for_each_intel_plane_mask(((crtc_state)->uapi.state->dev), (plane), \
+ ((crtc_state)->uapi.plane_mask)) \
for_each_if ((plane_state = \
- to_intel_plane_state(__drm_atomic_get_current_plane_state((crtc_state)->base.state, &plane->base))))
+ to_intel_plane_state(__drm_atomic_get_current_plane_state((crtc_state)->uapi.state, &plane->base))))
+
+#define for_each_new_intel_connector_in_state(__state, connector, new_connector_state, __i) \
+ for ((__i) = 0; \
+ (__i) < (__state)->base.num_connector; \
+ (__i)++) \
+ for_each_if ((__state)->base.connectors[__i].ptr && \
+ ((connector) = to_intel_connector((__state)->base.connectors[__i].ptr), \
+ (new_connector_state) = to_intel_digital_connector_state((__state)->base.connectors[__i].new_state), 1))
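
The connector iterator above uses a common kernel idiom: a comma expression inside for_each_if() both filters out NULL slots and binds two loop variables at once. A standalone demonstration of that idiom with generic types (names are illustrative):

#include <stddef.h>
#include <stdio.h>

struct item { int *ptr; int new_val; };

#define for_each_if(condition) if (!(condition)) {} else

/* Same shape as the header's macro: skip empty slots, bind two values. */
#define for_each_set_item(__arr, __n, __p, __v, __i)			\
	for ((__i) = 0; (__i) < (__n); (__i)++)				\
		for_each_if((__arr)[__i].ptr &&				\
			    ((__p) = (__arr)[__i].ptr,			\
			     (__v) = (__arr)[__i].new_val, 1))

int main(void)
{
	int a = 1, c = 3;
	struct item items[3] = { { &a, 10 }, { NULL, 0 }, { &c, 30 } };
	int *p;
	int v;
	size_t i;

	for_each_set_item(items, 3, p, v, i)
		printf("slot %zu: *ptr=%d new_val=%d\n", i, *p, v);
	return 0;
}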
void intel_link_compute_m_n(u16 bpp, int nlanes,
int pixel_clock, int link_clock,
struct intel_link_m_n *m_n,
bool constant_n, bool fec_enable);
bool is_ccs_modifier(u64 modifier);
+int intel_main_to_aux_plane(const struct drm_framebuffer *fb, int main_plane);
void lpt_disable_clkout_dp(struct drm_i915_private *dev_priv);
u32 intel_plane_fb_max_stride(struct drm_i915_private *dev_priv,
u32 pixel_format, u64 modifier);
@@ -467,6 +486,7 @@ enum phy intel_port_to_phy(struct drm_i915_private *i915, enum port port);
bool is_trans_port_sync_mode(const struct intel_crtc_state *state);
void intel_plane_destroy(struct drm_plane *plane);
+void intel_disable_pipe(const struct intel_crtc_state *old_crtc_state);
void i830_enable_pipe(struct drm_i915_private *dev_priv, enum pipe pipe);
void i830_disable_pipe(struct drm_i915_private *dev_priv, enum pipe pipe);
enum pipe intel_crtc_pch_transcoder(struct intel_crtc *crtc);
@@ -499,11 +519,10 @@ enum tc_port intel_port_to_tc(struct drm_i915_private *dev_priv,
enum port port);
int intel_get_pipe_from_crtc_id_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
-enum transcoder intel_pipe_to_cpu_transcoder(struct drm_i915_private *dev_priv,
- enum pipe pipe);
u32 intel_crtc_get_vblank_counter(struct intel_crtc *crtc);
+void intel_crtc_vblank_off(const struct intel_crtc_state *crtc_state);
-int ironlake_get_lanes_required(int target_clock, int link_bw, int bpp);
+int ilk_get_lanes_required(int target_clock, int link_bw, int bpp);
void vlv_wait_port_ready(struct drm_i915_private *dev_priv,
struct intel_digital_port *dport,
unsigned int expected_mask);
@@ -547,7 +566,6 @@ bool bxt_find_best_dpll(struct intel_crtc_state *crtc_state,
struct dpll *best_clock);
int chv_calc_dpll_params(int refclk, struct dpll *pll_clock);
-bool intel_crtc_active(struct intel_crtc *crtc);
bool hsw_crtc_state_ips_capable(const struct intel_crtc_state *crtc_state);
void hsw_enable_ips(const struct intel_crtc_state *crtc_state);
void hsw_disable_ips(const struct intel_crtc_state *crtc_state);
@@ -561,6 +579,8 @@ void intel_crtc_arm_fifo_underrun(struct intel_crtc *crtc,
u16 skl_scaler_calc_phase(int sub, int scale, bool chroma_center);
int skl_update_scaler_crtc(struct intel_crtc_state *crtc_state);
+void skl_scaler_disable(const struct intel_crtc_state *old_crtc_state);
+void ilk_pfit_disable(const struct intel_crtc_state *old_crtc_state);
u32 glk_plane_color_ctl(const struct intel_crtc_state *crtc_state,
const struct intel_plane_state *plane_state);
u32 glk_plane_color_ctl_crtc(const struct intel_crtc_state *crtc_state);
@@ -582,6 +602,10 @@ intel_display_capture_error_state(struct drm_i915_private *dev_priv);
void intel_display_print_error_state(struct drm_i915_error_state_buf *e,
struct intel_display_error_state *error);
+bool
+intel_format_info_is_yuv_semiplanar(const struct drm_format_info *info,
+ uint64_t modifier);
+
/* modesetting */
void intel_modeset_init_hw(struct drm_i915_private *i915);
int intel_modeset_init(struct drm_i915_private *i915);
@@ -603,9 +627,10 @@ void assert_fdi_rx_pll(struct drm_i915_private *dev_priv,
enum pipe pipe, bool state);
#define assert_fdi_rx_pll_enabled(d, p) assert_fdi_rx_pll(d, p, true)
#define assert_fdi_rx_pll_disabled(d, p) assert_fdi_rx_pll(d, p, false)
-void assert_pipe(struct drm_i915_private *dev_priv, enum pipe pipe, bool state);
-#define assert_pipe_enabled(d, p) assert_pipe(d, p, true)
-#define assert_pipe_disabled(d, p) assert_pipe(d, p, false)
+void assert_pipe(struct drm_i915_private *dev_priv,
+ enum transcoder cpu_transcoder, bool state);
+#define assert_pipe_enabled(d, t) assert_pipe(d, t, true)
+#define assert_pipe_disabled(d, t) assert_pipe(d, t, false)
/* Use I915_STATE_WARN(x) and I915_STATE_WARN_ON() (rather than WARN() and
* WARN_ON()) for hw state sanity checks to check for unexpected conditions
diff --git a/drivers/gpu/drm/i915/display/intel_display_power.c b/drivers/gpu/drm/i915/display/intel_display_power.c
index 12ba74788cce..21561acfa3ac 100644
--- a/drivers/gpu/drm/i915/display/intel_display_power.c
+++ b/drivers/gpu/drm/i915/display/intel_display_power.c
@@ -418,7 +418,8 @@ icl_combo_phy_aux_power_well_enable(struct drm_i915_private *dev_priv,
int pw_idx = power_well->desc->hsw.idx;
enum phy phy = ICL_AUX_PW_TO_PHY(pw_idx);
u32 val;
- int wa_idx_max;
+
+ WARN_ON(!IS_ICELAKE(dev_priv));
val = I915_READ(regs->driver);
I915_WRITE(regs->driver, val | HSW_PWR_WELL_CTL_REQ(pw_idx));
@@ -430,14 +431,8 @@ icl_combo_phy_aux_power_well_enable(struct drm_i915_private *dev_priv,
hsw_wait_for_power_well_enable(dev_priv, power_well);
- /* Display WA #1178: icl, tgl */
- if (IS_TIGERLAKE(dev_priv))
- wa_idx_max = ICL_PW_CTL_IDX_AUX_C;
- else
- wa_idx_max = ICL_PW_CTL_IDX_AUX_B;
-
- if (!IS_ELKHARTLAKE(dev_priv) &&
- pw_idx >= ICL_PW_CTL_IDX_AUX_A && pw_idx <= wa_idx_max &&
+ /* Display WA #1178: icl */
+ if (pw_idx >= ICL_PW_CTL_IDX_AUX_A && pw_idx <= ICL_PW_CTL_IDX_AUX_B &&
!intel_bios_is_port_edp(dev_priv, (enum port)phy)) {
val = I915_READ(ICL_AUX_ANAOVRD1(pw_idx));
val |= ICL_AUX_ANAOVRD1_ENABLE | ICL_AUX_ANAOVRD1_LDO_BYPASS;
@@ -454,10 +449,10 @@ icl_combo_phy_aux_power_well_disable(struct drm_i915_private *dev_priv,
enum phy phy = ICL_AUX_PW_TO_PHY(pw_idx);
u32 val;
- if (INTEL_GEN(dev_priv) < 12) {
- val = I915_READ(ICL_PORT_CL_DW12(phy));
- I915_WRITE(ICL_PORT_CL_DW12(phy), val & ~ICL_LANE_ENABLE_AUX);
- }
+ WARN_ON(!IS_ICELAKE(dev_priv));
+
+ val = I915_READ(ICL_PORT_CL_DW12(phy));
+ I915_WRITE(ICL_PORT_CL_DW12(phy), val & ~ICL_LANE_ENABLE_AUX);
val = I915_READ(regs->driver);
I915_WRITE(regs->driver, val & ~HSW_PWR_WELL_CTL_REQ(pw_idx));
@@ -519,7 +514,7 @@ static void icl_tc_port_assert_ref_held(struct drm_i915_private *dev_priv,
if (encoder->type == INTEL_OUTPUT_DP_MST)
continue;
- dig_port = enc_to_dig_port(&encoder->base);
+ dig_port = enc_to_dig_port(encoder);
if (WARN_ON(!dig_port))
continue;
@@ -1669,8 +1664,8 @@ void chv_phy_powergate_lanes(struct intel_encoder *encoder,
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct i915_power_domains *power_domains = &dev_priv->power_domains;
- enum dpio_phy phy = vlv_dport_to_phy(enc_to_dig_port(&encoder->base));
- enum dpio_channel ch = vlv_dport_to_channel(enc_to_dig_port(&encoder->base));
+ enum dpio_phy phy = vlv_dport_to_phy(enc_to_dig_port(encoder));
+ enum dpio_channel ch = vlv_dport_to_channel(enc_to_dig_port(encoder));
mutex_lock(&power_domains->lock);
@@ -4928,6 +4923,56 @@ static void cnl_display_core_uninit(struct drm_i915_private *dev_priv)
intel_combo_phy_uninit(dev_priv);
}
+struct buddy_page_mask {
+ u32 page_mask;
+ u8 type;
+ u8 num_channels;
+};
+
+static const struct buddy_page_mask tgl_buddy_page_masks[] = {
+ { .num_channels = 1, .type = INTEL_DRAM_LPDDR4, .page_mask = 0xE },
+ { .num_channels = 1, .type = INTEL_DRAM_DDR4, .page_mask = 0xF },
+ { .num_channels = 2, .type = INTEL_DRAM_LPDDR4, .page_mask = 0x1C },
+ { .num_channels = 2, .type = INTEL_DRAM_DDR4, .page_mask = 0x1F },
+ {}
+};
+
+static const struct buddy_page_mask wa_1409767108_buddy_page_masks[] = {
+ { .num_channels = 1, .type = INTEL_DRAM_LPDDR4, .page_mask = 0x1 },
+ { .num_channels = 1, .type = INTEL_DRAM_DDR4, .page_mask = 0x1 },
+ { .num_channels = 2, .type = INTEL_DRAM_LPDDR4, .page_mask = 0x3 },
+ { .num_channels = 2, .type = INTEL_DRAM_DDR4, .page_mask = 0x3 },
+ {}
+};
+
+static void tgl_bw_buddy_init(struct drm_i915_private *dev_priv)
+{
+ enum intel_dram_type type = dev_priv->dram_info.type;
+ u8 num_channels = dev_priv->dram_info.num_channels;
+ const struct buddy_page_mask *table;
+ int i;
+
+ if (IS_TGL_REVID(dev_priv, TGL_REVID_A0, TGL_REVID_A0))
+ /* Wa_1409767108: tgl */
+ table = wa_1409767108_buddy_page_masks;
+ else
+ table = tgl_buddy_page_masks;
+
+ for (i = 0; table[i].page_mask != 0; i++)
+ if (table[i].num_channels == num_channels &&
+ table[i].type == type)
+ break;
+
+ if (table[i].page_mask == 0) {
+ DRM_DEBUG_DRIVER("Unknown memory configuration; disabling address buddy logic.\n");
+ I915_WRITE(BW_BUDDY1_CTL, BW_BUDDY_DISABLE);
+ I915_WRITE(BW_BUDDY2_CTL, BW_BUDDY_DISABLE);
+ } else {
+ I915_WRITE(BW_BUDDY1_PAGE_MASK, table[i].page_mask);
+ I915_WRITE(BW_BUDDY2_PAGE_MASK, table[i].page_mask);
+ }
+}
+
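
tgl_bw_buddy_init() above walks a sentinel-terminated table and treats a zero page_mask after the loop as "configuration unknown". A standalone sketch of that lookup — the type values here are illustrative integers, not the INTEL_DRAM_* enum:

#include <stdint.h>
#include <stdio.h>

struct buddy_page_mask { uint32_t page_mask; uint8_t type, num_channels; };

/* Scan until the zeroed sentinel; page_mask == 0 means "not found". */
static uint32_t lookup_page_mask(const struct buddy_page_mask *table,
				 uint8_t type, uint8_t num_channels)
{
	int i;

	for (i = 0; table[i].page_mask != 0; i++)
		if (table[i].type == type &&
		    table[i].num_channels == num_channels)
			break;

	return table[i].page_mask;
}

int main(void)
{
	static const struct buddy_page_mask table[] = {
		{ .num_channels = 1, .type = 0, .page_mask = 0xE },
		{ .num_channels = 2, .type = 0, .page_mask = 0x1C },
		{ 0 } /* sentinel */
	};

	printf("mask: 0x%x\n", lookup_page_mask(table, 0, 2)); /* 0x1c */
	return 0;
}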
static void icl_display_core_init(struct drm_i915_private *dev_priv,
bool resume)
{
@@ -4960,6 +5005,10 @@ static void icl_display_core_init(struct drm_i915_private *dev_priv,
/* 6. Setup MBUS. */
icl_mbus_init(dev_priv);
+ /* 7. Program arbiter BW_BUDDY registers */
+ if (INTEL_GEN(dev_priv) >= 12)
+ tgl_bw_buddy_init(dev_priv);
+
if (resume && dev_priv->csr.dmc_payload)
intel_csr_load_program(dev_priv);
}
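
Editor's note: the BW_BUDDY programming added above is a sentinel-terminated table scan — the final zero-filled entry both ends the loop and doubles as the "unknown configuration" result. A minimal standalone sketch of that idiom follows; the type and function names here are illustrative, not the driver's:

/* Illustrative only: mirrors the sentinel-terminated lookup above. */
struct page_mask_entry {
        unsigned int page_mask;   /* 0 terminates the table */
        unsigned char type;
        unsigned char num_channels;
};

static const struct page_mask_entry table[] = {
        { .num_channels = 1, .type = 0, .page_mask = 0xE },
        { .num_channels = 2, .type = 0, .page_mask = 0x1C },
        {} /* sentinel: page_mask == 0 */
};

/* Returns 0 when no entry matches, i.e. the sentinel was reached. */
static unsigned int lookup_page_mask(unsigned char type, unsigned char channels)
{
        int i;

        for (i = 0; table[i].page_mask != 0; i++)
                if (table[i].type == type && table[i].num_channels == channels)
                        break;

        return table[i].page_mask;
}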
diff --git a/drivers/gpu/drm/i915/display/intel_display_power.h b/drivers/gpu/drm/i915/display/intel_display_power.h
index 1da04f3e0fb3..2608a65af7fa 100644
--- a/drivers/gpu/drm/i915/display/intel_display_power.h
+++ b/drivers/gpu/drm/i915/display/intel_display_power.h
@@ -28,7 +28,7 @@ enum intel_display_power_domain {
POWER_DOMAIN_TRANSCODER_C,
POWER_DOMAIN_TRANSCODER_D,
POWER_DOMAIN_TRANSCODER_EDP,
- /* VDSC/joining for TRANSCODER_EDP (ICL) or TRANSCODER_A (TGL) */
+ /* VDSC/joining for eDP/DSI transcoder (ICL) or pipe A (TGL) */
POWER_DOMAIN_TRANSCODER_VDSC_PW2,
POWER_DOMAIN_TRANSCODER_DSI_A,
POWER_DOMAIN_TRANSCODER_DSI_C,
diff --git a/drivers/gpu/drm/i915/display/intel_display_types.h b/drivers/gpu/drm/i915/display/intel_display_types.h
index 1a7334dbe802..888ea8a170d1 100644
--- a/drivers/gpu/drm/i915/display/intel_display_types.h
+++ b/drivers/gpu/drm/i915/display/intel_display_types.h
@@ -90,8 +90,8 @@ struct intel_framebuffer {
/* for each plane in the normal GTT view */
struct {
unsigned int x, y;
- } normal[2];
- /* for each plane in the rotated GTT view */
+ } normal[4];
+ /* for each plane in the rotated GTT view for no-CCS formats */
struct {
unsigned int x, y;
unsigned int pitch; /* pixels */
@@ -523,7 +523,24 @@ struct intel_atomic_state {
};
struct intel_plane_state {
- struct drm_plane_state base;
+ struct drm_plane_state uapi;
+
+ /*
+ * actual hardware state, the state we program to the hardware.
+ * The following members are used to verify the hardware state;
+ * during initial hw readout, they need to be copied from uapi.
+ */
+ struct {
+ struct drm_crtc *crtc;
+ struct drm_framebuffer *fb;
+
+ u16 alpha;
+ uint16_t pixel_blend_mode;
+ unsigned int rotation;
+ enum drm_color_encoding color_encoding;
+ enum drm_color_range color_range;
+ } hw;
+
struct i915_ggtt_view view;
struct i915_vma *vma;
unsigned long flags;
@@ -538,7 +555,7 @@ struct intel_plane_state {
*/
u32 stride;
int x, y;
- } color_plane[2];
+ } color_plane[4];
/* plane control register */
u32 ctl;
@@ -546,6 +563,9 @@ struct intel_plane_state {
/* plane color control register */
u32 color_ctl;
+ /* chroma upsampler control register */
+ u32 cus_ctl;
+
/*
* scaler_id
* = -1 : not using a scaler
@@ -757,7 +777,33 @@ enum intel_output_format {
};
struct intel_crtc_state {
- struct drm_crtc_state base;
+ /*
+ * uapi (drm) state. This is the software state shown to userspace.
+ * In particular, the following members are used for bookkeeping:
+ * - crtc
+ * - state
+ * - *_changed
+ * - event
+ * - commit
+ * - mode_blob
+ */
+ struct drm_crtc_state uapi;
+
+ /*
+ * actual hardware state, the state we program to the hardware.
+ * The following members are used to verify the hardware state:
+ * - enable
+ * - active
+ * - mode / adjusted_mode
+ * - color property blobs.
+ *
+ * During initial hw readout, they need to be copied to uapi.
+ */
+ struct {
+ bool active, enable;
+ struct drm_property_blob *degamma_lut, *gamma_lut, *ctm;
+ struct drm_display_mode mode, adjusted_mode;
+ } hw;
/**
* quirks - bitfield with hw state readout quirks
@@ -1008,6 +1054,9 @@ struct intel_crtc_state {
/* Bitmask to indicate slaves attached */
u8 sync_mode_slaves_mask;
+
+ /* Only valid on TGL+ */
+ enum transcoder mst_master_transcoder;
};
struct intel_crtc {
@@ -1080,9 +1129,6 @@ struct intel_plane {
void (*update_plane)(struct intel_plane *plane,
const struct intel_crtc_state *crtc_state,
const struct intel_plane_state *plane_state);
- void (*update_slave)(struct intel_plane *plane,
- const struct intel_crtc_state *crtc_state,
- const struct intel_plane_state *plane_state);
void (*disable_plane)(struct intel_plane *plane,
const struct intel_crtc_state *crtc_state);
bool (*get_hw_state)(struct intel_plane *plane, enum pipe *pipe);
@@ -1113,12 +1159,12 @@ struct cxsr_latency {
#define to_intel_atomic_state(x) container_of(x, struct intel_atomic_state, base)
#define to_intel_crtc(x) container_of(x, struct intel_crtc, base)
-#define to_intel_crtc_state(x) container_of(x, struct intel_crtc_state, base)
+#define to_intel_crtc_state(x) container_of(x, struct intel_crtc_state, uapi)
#define to_intel_connector(x) container_of(x, struct intel_connector, base)
#define to_intel_encoder(x) container_of(x, struct intel_encoder, base)
#define to_intel_framebuffer(x) container_of(x, struct intel_framebuffer, base)
#define to_intel_plane(x) container_of(x, struct intel_plane, base)
-#define to_intel_plane_state(x) container_of(x, struct intel_plane_state, base)
+#define to_intel_plane_state(x) container_of(x, struct intel_plane_state, uapi)
#define intel_fb_obj(x) ((x) ? to_intel_bo((x)->obj[0]) : NULL)
struct intel_hdmi {
@@ -1392,9 +1438,9 @@ struct intel_load_detect_pipe {
};
static inline struct intel_encoder *
-intel_attached_encoder(struct drm_connector *connector)
+intel_attached_encoder(struct intel_connector *connector)
{
- return to_intel_connector(connector)->encoder;
+ return connector->encoder;
}
static inline bool intel_encoder_is_dig_port(struct intel_encoder *encoder)
@@ -1411,12 +1457,12 @@ static inline bool intel_encoder_is_dig_port(struct intel_encoder *encoder)
}
static inline struct intel_digital_port *
-enc_to_dig_port(struct drm_encoder *encoder)
+enc_to_dig_port(struct intel_encoder *encoder)
{
- struct intel_encoder *intel_encoder = to_intel_encoder(encoder);
+ struct intel_encoder *intel_encoder = encoder;
if (intel_encoder_is_dig_port(intel_encoder))
- return container_of(encoder, struct intel_digital_port,
+ return container_of(&encoder->base, struct intel_digital_port,
base.base);
else
return NULL;
@@ -1425,16 +1471,17 @@ enc_to_dig_port(struct drm_encoder *encoder)
static inline struct intel_digital_port *
conn_to_dig_port(struct intel_connector *connector)
{
- return enc_to_dig_port(&intel_attached_encoder(&connector->base)->base);
+ return enc_to_dig_port(intel_attached_encoder(connector));
}
static inline struct intel_dp_mst_encoder *
-enc_to_mst(struct drm_encoder *encoder)
+enc_to_mst(struct intel_encoder *encoder)
{
- return container_of(encoder, struct intel_dp_mst_encoder, base.base);
+ return container_of(&encoder->base, struct intel_dp_mst_encoder,
+ base.base);
}
-static inline struct intel_dp *enc_to_intel_dp(struct drm_encoder *encoder)
+static inline struct intel_dp *enc_to_intel_dp(struct intel_encoder *encoder)
{
return &enc_to_dig_port(encoder)->dp;
}
@@ -1447,14 +1494,14 @@ static inline bool intel_encoder_is_dp(struct intel_encoder *encoder)
return true;
case INTEL_OUTPUT_DDI:
/* Skip pure HDMI/DVI DDI encoders */
- return i915_mmio_reg_valid(enc_to_intel_dp(&encoder->base)->output_reg);
+ return i915_mmio_reg_valid(enc_to_intel_dp(encoder)->output_reg);
default:
return false;
}
}
static inline struct intel_lspcon *
-enc_to_intel_lspcon(struct drm_encoder *encoder)
+enc_to_intel_lspcon(struct intel_encoder *encoder)
{
return &enc_to_dig_port(encoder)->lspcon;
}
@@ -1528,6 +1575,24 @@ intel_atomic_get_new_crtc_state(struct intel_atomic_state *state,
&crtc->base));
}
+static inline struct intel_digital_connector_state *
+intel_atomic_get_new_connector_state(struct intel_atomic_state *state,
+ struct intel_connector *connector)
+{
+ return to_intel_digital_connector_state(
+ drm_atomic_get_new_connector_state(&state->base,
+ &connector->base));
+}
+
+static inline struct intel_digital_connector_state *
+intel_atomic_get_old_connector_state(struct intel_atomic_state *state,
+ struct intel_connector *connector)
+{
+ return to_intel_digital_connector_state(
+ drm_atomic_get_old_connector_state(&state->base,
+ &connector->base));
+}
+
/* intel_display.c */
static inline bool
intel_crtc_has_type(const struct intel_crtc_state *crtc_state,
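
Editor's note: much of the churn in the following files is one mechanical pattern — the enc_to_*() helpers now take struct intel_encoder * instead of struct drm_encoder *, so callers that only hold a core DRM pointer convert once at the boundary with to_intel_encoder(). A minimal standalone sketch of the underlying container_of idiom, with simplified struct layouts (illustrative, not the driver's definitions):

#include <stddef.h>

#define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

struct drm_encoder { int id; };

struct intel_encoder {
        struct drm_encoder base;  /* embedded core object */
        int port;
};

/* Recover the wrapper from an embedded base pointer at the API boundary. */
static struct intel_encoder *to_intel_encoder(struct drm_encoder *encoder)
{
        return container_of(encoder, struct intel_encoder, base);
}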
diff --git a/drivers/gpu/drm/i915/display/intel_dp.c b/drivers/gpu/drm/i915/display/intel_dp.c
index b05b2191b919..c7424e2a04a3 100644
--- a/drivers/gpu/drm/i915/display/intel_dp.c
+++ b/drivers/gpu/drm/i915/display/intel_dp.c
@@ -146,9 +146,9 @@ bool intel_dp_is_edp(struct intel_dp *intel_dp)
return intel_dig_port->base.type == INTEL_OUTPUT_EDP;
}
-static struct intel_dp *intel_attached_dp(struct drm_connector *connector)
+static struct intel_dp *intel_attached_dp(struct intel_connector *connector)
{
- return enc_to_intel_dp(&intel_attached_encoder(connector)->base);
+ return enc_to_intel_dp(intel_attached_encoder(connector));
}
static void intel_dp_link_down(struct intel_encoder *encoder,
@@ -614,7 +614,7 @@ static enum drm_mode_status
intel_dp_mode_valid(struct drm_connector *connector,
struct drm_display_mode *mode)
{
- struct intel_dp *intel_dp = intel_attached_dp(connector);
+ struct intel_dp *intel_dp = intel_attached_dp(to_intel_connector(connector));
struct intel_connector *intel_connector = to_intel_connector(connector);
struct drm_display_mode *fixed_mode = intel_connector->panel.fixed_mode;
struct drm_i915_private *dev_priv = to_i915(connector->dev);
@@ -834,7 +834,7 @@ static enum pipe vlv_find_free_pps(struct drm_i915_private *dev_priv)
* Pick one that's not used by other ports.
*/
for_each_intel_dp(&dev_priv->drm, encoder) {
- struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
+ struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
if (encoder->type == INTEL_OUTPUT_EDP) {
WARN_ON(intel_dp->active_pipe != INVALID_PIPE &&
@@ -1031,7 +1031,7 @@ void intel_power_sequencer_reset(struct drm_i915_private *dev_priv)
*/
for_each_intel_dp(&dev_priv->drm, encoder) {
- struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
+ struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
WARN_ON(intel_dp->active_pipe != INVALID_PIPE);
@@ -1814,7 +1814,7 @@ static void intel_dp_print_rates(struct intel_dp *intel_dp)
{
char str[128]; /* FIXME: too big for stack? */
- if ((drm_debug & DRM_UT_KMS) == 0)
+ if (!drm_debug_enabled(DRM_UT_KMS))
return;
snprintf_int_array(str, sizeof(str),
@@ -1889,32 +1889,15 @@ static bool intel_dp_supports_fec(struct intel_dp *intel_dp,
drm_dp_sink_supports_fec(intel_dp->fec_capable);
}
-static bool intel_dp_source_supports_dsc(struct intel_dp *intel_dp,
- const struct intel_crtc_state *pipe_config)
-{
- struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
-
- if (!INTEL_INFO(dev_priv)->display.has_dsc)
- return false;
-
- /* On TGL, DSC is supported on all Pipes */
- if (INTEL_GEN(dev_priv) >= 12)
- return true;
-
- if (INTEL_GEN(dev_priv) >= 10 &&
- pipe_config->cpu_transcoder != TRANSCODER_A)
- return true;
-
- return false;
-}
-
static bool intel_dp_supports_dsc(struct intel_dp *intel_dp,
- const struct intel_crtc_state *pipe_config)
+ const struct intel_crtc_state *crtc_state)
{
- if (!intel_dp_is_edp(intel_dp) && !pipe_config->fec_enable)
+ struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
+
+ if (!intel_dp_is_edp(intel_dp) && !crtc_state->fec_enable)
return false;
- return intel_dp_source_supports_dsc(intel_dp, pipe_config) &&
+ return intel_dsc_source_support(encoder, crtc_state) &&
drm_dp_sink_supports_dsc(intel_dp->dsc_dpcd);
}
@@ -1999,7 +1982,7 @@ intel_dp_compute_link_config_wide(struct intel_dp *intel_dp,
struct intel_crtc_state *pipe_config,
const struct link_config_limits *limits)
{
- struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;
+ struct drm_display_mode *adjusted_mode = &pipe_config->hw.adjusted_mode;
int bpp, clock, lane_count;
int mode_rate, link_clock, link_avail;
@@ -2046,6 +2029,63 @@ static int intel_dp_dsc_compute_bpp(struct intel_dp *intel_dp, u8 dsc_max_bpc)
return 0;
}
+#define DSC_SUPPORTED_VERSION_MIN 1
+
+static int intel_dp_dsc_compute_params(struct intel_encoder *encoder,
+ struct intel_crtc_state *crtc_state)
+{
+ struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
+ struct drm_dsc_config *vdsc_cfg = &crtc_state->dsc.config;
+ u8 line_buf_depth;
+ int ret;
+
+ ret = intel_dsc_compute_params(encoder, crtc_state);
+ if (ret)
+ return ret;
+
+ /*
+ * Slice Height of 8 works for all currently available panels. So start
+ * with that if pic_height is an integral multiple of 8. Eventually add
+ * logic to try multiple slice heights.
+ */
+ if (vdsc_cfg->pic_height % 8 == 0)
+ vdsc_cfg->slice_height = 8;
+ else if (vdsc_cfg->pic_height % 4 == 0)
+ vdsc_cfg->slice_height = 4;
+ else
+ vdsc_cfg->slice_height = 2;
+
+ vdsc_cfg->dsc_version_major =
+ (intel_dp->dsc_dpcd[DP_DSC_REV - DP_DSC_SUPPORT] &
+ DP_DSC_MAJOR_MASK) >> DP_DSC_MAJOR_SHIFT;
+ vdsc_cfg->dsc_version_minor =
+ min(DSC_SUPPORTED_VERSION_MIN,
+ (intel_dp->dsc_dpcd[DP_DSC_REV - DP_DSC_SUPPORT] &
+ DP_DSC_MINOR_MASK) >> DP_DSC_MINOR_SHIFT);
+
+ vdsc_cfg->convert_rgb = intel_dp->dsc_dpcd[DP_DSC_DEC_COLOR_FORMAT_CAP - DP_DSC_SUPPORT] &
+ DP_DSC_RGB;
+
+ line_buf_depth = drm_dp_dsc_sink_line_buf_depth(intel_dp->dsc_dpcd);
+ if (!line_buf_depth) {
+ DRM_DEBUG_KMS("DSC Sink Line Buffer Depth invalid\n");
+ return -EINVAL;
+ }
+
+ if (vdsc_cfg->dsc_version_minor == 2)
+ vdsc_cfg->line_buf_depth = (line_buf_depth == DSC_1_2_MAX_LINEBUF_DEPTH_BITS) ?
+ DSC_1_2_MAX_LINEBUF_DEPTH_VAL : line_buf_depth;
+ else
+ vdsc_cfg->line_buf_depth = (line_buf_depth > DSC_1_1_MAX_LINEBUF_DEPTH_BITS) ?
+ DSC_1_1_MAX_LINEBUF_DEPTH_BITS : line_buf_depth;
+
+ vdsc_cfg->block_pred_enable =
+ intel_dp->dsc_dpcd[DP_DSC_BLK_PREDICTION_SUPPORT - DP_DSC_SUPPORT] &
+ DP_DSC_BLK_PREDICTION_IS_SUPPORTED;
+
+ return drm_dsc_compute_rc_parameters(vdsc_cfg);
+}
+
static int intel_dp_dsc_compute_config(struct intel_dp *intel_dp,
struct intel_crtc_state *pipe_config,
struct drm_connector_state *conn_state,
@@ -2053,7 +2093,7 @@ static int intel_dp_dsc_compute_config(struct intel_dp *intel_dp,
{
struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
- struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;
+ struct drm_display_mode *adjusted_mode = &pipe_config->hw.adjusted_mode;
u8 dsc_max_bpc;
int pipe_bpp;
int ret;
@@ -2132,7 +2172,7 @@ static int intel_dp_dsc_compute_config(struct intel_dp *intel_dp,
}
}
- ret = intel_dp_compute_dsc_params(intel_dp, pipe_config);
+ ret = intel_dp_dsc_compute_params(&dig_port->base, pipe_config);
if (ret < 0) {
DRM_DEBUG_KMS("Cannot compute valid DSC parameters for Input Bpp = %d "
"Compressed BPP = %d\n",
@@ -2164,8 +2204,8 @@ intel_dp_compute_link_config(struct intel_encoder *encoder,
struct intel_crtc_state *pipe_config,
struct drm_connector_state *conn_state)
{
- struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;
- struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
+ struct drm_display_mode *adjusted_mode = &pipe_config->hw.adjusted_mode;
+ struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
struct link_config_limits limits;
int common_len;
int ret;
@@ -2252,8 +2292,8 @@ intel_dp_ycbcr420_config(struct intel_dp *intel_dp,
{
const struct drm_display_info *info = &connector->display_info;
const struct drm_display_mode *adjusted_mode =
- &crtc_state->base.adjusted_mode;
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+ &crtc_state->hw.adjusted_mode;
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
int ret;
if (!drm_mode_is_420_only(info, adjusted_mode) ||
@@ -2281,7 +2321,7 @@ bool intel_dp_limited_color_range(const struct intel_crtc_state *crtc_state,
const struct intel_digital_connector_state *intel_conn_state =
to_intel_digital_connector_state(conn_state);
const struct drm_display_mode *adjusted_mode =
- &crtc_state->base.adjusted_mode;
+ &crtc_state->hw.adjusted_mode;
/*
* Our YCbCr output is always limited range.
@@ -2308,17 +2348,28 @@ bool intel_dp_limited_color_range(const struct intel_crtc_state *crtc_state,
}
}
+static bool intel_dp_port_has_audio(struct drm_i915_private *dev_priv,
+ enum port port)
+{
+ if (IS_G4X(dev_priv))
+ return false;
+ if (INTEL_GEN(dev_priv) < 12 && port == PORT_A)
+ return false;
+
+ return true;
+}
+
int
intel_dp_compute_config(struct intel_encoder *encoder,
struct intel_crtc_state *pipe_config,
struct drm_connector_state *conn_state)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;
- struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
- struct intel_lspcon *lspcon = enc_to_intel_lspcon(&encoder->base);
+ struct drm_display_mode *adjusted_mode = &pipe_config->hw.adjusted_mode;
+ struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
+ struct intel_lspcon *lspcon = enc_to_intel_lspcon(encoder);
enum port port = encoder->port;
- struct intel_crtc *intel_crtc = to_intel_crtc(pipe_config->base.crtc);
+ struct intel_crtc *intel_crtc = to_intel_crtc(pipe_config->uapi.crtc);
struct intel_connector *intel_connector = intel_dp->attached_connector;
struct intel_digital_connector_state *intel_conn_state =
to_intel_digital_connector_state(conn_state);
@@ -2341,7 +2392,7 @@ intel_dp_compute_config(struct intel_encoder *encoder,
return ret;
pipe_config->has_drrs = false;
- if (IS_G4X(dev_priv) || port == PORT_A)
+ if (!intel_dp_port_has_audio(dev_priv, port))
pipe_config->has_audio = false;
else if (intel_conn_state->force_audio == HDMI_AUDIO_AUTO)
pipe_config->has_audio = intel_dp->has_audio;
@@ -2431,10 +2482,10 @@ static void intel_dp_prepare(struct intel_encoder *encoder,
const struct intel_crtc_state *pipe_config)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
+ struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
enum port port = encoder->port;
- struct intel_crtc *crtc = to_intel_crtc(pipe_config->base.crtc);
- const struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;
+ struct intel_crtc *crtc = to_intel_crtc(pipe_config->uapi.crtc);
+ const struct drm_display_mode *adjusted_mode = &pipe_config->hw.adjusted_mode;
intel_dp_set_link_params(intel_dp, pipe_config->port_clock,
pipe_config->lane_count,
@@ -2458,7 +2509,7 @@ static void intel_dp_prepare(struct intel_encoder *encoder,
*
* CPT PCH is quite different, having many bits moved
* to the TRANS_DP_CTL register instead. That
- * configuration happens (oddly) in ironlake_pch_enable
+ * configuration happens (oddly) in ilk_pch_enable
*/
/* Preserve the BIOS-computed detected bit. This is
@@ -2602,7 +2653,7 @@ static void edp_wait_backlight_off(struct intel_dp *intel_dp)
* is locked
*/
-static u32 ironlake_get_pp_control(struct intel_dp *intel_dp)
+static u32 ilk_get_pp_control(struct intel_dp *intel_dp)
{
struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
u32 control;
@@ -2652,7 +2703,7 @@ static bool edp_panel_vdd_on(struct intel_dp *intel_dp)
if (!edp_have_panel_power(intel_dp))
wait_panel_power_cycle(intel_dp);
- pp = ironlake_get_pp_control(intel_dp);
+ pp = ilk_get_pp_control(intel_dp);
pp |= EDP_FORCE_VDD;
pp_stat_reg = _pp_stat_reg(intel_dp);
@@ -2717,7 +2768,7 @@ static void edp_panel_vdd_off_sync(struct intel_dp *intel_dp)
intel_dig_port->base.base.base.id,
intel_dig_port->base.base.name);
- pp = ironlake_get_pp_control(intel_dp);
+ pp = ilk_get_pp_control(intel_dp);
pp &= ~EDP_FORCE_VDD;
pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
@@ -2813,7 +2864,7 @@ static void edp_panel_on(struct intel_dp *intel_dp)
wait_panel_power_cycle(intel_dp);
pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
- pp = ironlake_get_pp_control(intel_dp);
+ pp = ilk_get_pp_control(intel_dp);
if (IS_GEN(dev_priv, 5)) {
/* ILK workaround: disable reset around power sequence */
pp &= ~PANEL_POWER_RESET;
@@ -2868,7 +2919,7 @@ static void edp_panel_off(struct intel_dp *intel_dp)
WARN(!intel_dp->want_panel_vdd, "Need [ENCODER:%d:%s] VDD to turn off panel\n",
dig_port->base.base.base.id, dig_port->base.base.name);
- pp = ironlake_get_pp_control(intel_dp);
+ pp = ilk_get_pp_control(intel_dp);
/* We need to switch off panel power _and_ force vdd, for otherwise some
* panels get very unhappy and cease to work. */
pp &= ~(PANEL_POWER_ON | PANEL_POWER_RESET | EDP_FORCE_VDD |
@@ -2917,7 +2968,7 @@ static void _intel_edp_backlight_on(struct intel_dp *intel_dp)
i915_reg_t pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
u32 pp;
- pp = ironlake_get_pp_control(intel_dp);
+ pp = ilk_get_pp_control(intel_dp);
pp |= EDP_BLC_ENABLE;
I915_WRITE(pp_ctrl_reg, pp);
@@ -2929,7 +2980,7 @@ static void _intel_edp_backlight_on(struct intel_dp *intel_dp)
void intel_edp_backlight_on(const struct intel_crtc_state *crtc_state,
const struct drm_connector_state *conn_state)
{
- struct intel_dp *intel_dp = enc_to_intel_dp(conn_state->best_encoder);
+ struct intel_dp *intel_dp = enc_to_intel_dp(to_intel_encoder(conn_state->best_encoder));
if (!intel_dp_is_edp(intel_dp))
return;
@@ -2953,7 +3004,7 @@ static void _intel_edp_backlight_off(struct intel_dp *intel_dp)
i915_reg_t pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
u32 pp;
- pp = ironlake_get_pp_control(intel_dp);
+ pp = ilk_get_pp_control(intel_dp);
pp &= ~EDP_BLC_ENABLE;
I915_WRITE(pp_ctrl_reg, pp);
@@ -2967,7 +3018,7 @@ static void _intel_edp_backlight_off(struct intel_dp *intel_dp)
/* Disable backlight PP control and backlight PWM. */
void intel_edp_backlight_off(const struct drm_connector_state *old_conn_state)
{
- struct intel_dp *intel_dp = enc_to_intel_dp(old_conn_state->best_encoder);
+ struct intel_dp *intel_dp = enc_to_intel_dp(to_intel_encoder(old_conn_state->best_encoder));
if (!intel_dp_is_edp(intel_dp))
return;
@@ -2985,13 +3036,13 @@ void intel_edp_backlight_off(const struct drm_connector_state *old_conn_state)
static void intel_edp_backlight_power(struct intel_connector *connector,
bool enable)
{
- struct intel_dp *intel_dp = intel_attached_dp(&connector->base);
+ struct intel_dp *intel_dp = intel_attached_dp(connector);
intel_wakeref_t wakeref;
bool is_enabled;
is_enabled = false;
with_pps_lock(intel_dp, wakeref)
- is_enabled = ironlake_get_pp_control(intel_dp) & EDP_BLC_ENABLE;
+ is_enabled = ilk_get_pp_control(intel_dp) & EDP_BLC_ENABLE;
if (is_enabled == enable)
return;
@@ -3028,13 +3079,13 @@ static void assert_edp_pll(struct drm_i915_private *dev_priv, bool state)
#define assert_edp_pll_enabled(d) assert_edp_pll((d), true)
#define assert_edp_pll_disabled(d) assert_edp_pll((d), false)
-static void ironlake_edp_pll_on(struct intel_dp *intel_dp,
- const struct intel_crtc_state *pipe_config)
+static void ilk_edp_pll_on(struct intel_dp *intel_dp,
+ const struct intel_crtc_state *pipe_config)
{
- struct intel_crtc *crtc = to_intel_crtc(pipe_config->base.crtc);
+ struct intel_crtc *crtc = to_intel_crtc(pipe_config->uapi.crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
- assert_pipe_disabled(dev_priv, crtc->pipe);
+ assert_pipe_disabled(dev_priv, pipe_config->cpu_transcoder);
assert_dp_port_disabled(intel_dp);
assert_edp_pll_disabled(dev_priv);
@@ -3068,13 +3119,13 @@ static void ironlake_edp_pll_on(struct intel_dp *intel_dp,
udelay(200);
}
-static void ironlake_edp_pll_off(struct intel_dp *intel_dp,
- const struct intel_crtc_state *old_crtc_state)
+static void ilk_edp_pll_off(struct intel_dp *intel_dp,
+ const struct intel_crtc_state *old_crtc_state)
{
- struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->base.crtc);
+ struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->uapi.crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
- assert_pipe_disabled(dev_priv, crtc->pipe);
+ assert_pipe_disabled(dev_priv, old_crtc_state->cpu_transcoder);
assert_dp_port_disabled(intel_dp);
assert_edp_pll_enabled(dev_priv);
@@ -3207,7 +3258,7 @@ static bool intel_dp_get_hw_state(struct intel_encoder *encoder,
enum pipe *pipe)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
+ struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
intel_wakeref_t wakeref;
bool ret;
@@ -3228,10 +3279,10 @@ static void intel_dp_get_config(struct intel_encoder *encoder,
struct intel_crtc_state *pipe_config)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
+ struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
u32 tmp, flags = 0;
enum port port = encoder->port;
- struct intel_crtc *crtc = to_intel_crtc(pipe_config->base.crtc);
+ struct intel_crtc *crtc = to_intel_crtc(pipe_config->uapi.crtc);
if (encoder->type == INTEL_OUTPUT_EDP)
pipe_config->output_types |= BIT(INTEL_OUTPUT_EDP);
@@ -3266,7 +3317,7 @@ static void intel_dp_get_config(struct intel_encoder *encoder,
flags |= DRM_MODE_FLAG_NVSYNC;
}
- pipe_config->base.adjusted_mode.flags |= flags;
+ pipe_config->hw.adjusted_mode.flags |= flags;
if (IS_G4X(dev_priv) && tmp & DP_COLOR_RANGE_16_235)
pipe_config->limited_color_range = true;
@@ -3283,7 +3334,7 @@ static void intel_dp_get_config(struct intel_encoder *encoder,
pipe_config->port_clock = 270000;
}
- pipe_config->base.adjusted_mode.crtc_clock =
+ pipe_config->hw.adjusted_mode.crtc_clock =
intel_dotclock_calculate(pipe_config->port_clock,
&pipe_config->dp_m_n);
@@ -3312,7 +3363,7 @@ static void intel_disable_dp(struct intel_encoder *encoder,
const struct intel_crtc_state *old_crtc_state,
const struct drm_connector_state *old_conn_state)
{
- struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
+ struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
intel_dp->link_trained = false;
@@ -3346,7 +3397,7 @@ static void g4x_post_disable_dp(struct intel_encoder *encoder,
const struct intel_crtc_state *old_crtc_state,
const struct drm_connector_state *old_conn_state)
{
- struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
+ struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
enum port port = encoder->port;
/*
@@ -3359,7 +3410,7 @@ static void g4x_post_disable_dp(struct intel_encoder *encoder,
/* Only ilk+ has port A */
if (port == PORT_A)
- ironlake_edp_pll_off(intel_dp, old_crtc_state);
+ ilk_edp_pll_off(intel_dp, old_crtc_state);
}
static void vlv_post_disable_dp(struct intel_encoder *encoder,
@@ -3497,8 +3548,8 @@ static void intel_enable_dp(struct intel_encoder *encoder,
const struct drm_connector_state *conn_state)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
- struct intel_crtc *crtc = to_intel_crtc(pipe_config->base.crtc);
+ struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
+ struct intel_crtc *crtc = to_intel_crtc(pipe_config->uapi.crtc);
u32 dp_reg = I915_READ(intel_dp->output_reg);
enum pipe pipe = crtc->pipe;
intel_wakeref_t wakeref;
@@ -3557,14 +3608,14 @@ static void g4x_pre_enable_dp(struct intel_encoder *encoder,
const struct intel_crtc_state *pipe_config,
const struct drm_connector_state *conn_state)
{
- struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
+ struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
enum port port = encoder->port;
intel_dp_prepare(encoder, pipe_config);
/* Only ilk+ has port A */
if (port == PORT_A)
- ironlake_edp_pll_on(intel_dp, pipe_config);
+ ilk_edp_pll_on(intel_dp, pipe_config);
}
static void vlv_detach_power_sequencer(struct intel_dp *intel_dp)
@@ -3607,7 +3658,7 @@ static void vlv_steal_power_sequencer(struct drm_i915_private *dev_priv,
lockdep_assert_held(&dev_priv->pps_mutex);
for_each_intel_dp(&dev_priv->drm, encoder) {
- struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
+ struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
WARN(intel_dp->active_pipe == pipe,
"stealing pipe %c power sequencer from active [ENCODER:%d:%s]\n",
@@ -3630,8 +3681,8 @@ static void vlv_init_panel_power_sequencer(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+ struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
lockdep_assert_held(&dev_priv->pps_mutex);
@@ -4152,8 +4203,8 @@ intel_dp_link_down(struct intel_encoder *encoder,
const struct intel_crtc_state *old_crtc_state)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
- struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->base.crtc);
+ struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
+ struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->uapi.crtc);
enum port port = encoder->port;
u32 DP = intel_dp->DP;
@@ -4852,7 +4903,7 @@ static u8 intel_dp_autotest_video_pattern(struct intel_dp *intel_dp)
intel_dp->compliance.test_data.hdisplay = be16_to_cpu(h_width);
intel_dp->compliance.test_data.vdisplay = be16_to_cpu(v_height);
/* Set test active flag here so userspace doesn't interrupt things */
- intel_dp->compliance.test_active = 1;
+ intel_dp->compliance.test_active = true;
return DP_TEST_ACK;
}
@@ -4896,7 +4947,7 @@ static u8 intel_dp_autotest_edid(struct intel_dp *intel_dp)
}
/* Set test active flag here so userspace doesn't interrupt things */
- intel_dp->compliance.test_active = 1;
+ intel_dp->compliance.test_active = true;
return test_result;
}
@@ -5045,7 +5096,7 @@ int intel_dp_retrain_link(struct intel_encoder *encoder,
struct drm_modeset_acquire_ctx *ctx)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
+ struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
struct intel_connector *connector = intel_dp->attached_connector;
struct drm_connector_state *conn_state;
struct intel_crtc_state *crtc_state;
@@ -5076,7 +5127,7 @@ int intel_dp_retrain_link(struct intel_encoder *encoder,
WARN_ON(!intel_crtc_has_dp_encoder(crtc_state));
- if (!crtc_state->base.active)
+ if (!crtc_state->hw.active)
return 0;
if (conn_state->commit &&
@@ -5482,10 +5533,10 @@ static bool intel_combo_phy_connected(struct drm_i915_private *dev_priv,
return I915_READ(SDEISR) & SDE_DDI_HOTPLUG_ICP(phy);
}
-static bool icl_digital_port_connected(struct intel_encoder *encoder)
+static bool icp_digital_port_connected(struct intel_encoder *encoder)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- struct intel_digital_port *dig_port = enc_to_dig_port(&encoder->base);
+ struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
enum phy phy = intel_port_to_phy(dev_priv, encoder->port);
if (intel_phy_is_combo(dev_priv, phy))
@@ -5520,9 +5571,9 @@ static bool __intel_digital_port_connected(struct intel_encoder *encoder)
return g4x_digital_port_connected(encoder);
}
- if (INTEL_GEN(dev_priv) >= 11)
- return icl_digital_port_connected(encoder);
- else if (IS_GEN(dev_priv, 10) || IS_GEN9_BC(dev_priv))
+ if (INTEL_PCH_TYPE(dev_priv) >= PCH_ICP)
+ return icp_digital_port_connected(encoder);
+ else if (INTEL_PCH_TYPE(dev_priv) >= PCH_SPT)
return spt_digital_port_connected(encoder);
else if (IS_GEN9_LP(dev_priv))
return bxt_digital_port_connected(encoder);
@@ -5600,7 +5651,7 @@ intel_dp_detect(struct drm_connector *connector,
bool force)
{
struct drm_i915_private *dev_priv = to_i915(connector->dev);
- struct intel_dp *intel_dp = intel_attached_dp(connector);
+ struct intel_dp *intel_dp = intel_attached_dp(to_intel_connector(connector));
struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
struct intel_encoder *encoder = &dig_port->base;
enum drm_connector_status status;
@@ -5704,7 +5755,7 @@ out:
static void
intel_dp_force(struct drm_connector *connector)
{
- struct intel_dp *intel_dp = intel_attached_dp(connector);
+ struct intel_dp *intel_dp = intel_attached_dp(to_intel_connector(connector));
struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
struct intel_encoder *intel_encoder = &dig_port->base;
struct drm_i915_private *dev_priv = to_i915(intel_encoder->base.dev);
@@ -5739,7 +5790,7 @@ static int intel_dp_get_modes(struct drm_connector *connector)
}
/* if eDP has no EDID, fall back to fixed mode */
- if (intel_dp_is_edp(intel_attached_dp(connector)) &&
+ if (intel_dp_is_edp(intel_attached_dp(to_intel_connector(connector))) &&
intel_connector->panel.fixed_mode) {
struct drm_display_mode *mode;
@@ -5757,7 +5808,7 @@ static int intel_dp_get_modes(struct drm_connector *connector)
static int
intel_dp_connector_register(struct drm_connector *connector)
{
- struct intel_dp *intel_dp = intel_attached_dp(connector);
+ struct intel_dp *intel_dp = intel_attached_dp(to_intel_connector(connector));
int ret;
ret = intel_connector_register(connector);
@@ -5779,7 +5830,7 @@ intel_dp_connector_register(struct drm_connector *connector)
static void
intel_dp_connector_unregister(struct drm_connector *connector)
{
- struct intel_dp *intel_dp = intel_attached_dp(connector);
+ struct intel_dp *intel_dp = intel_attached_dp(to_intel_connector(connector));
drm_dp_cec_unregister_connector(&intel_dp->aux);
drm_dp_aux_unregister(&intel_dp->aux);
@@ -5788,7 +5839,7 @@ intel_dp_connector_unregister(struct drm_connector *connector)
void intel_dp_encoder_flush_work(struct drm_encoder *encoder)
{
- struct intel_digital_port *intel_dig_port = enc_to_dig_port(encoder);
+ struct intel_digital_port *intel_dig_port = enc_to_dig_port(to_intel_encoder(encoder));
struct intel_dp *intel_dp = &intel_dig_port->dp;
intel_dp_mst_encoder_cleanup(intel_dig_port);
@@ -5817,12 +5868,12 @@ static void intel_dp_encoder_destroy(struct drm_encoder *encoder)
intel_dp_encoder_flush_work(encoder);
drm_encoder_cleanup(encoder);
- kfree(enc_to_dig_port(encoder));
+ kfree(enc_to_dig_port(to_intel_encoder(encoder)));
}
void intel_dp_encoder_suspend(struct intel_encoder *intel_encoder)
{
- struct intel_dp *intel_dp = enc_to_intel_dp(&intel_encoder->base);
+ struct intel_dp *intel_dp = enc_to_intel_dp(intel_encoder);
intel_wakeref_t wakeref;
if (!intel_dp_is_edp(intel_dp))
@@ -5853,7 +5904,7 @@ static
int intel_dp_hdcp_write_an_aksv(struct intel_digital_port *intel_dig_port,
u8 *an)
{
- struct intel_dp *intel_dp = enc_to_intel_dp(&intel_dig_port->base.base);
+ struct intel_dp *intel_dp = enc_to_intel_dp(to_intel_encoder(&intel_dig_port->base.base));
static const struct drm_dp_aux_msg msg = {
.request = DP_AUX_NATIVE_WRITE,
.address = DP_AUX_HDCP_AKSV,
@@ -6463,7 +6514,7 @@ static enum pipe vlv_active_pipe(struct intel_dp *intel_dp)
void intel_dp_encoder_reset(struct drm_encoder *encoder)
{
struct drm_i915_private *dev_priv = to_i915(encoder->dev);
- struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
+ struct intel_dp *intel_dp = enc_to_intel_dp(to_intel_encoder(encoder));
struct intel_lspcon *lspcon = dp_to_lspcon(intel_dp);
intel_wakeref_t wakeref;
@@ -6642,7 +6693,7 @@ intel_pps_readout_hw_state(struct intel_dp *intel_dp, struct edp_power_seq *seq)
intel_pps_get_registers(intel_dp, &regs);
- pp_ctl = ironlake_get_pp_control(intel_dp);
+ pp_ctl = ilk_get_pp_control(intel_dp);
/* Ensure PPS is unlocked */
if (!HAS_DDI(dev_priv))
@@ -6812,7 +6863,7 @@ intel_dp_init_panel_power_sequencer_registers(struct intel_dp *intel_dp,
* soon as the new power sequencer gets initialized.
*/
if (force_disable_vdd) {
- u32 pp = ironlake_get_pp_control(intel_dp);
+ u32 pp = ilk_get_pp_control(intel_dp);
WARN(pp & PANEL_POWER_ON, "Panel power already on\n");
@@ -6909,7 +6960,7 @@ static void intel_dp_set_drrs_state(struct drm_i915_private *dev_priv,
int refresh_rate)
{
struct intel_dp *intel_dp = dev_priv->drrs.dp;
- struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->base.crtc);
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->uapi.crtc);
enum drrs_refresh_rate_type index = DRRS_HIGH_RR;
if (refresh_rate <= 0) {
@@ -6942,7 +6993,7 @@ static void intel_dp_set_drrs_state(struct drm_i915_private *dev_priv,
return;
}
- if (!crtc_state->base.active) {
+ if (!crtc_state->hw.active) {
DRM_DEBUG_KMS("eDP encoder disabled. CRTC not Active\n");
return;
}
@@ -7609,7 +7660,7 @@ void intel_dp_mst_suspend(struct drm_i915_private *dev_priv)
if (encoder->type != INTEL_OUTPUT_DDI)
continue;
- intel_dp = enc_to_intel_dp(&encoder->base);
+ intel_dp = enc_to_intel_dp(encoder);
if (!intel_dp->can_mst)
continue;
@@ -7630,7 +7681,7 @@ void intel_dp_mst_resume(struct drm_i915_private *dev_priv)
if (encoder->type != INTEL_OUTPUT_DDI)
continue;
- intel_dp = enc_to_intel_dp(&encoder->base);
+ intel_dp = enc_to_intel_dp(encoder);
if (!intel_dp->can_mst)
continue;
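
Editor's note: the slice-height heuristic in intel_dp_dsc_compute_params() above reduces to a pure function; restated standalone here for clarity (a sketch, not driver code):

/* Pick the largest of {8, 4, 2} that evenly divides the picture height. */
static int pick_dsc_slice_height(int pic_height)
{
        if (pic_height % 8 == 0)
                return 8;
        if (pic_height % 4 == 0)
                return 4;
        return 2;
}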
diff --git a/drivers/gpu/drm/i915/display/intel_dp_aux_backlight.c b/drivers/gpu/drm/i915/display/intel_dp_aux_backlight.c
index 020422da2ae2..7c653f8c307f 100644
--- a/drivers/gpu/drm/i915/display/intel_dp_aux_backlight.c
+++ b/drivers/gpu/drm/i915/display/intel_dp_aux_backlight.c
@@ -57,7 +57,7 @@ static void set_aux_backlight_enable(struct intel_dp *intel_dp, bool enable)
*/
static u32 intel_dp_aux_get_backlight(struct intel_connector *connector)
{
- struct intel_dp *intel_dp = enc_to_intel_dp(&connector->encoder->base);
+ struct intel_dp *intel_dp = enc_to_intel_dp(connector->encoder);
u8 read_val[2] = { 0x0 };
u16 level = 0;
@@ -82,7 +82,7 @@ static void
intel_dp_aux_set_backlight(const struct drm_connector_state *conn_state, u32 level)
{
struct intel_connector *connector = to_intel_connector(conn_state->connector);
- struct intel_dp *intel_dp = enc_to_intel_dp(&connector->encoder->base);
+ struct intel_dp *intel_dp = enc_to_intel_dp(connector->encoder);
u8 vals[2] = { 0x0 };
vals[0] = level;
@@ -110,7 +110,7 @@ intel_dp_aux_set_backlight(const struct drm_connector_state *conn_state, u32 lev
static bool intel_dp_aux_set_pwm_freq(struct intel_connector *connector)
{
struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
- struct intel_dp *intel_dp = enc_to_intel_dp(&connector->encoder->base);
+ struct intel_dp *intel_dp = enc_to_intel_dp(connector->encoder);
int freq, fxp, fxp_min, fxp_max, fxp_actual, f = 1;
u8 pn, pn_min, pn_max;
@@ -178,7 +178,7 @@ static void intel_dp_aux_enable_backlight(const struct intel_crtc_state *crtc_st
const struct drm_connector_state *conn_state)
{
struct intel_connector *connector = to_intel_connector(conn_state->connector);
- struct intel_dp *intel_dp = enc_to_intel_dp(&connector->encoder->base);
+ struct intel_dp *intel_dp = enc_to_intel_dp(connector->encoder);
u8 dpcd_buf, new_dpcd_buf, edp_backlight_mode;
if (drm_dp_dpcd_readb(&intel_dp->aux,
@@ -222,13 +222,14 @@ static void intel_dp_aux_enable_backlight(const struct intel_crtc_state *crtc_st
static void intel_dp_aux_disable_backlight(const struct drm_connector_state *old_conn_state)
{
- set_aux_backlight_enable(enc_to_intel_dp(old_conn_state->best_encoder), false);
+ set_aux_backlight_enable(enc_to_intel_dp(to_intel_encoder(old_conn_state->best_encoder)),
+ false);
}
static int intel_dp_aux_setup_backlight(struct intel_connector *connector,
enum pipe pipe)
{
- struct intel_dp *intel_dp = enc_to_intel_dp(&connector->encoder->base);
+ struct intel_dp *intel_dp = enc_to_intel_dp(connector->encoder);
struct intel_panel *panel = &connector->panel;
if (intel_dp->edp_dpcd[2] & DP_EDP_BACKLIGHT_BRIGHTNESS_BYTE_COUNT)
@@ -247,7 +248,7 @@ static int intel_dp_aux_setup_backlight(struct intel_connector *connector,
static bool
intel_dp_aux_display_control_capable(struct intel_connector *connector)
{
- struct intel_dp *intel_dp = enc_to_intel_dp(&connector->encoder->base);
+ struct intel_dp *intel_dp = enc_to_intel_dp(connector->encoder);
/* Check the eDP Display control capabilities registers to determine if
* the panel can support backlight control over the aux channel
diff --git a/drivers/gpu/drm/i915/display/intel_dp_mst.c b/drivers/gpu/drm/i915/display/intel_dp_mst.c
index 03d1cba0b696..cba68c5a80fa 100644
--- a/drivers/gpu/drm/i915/display/intel_dp_mst.c
+++ b/drivers/gpu/drm/i915/display/intel_dp_mst.c
@@ -42,13 +42,13 @@ static int intel_dp_mst_compute_link_config(struct intel_encoder *encoder,
struct drm_connector_state *conn_state,
struct link_config_limits *limits)
{
- struct drm_atomic_state *state = crtc_state->base.state;
- struct intel_dp_mst_encoder *intel_mst = enc_to_mst(&encoder->base);
+ struct drm_atomic_state *state = crtc_state->uapi.state;
+ struct intel_dp_mst_encoder *intel_mst = enc_to_mst(encoder);
struct intel_dp *intel_dp = &intel_mst->primary->dp;
struct intel_connector *connector =
to_intel_connector(conn_state->connector);
const struct drm_display_mode *adjusted_mode =
- &crtc_state->base.adjusted_mode;
+ &crtc_state->hw.adjusted_mode;
void *port = connector->port;
bool constant_n = drm_dp_has_quirk(&intel_dp->desc,
DP_DPCD_QUIRK_CONSTANT_N);
@@ -61,10 +61,11 @@ static int intel_dp_mst_compute_link_config(struct intel_encoder *encoder,
crtc_state->pipe_bpp = bpp;
crtc_state->pbn = drm_dp_calc_pbn_mode(adjusted_mode->crtc_clock,
- crtc_state->pipe_bpp);
+ crtc_state->pipe_bpp,
+ false);
slots = drm_dp_atomic_find_vcpi_slots(state, &intel_dp->mst_mgr,
- port, crtc_state->pbn);
+ port, crtc_state->pbn, 0);
if (slots == -EDEADLK)
return slots;
if (slots >= 0)
@@ -87,19 +88,65 @@ static int intel_dp_mst_compute_link_config(struct intel_encoder *encoder,
return 0;
}
+/*
+ * Iterate over all connectors and return the smallest transcoder in the MST
+ * stream
+ */
+static enum transcoder
+intel_dp_mst_master_trans_compute(struct intel_atomic_state *state,
+ struct intel_dp *mst_port)
+{
+ struct drm_i915_private *dev_priv = to_i915(state->base.dev);
+ struct intel_digital_connector_state *conn_state;
+ struct intel_connector *connector;
+ enum pipe ret = I915_MAX_PIPES;
+ int i;
+
+ if (INTEL_GEN(dev_priv) < 12)
+ return INVALID_TRANSCODER;
+
+ for_each_new_intel_connector_in_state(state, connector, conn_state, i) {
+ struct intel_crtc_state *crtc_state;
+ struct intel_crtc *crtc;
+
+ if (connector->mst_port != mst_port || !conn_state->base.crtc)
+ continue;
+
+ crtc = to_intel_crtc(conn_state->base.crtc);
+ crtc_state = intel_atomic_get_new_crtc_state(state, crtc);
+ if (!crtc_state->uapi.active)
+ continue;
+
+ /*
+ * Using crtc->pipe because crtc_state->cpu_transcoder is
+ * computed, so other CRTCs could have a non-computed
+ * cpu_transcoder
+ */
+ if (crtc->pipe < ret)
+ ret = crtc->pipe;
+ }
+
+ if (ret == I915_MAX_PIPES)
+ return INVALID_TRANSCODER;
+
+ /* Simple cast works because TGL doesn't have an eDP transcoder */
+ return (enum transcoder)ret;
+}
+
static int intel_dp_mst_compute_config(struct intel_encoder *encoder,
struct intel_crtc_state *pipe_config,
struct drm_connector_state *conn_state)
{
+ struct intel_atomic_state *state = to_intel_atomic_state(conn_state->state);
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- struct intel_dp_mst_encoder *intel_mst = enc_to_mst(&encoder->base);
+ struct intel_dp_mst_encoder *intel_mst = enc_to_mst(encoder);
struct intel_dp *intel_dp = &intel_mst->primary->dp;
struct intel_connector *connector =
to_intel_connector(conn_state->connector);
struct intel_digital_connector_state *intel_conn_state =
to_intel_digital_connector_state(conn_state);
const struct drm_display_mode *adjusted_mode =
- &pipe_config->base.adjusted_mode;
+ &pipe_config->hw.adjusted_mode;
void *port = connector->port;
struct link_config_limits limits;
int ret;
@@ -154,25 +201,91 @@ static int intel_dp_mst_compute_config(struct intel_encoder *encoder,
intel_ddi_compute_min_voltage_level(dev_priv, pipe_config);
+ pipe_config->mst_master_transcoder = intel_dp_mst_master_trans_compute(state, intel_dp);
+
+ return 0;
+}
+
+/*
+ * If one of the connectors in an MST stream needs a modeset, mark all CRTCs
+ * that share the same MST stream as mode changed;
+ * intel_modeset_pipe_config()+intel_crtc_check_fastset() will take care of
+ * doing a fastset when possible.
+ */
+static int
+intel_dp_mst_atomic_master_trans_check(struct intel_connector *connector,
+ struct intel_atomic_state *state)
+{
+ struct drm_i915_private *dev_priv = to_i915(state->base.dev);
+ struct drm_connector_list_iter connector_list_iter;
+ struct intel_connector *connector_iter;
+
+ if (INTEL_GEN(dev_priv) < 12)
+ return 0;
+
+ if (!intel_connector_needs_modeset(state, &connector->base))
+ return 0;
+
+ drm_connector_list_iter_begin(&dev_priv->drm, &connector_list_iter);
+ for_each_intel_connector_iter(connector_iter, &connector_list_iter) {
+ struct intel_digital_connector_state *conn_iter_state;
+ struct intel_crtc_state *crtc_state;
+ struct intel_crtc *crtc;
+ int ret;
+
+ if (connector_iter->mst_port != connector->mst_port ||
+ connector_iter == connector)
+ continue;
+
+ conn_iter_state = intel_atomic_get_digital_connector_state(state,
+ connector_iter);
+ if (IS_ERR(conn_iter_state)) {
+ drm_connector_list_iter_end(&connector_list_iter);
+ return PTR_ERR(conn_iter_state);
+ }
+
+ if (!conn_iter_state->base.crtc)
+ continue;
+
+ crtc = to_intel_crtc(conn_iter_state->base.crtc);
+ crtc_state = intel_atomic_get_crtc_state(&state->base, crtc);
+ if (IS_ERR(crtc_state)) {
+ drm_connector_list_iter_end(&connector_list_iter);
+ return PTR_ERR(crtc_state);
+ }
+
+ ret = drm_atomic_add_affected_planes(&state->base, &crtc->base);
+ if (ret) {
+ drm_connector_list_iter_end(&connector_list_iter);
+ return ret;
+ }
+ crtc_state->uapi.mode_changed = true;
+ }
+ drm_connector_list_iter_end(&connector_list_iter);
+
return 0;
}
static int
intel_dp_mst_atomic_check(struct drm_connector *connector,
- struct drm_atomic_state *state)
+ struct drm_atomic_state *_state)
{
+ struct intel_atomic_state *state = to_intel_atomic_state(_state);
struct drm_connector_state *new_conn_state =
- drm_atomic_get_new_connector_state(state, connector);
+ drm_atomic_get_new_connector_state(&state->base, connector);
struct drm_connector_state *old_conn_state =
- drm_atomic_get_old_connector_state(state, connector);
+ drm_atomic_get_old_connector_state(&state->base, connector);
struct intel_connector *intel_connector =
to_intel_connector(connector);
struct drm_crtc *new_crtc = new_conn_state->crtc;
- struct drm_crtc_state *crtc_state;
struct drm_dp_mst_topology_mgr *mgr;
int ret;
- ret = intel_digital_connector_atomic_check(connector, state);
+ ret = intel_digital_connector_atomic_check(connector, &state->base);
+ if (ret)
+ return ret;
+
+ ret = intel_dp_mst_atomic_master_trans_check(intel_connector, state);
if (ret)
return ret;
@@ -183,16 +296,18 @@ intel_dp_mst_atomic_check(struct drm_connector *connector,
* connector
*/
if (new_crtc) {
- crtc_state = drm_atomic_get_new_crtc_state(state, new_crtc);
+ struct intel_crtc *intel_crtc = to_intel_crtc(new_crtc);
+ struct intel_crtc_state *crtc_state =
+ intel_atomic_get_new_crtc_state(state, intel_crtc);
if (!crtc_state ||
- !drm_atomic_crtc_needs_modeset(crtc_state) ||
- crtc_state->enable)
+ !drm_atomic_crtc_needs_modeset(&crtc_state->uapi) ||
+ crtc_state->uapi.enable)
return 0;
}
- mgr = &enc_to_mst(old_conn_state->best_encoder)->primary->dp.mst_mgr;
- ret = drm_dp_atomic_release_vcpi_slots(state, mgr,
+ mgr = &enc_to_mst(to_intel_encoder(old_conn_state->best_encoder))->primary->dp.mst_mgr;
+ ret = drm_dp_atomic_release_vcpi_slots(&state->base, mgr,
intel_connector->port);
return ret;
@@ -202,7 +317,7 @@ static void intel_mst_disable_dp(struct intel_encoder *encoder,
const struct intel_crtc_state *old_crtc_state,
const struct drm_connector_state *old_conn_state)
{
- struct intel_dp_mst_encoder *intel_mst = enc_to_mst(&encoder->base);
+ struct intel_dp_mst_encoder *intel_mst = enc_to_mst(encoder);
struct intel_digital_port *intel_dig_port = intel_mst->primary;
struct intel_dp *intel_dp = &intel_dig_port->dp;
struct intel_connector *connector =
@@ -226,36 +341,65 @@ static void intel_mst_post_disable_dp(struct intel_encoder *encoder,
const struct intel_crtc_state *old_crtc_state,
const struct drm_connector_state *old_conn_state)
{
- struct intel_dp_mst_encoder *intel_mst = enc_to_mst(&encoder->base);
+ struct intel_dp_mst_encoder *intel_mst = enc_to_mst(encoder);
struct intel_digital_port *intel_dig_port = intel_mst->primary;
struct intel_dp *intel_dp = &intel_dig_port->dp;
struct intel_connector *connector =
to_intel_connector(old_conn_state->connector);
+ struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
+ bool last_mst_stream;
+ u32 val;
+
+ intel_dp->active_mst_links--;
+ last_mst_stream = intel_dp->active_mst_links == 0;
+ WARN_ON(INTEL_GEN(dev_priv) >= 12 && last_mst_stream &&
+ !intel_dp_mst_is_master_trans(old_crtc_state));
- intel_ddi_disable_pipe_clock(old_crtc_state);
+ intel_crtc_vblank_off(old_crtc_state);
+
+ intel_disable_pipe(old_crtc_state);
- /* this can fail */
- drm_dp_check_act_status(&intel_dp->mst_mgr);
- /* and this can also fail */
drm_dp_update_payload_part2(&intel_dp->mst_mgr);
+ val = I915_READ(TRANS_DDI_FUNC_CTL(old_crtc_state->cpu_transcoder));
+ val &= ~TRANS_DDI_DP_VC_PAYLOAD_ALLOC;
+ I915_WRITE(TRANS_DDI_FUNC_CTL(old_crtc_state->cpu_transcoder), val);
+
+ if (intel_de_wait_for_set(dev_priv, intel_dp->regs.dp_tp_status,
+ DP_TP_STATUS_ACT_SENT, 1))
+ DRM_ERROR("Timed out waiting for ACT sent when disabling\n");
+ drm_dp_check_act_status(&intel_dp->mst_mgr);
+
drm_dp_mst_deallocate_vcpi(&intel_dp->mst_mgr, connector->port);
+ intel_ddi_disable_transcoder_func(old_crtc_state);
+
+ if (INTEL_GEN(dev_priv) >= 9)
+ skl_scaler_disable(old_crtc_state);
+ else
+ ilk_pfit_disable(old_crtc_state);
+
/*
* Power down mst path before disabling the port, otherwise we end
* up getting interrupts from the sink upon detecting link loss.
*/
drm_dp_send_power_updown_phy(&intel_dp->mst_mgr, connector->port,
false);
+ /*
+ * From TGL spec: "If multi-stream slave transcoder: Configure
+ * Transcoder Clock Select to direct no clock to the transcoder"
+ *
+ * From older GENs spec: "Configure Transcoder Clock Select to direct
+ * no clock to the transcoder"
+ */
+ if (INTEL_GEN(dev_priv) < 12 || !last_mst_stream)
+ intel_ddi_disable_pipe_clock(old_crtc_state);
- intel_dp->active_mst_links--;
intel_mst->connector = NULL;
- if (intel_dp->active_mst_links == 0) {
- intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_OFF);
+ if (last_mst_stream)
intel_dig_port->base.post_disable(&intel_dig_port->base,
old_crtc_state, NULL);
- }
DRM_DEBUG_KMS("active links %d\n", intel_dp->active_mst_links);
}
@@ -264,7 +408,7 @@ static void intel_mst_pre_pll_enable_dp(struct intel_encoder *encoder,
const struct intel_crtc_state *pipe_config,
const struct drm_connector_state *conn_state)
{
- struct intel_dp_mst_encoder *intel_mst = enc_to_mst(&encoder->base);
+ struct intel_dp_mst_encoder *intel_mst = enc_to_mst(encoder);
struct intel_digital_port *intel_dig_port = intel_mst->primary;
struct intel_dp *intel_dp = &intel_dig_port->dp;
@@ -273,25 +417,11 @@ static void intel_mst_pre_pll_enable_dp(struct intel_encoder *encoder,
pipe_config, NULL);
}
-static void intel_mst_post_pll_disable_dp(struct intel_encoder *encoder,
- const struct intel_crtc_state *old_crtc_state,
- const struct drm_connector_state *old_conn_state)
-{
- struct intel_dp_mst_encoder *intel_mst = enc_to_mst(&encoder->base);
- struct intel_digital_port *intel_dig_port = intel_mst->primary;
- struct intel_dp *intel_dp = &intel_dig_port->dp;
-
- if (intel_dp->active_mst_links == 0)
- intel_dig_port->base.post_pll_disable(&intel_dig_port->base,
- old_crtc_state,
- old_conn_state);
-}
-
static void intel_mst_pre_enable_dp(struct intel_encoder *encoder,
const struct intel_crtc_state *pipe_config,
const struct drm_connector_state *conn_state)
{
- struct intel_dp_mst_encoder *intel_mst = enc_to_mst(&encoder->base);
+ struct intel_dp_mst_encoder *intel_mst = enc_to_mst(encoder);
struct intel_digital_port *intel_dig_port = intel_mst->primary;
struct intel_dp *intel_dp = &intel_dig_port->dp;
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
@@ -299,21 +429,25 @@ static void intel_mst_pre_enable_dp(struct intel_encoder *encoder,
to_intel_connector(conn_state->connector);
int ret;
u32 temp;
+ bool first_mst_stream;
/* MST encoders are bound to a crtc, not to a connector,
* force the mapping here for get_hw_state.
*/
connector->encoder = encoder;
intel_mst->connector = connector;
+ first_mst_stream = intel_dp->active_mst_links == 0;
+ WARN_ON(INTEL_GEN(dev_priv) >= 12 && first_mst_stream &&
+ !intel_dp_mst_is_master_trans(pipe_config));
DRM_DEBUG_KMS("active links %d\n", intel_dp->active_mst_links);
- if (intel_dp->active_mst_links == 0)
+ if (first_mst_stream)
intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_ON);
drm_dp_send_power_updown_phy(&intel_dp->mst_mgr, connector->port, true);
- if (intel_dp->active_mst_links == 0)
+ if (first_mst_stream)
intel_dig_port->base.pre_enable(&intel_dig_port->base,
pipe_config, NULL);
@@ -330,7 +464,15 @@ static void intel_mst_pre_enable_dp(struct intel_encoder *encoder,
ret = drm_dp_update_payload_part1(&intel_dp->mst_mgr);
- intel_ddi_enable_pipe_clock(pipe_config);
+ /*
+ * Before Gen 12 this is not done as part of
+ * intel_dig_port->base.pre_enable() and should be done here. For
+ * Gen 12+ the step in which this should be done is different for the
+ * first MST stream, so it's done on the DDI for the first stream and
+ * here for the following ones.
+ */
+ if (INTEL_GEN(dev_priv) < 12 || !first_mst_stream)
+ intel_ddi_enable_pipe_clock(pipe_config);
intel_ddi_set_dp_msa(pipe_config, conn_state);
}
@@ -339,7 +481,7 @@ static void intel_mst_enable_dp(struct intel_encoder *encoder,
const struct intel_crtc_state *pipe_config,
const struct drm_connector_state *conn_state)
{
- struct intel_dp_mst_encoder *intel_mst = enc_to_mst(&encoder->base);
+ struct intel_dp_mst_encoder *intel_mst = enc_to_mst(encoder);
struct intel_digital_port *intel_dig_port = intel_mst->primary;
struct intel_dp *intel_dp = &intel_dig_port->dp;
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
@@ -360,7 +502,7 @@ static void intel_mst_enable_dp(struct intel_encoder *encoder,
static bool intel_dp_mst_enc_get_hw_state(struct intel_encoder *encoder,
enum pipe *pipe)
{
- struct intel_dp_mst_encoder *intel_mst = enc_to_mst(&encoder->base);
+ struct intel_dp_mst_encoder *intel_mst = enc_to_mst(encoder);
*pipe = intel_mst->pipe;
if (intel_mst->connector)
return true;
@@ -370,7 +512,7 @@ static bool intel_dp_mst_enc_get_hw_state(struct intel_encoder *encoder,
static void intel_dp_mst_enc_get_config(struct intel_encoder *encoder,
struct intel_crtc_state *pipe_config)
{
- struct intel_dp_mst_encoder *intel_mst = enc_to_mst(&encoder->base);
+ struct intel_dp_mst_encoder *intel_mst = enc_to_mst(encoder);
struct intel_digital_port *intel_dig_port = intel_mst->primary;
intel_ddi_get_config(&intel_dig_port->base, pipe_config);
@@ -478,7 +620,7 @@ static const struct drm_connector_helper_funcs intel_dp_mst_connector_helper_fun
static void intel_dp_mst_encoder_destroy(struct drm_encoder *encoder)
{
- struct intel_dp_mst_encoder *intel_mst = enc_to_mst(encoder);
+ struct intel_dp_mst_encoder *intel_mst = enc_to_mst(to_intel_encoder(encoder));
drm_encoder_cleanup(encoder);
kfree(intel_mst);
@@ -633,7 +775,6 @@ intel_dp_create_fake_mst_encoder(struct intel_digital_port *intel_dig_port, enum
intel_encoder->disable = intel_mst_disable_dp;
intel_encoder->post_disable = intel_mst_post_disable_dp;
intel_encoder->pre_pll_enable = intel_mst_pre_pll_enable_dp;
- intel_encoder->post_pll_disable = intel_mst_post_pll_disable_dp;
intel_encoder->pre_enable = intel_mst_pre_enable_dp;
intel_encoder->enable = intel_mst_enable_dp;
intel_encoder->get_hw_state = intel_dp_mst_enc_get_hw_state;
@@ -703,3 +844,14 @@ intel_dp_mst_encoder_cleanup(struct intel_digital_port *intel_dig_port)
drm_dp_mst_topology_mgr_destroy(&intel_dp->mst_mgr);
/* encoders will get killed by normal cleanup */
}
+
+bool intel_dp_mst_is_master_trans(const struct intel_crtc_state *crtc_state)
+{
+ return crtc_state->mst_master_transcoder == crtc_state->cpu_transcoder;
+}
+
+bool intel_dp_mst_is_slave_trans(const struct intel_crtc_state *crtc_state)
+{
+ return crtc_state->mst_master_transcoder != INVALID_TRANSCODER &&
+ crtc_state->mst_master_transcoder != crtc_state->cpu_transcoder;
+}
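
Editor's note: a hedged usage sketch for the two predicates added above — on gen12+, enable/disable paths branch on whether the current transcoder is the MST master before doing stream-wide programming. Everything here except the two helpers (assumed in scope via the driver's intel_dp_mst.h) is a made-up placeholder:

/* Hypothetical placeholder; not a function in this patch. */
static void program_master_stream(const struct intel_crtc_state *crtc_state)
{
}

static void example_mst_enable(const struct intel_crtc_state *crtc_state)
{
        /*
         * Only the MST master transcoder performs stream-wide setup;
         * slave transcoders (gen12+) skip it.
         */
        if (intel_dp_mst_is_master_trans(crtc_state))
                program_master_stream(crtc_state);
}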
diff --git a/drivers/gpu/drm/i915/display/intel_dp_mst.h b/drivers/gpu/drm/i915/display/intel_dp_mst.h
index f660ad80db04..854724f68f09 100644
--- a/drivers/gpu/drm/i915/display/intel_dp_mst.h
+++ b/drivers/gpu/drm/i915/display/intel_dp_mst.h
@@ -6,10 +6,15 @@
#ifndef __INTEL_DP_MST_H__
#define __INTEL_DP_MST_H__
+#include <linux/types.h>
+
struct intel_digital_port;
+struct intel_crtc_state;
int intel_dp_mst_encoder_init(struct intel_digital_port *intel_dig_port, int conn_id);
void intel_dp_mst_encoder_cleanup(struct intel_digital_port *intel_dig_port);
int intel_dp_mst_encoder_active_links(struct intel_digital_port *intel_dig_port);
+bool intel_dp_mst_is_master_trans(const struct intel_crtc_state *crtc_state);
+bool intel_dp_mst_is_slave_trans(const struct intel_crtc_state *crtc_state);
#endif /* __INTEL_DP_MST_H__ */
diff --git a/drivers/gpu/drm/i915/display/intel_dpio_phy.c b/drivers/gpu/drm/i915/display/intel_dpio_phy.c
index 556d1b30f06a..6fb1f7a7364e 100644
--- a/drivers/gpu/drm/i915/display/intel_dpio_phy.c
+++ b/drivers/gpu/drm/i915/display/intel_dpio_phy.c
@@ -642,7 +642,7 @@ void chv_set_phy_signal_level(struct intel_encoder *encoder,
bool uniq_trans_scale)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- struct intel_digital_port *dport = enc_to_dig_port(&encoder->base);
+ struct intel_digital_port *dport = enc_to_dig_port(encoder);
struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc);
enum dpio_channel ch = vlv_dport_to_channel(dport);
enum pipe pipe = intel_crtc->pipe;
@@ -738,8 +738,8 @@ void chv_data_lane_soft_reset(struct intel_encoder *encoder,
bool reset)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- enum dpio_channel ch = vlv_dport_to_channel(enc_to_dig_port(&encoder->base));
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+ enum dpio_channel ch = vlv_dport_to_channel(enc_to_dig_port(encoder));
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
enum pipe pipe = crtc->pipe;
u32 val;
@@ -781,9 +781,9 @@ void chv_data_lane_soft_reset(struct intel_encoder *encoder,
void chv_phy_pre_pll_enable(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state)
{
- struct intel_digital_port *dport = enc_to_dig_port(&encoder->base);
+ struct intel_digital_port *dport = enc_to_dig_port(encoder);
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
enum dpio_channel ch = vlv_dport_to_channel(dport);
enum pipe pipe = crtc->pipe;
unsigned int lane_mask =
@@ -861,10 +861,10 @@ void chv_phy_pre_pll_enable(struct intel_encoder *encoder,
void chv_phy_pre_encoder_enable(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state)
{
- struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
+ struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
enum dpio_channel ch = vlv_dport_to_channel(dport);
enum pipe pipe = crtc->pipe;
int data, i, stagger;
@@ -940,7 +940,7 @@ void chv_phy_pre_encoder_enable(struct intel_encoder *encoder,
void chv_phy_release_cl2_override(struct intel_encoder *encoder)
{
- struct intel_digital_port *dport = enc_to_dig_port(&encoder->base);
+ struct intel_digital_port *dport = enc_to_dig_port(encoder);
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
if (dport->release_cl2_override) {
@@ -953,7 +953,7 @@ void chv_phy_post_pll_disable(struct intel_encoder *encoder,
const struct intel_crtc_state *old_crtc_state)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- enum pipe pipe = to_intel_crtc(old_crtc_state->base.crtc)->pipe;
+ enum pipe pipe = to_intel_crtc(old_crtc_state->uapi.crtc)->pipe;
u32 val;
vlv_dpio_get(dev_priv);
@@ -989,7 +989,7 @@ void vlv_set_phy_signal_level(struct intel_encoder *encoder,
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc);
- struct intel_digital_port *dport = enc_to_dig_port(&encoder->base);
+ struct intel_digital_port *dport = enc_to_dig_port(encoder);
enum dpio_channel port = vlv_dport_to_channel(dport);
enum pipe pipe = intel_crtc->pipe;
@@ -1014,9 +1014,9 @@ void vlv_set_phy_signal_level(struct intel_encoder *encoder,
void vlv_phy_pre_pll_enable(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state)
{
- struct intel_digital_port *dport = enc_to_dig_port(&encoder->base);
+ struct intel_digital_port *dport = enc_to_dig_port(encoder);
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
enum dpio_channel port = vlv_dport_to_channel(dport);
enum pipe pipe = crtc->pipe;
@@ -1043,10 +1043,10 @@ void vlv_phy_pre_pll_enable(struct intel_encoder *encoder,
void vlv_phy_pre_encoder_enable(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state)
{
- struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
+ struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
enum dpio_channel port = vlv_dport_to_channel(dport);
enum pipe pipe = crtc->pipe;
u32 val;
@@ -1073,9 +1073,9 @@ void vlv_phy_pre_encoder_enable(struct intel_encoder *encoder,
void vlv_phy_reset_lanes(struct intel_encoder *encoder,
const struct intel_crtc_state *old_crtc_state)
{
- struct intel_digital_port *dport = enc_to_dig_port(&encoder->base);
+ struct intel_digital_port *dport = enc_to_dig_port(encoder);
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->base.crtc);
+ struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->uapi.crtc);
enum dpio_channel port = vlv_dport_to_channel(dport);
enum pipe pipe = crtc->pipe;
diff --git a/drivers/gpu/drm/i915/display/intel_dpll_mgr.c b/drivers/gpu/drm/i915/display/intel_dpll_mgr.c
index 3ce0a023eee0..c75e34d87111 100644
--- a/drivers/gpu/drm/i915/display/intel_dpll_mgr.c
+++ b/drivers/gpu/drm/i915/display/intel_dpll_mgr.c
@@ -136,7 +136,7 @@ void assert_shared_dpll(struct drm_i915_private *dev_priv,
*/
void intel_prepare_shared_dpll(const struct intel_crtc_state *crtc_state)
{
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
struct intel_shared_dpll *pll = crtc_state->shared_dpll;
@@ -163,7 +163,7 @@ void intel_prepare_shared_dpll(const struct intel_crtc_state *crtc_state)
*/
void intel_enable_shared_dpll(const struct intel_crtc_state *crtc_state)
{
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
struct intel_shared_dpll *pll = crtc_state->shared_dpll;
unsigned int crtc_mask = drm_crtc_mask(&crtc->base);
@@ -208,7 +208,7 @@ out:
*/
void intel_disable_shared_dpll(const struct intel_crtc_state *crtc_state)
{
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
struct intel_shared_dpll *pll = crtc_state->shared_dpll;
unsigned int crtc_mask = drm_crtc_mask(&crtc->base);
@@ -842,7 +842,7 @@ hsw_ddi_hdmi_get_dpll(struct intel_atomic_state *state,
static struct intel_shared_dpll *
hsw_ddi_dp_get_dpll(struct intel_crtc_state *crtc_state)
{
- struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev);
+ struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
struct intel_shared_dpll *pll;
enum intel_dpll_id pll_id;
int clock = crtc_state->port_clock;
@@ -1751,7 +1751,7 @@ static bool
bxt_ddi_hdmi_pll_dividers(struct intel_crtc_state *crtc_state,
struct bxt_clk_div *clk_div)
{
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
struct dpll best_clock;
/* Calculate HDMI div */
@@ -2274,7 +2274,7 @@ static bool
cnl_ddi_calculate_wrpll(struct intel_crtc_state *crtc_state,
struct skl_wrpll_params *wrpll_params)
{
- struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev);
+ struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
u32 afe_clock = crtc_state->port_clock * 5;
u32 ref_clock;
u32 dco_min = 7998000;
@@ -2553,7 +2553,7 @@ static const struct skl_wrpll_params tgl_tbt_pll_24MHz_values = {
static bool icl_calc_dp_combo_pll(struct intel_crtc_state *crtc_state,
struct skl_wrpll_params *pll_params)
{
- struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev);
+ struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
const struct icl_combo_pll_params *params =
dev_priv->cdclk.hw.ref == 24000 ?
icl_dp_combo_pll_24MHz_values :
@@ -2575,7 +2575,7 @@ static bool icl_calc_dp_combo_pll(struct intel_crtc_state *crtc_state,
static bool icl_calc_tbt_pll(struct intel_crtc_state *crtc_state,
struct skl_wrpll_params *pll_params)
{
- struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev);
+ struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
if (INTEL_GEN(dev_priv) >= 12) {
switch (dev_priv->cdclk.hw.ref) {
@@ -2612,7 +2612,7 @@ static bool icl_calc_dpll_state(struct intel_crtc_state *crtc_state,
struct intel_encoder *encoder,
struct intel_dpll_hw_state *pll_state)
{
- struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev);
+ struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
u32 cfgcr0, cfgcr1;
struct skl_wrpll_params pll_params = { 0 };
bool ret;
@@ -2744,7 +2744,7 @@ static bool icl_mg_pll_find_divisors(int clock_khz, bool is_dp, bool use_ssc,
static bool icl_calc_mg_pll_state(struct intel_crtc_state *crtc_state,
struct intel_dpll_hw_state *pll_state)
{
- struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev);
+ struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
int refclk_khz = dev_priv->cdclk.hw.ref;
int clock = crtc_state->port_clock;
u32 dco_khz, m1div, m2div_int, m2div_rem, m2div_frac;
@@ -2972,8 +2972,8 @@ static void icl_update_active_dpll(struct intel_atomic_state *state,
enum icl_port_dpll_id port_dpll_id = ICL_PORT_DPLL_DEFAULT;
primary_port = encoder->type == INTEL_OUTPUT_DP_MST ?
- enc_to_mst(&encoder->base)->primary :
- enc_to_dig_port(&encoder->base);
+ enc_to_mst(encoder)->primary :
+ enc_to_dig_port(encoder);
if (primary_port &&
(primary_port->tc_mode == TC_PORT_DP_ALT ||
diff --git a/drivers/gpu/drm/i915/display/intel_dsb.c b/drivers/gpu/drm/i915/display/intel_dsb.c
index bb5a0e91b370..ada006a690df 100644
--- a/drivers/gpu/drm/i915/display/intel_dsb.c
+++ b/drivers/gpu/drm/i915/display/intel_dsb.c
@@ -102,43 +102,50 @@ intel_dsb_get(struct intel_crtc *crtc)
struct intel_dsb *dsb = &crtc->dsb;
struct drm_i915_gem_object *obj;
struct i915_vma *vma;
+ u32 *buf;
intel_wakeref_t wakeref;
if (!HAS_DSB(i915))
return dsb;
- if (atomic_add_return(1, &dsb->refcount) != 1)
+ if (dsb->refcount++ != 0)
return dsb;
- dsb->id = DSB1;
wakeref = intel_runtime_pm_get(&i915->runtime_pm);
obj = i915_gem_object_create_internal(i915, DSB_BUF_SIZE);
if (IS_ERR(obj)) {
DRM_ERROR("Gem object creation failed\n");
- goto err;
+ goto out;
}
- vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0, PIN_MAPPABLE);
+ vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0, 0);
if (IS_ERR(vma)) {
DRM_ERROR("Vma creation failed\n");
i915_gem_object_put(obj);
- atomic_dec(&dsb->refcount);
- goto err;
+ goto out;
}
- dsb->cmd_buf = i915_gem_object_pin_map(vma->obj, I915_MAP_WC);
- if (IS_ERR(dsb->cmd_buf)) {
+ buf = i915_gem_object_pin_map(vma->obj, I915_MAP_WC);
+ if (IS_ERR(buf)) {
DRM_ERROR("Command buffer creation failed\n");
- i915_vma_unpin_and_release(&vma, 0);
- dsb->cmd_buf = NULL;
- atomic_dec(&dsb->refcount);
- goto err;
+ goto out;
}
+
+ dsb->id = DSB1;
dsb->vma = vma;
+ dsb->cmd_buf = buf;
+
+out:
+ /*
+ * On error dsb->cmd_buf will continue to be NULL, making the writes
+ * pass-through. Leave the dangling ref to be removed later by the
+ * corresponding intel_dsb_put(): the important error message will
+ * already be logged above.
+ */
-err:
intel_runtime_pm_put(&i915->runtime_pm, wakeref);
+
return dsb;
}
@@ -158,10 +165,10 @@ void intel_dsb_put(struct intel_dsb *dsb)
if (!HAS_DSB(i915))
return;
- if (WARN_ON(atomic_read(&dsb->refcount) == 0))
+ if (WARN_ON(dsb->refcount == 0))
return;
- if (atomic_dec_and_test(&dsb->refcount)) {
+ if (--dsb->refcount == 0) {
i915_vma_unpin_and_release(&dsb->vma, I915_VMA_RELEASE_MAP);
dsb->cmd_buf = NULL;
dsb->free_pos = 0;
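
The intel_dsb.c hunks above drop atomic_t refcounting in favour of plain increments, which is only safe because the intel_dsb_get()/intel_dsb_put() callers are already serialized by modeset locking. A minimal userspace model of the get/put pairing, assuming single-threaded callers and stand-in types, shows the nested-get short-circuit and the underflow guard that the WARN_ON mirrors.

/*
 * Minimal model of the plain-counter get/put pairing above; assumes
 * single-threaded use, with simplified stand-in types and allocation.
 */
#include <assert.h>
#include <stdio.h>
#include <stdlib.h>

struct dsb {
	long refcount;
	void *cmd_buf;			/* NULL until the first get succeeds */
};

static struct dsb *dsb_get(struct dsb *d)
{
	if (d->refcount++ != 0)		/* already set up: just take a ref */
		return d;
	d->cmd_buf = calloc(1, 4096);	/* stand-in for the pinned buffer */
	return d;			/* on failure cmd_buf stays NULL */
}

static void dsb_put(struct dsb *d)
{
	if (d->refcount == 0) {		/* mirrors the WARN_ON underflow check */
		fprintf(stderr, "unbalanced put\n");
		return;
	}
	if (--d->refcount == 0) {
		free(d->cmd_buf);
		d->cmd_buf = NULL;
	}
}

int main(void)
{
	struct dsb d = { 0, NULL };

	dsb_get(&d);
	dsb_get(&d);			/* nested get only bumps the count */
	dsb_put(&d);
	dsb_put(&d);			/* last put releases the buffer */
	assert(d.cmd_buf == NULL && d.refcount == 0);
	return 0;
}
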
diff --git a/drivers/gpu/drm/i915/display/intel_dsb.h b/drivers/gpu/drm/i915/display/intel_dsb.h
index 6f95c8e909e6..395ef9ce558e 100644
--- a/drivers/gpu/drm/i915/display/intel_dsb.h
+++ b/drivers/gpu/drm/i915/display/intel_dsb.h
@@ -22,7 +22,7 @@ enum dsb_id {
};
struct intel_dsb {
- atomic_t refcount;
+ long refcount;
enum dsb_id id;
u32 *cmd_buf;
struct i915_vma *vma;
diff --git a/drivers/gpu/drm/i915/display/intel_dsi.h b/drivers/gpu/drm/i915/display/intel_dsi.h
index b15be5814599..19f78a4022d3 100644
--- a/drivers/gpu/drm/i915/display/intel_dsi.h
+++ b/drivers/gpu/drm/i915/display/intel_dsi.h
@@ -45,8 +45,9 @@ struct intel_dsi {
struct intel_dsi_host *dsi_hosts[I915_MAX_PORTS];
intel_wakeref_t io_wakeref[I915_MAX_PORTS];
- /* GPIO Desc for CRC based Panel control */
+ /* GPIO Desc for panel and backlight control */
struct gpio_desc *gpio_panel;
+ struct gpio_desc *gpio_backlight;
struct intel_connector *attached_connector;
@@ -68,6 +69,9 @@ struct intel_dsi {
/* number of DSI lanes */
unsigned int lane_count;
+ /* i2c bus associated with the slave device */
+ int i2c_bus_num;
+
/*
* video mode pixel format
*
@@ -141,9 +145,9 @@ static inline struct intel_dsi_host *to_intel_dsi_host(struct mipi_dsi_host *h)
#define for_each_dsi_phy(__phy, __phys_mask) \
for_each_phy_masked(__phy, __phys_mask)
-static inline struct intel_dsi *enc_to_intel_dsi(struct drm_encoder *encoder)
+static inline struct intel_dsi *enc_to_intel_dsi(struct intel_encoder *encoder)
{
- return container_of(encoder, struct intel_dsi, base.base);
+ return container_of(&encoder->base, struct intel_dsi, base.base);
}
static inline bool is_vid_mode(struct intel_dsi *intel_dsi)
@@ -158,7 +162,7 @@ static inline bool is_cmd_mode(struct intel_dsi *intel_dsi)
static inline u16 intel_dsi_encoder_ports(struct intel_encoder *encoder)
{
- return enc_to_intel_dsi(&encoder->base)->ports;
+ return enc_to_intel_dsi(encoder)->ports;
}
/* icl_dsi.c */
@@ -203,6 +207,8 @@ void bxt_dsi_reset_clocks(struct intel_encoder *encoder, enum port port);
/* intel_dsi_vbt.c */
bool intel_dsi_vbt_init(struct intel_dsi *intel_dsi, u16 panel_id);
+void intel_dsi_vbt_gpio_init(struct intel_dsi *intel_dsi, bool panel_is_on);
+void intel_dsi_vbt_gpio_cleanup(struct intel_dsi *intel_dsi);
void intel_dsi_vbt_exec_sequence(struct intel_dsi *intel_dsi,
enum mipi_seq seq_id);
void intel_dsi_msleep(struct intel_dsi *intel_dsi, int msec);
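
enc_to_intel_dsi() above now takes the intel_encoder and recovers the enclosing intel_dsi with container_of() on the embedded base. A self-contained sketch, using a local container_of macro and simplified structs standing in for the drm/intel types, shows why passing &encoder->base still lands on the containing object. The same pattern underlies enc_to_intel_hdmi() and enc_to_dvo() later in this series.

/*
 * Sketch of the container_of upcast used by enc_to_intel_dsi() above,
 * with simplified stand-in structs and a local container_of macro.
 */
#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct drm_encoder   { int id; };
struct intel_encoder { struct drm_encoder base; };
struct intel_dsi     { struct intel_encoder base; int lanes; };

/* mirrors: container_of(&encoder->base, struct intel_dsi, base.base) */
static struct intel_dsi *enc_to_intel_dsi(struct intel_encoder *encoder)
{
	return container_of(&encoder->base, struct intel_dsi, base.base);
}

int main(void)
{
	struct intel_dsi dsi = { .base.base.id = 7, .lanes = 4 };
	struct intel_dsi *back = enc_to_intel_dsi(&dsi.base);

	printf("recovered lanes=%d (same object: %d)\n",
	       back->lanes, back == &dsi);
	return 0;
}
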
diff --git a/drivers/gpu/drm/i915/display/intel_dsi_dcs_backlight.c b/drivers/gpu/drm/i915/display/intel_dsi_dcs_backlight.c
index bb3fd8b786a2..c87838843d0b 100644
--- a/drivers/gpu/drm/i915/display/intel_dsi_dcs_backlight.c
+++ b/drivers/gpu/drm/i915/display/intel_dsi_dcs_backlight.c
@@ -46,7 +46,7 @@
static u32 dcs_get_backlight(struct intel_connector *connector)
{
struct intel_encoder *encoder = connector->encoder;
- struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
+ struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
struct mipi_dsi_device *dsi_device;
u8 data = 0;
enum port port;
@@ -64,7 +64,7 @@ static u32 dcs_get_backlight(struct intel_connector *connector)
static void dcs_set_backlight(const struct drm_connector_state *conn_state, u32 level)
{
- struct intel_dsi *intel_dsi = enc_to_intel_dsi(conn_state->best_encoder);
+ struct intel_dsi *intel_dsi = enc_to_intel_dsi(to_intel_encoder(conn_state->best_encoder));
struct mipi_dsi_device *dsi_device;
u8 data = level;
enum port port;
@@ -79,7 +79,7 @@ static void dcs_set_backlight(const struct drm_connector_state *conn_state, u32
static void dcs_disable_backlight(const struct drm_connector_state *conn_state)
{
- struct intel_dsi *intel_dsi = enc_to_intel_dsi(conn_state->best_encoder);
+ struct intel_dsi *intel_dsi = enc_to_intel_dsi(to_intel_encoder(conn_state->best_encoder));
struct mipi_dsi_device *dsi_device;
enum port port;
@@ -113,7 +113,7 @@ static void dcs_disable_backlight(const struct drm_connector_state *conn_state)
static void dcs_enable_backlight(const struct intel_crtc_state *crtc_state,
const struct drm_connector_state *conn_state)
{
- struct intel_dsi *intel_dsi = enc_to_intel_dsi(conn_state->best_encoder);
+ struct intel_dsi *intel_dsi = enc_to_intel_dsi(to_intel_encoder(conn_state->best_encoder));
struct intel_panel *panel = &to_intel_connector(conn_state->connector)->panel;
struct mipi_dsi_device *dsi_device;
enum port port;
diff --git a/drivers/gpu/drm/i915/display/intel_dsi_vbt.c b/drivers/gpu/drm/i915/display/intel_dsi_vbt.c
index f90946c912ee..89fb0d90b694 100644
--- a/drivers/gpu/drm/i915/display/intel_dsi_vbt.c
+++ b/drivers/gpu/drm/i915/display/intel_dsi_vbt.c
@@ -25,7 +25,10 @@
*/
#include <linux/gpio/consumer.h>
+#include <linux/gpio/machine.h>
#include <linux/mfd/intel_soc_pmic.h>
+#include <linux/pinctrl/consumer.h>
+#include <linux/pinctrl/machine.h>
#include <linux/slab.h>
#include <asm/intel-mid.h>
@@ -83,6 +86,12 @@ static struct gpio_map vlv_gpio_table[] = {
{ VLV_GPIO_NC_11_PANEL1_BKLTCTL },
};
+struct i2c_adapter_lookup {
+ u16 slave_addr;
+ struct intel_dsi *intel_dsi;
+ acpi_handle dev_handle;
+};
+
#define CHV_GPIO_IDX_START_N 0
#define CHV_GPIO_IDX_START_E 73
#define CHV_GPIO_IDX_START_SW 100
@@ -375,11 +384,98 @@ static const u8 *mipi_exec_gpio(struct intel_dsi *intel_dsi, const u8 *data)
return data;
}
+static int i2c_adapter_lookup(struct acpi_resource *ares, void *data)
+{
+ struct i2c_adapter_lookup *lookup = data;
+ struct intel_dsi *intel_dsi = lookup->intel_dsi;
+ struct acpi_resource_i2c_serialbus *sb;
+ struct i2c_adapter *adapter;
+ acpi_handle adapter_handle;
+ acpi_status status;
+
+ if (intel_dsi->i2c_bus_num >= 0 ||
+ !i2c_acpi_get_i2c_resource(ares, &sb))
+ return 1;
+
+ if (lookup->slave_addr != sb->slave_address)
+ return 1;
+
+ status = acpi_get_handle(lookup->dev_handle,
+ sb->resource_source.string_ptr,
+ &adapter_handle);
+ if (ACPI_FAILURE(status))
+ return 1;
+
+ adapter = i2c_acpi_find_adapter_by_handle(adapter_handle);
+ if (adapter)
+ intel_dsi->i2c_bus_num = adapter->nr;
+
+ return 1;
+}
+
static const u8 *mipi_exec_i2c(struct intel_dsi *intel_dsi, const u8 *data)
{
- DRM_DEBUG_KMS("Skipping I2C element execution\n");
+ struct drm_device *drm_dev = intel_dsi->base.base.dev;
+ struct device *dev = &drm_dev->pdev->dev;
+ struct i2c_adapter *adapter;
+ struct acpi_device *acpi_dev;
+ struct list_head resource_list;
+ struct i2c_adapter_lookup lookup;
+ struct i2c_msg msg;
+ int ret;
+ u8 vbt_i2c_bus_num = *(data + 2);
+ u16 slave_addr = *(u16 *)(data + 3);
+ u8 reg_offset = *(data + 5);
+ u8 payload_size = *(data + 6);
+ u8 *payload_data;
+
+ if (intel_dsi->i2c_bus_num < 0) {
+ intel_dsi->i2c_bus_num = vbt_i2c_bus_num;
+
+ acpi_dev = ACPI_COMPANION(dev);
+ if (acpi_dev) {
+ memset(&lookup, 0, sizeof(lookup));
+ lookup.slave_addr = slave_addr;
+ lookup.intel_dsi = intel_dsi;
+ lookup.dev_handle = acpi_device_handle(acpi_dev);
+
+ INIT_LIST_HEAD(&resource_list);
+ acpi_dev_get_resources(acpi_dev, &resource_list,
+ i2c_adapter_lookup,
+ &lookup);
+ acpi_dev_free_resource_list(&resource_list);
+ }
+ }
- return data + *(data + 6) + 7;
+ adapter = i2c_get_adapter(intel_dsi->i2c_bus_num);
+ if (!adapter) {
+ DRM_DEV_ERROR(dev, "Cannot find a valid i2c bus for xfer\n");
+ goto err_bus;
+ }
+
+ payload_data = kzalloc(payload_size + 1, GFP_KERNEL);
+ if (!payload_data)
+ goto err_alloc;
+
+ payload_data[0] = reg_offset;
+ memcpy(&payload_data[1], (data + 7), payload_size);
+
+ msg.addr = slave_addr;
+ msg.flags = 0;
+ msg.len = payload_size + 1;
+ msg.buf = payload_data;
+
+ ret = i2c_transfer(adapter, &msg, 1);
+ if (ret < 0)
+ DRM_DEV_ERROR(dev,
+ "Failed to xfer payload of size (%u) to reg (%u)\n",
+ payload_size, reg_offset);
+
+ kfree(payload_data);
+err_alloc:
+ i2c_put_adapter(adapter);
+err_bus:
+ return data + payload_size + 7;
}
static const u8 *mipi_exec_spi(struct intel_dsi *intel_dsi, const u8 *data)
@@ -453,8 +549,8 @@ static const char *sequence_name(enum mipi_seq seq_id)
return "(unknown)";
}
-void intel_dsi_vbt_exec_sequence(struct intel_dsi *intel_dsi,
- enum mipi_seq seq_id)
+static void intel_dsi_vbt_exec(struct intel_dsi *intel_dsi,
+ enum mipi_seq seq_id)
{
struct drm_i915_private *dev_priv = to_i915(intel_dsi->base.base.dev);
const u8 *data;
@@ -519,6 +615,22 @@ void intel_dsi_vbt_exec_sequence(struct intel_dsi *intel_dsi,
}
}
+void intel_dsi_vbt_exec_sequence(struct intel_dsi *intel_dsi,
+ enum mipi_seq seq_id)
+{
+ if (seq_id == MIPI_SEQ_POWER_ON && intel_dsi->gpio_panel)
+ gpiod_set_value_cansleep(intel_dsi->gpio_panel, 1);
+ if (seq_id == MIPI_SEQ_BACKLIGHT_ON && intel_dsi->gpio_backlight)
+ gpiod_set_value_cansleep(intel_dsi->gpio_backlight, 1);
+
+ intel_dsi_vbt_exec(intel_dsi, seq_id);
+
+ if (seq_id == MIPI_SEQ_POWER_OFF && intel_dsi->gpio_panel)
+ gpiod_set_value_cansleep(intel_dsi->gpio_panel, 0);
+ if (seq_id == MIPI_SEQ_BACKLIGHT_OFF && intel_dsi->gpio_backlight)
+ gpiod_set_value_cansleep(intel_dsi->gpio_backlight, 0);
+}
+
void intel_dsi_msleep(struct intel_dsi *intel_dsi, int msec)
{
struct drm_i915_private *dev_priv = to_i915(intel_dsi->base.base.dev);
@@ -664,6 +776,8 @@ bool intel_dsi_vbt_init(struct intel_dsi *intel_dsi, u16 panel_id)
intel_dsi->panel_off_delay = pps->panel_off_delay / 10;
intel_dsi->panel_pwr_cycle_delay = pps->panel_power_cycle_delay / 10;
+ intel_dsi->i2c_bus_num = -1;
+
/* a regular driver would get the device in probe */
for_each_dsi_port(port, intel_dsi->ports) {
mipi_dsi_attach(intel_dsi->dsi_hosts[port]->device);
@@ -671,3 +785,110 @@ bool intel_dsi_vbt_init(struct intel_dsi *intel_dsi, u16 panel_id)
return true;
}
+
+/*
+ * On some BYT/CHT devs some sequences are incomplete and we need to manually
+ * control some GPIOs. We need to add a GPIO lookup table before we get these.
+ * If the GOP did not initialize the panel (HDMI inserted) we may need to also
+ * change the pinmux for the SoC's PWM0 pin from GPIO to PWM.
+ */
+static struct gpiod_lookup_table pmic_panel_gpio_table = {
+ /* Intel GFX is consumer */
+ .dev_id = "0000:00:02.0",
+ .table = {
+ /* Panel EN/DISABLE */
+ GPIO_LOOKUP("gpio_crystalcove", 94, "panel", GPIO_ACTIVE_HIGH),
+ { }
+ },
+};
+
+static struct gpiod_lookup_table soc_panel_gpio_table = {
+ .dev_id = "0000:00:02.0",
+ .table = {
+ GPIO_LOOKUP("INT33FC:01", 10, "backlight", GPIO_ACTIVE_HIGH),
+ GPIO_LOOKUP("INT33FC:01", 11, "panel", GPIO_ACTIVE_HIGH),
+ { }
+ },
+};
+
+static const struct pinctrl_map soc_pwm_pinctrl_map[] = {
+ PIN_MAP_MUX_GROUP("0000:00:02.0", "soc_pwm0", "INT33FC:00",
+ "pwm0_grp", "pwm"),
+};
+
+void intel_dsi_vbt_gpio_init(struct intel_dsi *intel_dsi, bool panel_is_on)
+{
+ struct drm_device *dev = intel_dsi->base.base.dev;
+ struct drm_i915_private *dev_priv = to_i915(dev);
+ struct mipi_config *mipi_config = dev_priv->vbt.dsi.config;
+ enum gpiod_flags flags = panel_is_on ? GPIOD_OUT_HIGH : GPIOD_OUT_LOW;
+ bool want_backlight_gpio = false;
+ bool want_panel_gpio = false;
+ struct pinctrl *pinctrl;
+ int ret;
+
+ if ((IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) &&
+ mipi_config->pwm_blc == PPS_BLC_PMIC) {
+ gpiod_add_lookup_table(&pmic_panel_gpio_table);
+ want_panel_gpio = true;
+ }
+
+ if (IS_VALLEYVIEW(dev_priv) && mipi_config->pwm_blc == PPS_BLC_SOC) {
+ gpiod_add_lookup_table(&soc_panel_gpio_table);
+ want_panel_gpio = true;
+ want_backlight_gpio = true;
+
+ /* Ensure PWM0 pin is muxed as PWM instead of GPIO */
+ ret = pinctrl_register_mappings(soc_pwm_pinctrl_map,
+ ARRAY_SIZE(soc_pwm_pinctrl_map));
+ if (ret)
+ DRM_ERROR("Failed to register pwm0 pinmux mapping\n");
+
+ pinctrl = devm_pinctrl_get_select(dev->dev, "soc_pwm0");
+ if (IS_ERR(pinctrl))
+ DRM_ERROR("Failed to set pinmux to PWM\n");
+ }
+
+ if (want_panel_gpio) {
+ intel_dsi->gpio_panel = gpiod_get(dev->dev, "panel", flags);
+ if (IS_ERR(intel_dsi->gpio_panel)) {
+ DRM_ERROR("Failed to own gpio for panel control\n");
+ intel_dsi->gpio_panel = NULL;
+ }
+ }
+
+ if (want_backlight_gpio) {
+ intel_dsi->gpio_backlight =
+ gpiod_get(dev->dev, "backlight", flags);
+ if (IS_ERR(intel_dsi->gpio_backlight)) {
+ DRM_ERROR("Failed to own gpio for backlight control\n");
+ intel_dsi->gpio_backlight = NULL;
+ }
+ }
+}
+
+void intel_dsi_vbt_gpio_cleanup(struct intel_dsi *intel_dsi)
+{
+ struct drm_device *dev = intel_dsi->base.base.dev;
+ struct drm_i915_private *dev_priv = to_i915(dev);
+ struct mipi_config *mipi_config = dev_priv->vbt.dsi.config;
+
+ if (intel_dsi->gpio_panel) {
+ gpiod_put(intel_dsi->gpio_panel);
+ intel_dsi->gpio_panel = NULL;
+ }
+
+ if (intel_dsi->gpio_backlight) {
+ gpiod_put(intel_dsi->gpio_backlight);
+ intel_dsi->gpio_backlight = NULL;
+ }
+
+ if ((IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) &&
+ mipi_config->pwm_blc == PPS_BLC_PMIC)
+ gpiod_remove_lookup_table(&pmic_panel_gpio_table);
+
+ if (IS_VALLEYVIEW(dev_priv) && mipi_config->pwm_blc == PPS_BLC_SOC) {
+ pinctrl_unregister_mappings(soc_pwm_pinctrl_map);
+ gpiod_remove_lookup_table(&soc_panel_gpio_table);
+ }
+}
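
mipi_exec_i2c() above decodes a fixed layout from the VBT sequence block: bus number at byte 2, a little-endian 16-bit slave address at bytes 3-4, register offset at byte 5, payload size at byte 6, payload from byte 7, with the next element at payload_size + 7. The sketch below parses a fabricated element (the bytes are invented for illustration) and assembles the write buffer the same way, register offset prepended to the payload; it assembles the address bytewise where the driver does an unaligned *(u16 *)(data + 3) read.

/*
 * Standalone parse of the VBT MIPI-sequence I2C element layout decoded
 * by mipi_exec_i2c() above. Element bytes fabricated for illustration.
 */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

int main(void)
{
	static const uint8_t elem[] = {
		0x00, 0x00,	/* leading bytes, not used by the parser */
		0x03,		/* VBT i2c bus number */
		0x2c, 0x00,	/* slave address 0x002c, little endian */
		0x10,		/* register offset */
		0x02,		/* payload size */
		0xaa, 0x55,	/* payload */
	};
	unsigned int bus = elem[2];
	unsigned int addr = elem[3] | (unsigned int)elem[4] << 8;
	unsigned int reg = elem[5];
	unsigned int len = elem[6];
	uint8_t buf[256];	/* reg offset + payload, as in the i2c_msg */

	buf[0] = (uint8_t)reg;
	memcpy(&buf[1], &elem[7], len);

	printf("bus=%u addr=0x%04x reg=0x%02x len=%u\n", bus, addr, reg, len);
	printf("next element at data + %u\n", len + 7);
	return 0;
}
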
diff --git a/drivers/gpu/drm/i915/display/intel_dvo.c b/drivers/gpu/drm/i915/display/intel_dvo.c
index bcfbcb743e7d..86a337c9d85d 100644
--- a/drivers/gpu/drm/i915/display/intel_dvo.c
+++ b/drivers/gpu/drm/i915/display/intel_dvo.c
@@ -125,7 +125,7 @@ static struct intel_dvo *enc_to_dvo(struct intel_encoder *encoder)
return container_of(encoder, struct intel_dvo, base);
}
-static struct intel_dvo *intel_attached_dvo(struct drm_connector *connector)
+static struct intel_dvo *intel_attached_dvo(struct intel_connector *connector)
{
return enc_to_dvo(intel_attached_encoder(connector));
}
@@ -134,7 +134,7 @@ static bool intel_dvo_connector_get_hw_state(struct intel_connector *connector)
{
struct drm_device *dev = connector->base.dev;
struct drm_i915_private *dev_priv = to_i915(dev);
- struct intel_dvo *intel_dvo = intel_attached_dvo(&connector->base);
+ struct intel_dvo *intel_dvo = intel_attached_dvo(connector);
u32 tmp;
tmp = I915_READ(intel_dvo->dev.dvo_reg);
@@ -178,9 +178,9 @@ static void intel_dvo_get_config(struct intel_encoder *encoder,
else
flags |= DRM_MODE_FLAG_NVSYNC;
- pipe_config->base.adjusted_mode.flags |= flags;
+ pipe_config->hw.adjusted_mode.flags |= flags;
- pipe_config->base.adjusted_mode.crtc_clock = pipe_config->port_clock;
+ pipe_config->hw.adjusted_mode.crtc_clock = pipe_config->port_clock;
}
static void intel_disable_dvo(struct intel_encoder *encoder,
@@ -207,8 +207,8 @@ static void intel_enable_dvo(struct intel_encoder *encoder,
u32 temp = I915_READ(dvo_reg);
intel_dvo->dev.dev_ops->mode_set(&intel_dvo->dev,
- &pipe_config->base.mode,
- &pipe_config->base.adjusted_mode);
+ &pipe_config->hw.mode,
+ &pipe_config->hw.adjusted_mode);
I915_WRITE(dvo_reg, temp | DVO_ENABLE);
I915_READ(dvo_reg);
@@ -220,7 +220,7 @@ static enum drm_mode_status
intel_dvo_mode_valid(struct drm_connector *connector,
struct drm_display_mode *mode)
{
- struct intel_dvo *intel_dvo = intel_attached_dvo(connector);
+ struct intel_dvo *intel_dvo = intel_attached_dvo(to_intel_connector(connector));
const struct drm_display_mode *fixed_mode =
to_intel_connector(connector)->panel.fixed_mode;
int max_dotclk = to_i915(connector->dev)->max_dotclk_freq;
@@ -253,7 +253,7 @@ static int intel_dvo_compute_config(struct intel_encoder *encoder,
struct intel_dvo *intel_dvo = enc_to_dvo(encoder);
const struct drm_display_mode *fixed_mode =
intel_dvo->attached_connector->panel.fixed_mode;
- struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;
+ struct drm_display_mode *adjusted_mode = &pipe_config->hw.adjusted_mode;
/*
* If we have timings from the BIOS for the panel, put them in
@@ -277,8 +277,8 @@ static void intel_dvo_pre_enable(struct intel_encoder *encoder,
const struct drm_connector_state *conn_state)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- struct intel_crtc *crtc = to_intel_crtc(pipe_config->base.crtc);
- const struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;
+ struct intel_crtc *crtc = to_intel_crtc(pipe_config->uapi.crtc);
+ const struct drm_display_mode *adjusted_mode = &pipe_config->hw.adjusted_mode;
struct intel_dvo *intel_dvo = enc_to_dvo(encoder);
enum pipe pipe = crtc->pipe;
u32 dvo_val;
@@ -311,7 +311,7 @@ static void intel_dvo_pre_enable(struct intel_encoder *encoder,
static enum drm_connector_status
intel_dvo_detect(struct drm_connector *connector, bool force)
{
- struct intel_dvo *intel_dvo = intel_attached_dvo(connector);
+ struct intel_dvo *intel_dvo = intel_attached_dvo(to_intel_connector(connector));
DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
connector->base.id, connector->name);
return intel_dvo->dev.dev_ops->detect(&intel_dvo->dev);
diff --git a/drivers/gpu/drm/i915/display/intel_fbc.c b/drivers/gpu/drm/i915/display/intel_fbc.c
index 20616639b8ab..a1048ece541e 100644
--- a/drivers/gpu/drm/i915/display/intel_fbc.c
+++ b/drivers/gpu/drm/i915/display/intel_fbc.c
@@ -50,11 +50,6 @@ static inline bool fbc_supported(struct drm_i915_private *dev_priv)
return HAS_FBC(dev_priv);
}
-static inline bool no_fbc_on_multiple_pipes(struct drm_i915_private *dev_priv)
-{
- return INTEL_GEN(dev_priv) <= 3;
-}
-
/*
 * On some platforms where the CRTC's x:0/y:0 coordinates don't match the
 * frontbuffer's x:0/y:0 coordinates, we lie to the hardware about the plane's
@@ -73,7 +68,7 @@ static unsigned int get_crtc_fence_y_offset(struct intel_fbc *fbc)
* write to the PLANE_SIZE register. For BDW-, the hardware looks at the value
* we wrote to PIPESRC.
*/
-static void intel_fbc_get_plane_source_size(struct intel_fbc_state_cache *cache,
+static void intel_fbc_get_plane_source_size(const struct intel_fbc_state_cache *cache,
int *width, int *height)
{
if (width)
@@ -83,7 +78,7 @@ static void intel_fbc_get_plane_source_size(struct intel_fbc_state_cache *cache,
}
static int intel_fbc_calculate_cfb_size(struct drm_i915_private *dev_priv,
- struct intel_fbc_state_cache *cache)
+ const struct intel_fbc_state_cache *cache)
{
int lines;
@@ -143,8 +138,10 @@ static void i8xx_fbc_activate(struct drm_i915_private *dev_priv)
u32 fbc_ctl2;
/* Set it up... */
- fbc_ctl2 = FBC_CTL_FENCE_DBL | FBC_CTL_IDLE_IMM | FBC_CTL_CPU_FENCE;
+ fbc_ctl2 = FBC_CTL_FENCE_DBL | FBC_CTL_IDLE_IMM;
fbc_ctl2 |= FBC_CTL_PLANE(params->crtc.i9xx_plane);
+ if (params->fence_id >= 0)
+ fbc_ctl2 |= FBC_CTL_CPU_FENCE;
I915_WRITE(FBC_CONTROL2, fbc_ctl2);
I915_WRITE(FBC_FENCE_OFF, params->crtc.fence_y_offset);
}
@@ -156,7 +153,8 @@ static void i8xx_fbc_activate(struct drm_i915_private *dev_priv)
if (IS_I945GM(dev_priv))
fbc_ctl |= FBC_CTL_C3_IDLE; /* 945 needs special SR handling */
fbc_ctl |= (cfb_pitch & 0xff) << FBC_CTL_STRIDE_SHIFT;
- fbc_ctl |= params->vma->fence->id;
+ if (params->fence_id >= 0)
+ fbc_ctl |= params->fence_id;
I915_WRITE(FBC_CONTROL, fbc_ctl);
}
@@ -176,8 +174,8 @@ static void g4x_fbc_activate(struct drm_i915_private *dev_priv)
else
dpfc_ctl |= DPFC_CTL_LIMIT_1X;
- if (params->flags & PLANE_HAS_FENCE) {
- dpfc_ctl |= DPFC_CTL_FENCE_EN | params->vma->fence->id;
+ if (params->fence_id >= 0) {
+ dpfc_ctl |= DPFC_CTL_FENCE_EN | params->fence_id;
I915_WRITE(DPFC_FENCE_YOFF, params->crtc.fence_y_offset);
} else {
I915_WRITE(DPFC_FENCE_YOFF, 0);
@@ -234,14 +232,14 @@ static void ilk_fbc_activate(struct drm_i915_private *dev_priv)
break;
}
- if (params->flags & PLANE_HAS_FENCE) {
+ if (params->fence_id >= 0) {
dpfc_ctl |= DPFC_CTL_FENCE_EN;
if (IS_GEN(dev_priv, 5))
- dpfc_ctl |= params->vma->fence->id;
+ dpfc_ctl |= params->fence_id;
if (IS_GEN(dev_priv, 6)) {
I915_WRITE(SNB_DPFC_CTL_SA,
SNB_CPU_FENCE_ENABLE |
- params->vma->fence->id);
+ params->fence_id);
I915_WRITE(DPFC_CPU_FENCE_OFFSET,
params->crtc.fence_y_offset);
}
@@ -253,8 +251,6 @@ static void ilk_fbc_activate(struct drm_i915_private *dev_priv)
}
I915_WRITE(ILK_DPFC_FENCE_YOFF, params->crtc.fence_y_offset);
- I915_WRITE(ILK_FBC_RT_BASE,
- i915_ggtt_offset(params->vma) | ILK_FBC_RT_VALID);
/* enable it... */
I915_WRITE(ILK_DPFC_CONTROL, dpfc_ctl | DPFC_CTL_EN);
@@ -285,13 +281,12 @@ static void gen7_fbc_activate(struct drm_i915_private *dev_priv)
int threshold = dev_priv->fbc.threshold;
/* Display WA #0529: skl, kbl, bxt. */
- if (IS_GEN(dev_priv, 9) && !IS_GEMINILAKE(dev_priv)) {
+ if (IS_GEN9_BC(dev_priv) || IS_BROXTON(dev_priv)) {
u32 val = I915_READ(CHICKEN_MISC_4);
val &= ~(FBC_STRIDE_OVERRIDE | FBC_STRIDE_MASK);
- if (i915_gem_object_get_tiling(params->vma->obj) !=
- I915_TILING_X)
+ if (params->gen9_wa_cfb_stride)
val |= FBC_STRIDE_OVERRIDE | params->gen9_wa_cfb_stride;
I915_WRITE(CHICKEN_MISC_4, val);
@@ -317,11 +312,11 @@ static void gen7_fbc_activate(struct drm_i915_private *dev_priv)
break;
}
- if (params->flags & PLANE_HAS_FENCE) {
+ if (params->fence_id >= 0) {
dpfc_ctl |= IVB_DPFC_CTL_FENCE_EN;
I915_WRITE(SNB_DPFC_CTL_SA,
SNB_CPU_FENCE_ENABLE |
- params->vma->fence->id);
+ params->fence_id);
I915_WRITE(DPFC_CPU_FENCE_OFFSET, params->crtc.fence_y_offset);
} else {
I915_WRITE(SNB_DPFC_CTL_SA,0);
@@ -367,6 +362,7 @@ static void intel_fbc_hw_activate(struct drm_i915_private *dev_priv)
struct intel_fbc *fbc = &dev_priv->fbc;
fbc->active = true;
+ fbc->activated = true;
if (INTEL_GEN(dev_priv) >= 7)
gen7_fbc_activate(dev_priv);
@@ -419,29 +415,10 @@ static void intel_fbc_deactivate(struct drm_i915_private *dev_priv,
fbc->no_fbc_reason = reason;
}
-static bool multiple_pipes_ok(struct intel_crtc *crtc,
- struct intel_plane_state *plane_state)
-{
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
- struct intel_fbc *fbc = &dev_priv->fbc;
- enum pipe pipe = crtc->pipe;
-
- /* Don't even bother tracking anything we don't need. */
- if (!no_fbc_on_multiple_pipes(dev_priv))
- return true;
-
- if (plane_state->base.visible)
- fbc->visible_pipes_mask |= (1 << pipe);
- else
- fbc->visible_pipes_mask &= ~(1 << pipe);
-
- return (fbc->visible_pipes_mask & ~(1 << pipe)) != 0;
-}
-
static int find_compression_threshold(struct drm_i915_private *dev_priv,
struct drm_mm_node *node,
- int size,
- int fb_cpp)
+ unsigned int size,
+ unsigned int fb_cpp)
{
int compression_threshold = 1;
int ret;
@@ -487,18 +464,15 @@ again:
}
}
-static int intel_fbc_alloc_cfb(struct intel_crtc *crtc)
+static int intel_fbc_alloc_cfb(struct drm_i915_private *dev_priv,
+ unsigned int size, unsigned int fb_cpp)
{
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
struct intel_fbc *fbc = &dev_priv->fbc;
struct drm_mm_node *uninitialized_var(compressed_llb);
- int size, fb_cpp, ret;
+ int ret;
WARN_ON(drm_mm_node_allocated(&fbc->compressed_fb));
- size = intel_fbc_calculate_cfb_size(dev_priv, &fbc->state_cache);
- fb_cpp = fbc->state_cache.fb.format->cpp[0];
-
ret = find_compression_threshold(dev_priv, &fbc->compressed_fb,
size, fb_cpp);
if (!ret)
@@ -656,46 +630,55 @@ static bool intel_fbc_hw_tracking_covers_screen(struct intel_crtc *crtc)
}
static void intel_fbc_update_state_cache(struct intel_crtc *crtc,
- struct intel_crtc_state *crtc_state,
- struct intel_plane_state *plane_state)
+ const struct intel_crtc_state *crtc_state,
+ const struct intel_plane_state *plane_state)
{
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
struct intel_fbc *fbc = &dev_priv->fbc;
struct intel_fbc_state_cache *cache = &fbc->state_cache;
- struct drm_framebuffer *fb = plane_state->base.fb;
+ struct drm_framebuffer *fb = plane_state->hw.fb;
- cache->vma = NULL;
- cache->flags = 0;
+ cache->plane.visible = plane_state->uapi.visible;
+ if (!cache->plane.visible)
+ return;
- cache->crtc.mode_flags = crtc_state->base.adjusted_mode.flags;
+ cache->crtc.mode_flags = crtc_state->hw.adjusted_mode.flags;
if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
cache->crtc.hsw_bdw_pixel_rate = crtc_state->pixel_rate;
- cache->plane.rotation = plane_state->base.rotation;
+ cache->plane.rotation = plane_state->hw.rotation;
/*
* Src coordinates are already rotated by 270 degrees for
* the 90/270 degree plane rotation cases (to match the
* GTT mapping), hence no need to account for rotation here.
*/
- cache->plane.src_w = drm_rect_width(&plane_state->base.src) >> 16;
- cache->plane.src_h = drm_rect_height(&plane_state->base.src) >> 16;
- cache->plane.visible = plane_state->base.visible;
+ cache->plane.src_w = drm_rect_width(&plane_state->uapi.src) >> 16;
+ cache->plane.src_h = drm_rect_height(&plane_state->uapi.src) >> 16;
cache->plane.adjusted_x = plane_state->color_plane[0].x;
cache->plane.adjusted_y = plane_state->color_plane[0].y;
- cache->plane.y = plane_state->base.src.y1 >> 16;
+ cache->plane.y = plane_state->uapi.src.y1 >> 16;
- cache->plane.pixel_blend_mode = plane_state->base.pixel_blend_mode;
-
- if (!cache->plane.visible)
- return;
+ cache->plane.pixel_blend_mode = plane_state->hw.pixel_blend_mode;
cache->fb.format = fb->format;
cache->fb.stride = fb->pitches[0];
- cache->vma = plane_state->vma;
- cache->flags = plane_state->flags;
- if (WARN_ON(cache->flags & PLANE_HAS_FENCE && !cache->vma->fence))
- cache->flags &= ~PLANE_HAS_FENCE;
+ WARN_ON(plane_state->flags & PLANE_HAS_FENCE &&
+ !plane_state->vma->fence);
+
+ if (plane_state->flags & PLANE_HAS_FENCE &&
+ plane_state->vma->fence)
+ cache->fence_id = plane_state->vma->fence->id;
+ else
+ cache->fence_id = -1;
+}
+
+static bool intel_fbc_cfb_size_changed(struct drm_i915_private *dev_priv)
+{
+ struct intel_fbc *fbc = &dev_priv->fbc;
+
+ return intel_fbc_calculate_cfb_size(dev_priv, &fbc->state_cache) >
+ fbc->compressed_fb.size * fbc->threshold;
}
static bool intel_fbc_can_activate(struct intel_crtc *crtc)
@@ -704,6 +687,11 @@ static bool intel_fbc_can_activate(struct intel_crtc *crtc)
struct intel_fbc *fbc = &dev_priv->fbc;
struct intel_fbc_state_cache *cache = &fbc->state_cache;
+ if (!cache->plane.visible) {
+ fbc->no_fbc_reason = "primary plane not visible";
+ return false;
+ }
+
/* We don't need to use a state cache here since this information is
 * global for all CRTCs.
*/
@@ -712,11 +700,6 @@ static bool intel_fbc_can_activate(struct intel_crtc *crtc)
return false;
}
- if (!cache->vma) {
- fbc->no_fbc_reason = "primary plane not visible";
- return false;
- }
-
if (cache->crtc.mode_flags & DRM_MODE_FLAG_INTERLACE) {
fbc->no_fbc_reason = "incompatible mode";
return false;
@@ -740,7 +723,7 @@ static bool intel_fbc_can_activate(struct intel_crtc *crtc)
 * For now this will effectively disable FBC with 90/270 degree
* rotation.
*/
- if (!(cache->flags & PLANE_HAS_FENCE)) {
+ if (cache->fence_id < 0) {
fbc->no_fbc_reason = "framebuffer not tiled or fenced";
return false;
}
@@ -783,8 +766,7 @@ static bool intel_fbc_can_activate(struct intel_crtc *crtc)
* we didn't get any invalidate/deactivate calls, but this would require
* a lot of tracking just for a specific case. If we conclude it's an
* important case, we can implement it later. */
- if (intel_fbc_calculate_cfb_size(dev_priv, &fbc->state_cache) >
- fbc->compressed_fb.size * fbc->threshold) {
+ if (intel_fbc_cfb_size_changed(dev_priv)) {
fbc->no_fbc_reason = "CFB requirements changed";
return false;
}
@@ -794,7 +776,7 @@ static bool intel_fbc_can_activate(struct intel_crtc *crtc)
* having a Y offset that isn't divisible by 4 causes FIFO underrun
* and screen flicker.
*/
- if (IS_GEN_RANGE(dev_priv, 9, 10) &&
+ if (INTEL_GEN(dev_priv) >= 9 &&
(fbc->state_cache.plane.adjusted_y & 3)) {
fbc->no_fbc_reason = "plane Y offset is misaligned";
return false;
@@ -837,8 +819,7 @@ static void intel_fbc_get_reg_params(struct intel_crtc *crtc,
* zero. */
memset(params, 0, sizeof(*params));
- params->vma = cache->vma;
- params->flags = cache->flags;
+ params->fence_id = cache->fence_id;
params->crtc.pipe = crtc->pipe;
params->crtc.i9xx_plane = to_intel_plane(crtc->base.primary)->i9xx_plane;
@@ -849,39 +830,88 @@ static void intel_fbc_get_reg_params(struct intel_crtc *crtc,
params->cfb_size = intel_fbc_calculate_cfb_size(dev_priv, cache);
- if (IS_GEN(dev_priv, 9) && !IS_GEMINILAKE(dev_priv))
- params->gen9_wa_cfb_stride = DIV_ROUND_UP(cache->plane.src_w,
- 32 * fbc->threshold) * 8;
+ params->gen9_wa_cfb_stride = cache->gen9_wa_cfb_stride;
+
+ params->plane_visible = cache->plane.visible;
+}
+
+static bool intel_fbc_can_flip_nuke(const struct intel_crtc_state *crtc_state)
+{
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ const struct intel_fbc *fbc = &dev_priv->fbc;
+ const struct intel_fbc_state_cache *cache = &fbc->state_cache;
+ const struct intel_fbc_reg_params *params = &fbc->params;
+
+ if (drm_atomic_crtc_needs_modeset(&crtc_state->uapi))
+ return false;
+
+ if (!params->plane_visible)
+ return false;
+
+ if (!intel_fbc_can_activate(crtc))
+ return false;
+
+ if (params->fb.format != cache->fb.format)
+ return false;
+
+ if (params->fb.stride != cache->fb.stride)
+ return false;
+
+ if (params->cfb_size != intel_fbc_calculate_cfb_size(dev_priv, cache))
+ return false;
+
+ if (params->gen9_wa_cfb_stride != cache->gen9_wa_cfb_stride)
+ return false;
+
+ return true;
}
-void intel_fbc_pre_update(struct intel_crtc *crtc,
- struct intel_crtc_state *crtc_state,
- struct intel_plane_state *plane_state)
+bool intel_fbc_pre_update(struct intel_crtc *crtc,
+ const struct intel_crtc_state *crtc_state,
+ const struct intel_plane_state *plane_state)
{
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
struct intel_fbc *fbc = &dev_priv->fbc;
const char *reason = "update pending";
+ bool need_vblank_wait = false;
if (!fbc_supported(dev_priv))
- return;
+ return need_vblank_wait;
mutex_lock(&fbc->lock);
- if (!multiple_pipes_ok(crtc, plane_state)) {
- reason = "more than one pipe active";
- goto deactivate;
- }
-
- if (!fbc->enabled || fbc->crtc != crtc)
+ if (fbc->crtc != crtc)
goto unlock;
intel_fbc_update_state_cache(crtc, crtc_state, plane_state);
fbc->flip_pending = true;
-deactivate:
- intel_fbc_deactivate(dev_priv, reason);
+ if (!intel_fbc_can_flip_nuke(crtc_state)) {
+ intel_fbc_deactivate(dev_priv, reason);
+
+ /*
+ * Display WA #1198: glk+
+ * Need an extra vblank wait between FBC disable and most plane
+ * updates. Bspec says this is only needed for plane disable, but
+ * that is not true. Touching most plane registers will cause the
+ * corruption to appear. Also SKL/derivatives do not seem to be
+ * affected.
+ *
+ * TODO: could optimize this a bit by sampling the frame
+ * counter when we disable FBC (if it was already done earlier)
+ * and skipping the extra vblank wait before the plane update
+ * if at least one frame has already passed.
+ */
+ if (fbc->activated &&
+ (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv)))
+ need_vblank_wait = true;
+ fbc->activated = false;
+ }
unlock:
mutex_unlock(&fbc->lock);
+
+ return need_vblank_wait;
}
/**
@@ -897,14 +927,13 @@ static void __intel_fbc_disable(struct drm_i915_private *dev_priv)
struct intel_crtc *crtc = fbc->crtc;
WARN_ON(!mutex_is_locked(&fbc->lock));
- WARN_ON(!fbc->enabled);
+ WARN_ON(!fbc->crtc);
WARN_ON(fbc->active);
DRM_DEBUG_KMS("Disabling FBC on pipe %c\n", pipe_name(crtc->pipe));
__intel_fbc_cleanup_cfb(dev_priv);
- fbc->enabled = false;
fbc->crtc = NULL;
}
@@ -915,11 +944,10 @@ static void __intel_fbc_post_update(struct intel_crtc *crtc)
WARN_ON(!mutex_is_locked(&fbc->lock));
- if (!fbc->enabled || fbc->crtc != crtc)
+ if (fbc->crtc != crtc)
return;
fbc->flip_pending = false;
- WARN_ON(fbc->active);
if (!i915_modparams.enable_fbc) {
intel_fbc_deactivate(dev_priv, "disabled at runtime per module param");
@@ -933,10 +961,9 @@ static void __intel_fbc_post_update(struct intel_crtc *crtc)
if (!intel_fbc_can_activate(crtc))
return;
- if (!fbc->busy_bits) {
- intel_fbc_deactivate(dev_priv, "FBC enabled (active or scheduled)");
+ if (!fbc->busy_bits)
intel_fbc_hw_activate(dev_priv);
- } else
+ else
intel_fbc_deactivate(dev_priv, "frontbuffer write");
}
@@ -955,7 +982,7 @@ void intel_fbc_post_update(struct intel_crtc *crtc)
static unsigned int intel_fbc_get_frontbuffer_bit(struct intel_fbc *fbc)
{
- if (fbc->enabled)
+ if (fbc->crtc)
return to_intel_plane(fbc->crtc->base.primary)->frontbuffer_bit;
else
return fbc->possible_framebuffer_bits;
@@ -977,7 +1004,7 @@ void intel_fbc_invalidate(struct drm_i915_private *dev_priv,
fbc->busy_bits |= intel_fbc_get_frontbuffer_bit(fbc) & frontbuffer_bits;
- if (fbc->enabled && fbc->busy_bits)
+ if (fbc->crtc && fbc->busy_bits)
intel_fbc_deactivate(dev_priv, "frontbuffer write");
mutex_unlock(&fbc->lock);
@@ -998,7 +1025,7 @@ void intel_fbc_flush(struct drm_i915_private *dev_priv,
if (origin == ORIGIN_GTT || origin == ORIGIN_FLIP)
goto out;
- if (!fbc->busy_bits && fbc->enabled &&
+ if (!fbc->busy_bits && fbc->crtc &&
(frontbuffer_bits & intel_fbc_get_frontbuffer_bit(fbc))) {
if (fbc->active)
intel_fbc_recompress(dev_priv);
@@ -1047,12 +1074,12 @@ void intel_fbc_choose_crtc(struct drm_i915_private *dev_priv,
* to pipe or plane A. */
for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
struct intel_crtc_state *crtc_state;
- struct intel_crtc *crtc = to_intel_crtc(plane_state->base.crtc);
+ struct intel_crtc *crtc = to_intel_crtc(plane_state->hw.crtc);
if (!plane->has_fbc)
continue;
- if (!plane_state->base.visible)
+ if (!plane_state->uapi.visible)
continue;
crtc_state = intel_atomic_get_new_crtc_state(state, crtc);
@@ -1081,42 +1108,53 @@ out:
* intel_fbc_disable in the middle, as long as it is deactivated.
*/
void intel_fbc_enable(struct intel_crtc *crtc,
- struct intel_crtc_state *crtc_state,
- struct intel_plane_state *plane_state)
+ const struct intel_crtc_state *crtc_state,
+ const struct intel_plane_state *plane_state)
{
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
struct intel_fbc *fbc = &dev_priv->fbc;
+ struct intel_fbc_state_cache *cache = &fbc->state_cache;
+ const struct drm_framebuffer *fb = plane_state->hw.fb;
if (!fbc_supported(dev_priv))
return;
mutex_lock(&fbc->lock);
- if (fbc->enabled) {
- WARN_ON(fbc->crtc == NULL);
- if (fbc->crtc == crtc) {
- WARN_ON(!crtc_state->enable_fbc);
- WARN_ON(fbc->active);
- }
- goto out;
- }
+ if (fbc->crtc) {
+ if (fbc->crtc != crtc ||
+ !intel_fbc_cfb_size_changed(dev_priv))
+ goto out;
- if (!crtc_state->enable_fbc)
- goto out;
+ __intel_fbc_disable(dev_priv);
+ }
WARN_ON(fbc->active);
- WARN_ON(fbc->crtc != NULL);
intel_fbc_update_state_cache(crtc, crtc_state, plane_state);
- if (intel_fbc_alloc_cfb(crtc)) {
+
+ /* FIXME crtc_state->enable_fbc lies :( */
+ if (!cache->plane.visible)
+ goto out;
+
+ if (intel_fbc_alloc_cfb(dev_priv,
+ intel_fbc_calculate_cfb_size(dev_priv, cache),
+ fb->format->cpp[0])) {
+ cache->plane.visible = false;
fbc->no_fbc_reason = "not enough stolen memory";
goto out;
}
+ if ((IS_GEN9_BC(dev_priv) || IS_BROXTON(dev_priv)) &&
+ fb->modifier != I915_FORMAT_MOD_X_TILED)
+ cache->gen9_wa_cfb_stride =
+ DIV_ROUND_UP(cache->plane.src_w, 32 * fbc->threshold) * 8;
+ else
+ cache->gen9_wa_cfb_stride = 0;
+
DRM_DEBUG_KMS("Enabling FBC on pipe %c\n", pipe_name(crtc->pipe));
fbc->no_fbc_reason = "FBC enabled but not active yet\n";
- fbc->enabled = true;
fbc->crtc = crtc;
out:
mutex_unlock(&fbc->lock);
@@ -1156,7 +1194,7 @@ void intel_fbc_global_disable(struct drm_i915_private *dev_priv)
return;
mutex_lock(&fbc->lock);
- if (fbc->enabled) {
+ if (fbc->crtc) {
WARN_ON(fbc->crtc->active);
__intel_fbc_disable(dev_priv);
}
@@ -1172,7 +1210,7 @@ static void intel_fbc_underrun_work_fn(struct work_struct *work)
mutex_lock(&fbc->lock);
/* Maybe we were scheduled twice. */
- if (fbc->underrun_detected || !fbc->enabled)
+ if (fbc->underrun_detected || !fbc->crtc)
goto out;
DRM_DEBUG_KMS("Disabling FBC due to FIFO underrun.\n");
@@ -1244,28 +1282,6 @@ void intel_fbc_handle_fifo_underrun_irq(struct drm_i915_private *dev_priv)
schedule_work(&fbc->underrun_work);
}
-/**
- * intel_fbc_init_pipe_state - initialize FBC's CRTC visibility tracking
- * @dev_priv: i915 device instance
- *
- * The FBC code needs to track CRTC visibility since the older platforms can't
- * have FBC enabled while multiple pipes are used. This function does the
- * initial setup at driver load to make sure FBC is matching the real hardware.
- */
-void intel_fbc_init_pipe_state(struct drm_i915_private *dev_priv)
-{
- struct intel_crtc *crtc;
-
- /* Don't even bother tracking anything if we don't need. */
- if (!no_fbc_on_multiple_pipes(dev_priv))
- return;
-
- for_each_intel_crtc(&dev_priv->drm, crtc)
- if (intel_crtc_active(crtc) &&
- crtc->base.primary->state->visible)
- dev_priv->fbc.visible_pipes_mask |= (1 << crtc->pipe);
-}
-
/*
* The DDX driver changes its behavior depending on the value it reads from
* i915.enable_fbc, so sanitize it by translating the default value into either
@@ -1283,10 +1299,6 @@ static int intel_sanitize_fbc_option(struct drm_i915_private *dev_priv)
if (!HAS_FBC(dev_priv))
return 0;
- /* https://bugs.freedesktop.org/show_bug.cgi?id=108085 */
- if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv))
- return 0;
-
if (IS_BROADWELL(dev_priv) || INTEL_GEN(dev_priv) >= 9)
return 1;
@@ -1317,7 +1329,6 @@ void intel_fbc_init(struct drm_i915_private *dev_priv)
INIT_WORK(&fbc->underrun_work, intel_fbc_underrun_work_fn);
mutex_init(&fbc->lock);
- fbc->enabled = false;
fbc->active = false;
if (!drm_mm_initialized(&dev_priv->mm.stolen))
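
intel_fbc_cfb_size_changed(), factored out above, re-derives the required compressed-framebuffer size and compares it against the allocation scaled by the compression threshold: a threshold of N lets find_compression_threshold() settle for a CFB N times smaller at a stricter compression limit. A small model of that check, with illustrative numbers rather than real plane state:

/*
 * Model of the CFB sizing check factored out above as
 * intel_fbc_cfb_size_changed(). All numbers are illustrative.
 */
#include <stdbool.h>
#include <stdio.h>

struct fbc {
	unsigned long long compressed_fb_size;	/* bytes actually allocated */
	int threshold;				/* compression threshold */
};

static bool cfb_size_changed(const struct fbc *fbc, unsigned long long needed)
{
	/* mirrors: calculate_cfb_size() > compressed_fb.size * threshold */
	return needed > fbc->compressed_fb_size * fbc->threshold;
}

int main(void)
{
	struct fbc fbc = { .compressed_fb_size = 4ULL << 20, .threshold = 2 };
	/* hypothetical requirement: 4096-wide plane, cpp 4, 2160 lines */
	unsigned long long needed = 4096ULL * 4 * 2160;

	printf("needed=%llu covered=%llu changed=%d\n", needed,
	       fbc.compressed_fb_size * fbc.threshold,
	       cfb_size_changed(&fbc, needed));
	return 0;
}
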
diff --git a/drivers/gpu/drm/i915/display/intel_fbc.h b/drivers/gpu/drm/i915/display/intel_fbc.h
index 50272eda8d43..c8a5e5098687 100644
--- a/drivers/gpu/drm/i915/display/intel_fbc.h
+++ b/drivers/gpu/drm/i915/display/intel_fbc.h
@@ -19,15 +19,14 @@ struct intel_plane_state;
void intel_fbc_choose_crtc(struct drm_i915_private *dev_priv,
struct intel_atomic_state *state);
bool intel_fbc_is_active(struct drm_i915_private *dev_priv);
-void intel_fbc_pre_update(struct intel_crtc *crtc,
- struct intel_crtc_state *crtc_state,
- struct intel_plane_state *plane_state);
+bool intel_fbc_pre_update(struct intel_crtc *crtc,
+ const struct intel_crtc_state *crtc_state,
+ const struct intel_plane_state *plane_state);
void intel_fbc_post_update(struct intel_crtc *crtc);
void intel_fbc_init(struct drm_i915_private *dev_priv);
-void intel_fbc_init_pipe_state(struct drm_i915_private *dev_priv);
void intel_fbc_enable(struct intel_crtc *crtc,
- struct intel_crtc_state *crtc_state,
- struct intel_plane_state *plane_state);
+ const struct intel_crtc_state *crtc_state,
+ const struct intel_plane_state *plane_state);
void intel_fbc_disable(struct intel_crtc *crtc);
void intel_fbc_global_disable(struct drm_i915_private *dev_priv);
void intel_fbc_invalidate(struct drm_i915_private *dev_priv,
diff --git a/drivers/gpu/drm/i915/display/intel_fbdev.c b/drivers/gpu/drm/i915/display/intel_fbdev.c
index 48c960ca12fb..1e98e432c9fa 100644
--- a/drivers/gpu/drm/i915/display/intel_fbdev.c
+++ b/drivers/gpu/drm/i915/display/intel_fbdev.c
@@ -100,7 +100,7 @@ static int intel_fbdev_pan_display(struct fb_var_screeninfo *var,
return ret;
}
-static struct fb_ops intelfb_ops = {
+static const struct fb_ops intelfb_ops = {
.owner = THIS_MODULE,
DRM_FB_HELPER_DEFAULT_OPS,
.fb_set_par = intel_fbdev_set_par,
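
The fbdev hunk above const-qualifies the ops table. Declaring a function-pointer table const moves it into read-only data so the pointers cannot be retargeted at runtime, a common kernel hardening pattern; a stand-in sketch with simplified types:

/*
 * Sketch of the const-ops pattern from the fbdev hunk: a table of
 * function pointers declared const so it lands in read-only data.
 */
#include <stdio.h>

struct fb_ops {
	int (*set_par)(void);
	int (*blank)(int mode);
};

static int my_set_par(void) { puts("set_par"); return 0; }
static int my_blank(int m)  { printf("blank %d\n", m); return 0; }

/* const: the pointers can be called but never reassigned */
static const struct fb_ops intelfb_ops = {
	.set_par = my_set_par,
	.blank   = my_blank,
};

int main(void)
{
	intelfb_ops.set_par();
	intelfb_ops.blank(1);
	/* intelfb_ops.set_par = NULL;  <- would not compile */
	return 0;
}
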
diff --git a/drivers/gpu/drm/i915/display/intel_fifo_underrun.c b/drivers/gpu/drm/i915/display/intel_fifo_underrun.c
index ab61f88d1d33..6c83b350525d 100644
--- a/drivers/gpu/drm/i915/display/intel_fifo_underrun.c
+++ b/drivers/gpu/drm/i915/display/intel_fifo_underrun.c
@@ -126,8 +126,8 @@ static void i9xx_set_fifo_underrun_reporting(struct drm_device *dev,
}
}
-static void ironlake_set_fifo_underrun_reporting(struct drm_device *dev,
- enum pipe pipe, bool enable)
+static void ilk_set_fifo_underrun_reporting(struct drm_device *dev,
+ enum pipe pipe, bool enable)
{
struct drm_i915_private *dev_priv = to_i915(dev);
u32 bit = (pipe == PIPE_A) ?
@@ -139,7 +139,7 @@ static void ironlake_set_fifo_underrun_reporting(struct drm_device *dev,
ilk_disable_display_irq(dev_priv, bit);
}
-static void ivybridge_check_fifo_underruns(struct intel_crtc *crtc)
+static void ivb_check_fifo_underruns(struct intel_crtc *crtc)
{
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
enum pipe pipe = crtc->pipe;
@@ -157,9 +157,9 @@ static void ivybridge_check_fifo_underruns(struct intel_crtc *crtc)
DRM_ERROR("fifo underrun on pipe %c\n", pipe_name(pipe));
}
-static void ivybridge_set_fifo_underrun_reporting(struct drm_device *dev,
- enum pipe pipe,
- bool enable, bool old)
+static void ivb_set_fifo_underrun_reporting(struct drm_device *dev,
+ enum pipe pipe, bool enable,
+ bool old)
{
struct drm_i915_private *dev_priv = to_i915(dev);
if (enable) {
@@ -180,8 +180,8 @@ static void ivybridge_set_fifo_underrun_reporting(struct drm_device *dev,
}
}
-static void broadwell_set_fifo_underrun_reporting(struct drm_device *dev,
- enum pipe pipe, bool enable)
+static void bdw_set_fifo_underrun_reporting(struct drm_device *dev,
+ enum pipe pipe, bool enable)
{
struct drm_i915_private *dev_priv = to_i915(dev);
@@ -264,11 +264,11 @@ static bool __intel_set_cpu_fifo_underrun_reporting(struct drm_device *dev,
if (HAS_GMCH(dev_priv))
i9xx_set_fifo_underrun_reporting(dev, pipe, enable, old);
else if (IS_GEN_RANGE(dev_priv, 5, 6))
- ironlake_set_fifo_underrun_reporting(dev, pipe, enable);
+ ilk_set_fifo_underrun_reporting(dev, pipe, enable);
else if (IS_GEN(dev_priv, 7))
- ivybridge_set_fifo_underrun_reporting(dev, pipe, enable, old);
+ ivb_set_fifo_underrun_reporting(dev, pipe, enable, old);
else if (INTEL_GEN(dev_priv) >= 8)
- broadwell_set_fifo_underrun_reporting(dev, pipe, enable);
+ bdw_set_fifo_underrun_reporting(dev, pipe, enable);
return old;
}
@@ -427,7 +427,7 @@ void intel_check_cpu_fifo_underruns(struct drm_i915_private *dev_priv)
if (HAS_GMCH(dev_priv))
i9xx_check_fifo_underruns(crtc);
else if (IS_GEN(dev_priv, 7))
- ivybridge_check_fifo_underruns(crtc);
+ ivb_check_fifo_underruns(crtc);
}
spin_unlock_irq(&dev_priv->irq_lock);
diff --git a/drivers/gpu/drm/i915/display/intel_hdcp.c b/drivers/gpu/drm/i915/display/intel_hdcp.c
index a448815d8fc2..0fdbd39f6641 100644
--- a/drivers/gpu/drm/i915/display/intel_hdcp.c
+++ b/drivers/gpu/drm/i915/display/intel_hdcp.c
@@ -1870,7 +1870,7 @@ static bool is_hdcp2_supported(struct drm_i915_private *dev_priv)
return false;
return (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv) ||
- IS_KABYLAKE(dev_priv));
+ IS_KABYLAKE(dev_priv) || IS_COFFEELAKE(dev_priv));
}
void intel_hdcp_component_init(struct drm_i915_private *dev_priv)
diff --git a/drivers/gpu/drm/i915/display/intel_hdmi.c b/drivers/gpu/drm/i915/display/intel_hdmi.c
index f56fffc474fa..93ac0f296852 100644
--- a/drivers/gpu/drm/i915/display/intel_hdmi.c
+++ b/drivers/gpu/drm/i915/display/intel_hdmi.c
@@ -85,16 +85,17 @@ assert_hdmi_transcoder_func_disabled(struct drm_i915_private *dev_priv,
"HDMI transcoder function enabled, expecting disabled\n");
}
-struct intel_hdmi *enc_to_intel_hdmi(struct drm_encoder *encoder)
+struct intel_hdmi *enc_to_intel_hdmi(struct intel_encoder *encoder)
{
struct intel_digital_port *intel_dig_port =
- container_of(encoder, struct intel_digital_port, base.base);
+ container_of(&encoder->base, struct intel_digital_port,
+ base.base);
return &intel_dig_port->hdmi;
}
-static struct intel_hdmi *intel_attached_hdmi(struct drm_connector *connector)
+static struct intel_hdmi *intel_attached_hdmi(struct intel_connector *connector)
{
- return enc_to_intel_hdmi(&intel_attached_encoder(connector)->base);
+ return enc_to_intel_hdmi(intel_attached_encoder(connector));
}
static u32 g4x_infoframe_index(unsigned int type)
@@ -285,7 +286,7 @@ static void ibx_write_infoframe(struct intel_encoder *encoder,
{
const u32 *data = frame;
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->base.crtc);
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->uapi.crtc);
i915_reg_t reg = TVIDEO_DIP_CTL(intel_crtc->pipe);
u32 val = I915_READ(reg);
int i;
@@ -321,7 +322,7 @@ static void ibx_read_infoframe(struct intel_encoder *encoder,
void *frame, ssize_t len)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
u32 val, *data = frame;
int i;
@@ -340,7 +341,7 @@ static u32 ibx_infoframes_enabled(struct intel_encoder *encoder,
const struct intel_crtc_state *pipe_config)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- enum pipe pipe = to_intel_crtc(pipe_config->base.crtc)->pipe;
+ enum pipe pipe = to_intel_crtc(pipe_config->uapi.crtc)->pipe;
i915_reg_t reg = TVIDEO_DIP_CTL(pipe);
u32 val = I915_READ(reg);
@@ -362,7 +363,7 @@ static void cpt_write_infoframe(struct intel_encoder *encoder,
{
const u32 *data = frame;
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->base.crtc);
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->uapi.crtc);
i915_reg_t reg = TVIDEO_DIP_CTL(intel_crtc->pipe);
u32 val = I915_READ(reg);
int i;
@@ -401,7 +402,7 @@ static void cpt_read_infoframe(struct intel_encoder *encoder,
void *frame, ssize_t len)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
u32 val, *data = frame;
int i;
@@ -420,7 +421,7 @@ static u32 cpt_infoframes_enabled(struct intel_encoder *encoder,
const struct intel_crtc_state *pipe_config)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- enum pipe pipe = to_intel_crtc(pipe_config->base.crtc)->pipe;
+ enum pipe pipe = to_intel_crtc(pipe_config->uapi.crtc)->pipe;
u32 val = I915_READ(TVIDEO_DIP_CTL(pipe));
if ((val & VIDEO_DIP_ENABLE) == 0)
@@ -438,7 +439,7 @@ static void vlv_write_infoframe(struct intel_encoder *encoder,
{
const u32 *data = frame;
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->base.crtc);
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->uapi.crtc);
i915_reg_t reg = VLV_TVIDEO_DIP_CTL(intel_crtc->pipe);
u32 val = I915_READ(reg);
int i;
@@ -474,7 +475,7 @@ static void vlv_read_infoframe(struct intel_encoder *encoder,
void *frame, ssize_t len)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
u32 val, *data = frame;
int i;
@@ -493,7 +494,7 @@ static u32 vlv_infoframes_enabled(struct intel_encoder *encoder,
const struct intel_crtc_state *pipe_config)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- enum pipe pipe = to_intel_crtc(pipe_config->base.crtc)->pipe;
+ enum pipe pipe = to_intel_crtc(pipe_config->uapi.crtc)->pipe;
u32 val = I915_READ(VLV_TVIDEO_DIP_CTL(pipe));
if ((val & VIDEO_DIP_ENABLE) == 0)
@@ -602,7 +603,7 @@ u32 intel_hdmi_infoframes_enabled(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- struct intel_digital_port *dig_port = enc_to_dig_port(&encoder->base);
+ struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
u32 val, ret = 0;
int i;
@@ -646,7 +647,7 @@ static void intel_write_infoframe(struct intel_encoder *encoder,
enum hdmi_infoframe_type type,
const union hdmi_infoframe *frame)
{
- struct intel_digital_port *intel_dig_port = enc_to_dig_port(&encoder->base);
+ struct intel_digital_port *intel_dig_port = enc_to_dig_port(encoder);
u8 buffer[VIDEO_DIP_DATA_SIZE];
ssize_t len;
@@ -675,7 +676,7 @@ void intel_read_infoframe(struct intel_encoder *encoder,
enum hdmi_infoframe_type type,
union hdmi_infoframe *frame)
{
- struct intel_digital_port *intel_dig_port = enc_to_dig_port(&encoder->base);
+ struct intel_digital_port *intel_dig_port = enc_to_dig_port(encoder);
u8 buffer[VIDEO_DIP_DATA_SIZE];
int ret;
@@ -708,7 +709,7 @@ intel_hdmi_compute_avi_infoframe(struct intel_encoder *encoder,
{
struct hdmi_avi_infoframe *frame = &crtc_state->infoframes.avi.avi;
const struct drm_display_mode *adjusted_mode =
- &crtc_state->base.adjusted_mode;
+ &crtc_state->hw.adjusted_mode;
struct drm_connector *connector = conn_state->connector;
int ret;
@@ -804,7 +805,7 @@ intel_hdmi_compute_hdmi_infoframe(struct intel_encoder *encoder,
ret = drm_hdmi_vendor_infoframe_from_display_mode(frame,
conn_state->connector,
- &crtc_state->base.adjusted_mode);
+ &crtc_state->hw.adjusted_mode);
if (WARN_ON(ret))
return false;
@@ -855,7 +856,7 @@ static void g4x_set_infoframes(struct intel_encoder *encoder,
const struct drm_connector_state *conn_state)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- struct intel_digital_port *intel_dig_port = enc_to_dig_port(&encoder->base);
+ struct intel_digital_port *intel_dig_port = enc_to_dig_port(encoder);
struct intel_hdmi *intel_hdmi = &intel_dig_port->hdmi;
i915_reg_t reg = VIDEO_DIP_CTL;
u32 val = I915_READ(reg);
@@ -965,7 +966,7 @@ static bool intel_hdmi_set_gcp_infoframe(struct intel_encoder *encoder,
const struct drm_connector_state *conn_state)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
i915_reg_t reg;
if ((crtc_state->infoframes.enable &
@@ -990,7 +991,7 @@ void intel_hdmi_read_gcp_infoframe(struct intel_encoder *encoder,
struct intel_crtc_state *crtc_state)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
i915_reg_t reg;
if ((crtc_state->infoframes.enable &
@@ -1027,7 +1028,7 @@ static void intel_hdmi_compute_gcp_infoframe(struct intel_encoder *encoder,
/* Enable default_phase whenever the display mode is suitably aligned */
if (gcp_default_phase_possible(crtc_state->pipe_bpp,
- &crtc_state->base.adjusted_mode))
+ &crtc_state->hw.adjusted_mode))
crtc_state->infoframes.gcp |= GCP_DEFAULT_PHASE_ENABLE;
}
@@ -1037,8 +1038,8 @@ static void ibx_set_infoframes(struct intel_encoder *encoder,
const struct drm_connector_state *conn_state)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->base.crtc);
- struct intel_digital_port *intel_dig_port = enc_to_dig_port(&encoder->base);
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->uapi.crtc);
+ struct intel_digital_port *intel_dig_port = enc_to_dig_port(encoder);
struct intel_hdmi *intel_hdmi = &intel_dig_port->hdmi;
i915_reg_t reg = TVIDEO_DIP_CTL(intel_crtc->pipe);
u32 val = I915_READ(reg);
@@ -1096,8 +1097,8 @@ static void cpt_set_infoframes(struct intel_encoder *encoder,
const struct drm_connector_state *conn_state)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->base.crtc);
- struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&encoder->base);
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->uapi.crtc);
+ struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder);
i915_reg_t reg = TVIDEO_DIP_CTL(intel_crtc->pipe);
u32 val = I915_READ(reg);
@@ -1145,8 +1146,8 @@ static void vlv_set_infoframes(struct intel_encoder *encoder,
const struct drm_connector_state *conn_state)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->base.crtc);
- struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&encoder->base);
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->uapi.crtc);
+ struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder);
i915_reg_t reg = VLV_TVIDEO_DIP_CTL(intel_crtc->pipe);
u32 val = I915_READ(reg);
u32 port = VIDEO_DIP_PORT(encoder->port);
@@ -1736,9 +1737,9 @@ static void intel_hdmi_prepare(struct intel_encoder *encoder,
{
struct drm_device *dev = encoder->base.dev;
struct drm_i915_private *dev_priv = to_i915(dev);
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
- struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&encoder->base);
- const struct drm_display_mode *adjusted_mode = &crtc_state->base.adjusted_mode;
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+ struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder);
+ const struct drm_display_mode *adjusted_mode = &crtc_state->hw.adjusted_mode;
u32 hdmi_val;
intel_dp_dual_mode_set_tmds_output(intel_hdmi, true);
@@ -1774,7 +1775,7 @@ static bool intel_hdmi_get_hw_state(struct intel_encoder *encoder,
enum pipe *pipe)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&encoder->base);
+ struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder);
intel_wakeref_t wakeref;
bool ret;
@@ -1793,7 +1794,7 @@ static bool intel_hdmi_get_hw_state(struct intel_encoder *encoder,
static void intel_hdmi_get_config(struct intel_encoder *encoder,
struct intel_crtc_state *pipe_config)
{
- struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&encoder->base);
+ struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder);
struct drm_device *dev = encoder->base.dev;
struct drm_i915_private *dev_priv = to_i915(dev);
u32 tmp, flags = 0;
@@ -1829,7 +1830,7 @@ static void intel_hdmi_get_config(struct intel_encoder *encoder,
tmp & HDMI_COLOR_RANGE_16_235)
pipe_config->limited_color_range = true;
- pipe_config->base.adjusted_mode.flags |= flags;
+ pipe_config->hw.adjusted_mode.flags |= flags;
if ((tmp & SDVO_COLOR_FORMAT_MASK) == HDMI_COLOR_FORMAT_12bpc)
dotclock = pipe_config->port_clock * 2 / 3;
@@ -1839,7 +1840,7 @@ static void intel_hdmi_get_config(struct intel_encoder *encoder,
if (pipe_config->pixel_multiplier)
dotclock /= pipe_config->pixel_multiplier;
- pipe_config->base.adjusted_mode.crtc_clock = dotclock;
+ pipe_config->hw.adjusted_mode.crtc_clock = dotclock;
pipe_config->lane_count = 4;
@@ -1860,7 +1861,7 @@ static void intel_enable_hdmi_audio(struct intel_encoder *encoder,
const struct intel_crtc_state *pipe_config,
const struct drm_connector_state *conn_state)
{
- struct intel_crtc *crtc = to_intel_crtc(pipe_config->base.crtc);
+ struct intel_crtc *crtc = to_intel_crtc(pipe_config->uapi.crtc);
WARN_ON(!pipe_config->has_hdmi_sink);
DRM_DEBUG_DRIVER("Enabling HDMI audio on pipe %c\n",
@@ -1874,7 +1875,7 @@ static void g4x_enable_hdmi(struct intel_encoder *encoder,
{
struct drm_device *dev = encoder->base.dev;
struct drm_i915_private *dev_priv = to_i915(dev);
- struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&encoder->base);
+ struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder);
u32 temp;
temp = I915_READ(intel_hdmi->hdmi_reg);
@@ -1896,7 +1897,7 @@ static void ibx_enable_hdmi(struct intel_encoder *encoder,
{
struct drm_device *dev = encoder->base.dev;
struct drm_i915_private *dev_priv = to_i915(dev);
- struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&encoder->base);
+ struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder);
u32 temp;
temp = I915_READ(intel_hdmi->hdmi_reg);
@@ -1946,8 +1947,8 @@ static void cpt_enable_hdmi(struct intel_encoder *encoder,
{
struct drm_device *dev = encoder->base.dev;
struct drm_i915_private *dev_priv = to_i915(dev);
- struct intel_crtc *crtc = to_intel_crtc(pipe_config->base.crtc);
- struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&encoder->base);
+ struct intel_crtc *crtc = to_intel_crtc(pipe_config->uapi.crtc);
+ struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder);
enum pipe pipe = crtc->pipe;
u32 temp;
@@ -2007,10 +2008,10 @@ static void intel_disable_hdmi(struct intel_encoder *encoder,
{
struct drm_device *dev = encoder->base.dev;
struct drm_i915_private *dev_priv = to_i915(dev);
- struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&encoder->base);
+ struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder);
struct intel_digital_port *intel_dig_port =
hdmi_to_dig_port(intel_hdmi);
- struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->base.crtc);
+ struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->uapi.crtc);
u32 temp;
temp = I915_READ(intel_hdmi->hdmi_reg);
@@ -2160,7 +2161,7 @@ static enum drm_mode_status
intel_hdmi_mode_valid(struct drm_connector *connector,
struct drm_display_mode *mode)
{
- struct intel_hdmi *hdmi = intel_attached_hdmi(connector);
+ struct intel_hdmi *hdmi = intel_attached_hdmi(to_intel_connector(connector));
struct drm_device *dev = intel_hdmi_to_dev(hdmi);
struct drm_i915_private *dev_priv = to_i915(dev);
enum drm_mode_status status;
@@ -2210,12 +2211,12 @@ static bool hdmi_deep_color_possible(const struct intel_crtc_state *crtc_state,
int bpc)
{
struct drm_i915_private *dev_priv =
- to_i915(crtc_state->base.crtc->dev);
- struct drm_atomic_state *state = crtc_state->base.state;
+ to_i915(crtc_state->uapi.crtc->dev);
+ struct drm_atomic_state *state = crtc_state->uapi.state;
struct drm_connector_state *connector_state;
struct drm_connector *connector;
const struct drm_display_mode *adjusted_mode =
- &crtc_state->base.adjusted_mode;
+ &crtc_state->hw.adjusted_mode;
int i;
if (HAS_GMCH(dev_priv))
@@ -2240,7 +2241,7 @@ static bool hdmi_deep_color_possible(const struct intel_crtc_state *crtc_state,
for_each_new_connector_in_state(state, connector, connector_state, i) {
const struct drm_display_info *info = &connector->display_info;
- if (connector_state->crtc != crtc_state->base.crtc)
+ if (connector_state->crtc != crtc_state->uapi.crtc)
continue;
if (crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR420) {
@@ -2281,7 +2282,7 @@ static bool
intel_hdmi_ycbcr420_config(struct drm_connector *connector,
struct intel_crtc_state *config)
{
- struct intel_crtc *intel_crtc = to_intel_crtc(config->base.crtc);
+ struct intel_crtc *intel_crtc = to_intel_crtc(config->uapi.crtc);
if (!connector->ycbcr_420_allowed) {
DRM_ERROR("Platform doesn't support YCBCR420 output\n");
@@ -2316,7 +2317,7 @@ static int intel_hdmi_compute_bpc(struct intel_encoder *encoder,
struct intel_crtc_state *crtc_state,
int clock, bool force_dvi)
{
- struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&encoder->base);
+ struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder);
int bpc;
for (bpc = 12; bpc >= 10; bpc -= 2) {
@@ -2334,9 +2335,9 @@ static int intel_hdmi_compute_clock(struct intel_encoder *encoder,
struct intel_crtc_state *crtc_state,
bool force_dvi)
{
- struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&encoder->base);
+ struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder);
const struct drm_display_mode *adjusted_mode =
- &crtc_state->base.adjusted_mode;
+ &crtc_state->hw.adjusted_mode;
int bpc, clock = adjusted_mode->crtc_clock;
if (adjusted_mode->flags & DRM_MODE_FLAG_DBLCLK)
@@ -2378,7 +2379,7 @@ static bool intel_hdmi_limited_color_range(const struct intel_crtc_state *crtc_s
const struct intel_digital_connector_state *intel_conn_state =
to_intel_digital_connector_state(conn_state);
const struct drm_display_mode *adjusted_mode =
- &crtc_state->base.adjusted_mode;
+ &crtc_state->hw.adjusted_mode;
/*
* Our YCbCr output is always limited range.
@@ -2404,9 +2405,9 @@ int intel_hdmi_compute_config(struct intel_encoder *encoder,
struct intel_crtc_state *pipe_config,
struct drm_connector_state *conn_state)
{
- struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&encoder->base);
+ struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder);
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;
+ struct drm_display_mode *adjusted_mode = &pipe_config->hw.adjusted_mode;
struct drm_connector *connector = conn_state->connector;
struct drm_scdc *scdc = &connector->display_info.hdmi.scdc;
struct intel_digital_connector_state *intel_conn_state =
@@ -2451,8 +2452,9 @@ int intel_hdmi_compute_config(struct intel_encoder *encoder,
if (ret)
return ret;
- /* Set user selected PAR to incoming mode's member */
- adjusted_mode->picture_aspect_ratio = conn_state->picture_aspect_ratio;
+ if (conn_state->picture_aspect_ratio)
+ adjusted_mode->picture_aspect_ratio =
+ conn_state->picture_aspect_ratio;
pipe_config->lane_count = 4;
@@ -2495,7 +2497,7 @@ int intel_hdmi_compute_config(struct intel_encoder *encoder,
static void
intel_hdmi_unset_edid(struct drm_connector *connector)
{
- struct intel_hdmi *intel_hdmi = intel_attached_hdmi(connector);
+ struct intel_hdmi *intel_hdmi = intel_attached_hdmi(to_intel_connector(connector));
intel_hdmi->has_hdmi_sink = false;
intel_hdmi->has_audio = false;
@@ -2511,7 +2513,7 @@ static void
intel_hdmi_dp_dual_mode_detect(struct drm_connector *connector, bool has_edid)
{
struct drm_i915_private *dev_priv = to_i915(connector->dev);
- struct intel_hdmi *hdmi = intel_attached_hdmi(connector);
+ struct intel_hdmi *hdmi = intel_attached_hdmi(to_intel_connector(connector));
enum port port = hdmi_to_dig_port(hdmi)->base.port;
struct i2c_adapter *adapter =
intel_gmbus_get_adapter(dev_priv, hdmi->ddc_bus);
@@ -2558,7 +2560,7 @@ static bool
intel_hdmi_set_edid(struct drm_connector *connector)
{
struct drm_i915_private *dev_priv = to_i915(connector->dev);
- struct intel_hdmi *intel_hdmi = intel_attached_hdmi(connector);
+ struct intel_hdmi *intel_hdmi = intel_attached_hdmi(to_intel_connector(connector));
intel_wakeref_t wakeref;
struct edid *edid;
bool connected = false;
@@ -2599,7 +2601,7 @@ intel_hdmi_detect(struct drm_connector *connector, bool force)
{
enum drm_connector_status status = connector_status_disconnected;
struct drm_i915_private *dev_priv = to_i915(connector->dev);
- struct intel_hdmi *intel_hdmi = intel_attached_hdmi(connector);
+ struct intel_hdmi *intel_hdmi = intel_attached_hdmi(to_intel_connector(connector));
struct intel_encoder *encoder = &hdmi_to_dig_port(intel_hdmi)->base;
intel_wakeref_t wakeref;
@@ -2662,7 +2664,7 @@ static void intel_hdmi_pre_enable(struct intel_encoder *encoder,
const struct drm_connector_state *conn_state)
{
struct intel_digital_port *intel_dig_port =
- enc_to_dig_port(&encoder->base);
+ enc_to_dig_port(encoder);
intel_hdmi_prepare(encoder, pipe_config);
@@ -2675,7 +2677,7 @@ static void vlv_hdmi_pre_enable(struct intel_encoder *encoder,
const struct intel_crtc_state *pipe_config,
const struct drm_connector_state *conn_state)
{
- struct intel_digital_port *dport = enc_to_dig_port(&encoder->base);
+ struct intel_digital_port *dport = enc_to_dig_port(encoder);
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
vlv_phy_pre_encoder_enable(encoder, pipe_config);
@@ -2745,7 +2747,7 @@ static void chv_hdmi_pre_enable(struct intel_encoder *encoder,
const struct intel_crtc_state *pipe_config,
const struct drm_connector_state *conn_state)
{
- struct intel_digital_port *dport = enc_to_dig_port(&encoder->base);
+ struct intel_digital_port *dport = enc_to_dig_port(encoder);
struct drm_device *dev = encoder->base.dev;
struct drm_i915_private *dev_priv = to_i915(dev);
@@ -2771,7 +2773,7 @@ static struct i2c_adapter *
intel_hdmi_get_i2c_adapter(struct drm_connector *connector)
{
struct drm_i915_private *dev_priv = to_i915(connector->dev);
- struct intel_hdmi *intel_hdmi = intel_attached_hdmi(connector);
+ struct intel_hdmi *intel_hdmi = intel_attached_hdmi(to_intel_connector(connector));
return intel_gmbus_get_adapter(dev_priv, intel_hdmi->ddc_bus);
}
@@ -2815,7 +2817,7 @@ intel_hdmi_connector_register(struct drm_connector *connector)
static void intel_hdmi_destroy(struct drm_connector *connector)
{
- struct cec_notifier *n = intel_attached_hdmi(connector)->cec_notifier;
+ struct cec_notifier *n = intel_attached_hdmi(to_intel_connector(connector))->cec_notifier;
cec_notifier_conn_unregister(n);
@@ -2872,7 +2874,6 @@ intel_hdmi_add_properties(struct intel_hdmi *intel_hdmi, struct drm_connector *c
intel_attach_colorspace_property(connector);
drm_connector_attach_content_type_property(connector);
- connector->state->picture_aspect_ratio = HDMI_PICTURE_ASPECT_NONE;
if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv))
drm_object_attach_property(&connector->base,
@@ -2906,7 +2907,7 @@ bool intel_hdmi_handle_sink_scrambling(struct intel_encoder *encoder,
bool scrambling)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&encoder->base);
+ struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder);
struct drm_scrambling *sink_scrambling =
&connector->display_info.hdmi.scdc.scrambling;
struct i2c_adapter *adapter =
@@ -3131,20 +3132,29 @@ void intel_hdmi_init_connector(struct intel_digital_port *intel_dig_port,
struct intel_encoder *intel_encoder = &intel_dig_port->base;
struct drm_device *dev = intel_encoder->base.dev;
struct drm_i915_private *dev_priv = to_i915(dev);
+ struct i2c_adapter *ddc;
enum port port = intel_encoder->port;
struct cec_connector_info conn_info;
DRM_DEBUG_KMS("Adding HDMI connector on [ENCODER:%d:%s]\n",
intel_encoder->base.base.id, intel_encoder->base.name);
+ if (INTEL_GEN(dev_priv) < 12 && WARN_ON(port == PORT_A))
+ return;
+
if (WARN(intel_dig_port->max_lanes < 4,
"Not enough lanes (%d) for HDMI on [ENCODER:%d:%s]\n",
intel_dig_port->max_lanes, intel_encoder->base.base.id,
intel_encoder->base.name))
return;
- drm_connector_init(dev, connector, &intel_hdmi_connector_funcs,
- DRM_MODE_CONNECTOR_HDMIA);
+ intel_hdmi->ddc_bus = intel_hdmi_ddc_pin(dev_priv, port);
+ ddc = intel_gmbus_get_adapter(dev_priv, intel_hdmi->ddc_bus);
+
+ drm_connector_init_with_ddc(dev, connector,
+ &intel_hdmi_connector_funcs,
+ DRM_MODE_CONNECTOR_HDMIA,
+ ddc);
drm_connector_helper_add(connector, &intel_hdmi_connector_helper_funcs);
connector->interlace_allowed = 1;
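/*
 * Note on the reordering above: the ddc_bus lookup now happens before
 * connector init so the GMBUS adapter can be handed to
 * drm_connector_init_with_ddc(), which associates the DDC bus with the
 * connector (userspace sees it as a "ddc" sysfs link).
 */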
@@ -3154,10 +3164,6 @@ void intel_hdmi_init_connector(struct intel_digital_port *intel_dig_port,
if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv))
connector->ycbcr_420_allowed = true;
- intel_hdmi->ddc_bus = intel_hdmi_ddc_pin(dev_priv, port);
-
- if (WARN_ON(port == PORT_A))
- return;
intel_encoder->hpd_pin = intel_hpd_pin_default(dev_priv, port);
if (HAS_DDI(dev_priv))
diff --git a/drivers/gpu/drm/i915/display/intel_hdmi.h b/drivers/gpu/drm/i915/display/intel_hdmi.h
index cf1ea5427639..d3659d0b408b 100644
--- a/drivers/gpu/drm/i915/display/intel_hdmi.h
+++ b/drivers/gpu/drm/i915/display/intel_hdmi.h
@@ -29,7 +29,7 @@ void intel_hdmi_init(struct drm_i915_private *dev_priv, i915_reg_t hdmi_reg,
enum port port);
void intel_hdmi_init_connector(struct intel_digital_port *intel_dig_port,
struct intel_connector *intel_connector);
-struct intel_hdmi *enc_to_intel_hdmi(struct drm_encoder *encoder);
+struct intel_hdmi *enc_to_intel_hdmi(struct intel_encoder *encoder);
int intel_hdmi_compute_config(struct intel_encoder *encoder,
struct intel_crtc_state *pipe_config,
struct drm_connector_state *conn_state);
diff --git a/drivers/gpu/drm/i915/display/intel_hotplug.c b/drivers/gpu/drm/i915/display/intel_hotplug.c
index fc29046d48ea..99d3a3c7989e 100644
--- a/drivers/gpu/drm/i915/display/intel_hotplug.c
+++ b/drivers/gpu/drm/i915/display/intel_hotplug.c
@@ -302,7 +302,7 @@ intel_encoder_hotplug(struct intel_encoder *encoder,
static bool intel_encoder_has_hpd_pulse(struct intel_encoder *encoder)
{
return intel_encoder_is_dig_port(encoder) &&
- enc_to_dig_port(&encoder->base)->hpd_pulse != NULL;
+ enc_to_dig_port(encoder)->hpd_pulse != NULL;
}
static void i915_digport_work_func(struct work_struct *work)
@@ -335,7 +335,7 @@ static void i915_digport_work_func(struct work_struct *work)
if (!long_hpd && !short_hpd)
continue;
- dig_port = enc_to_dig_port(&encoder->base);
+ dig_port = enc_to_dig_port(encoder);
ret = dig_port->hpd_pulse(dig_port, long_hpd);
if (ret == IRQ_NONE) {
diff --git a/drivers/gpu/drm/i915/display/intel_lspcon.c b/drivers/gpu/drm/i915/display/intel_lspcon.c
index f8f1308643a9..d807c5648c87 100644
--- a/drivers/gpu/drm/i915/display/intel_lspcon.c
+++ b/drivers/gpu/drm/i915/display/intel_lspcon.c
@@ -189,7 +189,7 @@ void lspcon_ycbcr420_config(struct drm_connector *connector,
{
const struct drm_display_info *info = &connector->display_info;
const struct drm_display_mode *adjusted_mode =
- &crtc_state->base.adjusted_mode;
+ &crtc_state->hw.adjusted_mode;
if (drm_mode_is_420_only(info, adjusted_mode) &&
connector->ycbcr_420_allowed) {
@@ -434,8 +434,8 @@ void lspcon_write_infoframe(struct intel_encoder *encoder,
const void *frame, ssize_t len)
{
bool ret;
- struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
- struct intel_lspcon *lspcon = enc_to_intel_lspcon(&encoder->base);
+ struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
+ struct intel_lspcon *lspcon = enc_to_intel_lspcon(encoder);
/* LSPCON only needs AVI IF */
if (type != HDMI_INFOFRAME_TYPE_AVI)
@@ -472,10 +472,10 @@ void lspcon_set_infoframes(struct intel_encoder *encoder,
ssize_t ret;
union hdmi_infoframe frame;
u8 buf[VIDEO_DIP_DATA_SIZE];
- struct intel_digital_port *dig_port = enc_to_dig_port(&encoder->base);
+ struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
struct intel_lspcon *lspcon = &dig_port->lspcon;
const struct drm_display_mode *adjusted_mode =
- &crtc_state->base.adjusted_mode;
+ &crtc_state->hw.adjusted_mode;
if (!lspcon->active) {
DRM_ERROR("Writing infoframes while LSPCON disabled ?\n");
@@ -522,7 +522,7 @@ u32 lspcon_infoframes_enabled(struct intel_encoder *encoder,
const struct intel_crtc_state *pipe_config)
{
/* FIXME actually read this from the hw */
- return enc_to_intel_lspcon(&encoder->base)->active;
+ return enc_to_intel_lspcon(encoder)->active;
}
void lspcon_resume(struct intel_lspcon *lspcon)
diff --git a/drivers/gpu/drm/i915/display/intel_lvds.c b/drivers/gpu/drm/i915/display/intel_lvds.c
index b1bc78623647..10696bb99dcf 100644
--- a/drivers/gpu/drm/i915/display/intel_lvds.c
+++ b/drivers/gpu/drm/i915/display/intel_lvds.c
@@ -135,7 +135,7 @@ static void intel_lvds_get_config(struct intel_encoder *encoder,
else
flags |= DRM_MODE_FLAG_PVSYNC;
- pipe_config->base.adjusted_mode.flags |= flags;
+ pipe_config->hw.adjusted_mode.flags |= flags;
if (INTEL_GEN(dev_priv) < 5)
pipe_config->gmch_pfit.lvds_border_bits =
@@ -148,7 +148,7 @@ static void intel_lvds_get_config(struct intel_encoder *encoder,
pipe_config->gmch_pfit.control |= tmp & PANEL_8TO6_DITHER_ENABLE;
}
- pipe_config->base.adjusted_mode.crtc_clock = pipe_config->port_clock;
+ pipe_config->hw.adjusted_mode.crtc_clock = pipe_config->port_clock;
}
static void intel_lvds_pps_get_hw_state(struct drm_i915_private *dev_priv,
@@ -230,8 +230,8 @@ static void intel_pre_enable_lvds(struct intel_encoder *encoder,
{
struct intel_lvds_encoder *lvds_encoder = to_lvds_encoder(&encoder->base);
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- struct intel_crtc *crtc = to_intel_crtc(pipe_config->base.crtc);
- const struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;
+ struct intel_crtc *crtc = to_intel_crtc(pipe_config->uapi.crtc);
+ const struct drm_display_mode *adjusted_mode = &pipe_config->hw.adjusted_mode;
enum pipe pipe = crtc->pipe;
u32 temp;
@@ -392,8 +392,8 @@ static int intel_lvds_compute_config(struct intel_encoder *intel_encoder,
to_lvds_encoder(&intel_encoder->base);
struct intel_connector *intel_connector =
lvds_encoder->attached_connector;
- struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;
- struct intel_crtc *intel_crtc = to_intel_crtc(pipe_config->base.crtc);
+ struct drm_display_mode *adjusted_mode = &pipe_config->hw.adjusted_mode;
+ struct intel_crtc *intel_crtc = to_intel_crtc(pipe_config->uapi.crtc);
unsigned int lvds_bpp;
/* Should never happen!! */
diff --git a/drivers/gpu/drm/i915/display/intel_opregion.c b/drivers/gpu/drm/i915/display/intel_opregion.c
index 969ade623691..e59b4992ba1b 100644
--- a/drivers/gpu/drm/i915/display/intel_opregion.c
+++ b/drivers/gpu/drm/i915/display/intel_opregion.c
@@ -941,6 +941,13 @@ int intel_opregion_setup(struct drm_i915_private *dev_priv)
if (mboxes & MBOX_ACPI) {
DRM_DEBUG_DRIVER("Public ACPI methods supported\n");
opregion->acpi = base + OPREGION_ACPI_OFFSET;
+ /*
+ * Indicate we handle monitor hotplug events ourselves so we do
+ * not need ACPI notifications for them. Disabling these avoids
+ * triggering the AML code doing the notification, which may be
+ * broken as Windows also seems to disable these.
+ */
+ opregion->acpi->chpd = 1;
}
if (mboxes & MBOX_SWSCI) {
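/*
 * Sketch of where chpd lives (layout abridged and assumed): the ACPI
 * mailbox at OPREGION_ACPI_OFFSET carries a "current hotplug enable"
 * word; writing 1 tells firmware the driver owns hotplug, so its AML
 * need not raise display hotplug notifications.
 */
struct opregion_acpi_sketch {
	/* ... earlier mailbox fields elided ... */
	u32 chpd;	/* 1 = driver handles hotplug itself */
	/* ... */
};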
diff --git a/drivers/gpu/drm/i915/display/intel_overlay.c b/drivers/gpu/drm/i915/display/intel_overlay.c
index 8a98a1aa7adc..e40c3a0e2cd7 100644
--- a/drivers/gpu/drm/i915/display/intel_overlay.c
+++ b/drivers/gpu/drm/i915/display/intel_overlay.c
@@ -677,8 +677,8 @@ static void update_colorkey(struct intel_overlay *overlay,
if (overlay->color_key_enabled)
flags |= DST_KEY_ENABLE;
- if (state->base.visible)
- format = state->base.fb->format->format;
+ if (state->uapi.visible)
+ format = state->hw.fb->format->format;
switch (format) {
case DRM_FORMAT_C8:
@@ -767,10 +767,8 @@ static int intel_overlay_do_put_image(struct intel_overlay *overlay,
atomic_inc(&dev_priv->gpu_error.pending_fb_pin);
- i915_gem_object_lock(new_bo);
vma = i915_gem_object_pin_to_display_plane(new_bo,
0, NULL, PIN_MAPPABLE);
- i915_gem_object_unlock(new_bo);
if (IS_ERR(vma)) {
ret = PTR_ERR(vma);
goto out_pin_section;
@@ -1335,12 +1333,14 @@ err_put_bo:
void intel_overlay_setup(struct drm_i915_private *dev_priv)
{
struct intel_overlay *overlay;
+ struct intel_engine_cs *engine;
int ret;
if (!HAS_OVERLAY(dev_priv))
return;
- if (!HAS_ENGINE(dev_priv, RCS0))
+ engine = dev_priv->engine[RCS0];
+ if (!engine || !engine->kernel_context)
return;
overlay = kzalloc(sizeof(*overlay), GFP_KERNEL);
@@ -1348,7 +1348,7 @@ void intel_overlay_setup(struct drm_i915_private *dev_priv)
return;
overlay->i915 = dev_priv;
- overlay->context = dev_priv->engine[RCS0]->kernel_context;
+ overlay->context = engine->kernel_context;
GEM_BUG_ON(!overlay->context);
overlay->color_key = 0x0101fe;
diff --git a/drivers/gpu/drm/i915/display/intel_panel.c b/drivers/gpu/drm/i915/display/intel_panel.c
index bc14e9c0285a..7b3ec6eb3382 100644
--- a/drivers/gpu/drm/i915/display/intel_panel.c
+++ b/drivers/gpu/drm/i915/display/intel_panel.c
@@ -178,7 +178,7 @@ intel_pch_panel_fitting(struct intel_crtc *intel_crtc,
struct intel_crtc_state *pipe_config,
int fitting_mode)
{
- const struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;
+ const struct drm_display_mode *adjusted_mode = &pipe_config->hw.adjusted_mode;
int x = 0, y = 0, width = 0, height = 0;
/* Native modes don't need fitting */
@@ -300,7 +300,7 @@ static inline u32 panel_fitter_scaling(u32 source, u32 target)
static void i965_scale_aspect(struct intel_crtc_state *pipe_config,
u32 *pfit_control)
{
- const struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;
+ const struct drm_display_mode *adjusted_mode = &pipe_config->hw.adjusted_mode;
u32 scaled_width = adjusted_mode->crtc_hdisplay *
pipe_config->pipe_src_h;
u32 scaled_height = pipe_config->pipe_src_w *
@@ -321,7 +321,7 @@ static void i9xx_scale_aspect(struct intel_crtc_state *pipe_config,
u32 *pfit_control, u32 *pfit_pgm_ratios,
u32 *border)
{
- struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;
+ struct drm_display_mode *adjusted_mode = &pipe_config->hw.adjusted_mode;
u32 scaled_width = adjusted_mode->crtc_hdisplay *
pipe_config->pipe_src_h;
u32 scaled_height = pipe_config->pipe_src_w *
@@ -380,7 +380,7 @@ void intel_gmch_panel_fitting(struct intel_crtc *intel_crtc,
{
struct drm_i915_private *dev_priv = to_i915(intel_crtc->base.dev);
u32 pfit_control = 0, pfit_pgm_ratios = 0, border = 0;
- struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;
+ struct drm_display_mode *adjusted_mode = &pipe_config->hw.adjusted_mode;
/* Native modes don't need fitting */
if (adjusted_mode->crtc_hdisplay == pipe_config->pipe_src_w &&
@@ -1047,7 +1047,7 @@ static void vlv_enable_backlight(const struct intel_crtc_state *crtc_state,
struct intel_connector *connector = to_intel_connector(conn_state->connector);
struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
struct intel_panel *panel = &connector->panel;
- enum pipe pipe = to_intel_crtc(crtc_state->base.crtc)->pipe;
+ enum pipe pipe = to_intel_crtc(crtc_state->uapi.crtc)->pipe;
u32 ctl, ctl2;
ctl2 = I915_READ(VLV_BLC_PWM_CTL2(pipe));
@@ -1077,7 +1077,7 @@ static void bxt_enable_backlight(const struct intel_crtc_state *crtc_state,
struct intel_connector *connector = to_intel_connector(conn_state->connector);
struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
struct intel_panel *panel = &connector->panel;
- enum pipe pipe = to_intel_crtc(crtc_state->base.crtc)->pipe;
+ enum pipe pipe = to_intel_crtc(crtc_state->uapi.crtc)->pipe;
u32 pwm_ctl, val;
/* Controller 1 uses the utility pin. */
@@ -1189,7 +1189,7 @@ void intel_panel_enable_backlight(const struct intel_crtc_state *crtc_state,
struct intel_connector *connector = to_intel_connector(conn_state->connector);
struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
struct intel_panel *panel = &connector->panel;
- enum pipe pipe = to_intel_crtc(crtc_state->base.crtc)->pipe;
+ enum pipe pipe = to_intel_crtc(crtc_state->uapi.crtc)->pipe;
if (!panel->backlight.present)
return;
@@ -1840,13 +1840,22 @@ static int pwm_setup_backlight(struct intel_connector *connector,
enum pipe pipe)
{
struct drm_device *dev = connector->base.dev;
+ struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_panel *panel = &connector->panel;
+ const char *desc;
int retval;
- /* Get the PWM chip for backlight control */
- panel->backlight.pwm = pwm_get(dev->dev, "pwm_backlight");
+ /* Get the right PWM chip for DSI backlight according to VBT */
+ if (dev_priv->vbt.dsi.config->pwm_blc == PPS_BLC_PMIC) {
+ panel->backlight.pwm = pwm_get(dev->dev, "pwm_pmic_backlight");
+ desc = "PMIC";
+ } else {
+ panel->backlight.pwm = pwm_get(dev->dev, "pwm_soc_backlight");
+ desc = "SoC";
+ }
+
if (IS_ERR(panel->backlight.pwm)) {
- DRM_ERROR("Failed to own the pwm chip\n");
+ DRM_ERROR("Failed to get the %s PWM chip\n", desc);
panel->backlight.pwm = NULL;
return -ENODEV;
}
@@ -1873,6 +1882,7 @@ static int pwm_setup_backlight(struct intel_connector *connector,
CRC_PMIC_PWM_PERIOD_NS);
panel->backlight.enabled = panel->backlight.level != 0;
+ DRM_INFO("Using %s PWM for LCD backlight control\n", desc);
return 0;
}
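/*
 * Sketch (not from this patch) of how pwm_get() resolves the consumer
 * names used above: board code registers lookup entries mapping a
 * provider and con_id to a device. Provider and device names here are
 * hypothetical placeholders.
 */
static struct pwm_lookup backlight_pwm_lookup[] = {
	PWM_LOOKUP("pmic_pwm_chip", 0, "0000:00:02.0",
		   "pwm_pmic_backlight", 0, PWM_POLARITY_NORMAL),
	PWM_LOOKUP("soc_pwm_chip", 0, "0000:00:02.0",
		   "pwm_soc_backlight", 0, PWM_POLARITY_NORMAL),
};

/* pwm_add_table(backlight_pwm_lookup, ARRAY_SIZE(backlight_pwm_lookup)); */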
diff --git a/drivers/gpu/drm/i915/display/intel_pipe_crc.c b/drivers/gpu/drm/i915/display/intel_pipe_crc.c
index 6260a2082719..520408e83681 100644
--- a/drivers/gpu/drm/i915/display/intel_pipe_crc.c
+++ b/drivers/gpu/drm/i915/display/intel_pipe_crc.c
@@ -98,7 +98,7 @@ static int i9xx_pipe_crc_auto_source(struct drm_i915_private *dev_priv,
break;
case INTEL_OUTPUT_DP:
case INTEL_OUTPUT_EDP:
- dig_port = enc_to_dig_port(&encoder->base);
+ dig_port = enc_to_dig_port(encoder);
switch (dig_port->base.port) {
case PORT_B:
*source = INTEL_PIPE_CRC_SOURCE_DP_B;
@@ -309,13 +309,13 @@ retry:
goto put_state;
}
- pipe_config->base.mode_changed = pipe_config->has_psr;
+ pipe_config->uapi.mode_changed = pipe_config->has_psr;
pipe_config->crc_enabled = enable;
if (IS_HASWELL(dev_priv) &&
- pipe_config->base.active && crtc->pipe == PIPE_A &&
+ pipe_config->hw.active && crtc->pipe == PIPE_A &&
pipe_config->cpu_transcoder == TRANSCODER_EDP)
- pipe_config->base.mode_changed = true;
+ pipe_config->uapi.mode_changed = true;
ret = drm_atomic_commit(state);
diff --git a/drivers/gpu/drm/i915/display/intel_psr.c b/drivers/gpu/drm/i915/display/intel_psr.c
index 6a9f322d3fca..89c9cf5f38d2 100644
--- a/drivers/gpu/drm/i915/display/intel_psr.c
+++ b/drivers/gpu/drm/i915/display/intel_psr.c
@@ -26,6 +26,7 @@
#include "display/intel_dp.h"
#include "i915_drv.h"
+#include "intel_atomic.h"
#include "intel_display_types.h"
#include "intel_psr.h"
#include "intel_sprite.h"
@@ -401,7 +402,9 @@ static void intel_psr_enable_sink(struct intel_dp *intel_dp)
/* Enable ALPM at sink for psr2 */
if (dev_priv->psr.psr2_enabled) {
drm_dp_dpcd_writeb(&intel_dp->aux, DP_RECEIVER_ALPM_CONFIG,
- DP_ALPM_ENABLE);
+ DP_ALPM_ENABLE |
+ DP_ALPM_LOCK_ERROR_IRQ_HPD_ENABLE);
+
dpcd_val |= DP_PSR_ENABLE_PSR2 | DP_PSR_IRQ_HPD_WITH_CRC_ERRORS;
} else {
if (dev_priv->psr.link_standby)
@@ -536,11 +539,11 @@ transcoder_has_psr2(struct drm_i915_private *dev_priv, enum transcoder trans)
static u32 intel_get_frame_time_us(const struct intel_crtc_state *cstate)
{
- if (!cstate || !cstate->base.active)
+ if (!cstate || !cstate->hw.active)
return 0;
return DIV_ROUND_UP(1000 * 1000,
- drm_mode_vrefresh(&cstate->base.adjusted_mode));
+ drm_mode_vrefresh(&cstate->hw.adjusted_mode));
}
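/*
 * Worked example for intel_get_frame_time_us(): with a 60 Hz
 * adjusted mode it returns DIV_ROUND_UP(1000 * 1000, 60) = 16667 us,
 * the value this patch later stores as dev_priv->psr.dc3co_exit_delay.
 */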
static void psr2_program_idle_frames(struct drm_i915_private *dev_priv,
@@ -605,9 +608,9 @@ static bool intel_psr2_config_valid(struct intel_dp *intel_dp,
struct intel_crtc_state *crtc_state)
{
struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
- int crtc_hdisplay = crtc_state->base.adjusted_mode.crtc_hdisplay;
- int crtc_vdisplay = crtc_state->base.adjusted_mode.crtc_vdisplay;
- int psr_max_h = 0, psr_max_v = 0;
+ int crtc_hdisplay = crtc_state->hw.adjusted_mode.crtc_hdisplay;
+ int crtc_vdisplay = crtc_state->hw.adjusted_mode.crtc_vdisplay;
+ int psr_max_h = 0, psr_max_v = 0, max_bpp = 0;
if (!dev_priv->psr.sink_psr2_support)
return false;
@@ -631,12 +634,15 @@ static bool intel_psr2_config_valid(struct intel_dp *intel_dp,
if (INTEL_GEN(dev_priv) >= 12) {
psr_max_h = 5120;
psr_max_v = 3200;
+ max_bpp = 30;
} else if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv)) {
psr_max_h = 4096;
psr_max_v = 2304;
+ max_bpp = 24;
} else if (IS_GEN(dev_priv, 9)) {
psr_max_h = 3640;
psr_max_v = 2304;
+ max_bpp = 24;
}
if (crtc_hdisplay > psr_max_h || crtc_vdisplay > psr_max_v) {
@@ -646,6 +652,12 @@ static bool intel_psr2_config_valid(struct intel_dp *intel_dp,
return false;
}
+ if (crtc_state->pipe_bpp > max_bpp) {
+ DRM_DEBUG_KMS("PSR2 not enabled, pipe bpp %d > max supported %d\n",
+ crtc_state->pipe_bpp, max_bpp);
+ return false;
+ }
+
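/*
 * PSR2 limits consolidated from the checks above:
 *   gen12+:           5120x3200, pipe_bpp <= 30
 *   gen10/11 and GLK: 4096x2304, pipe_bpp <= 24
 *   gen9:             3640x2304, pipe_bpp <= 24
 */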
/*
* HW sends SU blocks of size four scan lines, which means the starting
* X coordinate and Y granularity requirements will always be met. We
@@ -672,7 +684,7 @@ void intel_psr_compute_config(struct intel_dp *intel_dp,
struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
const struct drm_display_mode *adjusted_mode =
- &crtc_state->base.adjusted_mode;
+ &crtc_state->hw.adjusted_mode;
int psr_setup_time;
if (!CAN_PSR(dev_priv))
@@ -792,7 +804,7 @@ static void intel_psr_enable_locked(struct drm_i915_private *dev_priv,
dev_priv->psr.psr2_enabled = intel_psr2_enabled(dev_priv, crtc_state);
dev_priv->psr.busy_frontbuffer_bits = 0;
- dev_priv->psr.pipe = to_intel_crtc(crtc_state->base.crtc)->pipe;
+ dev_priv->psr.pipe = to_intel_crtc(crtc_state->uapi.crtc)->pipe;
dev_priv->psr.dc3co_enabled = !!crtc_state->dc3co_exitline;
dev_priv->psr.dc3co_exit_delay = intel_get_frame_time_us(crtc_state);
dev_priv->psr.transcoder = crtc_state->cpu_transcoder;
@@ -924,6 +936,9 @@ static void intel_psr_disable_locked(struct intel_dp *intel_dp)
/* Disable PSR on Sink */
drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_EN_CFG, 0);
+ if (dev_priv->psr.psr2_enabled)
+ drm_dp_dpcd_writeb(&intel_dp->aux, DP_RECEIVER_ALPM_CONFIG, 0);
+
dev_priv->psr.enabled = false;
}
@@ -1039,7 +1054,7 @@ unlock:
int intel_psr_wait_for_idle(const struct intel_crtc_state *new_crtc_state,
u32 *out_value)
{
- struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->base.crtc);
+ struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->uapi.crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
if (!dev_priv->psr.enabled || !new_crtc_state->has_psr)
@@ -1096,7 +1111,7 @@ static int intel_psr_fastset_force(struct drm_i915_private *dev_priv)
struct drm_device *dev = &dev_priv->drm;
struct drm_modeset_acquire_ctx ctx;
struct drm_atomic_state *state;
- struct drm_crtc *crtc;
+ struct intel_crtc *crtc;
int err;
state = drm_atomic_state_alloc(dev);
@@ -1107,21 +1122,18 @@ static int intel_psr_fastset_force(struct drm_i915_private *dev_priv)
state->acquire_ctx = &ctx;
retry:
- drm_for_each_crtc(crtc, dev) {
- struct drm_crtc_state *crtc_state;
- struct intel_crtc_state *intel_crtc_state;
+ for_each_intel_crtc(dev, crtc) {
+ struct intel_crtc_state *crtc_state =
+ intel_atomic_get_crtc_state(state, crtc);
- crtc_state = drm_atomic_get_crtc_state(state, crtc);
if (IS_ERR(crtc_state)) {
err = PTR_ERR(crtc_state);
goto error;
}
- intel_crtc_state = to_intel_crtc_state(crtc_state);
-
- if (crtc_state->active && intel_crtc_state->has_psr) {
+ if (crtc_state->hw.active && crtc_state->has_psr) {
/* Mark mode as changed to trigger a pipe->update() */
- crtc_state->mode_changed = true;
+ crtc_state->uapi.mode_changed = true;
break;
}
}
@@ -1379,11 +1391,80 @@ void intel_psr_init(struct drm_i915_private *dev_priv)
mutex_init(&dev_priv->psr.lock);
}
-void intel_psr_short_pulse(struct intel_dp *intel_dp)
+static int psr_get_status_and_error_status(struct intel_dp *intel_dp,
+ u8 *status, u8 *error_status)
+{
+ struct drm_dp_aux *aux = &intel_dp->aux;
+ int ret;
+
+ ret = drm_dp_dpcd_readb(aux, DP_PSR_STATUS, status);
+ if (ret != 1)
+ return ret;
+
+ ret = drm_dp_dpcd_readb(aux, DP_PSR_ERROR_STATUS, error_status);
+ if (ret != 1)
+ return ret;
+
+ *status = *status & DP_PSR_SINK_STATE_MASK;
+
+ return 0;
+}
+
+static void psr_alpm_check(struct intel_dp *intel_dp)
+{
+ struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+ struct drm_dp_aux *aux = &intel_dp->aux;
+ struct i915_psr *psr = &dev_priv->psr;
+ u8 val;
+ int r;
+
+ if (!psr->psr2_enabled)
+ return;
+
+ r = drm_dp_dpcd_readb(aux, DP_RECEIVER_ALPM_STATUS, &val);
+ if (r != 1) {
+ DRM_ERROR("Error reading ALPM status\n");
+ return;
+ }
+
+ if (val & DP_ALPM_LOCK_TIMEOUT_ERROR) {
+ intel_psr_disable_locked(intel_dp);
+ psr->sink_not_reliable = true;
+ DRM_DEBUG_KMS("ALPM lock timeout error, disabling PSR\n");
+
+ /* Clear the error by writing the status back */
+ drm_dp_dpcd_writeb(aux, DP_RECEIVER_ALPM_STATUS, val);
+ }
+}
+
+static void psr_capability_changed_check(struct intel_dp *intel_dp)
{
struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
struct i915_psr *psr = &dev_priv->psr;
u8 val;
+ int r;
+
+ r = drm_dp_dpcd_readb(&intel_dp->aux, DP_PSR_ESI, &val);
+ if (r != 1) {
+ DRM_ERROR("Error reading DP_PSR_ESI\n");
+ return;
+ }
+
+ if (val & DP_PSR_CAPS_CHANGE) {
+ intel_psr_disable_locked(intel_dp);
+ psr->sink_not_reliable = true;
+ DRM_DEBUG_KMS("Sink PSR capability changed, disabling PSR\n");
+
+ /* Clear the capability-changed flag by writing it back */
+ drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_ESI, val);
+ }
+}
+
+void intel_psr_short_pulse(struct intel_dp *intel_dp)
+{
+ struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+ struct i915_psr *psr = &dev_priv->psr;
+ u8 status, error_status;
const u8 errors = DP_PSR_RFB_STORAGE_ERROR |
DP_PSR_VSC_SDP_UNCORRECTABLE_ERROR |
DP_PSR_LINK_CRC_ERROR;
@@ -1396,38 +1477,34 @@ void intel_psr_short_pulse(struct intel_dp *intel_dp)
if (!psr->enabled || psr->dp != intel_dp)
goto exit;
- if (drm_dp_dpcd_readb(&intel_dp->aux, DP_PSR_STATUS, &val) != 1) {
- DRM_ERROR("PSR_STATUS dpcd read failed\n");
+ if (psr_get_status_and_error_status(intel_dp, &status, &error_status)) {
+ DRM_ERROR("Error reading PSR status or error status\n");
goto exit;
}
- if ((val & DP_PSR_SINK_STATE_MASK) == DP_PSR_SINK_INTERNAL_ERROR) {
- DRM_DEBUG_KMS("PSR sink internal error, disabling PSR\n");
+ if (status == DP_PSR_SINK_INTERNAL_ERROR || (error_status & errors)) {
intel_psr_disable_locked(intel_dp);
psr->sink_not_reliable = true;
}
- if (drm_dp_dpcd_readb(&intel_dp->aux, DP_PSR_ERROR_STATUS, &val) != 1) {
- DRM_ERROR("PSR_ERROR_STATUS dpcd read failed\n");
- goto exit;
- }
-
- if (val & DP_PSR_RFB_STORAGE_ERROR)
+ if (status == DP_PSR_SINK_INTERNAL_ERROR && !error_status)
+ DRM_DEBUG_KMS("PSR sink internal error, disabling PSR\n");
+ if (error_status & DP_PSR_RFB_STORAGE_ERROR)
DRM_DEBUG_KMS("PSR RFB storage error, disabling PSR\n");
- if (val & DP_PSR_VSC_SDP_UNCORRECTABLE_ERROR)
+ if (error_status & DP_PSR_VSC_SDP_UNCORRECTABLE_ERROR)
DRM_DEBUG_KMS("PSR VSC SDP uncorrectable error, disabling PSR\n");
- if (val & DP_PSR_LINK_CRC_ERROR)
+ if (error_status & DP_PSR_LINK_CRC_ERROR)
DRM_DEBUG_KMS("PSR Link CRC error, disabling PSR\n");
- if (val & ~errors)
+ if (error_status & ~errors)
DRM_ERROR("PSR_ERROR_STATUS unhandled errors %x\n",
- val & ~errors);
- if (val & errors) {
- intel_psr_disable_locked(intel_dp);
- psr->sink_not_reliable = true;
- }
+ error_status & ~errors);
/* clear status register */
- drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_ERROR_STATUS, val);
+ drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_ERROR_STATUS, error_status);
+
+ psr_alpm_check(intel_dp);
+ psr_capability_changed_check(intel_dp);
+
exit:
mutex_unlock(&psr->lock);
}
@@ -1446,3 +1523,27 @@ bool intel_psr_enabled(struct intel_dp *intel_dp)
return ret;
}
+
+void intel_psr_atomic_check(struct drm_connector *connector,
+ struct drm_connector_state *old_state,
+ struct drm_connector_state *new_state)
+{
+ struct drm_i915_private *dev_priv = to_i915(connector->dev);
+ struct intel_connector *intel_connector;
+ struct intel_digital_port *dig_port;
+ struct drm_crtc_state *crtc_state;
+
+ if (!CAN_PSR(dev_priv) || !new_state->crtc ||
+ dev_priv->psr.initially_probed)
+ return;
+
+ intel_connector = to_intel_connector(connector);
+ dig_port = enc_to_dig_port(intel_connector->encoder);
+ if (dev_priv->psr.dp != &dig_port->dp)
+ return;
+
+ crtc_state = drm_atomic_get_new_crtc_state(new_state->state,
+ new_state->crtc);
+ crtc_state->mode_changed = true;
+ dev_priv->psr.initially_probed = true;
+}
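/*
 * Plausible call site (the hook-up lands outside this hunk; names here
 * are illustrative): a connector atomic_check would forward the old and
 * new connector states so the first commit after boot forces a modeset,
 * letting PSR be enabled on an already-running pipe.
 */
static int example_connector_atomic_check(struct drm_connector *conn,
					  struct drm_atomic_state *state)
{
	struct drm_connector_state *old_state =
		drm_atomic_get_old_connector_state(state, conn);
	struct drm_connector_state *new_state =
		drm_atomic_get_new_connector_state(state, conn);

	intel_psr_atomic_check(conn, old_state, new_state);
	return 0;
}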
diff --git a/drivers/gpu/drm/i915/display/intel_psr.h b/drivers/gpu/drm/i915/display/intel_psr.h
index 46e4de8b8cd5..c58a1d438808 100644
--- a/drivers/gpu/drm/i915/display/intel_psr.h
+++ b/drivers/gpu/drm/i915/display/intel_psr.h
@@ -8,6 +8,8 @@
#include "intel_frontbuffer.h"
+struct drm_connector;
+struct drm_connector_state;
struct drm_i915_private;
struct intel_crtc_state;
struct intel_dp;
@@ -35,5 +37,8 @@ void intel_psr_short_pulse(struct intel_dp *intel_dp);
int intel_psr_wait_for_idle(const struct intel_crtc_state *new_crtc_state,
u32 *out_value);
bool intel_psr_enabled(struct intel_dp *intel_dp);
+void intel_psr_atomic_check(struct drm_connector *connector,
+ struct drm_connector_state *old_state,
+ struct drm_connector_state *new_state);
#endif /* __INTEL_PSR_H__ */
diff --git a/drivers/gpu/drm/i915/display/intel_sdvo.c b/drivers/gpu/drm/i915/display/intel_sdvo.c
index 5b7f4baf7348..e8819fd21e03 100644
--- a/drivers/gpu/drm/i915/display/intel_sdvo.c
+++ b/drivers/gpu/drm/i915/display/intel_sdvo.c
@@ -180,7 +180,7 @@ static struct intel_sdvo *to_sdvo(struct intel_encoder *encoder)
return container_of(encoder, struct intel_sdvo, base);
}
-static struct intel_sdvo *intel_attached_sdvo(struct drm_connector *connector)
+static struct intel_sdvo *intel_attached_sdvo(struct intel_connector *connector)
{
return to_sdvo(intel_attached_encoder(connector));
}
@@ -1087,7 +1087,7 @@ static bool intel_sdvo_compute_avi_infoframe(struct intel_sdvo *intel_sdvo,
{
struct hdmi_avi_infoframe *frame = &crtc_state->infoframes.avi.avi;
const struct drm_display_mode *adjusted_mode =
- &crtc_state->base.adjusted_mode;
+ &crtc_state->hw.adjusted_mode;
int ret;
if (!crtc_state->has_hdmi_sink)
@@ -1276,8 +1276,8 @@ static int intel_sdvo_compute_config(struct intel_encoder *encoder,
to_intel_sdvo_connector_state(conn_state);
struct intel_sdvo_connector *intel_sdvo_connector =
to_intel_sdvo_connector(conn_state->connector);
- struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;
- struct drm_display_mode *mode = &pipe_config->base.mode;
+ struct drm_display_mode *adjusted_mode = &pipe_config->hw.adjusted_mode;
+ struct drm_display_mode *mode = &pipe_config->hw.mode;
DRM_DEBUG_KMS("forcing bpc to 8 for SDVO\n");
pipe_config->pipe_bpp = 8*3;
@@ -1349,9 +1349,9 @@ static int intel_sdvo_compute_config(struct intel_encoder *encoder,
if (IS_TV(intel_sdvo_connector))
i9xx_adjust_sdvo_tv_clock(pipe_config);
- /* Set user selected PAR to incoming mode's member */
- if (intel_sdvo_connector->is_hdmi)
- adjusted_mode->picture_aspect_ratio = conn_state->picture_aspect_ratio;
+ if (conn_state->picture_aspect_ratio)
+ adjusted_mode->picture_aspect_ratio =
+ conn_state->picture_aspect_ratio;
if (!intel_sdvo_compute_avi_infoframe(intel_sdvo,
pipe_config, conn_state)) {
@@ -1429,13 +1429,13 @@ static void intel_sdvo_pre_enable(struct intel_encoder *intel_encoder,
const struct drm_connector_state *conn_state)
{
struct drm_i915_private *dev_priv = to_i915(intel_encoder->base.dev);
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
- const struct drm_display_mode *adjusted_mode = &crtc_state->base.adjusted_mode;
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+ const struct drm_display_mode *adjusted_mode = &crtc_state->hw.adjusted_mode;
const struct intel_sdvo_connector_state *sdvo_state =
to_intel_sdvo_connector_state(conn_state);
const struct intel_sdvo_connector *intel_sdvo_connector =
to_intel_sdvo_connector(conn_state->connector);
- const struct drm_display_mode *mode = &crtc_state->base.mode;
+ const struct drm_display_mode *mode = &crtc_state->hw.mode;
struct intel_sdvo *intel_sdvo = to_sdvo(intel_encoder);
u32 sdvox;
struct intel_sdvo_in_out_map in_out;
@@ -1551,7 +1551,7 @@ static bool intel_sdvo_connector_get_hw_state(struct intel_connector *connector)
{
struct intel_sdvo_connector *intel_sdvo_connector =
to_intel_sdvo_connector(&connector->base);
- struct intel_sdvo *intel_sdvo = intel_attached_sdvo(&connector->base);
+ struct intel_sdvo *intel_sdvo = intel_attached_sdvo(connector);
u16 active_outputs = 0;
intel_sdvo_get_active_outputs(intel_sdvo, &active_outputs);
@@ -1629,7 +1629,7 @@ static void intel_sdvo_get_config(struct intel_encoder *encoder,
flags |= DRM_MODE_FLAG_NVSYNC;
}
- pipe_config->base.adjusted_mode.flags |= flags;
+ pipe_config->hw.adjusted_mode.flags |= flags;
/*
* pixel multiplier readout is tricky: Only on i915g/gm it is stored in
@@ -1649,7 +1649,7 @@ static void intel_sdvo_get_config(struct intel_encoder *encoder,
if (pipe_config->pixel_multiplier)
dotclock /= pipe_config->pixel_multiplier;
- pipe_config->base.adjusted_mode.crtc_clock = dotclock;
+ pipe_config->hw.adjusted_mode.crtc_clock = dotclock;
/* Cross check the port pixel multiplier with the sdvo encoder state. */
if (intel_sdvo_get_value(intel_sdvo, SDVO_CMD_GET_CLOCK_RATE_MULT,
@@ -1701,7 +1701,7 @@ static void intel_sdvo_enable_audio(struct intel_sdvo *intel_sdvo,
const struct drm_connector_state *conn_state)
{
const struct drm_display_mode *adjusted_mode =
- &crtc_state->base.adjusted_mode;
+ &crtc_state->hw.adjusted_mode;
struct drm_connector *connector = conn_state->connector;
u8 *eld = connector->eld;
@@ -1723,7 +1723,7 @@ static void intel_disable_sdvo(struct intel_encoder *encoder,
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_sdvo *intel_sdvo = to_sdvo(encoder);
- struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->base.crtc);
+ struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->uapi.crtc);
u32 temp;
if (old_crtc_state->has_audio)
@@ -1785,7 +1785,7 @@ static void intel_enable_sdvo(struct intel_encoder *encoder,
struct drm_device *dev = encoder->base.dev;
struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_sdvo *intel_sdvo = to_sdvo(encoder);
- struct intel_crtc *intel_crtc = to_intel_crtc(pipe_config->base.crtc);
+ struct intel_crtc *intel_crtc = to_intel_crtc(pipe_config->uapi.crtc);
u32 temp;
bool input1, input2;
int i;
@@ -1823,7 +1823,7 @@ static enum drm_mode_status
intel_sdvo_mode_valid(struct drm_connector *connector,
struct drm_display_mode *mode)
{
- struct intel_sdvo *intel_sdvo = intel_attached_sdvo(connector);
+ struct intel_sdvo *intel_sdvo = intel_attached_sdvo(to_intel_connector(connector));
struct intel_sdvo_connector *intel_sdvo_connector =
to_intel_sdvo_connector(connector);
int max_dotclk = to_i915(connector->dev)->max_dotclk_freq;
@@ -1941,7 +1941,7 @@ intel_sdvo_multifunc_encoder(struct intel_sdvo *intel_sdvo)
static struct edid *
intel_sdvo_get_edid(struct drm_connector *connector)
{
- struct intel_sdvo *sdvo = intel_attached_sdvo(connector);
+ struct intel_sdvo *sdvo = intel_attached_sdvo(to_intel_connector(connector));
return drm_get_edid(connector, &sdvo->ddc);
}
@@ -1959,7 +1959,7 @@ intel_sdvo_get_analog_edid(struct drm_connector *connector)
static enum drm_connector_status
intel_sdvo_tmds_sink_detect(struct drm_connector *connector)
{
- struct intel_sdvo *intel_sdvo = intel_attached_sdvo(connector);
+ struct intel_sdvo *intel_sdvo = intel_attached_sdvo(to_intel_connector(connector));
struct intel_sdvo_connector *intel_sdvo_connector =
to_intel_sdvo_connector(connector);
enum drm_connector_status status;
@@ -2028,7 +2028,7 @@ static enum drm_connector_status
intel_sdvo_detect(struct drm_connector *connector, bool force)
{
u16 response;
- struct intel_sdvo *intel_sdvo = intel_attached_sdvo(connector);
+ struct intel_sdvo *intel_sdvo = intel_attached_sdvo(to_intel_connector(connector));
struct intel_sdvo_connector *intel_sdvo_connector = to_intel_sdvo_connector(connector);
enum drm_connector_status ret;
@@ -2175,7 +2175,7 @@ static const struct drm_display_mode sdvo_tv_modes[] = {
static void intel_sdvo_get_tv_modes(struct drm_connector *connector)
{
- struct intel_sdvo *intel_sdvo = intel_attached_sdvo(connector);
+ struct intel_sdvo *intel_sdvo = intel_attached_sdvo(to_intel_connector(connector));
const struct drm_connector_state *conn_state = connector->state;
struct intel_sdvo_sdtv_resolution_request tv_res;
u32 reply = 0, format_map = 0;
@@ -2215,7 +2215,7 @@ static void intel_sdvo_get_tv_modes(struct drm_connector *connector)
static void intel_sdvo_get_lvds_modes(struct drm_connector *connector)
{
- struct intel_sdvo *intel_sdvo = intel_attached_sdvo(connector);
+ struct intel_sdvo *intel_sdvo = intel_attached_sdvo(to_intel_connector(connector));
struct drm_i915_private *dev_priv = to_i915(connector->dev);
struct drm_display_mode *newmode;
@@ -2379,7 +2379,7 @@ intel_sdvo_connector_atomic_set_property(struct drm_connector *connector,
static int
intel_sdvo_connector_register(struct drm_connector *connector)
{
- struct intel_sdvo *sdvo = intel_attached_sdvo(connector);
+ struct intel_sdvo *sdvo = intel_attached_sdvo(to_intel_connector(connector));
int ret;
ret = intel_connector_register(connector);
@@ -2394,7 +2394,7 @@ intel_sdvo_connector_register(struct drm_connector *connector)
static void
intel_sdvo_connector_unregister(struct drm_connector *connector)
{
- struct intel_sdvo *sdvo = intel_attached_sdvo(connector);
+ struct intel_sdvo *sdvo = intel_attached_sdvo(to_intel_connector(connector));
sysfs_remove_link(&connector->kdev->kobj,
sdvo->ddc.dev.kobj.name);
@@ -2654,7 +2654,6 @@ intel_sdvo_add_hdmi_properties(struct intel_sdvo *intel_sdvo,
intel_attach_broadcast_rgb_property(&connector->base.base);
}
intel_attach_aspect_ratio_property(&connector->base.base);
- connector->base.base.state->picture_aspect_ratio = HDMI_PICTURE_ASPECT_NONE;
}
static struct intel_sdvo_connector *intel_sdvo_connector_alloc(void)
@@ -2933,7 +2932,7 @@ static void intel_sdvo_output_cleanup(struct intel_sdvo *intel_sdvo)
list_for_each_entry_safe(connector, tmp,
&dev->mode_config.connector_list, head) {
- if (intel_attached_encoder(connector) == &intel_sdvo->base) {
+ if (intel_attached_encoder(to_intel_connector(connector)) == &intel_sdvo->base) {
drm_connector_unregister(connector);
intel_connector_destroy(connector);
}
diff --git a/drivers/gpu/drm/i915/display/intel_sprite.c b/drivers/gpu/drm/i915/display/intel_sprite.c
index 72fda0430062..fca77ec1e0dd 100644
--- a/drivers/gpu/drm/i915/display/intel_sprite.c
+++ b/drivers/gpu/drm/i915/display/intel_sprite.c
@@ -81,9 +81,9 @@ int intel_usecs_to_scanlines(const struct drm_display_mode *adjusted_mode,
*/
void intel_pipe_update_start(const struct intel_crtc_state *new_crtc_state)
{
- struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->base.crtc);
+ struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->uapi.crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
- const struct drm_display_mode *adjusted_mode = &new_crtc_state->base.adjusted_mode;
+ const struct drm_display_mode *adjusted_mode = &new_crtc_state->hw.adjusted_mode;
long timeout = msecs_to_jiffies_timeout(1);
int scanline, min, max, vblank_start;
wait_queue_head_t *wq = drm_crtc_vblank_waitqueue(&crtc->base);
@@ -120,7 +120,7 @@ void intel_pipe_update_start(const struct intel_crtc_state *new_crtc_state)
crtc->debug.min_vbl = min;
crtc->debug.max_vbl = max;
- trace_i915_pipe_update_start(crtc);
+ trace_intel_pipe_update_start(crtc);
for (;;) {
/*
@@ -173,7 +173,7 @@ void intel_pipe_update_start(const struct intel_crtc_state *new_crtc_state)
crtc->debug.start_vbl_time = ktime_get();
crtc->debug.start_vbl_count = intel_crtc_get_vblank_counter(crtc);
- trace_i915_pipe_update_vblank_evaded(crtc);
+ trace_intel_pipe_update_vblank_evaded(crtc);
return;
irq_disable:
@@ -190,27 +190,28 @@ irq_disable:
*/
void intel_pipe_update_end(struct intel_crtc_state *new_crtc_state)
{
- struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->base.crtc);
+ struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->uapi.crtc);
enum pipe pipe = crtc->pipe;
int scanline_end = intel_get_crtc_scanline(crtc);
u32 end_vbl_count = intel_crtc_get_vblank_counter(crtc);
ktime_t end_vbl_time = ktime_get();
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
- trace_i915_pipe_update_end(crtc, end_vbl_count, scanline_end);
+ trace_intel_pipe_update_end(crtc, end_vbl_count, scanline_end);
/* We're still in the vblank-evade critical section, so this can't race.
* It would be nicer to grab the vblank count and arm the
* event outside of the critical section - the spinlock might spin for a
* while ... */
- if (new_crtc_state->base.event) {
+ if (new_crtc_state->uapi.event) {
WARN_ON(drm_crtc_vblank_get(&crtc->base) != 0);
spin_lock(&crtc->base.dev->event_lock);
- drm_crtc_arm_vblank_event(&crtc->base, new_crtc_state->base.event);
+ drm_crtc_arm_vblank_event(&crtc->base,
+ new_crtc_state->uapi.event);
spin_unlock(&crtc->base.dev->event_lock);
- new_crtc_state->base.event = NULL;
+ new_crtc_state->uapi.event = NULL;
}
local_irq_enable();
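intel_pipe_update_end() above arms the pageflip event while still inside the vblank-evade critical section, so the event is guaranteed to complete for the vblank this update landed in. A hedged sketch of the same pattern as a standalone helper; drm_crtc_vblank_get() and drm_crtc_arm_vblank_event() are real DRM core calls, while the helper name is illustrative:

#include <drm/drm_vblank.h>

static void arm_flip_event(struct drm_crtc *crtc,
			   struct drm_pending_vblank_event **event)
{
	if (!*event)
		return;

	/* Hold a vblank reference until the event is sent */
	WARN_ON(drm_crtc_vblank_get(crtc) != 0);

	spin_lock(&crtc->dev->event_lock);
	drm_crtc_arm_vblank_event(crtc, *event);
	spin_unlock(&crtc->dev->event_lock);

	/* Ownership of the event now belongs to the vblank machinery */
	*event = NULL;
}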
@@ -239,9 +240,9 @@ void intel_pipe_update_end(struct intel_crtc_state *new_crtc_state)
int intel_plane_check_stride(const struct intel_plane_state *plane_state)
{
- struct intel_plane *plane = to_intel_plane(plane_state->base.plane);
- const struct drm_framebuffer *fb = plane_state->base.fb;
- unsigned int rotation = plane_state->base.rotation;
+ struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
+ const struct drm_framebuffer *fb = plane_state->hw.fb;
+ unsigned int rotation = plane_state->hw.rotation;
u32 stride, max_stride;
/*
@@ -251,7 +252,7 @@ int intel_plane_check_stride(const struct intel_plane_state *plane_state)
* kick in due to the plane being invisible.
*/
if (intel_plane_can_remap(plane_state) &&
- !plane_state->base.visible)
+ !plane_state->uapi.visible)
return 0;
/* FIXME other color planes? */
@@ -271,10 +272,10 @@ int intel_plane_check_stride(const struct intel_plane_state *plane_state)
int intel_plane_check_src_coordinates(struct intel_plane_state *plane_state)
{
- const struct drm_framebuffer *fb = plane_state->base.fb;
- struct drm_rect *src = &plane_state->base.src;
+ const struct drm_framebuffer *fb = plane_state->hw.fb;
+ struct drm_rect *src = &plane_state->uapi.src;
u32 src_x, src_y, src_w, src_h, hsub, vsub;
- bool rotated = drm_rotation_90_or_270(plane_state->base.rotation);
+ bool rotated = drm_rotation_90_or_270(plane_state->hw.rotation);
/*
* Hardware doesn't handle subpixel coordinates.
@@ -327,8 +328,8 @@ skl_plane_ratio(const struct intel_crtc_state *crtc_state,
const struct intel_plane_state *plane_state,
unsigned int *num, unsigned int *den)
{
- struct drm_i915_private *dev_priv = to_i915(plane_state->base.plane->dev);
- const struct drm_framebuffer *fb = plane_state->base.fb;
+ struct drm_i915_private *dev_priv = to_i915(plane_state->uapi.plane->dev);
+ const struct drm_framebuffer *fb = plane_state->hw.fb;
if (fb->format->cpp[0] == 8) {
if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv)) {
@@ -347,7 +348,7 @@ skl_plane_ratio(const struct intel_crtc_state *crtc_state,
static int skl_plane_min_cdclk(const struct intel_crtc_state *crtc_state,
const struct intel_plane_state *plane_state)
{
- struct drm_i915_private *dev_priv = to_i915(plane_state->base.plane->dev);
+ struct drm_i915_private *dev_priv = to_i915(plane_state->uapi.plane->dev);
unsigned int pixel_rate = crtc_state->pixel_rate;
unsigned int src_w, src_h, dst_w, dst_h;
unsigned int num, den;
@@ -358,10 +359,10 @@ static int skl_plane_min_cdclk(const struct intel_crtc_state *crtc_state,
if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv))
den *= 2;
- src_w = drm_rect_width(&plane_state->base.src) >> 16;
- src_h = drm_rect_height(&plane_state->base.src) >> 16;
- dst_w = drm_rect_width(&plane_state->base.dst);
- dst_h = drm_rect_height(&plane_state->base.dst);
+ src_w = drm_rect_width(&plane_state->uapi.src) >> 16;
+ src_h = drm_rect_height(&plane_state->uapi.src) >> 16;
+ dst_w = drm_rect_width(&plane_state->uapi.dst);
+ dst_h = drm_rect_height(&plane_state->uapi.dst);
/* Downscaling limits the maximum pixel rate */
dst_w = min(src_w, dst_w);
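The cdclk requirement computed above scales the pipe pixel rate by the format/platform ratio (num/den) and by the downscale factor. A self-contained sketch of that arithmetic, assuming kHz units and round-up division; the function name and the rounding expression are illustrative, not the driver's:

static unsigned int plane_min_cdclk_khz(unsigned int pixel_rate,
					unsigned int num, unsigned int den,
					unsigned int src_w, unsigned int src_h,
					unsigned int dst_w, unsigned int dst_h)
{
	unsigned long long need, div;

	/* Downscaling limits the maximum pixel rate */
	dst_w = dst_w < src_w ? dst_w : src_w;
	dst_h = dst_h < src_h ? dst_h : src_h;

	need = (unsigned long long)pixel_rate * num * src_w * src_h;
	div = (unsigned long long)den * dst_w * dst_h;

	return (need + div - 1) / div;	/* round up */
}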
@@ -395,28 +396,28 @@ skl_program_scaler(struct intel_plane *plane,
const struct intel_plane_state *plane_state)
{
struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
- const struct drm_framebuffer *fb = plane_state->base.fb;
+ const struct drm_framebuffer *fb = plane_state->hw.fb;
enum pipe pipe = plane->pipe;
int scaler_id = plane_state->scaler_id;
const struct intel_scaler *scaler =
&crtc_state->scaler_state.scalers[scaler_id];
- int crtc_x = plane_state->base.dst.x1;
- int crtc_y = plane_state->base.dst.y1;
- u32 crtc_w = drm_rect_width(&plane_state->base.dst);
- u32 crtc_h = drm_rect_height(&plane_state->base.dst);
+ int crtc_x = plane_state->uapi.dst.x1;
+ int crtc_y = plane_state->uapi.dst.y1;
+ u32 crtc_w = drm_rect_width(&plane_state->uapi.dst);
+ u32 crtc_h = drm_rect_height(&plane_state->uapi.dst);
u16 y_hphase, uv_rgb_hphase;
u16 y_vphase, uv_rgb_vphase;
int hscale, vscale;
- hscale = drm_rect_calc_hscale(&plane_state->base.src,
- &plane_state->base.dst,
+ hscale = drm_rect_calc_hscale(&plane_state->uapi.src,
+ &plane_state->uapi.dst,
0, INT_MAX);
- vscale = drm_rect_calc_vscale(&plane_state->base.src,
- &plane_state->base.dst,
+ vscale = drm_rect_calc_vscale(&plane_state->uapi.src,
+ &plane_state->uapi.dst,
0, INT_MAX);
/* TODO: handle sub-pixel coordinates */
- if (drm_format_info_is_yuv_semiplanar(fb->format) &&
+ if (intel_format_info_is_yuv_semiplanar(fb->format, fb->modifier) &&
!icl_is_hdr_plane(dev_priv, plane->id)) {
y_hphase = skl_scaler_calc_phase(1, hscale, false);
y_vphase = skl_scaler_calc_phase(1, vscale, false);
@@ -541,10 +542,10 @@ icl_program_input_csc(struct intel_plane *plane,
};
const u16 *csc;
- if (plane_state->base.color_range == DRM_COLOR_YCBCR_FULL_RANGE)
- csc = input_csc_matrix[plane_state->base.color_encoding];
+ if (plane_state->hw.color_range == DRM_COLOR_YCBCR_FULL_RANGE)
+ csc = input_csc_matrix[plane_state->hw.color_encoding];
else
- csc = input_csc_matrix_lr[plane_state->base.color_encoding];
+ csc = input_csc_matrix_lr[plane_state->hw.color_encoding];
I915_WRITE_FW(PLANE_INPUT_CSC_COEFF(pipe, plane_id, 0), ROFF(csc[0]) |
GOFF(csc[1]));
@@ -558,7 +559,7 @@ icl_program_input_csc(struct intel_plane *plane,
I915_WRITE_FW(PLANE_INPUT_CSC_PREOFF(pipe, plane_id, 0),
PREOFF_YUV_TO_RGB_HI);
- if (plane_state->base.color_range == DRM_COLOR_YCBCR_FULL_RANGE)
+ if (plane_state->hw.color_range == DRM_COLOR_YCBCR_FULL_RANGE)
I915_WRITE_FW(PLANE_INPUT_CSC_PREOFF(pipe, plane_id, 1), 0);
else
I915_WRITE_FW(PLANE_INPUT_CSC_PREOFF(pipe, plane_id, 1),
@@ -574,7 +575,7 @@ static void
skl_program_plane(struct intel_plane *plane,
const struct intel_crtc_state *crtc_state,
const struct intel_plane_state *plane_state,
- int color_plane, bool slave, u32 plane_ctl)
+ int color_plane)
{
struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
enum plane_id plane_id = plane->id;
@@ -582,19 +583,21 @@ skl_program_plane(struct intel_plane *plane,
const struct drm_intel_sprite_colorkey *key = &plane_state->ckey;
u32 surf_addr = plane_state->color_plane[color_plane].offset;
u32 stride = skl_plane_stride(plane_state, color_plane);
- u32 aux_stride = skl_plane_stride(plane_state, 1);
- int crtc_x = plane_state->base.dst.x1;
- int crtc_y = plane_state->base.dst.y1;
+ const struct drm_framebuffer *fb = plane_state->hw.fb;
+ int aux_plane = intel_main_to_aux_plane(fb, color_plane);
+ u32 aux_dist = plane_state->color_plane[aux_plane].offset - surf_addr;
+ u32 aux_stride = skl_plane_stride(plane_state, aux_plane);
+ int crtc_x = plane_state->uapi.dst.x1;
+ int crtc_y = plane_state->uapi.dst.y1;
u32 x = plane_state->color_plane[color_plane].x;
u32 y = plane_state->color_plane[color_plane].y;
- u32 src_w = drm_rect_width(&plane_state->base.src) >> 16;
- u32 src_h = drm_rect_height(&plane_state->base.src) >> 16;
- struct intel_plane *linked = plane_state->planar_linked_plane;
- const struct drm_framebuffer *fb = plane_state->base.fb;
- u8 alpha = plane_state->base.alpha >> 8;
+ u32 src_w = drm_rect_width(&plane_state->uapi.src) >> 16;
+ u32 src_h = drm_rect_height(&plane_state->uapi.src) >> 16;
+ u8 alpha = plane_state->hw.alpha >> 8;
u32 plane_color_ctl = 0;
unsigned long irqflags;
u32 keymsk, keymax;
+ u32 plane_ctl = plane_state->ctl;
plane_ctl |= skl_plane_ctl_crtc(crtc_state);
@@ -623,29 +626,13 @@ skl_program_plane(struct intel_plane *plane,
I915_WRITE_FW(PLANE_STRIDE(pipe, plane_id), stride);
I915_WRITE_FW(PLANE_POS(pipe, plane_id), (crtc_y << 16) | crtc_x);
I915_WRITE_FW(PLANE_SIZE(pipe, plane_id), (src_h << 16) | src_w);
- I915_WRITE_FW(PLANE_AUX_DIST(pipe, plane_id),
- (plane_state->color_plane[1].offset - surf_addr) | aux_stride);
- if (icl_is_hdr_plane(dev_priv, plane_id)) {
- u32 cus_ctl = 0;
-
- if (linked) {
- /* Enable and use MPEG-2 chroma siting */
- cus_ctl = PLANE_CUS_ENABLE |
- PLANE_CUS_HPHASE_0 |
- PLANE_CUS_VPHASE_SIGN_NEGATIVE |
- PLANE_CUS_VPHASE_0_25;
-
- if (linked->id == PLANE_SPRITE5)
- cus_ctl |= PLANE_CUS_PLANE_7;
- else if (linked->id == PLANE_SPRITE4)
- cus_ctl |= PLANE_CUS_PLANE_6;
- else
- MISSING_CASE(linked->id);
- }
+ if (INTEL_GEN(dev_priv) < 12)
+ aux_dist |= aux_stride;
+ I915_WRITE_FW(PLANE_AUX_DIST(pipe, plane_id), aux_dist);
- I915_WRITE_FW(PLANE_CUS_CTL(pipe, plane_id), cus_ctl);
- }
+ if (icl_is_hdr_plane(dev_priv, plane_id))
+ I915_WRITE_FW(PLANE_CUS_CTL(pipe, plane_id), plane_state->cus_ctl);
if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv))
I915_WRITE_FW(PLANE_COLOR_CTL(pipe, plane_id), plane_color_ctl);
@@ -675,7 +662,7 @@ skl_program_plane(struct intel_plane *plane,
I915_WRITE_FW(PLANE_SURF(pipe, plane_id),
intel_plane_ggtt_offset(plane_state) + surf_addr);
- if (!slave && plane_state->scaler_id >= 0)
+ if (plane_state->scaler_id >= 0)
skl_program_scaler(plane, crtc_state, plane_state);
spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
@@ -688,24 +675,12 @@ skl_update_plane(struct intel_plane *plane,
{
int color_plane = 0;
- if (plane_state->planar_linked_plane) {
- /* Program the UV plane */
+ if (plane_state->planar_linked_plane && !plane_state->planar_slave)
+ /* Program the UV plane on the planar master */
color_plane = 1;
- }
- skl_program_plane(plane, crtc_state, plane_state,
- color_plane, false, plane_state->ctl);
-}
-
-static void
-icl_update_slave(struct intel_plane *plane,
- const struct intel_crtc_state *crtc_state,
- const struct intel_plane_state *plane_state)
-{
- skl_program_plane(plane, crtc_state, plane_state, 0, true,
- plane_state->ctl | PLANE_CTL_YUV420_Y_PLANE);
+ skl_program_plane(plane, crtc_state, plane_state, color_plane);
}
-
static void
skl_disable_plane(struct intel_plane *plane,
const struct intel_crtc_state *crtc_state)
@@ -765,9 +740,9 @@ static void i9xx_plane_linear_gamma(u16 gamma[8])
static void
chv_update_csc(const struct intel_plane_state *plane_state)
{
- struct intel_plane *plane = to_intel_plane(plane_state->base.plane);
+ struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
- const struct drm_framebuffer *fb = plane_state->base.fb;
+ const struct drm_framebuffer *fb = plane_state->hw.fb;
enum plane_id plane_id = plane->id;
/*
* |r| | c0 c1 c2 | |cr|
@@ -793,7 +768,7 @@ chv_update_csc(const struct intel_plane_state *plane_state)
0, 4096, 7601,
},
};
- const s16 *csc = csc_matrix[plane_state->base.color_encoding];
+ const s16 *csc = csc_matrix[plane_state->hw.color_encoding];
/* RGB data seems to always bypass the CSC */
if (!fb->format->is_yuv)
@@ -824,15 +799,15 @@ chv_update_csc(const struct intel_plane_state *plane_state)
static void
vlv_update_clrc(const struct intel_plane_state *plane_state)
{
- struct intel_plane *plane = to_intel_plane(plane_state->base.plane);
+ struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
- const struct drm_framebuffer *fb = plane_state->base.fb;
+ const struct drm_framebuffer *fb = plane_state->hw.fb;
enum pipe pipe = plane->pipe;
enum plane_id plane_id = plane->id;
int contrast, brightness, sh_scale, sh_sin, sh_cos;
if (fb->format->is_yuv &&
- plane_state->base.color_range == DRM_COLOR_YCBCR_LIMITED_RANGE) {
+ plane_state->hw.color_range == DRM_COLOR_YCBCR_LIMITED_RANGE) {
/*
* Expand limited range to full range:
* Contrast is applied first and is used to expand Y range.
@@ -866,7 +841,7 @@ vlv_plane_ratio(const struct intel_crtc_state *crtc_state,
unsigned int *num, unsigned int *den)
{
u8 active_planes = crtc_state->active_planes & ~BIT(PLANE_CURSOR);
- const struct drm_framebuffer *fb = plane_state->base.fb;
+ const struct drm_framebuffer *fb = plane_state->hw.fb;
unsigned int cpp = fb->format->cpp[0];
/*
@@ -952,8 +927,8 @@ static u32 vlv_sprite_ctl_crtc(const struct intel_crtc_state *crtc_state)
static u32 vlv_sprite_ctl(const struct intel_crtc_state *crtc_state,
const struct intel_plane_state *plane_state)
{
- const struct drm_framebuffer *fb = plane_state->base.fb;
- unsigned int rotation = plane_state->base.rotation;
+ const struct drm_framebuffer *fb = plane_state->hw.fb;
+ unsigned int rotation = plane_state->hw.rotation;
const struct drm_intel_sprite_colorkey *key = &plane_state->ckey;
u32 sprctl;
@@ -972,6 +947,9 @@ static u32 vlv_sprite_ctl(const struct intel_crtc_state *crtc_state,
case DRM_FORMAT_VYUY:
sprctl |= SP_FORMAT_YUV422 | SP_YUV_ORDER_VYUY;
break;
+ case DRM_FORMAT_C8:
+ sprctl |= SP_FORMAT_8BPP;
+ break;
case DRM_FORMAT_RGB565:
sprctl |= SP_FORMAT_BGR565;
break;
@@ -987,6 +965,12 @@ static u32 vlv_sprite_ctl(const struct intel_crtc_state *crtc_state,
case DRM_FORMAT_ABGR2101010:
sprctl |= SP_FORMAT_RGBA1010102;
break;
+ case DRM_FORMAT_XRGB2101010:
+ sprctl |= SP_FORMAT_BGRX1010102;
+ break;
+ case DRM_FORMAT_ARGB2101010:
+ sprctl |= SP_FORMAT_BGRA1010102;
+ break;
case DRM_FORMAT_XBGR8888:
sprctl |= SP_FORMAT_RGBX8888;
break;
@@ -998,7 +982,7 @@ static u32 vlv_sprite_ctl(const struct intel_crtc_state *crtc_state,
return 0;
}
- if (plane_state->base.color_encoding == DRM_COLOR_YCBCR_BT709)
+ if (plane_state->hw.color_encoding == DRM_COLOR_YCBCR_BT709)
sprctl |= SP_YUV_FORMAT_BT709;
if (fb->modifier == I915_FORMAT_MOD_X_TILED)
@@ -1018,9 +1002,9 @@ static u32 vlv_sprite_ctl(const struct intel_crtc_state *crtc_state,
static void vlv_update_gamma(const struct intel_plane_state *plane_state)
{
- struct intel_plane *plane = to_intel_plane(plane_state->base.plane);
+ struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
- const struct drm_framebuffer *fb = plane_state->base.fb;
+ const struct drm_framebuffer *fb = plane_state->hw.fb;
enum pipe pipe = plane->pipe;
enum plane_id plane_id = plane->id;
u16 gamma[8];
@@ -1052,10 +1036,10 @@ vlv_update_plane(struct intel_plane *plane,
u32 sprsurf_offset = plane_state->color_plane[0].offset;
u32 linear_offset;
const struct drm_intel_sprite_colorkey *key = &plane_state->ckey;
- int crtc_x = plane_state->base.dst.x1;
- int crtc_y = plane_state->base.dst.y1;
- u32 crtc_w = drm_rect_width(&plane_state->base.dst);
- u32 crtc_h = drm_rect_height(&plane_state->base.dst);
+ int crtc_x = plane_state->uapi.dst.x1;
+ int crtc_y = plane_state->uapi.dst.y1;
+ u32 crtc_w = drm_rect_width(&plane_state->uapi.dst);
+ u32 crtc_h = drm_rect_height(&plane_state->uapi.dst);
u32 x = plane_state->color_plane[0].x;
u32 y = plane_state->color_plane[0].y;
unsigned long irqflags;
@@ -1150,7 +1134,7 @@ static void ivb_plane_ratio(const struct intel_crtc_state *crtc_state,
unsigned int *num, unsigned int *den)
{
u8 active_planes = crtc_state->active_planes & ~BIT(PLANE_CURSOR);
- const struct drm_framebuffer *fb = plane_state->base.fb;
+ const struct drm_framebuffer *fb = plane_state->hw.fb;
unsigned int cpp = fb->format->cpp[0];
if (hweight8(active_planes) == 2) {
@@ -1186,7 +1170,7 @@ static void ivb_plane_ratio_scaling(const struct intel_crtc_state *crtc_state,
const struct intel_plane_state *plane_state,
unsigned int *num, unsigned int *den)
{
- const struct drm_framebuffer *fb = plane_state->base.fb;
+ const struct drm_framebuffer *fb = plane_state->hw.fb;
unsigned int cpp = fb->format->cpp[0];
switch (cpp) {
@@ -1244,8 +1228,8 @@ static int ivb_sprite_min_cdclk(const struct intel_crtc_state *crtc_state,
*/
pixel_rate = crtc_state->pixel_rate;
- src_w = drm_rect_width(&plane_state->base.src) >> 16;
- dst_w = drm_rect_width(&plane_state->base.dst);
+ src_w = drm_rect_width(&plane_state->uapi.src) >> 16;
+ dst_w = drm_rect_width(&plane_state->uapi.dst);
if (src_w != dst_w)
ivb_plane_ratio_scaling(crtc_state, plane_state, &num, &den);
@@ -1264,7 +1248,7 @@ static void hsw_plane_ratio(const struct intel_crtc_state *crtc_state,
unsigned int *num, unsigned int *den)
{
u8 active_planes = crtc_state->active_planes & ~BIT(PLANE_CURSOR);
- const struct drm_framebuffer *fb = plane_state->base.fb;
+ const struct drm_framebuffer *fb = plane_state->hw.fb;
unsigned int cpp = fb->format->cpp[0];
if (hweight8(active_planes) == 2) {
@@ -1319,8 +1303,8 @@ static u32 ivb_sprite_ctl_crtc(const struct intel_crtc_state *crtc_state)
static bool ivb_need_sprite_gamma(const struct intel_plane_state *plane_state)
{
struct drm_i915_private *dev_priv =
- to_i915(plane_state->base.plane->dev);
- const struct drm_framebuffer *fb = plane_state->base.fb;
+ to_i915(plane_state->uapi.plane->dev);
+ const struct drm_framebuffer *fb = plane_state->hw.fb;
return fb->format->cpp[0] == 8 &&
(IS_IVYBRIDGE(dev_priv) || IS_HASWELL(dev_priv));
@@ -1330,9 +1314,9 @@ static u32 ivb_sprite_ctl(const struct intel_crtc_state *crtc_state,
const struct intel_plane_state *plane_state)
{
struct drm_i915_private *dev_priv =
- to_i915(plane_state->base.plane->dev);
- const struct drm_framebuffer *fb = plane_state->base.fb;
- unsigned int rotation = plane_state->base.rotation;
+ to_i915(plane_state->uapi.plane->dev);
+ const struct drm_framebuffer *fb = plane_state->hw.fb;
+ unsigned int rotation = plane_state->hw.rotation;
const struct drm_intel_sprite_colorkey *key = &plane_state->ckey;
u32 sprctl;
@@ -1348,6 +1332,12 @@ static u32 ivb_sprite_ctl(const struct intel_crtc_state *crtc_state,
case DRM_FORMAT_XRGB8888:
sprctl |= SPRITE_FORMAT_RGBX888;
break;
+ case DRM_FORMAT_XBGR2101010:
+ sprctl |= SPRITE_FORMAT_RGBX101010 | SPRITE_RGB_ORDER_RGBX;
+ break;
+ case DRM_FORMAT_XRGB2101010:
+ sprctl |= SPRITE_FORMAT_RGBX101010;
+ break;
case DRM_FORMAT_XBGR16161616F:
sprctl |= SPRITE_FORMAT_RGBX161616 | SPRITE_RGB_ORDER_RGBX;
break;
@@ -1374,10 +1364,10 @@ static u32 ivb_sprite_ctl(const struct intel_crtc_state *crtc_state,
if (!ivb_need_sprite_gamma(plane_state))
sprctl |= SPRITE_INT_GAMMA_DISABLE;
- if (plane_state->base.color_encoding == DRM_COLOR_YCBCR_BT709)
+ if (plane_state->hw.color_encoding == DRM_COLOR_YCBCR_BT709)
sprctl |= SPRITE_YUV_TO_RGB_CSC_FORMAT_BT709;
- if (plane_state->base.color_range == DRM_COLOR_YCBCR_FULL_RANGE)
+ if (plane_state->hw.color_range == DRM_COLOR_YCBCR_FULL_RANGE)
sprctl |= SPRITE_YUV_RANGE_CORRECTION_DISABLE;
if (fb->modifier == I915_FORMAT_MOD_X_TILED)
@@ -1421,7 +1411,7 @@ static void ivb_sprite_linear_gamma(const struct intel_plane_state *plane_state,
static void ivb_update_gamma(const struct intel_plane_state *plane_state)
{
- struct intel_plane *plane = to_intel_plane(plane_state->base.plane);
+ struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
enum pipe pipe = plane->pipe;
u16 gamma[18];
@@ -1460,14 +1450,14 @@ ivb_update_plane(struct intel_plane *plane,
u32 sprsurf_offset = plane_state->color_plane[0].offset;
u32 linear_offset;
const struct drm_intel_sprite_colorkey *key = &plane_state->ckey;
- int crtc_x = plane_state->base.dst.x1;
- int crtc_y = plane_state->base.dst.y1;
- u32 crtc_w = drm_rect_width(&plane_state->base.dst);
- u32 crtc_h = drm_rect_height(&plane_state->base.dst);
+ int crtc_x = plane_state->uapi.dst.x1;
+ int crtc_y = plane_state->uapi.dst.y1;
+ u32 crtc_w = drm_rect_width(&plane_state->uapi.dst);
+ u32 crtc_h = drm_rect_height(&plane_state->uapi.dst);
u32 x = plane_state->color_plane[0].x;
u32 y = plane_state->color_plane[0].y;
- u32 src_w = drm_rect_width(&plane_state->base.src) >> 16;
- u32 src_h = drm_rect_height(&plane_state->base.src) >> 16;
+ u32 src_w = drm_rect_width(&plane_state->uapi.src) >> 16;
+ u32 src_h = drm_rect_height(&plane_state->uapi.src) >> 16;
u32 sprctl, sprscale = 0;
unsigned long irqflags;
@@ -1566,7 +1556,7 @@ ivb_plane_get_hw_state(struct intel_plane *plane,
static int g4x_sprite_min_cdclk(const struct intel_crtc_state *crtc_state,
const struct intel_plane_state *plane_state)
{
- const struct drm_framebuffer *fb = plane_state->base.fb;
+ const struct drm_framebuffer *fb = plane_state->hw.fb;
unsigned int hscale, pixel_rate;
unsigned int limit, decimate;
@@ -1580,8 +1570,8 @@ static int g4x_sprite_min_cdclk(const struct intel_crtc_state *crtc_state,
pixel_rate = crtc_state->pixel_rate;
/* Horizontal downscaling limits the maximum pixel rate */
- hscale = drm_rect_calc_hscale(&plane_state->base.src,
- &plane_state->base.dst,
+ hscale = drm_rect_calc_hscale(&plane_state->uapi.src,
+ &plane_state->uapi.dst,
0, INT_MAX);
if (hscale < 0x10000)
return pixel_rate;
@@ -1635,9 +1625,9 @@ static u32 g4x_sprite_ctl(const struct intel_crtc_state *crtc_state,
const struct intel_plane_state *plane_state)
{
struct drm_i915_private *dev_priv =
- to_i915(plane_state->base.plane->dev);
- const struct drm_framebuffer *fb = plane_state->base.fb;
- unsigned int rotation = plane_state->base.rotation;
+ to_i915(plane_state->uapi.plane->dev);
+ const struct drm_framebuffer *fb = plane_state->hw.fb;
+ unsigned int rotation = plane_state->hw.rotation;
const struct drm_intel_sprite_colorkey *key = &plane_state->ckey;
u32 dvscntr;
@@ -1653,6 +1643,12 @@ static u32 g4x_sprite_ctl(const struct intel_crtc_state *crtc_state,
case DRM_FORMAT_XRGB8888:
dvscntr |= DVS_FORMAT_RGBX888;
break;
+ case DRM_FORMAT_XBGR2101010:
+ dvscntr |= DVS_FORMAT_RGBX101010 | DVS_RGB_ORDER_XBGR;
+ break;
+ case DRM_FORMAT_XRGB2101010:
+ dvscntr |= DVS_FORMAT_RGBX101010;
+ break;
case DRM_FORMAT_XBGR16161616F:
dvscntr |= DVS_FORMAT_RGBX161616 | DVS_RGB_ORDER_XBGR;
break;
@@ -1676,10 +1672,10 @@ static u32 g4x_sprite_ctl(const struct intel_crtc_state *crtc_state,
return 0;
}
- if (plane_state->base.color_encoding == DRM_COLOR_YCBCR_BT709)
+ if (plane_state->hw.color_encoding == DRM_COLOR_YCBCR_BT709)
dvscntr |= DVS_YUV_FORMAT_BT709;
- if (plane_state->base.color_range == DRM_COLOR_YCBCR_FULL_RANGE)
+ if (plane_state->hw.color_range == DRM_COLOR_YCBCR_FULL_RANGE)
dvscntr |= DVS_YUV_RANGE_CORRECTION_DISABLE;
if (fb->modifier == I915_FORMAT_MOD_X_TILED)
@@ -1698,9 +1694,9 @@ static u32 g4x_sprite_ctl(const struct intel_crtc_state *crtc_state,
static void g4x_update_gamma(const struct intel_plane_state *plane_state)
{
- struct intel_plane *plane = to_intel_plane(plane_state->base.plane);
+ struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
- const struct drm_framebuffer *fb = plane_state->base.fb;
+ const struct drm_framebuffer *fb = plane_state->hw.fb;
enum pipe pipe = plane->pipe;
u16 gamma[8];
int i;
@@ -1730,9 +1726,9 @@ static void ilk_sprite_linear_gamma(u16 gamma[17])
static void ilk_update_gamma(const struct intel_plane_state *plane_state)
{
- struct intel_plane *plane = to_intel_plane(plane_state->base.plane);
+ struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
- const struct drm_framebuffer *fb = plane_state->base.fb;
+ const struct drm_framebuffer *fb = plane_state->hw.fb;
enum pipe pipe = plane->pipe;
u16 gamma[17];
int i;
@@ -1766,14 +1762,14 @@ g4x_update_plane(struct intel_plane *plane,
u32 dvssurf_offset = plane_state->color_plane[0].offset;
u32 linear_offset;
const struct drm_intel_sprite_colorkey *key = &plane_state->ckey;
- int crtc_x = plane_state->base.dst.x1;
- int crtc_y = plane_state->base.dst.y1;
- u32 crtc_w = drm_rect_width(&plane_state->base.dst);
- u32 crtc_h = drm_rect_height(&plane_state->base.dst);
+ int crtc_x = plane_state->uapi.dst.x1;
+ int crtc_y = plane_state->uapi.dst.y1;
+ u32 crtc_w = drm_rect_width(&plane_state->uapi.dst);
+ u32 crtc_h = drm_rect_height(&plane_state->uapi.dst);
u32 x = plane_state->color_plane[0].x;
u32 y = plane_state->color_plane[0].y;
- u32 src_w = drm_rect_width(&plane_state->base.src) >> 16;
- u32 src_h = drm_rect_height(&plane_state->base.src) >> 16;
+ u32 src_w = drm_rect_width(&plane_state->uapi.src) >> 16;
+ u32 src_h = drm_rect_height(&plane_state->uapi.src) >> 16;
u32 dvscntr, dvsscale = 0;
unsigned long irqflags;
@@ -1886,12 +1882,12 @@ static int
g4x_sprite_check_scaling(struct intel_crtc_state *crtc_state,
struct intel_plane_state *plane_state)
{
- const struct drm_framebuffer *fb = plane_state->base.fb;
- const struct drm_rect *src = &plane_state->base.src;
- const struct drm_rect *dst = &plane_state->base.dst;
+ const struct drm_framebuffer *fb = plane_state->hw.fb;
+ const struct drm_rect *src = &plane_state->uapi.src;
+ const struct drm_rect *dst = &plane_state->uapi.dst;
int src_x, src_w, src_h, crtc_w, crtc_h;
const struct drm_display_mode *adjusted_mode =
- &crtc_state->base.adjusted_mode;
+ &crtc_state->hw.adjusted_mode;
unsigned int stride = plane_state->color_plane[0].stride;
unsigned int cpp = fb->format->cpp[0];
unsigned int width_bytes;
@@ -1947,13 +1943,13 @@ static int
g4x_sprite_check(struct intel_crtc_state *crtc_state,
struct intel_plane_state *plane_state)
{
- struct intel_plane *plane = to_intel_plane(plane_state->base.plane);
+ struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
int min_scale = DRM_PLANE_HELPER_NO_SCALING;
int max_scale = DRM_PLANE_HELPER_NO_SCALING;
int ret;
- if (intel_fb_scalable(plane_state->base.fb)) {
+ if (intel_fb_scalable(plane_state->hw.fb)) {
if (INTEL_GEN(dev_priv) < 7) {
min_scale = 1;
max_scale = 16 << 16;
@@ -1963,8 +1959,8 @@ g4x_sprite_check(struct intel_crtc_state *crtc_state,
}
}
- ret = drm_atomic_helper_check_plane_state(&plane_state->base,
- &crtc_state->base,
+ ret = drm_atomic_helper_check_plane_state(&plane_state->uapi,
+ &crtc_state->uapi,
min_scale, max_scale,
true, true);
if (ret)
@@ -1974,7 +1970,7 @@ g4x_sprite_check(struct intel_crtc_state *crtc_state,
if (ret)
return ret;
- if (!plane_state->base.visible)
+ if (!plane_state->uapi.visible)
return 0;
ret = intel_plane_check_src_coordinates(plane_state);
@@ -1995,9 +1991,9 @@ g4x_sprite_check(struct intel_crtc_state *crtc_state,
int chv_plane_check_rotation(const struct intel_plane_state *plane_state)
{
- struct intel_plane *plane = to_intel_plane(plane_state->base.plane);
+ struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
- unsigned int rotation = plane_state->base.rotation;
+ unsigned int rotation = plane_state->hw.rotation;
/* CHV ignores the mirror bit when the rotate bit is set :( */
if (IS_CHERRYVIEW(dev_priv) &&
@@ -2020,8 +2016,8 @@ vlv_sprite_check(struct intel_crtc_state *crtc_state,
if (ret)
return ret;
- ret = drm_atomic_helper_check_plane_state(&plane_state->base,
- &crtc_state->base,
+ ret = drm_atomic_helper_check_plane_state(&plane_state->uapi,
+ &crtc_state->uapi,
DRM_PLANE_HELPER_NO_SCALING,
DRM_PLANE_HELPER_NO_SCALING,
true, true);
@@ -2032,7 +2028,7 @@ vlv_sprite_check(struct intel_crtc_state *crtc_state,
if (ret)
return ret;
- if (!plane_state->base.visible)
+ if (!plane_state->uapi.visible)
return 0;
ret = intel_plane_check_src_coordinates(plane_state);
@@ -2047,10 +2043,10 @@ vlv_sprite_check(struct intel_crtc_state *crtc_state,
static int skl_plane_check_fb(const struct intel_crtc_state *crtc_state,
const struct intel_plane_state *plane_state)
{
- struct intel_plane *plane = to_intel_plane(plane_state->base.plane);
+ struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
- const struct drm_framebuffer *fb = plane_state->base.fb;
- unsigned int rotation = plane_state->base.rotation;
+ const struct drm_framebuffer *fb = plane_state->hw.fb;
+ unsigned int rotation = plane_state->hw.rotation;
struct drm_format_name_buf format_name;
if (!fb)
@@ -2105,12 +2101,14 @@ static int skl_plane_check_fb(const struct intel_crtc_state *crtc_state,
}
/* Y-tiling is not supported in IF-ID Interlace mode */
- if (crtc_state->base.enable &&
- crtc_state->base.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE &&
+ if (crtc_state->hw.enable &&
+ crtc_state->hw.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE &&
(fb->modifier == I915_FORMAT_MOD_Y_TILED ||
fb->modifier == I915_FORMAT_MOD_Yf_TILED ||
fb->modifier == I915_FORMAT_MOD_Y_TILED_CCS ||
- fb->modifier == I915_FORMAT_MOD_Yf_TILED_CCS)) {
+ fb->modifier == I915_FORMAT_MOD_Yf_TILED_CCS ||
+ fb->modifier == I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS ||
+ fb->modifier == I915_FORMAT_MOD_Y_TILED_GEN12_MC_CCS)) {
DRM_DEBUG_KMS("Y/Yf tiling not supported in IF-ID mode\n");
return -EINVAL;
}
@@ -2122,9 +2120,9 @@ static int skl_plane_check_dst_coordinates(const struct intel_crtc_state *crtc_s
const struct intel_plane_state *plane_state)
{
struct drm_i915_private *dev_priv =
- to_i915(plane_state->base.plane->dev);
- int crtc_x = plane_state->base.dst.x1;
- int crtc_w = drm_rect_width(&plane_state->base.dst);
+ to_i915(plane_state->uapi.plane->dev);
+ int crtc_x = plane_state->uapi.dst.x1;
+ int crtc_w = drm_rect_width(&plane_state->uapi.dst);
int pipe_src_w = crtc_state->pipe_src_w;
/*
@@ -2150,12 +2148,13 @@ static int skl_plane_check_dst_coordinates(const struct intel_crtc_state *crtc_s
static int skl_plane_check_nv12_rotation(const struct intel_plane_state *plane_state)
{
- const struct drm_framebuffer *fb = plane_state->base.fb;
- unsigned int rotation = plane_state->base.rotation;
- int src_w = drm_rect_width(&plane_state->base.src) >> 16;
+ const struct drm_framebuffer *fb = plane_state->hw.fb;
+ unsigned int rotation = plane_state->hw.rotation;
+ int src_w = drm_rect_width(&plane_state->uapi.src) >> 16;
/* Display WA #1106 */
- if (drm_format_info_is_yuv_semiplanar(fb->format) && src_w & 3 &&
+ if (intel_format_info_is_yuv_semiplanar(fb->format, fb->modifier) &&
+ src_w & 3 &&
(rotation == DRM_MODE_ROTATE_270 ||
rotation == (DRM_MODE_REFLECT_X | DRM_MODE_ROTATE_90))) {
DRM_DEBUG_KMS("src width must be multiple of 4 for rotated planar YUV\n");
@@ -2175,7 +2174,7 @@ static int skl_plane_max_scale(struct drm_i915_private *dev_priv,
* FIXME need to properly check this later.
*/
if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv) ||
- !drm_format_info_is_yuv_semiplanar(fb->format))
+ !intel_format_info_is_yuv_semiplanar(fb->format, fb->modifier))
return 0x30000 - 1;
else
return 0x20000 - 1;
@@ -2184,9 +2183,9 @@ static int skl_plane_max_scale(struct drm_i915_private *dev_priv,
static int skl_plane_check(struct intel_crtc_state *crtc_state,
struct intel_plane_state *plane_state)
{
- struct intel_plane *plane = to_intel_plane(plane_state->base.plane);
+ struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
- const struct drm_framebuffer *fb = plane_state->base.fb;
+ const struct drm_framebuffer *fb = plane_state->hw.fb;
int min_scale = DRM_PLANE_HELPER_NO_SCALING;
int max_scale = DRM_PLANE_HELPER_NO_SCALING;
int ret;
@@ -2201,8 +2200,8 @@ static int skl_plane_check(struct intel_crtc_state *crtc_state,
max_scale = skl_plane_max_scale(dev_priv, fb);
}
- ret = drm_atomic_helper_check_plane_state(&plane_state->base,
- &crtc_state->base,
+ ret = drm_atomic_helper_check_plane_state(&plane_state->uapi,
+ &crtc_state->uapi,
min_scale, max_scale,
true, true);
if (ret)
@@ -2212,7 +2211,7 @@ static int skl_plane_check(struct intel_crtc_state *crtc_state,
if (ret)
return ret;
- if (!plane_state->base.visible)
+ if (!plane_state->uapi.visible)
return 0;
ret = skl_plane_check_dst_coordinates(crtc_state, plane_state);
@@ -2228,8 +2227,8 @@ static int skl_plane_check(struct intel_crtc_state *crtc_state,
return ret;
/* HW only has 8 bits of pixel precision; disable the plane if invisible */
- if (!(plane_state->base.alpha >> 8))
- plane_state->base.visible = false;
+ if (!(plane_state->hw.alpha >> 8))
+ plane_state->uapi.visible = false;
plane_state->ctl = skl_plane_ctl(crtc_state, plane_state);
@@ -2237,6 +2236,15 @@ static int skl_plane_check(struct intel_crtc_state *crtc_state,
plane_state->color_ctl = glk_plane_color_ctl(crtc_state,
plane_state);
+ if (intel_format_info_is_yuv_semiplanar(fb->format, fb->modifier) &&
+ icl_is_hdr_plane(dev_priv, plane->id))
+ /* Enable and use MPEG-2 chroma siting */
+ plane_state->cus_ctl = PLANE_CUS_ENABLE |
+ PLANE_CUS_HPHASE_0 |
+ PLANE_CUS_VPHASE_SIGN_NEGATIVE | PLANE_CUS_VPHASE_0_25;
+ else
+ plane_state->cus_ctl = 0;
+
return 0;
}
@@ -2248,7 +2256,7 @@ static bool has_dst_key_in_primary_plane(struct drm_i915_private *dev_priv)
static void intel_plane_set_ckey(struct intel_plane_state *plane_state,
const struct drm_intel_sprite_colorkey *set)
{
- struct intel_plane *plane = to_intel_plane(plane_state->base.plane);
+ struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
struct drm_intel_sprite_colorkey *key = &plane_state->ckey;
@@ -2375,6 +2383,8 @@ static const u64 i9xx_plane_format_modifiers[] = {
static const u32 snb_plane_formats[] = {
DRM_FORMAT_XRGB8888,
DRM_FORMAT_XBGR8888,
+ DRM_FORMAT_XRGB2101010,
+ DRM_FORMAT_XBGR2101010,
DRM_FORMAT_XRGB16161616F,
DRM_FORMAT_XBGR16161616F,
DRM_FORMAT_YUYV,
@@ -2384,12 +2394,30 @@ static const u32 snb_plane_formats[] = {
};
static const u32 vlv_plane_formats[] = {
+ DRM_FORMAT_C8,
DRM_FORMAT_RGB565,
- DRM_FORMAT_ABGR8888,
- DRM_FORMAT_ARGB8888,
+ DRM_FORMAT_XRGB8888,
DRM_FORMAT_XBGR8888,
+ DRM_FORMAT_ARGB8888,
+ DRM_FORMAT_ABGR8888,
+ DRM_FORMAT_XBGR2101010,
+ DRM_FORMAT_ABGR2101010,
+ DRM_FORMAT_YUYV,
+ DRM_FORMAT_YVYU,
+ DRM_FORMAT_UYVY,
+ DRM_FORMAT_VYUY,
+};
+
+static const u32 chv_pipe_b_sprite_formats[] = {
+ DRM_FORMAT_C8,
+ DRM_FORMAT_RGB565,
DRM_FORMAT_XRGB8888,
+ DRM_FORMAT_XBGR8888,
+ DRM_FORMAT_ARGB8888,
+ DRM_FORMAT_ABGR8888,
+ DRM_FORMAT_XRGB2101010,
DRM_FORMAT_XBGR2101010,
+ DRM_FORMAT_ARGB2101010,
DRM_FORMAT_ABGR2101010,
DRM_FORMAT_YUYV,
DRM_FORMAT_YVYU,
@@ -2462,6 +2490,8 @@ static const u32 icl_sdr_y_plane_formats[] = {
DRM_FORMAT_ABGR8888,
DRM_FORMAT_XRGB2101010,
DRM_FORMAT_XBGR2101010,
+ DRM_FORMAT_ARGB2101010,
+ DRM_FORMAT_ABGR2101010,
DRM_FORMAT_YUYV,
DRM_FORMAT_YVYU,
DRM_FORMAT_UYVY,
@@ -2483,6 +2513,8 @@ static const u32 icl_sdr_uv_plane_formats[] = {
DRM_FORMAT_ABGR8888,
DRM_FORMAT_XRGB2101010,
DRM_FORMAT_XBGR2101010,
+ DRM_FORMAT_ARGB2101010,
+ DRM_FORMAT_ABGR2101010,
DRM_FORMAT_YUYV,
DRM_FORMAT_YVYU,
DRM_FORMAT_UYVY,
@@ -2508,6 +2540,8 @@ static const u32 icl_hdr_plane_formats[] = {
DRM_FORMAT_ABGR8888,
DRM_FORMAT_XRGB2101010,
DRM_FORMAT_XBGR2101010,
+ DRM_FORMAT_ARGB2101010,
+ DRM_FORMAT_ABGR2101010,
DRM_FORMAT_XRGB16161616F,
DRM_FORMAT_XBGR16161616F,
DRM_FORMAT_ARGB16161616F,
@@ -2546,7 +2580,17 @@ static const u64 skl_plane_format_modifiers_ccs[] = {
DRM_FORMAT_MOD_INVALID
};
-static const u64 gen12_plane_format_modifiers_noccs[] = {
+static const u64 gen12_plane_format_modifiers_mc_ccs[] = {
+ I915_FORMAT_MOD_Y_TILED_GEN12_MC_CCS,
+ I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS,
+ I915_FORMAT_MOD_Y_TILED,
+ I915_FORMAT_MOD_X_TILED,
+ DRM_FORMAT_MOD_LINEAR,
+ DRM_FORMAT_MOD_INVALID
+};
+
+static const u64 gen12_plane_format_modifiers_rc_ccs[] = {
+ I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS,
I915_FORMAT_MOD_Y_TILED,
I915_FORMAT_MOD_X_TILED,
DRM_FORMAT_MOD_LINEAR,
@@ -2593,6 +2637,8 @@ static bool snb_sprite_format_mod_supported(struct drm_plane *_plane,
switch (format) {
case DRM_FORMAT_XRGB8888:
case DRM_FORMAT_XBGR8888:
+ case DRM_FORMAT_XRGB2101010:
+ case DRM_FORMAT_XBGR2101010:
case DRM_FORMAT_XRGB16161616F:
case DRM_FORMAT_XBGR16161616F:
case DRM_FORMAT_YUYV:
@@ -2620,6 +2666,7 @@ static bool vlv_sprite_format_mod_supported(struct drm_plane *_plane,
}
switch (format) {
+ case DRM_FORMAT_C8:
case DRM_FORMAT_RGB565:
case DRM_FORMAT_ABGR8888:
case DRM_FORMAT_ARGB8888:
@@ -2627,6 +2674,8 @@ static bool vlv_sprite_format_mod_supported(struct drm_plane *_plane,
case DRM_FORMAT_XRGB8888:
case DRM_FORMAT_XBGR2101010:
case DRM_FORMAT_ABGR2101010:
+ case DRM_FORMAT_XRGB2101010:
+ case DRM_FORMAT_ARGB2101010:
case DRM_FORMAT_YUYV:
case DRM_FORMAT_YVYU:
case DRM_FORMAT_UYVY:
@@ -2671,6 +2720,8 @@ static bool skl_plane_format_mod_supported(struct drm_plane *_plane,
case DRM_FORMAT_RGB565:
case DRM_FORMAT_XRGB2101010:
case DRM_FORMAT_XBGR2101010:
+ case DRM_FORMAT_ARGB2101010:
+ case DRM_FORMAT_ABGR2101010:
case DRM_FORMAT_YUYV:
case DRM_FORMAT_YVYU:
case DRM_FORMAT_UYVY:
@@ -2703,13 +2754,25 @@ static bool skl_plane_format_mod_supported(struct drm_plane *_plane,
}
}
+static bool gen12_plane_supports_mc_ccs(enum plane_id plane_id)
+{
+ return plane_id < PLANE_SPRITE4;
+}
+
static bool gen12_plane_format_mod_supported(struct drm_plane *_plane,
u32 format, u64 modifier)
{
+ struct intel_plane *plane = to_intel_plane(_plane);
+
switch (modifier) {
+ case I915_FORMAT_MOD_Y_TILED_GEN12_MC_CCS:
+ if (!gen12_plane_supports_mc_ccs(plane->id))
+ return false;
+ /* fall through */
case DRM_FORMAT_MOD_LINEAR:
case I915_FORMAT_MOD_X_TILED:
case I915_FORMAT_MOD_Y_TILED:
+ case I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS:
break;
default:
return false;
@@ -2720,9 +2783,9 @@ static bool gen12_plane_format_mod_supported(struct drm_plane *_plane,
case DRM_FORMAT_XBGR8888:
case DRM_FORMAT_ARGB8888:
case DRM_FORMAT_ABGR8888:
- case DRM_FORMAT_RGB565:
- case DRM_FORMAT_XRGB2101010:
- case DRM_FORMAT_XBGR2101010:
+ if (is_ccs_modifier(modifier))
+ return true;
+ /* fall through */
case DRM_FORMAT_YUYV:
case DRM_FORMAT_YVYU:
case DRM_FORMAT_UYVY:
@@ -2731,6 +2794,14 @@ static bool gen12_plane_format_mod_supported(struct drm_plane *_plane,
case DRM_FORMAT_P010:
case DRM_FORMAT_P012:
case DRM_FORMAT_P016:
+ if (modifier == I915_FORMAT_MOD_Y_TILED_GEN12_MC_CCS)
+ return true;
+ /* fall through */
+ case DRM_FORMAT_RGB565:
+ case DRM_FORMAT_XRGB2101010:
+ case DRM_FORMAT_XBGR2101010:
+ case DRM_FORMAT_ARGB2101010:
+ case DRM_FORMAT_ABGR2101010:
case DRM_FORMAT_XVYU2101010:
case DRM_FORMAT_C8:
case DRM_FORMAT_XBGR16161616F:
@@ -2864,6 +2935,14 @@ static const u32 *icl_get_plane_formats(struct drm_i915_private *dev_priv,
}
}
+static const u64 *gen12_get_plane_modifiers(enum plane_id plane_id)
+{
+ if (gen12_plane_supports_mc_ccs(plane_id))
+ return gen12_plane_format_modifiers_mc_ccs;
+ else
+ return gen12_plane_format_modifiers_rc_ccs;
+}
+
static bool skl_plane_has_ccs(struct drm_i915_private *dev_priv,
enum pipe pipe, enum plane_id plane_id)
{
@@ -2916,8 +2995,6 @@ skl_universal_plane_create(struct drm_i915_private *dev_priv,
plane->get_hw_state = skl_plane_get_hw_state;
plane->check_plane = skl_plane_check;
plane->min_cdclk = skl_plane_min_cdclk;
- if (icl_is_nv12_y_plane(plane_id))
- plane->update_slave = icl_update_slave;
if (INTEL_GEN(dev_priv) >= 11)
formats = icl_get_plane_formats(dev_priv, pipe,
@@ -2929,13 +3006,11 @@ skl_universal_plane_create(struct drm_i915_private *dev_priv,
formats = skl_get_plane_formats(dev_priv, pipe,
plane_id, &num_formats);
+ plane->has_ccs = skl_plane_has_ccs(dev_priv, pipe, plane_id);
if (INTEL_GEN(dev_priv) >= 12) {
- /* TODO: Implement support for gen-12 CCS modifiers */
- plane->has_ccs = false;
- modifiers = gen12_plane_format_modifiers_noccs;
+ modifiers = gen12_get_plane_modifiers(plane_id);
plane_funcs = &gen12_plane_funcs;
} else {
- plane->has_ccs = skl_plane_has_ccs(dev_priv, pipe, plane_id);
if (plane->has_ccs)
modifiers = skl_plane_format_modifiers_ccs;
else
@@ -3025,8 +3100,13 @@ intel_sprite_plane_create(struct drm_i915_private *dev_priv,
plane->check_plane = vlv_sprite_check;
plane->min_cdclk = vlv_plane_min_cdclk;
- formats = vlv_plane_formats;
- num_formats = ARRAY_SIZE(vlv_plane_formats);
+ if (IS_CHERRYVIEW(dev_priv) && pipe == PIPE_B) {
+ formats = chv_pipe_b_sprite_formats;
+ num_formats = ARRAY_SIZE(chv_pipe_b_sprite_formats);
+ } else {
+ formats = vlv_plane_formats;
+ num_formats = ARRAY_SIZE(vlv_plane_formats);
+ }
modifiers = i9xx_plane_format_modifiers;
plane_funcs = &vlv_sprite_funcs;
diff --git a/drivers/gpu/drm/i915/display/intel_tv.c b/drivers/gpu/drm/i915/display/intel_tv.c
index 9983fadf6c28..c75e0ceecee6 100644
--- a/drivers/gpu/drm/i915/display/intel_tv.c
+++ b/drivers/gpu/drm/i915/display/intel_tv.c
@@ -898,7 +898,7 @@ static struct intel_tv *enc_to_tv(struct intel_encoder *encoder)
return container_of(encoder, struct intel_tv, base);
}
-static struct intel_tv *intel_attached_tv(struct drm_connector *connector)
+static struct intel_tv *intel_attached_tv(struct intel_connector *connector)
{
return enc_to_tv(intel_attached_encoder(connector));
}
@@ -924,7 +924,7 @@ intel_enable_tv(struct intel_encoder *encoder,
/* Prevents vblank waits from timing out in intel_tv_detect_type() */
intel_wait_for_vblank(dev_priv,
- to_intel_crtc(pipe_config->base.crtc)->pipe);
+ to_intel_crtc(pipe_config->uapi.crtc)->pipe);
I915_WRITE(TV_CTL, I915_READ(TV_CTL) | TV_ENC_ENABLE);
}
@@ -1085,7 +1085,7 @@ intel_tv_get_config(struct intel_encoder *encoder,
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct drm_display_mode *adjusted_mode =
- &pipe_config->base.adjusted_mode;
+ &pipe_config->hw.adjusted_mode;
struct drm_display_mode mode = {};
u32 tv_ctl, hctl1, hctl3, vctl1, vctl2, tmp;
struct tv_mode tv_mode = {};
@@ -1188,7 +1188,7 @@ intel_tv_compute_config(struct intel_encoder *encoder,
to_intel_tv_connector_state(conn_state);
const struct tv_mode *tv_mode = intel_tv_mode_find(conn_state);
struct drm_display_mode *adjusted_mode =
- &pipe_config->base.adjusted_mode;
+ &pipe_config->hw.adjusted_mode;
int hdisplay = adjusted_mode->crtc_hdisplay;
int vdisplay = adjusted_mode->crtc_vdisplay;
@@ -1417,7 +1417,7 @@ static void intel_tv_pre_enable(struct intel_encoder *encoder,
const struct drm_connector_state *conn_state)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- struct intel_crtc *intel_crtc = to_intel_crtc(pipe_config->base.crtc);
+ struct intel_crtc *intel_crtc = to_intel_crtc(pipe_config->uapi.crtc);
struct intel_tv *intel_tv = enc_to_tv(encoder);
const struct intel_tv_connector_state *tv_conn_state =
to_intel_tv_connector_state(conn_state);
@@ -1527,7 +1527,7 @@ static void intel_tv_pre_enable(struct intel_encoder *encoder,
((video_levels->black << TV_BLACK_LEVEL_SHIFT) |
(video_levels->blank << TV_BLANK_LEVEL_SHIFT)));
- assert_pipe_disabled(dev_priv, intel_crtc->pipe);
+ assert_pipe_disabled(dev_priv, pipe_config->cpu_transcoder);
/* Filter ctl must be set before TV_WIN_SIZE */
tv_filter_ctl = TV_AUTO_SCALE;
@@ -1662,7 +1662,7 @@ intel_tv_detect_type(struct intel_tv *intel_tv,
*/
static void intel_tv_find_better_format(struct drm_connector *connector)
{
- struct intel_tv *intel_tv = intel_attached_tv(connector);
+ struct intel_tv *intel_tv = intel_attached_tv(to_intel_connector(connector));
const struct tv_mode *tv_mode = intel_tv_mode_find(connector->state);
int i;
@@ -1689,7 +1689,7 @@ intel_tv_detect(struct drm_connector *connector,
struct drm_modeset_acquire_ctx *ctx,
bool force)
{
- struct intel_tv *intel_tv = intel_attached_tv(connector);
+ struct intel_tv *intel_tv = intel_attached_tv(to_intel_connector(connector));
enum drm_connector_status status;
int type;
diff --git a/drivers/gpu/drm/i915/display/intel_vbt_defs.h b/drivers/gpu/drm/i915/display/intel_vbt_defs.h
index 69a7cb1fa121..4d0c23b29248 100644
--- a/drivers/gpu/drm/i915/display/intel_vbt_defs.h
+++ b/drivers/gpu/drm/i915/display/intel_vbt_defs.h
@@ -115,6 +115,7 @@ enum bdb_block_id {
BDB_MIPI_CONFIG = 52,
BDB_MIPI_SEQUENCE = 53,
BDB_COMPRESSION_PARAMETERS = 56,
+ BDB_GENERIC_DTD = 58,
BDB_SKIP = 254, /* VBIOS private block, ignore */
};
@@ -368,7 +369,7 @@ struct child_device_config {
u16 dtd_buf_ptr; /* 161 */
u8 edidless_efp:1; /* 161 */
u8 compression_enable:1; /* 198 */
- u8 compression_method:1; /* 198 */
+ u8 compression_method_cps:1; /* 198 */
u8 ganged_edp:1; /* 202 */
u8 reserved0:4;
u8 compression_structure_index:4; /* 198 */
@@ -793,6 +794,35 @@ struct bdb_lfp_backlight_data {
} __packed;
/*
+ * Block 44 - LFP Power Conservation Features Block
+ */
+
+struct als_data_entry {
+ u16 backlight_adjust;
+ u16 lux;
+} __packed;
+
+struct aggressiveness_profile_entry {
+ u8 dpst_aggressiveness : 4;
+ u8 lace_aggressiveness : 4;
+} __packed;
+
+struct bdb_lfp_power {
+ u8 lfp_feature_bits;
+ struct als_data_entry als[5];
+ u8 lace_aggressiveness_profile;
+ u16 dpst;
+ u16 psr;
+ u16 drrs;
+ u16 lace_support;
+ u16 adt;
+ u16 dmrrs;
+ u16 adb;
+ u16 lace_enabled_status;
+ struct aggressiveness_profile_entry aggressiveness[16];
+} __packed;
+
+/*
* Block 52 - MIPI Configuration Block
*/
@@ -863,4 +893,34 @@ struct bdb_compression_parameters {
struct dsc_compression_parameters_entry data[16];
} __packed;
+/*
+ * Block 58 - Generic DTD Block
+ */
+
+struct generic_dtd_entry {
+ u32 pixel_clock;
+ u16 hactive;
+ u16 hblank;
+ u16 hfront_porch;
+ u16 hsync;
+ u16 vactive;
+ u16 vblank;
+ u16 vfront_porch;
+ u16 vsync;
+ u16 width_mm;
+ u16 height_mm;
+
+ /* Flags */
+ u8 rsvd_flags:6;
+ u8 vsync_positive_polarity:1;
+ u8 hsync_positive_polarity:1;
+
+ u8 rsvd[3];
+} __packed;
+
+struct bdb_generic_dtd {
+ u16 gdtd_size;
+ struct generic_dtd_entry dtd[]; /* up to 24 DTDs */
+} __packed;
+
#endif /* _INTEL_VBT_DEFS_H_ */
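Block 58 stores each timing as active/blank/front-porch/sync widths rather than absolute sync positions. A hedged sketch of how a generic_dtd_entry (the struct added above) would map onto conventional sync_start/sync_end/total timings; the field semantics are assumed from their names, and the pixel_clock unit is assumed to be kHz:

struct sketch_timings {
	unsigned int clock;
	unsigned int hactive, hsync_start, hsync_end, htotal;
	unsigned int vactive, vsync_start, vsync_end, vtotal;
};

static struct sketch_timings
dtd_to_timings(const struct generic_dtd_entry *dtd)
{
	struct sketch_timings t;

	t.clock = dtd->pixel_clock;
	t.hactive = dtd->hactive;
	t.hsync_start = dtd->hactive + dtd->hfront_porch;
	t.hsync_end = t.hsync_start + dtd->hsync;
	t.htotal = dtd->hactive + dtd->hblank;

	t.vactive = dtd->vactive;
	t.vsync_start = dtd->vactive + dtd->vfront_porch;
	t.vsync_end = t.vsync_start + dtd->vsync;
	t.vtotal = dtd->vactive + dtd->vblank;

	return t;
}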
diff --git a/drivers/gpu/drm/i915/display/intel_vdsc.c b/drivers/gpu/drm/i915/display/intel_vdsc.c
index 896b0c334f5e..9e6aaa302e40 100644
--- a/drivers/gpu/drm/i915/display/intel_vdsc.c
+++ b/drivers/gpu/drm/i915/display/intel_vdsc.c
@@ -10,6 +10,7 @@
#include "i915_drv.h"
#include "intel_display_types.h"
+#include "intel_dsi.h"
#include "intel_vdsc.h"
enum ROW_INDEX_BPP {
@@ -30,10 +31,8 @@ enum COLUMN_INDEX_BPC {
MAX_COLUMN_INDEX
};
-#define DSC_SUPPORTED_VERSION_MIN 1
-
/* rc_parameter_set syntax element values that are typically constant, per the DSC v1.11 spec */
-static u16 rc_buf_thresh[] = {
+static const u16 rc_buf_thresh[] = {
896, 1792, 2688, 3584, 4480, 5376, 6272, 6720, 7168, 7616,
7744, 7872, 8000, 8064
};
@@ -53,7 +52,7 @@ struct rc_parameters {
* Selected Rate Control Related Parameter Recommended Values
* from DSC_v1.11 spec & C Model release: DSC_model_20161212
*/
-static struct rc_parameters rc_params[][MAX_COLUMN_INDEX] = {
+static const struct rc_parameters rc_parameters[][MAX_COLUMN_INDEX] = {
{
/* 6BPP/8BPC */
{ 768, 15, 6144, 3, 13, 11, 11, {
@@ -319,63 +318,84 @@ static int get_column_index_for_rc_params(u8 bits_per_component)
}
}
-int intel_dp_compute_dsc_params(struct intel_dp *intel_dp,
- struct intel_crtc_state *pipe_config)
+static const struct rc_parameters *get_rc_params(u16 compressed_bpp,
+ u8 bits_per_component)
+{
+ int row_index, column_index;
+
+ row_index = get_row_index_for_rc_params(compressed_bpp);
+ if (row_index < 0)
+ return NULL;
+
+ column_index = get_column_index_for_rc_params(bits_per_component);
+ if (column_index < 0)
+ return NULL;
+
+ return &rc_parameters[row_index][column_index];
+}
+
+bool intel_dsc_source_support(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state)
+{
+ const struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+ struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
+ enum pipe pipe = crtc->pipe;
+
+ if (!INTEL_INFO(i915)->display.has_dsc)
+ return false;
+
+ /* On TGL, DSC is supported on all Pipes */
+ if (INTEL_GEN(i915) >= 12)
+ return true;
+
+ if (INTEL_GEN(i915) >= 10 &&
+ (pipe != PIPE_A ||
+ (cpu_transcoder == TRANSCODER_EDP ||
+ cpu_transcoder == TRANSCODER_DSI_0 ||
+ cpu_transcoder == TRANSCODER_DSI_1)))
+ return true;
+
+ return false;
+}
+
+static bool is_pipe_dsc(const struct intel_crtc_state *crtc_state)
+{
+ const struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+ const struct drm_i915_private *i915 = to_i915(crtc->base.dev);
+ enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
+
+ if (INTEL_GEN(i915) >= 12)
+ return true;
+
+ if (cpu_transcoder == TRANSCODER_EDP ||
+ cpu_transcoder == TRANSCODER_DSI_0 ||
+ cpu_transcoder == TRANSCODER_DSI_1)
+ return false;
+
+ /* There's no pipe A DSC engine on ICL */
+ WARN_ON(crtc->pipe == PIPE_A);
+
+ return true;
+}
+
+int intel_dsc_compute_params(struct intel_encoder *encoder,
+ struct intel_crtc_state *pipe_config)
{
struct drm_dsc_config *vdsc_cfg = &pipe_config->dsc.config;
u16 compressed_bpp = pipe_config->dsc.compressed_bpp;
+ const struct rc_parameters *rc_params;
u8 i = 0;
- int row_index = 0;
- int column_index = 0;
- u8 line_buf_depth = 0;
- vdsc_cfg->pic_width = pipe_config->base.adjusted_mode.crtc_hdisplay;
- vdsc_cfg->pic_height = pipe_config->base.adjusted_mode.crtc_vdisplay;
+ vdsc_cfg->pic_width = pipe_config->hw.adjusted_mode.crtc_hdisplay;
+ vdsc_cfg->pic_height = pipe_config->hw.adjusted_mode.crtc_vdisplay;
vdsc_cfg->slice_width = DIV_ROUND_UP(vdsc_cfg->pic_width,
pipe_config->dsc.slice_count);
- /*
- * Slice Height of 8 works for all currently available panels. So start
- * with that if pic_height is an integral multiple of 8.
- * Eventually add logic to try multiple slice heights.
- */
- if (vdsc_cfg->pic_height % 8 == 0)
- vdsc_cfg->slice_height = 8;
- else if (vdsc_cfg->pic_height % 4 == 0)
- vdsc_cfg->slice_height = 4;
- else
- vdsc_cfg->slice_height = 2;
-
- /* Values filled from DSC Sink DPCD */
- vdsc_cfg->dsc_version_major =
- (intel_dp->dsc_dpcd[DP_DSC_REV - DP_DSC_SUPPORT] &
- DP_DSC_MAJOR_MASK) >> DP_DSC_MAJOR_SHIFT;
- vdsc_cfg->dsc_version_minor =
- min(DSC_SUPPORTED_VERSION_MIN,
- (intel_dp->dsc_dpcd[DP_DSC_REV - DP_DSC_SUPPORT] &
- DP_DSC_MINOR_MASK) >> DP_DSC_MINOR_SHIFT);
-
- vdsc_cfg->convert_rgb = intel_dp->dsc_dpcd[DP_DSC_DEC_COLOR_FORMAT_CAP - DP_DSC_SUPPORT] &
- DP_DSC_RGB;
-
- line_buf_depth = drm_dp_dsc_sink_line_buf_depth(intel_dp->dsc_dpcd);
- if (!line_buf_depth) {
- DRM_DEBUG_KMS("DSC Sink Line Buffer Depth invalid\n");
- return -EINVAL;
- }
- if (vdsc_cfg->dsc_version_minor == 2)
- vdsc_cfg->line_buf_depth = (line_buf_depth == DSC_1_2_MAX_LINEBUF_DEPTH_BITS) ?
- DSC_1_2_MAX_LINEBUF_DEPTH_VAL : line_buf_depth;
- else
- vdsc_cfg->line_buf_depth = (line_buf_depth > DSC_1_1_MAX_LINEBUF_DEPTH_BITS) ?
- DSC_1_1_MAX_LINEBUF_DEPTH_BITS : line_buf_depth;
/* Gen 11 does not support YCbCr */
vdsc_cfg->simple_422 = false;
/* Gen 11 does not support VBR */
vdsc_cfg->vbr_enable = false;
- vdsc_cfg->block_pred_enable =
- intel_dp->dsc_dpcd[DP_DSC_BLK_PREDICTION_SUPPORT - DP_DSC_SUPPORT] &
- DP_DSC_BLK_PREDICTION_IS_SUPPORTED;
/* Gen 11 only supports integral values of bpp */
vdsc_cfg->bits_per_pixel = compressed_bpp << 4;
@@ -399,39 +419,29 @@ int intel_dp_compute_dsc_params(struct intel_dp *intel_dp,
vdsc_cfg->rc_buf_thresh[13] = 0x7D;
}
- row_index = get_row_index_for_rc_params(compressed_bpp);
- column_index =
- get_column_index_for_rc_params(vdsc_cfg->bits_per_component);
-
- if (row_index < 0 || column_index < 0)
+ rc_params = get_rc_params(compressed_bpp, vdsc_cfg->bits_per_component);
+ if (!rc_params)
return -EINVAL;
- vdsc_cfg->first_line_bpg_offset =
- rc_params[row_index][column_index].first_line_bpg_offset;
- vdsc_cfg->initial_xmit_delay =
- rc_params[row_index][column_index].initial_xmit_delay;
- vdsc_cfg->initial_offset =
- rc_params[row_index][column_index].initial_offset;
- vdsc_cfg->flatness_min_qp =
- rc_params[row_index][column_index].flatness_min_qp;
- vdsc_cfg->flatness_max_qp =
- rc_params[row_index][column_index].flatness_max_qp;
- vdsc_cfg->rc_quant_incr_limit0 =
- rc_params[row_index][column_index].rc_quant_incr_limit0;
- vdsc_cfg->rc_quant_incr_limit1 =
- rc_params[row_index][column_index].rc_quant_incr_limit1;
+ vdsc_cfg->first_line_bpg_offset = rc_params->first_line_bpg_offset;
+ vdsc_cfg->initial_xmit_delay = rc_params->initial_xmit_delay;
+ vdsc_cfg->initial_offset = rc_params->initial_offset;
+ vdsc_cfg->flatness_min_qp = rc_params->flatness_min_qp;
+ vdsc_cfg->flatness_max_qp = rc_params->flatness_max_qp;
+ vdsc_cfg->rc_quant_incr_limit0 = rc_params->rc_quant_incr_limit0;
+ vdsc_cfg->rc_quant_incr_limit1 = rc_params->rc_quant_incr_limit1;
for (i = 0; i < DSC_NUM_BUF_RANGES; i++) {
vdsc_cfg->rc_range_params[i].range_min_qp =
- rc_params[row_index][column_index].rc_range_params[i].range_min_qp;
+ rc_params->rc_range_params[i].range_min_qp;
vdsc_cfg->rc_range_params[i].range_max_qp =
- rc_params[row_index][column_index].rc_range_params[i].range_max_qp;
+ rc_params->rc_range_params[i].range_max_qp;
/*
* Range BPG Offset uses 2's complement and is only 6 bits wide,
* so mask it to keep the low 6 bits.
*/
vdsc_cfg->rc_range_params[i].range_bpg_offset =
- rc_params[row_index][column_index].rc_range_params[i].range_bpg_offset &
+ rc_params->rc_range_params[i].range_bpg_offset &
DSC_RANGE_BPG_OFFSET_MASK;
}
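The loop above truncates each range_bpg_offset to its 6-bit two's-complement field. A tiny standalone illustration of what the masking does to negative offsets; the 0x3f value is assumed to match DSC_RANGE_BPG_OFFSET_MASK:

#include <stdio.h>

#define RANGE_BPG_OFFSET_MASK 0x3f	/* low 6 bits, assumed */

int main(void)
{
	const int offsets[] = { 2, -2, -12 };
	unsigned int i;

	for (i = 0; i < 3; i++)
		printf("%4d -> 0x%02x\n", offsets[i],
		       offsets[i] & RANGE_BPG_OFFSET_MASK);

	/* prints: 2 -> 0x02, -2 -> 0x3e, -12 -> 0x34 */
	return 0;
}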
@@ -453,41 +463,42 @@ int intel_dp_compute_dsc_params(struct intel_dp *intel_dp,
vdsc_cfg->initial_scale_value = (vdsc_cfg->rc_model_size << 3) /
(vdsc_cfg->rc_model_size - vdsc_cfg->initial_offset);
- return drm_dsc_compute_rc_parameters(vdsc_cfg);
+ return 0;
}
enum intel_display_power_domain
intel_dsc_power_domain(const struct intel_crtc_state *crtc_state)
{
- struct drm_i915_private *i915 = to_i915(crtc_state->base.crtc->dev);
- enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+ struct drm_i915_private *i915 = to_i915(crtc->base.dev);
+ enum pipe pipe = crtc->pipe;
/*
- * On ICL VDSC/joining for eDP transcoder uses a separate power well,
- * PW2. This requires POWER_DOMAIN_TRANSCODER_VDSC_PW2 power domain.
- * For any other transcoder, VDSC/joining uses the power well associated
- * with the pipe/transcoder in use. Hence another reference on the
- * transcoder power domain will suffice.
+ * VDSC/joining uses a separate power well, PW2, and requires
+ * POWER_DOMAIN_TRANSCODER_VDSC_PW2 power domain in two cases:
+ *
+ * - ICL eDP/DSI transcoder
+ * - TGL pipe A
*
- * On TGL we have the same mapping, but for transcoder A (the special
- * TRANSCODER_EDP is gone).
+ * For any other pipe, VDSC/joining uses the power well associated with
+ * the pipe in use. Hence another reference on the pipe power domain
+ * will suffice. (Except no VDSC/joining on ICL pipe A.)
*/
- if (INTEL_GEN(i915) >= 12 && cpu_transcoder == TRANSCODER_A)
- return POWER_DOMAIN_TRANSCODER_VDSC_PW2;
- else if (cpu_transcoder == TRANSCODER_EDP)
+ if (INTEL_GEN(i915) >= 12 && pipe == PIPE_A)
return POWER_DOMAIN_TRANSCODER_VDSC_PW2;
+ else if (is_pipe_dsc(crtc_state))
+ return POWER_DOMAIN_PIPE(pipe);
else
- return POWER_DOMAIN_TRANSCODER(cpu_transcoder);
+ return POWER_DOMAIN_TRANSCODER_VDSC_PW2;
}
-static void intel_configure_pps_for_dsc_encoder(struct intel_encoder *encoder,
- const struct intel_crtc_state *crtc_state)
+static void intel_dsc_pps_configure(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state)
{
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
const struct drm_dsc_config *vdsc_cfg = &crtc_state->dsc.config;
enum pipe pipe = crtc->pipe;
- enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
u32 pps_val = 0;
u32 rc_buf_thresh_dword[4];
u32 rc_range_params_dword[8];
@@ -508,7 +519,7 @@ static void intel_configure_pps_for_dsc_encoder(struct intel_encoder *encoder,
if (vdsc_cfg->vbr_enable)
pps_val |= DSC_VBR_ENABLE;
DRM_INFO("PPS0 = 0x%08x\n", pps_val);
- if (cpu_transcoder == TRANSCODER_EDP) {
+ if (!is_pipe_dsc(crtc_state)) {
I915_WRITE(DSCA_PICTURE_PARAMETER_SET_0, pps_val);
/*
* If 2 VDSC instances are needed, configure PPS for second
@@ -527,7 +538,7 @@ static void intel_configure_pps_for_dsc_encoder(struct intel_encoder *encoder,
pps_val = 0;
pps_val |= DSC_BPP(vdsc_cfg->bits_per_pixel);
DRM_INFO("PPS1 = 0x%08x\n", pps_val);
- if (cpu_transcoder == TRANSCODER_EDP) {
+ if (!is_pipe_dsc(crtc_state)) {
I915_WRITE(DSCA_PICTURE_PARAMETER_SET_1, pps_val);
/*
* If 2 VDSC instances are needed, configure PPS for second
@@ -547,7 +558,7 @@ static void intel_configure_pps_for_dsc_encoder(struct intel_encoder *encoder,
pps_val |= DSC_PIC_HEIGHT(vdsc_cfg->pic_height) |
DSC_PIC_WIDTH(vdsc_cfg->pic_width / num_vdsc_instances);
DRM_INFO("PPS2 = 0x%08x\n", pps_val);
- if (cpu_transcoder == TRANSCODER_EDP) {
+ if (!is_pipe_dsc(crtc_state)) {
I915_WRITE(DSCA_PICTURE_PARAMETER_SET_2, pps_val);
/*
* If 2 VDSC instances are needed, configure PPS for second
@@ -567,7 +578,7 @@ static void intel_configure_pps_for_dsc_encoder(struct intel_encoder *encoder,
pps_val |= DSC_SLICE_HEIGHT(vdsc_cfg->slice_height) |
DSC_SLICE_WIDTH(vdsc_cfg->slice_width);
DRM_INFO("PPS3 = 0x%08x\n", pps_val);
- if (cpu_transcoder == TRANSCODER_EDP) {
+ if (!is_pipe_dsc(crtc_state)) {
I915_WRITE(DSCA_PICTURE_PARAMETER_SET_3, pps_val);
/*
* If 2 VDSC instances are needed, configure PPS for second
@@ -587,7 +598,7 @@ static void intel_configure_pps_for_dsc_encoder(struct intel_encoder *encoder,
pps_val |= DSC_INITIAL_XMIT_DELAY(vdsc_cfg->initial_xmit_delay) |
DSC_INITIAL_DEC_DELAY(vdsc_cfg->initial_dec_delay);
DRM_INFO("PPS4 = 0x%08x\n", pps_val);
- if (cpu_transcoder == TRANSCODER_EDP) {
+ if (!is_pipe_dsc(crtc_state)) {
I915_WRITE(DSCA_PICTURE_PARAMETER_SET_4, pps_val);
/*
* If 2 VDSC instances are needed, configure PPS for second
@@ -607,7 +618,7 @@ static void intel_configure_pps_for_dsc_encoder(struct intel_encoder *encoder,
pps_val |= DSC_SCALE_INC_INT(vdsc_cfg->scale_increment_interval) |
DSC_SCALE_DEC_INT(vdsc_cfg->scale_decrement_interval);
DRM_INFO("PPS5 = 0x%08x\n", pps_val);
- if (cpu_transcoder == TRANSCODER_EDP) {
+ if (!is_pipe_dsc(crtc_state)) {
I915_WRITE(DSCA_PICTURE_PARAMETER_SET_5, pps_val);
/*
* If 2 VDSC instances are needed, configure PPS for second
@@ -629,7 +640,7 @@ static void intel_configure_pps_for_dsc_encoder(struct intel_encoder *encoder,
DSC_FLATNESS_MIN_QP(vdsc_cfg->flatness_min_qp) |
DSC_FLATNESS_MAX_QP(vdsc_cfg->flatness_max_qp);
DRM_INFO("PPS6 = 0x%08x\n", pps_val);
- if (cpu_transcoder == TRANSCODER_EDP) {
+ if (!is_pipe_dsc(crtc_state)) {
I915_WRITE(DSCA_PICTURE_PARAMETER_SET_6, pps_val);
/*
* If 2 VDSC instances are needed, configure PPS for second
@@ -649,7 +660,7 @@ static void intel_configure_pps_for_dsc_encoder(struct intel_encoder *encoder,
pps_val |= DSC_SLICE_BPG_OFFSET(vdsc_cfg->slice_bpg_offset) |
DSC_NFL_BPG_OFFSET(vdsc_cfg->nfl_bpg_offset);
DRM_INFO("PPS7 = 0x%08x\n", pps_val);
- if (cpu_transcoder == TRANSCODER_EDP) {
+ if (!is_pipe_dsc(crtc_state)) {
I915_WRITE(DSCA_PICTURE_PARAMETER_SET_7, pps_val);
/*
* If 2 VDSC instances are needed, configure PPS for second
@@ -669,7 +680,7 @@ static void intel_configure_pps_for_dsc_encoder(struct intel_encoder *encoder,
pps_val |= DSC_FINAL_OFFSET(vdsc_cfg->final_offset) |
DSC_INITIAL_OFFSET(vdsc_cfg->initial_offset);
DRM_INFO("PPS8 = 0x%08x\n", pps_val);
- if (cpu_transcoder == TRANSCODER_EDP) {
+ if (!is_pipe_dsc(crtc_state)) {
I915_WRITE(DSCA_PICTURE_PARAMETER_SET_8, pps_val);
/*
* If 2 VDSC instances are needed, configure PPS for second
@@ -689,7 +700,7 @@ static void intel_configure_pps_for_dsc_encoder(struct intel_encoder *encoder,
pps_val |= DSC_RC_MODEL_SIZE(DSC_RC_MODEL_SIZE_CONST) |
DSC_RC_EDGE_FACTOR(DSC_RC_EDGE_FACTOR_CONST);
DRM_INFO("PPS9 = 0x%08x\n", pps_val);
- if (cpu_transcoder == TRANSCODER_EDP) {
+ if (!is_pipe_dsc(crtc_state)) {
I915_WRITE(DSCA_PICTURE_PARAMETER_SET_9, pps_val);
/*
* If 2 VDSC instances are needed, configure PPS for second
@@ -711,7 +722,7 @@ static void intel_configure_pps_for_dsc_encoder(struct intel_encoder *encoder,
DSC_RC_TARGET_OFF_HIGH(DSC_RC_TGT_OFFSET_HI_CONST) |
DSC_RC_TARGET_OFF_LOW(DSC_RC_TGT_OFFSET_LO_CONST);
DRM_INFO("PPS10 = 0x%08x\n", pps_val);
- if (cpu_transcoder == TRANSCODER_EDP) {
+ if (!is_pipe_dsc(crtc_state)) {
I915_WRITE(DSCA_PICTURE_PARAMETER_SET_10, pps_val);
/*
* If 2 VDSC instances are needed, configure PPS for second
@@ -734,7 +745,7 @@ static void intel_configure_pps_for_dsc_encoder(struct intel_encoder *encoder,
DSC_SLICE_ROW_PER_FRAME(vdsc_cfg->pic_height /
vdsc_cfg->slice_height);
DRM_INFO("PPS16 = 0x%08x\n", pps_val);
- if (cpu_transcoder == TRANSCODER_EDP) {
+ if (!is_pipe_dsc(crtc_state)) {
I915_WRITE(DSCA_PICTURE_PARAMETER_SET_16, pps_val);
/*
* If 2 VDSC instances are needed, configure PPS for second
@@ -758,7 +769,7 @@ static void intel_configure_pps_for_dsc_encoder(struct intel_encoder *encoder,
DRM_INFO(" RC_BUF_THRESH%d = 0x%08x\n", i,
rc_buf_thresh_dword[i / 4]);
}
- if (cpu_transcoder == TRANSCODER_EDP) {
+ if (!is_pipe_dsc(crtc_state)) {
I915_WRITE(DSCA_RC_BUF_THRESH_0, rc_buf_thresh_dword[0]);
I915_WRITE(DSCA_RC_BUF_THRESH_0_UDW, rc_buf_thresh_dword[1]);
I915_WRITE(DSCA_RC_BUF_THRESH_1, rc_buf_thresh_dword[2]);
@@ -807,7 +818,7 @@ static void intel_configure_pps_for_dsc_encoder(struct intel_encoder *encoder,
DRM_INFO(" RC_RANGE_PARAM_%d = 0x%08x\n", i,
rc_range_params_dword[i / 2]);
}
- if (cpu_transcoder == TRANSCODER_EDP) {
+ if (!is_pipe_dsc(crtc_state)) {
I915_WRITE(DSCA_RC_RANGE_PARAMETERS_0,
rc_range_params_dword[0]);
I915_WRITE(DSCA_RC_RANGE_PARAMETERS_0_UDW,
@@ -880,10 +891,77 @@ static void intel_configure_pps_for_dsc_encoder(struct intel_encoder *encoder,
}
}
-static void intel_dp_write_dsc_pps_sdp(struct intel_encoder *encoder,
- const struct intel_crtc_state *crtc_state)
+void intel_dsc_get_config(struct intel_encoder *encoder,
+ struct intel_crtc_state *crtc_state)
{
- struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ struct drm_dsc_config *vdsc_cfg = &crtc_state->dsc.config;
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+ enum pipe pipe = crtc->pipe;
+ enum intel_display_power_domain power_domain;
+ intel_wakeref_t wakeref;
+ u32 dss_ctl1, dss_ctl2, val;
+
+ if (!intel_dsc_source_support(encoder, crtc_state))
+ return;
+
+ power_domain = intel_dsc_power_domain(crtc_state);
+
+ wakeref = intel_display_power_get_if_enabled(dev_priv, power_domain);
+ if (!wakeref)
+ return;
+
+ if (!is_pipe_dsc(crtc_state)) {
+ dss_ctl1 = I915_READ(DSS_CTL1);
+ dss_ctl2 = I915_READ(DSS_CTL2);
+ } else {
+ dss_ctl1 = I915_READ(ICL_PIPE_DSS_CTL1(pipe));
+ dss_ctl2 = I915_READ(ICL_PIPE_DSS_CTL2(pipe));
+ }
+
+ crtc_state->dsc.compression_enable = dss_ctl2 & LEFT_BRANCH_VDSC_ENABLE;
+ if (!crtc_state->dsc.compression_enable)
+ goto out;
+
+ crtc_state->dsc.dsc_split = (dss_ctl2 & RIGHT_BRANCH_VDSC_ENABLE) &&
+ (dss_ctl1 & JOINER_ENABLE);
+
+ /* FIXME: add more state readout as needed */
+
+ /* PPS1 */
+ if (!is_pipe_dsc(crtc_state))
+ val = I915_READ(DSCA_PICTURE_PARAMETER_SET_1);
+ else
+ val = I915_READ(ICL_DSC0_PICTURE_PARAMETER_SET_1(pipe));
+ vdsc_cfg->bits_per_pixel = val;
+ crtc_state->dsc.compressed_bpp = vdsc_cfg->bits_per_pixel >> 4;
+out:
+ intel_display_power_put(dev_priv, power_domain, wakeref);
+}
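+
The ">> 4" in the readout drops the fractional part of the DSC bits-per-pixel value, which the PPS stores in 1/16th-bpp units (U6.4 fixed point per the DSC spec). A stand-alone illustration of the conversion:

#include <stdio.h>

int main(void)
{
	unsigned int bpp_u6_4 = 0x10c;	/* 16.75 bpp: 16 * 16 + 12 */

	/* integer part, as compressed_bpp is derived above */
	printf("integer bpp: %u\n", bpp_u6_4 >> 4);		/* 16 */
	/* full value, should the fraction ever be needed */
	printf("exact bpp:   %u.%04u\n", bpp_u6_4 >> 4,
	       (bpp_u6_4 & 0xf) * 10000 / 16);			/* 16.7500 */
	return 0;
}
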
+
+static void intel_dsc_dsi_pps_write(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state)
+{
+ const struct drm_dsc_config *vdsc_cfg = &crtc_state->dsc.config;
+ struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
+ struct mipi_dsi_device *dsi;
+ struct drm_dsc_picture_parameter_set pps;
+ enum port port;
+
+ drm_dsc_pps_payload_pack(&pps, vdsc_cfg);
+
+ for_each_dsi_port(port, intel_dsi->ports) {
+ dsi = intel_dsi->dsi_hosts[port]->device;
+
+ mipi_dsi_picture_parameter_set(dsi, &pps);
+ mipi_dsi_compression_mode(dsi, true);
+ }
+}
+
+static void intel_dsc_dp_pps_write(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state)
+{
+ struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
const struct drm_dsc_config *vdsc_cfg = &crtc_state->dsc.config;
struct drm_dsc_pps_infoframe dp_dsc_pps_sdp;
@@ -902,7 +980,7 @@ static void intel_dp_write_dsc_pps_sdp(struct intel_encoder *encoder,
void intel_dsc_enable(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state)
{
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
enum pipe pipe = crtc->pipe;
i915_reg_t dss_ctl1_reg, dss_ctl2_reg;
@@ -916,11 +994,14 @@ void intel_dsc_enable(struct intel_encoder *encoder,
intel_display_power_get(dev_priv,
intel_dsc_power_domain(crtc_state));
- intel_configure_pps_for_dsc_encoder(encoder, crtc_state);
+ intel_dsc_pps_configure(encoder, crtc_state);
- intel_dp_write_dsc_pps_sdp(encoder, crtc_state);
+ if (encoder->type == INTEL_OUTPUT_DSI)
+ intel_dsc_dsi_pps_write(encoder, crtc_state);
+ else
+ intel_dsc_dp_pps_write(encoder, crtc_state);
- if (crtc_state->cpu_transcoder == TRANSCODER_EDP) {
+ if (!is_pipe_dsc(crtc_state)) {
dss_ctl1_reg = DSS_CTL1;
dss_ctl2_reg = DSS_CTL2;
} else {
@@ -938,7 +1019,7 @@ void intel_dsc_enable(struct intel_encoder *encoder,
void intel_dsc_disable(const struct intel_crtc_state *old_crtc_state)
{
- struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->base.crtc);
+ struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->uapi.crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
enum pipe pipe = crtc->pipe;
i915_reg_t dss_ctl1_reg, dss_ctl2_reg;
@@ -947,7 +1028,7 @@ void intel_dsc_disable(const struct intel_crtc_state *old_crtc_state)
if (!old_crtc_state->dsc.compression_enable)
return;
- if (old_crtc_state->cpu_transcoder == TRANSCODER_EDP) {
+ if (!is_pipe_dsc(old_crtc_state)) {
dss_ctl1_reg = DSS_CTL1;
dss_ctl2_reg = DSS_CTL2;
} else {
diff --git a/drivers/gpu/drm/i915/display/intel_vdsc.h b/drivers/gpu/drm/i915/display/intel_vdsc.h
index 90d3f6017fcb..e56a3254c214 100644
--- a/drivers/gpu/drm/i915/display/intel_vdsc.h
+++ b/drivers/gpu/drm/i915/display/intel_vdsc.h
@@ -6,15 +6,20 @@
#ifndef __INTEL_VDSC_H__
#define __INTEL_VDSC_H__
+#include <linux/types.h>
+
struct intel_encoder;
struct intel_crtc_state;
-struct intel_dp;
+bool intel_dsc_source_support(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state);
void intel_dsc_enable(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state);
void intel_dsc_disable(const struct intel_crtc_state *crtc_state);
-int intel_dp_compute_dsc_params(struct intel_dp *intel_dp,
- struct intel_crtc_state *pipe_config);
+int intel_dsc_compute_params(struct intel_encoder *encoder,
+ struct intel_crtc_state *pipe_config);
+void intel_dsc_get_config(struct intel_encoder *encoder,
+ struct intel_crtc_state *crtc_state);
enum intel_display_power_domain
intel_dsc_power_domain(const struct intel_crtc_state *crtc_state);
diff --git a/drivers/gpu/drm/i915/display/vlv_dsi.c b/drivers/gpu/drm/i915/display/vlv_dsi.c
index 0ca49b1604c6..daf4fc3dab6f 100644
--- a/drivers/gpu/drm/i915/display/vlv_dsi.c
+++ b/drivers/gpu/drm/i915/display/vlv_dsi.c
@@ -23,7 +23,6 @@
* Author: Jani Nikula <jani.nikula@intel.com>
*/
-#include <linux/gpio/consumer.h>
#include <linux/slab.h>
#include <drm/drm_atomic_helper.h>
@@ -261,9 +260,9 @@ static int intel_dsi_compute_config(struct intel_encoder *encoder,
struct intel_dsi *intel_dsi = container_of(encoder, struct intel_dsi,
base);
struct intel_connector *intel_connector = intel_dsi->attached_connector;
- struct intel_crtc *crtc = to_intel_crtc(pipe_config->base.crtc);
+ struct intel_crtc *crtc = to_intel_crtc(pipe_config->uapi.crtc);
const struct drm_display_mode *fixed_mode = intel_connector->panel.fixed_mode;
- struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;
+ struct drm_display_mode *adjusted_mode = &pipe_config->hw.adjusted_mode;
int ret;
DRM_DEBUG_KMS("\n");
@@ -319,7 +318,7 @@ static int intel_dsi_compute_config(struct intel_encoder *encoder,
static bool glk_dsi_enable_io(struct intel_encoder *encoder)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
+ struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
enum port port;
u32 tmp;
bool cold_boot = false;
@@ -367,7 +366,7 @@ static bool glk_dsi_enable_io(struct intel_encoder *encoder)
static void glk_dsi_device_ready(struct intel_encoder *encoder)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
+ struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
enum port port;
u32 val;
@@ -438,7 +437,7 @@ static void glk_dsi_device_ready(struct intel_encoder *encoder)
static void bxt_dsi_device_ready(struct intel_encoder *encoder)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
+ struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
enum port port;
u32 val;
@@ -465,7 +464,7 @@ static void bxt_dsi_device_ready(struct intel_encoder *encoder)
static void vlv_dsi_device_ready(struct intel_encoder *encoder)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
+ struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
enum port port;
u32 val;
@@ -516,7 +515,7 @@ static void intel_dsi_device_ready(struct intel_encoder *encoder)
static void glk_dsi_enter_low_power_mode(struct intel_encoder *encoder)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
+ struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
enum port port;
u32 val;
@@ -546,7 +545,7 @@ static void glk_dsi_enter_low_power_mode(struct intel_encoder *encoder)
static void glk_dsi_disable_mipi_io(struct intel_encoder *encoder)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
+ struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
enum port port;
u32 tmp;
@@ -579,7 +578,7 @@ static void glk_dsi_clear_device_ready(struct intel_encoder *encoder)
static void vlv_dsi_clear_device_ready(struct intel_encoder *encoder)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
+ struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
enum port port;
DRM_DEBUG_KMS("\n");
@@ -624,8 +623,8 @@ static void intel_dsi_port_enable(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
- struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+ struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
enum port port;
if (intel_dsi->dual_link == DSI_DUAL_LINK_FRONT_BACK) {
@@ -681,7 +680,7 @@ static void intel_dsi_port_disable(struct intel_encoder *encoder)
{
struct drm_device *dev = encoder->base.dev;
struct drm_i915_private *dev_priv = to_i915(dev);
- struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
+ struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
enum port port;
for_each_dsi_port(port, intel_dsi->ports) {
@@ -745,8 +744,8 @@ static void intel_dsi_pre_enable(struct intel_encoder *encoder,
const struct intel_crtc_state *pipe_config,
const struct drm_connector_state *conn_state)
{
- struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
- struct drm_crtc *crtc = pipe_config->base.crtc;
+ struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
+ struct drm_crtc *crtc = pipe_config->uapi.crtc;
struct drm_i915_private *dev_priv = to_i915(crtc->dev);
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
enum pipe pipe = intel_crtc->pipe;
@@ -793,9 +792,6 @@ static void intel_dsi_pre_enable(struct intel_encoder *encoder,
if (!IS_GEMINILAKE(dev_priv))
intel_dsi_prepare(encoder, pipe_config);
- /* Power on, try both CRC pmic gpio and VBT */
- if (intel_dsi->gpio_panel)
- gpiod_set_value_cansleep(intel_dsi->gpio_panel, 1);
intel_dsi_vbt_exec_sequence(intel_dsi, MIPI_SEQ_POWER_ON);
intel_dsi_msleep(intel_dsi, intel_dsi->panel_on_delay);
@@ -850,7 +846,7 @@ static void intel_dsi_disable(struct intel_encoder *encoder,
const struct intel_crtc_state *old_crtc_state,
const struct drm_connector_state *old_conn_state)
{
- struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
+ struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
enum port port;
DRM_DEBUG_KMS("\n");
@@ -882,16 +878,22 @@ static void intel_dsi_clear_device_ready(struct intel_encoder *encoder)
}
static void intel_dsi_post_disable(struct intel_encoder *encoder,
- const struct intel_crtc_state *pipe_config,
- const struct drm_connector_state *conn_state)
+ const struct intel_crtc_state *old_crtc_state,
+ const struct drm_connector_state *old_conn_state)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
+ struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
enum port port;
u32 val;
DRM_DEBUG_KMS("\n");
+ if (IS_GEN9_LP(dev_priv)) {
+ intel_crtc_vblank_off(old_crtc_state);
+
+ skl_scaler_disable(old_crtc_state);
+ }
+
if (is_vid_mode(intel_dsi)) {
for_each_dsi_port(port, intel_dsi->ports)
vlv_dsi_wait_for_fifo_empty(intel_dsi, port);
@@ -939,11 +941,8 @@ static void intel_dsi_post_disable(struct intel_encoder *encoder,
/* Assert reset */
intel_dsi_vbt_exec_sequence(intel_dsi, MIPI_SEQ_ASSERT_RESET);
- /* Power off, try both CRC pmic gpio and VBT */
intel_dsi_msleep(intel_dsi, intel_dsi->panel_off_delay);
intel_dsi_vbt_exec_sequence(intel_dsi, MIPI_SEQ_POWER_OFF);
- if (intel_dsi->gpio_panel)
- gpiod_set_value_cansleep(intel_dsi->gpio_panel, 0);
/*
* FIXME As we do with eDP, just make a note of the time here
@@ -956,7 +955,7 @@ static bool intel_dsi_get_hw_state(struct intel_encoder *encoder,
enum pipe *pipe)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
+ struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
intel_wakeref_t wakeref;
enum port port;
bool active = false;
@@ -1032,10 +1031,10 @@ static void bxt_dsi_get_pipe_config(struct intel_encoder *encoder,
struct drm_device *dev = encoder->base.dev;
struct drm_i915_private *dev_priv = to_i915(dev);
struct drm_display_mode *adjusted_mode =
- &pipe_config->base.adjusted_mode;
+ &pipe_config->hw.adjusted_mode;
struct drm_display_mode *adjusted_mode_sw;
- struct intel_crtc *crtc = to_intel_crtc(pipe_config->base.crtc);
- struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
+ struct intel_crtc *crtc = to_intel_crtc(pipe_config->uapi.crtc);
+ struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
unsigned int lane_count = intel_dsi->lane_count;
unsigned int bpp, fmt;
enum port port;
@@ -1045,7 +1044,7 @@ static void bxt_dsi_get_pipe_config(struct intel_encoder *encoder,
crtc_hblank_start_sw, crtc_hblank_end_sw;
/* FIXME: hw readout should not depend on SW state */
- adjusted_mode_sw = &crtc->config->base.adjusted_mode;
+ adjusted_mode_sw = &crtc->config->hw.adjusted_mode;
/*
* At least one port is active, as encoder->get_config is called only if
@@ -1204,7 +1203,7 @@ static void intel_dsi_get_config(struct intel_encoder *encoder,
}
if (pclk) {
- pipe_config->base.adjusted_mode.crtc_clock = pclk;
+ pipe_config->hw.adjusted_mode.crtc_clock = pclk;
pipe_config->port_clock = pclk;
}
}
@@ -1228,7 +1227,7 @@ static void set_dsi_timings(struct drm_encoder *encoder,
{
struct drm_device *dev = encoder->dev;
struct drm_i915_private *dev_priv = to_i915(dev);
- struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
+ struct intel_dsi *intel_dsi = enc_to_intel_dsi(to_intel_encoder(encoder));
enum port port;
unsigned int bpp = mipi_dsi_pixel_format_to_bpp(intel_dsi->pixel_format);
unsigned int lane_count = intel_dsi->lane_count;
@@ -1315,9 +1314,9 @@ static void intel_dsi_prepare(struct intel_encoder *intel_encoder,
struct drm_encoder *encoder = &intel_encoder->base;
struct drm_device *dev = encoder->dev;
struct drm_i915_private *dev_priv = to_i915(dev);
- struct intel_crtc *intel_crtc = to_intel_crtc(pipe_config->base.crtc);
- struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
- const struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;
+ struct intel_crtc *intel_crtc = to_intel_crtc(pipe_config->uapi.crtc);
+ struct intel_dsi *intel_dsi = enc_to_intel_dsi(to_intel_encoder(encoder));
+ const struct drm_display_mode *adjusted_mode = &pipe_config->hw.adjusted_mode;
enum port port;
unsigned int bpp = mipi_dsi_pixel_format_to_bpp(intel_dsi->pixel_format);
u32 val, tmp;
@@ -1506,7 +1505,7 @@ static void intel_dsi_prepare(struct intel_encoder *intel_encoder,
static void intel_dsi_unprepare(struct intel_encoder *encoder)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
+ struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
enum port port;
u32 val;
@@ -1533,12 +1532,9 @@ static void intel_dsi_unprepare(struct intel_encoder *encoder)
static void intel_dsi_encoder_destroy(struct drm_encoder *encoder)
{
- struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
-
- /* dispose of the gpios */
- if (intel_dsi->gpio_panel)
- gpiod_put(intel_dsi->gpio_panel);
+ struct intel_dsi *intel_dsi = enc_to_intel_dsi(to_intel_encoder(encoder));
+ intel_dsi_vbt_gpio_cleanup(intel_dsi);
intel_encoder_destroy(encoder);
}
@@ -1819,6 +1815,7 @@ void vlv_dsi_init(struct drm_i915_private *dev_priv)
struct drm_connector *connector;
struct drm_display_mode *current_mode, *fixed_mode;
enum port port;
+ enum pipe pipe;
DRM_DEBUG_KMS("\n");
@@ -1917,20 +1914,8 @@ void vlv_dsi_init(struct drm_i915_private *dev_priv)
vlv_dphy_param_init(intel_dsi);
- /*
- * In case of BYT with CRC PMIC, we need to use GPIO for
- * Panel control.
- */
- if ((IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) &&
- (dev_priv->vbt.dsi.config->pwm_blc == PPS_BLC_PMIC)) {
- intel_dsi->gpio_panel =
- gpiod_get(dev->dev, "panel", GPIOD_OUT_HIGH);
-
- if (IS_ERR(intel_dsi->gpio_panel)) {
- DRM_ERROR("Failed to own gpio for panel control\n");
- intel_dsi->gpio_panel = NULL;
- }
- }
+ intel_dsi_vbt_gpio_init(intel_dsi,
+ intel_dsi_get_hw_state(intel_encoder, &pipe));
drm_connector_init(dev, connector, &intel_dsi_connector_funcs,
DRM_MODE_CONNECTOR_DSI);
diff --git a/drivers/gpu/drm/i915/display/vlv_dsi_pll.c b/drivers/gpu/drm/i915/display/vlv_dsi_pll.c
index 95f39cd0ce02..6b89e67b120f 100644
--- a/drivers/gpu/drm/i915/display/vlv_dsi_pll.c
+++ b/drivers/gpu/drm/i915/display/vlv_dsi_pll.c
@@ -117,7 +117,7 @@ int vlv_dsi_pll_compute(struct intel_encoder *encoder,
struct intel_crtc_state *config)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
+ struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
int ret;
u32 dsi_clk;
@@ -255,7 +255,7 @@ u32 vlv_dsi_get_pclk(struct intel_encoder *encoder,
struct intel_crtc_state *config)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
+ struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
int bpp = mipi_dsi_pixel_format_to_bpp(intel_dsi->pixel_format);
u32 dsi_clock, pclk;
u32 pll_ctl, pll_div;
@@ -321,7 +321,7 @@ u32 bxt_dsi_get_pclk(struct intel_encoder *encoder,
u32 pclk;
u32 dsi_clk;
u32 dsi_ratio;
- struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
+ struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
int bpp = mipi_dsi_pixel_format_to_bpp(intel_dsi->pixel_format);
@@ -341,7 +341,7 @@ void vlv_dsi_reset_clocks(struct intel_encoder *encoder, enum port port)
{
u32 temp;
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
+ struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
temp = I915_READ(MIPI_CTRL(port));
temp &= ~ESCAPE_CLOCK_DIVIDER_MASK;
@@ -455,7 +455,7 @@ int bxt_dsi_pll_compute(struct intel_encoder *encoder,
struct intel_crtc_state *config)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
+ struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
u8 dsi_ratio, dsi_ratio_min, dsi_ratio_max;
u32 dsi_clk;
@@ -503,7 +503,7 @@ void bxt_dsi_pll_enable(struct intel_encoder *encoder,
const struct intel_crtc_state *config)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
+ struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
enum port port;
u32 val;
diff --git a/drivers/gpu/drm/i915/gem/Makefile b/drivers/gpu/drm/i915/gem/Makefile
deleted file mode 100644
index 7e73aa587967..000000000000
--- a/drivers/gpu/drm/i915/gem/Makefile
+++ /dev/null
@@ -1,5 +0,0 @@
-# For building individual subdir files on the command line
-subdir-ccflags-y += -I$(srctree)/$(src)/..
-
-# Extra header tests
-header-test-pattern-$(CONFIG_DRM_I915_WERROR) := *.h
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_busy.c b/drivers/gpu/drm/i915/gem/i915_gem_busy.c
index 3d4f5775a4ba..25235ef630c1 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_busy.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_busy.c
@@ -9,16 +9,16 @@
#include "i915_gem_ioctls.h"
#include "i915_gem_object.h"
-static __always_inline u32 __busy_read_flag(u8 id)
+static __always_inline u32 __busy_read_flag(u16 id)
{
- if (id == (u8)I915_ENGINE_CLASS_INVALID)
+ if (id == (u16)I915_ENGINE_CLASS_INVALID)
return 0xffff0000u;
GEM_BUG_ON(id >= 16);
return 0x10000u << id;
}
-static __always_inline u32 __busy_write_id(u8 id)
+static __always_inline u32 __busy_write_id(u16 id)
{
/*
* The uABI guarantees an active writer is also amongst the read
@@ -29,14 +29,14 @@ static __always_inline u32 __busy_write_id(u8 id)
* last_read - hence we always set both read and write busy for
* last_write.
*/
- if (id == (u8)I915_ENGINE_CLASS_INVALID)
+ if (id == (u16)I915_ENGINE_CLASS_INVALID)
return 0xffffffffu;
return (id + 1) | __busy_read_flag(id);
}
static __always_inline unsigned int
-__busy_set_if_active(const struct dma_fence *fence, u32 (*flag)(u8 id))
+__busy_set_if_active(const struct dma_fence *fence, u32 (*flag)(u16 id))
{
const struct i915_request *rq;
@@ -57,7 +57,7 @@ __busy_set_if_active(const struct dma_fence *fence, u32 (*flag)(u8 id))
return 0;
/* Beware type-expansion follies! */
- BUILD_BUG_ON(!typecheck(u8, rq->engine->uabi_class));
+ BUILD_BUG_ON(!typecheck(u16, rq->engine->uabi_class));
return flag(rq->engine->uabi_class);
}
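
The u8 to u16 widening matters because I915_ENGINE_CLASS_INVALID is -1 in the uAPI; once uabi_class grew to u16, an unchanged u8 comparison would never match it again. A user-space mirror of the encoding, with the constants copied from the functions above:

#include <stdint.h>
#include <stdio.h>

#define ENGINE_CLASS_INVALID ((uint16_t)-1)	/* 0xffff */

static uint32_t busy_read_flag(uint16_t id)
{
	if (id == ENGINE_CLASS_INVALID)
		return 0xffff0000u;	/* unknown engine: every read flag */
	return 0x10000u << id;
}

static uint32_t busy_write_id(uint16_t id)
{
	if (id == ENGINE_CLASS_INVALID)
		return 0xffffffffu;	/* unknown engine: reader and writer */
	/* an active writer is also reported amongst the readers */
	return (id + 1) | busy_read_flag(id);
}

int main(void)
{
	printf("read(0)   = 0x%08x\n", busy_read_flag(0));	/* 0x00010000 */
	printf("write(0)  = 0x%08x\n", busy_write_id(0));	/* 0x00010001 */
	printf("write(~0) = 0x%08x\n", busy_write_id(ENGINE_CLASS_INVALID));
	return 0;
}
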
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_clflush.c b/drivers/gpu/drm/i915/gem/i915_gem_clflush.c
index 18ee708585a9..34be4c0ee7c5 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_clflush.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_clflush.c
@@ -27,27 +27,24 @@ static void __do_clflush(struct drm_i915_gem_object *obj)
static int clflush_work(struct dma_fence_work *base)
{
struct clflush *clflush = container_of(base, typeof(*clflush), base);
- struct drm_i915_gem_object *obj = fetch_and_zero(&clflush->obj);
+ struct drm_i915_gem_object *obj = clflush->obj;
int err;
err = i915_gem_object_pin_pages(obj);
if (err)
- goto put;
+ return err;
__do_clflush(obj);
i915_gem_object_unpin_pages(obj);
-put:
- i915_gem_object_put(obj);
- return err;
+ return 0;
}
static void clflush_release(struct dma_fence_work *base)
{
struct clflush *clflush = container_of(base, typeof(*clflush), base);
- if (clflush->obj)
- i915_gem_object_put(clflush->obj);
+ i915_gem_object_put(clflush->obj);
}
static const struct dma_fence_work_ops clflush_ops = {
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_context.c b/drivers/gpu/drm/i915/gem/i915_gem_context.c
index 42385277c684..a2e57e62af30 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_context.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_context.c
@@ -69,7 +69,10 @@
#include <drm/i915_drm.h>
+#include "gt/gen6_ppgtt.h"
+#include "gt/intel_context.h"
#include "gt/intel_engine_heartbeat.h"
+#include "gt/intel_engine_pm.h"
#include "gt/intel_engine_user.h"
#include "gt/intel_lrc_reg.h"
#include "gt/intel_ring.h"
@@ -169,12 +172,80 @@ lookup_user_engine(struct i915_gem_context *ctx,
return i915_gem_context_get_engine(ctx, idx);
}
+static struct i915_address_space *
+context_get_vm_rcu(struct i915_gem_context *ctx)
+{
+ GEM_BUG_ON(!rcu_access_pointer(ctx->vm));
+
+ do {
+ struct i915_address_space *vm;
+
+ /*
+ * We do not allow downgrading from full-ppgtt [to a shared
+ * global gtt], so ctx->vm cannot become NULL.
+ */
+ vm = rcu_dereference(ctx->vm);
+ if (!kref_get_unless_zero(&vm->ref))
+ continue;
+
+ /*
+ * This ppgtt may have been reallocated between
+ * the read and the kref, and reassigned to a third
+ * context. In order to avoid inadvertent sharing
+ * of this ppgtt with that third context (and not
+ * src), we have to confirm that we have the same
+ * ppgtt after passing through the strong memory
+ * barrier implied by a successful
+ * kref_get_unless_zero().
+ *
+ * Once we have acquired the current ppgtt of ctx,
+ * we no longer care if it is released from ctx, as
+ * it cannot be reallocated elsewhere.
+ */
+
+ if (vm == rcu_access_pointer(ctx->vm))
+ return rcu_pointer_handoff(vm);
+
+ i915_vm_put(vm);
+ } while (1);
+}
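+
The loop above is the usual RCU pattern of taking a conditional reference and then re-checking the published pointer. A user-space sketch of the refcount half, with a hypothetical ref_get_unless_zero() standing in for kref_get_unless_zero():

#include <stdatomic.h>
#include <stdbool.h>

/* Hypothetical analogue of kref_get_unless_zero(): take a reference
 * only while the count is still non-zero, i.e. the object is live. */
static bool ref_get_unless_zero(atomic_int *ref)
{
	int old = atomic_load(ref);

	while (old != 0) {
		/* on failure, 'old' is reloaded with the current count */
		if (atomic_compare_exchange_weak(ref, &old, old + 1))
			return true;	/* success implies a full barrier */
	}
	return false;	/* already zero: the object may be mid-free */
}

The caller then compares the pointer it dereferenced against the currently published one, exactly as context_get_vm_rcu() does, and retries on mismatch.
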
+
+static void intel_context_set_gem(struct intel_context *ce,
+ struct i915_gem_context *ctx)
+{
+ GEM_BUG_ON(rcu_access_pointer(ce->gem_context));
+ RCU_INIT_POINTER(ce->gem_context, ctx);
+
+ if (!test_bit(CONTEXT_ALLOC_BIT, &ce->flags))
+ ce->ring = __intel_context_ring_size(SZ_16K);
+
+ if (rcu_access_pointer(ctx->vm)) {
+ struct i915_address_space *vm;
+
+ rcu_read_lock();
+ vm = context_get_vm_rcu(ctx); /* hmm */
+ rcu_read_unlock();
+
+ i915_vm_put(ce->vm);
+ ce->vm = vm;
+ }
+
+ GEM_BUG_ON(ce->timeline);
+ if (ctx->timeline)
+ ce->timeline = intel_timeline_get(ctx->timeline);
+
+ if (ctx->sched.priority >= I915_PRIORITY_NORMAL &&
+ intel_engine_has_semaphores(ce->engine))
+ __set_bit(CONTEXT_USE_SEMAPHORES, &ce->flags);
+}
+
static void __free_engines(struct i915_gem_engines *e, unsigned int count)
{
while (count--) {
if (!e->engines[count])
continue;
+ RCU_INIT_POINTER(e->engines[count]->gem_context, NULL);
intel_context_put(e->engines[count]);
}
kfree(e);
@@ -211,12 +282,14 @@ static struct i915_gem_engines *default_engines(struct i915_gem_context *ctx)
GEM_BUG_ON(engine->legacy_idx >= I915_NUM_ENGINES);
GEM_BUG_ON(e->engines[engine->legacy_idx]);
- ce = intel_context_create(ctx, engine);
+ ce = intel_context_create(engine);
if (IS_ERR(ce)) {
__free_engines(e, e->num_engines + 1);
return ERR_CAST(ce);
}
+ intel_context_set_gem(ce, ctx);
+
e->engines[engine->legacy_idx] = ce;
e->num_engines = max(e->num_engines, engine->legacy_idx);
}
@@ -236,14 +309,10 @@ static void i915_gem_context_free(struct i915_gem_context *ctx)
free_engines(rcu_access_pointer(ctx->engines));
mutex_destroy(&ctx->engines_mutex);
- kfree(ctx->jump_whitelist);
-
if (ctx->timeline)
intel_timeline_put(ctx->timeline);
- kfree(ctx->name);
put_pid(ctx->pid);
-
mutex_destroy(&ctx->mutex);
kfree_rcu(ctx, rcu);
@@ -389,15 +458,6 @@ static void kill_context(struct i915_gem_context *ctx)
struct intel_context *ce;
/*
- * If we are already banned, it was due to a guilty request causing
- * a reset and the entire context being evicted from the GPU.
- */
- if (i915_gem_context_is_banned(ctx))
- return;
-
- i915_gem_context_set_banned(ctx);
-
- /*
* Map the user's engine back to the actual engines; one virtual
* engine will be mapped to multiple engines, and using ctx->engine[]
* the same engine may have multiple instances in the user's map.
@@ -407,6 +467,9 @@ static void kill_context(struct i915_gem_context *ctx)
for_each_gem_engine(ce, __context_engines_static(ctx), it) {
struct intel_engine_cs *engine;
+ if (intel_context_set_banned(ce))
+ continue;
+
/*
* Check the current active state of this context; if we
* are currently executing on the GPU we need to evict
@@ -427,11 +490,29 @@ static void kill_context(struct i915_gem_context *ctx)
}
}
+static void set_closed_name(struct i915_gem_context *ctx)
+{
+ char *s;
+
+ /* Replace '[]' with '<>' to indicate closed in debug prints */
+
+ s = strrchr(ctx->name, '[');
+ if (!s)
+ return;
+
+ *s = '<';
+
+ s = strchr(s + 1, ']');
+ if (s)
+ *s = '>';
+}
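+
Editing the name in place like this is only safe because the name becomes an embedded array in the i915_gem_context_types.h hunk below. A stand-alone copy of the rename:

#include <stdio.h>
#include <string.h>

/* "Xorg[1234]" -> "Xorg<1234>", marking a closed context in logs */
static void mark_closed(char *name)
{
	char *s = strrchr(name, '[');

	if (!s)
		return;
	*s = '<';

	s = strchr(s + 1, ']');
	if (s)
		*s = '>';
}

int main(void)
{
	char name[32] = "Xorg[1234]";

	mark_closed(name);
	printf("%s\n", name);	/* Xorg<1234> */
	return 0;
}
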
+
static void context_close(struct i915_gem_context *ctx)
{
struct i915_address_space *vm;
i915_gem_context_set_closed(ctx);
+ set_closed_name(ctx);
mutex_lock(&ctx->mutex);
@@ -529,9 +610,6 @@ __create_context(struct drm_i915_private *i915)
for (i = 0; i < ARRAY_SIZE(ctx->hang_timestamp); i++)
ctx->hang_timestamp[i] = jiffies - CONTEXT_FAST_HANG_JIFFIES;
- ctx->jump_whitelist = NULL;
- ctx->jump_whitelist_cmds = 0;
-
spin_lock(&i915->gem.contexts.lock);
list_add_tail(&ctx->link, &i915->gem.contexts.list);
spin_unlock(&i915->gem.contexts.lock);
@@ -628,7 +706,7 @@ i915_gem_create_context(struct drm_i915_private *i915, unsigned int flags)
if (HAS_FULL_PPGTT(i915)) {
struct i915_ppgtt *ppgtt;
- ppgtt = i915_ppgtt_create(i915);
+ ppgtt = i915_ppgtt_create(&i915->gt);
if (IS_ERR(ppgtt)) {
DRM_DEBUG_DRIVER("PPGTT setup failed (%ld)\n",
PTR_ERR(ppgtt));
@@ -661,37 +739,6 @@ i915_gem_create_context(struct drm_i915_private *i915, unsigned int flags)
return ctx;
}
-static void
-destroy_kernel_context(struct i915_gem_context **ctxp)
-{
- struct i915_gem_context *ctx;
-
- /* Keep the context ref so that we can free it immediately ourselves */
- ctx = i915_gem_context_get(fetch_and_zero(ctxp));
- GEM_BUG_ON(!i915_gem_context_is_kernel(ctx));
-
- context_close(ctx);
- i915_gem_context_free(ctx);
-}
-
-struct i915_gem_context *
-i915_gem_context_create_kernel(struct drm_i915_private *i915, int prio)
-{
- struct i915_gem_context *ctx;
-
- ctx = i915_gem_create_context(i915, 0);
- if (IS_ERR(ctx))
- return ctx;
-
- i915_gem_context_clear_bannable(ctx);
- i915_gem_context_set_persistence(ctx);
- ctx->sched.priority = I915_USER_PRIORITY(prio);
-
- GEM_BUG_ON(!i915_gem_context_is_kernel(ctx));
-
- return ctx;
-}
-
static void init_contexts(struct i915_gem_contexts *gc)
{
spin_lock_init(&gc->lock);
@@ -701,41 +748,19 @@ static void init_contexts(struct i915_gem_contexts *gc)
init_llist_head(&gc->free_list);
}
-int i915_gem_init_contexts(struct drm_i915_private *i915)
+void i915_gem_init__contexts(struct drm_i915_private *i915)
{
- struct i915_gem_context *ctx;
-
- /* Reassure ourselves we are only called once */
- GEM_BUG_ON(i915->kernel_context);
-
init_contexts(&i915->gem.contexts);
-
- /* lowest priority; idle task */
- ctx = i915_gem_context_create_kernel(i915, I915_PRIORITY_MIN);
- if (IS_ERR(ctx)) {
- DRM_ERROR("Failed to create default global context\n");
- return PTR_ERR(ctx);
- }
- i915->kernel_context = ctx;
-
DRM_DEBUG_DRIVER("%s context support initialized\n",
DRIVER_CAPS(i915)->has_logical_contexts ?
"logical" : "fake");
- return 0;
}
void i915_gem_driver_release__contexts(struct drm_i915_private *i915)
{
- destroy_kernel_context(&i915->kernel_context);
flush_work(&i915->gem.contexts.free_work);
}
-static int context_idr_cleanup(int id, void *p, void *data)
-{
- context_close(p);
- return 0;
-}
-
static int vm_idr_cleanup(int id, void *p, void *data)
{
i915_vm_put(p);
@@ -743,7 +768,8 @@ static int vm_idr_cleanup(int id, void *p, void *data)
}
static int gem_context_register(struct i915_gem_context *ctx,
- struct drm_i915_file_private *fpriv)
+ struct drm_i915_file_private *fpriv,
+ u32 *id)
{
struct i915_address_space *vm;
int ret;
@@ -757,24 +783,14 @@ static int gem_context_register(struct i915_gem_context *ctx,
mutex_unlock(&ctx->mutex);
ctx->pid = get_task_pid(current, PIDTYPE_PID);
- ctx->name = kasprintf(GFP_KERNEL, "%s[%d]",
- current->comm, pid_nr(ctx->pid));
- if (!ctx->name) {
- ret = -ENOMEM;
- goto err_pid;
- }
+ snprintf(ctx->name, sizeof(ctx->name), "%s[%d]",
+ current->comm, pid_nr(ctx->pid));
/* And finally expose ourselves to userspace via the idr */
- mutex_lock(&fpriv->context_idr_lock);
- ret = idr_alloc(&fpriv->context_idr, ctx, 0, 0, GFP_KERNEL);
- mutex_unlock(&fpriv->context_idr_lock);
- if (ret >= 0)
- goto out;
+ ret = xa_alloc(&fpriv->context_xa, id, ctx, xa_limit_32b, GFP_KERNEL);
+ if (ret)
+ put_pid(fetch_and_zero(&ctx->pid));
- kfree(fetch_and_zero(&ctx->name));
-err_pid:
- put_pid(fetch_and_zero(&ctx->pid));
-out:
return ret;
}
@@ -784,11 +800,11 @@ int i915_gem_context_open(struct drm_i915_private *i915,
struct drm_i915_file_private *file_priv = file->driver_priv;
struct i915_gem_context *ctx;
int err;
+ u32 id;
- mutex_init(&file_priv->context_idr_lock);
- mutex_init(&file_priv->vm_idr_lock);
+ xa_init_flags(&file_priv->context_xa, XA_FLAGS_ALLOC);
- idr_init(&file_priv->context_idr);
+ mutex_init(&file_priv->vm_idr_lock);
idr_init_base(&file_priv->vm_idr, 1);
ctx = i915_gem_create_context(i915, 0);
@@ -797,22 +813,19 @@ int i915_gem_context_open(struct drm_i915_private *i915,
goto err;
}
- err = gem_context_register(ctx, file_priv);
+ err = gem_context_register(ctx, file_priv, &id);
if (err < 0)
goto err_ctx;
- GEM_BUG_ON(i915_gem_context_is_kernel(ctx));
- GEM_BUG_ON(err > 0);
-
+ GEM_BUG_ON(id);
return 0;
err_ctx:
context_close(ctx);
err:
idr_destroy(&file_priv->vm_idr);
- idr_destroy(&file_priv->context_idr);
+ xa_destroy(&file_priv->context_xa);
mutex_destroy(&file_priv->vm_idr_lock);
- mutex_destroy(&file_priv->context_idr_lock);
return err;
}
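
The conversion swaps a mutex-guarded IDR for an XArray, whose allocating store is internally locked. A condensed kernel-context sketch of the new id lifecycle (not standalone-runnable; error paths and the surrounding functions elided):

/* registration: pick the lowest free id and publish in one step */
xa_init_flags(&file_priv->context_xa, XA_FLAGS_ALLOC);
err = xa_alloc(&file_priv->context_xa, &id, ctx, xa_limit_32b, GFP_KERNEL);

/* destroy ioctl: unpublish and fetch the entry atomically */
ctx = xa_erase(&file_priv->context_xa, args->ctx_id);

/* file close: walk whatever survived, then tear down */
xa_for_each(&file_priv->context_xa, idx, ctx)
	context_close(ctx);
xa_destroy(&file_priv->context_xa);
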
@@ -820,10 +833,12 @@ void i915_gem_context_close(struct drm_file *file)
{
struct drm_i915_file_private *file_priv = file->driver_priv;
struct drm_i915_private *i915 = file_priv->dev_priv;
+ struct i915_gem_context *ctx;
+ unsigned long idx;
- idr_for_each(&file_priv->context_idr, context_idr_cleanup, NULL);
- idr_destroy(&file_priv->context_idr);
- mutex_destroy(&file_priv->context_idr_lock);
+ xa_for_each(&file_priv->context_xa, idx, ctx)
+ context_close(ctx);
+ xa_destroy(&file_priv->context_xa);
idr_for_each(&file_priv->vm_idr, vm_idr_cleanup, NULL);
idr_destroy(&file_priv->vm_idr);
@@ -847,7 +862,7 @@ int i915_gem_vm_create_ioctl(struct drm_device *dev, void *data,
if (args->flags)
return -EINVAL;
- ppgtt = i915_ppgtt_create(i915);
+ ppgtt = i915_ppgtt_create(&i915->gt);
if (IS_ERR(ppgtt))
return PTR_ERR(ppgtt);
@@ -1012,7 +1027,7 @@ static int get_ppgtt(struct drm_i915_file_private *file_priv,
return -ENODEV;
rcu_read_lock();
- vm = i915_vm_get(ctx->vm);
+ vm = context_get_vm_rcu(ctx);
rcu_read_unlock();
ret = mutex_lock_interruptible(&file_priv->vm_idr_lock);
@@ -1049,7 +1064,7 @@ static void set_ppgtt_barrier(void *data)
static int emit_ppgtt_update(struct i915_request *rq, void *data)
{
- struct i915_address_space *vm = rq->hw_context->vm;
+ struct i915_address_space *vm = rq->context->vm;
struct intel_engine_cs *engine = rq->engine;
u32 base = engine->mmio_base;
u32 *cs;
@@ -1096,9 +1111,6 @@ static int emit_ppgtt_update(struct i915_request *rq, void *data)
}
*cs++ = MI_NOOP;
intel_ring_advance(rq, cs);
- } else {
- /* ppGTT is not part of the legacy context image */
- gen6_ppgtt_pin(i915_vm_to_ppgtt(vm));
}
return 0;
@@ -1106,10 +1118,20 @@ static int emit_ppgtt_update(struct i915_request *rq, void *data)
static bool skip_ppgtt_update(struct intel_context *ce, void *data)
{
+ if (!test_bit(CONTEXT_ALLOC_BIT, &ce->flags))
+ return true;
+
if (HAS_LOGICAL_RING_CONTEXTS(ce->engine->i915))
- return !ce->state;
- else
- return !atomic_read(&ce->pin_count);
+ return false;
+
+ if (!atomic_read(&ce->pin_count))
+ return true;
+
+ /* ppGTT is not part of the legacy context image */
+ if (gen6_ppgtt_pin(i915_vm_to_ppgtt(ce->vm)))
+ return true;
+
+ return false;
}
static int set_ppgtt(struct drm_i915_file_private *file_priv,
@@ -1214,12 +1236,14 @@ gen8_modify_rpcs(struct intel_context *ce, struct intel_sseu sseu)
* image, or into the registers directory, does not stick). Pristine
* and idle contexts will be configured on pinning.
*/
- if (!intel_context_is_pinned(ce))
+ if (!intel_context_pin_if_active(ce))
return 0;
- rq = i915_request_create(ce->engine->kernel_context);
- if (IS_ERR(rq))
- return PTR_ERR(rq);
+ rq = intel_engine_create_kernel_request(ce->engine);
+ if (IS_ERR(rq)) {
+ ret = PTR_ERR(rq);
+ goto out_unpin;
+ }
/* Serialise with the remote context */
ret = intel_context_prepare_remote_request(ce, rq);
@@ -1227,6 +1251,8 @@ gen8_modify_rpcs(struct intel_context *ce, struct intel_sseu sseu)
ret = gen8_emit_rpcs_config(rq, ce, sseu);
i915_request_add(rq);
+out_unpin:
+ intel_context_unpin(ce);
return ret;
}
@@ -1485,12 +1511,14 @@ set_engines__load_balance(struct i915_user_extension __user *base, void *data)
}
}
- ce = intel_execlists_create_virtual(set->ctx, siblings, n);
+ ce = intel_execlists_create_virtual(siblings, n);
if (IS_ERR(ce)) {
err = PTR_ERR(ce);
goto out_siblings;
}
+ intel_context_set_gem(ce, set->ctx);
+
if (cmpxchg(&set->engines->engines[idx], NULL, ce)) {
intel_context_put(ce);
err = -EEXIST;
@@ -1660,12 +1688,14 @@ set_engines(struct i915_gem_context *ctx,
return -ENOENT;
}
- ce = intel_context_create(ctx, engine);
+ ce = intel_context_create(engine);
if (IS_ERR(ce)) {
__free_engines(set.engines, n);
return PTR_ERR(ce);
}
+ intel_context_set_gem(ce, ctx);
+
set.engines->engines[n] = ce;
}
set.engines->num_engines = num_engines;
@@ -1806,6 +1836,44 @@ set_persistence(struct i915_gem_context *ctx,
return __context_set_persistence(ctx, args->value);
}
+static void __apply_priority(struct intel_context *ce, void *arg)
+{
+ struct i915_gem_context *ctx = arg;
+
+ if (!intel_engine_has_semaphores(ce->engine))
+ return;
+
+ if (ctx->sched.priority >= I915_PRIORITY_NORMAL)
+ intel_context_set_use_semaphores(ce);
+ else
+ intel_context_clear_use_semaphores(ce);
+}
+
+static int set_priority(struct i915_gem_context *ctx,
+ const struct drm_i915_gem_context_param *args)
+{
+ s64 priority = args->value;
+
+ if (args->size)
+ return -EINVAL;
+
+ if (!(ctx->i915->caps.scheduler & I915_SCHEDULER_CAP_PRIORITY))
+ return -ENODEV;
+
+ if (priority > I915_CONTEXT_MAX_USER_PRIORITY ||
+ priority < I915_CONTEXT_MIN_USER_PRIORITY)
+ return -EINVAL;
+
+ if (priority > I915_CONTEXT_DEFAULT_PRIORITY &&
+ !capable(CAP_SYS_NICE))
+ return -EPERM;
+
+ ctx->sched.priority = I915_USER_PRIORITY(priority);
+ context_apply_all(ctx, __apply_priority, ctx);
+
+ return 0;
+}
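+
Hoisted out of the switch, the checks keep their original precedence: a malformed size or out-of-range value fails with -EINVAL before the capability gate can return -EPERM. A user-space sketch with the uAPI bounds hard-coded (the -ENODEV scheduler-capability check is omitted):

#include <errno.h>
#include <stdio.h>

#define PRIO_MIN	(-1023)
#define PRIO_DEFAULT	0
#define PRIO_MAX	1023

static int check_priority(long long value, unsigned int size, int can_nice)
{
	if (size)
		return -EINVAL;
	if (value > PRIO_MAX || value < PRIO_MIN)
		return -EINVAL;
	if (value > PRIO_DEFAULT && !can_nice)
		return -EPERM;	/* raising priority needs CAP_SYS_NICE */
	return 0;
}

int main(void)
{
	printf("%d\n", check_priority(1024, 0, 1));	/* -22: out of range */
	printf("%d\n", check_priority(100, 4, 1));	/* -22: bad size */
	printf("%d\n", check_priority(100, 0, 0));	/* -1: not privileged */
	printf("%d\n", check_priority(-100, 0, 0));	/* 0: lowering is fine */
	return 0;
}
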
+
static int ctx_setparam(struct drm_i915_file_private *fpriv,
struct i915_gem_context *ctx,
struct drm_i915_gem_context_param *args)
@@ -1852,23 +1920,7 @@ static int ctx_setparam(struct drm_i915_file_private *fpriv,
break;
case I915_CONTEXT_PARAM_PRIORITY:
- {
- s64 priority = args->value;
-
- if (args->size)
- ret = -EINVAL;
- else if (!(ctx->i915->caps.scheduler & I915_SCHEDULER_CAP_PRIORITY))
- ret = -ENODEV;
- else if (priority > I915_CONTEXT_MAX_USER_PRIORITY ||
- priority < I915_CONTEXT_MIN_USER_PRIORITY)
- ret = -EINVAL;
- else if (priority > I915_CONTEXT_DEFAULT_PRIORITY &&
- !capable(CAP_SYS_NICE))
- ret = -EPERM;
- else
- ctx->sched.priority =
- I915_USER_PRIORITY(priority);
- }
+ ret = set_priority(ctx, args);
break;
case I915_CONTEXT_PARAM_SSEU:
@@ -1948,20 +2000,23 @@ static int clone_engines(struct i915_gem_context *dst,
*/
if (intel_engine_is_virtual(engine))
clone->engines[n] =
- intel_execlists_clone_virtual(dst, engine);
+ intel_execlists_clone_virtual(engine);
else
- clone->engines[n] = intel_context_create(dst, engine);
+ clone->engines[n] = intel_context_create(engine);
if (IS_ERR_OR_NULL(clone->engines[n])) {
__free_engines(clone, n);
goto err_unlock;
}
+
+ intel_context_set_gem(clone->engines[n], dst);
}
clone->num_engines = n;
user_engines = i915_gem_context_user_engines(src);
i915_gem_context_unlock_engines(src);
- free_engines(dst->engines);
+ /* Serialised by constructor */
+ free_engines(__context_engines_static(dst));
RCU_INIT_POINTER(dst->engines, clone);
if (user_engines)
i915_gem_context_set_user_engines(dst);
@@ -1996,7 +2051,8 @@ static int clone_sseu(struct i915_gem_context *dst,
unsigned long n;
int err;
- clone = dst->engines; /* no locking required; sole access */
+ /* no locking required; sole access under constructor */
+ clone = __context_engines_static(dst);
if (e->num_engines != clone->num_engines) {
err = -EINVAL;
goto unlock;
@@ -2041,47 +2097,21 @@ static int clone_vm(struct i915_gem_context *dst,
struct i915_address_space *vm;
int err = 0;
- rcu_read_lock();
- do {
- vm = rcu_dereference(src->vm);
- if (!vm)
- break;
-
- if (!kref_get_unless_zero(&vm->ref))
- continue;
-
- /*
- * This ppgtt may have be reallocated between
- * the read and the kref, and reassigned to a third
- * context. In order to avoid inadvertent sharing
- * of this ppgtt with that third context (and not
- * src), we have to confirm that we have the same
- * ppgtt after passing through the strong memory
- * barrier implied by a successful
- * kref_get_unless_zero().
- *
- * Once we have acquired the current ppgtt of src,
- * we no longer care if it is released from src, as
- * it cannot be reallocated elsewhere.
- */
-
- if (vm == rcu_access_pointer(src->vm))
- break;
+ if (!rcu_access_pointer(src->vm))
+ return 0;
- i915_vm_put(vm);
- } while (1);
+ rcu_read_lock();
+ vm = context_get_vm_rcu(src);
rcu_read_unlock();
- if (vm) {
- if (!mutex_lock_interruptible(&dst->mutex)) {
- __assign_ppgtt(dst, vm);
- mutex_unlock(&dst->mutex);
- } else {
- err = -EINTR;
- }
- i915_vm_put(vm);
+ if (!mutex_lock_interruptible(&dst->mutex)) {
+ __assign_ppgtt(dst, vm);
+ mutex_unlock(&dst->mutex);
+ } else {
+ err = -EINTR;
}
+ i915_vm_put(vm);
return err;
}
@@ -2153,6 +2183,7 @@ int i915_gem_context_create_ioctl(struct drm_device *dev, void *data,
struct drm_i915_gem_context_create_ext *args = data;
struct create_ext ext_data;
int ret;
+ u32 id;
if (!DRIVER_CAPS(i915)->has_logical_contexts)
return -ENODEV;
@@ -2184,11 +2215,11 @@ int i915_gem_context_create_ioctl(struct drm_device *dev, void *data,
goto err_ctx;
}
- ret = gem_context_register(ext_data.ctx, ext_data.fpriv);
+ ret = gem_context_register(ext_data.ctx, ext_data.fpriv, &id);
if (ret < 0)
goto err_ctx;
- args->ctx_id = ret;
+ args->ctx_id = id;
DRM_DEBUG("HW context %d created\n", args->ctx_id);
return 0;
@@ -2211,11 +2242,7 @@ int i915_gem_context_destroy_ioctl(struct drm_device *dev, void *data,
if (!args->ctx_id)
return -ENOENT;
- if (mutex_lock_interruptible(&file_priv->context_idr_lock))
- return -EINTR;
-
- ctx = idr_remove(&file_priv->context_idr, args->ctx_id);
- mutex_unlock(&file_priv->context_idr_lock);
+ ctx = xa_erase(&file_priv->context_xa, args->ctx_id);
if (!ctx)
return -ENOENT;
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_context.h b/drivers/gpu/drm/i915/gem/i915_gem_context.h
index 18e50a769a6e..3ae61a355d87 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_context.h
+++ b/drivers/gpu/drm/i915/gem/i915_gem_context.h
@@ -13,7 +13,6 @@
#include "i915_drv.h"
#include "i915_gem.h"
-#include "i915_gem_gtt.h"
#include "i915_scheduler.h"
#include "intel_device_info.h"
@@ -91,26 +90,6 @@ static inline void i915_gem_context_clear_persistence(struct i915_gem_context *c
clear_bit(UCONTEXT_PERSISTENCE, &ctx->user_flags);
}
-static inline bool i915_gem_context_is_banned(const struct i915_gem_context *ctx)
-{
- return test_bit(CONTEXT_BANNED, &ctx->flags);
-}
-
-static inline void i915_gem_context_set_banned(struct i915_gem_context *ctx)
-{
- set_bit(CONTEXT_BANNED, &ctx->flags);
-}
-
-static inline bool i915_gem_context_force_single_submission(const struct i915_gem_context *ctx)
-{
- return test_bit(CONTEXT_FORCE_SINGLE_SUBMISSION, &ctx->flags);
-}
-
-static inline void i915_gem_context_set_force_single_submission(struct i915_gem_context *ctx)
-{
- __set_bit(CONTEXT_FORCE_SINGLE_SUBMISSION, &ctx->flags);
-}
-
static inline bool
i915_gem_context_user_engines(const struct i915_gem_context *ctx)
{
@@ -129,31 +108,8 @@ i915_gem_context_clear_user_engines(struct i915_gem_context *ctx)
clear_bit(CONTEXT_USER_ENGINES, &ctx->flags);
}
-static inline bool
-i915_gem_context_nopreempt(const struct i915_gem_context *ctx)
-{
- return test_bit(CONTEXT_NOPREEMPT, &ctx->flags);
-}
-
-static inline void
-i915_gem_context_set_nopreempt(struct i915_gem_context *ctx)
-{
- set_bit(CONTEXT_NOPREEMPT, &ctx->flags);
-}
-
-static inline void
-i915_gem_context_clear_nopreempt(struct i915_gem_context *ctx)
-{
- clear_bit(CONTEXT_NOPREEMPT, &ctx->flags);
-}
-
-static inline bool i915_gem_context_is_kernel(struct i915_gem_context *ctx)
-{
- return !ctx->file_priv;
-}
-
/* i915_gem_context.c */
-int __must_check i915_gem_init_contexts(struct drm_i915_private *i915);
+void i915_gem_init__contexts(struct drm_i915_private *i915);
void i915_gem_driver_release__contexts(struct drm_i915_private *i915);
int i915_gem_context_open(struct drm_i915_private *i915,
@@ -178,9 +134,6 @@ int i915_gem_context_setparam_ioctl(struct drm_device *dev, void *data,
int i915_gem_context_reset_stats_ioctl(struct drm_device *dev, void *data,
struct drm_file *file);
-struct i915_gem_context *
-i915_gem_context_create_kernel(struct drm_i915_private *i915, int prio);
-
static inline struct i915_gem_context *
i915_gem_context_get(struct i915_gem_context *ctx)
{
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_context_types.h b/drivers/gpu/drm/i915/gem/i915_gem_context_types.h
index 3870dd5daaa0..017ca803ab47 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_context_types.h
+++ b/drivers/gpu/drm/i915/gem/i915_gem_context_types.h
@@ -100,15 +100,6 @@ struct i915_gem_context {
*/
struct pid *pid;
- /**
- * @name: arbitrary name
- *
- * A name is constructed for the context from the creator's process
- * name, pid and user handle in order to uniquely identify the
- * context in messages.
- */
- const char *name;
-
/** link: place with &drm_i915_private.context_list */
struct list_head link;
struct llist_node free_link;
@@ -143,11 +134,8 @@ struct i915_gem_context {
* @flags: small set of booleans
*/
unsigned long flags;
-#define CONTEXT_BANNED 0
-#define CONTEXT_CLOSED 1
-#define CONTEXT_FORCE_SINGLE_SUBMISSION 2
-#define CONTEXT_USER_ENGINES 3
-#define CONTEXT_NOPREEMPT 4
+#define CONTEXT_CLOSED 0
+#define CONTEXT_USER_ENGINES 1
struct mutex mutex;
@@ -177,12 +165,14 @@ struct i915_gem_context {
*/
struct radix_tree_root handles_vma;
- /** jump_whitelist: Bit array for tracking cmds during cmdparsing
- * Guarded by struct_mutex
+ /**
+ * @name: arbitrary name, used for user debug
+ *
+ * A name is constructed for the context from the creator's process
+ * name, pid and user handle in order to uniquely identify the
+ * context in messages.
*/
- unsigned long *jump_whitelist;
- /** jump_whitelist_cmds: No of cmd slots available */
- u32 jump_whitelist_cmds;
+ char name[TASK_COMM_LEN + 8];
};
#endif /* __I915_GEM_CONTEXT_TYPES_H__ */
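
With the name embedded in the struct, gem_context_register() earlier in this patch can snprintf() straight into the context, removing the kasprintf() allocation and its ENOMEM unwind path. A stand-alone illustration, assuming the kernel's TASK_COMM_LEN of 16:

#include <stdio.h>

#define TASK_COMM_LEN 16	/* kernel value: 15 chars of comm + NUL */

int main(void)
{
	char name[TASK_COMM_LEN + 8];

	/* snprintf() always NUL-terminates, so a worst-case comm/pid
	 * pair is truncated (the trailing ']' dropped), never overrun. */
	snprintf(name, sizeof(name), "%s[%d]", "chromium-render", 4194304);
	printf("%s\n", name);	/* chromium-render[4194304 */
	return 0;
}
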
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_dmabuf.c b/drivers/gpu/drm/i915/gem/i915_gem_dmabuf.c
index eaea49d08eb5..372b57ca0efc 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_dmabuf.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_dmabuf.c
@@ -93,40 +93,6 @@ static void i915_gem_dmabuf_vunmap(struct dma_buf *dma_buf, void *vaddr)
i915_gem_object_unpin_map(obj);
}
-static void *i915_gem_dmabuf_kmap(struct dma_buf *dma_buf, unsigned long page_num)
-{
- struct drm_i915_gem_object *obj = dma_buf_to_obj(dma_buf);
- struct page *page;
-
- if (page_num >= obj->base.size >> PAGE_SHIFT)
- return NULL;
-
- if (!i915_gem_object_has_struct_page(obj))
- return NULL;
-
- if (i915_gem_object_pin_pages(obj))
- return NULL;
-
- /* Synchronisation is left to the caller (via .begin_cpu_access()) */
- page = i915_gem_object_get_page(obj, page_num);
- if (IS_ERR(page))
- goto err_unpin;
-
- return kmap(page);
-
-err_unpin:
- i915_gem_object_unpin_pages(obj);
- return NULL;
-}
-
-static void i915_gem_dmabuf_kunmap(struct dma_buf *dma_buf, unsigned long page_num, void *addr)
-{
- struct drm_i915_gem_object *obj = dma_buf_to_obj(dma_buf);
-
- kunmap(virt_to_page(addr));
- i915_gem_object_unpin_pages(obj);
-}
-
static int i915_gem_dmabuf_mmap(struct dma_buf *dma_buf, struct vm_area_struct *vma)
{
struct drm_i915_gem_object *obj = dma_buf_to_obj(dma_buf);
@@ -195,8 +161,6 @@ static const struct dma_buf_ops i915_dmabuf_ops = {
.map_dma_buf = i915_gem_map_dma_buf,
.unmap_dma_buf = i915_gem_unmap_dma_buf,
.release = drm_gem_dmabuf_release,
- .map = i915_gem_dmabuf_kmap,
- .unmap = i915_gem_dmabuf_kunmap,
.mmap = i915_gem_dmabuf_mmap,
.vmap = i915_gem_dmabuf_vmap,
.vunmap = i915_gem_dmabuf_vunmap,
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_domain.c b/drivers/gpu/drm/i915/gem/i915_gem_domain.c
index f86400a191b0..0cc40e77bbd2 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_domain.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_domain.c
@@ -12,6 +12,8 @@
#include "i915_gem_ioctls.h"
#include "i915_gem_object.h"
#include "i915_vma.h"
+#include "i915_gem_lmem.h"
+#include "i915_gem_mman.h"
static void __i915_gem_object_flush_for_display(struct drm_i915_gem_object *obj)
{
@@ -148,9 +150,17 @@ i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj, bool write)
GEM_BUG_ON((obj->write_domain & ~I915_GEM_DOMAIN_GTT) != 0);
obj->read_domains |= I915_GEM_DOMAIN_GTT;
if (write) {
+ struct i915_vma *vma;
+
obj->read_domains = I915_GEM_DOMAIN_GTT;
obj->write_domain = I915_GEM_DOMAIN_GTT;
obj->mm.dirty = true;
+
+ spin_lock(&obj->vma.lock);
+ for_each_ggtt_vma(vma, obj)
+ if (i915_vma_is_bound(vma, I915_VMA_GLOBAL_BIND))
+ i915_vma_set_ggtt_write(vma);
+ spin_unlock(&obj->vma.lock);
}
i915_gem_object_unpin_pages(obj);
@@ -175,138 +185,34 @@ i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj, bool write)
int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj,
enum i915_cache_level cache_level)
{
- struct i915_vma *vma;
int ret;
- assert_object_held(obj);
-
if (obj->cache_level == cache_level)
return 0;
- /* Inspect the list of currently bound VMA and unbind any that would
- * be invalid given the new cache-level. This is principally to
- * catch the issue of the CS prefetch crossing page boundaries and
- * reading an invalid PTE on older architectures.
- */
-restart:
- list_for_each_entry(vma, &obj->vma.list, obj_link) {
- if (!drm_mm_node_allocated(&vma->node))
- continue;
-
- if (i915_vma_is_pinned(vma)) {
- DRM_DEBUG("can not change the cache level of pinned objects\n");
- return -EBUSY;
- }
-
- if (!i915_vma_is_closed(vma) &&
- i915_gem_valid_gtt_space(vma, cache_level))
- continue;
-
- ret = i915_vma_unbind(vma);
- if (ret)
- return ret;
-
- /* As unbinding may affect other elements in the
- * obj->vma_list (due to side-effects from retiring
- * an active vma), play safe and restart the iterator.
- */
- goto restart;
- }
+ ret = i915_gem_object_wait(obj,
+ I915_WAIT_INTERRUPTIBLE |
+ I915_WAIT_ALL,
+ MAX_SCHEDULE_TIMEOUT);
+ if (ret)
+ return ret;
- /* We can reuse the existing drm_mm nodes but need to change the
- * cache-level on the PTE. We could simply unbind them all and
- * rebind with the correct cache-level on next use. However since
- * we already have a valid slot, dma mapping, pages etc, we may as
- * rewrite the PTE in the belief that doing so tramples upon less
- * state and so involves less work.
- */
- if (atomic_read(&obj->bind_count)) {
- struct drm_i915_private *i915 = to_i915(obj->base.dev);
+ ret = i915_gem_object_lock_interruptible(obj);
+ if (ret)
+ return ret;
- /* Before we change the PTE, the GPU must not be accessing it.
- * If we wait upon the object, we know that all the bound
- * VMA are no longer active.
- */
- ret = i915_gem_object_wait(obj,
- I915_WAIT_INTERRUPTIBLE |
- I915_WAIT_ALL,
- MAX_SCHEDULE_TIMEOUT);
- if (ret)
- return ret;
-
- if (!HAS_LLC(i915) && cache_level != I915_CACHE_NONE) {
- intel_wakeref_t wakeref =
- intel_runtime_pm_get(&i915->runtime_pm);
-
- /*
- * Access to snoopable pages through the GTT is
- * incoherent and on some machines causes a hard
- * lockup. Relinquish the CPU mmaping to force
- * userspace to refault in the pages and we can
- * then double check if the GTT mapping is still
- * valid for that pointer access.
- */
- ret = mutex_lock_interruptible(&i915->ggtt.vm.mutex);
- if (ret) {
- intel_runtime_pm_put(&i915->runtime_pm,
- wakeref);
- return ret;
- }
-
- if (obj->userfault_count)
- __i915_gem_object_release_mmap(obj);
-
- /*
- * As we no longer need a fence for GTT access,
- * we can relinquish it now (and so prevent having
- * to steal a fence from someone else on the next
- * fence request). Note GPU activity would have
- * dropped the fence as all snoopable access is
- * supposed to be linear.
- */
- for_each_ggtt_vma(vma, obj) {
- ret = i915_vma_revoke_fence(vma);
- if (ret)
- break;
- }
- mutex_unlock(&i915->ggtt.vm.mutex);
- intel_runtime_pm_put(&i915->runtime_pm, wakeref);
- if (ret)
- return ret;
- } else {
- /*
- * We either have incoherent backing store and
- * so no GTT access or the architecture is fully
- * coherent. In such cases, existing GTT mmaps
- * ignore the cache bit in the PTE and we can
- * rewrite it without confusing the GPU or having
- * to force userspace to fault back in its mmaps.
- */
- }
-
- list_for_each_entry(vma, &obj->vma.list, obj_link) {
- if (!drm_mm_node_allocated(&vma->node))
- continue;
-
- /* Wait for an earlier async bind, need to rewrite it */
- ret = i915_vma_sync(vma);
- if (ret)
- return ret;
-
- ret = i915_vma_bind(vma, cache_level, PIN_UPDATE, NULL);
- if (ret)
- return ret;
- }
+ /* Always invalidate stale cachelines */
+ if (obj->cache_level != cache_level) {
+ i915_gem_object_set_cache_coherency(obj, cache_level);
+ obj->cache_dirty = true;
}
- list_for_each_entry(vma, &obj->vma.list, obj_link) {
- if (i915_vm_has_cache_coloring(vma->vm))
- vma->node.color = cache_level;
- }
- i915_gem_object_set_cache_coherency(obj, cache_level);
- obj->cache_dirty = true; /* Always invalidate stale cachelines */
+ i915_gem_object_unlock(obj);
- return 0;
+ /* The cache-level will be applied when each vma is rebound. */
+ return i915_gem_object_unbind(obj,
+ I915_GEM_OBJECT_UNBIND_ACTIVE |
+ I915_GEM_OBJECT_UNBIND_BARRIER);
}
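Taken together, the rework above collapses i915_gem_object_set_cache_level() into a wait, a locked update of the coherency state, and a full unbind; the new PTE contents are produced lazily when each vma rebinds. A condensed sketch of the resulting flow, paraphrasing the + lines of the hunk rather than reproducing the function verbatim:

	/* Sketch only: mirrors the hunk above, error paths abbreviated. */
	static int set_cache_level_sketch(struct drm_i915_gem_object *obj,
					  enum i915_cache_level level)
	{
		int err;

		/* Flush all pending GPU access before touching cache state. */
		err = i915_gem_object_wait(obj,
					   I915_WAIT_INTERRUPTIBLE |
					   I915_WAIT_ALL,
					   MAX_SCHEDULE_TIMEOUT);
		if (err)
			return err;

		err = i915_gem_object_lock_interruptible(obj);
		if (err)
			return err;

		/* Record the new level; stale cachelines must be invalidated. */
		if (obj->cache_level != level) {
			i915_gem_object_set_cache_coherency(obj, level);
			obj->cache_dirty = true;
		}
		i915_gem_object_unlock(obj);

		/* Drop every binding; the level is applied on rebind. */
		return i915_gem_object_unbind(obj,
					      I915_GEM_OBJECT_UNBIND_ACTIVE |
					      I915_GEM_OBJECT_UNBIND_BARRIER);
	}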
int i915_gem_get_caching_ioctl(struct drm_device *dev, void *data,
@@ -387,20 +293,7 @@ int i915_gem_set_caching_ioctl(struct drm_device *dev, void *data,
goto out;
}
- if (obj->cache_level == level)
- goto out;
-
- ret = i915_gem_object_wait(obj,
- I915_WAIT_INTERRUPTIBLE,
- MAX_SCHEDULE_TIMEOUT);
- if (ret)
- goto out;
-
- ret = i915_gem_object_lock_interruptible(obj);
- if (ret == 0) {
- ret = i915_gem_object_set_cache_level(obj, level);
- i915_gem_object_unlock(obj);
- }
+ ret = i915_gem_object_set_cache_level(obj, level);
out:
i915_gem_object_put(obj);
@@ -419,10 +312,13 @@ i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj,
const struct i915_ggtt_view *view,
unsigned int flags)
{
+ struct drm_i915_private *i915 = to_i915(obj->base.dev);
struct i915_vma *vma;
int ret;
- assert_object_held(obj);
+ /* Frame buffer must be in LMEM (no migration yet) */
+ if (HAS_LMEM(i915) && !i915_gem_object_is_lmem(obj))
+ return ERR_PTR(-EINVAL);
/*
* The display engine is not coherent with the LLC cache on gen6. As
@@ -435,7 +331,7 @@ i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj,
* with that bit in the PTE to main memory with just one PIPE_CONTROL.
*/
ret = i915_gem_object_set_cache_level(obj,
- HAS_WT(to_i915(obj->base.dev)) ?
+ HAS_WT(i915) ?
I915_CACHE_WT : I915_CACHE_NONE);
if (ret)
return ERR_PTR(ret);
@@ -462,13 +358,7 @@ i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj,
vma->display_alignment = max_t(u64, vma->display_alignment, alignment);
- __i915_gem_object_flush_for_display(obj);
-
- /*
- * It should now be out of any other write domains, and we can update
- * the domain values for our changes.
- */
- obj->read_domains |= I915_GEM_DOMAIN_GTT;
+ i915_gem_object_flush_if_display(obj);
return vma;
}
@@ -479,8 +369,11 @@ static void i915_gem_object_bump_inactive_ggtt(struct drm_i915_gem_object *obj)
struct i915_vma *vma;
GEM_BUG_ON(!i915_gem_object_has_pinned_pages(obj));
+ if (!atomic_read(&obj->bind_count))
+ return;
mutex_lock(&i915->ggtt.vm.mutex);
+ spin_lock(&obj->vma.lock);
for_each_ggtt_vma(vma, obj) {
if (!drm_mm_node_allocated(&vma->node))
continue;
@@ -488,6 +381,7 @@ static void i915_gem_object_bump_inactive_ggtt(struct drm_i915_gem_object *obj)
GEM_BUG_ON(vma->vm != &i915->ggtt.vm);
list_move_tail(&vma->vm_link, &vma->vm->bound_list);
}
+ spin_unlock(&obj->vma.lock);
mutex_unlock(&i915->ggtt.vm.mutex);
if (i915_gem_object_is_shrinkable(obj)) {
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
index bc3a67226163..d5a0f5ae4a8b 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
@@ -25,6 +25,7 @@
#include "i915_gem_clflush.h"
#include "i915_gem_context.h"
#include "i915_gem_ioctls.h"
+#include "i915_sw_fence_work.h"
#include "i915_trace.h"
enum {
@@ -228,6 +229,7 @@ struct i915_execbuffer {
struct i915_request *request; /** our request to build */
struct i915_vma *batch; /** identity of the batch obj/vma */
+ struct i915_vma *trampoline; /** trampoline used for chaining */
/** actual size of execobj[] as we may extend it for the cmdparser */
unsigned int buffer_count;
@@ -253,7 +255,6 @@ struct i915_execbuffer {
bool has_fence : 1;
bool needs_unfenced : 1;
- struct intel_context *ce;
struct i915_request *rq;
u32 *rq_cmd;
unsigned int rq_size;
@@ -277,25 +278,6 @@ struct i915_execbuffer {
#define exec_entry(EB, VMA) (&(EB)->exec[(VMA)->exec_flags - (EB)->flags])
-/*
- * Used to convert any address to canonical form.
- * Starting from gen8, some commands (e.g. STATE_BASE_ADDRESS,
- * MI_LOAD_REGISTER_MEM and others, see Broadwell PRM Vol2a) require the
- * addresses to be in a canonical form:
- * "GraphicsAddress[63:48] are ignored by the HW and assumed to be in correct
- * canonical form [63:48] == [47]."
- */
-#define GEN8_HIGH_ADDRESS_BIT 47
-static inline u64 gen8_canonical_addr(u64 address)
-{
- return sign_extend64(address, GEN8_HIGH_ADDRESS_BIT);
-}
-
-static inline u64 gen8_noncanonical_addr(u64 address)
-{
- return address & GENMASK_ULL(GEN8_HIGH_ADDRESS_BIT, 0);
-}
-
static inline bool eb_use_cmdparser(const struct i915_execbuffer *eb)
{
return intel_engine_requires_cmd_parser(eb->engine) ||
@@ -748,9 +730,6 @@ static int eb_lookup_vmas(struct i915_execbuffer *eb)
unsigned int i, batch;
int err;
- if (unlikely(i915_gem_context_is_banned(eb->gem_context)))
- return -EIO;
-
INIT_LIST_HEAD(&eb->relocs);
INIT_LIST_HEAD(&eb->unbound);
@@ -886,9 +865,6 @@ static void eb_destroy(const struct i915_execbuffer *eb)
{
GEM_BUG_ON(eb->reloc_cache.rq);
- if (eb->reloc_cache.ce)
- intel_context_put(eb->reloc_cache.ce);
-
if (eb->lut_size > 0)
kfree(eb->buckets);
}
@@ -912,7 +888,6 @@ static void reloc_cache_init(struct reloc_cache *cache,
cache->has_fence = cache->gen < 4;
cache->needs_unfenced = INTEL_INFO(i915)->unfenced_needs_alignment;
cache->node.flags = 0;
- cache->ce = NULL;
cache->rq = NULL;
cache->rq_size = 0;
}
@@ -1182,7 +1157,7 @@ static int __reloc_gpu_alloc(struct i915_execbuffer *eb,
if (err)
goto err_unmap;
- rq = intel_context_create_request(cache->ce);
+ rq = i915_request_create(eb->context);
if (IS_ERR(rq)) {
err = PTR_ERR(rq);
goto err_unpin;
@@ -1246,36 +1221,9 @@ static u32 *reloc_gpu(struct i915_execbuffer *eb,
if (unlikely(!cache->rq)) {
int err;
- /* If we need to copy for the cmdparser, we will stall anyway */
- if (eb_use_cmdparser(eb))
- return ERR_PTR(-EWOULDBLOCK);
-
if (!intel_engine_can_store_dword(eb->engine))
return ERR_PTR(-ENODEV);
- if (!cache->ce) {
- struct intel_context *ce;
-
- /*
- * The CS pre-parser can pre-fetch commands across
- * memory sync points and starting gen12 it is able to
- * pre-fetch across BB_START and BB_END boundaries
- * (within the same context). We therefore use a
- * separate context gen12+ to guarantee that the reloc
- * writes land before the parser gets to the target
- * memory location.
- */
- if (cache->gen >= 12)
- ce = intel_context_create(eb->context->gem_context,
- eb->engine);
- else
- ce = intel_context_get(eb->context);
- if (IS_ERR(ce))
- return ERR_CAST(ce);
-
- cache->ce = ce;
- }
-
err = __reloc_gpu_alloc(eb, vma, len);
if (unlikely(err))
return ERR_PTR(err);
@@ -1943,15 +1891,15 @@ err_skip:
return err;
}
-static bool i915_gem_check_execbuffer(struct drm_i915_gem_execbuffer2 *exec)
+static int i915_gem_check_execbuffer(struct drm_i915_gem_execbuffer2 *exec)
{
if (exec->flags & __I915_EXEC_ILLEGAL_FLAGS)
- return false;
+ return -EINVAL;
/* Kernel clipping was a DRI1 misfeature */
if (!(exec->flags & I915_EXEC_FENCE_ARRAY)) {
if (exec->num_cliprects || exec->cliprects_ptr)
- return false;
+ return -EINVAL;
}
if (exec->DR4 == 0xffffffff) {
@@ -1959,12 +1907,12 @@ static bool i915_gem_check_execbuffer(struct drm_i915_gem_execbuffer2 *exec)
exec->DR4 = 0;
}
if (exec->DR1 || exec->DR4)
- return false;
+ return -EINVAL;
if ((exec->batch_start_offset | exec->batch_len) & 0x7)
- return false;
+ return -EINVAL;
- return true;
+ return 0;
}
static int i915_reset_gen7_sol_offsets(struct i915_request *rq)
@@ -1993,99 +1941,179 @@ static int i915_reset_gen7_sol_offsets(struct i915_request *rq)
}
static struct i915_vma *
-shadow_batch_pin(struct i915_execbuffer *eb, struct drm_i915_gem_object *obj)
+shadow_batch_pin(struct drm_i915_gem_object *obj,
+ struct i915_address_space *vm,
+ unsigned int flags)
{
- struct drm_i915_private *dev_priv = eb->i915;
- struct i915_vma * const vma = *eb->vma;
- struct i915_address_space *vm;
- u64 flags;
+ struct i915_vma *vma;
+ int err;
- /*
- * PPGTT backed shadow buffers must be mapped RO, to prevent
- * post-scan tampering
- */
- if (CMDPARSER_USES_GGTT(dev_priv)) {
- flags = PIN_GLOBAL;
- vm = &dev_priv->ggtt.vm;
- } else if (vma->vm->has_read_only) {
- flags = PIN_USER;
- vm = vma->vm;
- i915_gem_object_set_readonly(obj);
- } else {
- DRM_DEBUG("Cannot prevent post-scan tampering without RO capable vm\n");
- return ERR_PTR(-EINVAL);
- }
+ vma = i915_vma_instance(obj, vm, NULL);
+ if (IS_ERR(vma))
+ return vma;
+
+ err = i915_vma_pin(vma, 0, 0, flags);
+ if (err)
+ return ERR_PTR(err);
- return i915_gem_object_pin(obj, vm, NULL, 0, 0, flags);
+ return vma;
}
-static struct i915_vma *eb_parse(struct i915_execbuffer *eb)
+struct eb_parse_work {
+ struct dma_fence_work base;
+ struct intel_engine_cs *engine;
+ struct i915_vma *batch;
+ struct i915_vma *shadow;
+ struct i915_vma *trampoline;
+ unsigned int batch_offset;
+ unsigned int batch_length;
+};
+
+static int __eb_parse(struct dma_fence_work *work)
{
- struct intel_engine_pool_node *pool;
- struct i915_vma *vma;
- u64 batch_start;
- u64 shadow_batch_start;
+ struct eb_parse_work *pw = container_of(work, typeof(*pw), base);
+
+ return intel_engine_cmd_parser(pw->engine,
+ pw->batch,
+ pw->batch_offset,
+ pw->batch_length,
+ pw->shadow,
+ pw->trampoline);
+}
+
+static const struct dma_fence_work_ops eb_parse_ops = {
+ .name = "eb_parse",
+ .work = __eb_parse,
+};
+
+static int eb_parse_pipeline(struct i915_execbuffer *eb,
+ struct i915_vma *shadow,
+ struct i915_vma *trampoline)
+{
+ struct eb_parse_work *pw;
int err;
- pool = intel_engine_get_pool(eb->engine, eb->batch_len);
- if (IS_ERR(pool))
- return ERR_CAST(pool);
+ pw = kzalloc(sizeof(*pw), GFP_KERNEL);
+ if (!pw)
+ return -ENOMEM;
- vma = shadow_batch_pin(eb, pool->obj);
- if (IS_ERR(vma))
- goto err;
+ dma_fence_work_init(&pw->base, &eb_parse_ops);
- batch_start = gen8_canonical_addr(eb->batch->node.start) +
- eb->batch_start_offset;
+ pw->engine = eb->engine;
+ pw->batch = eb->batch;
+ pw->batch_offset = eb->batch_start_offset;
+ pw->batch_length = eb->batch_len;
+ pw->shadow = shadow;
+ pw->trampoline = trampoline;
- shadow_batch_start = gen8_canonical_addr(vma->node.start);
+ dma_resv_lock(pw->batch->resv, NULL);
- err = intel_engine_cmd_parser(eb->gem_context,
- eb->engine,
- eb->batch->obj,
- batch_start,
- eb->batch_start_offset,
- eb->batch_len,
- pool->obj,
- shadow_batch_start);
+ err = dma_resv_reserve_shared(pw->batch->resv, 1);
+ if (err)
+ goto err_batch_unlock;
- if (err) {
- i915_vma_unpin(vma);
+ /* Wait for all writes (and relocs) into the batch to complete */
+ err = i915_sw_fence_await_reservation(&pw->base.chain,
+ pw->batch->resv, NULL, false,
+ 0, I915_FENCE_GFP);
+ if (err < 0)
+ goto err_batch_unlock;
+
+ /* Keep the batch alive and unwritten as we parse */
+ dma_resv_add_shared_fence(pw->batch->resv, &pw->base.dma);
+
+ dma_resv_unlock(pw->batch->resv);
+ /* Force execution to wait for completion of the parser */
+ dma_resv_lock(shadow->resv, NULL);
+ dma_resv_add_excl_fence(shadow->resv, &pw->base.dma);
+ dma_resv_unlock(shadow->resv);
+
+ dma_fence_work_commit(&pw->base);
+ return 0;
+
+err_batch_unlock:
+ dma_resv_unlock(pw->batch->resv);
+ kfree(pw);
+ return err;
+}
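eb_parse_pipeline() above is an instance of the dma_fence_work pattern from i915_sw_fence_work.h: a work item carries its own fence, is chained behind the fences it must wait on, publishes that fence into the affected reservation objects, and is then committed. Reduced to its essentials (the dma_resv locking, dma_resv_reserve_shared() and error unwinding are elided here; see the hunk above for the real ordering):

	struct eb_parse_work *pw = kzalloc(sizeof(*pw), GFP_KERNEL);

	dma_fence_work_init(&pw->base, &eb_parse_ops);

	/* Parse only after every prior write into the batch has landed... */
	i915_sw_fence_await_reservation(&pw->base.chain,
					pw->batch->resv, NULL, false,
					0, I915_FENCE_GFP);
	/* ...keep the batch alive and unwritten while the parser runs... */
	dma_resv_add_shared_fence(pw->batch->resv, &pw->base.dma);
	/* ...and make any execution of the shadow wait for the parser. */
	dma_resv_add_excl_fence(shadow->resv, &pw->base.dma);

	dma_fence_work_commit(&pw->base);	/* queues __eb_parse() */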
+
+static int eb_parse(struct i915_execbuffer *eb)
+{
+ struct intel_engine_pool_node *pool;
+ struct i915_vma *shadow, *trampoline;
+ unsigned int len;
+ int err;
+
+ if (!eb_use_cmdparser(eb))
+ return 0;
+
+ len = eb->batch_len;
+ if (!CMDPARSER_USES_GGTT(eb->i915)) {
/*
- * Unsafe GGTT-backed buffers can still be submitted safely
- * as non-secure.
- * For PPGTT backing however, we have no choice but to forcibly
- * reject unsafe buffers
+ * ppGTT backed shadow buffers must be mapped RO, to prevent
+ * post-scan tampering
*/
- if (CMDPARSER_USES_GGTT(eb->i915) && (err == -EACCES))
- /* Execute original buffer non-secure */
- vma = NULL;
- else
- vma = ERR_PTR(err);
+ if (!eb->context->vm->has_read_only) {
+ DRM_DEBUG("Cannot prevent post-scan tampering without RO capable vm\n");
+ return -EINVAL;
+ }
+ } else {
+ len += I915_CMD_PARSER_TRAMPOLINE_SIZE;
+ }
+
+ pool = intel_engine_get_pool(eb->engine, len);
+ if (IS_ERR(pool))
+ return PTR_ERR(pool);
+
+ shadow = shadow_batch_pin(pool->obj, eb->context->vm, PIN_USER);
+ if (IS_ERR(shadow)) {
+ err = PTR_ERR(shadow);
goto err;
}
+ i915_gem_object_set_readonly(shadow->obj);
+
+ trampoline = NULL;
+ if (CMDPARSER_USES_GGTT(eb->i915)) {
+ trampoline = shadow;
+
+ shadow = shadow_batch_pin(pool->obj,
+ &eb->engine->gt->ggtt->vm,
+ PIN_GLOBAL);
+ if (IS_ERR(shadow)) {
+ err = PTR_ERR(shadow);
+ shadow = trampoline;
+ goto err_shadow;
+ }
+
+ eb->batch_flags |= I915_DISPATCH_SECURE;
+ }
+
+ err = eb_parse_pipeline(eb, shadow, trampoline);
+ if (err)
+ goto err_trampoline;
- eb->vma[eb->buffer_count] = i915_vma_get(vma);
+ eb->vma[eb->buffer_count] = i915_vma_get(shadow);
eb->flags[eb->buffer_count] =
__EXEC_OBJECT_HAS_PIN | __EXEC_OBJECT_HAS_REF;
- vma->exec_flags = &eb->flags[eb->buffer_count];
+ shadow->exec_flags = &eb->flags[eb->buffer_count];
eb->buffer_count++;
+ eb->trampoline = trampoline;
eb->batch_start_offset = 0;
- eb->batch = vma;
+ eb->batch = shadow;
- if (CMDPARSER_USES_GGTT(eb->i915))
- eb->batch_flags |= I915_DISPATCH_SECURE;
-
- /* eb->batch_len unchanged */
-
- vma->private = pool;
- return vma;
+ shadow->private = pool;
+ return 0;
+err_trampoline:
+ if (trampoline)
+ i915_vma_unpin(trampoline);
+err_shadow:
+ i915_vma_unpin(shadow);
err:
intel_engine_pool_put(pool);
- return vma;
+ return err;
}
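When CMDPARSER_USES_GGTT() is true, the single pool object carries both the validated copy of the batch and the return trampoline, and the same backing store is pinned twice: once in the ppGTT (the trampoline vma) and once in the GGTT (the shadow, dispatched as secure). A rough sketch of the layout eb_parse() sets up:

	/* Pool object layout with the GGTT cmdparser (sketch):
	 *
	 *	0             batch_len       batch_len + TRAMPOLINE_SIZE
	 *	+------------------+------------------------------+
	 *	|  validated batch |  trampoline                  |
	 *	+------------------+------------------------------+
	 *	^ dispatched as eb->batch     ^ entered via the second
	 *	  (shadow, GGTT, secure)        emit_bb_start() in
	 *	                                eb_submit() below
	 */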
static void
@@ -2134,8 +2162,18 @@ static int eb_submit(struct i915_execbuffer *eb)
if (err)
return err;
- if (i915_gem_context_nopreempt(eb->gem_context))
- eb->request->flags |= I915_REQUEST_NOPREEMPT;
+ if (eb->trampoline) {
+ GEM_BUG_ON(eb->batch_start_offset);
+ err = eb->engine->emit_bb_start(eb->request,
+ eb->trampoline->node.start +
+ eb->batch_len,
+ 0, 0);
+ if (err)
+ return err;
+ }
+
+ if (intel_context_nopreempt(eb->context))
+ __set_bit(I915_FENCE_FLAG_NOPREEMPT, &eb->request->fence.flags);
return 0;
}
@@ -2220,6 +2258,9 @@ static int __eb_pin_engine(struct i915_execbuffer *eb, struct intel_context *ce)
if (err)
return err;
+ if (unlikely(intel_context_is_banned(ce)))
+ return -EIO;
+
/*
* Pinning the contexts may generate requests in order to acquire
* GGTT space, so do this first before we reserve a seqno for
@@ -2515,6 +2556,7 @@ i915_gem_do_execbuffer(struct drm_device *dev,
eb.buffer_count = args->buffer_count;
eb.batch_start_offset = args->batch_start_offset;
eb.batch_len = args->batch_len;
+ eb.trampoline = NULL;
eb.batch_flags = 0;
if (args->flags & I915_EXEC_SECURE) {
@@ -2606,15 +2648,9 @@ i915_gem_do_execbuffer(struct drm_device *dev,
if (eb.batch_len == 0)
eb.batch_len = eb.batch->size - eb.batch_start_offset;
- if (eb_use_cmdparser(&eb)) {
- struct i915_vma *vma;
-
- vma = eb_parse(&eb);
- if (IS_ERR(vma)) {
- err = PTR_ERR(vma);
- goto err_vma;
- }
- }
+ err = eb_parse(&eb);
+ if (err)
+ goto err_vma;
/*
* snb/ivb/vlv conflate the "batch in ppgtt" bit with the "non-secure
@@ -2720,6 +2756,8 @@ err_batch_unpin:
err_vma:
if (eb.exec)
eb_release_vmas(&eb);
+ if (eb.trampoline)
+ i915_vma_unpin(eb.trampoline);
mutex_unlock(&dev->struct_mutex);
err_engine:
eb_unpin_engine(&eb);
@@ -2789,8 +2827,9 @@ i915_gem_execbuffer_ioctl(struct drm_device *dev, void *data,
exec2.flags = I915_EXEC_RENDER;
i915_execbuffer2_set_context_id(exec2, 0);
- if (!i915_gem_check_execbuffer(&exec2))
- return -EINVAL;
+ err = i915_gem_check_execbuffer(&exec2);
+ if (err)
+ return err;
/* Copy in the exec list from userland */
exec_list = kvmalloc_array(count, sizeof(*exec_list),
@@ -2867,8 +2906,9 @@ i915_gem_execbuffer2_ioctl(struct drm_device *dev, void *data,
return -EINVAL;
}
- if (!i915_gem_check_execbuffer(args))
- return -EINVAL;
+ err = i915_gem_check_execbuffer(args);
+ if (err)
+ return err;
/* Allocate an extra slot for use by the command parser */
exec2_list = kvmalloc_array(count + 1, eb_element_size(),
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_ioctls.h b/drivers/gpu/drm/i915/gem/i915_gem_ioctls.h
index ddc7f2a52b3e..87d8b27f426d 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_ioctls.h
+++ b/drivers/gpu/drm/i915/gem/i915_gem_ioctls.h
@@ -28,8 +28,8 @@ int i915_gem_madvise_ioctl(struct drm_device *dev, void *data,
struct drm_file *file);
int i915_gem_mmap_ioctl(struct drm_device *dev, void *data,
struct drm_file *file);
-int i915_gem_mmap_gtt_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file);
+int i915_gem_mmap_offset_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file);
int i915_gem_pread_ioctl(struct drm_device *dev, void *data,
struct drm_file *file);
int i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_lmem.c b/drivers/gpu/drm/i915/gem/i915_gem_lmem.c
index 0e2bf6b7e143..70543c83df06 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_lmem.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_lmem.c
@@ -16,46 +16,6 @@ const struct drm_i915_gem_object_ops i915_gem_lmem_obj_ops = {
.release = i915_gem_object_release_memory_region,
};
-/* XXX: Time to vfunc your life up? */
-void __iomem *
-i915_gem_object_lmem_io_map_page(struct drm_i915_gem_object *obj,
- unsigned long n)
-{
- resource_size_t offset;
-
- offset = i915_gem_object_get_dma_address(obj, n);
- offset -= obj->mm.region->region.start;
-
- return io_mapping_map_wc(&obj->mm.region->iomap, offset, PAGE_SIZE);
-}
-
-void __iomem *
-i915_gem_object_lmem_io_map_page_atomic(struct drm_i915_gem_object *obj,
- unsigned long n)
-{
- resource_size_t offset;
-
- offset = i915_gem_object_get_dma_address(obj, n);
- offset -= obj->mm.region->region.start;
-
- return io_mapping_map_atomic_wc(&obj->mm.region->iomap, offset);
-}
-
-void __iomem *
-i915_gem_object_lmem_io_map(struct drm_i915_gem_object *obj,
- unsigned long n,
- unsigned long size)
-{
- resource_size_t offset;
-
- GEM_BUG_ON(!i915_gem_object_is_contiguous(obj));
-
- offset = i915_gem_object_get_dma_address(obj, n);
- offset -= obj->mm.region->region.start;
-
- return io_mapping_map_wc(&obj->mm.region->iomap, offset, size);
-}
-
bool i915_gem_object_is_lmem(struct drm_i915_gem_object *obj)
{
return obj->ops == &i915_gem_lmem_obj_ops;
@@ -79,9 +39,6 @@ __i915_gem_lmem_object_create(struct intel_memory_region *mem,
struct drm_i915_private *i915 = mem->i915;
struct drm_i915_gem_object *obj;
- if (size > BIT(mem->mm.max_order) * mem->mm.chunk_size)
- return ERR_PTR(-E2BIG);
-
obj = i915_gem_object_alloc();
if (!obj)
return ERR_PTR(-ENOMEM);
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_lmem.h b/drivers/gpu/drm/i915/gem/i915_gem_lmem.h
index 7c176b8b7d2f..fc3f15580fe3 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_lmem.h
+++ b/drivers/gpu/drm/i915/gem/i915_gem_lmem.h
@@ -14,14 +14,6 @@ struct intel_memory_region;
extern const struct drm_i915_gem_object_ops i915_gem_lmem_obj_ops;
-void __iomem *i915_gem_object_lmem_io_map(struct drm_i915_gem_object *obj,
- unsigned long n, unsigned long size);
-void __iomem *i915_gem_object_lmem_io_map_page(struct drm_i915_gem_object *obj,
- unsigned long n);
-void __iomem *
-i915_gem_object_lmem_io_map_page_atomic(struct drm_i915_gem_object *obj,
- unsigned long n);
-
bool i915_gem_object_is_lmem(struct drm_i915_gem_object *obj);
struct drm_i915_gem_object *
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_mman.c b/drivers/gpu/drm/i915/gem/i915_gem_mman.c
index e3002849844b..b9fdac2f9003 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_mman.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_mman.c
@@ -4,7 +4,9 @@
* Copyright © 2014-2016 Intel Corporation
*/
+#include <linux/anon_inodes.h>
#include <linux/mman.h>
+#include <linux/pfn_t.h>
#include <linux/sizes.h>
#include "gt/intel_gt.h"
@@ -14,7 +16,9 @@
#include "i915_gem_gtt.h"
#include "i915_gem_ioctls.h"
#include "i915_gem_object.h"
+#include "i915_gem_mman.h"
#include "i915_trace.h"
+#include "i915_user_extensions.h"
#include "i915_vma.h"
static inline bool
@@ -144,6 +148,9 @@ static unsigned int tile_row_pages(const struct drm_i915_gem_object *obj)
* 3 - Remove implicit set-domain(GTT) and synchronisation on initial
* pagefault; swapin remains transparent.
*
+ * 4 - Support multiple fault handlers per object depending on object's
+ * backing storage (a.k.a. MMAP_OFFSET).
+ *
* Restrictions:
*
* * snoopable objects cannot be accessed via the GTT. It can cause machine
@@ -171,7 +178,7 @@ static unsigned int tile_row_pages(const struct drm_i915_gem_object *obj)
*/
int i915_gem_mmap_gtt_version(void)
{
- return 3;
+ return 4;
}
static inline struct i915_ggtt_view
@@ -197,29 +204,80 @@ compute_partial_view(const struct drm_i915_gem_object *obj,
return view;
}
-/**
- * i915_gem_fault - fault a page into the GTT
- * @vmf: fault info
- *
- * The fault handler is set up by drm_gem_mmap() when a object is GTT mapped
- * from userspace. The fault handler takes care of binding the object to
- * the GTT (if needed), allocating and programming a fence register (again,
- * only if needed based on whether the old reg is still valid or the object
- * is tiled) and inserting a new PTE into the faulting process.
- *
- * Note that the faulting process may involve evicting existing objects
- * from the GTT and/or fence registers to make room. So performance may
- * suffer if the GTT working set is large or there are few fence registers
- * left.
- *
- * The current feature set supported by i915_gem_fault() and thus GTT mmaps
- * is exposed via I915_PARAM_MMAP_GTT_VERSION (see i915_gem_mmap_gtt_version).
- */
-vm_fault_t i915_gem_fault(struct vm_fault *vmf)
+static vm_fault_t i915_error_to_vmf_fault(int err)
+{
+ switch (err) {
+ default:
+ WARN_ONCE(err, "unhandled error in %s: %i\n", __func__, err);
+ /* fallthrough */
+ case -EIO: /* shmemfs failure from swap device */
+ case -EFAULT: /* purged object */
+ case -ENODEV: /* bad object, how did you get here! */
+ case -ENXIO: /* unable to access backing store (on device) */
+ return VM_FAULT_SIGBUS;
+
+ case -ENOSPC: /* shmemfs allocation failure */
+ case -ENOMEM: /* our allocation failure */
+ return VM_FAULT_OOM;
+
+ case 0:
+ case -EAGAIN:
+ case -ERESTARTSYS:
+ case -EINTR:
+ case -EBUSY:
+ /*
+ * EBUSY is ok: this just means that another thread
+ * already did the job.
+ */
+ return VM_FAULT_NOPAGE;
+ }
+}
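Both fault handlers below funnel their errno through this translator, so each reduces to "do the work, convert the result". A skeletal example (do_fault_work() is a hypothetical placeholder, not a driver function):

	static vm_fault_t vm_fault_example(struct vm_fault *vmf)
	{
		int err;

		err = do_fault_work(vmf);	/* hypothetical helper */

		/* 0, -EAGAIN, -EINTR, -EBUSY -> NOPAGE; -ENOMEM -> OOM; ... */
		return i915_error_to_vmf_fault(err);
	}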
+
+static vm_fault_t vm_fault_cpu(struct vm_fault *vmf)
+{
+ struct vm_area_struct *area = vmf->vma;
+ struct i915_mmap_offset *mmo = area->vm_private_data;
+ struct drm_i915_gem_object *obj = mmo->obj;
+ resource_size_t iomap;
+ int err;
+
+ /* Sanity check that we allow writing into this object */
+ if (unlikely(i915_gem_object_is_readonly(obj) &&
+ area->vm_flags & VM_WRITE))
+ return VM_FAULT_SIGBUS;
+
+ err = i915_gem_object_pin_pages(obj);
+ if (err)
+ goto out;
+
+ iomap = -1;
+ if (!i915_gem_object_type_has(obj, I915_GEM_OBJECT_HAS_STRUCT_PAGE)) {
+ iomap = obj->mm.region->iomap.base;
+ iomap -= obj->mm.region->region.start;
+ }
+
+ /* PTEs are revoked in obj->ops->put_pages() */
+ err = remap_io_sg(area,
+ area->vm_start, area->vm_end - area->vm_start,
+ obj->mm.pages->sgl, iomap);
+
+ if (area->vm_flags & VM_WRITE) {
+ GEM_BUG_ON(!i915_gem_object_has_pinned_pages(obj));
+ obj->mm.dirty = true;
+ }
+
+ i915_gem_object_unpin_pages(obj);
+
+out:
+ return i915_error_to_vmf_fault(err);
+}
+
+static vm_fault_t vm_fault_gtt(struct vm_fault *vmf)
{
#define MIN_CHUNK_PAGES (SZ_1M >> PAGE_SHIFT)
struct vm_area_struct *area = vmf->vma;
- struct drm_i915_gem_object *obj = to_intel_bo(area->vm_private_data);
+ struct i915_mmap_offset *mmo = area->vm_private_data;
+ struct drm_i915_gem_object *obj = mmo->obj;
struct drm_device *dev = obj->base.dev;
struct drm_i915_private *i915 = to_i915(dev);
struct intel_runtime_pm *rpm = &i915->runtime_pm;
@@ -312,6 +370,9 @@ vm_fault_t i915_gem_fault(struct vm_fault *vmf)
list_add(&obj->userfault_link, &i915->ggtt.userfault_list);
mutex_unlock(&i915->ggtt.vm.mutex);
+ /* Track the mmo associated with the fenced vma */
+ vma->mmo = mmo;
+
if (IS_ACTIVE(CONFIG_DRM_I915_USERFAULT_AUTOSUSPEND))
intel_wakeref_auto(&i915->ggtt.userfault_wakeref,
msecs_to_jiffies_timeout(CONFIG_DRM_I915_USERFAULT_AUTOSUSPEND));
@@ -332,67 +393,36 @@ err_rpm:
intel_runtime_pm_put(rpm, wakeref);
i915_gem_object_unpin_pages(obj);
err:
- switch (ret) {
- default:
- WARN_ONCE(ret, "unhandled error in %s: %i\n", __func__, ret);
- /* fallthrough */
- case -EIO: /* shmemfs failure from swap device */
- case -EFAULT: /* purged object */
- case -ENODEV: /* bad object, how did you get here! */
- return VM_FAULT_SIGBUS;
-
- case -ENOSPC: /* shmemfs allocation failure */
- case -ENOMEM: /* our allocation failure */
- return VM_FAULT_OOM;
-
- case 0:
- case -EAGAIN:
- case -ERESTARTSYS:
- case -EINTR:
- case -EBUSY:
- /*
- * EBUSY is ok: this just means that another thread
- * already did the job.
- */
- return VM_FAULT_NOPAGE;
- }
+ return i915_error_to_vmf_fault(ret);
}
-void __i915_gem_object_release_mmap(struct drm_i915_gem_object *obj)
+void __i915_gem_object_release_mmap_gtt(struct drm_i915_gem_object *obj)
{
struct i915_vma *vma;
GEM_BUG_ON(!obj->userfault_count);
- obj->userfault_count = 0;
- list_del(&obj->userfault_link);
- drm_vma_node_unmap(&obj->base.vma_node,
- obj->base.dev->anon_inode->i_mapping);
-
for_each_ggtt_vma(vma, obj)
- i915_vma_unset_userfault(vma);
+ i915_vma_revoke_mmap(vma);
+
+ GEM_BUG_ON(obj->userfault_count);
}
-/**
- * i915_gem_object_release_mmap - remove physical page mappings
- * @obj: obj in question
- *
- * Preserve the reservation of the mmapping with the DRM core code, but
- * relinquish ownership of the pages back to the system.
- *
+/*
* It is vital that we remove the page mapping if we have mapped a tiled
* object through the GTT and then lose the fence register due to
* resource pressure. Similarly if the object has been moved out of the
* aperture, then pages mapped into userspace must be revoked. Removing the
* mapping will then trigger a page fault on the next user access, allowing
- * fixup by i915_gem_fault().
+ * fixup by vm_fault_gtt().
*/
-void i915_gem_object_release_mmap(struct drm_i915_gem_object *obj)
+static void i915_gem_object_release_mmap_gtt(struct drm_i915_gem_object *obj)
{
struct drm_i915_private *i915 = to_i915(obj->base.dev);
intel_wakeref_t wakeref;
- /* Serialisation between user GTT access and our code depends upon
+ /*
+ * Serialisation between user GTT access and our code depends upon
* revoking the CPU's PTE whilst the mutex is held. The next user
* pagefault then has to wait until we release the mutex.
*
@@ -406,9 +436,10 @@ void i915_gem_object_release_mmap(struct drm_i915_gem_object *obj)
if (!obj->userfault_count)
goto out;
- __i915_gem_object_release_mmap(obj);
+ __i915_gem_object_release_mmap_gtt(obj);
- /* Ensure that the CPU's PTE are revoked and there are not outstanding
+ /*
+	 * Ensure that the CPU's PTEs are revoked and there are no outstanding
* memory transactions from userspace before we return. The TLB
* flushing implied above by changing the PTE above *should* be
* sufficient, an extra barrier here just provides us with a bit
@@ -422,54 +453,151 @@ out:
intel_runtime_pm_put(&i915->runtime_pm, wakeref);
}
-static int create_mmap_offset(struct drm_i915_gem_object *obj)
+void i915_gem_object_release_mmap_offset(struct drm_i915_gem_object *obj)
+{
+ struct i915_mmap_offset *mmo;
+
+ spin_lock(&obj->mmo.lock);
+ list_for_each_entry(mmo, &obj->mmo.offsets, offset) {
+ /*
+		 * vma_node_unmap for GTT mmaps is already handled in
+		 * __i915_gem_object_release_mmap_gtt
+ */
+ if (mmo->mmap_type == I915_MMAP_TYPE_GTT)
+ continue;
+
+ spin_unlock(&obj->mmo.lock);
+ drm_vma_node_unmap(&mmo->vma_node,
+ obj->base.dev->anon_inode->i_mapping);
+ spin_lock(&obj->mmo.lock);
+ }
+ spin_unlock(&obj->mmo.lock);
+}
+
+/**
+ * i915_gem_object_release_mmap - remove physical page mappings
+ * @obj: obj in question
+ *
+ * Preserve the reservation of the mmapping with the DRM core code, but
+ * relinquish ownership of the pages back to the system.
+ */
+void i915_gem_object_release_mmap(struct drm_i915_gem_object *obj)
+{
+ i915_gem_object_release_mmap_gtt(obj);
+ i915_gem_object_release_mmap_offset(obj);
+}
+
+static struct i915_mmap_offset *
+mmap_offset_attach(struct drm_i915_gem_object *obj,
+ enum i915_mmap_type mmap_type,
+ struct drm_file *file)
{
struct drm_i915_private *i915 = to_i915(obj->base.dev);
- struct intel_gt *gt = &i915->gt;
+ struct i915_mmap_offset *mmo;
int err;
- err = drm_gem_create_mmap_offset(&obj->base);
+ mmo = kmalloc(sizeof(*mmo), GFP_KERNEL);
+ if (!mmo)
+ return ERR_PTR(-ENOMEM);
+
+ mmo->obj = obj;
+ mmo->dev = obj->base.dev;
+ mmo->file = file;
+ mmo->mmap_type = mmap_type;
+ drm_vma_node_reset(&mmo->vma_node);
+
+ err = drm_vma_offset_add(mmo->dev->vma_offset_manager, &mmo->vma_node,
+ obj->base.size / PAGE_SIZE);
if (likely(!err))
- return 0;
+ goto out;
/* Attempt to reap some mmap space from dead objects */
- err = intel_gt_retire_requests_timeout(gt, MAX_SCHEDULE_TIMEOUT);
+ err = intel_gt_retire_requests_timeout(&i915->gt, MAX_SCHEDULE_TIMEOUT);
if (err)
- return err;
+ goto err;
i915_gem_drain_freed_objects(i915);
- return drm_gem_create_mmap_offset(&obj->base);
+ err = drm_vma_offset_add(mmo->dev->vma_offset_manager, &mmo->vma_node,
+ obj->base.size / PAGE_SIZE);
+ if (err)
+ goto err;
+
+out:
+ if (file)
+ drm_vma_node_allow(&mmo->vma_node, file);
+
+ spin_lock(&obj->mmo.lock);
+ list_add(&mmo->offset, &obj->mmo.offsets);
+ spin_unlock(&obj->mmo.lock);
+
+ return mmo;
+
+err:
+ kfree(mmo);
+ return ERR_PTR(err);
}
-int
-i915_gem_mmap_gtt(struct drm_file *file,
- struct drm_device *dev,
- u32 handle,
- u64 *offset)
+static int
+__assign_mmap_offset(struct drm_file *file,
+ u32 handle,
+ enum i915_mmap_type mmap_type,
+ u64 *offset)
{
struct drm_i915_gem_object *obj;
- int ret;
+ struct i915_mmap_offset *mmo;
+ int err;
obj = i915_gem_object_lookup(file, handle);
if (!obj)
return -ENOENT;
- if (i915_gem_object_never_bind_ggtt(obj)) {
- ret = -ENODEV;
+ if (mmap_type == I915_MMAP_TYPE_GTT &&
+ i915_gem_object_never_bind_ggtt(obj)) {
+ err = -ENODEV;
+ goto out;
+ }
+
+ if (mmap_type != I915_MMAP_TYPE_GTT &&
+ !i915_gem_object_type_has(obj,
+ I915_GEM_OBJECT_HAS_STRUCT_PAGE |
+ I915_GEM_OBJECT_HAS_IOMEM)) {
+ err = -ENODEV;
goto out;
}
- ret = create_mmap_offset(obj);
- if (ret == 0)
- *offset = drm_vma_node_offset_addr(&obj->base.vma_node);
+ mmo = mmap_offset_attach(obj, mmap_type, file);
+ if (IS_ERR(mmo)) {
+ err = PTR_ERR(mmo);
+ goto out;
+ }
+ *offset = drm_vma_node_offset_addr(&mmo->vma_node);
+ err = 0;
out:
i915_gem_object_put(obj);
- return ret;
+ return err;
+}
+
+int
+i915_gem_dumb_mmap_offset(struct drm_file *file,
+ struct drm_device *dev,
+ u32 handle,
+ u64 *offset)
+{
+ enum i915_mmap_type mmap_type;
+
+ if (boot_cpu_has(X86_FEATURE_PAT))
+ mmap_type = I915_MMAP_TYPE_WC;
+ else if (!i915_ggtt_has_aperture(&to_i915(dev)->ggtt))
+ return -ENODEV;
+ else
+ mmap_type = I915_MMAP_TYPE_GTT;
+
+ return __assign_mmap_offset(file, handle, mmap_type, offset);
}
/**
- * i915_gem_mmap_gtt_ioctl - prepare an object for GTT mmap'ing
+ * i915_gem_mmap_offset_ioctl - prepare an object for GTT mmap'ing
* @dev: DRM device
* @data: GTT mapping ioctl data
* @file: GEM object info
@@ -484,12 +612,237 @@ out:
* userspace.
*/
int
-i915_gem_mmap_gtt_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file)
+i915_gem_mmap_offset_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file)
+{
+ struct drm_i915_private *i915 = to_i915(dev);
+ struct drm_i915_gem_mmap_offset *args = data;
+ enum i915_mmap_type type;
+ int err;
+
+ /*
+	 * Historically we failed to check args.pad and args.offset,
+	 * so we cannot use those fields for user input, and we cannot
+	 * start returning -EINVAL for them either, as the ABI is fixed,
+	 * i.e. old userspace may be feeding in garbage in those fields.
+	 *
+	 * if (args->pad) return -EINVAL; is verboten!
+ */
+
+ err = i915_user_extensions(u64_to_user_ptr(args->extensions),
+ NULL, 0, NULL);
+ if (err)
+ return err;
+
+ switch (args->flags) {
+ case I915_MMAP_OFFSET_GTT:
+ if (!i915_ggtt_has_aperture(&i915->ggtt))
+ return -ENODEV;
+ type = I915_MMAP_TYPE_GTT;
+ break;
+
+ case I915_MMAP_OFFSET_WC:
+ if (!boot_cpu_has(X86_FEATURE_PAT))
+ return -ENODEV;
+ type = I915_MMAP_TYPE_WC;
+ break;
+
+ case I915_MMAP_OFFSET_WB:
+ type = I915_MMAP_TYPE_WB;
+ break;
+
+ case I915_MMAP_OFFSET_UC:
+ if (!boot_cpu_has(X86_FEATURE_PAT))
+ return -ENODEV;
+ type = I915_MMAP_TYPE_UC;
+ break;
+
+ default:
+ return -EINVAL;
+ }
+
+ return __assign_mmap_offset(file, args->handle, type, &args->offset);
+}
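From userspace, the new ioctl pairs with an ordinary mmap() of the returned fake offset on the DRM fd. A minimal sketch, assuming the drm_i915_gem_mmap_offset uapi (handle/flags/offset fields and DRM_IOCTL_I915_GEM_MMAP_OFFSET) introduced alongside this series:

	#include <stdint.h>
	#include <sys/ioctl.h>
	#include <sys/mman.h>
	#include <drm/i915_drm.h>

	/* Map a GEM handle write-combined; returns NULL on failure. */
	static void *map_bo_wc(int drm_fd, uint32_t handle, size_t size)
	{
		struct drm_i915_gem_mmap_offset arg = {
			.handle = handle,
			.flags = I915_MMAP_OFFSET_WC,
		};
		void *ptr;

		if (ioctl(drm_fd, DRM_IOCTL_I915_GEM_MMAP_OFFSET, &arg))
			return NULL;

		/* arg.offset is the fake offset selecting the mmo node. */
		ptr = mmap(NULL, size, PROT_READ | PROT_WRITE,
			   MAP_SHARED, drm_fd, arg.offset);
		return ptr == MAP_FAILED ? NULL : ptr;
	}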
+
+static void vm_open(struct vm_area_struct *vma)
{
- struct drm_i915_gem_mmap_gtt *args = data;
+ struct i915_mmap_offset *mmo = vma->vm_private_data;
+ struct drm_i915_gem_object *obj = mmo->obj;
- return i915_gem_mmap_gtt(file, dev, args->handle, &args->offset);
+ GEM_BUG_ON(!obj);
+ i915_gem_object_get(obj);
+}
+
+static void vm_close(struct vm_area_struct *vma)
+{
+ struct i915_mmap_offset *mmo = vma->vm_private_data;
+ struct drm_i915_gem_object *obj = mmo->obj;
+
+ GEM_BUG_ON(!obj);
+ i915_gem_object_put(obj);
+}
+
+static const struct vm_operations_struct vm_ops_gtt = {
+ .fault = vm_fault_gtt,
+ .open = vm_open,
+ .close = vm_close,
+};
+
+static const struct vm_operations_struct vm_ops_cpu = {
+ .fault = vm_fault_cpu,
+ .open = vm_open,
+ .close = vm_close,
+};
+
+static int singleton_release(struct inode *inode, struct file *file)
+{
+ struct drm_i915_private *i915 = file->private_data;
+
+ cmpxchg(&i915->gem.mmap_singleton, file, NULL);
+ drm_dev_put(&i915->drm);
+
+ return 0;
+}
+
+static const struct file_operations singleton_fops = {
+ .owner = THIS_MODULE,
+ .release = singleton_release,
+};
+
+static struct file *mmap_singleton(struct drm_i915_private *i915)
+{
+ struct file *file;
+
+ rcu_read_lock();
+ file = i915->gem.mmap_singleton;
+ if (file && !get_file_rcu(file))
+ file = NULL;
+ rcu_read_unlock();
+ if (file)
+ return file;
+
+ file = anon_inode_getfile("i915.gem", &singleton_fops, i915, O_RDWR);
+ if (IS_ERR(file))
+ return file;
+
+ /* Everyone shares a single global address space */
+ file->f_mapping = i915->drm.anon_inode->i_mapping;
+
+ smp_store_mb(i915->gem.mmap_singleton, file);
+ drm_dev_get(&i915->drm);
+
+ return file;
+}
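mmap_singleton() above is a lazily-created, RCU-published singleton: lookups take a reference only if the file is still live (get_file_rcu() is an inc-not-zero on f_count), and singleton_release() retires the slot with cmpxchg() so a dying file is never handed out. The lookup half, reduced to its essence (names hypothetical):

	static struct file *get_live_singleton(struct file **slot)
	{
		struct file *file;

		rcu_read_lock();
		file = READ_ONCE(*slot);
		if (file && !get_file_rcu(file))
			file = NULL;	/* lost the race with the final fput */
		rcu_read_unlock();

		return file;	/* NULL: caller creates and publishes anew */
	}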
+
+/*
+ * This overcomes the limitation in drm_gem_mmap's assignment of a
+ * drm_gem_object as the vma->vm_private_data, since we need to
+ * be able to resolve multiple mmap offsets which could be tied
+ * to a single gem object.
+ */
+int i915_gem_mmap(struct file *filp, struct vm_area_struct *vma)
+{
+ struct drm_vma_offset_node *node;
+ struct drm_file *priv = filp->private_data;
+ struct drm_device *dev = priv->minor->dev;
+ struct i915_mmap_offset *mmo = NULL;
+ struct drm_gem_object *obj = NULL;
+ struct file *anon;
+
+ if (drm_dev_is_unplugged(dev))
+ return -ENODEV;
+
+ drm_vma_offset_lock_lookup(dev->vma_offset_manager);
+ node = drm_vma_offset_exact_lookup_locked(dev->vma_offset_manager,
+ vma->vm_pgoff,
+ vma_pages(vma));
+ if (likely(node)) {
+ mmo = container_of(node, struct i915_mmap_offset,
+ vma_node);
+ /*
+ * In our dependency chain, the drm_vma_offset_node
+ * depends on the validity of the mmo, which depends on
+ * the gem object. However the only reference we have
+ * at this point is the mmo (as the parent of the node).
+ * Try to check if the gem object was at least cleared.
+ */
+ if (!mmo || !mmo->obj) {
+ drm_vma_offset_unlock_lookup(dev->vma_offset_manager);
+ return -EINVAL;
+ }
+ /*
+		 * Skip 0-refcnted objects as they are in the process of being
+		 * destroyed and will be invalid when the vma manager lock
+		 * is released.
+ */
+ obj = &mmo->obj->base;
+ if (!kref_get_unless_zero(&obj->refcount))
+ obj = NULL;
+ }
+ drm_vma_offset_unlock_lookup(dev->vma_offset_manager);
+ if (!obj)
+ return -EINVAL;
+
+ if (!drm_vma_node_is_allowed(node, priv)) {
+ drm_gem_object_put_unlocked(obj);
+ return -EACCES;
+ }
+
+ if (i915_gem_object_is_readonly(to_intel_bo(obj))) {
+ if (vma->vm_flags & VM_WRITE) {
+ drm_gem_object_put_unlocked(obj);
+ return -EINVAL;
+ }
+ vma->vm_flags &= ~VM_MAYWRITE;
+ }
+
+ anon = mmap_singleton(to_i915(obj->dev));
+ if (IS_ERR(anon)) {
+ drm_gem_object_put_unlocked(obj);
+ return PTR_ERR(anon);
+ }
+
+ vma->vm_flags |= VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP;
+ vma->vm_private_data = mmo;
+
+ /*
+ * We keep the ref on mmo->obj, not vm_file, but we require
+ * vma->vm_file->f_mapping, see vma_link(), for later revocation.
+ * Our userspace is accustomed to having per-file resource cleanup
+ * (i.e. contexts, objects and requests) on their close(fd), which
+	 * requires avoiding extraneous references to their filp, hence
+	 * we prefer to use an anonymous file for their mmaps.
+ */
+ fput(vma->vm_file);
+ vma->vm_file = anon;
+
+ switch (mmo->mmap_type) {
+ case I915_MMAP_TYPE_WC:
+ vma->vm_page_prot =
+ pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
+ vma->vm_ops = &vm_ops_cpu;
+ break;
+
+ case I915_MMAP_TYPE_WB:
+ vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
+ vma->vm_ops = &vm_ops_cpu;
+ break;
+
+ case I915_MMAP_TYPE_UC:
+ vma->vm_page_prot =
+ pgprot_noncached(vm_get_page_prot(vma->vm_flags));
+ vma->vm_ops = &vm_ops_cpu;
+ break;
+
+ case I915_MMAP_TYPE_GTT:
+ vma->vm_page_prot =
+ pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
+ vma->vm_ops = &vm_ops_gtt;
+ break;
+ }
+ vma->vm_page_prot = pgprot_decrypted(vma->vm_page_prot);
+
+ return 0;
}
#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_mman.h b/drivers/gpu/drm/i915/gem/i915_gem_mman.h
new file mode 100644
index 000000000000..862e01b7cb69
--- /dev/null
+++ b/drivers/gpu/drm/i915/gem/i915_gem_mman.h
@@ -0,0 +1,31 @@
+/*
+ * SPDX-License-Identifier: MIT
+ *
+ * Copyright © 2019 Intel Corporation
+ */
+
+#ifndef __I915_GEM_MMAN_H__
+#define __I915_GEM_MMAN_H__
+
+#include <linux/mm_types.h>
+#include <linux/types.h>
+
+struct drm_device;
+struct drm_file;
+struct drm_i915_gem_object;
+struct file;
+struct i915_mmap_offset;
+struct mutex;
+
+int i915_gem_mmap_gtt_version(void);
+int i915_gem_mmap(struct file *filp, struct vm_area_struct *vma);
+
+int i915_gem_dumb_mmap_offset(struct drm_file *file_priv,
+ struct drm_device *dev,
+ u32 handle, u64 *offset);
+
+void __i915_gem_object_release_mmap_gtt(struct drm_i915_gem_object *obj);
+void i915_gem_object_release_mmap(struct drm_i915_gem_object *obj);
+void i915_gem_object_release_mmap_offset(struct drm_i915_gem_object *obj);
+
+#endif
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_object.c b/drivers/gpu/drm/i915/gem/i915_gem_object.c
index a596548c07bf..46bacc82ddc4 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_object.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_object.c
@@ -22,11 +22,14 @@
*
*/
+#include <linux/sched/mm.h>
+
#include "display/intel_frontbuffer.h"
#include "gt/intel_gt.h"
#include "i915_drv.h"
#include "i915_gem_clflush.h"
#include "i915_gem_context.h"
+#include "i915_gem_mman.h"
#include "i915_gem_object.h"
#include "i915_globals.h"
#include "i915_trace.h"
@@ -59,6 +62,9 @@ void i915_gem_object_init(struct drm_i915_gem_object *obj,
INIT_LIST_HEAD(&obj->lut_list);
+ spin_lock_init(&obj->mmo.lock);
+ INIT_LIST_HEAD(&obj->mmo.offsets);
+
init_rcu_head(&obj->rcu);
obj->ops = ops;
@@ -95,6 +101,7 @@ void i915_gem_close_object(struct drm_gem_object *gem, struct drm_file *file)
struct drm_i915_gem_object *obj = to_intel_bo(gem);
struct drm_i915_file_private *fpriv = file->driver_priv;
struct i915_lut_handle *lut, *ln;
+ struct i915_mmap_offset *mmo;
LIST_HEAD(close);
i915_gem_object_lock(obj);
@@ -109,6 +116,17 @@ void i915_gem_close_object(struct drm_gem_object *gem, struct drm_file *file)
}
i915_gem_object_unlock(obj);
+ spin_lock(&obj->mmo.lock);
+ list_for_each_entry(mmo, &obj->mmo.offsets, offset) {
+ if (mmo->file != file)
+ continue;
+
+ spin_unlock(&obj->mmo.lock);
+ drm_vma_node_revoke(&mmo->vma_node, file);
+ spin_lock(&obj->mmo.lock);
+ }
+ spin_unlock(&obj->mmo.lock);
+
list_for_each_entry_safe(lut, ln, &close, obj_link) {
struct i915_gem_context *ctx = lut->ctx;
struct i915_vma *vma;
@@ -156,6 +174,8 @@ static void __i915_gem_free_objects(struct drm_i915_private *i915,
wakeref = intel_runtime_pm_get(&i915->runtime_pm);
llist_for_each_entry_safe(obj, on, freed, freed) {
+ struct i915_mmap_offset *mmo, *mn;
+
trace_i915_gem_object_destroy(obj);
if (!list_empty(&obj->vma.list)) {
@@ -174,19 +194,28 @@ static void __i915_gem_free_objects(struct drm_i915_private *i915,
GEM_BUG_ON(vma->obj != obj);
spin_unlock(&obj->vma.lock);
- i915_vma_destroy(vma);
+ __i915_vma_put(vma);
spin_lock(&obj->vma.lock);
}
spin_unlock(&obj->vma.lock);
}
+ i915_gem_object_release_mmap(obj);
+
+ list_for_each_entry_safe(mmo, mn, &obj->mmo.offsets, offset) {
+ drm_vma_offset_remove(obj->base.dev->vma_offset_manager,
+ &mmo->vma_node);
+ kfree(mmo);
+ }
+ INIT_LIST_HEAD(&obj->mmo.offsets);
+
GEM_BUG_ON(atomic_read(&obj->bind_count));
GEM_BUG_ON(obj->userfault_count);
GEM_BUG_ON(!list_empty(&obj->lut_list));
atomic_set(&obj->mm.pages_pin_count, 0);
- __i915_gem_object_put_pages(obj, I915_MM_NORMAL);
+ __i915_gem_object_put_pages(obj);
GEM_BUG_ON(i915_gem_object_has_pages(obj));
bitmap_free(obj->bit_17);
@@ -277,18 +306,14 @@ i915_gem_object_flush_write_domain(struct drm_i915_gem_object *obj,
switch (obj->write_domain) {
case I915_GEM_DOMAIN_GTT:
- for_each_ggtt_vma(vma, obj)
- intel_gt_flush_ggtt_writes(vma->vm->gt);
-
- i915_gem_object_flush_frontbuffer(obj, ORIGIN_CPU);
-
+ spin_lock(&obj->vma.lock);
for_each_ggtt_vma(vma, obj) {
- if (vma->iomap)
- continue;
-
- i915_vma_unset_ggtt_write(vma);
+ if (i915_vma_unset_ggtt_write(vma))
+ intel_gt_flush_ggtt_writes(vma->vm->gt);
}
+ spin_unlock(&obj->vma.lock);
+ i915_gem_object_flush_frontbuffer(obj, ORIGIN_CPU);
break;
case I915_GEM_DOMAIN_WC:
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_object.h b/drivers/gpu/drm/i915/gem/i915_gem_object.h
index 4b93591fd5c7..db70a3306e59 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_object.h
+++ b/drivers/gpu/drm/i915/gem/i915_gem_object.h
@@ -16,6 +16,7 @@
#include "display/intel_frontbuffer.h"
#include "i915_gem_object_types.h"
#include "i915_gem_gtt.h"
+#include "i915_vma_types.h"
void i915_gem_init__objects(struct drm_i915_private *i915);
@@ -132,13 +133,13 @@ void i915_gem_object_unlock_fence(struct drm_i915_gem_object *obj,
static inline void
i915_gem_object_set_readonly(struct drm_i915_gem_object *obj)
{
- obj->base.vma_node.readonly = true;
+ obj->flags |= I915_BO_READONLY;
}
static inline bool
i915_gem_object_is_readonly(const struct drm_i915_gem_object *obj)
{
- return obj->base.vma_node.readonly;
+ return obj->flags & I915_BO_READONLY;
}
static inline bool
@@ -271,10 +272,27 @@ void __i915_gem_object_set_pages(struct drm_i915_gem_object *obj,
int ____i915_gem_object_get_pages(struct drm_i915_gem_object *obj);
int __i915_gem_object_get_pages(struct drm_i915_gem_object *obj);
+enum i915_mm_subclass { /* lockdep subclass for obj->mm.lock/struct_mutex */
+ I915_MM_NORMAL = 0,
+ /*
+	 * Only used by struct_mutex, when called "recursively" from
+	 * direct-reclaim-esque paths. Safe because there is only ever one
+	 * struct_mutex in the entire system.
+ */
+ I915_MM_SHRINKER = 1,
+ /*
+ * Used for obj->mm.lock when allocating pages. Safe because the object
+ * isn't yet on any LRU, and therefore the shrinker can't deadlock on
+ * it. As soon as the object has pages, obj->mm.lock nests within
+ * fs_reclaim.
+ */
+ I915_MM_GET_PAGES = 1,
+};
+
static inline int __must_check
i915_gem_object_pin_pages(struct drm_i915_gem_object *obj)
{
- might_lock(&obj->mm.lock);
+ might_lock_nested(&obj->mm.lock, I915_MM_GET_PAGES);
if (atomic_inc_not_zero(&obj->mm.pages_pin_count))
return 0;
@@ -317,13 +335,7 @@ i915_gem_object_unpin_pages(struct drm_i915_gem_object *obj)
__i915_gem_object_unpin_pages(obj);
}
-enum i915_mm_subclass { /* lockdep subclass for obj->mm.lock/struct_mutex */
- I915_MM_NORMAL = 0,
- I915_MM_SHRINKER /* called "recursively" from direct-reclaim-esque */
-};
-
-int __i915_gem_object_put_pages(struct drm_i915_gem_object *obj,
- enum i915_mm_subclass subclass);
+int __i915_gem_object_put_pages(struct drm_i915_gem_object *obj);
void i915_gem_object_truncate(struct drm_i915_gem_object *obj);
void i915_gem_object_writeback(struct drm_i915_gem_object *obj);
@@ -376,9 +388,6 @@ static inline void i915_gem_object_unpin_map(struct drm_i915_gem_object *obj)
i915_gem_object_unpin_pages(obj);
}
-void __i915_gem_object_release_mmap(struct drm_i915_gem_object *obj);
-void i915_gem_object_release_mmap(struct drm_i915_gem_object *obj);
-
void
i915_gem_object_flush_write_domain(struct drm_i915_gem_object *obj,
unsigned int flush_domains);
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_object_types.h b/drivers/gpu/drm/i915/gem/i915_gem_object_types.h
index e3f3944fbd90..88e268633fdc 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_object_types.h
+++ b/drivers/gpu/drm/i915/gem/i915_gem_object_types.h
@@ -63,6 +63,23 @@ struct drm_i915_gem_object_ops {
void (*release)(struct drm_i915_gem_object *obj);
};
+enum i915_mmap_type {
+ I915_MMAP_TYPE_GTT = 0,
+ I915_MMAP_TYPE_WC,
+ I915_MMAP_TYPE_WB,
+ I915_MMAP_TYPE_UC,
+};
+
+struct i915_mmap_offset {
+ struct drm_device *dev;
+ struct drm_vma_offset_node vma_node;
+ struct drm_i915_gem_object *obj;
+ struct drm_file *file;
+ enum i915_mmap_type mmap_type;
+
+ struct list_head offset;
+};
+
struct drm_i915_gem_object {
struct drm_gem_object base;
@@ -118,12 +135,18 @@ struct drm_i915_gem_object {
unsigned int userfault_count;
struct list_head userfault_link;
+ struct {
+ spinlock_t lock; /* Protects access to mmo offsets */
+ struct list_head offsets;
+ } mmo;
+
I915_SELFTEST_DECLARE(struct list_head st_link);
unsigned long flags;
#define I915_BO_ALLOC_CONTIGUOUS BIT(0)
#define I915_BO_ALLOC_VOLATILE BIT(1)
#define I915_BO_ALLOC_FLAGS (I915_BO_ALLOC_CONTIGUOUS | I915_BO_ALLOC_VOLATILE)
+#define I915_BO_READONLY BIT(2)
/*
* Is the object to be mapped as read-only to the GPU
@@ -162,7 +185,11 @@ struct drm_i915_gem_object {
atomic_t bind_count;
struct {
- struct mutex lock; /* protects the pages and their use */
+ /*
+ * Protects the pages and their use. Do not use directly, but
+ * instead go through the pin/unpin interfaces.
+ */
+ struct mutex lock;
atomic_t pages_pin_count;
atomic_t shrink_pin;
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_pages.c b/drivers/gpu/drm/i915/gem/i915_gem_pages.c
index 29f4c2850745..54aca5c9101e 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_pages.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_pages.c
@@ -8,6 +8,7 @@
#include "i915_gem_object.h"
#include "i915_scatterlist.h"
#include "i915_gem_lmem.h"
+#include "i915_gem_mman.h"
void __i915_gem_object_set_pages(struct drm_i915_gem_object *obj,
struct sg_table *pages,
@@ -106,7 +107,7 @@ int __i915_gem_object_get_pages(struct drm_i915_gem_object *obj)
{
int err;
- err = mutex_lock_interruptible(&obj->mm.lock);
+ err = mutex_lock_interruptible_nested(&obj->mm.lock, I915_MM_GET_PAGES);
if (err)
return err;
@@ -157,9 +158,7 @@ static void __i915_gem_object_reset_page_iter(struct drm_i915_gem_object *obj)
static void unmap_object(struct drm_i915_gem_object *obj, void *ptr)
{
- if (i915_gem_object_is_lmem(obj))
- io_mapping_unmap((void __force __iomem *)ptr);
- else if (is_vmalloc_addr(ptr))
+ if (is_vmalloc_addr(ptr))
vunmap(ptr);
else
kunmap(kmap_to_page(ptr));
@@ -190,8 +189,7 @@ __i915_gem_object_unset_pages(struct drm_i915_gem_object *obj)
return pages;
}
-int __i915_gem_object_put_pages(struct drm_i915_gem_object *obj,
- enum i915_mm_subclass subclass)
+int __i915_gem_object_put_pages(struct drm_i915_gem_object *obj)
{
struct sg_table *pages;
int err;
@@ -202,12 +200,14 @@ int __i915_gem_object_put_pages(struct drm_i915_gem_object *obj,
GEM_BUG_ON(atomic_read(&obj->bind_count));
/* May be called by shrinker from within get_pages() (on another bo) */
- mutex_lock_nested(&obj->mm.lock, subclass);
+ mutex_lock(&obj->mm.lock);
if (unlikely(atomic_read(&obj->mm.pages_pin_count))) {
err = -EBUSY;
goto unlock;
}
+ i915_gem_object_release_mmap_offset(obj);
+
/*
* ->put_pages might need to allocate memory for the bit17 swizzle
* array, hence protect them from being reaped by removing them from gtt
@@ -234,46 +234,44 @@ unlock:
return err;
}
+static inline pte_t iomap_pte(resource_size_t base,
+ dma_addr_t offset,
+ pgprot_t prot)
+{
+ return pte_mkspecial(pfn_pte((base + offset) >> PAGE_SHIFT, prot));
+}
+
/* The 'mapping' part of i915_gem_object_pin_map() below */
static void *i915_gem_object_map(struct drm_i915_gem_object *obj,
enum i915_map_type type)
{
- unsigned long n_pages = obj->base.size >> PAGE_SHIFT;
+ unsigned long n_pte = obj->base.size >> PAGE_SHIFT;
struct sg_table *sgt = obj->mm.pages;
- struct sgt_iter sgt_iter;
- struct page *page;
- struct page *stack_pages[32];
- struct page **pages = stack_pages;
- unsigned long i = 0;
+ pte_t *stack[32], **mem;
+ struct vm_struct *area;
pgprot_t pgprot;
- void *addr;
-
- if (i915_gem_object_is_lmem(obj)) {
- void __iomem *io;
-
- if (type != I915_MAP_WC)
- return NULL;
- io = i915_gem_object_lmem_io_map(obj, 0, obj->base.size);
- return (void __force *)io;
- }
+ if (!i915_gem_object_has_struct_page(obj) && type != I915_MAP_WC)
+ return NULL;
/* A single page can always be kmapped */
- if (n_pages == 1 && type == I915_MAP_WB)
+ if (n_pte == 1 && type == I915_MAP_WB)
return kmap(sg_page(sgt->sgl));
- if (n_pages > ARRAY_SIZE(stack_pages)) {
+ mem = stack;
+ if (n_pte > ARRAY_SIZE(stack)) {
/* Too big for stack -- allocate temporary array instead */
- pages = kvmalloc_array(n_pages, sizeof(*pages), GFP_KERNEL);
- if (!pages)
+ mem = kvmalloc_array(n_pte, sizeof(*mem), GFP_KERNEL);
+ if (!mem)
return NULL;
}
- for_each_sgt_page(page, sgt_iter, sgt)
- pages[i++] = page;
-
- /* Check that we have the expected number of pages */
- GEM_BUG_ON(i != n_pages);
+ area = alloc_vm_area(obj->base.size, mem);
+ if (!area) {
+ if (mem != stack)
+ kvfree(mem);
+ return NULL;
+ }
switch (type) {
default:
@@ -286,12 +284,31 @@ static void *i915_gem_object_map(struct drm_i915_gem_object *obj,
pgprot = pgprot_writecombine(PAGE_KERNEL_IO);
break;
}
- addr = vmap(pages, n_pages, 0, pgprot);
- if (pages != stack_pages)
- kvfree(pages);
+ if (i915_gem_object_has_struct_page(obj)) {
+ struct sgt_iter iter;
+ struct page *page;
+ pte_t **ptes = mem;
+
+ for_each_sgt_page(page, iter, sgt)
+ **ptes++ = mk_pte(page, pgprot);
+ } else {
+ resource_size_t iomap;
+ struct sgt_iter iter;
+ pte_t **ptes = mem;
+ dma_addr_t addr;
+
+ iomap = obj->mm.region->iomap.base;
+ iomap -= obj->mm.region->region.start;
+
+ for_each_sgt_daddr(addr, iter, sgt)
+ **ptes++ = iomap_pte(iomap, addr, pgprot);
+ }
+
+ if (mem != stack)
+ kvfree(mem);
- return addr;
+ return area->addr;
}
/* get, pin, and map the pages of the object into kernel space */
@@ -308,7 +325,7 @@ void *i915_gem_object_pin_map(struct drm_i915_gem_object *obj,
if (!i915_gem_object_type_has(obj, flags))
return ERR_PTR(-ENXIO);
- err = mutex_lock_interruptible(&obj->mm.lock);
+ err = mutex_lock_interruptible_nested(&obj->mm.lock, I915_MM_GET_PAGES);
if (err)
return ERR_PTR(err);
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_phys.c b/drivers/gpu/drm/i915/gem/i915_gem_phys.c
index 8043ff63d73f..b1b7c1b3038a 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_phys.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_phys.c
@@ -164,7 +164,7 @@ int i915_gem_object_attach_phys(struct drm_i915_gem_object *obj, int align)
if (err)
return err;
- mutex_lock(&obj->mm.lock);
+ mutex_lock_nested(&obj->mm.lock, I915_MM_GET_PAGES);
if (obj->mm.madv != I915_MADV_WILLNEED) {
err = -EFAULT;
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_pm.c b/drivers/gpu/drm/i915/gem/i915_gem_pm.c
index f88ee1317bb4..c8264eb036bf 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_pm.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_pm.c
@@ -13,7 +13,7 @@
void i915_gem_suspend(struct drm_i915_private *i915)
{
- GEM_TRACE("\n");
+ GEM_TRACE("%s\n", dev_name(i915->drm.dev));
intel_wakeref_auto(&i915->ggtt.userfault_wakeref, 0);
flush_workqueue(i915->wq);
@@ -99,30 +99,12 @@ void i915_gem_suspend_late(struct drm_i915_private *i915)
void i915_gem_resume(struct drm_i915_private *i915)
{
- GEM_TRACE("\n");
-
- intel_uncore_forcewake_get(&i915->uncore, FORCEWAKE_ALL);
-
- if (intel_gt_init_hw(&i915->gt))
- goto err_wedged;
+ GEM_TRACE("%s\n", dev_name(i915->drm.dev));
/*
* As we didn't flush the kernel context before suspend, we cannot
* guarantee that the context image is complete. So let's just reset
* it and start again.
*/
- if (intel_gt_resume(&i915->gt))
- goto err_wedged;
-
-out_unlock:
- intel_uncore_forcewake_put(&i915->uncore, FORCEWAKE_ALL);
- return;
-
-err_wedged:
- if (!intel_gt_is_wedged(&i915->gt)) {
- dev_err(i915->drm.dev,
- "Failed to re-initialize GPU, declaring it wedged!\n");
- intel_gt_set_wedged(&i915->gt);
- }
- goto out_unlock;
+ intel_gt_resume(&i915->gt);
}
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_region.c b/drivers/gpu/drm/i915/gem/i915_gem_region.c
index 2f7bcfb9c964..1515384d7e0e 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_region.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_region.c
@@ -85,7 +85,7 @@ i915_gem_object_get_pages_buddy(struct drm_i915_gem_object *obj)
}
prev_end = offset + block_size;
- };
+ }
sg_page_sizes |= sg->length;
sg_mark_end(sg);
@@ -107,7 +107,10 @@ void i915_gem_object_init_memory_region(struct drm_i915_gem_object *obj,
{
INIT_LIST_HEAD(&obj->mm.blocks);
obj->mm.region = intel_memory_region_get(mem);
+
obj->flags |= flags;
+ if (obj->base.size <= mem->min_page_size)
+ obj->flags |= I915_BO_ALLOC_CONTIGUOUS;
mutex_lock(&mem->objects.lock);
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_shmem.c b/drivers/gpu/drm/i915/gem/i915_gem_shmem.c
index 4d69c3fc3439..a2a980d9d241 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_shmem.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_shmem.c
@@ -594,6 +594,8 @@ static int init_shmem(struct intel_memory_region *mem)
err);
}
+ intel_memory_region_set_name(mem, "system");
+
return 0; /* Don't error, we can simply fallback to the kernel mnt */
}
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_shrinker.c b/drivers/gpu/drm/i915/gem/i915_gem_shrinker.c
index f2418a1cfe68..f7e4b39c734f 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_shrinker.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_shrinker.c
@@ -57,7 +57,7 @@ static bool unsafe_drop_pages(struct drm_i915_gem_object *obj,
flags = I915_GEM_OBJECT_UNBIND_ACTIVE;
if (i915_gem_object_unbind(obj, flags) == 0)
- __i915_gem_object_put_pages(obj, I915_MM_SHRINKER);
+ __i915_gem_object_put_pages(obj);
return !i915_gem_object_has_pages(obj);
}
@@ -209,8 +209,7 @@ i915_gem_shrink(struct drm_i915_private *i915,
if (unsafe_drop_pages(obj, shrink)) {
/* May arrive from get_pages on another bo */
- mutex_lock_nested(&obj->mm.lock,
- I915_MM_SHRINKER);
+ mutex_lock(&obj->mm.lock);
if (!i915_gem_object_has_pages(obj)) {
try_to_writeback(obj, shrink);
count += obj->base.size >> PAGE_SHIFT;
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_stolen.c b/drivers/gpu/drm/i915/gem/i915_gem_stolen.c
index a2d49c04e6a4..451f3078d60d 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_stolen.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_stolen.c
@@ -26,48 +26,49 @@
* for is a boon.
*/
-int i915_gem_stolen_insert_node_in_range(struct drm_i915_private *dev_priv,
+int i915_gem_stolen_insert_node_in_range(struct drm_i915_private *i915,
struct drm_mm_node *node, u64 size,
unsigned alignment, u64 start, u64 end)
{
int ret;
- if (!drm_mm_initialized(&dev_priv->mm.stolen))
+ if (!drm_mm_initialized(&i915->mm.stolen))
return -ENODEV;
/* WaSkipStolenMemoryFirstPage:bdw+ */
- if (INTEL_GEN(dev_priv) >= 8 && start < 4096)
+ if (INTEL_GEN(i915) >= 8 && start < 4096)
start = 4096;
- mutex_lock(&dev_priv->mm.stolen_lock);
- ret = drm_mm_insert_node_in_range(&dev_priv->mm.stolen, node,
+ mutex_lock(&i915->mm.stolen_lock);
+ ret = drm_mm_insert_node_in_range(&i915->mm.stolen, node,
size, alignment, 0,
start, end, DRM_MM_INSERT_BEST);
- mutex_unlock(&dev_priv->mm.stolen_lock);
+ mutex_unlock(&i915->mm.stolen_lock);
return ret;
}
-int i915_gem_stolen_insert_node(struct drm_i915_private *dev_priv,
+int i915_gem_stolen_insert_node(struct drm_i915_private *i915,
struct drm_mm_node *node, u64 size,
unsigned alignment)
{
- return i915_gem_stolen_insert_node_in_range(dev_priv, node, size,
+ return i915_gem_stolen_insert_node_in_range(i915, node, size,
alignment, 0, U64_MAX);
}
-void i915_gem_stolen_remove_node(struct drm_i915_private *dev_priv,
+void i915_gem_stolen_remove_node(struct drm_i915_private *i915,
struct drm_mm_node *node)
{
- mutex_lock(&dev_priv->mm.stolen_lock);
+ mutex_lock(&i915->mm.stolen_lock);
drm_mm_remove_node(node);
- mutex_unlock(&dev_priv->mm.stolen_lock);
+ mutex_unlock(&i915->mm.stolen_lock);
}
-static int i915_adjust_stolen(struct drm_i915_private *dev_priv,
+static int i915_adjust_stolen(struct drm_i915_private *i915,
struct resource *dsm)
{
- struct i915_ggtt *ggtt = &dev_priv->ggtt;
+ struct i915_ggtt *ggtt = &i915->ggtt;
+ struct intel_uncore *uncore = ggtt->vm.gt->uncore;
struct resource *r;
if (dsm->start == 0 || dsm->end <= dsm->start)
@@ -79,14 +80,14 @@ static int i915_adjust_stolen(struct drm_i915_private *dev_priv,
*/
/* Make sure we don't clobber the GTT if it's within stolen memory */
- if (INTEL_GEN(dev_priv) <= 4 &&
- !IS_G33(dev_priv) && !IS_PINEVIEW(dev_priv) && !IS_G4X(dev_priv)) {
+ if (INTEL_GEN(i915) <= 4 &&
+ !IS_G33(i915) && !IS_PINEVIEW(i915) && !IS_G4X(i915)) {
struct resource stolen[2] = {*dsm, *dsm};
struct resource ggtt_res;
resource_size_t ggtt_start;
- ggtt_start = I915_READ(PGTBL_CTL);
- if (IS_GEN(dev_priv, 4))
+ ggtt_start = intel_uncore_read(uncore, PGTBL_CTL);
+ if (IS_GEN(i915, 4))
ggtt_start = (ggtt_start & PGTBL_ADDRESS_LO_MASK) |
(ggtt_start & PGTBL_ADDRESS_HI_MASK) << 28;
else
@@ -120,7 +121,7 @@ static int i915_adjust_stolen(struct drm_i915_private *dev_priv,
* kernel. So if the region is already marked as busy, something
* is seriously wrong.
*/
- r = devm_request_mem_region(dev_priv->drm.dev, dsm->start,
+ r = devm_request_mem_region(i915->drm.dev, dsm->start,
resource_size(dsm),
"Graphics Stolen Memory");
if (r == NULL) {
@@ -133,14 +134,14 @@ static int i915_adjust_stolen(struct drm_i915_private *dev_priv,
* reservation starting from 1 instead of 0.
* There's also BIOS with off-by-one on the other end.
*/
- r = devm_request_mem_region(dev_priv->drm.dev, dsm->start + 1,
+ r = devm_request_mem_region(i915->drm.dev, dsm->start + 1,
resource_size(dsm) - 2,
"Graphics Stolen Memory");
/*
* GEN3 firmware likes to smash pci bridges into the stolen
* range. Apparently this works.
*/
- if (r == NULL && !IS_GEN(dev_priv, 3)) {
+ if (!r && !IS_GEN(i915, 3)) {
DRM_ERROR("conflict detected with stolen region: %pR\n",
dsm);
@@ -151,25 +152,27 @@ static int i915_adjust_stolen(struct drm_i915_private *dev_priv,
return 0;
}
-static void i915_gem_cleanup_stolen(struct drm_i915_private *dev_priv)
+static void i915_gem_cleanup_stolen(struct drm_i915_private *i915)
{
- if (!drm_mm_initialized(&dev_priv->mm.stolen))
+ if (!drm_mm_initialized(&i915->mm.stolen))
return;
- drm_mm_takedown(&dev_priv->mm.stolen);
+ drm_mm_takedown(&i915->mm.stolen);
}
-static void g4x_get_stolen_reserved(struct drm_i915_private *dev_priv,
+static void g4x_get_stolen_reserved(struct drm_i915_private *i915,
+ struct intel_uncore *uncore,
resource_size_t *base,
resource_size_t *size)
{
- u32 reg_val = I915_READ(IS_GM45(dev_priv) ?
- CTG_STOLEN_RESERVED :
- ELK_STOLEN_RESERVED);
- resource_size_t stolen_top = dev_priv->dsm.end + 1;
+ u32 reg_val = intel_uncore_read(uncore,
+ IS_GM45(i915) ?
+ CTG_STOLEN_RESERVED :
+ ELK_STOLEN_RESERVED);
+ resource_size_t stolen_top = i915->dsm.end + 1;
DRM_DEBUG_DRIVER("%s_STOLEN_RESERVED = %08x\n",
- IS_GM45(dev_priv) ? "CTG" : "ELK", reg_val);
+ IS_GM45(i915) ? "CTG" : "ELK", reg_val);
if ((reg_val & G4X_STOLEN_RESERVED_ENABLE) == 0)
return;
@@ -178,7 +181,7 @@ static void g4x_get_stolen_reserved(struct drm_i915_private *dev_priv,
* Whether ILK really reuses the ELK register for this is unclear.
* Let's see if we catch anyone with this supposedly enabled on ILK.
*/
- WARN(IS_GEN(dev_priv, 5), "ILK stolen reserved found? 0x%08x\n",
+ WARN(IS_GEN(i915, 5), "ILK stolen reserved found? 0x%08x\n",
reg_val);
if (!(reg_val & G4X_STOLEN_RESERVED_ADDR2_MASK))
@@ -190,11 +193,12 @@ static void g4x_get_stolen_reserved(struct drm_i915_private *dev_priv,
*size = stolen_top - *base;
}
-static void gen6_get_stolen_reserved(struct drm_i915_private *dev_priv,
+static void gen6_get_stolen_reserved(struct drm_i915_private *i915,
+ struct intel_uncore *uncore,
resource_size_t *base,
resource_size_t *size)
{
- u32 reg_val = I915_READ(GEN6_STOLEN_RESERVED);
+ u32 reg_val = intel_uncore_read(uncore, GEN6_STOLEN_RESERVED);
DRM_DEBUG_DRIVER("GEN6_STOLEN_RESERVED = %08x\n", reg_val);
@@ -222,12 +226,13 @@ static void gen6_get_stolen_reserved(struct drm_i915_private *dev_priv,
}
}
-static void vlv_get_stolen_reserved(struct drm_i915_private *dev_priv,
+static void vlv_get_stolen_reserved(struct drm_i915_private *i915,
+ struct intel_uncore *uncore,
resource_size_t *base,
resource_size_t *size)
{
- u32 reg_val = I915_READ(GEN6_STOLEN_RESERVED);
- resource_size_t stolen_top = dev_priv->dsm.end + 1;
+ u32 reg_val = intel_uncore_read(uncore, GEN6_STOLEN_RESERVED);
+ resource_size_t stolen_top = i915->dsm.end + 1;
DRM_DEBUG_DRIVER("GEN6_STOLEN_RESERVED = %08x\n", reg_val);
@@ -250,11 +255,12 @@ static void vlv_get_stolen_reserved(struct drm_i915_private *dev_priv,
*base = stolen_top - *size;
}
-static void gen7_get_stolen_reserved(struct drm_i915_private *dev_priv,
+static void gen7_get_stolen_reserved(struct drm_i915_private *i915,
+ struct intel_uncore *uncore,
resource_size_t *base,
resource_size_t *size)
{
- u32 reg_val = I915_READ(GEN6_STOLEN_RESERVED);
+ u32 reg_val = intel_uncore_read(uncore, GEN6_STOLEN_RESERVED);
DRM_DEBUG_DRIVER("GEN6_STOLEN_RESERVED = %08x\n", reg_val);
@@ -276,11 +282,12 @@ static void gen7_get_stolen_reserved(struct drm_i915_private *dev_priv,
}
}
-static void chv_get_stolen_reserved(struct drm_i915_private *dev_priv,
+static void chv_get_stolen_reserved(struct drm_i915_private *i915,
+ struct intel_uncore *uncore,
resource_size_t *base,
resource_size_t *size)
{
- u32 reg_val = I915_READ(GEN6_STOLEN_RESERVED);
+ u32 reg_val = intel_uncore_read(uncore, GEN6_STOLEN_RESERVED);
DRM_DEBUG_DRIVER("GEN6_STOLEN_RESERVED = %08x\n", reg_val);
@@ -308,12 +315,13 @@ static void chv_get_stolen_reserved(struct drm_i915_private *dev_priv,
}
}
-static void bdw_get_stolen_reserved(struct drm_i915_private *dev_priv,
+static void bdw_get_stolen_reserved(struct drm_i915_private *i915,
+ struct intel_uncore *uncore,
resource_size_t *base,
resource_size_t *size)
{
- u32 reg_val = I915_READ(GEN6_STOLEN_RESERVED);
- resource_size_t stolen_top = dev_priv->dsm.end + 1;
+ u32 reg_val = intel_uncore_read(uncore, GEN6_STOLEN_RESERVED);
+ resource_size_t stolen_top = i915->dsm.end + 1;
DRM_DEBUG_DRIVER("GEN6_STOLEN_RESERVED = %08x\n", reg_val);
@@ -328,10 +336,11 @@ static void bdw_get_stolen_reserved(struct drm_i915_private *dev_priv,
}
static void icl_get_stolen_reserved(struct drm_i915_private *i915,
+ struct intel_uncore *uncore,
resource_size_t *base,
resource_size_t *size)
{
- u64 reg_val = intel_uncore_read64(&i915->uncore, GEN6_STOLEN_RESERVED);
+ u64 reg_val = intel_uncore_read64(uncore, GEN6_STOLEN_RESERVED);
DRM_DEBUG_DRIVER("GEN6_STOLEN_RESERVED = 0x%016llx\n", reg_val);
@@ -356,22 +365,23 @@ static void icl_get_stolen_reserved(struct drm_i915_private *i915,
}
}
-static int i915_gem_init_stolen(struct drm_i915_private *dev_priv)
+static int i915_gem_init_stolen(struct drm_i915_private *i915)
{
+ struct intel_uncore *uncore = &i915->uncore;
resource_size_t reserved_base, stolen_top;
resource_size_t reserved_total, reserved_size;
- mutex_init(&dev_priv->mm.stolen_lock);
+ mutex_init(&i915->mm.stolen_lock);
- if (intel_vgpu_active(dev_priv)) {
- dev_notice(dev_priv->drm.dev,
+ if (intel_vgpu_active(i915)) {
+ dev_notice(i915->drm.dev,
"%s, disabling use of stolen memory\n",
"iGVT-g active");
return 0;
}
- if (intel_vtd_active() && INTEL_GEN(dev_priv) < 8) {
- dev_notice(dev_priv->drm.dev,
+ if (intel_vtd_active() && INTEL_GEN(i915) < 8) {
+ dev_notice(i915->drm.dev,
"%s, disabling use of stolen memory\n",
"DMAR active");
return 0;
@@ -380,58 +390,59 @@ static int i915_gem_init_stolen(struct drm_i915_private *dev_priv)
if (resource_size(&intel_graphics_stolen_res) == 0)
return 0;
- dev_priv->dsm = intel_graphics_stolen_res;
+ i915->dsm = intel_graphics_stolen_res;
- if (i915_adjust_stolen(dev_priv, &dev_priv->dsm))
+ if (i915_adjust_stolen(i915, &i915->dsm))
return 0;
- GEM_BUG_ON(dev_priv->dsm.start == 0);
- GEM_BUG_ON(dev_priv->dsm.end <= dev_priv->dsm.start);
+ GEM_BUG_ON(i915->dsm.start == 0);
+ GEM_BUG_ON(i915->dsm.end <= i915->dsm.start);
- stolen_top = dev_priv->dsm.end + 1;
+ stolen_top = i915->dsm.end + 1;
reserved_base = stolen_top;
reserved_size = 0;
- switch (INTEL_GEN(dev_priv)) {
+ switch (INTEL_GEN(i915)) {
case 2:
case 3:
break;
case 4:
- if (!IS_G4X(dev_priv))
+ if (!IS_G4X(i915))
break;
/* fall through */
case 5:
- g4x_get_stolen_reserved(dev_priv,
+ g4x_get_stolen_reserved(i915, uncore,
&reserved_base, &reserved_size);
break;
case 6:
- gen6_get_stolen_reserved(dev_priv,
+ gen6_get_stolen_reserved(i915, uncore,
&reserved_base, &reserved_size);
break;
case 7:
- if (IS_VALLEYVIEW(dev_priv))
- vlv_get_stolen_reserved(dev_priv,
+ if (IS_VALLEYVIEW(i915))
+ vlv_get_stolen_reserved(i915, uncore,
&reserved_base, &reserved_size);
else
- gen7_get_stolen_reserved(dev_priv,
+ gen7_get_stolen_reserved(i915, uncore,
&reserved_base, &reserved_size);
break;
case 8:
case 9:
case 10:
- if (IS_LP(dev_priv))
- chv_get_stolen_reserved(dev_priv,
+ if (IS_LP(i915))
+ chv_get_stolen_reserved(i915, uncore,
&reserved_base, &reserved_size);
else
- bdw_get_stolen_reserved(dev_priv,
+ bdw_get_stolen_reserved(i915, uncore,
&reserved_base, &reserved_size);
break;
default:
- MISSING_CASE(INTEL_GEN(dev_priv));
+ MISSING_CASE(INTEL_GEN(i915));
/* fall-through */
case 11:
case 12:
- icl_get_stolen_reserved(dev_priv, &reserved_base,
+ icl_get_stolen_reserved(i915, uncore,
+ &reserved_base,
&reserved_size);
break;
}
@@ -448,12 +459,12 @@ static int i915_gem_init_stolen(struct drm_i915_private *dev_priv)
reserved_size = 0;
}
- dev_priv->dsm_reserved =
- (struct resource) DEFINE_RES_MEM(reserved_base, reserved_size);
+ i915->dsm_reserved =
+ (struct resource)DEFINE_RES_MEM(reserved_base, reserved_size);
- if (!resource_contains(&dev_priv->dsm, &dev_priv->dsm_reserved)) {
+ if (!resource_contains(&i915->dsm, &i915->dsm_reserved)) {
DRM_ERROR("Stolen reserved area %pR outside stolen memory %pR\n",
- &dev_priv->dsm_reserved, &dev_priv->dsm);
+ &i915->dsm_reserved, &i915->dsm);
return 0;
}
@@ -462,14 +473,14 @@ static int i915_gem_init_stolen(struct drm_i915_private *dev_priv)
reserved_total = stolen_top - reserved_base;
DRM_DEBUG_DRIVER("Memory reserved for graphics device: %lluK, usable: %lluK\n",
- (u64)resource_size(&dev_priv->dsm) >> 10,
- ((u64)resource_size(&dev_priv->dsm) - reserved_total) >> 10);
+ (u64)resource_size(&i915->dsm) >> 10,
+ ((u64)resource_size(&i915->dsm) - reserved_total) >> 10);
- dev_priv->stolen_usable_size =
- resource_size(&dev_priv->dsm) - reserved_total;
+ i915->stolen_usable_size =
+ resource_size(&i915->dsm) - reserved_total;
/* Basic memrange allocator for stolen space. */
- drm_mm_init(&dev_priv->mm.stolen, 0, dev_priv->stolen_usable_size);
+ drm_mm_init(&i915->mm.stolen, 0, i915->stolen_usable_size);
return 0;
}
@@ -478,11 +489,11 @@ static struct sg_table *
i915_pages_create_for_stolen(struct drm_device *dev,
resource_size_t offset, resource_size_t size)
{
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct drm_i915_private *i915 = to_i915(dev);
struct sg_table *st;
struct scatterlist *sg;
- GEM_BUG_ON(range_overflows(offset, size, resource_size(&dev_priv->dsm)));
+ GEM_BUG_ON(range_overflows(offset, size, resource_size(&i915->dsm)));
/* We hide that we have no struct page backing our stolen object
* by wrapping the contiguous physical allocation with a fake
@@ -502,7 +513,7 @@ i915_pages_create_for_stolen(struct drm_device *dev,
sg->offset = 0;
sg->length = size;
- sg_dma_address(sg) = (dma_addr_t)dev_priv->dsm.start + offset;
+ sg_dma_address(sg) = (dma_addr_t)i915->dsm.start + offset;
sg_dma_len(sg) = size;
return st;
@@ -533,16 +544,15 @@ static void i915_gem_object_put_pages_stolen(struct drm_i915_gem_object *obj,
static void
i915_gem_object_release_stolen(struct drm_i915_gem_object *obj)
{
- struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
+ struct drm_i915_private *i915 = to_i915(obj->base.dev);
struct drm_mm_node *stolen = fetch_and_zero(&obj->stolen);
GEM_BUG_ON(!stolen);
- i915_gem_stolen_remove_node(dev_priv, stolen);
- kfree(stolen);
+ i915_gem_object_release_memory_region(obj);
- if (obj->mm.region)
- i915_gem_object_release_memory_region(obj);
+ i915_gem_stolen_remove_node(i915, stolen);
+ kfree(stolen);
}
static const struct drm_i915_gem_object_ops i915_gem_object_stolen_ops = {
@@ -552,9 +562,8 @@ static const struct drm_i915_gem_object_ops i915_gem_object_stolen_ops = {
};
static struct drm_i915_gem_object *
-__i915_gem_object_create_stolen(struct drm_i915_private *dev_priv,
- struct drm_mm_node *stolen,
- struct intel_memory_region *mem)
+__i915_gem_object_create_stolen(struct intel_memory_region *mem,
+ struct drm_mm_node *stolen)
{
static struct lock_class_key lock_class;
struct drm_i915_gem_object *obj;
@@ -565,20 +574,19 @@ __i915_gem_object_create_stolen(struct drm_i915_private *dev_priv,
if (!obj)
goto err;
- drm_gem_private_object_init(&dev_priv->drm, &obj->base, stolen->size);
+ drm_gem_private_object_init(&mem->i915->drm, &obj->base, stolen->size);
i915_gem_object_init(obj, &i915_gem_object_stolen_ops, &lock_class);
obj->stolen = stolen;
obj->read_domains = I915_GEM_DOMAIN_CPU | I915_GEM_DOMAIN_GTT;
- cache_level = HAS_LLC(dev_priv) ? I915_CACHE_LLC : I915_CACHE_NONE;
+ cache_level = HAS_LLC(mem->i915) ? I915_CACHE_LLC : I915_CACHE_NONE;
i915_gem_object_set_cache_coherency(obj, cache_level);
err = i915_gem_object_pin_pages(obj);
if (err)
goto cleanup;
- if (mem)
- i915_gem_object_init_memory_region(obj, mem, 0);
+ i915_gem_object_init_memory_region(obj, mem, 0);
return obj;
@@ -593,12 +601,12 @@ _i915_gem_object_create_stolen(struct intel_memory_region *mem,
resource_size_t size,
unsigned int flags)
{
- struct drm_i915_private *dev_priv = mem->i915;
+ struct drm_i915_private *i915 = mem->i915;
struct drm_i915_gem_object *obj;
struct drm_mm_node *stolen;
int ret;
- if (!drm_mm_initialized(&dev_priv->mm.stolen))
+ if (!drm_mm_initialized(&i915->mm.stolen))
return ERR_PTR(-ENODEV);
if (size == 0)
@@ -608,35 +616,37 @@ _i915_gem_object_create_stolen(struct intel_memory_region *mem,
if (!stolen)
return ERR_PTR(-ENOMEM);
- ret = i915_gem_stolen_insert_node(dev_priv, stolen, size, 4096);
+ ret = i915_gem_stolen_insert_node(i915, stolen, size, 4096);
if (ret) {
obj = ERR_PTR(ret);
goto err_free;
}
- obj = __i915_gem_object_create_stolen(dev_priv, stolen, mem);
+ obj = __i915_gem_object_create_stolen(mem, stolen);
if (IS_ERR(obj))
goto err_remove;
return obj;
err_remove:
- i915_gem_stolen_remove_node(dev_priv, stolen);
+ i915_gem_stolen_remove_node(i915, stolen);
err_free:
kfree(stolen);
return obj;
}
struct drm_i915_gem_object *
-i915_gem_object_create_stolen(struct drm_i915_private *dev_priv,
+i915_gem_object_create_stolen(struct drm_i915_private *i915,
resource_size_t size)
{
- return i915_gem_object_create_region(dev_priv->mm.regions[INTEL_REGION_STOLEN],
+ return i915_gem_object_create_region(i915->mm.regions[INTEL_REGION_STOLEN],
size, I915_BO_ALLOC_CONTIGUOUS);
}
static int init_stolen(struct intel_memory_region *mem)
{
+ intel_memory_region_set_name(mem, "stolen");
+
/*
* Initialise stolen early so that we may reserve preallocated
* objects for the BIOS to KMS transition.
@@ -665,18 +675,19 @@ struct intel_memory_region *i915_gem_stolen_setup(struct drm_i915_private *i915)
}
struct drm_i915_gem_object *
-i915_gem_object_create_stolen_for_preallocated(struct drm_i915_private *dev_priv,
+i915_gem_object_create_stolen_for_preallocated(struct drm_i915_private *i915,
resource_size_t stolen_offset,
resource_size_t gtt_offset,
resource_size_t size)
{
- struct i915_ggtt *ggtt = &dev_priv->ggtt;
+ struct intel_memory_region *mem = i915->mm.regions[INTEL_REGION_STOLEN];
+ struct i915_ggtt *ggtt = &i915->ggtt;
struct drm_i915_gem_object *obj;
struct drm_mm_node *stolen;
struct i915_vma *vma;
int ret;
- if (!drm_mm_initialized(&dev_priv->mm.stolen))
+ if (!drm_mm_initialized(&i915->mm.stolen))
return ERR_PTR(-ENODEV);
DRM_DEBUG_DRIVER("creating preallocated stolen object: stolen_offset=%pa, gtt_offset=%pa, size=%pa\n",
@@ -694,19 +705,19 @@ i915_gem_object_create_stolen_for_preallocated(struct drm_i915_private *dev_priv
stolen->start = stolen_offset;
stolen->size = size;
- mutex_lock(&dev_priv->mm.stolen_lock);
- ret = drm_mm_reserve_node(&dev_priv->mm.stolen, stolen);
- mutex_unlock(&dev_priv->mm.stolen_lock);
+ mutex_lock(&i915->mm.stolen_lock);
+ ret = drm_mm_reserve_node(&i915->mm.stolen, stolen);
+ mutex_unlock(&i915->mm.stolen_lock);
if (ret) {
DRM_DEBUG_DRIVER("failed to allocate stolen space\n");
kfree(stolen);
return ERR_PTR(ret);
}
- obj = __i915_gem_object_create_stolen(dev_priv, stolen, NULL);
+ obj = __i915_gem_object_create_stolen(mem, stolen);
if (IS_ERR(obj)) {
DRM_DEBUG_DRIVER("failed to allocate stolen object\n");
- i915_gem_stolen_remove_node(dev_priv, stolen);
+ i915_gem_stolen_remove_node(i915, stolen);
kfree(stolen);
return obj;
}
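
i915_pages_create_for_stolen() above wraps a contiguous physical range in a single-entry sg_table so the rest of the driver can treat pageless stolen memory like any other backing store. A stripped-down sketch of that trick:

#include <linux/err.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>

static struct sg_table *wrap_contiguous_range(dma_addr_t start, u32 size)
{
	struct sg_table *st;

	st = kmalloc(sizeof(*st), GFP_KERNEL);
	if (!st)
		return ERR_PTR(-ENOMEM);

	if (sg_alloc_table(st, 1, GFP_KERNEL)) {
		kfree(st);
		return ERR_PTR(-ENOMEM);
	}

	/* No struct pages behind stolen memory: only the DMA view is real. */
	sg_dma_address(st->sgl) = start;
	sg_dma_len(st->sgl) = size;
	return st;
}
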
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_tiling.c b/drivers/gpu/drm/i915/gem/i915_gem_tiling.c
index 1fa592d82af5..6c7825a2dc2a 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_tiling.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_tiling.c
@@ -11,6 +11,7 @@
#include "i915_drv.h"
#include "i915_gem.h"
#include "i915_gem_ioctls.h"
+#include "i915_gem_mman.h"
#include "i915_gem_object.h"
/**
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_userptr.c b/drivers/gpu/drm/i915/gem/i915_gem_userptr.c
index 4c72d74d6576..580319b7bf1a 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_userptr.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_userptr.c
@@ -129,9 +129,10 @@ userptr_mn_invalidate_range_start(struct mmu_notifier *_mn,
spin_unlock(&mn->lock);
ret = i915_gem_object_unbind(obj,
- I915_GEM_OBJECT_UNBIND_ACTIVE);
+ I915_GEM_OBJECT_UNBIND_ACTIVE |
+ I915_GEM_OBJECT_UNBIND_BARRIER);
if (ret == 0)
- ret = __i915_gem_object_put_pages(obj, I915_MM_SHRINKER);
+ ret = __i915_gem_object_put_pages(obj);
i915_gem_object_put(obj);
if (ret)
return ret;
@@ -402,7 +403,7 @@ struct get_pages_work {
static struct sg_table *
__i915_gem_userptr_alloc_pages(struct drm_i915_gem_object *obj,
- struct page **pvec, int num_pages)
+ struct page **pvec, unsigned long num_pages)
{
unsigned int max_segment = i915_sg_segment_size();
struct sg_table *st;
@@ -448,9 +449,10 @@ __i915_gem_userptr_get_pages_worker(struct work_struct *_work)
{
struct get_pages_work *work = container_of(_work, typeof(*work), work);
struct drm_i915_gem_object *obj = work->obj;
- const int npages = obj->base.size >> PAGE_SHIFT;
+ const unsigned long npages = obj->base.size >> PAGE_SHIFT;
+ unsigned long pinned;
struct page **pvec;
- int pinned, ret;
+ int ret;
ret = -ENOMEM;
pinned = 0;
@@ -459,31 +461,36 @@ __i915_gem_userptr_get_pages_worker(struct work_struct *_work)
if (pvec != NULL) {
struct mm_struct *mm = obj->userptr.mm->mm;
unsigned int flags = 0;
+ int locked = 0;
if (!i915_gem_object_is_readonly(obj))
flags |= FOLL_WRITE;
ret = -EFAULT;
if (mmget_not_zero(mm)) {
- down_read(&mm->mmap_sem);
while (pinned < npages) {
+ if (!locked) {
+ down_read(&mm->mmap_sem);
+ locked = 1;
+ }
ret = get_user_pages_remote
(work->task, mm,
obj->userptr.ptr + pinned * PAGE_SIZE,
npages - pinned,
flags,
- pvec + pinned, NULL, NULL);
+ pvec + pinned, NULL, &locked);
if (ret < 0)
break;
pinned += ret;
}
- up_read(&mm->mmap_sem);
+ if (locked)
+ up_read(&mm->mmap_sem);
mmput(mm);
}
}
- mutex_lock(&obj->mm.lock);
+ mutex_lock_nested(&obj->mm.lock, I915_MM_GET_PAGES);
if (obj->userptr.work == &work->work) {
struct sg_table *pages = ERR_PTR(ret);
@@ -553,7 +560,7 @@ __i915_gem_userptr_get_pages_schedule(struct drm_i915_gem_object *obj)
static int i915_gem_userptr_get_pages(struct drm_i915_gem_object *obj)
{
- const int num_pages = obj->base.size >> PAGE_SHIFT;
+ const unsigned long num_pages = obj->base.size >> PAGE_SHIFT;
struct mm_struct *mm = obj->userptr.mm->mm;
struct page **pvec;
struct sg_table *pages;
@@ -773,15 +780,11 @@ i915_gem_userptr_ioctl(struct drm_device *dev,
return -EFAULT;
if (args->flags & I915_USERPTR_READ_ONLY) {
- struct i915_address_space *vm;
-
/*
* On almost all of the older hw, we cannot tell the GPU that
* a page is readonly.
*/
- vm = rcu_dereference_protected(dev_priv->kernel_context->vm,
- true); /* static vm */
- if (!vm || !vm->has_read_only)
+ if (!dev_priv->gt.vm->has_read_only)
return -ENODEV;
}
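
The userptr worker above switches to the "locked" form of get_user_pages_remote(), which may drop mmap_sem internally and reports that through *locked. A condensed sketch of the loop, assuming the signatures of this kernel era:

#include <linux/mm.h>
#include <linux/sched.h>

static long pin_user_range(struct task_struct *task, struct mm_struct *mm,
			   unsigned long start, unsigned long npages,
			   unsigned int flags, struct page **pvec)
{
	unsigned long pinned = 0;
	int locked = 0;

	while (pinned < npages) {
		long ret;

		if (!locked) {
			/* re-take mmap_sem if the callee dropped it */
			down_read(&mm->mmap_sem);
			locked = 1;
		}
		ret = get_user_pages_remote(task, mm,
					    start + pinned * PAGE_SIZE,
					    npages - pinned, flags,
					    pvec + pinned, NULL, &locked);
		if (ret < 0)
			break;
		pinned += ret;
	}
	if (locked)
		up_read(&mm->mmap_sem);

	return pinned;
}
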
diff --git a/drivers/gpu/drm/i915/gem/selftests/huge_gem_object.c b/drivers/gpu/drm/i915/gem/selftests/huge_gem_object.c
index 892d12db6c49..fa16f2c3f3ac 100644
--- a/drivers/gpu/drm/i915/gem/selftests/huge_gem_object.c
+++ b/drivers/gpu/drm/i915/gem/selftests/huge_gem_object.c
@@ -12,10 +12,14 @@ static void huge_free_pages(struct drm_i915_gem_object *obj,
struct sg_table *pages)
{
unsigned long nreal = obj->scratch / PAGE_SIZE;
- struct scatterlist *sg;
+ struct sgt_iter sgt_iter;
+ struct page *page;
- for (sg = pages->sgl; sg && nreal--; sg = __sg_next(sg))
- __free_page(sg_page(sg));
+ for_each_sgt_page(page, sgt_iter, pages) {
+ __free_page(page);
+ if (!--nreal)
+ break;
+ }
sg_free_table(pages);
kfree(pages);
@@ -70,7 +74,6 @@ static int huge_get_pages(struct drm_i915_gem_object *obj)
err:
huge_free_pages(obj, pages);
-
return -ENOMEM;
#undef GFP
}
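
huge_free_pages() above now uses the driver's for_each_sgt_page() iterator instead of hand-walking the chain with __sg_next(). The same loop with only core scatterlist helpers, assuming one page per entry as in this selftest:

#include <linux/mm.h>
#include <linux/scatterlist.h>

static void free_first_pages(struct sg_table *st, unsigned long nreal)
{
	struct scatterlist *sg;
	unsigned int i;

	for_each_sg(st->sgl, sg, st->nents, i) {
		__free_page(sg_page(sg));
		if (!--nreal)	/* only the first nreal pages are real */
			break;
	}
}
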
diff --git a/drivers/gpu/drm/i915/gem/selftests/huge_gem_object.h b/drivers/gpu/drm/i915/gem/selftests/huge_gem_object.h
index 549c1394bcdc..b8cf31b7bf14 100644
--- a/drivers/gpu/drm/i915/gem/selftests/huge_gem_object.h
+++ b/drivers/gpu/drm/i915/gem/selftests/huge_gem_object.h
@@ -7,6 +7,12 @@
#ifndef __HUGE_GEM_OBJECT_H
#define __HUGE_GEM_OBJECT_H
+#include <linux/types.h>
+
+#include "gem/i915_gem_object_types.h"
+
+struct drm_i915_private;
+
struct drm_i915_gem_object *
huge_gem_object(struct drm_i915_private *i915,
phys_addr_t phys_size,
diff --git a/drivers/gpu/drm/i915/gem/selftests/huge_pages.c b/drivers/gpu/drm/i915/gem/selftests/huge_pages.c
index 688c49a24f32..9311250d7d6f 100644
--- a/drivers/gpu/drm/i915/gem/selftests/huge_pages.c
+++ b/drivers/gpu/drm/i915/gem/selftests/huge_pages.c
@@ -517,7 +517,7 @@ static int igt_mock_memory_region_huge_pages(void *arg)
i915_vma_unpin(vma);
i915_vma_close(vma);
- __i915_gem_object_put_pages(obj, I915_MM_NORMAL);
+ __i915_gem_object_put_pages(obj);
i915_gem_object_put(obj);
}
}
@@ -650,7 +650,7 @@ static int igt_mock_ppgtt_misaligned_dma(void *arg)
i915_vma_close(vma);
i915_gem_object_unpin_pages(obj);
- __i915_gem_object_put_pages(obj, I915_MM_NORMAL);
+ __i915_gem_object_put_pages(obj);
i915_gem_object_put(obj);
}
@@ -678,7 +678,7 @@ static void close_object_list(struct list_head *objects,
list_del(&obj->st_link);
i915_gem_object_unpin_pages(obj);
- __i915_gem_object_put_pages(obj, I915_MM_NORMAL);
+ __i915_gem_object_put_pages(obj);
i915_gem_object_put(obj);
}
}
@@ -948,7 +948,7 @@ static int igt_mock_ppgtt_64K(void *arg)
i915_vma_close(vma);
i915_gem_object_unpin_pages(obj);
- __i915_gem_object_put_pages(obj, I915_MM_NORMAL);
+ __i915_gem_object_put_pages(obj);
i915_gem_object_put(obj);
}
}
@@ -1017,38 +1017,33 @@ __cpu_check_shmem(struct drm_i915_gem_object *obj, u32 dword, u32 val)
return err;
}
-static int __cpu_check_lmem(struct drm_i915_gem_object *obj, u32 dword, u32 val)
+static int __cpu_check_vmap(struct drm_i915_gem_object *obj, u32 dword, u32 val)
{
- unsigned long n;
+ unsigned long n = obj->base.size >> PAGE_SHIFT;
+ u32 *ptr;
int err;
- i915_gem_object_lock(obj);
- err = i915_gem_object_set_to_wc_domain(obj, false);
- i915_gem_object_unlock(obj);
+ err = i915_gem_object_wait(obj, 0, MAX_SCHEDULE_TIMEOUT);
if (err)
return err;
- err = i915_gem_object_pin_pages(obj);
- if (err)
- return err;
+ ptr = i915_gem_object_pin_map(obj, I915_MAP_WC);
+ if (IS_ERR(ptr))
+ return PTR_ERR(ptr);
- for (n = 0; n < obj->base.size >> PAGE_SHIFT; ++n) {
- u32 __iomem *base;
- u32 read_val;
-
- base = i915_gem_object_lmem_io_map_page_atomic(obj, n);
-
- read_val = ioread32(base + dword);
- io_mapping_unmap_atomic(base);
- if (read_val != val) {
- pr_err("n=%lu base[%u]=%u, val=%u\n",
- n, dword, read_val, val);
+ ptr += dword;
+ while (n--) {
+ if (*ptr != val) {
+ pr_err("base[%u]=%08x, val=%08x\n",
+ dword, *ptr, val);
err = -EINVAL;
break;
}
+
+ ptr += PAGE_SIZE / sizeof(*ptr);
}
- i915_gem_object_unpin_pages(obj);
+ i915_gem_object_unpin_map(obj);
return err;
}
@@ -1056,10 +1051,8 @@ static int cpu_check(struct drm_i915_gem_object *obj, u32 dword, u32 val)
{
if (i915_gem_object_has_struct_page(obj))
return __cpu_check_shmem(obj, dword, val);
- else if (i915_gem_object_is_lmem(obj))
- return __cpu_check_lmem(obj, dword, val);
-
- return -ENODEV;
+ else
+ return __cpu_check_vmap(obj, dword, val);
}
static int __igt_write_huge(struct intel_context *ce,
@@ -1110,8 +1103,7 @@ static int __igt_write_huge(struct intel_context *ce,
out_vma_unpin:
i915_vma_unpin(vma);
out_vma_close:
- i915_vma_destroy(vma);
-
+ __i915_vma_put(vma);
return err;
}
@@ -1301,7 +1293,7 @@ static int igt_ppgtt_exhaust_huge(void *arg)
}
i915_gem_object_unpin_pages(obj);
- __i915_gem_object_put_pages(obj, I915_MM_NORMAL);
+ __i915_gem_object_put_pages(obj);
i915_gem_object_put(obj);
}
}
@@ -1420,7 +1412,7 @@ try_again:
err = i915_gem_object_pin_pages(obj);
if (err) {
- if (err == -ENXIO) {
+ if (err == -ENXIO || err == -E2BIG) {
i915_gem_object_put(obj);
size >>= 1;
goto try_again;
@@ -1442,7 +1434,7 @@ try_again:
}
out_unpin:
i915_gem_object_unpin_pages(obj);
- __i915_gem_object_put_pages(obj, I915_MM_NORMAL);
+ __i915_gem_object_put_pages(obj);
out_put:
i915_gem_object_put(obj);
@@ -1530,7 +1522,7 @@ static int igt_ppgtt_sanity_check(void *arg)
err = igt_write_huge(ctx, obj);
i915_gem_object_unpin_pages(obj);
- __i915_gem_object_put_pages(obj, I915_MM_NORMAL);
+ __i915_gem_object_put_pages(obj);
i915_gem_object_put(obj);
if (err) {
@@ -1873,7 +1865,7 @@ int i915_gem_huge_page_mock_selftests(void)
mkwrite_device_info(dev_priv)->ppgtt_type = INTEL_PPGTT_FULL;
mkwrite_device_info(dev_priv)->ppgtt_size = 48;
- ppgtt = i915_ppgtt_create(dev_priv);
+ ppgtt = i915_ppgtt_create(&dev_priv->gt);
if (IS_ERR(ppgtt)) {
err = PTR_ERR(ppgtt);
goto out_unlock;
@@ -1912,9 +1904,9 @@ int i915_gem_huge_page_live_selftests(struct drm_i915_private *i915)
SUBTEST(igt_ppgtt_smoke_huge),
SUBTEST(igt_ppgtt_sanity_check),
};
- struct drm_file *file;
struct i915_gem_context *ctx;
struct i915_address_space *vm;
+ struct file *file;
int err;
if (!HAS_PPGTT(i915)) {
@@ -1944,6 +1936,6 @@ int i915_gem_huge_page_live_selftests(struct drm_i915_private *i915)
err = i915_subtests(tests, ctx);
out_file:
- mock_file_free(i915, file);
+ fput(file);
return err;
}
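
The rewritten __cpu_check_vmap() probes one dword in every page of a linear WC mapping. The stride arithmetic in isolation:

#include <linux/mm.h>

static int check_dword_in_each_page(u32 *ptr, unsigned long npages,
				    u32 dword, u32 val)
{
	ptr += dword;			/* dword index within the first page */
	while (npages--) {
		if (*ptr != val)
			return -EINVAL;
		ptr += PAGE_SIZE / sizeof(*ptr); /* same dword, next page */
	}
	return 0;
}
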
diff --git a/drivers/gpu/drm/i915/gem/selftests/i915_gem_client_blt.c b/drivers/gpu/drm/i915/gem/selftests/i915_gem_client_blt.c
index da8edee4fe0a..b972be165e85 100644
--- a/drivers/gpu/drm/i915/gem/selftests/i915_gem_client_blt.c
+++ b/drivers/gpu/drm/i915/gem/selftests/i915_gem_client_blt.c
@@ -24,6 +24,7 @@ static int __igt_client_fill(struct intel_engine_cs *engine)
prandom_seed_state(&prng, i915_selftest.random_seed);
+ intel_engine_pm_get(engine);
do {
const u32 max_block_size = S16_MAX * PAGE_SIZE;
u32 sz = min_t(u64, ce->vm->total >> 4, prandom_u32_state(&prng));
@@ -99,6 +100,7 @@ err_put:
err_flush:
if (err == -ENOMEM)
err = 0;
+ intel_engine_pm_put(engine);
return err;
}
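
__igt_client_fill() now brackets its loop with an engine wakeref so the device cannot runtime-suspend mid-test. The shape of that pattern, with do_gpu_work() standing in for the test body (hypothetical helper):

static int run_with_engine_awake(struct intel_engine_cs *engine)
{
	int err;

	intel_engine_pm_get(engine);
	err = do_gpu_work(engine);	/* hypothetical test body */
	intel_engine_pm_put(engine);	/* wakeref dropped on every path */

	return err;
}
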
diff --git a/drivers/gpu/drm/i915/gem/selftests/i915_gem_coherency.c b/drivers/gpu/drm/i915/gem/selftests/i915_gem_coherency.c
index 2b29f6b4e1dd..3f6079e1dfb6 100644
--- a/drivers/gpu/drm/i915/gem/selftests/i915_gem_coherency.c
+++ b/drivers/gpu/drm/i915/gem/selftests/i915_gem_coherency.c
@@ -6,6 +6,7 @@
#include <linux/prime_numbers.h>
+#include "gt/intel_engine_pm.h"
#include "gt/intel_gt.h"
#include "gt/intel_gt_pm.h"
#include "gt/intel_ring.h"
@@ -200,7 +201,7 @@ static int gpu_set(struct context *ctx, unsigned long offset, u32 v)
if (IS_ERR(vma))
return PTR_ERR(vma);
- rq = i915_request_create(ctx->engine->kernel_context);
+ rq = intel_engine_create_kernel_request(ctx->engine);
if (IS_ERR(rq)) {
i915_vma_unpin(vma);
return PTR_ERR(rq);
@@ -324,8 +325,12 @@ static int igt_gem_coherency(void *arg)
values = offsets + ncachelines;
ctx.engine = random_engine(i915, &prng);
- GEM_BUG_ON(!ctx.engine);
+ if (!ctx.engine) {
+ err = -ENODEV;
+ goto out_free;
+ }
pr_info("%s: using %s\n", __func__, ctx.engine->name);
+ intel_engine_pm_get(ctx.engine);
for (over = igt_coherency_mode; over->name; over++) {
if (!over->set)
@@ -352,7 +357,7 @@ static int igt_gem_coherency(void *arg)
ctx.obj = i915_gem_object_create_internal(i915, PAGE_SIZE);
if (IS_ERR(ctx.obj)) {
err = PTR_ERR(ctx.obj);
- goto free;
+ goto out_pm;
}
i915_random_reorder(offsets, ncachelines, &prng);
@@ -403,13 +408,15 @@ static int igt_gem_coherency(void *arg)
}
}
}
-free:
+out_pm:
+ intel_engine_pm_put(ctx.engine);
+out_free:
kfree(offsets);
return err;
put_object:
i915_gem_object_put(ctx.obj);
- goto free;
+ goto out_pm;
}
int i915_gem_coherency_live_selftests(struct drm_i915_private *i915)
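
The relabelled error paths in igt_gem_coherency() follow the usual kernel unwind shape: one label per resource, released in reverse order of acquisition. A sketch with hypothetical acquire/release helpers:

static int example_unwind(void)
{
	int err;

	err = acquire_a();		/* hypothetical */
	if (err)
		return err;

	err = acquire_b();		/* hypothetical */
	if (err)
		goto out_a;

	err = use_both();		/* hypothetical */

	release_b();
out_a:
	release_a();
	return err;
}
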
diff --git a/drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c b/drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c
index 62fabc023a83..7fc46861a54d 100644
--- a/drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c
+++ b/drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c
@@ -7,6 +7,7 @@
#include <linux/prime_numbers.h>
#include "gem/i915_gem_pm.h"
+#include "gt/intel_engine_pm.h"
#include "gt/intel_gt.h"
#include "gt/intel_gt_requests.h"
#include "gt/intel_reset.h"
@@ -26,6 +27,12 @@
#define DW_PER_PAGE (PAGE_SIZE / sizeof(u32))
+static inline struct i915_address_space *ctx_vm(struct i915_gem_context *ctx)
+{
+ /* single threaded, private ctx */
+ return rcu_dereference_protected(ctx->vm, true);
+}
+
static int live_nop_switch(void *arg)
{
const unsigned int nctx = 1024;
@@ -33,7 +40,7 @@ static int live_nop_switch(void *arg)
struct intel_engine_cs *engine;
struct i915_gem_context **ctx;
struct igt_live_test t;
- struct drm_file *file;
+ struct file *file;
unsigned long n;
int err = -ENODEV;
@@ -67,25 +74,34 @@ static int live_nop_switch(void *arg)
}
for_each_uabi_engine(engine, i915) {
- struct i915_request *rq;
+ struct i915_request *rq = NULL;
unsigned long end_time, prime;
ktime_t times[2] = {};
times[0] = ktime_get_raw();
for (n = 0; n < nctx; n++) {
- rq = igt_request_alloc(ctx[n], engine);
- if (IS_ERR(rq)) {
- err = PTR_ERR(rq);
+ struct i915_request *this;
+
+ this = igt_request_alloc(ctx[n], engine);
+ if (IS_ERR(this)) {
+ err = PTR_ERR(this);
goto out_file;
}
- i915_request_add(rq);
+ if (rq) {
+ i915_request_await_dma_fence(this, &rq->fence);
+ i915_request_put(rq);
+ }
+ rq = i915_request_get(this);
+ i915_request_add(this);
}
if (i915_request_wait(rq, 0, HZ / 5) < 0) {
pr_err("Failed to populated %d contexts\n", nctx);
intel_gt_set_wedged(&i915->gt);
+ i915_request_put(rq);
err = -EIO;
goto out_file;
}
+ i915_request_put(rq);
times[1] = ktime_get_raw();
@@ -100,13 +116,21 @@ static int live_nop_switch(void *arg)
for_each_prime_number_from(prime, 2, 8192) {
times[1] = ktime_get_raw();
+ rq = NULL;
for (n = 0; n < prime; n++) {
- rq = igt_request_alloc(ctx[n % nctx], engine);
- if (IS_ERR(rq)) {
- err = PTR_ERR(rq);
+ struct i915_request *this;
+
+ this = igt_request_alloc(ctx[n % nctx], engine);
+ if (IS_ERR(this)) {
+ err = PTR_ERR(this);
goto out_file;
}
+ if (rq) { /* Force submission order */
+ i915_request_await_dma_fence(this, &rq->fence);
+ i915_request_put(rq);
+ }
+
/*
* This space is left intentionally blank.
*
@@ -121,14 +145,18 @@ static int live_nop_switch(void *arg)
* for latency.
*/
- i915_request_add(rq);
+ rq = i915_request_get(this);
+ i915_request_add(this);
}
+ GEM_BUG_ON(!rq);
if (i915_request_wait(rq, 0, HZ / 5) < 0) {
pr_err("Switching between %ld contexts timed out\n",
prime);
intel_gt_set_wedged(&i915->gt);
+ i915_request_put(rq);
break;
}
+ i915_request_put(rq);
times[1] = ktime_sub(ktime_get_raw(), times[1]);
if (prime == 2)
@@ -149,7 +177,7 @@ static int live_nop_switch(void *arg)
}
out_file:
- mock_file_free(i915, file);
+ fput(file);
return err;
}
@@ -255,7 +283,7 @@ static int live_parallel_switch(void *arg)
int (* const *fn)(void *arg);
struct i915_gem_context *ctx;
struct intel_context *ce;
- struct drm_file *file;
+ struct file *file;
int n, m, count;
int err = 0;
@@ -309,7 +337,7 @@ static int live_parallel_switch(void *arg)
if (!data[m].ce[0])
continue;
- ce = intel_context_create(ctx, data[m].ce[0]->engine);
+ ce = intel_context_create(data[m].ce[0]->engine);
if (IS_ERR(ce))
goto out;
@@ -377,7 +405,7 @@ out:
}
kfree(data);
out_file:
- mock_file_free(i915, file);
+ fput(file);
return err;
}
@@ -502,17 +530,17 @@ out_unmap:
return err;
}
-static int file_add_object(struct drm_file *file,
- struct drm_i915_gem_object *obj)
+static int file_add_object(struct file *file, struct drm_i915_gem_object *obj)
{
int err;
GEM_BUG_ON(obj->base.handle_count);
/* tie the object to the drm_file for easy reaping */
- err = idr_alloc(&file->object_idr, &obj->base, 1, 0, GFP_KERNEL);
+ err = idr_alloc(&to_drm_file(file)->object_idr,
+ &obj->base, 1, 0, GFP_KERNEL);
if (err < 0)
- return err;
+ return err;
i915_gem_object_get(obj);
obj->base.handle_count++;
@@ -521,7 +549,7 @@ static int file_add_object(struct drm_file *file,
static struct drm_i915_gem_object *
create_test_object(struct i915_address_space *vm,
- struct drm_file *file,
+ struct file *file,
struct list_head *objects)
{
struct drm_i915_gem_object *obj;
@@ -621,9 +649,9 @@ static int igt_ctx_exec(void *arg)
unsigned long ncontexts, ndwords, dw;
struct i915_request *tq[5] = {};
struct igt_live_test t;
- struct drm_file *file;
IGT_TIMEOUT(end_time);
LIST_HEAD(objects);
+ struct file *file;
if (!intel_engine_can_store_dword(engine))
continue;
@@ -716,7 +744,7 @@ out_file:
if (igt_live_test_end(&t))
err = -EIO;
- mock_file_free(i915, file);
+ fput(file);
if (err)
return err;
@@ -733,7 +761,7 @@ static int igt_shared_ctx_exec(void *arg)
struct i915_gem_context *parent;
struct intel_engine_cs *engine;
struct igt_live_test t;
- struct drm_file *file;
+ struct file *file;
int err = 0;
/*
@@ -786,14 +814,15 @@ static int igt_shared_ctx_exec(void *arg)
}
mutex_lock(&ctx->mutex);
- __assign_ppgtt(ctx, parent->vm);
+ __assign_ppgtt(ctx, ctx_vm(parent));
mutex_unlock(&ctx->mutex);
ce = i915_gem_context_get_engine(ctx, engine->legacy_idx);
GEM_BUG_ON(IS_ERR(ce));
if (!obj) {
- obj = create_test_object(parent->vm, file, &objects);
+ obj = create_test_object(ctx_vm(parent),
+ file, &objects);
if (IS_ERR(obj)) {
err = PTR_ERR(obj);
intel_context_put(ce);
@@ -854,7 +883,7 @@ out_test:
if (igt_live_test_end(&t))
err = -EIO;
out_file:
- mock_file_free(i915, file);
+ fput(file);
return err;
}
@@ -1140,8 +1169,7 @@ out:
igt_spinner_end(spin);
if ((flags & TEST_IDLE) && ret == 0) {
- ret = intel_gt_wait_for_idle(ce->engine->gt,
- MAX_SCHEDULE_TIMEOUT);
+ ret = igt_flush_test(ce->engine->i915);
if (ret)
return ret;
@@ -1163,9 +1191,11 @@ __sseu_test(const char *name,
struct igt_spinner *spin = NULL;
int ret;
+ intel_engine_pm_get(ce->engine);
+
ret = __sseu_prepare(name, flags, ce, &spin);
if (ret)
- return ret;
+ goto out_pm;
ret = intel_context_reconfigure_sseu(ce, sseu);
if (ret)
@@ -1180,6 +1210,8 @@ out_spin:
igt_spinner_fini(spin);
kfree(spin);
}
+out_pm:
+ intel_engine_pm_put(ce->engine);
return ret;
}
@@ -1232,8 +1264,7 @@ __igt_ctx_sseu(struct drm_i915_private *i915,
hweight32(engine->sseu.slice_mask),
hweight32(pg_sseu.slice_mask));
- ce = intel_context_create(engine->kernel_context->gem_context,
- engine);
+ ce = intel_context_create(engine);
if (IS_ERR(ce)) {
ret = PTR_ERR(ce);
goto out_put;
@@ -1311,16 +1342,18 @@ static int igt_ctx_sseu(void *arg)
static int igt_ctx_readonly(void *arg)
{
struct drm_i915_private *i915 = arg;
+ unsigned long idx, ndwords, dw, num_engines;
struct drm_i915_gem_object *obj = NULL;
struct i915_request *tq[5] = {};
+ struct i915_gem_engines_iter it;
struct i915_address_space *vm;
struct i915_gem_context *ctx;
- unsigned long idx, ndwords, dw;
+ struct intel_context *ce;
struct igt_live_test t;
- struct drm_file *file;
I915_RND_STATE(prng);
IGT_TIMEOUT(end_time);
LIST_HEAD(objects);
+ struct file *file;
int err = -ENODEV;
/*
@@ -1343,21 +1376,21 @@ static int igt_ctx_readonly(void *arg)
goto out_file;
}
- rcu_read_lock();
- vm = rcu_dereference(ctx->vm) ?: &i915->ggtt.alias->vm;
+ vm = ctx_vm(ctx) ?: &i915->ggtt.alias->vm;
if (!vm || !vm->has_read_only) {
- rcu_read_unlock();
err = 0;
goto out_file;
}
- rcu_read_unlock();
+
+ num_engines = 0;
+ for_each_gem_engine(ce, i915_gem_context_lock_engines(ctx), it)
+ if (intel_engine_can_store_dword(ce->engine))
+ num_engines++;
+ i915_gem_context_unlock_engines(ctx);
ndwords = 0;
dw = 0;
while (!time_after(jiffies, end_time)) {
- struct i915_gem_engines_iter it;
- struct intel_context *ce;
-
for_each_gem_engine(ce,
i915_gem_context_lock_engines(ctx), it) {
if (!intel_engine_can_store_dword(ce->engine))
@@ -1380,7 +1413,7 @@ static int igt_ctx_readonly(void *arg)
pr_err("Failed to fill dword %lu [%lu/%lu] with gpu (%s) [full-ppgtt? %s], err=%d\n",
ndwords, dw, max_dwords(obj),
ce->engine->name,
- yesno(!!rcu_access_pointer(ctx->vm)),
+ yesno(!!ctx_vm(ctx)),
err);
i915_gem_context_unlock_engines(ctx);
goto out_file;
@@ -1400,8 +1433,8 @@ static int igt_ctx_readonly(void *arg)
}
i915_gem_context_unlock_engines(ctx);
}
- pr_info("Submitted %lu dwords (across %u engines)\n",
- ndwords, RUNTIME_INFO(i915)->num_engines);
+ pr_info("Submitted %lu dwords (across %lu engines)\n",
+ ndwords, num_engines);
dw = 0;
idx = 0;
@@ -1426,7 +1459,7 @@ out_file:
if (igt_live_test_end(&t))
err = -EIO;
- mock_file_free(i915, file);
+ fput(file);
return err;
}
@@ -1466,7 +1499,7 @@ static int write_to_scratch(struct i915_gem_context *ctx,
cmd = i915_gem_object_pin_map(obj, I915_MAP_WB);
if (IS_ERR(cmd)) {
err = PTR_ERR(cmd);
- goto err;
+ goto out;
}
*cmd++ = MI_STORE_DWORD_IMM_GEN4;
@@ -1488,12 +1521,12 @@ static int write_to_scratch(struct i915_gem_context *ctx,
vma = i915_vma_instance(obj, vm, NULL);
if (IS_ERR(vma)) {
err = PTR_ERR(vma);
- goto err_vm;
+ goto out_vm;
}
err = i915_vma_pin(vma, 0, 0, PIN_USER | PIN_OFFSET_FIXED);
if (err)
- goto err_vm;
+ goto out_vm;
err = check_scratch(vm, offset);
if (err)
@@ -1517,22 +1550,20 @@ static int write_to_scratch(struct i915_gem_context *ctx,
if (err)
goto skip_request;
- i915_vma_unpin_and_release(&vma, 0);
+ i915_vma_unpin(vma);
i915_request_add(rq);
- i915_vm_put(vm);
- return 0;
-
+ goto out_vm;
skip_request:
i915_request_skip(rq, err);
err_request:
i915_request_add(rq);
err_unpin:
i915_vma_unpin(vma);
-err_vm:
+out_vm:
i915_vm_put(vm);
-err:
+out:
i915_gem_object_put(obj);
return err;
}
@@ -1560,7 +1591,7 @@ static int read_from_scratch(struct i915_gem_context *ctx,
cmd = i915_gem_object_pin_map(obj, I915_MAP_WB);
if (IS_ERR(cmd)) {
err = PTR_ERR(cmd);
- goto err;
+ goto out;
}
memset(cmd, POISON_INUSE, PAGE_SIZE);
@@ -1592,12 +1623,12 @@ static int read_from_scratch(struct i915_gem_context *ctx,
vma = i915_vma_instance(obj, vm, NULL);
if (IS_ERR(vma)) {
err = PTR_ERR(vma);
- goto err_vm;
+ goto out_vm;
}
err = i915_vma_pin(vma, 0, 0, PIN_USER | PIN_OFFSET_FIXED);
if (err)
- goto err_vm;
+ goto out_vm;
err = check_scratch(vm, offset);
if (err)
@@ -1630,29 +1661,27 @@ static int read_from_scratch(struct i915_gem_context *ctx,
err = i915_gem_object_set_to_cpu_domain(obj, false);
i915_gem_object_unlock(obj);
if (err)
- goto err_vm;
+ goto out_vm;
cmd = i915_gem_object_pin_map(obj, I915_MAP_WB);
if (IS_ERR(cmd)) {
err = PTR_ERR(cmd);
- goto err_vm;
+ goto out_vm;
}
*value = cmd[result / sizeof(*cmd)];
i915_gem_object_unpin_map(obj);
- i915_gem_object_put(obj);
-
- return 0;
+ goto out_vm;
skip_request:
i915_request_skip(rq, err);
err_request:
i915_request_add(rq);
err_unpin:
i915_vma_unpin(vma);
-err_vm:
+out_vm:
i915_vm_put(vm);
-err:
+out:
i915_gem_object_put(obj);
return err;
}
@@ -1661,11 +1690,11 @@ static int igt_vm_isolation(void *arg)
{
struct drm_i915_private *i915 = arg;
struct i915_gem_context *ctx_a, *ctx_b;
+ unsigned long num_engines, count;
struct intel_engine_cs *engine;
struct igt_live_test t;
- struct drm_file *file;
I915_RND_STATE(prng);
- unsigned long count;
+ struct file *file;
u64 vm_total;
int err;
@@ -1698,14 +1727,15 @@ static int igt_vm_isolation(void *arg)
}
/* We can only test vm isolation, if the vm are distinct */
- if (ctx_a->vm == ctx_b->vm)
+ if (ctx_vm(ctx_a) == ctx_vm(ctx_b))
goto out_file;
- vm_total = ctx_a->vm->total;
- GEM_BUG_ON(ctx_b->vm->total != vm_total);
+ vm_total = ctx_vm(ctx_a)->total;
+ GEM_BUG_ON(ctx_vm(ctx_b)->total != vm_total);
vm_total -= I915_GTT_PAGE_SIZE;
count = 0;
+ num_engines = 0;
for_each_uabi_engine(engine, i915) {
IGT_TIMEOUT(end_time);
unsigned long this = 0;
@@ -1743,14 +1773,15 @@ static int igt_vm_isolation(void *arg)
this++;
}
count += this;
+ num_engines++;
}
- pr_info("Checked %lu scratch offsets across %d engines\n",
- count, RUNTIME_INFO(i915)->num_engines);
+ pr_info("Checked %lu scratch offsets across %lu engines\n",
+ count, num_engines);
out_file:
if (igt_live_test_end(&t))
err = -EIO;
- mock_file_free(i915, file);
+ fput(file);
return err;
}
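
live_nop_switch() above no longer waits on a stale pointer: each new request awaits the previous request's fence and only a reference to the newest request is kept. The chaining in isolation:

static struct i915_request *
chain_requests(struct i915_gem_context **ctx, unsigned int n,
	       struct intel_engine_cs *engine)
{
	struct i915_request *rq = NULL;
	unsigned int i;

	for (i = 0; i < n; i++) {
		struct i915_request *this = igt_request_alloc(ctx[i], engine);

		if (IS_ERR(this)) {
			if (rq)
				i915_request_put(rq);
			return this;
		}
		if (rq) {
			/* force submission order: this runs after rq */
			i915_request_await_dma_fence(this, &rq->fence);
			i915_request_put(rq);
		}
		rq = i915_request_get(this);
		i915_request_add(this);
	}

	return rq;	/* caller waits on and puts the final request */
}
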
diff --git a/drivers/gpu/drm/i915/gem/selftests/i915_gem_dmabuf.c b/drivers/gpu/drm/i915/gem/selftests/i915_gem_dmabuf.c
index d85d1ce273ca..2a52b92586b9 100644
--- a/drivers/gpu/drm/i915/gem/selftests/i915_gem_dmabuf.c
+++ b/drivers/gpu/drm/i915/gem/selftests/i915_gem_dmabuf.c
@@ -254,106 +254,6 @@ err_obj:
return err;
}
-static int igt_dmabuf_export_kmap(void *arg)
-{
- struct drm_i915_private *i915 = arg;
- struct drm_i915_gem_object *obj;
- struct dma_buf *dmabuf;
- void *ptr;
- int err;
-
- obj = i915_gem_object_create_shmem(i915, 2 * PAGE_SIZE);
- if (IS_ERR(obj))
- return PTR_ERR(obj);
-
- dmabuf = i915_gem_prime_export(&obj->base, 0);
- i915_gem_object_put(obj);
- if (IS_ERR(dmabuf)) {
- err = PTR_ERR(dmabuf);
- pr_err("i915_gem_prime_export failed with err=%d\n", err);
- return err;
- }
-
- ptr = dma_buf_kmap(dmabuf, 0);
- if (!ptr) {
- pr_err("dma_buf_kmap failed\n");
- err = -ENOMEM;
- goto err;
- }
-
- if (memchr_inv(ptr, 0, PAGE_SIZE)) {
- dma_buf_kunmap(dmabuf, 0, ptr);
- pr_err("Exported page[0] not initialiased to zero!\n");
- err = -EINVAL;
- goto err;
- }
-
- memset(ptr, 0xc5, PAGE_SIZE);
- dma_buf_kunmap(dmabuf, 0, ptr);
-
- ptr = i915_gem_object_pin_map(obj, I915_MAP_WB);
- if (IS_ERR(ptr)) {
- err = PTR_ERR(ptr);
- pr_err("i915_gem_object_pin_map failed with err=%d\n", err);
- goto err;
- }
- memset(ptr + PAGE_SIZE, 0xaa, PAGE_SIZE);
- i915_gem_object_flush_map(obj);
- i915_gem_object_unpin_map(obj);
-
- ptr = dma_buf_kmap(dmabuf, 1);
- if (!ptr) {
- pr_err("dma_buf_kmap failed\n");
- err = -ENOMEM;
- goto err;
- }
-
- if (memchr_inv(ptr, 0xaa, PAGE_SIZE)) {
- dma_buf_kunmap(dmabuf, 1, ptr);
- pr_err("Exported page[1] not set to 0xaa!\n");
- err = -EINVAL;
- goto err;
- }
-
- memset(ptr, 0xc5, PAGE_SIZE);
- dma_buf_kunmap(dmabuf, 1, ptr);
-
- ptr = dma_buf_kmap(dmabuf, 0);
- if (!ptr) {
- pr_err("dma_buf_kmap failed\n");
- err = -ENOMEM;
- goto err;
- }
- if (memchr_inv(ptr, 0xc5, PAGE_SIZE)) {
- dma_buf_kunmap(dmabuf, 0, ptr);
- pr_err("Exported page[0] did not retain 0xc5!\n");
- err = -EINVAL;
- goto err;
- }
- dma_buf_kunmap(dmabuf, 0, ptr);
-
- ptr = dma_buf_kmap(dmabuf, 2);
- if (ptr) {
- pr_err("Erroneously kmapped beyond the end of the object!\n");
- dma_buf_kunmap(dmabuf, 2, ptr);
- err = -EINVAL;
- goto err;
- }
-
- ptr = dma_buf_kmap(dmabuf, -1);
- if (ptr) {
- pr_err("Erroneously kmapped before the start of the object!\n");
- dma_buf_kunmap(dmabuf, -1, ptr);
- err = -EINVAL;
- goto err;
- }
-
- err = 0;
-err:
- dma_buf_put(dmabuf);
- return err;
-}
-
int i915_gem_dmabuf_mock_selftests(void)
{
static const struct i915_subtest tests[] = {
@@ -362,7 +262,6 @@ int i915_gem_dmabuf_mock_selftests(void)
SUBTEST(igt_dmabuf_import),
SUBTEST(igt_dmabuf_import_ownership),
SUBTEST(igt_dmabuf_export_vmap),
- SUBTEST(igt_dmabuf_export_kmap),
};
struct drm_i915_private *i915;
int err;
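
The deleted igt_dmabuf_export_kmap test exercised the page-at-a-time kmap interface; whole-buffer CPU access goes through vmap instead, which the surviving vmap test still covers. A sketch, assuming the signatures of this kernel era:

#include <linux/dma-buf.h>
#include <linux/string.h>

static int poison_dmabuf(struct dma_buf *dmabuf)
{
	void *ptr;

	ptr = dma_buf_vmap(dmabuf);	/* maps the whole buffer */
	if (!ptr)
		return -ENOMEM;

	memset(ptr, 0xc5, dmabuf->size);
	dma_buf_vunmap(dmabuf, ptr);
	return 0;
}
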
diff --git a/drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c b/drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c
index 29b2077b73d2..ef7c74cff28a 100644
--- a/drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c
+++ b/drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c
@@ -6,12 +6,15 @@
#include <linux/prime_numbers.h>
+#include "gt/intel_engine_pm.h"
#include "gt/intel_gt.h"
#include "gt/intel_gt_pm.h"
+#include "gem/i915_gem_region.h"
#include "huge_gem_object.h"
#include "i915_selftest.h"
#include "selftests/i915_random.h"
#include "selftests/igt_flush_test.h"
+#include "selftests/igt_mmap.h"
struct tile {
unsigned int width;
@@ -161,7 +164,7 @@ static int check_partial_mapping(struct drm_i915_gem_object *obj,
kunmap(p);
out:
- i915_vma_destroy(vma);
+ __i915_vma_put(vma);
return err;
}
@@ -255,7 +258,7 @@ static int check_partial_mappings(struct drm_i915_gem_object *obj,
if (err)
return err;
- i915_vma_destroy(vma);
+ __i915_vma_put(vma);
if (igt_timeout(end_time,
"%s: timed out after tiling=%d stride=%d\n",
@@ -535,7 +538,7 @@ static int make_obj_busy(struct drm_i915_gem_object *obj)
if (err)
return err;
- rq = i915_request_create(engine->kernel_context);
+ rq = intel_engine_create_kernel_request(engine);
if (IS_ERR(rq)) {
i915_vma_unpin(vma);
return PTR_ERR(rq);
@@ -563,16 +566,16 @@ static bool assert_mmap_offset(struct drm_i915_private *i915,
int expected)
{
struct drm_i915_gem_object *obj;
- int err;
+ struct i915_mmap_offset *mmo;
obj = i915_gem_object_create_internal(i915, size);
if (IS_ERR(obj))
return PTR_ERR(obj);
- err = create_mmap_offset(obj);
+ mmo = mmap_offset_attach(obj, I915_MMAP_OFFSET_GTT, NULL);
i915_gem_object_put(obj);
- return err == expected;
+ return PTR_ERR_OR_ZERO(mmo) == expected;
}
static void disable_retire_worker(struct drm_i915_private *i915)
@@ -606,28 +609,50 @@ static int igt_mmap_offset_exhaustion(void *arg)
struct drm_i915_private *i915 = arg;
struct drm_mm *mm = &i915->drm.vma_offset_manager->vm_addr_space_mm;
struct drm_i915_gem_object *obj;
- struct drm_mm_node resv, *hole;
- u64 hole_start, hole_end;
- int loop, err;
+ struct drm_mm_node *hole, *next;
+ struct i915_mmap_offset *mmo;
+ int loop, err = 0;
/* Disable background reaper */
disable_retire_worker(i915);
GEM_BUG_ON(!i915->gt.awake);
+ intel_gt_retire_requests(&i915->gt);
+ i915_gem_drain_freed_objects(i915);
/* Trim the device mmap space to only a page */
- memset(&resv, 0, sizeof(resv));
- drm_mm_for_each_hole(hole, mm, hole_start, hole_end) {
- resv.start = hole_start;
- resv.size = hole_end - hole_start - 1; /* PAGE_SIZE units */
- mmap_offset_lock(i915);
- err = drm_mm_reserve_node(mm, &resv);
- mmap_offset_unlock(i915);
+ mmap_offset_lock(i915);
+ loop = 1; /* PAGE_SIZE units */
+ list_for_each_entry_safe(hole, next, &mm->hole_stack, hole_stack) {
+ struct drm_mm_node *resv;
+
+ resv = kzalloc(sizeof(*resv), GFP_NOWAIT);
+ if (!resv) {
+ err = -ENOMEM;
+ goto out_park;
+ }
+
+ resv->start = drm_mm_hole_node_start(hole) + loop;
+ resv->size = hole->hole_size - loop;
+ resv->color = -1ul;
+ loop = 0;
+
+ if (!resv->size) {
+ kfree(resv);
+ continue;
+ }
+
+ pr_debug("Reserving hole [%llx + %llx]\n",
+ resv->start, resv->size);
+
+ err = drm_mm_reserve_node(mm, resv);
if (err) {
pr_err("Failed to trim VMA manager, err=%d\n", err);
+ kfree(resv);
goto out_park;
}
- break;
}
+ GEM_BUG_ON(!list_is_singular(&mm->hole_stack));
+ mmap_offset_unlock(i915);
/* Just fits! */
if (!assert_mmap_offset(i915, PAGE_SIZE, 0)) {
@@ -650,9 +675,10 @@ static int igt_mmap_offset_exhaustion(void *arg)
goto out;
}
- err = create_mmap_offset(obj);
- if (err) {
+ mmo = mmap_offset_attach(obj, I915_MMAP_OFFSET_GTT, NULL);
+ if (IS_ERR(mmo)) {
pr_err("Unable to insert object into reclaimed hole\n");
+ err = PTR_ERR(mmo);
goto err_obj;
}
@@ -684,9 +710,15 @@ static int igt_mmap_offset_exhaustion(void *arg)
out:
mmap_offset_lock(i915);
- drm_mm_remove_node(&resv);
- mmap_offset_unlock(i915);
out_park:
+ drm_mm_for_each_node_safe(hole, next, mm) {
+ if (hole->color != -1ul)
+ continue;
+
+ drm_mm_remove_node(hole);
+ kfree(hole);
+ }
+ mmap_offset_unlock(i915);
restore_retire_worker(i915);
return err;
err_obj:
@@ -694,12 +726,515 @@ err_obj:
goto out;
}
+static int gtt_set(struct drm_i915_gem_object *obj)
+{
+ struct i915_vma *vma;
+ void __iomem *map;
+ int err = 0;
+
+ vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0, PIN_MAPPABLE);
+ if (IS_ERR(vma))
+ return PTR_ERR(vma);
+
+ intel_gt_pm_get(vma->vm->gt);
+ map = i915_vma_pin_iomap(vma);
+ i915_vma_unpin(vma);
+ if (IS_ERR(map)) {
+ err = PTR_ERR(map);
+ goto out;
+ }
+
+ memset_io(map, POISON_INUSE, obj->base.size);
+ i915_vma_unpin_iomap(vma);
+
+out:
+ intel_gt_pm_put(vma->vm->gt);
+ return err;
+}
+
+static int gtt_check(struct drm_i915_gem_object *obj)
+{
+ struct i915_vma *vma;
+ void __iomem *map;
+ int err = 0;
+
+ vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0, PIN_MAPPABLE);
+ if (IS_ERR(vma))
+ return PTR_ERR(vma);
+
+ intel_gt_pm_get(vma->vm->gt);
+ map = i915_vma_pin_iomap(vma);
+ i915_vma_unpin(vma);
+ if (IS_ERR(map)) {
+ err = PTR_ERR(map);
+ goto out;
+ }
+
+ if (memchr_inv((void __force *)map, POISON_FREE, obj->base.size)) {
+ pr_err("%s: Write via mmap did not land in backing store (GTT)\n",
+ obj->mm.region->name);
+ err = -EINVAL;
+ }
+ i915_vma_unpin_iomap(vma);
+
+out:
+ intel_gt_pm_put(vma->vm->gt);
+ return err;
+}
+
+static int wc_set(struct drm_i915_gem_object *obj)
+{
+ void *vaddr;
+
+ vaddr = i915_gem_object_pin_map(obj, I915_MAP_WC);
+ if (IS_ERR(vaddr))
+ return PTR_ERR(vaddr);
+
+ memset(vaddr, POISON_INUSE, obj->base.size);
+ i915_gem_object_flush_map(obj);
+ i915_gem_object_unpin_map(obj);
+
+ return 0;
+}
+
+static int wc_check(struct drm_i915_gem_object *obj)
+{
+ void *vaddr;
+ int err = 0;
+
+ vaddr = i915_gem_object_pin_map(obj, I915_MAP_WC);
+ if (IS_ERR(vaddr))
+ return PTR_ERR(vaddr);
+
+ if (memchr_inv(vaddr, POISON_FREE, obj->base.size)) {
+ pr_err("%s: Write via mmap did not land in backing store (WC)\n",
+ obj->mm.region->name);
+ err = -EINVAL;
+ }
+ i915_gem_object_unpin_map(obj);
+
+ return err;
+}
+
+static bool can_mmap(struct drm_i915_gem_object *obj, enum i915_mmap_type type)
+{
+ if (type == I915_MMAP_TYPE_GTT &&
+ !i915_ggtt_has_aperture(&to_i915(obj->base.dev)->ggtt))
+ return false;
+
+ if (type != I915_MMAP_TYPE_GTT &&
+ !i915_gem_object_type_has(obj,
+ I915_GEM_OBJECT_HAS_STRUCT_PAGE |
+ I915_GEM_OBJECT_HAS_IOMEM))
+ return false;
+
+ return true;
+}
+
+#define expand32(x) (((x) << 0) | ((x) << 8) | ((x) << 16) | ((x) << 24))
+static int __igt_mmap(struct drm_i915_private *i915,
+ struct drm_i915_gem_object *obj,
+ enum i915_mmap_type type)
+{
+ struct i915_mmap_offset *mmo;
+ struct vm_area_struct *area;
+ unsigned long addr;
+ int err, i;
+
+ if (!can_mmap(obj, type))
+ return 0;
+
+ err = wc_set(obj);
+ if (err == -ENXIO)
+ err = gtt_set(obj);
+ if (err)
+ return err;
+
+ mmo = mmap_offset_attach(obj, type, NULL);
+ if (IS_ERR(mmo))
+ return PTR_ERR(mmo);
+
+ addr = igt_mmap_node(i915, &mmo->vma_node, 0, PROT_WRITE, MAP_SHARED);
+ if (IS_ERR_VALUE(addr))
+ return addr;
+
+ pr_debug("igt_mmap(%s, %d) @ %lx\n", obj->mm.region->name, type, addr);
+
+ area = find_vma(current->mm, addr);
+ if (!area) {
+ pr_err("%s: Did not create a vm_area_struct for the mmap\n",
+ obj->mm.region->name);
+ err = -EINVAL;
+ goto out_unmap;
+ }
+
+ if (area->vm_private_data != mmo) {
+ pr_err("%s: vm_area_struct did not point back to our mmap_offset object!\n",
+ obj->mm.region->name);
+ err = -EINVAL;
+ goto out_unmap;
+ }
+
+ for (i = 0; i < obj->base.size / sizeof(u32); i++) {
+ u32 __user *ux = u64_to_user_ptr((u64)(addr + i * sizeof(*ux)));
+ u32 x;
+
+ if (get_user(x, ux)) {
+ pr_err("%s: Unable to read from mmap, offset:%zd\n",
+ obj->mm.region->name, i * sizeof(x));
+ err = -EFAULT;
+ goto out_unmap;
+ }
+
+ if (x != expand32(POISON_INUSE)) {
+ pr_err("%s: Read incorrect value from mmap, offset:%zd, found:%x, expected:%x\n",
+ obj->mm.region->name,
+ i * sizeof(x), x, expand32(POISON_INUSE));
+ err = -EINVAL;
+ goto out_unmap;
+ }
+
+ x = expand32(POISON_FREE);
+ if (put_user(x, ux)) {
+ pr_err("%s: Unable to write to mmap, offset:%zd\n",
+ obj->mm.region->name, i * sizeof(x));
+ err = -EFAULT;
+ goto out_unmap;
+ }
+ }
+
+ if (type == I915_MMAP_TYPE_GTT)
+ intel_gt_flush_ggtt_writes(&i915->gt);
+
+ err = wc_check(obj);
+ if (err == -ENXIO)
+ err = gtt_check(obj);
+out_unmap:
+ vm_munmap(addr, obj->base.size);
+ return err;
+}
+
+static int igt_mmap(void *arg)
+{
+ struct drm_i915_private *i915 = arg;
+ struct intel_memory_region *mr;
+ enum intel_region_id id;
+
+ for_each_memory_region(mr, i915, id) {
+ unsigned long sizes[] = {
+ PAGE_SIZE,
+ mr->min_page_size,
+ SZ_4M,
+ };
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(sizes); i++) {
+ struct drm_i915_gem_object *obj;
+ int err;
+
+ obj = i915_gem_object_create_region(mr, sizes[i], 0);
+ if (obj == ERR_PTR(-ENODEV))
+ continue;
+
+ if (IS_ERR(obj))
+ return PTR_ERR(obj);
+
+ err = __igt_mmap(i915, obj, I915_MMAP_TYPE_GTT);
+ if (err == 0)
+ err = __igt_mmap(i915, obj, I915_MMAP_TYPE_WC);
+
+ i915_gem_object_put(obj);
+ if (err)
+ return err;
+ }
+ }
+
+ return 0;
+}
+
+static int __igt_mmap_gpu(struct drm_i915_private *i915,
+ struct drm_i915_gem_object *obj,
+ enum i915_mmap_type type)
+{
+ struct intel_engine_cs *engine;
+ struct i915_mmap_offset *mmo;
+ unsigned long addr;
+ u32 __user *ux;
+ u32 bbe;
+ int err;
+
+ /*
+ * Verify that the mmap access into the backing store aligns with
+ * that of the GPU, i.e. that mmap is indeed writing into the same
+ * page as being read by the GPU.
+ */
+
+ if (!can_mmap(obj, type))
+ return 0;
+
+ err = wc_set(obj);
+ if (err == -ENXIO)
+ err = gtt_set(obj);
+ if (err)
+ return err;
+
+ mmo = mmap_offset_attach(obj, type, NULL);
+ if (IS_ERR(mmo))
+ return PTR_ERR(mmo);
+
+ addr = igt_mmap_node(i915, &mmo->vma_node, 0, PROT_WRITE, MAP_SHARED);
+ if (IS_ERR_VALUE(addr))
+ return addr;
+
+ ux = u64_to_user_ptr((u64)addr);
+ bbe = MI_BATCH_BUFFER_END;
+ if (put_user(bbe, ux)) {
+ pr_err("%s: Unable to write to mmap\n", obj->mm.region->name);
+ err = -EFAULT;
+ goto out_unmap;
+ }
+
+ if (type == I915_MMAP_TYPE_GTT)
+ intel_gt_flush_ggtt_writes(&i915->gt);
+
+ for_each_uabi_engine(engine, i915) {
+ struct i915_request *rq;
+ struct i915_vma *vma;
+
+ vma = i915_vma_instance(obj, engine->kernel_context->vm, NULL);
+ if (IS_ERR(vma)) {
+ err = PTR_ERR(vma);
+ goto out_unmap;
+ }
+
+ err = i915_vma_pin(vma, 0, 0, PIN_USER);
+ if (err)
+ goto out_unmap;
+
+ rq = i915_request_create(engine->kernel_context);
+ if (IS_ERR(rq)) {
+ err = PTR_ERR(rq);
+ goto out_unpin;
+ }
+
+ i915_vma_lock(vma);
+ err = i915_request_await_object(rq, vma->obj, false);
+ if (err == 0)
+ err = i915_vma_move_to_active(vma, rq, 0);
+ i915_vma_unlock(vma);
+
+ err = engine->emit_bb_start(rq, vma->node.start, 0, 0);
+ i915_request_get(rq);
+ i915_request_add(rq);
+
+ if (i915_request_wait(rq, 0, HZ / 5) < 0) {
+ struct drm_printer p =
+ drm_info_printer(engine->i915->drm.dev);
+
+ pr_err("%s(%s, %s): Failed to execute batch\n",
+ __func__, engine->name, obj->mm.region->name);
+ intel_engine_dump(engine, &p,
+ "%s\n", engine->name);
+
+ intel_gt_set_wedged(engine->gt);
+ err = -EIO;
+ }
+ i915_request_put(rq);
+
+out_unpin:
+ i915_vma_unpin(vma);
+ if (err)
+ goto out_unmap;
+ }
+
+out_unmap:
+ vm_munmap(addr, obj->base.size);
+ return err;
+}
+
+static int igt_mmap_gpu(void *arg)
+{
+ struct drm_i915_private *i915 = arg;
+ struct intel_memory_region *mr;
+ enum intel_region_id id;
+
+ for_each_memory_region(mr, i915, id) {
+ struct drm_i915_gem_object *obj;
+ int err;
+
+ obj = i915_gem_object_create_region(mr, PAGE_SIZE, 0);
+ if (obj == ERR_PTR(-ENODEV))
+ continue;
+
+ if (IS_ERR(obj))
+ return PTR_ERR(obj);
+
+ err = __igt_mmap_gpu(i915, obj, I915_MMAP_TYPE_GTT);
+ if (err == 0)
+ err = __igt_mmap_gpu(i915, obj, I915_MMAP_TYPE_WC);
+
+ i915_gem_object_put(obj);
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+
+static int check_present_pte(pte_t *pte, unsigned long addr, void *data)
+{
+ if (!pte_present(*pte) || pte_none(*pte)) {
+ pr_err("missing PTE:%lx\n",
+ (addr - (unsigned long)data) >> PAGE_SHIFT);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int check_absent_pte(pte_t *pte, unsigned long addr, void *data)
+{
+ if (pte_present(*pte) && !pte_none(*pte)) {
+ pr_err("present PTE:%lx; expected to be revoked\n",
+ (addr - (unsigned long)data) >> PAGE_SHIFT);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int check_present(unsigned long addr, unsigned long len)
+{
+ return apply_to_page_range(current->mm, addr, len,
+ check_present_pte, (void *)addr);
+}
+
+static int check_absent(unsigned long addr, unsigned long len)
+{
+ return apply_to_page_range(current->mm, addr, len,
+ check_absent_pte, (void *)addr);
+}
+
+static int prefault_range(u64 start, u64 len)
+{
+ const char __user *addr, *end;
+ char __maybe_unused c;
+ int err;
+
+ addr = u64_to_user_ptr(start);
+ end = addr + len;
+
+ for (; addr < end; addr += PAGE_SIZE) {
+ err = __get_user(c, addr);
+ if (err)
+ return err;
+ }
+
+ return __get_user(c, end - 1);
+}
+
+static int __igt_mmap_revoke(struct drm_i915_private *i915,
+ struct drm_i915_gem_object *obj,
+ enum i915_mmap_type type)
+{
+ struct i915_mmap_offset *mmo;
+ unsigned long addr;
+ int err;
+
+ if (!can_mmap(obj, type))
+ return 0;
+
+ mmo = mmap_offset_attach(obj, type, NULL);
+ if (IS_ERR(mmo))
+ return PTR_ERR(mmo);
+
+ addr = igt_mmap_node(i915, &mmo->vma_node, 0, PROT_WRITE, MAP_SHARED);
+ if (IS_ERR_VALUE(addr))
+ return addr;
+
+ err = prefault_range(addr, obj->base.size);
+ if (err)
+ goto out_unmap;
+
+ GEM_BUG_ON(mmo->mmap_type == I915_MMAP_TYPE_GTT &&
+ !atomic_read(&obj->bind_count));
+
+ err = check_present(addr, obj->base.size);
+ if (err) {
+ pr_err("%s: was not present\n", obj->mm.region->name);
+ goto out_unmap;
+ }
+
+ /*
+ * After unbinding the object from the GGTT, its address may be reused
+ * for other objects. Ergo we have to revoke the previous mmap PTE
+ * access as it no longer points to the same object.
+ */
+ err = i915_gem_object_unbind(obj, I915_GEM_OBJECT_UNBIND_ACTIVE);
+ if (err) {
+ pr_err("Failed to unbind object!\n");
+ goto out_unmap;
+ }
+ GEM_BUG_ON(atomic_read(&obj->bind_count));
+
+ if (type != I915_MMAP_TYPE_GTT) {
+ __i915_gem_object_put_pages(obj);
+ if (i915_gem_object_has_pages(obj)) {
+ pr_err("Failed to put-pages object!\n");
+ err = -EINVAL;
+ goto out_unmap;
+ }
+ }
+
+ err = check_absent(addr, obj->base.size);
+ if (err) {
+ pr_err("%s: was not absent\n", obj->mm.region->name);
+ goto out_unmap;
+ }
+
+out_unmap:
+ vm_munmap(addr, obj->base.size);
+ return err;
+}
+
+static int igt_mmap_revoke(void *arg)
+{
+ struct drm_i915_private *i915 = arg;
+ struct intel_memory_region *mr;
+ enum intel_region_id id;
+
+ for_each_memory_region(mr, i915, id) {
+ struct drm_i915_gem_object *obj;
+ int err;
+
+ obj = i915_gem_object_create_region(mr, PAGE_SIZE, 0);
+ if (obj == ERR_PTR(-ENODEV))
+ continue;
+
+ if (IS_ERR(obj))
+ return PTR_ERR(obj);
+
+ err = __igt_mmap_revoke(i915, obj, I915_MMAP_TYPE_GTT);
+ if (err == 0)
+ err = __igt_mmap_revoke(i915, obj, I915_MMAP_TYPE_WC);
+
+ i915_gem_object_put(obj);
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+
int i915_gem_mman_live_selftests(struct drm_i915_private *i915)
{
static const struct i915_subtest tests[] = {
SUBTEST(igt_partial_tiling),
SUBTEST(igt_smoke_tiling),
SUBTEST(igt_mmap_offset_exhaustion),
+ SUBTEST(igt_mmap),
+ SUBTEST(igt_mmap_revoke),
+ SUBTEST(igt_mmap_gpu),
};
return i915_subtests(tests, i915);
diff --git a/drivers/gpu/drm/i915/gem/selftests/i915_gem_object_blt.c b/drivers/gpu/drm/i915/gem/selftests/i915_gem_object_blt.c
index e8132aca0bb6..62077fe46715 100644
--- a/drivers/gpu/drm/i915/gem/selftests/i915_gem_object_blt.c
+++ b/drivers/gpu/drm/i915/gem/selftests/i915_gem_object_blt.c
@@ -41,6 +41,7 @@ static int __perf_fill_blt(struct drm_i915_gem_object *obj)
if (!engine)
return 0;
+ intel_engine_pm_get(engine);
for (pass = 0; pass < ARRAY_SIZE(t); pass++) {
struct intel_context *ce = engine->kernel_context;
ktime_t t0, t1;
@@ -49,17 +50,20 @@ static int __perf_fill_blt(struct drm_i915_gem_object *obj)
err = i915_gem_object_fill_blt(obj, ce, 0);
if (err)
- return err;
+ break;
err = i915_gem_object_wait(obj,
I915_WAIT_ALL,
MAX_SCHEDULE_TIMEOUT);
if (err)
- return err;
+ break;
t1 = ktime_get();
t[pass] = ktime_sub(t1, t0);
}
+ intel_engine_pm_put(engine);
+ if (err)
+ return err;
sort(t, ARRAY_SIZE(t), sizeof(*t), wrap_ktime_compare, NULL);
pr_info("%s: blt %zd KiB fill: %lld MiB/s\n",
@@ -109,6 +113,7 @@ static int __perf_copy_blt(struct drm_i915_gem_object *src,
struct intel_engine_cs *engine;
ktime_t t[5];
int pass;
+ int err = 0;
engine = intel_engine_lookup_user(i915,
I915_ENGINE_CLASS_COPY,
@@ -116,26 +121,29 @@ static int __perf_copy_blt(struct drm_i915_gem_object *src,
if (!engine)
return 0;
+ intel_engine_pm_get(engine);
for (pass = 0; pass < ARRAY_SIZE(t); pass++) {
struct intel_context *ce = engine->kernel_context;
ktime_t t0, t1;
- int err;
t0 = ktime_get();
err = i915_gem_object_copy_blt(src, dst, ce);
if (err)
- return err;
+ break;
err = i915_gem_object_wait(dst,
I915_WAIT_ALL,
MAX_SCHEDULE_TIMEOUT);
if (err)
- return err;
+ break;
t1 = ktime_get();
t[pass] = ktime_sub(t1, t0);
}
+ intel_engine_pm_put(engine);
+ if (err)
+ return err;
sort(t, ARRAY_SIZE(t), sizeof(*t), wrap_ktime_compare, NULL);
pr_info("%s: blt %zd KiB copy: %lld MiB/s\n",
@@ -186,6 +194,8 @@ err_src:
struct igt_thread_arg {
struct drm_i915_private *i915;
+ struct i915_gem_context *ctx;
+ struct file *file;
struct rnd_state prng;
unsigned int n_cpus;
};
@@ -198,24 +208,20 @@ static int igt_fill_blt_thread(void *arg)
struct drm_i915_gem_object *obj;
struct i915_gem_context *ctx;
struct intel_context *ce;
- struct drm_file *file;
unsigned int prio;
IGT_TIMEOUT(end);
int err;
- file = mock_file(i915);
- if (IS_ERR(file))
- return PTR_ERR(file);
+ ctx = thread->ctx;
+ if (!ctx) {
+ ctx = live_context(i915, thread->file);
+ if (IS_ERR(ctx))
+ return PTR_ERR(ctx);
- ctx = live_context(i915, file);
- if (IS_ERR(ctx)) {
- err = PTR_ERR(ctx);
- goto out_file;
+ prio = i915_prandom_u32_max_state(I915_PRIORITY_MAX, prng);
+ ctx->sched.priority = I915_USER_PRIORITY(prio);
}
- prio = i915_prandom_u32_max_state(I915_PRIORITY_MAX, prng);
- ctx->sched.priority = I915_USER_PRIORITY(prio);
-
ce = i915_gem_context_get_engine(ctx, BCS0);
GEM_BUG_ON(IS_ERR(ce));
@@ -300,8 +306,6 @@ err_flush:
err = 0;
intel_context_put(ce);
-out_file:
- mock_file_free(i915, file);
return err;
}
@@ -313,24 +317,20 @@ static int igt_copy_blt_thread(void *arg)
struct drm_i915_gem_object *src, *dst;
struct i915_gem_context *ctx;
struct intel_context *ce;
- struct drm_file *file;
unsigned int prio;
IGT_TIMEOUT(end);
int err;
- file = mock_file(i915);
- if (IS_ERR(file))
- return PTR_ERR(file);
+ ctx = thread->ctx;
+ if (!ctx) {
+ ctx = live_context(i915, thread->file);
+ if (IS_ERR(ctx))
+ return PTR_ERR(ctx);
- ctx = live_context(i915, file);
- if (IS_ERR(ctx)) {
- err = PTR_ERR(ctx);
- goto out_file;
+ prio = i915_prandom_u32_max_state(I915_PRIORITY_MAX, prng);
+ ctx->sched.priority = I915_USER_PRIORITY(prio);
}
- prio = i915_prandom_u32_max_state(I915_PRIORITY_MAX, prng);
- ctx->sched.priority = I915_USER_PRIORITY(prio);
-
ce = i915_gem_context_get_engine(ctx, BCS0);
GEM_BUG_ON(IS_ERR(ce));
@@ -431,19 +431,18 @@ err_flush:
err = 0;
intel_context_put(ce);
-out_file:
- mock_file_free(i915, file);
return err;
}
static int igt_threaded_blt(struct drm_i915_private *i915,
- int (*blt_fn)(void *arg))
+ int (*blt_fn)(void *arg),
+ unsigned int flags)
+#define SINGLE_CTX BIT(0)
{
struct igt_thread_arg *thread;
struct task_struct **tsk;
+ unsigned int n_cpus, i;
I915_RND_STATE(prng);
- unsigned int n_cpus;
- unsigned int i;
int err = 0;
n_cpus = num_online_cpus() + 1;
@@ -453,13 +452,27 @@ static int igt_threaded_blt(struct drm_i915_private *i915,
return 0;
thread = kcalloc(n_cpus, sizeof(struct igt_thread_arg), GFP_KERNEL);
- if (!thread) {
- kfree(tsk);
- return 0;
+ if (!thread)
+ goto out_tsk;
+
+ thread[0].file = mock_file(i915);
+ if (IS_ERR(thread[0].file)) {
+ err = PTR_ERR(thread[0].file);
+ goto out_thread;
+ }
+
+ if (flags & SINGLE_CTX) {
+ thread[0].ctx = live_context(i915, thread[0].file);
+ if (IS_ERR(thread[0].ctx)) {
+ err = PTR_ERR(thread[0].ctx);
+ goto out_file;
+ }
}
for (i = 0; i < n_cpus; ++i) {
thread[i].i915 = i915;
+ thread[i].file = thread[0].file;
+ thread[i].ctx = thread[0].ctx;
thread[i].n_cpus = n_cpus;
thread[i].prng =
I915_RND_STATE_INITIALIZER(prandom_u32_state(&prng));
@@ -488,29 +501,42 @@ static int igt_threaded_blt(struct drm_i915_private *i915,
put_task_struct(tsk[i]);
}
- kfree(tsk);
+out_file:
+ fput(thread[0].file);
+out_thread:
kfree(thread);
-
+out_tsk:
+ kfree(tsk);
return err;
}
static int igt_fill_blt(void *arg)
{
- return igt_threaded_blt(arg, igt_fill_blt_thread);
+ return igt_threaded_blt(arg, igt_fill_blt_thread, 0);
+}
+
+static int igt_fill_blt_ctx0(void *arg)
+{
+ return igt_threaded_blt(arg, igt_fill_blt_thread, SINGLE_CTX);
}
static int igt_copy_blt(void *arg)
{
- return igt_threaded_blt(arg, igt_copy_blt_thread);
+ return igt_threaded_blt(arg, igt_copy_blt_thread, 0);
+}
+
+static int igt_copy_blt_ctx0(void *arg)
+{
+ return igt_threaded_blt(arg, igt_copy_blt_thread, SINGLE_CTX);
}
int i915_gem_object_blt_live_selftests(struct drm_i915_private *i915)
{
static const struct i915_subtest tests[] = {
- SUBTEST(perf_fill_blt),
- SUBTEST(perf_copy_blt),
SUBTEST(igt_fill_blt),
+ SUBTEST(igt_fill_blt_ctx0),
SUBTEST(igt_copy_blt),
+ SUBTEST(igt_copy_blt_ctx0),
};
if (intel_gt_is_wedged(&i915->gt))
@@ -521,3 +547,16 @@ int i915_gem_object_blt_live_selftests(struct drm_i915_private *i915)
return i915_live_subtests(tests, i915);
}
+
+int i915_gem_object_blt_perf_selftests(struct drm_i915_private *i915)
+{
+ static const struct i915_subtest tests[] = {
+ SUBTEST(perf_fill_blt),
+ SUBTEST(perf_copy_blt),
+ };
+
+ if (intel_gt_is_wedged(&i915->gt))
+ return 0;
+
+ return i915_live_subtests(tests, i915);
+}
diff --git a/drivers/gpu/drm/i915/gem/selftests/mock_context.c b/drivers/gpu/drm/i915/gem/selftests/mock_context.c
index 29b8984f0e47..384143aa7776 100644
--- a/drivers/gpu/drm/i915/gem/selftests/mock_context.c
+++ b/drivers/gpu/drm/i915/gem/selftests/mock_context.c
@@ -5,6 +5,7 @@
*/
#include "mock_context.h"
+#include "selftests/mock_drm.h"
#include "selftests/mock_gtt.h"
struct i915_gem_context *
@@ -36,9 +37,7 @@ mock_context(struct drm_i915_private *i915,
if (name) {
struct i915_ppgtt *ppgtt;
- ctx->name = kstrdup(name, GFP_KERNEL);
- if (!ctx->name)
- goto err_put;
+ strncpy(ctx->name, name, sizeof(ctx->name) - 1);
ppgtt = mock_ppgtt(i915, name);
if (!ppgtt)
@@ -74,16 +73,17 @@ void mock_init_contexts(struct drm_i915_private *i915)
}
struct i915_gem_context *
-live_context(struct drm_i915_private *i915, struct drm_file *file)
+live_context(struct drm_i915_private *i915, struct file *file)
{
struct i915_gem_context *ctx;
int err;
+ u32 id;
ctx = i915_gem_create_context(i915, 0);
if (IS_ERR(ctx))
return ctx;
- err = gem_context_register(ctx, file->driver_priv);
+ err = gem_context_register(ctx, to_drm_file(file)->driver_priv, &id);
if (err < 0)
goto err_ctx;
@@ -97,7 +97,16 @@ err_ctx:
struct i915_gem_context *
kernel_context(struct drm_i915_private *i915)
{
- return i915_gem_context_create_kernel(i915, I915_PRIORITY_NORMAL);
+ struct i915_gem_context *ctx;
+
+ ctx = i915_gem_create_context(i915, 0);
+ if (IS_ERR(ctx))
+ return ctx;
+
+ i915_gem_context_clear_bannable(ctx);
+ i915_gem_context_set_persistence(ctx);
+
+ return ctx;
}
void kernel_context_close(struct i915_gem_context *ctx)
diff --git a/drivers/gpu/drm/i915/gem/selftests/mock_context.h b/drivers/gpu/drm/i915/gem/selftests/mock_context.h
index 0b926653914f..fb83d2f09212 100644
--- a/drivers/gpu/drm/i915/gem/selftests/mock_context.h
+++ b/drivers/gpu/drm/i915/gem/selftests/mock_context.h
@@ -7,6 +7,9 @@
#ifndef __MOCK_CONTEXT_H
#define __MOCK_CONTEXT_H
+struct file;
+struct drm_i915_private;
+
void mock_init_contexts(struct drm_i915_private *i915);
struct i915_gem_context *
@@ -16,7 +19,7 @@ mock_context(struct drm_i915_private *i915,
void mock_context_close(struct i915_gem_context *ctx);
struct i915_gem_context *
-live_context(struct drm_i915_private *i915, struct drm_file *file);
+live_context(struct drm_i915_private *i915, struct file *file);
struct i915_gem_context *kernel_context(struct drm_i915_private *i915);
void kernel_context_close(struct i915_gem_context *ctx);
diff --git a/drivers/gpu/drm/i915/gem/selftests/mock_dmabuf.c b/drivers/gpu/drm/i915/gem/selftests/mock_dmabuf.c
index b9e059d4328a..9272bef57092 100644
--- a/drivers/gpu/drm/i915/gem/selftests/mock_dmabuf.c
+++ b/drivers/gpu/drm/i915/gem/selftests/mock_dmabuf.c
@@ -76,20 +76,6 @@ static void mock_dmabuf_vunmap(struct dma_buf *dma_buf, void *vaddr)
vm_unmap_ram(vaddr, mock->npages);
}
-static void *mock_dmabuf_kmap(struct dma_buf *dma_buf, unsigned long page_num)
-{
- struct mock_dmabuf *mock = to_mock(dma_buf);
-
- return kmap(mock->pages[page_num]);
-}
-
-static void mock_dmabuf_kunmap(struct dma_buf *dma_buf, unsigned long page_num, void *addr)
-{
- struct mock_dmabuf *mock = to_mock(dma_buf);
-
- return kunmap(mock->pages[page_num]);
-}
-
static int mock_dmabuf_mmap(struct dma_buf *dma_buf, struct vm_area_struct *vma)
{
return -ENODEV;
@@ -99,8 +85,6 @@ static const struct dma_buf_ops mock_dmabuf_ops = {
.map_dma_buf = mock_map_dma_buf,
.unmap_dma_buf = mock_unmap_dma_buf,
.release = mock_dmabuf_release,
- .map = mock_dmabuf_kmap,
- .unmap = mock_dmabuf_kunmap,
.mmap = mock_dmabuf_mmap,
.vmap = mock_dmabuf_vmap,
.vunmap = mock_dmabuf_vunmap,
diff --git a/drivers/gpu/drm/i915/gem/selftests/mock_dmabuf.h b/drivers/gpu/drm/i915/gem/selftests/mock_dmabuf.h
index f0f8bbd82dfc..22818bbb139d 100644
--- a/drivers/gpu/drm/i915/gem/selftests/mock_dmabuf.h
+++ b/drivers/gpu/drm/i915/gem/selftests/mock_dmabuf.h
@@ -14,7 +14,7 @@ struct mock_dmabuf {
struct page *pages[];
};
-static struct mock_dmabuf *to_mock(struct dma_buf *buf)
+static inline struct mock_dmabuf *to_mock(struct dma_buf *buf)
{
return buf->priv;
}
diff --git a/drivers/gpu/drm/i915/gem/selftests/mock_gem_object.h b/drivers/gpu/drm/i915/gem/selftests/mock_gem_object.h
index 370360b4a148..688511afa883 100644
--- a/drivers/gpu/drm/i915/gem/selftests/mock_gem_object.h
+++ b/drivers/gpu/drm/i915/gem/selftests/mock_gem_object.h
@@ -7,6 +7,8 @@
#ifndef __MOCK_GEM_OBJECT_H__
#define __MOCK_GEM_OBJECT_H__
+#include "gem/i915_gem_object_types.h"
+
struct mock_object {
struct drm_i915_gem_object base;
};
diff --git a/drivers/gpu/drm/i915/gt/Makefile b/drivers/gpu/drm/i915/gt/Makefile
deleted file mode 100644
index 7e73aa587967..000000000000
--- a/drivers/gpu/drm/i915/gt/Makefile
+++ /dev/null
@@ -1,5 +0,0 @@
-# For building individual subdir files on the command line
-subdir-ccflags-y += -I$(srctree)/$(src)/..
-
-# Extra header tests
-header-test-pattern-$(CONFIG_DRM_I915_WERROR) := *.h
diff --git a/drivers/gpu/drm/i915/gt/debugfs_engines.c b/drivers/gpu/drm/i915/gt/debugfs_engines.c
new file mode 100644
index 000000000000..6a5e9ab20b94
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/debugfs_engines.c
@@ -0,0 +1,36 @@
+// SPDX-License-Identifier: MIT
+
+/*
+ * Copyright © 2019 Intel Corporation
+ */
+
+#include <drm/drm_print.h>
+
+#include "debugfs_engines.h"
+#include "debugfs_gt.h"
+#include "i915_drv.h" /* for_each_engine! */
+#include "intel_engine.h"
+
+static int engines_show(struct seq_file *m, void *data)
+{
+ struct intel_gt *gt = m->private;
+ struct intel_engine_cs *engine;
+ enum intel_engine_id id;
+ struct drm_printer p;
+
+ p = drm_seq_file_printer(m);
+ for_each_engine(engine, gt, id)
+ intel_engine_dump(engine, &p, "%s\n", engine->name);
+
+ return 0;
+}
+DEFINE_GT_DEBUGFS_ATTRIBUTE(engines);
+
+void debugfs_engines_register(struct intel_gt *gt, struct dentry *root)
+{
+ static const struct debugfs_gt_file files[] = {
+ { "engines", &engines_fops },
+ };
+
+ debugfs_gt_register_files(gt, root, files, ARRAY_SIZE(files));
+}
diff --git a/drivers/gpu/drm/i915/gt/debugfs_engines.h b/drivers/gpu/drm/i915/gt/debugfs_engines.h
new file mode 100644
index 000000000000..f69257eaa1cc
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/debugfs_engines.h
@@ -0,0 +1,14 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2019 Intel Corporation
+ */
+
+#ifndef DEBUGFS_ENGINES_H
+#define DEBUGFS_ENGINES_H
+
+struct intel_gt;
+struct dentry;
+
+void debugfs_engines_register(struct intel_gt *gt, struct dentry *root);
+
+#endif /* DEBUGFS_ENGINES_H */
diff --git a/drivers/gpu/drm/i915/gt/debugfs_gt.c b/drivers/gpu/drm/i915/gt/debugfs_gt.c
new file mode 100644
index 000000000000..75255aaacaed
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/debugfs_gt.c
@@ -0,0 +1,42 @@
+// SPDX-License-Identifier: MIT
+
+/*
+ * Copyright © 2019 Intel Corporation
+ */
+
+#include <linux/debugfs.h>
+
+#include "debugfs_engines.h"
+#include "debugfs_gt.h"
+#include "debugfs_gt_pm.h"
+#include "i915_drv.h"
+
+void debugfs_gt_register(struct intel_gt *gt)
+{
+ struct dentry *root;
+
+ if (!gt->i915->drm.primary->debugfs_root)
+ return;
+
+ root = debugfs_create_dir("gt", gt->i915->drm.primary->debugfs_root);
+ if (IS_ERR(root))
+ return;
+
+ debugfs_engines_register(gt, root);
+ debugfs_gt_pm_register(gt, root);
+}
+
+void debugfs_gt_register_files(struct intel_gt *gt,
+ struct dentry *root,
+ const struct debugfs_gt_file *files,
+ unsigned long count)
+{
+ while (count--) {
+ if (!files->eval || files->eval(gt))
+ debugfs_create_file(files->name,
+ 0444, root, gt,
+ files->fops);
+
+ files++;
+ }
+}
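+
+/*
+ * Illustrative use, mirroring the callers elsewhere in this patch: files
+ * may be gated by an eval() predicate so they are only created when the
+ * hardware supports the feature:
+ *
+ *	static const struct debugfs_gt_file files[] = {
+ *		{ "llc", &llc_fops, llc_eval },	// only if HAS_LLC()
+ *	};
+ *	debugfs_gt_register_files(gt, root, files, ARRAY_SIZE(files));
+ */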
diff --git a/drivers/gpu/drm/i915/gt/debugfs_gt.h b/drivers/gpu/drm/i915/gt/debugfs_gt.h
new file mode 100644
index 000000000000..4ea0f06cda8f
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/debugfs_gt.h
@@ -0,0 +1,39 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2019 Intel Corporation
+ */
+
+#ifndef DEBUGFS_GT_H
+#define DEBUGFS_GT_H
+
+#include <linux/file.h>
+
+struct intel_gt;
+
+#define DEFINE_GT_DEBUGFS_ATTRIBUTE(__name) \
+ static int __name ## _open(struct inode *inode, struct file *file) \
+{ \
+ return single_open(file, __name ## _show, inode->i_private); \
+} \
+static const struct file_operations __name ## _fops = { \
+ .owner = THIS_MODULE, \
+ .open = __name ## _open, \
+ .read = seq_read, \
+ .llseek = seq_lseek, \
+ .release = single_release, \
+}
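+
+/*
+ * Example expansion: DEFINE_GT_DEBUGFS_ATTRIBUTE(engines) generates an
+ * engines_open() wrapper around the engines_show() seq_file callback plus
+ * a matching engines_fops, ready for a debugfs_gt_file table.
+ */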
+
+void debugfs_gt_register(struct intel_gt *gt);
+
+struct debugfs_gt_file {
+ const char *name;
+ const struct file_operations *fops;
+ bool (*eval)(const struct intel_gt *gt);
+};
+
+void debugfs_gt_register_files(struct intel_gt *gt,
+ struct dentry *root,
+ const struct debugfs_gt_file *files,
+ unsigned long count);
+
+#endif /* DEBUGFS_GT_H */
diff --git a/drivers/gpu/drm/i915/gt/debugfs_gt_pm.c b/drivers/gpu/drm/i915/gt/debugfs_gt_pm.c
new file mode 100644
index 000000000000..059c9e5c002e
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/debugfs_gt_pm.c
@@ -0,0 +1,601 @@
+// SPDX-License-Identifier: MIT
+
+/*
+ * Copyright © 2019 Intel Corporation
+ */
+
+#include <linux/seq_file.h>
+
+#include "debugfs_gt.h"
+#include "debugfs_gt_pm.h"
+#include "i915_drv.h"
+#include "intel_gt.h"
+#include "intel_llc.h"
+#include "intel_rc6.h"
+#include "intel_rps.h"
+#include "intel_runtime_pm.h"
+#include "intel_sideband.h"
+#include "intel_uncore.h"
+
+static int fw_domains_show(struct seq_file *m, void *data)
+{
+ struct intel_gt *gt = m->private;
+ struct intel_uncore *uncore = gt->uncore;
+ struct intel_uncore_forcewake_domain *fw_domain;
+ unsigned int tmp;
+
+ seq_printf(m, "user.bypass_count = %u\n",
+ uncore->user_forcewake_count);
+
+ for_each_fw_domain(fw_domain, uncore, tmp)
+ seq_printf(m, "%s.wake_count = %u\n",
+ intel_uncore_forcewake_domain_to_str(fw_domain->id),
+ READ_ONCE(fw_domain->wake_count));
+
+ return 0;
+}
+DEFINE_GT_DEBUGFS_ATTRIBUTE(fw_domains);
+
+static void print_rc6_res(struct seq_file *m,
+ const char *title,
+ const i915_reg_t reg)
+{
+ struct intel_gt *gt = m->private;
+ intel_wakeref_t wakeref;
+
+ with_intel_runtime_pm(gt->uncore->rpm, wakeref)
+ seq_printf(m, "%s %u (%llu us)\n", title,
+ intel_uncore_read(gt->uncore, reg),
+ intel_rc6_residency_us(&gt->rc6, reg));
+}
+
+static int vlv_drpc(struct seq_file *m)
+{
+ struct intel_gt *gt = m->private;
+ struct intel_uncore *uncore = gt->uncore;
+ u32 rcctl1, pw_status;
+
+ pw_status = intel_uncore_read(uncore, VLV_GTLC_PW_STATUS);
+ rcctl1 = intel_uncore_read(uncore, GEN6_RC_CONTROL);
+
+ seq_printf(m, "RC6 Enabled: %s\n",
+ yesno(rcctl1 & (GEN7_RC_CTL_TO_MODE |
+ GEN6_RC_CTL_EI_MODE(1))));
+ seq_printf(m, "Render Power Well: %s\n",
+ (pw_status & VLV_GTLC_PW_RENDER_STATUS_MASK) ? "Up" : "Down");
+ seq_printf(m, "Media Power Well: %s\n",
+ (pw_status & VLV_GTLC_PW_MEDIA_STATUS_MASK) ? "Up" : "Down");
+
+ print_rc6_res(m, "Render RC6 residency since boot:", VLV_GT_RENDER_RC6);
+ print_rc6_res(m, "Media RC6 residency since boot:", VLV_GT_MEDIA_RC6);
+
+ return fw_domains_show(m, NULL);
+}
+
+static int gen6_drpc(struct seq_file *m)
+{
+ struct intel_gt *gt = m->private;
+ struct drm_i915_private *i915 = gt->i915;
+ struct intel_uncore *uncore = gt->uncore;
+ u32 gt_core_status, rcctl1, rc6vids = 0;
+ u32 gen9_powergate_enable = 0, gen9_powergate_status = 0;
+
+ gt_core_status = intel_uncore_read_fw(uncore, GEN6_GT_CORE_STATUS);
+
+ rcctl1 = intel_uncore_read(uncore, GEN6_RC_CONTROL);
+ if (INTEL_GEN(i915) >= 9) {
+ gen9_powergate_enable =
+ intel_uncore_read(uncore, GEN9_PG_ENABLE);
+ gen9_powergate_status =
+ intel_uncore_read(uncore, GEN9_PWRGT_DOMAIN_STATUS);
+ }
+
+ if (INTEL_GEN(i915) <= 7)
+ sandybridge_pcode_read(i915, GEN6_PCODE_READ_RC6VIDS,
+ &rc6vids, NULL);
+
+ seq_printf(m, "RC1e Enabled: %s\n",
+ yesno(rcctl1 & GEN6_RC_CTL_RC1e_ENABLE));
+ seq_printf(m, "RC6 Enabled: %s\n",
+ yesno(rcctl1 & GEN6_RC_CTL_RC6_ENABLE));
+ if (INTEL_GEN(i915) >= 9) {
+ seq_printf(m, "Render Well Gating Enabled: %s\n",
+ yesno(gen9_powergate_enable & GEN9_RENDER_PG_ENABLE));
+ seq_printf(m, "Media Well Gating Enabled: %s\n",
+ yesno(gen9_powergate_enable & GEN9_MEDIA_PG_ENABLE));
+ }
+ seq_printf(m, "Deep RC6 Enabled: %s\n",
+ yesno(rcctl1 & GEN6_RC_CTL_RC6p_ENABLE));
+ seq_printf(m, "Deepest RC6 Enabled: %s\n",
+ yesno(rcctl1 & GEN6_RC_CTL_RC6pp_ENABLE));
+ seq_puts(m, "Current RC state: ");
+ switch (gt_core_status & GEN6_RCn_MASK) {
+ case GEN6_RC0:
+ if (gt_core_status & GEN6_CORE_CPD_STATE_MASK)
+ seq_puts(m, "Core Power Down\n");
+ else
+ seq_puts(m, "on\n");
+ break;
+ case GEN6_RC3:
+ seq_puts(m, "RC3\n");
+ break;
+ case GEN6_RC6:
+ seq_puts(m, "RC6\n");
+ break;
+ case GEN6_RC7:
+ seq_puts(m, "RC7\n");
+ break;
+ default:
+ seq_puts(m, "Unknown\n");
+ break;
+ }
+
+ seq_printf(m, "Core Power Down: %s\n",
+ yesno(gt_core_status & GEN6_CORE_CPD_STATE_MASK));
+ if (INTEL_GEN(i915) >= 9) {
+ seq_printf(m, "Render Power Well: %s\n",
+ (gen9_powergate_status &
+ GEN9_PWRGT_RENDER_STATUS_MASK) ? "Up" : "Down");
+ seq_printf(m, "Media Power Well: %s\n",
+ (gen9_powergate_status &
+ GEN9_PWRGT_MEDIA_STATUS_MASK) ? "Up" : "Down");
+ }
+
+ /* Not exactly sure what this is */
+ print_rc6_res(m, "RC6 \"Locked to RPn\" residency since boot:",
+ GEN6_GT_GFX_RC6_LOCKED);
+ print_rc6_res(m, "RC6 residency since boot:", GEN6_GT_GFX_RC6);
+ print_rc6_res(m, "RC6+ residency since boot:", GEN6_GT_GFX_RC6p);
+ print_rc6_res(m, "RC6++ residency since boot:", GEN6_GT_GFX_RC6pp);
+
+ if (INTEL_GEN(i915) <= 7) {
+ seq_printf(m, "RC6 voltage: %dmV\n",
+ GEN6_DECODE_RC6_VID(((rc6vids >> 0) & 0xff)));
+ seq_printf(m, "RC6+ voltage: %dmV\n",
+ GEN6_DECODE_RC6_VID(((rc6vids >> 8) & 0xff)));
+ seq_printf(m, "RC6++ voltage: %dmV\n",
+ GEN6_DECODE_RC6_VID(((rc6vids >> 16) & 0xff)));
+ }
+
+ return fw_domains_show(m, NULL);
+}
+
+static int ilk_drpc(struct seq_file *m)
+{
+ struct intel_gt *gt = m->private;
+ struct intel_uncore *uncore = gt->uncore;
+ u32 rgvmodectl, rstdbyctl;
+ u16 crstandvid;
+
+ rgvmodectl = intel_uncore_read(uncore, MEMMODECTL);
+ rstdbyctl = intel_uncore_read(uncore, RSTDBYCTL);
+ crstandvid = intel_uncore_read16(uncore, CRSTANDVID);
+
+ seq_printf(m, "HD boost: %s\n", yesno(rgvmodectl & MEMMODE_BOOST_EN));
+ seq_printf(m, "Boost freq: %d\n",
+ (rgvmodectl & MEMMODE_BOOST_FREQ_MASK) >>
+ MEMMODE_BOOST_FREQ_SHIFT);
+ seq_printf(m, "HW control enabled: %s\n",
+ yesno(rgvmodectl & MEMMODE_HWIDLE_EN));
+ seq_printf(m, "SW control enabled: %s\n",
+ yesno(rgvmodectl & MEMMODE_SWMODE_EN));
+ seq_printf(m, "Gated voltage change: %s\n",
+ yesno(rgvmodectl & MEMMODE_RCLK_GATE));
+ seq_printf(m, "Starting frequency: P%d\n",
+ (rgvmodectl & MEMMODE_FSTART_MASK) >> MEMMODE_FSTART_SHIFT);
+ seq_printf(m, "Max P-state: P%d\n",
+ (rgvmodectl & MEMMODE_FMAX_MASK) >> MEMMODE_FMAX_SHIFT);
+ seq_printf(m, "Min P-state: P%d\n", (rgvmodectl & MEMMODE_FMIN_MASK));
+ seq_printf(m, "RS1 VID: %d\n", (crstandvid & 0x3f));
+ seq_printf(m, "RS2 VID: %d\n", ((crstandvid >> 8) & 0x3f));
+ seq_printf(m, "Render standby enabled: %s\n",
+ yesno(!(rstdbyctl & RCX_SW_EXIT)));
+ seq_puts(m, "Current RS state: ");
+ switch (rstdbyctl & RSX_STATUS_MASK) {
+ case RSX_STATUS_ON:
+ seq_puts(m, "on\n");
+ break;
+ case RSX_STATUS_RC1:
+ seq_puts(m, "RC1\n");
+ break;
+ case RSX_STATUS_RC1E:
+ seq_puts(m, "RC1E\n");
+ break;
+ case RSX_STATUS_RS1:
+ seq_puts(m, "RS1\n");
+ break;
+ case RSX_STATUS_RS2:
+ seq_puts(m, "RS2 (RC6)\n");
+ break;
+ case RSX_STATUS_RS3:
+ seq_puts(m, "RC3 (RC6+)\n");
+ break;
+ default:
+ seq_puts(m, "unknown\n");
+ break;
+ }
+
+ return 0;
+}
+
+static int drpc_show(struct seq_file *m, void *unused)
+{
+ struct intel_gt *gt = m->private;
+ struct drm_i915_private *i915 = gt->i915;
+ intel_wakeref_t wakeref;
+ int err = -ENODEV;
+
+ with_intel_runtime_pm(gt->uncore->rpm, wakeref) {
+ if (IS_VALLEYVIEW(i915) || IS_CHERRYVIEW(i915))
+ err = vlv_drpc(m);
+ else if (INTEL_GEN(i915) >= 6)
+ err = gen6_drpc(m);
+ else
+ err = ilk_drpc(m);
+ }
+
+ return err;
+}
+DEFINE_GT_DEBUGFS_ATTRIBUTE(drpc);
+
+static int frequency_show(struct seq_file *m, void *unused)
+{
+ struct intel_gt *gt = m->private;
+ struct drm_i915_private *i915 = gt->i915;
+ struct intel_uncore *uncore = gt->uncore;
+ struct intel_rps *rps = &gt->rps;
+ intel_wakeref_t wakeref;
+
+ wakeref = intel_runtime_pm_get(uncore->rpm);
+
+ if (IS_GEN(i915, 5)) {
+ u16 rgvswctl = intel_uncore_read16(uncore, MEMSWCTL);
+ u16 rgvstat = intel_uncore_read16(uncore, MEMSTAT_ILK);
+
+ seq_printf(m, "Requested P-state: %d\n", (rgvswctl >> 8) & 0xf);
+ seq_printf(m, "Requested VID: %d\n", rgvswctl & 0x3f);
+ seq_printf(m, "Current VID: %d\n", (rgvstat & MEMSTAT_VID_MASK) >>
+ MEMSTAT_VID_SHIFT);
+ seq_printf(m, "Current P-state: %d\n",
+ (rgvstat & MEMSTAT_PSTATE_MASK) >> MEMSTAT_PSTATE_SHIFT);
+ } else if (IS_VALLEYVIEW(i915) || IS_CHERRYVIEW(i915)) {
+ u32 rpmodectl, freq_sts;
+
+ rpmodectl = intel_uncore_read(uncore, GEN6_RP_CONTROL);
+ seq_printf(m, "Video Turbo Mode: %s\n",
+ yesno(rpmodectl & GEN6_RP_MEDIA_TURBO));
+ seq_printf(m, "HW control enabled: %s\n",
+ yesno(rpmodectl & GEN6_RP_ENABLE));
+ seq_printf(m, "SW control enabled: %s\n",
+ yesno((rpmodectl & GEN6_RP_MEDIA_MODE_MASK) ==
+ GEN6_RP_MEDIA_SW_MODE));
+
+ vlv_punit_get(i915);
+ freq_sts = vlv_punit_read(i915, PUNIT_REG_GPU_FREQ_STS);
+ vlv_punit_put(i915);
+
+ seq_printf(m, "PUNIT_REG_GPU_FREQ_STS: 0x%08x\n", freq_sts);
+ seq_printf(m, "DDR freq: %d MHz\n", i915->mem_freq);
+
+ seq_printf(m, "actual GPU freq: %d MHz\n",
+ intel_gpu_freq(rps, (freq_sts >> 8) & 0xff));
+
+ seq_printf(m, "current GPU freq: %d MHz\n",
+ intel_gpu_freq(rps, rps->cur_freq));
+
+ seq_printf(m, "max GPU freq: %d MHz\n",
+ intel_gpu_freq(rps, rps->max_freq));
+
+ seq_printf(m, "min GPU freq: %d MHz\n",
+ intel_gpu_freq(rps, rps->min_freq));
+
+ seq_printf(m, "idle GPU freq: %d MHz\n",
+ intel_gpu_freq(rps, rps->idle_freq));
+
+ seq_printf(m, "efficient (RPe) frequency: %d MHz\n",
+ intel_gpu_freq(rps, rps->efficient_freq));
+ } else if (INTEL_GEN(i915) >= 6) {
+ u32 rp_state_limits;
+ u32 gt_perf_status;
+ u32 rp_state_cap;
+ u32 rpmodectl, rpinclimit, rpdeclimit;
+ u32 rpstat, cagf, reqf;
+ u32 rpupei, rpcurup, rpprevup;
+ u32 rpdownei, rpcurdown, rpprevdown;
+ u32 pm_ier, pm_imr, pm_isr, pm_iir, pm_mask;
+ int max_freq;
+
+ rp_state_limits = intel_uncore_read(uncore, GEN6_RP_STATE_LIMITS);
+ if (IS_GEN9_LP(i915)) {
+ rp_state_cap = intel_uncore_read(uncore, BXT_RP_STATE_CAP);
+ gt_perf_status = intel_uncore_read(uncore, BXT_GT_PERF_STATUS);
+ } else {
+ rp_state_cap = intel_uncore_read(uncore, GEN6_RP_STATE_CAP);
+ gt_perf_status = intel_uncore_read(uncore, GEN6_GT_PERF_STATUS);
+ }
+
+ /* RPSTAT1 is in the GT power well */
+ intel_uncore_forcewake_get(uncore, FORCEWAKE_ALL);
+
+ reqf = intel_uncore_read(uncore, GEN6_RPNSWREQ);
+ if (INTEL_GEN(i915) >= 9) {
+ reqf >>= 23;
+ } else {
+ reqf &= ~GEN6_TURBO_DISABLE;
+ if (IS_HASWELL(i915) || IS_BROADWELL(i915))
+ reqf >>= 24;
+ else
+ reqf >>= 25;
+ }
+ reqf = intel_gpu_freq(rps, reqf);
+
+ rpmodectl = intel_uncore_read(uncore, GEN6_RP_CONTROL);
+ rpinclimit = intel_uncore_read(uncore, GEN6_RP_UP_THRESHOLD);
+ rpdeclimit = intel_uncore_read(uncore, GEN6_RP_DOWN_THRESHOLD);
+
+ rpstat = intel_uncore_read(uncore, GEN6_RPSTAT1);
+ rpupei = intel_uncore_read(uncore, GEN6_RP_CUR_UP_EI) & GEN6_CURICONT_MASK;
+ rpcurup = intel_uncore_read(uncore, GEN6_RP_CUR_UP) & GEN6_CURBSYTAVG_MASK;
+ rpprevup = intel_uncore_read(uncore, GEN6_RP_PREV_UP) & GEN6_CURBSYTAVG_MASK;
+ rpdownei = intel_uncore_read(uncore, GEN6_RP_CUR_DOWN_EI) & GEN6_CURIAVG_MASK;
+ rpcurdown = intel_uncore_read(uncore, GEN6_RP_CUR_DOWN) & GEN6_CURBSYTAVG_MASK;
+ rpprevdown = intel_uncore_read(uncore, GEN6_RP_PREV_DOWN) & GEN6_CURBSYTAVG_MASK;
+ cagf = intel_rps_read_actual_frequency(rps);
+
+ intel_uncore_forcewake_put(uncore, FORCEWAKE_ALL);
+
+ if (INTEL_GEN(i915) >= 11) {
+ pm_ier = intel_uncore_read(uncore, GEN11_GPM_WGBOXPERF_INTR_ENABLE);
+ pm_imr = intel_uncore_read(uncore, GEN11_GPM_WGBOXPERF_INTR_MASK);
+ /*
+ * The equivalent to the PM ISR & IIR cannot be read
+ * without affecting the current state of the system
+ */
+ pm_isr = 0;
+ pm_iir = 0;
+ } else if (INTEL_GEN(i915) >= 8) {
+ pm_ier = intel_uncore_read(uncore, GEN8_GT_IER(2));
+ pm_imr = intel_uncore_read(uncore, GEN8_GT_IMR(2));
+ pm_isr = intel_uncore_read(uncore, GEN8_GT_ISR(2));
+ pm_iir = intel_uncore_read(uncore, GEN8_GT_IIR(2));
+ } else {
+ pm_ier = intel_uncore_read(uncore, GEN6_PMIER);
+ pm_imr = intel_uncore_read(uncore, GEN6_PMIMR);
+ pm_isr = intel_uncore_read(uncore, GEN6_PMISR);
+ pm_iir = intel_uncore_read(uncore, GEN6_PMIIR);
+ }
+ pm_mask = intel_uncore_read(uncore, GEN6_PMINTRMSK);
+
+ seq_printf(m, "Video Turbo Mode: %s\n",
+ yesno(rpmodectl & GEN6_RP_MEDIA_TURBO));
+ seq_printf(m, "HW control enabled: %s\n",
+ yesno(rpmodectl & GEN6_RP_ENABLE));
+ seq_printf(m, "SW control enabled: %s\n",
+ yesno((rpmodectl & GEN6_RP_MEDIA_MODE_MASK) ==
+ GEN6_RP_MEDIA_SW_MODE));
+
+ seq_printf(m, "PM IER=0x%08x IMR=0x%08x, MASK=0x%08x\n",
+ pm_ier, pm_imr, pm_mask);
+ if (INTEL_GEN(i915) <= 10)
+ seq_printf(m, "PM ISR=0x%08x IIR=0x%08x\n",
+ pm_isr, pm_iir);
+ seq_printf(m, "pm_intrmsk_mbz: 0x%08x\n",
+ rps->pm_intrmsk_mbz);
+ seq_printf(m, "GT_PERF_STATUS: 0x%08x\n", gt_perf_status);
+ seq_printf(m, "Render p-state ratio: %d\n",
+ (gt_perf_status & (INTEL_GEN(i915) >= 9 ? 0x1ff00 : 0xff00)) >> 8);
+ seq_printf(m, "Render p-state VID: %d\n",
+ gt_perf_status & 0xff);
+ seq_printf(m, "Render p-state limit: %d\n",
+ rp_state_limits & 0xff);
+ seq_printf(m, "RPSTAT1: 0x%08x\n", rpstat);
+ seq_printf(m, "RPMODECTL: 0x%08x\n", rpmodectl);
+ seq_printf(m, "RPINCLIMIT: 0x%08x\n", rpinclimit);
+ seq_printf(m, "RPDECLIMIT: 0x%08x\n", rpdeclimit);
+ seq_printf(m, "RPNSWREQ: %dMHz\n", reqf);
+ seq_printf(m, "CAGF: %dMHz\n", cagf);
+ seq_printf(m, "RP CUR UP EI: %d (%dus)\n",
+ rpupei, GT_PM_INTERVAL_TO_US(i915, rpupei));
+ seq_printf(m, "RP CUR UP: %d (%dus)\n",
+ rpcurup, GT_PM_INTERVAL_TO_US(i915, rpcurup));
+ seq_printf(m, "RP PREV UP: %d (%dus)\n",
+ rpprevup, GT_PM_INTERVAL_TO_US(i915, rpprevup));
+ seq_printf(m, "Up threshold: %d%%\n",
+ rps->power.up_threshold);
+
+ seq_printf(m, "RP CUR DOWN EI: %d (%dus)\n",
+ rpdownei, GT_PM_INTERVAL_TO_US(i915, rpdownei));
+ seq_printf(m, "RP CUR DOWN: %d (%dus)\n",
+ rpcurdown, GT_PM_INTERVAL_TO_US(i915, rpcurdown));
+ seq_printf(m, "RP PREV DOWN: %d (%dus)\n",
+ rpprevdown, GT_PM_INTERVAL_TO_US(i915, rpprevdown));
+ seq_printf(m, "Down threshold: %d%%\n",
+ rps->power.down_threshold);
+
+ max_freq = (IS_GEN9_LP(i915) ? rp_state_cap >> 0 :
+ rp_state_cap >> 16) & 0xff;
+ max_freq *= (IS_GEN9_BC(i915) ||
+ INTEL_GEN(i915) >= 10 ? GEN9_FREQ_SCALER : 1);
+ seq_printf(m, "Lowest (RPN) frequency: %dMHz\n",
+ intel_gpu_freq(rps, max_freq));
+
+ max_freq = (rp_state_cap & 0xff00) >> 8;
+ max_freq *= (IS_GEN9_BC(i915) ||
+ INTEL_GEN(i915) >= 10 ? GEN9_FREQ_SCALER : 1);
+ seq_printf(m, "Nominal (RP1) frequency: %dMHz\n",
+ intel_gpu_freq(rps, max_freq));
+
+ max_freq = (IS_GEN9_LP(i915) ? rp_state_cap >> 16 :
+ rp_state_cap >> 0) & 0xff;
+ max_freq *= (IS_GEN9_BC(i915) ||
+ INTEL_GEN(i915) >= 10 ? GEN9_FREQ_SCALER : 1);
+ seq_printf(m, "Max non-overclocked (RP0) frequency: %dMHz\n",
+ intel_gpu_freq(rps, max_freq));
+ seq_printf(m, "Max overclocked frequency: %dMHz\n",
+ intel_gpu_freq(rps, rps->max_freq));
+
+ seq_printf(m, "Current freq: %d MHz\n",
+ intel_gpu_freq(rps, rps->cur_freq));
+ seq_printf(m, "Actual freq: %d MHz\n", cagf);
+ seq_printf(m, "Idle freq: %d MHz\n",
+ intel_gpu_freq(rps, rps->idle_freq));
+ seq_printf(m, "Min freq: %d MHz\n",
+ intel_gpu_freq(rps, rps->min_freq));
+ seq_printf(m, "Boost freq: %d MHz\n",
+ intel_gpu_freq(rps, rps->boost_freq));
+ seq_printf(m, "Max freq: %d MHz\n",
+ intel_gpu_freq(rps, rps->max_freq));
+ seq_printf(m,
+ "efficient (RPe) frequency: %d MHz\n",
+ intel_gpu_freq(rps, rps->efficient_freq));
+ } else {
+ seq_puts(m, "no P-state info available\n");
+ }
+
+ seq_printf(m, "Current CD clock frequency: %d kHz\n", i915->cdclk.hw.cdclk);
+ seq_printf(m, "Max CD clock frequency: %d kHz\n", i915->max_cdclk_freq);
+ seq_printf(m, "Max pixel clock frequency: %d kHz\n", i915->max_dotclk_freq);
+
+ intel_runtime_pm_put(uncore->rpm, wakeref);
+
+ return 0;
+}
+DEFINE_GT_DEBUGFS_ATTRIBUTE(frequency);
+
+static int llc_show(struct seq_file *m, void *data)
+{
+ struct intel_gt *gt = m->private;
+ struct drm_i915_private *i915 = gt->i915;
+ const bool edram = INTEL_GEN(i915) > 8;
+ struct intel_rps *rps = &gt->rps;
+ unsigned int max_gpu_freq, min_gpu_freq;
+ intel_wakeref_t wakeref;
+ int gpu_freq, ia_freq;
+
+ seq_printf(m, "LLC: %s\n", yesno(HAS_LLC(i915)));
+ seq_printf(m, "%s: %uMB\n", edram ? "eDRAM" : "eLLC",
+ i915->edram_size_mb);
+
+ min_gpu_freq = rps->min_freq;
+ max_gpu_freq = rps->max_freq;
+ if (IS_GEN9_BC(i915) || INTEL_GEN(i915) >= 10) {
+ /* Convert GT frequency to 50 MHz units */
+ min_gpu_freq /= GEN9_FREQ_SCALER;
+ max_gpu_freq /= GEN9_FREQ_SCALER;
+ }
+
+ seq_puts(m, "GPU freq (MHz)\tEffective CPU freq (MHz)\tEffective Ring freq (MHz)\n");
+
+ wakeref = intel_runtime_pm_get(gt->uncore->rpm);
+ for (gpu_freq = min_gpu_freq; gpu_freq <= max_gpu_freq; gpu_freq++) {
+ ia_freq = gpu_freq;
+ sandybridge_pcode_read(i915,
+ GEN6_PCODE_READ_MIN_FREQ_TABLE,
+ &ia_freq, NULL);
+ seq_printf(m, "%d\t\t%d\t\t\t\t%d\n",
+ intel_gpu_freq(rps,
+ (gpu_freq *
+ (IS_GEN9_BC(i915) ||
+ INTEL_GEN(i915) >= 10 ?
+ GEN9_FREQ_SCALER : 1))),
+ ((ia_freq >> 0) & 0xff) * 100,
+ ((ia_freq >> 8) & 0xff) * 100);
+ }
+ intel_runtime_pm_put(gt->uncore->rpm, wakeref);
+
+ return 0;
+}
+
+static bool llc_eval(const struct intel_gt *gt)
+{
+ return HAS_LLC(gt->i915);
+}
+
+DEFINE_GT_DEBUGFS_ATTRIBUTE(llc);
+
+static const char *rps_power_to_str(unsigned int power)
+{
+ static const char * const strings[] = {
+ [LOW_POWER] = "low power",
+ [BETWEEN] = "mixed",
+ [HIGH_POWER] = "high power",
+ };
+
+ if (power >= ARRAY_SIZE(strings) || !strings[power])
+ return "unknown";
+
+ return strings[power];
+}
+
+static int rps_boost_show(struct seq_file *m, void *data)
+{
+ struct intel_gt *gt = m->private;
+ struct drm_i915_private *i915 = gt->i915;
+ struct intel_rps *rps = &gt->rps;
+
+ seq_printf(m, "RPS enabled? %d\n", rps->enabled);
+ seq_printf(m, "GPU busy? %s\n", yesno(gt->awake));
+ seq_printf(m, "Boosts outstanding? %d\n",
+ atomic_read(&rps->num_waiters));
+ seq_printf(m, "Interactive? %d\n", READ_ONCE(rps->power.interactive));
+ seq_printf(m, "Frequency requested %d, actual %d\n",
+ intel_gpu_freq(rps, rps->cur_freq),
+ intel_rps_read_actual_frequency(rps));
+ seq_printf(m, " min hard:%d, soft:%d; max soft:%d, hard:%d\n",
+ intel_gpu_freq(rps, rps->min_freq),
+ intel_gpu_freq(rps, rps->min_freq_softlimit),
+ intel_gpu_freq(rps, rps->max_freq_softlimit),
+ intel_gpu_freq(rps, rps->max_freq));
+ seq_printf(m, " idle:%d, efficient:%d, boost:%d\n",
+ intel_gpu_freq(rps, rps->idle_freq),
+ intel_gpu_freq(rps, rps->efficient_freq),
+ intel_gpu_freq(rps, rps->boost_freq));
+
+ seq_printf(m, "Wait boosts: %d\n", atomic_read(&rps->boosts));
+
+ if (INTEL_GEN(i915) >= 6 && rps->enabled && gt->awake) {
+ struct intel_uncore *uncore = gt->uncore;
+ u32 rpup, rpupei;
+ u32 rpdown, rpdownei;
+
+ intel_uncore_forcewake_get(uncore, FORCEWAKE_ALL);
+ rpup = intel_uncore_read_fw(uncore, GEN6_RP_CUR_UP) & GEN6_RP_EI_MASK;
+ rpupei = intel_uncore_read_fw(uncore, GEN6_RP_CUR_UP_EI) & GEN6_RP_EI_MASK;
+ rpdown = intel_uncore_read_fw(uncore, GEN6_RP_CUR_DOWN) & GEN6_RP_EI_MASK;
+ rpdownei = intel_uncore_read_fw(uncore, GEN6_RP_CUR_DOWN_EI) & GEN6_RP_EI_MASK;
+ intel_uncore_forcewake_put(uncore, FORCEWAKE_ALL);
+
+ seq_printf(m, "\nRPS Autotuning (current \"%s\" window):\n",
+ rps_power_to_str(rps->power.mode));
+ seq_printf(m, " Avg. up: %d%% [above threshold? %d%%]\n",
+ rpup && rpupei ? 100 * rpup / rpupei : 0,
+ rps->power.up_threshold);
+ seq_printf(m, " Avg. down: %d%% [below threshold? %d%%]\n",
+ rpdown && rpdownei ? 100 * rpdown / rpdownei : 0,
+ rps->power.down_threshold);
+ } else {
+ seq_puts(m, "\nRPS Autotuning inactive\n");
+ }
+
+ return 0;
+}
+
+static bool rps_eval(const struct intel_gt *gt)
+{
+ return HAS_RPS(gt->i915);
+}
+
+DEFINE_GT_DEBUGFS_ATTRIBUTE(rps_boost);
+
+void debugfs_gt_pm_register(struct intel_gt *gt, struct dentry *root)
+{
+ static const struct debugfs_gt_file files[] = {
+ { "drpc", &drpc_fops, NULL },
+ { "frequency", &frequency_fops, NULL },
+ { "forcewake", &fw_domains_fops, NULL },
+ { "llc", &llc_fops, llc_eval },
+ { "rps_boost", &rps_boost_fops, rps_eval },
+ };
+
+ debugfs_gt_register_files(gt, root, files, ARRAY_SIZE(files));
+}
diff --git a/drivers/gpu/drm/i915/gt/debugfs_gt_pm.h b/drivers/gpu/drm/i915/gt/debugfs_gt_pm.h
new file mode 100644
index 000000000000..4cf5f5c9da7d
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/debugfs_gt_pm.h
@@ -0,0 +1,14 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2019 Intel Corporation
+ */
+
+#ifndef DEBUGFS_GT_PM_H
+#define DEBUGFS_GT_PM_H
+
+struct intel_gt;
+struct dentry;
+
+void debugfs_gt_pm_register(struct intel_gt *gt, struct dentry *root);
+
+#endif /* DEBUGFS_GT_PM_H */
diff --git a/drivers/gpu/drm/i915/gt/gen6_ppgtt.c b/drivers/gpu/drm/i915/gt/gen6_ppgtt.c
new file mode 100644
index 000000000000..f4fec7eb4064
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/gen6_ppgtt.c
@@ -0,0 +1,483 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2020 Intel Corporation
+ */
+
+#include <linux/log2.h>
+
+#include "gen6_ppgtt.h"
+#include "i915_scatterlist.h"
+#include "i915_trace.h"
+#include "i915_vgpu.h"
+#include "intel_gt.h"
+
+/* Write pde (index) from the page directory @pd to the page table @pt */
+static inline void gen6_write_pde(const struct gen6_ppgtt *ppgtt,
+ const unsigned int pde,
+ const struct i915_page_table *pt)
+{
+ /* Caller needs to make sure the write completes if necessary */
+ iowrite32(GEN6_PDE_ADDR_ENCODE(px_dma(pt)) | GEN6_PDE_VALID,
+ ppgtt->pd_addr + pde);
+}
+
+void gen7_ppgtt_enable(struct intel_gt *gt)
+{
+ struct drm_i915_private *i915 = gt->i915;
+ struct intel_uncore *uncore = gt->uncore;
+ struct intel_engine_cs *engine;
+ enum intel_engine_id id;
+ u32 ecochk;
+
+ intel_uncore_rmw(uncore, GAC_ECO_BITS, 0, ECOBITS_PPGTT_CACHE64B);
+
+ ecochk = intel_uncore_read(uncore, GAM_ECOCHK);
+ if (IS_HASWELL(i915)) {
+ ecochk |= ECOCHK_PPGTT_WB_HSW;
+ } else {
+ ecochk |= ECOCHK_PPGTT_LLC_IVB;
+ ecochk &= ~ECOCHK_PPGTT_GFDT_IVB;
+ }
+ intel_uncore_write(uncore, GAM_ECOCHK, ecochk);
+
+ for_each_engine(engine, gt, id) {
+ /* GFX_MODE is per-ring on gen7+ */
+ ENGINE_WRITE(engine,
+ RING_MODE_GEN7,
+ _MASKED_BIT_ENABLE(GFX_PPGTT_ENABLE));
+ }
+}
+
+void gen6_ppgtt_enable(struct intel_gt *gt)
+{
+ struct intel_uncore *uncore = gt->uncore;
+
+ intel_uncore_rmw(uncore,
+ GAC_ECO_BITS,
+ 0,
+ ECOBITS_SNB_BIT | ECOBITS_PPGTT_CACHE64B);
+
+ intel_uncore_rmw(uncore,
+ GAB_CTL,
+ 0,
+ GAB_CTL_CONT_AFTER_PAGEFAULT);
+
+ intel_uncore_rmw(uncore,
+ GAM_ECOCHK,
+ 0,
+ ECOCHK_SNB_BIT | ECOCHK_PPGTT_CACHE64B);
+
+ if (HAS_PPGTT(uncore->i915)) /* may be disabled for VT-d */
+ intel_uncore_write(uncore,
+ GFX_MODE,
+ _MASKED_BIT_ENABLE(GFX_PPGTT_ENABLE));
+}
+
+/* PPGTT support for Sandybridge/Gen6 and later */
+static void gen6_ppgtt_clear_range(struct i915_address_space *vm,
+ u64 start, u64 length)
+{
+ struct gen6_ppgtt * const ppgtt = to_gen6_ppgtt(i915_vm_to_ppgtt(vm));
+ const unsigned int first_entry = start / I915_GTT_PAGE_SIZE;
+ const gen6_pte_t scratch_pte = vm->scratch[0].encode;
+ unsigned int pde = first_entry / GEN6_PTES;
+ unsigned int pte = first_entry % GEN6_PTES;
+ unsigned int num_entries = length / I915_GTT_PAGE_SIZE;
+
+ while (num_entries) {
+ struct i915_page_table * const pt =
+ i915_pt_entry(ppgtt->base.pd, pde++);
+ const unsigned int count = min(num_entries, GEN6_PTES - pte);
+ gen6_pte_t *vaddr;
+
+ GEM_BUG_ON(px_base(pt) == px_base(&vm->scratch[1]));
+
+ num_entries -= count;
+
+ GEM_BUG_ON(count > atomic_read(&pt->used));
+ if (!atomic_sub_return(count, &pt->used))
+ ppgtt->scan_for_unused_pt = true;
+
+ /*
+ * Note that the hw doesn't support removing PDEs on the fly
+ * (they are cached inside the context with no means to
+ * invalidate the cache), so we can only reset the PTE
+ * entries back to scratch.
+ */
+
+ vaddr = kmap_atomic_px(pt);
+ memset32(vaddr + pte, scratch_pte, count);
+ kunmap_atomic(vaddr);
+
+ pte = 0;
+ }
+}
+
+static void gen6_ppgtt_insert_entries(struct i915_address_space *vm,
+ struct i915_vma *vma,
+ enum i915_cache_level cache_level,
+ u32 flags)
+{
+ struct i915_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);
+ struct i915_page_directory * const pd = ppgtt->pd;
+ unsigned int first_entry = vma->node.start / I915_GTT_PAGE_SIZE;
+ unsigned int act_pt = first_entry / GEN6_PTES;
+ unsigned int act_pte = first_entry % GEN6_PTES;
+ const u32 pte_encode = vm->pte_encode(0, cache_level, flags);
+ struct sgt_dma iter = sgt_dma(vma);
+ gen6_pte_t *vaddr;
+
+ GEM_BUG_ON(pd->entry[act_pt] == &vm->scratch[1]);
+
+ vaddr = kmap_atomic_px(i915_pt_entry(pd, act_pt));
+ do {
+ GEM_BUG_ON(iter.sg->length < I915_GTT_PAGE_SIZE);
+ vaddr[act_pte] = pte_encode | GEN6_PTE_ADDR_ENCODE(iter.dma);
+
+ iter.dma += I915_GTT_PAGE_SIZE;
+ if (iter.dma == iter.max) {
+ iter.sg = __sg_next(iter.sg);
+ if (!iter.sg)
+ break;
+
+ iter.dma = sg_dma_address(iter.sg);
+ iter.max = iter.dma + iter.sg->length;
+ }
+
+ if (++act_pte == GEN6_PTES) {
+ kunmap_atomic(vaddr);
+ vaddr = kmap_atomic_px(i915_pt_entry(pd, ++act_pt));
+ act_pte = 0;
+ }
+ } while (1);
+ kunmap_atomic(vaddr);
+
+ vma->page_sizes.gtt = I915_GTT_PAGE_SIZE;
+}
+
+static void gen6_flush_pd(struct gen6_ppgtt *ppgtt, u64 start, u64 end)
+{
+ struct i915_page_directory * const pd = ppgtt->base.pd;
+ struct i915_page_table *pt;
+ unsigned int pde;
+
+ start = round_down(start, SZ_64K);
+ end = round_up(end, SZ_64K) - start;
+
+ mutex_lock(&ppgtt->flush);
+
+ gen6_for_each_pde(pt, pd, start, end, pde)
+ gen6_write_pde(ppgtt, pde, pt);
+
+ mb();
+ ioread32(ppgtt->pd_addr + pde - 1);
+ gen6_ggtt_invalidate(ppgtt->base.vm.gt->ggtt);
+ mb();
+
+ mutex_unlock(&ppgtt->flush);
+}
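+
+/*
+ * The ioread32() of the last PDE written acts as a posting read, flushing
+ * the preceding iowrite32()s out to the GGTT before the TLB invalidate;
+ * the mb()s order the CPU side of the sequence.
+ */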
+
+static int gen6_alloc_va_range(struct i915_address_space *vm,
+ u64 start, u64 length)
+{
+ struct gen6_ppgtt *ppgtt = to_gen6_ppgtt(i915_vm_to_ppgtt(vm));
+ struct i915_page_directory * const pd = ppgtt->base.pd;
+ struct i915_page_table *pt, *alloc = NULL;
+ intel_wakeref_t wakeref;
+ u64 from = start;
+ unsigned int pde;
+ int ret = 0;
+
+ wakeref = intel_runtime_pm_get(&vm->i915->runtime_pm);
+
+ spin_lock(&pd->lock);
+ gen6_for_each_pde(pt, pd, start, length, pde) {
+ const unsigned int count = gen6_pte_count(start, length);
+
+ if (px_base(pt) == px_base(&vm->scratch[1])) {
+ spin_unlock(&pd->lock);
+
+ pt = fetch_and_zero(&alloc);
+ if (!pt)
+ pt = alloc_pt(vm);
+ if (IS_ERR(pt)) {
+ ret = PTR_ERR(pt);
+ goto unwind_out;
+ }
+
+ fill32_px(pt, vm->scratch[0].encode);
+
+ spin_lock(&pd->lock);
+ if (pd->entry[pde] == &vm->scratch[1]) {
+ pd->entry[pde] = pt;
+ } else {
+ alloc = pt;
+ pt = pd->entry[pde];
+ }
+ }
+
+ atomic_add(count, &pt->used);
+ }
+ spin_unlock(&pd->lock);
+
+ if (i915_vma_is_bound(ppgtt->vma, I915_VMA_GLOBAL_BIND))
+ gen6_flush_pd(ppgtt, from, start);
+
+ goto out;
+
+unwind_out:
+ gen6_ppgtt_clear_range(vm, from, start - from);
+out:
+ if (alloc)
+ free_px(vm, alloc);
+ intel_runtime_pm_put(&vm->i915->runtime_pm, wakeref);
+ return ret;
+}
+
+static int gen6_ppgtt_init_scratch(struct gen6_ppgtt *ppgtt)
+{
+ struct i915_address_space * const vm = &ppgtt->base.vm;
+ struct i915_page_directory * const pd = ppgtt->base.pd;
+ int ret;
+
+ ret = setup_scratch_page(vm, __GFP_HIGHMEM);
+ if (ret)
+ return ret;
+
+ vm->scratch[0].encode =
+ vm->pte_encode(px_dma(&vm->scratch[0]),
+ I915_CACHE_NONE, PTE_READ_ONLY);
+
+ if (unlikely(setup_page_dma(vm, px_base(&vm->scratch[1])))) {
+ cleanup_scratch_page(vm);
+ return -ENOMEM;
+ }
+
+ fill32_px(&vm->scratch[1], vm->scratch[0].encode);
+ memset_p(pd->entry, &vm->scratch[1], I915_PDES);
+
+ return 0;
+}
+
+static void gen6_ppgtt_free_pd(struct gen6_ppgtt *ppgtt)
+{
+ struct i915_page_directory * const pd = ppgtt->base.pd;
+ struct i915_page_dma * const scratch =
+ px_base(&ppgtt->base.vm.scratch[1]);
+ struct i915_page_table *pt;
+ u32 pde;
+
+ gen6_for_all_pdes(pt, pd, pde)
+ if (px_base(pt) != scratch)
+ free_px(&ppgtt->base.vm, pt);
+}
+
+static void gen6_ppgtt_cleanup(struct i915_address_space *vm)
+{
+ struct gen6_ppgtt *ppgtt = to_gen6_ppgtt(i915_vm_to_ppgtt(vm));
+
+ __i915_vma_put(ppgtt->vma);
+
+ gen6_ppgtt_free_pd(ppgtt);
+ free_scratch(vm);
+
+ mutex_destroy(&ppgtt->flush);
+ mutex_destroy(&ppgtt->pin_mutex);
+ kfree(ppgtt->base.pd);
+}
+
+static int pd_vma_set_pages(struct i915_vma *vma)
+{
+ vma->pages = ERR_PTR(-ENODEV);
+ return 0;
+}
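+
+/*
+ * The PD vma has no backing pages of its own: stashing an error pointer
+ * in vma->pages satisfies the "pages are set" contract of the vma code
+ * while making any accidental dereference fail loudly.
+ */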
+
+static void pd_vma_clear_pages(struct i915_vma *vma)
+{
+ GEM_BUG_ON(!vma->pages);
+
+ vma->pages = NULL;
+}
+
+static int pd_vma_bind(struct i915_vma *vma,
+ enum i915_cache_level cache_level,
+ u32 unused)
+{
+ struct i915_ggtt *ggtt = i915_vm_to_ggtt(vma->vm);
+ struct gen6_ppgtt *ppgtt = vma->private;
+ u32 ggtt_offset = i915_ggtt_offset(vma) / I915_GTT_PAGE_SIZE;
+
+ px_base(ppgtt->base.pd)->ggtt_offset = ggtt_offset * sizeof(gen6_pte_t);
+ ppgtt->pd_addr = (gen6_pte_t __iomem *)ggtt->gsm + ggtt_offset;
+
+ gen6_flush_pd(ppgtt, 0, ppgtt->base.vm.total);
+ return 0;
+}
+
+static void pd_vma_unbind(struct i915_vma *vma)
+{
+ struct gen6_ppgtt *ppgtt = vma->private;
+ struct i915_page_directory * const pd = ppgtt->base.pd;
+ struct i915_page_dma * const scratch =
+ px_base(&ppgtt->base.vm.scratch[1]);
+ struct i915_page_table *pt;
+ unsigned int pde;
+
+ if (!ppgtt->scan_for_unused_pt)
+ return;
+
+ /* Free all no longer used page tables */
+ gen6_for_all_pdes(pt, ppgtt->base.pd, pde) {
+ if (px_base(pt) == scratch || atomic_read(&pt->used))
+ continue;
+
+ free_px(&ppgtt->base.vm, pt);
+ pd->entry[pde] = scratch;
+ }
+
+ ppgtt->scan_for_unused_pt = false;
+}
+
+static const struct i915_vma_ops pd_vma_ops = {
+ .set_pages = pd_vma_set_pages,
+ .clear_pages = pd_vma_clear_pages,
+ .bind_vma = pd_vma_bind,
+ .unbind_vma = pd_vma_unbind,
+};
+
+static struct i915_vma *pd_vma_create(struct gen6_ppgtt *ppgtt, int size)
+{
+ struct i915_ggtt *ggtt = ppgtt->base.vm.gt->ggtt;
+ struct i915_vma *vma;
+
+ GEM_BUG_ON(!IS_ALIGNED(size, I915_GTT_PAGE_SIZE));
+ GEM_BUG_ON(size > ggtt->vm.total);
+
+ vma = i915_vma_alloc();
+ if (!vma)
+ return ERR_PTR(-ENOMEM);
+
+ i915_active_init(&vma->active, NULL, NULL);
+
+ kref_init(&vma->ref);
+ mutex_init(&vma->pages_mutex);
+ vma->vm = i915_vm_get(&ggtt->vm);
+ vma->ops = &pd_vma_ops;
+ vma->private = ppgtt;
+
+ vma->size = size;
+ vma->fence_size = size;
+ atomic_set(&vma->flags, I915_VMA_GGTT);
+ vma->ggtt_view.type = I915_GGTT_VIEW_ROTATED; /* prevent fencing */
+
+ INIT_LIST_HEAD(&vma->obj_link);
+ INIT_LIST_HEAD(&vma->closed_link);
+
+ return vma;
+}
+
+int gen6_ppgtt_pin(struct i915_ppgtt *base)
+{
+ struct gen6_ppgtt *ppgtt = to_gen6_ppgtt(base);
+ int err;
+
+ GEM_BUG_ON(!atomic_read(&ppgtt->base.vm.open));
+
+ /*
+ * Workaround the limited maximum vma->pin_count and the aliasing_ppgtt
+ * which will be pinned into every active context.
+ * (When vma->pin_count becomes atomic, I expect we will naturally
+ * need a larger, unpacked, type and kill this redundancy.)
+ */
+ if (atomic_add_unless(&ppgtt->pin_count, 1, 0))
+ return 0;
+
+ if (mutex_lock_interruptible(&ppgtt->pin_mutex))
+ return -EINTR;
+
+ /*
+ * PPGTT PDEs reside in the GGTT and consist of 512 entries. The
+ * allocator works in address space sizes, so it's multiplied by page
+ * size. We allocate at the top of the GTT to avoid fragmentation.
+ */
+ err = 0;
+ if (!atomic_read(&ppgtt->pin_count))
+ err = i915_ggtt_pin(ppgtt->vma, GEN6_PD_ALIGN, PIN_HIGH);
+ if (!err)
+ atomic_inc(&ppgtt->pin_count);
+ mutex_unlock(&ppgtt->pin_mutex);
+
+ return err;
+}
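+
+/*
+ * Note the fast path: atomic_add_unless(&pin_count, 1, 0) only succeeds
+ * when the ppgtt is already pinned (pin_count != 0); the first pin always
+ * takes pin_mutex and performs the GGTT allocation.
+ */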
+
+void gen6_ppgtt_unpin(struct i915_ppgtt *base)
+{
+ struct gen6_ppgtt *ppgtt = to_gen6_ppgtt(base);
+
+ GEM_BUG_ON(!atomic_read(&ppgtt->pin_count));
+ if (atomic_dec_and_test(&ppgtt->pin_count))
+ i915_vma_unpin(ppgtt->vma);
+}
+
+void gen6_ppgtt_unpin_all(struct i915_ppgtt *base)
+{
+ struct gen6_ppgtt *ppgtt = to_gen6_ppgtt(base);
+
+ if (!atomic_read(&ppgtt->pin_count))
+ return;
+
+ i915_vma_unpin(ppgtt->vma);
+ atomic_set(&ppgtt->pin_count, 0);
+}
+
+struct i915_ppgtt *gen6_ppgtt_create(struct intel_gt *gt)
+{
+ struct i915_ggtt * const ggtt = gt->ggtt;
+ struct gen6_ppgtt *ppgtt;
+ int err;
+
+ ppgtt = kzalloc(sizeof(*ppgtt), GFP_KERNEL);
+ if (!ppgtt)
+ return ERR_PTR(-ENOMEM);
+
+ mutex_init(&ppgtt->flush);
+ mutex_init(&ppgtt->pin_mutex);
+
+ ppgtt_init(&ppgtt->base, gt);
+ ppgtt->base.vm.top = 1;
+
+ ppgtt->base.vm.bind_async_flags = I915_VMA_LOCAL_BIND;
+ ppgtt->base.vm.allocate_va_range = gen6_alloc_va_range;
+ ppgtt->base.vm.clear_range = gen6_ppgtt_clear_range;
+ ppgtt->base.vm.insert_entries = gen6_ppgtt_insert_entries;
+ ppgtt->base.vm.cleanup = gen6_ppgtt_cleanup;
+
+ ppgtt->base.vm.pte_encode = ggtt->vm.pte_encode;
+
+ ppgtt->base.pd = __alloc_pd(sizeof(*ppgtt->base.pd));
+ if (!ppgtt->base.pd) {
+ err = -ENOMEM;
+ goto err_free;
+ }
+
+ err = gen6_ppgtt_init_scratch(ppgtt);
+ if (err)
+ goto err_pd;
+
+ ppgtt->vma = pd_vma_create(ppgtt, GEN6_PD_SIZE);
+ if (IS_ERR(ppgtt->vma)) {
+ err = PTR_ERR(ppgtt->vma);
+ goto err_scratch;
+ }
+
+ return &ppgtt->base;
+
+err_scratch:
+ free_scratch(&ppgtt->base.vm);
+err_pd:
+ kfree(ppgtt->base.pd);
+err_free:
+ mutex_destroy(&ppgtt->pin_mutex);
+ kfree(ppgtt);
+ return ERR_PTR(err);
+}
diff --git a/drivers/gpu/drm/i915/gt/gen6_ppgtt.h b/drivers/gpu/drm/i915/gt/gen6_ppgtt.h
new file mode 100644
index 000000000000..72e481806c96
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/gen6_ppgtt.h
@@ -0,0 +1,76 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2020 Intel Corporation
+ */
+
+#ifndef __GEN6_PPGTT_H__
+#define __GEN6_PPGTT_H__
+
+#include "intel_gtt.h"
+
+struct gen6_ppgtt {
+ struct i915_ppgtt base;
+
+ struct mutex flush;
+ struct i915_vma *vma;
+ gen6_pte_t __iomem *pd_addr;
+
+ atomic_t pin_count;
+ struct mutex pin_mutex;
+
+ bool scan_for_unused_pt;
+};
+
+static inline u32 gen6_pte_index(u32 addr)
+{
+ return i915_pte_index(addr, GEN6_PDE_SHIFT);
+}
+
+static inline u32 gen6_pte_count(u32 addr, u32 length)
+{
+ return i915_pte_count(addr, length, GEN6_PDE_SHIFT);
+}
+
+static inline u32 gen6_pde_index(u32 addr)
+{
+ return i915_pde_index(addr, GEN6_PDE_SHIFT);
+}
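+
+/*
+ * For scale: with GEN6_PDE_SHIFT = 22 and I915_PDES = 512, each PDE maps
+ * 4M of address space, giving the full 2G gen6 ppgtt.
+ */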
+
+#define __to_gen6_ppgtt(base) container_of(base, struct gen6_ppgtt, base)
+
+static inline struct gen6_ppgtt *to_gen6_ppgtt(struct i915_ppgtt *base)
+{
+ BUILD_BUG_ON(offsetof(struct gen6_ppgtt, base));
+ return __to_gen6_ppgtt(base);
+}
+
+/*
+ * gen6_for_each_pde() iterates over every pde from start until start+length.
+ * If start and start+length are not perfectly divisible, the macro will round
+ * down and up as needed. Start=0 and length=2G effectively iterates over
+ * every PDE in the system. The macro modifies ALL its parameters except 'pd',
+ * so each of the other parameters should preferably be a simple variable, or
+ * at most an lvalue with no side-effects!
+ */
+#define gen6_for_each_pde(pt, pd, start, length, iter) \
+ for (iter = gen6_pde_index(start); \
+ length > 0 && iter < I915_PDES && \
+ (pt = i915_pt_entry(pd, iter), true); \
+ ({ u32 temp = ALIGN(start+1, 1 << GEN6_PDE_SHIFT); \
+ temp = min(temp - start, length); \
+ start += temp, length -= temp; }), ++iter)
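+
+/*
+ * Minimal usage sketch (for illustration; gen6_flush_pd() in
+ * gen6_ppgtt.c is the real caller):
+ *
+ *	struct i915_page_table *pt;
+ *	unsigned int pde;
+ *
+ *	gen6_for_each_pde(pt, pd, start, length, pde)
+ *		gen6_write_pde(ppgtt, pde, pt);
+ */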
+
+#define gen6_for_all_pdes(pt, pd, iter) \
+ for (iter = 0; \
+ iter < I915_PDES && \
+ (pt = i915_pt_entry(pd, iter), true); \
+ ++iter)
+
+int gen6_ppgtt_pin(struct i915_ppgtt *base);
+void gen6_ppgtt_unpin(struct i915_ppgtt *base);
+void gen6_ppgtt_unpin_all(struct i915_ppgtt *base);
+void gen6_ppgtt_enable(struct intel_gt *gt);
+void gen7_ppgtt_enable(struct intel_gt *gt);
+struct i915_ppgtt *gen6_ppgtt_create(struct intel_gt *gt);
+
+#endif
diff --git a/drivers/gpu/drm/i915/gt/gen8_ppgtt.c b/drivers/gpu/drm/i915/gt/gen8_ppgtt.c
new file mode 100644
index 000000000000..4d1de2d97d5c
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/gen8_ppgtt.c
@@ -0,0 +1,724 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2020 Intel Corporation
+ */
+
+#include <linux/log2.h>
+
+#include "gen8_ppgtt.h"
+#include "i915_scatterlist.h"
+#include "i915_trace.h"
+#include "i915_vgpu.h"
+#include "intel_gt.h"
+#include "intel_gtt.h"
+
+static u64 gen8_pde_encode(const dma_addr_t addr,
+ const enum i915_cache_level level)
+{
+ u64 pde = addr | _PAGE_PRESENT | _PAGE_RW;
+
+ if (level != I915_CACHE_NONE)
+ pde |= PPAT_CACHED_PDE;
+ else
+ pde |= PPAT_UNCACHED;
+
+ return pde;
+}
+
+static void gen8_ppgtt_notify_vgt(struct i915_ppgtt *ppgtt, bool create)
+{
+ struct drm_i915_private *i915 = ppgtt->vm.i915;
+ struct intel_uncore *uncore = ppgtt->vm.gt->uncore;
+ enum vgt_g2v_type msg;
+ int i;
+
+ if (create)
+ atomic_inc(px_used(ppgtt->pd)); /* never remove */
+ else
+ atomic_dec(px_used(ppgtt->pd));
+
+ mutex_lock(&i915->vgpu.lock);
+
+ if (i915_vm_is_4lvl(&ppgtt->vm)) {
+ const u64 daddr = px_dma(ppgtt->pd);
+
+ intel_uncore_write(uncore,
+ vgtif_reg(pdp[0].lo), lower_32_bits(daddr));
+ intel_uncore_write(uncore,
+ vgtif_reg(pdp[0].hi), upper_32_bits(daddr));
+
+ msg = create ?
+ VGT_G2V_PPGTT_L4_PAGE_TABLE_CREATE :
+ VGT_G2V_PPGTT_L4_PAGE_TABLE_DESTROY;
+ } else {
+ for (i = 0; i < GEN8_3LVL_PDPES; i++) {
+ const u64 daddr = i915_page_dir_dma_addr(ppgtt, i);
+
+ intel_uncore_write(uncore,
+ vgtif_reg(pdp[i].lo),
+ lower_32_bits(daddr));
+ intel_uncore_write(uncore,
+ vgtif_reg(pdp[i].hi),
+ upper_32_bits(daddr));
+ }
+
+ msg = create ?
+ VGT_G2V_PPGTT_L3_PAGE_TABLE_CREATE :
+ VGT_G2V_PPGTT_L3_PAGE_TABLE_DESTROY;
+ }
+
+ /* g2v_notify atomically (via hv trap) consumes the message packet. */
+ intel_uncore_write(uncore, vgtif_reg(g2v_notify), msg);
+
+ mutex_unlock(&i915->vgpu.lock);
+}
+
+/* Index shifts into the pagetable are offset by GEN8_PTE_SHIFT [12] */
+#define GEN8_PAGE_SIZE (SZ_4K) /* page and page-directory sizes are the same */
+#define GEN8_PTE_SHIFT (ilog2(GEN8_PAGE_SIZE))
+#define GEN8_PDES (GEN8_PAGE_SIZE / sizeof(u64))
+#define gen8_pd_shift(lvl) ((lvl) * ilog2(GEN8_PDES))
+#define gen8_pd_index(i, lvl) i915_pde_index((i), gen8_pd_shift(lvl))
+#define __gen8_pte_shift(lvl) (GEN8_PTE_SHIFT + gen8_pd_shift(lvl))
+#define __gen8_pte_index(a, lvl) i915_pde_index((a), __gen8_pte_shift(lvl))
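+
+/*
+ * Worked example: GEN8_PDES = 4096 / 8 = 512, so gen8_pd_shift(lvl) is
+ * 9 * lvl and __gen8_pte_shift(lvl) is 12 + 9 * lvl: a PTE maps 4K
+ * (lvl 0), a PDE 2M (lvl 1), a PDPE 1G (lvl 2), a PML4E 512G (lvl 3).
+ */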
+
+#define as_pd(x) container_of((x), typeof(struct i915_page_directory), pt)
+
+static inline unsigned int
+gen8_pd_range(u64 start, u64 end, int lvl, unsigned int *idx)
+{
+ const int shift = gen8_pd_shift(lvl);
+ const u64 mask = ~0ull << gen8_pd_shift(lvl + 1);
+
+ GEM_BUG_ON(start >= end);
+ end += ~mask >> gen8_pd_shift(1);
+
+ *idx = i915_pde_index(start, shift);
+ if ((start ^ end) & mask)
+ return GEN8_PDES - *idx;
+ else
+ return i915_pde_index(end, shift) - *idx;
+}
+
+static inline bool gen8_pd_contains(u64 start, u64 end, int lvl)
+{
+ const u64 mask = ~0ull << gen8_pd_shift(lvl + 1);
+
+ GEM_BUG_ON(start >= end);
+ return (start ^ end) & mask && (start & ~mask) == 0;
+}
+
+static inline unsigned int gen8_pt_count(u64 start, u64 end)
+{
+ GEM_BUG_ON(start >= end);
+ if ((start ^ end) >> gen8_pd_shift(1))
+ return GEN8_PDES - (start & (GEN8_PDES - 1));
+ else
+ return end - start;
+}
+
+static inline unsigned int
+gen8_pd_top_count(const struct i915_address_space *vm)
+{
+ unsigned int shift = __gen8_pte_shift(vm->top);
+
+ return (vm->total + (1ull << shift) - 1) >> shift;
+}
+
+static inline struct i915_page_directory *
+gen8_pdp_for_page_index(struct i915_address_space * const vm, const u64 idx)
+{
+ struct i915_ppgtt * const ppgtt = i915_vm_to_ppgtt(vm);
+
+ if (vm->top == 2)
+ return ppgtt->pd;
+ else
+ return i915_pd_entry(ppgtt->pd, gen8_pd_index(idx, vm->top));
+}
+
+static inline struct i915_page_directory *
+gen8_pdp_for_page_address(struct i915_address_space * const vm, const u64 addr)
+{
+ return gen8_pdp_for_page_index(vm, addr >> GEN8_PTE_SHIFT);
+}
+
+static void __gen8_ppgtt_cleanup(struct i915_address_space *vm,
+ struct i915_page_directory *pd,
+ int count, int lvl)
+{
+ if (lvl) {
+ void **pde = pd->entry;
+
+ do {
+ if (!*pde)
+ continue;
+
+ __gen8_ppgtt_cleanup(vm, *pde, GEN8_PDES, lvl - 1);
+ } while (pde++, --count);
+ }
+
+ free_px(vm, pd);
+}
+
+static void gen8_ppgtt_cleanup(struct i915_address_space *vm)
+{
+ struct i915_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);
+
+ if (intel_vgpu_active(vm->i915))
+ gen8_ppgtt_notify_vgt(ppgtt, false);
+
+ __gen8_ppgtt_cleanup(vm, ppgtt->pd, gen8_pd_top_count(vm), vm->top);
+ free_scratch(vm);
+}
+
+static u64 __gen8_ppgtt_clear(struct i915_address_space * const vm,
+ struct i915_page_directory * const pd,
+ u64 start, const u64 end, int lvl)
+{
+ const struct i915_page_scratch * const scratch = &vm->scratch[lvl];
+ unsigned int idx, len;
+
+ GEM_BUG_ON(end > vm->total >> GEN8_PTE_SHIFT);
+
+ len = gen8_pd_range(start, end, lvl--, &idx);
+ DBG("%s(%p):{ lvl:%d, start:%llx, end:%llx, idx:%d, len:%d, used:%d }\n",
+ __func__, vm, lvl + 1, start, end,
+ idx, len, atomic_read(px_used(pd)));
+ GEM_BUG_ON(!len || len >= atomic_read(px_used(pd)));
+
+ do {
+ struct i915_page_table *pt = pd->entry[idx];
+
+ if (atomic_fetch_inc(&pt->used) >> gen8_pd_shift(1) &&
+ gen8_pd_contains(start, end, lvl)) {
+ DBG("%s(%p):{ lvl:%d, idx:%d, start:%llx, end:%llx } removing pd\n",
+ __func__, vm, lvl + 1, idx, start, end);
+ clear_pd_entry(pd, idx, scratch);
+ __gen8_ppgtt_cleanup(vm, as_pd(pt), I915_PDES, lvl);
+ start += (u64)I915_PDES << gen8_pd_shift(lvl);
+ continue;
+ }
+
+ if (lvl) {
+ start = __gen8_ppgtt_clear(vm, as_pd(pt),
+ start, end, lvl);
+ } else {
+ unsigned int count;
+ u64 *vaddr;
+
+ count = gen8_pt_count(start, end);
+ DBG("%s(%p):{ lvl:%d, start:%llx, end:%llx, idx:%d, len:%d, used:%d } removing pte\n",
+ __func__, vm, lvl, start, end,
+ gen8_pd_index(start, 0), count,
+ atomic_read(&pt->used));
+ GEM_BUG_ON(!count || count >= atomic_read(&pt->used));
+
+ vaddr = kmap_atomic_px(pt);
+ memset64(vaddr + gen8_pd_index(start, 0),
+ vm->scratch[0].encode,
+ count);
+ kunmap_atomic(vaddr);
+
+ atomic_sub(count, &pt->used);
+ start += count;
+ }
+
+ if (release_pd_entry(pd, idx, pt, scratch))
+ free_px(vm, pt);
+ } while (idx++, --len);
+
+ return start;
+}
+
+static void gen8_ppgtt_clear(struct i915_address_space *vm,
+ u64 start, u64 length)
+{
+ GEM_BUG_ON(!IS_ALIGNED(start, BIT_ULL(GEN8_PTE_SHIFT)));
+ GEM_BUG_ON(!IS_ALIGNED(length, BIT_ULL(GEN8_PTE_SHIFT)));
+ GEM_BUG_ON(range_overflows(start, length, vm->total));
+
+ start >>= GEN8_PTE_SHIFT;
+ length >>= GEN8_PTE_SHIFT;
+ GEM_BUG_ON(length == 0);
+
+ __gen8_ppgtt_clear(vm, i915_vm_to_ppgtt(vm)->pd,
+ start, start + length, vm->top);
+}
+
+static int __gen8_ppgtt_alloc(struct i915_address_space * const vm,
+ struct i915_page_directory * const pd,
+ u64 * const start, const u64 end, int lvl)
+{
+ const struct i915_page_scratch * const scratch = &vm->scratch[lvl];
+ struct i915_page_table *alloc = NULL;
+ unsigned int idx, len;
+ int ret = 0;
+
+ GEM_BUG_ON(end > vm->total >> GEN8_PTE_SHIFT);
+
+ len = gen8_pd_range(*start, end, lvl--, &idx);
+ DBG("%s(%p):{ lvl:%d, start:%llx, end:%llx, idx:%d, len:%d, used:%d }\n",
+ __func__, vm, lvl + 1, *start, end,
+ idx, len, atomic_read(px_used(pd)));
+ GEM_BUG_ON(!len || (idx + len - 1) >> gen8_pd_shift(1));
+
+ spin_lock(&pd->lock);
+ GEM_BUG_ON(!atomic_read(px_used(pd))); /* Must be pinned! */
+ do {
+ struct i915_page_table *pt = pd->entry[idx];
+
+ if (!pt) {
+ spin_unlock(&pd->lock);
+
+ DBG("%s(%p):{ lvl:%d, idx:%d } allocating new tree\n",
+ __func__, vm, lvl + 1, idx);
+
+ pt = fetch_and_zero(&alloc);
+ if (lvl) {
+ if (!pt) {
+ pt = &alloc_pd(vm)->pt;
+ if (IS_ERR(pt)) {
+ ret = PTR_ERR(pt);
+ goto out;
+ }
+ }
+
+ fill_px(pt, vm->scratch[lvl].encode);
+ } else {
+ if (!pt) {
+ pt = alloc_pt(vm);
+ if (IS_ERR(pt)) {
+ ret = PTR_ERR(pt);
+ goto out;
+ }
+ }
+
+ if (intel_vgpu_active(vm->i915) ||
+ gen8_pt_count(*start, end) < I915_PDES)
+ fill_px(pt, vm->scratch[lvl].encode);
+ }
+
+ spin_lock(&pd->lock);
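+			/*
+			 * We dropped pd->lock to allocate, so another thread
+			 * may have installed this entry meanwhile; if so,
+			 * keep our surplus in 'alloc' for reuse or freeing
+			 * on exit.
+			 */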
+ if (likely(!pd->entry[idx]))
+ set_pd_entry(pd, idx, pt);
+ else
+ alloc = pt, pt = pd->entry[idx];
+ }
+
+ if (lvl) {
+ atomic_inc(&pt->used);
+ spin_unlock(&pd->lock);
+
+ ret = __gen8_ppgtt_alloc(vm, as_pd(pt),
+ start, end, lvl);
+ if (unlikely(ret)) {
+ if (release_pd_entry(pd, idx, pt, scratch))
+ free_px(vm, pt);
+ goto out;
+ }
+
+ spin_lock(&pd->lock);
+ atomic_dec(&pt->used);
+ GEM_BUG_ON(!atomic_read(&pt->used));
+ } else {
+ unsigned int count = gen8_pt_count(*start, end);
+
+ DBG("%s(%p):{ lvl:%d, start:%llx, end:%llx, idx:%d, len:%d, used:%d } inserting pte\n",
+ __func__, vm, lvl, *start, end,
+ gen8_pd_index(*start, 0), count,
+ atomic_read(&pt->used));
+
+ atomic_add(count, &pt->used);
+ /* All other pdes may be simultaneously removed */
+ GEM_BUG_ON(atomic_read(&pt->used) > NALLOC * I915_PDES);
+ *start += count;
+ }
+ } while (idx++, --len);
+ spin_unlock(&pd->lock);
+out:
+ if (alloc)
+ free_px(vm, alloc);
+ return ret;
+}
+
+static int gen8_ppgtt_alloc(struct i915_address_space *vm,
+ u64 start, u64 length)
+{
+ u64 from;
+ int err;
+
+ GEM_BUG_ON(!IS_ALIGNED(start, BIT_ULL(GEN8_PTE_SHIFT)));
+ GEM_BUG_ON(!IS_ALIGNED(length, BIT_ULL(GEN8_PTE_SHIFT)));
+ GEM_BUG_ON(range_overflows(start, length, vm->total));
+
+ start >>= GEN8_PTE_SHIFT;
+ length >>= GEN8_PTE_SHIFT;
+ GEM_BUG_ON(length == 0);
+ from = start;
+
+ err = __gen8_ppgtt_alloc(vm, i915_vm_to_ppgtt(vm)->pd,
+ &start, start + length, vm->top);
+ if (unlikely(err && from != start))
+ __gen8_ppgtt_clear(vm, i915_vm_to_ppgtt(vm)->pd,
+ from, start, vm->top);
+
+ return err;
+}
+
+static __always_inline u64
+gen8_ppgtt_insert_pte(struct i915_ppgtt *ppgtt,
+ struct i915_page_directory *pdp,
+ struct sgt_dma *iter,
+ u64 idx,
+ enum i915_cache_level cache_level,
+ u32 flags)
+{
+ struct i915_page_directory *pd;
+ const gen8_pte_t pte_encode = gen8_pte_encode(0, cache_level, flags);
+ gen8_pte_t *vaddr;
+
+ pd = i915_pd_entry(pdp, gen8_pd_index(idx, 2));
+ vaddr = kmap_atomic_px(i915_pt_entry(pd, gen8_pd_index(idx, 1)));
+ do {
+ GEM_BUG_ON(iter->sg->length < I915_GTT_PAGE_SIZE);
+ vaddr[gen8_pd_index(idx, 0)] = pte_encode | iter->dma;
+
+ iter->dma += I915_GTT_PAGE_SIZE;
+ if (iter->dma >= iter->max) {
+ iter->sg = __sg_next(iter->sg);
+ if (!iter->sg) {
+ idx = 0;
+ break;
+ }
+
+ iter->dma = sg_dma_address(iter->sg);
+ iter->max = iter->dma + iter->sg->length;
+ }
+
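+		/*
+		 * Advance to the next page table once the PTE index wraps,
+		 * and on to the next page directory when the PDE index
+		 * also wraps.
+		 */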
+ if (gen8_pd_index(++idx, 0) == 0) {
+ if (gen8_pd_index(idx, 1) == 0) {
+ /* Limited by sg length for 3lvl */
+ if (gen8_pd_index(idx, 2) == 0)
+ break;
+
+ pd = pdp->entry[gen8_pd_index(idx, 2)];
+ }
+
+ kunmap_atomic(vaddr);
+ vaddr = kmap_atomic_px(i915_pt_entry(pd, gen8_pd_index(idx, 1)));
+ }
+ } while (1);
+ kunmap_atomic(vaddr);
+
+ return idx;
+}
+
+static void gen8_ppgtt_insert_huge(struct i915_vma *vma,
+ struct sgt_dma *iter,
+ enum i915_cache_level cache_level,
+ u32 flags)
+{
+ const gen8_pte_t pte_encode = gen8_pte_encode(0, cache_level, flags);
+ u64 start = vma->node.start;
+ dma_addr_t rem = iter->sg->length;
+
+ GEM_BUG_ON(!i915_vm_is_4lvl(vma->vm));
+
+ do {
+ struct i915_page_directory * const pdp =
+ gen8_pdp_for_page_address(vma->vm, start);
+ struct i915_page_directory * const pd =
+ i915_pd_entry(pdp, __gen8_pte_index(start, 2));
+ gen8_pte_t encode = pte_encode;
+ unsigned int maybe_64K = -1;
+ unsigned int page_size;
+ gen8_pte_t *vaddr;
+ u16 index;
+
+ if (vma->page_sizes.sg & I915_GTT_PAGE_SIZE_2M &&
+ IS_ALIGNED(iter->dma, I915_GTT_PAGE_SIZE_2M) &&
+ rem >= I915_GTT_PAGE_SIZE_2M &&
+ !__gen8_pte_index(start, 0)) {
+ index = __gen8_pte_index(start, 1);
+ encode |= GEN8_PDE_PS_2M;
+ page_size = I915_GTT_PAGE_SIZE_2M;
+
+ vaddr = kmap_atomic_px(pd);
+ } else {
+ struct i915_page_table *pt =
+ i915_pt_entry(pd, __gen8_pte_index(start, 1));
+
+ index = __gen8_pte_index(start, 0);
+ page_size = I915_GTT_PAGE_SIZE;
+
+ if (!index &&
+ vma->page_sizes.sg & I915_GTT_PAGE_SIZE_64K &&
+ IS_ALIGNED(iter->dma, I915_GTT_PAGE_SIZE_64K) &&
+ (IS_ALIGNED(rem, I915_GTT_PAGE_SIZE_64K) ||
+ rem >= (I915_PDES - index) * I915_GTT_PAGE_SIZE))
+ maybe_64K = __gen8_pte_index(start, 1);
+
+ vaddr = kmap_atomic_px(pt);
+ }
+
+ do {
+ GEM_BUG_ON(iter->sg->length < page_size);
+ vaddr[index++] = encode | iter->dma;
+
+ start += page_size;
+ iter->dma += page_size;
+ rem -= page_size;
+ if (iter->dma >= iter->max) {
+ iter->sg = __sg_next(iter->sg);
+ if (!iter->sg)
+ break;
+
+ rem = iter->sg->length;
+ iter->dma = sg_dma_address(iter->sg);
+ iter->max = iter->dma + rem;
+
+ if (maybe_64K != -1 && index < I915_PDES &&
+ !(IS_ALIGNED(iter->dma, I915_GTT_PAGE_SIZE_64K) &&
+ (IS_ALIGNED(rem, I915_GTT_PAGE_SIZE_64K) ||
+ rem >= (I915_PDES - index) * I915_GTT_PAGE_SIZE)))
+ maybe_64K = -1;
+
+ if (unlikely(!IS_ALIGNED(iter->dma, page_size)))
+ break;
+ }
+ } while (rem >= page_size && index < I915_PDES);
+
+ kunmap_atomic(vaddr);
+
+ /*
+	 * Is it safe to mark the 2M block as 64K? Either we have
+	 * filled the whole page-table with 64K entries, or we filled
+	 * part of it and reached the end of the sg table with enough
+	 * padding.
+ */
+ if (maybe_64K != -1 &&
+ (index == I915_PDES ||
+ (i915_vm_has_scratch_64K(vma->vm) &&
+ !iter->sg && IS_ALIGNED(vma->node.start +
+ vma->node.size,
+ I915_GTT_PAGE_SIZE_2M)))) {
+ vaddr = kmap_atomic_px(pd);
+ vaddr[maybe_64K] |= GEN8_PDE_IPS_64K;
+ kunmap_atomic(vaddr);
+ page_size = I915_GTT_PAGE_SIZE_64K;
+
+ /*
+ * We write all 4K page entries, even when using 64K
+ * pages. In order to verify that the HW isn't cheating
+ * by using the 4K PTE instead of the 64K PTE, we want
+ * to remove all the surplus entries. If the HW skipped
+ * the 64K PTE, it will read/write into the scratch page
+ * instead - which we detect as missing results during
+ * selftests.
+ */
+ if (I915_SELFTEST_ONLY(vma->vm->scrub_64K)) {
+ u16 i;
+
+ encode = vma->vm->scratch[0].encode;
+ vaddr = kmap_atomic_px(i915_pt_entry(pd, maybe_64K));
+
+ for (i = 1; i < index; i += 16)
+ memset64(vaddr + i, encode, 15);
+
+ kunmap_atomic(vaddr);
+ }
+ }
+
+ vma->page_sizes.gtt |= page_size;
+ } while (iter->sg);
+}
+
+static void gen8_ppgtt_insert(struct i915_address_space *vm,
+ struct i915_vma *vma,
+ enum i915_cache_level cache_level,
+ u32 flags)
+{
+ struct i915_ppgtt * const ppgtt = i915_vm_to_ppgtt(vm);
+ struct sgt_dma iter = sgt_dma(vma);
+
+ if (vma->page_sizes.sg > I915_GTT_PAGE_SIZE) {
+ gen8_ppgtt_insert_huge(vma, &iter, cache_level, flags);
+ } else {
+ u64 idx = vma->node.start >> GEN8_PTE_SHIFT;
+
+ do {
+ struct i915_page_directory * const pdp =
+ gen8_pdp_for_page_index(vm, idx);
+
+ idx = gen8_ppgtt_insert_pte(ppgtt, pdp, &iter, idx,
+ cache_level, flags);
+ } while (idx);
+
+ vma->page_sizes.gtt = I915_GTT_PAGE_SIZE;
+ }
+}
+
+static int gen8_init_scratch(struct i915_address_space *vm)
+{
+ int ret;
+ int i;
+
+ /*
+	 * If everybody agrees not to write into the scratch page, we can
+	 * reuse it for all vms, keeping contexts and processes separate.
+ */
+ if (vm->has_read_only && vm->gt->vm && !i915_is_ggtt(vm->gt->vm)) {
+ struct i915_address_space *clone = vm->gt->vm;
+
+ GEM_BUG_ON(!clone->has_read_only);
+
+ vm->scratch_order = clone->scratch_order;
+ memcpy(vm->scratch, clone->scratch, sizeof(vm->scratch));
+ px_dma(&vm->scratch[0]) = 0; /* no xfer of ownership */
+ return 0;
+ }
+
+ ret = setup_scratch_page(vm, __GFP_HIGHMEM);
+ if (ret)
+ return ret;
+
+ vm->scratch[0].encode =
+ gen8_pte_encode(px_dma(&vm->scratch[0]),
+ I915_CACHE_LLC, vm->has_read_only);
+
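+	/*
+	 * Build the scratch hierarchy: fill each level's scratch page with
+	 * entries pointing at the scratch page one level below, so unused
+	 * VA ranges always resolve to the scratch data page.
+	 */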
+ for (i = 1; i <= vm->top; i++) {
+ if (unlikely(setup_page_dma(vm, px_base(&vm->scratch[i]))))
+ goto free_scratch;
+
+ fill_px(&vm->scratch[i], vm->scratch[i - 1].encode);
+ vm->scratch[i].encode =
+ gen8_pde_encode(px_dma(&vm->scratch[i]),
+ I915_CACHE_LLC);
+ }
+
+ return 0;
+
+free_scratch:
+ free_scratch(vm);
+ return -ENOMEM;
+}
+
+static int gen8_preallocate_top_level_pdp(struct i915_ppgtt *ppgtt)
+{
+ struct i915_address_space *vm = &ppgtt->vm;
+ struct i915_page_directory *pd = ppgtt->pd;
+ unsigned int idx;
+
+ GEM_BUG_ON(vm->top != 2);
+ GEM_BUG_ON(gen8_pd_top_count(vm) != GEN8_3LVL_PDPES);
+
+ for (idx = 0; idx < GEN8_3LVL_PDPES; idx++) {
+ struct i915_page_directory *pde;
+
+ pde = alloc_pd(vm);
+ if (IS_ERR(pde))
+ return PTR_ERR(pde);
+
+ fill_px(pde, vm->scratch[1].encode);
+ set_pd_entry(pd, idx, pde);
+ atomic_inc(px_used(pde)); /* keep pinned */
+ }
+ wmb();
+
+ return 0;
+}
+
+static struct i915_page_directory *
+gen8_alloc_top_pd(struct i915_address_space *vm)
+{
+ const unsigned int count = gen8_pd_top_count(vm);
+ struct i915_page_directory *pd;
+
+ GEM_BUG_ON(count > ARRAY_SIZE(pd->entry));
+
+ pd = __alloc_pd(offsetof(typeof(*pd), entry[count]));
+ if (unlikely(!pd))
+ return ERR_PTR(-ENOMEM);
+
+ if (unlikely(setup_page_dma(vm, px_base(pd)))) {
+ kfree(pd);
+ return ERR_PTR(-ENOMEM);
+ }
+
+ fill_page_dma(px_base(pd), vm->scratch[vm->top].encode, count);
+ atomic_inc(px_used(pd)); /* mark as pinned */
+ return pd;
+}
+
+/*
+ * GEN8 legacy ppgtt programming is accomplished through a maximum of 4 PDP
+ * registers, with a net effect resembling a 2-level page table in normal x86
+ * terms. Each PDP represents 1GB of memory, so 4 * 512 * 512 * 4096 = 4GB of
+ * legacy 32b address space.
+ */
+struct i915_ppgtt *gen8_ppgtt_create(struct intel_gt *gt)
+{
+ struct i915_ppgtt *ppgtt;
+ int err;
+
+ ppgtt = kzalloc(sizeof(*ppgtt), GFP_KERNEL);
+ if (!ppgtt)
+ return ERR_PTR(-ENOMEM);
+
+ ppgtt_init(ppgtt, gt);
+ ppgtt->vm.top = i915_vm_is_4lvl(&ppgtt->vm) ? 3 : 2;
+
+ /*
+ * From bdw, there is hw support for read-only pages in the PPGTT.
+ *
+ * Gen11 has HSDES#:1807136187 unresolved. Disable ro support
+ * for now.
+ *
+ * Gen12 has inherited the same read-only fault issue from gen11.
+ */
+ ppgtt->vm.has_read_only = !IS_GEN_RANGE(gt->i915, 11, 12);
+
+ /*
+	 * There are only a few exceptions for gen >= 6: chv and bxt.
+	 * And we are not sure about the latter, so play safe for now.
+ */
+ if (IS_CHERRYVIEW(gt->i915) || IS_BROXTON(gt->i915))
+ ppgtt->vm.pt_kmap_wc = true;
+
+ err = gen8_init_scratch(&ppgtt->vm);
+ if (err)
+ goto err_free;
+
+ ppgtt->pd = gen8_alloc_top_pd(&ppgtt->vm);
+ if (IS_ERR(ppgtt->pd)) {
+ err = PTR_ERR(ppgtt->pd);
+ goto err_free_scratch;
+ }
+
+ if (!i915_vm_is_4lvl(&ppgtt->vm)) {
+ err = gen8_preallocate_top_level_pdp(ppgtt);
+ if (err)
+ goto err_free_pd;
+ }
+
+ ppgtt->vm.bind_async_flags = I915_VMA_LOCAL_BIND;
+ ppgtt->vm.insert_entries = gen8_ppgtt_insert;
+ ppgtt->vm.allocate_va_range = gen8_ppgtt_alloc;
+ ppgtt->vm.clear_range = gen8_ppgtt_clear;
+
+ if (intel_vgpu_active(gt->i915))
+ gen8_ppgtt_notify_vgt(ppgtt, true);
+
+ ppgtt->vm.cleanup = gen8_ppgtt_cleanup;
+
+ return ppgtt;
+
+err_free_pd:
+ __gen8_ppgtt_cleanup(&ppgtt->vm, ppgtt->pd,
+ gen8_pd_top_count(&ppgtt->vm), ppgtt->vm.top);
+err_free_scratch:
+ free_scratch(&ppgtt->vm);
+err_free:
+ kfree(ppgtt);
+ return ERR_PTR(err);
+}
diff --git a/drivers/gpu/drm/i915/gt/gen8_ppgtt.h b/drivers/gpu/drm/i915/gt/gen8_ppgtt.h
new file mode 100644
index 000000000000..76a08b9c1f5c
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/gen8_ppgtt.h
@@ -0,0 +1,13 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2020 Intel Corporation
+ */
+
+#ifndef __GEN8_PPGTT_H__
+#define __GEN8_PPGTT_H__
+
+struct intel_gt;
+
+struct i915_ppgtt *gen8_ppgtt_create(struct intel_gt *gt);
+
+#endif
diff --git a/drivers/gpu/drm/i915/gt/intel_breadcrumbs.c b/drivers/gpu/drm/i915/gt/intel_breadcrumbs.c
index 55317081d48b..0ba524a414c6 100644
--- a/drivers/gpu/drm/i915/gt/intel_breadcrumbs.c
+++ b/drivers/gpu/drm/i915/gt/intel_breadcrumbs.c
@@ -28,6 +28,8 @@
#include "i915_drv.h"
#include "i915_trace.h"
+#include "intel_gt_pm.h"
+#include "intel_gt_requests.h"
static void irq_enable(struct intel_engine_cs *engine)
{
@@ -53,15 +55,17 @@ static void irq_disable(struct intel_engine_cs *engine)
static void __intel_breadcrumbs_disarm_irq(struct intel_breadcrumbs *b)
{
+ struct intel_engine_cs *engine =
+ container_of(b, struct intel_engine_cs, breadcrumbs);
+
lockdep_assert_held(&b->irq_lock);
GEM_BUG_ON(!b->irq_enabled);
if (!--b->irq_enabled)
- irq_disable(container_of(b,
- struct intel_engine_cs,
- breadcrumbs));
+ irq_disable(engine);
b->irq_armed = false;
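+	/* Drop the GT wakeref taken when the breadcrumb irq was armed */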
+ intel_gt_pm_put_async(engine->gt);
}
void intel_engine_disarm_breadcrumbs(struct intel_engine_cs *engine)
@@ -127,16 +131,23 @@ __dma_fence_signal__notify(struct dma_fence *fence,
}
}
-void intel_engine_breadcrumbs_irq(struct intel_engine_cs *engine)
+static void add_retire(struct intel_breadcrumbs *b, struct intel_timeline *tl)
{
- struct intel_breadcrumbs *b = &engine->breadcrumbs;
+ struct intel_engine_cs *engine =
+ container_of(b, struct intel_engine_cs, breadcrumbs);
+
+ intel_engine_add_retire(engine, tl);
+}
+
+static void signal_irq_work(struct irq_work *work)
+{
+ struct intel_breadcrumbs *b = container_of(work, typeof(*b), irq_work);
const ktime_t timestamp = ktime_get();
struct intel_context *ce, *cn;
struct list_head *pos, *next;
- unsigned long flags;
LIST_HEAD(signal);
- spin_lock_irqsave(&b->irq_lock, flags);
+ spin_lock(&b->irq_lock);
if (b->irq_armed && list_empty(&b->signalers))
__intel_breadcrumbs_disarm_irq(b);
@@ -177,44 +188,41 @@ void intel_engine_breadcrumbs_irq(struct intel_engine_cs *engine)
if (!list_is_first(pos, &ce->signals)) {
/* Advance the list to the first incomplete request */
__list_del_many(&ce->signals, pos);
- if (&ce->signals == pos) /* now empty */
+ if (&ce->signals == pos) { /* now empty */
list_del_init(&ce->signal_link);
+ add_retire(b, ce->timeline);
+ }
}
}
- spin_unlock_irqrestore(&b->irq_lock, flags);
+ spin_unlock(&b->irq_lock);
list_for_each_safe(pos, next, &signal) {
struct i915_request *rq =
list_entry(pos, typeof(*rq), signal_link);
struct list_head cb_list;
- spin_lock_irqsave(&rq->lock, flags);
+ spin_lock(&rq->lock);
list_replace(&rq->fence.cb_list, &cb_list);
__dma_fence_signal__timestamp(&rq->fence, timestamp);
__dma_fence_signal__notify(&rq->fence, &cb_list);
- spin_unlock_irqrestore(&rq->lock, flags);
+ spin_unlock(&rq->lock);
i915_request_put(rq);
}
}
-static void signal_irq_work(struct irq_work *work)
-{
- struct intel_engine_cs *engine =
- container_of(work, typeof(*engine), breadcrumbs.irq_work);
-
- intel_engine_breadcrumbs_irq(engine);
-}
-
-static void __intel_breadcrumbs_arm_irq(struct intel_breadcrumbs *b)
+static bool __intel_breadcrumbs_arm_irq(struct intel_breadcrumbs *b)
{
struct intel_engine_cs *engine =
container_of(b, struct intel_engine_cs, breadcrumbs);
lockdep_assert_held(&b->irq_lock);
if (b->irq_armed)
- return;
+ return true;
+
+ if (!intel_gt_pm_get_if_awake(engine->gt))
+ return false;
/*
* The breadcrumb irq will be disarmed on the interrupt after the
@@ -234,6 +242,8 @@ static void __intel_breadcrumbs_arm_irq(struct intel_breadcrumbs *b)
if (!b->irq_enabled++)
irq_enable(engine);
+
+ return true;
}
void intel_engine_init_breadcrumbs(struct intel_engine_cs *engine)
@@ -271,19 +281,20 @@ bool i915_request_enable_breadcrumb(struct i915_request *rq)
if (test_bit(I915_FENCE_FLAG_ACTIVE, &rq->fence.flags)) {
struct intel_breadcrumbs *b = &rq->engine->breadcrumbs;
- struct intel_context *ce = rq->hw_context;
+ struct intel_context *ce = rq->context;
struct list_head *pos;
spin_lock(&b->irq_lock);
GEM_BUG_ON(test_bit(I915_FENCE_FLAG_SIGNAL, &rq->fence.flags));
- __intel_breadcrumbs_arm_irq(b);
+ if (!__intel_breadcrumbs_arm_irq(b))
+ goto unlock;
/*
* We keep the seqno in retirement order, so we can break
- * inside intel_engine_breadcrumbs_irq as soon as we've passed
- * the last completed request (or seen a request that hasn't
- * event started). We could iterate the timeline->requests list,
+ * inside intel_engine_signal_breadcrumbs as soon as we've
+ * passed the last completed request (or seen a request that
+	 * hasn't even started). We could walk the timeline->requests,
* but keeping a separate signalers_list has the advantage of
* hopefully being much smaller than the full list and so
* provides faster iteration and detection when there are no
@@ -306,6 +317,7 @@ bool i915_request_enable_breadcrumb(struct i915_request *rq)
GEM_BUG_ON(!check_signal_order(ce, rq));
set_bit(I915_FENCE_FLAG_SIGNAL, &rq->fence.flags);
+unlock:
spin_unlock(&b->irq_lock);
}
@@ -326,7 +338,7 @@ void i915_request_cancel_breadcrumb(struct i915_request *rq)
*/
spin_lock(&b->irq_lock);
if (test_bit(I915_FENCE_FLAG_SIGNAL, &rq->fence.flags)) {
- struct intel_context *ce = rq->hw_context;
+ struct intel_context *ce = rq->context;
list_del(&rq->signal_link);
if (list_empty(&ce->signals))
diff --git a/drivers/gpu/drm/i915/gt/intel_context.c b/drivers/gpu/drm/i915/gt/intel_context.c
index ef7bc41ffffa..23137b2a8689 100644
--- a/drivers/gpu/drm/i915/gt/intel_context.c
+++ b/drivers/gpu/drm/i915/gt/intel_context.c
@@ -31,8 +31,7 @@ void intel_context_free(struct intel_context *ce)
}
struct intel_context *
-intel_context_create(struct i915_gem_context *ctx,
- struct intel_engine_cs *engine)
+intel_context_create(struct intel_engine_cs *engine)
{
struct intel_context *ce;
@@ -40,39 +39,82 @@ intel_context_create(struct i915_gem_context *ctx,
if (!ce)
return ERR_PTR(-ENOMEM);
- intel_context_init(ce, ctx, engine);
+ intel_context_init(ce, engine);
return ce;
}
-int __intel_context_do_pin(struct intel_context *ce)
+int intel_context_alloc_state(struct intel_context *ce)
{
- int err;
+ int err = 0;
if (mutex_lock_interruptible(&ce->pin_mutex))
return -EINTR;
- if (likely(!atomic_read(&ce->pin_count))) {
- intel_wakeref_t wakeref;
+ if (!test_bit(CONTEXT_ALLOC_BIT, &ce->flags)) {
+ err = ce->ops->alloc(ce);
+ if (unlikely(err))
+ goto unlock;
- if (unlikely(!test_bit(CONTEXT_ALLOC_BIT, &ce->flags))) {
- err = ce->ops->alloc(ce);
- if (unlikely(err))
- goto err;
+ set_bit(CONTEXT_ALLOC_BIT, &ce->flags);
+ }
+
+unlock:
+ mutex_unlock(&ce->pin_mutex);
+ return err;
+}
+
+static int intel_context_active_acquire(struct intel_context *ce)
+{
+ int err;
+
+ err = i915_active_acquire(&ce->active);
+ if (err)
+ return err;
- __set_bit(CONTEXT_ALLOC_BIT, &ce->flags);
+ /* Preallocate tracking nodes */
+ if (!intel_context_is_barrier(ce)) {
+ err = i915_active_acquire_preallocate_barrier(&ce->active,
+ ce->engine);
+ if (err) {
+ i915_active_release(&ce->active);
+ return err;
}
+ }
+
+ return 0;
+}
- err = 0;
- with_intel_runtime_pm(ce->engine->uncore->rpm, wakeref)
- err = ce->ops->pin(ce);
+static void intel_context_active_release(struct intel_context *ce)
+{
+ /* Nodes preallocated in intel_context_active() */
+ i915_active_acquire_barrier(&ce->active);
+ i915_active_release(&ce->active);
+}
+
+int __intel_context_do_pin(struct intel_context *ce)
+{
+ int err;
+
+ if (unlikely(!test_bit(CONTEXT_ALLOC_BIT, &ce->flags))) {
+ err = intel_context_alloc_state(ce);
if (err)
+ return err;
+ }
+
+ if (mutex_lock_interruptible(&ce->pin_mutex))
+ return -EINTR;
+
+ if (likely(!atomic_read(&ce->pin_count))) {
+ err = intel_context_active_acquire(ce);
+ if (unlikely(err))
goto err;
- GEM_TRACE("%s context:%llx pin ring:{head:%04x, tail:%04x}\n",
- ce->engine->name, ce->timeline->fence_context,
- ce->ring->head, ce->ring->tail);
+ err = ce->ops->pin(ce);
+ if (unlikely(err))
+ goto err_active;
- i915_gem_context_get(ce->gem_context); /* for ctx->ppgtt */
+ CE_TRACE(ce, "pin ring:{head:%04x, tail:%04x}\n",
+ ce->ring->head, ce->ring->tail);
smp_mb__before_atomic(); /* flush pin before it is visible */
}
@@ -83,6 +125,8 @@ int __intel_context_do_pin(struct intel_context *ce)
mutex_unlock(&ce->pin_mutex);
return 0;
+err_active:
+ intel_context_active_release(ce);
err:
mutex_unlock(&ce->pin_mutex);
return err;
@@ -90,39 +134,36 @@ err:
void intel_context_unpin(struct intel_context *ce)
{
- if (likely(atomic_add_unless(&ce->pin_count, -1, 1)))
+ if (!atomic_dec_and_test(&ce->pin_count))
return;
- /* We may be called from inside intel_context_pin() to evict another */
- intel_context_get(ce);
- mutex_lock_nested(&ce->pin_mutex, SINGLE_DEPTH_NESTING);
-
- if (likely(atomic_dec_and_test(&ce->pin_count))) {
- GEM_TRACE("%s context:%llx retire\n",
- ce->engine->name, ce->timeline->fence_context);
-
- ce->ops->unpin(ce);
+ CE_TRACE(ce, "unpin\n");
+ ce->ops->unpin(ce);
- i915_gem_context_put(ce->gem_context);
- intel_context_active_release(ce);
- }
-
- mutex_unlock(&ce->pin_mutex);
+ /*
+ * Once released, we may asynchronously drop the active reference.
+ * As that may be the only reference keeping the context alive,
+	 * take an extra reference now so that it is not freed before we
+	 * finish dereferencing it.
+ */
+ intel_context_get(ce);
+ intel_context_active_release(ce);
intel_context_put(ce);
}
static int __context_pin_state(struct i915_vma *vma)
{
- u64 flags;
+ unsigned int bias = i915_ggtt_pin_bias(vma) | PIN_OFFSET_BIAS;
int err;
- flags = i915_ggtt_pin_bias(vma) | PIN_OFFSET_BIAS;
- flags |= PIN_HIGH | PIN_GLOBAL;
-
- err = i915_vma_pin(vma, 0, 0, flags);
+ err = i915_ggtt_pin(vma, 0, bias | PIN_HIGH);
if (err)
return err;
+ err = i915_active_acquire(&vma->active);
+ if (err)
+ goto err_unpin;
+
/*
* And mark it as a globally pinned object to let the shrinker know
* it cannot reclaim the object until we release it.
@@ -131,27 +172,57 @@ static int __context_pin_state(struct i915_vma *vma)
vma->obj->mm.dirty = true;
return 0;
+
+err_unpin:
+ i915_vma_unpin(vma);
+ return err;
}
static void __context_unpin_state(struct i915_vma *vma)
{
i915_vma_make_shrinkable(vma);
+ i915_active_release(&vma->active);
__i915_vma_unpin(vma);
}
+static int __ring_active(struct intel_ring *ring)
+{
+ int err;
+
+ err = i915_active_acquire(&ring->vma->active);
+ if (err)
+ return err;
+
+ err = intel_ring_pin(ring);
+ if (err)
+ goto err_active;
+
+ return 0;
+
+err_active:
+ i915_active_release(&ring->vma->active);
+ return err;
+}
+
+static void __ring_retire(struct intel_ring *ring)
+{
+ intel_ring_unpin(ring);
+ i915_active_release(&ring->vma->active);
+}
+
__i915_active_call
static void __intel_context_retire(struct i915_active *active)
{
struct intel_context *ce = container_of(active, typeof(*ce), active);
- GEM_TRACE("%s context:%llx retire\n",
- ce->engine->name, ce->timeline->fence_context);
+ CE_TRACE(ce, "retire\n");
+ set_bit(CONTEXT_VALID_BIT, &ce->flags);
if (ce->state)
__context_unpin_state(ce->state);
intel_timeline_unpin(ce->timeline);
- intel_ring_unpin(ce->ring);
+ __ring_retire(ce->ring);
intel_context_put(ce);
}
@@ -161,9 +232,11 @@ static int __intel_context_active(struct i915_active *active)
struct intel_context *ce = container_of(active, typeof(*ce), active);
int err;
+ CE_TRACE(ce, "active\n");
+
intel_context_get(ce);
- err = intel_ring_pin(ce->ring);
+ err = __ring_active(ce->ring);
if (err)
goto err_put;
@@ -183,66 +256,27 @@ static int __intel_context_active(struct i915_active *active)
err_timeline:
intel_timeline_unpin(ce->timeline);
err_ring:
- intel_ring_unpin(ce->ring);
+ __ring_retire(ce->ring);
err_put:
intel_context_put(ce);
return err;
}
-int intel_context_active_acquire(struct intel_context *ce)
-{
- int err;
-
- err = i915_active_acquire(&ce->active);
- if (err)
- return err;
-
- /* Preallocate tracking nodes */
- if (!i915_gem_context_is_kernel(ce->gem_context)) {
- err = i915_active_acquire_preallocate_barrier(&ce->active,
- ce->engine);
- if (err) {
- i915_active_release(&ce->active);
- return err;
- }
- }
-
- return 0;
-}
-
-void intel_context_active_release(struct intel_context *ce)
-{
- /* Nodes preallocated in intel_context_active() */
- i915_active_acquire_barrier(&ce->active);
- i915_active_release(&ce->active);
-}
-
void
intel_context_init(struct intel_context *ce,
- struct i915_gem_context *ctx,
struct intel_engine_cs *engine)
{
- struct i915_address_space *vm;
-
GEM_BUG_ON(!engine->cops);
+ GEM_BUG_ON(!engine->gt->vm);
kref_init(&ce->ref);
- ce->gem_context = ctx;
- rcu_read_lock();
- vm = rcu_dereference(ctx->vm);
- if (vm)
- ce->vm = i915_vm_get(vm);
- else
- ce->vm = i915_vm_get(&engine->gt->ggtt->vm);
- rcu_read_unlock();
- if (ctx->timeline)
- ce->timeline = intel_timeline_get(ctx->timeline);
-
ce->engine = engine;
ce->ops = engine->cops;
ce->sseu = engine->sseu;
- ce->ring = __intel_context_ring_size(SZ_16K);
+ ce->ring = __intel_context_ring_size(SZ_4K);
+
+ ce->vm = i915_vm_get(engine->gt->vm);
INIT_LIST_HEAD(&ce->signal_link);
INIT_LIST_HEAD(&ce->signals);
@@ -307,30 +341,11 @@ int intel_context_prepare_remote_request(struct intel_context *ce,
int err;
/* Only suitable for use in remotely modifying this context */
- GEM_BUG_ON(rq->hw_context == ce);
+ GEM_BUG_ON(rq->context == ce);
if (rcu_access_pointer(rq->timeline) != tl) { /* timeline sharing! */
- /*
- * Ideally, we just want to insert our foreign fence as
- * a barrier into the remove context, such that this operation
- * occurs after all current operations in that context, and
- * all future operations must occur after this.
- *
- * Currently, the timeline->last_request tracking is guarded
- * by its mutex and so we must obtain that to atomically
- * insert our barrier. However, since we already hold our
- * timeline->mutex, we must be careful against potential
- * inversion if we are the kernel_context as the remote context
- * will itself poke at the kernel_context when it needs to
- * unpin. Ergo, if already locked, we drop both locks and
- * try again (through the magic of userspace repeating EAGAIN).
- */
- if (!mutex_trylock(&tl->mutex))
- return -EAGAIN;
-
/* Queue this switch after current activity by this context. */
err = i915_active_fence_set(&tl->last_request, rq);
- mutex_unlock(&tl->mutex);
if (err)
return err;
}
diff --git a/drivers/gpu/drm/i915/gt/intel_context.h b/drivers/gpu/drm/i915/gt/intel_context.h
index 68b3d317d959..30bd248827d8 100644
--- a/drivers/gpu/drm/i915/gt/intel_context.h
+++ b/drivers/gpu/drm/i915/gt/intel_context.h
@@ -7,7 +7,9 @@
#ifndef __INTEL_CONTEXT_H__
#define __INTEL_CONTEXT_H__
+#include <linux/bitops.h>
#include <linux/lockdep.h>
+#include <linux/types.h>
#include "i915_active.h"
#include "intel_context_types.h"
@@ -15,14 +17,21 @@
#include "intel_ring_types.h"
#include "intel_timeline_types.h"
+#define CE_TRACE(ce, fmt, ...) do { \
+ const struct intel_context *ce__ = (ce); \
+ ENGINE_TRACE(ce__->engine, "context:%llx " fmt, \
+ ce__->timeline->fence_context, \
+ ##__VA_ARGS__); \
+} while (0)
+
void intel_context_init(struct intel_context *ce,
- struct i915_gem_context *ctx,
struct intel_engine_cs *engine);
void intel_context_fini(struct intel_context *ce);
struct intel_context *
-intel_context_create(struct i915_gem_context *ctx,
- struct intel_engine_cs *engine);
+intel_context_create(struct intel_engine_cs *engine);
+
+int intel_context_alloc_state(struct intel_context *ce);
void intel_context_free(struct intel_context *ce);
@@ -69,9 +78,14 @@ static inline void intel_context_unlock_pinned(struct intel_context *ce)
int __intel_context_do_pin(struct intel_context *ce);
+static inline bool intel_context_pin_if_active(struct intel_context *ce)
+{
+ return atomic_inc_not_zero(&ce->pin_count);
+}
+
static inline int intel_context_pin(struct intel_context *ce)
{
- if (likely(atomic_inc_not_zero(&ce->pin_count)))
+ if (likely(intel_context_pin_if_active(ce)))
return 0;
return __intel_context_do_pin(ce);
@@ -109,9 +123,6 @@ static inline void intel_context_exit(struct intel_context *ce)
ce->ops->exit(ce);
}
-int intel_context_active_acquire(struct intel_context *ce);
-void intel_context_active_release(struct intel_context *ce);
-
static inline struct intel_context *intel_context_get(struct intel_context *ce)
{
kref_get(&ce->ref);
@@ -153,4 +164,64 @@ static inline struct intel_ring *__intel_context_ring_size(u64 sz)
return u64_to_ptr(struct intel_ring, sz);
}
+static inline bool intel_context_is_barrier(const struct intel_context *ce)
+{
+ return test_bit(CONTEXT_BARRIER_BIT, &ce->flags);
+}
+
+static inline bool intel_context_use_semaphores(const struct intel_context *ce)
+{
+ return test_bit(CONTEXT_USE_SEMAPHORES, &ce->flags);
+}
+
+static inline void intel_context_set_use_semaphores(struct intel_context *ce)
+{
+ set_bit(CONTEXT_USE_SEMAPHORES, &ce->flags);
+}
+
+static inline void intel_context_clear_use_semaphores(struct intel_context *ce)
+{
+ clear_bit(CONTEXT_USE_SEMAPHORES, &ce->flags);
+}
+
+static inline bool intel_context_is_banned(const struct intel_context *ce)
+{
+ return test_bit(CONTEXT_BANNED, &ce->flags);
+}
+
+static inline bool intel_context_set_banned(struct intel_context *ce)
+{
+ return test_and_set_bit(CONTEXT_BANNED, &ce->flags);
+}
+
+static inline bool
+intel_context_force_single_submission(const struct intel_context *ce)
+{
+ return test_bit(CONTEXT_FORCE_SINGLE_SUBMISSION, &ce->flags);
+}
+
+static inline void
+intel_context_set_single_submission(struct intel_context *ce)
+{
+ __set_bit(CONTEXT_FORCE_SINGLE_SUBMISSION, &ce->flags);
+}
+
+static inline bool
+intel_context_nopreempt(const struct intel_context *ce)
+{
+ return test_bit(CONTEXT_NOPREEMPT, &ce->flags);
+}
+
+static inline void
+intel_context_set_nopreempt(struct intel_context *ce)
+{
+ set_bit(CONTEXT_NOPREEMPT, &ce->flags);
+}
+
+static inline void
+intel_context_clear_nopreempt(struct intel_context *ce)
+{
+ clear_bit(CONTEXT_NOPREEMPT, &ce->flags);
+}
+
#endif /* __INTEL_CONTEXT_H__ */
diff --git a/drivers/gpu/drm/i915/gt/intel_context_types.h b/drivers/gpu/drm/i915/gt/intel_context_types.h
index 6959b05ae5f8..ca1420fb8b53 100644
--- a/drivers/gpu/drm/i915/gt/intel_context_types.h
+++ b/drivers/gpu/drm/i915/gt/intel_context_types.h
@@ -17,6 +17,8 @@
#include "intel_engine_types.h"
#include "intel_sseu.h"
+#define CONTEXT_REDZONE POISON_INUSE
+
struct i915_gem_context;
struct i915_vma;
struct intel_context;
@@ -44,7 +46,7 @@ struct intel_context {
#define intel_context_inflight_count(ce) ptr_unmask_bits((ce)->inflight, 2)
struct i915_address_space *vm;
- struct i915_gem_context *gem_context;
+ struct i915_gem_context __rcu *gem_context;
struct list_head signal_link;
struct list_head signals;
@@ -54,7 +56,13 @@ struct intel_context {
struct intel_timeline *timeline;
unsigned long flags;
-#define CONTEXT_ALLOC_BIT 0
+#define CONTEXT_BARRIER_BIT 0
+#define CONTEXT_ALLOC_BIT 1
+#define CONTEXT_VALID_BIT 2
+#define CONTEXT_USE_SEMAPHORES 3
+#define CONTEXT_BANNED 4
+#define CONTEXT_FORCE_SINGLE_SUBMISSION 5
+#define CONTEXT_NOPREEMPT 6
u32 *lrc_reg_state;
u64 lrc_desc;
diff --git a/drivers/gpu/drm/i915/gt/intel_engine.h b/drivers/gpu/drm/i915/gt/intel_engine.h
index 01765a7ec18f..5df003061e44 100644
--- a/drivers/gpu/drm/i915/gt/intel_engine.h
+++ b/drivers/gpu/drm/i915/gt/intel_engine.h
@@ -29,6 +29,13 @@ struct intel_gt;
#define CACHELINE_BYTES 64
#define CACHELINE_DWORDS (CACHELINE_BYTES / sizeof(u32))
+#define ENGINE_TRACE(e, fmt, ...) do { \
+ const struct intel_engine_cs *e__ __maybe_unused = (e); \
+ GEM_TRACE("%s %s: " fmt, \
+ dev_name(e__->i915->drm.dev), e__->name, \
+ ##__VA_ARGS__); \
+} while (0)
+
/*
* The register defines to be used with the following macros need to accept a
* base param, e.g:
@@ -177,15 +184,15 @@ void intel_engine_stop(struct intel_engine_cs *engine);
void intel_engine_cleanup(struct intel_engine_cs *engine);
int intel_engines_init_mmio(struct intel_gt *gt);
-int intel_engines_setup(struct intel_gt *gt);
int intel_engines_init(struct intel_gt *gt);
-void intel_engines_cleanup(struct intel_gt *gt);
+
+void intel_engines_release(struct intel_gt *gt);
+void intel_engines_free(struct intel_gt *gt);
int intel_engine_init_common(struct intel_engine_cs *engine);
void intel_engine_cleanup_common(struct intel_engine_cs *engine);
int intel_ring_submission_setup(struct intel_engine_cs *engine);
-int intel_ring_submission_init(struct intel_engine_cs *engine);
int intel_engine_stop_cs(struct intel_engine_cs *engine);
void intel_engine_cancel_stop_cs(struct intel_engine_cs *engine);
@@ -195,7 +202,7 @@ void intel_engine_set_hwsp_writemask(struct intel_engine_cs *engine, u32 mask);
u64 intel_engine_get_active_head(const struct intel_engine_cs *engine);
u64 intel_engine_get_last_batch_head(const struct intel_engine_cs *engine);
-void intel_engine_get_instdone(struct intel_engine_cs *engine,
+void intel_engine_get_instdone(const struct intel_engine_cs *engine,
struct intel_instdone *instdone);
void intel_engine_init_execlists(struct intel_engine_cs *engine);
@@ -206,13 +213,11 @@ void intel_engine_fini_breadcrumbs(struct intel_engine_cs *engine);
void intel_engine_disarm_breadcrumbs(struct intel_engine_cs *engine);
static inline void
-intel_engine_queue_breadcrumbs(struct intel_engine_cs *engine)
+intel_engine_signal_breadcrumbs(struct intel_engine_cs *engine)
{
irq_work_queue(&engine->breadcrumbs.irq_work);
}
-void intel_engine_breadcrumbs_irq(struct intel_engine_cs *engine);
-
void intel_engine_reset_breadcrumbs(struct intel_engine_cs *engine);
void intel_engine_fini_breadcrumbs(struct intel_engine_cs *engine);
@@ -270,8 +275,8 @@ gen8_emit_ggtt_write(u32 *cs, u32 value, u32 gtt_offset, u32 flags)
static inline void __intel_engine_reset(struct intel_engine_cs *engine,
bool stalled)
{
- if (engine->reset.reset)
- engine->reset.reset(engine, stalled);
+ if (engine->reset.rewind)
+ engine->reset.rewind(engine, stalled);
engine->serial++; /* contexts lost */
}
@@ -296,7 +301,7 @@ ktime_t intel_engine_get_busy_time(struct intel_engine_cs *engine);
struct i915_request *
intel_engine_find_active_request(struct intel_engine_cs *engine);
-u32 intel_engine_context_size(struct drm_i915_private *i915, u8 class);
+u32 intel_engine_context_size(struct intel_gt *gt, u8 class);
#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
diff --git a/drivers/gpu/drm/i915/gt/intel_engine_cs.c b/drivers/gpu/drm/i915/gt/intel_engine_cs.c
index 813bd3a610d2..f451ef376548 100644
--- a/drivers/gpu/drm/i915/gt/intel_engine_cs.c
+++ b/drivers/gpu/drm/i915/gt/intel_engine_cs.c
@@ -141,7 +141,7 @@ static const struct engine_info intel_engines[] = {
/**
* intel_engine_context_size() - return the size of the context for an engine
- * @dev_priv: i915 device private
+ * @gt: the gt
* @class: engine class
*
* Each engine class may require a different amount of space for a context
@@ -153,17 +153,18 @@ static const struct engine_info intel_engines[] = {
* in LRC mode, but does not include the "shared data page" used with
* GuC submission. The caller should account for this if using the GuC.
*/
-u32 intel_engine_context_size(struct drm_i915_private *dev_priv, u8 class)
+u32 intel_engine_context_size(struct intel_gt *gt, u8 class)
{
+ struct intel_uncore *uncore = gt->uncore;
u32 cxt_size;
BUILD_BUG_ON(I915_GTT_PAGE_SIZE != PAGE_SIZE);
switch (class) {
case RENDER_CLASS:
- switch (INTEL_GEN(dev_priv)) {
+ switch (INTEL_GEN(gt->i915)) {
default:
- MISSING_CASE(INTEL_GEN(dev_priv));
+ MISSING_CASE(INTEL_GEN(gt->i915));
return DEFAULT_LR_CONTEXT_RENDER_SIZE;
case 12:
case 11:
@@ -175,14 +176,14 @@ u32 intel_engine_context_size(struct drm_i915_private *dev_priv, u8 class)
case 8:
return GEN8_LR_CONTEXT_RENDER_SIZE;
case 7:
- if (IS_HASWELL(dev_priv))
+ if (IS_HASWELL(gt->i915))
return HSW_CXT_TOTAL_SIZE;
- cxt_size = I915_READ(GEN7_CXT_SIZE);
+ cxt_size = intel_uncore_read(uncore, GEN7_CXT_SIZE);
return round_up(GEN7_CXT_TOTAL_SIZE(cxt_size) * 64,
PAGE_SIZE);
case 6:
- cxt_size = I915_READ(CXT_SIZE);
+ cxt_size = intel_uncore_read(uncore, CXT_SIZE);
return round_up(GEN6_CXT_TOTAL_SIZE(cxt_size) * 64,
PAGE_SIZE);
case 5:
@@ -197,9 +198,9 @@ u32 intel_engine_context_size(struct drm_i915_private *dev_priv, u8 class)
* minimum allocation anyway so it should all come
* out in the wash.
*/
- cxt_size = I915_READ(CXT_SIZE) + 1;
+ cxt_size = intel_uncore_read(uncore, CXT_SIZE) + 1;
DRM_DEBUG_DRIVER("gen%d CXT_SIZE = %d bytes [0x%08x]\n",
- INTEL_GEN(dev_priv),
+ INTEL_GEN(gt->i915),
cxt_size * 64,
cxt_size - 1);
return round_up(cxt_size * 64, PAGE_SIZE);
@@ -216,7 +217,7 @@ u32 intel_engine_context_size(struct drm_i915_private *dev_priv, u8 class)
case VIDEO_DECODE_CLASS:
case VIDEO_ENHANCEMENT_CLASS:
case COPY_ENGINE_CLASS:
- if (INTEL_GEN(dev_priv) < 8)
+ if (INTEL_GEN(gt->i915) < 8)
return 0;
return GEN8_LR_CONTEXT_OTHER_SIZE;
}
@@ -318,14 +319,7 @@ static int intel_engine_setup(struct intel_gt *gt, enum intel_engine_id id)
engine->props.timeslice_duration_ms =
CONFIG_DRM_I915_TIMESLICE_DURATION;
- /*
- * To be overridden by the backend on setup. However to facilitate
- * cleanup on error during setup, we always provide the destroy vfunc.
- */
- engine->destroy = (typeof(engine->destroy))kfree;
-
- engine->context_size = intel_engine_context_size(gt->i915,
- engine->class);
+ engine->context_size = intel_engine_context_size(gt, engine->class);
if (WARN_ON(engine->context_size > BIT(20)))
engine->context_size = 0;
if (engine->context_size)
@@ -334,6 +328,7 @@ static int intel_engine_setup(struct intel_gt *gt, enum intel_engine_id id)
/* Nothing to do here, execute in order of dependencies */
engine->schedule = NULL;
+ ewma__engine_latency_init(&engine->latency);
seqlock_init(&engine->stats.lock);
ATOMIC_INIT_NOTIFIER_HEAD(&engine->context_status_notifier);
@@ -344,7 +339,6 @@ static int intel_engine_setup(struct intel_gt *gt, enum intel_engine_id id)
gt->engine_class[info->class][info->instance] = engine;
gt->engine[id] = engine;
- intel_engine_add_user(engine);
gt->i915->engine[id] = engine;
return 0;
@@ -390,21 +384,39 @@ static void intel_setup_engine_capabilities(struct intel_gt *gt)
}
/**
- * intel_engines_cleanup() - free the resources allocated for Command Streamers
+ * intel_engines_release() - free the resources allocated for Command Streamers
* @gt: pointer to struct intel_gt
*/
-void intel_engines_cleanup(struct intel_gt *gt)
+void intel_engines_release(struct intel_gt *gt)
{
struct intel_engine_cs *engine;
enum intel_engine_id id;
+	/* Decouple the backend, but keep the layout for late GPU resets */
for_each_engine(engine, gt, id) {
- engine->destroy(engine);
- gt->engine[id] = NULL;
+ if (!engine->release)
+ continue;
+
+ engine->release(engine);
+ engine->release = NULL;
+
+ memset(&engine->reset, 0, sizeof(engine->reset));
+
gt->i915->engine[id] = NULL;
}
}
+void intel_engines_free(struct intel_gt *gt)
+{
+ struct intel_engine_cs *engine;
+ enum intel_engine_id id;
+
+ for_each_engine(engine, gt, id) {
+ kfree(engine);
+ gt->engine[id] = NULL;
+ }
+}
+
/**
* intel_engines_init_mmio() - allocate and prepare the Engine Command Streamers
* @gt: pointer to struct intel_gt
@@ -455,38 +467,7 @@ int intel_engines_init_mmio(struct intel_gt *gt)
return 0;
cleanup:
- intel_engines_cleanup(gt);
- return err;
-}
-
-/**
- * intel_engines_init() - init the Engine Command Streamers
- * @gt: pointer to struct intel_gt
- *
- * Return: non-zero if the initialization failed.
- */
-int intel_engines_init(struct intel_gt *gt)
-{
- int (*init)(struct intel_engine_cs *engine);
- struct intel_engine_cs *engine;
- enum intel_engine_id id;
- int err;
-
- if (HAS_EXECLISTS(gt->i915))
- init = intel_execlists_submission_init;
- else
- init = intel_ring_submission_init;
-
- for_each_engine(engine, gt, id) {
- err = init(engine);
- if (err)
- goto cleanup;
- }
-
- return 0;
-
-cleanup:
- intel_engines_cleanup(gt);
+ intel_engines_free(gt);
return err;
}
@@ -601,7 +582,7 @@ err:
return ret;
}
-static int intel_engine_setup_common(struct intel_engine_cs *engine)
+static int engine_setup_common(struct intel_engine_cs *engine)
{
int err;
@@ -631,49 +612,6 @@ static int intel_engine_setup_common(struct intel_engine_cs *engine)
return 0;
}
-/**
- * intel_engines_setup- setup engine state not requiring hw access
- * @gt: pointer to struct intel_gt
- *
- * Initializes engine structure members shared between legacy and execlists
- * submission modes which do not require hardware access.
- *
- * Typically done early in the submission mode specific engine setup stage.
- */
-int intel_engines_setup(struct intel_gt *gt)
-{
- int (*setup)(struct intel_engine_cs *engine);
- struct intel_engine_cs *engine;
- enum intel_engine_id id;
- int err;
-
- if (HAS_EXECLISTS(gt->i915))
- setup = intel_execlists_submission_setup;
- else
- setup = intel_ring_submission_setup;
-
- for_each_engine(engine, gt, id) {
- err = intel_engine_setup_common(engine);
- if (err)
- goto cleanup;
-
- err = setup(engine);
- if (err)
- goto cleanup;
-
- /* We expect the backend to take control over its state */
- GEM_BUG_ON(engine->destroy == (typeof(engine->destroy))kfree);
-
- GEM_BUG_ON(!engine->cops);
- }
-
- return 0;
-
-cleanup:
- intel_engines_cleanup(gt);
- return err;
-}
-
struct measure_breadcrumb {
struct i915_request rq;
struct intel_timeline timeline;
@@ -757,13 +695,13 @@ create_kernel_context(struct intel_engine_cs *engine)
struct intel_context *ce;
int err;
- ce = intel_context_create(engine->i915->kernel_context, engine);
+ ce = intel_context_create(engine);
if (IS_ERR(ce))
return ce;
- ce->ring = __intel_context_ring_size(SZ_4K);
+ __set_bit(CONTEXT_BARRIER_BIT, &ce->flags);
- err = intel_context_pin(ce);
+ err = intel_context_pin(ce); /* perma-pin so it is always available */
if (err) {
intel_context_put(ce);
return ERR_PTR(err);
@@ -791,13 +729,19 @@ create_kernel_context(struct intel_engine_cs *engine)
*
* Returns zero on success or an error code on failure.
*/
-int intel_engine_init_common(struct intel_engine_cs *engine)
+static int engine_init_common(struct intel_engine_cs *engine)
{
struct intel_context *ce;
int ret;
engine->set_default_submission(engine);
+ ret = measure_breadcrumb_dw(engine);
+ if (ret < 0)
+ return ret;
+
+ engine->emit_fini_breadcrumb_dw = ret;
+
/*
* We may need to do things with the shrinker which
* require us to immediately switch back to the default
@@ -812,18 +756,38 @@ int intel_engine_init_common(struct intel_engine_cs *engine)
engine->kernel_context = ce;
- ret = measure_breadcrumb_dw(engine);
- if (ret < 0)
- goto err_unpin;
+ return 0;
+}
- engine->emit_fini_breadcrumb_dw = ret;
+int intel_engines_init(struct intel_gt *gt)
+{
+ int (*setup)(struct intel_engine_cs *engine);
+ struct intel_engine_cs *engine;
+ enum intel_engine_id id;
+ int err;
- return 0;
+ if (HAS_EXECLISTS(gt->i915))
+ setup = intel_execlists_submission_setup;
+ else
+ setup = intel_ring_submission_setup;
-err_unpin:
- intel_context_unpin(ce);
- intel_context_put(ce);
- return ret;
+ for_each_engine(engine, gt, id) {
+ err = engine_setup_common(engine);
+ if (err)
+ return err;
+
+ err = setup(engine);
+ if (err)
+ return err;
+
+ err = engine_init_common(engine);
+ if (err)
+ return err;
+
+ intel_engine_add_user(engine);
+ }
+
+ return 0;
}
/**
@@ -836,6 +800,7 @@ err_unpin:
void intel_engine_cleanup_common(struct intel_engine_cs *engine)
{
GEM_BUG_ON(!list_empty(&engine->active.requests));
+ tasklet_kill(&engine->execlists.tasklet); /* flush the callback */
cleanup_status_page(engine);
@@ -911,7 +876,7 @@ int intel_engine_stop_cs(struct intel_engine_cs *engine)
if (INTEL_GEN(engine->i915) < 3)
return -ENODEV;
- GEM_TRACE("%s\n", engine->name);
+ ENGINE_TRACE(engine, "\n");
intel_uncore_write_fw(uncore, mode, _MASKED_BIT_ENABLE(STOP_RING));
@@ -920,7 +885,7 @@ int intel_engine_stop_cs(struct intel_engine_cs *engine)
mode, MODE_IDLE, MODE_IDLE,
1000, stop_timeout(engine),
NULL)) {
- GEM_TRACE("%s: timed out on STOP_RING -> IDLE\n", engine->name);
+ ENGINE_TRACE(engine, "timed out on STOP_RING -> IDLE\n");
err = -ETIMEDOUT;
}
@@ -932,7 +897,7 @@ int intel_engine_stop_cs(struct intel_engine_cs *engine)
void intel_engine_cancel_stop_cs(struct intel_engine_cs *engine)
{
- GEM_TRACE("%s\n", engine->name);
+ ENGINE_TRACE(engine, "\n");
ENGINE_WRITE_FW(engine, RING_MI_MODE, _MASKED_BIT_DISABLE(STOP_RING));
}
@@ -949,8 +914,8 @@ const char *i915_cache_level_str(struct drm_i915_private *i915, int type)
}
static u32
-read_subslice_reg(struct intel_engine_cs *engine, int slice, int subslice,
- i915_reg_t reg)
+read_subslice_reg(const struct intel_engine_cs *engine,
+ int slice, int subslice, i915_reg_t reg)
{
struct drm_i915_private *i915 = engine->i915;
struct intel_uncore *uncore = engine->uncore;
@@ -994,7 +959,7 @@ read_subslice_reg(struct intel_engine_cs *engine, int slice, int subslice,
}
/* NB: please notice the memset */
-void intel_engine_get_instdone(struct intel_engine_cs *engine,
+void intel_engine_get_instdone(const struct intel_engine_cs *engine,
struct intel_instdone *instdone)
{
struct drm_i915_private *i915 = engine->i915;
@@ -1478,6 +1443,10 @@ void intel_engine_dump(struct intel_engine_cs *engine,
drm_printf(m, "*** WEDGED ***\n");
drm_printf(m, "\tAwake? %d\n", atomic_read(&engine->wakeref.count));
+ drm_printf(m, "\tBarriers?: %s\n",
+ yesno(!llist_empty(&engine->barrier_tasks)));
+ drm_printf(m, "\tLatency: %luus\n",
+ ewma__engine_latency_read(&engine->latency));
rcu_read_lock();
rq = READ_ONCE(engine->heartbeat.systole);
@@ -1517,9 +1486,9 @@ void intel_engine_dump(struct intel_engine_cs *engine,
print_request_ring(m, rq);
- if (rq->hw_context->lrc_reg_state) {
+ if (rq->context->lrc_reg_state) {
drm_printf(m, "Logical Ring Context:\n");
- hexdump(m, rq->hw_context->lrc_reg_state, PAGE_SIZE);
+ hexdump(m, rq->context->lrc_reg_state, PAGE_SIZE);
}
}
spin_unlock_irqrestore(&engine->active.lock, flags);
@@ -1580,7 +1549,7 @@ int intel_enable_engine_stats(struct intel_engine_cs *engine)
for (port = execlists->pending; (rq = *port); port++) {
/* Exclude any contexts already counted in active */
- if (!intel_context_inflight_count(rq->hw_context))
+ if (!intel_context_inflight_count(rq->context))
engine->stats.active++;
}
diff --git a/drivers/gpu/drm/i915/gt/intel_engine_heartbeat.c b/drivers/gpu/drm/i915/gt/intel_engine_heartbeat.c
index 06aa14c7aa8c..6c6fd185457c 100644
--- a/drivers/gpu/drm/i915/gt/intel_engine_heartbeat.c
+++ b/drivers/gpu/drm/i915/gt/intel_engine_heartbeat.c
@@ -63,15 +63,15 @@ static void heartbeat(struct work_struct *wrk)
struct intel_context *ce = engine->kernel_context;
struct i915_request *rq;
- if (!intel_engine_pm_get_if_awake(engine))
- return;
-
rq = engine->heartbeat.systole;
if (rq && i915_request_completed(rq)) {
i915_request_put(rq);
engine->heartbeat.systole = NULL;
}
+ if (!intel_engine_pm_get_if_awake(engine))
+ return;
+
if (intel_gt_is_wedged(engine->gt))
goto out;
@@ -199,7 +199,7 @@ int intel_engine_pulse(struct intel_engine_cs *engine)
goto out_unlock;
}
- rq->flags |= I915_REQUEST_SENTINEL;
+ __set_bit(I915_FENCE_FLAG_SENTINEL, &rq->fence.flags);
idle_pulse(engine, rq);
__i915_request_commit(rq);
@@ -215,18 +215,26 @@ out_rpm:
int intel_engine_flush_barriers(struct intel_engine_cs *engine)
{
struct i915_request *rq;
+ int err = 0;
if (llist_empty(&engine->barrier_tasks))
return 0;
+ if (!intel_engine_pm_get_if_awake(engine))
+ return 0;
+
rq = i915_request_create(engine->kernel_context);
- if (IS_ERR(rq))
- return PTR_ERR(rq);
+ if (IS_ERR(rq)) {
+ err = PTR_ERR(rq);
+ goto out_rpm;
+ }
idle_pulse(engine, rq);
i915_request_add(rq);
- return 0;
+out_rpm:
+ intel_engine_pm_put(engine);
+ return err;
}
#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
diff --git a/drivers/gpu/drm/i915/gt/intel_engine_pm.c b/drivers/gpu/drm/i915/gt/intel_engine_pm.c
index c1dd0cd3efc7..ea90ab3e396e 100644
--- a/drivers/gpu/drm/i915/gt/intel_engine_pm.c
+++ b/drivers/gpu/drm/i915/gt/intel_engine_pm.c
@@ -6,6 +6,7 @@
#include "i915_drv.h"
+#include "intel_context.h"
#include "intel_engine.h"
#include "intel_engine_heartbeat.h"
#include "intel_engine_pm.h"
@@ -19,9 +20,10 @@ static int __engine_unpark(struct intel_wakeref *wf)
{
struct intel_engine_cs *engine =
container_of(wf, typeof(*engine), wakeref);
+ struct intel_context *ce;
void *map;
- GEM_TRACE("%s\n", engine->name);
+ ENGINE_TRACE(engine, "\n");
intel_gt_pm_get(engine->gt);
@@ -33,6 +35,27 @@ static int __engine_unpark(struct intel_wakeref *wf)
if (!IS_ERR_OR_NULL(map))
engine->pinned_default_state = map;
+ /* Discard stale context state from across idling */
+ ce = engine->kernel_context;
+ if (ce) {
+ GEM_BUG_ON(test_bit(CONTEXT_VALID_BIT, &ce->flags));
+
+ /* First poison the image to verify we never fully trust it */
+ if (IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM) && ce->state) {
+ struct drm_i915_gem_object *obj = ce->state->obj;
+ int type = i915_coherent_map_type(engine->i915);
+
+ map = i915_gem_object_pin_map(obj, type);
+ if (!IS_ERR(map)) {
+ memset(map, CONTEXT_REDZONE, obj->base.size);
+ i915_gem_object_flush_map(obj);
+ i915_gem_object_unpin_map(obj);
+ }
+ }
+
+ ce->ops->reset(ce);
+ }
+
if (engine->unpark)
engine->unpark(engine);
@@ -73,6 +96,15 @@ static inline void __timeline_mark_unlock(struct intel_context *ce,
#endif /* !IS_ENABLED(CONFIG_LOCKDEP) */
+static void duration(struct dma_fence *fence, struct dma_fence_cb *cb)
+{
+ struct i915_request *rq = to_request(fence);
+
+ ewma__engine_latency_add(&rq->engine->latency,
+ ktime_us_delta(rq->fence.timestamp,
+ rq->duration.emitted));
+}
+
static void
__queue_and_release_pm(struct i915_request *rq,
struct intel_timeline *tl,
@@ -80,7 +112,7 @@ __queue_and_release_pm(struct i915_request *rq,
{
struct intel_gt_timelines *timelines = &engine->gt->timelines;
- GEM_TRACE("%s\n", engine->name);
+ ENGINE_TRACE(engine, "\n");
/*
* We have to serialise all potential retirement paths with our
@@ -113,14 +145,16 @@ static bool switch_to_kernel_context(struct intel_engine_cs *engine)
unsigned long flags;
bool result = true;
- /* Already inside the kernel context, safe to power down. */
- if (engine->wakeref_serial == engine->serial)
- return true;
-
/* GPU is pointing to the void, as good as in the kernel context. */
if (intel_gt_is_wedged(engine->gt))
return true;
+ GEM_BUG_ON(!intel_context_is_barrier(ce));
+
+ /* Already inside the kernel context, safe to power down. */
+ if (engine->wakeref_serial == engine->serial)
+ return true;
+
/*
* Note, we do this without taking the timeline->mutex. We cannot
* as we may be called while retiring the kernel context and so
@@ -163,7 +197,18 @@ static bool switch_to_kernel_context(struct intel_engine_cs *engine)
/* Install ourselves as a preemption barrier */
rq->sched.attr.priority = I915_PRIORITY_BARRIER;
- __i915_request_commit(rq);
+ if (likely(!__i915_request_commit(rq))) { /* engine should be idle! */
+ /*
+ * Use an interrupt for precise measurement of duration,
+ * otherwise we rely on someone else retiring all the requests
+ * which may delay the signaling (i.e. we will likely wait
+		 * for the background request retirement that runs every
+ * second or two).
+ */
+ BUILD_BUG_ON(sizeof(rq->duration) > sizeof(rq->submitq));
+ dma_fence_add_callback(&rq->fence, &rq->duration.cb, duration);
+ rq->duration.emitted = ktime_get();
+ }
/* Expose ourselves to the world */
__queue_and_release_pm(rq, ce->timeline, engine);
@@ -183,7 +228,7 @@ static void call_idle_barriers(struct intel_engine_cs *engine)
container_of((struct list_head *)node,
typeof(*cb), node);
- cb->func(NULL, cb);
+ cb->func(ERR_PTR(-EAGAIN), cb);
}
}
@@ -204,7 +249,7 @@ static int __engine_park(struct intel_wakeref *wf)
if (!switch_to_kernel_context(engine))
return -EBUSY;
- GEM_TRACE("%s\n", engine->name);
+ ENGINE_TRACE(engine, "\n");
call_idle_barriers(engine); /* cleanup after wedging */
diff --git a/drivers/gpu/drm/i915/gt/intel_engine_pm.h b/drivers/gpu/drm/i915/gt/intel_engine_pm.h
index 24e20344dc22..e52c2b0cb245 100644
--- a/drivers/gpu/drm/i915/gt/intel_engine_pm.h
+++ b/drivers/gpu/drm/i915/gt/intel_engine_pm.h
@@ -7,6 +7,7 @@
#ifndef INTEL_ENGINE_PM_H
#define INTEL_ENGINE_PM_H
+#include "i915_request.h"
#include "intel_engine_types.h"
#include "intel_wakeref.h"
@@ -41,6 +42,26 @@ static inline void intel_engine_pm_flush(struct intel_engine_cs *engine)
intel_wakeref_unlock_wait(&engine->wakeref);
}
+static inline struct i915_request *
+intel_engine_create_kernel_request(struct intel_engine_cs *engine)
+{
+ struct i915_request *rq;
+
+ /*
+ * The engine->kernel_context is special as it is used inside
+ * the engine-pm barrier (see __engine_park()), circumventing
+ * the usual mutexes and relying on the engine-pm barrier
+ * instead. So whenever we use the engine->kernel_context
+ * outside of the barrier, we must manually handle the
+ * engine wakeref to serialise with the use inside.
+ */
+ intel_engine_pm_get(engine);
+ rq = i915_request_create(engine->kernel_context);
+ intel_engine_pm_put(engine);
+
+ return rq;
+}
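+
+/*
+ * Illustrative use (sketch), mirroring how requests are handled elsewhere:
+ *
+ *	rq = intel_engine_create_kernel_request(engine);
+ *	if (IS_ERR(rq))
+ *		return PTR_ERR(rq);
+ *	i915_request_add(rq);
+ */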
+
void intel_engine_init__pm(struct intel_engine_cs *engine);
#endif /* INTEL_ENGINE_PM_H */
diff --git a/drivers/gpu/drm/i915/gt/intel_engine_types.h b/drivers/gpu/drm/i915/gt/intel_engine_types.h
index 17f1f1441efc..350da59e605b 100644
--- a/drivers/gpu/drm/i915/gt/intel_engine_types.h
+++ b/drivers/gpu/drm/i915/gt/intel_engine_types.h
@@ -7,6 +7,7 @@
#ifndef __INTEL_ENGINE_TYPES__
#define __INTEL_ENGINE_TYPES__
+#include <linux/average.h>
#include <linux/hashtable.h>
#include <linux/irq_work.h>
#include <linux/kref.h>
@@ -119,6 +120,9 @@ enum intel_engine_id {
#define INVALID_ENGINE ((enum intel_engine_id)-1)
};
+/* A simple estimator for the round-trip latency of an engine */
+DECLARE_EWMA(_engine_latency, 6, 4)
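+/* (6 bits of fixed-point precision; each new sample weighted 1/4) */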
+
struct st_preempt_hang {
struct completion completion;
unsigned int count;
@@ -274,8 +278,8 @@ struct intel_engine_cs {
u8 class;
u8 instance;
- u8 uabi_class;
- u8 uabi_instance;
+ u16 uabi_class;
+ u16 uabi_instance;
u32 uabi_capabilities;
u32 context_size;
@@ -316,6 +320,13 @@ struct intel_engine_cs {
struct intel_timeline *timeline;
} legacy;
+ /*
+ * We track the average duration of the idle pulse on parking the
+ * engine to keep an estimate of how fast the engine is
+ * under ideal conditions.
+ */
+ struct ewma__engine_latency latency;
+
/* Rather than have every client wait upon all user interrupts,
* with the herd waking after every interrupt and each doing the
* heavyweight seqno dance, we delegate the task (of being the
@@ -389,7 +400,10 @@ struct intel_engine_cs {
struct {
void (*prepare)(struct intel_engine_cs *engine);
- void (*reset)(struct intel_engine_cs *engine, bool stalled);
+
+ void (*rewind)(struct intel_engine_cs *engine, bool stalled);
+ void (*cancel)(struct intel_engine_cs *engine);
+
void (*finish)(struct intel_engine_cs *engine);
} reset;
@@ -439,15 +453,7 @@ struct intel_engine_cs {
void (*schedule)(struct i915_request *request,
const struct i915_sched_attr *attr);
- /*
- * Cancel all requests on the hardware, or queued for execution.
- * This should only cancel the ready requests that have been
- * submitted to the engine (via the engine->submit_request callback).
- * This is called when marking the device as wedged.
- */
- void (*cancel_requests)(struct intel_engine_cs *engine);
-
- void (*destroy)(struct intel_engine_cs *engine);
+ void (*release)(struct intel_engine_cs *engine);
struct intel_engine_execlists execlists;
diff --git a/drivers/gpu/drm/i915/gt/intel_engine_user.c b/drivers/gpu/drm/i915/gt/intel_engine_user.c
index 7f7150a733f4..9e7f12bef828 100644
--- a/drivers/gpu/drm/i915/gt/intel_engine_user.c
+++ b/drivers/gpu/drm/i915/gt/intel_engine_user.c
@@ -11,6 +11,7 @@
#include "i915_drv.h"
#include "intel_engine.h"
#include "intel_engine_user.h"
+#include "intel_gt.h"
struct intel_engine_cs *
intel_engine_lookup_user(struct drm_i915_private *i915, u8 class, u8 instance)
@@ -200,6 +201,9 @@ void intel_engines_driver_register(struct drm_i915_private *i915)
uabi_node);
char old[sizeof(engine->name)];
+ if (intel_gt_has_init_error(engine->gt))
+ continue; /* ignore incomplete engines */
+
GEM_BUG_ON(engine->class >= ARRAY_SIZE(uabi_classes));
engine->uabi_class = uabi_classes[engine->class];
diff --git a/drivers/gpu/drm/i915/gt/intel_ggtt.c b/drivers/gpu/drm/i915/gt/intel_ggtt.c
new file mode 100644
index 000000000000..531d501be01f
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/intel_ggtt.c
@@ -0,0 +1,1486 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2020 Intel Corporation
+ */
+
+#include <linux/stop_machine.h>
+
+#include <asm/set_memory.h>
+#include <asm/smp.h>
+
+#include "intel_gt.h"
+#include "i915_drv.h"
+#include "i915_scatterlist.h"
+#include "i915_vgpu.h"
+
+#include "intel_gtt.h"
+
+static int
+i915_get_ggtt_vma_pages(struct i915_vma *vma);
+
+static void i915_ggtt_color_adjust(const struct drm_mm_node *node,
+ unsigned long color,
+ u64 *start,
+ u64 *end)
+{
+ if (i915_node_color_differs(node, color))
+ *start += I915_GTT_PAGE_SIZE;
+
+ /*
+ * Also leave a space between the unallocated reserved node after the
+ * GTT and any objects within the GTT, i.e. we use the color adjustment
+ * to insert a guard page to prevent prefetches crossing over the
+ * GTT boundary.
+ */
+ node = list_next_entry(node, node_list);
+ if (node->color != color)
+ *end -= I915_GTT_PAGE_SIZE;
+}
+
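To make the effect concrete (our illustration, not part of the change): drm_mm invokes this hook while scanning a hole, and the hook shrinks the usable range by one GTT page on each side whose neighbour carries a different colour:

/*
 * A hole [0x0000, 0x3000) bordered by nodes of a different colour is
 * allocated from as if it were [0x1000, 0x2000): one guard page
 * against the preceding node, one against the following node.
 */
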
+static int ggtt_init_hw(struct i915_ggtt *ggtt)
+{
+ struct drm_i915_private *i915 = ggtt->vm.i915;
+
+ i915_address_space_init(&ggtt->vm, VM_CLASS_GGTT);
+
+ ggtt->vm.is_ggtt = true;
+
+ /* Only VLV supports read-only GGTT mappings */
+ ggtt->vm.has_read_only = IS_VALLEYVIEW(i915);
+
+ if (!HAS_LLC(i915) && !HAS_PPGTT(i915))
+ ggtt->vm.mm.color_adjust = i915_ggtt_color_adjust;
+
+ if (ggtt->mappable_end) {
+ if (!io_mapping_init_wc(&ggtt->iomap,
+ ggtt->gmadr.start,
+ ggtt->mappable_end)) {
+ ggtt->vm.cleanup(&ggtt->vm);
+ return -EIO;
+ }
+
+ ggtt->mtrr = arch_phys_wc_add(ggtt->gmadr.start,
+ ggtt->mappable_end);
+ }
+
+ i915_ggtt_init_fences(ggtt);
+
+ return 0;
+}
+
+/**
+ * i915_ggtt_init_hw - Initialize GGTT hardware
+ * @i915: i915 device
+ */
+int i915_ggtt_init_hw(struct drm_i915_private *i915)
+{
+ int ret;
+
+ stash_init(&i915->mm.wc_stash);
+
+ /*
+ * Note that we use page colouring to enforce a guard page at the
+ * end of the address space. This is required as the CS may prefetch
+ * beyond the end of the batch buffer, across the page boundary,
+ * and beyond the end of the GTT if we do not provide a guard.
+ */
+ ret = ggtt_init_hw(&i915->ggtt);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+/*
+ * Certain Gen5 chipsets require idling the GPU before
+ * unmapping anything from the GTT when VT-d is enabled.
+ */
+static bool needs_idle_maps(struct drm_i915_private *i915)
+{
+ /*
+ * Query intel_iommu to see if we need the workaround. Presumably that
+ * was loaded first.
+ */
+ return IS_GEN(i915, 5) && IS_MOBILE(i915) && intel_vtd_active();
+}
+
+static void ggtt_suspend_mappings(struct i915_ggtt *ggtt)
+{
+ struct drm_i915_private *i915 = ggtt->vm.i915;
+
+ /*
+ * Don't bother messing with faults pre GEN6 as we have little
+ * documentation supporting that it's a good idea.
+ */
+ if (INTEL_GEN(i915) < 6)
+ return;
+
+ intel_gt_check_and_clear_faults(ggtt->vm.gt);
+
+ ggtt->vm.clear_range(&ggtt->vm, 0, ggtt->vm.total);
+
+ ggtt->invalidate(ggtt);
+}
+
+void i915_gem_suspend_gtt_mappings(struct drm_i915_private *i915)
+{
+ ggtt_suspend_mappings(&i915->ggtt);
+}
+
+void gen6_ggtt_invalidate(struct i915_ggtt *ggtt)
+{
+ struct intel_uncore *uncore = ggtt->vm.gt->uncore;
+
+ spin_lock_irq(&uncore->lock);
+ intel_uncore_write_fw(uncore, GFX_FLSH_CNTL_GEN6, GFX_FLSH_CNTL_EN);
+ intel_uncore_read_fw(uncore, GFX_FLSH_CNTL_GEN6);
+ spin_unlock_irq(&uncore->lock);
+}
+
+static void gen8_ggtt_invalidate(struct i915_ggtt *ggtt)
+{
+ struct intel_uncore *uncore = ggtt->vm.gt->uncore;
+
+ /*
+ * Note that as an uncached mmio write, this will flush the
+ * WCB of the writes into the GGTT before it triggers the invalidate.
+ */
+ intel_uncore_write_fw(uncore, GFX_FLSH_CNTL_GEN6, GFX_FLSH_CNTL_EN);
+}
+
+static void guc_ggtt_invalidate(struct i915_ggtt *ggtt)
+{
+ struct intel_uncore *uncore = ggtt->vm.gt->uncore;
+ struct drm_i915_private *i915 = ggtt->vm.i915;
+
+ gen8_ggtt_invalidate(ggtt);
+
+ if (INTEL_GEN(i915) >= 12)
+ intel_uncore_write_fw(uncore, GEN12_GUC_TLB_INV_CR,
+ GEN12_GUC_TLB_INV_CR_INVALIDATE);
+ else
+ intel_uncore_write_fw(uncore, GEN8_GTCR, GEN8_GTCR_INVALIDATE);
+}
+
+static void gmch_ggtt_invalidate(struct i915_ggtt *ggtt)
+{
+ intel_gtt_chipset_flush();
+}
+
+static void gen8_set_pte(void __iomem *addr, gen8_pte_t pte)
+{
+ writeq(pte, addr);
+}
+
+static void gen8_ggtt_insert_page(struct i915_address_space *vm,
+ dma_addr_t addr,
+ u64 offset,
+ enum i915_cache_level level,
+ u32 unused)
+{
+ struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
+ gen8_pte_t __iomem *pte =
+ (gen8_pte_t __iomem *)ggtt->gsm + offset / I915_GTT_PAGE_SIZE;
+
+ gen8_set_pte(pte, gen8_pte_encode(addr, level, 0));
+
+ ggtt->invalidate(ggtt);
+}
+
+static void gen8_ggtt_insert_entries(struct i915_address_space *vm,
+ struct i915_vma *vma,
+ enum i915_cache_level level,
+ u32 flags)
+{
+ struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
+ struct sgt_iter sgt_iter;
+ gen8_pte_t __iomem *gtt_entries;
+ const gen8_pte_t pte_encode = gen8_pte_encode(0, level, 0);
+ dma_addr_t addr;
+
+ /*
+ * Note that we ignore PTE_READ_ONLY here. The caller must be careful
+ * not to allow the user to override access to a read only page.
+ */
+
+ gtt_entries = (gen8_pte_t __iomem *)ggtt->gsm;
+ gtt_entries += vma->node.start / I915_GTT_PAGE_SIZE;
+ for_each_sgt_daddr(addr, sgt_iter, vma->pages)
+ gen8_set_pte(gtt_entries++, pte_encode | addr);
+
+ /*
+ * We want to flush the TLBs only after we're certain all the PTE
+ * updates have finished.
+ */
+ ggtt->invalidate(ggtt);
+}
+
+static void gen6_ggtt_insert_page(struct i915_address_space *vm,
+ dma_addr_t addr,
+ u64 offset,
+ enum i915_cache_level level,
+ u32 flags)
+{
+ struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
+ gen6_pte_t __iomem *pte =
+ (gen6_pte_t __iomem *)ggtt->gsm + offset / I915_GTT_PAGE_SIZE;
+
+ iowrite32(vm->pte_encode(addr, level, flags), pte);
+
+ ggtt->invalidate(ggtt);
+}
+
+/*
+ * Binds an object into the global gtt with the specified cache level.
+ * The object will be accessible to the GPU via commands whose operands
+ * reference offsets within the global GTT as well as accessible by the GPU
+ * through the GMADR mapped BAR (i915->mm.gtt->gtt).
+ */
+static void gen6_ggtt_insert_entries(struct i915_address_space *vm,
+ struct i915_vma *vma,
+ enum i915_cache_level level,
+ u32 flags)
+{
+ struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
+ gen6_pte_t __iomem *entries = (gen6_pte_t __iomem *)ggtt->gsm;
+ unsigned int i = vma->node.start / I915_GTT_PAGE_SIZE;
+ struct sgt_iter iter;
+ dma_addr_t addr;
+
+ for_each_sgt_daddr(addr, iter, vma->pages)
+ iowrite32(vm->pte_encode(addr, level, flags), &entries[i++]);
+
+ /*
+ * We want to flush the TLBs only after we're certain all the PTE
+ * updates have finished.
+ */
+ ggtt->invalidate(ggtt);
+}
+
+static void nop_clear_range(struct i915_address_space *vm,
+ u64 start, u64 length)
+{
+}
+
+static void gen8_ggtt_clear_range(struct i915_address_space *vm,
+ u64 start, u64 length)
+{
+ struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
+ unsigned int first_entry = start / I915_GTT_PAGE_SIZE;
+ unsigned int num_entries = length / I915_GTT_PAGE_SIZE;
+ const gen8_pte_t scratch_pte = vm->scratch[0].encode;
+ gen8_pte_t __iomem *gtt_base =
+ (gen8_pte_t __iomem *)ggtt->gsm + first_entry;
+ const int max_entries = ggtt_total_entries(ggtt) - first_entry;
+ int i;
+
+ if (WARN(num_entries > max_entries,
+ "First entry = %d; Num entries = %d (max=%d)\n",
+ first_entry, num_entries, max_entries))
+ num_entries = max_entries;
+
+ for (i = 0; i < num_entries; i++)
+ gen8_set_pte(&gtt_base[i], scratch_pte);
+}
+
+static void bxt_vtd_ggtt_wa(struct i915_address_space *vm)
+{
+ /*
+ * Make sure the internal GAM fifo has been cleared of all GTT
+ * writes before exiting stop_machine(). This guarantees that
+ * any aperture accesses waiting to start in another process
+ * cannot back up behind the GTT writes causing a hang.
+ * The register can be any arbitrary GAM register.
+ */
+ intel_uncore_posting_read_fw(vm->gt->uncore, GFX_FLSH_CNTL_GEN6);
+}
+
+struct insert_page {
+ struct i915_address_space *vm;
+ dma_addr_t addr;
+ u64 offset;
+ enum i915_cache_level level;
+};
+
+static int bxt_vtd_ggtt_insert_page__cb(void *_arg)
+{
+ struct insert_page *arg = _arg;
+
+ gen8_ggtt_insert_page(arg->vm, arg->addr, arg->offset, arg->level, 0);
+ bxt_vtd_ggtt_wa(arg->vm);
+
+ return 0;
+}
+
+static void bxt_vtd_ggtt_insert_page__BKL(struct i915_address_space *vm,
+ dma_addr_t addr,
+ u64 offset,
+ enum i915_cache_level level,
+ u32 unused)
+{
+ struct insert_page arg = { vm, addr, offset, level };
+
+ stop_machine(bxt_vtd_ggtt_insert_page__cb, &arg, NULL);
+}
+
+struct insert_entries {
+ struct i915_address_space *vm;
+ struct i915_vma *vma;
+ enum i915_cache_level level;
+ u32 flags;
+};
+
+static int bxt_vtd_ggtt_insert_entries__cb(void *_arg)
+{
+ struct insert_entries *arg = _arg;
+
+ gen8_ggtt_insert_entries(arg->vm, arg->vma, arg->level, arg->flags);
+ bxt_vtd_ggtt_wa(arg->vm);
+
+ return 0;
+}
+
+static void bxt_vtd_ggtt_insert_entries__BKL(struct i915_address_space *vm,
+ struct i915_vma *vma,
+ enum i915_cache_level level,
+ u32 flags)
+{
+ struct insert_entries arg = { vm, vma, level, flags };
+
+ stop_machine(bxt_vtd_ggtt_insert_entries__cb, &arg, NULL);
+}
+
+struct clear_range {
+ struct i915_address_space *vm;
+ u64 start;
+ u64 length;
+};
+
+static int bxt_vtd_ggtt_clear_range__cb(void *_arg)
+{
+ struct clear_range *arg = _arg;
+
+ gen8_ggtt_clear_range(arg->vm, arg->start, arg->length);
+ bxt_vtd_ggtt_wa(arg->vm);
+
+ return 0;
+}
+
+static void bxt_vtd_ggtt_clear_range__BKL(struct i915_address_space *vm,
+ u64 start,
+ u64 length)
+{
+ struct clear_range arg = { vm, start, length };
+
+ stop_machine(bxt_vtd_ggtt_clear_range__cb, &arg, NULL);
+}
+
+static void gen6_ggtt_clear_range(struct i915_address_space *vm,
+ u64 start, u64 length)
+{
+ struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
+ unsigned int first_entry = start / I915_GTT_PAGE_SIZE;
+ unsigned int num_entries = length / I915_GTT_PAGE_SIZE;
+ gen6_pte_t scratch_pte, __iomem *gtt_base =
+ (gen6_pte_t __iomem *)ggtt->gsm + first_entry;
+ const int max_entries = ggtt_total_entries(ggtt) - first_entry;
+ int i;
+
+ if (WARN(num_entries > max_entries,
+ "First entry = %d; Num entries = %d (max=%d)\n",
+ first_entry, num_entries, max_entries))
+ num_entries = max_entries;
+
+ scratch_pte = vm->scratch[0].encode;
+ for (i = 0; i < num_entries; i++)
+ iowrite32(scratch_pte, &gtt_base[i]);
+}
+
+static void i915_ggtt_insert_page(struct i915_address_space *vm,
+ dma_addr_t addr,
+ u64 offset,
+ enum i915_cache_level cache_level,
+ u32 unused)
+{
+ unsigned int flags = (cache_level == I915_CACHE_NONE) ?
+ AGP_USER_MEMORY : AGP_USER_CACHED_MEMORY;
+
+ intel_gtt_insert_page(addr, offset >> PAGE_SHIFT, flags);
+}
+
+static void i915_ggtt_insert_entries(struct i915_address_space *vm,
+ struct i915_vma *vma,
+ enum i915_cache_level cache_level,
+ u32 unused)
+{
+ unsigned int flags = (cache_level == I915_CACHE_NONE) ?
+ AGP_USER_MEMORY : AGP_USER_CACHED_MEMORY;
+
+ intel_gtt_insert_sg_entries(vma->pages, vma->node.start >> PAGE_SHIFT,
+ flags);
+}
+
+static void i915_ggtt_clear_range(struct i915_address_space *vm,
+ u64 start, u64 length)
+{
+ intel_gtt_clear_range(start >> PAGE_SHIFT, length >> PAGE_SHIFT);
+}
+
+static int ggtt_bind_vma(struct i915_vma *vma,
+ enum i915_cache_level cache_level,
+ u32 flags)
+{
+ struct drm_i915_gem_object *obj = vma->obj;
+ u32 pte_flags;
+
+ /* Applicable to VLV (gen8+ do not support RO in the GGTT) */
+ pte_flags = 0;
+ if (i915_gem_object_is_readonly(obj))
+ pte_flags |= PTE_READ_ONLY;
+
+ vma->vm->insert_entries(vma->vm, vma, cache_level, pte_flags);
+
+ vma->page_sizes.gtt = I915_GTT_PAGE_SIZE;
+
+ /*
+ * Without aliasing PPGTT there's no difference between
+ * GLOBAL/LOCAL_BIND, it's all the same ptes. Hence unconditionally
+ * upgrade to both bound if we bind either to avoid double-binding.
+ */
+ atomic_or(I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND, &vma->flags);
+
+ return 0;
+}
+
+static void ggtt_unbind_vma(struct i915_vma *vma)
+{
+ vma->vm->clear_range(vma->vm, vma->node.start, vma->size);
+}
+
+static int ggtt_reserve_guc_top(struct i915_ggtt *ggtt)
+{
+ u64 size;
+ int ret;
+
+ if (!USES_GUC(ggtt->vm.i915))
+ return 0;
+
+ GEM_BUG_ON(ggtt->vm.total <= GUC_GGTT_TOP);
+ size = ggtt->vm.total - GUC_GGTT_TOP;
+
+ ret = i915_gem_gtt_reserve(&ggtt->vm, &ggtt->uc_fw, size,
+ GUC_GGTT_TOP, I915_COLOR_UNEVICTABLE,
+ PIN_NOEVICT);
+ if (ret)
+ DRM_DEBUG_DRIVER("Failed to reserve top of GGTT for GuC\n");
+
+ return ret;
+}
+
+static void ggtt_release_guc_top(struct i915_ggtt *ggtt)
+{
+ if (drm_mm_node_allocated(&ggtt->uc_fw))
+ drm_mm_remove_node(&ggtt->uc_fw);
+}
+
+static void cleanup_init_ggtt(struct i915_ggtt *ggtt)
+{
+ ggtt_release_guc_top(ggtt);
+ if (drm_mm_node_allocated(&ggtt->error_capture))
+ drm_mm_remove_node(&ggtt->error_capture);
+ mutex_destroy(&ggtt->error_mutex);
+}
+
+static int init_ggtt(struct i915_ggtt *ggtt)
+{
+ /*
+ * Let GEM Manage all of the aperture.
+ *
+ * However, leave one page at the end still bound to the scratch page.
+ * There are a number of places where the hardware apparently prefetches
+ * past the end of the object, and we've seen multiple hangs with the
+ * GPU head pointer stuck in a batchbuffer bound at the last page of the
+ * aperture. One page should be enough to keep any prefetching inside
+ * of the aperture.
+ */
+ unsigned long hole_start, hole_end;
+ struct drm_mm_node *entry;
+ int ret;
+
+ /*
+ * GuC requires all resources that we're sharing with it to be placed in
+ * non-WOPCM memory. If GuC is not present or not in use we still need a
+ * small bias as ring wraparound at offset 0 sometimes hangs. No idea
+ * why.
+ */
+ ggtt->pin_bias = max_t(u32, I915_GTT_PAGE_SIZE,
+ intel_wopcm_guc_size(&ggtt->vm.i915->wopcm));
+
+ ret = intel_vgt_balloon(ggtt);
+ if (ret)
+ return ret;
+
+ mutex_init(&ggtt->error_mutex);
+ if (ggtt->mappable_end) {
+ /* Reserve a mappable slot for our lockless error capture */
+ ret = drm_mm_insert_node_in_range(&ggtt->vm.mm,
+ &ggtt->error_capture,
+ PAGE_SIZE, 0,
+ I915_COLOR_UNEVICTABLE,
+ 0, ggtt->mappable_end,
+ DRM_MM_INSERT_LOW);
+ if (ret)
+ return ret;
+ }
+
+ /*
+ * The upper portion of the GuC address space has a sizeable hole
+ * (several MB) that is inaccessible by GuC. Reserve this range within
+ * GGTT as it can comfortably hold GuC/HuC firmware images.
+ */
+ ret = ggtt_reserve_guc_top(ggtt);
+ if (ret)
+ goto err;
+
+ /* Clear any non-preallocated blocks */
+ drm_mm_for_each_hole(entry, &ggtt->vm.mm, hole_start, hole_end) {
+ DRM_DEBUG_KMS("clearing unused GTT space: [%lx, %lx]\n",
+ hole_start, hole_end);
+ ggtt->vm.clear_range(&ggtt->vm, hole_start,
+ hole_end - hole_start);
+ }
+
+ /* And finally clear the reserved guard page */
+ ggtt->vm.clear_range(&ggtt->vm, ggtt->vm.total - PAGE_SIZE, PAGE_SIZE);
+
+ return 0;
+
+err:
+ cleanup_init_ggtt(ggtt);
+ return ret;
+}
+
+static int aliasing_gtt_bind_vma(struct i915_vma *vma,
+ enum i915_cache_level cache_level,
+ u32 flags)
+{
+ u32 pte_flags;
+ int ret;
+
+ /* Currently applicable only to VLV */
+ pte_flags = 0;
+ if (i915_gem_object_is_readonly(vma->obj))
+ pte_flags |= PTE_READ_ONLY;
+
+ if (flags & I915_VMA_LOCAL_BIND) {
+ struct i915_ppgtt *alias = i915_vm_to_ggtt(vma->vm)->alias;
+
+ if (flags & I915_VMA_ALLOC) {
+ ret = alias->vm.allocate_va_range(&alias->vm,
+ vma->node.start,
+ vma->size);
+ if (ret)
+ return ret;
+
+ set_bit(I915_VMA_ALLOC_BIT, __i915_vma_flags(vma));
+ }
+
+ GEM_BUG_ON(!test_bit(I915_VMA_ALLOC_BIT,
+ __i915_vma_flags(vma)));
+ alias->vm.insert_entries(&alias->vm, vma,
+ cache_level, pte_flags);
+ }
+
+ if (flags & I915_VMA_GLOBAL_BIND)
+ vma->vm->insert_entries(vma->vm, vma, cache_level, pte_flags);
+
+ return 0;
+}
+
+static void aliasing_gtt_unbind_vma(struct i915_vma *vma)
+{
+ if (i915_vma_is_bound(vma, I915_VMA_GLOBAL_BIND)) {
+ struct i915_address_space *vm = vma->vm;
+
+ vm->clear_range(vm, vma->node.start, vma->size);
+ }
+
+ if (test_and_clear_bit(I915_VMA_ALLOC_BIT, __i915_vma_flags(vma))) {
+ struct i915_address_space *vm =
+ &i915_vm_to_ggtt(vma->vm)->alias->vm;
+
+ vm->clear_range(vm, vma->node.start, vma->size);
+ }
+}
+
+static int init_aliasing_ppgtt(struct i915_ggtt *ggtt)
+{
+ struct i915_ppgtt *ppgtt;
+ int err;
+
+ ppgtt = i915_ppgtt_create(ggtt->vm.gt);
+ if (IS_ERR(ppgtt))
+ return PTR_ERR(ppgtt);
+
+ if (GEM_WARN_ON(ppgtt->vm.total < ggtt->vm.total)) {
+ err = -ENODEV;
+ goto err_ppgtt;
+ }
+
+ /*
+ * Note we only pre-allocate as far as the end of the global
+ * GTT. On 48b / 4-level page-tables, the difference is very,
+ * very significant! We have to preallocate as GVT/vgpu does
+ * not like the page directory disappearing.
+ */
+ err = ppgtt->vm.allocate_va_range(&ppgtt->vm, 0, ggtt->vm.total);
+ if (err)
+ goto err_ppgtt;
+
+ ggtt->alias = ppgtt;
+ ggtt->vm.bind_async_flags |= ppgtt->vm.bind_async_flags;
+
+ GEM_BUG_ON(ggtt->vm.vma_ops.bind_vma != ggtt_bind_vma);
+ ggtt->vm.vma_ops.bind_vma = aliasing_gtt_bind_vma;
+
+ GEM_BUG_ON(ggtt->vm.vma_ops.unbind_vma != ggtt_unbind_vma);
+ ggtt->vm.vma_ops.unbind_vma = aliasing_gtt_unbind_vma;
+
+ return 0;
+
+err_ppgtt:
+ i915_vm_put(&ppgtt->vm);
+ return err;
+}
+
+static void fini_aliasing_ppgtt(struct i915_ggtt *ggtt)
+{
+ struct i915_ppgtt *ppgtt;
+
+ ppgtt = fetch_and_zero(&ggtt->alias);
+ if (!ppgtt)
+ return;
+
+ i915_vm_put(&ppgtt->vm);
+
+ ggtt->vm.vma_ops.bind_vma = ggtt_bind_vma;
+ ggtt->vm.vma_ops.unbind_vma = ggtt_unbind_vma;
+}
+
+int i915_init_ggtt(struct drm_i915_private *i915)
+{
+ int ret;
+
+ ret = init_ggtt(&i915->ggtt);
+ if (ret)
+ return ret;
+
+ if (INTEL_PPGTT(i915) == INTEL_PPGTT_ALIASING) {
+ ret = init_aliasing_ppgtt(&i915->ggtt);
+ if (ret)
+ cleanup_init_ggtt(&i915->ggtt);
+ }
+
+ return 0;
+}
+
+static void ggtt_cleanup_hw(struct i915_ggtt *ggtt)
+{
+ struct i915_vma *vma, *vn;
+
+ atomic_set(&ggtt->vm.open, 0);
+
+ rcu_barrier(); /* flush the RCU'ed __i915_vm_release */
+ flush_workqueue(ggtt->vm.i915->wq);
+
+ mutex_lock(&ggtt->vm.mutex);
+
+ list_for_each_entry_safe(vma, vn, &ggtt->vm.bound_list, vm_link)
+ WARN_ON(__i915_vma_unbind(vma));
+
+ if (drm_mm_node_allocated(&ggtt->error_capture))
+ drm_mm_remove_node(&ggtt->error_capture);
+ mutex_destroy(&ggtt->error_mutex);
+
+ ggtt_release_guc_top(ggtt);
+ intel_vgt_deballoon(ggtt);
+
+ ggtt->vm.cleanup(&ggtt->vm);
+
+ mutex_unlock(&ggtt->vm.mutex);
+ i915_address_space_fini(&ggtt->vm);
+
+ arch_phys_wc_del(ggtt->mtrr);
+
+ if (ggtt->iomap.size)
+ io_mapping_fini(&ggtt->iomap);
+}
+
+/**
+ * i915_ggtt_driver_release - Clean up GGTT hardware initialization
+ * @i915: i915 device
+ */
+void i915_ggtt_driver_release(struct drm_i915_private *i915)
+{
+ struct pagevec *pvec;
+
+ fini_aliasing_ppgtt(&i915->ggtt);
+
+ ggtt_cleanup_hw(&i915->ggtt);
+
+ pvec = &i915->mm.wc_stash.pvec;
+ if (pvec->nr) {
+ set_pages_array_wb(pvec->pages, pvec->nr);
+ __pagevec_release(pvec);
+ }
+}
+
+static unsigned int gen6_get_total_gtt_size(u16 snb_gmch_ctl)
+{
+ snb_gmch_ctl >>= SNB_GMCH_GGMS_SHIFT;
+ snb_gmch_ctl &= SNB_GMCH_GGMS_MASK;
+ return snb_gmch_ctl << 20;
+}
+
+static unsigned int gen8_get_total_gtt_size(u16 bdw_gmch_ctl)
+{
+ bdw_gmch_ctl >>= BDW_GMCH_GGMS_SHIFT;
+ bdw_gmch_ctl &= BDW_GMCH_GGMS_MASK;
+ if (bdw_gmch_ctl)
+ bdw_gmch_ctl = 1 << bdw_gmch_ctl;
+
+#ifdef CONFIG_X86_32
+ /* Limit 32b platforms to a 2GB GGTT: 4 << 20 / pte size * I915_GTT_PAGE_SIZE */
+ if (bdw_gmch_ctl > 4)
+ bdw_gmch_ctl = 4;
+#endif
+
+ return bdw_gmch_ctl << 20;
+}
+
+static unsigned int chv_get_total_gtt_size(u16 gmch_ctrl)
+{
+ gmch_ctrl >>= SNB_GMCH_GGMS_SHIFT;
+ gmch_ctrl &= SNB_GMCH_GGMS_MASK;
+
+ if (gmch_ctrl)
+ return 1 << (20 + gmch_ctrl);
+
+ return 0;
+}
+
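A worked decode of the helpers above (sample values ours, not from the patch): on gen8 a GGMS field of 3 selects 1 << 3 == 8MB of PTE space, and with 8-byte PTEs each mapping a 4KB page that spans a 4GB GGTT:

static u64 example_gen8_ggtt_total(void)
{
	u16 ggms = 3;				/* sample GGMS bitfield */
	unsigned int size = (1u << ggms) << 20;	/* 8 MB of PTEs */

	/* (8M / 8 bytes per PTE) * 4K per PTE == 4 GB of GGTT */
	return (u64)(size / sizeof(gen8_pte_t)) * I915_GTT_PAGE_SIZE;
}
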
+static int ggtt_probe_common(struct i915_ggtt *ggtt, u64 size)
+{
+ struct drm_i915_private *i915 = ggtt->vm.i915;
+ struct pci_dev *pdev = i915->drm.pdev;
+ phys_addr_t phys_addr;
+ int ret;
+
+ /* For Modern GENs the PTEs and register space are split in the BAR */
+ phys_addr = pci_resource_start(pdev, 0) + pci_resource_len(pdev, 0) / 2;
+
+ /*
+ * On BXT+/CNL+ writes larger than 64 bit to the GTT pagetable range
+ * will be dropped. For WC mappings in general we have 64 byte burst
+ * writes when the WC buffer is flushed, so we can't use it, but have to
+ * resort to an uncached mapping. The WC issue is easily caught by the
+ * readback check when writing GTT PTE entries.
+ */
+ if (IS_GEN9_LP(i915) || INTEL_GEN(i915) >= 10)
+ ggtt->gsm = ioremap(phys_addr, size);
+ else
+ ggtt->gsm = ioremap_wc(phys_addr, size);
+ if (!ggtt->gsm) {
+ DRM_ERROR("Failed to map the ggtt page table\n");
+ return -ENOMEM;
+ }
+
+ ret = setup_scratch_page(&ggtt->vm, GFP_DMA32);
+ if (ret) {
+ DRM_ERROR("Scratch setup failed\n");
+ /* iounmap will also get called at remove, but meh */
+ iounmap(ggtt->gsm);
+ return ret;
+ }
+
+ ggtt->vm.scratch[0].encode =
+ ggtt->vm.pte_encode(px_dma(&ggtt->vm.scratch[0]),
+ I915_CACHE_NONE, 0);
+
+ return 0;
+}
+
+int ggtt_set_pages(struct i915_vma *vma)
+{
+ int ret;
+
+ GEM_BUG_ON(vma->pages);
+
+ ret = i915_get_ggtt_vma_pages(vma);
+ if (ret)
+ return ret;
+
+ vma->page_sizes = vma->obj->mm.page_sizes;
+
+ return 0;
+}
+
+static void gen6_gmch_remove(struct i915_address_space *vm)
+{
+ struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
+
+ iounmap(ggtt->gsm);
+ cleanup_scratch_page(vm);
+}
+
+static struct resource pci_resource(struct pci_dev *pdev, int bar)
+{
+ return (struct resource)DEFINE_RES_MEM(pci_resource_start(pdev, bar),
+ pci_resource_len(pdev, bar));
+}
+
+static int gen8_gmch_probe(struct i915_ggtt *ggtt)
+{
+ struct drm_i915_private *i915 = ggtt->vm.i915;
+ struct pci_dev *pdev = i915->drm.pdev;
+ unsigned int size;
+ u16 snb_gmch_ctl;
+ int err;
+
+ /* TODO: We're not aware of mappable constraints on gen8 yet */
+ if (!IS_DGFX(i915)) {
+ ggtt->gmadr = pci_resource(pdev, 2);
+ ggtt->mappable_end = resource_size(&ggtt->gmadr);
+ }
+
+ err = pci_set_dma_mask(pdev, DMA_BIT_MASK(39));
+ if (!err)
+ err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(39));
+ if (err)
+ DRM_ERROR("Can't set DMA mask/consistent mask (%d)\n", err);
+
+ pci_read_config_word(pdev, SNB_GMCH_CTRL, &snb_gmch_ctl);
+ if (IS_CHERRYVIEW(i915))
+ size = chv_get_total_gtt_size(snb_gmch_ctl);
+ else
+ size = gen8_get_total_gtt_size(snb_gmch_ctl);
+
+ ggtt->vm.total = (size / sizeof(gen8_pte_t)) * I915_GTT_PAGE_SIZE;
+ ggtt->vm.cleanup = gen6_gmch_remove;
+ ggtt->vm.insert_page = gen8_ggtt_insert_page;
+ ggtt->vm.clear_range = nop_clear_range;
+ if (intel_scanout_needs_vtd_wa(i915))
+ ggtt->vm.clear_range = gen8_ggtt_clear_range;
+
+ ggtt->vm.insert_entries = gen8_ggtt_insert_entries;
+
+ /* Serialize GTT updates with aperture access on BXT if VT-d is on. */
+ if (intel_ggtt_update_needs_vtd_wa(i915) ||
+ IS_CHERRYVIEW(i915) /* fails with concurrent use/update */) {
+ ggtt->vm.insert_entries = bxt_vtd_ggtt_insert_entries__BKL;
+ ggtt->vm.insert_page = bxt_vtd_ggtt_insert_page__BKL;
+ if (ggtt->vm.clear_range != nop_clear_range)
+ ggtt->vm.clear_range = bxt_vtd_ggtt_clear_range__BKL;
+ }
+
+ ggtt->invalidate = gen8_ggtt_invalidate;
+
+ ggtt->vm.vma_ops.bind_vma = ggtt_bind_vma;
+ ggtt->vm.vma_ops.unbind_vma = ggtt_unbind_vma;
+ ggtt->vm.vma_ops.set_pages = ggtt_set_pages;
+ ggtt->vm.vma_ops.clear_pages = clear_pages;
+
+ ggtt->vm.pte_encode = gen8_pte_encode;
+
+ setup_private_pat(ggtt->vm.gt->uncore);
+
+ return ggtt_probe_common(ggtt, size);
+}
+
+static u64 snb_pte_encode(dma_addr_t addr,
+ enum i915_cache_level level,
+ u32 flags)
+{
+ gen6_pte_t pte = GEN6_PTE_ADDR_ENCODE(addr) | GEN6_PTE_VALID;
+
+ switch (level) {
+ case I915_CACHE_L3_LLC:
+ case I915_CACHE_LLC:
+ pte |= GEN6_PTE_CACHE_LLC;
+ break;
+ case I915_CACHE_NONE:
+ pte |= GEN6_PTE_UNCACHED;
+ break;
+ default:
+ MISSING_CASE(level);
+ }
+
+ return pte;
+}
+
+static u64 ivb_pte_encode(dma_addr_t addr,
+ enum i915_cache_level level,
+ u32 flags)
+{
+ gen6_pte_t pte = GEN6_PTE_ADDR_ENCODE(addr) | GEN6_PTE_VALID;
+
+ switch (level) {
+ case I915_CACHE_L3_LLC:
+ pte |= GEN7_PTE_CACHE_L3_LLC;
+ break;
+ case I915_CACHE_LLC:
+ pte |= GEN6_PTE_CACHE_LLC;
+ break;
+ case I915_CACHE_NONE:
+ pte |= GEN6_PTE_UNCACHED;
+ break;
+ default:
+ MISSING_CASE(level);
+ }
+
+ return pte;
+}
+
+static u64 byt_pte_encode(dma_addr_t addr,
+ enum i915_cache_level level,
+ u32 flags)
+{
+ gen6_pte_t pte = GEN6_PTE_ADDR_ENCODE(addr) | GEN6_PTE_VALID;
+
+ if (!(flags & PTE_READ_ONLY))
+ pte |= BYT_PTE_WRITEABLE;
+
+ if (level != I915_CACHE_NONE)
+ pte |= BYT_PTE_SNOOPED_BY_CPU_CACHES;
+
+ return pte;
+}
+
+static u64 hsw_pte_encode(dma_addr_t addr,
+ enum i915_cache_level level,
+ u32 flags)
+{
+ gen6_pte_t pte = HSW_PTE_ADDR_ENCODE(addr) | GEN6_PTE_VALID;
+
+ if (level != I915_CACHE_NONE)
+ pte |= HSW_WB_LLC_AGE3;
+
+ return pte;
+}
+
+static u64 iris_pte_encode(dma_addr_t addr,
+ enum i915_cache_level level,
+ u32 flags)
+{
+ gen6_pte_t pte = HSW_PTE_ADDR_ENCODE(addr) | GEN6_PTE_VALID;
+
+ switch (level) {
+ case I915_CACHE_NONE:
+ break;
+ case I915_CACHE_WT:
+ pte |= HSW_WT_ELLC_LLC_AGE3;
+ break;
+ default:
+ pte |= HSW_WB_ELLC_LLC_AGE3;
+ break;
+ }
+
+ return pte;
+}
+
+static int gen6_gmch_probe(struct i915_ggtt *ggtt)
+{
+ struct drm_i915_private *i915 = ggtt->vm.i915;
+ struct pci_dev *pdev = i915->drm.pdev;
+ unsigned int size;
+ u16 snb_gmch_ctl;
+ int err;
+
+ ggtt->gmadr = pci_resource(pdev, 2);
+ ggtt->mappable_end = resource_size(&ggtt->gmadr);
+
+ /*
+ * 64/512MB is the current min/max we actually know of, but this is
+ * just a coarse sanity check.
+ */
+ if (ggtt->mappable_end < (64<<20) || ggtt->mappable_end > (512<<20)) {
+ DRM_ERROR("Unknown GMADR size (%pa)\n", &ggtt->mappable_end);
+ return -ENXIO;
+ }
+
+ err = pci_set_dma_mask(pdev, DMA_BIT_MASK(40));
+ if (!err)
+ err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(40));
+ if (err)
+ DRM_ERROR("Can't set DMA mask/consistent mask (%d)\n", err);
+ pci_read_config_word(pdev, SNB_GMCH_CTRL, &snb_gmch_ctl);
+
+ size = gen6_get_total_gtt_size(snb_gmch_ctl);
+ ggtt->vm.total = (size / sizeof(gen6_pte_t)) * I915_GTT_PAGE_SIZE;
+
+ ggtt->vm.clear_range = nop_clear_range;
+ if (!HAS_FULL_PPGTT(i915) || intel_scanout_needs_vtd_wa(i915))
+ ggtt->vm.clear_range = gen6_ggtt_clear_range;
+ ggtt->vm.insert_page = gen6_ggtt_insert_page;
+ ggtt->vm.insert_entries = gen6_ggtt_insert_entries;
+ ggtt->vm.cleanup = gen6_gmch_remove;
+
+ ggtt->invalidate = gen6_ggtt_invalidate;
+
+ if (HAS_EDRAM(i915))
+ ggtt->vm.pte_encode = iris_pte_encode;
+ else if (IS_HASWELL(i915))
+ ggtt->vm.pte_encode = hsw_pte_encode;
+ else if (IS_VALLEYVIEW(i915))
+ ggtt->vm.pte_encode = byt_pte_encode;
+ else if (INTEL_GEN(i915) >= 7)
+ ggtt->vm.pte_encode = ivb_pte_encode;
+ else
+ ggtt->vm.pte_encode = snb_pte_encode;
+
+ ggtt->vm.vma_ops.bind_vma = ggtt_bind_vma;
+ ggtt->vm.vma_ops.unbind_vma = ggtt_unbind_vma;
+ ggtt->vm.vma_ops.set_pages = ggtt_set_pages;
+ ggtt->vm.vma_ops.clear_pages = clear_pages;
+
+ return ggtt_probe_common(ggtt, size);
+}
+
+static void i915_gmch_remove(struct i915_address_space *vm)
+{
+ intel_gmch_remove();
+}
+
+static int i915_gmch_probe(struct i915_ggtt *ggtt)
+{
+ struct drm_i915_private *i915 = ggtt->vm.i915;
+ phys_addr_t gmadr_base;
+ int ret;
+
+ ret = intel_gmch_probe(i915->bridge_dev, i915->drm.pdev, NULL);
+ if (!ret) {
+ DRM_ERROR("failed to set up gmch\n");
+ return -EIO;
+ }
+
+ intel_gtt_get(&ggtt->vm.total, &gmadr_base, &ggtt->mappable_end);
+
+ ggtt->gmadr =
+ (struct resource)DEFINE_RES_MEM(gmadr_base, ggtt->mappable_end);
+
+ ggtt->do_idle_maps = needs_idle_maps(i915);
+ ggtt->vm.insert_page = i915_ggtt_insert_page;
+ ggtt->vm.insert_entries = i915_ggtt_insert_entries;
+ ggtt->vm.clear_range = i915_ggtt_clear_range;
+ ggtt->vm.cleanup = i915_gmch_remove;
+
+ ggtt->invalidate = gmch_ggtt_invalidate;
+
+ ggtt->vm.vma_ops.bind_vma = ggtt_bind_vma;
+ ggtt->vm.vma_ops.unbind_vma = ggtt_unbind_vma;
+ ggtt->vm.vma_ops.set_pages = ggtt_set_pages;
+ ggtt->vm.vma_ops.clear_pages = clear_pages;
+
+ if (unlikely(ggtt->do_idle_maps))
+ dev_notice(i915->drm.dev,
+ "Applying Ironlake quirks for intel_iommu\n");
+
+ return 0;
+}
+
+static int ggtt_probe_hw(struct i915_ggtt *ggtt, struct intel_gt *gt)
+{
+ struct drm_i915_private *i915 = gt->i915;
+ int ret;
+
+ ggtt->vm.gt = gt;
+ ggtt->vm.i915 = i915;
+ ggtt->vm.dma = &i915->drm.pdev->dev;
+
+ if (INTEL_GEN(i915) <= 5)
+ ret = i915_gmch_probe(ggtt);
+ else if (INTEL_GEN(i915) < 8)
+ ret = gen6_gmch_probe(ggtt);
+ else
+ ret = gen8_gmch_probe(ggtt);
+ if (ret)
+ return ret;
+
+ if ((ggtt->vm.total - 1) >> 32) {
+ DRM_ERROR("We never expected a Global GTT with more than 32bits"
+ " of address space! Found %lldM!\n",
+ ggtt->vm.total >> 20);
+ ggtt->vm.total = 1ULL << 32;
+ ggtt->mappable_end =
+ min_t(u64, ggtt->mappable_end, ggtt->vm.total);
+ }
+
+ if (ggtt->mappable_end > ggtt->vm.total) {
+ DRM_ERROR("mappable aperture extends past end of GGTT,"
+ " aperture=%pa, total=%llx\n",
+ &ggtt->mappable_end, ggtt->vm.total);
+ ggtt->mappable_end = ggtt->vm.total;
+ }
+
+ /* GMADR is the PCI mmio aperture into the global GTT. */
+ DRM_DEBUG_DRIVER("GGTT size = %lluM\n", ggtt->vm.total >> 20);
+ DRM_DEBUG_DRIVER("GMADR size = %lluM\n", (u64)ggtt->mappable_end >> 20);
+ DRM_DEBUG_DRIVER("DSM size = %lluM\n",
+ (u64)resource_size(&intel_graphics_stolen_res) >> 20);
+
+ return 0;
+}
+
+/**
+ * i915_ggtt_probe_hw - Probe GGTT hardware location
+ * @i915: i915 device
+ */
+int i915_ggtt_probe_hw(struct drm_i915_private *i915)
+{
+ int ret;
+
+ ret = ggtt_probe_hw(&i915->ggtt, &i915->gt);
+ if (ret)
+ return ret;
+
+ if (intel_vtd_active())
+ dev_info(i915->drm.dev, "VT-d active for gfx access\n");
+
+ return 0;
+}
+
+int i915_ggtt_enable_hw(struct drm_i915_private *i915)
+{
+ if (INTEL_GEN(i915) < 6 && !intel_enable_gtt())
+ return -EIO;
+
+ return 0;
+}
+
+void i915_ggtt_enable_guc(struct i915_ggtt *ggtt)
+{
+ GEM_BUG_ON(ggtt->invalidate != gen8_ggtt_invalidate);
+
+ ggtt->invalidate = guc_ggtt_invalidate;
+
+ ggtt->invalidate(ggtt);
+}
+
+void i915_ggtt_disable_guc(struct i915_ggtt *ggtt)
+{
+ /* XXX Temporary pardon for error unload */
+ if (ggtt->invalidate == gen8_ggtt_invalidate)
+ return;
+
+ /* We should only be called after i915_ggtt_enable_guc() */
+ GEM_BUG_ON(ggtt->invalidate != guc_ggtt_invalidate);
+
+ ggtt->invalidate = gen8_ggtt_invalidate;
+
+ ggtt->invalidate(ggtt);
+}
+
+static void ggtt_restore_mappings(struct i915_ggtt *ggtt)
+{
+ struct i915_vma *vma;
+ bool flush = false;
+ int open;
+
+ intel_gt_check_and_clear_faults(ggtt->vm.gt);
+
+ mutex_lock(&ggtt->vm.mutex);
+
+ /* First fill our portion of the GTT with scratch pages */
+ ggtt->vm.clear_range(&ggtt->vm, 0, ggtt->vm.total);
+
+ /* Skip rewriting PTE on VMA unbind. */
+ open = atomic_xchg(&ggtt->vm.open, 0);
+
+ /* clflush objects bound into the GGTT and rebind them. */
+ list_for_each_entry(vma, &ggtt->vm.bound_list, vm_link) {
+ struct drm_i915_gem_object *obj = vma->obj;
+
+ if (!i915_vma_is_bound(vma, I915_VMA_GLOBAL_BIND))
+ continue;
+
+ clear_bit(I915_VMA_GLOBAL_BIND_BIT, __i915_vma_flags(vma));
+ WARN_ON(i915_vma_bind(vma,
+ obj ? obj->cache_level : 0,
+ PIN_GLOBAL, NULL));
+ if (obj) { /* only used during resume => exclusive access */
+ flush |= fetch_and_zero(&obj->write_domain);
+ obj->read_domains |= I915_GEM_DOMAIN_GTT;
+ }
+ }
+
+ atomic_set(&ggtt->vm.open, open);
+ ggtt->invalidate(ggtt);
+
+ mutex_unlock(&ggtt->vm.mutex);
+
+ if (flush)
+ wbinvd_on_all_cpus();
+}
+
+void i915_gem_restore_gtt_mappings(struct drm_i915_private *i915)
+{
+ struct i915_ggtt *ggtt = &i915->ggtt;
+
+ ggtt_restore_mappings(ggtt);
+
+ if (INTEL_GEN(i915) >= 8)
+ setup_private_pat(ggtt->vm.gt->uncore);
+}
+
+static struct scatterlist *
+rotate_pages(struct drm_i915_gem_object *obj, unsigned int offset,
+ unsigned int width, unsigned int height,
+ unsigned int stride,
+ struct sg_table *st, struct scatterlist *sg)
+{
+ unsigned int column, row;
+ unsigned int src_idx;
+
+ for (column = 0; column < width; column++) {
+ src_idx = stride * (height - 1) + column + offset;
+ for (row = 0; row < height; row++) {
+ st->nents++;
+ /*
+ * We don't need the pages, but need to initialize
+ * the entries so the sg list can be happily traversed.
+ * All we need are the DMA addresses.
+ */
+ sg_set_page(sg, NULL, I915_GTT_PAGE_SIZE, 0);
+ sg_dma_address(sg) =
+ i915_gem_object_get_dma_address(obj, src_idx);
+ sg_dma_len(sg) = I915_GTT_PAGE_SIZE;
+ sg = sg_next(sg);
+ src_idx -= stride;
+ }
+ }
+
+ return sg;
+}
+
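To illustrate the traversal (example ours): src_idx starts at stride * (height - 1) + column and steps down by stride, so each column is read bottom-to-top — a 90° rotation of the row-major source:

/*
 * Example: 2x2 plane, stride == 2, offset == 0.
 *
 *   source (row-major)     emitted per column
 *        0 1               column 0: pages 2, 0
 *        2 3               column 1: pages 3, 1
 *
 * i.e. the sg list walks the pages in the order 2, 0, 3, 1.
 */
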
+static noinline struct sg_table *
+intel_rotate_pages(struct intel_rotation_info *rot_info,
+ struct drm_i915_gem_object *obj)
+{
+ unsigned int size = intel_rotation_info_size(rot_info);
+ struct sg_table *st;
+ struct scatterlist *sg;
+ int ret = -ENOMEM;
+ int i;
+
+ /* Allocate target SG list. */
+ st = kmalloc(sizeof(*st), GFP_KERNEL);
+ if (!st)
+ goto err_st_alloc;
+
+ ret = sg_alloc_table(st, size, GFP_KERNEL);
+ if (ret)
+ goto err_sg_alloc;
+
+ st->nents = 0;
+ sg = st->sgl;
+
+ for (i = 0; i < ARRAY_SIZE(rot_info->plane); i++) {
+ sg = rotate_pages(obj, rot_info->plane[i].offset,
+ rot_info->plane[i].width, rot_info->plane[i].height,
+ rot_info->plane[i].stride, st, sg);
+ }
+
+ return st;
+
+err_sg_alloc:
+ kfree(st);
+err_st_alloc:
+
+ DRM_DEBUG_DRIVER("Failed to create rotated mapping for object size %zu! (%ux%u tiles, %u pages)\n",
+ obj->base.size, rot_info->plane[0].width, rot_info->plane[0].height, size);
+
+ return ERR_PTR(ret);
+}
+
+static struct scatterlist *
+remap_pages(struct drm_i915_gem_object *obj, unsigned int offset,
+ unsigned int width, unsigned int height,
+ unsigned int stride,
+ struct sg_table *st, struct scatterlist *sg)
+{
+ unsigned int row;
+
+ for (row = 0; row < height; row++) {
+ unsigned int left = width * I915_GTT_PAGE_SIZE;
+
+ while (left) {
+ dma_addr_t addr;
+ unsigned int length;
+
+ /*
+ * We don't need the pages, but need to initialize
+ * the entries so the sg list can be happily traversed.
+ * All we need are the DMA addresses.
+ */
+
+ addr = i915_gem_object_get_dma_address_len(obj, offset, &length);
+
+ length = min(left, length);
+
+ st->nents++;
+
+ sg_set_page(sg, NULL, length, 0);
+ sg_dma_address(sg) = addr;
+ sg_dma_len(sg) = length;
+ sg = sg_next(sg);
+
+ offset += length / I915_GTT_PAGE_SIZE;
+ left -= length;
+ }
+
+ offset += stride - width;
+ }
+
+ return sg;
+}
+
+static noinline struct sg_table *
+intel_remap_pages(struct intel_remapped_info *rem_info,
+ struct drm_i915_gem_object *obj)
+{
+ unsigned int size = intel_remapped_info_size(rem_info);
+ struct sg_table *st;
+ struct scatterlist *sg;
+ int ret = -ENOMEM;
+ int i;
+
+ /* Allocate target SG list. */
+ st = kmalloc(sizeof(*st), GFP_KERNEL);
+ if (!st)
+ goto err_st_alloc;
+
+ ret = sg_alloc_table(st, size, GFP_KERNEL);
+ if (ret)
+ goto err_sg_alloc;
+
+ st->nents = 0;
+ sg = st->sgl;
+
+ for (i = 0; i < ARRAY_SIZE(rem_info->plane); i++) {
+ sg = remap_pages(obj, rem_info->plane[i].offset,
+ rem_info->plane[i].width, rem_info->plane[i].height,
+ rem_info->plane[i].stride, st, sg);
+ }
+
+ i915_sg_trim(st);
+
+ return st;
+
+err_sg_alloc:
+ kfree(st);
+err_st_alloc:
+
+ DRM_DEBUG_DRIVER("Failed to create remapped mapping for object size %zu! (%ux%u tiles, %u pages)\n",
+ obj->base.size, rem_info->plane[0].width, rem_info->plane[0].height, size);
+
+ return ERR_PTR(ret);
+}
+
+static noinline struct sg_table *
+intel_partial_pages(const struct i915_ggtt_view *view,
+ struct drm_i915_gem_object *obj)
+{
+ struct sg_table *st;
+ struct scatterlist *sg, *iter;
+ unsigned int count = view->partial.size;
+ unsigned int offset;
+ int ret = -ENOMEM;
+
+ st = kmalloc(sizeof(*st), GFP_KERNEL);
+ if (!st)
+ goto err_st_alloc;
+
+ ret = sg_alloc_table(st, count, GFP_KERNEL);
+ if (ret)
+ goto err_sg_alloc;
+
+ iter = i915_gem_object_get_sg(obj, view->partial.offset, &offset);
+ GEM_BUG_ON(!iter);
+
+ sg = st->sgl;
+ st->nents = 0;
+ do {
+ unsigned int len;
+
+ len = min(iter->length - (offset << PAGE_SHIFT),
+ count << PAGE_SHIFT);
+ sg_set_page(sg, NULL, len, 0);
+ sg_dma_address(sg) =
+ sg_dma_address(iter) + (offset << PAGE_SHIFT);
+ sg_dma_len(sg) = len;
+
+ st->nents++;
+ count -= len >> PAGE_SHIFT;
+ if (count == 0) {
+ sg_mark_end(sg);
+ i915_sg_trim(st); /* Drop any unused tail entries. */
+
+ return st;
+ }
+
+ sg = __sg_next(sg);
+ iter = __sg_next(iter);
+ offset = 0;
+ } while (1);
+
+err_sg_alloc:
+ kfree(st);
+err_st_alloc:
+ return ERR_PTR(ret);
+}
+
+static int
+i915_get_ggtt_vma_pages(struct i915_vma *vma)
+{
+ int ret;
+
+ /*
+ * The vma->pages are only valid within the lifespan of the borrowed
+ * obj->mm.pages. When the obj->mm.pages sg_table is regenerated, so
+ * must be the vma->pages. A simple rule is that vma->pages must only
+ * be accessed when the obj->mm.pages are pinned.
+ */
+ GEM_BUG_ON(!i915_gem_object_has_pinned_pages(vma->obj));
+
+ switch (vma->ggtt_view.type) {
+ default:
+ GEM_BUG_ON(vma->ggtt_view.type);
+ /* fall through */
+ case I915_GGTT_VIEW_NORMAL:
+ vma->pages = vma->obj->mm.pages;
+ return 0;
+
+ case I915_GGTT_VIEW_ROTATED:
+ vma->pages =
+ intel_rotate_pages(&vma->ggtt_view.rotated, vma->obj);
+ break;
+
+ case I915_GGTT_VIEW_REMAPPED:
+ vma->pages =
+ intel_remap_pages(&vma->ggtt_view.remapped, vma->obj);
+ break;
+
+ case I915_GGTT_VIEW_PARTIAL:
+ vma->pages = intel_partial_pages(&vma->ggtt_view, vma->obj);
+ break;
+ }
+
+ ret = 0;
+ if (IS_ERR(vma->pages)) {
+ ret = PTR_ERR(vma->pages);
+ vma->pages = NULL;
+ DRM_ERROR("Failed to get pages for VMA view type %u (%d)!\n",
+ vma->ggtt_view.type, ret);
+ }
+ return ret;
+}
diff --git a/drivers/gpu/drm/i915/gt/intel_gpu_commands.h b/drivers/gpu/drm/i915/gt/intel_gpu_commands.h
index 4294f146f13c..51b8718513bc 100644
--- a/drivers/gpu/drm/i915/gt/intel_gpu_commands.h
+++ b/drivers/gpu/drm/i915/gt/intel_gpu_commands.h
@@ -7,6 +7,8 @@
#ifndef _INTEL_GPU_COMMANDS_H_
#define _INTEL_GPU_COMMANDS_H_
+#include <linux/bitops.h>
+
/*
* Target address alignments required for GPU access e.g.
* MI_STORE_DWORD_IMM.
@@ -319,4 +321,31 @@
#define COLOR_BLT ((0x2<<29)|(0x40<<22))
#define SRC_COPY_BLT ((0x2<<29)|(0x43<<22))
+/*
+ * Used to convert any address to canonical form.
+ * Starting from gen8, some commands (e.g. STATE_BASE_ADDRESS,
+ * MI_LOAD_REGISTER_MEM and others, see Broadwell PRM Vol2a) require the
+ * addresses to be in a canonical form:
+ * "GraphicsAddress[63:48] are ignored by the HW and assumed to be in correct
+ * canonical form [63:48] == [47]."
+ */
+#define GEN8_HIGH_ADDRESS_BIT 47
+static inline u64 gen8_canonical_addr(u64 address)
+{
+ return sign_extend64(address, GEN8_HIGH_ADDRESS_BIT);
+}
+
+static inline u64 gen8_noncanonical_addr(u64 address)
+{
+ return address & GENMASK_ULL(GEN8_HIGH_ADDRESS_BIT, 0);
+}
+
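For instance (values ours): sign-extending from bit 47 leaves lower-half addresses untouched and propagates bit 47 through bits [63:48] of upper-half addresses, while gen8_noncanonical_addr() masks them back off:

/* gen8_canonical_addr(0x00007fffffff0000)    == 0x00007fffffff0000 */
/* gen8_canonical_addr(0x0000800000000000)    == 0xffff800000000000 */
/* gen8_noncanonical_addr(0xffff800000000000) == 0x0000800000000000 */
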
+static inline u32 *__gen6_emit_bb_start(u32 *cs, u32 addr, unsigned int flags)
+{
+ *cs++ = MI_BATCH_BUFFER_START | flags;
+ *cs++ = addr;
+
+ return cs;
+}
+
#endif /* _INTEL_GPU_COMMANDS_H_ */
diff --git a/drivers/gpu/drm/i915/gt/intel_gt.c b/drivers/gpu/drm/i915/gt/intel_gt.c
index 4c26daf7ee46..da2b6e2ae692 100644
--- a/drivers/gpu/drm/i915/gt/intel_gt.c
+++ b/drivers/gpu/drm/i915/gt/intel_gt.c
@@ -3,12 +3,15 @@
* Copyright © 2019 Intel Corporation
*/
+#include "debugfs_gt.h"
#include "i915_drv.h"
+#include "intel_context.h"
#include "intel_gt.h"
#include "intel_gt_pm.h"
#include "intel_gt_requests.h"
#include "intel_mocs.h"
#include "intel_rc6.h"
+#include "intel_renderstate.h"
#include "intel_rps.h"
#include "intel_uncore.h"
#include "intel_pm.h"
@@ -25,6 +28,7 @@ void intel_gt_init_early(struct intel_gt *gt, struct drm_i915_private *i915)
intel_gt_init_reset(gt);
intel_gt_init_requests(gt);
+ intel_gt_init_timelines(gt);
intel_gt_pm_init_early(gt);
intel_rps_init_early(&gt->rps);
@@ -34,8 +38,6 @@ void intel_gt_init_early(struct intel_gt *gt, struct drm_i915_private *i915)
void intel_gt_init_hw_early(struct intel_gt *gt, struct i915_ggtt *ggtt)
{
gt->ggtt = ggtt;
-
- intel_gt_sanitize(gt, false);
}
static void init_unused_ring(struct intel_gt *gt, u32 base)
@@ -73,11 +75,6 @@ int intel_gt_init_hw(struct intel_gt *gt)
struct intel_uncore *uncore = gt->uncore;
int ret;
- BUG_ON(!i915->kernel_context);
- ret = intel_gt_terminally_wedged(gt);
- if (ret)
- return ret;
-
gt->last_init_time = ktime_get();
/* Double layer security blanket, see i915_gem_init() */
@@ -303,7 +300,7 @@ void intel_gt_flush_ggtt_writes(struct intel_gt *gt)
intel_gt_chipset_flush(gt);
- with_intel_runtime_pm(uncore->rpm, wakeref) {
+ with_intel_runtime_pm_if_in_use(uncore->rpm, wakeref) {
unsigned long flags;
spin_lock_irqsave(&uncore->lock, flags);
@@ -323,6 +320,8 @@ void intel_gt_chipset_flush(struct intel_gt *gt)
void intel_gt_driver_register(struct intel_gt *gt)
{
intel_rps_driver_register(&gt->rps);
+
+ debugfs_gt_register(gt);
}
static int intel_gt_init_scratch(struct intel_gt *gt, unsigned int size)
@@ -364,22 +363,272 @@ static void intel_gt_fini_scratch(struct intel_gt *gt)
i915_vma_unpin_and_release(&gt->scratch, 0);
}
+static struct i915_address_space *kernel_vm(struct intel_gt *gt)
+{
+ if (INTEL_PPGTT(gt->i915) > INTEL_PPGTT_ALIASING)
+ return &i915_ppgtt_create(gt)->vm;
+ else
+ return i915_vm_get(&gt->ggtt->vm);
+}
+
+static int __intel_context_flush_retire(struct intel_context *ce)
+{
+ struct intel_timeline *tl;
+
+ tl = intel_context_timeline_lock(ce);
+ if (IS_ERR(tl))
+ return PTR_ERR(tl);
+
+ intel_context_timeline_unlock(tl);
+ return 0;
+}
+
+static int __engines_record_defaults(struct intel_gt *gt)
+{
+ struct i915_request *requests[I915_NUM_ENGINES] = {};
+ struct intel_engine_cs *engine;
+ enum intel_engine_id id;
+ int err = 0;
+
+ /*
+ * As we reset the GPU during very early sanitisation, the current
+ * register state on the GPU should reflect its default values.
+ * We load a context onto the hw (with restore-inhibit), then switch
+ * over to a second context to save that default register state. We
+ * can then prime every new context with that state so they all start
+ * from the same default HW values.
+ */
+
+ for_each_engine(engine, gt, id) {
+ struct intel_renderstate so;
+ struct intel_context *ce;
+ struct i915_request *rq;
+
+ /* We must be able to switch to something! */
+ GEM_BUG_ON(!engine->kernel_context);
+
+ err = intel_renderstate_init(&so, engine);
+ if (err)
+ goto out;
+
+ ce = intel_context_create(engine);
+ if (IS_ERR(ce)) {
+ err = PTR_ERR(ce);
+ goto out;
+ }
+
+ rq = intel_context_create_request(ce);
+ if (IS_ERR(rq)) {
+ err = PTR_ERR(rq);
+ intel_context_put(ce);
+ goto out;
+ }
+
+ err = intel_engine_emit_ctx_wa(rq);
+ if (err)
+ goto err_rq;
+
+ err = intel_renderstate_emit(&so, rq);
+ if (err)
+ goto err_rq;
+
+err_rq:
+ requests[id] = i915_request_get(rq);
+ i915_request_add(rq);
+ intel_renderstate_fini(&so);
+ if (err)
+ goto out;
+ }
+
+ /* Flush the default context image to memory, and enable powersaving. */
+ if (intel_gt_wait_for_idle(gt, I915_GEM_IDLE_TIMEOUT) == -ETIME) {
+ err = -EIO;
+ goto out;
+ }
+
+ for (id = 0; id < ARRAY_SIZE(requests); id++) {
+ struct i915_request *rq;
+ struct i915_vma *state;
+ void *vaddr;
+
+ rq = requests[id];
+ if (!rq)
+ continue;
+
+ GEM_BUG_ON(!test_bit(CONTEXT_ALLOC_BIT, &rq->context->flags));
+ state = rq->context->state;
+ if (!state)
+ continue;
+
+ /* Serialise with retirement on another CPU */
+ GEM_BUG_ON(!i915_request_completed(rq));
+ err = __intel_context_flush_retire(rq->context);
+ if (err)
+ goto out;
+
+ /* We want to be able to unbind the state from the GGTT */
+ GEM_BUG_ON(intel_context_is_pinned(rq->context));
+
+ /*
+ * As we will hold a reference to the logical state, it will
+ * not be torn down with the context, and importantly the
+ * object will hold onto its vma (making it possible for a
+ * stray GTT write to corrupt our defaults). Unmap the vma
+ * from the GTT to prevent such accidents and reclaim the
+ * space.
+ */
+ err = i915_vma_unbind(state);
+ if (err)
+ goto out;
+
+ i915_gem_object_lock(state->obj);
+ err = i915_gem_object_set_to_cpu_domain(state->obj, false);
+ i915_gem_object_unlock(state->obj);
+ if (err)
+ goto out;
+
+ i915_gem_object_set_cache_coherency(state->obj, I915_CACHE_LLC);
+
+ /* Check we can acquire the image of the context state */
+ vaddr = i915_gem_object_pin_map(state->obj, I915_MAP_FORCE_WB);
+ if (IS_ERR(vaddr)) {
+ err = PTR_ERR(vaddr);
+ goto out;
+ }
+
+ rq->engine->default_state = i915_gem_object_get(state->obj);
+ i915_gem_object_unpin_map(state->obj);
+ }
+
+out:
+ /*
+ * If we have to abandon now, we expect the engines to be idle
+ * and ready to be torn-down. The quickest way we can accomplish
+ * this is by declaring ourselves wedged.
+ */
+ if (err)
+ intel_gt_set_wedged(gt);
+
+ for (id = 0; id < ARRAY_SIZE(requests); id++) {
+ struct intel_context *ce;
+ struct i915_request *rq;
+
+ rq = requests[id];
+ if (!rq)
+ continue;
+
+ ce = rq->context;
+ i915_request_put(rq);
+ intel_context_put(ce);
+ }
+ return err;
+}
+
+static int __engines_verify_workarounds(struct intel_gt *gt)
+{
+ struct intel_engine_cs *engine;
+ enum intel_engine_id id;
+ int err = 0;
+
+ if (!IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM))
+ return 0;
+
+ for_each_engine(engine, gt, id) {
+ if (intel_engine_verify_workarounds(engine, "load"))
+ err = -EIO;
+ }
+
+ return err;
+}
+
+static void __intel_gt_disable(struct intel_gt *gt)
+{
+ intel_gt_set_wedged_on_init(gt);
+
+ intel_gt_suspend_prepare(gt);
+ intel_gt_suspend_late(gt);
+
+ GEM_BUG_ON(intel_gt_pm_is_awake(gt));
+}
+
int intel_gt_init(struct intel_gt *gt)
{
int err;
- err = intel_gt_init_scratch(gt, IS_GEN(gt->i915, 2) ? SZ_256K : SZ_4K);
+ err = i915_inject_probe_error(gt->i915, -ENODEV);
if (err)
return err;
+ /*
+ * This is just a security blanket to placate dragons.
+ * On some systems, we very sporadically observe that the first TLBs
+ * used by the CS may be stale, despite us poking the TLB reset. If
+ * we hold the forcewake during initialisation these problems
+ * just magically go away.
+ */
+ intel_uncore_forcewake_get(gt->uncore, FORCEWAKE_ALL);
+
+ err = intel_gt_init_scratch(gt, IS_GEN(gt->i915, 2) ? SZ_256K : SZ_4K);
+ if (err)
+ goto out_fw;
+
intel_gt_pm_init(gt);
- return 0;
+ gt->vm = kernel_vm(gt);
+ if (!gt->vm) {
+ err = -ENOMEM;
+ goto err_pm;
+ }
+
+ err = intel_engines_init(gt);
+ if (err)
+ goto err_engines;
+
+ intel_uc_init(&gt->uc);
+
+ err = intel_gt_resume(gt);
+ if (err)
+ goto err_uc_init;
+
+ err = __engines_record_defaults(gt);
+ if (err)
+ goto err_gt;
+
+ err = __engines_verify_workarounds(gt);
+ if (err)
+ goto err_gt;
+
+ err = i915_inject_probe_error(gt->i915, -EIO);
+ if (err)
+ goto err_gt;
+
+ goto out_fw;
+err_gt:
+ __intel_gt_disable(gt);
+ intel_uc_fini_hw(&gt->uc);
+err_uc_init:
+ intel_uc_fini(&gt->uc);
+err_engines:
+ intel_engines_release(gt);
+ i915_vm_put(fetch_and_zero(&gt->vm));
+err_pm:
+ intel_gt_pm_fini(gt);
+ intel_gt_fini_scratch(gt);
+out_fw:
+ if (err)
+ intel_gt_set_wedged_on_init(gt);
+ intel_uncore_forcewake_put(gt->uncore, FORCEWAKE_ALL);
+ return err;
}
void intel_gt_driver_remove(struct intel_gt *gt)
{
- GEM_BUG_ON(gt->awake);
+ __intel_gt_disable(gt);
+
+ intel_uc_fini_hw(&gt->uc);
+ intel_uc_fini(&gt->uc);
+
+ intel_engines_release(gt);
}
void intel_gt_driver_unregister(struct intel_gt *gt)
@@ -389,6 +638,12 @@ void intel_gt_driver_unregister(struct intel_gt *gt)
void intel_gt_driver_release(struct intel_gt *gt)
{
+ struct i915_address_space *vm;
+
+ vm = fetch_and_zero(&gt->vm);
+ if (vm) /* FIXME being called twice on error paths :( */
+ i915_vm_put(vm);
+
intel_gt_pm_fini(gt);
intel_gt_fini_scratch(gt);
}
@@ -396,5 +651,8 @@ void intel_gt_driver_release(struct intel_gt *gt)
void intel_gt_driver_late_release(struct intel_gt *gt)
{
intel_uc_driver_late_release(&gt->uc);
+ intel_gt_fini_requests(gt);
intel_gt_fini_reset(gt);
+ intel_gt_fini_timelines(gt);
+ intel_engines_free(gt);
}
diff --git a/drivers/gpu/drm/i915/gt/intel_gt.h b/drivers/gpu/drm/i915/gt/intel_gt.h
index 5436f8c30708..1dac441cb8f4 100644
--- a/drivers/gpu/drm/i915/gt/intel_gt.h
+++ b/drivers/gpu/drm/i915/gt/intel_gt.h
@@ -12,6 +12,12 @@
struct drm_i915_private;
+#define GT_TRACE(gt, fmt, ...) do { \
+ const struct intel_gt *gt__ __maybe_unused = (gt); \
+ GEM_TRACE("%s " fmt, dev_name(gt__->i915->drm.dev), \
+ ##__VA_ARGS__); \
+} while (0)
+
static inline struct intel_gt *uc_to_gt(struct intel_uc *uc)
{
return container_of(uc, struct intel_gt, uc);
@@ -52,9 +58,14 @@ static inline u32 intel_gt_scratch_offset(const struct intel_gt *gt,
return i915_ggtt_offset(gt->scratch) + field;
}
-static inline bool intel_gt_is_wedged(struct intel_gt *gt)
+static inline bool intel_gt_is_wedged(const struct intel_gt *gt)
{
return __intel_reset_failed(&gt->reset);
}
+static inline bool intel_gt_has_init_error(const struct intel_gt *gt)
+{
+ return test_bit(I915_WEDGED_ON_INIT, &gt->reset.flags);
+}
+
#endif /* __INTEL_GT_H__ */
diff --git a/drivers/gpu/drm/i915/gt/intel_gt_irq.c b/drivers/gpu/drm/i915/gt/intel_gt_irq.c
index 973ee7eded64..f796bdf1ed30 100644
--- a/drivers/gpu/drm/i915/gt/intel_gt_irq.c
+++ b/drivers/gpu/drm/i915/gt/intel_gt_irq.c
@@ -28,7 +28,7 @@ cs_irq_handler(struct intel_engine_cs *engine, u32 iir)
tasklet = true;
if (iir & GT_RENDER_USER_INTERRUPT) {
- intel_engine_breadcrumbs_irq(engine);
+ intel_engine_signal_breadcrumbs(engine);
tasklet |= intel_engine_needs_breadcrumb_tasklet(engine);
}
@@ -245,9 +245,9 @@ void gen11_gt_irq_postinstall(struct intel_gt *gt)
void gen5_gt_irq_handler(struct intel_gt *gt, u32 gt_iir)
{
if (gt_iir & GT_RENDER_USER_INTERRUPT)
- intel_engine_breadcrumbs_irq(gt->engine_class[RENDER_CLASS][0]);
+ intel_engine_signal_breadcrumbs(gt->engine_class[RENDER_CLASS][0]);
if (gt_iir & ILK_BSD_USER_INTERRUPT)
- intel_engine_breadcrumbs_irq(gt->engine_class[VIDEO_DECODE_CLASS][0]);
+ intel_engine_signal_breadcrumbs(gt->engine_class[VIDEO_DECODE_CLASS][0]);
}
static void gen7_parity_error_irq_handler(struct intel_gt *gt, u32 iir)
@@ -271,11 +271,11 @@ static void gen7_parity_error_irq_handler(struct intel_gt *gt, u32 iir)
void gen6_gt_irq_handler(struct intel_gt *gt, u32 gt_iir)
{
if (gt_iir & GT_RENDER_USER_INTERRUPT)
- intel_engine_breadcrumbs_irq(gt->engine_class[RENDER_CLASS][0]);
+ intel_engine_signal_breadcrumbs(gt->engine_class[RENDER_CLASS][0]);
if (gt_iir & GT_BSD_USER_INTERRUPT)
- intel_engine_breadcrumbs_irq(gt->engine_class[VIDEO_DECODE_CLASS][0]);
+ intel_engine_signal_breadcrumbs(gt->engine_class[VIDEO_DECODE_CLASS][0]);
if (gt_iir & GT_BLT_USER_INTERRUPT)
- intel_engine_breadcrumbs_irq(gt->engine_class[COPY_ENGINE_CLASS][0]);
+ intel_engine_signal_breadcrumbs(gt->engine_class[COPY_ENGINE_CLASS][0]);
if (gt_iir & (GT_BLT_CS_ERROR_INTERRUPT |
GT_BSD_CS_ERROR_INTERRUPT |
diff --git a/drivers/gpu/drm/i915/gt/intel_gt_pm.c b/drivers/gpu/drm/i915/gt/intel_gt_pm.c
index 7e64b7d7d330..d1c2f034296a 100644
--- a/drivers/gpu/drm/i915/gt/intel_gt_pm.c
+++ b/drivers/gpu/drm/i915/gt/intel_gt_pm.c
@@ -43,7 +43,7 @@ static int __gt_unpark(struct intel_wakeref *wf)
struct intel_gt *gt = container_of(wf, typeof(*gt), wakeref);
struct drm_i915_private *i915 = gt->i915;
- GEM_TRACE("\n");
+ GT_TRACE(gt, "\n");
i915_globals_unpark();
@@ -61,9 +61,7 @@ static int __gt_unpark(struct intel_wakeref *wf)
gt->awake = intel_display_power_get(i915, POWER_DOMAIN_GT_IRQ);
GEM_BUG_ON(!gt->awake);
- if (NEEDS_RC6_CTX_CORRUPTION_WA(i915))
- intel_uncore_forcewake_get(&i915->uncore, FORCEWAKE_ALL);
-
+ intel_rc6_unpark(&gt->rc6);
intel_rps_unpark(&gt->rps);
i915_pmu_gt_unparked(i915);
@@ -78,22 +76,18 @@ static int __gt_park(struct intel_wakeref *wf)
intel_wakeref_t wakeref = fetch_and_zero(&gt->awake);
struct drm_i915_private *i915 = gt->i915;
- GEM_TRACE("\n");
+ GT_TRACE(gt, "\n");
intel_gt_park_requests(gt);
i915_vma_parked(gt);
i915_pmu_gt_parked(i915);
intel_rps_park(&gt->rps);
+ intel_rc6_park(&gt->rc6);
/* Everything switched off, flush any residual interrupt just in case */
intel_synchronize_irq(i915);
- if (NEEDS_RC6_CTX_CORRUPTION_WA(i915)) {
- intel_rc6_ctx_wa_check(&i915->gt.rc6);
- intel_uncore_forcewake_put(&i915->uncore, FORCEWAKE_ALL);
- }
-
/* Defer dropping the display power well for 100ms, it's slow! */
GEM_BUG_ON(!wakeref);
intel_display_power_put_async(i915, POWER_DOMAIN_GT_IRQ, wakeref);
@@ -132,23 +126,13 @@ static bool reset_engines(struct intel_gt *gt)
return __intel_gt_reset(gt, ALL_ENGINES) == 0;
}
-/**
- * intel_gt_sanitize: called after the GPU has lost power
- * @gt: the i915 GT container
- * @force: ignore a failed reset and sanitize engine state anyway
- *
- * Anytime we reset the GPU, either with an explicit GPU reset or through a
- * PCI power cycle, the GPU loses state and we must reset our state tracking
- * to match. Note that calling intel_gt_sanitize() if the GPU has not
- * been reset results in much confusion!
- */
-void intel_gt_sanitize(struct intel_gt *gt, bool force)
+static void gt_sanitize(struct intel_gt *gt, bool force)
{
struct intel_engine_cs *engine;
enum intel_engine_id id;
intel_wakeref_t wakeref;
- GEM_TRACE("force:%s\n", yesno(force));
+ GT_TRACE(gt, "force:%s", yesno(force));
/* Use a raw wakeref to avoid calling intel_display_power_get early */
wakeref = intel_runtime_pm_get(gt->uncore->rpm);
@@ -193,9 +177,13 @@ int intel_gt_resume(struct intel_gt *gt)
{
struct intel_engine_cs *engine;
enum intel_engine_id id;
- int err = 0;
+ int err;
- GEM_TRACE("\n");
+ err = intel_gt_has_init_error(gt);
+ if (err)
+ return err;
+
+ GT_TRACE(gt, "\n");
/*
* After resume, we may need to poke into the pinned kernel
@@ -207,21 +195,26 @@ int intel_gt_resume(struct intel_gt *gt)
intel_uncore_forcewake_get(gt->uncore, FORCEWAKE_ALL);
intel_rc6_sanitize(&gt->rc6);
+ gt_sanitize(gt, true);
+ if (intel_gt_is_wedged(gt)) {
+ err = -EIO;
+ goto out_fw;
+ }
+
+ /* Only when the HW is re-initialised, can we replay the requests */
+ err = intel_gt_init_hw(gt);
+ if (err) {
+ dev_err(gt->i915->drm.dev,
+ "Failed to initialize GPU, declaring it wedged!\n");
+ goto err_wedged;
+ }
intel_rps_enable(&gt->rps);
intel_llc_enable(&gt->llc);
for_each_engine(engine, gt, id) {
- struct intel_context *ce;
-
intel_engine_pm_get(engine);
- ce = engine->kernel_context;
- if (ce) {
- GEM_BUG_ON(!intel_context_is_pinned(ce));
- ce->ops->reset(ce);
- }
-
engine->serial++; /* kernel context lost */
err = engine->resume(engine);
@@ -230,7 +223,7 @@ int intel_gt_resume(struct intel_gt *gt)
dev_err(gt->i915->drm.dev,
"Failed to restart %s (%d)\n",
engine->name, err);
- break;
+ goto err_wedged;
}
}
@@ -240,10 +233,14 @@ int intel_gt_resume(struct intel_gt *gt)
user_forcewake(gt, false);
+out_fw:
intel_uncore_forcewake_put(gt->uncore, FORCEWAKE_ALL);
intel_gt_pm_put(gt);
-
return err;
+
+err_wedged:
+ intel_gt_set_wedged(gt);
+ goto out_fw;
}
static void wait_for_suspend(struct intel_gt *gt)
@@ -257,6 +254,7 @@ static void wait_for_suspend(struct intel_gt *gt)
* the gpu quiet.
*/
intel_gt_set_wedged(gt);
+ intel_gt_retire_requests(gt);
}
intel_gt_pm_wait_for_idle(gt);
@@ -286,6 +284,11 @@ void intel_gt_suspend_late(struct intel_gt *gt)
/* We expect to be idle already; but also want to be independent */
wait_for_suspend(gt);
+ if (is_mock_gt(gt))
+ return;
+
+ GEM_BUG_ON(gt->awake);
+
/*
* On disabling the device, we want to turn off HW access to memory
* that we no longer own.
@@ -305,22 +308,21 @@ void intel_gt_suspend_late(struct intel_gt *gt)
intel_llc_disable(&gt->llc);
}
- intel_gt_sanitize(gt, false);
+ gt_sanitize(gt, false);
- GEM_TRACE("\n");
+ GT_TRACE(gt, "\n");
}
void intel_gt_runtime_suspend(struct intel_gt *gt)
{
intel_uc_runtime_suspend(&gt->uc);
- GEM_TRACE("\n");
+ GT_TRACE(gt, "\n");
}
int intel_gt_runtime_resume(struct intel_gt *gt)
{
- GEM_TRACE("\n");
-
+ GT_TRACE(gt, "\n");
intel_gt_init_swizzling(gt);
return intel_uc_runtime_resume(&gt->uc);
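As an aside, the reworked intel_gt_resume() above funnels every failure through a single unwind: wedge the GT, then fall back into the shared forcewake-release exit. A minimal userspace sketch of that goto ladder (init_hw() and wedge() here are placeholders, not the driver's API):

#include <stdio.h>

static int init_hw(int fail) { return fail ? -5 : 0; }	/* stand-in for intel_gt_init_hw() */
static void wedge(void) { puts("wedged"); }

static int resume(int fail)
{
	int err;

	puts("forcewake get");

	err = init_hw(fail);
	if (err)
		goto err_wedged;

out_fw:
	puts("forcewake put");	/* common exit for success and failure */
	return err;

err_wedged:
	wedge();		/* mark the device unusable... */
	goto out_fw;		/* ...then reuse the normal unwind */
}

int main(void)
{
	printf("ok=%d fail=%d\n", resume(0), resume(1));
	return 0;
}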
diff --git a/drivers/gpu/drm/i915/gt/intel_gt_pm.h b/drivers/gpu/drm/i915/gt/intel_gt_pm.h
index 990efc27a4e4..60f0e2fbe55c 100644
--- a/drivers/gpu/drm/i915/gt/intel_gt_pm.h
+++ b/drivers/gpu/drm/i915/gt/intel_gt_pm.h
@@ -22,6 +22,11 @@ static inline void intel_gt_pm_get(struct intel_gt *gt)
intel_wakeref_get(&gt->wakeref);
}
+static inline void __intel_gt_pm_get(struct intel_gt *gt)
+{
+ __intel_wakeref_get(&gt->wakeref);
+}
+
static inline bool intel_gt_pm_get_if_awake(struct intel_gt *gt)
{
return intel_wakeref_get_if_active(&gt->wakeref);
@@ -46,8 +51,6 @@ void intel_gt_pm_init_early(struct intel_gt *gt);
void intel_gt_pm_init(struct intel_gt *gt);
void intel_gt_pm_fini(struct intel_gt *gt);
-void intel_gt_sanitize(struct intel_gt *gt, bool force);
-
void intel_gt_suspend_prepare(struct intel_gt *gt);
void intel_gt_suspend_late(struct intel_gt *gt);
int intel_gt_resume(struct intel_gt *gt);
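For reference, intel_gt_pm_get_if_awake() above only succeeds while the GT already holds a wakeref, i.e. while the reference count is non-zero. A hedged stand-alone sketch of that "increment unless zero" shape (this is an analogue, not the wakeref implementation itself):

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static atomic_int count;

/* Increment-unless-zero: succeed only while someone else already
 * holds a reference, as intel_wakeref_get_if_active() does. */
static bool get_if_active(void)
{
	int old = atomic_load(&count);

	do {
		if (old == 0)
			return false;
	} while (!atomic_compare_exchange_weak(&count, &old, old + 1));

	return true;
}

int main(void)
{
	printf("asleep: %d\n", get_if_active());	/* 0: no ref taken */
	atomic_store(&count, 1);			/* a full get woke the device */
	printf("awake: %d\n", get_if_active());		/* 1: ref taken */
	printf("count: %d\n", atomic_load(&count));	/* 2 */
	return 0;
}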
diff --git a/drivers/gpu/drm/i915/gt/intel_gt_requests.c b/drivers/gpu/drm/i915/gt/intel_gt_requests.c
index 3dc13ecf41bf..7ef1d37970f6 100644
--- a/drivers/gpu/drm/i915/gt/intel_gt_requests.c
+++ b/drivers/gpu/drm/i915/gt/intel_gt_requests.c
@@ -8,27 +8,40 @@
#include "i915_drv.h" /* for_each_engine() */
#include "i915_request.h"
+#include "intel_engine_heartbeat.h"
#include "intel_gt.h"
#include "intel_gt_pm.h"
#include "intel_gt_requests.h"
#include "intel_timeline.h"
-static void retire_requests(struct intel_timeline *tl)
+static bool retire_requests(struct intel_timeline *tl)
{
struct i915_request *rq, *rn;
list_for_each_entry_safe(rq, rn, &tl->requests, link)
if (!i915_request_retire(rq))
- break;
+ return false;
+
+ /* And check nothing new was submitted */
+ return !i915_active_fence_isset(&tl->last_request);
}
-static void flush_submission(struct intel_gt *gt)
+static bool flush_submission(struct intel_gt *gt)
{
struct intel_engine_cs *engine;
enum intel_engine_id id;
+ bool active = false;
+
+ if (!intel_gt_pm_is_awake(gt))
+ return false;
- for_each_engine(engine, gt, id)
+ for_each_engine(engine, gt, id) {
intel_engine_flush_submission(engine);
+ active |= flush_work(&engine->retire_work);
+ active |= flush_work(&engine->wakeref.work);
+ }
+
+ return active;
}
static void engine_retire(struct work_struct *work)
@@ -62,19 +75,16 @@ static void engine_retire(struct work_struct *work)
static bool add_retire(struct intel_engine_cs *engine,
struct intel_timeline *tl)
{
+#define STUB ((struct intel_timeline *)1)
struct intel_timeline *first;
/*
* We open-code a llist here to include the additional tag [BIT(0)]
* so that we know when the timeline is already on a
* retirement queue: either this engine or another.
- *
- * However, we rely on that a timeline can only be active on a single
- * engine at any one time and that add_retire() is called before the
- * engine releases the timeline and transferred to another to retire.
*/
- if (READ_ONCE(tl->retire)) /* already queued */
+ if (cmpxchg(&tl->retire, NULL, STUB)) /* already queued */
return false;
intel_timeline_get(tl);
@@ -109,7 +119,6 @@ long intel_gt_retire_requests_timeout(struct intel_gt *gt, long timeout)
struct intel_gt_timelines *timelines = &gt->timelines;
struct intel_timeline *tl, *tn;
unsigned long active_count = 0;
- unsigned long flags;
bool interruptible;
LIST_HEAD(free);
@@ -118,8 +127,7 @@ long intel_gt_retire_requests_timeout(struct intel_gt *gt, long timeout)
timeout = -timeout, interruptible = false;
flush_submission(gt); /* kick the ksoftirqd tasklets */
-
- spin_lock_irqsave(&timelines->lock, flags);
+ spin_lock(&timelines->lock);
list_for_each_entry_safe(tl, tn, &timelines->active_list, link) {
if (!mutex_trylock(&tl->mutex)) {
active_count++; /* report busy to caller, try again? */
@@ -129,7 +137,7 @@ long intel_gt_retire_requests_timeout(struct intel_gt *gt, long timeout)
intel_timeline_get(tl);
GEM_BUG_ON(!atomic_read(&tl->active_count));
atomic_inc(&tl->active_count); /* pin the list element */
- spin_unlock_irqrestore(&timelines->lock, flags);
+ spin_unlock(&timelines->lock);
if (timeout > 0) {
struct dma_fence *fence;
@@ -143,16 +151,15 @@ long intel_gt_retire_requests_timeout(struct intel_gt *gt, long timeout)
}
}
- retire_requests(tl);
+ if (!retire_requests(tl) || flush_submission(gt))
+ active_count++;
- spin_lock_irqsave(&timelines->lock, flags);
+ spin_lock(&timelines->lock);
/* Resume iteration after dropping lock */
list_safe_reset_next(tl, tn, link);
if (atomic_dec_and_test(&tl->active_count))
list_del(&tl->link);
- else
- active_count += !!rcu_access_pointer(tl->last_request.fence);
mutex_unlock(&tl->mutex);
@@ -162,7 +169,7 @@ long intel_gt_retire_requests_timeout(struct intel_gt *gt, long timeout)
list_add(&tl->link, &free);
}
}
- spin_unlock_irqrestore(&timelines->lock, flags);
+ spin_unlock(&timelines->lock);
list_for_each_entry_safe(tl, tn, &free, link)
__intel_timeline_free(&tl->kref);
@@ -190,9 +197,9 @@ static void retire_work_handler(struct work_struct *work)
struct intel_gt *gt =
container_of(work, typeof(*gt), requests.retire_work.work);
- intel_gt_retire_requests(gt);
schedule_delayed_work(&gt->requests.retire_work,
round_jiffies_up_relative(HZ));
+ intel_gt_retire_requests(gt);
}
void intel_gt_init_requests(struct intel_gt *gt)
@@ -210,3 +217,9 @@ void intel_gt_unpark_requests(struct intel_gt *gt)
schedule_delayed_work(&gt->requests.retire_work,
round_jiffies_up_relative(HZ));
}
+
+void intel_gt_fini_requests(struct intel_gt *gt)
+{
+ /* Wait until the work is marked as finished before unloading! */
+ cancel_delayed_work_sync(&gt->requests.retire_work);
+}
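The add_retire() change above replaces a racy READ_ONCE() check with cmpxchg(&tl->retire, NULL, STUB), so exactly one caller claims the retirement-queue slot even under concurrency. A small C11 analogue of that claim (STUB mirrors the sentinel in the patch):

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

#define STUB ((void *)1)	/* sentinel: "already on a retirement queue" */

static void *_Atomic retire;

static bool try_queue(void)
{
	void *expected = NULL;

	/* Only one caller moves NULL -> STUB; everyone else sees it set. */
	return atomic_compare_exchange_strong(&retire, &expected, STUB);
}

int main(void)
{
	printf("first: %d\n", try_queue());	/* 1: we queued it */
	printf("second: %d\n", try_queue());	/* 0: already queued */
	return 0;
}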
diff --git a/drivers/gpu/drm/i915/gt/intel_gt_requests.h b/drivers/gpu/drm/i915/gt/intel_gt_requests.h
index d626fb115386..dbac53baf1cb 100644
--- a/drivers/gpu/drm/i915/gt/intel_gt_requests.h
+++ b/drivers/gpu/drm/i915/gt/intel_gt_requests.h
@@ -27,5 +27,6 @@ int intel_gt_wait_for_idle(struct intel_gt *gt, long timeout);
void intel_gt_init_requests(struct intel_gt *gt);
void intel_gt_park_requests(struct intel_gt *gt);
void intel_gt_unpark_requests(struct intel_gt *gt);
+void intel_gt_fini_requests(struct intel_gt *gt);
#endif /* INTEL_GT_REQUESTS_H */
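Note the reordering in retire_work_handler() above: the work re-arms itself before retiring, so a slow retire pass cannot delay the next tick, and the new intel_gt_fini_requests() relies on cancel_delayed_work_sync() both to stop the timer and to wait out a running instance. A tiny runnable illustration of why re-arming first fixes the period (the numbers are arbitrary stand-ins for jiffies):

#include <stdio.h>

static int next_tick;

static void schedule_tick(int now)
{
	next_tick = now + 10;	/* stand-in for round_jiffies_up_relative(HZ) */
}

/* Re-arm first, as retire_work_handler() now does: the next deadline
 * is fixed before the (possibly slow) retire pass runs. */
static void tick(int now, int work_cost)
{
	schedule_tick(now);
	now += work_cost;	/* simulate a slow intel_gt_retire_requests() */
	printf("work finished at %d, next tick already set for %d\n",
	       now, next_tick);
}

int main(void)
{
	tick(0, 25);	/* slow work no longer pushes the next tick out */
	return 0;
}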
diff --git a/drivers/gpu/drm/i915/gt/intel_gt_types.h b/drivers/gpu/drm/i915/gt/intel_gt_types.h
index d4e14dbd172e..96890dd12b5f 100644
--- a/drivers/gpu/drm/i915/gt/intel_gt_types.h
+++ b/drivers/gpu/drm/i915/gt/intel_gt_types.h
@@ -90,6 +90,13 @@ struct intel_gt {
struct intel_engine_cs *engine[I915_NUM_ENGINES];
struct intel_engine_cs *engine_class[MAX_ENGINE_CLASS + 1]
[MAX_ENGINE_INSTANCE + 1];
+
+ /*
+ * Default address space (either GGTT or ppGTT depending on arch).
+ *
+ * Reserved for exclusive use by the kernel.
+ */
+ struct i915_address_space *vm;
};
enum intel_gt_scratch_field {
diff --git a/drivers/gpu/drm/i915/gt/intel_gtt.c b/drivers/gpu/drm/i915/gt/intel_gtt.c
new file mode 100644
index 000000000000..16acdc5d6734
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/intel_gtt.c
@@ -0,0 +1,598 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2020 Intel Corporation
+ */
+
+#include <linux/slab.h> /* fault-inject.h is not standalone! */
+
+#include <linux/fault-inject.h>
+
+#include "i915_trace.h"
+#include "intel_gt.h"
+#include "intel_gtt.h"
+
+void stash_init(struct pagestash *stash)
+{
+ pagevec_init(&stash->pvec);
+ spin_lock_init(&stash->lock);
+}
+
+static struct page *stash_pop_page(struct pagestash *stash)
+{
+ struct page *page = NULL;
+
+ spin_lock(&stash->lock);
+ if (likely(stash->pvec.nr))
+ page = stash->pvec.pages[--stash->pvec.nr];
+ spin_unlock(&stash->lock);
+
+ return page;
+}
+
+static void stash_push_pagevec(struct pagestash *stash, struct pagevec *pvec)
+{
+ unsigned int nr;
+
+ spin_lock_nested(&stash->lock, SINGLE_DEPTH_NESTING);
+
+ nr = min_t(typeof(nr), pvec->nr, pagevec_space(&stash->pvec));
+ memcpy(stash->pvec.pages + stash->pvec.nr,
+ pvec->pages + pvec->nr - nr,
+ sizeof(pvec->pages[0]) * nr);
+ stash->pvec.nr += nr;
+
+ spin_unlock(&stash->lock);
+
+ pvec->nr -= nr;
+}
+
+static struct page *vm_alloc_page(struct i915_address_space *vm, gfp_t gfp)
+{
+ struct pagevec stack;
+ struct page *page;
+
+ if (I915_SELFTEST_ONLY(should_fail(&vm->fault_attr, 1)))
+ i915_gem_shrink_all(vm->i915);
+
+ page = stash_pop_page(&vm->free_pages);
+ if (page)
+ return page;
+
+ if (!vm->pt_kmap_wc)
+ return alloc_page(gfp);
+
+ /* Look in our global stash of WC pages... */
+ page = stash_pop_page(&vm->i915->mm.wc_stash);
+ if (page)
+ return page;
+
+ /*
+ * Otherwise batch allocate pages to amortize cost of set_pages_wc.
+ *
+ * We have to be careful as page allocation may trigger the shrinker
+ * (via direct reclaim) which will fill up the WC stash underneath us.
+ * So we add our WB pages into a temporary pvec on the stack and merge
+ * them into the WC stash after all the allocations are complete.
+ */
+ pagevec_init(&stack);
+ do {
+ struct page *page;
+
+ page = alloc_page(gfp);
+ if (unlikely(!page))
+ break;
+
+ stack.pages[stack.nr++] = page;
+ } while (pagevec_space(&stack));
+
+ if (stack.nr && !set_pages_array_wc(stack.pages, stack.nr)) {
+ page = stack.pages[--stack.nr];
+
+ /* Merge spare WC pages to the global stash */
+ if (stack.nr)
+ stash_push_pagevec(&vm->i915->mm.wc_stash, &stack);
+
+ /* Push any surplus WC pages onto the local VM stash */
+ if (stack.nr)
+ stash_push_pagevec(&vm->free_pages, &stack);
+ }
+
+ /* Return unwanted leftovers */
+ if (unlikely(stack.nr)) {
+ WARN_ON_ONCE(set_pages_array_wb(stack.pages, stack.nr));
+ __pagevec_release(&stack);
+ }
+
+ return page;
+}
+
+static void vm_free_pages_release(struct i915_address_space *vm,
+ bool immediate)
+{
+ struct pagevec *pvec = &vm->free_pages.pvec;
+ struct pagevec stack;
+
+ lockdep_assert_held(&vm->free_pages.lock);
+ GEM_BUG_ON(!pagevec_count(pvec));
+
+ if (vm->pt_kmap_wc) {
+ /*
+ * When we use WC, first fill up the global stash and then
+ * only if full immediately free the overflow.
+ */
+ stash_push_pagevec(&vm->i915->mm.wc_stash, pvec);
+
+ /*
+ * As we have made some room in the VM's free_pages,
+ * we can wait for it to fill again, unless we are
+ * inside i915_address_space_fini() and must
+ * immediately release the pages!
+ */
+ if (pvec->nr <= (immediate ? 0 : PAGEVEC_SIZE - 1))
+ return;
+
+ /*
+ * We have to drop the lock to allow ourselves to sleep,
+ * so take a copy of the pvec and clear the stash for
+ * others to use it as we sleep.
+ */
+ stack = *pvec;
+ pagevec_reinit(pvec);
+ spin_unlock(&vm->free_pages.lock);
+
+ pvec = &stack;
+ set_pages_array_wb(pvec->pages, pvec->nr);
+
+ spin_lock(&vm->free_pages.lock);
+ }
+
+ __pagevec_release(pvec);
+}
+
+static void vm_free_page(struct i915_address_space *vm, struct page *page)
+{
+ /*
+ * On !llc, we need to change the pages back to WB. We only do so
+ * in bulk, so we rarely need to change the page attributes here,
+ * but doing so requires a stop_machine() from deep inside arch/x86/mm.
+ * To make detection of the possible sleep more likely, use an
+ * unconditional might_sleep() for everybody.
+ */
+ might_sleep();
+ spin_lock(&vm->free_pages.lock);
+ while (!pagevec_space(&vm->free_pages.pvec))
+ vm_free_pages_release(vm, false);
+ GEM_BUG_ON(pagevec_count(&vm->free_pages.pvec) >= PAGEVEC_SIZE);
+ pagevec_add(&vm->free_pages.pvec, page);
+ spin_unlock(&vm->free_pages.lock);
+}
+
+void __i915_vm_close(struct i915_address_space *vm)
+{
+ struct i915_vma *vma, *vn;
+
+ mutex_lock(&vm->mutex);
+ list_for_each_entry_safe(vma, vn, &vm->bound_list, vm_link) {
+ struct drm_i915_gem_object *obj = vma->obj;
+
+ /* Keep the obj (and hence the vma) alive as _we_ destroy it */
+ if (!kref_get_unless_zero(&obj->base.refcount))
+ continue;
+
+ atomic_and(~I915_VMA_PIN_MASK, &vma->flags);
+ WARN_ON(__i915_vma_unbind(vma));
+ __i915_vma_put(vma);
+
+ i915_gem_object_put(obj);
+ }
+ GEM_BUG_ON(!list_empty(&vm->bound_list));
+ mutex_unlock(&vm->mutex);
+}
+
+void i915_address_space_fini(struct i915_address_space *vm)
+{
+ spin_lock(&vm->free_pages.lock);
+ if (pagevec_count(&vm->free_pages.pvec))
+ vm_free_pages_release(vm, true);
+ GEM_BUG_ON(pagevec_count(&vm->free_pages.pvec));
+ spin_unlock(&vm->free_pages.lock);
+
+ drm_mm_takedown(&vm->mm);
+
+ mutex_destroy(&vm->mutex);
+}
+
+static void __i915_vm_release(struct work_struct *work)
+{
+ struct i915_address_space *vm =
+ container_of(work, struct i915_address_space, rcu.work);
+
+ vm->cleanup(vm);
+ i915_address_space_fini(vm);
+
+ kfree(vm);
+}
+
+void i915_vm_release(struct kref *kref)
+{
+ struct i915_address_space *vm =
+ container_of(kref, struct i915_address_space, ref);
+
+ GEM_BUG_ON(i915_is_ggtt(vm));
+ trace_i915_ppgtt_release(vm);
+
+ queue_rcu_work(vm->i915->wq, &vm->rcu);
+}
+
+void i915_address_space_init(struct i915_address_space *vm, int subclass)
+{
+ kref_init(&vm->ref);
+ INIT_RCU_WORK(&vm->rcu, __i915_vm_release);
+ atomic_set(&vm->open, 1);
+
+ /*
+ * The vm->mutex must be reclaim safe (for use in the shrinker).
+ * Do a dummy acquire now under fs_reclaim so that any allocation
+ * attempt holding the lock is immediately reported by lockdep.
+ */
+ mutex_init(&vm->mutex);
+ lockdep_set_subclass(&vm->mutex, subclass);
+ i915_gem_shrinker_taints_mutex(vm->i915, &vm->mutex);
+
+ GEM_BUG_ON(!vm->total);
+ drm_mm_init(&vm->mm, 0, vm->total);
+ vm->mm.head_node.color = I915_COLOR_UNEVICTABLE;
+
+ stash_init(&vm->free_pages);
+
+ INIT_LIST_HEAD(&vm->bound_list);
+}
+
+void clear_pages(struct i915_vma *vma)
+{
+ GEM_BUG_ON(!vma->pages);
+
+ if (vma->pages != vma->obj->mm.pages) {
+ sg_free_table(vma->pages);
+ kfree(vma->pages);
+ }
+ vma->pages = NULL;
+
+ memset(&vma->page_sizes, 0, sizeof(vma->page_sizes));
+}
+
+static int __setup_page_dma(struct i915_address_space *vm,
+ struct i915_page_dma *p,
+ gfp_t gfp)
+{
+ p->page = vm_alloc_page(vm, gfp | I915_GFP_ALLOW_FAIL);
+ if (unlikely(!p->page))
+ return -ENOMEM;
+
+ p->daddr = dma_map_page_attrs(vm->dma,
+ p->page, 0, PAGE_SIZE,
+ PCI_DMA_BIDIRECTIONAL,
+ DMA_ATTR_SKIP_CPU_SYNC |
+ DMA_ATTR_NO_WARN);
+ if (unlikely(dma_mapping_error(vm->dma, p->daddr))) {
+ vm_free_page(vm, p->page);
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+int setup_page_dma(struct i915_address_space *vm, struct i915_page_dma *p)
+{
+ return __setup_page_dma(vm, p, __GFP_HIGHMEM);
+}
+
+void cleanup_page_dma(struct i915_address_space *vm, struct i915_page_dma *p)
+{
+ dma_unmap_page(vm->dma, p->daddr, PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
+ vm_free_page(vm, p->page);
+}
+
+void
+fill_page_dma(const struct i915_page_dma *p, const u64 val, unsigned int count)
+{
+ kunmap_atomic(memset64(kmap_atomic(p->page), val, count));
+}
+
+int setup_scratch_page(struct i915_address_space *vm, gfp_t gfp)
+{
+ unsigned long size;
+
+ /*
+ * In order to utilize 64K pages for an object with a size < 2M, we will
+ * need to support a 64K scratch page, given that every 16th entry for a
+ * page-table operating in 64K mode must point to a properly aligned 64K
+ * region, including any PTEs which happen to point to scratch.
+ *
+ * This is only relevant for the 48b PPGTT where we support
+ * huge-gtt-pages, see also i915_vma_insert(). However, as we share the
+ * scratch (read-only) between all vm, we create one 64k scratch page
+ * for all.
+ */
+ size = I915_GTT_PAGE_SIZE_4K;
+ if (i915_vm_is_4lvl(vm) &&
+ HAS_PAGE_SIZES(vm->i915, I915_GTT_PAGE_SIZE_64K)) {
+ size = I915_GTT_PAGE_SIZE_64K;
+ gfp |= __GFP_NOWARN;
+ }
+ gfp |= __GFP_ZERO | __GFP_RETRY_MAYFAIL;
+
+ do {
+ unsigned int order = get_order(size);
+ struct page *page;
+ dma_addr_t addr;
+
+ page = alloc_pages(gfp, order);
+ if (unlikely(!page))
+ goto skip;
+
+ addr = dma_map_page_attrs(vm->dma,
+ page, 0, size,
+ PCI_DMA_BIDIRECTIONAL,
+ DMA_ATTR_SKIP_CPU_SYNC |
+ DMA_ATTR_NO_WARN);
+ if (unlikely(dma_mapping_error(vm->dma, addr)))
+ goto free_page;
+
+ if (unlikely(!IS_ALIGNED(addr, size)))
+ goto unmap_page;
+
+ vm->scratch[0].base.page = page;
+ vm->scratch[0].base.daddr = addr;
+ vm->scratch_order = order;
+ return 0;
+
+unmap_page:
+ dma_unmap_page(vm->dma, addr, size, PCI_DMA_BIDIRECTIONAL);
+free_page:
+ __free_pages(page, order);
+skip:
+ if (size == I915_GTT_PAGE_SIZE_4K)
+ return -ENOMEM;
+
+ size = I915_GTT_PAGE_SIZE_4K;
+ gfp &= ~__GFP_NOWARN;
+ } while (1);
+}
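setup_scratch_page() above tries the 64K scratch size first and, if allocation or alignment fails, loops back with the plain 4K size; get_order() converts each byte size into a power-of-two page-allocation order. A hedged userspace stand-in showing the orders involved:

#include <stdio.h>

#define PAGE_SHIFT 12

/* Userspace stand-in for the kernel's get_order(): the smallest n
 * such that 2^n pages cover the requested size. */
static int get_order(unsigned long size)
{
	int order = 0;

	size = (size - 1) >> PAGE_SHIFT;
	while (size) {
		order++;
		size >>= 1;
	}
	return order;
}

int main(void)
{
	printf("order(4K)=%d order(64K)=%d\n",
	       get_order(4096), get_order(65536));	/* 0 and 4 */
	return 0;
}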
+
+void cleanup_scratch_page(struct i915_address_space *vm)
+{
+ struct i915_page_dma *p = px_base(&vm->scratch[0]);
+ unsigned int order = vm->scratch_order;
+
+ dma_unmap_page(vm->dma, p->daddr, BIT(order) << PAGE_SHIFT,
+ PCI_DMA_BIDIRECTIONAL);
+ __free_pages(p->page, order);
+}
+
+void free_scratch(struct i915_address_space *vm)
+{
+ int i;
+
+ if (!px_dma(&vm->scratch[0])) /* set to 0 on clones */
+ return;
+
+ for (i = 1; i <= vm->top; i++) {
+ if (!px_dma(&vm->scratch[i]))
+ break;
+ cleanup_page_dma(vm, px_base(&vm->scratch[i]));
+ }
+
+ cleanup_scratch_page(vm);
+}
+
+void gtt_write_workarounds(struct intel_gt *gt)
+{
+ struct drm_i915_private *i915 = gt->i915;
+ struct intel_uncore *uncore = gt->uncore;
+
+ /*
+ * This function is for GTT-related workarounds. It is called on
+ * driver load and after a GPU reset, so workarounds placed here are
+ * reapplied even if a GPU reset overwrites them.
+ */
+ /* WaIncreaseDefaultTLBEntries:chv,bdw,skl,bxt,kbl,glk,cfl,cnl,icl */
+ if (IS_BROADWELL(i915))
+ intel_uncore_write(uncore,
+ GEN8_L3_LRA_1_GPGPU,
+ GEN8_L3_LRA_1_GPGPU_DEFAULT_VALUE_BDW);
+ else if (IS_CHERRYVIEW(i915))
+ intel_uncore_write(uncore,
+ GEN8_L3_LRA_1_GPGPU,
+ GEN8_L3_LRA_1_GPGPU_DEFAULT_VALUE_CHV);
+ else if (IS_GEN9_LP(i915))
+ intel_uncore_write(uncore,
+ GEN8_L3_LRA_1_GPGPU,
+ GEN9_L3_LRA_1_GPGPU_DEFAULT_VALUE_BXT);
+ else if (INTEL_GEN(i915) >= 9 && INTEL_GEN(i915) <= 11)
+ intel_uncore_write(uncore,
+ GEN8_L3_LRA_1_GPGPU,
+ GEN9_L3_LRA_1_GPGPU_DEFAULT_VALUE_SKL);
+
+ /*
+ * To support 64K PTEs we need to first enable the use of the
+ * Intermediate-Page-Size(IPS) bit of the PDE field via some magical
+ * mmio, otherwise the page-walker will simply ignore the IPS bit. This
+ * shouldn't be needed after GEN10.
+ *
+ * 64K pages were first introduced on BDW, although technically they
+ * only *work* from gen9+. For pre-BDW we instead have the option of
+ * 32K pages, but we don't currently have any support for them in our
+ * driver.
+ */
+ if (HAS_PAGE_SIZES(i915, I915_GTT_PAGE_SIZE_64K) &&
+ INTEL_GEN(i915) <= 10)
+ intel_uncore_rmw(uncore,
+ GEN8_GAMW_ECO_DEV_RW_IA,
+ 0,
+ GAMW_ECO_ENABLE_64K_IPS_FIELD);
+
+ if (IS_GEN_RANGE(i915, 8, 11)) {
+ bool can_use_gtt_cache = true;
+
+ /*
+ * According to the BSpec, if we use 2M/1G pages then we also
+ * need to disable the GTT cache. At least on BDW we can see
+ * visual corruption when using 2M pages without disabling the
+ * GTT cache.
+ */
+ if (HAS_PAGE_SIZES(i915, I915_GTT_PAGE_SIZE_2M))
+ can_use_gtt_cache = false;
+
+ /* WaGttCachingOffByDefault */
+ intel_uncore_write(uncore,
+ HSW_GTT_CACHE_EN,
+ can_use_gtt_cache ? GTT_CACHE_EN_ALL : 0);
+ WARN_ON_ONCE(can_use_gtt_cache &&
+ intel_uncore_read(uncore,
+ HSW_GTT_CACHE_EN) == 0);
+ }
+}
+
+u64 gen8_pte_encode(dma_addr_t addr,
+ enum i915_cache_level level,
+ u32 flags)
+{
+ gen8_pte_t pte = addr | _PAGE_PRESENT | _PAGE_RW;
+
+ if (unlikely(flags & PTE_READ_ONLY))
+ pte &= ~_PAGE_RW;
+
+ switch (level) {
+ case I915_CACHE_NONE:
+ pte |= PPAT_UNCACHED;
+ break;
+ case I915_CACHE_WT:
+ pte |= PPAT_DISPLAY_ELLC;
+ break;
+ default:
+ pte |= PPAT_CACHED;
+ break;
+ }
+
+ return pte;
+}
+
+static void tgl_setup_private_ppat(struct intel_uncore *uncore)
+{
+ /* TGL doesn't support LLC or AGE settings */
+ intel_uncore_write(uncore, GEN12_PAT_INDEX(0), GEN8_PPAT_WB);
+ intel_uncore_write(uncore, GEN12_PAT_INDEX(1), GEN8_PPAT_WC);
+ intel_uncore_write(uncore, GEN12_PAT_INDEX(2), GEN8_PPAT_WT);
+ intel_uncore_write(uncore, GEN12_PAT_INDEX(3), GEN8_PPAT_UC);
+ intel_uncore_write(uncore, GEN12_PAT_INDEX(4), GEN8_PPAT_WB);
+ intel_uncore_write(uncore, GEN12_PAT_INDEX(5), GEN8_PPAT_WB);
+ intel_uncore_write(uncore, GEN12_PAT_INDEX(6), GEN8_PPAT_WB);
+ intel_uncore_write(uncore, GEN12_PAT_INDEX(7), GEN8_PPAT_WB);
+}
+
+static void cnl_setup_private_ppat(struct intel_uncore *uncore)
+{
+ intel_uncore_write(uncore,
+ GEN10_PAT_INDEX(0),
+ GEN8_PPAT_WB | GEN8_PPAT_LLC);
+ intel_uncore_write(uncore,
+ GEN10_PAT_INDEX(1),
+ GEN8_PPAT_WC | GEN8_PPAT_LLCELLC);
+ intel_uncore_write(uncore,
+ GEN10_PAT_INDEX(2),
+ GEN8_PPAT_WT | GEN8_PPAT_LLCELLC);
+ intel_uncore_write(uncore,
+ GEN10_PAT_INDEX(3),
+ GEN8_PPAT_UC);
+ intel_uncore_write(uncore,
+ GEN10_PAT_INDEX(4),
+ GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(0));
+ intel_uncore_write(uncore,
+ GEN10_PAT_INDEX(5),
+ GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(1));
+ intel_uncore_write(uncore,
+ GEN10_PAT_INDEX(6),
+ GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(2));
+ intel_uncore_write(uncore,
+ GEN10_PAT_INDEX(7),
+ GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(3));
+}
+
+/*
+ * The GGTT and PPGTT need a private PPAT setup in order to handle cacheability
+ * bits. When using advanced contexts each context stores its own PAT, but
+ * writing this data shouldn't be harmful even in those cases.
+ */
+static void bdw_setup_private_ppat(struct intel_uncore *uncore)
+{
+ u64 pat;
+
+ pat = GEN8_PPAT(0, GEN8_PPAT_WB | GEN8_PPAT_LLC) | /* for normal objects, no eLLC */
+ GEN8_PPAT(1, GEN8_PPAT_WC | GEN8_PPAT_LLCELLC) | /* for something pointing to ptes? */
+ GEN8_PPAT(2, GEN8_PPAT_WT | GEN8_PPAT_LLCELLC) | /* for scanout with eLLC */
+ GEN8_PPAT(3, GEN8_PPAT_UC) | /* Uncached objects, mostly for scanout */
+ GEN8_PPAT(4, GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(0)) |
+ GEN8_PPAT(5, GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(1)) |
+ GEN8_PPAT(6, GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(2)) |
+ GEN8_PPAT(7, GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(3));
+
+ intel_uncore_write(uncore, GEN8_PRIVATE_PAT_LO, lower_32_bits(pat));
+ intel_uncore_write(uncore, GEN8_PRIVATE_PAT_HI, upper_32_bits(pat));
+}
+
+static void chv_setup_private_ppat(struct intel_uncore *uncore)
+{
+ u64 pat;
+
+ /*
+ * Map WB on BDW to snooped on CHV.
+ *
+ * Only the snoop bit has meaning for CHV, the rest is
+ * ignored.
+ *
+ * The hardware will never snoop for certain types of accesses:
+ * - CPU GTT (GMADR->GGTT->no snoop->memory)
+ * - PPGTT page tables
+ * - some other special cycles
+ *
+ * As with BDW, we also need to consider the following for GT accesses:
+ * "For GGTT, there is NO pat_sel[2:0] from the entry,
+ * so RTL will always use the value corresponding to
+ * pat_sel = 000".
+ * Which means we must set the snoop bit in PAT entry 0
+ * in order to keep the global status page working.
+ */
+
+ pat = GEN8_PPAT(0, CHV_PPAT_SNOOP) |
+ GEN8_PPAT(1, 0) |
+ GEN8_PPAT(2, 0) |
+ GEN8_PPAT(3, 0) |
+ GEN8_PPAT(4, CHV_PPAT_SNOOP) |
+ GEN8_PPAT(5, CHV_PPAT_SNOOP) |
+ GEN8_PPAT(6, CHV_PPAT_SNOOP) |
+ GEN8_PPAT(7, CHV_PPAT_SNOOP);
+
+ intel_uncore_write(uncore, GEN8_PRIVATE_PAT_LO, lower_32_bits(pat));
+ intel_uncore_write(uncore, GEN8_PRIVATE_PAT_HI, upper_32_bits(pat));
+}
+
+void setup_private_pat(struct intel_uncore *uncore)
+{
+ struct drm_i915_private *i915 = uncore->i915;
+
+ GEM_BUG_ON(INTEL_GEN(i915) < 8);
+
+ if (INTEL_GEN(i915) >= 12)
+ tgl_setup_private_ppat(uncore);
+ else if (INTEL_GEN(i915) >= 10)
+ cnl_setup_private_ppat(uncore);
+ else if (IS_CHERRYVIEW(i915) || IS_GEN9_LP(i915))
+ chv_setup_private_ppat(uncore);
+ else
+ bdw_setup_private_ppat(uncore);
+}
+
+#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
+#include "selftests/mock_gtt.c"
+#endif
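To make gen8_pte_encode() above concrete, here is a worked example of the encoding with illustrative bit positions (the real _PAGE_* and PPAT_* values come from the kernel headers, so treat the constants below as assumptions):

#include <inttypes.h>
#include <stdio.h>

/* Illustrative bit positions only; see the kernel's _PAGE_*/PPAT_* macros. */
#define _PAGE_PRESENT (1ull << 0)
#define _PAGE_RW      (1ull << 1)
#define _PAGE_PWT     (1ull << 3)
#define _PAGE_PCD     (1ull << 4)
#define PPAT_UNCACHED (_PAGE_PWT | _PAGE_PCD)

static uint64_t pte_encode(uint64_t addr, int uncached, int read_only)
{
	uint64_t pte = addr | _PAGE_PRESENT | _PAGE_RW;

	if (read_only)
		pte &= ~_PAGE_RW;	/* PTE_READ_ONLY strips the RW bit */
	if (uncached)
		pte |= PPAT_UNCACHED;	/* I915_CACHE_NONE selects PWT|PCD */

	return pte;
}

int main(void)
{
	/* a 4K-aligned dma address, uncached and read-only */
	printf("pte=%#" PRIx64 "\n", pte_encode(0x1000, 1, 1));	/* 0x1019 */
	return 0;
}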
diff --git a/drivers/gpu/drm/i915/gt/intel_gtt.h b/drivers/gpu/drm/i915/gt/intel_gtt.h
new file mode 100644
index 000000000000..7da7681c20b1
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/intel_gtt.h
@@ -0,0 +1,587 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2020 Intel Corporation
+ *
+ * Please try to maintain the following order within this file unless it makes
+ * sense to do otherwise. From top to bottom:
+ * 1. typedefs
+ * 2. #defines, and macros
+ * 3. structure definitions
+ * 4. function prototypes
+ *
+ * Within each section, please try to order by generation in ascending order,
+ * from top to bottom (i.e. gen6 at the top, gen8 at the bottom).
+ */
+
+#ifndef __INTEL_GTT_H__
+#define __INTEL_GTT_H__
+
+#include <linux/io-mapping.h>
+#include <linux/kref.h>
+#include <linux/mm.h>
+#include <linux/pagevec.h>
+#include <linux/scatterlist.h>
+#include <linux/workqueue.h>
+
+#include <drm/drm_mm.h>
+
+#include "gt/intel_reset.h"
+#include "i915_gem_fence_reg.h"
+#include "i915_selftest.h"
+#include "i915_vma_types.h"
+
+#define I915_GFP_ALLOW_FAIL (GFP_KERNEL | __GFP_RETRY_MAYFAIL | __GFP_NOWARN)
+
+#if IS_ENABLED(CONFIG_DRM_I915_TRACE_GTT)
+#define DBG(...) trace_printk(__VA_ARGS__)
+#else
+#define DBG(...)
+#endif
+
+#define NALLOC 3 /* 1 normal, 1 for concurrent threads, 1 for preallocation */
+
+#define I915_GTT_PAGE_SIZE_4K BIT_ULL(12)
+#define I915_GTT_PAGE_SIZE_64K BIT_ULL(16)
+#define I915_GTT_PAGE_SIZE_2M BIT_ULL(21)
+
+#define I915_GTT_PAGE_SIZE I915_GTT_PAGE_SIZE_4K
+#define I915_GTT_MAX_PAGE_SIZE I915_GTT_PAGE_SIZE_2M
+
+#define I915_GTT_PAGE_MASK -I915_GTT_PAGE_SIZE
+
+#define I915_GTT_MIN_ALIGNMENT I915_GTT_PAGE_SIZE
+
+#define I915_FENCE_REG_NONE -1
+#define I915_MAX_NUM_FENCES 32
+/* 32 fences + sign bit for FENCE_REG_NONE */
+#define I915_MAX_NUM_FENCE_BITS 6
+
+typedef u32 gen6_pte_t;
+typedef u64 gen8_pte_t;
+
+#define ggtt_total_entries(ggtt) ((ggtt)->vm.total >> PAGE_SHIFT)
+
+#define I915_PTES(pte_len) ((unsigned int)(PAGE_SIZE / (pte_len)))
+#define I915_PTE_MASK(pte_len) (I915_PTES(pte_len) - 1)
+#define I915_PDES 512
+#define I915_PDE_MASK (I915_PDES - 1)
+
+/* gen6-hsw has bit 11-4 for physical addr bit 39-32 */
+#define GEN6_GTT_ADDR_ENCODE(addr) ((addr) | (((addr) >> 28) & 0xff0))
+#define GEN6_PTE_ADDR_ENCODE(addr) GEN6_GTT_ADDR_ENCODE(addr)
+#define GEN6_PDE_ADDR_ENCODE(addr) GEN6_GTT_ADDR_ENCODE(addr)
+#define GEN6_PTE_CACHE_LLC (2 << 1)
+#define GEN6_PTE_UNCACHED (1 << 1)
+#define GEN6_PTE_VALID REG_BIT(0)
+
+#define GEN6_PTES I915_PTES(sizeof(gen6_pte_t))
+#define GEN6_PD_SIZE (I915_PDES * PAGE_SIZE)
+#define GEN6_PD_ALIGN (PAGE_SIZE * 16)
+#define GEN6_PDE_SHIFT 22
+#define GEN6_PDE_VALID REG_BIT(0)
+#define NUM_PTE(pde_shift) (1 << (pde_shift - PAGE_SHIFT))
+
+#define GEN7_PTE_CACHE_L3_LLC (3 << 1)
+
+#define BYT_PTE_SNOOPED_BY_CPU_CACHES REG_BIT(2)
+#define BYT_PTE_WRITEABLE REG_BIT(1)
+
+/*
+ * Cacheability Control is a 4-bit value. The low three bits are stored in bits
+ * 3:1 of the PTE, while the fourth bit is stored in bit 11 of the PTE.
+ */
+#define HSW_CACHEABILITY_CONTROL(bits) ((((bits) & 0x7) << 1) | \
+ (((bits) & 0x8) << (11 - 3)))
+#define HSW_WB_LLC_AGE3 HSW_CACHEABILITY_CONTROL(0x2)
+#define HSW_WB_LLC_AGE0 HSW_CACHEABILITY_CONTROL(0x3)
+#define HSW_WB_ELLC_LLC_AGE3 HSW_CACHEABILITY_CONTROL(0x8)
+#define HSW_WB_ELLC_LLC_AGE0 HSW_CACHEABILITY_CONTROL(0xb)
+#define HSW_WT_ELLC_LLC_AGE3 HSW_CACHEABILITY_CONTROL(0x7)
+#define HSW_WT_ELLC_LLC_AGE0 HSW_CACHEABILITY_CONTROL(0x6)
+#define HSW_PTE_UNCACHED (0)
+#define HSW_GTT_ADDR_ENCODE(addr) ((addr) | (((addr) >> 28) & 0x7f0))
+#define HSW_PTE_ADDR_ENCODE(addr) HSW_GTT_ADDR_ENCODE(addr)
+
+/*
+ * GEN8 32b style address is defined as a 3 level page table:
+ * 31:30 | 29:21 | 20:12 | 11:0
+ * PDPE | PDE | PTE | offset
+ * The difference as compared to normal x86 3 level page table is the PDPEs are
+ * programmed via register.
+ *
+ * GEN8 48b style address is defined as a 4 level page table:
+ * 47:39 | 38:30 | 29:21 | 20:12 | 11:0
+ * PML4E | PDPE | PDE | PTE | offset
+ */
+#define GEN8_3LVL_PDPES 4
+
+#define PPAT_UNCACHED (_PAGE_PWT | _PAGE_PCD)
+#define PPAT_CACHED_PDE 0 /* WB LLC */
+#define PPAT_CACHED _PAGE_PAT /* WB LLCeLLC */
+#define PPAT_DISPLAY_ELLC _PAGE_PCD /* WT eLLC */
+
+#define CHV_PPAT_SNOOP REG_BIT(6)
+#define GEN8_PPAT_AGE(x) ((x)<<4)
+#define GEN8_PPAT_LLCeLLC (3<<2)
+#define GEN8_PPAT_LLCELLC (2<<2)
+#define GEN8_PPAT_LLC (1<<2)
+#define GEN8_PPAT_WB (3<<0)
+#define GEN8_PPAT_WT (2<<0)
+#define GEN8_PPAT_WC (1<<0)
+#define GEN8_PPAT_UC (0<<0)
+#define GEN8_PPAT_ELLC_OVERRIDE (0<<2)
+#define GEN8_PPAT(i, x) ((u64)(x) << ((i) * 8))
+
+#define GEN8_PDE_IPS_64K BIT(11)
+#define GEN8_PDE_PS_2M BIT(7)
+
+#define for_each_sgt_daddr(__dp, __iter, __sgt) \
+ __for_each_sgt_daddr(__dp, __iter, __sgt, I915_GTT_PAGE_SIZE)
+
+struct i915_page_dma {
+ struct page *page;
+ union {
+ dma_addr_t daddr;
+
+ /*
+ * For gen6/gen7 only. This is the offset in the GGTT
+ * where the page directory entries for PPGTT begin.
+ */
+ u32 ggtt_offset;
+ };
+};
+
+struct i915_page_scratch {
+ struct i915_page_dma base;
+ u64 encode;
+};
+
+struct i915_page_table {
+ struct i915_page_dma base;
+ atomic_t used;
+};
+
+struct i915_page_directory {
+ struct i915_page_table pt;
+ spinlock_t lock;
+ void *entry[512];
+};
+
+#define __px_choose_expr(x, type, expr, other) \
+ __builtin_choose_expr( \
+ __builtin_types_compatible_p(typeof(x), type) || \
+ __builtin_types_compatible_p(typeof(x), const type), \
+ ({ type __x = (type)(x); expr; }), \
+ other)
+
+#define px_base(px) \
+ __px_choose_expr(px, struct i915_page_dma *, __x, \
+ __px_choose_expr(px, struct i915_page_scratch *, &__x->base, \
+ __px_choose_expr(px, struct i915_page_table *, &__x->base, \
+ __px_choose_expr(px, struct i915_page_directory *, &__x->pt.base, \
+ (void)0))))
+#define px_dma(px) (px_base(px)->daddr)
+
+#define px_pt(px) \
+ __px_choose_expr(px, struct i915_page_table *, __x, \
+ __px_choose_expr(px, struct i915_page_directory *, &__x->pt, \
+ (void)0))
+#define px_used(px) (&px_pt(px)->used)
+
+enum i915_cache_level;
+
+struct drm_i915_file_private;
+struct drm_i915_gem_object;
+struct i915_vma;
+struct intel_gt;
+
+struct i915_vma_ops {
+ /* Map an object into an address space with the given cache flags. */
+ int (*bind_vma)(struct i915_vma *vma,
+ enum i915_cache_level cache_level,
+ u32 flags);
+ /*
+ * Unmap an object from an address space. This usually consists of
+ * setting the valid PTE entries to a reserved scratch page.
+ */
+ void (*unbind_vma)(struct i915_vma *vma);
+
+ int (*set_pages)(struct i915_vma *vma);
+ void (*clear_pages)(struct i915_vma *vma);
+};
+
+struct pagestash {
+ spinlock_t lock;
+ struct pagevec pvec;
+};
+
+void stash_init(struct pagestash *stash);
+
+struct i915_address_space {
+ struct kref ref;
+ struct rcu_work rcu;
+
+ struct drm_mm mm;
+ struct intel_gt *gt;
+ struct drm_i915_private *i915;
+ struct device *dma;
+ /*
+ * Every address space belongs to a struct file - except for the global
+ * GTT that is owned by the driver (and so @file is set to NULL). In
+ * principle, no information should leak from one context to another
+ * (or between files/processes etc) unless explicitly shared by the
+ * owner. Tracking the owner is important in order to free up per-file
+ * objects along with the file, to aid resource tracking, and to
+ * assign blame.
+ */
+ struct drm_i915_file_private *file;
+ u64 total; /* size addr space maps (ex. 2GB for ggtt) */
+ u64 reserved; /* size addr space reserved */
+
+ unsigned int bind_async_flags;
+
+ /*
+ * Each active user context has its own address space (in full-ppgtt).
+ * Since the vm may be shared between multiple contexts, we count how
+ * many contexts keep us "open". Once open hits zero, we are closed
+ * and do not allow any new attachments, and proceed to shutdown our
+ * vma and page directories.
+ */
+ atomic_t open;
+
+ struct mutex mutex; /* protects vma and our lists */
+#define VM_CLASS_GGTT 0
+#define VM_CLASS_PPGTT 1
+
+ struct i915_page_scratch scratch[4];
+ unsigned int scratch_order;
+ unsigned int top;
+
+ /**
+ * List of vma currently bound.
+ */
+ struct list_head bound_list;
+
+ struct pagestash free_pages;
+
+ /* Global GTT */
+ bool is_ggtt:1;
+
+ /* Some systems require uncached updates of the page directories */
+ bool pt_kmap_wc:1;
+
+ /* Some systems support read-only mappings for GGTT and/or PPGTT */
+ bool has_read_only:1;
+
+ u64 (*pte_encode)(dma_addr_t addr,
+ enum i915_cache_level level,
+ u32 flags); /* Create a valid PTE */
+#define PTE_READ_ONLY BIT(0)
+
+ int (*allocate_va_range)(struct i915_address_space *vm,
+ u64 start, u64 length);
+ void (*clear_range)(struct i915_address_space *vm,
+ u64 start, u64 length);
+ void (*insert_page)(struct i915_address_space *vm,
+ dma_addr_t addr,
+ u64 offset,
+ enum i915_cache_level cache_level,
+ u32 flags);
+ void (*insert_entries)(struct i915_address_space *vm,
+ struct i915_vma *vma,
+ enum i915_cache_level cache_level,
+ u32 flags);
+ void (*cleanup)(struct i915_address_space *vm);
+
+ struct i915_vma_ops vma_ops;
+
+ I915_SELFTEST_DECLARE(struct fault_attr fault_attr);
+ I915_SELFTEST_DECLARE(bool scrub_64K);
+};
+
+/*
+ * The Graphics Translation Table is the way in which GEN hardware translates a
+ * Graphics Virtual Address into a Physical Address. In addition to the normal
+ * collateral associated with any va->pa translations, GEN hardware also has a
+ * portion of the GTT which can be mapped by the CPU and remain both coherent
+ * and correct (in cases like swizzling). That region is referred to as GMADR in
+ * the spec.
+ */
+struct i915_ggtt {
+ struct i915_address_space vm;
+
+ struct io_mapping iomap; /* Mapping to our CPU mappable region */
+ struct resource gmadr; /* GMADR resource */
+ resource_size_t mappable_end; /* End offset that we can CPU map */
+
+ /** "Graphics Stolen Memory" holds the global PTEs */
+ void __iomem *gsm;
+ void (*invalidate)(struct i915_ggtt *ggtt);
+
+ /** PPGTT used for aliasing the PPGTT with the GTT */
+ struct i915_ppgtt *alias;
+
+ bool do_idle_maps;
+
+ int mtrr;
+
+ /** Bit 6 swizzling required for X tiling */
+ u32 bit_6_swizzle_x;
+ /** Bit 6 swizzling required for Y tiling */
+ u32 bit_6_swizzle_y;
+
+ u32 pin_bias;
+
+ unsigned int num_fences;
+ struct i915_fence_reg fence_regs[I915_MAX_NUM_FENCES];
+ struct list_head fence_list;
+
+ /**
+ * List of all objects in gtt_space, currently mmaped by userspace.
+ * All objects within this list must also be on bound_list.
+ */
+ struct list_head userfault_list;
+
+ /* Manual runtime pm autosuspend delay for user GGTT mmaps */
+ struct intel_wakeref_auto userfault_wakeref;
+
+ struct mutex error_mutex;
+ struct drm_mm_node error_capture;
+ struct drm_mm_node uc_fw;
+};
+
+struct i915_ppgtt {
+ struct i915_address_space vm;
+
+ struct i915_page_directory *pd;
+};
+
+#define i915_is_ggtt(vm) ((vm)->is_ggtt)
+
+static inline bool
+i915_vm_is_4lvl(const struct i915_address_space *vm)
+{
+ return (vm->total - 1) >> 32;
+}
+
+static inline bool
+i915_vm_has_scratch_64K(struct i915_address_space *vm)
+{
+ return vm->scratch_order == get_order(I915_GTT_PAGE_SIZE_64K);
+}
+
+static inline bool
+i915_vm_has_cache_coloring(struct i915_address_space *vm)
+{
+ return i915_is_ggtt(vm) && vm->mm.color_adjust;
+}
+
+static inline struct i915_ggtt *
+i915_vm_to_ggtt(struct i915_address_space *vm)
+{
+ BUILD_BUG_ON(offsetof(struct i915_ggtt, vm));
+ GEM_BUG_ON(!i915_is_ggtt(vm));
+ return container_of(vm, struct i915_ggtt, vm);
+}
+
+static inline struct i915_ppgtt *
+i915_vm_to_ppgtt(struct i915_address_space *vm)
+{
+ BUILD_BUG_ON(offsetof(struct i915_ppgtt, vm));
+ GEM_BUG_ON(i915_is_ggtt(vm));
+ return container_of(vm, struct i915_ppgtt, vm);
+}
+
+static inline struct i915_address_space *
+i915_vm_get(struct i915_address_space *vm)
+{
+ kref_get(&vm->ref);
+ return vm;
+}
+
+void i915_vm_release(struct kref *kref);
+
+static inline void i915_vm_put(struct i915_address_space *vm)
+{
+ kref_put(&vm->ref, i915_vm_release);
+}
+
+static inline struct i915_address_space *
+i915_vm_open(struct i915_address_space *vm)
+{
+ GEM_BUG_ON(!atomic_read(&vm->open));
+ atomic_inc(&vm->open);
+ return i915_vm_get(vm);
+}
+
+static inline bool
+i915_vm_tryopen(struct i915_address_space *vm)
+{
+ if (atomic_add_unless(&vm->open, 1, 0))
+ return i915_vm_get(vm);
+
+ return false;
+}
+
+void __i915_vm_close(struct i915_address_space *vm);
+
+static inline void
+i915_vm_close(struct i915_address_space *vm)
+{
+ GEM_BUG_ON(!atomic_read(&vm->open));
+ if (atomic_dec_and_test(&vm->open))
+ __i915_vm_close(vm);
+
+ i915_vm_put(vm);
+}
+
+void i915_address_space_init(struct i915_address_space *vm, int subclass);
+void i915_address_space_fini(struct i915_address_space *vm);
+
+static inline u32 i915_pte_index(u64 address, unsigned int pde_shift)
+{
+ const u32 mask = NUM_PTE(pde_shift) - 1;
+
+ return (address >> PAGE_SHIFT) & mask;
+}
+
+/*
+ * Helper to count the number of PTEs within the given length. This count
+ * does not cross a page table boundary, so the max value would be
+ * GEN6_PTES for GEN6 and GEN8_PTES for GEN8.
+ */
+static inline u32 i915_pte_count(u64 addr, u64 length, unsigned int pde_shift)
+{
+ const u64 mask = ~((1ULL << pde_shift) - 1);
+ u64 end;
+
+ GEM_BUG_ON(length == 0);
+ GEM_BUG_ON(offset_in_page(addr | length));
+
+ end = addr + length;
+
+ if ((addr & mask) != (end & mask))
+ return NUM_PTE(pde_shift) - i915_pte_index(addr, pde_shift);
+
+ return i915_pte_index(end, pde_shift) - i915_pte_index(addr, pde_shift);
+}
+
+static inline u32 i915_pde_index(u64 addr, u32 shift)
+{
+ return (addr >> shift) & I915_PDE_MASK;
+}
+
+static inline struct i915_page_table *
+i915_pt_entry(const struct i915_page_directory * const pd,
+ const unsigned short n)
+{
+ return pd->entry[n];
+}
+
+static inline struct i915_page_directory *
+i915_pd_entry(const struct i915_page_directory * const pdp,
+ const unsigned short n)
+{
+ return pdp->entry[n];
+}
+
+static inline dma_addr_t
+i915_page_dir_dma_addr(const struct i915_ppgtt *ppgtt, const unsigned int n)
+{
+ struct i915_page_dma *pt = ppgtt->pd->entry[n];
+
+ return px_dma(pt ?: px_base(&ppgtt->vm.scratch[ppgtt->vm.top]));
+}
+
+void ppgtt_init(struct i915_ppgtt *ppgtt, struct intel_gt *gt);
+
+int i915_ggtt_probe_hw(struct drm_i915_private *i915);
+int i915_ggtt_init_hw(struct drm_i915_private *i915);
+int i915_ggtt_enable_hw(struct drm_i915_private *i915);
+void i915_ggtt_enable_guc(struct i915_ggtt *ggtt);
+void i915_ggtt_disable_guc(struct i915_ggtt *ggtt);
+int i915_init_ggtt(struct drm_i915_private *i915);
+void i915_ggtt_driver_release(struct drm_i915_private *i915);
+
+static inline bool i915_ggtt_has_aperture(const struct i915_ggtt *ggtt)
+{
+ return ggtt->mappable_end > 0;
+}
+
+int i915_ppgtt_init_hw(struct intel_gt *gt);
+
+struct i915_ppgtt *i915_ppgtt_create(struct intel_gt *gt);
+
+void i915_gem_suspend_gtt_mappings(struct drm_i915_private *i915);
+void i915_gem_restore_gtt_mappings(struct drm_i915_private *i915);
+
+u64 gen8_pte_encode(dma_addr_t addr,
+ enum i915_cache_level level,
+ u32 flags);
+
+int setup_page_dma(struct i915_address_space *vm, struct i915_page_dma *p);
+void cleanup_page_dma(struct i915_address_space *vm, struct i915_page_dma *p);
+
+#define kmap_atomic_px(px) kmap_atomic(px_base(px)->page)
+
+void
+fill_page_dma(const struct i915_page_dma *p, const u64 val, unsigned int count);
+
+#define fill_px(px, v) fill_page_dma(px_base(px), (v), PAGE_SIZE / sizeof(u64))
+#define fill32_px(px, v) do { \
+ u64 v__ = lower_32_bits(v); \
+ fill_px((px), v__ << 32 | v__); \
+} while (0)
+
+int setup_scratch_page(struct i915_address_space *vm, gfp_t gfp);
+void cleanup_scratch_page(struct i915_address_space *vm);
+void free_scratch(struct i915_address_space *vm);
+
+struct i915_page_table *alloc_pt(struct i915_address_space *vm);
+struct i915_page_directory *alloc_pd(struct i915_address_space *vm);
+struct i915_page_directory *__alloc_pd(size_t sz);
+
+void free_pd(struct i915_address_space *vm, struct i915_page_dma *pd);
+
+#define free_px(vm, px) free_pd(vm, px_base(px))
+
+void
+__set_pd_entry(struct i915_page_directory * const pd,
+ const unsigned short idx,
+ struct i915_page_dma * const to,
+ u64 (*encode)(const dma_addr_t, const enum i915_cache_level));
+
+#define set_pd_entry(pd, idx, to) \
+ __set_pd_entry((pd), (idx), px_base(to), gen8_pde_encode)
+
+void
+clear_pd_entry(struct i915_page_directory * const pd,
+ const unsigned short idx,
+ const struct i915_page_scratch * const scratch);
+
+bool
+release_pd_entry(struct i915_page_directory * const pd,
+ const unsigned short idx,
+ struct i915_page_table * const pt,
+ const struct i915_page_scratch * const scratch);
+void gen6_ggtt_invalidate(struct i915_ggtt *ggtt);
+
+int ggtt_set_pages(struct i915_vma *vma);
+int ppgtt_set_pages(struct i915_vma *vma);
+void clear_pages(struct i915_vma *vma);
+
+void gtt_write_workarounds(struct intel_gt *gt);
+
+void setup_private_pat(struct intel_uncore *uncore);
+
+static inline struct sgt_dma {
+ struct scatterlist *sg;
+ dma_addr_t dma, max;
+} sgt_dma(struct i915_vma *vma) {
+ struct scatterlist *sg = vma->pages->sgl;
+ dma_addr_t addr = sg_dma_address(sg);
+
+ return (struct sgt_dma){ sg, addr, addr + sg->length };
+}
+
+#endif
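The boundary-clamping in i915_pte_count() above is easiest to see with numbers. A self-contained restatement of the same arithmetic, with two worked cases (gen6 page-table geometry, 1024 PTEs per 4MiB table):

#include <stdio.h>

#define PAGE_SHIFT 12
#define GEN6_PDE_SHIFT 22
#define NUM_PTE(pde_shift) (1u << ((pde_shift) - PAGE_SHIFT))

static unsigned int pte_index(unsigned long long addr, unsigned int pde_shift)
{
	return (addr >> PAGE_SHIFT) & (NUM_PTE(pde_shift) - 1);
}

/* Count the PTEs spanned by [addr, addr + length) without crossing a
 * page-table boundary, as in i915_pte_count() above. */
static unsigned int pte_count(unsigned long long addr,
			      unsigned long long length,
			      unsigned int pde_shift)
{
	unsigned long long mask = ~((1ull << pde_shift) - 1);
	unsigned long long end = addr + length;

	if ((addr & mask) != (end & mask))	/* clamp at the boundary */
		return NUM_PTE(pde_shift) - pte_index(addr, pde_shift);

	return pte_index(end, pde_shift) - pte_index(addr, pde_shift);
}

int main(void)
{
	/* 8 pages wholly inside one gen6 page table (4MiB span) */
	printf("%u\n", pte_count(0x0, 8 * 4096, GEN6_PDE_SHIFT));	/* 8 */
	/* a range straddling the 4MiB boundary is clamped to it */
	printf("%u\n", pte_count(0x3ff000, 8 * 4096, GEN6_PDE_SHIFT));	/* 1 */
	return 0;
}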
diff --git a/drivers/gpu/drm/i915/gt/intel_lrc.c b/drivers/gpu/drm/i915/gt/intel_lrc.c
index 68179fb56427..0cf0f6fae675 100644
--- a/drivers/gpu/drm/i915/gt/intel_lrc.c
+++ b/drivers/gpu/drm/i915/gt/intel_lrc.c
@@ -133,12 +133,11 @@
*/
#include <linux/interrupt.h>
-#include "gem/i915_gem_context.h"
-
#include "i915_drv.h"
#include "i915_perf.h"
#include "i915_trace.h"
#include "i915_vgpu.h"
+#include "intel_context.h"
#include "intel_engine_pm.h"
#include "intel_gt.h"
#include "intel_gt_pm.h"
@@ -489,17 +488,23 @@ lrc_descriptor(struct intel_context *ce, struct intel_engine_cs *engine)
return desc;
}
-static u32 *set_offsets(u32 *regs,
+static inline unsigned int dword_in_page(void *addr)
+{
+ return offset_in_page(addr) / sizeof(u32);
+}
+
+static void set_offsets(u32 *regs,
const u8 *data,
- const struct intel_engine_cs *engine)
+ const struct intel_engine_cs *engine,
+ bool clear)
#define NOP(x) (BIT(7) | (x))
-#define LRI(count, flags) ((flags) << 6 | (count))
+#define LRI(count, flags) ((flags) << 6 | (count) | BUILD_BUG_ON_ZERO(count >= BIT(6)))
#define POSTED BIT(0)
#define REG(x) (((x) >> 2) | BUILD_BUG_ON_ZERO(x >= 0x200))
#define REG16(x) \
(((x) >> 9) | BIT(7) | BUILD_BUG_ON_ZERO(x >= 0x10000)), \
(((x) >> 2) & 0x7f)
-#define END() 0
+#define END(x) 0, (x)
{
const u32 base = engine->mmio_base;
@@ -507,7 +512,10 @@ static u32 *set_offsets(u32 *regs,
u8 count, flags;
if (*data & BIT(7)) { /* skip */
- regs += *data++ & ~BIT(7);
+ count = *data++ & ~BIT(7);
+ if (clear)
+ memset32(regs, MI_NOOP, count);
+ regs += count;
continue;
}
@@ -533,12 +541,25 @@ static u32 *set_offsets(u32 *regs,
offset |= v & ~BIT(7);
} while (v & BIT(7));
- *regs = base + (offset << 2);
+ regs[0] = base + (offset << 2);
+ if (clear)
+ regs[1] = 0;
regs += 2;
} while (--count);
}
- return regs;
+ if (clear) {
+ u8 count = *++data;
+
+ /* Clear past the tail for HW access */
+ GEM_BUG_ON(dword_in_page(regs) > count);
+ memset32(regs, MI_NOOP, count - dword_in_page(regs));
+
+ /* Close the batch; used mainly by live_lrc_layout() */
+ *regs = MI_BATCH_BUFFER_END;
+ if (INTEL_GEN(engine->i915) >= 10)
+ *regs |= BIT(0);
+ }
}
static const u8 gen8_xcs_offsets[] = {
@@ -573,7 +594,7 @@ static const u8 gen8_xcs_offsets[] = {
REG16(0x200),
REG(0x028),
- END(),
+ END(80)
};
static const u8 gen9_xcs_offsets[] = {
@@ -657,7 +678,7 @@ static const u8 gen9_xcs_offsets[] = {
REG16(0x67c),
REG(0x068),
- END(),
+ END(176)
};
static const u8 gen12_xcs_offsets[] = {
@@ -689,7 +710,7 @@ static const u8 gen12_xcs_offsets[] = {
REG16(0x274),
REG16(0x270),
- END(),
+ END(80)
};
static const u8 gen8_rcs_offsets[] = {
@@ -726,7 +747,91 @@ static const u8 gen8_rcs_offsets[] = {
LRI(1, 0),
REG(0x0c8),
- END(),
+ END(80)
+};
+
+static const u8 gen9_rcs_offsets[] = {
+ NOP(1),
+ LRI(14, POSTED),
+ REG16(0x244),
+ REG(0x34),
+ REG(0x30),
+ REG(0x38),
+ REG(0x3c),
+ REG(0x168),
+ REG(0x140),
+ REG(0x110),
+ REG(0x11c),
+ REG(0x114),
+ REG(0x118),
+ REG(0x1c0),
+ REG(0x1c4),
+ REG(0x1c8),
+
+ NOP(3),
+ LRI(9, POSTED),
+ REG16(0x3a8),
+ REG16(0x28c),
+ REG16(0x288),
+ REG16(0x284),
+ REG16(0x280),
+ REG16(0x27c),
+ REG16(0x278),
+ REG16(0x274),
+ REG16(0x270),
+
+ NOP(13),
+ LRI(1, 0),
+ REG(0xc8),
+
+ NOP(13),
+ LRI(44, POSTED),
+ REG(0x28),
+ REG(0x9c),
+ REG(0xc0),
+ REG(0x178),
+ REG(0x17c),
+ REG16(0x358),
+ REG(0x170),
+ REG(0x150),
+ REG(0x154),
+ REG(0x158),
+ REG16(0x41c),
+ REG16(0x600),
+ REG16(0x604),
+ REG16(0x608),
+ REG16(0x60c),
+ REG16(0x610),
+ REG16(0x614),
+ REG16(0x618),
+ REG16(0x61c),
+ REG16(0x620),
+ REG16(0x624),
+ REG16(0x628),
+ REG16(0x62c),
+ REG16(0x630),
+ REG16(0x634),
+ REG16(0x638),
+ REG16(0x63c),
+ REG16(0x640),
+ REG16(0x644),
+ REG16(0x648),
+ REG16(0x64c),
+ REG16(0x650),
+ REG16(0x654),
+ REG16(0x658),
+ REG16(0x65c),
+ REG16(0x660),
+ REG16(0x664),
+ REG16(0x668),
+ REG16(0x66c),
+ REG16(0x670),
+ REG16(0x674),
+ REG16(0x678),
+ REG16(0x67c),
+ REG(0x68),
+
+ END(176)
};
static const u8 gen11_rcs_offsets[] = {
@@ -767,7 +872,7 @@ static const u8 gen11_rcs_offsets[] = {
LRI(1, 0),
REG(0x0c8),
- END(),
+ END(80)
};
static const u8 gen12_rcs_offsets[] = {
@@ -808,7 +913,7 @@ static const u8 gen12_rcs_offsets[] = {
LRI(1, 0),
REG(0x0c8),
- END(),
+ END(80)
};
#undef END
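The offset tables above are a compact byte stream: REG() packs one register offset into a single byte, REG16() spills into two bytes with a continuation flag in bit 7, and set_offsets() unpacks them with the shift-and-merge loop shown in the hunk. A hedged userspace decode of that stream (the BUILD_BUG_ON_ZERO range checks are omitted here):

#include <stdio.h>

#define BIT(n) (1u << (n))

#define REG(x)      ((x) >> 2)
#define REG16_HI(x) (((x) >> 9) | BIT(7))
#define REG16_LO(x) (((x) >> 2) & 0x7f)

static unsigned int decode(const unsigned char *data)
{
	unsigned int offset = 0;
	unsigned char v;

	do {	/* the same loop set_offsets() runs per register */
		v = *data++;
		offset <<= 7;
		offset |= v & ~BIT(7);
	} while (v & BIT(7));

	return offset << 2;
}

int main(void)
{
	const unsigned char one[] = { REG(0x0c8) };
	const unsigned char two[] = { REG16_HI(0x3a8), REG16_LO(0x3a8) };

	printf("%#x %#x\n", decode(one), decode(two));	/* 0xc8 0x3a8 */
	return 0;
}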
@@ -833,6 +938,8 @@ static const u8 *reg_offsets(const struct intel_engine_cs *engine)
return gen12_rcs_offsets;
else if (INTEL_GEN(engine->i915) >= 11)
return gen11_rcs_offsets;
+ else if (INTEL_GEN(engine->i915) >= 9)
+ return gen9_rcs_offsets;
else
return gen8_rcs_offsets;
} else {
@@ -880,7 +987,7 @@ __unwind_incomplete_requests(struct intel_engine_cs *engine)
list_move(&rq->sched.link, pl);
active = rq;
} else {
- struct intel_engine_cs *owner = rq->hw_context->engine;
+ struct intel_engine_cs *owner = rq->context->engine;
/*
* Decouple the virtual breadcrumb before moving it
@@ -983,6 +1090,58 @@ static void intel_engine_context_out(struct intel_engine_cs *engine)
write_sequnlock_irqrestore(&engine->stats.lock, flags);
}
+static int lrc_ring_mi_mode(const struct intel_engine_cs *engine)
+{
+ if (INTEL_GEN(engine->i915) >= 12)
+ return 0x60;
+ else if (INTEL_GEN(engine->i915) >= 9)
+ return 0x54;
+ else if (engine->class == RENDER_CLASS)
+ return 0x58;
+ else
+ return -1;
+}
+
+static void
+execlists_check_context(const struct intel_context *ce,
+ const struct intel_engine_cs *engine)
+{
+ const struct intel_ring *ring = ce->ring;
+ u32 *regs = ce->lrc_reg_state;
+ bool valid = true;
+ int x;
+
+ if (regs[CTX_RING_START] != i915_ggtt_offset(ring->vma)) {
+ pr_err("%s: context submitted with incorrect RING_START [%08x], expected %08x\n",
+ engine->name,
+ regs[CTX_RING_START],
+ i915_ggtt_offset(ring->vma));
+ regs[CTX_RING_START] = i915_ggtt_offset(ring->vma);
+ valid = false;
+ }
+
+ if ((regs[CTX_RING_CTL] & ~(RING_WAIT | RING_WAIT_SEMAPHORE)) !=
+ (RING_CTL_SIZE(ring->size) | RING_VALID)) {
+ pr_err("%s: context submitted with incorrect RING_CTL [%08x], expected %08x\n",
+ engine->name,
+ regs[CTX_RING_CTL],
+ (u32)(RING_CTL_SIZE(ring->size) | RING_VALID));
+ regs[CTX_RING_CTL] = RING_CTL_SIZE(ring->size) | RING_VALID;
+ valid = false;
+ }
+
+ x = lrc_ring_mi_mode(engine);
+ if (x != -1 && regs[x + 1] & (regs[x + 1] >> 16) & STOP_RING) {
+ pr_err("%s: context submitted with STOP_RING [%08x] in RING_MI_MODE\n",
+ engine->name, regs[x + 1]);
+ regs[x + 1] &= ~STOP_RING;
+ regs[x + 1] |= STOP_RING << 16;
+ valid = false;
+ }
+
+ WARN_ONCE(!valid, "Invalid lrc state found before submission\n");
+}
+
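execlists_check_context() repairs a stuck STOP_RING using the masked-register convention: the high 16 bits of a write select which of the low 16 bits actually change. A tiny illustration of that bit dance (the STOP_RING bit position here is illustrative, not quoted from the register spec):

#include <stdio.h>

#define STOP_RING (1u << 8)	/* illustrative bit position */

int main(void)
{
	/* Saved RING_MI_MODE image with STOP_RING stuck on, mask set */
	unsigned int reg = STOP_RING | (STOP_RING << 16);

	reg &= ~STOP_RING;	/* value half: STOP_RING = 0 */
	reg |= STOP_RING << 16;	/* mask half: do update STOP_RING */

	printf("%#x\n", reg);	/* 0x1000000: mask set, bit cleared */
	return 0;
}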
static void restore_default_state(struct intel_context *ce,
struct intel_engine_cs *engine)
{
@@ -999,7 +1158,7 @@ static void restore_default_state(struct intel_context *ce,
static void reset_active(struct i915_request *rq,
struct intel_engine_cs *engine)
{
- struct intel_context * const ce = rq->hw_context;
+ struct intel_context * const ce = rq->context;
u32 head;
/*
@@ -1017,8 +1176,8 @@ static void reset_active(struct i915_request *rq,
* remain correctly ordered. And we defer to __i915_request_submit()
* so that all asynchronous waits are correctly handled.
*/
- GEM_TRACE("%s(%s): { rq=%llx:%lld }\n",
- __func__, engine->name, rq->fence.context, rq->fence.seqno);
+ ENGINE_TRACE(engine, "{ rq=%llx:%lld }\n",
+ rq->fence.context, rq->fence.seqno);
/* On resubmission of the active request, payload will be scrubbed */
if (i915_request_completed(rq))
@@ -1040,13 +1199,16 @@ static inline struct intel_engine_cs *
__execlists_schedule_in(struct i915_request *rq)
{
struct intel_engine_cs * const engine = rq->engine;
- struct intel_context * const ce = rq->hw_context;
+ struct intel_context * const ce = rq->context;
intel_context_get(ce);
- if (unlikely(i915_gem_context_is_banned(ce->gem_context)))
+ if (unlikely(intel_context_is_banned(ce)))
reset_active(rq, engine);
+ if (IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM))
+ execlists_check_context(ce, engine);
+
if (ce->tag) {
/* Use a fixed tag for OA and friends */
ce->lrc_desc |= (u64)ce->tag << 32;
@@ -1054,12 +1216,12 @@ __execlists_schedule_in(struct i915_request *rq)
/* We don't need a strict matching tag, just different values */
ce->lrc_desc &= ~GENMASK_ULL(47, 37);
ce->lrc_desc |=
- (u64)(engine->context_tag++ % NUM_CONTEXT_TAG) <<
+ (u64)(++engine->context_tag % NUM_CONTEXT_TAG) <<
GEN11_SW_CTX_ID_SHIFT;
BUILD_BUG_ON(NUM_CONTEXT_TAG > GEN12_MAX_CONTEXT_HW_ID);
}
- intel_gt_pm_get(engine->gt);
+ __intel_gt_pm_get(engine->gt);
execlists_context_status_change(rq, INTEL_CONTEXT_SCHEDULE_IN);
intel_engine_context_in(engine);
@@ -1069,7 +1231,7 @@ __execlists_schedule_in(struct i915_request *rq)
static inline struct i915_request *
execlists_schedule_in(struct i915_request *rq, int idx)
{
- struct intel_context * const ce = rq->hw_context;
+ struct intel_context * const ce = rq->context;
struct intel_engine_cs *old;
GEM_BUG_ON(!intel_engine_pm_is_awake(rq->engine));
@@ -1100,7 +1262,7 @@ static inline void
__execlists_schedule_out(struct i915_request *rq,
struct intel_engine_cs * const engine)
{
- struct intel_context * const ce = rq->hw_context;
+ struct intel_context * const ce = rq->context;
/*
* NB process_csb() is not under the engine->active.lock and hence
@@ -1138,7 +1300,7 @@ __execlists_schedule_out(struct i915_request *rq,
static inline void
execlists_schedule_out(struct i915_request *rq)
{
- struct intel_context * const ce = rq->hw_context;
+ struct intel_context * const ce = rq->context;
struct intel_engine_cs *cur, *old;
trace_i915_request_out(rq);
@@ -1155,7 +1317,7 @@ execlists_schedule_out(struct i915_request *rq)
static u64 execlists_update_context(struct i915_request *rq)
{
- struct intel_context *ce = rq->hw_context;
+ struct intel_context *ce = rq->context;
u64 desc = ce->lrc_desc;
u32 tail;
@@ -1186,17 +1348,8 @@ static u64 execlists_update_context(struct i915_request *rq)
* may not be visible to the HW prior to the completion of the UC
* register write and that we may begin execution from the context
* before its image is complete leading to invalid PD chasing.
- *
- * Furthermore, Braswell, at least, wants a full mb to be sure that
- * the writes are coherent in memory (visible to the GPU) prior to
- * execution, and not just visible to other CPUs (as is the result of
- * wmb).
*/
- mb();
-
- /* Wa_1607138340:tgl */
- if (IS_TGL_REVID(rq->i915, TGL_REVID_A0, TGL_REVID_A0))
- desc |= CTX_DESC_FORCE_RESTORE;
+ wmb();
ce->lrc_desc &= ~CTX_DESC_FORCE_RESTORE;
return desc;
@@ -1224,15 +1377,14 @@ trace_ports(const struct intel_engine_execlists *execlists,
if (!ports[0])
return;
- GEM_TRACE("%s: %s { %llx:%lld%s, %llx:%lld }\n",
- engine->name, msg,
- ports[0]->fence.context,
- ports[0]->fence.seqno,
- i915_request_completed(ports[0]) ? "!" :
- i915_request_started(ports[0]) ? "*" :
- "",
- ports[1] ? ports[1]->fence.context : 0,
- ports[1] ? ports[1]->fence.seqno : 0);
+ ENGINE_TRACE(engine, "%s { %llx:%lld%s, %llx:%lld }\n", msg,
+ ports[0]->fence.context,
+ ports[0]->fence.seqno,
+ i915_request_completed(ports[0]) ? "!" :
+ i915_request_started(ports[0]) ? "*" :
+ "",
+ ports[1] ? ports[1]->fence.context : 0,
+ ports[1] ? ports[1]->fence.seqno : 0);
}
static __maybe_unused bool
@@ -1256,33 +1408,56 @@ assert_pending_valid(const struct intel_engine_execlists *execlists,
}
for (port = execlists->pending; (rq = *port); port++) {
- if (ce == rq->hw_context) {
- GEM_TRACE_ERR("Duplicate context in pending[%zd]\n",
+ unsigned long flags;
+ bool ok = true;
+
+ GEM_BUG_ON(!kref_read(&rq->fence.refcount));
+ GEM_BUG_ON(!i915_request_is_active(rq));
+
+ if (ce == rq->context) {
+ GEM_TRACE_ERR("Dup context:%llx in pending[%zd]\n",
+ ce->timeline->fence_context,
port - execlists->pending);
return false;
}
+ ce = rq->context;
- ce = rq->hw_context;
- if (i915_request_completed(rq))
+ /* Hold tightly onto the lock to prevent concurrent retires! */
+ if (!spin_trylock_irqsave(&rq->lock, flags))
continue;
- if (i915_active_is_idle(&ce->active)) {
- GEM_TRACE_ERR("Inactive context in pending[%zd]\n",
+ if (i915_request_completed(rq))
+ goto unlock;
+
+ if (i915_active_is_idle(&ce->active) &&
+ !intel_context_is_barrier(ce)) {
+ GEM_TRACE_ERR("Inactive context:%llx in pending[%zd]\n",
+ ce->timeline->fence_context,
port - execlists->pending);
- return false;
+ ok = false;
+ goto unlock;
}
if (!i915_vma_is_pinned(ce->state)) {
- GEM_TRACE_ERR("Unpinned context in pending[%zd]\n",
+ GEM_TRACE_ERR("Unpinned context:%llx in pending[%zd]\n",
+ ce->timeline->fence_context,
port - execlists->pending);
- return false;
+ ok = false;
+ goto unlock;
}
if (!i915_vma_is_pinned(ce->ring->vma)) {
- GEM_TRACE_ERR("Unpinned ringbuffer in pending[%zd]\n",
+ GEM_TRACE_ERR("Unpinned ring:%llx in pending[%zd]\n",
+ ce->timeline->fence_context,
port - execlists->pending);
- return false;
+ ok = false;
+ goto unlock;
}
+
+unlock:
+ spin_unlock_irqrestore(&rq->lock, flags);
+ if (!ok)
+ return false;
}
return ce;
@@ -1327,7 +1502,7 @@ static void execlists_submit_ports(struct intel_engine_cs *engine)
static bool ctx_single_port_submission(const struct intel_context *ce)
{
return (IS_ENABLED(CONFIG_DRM_I915_GVT) &&
- i915_gem_context_force_single_submission(ce->gem_context));
+ intel_context_force_single_submission(ce));
}
static bool can_merge_ctx(const struct intel_context *prev,
@@ -1359,11 +1534,11 @@ static bool can_merge_rq(const struct i915_request *prev,
if (i915_request_completed(next))
return true;
- if (unlikely((prev->flags ^ next->flags) &
- (I915_REQUEST_NOPREEMPT | I915_REQUEST_SENTINEL)))
+ if (unlikely((prev->fence.flags ^ next->fence.flags) &
+ (I915_FENCE_FLAG_NOPREEMPT | I915_FENCE_FLAG_SENTINEL)))
return false;
- if (!can_merge_ctx(prev->hw_context, next->hw_context))
+ if (!can_merge_ctx(prev->context, next->context))
return false;
return true;
@@ -1372,7 +1547,7 @@ static bool can_merge_rq(const struct i915_request *prev,
static void virtual_update_register_offsets(u32 *regs,
struct intel_engine_cs *engine)
{
- set_offsets(regs, reg_offsets(engine), engine);
+ set_offsets(regs, reg_offsets(engine), engine, false);
}
static bool virtual_matches(const struct virtual_engine *ve,
@@ -1411,7 +1586,7 @@ static void virtual_xfer_breadcrumbs(struct virtual_engine *ve,
if (!list_empty(&ve->context.signal_link)) {
list_move_tail(&ve->context.signal_link,
&engine->breadcrumbs.signalers);
- intel_engine_queue_breadcrumbs(engine);
+ intel_engine_signal_breadcrumbs(engine);
}
spin_unlock(&old->breadcrumbs.irq_lock);
}
@@ -1519,7 +1694,7 @@ active_timeslice(const struct intel_engine_cs *engine)
{
const struct i915_request *rq = *engine->execlists.active;
- if (i915_request_completed(rq))
+ if (!rq || i915_request_completed(rq))
return 0;
if (engine->execlists.switch_priority_hint < effective_prio(rq))
@@ -1550,7 +1725,7 @@ static unsigned long active_preempt_timeout(struct intel_engine_cs *engine)
return 0;
/* Force a fast reset for terminated contexts (ignoring sysfs!) */
- if (unlikely(i915_gem_context_is_banned(rq->gem_context)))
+ if (unlikely(intel_context_is_banned(rq->context)))
return 1;
return READ_ONCE(engine->props.preempt_timeout_ms);
@@ -1565,6 +1740,11 @@ static void set_preempt_timeout(struct intel_engine_cs *engine)
active_preempt_timeout(engine));
}
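+/*
+ * Scrub an array of ELSP ports: memset_p() is the pointer-sized
+ * analogue of memset(), writing NULL into each slot.
+ */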
+static inline void clear_ports(struct i915_request **ports, int count)
+{
+ memset_p((void **)ports, NULL, count);
+}
+
static void execlists_dequeue(struct intel_engine_cs *engine)
{
struct intel_engine_execlists * const execlists = &engine->execlists;
@@ -1627,12 +1807,12 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
last = last_active(execlists);
if (last) {
if (need_preempt(engine, last, rb)) {
- GEM_TRACE("%s: preempting last=%llx:%lld, prio=%d, hint=%d\n",
- engine->name,
- last->fence.context,
- last->fence.seqno,
- last->sched.attr.priority,
- execlists->queue_priority_hint);
+ ENGINE_TRACE(engine,
+ "preempting last=%llx:%lld, prio=%d, hint=%d\n",
+ last->fence.context,
+ last->fence.seqno,
+ last->sched.attr.priority,
+ execlists->queue_priority_hint);
record_preemption(execlists);
/*
@@ -1658,16 +1838,16 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
* tendency to ignore us rewinding the TAIL to the
* end of an earlier request.
*/
- last->hw_context->lrc_desc |= CTX_DESC_FORCE_RESTORE;
+ last->context->lrc_desc |= CTX_DESC_FORCE_RESTORE;
last = NULL;
} else if (need_timeslice(engine, last) &&
timer_expired(&engine->execlists.timer)) {
- GEM_TRACE("%s: expired last=%llx:%lld, prio=%d, hint=%d\n",
- engine->name,
- last->fence.context,
- last->fence.seqno,
- last->sched.attr.priority,
- execlists->queue_priority_hint);
+ ENGINE_TRACE(engine,
+ "expired last=%llx:%lld, prio=%d, hint=%d\n",
+ last->fence.context,
+ last->fence.seqno,
+ last->sched.attr.priority,
+ execlists->queue_priority_hint);
ring_set_paused(engine, 1);
defer_active(engine);
@@ -1730,7 +1910,7 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
GEM_BUG_ON(rq != ve->request);
GEM_BUG_ON(rq->engine != &ve->base);
- GEM_BUG_ON(rq->hw_context != &ve->context);
+ GEM_BUG_ON(rq->context != &ve->context);
if (rq_prio(rq) >= queue_prio(execlists)) {
if (!virtual_matches(ve, rq, engine)) {
@@ -1744,14 +1924,14 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
return; /* leave this for another */
}
- GEM_TRACE("%s: virtual rq=%llx:%lld%s, new engine? %s\n",
- engine->name,
- rq->fence.context,
- rq->fence.seqno,
- i915_request_completed(rq) ? "!" :
- i915_request_started(rq) ? "*" :
- "",
- yesno(engine != ve->siblings[0]));
+ ENGINE_TRACE(engine,
+ "virtual rq=%llx:%lld%s, new engine? %s\n",
+ rq->fence.context,
+ rq->fence.seqno,
+ i915_request_completed(rq) ? "!" :
+ i915_request_started(rq) ? "*" :
+ "",
+ yesno(engine != ve->siblings[0]));
ve->request = NULL;
ve->base.execlists.queue_priority_hint = INT_MIN;
@@ -1849,7 +2029,7 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
* same LRCA, i.e. we must submit 2 different
* contexts if we submit 2 ELSP.
*/
- if (last->hw_context == rq->hw_context)
+ if (last->context == rq->context)
goto done;
if (i915_request_has_sentinel(last))
@@ -1862,8 +2042,8 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
* the same context (even though a different
* request) to the second port.
*/
- if (ctx_single_port_submission(last->hw_context) ||
- ctx_single_port_submission(rq->hw_context))
+ if (ctx_single_port_submission(last->context) ||
+ ctx_single_port_submission(rq->context))
goto done;
merge = false;
@@ -1877,8 +2057,8 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
}
GEM_BUG_ON(last &&
- !can_merge_ctx(last->hw_context,
- rq->hw_context));
+ !can_merge_ctx(last->context,
+ rq->context));
submit = true;
last = rq;
@@ -1907,9 +2087,6 @@ done:
* interrupt for secondary ports).
*/
execlists->queue_priority_hint = queue_prio(execlists);
- GEM_TRACE("%s: queue_priority_hint:%d, submit:%s\n",
- engine->name, execlists->queue_priority_hint,
- yesno(submit));
if (submit) {
*port = execlists_schedule_in(last, port - execlists->pending);
@@ -1928,10 +2105,9 @@ done:
goto skip_submit;
}
- memset(port + 1, 0, (last_port - port) * sizeof(*port));
+ clear_ports(port + 1, last_port - port);
execlists_submit_ports(engine);
-
set_preempt_timeout(engine);
} else {
skip_submit:
@@ -1946,13 +2122,14 @@ cancel_port_requests(struct intel_engine_execlists * const execlists)
for (port = execlists->pending; *port; port++)
execlists_schedule_out(*port);
- memset(execlists->pending, 0, sizeof(execlists->pending));
+ clear_ports(execlists->pending, ARRAY_SIZE(execlists->pending));
/* Mark the end of active before we overwrite *active */
for (port = xchg(&execlists->active, execlists->pending); *port; port++)
execlists_schedule_out(*port);
- WRITE_ONCE(execlists->active,
- memset(execlists->inflight, 0, sizeof(execlists->inflight)));
+ clear_ports(execlists->inflight, ARRAY_SIZE(execlists->inflight));
+
+ WRITE_ONCE(execlists->active, execlists->inflight);
}
static inline void
@@ -2058,7 +2235,7 @@ static void process_csb(struct intel_engine_cs *engine)
*/
head = execlists->csb_head;
tail = READ_ONCE(*execlists->csb_write);
- GEM_TRACE("%s cs-irq head=%d, tail=%d\n", engine->name, head, tail);
+ ENGINE_TRACE(engine, "cs-irq head=%d, tail=%d\n", head, tail);
if (unlikely(head == tail))
return;
@@ -2096,9 +2273,8 @@ static void process_csb(struct intel_engine_cs *engine)
* status notifier.
*/
- GEM_TRACE("%s csb[%d]: status=0x%08x:0x%08x\n",
- engine->name, head,
- buf[2 * head + 0], buf[2 * head + 1]);
+ ENGINE_TRACE(engine, "csb[%d]: status=0x%08x:0x%08x\n",
+ head, buf[2 * head + 0], buf[2 * head + 1]);
if (INTEL_GEN(engine->i915) >= 12)
promote = gen12_csb_parse(execlists, buf + 2 * head);
@@ -2109,7 +2285,6 @@ static void process_csb(struct intel_engine_cs *engine)
/* Point active to the new ELSP; prevent overwriting */
WRITE_ONCE(execlists->active, execlists->pending);
- set_timeslice(engine);
if (!inject_preempt_hang(execlists))
ring_set_paused(engine, 0);
@@ -2150,6 +2325,7 @@ static void process_csb(struct intel_engine_cs *engine)
} while (head != tail);
execlists->csb_head = head;
+ set_timeslice(engine);
/*
* Gen11 has proven to fail wrt global observation point between
@@ -2189,10 +2365,9 @@ static noinline void preempt_reset(struct intel_engine_cs *engine)
/* Mark this tasklet as disabled to avoid waiting for it to complete */
tasklet_disable_nosync(&engine->execlists.tasklet);
- GEM_TRACE("%s: preempt timeout %lu+%ums\n",
- engine->name,
- READ_ONCE(engine->props.preempt_timeout_ms),
- jiffies_to_msecs(jiffies - engine->execlists.preempt.expires));
+ ENGINE_TRACE(engine, "preempt timeout %lu+%ums\n",
+ READ_ONCE(engine->props.preempt_timeout_ms),
+ jiffies_to_msecs(jiffies - engine->execlists.preempt.expires));
intel_engine_reset(engine, "preemption time out");
tasklet_enable(&engine->execlists.tasklet);
@@ -2333,7 +2508,7 @@ set_redzone(void *vaddr, const struct intel_engine_cs *engine)
vaddr += engine->context_size;
- memset(vaddr, POISON_INUSE, I915_GTT_PAGE_SIZE);
+ memset(vaddr, CONTEXT_REDZONE, I915_GTT_PAGE_SIZE);
}
static void
@@ -2344,7 +2519,7 @@ check_redzone(const void *vaddr, const struct intel_engine_cs *engine)
vaddr += engine->context_size;
- if (memchr_inv(vaddr, POISON_INUSE, I915_GTT_PAGE_SIZE))
+ if (memchr_inv(vaddr, CONTEXT_REDZONE, I915_GTT_PAGE_SIZE))
dev_err_once(engine->i915->drm.dev,
"%s context redzone overwritten!\n",
engine->name);
@@ -2369,7 +2544,7 @@ __execlists_update_reg_state(const struct intel_context *ce,
GEM_BUG_ON(!intel_ring_offset_valid(ring, ring->head));
GEM_BUG_ON(!intel_ring_offset_valid(ring, ring->tail));
- regs[CTX_RING_BUFFER_START] = i915_ggtt_offset(ring->vma);
+ regs[CTX_RING_START] = i915_ggtt_offset(ring->vma);
regs[CTX_RING_HEAD] = ring->head;
regs[CTX_RING_TAIL] = ring->tail;
@@ -2387,33 +2562,21 @@ __execlists_context_pin(struct intel_context *ce,
struct intel_engine_cs *engine)
{
void *vaddr;
- int ret;
GEM_BUG_ON(!ce->state);
-
- ret = intel_context_active_acquire(ce);
- if (ret)
- goto err;
GEM_BUG_ON(!i915_vma_is_pinned(ce->state));
vaddr = i915_gem_object_pin_map(ce->state->obj,
i915_coherent_map_type(engine->i915) |
I915_MAP_OVERRIDE);
- if (IS_ERR(vaddr)) {
- ret = PTR_ERR(vaddr);
- goto unpin_active;
- }
+ if (IS_ERR(vaddr))
+ return PTR_ERR(vaddr);
- ce->lrc_desc = lrc_descriptor(ce, engine);
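+ /* Freshly pinned, so force a full restore on first submission. */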
+ ce->lrc_desc = lrc_descriptor(ce, engine) | CTX_DESC_FORCE_RESTORE;
ce->lrc_reg_state = vaddr + LRC_STATE_PN * PAGE_SIZE;
__execlists_update_reg_state(ce, engine);
return 0;
-
-unpin_active:
- intel_context_active_release(ce);
-err:
- return ret;
}
static int execlists_context_pin(struct intel_context *ce)
@@ -2428,6 +2591,9 @@ static int execlists_context_alloc(struct intel_context *ce)
static void execlists_context_reset(struct intel_context *ce)
{
+ CE_TRACE(ce, "reset\n");
+ GEM_BUG_ON(!intel_context_is_pinned(ce));
+
/*
* Because we emit WA_TAIL_DWORDS there may be a disparity
* between our bookkeeping in ce->ring->head and ce->ring->tail and
@@ -2444,8 +2610,14 @@ static void execlists_context_reset(struct intel_context *ce)
* So to avoid that we reset the context images upon resume. For
* simplicity, we just zero everything out.
*/
- intel_ring_reset(ce->ring, 0);
+ intel_ring_reset(ce->ring, ce->ring->emit);
+
+ /* Scrub away the garbage */
+ execlists_init_reg_state(ce->lrc_reg_state,
+ ce, ce->engine, ce->ring, true);
__execlists_update_reg_state(ce, ce->engine);
+
+ ce->lrc_desc |= CTX_DESC_FORCE_RESTORE;
}
static const struct intel_context_ops execlists_context_ops = {
@@ -2497,7 +2669,7 @@ static int execlists_request_alloc(struct i915_request *request)
{
int ret;
- GEM_BUG_ON(!intel_context_is_pinned(request->hw_context));
+ GEM_BUG_ON(!intel_context_is_pinned(request->context));
/*
* Flush enough space to reduce the likelihood of waiting after
@@ -2664,6 +2836,14 @@ static u32 *gen9_init_indirectctx_bb(struct intel_engine_cs *engine, u32 *batch)
/* WaFlushCoherentL3CacheLinesAtContextSwitch:skl,bxt,glk */
batch = gen8_emit_flush_coherentl3_wa(engine, batch);
+ /* WaClearSlmSpaceAtContextSwitch:skl,bxt,kbl,glk,cfl */
+ batch = gen8_emit_pipe_control(batch,
+ PIPE_CONTROL_FLUSH_L3 |
+ PIPE_CONTROL_STORE_DATA_INDEX |
+ PIPE_CONTROL_CS_STALL |
+ PIPE_CONTROL_QW_WRITE,
+ LRC_PPHWSP_SCRATCH_ADDR);
+
batch = emit_lri(batch, lri, ARRAY_SIZE(lri));
/* WaMediaPoolStateCmdInWABB:bxt,glk */
@@ -2859,6 +3039,8 @@ static void enable_execlists(struct intel_engine_cs *engine)
RING_HWS_PGA,
i915_ggtt_offset(engine->status_page.vma));
ENGINE_POSTING_READ(engine, RING_HWS_PGA);
+
+ engine->context_tag = 0;
}
static bool unexpected_starting_state(struct intel_engine_cs *engine)
@@ -2898,8 +3080,8 @@ static void execlists_reset_prepare(struct intel_engine_cs *engine)
struct intel_engine_execlists * const execlists = &engine->execlists;
unsigned long flags;
- GEM_TRACE("%s: depth<-%d\n", engine->name,
- atomic_read(&execlists->tasklet.count));
+ ENGINE_TRACE(engine, "depth<-%d\n",
+ atomic_read(&execlists->tasklet.count));
/*
* Prevent request submission to the hardware until we have
@@ -2952,26 +3134,20 @@ static void reset_csb_pointers(struct intel_engine_cs *engine)
WRITE_ONCE(*execlists->csb_write, reset_value);
wmb(); /* Make sure this is visible to HW (paranoia?) */
+ /*
+ * Sometimes Icelake forgets to reset its pointers on a GPU reset.
+ * Bludgeon them with an mmio update to be sure.
+ */
+ ENGINE_WRITE(engine, RING_CONTEXT_STATUS_PTR,
+ reset_value << 8 | reset_value);
+ ENGINE_POSTING_READ(engine, RING_CONTEXT_STATUS_PTR);
+
invalidate_csb_entries(&execlists->csb_status[0],
&execlists->csb_status[reset_value]);
}
-static int lrc_ring_mi_mode(const struct intel_engine_cs *engine)
+static void __reset_stop_ring(u32 *regs, const struct intel_engine_cs *engine)
{
- if (INTEL_GEN(engine->i915) >= 12)
- return 0x60;
- else if (INTEL_GEN(engine->i915) >= 9)
- return 0x54;
- else if (engine->class == RENDER_CLASS)
- return 0x58;
- else
- return -1;
-}
-
-static void __execlists_reset_reg_state(const struct intel_context *ce,
- const struct intel_engine_cs *engine)
-{
- u32 *regs = ce->lrc_reg_state;
int x;
x = lrc_ring_mi_mode(engine);
@@ -2981,6 +3157,14 @@ static void __execlists_reset_reg_state(const struct intel_context *ce,
}
}
+static void __execlists_reset_reg_state(const struct intel_context *ce,
+ const struct intel_engine_cs *engine)
+{
+ u32 *regs = ce->lrc_reg_state;
+
+ __reset_stop_ring(regs, engine);
+}
+
static void __execlists_reset(struct intel_engine_cs *engine, bool stalled)
{
struct intel_engine_execlists * const execlists = &engine->execlists;
@@ -3008,7 +3192,7 @@ static void __execlists_reset(struct intel_engine_cs *engine, bool stalled)
/* We still have requests in-flight; the engine should be active */
GEM_BUG_ON(!intel_engine_pm_is_awake(engine));
- ce = rq->hw_context;
+ ce = rq->context;
GEM_BUG_ON(!i915_vma_is_pinned(ce->state));
if (i915_request_completed(rq)) {
@@ -3065,8 +3249,8 @@ static void __execlists_reset(struct intel_engine_cs *engine, bool stalled)
restore_default_state(ce, engine);
out_replay:
- GEM_TRACE("%s replay {head:%04x, tail:%04x}\n",
- engine->name, ce->ring->head, ce->ring->tail);
+ ENGINE_TRACE(engine, "replay {head:%04x, tail:%04x}\n",
+ ce->ring->head, ce->ring->tail);
intel_ring_update_space(ce->ring);
__execlists_reset_reg_state(ce, engine);
__execlists_update_reg_state(ce, engine);
@@ -3078,11 +3262,11 @@ unwind:
__unwind_incomplete_requests(engine);
}
-static void execlists_reset(struct intel_engine_cs *engine, bool stalled)
+static void execlists_reset_rewind(struct intel_engine_cs *engine, bool stalled)
{
unsigned long flags;
- GEM_TRACE("%s\n", engine->name);
+ ENGINE_TRACE(engine, "\n");
spin_lock_irqsave(&engine->active.lock, flags);
@@ -3096,14 +3280,14 @@ static void nop_submission_tasklet(unsigned long data)
/* The driver is wedged; don't process any more events. */
}
-static void execlists_cancel_requests(struct intel_engine_cs *engine)
+static void execlists_reset_cancel(struct intel_engine_cs *engine)
{
struct intel_engine_execlists * const execlists = &engine->execlists;
struct i915_request *rq, *rn;
struct rb_node *rb;
unsigned long flags;
- GEM_TRACE("%s\n", engine->name);
+ ENGINE_TRACE(engine, "\n");
/*
* Before we call engine->cancel_requests(), we should have exclusive
@@ -3190,13 +3374,13 @@ static void execlists_reset_finish(struct intel_engine_cs *engine)
if (__tasklet_enable(&execlists->tasklet))
/* And kick in case we missed a new request submission. */
tasklet_hi_schedule(&execlists->tasklet);
- GEM_TRACE("%s: depth->%d\n", engine->name,
- atomic_read(&execlists->tasklet.count));
+ ENGINE_TRACE(engine, "depth->%d\n",
+ atomic_read(&execlists->tasklet.count));
}
-static int gen8_emit_bb_start(struct i915_request *rq,
- u64 offset, u32 len,
- const unsigned int flags)
+static int gen8_emit_bb_start_noarb(struct i915_request *rq,
+ u64 offset, u32 len,
+ const unsigned int flags)
{
u32 *cs;
@@ -3230,7 +3414,7 @@ static int gen8_emit_bb_start(struct i915_request *rq,
return 0;
}
-static int gen9_emit_bb_start(struct i915_request *rq,
+static int gen8_emit_bb_start(struct i915_request *rq,
u64 offset, u32 len,
const unsigned int flags)
{
@@ -3685,12 +3869,12 @@ static void execlists_park(struct intel_engine_cs *engine)
void intel_execlists_set_default_submission(struct intel_engine_cs *engine)
{
engine->submit_request = execlists_submit_request;
- engine->cancel_requests = execlists_cancel_requests;
engine->schedule = i915_schedule;
engine->execlists.tasklet.func = execlists_submission_tasklet;
engine->reset.prepare = execlists_reset_prepare;
- engine->reset.reset = execlists_reset;
+ engine->reset.rewind = execlists_reset_rewind;
+ engine->reset.cancel = execlists_reset_cancel;
engine->reset.finish = execlists_reset_finish;
engine->park = execlists_park;
@@ -3705,13 +3889,27 @@ void intel_execlists_set_default_submission(struct intel_engine_cs *engine)
if (INTEL_GEN(engine->i915) >= 12)
engine->flags |= I915_ENGINE_HAS_RELATIVE_MMIO;
+
+ if (intel_engine_has_preemption(engine))
+ engine->emit_bb_start = gen8_emit_bb_start;
+ else
+ engine->emit_bb_start = gen8_emit_bb_start_noarb;
+}
+
+static void execlists_shutdown(struct intel_engine_cs *engine)
+{
+ /* Synchronise with residual timers and any softirq they raise */
+ del_timer_sync(&engine->execlists.timer);
+ del_timer_sync(&engine->execlists.preempt);
+ tasklet_kill(&engine->execlists.tasklet);
}
-static void execlists_destroy(struct intel_engine_cs *engine)
+static void execlists_release(struct intel_engine_cs *engine)
{
+ execlists_shutdown(engine);
+
intel_engine_cleanup_common(engine);
lrc_destroy_wa_ctx(engine);
- kfree(engine);
}
static void
@@ -3719,13 +3917,8 @@ logical_ring_default_vfuncs(struct intel_engine_cs *engine)
{
/* Default vfuncs which can be overriden by each engine. */
- engine->destroy = execlists_destroy;
engine->resume = execlists_resume;
- engine->reset.prepare = execlists_reset_prepare;
- engine->reset.reset = execlists_reset;
- engine->reset.finish = execlists_reset_finish;
-
engine->cops = &execlists_context_ops;
engine->request_alloc = execlists_request_alloc;
@@ -3748,10 +3941,6 @@ logical_ring_default_vfuncs(struct intel_engine_cs *engine)
* until a more refined solution exists.
*/
}
- if (IS_GEN(engine->i915, 8))
- engine->emit_bb_start = gen8_emit_bb_start;
- else
- engine->emit_bb_start = gen9_emit_bb_start;
}
static inline void
@@ -3795,6 +3984,11 @@ static void rcs_submission_override(struct intel_engine_cs *engine)
int intel_execlists_submission_setup(struct intel_engine_cs *engine)
{
+ struct intel_engine_execlists * const execlists = &engine->execlists;
+ struct drm_i915_private *i915 = engine->i915;
+ struct intel_uncore *uncore = engine->uncore;
+ u32 base = engine->mmio_base;
+
tasklet_init(&engine->execlists.tasklet,
execlists_submission_tasklet, (unsigned long)engine);
timer_setup(&engine->execlists.timer, execlists_timeslice, 0);
@@ -3806,21 +4000,6 @@ int intel_execlists_submission_setup(struct intel_engine_cs *engine)
if (engine->class == RENDER_CLASS)
rcs_submission_override(engine);
- return 0;
-}
-
-int intel_execlists_submission_init(struct intel_engine_cs *engine)
-{
- struct intel_engine_execlists * const execlists = &engine->execlists;
- struct drm_i915_private *i915 = engine->i915;
- struct intel_uncore *uncore = engine->uncore;
- u32 base = engine->mmio_base;
- int ret;
-
- ret = intel_engine_init_common(engine);
- if (ret)
- return ret;
-
if (intel_init_workaround_bb(engine))
/*
* We continue even if we fail to initialize WA batch
@@ -3852,6 +4031,9 @@ int intel_execlists_submission_init(struct intel_engine_cs *engine)
reset_csb_pointers(engine);
+ /* Finally, take ownership and responsibility for cleanup! */
+ engine->release = execlists_release;
+
return 0;
}
@@ -3891,18 +4073,21 @@ static u32 intel_lr_indirect_ctx_offset(const struct intel_engine_cs *engine)
static void init_common_reg_state(u32 * const regs,
const struct intel_engine_cs *engine,
- const struct intel_ring *ring)
+ const struct intel_ring *ring,
+ bool inhibit)
{
- regs[CTX_CONTEXT_CONTROL] =
- _MASKED_BIT_DISABLE(CTX_CTRL_ENGINE_CTX_RESTORE_INHIBIT) |
- _MASKED_BIT_ENABLE(CTX_CTRL_INHIBIT_SYN_CTX_SWITCH);
+ u32 ctl;
+
+ ctl = _MASKED_BIT_ENABLE(CTX_CTRL_INHIBIT_SYN_CTX_SWITCH);
+ ctl |= _MASKED_BIT_DISABLE(CTX_CTRL_ENGINE_CTX_RESTORE_INHIBIT);
+ if (inhibit)
+ ctl |= CTX_CTRL_ENGINE_CTX_RESTORE_INHIBIT;
if (INTEL_GEN(engine->i915) < 11)
- regs[CTX_CONTEXT_CONTROL] |=
- _MASKED_BIT_DISABLE(CTX_CTRL_ENGINE_CTX_SAVE_INHIBIT |
- CTX_CTRL_RS_CTX_ENABLE);
+ ctl |= _MASKED_BIT_DISABLE(CTX_CTRL_ENGINE_CTX_SAVE_INHIBIT |
+ CTX_CTRL_RS_CTX_ENABLE);
+ regs[CTX_CONTEXT_CONTROL] = ctl;
- regs[CTX_RING_BUFFER_CONTROL] = RING_CTL_SIZE(ring->size) | RING_VALID;
- regs[CTX_BB_STATE] = RING_BB_PPGTT;
+ regs[CTX_RING_CTL] = RING_CTL_SIZE(ring->size) | RING_VALID;
}
static void init_wa_bb_reg_state(u32 * const regs,
@@ -3958,7 +4143,7 @@ static void execlists_init_reg_state(u32 *regs,
const struct intel_context *ce,
const struct intel_engine_cs *engine,
const struct intel_ring *ring,
- bool close)
+ bool inhibit)
{
/*
* A context is actually a big batch buffer with several
@@ -3970,21 +4155,17 @@ static void execlists_init_reg_state(u32 *regs,
*
* Must keep consistent with virtual_update_register_offsets().
*/
- u32 *bbe = set_offsets(regs, reg_offsets(engine), engine);
+ set_offsets(regs, reg_offsets(engine), engine, inhibit);
- if (close) { /* Close the batch; used mainly by live_lrc_layout() */
- *bbe = MI_BATCH_BUFFER_END;
- if (INTEL_GEN(engine->i915) >= 10)
- *bbe |= BIT(0);
- }
-
- init_common_reg_state(regs, engine, ring);
+ init_common_reg_state(regs, engine, ring, inhibit);
init_ppgtt_reg_state(regs, vm_alias(ce->vm));
init_wa_bb_reg_state(regs, engine,
INTEL_GEN(engine->i915) >= 12 ?
GEN12_CTX_BB_PER_CTX_PTR :
CTX_BB_PER_CTX_PTR);
+
+ __reset_stop_ring(regs, engine);
}
static int
@@ -3995,7 +4176,6 @@ populate_lr_context(struct intel_context *ce,
{
bool inhibit = true;
void *vaddr;
- u32 *regs;
int ret;
vaddr = i915_gem_object_pin_map(ctx_obj, I915_MAP_WB);
@@ -4019,16 +4199,14 @@ populate_lr_context(struct intel_context *ce,
memcpy(vaddr, defaults, engine->context_size);
i915_gem_object_unpin_map(engine->default_state);
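+ /* Image initialised from the default state, so mark the context valid. */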
+ __set_bit(CONTEXT_VALID_BIT, &ce->flags);
inhibit = false;
}
/* The second page of the context object contains some fields which must
* be set up prior to the first execution. */
- regs = vaddr + LRC_STATE_PN * PAGE_SIZE;
- execlists_init_reg_state(regs, ce, engine, ring, inhibit);
- if (inhibit)
- regs[CTX_CONTEXT_CONTROL] |=
- _MASKED_BIT_ENABLE(CTX_CTRL_ENGINE_CTX_RESTORE_INHIBIT);
+ execlists_init_reg_state(vaddr + LRC_STATE_PN * PAGE_SIZE,
+ ce, engine, ring, inhibit);
ret = 0;
err_unpin_ctx:
@@ -4166,6 +4344,13 @@ static void virtual_engine_initial_hint(struct virtual_engine *ve)
ve->siblings[0]);
}
+static int virtual_context_alloc(struct intel_context *ce)
+{
+ struct virtual_engine *ve = container_of(ce, typeof(*ve), context);
+
+ return __execlists_context_alloc(ce, ve->siblings[0]);
+}
+
static int virtual_context_pin(struct intel_context *ce)
{
struct virtual_engine *ve = container_of(ce, typeof(*ve), context);
@@ -4203,6 +4388,8 @@ static void virtual_context_exit(struct intel_context *ce)
}
static const struct intel_context_ops virtual_context_ops = {
+ .alloc = virtual_context_alloc,
+
.pin = virtual_context_pin,
.unpin = execlists_context_unpin,
@@ -4229,10 +4416,9 @@ static intel_engine_mask_t virtual_submission_mask(struct virtual_engine *ve)
mask = ve->siblings[0]->mask;
}
- GEM_TRACE("%s: rq=%llx:%lld, mask=%x, prio=%d\n",
- ve->base.name,
- rq->fence.context, rq->fence.seqno,
- mask, ve->base.execlists.queue_priority_hint);
+ ENGINE_TRACE(&ve->base, "rq=%llx:%lld, mask=%x, prio=%d\n",
+ rq->fence.context, rq->fence.seqno,
+ mask, ve->base.execlists.queue_priority_hint);
return mask;
}
@@ -4323,10 +4509,9 @@ static void virtual_submit_request(struct i915_request *rq)
struct i915_request *old;
unsigned long flags;
- GEM_TRACE("%s: rq=%llx:%lld\n",
- ve->base.name,
- rq->fence.context,
- rq->fence.seqno);
+ ENGINE_TRACE(&ve->base, "rq=%llx:%lld\n",
+ rq->fence.context,
+ rq->fence.seqno);
GEM_BUG_ON(ve->base.submit_request != virtual_submit_request);
@@ -4394,8 +4579,7 @@ virtual_bond_execute(struct i915_request *rq, struct dma_fence *signal)
}
struct intel_context *
-intel_execlists_create_virtual(struct i915_gem_context *ctx,
- struct intel_engine_cs **siblings,
+intel_execlists_create_virtual(struct intel_engine_cs **siblings,
unsigned int count)
{
struct virtual_engine *ve;
@@ -4406,13 +4590,13 @@ intel_execlists_create_virtual(struct i915_gem_context *ctx,
return ERR_PTR(-EINVAL);
if (count == 1)
- return intel_context_create(ctx, siblings[0]);
+ return intel_context_create(siblings[0]);
ve = kzalloc(struct_size(ve, siblings, count), GFP_KERNEL);
if (!ve)
return ERR_PTR(-ENOMEM);
- ve->base.i915 = ctx->i915;
+ ve->base.i915 = siblings[0]->i915;
ve->base.gt = siblings[0]->gt;
ve->base.uncore = siblings[0]->uncore;
ve->base.id = -1;
@@ -4441,7 +4625,6 @@ intel_execlists_create_virtual(struct i915_gem_context *ctx,
intel_engine_init_active(&ve->base, ENGINE_VIRTUAL);
intel_engine_init_breadcrumbs(&ve->base);
-
intel_engine_init_execlists(&ve->base);
ve->base.cops = &virtual_context_ops;
@@ -4457,7 +4640,7 @@ intel_execlists_create_virtual(struct i915_gem_context *ctx,
virtual_submission_tasklet,
(unsigned long)ve);
- intel_context_init(&ve->context, ctx, &ve->base);
+ intel_context_init(&ve->context, &ve->base);
for (n = 0; n < count; n++) {
struct intel_engine_cs *sibling = siblings[n];
@@ -4524,12 +4707,6 @@ intel_execlists_create_virtual(struct i915_gem_context *ctx,
ve->base.flags |= I915_ENGINE_IS_VIRTUAL;
- err = __execlists_context_alloc(&ve->context, siblings[0]);
- if (err)
- goto err_put;
-
- __set_bit(CONTEXT_ALLOC_BIT, &ve->context.flags);
-
return &ve->context;
err_put:
@@ -4538,14 +4715,12 @@ err_put:
}
struct intel_context *
-intel_execlists_clone_virtual(struct i915_gem_context *ctx,
- struct intel_engine_cs *src)
+intel_execlists_clone_virtual(struct intel_engine_cs *src)
{
struct virtual_engine *se = to_virtual_engine(src);
struct intel_context *dst;
- dst = intel_execlists_create_virtual(ctx,
- se->siblings,
+ dst = intel_execlists_create_virtual(se->siblings,
se->num_siblings);
if (IS_ERR(dst))
return dst;
diff --git a/drivers/gpu/drm/i915/gt/intel_lrc.h b/drivers/gpu/drm/i915/gt/intel_lrc.h
index 04511d8ebdc1..dfbc214e14f5 100644
--- a/drivers/gpu/drm/i915/gt/intel_lrc.h
+++ b/drivers/gpu/drm/i915/gt/intel_lrc.h
@@ -83,7 +83,6 @@ enum {
void intel_logical_ring_cleanup(struct intel_engine_cs *engine);
int intel_execlists_submission_setup(struct intel_engine_cs *engine);
-int intel_execlists_submission_init(struct intel_engine_cs *engine);
/* Logical Ring Contexts */
/* At the start of the context image is its per-process HWS page */
@@ -111,13 +110,11 @@ void intel_execlists_show_requests(struct intel_engine_cs *engine,
unsigned int max);
struct intel_context *
-intel_execlists_create_virtual(struct i915_gem_context *ctx,
- struct intel_engine_cs **siblings,
+intel_execlists_create_virtual(struct intel_engine_cs **siblings,
unsigned int count);
struct intel_context *
-intel_execlists_clone_virtual(struct i915_gem_context *ctx,
- struct intel_engine_cs *src);
+intel_execlists_clone_virtual(struct intel_engine_cs *src);
int intel_virtual_engine_attach_bond(struct intel_engine_cs *engine,
const struct intel_engine_cs *master,
diff --git a/drivers/gpu/drm/i915/gt/intel_lrc_reg.h b/drivers/gpu/drm/i915/gt/intel_lrc_reg.h
index 06ab0276e10e..08a3be65f700 100644
--- a/drivers/gpu/drm/i915/gt/intel_lrc_reg.h
+++ b/drivers/gpu/drm/i915/gt/intel_lrc_reg.h
@@ -13,8 +13,8 @@
#define CTX_CONTEXT_CONTROL (0x02 + 1)
#define CTX_RING_HEAD (0x04 + 1)
#define CTX_RING_TAIL (0x06 + 1)
-#define CTX_RING_BUFFER_START (0x08 + 1)
-#define CTX_RING_BUFFER_CONTROL (0x0a + 1)
+#define CTX_RING_START (0x08 + 1)
+#define CTX_RING_CTL (0x0a + 1)
#define CTX_BB_STATE (0x10 + 1)
#define CTX_BB_PER_CTX_PTR (0x18 + 1)
#define CTX_PDP3_UDW (0x24 + 1)
diff --git a/drivers/gpu/drm/i915/gt/intel_mocs.c b/drivers/gpu/drm/i915/gt/intel_mocs.c
index 2b977991b785..eeef90b55c64 100644
--- a/drivers/gpu/drm/i915/gt/intel_mocs.c
+++ b/drivers/gpu/drm/i915/gt/intel_mocs.c
@@ -127,7 +127,7 @@ struct drm_i915_mocs_table {
LE_0_PAGETABLE | LE_TC_2_LLC_ELLC | LE_LRUM(3), \
L3_3_WB)
-static const struct drm_i915_mocs_entry skylake_mocs_table[] = {
+static const struct drm_i915_mocs_entry skl_mocs_table[] = {
GEN9_MOCS_ENTRIES,
MOCS_ENTRY(I915_MOCS_CACHED,
LE_3_WB | LE_TC_2_LLC_ELLC | LE_LRUM(3),
@@ -233,7 +233,7 @@ static const struct drm_i915_mocs_entry broxton_mocs_table[] = {
LE_3_WB | LE_TC_1_LLC | LE_LRUM(3), \
L3_1_UC)
-static const struct drm_i915_mocs_entry tigerlake_mocs_table[] = {
+static const struct drm_i915_mocs_entry tgl_mocs_table[] = {
/* Base - Error (Reserved for Non-Use) */
MOCS_ENTRY(0, 0x0, 0x0),
/* Base - Reserved */
@@ -267,7 +267,7 @@ static const struct drm_i915_mocs_entry tigerlake_mocs_table[] = {
L3_3_WB),
};
-static const struct drm_i915_mocs_entry icelake_mocs_table[] = {
+static const struct drm_i915_mocs_entry icl_mocs_table[] = {
/* Base - Uncached (Deprecated) */
MOCS_ENTRY(I915_MOCS_UNCACHED,
LE_1_UC | LE_TC_1_LLC,
@@ -283,65 +283,42 @@ static const struct drm_i915_mocs_entry icelake_mocs_table[] = {
static bool get_mocs_settings(const struct drm_i915_private *i915,
struct drm_i915_mocs_table *table)
{
- bool result = false;
-
if (INTEL_GEN(i915) >= 12) {
- table->size = ARRAY_SIZE(tigerlake_mocs_table);
- table->table = tigerlake_mocs_table;
+ table->size = ARRAY_SIZE(tgl_mocs_table);
+ table->table = tgl_mocs_table;
table->n_entries = GEN11_NUM_MOCS_ENTRIES;
- result = true;
} else if (IS_GEN(i915, 11)) {
- table->size = ARRAY_SIZE(icelake_mocs_table);
- table->table = icelake_mocs_table;
+ table->size = ARRAY_SIZE(icl_mocs_table);
+ table->table = icl_mocs_table;
table->n_entries = GEN11_NUM_MOCS_ENTRIES;
- result = true;
} else if (IS_GEN9_BC(i915) || IS_CANNONLAKE(i915)) {
- table->size = ARRAY_SIZE(skylake_mocs_table);
+ table->size = ARRAY_SIZE(skl_mocs_table);
table->n_entries = GEN9_NUM_MOCS_ENTRIES;
- table->table = skylake_mocs_table;
- result = true;
+ table->table = skl_mocs_table;
} else if (IS_GEN9_LP(i915)) {
table->size = ARRAY_SIZE(broxton_mocs_table);
table->n_entries = GEN9_NUM_MOCS_ENTRIES;
table->table = broxton_mocs_table;
- result = true;
} else {
WARN_ONCE(INTEL_GEN(i915) >= 9,
"Platform that should have a MOCS table does not.\n");
+ return false;
}
+ if (GEM_DEBUG_WARN_ON(table->size > table->n_entries))
+ return false;
+
/* WaDisableSkipCaching:skl,bxt,kbl,glk */
if (IS_GEN(i915, 9)) {
int i;
for (i = 0; i < table->size; i++)
- if (WARN_ON(table->table[i].l3cc_value &
- (L3_ESC(1) | L3_SCC(0x7))))
+ if (GEM_DEBUG_WARN_ON(table->table[i].l3cc_value &
+ (L3_ESC(1) | L3_SCC(0x7))))
return false;
}
- return result;
-}
-
-static i915_reg_t mocs_register(const struct intel_engine_cs *engine, int index)
-{
- switch (engine->id) {
- case RCS0:
- return GEN9_GFX_MOCS(index);
- case VCS0:
- return GEN9_MFX0_MOCS(index);
- case BCS0:
- return GEN9_BLT_MOCS(index);
- case VECS0:
- return GEN9_VEBOX_MOCS(index);
- case VCS1:
- return GEN9_MFX1_MOCS(index);
- case VCS2:
- return GEN11_MFX2_MOCS(index);
- default:
- MISSING_CASE(engine->id);
- return INVALID_MMIO_REG;
- }
+ return true;
}
/*
@@ -351,29 +328,47 @@ static i915_reg_t mocs_register(const struct intel_engine_cs *engine, int index)
static u32 get_entry_control(const struct drm_i915_mocs_table *table,
unsigned int index)
{
- if (table->table[index].used)
+ if (index < table->size && table->table[index].used)
return table->table[index].control_value;
return table->table[I915_MOCS_PTE].control_value;
}
-static void init_mocs_table(struct intel_engine_cs *engine,
- const struct drm_i915_mocs_table *table)
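+/*
+ * Iterate over every programmable MOCS slot; indices beyond the table
+ * (or marked unused) fall back to the PTE entry via get_entry_control().
+ */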
+#define for_each_mocs(mocs, t, i) \
+ for (i = 0; \
+ i < (t)->n_entries ? (mocs = get_entry_control((t), i)), 1 : 0;\
+ i++)
+
+static void __init_mocs_table(struct intel_uncore *uncore,
+ const struct drm_i915_mocs_table *table,
+ u32 addr)
{
- struct intel_uncore *uncore = engine->uncore;
- u32 unused_value = table->table[I915_MOCS_PTE].control_value;
unsigned int i;
+ u32 mocs;
+
+ for_each_mocs(mocs, table, i)
+ intel_uncore_write_fw(uncore, _MMIO(addr + i * 4), mocs);
+}
- for (i = 0; i < table->size; i++)
- intel_uncore_write_fw(uncore,
- mocs_register(engine, i),
- get_entry_control(table, i));
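+/* Base mmio offset of each engine's MOCS register range, indexed by id. */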
+static u32 mocs_offset(const struct intel_engine_cs *engine)
+{
+ static const u32 offset[] = {
+ [RCS0] = __GEN9_RCS0_MOCS0,
+ [VCS0] = __GEN9_VCS0_MOCS0,
+ [VCS1] = __GEN9_VCS1_MOCS0,
+ [VECS0] = __GEN9_VECS0_MOCS0,
+ [BCS0] = __GEN9_BCS0_MOCS0,
+ [VCS2] = __GEN11_VCS2_MOCS0,
+ };
+
+ GEM_BUG_ON(engine->id >= ARRAY_SIZE(offset));
+ return offset[engine->id];
+}
- /* All remaining entries are unused */
- for (; i < table->n_entries; i++)
- intel_uncore_write_fw(uncore,
- mocs_register(engine, i),
- unused_value);
+static void init_mocs_table(struct intel_engine_cs *engine,
+ const struct drm_i915_mocs_table *table)
+{
+ __init_mocs_table(engine->uncore, table, mocs_offset(engine));
}
/*
@@ -383,51 +378,34 @@ static void init_mocs_table(struct intel_engine_cs *engine,
static u16 get_entry_l3cc(const struct drm_i915_mocs_table *table,
unsigned int index)
{
- if (table->table[index].used)
+ if (index < table->size && table->table[index].used)
return table->table[index].l3cc_value;
return table->table[I915_MOCS_PTE].l3cc_value;
}
-static inline u32 l3cc_combine(const struct drm_i915_mocs_table *table,
- u16 low,
- u16 high)
+static inline u32 l3cc_combine(u16 low, u16 high)
{
return low | (u32)high << 16;
}
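+/*
+ * Each LNCFCMOCS register packs two 16-bit L3CC entries, so walk the
+ * table in pairs, rounding up to cover an odd final entry.
+ */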
+#define for_each_l3cc(l3cc, t, i) \
+ for (i = 0; \
+ i < ((t)->n_entries + 1) / 2 ? \
+ (l3cc = l3cc_combine(get_entry_l3cc((t), 2 * i), \
+ get_entry_l3cc((t), 2 * i + 1))), 1 : \
+ 0; \
+ i++)
+
static void init_l3cc_table(struct intel_engine_cs *engine,
const struct drm_i915_mocs_table *table)
{
struct intel_uncore *uncore = engine->uncore;
- u16 unused_value = table->table[I915_MOCS_PTE].l3cc_value;
unsigned int i;
+ u32 l3cc;
- for (i = 0; i < table->size / 2; i++) {
- u16 low = get_entry_l3cc(table, 2 * i);
- u16 high = get_entry_l3cc(table, 2 * i + 1);
-
- intel_uncore_write(uncore,
- GEN9_LNCFCMOCS(i),
- l3cc_combine(table, low, high));
- }
-
- /* Odd table size - 1 left over */
- if (table->size & 1) {
- u16 low = get_entry_l3cc(table, 2 * i);
-
- intel_uncore_write(uncore,
- GEN9_LNCFCMOCS(i),
- l3cc_combine(table, low, unused_value));
- i++;
- }
-
- /* All remaining entries are also unused */
- for (; i < table->n_entries / 2; i++)
- intel_uncore_write(uncore,
- GEN9_LNCFCMOCS(i),
- l3cc_combine(table, unused_value,
- unused_value));
+ for_each_l3cc(l3cc, table, i)
+ intel_uncore_write_fw(uncore, GEN9_LNCFCMOCS(i), l3cc);
}
void intel_mocs_init_engine(struct intel_engine_cs *engine)
@@ -448,11 +426,14 @@ void intel_mocs_init_engine(struct intel_engine_cs *engine)
init_l3cc_table(engine, &table);
}
-static void intel_mocs_init_global(struct intel_gt *gt)
+static u32 global_mocs_offset(void)
+{
+ return i915_mmio_reg_offset(GEN12_GLOBAL_MOCS(0));
+}
+
+static void init_global_mocs(struct intel_gt *gt)
{
- struct intel_uncore *uncore = gt->uncore;
struct drm_i915_mocs_table table;
- unsigned int index;
/*
* LLC and eDRAM control values are not applicable to dgfx
@@ -460,32 +441,18 @@ static void intel_mocs_init_global(struct intel_gt *gt)
if (IS_DGFX(gt->i915))
return;
- GEM_BUG_ON(!HAS_GLOBAL_MOCS_REGISTERS(gt->i915));
-
if (!get_mocs_settings(gt->i915, &table))
return;
- if (GEM_DEBUG_WARN_ON(table.size > table.n_entries))
- return;
-
- for (index = 0; index < table.size; index++)
- intel_uncore_write(uncore,
- GEN12_GLOBAL_MOCS(index),
- table.table[index].control_value);
-
- /*
- * Ok, now set the unused entries to the invalid entry (index 0). These
- * entries are officially undefined and no contract for the contents and
- * settings is given for these entries.
- */
- for (; index < table.n_entries; index++)
- intel_uncore_write(uncore,
- GEN12_GLOBAL_MOCS(index),
- table.table[0].control_value);
+ __init_mocs_table(gt->uncore, &table, global_mocs_offset());
}
void intel_mocs_init(struct intel_gt *gt)
{
if (HAS_GLOBAL_MOCS_REGISTERS(gt->i915))
- intel_mocs_init_global(gt);
+ init_global_mocs(gt);
}
+
+#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
+#include "selftest_mocs.c"
+#endif
diff --git a/drivers/gpu/drm/i915/gt/intel_ppgtt.c b/drivers/gpu/drm/i915/gt/intel_ppgtt.c
new file mode 100644
index 000000000000..f86f7e68ce5e
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/intel_ppgtt.c
@@ -0,0 +1,218 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2020 Intel Corporation
+ */
+
+#include <linux/slab.h>
+
+#include "i915_trace.h"
+#include "intel_gtt.h"
+#include "gen6_ppgtt.h"
+#include "gen8_ppgtt.h"
+
+struct i915_page_table *alloc_pt(struct i915_address_space *vm)
+{
+ struct i915_page_table *pt;
+
+ pt = kmalloc(sizeof(*pt), I915_GFP_ALLOW_FAIL);
+ if (unlikely(!pt))
+ return ERR_PTR(-ENOMEM);
+
+ if (unlikely(setup_page_dma(vm, &pt->base))) {
+ kfree(pt);
+ return ERR_PTR(-ENOMEM);
+ }
+
+ atomic_set(&pt->used, 0);
+ return pt;
+}
+
+struct i915_page_directory *__alloc_pd(size_t sz)
+{
+ struct i915_page_directory *pd;
+
+ pd = kzalloc(sz, I915_GFP_ALLOW_FAIL);
+ if (unlikely(!pd))
+ return NULL;
+
+ spin_lock_init(&pd->lock);
+ return pd;
+}
+
+struct i915_page_directory *alloc_pd(struct i915_address_space *vm)
+{
+ struct i915_page_directory *pd;
+
+ pd = __alloc_pd(sizeof(*pd));
+ if (unlikely(!pd))
+ return ERR_PTR(-ENOMEM);
+
+ if (unlikely(setup_page_dma(vm, px_base(pd)))) {
+ kfree(pd);
+ return ERR_PTR(-ENOMEM);
+ }
+
+ return pd;
+}
+
+void free_pd(struct i915_address_space *vm, struct i915_page_dma *pd)
+{
+ cleanup_page_dma(vm, pd);
+ kfree(pd);
+}
+
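+/* Update one 64b descriptor in a page-table page via a transient mapping. */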
+static inline void
+write_dma_entry(struct i915_page_dma * const pdma,
+ const unsigned short idx,
+ const u64 encoded_entry)
+{
+ u64 * const vaddr = kmap_atomic(pdma->page);
+
+ vaddr[idx] = encoded_entry;
+ kunmap_atomic(vaddr);
+}
+
+void
+__set_pd_entry(struct i915_page_directory * const pd,
+ const unsigned short idx,
+ struct i915_page_dma * const to,
+ u64 (*encode)(const dma_addr_t, const enum i915_cache_level))
+{
+ /* Each thread pre-pins the pd, and we may have a thread per pde. */
+ GEM_BUG_ON(atomic_read(px_used(pd)) > NALLOC * ARRAY_SIZE(pd->entry));
+
+ atomic_inc(px_used(pd));
+ pd->entry[idx] = to;
+ write_dma_entry(px_base(pd), idx, encode(to->daddr, I915_CACHE_LLC));
+}
+
+void
+clear_pd_entry(struct i915_page_directory * const pd,
+ const unsigned short idx,
+ const struct i915_page_scratch * const scratch)
+{
+ GEM_BUG_ON(atomic_read(px_used(pd)) == 0);
+
+ write_dma_entry(px_base(pd), idx, scratch->encode);
+ pd->entry[idx] = NULL;
+ atomic_dec(px_used(pd));
+}
+
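+/*
+ * Drop a reference to the page table; only the final release takes
+ * pd->lock, keeping the common case lock-free via atomic_add_unless().
+ */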
+bool
+release_pd_entry(struct i915_page_directory * const pd,
+ const unsigned short idx,
+ struct i915_page_table * const pt,
+ const struct i915_page_scratch * const scratch)
+{
+ bool free = false;
+
+ if (atomic_add_unless(&pt->used, -1, 1))
+ return false;
+
+ spin_lock(&pd->lock);
+ if (atomic_dec_and_test(&pt->used)) {
+ clear_pd_entry(pd, idx, scratch);
+ free = true;
+ }
+ spin_unlock(&pd->lock);
+
+ return free;
+}
+
+int i915_ppgtt_init_hw(struct intel_gt *gt)
+{
+ struct drm_i915_private *i915 = gt->i915;
+
+ gtt_write_workarounds(gt);
+
+ if (IS_GEN(i915, 6))
+ gen6_ppgtt_enable(gt);
+ else if (IS_GEN(i915, 7))
+ gen7_ppgtt_enable(gt);
+
+ return 0;
+}
+
+static struct i915_ppgtt *
+__ppgtt_create(struct intel_gt *gt)
+{
+ if (INTEL_GEN(gt->i915) < 8)
+ return gen6_ppgtt_create(gt);
+ else
+ return gen8_ppgtt_create(gt);
+}
+
+struct i915_ppgtt *i915_ppgtt_create(struct intel_gt *gt)
+{
+ struct i915_ppgtt *ppgtt;
+
+ ppgtt = __ppgtt_create(gt);
+ if (IS_ERR(ppgtt))
+ return ppgtt;
+
+ trace_i915_ppgtt_create(&ppgtt->vm);
+
+ return ppgtt;
+}
+
+static int ppgtt_bind_vma(struct i915_vma *vma,
+ enum i915_cache_level cache_level,
+ u32 flags)
+{
+ u32 pte_flags;
+ int err;
+
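+ /* Before first use, allocate the page tables covering this range. */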
+ if (flags & I915_VMA_ALLOC) {
+ err = vma->vm->allocate_va_range(vma->vm,
+ vma->node.start, vma->size);
+ if (err)
+ return err;
+
+ set_bit(I915_VMA_ALLOC_BIT, __i915_vma_flags(vma));
+ }
+
+ /* Applicable to VLV, and gen8+ */
+ pte_flags = 0;
+ if (i915_gem_object_is_readonly(vma->obj))
+ pte_flags |= PTE_READ_ONLY;
+
+ GEM_BUG_ON(!test_bit(I915_VMA_ALLOC_BIT, __i915_vma_flags(vma)));
+ vma->vm->insert_entries(vma->vm, vma, cache_level, pte_flags);
+ wmb();
+
+ return 0;
+}
+
+static void ppgtt_unbind_vma(struct i915_vma *vma)
+{
+ if (test_and_clear_bit(I915_VMA_ALLOC_BIT, __i915_vma_flags(vma)))
+ vma->vm->clear_range(vma->vm, vma->node.start, vma->size);
+}
+
+int ppgtt_set_pages(struct i915_vma *vma)
+{
+ GEM_BUG_ON(vma->pages);
+
+ vma->pages = vma->obj->mm.pages;
+
+ vma->page_sizes = vma->obj->mm.page_sizes;
+
+ return 0;
+}
+
+void ppgtt_init(struct i915_ppgtt *ppgtt, struct intel_gt *gt)
+{
+ struct drm_i915_private *i915 = gt->i915;
+
+ ppgtt->vm.gt = gt;
+ ppgtt->vm.i915 = i915;
+ ppgtt->vm.dma = &i915->drm.pdev->dev;
+ ppgtt->vm.total = BIT_ULL(INTEL_INFO(i915)->ppgtt_size);
+
+ i915_address_space_init(&ppgtt->vm, VM_CLASS_PPGTT);
+
+ ppgtt->vm.vma_ops.bind_vma = ppgtt_bind_vma;
+ ppgtt->vm.vma_ops.unbind_vma = ppgtt_unbind_vma;
+ ppgtt->vm.vma_ops.set_pages = ppgtt_set_pages;
+ ppgtt->vm.vma_ops.clear_pages = clear_pages;
+}
diff --git a/drivers/gpu/drm/i915/gt/intel_rc6.c b/drivers/gpu/drm/i915/gt/intel_rc6.c
index 700104b90163..9e303c29d6e3 100644
--- a/drivers/gpu/drm/i915/gt/intel_rc6.c
+++ b/drivers/gpu/drm/i915/gt/intel_rc6.c
@@ -88,21 +88,18 @@ static void gen11_rc6_enable(struct intel_rc6 *rc6)
 * do not want the enable hysteresis to be less than the wakeup latency.
*
* igt/gem_exec_nop/sequential provides a rough estimate for the
- * service latency, and puts it around 10us for Broadwell (and other
- * big core) and around 40us for Broxton (and other low power cores).
- * [Note that for legacy ringbuffer submission, this is less than 1us!]
- * However, the wakeup latency on Broxton is closer to 100us. To be
- * conservative, we have to factor in a context switch on top (due
- * to ksoftirqd).
+ * service latency, and puts it under 10us for Icelake, similar to
+ * Broadwell+. To be conservative, we want to factor in a context
+ * switch on top (due to ksoftirqd).
*/
- set(uncore, GEN9_MEDIA_PG_IDLE_HYSTERESIS, 250);
- set(uncore, GEN9_RENDER_PG_IDLE_HYSTERESIS, 250);
+ set(uncore, GEN9_MEDIA_PG_IDLE_HYSTERESIS, 60);
+ set(uncore, GEN9_RENDER_PG_IDLE_HYSTERESIS, 60);
/* 3a: Enable RC6 */
- set(uncore, GEN6_RC_CONTROL,
- GEN6_RC_CTL_HW_ENABLE |
- GEN6_RC_CTL_RC6_ENABLE |
- GEN6_RC_CTL_EI_MODE(1));
+ rc6->ctl_enable =
+ GEN6_RC_CTL_HW_ENABLE |
+ GEN6_RC_CTL_RC6_ENABLE |
+ GEN6_RC_CTL_EI_MODE(1);
set(uncore, GEN9_PG_ENABLE,
GEN9_RENDER_PG_ENABLE |
@@ -173,10 +170,10 @@ static void gen9_rc6_enable(struct intel_rc6 *rc6)
else
rc6_mode = GEN6_RC_CTL_EI_MODE(1);
- set(uncore, GEN6_RC_CONTROL,
- GEN6_RC_CTL_HW_ENABLE |
- GEN6_RC_CTL_RC6_ENABLE |
- rc6_mode);
+ rc6->ctl_enable =
+ GEN6_RC_CTL_HW_ENABLE |
+ GEN6_RC_CTL_RC6_ENABLE |
+ rc6_mode;
/*
* WaRsDisableCoarsePowerGating:skl,cnl
@@ -203,10 +200,10 @@ static void gen8_rc6_enable(struct intel_rc6 *rc6)
set(uncore, GEN6_RC6_THRESHOLD, 625); /* 800us/1.28 for TO */
/* 3: Enable RC6 */
- set(uncore, GEN6_RC_CONTROL,
+ rc6->ctl_enable =
GEN6_RC_CTL_HW_ENABLE |
GEN7_RC_CTL_TO_MODE |
- GEN6_RC_CTL_RC6_ENABLE);
+ GEN6_RC_CTL_RC6_ENABLE;
}
static void gen6_rc6_enable(struct intel_rc6 *rc6)
@@ -242,10 +239,10 @@ static void gen6_rc6_enable(struct intel_rc6 *rc6)
rc6_mask |= GEN6_RC_CTL_RC6p_ENABLE;
if (HAS_RC6pp(i915))
rc6_mask |= GEN6_RC_CTL_RC6pp_ENABLE;
- set(uncore, GEN6_RC_CONTROL,
+ rc6->ctl_enable =
rc6_mask |
GEN6_RC_CTL_EI_MODE(1) |
- GEN6_RC_CTL_HW_ENABLE);
+ GEN6_RC_CTL_HW_ENABLE;
rc6vids = 0;
ret = sandybridge_pcode_read(i915, GEN6_PCODE_READ_RC6VIDS,
@@ -363,7 +360,7 @@ static void chv_rc6_enable(struct intel_rc6 *rc6)
VLV_RENDER_RC6_COUNT_EN));
/* 3: Enable RC6 */
- set(uncore, GEN6_RC_CONTROL, GEN7_RC_CTL_TO_MODE);
+ rc6->ctl_enable = GEN7_RC_CTL_TO_MODE;
}
static void vlv_rc6_enable(struct intel_rc6 *rc6)
@@ -389,8 +386,8 @@ static void vlv_rc6_enable(struct intel_rc6 *rc6)
VLV_MEDIA_RC6_COUNT_EN |
VLV_RENDER_RC6_COUNT_EN));
- set(uncore, GEN6_RC_CONTROL,
- GEN7_RC_CTL_TO_MODE | VLV_RC_CTL_CTX_RST_PARALLEL);
+ rc6->ctl_enable =
+ GEN7_RC_CTL_TO_MODE | VLV_RC_CTL_CTX_RST_PARALLEL;
}
static bool bxt_check_bios_rc6_setup(struct intel_rc6 *rc6)
@@ -491,64 +488,19 @@ static void rpm_put(struct intel_rc6 *rc6)
rc6->wakeref = false;
}
-static bool intel_rc6_ctx_corrupted(struct intel_rc6 *rc6)
-{
- return !intel_uncore_read(rc6_to_uncore(rc6), GEN8_RC6_CTX_INFO);
-}
-
-static void intel_rc6_ctx_wa_init(struct intel_rc6 *rc6)
+static bool pctx_corrupted(struct intel_rc6 *rc6)
{
struct drm_i915_private *i915 = rc6_to_i915(rc6);
if (!NEEDS_RC6_CTX_CORRUPTION_WA(i915))
- return;
-
- if (intel_rc6_ctx_corrupted(rc6)) {
- DRM_INFO("RC6 context corrupted, disabling runtime power management\n");
- rc6->ctx_corrupted = true;
- }
-}
-
-/**
- * intel_rc6_ctx_wa_resume - system resume sequence for the RC6 CTX WA
- * @rc6: rc6 state
- *
- * Perform any steps needed to re-init the RC6 CTX WA after system resume.
- */
-void intel_rc6_ctx_wa_resume(struct intel_rc6 *rc6)
-{
- if (rc6->ctx_corrupted && !intel_rc6_ctx_corrupted(rc6)) {
- DRM_INFO("RC6 context restored, re-enabling runtime power management\n");
- rc6->ctx_corrupted = false;
- }
-}
-
-/**
- * intel_rc6_ctx_wa_check - check for a new RC6 CTX corruption
- * @rc6: rc6 state
- *
- * Check if an RC6 CTX corruption has happened since the last check and if so
- * disable RC6 and runtime power management.
-*/
-void intel_rc6_ctx_wa_check(struct intel_rc6 *rc6)
-{
- struct drm_i915_private *i915 = rc6_to_i915(rc6);
-
- if (!NEEDS_RC6_CTX_CORRUPTION_WA(i915))
- return;
-
- if (rc6->ctx_corrupted)
- return;
-
- if (!intel_rc6_ctx_corrupted(rc6))
- return;
-
- DRM_NOTE("RC6 context corruption, disabling runtime power management\n");
+ return false;
- intel_rc6_disable(rc6);
- rc6->ctx_corrupted = true;
+ if (intel_uncore_read(rc6_to_uncore(rc6), GEN8_RC6_CTX_INFO))
+ return false;
- return;
+ dev_notice(i915->drm.dev,
+ "RC6 context corruption, disabling runtime power management\n");
+ return true;
}
static void __intel_rc6_disable(struct intel_rc6 *rc6)
@@ -575,8 +527,6 @@ void intel_rc6_init(struct intel_rc6 *rc6)
if (!rc6_supported(rc6))
return;
- intel_rc6_ctx_wa_init(rc6);
-
if (IS_CHERRYVIEW(i915))
err = chv_rc6_init(rc6);
else if (IS_VALLEYVIEW(i915))
@@ -611,9 +561,6 @@ void intel_rc6_enable(struct intel_rc6 *rc6)
GEM_BUG_ON(rc6->enabled);
- if (rc6->ctx_corrupted)
- return;
-
intel_uncore_forcewake_get(uncore, FORCEWAKE_ALL);
if (IS_CHERRYVIEW(i915))
@@ -629,13 +576,51 @@ void intel_rc6_enable(struct intel_rc6 *rc6)
else if (INTEL_GEN(i915) >= 6)
gen6_rc6_enable(rc6);
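+ /* Note whether RC6 itself was requested; parking uses this to enter it by hand. */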
+ rc6->manual = rc6->ctl_enable & GEN6_RC_CTL_RC6_ENABLE;
+ if (NEEDS_RC6_CTX_CORRUPTION_WA(i915))
+ rc6->ctl_enable = 0;
+
intel_uncore_forcewake_put(uncore, FORCEWAKE_ALL);
+ if (unlikely(pctx_corrupted(rc6)))
+ return;
+
/* rc6 is ready, runtime-pm is go! */
rpm_put(rc6);
rc6->enabled = true;
}
+void intel_rc6_unpark(struct intel_rc6 *rc6)
+{
+ struct intel_uncore *uncore = rc6_to_uncore(rc6);
+
+ if (!rc6->enabled)
+ return;
+
+ /* Restore HW timers for automatic RC6 entry while busy */
+ set(uncore, GEN6_RC_CONTROL, rc6->ctl_enable);
+}
+
+void intel_rc6_park(struct intel_rc6 *rc6)
+{
+ struct intel_uncore *uncore = rc6_to_uncore(rc6);
+
+ if (!rc6->enabled)
+ return;
+
+ if (unlikely(pctx_corrupted(rc6))) {
+ intel_rc6_disable(rc6);
+ return;
+ }
+
+ if (!rc6->manual)
+ return;
+
+ /* Turn off the HW timers and go directly to rc6 */
+ set(uncore, GEN6_RC_CONTROL, GEN6_RC_CTL_RC6_ENABLE);
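+ /* 0x4 requests RC6 as the software-controlled target power state. */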
+ set(uncore, GEN6_RC_STATE, 0x4 << RC_SW_TARGET_STATE_SHIFT);
+}
+
void intel_rc6_disable(struct intel_rc6 *rc6)
{
if (!rc6->enabled)
@@ -785,3 +770,7 @@ u64 intel_rc6_residency_us(struct intel_rc6 *rc6, i915_reg_t reg)
{
return DIV_ROUND_UP_ULL(intel_rc6_residency_ns(rc6, reg), 1000);
}
+
+#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
+#include "selftest_rc6.c"
+#endif
diff --git a/drivers/gpu/drm/i915/gt/intel_rc6.h b/drivers/gpu/drm/i915/gt/intel_rc6.h
index 1370f6834a4c..9f0f23fca8af 100644
--- a/drivers/gpu/drm/i915/gt/intel_rc6.h
+++ b/drivers/gpu/drm/i915/gt/intel_rc6.h
@@ -15,6 +15,9 @@ struct intel_rc6;
void intel_rc6_init(struct intel_rc6 *rc6);
void intel_rc6_fini(struct intel_rc6 *rc6);
+void intel_rc6_unpark(struct intel_rc6 *rc6);
+void intel_rc6_park(struct intel_rc6 *rc6);
+
void intel_rc6_sanitize(struct intel_rc6 *rc6);
void intel_rc6_enable(struct intel_rc6 *rc6);
void intel_rc6_disable(struct intel_rc6 *rc6);
@@ -22,7 +25,4 @@ void intel_rc6_disable(struct intel_rc6 *rc6);
u64 intel_rc6_residency_ns(struct intel_rc6 *rc6, i915_reg_t reg);
u64 intel_rc6_residency_us(struct intel_rc6 *rc6, i915_reg_t reg);
-void intel_rc6_ctx_wa_check(struct intel_rc6 *rc6);
-void intel_rc6_ctx_wa_resume(struct intel_rc6 *rc6);
-
#endif /* INTEL_RC6_H */
diff --git a/drivers/gpu/drm/i915/gt/intel_rc6_types.h b/drivers/gpu/drm/i915/gt/intel_rc6_types.h
index 89ad5697a8d4..bfbb623f7a4f 100644
--- a/drivers/gpu/drm/i915/gt/intel_rc6_types.h
+++ b/drivers/gpu/drm/i915/gt/intel_rc6_types.h
@@ -18,12 +18,14 @@ struct intel_rc6 {
u64 prev_hw_residency[4];
u64 cur_residency[4];
+ u32 ctl_enable;
+
struct drm_i915_gem_object *pctx;
bool supported : 1;
bool enabled : 1;
+ bool manual : 1;
bool wakeref : 1;
- bool ctx_corrupted : 1;
};
#endif /* INTEL_RC6_TYPES_H */
diff --git a/drivers/gpu/drm/i915/gt/intel_renderstate.c b/drivers/gpu/drm/i915/gt/intel_renderstate.c
index c4edc35e7d89..5954ecc3207f 100644
--- a/drivers/gpu/drm/i915/gt/intel_renderstate.c
+++ b/drivers/gpu/drm/i915/gt/intel_renderstate.c
@@ -29,16 +29,6 @@
#include "intel_renderstate.h"
#include "intel_ring.h"
-struct intel_renderstate {
- const struct intel_renderstate_rodata *rodata;
- struct drm_i915_gem_object *obj;
- struct i915_vma *vma;
- u32 batch_offset;
- u32 batch_size;
- u32 aux_offset;
- u32 aux_size;
-};
-
static const struct intel_renderstate_rodata *
render_state_get_rodata(const struct intel_engine_cs *engine)
{
@@ -84,11 +74,11 @@ static int render_state_setup(struct intel_renderstate *so,
u32 *d;
int ret;
- ret = i915_gem_object_prepare_write(so->obj, &needs_clflush);
+ ret = i915_gem_object_prepare_write(so->vma->obj, &needs_clflush);
if (ret)
return ret;
- d = kmap_atomic(i915_gem_object_get_dirty_page(so->obj, 0));
+ d = kmap_atomic(i915_gem_object_get_dirty_page(so->vma->obj, 0));
while (i < rodata->batch_items) {
u32 s = rodata->batch[i];
@@ -166,7 +156,7 @@ static int render_state_setup(struct intel_renderstate *so,
ret = 0;
out:
- i915_gem_object_finish_access(so->obj);
+ i915_gem_object_finish_access(so->vma->obj);
return ret;
err:
@@ -177,61 +167,84 @@ err:
#undef OUT_BATCH
-int intel_renderstate_emit(struct i915_request *rq)
+int intel_renderstate_init(struct intel_renderstate *so,
+ struct intel_engine_cs *engine)
{
- struct intel_engine_cs *engine = rq->engine;
- struct intel_renderstate so = {}; /* keep the compiler happy */
+ struct drm_i915_gem_object *obj;
int err;
- so.rodata = render_state_get_rodata(engine);
- if (!so.rodata)
+ memset(so, 0, sizeof(*so));
+
+ so->rodata = render_state_get_rodata(engine);
+ if (!so->rodata)
return 0;
- if (so.rodata->batch_items * 4 > PAGE_SIZE)
+ if (so->rodata->batch_items * 4 > PAGE_SIZE)
return -EINVAL;
- so.obj = i915_gem_object_create_internal(engine->i915, PAGE_SIZE);
- if (IS_ERR(so.obj))
- return PTR_ERR(so.obj);
+ obj = i915_gem_object_create_internal(engine->i915, PAGE_SIZE);
+ if (IS_ERR(obj))
+ return PTR_ERR(obj);
- so.vma = i915_vma_instance(so.obj, &engine->gt->ggtt->vm, NULL);
- if (IS_ERR(so.vma)) {
- err = PTR_ERR(so.vma);
+ so->vma = i915_vma_instance(obj, &engine->gt->ggtt->vm, NULL);
+ if (IS_ERR(so->vma)) {
+ err = PTR_ERR(so->vma);
goto err_obj;
}
- err = i915_vma_pin(so.vma, 0, 0, PIN_GLOBAL | PIN_HIGH);
+ err = i915_vma_pin(so->vma, 0, 0, PIN_GLOBAL | PIN_HIGH);
if (err)
goto err_vma;
- err = render_state_setup(&so, rq->i915);
+ err = render_state_setup(so, engine->i915);
if (err)
goto err_unpin;
+ return 0;
+
+err_unpin:
+ i915_vma_unpin(so->vma);
+err_vma:
+ i915_vma_close(so->vma);
+err_obj:
+ i915_gem_object_put(obj);
+ so->vma = NULL;
+ return err;
+}
+
+int intel_renderstate_emit(struct intel_renderstate *so,
+ struct i915_request *rq)
+{
+ struct intel_engine_cs *engine = rq->engine;
+ int err;
+
+ if (!so->vma)
+ return 0;
+
err = engine->emit_bb_start(rq,
- so.batch_offset, so.batch_size,
+ so->batch_offset, so->batch_size,
I915_DISPATCH_SECURE);
if (err)
- goto err_unpin;
+ return err;
- if (so.aux_size > 8) {
+ if (so->aux_size > 8) {
err = engine->emit_bb_start(rq,
- so.aux_offset, so.aux_size,
+ so->aux_offset, so->aux_size,
I915_DISPATCH_SECURE);
if (err)
- goto err_unpin;
+ return err;
}
- i915_vma_lock(so.vma);
- err = i915_request_await_object(rq, so.vma->obj, false);
+ i915_vma_lock(so->vma);
+ err = i915_request_await_object(rq, so->vma->obj, false);
if (err == 0)
- err = i915_vma_move_to_active(so.vma, rq, 0);
- i915_vma_unlock(so.vma);
-err_unpin:
- i915_vma_unpin(so.vma);
-err_vma:
- i915_vma_close(so.vma);
-err_obj:
- i915_gem_object_put(so.obj);
+ err = i915_vma_move_to_active(so->vma, rq, 0);
+ i915_vma_unlock(so->vma);
+
return err;
}
+
+void intel_renderstate_fini(struct intel_renderstate *so)
+{
+ i915_vma_unpin_and_release(&so->vma, 0);
+}
diff --git a/drivers/gpu/drm/i915/gt/intel_renderstate.h b/drivers/gpu/drm/i915/gt/intel_renderstate.h
index 8d5079145054..5700be69a05a 100644
--- a/drivers/gpu/drm/i915/gt/intel_renderstate.h
+++ b/drivers/gpu/drm/i915/gt/intel_renderstate.h
@@ -27,6 +27,8 @@
#include <linux/types.h>
struct i915_request;
+struct intel_engine_cs;
+struct i915_vma;
struct intel_renderstate_rodata {
const u32 *reloc;
@@ -46,6 +48,19 @@ extern const struct intel_renderstate_rodata gen7_null_state;
extern const struct intel_renderstate_rodata gen8_null_state;
extern const struct intel_renderstate_rodata gen9_null_state;
-int intel_renderstate_emit(struct i915_request *rq);
+struct intel_renderstate {
+ const struct intel_renderstate_rodata *rodata;
+ struct i915_vma *vma;
+ u32 batch_offset;
+ u32 batch_size;
+ u32 aux_offset;
+ u32 aux_size;
+};
+
+int intel_renderstate_init(struct intel_renderstate *so,
+ struct intel_engine_cs *engine);
+int intel_renderstate_emit(struct intel_renderstate *so,
+ struct i915_request *rq);
+void intel_renderstate_fini(struct intel_renderstate *so);
#endif /* _INTEL_RENDERSTATE_H_ */
diff --git a/drivers/gpu/drm/i915/gt/intel_reset.c b/drivers/gpu/drm/i915/gt/intel_reset.c
index c97423a76642..beee0cf89bce 100644
--- a/drivers/gpu/drm/i915/gt/intel_reset.c
+++ b/drivers/gpu/drm/i915/gt/intel_reset.c
@@ -21,6 +21,7 @@
#include "intel_reset.h"
#include "uc/intel_guc.h"
+#include "uc/intel_guc_submission.h"
#define RESET_MAX_RETRIES 3
@@ -40,27 +41,29 @@ static void rmw_clear_fw(struct intel_uncore *uncore, i915_reg_t reg, u32 clr)
static void engine_skip_context(struct i915_request *rq)
{
struct intel_engine_cs *engine = rq->engine;
- struct i915_gem_context *hung_ctx = rq->gem_context;
+ struct intel_context *hung_ctx = rq->context;
if (!i915_request_is_active(rq))
return;
lockdep_assert_held(&engine->active.lock);
list_for_each_entry_continue(rq, &engine->active.requests, sched.link)
- if (rq->gem_context == hung_ctx)
+ if (rq->context == hung_ctx)
i915_request_skip(rq, -EIO);
}
-static void client_mark_guilty(struct drm_i915_file_private *file_priv,
- const struct i915_gem_context *ctx)
+static void client_mark_guilty(struct i915_gem_context *ctx, bool banned)
{
- unsigned int score;
+ struct drm_i915_file_private *file_priv = ctx->file_priv;
unsigned long prev_hang;
+ unsigned int score;
+
+ if (IS_ERR_OR_NULL(file_priv))
+ return;
- if (i915_gem_context_is_banned(ctx))
+ score = 0;
+ if (banned)
score = I915_CLIENT_SCORE_CONTEXT_BAN;
- else
- score = 0;
prev_hang = xchg(&file_priv->hang_timestamp, jiffies);
if (time_before(jiffies, prev_hang + I915_CLIENT_FAST_HANG_JIFFIES))
@@ -75,17 +78,38 @@ static void client_mark_guilty(struct drm_i915_file_private *file_priv,
}
}
-static bool context_mark_guilty(struct i915_gem_context *ctx)
+static bool mark_guilty(struct i915_request *rq)
{
+ struct i915_gem_context *ctx;
unsigned long prev_hang;
bool banned;
int i;
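+ /* The GEM context is RCU protected; only use it after taking a full reference. */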
+ rcu_read_lock();
+ ctx = rcu_dereference(rq->context->gem_context);
+ if (ctx && !kref_get_unless_zero(&ctx->ref))
+ ctx = NULL;
+ rcu_read_unlock();
+ if (!ctx)
+ return false;
+
+ if (i915_gem_context_is_closed(ctx)) {
+ intel_context_set_banned(rq->context);
+ banned = true;
+ goto out;
+ }
+
atomic_inc(&ctx->guilty_count);
/* Cool contexts are too cool to be banned! (Used for reset testing.) */
- if (!i915_gem_context_is_bannable(ctx))
- return false;
+ if (!i915_gem_context_is_bannable(ctx)) {
+ banned = false;
+ goto out;
+ }
+
+ dev_notice(ctx->i915->drm.dev,
+ "%s context reset due to GPU hang\n",
+ ctx->name);
/* Record the timestamp for the last N hangs */
prev_hang = ctx->hang_timestamp[0];
@@ -100,38 +124,43 @@ static bool context_mark_guilty(struct i915_gem_context *ctx)
if (banned) {
DRM_DEBUG_DRIVER("context %s: guilty %d, banned\n",
ctx->name, atomic_read(&ctx->guilty_count));
- i915_gem_context_set_banned(ctx);
+ intel_context_set_banned(rq->context);
}
- if (!IS_ERR_OR_NULL(ctx->file_priv))
- client_mark_guilty(ctx->file_priv, ctx);
+ client_mark_guilty(ctx, banned);
+out:
+ i915_gem_context_put(ctx);
return banned;
}
-static void context_mark_innocent(struct i915_gem_context *ctx)
+static void mark_innocent(struct i915_request *rq)
{
- atomic_inc(&ctx->active_count);
+ struct i915_gem_context *ctx;
+
+ rcu_read_lock();
+ ctx = rcu_dereference(rq->context->gem_context);
+ if (ctx)
+ atomic_inc(&ctx->active_count);
+ rcu_read_unlock();
}
void __i915_request_reset(struct i915_request *rq, bool guilty)
{
- GEM_TRACE("%s rq=%llx:%lld, guilty? %s\n",
- rq->engine->name,
- rq->fence.context,
- rq->fence.seqno,
- yesno(guilty));
+ RQ_TRACE(rq, "guilty? %s\n", yesno(guilty));
GEM_BUG_ON(i915_request_completed(rq));
+ rcu_read_lock(); /* protect the GEM context */
if (guilty) {
i915_request_skip(rq, -EIO);
- if (context_mark_guilty(rq->gem_context))
+ if (mark_guilty(rq))
engine_skip_context(rq);
} else {
dma_fence_set_error(&rq->fence, -EAGAIN);
- context_mark_innocent(rq->gem_context);
+ mark_innocent(rq);
}
+ rcu_read_unlock();
}
static bool i915_in_reset(struct pci_dev *pdev)
@@ -218,9 +247,8 @@ out:
return ret;
}
-static int ironlake_do_reset(struct intel_gt *gt,
- intel_engine_mask_t engine_mask,
- unsigned int retry)
+static int ilk_do_reset(struct intel_gt *gt, intel_engine_mask_t engine_mask,
+ unsigned int retry)
{
struct intel_uncore *uncore = gt->uncore;
int ret;
@@ -564,7 +592,7 @@ static reset_func intel_get_gpu_reset(const struct intel_gt *gt)
else if (INTEL_GEN(i915) >= 6)
return gen6_reset_engines;
else if (INTEL_GEN(i915) >= 5)
- return ironlake_do_reset;
+ return ilk_do_reset;
else if (IS_G4X(i915))
return g4x_do_reset;
else if (IS_G33(i915) || IS_PINEVIEW(i915))
@@ -592,7 +620,7 @@ int __intel_gt_reset(struct intel_gt *gt, intel_engine_mask_t engine_mask)
*/
intel_uncore_forcewake_get(gt->uncore, FORCEWAKE_ALL);
for (retry = 0; ret == -ETIMEDOUT && retry < retries; retry++) {
- GEM_TRACE("engine_mask=%x\n", engine_mask);
+ GT_TRACE(gt, "engine_mask=%x\n", engine_mask);
preempt_disable();
ret = reset(gt, engine_mask, retry);
preempt_enable();
@@ -647,7 +675,8 @@ static void reset_prepare_engine(struct intel_engine_cs *engine)
* GPU state upon resume, i.e. fail to restart after a reset.
*/
intel_uncore_forcewake_get(engine->uncore, FORCEWAKE_ALL);
- engine->reset.prepare(engine);
+ if (engine->reset.prepare)
+ engine->reset.prepare(engine);
}
static void revoke_mmaps(struct intel_gt *gt)
@@ -667,8 +696,13 @@ static void revoke_mmaps(struct intel_gt *gt)
continue;
GEM_BUG_ON(vma->fence != &gt->ggtt->fence_regs[i]);
- node = &vma->obj->base.vma_node;
+
+ if (!vma->mmo)
+ continue;
+
+ node = &vma->mmo->vma_node;
vma_offset = vma->ggtt_view.partial.offset << PAGE_SHIFT;
+
unmap_mapping_range(gt->i915->drm.anon_inode->i_mapping,
drm_vma_node_offset_addr(node) + vma_offset,
vma->size,
@@ -722,10 +756,11 @@ static int gt_reset(struct intel_gt *gt, intel_engine_mask_t stalled_mask)
static void reset_finish_engine(struct intel_engine_cs *engine)
{
- engine->reset.finish(engine);
+ if (engine->reset.finish)
+ engine->reset.finish(engine);
intel_uncore_forcewake_put(engine->uncore, FORCEWAKE_ALL);
- intel_engine_breadcrumbs_irq(engine);
+ intel_engine_signal_breadcrumbs(engine);
}
static void reset_finish(struct intel_gt *gt, intel_engine_mask_t awake)
@@ -745,8 +780,7 @@ static void nop_submit_request(struct i915_request *request)
struct intel_engine_cs *engine = request->engine;
unsigned long flags;
- GEM_TRACE("%s fence %llx:%lld -> -EIO\n",
- engine->name, request->fence.context, request->fence.seqno);
+ RQ_TRACE(request, "-EIO\n");
dma_fence_set_error(&request->fence, -EIO);
spin_lock_irqsave(&engine->active.lock, flags);
@@ -754,7 +788,7 @@ static void nop_submit_request(struct i915_request *request)
i915_request_mark_complete(request);
spin_unlock_irqrestore(&engine->active.lock, flags);
- intel_engine_queue_breadcrumbs(engine);
+ intel_engine_signal_breadcrumbs(engine);
}
static void __intel_gt_set_wedged(struct intel_gt *gt)
@@ -773,7 +807,7 @@ static void __intel_gt_set_wedged(struct intel_gt *gt)
intel_engine_dump(engine, &p, "%s\n", engine->name);
}
- GEM_TRACE("start\n");
+ GT_TRACE(gt, "start\n");
/*
* First, stop submission to hw, but do not yet complete requests by
@@ -799,11 +833,12 @@ static void __intel_gt_set_wedged(struct intel_gt *gt)
/* Mark all executing requests as skipped */
for_each_engine(engine, gt, id)
- engine->cancel_requests(engine);
+ if (engine->reset.cancel)
+ engine->reset.cancel(engine);
reset_finish(gt, awake);
- GEM_TRACE("end\n");
+ GT_TRACE(gt, "end\n");
}
void intel_gt_set_wedged(struct intel_gt *gt)
@@ -820,7 +855,6 @@ static bool __intel_gt_unset_wedged(struct intel_gt *gt)
{
struct intel_gt_timelines *timelines = &gt->timelines;
struct intel_timeline *tl;
- unsigned long flags;
bool ok;
if (!test_bit(I915_WEDGED, &gt->reset.flags))
@@ -830,7 +864,7 @@ static bool __intel_gt_unset_wedged(struct intel_gt *gt)
if (test_bit(I915_WEDGED_ON_INIT, &gt->reset.flags))
return false;
- GEM_TRACE("start\n");
+ GT_TRACE(gt, "start\n");
/*
* Before unwedging, make sure that all pending operations
@@ -842,7 +876,7 @@ static bool __intel_gt_unset_wedged(struct intel_gt *gt)
*
* No more can be submitted until we reset the wedged bit.
*/
- spin_lock_irqsave(&timelines->lock, flags);
+ spin_lock(&timelines->lock);
list_for_each_entry(tl, &timelines->active_list, link) {
struct dma_fence *fence;
@@ -850,7 +884,7 @@ static bool __intel_gt_unset_wedged(struct intel_gt *gt)
if (!fence)
continue;
- spin_unlock_irqrestore(&timelines->lock, flags);
+ spin_unlock(&timelines->lock);
/*
* All internal dependencies (i915_requests) will have
@@ -863,10 +897,10 @@ static bool __intel_gt_unset_wedged(struct intel_gt *gt)
dma_fence_put(fence);
/* Restart the iteration after dropping the lock */
- spin_lock_irqsave(&timelines->lock, flags);
+ spin_lock(&timelines->lock);
tl = list_entry(&timelines->active_list, typeof(*tl), link);
}
- spin_unlock_irqrestore(&timelines->lock, flags);
+ spin_unlock(&timelines->lock);
/* We must reset pending GPU events before restoring our submission */
ok = !HAS_EXECLISTS(gt->i915); /* XXX better agnosticism desired */
@@ -892,7 +926,7 @@ static bool __intel_gt_unset_wedged(struct intel_gt *gt)
*/
intel_engines_reset_default_submission(gt);
- GEM_TRACE("end\n");
+ GT_TRACE(gt, "end\n");
smp_mb__before_atomic(); /* complete takeover before enabling execbuf */
clear_bit(I915_WEDGED, &gt->reset.flags);
@@ -967,7 +1001,7 @@ void intel_gt_reset(struct intel_gt *gt,
intel_engine_mask_t awake;
int ret;
- GEM_TRACE("flags=%lx\n", gt->reset.flags);
+ GT_TRACE(gt, "flags=%lx\n", gt->reset.flags);
might_sleep();
GEM_BUG_ON(!test_bit(I915_RESET_BACKOFF, &gt->reset.flags));
@@ -1070,9 +1104,10 @@ static inline int intel_gt_reset_engine(struct intel_engine_cs *engine)
int intel_engine_reset(struct intel_engine_cs *engine, const char *msg)
{
struct intel_gt *gt = engine->gt;
+ bool uses_guc = intel_engine_in_guc_submission_mode(engine);
int ret;
- GEM_TRACE("%s flags=%lx\n", engine->name, gt->reset.flags);
+ ENGINE_TRACE(engine, "flags=%lx\n", gt->reset.flags);
GEM_BUG_ON(!test_bit(I915_RESET_ENGINE + engine->id, &gt->reset.flags));
if (!intel_engine_pm_get_if_awake(engine))
@@ -1085,14 +1120,14 @@ int intel_engine_reset(struct intel_engine_cs *engine, const char *msg)
"Resetting %s for %s\n", engine->name, msg);
atomic_inc(&engine->i915->gpu_error.reset_engine_count[engine->uabi_class]);
- if (!engine->gt->uc.guc.execbuf_client)
+ if (!uses_guc)
ret = intel_gt_reset_engine(engine);
else
ret = intel_guc_reset_engine(&engine->gt->uc.guc, engine);
if (ret) {
/* If we fail here, we expect to fallback to a global reset */
DRM_DEBUG_DRIVER("%sFailed to reset %s, ret=%d\n",
- engine->gt->uc.guc.execbuf_client ? "GuC " : "",
+ uses_guc ? "GuC " : "",
engine->name, ret);
goto out;
}
@@ -1195,7 +1230,7 @@ void intel_gt_handle_error(struct intel_gt *gt,
engine_mask &= INTEL_INFO(gt->i915)->engine_mask;
if (flags & I915_ERROR_CAPTURE) {
- i915_capture_error_state(gt->i915, engine_mask, msg);
+ i915_capture_error_state(gt->i915);
intel_gt_clear_error_registers(gt, engine_mask);
}
@@ -1288,10 +1323,10 @@ int intel_gt_terminally_wedged(struct intel_gt *gt)
if (!intel_gt_is_wedged(gt))
return 0;
- /* Reset still in progress? Maybe we will recover? */
- if (!test_bit(I915_RESET_BACKOFF, &gt->reset.flags))
+ if (intel_gt_has_init_error(gt))
return -EIO;
+ /* Reset still in progress? Maybe we will recover? */
if (wait_event_interruptible(gt->reset.queue,
!test_bit(I915_RESET_BACKOFF,
&gt->reset.flags)))
@@ -1313,6 +1348,9 @@ void intel_gt_init_reset(struct intel_gt *gt)
init_waitqueue_head(&gt->reset.queue);
mutex_init(&gt->reset.mutex);
init_srcu_struct(&gt->reset.backoff_srcu);
+
+ /* no GPU until we are ready! */
+ __set_bit(I915_WEDGED, &gt->reset.flags);
}
void intel_gt_fini_reset(struct intel_gt *gt)
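
The context lookup in mark_guilty() above follows the usual RCU plus
kref pattern for objects that can be freed concurrently. Distilled into
a generic sketch (the names here are placeholders, not i915 types):

	rcu_read_lock();
	obj = rcu_dereference(slot);		/* pointer may be stale */
	if (obj && !kref_get_unless_zero(&obj->ref))
		obj = NULL;			/* already being destroyed */
	rcu_read_unlock();

	if (obj) {
		/* ... safe to use obj under our own reference ... */
		kref_put(&obj->ref, obj_release);	/* hypothetical release fn */
	}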
diff --git a/drivers/gpu/drm/i915/gt/intel_ring_submission.c b/drivers/gpu/drm/i915/gt/intel_ring_submission.c
index 93026217c121..bc44fe8e5ffa 100644
--- a/drivers/gpu/drm/i915/gt/intel_ring_submission.c
+++ b/drivers/gpu/drm/i915/gt/intel_ring_submission.c
@@ -33,6 +33,7 @@
#include "gem/i915_gem_context.h"
+#include "gen6_ppgtt.h"
#include "i915_drv.h"
#include "i915_trace.h"
#include "intel_context.h"
@@ -362,6 +363,12 @@ gen7_render_ring_flush(struct i915_request *rq, u32 mode)
*/
flags |= PIPE_CONTROL_CS_STALL;
+ /*
+ * CS_STALL suggests at least a post-sync write.
+ */
+ flags |= PIPE_CONTROL_QW_WRITE;
+ flags |= PIPE_CONTROL_GLOBAL_GTT_IVB;
+
/* Just flush everything. Experiments have shown that reducing the
* number of bits based on the write domains has little performance
* impact.
@@ -380,13 +387,6 @@ gen7_render_ring_flush(struct i915_request *rq, u32 mode)
flags |= PIPE_CONTROL_CONST_CACHE_INVALIDATE;
flags |= PIPE_CONTROL_STATE_CACHE_INVALIDATE;
flags |= PIPE_CONTROL_MEDIA_STATE_CLEAR;
- /*
- * TLB invalidate requires a post-sync write.
- */
- flags |= PIPE_CONTROL_QW_WRITE;
- flags |= PIPE_CONTROL_GLOBAL_GTT_IVB;
-
- flags |= PIPE_CONTROL_STALL_AT_SCOREBOARD;
/* Workaround: we must issue a pipe_control with CS-stall bit
* set before a pipe_control command that has the state cache
@@ -454,7 +454,8 @@ static u32 *gen7_xcs_emit_breadcrumb(struct i915_request *rq, u32 *cs)
GEM_BUG_ON(i915_request_active_timeline(rq)->hwsp_ggtt != rq->engine->status_page.vma);
GEM_BUG_ON(offset_in_page(i915_request_active_timeline(rq)->hwsp_offset) != I915_GEM_HWS_SEQNO_ADDR);
- *cs++ = MI_FLUSH_DW | MI_FLUSH_DW_OP_STOREDW | MI_FLUSH_DW_STORE_INDEX;
+ *cs++ = MI_FLUSH_DW | MI_INVALIDATE_TLB |
+ MI_FLUSH_DW_OP_STOREDW | MI_FLUSH_DW_STORE_INDEX;
*cs++ = I915_GEM_HWS_SEQNO_ADDR | MI_FLUSH_DW_USE_GTT;
*cs++ = rq->fence.seqno;
@@ -496,14 +497,13 @@ static void set_hwstam(struct intel_engine_cs *engine, u32 mask)
static void set_hws_pga(struct intel_engine_cs *engine, phys_addr_t phys)
{
- struct drm_i915_private *dev_priv = engine->i915;
u32 addr;
addr = lower_32_bits(phys);
- if (INTEL_GEN(dev_priv) >= 4)
+ if (INTEL_GEN(engine->i915) >= 4)
addr |= (phys >> 28) & 0xf0;
- I915_WRITE(HWS_PGA, addr);
+ intel_uncore_write(engine->uncore, HWS_PGA, addr);
}
static struct page *status_page(struct intel_engine_cs *engine)
@@ -522,14 +522,13 @@ static void ring_setup_phys_status_page(struct intel_engine_cs *engine)
static void set_hwsp(struct intel_engine_cs *engine, u32 offset)
{
- struct drm_i915_private *dev_priv = engine->i915;
i915_reg_t hwsp;
/*
* The ring status page addresses are no longer next to the rest of
* the ring registers as of gen7.
*/
- if (IS_GEN(dev_priv, 7)) {
+ if (IS_GEN(engine->i915, 7)) {
switch (engine->id) {
/*
* No more rings exist on Gen7. Default case is only to shut up
@@ -551,14 +550,14 @@ static void set_hwsp(struct intel_engine_cs *engine, u32 offset)
hwsp = VEBOX_HWS_PGA_GEN7;
break;
}
- } else if (IS_GEN(dev_priv, 6)) {
+ } else if (IS_GEN(engine->i915, 6)) {
hwsp = RING_HWS_PGA_GEN6(engine->mmio_base);
} else {
hwsp = RING_HWS_PGA(engine->mmio_base);
}
- I915_WRITE(hwsp, offset);
- POSTING_READ(hwsp);
+ intel_uncore_write(engine->uncore, hwsp, offset);
+ intel_uncore_posting_read(engine->uncore, hwsp);
}
static void flush_cs_tlb(struct intel_engine_cs *engine)
@@ -633,8 +632,8 @@ static int xcs_resume(struct intel_engine_cs *engine)
struct intel_ring *ring = engine->legacy.ring;
int ret = 0;
- GEM_TRACE("%s: ring:{HEAD:%04x, TAIL:%04x}\n",
- engine->name, ring->head, ring->tail);
+ ENGINE_TRACE(engine, "ring:{HEAD:%04x, TAIL:%04x}\n",
+ ring->head, ring->tail);
intel_uncore_forcewake_get(engine->uncore, FORCEWAKE_ALL);
@@ -721,7 +720,7 @@ static int xcs_resume(struct intel_engine_cs *engine)
}
/* Papering over lost _interrupts_ immediately following the restart */
- intel_engine_queue_breadcrumbs(engine);
+ intel_engine_signal_breadcrumbs(engine);
out:
intel_uncore_forcewake_put(engine->uncore, FORCEWAKE_ALL);
@@ -747,10 +746,10 @@ static void reset_prepare(struct intel_engine_cs *engine)
*
* FIXME: Wa for more modern gens needs to be validated
*/
- GEM_TRACE("%s\n", engine->name);
+ ENGINE_TRACE(engine, "\n");
if (intel_engine_stop_cs(engine))
- GEM_TRACE("%s: timed out on STOP_RING\n", engine->name);
+ ENGINE_TRACE(engine, "timed out on STOP_RING\n");
intel_uncore_write_fw(uncore,
RING_HEAD(base),
@@ -766,12 +765,11 @@ static void reset_prepare(struct intel_engine_cs *engine)
/* Check acts as a post */
if (intel_uncore_read_fw(uncore, RING_HEAD(base)))
- GEM_TRACE("%s: ring head [%x] not parked\n",
- engine->name,
- intel_uncore_read_fw(uncore, RING_HEAD(base)));
+ ENGINE_TRACE(engine, "ring head [%x] not parked\n",
+ intel_uncore_read_fw(uncore, RING_HEAD(base)));
}
-static void reset_ring(struct intel_engine_cs *engine, bool stalled)
+static void reset_rewind(struct intel_engine_cs *engine, bool stalled)
{
struct i915_request *pos, *rq;
unsigned long flags;
@@ -842,7 +840,8 @@ static void reset_finish(struct intel_engine_cs *engine)
static int rcs_resume(struct intel_engine_cs *engine)
{
- struct drm_i915_private *dev_priv = engine->i915;
+ struct drm_i915_private *i915 = engine->i915;
+ struct intel_uncore *uncore = engine->uncore;
/*
* Disable CONSTANT_BUFFER before it is loaded from the context
@@ -854,13 +853,14 @@ static int rcs_resume(struct intel_engine_cs *engine)
* they are already accustomed to from before contexts were
* enabled.
*/
- if (IS_GEN(dev_priv, 4))
- I915_WRITE(ECOSKPD,
+ if (IS_GEN(i915, 4))
+ intel_uncore_write(uncore, ECOSKPD,
_MASKED_BIT_ENABLE(ECO_CONSTANT_BUFFER_SR_DISABLE));
/* WaTimedSingleVertexDispatch:cl,bw,ctg,elk,ilk,snb */
- if (IS_GEN_RANGE(dev_priv, 4, 6))
- I915_WRITE(MI_MODE, _MASKED_BIT_ENABLE(VS_TIMER_DISPATCH));
+ if (IS_GEN_RANGE(i915, 4, 6))
+ intel_uncore_write(uncore, MI_MODE,
+ _MASKED_BIT_ENABLE(VS_TIMER_DISPATCH));
/* We need to disable the AsyncFlip performance optimisations in order
* to use MI_WAIT_FOR_EVENT within the CS. It should already be
@@ -868,38 +868,40 @@ static int rcs_resume(struct intel_engine_cs *engine)
*
* WaDisableAsyncFlipPerfMode:snb,ivb,hsw,vlv
*/
- if (IS_GEN_RANGE(dev_priv, 6, 7))
- I915_WRITE(MI_MODE, _MASKED_BIT_ENABLE(ASYNC_FLIP_PERF_DISABLE));
+ if (IS_GEN_RANGE(i915, 6, 7))
+ intel_uncore_write(uncore, MI_MODE,
+ _MASKED_BIT_ENABLE(ASYNC_FLIP_PERF_DISABLE));
/* Required for the hardware to program scanline values for waiting */
/* WaEnableFlushTlbInvalidationMode:snb */
- if (IS_GEN(dev_priv, 6))
- I915_WRITE(GFX_MODE,
+ if (IS_GEN(i915, 6))
+ intel_uncore_write(uncore, GFX_MODE,
_MASKED_BIT_ENABLE(GFX_TLB_INVALIDATE_EXPLICIT));
/* WaBCSVCSTlbInvalidationMode:ivb,vlv,hsw */
- if (IS_GEN(dev_priv, 7))
- I915_WRITE(GFX_MODE_GEN7,
+ if (IS_GEN(i915, 7))
+ intel_uncore_write(uncore, GFX_MODE_GEN7,
_MASKED_BIT_ENABLE(GFX_TLB_INVALIDATE_EXPLICIT) |
_MASKED_BIT_ENABLE(GFX_REPLAY_MODE));
- if (IS_GEN(dev_priv, 6)) {
+ if (IS_GEN(i915, 6)) {
/* From the Sandybridge PRM, volume 1 part 3, page 24:
* "If this bit is set, STCunit will have LRA as replacement
* policy. [...] This bit must be reset. LRA replacement
* policy is not supported."
*/
- I915_WRITE(CACHE_MODE_0,
+ intel_uncore_write(uncore, CACHE_MODE_0,
_MASKED_BIT_DISABLE(CM0_STC_EVICT_DISABLE_LRA_SNB));
}
- if (IS_GEN_RANGE(dev_priv, 6, 7))
- I915_WRITE(INSTPM, _MASKED_BIT_ENABLE(INSTPM_FORCE_ORDERING));
+ if (IS_GEN_RANGE(i915, 6, 7))
+ intel_uncore_write(uncore, INSTPM,
+ _MASKED_BIT_ENABLE(INSTPM_FORCE_ORDERING));
return xcs_resume(engine);
}
-static void cancel_requests(struct intel_engine_cs *engine)
+static void reset_cancel(struct intel_engine_cs *engine)
{
struct i915_request *request;
unsigned long flags;
@@ -1318,6 +1320,8 @@ static int ring_context_alloc(struct intel_context *ce)
return PTR_ERR(vma);
ce->state = vma;
+ if (engine->default_state)
+ __set_bit(CONTEXT_VALID_BIT, &ce->flags);
}
return 0;
@@ -1325,26 +1329,12 @@ static int ring_context_alloc(struct intel_context *ce)
static int ring_context_pin(struct intel_context *ce)
{
- int err;
-
- err = intel_context_active_acquire(ce);
- if (err)
- return err;
-
- err = __context_pin_ppgtt(ce);
- if (err)
- goto err_active;
-
- return 0;
-
-err_active:
- intel_context_active_release(ce);
- return err;
+ return __context_pin_ppgtt(ce);
}
static void ring_context_reset(struct intel_context *ce)
{
- intel_ring_reset(ce->ring, 0);
+ intel_ring_reset(ce->ring, ce->ring->emit);
}
static const struct intel_context_ops ring_context_ops = {
@@ -1360,46 +1350,38 @@ static const struct intel_context_ops ring_context_ops = {
.destroy = ring_context_destroy,
};
-static int load_pd_dir(struct i915_request *rq, const struct i915_ppgtt *ppgtt)
+static int load_pd_dir(struct i915_request *rq,
+ const struct i915_ppgtt *ppgtt,
+ u32 valid)
{
const struct intel_engine_cs * const engine = rq->engine;
u32 *cs;
- cs = intel_ring_begin(rq, 6);
+ cs = intel_ring_begin(rq, 12);
if (IS_ERR(cs))
return PTR_ERR(cs);
*cs++ = MI_LOAD_REGISTER_IMM(1);
*cs++ = i915_mmio_reg_offset(RING_PP_DIR_DCLV(engine->mmio_base));
- *cs++ = PP_DIR_DCLV_2G;
+ *cs++ = valid;
*cs++ = MI_LOAD_REGISTER_IMM(1);
*cs++ = i915_mmio_reg_offset(RING_PP_DIR_BASE(engine->mmio_base));
*cs++ = px_base(ppgtt->pd)->ggtt_offset << 10;
- intel_ring_advance(rq, cs);
-
- return 0;
-}
-
-static int flush_pd_dir(struct i915_request *rq)
-{
- const struct intel_engine_cs * const engine = rq->engine;
- u32 *cs;
-
- cs = intel_ring_begin(rq, 4);
- if (IS_ERR(cs))
- return PTR_ERR(cs);
-
- /* Stall until the page table load is complete */
+ /* Stall until the page table load is complete? */
*cs++ = MI_STORE_REGISTER_MEM | MI_SRM_LRM_GLOBAL_GTT;
*cs++ = i915_mmio_reg_offset(RING_PP_DIR_BASE(engine->mmio_base));
- *cs++ = intel_gt_scratch_offset(rq->engine->gt,
+ *cs++ = intel_gt_scratch_offset(engine->gt,
INTEL_GT_SCRATCH_FIELD_DEFAULT);
- *cs++ = MI_NOOP;
+
+ *cs++ = MI_LOAD_REGISTER_IMM(1);
+ *cs++ = i915_mmio_reg_offset(RING_INSTPM(engine->mmio_base));
+ *cs++ = _MASKED_BIT_ENABLE(INSTPM_TLB_INVALIDATE);
intel_ring_advance(rq, cs);
- return 0;
+
+ return rq->engine->emit_flush(rq, EMIT_FLUSH);
}
static inline int mi_set_context(struct i915_request *rq, u32 flags)
@@ -1477,7 +1459,7 @@ static inline int mi_set_context(struct i915_request *rq, u32 flags)
*cs++ = MI_NOOP;
*cs++ = MI_SET_CONTEXT;
- *cs++ = i915_ggtt_offset(rq->hw_context->state) | flags;
+ *cs++ = i915_ggtt_offset(rq->context->state) | flags;
/*
* w/a: MI_SET_CONTEXT must always be followed by MI_NOOP
* WaMiSetContext_Hang:snb,ivb,vlv
@@ -1547,10 +1529,10 @@ static int remap_l3_slice(struct i915_request *rq, int slice)
static int remap_l3(struct i915_request *rq)
{
- struct i915_gem_context *ctx = rq->gem_context;
+ struct i915_gem_context *ctx = i915_request_gem_context(rq);
int i, err;
- if (!ctx->remap_slice)
+ if (!ctx || !ctx->remap_slice)
return 0;
for (i = 0; i < MAX_L3_SLICES; i++) {
@@ -1566,19 +1548,42 @@ static int remap_l3(struct i915_request *rq)
return 0;
}
+static int switch_mm(struct i915_request *rq, struct i915_address_space *vm)
+{
+ int ret;
+
+ if (!vm)
+ return 0;
+
+ ret = rq->engine->emit_flush(rq, EMIT_FLUSH);
+ if (ret)
+ return ret;
+
+ /*
+ * Not only do we need a full barrier (post-sync write) after
+ * invalidating the TLBs, but we need to wait a little bit
+ * longer. Whether this is merely delaying us, or the
+ * subsequent flush is a key part of serialising with the
+ * post-sync op, this extra pass appears vital before a
+ * mm switch!
+ */
+ ret = load_pd_dir(rq, i915_vm_to_ppgtt(vm), PP_DIR_DCLV_2G);
+ if (ret)
+ return ret;
+
+ return rq->engine->emit_flush(rq, EMIT_INVALIDATE);
+}
+
static int switch_context(struct i915_request *rq)
{
- struct intel_context *ce = rq->hw_context;
- struct i915_address_space *vm = vm_alias(ce);
+ struct intel_context *ce = rq->context;
int ret;
GEM_BUG_ON(HAS_EXECLISTS(rq->i915));
- if (vm) {
- ret = load_pd_dir(rq, i915_vm_to_ppgtt(vm));
- if (ret)
- return ret;
- }
+ ret = switch_mm(rq, vm_alias(ce));
+ if (ret)
+ return ret;
if (ce->state) {
u32 flags;
@@ -1590,7 +1595,7 @@ static int switch_context(struct i915_request *rq)
BUILD_BUG_ON(HSW_MI_RS_RESTORE_STATE_EN != MI_RESTORE_EXT_STATE_EN);
flags = MI_SAVE_EXT_STATE_EN | MI_MM_SPACE_GTT;
- if (!i915_gem_context_is_kernel(rq->gem_context))
+ if (test_bit(CONTEXT_VALID_BIT, &ce->flags))
flags |= MI_RESTORE_EXT_STATE_EN;
else
flags |= MI_RESTORE_INHIBIT;
@@ -1600,34 +1605,6 @@ static int switch_context(struct i915_request *rq)
return ret;
}
- if (vm) {
- struct intel_engine_cs *engine = rq->engine;
-
- ret = engine->emit_flush(rq, EMIT_INVALIDATE);
- if (ret)
- return ret;
-
- ret = flush_pd_dir(rq);
- if (ret)
- return ret;
-
- /*
- * Not only do we need a full barrier (post-sync write) after
- * invalidating the TLBs, but we need to wait a little bit
- * longer. Whether this is merely delaying us, or the
- * subsequent flush is a key part of serialising with the
- * post-sync op, this extra pass appears vital before a
- * mm switch!
- */
- ret = engine->emit_flush(rq, EMIT_INVALIDATE);
- if (ret)
- return ret;
-
- ret = engine->emit_flush(rq, EMIT_FLUSH);
- if (ret)
- return ret;
- }
-
ret = remap_l3(rq);
if (ret)
return ret;
@@ -1639,7 +1616,7 @@ static int ring_request_alloc(struct i915_request *request)
{
int ret;
- GEM_BUG_ON(!intel_context_is_pinned(request->hw_context));
+ GEM_BUG_ON(!intel_context_is_pinned(request->context));
GEM_BUG_ON(i915_request_timeline(request)->has_initial_breadcrumb);
/*
@@ -1795,7 +1772,6 @@ static int gen6_ring_flush(struct i915_request *rq, u32 mode)
static void i9xx_set_default_submission(struct intel_engine_cs *engine)
{
engine->submit_request = i9xx_submit_request;
- engine->cancel_requests = cancel_requests;
engine->park = NULL;
engine->unpark = NULL;
@@ -1807,7 +1783,7 @@ static void gen6_bsd_set_default_submission(struct intel_engine_cs *engine)
engine->submit_request = gen6_bsd_submit_request;
}
-static void ring_destroy(struct intel_engine_cs *engine)
+static void ring_release(struct intel_engine_cs *engine)
{
struct drm_i915_private *dev_priv = engine->i915;
@@ -1821,8 +1797,6 @@ static void ring_destroy(struct intel_engine_cs *engine)
intel_timeline_unpin(engine->legacy.timeline);
intel_timeline_put(engine->legacy.timeline);
-
- kfree(engine);
}
static void setup_irq(struct intel_engine_cs *engine)
@@ -1853,11 +1827,10 @@ static void setup_common(struct intel_engine_cs *engine)
setup_irq(engine);
- engine->destroy = ring_destroy;
-
engine->resume = xcs_resume;
engine->reset.prepare = reset_prepare;
- engine->reset.reset = reset_ring;
+ engine->reset.rewind = reset_rewind;
+ engine->reset.cancel = reset_cancel;
engine->reset.finish = reset_finish;
engine->cops = &ring_context_ops;
@@ -1968,6 +1941,10 @@ static void setup_vecs(struct intel_engine_cs *engine)
int intel_ring_submission_setup(struct intel_engine_cs *engine)
{
+ struct intel_timeline *timeline;
+ struct intel_ring *ring;
+ int err;
+
setup_common(engine);
switch (engine->class) {
@@ -1988,15 +1965,6 @@ int intel_ring_submission_setup(struct intel_engine_cs *engine)
return -ENODEV;
}
- return 0;
-}
-
-int intel_ring_submission_init(struct intel_engine_cs *engine)
-{
- struct intel_timeline *timeline;
- struct intel_ring *ring;
- int err;
-
timeline = intel_timeline_create(engine->gt, engine->status_page.vma);
if (IS_ERR(timeline)) {
err = PTR_ERR(timeline);
@@ -2022,16 +1990,13 @@ int intel_ring_submission_init(struct intel_engine_cs *engine)
engine->legacy.ring = ring;
engine->legacy.timeline = timeline;
- err = intel_engine_init_common(engine);
- if (err)
- goto err_ring_unpin;
-
GEM_BUG_ON(timeline->hwsp_ggtt != engine->status_page.vma);
+ /* Finally, take ownership and responsibility for cleanup! */
+ engine->release = ring_release;
+
return 0;
-err_ring_unpin:
- intel_ring_unpin(ring);
err_ring:
intel_ring_put(ring);
err_timeline_unpin:
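
Pulling the ordering out of switch_context() and into switch_mm() plus
load_pd_dir() yields the following command sequence for a ppGTT switch
(a condensed sketch of the code above; emit_lri/emit_srm are shorthand,
not real helpers):

	emit_flush(EMIT_FLUSH);			/* drain writes via the old mm */
	emit_lri(RING_PP_DIR_DCLV, valid);	/* enable the PD entries */
	emit_lri(RING_PP_DIR_BASE, pd_offset);	/* point at the new PD */
	emit_srm(RING_PP_DIR_BASE, scratch);	/* stalling read-back (post) */
	emit_lri(RING_INSTPM, TLB_INVALIDATE);	/* force TLB invalidation */
	emit_flush(EMIT_FLUSH);			/* serialise with the post-sync op */
	emit_flush(EMIT_INVALIDATE);		/* then refill caches for the new mm */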
diff --git a/drivers/gpu/drm/i915/gt/intel_rps.c b/drivers/gpu/drm/i915/gt/intel_rps.c
index 20d6ee148afc..d2a3d935d186 100644
--- a/drivers/gpu/drm/i915/gt/intel_rps.c
+++ b/drivers/gpu/drm/i915/gt/intel_rps.c
@@ -37,6 +37,11 @@ static u32 rps_pm_sanitize_mask(struct intel_rps *rps, u32 mask)
return mask & ~rps->pm_intrmsk_mbz;
}
+static inline void set(struct intel_uncore *uncore, i915_reg_t reg, u32 val)
+{
+ intel_uncore_write_fw(uncore, reg, val);
+}
+
static u32 rps_pm_mask(struct intel_rps *rps, u8 val)
{
u32 mask = 0;
@@ -78,8 +83,7 @@ static void rps_enable_interrupts(struct intel_rps *rps)
gen6_gt_pm_enable_irq(gt, rps->pm_events);
spin_unlock_irq(&gt->irq_lock);
- intel_uncore_write(gt->uncore, GEN6_PMINTRMSK,
- rps_pm_mask(rps, rps->cur_freq));
+ set(gt->uncore, GEN6_PMINTRMSK, rps_pm_mask(rps, rps->cur_freq));
}
static void gen6_rps_reset_interrupts(struct intel_rps *rps)
@@ -113,8 +117,7 @@ static void rps_disable_interrupts(struct intel_rps *rps)
rps->pm_events = 0;
- intel_uncore_write(gt->uncore, GEN6_PMINTRMSK,
- rps_pm_sanitize_mask(rps, ~0u));
+ set(gt->uncore, GEN6_PMINTRMSK, rps_pm_sanitize_mask(rps, ~0u));
spin_lock_irq(&gt->irq_lock);
gen6_gt_pm_disable_irq(gt, GEN6_PM_RPS_EVENTS);
@@ -573,25 +576,21 @@ static void rps_set_power(struct intel_rps *rps, int new_power)
if (IS_VALLEYVIEW(i915))
goto skip_hw_write;
- intel_uncore_write(uncore, GEN6_RP_UP_EI,
- GT_INTERVAL_FROM_US(i915, ei_up));
- intel_uncore_write(uncore, GEN6_RP_UP_THRESHOLD,
- GT_INTERVAL_FROM_US(i915,
- ei_up * threshold_up / 100));
-
- intel_uncore_write(uncore, GEN6_RP_DOWN_EI,
- GT_INTERVAL_FROM_US(i915, ei_down));
- intel_uncore_write(uncore, GEN6_RP_DOWN_THRESHOLD,
- GT_INTERVAL_FROM_US(i915,
- ei_down * threshold_down / 100));
-
- intel_uncore_write(uncore, GEN6_RP_CONTROL,
- (INTEL_GEN(i915) > 9 ? 0 : GEN6_RP_MEDIA_TURBO) |
- GEN6_RP_MEDIA_HW_NORMAL_MODE |
- GEN6_RP_MEDIA_IS_GFX |
- GEN6_RP_ENABLE |
- GEN6_RP_UP_BUSY_AVG |
- GEN6_RP_DOWN_IDLE_AVG);
+ set(uncore, GEN6_RP_UP_EI, GT_INTERVAL_FROM_US(i915, ei_up));
+ set(uncore, GEN6_RP_UP_THRESHOLD,
+ GT_INTERVAL_FROM_US(i915, ei_up * threshold_up / 100));
+
+ set(uncore, GEN6_RP_DOWN_EI, GT_INTERVAL_FROM_US(i915, ei_down));
+ set(uncore, GEN6_RP_DOWN_THRESHOLD,
+ GT_INTERVAL_FROM_US(i915, ei_down * threshold_down / 100));
+
+ set(uncore, GEN6_RP_CONTROL,
+ (INTEL_GEN(i915) > 9 ? 0 : GEN6_RP_MEDIA_TURBO) |
+ GEN6_RP_MEDIA_HW_NORMAL_MODE |
+ GEN6_RP_MEDIA_IS_GFX |
+ GEN6_RP_ENABLE |
+ GEN6_RP_UP_BUSY_AVG |
+ GEN6_RP_DOWN_IDLE_AVG);
skip_hw_write:
rps->power.mode = new_power;
@@ -666,7 +665,7 @@ static int gen6_rps_set(struct intel_rps *rps, u8 val)
swreq = (GEN6_FREQUENCY(val) |
GEN6_OFFSET(0) |
GEN6_AGGRESSIVE_TURBO);
- intel_uncore_write(uncore, GEN6_RPNSWREQ, swreq);
+ set(uncore, GEN6_RPNSWREQ, swreq);
return 0;
}
@@ -683,7 +682,7 @@ static int vlv_rps_set(struct intel_rps *rps, u8 val)
return err;
}
-static int rps_set(struct intel_rps *rps, u8 val)
+static int rps_set(struct intel_rps *rps, u8 val, bool update)
{
struct drm_i915_private *i915 = rps_to_i915(rps);
int err;
@@ -701,7 +700,8 @@ static int rps_set(struct intel_rps *rps, u8 val)
if (err)
return err;
- gen6_rps_set_thresholds(rps, val);
+ if (update)
+ gen6_rps_set_thresholds(rps, val);
rps->last_freq = val;
return 0;
@@ -761,7 +761,7 @@ void intel_rps_park(struct intel_rps *rps)
* power than the render powerwell.
*/
intel_uncore_forcewake_get(rps_to_uncore(rps), FORCEWAKE_MEDIA);
- rps_set(rps, rps->idle_freq);
+ rps_set(rps, rps->idle_freq, false);
intel_uncore_forcewake_put(rps_to_uncore(rps), FORCEWAKE_MEDIA);
}
@@ -777,7 +777,7 @@ void intel_rps_boost(struct i915_request *rq)
spin_lock_irqsave(&rq->lock, flags);
if (!i915_request_has_waitboost(rq) &&
!dma_fence_is_signaled_locked(&rq->fence)) {
- rq->flags |= I915_REQUEST_WAITBOOST;
+ set_bit(I915_FENCE_FLAG_BOOST, &rq->fence.flags);
if (!atomic_fetch_inc(&rps->num_waiters) &&
READ_ONCE(rps->cur_freq) < rps->boost_freq)
@@ -790,14 +790,16 @@ void intel_rps_boost(struct i915_request *rq)
int intel_rps_set(struct intel_rps *rps, u8 val)
{
- int err = 0;
+ int err;
lockdep_assert_held(&rps->lock);
GEM_BUG_ON(val > rps->max_freq);
GEM_BUG_ON(val < rps->min_freq);
if (rps->active) {
- err = rps_set(rps, val);
+ err = rps_set(rps, val, true);
+ if (err)
+ return err;
/*
* Make sure we continue to get interrupts
@@ -806,18 +808,15 @@ int intel_rps_set(struct intel_rps *rps, u8 val)
if (INTEL_GEN(rps_to_i915(rps)) >= 6) {
struct intel_uncore *uncore = rps_to_uncore(rps);
- intel_uncore_write(uncore, GEN6_RP_INTERRUPT_LIMITS,
- rps_limits(rps, val));
+ set(uncore,
+ GEN6_RP_INTERRUPT_LIMITS, rps_limits(rps, val));
- intel_uncore_write(uncore, GEN6_PMINTRMSK,
- rps_pm_mask(rps, val));
+ set(uncore, GEN6_PMINTRMSK, rps_pm_mask(rps, val));
}
}
- if (err == 0)
- rps->cur_freq = val;
-
- return err;
+ rps->cur_freq = val;
+ return 0;
}
static void gen6_rps_init(struct intel_rps *rps)
@@ -878,7 +877,7 @@ static bool rps_reset(struct intel_rps *rps)
rps->power.mode = -1;
rps->last_freq = -1;
- if (rps_set(rps, rps->min_freq)) {
+ if (rps_set(rps, rps->min_freq, true)) {
DRM_ERROR("Failed to reset RPS to initial values\n");
return false;
}
@@ -1201,7 +1200,7 @@ void intel_rps_enable(struct intel_rps *rps)
static void gen6_rps_disable(struct intel_rps *rps)
{
- intel_uncore_write(rps_to_uncore(rps), GEN6_RP_CONTROL, 0);
+ set(rps_to_uncore(rps), GEN6_RP_CONTROL, 0);
}
void intel_rps_disable(struct intel_rps *rps)
@@ -1566,7 +1565,7 @@ void gen6_rps_irq_handler(struct intel_rps *rps, u32 pm_iir)
return;
if (pm_iir & PM_VEBOX_USER_INTERRUPT)
- intel_engine_breadcrumbs_irq(gt->engine[VECS0]);
+ intel_engine_signal_breadcrumbs(gt->engine[VECS0]);
if (pm_iir & PM_VEBOX_CS_ERROR_INTERRUPT)
DRM_DEBUG("Command parser error, pm_iir 0x%08x\n", pm_iir);
@@ -1663,23 +1662,53 @@ void intel_rps_init(struct intel_rps *rps)
if (INTEL_GEN(i915) <= 7)
rps->pm_intrmsk_mbz |= GEN6_PM_RP_UP_EI_EXPIRED;
- if (INTEL_GEN(i915) >= 8)
+ if (INTEL_GEN(i915) >= 8 && INTEL_GEN(i915) < 11)
rps->pm_intrmsk_mbz |= GEN8_PMINTR_DISABLE_REDIRECT_TO_GUC;
}
-u32 intel_get_cagf(struct intel_rps *rps, u32 rpstat)
+u32 intel_rps_get_cagf(struct intel_rps *rps, u32 rpstat)
{
struct drm_i915_private *i915 = rps_to_i915(rps);
u32 cagf;
- if (INTEL_GEN(i915) >= 9)
+ if (IS_VALLEYVIEW(i915) || IS_CHERRYVIEW(i915))
+ cagf = (rpstat >> 8) & 0xff;
+ else if (INTEL_GEN(i915) >= 9)
cagf = (rpstat & GEN9_CAGF_MASK) >> GEN9_CAGF_SHIFT;
else if (IS_HASWELL(i915) || IS_BROADWELL(i915))
cagf = (rpstat & HSW_CAGF_MASK) >> HSW_CAGF_SHIFT;
else
cagf = (rpstat & GEN6_CAGF_MASK) >> GEN6_CAGF_SHIFT;
- return cagf;
+ return cagf;
+}
+
+static u32 read_cagf(struct intel_rps *rps)
+{
+ struct drm_i915_private *i915 = rps_to_i915(rps);
+ u32 freq;
+
+ if (IS_VALLEYVIEW(i915) || IS_CHERRYVIEW(i915)) {
+ vlv_punit_get(i915);
+ freq = vlv_punit_read(i915, PUNIT_REG_GPU_FREQ_STS);
+ vlv_punit_put(i915);
+ } else {
+ freq = intel_uncore_read(rps_to_gt(rps)->uncore, GEN6_RPSTAT1);
+ }
+
+ return intel_rps_get_cagf(rps, freq);
+}
+
+u32 intel_rps_read_actual_frequency(struct intel_rps *rps)
+{
+ struct intel_runtime_pm *rpm = rps_to_gt(rps)->uncore->rpm;
+ intel_wakeref_t wakeref;
+ u32 freq = 0;
+
+ with_intel_runtime_pm_if_in_use(rpm, wakeref)
+ freq = intel_gpu_freq(rps, read_cagf(rps));
+
+ return freq;
}
/* External interface for intel_ips.ko */
@@ -1715,6 +1744,7 @@ void intel_rps_driver_register(struct intel_rps *rps)
* set up, to avoid intel-ips sneaking in and reading bogus values.
*/
if (IS_GEN(gt->i915, 5)) {
+ GEM_BUG_ON(ips_mchdev);
rcu_assign_pointer(ips_mchdev, gt->i915);
ips_ping_for_i915_load();
}
@@ -1722,7 +1752,8 @@ void intel_rps_driver_register(struct intel_rps *rps)
void intel_rps_driver_unregister(struct intel_rps *rps)
{
- rcu_assign_pointer(ips_mchdev, NULL);
+ if (rcu_access_pointer(ips_mchdev) == rps_to_i915(rps))
+ rcu_assign_pointer(ips_mchdev, NULL);
}
static struct drm_i915_private *mchdev_get(void)
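
The new intel_rps_read_actual_frequency() only touches the hardware if
the device is already awake, and returns 0 otherwise. A hedged usage
sketch (the debugfs seq_file context is an assumption):

	u32 freq = intel_rps_read_actual_frequency(&gt->rps);

	if (freq)
		seq_printf(m, "Actual GPU frequency: %u MHz\n", freq);
	else
		seq_puts(m, "GPU idle (runtime suspended)\n");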
diff --git a/drivers/gpu/drm/i915/gt/intel_rps.h b/drivers/gpu/drm/i915/gt/intel_rps.h
index 9518c66c9792..dfa98194f3b2 100644
--- a/drivers/gpu/drm/i915/gt/intel_rps.h
+++ b/drivers/gpu/drm/i915/gt/intel_rps.h
@@ -29,7 +29,8 @@ void intel_rps_mark_interactive(struct intel_rps *rps, bool interactive);
int intel_gpu_freq(struct intel_rps *rps, int val);
int intel_freq_opcode(struct intel_rps *rps, int val);
-u32 intel_get_cagf(struct intel_rps *rps, u32 rpstat1);
+u32 intel_rps_get_cagf(struct intel_rps *rps, u32 rpstat1);
+u32 intel_rps_read_actual_frequency(struct intel_rps *rps);
void gen5_rps_irq_handler(struct intel_rps *rps);
void gen6_rps_irq_handler(struct intel_rps *rps, u32 pm_iir);
diff --git a/drivers/gpu/drm/i915/gt/intel_timeline.c b/drivers/gpu/drm/i915/gt/intel_timeline.c
index 649798c184fb..87716529cd2f 100644
--- a/drivers/gpu/drm/i915/gt/intel_timeline.c
+++ b/drivers/gpu/drm/i915/gt/intel_timeline.c
@@ -15,6 +15,9 @@
#define ptr_set_bit(ptr, bit) ((typeof(ptr))((unsigned long)(ptr) | BIT(bit)))
#define ptr_test_bit(ptr, bit) ((unsigned long)(ptr) & BIT(bit))
+#define CACHELINE_BITS 6
+#define CACHELINE_FREE CACHELINE_BITS
+
struct intel_timeline_hwsp {
struct intel_gt *gt;
struct intel_gt_timelines *gt_timelines;
@@ -23,14 +26,6 @@ struct intel_timeline_hwsp {
u64 free_bitmap;
};
-struct intel_timeline_cacheline {
- struct i915_active active;
- struct intel_timeline_hwsp *hwsp;
- void *vaddr;
-#define CACHELINE_BITS 6
-#define CACHELINE_FREE CACHELINE_BITS
-};
-
static struct i915_vma *__hwsp_alloc(struct intel_gt *gt)
{
struct drm_i915_private *i915 = gt->i915;
@@ -133,7 +128,7 @@ static void __idle_cacheline_free(struct intel_timeline_cacheline *cl)
__idle_hwsp_free(cl->hwsp, ptr_unmask_bits(cl->vaddr, CACHELINE_BITS));
i915_active_fini(&cl->active);
- kfree(cl);
+ kfree_rcu(cl, rcu);
}
__i915_active_call
@@ -254,7 +249,7 @@ int intel_timeline_init(struct intel_timeline *timeline,
mutex_init(&timeline->mutex);
- INIT_ACTIVE_FENCE(&timeline->last_request, &timeline->mutex);
+ INIT_ACTIVE_FENCE(&timeline->last_request);
INIT_LIST_HEAD(&timeline->requests);
i915_syncmap_init(&timeline->sync);
@@ -262,7 +257,7 @@ int intel_timeline_init(struct intel_timeline *timeline,
return 0;
}
-static void timelines_init(struct intel_gt *gt)
+void intel_gt_init_timelines(struct intel_gt *gt)
{
struct intel_gt_timelines *timelines = &gt->timelines;
@@ -273,11 +268,6 @@ static void timelines_init(struct intel_gt *gt)
INIT_LIST_HEAD(&timelines->hwsp_free_list);
}
-void intel_timelines_init(struct drm_i915_private *i915)
-{
- timelines_init(&i915->gt);
-}
-
void intel_timeline_fini(struct intel_timeline *timeline)
{
GEM_BUG_ON(atomic_read(&timeline->pin_count));
@@ -338,7 +328,6 @@ int intel_timeline_pin(struct intel_timeline *tl)
void intel_timeline_enter(struct intel_timeline *tl)
{
struct intel_gt_timelines *timelines = &tl->gt->timelines;
- unsigned long flags;
/*
* Pretend we are serialised by the timeline->mutex.
@@ -359,21 +348,19 @@ void intel_timeline_enter(struct intel_timeline *tl)
* use atomic to manipulate tl->active_count.
*/
lockdep_assert_held(&tl->mutex);
- GEM_BUG_ON(!atomic_read(&tl->pin_count));
if (atomic_add_unless(&tl->active_count, 1, 0))
return;
- spin_lock_irqsave(&timelines->lock, flags);
+ spin_lock(&timelines->lock);
if (!atomic_fetch_inc(&tl->active_count))
list_add_tail(&tl->link, &timelines->active_list);
- spin_unlock_irqrestore(&timelines->lock, flags);
+ spin_unlock(&timelines->lock);
}
void intel_timeline_exit(struct intel_timeline *tl)
{
struct intel_gt_timelines *timelines = &tl->gt->timelines;
- unsigned long flags;
/* See intel_timeline_enter() */
lockdep_assert_held(&tl->mutex);
@@ -382,10 +369,10 @@ void intel_timeline_exit(struct intel_timeline *tl)
if (atomic_add_unless(&tl->active_count, -1, 1))
return;
- spin_lock_irqsave(&timelines->lock, flags);
+ spin_lock(&timelines->lock);
if (atomic_dec_and_test(&tl->active_count))
list_del(&tl->link);
- spin_unlock_irqrestore(&timelines->lock, flags);
+ spin_unlock(&timelines->lock);
/*
* Since this timeline is idle, all barriers upon which we were waiting
@@ -521,46 +508,35 @@ int intel_timeline_read_hwsp(struct i915_request *from,
struct i915_request *to,
u32 *hwsp)
{
- struct intel_timeline *tl;
+ struct intel_timeline_cacheline *cl;
int err;
+ GEM_BUG_ON(!rcu_access_pointer(from->hwsp_cacheline));
+
rcu_read_lock();
- tl = rcu_dereference(from->timeline);
- if (i915_request_completed(from) || !kref_get_unless_zero(&tl->kref))
- tl = NULL;
+ cl = rcu_dereference(from->hwsp_cacheline);
+ if (unlikely(!i915_active_acquire_if_busy(&cl->active)))
+ goto unlock; /* seqno wrapped and completed! */
+ if (unlikely(i915_request_completed(from)))
+ goto release;
rcu_read_unlock();
- if (!tl) /* already completed */
- return 1;
-
- GEM_BUG_ON(rcu_access_pointer(to->timeline) == tl);
-
- err = -EBUSY;
- if (mutex_trylock(&tl->mutex)) {
- struct intel_timeline_cacheline *cl = from->hwsp_cacheline;
- if (i915_request_completed(from)) {
- err = 1;
- goto unlock;
- }
+ err = cacheline_ref(cl, to);
+ if (err)
+ goto out;
- err = cacheline_ref(cl, to);
- if (err)
- goto unlock;
+ *hwsp = i915_ggtt_offset(cl->hwsp->vma) +
+ ptr_unmask_bits(cl->vaddr, CACHELINE_BITS) * CACHELINE_BYTES;
- if (likely(cl == tl->hwsp_cacheline)) {
- *hwsp = tl->hwsp_offset;
- } else { /* across a seqno wrap, recover the original offset */
- *hwsp = i915_ggtt_offset(cl->hwsp->vma) +
- ptr_unmask_bits(cl->vaddr, CACHELINE_BITS) *
- CACHELINE_BYTES;
- }
+out:
+ i915_active_release(&cl->active);
+ return err;
+release:
+ i915_active_release(&cl->active);
unlock:
- mutex_unlock(&tl->mutex);
- }
- intel_timeline_put(tl);
-
- return err;
+ rcu_read_unlock();
+ return 1;
}
void intel_timeline_unpin(struct intel_timeline *tl)
@@ -583,7 +559,7 @@ void __intel_timeline_free(struct kref *kref)
kfree_rcu(timeline, rcu);
}
-static void timelines_fini(struct intel_gt *gt)
+void intel_gt_fini_timelines(struct intel_gt *gt)
{
struct intel_gt_timelines *timelines = &gt->timelines;
@@ -591,11 +567,6 @@ static void timelines_fini(struct intel_gt *gt)
GEM_BUG_ON(!list_empty(&timelines->hwsp_free_list));
}
-void intel_timelines_fini(struct drm_i915_private *i915)
-{
- timelines_fini(&i915->gt);
-}
-
#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
#include "gt/selftests/mock_timeline.c"
#include "gt/selftest_timeline.c"
diff --git a/drivers/gpu/drm/i915/gt/intel_timeline.h b/drivers/gpu/drm/i915/gt/intel_timeline.h
index f583af1ba18d..f5b7eade3809 100644
--- a/drivers/gpu/drm/i915/gt/intel_timeline.h
+++ b/drivers/gpu/drm/i915/gt/intel_timeline.h
@@ -88,7 +88,7 @@ int intel_timeline_read_hwsp(struct i915_request *from,
struct i915_request *until,
u32 *hwsp_offset);
-void intel_timelines_init(struct drm_i915_private *i915);
-void intel_timelines_fini(struct drm_i915_private *i915);
+void intel_gt_init_timelines(struct intel_gt *gt);
+void intel_gt_fini_timelines(struct intel_gt *gt);
#endif
diff --git a/drivers/gpu/drm/i915/gt/intel_timeline_types.h b/drivers/gpu/drm/i915/gt/intel_timeline_types.h
index aaf15cbe1ce1..02181c5020db 100644
--- a/drivers/gpu/drm/i915/gt/intel_timeline_types.h
+++ b/drivers/gpu/drm/i915/gt/intel_timeline_types.h
@@ -10,14 +10,15 @@
#include <linux/list.h>
#include <linux/kref.h>
#include <linux/mutex.h>
+#include <linux/rcupdate.h>
#include <linux/types.h>
#include "i915_active_types.h"
-struct drm_i915_private;
struct i915_vma;
-struct intel_timeline_cacheline;
struct i915_syncmap;
+struct intel_gt;
+struct intel_timeline_hwsp;
struct intel_timeline {
u64 fence_context;
@@ -87,4 +88,13 @@ struct intel_timeline {
struct rcu_head rcu;
};
+struct intel_timeline_cacheline {
+ struct i915_active active;
+
+ struct intel_timeline_hwsp *hwsp;
+ void *vaddr;
+
+ struct rcu_head rcu;
+};
+
#endif /* __I915_TIMELINE_TYPES_H__ */
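
Exporting struct intel_timeline_cacheline with an rcu_head pairs with
the kfree_rcu() in __idle_cacheline_free() above: readers may follow
rq->hwsp_cacheline under rcu_read_lock() while frees are deferred past
the grace period. The generic idiom, as a sketch with placeholder names:

	struct cl_like {
		int payload;
		struct rcu_head rcu;
	};

	static struct cl_like __rcu *slot;

	static void retire_slot(void)
	{
		struct cl_like *old;

		old = rcu_dereference_protected(slot, true);
		RCU_INIT_POINTER(slot, NULL);
		if (old)
			kfree_rcu(old, rcu);	/* freed once readers drain */
	}

	static int peek_slot(void)
	{
		struct cl_like *p;
		int val = -1;

		rcu_read_lock();
		p = rcu_dereference(slot);
		if (p)
			val = p->payload;	/* stable until rcu_read_unlock() */
		rcu_read_unlock();
		return val;
	}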
diff --git a/drivers/gpu/drm/i915/gt/intel_workarounds.c b/drivers/gpu/drm/i915/gt/intel_workarounds.c
index e4bccc14602f..4e292d4bf7b9 100644
--- a/drivers/gpu/drm/i915/gt/intel_workarounds.c
+++ b/drivers/gpu/drm/i915/gt/intel_workarounds.c
@@ -6,6 +6,7 @@
#include "i915_drv.h"
#include "intel_context.h"
+#include "intel_engine_pm.h"
#include "intel_gt.h"
#include "intel_ring.h"
#include "intel_workarounds.h"
@@ -146,21 +147,27 @@ static void _wa_add(struct i915_wa_list *wal, const struct i915_wa *wa)
}
}
-static void
-wa_write_masked_or(struct i915_wa_list *wal, i915_reg_t reg, u32 mask,
- u32 val)
+static void wa_add(struct i915_wa_list *wal, i915_reg_t reg, u32 mask,
+ u32 val, u32 read_mask)
{
struct i915_wa wa = {
.reg = reg,
.mask = mask,
.val = val,
- .read = mask,
+ .read = read_mask,
};
_wa_add(wal, &wa);
}
static void
+wa_write_masked_or(struct i915_wa_list *wal, i915_reg_t reg, u32 mask,
+ u32 val)
+{
+ wa_add(wal, reg, mask, val, mask);
+}
+
+static void
wa_masked_en(struct i915_wa_list *wal, i915_reg_t reg, u32 val)
{
wa_write_masked_or(wal, reg, val, _MASKED_BIT_ENABLE(val));
@@ -247,7 +254,7 @@ static void bdw_ctx_workarounds_init(struct intel_engine_cs *engine,
/* WaDisableDopClockGating:bdw
*
- * Also see the related UCGTCL1 write in broadwell_init_clock_gating()
+ * Also see the related UCGTCL1 write in bdw_init_clock_gating()
* to disable EUTC clock gating.
*/
WA_SET_BIT_MASKED(GEN7_ROW_CHICKEN2,
@@ -568,9 +575,24 @@ static void icl_ctx_workarounds_init(struct intel_engine_cs *engine,
static void tgl_ctx_workarounds_init(struct intel_engine_cs *engine,
struct i915_wa_list *wal)
{
+ u32 val;
+
/* Wa_1409142259:tgl */
WA_SET_BIT_MASKED(GEN11_COMMON_SLICE_CHICKEN3,
GEN12_DISABLE_CPS_AWARE_COLOR_PIPE);
+
+ /* Wa_1604555607:tgl */
+ val = intel_uncore_read(engine->uncore, FF_MODE2);
+ val &= ~FF_MODE2_TDS_TIMER_MASK;
+ val |= FF_MODE2_TDS_TIMER_128;
+ /*
+ * FIXME: The FF_MODE2 register is not readable until TGL B0, so we
+ * can only enable verification of this workaround (reading back
+ * FF_MODE2) on later steppings.
+ */
+ wa_add(wal, FF_MODE2, FF_MODE2_TDS_TIMER_MASK, val,
+ IS_TGL_REVID(engine->i915, TGL_REVID_A0, TGL_REVID_A0) ? 0 :
+ FF_MODE2_TDS_TIMER_MASK);
}
static void
@@ -1315,6 +1337,14 @@ rcs_engine_wa_init(struct intel_engine_cs *engine, struct i915_wa_list *wal)
GEN6_RC_SLEEP_PSMI_CONTROL,
GEN12_WAIT_FOR_EVENT_POWER_DOWN_DISABLE |
GEN8_RC_SEMA_IDLE_MSG_DISABLE);
+
+ /*
+ * Wa_1606679103:tgl
+ * (see also Wa_1606682166:icl)
+ */
+ wa_write_or(wal,
+ GEN7_SARCHKMD,
+ GEN7_DISABLE_SAMPLER_PREFETCH);
}
if (IS_GEN(i915, 11)) {
@@ -1574,7 +1604,9 @@ static int engine_wa_list_verify(struct intel_context *ce,
if (IS_ERR(vma))
return PTR_ERR(vma);
+ intel_engine_pm_get(ce->engine);
rq = intel_context_create_request(ce);
+ intel_engine_pm_put(ce->engine);
if (IS_ERR(rq)) {
err = PTR_ERR(rq);
goto err_vma;
@@ -1584,16 +1616,17 @@ static int engine_wa_list_verify(struct intel_context *ce,
if (err)
goto err_vma;
+ i915_request_get(rq);
i915_request_add(rq);
if (i915_request_wait(rq, 0, HZ / 5) < 0) {
err = -ETIME;
- goto err_vma;
+ goto err_rq;
}
results = i915_gem_object_pin_map(vma->obj, I915_MAP_WB);
if (IS_ERR(results)) {
err = PTR_ERR(results);
- goto err_vma;
+ goto err_rq;
}
err = 0;
@@ -1607,6 +1640,8 @@ static int engine_wa_list_verify(struct intel_context *ce,
i915_gem_object_unpin_map(vma->obj);
+err_rq:
+ i915_request_put(rq);
err_vma:
i915_vma_unpin(vma);
i915_vma_put(vma);
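
wa_add() now lets a workaround carry a read mask distinct from its write
mask; with a read mask of 0 the entry is applied but never checked, which
is how the FF_MODE2 write above avoids verification on TGL A0, where the
register cannot be read back. Conceptually, per-register verification
reduces to (a sketch, not the driver's actual verify routine):

	static bool wa_ok(const struct i915_wa *wa, u32 cur)
	{
		/* wa->read == 0 makes this vacuously true */
		return (cur & wa->read) == (wa->val & wa->read);
	}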
diff --git a/drivers/gpu/drm/i915/gt/mock_engine.c b/drivers/gpu/drm/i915/gt/mock_engine.c
index 83f549d203a0..a560b7eee2cd 100644
--- a/drivers/gpu/drm/i915/gt/mock_engine.c
+++ b/drivers/gpu/drm/i915/gt/mock_engine.c
@@ -77,7 +77,7 @@ static void advance(struct i915_request *request)
i915_request_mark_complete(request);
GEM_BUG_ON(!i915_request_completed(request));
- intel_engine_queue_breadcrumbs(request->engine);
+ intel_engine_signal_breadcrumbs(request->engine);
}
static void hw_delay_complete(struct timer_list *t)
@@ -149,7 +149,11 @@ static int mock_context_alloc(struct intel_context *ce)
static int mock_context_pin(struct intel_context *ce)
{
- return intel_context_active_acquire(ce);
+ return 0;
+}
+
+static void mock_context_reset(struct intel_context *ce)
+{
}
static const struct intel_context_ops mock_context_ops = {
@@ -161,6 +165,7 @@ static const struct intel_context_ops mock_context_ops = {
.enter = intel_context_enter_engine,
.exit = intel_context_exit_engine,
+ .reset = mock_context_reset,
.destroy = mock_context_destroy,
};
@@ -207,16 +212,12 @@ static void mock_reset_prepare(struct intel_engine_cs *engine)
{
}
-static void mock_reset(struct intel_engine_cs *engine, bool stalled)
+static void mock_reset_rewind(struct intel_engine_cs *engine, bool stalled)
{
GEM_BUG_ON(stalled);
}
-static void mock_reset_finish(struct intel_engine_cs *engine)
-{
-}
-
-static void mock_cancel_requests(struct intel_engine_cs *engine)
+static void mock_reset_cancel(struct intel_engine_cs *engine)
{
struct i915_request *request;
unsigned long flags;
@@ -234,6 +235,24 @@ static void mock_cancel_requests(struct intel_engine_cs *engine)
spin_unlock_irqrestore(&engine->active.lock, flags);
}
+static void mock_reset_finish(struct intel_engine_cs *engine)
+{
+}
+
+static void mock_engine_release(struct intel_engine_cs *engine)
+{
+ struct mock_engine *mock =
+ container_of(engine, typeof(*mock), base);
+
+ GEM_BUG_ON(timer_pending(&mock->hw_delay));
+
+ intel_context_unpin(engine->kernel_context);
+ intel_context_put(engine->kernel_context);
+
+ intel_engine_fini_retire(engine);
+ intel_engine_fini_breadcrumbs(engine);
+}
+
struct intel_engine_cs *mock_engine(struct drm_i915_private *i915,
const char *name,
int id)
@@ -265,9 +284,11 @@ struct intel_engine_cs *mock_engine(struct drm_i915_private *i915,
engine->base.submit_request = mock_submit_request;
engine->base.reset.prepare = mock_reset_prepare;
- engine->base.reset.reset = mock_reset;
+ engine->base.reset.rewind = mock_reset_rewind;
+ engine->base.reset.cancel = mock_reset_cancel;
engine->base.reset.finish = mock_reset_finish;
- engine->base.cancel_requests = mock_cancel_requests;
+
+ engine->base.release = mock_engine_release;
i915->gt.engine[id] = &engine->base;
i915->gt.engine_class[0][id] = &engine->base;
@@ -290,6 +311,7 @@ int mock_engine_init(struct intel_engine_cs *engine)
intel_engine_init_breadcrumbs(engine);
intel_engine_init_execlists(engine);
intel_engine_init__pm(engine);
+ intel_engine_init_retire(engine);
intel_engine_pool_init(&engine->pool);
ce = create_kernel_context(engine);
@@ -321,18 +343,3 @@ void mock_engine_flush(struct intel_engine_cs *engine)
void mock_engine_reset(struct intel_engine_cs *engine)
{
}
-
-void mock_engine_free(struct intel_engine_cs *engine)
-{
- struct mock_engine *mock =
- container_of(engine, typeof(*mock), base);
-
- GEM_BUG_ON(timer_pending(&mock->hw_delay));
-
- intel_context_unpin(engine->kernel_context);
- intel_context_put(engine->kernel_context);
-
- intel_engine_fini_breadcrumbs(engine);
-
- kfree(engine);
-}
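
With kfree() dropped from both ring_release() and mock_engine_release(),
backends only undo their own setup through engine->release and the core
engine code owns the final free. Roughly, on the caller side (a hedged
sketch; the exact core helper is not part of this hunk):

	static void engine_free(struct intel_engine_cs *engine)
	{
		if (engine->release)
			engine->release(engine);	/* backend teardown only */
		kfree(engine);				/* core owns the memory */
	}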
diff --git a/drivers/gpu/drm/i915/gt/selftest_context.c b/drivers/gpu/drm/i915/gt/selftest_context.c
index bc720defc6b8..e874dfaa5316 100644
--- a/drivers/gpu/drm/i915/gt/selftest_context.c
+++ b/drivers/gpu/drm/i915/gt/selftest_context.c
@@ -5,6 +5,7 @@
*/
#include "i915_selftest.h"
+#include "intel_engine_heartbeat.h"
#include "intel_engine_pm.h"
#include "intel_gt.h"
@@ -47,35 +48,36 @@ static int context_sync(struct intel_context *ce)
mutex_lock(&tl->mutex);
do {
- struct dma_fence *fence;
+ struct i915_request *rq;
long timeout;
- fence = i915_active_fence_get(&tl->last_request);
- if (!fence)
+ if (list_empty(&tl->requests))
break;
- timeout = dma_fence_wait_timeout(fence, false, HZ / 10);
+ rq = list_last_entry(&tl->requests, typeof(*rq), link);
+ i915_request_get(rq);
+
+ timeout = i915_request_wait(rq, 0, HZ / 10);
if (timeout < 0)
err = timeout;
else
- i915_request_retire_upto(to_request(fence));
+ i915_request_retire_upto(rq);
- dma_fence_put(fence);
+ i915_request_put(rq);
} while (!err);
mutex_unlock(&tl->mutex);
return err;
}
-static int __live_context_size(struct intel_engine_cs *engine,
- struct i915_gem_context *fixme)
+static int __live_context_size(struct intel_engine_cs *engine)
{
struct intel_context *ce;
struct i915_request *rq;
void *vaddr;
int err;
- ce = intel_context_create(fixme, engine);
+ ce = intel_context_create(engine);
if (IS_ERR(ce))
return PTR_ERR(ce);
@@ -118,7 +120,7 @@ static int __live_context_size(struct intel_engine_cs *engine,
goto err_unpin;
/* Force the context switch */
- rq = i915_request_create(engine->kernel_context);
+ rq = intel_engine_create_kernel_request(engine);
if (IS_ERR(rq)) {
err = PTR_ERR(rq);
goto err_unpin;
@@ -143,7 +145,6 @@ static int live_context_size(void *arg)
{
struct intel_gt *gt = arg;
struct intel_engine_cs *engine;
- struct i915_gem_context *fixme;
enum intel_engine_id id;
int err = 0;
@@ -152,10 +153,6 @@ static int live_context_size(void *arg)
* HW tries to write past the end of one.
*/
- fixme = kernel_context(gt->i915);
- if (IS_ERR(fixme))
- return PTR_ERR(fixme);
-
for_each_engine(engine, gt, id) {
struct {
struct drm_i915_gem_object *state;
@@ -180,7 +177,7 @@ static int live_context_size(void *arg)
/* Overlaps with the execlists redzone */
engine->context_size += I915_GTT_PAGE_SIZE;
- err = __live_context_size(engine, fixme);
+ err = __live_context_size(engine);
engine->context_size -= I915_GTT_PAGE_SIZE;
@@ -193,13 +190,12 @@ static int live_context_size(void *arg)
break;
}
- kernel_context_close(fixme);
return err;
}
-static int __live_active_context(struct intel_engine_cs *engine,
- struct i915_gem_context *fixme)
+static int __live_active_context(struct intel_engine_cs *engine)
{
+ unsigned long saved_heartbeat;
struct intel_context *ce;
int pass;
int err;
@@ -223,40 +219,55 @@ static int __live_active_context(struct intel_engine_cs *engine,
return -EINVAL;
}
- ce = intel_context_create(fixme, engine);
+ ce = intel_context_create(engine);
if (IS_ERR(ce))
return PTR_ERR(ce);
+ saved_heartbeat = engine->props.heartbeat_interval_ms;
+ engine->props.heartbeat_interval_ms = 0;
+
for (pass = 0; pass <= 2; pass++) {
struct i915_request *rq;
+ intel_engine_pm_get(engine);
+
rq = intel_context_create_request(ce);
if (IS_ERR(rq)) {
err = PTR_ERR(rq);
- goto err;
+ goto out_engine;
}
err = request_sync(rq);
if (err)
- goto err;
+ goto out_engine;
/* Context will be kept active until after an idle-barrier. */
if (i915_active_is_idle(&ce->active)) {
pr_err("context is not active; expected idle-barrier (%s pass %d)\n",
engine->name, pass);
err = -EINVAL;
- goto err;
+ goto out_engine;
}
if (!intel_engine_pm_is_awake(engine)) {
pr_err("%s is asleep before idle-barrier\n",
engine->name);
err = -EINVAL;
- goto err;
+ goto out_engine;
}
+
+out_engine:
+ intel_engine_pm_put(engine);
+ if (err)
+ goto err;
}
/* Now make sure our idle-barriers are flushed */
+ err = intel_engine_flush_barriers(engine);
+ if (err)
+ goto err;
+
+ /* Wait for the barrier and, in the process, for the engine to park */
err = context_sync(engine->kernel_context);
if (err)
goto err;
@@ -266,12 +277,15 @@ static int __live_active_context(struct intel_engine_cs *engine,
err = -EINVAL;
}
+ intel_engine_pm_flush(engine);
+
if (intel_engine_pm_is_awake(engine)) {
struct drm_printer p = drm_debug_printer(__func__);
intel_engine_dump(engine, &p,
- "%s is still awake after idle-barriers\n",
- engine->name);
+ "%s is still awake:%d after idle-barriers\n",
+ engine->name,
+ atomic_read(&engine->wakeref.count));
GEM_TRACE_DUMP();
err = -EINVAL;
@@ -279,6 +293,7 @@ static int __live_active_context(struct intel_engine_cs *engine,
}
err:
+ engine->props.heartbeat_interval_ms = saved_heartbeat;
intel_context_put(ce);
return err;
}
@@ -287,23 +302,11 @@ static int live_active_context(void *arg)
{
struct intel_gt *gt = arg;
struct intel_engine_cs *engine;
- struct i915_gem_context *fixme;
enum intel_engine_id id;
- struct drm_file *file;
int err = 0;
- file = mock_file(gt->i915);
- if (IS_ERR(file))
- return PTR_ERR(file);
-
- fixme = live_context(gt->i915, file);
- if (IS_ERR(fixme)) {
- err = PTR_ERR(fixme);
- goto out_file;
- }
-
for_each_engine(engine, gt, id) {
- err = __live_active_context(engine, fixme);
+ err = __live_active_context(engine);
if (err)
break;
@@ -312,8 +315,6 @@ static int live_active_context(void *arg)
break;
}
-out_file:
- mock_file_free(gt->i915, file);
return err;
}
@@ -345,10 +346,10 @@ unpin:
return err;
}
-static int __live_remote_context(struct intel_engine_cs *engine,
- struct i915_gem_context *fixme)
+static int __live_remote_context(struct intel_engine_cs *engine)
{
struct intel_context *local, *remote;
+ unsigned long saved_heartbeat;
int pass;
int err;
@@ -360,16 +361,26 @@ static int __live_remote_context(struct intel_engine_cs *engine,
* clobber the idle-barrier.
*/
- remote = intel_context_create(fixme, engine);
+ if (intel_engine_pm_is_awake(engine)) {
+ pr_err("%s is awake before starting %s!\n",
+ engine->name, __func__);
+ return -EINVAL;
+ }
+
+ remote = intel_context_create(engine);
if (IS_ERR(remote))
return PTR_ERR(remote);
- local = intel_context_create(fixme, engine);
+ local = intel_context_create(engine);
if (IS_ERR(local)) {
err = PTR_ERR(local);
goto err_remote;
}
+ saved_heartbeat = engine->props.heartbeat_interval_ms;
+ engine->props.heartbeat_interval_ms = 0;
+ intel_engine_pm_get(engine);
+
for (pass = 0; pass <= 2; pass++) {
err = __remote_sync(local, remote);
if (err)
@@ -387,6 +398,9 @@ static int __live_remote_context(struct intel_engine_cs *engine,
}
}
+ intel_engine_pm_put(engine);
+ engine->props.heartbeat_interval_ms = saved_heartbeat;
+
intel_context_put(local);
err_remote:
intel_context_put(remote);
@@ -397,23 +411,11 @@ static int live_remote_context(void *arg)
{
struct intel_gt *gt = arg;
struct intel_engine_cs *engine;
- struct i915_gem_context *fixme;
enum intel_engine_id id;
- struct drm_file *file;
int err = 0;
- file = mock_file(gt->i915);
- if (IS_ERR(file))
- return PTR_ERR(file);
-
- fixme = live_context(gt->i915, file);
- if (IS_ERR(fixme)) {
- err = PTR_ERR(fixme);
- goto out_file;
- }
-
for_each_engine(engine, gt, id) {
- err = __live_remote_context(engine, fixme);
+ err = __live_remote_context(engine);
if (err)
break;
@@ -422,8 +424,6 @@ static int live_remote_context(void *arg)
break;
}
-out_file:
- mock_file_free(gt->i915, file);
return err;
}
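
Both tests now pin the engine awake and zero the heartbeat interval so
that background kernel requests cannot disturb the idle-barrier
accounting. The save/restore pattern, distilled:

	unsigned long saved = engine->props.heartbeat_interval_ms;

	engine->props.heartbeat_interval_ms = 0;	/* quiesce heartbeats */
	intel_engine_pm_get(engine);

	/* ... exercise the context while holding the engine awake ... */

	intel_engine_pm_put(engine);
	engine->props.heartbeat_interval_ms = saved;	/* restore */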
diff --git a/drivers/gpu/drm/i915/gt/selftest_engine_cs.c b/drivers/gpu/drm/i915/gt/selftest_engine_cs.c
index 3880f07c29b8..f88e445a1cae 100644
--- a/drivers/gpu/drm/i915/gt/selftest_engine_cs.c
+++ b/drivers/gpu/drm/i915/gt/selftest_engine_cs.c
@@ -4,7 +4,365 @@
* Copyright © 2018 Intel Corporation
*/
-#include "../i915_selftest.h"
+#include <linux/sort.h>
+
+#include "intel_gt_pm.h"
+#include "intel_rps.h"
+
+#include "i915_selftest.h"
+#include "selftests/igt_flush_test.h"
+
+#define COUNT 5
+
+static int cmp_u32(const void *A, const void *B)
+{
+ const u32 *a = A, *b = B;
+
+ return *a - *b;
+}
+
+static void perf_begin(struct intel_gt *gt)
+{
+ intel_gt_pm_get(gt);
+
+ /* Boost gpufreq to max [waitboost] and keep it fixed */
+ atomic_inc(&gt->rps.num_waiters);
+ schedule_work(&gt->rps.work);
+ flush_work(&gt->rps.work);
+}
+
+static int perf_end(struct intel_gt *gt)
+{
+ atomic_dec(&gt->rps.num_waiters);
+ intel_gt_pm_put(gt);
+
+ return igt_flush_test(gt->i915);
+}
+
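+/*
+ * Record the engine's CS_TIMESTAMP into a spare dword of the request's
+ * HWSP so the CPU can read it back later. On gen8+ MI_STORE_REGISTER_MEM
+ * takes one extra dword for the 64b address, hence the cmd++ below.
+ */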
+static int write_timestamp(struct i915_request *rq, int slot)
+{
+ u32 cmd;
+ u32 *cs;
+
+ cs = intel_ring_begin(rq, 4);
+ if (IS_ERR(cs))
+ return PTR_ERR(cs);
+
+ cmd = MI_STORE_REGISTER_MEM | MI_USE_GGTT;
+ if (INTEL_GEN(rq->i915) >= 8)
+ cmd++;
+ *cs++ = cmd;
+ *cs++ = i915_mmio_reg_offset(RING_TIMESTAMP(rq->engine->mmio_base));
+ *cs++ = i915_request_timeline(rq)->hwsp_offset + slot * sizeof(u32);
+ *cs++ = 0;
+
+ intel_ring_advance(rq, cs);
+
+ return 0;
+}
+
+static struct i915_vma *create_empty_batch(struct intel_context *ce)
+{
+ struct drm_i915_gem_object *obj;
+ struct i915_vma *vma;
+ u32 *cs;
+ int err;
+
+ obj = i915_gem_object_create_internal(ce->engine->i915, PAGE_SIZE);
+ if (IS_ERR(obj))
+ return ERR_CAST(obj);
+
+ cs = i915_gem_object_pin_map(obj, I915_MAP_WB);
+ if (IS_ERR(cs)) {
+ err = PTR_ERR(cs);
+ goto err_put;
+ }
+
+ cs[0] = MI_BATCH_BUFFER_END;
+
+ i915_gem_object_flush_map(obj);
+
+ vma = i915_vma_instance(obj, ce->vm, NULL);
+ if (IS_ERR(vma)) {
+ err = PTR_ERR(vma);
+ goto err_unpin;
+ }
+
+ err = i915_vma_pin(vma, 0, 0, PIN_USER);
+ if (err)
+ goto err_unpin;
+
+ i915_gem_object_unpin_map(obj);
+ return vma;
+
+err_unpin:
+ i915_gem_object_unpin_map(obj);
+err_put:
+ i915_gem_object_put(obj);
+ return ERR_PTR(err);
+}
+
+static u32 trifilter(u32 *a)
+{
+ u64 sum;
+
+ sort(a, COUNT, sizeof(*a), cmp_u32, NULL);
+
+ sum = mul_u32_u32(a[2], 2);
+ sum += a[1];
+ sum += a[3];
+
+ return sum >> 2;
+}
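+
+/*
+ * trifilter() is a cheap outlier filter: sort the five samples and
+ * return a weighted average of the middle three,
+ * (a[1] + 2 * a[2] + a[3]) / 4. A hypothetical worked example:
+ *
+ *	samples { 110, 90, 400, 100, 95 } sort to { 90, 95, 100, 110, 400 }
+ *	result = (95 + 2 * 100 + 110) >> 2 = 101
+ *
+ * so a single wild sample (here 400) is discarded entirely.
+ */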
+
+static int perf_mi_bb_start(void *arg)
+{
+ struct intel_gt *gt = arg;
+ struct intel_engine_cs *engine;
+ enum intel_engine_id id;
+ int err = 0;
+
+ if (INTEL_GEN(gt->i915) < 7) /* for per-engine CS_TIMESTAMP */
+ return 0;
+
+ perf_begin(gt);
+ for_each_engine(engine, gt, id) {
+ struct intel_context *ce = engine->kernel_context;
+ struct i915_vma *batch;
+ u32 cycles[COUNT];
+ int i;
+
+ intel_engine_pm_get(engine);
+
+ batch = create_empty_batch(ce);
+ if (IS_ERR(batch)) {
+ err = PTR_ERR(batch);
+ intel_engine_pm_put(engine);
+ break;
+ }
+
+ err = i915_vma_sync(batch);
+ if (err) {
+ intel_engine_pm_put(engine);
+ i915_vma_put(batch);
+ break;
+ }
+
+ for (i = 0; i < ARRAY_SIZE(cycles); i++) {
+ struct i915_request *rq;
+
+ rq = i915_request_create(ce);
+ if (IS_ERR(rq)) {
+ err = PTR_ERR(rq);
+ break;
+ }
+
+ err = write_timestamp(rq, 2);
+ if (err)
+ goto out;
+
+ err = rq->engine->emit_bb_start(rq,
+ batch->node.start, 8,
+ 0);
+ if (err)
+ goto out;
+
+ err = write_timestamp(rq, 3);
+ if (err)
+ goto out;
+
+out:
+ i915_request_get(rq);
+ i915_request_add(rq);
+
+ if (i915_request_wait(rq, 0, HZ / 5) < 0)
+ err = -EIO;
+ else
+ /* sample the HWSP before dropping our reference */
+ cycles[i] = rq->hwsp_seqno[3] - rq->hwsp_seqno[2];
+ i915_request_put(rq);
+ if (err)
+ break;
+ }
+ i915_vma_put(batch);
+ intel_engine_pm_put(engine);
+ if (err)
+ break;
+
+ pr_info("%s: MI_BB_START cycles: %u\n",
+ engine->name, trifilter(cycles));
+ }
+ if (perf_end(gt))
+ err = -EIO;
+
+ return err;
+}
+
+static struct i915_vma *create_nop_batch(struct intel_context *ce)
+{
+ struct drm_i915_gem_object *obj;
+ struct i915_vma *vma;
+ u32 *cs;
+ int err;
+
+ obj = i915_gem_object_create_internal(ce->engine->i915, SZ_64K);
+ if (IS_ERR(obj))
+ return ERR_CAST(obj);
+
+ cs = i915_gem_object_pin_map(obj, I915_MAP_WB);
+ if (IS_ERR(cs)) {
+ err = PTR_ERR(cs);
+ goto err_put;
+ }
+
+ memset(cs, 0, SZ_64K);
+ cs[SZ_64K / sizeof(*cs) - 1] = MI_BATCH_BUFFER_END;
+
+ i915_gem_object_flush_map(obj);
+
+ vma = i915_vma_instance(obj, ce->vm, NULL);
+ if (IS_ERR(vma)) {
+ err = PTR_ERR(vma);
+ goto err_unpin;
+ }
+
+ err = i915_vma_pin(vma, 0, 0, PIN_USER);
+ if (err)
+ goto err_unpin;
+
+ i915_gem_object_unpin_map(obj);
+ return vma;
+
+err_unpin:
+ i915_gem_object_unpin_map(obj);
+err_put:
+ i915_gem_object_put(obj);
+ return ERR_PTR(err);
+}
+
+static int perf_mi_noop(void *arg)
+{
+ struct intel_gt *gt = arg;
+ struct intel_engine_cs *engine;
+ enum intel_engine_id id;
+ int err = 0;
+
+ if (INTEL_GEN(gt->i915) < 7) /* for per-engine CS_TIMESTAMP */
+ return 0;
+
+ perf_begin(gt);
+ for_each_engine(engine, gt, id) {
+ struct intel_context *ce = engine->kernel_context;
+ struct i915_vma *base, *nop;
+ u32 cycles[COUNT];
+ int i;
+
+ intel_engine_pm_get(engine);
+
+ base = create_empty_batch(ce);
+ if (IS_ERR(base)) {
+ err = PTR_ERR(base);
+ intel_engine_pm_put(engine);
+ break;
+ }
+
+ err = i915_vma_sync(base);
+ if (err) {
+ i915_vma_put(base);
+ intel_engine_pm_put(engine);
+ break;
+ }
+
+ nop = create_nop_batch(ce);
+ if (IS_ERR(nop)) {
+ err = PTR_ERR(nop);
+ i915_vma_put(base);
+ intel_engine_pm_put(engine);
+ break;
+ }
+
+ err = i915_vma_sync(nop);
+ if (err) {
+ i915_vma_put(nop);
+ i915_vma_put(base);
+ intel_engine_pm_put(engine);
+ break;
+ }
+
+ for (i = 0; i < ARRAY_SIZE(cycles); i++) {
+ struct i915_request *rq;
+
+ rq = i915_request_create(ce);
+ if (IS_ERR(rq)) {
+ err = PTR_ERR(rq);
+ break;
+ }
+
+ err = write_timestamp(rq, 2);
+ if (err)
+ goto out;
+
+ err = rq->engine->emit_bb_start(rq,
+ base->node.start, 8,
+ 0);
+ if (err)
+ goto out;
+
+ err = write_timestamp(rq, 3);
+ if (err)
+ goto out;
+
+ err = rq->engine->emit_bb_start(rq,
+ nop->node.start,
+ nop->node.size,
+ 0);
+ if (err)
+ goto out;
+
+ err = write_timestamp(rq, 4);
+ if (err)
+ goto out;
+
+out:
+ i915_request_get(rq);
+ i915_request_add(rq);
+
+ if (i915_request_wait(rq, 0, HZ / 5) < 0)
+ err = -EIO;
+ else
+ /* sample the HWSP before dropping our reference */
+ cycles[i] =
+ (rq->hwsp_seqno[4] - rq->hwsp_seqno[3]) -
+ (rq->hwsp_seqno[3] - rq->hwsp_seqno[2]);
+ i915_request_put(rq);
+ if (err)
+ break;
+ }
+ i915_vma_put(nop);
+ i915_vma_put(base);
+ intel_engine_pm_put(engine);
+ if (err)
+ break;
+
+ pr_info("%s: 16K MI_NOOP cycles: %u\n",
+ engine->name, trifilter(cycles));
+ }
+ if (perf_end(gt))
+ err = -EIO;
+
+ return err;
+}
+
+int intel_engine_cs_perf_selftests(struct drm_i915_private *i915)
+{
+ static const struct i915_subtest tests[] = {
+ SUBTEST(perf_mi_bb_start),
+ SUBTEST(perf_mi_noop),
+ };
+
+ if (intel_gt_is_wedged(&i915->gt))
+ return 0;
+
+ return intel_gt_live_subtests(tests, &i915->gt);
+}
static int intel_mmio_bases_check(void *arg)
{
diff --git a/drivers/gpu/drm/i915/gt/selftest_engine_heartbeat.c b/drivers/gpu/drm/i915/gt/selftest_engine_heartbeat.c
index e864406bd2d9..43d4d589749f 100644
--- a/drivers/gpu/drm/i915/gt/selftest_engine_heartbeat.c
+++ b/drivers/gpu/drm/i915/gt/selftest_engine_heartbeat.c
@@ -11,6 +11,28 @@
#include "intel_gt_requests.h"
#include "i915_selftest.h"
+static int timeline_sync(struct intel_timeline *tl)
+{
+ struct dma_fence *fence;
+ long timeout;
+
+ fence = i915_active_fence_get(&tl->last_request);
+ if (!fence)
+ return 0;
+
+ timeout = dma_fence_wait_timeout(fence, true, HZ / 2);
+ dma_fence_put(fence);
+ if (timeout < 0)
+ return timeout;
+
+ return 0;
+}
+
+static int engine_sync_barrier(struct intel_engine_cs *engine)
+{
+ return timeline_sync(engine->kernel_context->timeline);
+}
+
struct pulse {
struct i915_active active;
struct kref kref;
@@ -53,9 +75,7 @@ static struct pulse *pulse_create(void)
static void pulse_unlock_wait(struct pulse *p)
{
- mutex_lock(&p->active.mutex);
- mutex_unlock(&p->active.mutex);
- flush_work(&p->active.work);
+ i915_active_unlock_wait(&p->active);
}
static int __live_idle_pulse(struct intel_engine_cs *engine,
@@ -92,7 +112,12 @@ static int __live_idle_pulse(struct intel_engine_cs *engine,
GEM_BUG_ON(!llist_empty(&engine->barrier_tasks));
- if (intel_gt_retire_requests_timeout(engine->gt, HZ / 5)) {
+ if (engine_sync_barrier(engine)) {
+ struct drm_printer m = drm_err_printer("pulse");
+
+ pr_err("%s: no heartbeat pulse?\n", engine->name);
+ intel_engine_dump(engine, &m, "%s", engine->name);
+
err = -ETIME;
goto out;
}
@@ -175,8 +200,7 @@ static int __live_heartbeat_fast(struct intel_engine_cs *engine)
int err;
int i;
- ce = intel_context_create(engine->kernel_context->gem_context,
- engine);
+ ce = intel_context_create(engine);
if (IS_ERR(ce))
return PTR_ERR(ce);
diff --git a/drivers/gpu/drm/i915/gt/selftest_gt_pm.c b/drivers/gpu/drm/i915/gt/selftest_gt_pm.c
index d1752f15702a..09ff8e4f88af 100644
--- a/drivers/gpu/drm/i915/gt/selftest_gt_pm.c
+++ b/drivers/gpu/drm/i915/gt/selftest_gt_pm.c
@@ -6,6 +6,7 @@
*/
#include "selftest_llc.h"
+#include "selftest_rc6.h"
static int live_gt_resume(void *arg)
{
@@ -50,6 +51,7 @@ static int live_gt_resume(void *arg)
int intel_gt_pm_live_selftests(struct drm_i915_private *i915)
{
static const struct i915_subtest tests[] = {
+ SUBTEST(live_rc6_manual),
SUBTEST(live_gt_resume),
};
@@ -58,3 +60,20 @@ int intel_gt_pm_live_selftests(struct drm_i915_private *i915)
return intel_gt_live_subtests(tests, &i915->gt);
}
+
+int intel_gt_pm_late_selftests(struct drm_i915_private *i915)
+{
+ static const struct i915_subtest tests[] = {
+ /*
+ * These tests may leave the system in an undesirable state.
+ * They are intended to be run last in CI and the system
+ * rebooted afterwards.
+ */
+ SUBTEST(live_rc6_ctx_wa),
+ };
+
+ if (intel_gt_is_wedged(&i915->gt))
+ return 0;
+
+ return intel_gt_live_subtests(tests, &i915->gt);
+}
diff --git a/drivers/gpu/drm/i915/gt/selftest_hangcheck.c b/drivers/gpu/drm/i915/gt/selftest_hangcheck.c
index 85e9ccf5c304..3e5e6c86e843 100644
--- a/drivers/gpu/drm/i915/gt/selftest_hangcheck.c
+++ b/drivers/gpu/drm/i915/gt/selftest_hangcheck.c
@@ -25,7 +25,9 @@
#include <linux/kthread.h>
#include "gem/i915_gem_context.h"
-#include "gt/intel_gt.h"
+
+#include "intel_gt.h"
+#include "intel_engine_heartbeat.h"
#include "intel_engine_pm.h"
#include "i915_selftest.h"
@@ -308,6 +310,24 @@ static bool wait_until_running(struct hang *h, struct i915_request *rq)
1000));
}
+static void engine_heartbeat_disable(struct intel_engine_cs *engine,
+ unsigned long *saved)
+{
+ *saved = engine->props.heartbeat_interval_ms;
+ engine->props.heartbeat_interval_ms = 0;
+
+ intel_engine_pm_get(engine);
+ intel_engine_park_heartbeat(engine);
+}
+
+static void engine_heartbeat_enable(struct intel_engine_cs *engine,
+ unsigned long saved)
+{
+ intel_engine_pm_put(engine);
+
+ engine->props.heartbeat_interval_ms = saved;
+}
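+
+/*
+ * The heartbeat helpers form a strict bracket around each test phase
+ * (a sketch matching the callers below):
+ *
+ *	unsigned long heartbeat;
+ *
+ *	engine_heartbeat_disable(engine, &heartbeat);
+ *	(exercise resets without heartbeat pulses perturbing the timing)
+ *	engine_heartbeat_enable(engine, heartbeat);
+ *
+ * Disabling saves and zeroes the interval, takes an engine-pm wakeref
+ * and parks any queued heartbeat; enabling undoes that in reverse.
+ */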
+
static int igt_hang_sanitycheck(void *arg)
{
struct intel_gt *gt = arg;
@@ -377,36 +397,30 @@ static int igt_reset_nop(void *arg)
struct intel_gt *gt = arg;
struct i915_gpu_error *global = &gt->i915->gpu_error;
struct intel_engine_cs *engine;
- struct i915_gem_context *ctx;
unsigned int reset_count, count;
enum intel_engine_id id;
- struct drm_file *file;
IGT_TIMEOUT(end_time);
int err = 0;
/* Check that we can reset during non-user portions of requests */
- file = mock_file(gt->i915);
- if (IS_ERR(file))
- return PTR_ERR(file);
-
- ctx = live_context(gt->i915, file);
- if (IS_ERR(ctx)) {
- err = PTR_ERR(ctx);
- goto out;
- }
-
- i915_gem_context_clear_bannable(ctx);
reset_count = i915_reset_count(global);
count = 0;
do {
for_each_engine(engine, gt, id) {
+ struct intel_context *ce;
int i;
+ ce = intel_context_create(engine);
+ if (IS_ERR(ce)) {
+ err = PTR_ERR(ce);
+ break;
+ }
+
for (i = 0; i < 16; i++) {
struct i915_request *rq;
- rq = igt_request_alloc(ctx, engine);
+ rq = intel_context_create_request(ce);
if (IS_ERR(rq)) {
err = PTR_ERR(rq);
break;
@@ -414,6 +428,8 @@ static int igt_reset_nop(void *arg)
i915_request_add(rq);
}
+
+ intel_context_put(ce);
}
igt_global_reset_lock(gt);
@@ -437,10 +453,7 @@ static int igt_reset_nop(void *arg)
} while (time_before(jiffies, end_time));
pr_info("%s: %d resets\n", __func__, count);
- err = igt_flush_test(gt->i915);
-out:
- mock_file_free(gt->i915, file);
- if (intel_gt_is_wedged(gt))
+ if (igt_flush_test(gt->i915))
err = -EIO;
return err;
}
@@ -450,36 +463,29 @@ static int igt_reset_nop_engine(void *arg)
struct intel_gt *gt = arg;
struct i915_gpu_error *global = &gt->i915->gpu_error;
struct intel_engine_cs *engine;
- struct i915_gem_context *ctx;
enum intel_engine_id id;
- struct drm_file *file;
- int err = 0;
/* Check that we can engine-reset during non-user portions */
if (!intel_has_reset_engine(gt))
return 0;
- file = mock_file(gt->i915);
- if (IS_ERR(file))
- return PTR_ERR(file);
-
- ctx = live_context(gt->i915, file);
- if (IS_ERR(ctx)) {
- err = PTR_ERR(ctx);
- goto out;
- }
-
- i915_gem_context_clear_bannable(ctx);
for_each_engine(engine, gt, id) {
- unsigned int reset_count, reset_engine_count;
- unsigned int count;
+ unsigned int reset_count, reset_engine_count, count;
+ struct intel_context *ce;
+ unsigned long heartbeat;
IGT_TIMEOUT(end_time);
+ int err;
+
+ ce = intel_context_create(engine);
+ if (IS_ERR(ce))
+ return PTR_ERR(ce);
reset_count = i915_reset_count(global);
reset_engine_count = i915_reset_engine_count(global, engine);
count = 0;
+ engine_heartbeat_disable(engine, &heartbeat);
set_bit(I915_RESET_ENGINE + id, &gt->reset.flags);
do {
int i;
@@ -494,7 +500,7 @@ static int igt_reset_nop_engine(void *arg)
for (i = 0; i < 16; i++) {
struct i915_request *rq;
- rq = igt_request_alloc(ctx, engine);
+ rq = intel_context_create_request(ce);
if (IS_ERR(rq)) {
err = PTR_ERR(rq);
break;
@@ -523,22 +529,18 @@ static int igt_reset_nop_engine(void *arg)
}
} while (time_before(jiffies, end_time));
clear_bit(I915_RESET_ENGINE + id, &gt->reset.flags);
- pr_info("%s(%s): %d resets\n", __func__, engine->name, count);
+ engine_heartbeat_enable(engine, heartbeat);
- if (err)
- break;
+ pr_info("%s(%s): %d resets\n", __func__, engine->name, count);
- err = igt_flush_test(gt->i915);
+ intel_context_put(ce);
+ if (igt_flush_test(gt->i915))
+ err = -EIO;
if (err)
- break;
+ return err;
}
- err = igt_flush_test(gt->i915);
-out:
- mock_file_free(gt->i915, file);
- if (intel_gt_is_wedged(gt))
- err = -EIO;
- return err;
+ return 0;
}
static int __igt_reset_engine(struct intel_gt *gt, bool active)
@@ -562,6 +564,7 @@ static int __igt_reset_engine(struct intel_gt *gt, bool active)
for_each_engine(engine, gt, id) {
unsigned int reset_count, reset_engine_count;
+ unsigned long heartbeat;
IGT_TIMEOUT(end_time);
if (active && !intel_engine_can_store_dword(engine))
@@ -577,7 +580,7 @@ static int __igt_reset_engine(struct intel_gt *gt, bool active)
reset_count = i915_reset_count(global);
reset_engine_count = i915_reset_engine_count(global, engine);
- intel_engine_pm_get(engine);
+ engine_heartbeat_disable(engine, &heartbeat);
set_bit(I915_RESET_ENGINE + id, &gt->reset.flags);
do {
if (active) {
@@ -629,7 +632,7 @@ static int __igt_reset_engine(struct intel_gt *gt, bool active)
}
} while (time_before(jiffies, end_time));
clear_bit(I915_RESET_ENGINE + id, &gt->reset.flags);
- intel_engine_pm_put(engine);
+ engine_heartbeat_enable(engine, heartbeat);
if (err)
break;
@@ -699,43 +702,43 @@ static int active_engine(void *data)
struct active_engine *arg = data;
struct intel_engine_cs *engine = arg->engine;
struct i915_request *rq[8] = {};
- struct i915_gem_context *ctx[ARRAY_SIZE(rq)];
- struct drm_file *file;
- unsigned long count = 0;
+ struct intel_context *ce[ARRAY_SIZE(rq)];
+ unsigned long count;
int err = 0;
- file = mock_file(engine->i915);
- if (IS_ERR(file))
- return PTR_ERR(file);
-
- for (count = 0; count < ARRAY_SIZE(ctx); count++) {
- ctx[count] = live_context(engine->i915, file);
- if (IS_ERR(ctx[count])) {
- err = PTR_ERR(ctx[count]);
+ for (count = 0; count < ARRAY_SIZE(ce); count++) {
+ ce[count] = intel_context_create(engine);
+ if (IS_ERR(ce[count])) {
+ err = PTR_ERR(ce[count]);
while (--count)
- i915_gem_context_put(ctx[count]);
- goto err_file;
+ intel_context_put(ce[count]);
+ return err;
}
}
+ count = 0;
while (!kthread_should_stop()) {
unsigned int idx = count++ & (ARRAY_SIZE(rq) - 1);
struct i915_request *old = rq[idx];
struct i915_request *new;
- new = igt_request_alloc(ctx[idx], engine);
+ new = intel_context_create_request(ce[idx]);
if (IS_ERR(new)) {
err = PTR_ERR(new);
break;
}
- if (arg->flags & TEST_PRIORITY)
- ctx[idx]->sched.priority =
- i915_prandom_u32_max_state(512, &prng);
-
rq[idx] = i915_request_get(new);
i915_request_add(new);
+ if (engine->schedule && arg->flags & TEST_PRIORITY) {
+ struct i915_sched_attr attr = {
+ .priority =
+ i915_prandom_u32_max_state(512, &prng),
+ };
+ engine->schedule(rq[idx], &attr);
+ }
+
err = active_request_put(old);
if (err)
break;
@@ -749,10 +752,10 @@ static int active_engine(void *data)
/* Keep the first error */
if (!err)
err = err__;
+
+ intel_context_put(ce[count]);
}
-err_file:
- mock_file_free(engine->i915, file);
return err;
}
@@ -786,6 +789,7 @@ static int __igt_reset_engines(struct intel_gt *gt,
struct active_engine threads[I915_NUM_ENGINES] = {};
unsigned long device = i915_reset_count(global);
unsigned long count = 0, reported;
+ unsigned long heartbeat;
IGT_TIMEOUT(end_time);
if (flags & TEST_ACTIVE &&
@@ -828,7 +832,7 @@ static int __igt_reset_engines(struct intel_gt *gt,
yield(); /* start all threads before we begin */
- intel_engine_pm_get(engine);
+ engine_heartbeat_disable(engine, &heartbeat);
set_bit(I915_RESET_ENGINE + id, &gt->reset.flags);
do {
struct i915_request *rq = NULL;
@@ -902,7 +906,8 @@ static int __igt_reset_engines(struct intel_gt *gt,
}
} while (time_before(jiffies, end_time));
clear_bit(I915_RESET_ENGINE + id, &gt->reset.flags);
- intel_engine_pm_put(engine);
+ engine_heartbeat_enable(engine, heartbeat);
+
pr_info("i915_reset_engine(%s:%s): %lu resets\n",
engine->name, test_name, count);
@@ -1300,32 +1305,21 @@ static int igt_reset_evict_ggtt(void *arg)
static int igt_reset_evict_ppgtt(void *arg)
{
struct intel_gt *gt = arg;
- struct i915_gem_context *ctx;
- struct i915_address_space *vm;
- struct drm_file *file;
+ struct i915_ppgtt *ppgtt;
int err;
- file = mock_file(gt->i915);
- if (IS_ERR(file))
- return PTR_ERR(file);
+ /* aliasing == global gtt locking, covered above */
+ if (INTEL_PPGTT(gt->i915) < INTEL_PPGTT_FULL)
+ return 0;
- ctx = live_context(gt->i915, file);
- if (IS_ERR(ctx)) {
- err = PTR_ERR(ctx);
- goto out;
- }
+ ppgtt = i915_ppgtt_create(gt);
+ if (IS_ERR(ppgtt))
+ return PTR_ERR(ppgtt);
- err = 0;
- vm = i915_gem_context_get_vm_rcu(ctx);
- if (!i915_is_ggtt(vm)) {
- /* aliasing == global gtt locking, covered above */
- err = __igt_reset_evict_vma(gt, vm,
- evict_vma, EXEC_OBJECT_WRITE);
- }
- i915_vm_put(vm);
+ err = __igt_reset_evict_vma(gt, &ppgtt->vm,
+ evict_vma, EXEC_OBJECT_WRITE);
+ i915_vm_put(&ppgtt->vm);
-out:
- mock_file_free(gt->i915, file);
return err;
}
@@ -1504,7 +1498,7 @@ static int igt_handle_error(void *arg)
struct intel_engine_cs *engine = gt->engine[RCS0];
struct hang h;
struct i915_request *rq;
- struct i915_gpu_state *error;
+ struct i915_gpu_coredump *error;
int err;
/* Check that we can issue a global GPU and engine reset */
diff --git a/drivers/gpu/drm/i915/gt/selftest_lrc.c b/drivers/gpu/drm/i915/gt/selftest_lrc.c
index eb71ac2f992c..15cda024e3e4 100644
--- a/drivers/gpu/drm/i915/gt/selftest_lrc.c
+++ b/drivers/gpu/drm/i915/gt/selftest_lrc.c
@@ -50,14 +50,31 @@ static struct i915_vma *create_scratch(struct intel_gt *gt)
return vma;
}
+static void engine_heartbeat_disable(struct intel_engine_cs *engine,
+ unsigned long *saved)
+{
+ *saved = engine->props.heartbeat_interval_ms;
+ engine->props.heartbeat_interval_ms = 0;
+
+ intel_engine_pm_get(engine);
+ intel_engine_park_heartbeat(engine);
+}
+
+static void engine_heartbeat_enable(struct intel_engine_cs *engine,
+ unsigned long saved)
+{
+ intel_engine_pm_put(engine);
+
+ engine->props.heartbeat_interval_ms = saved;
+}
+
static int live_sanitycheck(void *arg)
{
struct intel_gt *gt = arg;
- struct i915_gem_engines_iter it;
- struct i915_gem_context *ctx;
- struct intel_context *ce;
+ struct intel_engine_cs *engine;
+ enum intel_engine_id id;
struct igt_spinner spin;
- int err = -ENOMEM;
+ int err = 0;
if (!HAS_LOGICAL_RING_CONTEXTS(gt->i915))
return 0;
@@ -65,17 +82,20 @@ static int live_sanitycheck(void *arg)
if (igt_spinner_init(&spin, gt))
return -ENOMEM;
- ctx = kernel_context(gt->i915);
- if (!ctx)
- goto err_spin;
-
- for_each_gem_engine(ce, i915_gem_context_lock_engines(ctx), it) {
+ for_each_engine(engine, gt, id) {
+ struct intel_context *ce;
struct i915_request *rq;
+ ce = intel_context_create(engine);
+ if (IS_ERR(ce)) {
+ err = PTR_ERR(ce);
+ break;
+ }
+
rq = igt_spinner_create_request(&spin, ce, MI_NOOP);
if (IS_ERR(rq)) {
err = PTR_ERR(rq);
- goto err_ctx;
+ goto out_ctx;
}
i915_request_add(rq);
@@ -84,21 +104,21 @@ static int live_sanitycheck(void *arg)
GEM_TRACE_DUMP();
intel_gt_set_wedged(gt);
err = -EIO;
- goto err_ctx;
+ goto out_ctx;
}
igt_spinner_end(&spin);
if (igt_flush_test(gt->i915)) {
err = -EIO;
- goto err_ctx;
+ goto out_ctx;
}
+
+out_ctx:
+ intel_context_put(ce);
+ if (err)
+ break;
}
- err = 0;
-err_ctx:
- i915_gem_context_unlock_engines(ctx);
- kernel_context_close(ctx);
-err_spin:
igt_spinner_fini(&spin);
return err;
}
@@ -106,7 +126,6 @@ err_spin:
static int live_unlite_restore(struct intel_gt *gt, int prio)
{
struct intel_engine_cs *engine;
- struct i915_gem_context *ctx;
enum intel_engine_id id;
struct igt_spinner spin;
int err = -ENOMEM;
@@ -119,15 +138,12 @@ static int live_unlite_restore(struct intel_gt *gt, int prio)
if (igt_spinner_init(&spin, gt))
return err;
- ctx = kernel_context(gt->i915);
- if (!ctx)
- goto err_spin;
-
err = 0;
for_each_engine(engine, gt, id) {
struct intel_context *ce[2] = {};
struct i915_request *rq[2];
struct igt_live_test t;
+ unsigned long saved;
int n;
if (prio && !intel_engine_has_preemption(engine))
@@ -140,11 +156,12 @@ static int live_unlite_restore(struct intel_gt *gt, int prio)
err = -EIO;
break;
}
+ engine_heartbeat_disable(engine, &saved);
for (n = 0; n < ARRAY_SIZE(ce); n++) {
struct intel_context *tmp;
- tmp = intel_context_create(ctx, engine);
+ tmp = intel_context_create(engine);
if (IS_ERR(tmp)) {
err = PTR_ERR(tmp);
goto err_ce;
@@ -247,14 +264,13 @@ err_ce:
intel_context_put(ce[n]);
}
+ engine_heartbeat_enable(engine, saved);
if (igt_live_test_end(&t))
err = -EIO;
if (err)
break;
}
- kernel_context_close(ctx);
-err_spin:
igt_spinner_fini(&spin);
return err;
}
@@ -309,17 +325,17 @@ emit_semaphore_chain(struct i915_request *rq, struct i915_vma *vma, int idx)
static struct i915_request *
semaphore_queue(struct intel_engine_cs *engine, struct i915_vma *vma, int idx)
{
- struct i915_gem_context *ctx;
+ struct intel_context *ce;
struct i915_request *rq;
int err;
- ctx = kernel_context(engine->i915);
- if (!ctx)
- return ERR_PTR(-ENOMEM);
+ ce = intel_context_create(engine);
+ if (IS_ERR(ce))
+ return ERR_CAST(ce);
- rq = igt_request_alloc(ctx, engine);
+ rq = intel_context_create_request(ce);
if (IS_ERR(rq))
- goto out_ctx;
+ goto out_ce;
err = 0;
if (rq->engine->emit_init_breadcrumb)
@@ -332,8 +348,8 @@ semaphore_queue(struct intel_engine_cs *engine, struct i915_vma *vma, int idx)
if (err)
rq = ERR_PTR(err);
-out_ctx:
- kernel_context_close(ctx);
+out_ce:
+ intel_context_put(ce);
return rq;
}
@@ -348,7 +364,7 @@ release_queue(struct intel_engine_cs *engine,
struct i915_request *rq;
u32 *cs;
- rq = i915_request_create(engine->kernel_context);
+ rq = intel_engine_create_kernel_request(engine);
if (IS_ERR(rq))
return PTR_ERR(rq);
@@ -468,12 +484,16 @@ static int live_timeslice_preempt(void *arg)
enum intel_engine_id id;
for_each_engine(engine, gt, id) {
+ unsigned long saved;
+
if (!intel_engine_has_preemption(engine))
continue;
memset(vaddr, 0, PAGE_SIZE);
+ engine_heartbeat_disable(engine, &saved);
err = slice_semaphore_queue(engine, vma, count);
+ engine_heartbeat_enable(engine, saved);
if (err)
goto err_pin;
@@ -497,7 +517,7 @@ static struct i915_request *nop_request(struct intel_engine_cs *engine)
{
struct i915_request *rq;
- rq = i915_request_create(engine->kernel_context);
+ rq = intel_engine_create_kernel_request(engine);
if (IS_ERR(rq))
return rq;
@@ -507,13 +527,19 @@ static struct i915_request *nop_request(struct intel_engine_cs *engine)
return rq;
}
-static void wait_for_submit(struct intel_engine_cs *engine,
- struct i915_request *rq)
+static int wait_for_submit(struct intel_engine_cs *engine,
+ struct i915_request *rq,
+ unsigned long timeout)
{
+ timeout += jiffies;
do {
cond_resched();
intel_engine_flush_submission(engine);
- } while (!i915_request_is_active(rq));
+ if (i915_request_is_active(rq))
+ return 0;
+ } while (time_before(jiffies, timeout));
+
+ return -ETIME;
}
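+
+/*
+ * A caller's sketch: wait_for_submit() flushes submission and polls
+ * i915_request_is_active() until the deadline, returning -ETIME on
+ * failure so that the selftest can dump state instead of hanging:
+ *
+ *	err = wait_for_submit(engine, rq, HZ / 2);
+ *	if (err)
+ *		(report and bail out)
+ */
+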
static long timeslice_threshold(const struct intel_engine_cs *engine)
@@ -566,40 +592,49 @@ static int live_timeslice_queue(void *arg)
.priority = I915_USER_PRIORITY(I915_PRIORITY_MAX),
};
struct i915_request *rq, *nop;
+ unsigned long saved;
if (!intel_engine_has_preemption(engine))
continue;
+ engine_heartbeat_disable(engine, &saved);
memset(vaddr, 0, PAGE_SIZE);
/* ELSP[0]: semaphore wait */
rq = semaphore_queue(engine, vma, 0);
if (IS_ERR(rq)) {
err = PTR_ERR(rq);
- goto err_pin;
+ goto err_heartbeat;
}
engine->schedule(rq, &attr);
- wait_for_submit(engine, rq);
+ err = wait_for_submit(engine, rq, HZ / 2);
+ if (err) {
+ pr_err("%s: Timed out trying to submit semaphores\n",
+ engine->name);
+ goto err_rq;
+ }
/* ELSP[1]: nop request */
nop = nop_request(engine);
if (IS_ERR(nop)) {
err = PTR_ERR(nop);
- i915_request_put(rq);
- goto err_pin;
+ goto err_rq;
}
- wait_for_submit(engine, nop);
+ err = wait_for_submit(engine, nop, HZ / 2);
i915_request_put(nop);
+ if (err) {
+ pr_err("%s: Timed out trying to submit nop\n",
+ engine->name);
+ goto err_rq;
+ }
GEM_BUG_ON(i915_request_completed(rq));
GEM_BUG_ON(execlists_active(&engine->execlists) != rq);
/* Queue: semaphore signal, matching priority as semaphore */
err = release_queue(engine, vma, 1, effective_prio(rq));
- if (err) {
- i915_request_put(rq);
- goto err_pin;
- }
+ if (err)
+ goto err_rq;
intel_engine_flush_submission(engine);
if (!READ_ONCE(engine->execlists.timer.expires) &&
@@ -630,12 +665,14 @@ static int live_timeslice_queue(void *arg)
memset(vaddr, 0xff, PAGE_SIZE);
err = -EIO;
}
+err_rq:
i915_request_put(rq);
+err_heartbeat:
+ engine_heartbeat_enable(engine, saved);
if (err)
break;
}
-err_pin:
i915_vma_unpin(vma);
err_map:
i915_gem_object_unpin_map(obj);
@@ -748,15 +785,19 @@ static int live_busywait_preempt(void *arg)
*cs++ = 0;
intel_ring_advance(lo, cs);
+
+ i915_request_get(lo);
i915_request_add(lo);
if (wait_for(READ_ONCE(*map), 10)) {
+ i915_request_put(lo);
err = -ETIMEDOUT;
goto err_vma;
}
/* Low priority request should be busywaiting now */
if (i915_request_wait(lo, 0, 1) != -ETIME) {
+ i915_request_put(lo);
pr_err("%s: Busywaiting request did not!\n",
engine->name);
err = -EIO;
@@ -766,6 +807,7 @@ static int live_busywait_preempt(void *arg)
hi = igt_request_alloc(ctx_hi, engine);
if (IS_ERR(hi)) {
err = PTR_ERR(hi);
+ i915_request_put(lo);
goto err_vma;
}
@@ -773,6 +815,7 @@ static int live_busywait_preempt(void *arg)
if (IS_ERR(cs)) {
err = PTR_ERR(cs);
i915_request_add(hi);
+ i915_request_put(lo);
goto err_vma;
}
@@ -793,11 +836,13 @@ static int live_busywait_preempt(void *arg)
intel_engine_dump(engine, &p, "%s\n", engine->name);
GEM_TRACE_DUMP();
+ i915_request_put(lo);
intel_gt_set_wedged(gt);
err = -EIO;
goto err_vma;
}
GEM_BUG_ON(READ_ONCE(*map));
+ i915_request_put(lo);
if (igt_live_test_end(&t)) {
err = -EIO;
@@ -1108,7 +1153,7 @@ static int live_nopreempt(void *arg)
}
/* Low priority client, but unpreemptable! */
- rq_a->flags |= I915_REQUEST_NOPREEMPT;
+ __set_bit(I915_FENCE_FLAG_NOPREEMPT, &rq_a->fence.flags);
i915_request_add(rq_a);
if (!igt_wait_for_spinner(&a.spin, rq_a)) {
@@ -1187,13 +1232,13 @@ static int __cancel_active0(struct live_preempt_cancel *arg)
__func__, arg->engine->name))
return -EIO;
- clear_bit(CONTEXT_BANNED, &arg->a.ctx->flags);
rq = spinner_create_request(&arg->a.spin,
arg->a.ctx, arg->engine,
MI_ARB_CHECK);
if (IS_ERR(rq))
return PTR_ERR(rq);
+ clear_bit(CONTEXT_BANNED, &rq->context->flags);
i915_request_get(rq);
i915_request_add(rq);
if (!igt_wait_for_spinner(&arg->a.spin, rq)) {
@@ -1201,7 +1246,7 @@ static int __cancel_active0(struct live_preempt_cancel *arg)
goto out;
}
- i915_gem_context_set_banned(arg->a.ctx);
+ intel_context_set_banned(rq->context);
err = intel_engine_pulse(arg->engine);
if (err)
goto out;
@@ -1236,13 +1281,13 @@ static int __cancel_active1(struct live_preempt_cancel *arg)
__func__, arg->engine->name))
return -EIO;
- clear_bit(CONTEXT_BANNED, &arg->a.ctx->flags);
rq[0] = spinner_create_request(&arg->a.spin,
arg->a.ctx, arg->engine,
MI_NOOP); /* no preemption */
if (IS_ERR(rq[0]))
return PTR_ERR(rq[0]);
+ clear_bit(CONTEXT_BANNED, &rq[0]->context->flags);
i915_request_get(rq[0]);
i915_request_add(rq[0]);
if (!igt_wait_for_spinner(&arg->a.spin, rq[0])) {
@@ -1250,7 +1295,6 @@ static int __cancel_active1(struct live_preempt_cancel *arg)
goto out;
}
- clear_bit(CONTEXT_BANNED, &arg->b.ctx->flags);
rq[1] = spinner_create_request(&arg->b.spin,
arg->b.ctx, arg->engine,
MI_ARB_CHECK);
@@ -1259,13 +1303,14 @@ static int __cancel_active1(struct live_preempt_cancel *arg)
goto out;
}
+ clear_bit(CONTEXT_BANNED, &rq[1]->context->flags);
i915_request_get(rq[1]);
err = i915_request_await_dma_fence(rq[1], &rq[0]->fence);
i915_request_add(rq[1]);
if (err)
goto out;
- i915_gem_context_set_banned(arg->b.ctx);
+ intel_context_set_banned(rq[1]->context);
err = intel_engine_pulse(arg->engine);
if (err)
goto out;
@@ -1308,13 +1353,13 @@ static int __cancel_queued(struct live_preempt_cancel *arg)
__func__, arg->engine->name))
return -EIO;
- clear_bit(CONTEXT_BANNED, &arg->a.ctx->flags);
rq[0] = spinner_create_request(&arg->a.spin,
arg->a.ctx, arg->engine,
MI_ARB_CHECK);
if (IS_ERR(rq[0]))
return PTR_ERR(rq[0]);
+ clear_bit(CONTEXT_BANNED, &rq[0]->context->flags);
i915_request_get(rq[0]);
i915_request_add(rq[0]);
if (!igt_wait_for_spinner(&arg->a.spin, rq[0])) {
@@ -1322,13 +1367,13 @@ static int __cancel_queued(struct live_preempt_cancel *arg)
goto out;
}
- clear_bit(CONTEXT_BANNED, &arg->b.ctx->flags);
rq[1] = igt_request_alloc(arg->b.ctx, arg->engine);
if (IS_ERR(rq[1])) {
err = PTR_ERR(rq[1]);
goto out;
}
+ clear_bit(CONTEXT_BANNED, &rq[1]->context->flags);
i915_request_get(rq[1]);
err = i915_request_await_dma_fence(rq[1], &rq[0]->fence);
i915_request_add(rq[1]);
@@ -1349,7 +1394,7 @@ static int __cancel_queued(struct live_preempt_cancel *arg)
if (err)
goto out;
- i915_gem_context_set_banned(arg->a.ctx);
+ intel_context_set_banned(rq[2]->context);
err = intel_engine_pulse(arg->engine);
if (err)
goto out;
@@ -1396,13 +1441,13 @@ static int __cancel_hostile(struct live_preempt_cancel *arg)
return 0;
GEM_TRACE("%s(%s)\n", __func__, arg->engine->name);
- clear_bit(CONTEXT_BANNED, &arg->a.ctx->flags);
rq = spinner_create_request(&arg->a.spin,
arg->a.ctx, arg->engine,
MI_NOOP); /* preemption disabled */
if (IS_ERR(rq))
return PTR_ERR(rq);
+ clear_bit(CONTEXT_BANNED, &rq->context->flags);
i915_request_get(rq);
i915_request_add(rq);
if (!igt_wait_for_spinner(&arg->a.spin, rq)) {
@@ -1410,7 +1455,7 @@ static int __cancel_hostile(struct live_preempt_cancel *arg)
goto out;
}
- i915_gem_context_set_banned(arg->a.ctx);
+ intel_context_set_banned(rq->context);
err = intel_engine_pulse(arg->engine); /* force reset */
if (err)
goto out;
@@ -1665,6 +1710,7 @@ static int live_suppress_wait_preempt(void *arg)
{
struct intel_gt *gt = arg;
struct preempt_client client[4];
+ struct i915_request *rq[ARRAY_SIZE(client)] = {};
struct intel_engine_cs *engine;
enum intel_engine_id id;
int err = -ENOMEM;
@@ -1698,7 +1744,6 @@ static int live_suppress_wait_preempt(void *arg)
continue;
for (depth = 0; depth < ARRAY_SIZE(client); depth++) {
- struct i915_request *rq[ARRAY_SIZE(client)];
struct i915_request *dummy;
engine->execlists.preempt_hang.count = 0;
@@ -1708,18 +1753,22 @@ static int live_suppress_wait_preempt(void *arg)
goto err_client_3;
for (i = 0; i < ARRAY_SIZE(client); i++) {
- rq[i] = spinner_create_request(&client[i].spin,
- client[i].ctx, engine,
- MI_NOOP);
- if (IS_ERR(rq[i])) {
- err = PTR_ERR(rq[i]);
+ struct i915_request *this;
+
+ this = spinner_create_request(&client[i].spin,
+ client[i].ctx, engine,
+ MI_NOOP);
+ if (IS_ERR(this)) {
+ err = PTR_ERR(this);
goto err_wedged;
}
/* Disable NEWCLIENT promotion */
- __i915_active_fence_set(&i915_request_timeline(rq[i])->last_request,
+ __i915_active_fence_set(&i915_request_timeline(this)->last_request,
&dummy->fence);
- i915_request_add(rq[i]);
+
+ rq[i] = i915_request_get(this);
+ i915_request_add(this);
}
dummy_request_free(dummy);
@@ -1740,8 +1789,11 @@ static int live_suppress_wait_preempt(void *arg)
goto err_wedged;
}
- for (i = 0; i < ARRAY_SIZE(client); i++)
+ for (i = 0; i < ARRAY_SIZE(client); i++) {
igt_spinner_end(&client[i].spin);
+ i915_request_put(rq[i]);
+ rq[i] = NULL;
+ }
if (igt_flush_test(gt->i915))
goto err_wedged;
@@ -1769,8 +1821,10 @@ err_client_0:
return err;
err_wedged:
- for (i = 0; i < ARRAY_SIZE(client); i++)
+ for (i = 0; i < ARRAY_SIZE(client); i++) {
igt_spinner_end(&client[i].spin);
+ i915_request_put(rq[i]);
+ }
intel_gt_set_wedged(gt);
err = -EIO;
goto err_client_3;
@@ -1815,6 +1869,8 @@ static int live_chain_preempt(void *arg)
MI_ARB_CHECK);
if (IS_ERR(rq))
goto err_wedged;
+
+ i915_request_get(rq);
i915_request_add(rq);
ring_size = rq->wa_tail - rq->head;
@@ -1827,8 +1883,10 @@ static int live_chain_preempt(void *arg)
igt_spinner_end(&lo.spin);
if (i915_request_wait(rq, 0, HZ / 2) < 0) {
pr_err("Timed out waiting to flush %s\n", engine->name);
+ i915_request_put(rq);
goto err_wedged;
}
+ i915_request_put(rq);
if (igt_live_test_begin(&t, gt->i915, __func__, engine->name)) {
err = -EIO;
@@ -1862,6 +1920,8 @@ static int live_chain_preempt(void *arg)
rq = igt_request_alloc(hi.ctx, engine);
if (IS_ERR(rq))
goto err_wedged;
+
+ i915_request_get(rq);
i915_request_add(rq);
engine->schedule(rq, &attr);
@@ -1874,14 +1934,19 @@ static int live_chain_preempt(void *arg)
count);
intel_engine_dump(engine, &p,
"%s\n", engine->name);
+ i915_request_put(rq);
goto err_wedged;
}
igt_spinner_end(&lo.spin);
+ i915_request_put(rq);
rq = igt_request_alloc(lo.ctx, engine);
if (IS_ERR(rq))
goto err_wedged;
+
+ i915_request_get(rq);
i915_request_add(rq);
+
if (i915_request_wait(rq, 0, HZ / 5) < 0) {
struct drm_printer p =
drm_info_printer(gt->i915->drm.dev);
@@ -1890,8 +1955,11 @@ static int live_chain_preempt(void *arg)
count);
intel_engine_dump(engine, &p,
"%s\n", engine->name);
+
+ i915_request_put(rq);
goto err_wedged;
}
+ i915_request_put(rq);
}
if (igt_live_test_end(&t)) {
@@ -1915,6 +1983,201 @@ err_wedged:
goto err_client_lo;
}
+static int create_gang(struct intel_engine_cs *engine,
+ struct i915_request **prev)
+{
+ struct drm_i915_gem_object *obj;
+ struct intel_context *ce;
+ struct i915_request *rq;
+ struct i915_vma *vma;
+ u32 *cs;
+ int err;
+
+ ce = intel_context_create(engine);
+ if (IS_ERR(ce))
+ return PTR_ERR(ce);
+
+ obj = i915_gem_object_create_internal(engine->i915, 4096);
+ if (IS_ERR(obj)) {
+ err = PTR_ERR(obj);
+ goto err_ce;
+ }
+
+ vma = i915_vma_instance(obj, ce->vm, NULL);
+ if (IS_ERR(vma)) {
+ err = PTR_ERR(vma);
+ goto err_obj;
+ }
+
+ err = i915_vma_pin(vma, 0, 0, PIN_USER);
+ if (err)
+ goto err_obj;
+
+ cs = i915_gem_object_pin_map(obj, I915_MAP_WC);
+ if (IS_ERR(cs)) {
+ err = PTR_ERR(cs);
+ goto err_obj;
+ }
+
+ /* Semaphore target: spin until zero */
+ *cs++ = MI_ARB_ON_OFF | MI_ARB_ENABLE;
+
+ *cs++ = MI_SEMAPHORE_WAIT |
+ MI_SEMAPHORE_POLL |
+ MI_SEMAPHORE_SAD_EQ_SDD;
+ *cs++ = 0;
+ *cs++ = lower_32_bits(vma->node.start);
+ *cs++ = upper_32_bits(vma->node.start);
+
+ if (*prev) {
+ u64 offset = (*prev)->batch->node.start;
+
+ /* Terminate the spinner in the next lower priority batch. */
+ *cs++ = MI_STORE_DWORD_IMM_GEN4;
+ *cs++ = lower_32_bits(offset);
+ *cs++ = upper_32_bits(offset);
+ *cs++ = 0;
+ }
+
+ *cs++ = MI_BATCH_BUFFER_END;
+ i915_gem_object_flush_map(obj);
+ i915_gem_object_unpin_map(obj);
+
+ rq = intel_context_create_request(ce);
+ if (IS_ERR(rq)) {
+ err = PTR_ERR(rq);
+ goto err_obj;
+ }
+
+ rq->batch = vma;
+ i915_request_get(rq);
+
+ i915_vma_lock(vma);
+ err = i915_request_await_object(rq, vma->obj, false);
+ if (!err)
+ err = i915_vma_move_to_active(vma, rq, 0);
+ if (!err)
+ err = rq->engine->emit_bb_start(rq,
+ vma->node.start,
+ PAGE_SIZE, 0);
+ i915_vma_unlock(vma);
+ i915_request_add(rq);
+ if (err)
+ goto err_rq;
+
+ i915_gem_object_put(obj);
+ intel_context_put(ce);
+
+ rq->client_link.next = &(*prev)->client_link;
+ *prev = rq;
+ return 0;
+
+err_rq:
+ i915_request_put(rq);
+err_obj:
+ i915_gem_object_put(obj);
+err_ce:
+ intel_context_put(ce);
+ return err;
+}
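+
+/*
+ * Dword layout of each gang batch as emitted above (sketch; the gen8+
+ * 64b-address forms are assumed):
+ *
+ *	dw0	MI_ARB_ON_OFF | MI_ARB_ENABLE	(also the semaphore target)
+ *	dw1	MI_SEMAPHORE_WAIT | POLL | SAD_EQ_SDD
+ *	dw2	0				(the value to match)
+ *	dw3-4	address of dw0 (vma->node.start)
+ *	then, if a predecessor exists,
+ *	MI_STORE_DWORD_IMM_GEN4 writing 0 over the predecessor's dw0
+ *	MI_BATCH_BUFFER_END
+ *
+ * Each batch spins until its own dw0 reads 0; live_preempt_gang()
+ * releases the newest (highest priority) batch from the CPU and the
+ * release then cascades down to the oldest.
+ */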
+
+static int live_preempt_gang(void *arg)
+{
+ struct intel_gt *gt = arg;
+ struct intel_engine_cs *engine;
+ enum intel_engine_id id;
+
+ if (!HAS_LOGICAL_RING_PREEMPTION(gt->i915))
+ return 0;
+
+ /*
+ * Build as long a chain of preempters as we can, with each
+ * request at a higher priority than the last. Once we are ready, we
+ * release the last batch, which then percolates down the chain, each releasing
+ * the next oldest in turn. The intent is to simply push as hard as we
+ * can with the number of preemptions, trying to exceed narrow HW
+ * limits. At a minimum, we insist that we can sort all the user
+ * high priority levels into execution order.
+ */
+
+ for_each_engine(engine, gt, id) {
+ struct i915_request *rq = NULL;
+ struct igt_live_test t;
+ IGT_TIMEOUT(end_time);
+ int prio = 0;
+ int err = 0;
+ u32 *cs;
+
+ if (!intel_engine_has_preemption(engine))
+ continue;
+
+ if (igt_live_test_begin(&t, gt->i915, __func__, engine->name))
+ return -EIO;
+
+ do {
+ struct i915_sched_attr attr = {
+ .priority = I915_USER_PRIORITY(prio++),
+ };
+
+ err = create_gang(engine, &rq);
+ if (err)
+ break;
+
+ /* Submit each spinner at increasing priority */
+ engine->schedule(rq, &attr);
+
+ if (prio <= I915_PRIORITY_MAX)
+ continue;
+
+ if (prio > (INT_MAX >> I915_USER_PRIORITY_SHIFT))
+ break;
+
+ if (__igt_timeout(end_time, NULL))
+ break;
+ } while (1);
+ pr_debug("%s: Preempt chain of %d requests\n",
+ engine->name, prio);
+
+ /*
+ * The last spinner submitted is the highest priority and
+ * should execute first. When that spinner completes,
+ * it will terminate the next lowest spinner until there
+ * are no more spinners and the gang is complete.
+ */
+ if (!rq) /* failed to build even the first gang member */
+ return err;
+
+ cs = i915_gem_object_pin_map(rq->batch->obj, I915_MAP_WC);
+ if (!IS_ERR(cs)) {
+ *cs = 0;
+ i915_gem_object_unpin_map(rq->batch->obj);
+ } else {
+ err = PTR_ERR(cs);
+ intel_gt_set_wedged(gt);
+ }
+
+ while (rq) { /* wait for each rq from highest to lowest prio */
+ struct i915_request *n =
+ list_next_entry(rq, client_link);
+
+ if (err == 0 && i915_request_wait(rq, 0, HZ / 5) < 0) {
+ struct drm_printer p =
+ drm_info_printer(engine->i915->drm.dev);
+
+ pr_err("Failed to flush chain of %d requests, at %d\n",
+ prio, rq_prio(rq) >> I915_USER_PRIORITY_SHIFT);
+ intel_engine_dump(engine, &p,
+ "%s\n", engine->name);
+
+ err = -ETIME;
+ }
+
+ i915_request_put(rq);
+ rq = n;
+ }
+
+ if (igt_live_test_end(&t))
+ err = -EIO;
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+
static int live_preempt_hang(void *arg)
{
struct intel_gt *gt = arg;
@@ -2391,28 +2654,18 @@ static int nop_virtual_engine(struct intel_gt *gt,
#define CHAIN BIT(0)
{
IGT_TIMEOUT(end_time);
- struct i915_request *request[16];
- struct i915_gem_context *ctx[16];
+ struct i915_request *request[16] = {};
struct intel_context *ve[16];
unsigned long n, prime, nc;
struct igt_live_test t;
ktime_t times[2] = {};
int err;
- GEM_BUG_ON(!nctx || nctx > ARRAY_SIZE(ctx));
+ GEM_BUG_ON(!nctx || nctx > ARRAY_SIZE(ve));
for (n = 0; n < nctx; n++) {
- ctx[n] = kernel_context(gt->i915);
- if (!ctx[n]) {
- err = -ENOMEM;
- nctx = n;
- goto out;
- }
-
- ve[n] = intel_execlists_create_virtual(ctx[n],
- siblings, nsibling);
+ ve[n] = intel_execlists_create_virtual(siblings, nsibling);
if (IS_ERR(ve[n])) {
- kernel_context_close(ctx[n]);
err = PTR_ERR(ve[n]);
nctx = n;
goto out;
@@ -2421,7 +2674,6 @@ static int nop_virtual_engine(struct intel_gt *gt,
err = intel_context_pin(ve[n]);
if (err) {
intel_context_put(ve[n]);
- kernel_context_close(ctx[n]);
nctx = n;
goto out;
}
@@ -2437,27 +2689,35 @@ static int nop_virtual_engine(struct intel_gt *gt,
if (flags & CHAIN) {
for (nc = 0; nc < nctx; nc++) {
for (n = 0; n < prime; n++) {
- request[nc] =
- i915_request_create(ve[nc]);
- if (IS_ERR(request[nc])) {
- err = PTR_ERR(request[nc]);
+ struct i915_request *rq;
+
+ rq = i915_request_create(ve[nc]);
+ if (IS_ERR(rq)) {
+ err = PTR_ERR(rq);
goto out;
}
- i915_request_add(request[nc]);
+ if (request[nc])
+ i915_request_put(request[nc]);
+ request[nc] = i915_request_get(rq);
+ i915_request_add(rq);
}
}
} else {
for (n = 0; n < prime; n++) {
for (nc = 0; nc < nctx; nc++) {
- request[nc] =
- i915_request_create(ve[nc]);
- if (IS_ERR(request[nc])) {
- err = PTR_ERR(request[nc]);
+ struct i915_request *rq;
+
+ rq = i915_request_create(ve[nc]);
+ if (IS_ERR(rq)) {
+ err = PTR_ERR(rq);
goto out;
}
- i915_request_add(request[nc]);
+ if (request[nc])
+ i915_request_put(request[nc]);
+ request[nc] = i915_request_get(rq);
+ i915_request_add(rq);
}
}
}
@@ -2483,6 +2743,11 @@ static int nop_virtual_engine(struct intel_gt *gt,
if (prime == 1)
times[0] = times[1];
+ for (nc = 0; nc < nctx; nc++) {
+ i915_request_put(request[nc]);
+ request[nc] = NULL;
+ }
+
if (__igt_timeout(end_time, NULL))
break;
}
@@ -2500,9 +2765,9 @@ out:
err = -EIO;
for (nc = 0; nc < nctx; nc++) {
+ i915_request_put(request[nc]);
intel_context_unpin(ve[nc]);
intel_context_put(ve[nc]);
- kernel_context_close(ctx[nc]);
}
return err;
}
@@ -2561,7 +2826,6 @@ static int mask_virtual_engine(struct intel_gt *gt,
unsigned int nsibling)
{
struct i915_request *request[MAX_ENGINE_INSTANCE + 1];
- struct i915_gem_context *ctx;
struct intel_context *ve;
struct igt_live_test t;
unsigned int n;
@@ -2572,11 +2836,7 @@ static int mask_virtual_engine(struct intel_gt *gt,
* restrict it to our desired engine within the virtual engine.
*/
- ctx = kernel_context(gt->i915);
- if (!ctx)
- return -ENOMEM;
-
- ve = intel_execlists_create_virtual(ctx, siblings, nsibling);
+ ve = intel_execlists_create_virtual(siblings, nsibling);
if (IS_ERR(ve)) {
err = PTR_ERR(ve);
goto out_close;
@@ -2644,7 +2904,6 @@ out_unpin:
out_put:
intel_context_put(ve);
out_close:
- kernel_context_close(ctx);
return err;
}
@@ -2684,7 +2943,6 @@ static int preserved_virtual_engine(struct intel_gt *gt,
unsigned int nsibling)
{
struct i915_request *last = NULL;
- struct i915_gem_context *ctx;
struct intel_context *ve;
struct i915_vma *scratch;
struct igt_live_test t;
@@ -2692,17 +2950,11 @@ static int preserved_virtual_engine(struct intel_gt *gt,
int err = 0;
u32 *cs;
- ctx = kernel_context(gt->i915);
- if (!ctx)
- return -ENOMEM;
-
scratch = create_scratch(siblings[0]->gt);
- if (IS_ERR(scratch)) {
- err = PTR_ERR(scratch);
- goto out_close;
- }
+ if (IS_ERR(scratch))
+ return PTR_ERR(scratch);
- ve = intel_execlists_create_virtual(ctx, siblings, nsibling);
+ ve = intel_execlists_create_virtual(siblings, nsibling);
if (IS_ERR(ve)) {
err = PTR_ERR(ve);
goto out_scratch;
@@ -2785,8 +3037,6 @@ out_put:
intel_context_put(ve);
out_scratch:
i915_vma_unpin_and_release(&scratch, 0);
-out_close:
- kernel_context_close(ctx);
return err;
}
@@ -2838,16 +3088,54 @@ static int bond_virtual_engine(struct intel_gt *gt,
#define BOND_SCHEDULE BIT(0)
{
struct intel_engine_cs *master;
- struct i915_gem_context *ctx;
struct i915_request *rq[16];
enum intel_engine_id id;
+ struct igt_spinner spin;
unsigned long n;
int err;
+ /*
+ * A set of bonded requests is intended to be run concurrently
+ * across a number of engines. We use one request per-engine
+ * and a magic fence to schedule each of the bonded requests
+ * at the same time. A consequence of our current scheduler is that
+ * we only move requests to the HW ready queue when the request
+ * becomes ready, that is when all of its prerequisite fences have
+ * been signaled. As one of those fences is the master submit fence,
+ * there is a delay on all secondary fences as the HW may be
+ * currently busy. Equally, as all the requests are independent,
+ * they may have other fences that delay individual request
+ * submission to HW. Ergo, we do not guarantee that all requests are
+ * immediately submitted to HW at the same time, just that if the
+ * rules are abided by, they are ready at the same time as the
+ * first is submitted. Userspace can embed semaphores in its batch
+ * to ensure parallel execution of its phases as it requires.
+ * Though naturally it gets requested that perhaps the scheduler should
+ * take care of parallel execution, even across preemption events on
+ * different HW. (The proper answer is of course "lalalala".)
+ *
+ * With the submit-fence, we have identified three possible phases
+ * of synchronisation depending on the master fence: queued (not
+ * ready), executing, and signaled. The first two are quite simple
+ * and checked below. However, the signaled master fence handling is
+ * contentious. Currently we do not distinguish between a signaled
+ * fence and an expired fence, as once signaled it does not convey
+ * any information about the previous execution. It may even be freed
+ * and hence checking later it may not exist at all. Ergo we currently
+ * do not apply the bonding constraint for an already signaled fence,
+ * as our expectation is that it should not constrain the secondaries
+ * and is outside of the scope of the bonded request API (i.e. all
+ * userspace requests are meant to be running in parallel). As
+ * it imposes no constraint, and is effectively a no-op, we do not
+ * check below as normal execution flows are checked extensively above.
+ *
+ * XXX Is the degenerate handling of signaled submit fences the
+ * expected behaviour for userspace?
+ */
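+
+	/*
+	 * In short, for a single master/secondary pair, the three phases
+	 * exercised (or deliberately not exercised) below are:
+	 *
+	 *	queued:    master held back by an onstack fence; the
+	 *		   secondary must wait and submit alongside it
+	 *	executing: master is already spinning on the HW; a
+	 *		   secondary submitted meanwhile still honours
+	 *		   the bond placement
+	 *	signaled:  master already completed; no constraint is
+	 *		   applied and so nothing is checked
+	 */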
+
GEM_BUG_ON(nsibling >= ARRAY_SIZE(rq) - 1);
- ctx = kernel_context(gt->i915);
- if (!ctx)
+ if (igt_spinner_init(&spin, gt))
return -ENOMEM;
err = 0;
@@ -2860,7 +3148,9 @@ static int bond_virtual_engine(struct intel_gt *gt,
memset_p((void *)rq, ERR_PTR(-EINVAL), ARRAY_SIZE(rq));
- rq[0] = igt_request_alloc(ctx, master);
+ rq[0] = igt_spinner_create_request(&spin,
+ master->kernel_context,
+ MI_NOOP);
if (IS_ERR(rq[0])) {
err = PTR_ERR(rq[0]);
goto out;
@@ -2873,16 +3163,21 @@ static int bond_virtual_engine(struct intel_gt *gt,
&fence,
GFP_KERNEL);
}
+
i915_request_add(rq[0]);
if (err < 0)
goto out;
+ if (!(flags & BOND_SCHEDULE) &&
+ !igt_wait_for_spinner(&spin, rq[0])) {
+ err = -EIO;
+ goto out;
+ }
+
for (n = 0; n < nsibling; n++) {
struct intel_context *ve;
- ve = intel_execlists_create_virtual(ctx,
- siblings,
- nsibling);
+ ve = intel_execlists_create_virtual(siblings, nsibling);
if (IS_ERR(ve)) {
err = PTR_ERR(ve);
onstack_fence_fini(&fence);
@@ -2924,6 +3219,8 @@ static int bond_virtual_engine(struct intel_gt *gt,
}
}
onstack_fence_fini(&fence);
+ intel_engine_flush_submission(master);
+ igt_spinner_end(&spin);
if (i915_request_wait(rq[0], 0, HZ / 10) < 0) {
pr_err("Master request did not execute (on %s)!\n",
@@ -2960,7 +3257,7 @@ out:
if (igt_flush_test(gt->i915))
err = -EIO;
- kernel_context_close(ctx);
+ igt_spinner_fini(&spin);
return err;
}
@@ -3028,6 +3325,7 @@ int intel_execlists_live_selftests(struct drm_i915_private *i915)
SUBTEST(live_suppress_self_preempt),
SUBTEST(live_suppress_wait_preempt),
SUBTEST(live_chain_preempt),
+ SUBTEST(live_preempt_gang),
SUBTEST(live_preempt_hang),
SUBTEST(live_preempt_timeout),
SUBTEST(live_preempt_smoke),
@@ -3080,7 +3378,7 @@ static int live_lrc_layout(void *arg)
struct intel_gt *gt = arg;
struct intel_engine_cs *engine;
enum intel_engine_id id;
- u32 *mem;
+ u32 *lrc;
int err;
/*
@@ -3088,13 +3386,13 @@ static int live_lrc_layout(void *arg)
* match the layout saved by HW.
*/
- mem = kmalloc(PAGE_SIZE, GFP_KERNEL);
- if (!mem)
+ lrc = kmalloc(PAGE_SIZE, GFP_KERNEL);
+ if (!lrc)
return -ENOMEM;
err = 0;
for_each_engine(engine, gt, id) {
- u32 *hw, *lrc;
+ u32 *hw;
int dw;
if (!engine->default_state)
@@ -3108,8 +3406,7 @@ static int live_lrc_layout(void *arg)
}
hw += LRC_STATE_PN * PAGE_SIZE / sizeof(*hw);
- lrc = memset(mem, 0, PAGE_SIZE);
- execlists_init_reg_state(lrc,
+ execlists_init_reg_state(memset(lrc, POISON_INUSE, PAGE_SIZE),
engine->kernel_context,
engine,
engine->kernel_context->ring,
@@ -3124,6 +3421,13 @@ static int live_lrc_layout(void *arg)
continue;
}
+ if (lrc[dw] == 0) {
+ pr_debug("%s: skipped instruction %x at dword %d\n",
+ engine->name, lri, dw);
+ dw++;
+ continue;
+ }
+
if ((lri & GENMASK(31, 23)) != MI_INSTR(0x22, 0)) {
pr_err("%s: Expected LRI command at dword %d, found %08x\n",
engine->name, dw, lri);
@@ -3172,7 +3476,7 @@ static int live_lrc_layout(void *arg)
break;
}
- kfree(mem);
+ kfree(lrc);
return err;
}
@@ -3207,12 +3511,12 @@ static int live_lrc_fixed(void *arg)
} tbl[] = {
{
i915_mmio_reg_offset(RING_START(engine->mmio_base)),
- CTX_RING_BUFFER_START - 1,
+ CTX_RING_START - 1,
"RING_START"
},
{
i915_mmio_reg_offset(RING_CTL(engine->mmio_base)),
- CTX_RING_BUFFER_CONTROL - 1,
+ CTX_RING_CTL - 1,
"RING_CTL"
},
{
@@ -3231,7 +3535,7 @@ static int live_lrc_fixed(void *arg)
"RING_MI_MODE"
},
{
- engine->mmio_base + 0x110,
+ i915_mmio_reg_offset(RING_BBSTATE(engine->mmio_base)),
CTX_BB_STATE - 1,
"BB_STATE"
},
@@ -3270,8 +3574,7 @@ static int live_lrc_fixed(void *arg)
return err;
}
-static int __live_lrc_state(struct i915_gem_context *fixme,
- struct intel_engine_cs *engine,
+static int __live_lrc_state(struct intel_engine_cs *engine,
struct i915_vma *scratch)
{
struct intel_context *ce;
@@ -3286,7 +3589,7 @@ static int __live_lrc_state(struct i915_gem_context *fixme,
int err;
int n;
- ce = intel_context_create(fixme, engine);
+ ce = intel_context_create(engine);
if (IS_ERR(ce))
return PTR_ERR(ce);
@@ -3360,7 +3663,6 @@ static int live_lrc_state(void *arg)
{
struct intel_gt *gt = arg;
struct intel_engine_cs *engine;
- struct i915_gem_context *fixme;
struct i915_vma *scratch;
enum intel_engine_id id;
int err = 0;
@@ -3370,18 +3672,12 @@ static int live_lrc_state(void *arg)
* intel_context.
*/
- fixme = kernel_context(gt->i915);
- if (!fixme)
- return -ENOMEM;
-
scratch = create_scratch(gt);
- if (IS_ERR(scratch)) {
- err = PTR_ERR(scratch);
- goto out_close;
- }
+ if (IS_ERR(scratch))
+ return PTR_ERR(scratch);
for_each_engine(engine, gt, id) {
- err = __live_lrc_state(fixme, engine, scratch);
+ err = __live_lrc_state(engine, scratch);
if (err)
break;
}
@@ -3390,8 +3686,6 @@ static int live_lrc_state(void *arg)
err = -EIO;
i915_vma_unpin_and_release(&scratch, 0);
-out_close:
- kernel_context_close(fixme);
return err;
}
@@ -3401,7 +3695,7 @@ static int gpr_make_dirty(struct intel_engine_cs *engine)
u32 *cs;
int n;
- rq = i915_request_create(engine->kernel_context);
+ rq = intel_engine_create_kernel_request(engine);
if (IS_ERR(rq))
return PTR_ERR(rq);
@@ -3424,8 +3718,7 @@ static int gpr_make_dirty(struct intel_engine_cs *engine)
return 0;
}
-static int __live_gpr_clear(struct i915_gem_context *fixme,
- struct intel_engine_cs *engine,
+static int __live_gpr_clear(struct intel_engine_cs *engine,
struct i915_vma *scratch)
{
struct intel_context *ce;
@@ -3441,7 +3734,7 @@ static int __live_gpr_clear(struct i915_gem_context *fixme,
if (err)
return err;
- ce = intel_context_create(fixme, engine);
+ ce = intel_context_create(engine);
if (IS_ERR(ce))
return PTR_ERR(ce);
@@ -3503,7 +3796,6 @@ static int live_gpr_clear(void *arg)
{
struct intel_gt *gt = arg;
struct intel_engine_cs *engine;
- struct i915_gem_context *fixme;
struct i915_vma *scratch;
enum intel_engine_id id;
int err = 0;
@@ -3513,18 +3805,12 @@ static int live_gpr_clear(void *arg)
* to avoid leaking any information from previous contexts.
*/
- fixme = kernel_context(gt->i915);
- if (!fixme)
- return -ENOMEM;
-
scratch = create_scratch(gt);
- if (IS_ERR(scratch)) {
- err = PTR_ERR(scratch);
- goto out_close;
- }
+ if (IS_ERR(scratch))
+ return PTR_ERR(scratch);
for_each_engine(engine, gt, id) {
- err = __live_gpr_clear(fixme, engine, scratch);
+ err = __live_gpr_clear(engine, scratch);
if (err)
break;
}
@@ -3533,8 +3819,6 @@ static int live_gpr_clear(void *arg)
err = -EIO;
i915_vma_unpin_and_release(&scratch, 0);
-out_close:
- kernel_context_close(fixme);
return err;
}
diff --git a/drivers/gpu/drm/i915/gt/selftest_mocs.c b/drivers/gpu/drm/i915/gt/selftest_mocs.c
new file mode 100644
index 000000000000..de1f83100fb6
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/selftest_mocs.c
@@ -0,0 +1,419 @@
+/*
+ * SPDX-License-Identifier: MIT
+ *
+ * Copyright © 2019 Intel Corporation
+ */
+
+#include "gt/intel_engine_pm.h"
+#include "i915_selftest.h"
+
+#include "gem/selftests/mock_context.h"
+#include "selftests/igt_reset.h"
+#include "selftests/igt_spinner.h"
+
+struct live_mocs {
+ struct drm_i915_mocs_table table;
+ struct i915_vma *scratch;
+ void *vaddr;
+};
+
+static int request_add_sync(struct i915_request *rq, int err)
+{
+ i915_request_get(rq);
+ i915_request_add(rq);
+ if (i915_request_wait(rq, 0, HZ / 5) < 0)
+ err = -ETIME;
+ i915_request_put(rq);
+
+ return err;
+}
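+
+/*
+ * Illustrative use (mirroring check_mocs_engine() below): the helper
+ * consumes the request, waits up to 200ms for completion, and folds a
+ * timeout into any earlier error:
+ *
+ *	err = read_mocs_table(rq, &arg->table, &offset);
+ *	err = request_add_sync(rq, err);
+ */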
+
+static int request_add_spin(struct i915_request *rq, struct igt_spinner *spin)
+{
+ int err = 0;
+
+ i915_request_get(rq);
+ i915_request_add(rq);
+ if (spin && !igt_wait_for_spinner(spin, rq))
+ err = -ETIME;
+ i915_request_put(rq);
+
+ return err;
+}
+
+static struct i915_vma *create_scratch(struct intel_gt *gt)
+{
+ struct drm_i915_gem_object *obj;
+ struct i915_vma *vma;
+ int err;
+
+ obj = i915_gem_object_create_internal(gt->i915, PAGE_SIZE);
+ if (IS_ERR(obj))
+ return ERR_CAST(obj);
+
+ i915_gem_object_set_cache_coherency(obj, I915_CACHING_CACHED);
+
+ vma = i915_vma_instance(obj, &gt->ggtt->vm, NULL);
+ if (IS_ERR(vma)) {
+ i915_gem_object_put(obj);
+ return vma;
+ }
+
+ err = i915_vma_pin(vma, 0, 0, PIN_GLOBAL);
+ if (err) {
+ i915_gem_object_put(obj);
+ return ERR_PTR(err);
+ }
+
+ return vma;
+}
+
+static int live_mocs_init(struct live_mocs *arg, struct intel_gt *gt)
+{
+ int err;
+
+ if (!get_mocs_settings(gt->i915, &arg->table))
+ return -EINVAL;
+
+ arg->scratch = create_scratch(gt);
+ if (IS_ERR(arg->scratch))
+ return PTR_ERR(arg->scratch);
+
+ arg->vaddr = i915_gem_object_pin_map(arg->scratch->obj, I915_MAP_WB);
+ if (IS_ERR(arg->vaddr)) {
+ err = PTR_ERR(arg->vaddr);
+ goto err_scratch;
+ }
+
+ return 0;
+
+err_scratch:
+ i915_vma_unpin_and_release(&arg->scratch, 0);
+ return err;
+}
+
+static void live_mocs_fini(struct live_mocs *arg)
+{
+ i915_vma_unpin_and_release(&arg->scratch, I915_VMA_RELEASE_MAP);
+}
+
+static int read_regs(struct i915_request *rq,
+ u32 addr, unsigned int count,
+ u32 *offset)
+{
+ unsigned int i;
+ u32 *cs;
+
+ GEM_BUG_ON(!IS_ALIGNED(*offset, sizeof(u32)));
+
+ cs = intel_ring_begin(rq, 4 * count);
+ if (IS_ERR(cs))
+ return PTR_ERR(cs);
+
+ for (i = 0; i < count; i++) {
+ *cs++ = MI_STORE_REGISTER_MEM_GEN8 | MI_USE_GGTT;
+ *cs++ = addr;
+ *cs++ = *offset;
+ *cs++ = 0;
+
+ addr += sizeof(u32);
+ *offset += sizeof(u32);
+ }
+
+ intel_ring_advance(rq, cs);
+
+ return 0;
+}
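+
+/*
+ * Each register read above costs exactly four dwords, hence the
+ * intel_ring_begin(rq, 4 * count) reservation:
+ *
+ *	SRM opcode | MI_USE_GGTT
+ *	source register address
+ *	destination offset (lower 32 bits)
+ *	0 (upper 32 bits of the destination)
+ */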
+
+static int read_mocs_table(struct i915_request *rq,
+ const struct drm_i915_mocs_table *table,
+ u32 *offset)
+{
+ u32 addr;
+
+ if (HAS_GLOBAL_MOCS_REGISTERS(rq->i915))
+ addr = global_mocs_offset();
+ else
+ addr = mocs_offset(rq->engine);
+
+ return read_regs(rq, addr, table->n_entries, offset);
+}
+
+static int read_l3cc_table(struct i915_request *rq,
+ const struct drm_i915_mocs_table *table,
+ u32 *offset)
+{
+ u32 addr = i915_mmio_reg_offset(GEN9_LNCFCMOCS(0));
+
+ return read_regs(rq, addr, (table->n_entries + 1) / 2, offset);
+}
+
+static int check_mocs_table(struct intel_engine_cs *engine,
+ const struct drm_i915_mocs_table *table,
+ u32 **vaddr)
+{
+ unsigned int i;
+ u32 expect;
+
+ for_each_mocs(expect, table, i) {
+ if (**vaddr != expect) {
+ pr_err("%s: Invalid MOCS[%d] entry, found %08x, expected %08x\n",
+ engine->name, i, **vaddr, expect);
+ return -EINVAL;
+ }
+ ++*vaddr;
+ }
+
+ return 0;
+}
+
+static bool mcr_range(struct drm_i915_private *i915, u32 offset)
+{
+ /*
+ * Registers in this range are affected by the MCR selector
+ * which only controls CPU initiated MMIO. Routing does not
+ * work for CS access so we cannot verify them on this path.
+ */
+ return INTEL_GEN(i915) >= 8 && offset >= 0xb000 && offset <= 0xb4ff;
+}
+
+static int check_l3cc_table(struct intel_engine_cs *engine,
+ const struct drm_i915_mocs_table *table,
+ u32 **vaddr)
+{
+ /* Can we read the MCR range 0xb000 directly? See intel_workarounds! */
+ u32 reg = i915_mmio_reg_offset(GEN9_LNCFCMOCS(0));
+ unsigned int i;
+ u32 expect;
+
+ for_each_l3cc(expect, table, i) {
+ if (!mcr_range(engine->i915, reg) && **vaddr != expect) {
+ pr_err("%s: Invalid L3CC[%d] entry, found %08x, expected %08x\n",
+ engine->name, i, **vaddr, expect);
+ return -EINVAL;
+ }
+ ++*vaddr;
+ reg += 4;
+ }
+
+ return 0;
+}
+
+static int check_mocs_engine(struct live_mocs *arg,
+ struct intel_context *ce)
+{
+ struct i915_vma *vma = arg->scratch;
+ struct i915_request *rq;
+ u32 offset;
+ u32 *vaddr;
+ int err;
+
+ memset32(arg->vaddr, STACK_MAGIC, PAGE_SIZE / sizeof(u32));
+
+ rq = intel_context_create_request(ce);
+ if (IS_ERR(rq))
+ return PTR_ERR(rq);
+
+ i915_vma_lock(vma);
+ err = i915_request_await_object(rq, vma->obj, true);
+ if (!err)
+ err = i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE);
+ i915_vma_unlock(vma);
+
+ /* Read the mocs tables back using SRM */
+ offset = i915_ggtt_offset(vma);
+ if (!err)
+ err = read_mocs_table(rq, &arg->table, &offset);
+ if (!err && ce->engine->class == RENDER_CLASS)
+ err = read_l3cc_table(rq, &arg->table, &offset);
+ offset -= i915_ggtt_offset(vma);
+ GEM_BUG_ON(offset > PAGE_SIZE);
+
+ err = request_add_sync(rq, err);
+ if (err)
+ return err;
+
+ /* Compare the results against the expected tables */
+ vaddr = arg->vaddr;
+ if (!err)
+ err = check_mocs_table(ce->engine, &arg->table, &vaddr);
+ if (!err && ce->engine->class == RENDER_CLASS)
+ err = check_l3cc_table(ce->engine, &arg->table, &vaddr);
+ if (err)
+ return err;
+
+ GEM_BUG_ON(arg->vaddr + offset != vaddr);
+ return 0;
+}
+
+static int live_mocs_kernel(void *arg)
+{
+ struct intel_gt *gt = arg;
+ struct intel_engine_cs *engine;
+ enum intel_engine_id id;
+ struct live_mocs mocs;
+ int err;
+
+ /* Basic check that the system is configured with the expected mocs table */
+
+ err = live_mocs_init(&mocs, gt);
+ if (err)
+ return err;
+
+ for_each_engine(engine, gt, id) {
+ intel_engine_pm_get(engine);
+ err = check_mocs_engine(&mocs, engine->kernel_context);
+ intel_engine_pm_put(engine);
+ if (err)
+ break;
+ }
+
+ live_mocs_fini(&mocs);
+ return err;
+}
+
+static int live_mocs_clean(void *arg)
+{
+ struct intel_gt *gt = arg;
+ struct intel_engine_cs *engine;
+ enum intel_engine_id id;
+ struct live_mocs mocs;
+ int err;
+
+ /* Every new context should see the same mocs table */
+
+ err = live_mocs_init(&mocs, gt);
+ if (err)
+ return err;
+
+ for_each_engine(engine, gt, id) {
+ struct intel_context *ce;
+
+ ce = intel_context_create(engine);
+ if (IS_ERR(ce)) {
+ err = PTR_ERR(ce);
+ break;
+ }
+
+ err = check_mocs_engine(&mocs, ce);
+ intel_context_put(ce);
+ if (err)
+ break;
+ }
+
+ live_mocs_fini(&mocs);
+ return err;
+}
+
+static int active_engine_reset(struct intel_context *ce,
+ const char *reason)
+{
+ struct igt_spinner spin;
+ struct i915_request *rq;
+ int err;
+
+ err = igt_spinner_init(&spin, ce->engine->gt);
+ if (err)
+ return err;
+
+ rq = igt_spinner_create_request(&spin, ce, MI_NOOP);
+ if (IS_ERR(rq)) {
+ igt_spinner_fini(&spin);
+ return PTR_ERR(rq);
+ }
+
+ err = request_add_spin(rq, &spin);
+ if (err == 0)
+ err = intel_engine_reset(ce->engine, reason);
+
+ igt_spinner_end(&spin);
+ igt_spinner_fini(&spin);
+
+ return err;
+}
+
+static int __live_mocs_reset(struct live_mocs *mocs,
+ struct intel_context *ce)
+{
+ int err;
+
+ err = intel_engine_reset(ce->engine, "mocs");
+ if (err)
+ return err;
+
+ err = check_mocs_engine(mocs, ce);
+ if (err)
+ return err;
+
+ err = active_engine_reset(ce, "mocs");
+ if (err)
+ return err;
+
+ err = check_mocs_engine(mocs, ce);
+ if (err)
+ return err;
+
+ intel_gt_reset(ce->engine->gt, ce->engine->mask, "mocs");
+
+ err = check_mocs_engine(mocs, ce);
+ if (err)
+ return err;
+
+ return 0;
+}
+
+static int live_mocs_reset(void *arg)
+{
+ struct intel_gt *gt = arg;
+ struct intel_engine_cs *engine;
+ enum intel_engine_id id;
+ struct live_mocs mocs;
+ int err = 0;
+
+ /* Check the mocs setup is retained over per-engine and global resets */
+
+ if (!intel_has_reset_engine(gt))
+ return 0;
+
+ err = live_mocs_init(&mocs, gt);
+ if (err)
+ return err;
+
+ igt_global_reset_lock(gt);
+ for_each_engine(engine, gt, id) {
+ struct intel_context *ce;
+
+ ce = intel_context_create(engine);
+ if (IS_ERR(ce)) {
+ err = PTR_ERR(ce);
+ break;
+ }
+
+ intel_engine_pm_get(engine);
+ err = __live_mocs_reset(&mocs, ce);
+ intel_engine_pm_put(engine);
+
+ intel_context_put(ce);
+ if (err)
+ break;
+ }
+ igt_global_reset_unlock(gt);
+
+ live_mocs_fini(&mocs);
+ return err;
+}
+
+int intel_mocs_live_selftests(struct drm_i915_private *i915)
+{
+ static const struct i915_subtest tests[] = {
+ SUBTEST(live_mocs_kernel),
+ SUBTEST(live_mocs_clean),
+ SUBTEST(live_mocs_reset),
+ };
+ struct drm_i915_mocs_table table;
+
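+ /* quietly skip platforms without a mocs table to check */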
+ if (!get_mocs_settings(i915, &table))
+ return 0;
+
+ return intel_gt_live_subtests(tests, &i915->gt);
+}
diff --git a/drivers/gpu/drm/i915/gt/selftest_rc6.c b/drivers/gpu/drm/i915/gt/selftest_rc6.c
new file mode 100644
index 000000000000..8cc55a0e9e06
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/selftest_rc6.c
@@ -0,0 +1,203 @@
+/*
+ * SPDX-License-Identifier: MIT
+ *
+ * Copyright © 2019 Intel Corporation
+ */
+
+#include "intel_context.h"
+#include "intel_engine_pm.h"
+#include "intel_gt_requests.h"
+#include "intel_ring.h"
+#include "selftest_rc6.h"
+
+#include "selftests/i915_random.h"
+
+int live_rc6_manual(void *arg)
+{
+ struct intel_gt *gt = arg;
+ struct intel_rc6 *rc6 = &gt->rc6;
+ intel_wakeref_t wakeref;
+ u64 res[2];
+ int err = 0;
+
+ /*
+ * Our claim is that we can "encourage" the GPU to enter rc6 at will.
+ * Let's try it!
+ */
+
+ if (!rc6->enabled)
+ return 0;
+
+ /* bsw/byt use a PCU and decouple RC6 from our manual control */
+ if (IS_VALLEYVIEW(gt->i915) || IS_CHERRYVIEW(gt->i915))
+ return 0;
+
+ wakeref = intel_runtime_pm_get(gt->uncore->rpm);
+
+ /* Force RC6 off for starters */
+ __intel_rc6_disable(rc6);
+ msleep(1); /* wakeup is not immediate, takes about 100us on icl */
+
+ res[0] = intel_rc6_residency_ns(rc6, GEN6_GT_GFX_RC6);
+ msleep(250);
+ res[1] = intel_rc6_residency_ns(rc6, GEN6_GT_GFX_RC6);
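+ /* the shift by 10 below cheaply approximates ns -> us (1024, not 1000) */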
+ if ((res[1] - res[0]) >> 10) {
+ pr_err("RC6 residency increased by %lldus while disabled for 250ms!\n",
+ (res[1] - res[0]) >> 10);
+ err = -EINVAL;
+ goto out_unlock;
+ }
+
+ /* Manually enter RC6 */
+ intel_rc6_park(rc6);
+
+ res[0] = intel_rc6_residency_ns(rc6, GEN6_GT_GFX_RC6);
+ msleep(100);
+ res[1] = intel_rc6_residency_ns(rc6, GEN6_GT_GFX_RC6);
+
+ if (res[1] == res[0]) {
+ pr_err("Did not enter RC6! RC6_STATE=%08x, RC6_CONTROL=%08x\n",
+ intel_uncore_read_fw(gt->uncore, GEN6_RC_STATE),
+ intel_uncore_read_fw(gt->uncore, GEN6_RC_CONTROL));
+ err = -EINVAL;
+ }
+
+ /* Restore what should have been the original state! */
+ intel_rc6_unpark(rc6);
+
+out_unlock:
+ intel_runtime_pm_put(gt->uncore->rpm, wakeref);
+ return err;
+}
+
+static const u32 *__live_rc6_ctx(struct intel_context *ce)
+{
+ struct i915_request *rq;
+ const u32 *result;
+ u32 cmd;
+ u32 *cs;
+
+ rq = intel_context_create_request(ce);
+ if (IS_ERR(rq))
+ return ERR_CAST(rq);
+
+ cs = intel_ring_begin(rq, 4);
+ if (IS_ERR(cs)) {
+ i915_request_add(rq);
+ return cs;
+ }
+
+ cmd = MI_STORE_REGISTER_MEM | MI_USE_GGTT;
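+ /* gen8+ uses a 64-bit address, so the SRM command grows by one dword */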
+ if (INTEL_GEN(rq->i915) >= 8)
+ cmd++;
+
+ *cs++ = cmd;
+ *cs++ = i915_mmio_reg_offset(GEN8_RC6_CTX_INFO);
+ *cs++ = ce->timeline->hwsp_offset + 8;
+ *cs++ = 0;
+ intel_ring_advance(rq, cs);
+
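+ /* the SRM above wrote to hwsp_offset + 8, i.e. two u32s past the seqno */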
+ result = rq->hwsp_seqno + 2;
+ i915_request_add(rq);
+
+ return result;
+}
+
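+ /* visit the engines in a random order on each run to vary coverage */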
+static struct intel_engine_cs **
+randomised_engines(struct intel_gt *gt,
+ struct rnd_state *prng,
+ unsigned int *count)
+{
+ struct intel_engine_cs *engine, **engines;
+ enum intel_engine_id id;
+ int n;
+
+ n = 0;
+ for_each_engine(engine, gt, id)
+ n++;
+ if (!n)
+ return NULL;
+
+ engines = kmalloc_array(n, sizeof(*engines), GFP_KERNEL);
+ if (!engines)
+ return NULL;
+
+ n = 0;
+ for_each_engine(engine, gt, id)
+ engines[n++] = engine;
+
+ i915_prandom_shuffle(engines, sizeof(*engines), n, prng);
+
+ *count = n;
+ return engines;
+}
+
+int live_rc6_ctx_wa(void *arg)
+{
+ struct intel_gt *gt = arg;
+ struct intel_engine_cs **engines;
+ unsigned int n, count;
+ I915_RND_STATE(prng);
+ int err = 0;
+
+ /* A read of CTX_INFO upsets rc6. Poke the bear! */
+ if (INTEL_GEN(gt->i915) < 8)
+ return 0;
+
+ engines = randomised_engines(gt, &prng, &count);
+ if (!engines)
+ return 0;
+
+ for (n = 0; n < count; n++) {
+ struct intel_engine_cs *engine = engines[n];
+ int pass;
+
+ for (pass = 0; pass < 2; pass++) {
+ struct intel_context *ce;
+ unsigned int resets =
+ i915_reset_engine_count(&gt->i915->gpu_error,
+ engine);
+ const u32 *res;
+
+ /* Use a sacrificial context */
+ ce = intel_context_create(engine);
+ if (IS_ERR(ce)) {
+ err = PTR_ERR(ce);
+ goto out;
+ }
+
+ intel_engine_pm_get(engine);
+ res = __live_rc6_ctx(ce);
+ intel_engine_pm_put(engine);
+ intel_context_put(ce);
+ if (IS_ERR(res)) {
+ err = PTR_ERR(res);
+ goto out;
+ }
+
+ if (intel_gt_wait_for_idle(gt, HZ / 5) == -ETIME) {
+ intel_gt_set_wedged(gt);
+ err = -ETIME;
+ goto out;
+ }
+
+ intel_gt_pm_wait_for_idle(gt);
+ pr_debug("%s: CTX_INFO=%0x\n",
+ engine->name, READ_ONCE(*res));
+
+ if (resets !=
+ i915_reset_engine_count(&gt->i915->gpu_error,
+ engine)) {
+ pr_err("%s: GPU reset required\n",
+ engine->name);
+ add_taint_for_CI(TAINT_WARN);
+ err = -EIO;
+ goto out;
+ }
+ }
+ }
+
+out:
+ kfree(engines);
+ return err;
+}
diff --git a/drivers/gpu/drm/i915/gt/selftest_rc6.h b/drivers/gpu/drm/i915/gt/selftest_rc6.h
new file mode 100644
index 000000000000..762fd442d7b2
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/selftest_rc6.h
@@ -0,0 +1,13 @@
+/*
+ * SPDX-License-Identifier: MIT
+ *
+ * Copyright © 2019 Intel Corporation
+ */
+
+#ifndef SELFTEST_RC6_H
+#define SELFTEST_RC6_H
+
+int live_rc6_ctx_wa(void *arg);
+int live_rc6_manual(void *arg);
+
+#endif /* SELFTEST_RC6_H */
diff --git a/drivers/gpu/drm/i915/gt/selftest_timeline.c b/drivers/gpu/drm/i915/gt/selftest_timeline.c
index f04a59fe5d2c..e2d78cc22fb4 100644
--- a/drivers/gpu/drm/i915/gt/selftest_timeline.c
+++ b/drivers/gpu/drm/i915/gt/selftest_timeline.c
@@ -458,7 +458,7 @@ tl_write(struct intel_timeline *tl, struct intel_engine_cs *engine, u32 value)
goto out;
}
- rq = i915_request_create(engine->kernel_context);
+ rq = intel_engine_create_kernel_request(engine);
if (IS_ERR(rq))
goto out_unpin;
@@ -675,9 +675,7 @@ static int live_hwsp_wrap(void *arg)
if (!intel_engine_can_store_dword(engine))
continue;
- intel_engine_pm_get(engine);
- rq = i915_request_create(engine->kernel_context);
- intel_engine_pm_put(engine);
+ rq = intel_engine_create_kernel_request(engine);
if (IS_ERR(rq)) {
err = PTR_ERR(rq);
goto out;
diff --git a/drivers/gpu/drm/i915/gt/selftest_workarounds.c b/drivers/gpu/drm/i915/gt/selftest_workarounds.c
index abce6e4ec9c0..ac1921854cbf 100644
--- a/drivers/gpu/drm/i915/gt/selftest_workarounds.c
+++ b/drivers/gpu/drm/i915/gt/selftest_workarounds.c
@@ -264,22 +264,15 @@ static int
switch_to_scratch_context(struct intel_engine_cs *engine,
struct igt_spinner *spin)
{
- struct i915_gem_context *ctx;
struct intel_context *ce;
struct i915_request *rq;
int err = 0;
- ctx = kernel_context(engine->i915);
- if (IS_ERR(ctx))
- return PTR_ERR(ctx);
-
- GEM_BUG_ON(i915_gem_context_is_bannable(ctx));
-
- ce = i915_gem_context_get_engine(ctx, engine->legacy_idx);
- GEM_BUG_ON(IS_ERR(ce));
+ ce = intel_context_create(engine);
+ if (IS_ERR(ce))
+ return PTR_ERR(ce);
rq = igt_spinner_create_request(spin, ce, MI_NOOP);
-
intel_context_put(ce);
if (IS_ERR(rq)) {
@@ -293,7 +286,6 @@ err:
if (err && spin)
igt_spinner_end(spin);
- kernel_context_close(ctx);
return err;
}
@@ -367,20 +359,17 @@ out_ctx:
return err;
}
-static struct i915_vma *create_batch(struct i915_gem_context *ctx)
+static struct i915_vma *create_batch(struct i915_address_space *vm)
{
struct drm_i915_gem_object *obj;
- struct i915_address_space *vm;
struct i915_vma *vma;
int err;
- obj = i915_gem_object_create_internal(ctx->i915, 16 * PAGE_SIZE);
+ obj = i915_gem_object_create_internal(vm->i915, 16 * PAGE_SIZE);
if (IS_ERR(obj))
return ERR_CAST(obj);
- vm = i915_gem_context_get_vm_rcu(ctx);
vma = i915_vma_instance(obj, vm, NULL);
- i915_vm_put(vm);
if (IS_ERR(vma)) {
err = PTR_ERR(vma);
goto err_obj;
@@ -452,8 +441,7 @@ static int whitelist_writable_count(struct intel_engine_cs *engine)
return count;
}
-static int check_dirty_whitelist(struct i915_gem_context *ctx,
- struct intel_engine_cs *engine)
+static int check_dirty_whitelist(struct intel_context *ce)
{
const u32 values[] = {
0x00000000,
@@ -481,19 +469,17 @@ static int check_dirty_whitelist(struct i915_gem_context *ctx,
0xffff00ff,
0xffffffff,
};
- struct i915_address_space *vm;
+ struct intel_engine_cs *engine = ce->engine;
struct i915_vma *scratch;
struct i915_vma *batch;
int err = 0, i, v;
u32 *cs, *results;
- vm = i915_gem_context_get_vm_rcu(ctx);
- scratch = create_scratch(vm, 2 * ARRAY_SIZE(values) + 1);
- i915_vm_put(vm);
+ scratch = create_scratch(ce->vm, 2 * ARRAY_SIZE(values) + 1);
if (IS_ERR(scratch))
return PTR_ERR(scratch);
- batch = create_batch(ctx);
+ batch = create_batch(ce->vm);
if (IS_ERR(batch)) {
err = PTR_ERR(batch);
goto out_scratch;
@@ -518,7 +504,7 @@ static int check_dirty_whitelist(struct i915_gem_context *ctx,
srm = MI_STORE_REGISTER_MEM;
lrm = MI_LOAD_REGISTER_MEM;
- if (INTEL_GEN(ctx->i915) >= 8)
+ if (INTEL_GEN(engine->i915) >= 8)
lrm++, srm++;
pr_debug("%s: Writing garbage to %x\n",
@@ -577,7 +563,7 @@ static int check_dirty_whitelist(struct i915_gem_context *ctx,
i915_gem_object_unpin_map(batch->obj);
intel_gt_chipset_flush(engine->gt);
- rq = igt_request_alloc(ctx, engine);
+ rq = intel_context_create_request(ce);
if (IS_ERR(rq)) {
err = PTR_ERR(rq);
goto out_batch;
@@ -696,7 +682,7 @@ out_unpin:
break;
}
- if (igt_flush_test(ctx->i915))
+ if (igt_flush_test(engine->i915))
err = -EIO;
out_batch:
i915_vma_unpin_and_release(&batch, 0);
@@ -709,38 +695,31 @@ static int live_dirty_whitelist(void *arg)
{
struct intel_gt *gt = arg;
struct intel_engine_cs *engine;
- struct i915_gem_context *ctx;
enum intel_engine_id id;
- struct drm_file *file;
- int err = 0;
/* Can the user write to the whitelisted registers? */
if (INTEL_GEN(gt->i915) < 7) /* minimum requirement for LRI, SRM, LRM */
return 0;
- file = mock_file(gt->i915);
- if (IS_ERR(file))
- return PTR_ERR(file);
-
- ctx = live_context(gt->i915, file);
- if (IS_ERR(ctx)) {
- err = PTR_ERR(ctx);
- goto out_file;
- }
-
for_each_engine(engine, gt, id) {
+ struct intel_context *ce;
+ int err;
+
if (engine->whitelist.count == 0)
continue;
- err = check_dirty_whitelist(ctx, engine);
+ ce = intel_context_create(engine);
+ if (IS_ERR(ce))
+ return PTR_ERR(ce);
+
+ err = check_dirty_whitelist(ce);
+ intel_context_put(ce);
if (err)
- goto out_file;
+ return err;
}
-out_file:
- mock_file_free(gt->i915, file);
- return err;
+ return 0;
}
static int live_reset_whitelist(void *arg)
@@ -830,12 +809,15 @@ err_req:
static int scrub_whitelisted_registers(struct i915_gem_context *ctx,
struct intel_engine_cs *engine)
{
+ struct i915_address_space *vm;
struct i915_request *rq;
struct i915_vma *batch;
int i, err = 0;
u32 *cs;
- batch = create_batch(ctx);
+ vm = i915_gem_context_get_vm_rcu(ctx);
+ batch = create_batch(vm);
+ i915_vm_put(vm);
if (IS_ERR(batch))
return PTR_ERR(batch);
diff --git a/drivers/gpu/drm/i915/gt/selftests/mock_timeline.c b/drivers/gpu/drm/i915/gt/selftests/mock_timeline.c
index 2a77c051f36a..aeb1d1f616e8 100644
--- a/drivers/gpu/drm/i915/gt/selftests/mock_timeline.c
+++ b/drivers/gpu/drm/i915/gt/selftests/mock_timeline.c
@@ -15,7 +15,7 @@ void mock_timeline_init(struct intel_timeline *timeline, u64 context)
mutex_init(&timeline->mutex);
- INIT_ACTIVE_FENCE(&timeline->last_request, &timeline->mutex);
+ INIT_ACTIVE_FENCE(&timeline->last_request);
INIT_LIST_HEAD(&timeline->requests);
i915_syncmap_init(&timeline->sync);
diff --git a/drivers/gpu/drm/i915/gt/selftests/mock_timeline.h b/drivers/gpu/drm/i915/gt/selftests/mock_timeline.h
index 689efc66c908..d2bcc3df6183 100644
--- a/drivers/gpu/drm/i915/gt/selftests/mock_timeline.h
+++ b/drivers/gpu/drm/i915/gt/selftests/mock_timeline.h
@@ -7,6 +7,8 @@
#ifndef __MOCK_TIMELINE__
#define __MOCK_TIMELINE__
+#include <linux/types.h>
+
struct intel_timeline;
void mock_timeline_init(struct intel_timeline *timeline, u64 context);
diff --git a/drivers/gpu/drm/i915/gt/uc/Makefile b/drivers/gpu/drm/i915/gt/uc/Makefile
deleted file mode 100644
index bec94d434cb6..000000000000
--- a/drivers/gpu/drm/i915/gt/uc/Makefile
+++ /dev/null
@@ -1,5 +0,0 @@
-# For building individual subdir files on the command line
-subdir-ccflags-y += -I$(srctree)/$(src)/../..
-
-# Extra header tests
-header-test-pattern-$(CONFIG_DRM_I915_WERROR) := *.h
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc.c b/drivers/gpu/drm/i915/gt/uc/intel_guc.c
index 3ee4a4e7689d..5d00a3b2d914 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc.c
@@ -32,18 +32,17 @@
* just the HuC, but more are expected to land in the future).
*/
-static void gen8_guc_raise_irq(struct intel_guc *guc)
+void intel_guc_notify(struct intel_guc *guc)
{
struct intel_gt *gt = guc_to_gt(guc);
- intel_uncore_write(gt->uncore, GUC_SEND_INTERRUPT, GUC_SEND_TRIGGER);
-}
-
-static void gen11_guc_raise_irq(struct intel_guc *guc)
-{
- struct intel_gt *gt = guc_to_gt(guc);
-
- intel_uncore_write(gt->uncore, GEN11_GUC_HOST_INTERRUPT, 0);
+ /*
+ * On Gen11+, the value written to the register is passed as a payload
+ * to the FW. However, the FW currently treats all values the same way
+ * (H2G interrupt), so we can just write the value that the HW expects
+ * on older gens.
+ */
+ intel_uncore_write(gt->uncore, guc->notify_reg, GUC_SEND_TRIGGER);
}
static inline i915_reg_t guc_send_reg(struct intel_guc *guc, u32 i)
@@ -177,15 +176,13 @@ void intel_guc_init_early(struct intel_guc *guc)
mutex_init(&guc->send_mutex);
spin_lock_init(&guc->irq_lock);
- guc->send = intel_guc_send_nop;
- guc->handler = intel_guc_to_host_event_handler_nop;
if (INTEL_GEN(i915) >= 11) {
- guc->notify = gen11_guc_raise_irq;
+ guc->notify_reg = GEN11_GUC_HOST_INTERRUPT;
guc->interrupts.reset = gen11_reset_guc_interrupts;
guc->interrupts.enable = gen11_enable_guc_interrupts;
guc->interrupts.disable = gen11_disable_guc_interrupts;
} else {
- guc->notify = gen8_guc_raise_irq;
+ guc->notify_reg = GUC_SEND_INTERRUPT;
guc->interrupts.reset = gen9_reset_guc_interrupts;
guc->interrupts.enable = gen9_enable_guc_interrupts;
guc->interrupts.disable = gen9_disable_guc_interrupts;
@@ -401,18 +398,8 @@ void intel_guc_fini(struct intel_guc *guc)
intel_guc_log_destroy(&guc->log);
intel_uc_fw_fini(&guc->fw);
intel_uc_fw_cleanup_fetch(&guc->fw);
-}
-int intel_guc_send_nop(struct intel_guc *guc, const u32 *action, u32 len,
- u32 *response_buf, u32 response_buf_size)
-{
- WARN(1, "Unexpected send: action=%#x\n", *action);
- return -ENODEV;
-}
-
-void intel_guc_to_host_event_handler_nop(struct intel_guc *guc)
-{
- WARN(1, "Unexpected event: no suitable handler\n");
+ intel_uc_fw_change_status(&guc->fw, INTEL_UC_FIRMWARE_DISABLED);
}
/*
@@ -704,3 +691,37 @@ err:
i915_gem_object_put(obj);
return vma;
}
+
+/**
+ * intel_guc_allocate_and_map_vma() - Allocate and map VMA for GuC usage
+ * @guc: the guc
+ * @size: size of area to allocate (both virtual space and memory)
+ * @out_vma: return variable for the allocated vma pointer
+ * @out_vaddr: return variable for the obj mapping
+ *
+ * This wrapper calls intel_guc_allocate_vma() and then maps the allocated
+ * object with I915_MAP_WB.
+ *
+ * Return: 0 if successful, a negative errno code otherwise.
+ */
+int intel_guc_allocate_and_map_vma(struct intel_guc *guc, u32 size,
+ struct i915_vma **out_vma, void **out_vaddr)
+{
+ struct i915_vma *vma;
+ void *vaddr;
+
+ vma = intel_guc_allocate_vma(guc, size);
+ if (IS_ERR(vma))
+ return PTR_ERR(vma);
+
+ vaddr = i915_gem_object_pin_map(vma->obj, I915_MAP_WB);
+ if (IS_ERR(vaddr)) {
+ i915_vma_unpin_and_release(&vma, 0);
+ return PTR_ERR(vaddr);
+ }
+
+ *out_vma = vma;
+ *out_vaddr = vaddr;
+
+ return 0;
+}
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc.h b/drivers/gpu/drm/i915/gt/uc/intel_guc.h
index e6400204a2bd..910d49590068 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc.h
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc.h
@@ -20,8 +20,8 @@ struct __guc_ads_blob;
/*
* Top level structure of GuC. It handles firmware loading and manages client
- * pool and doorbells. intel_guc owns a intel_guc_client to replace the legacy
- * ExecList submission.
+ * pool. intel_guc owns an intel_guc_client to replace the legacy ExecList
+ * submission.
*/
struct intel_guc {
struct intel_uc_fw fw;
@@ -46,13 +46,13 @@ struct intel_guc {
struct i915_vma *stage_desc_pool;
void *stage_desc_pool_vaddr;
- struct ida stage_ids;
- struct intel_guc_client *execbuf_client;
+ struct i915_vma *workqueue;
+ void *workqueue_vaddr;
+ spinlock_t wq_lock;
- DECLARE_BITMAP(doorbell_bitmap, GUC_NUM_DOORBELLS);
- /* Cyclic counter mod pagesize */
- u32 db_cacheline;
+ struct i915_vma *proc_desc;
+ void *proc_desc_vaddr;
/* Control params for fw initialization */
u32 params[GUC_CTL_MAX_DWORDS];
@@ -64,44 +64,33 @@ struct intel_guc {
enum forcewake_domains fw_domains;
} send_regs;
+ /* register used to send interrupts to the GuC FW */
+ i915_reg_t notify_reg;
+
/* Store msg (e.g. log flush) that we see while CTBs are disabled */
u32 mmio_msg;
/* To serialize the intel_guc_send actions */
struct mutex send_mutex;
-
- /* GuC's FW specific send function */
- int (*send)(struct intel_guc *guc, const u32 *data, u32 len,
- u32 *response_buf, u32 response_buf_size);
-
- /* GuC's FW specific event handler function */
- void (*handler)(struct intel_guc *guc);
-
- /* GuC's FW specific notify function */
- void (*notify)(struct intel_guc *guc);
};
static
inline int intel_guc_send(struct intel_guc *guc, const u32 *action, u32 len)
{
- return guc->send(guc, action, len, NULL, 0);
+ return intel_guc_ct_send(&guc->ct, action, len, NULL, 0);
}
static inline int
intel_guc_send_and_receive(struct intel_guc *guc, const u32 *action, u32 len,
u32 *response_buf, u32 response_buf_size)
{
- return guc->send(guc, action, len, response_buf, response_buf_size);
-}
-
-static inline void intel_guc_notify(struct intel_guc *guc)
-{
- guc->notify(guc);
+ return intel_guc_ct_send(&guc->ct, action, len,
+ response_buf, response_buf_size);
}
static inline void intel_guc_to_host_event_handler(struct intel_guc *guc)
{
- guc->handler(guc);
+ intel_guc_ct_event_handler(&guc->ct);
}
/* GuC addresses above GUC_GGTT_TOP also don't map through the GTT */
@@ -136,12 +125,9 @@ void intel_guc_init_send_regs(struct intel_guc *guc);
void intel_guc_write_params(struct intel_guc *guc);
int intel_guc_init(struct intel_guc *guc);
void intel_guc_fini(struct intel_guc *guc);
-int intel_guc_send_nop(struct intel_guc *guc, const u32 *action, u32 len,
- u32 *response_buf, u32 response_buf_size);
+void intel_guc_notify(struct intel_guc *guc);
int intel_guc_send_mmio(struct intel_guc *guc, const u32 *action, u32 len,
u32 *response_buf, u32 response_buf_size);
-void intel_guc_to_host_event_handler(struct intel_guc *guc);
-void intel_guc_to_host_event_handler_nop(struct intel_guc *guc);
int intel_guc_to_host_process_recv_msg(struct intel_guc *guc,
const u32 *payload, u32 len);
int intel_guc_sample_forcewake(struct intel_guc *guc);
@@ -149,6 +135,8 @@ int intel_guc_auth_huc(struct intel_guc *guc, u32 rsa_offset);
int intel_guc_suspend(struct intel_guc *guc);
int intel_guc_resume(struct intel_guc *guc);
struct i915_vma *intel_guc_allocate_vma(struct intel_guc *guc, u32 size);
+int intel_guc_allocate_and_map_vma(struct intel_guc *guc, u32 size,
+ struct i915_vma **out_vma, void **out_vaddr);
static inline bool intel_guc_is_supported(struct intel_guc *guc)
{
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_ads.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_ads.c
index ca6674b8e00c..101728006ae9 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc_ads.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_ads.c
@@ -93,7 +93,8 @@ static void __guc_ads_init(struct intel_guc *guc)
*/
blob->ads.golden_context_lrca[engine_class] = 0;
blob->ads.eng_state_size[engine_class] =
- intel_engine_context_size(dev_priv, engine_class) -
+ intel_engine_context_size(guc_to_gt(guc),
+ engine_class) -
skipped_size;
}
@@ -135,32 +136,19 @@ static void __guc_ads_init(struct intel_guc *guc)
int intel_guc_ads_create(struct intel_guc *guc)
{
const u32 size = PAGE_ALIGN(sizeof(struct __guc_ads_blob));
- struct i915_vma *vma;
- void *blob;
int ret;
GEM_BUG_ON(guc->ads_vma);
- vma = intel_guc_allocate_vma(guc, size);
- if (IS_ERR(vma))
- return PTR_ERR(vma);
+ ret = intel_guc_allocate_and_map_vma(guc, size, &guc->ads_vma,
+ (void **)&guc->ads_blob);
- blob = i915_gem_object_pin_map(vma->obj, I915_MAP_WB);
- if (IS_ERR(blob)) {
- ret = PTR_ERR(blob);
- goto err_vma;
- }
-
- guc->ads_vma = vma;
- guc->ads_blob = blob;
+ if (ret)
+ return ret;
__guc_ads_init(guc);
return 0;
-
-err_vma:
- i915_vma_unpin_and_release(&guc->ads_vma, 0);
- return ret;
}
void intel_guc_ads_destroy(struct intel_guc *guc)
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_ct.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_ct.c
index b49115517510..c6f971a049f9 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc_ct.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_ct.c
@@ -37,13 +37,10 @@ static void ct_incoming_request_worker_func(struct work_struct *w);
*/
void intel_guc_ct_init_early(struct intel_guc_ct *ct)
{
- /* we're using static channel owners */
- ct->host_channel.owner = CTB_OWNER_HOST;
-
- spin_lock_init(&ct->lock);
- INIT_LIST_HEAD(&ct->pending_requests);
- INIT_LIST_HEAD(&ct->incoming_requests);
- INIT_WORK(&ct->worker, ct_incoming_request_worker_func);
+ spin_lock_init(&ct->requests.lock);
+ INIT_LIST_HEAD(&ct->requests.pending);
+ INIT_LIST_HEAD(&ct->requests.incoming);
+ INIT_WORK(&ct->requests.worker, ct_incoming_request_worker_func);
}
static inline struct intel_guc *ct_to_guc(struct intel_guc_ct *ct)
@@ -64,14 +61,13 @@ static inline const char *guc_ct_buffer_type_to_str(u32 type)
}
static void guc_ct_buffer_desc_init(struct guc_ct_buffer_desc *desc,
- u32 cmds_addr, u32 size, u32 owner)
+ u32 cmds_addr, u32 size)
{
- CT_DEBUG_DRIVER("CT: desc %p init addr=%#x size=%u owner=%u\n",
- desc, cmds_addr, size, owner);
+ CT_DEBUG_DRIVER("CT: init addr=%#x size=%u\n", cmds_addr, size);
memset(desc, 0, sizeof(*desc));
desc->addr = cmds_addr;
desc->size = size;
- desc->owner = owner;
+ desc->owner = CTB_OWNER_HOST;
}
static void guc_ct_buffer_desc_reset(struct guc_ct_buffer_desc *desc)
@@ -104,12 +100,11 @@ static int guc_action_register_ct_buffer(struct intel_guc *guc,
}
static int guc_action_deregister_ct_buffer(struct intel_guc *guc,
- u32 owner,
u32 type)
{
u32 action[] = {
INTEL_GUC_ACTION_DEREGISTER_COMMAND_TRANSPORT_BUFFER,
- owner,
+ CTB_OWNER_HOST,
type
};
int err;
@@ -117,20 +112,27 @@ static int guc_action_deregister_ct_buffer(struct intel_guc *guc,
/* Can't use generic send(), CT deregistration must go over MMIO */
err = intel_guc_send_mmio(guc, action, ARRAY_SIZE(action), NULL, 0);
if (err)
- DRM_ERROR("CT: deregister %s buffer failed; owner=%d err=%d\n",
- guc_ct_buffer_type_to_str(type), owner, err);
+ DRM_ERROR("CT: deregister %s buffer failed; err=%d\n",
+ guc_ct_buffer_type_to_str(type), err);
return err;
}
-static int ctch_init(struct intel_guc *guc,
- struct intel_guc_ct_channel *ctch)
+/**
+ * intel_guc_ct_init - Init buffer-based communication
+ * @ct: pointer to CT struct
+ *
+ * Allocate memory required for buffer-based communication.
+ *
+ * Return: 0 on success, a negative errno code on failure.
+ */
+int intel_guc_ct_init(struct intel_guc_ct *ct)
{
- struct i915_vma *vma;
+ struct intel_guc *guc = ct_to_guc(ct);
void *blob;
int err;
int i;
- GEM_BUG_ON(ctch->vma);
+ GEM_BUG_ON(ct->vma);
/* We allocate 1 page to hold both descriptors and both buffers.
* ___________.....................
@@ -154,71 +156,65 @@ static int ctch_init(struct intel_guc *guc,
* other code will need updating as well.
*/
- /* allocate vma */
- vma = intel_guc_allocate_vma(guc, PAGE_SIZE);
- if (IS_ERR(vma)) {
- err = PTR_ERR(vma);
- goto err_out;
+ err = intel_guc_allocate_and_map_vma(guc, PAGE_SIZE, &ct->vma, &blob);
+ if (err) {
+ DRM_ERROR("CT: channel allocation failed; err=%d\n", err);
+ return err;
}
- ctch->vma = vma;
- /* map first page */
- blob = i915_gem_object_pin_map(vma->obj, I915_MAP_WB);
- if (IS_ERR(blob)) {
- err = PTR_ERR(blob);
- goto err_vma;
- }
CT_DEBUG_DRIVER("CT: vma base=%#x\n",
- intel_guc_ggtt_offset(guc, ctch->vma));
+ intel_guc_ggtt_offset(guc, ct->vma));
/* store pointers to desc and cmds */
- for (i = 0; i < ARRAY_SIZE(ctch->ctbs); i++) {
- GEM_BUG_ON((i != CTB_SEND) && (i != CTB_RECV));
- ctch->ctbs[i].desc = blob + PAGE_SIZE/4 * i;
- ctch->ctbs[i].cmds = blob + PAGE_SIZE/4 * i + PAGE_SIZE/2;
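+ /* the two descs occupy the first two page quarters; cmds fill the rest */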
+ for (i = 0; i < ARRAY_SIZE(ct->ctbs); i++) {
+ GEM_BUG_ON((i != CTB_SEND) && (i != CTB_RECV));
+ ct->ctbs[i].desc = blob + PAGE_SIZE/4 * i;
+ ct->ctbs[i].cmds = blob + PAGE_SIZE/4 * i + PAGE_SIZE/2;
}
return 0;
-
-err_vma:
- i915_vma_unpin_and_release(&ctch->vma, 0);
-err_out:
- CT_DEBUG_DRIVER("CT: channel %d initialization failed; err=%d\n",
- ctch->owner, err);
- return err;
}
-static void ctch_fini(struct intel_guc *guc,
- struct intel_guc_ct_channel *ctch)
+/**
+ * intel_guc_ct_fini - Fini buffer-based communication
+ * @ct: pointer to CT struct
+ *
+ * Deallocate memory required for buffer-based communication.
+ */
+void intel_guc_ct_fini(struct intel_guc_ct *ct)
{
- GEM_BUG_ON(ctch->enabled);
+ GEM_BUG_ON(ct->enabled);
- i915_vma_unpin_and_release(&ctch->vma, I915_VMA_RELEASE_MAP);
+ i915_vma_unpin_and_release(&ct->vma, I915_VMA_RELEASE_MAP);
}
-static int ctch_enable(struct intel_guc *guc,
- struct intel_guc_ct_channel *ctch)
+/**
+ * intel_guc_ct_enable - Enable buffer based command transport.
+ * @ct: pointer to CT struct
+ *
+ * Return: 0 on success, a negative errno code on failure.
+ */
+int intel_guc_ct_enable(struct intel_guc_ct *ct)
{
+ struct intel_guc *guc = ct_to_guc(ct);
u32 base;
int err;
int i;
- GEM_BUG_ON(!ctch->vma);
-
- GEM_BUG_ON(ctch->enabled);
+ GEM_BUG_ON(ct->enabled);
/* vma should be already allocated and mapped */
- base = intel_guc_ggtt_offset(guc, ctch->vma);
+ GEM_BUG_ON(!ct->vma);
+ base = intel_guc_ggtt_offset(guc, ct->vma);
/* (re)initialize descriptors
* cmds buffers are in the second half of the blob page
*/
- for (i = 0; i < ARRAY_SIZE(ctch->ctbs); i++) {
+ for (i = 0; i < ARRAY_SIZE(ct->ctbs); i++) {
GEM_BUG_ON((i != CTB_SEND) && (i != CTB_RECV));
- guc_ct_buffer_desc_init(ctch->ctbs[i].desc,
+ guc_ct_buffer_desc_init(ct->ctbs[i].desc,
base + PAGE_SIZE/4 * i + PAGE_SIZE/2,
- PAGE_SIZE/4,
- ctch->owner);
+ PAGE_SIZE/4);
}
/* register buffers, starting with RECV buffer
@@ -236,38 +232,42 @@ static int ctch_enable(struct intel_guc *guc,
if (unlikely(err))
goto err_deregister;
- ctch->enabled = true;
+ ct->enabled = true;
return 0;
err_deregister:
guc_action_deregister_ct_buffer(guc,
- ctch->owner,
INTEL_GUC_CT_BUFFER_TYPE_RECV);
err_out:
- DRM_ERROR("CT: can't open channel %d; err=%d\n", ctch->owner, err);
+ DRM_ERROR("CT: can't open channel; err=%d\n", err);
return err;
}
-static void ctch_disable(struct intel_guc *guc,
- struct intel_guc_ct_channel *ctch)
+/**
+ * intel_guc_ct_disable - Disable buffer based command transport.
+ * @ct: pointer to CT struct
+ */
+void intel_guc_ct_disable(struct intel_guc_ct *ct)
{
- GEM_BUG_ON(!ctch->enabled);
+ struct intel_guc *guc = ct_to_guc(ct);
- ctch->enabled = false;
+ GEM_BUG_ON(!ct->enabled);
- guc_action_deregister_ct_buffer(guc,
- ctch->owner,
- INTEL_GUC_CT_BUFFER_TYPE_SEND);
- guc_action_deregister_ct_buffer(guc,
- ctch->owner,
- INTEL_GUC_CT_BUFFER_TYPE_RECV);
+ ct->enabled = false;
+
+ if (intel_guc_is_running(guc)) {
+ guc_action_deregister_ct_buffer(guc,
+ INTEL_GUC_CT_BUFFER_TYPE_SEND);
+ guc_action_deregister_ct_buffer(guc,
+ INTEL_GUC_CT_BUFFER_TYPE_RECV);
+ }
}
-static u32 ctch_get_next_fence(struct intel_guc_ct_channel *ctch)
+static u32 ct_get_next_fence(struct intel_guc_ct *ct)
{
/* For now it's trivial */
- return ++ctch->next_fence;
+ return ++ct->requests.next_fence;
}
/**
@@ -440,35 +440,34 @@ static int wait_for_ct_request_update(struct ct_request *req, u32 *status)
return err;
}
-static int ctch_send(struct intel_guc_ct *ct,
- struct intel_guc_ct_channel *ctch,
- const u32 *action,
- u32 len,
- u32 *response_buf,
- u32 response_buf_size,
- u32 *status)
+static int ct_send(struct intel_guc_ct *ct,
+ const u32 *action,
+ u32 len,
+ u32 *response_buf,
+ u32 response_buf_size,
+ u32 *status)
{
- struct intel_guc_ct_buffer *ctb = &ctch->ctbs[CTB_SEND];
+ struct intel_guc_ct_buffer *ctb = &ct->ctbs[CTB_SEND];
struct guc_ct_buffer_desc *desc = ctb->desc;
struct ct_request request;
unsigned long flags;
u32 fence;
int err;
- GEM_BUG_ON(!ctch->enabled);
+ GEM_BUG_ON(!ct->enabled);
GEM_BUG_ON(!len);
GEM_BUG_ON(len & ~GUC_CT_MSG_LEN_MASK);
GEM_BUG_ON(!response_buf && response_buf_size);
- fence = ctch_get_next_fence(ctch);
+ fence = ct_get_next_fence(ct);
request.fence = fence;
request.status = 0;
request.response_len = response_buf_size;
request.response_buf = response_buf;
- spin_lock_irqsave(&ct->lock, flags);
- list_add_tail(&request.link, &ct->pending_requests);
- spin_unlock_irqrestore(&ct->lock, flags);
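+ /* publish the request before writing the CTB so an early response can find it */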
+ spin_lock_irqsave(&ct->requests.lock, flags);
+ list_add_tail(&request.link, &ct->requests.pending);
+ spin_unlock_irqrestore(&ct->requests.lock, flags);
err = ctb_write(ctb, action, len, fence, !!response_buf);
if (unlikely(err))
@@ -501,9 +500,9 @@ static int ctch_send(struct intel_guc_ct *ct,
}
unlink:
- spin_lock_irqsave(&ct->lock, flags);
+ spin_lock_irqsave(&ct->requests.lock, flags);
list_del(&request.link);
- spin_unlock_irqrestore(&ct->lock, flags);
+ spin_unlock_irqrestore(&ct->requests.lock, flags);
return err;
}
@@ -511,18 +510,21 @@ unlink:
/*
* Command Transport (CT) buffer based GuC send function.
*/
-int intel_guc_send_ct(struct intel_guc *guc, const u32 *action, u32 len,
+int intel_guc_ct_send(struct intel_guc_ct *ct, const u32 *action, u32 len,
u32 *response_buf, u32 response_buf_size)
{
- struct intel_guc_ct *ct = &guc->ct;
- struct intel_guc_ct_channel *ctch = &ct->host_channel;
+ struct intel_guc *guc = ct_to_guc(ct);
u32 status = ~0; /* undefined */
int ret;
+ if (unlikely(!ct->enabled)) {
+ WARN(1, "Unexpected send: action=%#x\n", *action);
+ return -ENODEV;
+ }
+
mutex_lock(&guc->send_mutex);
- ret = ctch_send(ct, ctch, action, len, response_buf, response_buf_size,
- &status);
+ ret = ct_send(ct, action, len, response_buf, response_buf_size, &status);
if (unlikely(ret < 0)) {
DRM_ERROR("CT: send action %#X failed; err=%d status=%#X\n",
action[0], ret, status);
@@ -653,8 +655,8 @@ static int ct_handle_response(struct intel_guc_ct *ct, const u32 *msg)
CT_DEBUG_DRIVER("CT: response fence %u status %#x\n", fence, status);
- spin_lock(&ct->lock);
- list_for_each_entry(req, &ct->pending_requests, link) {
+ spin_lock(&ct->requests.lock);
+ list_for_each_entry(req, &ct->requests.pending, link) {
if (unlikely(fence != req->fence)) {
CT_DEBUG_DRIVER("CT: request %u awaits response\n",
req->fence);
@@ -672,7 +674,7 @@ static int ct_handle_response(struct intel_guc_ct *ct, const u32 *msg)
found = true;
break;
}
- spin_unlock(&ct->lock);
+ spin_unlock(&ct->requests.lock);
if (!found)
DRM_ERROR("CT: unsolicited response %*ph\n", 4 * msglen, msg);
@@ -710,13 +712,13 @@ static bool ct_process_incoming_requests(struct intel_guc_ct *ct)
u32 *payload;
bool done;
- spin_lock_irqsave(&ct->lock, flags);
- request = list_first_entry_or_null(&ct->incoming_requests,
+ spin_lock_irqsave(&ct->requests.lock, flags);
+ request = list_first_entry_or_null(&ct->requests.incoming,
struct ct_incoming_request, link);
if (request)
list_del(&request->link);
- done = !!list_empty(&ct->incoming_requests);
- spin_unlock_irqrestore(&ct->lock, flags);
+ done = !!list_empty(&ct->requests.incoming);
+ spin_unlock_irqrestore(&ct->requests.lock, flags);
if (!request)
return true;
@@ -734,12 +736,13 @@ static bool ct_process_incoming_requests(struct intel_guc_ct *ct)
static void ct_incoming_request_worker_func(struct work_struct *w)
{
- struct intel_guc_ct *ct = container_of(w, struct intel_guc_ct, worker);
+ struct intel_guc_ct *ct =
+ container_of(w, struct intel_guc_ct, requests.worker);
bool done;
done = ct_process_incoming_requests(ct);
if (!done)
- queue_work(system_unbound_wq, &ct->worker);
+ queue_work(system_unbound_wq, &ct->requests.worker);
}
/**
@@ -777,23 +780,28 @@ static int ct_handle_request(struct intel_guc_ct *ct, const u32 *msg)
}
memcpy(request->msg, msg, 4 * msglen);
- spin_lock_irqsave(&ct->lock, flags);
- list_add_tail(&request->link, &ct->incoming_requests);
- spin_unlock_irqrestore(&ct->lock, flags);
+ spin_lock_irqsave(&ct->requests.lock, flags);
+ list_add_tail(&request->link, &ct->requests.incoming);
+ spin_unlock_irqrestore(&ct->requests.lock, flags);
- queue_work(system_unbound_wq, &ct->worker);
+ queue_work(system_unbound_wq, &ct->requests.worker);
return 0;
}
-static void ct_process_host_channel(struct intel_guc_ct *ct)
+/*
+ * When we're communicating with the GuC over CT, GuC uses events
+ * to notify us about new messages being posted on the RECV buffer.
+ */
+void intel_guc_ct_event_handler(struct intel_guc_ct *ct)
{
- struct intel_guc_ct_channel *ctch = &ct->host_channel;
- struct intel_guc_ct_buffer *ctb = &ctch->ctbs[CTB_RECV];
+ struct intel_guc_ct_buffer *ctb = &ct->ctbs[CTB_RECV];
u32 msg[GUC_CT_MSG_LEN_MASK + 1]; /* one extra dw for the header */
int err = 0;
- if (!ctch->enabled)
+ if (unlikely(!ct->enabled)) {
+ WARN(1, "Unexpected GuC event received while CT disabled!\n");
return;
+ }
do {
err = ctb_read(ctb, msg);
@@ -812,86 +820,3 @@ static void ct_process_host_channel(struct intel_guc_ct *ct)
}
}
-/*
- * When we're communicating with the GuC over CT, GuC uses events
- * to notify us about new messages being posted on the RECV buffer.
- */
-void intel_guc_to_host_event_handler_ct(struct intel_guc *guc)
-{
- struct intel_guc_ct *ct = &guc->ct;
-
- ct_process_host_channel(ct);
-}
-
-/**
- * intel_guc_ct_init - Init CT communication
- * @ct: pointer to CT struct
- *
- * Allocate memory required for communication via
- * the CT channel.
- *
- * Return: 0 on success, a negative errno code on failure.
- */
-int intel_guc_ct_init(struct intel_guc_ct *ct)
-{
- struct intel_guc *guc = ct_to_guc(ct);
- struct intel_guc_ct_channel *ctch = &ct->host_channel;
- int err;
-
- err = ctch_init(guc, ctch);
- if (unlikely(err)) {
- DRM_ERROR("CT: can't open channel %d; err=%d\n",
- ctch->owner, err);
- return err;
- }
-
- GEM_BUG_ON(!ctch->vma);
- return 0;
-}
-
-/**
- * intel_guc_ct_fini - Fini CT communication
- * @ct: pointer to CT struct
- *
- * Deallocate memory required for communication via
- * the CT channel.
- */
-void intel_guc_ct_fini(struct intel_guc_ct *ct)
-{
- struct intel_guc *guc = ct_to_guc(ct);
- struct intel_guc_ct_channel *ctch = &ct->host_channel;
-
- ctch_fini(guc, ctch);
-}
-
-/**
- * intel_guc_ct_enable - Enable buffer based command transport.
- * @ct: pointer to CT struct
- *
- * Return: 0 on success, a negative errno code on failure.
- */
-int intel_guc_ct_enable(struct intel_guc_ct *ct)
-{
- struct intel_guc *guc = ct_to_guc(ct);
- struct intel_guc_ct_channel *ctch = &ct->host_channel;
-
- if (ctch->enabled)
- return 0;
-
- return ctch_enable(guc, ctch);
-}
-
-/**
- * intel_guc_ct_disable - Disable buffer based command transport.
- * @ct: pointer to CT struct
- */
-void intel_guc_ct_disable(struct intel_guc_ct *ct)
-{
- struct intel_guc *guc = ct_to_guc(ct);
- struct intel_guc_ct_channel *ctch = &ct->host_channel;
-
- if (!ctch->enabled)
- return;
-
- ctch_disable(guc, ctch);
-}
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_ct.h b/drivers/gpu/drm/i915/gt/uc/intel_guc_ct.h
index 7c24d83f5c24..3e7fe237cfa5 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc_ct.h
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_ct.h
@@ -35,44 +35,28 @@ struct intel_guc_ct_buffer {
u32 *cmds;
};
-/** Represents pair of command transport buffers.
- *
- * Buffers go in pairs to allow bi-directional communication.
- * To simplify the code we place both of them in the same vma.
- * Buffers from the same pair must share unique owner id.
- *
- * @vma: pointer to the vma with pair of CT buffers
- * @ctbs: buffers for sending(0) and receiving(1) commands
- * @owner: unique identifier
- * @next_fence: fence to be used with next send command
- */
-struct intel_guc_ct_channel {
- struct i915_vma *vma;
- struct intel_guc_ct_buffer ctbs[2];
- u32 owner;
- u32 next_fence;
- bool enabled;
-};
-/** Holds all command transport channels.
+/** Top-level structure for Command Transport related data
*
- * @host_channel: main channel used by the host
+ * Includes a pair of CT buffers for bi-directional communication and tracking
+ * for the H2G and G2H requests sent and received through the buffers.
*/
struct intel_guc_ct {
- struct intel_guc_ct_channel host_channel;
- /* other channels are tbd */
+ struct i915_vma *vma;
+ bool enabled;
- /** @lock: protects pending requests list */
- spinlock_t lock;
+ /* buffers for sending(0) and receiving(1) commands */
+ struct intel_guc_ct_buffer ctbs[2];
- /** @pending_requests: list of requests waiting for response */
- struct list_head pending_requests;
+ struct {
+ u32 next_fence; /* fence to be used with next request to send */
- /** @incoming_requests: list of incoming requests */
- struct list_head incoming_requests;
+ spinlock_t lock; /* protects pending requests list */
+ struct list_head pending; /* requests waiting for response */
- /** @worker: worker for handling incoming requests */
- struct work_struct worker;
+ struct list_head incoming; /* incoming requests */
+ struct work_struct worker; /* handler for incoming requests */
+ } requests;
};
void intel_guc_ct_init_early(struct intel_guc_ct *ct);
@@ -81,13 +65,13 @@ void intel_guc_ct_fini(struct intel_guc_ct *ct);
int intel_guc_ct_enable(struct intel_guc_ct *ct);
void intel_guc_ct_disable(struct intel_guc_ct *ct);
-static inline void intel_guc_ct_stop(struct intel_guc_ct *ct)
+static inline bool intel_guc_ct_enabled(struct intel_guc_ct *ct)
{
- ct->host_channel.enabled = false;
+ return ct->enabled;
}
-int intel_guc_send_ct(struct intel_guc *guc, const u32 *action, u32 len,
+int intel_guc_ct_send(struct intel_guc_ct *ct, const u32 *action, u32 len,
u32 *response_buf, u32 response_buf_size);
-void intel_guc_to_host_event_handler_ct(struct intel_guc *guc);
+void intel_guc_ct_event_handler(struct intel_guc_ct *ct);
#endif /* _INTEL_GUC_CT_H_ */
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_fw.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_fw.c
index 5528224448f6..3a1c47d600ea 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc_fw.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_fw.c
@@ -149,7 +149,7 @@ int intel_guc_fw_upload(struct intel_guc *guc)
* Current uCode expects the code to be loaded at 8k; locations below
* this are used for the stack.
*/
- ret = intel_uc_fw_upload(&guc->fw, gt, 0x2000, UOS_MOVE);
+ ret = intel_uc_fw_upload(&guc->fw, 0x2000, UOS_MOVE);
if (ret)
goto out;
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_fwif.h b/drivers/gpu/drm/i915/gt/uc/intel_guc_fwif.h
index a26a85d50209..a6b733c146c9 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc_fwif.h
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_fwif.h
@@ -31,7 +31,6 @@
#define GUC_DOORBELL_INVALID 256
-#define GUC_DB_SIZE (PAGE_SIZE)
#define GUC_WQ_SIZE (PAGE_SIZE * 2)
/* Work queue item header definitions */
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
index 2498c55e0ea5..9e42324fdecd 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
@@ -18,15 +18,6 @@
#include "i915_drv.h"
#include "i915_trace.h"
-enum {
- GUC_PREEMPT_NONE = 0,
- GUC_PREEMPT_INPROGRESS,
- GUC_PREEMPT_FINISHED,
-};
-#define GUC_PREEMPT_BREADCRUMB_DWORDS 0x8
-#define GUC_PREEMPT_BREADCRUMB_BYTES \
- (sizeof(u32) * GUC_PREEMPT_BREADCRUMB_DWORDS)
-
/**
* DOC: GuC-based command submission
*
@@ -36,25 +27,14 @@ enum {
* code) matches the old submission model and will be updated as part of the
* upgrade to the new flow.
*
- * GuC client:
- * A intel_guc_client refers to a submission path through GuC. Currently, there
- * is only one client, which is charged with all submissions to the GuC. This
- * struct is the owner of a doorbell, a process descriptor and a workqueue (all
- * of them inside a single gem object that contains all required pages for these
- * elements).
- *
* GuC stage descriptor:
* During initialization, the driver allocates a static pool of 1024 such
- * descriptors, and shares them with the GuC.
- * Currently, there exists a 1:1 mapping between a intel_guc_client and a
- * guc_stage_desc (via the client's stage_id), so effectively only one
- * gets used. This stage descriptor lets the GuC know about the doorbell,
- * workqueue and process descriptor. Theoretically, it also lets the GuC
- * know about our HW contexts (context ID, etc...), but we actually
- * employ a kind of submission where the GuC uses the LRCA sent via the work
- * item instead (the single guc_stage_desc associated to execbuf client
- * contains information about the default kernel context only, but this is
- * essentially unused). This is called a "proxy" submission.
+ * descriptors, and shares them with the GuC. Currently, we only use one
+ * descriptor. This stage descriptor lets the GuC know about the workqueue and
+ * process descriptor. Theoretically, it also lets the GuC know about our HW
+ * contexts (context ID, etc...), but we actually employ a kind of submission
+ * where the GuC uses the LRCA sent via the work item instead. This is called
+ * a "proxy" submission.
*
* The Scratch registers:
* There are 16 MMIO-based registers starting from 0xC180. The kernel driver writes
@@ -63,11 +43,6 @@ enum {
* Firmware writes a success/fail code back to the action register after
* processing the request. The kernel driver polls waiting for this update and
* then proceeds.
- * See intel_guc_send()
- *
- * Doorbells:
- * Doorbells are interrupts to uKernel. A doorbell is a single cache line (QW)
- * mapped into process space.
*
* Work Items:
* There are several types of work items that the host may place into a
@@ -84,213 +59,45 @@ static inline struct i915_priolist *to_priolist(struct rb_node *rb)
return rb_entry(rb, struct i915_priolist, node);
}
-static inline bool is_high_priority(struct intel_guc_client *client)
+static struct guc_stage_desc *__get_stage_desc(struct intel_guc *guc, u32 id)
{
- return (client->priority == GUC_CLIENT_PRIORITY_KMD_HIGH ||
- client->priority == GUC_CLIENT_PRIORITY_HIGH);
-}
-
-static int reserve_doorbell(struct intel_guc_client *client)
-{
- unsigned long offset;
- unsigned long end;
- u16 id;
-
- GEM_BUG_ON(client->doorbell_id != GUC_DOORBELL_INVALID);
+ struct guc_stage_desc *base = guc->stage_desc_pool_vaddr;
- /*
- * The bitmap tracks which doorbell registers are currently in use.
- * It is split into two halves; the first half is used for normal
- * priority contexts, the second half for high-priority ones.
- */
- offset = 0;
- end = GUC_NUM_DOORBELLS / 2;
- if (is_high_priority(client)) {
- offset = end;
- end += offset;
- }
-
- id = find_next_zero_bit(client->guc->doorbell_bitmap, end, offset);
- if (id == end)
- return -ENOSPC;
-
- __set_bit(id, client->guc->doorbell_bitmap);
- client->doorbell_id = id;
- DRM_DEBUG_DRIVER("client %u (high prio=%s) reserved doorbell: %d\n",
- client->stage_id, yesno(is_high_priority(client)),
- id);
- return 0;
+ return &base[id];
}
-static bool has_doorbell(struct intel_guc_client *client)
+static int guc_workqueue_create(struct intel_guc *guc)
{
- if (client->doorbell_id == GUC_DOORBELL_INVALID)
- return false;
-
- return test_bit(client->doorbell_id, client->guc->doorbell_bitmap);
+ return intel_guc_allocate_and_map_vma(guc, GUC_WQ_SIZE, &guc->workqueue,
+ &guc->workqueue_vaddr);
}
-static void unreserve_doorbell(struct intel_guc_client *client)
+static void guc_workqueue_destroy(struct intel_guc *guc)
{
- GEM_BUG_ON(!has_doorbell(client));
-
- __clear_bit(client->doorbell_id, client->guc->doorbell_bitmap);
- client->doorbell_id = GUC_DOORBELL_INVALID;
+ i915_vma_unpin_and_release(&guc->workqueue, I915_VMA_RELEASE_MAP);
}
/*
- * Tell the GuC to allocate or deallocate a specific doorbell
- */
-
-static int __guc_allocate_doorbell(struct intel_guc *guc, u32 stage_id)
-{
- u32 action[] = {
- INTEL_GUC_ACTION_ALLOCATE_DOORBELL,
- stage_id
- };
-
- return intel_guc_send(guc, action, ARRAY_SIZE(action));
-}
-
-static int __guc_deallocate_doorbell(struct intel_guc *guc, u32 stage_id)
-{
- u32 action[] = {
- INTEL_GUC_ACTION_DEALLOCATE_DOORBELL,
- stage_id
- };
-
- return intel_guc_send(guc, action, ARRAY_SIZE(action));
-}
-
-static struct guc_stage_desc *__get_stage_desc(struct intel_guc_client *client)
-{
- struct guc_stage_desc *base = client->guc->stage_desc_pool_vaddr;
-
- return &base[client->stage_id];
-}
-
-/*
- * Initialise, update, or clear doorbell data shared with the GuC
- *
- * These functions modify shared data and so need access to the mapped
- * client object which contains the page being used for the doorbell
+ * Initialise the process descriptor shared with the GuC firmware.
*/
-
-static void __update_doorbell_desc(struct intel_guc_client *client, u16 new_id)
-{
- struct guc_stage_desc *desc;
-
- /* Update the GuC's idea of the doorbell ID */
- desc = __get_stage_desc(client);
- desc->db_id = new_id;
-}
-
-static struct guc_doorbell_info *__get_doorbell(struct intel_guc_client *client)
-{
- return client->vaddr + client->doorbell_offset;
-}
-
-static bool __doorbell_valid(struct intel_guc *guc, u16 db_id)
-{
- struct intel_uncore *uncore = guc_to_gt(guc)->uncore;
-
- GEM_BUG_ON(db_id >= GUC_NUM_DOORBELLS);
- return intel_uncore_read(uncore, GEN8_DRBREGL(db_id)) & GEN8_DRB_VALID;
-}
-
-static void __init_doorbell(struct intel_guc_client *client)
-{
- struct guc_doorbell_info *doorbell;
-
- doorbell = __get_doorbell(client);
- doorbell->db_status = GUC_DOORBELL_ENABLED;
- doorbell->cookie = 0;
-}
-
-static void __fini_doorbell(struct intel_guc_client *client)
-{
- struct guc_doorbell_info *doorbell;
- u16 db_id = client->doorbell_id;
-
- doorbell = __get_doorbell(client);
- doorbell->db_status = GUC_DOORBELL_DISABLED;
-
- /* Doorbell release flow requires that we wait for GEN8_DRB_VALID bit
- * to go to zero after updating db_status before we call the GuC to
- * release the doorbell
- */
- if (wait_for_us(!__doorbell_valid(client->guc, db_id), 10))
- WARN_ONCE(true, "Doorbell never became invalid after disable\n");
-}
-
-static int create_doorbell(struct intel_guc_client *client)
+static int guc_proc_desc_create(struct intel_guc *guc)
{
- int ret;
-
- if (WARN_ON(!has_doorbell(client)))
- return -ENODEV; /* internal setup error, should never happen */
-
- __update_doorbell_desc(client, client->doorbell_id);
- __init_doorbell(client);
-
- ret = __guc_allocate_doorbell(client->guc, client->stage_id);
- if (ret) {
- __fini_doorbell(client);
- __update_doorbell_desc(client, GUC_DOORBELL_INVALID);
- DRM_DEBUG_DRIVER("Couldn't create client %u doorbell: %d\n",
- client->stage_id, ret);
- return ret;
- }
+ const u32 size = PAGE_ALIGN(sizeof(struct guc_process_desc));
- return 0;
+ return intel_guc_allocate_and_map_vma(guc, size, &guc->proc_desc,
+ &guc->proc_desc_vaddr);
}
-static int destroy_doorbell(struct intel_guc_client *client)
+static void guc_proc_desc_destroy(struct intel_guc *guc)
{
- int ret;
-
- GEM_BUG_ON(!has_doorbell(client));
-
- __fini_doorbell(client);
- ret = __guc_deallocate_doorbell(client->guc, client->stage_id);
- if (ret)
- DRM_ERROR("Couldn't destroy client %u doorbell: %d\n",
- client->stage_id, ret);
-
- __update_doorbell_desc(client, GUC_DOORBELL_INVALID);
-
- return ret;
+ i915_vma_unpin_and_release(&guc->proc_desc, I915_VMA_RELEASE_MAP);
}
-static unsigned long __select_cacheline(struct intel_guc *guc)
-{
- unsigned long offset;
-
- /* Doorbell uses a single cache line within a page */
- offset = offset_in_page(guc->db_cacheline);
-
- /* Moving to next cache line to reduce contention */
- guc->db_cacheline += cache_line_size();
-
- DRM_DEBUG_DRIVER("reserved cacheline 0x%lx, next 0x%x, linesize %u\n",
- offset, guc->db_cacheline, cache_line_size());
- return offset;
-}
-
-static inline struct guc_process_desc *
-__get_process_desc(struct intel_guc_client *client)
-{
- return client->vaddr + client->proc_desc_offset;
-}
-
-/*
- * Initialise the process descriptor shared with the GuC firmware.
- */
-static void guc_proc_desc_init(struct intel_guc_client *client)
+static void guc_proc_desc_init(struct intel_guc *guc)
{
struct guc_process_desc *desc;
- desc = memset(__get_process_desc(client), 0, sizeof(*desc));
+ desc = memset(guc->proc_desc_vaddr, 0, sizeof(*desc));
/*
* XXX: pDoorbell and WQVBaseAddress are pointers in process address
@@ -301,47 +108,27 @@ static void guc_proc_desc_init(struct intel_guc_client *client)
desc->wq_base_addr = 0;
desc->db_base_addr = 0;
- desc->stage_id = client->stage_id;
desc->wq_size_bytes = GUC_WQ_SIZE;
desc->wq_status = WQ_STATUS_ACTIVE;
- desc->priority = client->priority;
+ desc->priority = GUC_CLIENT_PRIORITY_KMD_NORMAL;
}
-static void guc_proc_desc_fini(struct intel_guc_client *client)
+static void guc_proc_desc_fini(struct intel_guc *guc)
{
- struct guc_process_desc *desc;
-
- desc = __get_process_desc(client);
- memset(desc, 0, sizeof(*desc));
+ memset(guc->proc_desc_vaddr, 0, sizeof(struct guc_process_desc));
}
static int guc_stage_desc_pool_create(struct intel_guc *guc)
{
- struct i915_vma *vma;
- void *vaddr;
-
- vma = intel_guc_allocate_vma(guc,
- PAGE_ALIGN(sizeof(struct guc_stage_desc) *
- GUC_MAX_STAGE_DESCRIPTORS));
- if (IS_ERR(vma))
- return PTR_ERR(vma);
-
- vaddr = i915_gem_object_pin_map(vma->obj, I915_MAP_WB);
- if (IS_ERR(vaddr)) {
- i915_vma_unpin_and_release(&vma, 0);
- return PTR_ERR(vaddr);
- }
-
- guc->stage_desc_pool = vma;
- guc->stage_desc_pool_vaddr = vaddr;
- ida_init(&guc->stage_ids);
+ u32 size = PAGE_ALIGN(sizeof(struct guc_stage_desc) *
+ GUC_MAX_STAGE_DESCRIPTORS);
- return 0;
+ return intel_guc_allocate_and_map_vma(guc, size, &guc->stage_desc_pool,
+ &guc->stage_desc_pool_vaddr);
}
static void guc_stage_desc_pool_destroy(struct intel_guc *guc)
{
- ida_destroy(&guc->stage_ids);
i915_vma_unpin_and_release(&guc->stage_desc_pool, I915_VMA_RELEASE_MAP);
}
@@ -349,63 +136,49 @@ static void guc_stage_desc_pool_destroy(struct intel_guc *guc)
* Initialise/clear the stage descriptor shared with the GuC firmware.
*
* This descriptor tells the GuC where (in GGTT space) to find the important
- * data structures relating to this client (doorbell, process descriptor,
- * write queue, etc).
+ * data structures related to work submission (process descriptor, write queue,
+ * etc).
*/
-static void guc_stage_desc_init(struct intel_guc_client *client)
+static void guc_stage_desc_init(struct intel_guc *guc)
{
- struct intel_guc *guc = client->guc;
struct guc_stage_desc *desc;
- u32 gfx_addr;
- desc = __get_stage_desc(client);
+ /* we only use 1 stage desc, so hardcode it to 0 */
+ desc = __get_stage_desc(guc, 0);
memset(desc, 0, sizeof(*desc));
desc->attribute = GUC_STAGE_DESC_ATTR_ACTIVE |
GUC_STAGE_DESC_ATTR_KERNEL;
- if (is_high_priority(client))
- desc->attribute |= GUC_STAGE_DESC_ATTR_PREEMPT;
- desc->stage_id = client->stage_id;
- desc->priority = client->priority;
- desc->db_id = client->doorbell_id;
- /*
- * The doorbell, process descriptor, and workqueue are all parts
- * of the client object, which the GuC will reference via the GGTT
- */
- gfx_addr = intel_guc_ggtt_offset(guc, client->vma);
- desc->db_trigger_phy = sg_dma_address(client->vma->pages->sgl) +
- client->doorbell_offset;
- desc->db_trigger_cpu = ptr_to_u64(__get_doorbell(client));
- desc->db_trigger_uk = gfx_addr + client->doorbell_offset;
- desc->process_desc = gfx_addr + client->proc_desc_offset;
- desc->wq_addr = gfx_addr + GUC_DB_SIZE;
- desc->wq_size = GUC_WQ_SIZE;
+ desc->stage_id = 0;
+ desc->priority = GUC_CLIENT_PRIORITY_KMD_NORMAL;
- desc->desc_private = ptr_to_u64(client);
+ desc->process_desc = intel_guc_ggtt_offset(guc, guc->proc_desc);
+ desc->wq_addr = intel_guc_ggtt_offset(guc, guc->workqueue);
+ desc->wq_size = GUC_WQ_SIZE;
}
-static void guc_stage_desc_fini(struct intel_guc_client *client)
+static void guc_stage_desc_fini(struct intel_guc *guc)
{
struct guc_stage_desc *desc;
- desc = __get_stage_desc(client);
+ desc = __get_stage_desc(guc, 0);
memset(desc, 0, sizeof(*desc));
}
/* Construct a Work Item and append it to the GuC's Work Queue */
-static void guc_wq_item_append(struct intel_guc_client *client,
+static void guc_wq_item_append(struct intel_guc *guc,
u32 target_engine, u32 context_desc,
u32 ring_tail, u32 fence_id)
{
/* wqi_len is in DWords, and does not include the one-word header */
const size_t wqi_size = sizeof(struct guc_wq_item);
const u32 wqi_len = wqi_size / sizeof(u32) - 1;
- struct guc_process_desc *desc = __get_process_desc(client);
+ struct guc_process_desc *desc = guc->proc_desc_vaddr;
struct guc_wq_item *wqi;
u32 wq_off;
- lockdep_assert_held(&client->wq_lock);
+ lockdep_assert_held(&guc->wq_lock);
/* For now a workqueue item is 4 DWs and the workqueue buffer is 2 pages, so
* a wqi should never be split across a page boundary, nor
@@ -425,58 +198,30 @@ static void guc_wq_item_append(struct intel_guc_client *client,
GUC_WQ_SIZE) < wqi_size);
GEM_BUG_ON(wq_off & (wqi_size - 1));
- /* WQ starts from the page after doorbell / process_desc */
- wqi = client->vaddr + wq_off + GUC_DB_SIZE;
-
- if (I915_SELFTEST_ONLY(client->use_nop_wqi)) {
- wqi->header = WQ_TYPE_NOOP | (wqi_len << WQ_LEN_SHIFT);
- } else {
- /* Now fill in the 4-word work queue item */
- wqi->header = WQ_TYPE_INORDER |
- (wqi_len << WQ_LEN_SHIFT) |
- (target_engine << WQ_TARGET_SHIFT) |
- WQ_NO_WCFLUSH_WAIT;
- wqi->context_desc = context_desc;
- wqi->submit_element_info = ring_tail << WQ_RING_TAIL_SHIFT;
- GEM_BUG_ON(ring_tail > WQ_RING_TAIL_MAX);
- wqi->fence_id = fence_id;
- }
+ wqi = guc->workqueue_vaddr + wq_off;
+
+ /* Now fill in the 4-word work queue item */
+ wqi->header = WQ_TYPE_INORDER |
+ (wqi_len << WQ_LEN_SHIFT) |
+ (target_engine << WQ_TARGET_SHIFT) |
+ WQ_NO_WCFLUSH_WAIT;
+ wqi->context_desc = context_desc;
+ wqi->submit_element_info = ring_tail << WQ_RING_TAIL_SHIFT;
+ GEM_BUG_ON(ring_tail > WQ_RING_TAIL_MAX);
+ wqi->fence_id = fence_id;
/* Make the update visible to GuC */
WRITE_ONCE(desc->tail, (wq_off + wqi_size) & (GUC_WQ_SIZE - 1));
}
-static void guc_ring_doorbell(struct intel_guc_client *client)
-{
- struct guc_doorbell_info *db;
- u32 cookie;
-
- lockdep_assert_held(&client->wq_lock);
-
- /* pointer of current doorbell cacheline */
- db = __get_doorbell(client);
-
- /*
- * We're not expecting the doorbell cookie to change behind our back,
- * we also need to treat 0 as a reserved value.
- */
- cookie = READ_ONCE(db->cookie);
- WARN_ON_ONCE(xchg(&db->cookie, cookie + 1 ?: cookie + 2) != cookie);
-
- /* XXX: doorbell was lost and need to acquire it again */
- GEM_BUG_ON(db->db_status != GUC_DOORBELL_ENABLED);
-}
-
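
The removed guc_ring_doorbell() bumped the doorbell cookie with the GNU ?: shorthand so that the reserved value 0 is skipped on wraparound. Spelled out in plain C, purely for illustration:

	static u32 next_doorbell_cookie(u32 cookie)
	{
		u32 next = cookie + 1;

		/* 0 is a reserved cookie value; skip over it on wrap. */
		return next ? next : cookie + 2;
	}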
static void guc_add_request(struct intel_guc *guc, struct i915_request *rq)
{
- struct intel_guc_client *client = guc->execbuf_client;
struct intel_engine_cs *engine = rq->engine;
- u32 ctx_desc = lower_32_bits(rq->hw_context->lrc_desc);
+ u32 ctx_desc = lower_32_bits(rq->context->lrc_desc);
u32 ring_tail = intel_ring_set_tail(rq->ring, rq->tail) / sizeof(u64);
- guc_wq_item_append(client, engine->guc_id, ctx_desc,
+ guc_wq_item_append(guc, engine->guc_id, ctx_desc,
ring_tail, rq->fence.seqno);
- guc_ring_doorbell(client);
}
/*
@@ -488,10 +233,9 @@ static void guc_add_request(struct intel_guc *guc, struct i915_request *rq)
*/
static void flush_ggtt_writes(struct i915_vma *vma)
{
- struct drm_i915_private *i915 = vma->vm->i915;
-
if (i915_vma_is_map_and_fenceable(vma))
- intel_uncore_posting_read_fw(&i915->uncore, GUC_STATUS);
+ intel_uncore_posting_read_fw(vma->vm->gt->uncore,
+ GUC_STATUS);
}
static void guc_submit(struct intel_engine_cs *engine,
@@ -499,9 +243,8 @@ static void guc_submit(struct intel_engine_cs *engine,
struct i915_request **end)
{
struct intel_guc *guc = &engine->gt->uc.guc;
- struct intel_guc_client *client = guc->execbuf_client;
- spin_lock(&client->wq_lock);
+ spin_lock(&guc->wq_lock);
do {
struct i915_request *rq = *out++;
@@ -510,7 +253,7 @@ static void guc_submit(struct intel_engine_cs *engine,
guc_add_request(guc, rq);
} while (out != end);
- spin_unlock(&client->wq_lock);
+ spin_unlock(&guc->wq_lock);
}
static inline int rq_prio(const struct i915_request *rq)
@@ -529,7 +272,7 @@ static struct i915_request *schedule_in(struct i915_request *rq, int idx)
* required if we generalise the inflight tracking.
*/
- intel_gt_pm_get(rq->engine->gt);
+ __intel_gt_pm_get(rq->engine->gt);
return i915_request_get(rq);
}
@@ -537,7 +280,7 @@ static void schedule_out(struct i915_request *rq)
{
trace_i915_request_out(rq);
- intel_gt_pm_put(rq->engine->gt);
+ intel_gt_pm_put_async(rq->engine->gt);
i915_request_put(rq);
}
@@ -572,7 +315,7 @@ static void __guc_dequeue(struct intel_engine_cs *engine)
int i;
priolist_for_each_request_consume(rq, rn, p, i) {
- if (last && rq->hw_context != last->hw_context) {
+ if (last && rq->context != last->context) {
if (port == last_port)
goto done;
@@ -631,7 +374,7 @@ static void guc_reset_prepare(struct intel_engine_cs *engine)
{
struct intel_engine_execlists * const execlists = &engine->execlists;
- GEM_TRACE("%s\n", engine->name);
+ ENGINE_TRACE(engine, "\n");
/*
* Prevent request submission to the hardware until we have
@@ -658,7 +401,7 @@ cancel_port_requests(struct intel_engine_execlists * const execlists)
memset(execlists->inflight, 0, sizeof(execlists->inflight));
}
-static void guc_reset(struct intel_engine_cs *engine, bool stalled)
+static void guc_reset_rewind(struct intel_engine_cs *engine, bool stalled)
{
struct intel_engine_execlists * const execlists = &engine->execlists;
struct i915_request *rq;
@@ -677,20 +420,20 @@ static void guc_reset(struct intel_engine_cs *engine, bool stalled)
stalled = false;
__i915_request_reset(rq, stalled);
- intel_lr_context_reset(engine, rq->hw_context, rq->head, stalled);
+ intel_lr_context_reset(engine, rq->context, rq->head, stalled);
out_unlock:
spin_unlock_irqrestore(&engine->active.lock, flags);
}
-static void guc_cancel_requests(struct intel_engine_cs *engine)
+static void guc_reset_cancel(struct intel_engine_cs *engine)
{
struct intel_engine_execlists * const execlists = &engine->execlists;
struct i915_request *rq, *rn;
struct rb_node *rb;
unsigned long flags;
- GEM_TRACE("%s\n", engine->name);
+ ENGINE_TRACE(engine, "\n");
/*
* Before we call engine->cancel_requests(), we should have exclusive
@@ -751,8 +494,8 @@ static void guc_reset_finish(struct intel_engine_cs *engine)
/* And kick in case we missed a new request submission. */
tasklet_hi_schedule(&execlists->tasklet);
- GEM_TRACE("%s: depth->%d\n", engine->name,
- atomic_read(&execlists->tasklet.count));
+ ENGINE_TRACE(engine, "depth->%d\n",
+ atomic_read(&execlists->tasklet.count));
}
/*
@@ -761,213 +504,6 @@ static void guc_reset_finish(struct intel_engine_cs *engine)
* path of guc_submit() above.
*/
-/* Check that a doorbell register is in the expected state */
-static bool doorbell_ok(struct intel_guc *guc, u16 db_id)
-{
- bool valid;
-
- GEM_BUG_ON(db_id >= GUC_NUM_DOORBELLS);
-
- valid = __doorbell_valid(guc, db_id);
-
- if (test_bit(db_id, guc->doorbell_bitmap) == valid)
- return true;
-
- DRM_DEBUG_DRIVER("Doorbell %u has unexpected state: valid=%s\n",
- db_id, yesno(valid));
-
- return false;
-}
-
-static bool guc_verify_doorbells(struct intel_guc *guc)
-{
- bool doorbells_ok = true;
- u16 db_id;
-
- for (db_id = 0; db_id < GUC_NUM_DOORBELLS; ++db_id)
- if (!doorbell_ok(guc, db_id))
- doorbells_ok = false;
-
- return doorbells_ok;
-}
-
-/**
- * guc_client_alloc() - Allocate an intel_guc_client
- * @guc: the intel_guc structure
- * @priority: client priority, one of the four levels _CRITICAL, _HIGH,
- * _NORMAL and _LOW. The kernel client that replaces execlists submission
- * is created with NORMAL priority. The priority of a scheduler client can
- * be HIGH, while a preemption context can use CRITICAL.
- *
- * Return: An intel_guc_client object on success, an ERR_PTR otherwise.
- */
-static struct intel_guc_client *
-guc_client_alloc(struct intel_guc *guc, u32 priority)
-{
- struct intel_guc_client *client;
- struct i915_vma *vma;
- void *vaddr;
- int ret;
-
- client = kzalloc(sizeof(*client), GFP_KERNEL);
- if (!client)
- return ERR_PTR(-ENOMEM);
-
- client->guc = guc;
- client->priority = priority;
- client->doorbell_id = GUC_DOORBELL_INVALID;
- spin_lock_init(&client->wq_lock);
-
- ret = ida_simple_get(&guc->stage_ids, 0, GUC_MAX_STAGE_DESCRIPTORS,
- GFP_KERNEL);
- if (ret < 0)
- goto err_client;
-
- client->stage_id = ret;
-
- /* The first page is doorbell/proc_desc. The following two pages are the wq. */
- vma = intel_guc_allocate_vma(guc, GUC_DB_SIZE + GUC_WQ_SIZE);
- if (IS_ERR(vma)) {
- ret = PTR_ERR(vma);
- goto err_id;
- }
-
- /* We'll keep just the first (doorbell/proc) page permanently kmap'd. */
- client->vma = vma;
-
- vaddr = i915_gem_object_pin_map(vma->obj, I915_MAP_WB);
- if (IS_ERR(vaddr)) {
- ret = PTR_ERR(vaddr);
- goto err_vma;
- }
- client->vaddr = vaddr;
-
- ret = reserve_doorbell(client);
- if (ret)
- goto err_vaddr;
-
- client->doorbell_offset = __select_cacheline(guc);
-
- /*
- * Since the doorbell only requires a single cacheline, we can save
- * space by putting the application process descriptor in the same
- * page. Use the half of the page that doesn't include the doorbell.
- */
- if (client->doorbell_offset >= (GUC_DB_SIZE / 2))
- client->proc_desc_offset = 0;
- else
- client->proc_desc_offset = (GUC_DB_SIZE / 2);
-
- DRM_DEBUG_DRIVER("new priority %u client %p: stage_id %u\n",
- priority, client, client->stage_id);
- DRM_DEBUG_DRIVER("doorbell id %u, cacheline offset 0x%lx\n",
- client->doorbell_id, client->doorbell_offset);
-
- return client;
-
-err_vaddr:
- i915_gem_object_unpin_map(client->vma->obj);
-err_vma:
- i915_vma_unpin_and_release(&client->vma, 0);
-err_id:
- ida_simple_remove(&guc->stage_ids, client->stage_id);
-err_client:
- kfree(client);
- return ERR_PTR(ret);
-}
-
-static void guc_client_free(struct intel_guc_client *client)
-{
- unreserve_doorbell(client);
- i915_vma_unpin_and_release(&client->vma, I915_VMA_RELEASE_MAP);
- ida_simple_remove(&client->guc->stage_ids, client->stage_id);
- kfree(client);
-}
-
-static inline bool ctx_save_restore_disabled(struct intel_context *ce)
-{
- u32 sr = ce->lrc_reg_state[CTX_CONTEXT_CONTROL + 1];
-
-#define SR_DISABLED \
- _MASKED_BIT_ENABLE(CTX_CTRL_ENGINE_CTX_RESTORE_INHIBIT | \
- CTX_CTRL_ENGINE_CTX_SAVE_INHIBIT)
-
- return (sr & SR_DISABLED) == SR_DISABLED;
-
-#undef SR_DISABLED
-}
-
-static int guc_clients_create(struct intel_guc *guc)
-{
- struct intel_guc_client *client;
-
- GEM_BUG_ON(guc->execbuf_client);
-
- client = guc_client_alloc(guc, GUC_CLIENT_PRIORITY_KMD_NORMAL);
- if (IS_ERR(client)) {
- DRM_ERROR("Failed to create GuC client for submission!\n");
- return PTR_ERR(client);
- }
- guc->execbuf_client = client;
-
- return 0;
-}
-
-static void guc_clients_destroy(struct intel_guc *guc)
-{
- struct intel_guc_client *client;
-
- client = fetch_and_zero(&guc->execbuf_client);
- if (client)
- guc_client_free(client);
-}
-
-static int __guc_client_enable(struct intel_guc_client *client)
-{
- int ret;
-
- guc_proc_desc_init(client);
- guc_stage_desc_init(client);
-
- ret = create_doorbell(client);
- if (ret)
- goto fail;
-
- return 0;
-
-fail:
- guc_stage_desc_fini(client);
- guc_proc_desc_fini(client);
- return ret;
-}
-
-static void __guc_client_disable(struct intel_guc_client *client)
-{
- /*
- * By the time we're here, GuC may have already been reset. If that is
- * the case, instead of trying (in vain) to communicate with it, let's
- * just clean up the doorbell HW and our internal state.
- */
- if (intel_guc_is_running(client->guc))
- destroy_doorbell(client);
- else
- __fini_doorbell(client);
-
- guc_stage_desc_fini(client);
- guc_proc_desc_fini(client);
-}
-
-static int guc_clients_enable(struct intel_guc *guc)
-{
- return __guc_client_enable(guc->execbuf_client);
-}
-
-static void guc_clients_disable(struct intel_guc *guc)
-{
- if (guc->execbuf_client)
- __guc_client_disable(guc->execbuf_client);
-}
-
/*
* Set up the memory resources to be shared with the GuC (via the GGTT)
* at firmware loading time.
@@ -988,13 +524,20 @@ int intel_guc_submission_init(struct intel_guc *guc)
*/
GEM_BUG_ON(!guc->stage_desc_pool);
- WARN_ON(!guc_verify_doorbells(guc));
- ret = guc_clients_create(guc);
+ ret = guc_workqueue_create(guc);
if (ret)
goto err_pool;
+ ret = guc_proc_desc_create(guc);
+ if (ret)
+ goto err_workqueue;
+
+ spin_lock_init(&guc->wq_lock);
+
return 0;
+err_workqueue:
+ guc_workqueue_destroy(guc);
err_pool:
guc_stage_desc_pool_destroy(guc);
return ret;
@@ -1002,83 +545,37 @@ err_pool:
void intel_guc_submission_fini(struct intel_guc *guc)
{
- guc_clients_destroy(guc);
- WARN_ON(!guc_verify_doorbells(guc));
-
- if (guc->stage_desc_pool)
+ if (guc->stage_desc_pool) {
+ guc_proc_desc_destroy(guc);
+ guc_workqueue_destroy(guc);
guc_stage_desc_pool_destroy(guc);
+ }
}
static void guc_interrupts_capture(struct intel_gt *gt)
{
- struct intel_rps *rps = &gt->rps;
struct intel_uncore *uncore = gt->uncore;
- struct intel_engine_cs *engine;
- enum intel_engine_id id;
- int irqs;
+ u32 irqs = GT_CONTEXT_SWITCH_INTERRUPT;
+ u32 dmask = irqs << 16 | irqs;
- /* tell all command streamers to forward interrupts (but not vblank)
- * to GuC
- */
- irqs = _MASKED_BIT_ENABLE(GFX_INTERRUPT_STEERING);
- for_each_engine(engine, gt, id)
- ENGINE_WRITE(engine, RING_MODE_GEN7, irqs);
-
- /* route USER_INTERRUPT to Host, all others are sent to GuC. */
- irqs = GT_RENDER_USER_INTERRUPT << GEN8_RCS_IRQ_SHIFT |
- GT_RENDER_USER_INTERRUPT << GEN8_BCS_IRQ_SHIFT;
- /* These three registers have the same bit definitions */
- intel_uncore_write(uncore, GUC_BCS_RCS_IER, ~irqs);
- intel_uncore_write(uncore, GUC_VCS2_VCS1_IER, ~irqs);
- intel_uncore_write(uncore, GUC_WD_VECS_IER, ~irqs);
+ GEM_BUG_ON(INTEL_GEN(gt->i915) < 11);
- /*
- * The REDIRECT_TO_GUC bit of the PMINTRMSK register directs all
- * (unmasked) PM interrupts to the GuC. All other bits of this
- * register *disable* generation of a specific interrupt.
- *
- * 'pm_intrmsk_mbz' indicates bits that are NOT to be set when
- * writing to the PM interrupt mask register, i.e. interrupts
- * that must not be disabled.
- *
- * If the GuC is handling these interrupts, then we must not let
- * the PM code disable ANY interrupt that the GuC is expecting.
- * So for each ENABLED (0) bit in this register, we must SET the
- * bit in pm_intrmsk_mbz so that it's left enabled for the GuC.
- * The GuC needs the ARAT expired interrupt unmasked, hence it is set in
- * pm_intrmsk_mbz.
- *
- * Here we CLEAR REDIRECT_TO_GUC bit in pm_intrmsk_mbz, which will
- * result in the register bit being left SET!
- */
- rps->pm_intrmsk_mbz |= ARAT_EXPIRED_INTRMSK;
- rps->pm_intrmsk_mbz &= ~GEN8_PMINTR_DISABLE_REDIRECT_TO_GUC;
+ /* Don't handle the ctx switch interrupt in GuC submission mode */
+ intel_uncore_rmw(uncore, GEN11_RENDER_COPY_INTR_ENABLE, dmask, 0);
+ intel_uncore_rmw(uncore, GEN11_VCS_VECS_INTR_ENABLE, dmask, 0);
}
static void guc_interrupts_release(struct intel_gt *gt)
{
- struct intel_rps *rps = &gt->rps;
struct intel_uncore *uncore = gt->uncore;
- struct intel_engine_cs *engine;
- enum intel_engine_id id;
- int irqs;
+ u32 irqs = GT_CONTEXT_SWITCH_INTERRUPT;
+ u32 dmask = irqs << 16 | irqs;
- /*
- * tell all command streamers NOT to forward interrupts or vblank
- * to GuC.
- */
- irqs = _MASKED_FIELD(GFX_FORWARD_VBLANK_MASK, GFX_FORWARD_VBLANK_NEVER);
- irqs |= _MASKED_BIT_DISABLE(GFX_INTERRUPT_STEERING);
- for_each_engine(engine, gt, id)
- ENGINE_WRITE(engine, RING_MODE_GEN7, irqs);
-
- /* route all GT interrupts to the host */
- intel_uncore_write(uncore, GUC_BCS_RCS_IER, 0);
- intel_uncore_write(uncore, GUC_VCS2_VCS1_IER, 0);
- intel_uncore_write(uncore, GUC_WD_VECS_IER, 0);
-
- rps->pm_intrmsk_mbz |= GEN8_PMINTR_DISABLE_REDIRECT_TO_GUC;
- rps->pm_intrmsk_mbz &= ~ARAT_EXPIRED_INTRMSK;
+ GEM_BUG_ON(INTEL_GEN(gt->i915) < 11);
+
+ /* Handle ctx switch interrupts again */
+ intel_uncore_rmw(uncore, GEN11_RENDER_COPY_INTR_ENABLE, 0, dmask);
+ intel_uncore_rmw(uncore, GEN11_VCS_VECS_INTR_ENABLE, 0, dmask);
}
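
Both interrupt routines are now thin wrappers around the uncore read-modify-write helper: capture clears the context-switch enable bits (steering those events to the GuC) and release sets them again for the host. Roughly, and ignoring forcewake handling, the helper behaves like this sketch (not the driver's actual implementation):

	static void uncore_rmw_sketch(struct intel_uncore *uncore,
				      i915_reg_t reg, u32 clear, u32 set)
	{
		u32 old = intel_uncore_read(uncore, reg);

		intel_uncore_write(uncore, reg, (old & ~clear) | set);
	}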
static void guc_set_default_submission(struct intel_engine_cs *engine)
@@ -1102,11 +599,10 @@ static void guc_set_default_submission(struct intel_engine_cs *engine)
engine->park = engine->unpark = NULL;
engine->reset.prepare = guc_reset_prepare;
- engine->reset.reset = guc_reset;
+ engine->reset.rewind = guc_reset_rewind;
+ engine->reset.cancel = guc_reset_cancel;
engine->reset.finish = guc_reset_finish;
- engine->cancel_requests = guc_cancel_requests;
-
engine->flags &= ~I915_ENGINE_SUPPORTS_STATS;
engine->flags |= I915_ENGINE_NEEDS_BREADCRUMB_TASKLET;
@@ -1119,16 +615,11 @@ static void guc_set_default_submission(struct intel_engine_cs *engine)
GEM_BUG_ON(engine->irq_enable || engine->irq_disable);
}
-int intel_guc_submission_enable(struct intel_guc *guc)
+void intel_guc_submission_enable(struct intel_guc *guc)
{
struct intel_gt *gt = guc_to_gt(guc);
struct intel_engine_cs *engine;
enum intel_engine_id id;
- int err;
-
- err = i915_inject_probe_error(gt->i915, -ENXIO);
- if (err)
- return err;
/*
* We're using GuC work items for submitting work through GuC. Since
@@ -1143,11 +634,8 @@ int intel_guc_submission_enable(struct intel_guc *guc)
sizeof(struct guc_wq_item) *
I915_NUM_ENGINES > GUC_WQ_SIZE);
- GEM_BUG_ON(!guc->execbuf_client);
-
- err = guc_clients_enable(guc);
- if (err)
- return err;
+ guc_proc_desc_init(guc);
+ guc_stage_desc_init(guc);
/* Take over from manual control of ELSP (execlists) */
guc_interrupts_capture(gt);
@@ -1156,8 +644,6 @@ int intel_guc_submission_enable(struct intel_guc *guc)
engine->set_default_submission = guc_set_default_submission;
engine->set_default_submission(engine);
}
-
- return 0;
}
void intel_guc_submission_disable(struct intel_guc *guc)
@@ -1166,8 +652,12 @@ void intel_guc_submission_disable(struct intel_guc *guc)
GEM_BUG_ON(gt->awake); /* GT should be parked first */
+ /* Note: By the time we're here, GuC may have already been reset */
+
guc_interrupts_release(gt);
- guc_clients_disable(guc);
+
+ guc_stage_desc_fini(guc);
+ guc_proc_desc_fini(guc);
}
static bool __guc_submission_support(struct intel_guc *guc)
@@ -1186,6 +676,7 @@ void intel_guc_submission_init_early(struct intel_guc *guc)
guc->submission_supported = __guc_submission_support(guc);
}
-#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
-#include "selftest_guc.c"
-#endif
+bool intel_engine_in_guc_submission_mode(const struct intel_engine_cs *engine)
+{
+ return engine->set_default_submission == guc_set_default_submission;
+}
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.h b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.h
index 54d716828352..e402a2932592 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.h
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.h
@@ -6,62 +6,18 @@
#ifndef _INTEL_GUC_SUBMISSION_H_
#define _INTEL_GUC_SUBMISSION_H_
-#include <linux/spinlock.h>
+#include <linux/types.h>
-#include "gt/intel_engine_types.h"
-
-#include "i915_gem.h"
-#include "i915_selftest.h"
-
-struct drm_i915_private;
-
-/*
- * This structure primarily describes the GEM object shared with the GuC.
- * The specs sometimes refer to this object as a "GuC context", but we use
- * the term "client" to avoid confusion with hardware contexts. This
- * GEM object is held for the entire lifetime of our interaction with
- * the GuC, being allocated before the GuC is loaded with its firmware.
- * Because there's no way to update the address used by the GuC after
- * initialisation, the shared object must stay pinned into the GGTT as
- * long as the GuC is in use. We also keep the first page (only) mapped
- * into kernel address space, as it includes shared data that must be
- * updated on every request submission.
- *
- * The single GEM object described here is actually made up of several
- * separate areas, as far as the GuC is concerned. The first page (kept
- * kmap'd) includes the "process descriptor" which holds sequence data for
- * the doorbell, and one cacheline which actually *is* the doorbell; a
- * write to this will "ring the doorbell" (i.e. send an interrupt to the
- * GuC). The subsequent pages of the client object constitute the work
- * queue (a circular array of work items), again described in the process
- * descriptor. Work queue pages are mapped momentarily as required.
- */
-struct intel_guc_client {
- struct i915_vma *vma;
- void *vaddr;
- struct intel_guc *guc;
-
- /* bitmap of (host) engine ids */
- u32 priority;
- u32 stage_id;
- u32 proc_desc_offset;
-
- u16 doorbell_id;
- unsigned long doorbell_offset;
-
- /* Protects GuC client's WQ access */
- spinlock_t wq_lock;
-
- /* For testing purposes, use nop WQ items instead of real ones */
- I915_SELFTEST_DECLARE(bool use_nop_wqi);
-};
+struct intel_guc;
+struct intel_engine_cs;
void intel_guc_submission_init_early(struct intel_guc *guc);
int intel_guc_submission_init(struct intel_guc *guc);
-int intel_guc_submission_enable(struct intel_guc *guc);
+void intel_guc_submission_enable(struct intel_guc *guc);
void intel_guc_submission_disable(struct intel_guc *guc);
void intel_guc_submission_fini(struct intel_guc *guc);
int intel_guc_preempt_work_create(struct intel_guc *guc);
void intel_guc_preempt_work_destroy(struct intel_guc *guc);
+bool intel_engine_in_guc_submission_mode(const struct intel_engine_cs *engine);
#endif
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_huc_fw.c b/drivers/gpu/drm/i915/gt/uc/intel_huc_fw.c
index d654340d4d03..eee193bf2cc4 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_huc_fw.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_huc_fw.c
@@ -39,5 +39,5 @@ void intel_huc_fw_init_early(struct intel_huc *huc)
int intel_huc_fw_upload(struct intel_huc *huc)
{
/* HW doesn't look at destination address for HuC, so set it to 0 */
- return intel_uc_fw_upload(&huc->fw, huc_to_gt(huc), 0, HUC_UKERNEL);
+ return intel_uc_fw_upload(&huc->fw, 0, HUC_UKERNEL);
}
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_uc.c b/drivers/gpu/drm/i915/gt/uc/intel_uc.c
index 629b19377a29..64934a876a50 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_uc.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_uc.c
@@ -12,6 +12,9 @@
#include "i915_drv.h"
+static const struct intel_uc_ops uc_ops_off;
+static const struct intel_uc_ops uc_ops_on;
+
/* Reset GuC providing us with fresh state for both GuC and HuC.
*/
static int __intel_uc_reset_hw(struct intel_uc *uc)
@@ -89,6 +92,11 @@ void intel_uc_init_early(struct intel_uc *uc)
intel_huc_init_early(&uc->huc);
__confirm_options(uc);
+
+ if (intel_uc_uses_guc(uc))
+ uc->ops = &uc_ops_on;
+ else
+ uc->ops = &uc_ops_off;
}
void intel_uc_driver_late_release(struct intel_uc *uc)
@@ -123,6 +131,11 @@ static void __uc_free_load_err_log(struct intel_uc *uc)
i915_gem_object_put(log);
}
+static inline bool guc_communication_enabled(struct intel_guc *guc)
+{
+ return intel_guc_ct_enabled(&guc->ct);
+}
+
/*
* Events triggered while CT buffers are disabled are logged in the SCRATCH_15
* register using the same bits used in the CT message payload. Since our
@@ -158,7 +171,7 @@ static void guc_handle_mmio_msg(struct intel_guc *guc)
struct drm_i915_private *i915 = guc_to_gt(guc)->i915;
/* we need communication to be enabled to reply to GuC */
- GEM_BUG_ON(guc->handler == intel_guc_to_host_event_handler_nop);
+ GEM_BUG_ON(!guc_communication_enabled(guc));
if (!guc->mmio_msg)
return;
@@ -185,11 +198,6 @@ static void guc_disable_interrupts(struct intel_guc *guc)
guc->interrupts.disable(guc);
}
-static inline bool guc_communication_enabled(struct intel_guc *guc)
-{
- return guc->send != intel_guc_send_nop;
-}
-
static int guc_enable_communication(struct intel_guc *guc)
{
struct drm_i915_private *i915 = guc_to_gt(guc)->i915;
@@ -205,9 +213,6 @@ static int guc_enable_communication(struct intel_guc *guc)
if (ret)
return ret;
- guc->send = intel_guc_send_ct;
- guc->handler = intel_guc_to_host_event_handler_ct;
-
/* check for mmio messages received before/during the CT enable */
guc_get_mmio_msg(guc);
guc_handle_mmio_msg(guc);
@@ -216,7 +221,7 @@ static int guc_enable_communication(struct intel_guc *guc)
/* check for CT messages received before we enabled interrupts */
spin_lock_irq(&i915->irq_lock);
- intel_guc_to_host_event_handler_ct(guc);
+ intel_guc_ct_event_handler(&guc->ct);
spin_unlock_irq(&i915->irq_lock);
DRM_INFO("GuC communication enabled\n");
@@ -224,7 +229,7 @@ static int guc_enable_communication(struct intel_guc *guc)
return 0;
}
-static void __guc_stop_communication(struct intel_guc *guc)
+static void guc_disable_communication(struct intel_guc *guc)
{
/*
* Events generated during or after CT disable are logged by guc in
@@ -235,23 +240,6 @@ static void __guc_stop_communication(struct intel_guc *guc)
guc_disable_interrupts(guc);
- guc->send = intel_guc_send_nop;
- guc->handler = intel_guc_to_host_event_handler_nop;
-}
-
-static void guc_stop_communication(struct intel_guc *guc)
-{
- intel_guc_ct_stop(&guc->ct);
-
- __guc_stop_communication(guc);
-
- DRM_INFO("GuC communication stopped\n");
-}
-
-static void guc_disable_communication(struct intel_guc *guc)
-{
- __guc_stop_communication(guc);
-
intel_guc_ct_disable(&guc->ct);
/*
@@ -265,41 +253,33 @@ static void guc_disable_communication(struct intel_guc *guc)
DRM_INFO("GuC communication disabled\n");
}
-void intel_uc_fetch_firmwares(struct intel_uc *uc)
+static void __uc_fetch_firmwares(struct intel_uc *uc)
{
- struct drm_i915_private *i915 = uc_to_gt(uc)->i915;
int err;
- if (!intel_uc_uses_guc(uc))
- return;
+ GEM_BUG_ON(!intel_uc_uses_guc(uc));
- err = intel_uc_fw_fetch(&uc->guc.fw, i915);
+ err = intel_uc_fw_fetch(&uc->guc.fw);
if (err)
return;
if (intel_uc_uses_huc(uc))
- intel_uc_fw_fetch(&uc->huc.fw, i915);
+ intel_uc_fw_fetch(&uc->huc.fw);
}
-void intel_uc_cleanup_firmwares(struct intel_uc *uc)
+static void __uc_cleanup_firmwares(struct intel_uc *uc)
{
- if (!intel_uc_uses_guc(uc))
- return;
-
- if (intel_uc_uses_huc(uc))
- intel_uc_fw_cleanup_fetch(&uc->huc.fw);
-
+ intel_uc_fw_cleanup_fetch(&uc->huc.fw);
intel_uc_fw_cleanup_fetch(&uc->guc.fw);
}
-void intel_uc_init(struct intel_uc *uc)
+static void __uc_init(struct intel_uc *uc)
{
struct intel_guc *guc = &uc->guc;
struct intel_huc *huc = &uc->huc;
int ret;
- if (!intel_uc_uses_guc(uc))
- return;
+ GEM_BUG_ON(!intel_uc_uses_guc(uc));
/* XXX: GuC submission is unavailable for now */
GEM_BUG_ON(intel_uc_supports_guc_submission(uc));
@@ -314,17 +294,10 @@ void intel_uc_init(struct intel_uc *uc)
intel_huc_init(huc);
}
-void intel_uc_fini(struct intel_uc *uc)
+static void __uc_fini(struct intel_uc *uc)
{
- struct intel_guc *guc = &uc->guc;
-
- if (!intel_uc_uses_guc(uc))
- return;
-
- if (intel_uc_uses_huc(uc))
- intel_huc_fini(&uc->huc);
-
- intel_guc_fini(guc);
+ intel_huc_fini(&uc->huc);
+ intel_guc_fini(&uc->guc);
__uc_free_load_err_log(uc);
}
@@ -342,14 +315,6 @@ static int __uc_sanitize(struct intel_uc *uc)
return __intel_uc_reset_hw(uc);
}
-void intel_uc_sanitize(struct intel_uc *uc)
-{
- if (!intel_uc_supports_guc(uc))
- return;
-
- __uc_sanitize(uc);
-}
-
/* Initialize and verify the uC regs related to uC positioning in WOPCM */
static int uc_init_wopcm(struct intel_uc *uc)
{
@@ -413,13 +378,8 @@ static bool uc_is_wopcm_locked(struct intel_uc *uc)
(intel_uncore_read(uncore, DMA_GUC_WOPCM_OFFSET) & GUC_WOPCM_OFFSET_VALID);
}
-int intel_uc_init_hw(struct intel_uc *uc)
+static int __uc_check_hw(struct intel_uc *uc)
{
- struct drm_i915_private *i915 = uc_to_gt(uc)->i915;
- struct intel_guc *guc = &uc->guc;
- struct intel_huc *huc = &uc->huc;
- int ret, attempts;
-
if (!intel_uc_supports_guc(uc))
return 0;
@@ -428,11 +388,24 @@ int intel_uc_init_hw(struct intel_uc *uc)
* before on this system after reboot, otherwise we risk GPU hangs.
* To check if GuC was loaded before we look at WOPCM registers.
*/
- if (!intel_uc_uses_guc(uc) && !uc_is_wopcm_locked(uc))
- return 0;
+ if (uc_is_wopcm_locked(uc))
+ return -EIO;
+
+ return 0;
+}
+
+static int __uc_init_hw(struct intel_uc *uc)
+{
+ struct drm_i915_private *i915 = uc_to_gt(uc)->i915;
+ struct intel_guc *guc = &uc->guc;
+ struct intel_huc *huc = &uc->huc;
+ int ret, attempts;
+
+ GEM_BUG_ON(!intel_uc_supports_guc(uc));
+ GEM_BUG_ON(!intel_uc_uses_guc(uc));
if (!intel_uc_fw_is_available(&guc->fw)) {
- ret = uc_is_wopcm_locked(uc) ||
+ ret = __uc_check_hw(uc) ||
intel_uc_fw_is_overridden(&guc->fw) ||
intel_uc_supports_guc_submission(uc) ?
intel_uc_fw_status_to_error(guc->fw.status) : 0;
@@ -486,11 +459,8 @@ int intel_uc_init_hw(struct intel_uc *uc)
if (ret)
goto err_communication;
- if (intel_uc_supports_guc_submission(uc)) {
- ret = intel_guc_submission_enable(guc);
- if (ret)
- goto err_communication;
- }
+ if (intel_uc_supports_guc_submission(uc))
+ intel_guc_submission_enable(guc);
dev_info(i915->drm.dev, "%s firmware %s version %u.%u %s:%s\n",
intel_uc_fw_type_repr(INTEL_UC_FW_TYPE_GUC), guc->fw.path,
@@ -531,7 +501,7 @@ err_out:
return -EIO;
}
-void intel_uc_fini_hw(struct intel_uc *uc)
+static void __uc_fini_hw(struct intel_uc *uc)
{
struct intel_guc *guc = &uc->guc;
@@ -560,7 +530,7 @@ void intel_uc_reset_prepare(struct intel_uc *uc)
if (!intel_guc_is_running(guc))
return;
- guc_stop_communication(guc);
+ guc_disable_communication(guc);
__uc_sanitize(uc);
}
@@ -631,3 +601,20 @@ int intel_uc_runtime_resume(struct intel_uc *uc)
*/
return __uc_resume(uc, true);
}
+
+static const struct intel_uc_ops uc_ops_off = {
+ .init_hw = __uc_check_hw,
+};
+
+static const struct intel_uc_ops uc_ops_on = {
+ .sanitize = __uc_sanitize,
+
+ .init_fw = __uc_fetch_firmwares,
+ .fini_fw = __uc_cleanup_firmwares,
+
+ .init = __uc_init,
+ .fini = __uc_fini,
+
+ .init_hw = __uc_init_hw,
+ .fini_hw = __uc_fini_hw,
+};
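
With these two tables, callers never branch on whether the GuC is in use; they dispatch through uc->ops, which intel_uc_init_early() selects exactly once. A hypothetical call site (illustrative only; the real callers live elsewhere in the driver):

	/* Somewhere in driver probe, hypothetically: */
	int err = intel_uc_init_hw(&gt->uc); /* dispatches via uc->ops */

	if (err)
		return err;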
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_uc.h b/drivers/gpu/drm/i915/gt/uc/intel_uc.h
index 527995c21196..49c913524686 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_uc.h
+++ b/drivers/gpu/drm/i915/gt/uc/intel_uc.h
@@ -10,7 +10,20 @@
#include "intel_huc.h"
#include "i915_params.h"
+struct intel_uc;
+
+struct intel_uc_ops {
+ int (*sanitize)(struct intel_uc *uc);
+ void (*init_fw)(struct intel_uc *uc);
+ void (*fini_fw)(struct intel_uc *uc);
+ void (*init)(struct intel_uc *uc);
+ void (*fini)(struct intel_uc *uc);
+ int (*init_hw)(struct intel_uc *uc);
+ void (*fini_hw)(struct intel_uc *uc);
+};
+
struct intel_uc {
+ struct intel_uc_ops const *ops;
struct intel_guc guc;
struct intel_huc huc;
@@ -21,13 +34,6 @@ struct intel_uc {
void intel_uc_init_early(struct intel_uc *uc);
void intel_uc_driver_late_release(struct intel_uc *uc);
void intel_uc_init_mmio(struct intel_uc *uc);
-void intel_uc_fetch_firmwares(struct intel_uc *uc);
-void intel_uc_cleanup_firmwares(struct intel_uc *uc);
-void intel_uc_sanitize(struct intel_uc *uc);
-void intel_uc_init(struct intel_uc *uc);
-int intel_uc_init_hw(struct intel_uc *uc);
-void intel_uc_fini_hw(struct intel_uc *uc);
-void intel_uc_fini(struct intel_uc *uc);
void intel_uc_reset_prepare(struct intel_uc *uc);
void intel_uc_suspend(struct intel_uc *uc);
void intel_uc_runtime_suspend(struct intel_uc *uc);
@@ -64,4 +70,20 @@ static inline bool intel_uc_uses_huc(struct intel_uc *uc)
return intel_huc_is_enabled(&uc->huc);
}
+#define intel_uc_ops_function(_NAME, _OPS, _TYPE, _RET) \
+static inline _TYPE intel_uc_##_NAME(struct intel_uc *uc) \
+{ \
+ if (uc->ops->_OPS) \
+ return uc->ops->_OPS(uc); \
+ return _RET; \
+}
+intel_uc_ops_function(sanitize, sanitize, int, 0);
+intel_uc_ops_function(fetch_firmwares, init_fw, void, );
+intel_uc_ops_function(cleanup_firmwares, fini_fw, void, );
+intel_uc_ops_function(init, init, void, );
+intel_uc_ops_function(fini, fini, void, );
+intel_uc_ops_function(init_hw, init_hw, int, 0);
+intel_uc_ops_function(fini_hw, fini_hw, void, );
+#undef intel_uc_ops_function
+
#endif
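
For reference, expanding intel_uc_ops_function(init_hw, init_hw, int, 0) by hand produces the wrapper below; an op left NULL in uc_ops_off simply falls through to the default return value (0 here, nothing for the void variants):

	static inline int intel_uc_init_hw(struct intel_uc *uc)
	{
		if (uc->ops->init_hw)
			return uc->ops->init_hw(uc);
		return 0;
	}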
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.c b/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.c
index 66a30ab7044a..8ee0a0c7f447 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.c
@@ -11,7 +11,6 @@
#include "intel_uc_fw_abi.h"
#include "i915_drv.h"
-#ifdef CONFIG_DRM_I915_DEBUG_GUC
static inline struct intel_gt *__uc_fw_to_gt(struct intel_uc_fw *uc_fw)
{
GEM_BUG_ON(uc_fw->status == INTEL_UC_FIRMWARE_UNINITIALIZED);
@@ -22,6 +21,7 @@ static inline struct intel_gt *__uc_fw_to_gt(struct intel_uc_fw *uc_fw)
return container_of(uc_fw, struct intel_gt, uc.huc.fw);
}
+#ifdef CONFIG_DRM_I915_DEBUG_GUC
void intel_uc_fw_change_status(struct intel_uc_fw *uc_fw,
enum intel_uc_fw_status status)
{
@@ -219,10 +219,9 @@ void intel_uc_fw_init_early(struct intel_uc_fw *uc_fw,
INTEL_UC_FIRMWARE_NOT_SUPPORTED);
}
-static void __force_fw_fetch_failures(struct intel_uc_fw *uc_fw,
- struct drm_i915_private *i915,
- int e)
+static void __force_fw_fetch_failures(struct intel_uc_fw *uc_fw, int e)
{
+ struct drm_i915_private *i915 = __uc_fw_to_gt(uc_fw)->i915;
bool user = e == -EINVAL;
if (i915_inject_probe_error(i915, e)) {
@@ -260,14 +259,14 @@ static void __force_fw_fetch_failures(struct intel_uc_fw *uc_fw,
/**
* intel_uc_fw_fetch - fetch uC firmware
* @uc_fw: uC firmware
- * @i915: device private
*
* Fetch uC firmware into GEM obj.
*
* Return: 0 on success, a negative errno code on failure.
*/
-int intel_uc_fw_fetch(struct intel_uc_fw *uc_fw, struct drm_i915_private *i915)
+int intel_uc_fw_fetch(struct intel_uc_fw *uc_fw)
{
+ struct drm_i915_private *i915 = __uc_fw_to_gt(uc_fw)->i915;
struct device *dev = i915->drm.dev;
struct drm_i915_gem_object *obj;
const struct firmware *fw = NULL;
@@ -282,8 +281,8 @@ int intel_uc_fw_fetch(struct intel_uc_fw *uc_fw, struct drm_i915_private *i915)
if (err)
return err;
- __force_fw_fetch_failures(uc_fw, i915, -EINVAL);
- __force_fw_fetch_failures(uc_fw, i915, -ESTALE);
+ __force_fw_fetch_failures(uc_fw, -EINVAL);
+ __force_fw_fetch_failures(uc_fw, -ESTALE);
err = request_firmware(&fw, uc_fw->path, dev);
if (err)
@@ -390,8 +389,9 @@ fail:
return err;
}
-static u32 uc_fw_ggtt_offset(struct intel_uc_fw *uc_fw, struct i915_ggtt *ggtt)
+static u32 uc_fw_ggtt_offset(struct intel_uc_fw *uc_fw)
{
+ struct i915_ggtt *ggtt = __uc_fw_to_gt(uc_fw)->ggtt;
struct drm_mm_node *node = &ggtt->uc_fw;
GEM_BUG_ON(!drm_mm_node_allocated(node));
@@ -401,13 +401,12 @@ static u32 uc_fw_ggtt_offset(struct intel_uc_fw *uc_fw, struct i915_ggtt *ggtt)
return lower_32_bits(node->start);
}
-static void intel_uc_fw_ggtt_bind(struct intel_uc_fw *uc_fw,
- struct intel_gt *gt)
+static void uc_fw_bind_ggtt(struct intel_uc_fw *uc_fw)
{
struct drm_i915_gem_object *obj = uc_fw->obj;
- struct i915_ggtt *ggtt = gt->ggtt;
+ struct i915_ggtt *ggtt = __uc_fw_to_gt(uc_fw)->ggtt;
struct i915_vma dummy = {
- .node.start = uc_fw_ggtt_offset(uc_fw, ggtt),
+ .node.start = uc_fw_ggtt_offset(uc_fw),
.node.size = obj->base.size,
.pages = obj->mm.pages,
.vm = &ggtt->vm,
@@ -422,19 +421,18 @@ static void intel_uc_fw_ggtt_bind(struct intel_uc_fw *uc_fw,
ggtt->vm.insert_entries(&ggtt->vm, &dummy, I915_CACHE_NONE, 0);
}
-static void intel_uc_fw_ggtt_unbind(struct intel_uc_fw *uc_fw,
- struct intel_gt *gt)
+static void uc_fw_unbind_ggtt(struct intel_uc_fw *uc_fw)
{
struct drm_i915_gem_object *obj = uc_fw->obj;
- struct i915_ggtt *ggtt = gt->ggtt;
- u64 start = uc_fw_ggtt_offset(uc_fw, ggtt);
+ struct i915_ggtt *ggtt = __uc_fw_to_gt(uc_fw)->ggtt;
+ u64 start = uc_fw_ggtt_offset(uc_fw);
ggtt->vm.clear_range(&ggtt->vm, start, obj->base.size);
}
-static int uc_fw_xfer(struct intel_uc_fw *uc_fw, struct intel_gt *gt,
- u32 wopcm_offset, u32 dma_flags)
+static int uc_fw_xfer(struct intel_uc_fw *uc_fw, u32 dst_offset, u32 dma_flags)
{
+ struct intel_gt *gt = __uc_fw_to_gt(uc_fw);
struct intel_uncore *uncore = gt->uncore;
u64 offset;
int ret;
@@ -446,13 +444,13 @@ static int uc_fw_xfer(struct intel_uc_fw *uc_fw, struct intel_gt *gt,
intel_uncore_forcewake_get(uncore, FORCEWAKE_ALL);
/* Set the source address for the uCode */
- offset = uc_fw_ggtt_offset(uc_fw, gt->ggtt);
+ offset = uc_fw_ggtt_offset(uc_fw);
GEM_BUG_ON(upper_32_bits(offset) & 0xFFFF0000);
intel_uncore_write_fw(uncore, DMA_ADDR_0_LOW, lower_32_bits(offset));
intel_uncore_write_fw(uncore, DMA_ADDR_0_HIGH, upper_32_bits(offset));
/* Set the DMA destination */
- intel_uncore_write_fw(uncore, DMA_ADDR_1_LOW, wopcm_offset);
+ intel_uncore_write_fw(uncore, DMA_ADDR_1_LOW, dst_offset);
intel_uncore_write_fw(uncore, DMA_ADDR_1_HIGH, DMA_ADDRESS_SPACE_WOPCM);
/*
@@ -484,17 +482,16 @@ static int uc_fw_xfer(struct intel_uc_fw *uc_fw, struct intel_gt *gt,
/**
* intel_uc_fw_upload - load uC firmware using custom loader
* @uc_fw: uC firmware
- * @gt: the intel_gt structure
- * @wopcm_offset: destination offset in wopcm
+ * @dst_offset: destination offset
 * @dma_flags: flags for dma ctrl
*
* Loads uC firmware and updates internal flags.
*
* Return: 0 on success, non-zero on failure.
*/
-int intel_uc_fw_upload(struct intel_uc_fw *uc_fw, struct intel_gt *gt,
- u32 wopcm_offset, u32 dma_flags)
+int intel_uc_fw_upload(struct intel_uc_fw *uc_fw, u32 dst_offset, u32 dma_flags)
{
+ struct intel_gt *gt = __uc_fw_to_gt(uc_fw);
int err;
/* make sure the status was cleared the last time we reset the uc */
@@ -508,9 +505,9 @@ int intel_uc_fw_upload(struct intel_uc_fw *uc_fw, struct intel_gt *gt,
return -ENOEXEC;
/* Call custom loader */
- intel_uc_fw_ggtt_bind(uc_fw, gt);
- err = uc_fw_xfer(uc_fw, gt, wopcm_offset, dma_flags);
- intel_uc_fw_ggtt_unbind(uc_fw, gt);
+ uc_fw_bind_ggtt(uc_fw);
+ err = uc_fw_xfer(uc_fw, dst_offset, dma_flags);
+ uc_fw_unbind_ggtt(uc_fw);
if (err)
goto fail;
@@ -547,10 +544,7 @@ int intel_uc_fw_init(struct intel_uc_fw *uc_fw)
void intel_uc_fw_fini(struct intel_uc_fw *uc_fw)
{
- if (!intel_uc_fw_is_available(uc_fw))
- return;
-
- i915_gem_object_unpin_pages(uc_fw->obj);
+ intel_uc_fw_cleanup_fetch(uc_fw);
}
/**
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.h b/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.h
index 7a0a5989afc9..1f30543d0d2d 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.h
+++ b/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.h
@@ -229,10 +229,9 @@ static inline u32 intel_uc_fw_get_upload_size(struct intel_uc_fw *uc_fw)
void intel_uc_fw_init_early(struct intel_uc_fw *uc_fw,
enum intel_uc_fw_type type, bool supported,
enum intel_platform platform, u8 rev);
-int intel_uc_fw_fetch(struct intel_uc_fw *uc_fw, struct drm_i915_private *i915);
+int intel_uc_fw_fetch(struct intel_uc_fw *uc_fw);
void intel_uc_fw_cleanup_fetch(struct intel_uc_fw *uc_fw);
-int intel_uc_fw_upload(struct intel_uc_fw *uc_fw, struct intel_gt *gt,
- u32 wopcm_offset, u32 dma_flags);
+int intel_uc_fw_upload(struct intel_uc_fw *uc_fw, u32 offset, u32 dma_flags);
int intel_uc_fw_init(struct intel_uc_fw *uc_fw);
void intel_uc_fw_fini(struct intel_uc_fw *uc_fw);
size_t intel_uc_fw_copy_rsa(struct intel_uc_fw *uc_fw, void *dst, u32 max_len);
diff --git a/drivers/gpu/drm/i915/gt/uc/selftest_guc.c b/drivers/gpu/drm/i915/gt/uc/selftest_guc.c
deleted file mode 100644
index d8a80388bd31..000000000000
--- a/drivers/gpu/drm/i915/gt/uc/selftest_guc.c
+++ /dev/null
@@ -1,299 +0,0 @@
-// SPDX-License-Identifier: MIT
-/*
- * Copyright © 2017 Intel Corporation
- */
-
-#include "i915_selftest.h"
-#include "gem/i915_gem_pm.h"
-
-/* max doorbell number + negative test for each client type */
-#define ATTEMPTS (GUC_NUM_DOORBELLS + GUC_CLIENT_PRIORITY_NUM)
-
-static struct intel_guc_client *clients[ATTEMPTS];
-
-static bool available_dbs(struct intel_guc *guc, u32 priority)
-{
- unsigned long offset;
- unsigned long end;
- u16 id;
-
- /* first half is used for normal priority, second half for high */
- offset = 0;
- end = GUC_NUM_DOORBELLS / 2;
- if (priority <= GUC_CLIENT_PRIORITY_HIGH) {
- offset = end;
- end += offset;
- }
-
- id = find_next_zero_bit(guc->doorbell_bitmap, end, offset);
- if (id < end)
- return true;
-
- return false;
-}
-
-static int check_all_doorbells(struct intel_guc *guc)
-{
- u16 db_id;
-
- pr_info_once("Max number of doorbells: %d\n", GUC_NUM_DOORBELLS);
- for (db_id = 0; db_id < GUC_NUM_DOORBELLS; ++db_id) {
- if (!doorbell_ok(guc, db_id)) {
- pr_err("doorbell %d, not ok\n", db_id);
- return -EIO;
- }
- }
-
- return 0;
-}
-
-static int ring_doorbell_nop(struct intel_guc_client *client)
-{
- struct guc_process_desc *desc = __get_process_desc(client);
- int err;
-
- client->use_nop_wqi = true;
-
- spin_lock_irq(&client->wq_lock);
-
- guc_wq_item_append(client, 0, 0, 0, 0);
- guc_ring_doorbell(client);
-
- spin_unlock_irq(&client->wq_lock);
-
- client->use_nop_wqi = false;
-
- /* If there are no issues, the GuC will update the WQ head and keep
- * the WQ in active status.
- */
- err = wait_for(READ_ONCE(desc->head) == READ_ONCE(desc->tail), 10);
- if (err) {
- pr_err("doorbell %u ring failed!\n", client->doorbell_id);
- return -EIO;
- }
-
- if (desc->wq_status != WQ_STATUS_ACTIVE) {
- pr_err("doorbell %u ring put WQ in bad state (%u)!\n",
- client->doorbell_id, desc->wq_status);
- return -EIO;
- }
-
- return 0;
-}
-
-/*
- * Basic client sanity check, handy to validate create_clients.
- */
-static int validate_client(struct intel_guc_client *client, int client_priority)
-{
- if (client->priority != client_priority ||
- client->doorbell_id == GUC_DOORBELL_INVALID)
- return -EINVAL;
- else
- return 0;
-}
-
-static bool client_doorbell_in_sync(struct intel_guc_client *client)
-{
- return !client || doorbell_ok(client->guc, client->doorbell_id);
-}
-
-/*
- * Check that we're able to synchronize guc_clients with their doorbells
- *
- * We're creating clients and reserving doorbells once, at module load. During
- * module lifetime, GuC, doorbell HW, and i915 state may go out of sync due to
- * GuC being reset. In other words, GuC clients are still around, but the
- * status of their doorbells may be incorrect. This is why we validate that
- * the doorbell status expected by the driver matches what the GuC/HW
- * actually have.
- */
-static int igt_guc_clients(void *arg)
-{
- struct intel_gt *gt = arg;
- struct intel_guc *guc = &gt->uc.guc;
- intel_wakeref_t wakeref;
- int err = 0;
-
- GEM_BUG_ON(!HAS_GT_UC(gt->i915));
- wakeref = intel_runtime_pm_get(gt->uncore->rpm);
-
- err = check_all_doorbells(guc);
- if (err)
- goto unlock;
-
- /*
- * Get rid of clients created during driver load because the test will
- * recreate them.
- */
- guc_clients_disable(guc);
- guc_clients_destroy(guc);
- if (guc->execbuf_client) {
- pr_err("guc_clients_destroy lied!\n");
- err = -EINVAL;
- goto unlock;
- }
-
- err = guc_clients_create(guc);
- if (err) {
- pr_err("Failed to create clients\n");
- goto unlock;
- }
- GEM_BUG_ON(!guc->execbuf_client);
-
- err = validate_client(guc->execbuf_client,
- GUC_CLIENT_PRIORITY_KMD_NORMAL);
- if (err) {
- pr_err("execbug client validation failed\n");
- goto out;
- }
-
- /* the client should now have reserved a doorbell */
- if (!has_doorbell(guc->execbuf_client)) {
- pr_err("guc_clients_create didn't reserve doorbells\n");
- err = -EINVAL;
- goto out;
- }
-
- /* Now enable the clients */
- guc_clients_enable(guc);
-
- /* each client should now have received a doorbell */
- if (!client_doorbell_in_sync(guc->execbuf_client)) {
- pr_err("failed to initialize the doorbells\n");
- err = -EINVAL;
- goto out;
- }
-
- /*
- * Basic test: an attempt to reallocate a valid doorbell to the
- * client it is currently assigned to should not cause a failure.
- */
- err = create_doorbell(guc->execbuf_client);
-
-out:
- /*
- * Leave a clean state for other tests; besides, the driver always destroys
- * the clients during unload.
- */
- guc_clients_disable(guc);
- guc_clients_destroy(guc);
- guc_clients_create(guc);
- guc_clients_enable(guc);
-unlock:
- intel_runtime_pm_put(gt->uncore->rpm, wakeref);
- return err;
-}
-
-/*
- * Create as many clients as there are doorbells. Note that some
- * client(s)/doorbell(s) are already created during driver load, but this
- * test creates its own and does not interact with the existing ones.
- */
-static int igt_guc_doorbells(void *arg)
-{
- struct intel_gt *gt = arg;
- struct intel_guc *guc = &gt->uc.guc;
- intel_wakeref_t wakeref;
- int i, err = 0;
- u16 db_id;
-
- GEM_BUG_ON(!HAS_GT_UC(gt->i915));
- wakeref = intel_runtime_pm_get(gt->uncore->rpm);
-
- err = check_all_doorbells(guc);
- if (err)
- goto unlock;
-
- for (i = 0; i < ATTEMPTS; i++) {
- clients[i] = guc_client_alloc(guc, i % GUC_CLIENT_PRIORITY_NUM);
-
- if (!clients[i]) {
- pr_err("[%d] No guc client\n", i);
- err = -EINVAL;
- goto out;
- }
-
- if (IS_ERR(clients[i])) {
- if (PTR_ERR(clients[i]) != -ENOSPC) {
- pr_err("[%d] unexpected error\n", i);
- err = PTR_ERR(clients[i]);
- goto out;
- }
-
- if (available_dbs(guc, i % GUC_CLIENT_PRIORITY_NUM)) {
- pr_err("[%d] non-db related alloc fail\n", i);
- err = -EINVAL;
- goto out;
- }
-
- /* expected, ran out of dbs for this client type */
- continue;
- }
-
- /*
- * The check below is only valid because we keep a doorbell
- * assigned during the whole life of the client.
- */
- if (clients[i]->stage_id >= GUC_NUM_DOORBELLS) {
- pr_err("[%d] more clients than doorbells (%d >= %d)\n",
- i, clients[i]->stage_id, GUC_NUM_DOORBELLS);
- err = -EINVAL;
- goto out;
- }
-
- err = validate_client(clients[i], i % GUC_CLIENT_PRIORITY_NUM);
- if (err) {
- pr_err("[%d] client_alloc sanity check failed!\n", i);
- err = -EINVAL;
- goto out;
- }
-
- db_id = clients[i]->doorbell_id;
-
- err = __guc_client_enable(clients[i]);
- if (err) {
- pr_err("[%d] Failed to create a doorbell\n", i);
- goto out;
- }
-
- /* doorbell id shouldn't change; we are holding the mutex */
- if (db_id != clients[i]->doorbell_id) {
- pr_err("[%d] doorbell id changed (%d != %d)\n",
- i, db_id, clients[i]->doorbell_id);
- err = -EINVAL;
- goto out;
- }
-
- err = check_all_doorbells(guc);
- if (err)
- goto out;
-
- err = ring_doorbell_nop(clients[i]);
- if (err)
- goto out;
- }
-
-out:
- for (i = 0; i < ATTEMPTS; i++)
- if (!IS_ERR_OR_NULL(clients[i])) {
- __guc_client_disable(clients[i]);
- guc_client_free(clients[i]);
- }
-unlock:
- intel_runtime_pm_put(gt->uncore->rpm, wakeref);
- return err;
-}
-
-int intel_guc_live_selftest(struct drm_i915_private *i915)
-{
- static const struct i915_subtest tests[] = {
- SUBTEST(igt_guc_clients),
- SUBTEST(igt_guc_doorbells),
- };
-
- if (!USES_GUC_SUBMISSION(i915))
- return 0;
-
- return intel_gt_live_subtests(tests, &i915->gt);
-}
diff --git a/drivers/gpu/drm/i915/gvt/cmd_parser.h b/drivers/gpu/drm/i915/gvt/cmd_parser.h
index 286703643002..ab25d151932a 100644
--- a/drivers/gpu/drm/i915/gvt/cmd_parser.h
+++ b/drivers/gpu/drm/i915/gvt/cmd_parser.h
@@ -38,6 +38,10 @@
#define GVT_CMD_HASH_BITS 7
+struct intel_gvt;
+struct intel_shadow_wa_ctx;
+struct intel_vgpu_workload;
+
void intel_gvt_clean_cmd_parser(struct intel_gvt *gvt);
int intel_gvt_init_cmd_parser(struct intel_gvt *gvt);
diff --git a/drivers/gpu/drm/i915/gvt/display.h b/drivers/gpu/drm/i915/gvt/display.h
index a87f33e6a23c..b59b34046e1e 100644
--- a/drivers/gpu/drm/i915/gvt/display.h
+++ b/drivers/gpu/drm/i915/gvt/display.h
@@ -35,6 +35,11 @@
#ifndef _GVT_DISPLAY_H_
#define _GVT_DISPLAY_H_
+#include <linux/types.h>
+
+struct intel_gvt;
+struct intel_vgpu;
+
#define SBI_REG_MAX 20
#define DPCD_SIZE 0x700
diff --git a/drivers/gpu/drm/i915/gvt/edid.h b/drivers/gpu/drm/i915/gvt/edid.h
index f6dfc8b795ec..dfe0cbc6aad8 100644
--- a/drivers/gpu/drm/i915/gvt/edid.h
+++ b/drivers/gpu/drm/i915/gvt/edid.h
@@ -35,6 +35,10 @@
#ifndef _GVT_EDID_H_
#define _GVT_EDID_H_
+#include <linux/types.h>
+
+struct intel_vgpu;
+
#define EDID_SIZE 128
#define EDID_ADDR 0x50 /* Linux hvm EDID addr */
diff --git a/drivers/gpu/drm/i915/gvt/execlist.h b/drivers/gpu/drm/i915/gvt/execlist.h
index 5ccc2c695848..5c0c1fd30c83 100644
--- a/drivers/gpu/drm/i915/gvt/execlist.h
+++ b/drivers/gpu/drm/i915/gvt/execlist.h
@@ -35,6 +35,8 @@
#ifndef _GVT_EXECLIST_H_
#define _GVT_EXECLIST_H_
+#include <linux/types.h>
+
struct execlist_ctx_descriptor_format {
union {
u32 ldw;
diff --git a/drivers/gpu/drm/i915/gvt/fb_decoder.h b/drivers/gpu/drm/i915/gvt/fb_decoder.h
index 60c155085029..67b6ede9e707 100644
--- a/drivers/gpu/drm/i915/gvt/fb_decoder.h
+++ b/drivers/gpu/drm/i915/gvt/fb_decoder.h
@@ -36,6 +36,8 @@
#ifndef _GVT_FB_DECODER_H_
#define _GVT_FB_DECODER_H_
+#include <linux/types.h>
+
#define _PLANE_CTL_FORMAT_SHIFT 24
#define _PLANE_CTL_TILED_SHIFT 10
#define _PIPE_V_SRCSZ_SHIFT 0
diff --git a/drivers/gpu/drm/i915/gvt/gtt.c b/drivers/gpu/drm/i915/gvt/gtt.c
index 4b04af569c05..34cb404ba4b7 100644
--- a/drivers/gpu/drm/i915/gvt/gtt.c
+++ b/drivers/gpu/drm/i915/gvt/gtt.c
@@ -1282,7 +1282,7 @@ static int ppgtt_populate_shadow_entry(struct intel_vgpu *vgpu,
return -EINVAL;
default:
GEM_BUG_ON(1);
- };
+ }
/* direct shadow */
ret = intel_gvt_hypervisor_dma_map_guest_page(vgpu, gfn, page_size,
diff --git a/drivers/gpu/drm/i915/gvt/handlers.c b/drivers/gpu/drm/i915/gvt/handlers.c
index 1043e6d564df..6d28d72e6c7e 100644
--- a/drivers/gpu/drm/i915/gvt/handlers.c
+++ b/drivers/gpu/drm/i915/gvt/handlers.c
@@ -2691,7 +2691,7 @@ static int init_generic_mmio_info(struct intel_gvt *gvt)
return 0;
}
-static int init_broadwell_mmio_info(struct intel_gvt *gvt)
+static int init_bdw_mmio_info(struct intel_gvt *gvt)
{
struct drm_i915_private *dev_priv = gvt->dev_priv;
int ret;
@@ -3380,20 +3380,20 @@ int intel_gvt_setup_mmio_info(struct intel_gvt *gvt)
goto err;
if (IS_BROADWELL(dev_priv)) {
- ret = init_broadwell_mmio_info(gvt);
+ ret = init_bdw_mmio_info(gvt);
if (ret)
goto err;
} else if (IS_SKYLAKE(dev_priv)
|| IS_KABYLAKE(dev_priv)
|| IS_COFFEELAKE(dev_priv)) {
- ret = init_broadwell_mmio_info(gvt);
+ ret = init_bdw_mmio_info(gvt);
if (ret)
goto err;
ret = init_skl_mmio_info(gvt);
if (ret)
goto err;
} else if (IS_BROXTON(dev_priv)) {
- ret = init_broadwell_mmio_info(gvt);
+ ret = init_bdw_mmio_info(gvt);
if (ret)
goto err;
ret = init_skl_mmio_info(gvt);
diff --git a/drivers/gpu/drm/i915/gvt/hypercall.h b/drivers/gpu/drm/i915/gvt/hypercall.h
index b19a3b1ea4c1..b17c4a1599cd 100644
--- a/drivers/gpu/drm/i915/gvt/hypercall.h
+++ b/drivers/gpu/drm/i915/gvt/hypercall.h
@@ -33,6 +33,10 @@
#ifndef _GVT_HYPERCALL_H_
#define _GVT_HYPERCALL_H_
+#include <linux/types.h>
+
+struct device;
+
enum hypervisor_type {
INTEL_GVT_HYPERVISOR_XEN = 0,
INTEL_GVT_HYPERVISOR_KVM,
diff --git a/drivers/gpu/drm/i915/gvt/interrupt.h b/drivers/gpu/drm/i915/gvt/interrupt.h
index 5313fb1b33e1..fcd663811d37 100644
--- a/drivers/gpu/drm/i915/gvt/interrupt.h
+++ b/drivers/gpu/drm/i915/gvt/interrupt.h
@@ -32,6 +32,8 @@
#ifndef _GVT_INTERRUPT_H_
#define _GVT_INTERRUPT_H_
+#include <linux/types.h>
+
enum intel_gvt_event_type {
RCS_MI_USER_INTERRUPT = 0,
RCS_DEBUG,
@@ -135,6 +137,7 @@ enum intel_gvt_event_type {
struct intel_gvt_irq;
struct intel_gvt;
+struct intel_vgpu;
typedef void (*gvt_event_virt_handler_t)(struct intel_gvt_irq *irq,
enum intel_gvt_event_type event, struct intel_vgpu *vgpu);
diff --git a/drivers/gpu/drm/i915/gvt/mmio.h b/drivers/gpu/drm/i915/gvt/mmio.h
index 5874f1cb4306..2e68f4b02c94 100644
--- a/drivers/gpu/drm/i915/gvt/mmio.h
+++ b/drivers/gpu/drm/i915/gvt/mmio.h
@@ -36,6 +36,8 @@
#ifndef _GVT_MMIO_H_
#define _GVT_MMIO_H_
+#include <linux/types.h>
+
struct intel_gvt;
struct intel_vgpu;
diff --git a/drivers/gpu/drm/i915/gvt/page_track.h b/drivers/gpu/drm/i915/gvt/page_track.h
index fa607a71c3c0..f6eb7135583c 100644
--- a/drivers/gpu/drm/i915/gvt/page_track.h
+++ b/drivers/gpu/drm/i915/gvt/page_track.h
@@ -25,6 +25,9 @@
#ifndef _GVT_PAGE_TRACK_H_
#define _GVT_PAGE_TRACK_H_
+#include <linux/types.h>
+
+struct intel_vgpu;
struct intel_vgpu_page_track;
typedef int (*gvt_page_track_handler_t)(
diff --git a/drivers/gpu/drm/i915/gvt/sched_policy.h b/drivers/gpu/drm/i915/gvt/sched_policy.h
index 7b59e3e88b8b..3dacdad5f529 100644
--- a/drivers/gpu/drm/i915/gvt/sched_policy.h
+++ b/drivers/gpu/drm/i915/gvt/sched_policy.h
@@ -34,6 +34,9 @@
#ifndef __GVT_SCHED_POLICY__
#define __GVT_SCHED_POLICY__
+struct intel_gvt;
+struct intel_vgpu;
+
struct intel_gvt_sched_policy_ops {
int (*init)(struct intel_gvt *gvt);
void (*clean)(struct intel_gvt *gvt);
diff --git a/drivers/gpu/drm/i915/gvt/scheduler.c b/drivers/gpu/drm/i915/gvt/scheduler.c
index 5b2a7d072ec9..685d1e04a5ff 100644
--- a/drivers/gpu/drm/i915/gvt/scheduler.c
+++ b/drivers/gpu/drm/i915/gvt/scheduler.c
@@ -35,12 +35,12 @@
#include <linux/kthread.h>
-#include "gem/i915_gem_context.h"
#include "gem/i915_gem_pm.h"
#include "gt/intel_context.h"
#include "gt/intel_ring.h"
#include "i915_drv.h"
+#include "i915_gem_gtt.h"
#include "gvt.h"
#define RING_CTX_OFF(x) \
@@ -59,7 +59,7 @@ static void set_context_pdp_root_pointer(
static void update_shadow_pdps(struct intel_vgpu_workload *workload)
{
struct drm_i915_gem_object *ctx_obj =
- workload->req->hw_context->state->obj;
+ workload->req->context->state->obj;
struct execlist_ring_context *shadow_ring_context;
struct page *page;
@@ -130,7 +130,7 @@ static int populate_shadow_context(struct intel_vgpu_workload *workload)
struct intel_gvt *gvt = vgpu->gvt;
int ring_id = workload->ring_id;
struct drm_i915_gem_object *ctx_obj =
- workload->req->hw_context->state->obj;
+ workload->req->context->state->obj;
struct execlist_ring_context *shadow_ring_context;
struct page *page;
void *dst;
@@ -205,9 +205,9 @@ static int populate_shadow_context(struct intel_vgpu_workload *workload)
return 0;
}
-static inline bool is_gvt_request(struct i915_request *req)
+static inline bool is_gvt_request(struct i915_request *rq)
{
- return i915_gem_context_force_single_submission(req->gem_context);
+ return intel_context_force_single_submission(rq->context);
}
static void save_ring_hw_state(struct intel_vgpu *vgpu, int ring_id)
@@ -307,7 +307,7 @@ static int copy_workload_to_ring_buffer(struct intel_vgpu_workload *workload)
u32 *cs;
int err;
- if (IS_GEN(req->i915, 9) && is_inhibit_context(req->hw_context))
+ if (IS_GEN(req->i915, 9) && is_inhibit_context(req->context))
intel_vgpu_restore_inhibit_context(vgpu, req);
/*
@@ -363,11 +363,10 @@ static void release_shadow_wa_ctx(struct intel_shadow_wa_ctx *wa_ctx)
}
static void set_context_ppgtt_from_shadow(struct intel_vgpu_workload *workload,
- struct i915_gem_context *ctx)
+ struct intel_context *ce)
{
struct intel_vgpu_mm *mm = workload->shadow_mm;
- struct i915_ppgtt *ppgtt =
- i915_vm_to_ppgtt(i915_gem_context_get_vm_rcu(ctx));
+ struct i915_ppgtt *ppgtt = i915_vm_to_ppgtt(ce->vm);
int i = 0;
if (mm->ppgtt_mm.root_entry_type == GTT_TYPE_PPGTT_ROOT_L4_ENTRY) {
@@ -380,8 +379,6 @@ static void set_context_ppgtt_from_shadow(struct intel_vgpu_workload *workload,
px_dma(pd) = mm->ppgtt_mm.shadow_pdps[i];
}
}
-
- i915_vm_put(&ppgtt->vm);
}
static int
@@ -529,7 +526,7 @@ static void update_wa_ctx_2_shadow_ctx(struct intel_shadow_wa_ctx *wa_ctx)
container_of(wa_ctx, struct intel_vgpu_workload, wa_ctx);
struct i915_request *rq = workload->req;
struct execlist_ring_context *shadow_ring_context =
- (struct execlist_ring_context *)rq->hw_context->lrc_reg_state;
+ (struct execlist_ring_context *)rq->context->lrc_reg_state;
shadow_ring_context->bb_per_ctx_ptr.val =
(shadow_ring_context->bb_per_ctx_ptr.val &
@@ -628,7 +625,7 @@ static int prepare_workload(struct intel_vgpu_workload *workload)
update_shadow_pdps(workload);
- set_context_ppgtt_from_shadow(workload, s->shadow[ring]->gem_context);
+ set_context_ppgtt_from_shadow(workload, s->shadow[ring]);
ret = intel_vgpu_sync_oos_pages(workload->vgpu);
if (ret) {
@@ -787,7 +784,7 @@ static void update_guest_context(struct intel_vgpu_workload *workload)
struct i915_request *rq = workload->req;
struct intel_vgpu *vgpu = workload->vgpu;
struct intel_gvt *gvt = vgpu->gvt;
- struct drm_i915_gem_object *ctx_obj = rq->hw_context->state->obj;
+ struct drm_i915_gem_object *ctx_obj = rq->context->state->obj;
struct execlist_ring_context *shadow_ring_context;
struct page *page;
void *src;
@@ -1223,18 +1220,14 @@ int intel_vgpu_setup_submission(struct intel_vgpu *vgpu)
struct drm_i915_private *i915 = vgpu->gvt->dev_priv;
struct intel_vgpu_submission *s = &vgpu->submission;
struct intel_engine_cs *engine;
- struct i915_gem_context *ctx;
struct i915_ppgtt *ppgtt;
enum intel_engine_id i;
int ret;
- ctx = i915_gem_context_create_kernel(i915, I915_PRIORITY_MAX);
- if (IS_ERR(ctx))
- return PTR_ERR(ctx);
+ ppgtt = i915_ppgtt_create(&i915->gt);
+ if (IS_ERR(ppgtt))
+ return PTR_ERR(ppgtt);
- i915_gem_context_set_force_single_submission(ctx);
-
- ppgtt = i915_vm_to_ppgtt(i915_gem_context_get_vm_rcu(ctx));
i915_context_ppgtt_root_save(s, ppgtt);
for_each_engine(engine, i915, i) {
@@ -1243,12 +1236,16 @@ int intel_vgpu_setup_submission(struct intel_vgpu *vgpu)
INIT_LIST_HEAD(&s->workload_q_head[i]);
s->shadow[i] = ERR_PTR(-EINVAL);
- ce = intel_context_create(ctx, engine);
+ ce = intel_context_create(engine);
if (IS_ERR(ce)) {
ret = PTR_ERR(ce);
goto out_shadow_ctx;
}
+ i915_vm_put(ce->vm);
+ ce->vm = i915_vm_get(&ppgtt->vm);
+ intel_context_set_single_submission(ce);
+
if (!USES_GUC_SUBMISSION(i915)) { /* Max ring buffer size */
const unsigned int ring_size = 512 * SZ_4K;
@@ -1281,7 +1278,6 @@ int intel_vgpu_setup_submission(struct intel_vgpu *vgpu)
bitmap_zero(s->tlb_handle_pending, I915_NUM_ENGINES);
i915_vm_put(&ppgtt->vm);
- i915_gem_context_put(ctx);
return 0;
out_shadow_ctx:
@@ -1294,7 +1290,6 @@ out_shadow_ctx:
intel_context_put(s->shadow[i]);
}
i915_vm_put(&ppgtt->vm);
- i915_gem_context_put(ctx);
return ret;
}
diff --git a/drivers/gpu/drm/i915/i915_active.c b/drivers/gpu/drm/i915/i915_active.c
index a19e7d89bc8a..f3da5c06f331 100644
--- a/drivers/gpu/drm/i915/i915_active.c
+++ b/drivers/gpu/drm/i915/i915_active.c
@@ -6,6 +6,7 @@
#include <linux/debugobjects.h>
+#include "gt/intel_context.h"
#include "gt/intel_engine_pm.h"
#include "gt/intel_ring.h"
@@ -91,10 +92,9 @@ static void debug_active_init(struct i915_active *ref)
static void debug_active_activate(struct i915_active *ref)
{
- spin_lock_irq(&ref->tree_lock);
+ lockdep_assert_held(&ref->tree_lock);
if (!atomic_read(&ref->count)) /* before the first inc */
debug_object_activate(ref, &active_debug_desc);
- spin_unlock_irq(&ref->tree_lock);
}
static void debug_active_deactivate(struct i915_active *ref)
@@ -186,18 +186,33 @@ active_retire(struct i915_active *ref)
__active_retire(ref);
}
+static inline struct dma_fence **
+__active_fence_slot(struct i915_active_fence *active)
+{
+ return (struct dma_fence ** __force)&active->fence;
+}
+
+static inline bool
+active_fence_cb(struct dma_fence *fence, struct dma_fence_cb *cb)
+{
+ struct i915_active_fence *active =
+ container_of(cb, typeof(*active), cb);
+
+ return cmpxchg(__active_fence_slot(active), fence, NULL) == fence;
+}
+
static void
node_retire(struct dma_fence *fence, struct dma_fence_cb *cb)
{
- i915_active_fence_cb(fence, cb);
- active_retire(container_of(cb, struct active_node, base.cb)->ref);
+ if (active_fence_cb(fence, cb))
+ active_retire(container_of(cb, struct active_node, base.cb)->ref);
}
static void
excl_retire(struct dma_fence *fence, struct dma_fence_cb *cb)
{
- i915_active_fence_cb(fence, cb);
- active_retire(container_of(cb, struct i915_active, excl.cb));
+ if (active_fence_cb(fence, cb))
+ active_retire(container_of(cb, struct i915_active, excl.cb));
}
static struct i915_active_fence *
@@ -244,7 +259,7 @@ active_instance(struct i915_active *ref, struct intel_timeline *tl)
}
node = prealloc;
- __i915_active_fence_init(&node->base, &tl->mutex, NULL, node_retire);
+ __i915_active_fence_init(&node->base, NULL, node_retire);
node->ref = ref;
node->timeline = idx;
@@ -262,7 +277,8 @@ out:
void __i915_active_init(struct i915_active *ref,
int (*active)(struct i915_active *ref),
void (*retire)(struct i915_active *ref),
- struct lock_class_key *key)
+ struct lock_class_key *mkey,
+ struct lock_class_key *wkey)
{
unsigned long bits;
@@ -280,9 +296,12 @@ void __i915_active_init(struct i915_active *ref,
init_llist_head(&ref->preallocated_barriers);
atomic_set(&ref->count, 0);
- __mutex_init(&ref->mutex, "i915_active", key);
- __i915_active_fence_init(&ref->excl, &ref->mutex, NULL, excl_retire);
+ __mutex_init(&ref->mutex, "i915_active", mkey);
+ __i915_active_fence_init(&ref->excl, NULL, excl_retire);
INIT_WORK(&ref->work, active_work);
+#if IS_ENABLED(CONFIG_LOCKDEP)
+ lockdep_init_map(&ref->work.lockdep_map, "i915_active.work", wkey, 0);
+#endif
}
static bool ____active_del_barrier(struct i915_active *ref,
@@ -376,15 +395,8 @@ void i915_active_set_exclusive(struct i915_active *ref, struct dma_fence *f)
/* We expect the caller to manage the exclusive timeline ordering */
GEM_BUG_ON(i915_active_is_idle(ref));
- /*
- * As we don't know which mutex the caller is using, we told a small
- * lie to the debug code that it is using the i915_active.mutex;
- * and now we must stick to that lie.
- */
- mutex_acquire(&ref->mutex.dep_map, 0, 0, _THIS_IP_);
if (!__i915_active_fence_set(&ref->excl, f))
atomic_inc(&ref->count);
- mutex_release(&ref->mutex.dep_map, _THIS_IP_);
}
bool i915_active_acquire_if_busy(struct i915_active *ref)
@@ -407,8 +419,10 @@ int i915_active_acquire(struct i915_active *ref)
if (!atomic_read(&ref->count) && ref->active)
err = ref->active(ref);
if (!err) {
+ spin_lock_irq(&ref->tree_lock); /* vs __active_retire() */
debug_active_activate(ref);
atomic_inc(&ref->count);
+ spin_unlock_irq(&ref->tree_lock);
}
mutex_unlock(&ref->mutex);
@@ -461,6 +475,7 @@ int i915_active_wait(struct i915_active *ref)
if (wait_var_event_interruptible(ref, i915_active_is_idle(ref)))
return -EINTR;
+ flush_work(&ref->work);
return 0;
}
@@ -590,12 +605,15 @@ int i915_active_acquire_preallocate_barrier(struct i915_active *ref,
struct intel_engine_cs *engine)
{
intel_engine_mask_t tmp, mask = engine->mask;
+ struct llist_node *pos = NULL, *next;
struct intel_gt *gt = engine->gt;
- struct llist_node *pos, *next;
int err;
GEM_BUG_ON(i915_active_is_idle(ref));
- GEM_BUG_ON(!llist_empty(&ref->preallocated_barriers));
+
+ /* Wait until the previous preallocation is completed */
+ while (!llist_empty(&ref->preallocated_barriers))
+ cond_resched();
/*
* Preallocate a node for each physical engine supporting the target
@@ -615,10 +633,6 @@ int i915_active_acquire_preallocate_barrier(struct i915_active *ref,
goto unwind;
}
-#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM)
- node->base.lock =
- &engine->kernel_context->timeline->mutex;
-#endif
RCU_INIT_POINTER(node->base.fence, NULL);
node->base.cb.func = node_retire;
node->timeline = idx;
@@ -639,18 +653,27 @@ int i915_active_acquire_preallocate_barrier(struct i915_active *ref,
node->base.cb.node.prev = (void *)engine;
atomic_inc(&ref->count);
}
+ GEM_BUG_ON(rcu_access_pointer(node->base.fence) != ERR_PTR(-EAGAIN));
GEM_BUG_ON(barrier_to_engine(node) != engine);
- llist_add(barrier_to_ll(node), &ref->preallocated_barriers);
+ next = barrier_to_ll(node);
+ next->next = pos;
+ if (!pos)
+ pos = next;
intel_engine_pm_get(engine);
}
+ GEM_BUG_ON(!llist_empty(&ref->preallocated_barriers));
+ llist_add_batch(next, pos, &ref->preallocated_barriers);
+
return 0;
unwind:
- llist_for_each_safe(pos, next, take_preallocated_barriers(ref)) {
+ while (pos) {
struct active_node *node = barrier_from_ll(pos);
+ pos = pos->next;
+
atomic_dec(&ref->count);
intel_engine_pm_put(barrier_to_engine(node));
@@ -702,12 +725,18 @@ void i915_active_acquire_barrier(struct i915_active *ref)
}
}
+static struct dma_fence **ll_to_fence_slot(struct llist_node *node)
+{
+ return __active_fence_slot(&barrier_from_ll(node)->base);
+}
+
void i915_request_add_active_barriers(struct i915_request *rq)
{
struct intel_engine_cs *engine = rq->engine;
struct llist_node *node, *next;
unsigned long flags;
+ GEM_BUG_ON(!intel_context_is_barrier(rq->context));
GEM_BUG_ON(intel_engine_is_virtual(engine));
GEM_BUG_ON(i915_request_timeline(rq) != engine->kernel_context->timeline);
@@ -721,19 +750,13 @@ void i915_request_add_active_barriers(struct i915_request *rq)
*/
spin_lock_irqsave(&rq->lock, flags);
llist_for_each_safe(node, next, node) {
- RCU_INIT_POINTER(barrier_from_ll(node)->base.fence, &rq->fence);
- smp_wmb(); /* serialise with reuse_idle_barrier */
+ /* serialise with reuse_idle_barrier */
+ smp_store_mb(*ll_to_fence_slot(node), &rq->fence);
list_add_tail((struct list_head *)node, &rq->fence.cb_list);
}
spin_unlock_irqrestore(&rq->lock, flags);
}
-#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM)
-#define active_is_held(active) lockdep_is_held((active)->lock)
-#else
-#define active_is_held(active) true
-#endif
-
/*
* __i915_active_fence_set: Update the last active fence along its timeline
* @active: the active tracker
@@ -744,7 +767,7 @@ void i915_request_add_active_barriers(struct i915_request *rq)
* fence onto this one. Returns the previous fence (if not already completed),
* which the caller must ensure is executed before the new fence. To ensure
* that the order of fences within the timeline of the i915_active_fence is
- * maintained, it must be locked by the caller.
+ * understood, it should be locked by the caller.
*/
struct dma_fence *
__i915_active_fence_set(struct i915_active_fence *active,
@@ -753,34 +776,41 @@ __i915_active_fence_set(struct i915_active_fence *active,
struct dma_fence *prev;
unsigned long flags;
- /* NB: must be serialised by an outer timeline mutex (active->lock) */
- spin_lock_irqsave(fence->lock, flags);
+ if (fence == rcu_access_pointer(active->fence))
+ return fence;
+
GEM_BUG_ON(test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags));
- prev = rcu_dereference_protected(active->fence, active_is_held(active));
+ /*
+ * Consider that we have two threads arriving (A and B), with
+ * C already resident as the active->fence.
+ *
+ * A does the xchg first, and so it sees C or NULL depending
+ * on the timing of the interrupt handler. If it is NULL, the
+ * previous fence must have been signaled and we know that
+ * we are first on the timeline. If it is still present,
+ * we acquire the lock on that fence and serialise with the interrupt
+ * handler, in the process removing it from any future interrupt
+ * callback. A will then wait on C before executing (if present).
+ *
+ * As B is second, it sees A as the previous fence and so waits for
+ * it to complete its transition and takes over the occupancy for
+ * itself -- remembering that it needs to wait on A before executing.
+ *
+ * Note the strong ordering of the timeline also provides consistent
+ * nesting rules for the fence->lock; the inner lock is always the
+ * older lock.
+ */
+ spin_lock_irqsave(fence->lock, flags);
+ prev = xchg(__active_fence_slot(active), fence);
if (prev) {
GEM_BUG_ON(prev == fence);
spin_lock_nested(prev->lock, SINGLE_DEPTH_NESTING);
__list_del_entry(&active->cb.node);
spin_unlock(prev->lock); /* serialise with prev->cb_list */
-
- /*
- * active->fence is reset by the callback from inside
- * interrupt context. We need to serialise our list
- * manipulation with the fence->lock to prevent the prev
- * being lost inside an interrupt (it can't be replaced as
- * no other caller is allowed to enter __i915_active_fence_set
- * as we hold the timeline lock). After serialising with
- * the callback, we need to double check which ran first,
- * our list_del() [decoupling prev from the callback] or
- * the callback...
- */
- prev = rcu_access_pointer(active->fence);
}
-
- rcu_assign_pointer(active->fence, fence);
+ GEM_BUG_ON(rcu_access_pointer(active->fence) != fence);
list_add_tail(&active->cb.node, &fence->cb_list);
-
spin_unlock_irqrestore(fence->lock, flags);
return prev;
@@ -792,10 +822,6 @@ int i915_active_fence_set(struct i915_active_fence *active,
struct dma_fence *fence;
int err = 0;
-#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM)
- lockdep_assert_held(active->lock);
-#endif
-
/* Must maintain timeline ordering wrt previous active requests */
rcu_read_lock();
fence = __i915_active_fence_set(active, &rq->fence);
@@ -812,7 +838,7 @@ int i915_active_fence_set(struct i915_active_fence *active,
void i915_active_noop(struct dma_fence *fence, struct dma_fence_cb *cb)
{
- i915_active_fence_cb(fence, cb);
+ active_fence_cb(fence, cb);
}
#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
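
The slot protocol documented in the A/B/C comment above boils down to two atomic operations: the retirement callback clears the slot only if it still holds the fence it was armed with (cmpxchg), while the setter unconditionally exchanges in the new fence and is handed back the previous occupant to wait on (xchg). A minimal user-space sketch of that protocol, with invented names rather than the kernel API:

#include <stdatomic.h>
#include <stdio.h>

struct fence { int id; };

static _Atomic(struct fence *) slot;

/* Callback side: clear only if we still own the slot (cf. active_fence_cb). */
static int retire_cb(struct fence *f)
{
        struct fence *expected = f;

        return atomic_compare_exchange_strong(&slot, &expected, NULL);
}

/* Setter side: take over the slot, return the previous fence (cf. xchg). */
static struct fence *set_fence(struct fence *f)
{
        return atomic_exchange(&slot, f);
}

int main(void)
{
        struct fence a = { 1 }, b = { 2 };

        printf("prev=%p\n", (void *)set_fence(&a)); /* NULL: timeline idle */
        printf("prev=%p\n", (void *)set_fence(&b)); /* &a: must wait on a  */
        printf("stale cb=%d\n", retire_cb(&a));     /* 0: a was replaced   */
        printf("live cb=%d\n", retire_cb(&b));      /* 1: b retires slot   */
        return 0;
}

The cmpxchg on the callback side is what lets a stale callback (for a fence that has since been replaced) fall through without retiring the tracker, which is exactly the check node_retire() and excl_retire() now make.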
diff --git a/drivers/gpu/drm/i915/i915_active.h b/drivers/gpu/drm/i915/i915_active.h
index 44859356ce97..b571f675c795 100644
--- a/drivers/gpu/drm/i915/i915_active.h
+++ b/drivers/gpu/drm/i915/i915_active.h
@@ -61,19 +61,15 @@ void i915_active_noop(struct dma_fence *fence, struct dma_fence_cb *cb);
*/
static inline void
__i915_active_fence_init(struct i915_active_fence *active,
- struct mutex *lock,
void *fence,
dma_fence_func_t fn)
{
RCU_INIT_POINTER(active->fence, fence);
active->cb.func = fn ?: i915_active_noop;
-#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM)
- active->lock = lock;
-#endif
}
-#define INIT_ACTIVE_FENCE(A, LOCK) \
- __i915_active_fence_init((A), (LOCK), NULL, NULL)
+#define INIT_ACTIVE_FENCE(A) \
+ __i915_active_fence_init((A), NULL, NULL)
struct dma_fence *
__i915_active_fence_set(struct i915_active_fence *active,
@@ -127,15 +123,6 @@ i915_active_fence_isset(const struct i915_active_fence *active)
return rcu_access_pointer(active->fence);
}
-static inline void
-i915_active_fence_cb(struct dma_fence *fence, struct dma_fence_cb *cb)
-{
- struct i915_active_fence *active =
- container_of(cb, typeof(*active), cb);
-
- RCU_INIT_POINTER(active->fence, NULL);
-}
-
/*
* GPU activity tracking
*
@@ -165,11 +152,15 @@ i915_active_fence_cb(struct dma_fence *fence, struct dma_fence_cb *cb)
void __i915_active_init(struct i915_active *ref,
int (*active)(struct i915_active *ref),
void (*retire)(struct i915_active *ref),
- struct lock_class_key *key);
+ struct lock_class_key *mkey,
+ struct lock_class_key *wkey);
+
+/* Specialise each class of i915_active to avoid impossible lockdep cycles. */
#define i915_active_init(ref, active, retire) do { \
- static struct lock_class_key __key; \
+ static struct lock_class_key __mkey; \
+ static struct lock_class_key __wkey; \
\
- __i915_active_init(ref, active, retire, &__key); \
+ __i915_active_init(ref, active, retire, &__mkey, &__wkey); \
} while (0)
int i915_active_ref(struct i915_active *ref,
@@ -215,5 +206,6 @@ void i915_active_acquire_barrier(struct i915_active *ref);
void i915_request_add_active_barriers(struct i915_request *rq);
void i915_active_print(struct i915_active *ref, struct drm_printer *m);
+void i915_active_unlock_wait(struct i915_active *ref);
#endif /* _I915_ACTIVE_H_ */
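
The i915_active_init() macro above now mints two static lock_class_key objects per expansion, giving every distinct call-site its own lockdep classes for the mutex and the retirement worker. The idiom relies on block-scoped statics: each macro expansion is its own compound statement, so each gets distinct objects. A compilable sketch of just that idiom — INIT_REF() and the empty key struct are stand-ins, not the kernel definitions:

#include <stdio.h>

struct lock_class_key { char dummy; };

#define INIT_REF(out) do {                              \
        static struct lock_class_key __mkey;            \
        static struct lock_class_key __wkey;            \
                                                        \
        (out)[0] = &__mkey;                             \
        (out)[1] = &__wkey;                             \
} while (0)

int main(void)
{
        struct lock_class_key *a[2], *b[2];

        INIT_REF(a);    /* call-site 1: its own pair of keys */
        INIT_REF(b);    /* call-site 2: different static objects */

        printf("distinct classes: %d\n", a[0] != b[0] && a[1] != b[1]);
        return 0;
}

Lockdep distinguishes classes by key address, which is why separating the mutex key from the work key avoids the impossible cycles the comment refers to.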
diff --git a/drivers/gpu/drm/i915/i915_active_types.h b/drivers/gpu/drm/i915/i915_active_types.h
index 96aed0ee700a..6360c3e4b765 100644
--- a/drivers/gpu/drm/i915/i915_active_types.h
+++ b/drivers/gpu/drm/i915/i915_active_types.h
@@ -20,21 +20,6 @@
struct i915_active_fence {
struct dma_fence __rcu *fence;
struct dma_fence_cb cb;
-#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM)
- /*
- * Incorporeal!
- *
- * Updates to the i915_active_request must be serialised under a lock
- * to ensure that the timeline is ordered. Normally, this is the
- * timeline->mutex, but another mutex may be used so long as it is
- * done so consistently.
- *
- * For lockdep tracking of the above, we store the lock we intend
- * to always use for updates of this i915_active_request during
- * construction and assert that is held on every update.
- */
- struct mutex *lock;
-#endif
};
struct active_node;
diff --git a/drivers/gpu/drm/i915/i915_buddy.c b/drivers/gpu/drm/i915/i915_buddy.c
index e9d4200ce3bc..66883af64ca1 100644
--- a/drivers/gpu/drm/i915/i915_buddy.c
+++ b/drivers/gpu/drm/i915/i915_buddy.c
@@ -262,8 +262,10 @@ void i915_buddy_free_list(struct i915_buddy_mm *mm, struct list_head *objects)
{
struct i915_buddy_block *block, *on;
- list_for_each_entry_safe(block, on, objects, link)
+ list_for_each_entry_safe(block, on, objects, link) {
i915_buddy_free(mm, block);
+ cond_resched();
+ }
INIT_LIST_HEAD(objects);
}
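
i915_buddy_free_list() can walk an arbitrarily long list, so the hunk above drops a cond_resched() into the loop to keep it preemptible. A user-space analogue of the pattern, using sched_yield() as a stand-in for cond_resched() and yielding per batch rather than per element (an illustration choice, not the kernel's):

#include <sched.h>
#include <stdio.h>
#include <stdlib.h>

struct block {
        struct block *next;
};

static void free_list(struct block *head)
{
        unsigned long n = 0;

        while (head) {
                struct block *next = head->next;

                free(head);
                head = next;

                if ((++n & 1023) == 0)  /* arbitrary batch size */
                        sched_yield();
        }
}

int main(void)
{
        struct block *head = NULL;

        for (int i = 0; i < 100000; i++) {
                struct block *b = malloc(sizeof(*b));

                b->next = head;
                head = b;
        }
        free_list(head);
        puts("done");
        return 0;
}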
diff --git a/drivers/gpu/drm/i915/i915_cmd_parser.c b/drivers/gpu/drm/i915/i915_cmd_parser.c
index f24096e27bef..a0e437aa65b7 100644
--- a/drivers/gpu/drm/i915/i915_cmd_parser.c
+++ b/drivers/gpu/drm/i915/i915_cmd_parser.c
@@ -235,7 +235,7 @@ static const struct drm_i915_cmd_descriptor gen7_common_cmds[] = {
/*
* MI_BATCH_BUFFER_START requires some special handling. It's not
* really a 'skip' action but it doesn't seem like it's worth adding
- * a new action. See i915_parse_cmds().
+ * a new action. See intel_engine_cmd_parser().
*/
CMD( MI_BATCH_BUFFER_START, SMI, !F, 0xFF, S ),
};
@@ -731,7 +731,7 @@ static u32 gen7_render_get_cmd_length_mask(u32 cmd_header)
return 0xFF;
}
- DRM_DEBUG_DRIVER("CMD: Abnormal rcs cmd length! 0x%08X\n", cmd_header);
+ DRM_DEBUG("CMD: Abnormal rcs cmd length! 0x%08X\n", cmd_header);
return 0;
}
@@ -754,7 +754,7 @@ static u32 gen7_bsd_get_cmd_length_mask(u32 cmd_header)
return 0xFF;
}
- DRM_DEBUG_DRIVER("CMD: Abnormal bsd cmd length! 0x%08X\n", cmd_header);
+ DRM_DEBUG("CMD: Abnormal bsd cmd length! 0x%08X\n", cmd_header);
return 0;
}
@@ -767,7 +767,7 @@ static u32 gen7_blt_get_cmd_length_mask(u32 cmd_header)
else if (client == INSTR_BC_CLIENT)
return 0xFF;
- DRM_DEBUG_DRIVER("CMD: Abnormal blt cmd length! 0x%08X\n", cmd_header);
+ DRM_DEBUG("CMD: Abnormal blt cmd length! 0x%08X\n", cmd_header);
return 0;
}
@@ -778,7 +778,7 @@ static u32 gen9_blt_get_cmd_length_mask(u32 cmd_header)
if (client == INSTR_MI_CLIENT || client == INSTR_BC_CLIENT)
return 0xFF;
- DRM_DEBUG_DRIVER("CMD: Abnormal blt cmd length! 0x%08X\n", cmd_header);
+ DRM_DEBUG("CMD: Abnormal blt cmd length! 0x%08X\n", cmd_header);
return 0;
}
@@ -1127,79 +1127,71 @@ find_reg(const struct intel_engine_cs *engine, u32 addr)
/* Returns a vmap'd pointer to dst_obj, which the caller must unmap */
static u32 *copy_batch(struct drm_i915_gem_object *dst_obj,
struct drm_i915_gem_object *src_obj,
- u32 batch_start_offset,
- u32 batch_len,
- bool *needs_clflush_after)
+ u32 offset, u32 length)
{
- unsigned int src_needs_clflush;
- unsigned int dst_needs_clflush;
+ bool needs_clflush;
void *dst, *src;
int ret;
- ret = i915_gem_object_prepare_write(dst_obj, &dst_needs_clflush);
- if (ret)
- return ERR_PTR(ret);
-
dst = i915_gem_object_pin_map(dst_obj, I915_MAP_FORCE_WB);
- i915_gem_object_finish_access(dst_obj);
if (IS_ERR(dst))
return dst;
- ret = i915_gem_object_prepare_read(src_obj, &src_needs_clflush);
+ ret = i915_gem_object_pin_pages(src_obj);
if (ret) {
i915_gem_object_unpin_map(dst_obj);
return ERR_PTR(ret);
}
+ needs_clflush =
+ !(src_obj->cache_coherent & I915_BO_CACHE_COHERENT_FOR_READ);
+
src = ERR_PTR(-ENODEV);
- if (src_needs_clflush &&
- i915_can_memcpy_from_wc(NULL, batch_start_offset, 0)) {
+ if (needs_clflush && i915_has_memcpy_from_wc()) {
src = i915_gem_object_pin_map(src_obj, I915_MAP_WC);
if (!IS_ERR(src)) {
- i915_memcpy_from_wc(dst,
- src + batch_start_offset,
- ALIGN(batch_len, 16));
+ i915_unaligned_memcpy_from_wc(dst,
+ src + offset,
+ length);
i915_gem_object_unpin_map(src_obj);
}
}
if (IS_ERR(src)) {
void *ptr;
- int offset, n;
+ int x, n;
- offset = offset_in_page(batch_start_offset);
-
- /* We can avoid clflushing partial cachelines before the write
+ /*
+ * We can avoid clflushing partial cachelines before the write
 * if we only ever write full cache-lines. Since we know that
* both the source and destination are in multiples of
* PAGE_SIZE, we can simply round up to the next cacheline.
* We don't care about copying too much here as we only
* validate up to the end of the batch.
*/
- if (dst_needs_clflush & CLFLUSH_BEFORE)
- batch_len = roundup(batch_len,
- boot_cpu_data.x86_clflush_size);
+ if (!(dst_obj->cache_coherent & I915_BO_CACHE_COHERENT_FOR_READ))
+ length = round_up(length,
+ boot_cpu_data.x86_clflush_size);
ptr = dst;
- for (n = batch_start_offset >> PAGE_SHIFT; batch_len; n++) {
- int len = min_t(int, batch_len, PAGE_SIZE - offset);
+ x = offset_in_page(offset);
+ for (n = offset >> PAGE_SHIFT; length; n++) {
+ int len = min_t(int, length, PAGE_SIZE - x);
src = kmap_atomic(i915_gem_object_get_page(src_obj, n));
- if (src_needs_clflush)
- drm_clflush_virt_range(src + offset, len);
- memcpy(ptr, src + offset, len);
+ if (needs_clflush)
+ drm_clflush_virt_range(src + x, len);
+ memcpy(ptr, src + x, len);
kunmap_atomic(src);
ptr += len;
- batch_len -= len;
- offset = 0;
+ length -= len;
+ x = 0;
}
}
- i915_gem_object_finish_access(src_obj);
+ i915_gem_object_unpin_pages(src_obj);
/* dst_obj is returned with vmap pinned */
- *needs_clflush_after = dst_needs_clflush & CLFLUSH_AFTER;
-
return dst;
}
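
When the WC fast path is unavailable, copy_batch() falls back to mapping the source one page at a time, with x tracking the intra-page offset that is only non-zero for the first page. The same walk in plain, runnable user-space C, with PAGE_SIZE pages simulated by a static array standing in for kmap_atomic():

#include <stdio.h>
#include <string.h>

#define PAGE_SIZE 4096
#define NPAGES 4

static char pages[NPAGES][PAGE_SIZE];

/* Copy `length` bytes starting at byte `offset` across the page array. */
static void copy_from_pages(char *dst, unsigned int offset, unsigned int length)
{
        unsigned int x = offset % PAGE_SIZE;    /* offset within first page */
        unsigned int n = offset / PAGE_SIZE;    /* first page index */

        while (length) {
                unsigned int len = PAGE_SIZE - x;

                if (len > length)
                        len = length;

                memcpy(dst, &pages[n][x], len); /* kmap_atomic() analogue */
                dst += len;
                length -= len;
                x = 0;                          /* later pages start at 0 */
                n++;
        }
}

int main(void)
{
        char out[64];

        memcpy(&pages[1][PAGE_SIZE - 8], "batchbuf", 8);
        copy_from_pages(out, 2 * PAGE_SIZE - 8, 8);
        printf("%.8s\n", out);
        return 0;
}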
@@ -1211,7 +1203,7 @@ static bool check_cmd(const struct intel_engine_cs *engine,
return true;
if (desc->flags & CMD_DESC_REJECT) {
- DRM_DEBUG_DRIVER("CMD: Rejected command: 0x%08X\n", *cmd);
+ DRM_DEBUG("CMD: Rejected command: 0x%08X\n", *cmd);
return false;
}
@@ -1231,8 +1223,8 @@ static bool check_cmd(const struct intel_engine_cs *engine,
find_reg(engine, reg_addr);
if (!reg) {
- DRM_DEBUG_DRIVER("CMD: Rejected register 0x%08X in command: 0x%08X (%s)\n",
- reg_addr, *cmd, engine->name);
+ DRM_DEBUG("CMD: Rejected register 0x%08X in command: 0x%08X (%s)\n",
+ reg_addr, *cmd, engine->name);
return false;
}
@@ -1242,22 +1234,22 @@ static bool check_cmd(const struct intel_engine_cs *engine,
*/
if (reg->mask) {
if (desc->cmd.value == MI_LOAD_REGISTER_MEM) {
- DRM_DEBUG_DRIVER("CMD: Rejected LRM to masked register 0x%08X\n",
- reg_addr);
+ DRM_DEBUG("CMD: Rejected LRM to masked register 0x%08X\n",
+ reg_addr);
return false;
}
if (desc->cmd.value == MI_LOAD_REGISTER_REG) {
- DRM_DEBUG_DRIVER("CMD: Rejected LRR to masked register 0x%08X\n",
- reg_addr);
+ DRM_DEBUG("CMD: Rejected LRR to masked register 0x%08X\n",
+ reg_addr);
return false;
}
if (desc->cmd.value == MI_LOAD_REGISTER_IMM(1) &&
(offset + 2 > length ||
(cmd[offset + 1] & reg->mask) != reg->value)) {
- DRM_DEBUG_DRIVER("CMD: Rejected LRI to masked register 0x%08X\n",
- reg_addr);
+ DRM_DEBUG("CMD: Rejected LRI to masked register 0x%08X\n",
+ reg_addr);
return false;
}
}
@@ -1284,8 +1276,8 @@ static bool check_cmd(const struct intel_engine_cs *engine,
}
if (desc->bits[i].offset >= length) {
- DRM_DEBUG_DRIVER("CMD: Rejected command 0x%08X, too short to check bitmask (%s)\n",
- *cmd, engine->name);
+ DRM_DEBUG("CMD: Rejected command 0x%08X, too short to check bitmask (%s)\n",
+ *cmd, engine->name);
return false;
}
@@ -1293,11 +1285,11 @@ static bool check_cmd(const struct intel_engine_cs *engine,
desc->bits[i].mask;
if (dword != desc->bits[i].expected) {
- DRM_DEBUG_DRIVER("CMD: Rejected command 0x%08X for bitmask 0x%08X (exp=0x%08X act=0x%08X) (%s)\n",
- *cmd,
- desc->bits[i].mask,
- desc->bits[i].expected,
- dword, engine->name);
+ DRM_DEBUG("CMD: Rejected command 0x%08X for bitmask 0x%08X (exp=0x%08X act=0x%08X) (%s)\n",
+ *cmd,
+ desc->bits[i].mask,
+ desc->bits[i].expected,
+ dword, engine->name);
return false;
}
}
@@ -1306,17 +1298,17 @@ static bool check_cmd(const struct intel_engine_cs *engine,
return true;
}
-static int check_bbstart(const struct i915_gem_context *ctx,
- u32 *cmd, u32 offset, u32 length,
- u32 batch_len,
- u64 batch_start,
- u64 shadow_batch_start)
+static int check_bbstart(u32 *cmd, u32 offset, u32 length,
+ u32 batch_length,
+ u64 batch_addr,
+ u64 shadow_addr,
+ const unsigned long *jump_whitelist)
{
u64 jump_offset, jump_target;
u32 target_cmd_offset, target_cmd_index;
/* For igt compatibility on older platforms */
- if (CMDPARSER_USES_GGTT(ctx->i915)) {
+ if (!jump_whitelist) {
DRM_DEBUG("CMD: Rejecting BB_START for ggtt based submission\n");
return -EACCES;
}
@@ -1327,14 +1319,14 @@ static int check_bbstart(const struct i915_gem_context *ctx,
return -EINVAL;
}
- jump_target = *(u64*)(cmd+1);
- jump_offset = jump_target - batch_start;
+ jump_target = *(u64 *)(cmd + 1);
+ jump_offset = jump_target - batch_addr;
/*
* Any underflow of jump_target is guaranteed to be outside the range
* of a u32, so >= test catches both too large and too small
*/
- if (jump_offset >= batch_len) {
+ if (jump_offset >= batch_length) {
DRM_DEBUG("CMD: BB_START to 0x%llx jumps out of BB\n",
jump_target);
return -EINVAL;
@@ -1342,20 +1334,20 @@ static int check_bbstart(const struct i915_gem_context *ctx,
/*
* This cannot overflow a u32 because we already checked jump_offset
- * is within the BB, and the batch_len is a u32
+ * is within the BB, and the batch_length is a u32
*/
target_cmd_offset = lower_32_bits(jump_offset);
target_cmd_index = target_cmd_offset / sizeof(u32);
- *(u64*)(cmd + 1) = shadow_batch_start + target_cmd_offset;
+ *(u64 *)(cmd + 1) = shadow_addr + target_cmd_offset;
if (target_cmd_index == offset)
return 0;
- if (ctx->jump_whitelist_cmds <= target_cmd_index) {
- DRM_DEBUG("CMD: Rejecting BB_START - truncated whitelist array\n");
- return -EINVAL;
- } else if (!test_bit(target_cmd_index, ctx->jump_whitelist)) {
+ if (IS_ERR(jump_whitelist))
+ return PTR_ERR(jump_whitelist);
+
+ if (!test_bit(target_cmd_index, jump_whitelist)) {
DRM_DEBUG("CMD: BB_START to 0x%llx not a previously executed cmd\n",
jump_target);
return -EINVAL;
@@ -1364,54 +1356,40 @@ static int check_bbstart(const struct i915_gem_context *ctx,
return 0;
}
-static void init_whitelist(struct i915_gem_context *ctx, u32 batch_len)
+static unsigned long *alloc_whitelist(u32 batch_length)
{
- const u32 batch_cmds = DIV_ROUND_UP(batch_len, sizeof(u32));
- const u32 exact_size = BITS_TO_LONGS(batch_cmds);
- u32 next_size = BITS_TO_LONGS(roundup_pow_of_two(batch_cmds));
- unsigned long *next_whitelist;
+ unsigned long *jmp;
- if (CMDPARSER_USES_GGTT(ctx->i915))
- return;
-
- if (batch_cmds <= ctx->jump_whitelist_cmds) {
- bitmap_zero(ctx->jump_whitelist, batch_cmds);
- return;
- }
-
-again:
- next_whitelist = kcalloc(next_size, sizeof(long), GFP_KERNEL);
- if (next_whitelist) {
- kfree(ctx->jump_whitelist);
- ctx->jump_whitelist = next_whitelist;
- ctx->jump_whitelist_cmds =
- next_size * BITS_PER_BYTE * sizeof(long);
- return;
- }
-
- if (next_size > exact_size) {
- next_size = exact_size;
- goto again;
- }
+ /*
+ * We expect batch_length to be less than 256KiB for known users,
+ * i.e. we need at most an 8KiB bitmap allocation which should be
+ * reasonably cheap due to kmalloc caches.
+ */
- DRM_DEBUG("CMD: Failed to extend whitelist. BB_START may be disallowed\n");
- bitmap_zero(ctx->jump_whitelist, ctx->jump_whitelist_cmds);
+ /* Prefer to report transient allocation failure rather than hit oom */
+ jmp = bitmap_zalloc(DIV_ROUND_UP(batch_length, sizeof(u32)),
+ GFP_KERNEL | __GFP_RETRY_MAYFAIL | __GFP_NOWARN);
+ if (!jmp)
+ return ERR_PTR(-ENOMEM);
- return;
+ return jmp;
}

#define LENGTH_BIAS 2
+static bool shadow_needs_clflush(struct drm_i915_gem_object *obj)
+{
+ return !(obj->cache_coherent & I915_BO_CACHE_COHERENT_FOR_WRITE);
+}
+
/**
- * i915_parse_cmds() - parse a submitted batch buffer for privilege violations
- * @ctx: the context in which the batch is to execute
+ * intel_engine_cmd_parser() - parse a batch buffer for privilege violations
* @engine: the engine on which the batch is to execute
- * @batch_obj: the batch buffer in question
- * @batch_start: Canonical base address of batch
- * @batch_start_offset: byte offset in the batch at which execution starts
- * @batch_len: length of the commands in batch_obj
- * @shadow_batch_obj: copy of the batch buffer in question
- * @shadow_batch_start: Canonical base address of shadow_batch_obj
+ * @batch: the batch buffer in question
+ * @batch_offset: byte offset in the batch at which execution starts
+ * @batch_length: length of the commands in batch_obj
+ * @shadow: validated copy of the batch buffer in question
+ * @trampoline: whether to emit a conditional trampoline at the end of the batch
*
* Parses the specified batch buffer looking for privilege violations as
* described in the overview.
@@ -1419,38 +1397,46 @@ again:
* Return: non-zero if the parser finds violations or otherwise fails; -EACCES
* if the batch appears legal but should use hardware parsing
*/
-
-int intel_engine_cmd_parser(struct i915_gem_context *ctx,
- struct intel_engine_cs *engine,
- struct drm_i915_gem_object *batch_obj,
- u64 batch_start,
- u32 batch_start_offset,
- u32 batch_len,
- struct drm_i915_gem_object *shadow_batch_obj,
- u64 shadow_batch_start)
+int intel_engine_cmd_parser(struct intel_engine_cs *engine,
+ struct i915_vma *batch,
+ u32 batch_offset,
+ u32 batch_length,
+ struct i915_vma *shadow,
+ bool trampoline)
{
u32 *cmd, *batch_end, offset = 0;
struct drm_i915_cmd_descriptor default_desc = noop_desc;
const struct drm_i915_cmd_descriptor *desc = &default_desc;
- bool needs_clflush_after = false;
+ unsigned long *jump_whitelist;
+ u64 batch_addr, shadow_addr;
int ret = 0;
- cmd = copy_batch(shadow_batch_obj, batch_obj,
- batch_start_offset, batch_len,
- &needs_clflush_after);
+ GEM_BUG_ON(!IS_ALIGNED(batch_offset, sizeof(*cmd)));
+ GEM_BUG_ON(!IS_ALIGNED(batch_length, sizeof(*cmd)));
+ GEM_BUG_ON(range_overflows_t(u64, batch_offset, batch_length,
+ batch->size));
+ GEM_BUG_ON(!batch_length);
+
+ cmd = copy_batch(shadow->obj, batch->obj, batch_offset, batch_length);
if (IS_ERR(cmd)) {
- DRM_DEBUG_DRIVER("CMD: Failed to copy batch\n");
+ DRM_DEBUG("CMD: Failed to copy batch\n");
return PTR_ERR(cmd);
}
- init_whitelist(ctx, batch_len);
+ jump_whitelist = NULL;
+ if (!trampoline)
+ /* Defer failure until attempted use */
+ jump_whitelist = alloc_whitelist(batch_length);
+
+ shadow_addr = gen8_canonical_addr(shadow->node.start);
+ batch_addr = gen8_canonical_addr(batch->node.start + batch_offset);
/*
* We use the batch length as size because the shadow object is as
* large or larger and copy_batch() will write MI_NOPs to the extra
* space. Parsing should be faster in some cases this way.
*/
- batch_end = cmd + (batch_len / sizeof(*batch_end));
+ batch_end = cmd + batch_length / sizeof(*batch_end);
do {
u32 length;
@@ -1459,61 +1445,99 @@ int intel_engine_cmd_parser(struct i915_gem_context *ctx,
desc = find_cmd(engine, *cmd, desc, &default_desc);
if (!desc) {
- DRM_DEBUG_DRIVER("CMD: Unrecognized command: 0x%08X\n",
- *cmd);
+ DRM_DEBUG("CMD: Unrecognized command: 0x%08X\n", *cmd);
ret = -EINVAL;
- goto err;
+ break;
}
if (desc->flags & CMD_DESC_FIXED)
length = desc->length.fixed;
else
- length = ((*cmd & desc->length.mask) + LENGTH_BIAS);
+ length = (*cmd & desc->length.mask) + LENGTH_BIAS;
if ((batch_end - cmd) < length) {
- DRM_DEBUG_DRIVER("CMD: Command length exceeds batch length: 0x%08X length=%u batchlen=%td\n",
- *cmd,
- length,
- batch_end - cmd);
+ DRM_DEBUG("CMD: Command length exceeds batch length: 0x%08X length=%u batchlen=%td\n",
+ *cmd,
+ length,
+ batch_end - cmd);
ret = -EINVAL;
- goto err;
+ break;
}
if (!check_cmd(engine, desc, cmd, length)) {
ret = -EACCES;
- goto err;
+ break;
}
if (desc->cmd.value == MI_BATCH_BUFFER_START) {
- ret = check_bbstart(ctx, cmd, offset, length,
- batch_len, batch_start,
- shadow_batch_start);
-
- if (ret)
- goto err;
+ ret = check_bbstart(cmd, offset, length, batch_length,
+ batch_addr, shadow_addr,
+ jump_whitelist);
break;
}
- if (ctx->jump_whitelist_cmds > offset)
- set_bit(offset, ctx->jump_whitelist);
+ if (!IS_ERR_OR_NULL(jump_whitelist))
+ __set_bit(offset, jump_whitelist);
cmd += length;
offset += length;
if (cmd >= batch_end) {
- DRM_DEBUG_DRIVER("CMD: Got to the end of the buffer w/o a BBE cmd!\n");
+ DRM_DEBUG("CMD: Got to the end of the buffer w/o a BBE cmd!\n");
ret = -EINVAL;
- goto err;
+ break;
}
} while (1);
- if (needs_clflush_after) {
- void *ptr = page_mask_bits(shadow_batch_obj->mm.mapping);
+ if (trampoline) {
+ /*
+ * With the trampoline, the shadow is executed twice.
+ *
+ * 1 - starting at offset 0, in privileged mode
+ * 2 - starting at offset batch_len, as non-privileged
+ *
+ * Only if the batch is valid and safe to execute, do we
+ * allow the first privileged execution to proceed. If not,
+ * we terminate the first batch and use the second batchbuffer
+ * entry to chain to the original unsafe non-privileged batch,
+ * leaving it to the HW to validate.
+ */
+ *batch_end = MI_BATCH_BUFFER_END;
+
+ if (ret) {
+ /* Batch unsafe to execute with privileges, cancel! */
+ cmd = page_mask_bits(shadow->obj->mm.mapping);
+ *cmd = MI_BATCH_BUFFER_END;
+
+ /* If batch is unsafe but valid, jump to the original */
+ if (ret == -EACCES) {
+ unsigned int flags;
+
+ flags = MI_BATCH_NON_SECURE_I965;
+ if (IS_HASWELL(engine->i915))
+ flags = MI_BATCH_NON_SECURE_HSW;
+
+ GEM_BUG_ON(!IS_GEN_RANGE(engine->i915, 6, 7));
+ __gen6_emit_bb_start(batch_end,
+ batch_addr,
+ flags);
+
+ ret = 0; /* allow execution */
+ }
+ }
+
+ if (shadow_needs_clflush(shadow->obj))
+ drm_clflush_virt_range(batch_end, 8);
+ }
+
+ if (shadow_needs_clflush(shadow->obj)) {
+ void *ptr = page_mask_bits(shadow->obj->mm.mapping);
drm_clflush_virt_range(ptr, (void *)(cmd + 1) - ptr);
}
-err:
- i915_gem_object_unpin_map(shadow_batch_obj);
+ if (!IS_ERR_OR_NULL(jump_whitelist))
+ kfree(jump_whitelist);
+ i915_gem_object_unpin_map(shadow->obj);
return ret;
}
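
With this change the per-context jump_whitelist cache is gone: each parse allocates a transient bitmap sized to the batch, sets a bit at every validated command offset, and check_bbstart() accepts a BB_START only when the target offset's bit is set. A self-contained sketch of that validation — set_bit()/test_bit() are re-implemented here for portability; the kernel's versions live in its bitmap headers:

#include <limits.h>
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

#define BITS_PER_LONG (sizeof(long) * CHAR_BIT)

static void set_bit(unsigned long *map, unsigned int n)
{
        map[n / BITS_PER_LONG] |= 1UL << (n % BITS_PER_LONG);
}

static bool test_bit(const unsigned long *map, unsigned int n)
{
        return map[n / BITS_PER_LONG] & (1UL << (n % BITS_PER_LONG));
}

int main(void)
{
        unsigned int batch_dwords = 64;         /* a 256-byte batch */
        unsigned long *wl = calloc((batch_dwords + BITS_PER_LONG - 1) /
                                   BITS_PER_LONG, sizeof(long));

        /* Parser loop: commands started at dword offsets 0, 4 and 9. */
        set_bit(wl, 0);
        set_bit(wl, 4);
        set_bit(wl, 9);

        /* BB_START targets must land on a previously parsed command. */
        printf("jump to 4: %s\n", test_bit(wl, 4) ? "ok" : "reject");
        printf("jump to 5: %s\n", test_bit(wl, 5) ? "ok" : "reject");

        free(wl);
        return 0;
}

Deferring the ENOMEM from alloc_whitelist() until a jump is actually attempted (via the IS_ERR(jump_whitelist) check in check_bbstart) means batches without any BB_START parse successfully even under memory pressure.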
diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index 8016484ebcd3..d5a9b8a964c2 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -61,24 +61,14 @@ static inline struct drm_i915_private *node_to_i915(struct drm_info_node *node)
static int i915_capabilities(struct seq_file *m, void *data)
{
- struct drm_i915_private *dev_priv = node_to_i915(m->private);
- const struct intel_device_info *info = INTEL_INFO(dev_priv);
+ struct drm_i915_private *i915 = node_to_i915(m->private);
struct drm_printer p = drm_seq_file_printer(m);
- const char *msg;
- seq_printf(m, "gen: %d\n", INTEL_GEN(dev_priv));
- seq_printf(m, "platform: %s\n", intel_platform_name(info->platform));
- seq_printf(m, "pch: %d\n", INTEL_PCH_TYPE(dev_priv));
+ seq_printf(m, "pch: %d\n", INTEL_PCH_TYPE(i915));
- msg = "n/a";
-#ifdef CONFIG_INTEL_IOMMU
- msg = enableddisabled(intel_iommu_gfx_mapped);
-#endif
- seq_printf(m, "iommu: %s\n", msg);
-
- intel_device_info_dump_flags(info, &p);
- intel_device_info_dump_runtime(RUNTIME_INFO(dev_priv), &p);
- intel_driver_caps_print(&dev_priv->caps, &p);
+ intel_device_info_print_static(INTEL_INFO(i915), &p);
+ intel_device_info_print_runtime(RUNTIME_INFO(i915), &p);
+ intel_driver_caps_print(&i915->caps, &p);
kernel_param_lock(THIS_MODULE);
i915_params_dump(&i915_modparams, &p);
@@ -331,16 +321,15 @@ static void print_context_stats(struct seq_file *m,
for_each_gem_engine(ce,
i915_gem_context_lock_engines(ctx), it) {
- intel_context_lock_pinned(ce);
- if (intel_context_is_pinned(ce)) {
+ if (intel_context_pin_if_active(ce)) {
rcu_read_lock();
if (ce->state)
per_file_stats(0,
ce->state->obj, &kstats);
per_file_stats(0, ce->ring->vma->obj, &kstats);
rcu_read_unlock();
+ intel_context_unpin(ce);
}
- intel_context_unlock_pinned(ce);
}
i915_gem_context_unlock_engines(ctx);
@@ -377,12 +366,16 @@ static void print_context_stats(struct seq_file *m,
static int i915_gem_object_info(struct seq_file *m, void *data)
{
struct drm_i915_private *i915 = node_to_i915(m->private);
+ struct intel_memory_region *mr;
+ enum intel_region_id id;
seq_printf(m, "%u shrinkable [%u free] objects, %llu bytes\n",
i915->mm.shrink_count,
atomic_read(&i915->mm.free_count),
i915->mm.shrink_memory);
-
+ for_each_memory_region(mr, i915, id)
+ seq_printf(m, "%s: total:%pa, available:%pa bytes\n",
+ mr->name, &mr->total, &mr->avail);
seq_putc(m, '\n');
print_context_stats(m, i915);
@@ -692,7 +685,7 @@ static int i915_gem_fence_regs_info(struct seq_file *m, void *data)
static ssize_t gpu_state_read(struct file *file, char __user *ubuf,
size_t count, loff_t *pos)
{
- struct i915_gpu_state *error;
+ struct i915_gpu_coredump *error;
ssize_t ret;
void *buf;
@@ -705,7 +698,7 @@ static ssize_t gpu_state_read(struct file *file, char __user *ubuf,
if (!buf)
return -ENOMEM;
- ret = i915_gpu_state_copy_to_buffer(error, buf, *pos, count);
+ ret = i915_gpu_coredump_copy_to_buffer(error, buf, *pos, count);
if (ret <= 0)
goto out;
@@ -721,19 +714,19 @@ out:
static int gpu_state_release(struct inode *inode, struct file *file)
{
- i915_gpu_state_put(file->private_data);
+ i915_gpu_coredump_put(file->private_data);
return 0;
}
static int i915_gpu_info_open(struct inode *inode, struct file *file)
{
struct drm_i915_private *i915 = inode->i_private;
- struct i915_gpu_state *gpu;
+ struct i915_gpu_coredump *gpu;
intel_wakeref_t wakeref;
gpu = NULL;
with_intel_runtime_pm(&i915->runtime_pm, wakeref)
- gpu = i915_capture_gpu_state(i915);
+ gpu = i915_gpu_coredump(i915);
if (IS_ERR(gpu))
return PTR_ERR(gpu);
@@ -755,7 +748,7 @@ i915_error_state_write(struct file *filp,
size_t cnt,
loff_t *ppos)
{
- struct i915_gpu_state *error = filp->private_data;
+ struct i915_gpu_coredump *error = filp->private_data;
if (!error)
return 0;
@@ -768,7 +761,7 @@ i915_error_state_write(struct file *filp,
static int i915_error_state_open(struct inode *inode, struct file *file)
{
- struct i915_gpu_state *error;
+ struct i915_gpu_coredump *error;
error = i915_first_error_state(inode->i_private);
if (IS_ERR(error))
@@ -891,7 +884,7 @@ static int i915_frequency_info(struct seq_file *m, void *unused)
rpdownei = I915_READ(GEN6_RP_CUR_DOWN_EI) & GEN6_CURIAVG_MASK;
rpcurdown = I915_READ(GEN6_RP_CUR_DOWN) & GEN6_CURBSYTAVG_MASK;
rpprevdown = I915_READ(GEN6_RP_PREV_DOWN) & GEN6_CURBSYTAVG_MASK;
- cagf = intel_gpu_freq(rps, intel_get_cagf(rps, rpstat));
+ cagf = intel_rps_read_actual_frequency(rps);
intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_ALL);
@@ -1011,7 +1004,7 @@ static int i915_frequency_info(struct seq_file *m, void *unused)
return ret;
}
-static int ironlake_drpc_info(struct seq_file *m)
+static int ilk_drpc_info(struct seq_file *m)
{
struct drm_i915_private *i915 = node_to_i915(m->private);
struct intel_uncore *uncore = &i915->uncore;
@@ -1219,7 +1212,7 @@ static int i915_drpc_info(struct seq_file *m, void *unused)
else if (INTEL_GEN(dev_priv) >= 6)
err = gen6_drpc_info(m);
else
- err = ironlake_drpc_info(m);
+ err = ilk_drpc_info(m);
}
return err;
@@ -1519,15 +1512,14 @@ static int i915_context_status(struct seq_file *m, void *unused)
for_each_gem_engine(ce,
i915_gem_context_lock_engines(ctx), it) {
- intel_context_lock_pinned(ce);
- if (intel_context_is_pinned(ce)) {
+ if (intel_context_pin_if_active(ce)) {
seq_printf(m, "%s: ", ce->engine->name);
if (ce->state)
describe_obj(m, ce->state->obj);
describe_ctx_ring(m, ce->ring);
seq_putc(m, '\n');
+ intel_context_unpin(ce);
}
- intel_context_unlock_pinned(ce);
}
i915_gem_context_unlock_engines(ctx);
@@ -1633,21 +1625,6 @@ static int i915_rps_boost_info(struct seq_file *m, void *data)
{
struct drm_i915_private *dev_priv = node_to_i915(m->private);
struct intel_rps *rps = &dev_priv->gt.rps;
- u32 act_freq = rps->cur_freq;
- intel_wakeref_t wakeref;
-
- with_intel_runtime_pm_if_in_use(&dev_priv->runtime_pm, wakeref) {
- if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
- vlv_punit_get(dev_priv);
- act_freq = vlv_punit_read(dev_priv,
- PUNIT_REG_GPU_FREQ_STS);
- vlv_punit_put(dev_priv);
- act_freq = (act_freq >> 8) & 0xff;
- } else {
- act_freq = intel_get_cagf(rps,
- I915_READ(GEN6_RPSTAT1));
- }
- }
seq_printf(m, "RPS enabled? %d\n", rps->enabled);
seq_printf(m, "GPU busy? %s\n", yesno(dev_priv->gt.awake));
@@ -1656,7 +1633,7 @@ static int i915_rps_boost_info(struct seq_file *m, void *data)
seq_printf(m, "Interactive? %d\n", READ_ONCE(rps->power.interactive));
seq_printf(m, "Frequency requested %d, actual %d\n",
intel_gpu_freq(rps, rps->cur_freq),
- intel_gpu_freq(rps, act_freq));
+ intel_rps_read_actual_frequency(rps));
seq_printf(m, " min hard:%d, soft:%d; max soft:%d, hard:%d\n",
intel_gpu_freq(rps, rps->min_freq),
intel_gpu_freq(rps, rps->min_freq_softlimit),
@@ -1802,30 +1779,12 @@ static void i915_guc_log_info(struct seq_file *m,
static int i915_guc_info(struct seq_file *m, void *data)
{
struct drm_i915_private *dev_priv = node_to_i915(m->private);
- const struct intel_guc *guc = &dev_priv->gt.uc.guc;
- struct intel_guc_client *client = guc->execbuf_client;
if (!USES_GUC(dev_priv))
return -ENODEV;
i915_guc_log_info(m, dev_priv);
- if (!USES_GUC_SUBMISSION(dev_priv))
- return 0;
-
- GEM_BUG_ON(!guc->execbuf_client);
-
- seq_printf(m, "\nDoorbell map:\n");
- seq_printf(m, "\t%*pb\n", GUC_NUM_DOORBELLS, guc->doorbell_bitmap);
- seq_printf(m, "Doorbell next cacheline: 0x%x\n", guc->db_cacheline);
-
- seq_printf(m, "\nGuC execbuf client @ %p:\n", client);
- seq_printf(m, "\tPriority %d, GuC stage index: %u, PD offset 0x%x\n",
- client->priority,
- client->stage_id,
- client->proc_desc_offset);
- seq_printf(m, "\tDoorbell id %d, offset: 0x%lx\n",
- client->doorbell_id, client->doorbell_offset);
/* Add more as required ... */
return 0;
@@ -2020,7 +1979,7 @@ static int i915_psr_sink_status_show(struct seq_file *m, void *data)
struct drm_connector *connector = m->private;
struct drm_i915_private *dev_priv = to_i915(connector->dev);
struct intel_dp *intel_dp =
- enc_to_intel_dp(&intel_attached_encoder(connector)->base);
+ enc_to_intel_dp(intel_attached_encoder(to_intel_connector(connector)));
int ret;
if (!CAN_PSR(dev_priv)) {
@@ -2367,7 +2326,7 @@ out:
}
static void intel_seq_print_mode(struct seq_file *m, int tabs,
- struct drm_display_mode *mode)
+ const struct drm_display_mode *mode)
{
int i;
@@ -2378,59 +2337,35 @@ static void intel_seq_print_mode(struct seq_file *m, int tabs,
}
static void intel_encoder_info(struct seq_file *m,
- struct intel_crtc *intel_crtc,
- struct intel_encoder *intel_encoder)
+ struct intel_crtc *crtc,
+ struct intel_encoder *encoder)
{
struct drm_i915_private *dev_priv = node_to_i915(m->private);
- struct drm_device *dev = &dev_priv->drm;
- struct drm_crtc *crtc = &intel_crtc->base;
- struct intel_connector *intel_connector;
- struct drm_encoder *encoder;
-
- encoder = &intel_encoder->base;
- seq_printf(m, "\tencoder %d: type: %s, connectors:\n",
- encoder->base.id, encoder->name);
- for_each_connector_on_encoder(dev, encoder, intel_connector) {
- struct drm_connector *connector = &intel_connector->base;
- seq_printf(m, "\t\tconnector %d: type: %s, status: %s",
- connector->base.id,
- connector->name,
- drm_get_connector_status_name(connector->status));
- if (connector->status == connector_status_connected) {
- struct drm_display_mode *mode = &crtc->mode;
- seq_printf(m, ", mode:\n");
- intel_seq_print_mode(m, 2, mode);
- } else {
- seq_putc(m, '\n');
- }
- }
-}
+ struct drm_connector_list_iter conn_iter;
+ struct drm_connector *connector;
-static void intel_crtc_info(struct seq_file *m, struct intel_crtc *intel_crtc)
-{
- struct drm_i915_private *dev_priv = node_to_i915(m->private);
- struct drm_device *dev = &dev_priv->drm;
- struct drm_crtc *crtc = &intel_crtc->base;
- struct intel_encoder *intel_encoder;
- struct drm_plane_state *plane_state = crtc->primary->state;
- struct drm_framebuffer *fb = plane_state->fb;
+ seq_printf(m, "\t[ENCODER:%d:%s]: connectors:\n",
+ encoder->base.base.id, encoder->base.name);
- if (fb)
- seq_printf(m, "\tfb: %d, pos: %dx%d, size: %dx%d\n",
- fb->base.id, plane_state->src_x >> 16,
- plane_state->src_y >> 16, fb->width, fb->height);
- else
- seq_puts(m, "\tprimary plane disabled\n");
- for_each_encoder_on_crtc(dev, crtc, intel_encoder)
- intel_encoder_info(m, intel_crtc, intel_encoder);
+ drm_connector_list_iter_begin(&dev_priv->drm, &conn_iter);
+ drm_for_each_connector_iter(connector, &conn_iter) {
+ const struct drm_connector_state *conn_state =
+ connector->state;
+
+ if (conn_state->best_encoder != &encoder->base)
+ continue;
+
+ seq_printf(m, "\t\t[CONNECTOR:%d:%s]\n",
+ connector->base.id, connector->name);
+ }
+ drm_connector_list_iter_end(&conn_iter);
}
static void intel_panel_info(struct seq_file *m, struct intel_panel *panel)
{
- struct drm_display_mode *mode = panel->fixed_mode;
+ const struct drm_display_mode *mode = panel->fixed_mode;
- seq_printf(m, "\tfixed mode:\n");
- intel_seq_print_mode(m, 2, mode);
+ seq_printf(m, "\tfixed mode: " DRM_MODE_FMT "\n", DRM_MODE_ARG(mode));
}
static void intel_hdcp_info(struct seq_file *m,
@@ -2456,7 +2391,7 @@ static void intel_dp_info(struct seq_file *m,
struct intel_connector *intel_connector)
{
struct intel_encoder *intel_encoder = intel_connector->encoder;
- struct intel_dp *intel_dp = enc_to_intel_dp(&intel_encoder->base);
+ struct intel_dp *intel_dp = enc_to_intel_dp(intel_encoder);
seq_printf(m, "\tDPCD rev: %x\n", intel_dp->dpcd[DP_DPCD_REV]);
seq_printf(m, "\taudio support: %s\n", yesno(intel_dp->has_audio));
@@ -2476,7 +2411,7 @@ static void intel_dp_mst_info(struct seq_file *m,
{
struct intel_encoder *intel_encoder = intel_connector->encoder;
struct intel_dp_mst_encoder *intel_mst =
- enc_to_mst(&intel_encoder->base);
+ enc_to_mst(intel_encoder);
struct intel_digital_port *intel_dig_port = intel_mst->primary;
struct intel_dp *intel_dp = &intel_dig_port->dp;
bool has_audio = drm_dp_mst_port_has_audio(&intel_dp->mst_mgr,
@@ -2489,7 +2424,7 @@ static void intel_hdmi_info(struct seq_file *m,
struct intel_connector *intel_connector)
{
struct intel_encoder *intel_encoder = intel_connector->encoder;
- struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&intel_encoder->base);
+ struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(intel_encoder);
seq_printf(m, "\taudio support: %s\n", yesno(intel_hdmi->has_audio));
if (intel_connector->hdcp.shim) {
@@ -2508,10 +2443,12 @@ static void intel_connector_info(struct seq_file *m,
struct drm_connector *connector)
{
struct intel_connector *intel_connector = to_intel_connector(connector);
- struct intel_encoder *intel_encoder = intel_connector->encoder;
- struct drm_display_mode *mode;
+ const struct drm_connector_state *conn_state = connector->state;
+ struct intel_encoder *encoder =
+ to_intel_encoder(conn_state->best_encoder);
+ const struct drm_display_mode *mode;
- seq_printf(m, "connector %d: type %s, status: %s\n",
+ seq_printf(m, "[CONNECTOR:%d:%s]: status: %s\n",
connector->base.id, connector->name,
drm_get_connector_status_name(connector->status));
@@ -2525,24 +2462,24 @@ static void intel_connector_info(struct seq_file *m,
drm_get_subpixel_order_name(connector->display_info.subpixel_order));
seq_printf(m, "\tCEA rev: %d\n", connector->display_info.cea_rev);
- if (!intel_encoder)
+ if (!encoder)
return;
switch (connector->connector_type) {
case DRM_MODE_CONNECTOR_DisplayPort:
case DRM_MODE_CONNECTOR_eDP:
- if (intel_encoder->type == INTEL_OUTPUT_DP_MST)
+ if (encoder->type == INTEL_OUTPUT_DP_MST)
intel_dp_mst_info(m, intel_connector);
else
intel_dp_info(m, intel_connector);
break;
case DRM_MODE_CONNECTOR_LVDS:
- if (intel_encoder->type == INTEL_OUTPUT_LVDS)
+ if (encoder->type == INTEL_OUTPUT_LVDS)
intel_lvds_info(m, intel_connector);
break;
case DRM_MODE_CONNECTOR_HDMIA:
- if (intel_encoder->type == INTEL_OUTPUT_HDMI ||
- intel_encoder->type == INTEL_OUTPUT_DDI)
+ if (encoder->type == INTEL_OUTPUT_HDMI ||
+ encoder->type == INTEL_OUTPUT_DDI)
intel_hdmi_info(m, intel_connector);
break;
default:
@@ -2589,70 +2526,88 @@ static void plane_rotation(char *buf, size_t bufsize, unsigned int rotation)
rotation);
}
-static void intel_plane_info(struct seq_file *m, struct intel_crtc *intel_crtc)
+static void intel_plane_uapi_info(struct seq_file *m, struct intel_plane *plane)
{
- struct drm_i915_private *dev_priv = node_to_i915(m->private);
- struct drm_device *dev = &dev_priv->drm;
- struct intel_plane *intel_plane;
+ const struct intel_plane_state *plane_state =
+ to_intel_plane_state(plane->base.state);
+ const struct drm_framebuffer *fb = plane_state->uapi.fb;
+ struct drm_format_name_buf format_name;
+ struct drm_rect src, dst;
+ char rot_str[48];
- for_each_intel_plane_on_crtc(dev, intel_crtc, intel_plane) {
- struct drm_plane_state *state;
- struct drm_plane *plane = &intel_plane->base;
- struct drm_format_name_buf format_name;
- char rot_str[48];
+ src = drm_plane_state_src(&plane_state->uapi);
+ dst = drm_plane_state_dest(&plane_state->uapi);
- if (!plane->state) {
- seq_puts(m, "plane->state is NULL!\n");
- continue;
- }
+ if (fb)
+ drm_get_format_name(fb->format->format, &format_name);
- state = plane->state;
+ plane_rotation(rot_str, sizeof(rot_str),
+ plane_state->uapi.rotation);
- if (state->fb) {
- drm_get_format_name(state->fb->format->format,
- &format_name);
- } else {
- sprintf(format_name.str, "N/A");
- }
+ seq_printf(m, "\t\tuapi: fb=%d,%s,%dx%d, src=" DRM_RECT_FP_FMT ", dst=" DRM_RECT_FMT ", rotation=%s\n",
+ fb ? fb->base.id : 0, fb ? format_name.str : "n/a",
+ fb ? fb->width : 0, fb ? fb->height : 0,
+ DRM_RECT_FP_ARG(&src),
+ DRM_RECT_ARG(&dst),
+ rot_str);
+}
+
+static void intel_plane_hw_info(struct seq_file *m, struct intel_plane *plane)
+{
+ const struct intel_plane_state *plane_state =
+ to_intel_plane_state(plane->base.state);
+ const struct drm_framebuffer *fb = plane_state->hw.fb;
+ struct drm_format_name_buf format_name;
+ char rot_str[48];
+
+ if (!fb)
+ return;
- plane_rotation(rot_str, sizeof(rot_str), state->rotation);
-
- seq_printf(m, "\t--Plane id %d: type=%s, crtc_pos=%4dx%4d, crtc_size=%4dx%4d, src_pos=%d.%04ux%d.%04u, src_size=%d.%04ux%d.%04u, format=%s, rotation=%s\n",
- plane->base.id,
- plane_type(intel_plane->base.type),
- state->crtc_x, state->crtc_y,
- state->crtc_w, state->crtc_h,
- (state->src_x >> 16),
- ((state->src_x & 0xffff) * 15625) >> 10,
- (state->src_y >> 16),
- ((state->src_y & 0xffff) * 15625) >> 10,
- (state->src_w >> 16),
- ((state->src_w & 0xffff) * 15625) >> 10,
- (state->src_h >> 16),
- ((state->src_h & 0xffff) * 15625) >> 10,
- format_name.str,
- rot_str);
+ drm_get_format_name(fb->format->format, &format_name);
+
+ plane_rotation(rot_str, sizeof(rot_str),
+ plane_state->hw.rotation);
+
+ seq_printf(m, "\t\thw: fb=%d,%s,%dx%d, visible=%s, src=" DRM_RECT_FP_FMT ", dst=" DRM_RECT_FMT ", rotation=%s\n",
+ fb->base.id, format_name.str,
+ fb->width, fb->height,
+ yesno(plane_state->uapi.visible),
+ DRM_RECT_FP_ARG(&plane_state->uapi.src),
+ DRM_RECT_ARG(&plane_state->uapi.dst),
+ rot_str);
+}
+
+static void intel_plane_info(struct seq_file *m, struct intel_crtc *crtc)
+{
+ struct drm_i915_private *dev_priv = node_to_i915(m->private);
+ struct intel_plane *plane;
+
+ for_each_intel_plane_on_crtc(&dev_priv->drm, crtc, plane) {
+ seq_printf(m, "\t[PLANE:%d:%s]: type=%s\n",
+ plane->base.base.id, plane->base.name,
+ plane_type(plane->base.type));
+ intel_plane_uapi_info(m, plane);
+ intel_plane_hw_info(m, plane);
}
}
-static void intel_scaler_info(struct seq_file *m, struct intel_crtc *intel_crtc)
+static void intel_scaler_info(struct seq_file *m, struct intel_crtc *crtc)
{
- struct intel_crtc_state *pipe_config;
- int num_scalers = intel_crtc->num_scalers;
+ const struct intel_crtc_state *crtc_state =
+ to_intel_crtc_state(crtc->base.state);
+ int num_scalers = crtc->num_scalers;
int i;
- pipe_config = to_intel_crtc_state(intel_crtc->base.state);
-
 /* Not all platforms have a scaler */
if (num_scalers) {
seq_printf(m, "\tnum_scalers=%d, scaler_users=%x scaler_id=%d",
num_scalers,
- pipe_config->scaler_state.scaler_users,
- pipe_config->scaler_state.scaler_id);
+ crtc_state->scaler_state.scaler_users,
+ crtc_state->scaler_state.scaler_id);
for (i = 0; i < num_scalers; i++) {
- struct intel_scaler *sc =
- &pipe_config->scaler_state.scalers[i];
+ const struct intel_scaler *sc =
+ &crtc_state->scaler_state.scalers[i];
seq_printf(m, ", scalers[%d]: use=%s, mode=%x",
i, yesno(sc->in_use), sc->mode);
@@ -2663,6 +2618,44 @@ static void intel_scaler_info(struct seq_file *m, struct intel_crtc *intel_crtc)
}
}
+static void intel_crtc_info(struct seq_file *m, struct intel_crtc *crtc)
+{
+ struct drm_i915_private *dev_priv = node_to_i915(m->private);
+ const struct intel_crtc_state *crtc_state =
+ to_intel_crtc_state(crtc->base.state);
+ struct intel_encoder *encoder;
+
+ seq_printf(m, "[CRTC:%d:%s]:\n",
+ crtc->base.base.id, crtc->base.name);
+
+ seq_printf(m, "\tuapi: enable=%s, active=%s, mode=" DRM_MODE_FMT "\n",
+ yesno(crtc_state->uapi.enable),
+ yesno(crtc_state->uapi.active),
+ DRM_MODE_ARG(&crtc_state->uapi.mode));
+
+ if (crtc_state->hw.enable) {
+ seq_printf(m, "\thw: active=%s, adjusted_mode=" DRM_MODE_FMT "\n",
+ yesno(crtc_state->hw.active),
+ DRM_MODE_ARG(&crtc_state->hw.adjusted_mode));
+
+ seq_printf(m, "\tpipe src size=%dx%d, dither=%s, bpp=%d\n",
+ crtc_state->pipe_src_w, crtc_state->pipe_src_h,
+ yesno(crtc_state->dither), crtc_state->pipe_bpp);
+
+ intel_scaler_info(m, crtc);
+ }
+
+ for_each_intel_encoder_mask(&dev_priv->drm, encoder,
+ crtc_state->uapi.encoder_mask)
+ intel_encoder_info(m, crtc, encoder);
+
+ intel_plane_info(m, crtc);
+
+ seq_printf(m, "\tunderrun reporting: cpu=%s pch=%s\n",
+ yesno(!crtc->cpu_fifo_underrun_disabled),
+ yesno(!crtc->pch_fifo_underrun_disabled));
+}
+
static int i915_display_info(struct seq_file *m, void *unused)
{
struct drm_i915_private *dev_priv = node_to_i915(m->private);
@@ -2674,52 +2667,22 @@ static int i915_display_info(struct seq_file *m, void *unused)
wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
+ drm_modeset_lock_all(dev);
+
seq_printf(m, "CRTC info\n");
seq_printf(m, "---------\n");
- for_each_intel_crtc(dev, crtc) {
- struct intel_crtc_state *pipe_config;
-
- drm_modeset_lock(&crtc->base.mutex, NULL);
- pipe_config = to_intel_crtc_state(crtc->base.state);
-
- seq_printf(m, "CRTC %d: pipe: %c, active=%s, (size=%dx%d), dither=%s, bpp=%d\n",
- crtc->base.base.id, pipe_name(crtc->pipe),
- yesno(pipe_config->base.active),
- pipe_config->pipe_src_w, pipe_config->pipe_src_h,
- yesno(pipe_config->dither), pipe_config->pipe_bpp);
-
- if (pipe_config->base.active) {
- struct intel_plane *cursor =
- to_intel_plane(crtc->base.cursor);
-
- intel_crtc_info(m, crtc);
-
- seq_printf(m, "\tcursor visible? %s, position (%d, %d), size %dx%d, addr 0x%08x\n",
- yesno(cursor->base.state->visible),
- cursor->base.state->crtc_x,
- cursor->base.state->crtc_y,
- cursor->base.state->crtc_w,
- cursor->base.state->crtc_h,
- cursor->cursor.base);
- intel_scaler_info(m, crtc);
- intel_plane_info(m, crtc);
- }
-
- seq_printf(m, "\tunderrun reporting: cpu=%s pch=%s \n",
- yesno(!crtc->cpu_fifo_underrun_disabled),
- yesno(!crtc->pch_fifo_underrun_disabled));
- drm_modeset_unlock(&crtc->base.mutex);
- }
+ for_each_intel_crtc(dev, crtc)
+ intel_crtc_info(m, crtc);
seq_printf(m, "\n");
seq_printf(m, "Connector info\n");
seq_printf(m, "--------------\n");
- mutex_lock(&dev->mode_config.mutex);
drm_connector_list_iter_begin(dev, &conn_iter);
drm_for_each_connector_iter(connector, &conn_iter)
intel_connector_info(m, connector);
drm_connector_list_iter_end(&conn_iter);
- mutex_unlock(&dev->mode_config.mutex);
+
+ drm_modeset_unlock_all(dev);
intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
@@ -2755,7 +2718,7 @@ static int i915_rcs_topology(struct seq_file *m, void *unused)
struct drm_i915_private *dev_priv = node_to_i915(m->private);
struct drm_printer p = drm_seq_file_printer(m);
- intel_device_info_dump_topology(&RUNTIME_INFO(dev_priv)->sseu, &p);
+ intel_device_info_print_topology(&RUNTIME_INFO(dev_priv)->sseu, &p);
return 0;
}
@@ -3051,11 +3014,11 @@ static int i915_dp_mst_info(struct seq_file *m, void *unused)
if (connector->connector_type != DRM_MODE_CONNECTOR_DisplayPort)
continue;
- intel_encoder = intel_attached_encoder(connector);
+ intel_encoder = intel_attached_encoder(to_intel_connector(connector));
if (!intel_encoder || intel_encoder->type == INTEL_OUTPUT_DP_MST)
continue;
- intel_dig_port = enc_to_dig_port(&intel_encoder->base);
+ intel_dig_port = enc_to_dig_port(intel_encoder);
if (!intel_dig_port->dp.can_mst)
continue;
@@ -3105,7 +3068,7 @@ static ssize_t i915_displayport_test_active_write(struct file *file,
continue;
if (encoder && connector->status == connector_status_connected) {
- intel_dp = enc_to_intel_dp(&encoder->base);
+ intel_dp = enc_to_intel_dp(encoder);
status = kstrtoint(input_buffer, 10, &val);
if (status < 0)
break;
@@ -3114,9 +3077,9 @@ static ssize_t i915_displayport_test_active_write(struct file *file,
* testing code, only accept an actual value of 1 here
*/
if (val == 1)
- intel_dp->compliance.test_active = 1;
+ intel_dp->compliance.test_active = true;
else
- intel_dp->compliance.test_active = 0;
+ intel_dp->compliance.test_active = false;
}
}
drm_connector_list_iter_end(&conn_iter);
@@ -3149,7 +3112,7 @@ static int i915_displayport_test_active_show(struct seq_file *m, void *data)
continue;
if (encoder && connector->status == connector_status_connected) {
- intel_dp = enc_to_intel_dp(&encoder->base);
+ intel_dp = enc_to_intel_dp(encoder);
if (intel_dp->compliance.test_active)
seq_puts(m, "1");
else
@@ -3199,7 +3162,7 @@ static int i915_displayport_test_data_show(struct seq_file *m, void *data)
continue;
if (encoder && connector->status == connector_status_connected) {
- intel_dp = enc_to_intel_dp(&encoder->base);
+ intel_dp = enc_to_intel_dp(encoder);
if (intel_dp->compliance.test_type ==
DP_TEST_LINK_EDID_READ)
seq_printf(m, "%lx",
@@ -3243,7 +3206,7 @@ static int i915_displayport_test_type_show(struct seq_file *m, void *data)
continue;
if (encoder && connector->status == connector_status_connected) {
- intel_dp = enc_to_intel_dp(&encoder->base);
+ intel_dp = enc_to_intel_dp(encoder);
seq_printf(m, "%02lx", intel_dp->compliance.test_type);
} else
seq_puts(m, "0");
@@ -3854,8 +3817,8 @@ static void gen9_sseu_device_status(struct drm_i915_private *dev_priv,
#undef SS_MAX
}
-static void broadwell_sseu_device_status(struct drm_i915_private *dev_priv,
- struct sseu_dev_info *sseu)
+static void bdw_sseu_device_status(struct drm_i915_private *dev_priv,
+ struct sseu_dev_info *sseu)
{
const struct intel_runtime_info *info = RUNTIME_INFO(dev_priv);
u32 slice_info = I915_READ(GEN8_GT_SLICE_INFO);
@@ -3940,7 +3903,7 @@ static int i915_sseu_status(struct seq_file *m, void *unused)
if (IS_CHERRYVIEW(dev_priv))
cherryview_sseu_device_status(dev_priv, &sseu);
else if (IS_BROADWELL(dev_priv))
- broadwell_sseu_device_status(dev_priv, &sseu);
+ bdw_sseu_device_status(dev_priv, &sseu);
else if (IS_GEN(dev_priv, 9))
gen9_sseu_device_status(dev_priv, &sseu);
else if (INTEL_GEN(dev_priv) >= 10)
@@ -4161,11 +4124,11 @@ static int i915_drrs_ctl_set(void *data, u64 val)
crtc_state = to_intel_crtc_state(crtc->base.state);
- if (!crtc_state->base.active ||
+ if (!crtc_state->hw.active ||
!crtc_state->has_drrs)
goto out;
- commit = crtc_state->base.commit;
+ commit = crtc_state->uapi.commit;
if (commit) {
ret = wait_for_completion_interruptible(&commit->hw_done);
if (ret)
@@ -4177,18 +4140,18 @@ static int i915_drrs_ctl_set(void *data, u64 val)
struct intel_encoder *encoder;
struct intel_dp *intel_dp;
- if (!(crtc_state->base.connector_mask &
+ if (!(crtc_state->uapi.connector_mask &
drm_connector_mask(connector)))
continue;
- encoder = intel_attached_encoder(connector);
+ encoder = intel_attached_encoder(to_intel_connector(connector));
if (encoder->type != INTEL_OUTPUT_EDP)
continue;
DRM_DEBUG_DRIVER("Manually %sabling DRRS. %llu\n",
val ? "en" : "dis", val);
- intel_dp = enc_to_intel_dp(&encoder->base);
+ intel_dp = enc_to_intel_dp(encoder);
if (val)
intel_edp_drrs_enable(intel_dp,
crtc_state);
@@ -4236,14 +4199,14 @@ i915_fifo_underrun_reset_write(struct file *filp,
return ret;
crtc_state = to_intel_crtc_state(intel_crtc->base.state);
- commit = crtc_state->base.commit;
+ commit = crtc_state->uapi.commit;
if (commit) {
ret = wait_for_completion_interruptible(&commit->hw_done);
if (!ret)
ret = wait_for_completion_interruptible(&commit->flip_done);
}
- if (!ret && crtc_state->base.active) {
+ if (!ret && crtc_state->hw.active) {
DRM_DEBUG_KMS("Re-arming FIFO underruns on pipe %c\n",
pipe_name(intel_crtc->pipe));
@@ -4392,7 +4355,7 @@ static int i915_dpcd_show(struct seq_file *m, void *data)
{
struct drm_connector *connector = m->private;
struct intel_dp *intel_dp =
- enc_to_intel_dp(&intel_attached_encoder(connector)->base);
+ enc_to_intel_dp(intel_attached_encoder(to_intel_connector(connector)));
u8 buf[16];
ssize_t err;
int i;
@@ -4427,7 +4390,7 @@ static int i915_panel_show(struct seq_file *m, void *data)
{
struct drm_connector *connector = m->private;
struct intel_dp *intel_dp =
- enc_to_intel_dp(&intel_attached_encoder(connector)->base);
+ enc_to_intel_dp(intel_attached_encoder(to_intel_connector(connector)));
if (connector->status != connector_status_connected)
return -ENODEV;
@@ -4505,7 +4468,7 @@ static int i915_dsc_fec_support_show(struct seq_file *m, void *data)
} else if (ret) {
break;
}
- intel_dp = enc_to_intel_dp(&intel_attached_encoder(connector)->base);
+ intel_dp = enc_to_intel_dp(intel_attached_encoder(to_intel_connector(connector)));
crtc_state = to_intel_crtc_state(crtc->state);
seq_printf(m, "DSC_Enabled: %s\n",
yesno(crtc_state->dsc.compression_enable));
@@ -4532,8 +4495,8 @@ static ssize_t i915_dsc_fec_support_write(struct file *file,
int ret;
struct drm_connector *connector =
((struct seq_file *)file->private_data)->private;
- struct intel_encoder *encoder = intel_attached_encoder(connector);
- struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
+ struct intel_encoder *encoder = intel_attached_encoder(to_intel_connector(connector));
+ struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
if (len == 0)
return 0;
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index 3c512c571e60..f7385abdd74b 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -61,6 +61,7 @@
#include "gem/i915_gem_context.h"
#include "gem/i915_gem_ioctls.h"
+#include "gem/i915_gem_mman.h"
#include "gt/intel_gt.h"
#include "gt/intel_gt_pm.h"
#include "gt/intel_rc6.h"
@@ -468,6 +469,12 @@ static void vlv_free_s0ix_state(struct drm_i915_private *i915)
i915->vlv_s0ix_state = NULL;
}
+static void sanitize_gpu(struct drm_i915_private *i915)
+{
+ if (!INTEL_INFO(i915)->gpu_reset_clobbers_display)
+ __intel_gt_reset(&i915->gt, ALL_ENGINES);
+}
+
/**
* i915_driver_early_probe - setup state not requiring device access
* @dev_priv: device private
@@ -601,6 +608,9 @@ static int i915_driver_mmio_probe(struct drm_i915_private *dev_priv)
if (ret)
goto err_uncore;
+ /* As early as possible, scrub existing GPU state before clobbering */
+ sanitize_gpu(dev_priv);
+
return 0;
err_uncore:
@@ -618,7 +628,6 @@ err_bridge:
*/
static void i915_driver_mmio_release(struct drm_i915_private *dev_priv)
{
- intel_engines_cleanup(&dev_priv->gt);
intel_teardown_mchbar(dev_priv);
intel_uncore_fini_mmio(&dev_priv->uncore);
pci_dev_put(dev_priv->bridge_dev);
@@ -1052,7 +1061,7 @@ intel_get_dram_info(struct drm_i915_private *dev_priv)
*/
dram_info->is_16gb_dimm = !IS_GEN9_LP(dev_priv);
- if (INTEL_GEN(dev_priv) < 9)
+ if (INTEL_GEN(dev_priv) < 9 || !HAS_DISPLAY(dev_priv))
return;
if (IS_GEN9_LP(dev_priv))
@@ -1385,7 +1394,7 @@ static void i915_driver_unregister(struct drm_i915_private *dev_priv)
static void i915_welcome_messages(struct drm_i915_private *dev_priv)
{
- if (drm_debug & DRM_UT_DRIVER) {
+ if (drm_debug_enabled(DRM_UT_DRIVER)) {
struct drm_printer p = drm_debug_printer("i915 device info:");
drm_printf(&p, "pciid=0x%04x rev=0x%02x platform=%s (subplatform=0x%x) gen=%i\n",
@@ -1396,8 +1405,8 @@ static void i915_welcome_messages(struct drm_i915_private *dev_priv)
INTEL_INFO(dev_priv)->platform),
INTEL_GEN(dev_priv));
- intel_device_info_dump_flags(INTEL_INFO(dev_priv), &p);
- intel_device_info_dump_runtime(RUNTIME_INFO(dev_priv), &p);
+ intel_device_info_print_static(INTEL_INFO(dev_priv), &p);
+ intel_device_info_print_runtime(RUNTIME_INFO(dev_priv), &p);
}
if (IS_ENABLED(CONFIG_DRM_I915_DEBUG))
@@ -1817,9 +1826,7 @@ static int i915_drm_resume(struct drm_device *dev)
disable_rpm_wakeref_asserts(&dev_priv->runtime_pm);
- intel_rc6_ctx_wa_resume(&dev_priv->gt.rc6);
-
- intel_gt_sanitize(&dev_priv->gt, true);
+ sanitize_gpu(dev_priv);
ret = i915_ggtt_enable_hw(dev_priv);
if (ret)
@@ -2662,18 +2669,12 @@ const struct dev_pm_ops i915_pm_ops = {
.runtime_resume = intel_runtime_resume,
};
-static const struct vm_operations_struct i915_gem_vm_ops = {
- .fault = i915_gem_fault,
- .open = drm_gem_vm_open,
- .close = drm_gem_vm_close,
-};
-
static const struct file_operations i915_driver_fops = {
.owner = THIS_MODULE,
.open = drm_open,
.release = drm_release,
.unlocked_ioctl = drm_ioctl,
- .mmap = drm_gem_mmap,
+ .mmap = i915_gem_mmap,
.poll = drm_poll,
.read = drm_read,
.compat_ioctl = i915_compat_ioctl,
@@ -2720,7 +2721,7 @@ static const struct drm_ioctl_desc i915_ioctls[] = {
DRM_IOCTL_DEF_DRV(I915_GEM_PREAD, i915_gem_pread_ioctl, DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(I915_GEM_PWRITE, i915_gem_pwrite_ioctl, DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(I915_GEM_MMAP, i915_gem_mmap_ioctl, DRM_RENDER_ALLOW),
- DRM_IOCTL_DEF_DRV(I915_GEM_MMAP_GTT, i915_gem_mmap_gtt_ioctl, DRM_RENDER_ALLOW),
+ DRM_IOCTL_DEF_DRV(I915_GEM_MMAP_OFFSET, i915_gem_mmap_offset_ioctl, DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(I915_GEM_SET_DOMAIN, i915_gem_set_domain_ioctl, DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(I915_GEM_SW_FINISH, i915_gem_sw_finish_ioctl, DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(I915_GEM_SET_TILING, i915_gem_set_tiling_ioctl, DRM_RENDER_ALLOW),
@@ -2762,7 +2763,6 @@ static struct drm_driver driver = {
.gem_close_object = i915_gem_close_object,
.gem_free_object_unlocked = i915_gem_free_object,
- .gem_vm_ops = &i915_gem_vm_ops,
.prime_handle_to_fd = drm_gem_prime_handle_to_fd,
.prime_fd_to_handle = drm_gem_prime_fd_to_handle,
@@ -2773,7 +2773,8 @@ static struct drm_driver driver = {
.get_scanout_position = i915_get_crtc_scanoutpos,
.dumb_create = i915_gem_dumb_create,
- .dumb_map_offset = i915_gem_mmap_gtt,
+ .dumb_map_offset = i915_gem_dumb_mmap_offset,
+
.ioctls = i915_ioctls,
.num_ioctls = ARRAY_SIZE(i915_ioctls),
.fops = &i915_driver_fops,
@@ -2784,7 +2785,3 @@ static struct drm_driver driver = {
.minor = DRIVER_MINOR,
.patchlevel = DRIVER_PATCHLEVEL,
};
-
-#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
-#include "selftests/mock_drm.c"
-#endif
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 21aa08f55811..077af22b8340 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -46,6 +46,7 @@
#include <linux/dma-resv.h>
#include <linux/shmem_fs.h>
#include <linux/stackdepot.h>
+#include <linux/xarray.h>
#include <drm/intel-gtt.h>
#include <drm/drm_legacy.h> /* for struct drm_dma_handle */
@@ -110,8 +111,8 @@
#define DRIVER_NAME "i915"
#define DRIVER_DESC "Intel Graphics"
-#define DRIVER_DATE "20191101"
-#define DRIVER_TIMESTAMP 1572604873
+#define DRIVER_DATE "20200114"
+#define DRIVER_TIMESTAMP 1579001978
struct drm_i915_gem_object;
@@ -201,8 +202,7 @@ struct drm_i915_file_private {
struct list_head request_list;
} mm;
- struct idr context_idr;
- struct mutex context_idr_lock; /* guards context_idr */
+ struct xarray context_xa;
struct idr vm_idr;
struct mutex vm_idr_lock; /* guards vm_idr */
@@ -273,11 +273,11 @@ struct drm_i915_display_funcs {
int (*compute_pipe_wm)(struct intel_crtc_state *crtc_state);
int (*compute_intermediate_wm)(struct intel_crtc_state *crtc_state);
void (*initial_watermarks)(struct intel_atomic_state *state,
- struct intel_crtc_state *crtc_state);
+ struct intel_crtc *crtc);
void (*atomic_update_watermarks)(struct intel_atomic_state *state,
- struct intel_crtc_state *crtc_state);
+ struct intel_crtc *crtc);
void (*optimize_watermarks)(struct intel_atomic_state *state,
- struct intel_crtc_state *crtc_state);
+ struct intel_crtc *crtc);
int (*compute_global_watermarks)(struct intel_atomic_state *state);
void (*update_wm)(struct intel_crtc *crtc);
int (*modeset_calc_cdclk)(struct intel_atomic_state *state);
@@ -290,10 +290,10 @@ struct drm_i915_display_funcs {
struct intel_initial_plane_config *);
int (*crtc_compute_clock)(struct intel_crtc *crtc,
struct intel_crtc_state *crtc_state);
- void (*crtc_enable)(struct intel_crtc_state *pipe_config,
- struct intel_atomic_state *old_state);
- void (*crtc_disable)(struct intel_crtc_state *old_crtc_state,
- struct intel_atomic_state *old_state);
+ void (*crtc_enable)(struct intel_atomic_state *state,
+ struct intel_crtc *crtc);
+ void (*crtc_disable)(struct intel_atomic_state *state,
+ struct intel_crtc *crtc);
void (*commit_modeset_enables)(struct intel_atomic_state *state);
void (*commit_modeset_disables)(struct intel_atomic_state *state);
void (*audio_codec_enable)(struct intel_encoder *encoder,
@@ -366,7 +366,6 @@ struct intel_fbc {
unsigned threshold;
unsigned int possible_framebuffer_bits;
unsigned int busy_bits;
- unsigned int visible_pipes_mask;
struct intel_crtc *crtc;
struct drm_mm_node compressed_fb;
@@ -374,8 +373,8 @@ struct intel_fbc {
bool false_color;
- bool enabled;
bool active;
+ bool activated;
bool flip_pending;
bool underrun_detected;
@@ -387,9 +386,6 @@ struct intel_fbc {
* these problems.
*/
struct intel_fbc_state_cache {
- struct i915_vma *vma;
- unsigned long flags;
-
struct {
unsigned int mode_flags;
u32 hsw_bdw_pixel_rate;
@@ -418,6 +414,8 @@ struct intel_fbc {
const struct drm_format_info *format;
unsigned int stride;
} fb;
+ u16 gen9_wa_cfb_stride;
+ s8 fence_id;
} state_cache;
/*
@@ -428,9 +426,6 @@ struct intel_fbc {
* are supposed to read from it in order to program the registers.
*/
struct intel_fbc_reg_params {
- struct i915_vma *vma;
- unsigned long flags;
-
struct {
enum pipe pipe;
enum i9xx_plane_id i9xx_plane;
@@ -443,7 +438,9 @@ struct intel_fbc {
} fb;
int cfb_size;
- unsigned int gen9_wa_cfb_stride;
+ u16 gen9_wa_cfb_stride;
+ s8 fence_id;
+ bool plane_visible;
} params;
const char *no_fbc_reason;
@@ -508,6 +505,7 @@ struct i915_psr {
bool dc3co_enabled;
u32 dc3co_exit_delay;
struct delayed_work idle_work;
+ bool initially_probed;
};
#define QUIRK_LVDS_SSC_DISABLE (1<<1)
@@ -621,19 +619,18 @@ struct i915_gem_mm {
#define I915_ENGINE_WEDGED_TIMEOUT (60 * HZ) /* Reset but no recovery? */
+/* Amount of SAGV/QGV points, BSpec precisely defines this */
+#define I915_NUM_QGV_POINTS 8
+
struct ddi_vbt_port_info {
/* Non-NULL if port present. */
const struct child_device_config *child;
int max_tmds_clock;
- /*
- * This is an index in the HDMI/DVI DDI buffer translation table.
- * The special value HDMI_LEVEL_SHIFT_UNKNOWN means the VBT didn't
- * populate this field.
- */
-#define HDMI_LEVEL_SHIFT_UNKNOWN 0xff
+ /* This is an index in the HDMI/DVI DDI buffer translation table. */
u8 hdmi_level_shift;
+ u8 hdmi_level_shift_set:1;
u8 supports_dvi:1;
u8 supports_hdmi:1;
@@ -724,8 +721,7 @@ struct intel_vbt_data {
int crt_ddc_pin;
- int child_dev_num;
- struct child_device_config *child_dev;
+ struct list_head display_devices;
struct ddi_vbt_port_info ddi_port_info[I915_MAX_PORTS];
struct sdvo_device_mapping sdvo_mappings[2];
@@ -891,6 +887,10 @@ struct intel_cdclk_state {
u8 voltage_level;
};
+struct i915_selftest_stash {
+ atomic_t counter;
+};
+
struct drm_i915_private {
struct drm_device drm;
@@ -956,9 +956,6 @@ struct drm_i915_private {
struct pci_dev *bridge_dev;
- /* Context used internally to idle the GPU and setup initial state */
- struct i915_gem_context *kernel_context;
-
struct intel_engine_cs *engine[I915_NUM_ENGINES];
struct rb_root uabi_engines;
@@ -1233,7 +1230,8 @@ struct drm_i915_private {
} dram_info;
struct intel_bw_info {
- unsigned int deratedbw[3]; /* for each QGV point */
+ /* for each QGV point */
+ unsigned int deratedbw[I915_NUM_QGV_POINTS];
u8 num_qgv_points;
u8 num_planes;
} max_bw[6];
@@ -1248,8 +1246,6 @@ struct drm_i915_private {
struct intel_gt gt;
struct {
- struct notifier_block pm_notifier;
-
struct i915_gem_contexts {
spinlock_t lock; /* locks list */
struct list_head list;
@@ -1257,6 +1253,16 @@ struct drm_i915_private {
struct llist_head free_list;
struct work_struct free_work;
} contexts;
+
+ /*
+ * We replace the local file with a global mappings as the
+ * backing storage for the mmap is on the device and not
+ * on the struct file, and we do not want to prolong the
+ * lifetime of the local fd. To minimise the number of
+ * anonymous inodes we create, we use a global singleton to
+ * share the global mapping.
+ */
+ struct file *mmap_singleton;
} gem;
u8 pch_ssc_use;
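The mmap_singleton comment above describes a classic lazily created, reference-counted singleton. A self-contained C model of that lifecycle, with a pthread mutex and a heap object standing in for the kernel's locking and the anonymous struct file:

#include <pthread.h>
#include <stdlib.h>

struct mapping { int refcount; /* backing state would live here */ };

static struct mapping *singleton;       /* one shared instance */
static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

static struct mapping *get_mapping(void)
{
        struct mapping *m;

        pthread_mutex_lock(&lock);
        m = singleton;
        if (m) {
                m->refcount++;                  /* share the existing instance */
        } else {
                m = calloc(1, sizeof(*m));      /* first user creates it */
                if (m) {
                        m->refcount = 1;
                        singleton = m;
                }
        }
        pthread_mutex_unlock(&lock);
        return m;
}

static void put_mapping(struct mapping *m)
{
        pthread_mutex_lock(&lock);
        if (--m->refcount == 0) {               /* last user tears it down */
                singleton = NULL;
                free(m);
        }
        pthread_mutex_unlock(&lock);
}

The real driver additionally has to cope with the file dying concurrently (it rides on the file's own refcount rather than an external counter), but the create-on-first-use, drop-on-last-put shape is the same.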
@@ -1286,6 +1292,8 @@ struct drm_i915_private {
/* Mutex to protect the above hdcp component related values. */
struct mutex hdcp_comp_mutex;
+ I915_SELFTEST_DECLARE(struct i915_selftest_stash selftest;)
+
/*
* NOTE: This is the dri1/ums dungeon, don't add stuff here. Your patch
* will be rejected. Instead look for a better place.
@@ -1844,14 +1852,7 @@ i915_gem_object_ggtt_pin(struct drm_i915_gem_object *obj,
int i915_gem_object_unbind(struct drm_i915_gem_object *obj,
unsigned long flags);
#define I915_GEM_OBJECT_UNBIND_ACTIVE BIT(0)
-
-struct i915_vma * __must_check
-i915_gem_object_pin(struct drm_i915_gem_object *obj,
- struct i915_address_space *vm,
- const struct i915_ggtt_view *view,
- u64 size,
- u64 alignment,
- u64 flags);
+#define I915_GEM_OBJECT_UNBIND_BARRIER BIT(1)
void i915_gem_runtime_suspend(struct drm_i915_private *dev_priv);
@@ -1864,9 +1865,6 @@ i915_mutex_lock_interruptible(struct drm_device *dev)
int i915_gem_dumb_create(struct drm_file *file_priv,
struct drm_device *dev,
struct drm_mode_create_dumb *args);
-int i915_gem_mmap_gtt(struct drm_file *file_priv, struct drm_device *dev,
- u32 handle, u64 *offset);
-int i915_gem_mmap_gtt_version(void);
int __must_check i915_gem_set_global_seqno(struct drm_device *dev, u32 seqno);
@@ -1876,7 +1874,7 @@ static inline u32 i915_reset_count(struct i915_gpu_error *error)
}
static inline u32 i915_reset_engine_count(struct i915_gpu_error *error,
- struct intel_engine_cs *engine)
+ const struct intel_engine_cs *engine)
{
return atomic_read(&error->reset_engine_count[engine->uabi_class]);
}
@@ -1889,7 +1887,6 @@ void i915_gem_driver_release(struct drm_i915_private *dev_priv);
void i915_gem_suspend(struct drm_i915_private *dev_priv);
void i915_gem_suspend_late(struct drm_i915_private *dev_priv);
void i915_gem_resume(struct drm_i915_private *dev_priv);
-vm_fault_t i915_gem_fault(struct vm_fault *vmf);
int i915_gem_open(struct drm_i915_private *i915, struct drm_file *file);
void i915_gem_release(struct drm_device *dev, struct drm_file *file);
@@ -1905,7 +1902,7 @@ struct dma_buf *i915_gem_prime_export(struct drm_gem_object *gem_obj, int flags)
static inline struct i915_gem_context *
__i915_gem_context_lookup_rcu(struct drm_i915_file_private *file_priv, u32 id)
{
- return idr_find(&file_priv->context_idr, id);
+ return xa_load(&file_priv->context_xa, id);
}
static inline struct i915_gem_context *
@@ -1958,14 +1955,13 @@ const char *i915_cache_level_str(struct drm_i915_private *i915, int type);
int i915_cmd_parser_get_version(struct drm_i915_private *dev_priv);
void intel_engine_init_cmd_parser(struct intel_engine_cs *engine);
void intel_engine_cleanup_cmd_parser(struct intel_engine_cs *engine);
-int intel_engine_cmd_parser(struct i915_gem_context *cxt,
- struct intel_engine_cs *engine,
- struct drm_i915_gem_object *batch_obj,
- u64 user_batch_start,
- u32 batch_start_offset,
- u32 batch_len,
- struct drm_i915_gem_object *shadow_batch_obj,
- u64 shadow_batch_start);
+int intel_engine_cmd_parser(struct intel_engine_cs *engine,
+ struct i915_vma *batch,
+ u32 batch_offset,
+ u32 batch_length,
+ struct i915_vma *shadow,
+ bool trampoline);
+#define I915_CMD_PARSER_TRAMPOLINE_SIZE 8
/* intel_device_info.c */
static inline struct intel_device_info *
@@ -2032,6 +2028,9 @@ int i915_reg_read_ioctl(struct drm_device *dev, void *data,
int remap_io_mapping(struct vm_area_struct *vma,
unsigned long addr, unsigned long pfn, unsigned long size,
struct io_mapping *iomap);
+int remap_io_sg(struct vm_area_struct *vma,
+ unsigned long addr, unsigned long size,
+ struct scatterlist *sgl, resource_size_t iobase);
static inline int intel_hws_csb_write_index(struct drm_i915_private *i915)
{
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 905890e3ac24..94f993e4c12f 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -44,20 +44,14 @@
#include "gem/i915_gem_clflush.h"
#include "gem/i915_gem_context.h"
#include "gem/i915_gem_ioctls.h"
-#include "gem/i915_gem_pm.h"
-#include "gt/intel_context.h"
+#include "gem/i915_gem_mman.h"
+#include "gem/i915_gem_region.h"
#include "gt/intel_engine_user.h"
#include "gt/intel_gt.h"
#include "gt/intel_gt_pm.h"
-#include "gt/intel_gt_requests.h"
-#include "gt/intel_mocs.h"
-#include "gt/intel_reset.h"
-#include "gt/intel_renderstate.h"
-#include "gt/intel_rps.h"
#include "gt/intel_workarounds.h"
#include "i915_drv.h"
-#include "i915_scatterlist.h"
#include "i915_trace.h"
#include "i915_vgpu.h"
@@ -119,33 +113,65 @@ i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data,
int i915_gem_object_unbind(struct drm_i915_gem_object *obj,
unsigned long flags)
{
- struct i915_vma *vma;
+ struct intel_runtime_pm *rpm = &to_i915(obj->base.dev)->runtime_pm;
LIST_HEAD(still_in_list);
- int ret = 0;
+ intel_wakeref_t wakeref;
+ struct i915_vma *vma;
+ int ret;
+
+ if (!atomic_read(&obj->bind_count))
+ return 0;
+
+ /*
+ * As some machines use ACPI to handle runtime-resume callbacks, and
+ * ACPI is quite kmalloc happy, we cannot resume beneath the vm->mutex
+ * as they are required by the shrinker. Ergo, we wake the device up
+ * first just in case.
+ */
+ wakeref = intel_runtime_pm_get(rpm);
+try_again:
+ ret = 0;
spin_lock(&obj->vma.lock);
while (!ret && (vma = list_first_entry_or_null(&obj->vma.list,
struct i915_vma,
obj_link))) {
struct i915_address_space *vm = vma->vm;
- ret = -EBUSY;
+ list_move_tail(&vma->obj_link, &still_in_list);
+ if (!i915_vma_is_bound(vma, I915_VMA_BIND_MASK))
+ continue;
+
+ ret = -EAGAIN;
if (!i915_vm_tryopen(vm))
break;
- list_move_tail(&vma->obj_link, &still_in_list);
+ /* Prevent vma being freed by i915_vma_parked as we unbind */
+ vma = __i915_vma_get(vma);
spin_unlock(&obj->vma.lock);
- if (flags & I915_GEM_OBJECT_UNBIND_ACTIVE ||
- !i915_vma_is_active(vma))
- ret = i915_vma_unbind(vma);
+ if (vma) {
+ ret = -EBUSY;
+ if (flags & I915_GEM_OBJECT_UNBIND_ACTIVE ||
+ !i915_vma_is_active(vma))
+ ret = i915_vma_unbind(vma);
+
+ __i915_vma_put(vma);
+ }
i915_vm_close(vm);
spin_lock(&obj->vma.lock);
}
- list_splice(&still_in_list, &obj->vma.list);
+ list_splice_init(&still_in_list, &obj->vma.list);
spin_unlock(&obj->vma.lock);
+ if (ret == -EAGAIN && flags & I915_GEM_OBJECT_UNBIND_BARRIER) {
+ rcu_barrier(); /* flush the i915_vm_release() */
+ goto try_again;
+ }
+
+ intel_runtime_pm_put(rpm, wakeref);
+
return ret;
}
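Two things are worth noting in the reworked i915_gem_object_unbind() above: the runtime-PM wakeref is taken before vm->mutex because resume paths may allocate and the mutex must stay reclaim-safe, and -EAGAIN (an address space mid-teardown) is handled, when the caller passed I915_GEM_OBJECT_UNBIND_BARRIER, by flushing the deferred release and retrying. The retry shape, reduced to a runnable toy where flush_pending() stands in for rcu_barrier():

#include <errno.h>
#include <stdio.h>

static int pending = 1;                 /* one in-flight vm release */

static void flush_pending(void) { pending = 0; }

static int try_unbind_all(void)
{
        return pending ? -EAGAIN : 0;   /* succeeds once the release ran */
}

int main(void)
{
        int ret;

        do {
                ret = try_unbind_all();
                if (ret == -EAGAIN)
                        flush_pending();
        } while (ret == -EAGAIN);

        printf("unbind returned %d\n", ret);
        return 0;
}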
@@ -175,7 +201,7 @@ i915_gem_phys_pwrite(struct drm_i915_gem_object *obj,
static int
i915_gem_create(struct drm_file *file,
- struct drm_i915_private *dev_priv,
+ struct intel_memory_region *mr,
u64 *size_p,
u32 *handle_p)
{
@@ -184,12 +210,16 @@ i915_gem_create(struct drm_file *file,
u64 size;
int ret;
- size = round_up(*size_p, PAGE_SIZE);
+ GEM_BUG_ON(!is_power_of_2(mr->min_page_size));
+ size = round_up(*size_p, mr->min_page_size);
if (size == 0)
return -EINVAL;
+ /* For most of the ABI (e.g. mmap) we think in system pages */
+ GEM_BUG_ON(!IS_ALIGNED(size, PAGE_SIZE));
+
/* Allocate the new object */
- obj = i915_gem_object_create_shmem(dev_priv, size);
+ obj = i915_gem_object_create_region(mr, size, 0);
if (IS_ERR(obj))
return PTR_ERR(obj);
@@ -209,6 +239,7 @@ i915_gem_dumb_create(struct drm_file *file,
struct drm_device *dev,
struct drm_mode_create_dumb *args)
{
+ enum intel_memory_type mem_type;
int cpp = DIV_ROUND_UP(args->bpp, 8);
u32 format;
@@ -235,7 +266,14 @@ i915_gem_dumb_create(struct drm_file *file,
args->pitch = ALIGN(args->pitch, 4096);
args->size = args->pitch * args->height;
- return i915_gem_create(file, to_i915(dev),
+
+ mem_type = INTEL_MEMORY_SYSTEM;
+ if (HAS_LMEM(to_i915(dev)))
+ mem_type = INTEL_MEMORY_LOCAL;
+
+ return i915_gem_create(file,
+ intel_memory_region_by_type(to_i915(dev),
+ mem_type),
&args->size, &args->handle);
}
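i915_gem_create() now rounds the requested size up to the memory region's minimum page size, which it asserts is a power of two; that makes the rounding a mask operation. Standalone:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

static int is_power_of_2(uint64_t x) { return x && !(x & (x - 1)); }

static uint64_t round_up_pow2(uint64_t size, uint64_t min)
{
        assert(is_power_of_2(min));             /* mirrors the GEM_BUG_ON */
        return (size + min - 1) & ~(min - 1);
}

int main(void)
{
        printf("%llu\n", (unsigned long long)round_up_pow2(5000, 4096));  /* 8192 */
        printf("%llu\n", (unsigned long long)round_up_pow2(8192, 4096));  /* 8192 */
        printf("%llu\n", (unsigned long long)round_up_pow2(1, 65536));    /* 65536 */
        return 0;
}

The 65536 case shows why the code re-checks PAGE_SIZE alignment afterwards: a local-memory region may use a minimum page size larger than the CPU page size (the exact minimum is a property of the region, not assumed here).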
@@ -249,12 +287,14 @@ int
i915_gem_create_ioctl(struct drm_device *dev, void *data,
struct drm_file *file)
{
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct drm_i915_private *i915 = to_i915(dev);
struct drm_i915_gem_create *args = data;
- i915_gem_flush_free_objects(dev_priv);
+ i915_gem_flush_free_objects(i915);
- return i915_gem_create(file, dev_priv,
+ return i915_gem_create(file,
+ intel_memory_region_by_type(i915,
+ INTEL_MEMORY_SYSTEM),
&args->size, &args->handle);
}
@@ -631,11 +671,12 @@ i915_gem_gtt_pwrite_fast(struct drm_i915_gem_object *obj,
user_data += page_length;
offset += page_length;
}
+
+ intel_gt_flush_ggtt_writes(ggtt->vm.gt);
i915_gem_object_flush_frontbuffer(obj, ORIGIN_CPU);
i915_gem_object_unlock_fence(obj, fence);
out_unpin:
- intel_gt_flush_ggtt_writes(ggtt->vm.gt);
if (drm_mm_node_allocated(&node)) {
ggtt->vm.clear_range(&ggtt->vm, node.start, node.size);
remove_mappable_node(ggtt, &node);
@@ -855,7 +896,7 @@ void i915_gem_runtime_suspend(struct drm_i915_private *i915)
list_for_each_entry_safe(obj, on,
&i915->ggtt.userfault_list, userfault_link)
- __i915_gem_object_release_mmap(obj);
+ __i915_gem_object_release_mmap_gtt(obj);
/*
* The fence will be lost when the device powers down. If any were
@@ -892,22 +933,8 @@ i915_gem_object_ggtt_pin(struct drm_i915_gem_object *obj,
u64 alignment,
u64 flags)
{
- struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
- struct i915_address_space *vm = &dev_priv->ggtt.vm;
-
- return i915_gem_object_pin(obj, vm, view, size, alignment,
- flags | PIN_GLOBAL);
-}
-
-struct i915_vma *
-i915_gem_object_pin(struct drm_i915_gem_object *obj,
- struct i915_address_space *vm,
- const struct i915_ggtt_view *view,
- u64 size,
- u64 alignment,
- u64 flags)
-{
- struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
+ struct drm_i915_private *i915 = to_i915(obj->base.dev);
+ struct i915_ggtt *ggtt = &i915->ggtt;
struct i915_vma *vma;
int ret;
@@ -916,17 +943,19 @@ i915_gem_object_pin(struct drm_i915_gem_object *obj,
if (flags & PIN_MAPPABLE &&
(!view || view->type == I915_GGTT_VIEW_NORMAL)) {
- /* If the required space is larger than the available
+ /*
+ * If the required space is larger than the available
* aperture, we will not be able to find a slot for the
* object and unbinding the object now will be in
* vain. Worse, doing so may cause us to ping-pong
* the object in and out of the Global GTT and
* waste a lot of cycles under the mutex.
*/
- if (obj->base.size > dev_priv->ggtt.mappable_end)
+ if (obj->base.size > ggtt->mappable_end)
return ERR_PTR(-E2BIG);
- /* If NONBLOCK is set the caller is optimistically
+ /*
+ * If NONBLOCK is set the caller is optimistically
* trying to cache the full object within the mappable
* aperture, and *must* have a fallback in place for
* situations where we cannot bind the object. We
@@ -942,11 +971,11 @@ i915_gem_object_pin(struct drm_i915_gem_object *obj,
* we could try to minimise harm to others.
*/
if (flags & PIN_NONBLOCK &&
- obj->base.size > dev_priv->ggtt.mappable_end / 2)
+ obj->base.size > ggtt->mappable_end / 2)
return ERR_PTR(-ENOSPC);
}
- vma = i915_vma_instance(obj, vm, view);
+ vma = i915_vma_instance(obj, &ggtt->vm, view);
if (IS_ERR(vma))
return vma;
@@ -956,7 +985,7 @@ i915_gem_object_pin(struct drm_i915_gem_object *obj,
return ERR_PTR(-ENOSPC);
if (flags & PIN_MAPPABLE &&
- vma->fence_size > dev_priv->ggtt.mappable_end / 2)
+ vma->fence_size > ggtt->mappable_end / 2)
return ERR_PTR(-ENOSPC);
}
@@ -966,14 +995,14 @@ i915_gem_object_pin(struct drm_i915_gem_object *obj,
}
if (vma->fence && !i915_gem_object_is_tiled(obj)) {
- mutex_lock(&vma->vm->mutex);
+ mutex_lock(&ggtt->vm.mutex);
ret = i915_vma_revoke_fence(vma);
- mutex_unlock(&vma->vm->mutex);
+ mutex_unlock(&ggtt->vm.mutex);
if (ret)
return ERR_PTR(ret);
}
- ret = i915_vma_pin(vma, size, alignment, flags);
+ ret = i915_vma_pin(vma, size, alignment, flags | PIN_GLOBAL);
if (ret)
return ERR_PTR(ret);
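The PIN_MAPPABLE branch above applies two admission heuristics before even looking for space: an object larger than the whole mappable aperture can never fit (-E2BIG), and under PIN_NONBLOCK an object larger than half the aperture is refused (-ENOSPC) so the caller falls back rather than thrashing the GTT. Modelled standalone; the flag value is illustrative:

#include <errno.h>
#include <stdint.h>
#include <stdio.h>

#define PIN_NONBLOCK (1u << 0)          /* stand-in flag bit */

static int check_mappable(uint64_t obj_size, uint64_t mappable_end,
                          unsigned int flags)
{
        if (obj_size > mappable_end)
                return -E2BIG;          /* can never fit: fail fast */
        if (flags & PIN_NONBLOCK && obj_size > mappable_end / 2)
                return -ENOSPC;         /* likely to thrash: let caller fall back */
        return 0;
}

int main(void)
{
        const uint64_t aperture = 256ull << 20; /* 256 MiB mappable window */

        printf("%d\n", check_mappable(512ull << 20, aperture, 0));
        printf("%d\n", check_mappable(200ull << 20, aperture, PIN_NONBLOCK));
        printf("%d\n", check_mappable(64ull << 20, aperture, PIN_NONBLOCK));
        return 0;
}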
@@ -1054,172 +1083,6 @@ out:
return err;
}
-static int __intel_context_flush_retire(struct intel_context *ce)
-{
- struct intel_timeline *tl;
-
- tl = intel_context_timeline_lock(ce);
- if (IS_ERR(tl))
- return PTR_ERR(tl);
-
- intel_context_timeline_unlock(tl);
- return 0;
-}
-
-static int __intel_engines_record_defaults(struct intel_gt *gt)
-{
- struct i915_request *requests[I915_NUM_ENGINES] = {};
- struct intel_engine_cs *engine;
- enum intel_engine_id id;
- int err = 0;
-
- /*
- * As we reset the gpu during very early sanitisation, the current
- * register state on the GPU should reflect its defaults values.
- * We load a context onto the hw (with restore-inhibit), then switch
- * over to a second context to save that default register state. We
- * can then prime every new context with that state so they all start
- * from the same default HW values.
- */
-
- for_each_engine(engine, gt, id) {
- struct intel_context *ce;
- struct i915_request *rq;
-
- /* We must be able to switch to something! */
- GEM_BUG_ON(!engine->kernel_context);
- engine->serial++; /* force the kernel context switch */
-
- ce = intel_context_create(engine->kernel_context->gem_context,
- engine);
- if (IS_ERR(ce)) {
- err = PTR_ERR(ce);
- goto out;
- }
-
- rq = intel_context_create_request(ce);
- if (IS_ERR(rq)) {
- err = PTR_ERR(rq);
- intel_context_put(ce);
- goto out;
- }
-
- err = intel_engine_emit_ctx_wa(rq);
- if (err)
- goto err_rq;
-
- err = intel_renderstate_emit(rq);
- if (err)
- goto err_rq;
-
-err_rq:
- requests[id] = i915_request_get(rq);
- i915_request_add(rq);
- if (err)
- goto out;
- }
-
- /* Flush the default context image to memory, and enable powersaving. */
- if (intel_gt_wait_for_idle(gt, I915_GEM_IDLE_TIMEOUT) == -ETIME) {
- err = -EIO;
- goto out;
- }
-
- for (id = 0; id < ARRAY_SIZE(requests); id++) {
- struct i915_request *rq;
- struct i915_vma *state;
- void *vaddr;
-
- rq = requests[id];
- if (!rq)
- continue;
-
- GEM_BUG_ON(!test_bit(CONTEXT_ALLOC_BIT,
- &rq->hw_context->flags));
- state = rq->hw_context->state;
- if (!state)
- continue;
-
- /* Serialise with retirement on another CPU */
- err = __intel_context_flush_retire(rq->hw_context);
- if (err)
- goto out;
-
- /* We want to be able to unbind the state from the GGTT */
- GEM_BUG_ON(intel_context_is_pinned(rq->hw_context));
-
- /*
- * As we will hold a reference to the logical state, it will
- * not be torn down with the context, and importantly the
- * object will hold onto its vma (making it possible for a
- * stray GTT write to corrupt our defaults). Unmap the vma
- * from the GTT to prevent such accidents and reclaim the
- * space.
- */
- err = i915_vma_unbind(state);
- if (err)
- goto out;
-
- i915_gem_object_lock(state->obj);
- err = i915_gem_object_set_to_cpu_domain(state->obj, false);
- i915_gem_object_unlock(state->obj);
- if (err)
- goto out;
-
- i915_gem_object_set_cache_coherency(state->obj, I915_CACHE_LLC);
-
- /* Check we can acquire the image of the context state */
- vaddr = i915_gem_object_pin_map(state->obj, I915_MAP_FORCE_WB);
- if (IS_ERR(vaddr)) {
- err = PTR_ERR(vaddr);
- goto out;
- }
-
- rq->engine->default_state = i915_gem_object_get(state->obj);
- i915_gem_object_unpin_map(state->obj);
- }
-
-out:
- /*
- * If we have to abandon now, we expect the engines to be idle
- * and ready to be torn-down. The quickest way we can accomplish
- * this is by declaring ourselves wedged.
- */
- if (err)
- intel_gt_set_wedged(gt);
-
- for (id = 0; id < ARRAY_SIZE(requests); id++) {
- struct intel_context *ce;
- struct i915_request *rq;
-
- rq = requests[id];
- if (!rq)
- continue;
-
- ce = rq->hw_context;
- i915_request_put(rq);
- intel_context_put(ce);
- }
- return err;
-}
-
-static int intel_engines_verify_workarounds(struct intel_gt *gt)
-{
- struct intel_engine_cs *engine;
- enum intel_engine_id id;
- int err = 0;
-
- if (!IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM))
- return 0;
-
- for_each_engine(engine, gt, id) {
- if (intel_engine_verify_workarounds(engine, "load"))
- err = -EIO;
- }
-
- return err;
-}
-
int i915_gem_init(struct drm_i915_private *dev_priv)
{
int ret;
@@ -1229,8 +1092,6 @@ int i915_gem_init(struct drm_i915_private *dev_priv)
mkwrite_device_info(dev_priv)->page_sizes =
I915_GTT_PAGE_SIZE_4K;
- intel_timelines_init(dev_priv);
-
ret = i915_gem_init_userptr(dev_priv);
if (ret)
return ret;
@@ -1238,51 +1099,12 @@ int i915_gem_init(struct drm_i915_private *dev_priv)
intel_uc_fetch_firmwares(&dev_priv->gt.uc);
intel_wopcm_init(&dev_priv->wopcm);
- /* This is just a security blanket to placate dragons.
- * On some systems, we very sporadically observe that the first TLBs
- * used by the CS may be stale, despite us poking the TLB reset. If
- * we hold the forcewake during initialisation these problems
- * just magically go away.
- */
- intel_uncore_forcewake_get(&dev_priv->uncore, FORCEWAKE_ALL);
-
ret = i915_init_ggtt(dev_priv);
if (ret) {
GEM_BUG_ON(ret == -EIO);
goto err_unlock;
}
- intel_gt_init(&dev_priv->gt);
-
- ret = intel_engines_setup(&dev_priv->gt);
- if (ret) {
- GEM_BUG_ON(ret == -EIO);
- goto err_unlock;
- }
-
- ret = i915_gem_init_contexts(dev_priv);
- if (ret) {
- GEM_BUG_ON(ret == -EIO);
- goto err_scratch;
- }
-
- ret = intel_engines_init(&dev_priv->gt);
- if (ret) {
- GEM_BUG_ON(ret == -EIO);
- goto err_context;
- }
-
- intel_uc_init(&dev_priv->gt.uc);
-
- ret = intel_gt_init_hw(&dev_priv->gt);
- if (ret)
- goto err_uc_init;
-
- /* Only when the HW is re-initialised, can we replay the requests */
- ret = intel_gt_resume(&dev_priv->gt);
- if (ret)
- goto err_init_hw;
-
/*
* Despite its name intel_init_clock_gating applies both display
* clock gating workarounds; GT mmio workarounds and the occasional
@@ -1294,23 +1116,9 @@ int i915_gem_init(struct drm_i915_private *dev_priv)
*/
intel_init_clock_gating(dev_priv);
- ret = intel_engines_verify_workarounds(&dev_priv->gt);
+ ret = intel_gt_init(&dev_priv->gt);
if (ret)
- goto err_gt;
-
- ret = __intel_engines_record_defaults(&dev_priv->gt);
- if (ret)
- goto err_gt;
-
- ret = i915_inject_probe_error(dev_priv, -ENODEV);
- if (ret)
- goto err_gt;
-
- ret = i915_inject_probe_error(dev_priv, -EIO);
- if (ret)
- goto err_gt;
-
- intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_ALL);
+ goto err_unlock;
return 0;
@@ -1320,31 +1128,12 @@ int i915_gem_init(struct drm_i915_private *dev_priv)
* HW as irreversibly wedged, but keep enough state around that the
* driver doesn't explode during runtime.
*/
-err_gt:
- intel_gt_set_wedged_on_init(&dev_priv->gt);
- i915_gem_suspend(dev_priv);
- i915_gem_suspend_late(dev_priv);
-
- i915_gem_drain_workqueue(dev_priv);
-err_init_hw:
- intel_uc_fini_hw(&dev_priv->gt.uc);
-err_uc_init:
- if (ret != -EIO) {
- intel_uc_fini(&dev_priv->gt.uc);
- intel_engines_cleanup(&dev_priv->gt);
- }
-err_context:
- if (ret != -EIO)
- i915_gem_driver_release__contexts(dev_priv);
-err_scratch:
- intel_gt_driver_release(&dev_priv->gt);
err_unlock:
- intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_ALL);
+ i915_gem_drain_workqueue(dev_priv);
if (ret != -EIO) {
intel_uc_cleanup_firmwares(&dev_priv->gt.uc);
i915_gem_cleanup_userptr(dev_priv);
- intel_timelines_fini(dev_priv);
}
if (ret == -EIO) {
@@ -1388,27 +1177,24 @@ void i915_gem_driver_remove(struct drm_i915_private *dev_priv)
i915_gem_suspend_late(dev_priv);
intel_gt_driver_remove(&dev_priv->gt);
+ dev_priv->uabi_engines = RB_ROOT;
/* Flush any outstanding unpin_work. */
i915_gem_drain_workqueue(dev_priv);
- intel_uc_fini_hw(&dev_priv->gt.uc);
- intel_uc_fini(&dev_priv->gt.uc);
-
i915_gem_drain_freed_objects(dev_priv);
}
void i915_gem_driver_release(struct drm_i915_private *dev_priv)
{
- intel_engines_cleanup(&dev_priv->gt);
i915_gem_driver_release__contexts(dev_priv);
+
intel_gt_driver_release(&dev_priv->gt);
intel_wa_list_free(&dev_priv->gt_wa_list);
intel_uc_cleanup_firmwares(&dev_priv->gt.uc);
i915_gem_cleanup_userptr(dev_priv);
- intel_timelines_fini(dev_priv);
i915_gem_drain_freed_objects(dev_priv);
@@ -1430,6 +1216,7 @@ static void i915_gem_init__mm(struct drm_i915_private *i915)
void i915_gem_init_early(struct drm_i915_private *dev_priv)
{
i915_gem_init__mm(dev_priv);
+ i915_gem_init__contexts(dev_priv);
spin_lock_init(&dev_priv->fb_tracking.lock);
}
diff --git a/drivers/gpu/drm/i915/i915_gem.h b/drivers/gpu/drm/i915/i915_gem.h
index f6f9675848b8..1753c84d6c0d 100644
--- a/drivers/gpu/drm/i915/i915_gem.h
+++ b/drivers/gpu/drm/i915/i915_gem.h
@@ -30,15 +30,18 @@
#include <drm/drm_drv.h>
+#include "i915_utils.h"
+
struct drm_i915_private;
#ifdef CONFIG_DRM_I915_DEBUG_GEM
-#define GEM_SHOW_DEBUG() (drm_debug & DRM_UT_DRIVER)
+#define GEM_SHOW_DEBUG() drm_debug_enabled(DRM_UT_DRIVER)
#define GEM_BUG_ON(condition) do { if (unlikely((condition))) { \
GEM_TRACE_ERR("%s:%d GEM_BUG_ON(%s)\n", \
__func__, __LINE__, __stringify(condition)); \
+ GEM_TRACE_DUMP(); \
BUG(); \
} \
} while(0)
@@ -68,9 +71,10 @@ struct drm_i915_private;
pr_err(__VA_ARGS__); \
trace_printk(__VA_ARGS__); \
} while (0)
-#define GEM_TRACE_DUMP() ftrace_dump(DUMP_ALL)
+#define GEM_TRACE_DUMP() \
+ do { ftrace_dump(DUMP_ALL); add_taint_for_CI(TAINT_WARN); } while (0)
#define GEM_TRACE_DUMP_ON(expr) \
- do { if (expr) ftrace_dump(DUMP_ALL); } while (0)
+ do { if (expr) GEM_TRACE_DUMP(); } while (0)
#else
#define GEM_TRACE(...) do { } while (0)
#define GEM_TRACE_ERR(...) do { } while (0)
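The GEM_TRACE_DUMP() change above keeps the macro a do { ... } while (0) block even though it now expands to two statements; that wrapper is what lets a multi-statement macro sit safely under an unbraced if/else. A compilable demonstration:

#include <stdio.h>

/* Brace-only version: the ';' after the block would terminate an
 * unbraced if before its else, so this form breaks at call sites. */
#define DUMP_BAD()      { puts("ftrace_dump"); puts("add_taint"); }

/* do/while(0) version: expands to exactly one statement. */
#define DUMP_GOOD()     do { puts("ftrace_dump"); puts("add_taint"); } while (0)

int main(void)
{
        int expr = 0;

        if (expr)
                DUMP_GOOD();    /* swap in DUMP_BAD() and this stops compiling */
        else
                puts("no dump");
        return 0;
}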
diff --git a/drivers/gpu/drm/i915/i915_gem_evict.c b/drivers/gpu/drm/i915/i915_gem_evict.c
index 7e62c310290f..0697bedebeef 100644
--- a/drivers/gpu/drm/i915/i915_gem_evict.c
+++ b/drivers/gpu/drm/i915/i915_gem_evict.c
@@ -359,9 +359,7 @@ int i915_gem_evict_for_node(struct i915_address_space *vm,
*/
int i915_gem_evict_vm(struct i915_address_space *vm)
{
- struct list_head eviction_list;
- struct i915_vma *vma, *next;
- int ret;
+ int ret = 0;
lockdep_assert_held(&vm->mutex);
trace_i915_gem_evict_vm(vm);
@@ -377,21 +375,30 @@ int i915_gem_evict_vm(struct i915_address_space *vm)
return ret;
}
- INIT_LIST_HEAD(&eviction_list);
- list_for_each_entry(vma, &vm->bound_list, vm_link) {
- if (i915_vma_is_pinned(vma))
- continue;
+ do {
+ struct i915_vma *vma, *vn;
+ LIST_HEAD(eviction_list);
- __i915_vma_pin(vma);
- list_add(&vma->evict_link, &eviction_list);
- }
+ list_for_each_entry(vma, &vm->bound_list, vm_link) {
+ if (i915_vma_is_pinned(vma))
+ continue;
+
+ __i915_vma_pin(vma);
+ list_add(&vma->evict_link, &eviction_list);
+ }
+ if (list_empty(&eviction_list))
+ break;
+
+ ret = 0;
+ list_for_each_entry_safe(vma, vn, &eviction_list, evict_link) {
+ __i915_vma_unpin(vma);
+ if (ret == 0)
+ ret = __i915_vma_unbind(vma);
+ if (ret != -EINTR) /* "Get me out of here!" */
+ ret = 0;
+ }
+ } while (ret == 0);
- ret = 0;
- list_for_each_entry_safe(vma, next, &eviction_list, evict_link) {
- __i915_vma_unpin(vma);
- if (ret == 0)
- ret = __i915_vma_unbind(vma);
- }
return ret;
}
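The rewritten i915_gem_evict_vm() above turns a single sweep into a loop: each pass collects the currently-unpinned VMAs (pinning them so they stay put), unbinds them, and repeats until a pass finds nothing, since new bindings can appear between passes; only -EINTR is allowed to abort early. A self-contained model of that fixpoint loop, with the pin/unpin bookkeeping elided:

#include <stdbool.h>
#include <stdio.h>

struct vma { bool pinned; bool bound; };

static int unbind(struct vma *v) { v->bound = false; return 0; }

static int evict_all(struct vma *vmas, int n)
{
        int ret = 0;

        do {
                struct vma *list[8];    /* small fixed batch for the sketch */
                int count = 0, i;

                for (i = 0; i < n; i++) /* gather phase */
                        if (vmas[i].bound && !vmas[i].pinned)
                                list[count++] = &vmas[i];
                if (!count)
                        break;          /* a full pass evicted nothing: done */

                ret = 0;
                for (i = 0; i < count; i++)     /* unbind phase */
                        if (ret == 0)
                                ret = unbind(list[i]);
        } while (ret == 0);

        return ret;
}

int main(void)
{
        struct vma v[3] = { { false, true }, { true, true }, { false, true } };

        printf("evict_all -> %d (the pinned entry survives)\n", evict_all(v, 3));
        return 0;
}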
diff --git a/drivers/gpu/drm/i915/i915_gem_fence_reg.c b/drivers/gpu/drm/i915/i915_gem_fence_reg.c
index 71efccfde122..d9c34a23cd67 100644
--- a/drivers/gpu/drm/i915/i915_gem_fence_reg.c
+++ b/drivers/gpu/drm/i915/i915_gem_fence_reg.c
@@ -412,6 +412,9 @@ int i915_vma_pin_fence(struct i915_vma *vma)
{
int err;
+ if (!vma->fence && !i915_gem_object_is_tiled(vma->obj))
+ return 0;
+
/*
* Note that we revoke fences on runtime suspend. Therefore the user
* must keep the device awake whilst using the fence.
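The new early-out above observes that an untiled mapping with no fence attached needs no fence register at all. The shape, as a trivial runnable model:

#include <stdbool.h>
#include <stdio.h>

struct vma { bool has_fence; bool tiled; };

static int pin_fence(struct vma *v)
{
        if (!v->has_fence && !v->tiled)
                return 0;       /* nothing to pin: skip hw entirely */
        /* ... the driver pins or allocates a fence register here ... */
        return 1;               /* stand-in for the slow path */
}

int main(void)
{
        struct vma linear = { false, false }, tiled = { false, true };

        printf("linear %d, tiled %d\n", pin_fence(&linear), pin_fence(&tiled));
        return 0;
}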
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
index 6239a9adbf14..e039eb56900f 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
@@ -1,26 +1,7 @@
+// SPDX-License-Identifier: MIT
/*
* Copyright © 2010 Daniel Vetter
- * Copyright © 2011-2014 Intel Corporation
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
- * IN THE SOFTWARE.
- *
+ * Copyright © 2020 Intel Corporation
*/
#include <linux/slab.h> /* fault-inject.h is not standalone! */
@@ -45,2096 +26,6 @@
#include "i915_trace.h"
#include "i915_vgpu.h"
-#define I915_GFP_ALLOW_FAIL (GFP_KERNEL | __GFP_RETRY_MAYFAIL | __GFP_NOWARN)
-
-#if IS_ENABLED(CONFIG_DRM_I915_TRACE_GTT)
-#define DBG(...) trace_printk(__VA_ARGS__)
-#else
-#define DBG(...)
-#endif
-
-/**
- * DOC: Global GTT views
- *
- * Background and previous state
- *
- * Historically objects could exist (be bound) in global GTT space only as
- * singular instances with a view representing all of the object's backing pages
- * in a linear fashion. This view will be called a normal view.
- *
- * To support multiple views of the same object, where the number of mapped
- * pages is not equal to the backing store, or where the layout of the pages
- * is not linear, concept of a GGTT view was added.
- *
- * One example of an alternative view is a stereo display driven by a single
- * image. In this case we would have a framebuffer looking like this
- * (2x2 pages):
- *
- * 12
- * 34
- *
- * Above would represent a normal GGTT view as normally mapped for GPU or CPU
- * rendering. In contrast, fed to the display engine would be an alternative
- * view which could look something like this:
- *
- * 1212
- * 3434
- *
- * In this example both the size and layout of pages in the alternative view are
- * different from the normal view.
- *
- * Implementation and usage
- *
- * GGTT views are implemented using VMAs and are distinguished via enum
- * i915_ggtt_view_type and struct i915_ggtt_view.
- *
- * A new flavour of core GEM functions which work with GGTT bound objects were
- * added with the _ggtt_ infix, and sometimes with _view postfix to avoid
- * renaming in large amounts of code. They take the struct i915_ggtt_view
- * parameter encapsulating all metadata required to implement a view.
- *
- * As a helper for callers which are only interested in the normal view,
- * globally const i915_ggtt_view_normal singleton instance exists. All old core
- * GEM API functions, the ones not taking the view parameter, operate on,
- * or with, the normal GGTT view.
- *
- * Code wanting to add or use a new GGTT view needs to:
- *
- * 1. Add a new enum with a suitable name.
- * 2. Extend the metadata in the i915_ggtt_view structure if required.
- * 3. Add support to i915_get_vma_pages().
- *
- * New views are required to build a scatter-gather table from within the
- * i915_get_vma_pages function. This table is stored in the vma.ggtt_view and
- * exists for the lifetime of a VMA.
- *
- * Core API is designed to have copy semantics which means that passed in
- * struct i915_ggtt_view does not need to be persistent (left around after
- * calling the core API functions).
- *
- */
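The removed DOC block's stereo example is concrete enough to run: a 2x2-page framebuffer (pages 1 2 / 3 4) shown through an alternative view that repeats each row's pages, producing the 1212 / 3434 layout the comment describes. The view is nothing more than a different page-index sequence over the same backing store:

#include <stdio.h>

int main(void)
{
        const int pages[2][2] = { { 1, 2 }, { 3, 4 } };  /* normal view */
        int row, rep, col;

        for (row = 0; row < 2; row++) {
                for (rep = 0; rep < 2; rep++)   /* each row's pages appear twice */
                        for (col = 0; col < 2; col++)
                                printf("%d", pages[row][col]);
                printf("\n");                   /* prints 1212 then 3434 */
        }
        return 0;
}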
-
-#define as_pd(x) container_of((x), typeof(struct i915_page_directory), pt)
-
-static int
-i915_get_ggtt_vma_pages(struct i915_vma *vma);
-
-static void gen6_ggtt_invalidate(struct i915_ggtt *ggtt)
-{
- struct intel_uncore *uncore = ggtt->vm.gt->uncore;
-
- /*
- * Note that as an uncached mmio write, this will flush the
- * WCB of the writes into the GGTT before it triggers the invalidate.
- */
- intel_uncore_write_fw(uncore, GFX_FLSH_CNTL_GEN6, GFX_FLSH_CNTL_EN);
-}
-
-static void guc_ggtt_invalidate(struct i915_ggtt *ggtt)
-{
- struct intel_uncore *uncore = ggtt->vm.gt->uncore;
- struct drm_i915_private *i915 = ggtt->vm.i915;
-
- gen6_ggtt_invalidate(ggtt);
-
- if (INTEL_GEN(i915) >= 12)
- intel_uncore_write_fw(uncore, GEN12_GUC_TLB_INV_CR,
- GEN12_GUC_TLB_INV_CR_INVALIDATE);
- else
- intel_uncore_write_fw(uncore, GEN8_GTCR, GEN8_GTCR_INVALIDATE);
-}
-
-static void gmch_ggtt_invalidate(struct i915_ggtt *ggtt)
-{
- intel_gtt_chipset_flush();
-}
-
-static int ppgtt_bind_vma(struct i915_vma *vma,
- enum i915_cache_level cache_level,
- u32 flags)
-{
- u32 pte_flags;
- int err;
-
- if (flags & I915_VMA_ALLOC) {
- err = vma->vm->allocate_va_range(vma->vm,
- vma->node.start, vma->size);
- if (err)
- return err;
-
- set_bit(I915_VMA_ALLOC_BIT, __i915_vma_flags(vma));
- }
-
- /* Applicable to VLV, and gen8+ */
- pte_flags = 0;
- if (i915_gem_object_is_readonly(vma->obj))
- pte_flags |= PTE_READ_ONLY;
-
- GEM_BUG_ON(!test_bit(I915_VMA_ALLOC_BIT, __i915_vma_flags(vma)));
- vma->vm->insert_entries(vma->vm, vma, cache_level, pte_flags);
- wmb();
-
- return 0;
-}
-
-static void ppgtt_unbind_vma(struct i915_vma *vma)
-{
- if (test_and_clear_bit(I915_VMA_ALLOC_BIT, __i915_vma_flags(vma)))
- vma->vm->clear_range(vma->vm, vma->node.start, vma->size);
-}
-
-static int ppgtt_set_pages(struct i915_vma *vma)
-{
- GEM_BUG_ON(vma->pages);
-
- vma->pages = vma->obj->mm.pages;
-
- vma->page_sizes = vma->obj->mm.page_sizes;
-
- return 0;
-}
-
-static void clear_pages(struct i915_vma *vma)
-{
- GEM_BUG_ON(!vma->pages);
-
- if (vma->pages != vma->obj->mm.pages) {
- sg_free_table(vma->pages);
- kfree(vma->pages);
- }
- vma->pages = NULL;
-
- memset(&vma->page_sizes, 0, sizeof(vma->page_sizes));
-}
-
-static u64 gen8_pte_encode(dma_addr_t addr,
- enum i915_cache_level level,
- u32 flags)
-{
- gen8_pte_t pte = addr | _PAGE_PRESENT | _PAGE_RW;
-
- if (unlikely(flags & PTE_READ_ONLY))
- pte &= ~_PAGE_RW;
-
- switch (level) {
- case I915_CACHE_NONE:
- pte |= PPAT_UNCACHED;
- break;
- case I915_CACHE_WT:
- pte |= PPAT_DISPLAY_ELLC;
- break;
- default:
- pte |= PPAT_CACHED;
- break;
- }
-
- return pte;
-}
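gen8_pte_encode() above is pure bit packing: the page address is OR'd with present/writable bits, the write bit is stripped for read-only mappings, and a PPAT cacheability pattern is chosen per cache level. A standalone model; the two low bits match the x86 _PAGE_PRESENT/_PAGE_RW usage in the code, while the PPAT patterns here are placeholders, not the hardware encodings:

#include <stdint.h>
#include <stdio.h>

#define PTE_PRESENT     (1ull << 0)
#define PTE_RW          (1ull << 1)
#define FLAG_READ_ONLY  (1u << 0)       /* caller flag, as in the diff */

/* Placeholder PPAT patterns; the real encodings live in the hw headers. */
#define PPAT_UC         (3ull << 3)
#define PPAT_WT         (1ull << 3)
#define PPAT_WB         (1ull << 7)

enum cache_level { CACHE_NONE, CACHE_WT, CACHE_WB };

static uint64_t pte_encode(uint64_t addr, enum cache_level level,
                           unsigned int flags)
{
        uint64_t pte = addr | PTE_PRESENT | PTE_RW;

        if (flags & FLAG_READ_ONLY)
                pte &= ~PTE_RW;         /* read-only: drop the write bit */

        switch (level) {
        case CACHE_NONE:        pte |= PPAT_UC; break;
        case CACHE_WT:          pte |= PPAT_WT; break;
        default:                pte |= PPAT_WB; break;
        }
        return pte;
}

int main(void)
{
        printf("%#llx\n", (unsigned long long)
               pte_encode(0x1000, CACHE_WB, FLAG_READ_ONLY));
        return 0;
}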
-
-static u64 gen8_pde_encode(const dma_addr_t addr,
- const enum i915_cache_level level)
-{
- u64 pde = _PAGE_PRESENT | _PAGE_RW;
- pde |= addr;
- if (level != I915_CACHE_NONE)
- pde |= PPAT_CACHED_PDE;
- else
- pde |= PPAT_UNCACHED;
- return pde;
-}
-
-static u64 snb_pte_encode(dma_addr_t addr,
- enum i915_cache_level level,
- u32 flags)
-{
- gen6_pte_t pte = GEN6_PTE_VALID;
- pte |= GEN6_PTE_ADDR_ENCODE(addr);
-
- switch (level) {
- case I915_CACHE_L3_LLC:
- case I915_CACHE_LLC:
- pte |= GEN6_PTE_CACHE_LLC;
- break;
- case I915_CACHE_NONE:
- pte |= GEN6_PTE_UNCACHED;
- break;
- default:
- MISSING_CASE(level);
- }
-
- return pte;
-}
-
-static u64 ivb_pte_encode(dma_addr_t addr,
- enum i915_cache_level level,
- u32 flags)
-{
- gen6_pte_t pte = GEN6_PTE_VALID;
- pte |= GEN6_PTE_ADDR_ENCODE(addr);
-
- switch (level) {
- case I915_CACHE_L3_LLC:
- pte |= GEN7_PTE_CACHE_L3_LLC;
- break;
- case I915_CACHE_LLC:
- pte |= GEN6_PTE_CACHE_LLC;
- break;
- case I915_CACHE_NONE:
- pte |= GEN6_PTE_UNCACHED;
- break;
- default:
- MISSING_CASE(level);
- }
-
- return pte;
-}
-
-static u64 byt_pte_encode(dma_addr_t addr,
- enum i915_cache_level level,
- u32 flags)
-{
- gen6_pte_t pte = GEN6_PTE_VALID;
- pte |= GEN6_PTE_ADDR_ENCODE(addr);
-
- if (!(flags & PTE_READ_ONLY))
- pte |= BYT_PTE_WRITEABLE;
-
- if (level != I915_CACHE_NONE)
- pte |= BYT_PTE_SNOOPED_BY_CPU_CACHES;
-
- return pte;
-}
-
-static u64 hsw_pte_encode(dma_addr_t addr,
- enum i915_cache_level level,
- u32 flags)
-{
- gen6_pte_t pte = GEN6_PTE_VALID;
- pte |= HSW_PTE_ADDR_ENCODE(addr);
-
- if (level != I915_CACHE_NONE)
- pte |= HSW_WB_LLC_AGE3;
-
- return pte;
-}
-
-static u64 iris_pte_encode(dma_addr_t addr,
- enum i915_cache_level level,
- u32 flags)
-{
- gen6_pte_t pte = GEN6_PTE_VALID;
- pte |= HSW_PTE_ADDR_ENCODE(addr);
-
- switch (level) {
- case I915_CACHE_NONE:
- break;
- case I915_CACHE_WT:
- pte |= HSW_WT_ELLC_LLC_AGE3;
- break;
- default:
- pte |= HSW_WB_ELLC_LLC_AGE3;
- break;
- }
-
- return pte;
-}
-
-static void stash_init(struct pagestash *stash)
-{
- pagevec_init(&stash->pvec);
- spin_lock_init(&stash->lock);
-}
-
-static struct page *stash_pop_page(struct pagestash *stash)
-{
- struct page *page = NULL;
-
- spin_lock(&stash->lock);
- if (likely(stash->pvec.nr))
- page = stash->pvec.pages[--stash->pvec.nr];
- spin_unlock(&stash->lock);
-
- return page;
-}
-
-static void stash_push_pagevec(struct pagestash *stash, struct pagevec *pvec)
-{
- unsigned int nr;
-
- spin_lock_nested(&stash->lock, SINGLE_DEPTH_NESTING);
-
- nr = min_t(typeof(nr), pvec->nr, pagevec_space(&stash->pvec));
- memcpy(stash->pvec.pages + stash->pvec.nr,
- pvec->pages + pvec->nr - nr,
- sizeof(pvec->pages[0]) * nr);
- stash->pvec.nr += nr;
-
- spin_unlock(&stash->lock);
-
- pvec->nr -= nr;
-}
-
-static struct page *vm_alloc_page(struct i915_address_space *vm, gfp_t gfp)
-{
- struct pagevec stack;
- struct page *page;
-
- if (I915_SELFTEST_ONLY(should_fail(&vm->fault_attr, 1)))
- i915_gem_shrink_all(vm->i915);
-
- page = stash_pop_page(&vm->free_pages);
- if (page)
- return page;
-
- if (!vm->pt_kmap_wc)
- return alloc_page(gfp);
-
- /* Look in our global stash of WC pages... */
- page = stash_pop_page(&vm->i915->mm.wc_stash);
- if (page)
- return page;
-
- /*
- * Otherwise batch allocate pages to amortize cost of set_pages_wc.
- *
- * We have to be careful as page allocation may trigger the shrinker
- * (via direct reclaim) which will fill up the WC stash underneath us.
- * So we add our WB pages into a temporary pvec on the stack and merge
- * them into the WC stash after all the allocations are complete.
- */
- pagevec_init(&stack);
- do {
- struct page *page;
-
- page = alloc_page(gfp);
- if (unlikely(!page))
- break;
-
- stack.pages[stack.nr++] = page;
- } while (pagevec_space(&stack));
-
- if (stack.nr && !set_pages_array_wc(stack.pages, stack.nr)) {
- page = stack.pages[--stack.nr];
-
- /* Merge spare WC pages to the global stash */
- if (stack.nr)
- stash_push_pagevec(&vm->i915->mm.wc_stash, &stack);
-
- /* Push any surplus WC pages onto the local VM stash */
- if (stack.nr)
- stash_push_pagevec(&vm->free_pages, &stack);
- }
-
- /* Return unwanted leftovers */
- if (unlikely(stack.nr)) {
- WARN_ON_ONCE(set_pages_array_wb(stack.pages, stack.nr));
- __pagevec_release(&stack);
- }
-
- return page;
-}
-
-static void vm_free_pages_release(struct i915_address_space *vm,
- bool immediate)
-{
- struct pagevec *pvec = &vm->free_pages.pvec;
- struct pagevec stack;
-
- lockdep_assert_held(&vm->free_pages.lock);
- GEM_BUG_ON(!pagevec_count(pvec));
-
- if (vm->pt_kmap_wc) {
- /*
- * When we use WC, first fill up the global stash and then
- * only if full immediately free the overflow.
- */
- stash_push_pagevec(&vm->i915->mm.wc_stash, pvec);
-
- /*
- * As we have made some room in the VM's free_pages,
- * we can wait for it to fill again. Unless we are
- * inside i915_address_space_fini() and must
- * immediately release the pages!
- */
- if (pvec->nr <= (immediate ? 0 : PAGEVEC_SIZE - 1))
- return;
-
- /*
- * We have to drop the lock to allow ourselves to sleep,
- * so take a copy of the pvec and clear the stash for
- * others to use it as we sleep.
- */
- stack = *pvec;
- pagevec_reinit(pvec);
- spin_unlock(&vm->free_pages.lock);
-
- pvec = &stack;
- set_pages_array_wb(pvec->pages, pvec->nr);
-
- spin_lock(&vm->free_pages.lock);
- }
-
- __pagevec_release(pvec);
-}
-
-static void vm_free_page(struct i915_address_space *vm, struct page *page)
-{
- /*
- * On !llc, we need to change the pages back to WB. We only do so
- * in bulk, so we rarely need to change the page attributes here,
- * but doing so requires a stop_machine() from deep inside arch/x86/mm.
- * To make detection of the possible sleep more likely, use an
- * unconditional might_sleep() for everybody.
- */
- might_sleep();
- spin_lock(&vm->free_pages.lock);
- while (!pagevec_space(&vm->free_pages.pvec))
- vm_free_pages_release(vm, false);
- GEM_BUG_ON(pagevec_count(&vm->free_pages.pvec) >= PAGEVEC_SIZE);
- pagevec_add(&vm->free_pages.pvec, page);
- spin_unlock(&vm->free_pages.lock);
-}
-
-static void i915_address_space_fini(struct i915_address_space *vm)
-{
- spin_lock(&vm->free_pages.lock);
- if (pagevec_count(&vm->free_pages.pvec))
- vm_free_pages_release(vm, true);
- GEM_BUG_ON(pagevec_count(&vm->free_pages.pvec));
- spin_unlock(&vm->free_pages.lock);
-
- drm_mm_takedown(&vm->mm);
-
- mutex_destroy(&vm->mutex);
-}
-
-void __i915_vm_close(struct i915_address_space *vm)
-{
- struct i915_vma *vma, *vn;
-
- mutex_lock(&vm->mutex);
- list_for_each_entry_safe(vma, vn, &vm->bound_list, vm_link) {
- struct drm_i915_gem_object *obj = vma->obj;
-
- /* Keep the obj (and hence the vma) alive as _we_ destroy it */
- if (!kref_get_unless_zero(&obj->base.refcount))
- continue;
-
- atomic_and(~I915_VMA_PIN_MASK, &vma->flags);
- WARN_ON(__i915_vma_unbind(vma));
- i915_vma_destroy(vma);
-
- i915_gem_object_put(obj);
- }
- GEM_BUG_ON(!list_empty(&vm->bound_list));
- mutex_unlock(&vm->mutex);
-}
-
-static void __i915_vm_release(struct work_struct *work)
-{
- struct i915_address_space *vm =
- container_of(work, struct i915_address_space, rcu.work);
-
- vm->cleanup(vm);
- i915_address_space_fini(vm);
-
- kfree(vm);
-}
-
-void i915_vm_release(struct kref *kref)
-{
- struct i915_address_space *vm =
- container_of(kref, struct i915_address_space, ref);
-
- GEM_BUG_ON(i915_is_ggtt(vm));
- trace_i915_ppgtt_release(vm);
-
- queue_rcu_work(vm->i915->wq, &vm->rcu);
-}
-
-static void i915_address_space_init(struct i915_address_space *vm, int subclass)
-{
- kref_init(&vm->ref);
- INIT_RCU_WORK(&vm->rcu, __i915_vm_release);
- atomic_set(&vm->open, 1);
-
- /*
- * The vm->mutex must be reclaim safe (for use in the shrinker).
- * Do a dummy acquire now under fs_reclaim so that any allocation
- * attempt holding the lock is immediately reported by lockdep.
- */
- mutex_init(&vm->mutex);
- lockdep_set_subclass(&vm->mutex, subclass);
- i915_gem_shrinker_taints_mutex(vm->i915, &vm->mutex);
-
- GEM_BUG_ON(!vm->total);
- drm_mm_init(&vm->mm, 0, vm->total);
- vm->mm.head_node.color = I915_COLOR_UNEVICTABLE;
-
- stash_init(&vm->free_pages);
-
- INIT_LIST_HEAD(&vm->bound_list);
-}
-
-static int __setup_page_dma(struct i915_address_space *vm,
- struct i915_page_dma *p,
- gfp_t gfp)
-{
- p->page = vm_alloc_page(vm, gfp | I915_GFP_ALLOW_FAIL);
- if (unlikely(!p->page))
- return -ENOMEM;
-
- p->daddr = dma_map_page_attrs(vm->dma,
- p->page, 0, PAGE_SIZE,
- PCI_DMA_BIDIRECTIONAL,
- DMA_ATTR_SKIP_CPU_SYNC |
- DMA_ATTR_NO_WARN);
- if (unlikely(dma_mapping_error(vm->dma, p->daddr))) {
- vm_free_page(vm, p->page);
- return -ENOMEM;
- }
-
- return 0;
-}
-
-static int setup_page_dma(struct i915_address_space *vm,
- struct i915_page_dma *p)
-{
- return __setup_page_dma(vm, p, __GFP_HIGHMEM);
-}
-
-static void cleanup_page_dma(struct i915_address_space *vm,
- struct i915_page_dma *p)
-{
- dma_unmap_page(vm->dma, p->daddr, PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
- vm_free_page(vm, p->page);
-}
-
-#define kmap_atomic_px(px) kmap_atomic(px_base(px)->page)
-
-static void
-fill_page_dma(const struct i915_page_dma *p, const u64 val, unsigned int count)
-{
- kunmap_atomic(memset64(kmap_atomic(p->page), val, count));
-}
-
-#define fill_px(px, v) fill_page_dma(px_base(px), (v), PAGE_SIZE / sizeof(u64))
-#define fill32_px(px, v) do { \
- u64 v__ = lower_32_bits(v); \
- fill_px((px), v__ << 32 | v__); \
-} while (0)
-
-static int
-setup_scratch_page(struct i915_address_space *vm, gfp_t gfp)
-{
- unsigned long size;
-
- /*
- * In order to utilize 64K pages for an object with a size < 2M, we will
- * need to support a 64K scratch page, given that every 16th entry for a
- * page-table operating in 64K mode must point to a properly aligned 64K
- * region, including any PTEs which happen to point to scratch.
- *
- * This is only relevant for the 48b PPGTT where we support
- * huge-gtt-pages, see also i915_vma_insert(). However, as we share the
- * scratch (read-only) between all vm, we create one 64k scratch page
- * for all.
- */
- size = I915_GTT_PAGE_SIZE_4K;
- if (i915_vm_is_4lvl(vm) &&
- HAS_PAGE_SIZES(vm->i915, I915_GTT_PAGE_SIZE_64K)) {
- size = I915_GTT_PAGE_SIZE_64K;
- gfp |= __GFP_NOWARN;
- }
- gfp |= __GFP_ZERO | __GFP_RETRY_MAYFAIL;
-
- do {
- unsigned int order = get_order(size);
- struct page *page;
- dma_addr_t addr;
-
- page = alloc_pages(gfp, order);
- if (unlikely(!page))
- goto skip;
-
- addr = dma_map_page_attrs(vm->dma,
- page, 0, size,
- PCI_DMA_BIDIRECTIONAL,
- DMA_ATTR_SKIP_CPU_SYNC |
- DMA_ATTR_NO_WARN);
- if (unlikely(dma_mapping_error(vm->dma, addr)))
- goto free_page;
-
- if (unlikely(!IS_ALIGNED(addr, size)))
- goto unmap_page;
-
- vm->scratch[0].base.page = page;
- vm->scratch[0].base.daddr = addr;
- vm->scratch_order = order;
- return 0;
-
-unmap_page:
- dma_unmap_page(vm->dma, addr, size, PCI_DMA_BIDIRECTIONAL);
-free_page:
- __free_pages(page, order);
-skip:
- if (size == I915_GTT_PAGE_SIZE_4K)
- return -ENOMEM;
-
- size = I915_GTT_PAGE_SIZE_4K;
- gfp &= ~__GFP_NOWARN;
- } while (1);
-}
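setup_scratch_page() above degrades gracefully: it first tries a 64K scratch page (when 64K GTT pages are supported) and falls back to 4K on allocation failure or when the DMA address is not naturally aligned, since a 64K page-table entry may only point at a 64K-aligned region. The alignment test, standalone:

#include <stdint.h>
#include <stdio.h>

static int is_aligned(uint64_t addr, uint64_t size)
{
        return (addr & (size - 1)) == 0;        /* size must be a power of two */
}

int main(void)
{
        printf("%d\n", is_aligned(0x10000, 0x10000));   /* 1: usable as 64K scratch */
        printf("%d\n", is_aligned(0x19000, 0x10000));   /* 0: unmap and retry at 4K */
        printf("%d\n", is_aligned(0x19000, 0x1000));    /* 1: 4K-aligned */
        return 0;
}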
-
-static void cleanup_scratch_page(struct i915_address_space *vm)
-{
- struct i915_page_dma *p = px_base(&vm->scratch[0]);
- unsigned int order = vm->scratch_order;
-
- dma_unmap_page(vm->dma, p->daddr, BIT(order) << PAGE_SHIFT,
- PCI_DMA_BIDIRECTIONAL);
- __free_pages(p->page, order);
-}
-
-static void free_scratch(struct i915_address_space *vm)
-{
- int i;
-
- if (!px_dma(&vm->scratch[0])) /* set to 0 on clones */
- return;
-
- for (i = 1; i <= vm->top; i++) {
- if (!px_dma(&vm->scratch[i]))
- break;
- cleanup_page_dma(vm, px_base(&vm->scratch[i]));
- }
-
- cleanup_scratch_page(vm);
-}
-
-static struct i915_page_table *alloc_pt(struct i915_address_space *vm)
-{
- struct i915_page_table *pt;
-
- pt = kmalloc(sizeof(*pt), I915_GFP_ALLOW_FAIL);
- if (unlikely(!pt))
- return ERR_PTR(-ENOMEM);
-
- if (unlikely(setup_page_dma(vm, &pt->base))) {
- kfree(pt);
- return ERR_PTR(-ENOMEM);
- }
-
- atomic_set(&pt->used, 0);
- return pt;
-}
-
-static struct i915_page_directory *__alloc_pd(size_t sz)
-{
- struct i915_page_directory *pd;
-
- pd = kzalloc(sz, I915_GFP_ALLOW_FAIL);
- if (unlikely(!pd))
- return NULL;
-
- spin_lock_init(&pd->lock);
- return pd;
-}
-
-static struct i915_page_directory *alloc_pd(struct i915_address_space *vm)
-{
- struct i915_page_directory *pd;
-
- pd = __alloc_pd(sizeof(*pd));
- if (unlikely(!pd))
- return ERR_PTR(-ENOMEM);
-
- if (unlikely(setup_page_dma(vm, px_base(pd)))) {
- kfree(pd);
- return ERR_PTR(-ENOMEM);
- }
-
- return pd;
-}
-
-static void free_pd(struct i915_address_space *vm, struct i915_page_dma *pd)
-{
- cleanup_page_dma(vm, pd);
- kfree(pd);
-}
-
-#define free_px(vm, px) free_pd(vm, px_base(px))
-
-static inline void
-write_dma_entry(struct i915_page_dma * const pdma,
- const unsigned short idx,
- const u64 encoded_entry)
-{
- u64 * const vaddr = kmap_atomic(pdma->page);
-
- vaddr[idx] = encoded_entry;
- kunmap_atomic(vaddr);
-}
-
-static inline void
-__set_pd_entry(struct i915_page_directory * const pd,
- const unsigned short idx,
- struct i915_page_dma * const to,
- u64 (*encode)(const dma_addr_t, const enum i915_cache_level))
-{
- /* Each thread pre-pins the pd, and we may have a thread per pde. */
- GEM_BUG_ON(atomic_read(px_used(pd)) > 2 * ARRAY_SIZE(pd->entry));
-
- atomic_inc(px_used(pd));
- pd->entry[idx] = to;
- write_dma_entry(px_base(pd), idx, encode(to->daddr, I915_CACHE_LLC));
-}
-
-#define set_pd_entry(pd, idx, to) \
- __set_pd_entry((pd), (idx), px_base(to), gen8_pde_encode)
-
-static inline void
-clear_pd_entry(struct i915_page_directory * const pd,
- const unsigned short idx,
- const struct i915_page_scratch * const scratch)
-{
- GEM_BUG_ON(atomic_read(px_used(pd)) == 0);
-
- write_dma_entry(px_base(pd), idx, scratch->encode);
- pd->entry[idx] = NULL;
- atomic_dec(px_used(pd));
-}
-
-static bool
-release_pd_entry(struct i915_page_directory * const pd,
- const unsigned short idx,
- struct i915_page_table * const pt,
- const struct i915_page_scratch * const scratch)
-{
- bool free = false;
-
- if (atomic_add_unless(&pt->used, -1, 1))
- return false;
-
- spin_lock(&pd->lock);
- if (atomic_dec_and_test(&pt->used)) {
- clear_pd_entry(pd, idx, scratch);
- free = true;
- }
- spin_unlock(&pd->lock);
-
- return free;
-}
-
-static void gen8_ppgtt_notify_vgt(struct i915_ppgtt *ppgtt, bool create)
-{
- struct drm_i915_private *dev_priv = ppgtt->vm.i915;
- enum vgt_g2v_type msg;
- int i;
-
- if (create)
- atomic_inc(px_used(ppgtt->pd)); /* never remove */
- else
- atomic_dec(px_used(ppgtt->pd));
-
- mutex_lock(&dev_priv->vgpu.lock);
-
- if (i915_vm_is_4lvl(&ppgtt->vm)) {
- const u64 daddr = px_dma(ppgtt->pd);
-
- I915_WRITE(vgtif_reg(pdp[0].lo), lower_32_bits(daddr));
- I915_WRITE(vgtif_reg(pdp[0].hi), upper_32_bits(daddr));
-
- msg = (create ? VGT_G2V_PPGTT_L4_PAGE_TABLE_CREATE :
- VGT_G2V_PPGTT_L4_PAGE_TABLE_DESTROY);
- } else {
- for (i = 0; i < GEN8_3LVL_PDPES; i++) {
- const u64 daddr = i915_page_dir_dma_addr(ppgtt, i);
-
- I915_WRITE(vgtif_reg(pdp[i].lo), lower_32_bits(daddr));
- I915_WRITE(vgtif_reg(pdp[i].hi), upper_32_bits(daddr));
- }
-
- msg = (create ? VGT_G2V_PPGTT_L3_PAGE_TABLE_CREATE :
- VGT_G2V_PPGTT_L3_PAGE_TABLE_DESTROY);
- }
-
- /* g2v_notify atomically (via hv trap) consumes the message packet. */
- I915_WRITE(vgtif_reg(g2v_notify), msg);
-
- mutex_unlock(&dev_priv->vgpu.lock);
-}
-
-/* Index shifts into the pagetable are offset by GEN8_PTE_SHIFT [12] */
-#define GEN8_PAGE_SIZE (SZ_4K) /* page and page-directory sizes are the same */
-#define GEN8_PTE_SHIFT (ilog2(GEN8_PAGE_SIZE))
-#define GEN8_PDES (GEN8_PAGE_SIZE / sizeof(u64))
-#define gen8_pd_shift(lvl) ((lvl) * ilog2(GEN8_PDES))
-#define gen8_pd_index(i, lvl) i915_pde_index((i), gen8_pd_shift(lvl))
-#define __gen8_pte_shift(lvl) (GEN8_PTE_SHIFT + gen8_pd_shift(lvl))
-#define __gen8_pte_index(a, lvl) i915_pde_index((a), __gen8_pte_shift(lvl))
-
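-/*
- * Worked example (illustration only): with GEN8_PDES == 512 each level
- * consumes 9 bits of the page-frame index, so a GPU address decodes level
- * by level with the macros above. A standalone sketch of the same
- * arithmetic, assuming a 4K page and 9-bit levels:
- *
- *	#include <stdint.h>
- *	#include <stdio.h>
- *
- *	int main(void)
- *	{
- *		uint64_t idx = 0x7f00deadb000ull >> 12; // GEN8_PTE_SHIFT
- *		int lvl;
- *
- *		for (lvl = 3; lvl >= 0; lvl--)	// pml4, pdp, pd, pt
- *			printf("lvl%d index = %llu\n", lvl,
- *			       (unsigned long long)((idx >> (9 * lvl)) & 511));
- *		return 0;
- *	}
- */
-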
-static inline unsigned int
-gen8_pd_range(u64 start, u64 end, int lvl, unsigned int *idx)
-{
- const int shift = gen8_pd_shift(lvl);
- const u64 mask = ~0ull << gen8_pd_shift(lvl + 1);
-
- GEM_BUG_ON(start >= end);
- end += ~mask >> gen8_pd_shift(1);
-
- *idx = i915_pde_index(start, shift);
- if ((start ^ end) & mask)
- return GEN8_PDES - *idx;
- else
- return i915_pde_index(end, shift) - *idx;
-}
-
-static inline bool gen8_pd_contains(u64 start, u64 end, int lvl)
-{
- const u64 mask = ~0ull << gen8_pd_shift(lvl + 1);
-
- GEM_BUG_ON(start >= end);
- return (start ^ end) & mask && (start & ~mask) == 0;
-}
-
-static inline unsigned int gen8_pt_count(u64 start, u64 end)
-{
- GEM_BUG_ON(start >= end);
- if ((start ^ end) >> gen8_pd_shift(1))
- return GEN8_PDES - (start & (GEN8_PDES - 1));
- else
- return end - start;
-}
-
-static inline unsigned int gen8_pd_top_count(const struct i915_address_space *vm)
-{
- unsigned int shift = __gen8_pte_shift(vm->top);
- return (vm->total + (1ull << shift) - 1) >> shift;
-}
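-/*
- * Worked example: for a 4-level vm (top == 3), __gen8_pte_shift(3) ==
- * 12 + 3 * 9 == 39, so a 48b address space gives 2^48 >> 39 == 512
- * top-level entries; for a 3-level vm (top == 2) the shift is 30, and a
- * 4GB address space gives the expected 4 PDP entries.
- */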
-
-static inline struct i915_page_directory *
-gen8_pdp_for_page_index(struct i915_address_space * const vm, const u64 idx)
-{
- struct i915_ppgtt * const ppgtt = i915_vm_to_ppgtt(vm);
-
- if (vm->top == 2)
- return ppgtt->pd;
- else
- return i915_pd_entry(ppgtt->pd, gen8_pd_index(idx, vm->top));
-}
-
-static inline struct i915_page_directory *
-gen8_pdp_for_page_address(struct i915_address_space * const vm, const u64 addr)
-{
- return gen8_pdp_for_page_index(vm, addr >> GEN8_PTE_SHIFT);
-}
-
-static void __gen8_ppgtt_cleanup(struct i915_address_space *vm,
- struct i915_page_directory *pd,
- int count, int lvl)
-{
- if (lvl) {
- void **pde = pd->entry;
-
- do {
- if (!*pde)
- continue;
-
- __gen8_ppgtt_cleanup(vm, *pde, GEN8_PDES, lvl - 1);
- } while (pde++, --count);
- }
-
- free_px(vm, pd);
-}
-
-static void gen8_ppgtt_cleanup(struct i915_address_space *vm)
-{
- struct i915_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);
-
- if (intel_vgpu_active(vm->i915))
- gen8_ppgtt_notify_vgt(ppgtt, false);
-
- __gen8_ppgtt_cleanup(vm, ppgtt->pd, gen8_pd_top_count(vm), vm->top);
- free_scratch(vm);
-}
-
-static u64 __gen8_ppgtt_clear(struct i915_address_space * const vm,
- struct i915_page_directory * const pd,
- u64 start, const u64 end, int lvl)
-{
- const struct i915_page_scratch * const scratch = &vm->scratch[lvl];
- unsigned int idx, len;
-
- GEM_BUG_ON(end > vm->total >> GEN8_PTE_SHIFT);
-
- len = gen8_pd_range(start, end, lvl--, &idx);
- DBG("%s(%p):{ lvl:%d, start:%llx, end:%llx, idx:%d, len:%d, used:%d }\n",
- __func__, vm, lvl + 1, start, end,
- idx, len, atomic_read(px_used(pd)));
- GEM_BUG_ON(!len || len >= atomic_read(px_used(pd)));
-
- do {
- struct i915_page_table *pt = pd->entry[idx];
-
- if (atomic_fetch_inc(&pt->used) >> gen8_pd_shift(1) &&
- gen8_pd_contains(start, end, lvl)) {
- DBG("%s(%p):{ lvl:%d, idx:%d, start:%llx, end:%llx } removing pd\n",
- __func__, vm, lvl + 1, idx, start, end);
- clear_pd_entry(pd, idx, scratch);
- __gen8_ppgtt_cleanup(vm, as_pd(pt), I915_PDES, lvl);
- start += (u64)I915_PDES << gen8_pd_shift(lvl);
- continue;
- }
-
- if (lvl) {
- start = __gen8_ppgtt_clear(vm, as_pd(pt),
- start, end, lvl);
- } else {
- unsigned int count;
- u64 *vaddr;
-
- count = gen8_pt_count(start, end);
- DBG("%s(%p):{ lvl:%d, start:%llx, end:%llx, idx:%d, len:%d, used:%d } removing pte\n",
- __func__, vm, lvl, start, end,
- gen8_pd_index(start, 0), count,
- atomic_read(&pt->used));
- GEM_BUG_ON(!count || count >= atomic_read(&pt->used));
-
- vaddr = kmap_atomic_px(pt);
- memset64(vaddr + gen8_pd_index(start, 0),
- vm->scratch[0].encode,
- count);
- kunmap_atomic(vaddr);
-
- atomic_sub(count, &pt->used);
- start += count;
- }
-
- if (release_pd_entry(pd, idx, pt, scratch))
- free_px(vm, pt);
- } while (idx++, --len);
-
- return start;
-}
-
-static void gen8_ppgtt_clear(struct i915_address_space *vm,
- u64 start, u64 length)
-{
- GEM_BUG_ON(!IS_ALIGNED(start, BIT_ULL(GEN8_PTE_SHIFT)));
- GEM_BUG_ON(!IS_ALIGNED(length, BIT_ULL(GEN8_PTE_SHIFT)));
- GEM_BUG_ON(range_overflows(start, length, vm->total));
-
- start >>= GEN8_PTE_SHIFT;
- length >>= GEN8_PTE_SHIFT;
- GEM_BUG_ON(length == 0);
-
- __gen8_ppgtt_clear(vm, i915_vm_to_ppgtt(vm)->pd,
- start, start + length, vm->top);
-}
-
-static int __gen8_ppgtt_alloc(struct i915_address_space * const vm,
- struct i915_page_directory * const pd,
- u64 * const start, const u64 end, int lvl)
-{
- const struct i915_page_scratch * const scratch = &vm->scratch[lvl];
- struct i915_page_table *alloc = NULL;
- unsigned int idx, len;
- int ret = 0;
-
- GEM_BUG_ON(end > vm->total >> GEN8_PTE_SHIFT);
-
- len = gen8_pd_range(*start, end, lvl--, &idx);
- DBG("%s(%p):{ lvl:%d, start:%llx, end:%llx, idx:%d, len:%d, used:%d }\n",
- __func__, vm, lvl + 1, *start, end,
- idx, len, atomic_read(px_used(pd)));
- GEM_BUG_ON(!len || (idx + len - 1) >> gen8_pd_shift(1));
-
- spin_lock(&pd->lock);
- GEM_BUG_ON(!atomic_read(px_used(pd))); /* Must be pinned! */
- do {
- struct i915_page_table *pt = pd->entry[idx];
-
- if (!pt) {
- spin_unlock(&pd->lock);
-
- DBG("%s(%p):{ lvl:%d, idx:%d } allocating new tree\n",
- __func__, vm, lvl + 1, idx);
-
- pt = fetch_and_zero(&alloc);
- if (lvl) {
- if (!pt) {
- pt = &alloc_pd(vm)->pt;
- if (IS_ERR(pt)) {
- ret = PTR_ERR(pt);
- goto out;
- }
- }
-
- fill_px(pt, vm->scratch[lvl].encode);
- } else {
- if (!pt) {
- pt = alloc_pt(vm);
- if (IS_ERR(pt)) {
- ret = PTR_ERR(pt);
- goto out;
- }
- }
-
- if (intel_vgpu_active(vm->i915) ||
- gen8_pt_count(*start, end) < I915_PDES)
- fill_px(pt, vm->scratch[lvl].encode);
- }
-
- spin_lock(&pd->lock);
- if (likely(!pd->entry[idx]))
- set_pd_entry(pd, idx, pt);
- else
- alloc = pt, pt = pd->entry[idx];
- }
-
- if (lvl) {
- atomic_inc(&pt->used);
- spin_unlock(&pd->lock);
-
- ret = __gen8_ppgtt_alloc(vm, as_pd(pt),
- start, end, lvl);
- if (unlikely(ret)) {
- if (release_pd_entry(pd, idx, pt, scratch))
- free_px(vm, pt);
- goto out;
- }
-
- spin_lock(&pd->lock);
- atomic_dec(&pt->used);
- GEM_BUG_ON(!atomic_read(&pt->used));
- } else {
- unsigned int count = gen8_pt_count(*start, end);
-
- DBG("%s(%p):{ lvl:%d, start:%llx, end:%llx, idx:%d, len:%d, used:%d } inserting pte\n",
- __func__, vm, lvl, *start, end,
- gen8_pd_index(*start, 0), count,
- atomic_read(&pt->used));
-
- atomic_add(count, &pt->used);
- /* All other pdes may be simultaneously removed */
- GEM_BUG_ON(atomic_read(&pt->used) > 2 * I915_PDES);
- *start += count;
- }
- } while (idx++, --len);
- spin_unlock(&pd->lock);
-out:
- if (alloc)
- free_px(vm, alloc);
- return ret;
-}
-
-static int gen8_ppgtt_alloc(struct i915_address_space *vm,
- u64 start, u64 length)
-{
- u64 from;
- int err;
-
- GEM_BUG_ON(!IS_ALIGNED(start, BIT_ULL(GEN8_PTE_SHIFT)));
- GEM_BUG_ON(!IS_ALIGNED(length, BIT_ULL(GEN8_PTE_SHIFT)));
- GEM_BUG_ON(range_overflows(start, length, vm->total));
-
- start >>= GEN8_PTE_SHIFT;
- length >>= GEN8_PTE_SHIFT;
- GEM_BUG_ON(length == 0);
- from = start;
-
- err = __gen8_ppgtt_alloc(vm, i915_vm_to_ppgtt(vm)->pd,
- &start, start + length, vm->top);
- if (unlikely(err && from != start))
- __gen8_ppgtt_clear(vm, i915_vm_to_ppgtt(vm)->pd,
- from, start, vm->top);
-
- return err;
-}
-
-static inline struct sgt_dma {
- struct scatterlist *sg;
- dma_addr_t dma, max;
-} sgt_dma(struct i915_vma *vma) {
- struct scatterlist *sg = vma->pages->sgl;
- dma_addr_t addr = sg_dma_address(sg);
- return (struct sgt_dma) { sg, addr, addr + sg->length };
-}
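-/*
- * A sketch of how the callers below consume sgt_dma() -- emit_pte() and
- * more_ptes_to_write are placeholders, not driver functions. The cursor
- * walks one 4K page at a time and hops to the next DMA segment whenever
- * it reaches the end of the current one:
- *
- *	struct sgt_dma it = sgt_dma(vma);
- *
- *	while (more_ptes_to_write) {
- *		emit_pte(it.dma);
- *		it.dma += I915_GTT_PAGE_SIZE;
- *		if (it.dma >= it.max) {
- *			it.sg = __sg_next(it.sg);
- *			if (!it.sg)
- *				break;
- *			it.dma = sg_dma_address(it.sg);
- *			it.max = it.dma + it.sg->length;
- *		}
- *	}
- */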
-
-static __always_inline u64
-gen8_ppgtt_insert_pte(struct i915_ppgtt *ppgtt,
- struct i915_page_directory *pdp,
- struct sgt_dma *iter,
- u64 idx,
- enum i915_cache_level cache_level,
- u32 flags)
-{
- struct i915_page_directory *pd;
- const gen8_pte_t pte_encode = gen8_pte_encode(0, cache_level, flags);
- gen8_pte_t *vaddr;
-
- pd = i915_pd_entry(pdp, gen8_pd_index(idx, 2));
- vaddr = kmap_atomic_px(i915_pt_entry(pd, gen8_pd_index(idx, 1)));
- do {
- vaddr[gen8_pd_index(idx, 0)] = pte_encode | iter->dma;
-
- iter->dma += I915_GTT_PAGE_SIZE;
- if (iter->dma >= iter->max) {
- iter->sg = __sg_next(iter->sg);
- if (!iter->sg) {
- idx = 0;
- break;
- }
-
- iter->dma = sg_dma_address(iter->sg);
- iter->max = iter->dma + iter->sg->length;
- }
-
- if (gen8_pd_index(++idx, 0) == 0) {
- if (gen8_pd_index(idx, 1) == 0) {
- /* Limited by sg length for 3lvl */
- if (gen8_pd_index(idx, 2) == 0)
- break;
-
- pd = pdp->entry[gen8_pd_index(idx, 2)];
- }
-
- kunmap_atomic(vaddr);
- vaddr = kmap_atomic_px(i915_pt_entry(pd, gen8_pd_index(idx, 1)));
- }
- } while (1);
- kunmap_atomic(vaddr);
-
- return idx;
-}
-
-static void gen8_ppgtt_insert_huge(struct i915_vma *vma,
- struct sgt_dma *iter,
- enum i915_cache_level cache_level,
- u32 flags)
-{
- const gen8_pte_t pte_encode = gen8_pte_encode(0, cache_level, flags);
- u64 start = vma->node.start;
- dma_addr_t rem = iter->sg->length;
-
- GEM_BUG_ON(!i915_vm_is_4lvl(vma->vm));
-
- do {
- struct i915_page_directory * const pdp =
- gen8_pdp_for_page_address(vma->vm, start);
- struct i915_page_directory * const pd =
- i915_pd_entry(pdp, __gen8_pte_index(start, 2));
- gen8_pte_t encode = pte_encode;
- unsigned int maybe_64K = -1;
- unsigned int page_size;
- gen8_pte_t *vaddr;
- u16 index;
-
- if (vma->page_sizes.sg & I915_GTT_PAGE_SIZE_2M &&
- IS_ALIGNED(iter->dma, I915_GTT_PAGE_SIZE_2M) &&
- rem >= I915_GTT_PAGE_SIZE_2M &&
- !__gen8_pte_index(start, 0)) {
- index = __gen8_pte_index(start, 1);
- encode |= GEN8_PDE_PS_2M;
- page_size = I915_GTT_PAGE_SIZE_2M;
-
- vaddr = kmap_atomic_px(pd);
- } else {
- struct i915_page_table *pt =
- i915_pt_entry(pd, __gen8_pte_index(start, 1));
-
- index = __gen8_pte_index(start, 0);
- page_size = I915_GTT_PAGE_SIZE;
-
- if (!index &&
- vma->page_sizes.sg & I915_GTT_PAGE_SIZE_64K &&
- IS_ALIGNED(iter->dma, I915_GTT_PAGE_SIZE_64K) &&
- (IS_ALIGNED(rem, I915_GTT_PAGE_SIZE_64K) ||
- rem >= (I915_PDES - index) * I915_GTT_PAGE_SIZE))
- maybe_64K = __gen8_pte_index(start, 1);
-
- vaddr = kmap_atomic_px(pt);
- }
-
- do {
- GEM_BUG_ON(iter->sg->length < page_size);
- vaddr[index++] = encode | iter->dma;
-
- start += page_size;
- iter->dma += page_size;
- rem -= page_size;
- if (iter->dma >= iter->max) {
- iter->sg = __sg_next(iter->sg);
- if (!iter->sg)
- break;
-
- rem = iter->sg->length;
- iter->dma = sg_dma_address(iter->sg);
- iter->max = iter->dma + rem;
-
- if (maybe_64K != -1 && index < I915_PDES &&
- !(IS_ALIGNED(iter->dma, I915_GTT_PAGE_SIZE_64K) &&
- (IS_ALIGNED(rem, I915_GTT_PAGE_SIZE_64K) ||
- rem >= (I915_PDES - index) * I915_GTT_PAGE_SIZE)))
- maybe_64K = -1;
-
- if (unlikely(!IS_ALIGNED(iter->dma, page_size)))
- break;
- }
- } while (rem >= page_size && index < I915_PDES);
-
- kunmap_atomic(vaddr);
-
- /*
- * Is it safe to mark the 2M block as 64K? -- Either we have
- * filled the whole page-table with 64K entries, or we have
- * filled part of it and reached the end of the sg table with
- * enough padding.
- */
- if (maybe_64K != -1 &&
- (index == I915_PDES ||
- (i915_vm_has_scratch_64K(vma->vm) &&
- !iter->sg && IS_ALIGNED(vma->node.start +
- vma->node.size,
- I915_GTT_PAGE_SIZE_2M)))) {
- vaddr = kmap_atomic_px(pd);
- vaddr[maybe_64K] |= GEN8_PDE_IPS_64K;
- kunmap_atomic(vaddr);
- page_size = I915_GTT_PAGE_SIZE_64K;
-
- /*
- * We write all 4K page entries, even when using 64K
- * pages. In order to verify that the HW isn't cheating
- * by using the 4K PTE instead of the 64K PTE, we want
- * to remove all the surplus entries. If the HW skipped
- * the 64K PTE, it will read/write into the scratch page
- * instead - which we detect as missing results during
- * selftests.
- */
- if (I915_SELFTEST_ONLY(vma->vm->scrub_64K)) {
- u16 i;
-
- encode = vma->vm->scratch[0].encode;
- vaddr = kmap_atomic_px(i915_pt_entry(pd, maybe_64K));
-
- for (i = 1; i < index; i += 16)
- memset64(vaddr + i, encode, 15);
-
- kunmap_atomic(vaddr);
- }
- }
-
- vma->page_sizes.gtt |= page_size;
- } while (iter->sg);
-}
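-/*
- * Summary of the checks above: a 2M PDE is written only when the GPU
- * address sits on a 2M boundary and the DMA segment is 2M aligned with at
- * least 2M remaining; otherwise 4K PTEs are written, and the 64K IPS hint
- * is applied afterwards when a whole page table was filled from a
- * suitably aligned segment (or scratch padding makes that safe).
- */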
-
-static void gen8_ppgtt_insert(struct i915_address_space *vm,
- struct i915_vma *vma,
- enum i915_cache_level cache_level,
- u32 flags)
-{
- struct i915_ppgtt * const ppgtt = i915_vm_to_ppgtt(vm);
- struct sgt_dma iter = sgt_dma(vma);
-
- if (vma->page_sizes.sg > I915_GTT_PAGE_SIZE) {
- gen8_ppgtt_insert_huge(vma, &iter, cache_level, flags);
- } else {
- u64 idx = vma->node.start >> GEN8_PTE_SHIFT;
-
- do {
- struct i915_page_directory * const pdp =
- gen8_pdp_for_page_index(vm, idx);
-
- idx = gen8_ppgtt_insert_pte(ppgtt, pdp, &iter, idx,
- cache_level, flags);
- } while (idx);
-
- vma->page_sizes.gtt = I915_GTT_PAGE_SIZE;
- }
-}
-
-static int gen8_init_scratch(struct i915_address_space *vm)
-{
- int ret;
- int i;
-
- /*
- * If everybody agrees not to write into the scratch page,
- * we can reuse it for all vm, keeping contexts and processes separate.
- */
- if (vm->has_read_only &&
- vm->i915->kernel_context &&
- vm->i915->kernel_context->vm) {
- struct i915_address_space *clone =
- rcu_dereference_protected(vm->i915->kernel_context->vm,
- true); /* static */
-
- GEM_BUG_ON(!clone->has_read_only);
-
- vm->scratch_order = clone->scratch_order;
- memcpy(vm->scratch, clone->scratch, sizeof(vm->scratch));
- px_dma(&vm->scratch[0]) = 0; /* no xfer of ownership */
- return 0;
- }
-
- ret = setup_scratch_page(vm, __GFP_HIGHMEM);
- if (ret)
- return ret;
-
- vm->scratch[0].encode =
- gen8_pte_encode(px_dma(&vm->scratch[0]),
- I915_CACHE_LLC, vm->has_read_only);
-
- for (i = 1; i <= vm->top; i++) {
- if (unlikely(setup_page_dma(vm, px_base(&vm->scratch[i]))))
- goto free_scratch;
-
- fill_px(&vm->scratch[i], vm->scratch[i - 1].encode);
- vm->scratch[i].encode =
- gen8_pde_encode(px_dma(&vm->scratch[i]),
- I915_CACHE_LLC);
- }
-
- return 0;
-
-free_scratch:
- free_scratch(vm);
- return -ENOMEM;
-}
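-/*
- * Note: the scratch hierarchy built above is chained by level --
- * scratch[0] is a plain data page, scratch[1] is a page table whose PTEs
- * all point at scratch[0], scratch[2] is a page directory whose PDEs all
- * point at scratch[1], and so on up to vm->top. An unpopulated entry at
- * level N can then simply point at scratch[N], and any stray access
- * resolves harmlessly to the scratch data page.
- */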
-
-static int gen8_preallocate_top_level_pdp(struct i915_ppgtt *ppgtt)
-{
- struct i915_address_space *vm = &ppgtt->vm;
- struct i915_page_directory *pd = ppgtt->pd;
- unsigned int idx;
-
- GEM_BUG_ON(vm->top != 2);
- GEM_BUG_ON(gen8_pd_top_count(vm) != GEN8_3LVL_PDPES);
-
- for (idx = 0; idx < GEN8_3LVL_PDPES; idx++) {
- struct i915_page_directory *pde;
-
- pde = alloc_pd(vm);
- if (IS_ERR(pde))
- return PTR_ERR(pde);
-
- fill_px(pde, vm->scratch[1].encode);
- set_pd_entry(pd, idx, pde);
- atomic_inc(px_used(pde)); /* keep pinned */
- }
- wmb();
-
- return 0;
-}
-
-static void ppgtt_init(struct i915_ppgtt *ppgtt, struct intel_gt *gt)
-{
- struct drm_i915_private *i915 = gt->i915;
-
- ppgtt->vm.gt = gt;
- ppgtt->vm.i915 = i915;
- ppgtt->vm.dma = &i915->drm.pdev->dev;
- ppgtt->vm.total = BIT_ULL(INTEL_INFO(i915)->ppgtt_size);
-
- i915_address_space_init(&ppgtt->vm, VM_CLASS_PPGTT);
-
- ppgtt->vm.vma_ops.bind_vma = ppgtt_bind_vma;
- ppgtt->vm.vma_ops.unbind_vma = ppgtt_unbind_vma;
- ppgtt->vm.vma_ops.set_pages = ppgtt_set_pages;
- ppgtt->vm.vma_ops.clear_pages = clear_pages;
-}
-
-static struct i915_page_directory *
-gen8_alloc_top_pd(struct i915_address_space *vm)
-{
- const unsigned int count = gen8_pd_top_count(vm);
- struct i915_page_directory *pd;
-
- GEM_BUG_ON(count > ARRAY_SIZE(pd->entry));
-
- pd = __alloc_pd(offsetof(typeof(*pd), entry[count]));
- if (unlikely(!pd))
- return ERR_PTR(-ENOMEM);
-
- if (unlikely(setup_page_dma(vm, px_base(pd)))) {
- kfree(pd);
- return ERR_PTR(-ENOMEM);
- }
-
- fill_page_dma(px_base(pd), vm->scratch[vm->top].encode, count);
- atomic_inc(px_used(pd)); /* mark as pinned */
- return pd;
-}
-
-/*
- * GEN8 legacy ppgtt programming is accomplished through at most 4 PDP
- * registers, with a net effect resembling a 2-level page table in normal x86
- * terms. Each PDP represents 1GB of memory; 4 * 512 * 512 * 4096 = 4GB
- * covers the whole legacy 32b address space.
- */
-static struct i915_ppgtt *gen8_ppgtt_create(struct drm_i915_private *i915)
-{
- struct i915_ppgtt *ppgtt;
- int err;
-
- ppgtt = kzalloc(sizeof(*ppgtt), GFP_KERNEL);
- if (!ppgtt)
- return ERR_PTR(-ENOMEM);
-
- ppgtt_init(ppgtt, &i915->gt);
- ppgtt->vm.top = i915_vm_is_4lvl(&ppgtt->vm) ? 3 : 2;
-
- /*
- * From bdw, there is hw support for read-only pages in the PPGTT.
- *
- * Gen11 has HSDES#:1807136187 unresolved. Disable ro support
- * for now.
- *
- * Gen12 has inherited the same read-only fault issue from gen11.
- */
- ppgtt->vm.has_read_only = !IS_GEN_RANGE(i915, 11, 12);
-
- /* There are only a few exceptions for gen >= 6: chv and bxt.
- * And we are not sure about the latter, so play it safe for now.
- */
- if (IS_CHERRYVIEW(i915) || IS_BROXTON(i915))
- ppgtt->vm.pt_kmap_wc = true;
-
- err = gen8_init_scratch(&ppgtt->vm);
- if (err)
- goto err_free;
-
- ppgtt->pd = gen8_alloc_top_pd(&ppgtt->vm);
- if (IS_ERR(ppgtt->pd)) {
- err = PTR_ERR(ppgtt->pd);
- goto err_free_scratch;
- }
-
- if (!i915_vm_is_4lvl(&ppgtt->vm)) {
- err = gen8_preallocate_top_level_pdp(ppgtt);
- if (err)
- goto err_free_pd;
- }
-
- ppgtt->vm.bind_async_flags = I915_VMA_LOCAL_BIND;
- ppgtt->vm.insert_entries = gen8_ppgtt_insert;
- ppgtt->vm.allocate_va_range = gen8_ppgtt_alloc;
- ppgtt->vm.clear_range = gen8_ppgtt_clear;
-
- if (intel_vgpu_active(i915))
- gen8_ppgtt_notify_vgt(ppgtt, true);
-
- ppgtt->vm.cleanup = gen8_ppgtt_cleanup;
-
- return ppgtt;
-
-err_free_pd:
- __gen8_ppgtt_cleanup(&ppgtt->vm, ppgtt->pd,
- gen8_pd_top_count(&ppgtt->vm), ppgtt->vm.top);
-err_free_scratch:
- free_scratch(&ppgtt->vm);
-err_free:
- kfree(ppgtt);
- return ERR_PTR(err);
-}
-
-/* Write pde (index) from the page directory @pd to the page table @pt */
-static inline void gen6_write_pde(const struct gen6_ppgtt *ppgtt,
- const unsigned int pde,
- const struct i915_page_table *pt)
-{
- /* Caller needs to make sure the write completes if necessary */
- iowrite32(GEN6_PDE_ADDR_ENCODE(px_dma(pt)) | GEN6_PDE_VALID,
- ppgtt->pd_addr + pde);
-}
-
-static void gen7_ppgtt_enable(struct intel_gt *gt)
-{
- struct drm_i915_private *i915 = gt->i915;
- struct intel_uncore *uncore = gt->uncore;
- struct intel_engine_cs *engine;
- enum intel_engine_id id;
- u32 ecochk;
-
- intel_uncore_rmw(uncore, GAC_ECO_BITS, 0, ECOBITS_PPGTT_CACHE64B);
-
- ecochk = intel_uncore_read(uncore, GAM_ECOCHK);
- if (IS_HASWELL(i915)) {
- ecochk |= ECOCHK_PPGTT_WB_HSW;
- } else {
- ecochk |= ECOCHK_PPGTT_LLC_IVB;
- ecochk &= ~ECOCHK_PPGTT_GFDT_IVB;
- }
- intel_uncore_write(uncore, GAM_ECOCHK, ecochk);
-
- for_each_engine(engine, gt, id) {
- /* GFX_MODE is per-ring on gen7+ */
- ENGINE_WRITE(engine,
- RING_MODE_GEN7,
- _MASKED_BIT_ENABLE(GFX_PPGTT_ENABLE));
- }
-}
-
-static void gen6_ppgtt_enable(struct intel_gt *gt)
-{
- struct intel_uncore *uncore = gt->uncore;
-
- intel_uncore_rmw(uncore,
- GAC_ECO_BITS,
- 0,
- ECOBITS_SNB_BIT | ECOBITS_PPGTT_CACHE64B);
-
- intel_uncore_rmw(uncore,
- GAB_CTL,
- 0,
- GAB_CTL_CONT_AFTER_PAGEFAULT);
-
- intel_uncore_rmw(uncore,
- GAM_ECOCHK,
- 0,
- ECOCHK_SNB_BIT | ECOCHK_PPGTT_CACHE64B);
-
- if (HAS_PPGTT(uncore->i915)) /* may be disabled for VT-d */
- intel_uncore_write(uncore,
- GFX_MODE,
- _MASKED_BIT_ENABLE(GFX_PPGTT_ENABLE));
-}
-
-/* PPGTT support for Sandybridge/Gen6 and later */
-static void gen6_ppgtt_clear_range(struct i915_address_space *vm,
- u64 start, u64 length)
-{
- struct gen6_ppgtt * const ppgtt = to_gen6_ppgtt(i915_vm_to_ppgtt(vm));
- const unsigned int first_entry = start / I915_GTT_PAGE_SIZE;
- const gen6_pte_t scratch_pte = vm->scratch[0].encode;
- unsigned int pde = first_entry / GEN6_PTES;
- unsigned int pte = first_entry % GEN6_PTES;
- unsigned int num_entries = length / I915_GTT_PAGE_SIZE;
-
- while (num_entries) {
- struct i915_page_table * const pt =
- i915_pt_entry(ppgtt->base.pd, pde++);
- const unsigned int count = min(num_entries, GEN6_PTES - pte);
- gen6_pte_t *vaddr;
-
- GEM_BUG_ON(px_base(pt) == px_base(&vm->scratch[1]));
-
- num_entries -= count;
-
- GEM_BUG_ON(count > atomic_read(&pt->used));
- if (!atomic_sub_return(count, &pt->used))
- ppgtt->scan_for_unused_pt = true;
-
- /*
- * Note that the hw doesn't support removing PDE on the fly
- * (they are cached inside the context with no means to
- * invalidate the cache), so we can only reset the PTE
- * entries back to scratch.
- */
-
- vaddr = kmap_atomic_px(pt);
- memset32(vaddr + pte, scratch_pte, count);
- kunmap_atomic(vaddr);
-
- pte = 0;
- }
-}
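-/*
- * Worked example (illustration only, assuming GEN6_PTES == 1024): the
- * pde/pte split above is plain div/mod on the linear page index, e.g.
- *
- *	u64 start = 0x500000;				// 5MiB GPU offset
- *	unsigned int first = start / I915_GTT_PAGE_SIZE;	// 1280
- *	unsigned int pde = first / GEN6_PTES;		// 1
- *	unsigned int pte = first % GEN6_PTES;		// 256
- */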
-
-static void gen6_ppgtt_insert_entries(struct i915_address_space *vm,
- struct i915_vma *vma,
- enum i915_cache_level cache_level,
- u32 flags)
-{
- struct i915_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);
- struct i915_page_directory * const pd = ppgtt->pd;
- unsigned first_entry = vma->node.start / I915_GTT_PAGE_SIZE;
- unsigned act_pt = first_entry / GEN6_PTES;
- unsigned act_pte = first_entry % GEN6_PTES;
- const u32 pte_encode = vm->pte_encode(0, cache_level, flags);
- struct sgt_dma iter = sgt_dma(vma);
- gen6_pte_t *vaddr;
-
- GEM_BUG_ON(pd->entry[act_pt] == &vm->scratch[1]);
-
- vaddr = kmap_atomic_px(i915_pt_entry(pd, act_pt));
- do {
- vaddr[act_pte] = pte_encode | GEN6_PTE_ADDR_ENCODE(iter.dma);
-
- iter.dma += I915_GTT_PAGE_SIZE;
- if (iter.dma == iter.max) {
- iter.sg = __sg_next(iter.sg);
- if (!iter.sg)
- break;
-
- iter.dma = sg_dma_address(iter.sg);
- iter.max = iter.dma + iter.sg->length;
- }
-
- if (++act_pte == GEN6_PTES) {
- kunmap_atomic(vaddr);
- vaddr = kmap_atomic_px(i915_pt_entry(pd, ++act_pt));
- act_pte = 0;
- }
- } while (1);
- kunmap_atomic(vaddr);
-
- vma->page_sizes.gtt = I915_GTT_PAGE_SIZE;
-}
-
-static int gen6_alloc_va_range(struct i915_address_space *vm,
- u64 start, u64 length)
-{
- struct gen6_ppgtt *ppgtt = to_gen6_ppgtt(i915_vm_to_ppgtt(vm));
- struct i915_page_directory * const pd = ppgtt->base.pd;
- struct i915_page_table *pt, *alloc = NULL;
- intel_wakeref_t wakeref;
- u64 from = start;
- unsigned int pde;
- bool flush = false;
- int ret = 0;
-
- wakeref = intel_runtime_pm_get(&vm->i915->runtime_pm);
-
- spin_lock(&pd->lock);
- gen6_for_each_pde(pt, pd, start, length, pde) {
- const unsigned int count = gen6_pte_count(start, length);
-
- if (px_base(pt) == px_base(&vm->scratch[1])) {
- spin_unlock(&pd->lock);
-
- pt = fetch_and_zero(&alloc);
- if (!pt)
- pt = alloc_pt(vm);
- if (IS_ERR(pt)) {
- ret = PTR_ERR(pt);
- goto unwind_out;
- }
-
- fill32_px(pt, vm->scratch[0].encode);
-
- spin_lock(&pd->lock);
- if (pd->entry[pde] == &vm->scratch[1]) {
- pd->entry[pde] = pt;
- if (i915_vma_is_bound(ppgtt->vma,
- I915_VMA_GLOBAL_BIND)) {
- gen6_write_pde(ppgtt, pde, pt);
- flush = true;
- }
- } else {
- alloc = pt;
- pt = pd->entry[pde];
- }
- }
-
- atomic_add(count, &pt->used);
- }
- spin_unlock(&pd->lock);
-
- if (flush)
- gen6_ggtt_invalidate(vm->gt->ggtt);
-
- goto out;
-
-unwind_out:
- gen6_ppgtt_clear_range(vm, from, start - from);
-out:
- if (alloc)
- free_px(vm, alloc);
- intel_runtime_pm_put(&vm->i915->runtime_pm, wakeref);
- return ret;
-}
-
-static int gen6_ppgtt_init_scratch(struct gen6_ppgtt *ppgtt)
-{
- struct i915_address_space * const vm = &ppgtt->base.vm;
- struct i915_page_directory * const pd = ppgtt->base.pd;
- int ret;
-
- ret = setup_scratch_page(vm, __GFP_HIGHMEM);
- if (ret)
- return ret;
-
- vm->scratch[0].encode =
- vm->pte_encode(px_dma(&vm->scratch[0]),
- I915_CACHE_NONE, PTE_READ_ONLY);
-
- if (unlikely(setup_page_dma(vm, px_base(&vm->scratch[1])))) {
- cleanup_scratch_page(vm);
- return -ENOMEM;
- }
-
- fill32_px(&vm->scratch[1], vm->scratch[0].encode);
- memset_p(pd->entry, &vm->scratch[1], I915_PDES);
-
- return 0;
-}
-
-static void gen6_ppgtt_free_pd(struct gen6_ppgtt *ppgtt)
-{
- struct i915_page_directory * const pd = ppgtt->base.pd;
- struct i915_page_dma * const scratch =
- px_base(&ppgtt->base.vm.scratch[1]);
- struct i915_page_table *pt;
- u32 pde;
-
- gen6_for_all_pdes(pt, pd, pde)
- if (px_base(pt) != scratch)
- free_px(&ppgtt->base.vm, pt);
-}
-
-static void gen6_ppgtt_cleanup(struct i915_address_space *vm)
-{
- struct gen6_ppgtt *ppgtt = to_gen6_ppgtt(i915_vm_to_ppgtt(vm));
-
- i915_vma_destroy(ppgtt->vma);
-
- gen6_ppgtt_free_pd(ppgtt);
- free_scratch(vm);
-
- mutex_destroy(&ppgtt->pin_mutex);
- kfree(ppgtt->base.pd);
-}
-
-static int pd_vma_set_pages(struct i915_vma *vma)
-{
- vma->pages = ERR_PTR(-ENODEV);
- return 0;
-}
-
-static void pd_vma_clear_pages(struct i915_vma *vma)
-{
- GEM_BUG_ON(!vma->pages);
-
- vma->pages = NULL;
-}
-
-static int pd_vma_bind(struct i915_vma *vma,
- enum i915_cache_level cache_level,
- u32 unused)
-{
- struct i915_ggtt *ggtt = i915_vm_to_ggtt(vma->vm);
- struct gen6_ppgtt *ppgtt = vma->private;
- u32 ggtt_offset = i915_ggtt_offset(vma) / I915_GTT_PAGE_SIZE;
- struct i915_page_table *pt;
- unsigned int pde;
-
- px_base(ppgtt->base.pd)->ggtt_offset = ggtt_offset * sizeof(gen6_pte_t);
- ppgtt->pd_addr = (gen6_pte_t __iomem *)ggtt->gsm + ggtt_offset;
-
- gen6_for_all_pdes(pt, ppgtt->base.pd, pde)
- gen6_write_pde(ppgtt, pde, pt);
-
- gen6_ggtt_invalidate(ggtt);
-
- return 0;
-}
-
-static void pd_vma_unbind(struct i915_vma *vma)
-{
- struct gen6_ppgtt *ppgtt = vma->private;
- struct i915_page_directory * const pd = ppgtt->base.pd;
- struct i915_page_dma * const scratch =
- px_base(&ppgtt->base.vm.scratch[1]);
- struct i915_page_table *pt;
- unsigned int pde;
-
- if (!ppgtt->scan_for_unused_pt)
- return;
-
- /* Free all no longer used page tables */
- gen6_for_all_pdes(pt, ppgtt->base.pd, pde) {
- if (px_base(pt) == scratch || atomic_read(&pt->used))
- continue;
-
- free_px(&ppgtt->base.vm, pt);
- pd->entry[pde] = scratch;
- }
-
- ppgtt->scan_for_unused_pt = false;
-}
-
-static const struct i915_vma_ops pd_vma_ops = {
- .set_pages = pd_vma_set_pages,
- .clear_pages = pd_vma_clear_pages,
- .bind_vma = pd_vma_bind,
- .unbind_vma = pd_vma_unbind,
-};
-
-static struct i915_vma *pd_vma_create(struct gen6_ppgtt *ppgtt, int size)
-{
- struct i915_ggtt *ggtt = ppgtt->base.vm.gt->ggtt;
- struct i915_vma *vma;
-
- GEM_BUG_ON(!IS_ALIGNED(size, I915_GTT_PAGE_SIZE));
- GEM_BUG_ON(size > ggtt->vm.total);
-
- vma = i915_vma_alloc();
- if (!vma)
- return ERR_PTR(-ENOMEM);
-
- i915_active_init(&vma->active, NULL, NULL);
-
- mutex_init(&vma->pages_mutex);
- vma->vm = i915_vm_get(&ggtt->vm);
- vma->ops = &pd_vma_ops;
- vma->private = ppgtt;
-
- vma->size = size;
- vma->fence_size = size;
- atomic_set(&vma->flags, I915_VMA_GGTT);
- vma->ggtt_view.type = I915_GGTT_VIEW_ROTATED; /* prevent fencing */
-
- INIT_LIST_HEAD(&vma->obj_link);
- INIT_LIST_HEAD(&vma->closed_link);
-
- return vma;
-}
-
-int gen6_ppgtt_pin(struct i915_ppgtt *base)
-{
- struct gen6_ppgtt *ppgtt = to_gen6_ppgtt(base);
- int err = 0;
-
- GEM_BUG_ON(!atomic_read(&ppgtt->base.vm.open));
-
- /*
- * Workaround the limited maximum vma->pin_count and the aliasing_ppgtt
- * which will be pinned into every active context.
- * (When vma->pin_count becomes atomic, I expect we will naturally
- * need a larger, unpacked, type and kill this redundancy.)
- */
- if (atomic_add_unless(&ppgtt->pin_count, 1, 0))
- return 0;
-
- if (mutex_lock_interruptible(&ppgtt->pin_mutex))
- return -EINTR;
-
- /*
- * PPGTT PDEs reside in the GGTT and consist of 512 entries. The
- * allocator works in address space sizes, so it's multiplied by page
- * size. We allocate at the top of the GTT to avoid fragmentation.
- */
- if (!atomic_read(&ppgtt->pin_count)) {
- err = i915_vma_pin(ppgtt->vma,
- 0, GEN6_PD_ALIGN,
- PIN_GLOBAL | PIN_HIGH);
- }
- if (!err)
- atomic_inc(&ppgtt->pin_count);
- mutex_unlock(&ppgtt->pin_mutex);
-
- return err;
-}
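-/*
- * Note: the atomic_add_unless(&ppgtt->pin_count, 1, 0) fast path above is
- * the mirror image of release_pd_entry() earlier -- it takes an extra
- * reference only when one already exists, so the very first pin always
- * falls through to the mutex-guarded slow path that binds the vma.
- */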
-
-void gen6_ppgtt_unpin(struct i915_ppgtt *base)
-{
- struct gen6_ppgtt *ppgtt = to_gen6_ppgtt(base);
-
- GEM_BUG_ON(!atomic_read(&ppgtt->pin_count));
- if (atomic_dec_and_test(&ppgtt->pin_count))
- i915_vma_unpin(ppgtt->vma);
-}
-
-void gen6_ppgtt_unpin_all(struct i915_ppgtt *base)
-{
- struct gen6_ppgtt *ppgtt = to_gen6_ppgtt(base);
-
- if (!atomic_read(&ppgtt->pin_count))
- return;
-
- i915_vma_unpin(ppgtt->vma);
- atomic_set(&ppgtt->pin_count, 0);
-}
-
-static struct i915_ppgtt *gen6_ppgtt_create(struct drm_i915_private *i915)
-{
- struct i915_ggtt * const ggtt = &i915->ggtt;
- struct gen6_ppgtt *ppgtt;
- int err;
-
- ppgtt = kzalloc(sizeof(*ppgtt), GFP_KERNEL);
- if (!ppgtt)
- return ERR_PTR(-ENOMEM);
-
- mutex_init(&ppgtt->pin_mutex);
-
- ppgtt_init(&ppgtt->base, &i915->gt);
- ppgtt->base.vm.top = 1;
-
- ppgtt->base.vm.bind_async_flags = I915_VMA_LOCAL_BIND;
- ppgtt->base.vm.allocate_va_range = gen6_alloc_va_range;
- ppgtt->base.vm.clear_range = gen6_ppgtt_clear_range;
- ppgtt->base.vm.insert_entries = gen6_ppgtt_insert_entries;
- ppgtt->base.vm.cleanup = gen6_ppgtt_cleanup;
-
- ppgtt->base.vm.pte_encode = ggtt->vm.pte_encode;
-
- ppgtt->base.pd = __alloc_pd(sizeof(*ppgtt->base.pd));
- if (!ppgtt->base.pd) {
- err = -ENOMEM;
- goto err_free;
- }
-
- err = gen6_ppgtt_init_scratch(ppgtt);
- if (err)
- goto err_pd;
-
- ppgtt->vma = pd_vma_create(ppgtt, GEN6_PD_SIZE);
- if (IS_ERR(ppgtt->vma)) {
- err = PTR_ERR(ppgtt->vma);
- goto err_scratch;
- }
-
- return &ppgtt->base;
-
-err_scratch:
- free_scratch(&ppgtt->base.vm);
-err_pd:
- kfree(ppgtt->base.pd);
-err_free:
- kfree(ppgtt);
- return ERR_PTR(err);
-}
-
-static void gtt_write_workarounds(struct intel_gt *gt)
-{
- struct drm_i915_private *i915 = gt->i915;
- struct intel_uncore *uncore = gt->uncore;
-
- /* This function is for GTT-related workarounds. It is called on
- * driver load and after a GPU reset, so you can place workarounds
- * here even if they get overwritten by a GPU reset.
- */
- /* WaIncreaseDefaultTLBEntries:chv,bdw,skl,bxt,kbl,glk,cfl,cnl,icl */
- if (IS_BROADWELL(i915))
- intel_uncore_write(uncore,
- GEN8_L3_LRA_1_GPGPU,
- GEN8_L3_LRA_1_GPGPU_DEFAULT_VALUE_BDW);
- else if (IS_CHERRYVIEW(i915))
- intel_uncore_write(uncore,
- GEN8_L3_LRA_1_GPGPU,
- GEN8_L3_LRA_1_GPGPU_DEFAULT_VALUE_CHV);
- else if (IS_GEN9_LP(i915))
- intel_uncore_write(uncore,
- GEN8_L3_LRA_1_GPGPU,
- GEN9_L3_LRA_1_GPGPU_DEFAULT_VALUE_BXT);
- else if (INTEL_GEN(i915) >= 9 && INTEL_GEN(i915) <= 11)
- intel_uncore_write(uncore,
- GEN8_L3_LRA_1_GPGPU,
- GEN9_L3_LRA_1_GPGPU_DEFAULT_VALUE_SKL);
-
- /*
- * To support 64K PTEs we need to first enable the use of the
- * Intermediate-Page-Size(IPS) bit of the PDE field via some magical
- * mmio, otherwise the page-walker will simply ignore the IPS bit. This
- * shouldn't be needed after GEN10.
- *
- * 64K pages were first introduced with BDW, although technically they
- * only *work* from gen9+. For pre-BDW we instead have the option for
- * 32K pages, but we don't currently have any support for it in our
- * driver.
- */
- if (HAS_PAGE_SIZES(i915, I915_GTT_PAGE_SIZE_64K) &&
- INTEL_GEN(i915) <= 10)
- intel_uncore_rmw(uncore,
- GEN8_GAMW_ECO_DEV_RW_IA,
- 0,
- GAMW_ECO_ENABLE_64K_IPS_FIELD);
-
- if (IS_GEN_RANGE(i915, 8, 11)) {
- bool can_use_gtt_cache = true;
-
- /*
- * According to the BSpec if we use 2M/1G pages then we also
- * need to disable the GTT cache. At least on BDW we can see
- * visual corruption when using 2M pages, and not disabling the
- * GTT cache.
- */
- if (HAS_PAGE_SIZES(i915, I915_GTT_PAGE_SIZE_2M))
- can_use_gtt_cache = false;
-
- /* WaGttCachingOffByDefault */
- intel_uncore_write(uncore,
- HSW_GTT_CACHE_EN,
- can_use_gtt_cache ? GTT_CACHE_EN_ALL : 0);
- WARN_ON_ONCE(can_use_gtt_cache &&
- intel_uncore_read(uncore,
- HSW_GTT_CACHE_EN) == 0);
- }
-}
-
-int i915_ppgtt_init_hw(struct intel_gt *gt)
-{
- struct drm_i915_private *i915 = gt->i915;
-
- gtt_write_workarounds(gt);
-
- if (IS_GEN(i915, 6))
- gen6_ppgtt_enable(gt);
- else if (IS_GEN(i915, 7))
- gen7_ppgtt_enable(gt);
-
- return 0;
-}
-
-static struct i915_ppgtt *
-__ppgtt_create(struct drm_i915_private *i915)
-{
- if (INTEL_GEN(i915) < 8)
- return gen6_ppgtt_create(i915);
- else
- return gen8_ppgtt_create(i915);
-}
-
-struct i915_ppgtt *
-i915_ppgtt_create(struct drm_i915_private *i915)
-{
- struct i915_ppgtt *ppgtt;
-
- ppgtt = __ppgtt_create(i915);
- if (IS_ERR(ppgtt))
- return ppgtt;
-
- trace_i915_ppgtt_create(&ppgtt->vm);
-
- return ppgtt;
-}
-
-/* Certain Gen5 chipsets require idling the GPU before
- * unmapping anything from the GTT when VT-d is enabled.
- */
-static bool needs_idle_maps(struct drm_i915_private *dev_priv)
-{
- /* Query intel_iommu to see if we need the workaround. Presumably that
- * was loaded first.
- */
- return IS_GEN(dev_priv, 5) && IS_MOBILE(dev_priv) && intel_vtd_active();
-}
-
-static void ggtt_suspend_mappings(struct i915_ggtt *ggtt)
-{
- struct drm_i915_private *i915 = ggtt->vm.i915;
-
- /* Don't bother messing with faults pre GEN6 as we have little
- * documentation supporting that it's a good idea.
- */
- if (INTEL_GEN(i915) < 6)
- return;
-
- intel_gt_check_and_clear_faults(ggtt->vm.gt);
-
- ggtt->vm.clear_range(&ggtt->vm, 0, ggtt->vm.total);
-
- ggtt->invalidate(ggtt);
-}
-
-void i915_gem_suspend_gtt_mappings(struct drm_i915_private *i915)
-{
- ggtt_suspend_mappings(&i915->ggtt);
-}
-
int i915_gem_gtt_prepare_pages(struct drm_i915_gem_object *obj,
struct sg_table *pages)
{
@@ -2161,368 +52,6 @@ int i915_gem_gtt_prepare_pages(struct drm_i915_gem_object *obj,
return -ENOSPC;
}
-static void gen8_set_pte(void __iomem *addr, gen8_pte_t pte)
-{
- writeq(pte, addr);
-}
-
-static void gen8_ggtt_insert_page(struct i915_address_space *vm,
- dma_addr_t addr,
- u64 offset,
- enum i915_cache_level level,
- u32 unused)
-{
- struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
- gen8_pte_t __iomem *pte =
- (gen8_pte_t __iomem *)ggtt->gsm + offset / I915_GTT_PAGE_SIZE;
-
- gen8_set_pte(pte, gen8_pte_encode(addr, level, 0));
-
- ggtt->invalidate(ggtt);
-}
-
-static void gen8_ggtt_insert_entries(struct i915_address_space *vm,
- struct i915_vma *vma,
- enum i915_cache_level level,
- u32 flags)
-{
- struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
- struct sgt_iter sgt_iter;
- gen8_pte_t __iomem *gtt_entries;
- const gen8_pte_t pte_encode = gen8_pte_encode(0, level, 0);
- dma_addr_t addr;
-
- /*
- * Note that we ignore PTE_READ_ONLY here. The caller must be careful
- * not to allow the user to override access to a read only page.
- */
-
- gtt_entries = (gen8_pte_t __iomem *)ggtt->gsm;
- gtt_entries += vma->node.start / I915_GTT_PAGE_SIZE;
- for_each_sgt_daddr(addr, sgt_iter, vma->pages)
- gen8_set_pte(gtt_entries++, pte_encode | addr);
-
- /*
- * We want to flush the TLBs only after we're certain all the PTE
- * updates have finished.
- */
- ggtt->invalidate(ggtt);
-}
-
-static void gen6_ggtt_insert_page(struct i915_address_space *vm,
- dma_addr_t addr,
- u64 offset,
- enum i915_cache_level level,
- u32 flags)
-{
- struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
- gen6_pte_t __iomem *pte =
- (gen6_pte_t __iomem *)ggtt->gsm + offset / I915_GTT_PAGE_SIZE;
-
- iowrite32(vm->pte_encode(addr, level, flags), pte);
-
- ggtt->invalidate(ggtt);
-}
-
-/*
- * Binds an object into the global gtt with the specified cache level. The object
- * will be accessible to the GPU via commands whose operands reference offsets
- * within the global GTT as well as accessible by the GPU through the GMADR
- * mapped BAR (dev_priv->mm.gtt->gtt).
- */
-static void gen6_ggtt_insert_entries(struct i915_address_space *vm,
- struct i915_vma *vma,
- enum i915_cache_level level,
- u32 flags)
-{
- struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
- gen6_pte_t __iomem *entries = (gen6_pte_t __iomem *)ggtt->gsm;
- unsigned int i = vma->node.start / I915_GTT_PAGE_SIZE;
- struct sgt_iter iter;
- dma_addr_t addr;
-
- for_each_sgt_daddr(addr, iter, vma->pages)
- iowrite32(vm->pte_encode(addr, level, flags), &entries[i++]);
-
- /*
- * We want to flush the TLBs only after we're certain all the PTE
- * updates have finished.
- */
- ggtt->invalidate(ggtt);
-}
-
-static void nop_clear_range(struct i915_address_space *vm,
- u64 start, u64 length)
-{
-}
-
-static void gen8_ggtt_clear_range(struct i915_address_space *vm,
- u64 start, u64 length)
-{
- struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
- unsigned first_entry = start / I915_GTT_PAGE_SIZE;
- unsigned num_entries = length / I915_GTT_PAGE_SIZE;
- const gen8_pte_t scratch_pte = vm->scratch[0].encode;
- gen8_pte_t __iomem *gtt_base =
- (gen8_pte_t __iomem *)ggtt->gsm + first_entry;
- const int max_entries = ggtt_total_entries(ggtt) - first_entry;
- int i;
-
- if (WARN(num_entries > max_entries,
- "First entry = %d; Num entries = %d (max=%d)\n",
- first_entry, num_entries, max_entries))
- num_entries = max_entries;
-
- for (i = 0; i < num_entries; i++)
- gen8_set_pte(&gtt_base[i], scratch_pte);
-}
-
-static void bxt_vtd_ggtt_wa(struct i915_address_space *vm)
-{
- struct drm_i915_private *dev_priv = vm->i915;
-
- /*
- * Make sure the internal GAM fifo has been cleared of all GTT
- * writes before exiting stop_machine(). This guarantees that
- * any aperture accesses waiting to start in another process
- * cannot back up behind the GTT writes causing a hang.
- * The register can be any arbitrary GAM register.
- */
- POSTING_READ(GFX_FLSH_CNTL_GEN6);
-}
-
-struct insert_page {
- struct i915_address_space *vm;
- dma_addr_t addr;
- u64 offset;
- enum i915_cache_level level;
-};
-
-static int bxt_vtd_ggtt_insert_page__cb(void *_arg)
-{
- struct insert_page *arg = _arg;
-
- gen8_ggtt_insert_page(arg->vm, arg->addr, arg->offset, arg->level, 0);
- bxt_vtd_ggtt_wa(arg->vm);
-
- return 0;
-}
-
-static void bxt_vtd_ggtt_insert_page__BKL(struct i915_address_space *vm,
- dma_addr_t addr,
- u64 offset,
- enum i915_cache_level level,
- u32 unused)
-{
- struct insert_page arg = { vm, addr, offset, level };
-
- stop_machine(bxt_vtd_ggtt_insert_page__cb, &arg, NULL);
-}
-
-struct insert_entries {
- struct i915_address_space *vm;
- struct i915_vma *vma;
- enum i915_cache_level level;
- u32 flags;
-};
-
-static int bxt_vtd_ggtt_insert_entries__cb(void *_arg)
-{
- struct insert_entries *arg = _arg;
-
- gen8_ggtt_insert_entries(arg->vm, arg->vma, arg->level, arg->flags);
- bxt_vtd_ggtt_wa(arg->vm);
-
- return 0;
-}
-
-static void bxt_vtd_ggtt_insert_entries__BKL(struct i915_address_space *vm,
- struct i915_vma *vma,
- enum i915_cache_level level,
- u32 flags)
-{
- struct insert_entries arg = { vm, vma, level, flags };
-
- stop_machine(bxt_vtd_ggtt_insert_entries__cb, &arg, NULL);
-}
-
-struct clear_range {
- struct i915_address_space *vm;
- u64 start;
- u64 length;
-};
-
-static int bxt_vtd_ggtt_clear_range__cb(void *_arg)
-{
- struct clear_range *arg = _arg;
-
- gen8_ggtt_clear_range(arg->vm, arg->start, arg->length);
- bxt_vtd_ggtt_wa(arg->vm);
-
- return 0;
-}
-
-static void bxt_vtd_ggtt_clear_range__BKL(struct i915_address_space *vm,
- u64 start,
- u64 length)
-{
- struct clear_range arg = { vm, start, length };
-
- stop_machine(bxt_vtd_ggtt_clear_range__cb, &arg, NULL);
-}
-
-static void gen6_ggtt_clear_range(struct i915_address_space *vm,
- u64 start, u64 length)
-{
- struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
- unsigned first_entry = start / I915_GTT_PAGE_SIZE;
- unsigned num_entries = length / I915_GTT_PAGE_SIZE;
- gen6_pte_t scratch_pte, __iomem *gtt_base =
- (gen6_pte_t __iomem *)ggtt->gsm + first_entry;
- const int max_entries = ggtt_total_entries(ggtt) - first_entry;
- int i;
-
- if (WARN(num_entries > max_entries,
- "First entry = %d; Num entries = %d (max=%d)\n",
- first_entry, num_entries, max_entries))
- num_entries = max_entries;
-
- scratch_pte = vm->scratch[0].encode;
- for (i = 0; i < num_entries; i++)
- iowrite32(scratch_pte, &gtt_base[i]);
-}
-
-static void i915_ggtt_insert_page(struct i915_address_space *vm,
- dma_addr_t addr,
- u64 offset,
- enum i915_cache_level cache_level,
- u32 unused)
-{
- unsigned int flags = (cache_level == I915_CACHE_NONE) ?
- AGP_USER_MEMORY : AGP_USER_CACHED_MEMORY;
-
- intel_gtt_insert_page(addr, offset >> PAGE_SHIFT, flags);
-}
-
-static void i915_ggtt_insert_entries(struct i915_address_space *vm,
- struct i915_vma *vma,
- enum i915_cache_level cache_level,
- u32 unused)
-{
- unsigned int flags = (cache_level == I915_CACHE_NONE) ?
- AGP_USER_MEMORY : AGP_USER_CACHED_MEMORY;
-
- intel_gtt_insert_sg_entries(vma->pages, vma->node.start >> PAGE_SHIFT,
- flags);
-}
-
-static void i915_ggtt_clear_range(struct i915_address_space *vm,
- u64 start, u64 length)
-{
- intel_gtt_clear_range(start >> PAGE_SHIFT, length >> PAGE_SHIFT);
-}
-
-static int ggtt_bind_vma(struct i915_vma *vma,
- enum i915_cache_level cache_level,
- u32 flags)
-{
- struct drm_i915_private *i915 = vma->vm->i915;
- struct drm_i915_gem_object *obj = vma->obj;
- intel_wakeref_t wakeref;
- u32 pte_flags;
-
- /* Applicable to VLV (gen8+ do not support RO in the GGTT) */
- pte_flags = 0;
- if (i915_gem_object_is_readonly(obj))
- pte_flags |= PTE_READ_ONLY;
-
- with_intel_runtime_pm(&i915->runtime_pm, wakeref)
- vma->vm->insert_entries(vma->vm, vma, cache_level, pte_flags);
-
- vma->page_sizes.gtt = I915_GTT_PAGE_SIZE;
-
- /*
- * Without aliasing PPGTT there's no difference between
- * GLOBAL/LOCAL_BIND, it's all the same ptes. Hence unconditionally
- * upgrade to both bound if we bind either to avoid double-binding.
- */
- atomic_or(I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND, &vma->flags);
-
- return 0;
-}
-
-static void ggtt_unbind_vma(struct i915_vma *vma)
-{
- struct drm_i915_private *i915 = vma->vm->i915;
- intel_wakeref_t wakeref;
-
- with_intel_runtime_pm(&i915->runtime_pm, wakeref)
- vma->vm->clear_range(vma->vm, vma->node.start, vma->size);
-}
-
-static int aliasing_gtt_bind_vma(struct i915_vma *vma,
- enum i915_cache_level cache_level,
- u32 flags)
-{
- struct drm_i915_private *i915 = vma->vm->i915;
- u32 pte_flags;
- int ret;
-
- /* Currently applicable only to VLV */
- pte_flags = 0;
- if (i915_gem_object_is_readonly(vma->obj))
- pte_flags |= PTE_READ_ONLY;
-
- if (flags & I915_VMA_LOCAL_BIND) {
- struct i915_ppgtt *alias = i915_vm_to_ggtt(vma->vm)->alias;
-
- if (flags & I915_VMA_ALLOC) {
- ret = alias->vm.allocate_va_range(&alias->vm,
- vma->node.start,
- vma->size);
- if (ret)
- return ret;
-
- set_bit(I915_VMA_ALLOC_BIT, __i915_vma_flags(vma));
- }
-
- GEM_BUG_ON(!test_bit(I915_VMA_ALLOC_BIT,
- __i915_vma_flags(vma)));
- alias->vm.insert_entries(&alias->vm, vma,
- cache_level, pte_flags);
- }
-
- if (flags & I915_VMA_GLOBAL_BIND) {
- intel_wakeref_t wakeref;
-
- with_intel_runtime_pm(&i915->runtime_pm, wakeref) {
- vma->vm->insert_entries(vma->vm, vma,
- cache_level, pte_flags);
- }
- }
-
- return 0;
-}
-
-static void aliasing_gtt_unbind_vma(struct i915_vma *vma)
-{
- struct drm_i915_private *i915 = vma->vm->i915;
-
- if (i915_vma_is_bound(vma, I915_VMA_GLOBAL_BIND)) {
- struct i915_address_space *vm = vma->vm;
- intel_wakeref_t wakeref;
-
- with_intel_runtime_pm(&i915->runtime_pm, wakeref)
- vm->clear_range(vm, vma->node.start, vma->size);
- }
-
- if (test_and_clear_bit(I915_VMA_ALLOC_BIT, __i915_vma_flags(vma))) {
- struct i915_address_space *vm =
- &i915_vm_to_ggtt(vma->vm)->alias->vm;
-
- vm->clear_range(vm, vma->node.start, vma->size);
- }
-}
-
void i915_gem_gtt_finish_pages(struct drm_i915_gem_object *obj,
struct sg_table *pages)
{
@@ -2543,1070 +72,6 @@ void i915_gem_gtt_finish_pages(struct drm_i915_gem_object *obj,
dma_unmap_sg(kdev, pages->sgl, pages->nents, PCI_DMA_BIDIRECTIONAL);
}
-static int ggtt_set_pages(struct i915_vma *vma)
-{
- int ret;
-
- GEM_BUG_ON(vma->pages);
-
- ret = i915_get_ggtt_vma_pages(vma);
- if (ret)
- return ret;
-
- vma->page_sizes = vma->obj->mm.page_sizes;
-
- return 0;
-}
-
-static void i915_ggtt_color_adjust(const struct drm_mm_node *node,
- unsigned long color,
- u64 *start,
- u64 *end)
-{
- if (i915_node_color_differs(node, color))
- *start += I915_GTT_PAGE_SIZE;
-
- /* Also leave a space between the unallocated reserved node after the
- * GTT and any objects within the GTT, i.e. we use the color adjustment
- * to insert a guard page to prevent prefetches crossing over the
- * GTT boundary.
- */
- node = list_next_entry(node, node_list);
- if (node->color != color)
- *end -= I915_GTT_PAGE_SIZE;
-}
-
-static int init_aliasing_ppgtt(struct i915_ggtt *ggtt)
-{
- struct i915_ppgtt *ppgtt;
- int err;
-
- ppgtt = i915_ppgtt_create(ggtt->vm.i915);
- if (IS_ERR(ppgtt))
- return PTR_ERR(ppgtt);
-
- if (GEM_WARN_ON(ppgtt->vm.total < ggtt->vm.total)) {
- err = -ENODEV;
- goto err_ppgtt;
- }
-
- /*
- * Note we only pre-allocate as far as the end of the global
- * GTT. On 48b / 4-level page-tables, the difference is very,
- * very significant! We have to preallocate as GVT/vgpu does
- * not like the page directory disappearing.
- */
- err = ppgtt->vm.allocate_va_range(&ppgtt->vm, 0, ggtt->vm.total);
- if (err)
- goto err_ppgtt;
-
- ggtt->alias = ppgtt;
- ggtt->vm.bind_async_flags |= ppgtt->vm.bind_async_flags;
-
- GEM_BUG_ON(ggtt->vm.vma_ops.bind_vma != ggtt_bind_vma);
- ggtt->vm.vma_ops.bind_vma = aliasing_gtt_bind_vma;
-
- GEM_BUG_ON(ggtt->vm.vma_ops.unbind_vma != ggtt_unbind_vma);
- ggtt->vm.vma_ops.unbind_vma = aliasing_gtt_unbind_vma;
-
- return 0;
-
-err_ppgtt:
- i915_vm_put(&ppgtt->vm);
- return err;
-}
-
-static void fini_aliasing_ppgtt(struct i915_ggtt *ggtt)
-{
- struct i915_ppgtt *ppgtt;
-
- ppgtt = fetch_and_zero(&ggtt->alias);
- if (!ppgtt)
- return;
-
- i915_vm_put(&ppgtt->vm);
-
- ggtt->vm.vma_ops.bind_vma = ggtt_bind_vma;
- ggtt->vm.vma_ops.unbind_vma = ggtt_unbind_vma;
-}
-
-static int ggtt_reserve_guc_top(struct i915_ggtt *ggtt)
-{
- u64 size;
- int ret;
-
- if (!USES_GUC(ggtt->vm.i915))
- return 0;
-
- GEM_BUG_ON(ggtt->vm.total <= GUC_GGTT_TOP);
- size = ggtt->vm.total - GUC_GGTT_TOP;
-
- ret = i915_gem_gtt_reserve(&ggtt->vm, &ggtt->uc_fw, size,
- GUC_GGTT_TOP, I915_COLOR_UNEVICTABLE,
- PIN_NOEVICT);
- if (ret)
- DRM_DEBUG_DRIVER("Failed to reserve top of GGTT for GuC\n");
-
- return ret;
-}
-
-static void ggtt_release_guc_top(struct i915_ggtt *ggtt)
-{
- if (drm_mm_node_allocated(&ggtt->uc_fw))
- drm_mm_remove_node(&ggtt->uc_fw);
-}
-
-static void cleanup_init_ggtt(struct i915_ggtt *ggtt)
-{
- ggtt_release_guc_top(ggtt);
- if (drm_mm_node_allocated(&ggtt->error_capture))
- drm_mm_remove_node(&ggtt->error_capture);
-}
-
-static int init_ggtt(struct i915_ggtt *ggtt)
-{
- /* Let GEM manage all of the aperture.
- *
- * However, leave one page at the end still bound to the scratch page.
- * There are a number of places where the hardware apparently prefetches
- * past the end of the object, and we've seen multiple hangs with the
- * GPU head pointer stuck in a batchbuffer bound at the last page of the
- * aperture. One page should be enough to keep any prefetching inside
- * of the aperture.
- */
- unsigned long hole_start, hole_end;
- struct drm_mm_node *entry;
- int ret;
-
- /*
- * GuC requires all resources that we're sharing with it to be placed in
- * non-WOPCM memory. If GuC is not present or not in use we still need a
- * small bias as ring wraparound at offset 0 sometimes hangs. No idea
- * why.
- */
- ggtt->pin_bias = max_t(u32, I915_GTT_PAGE_SIZE,
- intel_wopcm_guc_size(&ggtt->vm.i915->wopcm));
-
- ret = intel_vgt_balloon(ggtt);
- if (ret)
- return ret;
-
- if (ggtt->mappable_end) {
- /* Reserve a mappable slot for our lockless error capture */
- ret = drm_mm_insert_node_in_range(&ggtt->vm.mm, &ggtt->error_capture,
- PAGE_SIZE, 0, I915_COLOR_UNEVICTABLE,
- 0, ggtt->mappable_end,
- DRM_MM_INSERT_LOW);
- if (ret)
- return ret;
- }
-
- /*
- * The upper portion of the GuC address space has a sizeable hole
- * (several MB) that is inaccessible by GuC. Reserve this range within
- * GGTT as it can comfortably hold GuC/HuC firmware images.
- */
- ret = ggtt_reserve_guc_top(ggtt);
- if (ret)
- goto err;
-
- /* Clear any non-preallocated blocks */
- drm_mm_for_each_hole(entry, &ggtt->vm.mm, hole_start, hole_end) {
- DRM_DEBUG_KMS("clearing unused GTT space: [%lx, %lx]\n",
- hole_start, hole_end);
- ggtt->vm.clear_range(&ggtt->vm, hole_start,
- hole_end - hole_start);
- }
-
- /* And finally clear the reserved guard page */
- ggtt->vm.clear_range(&ggtt->vm, ggtt->vm.total - PAGE_SIZE, PAGE_SIZE);
-
- return 0;
-
-err:
- cleanup_init_ggtt(ggtt);
- return ret;
-}
-
-int i915_init_ggtt(struct drm_i915_private *i915)
-{
- int ret;
-
- ret = init_ggtt(&i915->ggtt);
- if (ret)
- return ret;
-
- if (INTEL_PPGTT(i915) == INTEL_PPGTT_ALIASING) {
- ret = init_aliasing_ppgtt(&i915->ggtt);
- if (ret)
- cleanup_init_ggtt(&i915->ggtt);
- }
-
- return 0;
-}
-
-static void ggtt_cleanup_hw(struct i915_ggtt *ggtt)
-{
- struct i915_vma *vma, *vn;
-
- atomic_set(&ggtt->vm.open, 0);
-
- rcu_barrier(); /* flush the RCU'ed __i915_vm_release */
- flush_workqueue(ggtt->vm.i915->wq);
-
- mutex_lock(&ggtt->vm.mutex);
-
- list_for_each_entry_safe(vma, vn, &ggtt->vm.bound_list, vm_link)
- WARN_ON(__i915_vma_unbind(vma));
-
- if (drm_mm_node_allocated(&ggtt->error_capture))
- drm_mm_remove_node(&ggtt->error_capture);
-
- ggtt_release_guc_top(ggtt);
- intel_vgt_deballoon(ggtt);
-
- ggtt->vm.cleanup(&ggtt->vm);
-
- mutex_unlock(&ggtt->vm.mutex);
- i915_address_space_fini(&ggtt->vm);
-
- arch_phys_wc_del(ggtt->mtrr);
-
- if (ggtt->iomap.size)
- io_mapping_fini(&ggtt->iomap);
-}
-
-/**
- * i915_ggtt_driver_release - Clean up GGTT hardware initialization
- * @i915: i915 device
- */
-void i915_ggtt_driver_release(struct drm_i915_private *i915)
-{
- struct pagevec *pvec;
-
- fini_aliasing_ppgtt(&i915->ggtt);
-
- ggtt_cleanup_hw(&i915->ggtt);
-
- pvec = &i915->mm.wc_stash.pvec;
- if (pvec->nr) {
- set_pages_array_wb(pvec->pages, pvec->nr);
- __pagevec_release(pvec);
- }
-}
-
-static unsigned int gen6_get_total_gtt_size(u16 snb_gmch_ctl)
-{
- snb_gmch_ctl >>= SNB_GMCH_GGMS_SHIFT;
- snb_gmch_ctl &= SNB_GMCH_GGMS_MASK;
- return snb_gmch_ctl << 20;
-}
-
-static unsigned int gen8_get_total_gtt_size(u16 bdw_gmch_ctl)
-{
- bdw_gmch_ctl >>= BDW_GMCH_GGMS_SHIFT;
- bdw_gmch_ctl &= BDW_GMCH_GGMS_MASK;
- if (bdw_gmch_ctl)
- bdw_gmch_ctl = 1 << bdw_gmch_ctl;
-
-#ifdef CONFIG_X86_32
- /* Limit 32b platforms to a 2GB GGTT: 4 << 20 / pte size * I915_GTT_PAGE_SIZE */
- if (bdw_gmch_ctl > 4)
- bdw_gmch_ctl = 4;
-#endif
-
- return bdw_gmch_ctl << 20;
-}
-
-static unsigned int chv_get_total_gtt_size(u16 gmch_ctrl)
-{
- gmch_ctrl >>= SNB_GMCH_GGMS_SHIFT;
- gmch_ctrl &= SNB_GMCH_GGMS_MASK;
-
- if (gmch_ctrl)
- return 1 << (20 + gmch_ctrl);
-
- return 0;
-}
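-/*
- * Worked example (illustration only): the GGMS decode above turns a small
- * power-of-two field into the size of the stolen-memory region (the GSM)
- * that holds the GGTT PTEs. E.g. on gen8, a field value of 3 gives
- * 1 << 3 == 8MB of PTE space, and with 8-byte PTEs mapping 4K pages that
- * is a 4GB GGTT:
- *
- *	unsigned int size = 8 << 20;			// 8MB of PTEs
- *	u64 total = (size / sizeof(gen8_pte_t)) * I915_GTT_PAGE_SIZE; // 4GB
- */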
-
-static int ggtt_probe_common(struct i915_ggtt *ggtt, u64 size)
-{
- struct drm_i915_private *dev_priv = ggtt->vm.i915;
- struct pci_dev *pdev = dev_priv->drm.pdev;
- phys_addr_t phys_addr;
- int ret;
-
- /* For modern GENs the PTEs and register space are split in the BAR */
- phys_addr = pci_resource_start(pdev, 0) + pci_resource_len(pdev, 0) / 2;
-
- /*
- * On BXT+/CNL+ writes larger than 64 bit to the GTT pagetable range
- * will be dropped. For WC mappings in general we have 64 byte burst
- * writes when the WC buffer is flushed, so we can't use it, but have to
- * resort to an uncached mapping. The WC issue is easily caught by the
- * readback check when writing GTT PTE entries.
- */
- if (IS_GEN9_LP(dev_priv) || INTEL_GEN(dev_priv) >= 10)
- ggtt->gsm = ioremap_nocache(phys_addr, size);
- else
- ggtt->gsm = ioremap_wc(phys_addr, size);
- if (!ggtt->gsm) {
- DRM_ERROR("Failed to map the ggtt page table\n");
- return -ENOMEM;
- }
-
- ret = setup_scratch_page(&ggtt->vm, GFP_DMA32);
- if (ret) {
- DRM_ERROR("Scratch setup failed\n");
- /* iounmap will also get called at remove, but meh */
- iounmap(ggtt->gsm);
- return ret;
- }
-
- ggtt->vm.scratch[0].encode =
- ggtt->vm.pte_encode(px_dma(&ggtt->vm.scratch[0]),
- I915_CACHE_NONE, 0);
-
- return 0;
-}
-
-static void tgl_setup_private_ppat(struct intel_uncore *uncore)
-{
- /* TGL doesn't support LLC or AGE settings */
- intel_uncore_write(uncore, GEN12_PAT_INDEX(0), GEN8_PPAT_WB);
- intel_uncore_write(uncore, GEN12_PAT_INDEX(1), GEN8_PPAT_WC);
- intel_uncore_write(uncore, GEN12_PAT_INDEX(2), GEN8_PPAT_WT);
- intel_uncore_write(uncore, GEN12_PAT_INDEX(3), GEN8_PPAT_UC);
- intel_uncore_write(uncore, GEN12_PAT_INDEX(4), GEN8_PPAT_WB);
- intel_uncore_write(uncore, GEN12_PAT_INDEX(5), GEN8_PPAT_WB);
- intel_uncore_write(uncore, GEN12_PAT_INDEX(6), GEN8_PPAT_WB);
- intel_uncore_write(uncore, GEN12_PAT_INDEX(7), GEN8_PPAT_WB);
-}
-
-static void cnl_setup_private_ppat(struct intel_uncore *uncore)
-{
- intel_uncore_write(uncore,
- GEN10_PAT_INDEX(0),
- GEN8_PPAT_WB | GEN8_PPAT_LLC);
- intel_uncore_write(uncore,
- GEN10_PAT_INDEX(1),
- GEN8_PPAT_WC | GEN8_PPAT_LLCELLC);
- intel_uncore_write(uncore,
- GEN10_PAT_INDEX(2),
- GEN8_PPAT_WT | GEN8_PPAT_LLCELLC);
- intel_uncore_write(uncore,
- GEN10_PAT_INDEX(3),
- GEN8_PPAT_UC);
- intel_uncore_write(uncore,
- GEN10_PAT_INDEX(4),
- GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(0));
- intel_uncore_write(uncore,
- GEN10_PAT_INDEX(5),
- GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(1));
- intel_uncore_write(uncore,
- GEN10_PAT_INDEX(6),
- GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(2));
- intel_uncore_write(uncore,
- GEN10_PAT_INDEX(7),
- GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(3));
-}
-
-/* The GGTT and PPGTT need a private PPAT setup in order to handle cacheability
- * bits. When using advanced contexts each context stores its own PAT, but
- * writing this data shouldn't be harmful even in those cases. */
-static void bdw_setup_private_ppat(struct intel_uncore *uncore)
-{
- u64 pat;
-
- pat = GEN8_PPAT(0, GEN8_PPAT_WB | GEN8_PPAT_LLC) | /* for normal objects, no eLLC */
- GEN8_PPAT(1, GEN8_PPAT_WC | GEN8_PPAT_LLCELLC) | /* for something pointing to ptes? */
- GEN8_PPAT(2, GEN8_PPAT_WT | GEN8_PPAT_LLCELLC) | /* for scanout with eLLC */
- GEN8_PPAT(3, GEN8_PPAT_UC) | /* Uncached objects, mostly for scanout */
- GEN8_PPAT(4, GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(0)) |
- GEN8_PPAT(5, GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(1)) |
- GEN8_PPAT(6, GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(2)) |
- GEN8_PPAT(7, GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(3));
-
- intel_uncore_write(uncore, GEN8_PRIVATE_PAT_LO, lower_32_bits(pat));
- intel_uncore_write(uncore, GEN8_PRIVATE_PAT_HI, upper_32_bits(pat));
-}
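-/*
- * Note: GEN8_PPAT(i, x) simply positions an 8-bit attribute at byte i of
- * the 64-bit PAT value, i.e. roughly ((u64)(x) << ((i) * 8)), so the
- * register pair written above carries one attribute byte per PAT index:
- * indices 0-3 in PAT_LO, indices 4-7 in PAT_HI.
- */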
-
-static void chv_setup_private_ppat(struct intel_uncore *uncore)
-{
- u64 pat;
-
- /*
- * Map WB on BDW to snooped on CHV.
- *
- * Only the snoop bit has meaning for CHV, the rest is
- * ignored.
- *
- * The hardware will never snoop for certain types of accesses:
- * - CPU GTT (GMADR->GGTT->no snoop->memory)
- * - PPGTT page tables
- * - some other special cycles
- *
- * As with BDW, we also need to consider the following for GT accesses:
- * "For GGTT, there is NO pat_sel[2:0] from the entry,
- * so RTL will always use the value corresponding to
- * pat_sel = 000".
- * Which means we must set the snoop bit in PAT entry 0
- * in order to keep the global status page working.
- */
-
- pat = GEN8_PPAT(0, CHV_PPAT_SNOOP) |
- GEN8_PPAT(1, 0) |
- GEN8_PPAT(2, 0) |
- GEN8_PPAT(3, 0) |
- GEN8_PPAT(4, CHV_PPAT_SNOOP) |
- GEN8_PPAT(5, CHV_PPAT_SNOOP) |
- GEN8_PPAT(6, CHV_PPAT_SNOOP) |
- GEN8_PPAT(7, CHV_PPAT_SNOOP);
-
- intel_uncore_write(uncore, GEN8_PRIVATE_PAT_LO, lower_32_bits(pat));
- intel_uncore_write(uncore, GEN8_PRIVATE_PAT_HI, upper_32_bits(pat));
-}
-
-static void gen6_gmch_remove(struct i915_address_space *vm)
-{
- struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
-
- iounmap(ggtt->gsm);
- cleanup_scratch_page(vm);
-}
-
-static void setup_private_pat(struct intel_uncore *uncore)
-{
- struct drm_i915_private *i915 = uncore->i915;
-
- GEM_BUG_ON(INTEL_GEN(i915) < 8);
-
- if (INTEL_GEN(i915) >= 12)
- tgl_setup_private_ppat(uncore);
- else if (INTEL_GEN(i915) >= 10)
- cnl_setup_private_ppat(uncore);
- else if (IS_CHERRYVIEW(i915) || IS_GEN9_LP(i915))
- chv_setup_private_ppat(uncore);
- else
- bdw_setup_private_ppat(uncore);
-}
-
-static struct resource pci_resource(struct pci_dev *pdev, int bar)
-{
- return (struct resource)DEFINE_RES_MEM(pci_resource_start(pdev, bar),
- pci_resource_len(pdev, bar));
-}
-
-static int gen8_gmch_probe(struct i915_ggtt *ggtt)
-{
- struct drm_i915_private *dev_priv = ggtt->vm.i915;
- struct pci_dev *pdev = dev_priv->drm.pdev;
- unsigned int size;
- u16 snb_gmch_ctl;
- int err;
-
- /* TODO: We're not aware of mappable constraints on gen8 yet */
- if (!IS_DGFX(dev_priv)) {
- ggtt->gmadr = pci_resource(pdev, 2);
- ggtt->mappable_end = resource_size(&ggtt->gmadr);
- }
-
- err = pci_set_dma_mask(pdev, DMA_BIT_MASK(39));
- if (!err)
- err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(39));
- if (err)
- DRM_ERROR("Can't set DMA mask/consistent mask (%d)\n", err);
-
- pci_read_config_word(pdev, SNB_GMCH_CTRL, &snb_gmch_ctl);
- if (IS_CHERRYVIEW(dev_priv))
- size = chv_get_total_gtt_size(snb_gmch_ctl);
- else
- size = gen8_get_total_gtt_size(snb_gmch_ctl);
-
- ggtt->vm.total = (size / sizeof(gen8_pte_t)) * I915_GTT_PAGE_SIZE;
- ggtt->vm.cleanup = gen6_gmch_remove;
- ggtt->vm.insert_page = gen8_ggtt_insert_page;
- ggtt->vm.clear_range = nop_clear_range;
- if (intel_scanout_needs_vtd_wa(dev_priv))
- ggtt->vm.clear_range = gen8_ggtt_clear_range;
-
- ggtt->vm.insert_entries = gen8_ggtt_insert_entries;
-
- /* Serialize GTT updates with aperture access on BXT if VT-d is on. */
- if (intel_ggtt_update_needs_vtd_wa(dev_priv) ||
- IS_CHERRYVIEW(dev_priv) /* fails with concurrent use/update */) {
- ggtt->vm.insert_entries = bxt_vtd_ggtt_insert_entries__BKL;
- ggtt->vm.insert_page = bxt_vtd_ggtt_insert_page__BKL;
- if (ggtt->vm.clear_range != nop_clear_range)
- ggtt->vm.clear_range = bxt_vtd_ggtt_clear_range__BKL;
- }
-
- ggtt->invalidate = gen6_ggtt_invalidate;
-
- ggtt->vm.vma_ops.bind_vma = ggtt_bind_vma;
- ggtt->vm.vma_ops.unbind_vma = ggtt_unbind_vma;
- ggtt->vm.vma_ops.set_pages = ggtt_set_pages;
- ggtt->vm.vma_ops.clear_pages = clear_pages;
-
- ggtt->vm.pte_encode = gen8_pte_encode;
-
- setup_private_pat(ggtt->vm.gt->uncore);
-
- return ggtt_probe_common(ggtt, size);
-}
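
gen8_gmch_probe() sizes the virtual address space from the PTE array reported by the GMCH control register: size is in bytes of PTEs, each gen8 PTE is 8 bytes, and each PTE maps one 4 KiB page. A sketch of that arithmetic, assuming an illustrative (not probed) 8 MiB PTE array:

#include <stdint.h>
#include <stdio.h>

#define I915_GTT_PAGE_SIZE (1u << 12)   /* 4 KiB mapped per PTE */

int main(void)
{
        uint64_t size = 8ull << 20;     /* assumed 8 MiB of gen8 PTEs */
        uint64_t total = size / sizeof(uint64_t) * I915_GTT_PAGE_SIZE;

        /* 8 MiB of 8-byte PTEs maps 4 GiB of GGTT address space. */
        printf("GGTT total = %llu MiB\n", (unsigned long long)(total >> 20));
        return 0;
}
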
-
-static int gen6_gmch_probe(struct i915_ggtt *ggtt)
-{
- struct drm_i915_private *dev_priv = ggtt->vm.i915;
- struct pci_dev *pdev = dev_priv->drm.pdev;
- unsigned int size;
- u16 snb_gmch_ctl;
- int err;
-
- ggtt->gmadr =
- (struct resource) DEFINE_RES_MEM(pci_resource_start(pdev, 2),
- pci_resource_len(pdev, 2));
- ggtt->mappable_end = resource_size(&ggtt->gmadr);
-
- /* 64/512MB is the current min/max we actually know of, but this is just
- * a coarse sanity check.
- */
- if (ggtt->mappable_end < (64<<20) || ggtt->mappable_end > (512<<20)) {
- DRM_ERROR("Unknown GMADR size (%pa)\n", &ggtt->mappable_end);
- return -ENXIO;
- }
-
- err = pci_set_dma_mask(pdev, DMA_BIT_MASK(40));
- if (!err)
- err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(40));
- if (err)
- DRM_ERROR("Can't set DMA mask/consistent mask (%d)\n", err);
- pci_read_config_word(pdev, SNB_GMCH_CTRL, &snb_gmch_ctl);
-
- size = gen6_get_total_gtt_size(snb_gmch_ctl);
- ggtt->vm.total = (size / sizeof(gen6_pte_t)) * I915_GTT_PAGE_SIZE;
-
- ggtt->vm.clear_range = nop_clear_range;
- if (!HAS_FULL_PPGTT(dev_priv) || intel_scanout_needs_vtd_wa(dev_priv))
- ggtt->vm.clear_range = gen6_ggtt_clear_range;
- ggtt->vm.insert_page = gen6_ggtt_insert_page;
- ggtt->vm.insert_entries = gen6_ggtt_insert_entries;
- ggtt->vm.cleanup = gen6_gmch_remove;
-
- ggtt->invalidate = gen6_ggtt_invalidate;
-
- if (HAS_EDRAM(dev_priv))
- ggtt->vm.pte_encode = iris_pte_encode;
- else if (IS_HASWELL(dev_priv))
- ggtt->vm.pte_encode = hsw_pte_encode;
- else if (IS_VALLEYVIEW(dev_priv))
- ggtt->vm.pte_encode = byt_pte_encode;
- else if (INTEL_GEN(dev_priv) >= 7)
- ggtt->vm.pte_encode = ivb_pte_encode;
- else
- ggtt->vm.pte_encode = snb_pte_encode;
-
- ggtt->vm.vma_ops.bind_vma = ggtt_bind_vma;
- ggtt->vm.vma_ops.unbind_vma = ggtt_unbind_vma;
- ggtt->vm.vma_ops.set_pages = ggtt_set_pages;
- ggtt->vm.vma_ops.clear_pages = clear_pages;
-
- return ggtt_probe_common(ggtt, size);
-}
-
-static void i915_gmch_remove(struct i915_address_space *vm)
-{
- intel_gmch_remove();
-}
-
-static int i915_gmch_probe(struct i915_ggtt *ggtt)
-{
- struct drm_i915_private *dev_priv = ggtt->vm.i915;
- phys_addr_t gmadr_base;
- int ret;
-
- ret = intel_gmch_probe(dev_priv->bridge_dev, dev_priv->drm.pdev, NULL);
- if (!ret) {
- DRM_ERROR("failed to set up gmch\n");
- return -EIO;
- }
-
- intel_gtt_get(&ggtt->vm.total, &gmadr_base, &ggtt->mappable_end);
-
- ggtt->gmadr =
- (struct resource) DEFINE_RES_MEM(gmadr_base,
- ggtt->mappable_end);
-
- ggtt->do_idle_maps = needs_idle_maps(dev_priv);
- ggtt->vm.insert_page = i915_ggtt_insert_page;
- ggtt->vm.insert_entries = i915_ggtt_insert_entries;
- ggtt->vm.clear_range = i915_ggtt_clear_range;
- ggtt->vm.cleanup = i915_gmch_remove;
-
- ggtt->invalidate = gmch_ggtt_invalidate;
-
- ggtt->vm.vma_ops.bind_vma = ggtt_bind_vma;
- ggtt->vm.vma_ops.unbind_vma = ggtt_unbind_vma;
- ggtt->vm.vma_ops.set_pages = ggtt_set_pages;
- ggtt->vm.vma_ops.clear_pages = clear_pages;
-
- if (unlikely(ggtt->do_idle_maps))
- dev_notice(dev_priv->drm.dev,
- "Applying Ironlake quirks for intel_iommu\n");
-
- return 0;
-}
-
-static int ggtt_probe_hw(struct i915_ggtt *ggtt, struct intel_gt *gt)
-{
- struct drm_i915_private *i915 = gt->i915;
- int ret;
-
- ggtt->vm.gt = gt;
- ggtt->vm.i915 = i915;
- ggtt->vm.dma = &i915->drm.pdev->dev;
-
- if (INTEL_GEN(i915) <= 5)
- ret = i915_gmch_probe(ggtt);
- else if (INTEL_GEN(i915) < 8)
- ret = gen6_gmch_probe(ggtt);
- else
- ret = gen8_gmch_probe(ggtt);
- if (ret)
- return ret;
-
- if ((ggtt->vm.total - 1) >> 32) {
- DRM_ERROR("We never expected a Global GTT with more than 32bits"
- " of address space! Found %lldM!\n",
- ggtt->vm.total >> 20);
- ggtt->vm.total = 1ULL << 32;
- ggtt->mappable_end =
- min_t(u64, ggtt->mappable_end, ggtt->vm.total);
- }
-
- if (ggtt->mappable_end > ggtt->vm.total) {
- DRM_ERROR("mappable aperture extends past end of GGTT,"
- " aperture=%pa, total=%llx\n",
- &ggtt->mappable_end, ggtt->vm.total);
- ggtt->mappable_end = ggtt->vm.total;
- }
-
- /* GMADR is the PCI mmio aperture into the global GTT. */
- DRM_DEBUG_DRIVER("GGTT size = %lluM\n", ggtt->vm.total >> 20);
- DRM_DEBUG_DRIVER("GMADR size = %lluM\n", (u64)ggtt->mappable_end >> 20);
- DRM_DEBUG_DRIVER("DSM size = %lluM\n",
- (u64)resource_size(&intel_graphics_stolen_res) >> 20);
-
- return 0;
-}
-
-/**
- * i915_ggtt_probe_hw - Probe GGTT hardware location
- * @i915: i915 device
- */
-int i915_ggtt_probe_hw(struct drm_i915_private *i915)
-{
- int ret;
-
- ret = ggtt_probe_hw(&i915->ggtt, &i915->gt);
- if (ret)
- return ret;
-
- if (intel_vtd_active())
- dev_info(i915->drm.dev, "VT-d active for gfx access\n");
-
- return 0;
-}
-
-static int ggtt_init_hw(struct i915_ggtt *ggtt)
-{
- struct drm_i915_private *i915 = ggtt->vm.i915;
-
- i915_address_space_init(&ggtt->vm, VM_CLASS_GGTT);
-
- ggtt->vm.is_ggtt = true;
-
- /* Only VLV supports read-only GGTT mappings */
- ggtt->vm.has_read_only = IS_VALLEYVIEW(i915);
-
- if (!HAS_LLC(i915) && !HAS_PPGTT(i915))
- ggtt->vm.mm.color_adjust = i915_ggtt_color_adjust;
-
- if (ggtt->mappable_end) {
- if (!io_mapping_init_wc(&ggtt->iomap,
- ggtt->gmadr.start,
- ggtt->mappable_end)) {
- ggtt->vm.cleanup(&ggtt->vm);
- return -EIO;
- }
-
- ggtt->mtrr = arch_phys_wc_add(ggtt->gmadr.start,
- ggtt->mappable_end);
- }
-
- i915_ggtt_init_fences(ggtt);
-
- return 0;
-}
-
-/**
- * i915_ggtt_init_hw - Initialize GGTT hardware
- * @dev_priv: i915 device
- */
-int i915_ggtt_init_hw(struct drm_i915_private *dev_priv)
-{
- int ret;
-
- stash_init(&dev_priv->mm.wc_stash);
-
- /* Note that we use page colouring to enforce a guard page at the
- * end of the address space. This is required as the CS may prefetch
- * beyond the end of the batch buffer, across the page boundary,
- * and beyond the end of the GTT if we do not provide a guard.
- */
- ret = ggtt_init_hw(&dev_priv->ggtt);
- if (ret)
- return ret;
-
- return 0;
-}
-
-int i915_ggtt_enable_hw(struct drm_i915_private *dev_priv)
-{
- if (INTEL_GEN(dev_priv) < 6 && !intel_enable_gtt())
- return -EIO;
-
- return 0;
-}
-
-void i915_ggtt_enable_guc(struct i915_ggtt *ggtt)
-{
- GEM_BUG_ON(ggtt->invalidate != gen6_ggtt_invalidate);
-
- ggtt->invalidate = guc_ggtt_invalidate;
-
- ggtt->invalidate(ggtt);
-}
-
-void i915_ggtt_disable_guc(struct i915_ggtt *ggtt)
-{
- /* XXX Temporary pardon for error unload */
- if (ggtt->invalidate == gen6_ggtt_invalidate)
- return;
-
- /* We should only be called after i915_ggtt_enable_guc() */
- GEM_BUG_ON(ggtt->invalidate != guc_ggtt_invalidate);
-
- ggtt->invalidate = gen6_ggtt_invalidate;
-
- ggtt->invalidate(ggtt);
-}
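
The GuC enable/disable pair above is a plain vfunc swap: replace ggtt->invalidate and call it once so the new path takes effect immediately. A stripped-down sketch of the pattern, with hypothetical types and callbacks standing in for the driver's:

#include <assert.h>
#include <stdio.h>

struct ggtt {
        void (*invalidate)(struct ggtt *ggtt);
};

static void direct_invalidate(struct ggtt *ggtt)
{
        printf("invalidate via MMIO write\n");
}

static void guc_invalidate(struct ggtt *ggtt)
{
        printf("invalidate via GuC notification\n");
}

static void enable_guc(struct ggtt *ggtt)
{
        assert(ggtt->invalidate == direct_invalidate);
        ggtt->invalidate = guc_invalidate;
        ggtt->invalidate(ggtt); /* flush once through the new path */
}

int main(void)
{
        struct ggtt ggtt = { .invalidate = direct_invalidate };

        enable_guc(&ggtt);
        return 0;
}
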
-
-static void ggtt_restore_mappings(struct i915_ggtt *ggtt)
-{
- struct i915_vma *vma, *vn;
- bool flush = false;
- int open;
-
- intel_gt_check_and_clear_faults(ggtt->vm.gt);
-
- mutex_lock(&ggtt->vm.mutex);
-
- /* First fill our portion of the GTT with scratch pages */
- ggtt->vm.clear_range(&ggtt->vm, 0, ggtt->vm.total);
-
- /* Skip rewriting PTE on VMA unbind. */
- open = atomic_xchg(&ggtt->vm.open, 0);
-
- /* clflush objects bound into the GGTT and rebind them. */
- list_for_each_entry_safe(vma, vn, &ggtt->vm.bound_list, vm_link) {
- struct drm_i915_gem_object *obj = vma->obj;
-
- if (!i915_vma_is_bound(vma, I915_VMA_GLOBAL_BIND))
- continue;
-
- if (!__i915_vma_unbind(vma))
- continue;
-
- clear_bit(I915_VMA_GLOBAL_BIND_BIT, __i915_vma_flags(vma));
- WARN_ON(i915_vma_bind(vma,
- obj ? obj->cache_level : 0,
- PIN_GLOBAL, NULL));
- if (obj) { /* only used during resume => exclusive access */
- flush |= fetch_and_zero(&obj->write_domain);
- obj->read_domains |= I915_GEM_DOMAIN_GTT;
- }
- }
-
- atomic_set(&ggtt->vm.open, open);
- ggtt->invalidate(ggtt);
-
- mutex_unlock(&ggtt->vm.mutex);
-
- if (flush)
- wbinvd_on_all_cpus();
-}
-
-void i915_gem_restore_gtt_mappings(struct drm_i915_private *i915)
-{
- struct i915_ggtt *ggtt = &i915->ggtt;
-
- ggtt_restore_mappings(ggtt);
-
- if (INTEL_GEN(i915) >= 8)
- setup_private_pat(ggtt->vm.gt->uncore);
-}
-
-static struct scatterlist *
-rotate_pages(struct drm_i915_gem_object *obj, unsigned int offset,
- unsigned int width, unsigned int height,
- unsigned int stride,
- struct sg_table *st, struct scatterlist *sg)
-{
- unsigned int column, row;
- unsigned int src_idx;
-
- for (column = 0; column < width; column++) {
- src_idx = stride * (height - 1) + column + offset;
- for (row = 0; row < height; row++) {
- st->nents++;
- /* We don't need the pages, but need to initialize
- * the entries so the sg list can be happily traversed.
- * The only things we need are the DMA addresses.
- */
- sg_set_page(sg, NULL, I915_GTT_PAGE_SIZE, 0);
- sg_dma_address(sg) =
- i915_gem_object_get_dma_address(obj, src_idx);
- sg_dma_len(sg) = I915_GTT_PAGE_SIZE;
- sg = sg_next(sg);
- src_idx -= stride;
- }
- }
-
- return sg;
-}
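
rotate_pages() visits the object column by column, starting each column at the bottom row (stride * (height - 1)) and stepping upward by subtracting the stride, which produces the page order of a 90-degree-rotated view. A sketch that only prints that visit order, for illustrative 3x2 tiles with stride 4:

#include <stdio.h>

int main(void)
{
        unsigned int width = 3, height = 2, stride = 4, offset = 0;
        unsigned int column, row, src_idx;

        for (column = 0; column < width; column++) {
                src_idx = stride * (height - 1) + column + offset;
                for (row = 0; row < height; row++) {
                        /* Each visited page becomes the next sg entry. */
                        printf("dst %u <- src page %u\n",
                               column * height + row, src_idx);
                        src_idx -= stride;
                }
        }
        return 0;
}
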
-
-static noinline struct sg_table *
-intel_rotate_pages(struct intel_rotation_info *rot_info,
- struct drm_i915_gem_object *obj)
-{
- unsigned int size = intel_rotation_info_size(rot_info);
- struct sg_table *st;
- struct scatterlist *sg;
- int ret = -ENOMEM;
- int i;
-
- /* Allocate target SG list. */
- st = kmalloc(sizeof(*st), GFP_KERNEL);
- if (!st)
- goto err_st_alloc;
-
- ret = sg_alloc_table(st, size, GFP_KERNEL);
- if (ret)
- goto err_sg_alloc;
-
- st->nents = 0;
- sg = st->sgl;
-
- for (i = 0 ; i < ARRAY_SIZE(rot_info->plane); i++) {
- sg = rotate_pages(obj, rot_info->plane[i].offset,
- rot_info->plane[i].width, rot_info->plane[i].height,
- rot_info->plane[i].stride, st, sg);
- }
-
- return st;
-
-err_sg_alloc:
- kfree(st);
-err_st_alloc:
-
- DRM_DEBUG_DRIVER("Failed to create rotated mapping for object size %zu! (%ux%u tiles, %u pages)\n",
- obj->base.size, rot_info->plane[0].width, rot_info->plane[0].height, size);
-
- return ERR_PTR(ret);
-}
-
-static struct scatterlist *
-remap_pages(struct drm_i915_gem_object *obj, unsigned int offset,
- unsigned int width, unsigned int height,
- unsigned int stride,
- struct sg_table *st, struct scatterlist *sg)
-{
- unsigned int row;
-
- for (row = 0; row < height; row++) {
- unsigned int left = width * I915_GTT_PAGE_SIZE;
-
- while (left) {
- dma_addr_t addr;
- unsigned int length;
-
- /* We don't need the pages, but need to initialize
- * the entries so the sg list can be happily traversed.
- * The only things we need are the DMA addresses.
- */
-
- addr = i915_gem_object_get_dma_address_len(obj, offset, &length);
-
- length = min(left, length);
-
- st->nents++;
-
- sg_set_page(sg, NULL, length, 0);
- sg_dma_address(sg) = addr;
- sg_dma_len(sg) = length;
- sg = sg_next(sg);
-
- offset += length / I915_GTT_PAGE_SIZE;
- left -= length;
- }
-
- offset += stride - width;
- }
-
- return sg;
-}
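
remap_pages() keeps rows in their natural order but coalesces physically contiguous runs: i915_gem_object_get_dma_address_len() reports the run length at the current page, and the loop clips it to what remains of the row before emitting one sg entry per run. A sketch of just that clipping, with a made-up run_len() standing in for the object's real extents:

#include <stdio.h>

#define PAGE_SZ 4096u

/* Fake object: byte length of the contiguous run starting at each page. */
static unsigned int run_len(unsigned int page)
{
        return (page % 3 + 1) * PAGE_SZ;        /* arbitrary run lengths */
}

int main(void)
{
        unsigned int width = 4, height = 2, stride = 6, offset = 0;
        unsigned int row;

        for (row = 0; row < height; row++) {
                unsigned int left = width * PAGE_SZ;

                while (left) {
                        unsigned int length = run_len(offset);

                        if (length > left)
                                length = left;  /* clip to end of row */

                        printf("sg entry: pages %u..%u\n",
                               offset, offset + length / PAGE_SZ - 1);

                        offset += length / PAGE_SZ;
                        left -= length;
                }
                offset += stride - width;       /* jump to the next row */
        }
        return 0;
}
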
-
-static noinline struct sg_table *
-intel_remap_pages(struct intel_remapped_info *rem_info,
- struct drm_i915_gem_object *obj)
-{
- unsigned int size = intel_remapped_info_size(rem_info);
- struct sg_table *st;
- struct scatterlist *sg;
- int ret = -ENOMEM;
- int i;
-
- /* Allocate target SG list. */
- st = kmalloc(sizeof(*st), GFP_KERNEL);
- if (!st)
- goto err_st_alloc;
-
- ret = sg_alloc_table(st, size, GFP_KERNEL);
- if (ret)
- goto err_sg_alloc;
-
- st->nents = 0;
- sg = st->sgl;
-
- for (i = 0 ; i < ARRAY_SIZE(rem_info->plane); i++) {
- sg = remap_pages(obj, rem_info->plane[i].offset,
- rem_info->plane[i].width, rem_info->plane[i].height,
- rem_info->plane[i].stride, st, sg);
- }
-
- i915_sg_trim(st);
-
- return st;
-
-err_sg_alloc:
- kfree(st);
-err_st_alloc:
-
- DRM_DEBUG_DRIVER("Failed to create remapped mapping for object size %zu! (%ux%u tiles, %u pages)\n",
- obj->base.size, rem_info->plane[0].width, rem_info->plane[0].height, size);
-
- return ERR_PTR(ret);
-}
-
-static noinline struct sg_table *
-intel_partial_pages(const struct i915_ggtt_view *view,
- struct drm_i915_gem_object *obj)
-{
- struct sg_table *st;
- struct scatterlist *sg, *iter;
- unsigned int count = view->partial.size;
- unsigned int offset;
- int ret = -ENOMEM;
-
- st = kmalloc(sizeof(*st), GFP_KERNEL);
- if (!st)
- goto err_st_alloc;
-
- ret = sg_alloc_table(st, count, GFP_KERNEL);
- if (ret)
- goto err_sg_alloc;
-
- iter = i915_gem_object_get_sg(obj, view->partial.offset, &offset);
- GEM_BUG_ON(!iter);
-
- sg = st->sgl;
- st->nents = 0;
- do {
- unsigned int len;
-
- len = min(iter->length - (offset << PAGE_SHIFT),
- count << PAGE_SHIFT);
- sg_set_page(sg, NULL, len, 0);
- sg_dma_address(sg) =
- sg_dma_address(iter) + (offset << PAGE_SHIFT);
- sg_dma_len(sg) = len;
-
- st->nents++;
- count -= len >> PAGE_SHIFT;
- if (count == 0) {
- sg_mark_end(sg);
- i915_sg_trim(st); /* Drop any unused tail entries. */
-
- return st;
- }
-
- sg = __sg_next(sg);
- iter = __sg_next(iter);
- offset = 0;
- } while (1);
-
-err_sg_alloc:
- kfree(st);
-err_st_alloc:
- return ERR_PTR(ret);
-}
-
-static int
-i915_get_ggtt_vma_pages(struct i915_vma *vma)
-{
- int ret;
-
- /* The vma->pages are only valid within the lifespan of the borrowed
- * obj->mm.pages. When the obj->mm.pages sg_table is regenerated, so
- * must be the vma->pages. A simple rule is that vma->pages must only
- * be accessed when the obj->mm.pages are pinned.
- */
- GEM_BUG_ON(!i915_gem_object_has_pinned_pages(vma->obj));
-
- switch (vma->ggtt_view.type) {
- default:
- GEM_BUG_ON(vma->ggtt_view.type);
- /* fall through */
- case I915_GGTT_VIEW_NORMAL:
- vma->pages = vma->obj->mm.pages;
- return 0;
-
- case I915_GGTT_VIEW_ROTATED:
- vma->pages =
- intel_rotate_pages(&vma->ggtt_view.rotated, vma->obj);
- break;
-
- case I915_GGTT_VIEW_REMAPPED:
- vma->pages =
- intel_remap_pages(&vma->ggtt_view.remapped, vma->obj);
- break;
-
- case I915_GGTT_VIEW_PARTIAL:
- vma->pages = intel_partial_pages(&vma->ggtt_view, vma->obj);
- break;
- }
-
- ret = 0;
- if (IS_ERR(vma->pages)) {
- ret = PTR_ERR(vma->pages);
- vma->pages = NULL;
- DRM_ERROR("Failed to get pages for VMA view type %u (%d)!\n",
- vma->ggtt_view.type, ret);
- }
- return ret;
-}
-
/**
* i915_gem_gtt_reserve - reserve a node in an address_space (GTT)
* @vm: the &struct i915_address_space
@@ -3828,6 +293,5 @@ int i915_gem_gtt_insert(struct i915_address_space *vm,
}
#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
-#include "selftests/mock_gtt.c"
#include "selftests/i915_gem_gtt.c"
#endif
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.h b/drivers/gpu/drm/i915/i915_gem_gtt.h
index 402283ce2864..f6226df9f972 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.h
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.h
@@ -1,638 +1,21 @@
+/* SPDX-License-Identifier: MIT */
/*
- * Copyright © 2014 Intel Corporation
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
- * IN THE SOFTWARE.
- *
- * Please try to maintain the following order within this file unless it makes
- * sense to do otherwise. From top to bottom:
- * 1. typedefs
- * 2. #defines, and macros
- * 3. structure definitions
- * 4. function prototypes
- *
- * Within each section, please try to order by generation in ascending order,
- * from top to bottom (ie. gen6 on the top, gen8 on the bottom).
+ * Copyright © 2020 Intel Corporation
*/
#ifndef __I915_GEM_GTT_H__
#define __I915_GEM_GTT_H__
#include <linux/io-mapping.h>
-#include <linux/kref.h>
-#include <linux/mm.h>
-#include <linux/pagevec.h>
-#include <linux/workqueue.h>
+#include <linux/types.h>
#include <drm/drm_mm.h>
-#include "gt/intel_reset.h"
-#include "i915_gem_fence_reg.h"
-#include "i915_request.h"
+#include "gt/intel_gtt.h"
#include "i915_scatterlist.h"
-#include "i915_selftest.h"
-#include "gt/intel_timeline.h"
-#define I915_GTT_PAGE_SIZE_4K BIT_ULL(12)
-#define I915_GTT_PAGE_SIZE_64K BIT_ULL(16)
-#define I915_GTT_PAGE_SIZE_2M BIT_ULL(21)
-
-#define I915_GTT_PAGE_SIZE I915_GTT_PAGE_SIZE_4K
-#define I915_GTT_MAX_PAGE_SIZE I915_GTT_PAGE_SIZE_2M
-
-#define I915_GTT_PAGE_MASK -I915_GTT_PAGE_SIZE
-
-#define I915_GTT_MIN_ALIGNMENT I915_GTT_PAGE_SIZE
-
-#define I915_FENCE_REG_NONE -1
-#define I915_MAX_NUM_FENCES 32
-/* 32 fences + sign bit for FENCE_REG_NONE */
-#define I915_MAX_NUM_FENCE_BITS 6
-
-struct drm_i915_file_private;
struct drm_i915_gem_object;
-struct i915_vma;
-struct intel_gt;
-
-typedef u32 gen6_pte_t;
-typedef u64 gen8_pte_t;
-
-#define ggtt_total_entries(ggtt) ((ggtt)->vm.total >> PAGE_SHIFT)
-
-/* gen6-hsw has bit 11-4 for physical addr bit 39-32 */
-#define GEN6_GTT_ADDR_ENCODE(addr) ((addr) | (((addr) >> 28) & 0xff0))
-#define GEN6_PTE_ADDR_ENCODE(addr) GEN6_GTT_ADDR_ENCODE(addr)
-#define GEN6_PDE_ADDR_ENCODE(addr) GEN6_GTT_ADDR_ENCODE(addr)
-#define GEN6_PTE_CACHE_LLC (2 << 1)
-#define GEN6_PTE_UNCACHED (1 << 1)
-#define GEN6_PTE_VALID (1 << 0)
-
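
GEN6_GTT_ADDR_ENCODE() above folds physical-address bits 39:32 into PTE bits 11:4: shifting right by 28 lands bit 32 at bit 4, and 0xff0 masks off bits 11:4. A quick check of that encoding:

#include <stdint.h>
#include <stdio.h>

#define GEN6_GTT_ADDR_ENCODE(addr) ((addr) | (((addr) >> 28) & 0xff0))

int main(void)
{
        /* A 4 KiB-aligned address whose physical bits 39:32 are 0xab. */
        uint64_t addr = 0xab00001000ULL;
        uint64_t pte = GEN6_GTT_ADDR_ENCODE(addr);

        /* Bits 11:4 of the PTE now carry 0xab (printed as 0xab0). */
        printf("pte low bits = 0x%03llx\n", (unsigned long long)(pte & 0xfff));
        return 0;
}
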
-#define I915_PTES(pte_len) ((unsigned int)(PAGE_SIZE / (pte_len)))
-#define I915_PTE_MASK(pte_len) (I915_PTES(pte_len) - 1)
-#define I915_PDES 512
-#define I915_PDE_MASK (I915_PDES - 1)
-#define NUM_PTE(pde_shift) (1 << (pde_shift - PAGE_SHIFT))
-
-#define GEN6_PTES I915_PTES(sizeof(gen6_pte_t))
-#define GEN6_PD_SIZE (I915_PDES * PAGE_SIZE)
-#define GEN6_PD_ALIGN (PAGE_SIZE * 16)
-#define GEN6_PDE_SHIFT 22
-#define GEN6_PDE_VALID (1 << 0)
-
-#define GEN7_PTE_CACHE_L3_LLC (3 << 1)
-
-#define BYT_PTE_SNOOPED_BY_CPU_CACHES (1 << 2)
-#define BYT_PTE_WRITEABLE (1 << 1)
-
-/* Cacheability Control is a 4-bit value. The low three bits are stored in bits
- * 3:1 of the PTE, while the fourth bit is stored in bit 11 of the PTE.
- */
-#define HSW_CACHEABILITY_CONTROL(bits) ((((bits) & 0x7) << 1) | \
- (((bits) & 0x8) << (11 - 3)))
-#define HSW_WB_LLC_AGE3 HSW_CACHEABILITY_CONTROL(0x2)
-#define HSW_WB_LLC_AGE0 HSW_CACHEABILITY_CONTROL(0x3)
-#define HSW_WB_ELLC_LLC_AGE3 HSW_CACHEABILITY_CONTROL(0x8)
-#define HSW_WB_ELLC_LLC_AGE0 HSW_CACHEABILITY_CONTROL(0xb)
-#define HSW_WT_ELLC_LLC_AGE3 HSW_CACHEABILITY_CONTROL(0x7)
-#define HSW_WT_ELLC_LLC_AGE0 HSW_CACHEABILITY_CONTROL(0x6)
-#define HSW_PTE_UNCACHED (0)
-#define HSW_GTT_ADDR_ENCODE(addr) ((addr) | (((addr) >> 28) & 0x7f0))
-#define HSW_PTE_ADDR_ENCODE(addr) HSW_GTT_ADDR_ENCODE(addr)
-
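
Per the comment above, Haswell splits the 4-bit cacheability-control index across PTE bits 3:1 (low three bits) and PTE bit 11 (top bit). A one-file sketch confirming what HSW_CACHEABILITY_CONTROL() produces for two of the named indices:

#include <stdio.h>

#define HSW_CACHEABILITY_CONTROL(bits) ((((bits) & 0x7) << 1) | \
                                        (((bits) & 0x8) << (11 - 3)))

int main(void)
{
        /* Index 0x8 (WB, eLLC+LLC, age 3): only PTE bit 11 is set. */
        printf("0x%03x\n", HSW_CACHEABILITY_CONTROL(0x8));     /* 0x800 */
        /* Index 0x2 (WB, LLC, age 3): PTE bit 2 only. */
        printf("0x%03x\n", HSW_CACHEABILITY_CONTROL(0x2));     /* 0x004 */
        return 0;
}
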
-/*
- * GEN8 32b style address is defined as a 3 level page table:
- * 31:30 | 29:21 | 20:12 | 11:0
- * PDPE | PDE | PTE | offset
- * The difference as compared to normal x86 3 level page table is the PDPEs are
- * programmed via register.
- *
- * GEN8 48b style address is defined as a 4 level page table:
- * 47:39 | 38:30 | 29:21 | 20:12 | 11:0
- * PML4E | PDPE | PDE | PTE | offset
- */
-#define GEN8_3LVL_PDPES 4
-
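
The 48-bit layout in the comment above is nine index bits per level over a 12-bit page offset. A sketch that slices an arbitrary (illustrative) GPU address into its four level indices:

#include <stdint.h>
#include <stdio.h>

#define LVL_BITS 9
#define LVL_MASK ((1u << LVL_BITS) - 1)

int main(void)
{
        uint64_t addr = 0x0000123456789abcULL;  /* any 48-bit address */

        printf("pml4e %u pdpe %u pde %u pte %u offset 0x%llx\n",
               (unsigned int)((addr >> 39) & LVL_MASK),
               (unsigned int)((addr >> 30) & LVL_MASK),
               (unsigned int)((addr >> 21) & LVL_MASK),
               (unsigned int)((addr >> 12) & LVL_MASK),
               (unsigned long long)(addr & 0xfff));
        return 0;
}
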
-#define PPAT_UNCACHED (_PAGE_PWT | _PAGE_PCD)
-#define PPAT_CACHED_PDE 0 /* WB LLC */
-#define PPAT_CACHED _PAGE_PAT /* WB LLCeLLC */
-#define PPAT_DISPLAY_ELLC _PAGE_PCD /* WT eLLC */
-
-#define CHV_PPAT_SNOOP (1<<6)
-#define GEN8_PPAT_AGE(x) ((x)<<4)
-#define GEN8_PPAT_LLCeLLC (3<<2)
-#define GEN8_PPAT_LLCELLC (2<<2)
-#define GEN8_PPAT_LLC (1<<2)
-#define GEN8_PPAT_WB (3<<0)
-#define GEN8_PPAT_WT (2<<0)
-#define GEN8_PPAT_WC (1<<0)
-#define GEN8_PPAT_UC (0<<0)
-#define GEN8_PPAT_ELLC_OVERRIDE (0<<2)
-#define GEN8_PPAT(i, x) ((u64)(x) << ((i) * 8))
-
-#define GEN8_PDE_IPS_64K BIT(11)
-#define GEN8_PDE_PS_2M BIT(7)
-
-#define for_each_sgt_daddr(__dp, __iter, __sgt) \
- __for_each_sgt_daddr(__dp, __iter, __sgt, I915_GTT_PAGE_SIZE)
-
-struct intel_remapped_plane_info {
- /* in gtt pages */
- unsigned int width, height, stride, offset;
-} __packed;
-
-struct intel_remapped_info {
- struct intel_remapped_plane_info plane[2];
- unsigned int unused_mbz;
-} __packed;
-
-struct intel_rotation_info {
- struct intel_remapped_plane_info plane[2];
-} __packed;
-
-struct intel_partial_info {
- u64 offset;
- unsigned int size;
-} __packed;
-
-enum i915_ggtt_view_type {
- I915_GGTT_VIEW_NORMAL = 0,
- I915_GGTT_VIEW_ROTATED = sizeof(struct intel_rotation_info),
- I915_GGTT_VIEW_PARTIAL = sizeof(struct intel_partial_info),
- I915_GGTT_VIEW_REMAPPED = sizeof(struct intel_remapped_info),
-};
-
-static inline void assert_i915_gem_gtt_types(void)
-{
- BUILD_BUG_ON(sizeof(struct intel_rotation_info) != 8*sizeof(unsigned int));
- BUILD_BUG_ON(sizeof(struct intel_partial_info) != sizeof(u64) + sizeof(unsigned int));
- BUILD_BUG_ON(sizeof(struct intel_remapped_info) != 9*sizeof(unsigned int));
-
- /* Check that rotation/remapped shares offsets for simplicity */
- BUILD_BUG_ON(offsetof(struct intel_remapped_info, plane[0]) !=
- offsetof(struct intel_rotation_info, plane[0]));
- BUILD_BUG_ON(offsetofend(struct intel_remapped_info, plane[1]) !=
- offsetofend(struct intel_rotation_info, plane[1]));
-
- /* As we encode the size of each branch inside the union into its type,
- * we have to be careful that each branch has a unique size.
- */
- switch ((enum i915_ggtt_view_type)0) {
- case I915_GGTT_VIEW_NORMAL:
- case I915_GGTT_VIEW_PARTIAL:
- case I915_GGTT_VIEW_ROTATED:
- case I915_GGTT_VIEW_REMAPPED:
- /* gcc complains if these are identical cases */
- break;
- }
-}
-
-struct i915_ggtt_view {
- enum i915_ggtt_view_type type;
- union {
- /* Members need to contain no holes/padding */
- struct intel_partial_info partial;
- struct intel_rotation_info rotated;
- struct intel_remapped_info remapped;
- };
-};
-
-enum i915_cache_level;
-
-struct i915_vma;
-
-struct i915_page_dma {
- struct page *page;
- union {
- dma_addr_t daddr;
-
- /* For gen6/gen7 only. This is the offset in the GGTT
- * where the page directory entries for PPGTT begin
- */
- u32 ggtt_offset;
- };
-};
-
-struct i915_page_scratch {
- struct i915_page_dma base;
- u64 encode;
-};
-
-struct i915_page_table {
- struct i915_page_dma base;
- atomic_t used;
-};
-
-struct i915_page_directory {
- struct i915_page_table pt;
- spinlock_t lock;
- void *entry[512];
-};
-
-#define __px_choose_expr(x, type, expr, other) \
- __builtin_choose_expr( \
- __builtin_types_compatible_p(typeof(x), type) || \
- __builtin_types_compatible_p(typeof(x), const type), \
- ({ type __x = (type)(x); expr; }), \
- other)
-
-#define px_base(px) \
- __px_choose_expr(px, struct i915_page_dma *, __x, \
- __px_choose_expr(px, struct i915_page_scratch *, &__x->base, \
- __px_choose_expr(px, struct i915_page_table *, &__x->base, \
- __px_choose_expr(px, struct i915_page_directory *, &__x->pt.base, \
- (void)0))))
-#define px_dma(px) (px_base(px)->daddr)
-
-#define px_pt(px) \
- __px_choose_expr(px, struct i915_page_table *, __x, \
- __px_choose_expr(px, struct i915_page_directory *, &__x->pt, \
- (void)0))
-#define px_used(px) (&px_pt(px)->used)
-
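
px_base() above is compile-time overloading in GNU C: __builtin_types_compatible_p() selects the branch matching the pointer's type, and __builtin_choose_expr() discards the others without evaluating them. A reduced two-level sketch of the same trick (GCC/Clang only; the struct names are made up):

#include <stdio.h>

struct base { int id; };
struct outer { struct base base; };

#define choose(x, type, expr, other) \
        __builtin_choose_expr( \
                __builtin_types_compatible_p(typeof(x), type), \
                ({ type __x = (type)(x); expr; }), \
                other)

/* Resolve either pointer type down to its embedded struct base. */
#define to_base(px) \
        choose(px, struct base *, __x, \
        choose(px, struct outer *, &__x->base, \
        (void)0))

int main(void)
{
        struct base b = { .id = 1 };
        struct outer o = { .base = { .id = 2 } };

        printf("%d %d\n", to_base(&b)->id, to_base(&o)->id);
        return 0;
}
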
-struct i915_vma_ops {
- /* Map an object into an address space with the given cache flags. */
- int (*bind_vma)(struct i915_vma *vma,
- enum i915_cache_level cache_level,
- u32 flags);
- /*
- * Unmap an object from an address space. This usually consists of
- * setting the valid PTE entries to a reserved scratch page.
- */
- void (*unbind_vma)(struct i915_vma *vma);
-
- int (*set_pages)(struct i915_vma *vma);
- void (*clear_pages)(struct i915_vma *vma);
-};
-
-struct pagestash {
- spinlock_t lock;
- struct pagevec pvec;
-};
-
-struct i915_address_space {
- struct kref ref;
- struct rcu_work rcu;
-
- struct drm_mm mm;
- struct intel_gt *gt;
- struct drm_i915_private *i915;
- struct device *dma;
- /* Every address space belongs to a struct file - except for the global
- * GTT that is owned by the driver (and so @file is set to NULL). In
- * principle, no information should leak from one context to another
- * (or between files/processes etc) unless explicitly shared by the
- * owner. Tracking the owner is important in order to free up per-file
- * objects along with the file, to aid resource tracking, and to
- * assign blame.
- */
- struct drm_i915_file_private *file;
- u64 total; /* size addr space maps (ex. 2GB for ggtt) */
- u64 reserved; /* size addr space reserved */
-
- unsigned int bind_async_flags;
-
- /*
- * Each active user context has its own address space (in full-ppgtt).
- * Since the vm may be shared between multiple contexts, we count how
- * many contexts keep us "open". Once open hits zero, we are closed
- * and do not allow any new attachments, and proceed to shutdown our
- * vma and page directories.
- */
- atomic_t open;
-
- struct mutex mutex; /* protects vma and our lists */
-#define VM_CLASS_GGTT 0
-#define VM_CLASS_PPGTT 1
-
- struct i915_page_scratch scratch[4];
- unsigned int scratch_order;
- unsigned int top;
-
- /**
- * List of vma currently bound.
- */
- struct list_head bound_list;
-
- struct pagestash free_pages;
-
- /* Global GTT */
- bool is_ggtt:1;
-
- /* Some systems require uncached updates of the page directories */
- bool pt_kmap_wc:1;
-
- /* Some systems support read-only mappings for GGTT and/or PPGTT */
- bool has_read_only:1;
-
- u64 (*pte_encode)(dma_addr_t addr,
- enum i915_cache_level level,
- u32 flags); /* Create a valid PTE */
-#define PTE_READ_ONLY (1<<0)
-
- int (*allocate_va_range)(struct i915_address_space *vm,
- u64 start, u64 length);
- void (*clear_range)(struct i915_address_space *vm,
- u64 start, u64 length);
- void (*insert_page)(struct i915_address_space *vm,
- dma_addr_t addr,
- u64 offset,
- enum i915_cache_level cache_level,
- u32 flags);
- void (*insert_entries)(struct i915_address_space *vm,
- struct i915_vma *vma,
- enum i915_cache_level cache_level,
- u32 flags);
- void (*cleanup)(struct i915_address_space *vm);
-
- struct i915_vma_ops vma_ops;
-
- I915_SELFTEST_DECLARE(struct fault_attr fault_attr);
- I915_SELFTEST_DECLARE(bool scrub_64K);
-};
-
-#define i915_is_ggtt(vm) ((vm)->is_ggtt)
-
-static inline bool
-i915_vm_is_4lvl(const struct i915_address_space *vm)
-{
- return (vm->total - 1) >> 32;
-}
-
-static inline bool
-i915_vm_has_scratch_64K(struct i915_address_space *vm)
-{
- return vm->scratch_order == get_order(I915_GTT_PAGE_SIZE_64K);
-}
-
-static inline bool
-i915_vm_has_cache_coloring(struct i915_address_space *vm)
-{
- return i915_is_ggtt(vm) && vm->mm.color_adjust;
-}
-
-/* The Graphics Translation Table is the way in which GEN hardware translates a
- * Graphics Virtual Address into a Physical Address. In addition to the normal
- * collateral associated with any va->pa translations, GEN hardware also has a
- * portion of the GTT which can be mapped by the CPU and remain both coherent
- * and correct (in cases like swizzling). That region is referred to as GMADR in
- * the spec.
- */
-struct i915_ggtt {
- struct i915_address_space vm;
-
- struct io_mapping iomap; /* Mapping to our CPU mappable region */
- struct resource gmadr; /* GMADR resource */
- resource_size_t mappable_end; /* End offset that we can CPU map */
-
- /** "Graphics Stolen Memory" holds the global PTEs */
- void __iomem *gsm;
- void (*invalidate)(struct i915_ggtt *ggtt);
-
- /** PPGTT used for aliasing the PPGTT with the GTT */
- struct i915_ppgtt *alias;
-
- bool do_idle_maps;
-
- int mtrr;
-
- /** Bit 6 swizzling required for X tiling */
- u32 bit_6_swizzle_x;
- /** Bit 6 swizzling required for Y tiling */
- u32 bit_6_swizzle_y;
-
- u32 pin_bias;
-
- unsigned int num_fences;
- struct i915_fence_reg fence_regs[I915_MAX_NUM_FENCES];
- struct list_head fence_list;
-
- /** List of all objects in gtt_space, currently mmaped by userspace.
- * All objects within this list must also be on bound_list.
- */
- struct list_head userfault_list;
-
- /* Manual runtime pm autosuspend delay for user GGTT mmaps */
- struct intel_wakeref_auto userfault_wakeref;
-
- struct drm_mm_node error_capture;
- struct drm_mm_node uc_fw;
-};
-
-struct i915_ppgtt {
- struct i915_address_space vm;
-
- struct i915_page_directory *pd;
-};
-
-struct gen6_ppgtt {
- struct i915_ppgtt base;
-
- struct i915_vma *vma;
- gen6_pte_t __iomem *pd_addr;
-
- atomic_t pin_count;
- struct mutex pin_mutex;
-
- bool scan_for_unused_pt;
-};
-
-#define __to_gen6_ppgtt(base) container_of(base, struct gen6_ppgtt, base)
-
-static inline struct gen6_ppgtt *to_gen6_ppgtt(struct i915_ppgtt *base)
-{
- BUILD_BUG_ON(offsetof(struct gen6_ppgtt, base));
- return __to_gen6_ppgtt(base);
-}
-
-/*
- * gen6_for_each_pde() iterates over every pde from start until start+length.
- * If start and start+length are not PDE-aligned, the macro will round
- * down and up as needed. Start=0 and length=2G effectively iterates over
- * every PDE in the system. The macro modifies ALL its parameters except 'pd',
- * so each of the other parameters should preferably be a simple variable, or
- * at most an lvalue with no side-effects!
- */
-#define gen6_for_each_pde(pt, pd, start, length, iter) \
- for (iter = gen6_pde_index(start); \
- length > 0 && iter < I915_PDES && \
- (pt = i915_pt_entry(pd, iter), true); \
- ({ u32 temp = ALIGN(start+1, 1 << GEN6_PDE_SHIFT); \
- temp = min(temp - start, length); \
- start += temp, length -= temp; }), ++iter)
-
-#define gen6_for_all_pdes(pt, pd, iter) \
- for (iter = 0; \
- iter < I915_PDES && \
- (pt = i915_pt_entry(pd, iter), true); \
- ++iter)
-
-static inline u32 i915_pte_index(u64 address, unsigned int pde_shift)
-{
- const u32 mask = NUM_PTE(pde_shift) - 1;
-
- return (address >> PAGE_SHIFT) & mask;
-}
-
-/* Helper to count the number of PTEs within the given length. This count
- * does not cross a page table boundary, so the max value would be
- * GEN6_PTES for GEN6, and GEN8_PTES for GEN8.
-*/
-static inline u32 i915_pte_count(u64 addr, u64 length, unsigned int pde_shift)
-{
- const u64 mask = ~((1ULL << pde_shift) - 1);
- u64 end;
-
- GEM_BUG_ON(length == 0);
- GEM_BUG_ON(offset_in_page(addr | length));
-
- end = addr + length;
-
- if ((addr & mask) != (end & mask))
- return NUM_PTE(pde_shift) - i915_pte_index(addr, pde_shift);
-
- return i915_pte_index(end, pde_shift) - i915_pte_index(addr, pde_shift);
-}
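
i915_pte_count() clamps a range to a single page table: if [addr, addr + length) crosses a PDE boundary (4 MiB on gen6, since GEN6_PDE_SHIFT is 22), only the PTEs up to that boundary are counted. A self-contained sketch with the gen6 constants:

#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT      12
#define GEN6_PDE_SHIFT  22
#define NUM_PTE(s)      (1u << ((s) - PAGE_SHIFT))      /* 1024 per table */

static uint32_t pte_index(uint64_t addr)
{
        return (addr >> PAGE_SHIFT) & (NUM_PTE(GEN6_PDE_SHIFT) - 1);
}

static uint32_t pte_count(uint64_t addr, uint64_t length)
{
        const uint64_t mask = ~((1ull << GEN6_PDE_SHIFT) - 1);
        uint64_t end = addr + length;

        if ((addr & mask) != (end & mask))      /* crosses a PDE boundary */
                return NUM_PTE(GEN6_PDE_SHIFT) - pte_index(addr);

        return pte_index(end) - pte_index(addr);
}

int main(void)
{
        /* 8 MiB starting 16 KiB below a 4 MiB boundary: clamped to 4 PTEs. */
        printf("%u\n", pte_count((4ull << 20) - (16 << 10), 8ull << 20));
        /* 64 KiB within one table: exactly 16 PTEs. */
        printf("%u\n", pte_count(0, 64 << 10));
        return 0;
}
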
-
-static inline u32 i915_pde_index(u64 addr, u32 shift)
-{
- return (addr >> shift) & I915_PDE_MASK;
-}
-
-static inline u32 gen6_pte_index(u32 addr)
-{
- return i915_pte_index(addr, GEN6_PDE_SHIFT);
-}
-
-static inline u32 gen6_pte_count(u32 addr, u32 length)
-{
- return i915_pte_count(addr, length, GEN6_PDE_SHIFT);
-}
-
-static inline u32 gen6_pde_index(u32 addr)
-{
- return i915_pde_index(addr, GEN6_PDE_SHIFT);
-}
-
-static inline struct i915_page_table *
-i915_pt_entry(const struct i915_page_directory * const pd,
- const unsigned short n)
-{
- return pd->entry[n];
-}
-
-static inline struct i915_page_directory *
-i915_pd_entry(const struct i915_page_directory * const pdp,
- const unsigned short n)
-{
- return pdp->entry[n];
-}
-
-static inline dma_addr_t
-i915_page_dir_dma_addr(const struct i915_ppgtt *ppgtt, const unsigned int n)
-{
- struct i915_page_dma *pt = ppgtt->pd->entry[n];
-
- return px_dma(pt ?: px_base(&ppgtt->vm.scratch[ppgtt->vm.top]));
-}
-
-static inline struct i915_ggtt *
-i915_vm_to_ggtt(struct i915_address_space *vm)
-{
- BUILD_BUG_ON(offsetof(struct i915_ggtt, vm));
- GEM_BUG_ON(!i915_is_ggtt(vm));
- return container_of(vm, struct i915_ggtt, vm);
-}
-
-static inline struct i915_ppgtt *
-i915_vm_to_ppgtt(struct i915_address_space *vm)
-{
- BUILD_BUG_ON(offsetof(struct i915_ppgtt, vm));
- GEM_BUG_ON(i915_is_ggtt(vm));
- return container_of(vm, struct i915_ppgtt, vm);
-}
-
-int i915_ggtt_probe_hw(struct drm_i915_private *dev_priv);
-int i915_ggtt_init_hw(struct drm_i915_private *dev_priv);
-int i915_ggtt_enable_hw(struct drm_i915_private *dev_priv);
-void i915_ggtt_enable_guc(struct i915_ggtt *ggtt);
-void i915_ggtt_disable_guc(struct i915_ggtt *ggtt);
-int i915_init_ggtt(struct drm_i915_private *dev_priv);
-void i915_ggtt_driver_release(struct drm_i915_private *dev_priv);
-
-static inline bool i915_ggtt_has_aperture(const struct i915_ggtt *ggtt)
-{
- return ggtt->mappable_end > 0;
-}
-
-int i915_ppgtt_init_hw(struct intel_gt *gt);
-
-struct i915_ppgtt *i915_ppgtt_create(struct drm_i915_private *dev_priv);
-
-static inline struct i915_address_space *
-i915_vm_get(struct i915_address_space *vm)
-{
- kref_get(&vm->ref);
- return vm;
-}
-
-void i915_vm_release(struct kref *kref);
-
-static inline void i915_vm_put(struct i915_address_space *vm)
-{
- kref_put(&vm->ref, i915_vm_release);
-}
-
-static inline struct i915_address_space *
-i915_vm_open(struct i915_address_space *vm)
-{
- GEM_BUG_ON(!atomic_read(&vm->open));
- atomic_inc(&vm->open);
- return i915_vm_get(vm);
-}
-
-static inline bool
-i915_vm_tryopen(struct i915_address_space *vm)
-{
- if (atomic_add_unless(&vm->open, 1, 0))
- return i915_vm_get(vm);
-
- return false;
-}
-
-void __i915_vm_close(struct i915_address_space *vm);
-
-static inline void
-i915_vm_close(struct i915_address_space *vm)
-{
- GEM_BUG_ON(!atomic_read(&vm->open));
- if (atomic_dec_and_test(&vm->open))
- __i915_vm_close(vm);
-
- i915_vm_put(vm);
-}
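
The open/ref pair above layers a user count on top of the kref: the reference keeps the structure alive, while the open count gates teardown, and i915_vm_tryopen() only succeeds while the count is still non-zero via atomic_add_unless(). A C11 sketch of that "increment unless zero" primitive, which stdatomic has no direct equivalent for:

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

/* Increment *v unless it is zero; like atomic_add_unless(v, 1, 0). */
static bool open_unless_closed(atomic_int *v)
{
        int old = atomic_load(v);

        while (old != 0) {
                if (atomic_compare_exchange_weak(v, &old, old + 1))
                        return true;    /* still open: usage acquired */
        }
        return false;   /* already closed, do not resurrect it */
}

int main(void)
{
        atomic_int open = 1;
        bool ok;

        ok = open_unless_closed(&open);
        printf("tryopen: %d, open count now %d\n", ok, atomic_load(&open));

        atomic_store(&open, 0); /* the last user closed the vm */
        ok = open_unless_closed(&open);
        printf("tryopen after close: %d\n", ok);
        return 0;
}
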
-
-int gen6_ppgtt_pin(struct i915_ppgtt *base);
-void gen6_ppgtt_unpin(struct i915_ppgtt *base);
-void gen6_ppgtt_unpin_all(struct i915_ppgtt *base);
-
-void i915_gem_suspend_gtt_mappings(struct drm_i915_private *dev_priv);
-void i915_gem_restore_gtt_mappings(struct drm_i915_private *dev_priv);
+struct i915_address_space;
int __must_check i915_gem_gtt_prepare_pages(struct drm_i915_gem_object *obj,
struct sg_table *pages);
@@ -663,6 +46,6 @@ int i915_gem_gtt_insert(struct i915_address_space *vm,
#define PIN_GLOBAL BIT_ULL(10) /* I915_VMA_GLOBAL_BIND */
#define PIN_USER BIT_ULL(11) /* I915_VMA_LOCAL_BIND */
-#define PIN_OFFSET_MASK (-I915_GTT_PAGE_SIZE)
+#define PIN_OFFSET_MASK I915_GTT_PAGE_MASK
#endif
diff --git a/drivers/gpu/drm/i915/i915_getparam.c b/drivers/gpu/drm/i915/i915_getparam.c
index cf8a8c3ef047..54fce81d5724 100644
--- a/drivers/gpu/drm/i915/i915_getparam.c
+++ b/drivers/gpu/drm/i915/i915_getparam.c
@@ -2,6 +2,7 @@
* SPDX-License-Identifier: MIT
*/
+#include "gem/i915_gem_mman.h"
#include "gt/intel_engine_user.h"
#include "i915_drv.h"
diff --git a/drivers/gpu/drm/i915/i915_globals.c b/drivers/gpu/drm/i915/i915_globals.c
index be127cd28931..3aa213684293 100644
--- a/drivers/gpu/drm/i915/i915_globals.c
+++ b/drivers/gpu/drm/i915/i915_globals.c
@@ -20,7 +20,10 @@ static LIST_HEAD(globals);
static atomic_t active;
static atomic_t epoch;
static struct park_work {
- struct rcu_work work;
+ struct delayed_work work;
+ struct rcu_head rcu;
+ unsigned long flags;
+#define PENDING 0
int epoch;
} park;
@@ -37,11 +40,33 @@ static void i915_globals_shrink(void)
global->shrink();
}
+static void __i915_globals_grace(struct rcu_head *rcu)
+{
+ /* Ratelimit parking as shrinking is quite slow */
+ schedule_delayed_work(&park.work, round_jiffies_up_relative(2 * HZ));
+}
+
+static void __i915_globals_queue_rcu(void)
+{
+ park.epoch = atomic_inc_return(&epoch);
+ if (!atomic_read(&active)) {
+ init_rcu_head(&park.rcu);
+ call_rcu(&park.rcu, __i915_globals_grace);
+ }
+}
+
static void __i915_globals_park(struct work_struct *work)
{
+ destroy_rcu_head(&park.rcu);
+
/* Confirm nothing woke up in the last grace period */
- if (park.epoch == atomic_read(&epoch))
- i915_globals_shrink();
+ if (park.epoch != atomic_read(&epoch)) {
+ __i915_globals_queue_rcu();
+ return;
+ }
+
+ clear_bit(PENDING, &park.flags);
+ i915_globals_shrink();
}
void __init i915_global_register(struct i915_global *global)
@@ -85,7 +110,7 @@ int __init i915_globals_init(void)
}
}
- INIT_RCU_WORK(&park.work, __i915_globals_park);
+ INIT_DELAYED_WORK(&park.work, __i915_globals_park);
return 0;
}
@@ -103,8 +128,9 @@ void i915_globals_park(void)
if (!atomic_dec_and_test(&active))
return;
- park.epoch = atomic_inc_return(&epoch);
- queue_rcu_work(system_wq, &park.work);
+ /* Queue cleanup after the next RCU grace period has freed slabs */
+ if (!test_and_set_bit(PENDING, &park.flags))
+ __i915_globals_queue_rcu();
}
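
The rework above drops the rcu_work in favour of a delayed work gated by a manual PENDING bit: only the first park after activity queues the grace-period callback, and later parks merely bump the epoch. A simplified user-space sketch of the test-and-set gating (C11 atomics standing in for test_and_set_bit(), with the epoch handling reduced to a bare counter):

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static atomic_bool pending;
static atomic_int epoch;

static void queue_cleanup(void)
{
        printf("cleanup queued at epoch %d\n", atomic_load(&epoch));
}

static void park(void)
{
        atomic_fetch_add(&epoch, 1);    /* remember we went idle again */

        /* Only one cleanup may be in flight at a time. */
        if (!atomic_exchange(&pending, true))
                queue_cleanup();
}

int main(void)
{
        park();                         /* queues the cleanup */
        park();                         /* already pending: epoch bump only */
        atomic_store(&pending, false);  /* the worker ran and cleared the bit */
        park();                         /* queues again */
        return 0;
}
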
void i915_globals_unpark(void)
@@ -113,12 +139,21 @@ void i915_globals_unpark(void)
atomic_inc(&active);
}
+static void __exit __i915_globals_flush(void)
+{
+ atomic_inc(&active); /* skip shrinking */
+
+ rcu_barrier(); /* wait for the work to be queued */
+ flush_delayed_work(&park.work);
+
+ atomic_dec(&active);
+}
+
void __exit i915_globals_exit(void)
{
- /* Flush any residual park_work */
- atomic_inc(&epoch);
- flush_rcu_work(&park.work);
+ GEM_BUG_ON(atomic_read(&active));
+ __i915_globals_flush();
__i915_globals_cleanup();
/* And ensure that our DESTROY_BY_RCU slabs are truly destroyed */
diff --git a/drivers/gpu/drm/i915/i915_gpu_error.c b/drivers/gpu/drm/i915/i915_gpu_error.c
index 3c85cb0ee99f..4c1836f0a991 100644
--- a/drivers/gpu/drm/i915/i915_gpu_error.c
+++ b/drivers/gpu/drm/i915/i915_gpu_error.c
@@ -41,6 +41,7 @@
#include "gem/i915_gem_context.h"
#include "gem/i915_gem_lmem.h"
+#include "gt/intel_gt_pm.h"
#include "i915_drv.h"
#include "i915_gpu_error.h"
@@ -232,14 +233,13 @@ static void pool_free(struct pagevec *pv, void *addr)
#ifdef CONFIG_DRM_I915_COMPRESS_ERROR
-struct compress {
+struct i915_vma_compress {
struct pagevec pool;
struct z_stream_s zstream;
void *tmp;
- bool wc;
};
-static bool compress_init(struct compress *c)
+static bool compress_init(struct i915_vma_compress *c)
{
struct z_stream_s *zstream = &c->zstream;
@@ -261,7 +261,7 @@ static bool compress_init(struct compress *c)
return true;
}
-static bool compress_start(struct compress *c)
+static bool compress_start(struct i915_vma_compress *c)
{
struct z_stream_s *zstream = &c->zstream;
void *workspace = zstream->workspace;
@@ -272,8 +272,8 @@ static bool compress_start(struct compress *c)
return zlib_deflateInit(zstream, Z_DEFAULT_COMPRESSION) == Z_OK;
}
-static void *compress_next_page(struct compress *c,
- struct drm_i915_error_object *dst)
+static void *compress_next_page(struct i915_vma_compress *c,
+ struct i915_vma_coredump *dst)
{
void *page;
@@ -287,14 +287,15 @@ static void *compress_next_page(struct compress *c,
return dst->pages[dst->page_count++] = page;
}
-static int compress_page(struct compress *c,
+static int compress_page(struct i915_vma_compress *c,
void *src,
- struct drm_i915_error_object *dst)
+ struct i915_vma_coredump *dst,
+ bool wc)
{
struct z_stream_s *zstream = &c->zstream;
zstream->next_in = src;
- if (c->wc && c->tmp && i915_memcpy_from_wc(c->tmp, src, PAGE_SIZE))
+ if (wc && c->tmp && i915_memcpy_from_wc(c->tmp, src, PAGE_SIZE))
zstream->next_in = c->tmp;
zstream->avail_in = PAGE_SIZE;
@@ -318,8 +319,8 @@ static int compress_page(struct compress *c,
return 0;
}
-static int compress_flush(struct compress *c,
- struct drm_i915_error_object *dst)
+static int compress_flush(struct i915_vma_compress *c,
+ struct i915_vma_coredump *dst)
{
struct z_stream_s *zstream = &c->zstream;
@@ -347,12 +348,12 @@ end:
return 0;
}
-static void compress_finish(struct compress *c)
+static void compress_finish(struct i915_vma_compress *c)
{
zlib_deflateEnd(&c->zstream);
}
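
Each captured page is fed through a z_stream in PAGE_SIZE chunks; the kernel uses the zlib_-prefixed wrappers (zlib_deflateInit()/zlib_deflateEnd() above). A minimal user-space analogue with the standard zlib API, compressing one synthetic page in a single deflate call:

/* Build with: cc sketch.c -lz */
#include <stdio.h>
#include <string.h>
#include <zlib.h>

int main(void)
{
        unsigned char src[4096], dst[4096];
        z_stream zs;

        memset(src, 'A', sizeof(src));  /* a highly compressible "page" */
        memset(&zs, 0, sizeof(zs));

        if (deflateInit(&zs, Z_DEFAULT_COMPRESSION) != Z_OK)
                return 1;

        zs.next_in = src;
        zs.avail_in = sizeof(src);
        zs.next_out = dst;
        zs.avail_out = sizeof(dst);

        /* One page in, everything flushed out; the driver loops per page. */
        if (deflate(&zs, Z_FINISH) != Z_STREAM_END)
                return 1;

        printf("4096 -> %lu bytes\n", zs.total_out);
        deflateEnd(&zs);
        return 0;
}
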
-static void compress_fini(struct compress *c)
+static void compress_fini(struct i915_vma_compress *c)
{
kfree(c->zstream.workspace);
if (c->tmp)
@@ -367,24 +368,24 @@ static void err_compression_marker(struct drm_i915_error_state_buf *m)
#else
-struct compress {
+struct i915_vma_compress {
struct pagevec pool;
- bool wc;
};
-static bool compress_init(struct compress *c)
+static bool compress_init(struct i915_vma_compress *c)
{
return pool_init(&c->pool, ALLOW_FAIL) == 0;
}
-static bool compress_start(struct compress *c)
+static bool compress_start(struct i915_vma_compress *c)
{
return true;
}
-static int compress_page(struct compress *c,
+static int compress_page(struct i915_vma_compress *c,
void *src,
- struct drm_i915_error_object *dst)
+ struct i915_vma_coredump *dst,
+ bool wc)
{
void *ptr;
@@ -392,24 +393,24 @@ static int compress_page(struct compress *c,
if (!ptr)
return -ENOMEM;
- if (!(c->wc && i915_memcpy_from_wc(ptr, src, PAGE_SIZE)))
+ if (!(wc && i915_memcpy_from_wc(ptr, src, PAGE_SIZE)))
memcpy(ptr, src, PAGE_SIZE);
dst->pages[dst->page_count++] = ptr;
return 0;
}
-static int compress_flush(struct compress *c,
- struct drm_i915_error_object *dst)
+static int compress_flush(struct i915_vma_compress *c,
+ struct i915_vma_coredump *dst)
{
return 0;
}
-static void compress_finish(struct compress *c)
+static void compress_finish(struct i915_vma_compress *c)
{
}
-static void compress_fini(struct compress *c)
+static void compress_fini(struct i915_vma_compress *c)
{
pool_fini(&c->pool);
}
@@ -422,7 +423,7 @@ static void err_compression_marker(struct drm_i915_error_state_buf *m)
#endif
static void error_print_instdone(struct drm_i915_error_state_buf *m,
- const struct drm_i915_error_engine *ee)
+ const struct intel_engine_coredump *ee)
{
const struct sseu_dev_info *sseu = &RUNTIME_INFO(m->i915)->sseu;
int slice;
@@ -453,40 +454,56 @@ static void error_print_instdone(struct drm_i915_error_state_buf *m,
static void error_print_request(struct drm_i915_error_state_buf *m,
const char *prefix,
- const struct drm_i915_error_request *erq,
- const unsigned long epoch)
+ const struct i915_request_coredump *erq)
{
if (!erq->seqno)
return;
- err_printf(m, "%s pid %d, seqno %8x:%08x%s%s, prio %d, emitted %dms, start %08x, head %08x, tail %08x\n",
+ err_printf(m, "%s pid %d, seqno %8x:%08x%s%s, prio %d, start %08x, head %08x, tail %08x\n",
prefix, erq->pid, erq->context, erq->seqno,
test_bit(DMA_FENCE_FLAG_SIGNALED_BIT,
&erq->flags) ? "!" : "",
test_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT,
&erq->flags) ? "+" : "",
erq->sched_attr.priority,
- jiffies_to_msecs(erq->jiffies - epoch),
erq->start, erq->head, erq->tail);
}
static void error_print_context(struct drm_i915_error_state_buf *m,
const char *header,
- const struct drm_i915_error_context *ctx)
+ const struct i915_gem_context_coredump *ctx)
{
err_printf(m, "%s%s[%d] prio %d, guilty %d active %d\n",
header, ctx->comm, ctx->pid, ctx->sched_attr.priority,
ctx->guilty, ctx->active);
}
+static struct i915_vma_coredump *
+__find_vma(struct i915_vma_coredump *vma, const char *name)
+{
+ while (vma) {
+ if (strcmp(vma->name, name) == 0)
+ return vma;
+ vma = vma->next;
+ }
+
+ return NULL;
+}
+
+static struct i915_vma_coredump *
+find_batch(const struct intel_engine_coredump *ee)
+{
+ return __find_vma(ee->vma, "batch");
+}
+
static void error_print_engine(struct drm_i915_error_state_buf *m,
- const struct drm_i915_error_engine *ee,
- const unsigned long epoch)
+ const struct intel_engine_coredump *ee)
{
+ struct i915_vma_coredump *batch;
int n;
err_printf(m, "%s command stream:\n", ee->engine->name);
- err_printf(m, " IDLE?: %s\n", yesno(ee->idle));
+ err_printf(m, " CCID: 0x%08x\n", ee->ccid);
err_printf(m, " START: 0x%08x\n", ee->start);
err_printf(m, " HEAD: 0x%08x [0x%08x]\n", ee->head, ee->rq_head);
err_printf(m, " TAIL: 0x%08x [0x%08x, 0x%08x]\n",
@@ -501,9 +518,10 @@ static void error_print_engine(struct drm_i915_error_state_buf *m,
error_print_instdone(m, ee);
- if (ee->batchbuffer) {
- u64 start = ee->batchbuffer->gtt_offset;
- u64 end = start + ee->batchbuffer->gtt_size;
+ batch = find_batch(ee);
+ if (batch) {
+ u64 start = batch->gtt_offset;
+ u64 end = start + batch->gtt_size;
err_printf(m, " batch: [0x%08x_%08x, 0x%08x_%08x]\n",
upper_32_bits(start), lower_32_bits(start),
@@ -535,13 +553,11 @@ static void error_print_engine(struct drm_i915_error_state_buf *m,
ee->vm_info.pp_dir_base);
}
}
- err_printf(m, " ring->head: 0x%08x\n", ee->cpu_ring_head);
- err_printf(m, " ring->tail: 0x%08x\n", ee->cpu_ring_tail);
err_printf(m, " engine reset count: %u\n", ee->reset_count);
for (n = 0; n < ee->num_ports; n++) {
err_printf(m, " ELSP[%d]:", n);
- error_print_request(m, " ", &ee->execlist[n], epoch);
+ error_print_request(m, " ", &ee->execlist[n]);
}
error_print_context(m, " Active context: ", &ee->context);
@@ -556,38 +572,35 @@ void i915_error_printf(struct drm_i915_error_state_buf *e, const char *f, ...)
va_end(args);
}
-static void print_error_obj(struct drm_i915_error_state_buf *m,
+static void print_error_vma(struct drm_i915_error_state_buf *m,
const struct intel_engine_cs *engine,
- const char *name,
- const struct drm_i915_error_object *obj)
+ const struct i915_vma_coredump *vma)
{
char out[ASCII85_BUFSZ];
int page;
- if (!obj)
+ if (!vma)
return;
- if (name) {
- err_printf(m, "%s --- %s = 0x%08x %08x\n",
- engine ? engine->name : "global", name,
- upper_32_bits(obj->gtt_offset),
- lower_32_bits(obj->gtt_offset));
- }
+ err_printf(m, "%s --- %s = 0x%08x %08x\n",
+ engine ? engine->name : "global", vma->name,
+ upper_32_bits(vma->gtt_offset),
+ lower_32_bits(vma->gtt_offset));
- if (obj->gtt_page_sizes > I915_GTT_PAGE_SIZE_4K)
- err_printf(m, "gtt_page_sizes = 0x%08x\n", obj->gtt_page_sizes);
+ if (vma->gtt_page_sizes > I915_GTT_PAGE_SIZE_4K)
+ err_printf(m, "gtt_page_sizes = 0x%08x\n", vma->gtt_page_sizes);
err_compression_marker(m);
- for (page = 0; page < obj->page_count; page++) {
+ for (page = 0; page < vma->page_count; page++) {
int i, len;
len = PAGE_SIZE;
- if (page == obj->page_count - 1)
- len -= obj->unused;
+ if (page == vma->page_count - 1)
+ len -= vma->unused;
len = ascii85_encode_len(len);
for (i = 0; i < len; i++)
- err_puts(m, ascii85_encode(obj->pages[page][i], out));
+ err_puts(m, ascii85_encode(vma->pages[page][i], out));
}
err_puts(m, "\n");
}
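
print_error_vma() streams page contents as ascii85: each 32-bit word becomes at most five printable characters, and a zero word collapses to a single 'z' (the kernel helper is ascii85_encode() from <linux/ascii85.h>). A standalone sketch of that per-word encoding:

#include <stdint.h>
#include <stdio.h>

/* Encode one 32-bit word as ascii85: 'z' shorthand for 0, else 5 chars. */
static const char *a85(uint32_t v, char out[6])
{
        int i;

        if (v == 0)
                return "z";

        for (i = 4; i >= 0; i--) {
                out[i] = '!' + v % 85;
                v /= 85;
        }
        out[5] = '\0';
        return out;
}

int main(void)
{
        char buf[6];

        printf("%s\n", a85(0, buf));            /* "z" */
        printf("%s\n", a85(0x12345678, buf));   /* five chars from '!' up */
        return 0;
}
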
@@ -599,9 +612,10 @@ static void err_print_capabilities(struct drm_i915_error_state_buf *m,
{
struct drm_printer p = i915_error_printer(m);
- intel_device_info_dump_flags(info, &p);
+ intel_device_info_print_static(info, &p);
+ intel_device_info_print_runtime(runtime, &p);
+ intel_device_info_print_topology(&runtime->sseu, &p);
intel_driver_caps_print(caps, &p);
- intel_device_info_dump_topology(&runtime->sseu, &p);
}
static void err_print_params(struct drm_i915_error_state_buf *m,
@@ -625,18 +639,13 @@ static void err_print_pciid(struct drm_i915_error_state_buf *m,
}
static void err_print_uc(struct drm_i915_error_state_buf *m,
- const struct i915_error_uc *error_uc)
+ const struct intel_uc_coredump *error_uc)
{
struct drm_printer p = i915_error_printer(m);
- const struct i915_gpu_state *error =
- container_of(error_uc, typeof(*error), uc);
-
- if (!error->device_info.has_gt_uc)
- return;
intel_uc_fw_dump(&error_uc->guc_fw, &p);
intel_uc_fw_dump(&error_uc->huc_fw, &p);
- print_error_obj(m, NULL, "GuC log buffer", error_uc->guc_log);
+ print_error_vma(m, NULL, error_uc->guc_log);
}
static void err_free_sgl(struct scatterlist *sgl)
@@ -656,12 +665,69 @@ static void err_free_sgl(struct scatterlist *sgl)
}
}
+static void err_print_gt(struct drm_i915_error_state_buf *m,
+ struct intel_gt_coredump *gt)
+{
+ const struct intel_engine_coredump *ee;
+ int i;
+
+ err_printf(m, "GT awake: %s\n", yesno(gt->awake));
+ err_printf(m, "EIR: 0x%08x\n", gt->eir);
+ err_printf(m, "IER: 0x%08x\n", gt->ier);
+ for (i = 0; i < gt->ngtier; i++)
+ err_printf(m, "GTIER[%d]: 0x%08x\n", i, gt->gtier[i]);
+ err_printf(m, "PGTBL_ER: 0x%08x\n", gt->pgtbl_er);
+ err_printf(m, "FORCEWAKE: 0x%08x\n", gt->forcewake);
+ err_printf(m, "DERRMR: 0x%08x\n", gt->derrmr);
+
+ for (i = 0; i < gt->nfence; i++)
+ err_printf(m, " fence[%d] = %08llx\n", i, gt->fence[i]);
+
+ if (IS_GEN_RANGE(m->i915, 6, 11)) {
+ err_printf(m, "ERROR: 0x%08x\n", gt->error);
+ err_printf(m, "DONE_REG: 0x%08x\n", gt->done_reg);
+ }
+
+ if (INTEL_GEN(m->i915) >= 8)
+ err_printf(m, "FAULT_TLB_DATA: 0x%08x 0x%08x\n",
+ gt->fault_data1, gt->fault_data0);
+
+ if (IS_GEN(m->i915, 7))
+ err_printf(m, "ERR_INT: 0x%08x\n", gt->err_int);
+
+ if (IS_GEN_RANGE(m->i915, 8, 11))
+ err_printf(m, "GTT_CACHE_EN: 0x%08x\n", gt->gtt_cache);
+
+ if (IS_GEN(m->i915, 12))
+ err_printf(m, "AUX_ERR_DBG: 0x%08x\n", gt->aux_err);
+
+ if (INTEL_GEN(m->i915) >= 12) {
+ int i;
+
+ for (i = 0; i < GEN12_SFC_DONE_MAX; i++)
+ err_printf(m, " SFC_DONE[%d]: 0x%08x\n", i,
+ gt->sfc_done[i]);
+
+ err_printf(m, " GAM_DONE: 0x%08x\n", gt->gam_done);
+ }
+
+ for (ee = gt->engine; ee; ee = ee->next) {
+ const struct i915_vma_coredump *vma;
+
+ error_print_engine(m, ee);
+ for (vma = ee->vma; vma; vma = vma->next)
+ print_error_vma(m, ee->engine, vma);
+ }
+
+ if (gt->uc)
+ err_print_uc(m, gt->uc);
+}
+
static void __err_print_to_sgl(struct drm_i915_error_state_buf *m,
- struct i915_gpu_state *error)
+ struct i915_gpu_coredump *error)
{
- const struct drm_i915_error_engine *ee;
+ const struct intel_engine_coredump *ee;
struct timespec64 ts;
- int i, j;
if (*error->error_msg)
err_printf(m, "%s\n", error->error_msg);
@@ -681,7 +747,7 @@ static void __err_print_to_sgl(struct drm_i915_error_state_buf *m,
err_printf(m, "Capture: %lu jiffies; %d ms ago\n",
error->capture, jiffies_to_msecs(jiffies - error->capture));
- for (ee = error->engine; ee; ee = ee->next)
+ for (ee = error->gt ? error->gt->engine : NULL; ee; ee = ee->next)
err_printf(m, "Active process (on ring %s): %s [%d]\n",
ee->engine->name,
ee->context.comm,
@@ -707,90 +773,11 @@ static void __err_print_to_sgl(struct drm_i915_error_state_buf *m,
CSR_VERSION_MINOR(csr->version));
}
- err_printf(m, "GT awake: %s\n", yesno(error->awake));
err_printf(m, "RPM wakelock: %s\n", yesno(error->wakelock));
err_printf(m, "PM suspended: %s\n", yesno(error->suspended));
- err_printf(m, "EIR: 0x%08x\n", error->eir);
- err_printf(m, "IER: 0x%08x\n", error->ier);
- for (i = 0; i < error->ngtier; i++)
- err_printf(m, "GTIER[%d]: 0x%08x\n", i, error->gtier[i]);
- err_printf(m, "PGTBL_ER: 0x%08x\n", error->pgtbl_er);
- err_printf(m, "FORCEWAKE: 0x%08x\n", error->forcewake);
- err_printf(m, "DERRMR: 0x%08x\n", error->derrmr);
- err_printf(m, "CCID: 0x%08x\n", error->ccid);
-
- for (i = 0; i < error->nfence; i++)
- err_printf(m, " fence[%d] = %08llx\n", i, error->fence[i]);
-
- if (IS_GEN_RANGE(m->i915, 6, 11)) {
- err_printf(m, "ERROR: 0x%08x\n", error->error);
- err_printf(m, "DONE_REG: 0x%08x\n", error->done_reg);
- }
-
- if (INTEL_GEN(m->i915) >= 8)
- err_printf(m, "FAULT_TLB_DATA: 0x%08x 0x%08x\n",
- error->fault_data1, error->fault_data0);
-
- if (IS_GEN(m->i915, 7))
- err_printf(m, "ERR_INT: 0x%08x\n", error->err_int);
-
- if (IS_GEN_RANGE(m->i915, 8, 11))
- err_printf(m, "GTT_CACHE_EN: 0x%08x\n", error->gtt_cache);
-
- if (IS_GEN(m->i915, 12))
- err_printf(m, "AUX_ERR_DBG: 0x%08x\n", error->aux_err);
-
- if (INTEL_GEN(m->i915) >= 12) {
- int i;
-
- for (i = 0; i < GEN12_SFC_DONE_MAX; i++)
- err_printf(m, " SFC_DONE[%d]: 0x%08x\n", i,
- error->sfc_done[i]);
-
- err_printf(m, " GAM_DONE: 0x%08x\n", error->gam_done);
- }
-
- for (ee = error->engine; ee; ee = ee->next)
- error_print_engine(m, ee, error->capture);
-
- for (ee = error->engine; ee; ee = ee->next) {
- const struct drm_i915_error_object *obj;
-
- obj = ee->batchbuffer;
- if (obj) {
- err_puts(m, ee->engine->name);
- if (ee->context.pid)
- err_printf(m, " (submitted by %s [%d])",
- ee->context.comm,
- ee->context.pid);
- err_printf(m, " --- gtt_offset = 0x%08x %08x\n",
- upper_32_bits(obj->gtt_offset),
- lower_32_bits(obj->gtt_offset));
- print_error_obj(m, ee->engine, NULL, obj);
- }
- for (j = 0; j < ee->user_bo_count; j++)
- print_error_obj(m, ee->engine, "user", ee->user_bo[j]);
-
- if (ee->num_requests) {
- err_printf(m, "%s --- %d requests\n",
- ee->engine->name,
- ee->num_requests);
- for (j = 0; j < ee->num_requests; j++)
- error_print_request(m, " ",
- &ee->requests[j],
- error->capture);
- }
-
- print_error_obj(m, ee->engine, "ringbuffer", ee->ringbuffer);
- print_error_obj(m, ee->engine, "HW Status", ee->hws_page);
- print_error_obj(m, ee->engine, "HW context", ee->ctx);
- print_error_obj(m, ee->engine, "WA context", ee->wa_ctx);
- print_error_obj(m, ee->engine,
- "WA batchbuffer", ee->wa_batchbuffer);
- print_error_obj(m, ee->engine,
- "NULL context", ee->default_state);
- }
+ if (error->gt)
+ err_print_gt(m, error->gt);
if (error->overlay)
intel_overlay_print_error_state(m, error->overlay);
@@ -801,10 +788,9 @@ static void __err_print_to_sgl(struct drm_i915_error_state_buf *m,
err_print_capabilities(m, &error->device_info, &error->runtime_info,
&error->driver_caps);
err_print_params(m, &error->params);
- err_print_uc(m, &error->uc);
}
-static int err_print_to_sgl(struct i915_gpu_state *error)
+static int err_print_to_sgl(struct i915_gpu_coredump *error)
{
struct drm_i915_error_state_buf m;
@@ -841,8 +827,8 @@ static int err_print_to_sgl(struct i915_gpu_state *error)
return 0;
}
-ssize_t i915_gpu_state_copy_to_buffer(struct i915_gpu_state *error,
- char *buf, loff_t off, size_t rem)
+ssize_t i915_gpu_coredump_copy_to_buffer(struct i915_gpu_coredump *error,
+ char *buf, loff_t off, size_t rem)
{
struct scatterlist *sg;
size_t count;
@@ -905,85 +891,88 @@ ssize_t i915_gpu_state_copy_to_buffer(struct i915_gpu_state *error,
return count;
}
-static void i915_error_object_free(struct drm_i915_error_object *obj)
+static void i915_vma_coredump_free(struct i915_vma_coredump *vma)
{
- int page;
+ while (vma) {
+ struct i915_vma_coredump *next = vma->next;
+ int page;
- if (obj == NULL)
- return;
-
- for (page = 0; page < obj->page_count; page++)
- free_page((unsigned long)obj->pages[page]);
+ for (page = 0; page < vma->page_count; page++)
+ free_page((unsigned long)vma->pages[page]);
- kfree(obj);
+ kfree(vma);
+ vma = next;
+ }
}
-
-static void cleanup_params(struct i915_gpu_state *error)
+static void cleanup_params(struct i915_gpu_coredump *error)
{
i915_params_free(&error->params);
}
-static void cleanup_uc_state(struct i915_gpu_state *error)
+static void cleanup_uc(struct intel_uc_coredump *uc)
{
- struct i915_error_uc *error_uc = &error->uc;
+ kfree(uc->guc_fw.path);
+ kfree(uc->huc_fw.path);
+ i915_vma_coredump_free(uc->guc_log);
- kfree(error_uc->guc_fw.path);
- kfree(error_uc->huc_fw.path);
- i915_error_object_free(error_uc->guc_log);
+ kfree(uc);
}
-void __i915_gpu_state_free(struct kref *error_ref)
+static void cleanup_gt(struct intel_gt_coredump *gt)
{
- struct i915_gpu_state *error =
- container_of(error_ref, typeof(*error), ref);
- long i;
+ while (gt->engine) {
+ struct intel_engine_coredump *ee = gt->engine;
- while (error->engine) {
- struct drm_i915_error_engine *ee = error->engine;
+ gt->engine = ee->next;
- error->engine = ee->next;
+ i915_vma_coredump_free(ee->vma);
+ kfree(ee);
+ }
- for (i = 0; i < ee->user_bo_count; i++)
- i915_error_object_free(ee->user_bo[i]);
- kfree(ee->user_bo);
+ if (gt->uc)
+ cleanup_uc(gt->uc);
- i915_error_object_free(ee->batchbuffer);
- i915_error_object_free(ee->wa_batchbuffer);
- i915_error_object_free(ee->ringbuffer);
- i915_error_object_free(ee->hws_page);
- i915_error_object_free(ee->ctx);
- i915_error_object_free(ee->wa_ctx);
+ kfree(gt);
+}
- kfree(ee->requests);
- kfree(ee);
+void __i915_gpu_coredump_free(struct kref *error_ref)
+{
+ struct i915_gpu_coredump *error =
+ container_of(error_ref, typeof(*error), ref);
+
+ while (error->gt) {
+ struct intel_gt_coredump *gt = error->gt;
+
+ error->gt = gt->next;
+ cleanup_gt(gt);
}
kfree(error->overlay);
kfree(error->display);
cleanup_params(error);
- cleanup_uc_state(error);
err_free_sgl(error->sgl);
kfree(error);
}
-static struct drm_i915_error_object *
-i915_error_object_create(struct drm_i915_private *i915,
- struct i915_vma *vma,
- struct compress *compress)
+static struct i915_vma_coredump *
+i915_vma_coredump_create(const struct intel_gt *gt,
+ const struct i915_vma *vma,
+ const char *name,
+ struct i915_vma_compress *compress)
{
- struct i915_ggtt *ggtt = &i915->ggtt;
+ struct i915_ggtt *ggtt = gt->ggtt;
const u64 slot = ggtt->error_capture.start;
- struct drm_i915_error_object *dst;
+ struct i915_vma_coredump *dst;
unsigned long num_pages;
struct sgt_iter iter;
int ret;
might_sleep();
- if (!vma || !vma->pages)
+ if (!vma || !vma->pages || !compress)
return NULL;
num_pages = min_t(u64, vma->size, vma->obj->base.size) >> PAGE_SHIFT;
@@ -997,6 +986,9 @@ i915_error_object_create(struct drm_i915_private *i915,
return NULL;
}
+ strcpy(dst->name, name);
+ dst->next = NULL;
+
dst->gtt_offset = vma->node.start;
dst->gtt_size = vma->node.size;
dst->gtt_page_sizes = vma->page_sizes.gtt;
@@ -1004,9 +996,6 @@ i915_error_object_create(struct drm_i915_private *i915,
dst->page_count = 0;
dst->unused = 0;
- compress->wc = i915_gem_object_is_lmem(vma->obj) ||
- drm_mm_node_allocated(&ggtt->error_capture);
-
ret = -EINVAL;
if (drm_mm_node_allocated(&ggtt->error_capture)) {
void __iomem *s;
@@ -1015,9 +1004,12 @@ i915_error_object_create(struct drm_i915_private *i915,
for_each_sgt_daddr(dma, iter, vma->pages) {
ggtt->vm.insert_page(&ggtt->vm, dma, slot,
I915_CACHE_NONE, 0);
+ mb();
s = io_mapping_map_wc(&ggtt->iomap, slot, PAGE_SIZE);
- ret = compress_page(compress, (void __force *)s, dst);
+ ret = compress_page(compress,
+ (void __force *)s, dst,
+ true);
io_mapping_unmap(s);
if (ret)
break;
@@ -1030,7 +1022,9 @@ i915_error_object_create(struct drm_i915_private *i915,
void __iomem *s;
s = io_mapping_map_wc(&mem->iomap, dma, PAGE_SIZE);
- ret = compress_page(compress, (void __force *)s, dst);
+ ret = compress_page(compress,
+ (void __force *)s, dst,
+ true);
io_mapping_unmap(s);
if (ret)
break;
@@ -1044,8 +1038,8 @@ i915_error_object_create(struct drm_i915_private *i915,
drm_clflush_pages(&page, 1);
s = kmap(page);
- ret = compress_page(compress, s, dst);
- kunmap(s);
+ ret = compress_page(compress, s, dst, false);
+ kunmap(page);
drm_clflush_pages(&page, 1);
@@ -1065,77 +1059,56 @@ i915_error_object_create(struct drm_i915_private *i915,
return dst;
}
-/*
- * Generate a semi-unique error code. The code is not meant to have meaning, The
- * code's only purpose is to try to prevent false duplicated bug reports by
- * grossly estimating a GPU error state.
- *
- * TODO Ideally, hashing the batchbuffer would be a very nice way to determine
- * the hang if we could strip the GTT offset information from it.
- *
- * It's only a small step better than a random number in its current form.
- */
-static u32 i915_error_generate_code(struct i915_gpu_state *error)
+static void gt_record_fences(struct intel_gt_coredump *gt)
{
- const struct drm_i915_error_engine *ee = error->engine;
-
- /*
- * IPEHR would be an ideal way to detect errors, as it's the gross
- * measure of "the command that hung." However, has some very common
- * synchronization commands which almost always appear in the case
- * strictly a client bug. Use instdone to differentiate those some.
- */
- return ee ? ee->ipehr ^ ee->instdone.instdone : 0;
-}
-
-static void gem_record_fences(struct i915_gpu_state *error)
-{
- struct drm_i915_private *dev_priv = error->i915;
- struct intel_uncore *uncore = &dev_priv->uncore;
+ struct i915_ggtt *ggtt = gt->_gt->ggtt;
+ struct intel_uncore *uncore = gt->_gt->uncore;
int i;
- if (INTEL_GEN(dev_priv) >= 6) {
- for (i = 0; i < dev_priv->ggtt.num_fences; i++)
- error->fence[i] =
+ if (INTEL_GEN(uncore->i915) >= 6) {
+ for (i = 0; i < ggtt->num_fences; i++)
+ gt->fence[i] =
intel_uncore_read64(uncore,
FENCE_REG_GEN6_LO(i));
- } else if (INTEL_GEN(dev_priv) >= 4) {
- for (i = 0; i < dev_priv->ggtt.num_fences; i++)
- error->fence[i] =
+ } else if (INTEL_GEN(uncore->i915) >= 4) {
+ for (i = 0; i < ggtt->num_fences; i++)
+ gt->fence[i] =
intel_uncore_read64(uncore,
FENCE_REG_965_LO(i));
} else {
- for (i = 0; i < dev_priv->ggtt.num_fences; i++)
- error->fence[i] =
+ for (i = 0; i < ggtt->num_fences; i++)
+ gt->fence[i] =
intel_uncore_read(uncore, FENCE_REG(i));
}
- error->nfence = i;
+ gt->nfence = i;
}
-static void error_record_engine_registers(struct i915_gpu_state *error,
- struct intel_engine_cs *engine,
- struct drm_i915_error_engine *ee)
+static void engine_record_registers(struct intel_engine_coredump *ee)
{
- struct drm_i915_private *dev_priv = engine->i915;
+ const struct intel_engine_cs *engine = ee->engine;
+ struct drm_i915_private *i915 = engine->i915;
- if (INTEL_GEN(dev_priv) >= 6) {
+ if (INTEL_GEN(i915) >= 6) {
ee->rc_psmi = ENGINE_READ(engine, RING_PSMI_CTL);
- if (INTEL_GEN(dev_priv) >= 12)
- ee->fault_reg = I915_READ(GEN12_RING_FAULT_REG);
- else if (INTEL_GEN(dev_priv) >= 8)
- ee->fault_reg = I915_READ(GEN8_RING_FAULT_REG);
+ if (INTEL_GEN(i915) >= 12)
+ ee->fault_reg = intel_uncore_read(engine->uncore,
+ GEN12_RING_FAULT_REG);
+ else if (INTEL_GEN(i915) >= 8)
+ ee->fault_reg = intel_uncore_read(engine->uncore,
+ GEN8_RING_FAULT_REG);
else
ee->fault_reg = GEN6_RING_FAULT_REG_READ(engine);
}
- if (INTEL_GEN(dev_priv) >= 4) {
+ if (INTEL_GEN(i915) >= 4) {
ee->faddr = ENGINE_READ(engine, RING_DMA_FADD);
ee->ipeir = ENGINE_READ(engine, RING_IPEIR);
ee->ipehr = ENGINE_READ(engine, RING_IPEHR);
ee->instps = ENGINE_READ(engine, RING_INSTPS);
ee->bbaddr = ENGINE_READ(engine, RING_BBADDR);
- if (INTEL_GEN(dev_priv) >= 8) {
+ ee->ccid = ENGINE_READ(engine, CCID);
+ if (INTEL_GEN(i915) >= 8) {
ee->faddr |= (u64)ENGINE_READ(engine, RING_DMA_FADD_UDW) << 32;
ee->bbaddr |= (u64)ENGINE_READ(engine, RING_BBADDR_UDW) << 32;
}
@@ -1154,13 +1127,13 @@ static void error_record_engine_registers(struct i915_gpu_state *error,
ee->head = ENGINE_READ(engine, RING_HEAD);
ee->tail = ENGINE_READ(engine, RING_TAIL);
ee->ctl = ENGINE_READ(engine, RING_CTL);
- if (INTEL_GEN(dev_priv) > 2)
+ if (INTEL_GEN(i915) > 2)
ee->mode = ENGINE_READ(engine, RING_MI_MODE);
- if (!HWS_NEEDS_PHYSICAL(dev_priv)) {
+ if (!HWS_NEEDS_PHYSICAL(i915)) {
i915_reg_t mmio;
- if (IS_GEN(dev_priv, 7)) {
+ if (IS_GEN(i915, 7)) {
switch (engine->id) {
default:
MISSING_CASE(engine->id);
@@ -1185,110 +1158,63 @@ static void error_record_engine_registers(struct i915_gpu_state *error,
mmio = RING_HWS_PGA(engine->mmio_base);
}
- ee->hws = I915_READ(mmio);
+ ee->hws = intel_uncore_read(engine->uncore, mmio);
}
- ee->idle = intel_engine_is_idle(engine);
- ee->reset_count = i915_reset_engine_count(&dev_priv->gpu_error,
- engine);
+ ee->reset_count = i915_reset_engine_count(&i915->gpu_error, engine);
- if (HAS_PPGTT(dev_priv)) {
+ if (HAS_PPGTT(i915)) {
int i;
ee->vm_info.gfx_mode = ENGINE_READ(engine, RING_MODE_GEN7);
- if (IS_GEN(dev_priv, 6)) {
+ if (IS_GEN(i915, 6)) {
ee->vm_info.pp_dir_base =
ENGINE_READ(engine, RING_PP_DIR_BASE_READ);
- } else if (IS_GEN(dev_priv, 7)) {
+ } else if (IS_GEN(i915, 7)) {
ee->vm_info.pp_dir_base =
ENGINE_READ(engine, RING_PP_DIR_BASE);
- } else if (INTEL_GEN(dev_priv) >= 8) {
+ } else if (INTEL_GEN(i915) >= 8) {
u32 base = engine->mmio_base;
for (i = 0; i < 4; i++) {
ee->vm_info.pdp[i] =
- I915_READ(GEN8_RING_PDP_UDW(base, i));
+ intel_uncore_read(engine->uncore,
+ GEN8_RING_PDP_UDW(base, i));
ee->vm_info.pdp[i] <<= 32;
ee->vm_info.pdp[i] |=
- I915_READ(GEN8_RING_PDP_LDW(base, i));
+ intel_uncore_read(engine->uncore,
+ GEN8_RING_PDP_LDW(base, i));
}
}
}
}
static void record_request(const struct i915_request *request,
- struct drm_i915_error_request *erq)
+ struct i915_request_coredump *erq)
{
- const struct i915_gem_context *ctx = request->gem_context;
+ const struct i915_gem_context *ctx;
erq->flags = request->fence.flags;
erq->context = request->fence.context;
erq->seqno = request->fence.seqno;
erq->sched_attr = request->sched.attr;
- erq->jiffies = request->emitted_jiffies;
erq->start = i915_ggtt_offset(request->ring->vma);
erq->head = request->head;
erq->tail = request->tail;
+ erq->pid = 0;
rcu_read_lock();
- erq->pid = ctx->pid ? pid_nr(ctx->pid) : 0;
+ ctx = rcu_dereference(request->context->gem_context);
+ if (ctx)
+ erq->pid = pid_nr(ctx->pid);
rcu_read_unlock();
}
-static void engine_record_requests(struct intel_engine_cs *engine,
- struct i915_request *first,
- struct drm_i915_error_engine *ee)
+static void engine_record_execlists(struct intel_engine_coredump *ee)
{
- struct i915_request *request;
- int count;
-
- count = 0;
- request = first;
- list_for_each_entry_from(request, &engine->active.requests, sched.link)
- count++;
- if (!count)
- return;
-
- ee->requests = kcalloc(count, sizeof(*ee->requests), ATOMIC_MAYFAIL);
- if (!ee->requests)
- return;
-
- ee->num_requests = count;
-
- count = 0;
- request = first;
- list_for_each_entry_from(request,
- &engine->active.requests, sched.link) {
- if (count >= ee->num_requests) {
- /*
- * If the ring request list was changed in
- * between the point where the error request
- * list was created and dimensioned and this
- * point then just exit early to avoid crashes.
- *
- * We don't need to communicate that the
- * request list changed state during error
- * state capture and that the error state is
- * slightly incorrect as a consequence since we
- * are typically only interested in the request
- * list state at the point of error state
- * capture, not in any changes happening during
- * the capture.
- */
- break;
- }
-
- record_request(request, &ee->requests[count++]);
- }
- ee->num_requests = count;
-}
-
-static void error_record_engine_execlists(const struct intel_engine_cs *engine,
- struct drm_i915_error_engine *ee)
-{
- const struct intel_engine_execlists * const execlists = &engine->execlists;
- struct i915_request * const *port = execlists->active;
+ const struct intel_engine_execlists * const el = &ee->engine->execlists;
+ struct i915_request * const *port = el->active;
unsigned int n = 0;
while (*port)
@@ -1297,47 +1223,57 @@ static void error_record_engine_execlists(const struct intel_engine_cs *engine,
ee->num_ports = n;
}
-static bool record_context(struct drm_i915_error_context *e,
+static bool record_context(struct i915_gem_context_coredump *e,
const struct i915_request *rq)
{
- const struct i915_gem_context *ctx = rq->gem_context;
+ struct i915_gem_context *ctx;
+ struct task_struct *task;
+ bool capture;
- if (ctx->pid) {
- struct task_struct *task;
+ rcu_read_lock();
+ ctx = rcu_dereference(rq->context->gem_context);
+ if (ctx && !kref_get_unless_zero(&ctx->ref))
+ ctx = NULL;
+ rcu_read_unlock();
+ if (!ctx)
+ return false;
- rcu_read_lock();
- task = pid_task(ctx->pid, PIDTYPE_PID);
- if (task) {
- strcpy(e->comm, task->comm);
- e->pid = task->pid;
- }
- rcu_read_unlock();
+ rcu_read_lock();
+ task = pid_task(ctx->pid, PIDTYPE_PID);
+ if (task) {
+ strcpy(e->comm, task->comm);
+ e->pid = task->pid;
}
+ rcu_read_unlock();
e->sched_attr = ctx->sched;
e->guilty = atomic_read(&ctx->guilty_count);
e->active = atomic_read(&ctx->active_count);
- return i915_gem_context_no_error_capture(ctx);
+ capture = i915_gem_context_no_error_capture(ctx);
+
+ i915_gem_context_put(ctx);
+ return capture;
}
-struct capture_vma {
- struct capture_vma *next;
- void **slot;
+struct intel_engine_capture_vma {
+ struct intel_engine_capture_vma *next;
+ struct i915_vma *vma;
+ char name[16];
};
-static struct capture_vma *
-capture_vma(struct capture_vma *next,
+static struct intel_engine_capture_vma *
+capture_vma(struct intel_engine_capture_vma *next,
struct i915_vma *vma,
- struct drm_i915_error_object **out)
+ const char *name,
+ gfp_t gfp)
{
- struct capture_vma *c;
+ struct intel_engine_capture_vma *c;
- *out = NULL;
if (!vma)
return next;
- c = kmalloc(sizeof(*c), ATOMIC_MAYFAIL);
+ c = kmalloc(sizeof(*c), gfp);
if (!c)
return next;
@@ -1346,54 +1282,31 @@ capture_vma(struct capture_vma *next,
return next;
}
- c->slot = (void **)out;
- *c->slot = i915_vma_get(vma);
+ strcpy(c->name, name);
+ c->vma = i915_vma_get(vma);
c->next = next;
return c;
}
-static struct capture_vma *
-request_record_user_bo(struct i915_request *request,
- struct drm_i915_error_engine *ee,
- struct capture_vma *capture)
+static struct intel_engine_capture_vma *
+capture_user(struct intel_engine_capture_vma *capture,
+ const struct i915_request *rq,
+ gfp_t gfp)
{
struct i915_capture_list *c;
- struct drm_i915_error_object **bo;
- long count, max;
-
- max = 0;
- for (c = request->capture_list; c; c = c->next)
- max++;
- if (!max)
- return capture;
-
- bo = kmalloc_array(max, sizeof(*bo), ATOMIC_MAYFAIL);
- if (!bo) {
- /* If we can't capture everything, try to capture something. */
- max = min_t(long, max, PAGE_SIZE / sizeof(*bo));
- bo = kmalloc_array(max, sizeof(*bo), ATOMIC_MAYFAIL);
- }
- if (!bo)
- return capture;
- count = 0;
- for (c = request->capture_list; c; c = c->next) {
- capture = capture_vma(capture, c->vma, &bo[count]);
- if (++count == max)
- break;
- }
-
- ee->user_bo = bo;
- ee->user_bo_count = count;
+ for (c = rq->capture_list; c; c = c->next)
+ capture = capture_vma(capture, c->vma, "user", gfp);
return capture;
}
-static struct drm_i915_error_object *
-capture_object(struct drm_i915_private *dev_priv,
+static struct i915_vma_coredump *
+capture_object(const struct intel_gt *gt,
struct drm_i915_gem_object *obj,
- struct compress *compress)
+ const char *name,
+ struct i915_vma_compress *compress)
{
if (obj && i915_gem_object_has_pages(obj)) {
struct i915_vma fake = {
@@ -1403,127 +1316,175 @@ capture_object(struct drm_i915_private *dev_priv,
.obj = obj,
};
- return i915_error_object_create(dev_priv, &fake, compress);
+ return i915_vma_coredump_create(gt, &fake, name, compress);
} else {
return NULL;
}
}
-static void
-gem_record_rings(struct i915_gpu_state *error, struct compress *compress)
+static void add_vma(struct intel_engine_coredump *ee,
+ struct i915_vma_coredump *vma)
{
- struct drm_i915_private *i915 = error->i915;
- struct intel_engine_cs *engine;
- struct drm_i915_error_engine *ee;
+ if (vma) {
+ vma->next = ee->vma;
+ ee->vma = vma;
+ }
+}
+
+struct intel_engine_coredump *
+intel_engine_coredump_alloc(struct intel_engine_cs *engine, gfp_t gfp)
+{
+ struct intel_engine_coredump *ee;
- ee = kzalloc(sizeof(*ee), GFP_KERNEL);
+ ee = kzalloc(sizeof(*ee), gfp);
if (!ee)
- return;
+ return NULL;
- for_each_uabi_engine(engine, i915) {
- struct capture_vma *capture = NULL;
- struct i915_request *request;
- unsigned long flags;
+ ee->engine = engine;
- /* Refill our page pool before entering atomic section */
- pool_refill(&compress->pool, ALLOW_FAIL);
+ engine_record_registers(ee);
+ engine_record_execlists(ee);
- spin_lock_irqsave(&engine->active.lock, flags);
- request = intel_engine_find_active_request(engine);
- if (!request) {
- spin_unlock_irqrestore(&engine->active.lock, flags);
- continue;
- }
+ return ee;
+}
- error->simulated |= record_context(&ee->context, request);
+struct intel_engine_capture_vma *
+intel_engine_coredump_add_request(struct intel_engine_coredump *ee,
+ struct i915_request *rq,
+ gfp_t gfp)
+{
+ struct intel_engine_capture_vma *vma = NULL;
- /*
- * We need to copy these to an anonymous buffer
- * as the simplest method to avoid being overwritten
- * by userspace.
- */
- capture = capture_vma(capture,
- request->batch,
- &ee->batchbuffer);
+ ee->simulated |= record_context(&ee->context, rq);
+ if (ee->simulated)
+ return NULL;
- if (HAS_BROKEN_CS_TLB(i915))
- capture = capture_vma(capture,
- engine->gt->scratch,
- &ee->wa_batchbuffer);
+ /*
+ * We need to copy these to an anonymous buffer
+ * as the simplest method to avoid being overwritten
+ * by userspace.
+ */
+ vma = capture_vma(vma, rq->batch, "batch", gfp);
+ vma = capture_user(vma, rq, gfp);
+ vma = capture_vma(vma, rq->ring->vma, "ring", gfp);
+ vma = capture_vma(vma, rq->context->state, "HW context", gfp);
+
+ ee->rq_head = rq->head;
+ ee->rq_post = rq->postfix;
+ ee->rq_tail = rq->tail;
+
+ return vma;
+}
- capture = request_record_user_bo(request, ee, capture);
+void
+intel_engine_coredump_add_vma(struct intel_engine_coredump *ee,
+ struct intel_engine_capture_vma *capture,
+ struct i915_vma_compress *compress)
+{
+ const struct intel_engine_cs *engine = ee->engine;
- capture = capture_vma(capture,
- request->hw_context->state,
- &ee->ctx);
+ while (capture) {
+ struct intel_engine_capture_vma *this = capture;
+ struct i915_vma *vma = this->vma;
- capture = capture_vma(capture,
- request->ring->vma,
- &ee->ringbuffer);
+ add_vma(ee,
+ i915_vma_coredump_create(engine->gt,
+ vma, this->name,
+ compress));
- ee->cpu_ring_head = request->ring->head;
- ee->cpu_ring_tail = request->ring->tail;
+ i915_active_release(&vma->active);
+ i915_vma_put(vma);
- ee->rq_head = request->head;
- ee->rq_post = request->postfix;
- ee->rq_tail = request->tail;
+ capture = this->next;
+ kfree(this);
+ }
- engine_record_requests(engine, request, ee);
- spin_unlock_irqrestore(&engine->active.lock, flags);
+ add_vma(ee,
+ i915_vma_coredump_create(engine->gt,
+ engine->status_page.vma,
+ "HW Status",
+ compress));
- error_record_engine_registers(error, engine, ee);
- error_record_engine_execlists(engine, ee);
+ add_vma(ee,
+ i915_vma_coredump_create(engine->gt,
+ engine->wa_ctx.vma,
+ "WA context",
+ compress));
- while (capture) {
- struct capture_vma *this = capture;
- struct i915_vma *vma = *this->slot;
+ add_vma(ee,
+ capture_object(engine->gt,
+ engine->default_state,
+ "NULL context",
+ compress));
+}
- *this->slot =
- i915_error_object_create(i915, vma, compress);
+static struct intel_engine_coredump *
+capture_engine(struct intel_engine_cs *engine,
+ struct i915_vma_compress *compress)
+{
+ struct intel_engine_capture_vma *capture = NULL;
+ struct intel_engine_coredump *ee;
+ struct i915_request *rq;
+ unsigned long flags;
- i915_active_release(&vma->active);
- i915_vma_put(vma);
+ ee = intel_engine_coredump_alloc(engine, GFP_KERNEL);
+ if (!ee)
+ return NULL;
- capture = this->next;
- kfree(this);
- }
+ spin_lock_irqsave(&engine->active.lock, flags);
+ rq = intel_engine_find_active_request(engine);
+ if (rq)
+ capture = intel_engine_coredump_add_request(ee, rq,
+ ATOMIC_MAYFAIL);
+ spin_unlock_irqrestore(&engine->active.lock, flags);
+ if (!capture) {
+ kfree(ee);
+ return NULL;
+ }
- ee->hws_page =
- i915_error_object_create(i915,
- engine->status_page.vma,
- compress);
+ intel_engine_coredump_add_vma(ee, capture, compress);
- ee->wa_ctx =
- i915_error_object_create(i915,
- engine->wa_ctx.vma,
- compress);
+ return ee;
+}
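capture_engine() above is itself the canonical driver of the new split API. As a minimal sketch, assuming the caller already holds an engine, an active request rq and a compress context, another capture path could reuse the same sequence under atomic constraints:

	struct intel_engine_capture_vma *vmas;
	struct intel_engine_coredump *ee;

	ee = intel_engine_coredump_alloc(engine, GFP_ATOMIC);
	if (ee) {
		vmas = intel_engine_coredump_add_request(ee, rq, GFP_ATOMIC);
		if (vmas)
			intel_engine_coredump_add_vma(ee, vmas, compress);
		else
			kfree(ee);	/* simulated context or allocation failure */
	}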
- ee->default_state =
- capture_object(i915, engine->default_state, compress);
+static void
+gt_record_engines(struct intel_gt_coredump *gt,
+ struct i915_vma_compress *compress)
+{
+ struct intel_engine_cs *engine;
+ enum intel_engine_id id;
- ee->engine = engine;
+ for_each_engine(engine, gt->_gt, id) {
+ struct intel_engine_coredump *ee;
- ee->next = error->engine;
- error->engine = ee;
+ /* Refill our page pool before entering atomic section */
+ pool_refill(&compress->pool, ALLOW_FAIL);
- ee = kzalloc(sizeof(*ee), GFP_KERNEL);
+ ee = capture_engine(engine, compress);
if (!ee)
- return;
- }
+ continue;
- kfree(ee);
+ gt->simulated |= ee->simulated;
+ if (ee->simulated) {
+ kfree(ee);
+ continue;
+ }
+
+ ee->next = gt->engine;
+ gt->engine = ee;
+ }
}
-static void
-capture_uc_state(struct i915_gpu_state *error, struct compress *compress)
+static struct intel_uc_coredump *
+gt_record_uc(struct intel_gt_coredump *gt,
+ struct i915_vma_compress *compress)
{
- struct drm_i915_private *i915 = error->i915;
- struct i915_error_uc *error_uc = &error->uc;
- struct intel_uc *uc = &i915->gt.uc;
+ const struct intel_uc *uc = &gt->_gt->uc;
+ struct intel_uc_coredump *error_uc;
- /* Capturing uC state won't be useful if there is no GuC */
- if (!error->device_info.has_gt_uc)
- return;
+ error_uc = kzalloc(sizeof(*error_uc), ALLOW_FAIL);
+ if (!error_uc)
+ return NULL;
memcpy(&error_uc->guc_fw, &uc->guc.fw, sizeof(uc->guc.fw));
memcpy(&error_uc->huc_fw, &uc->huc.fw, sizeof(uc->huc.fw));
@@ -1534,19 +1495,42 @@ capture_uc_state(struct i915_gpu_state *error, struct compress *compress)
*/
error_uc->guc_fw.path = kstrdup(uc->guc.fw.path, ALLOW_FAIL);
error_uc->huc_fw.path = kstrdup(uc->huc.fw.path, ALLOW_FAIL);
- error_uc->guc_log = i915_error_object_create(i915,
- uc->guc.log.vma,
- compress);
+ error_uc->guc_log =
+ i915_vma_coredump_create(gt->_gt,
+ uc->guc.log.vma, "GuC log buffer",
+ compress);
+
+ return error_uc;
+}
+
+static void gt_capture_prepare(struct intel_gt_coredump *gt)
+{
+ struct i915_ggtt *ggtt = gt->_gt->ggtt;
+
+ mutex_lock(&ggtt->error_mutex);
+}
+
+static void gt_capture_finish(struct intel_gt_coredump *gt)
+{
+ struct i915_ggtt *ggtt = gt->_gt->ggtt;
+
+ if (drm_mm_node_allocated(&ggtt->error_capture))
+ ggtt->vm.clear_range(&ggtt->vm,
+ ggtt->error_capture.start,
+ PAGE_SIZE);
+
+ mutex_unlock(&ggtt->error_mutex);
}
/* Capture all registers which don't fit into another category. */
-static void capture_reg_state(struct i915_gpu_state *error)
+static void gt_record_regs(struct intel_gt_coredump *gt)
{
- struct drm_i915_private *i915 = error->i915;
- struct intel_uncore *uncore = &i915->uncore;
+ struct intel_uncore *uncore = gt->_gt->uncore;
+ struct drm_i915_private *i915 = uncore->i915;
int i;
- /* General organization
+ /*
+ * General organization
* 1. Registers specific to a single generation
* 2. Registers which belong to multiple generations
* 3. Feature specific registers.
@@ -1556,138 +1540,162 @@ static void capture_reg_state(struct i915_gpu_state *error)
/* 1: Registers specific to a single generation */
if (IS_VALLEYVIEW(i915)) {
- error->gtier[0] = intel_uncore_read(uncore, GTIER);
- error->ier = intel_uncore_read(uncore, VLV_IER);
- error->forcewake = intel_uncore_read_fw(uncore, FORCEWAKE_VLV);
+ gt->gtier[0] = intel_uncore_read(uncore, GTIER);
+ gt->ier = intel_uncore_read(uncore, VLV_IER);
+ gt->forcewake = intel_uncore_read_fw(uncore, FORCEWAKE_VLV);
}
if (IS_GEN(i915, 7))
- error->err_int = intel_uncore_read(uncore, GEN7_ERR_INT);
+ gt->err_int = intel_uncore_read(uncore, GEN7_ERR_INT);
if (INTEL_GEN(i915) >= 12) {
- error->fault_data0 = intel_uncore_read(uncore,
- GEN12_FAULT_TLB_DATA0);
- error->fault_data1 = intel_uncore_read(uncore,
- GEN12_FAULT_TLB_DATA1);
+ gt->fault_data0 = intel_uncore_read(uncore,
+ GEN12_FAULT_TLB_DATA0);
+ gt->fault_data1 = intel_uncore_read(uncore,
+ GEN12_FAULT_TLB_DATA1);
} else if (INTEL_GEN(i915) >= 8) {
- error->fault_data0 = intel_uncore_read(uncore,
- GEN8_FAULT_TLB_DATA0);
- error->fault_data1 = intel_uncore_read(uncore,
- GEN8_FAULT_TLB_DATA1);
+ gt->fault_data0 = intel_uncore_read(uncore,
+ GEN8_FAULT_TLB_DATA0);
+ gt->fault_data1 = intel_uncore_read(uncore,
+ GEN8_FAULT_TLB_DATA1);
}
if (IS_GEN(i915, 6)) {
- error->forcewake = intel_uncore_read_fw(uncore, FORCEWAKE);
- error->gab_ctl = intel_uncore_read(uncore, GAB_CTL);
- error->gfx_mode = intel_uncore_read(uncore, GFX_MODE);
+ gt->forcewake = intel_uncore_read_fw(uncore, FORCEWAKE);
+ gt->gab_ctl = intel_uncore_read(uncore, GAB_CTL);
+ gt->gfx_mode = intel_uncore_read(uncore, GFX_MODE);
}
/* 2: Registers which belong to multiple generations */
if (INTEL_GEN(i915) >= 7)
- error->forcewake = intel_uncore_read_fw(uncore, FORCEWAKE_MT);
+ gt->forcewake = intel_uncore_read_fw(uncore, FORCEWAKE_MT);
if (INTEL_GEN(i915) >= 6) {
- error->derrmr = intel_uncore_read(uncore, DERRMR);
+ gt->derrmr = intel_uncore_read(uncore, DERRMR);
if (INTEL_GEN(i915) < 12) {
- error->error = intel_uncore_read(uncore, ERROR_GEN6);
- error->done_reg = intel_uncore_read(uncore, DONE_REG);
+ gt->error = intel_uncore_read(uncore, ERROR_GEN6);
+ gt->done_reg = intel_uncore_read(uncore, DONE_REG);
}
}
- if (INTEL_GEN(i915) >= 5)
- error->ccid = intel_uncore_read(uncore, CCID(RENDER_RING_BASE));
-
/* 3: Feature specific registers */
if (IS_GEN_RANGE(i915, 6, 7)) {
- error->gam_ecochk = intel_uncore_read(uncore, GAM_ECOCHK);
- error->gac_eco = intel_uncore_read(uncore, GAC_ECO_BITS);
+ gt->gam_ecochk = intel_uncore_read(uncore, GAM_ECOCHK);
+ gt->gac_eco = intel_uncore_read(uncore, GAC_ECO_BITS);
}
if (IS_GEN_RANGE(i915, 8, 11))
- error->gtt_cache = intel_uncore_read(uncore, HSW_GTT_CACHE_EN);
+ gt->gtt_cache = intel_uncore_read(uncore, HSW_GTT_CACHE_EN);
if (IS_GEN(i915, 12))
- error->aux_err = intel_uncore_read(uncore, GEN12_AUX_ERR_DBG);
+ gt->aux_err = intel_uncore_read(uncore, GEN12_AUX_ERR_DBG);
if (INTEL_GEN(i915) >= 12) {
for (i = 0; i < GEN12_SFC_DONE_MAX; i++) {
- error->sfc_done[i] =
+ gt->sfc_done[i] =
intel_uncore_read(uncore, GEN12_SFC_DONE(i));
}
- error->gam_done = intel_uncore_read(uncore, GEN12_GAM_DONE);
+ gt->gam_done = intel_uncore_read(uncore, GEN12_GAM_DONE);
}
/* 4: Everything else */
if (INTEL_GEN(i915) >= 11) {
- error->ier = intel_uncore_read(uncore, GEN8_DE_MISC_IER);
- error->gtier[0] =
+ gt->ier = intel_uncore_read(uncore, GEN8_DE_MISC_IER);
+ gt->gtier[0] =
intel_uncore_read(uncore,
GEN11_RENDER_COPY_INTR_ENABLE);
- error->gtier[1] =
+ gt->gtier[1] =
intel_uncore_read(uncore, GEN11_VCS_VECS_INTR_ENABLE);
- error->gtier[2] =
+ gt->gtier[2] =
intel_uncore_read(uncore, GEN11_GUC_SG_INTR_ENABLE);
- error->gtier[3] =
+ gt->gtier[3] =
intel_uncore_read(uncore,
GEN11_GPM_WGBOXPERF_INTR_ENABLE);
- error->gtier[4] =
+ gt->gtier[4] =
intel_uncore_read(uncore,
GEN11_CRYPTO_RSVD_INTR_ENABLE);
- error->gtier[5] =
+ gt->gtier[5] =
intel_uncore_read(uncore,
GEN11_GUNIT_CSME_INTR_ENABLE);
- error->ngtier = 6;
+ gt->ngtier = 6;
} else if (INTEL_GEN(i915) >= 8) {
- error->ier = intel_uncore_read(uncore, GEN8_DE_MISC_IER);
+ gt->ier = intel_uncore_read(uncore, GEN8_DE_MISC_IER);
for (i = 0; i < 4; i++)
- error->gtier[i] = intel_uncore_read(uncore,
- GEN8_GT_IER(i));
- error->ngtier = 4;
+ gt->gtier[i] =
+ intel_uncore_read(uncore, GEN8_GT_IER(i));
+ gt->ngtier = 4;
} else if (HAS_PCH_SPLIT(i915)) {
- error->ier = intel_uncore_read(uncore, DEIER);
- error->gtier[0] = intel_uncore_read(uncore, GTIER);
- error->ngtier = 1;
+ gt->ier = intel_uncore_read(uncore, DEIER);
+ gt->gtier[0] = intel_uncore_read(uncore, GTIER);
+ gt->ngtier = 1;
} else if (IS_GEN(i915, 2)) {
- error->ier = intel_uncore_read16(uncore, GEN2_IER);
+ gt->ier = intel_uncore_read16(uncore, GEN2_IER);
} else if (!IS_VALLEYVIEW(i915)) {
- error->ier = intel_uncore_read(uncore, GEN2_IER);
+ gt->ier = intel_uncore_read(uncore, GEN2_IER);
}
- error->eir = intel_uncore_read(uncore, EIR);
- error->pgtbl_er = intel_uncore_read(uncore, PGTBL_ER);
+ gt->eir = intel_uncore_read(uncore, EIR);
+ gt->pgtbl_er = intel_uncore_read(uncore, PGTBL_ER);
+}
+
+/*
+ * Generate a semi-unique error code. The code is not meant to have meaning; the
+ * code's only purpose is to try to prevent false duplicated bug reports by
+ * grossly estimating a GPU error state.
+ *
+ * TODO Ideally, hashing the batchbuffer would be a very nice way to determine
+ * the hang if we could strip the GTT offset information from it.
+ *
+ * It's only a small step better than a random number in its current form.
+ */
+static u32 generate_ecode(const struct intel_engine_coredump *ee)
+{
+ /*
+ * IPEHR would be an ideal way to detect errors, as it's the gross
+ * measure of "the command that hung." However, it has some very common
+ * synchronization commands which almost always appear when the hang is
+ * strictly a client bug. Use instdone to differentiate those somewhat.
+ */
+ return ee ? ee->ipehr ^ ee->instdone.instdone : 0;
}
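As a worked example with made-up register values: two hangs stopping on the same command (identical IPEHR) but with different execution-unit state produce different ecodes, so their reports are not folded together:

	u32 ipehr = 0x7a000002;		/* hypothetical hung command header */
	u32 instdone = 0xfffffffe;	/* hypothetical per-unit done bits */
	u32 ecode = ipehr ^ instdone;	/* 0x85fffffc */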
-static const char *
-error_msg(struct i915_gpu_state *error,
- intel_engine_mask_t engines, const char *msg)
+static const char *error_msg(struct i915_gpu_coredump *error)
{
+ struct intel_engine_coredump *first = NULL;
+ struct intel_gt_coredump *gt;
+ intel_engine_mask_t engines;
int len;
+ engines = 0;
+ for (gt = error->gt; gt; gt = gt->next) {
+ struct intel_engine_coredump *cs;
+
+ if (gt->engine && !first)
+ first = gt->engine;
+
+ for (cs = gt->engine; cs; cs = cs->next)
+ engines |= cs->engine->mask;
+ }
+
len = scnprintf(error->error_msg, sizeof(error->error_msg),
- "GPU HANG: ecode %d:%x:0x%08x",
+ "GPU HANG: ecode %d:%x:%08x",
INTEL_GEN(error->i915), engines,
- i915_error_generate_code(error));
- if (error->engine) {
+ generate_ecode(first));
+ if (first) {
/* Just show the first executing process, more is confusing */
len += scnprintf(error->error_msg + len,
sizeof(error->error_msg) - len,
", in %s [%d]",
- error->engine->context.comm,
- error->engine->context.pid);
+ first->context.comm, first->context.pid);
}
- if (msg)
- len += scnprintf(error->error_msg + len,
- sizeof(error->error_msg) - len,
- ", %s", msg);
return error->error_msg;
}
-static void capture_gen_state(struct i915_gpu_state *error)
+static void capture_gen(struct i915_gpu_coredump *error)
{
struct drm_i915_private *i915 = error->i915;
- error->awake = i915->gt.awake;
error->wakelock = atomic_read(&i915->runtime_pm.wakeref_count);
error->suspended = i915->runtime_pm.suspended;
@@ -1698,6 +1706,7 @@ static void capture_gen_state(struct i915_gpu_state *error)
error->reset_count = i915_reset_count(&i915->gpu_error);
error->suspend_count = i915->suspend_count;
+ i915_params_copy(&error->params, &i915_modparams);
memcpy(&error->device_info,
INTEL_INFO(i915),
sizeof(error->device_info));
@@ -1707,115 +1716,138 @@ static void capture_gen_state(struct i915_gpu_state *error)
error->driver_caps = i915->caps;
}
-static void capture_params(struct i915_gpu_state *error)
+struct i915_gpu_coredump *
+i915_gpu_coredump_alloc(struct drm_i915_private *i915, gfp_t gfp)
{
- i915_params_copy(&error->params, &i915_modparams);
+ struct i915_gpu_coredump *error;
+
+ if (!i915_modparams.error_capture)
+ return NULL;
+
+ error = kzalloc(sizeof(*error), gfp);
+ if (!error)
+ return NULL;
+
+ kref_init(&error->ref);
+ error->i915 = i915;
+
+ error->time = ktime_get_real();
+ error->boottime = ktime_get_boottime();
+ error->uptime = ktime_sub(ktime_get(), i915->gt.last_init_time);
+ error->capture = jiffies;
+
+ capture_gen(error);
+
+ return error;
}
-static void capture_finish(struct i915_gpu_state *error)
+#define DAY_AS_SECONDS(x) (24 * 60 * 60 * (x))
+
+struct intel_gt_coredump *
+intel_gt_coredump_alloc(struct intel_gt *gt, gfp_t gfp)
{
- struct i915_ggtt *ggtt = &error->i915->ggtt;
+ struct intel_gt_coredump *gc;
- if (drm_mm_node_allocated(&ggtt->error_capture)) {
- const u64 slot = ggtt->error_capture.start;
+ gc = kzalloc(sizeof(*gc), gfp);
+ if (!gc)
+ return NULL;
+
+ gc->_gt = gt;
+ gc->awake = intel_gt_pm_is_awake(gt);
+
+ gt_record_regs(gc);
+ gt_record_fences(gc);
+
+ return gc;
+}
- ggtt->vm.clear_range(&ggtt->vm, slot, PAGE_SIZE);
+struct i915_vma_compress *
+i915_vma_capture_prepare(struct intel_gt_coredump *gt)
+{
+ struct i915_vma_compress *compress;
+
+ compress = kmalloc(sizeof(*compress), ALLOW_FAIL);
+ if (!compress)
+ return NULL;
+
+ if (!compress_init(compress)) {
+ kfree(compress);
+ return NULL;
}
+
+ gt_capture_prepare(gt);
+
+ return compress;
}
-#define DAY_AS_SECONDS(x) (24 * 60 * 60 * (x))
+void i915_vma_capture_finish(struct intel_gt_coredump *gt,
+ struct i915_vma_compress *compress)
+{
+ if (!compress)
+ return;
-struct i915_gpu_state *
-i915_capture_gpu_state(struct drm_i915_private *i915)
+ gt_capture_finish(gt);
+
+ compress_fini(compress);
+ kfree(compress);
+}
+
+struct i915_gpu_coredump *i915_gpu_coredump(struct drm_i915_private *i915)
{
- struct i915_gpu_state *error;
- struct compress compress;
+ struct i915_gpu_coredump *error;
/* Check if GPU capture has been disabled */
error = READ_ONCE(i915->gpu_error.first_error);
if (IS_ERR(error))
return error;
- error = kzalloc(sizeof(*error), ALLOW_FAIL);
- if (!error) {
- i915_disable_error_state(i915, -ENOMEM);
+ error = i915_gpu_coredump_alloc(i915, ALLOW_FAIL);
+ if (!error)
return ERR_PTR(-ENOMEM);
- }
- if (!compress_init(&compress)) {
- kfree(error);
- i915_disable_error_state(i915, -ENOMEM);
- return ERR_PTR(-ENOMEM);
- }
+ error->gt = intel_gt_coredump_alloc(&i915->gt, ALLOW_FAIL);
+ if (error->gt) {
+ struct i915_vma_compress *compress;
- kref_init(&error->ref);
- error->i915 = i915;
+ compress = i915_vma_capture_prepare(error->gt);
+ if (!compress) {
+ kfree(error->gt);
+ kfree(error);
+ return ERR_PTR(-ENOMEM);
+ }
- error->time = ktime_get_real();
- error->boottime = ktime_get_boottime();
- error->uptime = ktime_sub(ktime_get(), i915->gt.last_init_time);
- error->capture = jiffies;
+ gt_record_engines(error->gt, compress);
+
+ if (INTEL_INFO(i915)->has_gt_uc)
+ error->gt->uc = gt_record_uc(error->gt, compress);
- capture_params(error);
- capture_gen_state(error);
- capture_uc_state(error, &compress);
- capture_reg_state(error);
- gem_record_fences(error);
- gem_record_rings(error, &compress);
+ i915_vma_capture_finish(error->gt, compress);
+
+ error->simulated |= error->gt->simulated;
+ }
error->overlay = intel_overlay_capture_error_state(i915);
error->display = intel_display_capture_error_state(i915);
- capture_finish(error);
- compress_fini(&compress);
-
return error;
}
-/**
- * i915_capture_error_state - capture an error record for later analysis
- * @i915: i915 device
- * @engine_mask: the mask of engines triggering the hang
- * @msg: a message to insert into the error capture header
- *
- * Should be called when an error is detected (either a hang or an error
- * interrupt) to capture error state from the time of the error. Fills
- * out a structure which becomes available in debugfs for user level tools
- * to pick up.
- */
-void i915_capture_error_state(struct drm_i915_private *i915,
- intel_engine_mask_t engine_mask,
- const char *msg)
+void i915_error_state_store(struct i915_gpu_coredump *error)
{
+ struct drm_i915_private *i915;
static bool warned;
- struct i915_gpu_state *error;
- unsigned long flags;
- if (!i915_modparams.error_capture)
+ if (IS_ERR_OR_NULL(error))
return;
- if (READ_ONCE(i915->gpu_error.first_error))
- return;
+ i915 = error->i915;
+ dev_info(i915->drm.dev, "%s\n", error_msg(error));
- error = i915_capture_gpu_state(i915);
- if (IS_ERR(error))
+ if (error->simulated ||
+ cmpxchg(&i915->gpu_error.first_error, NULL, error))
return;
- dev_info(i915->drm.dev, "%s\n", error_msg(error, engine_mask, msg));
-
- if (!error->simulated) {
- spin_lock_irqsave(&i915->gpu_error.lock, flags);
- if (!i915->gpu_error.first_error) {
- i915->gpu_error.first_error = error;
- error = NULL;
- }
- spin_unlock_irqrestore(&i915->gpu_error.lock, flags);
- }
-
- if (error) {
- __i915_gpu_state_free(&error->ref);
- return;
- }
+ i915_gpu_coredump_get(error);
if (!xchg(&warned, true) &&
ktime_get_real_seconds() - DRIVER_TIMESTAMP < DAY_AS_SECONDS(180)) {
@@ -1828,15 +1860,38 @@ void i915_capture_error_state(struct drm_i915_private *i915,
}
}
-struct i915_gpu_state *
+/**
+ * i915_capture_error_state - capture an error record for later analysis
+ * @i915: i915 device
+ *
+ * Should be called when an error is detected (either a hang or an error
+ * interrupt) to capture error state from the time of the error. Fills
+ * out a structure which becomes available in debugfs for user level tools
+ * to pick up.
+ */
+void i915_capture_error_state(struct drm_i915_private *i915)
+{
+ struct i915_gpu_coredump *error;
+
+ error = i915_gpu_coredump(i915);
+ if (IS_ERR(error)) {
+ cmpxchg(&i915->gpu_error.first_error, NULL, error);
+ return;
+ }
+
+ i915_error_state_store(error);
+ i915_gpu_coredump_put(error);
+}
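For a caller that wants to examine the capture before publishing it, the same two-step flow can be driven by hand; a minimal sketch using only the functions introduced above:

	struct i915_gpu_coredump *error = i915_gpu_coredump(i915);

	if (!IS_ERR(error)) {
		i915_error_state_store(error);	/* no-op for simulated hangs */
		i915_gpu_coredump_put(error);
	}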
+
+struct i915_gpu_coredump *
i915_first_error_state(struct drm_i915_private *i915)
{
- struct i915_gpu_state *error;
+ struct i915_gpu_coredump *error;
spin_lock_irq(&i915->gpu_error.lock);
error = i915->gpu_error.first_error;
if (!IS_ERR_OR_NULL(error))
- i915_gpu_state_get(error);
+ i915_gpu_coredump_get(error);
spin_unlock_irq(&i915->gpu_error.lock);
return error;
@@ -1844,7 +1899,7 @@ i915_first_error_state(struct drm_i915_private *i915)
void i915_reset_error_state(struct drm_i915_private *i915)
{
- struct i915_gpu_state *error;
+ struct i915_gpu_coredump *error;
spin_lock_irq(&i915->gpu_error.lock);
error = i915->gpu_error.first_error;
@@ -1853,7 +1908,7 @@ void i915_reset_error_state(struct drm_i915_private *i915)
spin_unlock_irq(&i915->gpu_error.lock);
if (!IS_ERR_OR_NULL(error))
- i915_gpu_state_put(error);
+ i915_gpu_coredump_put(error);
}
void i915_disable_error_state(struct drm_i915_private *i915, int err)
diff --git a/drivers/gpu/drm/i915/i915_gpu_error.h b/drivers/gpu/drm/i915/i915_gpu_error.h
index 5d2c3372ff99..9109004956bd 100644
--- a/drivers/gpu/drm/i915/i915_gpu_error.h
+++ b/drivers/gpu/drm/i915/i915_gpu_error.h
@@ -25,43 +25,100 @@
#include "i915_scheduler.h"
struct drm_i915_private;
+struct i915_vma_compress;
+struct intel_engine_capture_vma;
struct intel_overlay_error_state;
struct intel_display_error_state;
-struct i915_gpu_state {
- struct kref ref;
- ktime_t time;
- ktime_t boottime;
- ktime_t uptime;
- unsigned long capture;
+struct i915_vma_coredump {
+ struct i915_vma_coredump *next;
- struct drm_i915_private *i915;
+ char name[20];
+
+ u64 gtt_offset;
+ u64 gtt_size;
+ u32 gtt_page_sizes;
+
+ int num_pages;
+ int page_count;
+ int unused;
+ u32 *pages[0];
+};
+
+struct i915_request_coredump {
+ unsigned long flags;
+ pid_t pid;
+ u32 context;
+ u32 seqno;
+ u32 start;
+ u32 head;
+ u32 tail;
+ struct i915_sched_attr sched_attr;
+};
+
+struct intel_engine_coredump {
+ const struct intel_engine_cs *engine;
- char error_msg[128];
bool simulated;
- bool awake;
- bool wakelock;
- bool suspended;
- int iommu;
u32 reset_count;
- u32 suspend_count;
- struct intel_device_info device_info;
- struct intel_runtime_info runtime_info;
- struct intel_driver_caps driver_caps;
- struct i915_params params;
- struct i915_error_uc {
- struct intel_uc_fw guc_fw;
- struct intel_uc_fw huc_fw;
- struct drm_i915_error_object *guc_log;
- } uc;
+ /* position of active request inside the ring */
+ u32 rq_head, rq_post, rq_tail;
+
+ /* Register state */
+ u32 ccid;
+ u32 start;
+ u32 tail;
+ u32 head;
+ u32 ctl;
+ u32 mode;
+ u32 hws;
+ u32 ipeir;
+ u32 ipehr;
+ u32 bbstate;
+ u32 instpm;
+ u32 instps;
+ u64 bbaddr;
+ u64 acthd;
+ u32 fault_reg;
+ u64 faddr;
+ u32 rc_psmi; /* sleep state */
+ struct intel_instdone instdone;
+
+ struct i915_gem_context_coredump {
+ char comm[TASK_COMM_LEN];
+ pid_t pid;
+ int active;
+ int guilty;
+ struct i915_sched_attr sched_attr;
+ } context;
+
+ struct i915_vma_coredump *vma;
+
+ struct i915_request_coredump execlist[EXECLIST_MAX_PORTS];
+ unsigned int num_ports;
+
+ struct {
+ u32 gfx_mode;
+ union {
+ u64 pdp[4];
+ u32 pp_dir_base;
+ };
+ } vm_info;
+
+ struct intel_engine_coredump *next;
+};
+
+struct intel_gt_coredump {
+ const struct intel_gt *_gt;
+ bool awake;
+ bool simulated;
/* Generic register state */
u32 eir;
u32 pgtbl_er;
u32 ier;
u32 gtier[6], ngtier;
- u32 ccid;
u32 derrmr;
u32 forcewake;
u32 error; /* gen6+ */
@@ -80,91 +137,45 @@ struct i915_gpu_state {
u32 nfence;
u64 fence[I915_MAX_NUM_FENCES];
+
+ struct intel_engine_coredump *engine;
+
+ struct intel_uc_coredump {
+ struct intel_uc_fw guc_fw;
+ struct intel_uc_fw huc_fw;
+ struct i915_vma_coredump *guc_log;
+ } *uc;
+
+ struct intel_gt_coredump *next;
+};
+
+struct i915_gpu_coredump {
+ struct kref ref;
+ ktime_t time;
+ ktime_t boottime;
+ ktime_t uptime;
+ unsigned long capture;
+
+ struct drm_i915_private *i915;
+
+ struct intel_gt_coredump *gt;
+
+ char error_msg[128];
+ bool simulated;
+ bool wakelock;
+ bool suspended;
+ int iommu;
+ u32 reset_count;
+ u32 suspend_count;
+
+ struct intel_device_info device_info;
+ struct intel_runtime_info runtime_info;
+ struct intel_driver_caps driver_caps;
+ struct i915_params params;
+
struct intel_overlay_error_state *overlay;
struct intel_display_error_state *display;
- struct drm_i915_error_engine {
- const struct intel_engine_cs *engine;
-
- /* Software tracked state */
- bool idle;
- int num_requests;
- u32 reset_count;
-
- /* position of active request inside the ring */
- u32 rq_head, rq_post, rq_tail;
-
- /* our own tracking of ring head and tail */
- u32 cpu_ring_head;
- u32 cpu_ring_tail;
-
- /* Register state */
- u32 start;
- u32 tail;
- u32 head;
- u32 ctl;
- u32 mode;
- u32 hws;
- u32 ipeir;
- u32 ipehr;
- u32 bbstate;
- u32 instpm;
- u32 instps;
- u64 bbaddr;
- u64 acthd;
- u32 fault_reg;
- u64 faddr;
- u32 rc_psmi; /* sleep state */
- struct intel_instdone instdone;
-
- struct drm_i915_error_context {
- char comm[TASK_COMM_LEN];
- pid_t pid;
- int active;
- int guilty;
- struct i915_sched_attr sched_attr;
- } context;
-
- struct drm_i915_error_object {
- u64 gtt_offset;
- u64 gtt_size;
- u32 gtt_page_sizes;
- int num_pages;
- int page_count;
- int unused;
- u32 *pages[0];
- } *ringbuffer, *batchbuffer, *wa_batchbuffer, *ctx, *hws_page;
-
- struct drm_i915_error_object **user_bo;
- long user_bo_count;
-
- struct drm_i915_error_object *wa_ctx;
- struct drm_i915_error_object *default_state;
-
- struct drm_i915_error_request {
- unsigned long flags;
- long jiffies;
- pid_t pid;
- u32 context;
- u32 seqno;
- u32 start;
- u32 head;
- u32 tail;
- struct i915_sched_attr sched_attr;
- } *requests, execlist[EXECLIST_MAX_PORTS];
- unsigned int num_ports;
-
- struct {
- u32 gfx_mode;
- union {
- u64 pdp[4];
- u32 pp_dir_base;
- };
- } vm_info;
-
- struct drm_i915_error_engine *next;
- } *engine;
-
struct scatterlist *sgl, *fit;
};
@@ -172,7 +183,7 @@ struct i915_gpu_error {
/* For reset and error_state handling. */
spinlock_t lock;
/* Protected by the above dev->gpu_error.lock. */
- struct i915_gpu_state *first_error;
+ struct i915_gpu_coredump *first_error;
atomic_t pending_fb_pin;
@@ -200,41 +211,115 @@ struct drm_i915_error_state_buf {
__printf(2, 3)
void i915_error_printf(struct drm_i915_error_state_buf *e, const char *f, ...);
-struct i915_gpu_state *i915_capture_gpu_state(struct drm_i915_private *i915);
-void i915_capture_error_state(struct drm_i915_private *dev_priv,
- intel_engine_mask_t engine_mask,
- const char *error_msg);
+struct i915_gpu_coredump *i915_gpu_coredump(struct drm_i915_private *i915);
+void i915_capture_error_state(struct drm_i915_private *i915);
+
+struct i915_gpu_coredump *
+i915_gpu_coredump_alloc(struct drm_i915_private *i915, gfp_t gfp);
+
+struct intel_gt_coredump *
+intel_gt_coredump_alloc(struct intel_gt *gt, gfp_t gfp);
+
+struct intel_engine_coredump *
+intel_engine_coredump_alloc(struct intel_engine_cs *engine, gfp_t gfp);
+
+struct intel_engine_capture_vma *
+intel_engine_coredump_add_request(struct intel_engine_coredump *ee,
+ struct i915_request *rq,
+ gfp_t gfp);
-static inline struct i915_gpu_state *
-i915_gpu_state_get(struct i915_gpu_state *gpu)
+void intel_engine_coredump_add_vma(struct intel_engine_coredump *ee,
+ struct intel_engine_capture_vma *capture,
+ struct i915_vma_compress *compress);
+
+struct i915_vma_compress *
+i915_vma_capture_prepare(struct intel_gt_coredump *gt);
+
+void i915_vma_capture_finish(struct intel_gt_coredump *gt,
+ struct i915_vma_compress *compress);
+
+void i915_error_state_store(struct i915_gpu_coredump *error);
+
+static inline struct i915_gpu_coredump *
+i915_gpu_coredump_get(struct i915_gpu_coredump *gpu)
{
kref_get(&gpu->ref);
return gpu;
}
-ssize_t i915_gpu_state_copy_to_buffer(struct i915_gpu_state *error,
- char *buf, loff_t offset, size_t count);
+ssize_t
+i915_gpu_coredump_copy_to_buffer(struct i915_gpu_coredump *error,
+ char *buf, loff_t offset, size_t count);
-void __i915_gpu_state_free(struct kref *kref);
-static inline void i915_gpu_state_put(struct i915_gpu_state *gpu)
+void __i915_gpu_coredump_free(struct kref *kref);
+static inline void i915_gpu_coredump_put(struct i915_gpu_coredump *gpu)
{
if (gpu)
- kref_put(&gpu->ref, __i915_gpu_state_free);
+ kref_put(&gpu->ref, __i915_gpu_coredump_free);
}
-struct i915_gpu_state *i915_first_error_state(struct drm_i915_private *i915);
+struct i915_gpu_coredump *i915_first_error_state(struct drm_i915_private *i915);
void i915_reset_error_state(struct drm_i915_private *i915);
void i915_disable_error_state(struct drm_i915_private *i915, int err);
#else
-static inline void i915_capture_error_state(struct drm_i915_private *dev_priv,
- u32 engine_mask,
- const char *error_msg)
+static inline void i915_capture_error_state(struct drm_i915_private *i915)
+{
+}
+
+static inline struct i915_gpu_coredump *
+i915_gpu_coredump_alloc(struct drm_i915_private *i915, gfp_t gfp)
+{
+ return NULL;
+}
+
+static inline struct intel_gt_coredump *
+intel_gt_coredump_alloc(struct intel_gt *gt, gfp_t gfp)
+{
+ return NULL;
+}
+
+static inline struct intel_engine_coredump *
+intel_engine_coredump_alloc(struct intel_engine_cs *engine, gfp_t gfp)
+{
+ return NULL;
+}
+
+static inline struct intel_engine_capture_vma *
+intel_engine_coredump_add_request(struct intel_engine_coredump *ee,
+ struct i915_request *rq,
+ gfp_t gfp)
+{
+ return NULL;
+}
+
+static inline void
+intel_engine_coredump_add_vma(struct intel_engine_coredump *ee,
+ struct intel_engine_capture_vma *capture,
+ struct i915_vma_compress *compress)
+{
+}
+
+static inline struct i915_vma_compress *
+i915_vma_capture_prepare(struct intel_gt_coredump *gt)
+{
+ return NULL;
+}
+
+static inline void
+i915_vma_capture_finish(struct intel_gt_coredump *gt,
+ struct i915_vma_compress *compress)
+{
+}
+
+static inline void
+i915_error_state_store(struct i915_gpu_coredump *error)
{
}
-static inline struct i915_gpu_state *
+static inline struct i915_gpu_coredump *
i915_first_error_state(struct drm_i915_private *i915)
{
return ERR_PTR(-ENODEV);
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index dae00f7dd7df..afc6aad9bf8c 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -893,7 +893,7 @@ int intel_get_crtc_scanline(struct intel_crtc *crtc)
}
/**
- * ivybridge_parity_work - Workqueue called when a parity error interrupt
+ * ivb_parity_work - Workqueue called when a parity error interrupt
* occurred.
* @work: workqueue struct
*
@@ -901,7 +901,7 @@ int intel_get_crtc_scanline(struct intel_crtc *crtc)
* this event, userspace should try to remap the bad rows, since statistically
* the same row is more likely to go bad again.
*/
-static void ivybridge_parity_work(struct work_struct *work)
+static void ivb_parity_work(struct work_struct *work)
{
struct drm_i915_private *dev_priv =
container_of(work, typeof(*dev_priv), l3_parity.error_work);
@@ -2031,7 +2031,7 @@ static void ivb_display_irq_handler(struct drm_i915_private *dev_priv,
* 4 - Process the interrupt(s) that had bits set in the IIRs.
* 5 - Re-enable Master Interrupt Control.
*/
-static irqreturn_t ironlake_irq_handler(int irq, void *arg)
+static irqreturn_t ilk_irq_handler(int irq, void *arg)
{
struct drm_i915_private *dev_priv = arg;
u32 de_iir, gt_iir, de_ier, sde_ier = 0;
@@ -2453,6 +2453,25 @@ static inline void gen11_master_intr_enable(void __iomem * const regs)
raw_reg_write(regs, GEN11_GFX_MSTR_IRQ, GEN11_MASTER_IRQ);
}
+static void
+gen11_display_irq_handler(struct drm_i915_private *i915)
+{
+ void __iomem * const regs = i915->uncore.regs;
+ const u32 disp_ctl = raw_reg_read(regs, GEN11_DISPLAY_INT_CTL);
+
+ disable_rpm_wakeref_asserts(&i915->runtime_pm);
+ /*
+ * GEN11_DISPLAY_INT_CTL has same format as GEN8_MASTER_IRQ
+ * for the display related bits.
+ */
+ raw_reg_write(regs, GEN11_DISPLAY_INT_CTL, 0x0);
+ gen8_de_irq_handler(i915, disp_ctl);
+ raw_reg_write(regs, GEN11_DISPLAY_INT_CTL,
+ GEN11_DISPLAY_IRQ_ENABLE);
+
+ enable_rpm_wakeref_asserts(&i915->runtime_pm);
+}
+
static __always_inline irqreturn_t
__gen11_irq_handler(struct drm_i915_private * const i915,
u32 (*intr_disable)(void __iomem * const regs),
@@ -2476,17 +2495,8 @@ __gen11_irq_handler(struct drm_i915_private * const i915,
gen11_gt_irq_handler(gt, master_ctl);
/* IRQs are synced during runtime_suspend, we don't require a wakeref */
- if (master_ctl & GEN11_DISPLAY_IRQ) {
- const u32 disp_ctl = raw_reg_read(regs, GEN11_DISPLAY_INT_CTL);
-
- disable_rpm_wakeref_asserts(&i915->runtime_pm);
- /*
- * GEN11_DISPLAY_INT_CTL has same format as GEN8_MASTER_IRQ
- * for the display related bits.
- */
- gen8_de_irq_handler(i915, disp_ctl);
- enable_rpm_wakeref_asserts(&i915->runtime_pm);
- }
+ if (master_ctl & GEN11_DISPLAY_IRQ)
+ gen11_display_irq_handler(i915);
gu_misc_iir = gen11_gu_misc_irq_ack(gt, master_ctl);
@@ -2732,7 +2742,7 @@ static void vlv_display_irq_postinstall(struct drm_i915_private *dev_priv)
/* drm_dma.h hooks
*/
-static void ironlake_irq_reset(struct drm_i915_private *dev_priv)
+static void ilk_irq_reset(struct drm_i915_private *dev_priv)
{
struct intel_uncore *uncore = &dev_priv->uncore;
@@ -2788,15 +2798,11 @@ static void gen8_irq_reset(struct drm_i915_private *dev_priv)
ibx_irq_reset(dev_priv);
}
-static void gen11_irq_reset(struct drm_i915_private *dev_priv)
+static void gen11_display_irq_reset(struct drm_i915_private *dev_priv)
{
struct intel_uncore *uncore = &dev_priv->uncore;
enum pipe pipe;
- gen11_master_intr_disable(dev_priv->uncore.regs);
-
- gen11_gt_irq_reset(&dev_priv->gt);
-
intel_uncore_write(uncore, GEN11_DISPLAY_INT_CTL, 0);
if (INTEL_GEN(dev_priv) >= 12) {
@@ -2825,13 +2831,24 @@ static void gen11_irq_reset(struct drm_i915_private *dev_priv)
GEN3_IRQ_RESET(uncore, GEN8_DE_PORT_);
GEN3_IRQ_RESET(uncore, GEN8_DE_MISC_);
GEN3_IRQ_RESET(uncore, GEN11_DE_HPD_);
- GEN3_IRQ_RESET(uncore, GEN11_GU_MISC_);
- GEN3_IRQ_RESET(uncore, GEN8_PCU_);
if (INTEL_PCH_TYPE(dev_priv) >= PCH_ICP)
GEN3_IRQ_RESET(uncore, SDE);
}
+static void gen11_irq_reset(struct drm_i915_private *dev_priv)
+{
+ struct intel_uncore *uncore = &dev_priv->uncore;
+
+ gen11_master_intr_disable(dev_priv->uncore.regs);
+
+ gen11_gt_irq_reset(&dev_priv->gt);
+ gen11_display_irq_reset(dev_priv);
+
+ GEN3_IRQ_RESET(uncore, GEN11_GU_MISC_);
+ GEN3_IRQ_RESET(uncore, GEN8_PCU_);
+}
+
void gen8_irq_power_well_post_enable(struct drm_i915_private *dev_priv,
u8 pipe_mask)
{
@@ -2976,6 +2993,8 @@ static void icp_hpd_irq_setup(struct drm_i915_private *dev_priv,
hotplug_irqs = sde_ddi_mask | sde_tc_mask;
enabled_irqs = intel_hpd_enabled_irqs(dev_priv, pins);
+ I915_WRITE(SHPD_FILTER_CNT, SHPD_FILTER_CNT_500_ADJ);
+
ibx_display_interrupt_update(dev_priv, hotplug_irqs, enabled_irqs);
icp_hpd_detection_setup(dev_priv, ddi_enable_mask, tc_enable_mask);
@@ -3081,6 +3100,9 @@ static void spt_hpd_irq_setup(struct drm_i915_private *dev_priv)
{
u32 hotplug_irqs, enabled_irqs;
+ if (INTEL_PCH_TYPE(dev_priv) >= PCH_CNP)
+ I915_WRITE(SHPD_FILTER_CNT, SHPD_FILTER_CNT_500_ADJ);
+
hotplug_irqs = SDE_HOTPLUG_MASK_SPT;
enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_spt);
@@ -3203,7 +3225,7 @@ static void ibx_irq_postinstall(struct drm_i915_private *dev_priv)
spt_hpd_detection_setup(dev_priv);
}
-static void ironlake_irq_postinstall(struct drm_i915_private *dev_priv)
+static void ilk_irq_postinstall(struct drm_i915_private *dev_priv)
{
struct intel_uncore *uncore = &dev_priv->uncore;
u32 display_mask, extra_mask;
@@ -3597,7 +3619,7 @@ static irqreturn_t i8xx_irq_handler(int irq, void *arg)
intel_uncore_write16(&dev_priv->uncore, GEN2_IIR, iir);
if (iir & I915_USER_INTERRUPT)
- intel_engine_breadcrumbs_irq(dev_priv->engine[RCS0]);
+ intel_engine_signal_breadcrumbs(dev_priv->engine[RCS0]);
if (iir & I915_MASTER_ERROR_INTERRUPT)
i8xx_error_irq_handler(dev_priv, eir, eir_stuck);
@@ -3702,7 +3724,7 @@ static irqreturn_t i915_irq_handler(int irq, void *arg)
I915_WRITE(GEN2_IIR, iir);
if (iir & I915_USER_INTERRUPT)
- intel_engine_breadcrumbs_irq(dev_priv->engine[RCS0]);
+ intel_engine_signal_breadcrumbs(dev_priv->engine[RCS0]);
if (iir & I915_MASTER_ERROR_INTERRUPT)
i9xx_error_irq_handler(dev_priv, eir, eir_stuck);
@@ -3844,10 +3866,10 @@ static irqreturn_t i965_irq_handler(int irq, void *arg)
I915_WRITE(GEN2_IIR, iir);
if (iir & I915_USER_INTERRUPT)
- intel_engine_breadcrumbs_irq(dev_priv->engine[RCS0]);
+ intel_engine_signal_breadcrumbs(dev_priv->engine[RCS0]);
if (iir & I915_BSD_USER_INTERRUPT)
- intel_engine_breadcrumbs_irq(dev_priv->engine[VCS0]);
+ intel_engine_signal_breadcrumbs(dev_priv->engine[VCS0]);
if (iir & I915_MASTER_ERROR_INTERRUPT)
i9xx_error_irq_handler(dev_priv, eir, eir_stuck);
@@ -3877,7 +3899,7 @@ void intel_irq_init(struct drm_i915_private *dev_priv)
intel_hpd_init_work(dev_priv);
- INIT_WORK(&dev_priv->l3_parity.error_work, ivybridge_parity_work);
+ INIT_WORK(&dev_priv->l3_parity.error_work, ivb_parity_work);
for (i = 0; i < MAX_L3_SLICES; ++i)
dev_priv->l3_parity.remap_info[i] = NULL;
@@ -3958,7 +3980,7 @@ static irq_handler_t intel_irq_handler(struct drm_i915_private *dev_priv)
else if (INTEL_GEN(dev_priv) >= 8)
return gen8_irq_handler;
else
- return ironlake_irq_handler;
+ return ilk_irq_handler;
}
}
@@ -3981,7 +4003,7 @@ static void intel_irq_reset(struct drm_i915_private *dev_priv)
else if (INTEL_GEN(dev_priv) >= 8)
gen8_irq_reset(dev_priv);
else
- ironlake_irq_reset(dev_priv);
+ ilk_irq_reset(dev_priv);
}
}
@@ -4004,7 +4026,7 @@ static void intel_irq_postinstall(struct drm_i915_private *dev_priv)
else if (INTEL_GEN(dev_priv) >= 8)
gen8_irq_postinstall(dev_priv);
else
- ironlake_irq_postinstall(dev_priv);
+ ilk_irq_postinstall(dev_priv);
}
}
diff --git a/drivers/gpu/drm/i915/i915_memcpy.c b/drivers/gpu/drm/i915/i915_memcpy.c
index 07b04b0acb77..fdd550405fd3 100644
--- a/drivers/gpu/drm/i915/i915_memcpy.c
+++ b/drivers/gpu/drm/i915/i915_memcpy.c
@@ -27,6 +27,12 @@
#include "i915_memcpy.h"
+#if IS_ENABLED(CONFIG_DRM_I915_DEBUG)
+#define CI_BUG_ON(expr) BUG_ON(expr)
+#else
+#define CI_BUG_ON(expr) BUILD_BUG_ON_INVALID(expr)
+#endif
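CI_BUG_ON() gives CI kernels (CONFIG_DRM_I915_DEBUG) a hard assertion, while release builds only type-check the expression and emit no code. For instance, guarding an alignment precondition costs nothing in production:

	CI_BUG_ON(!IS_ALIGNED((unsigned long)src, 16));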
+
static DEFINE_STATIC_KEY_FALSE(has_movntdqa);
#ifdef CONFIG_AS_MOVNTDQA
@@ -34,7 +40,6 @@ static void __memcpy_ntdqa(void *dst, const void *src, unsigned long len)
{
kernel_fpu_begin();
- len >>= 4;
while (len >= 4) {
asm("movntdqa (%0), %%xmm0\n"
"movntdqa 16(%0), %%xmm1\n"
@@ -59,6 +64,38 @@ static void __memcpy_ntdqa(void *dst, const void *src, unsigned long len)
kernel_fpu_end();
}
+
+static void __memcpy_ntdqu(void *dst, const void *src, unsigned long len)
+{
+ kernel_fpu_begin();
+
+ while (len >= 4) {
+ asm("movntdqa (%0), %%xmm0\n"
+ "movntdqa 16(%0), %%xmm1\n"
+ "movntdqa 32(%0), %%xmm2\n"
+ "movntdqa 48(%0), %%xmm3\n"
+ "movups %%xmm0, (%1)\n"
+ "movups %%xmm1, 16(%1)\n"
+ "movups %%xmm2, 32(%1)\n"
+ "movups %%xmm3, 48(%1)\n"
+ :: "r" (src), "r" (dst) : "memory");
+ src += 64;
+ dst += 64;
+ len -= 4;
+ }
+ while (len--) {
+ asm("movntdqa (%0), %%xmm0\n"
+ "movups %%xmm0, (%1)\n"
+ :: "r" (src), "r" (dst) : "memory");
+ src += 16;
+ dst += 16;
+ }
+
+ kernel_fpu_end();
+}
+#else
+static void __memcpy_ntdqa(void *dst, const void *src, unsigned long len) {}
+static void __memcpy_ntdqu(void *dst, const void *src, unsigned long len) {}
#endif
/**
@@ -83,17 +120,47 @@ bool i915_memcpy_from_wc(void *dst, const void *src, unsigned long len)
if (unlikely(((unsigned long)dst | (unsigned long)src | len) & 15))
return false;
-#ifdef CONFIG_AS_MOVNTDQA
if (static_branch_likely(&has_movntdqa)) {
if (likely(len))
- __memcpy_ntdqa(dst, src, len);
+ __memcpy_ntdqa(dst, src, len >> 4);
return true;
}
-#endif
return false;
}
+/**
+ * i915_unaligned_memcpy_from_wc: perform a mostly accelerated read from WC
+ * @dst: destination pointer
+ * @src: source pointer
+ * @len: how many bytes to copy
+ *
+ * Like i915_memcpy_from_wc(), the unaligned variant copies @len bytes from
+ * @src to @dst using * non-temporal instructions where available, but
+ * accepts that its arguments may not be aligned, but are valid for the
+ * potential 16-byte read past the end.
+ */
+void i915_unaligned_memcpy_from_wc(void *dst, void *src, unsigned long len)
+{
+ unsigned long addr;
+
+ CI_BUG_ON(!i915_has_memcpy_from_wc());
+
+ addr = (unsigned long)src;
+ if (!IS_ALIGNED(addr, 16)) {
+ unsigned long x = min(ALIGN(addr, 16) - addr, len);
+
+ memcpy(dst, src, x);
+
+ len -= x;
+ dst += x;
+ src += x;
+ }
+
+ if (likely(len))
+ __memcpy_ntdqu(dst, src, DIV_ROUND_UP(len, 16));
+}
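A hedged usage sketch (dst, src and len are assumed to come from the caller): read back an arbitrarily aligned span of a write-combined mapping, falling back to plain loads when SSE4.1 is unavailable:

	if (i915_has_memcpy_from_wc())
		i915_unaligned_memcpy_from_wc(dst, src, len);
	else
		memcpy(dst, src, len);	/* uncached reads; slower but correct */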
+
void i915_memcpy_init_early(struct drm_i915_private *dev_priv)
{
/*
diff --git a/drivers/gpu/drm/i915/i915_memcpy.h b/drivers/gpu/drm/i915/i915_memcpy.h
index 970d84b16987..e36d30edd987 100644
--- a/drivers/gpu/drm/i915/i915_memcpy.h
+++ b/drivers/gpu/drm/i915/i915_memcpy.h
@@ -11,7 +11,9 @@
struct drm_i915_private;
void i915_memcpy_init_early(struct drm_i915_private *i915);
+
bool i915_memcpy_from_wc(void *dst, const void *src, unsigned long len);
+void i915_unaligned_memcpy_from_wc(void *dst, void *src, unsigned long len);
/* The movntdqa instructions used for memcpy-from-wc require 16-byte alignment,
* as well as SSE4.1 support. i915_memcpy_from_wc() will report if it cannot
diff --git a/drivers/gpu/drm/i915/i915_mm.c b/drivers/gpu/drm/i915/i915_mm.c
index 318562ce64c0..b6376b25ef63 100644
--- a/drivers/gpu/drm/i915/i915_mm.c
+++ b/drivers/gpu/drm/i915/i915_mm.c
@@ -33,6 +33,9 @@ struct remap_pfn {
struct mm_struct *mm;
unsigned long pfn;
pgprot_t prot;
+
+ struct sgt_iter sgt;
+ resource_size_t iobase;
};
static int remap_pfn(pte_t *pte, unsigned long addr, void *data)
@@ -46,6 +49,35 @@ static int remap_pfn(pte_t *pte, unsigned long addr, void *data)
return 0;
}
+#define use_dma(io) ((io) != -1)
+
+static inline unsigned long sgt_pfn(const struct remap_pfn *r)
+{
+ if (use_dma(r->iobase))
+ return (r->sgt.dma + r->sgt.curr + r->iobase) >> PAGE_SHIFT;
+ else
+ return r->sgt.pfn + (r->sgt.curr >> PAGE_SHIFT);
+}
+
+static int remap_sg(pte_t *pte, unsigned long addr, void *data)
+{
+ struct remap_pfn *r = data;
+
+ if (GEM_WARN_ON(!r->sgt.pfn))
+ return -EINVAL;
+
+ /* Special PTEs are not associated with any struct page */
+ set_pte_at(r->mm, addr, pte,
+ pte_mkspecial(pfn_pte(sgt_pfn(r), r->prot)));
+ r->pfn++; /* track insertions in case we need to unwind later */
+
+ r->sgt.curr += PAGE_SIZE;
+ if (r->sgt.curr >= r->sgt.max)
+ r->sgt = __sgt_iter(__sg_next(r->sgt.sgp), use_dma(r->iobase));
+
+ return 0;
+}
+
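For context, remap_sg() is written against the driver's sg iterator: sgt.curr advances in PAGE_SIZE steps through the current entry, and the iterator is reloaded from the next entry once sgt.max is reached. A condensed walk showing just that advance (editorial sketch built on the same __sgt_iter()/__sg_next() helpers):

	static void walk_sg_pages(struct scatterlist *sgl, bool dma)
	{
		struct sgt_iter it = __sgt_iter(sgl, dma);

		while (it.sgp) {	/* a NULL sgp terminates the walk */
			/* consume one page at it.curr, as remap_sg() does per PTE */
			it.curr += PAGE_SIZE;
			if (it.curr >= it.max)
				it = __sgt_iter(__sg_next(it.sgp), dma);
		}
	}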
/**
* remap_io_mapping - remap an IO mapping to userspace
* @vma: user vma to map to
@@ -80,3 +112,40 @@ int remap_io_mapping(struct vm_area_struct *vma,
return 0;
}
+
+/**
+ * remap_io_sg - remap an I/O scatterlist to userspace
+ * @vma: user vma to map to
+ * @addr: target user address to start at
+ * @size: size of map area
+ * @sgl: first scatterlist entry to map
+ * @iobase: offset added to each stored DMA address, or -1 to map by pfn
+ *
+ * Note: this is only safe if the mm semaphore is held when called.
+ */
+int remap_io_sg(struct vm_area_struct *vma,
+ unsigned long addr, unsigned long size,
+ struct scatterlist *sgl, resource_size_t iobase)
+{
+ struct remap_pfn r = {
+ .mm = vma->vm_mm,
+ .prot = vma->vm_page_prot,
+ .sgt = __sgt_iter(sgl, use_dma(iobase)),
+ .iobase = iobase,
+ };
+ int err;
+
+ /* We rely on prevalidation of the io-mapping to skip track_pfn(). */
+ GEM_BUG_ON((vma->vm_flags & EXPECTED_FLAGS) != EXPECTED_FLAGS);
+
+ if (!use_dma(iobase))
+ flush_cache_range(vma, addr, size);
+
+ err = apply_to_page_range(r.mm, addr, size, remap_sg, &r);
+ if (unlikely(err)) {
+ zap_vma_ptes(vma, addr, r.pfn << PAGE_SHIFT);
+ return err;
+ }
+
+ return 0;
+}
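
A hypothetical caller, populating a whole user vma from an object's sg table (sketch only; map_obj_pages() and its parameters are illustrative, not part of the patch):

	static int map_obj_pages(struct vm_area_struct *vma,
				 struct scatterlist *sgl,
				 resource_size_t io_start, bool is_iomem)
	{
		/* iobase selects the addressing mode: a region offset to
		 * remap by DMA address, or -1 to remap by struct-page pfn. */
		return remap_io_sg(vma, vma->vm_start,
				   vma->vm_end - vma->vm_start,
				   sgl, is_iomem ? io_start : -1);
	}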
diff --git a/drivers/gpu/drm/i915/i915_pci.c b/drivers/gpu/drm/i915/i915_pci.c
index 1bb701d32a5d..83f01401b8b5 100644
--- a/drivers/gpu/drm/i915/i915_pci.c
+++ b/drivers/gpu/drm/i915/i915_pci.c
@@ -30,6 +30,7 @@
#include "display/intel_fbdev.h"
#include "i915_drv.h"
+#include "i915_perf.h"
#include "i915_globals.h"
#include "i915_selftest.h"
@@ -192,23 +193,23 @@
GEN_DEFAULT_PAGE_SIZES, \
GEN_DEFAULT_REGIONS
-static const struct intel_device_info intel_i830_info = {
+static const struct intel_device_info i830_info = {
I830_FEATURES,
PLATFORM(INTEL_I830),
};
-static const struct intel_device_info intel_i845g_info = {
+static const struct intel_device_info i845g_info = {
I845_FEATURES,
PLATFORM(INTEL_I845G),
};
-static const struct intel_device_info intel_i85x_info = {
+static const struct intel_device_info i85x_info = {
I830_FEATURES,
PLATFORM(INTEL_I85X),
.display.has_fbc = 1,
};
-static const struct intel_device_info intel_i865g_info = {
+static const struct intel_device_info i865g_info = {
I845_FEATURES,
PLATFORM(INTEL_I865G),
};
@@ -227,7 +228,7 @@ static const struct intel_device_info intel_i865g_info = {
GEN_DEFAULT_PAGE_SIZES, \
GEN_DEFAULT_REGIONS
-static const struct intel_device_info intel_i915g_info = {
+static const struct intel_device_info i915g_info = {
GEN3_FEATURES,
PLATFORM(INTEL_I915G),
.has_coherent_ggtt = false,
@@ -238,7 +239,7 @@ static const struct intel_device_info intel_i915g_info = {
.unfenced_needs_alignment = 1,
};
-static const struct intel_device_info intel_i915gm_info = {
+static const struct intel_device_info i915gm_info = {
GEN3_FEATURES,
PLATFORM(INTEL_I915GM),
.is_mobile = 1,
@@ -251,7 +252,7 @@ static const struct intel_device_info intel_i915gm_info = {
.unfenced_needs_alignment = 1,
};
-static const struct intel_device_info intel_i945g_info = {
+static const struct intel_device_info i945g_info = {
GEN3_FEATURES,
PLATFORM(INTEL_I945G),
.display.has_hotplug = 1,
@@ -262,7 +263,7 @@ static const struct intel_device_info intel_i945g_info = {
.unfenced_needs_alignment = 1,
};
-static const struct intel_device_info intel_i945gm_info = {
+static const struct intel_device_info i945gm_info = {
GEN3_FEATURES,
PLATFORM(INTEL_I945GM),
.is_mobile = 1,
@@ -276,21 +277,21 @@ static const struct intel_device_info intel_i945gm_info = {
.unfenced_needs_alignment = 1,
};
-static const struct intel_device_info intel_g33_info = {
+static const struct intel_device_info g33_info = {
GEN3_FEATURES,
PLATFORM(INTEL_G33),
.display.has_hotplug = 1,
.display.has_overlay = 1,
};
-static const struct intel_device_info intel_pineview_g_info = {
+static const struct intel_device_info pnv_g_info = {
GEN3_FEATURES,
PLATFORM(INTEL_PINEVIEW),
.display.has_hotplug = 1,
.display.has_overlay = 1,
};
-static const struct intel_device_info intel_pineview_m_info = {
+static const struct intel_device_info pnv_m_info = {
GEN3_FEATURES,
PLATFORM(INTEL_PINEVIEW),
.is_mobile = 1,
@@ -313,7 +314,7 @@ static const struct intel_device_info intel_pineview_m_info = {
GEN_DEFAULT_PAGE_SIZES, \
GEN_DEFAULT_REGIONS
-static const struct intel_device_info intel_i965g_info = {
+static const struct intel_device_info i965g_info = {
GEN4_FEATURES,
PLATFORM(INTEL_I965G),
.display.has_overlay = 1,
@@ -321,7 +322,7 @@ static const struct intel_device_info intel_i965g_info = {
.has_snoop = false,
};
-static const struct intel_device_info intel_i965gm_info = {
+static const struct intel_device_info i965gm_info = {
GEN4_FEATURES,
PLATFORM(INTEL_I965GM),
.is_mobile = 1,
@@ -332,14 +333,14 @@ static const struct intel_device_info intel_i965gm_info = {
.has_snoop = false,
};
-static const struct intel_device_info intel_g45_info = {
+static const struct intel_device_info g45_info = {
GEN4_FEATURES,
PLATFORM(INTEL_G45),
.engine_mask = BIT(RCS0) | BIT(VCS0),
.gpu_reset_clobbers_display = false,
};
-static const struct intel_device_info intel_gm45_info = {
+static const struct intel_device_info gm45_info = {
GEN4_FEATURES,
PLATFORM(INTEL_GM45),
.is_mobile = 1,
@@ -364,12 +365,12 @@ static const struct intel_device_info intel_gm45_info = {
GEN_DEFAULT_PAGE_SIZES, \
GEN_DEFAULT_REGIONS
-static const struct intel_device_info intel_ironlake_d_info = {
+static const struct intel_device_info ilk_d_info = {
GEN5_FEATURES,
PLATFORM(INTEL_IRONLAKE),
};
-static const struct intel_device_info intel_ironlake_m_info = {
+static const struct intel_device_info ilk_m_info = {
GEN5_FEATURES,
PLATFORM(INTEL_IRONLAKE),
.is_mobile = 1,
@@ -399,12 +400,12 @@ static const struct intel_device_info intel_ironlake_m_info = {
GEN6_FEATURES, \
PLATFORM(INTEL_SANDYBRIDGE)
-static const struct intel_device_info intel_sandybridge_d_gt1_info = {
+static const struct intel_device_info snb_d_gt1_info = {
SNB_D_PLATFORM,
.gt = 1,
};
-static const struct intel_device_info intel_sandybridge_d_gt2_info = {
+static const struct intel_device_info snb_d_gt2_info = {
SNB_D_PLATFORM,
.gt = 2,
};
@@ -415,12 +416,12 @@ static const struct intel_device_info intel_sandybridge_d_gt2_info = {
.is_mobile = 1
-static const struct intel_device_info intel_sandybridge_m_gt1_info = {
+static const struct intel_device_info snb_m_gt1_info = {
SNB_M_PLATFORM,
.gt = 1,
};
-static const struct intel_device_info intel_sandybridge_m_gt2_info = {
+static const struct intel_device_info snb_m_gt2_info = {
SNB_M_PLATFORM,
.gt = 2,
};
@@ -436,7 +437,7 @@ static const struct intel_device_info intel_sandybridge_m_gt2_info = {
.has_rc6 = 1, \
.has_rc6p = 1, \
.has_rps = true, \
- .ppgtt_type = INTEL_PPGTT_ALIASING, \
+ .ppgtt_type = INTEL_PPGTT_FULL, \
.ppgtt_size = 31, \
IVB_PIPE_OFFSETS, \
IVB_CURSOR_OFFSETS, \
@@ -449,12 +450,12 @@ static const struct intel_device_info intel_sandybridge_m_gt2_info = {
PLATFORM(INTEL_IVYBRIDGE), \
.has_l3_dpf = 1
-static const struct intel_device_info intel_ivybridge_d_gt1_info = {
+static const struct intel_device_info ivb_d_gt1_info = {
IVB_D_PLATFORM,
.gt = 1,
};
-static const struct intel_device_info intel_ivybridge_d_gt2_info = {
+static const struct intel_device_info ivb_d_gt2_info = {
IVB_D_PLATFORM,
.gt = 2,
};
@@ -465,17 +466,17 @@ static const struct intel_device_info intel_ivybridge_d_gt2_info = {
.is_mobile = 1, \
.has_l3_dpf = 1
-static const struct intel_device_info intel_ivybridge_m_gt1_info = {
+static const struct intel_device_info ivb_m_gt1_info = {
IVB_M_PLATFORM,
.gt = 1,
};
-static const struct intel_device_info intel_ivybridge_m_gt2_info = {
+static const struct intel_device_info ivb_m_gt2_info = {
IVB_M_PLATFORM,
.gt = 2,
};
-static const struct intel_device_info intel_ivybridge_q_info = {
+static const struct intel_device_info ivb_q_info = {
GEN7_FEATURES,
PLATFORM(INTEL_IVYBRIDGE),
.gt = 2,
@@ -483,7 +484,7 @@ static const struct intel_device_info intel_ivybridge_q_info = {
.has_l3_dpf = 1,
};
-static const struct intel_device_info intel_valleyview_info = {
+static const struct intel_device_info vlv_info = {
PLATFORM(INTEL_VALLEYVIEW),
GEN(7),
.is_lp = 1,
@@ -493,7 +494,7 @@ static const struct intel_device_info intel_valleyview_info = {
.has_rps = true,
.display.has_gmch = 1,
.display.has_hotplug = 1,
- .ppgtt_type = INTEL_PPGTT_ALIASING,
+ .ppgtt_type = INTEL_PPGTT_FULL,
.ppgtt_size = 31,
.has_snoop = true,
.has_coherent_ggtt = false,
@@ -522,17 +523,17 @@ static const struct intel_device_info intel_valleyview_info = {
PLATFORM(INTEL_HASWELL), \
.has_l3_dpf = 1
-static const struct intel_device_info intel_haswell_gt1_info = {
+static const struct intel_device_info hsw_gt1_info = {
HSW_PLATFORM,
.gt = 1,
};
-static const struct intel_device_info intel_haswell_gt2_info = {
+static const struct intel_device_info hsw_gt2_info = {
HSW_PLATFORM,
.gt = 2,
};
-static const struct intel_device_info intel_haswell_gt3_info = {
+static const struct intel_device_info hsw_gt3_info = {
HSW_PLATFORM,
.gt = 3,
};
@@ -550,17 +551,17 @@ static const struct intel_device_info intel_haswell_gt3_info = {
GEN8_FEATURES, \
PLATFORM(INTEL_BROADWELL)
-static const struct intel_device_info intel_broadwell_gt1_info = {
+static const struct intel_device_info bdw_gt1_info = {
BDW_PLATFORM,
.gt = 1,
};
-static const struct intel_device_info intel_broadwell_gt2_info = {
+static const struct intel_device_info bdw_gt2_info = {
BDW_PLATFORM,
.gt = 2,
};
-static const struct intel_device_info intel_broadwell_rsvd_info = {
+static const struct intel_device_info bdw_rsvd_info = {
BDW_PLATFORM,
.gt = 3,
/* According to the device ID those devices are GT3, they were
@@ -568,14 +569,14 @@ static const struct intel_device_info intel_broadwell_rsvd_info = {
*/
};
-static const struct intel_device_info intel_broadwell_gt3_info = {
+static const struct intel_device_info bdw_gt3_info = {
BDW_PLATFORM,
.gt = 3,
.engine_mask =
BIT(RCS0) | BIT(VCS0) | BIT(BCS0) | BIT(VECS0) | BIT(VCS1),
};
-static const struct intel_device_info intel_cherryview_info = {
+static const struct intel_device_info chv_info = {
PLATFORM(INTEL_CHERRYVIEW),
GEN(8),
.pipe_mask = BIT(PIPE_A) | BIT(PIPE_B) | BIT(PIPE_C),
@@ -620,12 +621,12 @@ static const struct intel_device_info intel_cherryview_info = {
GEN9_FEATURES, \
PLATFORM(INTEL_SKYLAKE)
-static const struct intel_device_info intel_skylake_gt1_info = {
+static const struct intel_device_info skl_gt1_info = {
SKL_PLATFORM,
.gt = 1,
};
-static const struct intel_device_info intel_skylake_gt2_info = {
+static const struct intel_device_info skl_gt2_info = {
SKL_PLATFORM,
.gt = 2,
};
@@ -636,12 +637,12 @@ static const struct intel_device_info intel_skylake_gt2_info = {
BIT(RCS0) | BIT(VCS0) | BIT(BCS0) | BIT(VECS0) | BIT(VCS1)
-static const struct intel_device_info intel_skylake_gt3_info = {
+static const struct intel_device_info skl_gt3_info = {
SKL_GT3_PLUS_PLATFORM,
.gt = 3,
};
-static const struct intel_device_info intel_skylake_gt4_info = {
+static const struct intel_device_info skl_gt4_info = {
SKL_GT3_PLUS_PLATFORM,
.gt = 4,
};
@@ -678,13 +679,13 @@ static const struct intel_device_info intel_skylake_gt4_info = {
GEN9_DEFAULT_PAGE_SIZES, \
GEN_DEFAULT_REGIONS
-static const struct intel_device_info intel_broxton_info = {
+static const struct intel_device_info bxt_info = {
GEN9_LP_FEATURES,
PLATFORM(INTEL_BROXTON),
.ddb_size = 512,
};
-static const struct intel_device_info intel_geminilake_info = {
+static const struct intel_device_info glk_info = {
GEN9_LP_FEATURES,
PLATFORM(INTEL_GEMINILAKE),
.ddb_size = 1024,
@@ -695,17 +696,17 @@ static const struct intel_device_info intel_geminilake_info = {
GEN9_FEATURES, \
PLATFORM(INTEL_KABYLAKE)
-static const struct intel_device_info intel_kabylake_gt1_info = {
+static const struct intel_device_info kbl_gt1_info = {
KBL_PLATFORM,
.gt = 1,
};
-static const struct intel_device_info intel_kabylake_gt2_info = {
+static const struct intel_device_info kbl_gt2_info = {
KBL_PLATFORM,
.gt = 2,
};
-static const struct intel_device_info intel_kabylake_gt3_info = {
+static const struct intel_device_info kbl_gt3_info = {
KBL_PLATFORM,
.gt = 3,
.engine_mask =
@@ -716,17 +717,17 @@ static const struct intel_device_info intel_kabylake_gt3_info = {
GEN9_FEATURES, \
PLATFORM(INTEL_COFFEELAKE)
-static const struct intel_device_info intel_coffeelake_gt1_info = {
+static const struct intel_device_info cfl_gt1_info = {
CFL_PLATFORM,
.gt = 1,
};
-static const struct intel_device_info intel_coffeelake_gt2_info = {
+static const struct intel_device_info cfl_gt2_info = {
CFL_PLATFORM,
.gt = 2,
};
-static const struct intel_device_info intel_coffeelake_gt3_info = {
+static const struct intel_device_info cfl_gt3_info = {
CFL_PLATFORM,
.gt = 3,
.engine_mask =
@@ -741,7 +742,7 @@ static const struct intel_device_info intel_coffeelake_gt3_info = {
.has_coherent_ggtt = false, \
GLK_COLORS
-static const struct intel_device_info intel_cannonlake_info = {
+static const struct intel_device_info cnl_info = {
GEN10_FEATURES,
PLATFORM(INTEL_CANNONLAKE),
.gt = 2,
@@ -776,14 +777,14 @@ static const struct intel_device_info intel_cannonlake_info = {
.has_logical_ring_elsq = 1, \
.color = { .degamma_lut_size = 33, .gamma_lut_size = 262145 }
-static const struct intel_device_info intel_icelake_11_info = {
+static const struct intel_device_info icl_info = {
GEN11_FEATURES,
PLATFORM(INTEL_ICELAKE),
.engine_mask =
BIT(RCS0) | BIT(BCS0) | BIT(VECS0) | BIT(VCS0) | BIT(VCS2),
};
-static const struct intel_device_info intel_elkhartlake_info = {
+static const struct intel_device_info ehl_info = {
GEN11_FEATURES,
PLATFORM(INTEL_ELKHARTLAKE),
.require_force_probe = 1,
@@ -814,7 +815,7 @@ static const struct intel_device_info intel_elkhartlake_info = {
.has_global_mocs = 1, \
.display.has_dsb = 1
-static const struct intel_device_info intel_tigerlake_12_info = {
+static const struct intel_device_info tgl_info = {
GEN12_FEATURES,
PLATFORM(INTEL_TIGERLAKE),
.pipe_mask = BIT(PIPE_A) | BIT(PIPE_B) | BIT(PIPE_C) | BIT(PIPE_D),
@@ -839,68 +840,70 @@ static const struct intel_device_info intel_tigerlake_12_info = {
* PCI ID matches, otherwise we'll use the wrong info struct above.
*/
static const struct pci_device_id pciidlist[] = {
- INTEL_I830_IDS(&intel_i830_info),
- INTEL_I845G_IDS(&intel_i845g_info),
- INTEL_I85X_IDS(&intel_i85x_info),
- INTEL_I865G_IDS(&intel_i865g_info),
- INTEL_I915G_IDS(&intel_i915g_info),
- INTEL_I915GM_IDS(&intel_i915gm_info),
- INTEL_I945G_IDS(&intel_i945g_info),
- INTEL_I945GM_IDS(&intel_i945gm_info),
- INTEL_I965G_IDS(&intel_i965g_info),
- INTEL_G33_IDS(&intel_g33_info),
- INTEL_I965GM_IDS(&intel_i965gm_info),
- INTEL_GM45_IDS(&intel_gm45_info),
- INTEL_G45_IDS(&intel_g45_info),
- INTEL_PINEVIEW_G_IDS(&intel_pineview_g_info),
- INTEL_PINEVIEW_M_IDS(&intel_pineview_m_info),
- INTEL_IRONLAKE_D_IDS(&intel_ironlake_d_info),
- INTEL_IRONLAKE_M_IDS(&intel_ironlake_m_info),
- INTEL_SNB_D_GT1_IDS(&intel_sandybridge_d_gt1_info),
- INTEL_SNB_D_GT2_IDS(&intel_sandybridge_d_gt2_info),
- INTEL_SNB_M_GT1_IDS(&intel_sandybridge_m_gt1_info),
- INTEL_SNB_M_GT2_IDS(&intel_sandybridge_m_gt2_info),
- INTEL_IVB_Q_IDS(&intel_ivybridge_q_info), /* must be first IVB */
- INTEL_IVB_M_GT1_IDS(&intel_ivybridge_m_gt1_info),
- INTEL_IVB_M_GT2_IDS(&intel_ivybridge_m_gt2_info),
- INTEL_IVB_D_GT1_IDS(&intel_ivybridge_d_gt1_info),
- INTEL_IVB_D_GT2_IDS(&intel_ivybridge_d_gt2_info),
- INTEL_HSW_GT1_IDS(&intel_haswell_gt1_info),
- INTEL_HSW_GT2_IDS(&intel_haswell_gt2_info),
- INTEL_HSW_GT3_IDS(&intel_haswell_gt3_info),
- INTEL_VLV_IDS(&intel_valleyview_info),
- INTEL_BDW_GT1_IDS(&intel_broadwell_gt1_info),
- INTEL_BDW_GT2_IDS(&intel_broadwell_gt2_info),
- INTEL_BDW_GT3_IDS(&intel_broadwell_gt3_info),
- INTEL_BDW_RSVD_IDS(&intel_broadwell_rsvd_info),
- INTEL_CHV_IDS(&intel_cherryview_info),
- INTEL_SKL_GT1_IDS(&intel_skylake_gt1_info),
- INTEL_SKL_GT2_IDS(&intel_skylake_gt2_info),
- INTEL_SKL_GT3_IDS(&intel_skylake_gt3_info),
- INTEL_SKL_GT4_IDS(&intel_skylake_gt4_info),
- INTEL_BXT_IDS(&intel_broxton_info),
- INTEL_GLK_IDS(&intel_geminilake_info),
- INTEL_KBL_GT1_IDS(&intel_kabylake_gt1_info),
- INTEL_KBL_GT2_IDS(&intel_kabylake_gt2_info),
- INTEL_KBL_GT3_IDS(&intel_kabylake_gt3_info),
- INTEL_KBL_GT4_IDS(&intel_kabylake_gt3_info),
- INTEL_AML_KBL_GT2_IDS(&intel_kabylake_gt2_info),
- INTEL_CFL_S_GT1_IDS(&intel_coffeelake_gt1_info),
- INTEL_CFL_S_GT2_IDS(&intel_coffeelake_gt2_info),
- INTEL_CFL_H_GT1_IDS(&intel_coffeelake_gt1_info),
- INTEL_CFL_H_GT2_IDS(&intel_coffeelake_gt2_info),
- INTEL_CFL_U_GT2_IDS(&intel_coffeelake_gt2_info),
- INTEL_CFL_U_GT3_IDS(&intel_coffeelake_gt3_info),
- INTEL_WHL_U_GT1_IDS(&intel_coffeelake_gt1_info),
- INTEL_WHL_U_GT2_IDS(&intel_coffeelake_gt2_info),
- INTEL_AML_CFL_GT2_IDS(&intel_coffeelake_gt2_info),
- INTEL_WHL_U_GT3_IDS(&intel_coffeelake_gt3_info),
- INTEL_CML_GT1_IDS(&intel_coffeelake_gt1_info),
- INTEL_CML_GT2_IDS(&intel_coffeelake_gt2_info),
- INTEL_CNL_IDS(&intel_cannonlake_info),
- INTEL_ICL_11_IDS(&intel_icelake_11_info),
- INTEL_EHL_IDS(&intel_elkhartlake_info),
- INTEL_TGL_12_IDS(&intel_tigerlake_12_info),
+ INTEL_I830_IDS(&i830_info),
+ INTEL_I845G_IDS(&i845g_info),
+ INTEL_I85X_IDS(&i85x_info),
+ INTEL_I865G_IDS(&i865g_info),
+ INTEL_I915G_IDS(&i915g_info),
+ INTEL_I915GM_IDS(&i915gm_info),
+ INTEL_I945G_IDS(&i945g_info),
+ INTEL_I945GM_IDS(&i945gm_info),
+ INTEL_I965G_IDS(&i965g_info),
+ INTEL_G33_IDS(&g33_info),
+ INTEL_I965GM_IDS(&i965gm_info),
+ INTEL_GM45_IDS(&gm45_info),
+ INTEL_G45_IDS(&g45_info),
+ INTEL_PINEVIEW_G_IDS(&pnv_g_info),
+ INTEL_PINEVIEW_M_IDS(&pnv_m_info),
+ INTEL_IRONLAKE_D_IDS(&ilk_d_info),
+ INTEL_IRONLAKE_M_IDS(&ilk_m_info),
+ INTEL_SNB_D_GT1_IDS(&snb_d_gt1_info),
+ INTEL_SNB_D_GT2_IDS(&snb_d_gt2_info),
+ INTEL_SNB_M_GT1_IDS(&snb_m_gt1_info),
+ INTEL_SNB_M_GT2_IDS(&snb_m_gt2_info),
+ INTEL_IVB_Q_IDS(&ivb_q_info), /* must be first IVB */
+ INTEL_IVB_M_GT1_IDS(&ivb_m_gt1_info),
+ INTEL_IVB_M_GT2_IDS(&ivb_m_gt2_info),
+ INTEL_IVB_D_GT1_IDS(&ivb_d_gt1_info),
+ INTEL_IVB_D_GT2_IDS(&ivb_d_gt2_info),
+ INTEL_HSW_GT1_IDS(&hsw_gt1_info),
+ INTEL_HSW_GT2_IDS(&hsw_gt2_info),
+ INTEL_HSW_GT3_IDS(&hsw_gt3_info),
+ INTEL_VLV_IDS(&vlv_info),
+ INTEL_BDW_GT1_IDS(&bdw_gt1_info),
+ INTEL_BDW_GT2_IDS(&bdw_gt2_info),
+ INTEL_BDW_GT3_IDS(&bdw_gt3_info),
+ INTEL_BDW_RSVD_IDS(&bdw_rsvd_info),
+ INTEL_CHV_IDS(&chv_info),
+ INTEL_SKL_GT1_IDS(&skl_gt1_info),
+ INTEL_SKL_GT2_IDS(&skl_gt2_info),
+ INTEL_SKL_GT3_IDS(&skl_gt3_info),
+ INTEL_SKL_GT4_IDS(&skl_gt4_info),
+ INTEL_BXT_IDS(&bxt_info),
+ INTEL_GLK_IDS(&glk_info),
+ INTEL_KBL_GT1_IDS(&kbl_gt1_info),
+ INTEL_KBL_GT2_IDS(&kbl_gt2_info),
+ INTEL_KBL_GT3_IDS(&kbl_gt3_info),
+ INTEL_KBL_GT4_IDS(&kbl_gt3_info),
+ INTEL_AML_KBL_GT2_IDS(&kbl_gt2_info),
+ INTEL_CFL_S_GT1_IDS(&cfl_gt1_info),
+ INTEL_CFL_S_GT2_IDS(&cfl_gt2_info),
+ INTEL_CFL_H_GT1_IDS(&cfl_gt1_info),
+ INTEL_CFL_H_GT2_IDS(&cfl_gt2_info),
+ INTEL_CFL_U_GT2_IDS(&cfl_gt2_info),
+ INTEL_CFL_U_GT3_IDS(&cfl_gt3_info),
+ INTEL_WHL_U_GT1_IDS(&cfl_gt1_info),
+ INTEL_WHL_U_GT2_IDS(&cfl_gt2_info),
+ INTEL_AML_CFL_GT2_IDS(&cfl_gt2_info),
+ INTEL_WHL_U_GT3_IDS(&cfl_gt3_info),
+ INTEL_CML_GT1_IDS(&cfl_gt1_info),
+ INTEL_CML_GT2_IDS(&cfl_gt2_info),
+ INTEL_CML_U_GT1_IDS(&cfl_gt1_info),
+ INTEL_CML_U_GT2_IDS(&cfl_gt2_info),
+ INTEL_CNL_IDS(&cnl_info),
+ INTEL_ICL_11_IDS(&icl_info),
+ INTEL_EHL_IDS(&ehl_info),
+ INTEL_TGL_12_IDS(&tgl_info),
{0, 0, 0}
};
MODULE_DEVICE_TABLE(pci, pciidlist);
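
For readers following the rename: each INTEL_*_IDS() macro comes from include/drm/i915_pciids.h and expands to pci_device_id initialisers that carry the info pointer in driver_data, roughly of this shape (sketch; 0x0f30 is one Valleyview ID used purely as an example):

	{ .vendor = 0x8086, .device = 0x0f30,
	  .subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID,
	  .class = 0x030000, .class_mask = 0xff0000,	/* VGA display class */
	  .driver_data = (kernel_ulong_t)&vlv_info },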
@@ -1003,6 +1006,12 @@ static int i915_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
return err > 0 ? -ENOTTY : err;
}
+ err = i915_perf_selftests(pdev);
+ if (err) {
+ i915_pci_remove(pdev);
+ return err > 0 ? -ENOTTY : err;
+ }
+
return 0;
}
@@ -1045,7 +1054,12 @@ static int __init i915_init(void)
return 0;
}
- return pci_register_driver(&i915_pci_driver);
+ err = pci_register_driver(&i915_pci_driver);
+ if (err)
+ return err;
+
+ i915_perf_sysctl_register();
+ return 0;
}
static void __exit i915_exit(void)
@@ -1053,6 +1067,7 @@ static void __exit i915_exit(void)
if (!i915_pci_driver.driver.owner)
return;
+ i915_perf_sysctl_unregister();
pci_unregister_driver(&i915_pci_driver);
i915_globals_exit();
}
diff --git a/drivers/gpu/drm/i915/i915_perf.c b/drivers/gpu/drm/i915/i915_perf.c
index 2ae14bc14931..0f556d80ba36 100644
--- a/drivers/gpu/drm/i915/i915_perf.c
+++ b/drivers/gpu/drm/i915/i915_perf.c
@@ -387,6 +387,8 @@ struct i915_oa_config_bo {
struct i915_vma *vma;
};
+static struct ctl_table_header *sysctl_header;
+
static enum hrtimer_restart oa_poll_check_timer_cb(struct hrtimer *hrtimer);
void i915_oa_config_release(struct kref *ref)
@@ -1777,6 +1779,8 @@ static int alloc_noa_wait(struct i915_perf_stream *stream)
*cs++ = MI_MATH_ADD;
*cs++ = MI_MATH_STOREINV(MI_MATH_REG(JUMP_PREDICATE), MI_MATH_REG_CF);
+ *cs++ = MI_ARB_CHECK;
+
/*
* Transfer the result into the predicate register to be used for the
* predicated jump.
@@ -1966,7 +1970,9 @@ static int emit_oa_config(struct i915_perf_stream *stream,
if (err)
goto err_vma_put;
+ intel_engine_pm_get(ce->engine);
rq = i915_request_create(ce);
+ intel_engine_pm_put(ce->engine);
if (IS_ERR(rq)) {
err = PTR_ERR(rq);
goto err_vma_unpin;
@@ -2153,9 +2159,7 @@ static int gen8_modify_context(struct intel_context *ce,
struct i915_request *rq;
int err;
- lockdep_assert_held(&ce->pin_mutex);
-
- rq = i915_request_create(ce->engine->kernel_context);
+ rq = intel_engine_create_kernel_request(ce->engine);
if (IS_ERR(rq))
return PTR_ERR(rq);
@@ -2197,17 +2201,14 @@ static int gen8_configure_context(struct i915_gem_context *ctx,
if (ce->engine->class != RENDER_CLASS)
continue;
- err = intel_context_lock_pinned(ce);
- if (err)
- break;
+ /* Otherwise OA settings will be set upon first use */
+ if (!intel_context_pin_if_active(ce))
+ continue;
flex->value = intel_sseu_make_rpcs(ctx->i915, &ce->sseu);
+ err = gen8_modify_context(ce, flex, count);
- /* Otherwise OA settings will be set upon first use */
- if (intel_context_is_pinned(ce))
- err = gen8_modify_context(ce, flex, count);
-
- intel_context_unlock_pinned(ce);
+ intel_context_unpin(ce);
if (err)
break;
}
@@ -2317,9 +2318,6 @@ static int oa_configure_all_contexts(struct i915_perf_stream *stream,
*/
spin_lock(&i915->gem.contexts.lock);
list_for_each_entry_safe(ctx, cn, &i915->gem.contexts.list, link) {
- if (ctx == i915->kernel_context)
- continue;
-
if (!kref_get_unless_zero(&ctx->ref))
continue;
@@ -2455,6 +2453,13 @@ static int gen8_enable_metric_set(struct i915_perf_stream *stream)
return emit_oa_config(stream, oa_config, oa_context(stream));
}
+static u32 oag_report_ctx_switches(const struct i915_perf_stream *stream)
+{
+ return _MASKED_FIELD(GEN12_OAG_OA_DEBUG_DISABLE_CTX_SWITCH_REPORTS,
+ (stream->sample_flags & SAMPLE_OA_REPORT) ?
+ 0 : GEN12_OAG_OA_DEBUG_DISABLE_CTX_SWITCH_REPORTS);
+}
+
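A brief aside on why the helper can return a single value for both cases: these are masked registers, where the high 16 bits of the written value select which of the low 16 bits take effect. The idiom, simplified from the driver's register helpers (sketch):

	#define MASKED_FIELD(mask, value)	(((mask) << 16) | (value))
	#define MASKED_BIT_ENABLE(a)		MASKED_FIELD((a), (a))
	#define MASKED_BIT_DISABLE(a)		MASKED_FIELD((a), 0)

So oag_report_ctx_switches() always writes the DISABLE_CTX_SWITCH_REPORTS mask bit, and sets the corresponding value bit only when OA reports are not wanted.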
static int gen12_enable_metric_set(struct i915_perf_stream *stream)
{
struct intel_uncore *uncore = stream->uncore;
@@ -2468,12 +2473,10 @@ static int gen12_enable_metric_set(struct i915_perf_stream *stream)
_MASKED_BIT_ENABLE(GEN12_OAG_OA_DEBUG_DISABLE_CLK_RATIO_REPORTS |
GEN12_OAG_OA_DEBUG_INCLUDE_CLK_RATIO) |
/*
- * If the user didn't require OA reports, instruct the
- * hardware not to emit ctx switch reports.
+ * If the user didn't require OA reports, instruct
+ * the hardware not to emit ctx switch reports.
*/
- !(stream->sample_flags & SAMPLE_OA_REPORT) ?
- _MASKED_BIT_ENABLE(GEN12_OAG_OA_DEBUG_DISABLE_CTX_SWITCH_REPORTS) :
- _MASKED_BIT_DISABLE(GEN12_OAG_OA_DEBUG_DISABLE_CTX_SWITCH_REPORTS));
+ oag_report_ctx_switches(stream));
intel_uncore_write(uncore, GEN12_OAG_OAGLBCTXCTRL, periodic ?
(GEN12_OAG_OAGLBCTXCTRL_COUNTER_RESUME |
@@ -3101,7 +3104,7 @@ static void i915_perf_enable_locked(struct i915_perf_stream *stream)
stream->ops->enable(stream);
if (stream->hold_preemption)
- i915_gem_context_set_nopreempt(stream->ctx);
+ intel_context_set_nopreempt(stream->pinned_ctx);
}
/**
@@ -3127,7 +3130,7 @@ static void i915_perf_disable_locked(struct i915_perf_stream *stream)
stream->enabled = false;
if (stream->hold_preemption)
- i915_gem_context_clear_nopreempt(stream->ctx);
+ intel_context_clear_nopreempt(stream->pinned_ctx);
if (stream->ops->disable)
stream->ops->disable(stream);
@@ -3981,7 +3984,7 @@ int i915_perf_add_config_ioctl(struct drm_device *dev, void *data,
struct i915_perf *perf = &to_i915(dev)->perf;
struct drm_i915_perf_oa_config *args = data;
struct i915_oa_config *oa_config, *tmp;
- static struct i915_oa_reg *regs;
+ struct i915_oa_reg *regs;
int err, id;
if (!perf->i915) {
@@ -4219,7 +4222,7 @@ static struct ctl_table dev_root[] = {
};
/**
- * i915_perf_init - initialize i915-perf state on module load
+ * i915_perf_init - initialize i915-perf state on module bind
* @i915: i915 device instance
*
* Initializes i915-perf state without exposing anything to userspace.
@@ -4336,7 +4339,6 @@ void i915_perf_init(struct drm_i915_private *i915)
oa_sample_rate_hard_limit = 1000 *
(RUNTIME_INFO(i915)->cs_timestamp_frequency_khz / 2);
- perf->sysctl_header = register_sysctl_table(dev_root);
mutex_init(&perf->metrics_lock);
idr_init(&perf->metrics_idr);
@@ -4372,6 +4374,16 @@ static int destroy_config(int id, void *p, void *data)
return 0;
}
+void i915_perf_sysctl_register(void)
+{
+ sysctl_header = register_sysctl_table(dev_root);
+}
+
+void i915_perf_sysctl_unregister(void)
+{
+ unregister_sysctl_table(sysctl_header);
+}
+
/**
* i915_perf_fini - Counter part to i915_perf_init()
* @i915: i915 device instance
@@ -4386,8 +4398,6 @@ void i915_perf_fini(struct drm_i915_private *i915)
idr_for_each(&perf->metrics_idr, destroy_config, perf);
idr_destroy(&perf->metrics_idr);
- unregister_sysctl_table(perf->sysctl_header);
-
memset(&perf->ops, 0, sizeof(perf->ops));
perf->i915 = NULL;
}
diff --git a/drivers/gpu/drm/i915/i915_perf.h b/drivers/gpu/drm/i915/i915_perf.h
index 4ceebce72060..882fdd0a7680 100644
--- a/drivers/gpu/drm/i915/i915_perf.h
+++ b/drivers/gpu/drm/i915/i915_perf.h
@@ -23,6 +23,8 @@ void i915_perf_fini(struct drm_i915_private *i915);
void i915_perf_register(struct drm_i915_private *i915);
void i915_perf_unregister(struct drm_i915_private *i915);
int i915_perf_ioctl_version(void);
+void i915_perf_sysctl_register(void);
+void i915_perf_sysctl_unregister(void);
int i915_perf_open_ioctl(struct drm_device *dev, void *data,
struct drm_file *file);
diff --git a/drivers/gpu/drm/i915/i915_perf_types.h b/drivers/gpu/drm/i915/i915_perf_types.h
index 74ddc20a0d37..45e581455f5d 100644
--- a/drivers/gpu/drm/i915/i915_perf_types.h
+++ b/drivers/gpu/drm/i915/i915_perf_types.h
@@ -380,7 +380,6 @@ struct i915_perf {
struct drm_i915_private *i915;
struct kobject *metrics_kobj;
- struct ctl_table_header *sysctl_header;
/*
* Lock associated with adding/modifying/removing OA configs
diff --git a/drivers/gpu/drm/i915/i915_pmu.c b/drivers/gpu/drm/i915/i915_pmu.c
index 6f09aa0be80a..28a82c849bac 100644
--- a/drivers/gpu/drm/i915/i915_pmu.c
+++ b/drivers/gpu/drm/i915/i915_pmu.c
@@ -259,6 +259,16 @@ add_sample(struct i915_pmu_sample *sample, u32 val)
sample->cur += val;
}
+static bool exclusive_mmio_access(const struct drm_i915_private *i915)
+{
+ /*
+ * We have to avoid concurrent mmio cache line access on gen7 or
+ * risk a machine hang. For a fun history lesson dig out the old
+ * userspace intel_gpu_top and run it on Ivybridge or Haswell!
+ */
+ return IS_GEN(i915, 7);
+}
+
static void
engines_sample(struct intel_gt *gt, unsigned int period_ns)
{
@@ -269,8 +279,12 @@ engines_sample(struct intel_gt *gt, unsigned int period_ns)
if ((i915->pmu.enable & ENGINE_SAMPLE_MASK) == 0)
return;
+ if (!intel_gt_pm_is_awake(gt))
+ return;
+
for_each_engine(engine, gt, id) {
struct intel_engine_pmu *pmu = &engine->pmu;
+ spinlock_t *mmio_lock;
unsigned long flags;
bool busy;
u32 val;
@@ -278,7 +292,12 @@ engines_sample(struct intel_gt *gt, unsigned int period_ns)
if (!intel_engine_pm_get_if_awake(engine))
continue;
- spin_lock_irqsave(&engine->uncore->lock, flags);
+ mmio_lock = NULL;
+ if (exclusive_mmio_access(i915))
+ mmio_lock = &engine->uncore->lock;
+
+ if (unlikely(mmio_lock))
+ spin_lock_irqsave(mmio_lock, flags);
val = ENGINE_READ_FW(engine, RING_CTL);
if (val == 0) /* powerwell off => engine idle */
@@ -309,7 +328,8 @@ engines_sample(struct intel_gt *gt, unsigned int period_ns)
add_sample(&pmu->sample[I915_SAMPLE_BUSY], period_ns);
skip:
- spin_unlock_irqrestore(&engine->uncore->lock, flags);
+ if (unlikely(mmio_lock))
+ spin_unlock_irqrestore(mmio_lock, flags);
intel_engine_pm_put_async(engine);
}
}
@@ -320,6 +340,13 @@ add_sample_mult(struct i915_pmu_sample *sample, u32 val, u32 mul)
sample->cur += mul_u32_u32(val, mul);
}
+static bool frequency_sampling_enabled(struct i915_pmu *pmu)
+{
+ return pmu->enable &
+ (config_enabled_mask(I915_PMU_ACTUAL_FREQUENCY) |
+ config_enabled_mask(I915_PMU_REQUESTED_FREQUENCY));
+}
+
static void
frequency_sample(struct intel_gt *gt, unsigned int period_ns)
{
@@ -328,19 +355,33 @@ frequency_sample(struct intel_gt *gt, unsigned int period_ns)
struct i915_pmu *pmu = &i915->pmu;
struct intel_rps *rps = &gt->rps;
+ if (!frequency_sampling_enabled(pmu))
+ return;
+
+ /* Report 0/0 (actual/requested) frequency while parked. */
+ if (!intel_gt_pm_get_if_awake(gt))
+ return;
+
if (pmu->enable & config_enabled_mask(I915_PMU_ACTUAL_FREQUENCY)) {
u32 val;
- val = rps->cur_freq;
- if (intel_gt_pm_get_if_awake(gt)) {
- val = intel_uncore_read_notrace(uncore, GEN6_RPSTAT1);
- val = intel_get_cagf(rps, val);
- intel_gt_pm_put_async(gt);
- }
+ /*
+ * We take a quick peek here without using forcewake
+ * so that we don't perturb the system under observation
+ * (forcewake => !rc6 => increased power use). We expect
+ * that if the read fails because it is outside of the
+ * mmio power well, then it will return 0 -- in which
+ * case we assume the system is running at the intended
+ * frequency. Fortunately, the read should rarely fail!
+ */
+ val = intel_uncore_read_fw(uncore, GEN6_RPSTAT1);
+ if (val)
+ val = intel_rps_get_cagf(rps, val);
+ else
+ val = rps->cur_freq;
add_sample_mult(&pmu->sample[__I915_SAMPLE_FREQ_ACT],
- intel_gpu_freq(rps, val),
- period_ns / 1000);
+ intel_gpu_freq(rps, val), period_ns / 1000);
}
if (pmu->enable & config_enabled_mask(I915_PMU_REQUESTED_FREQUENCY)) {
@@ -348,6 +389,8 @@ frequency_sample(struct intel_gt *gt, unsigned int period_ns)
intel_gpu_freq(rps, rps->cur_freq),
period_ns / 1000);
}
+
+ intel_gt_pm_put_async(gt);
}
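
Both samplers now share the same power-management bracket: acquire a wakeref only if the GT is already awake, sample under it, and release asynchronously so the hrtimer callback never sleeps. Condensed to its skeleton (editorial sketch using the functions above):

	static void sample_if_awake(struct intel_gt *gt, unsigned int period_ns)
	{
		if (!intel_gt_pm_get_if_awake(gt))
			return;	/* parked: report idle/zero, don't wake the HW */

		/* ... read the counters while the wakeref pins the device ... */

		intel_gt_pm_put_async(gt);	/* defer release out of irq context */
	}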
static enum hrtimer_restart i915_sample(struct hrtimer *hrtimer)
@@ -1074,12 +1117,17 @@ void i915_pmu_register(struct drm_i915_private *i915)
hrtimer_init(&pmu->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
pmu->timer.function = i915_sample;
- if (!is_igp(i915))
+ if (!is_igp(i915)) {
pmu->name = kasprintf(GFP_KERNEL,
- "i915-%s",
+ "i915_%s",
dev_name(i915->drm.dev));
- else
+ if (pmu->name) {
+ /* tools/perf reserves colons as special. */
+ strreplace((char *)pmu->name, ':', '_');
+ }
+ } else {
pmu->name = "i915";
+ }
if (!pmu->name)
goto err;
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index 094011b8f64d..6cc55c103f67 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -2244,26 +2244,6 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg)
MG_DP_MODE_LN1_ACU_PORT1)
#define MG_DP_MODE_CFG_DP_X2_MODE (1 << 7)
#define MG_DP_MODE_CFG_DP_X1_MODE (1 << 6)
-#define MG_DP_MODE_CFG_TR2PWR_GATING (1 << 5)
-#define MG_DP_MODE_CFG_TRPWR_GATING (1 << 4)
-#define MG_DP_MODE_CFG_CLNPWR_GATING (1 << 3)
-#define MG_DP_MODE_CFG_DIGPWR_GATING (1 << 2)
-#define MG_DP_MODE_CFG_GAONPWR_GATING (1 << 1)
-
-#define MG_MISC_SUS0_PORT1 0x168814
-#define MG_MISC_SUS0_PORT2 0x169814
-#define MG_MISC_SUS0_PORT3 0x16A814
-#define MG_MISC_SUS0_PORT4 0x16B814
-#define MG_MISC_SUS0(tc_port) \
- _MMIO(_PORT(tc_port, MG_MISC_SUS0_PORT1, MG_MISC_SUS0_PORT2))
-#define MG_MISC_SUS0_SUSCLK_DYNCLKGATE_MODE_MASK (3 << 14)
-#define MG_MISC_SUS0_SUSCLK_DYNCLKGATE_MODE(x) ((x) << 14)
-#define MG_MISC_SUS0_CFG_TR2PWR_GATING (1 << 12)
-#define MG_MISC_SUS0_CFG_CL2PWR_GATING (1 << 11)
-#define MG_MISC_SUS0_CFG_GAONPWR_GATING (1 << 10)
-#define MG_MISC_SUS0_CFG_TRPWR_GATING (1 << 7)
-#define MG_MISC_SUS0_CFG_CL1PWR_GATING (1 << 6)
-#define MG_MISC_SUS0_CFG_DGPWR_GATING (1 << 5)
/* The spec defines this only for BXT PHY0, but let's assume that this
* would exist for PHY1 too if it had a second channel.
@@ -5048,14 +5028,20 @@ enum {
#define BLM_PCH_POLARITY (1 << 29)
#define BLC_PWM_PCH_CTL2 _MMIO(0xc8254)
-#define UTIL_PIN_CTL _MMIO(0x48400)
-#define UTIL_PIN_ENABLE (1 << 31)
-
-#define UTIL_PIN_PIPE(x) ((x) << 29)
-#define UTIL_PIN_PIPE_MASK (3 << 29)
-#define UTIL_PIN_MODE_PWM (1 << 24)
-#define UTIL_PIN_MODE_MASK (0xf << 24)
-#define UTIL_PIN_POLARITY (1 << 22)
+#define UTIL_PIN_CTL _MMIO(0x48400)
+#define UTIL_PIN_ENABLE (1 << 31)
+#define UTIL_PIN_PIPE_MASK (3 << 29)
+#define UTIL_PIN_PIPE(x) ((x) << 29)
+#define UTIL_PIN_MODE_MASK (0xf << 24)
+#define UTIL_PIN_MODE_DATA (0 << 24)
+#define UTIL_PIN_MODE_PWM (1 << 24)
+#define UTIL_PIN_MODE_VBLANK (4 << 24)
+#define UTIL_PIN_MODE_VSYNC (5 << 24)
+#define UTIL_PIN_MODE_EYE_LEVEL (8 << 24)
+#define UTIL_PIN_OUTPUT_DATA (1 << 23)
+#define UTIL_PIN_POLARITY (1 << 22)
+#define UTIL_PIN_DIRECTION_INPUT (1 << 19)
+#define UTIL_PIN_INPUT_DATA (1 << 16)
/* BXT backlight register definition. */
#define _BXT_BLC_PWM_CTL1 0xC8250
@@ -5757,7 +5743,8 @@ enum {
#define PIPECONF_DOUBLE_WIDE (1 << 30)
#define I965_PIPECONF_ACTIVE (1 << 30)
#define PIPECONF_DSI_PLL_LOCKED (1 << 29) /* vlv & pipe A only */
-#define PIPECONF_FRAME_START_DELAY_MASK (3 << 27)
+#define PIPECONF_FRAME_START_DELAY_MASK (3 << 27) /* pre-hsw */
+#define PIPECONF_FRAME_START_DELAY(x) ((x) << 27) /* pre-hsw: 0-3 */
#define PIPECONF_SINGLE_WIDE 0
#define PIPECONF_PIPE_UNLOCKED 0
#define PIPECONF_PIPE_LOCKED (1 << 25)
@@ -6365,6 +6352,7 @@ enum {
#define DISPPLANE_RGBX101010 (0x8 << 26)
#define DISPPLANE_RGBA101010 (0x9 << 26)
#define DISPPLANE_BGRX101010 (0xa << 26)
+#define DISPPLANE_BGRA101010 (0xb << 26)
#define DISPPLANE_RGBX161616 (0xc << 26)
#define DISPPLANE_RGBX888 (0xe << 26)
#define DISPPLANE_RGBA888 (0xf << 26)
@@ -6639,12 +6627,15 @@ enum {
#define SP_ENABLE (1 << 31)
#define SP_GAMMA_ENABLE (1 << 30)
#define SP_PIXFORMAT_MASK (0xf << 26)
-#define SP_FORMAT_YUV422 (0 << 26)
-#define SP_FORMAT_BGR565 (5 << 26)
-#define SP_FORMAT_BGRX8888 (6 << 26)
-#define SP_FORMAT_BGRA8888 (7 << 26)
-#define SP_FORMAT_RGBX1010102 (8 << 26)
-#define SP_FORMAT_RGBA1010102 (9 << 26)
+#define SP_FORMAT_YUV422 (0x0 << 26)
+#define SP_FORMAT_8BPP (0x2 << 26)
+#define SP_FORMAT_BGR565 (0x5 << 26)
+#define SP_FORMAT_BGRX8888 (0x6 << 26)
+#define SP_FORMAT_BGRA8888 (0x7 << 26)
+#define SP_FORMAT_RGBX1010102 (0x8 << 26)
+#define SP_FORMAT_RGBA1010102 (0x9 << 26)
+#define SP_FORMAT_BGRX1010102 (0xa << 26) /* CHV pipe B */
+#define SP_FORMAT_BGRA1010102 (0xb << 26) /* CHV pipe B */
#define SP_FORMAT_RGBX8888 (0xe << 26)
#define SP_FORMAT_RGBA8888 (0xf << 26)
#define SP_ALPHA_PREMULTIPLY (1 << 23) /* CHV pipe B */
@@ -6795,6 +6786,7 @@ enum {
#define PLANE_CTL_YUV422_VYUY (3 << 16)
#define PLANE_CTL_RENDER_DECOMPRESSION_ENABLE (1 << 15)
#define PLANE_CTL_TRICKLE_FEED_DISABLE (1 << 14)
+#define PLANE_CTL_CLEAR_COLOR_DISABLE (1 << 13) /* TGL+ */
#define PLANE_CTL_PLANE_GAMMA_DISABLE (1 << 13) /* Pre-GLK */
#define PLANE_CTL_TILED_MASK (0x7 << 10)
#define PLANE_CTL_TILED_LINEAR (0 << 10)
@@ -6802,6 +6794,7 @@ enum {
#define PLANE_CTL_TILED_Y (4 << 10)
#define PLANE_CTL_TILED_YF (5 << 10)
#define PLANE_CTL_FLIP_HORIZONTAL (1 << 8)
+#define PLANE_CTL_MEDIA_DECOMPRESSION_ENABLE (1 << 4) /* TGL+ */
#define PLANE_CTL_ALPHA_MASK (0x3 << 4) /* Pre-GLK */
#define PLANE_CTL_ALPHA_DISABLE (0 << 4)
#define PLANE_CTL_ALPHA_SW_PREMULTIPLY (2 << 4)
@@ -7512,11 +7505,15 @@ enum {
#define GEN8_DE_PORT_IMR _MMIO(0x44444)
#define GEN8_DE_PORT_IIR _MMIO(0x44448)
#define GEN8_DE_PORT_IER _MMIO(0x4444c)
+#define DSI1_NON_TE (1 << 31)
+#define DSI0_NON_TE (1 << 30)
#define ICL_AUX_CHANNEL_E (1 << 29)
#define CNL_AUX_CHANNEL_F (1 << 28)
#define GEN9_AUX_CHANNEL_D (1 << 27)
#define GEN9_AUX_CHANNEL_C (1 << 26)
#define GEN9_AUX_CHANNEL_B (1 << 25)
+#define DSI1_TE (1 << 24)
+#define DSI0_TE (1 << 23)
#define BXT_DE_PORT_HP_DDIC (1 << 5)
#define BXT_DE_PORT_HP_DDIB (1 << 4)
#define BXT_DE_PORT_HP_DDIA (1 << 3)
@@ -7731,6 +7728,8 @@ enum {
[TRANSCODER_B] = _CHICKEN_TRANS_B, \
[TRANSCODER_C] = _CHICKEN_TRANS_C, \
[TRANSCODER_D] = _CHICKEN_TRANS_D))
+#define HSW_FRAME_START_DELAY_MASK (3 << 27)
+#define HSW_FRAME_START_DELAY(x) ((x) << 27) /* 0-3 */
#define VSC_DATA_SEL_SOFTWARE_CONTROL (1 << 25) /* GLK and CNL+ */
#define DDI_TRAINING_OVERRIDE_ENABLE (1 << 19)
#define DDI_TRAINING_OVERRIDE_VALUE (1 << 18)
@@ -7754,6 +7753,14 @@ enum {
#define GEN7_MSG_CTL _MMIO(0x45010)
#define WAIT_FOR_PCH_RESET_ACK (1 << 1)
#define WAIT_FOR_PCH_FLR_ACK (1 << 0)
+
+#define BW_BUDDY1_CTL _MMIO(0x45140)
+#define BW_BUDDY2_CTL _MMIO(0x45150)
+#define BW_BUDDY_DISABLE REG_BIT(31)
+
+#define BW_BUDDY1_PAGE_MASK _MMIO(0x45144)
+#define BW_BUDDY2_PAGE_MASK _MMIO(0x45154)
+
#define HSW_NDE_RSTWRN_OPT _MMIO(0x46408)
#define RESET_PCH_HANDSHAKE_ENABLE (1 << 4)
@@ -7911,6 +7918,10 @@ enum {
#define PIXEL_ROUNDING_TRUNC_FB_PASSTHRU (1 << 15)
#define PER_PIXEL_ALPHA_BYPASS_EN (1 << 7)
+#define FF_MODE2 _MMIO(0x6604)
+#define FF_MODE2_TDS_TIMER_MASK REG_GENMASK(23, 16)
+#define FF_MODE2_TDS_TIMER_128 REG_FIELD_PREP(FF_MODE2_TDS_TIMER_MASK, 4)
+
/* PCH */
#define PCH_DISPLAY_BASE 0xc0000u
@@ -8099,6 +8110,10 @@ enum {
#define SHOTPLUG_CTL_TC _MMIO(0xc4034)
#define ICP_TC_HPD_ENABLE(tc_port) (8 << (tc_port) * 4)
+
+#define SHPD_FILTER_CNT _MMIO(0xc4038)
+#define SHPD_FILTER_CNT_500_ADJ 0x001D9
+
/* Icelake DSC Rate Control Range Parameter Registers */
#define DSCA_RC_RANGE_PARAMETERS_0 _MMIO(0x6B240)
#define DSCA_RC_RANGE_PARAMETERS_0_UDW _MMIO(0x6B240 + 4)
@@ -8449,10 +8464,8 @@ enum {
#define TRANS_STATE_MASK (1 << 30)
#define TRANS_STATE_DISABLE (0 << 30)
#define TRANS_STATE_ENABLE (1 << 30)
-#define TRANS_FSYNC_DELAY_HB1 (0 << 27)
-#define TRANS_FSYNC_DELAY_HB2 (1 << 27)
-#define TRANS_FSYNC_DELAY_HB3 (2 << 27)
-#define TRANS_FSYNC_DELAY_HB4 (3 << 27)
+#define TRANS_FRAME_START_DELAY_MASK (3 << 27) /* ibx */
+#define TRANS_FRAME_START_DELAY(x) ((x) << 27) /* ibx: 0-3 */
#define TRANS_INTERLACE_MASK (7 << 21)
#define TRANS_PROGRESSIVE (0 << 21)
#define TRANS_INTERLACED (3 << 21)
@@ -8473,6 +8486,7 @@ enum {
#define TRANS_CHICKEN2_TIMING_OVERRIDE (1 << 31)
#define TRANS_CHICKEN2_FDI_POLARITY_REVERSED (1 << 29)
#define TRANS_CHICKEN2_FRAME_START_DELAY_MASK (3 << 27)
+#define TRANS_CHICKEN2_FRAME_START_DELAY(x) ((x) << 27) /* 0-3 */
#define TRANS_CHICKEN2_DISABLE_DEEP_COLOR_COUNTER (1 << 26)
#define TRANS_CHICKEN2_DISABLE_DEEP_COLOR_MODESWITCH (1 << 25)
@@ -9675,6 +9689,7 @@ enum skl_power_gate {
#define TRANS_DDI_EDP_INPUT_A_ONOFF (4 << 12)
#define TRANS_DDI_EDP_INPUT_B_ONOFF (5 << 12)
#define TRANS_DDI_EDP_INPUT_C_ONOFF (6 << 12)
+#define TRANS_DDI_EDP_INPUT_D_ONOFF (7 << 12)
#define TRANS_DDI_MST_TRANSPORT_SELECT_MASK REG_GENMASK(11, 10)
#define TRANS_DDI_MST_TRANSPORT_SELECT(trans) \
REG_FIELD_PREP(TRANS_DDI_MST_TRANSPORT_SELECT_MASK, trans)
@@ -10780,6 +10795,57 @@ enum skl_power_gate {
#define ICL_ESC_CLK_DIV_SHIFT 0
#define DSI_MAX_ESC_CLK 20000 /* in KHz */
+#define _DSI_CMD_FRMCTL_0 0x6b034
+#define _DSI_CMD_FRMCTL_1 0x6b834
+#define DSI_CMD_FRMCTL(port) _MMIO_PORT(port, \
+ _DSI_CMD_FRMCTL_0,\
+ _DSI_CMD_FRMCTL_1)
+#define DSI_FRAME_UPDATE_REQUEST (1 << 31)
+#define DSI_PERIODIC_FRAME_UPDATE_ENABLE (1 << 29)
+#define DSI_NULL_PACKET_ENABLE (1 << 28)
+#define DSI_FRAME_IN_PROGRESS (1 << 0)
+
+#define _DSI_INTR_MASK_REG_0 0x6b070
+#define _DSI_INTR_MASK_REG_1 0x6b870
+#define DSI_INTR_MASK_REG(port) _MMIO_PORT(port, \
+ _DSI_INTR_MASK_REG_0,\
+ _DSI_INTR_MASK_REG_1)
+
+#define _DSI_INTR_IDENT_REG_0 0x6b074
+#define _DSI_INTR_IDENT_REG_1 0x6b874
+#define DSI_INTR_IDENT_REG(port) _MMIO_PORT(port, \
+ _DSI_INTR_IDENT_REG_0,\
+ _DSI_INTR_IDENT_REG_1)
+#define DSI_TE_EVENT (1 << 31)
+#define DSI_RX_DATA_OR_BTA_TERMINATED (1 << 30)
+#define DSI_TX_DATA (1 << 29)
+#define DSI_ULPS_ENTRY_DONE (1 << 28)
+#define DSI_NON_TE_TRIGGER_RECEIVED (1 << 27)
+#define DSI_HOST_CHKSUM_ERROR (1 << 26)
+#define DSI_HOST_MULTI_ECC_ERROR (1 << 25)
+#define DSI_HOST_SINGL_ECC_ERROR (1 << 24)
+#define DSI_HOST_CONTENTION_DETECTED (1 << 23)
+#define DSI_HOST_FALSE_CONTROL_ERROR (1 << 22)
+#define DSI_HOST_TIMEOUT_ERROR (1 << 21)
+#define DSI_HOST_LOW_POWER_TX_SYNC_ERROR (1 << 20)
+#define DSI_HOST_ESCAPE_MODE_ENTRY_ERROR (1 << 19)
+#define DSI_FRAME_UPDATE_DONE (1 << 16)
+#define DSI_PROTOCOL_VIOLATION_REPORTED (1 << 15)
+#define DSI_INVALID_TX_LENGTH (1 << 13)
+#define DSI_INVALID_VC (1 << 12)
+#define DSI_INVALID_DATA_TYPE (1 << 11)
+#define DSI_PERIPHERAL_CHKSUM_ERROR (1 << 10)
+#define DSI_PERIPHERAL_MULTI_ECC_ERROR (1 << 9)
+#define DSI_PERIPHERAL_SINGLE_ECC_ERROR (1 << 8)
+#define DSI_PERIPHERAL_CONTENTION_DETECTED (1 << 7)
+#define DSI_PERIPHERAL_FALSE_CTRL_ERROR (1 << 6)
+#define DSI_PERIPHERAL_TIMEOUT_ERROR (1 << 5)
+#define DSI_PERIPHERAL_LP_TX_SYNC_ERROR (1 << 4)
+#define DSI_PERIPHERAL_ESC_MODE_ENTRY_CMD_ERR (1 << 3)
+#define DSI_EOT_SYNC_ERROR (1 << 2)
+#define DSI_SOT_SYNC_ERROR (1 << 1)
+#define DSI_SOT_ERROR (1 << 0)
+
/* Gen4+ Timestamp and Pipe Frame time stamp registers */
#define GEN4_TIMESTAMP _MMIO(0x2358)
#define ILK_TIMESTAMP_HI _MMIO(0x70070)
@@ -11384,6 +11450,7 @@ enum skl_power_gate {
#define CMD_MODE_TE_GATE (0x1 << 28)
#define VIDEO_MODE_SYNC_EVENT (0x2 << 28)
#define VIDEO_MODE_SYNC_PULSE (0x3 << 28)
+#define TE_SOURCE_GPIO (1 << 27)
#define LINK_READY (1 << 20)
#define PIX_FMT_MASK (0x3 << 16)
#define PIX_FMT_SHIFT 16
@@ -11636,13 +11703,18 @@ enum skl_power_gate {
/* MOCS (Memory Object Control State) registers */
#define GEN9_LNCFCMOCS(i) _MMIO(0xb020 + (i) * 4) /* L3 Cache Control */
-#define GEN9_GFX_MOCS(i) _MMIO(0xc800 + (i) * 4) /* Graphics MOCS registers */
-#define GEN9_MFX0_MOCS(i) _MMIO(0xc900 + (i) * 4) /* Media 0 MOCS registers */
-#define GEN9_MFX1_MOCS(i) _MMIO(0xca00 + (i) * 4) /* Media 1 MOCS registers */
-#define GEN9_VEBOX_MOCS(i) _MMIO(0xcb00 + (i) * 4) /* Video MOCS registers */
-#define GEN9_BLT_MOCS(i) _MMIO(0xcc00 + (i) * 4) /* Blitter MOCS registers */
-/* Media decoder 2 MOCS registers */
-#define GEN11_MFX2_MOCS(i) _MMIO(0x10000 + (i) * 4)
+#define __GEN9_RCS0_MOCS0 0xc800
+#define GEN9_GFX_MOCS(i) _MMIO(__GEN9_RCS0_MOCS0 + (i) * 4)
+#define __GEN9_VCS0_MOCS0 0xc900
+#define GEN9_MFX0_MOCS(i) _MMIO(__GEN9_VCS0_MOCS0 + (i) * 4)
+#define __GEN9_VCS1_MOCS0 0xca00
+#define GEN9_MFX1_MOCS(i) _MMIO(__GEN9_VCS1_MOCS0 + (i) * 4)
+#define __GEN9_VECS0_MOCS0 0xcb00
+#define GEN9_VEBOX_MOCS(i) _MMIO(__GEN9_VECS0_MOCS0 + (i) * 4)
+#define __GEN9_BCS0_MOCS0 0xcc00
+#define GEN9_BLT_MOCS(i) _MMIO(__GEN9_BCS0_MOCS0 + (i) * 4)
+#define __GEN11_VCS2_MOCS0 0x10000
+#define GEN11_MFX2_MOCS(i) _MMIO(__GEN11_VCS2_MOCS0 + (i) * 4)
#define GEN10_SCRATCH_LNCF2 _MMIO(0xb0a0)
#define PMFLUSHDONE_LNICRSDROP (1 << 20)
diff --git a/drivers/gpu/drm/i915/i915_request.c b/drivers/gpu/drm/i915/i915_request.c
index 765bec89fc0d..be185886e4fc 100644
--- a/drivers/gpu/drm/i915/i915_request.c
+++ b/drivers/gpu/drm/i915/i915_request.c
@@ -57,11 +57,13 @@ static struct i915_global_request {
static const char *i915_fence_get_driver_name(struct dma_fence *fence)
{
- return "i915";
+ return dev_name(to_request(fence)->i915->drm.dev);
}
static const char *i915_fence_get_timeline_name(struct dma_fence *fence)
{
+ const struct i915_gem_context *ctx;
+
/*
* The timeline struct (as part of the ppgtt underneath a context)
* may be freed when the request is no longer in use by the GPU.
@@ -74,7 +76,11 @@ static const char *i915_fence_get_timeline_name(struct dma_fence *fence)
if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags))
return "signaled";
- return to_request(fence)->gem_context->name ?: "[i915]";
+ ctx = i915_request_gem_context(to_request(fence));
+ if (!ctx)
+ return "[" DRIVER_NAME "]";
+
+ return ctx->name;
}
static bool i915_fence_signaled(struct dma_fence *fence)
@@ -188,7 +194,7 @@ static void free_capture_list(struct i915_request *request)
{
struct i915_capture_list *capture;
- capture = request->capture_list;
+ capture = fetch_and_zero(&request->capture_list);
while (capture) {
struct i915_capture_list *next = capture->next;
@@ -214,7 +220,7 @@ static void remove_from_engine(struct i915_request *rq)
spin_lock(&engine->active.lock);
locked = engine;
}
- list_del(&rq->sched.link);
+ list_del_init(&rq->sched.link);
spin_unlock_irq(&locked->active.lock);
}
@@ -223,10 +229,7 @@ bool i915_request_retire(struct i915_request *rq)
if (!i915_request_completed(rq))
return false;
- GEM_TRACE("%s fence %llx:%lld, current %d\n",
- rq->engine->name,
- rq->fence.context, rq->fence.seqno,
- hwsp_seqno(rq));
+ RQ_TRACE(rq, "\n");
GEM_BUG_ON(!i915_sw_fence_signaled(&rq->submit));
trace_i915_request_retire(rq);
@@ -272,8 +275,8 @@ bool i915_request_retire(struct i915_request *rq)
remove_from_client(rq);
list_del(&rq->link);
- intel_context_exit(rq->hw_context);
- intel_context_unpin(rq->hw_context);
+ intel_context_exit(rq->context);
+ intel_context_unpin(rq->context);
free_capture_list(rq);
i915_sched_node_fini(&rq->sched);
@@ -287,10 +290,7 @@ void i915_request_retire_upto(struct i915_request *rq)
struct intel_timeline * const tl = i915_request_timeline(rq);
struct i915_request *tmp;
- GEM_TRACE("%s fence %llx:%lld, current %d\n",
- rq->engine->name,
- rq->fence.context, rq->fence.seqno,
- hwsp_seqno(rq));
+ RQ_TRACE(rq, "\n");
GEM_BUG_ON(!i915_request_completed(rq));
@@ -351,10 +351,7 @@ bool __i915_request_submit(struct i915_request *request)
struct intel_engine_cs *engine = request->engine;
bool result = false;
- GEM_TRACE("%s fence %llx:%lld, current %d\n",
- engine->name,
- request->fence.context, request->fence.seqno,
- hwsp_seqno(request));
+ RQ_TRACE(request, "\n");
GEM_BUG_ON(!irqs_disabled());
lockdep_assert_held(&engine->active.lock);
@@ -378,7 +375,7 @@ bool __i915_request_submit(struct i915_request *request)
if (i915_request_completed(request))
goto xfer;
- if (i915_gem_context_is_banned(request->gem_context))
+ if (intel_context_is_banned(request->context))
i915_request_skip(request, -EIO);
/*
@@ -417,7 +414,7 @@ xfer: /* We may be recursing from the signal callback of another i915 fence */
if (test_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT, &request->fence.flags) &&
!test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &request->fence.flags) &&
!i915_request_enable_breadcrumb(request))
- intel_engine_queue_breadcrumbs(engine);
+ intel_engine_signal_breadcrumbs(engine);
__notify_execute_cb(request);
@@ -443,10 +440,7 @@ void __i915_request_unsubmit(struct i915_request *request)
{
struct intel_engine_cs *engine = request->engine;
- GEM_TRACE("%s fence %llx:%lld, current %d\n",
- engine->name,
- request->fence.context, request->fence.seqno,
- hwsp_seqno(request));
+ RQ_TRACE(request, "\n");
GEM_BUG_ON(!irqs_disabled());
lockdep_assert_held(&engine->active.lock);
@@ -588,6 +582,21 @@ out:
return kmem_cache_alloc(global.slab_requests, gfp);
}
+static void __i915_request_ctor(void *arg)
+{
+ struct i915_request *rq = arg;
+
+ spin_lock_init(&rq->lock);
+ i915_sched_node_init(&rq->sched);
+ i915_sw_fence_init(&rq->submit, submit_notify);
+ i915_sw_fence_init(&rq->semaphore, semaphore_notify);
+
+ rq->file_priv = NULL;
+ rq->capture_list = NULL;
+
+ INIT_LIST_HEAD(&rq->execute_cb);
+}
+
struct i915_request *
__i915_request_create(struct intel_context *ce, gfp_t gfp)
{
@@ -645,35 +654,31 @@ __i915_request_create(struct intel_context *ce, gfp_t gfp)
goto err_free;
rq->i915 = ce->engine->i915;
- rq->hw_context = ce;
- rq->gem_context = ce->gem_context;
+ rq->context = ce;
rq->engine = ce->engine;
rq->ring = ce->ring;
rq->execution_mask = ce->engine->mask;
- rcu_assign_pointer(rq->timeline, tl);
+ RCU_INIT_POINTER(rq->timeline, tl);
+ RCU_INIT_POINTER(rq->hwsp_cacheline, tl->hwsp_cacheline);
rq->hwsp_seqno = tl->hwsp_seqno;
- rq->hwsp_cacheline = tl->hwsp_cacheline;
rq->rcustate = get_state_synchronize_rcu(); /* acts as smp_mb() */
- spin_lock_init(&rq->lock);
dma_fence_init(&rq->fence, &i915_fence_ops, &rq->lock,
tl->fence_context, seqno);
/* We bump the ref for the fence chain */
- i915_sw_fence_init(&i915_request_get(rq)->submit, submit_notify);
- i915_sw_fence_init(&i915_request_get(rq)->semaphore, semaphore_notify);
+ i915_sw_fence_reinit(&i915_request_get(rq)->submit);
+ i915_sw_fence_reinit(&i915_request_get(rq)->semaphore);
- i915_sched_node_init(&rq->sched);
+ i915_sched_node_reinit(&rq->sched);
- /* No zalloc, must clear what we need by hand */
- rq->file_priv = NULL;
+ /* No zalloc, everything must be cleared after use */
rq->batch = NULL;
- rq->capture_list = NULL;
- rq->flags = 0;
-
- INIT_LIST_HEAD(&rq->execute_cb);
+ GEM_BUG_ON(rq->file_priv);
+ GEM_BUG_ON(rq->capture_list);
+ GEM_BUG_ON(!list_empty(&rq->execute_cb));
/*
* Reserve space in the ring buffer for all the commands required to
@@ -755,34 +760,37 @@ err_unlock:
static int
i915_request_await_start(struct i915_request *rq, struct i915_request *signal)
{
- struct intel_timeline *tl;
struct dma_fence *fence;
int err;
GEM_BUG_ON(i915_request_timeline(rq) ==
rcu_access_pointer(signal->timeline));
+ fence = NULL;
rcu_read_lock();
- tl = rcu_dereference(signal->timeline);
- if (i915_request_started(signal) || !kref_get_unless_zero(&tl->kref))
- tl = NULL;
- rcu_read_unlock();
- if (!tl) /* already started or maybe even completed */
- return 0;
+ spin_lock_irq(&signal->lock);
+ if (!i915_request_started(signal) &&
+ !list_is_first(&signal->link,
+ &rcu_dereference(signal->timeline)->requests)) {
+ struct i915_request *prev = list_prev_entry(signal, link);
- fence = ERR_PTR(-EBUSY);
- if (mutex_trylock(&tl->mutex)) {
- fence = NULL;
- if (!i915_request_started(signal) &&
- !list_is_first(&signal->link, &tl->requests)) {
- signal = list_prev_entry(signal, link);
- fence = dma_fence_get(&signal->fence);
+ /*
+ * Peek at the request before us in the timeline. That
+ * request will only be valid before it is retired, so
+ * after acquiring a reference to it, confirm that it is
+ * still part of the signaler's timeline.
+ */
+ if (i915_request_get_rcu(prev)) {
+ if (list_next_entry(prev, link) == signal)
+ fence = &prev->fence;
+ else
+ i915_request_put(prev);
}
- mutex_unlock(&tl->mutex);
}
- intel_timeline_put(tl);
- if (IS_ERR_OR_NULL(fence))
- return PTR_ERR_OR_ZERO(fence);
+ spin_unlock_irq(&signal->lock);
+ rcu_read_unlock();
+ if (!fence)
+ return 0;
err = 0;
if (intel_timeline_sync_is_later(i915_request_timeline(rq), fence))
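The rewritten await-start path above is an instance of a standard RCU idiom: speculatively take a reference on a neighbour that may be freed (or reused, given SLAB_TYPESAFE_BY_RCU) at any time, then re-check the linkage to prove the reference still names the object we wanted. Reduced to its core (sketch; the locking and error paths are elided):

	rcu_read_lock();
	prev = list_prev_entry(signal, link);
	if (i915_request_get_rcu(prev)) {	/* may fail: prev is dying */
		if (list_next_entry(prev, link) == signal)
			fence = &prev->fence;	/* still adjacent: valid */
		else
			i915_request_put(prev);	/* lost the race: drop it */
	}
	rcu_read_unlock();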
@@ -908,18 +916,16 @@ i915_request_await_request(struct i915_request *to, struct i915_request *from)
return ret;
}
- if (to->engine == from->engine) {
+ if (to->engine == from->engine)
ret = i915_sw_fence_await_sw_fence_gfp(&to->submit,
&from->submit,
I915_FENCE_GFP);
- } else if (intel_engine_has_semaphores(to->engine) &&
- to->gem_context->sched.priority >= I915_PRIORITY_NORMAL) {
+ else if (intel_context_use_semaphores(to->context))
ret = emit_semaphore_wait(to, from, I915_FENCE_GFP);
- } else {
+ else
ret = i915_sw_fence_await_dma_fence(&to->submit,
&from->fence, 0,
I915_FENCE_GFP);
- }
if (ret < 0)
return ret;
@@ -959,8 +965,10 @@ i915_request_await_dma_fence(struct i915_request *rq, struct dma_fence *fence)
do {
fence = *child++;
- if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags))
+ if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags)) {
+ i915_sw_fence_set_error_once(&rq->submit, fence->error);
continue;
+ }
/*
* Requests on the same timeline are explicitly ordered, along
@@ -1067,8 +1075,10 @@ i915_request_await_execution(struct i915_request *rq,
do {
fence = *child++;
- if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags))
+ if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags)) {
+ i915_sw_fence_set_error_once(&rq->submit, fence->error);
continue;
+ }
/*
* We don't squash repeated fence dependencies here as we
@@ -1244,8 +1254,7 @@ struct i915_request *__i915_request_commit(struct i915_request *rq)
struct intel_ring *ring = rq->ring;
u32 *cs;
- GEM_TRACE("%s fence %llx:%lld\n",
- engine->name, rq->fence.context, rq->fence.seqno);
+ RQ_TRACE(rq, "\n");
/*
* To ensure that this call will not fail, space for its emissions
@@ -1291,8 +1300,8 @@ void __i915_request_queue(struct i915_request *rq,
void i915_request_add(struct i915_request *rq)
{
- struct i915_sched_attr attr = rq->gem_context->sched;
struct intel_timeline * const tl = i915_request_timeline(rq);
+ struct i915_sched_attr attr = {};
struct i915_request *prev;
lockdep_assert_held(&tl->mutex);
@@ -1302,6 +1311,9 @@ void i915_request_add(struct i915_request *rq)
prev = __i915_request_commit(rq);
+ if (rcu_access_pointer(rq->context->gem_context))
+ attr = i915_request_gem_context(rq)->sched;
+
/*
* Boost actual workloads past semaphores!
*
@@ -1597,10 +1609,14 @@ static struct i915_global_request global = { {
int __init i915_global_request_init(void)
{
- global.slab_requests = KMEM_CACHE(i915_request,
- SLAB_HWCACHE_ALIGN |
- SLAB_RECLAIM_ACCOUNT |
- SLAB_TYPESAFE_BY_RCU);
+ global.slab_requests =
+ kmem_cache_create("i915_request",
+ sizeof(struct i915_request),
+ __alignof__(struct i915_request),
+ SLAB_HWCACHE_ALIGN |
+ SLAB_RECLAIM_ACCOUNT |
+ SLAB_TYPESAFE_BY_RCU,
+ __i915_request_ctor);
if (!global.slab_requests)
return -ENOMEM;
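
The move from KMEM_CACHE() to kmem_cache_create() exists purely to pass the constructor: with SLAB_TYPESAFE_BY_RCU the ctor runs when a slab of objects is first allocated, not on every allocation, so anything initialised there (locks, list heads, fences) must be returned to a reusable state on free rather than re-initialised; hence the reinit helpers used above. A generic, self-contained sketch of the pattern (hypothetical names):

	struct obj {
		spinlock_t lock;
		struct list_head cbs;
	};

	static void obj_ctor(void *arg)
	{
		struct obj *o = arg;

		spin_lock_init(&o->lock);	/* once per object lifetime */
		INIT_LIST_HEAD(&o->cbs);	/* freed objects must leave this empty */
	}

	static struct kmem_cache *obj_cache;

	static int __init obj_cache_init(void)
	{
		obj_cache = kmem_cache_create("obj", sizeof(struct obj),
					      __alignof__(struct obj),
					      SLAB_HWCACHE_ALIGN |
					      SLAB_TYPESAFE_BY_RCU,
					      obj_ctor);
		return obj_cache ? 0 : -ENOMEM;
	}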
diff --git a/drivers/gpu/drm/i915/i915_request.h b/drivers/gpu/drm/i915/i915_request.h
index 96991d64759c..031433691a06 100644
--- a/drivers/gpu/drm/i915/i915_request.h
+++ b/drivers/gpu/drm/i915/i915_request.h
@@ -28,8 +28,10 @@
#include <linux/dma-fence.h>
#include <linux/lockdep.h>
+#include "gem/i915_gem_context_types.h"
#include "gt/intel_context_types.h"
#include "gt/intel_engine_types.h"
+#include "gt/intel_timeline_types.h"
#include "i915_gem.h"
#include "i915_scheduler.h"
@@ -41,14 +43,19 @@
struct drm_file;
struct drm_i915_gem_object;
struct i915_request;
-struct intel_timeline;
-struct intel_timeline_cacheline;
struct i915_capture_list {
struct i915_capture_list *next;
struct i915_vma *vma;
};
+#define RQ_TRACE(rq, fmt, ...) do { \
+ const struct i915_request *rq__ = (rq); \
+ ENGINE_TRACE(rq__->engine, "fence %llx:%lld, current %d " fmt, \
+ rq__->fence.context, rq__->fence.seqno, \
+ hwsp_seqno(rq__), ##__VA_ARGS__); \
+} while (0)
+
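RQ_TRACE() captures its argument once into rq__, so the fence context/seqno and hwsp_seqno() all read the same request even if the caller passes an expression; typical call sites are as simple as (the second line is a hypothetical example):

	RQ_TRACE(rq, "\n");		/* bare marker, as in the hunks above */
	RQ_TRACE(rq, "err=%d\n", -EIO);	/* extra context via varargs */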
enum {
/*
* I915_FENCE_FLAG_ACTIVE - this request is currently submitted to HW.
@@ -70,6 +77,38 @@ enum {
* a request is on the various signal_list.
*/
I915_FENCE_FLAG_SIGNAL,
+
+ /*
+ * I915_FENCE_FLAG_NOPREEMPT - this request should not be preempted
+ *
+ * The execution of some requests should not be interrupted. This is
+ * a sensitive operation as it makes the request super important,
+ * blocking other higher priority work. Abuse of this flag will
+ * lead to quality of service issues.
+ */
+ I915_FENCE_FLAG_NOPREEMPT,
+
+ /*
+ * I915_FENCE_FLAG_SENTINEL - this request should be last in the queue
+ *
+ * A high priority sentinel request may be submitted to clear the
+ * submission queue. As it will be the only request in-flight, upon
+ * execution all other active requests will have been preempted and
+ * unsubmitted. This preemptive pulse is used to re-evaluate the
+ * in-flight requests, particularly in cases where an active context
+ * is banned and those active requests need to be cancelled.
+ */
+ I915_FENCE_FLAG_SENTINEL,
+
+ /*
+ * I915_FENCE_FLAG_BOOST - upclock the gpu for this request
+ *
+ * Some requests are more important than others! In particular, a
+ * request that the user is waiting on is typically required for
+	 * interactive latency, which we want to minimise by upclocking
+ * the GPU. Here we track such boost requests on a per-request basis.
+ */
+ I915_FENCE_FLAG_BOOST,
};
/**
@@ -109,9 +148,8 @@ struct i915_request {
* i915_request_free() will then decrement the refcount on the
* context.
*/
- struct i915_gem_context *gem_context;
struct intel_engine_cs *engine;
- struct intel_context *hw_context;
+ struct intel_context *context;
struct intel_ring *ring;
struct intel_timeline __rcu *timeline;
struct list_head signal_link;
@@ -144,6 +182,10 @@ struct i915_request {
union {
wait_queue_entry_t submitq;
struct i915_sw_dma_fence_cb dmaq;
+ struct i915_request_duration_cb {
+ struct dma_fence_cb cb;
+ ktime_t emitted;
+ } duration;
};
struct list_head execute_cb;
struct i915_sw_fence semaphore;
@@ -176,7 +218,7 @@ struct i915_request {
* inside the timeline's HWSP vma, but it is only valid while this
* request has not completed and guarded by the timeline mutex.
*/
- struct intel_timeline_cacheline *hwsp_cacheline;
+ struct intel_timeline_cacheline __rcu *hwsp_cacheline;
/** Position in the ring of the start of the request */
u32 head;
@@ -215,11 +257,6 @@ struct i915_request {
/** Time at which this request was emitted, in jiffies. */
unsigned long emitted_jiffies;
- unsigned long flags;
-#define I915_REQUEST_WAITBOOST BIT(0)
-#define I915_REQUEST_NOPREEMPT BIT(1)
-#define I915_REQUEST_SENTINEL BIT(2)
-
/** timeline->request entry for this request */
struct list_head link;
@@ -432,18 +469,18 @@ static inline void i915_request_mark_complete(struct i915_request *rq)
static inline bool i915_request_has_waitboost(const struct i915_request *rq)
{
- return rq->flags & I915_REQUEST_WAITBOOST;
+ return test_bit(I915_FENCE_FLAG_BOOST, &rq->fence.flags);
}
static inline bool i915_request_has_nopreempt(const struct i915_request *rq)
{
/* Preemption should only be disabled very rarely */
- return unlikely(rq->flags & I915_REQUEST_NOPREEMPT);
+ return unlikely(test_bit(I915_FENCE_FLAG_NOPREEMPT, &rq->fence.flags));
}
static inline bool i915_request_has_sentinel(const struct i915_request *rq)
{
- return unlikely(rq->flags & I915_REQUEST_SENTINEL);
+ return unlikely(test_bit(I915_FENCE_FLAG_SENTINEL, &rq->fence.flags));
}
static inline struct intel_timeline *
@@ -454,6 +491,13 @@ i915_request_timeline(struct i915_request *rq)
lockdep_is_held(&rcu_access_pointer(rq->timeline)->mutex));
}
+static inline struct i915_gem_context *
+i915_request_gem_context(struct i915_request *rq)
+{
+ /* Valid only while the request is being constructed (or retired). */
+ return rcu_dereference_protected(rq->context->gem_context, true);
+}
+
static inline struct intel_timeline *
i915_request_active_timeline(struct i915_request *rq)
{
diff --git a/drivers/gpu/drm/i915/i915_scheduler.c b/drivers/gpu/drm/i915/i915_scheduler.c
index 247a9671bca5..bf87c70bfdd9 100644
--- a/drivers/gpu/drm/i915/i915_scheduler.c
+++ b/drivers/gpu/drm/i915/i915_scheduler.c
@@ -213,7 +213,7 @@ static void kick_submission(struct intel_engine_cs *engine,
* If we are already the currently executing context, don't
* bother evaluating if we should preempt ourselves.
*/
- if (inflight->hw_context == rq->hw_context)
+ if (inflight->context == rq->context)
goto unlock;
engine->execlists.queue_priority_hint = prio;
@@ -387,9 +387,19 @@ void i915_sched_node_init(struct i915_sched_node *node)
INIT_LIST_HEAD(&node->signalers_list);
INIT_LIST_HEAD(&node->waiters_list);
INIT_LIST_HEAD(&node->link);
+
+ i915_sched_node_reinit(node);
+}
+
+void i915_sched_node_reinit(struct i915_sched_node *node)
+{
node->attr.priority = I915_PRIORITY_INVALID;
node->semaphores = 0;
node->flags = 0;
+
+ GEM_BUG_ON(!list_empty(&node->signalers_list));
+ GEM_BUG_ON(!list_empty(&node->waiters_list));
+ GEM_BUG_ON(!list_empty(&node->link));
}
static struct i915_dependency *
@@ -480,6 +490,7 @@ void i915_sched_node_fini(struct i915_sched_node *node)
if (dep->flags & I915_DEPENDENCY_ALLOC)
i915_dependency_free(dep);
}
+ INIT_LIST_HEAD(&node->signalers_list);
/* Remove ourselves from everyone who depends upon us */
list_for_each_entry_safe(dep, tmp, &node->waiters_list, wait_link) {
@@ -490,6 +501,7 @@ void i915_sched_node_fini(struct i915_sched_node *node)
if (dep->flags & I915_DEPENDENCY_ALLOC)
i915_dependency_free(dep);
}
+ INIT_LIST_HEAD(&node->waiters_list);
spin_unlock_irq(&schedule_lock);
}
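The init/reinit split is the usual shape for objects recycled through a typesafe slab: permanent structure (the list heads) is set up once per allocation, while per-use state is reset on every reuse, and the GEM_BUG_ONs assert the node was fully drained before recycling. The same pattern in a generic sketch (my_node and its fields are illustrative):

static void my_node_reinit(struct my_node *node)
{
	node->prio = PRIO_INVALID;		/* per-use state */
	WARN_ON(!list_empty(&node->deps));	/* must be drained */
}

static void my_node_init(struct my_node *node)
{
	INIT_LIST_HEAD(&node->deps);		/* structure, once */
	my_node_reinit(node);
}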
diff --git a/drivers/gpu/drm/i915/i915_scheduler.h b/drivers/gpu/drm/i915/i915_scheduler.h
index 07d243acf553..d1dc4efef77b 100644
--- a/drivers/gpu/drm/i915/i915_scheduler.h
+++ b/drivers/gpu/drm/i915/i915_scheduler.h
@@ -26,6 +26,7 @@
sched.link)
void i915_sched_node_init(struct i915_sched_node *node);
+void i915_sched_node_reinit(struct i915_sched_node *node);
bool __i915_sched_node_add_dependency(struct i915_sched_node *node,
struct i915_sched_node *signal,
diff --git a/drivers/gpu/drm/i915/i915_selftest.h b/drivers/gpu/drm/i915/i915_selftest.h
index 4d88205de51b..98bcb6fa0ab4 100644
--- a/drivers/gpu/drm/i915/i915_selftest.h
+++ b/drivers/gpu/drm/i915/i915_selftest.h
@@ -36,6 +36,7 @@ struct i915_selftest {
char *filter;
int mock;
int live;
+ int perf;
};
#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
@@ -45,6 +46,7 @@ extern struct i915_selftest i915_selftest;
int i915_mock_selftests(void);
int i915_live_selftests(struct pci_dev *pdev);
+int i915_perf_selftests(struct pci_dev *pdev);
/* We extract the function declarations from i915_mock_selftests.h and
* i915_live_selftests.h Add your unit test declarations there!
@@ -61,6 +63,7 @@ int i915_live_selftests(struct pci_dev *pdev);
#undef selftest
#define selftest(name, func) int func(struct drm_i915_private *i915);
#include "selftests/i915_live_selftests.h"
+#include "selftests/i915_perf_selftests.h"
#undef selftest
struct i915_subtest {
@@ -109,6 +112,7 @@ int __i915_subtests(const char *caller,
static inline int i915_mock_selftests(void) { return 0; }
static inline int i915_live_selftests(struct pci_dev *pdev) { return 0; }
+static inline int i915_perf_selftests(struct pci_dev *pdev) { return 0; }
#define I915_SELFTEST_DECLARE(x)
#define I915_SELFTEST_ONLY(x) 0
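The perf mode reuses the mock/live plumbing: tests are listed once in a header, and redefinitions of the selftest() macro expand that list into forward declarations and a dispatch table. A hedged sketch of an entry in the assumed selftests/i915_perf_selftests.h (the test name is hypothetical):

/* selftests/i915_perf_selftests.h, illustrative entry */
selftest(request, i915_request_perf_selftests)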
diff --git a/drivers/gpu/drm/i915/i915_sw_fence.c b/drivers/gpu/drm/i915/i915_sw_fence.c
index 6a88db291252..51ba97daf2a0 100644
--- a/drivers/gpu/drm/i915/i915_sw_fence.c
+++ b/drivers/gpu/drm/i915/i915_sw_fence.c
@@ -12,6 +12,12 @@
#include "i915_sw_fence.h"
#include "i915_selftest.h"
+#if IS_ENABLED(CONFIG_DRM_I915_DEBUG)
+#define I915_SW_FENCE_BUG_ON(expr) BUG_ON(expr)
+#else
+#define I915_SW_FENCE_BUG_ON(expr) BUILD_BUG_ON_INVALID(expr)
+#endif
+
#define I915_SW_FENCE_FLAG_ALLOC BIT(3) /* after WQ_FLAG_* for safety */
static DEFINE_SPINLOCK(i915_sw_fence_lock);
@@ -218,13 +224,21 @@ void __i915_sw_fence_init(struct i915_sw_fence *fence,
{
BUG_ON(!fn || (unsigned long)fn & ~I915_SW_FENCE_MASK);
+ __init_waitqueue_head(&fence->wait, name, key);
+ fence->flags = (unsigned long)fn;
+
+ i915_sw_fence_reinit(fence);
+}
+
+void i915_sw_fence_reinit(struct i915_sw_fence *fence)
+{
debug_fence_init(fence);
- __init_waitqueue_head(&fence->wait, name, key);
atomic_set(&fence->pending, 1);
fence->error = 0;
- fence->flags = (unsigned long)fn;
+ I915_SW_FENCE_BUG_ON(!fence->flags);
+ I915_SW_FENCE_BUG_ON(!list_empty(&fence->wait.head));
}
void i915_sw_fence_commit(struct i915_sw_fence *fence)
@@ -414,8 +428,10 @@ static void dma_i915_sw_fence_wake_timer(struct dma_fence *dma,
struct i915_sw_fence *fence;
fence = xchg(&cb->base.fence, NULL);
- if (fence)
+ if (fence) {
+ i915_sw_fence_set_error_once(fence, dma->error);
i915_sw_fence_complete(fence);
+ }
irq_work_queue(&cb->work);
}
@@ -443,8 +459,10 @@ int i915_sw_fence_await_dma_fence(struct i915_sw_fence *fence,
debug_fence_assert(fence);
might_sleep_if(gfpflags_allow_blocking(gfp));
- if (dma_fence_is_signaled(dma))
+ if (dma_fence_is_signaled(dma)) {
+ i915_sw_fence_set_error_once(fence, dma->error);
return 0;
+ }
cb = kmalloc(timeout ?
sizeof(struct i915_sw_dma_fence_cb_timer) :
@@ -454,7 +472,12 @@ int i915_sw_fence_await_dma_fence(struct i915_sw_fence *fence,
if (!gfpflags_allow_blocking(gfp))
return -ENOMEM;
- return dma_fence_wait(dma, false);
+ ret = dma_fence_wait(dma, false);
+ if (ret)
+ return ret;
+
+ i915_sw_fence_set_error_once(fence, dma->error);
+ return 0;
}
cb->fence = fence;
@@ -504,8 +527,10 @@ int __i915_sw_fence_await_dma_fence(struct i915_sw_fence *fence,
debug_fence_assert(fence);
- if (dma_fence_is_signaled(dma))
+ if (dma_fence_is_signaled(dma)) {
+ i915_sw_fence_set_error_once(fence, dma->error);
return 0;
+ }
cb->fence = fence;
i915_sw_fence_await(fence);
@@ -539,8 +564,7 @@ int i915_sw_fence_await_reservation(struct i915_sw_fence *fence,
struct dma_fence **shared;
unsigned int count, i;
- ret = dma_resv_get_fences_rcu(resv,
- &excl, &count, &shared);
+ ret = dma_resv_get_fences_rcu(resv, &excl, &count, &shared);
if (ret)
return ret;
diff --git a/drivers/gpu/drm/i915/i915_sw_fence.h b/drivers/gpu/drm/i915/i915_sw_fence.h
index ab7d58bd0b9d..19e806ce43bc 100644
--- a/drivers/gpu/drm/i915/i915_sw_fence.h
+++ b/drivers/gpu/drm/i915/i915_sw_fence.h
@@ -54,6 +54,8 @@ do { \
__i915_sw_fence_init((fence), (fn), NULL, NULL)
#endif
+void i915_sw_fence_reinit(struct i915_sw_fence *fence);
+
#ifdef CONFIG_DRM_I915_SW_FENCE_DEBUG_OBJECTS
void i915_sw_fence_fini(struct i915_sw_fence *fence);
#else
@@ -110,7 +112,8 @@ static inline void i915_sw_fence_wait(struct i915_sw_fence *fence)
static inline void
i915_sw_fence_set_error_once(struct i915_sw_fence *fence, int error)
{
- cmpxchg(&fence->error, 0, error);
+ if (unlikely(error))
+ cmpxchg(&fence->error, 0, error);
}
#endif /* _I915_SW_FENCE_H_ */
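The added branch keeps first-error-wins semantics while skipping the cmpxchg, and the cacheline dirtying it implies, in the common error-free path. A runnable userspace analogue using C11 atomics:

#include <stdatomic.h>

/* Record only the first non-zero error; later errors are ignored. */
static void set_error_once(_Atomic int *slot, int error)
{
	int expected = 0;

	if (error)	/* no atomic traffic at all when there is no error */
		atomic_compare_exchange_strong(slot, &expected, error);
}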
diff --git a/drivers/gpu/drm/i915/i915_sw_fence_work.c b/drivers/gpu/drm/i915/i915_sw_fence_work.c
index 8538ee7a521d..997b2998f1f2 100644
--- a/drivers/gpu/drm/i915/i915_sw_fence_work.c
+++ b/drivers/gpu/drm/i915/i915_sw_fence_work.c
@@ -6,6 +6,13 @@
#include "i915_sw_fence_work.h"
+static void fence_complete(struct dma_fence_work *f)
+{
+ if (f->ops->release)
+ f->ops->release(f);
+ dma_fence_signal(&f->dma);
+}
+
static void fence_work(struct work_struct *work)
{
struct dma_fence_work *f = container_of(work, typeof(*f), work);
@@ -14,7 +21,8 @@ static void fence_work(struct work_struct *work)
err = f->ops->work(f);
if (err)
dma_fence_set_error(&f->dma, err);
- dma_fence_signal(&f->dma);
+
+ fence_complete(f);
dma_fence_put(&f->dma);
}
@@ -32,7 +40,7 @@ fence_notify(struct i915_sw_fence *fence, enum i915_sw_fence_notify state)
dma_fence_get(&f->dma);
queue_work(system_unbound_wq, &f->work);
} else {
- dma_fence_signal(&f->dma);
+ fence_complete(f);
}
break;
@@ -60,9 +68,6 @@ static void fence_release(struct dma_fence *fence)
{
struct dma_fence_work *f = container_of(fence, typeof(*f), dma);
- if (f->ops->release)
- f->ops->release(f);
-
i915_sw_fence_fini(&f->chain);
BUILD_BUG_ON(offsetof(typeof(*f), dma));
diff --git a/drivers/gpu/drm/i915/i915_sysfs.c b/drivers/gpu/drm/i915/i915_sysfs.c
index 65476909d1bf..0cef3130db05 100644
--- a/drivers/gpu/drm/i915/i915_sysfs.c
+++ b/drivers/gpu/drm/i915/i915_sysfs.c
@@ -259,33 +259,18 @@ static const struct bin_attribute dpf_attrs_1 = {
static ssize_t gt_act_freq_mhz_show(struct device *kdev,
struct device_attribute *attr, char *buf)
{
- struct drm_i915_private *dev_priv = kdev_minor_to_i915(kdev);
- struct intel_rps *rps = &dev_priv->gt.rps;
- intel_wakeref_t wakeref;
- u32 freq;
-
- wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
-
- if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
- vlv_punit_get(dev_priv);
- freq = vlv_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS);
- vlv_punit_put(dev_priv);
-
- freq = (freq >> 8) & 0xff;
- } else {
- freq = intel_get_cagf(rps, I915_READ(GEN6_RPSTAT1));
- }
-
- intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
+ struct drm_i915_private *i915 = kdev_minor_to_i915(kdev);
+ struct intel_rps *rps = &i915->gt.rps;
- return snprintf(buf, PAGE_SIZE, "%d\n", intel_gpu_freq(rps, freq));
+ return snprintf(buf, PAGE_SIZE, "%d\n",
+ intel_rps_read_actual_frequency(rps));
}
static ssize_t gt_cur_freq_mhz_show(struct device *kdev,
struct device_attribute *attr, char *buf)
{
- struct drm_i915_private *dev_priv = kdev_minor_to_i915(kdev);
- struct intel_rps *rps = &dev_priv->gt.rps;
+ struct drm_i915_private *i915 = kdev_minor_to_i915(kdev);
+ struct intel_rps *rps = &i915->gt.rps;
return snprintf(buf, PAGE_SIZE, "%d\n",
intel_gpu_freq(rps, rps->cur_freq));
@@ -293,8 +278,8 @@ static ssize_t gt_cur_freq_mhz_show(struct device *kdev,
static ssize_t gt_boost_freq_mhz_show(struct device *kdev, struct device_attribute *attr, char *buf)
{
- struct drm_i915_private *dev_priv = kdev_minor_to_i915(kdev);
- struct intel_rps *rps = &dev_priv->gt.rps;
+ struct drm_i915_private *i915 = kdev_minor_to_i915(kdev);
+ struct intel_rps *rps = &i915->gt.rps;
return snprintf(buf, PAGE_SIZE, "%d\n",
intel_gpu_freq(rps, rps->boost_freq));
@@ -513,15 +498,15 @@ static ssize_t error_state_read(struct file *filp, struct kobject *kobj,
struct device *kdev = kobj_to_dev(kobj);
struct drm_i915_private *i915 = kdev_minor_to_i915(kdev);
- struct i915_gpu_state *gpu;
+ struct i915_gpu_coredump *gpu;
ssize_t ret;
gpu = i915_first_error_state(i915);
if (IS_ERR(gpu)) {
ret = PTR_ERR(gpu);
} else if (gpu) {
- ret = i915_gpu_state_copy_to_buffer(gpu, buf, off, count);
- i915_gpu_state_put(gpu);
+ ret = i915_gpu_coredump_copy_to_buffer(gpu, buf, off, count);
+ i915_gpu_coredump_put(gpu);
} else {
const char *str = "No error state collected\n";
size_t len = strlen(str);
diff --git a/drivers/gpu/drm/i915/i915_trace.h b/drivers/gpu/drm/i915/i915_trace.h
index 7ef7a1e1664c..233a97a2c276 100644
--- a/drivers/gpu/drm/i915/i915_trace.h
+++ b/drivers/gpu/drm/i915/i915_trace.h
@@ -341,7 +341,7 @@ TRACE_EVENT(intel_disable_plane,
/* pipe updates */
-TRACE_EVENT(i915_pipe_update_start,
+TRACE_EVENT(intel_pipe_update_start,
TP_PROTO(struct intel_crtc *crtc),
TP_ARGS(crtc),
@@ -366,7 +366,7 @@ TRACE_EVENT(i915_pipe_update_start,
__entry->scanline, __entry->min, __entry->max)
);
-TRACE_EVENT(i915_pipe_update_vblank_evaded,
+TRACE_EVENT(intel_pipe_update_vblank_evaded,
TP_PROTO(struct intel_crtc *crtc),
TP_ARGS(crtc),
@@ -391,7 +391,7 @@ TRACE_EVENT(i915_pipe_update_vblank_evaded,
__entry->scanline, __entry->min, __entry->max)
);
-TRACE_EVENT(i915_pipe_update_end,
+TRACE_EVENT(intel_pipe_update_end,
TP_PROTO(struct intel_crtc *crtc, u32 frame, int scanline_end),
TP_ARGS(crtc, frame, scanline_end),
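Since TRACE_EVENT(name, ...) generates a trace_name() call for each event, the rename turns every call site of the form trace_i915_pipe_update_start(crtc) into:

trace_intel_pipe_update_start(crtc);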
diff --git a/drivers/gpu/drm/i915/i915_utils.c b/drivers/gpu/drm/i915/i915_utils.c
index 0348c6d0ef5f..c47261ae86ea 100644
--- a/drivers/gpu/drm/i915/i915_utils.c
+++ b/drivers/gpu/drm/i915/i915_utils.c
@@ -23,7 +23,7 @@ __i915_printk(struct drm_i915_private *dev_priv, const char *level,
struct va_format vaf;
va_list args;
- if (is_debug && !(drm_debug & DRM_UT_DRIVER))
+ if (is_debug && !drm_debug_enabled(DRM_UT_DRIVER))
return;
va_start(args, fmt);
diff --git a/drivers/gpu/drm/i915/i915_utils.h b/drivers/gpu/drm/i915/i915_utils.h
index 04139ba1191e..b0ade76bec90 100644
--- a/drivers/gpu/drm/i915/i915_utils.h
+++ b/drivers/gpu/drm/i915/i915_utils.h
@@ -69,7 +69,7 @@ bool i915_error_injected(void);
#else
-#define i915_inject_probe_error(_i915, _err) 0
+#define i915_inject_probe_error(i915, e) ({ BUILD_BUG_ON_INVALID(i915); 0; })
#define i915_error_injected() false
#endif
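In the kernel, BUILD_BUG_ON_INVALID(e) expands to roughly ((void)sizeof((long)(e))), so the disabled stub now type-checks its argument at compile time yet emits no code, where the old bare 0 silently accepted any spelling of the argument. The same trick in a minimal, generic form (my_hook_disabled is illustrative):

/* "Use" the argument inside sizeof: typos fail to compile, nothing
 * is evaluated at run time, and the expression still yields 0. */
#define my_hook_disabled(ptr) ({ (void)sizeof(*(ptr)); 0; })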
diff --git a/drivers/gpu/drm/i915/i915_vma.c b/drivers/gpu/drm/i915/i915_vma.c
index 01c822256b39..17d7c525ea5c 100644
--- a/drivers/gpu/drm/i915/i915_vma.c
+++ b/drivers/gpu/drm/i915/i915_vma.c
@@ -28,7 +28,9 @@
#include "display/intel_frontbuffer.h"
#include "gt/intel_engine.h"
+#include "gt/intel_engine_heartbeat.h"
#include "gt/intel_gt.h"
+#include "gt/intel_gt_requests.h"
#include "i915_drv.h"
#include "i915_globals.h"
@@ -112,6 +114,7 @@ vma_create(struct drm_i915_gem_object *obj,
if (vma == NULL)
return ERR_PTR(-ENOMEM);
+ kref_init(&vma->ref);
mutex_init(&vma->pages_mutex);
vma->vm = i915_vm_get(vm);
vma->ops = &vm->vma_ops;
@@ -290,6 +293,7 @@ i915_vma_instance(struct drm_i915_gem_object *obj,
struct i915_vma_work {
struct dma_fence_work base;
struct i915_vma *vma;
+ struct drm_i915_gem_object *pinned;
enum i915_cache_level cache_level;
unsigned int flags;
};
@@ -304,15 +308,21 @@ static int __vma_bind(struct dma_fence_work *work)
if (err)
atomic_or(I915_VMA_ERROR, &vma->flags);
- if (vma->obj)
- __i915_gem_object_unpin_pages(vma->obj);
-
return err;
}
+static void __vma_release(struct dma_fence_work *work)
+{
+ struct i915_vma_work *vw = container_of(work, typeof(*vw), base);
+
+ if (vw->pinned)
+ __i915_gem_object_unpin_pages(vw->pinned);
+}
+
static const struct dma_fence_work_ops bind_ops = {
.name = "bind",
.work = __vma_bind,
+ .release = __vma_release,
};
struct i915_vma_work *i915_vma_work(void)
@@ -393,8 +403,10 @@ int i915_vma_bind(struct i915_vma *vma,
i915_active_set_exclusive(&vma->active, &work->base.dma);
work->base.dma.error = 0; /* enable the queue_work() */
- if (vma->obj)
+ if (vma->obj) {
__i915_gem_object_pin_pages(vma->obj);
+ work->pinned = vma->obj;
+ }
} else {
GEM_BUG_ON((bind_flags & ~vma_flags) & vma->vm->bind_async_flags);
ret = vma->ops->bind_vma(vma, cache_level, bind_flags);
@@ -411,8 +423,6 @@ void __iomem *i915_vma_pin_iomap(struct i915_vma *vma)
void __iomem *ptr;
int err;
- /* Access through the GTT requires the device to be awake. */
- assert_rpm_wakelock_held(vma->vm->gt->uncore->rpm);
if (GEM_WARN_ON(!i915_vma_is_map_and_fenceable(vma))) {
err = -ENODEV;
goto err;
@@ -444,6 +454,8 @@ void __iomem *i915_vma_pin_iomap(struct i915_vma *vma)
goto err_unpin;
i915_vma_set_ggtt_write(vma);
+
+ /* NB Access through the GTT requires the device to be awake. */
return ptr;
err_unpin:
@@ -846,6 +858,7 @@ static void vma_unbind_pages(struct i915_vma *vma)
int i915_vma_pin(struct i915_vma *vma, u64 size, u64 alignment, u64 flags)
{
struct i915_vma_work *work = NULL;
+ intel_wakeref_t wakeref = 0;
unsigned int bound;
int err;
@@ -871,6 +884,9 @@ int i915_vma_pin(struct i915_vma *vma, u64 size, u64 alignment, u64 flags)
}
}
+ if (flags & PIN_GLOBAL)
+ wakeref = intel_runtime_pm_get(&vma->vm->i915->runtime_pm);
+
/* No more allocations allowed once we hold vm->mutex */
err = mutex_lock_interruptible(&vma->vm->mutex);
if (err)
@@ -934,11 +950,45 @@ err_unlock:
err_fence:
if (work)
dma_fence_work_commit(&work->base);
+ if (wakeref)
+ intel_runtime_pm_put(&vma->vm->i915->runtime_pm, wakeref);
err_pages:
vma_put_pages(vma);
return err;
}
+static void flush_idle_contexts(struct intel_gt *gt)
+{
+ struct intel_engine_cs *engine;
+ enum intel_engine_id id;
+
+ for_each_engine(engine, gt, id)
+ intel_engine_flush_barriers(engine);
+
+ intel_gt_wait_for_idle(gt, MAX_SCHEDULE_TIMEOUT);
+}
+
+int i915_ggtt_pin(struct i915_vma *vma, u32 align, unsigned int flags)
+{
+ struct i915_address_space *vm = vma->vm;
+ int err;
+
+ GEM_BUG_ON(!i915_vma_is_ggtt(vma));
+
+ do {
+ err = i915_vma_pin(vma, 0, align, flags | PIN_GLOBAL);
+ if (err != -ENOSPC)
+ return err;
+
+ /* Unlike i915_vma_pin, we don't take no for an answer! */
+ flush_idle_contexts(vm->gt);
+ if (mutex_lock_interruptible(&vm->mutex) == 0) {
+ i915_gem_evict_vm(vm);
+ mutex_unlock(&vm->mutex);
+ }
+ } while (1);
+}
+
void i915_vma_close(struct i915_vma *vma)
{
struct intel_gt *gt = vma->vm->gt;
@@ -978,8 +1028,10 @@ void i915_vma_reopen(struct i915_vma *vma)
__i915_vma_remove_closed(vma);
}
-void i915_vma_destroy(struct i915_vma *vma)
+void i915_vma_release(struct kref *ref)
{
+ struct i915_vma *vma = container_of(ref, typeof(*vma), ref);
+
if (drm_mm_node_allocated(&vma->node)) {
mutex_lock(&vma->vm->mutex);
atomic_and(~I915_VMA_PIN_MASK, &vma->flags);
@@ -1019,7 +1071,9 @@ void i915_vma_parked(struct intel_gt *gt)
if (!kref_get_unless_zero(&obj->base.refcount))
continue;
- if (!i915_vm_tryopen(vm)) {
+ if (i915_vm_tryopen(vm)) {
+ list_del_init(&vma->closed_link);
+ } else {
i915_gem_object_put(obj);
obj = NULL;
}
@@ -1027,7 +1081,7 @@ void i915_vma_parked(struct intel_gt *gt)
spin_unlock_irq(&gt->closed_lock);
if (obj) {
- i915_vma_destroy(vma);
+ __i915_vma_put(vma);
i915_gem_object_put(obj);
}
@@ -1054,17 +1108,16 @@ static void __i915_vma_iounmap(struct i915_vma *vma)
void i915_vma_revoke_mmap(struct i915_vma *vma)
{
- struct drm_vma_offset_node *node = &vma->obj->base.vma_node;
+ struct drm_vma_offset_node *node;
u64 vma_offset;
- lockdep_assert_held(&vma->vm->mutex);
-
if (!i915_vma_has_userfault(vma))
return;
GEM_BUG_ON(!i915_vma_is_map_and_fenceable(vma));
GEM_BUG_ON(!vma->obj->userfault_count);
+ node = &vma->mmo->vma_node;
vma_offset = vma->ggtt_view.partial.offset << PAGE_SHIFT;
unmap_mapping_range(vma->vm->i915->drm.anon_inode->i_mapping,
drm_vma_node_offset_addr(node) + vma_offset,
@@ -1152,7 +1205,7 @@ int __i915_vma_unbind(struct i915_vma *vma)
GEM_BUG_ON(i915_vma_is_active(vma));
if (i915_vma_is_pinned(vma)) {
vma_print_allocator(vma, "is pinned");
- return -EBUSY;
+ return -EAGAIN;
}
GEM_BUG_ON(i915_vma_is_active(vma));
@@ -1192,15 +1245,23 @@ int __i915_vma_unbind(struct i915_vma *vma)
i915_vma_detach(vma);
vma_unbind_pages(vma);
- drm_mm_remove_node(&vma->node); /* pairs with i915_vma_destroy() */
+ drm_mm_remove_node(&vma->node); /* pairs with i915_vma_release() */
return 0;
}
int i915_vma_unbind(struct i915_vma *vma)
{
struct i915_address_space *vm = vma->vm;
+ intel_wakeref_t wakeref = 0;
int err;
+ if (!drm_mm_node_allocated(&vma->node))
+ return 0;
+
+ if (i915_vma_is_bound(vma, I915_VMA_GLOBAL_BIND))
+ /* XXX not always required: nop_clear_range */
+ wakeref = intel_runtime_pm_get(&vm->i915->runtime_pm);
+
err = mutex_lock_interruptible(&vm->mutex);
if (err)
return err;
@@ -1208,6 +1269,9 @@ int i915_vma_unbind(struct i915_vma *vma)
err = __i915_vma_unbind(vma);
mutex_unlock(&vm->mutex);
+ if (wakeref)
+ intel_runtime_pm_put(&vm->i915->runtime_pm, wakeref);
+
return err;
}
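Callers that previously paired i915_vma_pin(..., flags | PIN_GLOBAL) with hand-rolled eviction can now lean on the helper's retry loop. An assumed call pattern (PIN_HIGH as an example placement flag):

err = i915_ggtt_pin(vma, 0, PIN_HIGH);
if (err)
	return err;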
diff --git a/drivers/gpu/drm/i915/i915_vma.h b/drivers/gpu/drm/i915/i915_vma.h
index 465932813bc5..02b31a62951e 100644
--- a/drivers/gpu/drm/i915/i915_vma.h
+++ b/drivers/gpu/drm/i915/i915_vma.h
@@ -30,139 +30,14 @@
#include <drm/drm_mm.h>
+#include "gem/i915_gem_object.h"
+
#include "i915_gem_gtt.h"
#include "i915_gem_fence_reg.h"
-#include "gem/i915_gem_object.h"
#include "i915_active.h"
#include "i915_request.h"
-
-enum i915_cache_level;
-
-/**
- * DOC: Virtual Memory Address
- *
- * A VMA represents a GEM BO that is bound into an address space. Therefore, a
- * VMA's presence cannot be guaranteed before binding, or after unbinding the
- * object into/from the address space.
- *
- * To make things as simple as possible (ie. no refcounting), a VMA's lifetime
- * will always be <= an objects lifetime. So object refcounting should cover us.
- */
-struct i915_vma {
- struct drm_mm_node node;
- struct drm_i915_gem_object *obj;
- struct i915_address_space *vm;
- const struct i915_vma_ops *ops;
- struct i915_fence_reg *fence;
- struct dma_resv *resv; /** Alias of obj->resv */
- struct sg_table *pages;
- void __iomem *iomap;
- void *private; /* owned by creator */
- u64 size;
- u64 display_alignment;
- struct i915_page_sizes page_sizes;
-
- u32 fence_size;
- u32 fence_alignment;
-
- /**
- * Count of the number of times this vma has been opened by different
- * handles (but same file) for execbuf, i.e. the number of aliases
- * that exist in the ctx->handle_vmas LUT for this vma.
- */
- atomic_t open_count;
- atomic_t flags;
- /**
- * How many users have pinned this object in GTT space.
- *
- * This is a tightly bound, fairly small number of users, so we
- * stuff inside the flags field so that we can both check for overflow
- * and detect a no-op i915_vma_pin() in a single check, while also
- * pinning the vma.
- *
- * The worst case display setup would have the same vma pinned for
- * use on each plane on each crtc, while also building the next atomic
- * state and holding a pin for the length of the cleanup queue. In the
- * future, the flip queue may be increased from 1.
- * Estimated worst case: 3 [qlen] * 4 [max crtcs] * 7 [max planes] = 84
- *
- * For GEM, the number of concurrent users for pwrite/pread is
- * unbounded. For execbuffer, it is currently one but will in future
- * be extended to allow multiple clients to pin vma concurrently.
- *
- * We also use suballocated pages, with each suballocation claiming
- * its own pin on the shared vma. At present, this is limited to
- * exclusive cachelines of a single page, so a maximum of 64 possible
- * users.
- */
-#define I915_VMA_PIN_MASK 0x3ff
-#define I915_VMA_OVERFLOW 0x200
-
- /** Flags and address space this VMA is bound to */
-#define I915_VMA_GLOBAL_BIND_BIT 10
-#define I915_VMA_LOCAL_BIND_BIT 11
-
-#define I915_VMA_GLOBAL_BIND ((int)BIT(I915_VMA_GLOBAL_BIND_BIT))
-#define I915_VMA_LOCAL_BIND ((int)BIT(I915_VMA_LOCAL_BIND_BIT))
-
-#define I915_VMA_BIND_MASK (I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND)
-
-#define I915_VMA_ALLOC_BIT 12
-#define I915_VMA_ALLOC ((int)BIT(I915_VMA_ALLOC_BIT))
-
-#define I915_VMA_ERROR_BIT 13
-#define I915_VMA_ERROR ((int)BIT(I915_VMA_ERROR_BIT))
-
-#define I915_VMA_GGTT_BIT 14
-#define I915_VMA_CAN_FENCE_BIT 15
-#define I915_VMA_USERFAULT_BIT 16
-#define I915_VMA_GGTT_WRITE_BIT 17
-
-#define I915_VMA_GGTT ((int)BIT(I915_VMA_GGTT_BIT))
-#define I915_VMA_CAN_FENCE ((int)BIT(I915_VMA_CAN_FENCE_BIT))
-#define I915_VMA_USERFAULT ((int)BIT(I915_VMA_USERFAULT_BIT))
-#define I915_VMA_GGTT_WRITE ((int)BIT(I915_VMA_GGTT_WRITE_BIT))
-
- struct i915_active active;
-
-#define I915_VMA_PAGES_BIAS 24
-#define I915_VMA_PAGES_ACTIVE (BIT(24) | 1)
- atomic_t pages_count; /* number of active binds to the pages */
- struct mutex pages_mutex; /* protect acquire/release of backing pages */
-
- /**
- * Support different GGTT views into the same object.
- * This means there can be multiple VMA mappings per object and per VM.
- * i915_ggtt_view_type is used to distinguish between those entries.
- * The default one of zero (I915_GGTT_VIEW_NORMAL) is default and also
- * assumed in GEM functions which take no ggtt view parameter.
- */
- struct i915_ggtt_view ggtt_view;
-
- /** This object's place on the active/inactive lists */
- struct list_head vm_link;
-
- struct list_head obj_link; /* Link in the object's VMA list */
- struct rb_node obj_node;
- struct hlist_node obj_hash;
-
- /** This vma's place in the execbuf reservation list */
- struct list_head exec_link;
- struct list_head reloc_link;
-
- /** This vma's place in the eviction list */
- struct list_head evict_link;
-
- struct list_head closed_link;
-
- /**
- * Used for performing relocations during execbuffer insertion.
- */
- unsigned int *exec_flags;
- struct hlist_node exec_node;
- u32 exec_handle;
-};
+#include "i915_vma_types.h"
struct i915_vma *
i915_vma_instance(struct drm_i915_gem_object *obj,
@@ -333,7 +208,20 @@ int __must_check i915_vma_unbind(struct i915_vma *vma);
void i915_vma_unlink_ctx(struct i915_vma *vma);
void i915_vma_close(struct i915_vma *vma);
void i915_vma_reopen(struct i915_vma *vma);
-void i915_vma_destroy(struct i915_vma *vma);
+
+static inline struct i915_vma *__i915_vma_get(struct i915_vma *vma)
+{
+ if (kref_get_unless_zero(&vma->ref))
+ return vma;
+
+ return NULL;
+}
+
+void i915_vma_release(struct kref *ref);
+static inline void __i915_vma_put(struct i915_vma *vma)
+{
+ kref_put(&vma->ref, i915_vma_release);
+}
#define assert_vma_held(vma) dma_resv_assert_held((vma)->resv)
@@ -349,6 +237,7 @@ static inline void i915_vma_unlock(struct i915_vma *vma)
int __must_check
i915_vma_pin(struct i915_vma *vma, u64 size, u64 alignment, u64 flags);
+int i915_ggtt_pin(struct i915_vma *vma, u32 align, unsigned int flags);
static inline int i915_vma_pin_count(const struct i915_vma *vma)
{
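With the vma now refcounted, code that walks a shared list can take a temporary reference that tolerates a concurrent release. A minimal sketch, assuming the caller holds whatever lock protects the list on which vma was found:

struct i915_vma *ref = __i915_vma_get(vma);

if (ref) {
	/* ref pins the vma; it cannot be freed in this window */
	__i915_vma_put(ref);
}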
diff --git a/drivers/gpu/drm/i915/i915_vma_types.h b/drivers/gpu/drm/i915/i915_vma_types.h
new file mode 100644
index 000000000000..e0942efd5236
--- /dev/null
+++ b/drivers/gpu/drm/i915/i915_vma_types.h
@@ -0,0 +1,294 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2016 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ */
+
+#ifndef __I915_VMA_TYPES_H__
+#define __I915_VMA_TYPES_H__
+
+#include <linux/rbtree.h>
+
+#include <drm/drm_mm.h>
+
+#include "gem/i915_gem_object_types.h"
+
+enum i915_cache_level;
+
+/**
+ * DOC: Global GTT views
+ *
+ * Background and previous state
+ *
+ * Historically objects could exist (be bound) in global GTT space only as
+ * singular instances with a view representing all of the object's backing pages
+ * in a linear fashion. This view will be called a normal view.
+ *
+ * To support multiple views of the same object, where the number of mapped
+ * pages is not equal to the backing store, or where the layout of the pages
+ * is not linear, the concept of a GGTT view was added.
+ *
+ * One example of an alternative view is a stereo display driven by a single
+ * image. In this case we would have a framebuffer looking like this
+ * (2x2 pages):
+ *
+ * 12
+ * 34
+ *
+ * The above would represent a normal GGTT view as normally mapped for GPU or CPU
+ * rendering. In contrast, fed to the display engine would be an alternative
+ * view which could look something like this:
+ *
+ * 1212
+ * 3434
+ *
+ * In this example both the size and layout of pages in the alternative view are
+ * different from the normal view.
+ *
+ * Implementation and usage
+ *
+ * GGTT views are implemented using VMAs and are distinguished via enum
+ * i915_ggtt_view_type and struct i915_ggtt_view.
+ *
+ * A new flavour of core GEM functions which work with GGTT bound objects was
+ * added with the _ggtt_ infix, and sometimes with a _view postfix, to avoid
+ * renaming in large amounts of code. They take the struct i915_ggtt_view
+ * parameter encapsulating all metadata required to implement a view.
+ *
+ * As a helper for callers which are only interested in the normal view, a
+ * globally const i915_ggtt_view_normal singleton instance exists. All old core
+ * GEM API functions (the ones not taking a view parameter) operate on the
+ * normal GGTT view.
+ *
+ * Code wanting to add or use a new GGTT view needs to:
+ *
+ * 1. Add a new enum with a suitable name.
+ * 2. Extend the metadata in the i915_ggtt_view structure if required.
+ * 3. Add support to i915_get_vma_pages().
+ *
+ * New views are required to build a scatter-gather table from within the
+ * i915_get_vma_pages function. This table is stored in the vma.ggtt_view and
+ * exists for the lifetime of a VMA.
+ *
+ * The core API is designed with copy semantics, which means that a passed-in
+ * struct i915_ggtt_view does not need to be persistent (left around after
+ * calling the core API functions).
+ *
+ */
+
+struct intel_remapped_plane_info {
+ /* in gtt pages */
+ unsigned int width, height, stride, offset;
+} __packed;
+
+struct intel_remapped_info {
+ struct intel_remapped_plane_info plane[2];
+ unsigned int unused_mbz;
+} __packed;
+
+struct intel_rotation_info {
+ struct intel_remapped_plane_info plane[2];
+} __packed;
+
+struct intel_partial_info {
+ u64 offset;
+ unsigned int size;
+} __packed;
+
+enum i915_ggtt_view_type {
+ I915_GGTT_VIEW_NORMAL = 0,
+ I915_GGTT_VIEW_ROTATED = sizeof(struct intel_rotation_info),
+ I915_GGTT_VIEW_PARTIAL = sizeof(struct intel_partial_info),
+ I915_GGTT_VIEW_REMAPPED = sizeof(struct intel_remapped_info),
+};
+
+static inline void assert_i915_gem_gtt_types(void)
+{
+ BUILD_BUG_ON(sizeof(struct intel_rotation_info) != 8*sizeof(unsigned int));
+ BUILD_BUG_ON(sizeof(struct intel_partial_info) != sizeof(u64) + sizeof(unsigned int));
+ BUILD_BUG_ON(sizeof(struct intel_remapped_info) != 9*sizeof(unsigned int));
+
+ /* Check that rotation/remapped share offsets for simplicity */
+ BUILD_BUG_ON(offsetof(struct intel_remapped_info, plane[0]) !=
+ offsetof(struct intel_rotation_info, plane[0]));
+ BUILD_BUG_ON(offsetofend(struct intel_remapped_info, plane[1]) !=
+ offsetofend(struct intel_rotation_info, plane[1]));
+
+ /* As we encode the size of each branch inside the union into its type,
+ * we have to be careful that each branch has a unique size.
+ */
+ switch ((enum i915_ggtt_view_type)0) {
+ case I915_GGTT_VIEW_NORMAL:
+ case I915_GGTT_VIEW_PARTIAL:
+ case I915_GGTT_VIEW_ROTATED:
+ case I915_GGTT_VIEW_REMAPPED:
+ /* gcc complains if these are identical cases */
+ break;
+ }
+}
+
+struct i915_ggtt_view {
+ enum i915_ggtt_view_type type;
+ union {
+ /* Members need to contain no holes/padding */
+ struct intel_partial_info partial;
+ struct intel_rotation_info rotated;
+ struct intel_remapped_info remapped;
+ };
+};
+
+/**
+ * DOC: Virtual Memory Address
+ *
+ * A VMA represents a GEM BO that is bound into an address space. Therefore, a
+ * VMA's presence cannot be guaranteed before binding, or after unbinding the
+ * object into/from the address space.
+ *
+ * To make things as simple as possible (i.e. no refcounting), a VMA's lifetime
+ * will always be <= an object's lifetime. So object refcounting should cover us.
+ */
+struct i915_vma {
+ struct drm_mm_node node;
+
+ struct i915_address_space *vm;
+ const struct i915_vma_ops *ops;
+
+ struct drm_i915_gem_object *obj;
+ struct dma_resv *resv; /** Alias of obj->resv */
+
+ struct sg_table *pages;
+ void __iomem *iomap;
+ void *private; /* owned by creator */
+
+ struct i915_fence_reg *fence;
+
+ u64 size;
+ u64 display_alignment;
+ struct i915_page_sizes page_sizes;
+
+ /* mmap-offset associated with fencing for this vma */
+ struct i915_mmap_offset *mmo;
+
+ u32 fence_size;
+ u32 fence_alignment;
+
+ /**
+ * Count of the number of times this vma has been opened by different
+ * handles (but same file) for execbuf, i.e. the number of aliases
+ * that exist in the ctx->handle_vmas LUT for this vma.
+ */
+ struct kref ref;
+ atomic_t open_count;
+ atomic_t flags;
+ /**
+ * How many users have pinned this object in GTT space.
+ *
+ * This is a tightly bound, fairly small number of users, so we
+ * stuff inside the flags field so that we can both check for overflow
+ * and detect a no-op i915_vma_pin() in a single check, while also
+ * pinning the vma.
+ *
+ * The worst case display setup would have the same vma pinned for
+ * use on each plane on each crtc, while also building the next atomic
+ * state and holding a pin for the length of the cleanup queue. In the
+ * future, the flip queue may be increased from 1.
+ * Estimated worst case: 3 [qlen] * 4 [max crtcs] * 7 [max planes] = 84
+ *
+ * For GEM, the number of concurrent users for pwrite/pread is
+ * unbounded. For execbuffer, it is currently one but will in future
+ * be extended to allow multiple clients to pin vma concurrently.
+ *
+ * We also use suballocated pages, with each suballocation claiming
+ * its own pin on the shared vma. At present, this is limited to
+ * exclusive cachelines of a single page, so a maximum of 64 possible
+ * users.
+ */
+#define I915_VMA_PIN_MASK 0x3ff
+#define I915_VMA_OVERFLOW 0x200
+
+ /** Flags and address space this VMA is bound to */
+#define I915_VMA_GLOBAL_BIND_BIT 10
+#define I915_VMA_LOCAL_BIND_BIT 11
+
+#define I915_VMA_GLOBAL_BIND ((int)BIT(I915_VMA_GLOBAL_BIND_BIT))
+#define I915_VMA_LOCAL_BIND ((int)BIT(I915_VMA_LOCAL_BIND_BIT))
+
+#define I915_VMA_BIND_MASK (I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND)
+
+#define I915_VMA_ALLOC_BIT 12
+#define I915_VMA_ALLOC ((int)BIT(I915_VMA_ALLOC_BIT))
+
+#define I915_VMA_ERROR_BIT 13
+#define I915_VMA_ERROR ((int)BIT(I915_VMA_ERROR_BIT))
+
+#define I915_VMA_GGTT_BIT 14
+#define I915_VMA_CAN_FENCE_BIT 15
+#define I915_VMA_USERFAULT_BIT 16
+#define I915_VMA_GGTT_WRITE_BIT 17
+
+#define I915_VMA_GGTT ((int)BIT(I915_VMA_GGTT_BIT))
+#define I915_VMA_CAN_FENCE ((int)BIT(I915_VMA_CAN_FENCE_BIT))
+#define I915_VMA_USERFAULT ((int)BIT(I915_VMA_USERFAULT_BIT))
+#define I915_VMA_GGTT_WRITE ((int)BIT(I915_VMA_GGTT_WRITE_BIT))
+
+ struct i915_active active;
+
+#define I915_VMA_PAGES_BIAS 24
+#define I915_VMA_PAGES_ACTIVE (BIT(24) | 1)
+ atomic_t pages_count; /* number of active binds to the pages */
+ struct mutex pages_mutex; /* protect acquire/release of backing pages */
+
+ /**
+ * Support different GGTT views into the same object.
+ * This means there can be multiple VMA mappings per object and per VM.
+ * i915_ggtt_view_type is used to distinguish between those entries.
+ * The default of zero (I915_GGTT_VIEW_NORMAL) is also assumed in GEM
+ * functions which take no ggtt view parameter.
+ */
+ struct i915_ggtt_view ggtt_view;
+
+ /** This object's place on the active/inactive lists */
+ struct list_head vm_link;
+
+ struct list_head obj_link; /* Link in the object's VMA list */
+ struct rb_node obj_node;
+ struct hlist_node obj_hash;
+
+ /** This vma's place in the execbuf reservation list */
+ struct list_head exec_link;
+ struct list_head reloc_link;
+
+ /** This vma's place in the eviction list */
+ struct list_head evict_link;
+
+ struct list_head closed_link;
+
+ /**
+ * Used for performing relocations during execbuffer insertion.
+ */
+ unsigned int *exec_flags;
+ struct hlist_node exec_node;
+ u32 exec_handle;
+};
+
+#endif
+
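Because each view type's enum value equals the sizeof() of its union member, generic code can compare two views without a per-type switch: the type doubles as the number of valid payload bytes. A sketch of that use, mirroring how i915 compares vma views (ggtt_view_equal is illustrative):

#include <linux/string.h>

static bool ggtt_view_equal(const struct i915_ggtt_view *a,
			    const struct i915_ggtt_view *b)
{
	if (a->type != b->type)
		return false;

	/* a->type == valid bytes in the union; NORMAL compares 0 bytes */
	return memcmp(&a->partial, &b->partial, a->type) == 0;
}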
diff --git a/drivers/gpu/drm/i915/intel_device_info.c b/drivers/gpu/drm/i915/intel_device_info.c
index a5b571364cf6..6670a0763be2 100644
--- a/drivers/gpu/drm/i915/intel_device_info.c
+++ b/drivers/gpu/drm/i915/intel_device_info.c
@@ -73,9 +73,30 @@ const char *intel_platform_name(enum intel_platform platform)
return platform_names[platform];
}
-void intel_device_info_dump_flags(const struct intel_device_info *info,
- struct drm_printer *p)
+static const char *iommu_name(void)
{
+ const char *msg = "n/a";
+
+#ifdef CONFIG_INTEL_IOMMU
+ msg = enableddisabled(intel_iommu_gfx_mapped);
+#endif
+
+ return msg;
+}
+
+void intel_device_info_print_static(const struct intel_device_info *info,
+ struct drm_printer *p)
+{
+ drm_printf(p, "engines: %x\n", info->engine_mask);
+ drm_printf(p, "gen: %d\n", info->gen);
+ drm_printf(p, "gt: %d\n", info->gt);
+ drm_printf(p, "iommu: %s\n", iommu_name());
+ drm_printf(p, "memory-regions: %x\n", info->memory_regions);
+ drm_printf(p, "page-sizes: %x\n", info->page_sizes);
+ drm_printf(p, "platform: %s\n", intel_platform_name(info->platform));
+ drm_printf(p, "ppgtt-size: %d\n", info->ppgtt_size);
+ drm_printf(p, "ppgtt-type: %d\n", info->ppgtt_type);
+
#define PRINT_FLAG(name) drm_printf(p, "%s: %s\n", #name, yesno(info->name));
DEV_INFO_FOR_EACH_FLAG(PRINT_FLAG);
#undef PRINT_FLAG
@@ -106,8 +127,8 @@ static void sseu_dump(const struct sseu_dev_info *sseu, struct drm_printer *p)
drm_printf(p, "has EU power gating: %s\n", yesno(sseu->has_eu_pg));
}
-void intel_device_info_dump_runtime(const struct intel_runtime_info *info,
- struct drm_printer *p)
+void intel_device_info_print_runtime(const struct intel_runtime_info *info,
+ struct drm_printer *p)
{
sseu_dump(&info->sseu, p);
@@ -148,8 +169,8 @@ static void sseu_set_eus(struct sseu_dev_info *sseu, int slice, int subslice,
}
}
-void intel_device_info_dump_topology(const struct sseu_dev_info *sseu,
- struct drm_printer *p)
+void intel_device_info_print_topology(const struct sseu_dev_info *sseu,
+ struct drm_printer *p)
{
int s, ss;
@@ -498,7 +519,7 @@ static void gen9_sseu_info_init(struct drm_i915_private *dev_priv)
}
}
-static void broadwell_sseu_info_init(struct drm_i915_private *dev_priv)
+static void bdw_sseu_info_init(struct drm_i915_private *dev_priv)
{
struct sseu_dev_info *sseu = &RUNTIME_INFO(dev_priv)->sseu;
int s, ss;
@@ -579,7 +600,7 @@ static void broadwell_sseu_info_init(struct drm_i915_private *dev_priv)
sseu->has_eu_pg = 0;
}
-static void haswell_sseu_info_init(struct drm_i915_private *dev_priv)
+static void hsw_sseu_info_init(struct drm_i915_private *dev_priv)
{
struct sseu_dev_info *sseu = &RUNTIME_INFO(dev_priv)->sseu;
u32 fuse1;
@@ -808,6 +829,8 @@ static const u16 subplatform_ult_ids[] = {
INTEL_WHL_U_GT1_IDS(0),
INTEL_WHL_U_GT2_IDS(0),
INTEL_WHL_U_GT3_IDS(0),
+ INTEL_CML_U_GT1_IDS(0),
+ INTEL_CML_U_GT2_IDS(0),
};
static const u16 subplatform_ulx_ids[] = {
@@ -998,11 +1021,11 @@ void intel_device_info_runtime_init(struct drm_i915_private *dev_priv)
/* Initialize slice/subslice/EU info */
if (IS_HASWELL(dev_priv))
- haswell_sseu_info_init(dev_priv);
+ hsw_sseu_info_init(dev_priv);
else if (IS_CHERRYVIEW(dev_priv))
cherryview_sseu_info_init(dev_priv);
else if (IS_BROADWELL(dev_priv))
- broadwell_sseu_info_init(dev_priv);
+ bdw_sseu_info_init(dev_priv);
else if (IS_GEN(dev_priv, 9))
gen9_sseu_info_init(dev_priv);
else if (IS_GEN(dev_priv, 10))
@@ -1070,7 +1093,7 @@ void intel_device_info_init_mmio(struct drm_i915_private *dev_priv)
* hooked up to an SFC (Scaler & Format Converter) unit.
* In TGL each VDBOX has access to an SFC.
*/
- if (IS_TIGERLAKE(dev_priv) || logical_vdbox++ % 2 == 0)
+ if (INTEL_GEN(dev_priv) >= 12 || logical_vdbox++ % 2 == 0)
RUNTIME_INFO(dev_priv)->vdbox_sfc_access |= BIT(i);
}
DRM_DEBUG_DRIVER("vdbox enable: %04x, instances: %04lx\n",
diff --git a/drivers/gpu/drm/i915/intel_device_info.h b/drivers/gpu/drm/i915/intel_device_info.h
index 4bdf8a6cfb47..2725cb7fc169 100644
--- a/drivers/gpu/drm/i915/intel_device_info.h
+++ b/drivers/gpu/drm/i915/intel_device_info.h
@@ -230,12 +230,13 @@ const char *intel_platform_name(enum intel_platform platform);
void intel_device_info_subplatform_init(struct drm_i915_private *dev_priv);
void intel_device_info_runtime_init(struct drm_i915_private *dev_priv);
-void intel_device_info_dump_flags(const struct intel_device_info *info,
- struct drm_printer *p);
-void intel_device_info_dump_runtime(const struct intel_runtime_info *info,
+
+void intel_device_info_print_static(const struct intel_device_info *info,
struct drm_printer *p);
-void intel_device_info_dump_topology(const struct sseu_dev_info *sseu,
+void intel_device_info_print_runtime(const struct intel_runtime_info *info,
struct drm_printer *p);
+void intel_device_info_print_topology(const struct sseu_dev_info *sseu,
+ struct drm_printer *p);
void intel_device_info_init_mmio(struct drm_i915_private *dev_priv);
diff --git a/drivers/gpu/drm/i915/intel_memory_region.c b/drivers/gpu/drm/i915/intel_memory_region.c
index baaeaecc64af..d0d038b3cd79 100644
--- a/drivers/gpu/drm/i915/intel_memory_region.c
+++ b/drivers/gpu/drm/i915/intel_memory_region.c
@@ -16,6 +16,20 @@ const u32 intel_region_map[] = {
[INTEL_REGION_STOLEN] = REGION_MAP(INTEL_MEMORY_STOLEN, 0),
};
+struct intel_memory_region *
+intel_memory_region_by_type(struct drm_i915_private *i915,
+ enum intel_memory_type mem_type)
+{
+ struct intel_memory_region *mr;
+ int id;
+
+ for_each_memory_region(mr, i915, id)
+ if (mr->type == mem_type)
+ return mr;
+
+ return NULL;
+}
+
static u64
intel_memory_region_free_pages(struct intel_memory_region *mem,
struct list_head *blocks)
@@ -37,7 +51,7 @@ __intel_memory_region_put_pages_buddy(struct intel_memory_region *mem,
struct list_head *blocks)
{
mutex_lock(&mem->mm_lock);
- intel_memory_region_free_pages(mem, blocks);
+ mem->avail += intel_memory_region_free_pages(mem, blocks);
mutex_unlock(&mem->mm_lock);
}
@@ -73,6 +87,9 @@ __intel_memory_region_get_pages_buddy(struct intel_memory_region *mem,
min_order = ilog2(size) - ilog2(mem->mm.chunk_size);
}
+ if (size > BIT(mem->mm.max_order) * mem->mm.chunk_size)
+ return -E2BIG;
+
n_pages = size >> ilog2(mem->mm.chunk_size);
mutex_lock(&mem->mm_lock);
@@ -103,6 +120,7 @@ __intel_memory_region_get_pages_buddy(struct intel_memory_region *mem,
break;
} while (1);
+ mem->avail -= size;
mutex_unlock(&mem->mm_lock);
return 0;
@@ -161,6 +179,8 @@ intel_memory_region_create(struct drm_i915_private *i915,
mem->io_start = io_start;
mem->min_page_size = min_page_size;
mem->ops = ops;
+ mem->total = size;
+ mem->avail = mem->total;
mutex_init(&mem->objects.lock);
INIT_LIST_HEAD(&mem->objects.list);
@@ -182,6 +202,16 @@ err_free:
return ERR_PTR(err);
}
+void intel_memory_region_set_name(struct intel_memory_region *mem,
+ const char *fmt, ...)
+{
+ va_list ap;
+
+ va_start(ap, fmt);
+ vsnprintf(mem->name, sizeof(mem->name), fmt, ap);
+ va_end(ap);
+}
+
static void __intel_memory_region_destroy(struct kref *kref)
{
struct intel_memory_region *mem =
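With total and avail now maintained under mm_lock, the new lookup helper makes it easy to report occupancy for a given region type. An assumed usage, with INTEL_MEMORY_LOCAL as the example type and p an existing drm_printer:

struct intel_memory_region *mr =
	intel_memory_region_by_type(i915, INTEL_MEMORY_LOCAL);

if (mr)
	drm_printf(p, "%s: %pa available of %pa\n",
		   mr->name, &mr->avail, &mr->total);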
diff --git a/drivers/gpu/drm/i915/intel_memory_region.h b/drivers/gpu/drm/i915/intel_memory_region.h
index 238722009677..232490d89a83 100644
--- a/drivers/gpu/drm/i915/intel_memory_region.h
+++ b/drivers/gpu/drm/i915/intel_memory_region.h
@@ -47,6 +47,10 @@ enum intel_region_id {
#define I915_ALLOC_MIN_PAGE_SIZE BIT(0)
#define I915_ALLOC_CONTIGUOUS BIT(1)
+#define for_each_memory_region(mr, i915, id) \
+ for (id = 0; id < ARRAY_SIZE((i915)->mm.regions); id++) \
+ for_each_if((mr) = (i915)->mm.regions[id])
+
/**
* Memory regions encoded as type | instance
*/
@@ -82,10 +86,13 @@ struct intel_memory_region {
resource_size_t io_start;
resource_size_t min_page_size;
+ resource_size_t total;
+ resource_size_t avail;
unsigned int type;
unsigned int instance;
unsigned int id;
+ char name[8];
dma_addr_t remap_addr;
@@ -125,5 +132,12 @@ void intel_memory_region_put(struct intel_memory_region *mem);
int intel_memory_regions_hw_probe(struct drm_i915_private *i915);
void intel_memory_regions_driver_release(struct drm_i915_private *i915);
+struct intel_memory_region *
+intel_memory_region_by_type(struct drm_i915_private *i915,
+ enum intel_memory_type mem_type);
+
+__printf(2, 3) void
+intel_memory_region_set_name(struct intel_memory_region *mem,
+ const char *fmt, ...);
#endif
diff --git a/drivers/gpu/drm/i915/intel_pch.c b/drivers/gpu/drm/i915/intel_pch.c
index 8fd92b9130a7..4ed60e1f01db 100644
--- a/drivers/gpu/drm/i915/intel_pch.c
+++ b/drivers/gpu/drm/i915/intel_pch.c
@@ -12,89 +12,91 @@ intel_pch_type(const struct drm_i915_private *dev_priv, unsigned short id)
{
switch (id) {
case INTEL_PCH_IBX_DEVICE_ID_TYPE:
- DRM_DEBUG_KMS("Found Ibex Peak PCH\n");
+ drm_dbg_kms(&dev_priv->drm, "Found Ibex Peak PCH\n");
WARN_ON(!IS_GEN(dev_priv, 5));
return PCH_IBX;
case INTEL_PCH_CPT_DEVICE_ID_TYPE:
- DRM_DEBUG_KMS("Found CougarPoint PCH\n");
+ drm_dbg_kms(&dev_priv->drm, "Found CougarPoint PCH\n");
WARN_ON(!IS_GEN(dev_priv, 6) && !IS_IVYBRIDGE(dev_priv));
return PCH_CPT;
case INTEL_PCH_PPT_DEVICE_ID_TYPE:
- DRM_DEBUG_KMS("Found PantherPoint PCH\n");
+ drm_dbg_kms(&dev_priv->drm, "Found PantherPoint PCH\n");
WARN_ON(!IS_GEN(dev_priv, 6) && !IS_IVYBRIDGE(dev_priv));
/* PantherPoint is CPT compatible */
return PCH_CPT;
case INTEL_PCH_LPT_DEVICE_ID_TYPE:
- DRM_DEBUG_KMS("Found LynxPoint PCH\n");
+ drm_dbg_kms(&dev_priv->drm, "Found LynxPoint PCH\n");
WARN_ON(!IS_HASWELL(dev_priv) && !IS_BROADWELL(dev_priv));
WARN_ON(IS_HSW_ULT(dev_priv) || IS_BDW_ULT(dev_priv));
return PCH_LPT;
case INTEL_PCH_LPT_LP_DEVICE_ID_TYPE:
- DRM_DEBUG_KMS("Found LynxPoint LP PCH\n");
+ drm_dbg_kms(&dev_priv->drm, "Found LynxPoint LP PCH\n");
WARN_ON(!IS_HASWELL(dev_priv) && !IS_BROADWELL(dev_priv));
WARN_ON(!IS_HSW_ULT(dev_priv) && !IS_BDW_ULT(dev_priv));
return PCH_LPT;
case INTEL_PCH_WPT_DEVICE_ID_TYPE:
- DRM_DEBUG_KMS("Found WildcatPoint PCH\n");
+ drm_dbg_kms(&dev_priv->drm, "Found WildcatPoint PCH\n");
WARN_ON(!IS_HASWELL(dev_priv) && !IS_BROADWELL(dev_priv));
WARN_ON(IS_HSW_ULT(dev_priv) || IS_BDW_ULT(dev_priv));
/* WildcatPoint is LPT compatible */
return PCH_LPT;
case INTEL_PCH_WPT_LP_DEVICE_ID_TYPE:
- DRM_DEBUG_KMS("Found WildcatPoint LP PCH\n");
+ drm_dbg_kms(&dev_priv->drm, "Found WildcatPoint LP PCH\n");
WARN_ON(!IS_HASWELL(dev_priv) && !IS_BROADWELL(dev_priv));
WARN_ON(!IS_HSW_ULT(dev_priv) && !IS_BDW_ULT(dev_priv));
/* WildcatPoint is LPT compatible */
return PCH_LPT;
case INTEL_PCH_SPT_DEVICE_ID_TYPE:
- DRM_DEBUG_KMS("Found SunrisePoint PCH\n");
+ drm_dbg_kms(&dev_priv->drm, "Found SunrisePoint PCH\n");
WARN_ON(!IS_SKYLAKE(dev_priv) && !IS_KABYLAKE(dev_priv));
return PCH_SPT;
case INTEL_PCH_SPT_LP_DEVICE_ID_TYPE:
- DRM_DEBUG_KMS("Found SunrisePoint LP PCH\n");
+ drm_dbg_kms(&dev_priv->drm, "Found SunrisePoint LP PCH\n");
WARN_ON(!IS_SKYLAKE(dev_priv) && !IS_KABYLAKE(dev_priv) &&
!IS_COFFEELAKE(dev_priv));
return PCH_SPT;
case INTEL_PCH_KBP_DEVICE_ID_TYPE:
- DRM_DEBUG_KMS("Found Kaby Lake PCH (KBP)\n");
+ drm_dbg_kms(&dev_priv->drm, "Found Kaby Lake PCH (KBP)\n");
WARN_ON(!IS_SKYLAKE(dev_priv) && !IS_KABYLAKE(dev_priv) &&
!IS_COFFEELAKE(dev_priv));
/* KBP is SPT compatible */
return PCH_SPT;
case INTEL_PCH_CNP_DEVICE_ID_TYPE:
- DRM_DEBUG_KMS("Found Cannon Lake PCH (CNP)\n");
+ drm_dbg_kms(&dev_priv->drm, "Found Cannon Lake PCH (CNP)\n");
WARN_ON(!IS_CANNONLAKE(dev_priv) && !IS_COFFEELAKE(dev_priv));
return PCH_CNP;
case INTEL_PCH_CNP_LP_DEVICE_ID_TYPE:
- DRM_DEBUG_KMS("Found Cannon Lake LP PCH (CNP-LP)\n");
+ drm_dbg_kms(&dev_priv->drm,
+ "Found Cannon Lake LP PCH (CNP-LP)\n");
WARN_ON(!IS_CANNONLAKE(dev_priv) && !IS_COFFEELAKE(dev_priv));
return PCH_CNP;
case INTEL_PCH_CMP_DEVICE_ID_TYPE:
case INTEL_PCH_CMP2_DEVICE_ID_TYPE:
- DRM_DEBUG_KMS("Found Comet Lake PCH (CMP)\n");
+ drm_dbg_kms(&dev_priv->drm, "Found Comet Lake PCH (CMP)\n");
WARN_ON(!IS_COFFEELAKE(dev_priv));
/* CometPoint is CNP Compatible */
return PCH_CNP;
case INTEL_PCH_CMP_V_DEVICE_ID_TYPE:
- DRM_DEBUG_KMS("Found Comet Lake V PCH (CMP-V)\n");
+ drm_dbg_kms(&dev_priv->drm, "Found Comet Lake V PCH (CMP-V)\n");
WARN_ON(!IS_COFFEELAKE(dev_priv));
/* Comet Lake V PCH is based on KBP, which is SPT compatible */
return PCH_SPT;
case INTEL_PCH_ICP_DEVICE_ID_TYPE:
- DRM_DEBUG_KMS("Found Ice Lake PCH\n");
+ drm_dbg_kms(&dev_priv->drm, "Found Ice Lake PCH\n");
WARN_ON(!IS_ICELAKE(dev_priv));
return PCH_ICP;
case INTEL_PCH_MCC_DEVICE_ID_TYPE:
- DRM_DEBUG_KMS("Found Mule Creek Canyon PCH\n");
+ drm_dbg_kms(&dev_priv->drm, "Found Mule Creek Canyon PCH\n");
WARN_ON(!IS_ELKHARTLAKE(dev_priv));
return PCH_MCC;
case INTEL_PCH_TGP_DEVICE_ID_TYPE:
- DRM_DEBUG_KMS("Found Tiger Lake LP PCH\n");
+ case INTEL_PCH_TGP2_DEVICE_ID_TYPE:
+ drm_dbg_kms(&dev_priv->drm, "Found Tiger Lake LP PCH\n");
WARN_ON(!IS_TIGERLAKE(dev_priv));
return PCH_TGP;
case INTEL_PCH_JSP_DEVICE_ID_TYPE:
case INTEL_PCH_JSP2_DEVICE_ID_TYPE:
- DRM_DEBUG_KMS("Found Jasper Lake PCH\n");
+ drm_dbg_kms(&dev_priv->drm, "Found Jasper Lake PCH\n");
WARN_ON(!IS_ELKHARTLAKE(dev_priv));
return PCH_JSP;
default:
@@ -144,9 +146,9 @@ intel_virt_detect_pch(const struct drm_i915_private *dev_priv)
id = INTEL_PCH_IBX_DEVICE_ID_TYPE;
if (id)
- DRM_DEBUG_KMS("Assuming PCH ID %04x\n", id);
+ drm_dbg_kms(&dev_priv->drm, "Assuming PCH ID %04x\n", id);
else
- DRM_DEBUG_KMS("Assuming no PCH\n");
+ drm_dbg_kms(&dev_priv->drm, "Assuming no PCH\n");
return id;
}
@@ -200,13 +202,14 @@ void intel_detect_pch(struct drm_i915_private *dev_priv)
* display.
*/
if (pch && !HAS_DISPLAY(dev_priv)) {
- DRM_DEBUG_KMS("Display disabled, reverting to NOP PCH\n");
+ drm_dbg_kms(&dev_priv->drm,
+ "Display disabled, reverting to NOP PCH\n");
dev_priv->pch_type = PCH_NOP;
dev_priv->pch_id = 0;
}
if (!pch)
- DRM_DEBUG_KMS("No PCH found.\n");
+ drm_dbg_kms(&dev_priv->drm, "No PCH found.\n");
pci_dev_put(pch);
}
diff --git a/drivers/gpu/drm/i915/intel_pch.h b/drivers/gpu/drm/i915/intel_pch.h
index d26c25dd8d54..3053d1ce398b 100644
--- a/drivers/gpu/drm/i915/intel_pch.h
+++ b/drivers/gpu/drm/i915/intel_pch.h
@@ -47,6 +47,7 @@ enum intel_pch {
#define INTEL_PCH_ICP_DEVICE_ID_TYPE 0x3480
#define INTEL_PCH_MCC_DEVICE_ID_TYPE 0x4B00
#define INTEL_PCH_TGP_DEVICE_ID_TYPE 0xA080
+#define INTEL_PCH_TGP2_DEVICE_ID_TYPE 0x4380
#define INTEL_PCH_JSP_DEVICE_ID_TYPE 0x4D80
#define INTEL_PCH_JSP2_DEVICE_ID_TYPE 0x3880
#define INTEL_PCH_P2X_DEVICE_ID_TYPE 0x7100
diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
index 86379eddc908..bd2d30ecc030 100644
--- a/drivers/gpu/drm/i915/intel_pm.c
+++ b/drivers/gpu/drm/i915/intel_pm.c
@@ -140,7 +140,7 @@ static void glk_init_clock_gating(struct drm_i915_private *dev_priv)
}
-static void i915_pineview_get_mem_freq(struct drm_i915_private *dev_priv)
+static void pnv_get_mem_freq(struct drm_i915_private *dev_priv)
{
u32 tmp;
@@ -178,7 +178,7 @@ static void i915_pineview_get_mem_freq(struct drm_i915_private *dev_priv)
dev_priv->is_ddr3 = (tmp & CSHRDDR3CTL_DDR3) ? 1 : 0;
}
-static void i915_ironlake_get_mem_freq(struct drm_i915_private *dev_priv)
+static void ilk_get_mem_freq(struct drm_i915_private *dev_priv)
{
u16 ddrpll, csipll;
@@ -199,8 +199,8 @@ static void i915_ironlake_get_mem_freq(struct drm_i915_private *dev_priv)
dev_priv->mem_freq = 1600;
break;
default:
- DRM_DEBUG_DRIVER("unknown memory frequency 0x%02x\n",
- ddrpll & 0xff);
+ drm_dbg(&dev_priv->drm, "unknown memory frequency 0x%02x\n",
+ ddrpll & 0xff);
dev_priv->mem_freq = 0;
break;
}
@@ -228,8 +228,8 @@ static void i915_ironlake_get_mem_freq(struct drm_i915_private *dev_priv)
dev_priv->fsb_freq = 6400;
break;
default:
- DRM_DEBUG_DRIVER("unknown fsb frequency 0x%04x\n",
- csipll & 0x3ff);
+ drm_dbg(&dev_priv->drm, "unknown fsb frequency 0x%04x\n",
+ csipll & 0x3ff);
dev_priv->fsb_freq = 0;
break;
}
@@ -314,7 +314,8 @@ static void chv_set_memory_dvfs(struct drm_i915_private *dev_priv, bool enable)
if (wait_for((vlv_punit_read(dev_priv, PUNIT_REG_DDR_SETUP2) &
FORCE_DDR_FREQ_REQ_ACK) == 0, 3))
- DRM_ERROR("timed out waiting for Punit DDR DVFS request\n");
+ drm_err(&dev_priv->drm,
+ "timed out waiting for Punit DDR DVFS request\n");
vlv_punit_put(dev_priv);
}
@@ -383,9 +384,9 @@ static bool _intel_set_memory_cxsr(struct drm_i915_private *dev_priv, bool enabl
trace_intel_memory_cxsr(dev_priv, was_enabled, enable);
- DRM_DEBUG_KMS("memory self-refresh is %s (was %s)\n",
- enableddisabled(enable),
- enableddisabled(was_enabled));
+ drm_dbg_kms(&dev_priv->drm, "memory self-refresh is %s (was %s)\n",
+ enableddisabled(enable),
+ enableddisabled(was_enabled));
return was_enabled;
}
@@ -463,7 +464,7 @@ static const int pessimal_latency_ns = 5000;
static void vlv_get_fifo_size(struct intel_crtc_state *crtc_state)
{
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
struct vlv_fifo_state *fifo_state = &crtc_state->wm.vlv.fifo_state;
enum pipe pipe = crtc->pipe;
@@ -510,8 +511,8 @@ static int i9xx_get_fifo_size(struct drm_i915_private *dev_priv,
if (i9xx_plane == PLANE_B)
size = ((dsparb >> DSPARB_CSTART_SHIFT) & 0x7f) - size;
- DRM_DEBUG_KMS("FIFO size - (0x%08x) %c: %d\n",
- dsparb, plane_name(i9xx_plane), size);
+ drm_dbg_kms(&dev_priv->drm, "FIFO size - (0x%08x) %c: %d\n",
+ dsparb, plane_name(i9xx_plane), size);
return size;
}
@@ -527,8 +528,8 @@ static int i830_get_fifo_size(struct drm_i915_private *dev_priv,
size = ((dsparb >> DSPARB_BEND_SHIFT) & 0x1ff) - size;
size >>= 1; /* Convert to cachelines */
- DRM_DEBUG_KMS("FIFO size - (0x%08x) %c: %d\n",
- dsparb, plane_name(i9xx_plane), size);
+ drm_dbg_kms(&dev_priv->drm, "FIFO size - (0x%08x) %c: %d\n",
+ dsparb, plane_name(i9xx_plane), size);
return size;
}
@@ -542,41 +543,45 @@ static int i845_get_fifo_size(struct drm_i915_private *dev_priv,
size = dsparb & 0x7f;
size >>= 2; /* Convert to cachelines */
- DRM_DEBUG_KMS("FIFO size - (0x%08x) %c: %d\n",
- dsparb, plane_name(i9xx_plane), size);
+ drm_dbg_kms(&dev_priv->drm, "FIFO size - (0x%08x) %c: %d\n",
+ dsparb, plane_name(i9xx_plane), size);
return size;
}
/* Pineview has different values for various configs */
-static const struct intel_watermark_params pineview_display_wm = {
+static const struct intel_watermark_params pnv_display_wm = {
.fifo_size = PINEVIEW_DISPLAY_FIFO,
.max_wm = PINEVIEW_MAX_WM,
.default_wm = PINEVIEW_DFT_WM,
.guard_size = PINEVIEW_GUARD_WM,
.cacheline_size = PINEVIEW_FIFO_LINE_SIZE,
};
-static const struct intel_watermark_params pineview_display_hplloff_wm = {
+
+static const struct intel_watermark_params pnv_display_hplloff_wm = {
.fifo_size = PINEVIEW_DISPLAY_FIFO,
.max_wm = PINEVIEW_MAX_WM,
.default_wm = PINEVIEW_DFT_HPLLOFF_WM,
.guard_size = PINEVIEW_GUARD_WM,
.cacheline_size = PINEVIEW_FIFO_LINE_SIZE,
};
-static const struct intel_watermark_params pineview_cursor_wm = {
+
+static const struct intel_watermark_params pnv_cursor_wm = {
.fifo_size = PINEVIEW_CURSOR_FIFO,
.max_wm = PINEVIEW_CURSOR_MAX_WM,
.default_wm = PINEVIEW_CURSOR_DFT_WM,
.guard_size = PINEVIEW_CURSOR_GUARD_WM,
.cacheline_size = PINEVIEW_FIFO_LINE_SIZE,
};
-static const struct intel_watermark_params pineview_cursor_hplloff_wm = {
+
+static const struct intel_watermark_params pnv_cursor_hplloff_wm = {
.fifo_size = PINEVIEW_CURSOR_FIFO,
.max_wm = PINEVIEW_CURSOR_MAX_WM,
.default_wm = PINEVIEW_CURSOR_DFT_WM,
.guard_size = PINEVIEW_CURSOR_GUARD_WM,
.cacheline_size = PINEVIEW_FIFO_LINE_SIZE,
};
+
static const struct intel_watermark_params i965_cursor_wm_info = {
.fifo_size = I965_CURSOR_FIFO,
.max_wm = I965_CURSOR_MAX_WM,
@@ -584,6 +589,7 @@ static const struct intel_watermark_params i965_cursor_wm_info = {
.guard_size = 2,
.cacheline_size = I915_FIFO_LINE_SIZE,
};
+
static const struct intel_watermark_params i945_wm_info = {
.fifo_size = I945_FIFO_SIZE,
.max_wm = I915_MAX_WM,
@@ -591,6 +597,7 @@ static const struct intel_watermark_params i945_wm_info = {
.guard_size = 2,
.cacheline_size = I915_FIFO_LINE_SIZE,
};
+
static const struct intel_watermark_params i915_wm_info = {
.fifo_size = I915_FIFO_SIZE,
.max_wm = I915_MAX_WM,
@@ -598,6 +605,7 @@ static const struct intel_watermark_params i915_wm_info = {
.guard_size = 2,
.cacheline_size = I915_FIFO_LINE_SIZE,
};
+
static const struct intel_watermark_params i830_a_wm_info = {
.fifo_size = I855GM_FIFO_SIZE,
.max_wm = I915_MAX_WM,
@@ -605,6 +613,7 @@ static const struct intel_watermark_params i830_a_wm_info = {
.guard_size = 2,
.cacheline_size = I830_FIFO_LINE_SIZE,
};
+
static const struct intel_watermark_params i830_bc_wm_info = {
.fifo_size = I855GM_FIFO_SIZE,
.max_wm = I915_MAX_WM/2,
@@ -612,6 +621,7 @@ static const struct intel_watermark_params i830_bc_wm_info = {
.guard_size = 2,
.cacheline_size = I830_FIFO_LINE_SIZE,
};
+
static const struct intel_watermark_params i845_wm_info = {
.fifo_size = I830_FIFO_SIZE,
.max_wm = I915_MAX_WM,
@@ -794,10 +804,10 @@ static int intel_wm_num_levels(struct drm_i915_private *dev_priv)
static bool intel_wm_plane_visible(const struct intel_crtc_state *crtc_state,
const struct intel_plane_state *plane_state)
{
- struct intel_plane *plane = to_intel_plane(plane_state->base.plane);
+ struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
/* FIXME check the 'enable' instead */
- if (!crtc_state->base.active)
+ if (!crtc_state->hw.active)
return false;
/*
@@ -809,9 +819,28 @@ static bool intel_wm_plane_visible(const struct intel_crtc_state *crtc_state,
* around this problem with the watermark code.
*/
if (plane->id == PLANE_CURSOR)
- return plane_state->base.fb != NULL;
+ return plane_state->hw.fb != NULL;
else
- return plane_state->base.visible;
+ return plane_state->uapi.visible;
+}
+
+static bool intel_crtc_active(struct intel_crtc *crtc)
+{
+ /*
+ * Be paranoid as we can arrive here with only partial
+ * state retrieved from the hardware during setup.
+ *
+ * We can ditch the adjusted_mode.crtc_clock check as soon
+ * as Haswell has gained clock readout/fastboot support.
+ *
+ * We can ditch the crtc->primary->state->fb check as soon as we can
+ * properly reconstruct framebuffers.
+ *
+ * FIXME: The intel_crtc->active here should be switched to
+ * crtc->state->active once we have proper CRTC states wired up
+ * for atomic.
+ */
+ return crtc->active && crtc->base.primary->state->fb &&
+ crtc->config->hw.adjusted_mode.crtc_clock;
}
static struct intel_crtc *single_enabled_crtc(struct drm_i915_private *dev_priv)
@@ -829,7 +858,7 @@ static struct intel_crtc *single_enabled_crtc(struct drm_i915_private *dev_priv)
return enabled;
}
-static void pineview_update_wm(struct intel_crtc *unused_crtc)
+static void pnv_update_wm(struct intel_crtc *unused_crtc)
{
struct drm_i915_private *dev_priv = to_i915(unused_crtc->base.dev);
struct intel_crtc *crtc;
@@ -842,7 +871,8 @@ static void pineview_update_wm(struct intel_crtc *unused_crtc)
dev_priv->fsb_freq,
dev_priv->mem_freq);
if (!latency) {
- DRM_DEBUG_KMS("Unknown FSB/MEM found, disable CxSR\n");
+ drm_dbg_kms(&dev_priv->drm,
+ "Unknown FSB/MEM found, disable CxSR\n");
intel_set_memory_cxsr(dev_priv, false);
return;
}
@@ -850,25 +880,25 @@ static void pineview_update_wm(struct intel_crtc *unused_crtc)
crtc = single_enabled_crtc(dev_priv);
if (crtc) {
const struct drm_display_mode *adjusted_mode =
- &crtc->config->base.adjusted_mode;
+ &crtc->config->hw.adjusted_mode;
const struct drm_framebuffer *fb =
crtc->base.primary->state->fb;
int cpp = fb->format->cpp[0];
int clock = adjusted_mode->crtc_clock;
/* Display SR */
- wm = intel_calculate_wm(clock, &pineview_display_wm,
- pineview_display_wm.fifo_size,
+ wm = intel_calculate_wm(clock, &pnv_display_wm,
+ pnv_display_wm.fifo_size,
cpp, latency->display_sr);
reg = I915_READ(DSPFW1);
reg &= ~DSPFW_SR_MASK;
reg |= FW_WM(wm, SR);
I915_WRITE(DSPFW1, reg);
- DRM_DEBUG_KMS("DSPFW1 register is %x\n", reg);
+ drm_dbg_kms(&dev_priv->drm, "DSPFW1 register is %x\n", reg);
/* cursor SR */
- wm = intel_calculate_wm(clock, &pineview_cursor_wm,
- pineview_display_wm.fifo_size,
+ wm = intel_calculate_wm(clock, &pnv_cursor_wm,
+ pnv_display_wm.fifo_size,
4, latency->cursor_sr);
reg = I915_READ(DSPFW3);
reg &= ~DSPFW_CURSOR_SR_MASK;
@@ -876,8 +906,8 @@ static void pineview_update_wm(struct intel_crtc *unused_crtc)
I915_WRITE(DSPFW3, reg);
/* Display HPLL off SR */
- wm = intel_calculate_wm(clock, &pineview_display_hplloff_wm,
- pineview_display_hplloff_wm.fifo_size,
+ wm = intel_calculate_wm(clock, &pnv_display_hplloff_wm,
+ pnv_display_hplloff_wm.fifo_size,
cpp, latency->display_hpll_disable);
reg = I915_READ(DSPFW3);
reg &= ~DSPFW_HPLL_SR_MASK;
@@ -885,14 +915,14 @@ static void pineview_update_wm(struct intel_crtc *unused_crtc)
I915_WRITE(DSPFW3, reg);
/* cursor HPLL off SR */
- wm = intel_calculate_wm(clock, &pineview_cursor_hplloff_wm,
- pineview_display_hplloff_wm.fifo_size,
+ wm = intel_calculate_wm(clock, &pnv_cursor_hplloff_wm,
+ pnv_display_hplloff_wm.fifo_size,
4, latency->cursor_hpll_disable);
reg = I915_READ(DSPFW3);
reg &= ~DSPFW_HPLL_CURSOR_MASK;
reg |= FW_WM(wm, HPLL_CURSOR);
I915_WRITE(DSPFW3, reg);
- DRM_DEBUG_KMS("DSPFW3 register is %x\n", reg);
+ drm_dbg_kms(&dev_priv->drm, "DSPFW3 register is %x\n", reg);
intel_set_memory_cxsr(dev_priv, true);
} else {
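
All four intel_calculate_wm() calls in pnv_update_wm() feed the same kind of inputs: a pixel clock, a watermark parameter block, bytes per pixel, and a latency. The sketch below is a simplified model of that math, under the assumption that the helper estimates how many FIFO cachelines drain during the latency window and returns the leftover space as the watermark; the real helper also clamps against max_wm and falls back to default_wm, and every constant here is an example, not a Pineview value.

#include <stdio.h>

static int calc_wm(int clock_khz, int cpp, int latency_ns,
		   int fifo_size, int cacheline_size, int guard)
{
	/* bytes fetched while waiting: rate (kHz) * bytes/pixel * ns */
	long long bytes = (long long)clock_khz * cpp * latency_ns / 1000000;
	int entries = (int)((bytes + cacheline_size - 1) / cacheline_size) + guard;
	int wm = fifo_size - entries;

	return wm > 0 ? wm : 1; /* the driver falls back to default_wm instead */
}

int main(void)
{
	/* 148500 kHz pixel clock, 4 bytes/pixel, 20 us latency */
	printf("SR watermark: %d\n",
	       calc_wm(148500, 4, 20000, 512, 64, 2));
	return 0;
}
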
@@ -1083,10 +1113,10 @@ static u16 g4x_compute_wm(const struct intel_crtc_state *crtc_state,
const struct intel_plane_state *plane_state,
int level)
{
- struct intel_plane *plane = to_intel_plane(plane_state->base.plane);
+ struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
const struct drm_display_mode *adjusted_mode =
- &crtc_state->base.adjusted_mode;
+ &crtc_state->hw.adjusted_mode;
unsigned int latency = dev_priv->wm.pri_latency[level] * 10;
unsigned int clock, htotal, cpp, width, wm;
@@ -1096,7 +1126,7 @@ static u16 g4x_compute_wm(const struct intel_crtc_state *crtc_state,
if (!intel_wm_plane_visible(crtc_state, plane_state))
return 0;
- cpp = plane_state->base.fb->format->cpp[0];
+ cpp = plane_state->hw.fb->format->cpp[0];
/*
* Not 100% sure which way ELK should go here as the
@@ -1116,7 +1146,7 @@ static u16 g4x_compute_wm(const struct intel_crtc_state *crtc_state,
clock = adjusted_mode->crtc_clock;
htotal = adjusted_mode->crtc_htotal;
- width = drm_rect_width(&plane_state->base.dst);
+ width = drm_rect_width(&plane_state->uapi.dst);
if (plane->id == PLANE_CURSOR) {
wm = intel_wm_method2(clock, htotal, width, cpp, latency);
@@ -1143,7 +1173,7 @@ static u16 g4x_compute_wm(const struct intel_crtc_state *crtc_state,
static bool g4x_raw_plane_wm_set(struct intel_crtc_state *crtc_state,
int level, enum plane_id plane_id, u16 value)
{
- struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev);
+ struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
bool dirty = false;
for (; level < intel_wm_num_levels(dev_priv); level++) {
@@ -1159,7 +1189,7 @@ static bool g4x_raw_plane_wm_set(struct intel_crtc_state *crtc_state,
static bool g4x_raw_fbc_wm_set(struct intel_crtc_state *crtc_state,
int level, u16 value)
{
- struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev);
+ struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
bool dirty = false;
/* NORMAL level doesn't have an FBC watermark */
@@ -1182,7 +1212,8 @@ static u32 ilk_compute_fbc_wm(const struct intel_crtc_state *crtc_state,
static bool g4x_raw_plane_wm_compute(struct intel_crtc_state *crtc_state,
const struct intel_plane_state *plane_state)
{
- struct intel_plane *plane = to_intel_plane(plane_state->base.plane);
+ struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
+ struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
int num_levels = intel_wm_num_levels(to_i915(plane->base.dev));
enum plane_id plane_id = plane->id;
bool dirty = false;
@@ -1235,16 +1266,18 @@ static bool g4x_raw_plane_wm_compute(struct intel_crtc_state *crtc_state,
out:
if (dirty) {
- DRM_DEBUG_KMS("%s watermarks: normal=%d, SR=%d, HPLL=%d\n",
- plane->base.name,
- crtc_state->wm.g4x.raw[G4X_WM_LEVEL_NORMAL].plane[plane_id],
- crtc_state->wm.g4x.raw[G4X_WM_LEVEL_SR].plane[plane_id],
- crtc_state->wm.g4x.raw[G4X_WM_LEVEL_HPLL].plane[plane_id]);
+ drm_dbg_kms(&dev_priv->drm,
+ "%s watermarks: normal=%d, SR=%d, HPLL=%d\n",
+ plane->base.name,
+ crtc_state->wm.g4x.raw[G4X_WM_LEVEL_NORMAL].plane[plane_id],
+ crtc_state->wm.g4x.raw[G4X_WM_LEVEL_SR].plane[plane_id],
+ crtc_state->wm.g4x.raw[G4X_WM_LEVEL_HPLL].plane[plane_id]);
if (plane_id == PLANE_PRIMARY)
- DRM_DEBUG_KMS("FBC watermarks: SR=%d, HPLL=%d\n",
- crtc_state->wm.g4x.raw[G4X_WM_LEVEL_SR].fbc,
- crtc_state->wm.g4x.raw[G4X_WM_LEVEL_HPLL].fbc);
+ drm_dbg_kms(&dev_priv->drm,
+ "FBC watermarks: SR=%d, HPLL=%d\n",
+ crtc_state->wm.g4x.raw[G4X_WM_LEVEL_SR].fbc,
+ crtc_state->wm.g4x.raw[G4X_WM_LEVEL_HPLL].fbc);
}
return dirty;
@@ -1261,7 +1294,7 @@ static bool g4x_raw_plane_wm_is_valid(const struct intel_crtc_state *crtc_state,
static bool g4x_raw_crtc_wm_is_valid(const struct intel_crtc_state *crtc_state,
int level)
{
- struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev);
+ struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
if (level > dev_priv->wm.max_level)
return false;
@@ -1299,9 +1332,9 @@ static void g4x_invalidate_wms(struct intel_crtc *crtc,
static int g4x_compute_pipe_wm(struct intel_crtc_state *crtc_state)
{
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
struct intel_atomic_state *state =
- to_intel_atomic_state(crtc_state->base.state);
+ to_intel_atomic_state(crtc_state->uapi.state);
struct g4x_wm_state *wm_state = &crtc_state->wm.g4x.optimal;
int num_active_planes = hweight8(crtc_state->active_planes &
~BIT(PLANE_CURSOR));
@@ -1316,8 +1349,8 @@ static int g4x_compute_pipe_wm(struct intel_crtc_state *crtc_state)
for_each_oldnew_intel_plane_in_state(state, plane,
old_plane_state,
new_plane_state, i) {
- if (new_plane_state->base.crtc != &crtc->base &&
- old_plane_state->base.crtc != &crtc->base)
+ if (new_plane_state->hw.crtc != &crtc->base &&
+ old_plane_state->hw.crtc != &crtc->base)
continue;
if (g4x_raw_plane_wm_compute(crtc_state, new_plane_state))
@@ -1388,17 +1421,17 @@ static int g4x_compute_pipe_wm(struct intel_crtc_state *crtc_state)
static int g4x_compute_intermediate_wm(struct intel_crtc_state *new_crtc_state)
{
- struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->base.crtc);
+ struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->uapi.crtc);
struct g4x_wm_state *intermediate = &new_crtc_state->wm.g4x.intermediate;
const struct g4x_wm_state *optimal = &new_crtc_state->wm.g4x.optimal;
struct intel_atomic_state *intel_state =
- to_intel_atomic_state(new_crtc_state->base.state);
+ to_intel_atomic_state(new_crtc_state->uapi.state);
const struct intel_crtc_state *old_crtc_state =
intel_atomic_get_old_crtc_state(intel_state, crtc);
const struct g4x_wm_state *active = &old_crtc_state->wm.g4x.optimal;
enum plane_id plane_id;
- if (!new_crtc_state->base.active || drm_atomic_crtc_needs_modeset(&new_crtc_state->base)) {
+ if (!new_crtc_state->hw.active || drm_atomic_crtc_needs_modeset(&new_crtc_state->uapi)) {
*intermediate = *optimal;
intermediate->cxsr = false;
@@ -1528,10 +1561,11 @@ static void g4x_program_watermarks(struct drm_i915_private *dev_priv)
}
static void g4x_initial_watermarks(struct intel_atomic_state *state,
- struct intel_crtc_state *crtc_state)
+ struct intel_crtc *crtc)
{
- struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev);
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ const struct intel_crtc_state *crtc_state =
+ intel_atomic_get_new_crtc_state(state, crtc);
mutex_lock(&dev_priv->wm.wm_mutex);
crtc->wm.active.g4x = crtc_state->wm.g4x.intermediate;
@@ -1540,10 +1574,11 @@ static void g4x_initial_watermarks(struct intel_atomic_state *state,
}
static void g4x_optimize_watermarks(struct intel_atomic_state *state,
- struct intel_crtc_state *crtc_state)
+ struct intel_crtc *crtc)
{
- struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev);
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ const struct intel_crtc_state *crtc_state =
+ intel_atomic_get_new_crtc_state(state, crtc);
if (!crtc_state->wm.need_postvbl_update)
return;
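
The g4x_initial_watermarks()/g4x_optimize_watermarks() changes above (and the vlv, ilk, and skl equivalents later in this patch) all make the same move: the hook now receives the CRTC and digs the new CRTC state out of the atomic state itself, instead of trusting a caller-supplied crtc_state. A toy model of that lookup pattern — all types and names here are invented stand-ins, not the driver's:

#include <stdio.h>

#define MAX_PIPES 4

struct crtc { int pipe; };
struct crtc_state { int wm; };
struct atomic_state { struct crtc_state new_state[MAX_PIPES]; };

/* Stand-in for intel_atomic_get_new_crtc_state(): index the committed
 * states by pipe so every hook resolves the same, current state. */
static const struct crtc_state *
get_new_crtc_state(const struct atomic_state *state, const struct crtc *crtc)
{
	return &state->new_state[crtc->pipe];
}

static void initial_watermarks(const struct atomic_state *state,
			       const struct crtc *crtc)
{
	const struct crtc_state *crtc_state = get_new_crtc_state(state, crtc);

	printf("pipe %d: programming wm %d\n", crtc->pipe, crtc_state->wm);
}

int main(void)
{
	struct atomic_state state = { .new_state = { { .wm = 8 }, { .wm = 4 } } };
	struct crtc crtc = { .pipe = 1 };

	initial_watermarks(&state, &crtc);
	return 0;
}
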
@@ -1589,10 +1624,10 @@ static u16 vlv_compute_wm_level(const struct intel_crtc_state *crtc_state,
const struct intel_plane_state *plane_state,
int level)
{
- struct intel_plane *plane = to_intel_plane(plane_state->base.plane);
+ struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
const struct drm_display_mode *adjusted_mode =
- &crtc_state->base.adjusted_mode;
+ &crtc_state->hw.adjusted_mode;
unsigned int clock, htotal, cpp, width, wm;
if (dev_priv->wm.pri_latency[level] == 0)
@@ -1601,7 +1636,7 @@ static u16 vlv_compute_wm_level(const struct intel_crtc_state *crtc_state,
if (!intel_wm_plane_visible(crtc_state, plane_state))
return 0;
- cpp = plane_state->base.fb->format->cpp[0];
+ cpp = plane_state->hw.fb->format->cpp[0];
clock = adjusted_mode->crtc_clock;
htotal = adjusted_mode->crtc_htotal;
width = crtc_state->pipe_src_w;
@@ -1630,7 +1665,7 @@ static bool vlv_need_sprite0_fifo_workaround(unsigned int active_planes)
static int vlv_compute_fifo(struct intel_crtc_state *crtc_state)
{
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
const struct g4x_pipe_wm *raw =
&crtc_state->wm.vlv.raw[VLV_WM_LEVEL_PM2];
struct vlv_fifo_state *fifo_state = &crtc_state->wm.vlv.fifo_state;
@@ -1742,7 +1777,7 @@ static u16 vlv_invert_wm_value(u16 wm, u16 fifo_size)
static bool vlv_raw_plane_wm_set(struct intel_crtc_state *crtc_state,
int level, enum plane_id plane_id, u16 value)
{
- struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev);
+ struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
int num_levels = intel_wm_num_levels(dev_priv);
bool dirty = false;
@@ -1759,7 +1794,8 @@ static bool vlv_raw_plane_wm_set(struct intel_crtc_state *crtc_state,
static bool vlv_raw_plane_wm_compute(struct intel_crtc_state *crtc_state,
const struct intel_plane_state *plane_state)
{
- struct intel_plane *plane = to_intel_plane(plane_state->base.plane);
+ struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
+ struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
enum plane_id plane_id = plane->id;
int num_levels = intel_wm_num_levels(to_i915(plane->base.dev));
int level;
@@ -1787,11 +1823,12 @@ static bool vlv_raw_plane_wm_compute(struct intel_crtc_state *crtc_state,
out:
if (dirty)
- DRM_DEBUG_KMS("%s watermarks: PM2=%d, PM5=%d, DDR DVFS=%d\n",
- plane->base.name,
- crtc_state->wm.vlv.raw[VLV_WM_LEVEL_PM2].plane[plane_id],
- crtc_state->wm.vlv.raw[VLV_WM_LEVEL_PM5].plane[plane_id],
- crtc_state->wm.vlv.raw[VLV_WM_LEVEL_DDR_DVFS].plane[plane_id]);
+ drm_dbg_kms(&dev_priv->drm,
+ "%s watermarks: PM2=%d, PM5=%d, DDR DVFS=%d\n",
+ plane->base.name,
+ crtc_state->wm.vlv.raw[VLV_WM_LEVEL_PM2].plane[plane_id],
+ crtc_state->wm.vlv.raw[VLV_WM_LEVEL_PM5].plane[plane_id],
+ crtc_state->wm.vlv.raw[VLV_WM_LEVEL_DDR_DVFS].plane[plane_id]);
return dirty;
}
@@ -1817,16 +1854,16 @@ static bool vlv_raw_crtc_wm_is_valid(const struct intel_crtc_state *crtc_state,
static int vlv_compute_pipe_wm(struct intel_crtc_state *crtc_state)
{
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
struct intel_atomic_state *state =
- to_intel_atomic_state(crtc_state->base.state);
+ to_intel_atomic_state(crtc_state->uapi.state);
struct vlv_wm_state *wm_state = &crtc_state->wm.vlv.optimal;
const struct vlv_fifo_state *fifo_state =
&crtc_state->wm.vlv.fifo_state;
int num_active_planes = hweight8(crtc_state->active_planes &
~BIT(PLANE_CURSOR));
- bool needs_modeset = drm_atomic_crtc_needs_modeset(&crtc_state->base);
+ bool needs_modeset = drm_atomic_crtc_needs_modeset(&crtc_state->uapi);
const struct intel_plane_state *old_plane_state;
const struct intel_plane_state *new_plane_state;
struct intel_plane *plane;
@@ -1837,8 +1874,8 @@ static int vlv_compute_pipe_wm(struct intel_crtc_state *crtc_state)
for_each_oldnew_intel_plane_in_state(state, plane,
old_plane_state,
new_plane_state, i) {
- if (new_plane_state->base.crtc != &crtc->base &&
- old_plane_state->base.crtc != &crtc->base)
+ if (new_plane_state->hw.crtc != &crtc->base &&
+ old_plane_state->hw.crtc != &crtc->base)
continue;
if (vlv_raw_plane_wm_compute(crtc_state, new_plane_state))
@@ -1923,11 +1960,12 @@ static int vlv_compute_pipe_wm(struct intel_crtc_state *crtc_state)
(((value) << DSPARB_ ## plane ## _SHIFT_VLV) & DSPARB_ ## plane ## _MASK_VLV)
static void vlv_atomic_update_fifo(struct intel_atomic_state *state,
- struct intel_crtc_state *crtc_state)
+ struct intel_crtc *crtc)
{
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
struct intel_uncore *uncore = &dev_priv->uncore;
+ const struct intel_crtc_state *crtc_state =
+ intel_atomic_get_new_crtc_state(state, crtc);
const struct vlv_fifo_state *fifo_state =
&crtc_state->wm.vlv.fifo_state;
int sprite0_start, sprite1_start, fifo_size;
@@ -2021,17 +2059,17 @@ static void vlv_atomic_update_fifo(struct intel_atomic_state *state,
static int vlv_compute_intermediate_wm(struct intel_crtc_state *new_crtc_state)
{
- struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->base.crtc);
+ struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->uapi.crtc);
struct vlv_wm_state *intermediate = &new_crtc_state->wm.vlv.intermediate;
const struct vlv_wm_state *optimal = &new_crtc_state->wm.vlv.optimal;
struct intel_atomic_state *intel_state =
- to_intel_atomic_state(new_crtc_state->base.state);
+ to_intel_atomic_state(new_crtc_state->uapi.state);
const struct intel_crtc_state *old_crtc_state =
intel_atomic_get_old_crtc_state(intel_state, crtc);
const struct vlv_wm_state *active = &old_crtc_state->wm.vlv.optimal;
int level;
- if (!new_crtc_state->base.active || drm_atomic_crtc_needs_modeset(&new_crtc_state->base)) {
+ if (!new_crtc_state->hw.active || drm_atomic_crtc_needs_modeset(&new_crtc_state->uapi)) {
*intermediate = *optimal;
intermediate->cxsr = false;
@@ -2147,10 +2185,11 @@ static void vlv_program_watermarks(struct drm_i915_private *dev_priv)
}
static void vlv_initial_watermarks(struct intel_atomic_state *state,
- struct intel_crtc_state *crtc_state)
+ struct intel_crtc *crtc)
{
- struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev);
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ const struct intel_crtc_state *crtc_state =
+ intel_atomic_get_new_crtc_state(state, crtc);
mutex_lock(&dev_priv->wm.wm_mutex);
crtc->wm.active.vlv = crtc_state->wm.vlv.intermediate;
@@ -2159,10 +2198,11 @@ static void vlv_initial_watermarks(struct intel_atomic_state *state,
}
static void vlv_optimize_watermarks(struct intel_atomic_state *state,
- struct intel_crtc_state *crtc_state)
+ struct intel_crtc *crtc)
{
- struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev);
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ const struct intel_crtc_state *crtc_state =
+ intel_atomic_get_new_crtc_state(state, crtc);
if (!crtc_state->wm.need_postvbl_update)
return;
@@ -2187,7 +2227,7 @@ static void i965_update_wm(struct intel_crtc *unused_crtc)
/* self-refresh has much higher latency */
static const int sr_latency_ns = 12000;
const struct drm_display_mode *adjusted_mode =
- &crtc->config->base.adjusted_mode;
+ &crtc->config->hw.adjusted_mode;
const struct drm_framebuffer *fb =
crtc->base.primary->state->fb;
int clock = adjusted_mode->crtc_clock;
@@ -2203,8 +2243,9 @@ static void i965_update_wm(struct intel_crtc *unused_crtc)
if (srwm < 0)
srwm = 1;
srwm &= 0x1ff;
- DRM_DEBUG_KMS("self-refresh entries: %d, wm: %d\n",
- entries, srwm);
+ drm_dbg_kms(&dev_priv->drm,
+ "self-refresh entries: %d, wm: %d\n",
+ entries, srwm);
entries = intel_wm_method2(clock, htotal,
crtc->base.cursor->state->crtc_w, 4,
@@ -2217,8 +2258,9 @@ static void i965_update_wm(struct intel_crtc *unused_crtc)
if (cursor_sr > i965_cursor_wm_info.max_wm)
cursor_sr = i965_cursor_wm_info.max_wm;
- DRM_DEBUG_KMS("self-refresh watermark: display plane %d "
- "cursor %d\n", srwm, cursor_sr);
+ drm_dbg_kms(&dev_priv->drm,
+ "self-refresh watermark: display plane %d "
+ "cursor %d\n", srwm, cursor_sr);
cxsr_enabled = true;
} else {
@@ -2227,8 +2269,9 @@ static void i965_update_wm(struct intel_crtc *unused_crtc)
intel_set_memory_cxsr(dev_priv, false);
}
- DRM_DEBUG_KMS("Setting FIFO watermarks - A: 8, B: 8, C: 8, SR %d\n",
- srwm);
+ drm_dbg_kms(&dev_priv->drm,
+ "Setting FIFO watermarks - A: 8, B: 8, C: 8, SR %d\n",
+ srwm);
/* 965 has limitations... */
I915_WRITE(DSPFW1, FW_WM(srwm, SR) |
@@ -2268,7 +2311,7 @@ static void i9xx_update_wm(struct intel_crtc *unused_crtc)
crtc = intel_get_crtc_for_plane(dev_priv, PLANE_A);
if (intel_crtc_active(crtc)) {
const struct drm_display_mode *adjusted_mode =
- &crtc->config->base.adjusted_mode;
+ &crtc->config->hw.adjusted_mode;
const struct drm_framebuffer *fb =
crtc->base.primary->state->fb;
int cpp;
@@ -2295,7 +2338,7 @@ static void i9xx_update_wm(struct intel_crtc *unused_crtc)
crtc = intel_get_crtc_for_plane(dev_priv, PLANE_B);
if (intel_crtc_active(crtc)) {
const struct drm_display_mode *adjusted_mode =
- &crtc->config->base.adjusted_mode;
+ &crtc->config->hw.adjusted_mode;
const struct drm_framebuffer *fb =
crtc->base.primary->state->fb;
int cpp;
@@ -2318,7 +2361,8 @@ static void i9xx_update_wm(struct intel_crtc *unused_crtc)
planeb_wm = wm_info->max_wm;
}
- DRM_DEBUG_KMS("FIFO watermarks - A: %d, B: %d\n", planea_wm, planeb_wm);
+ drm_dbg_kms(&dev_priv->drm,
+ "FIFO watermarks - A: %d, B: %d\n", planea_wm, planeb_wm);
if (IS_I915GM(dev_priv) && enabled) {
struct drm_i915_gem_object *obj;
@@ -2343,7 +2387,7 @@ static void i9xx_update_wm(struct intel_crtc *unused_crtc)
/* self-refresh has much higher latency */
static const int sr_latency_ns = 6000;
const struct drm_display_mode *adjusted_mode =
- &enabled->config->base.adjusted_mode;
+ &enabled->config->hw.adjusted_mode;
const struct drm_framebuffer *fb =
enabled->base.primary->state->fb;
int clock = adjusted_mode->crtc_clock;
@@ -2360,7 +2404,8 @@ static void i9xx_update_wm(struct intel_crtc *unused_crtc)
entries = intel_wm_method2(clock, htotal, hdisplay, cpp,
sr_latency_ns / 100);
entries = DIV_ROUND_UP(entries, wm_info->cacheline_size);
- DRM_DEBUG_KMS("self-refresh entries: %d\n", entries);
+ drm_dbg_kms(&dev_priv->drm,
+ "self-refresh entries: %d\n", entries);
srwm = wm_info->fifo_size - entries;
if (srwm < 0)
srwm = 1;
@@ -2372,8 +2417,9 @@ static void i9xx_update_wm(struct intel_crtc *unused_crtc)
I915_WRITE(FW_BLC_SELF, srwm & 0x3f);
}
- DRM_DEBUG_KMS("Setting FIFO watermarks - A: %d, B: %d, C: %d, SR %d\n",
- planea_wm, planeb_wm, cwm, srwm);
+ drm_dbg_kms(&dev_priv->drm,
+ "Setting FIFO watermarks - A: %d, B: %d, C: %d, SR %d\n",
+ planea_wm, planeb_wm, cwm, srwm);
fwater_lo = ((planeb_wm & 0x3f) << 16) | (planea_wm & 0x3f);
fwater_hi = (cwm & 0x1f);
@@ -2401,7 +2447,7 @@ static void i845_update_wm(struct intel_crtc *unused_crtc)
if (crtc == NULL)
return;
- adjusted_mode = &crtc->config->base.adjusted_mode;
+ adjusted_mode = &crtc->config->hw.adjusted_mode;
planea_wm = intel_calculate_wm(adjusted_mode->crtc_clock,
&i845_wm_info,
dev_priv->display.get_fifo_size(dev_priv, PLANE_A),
@@ -2409,7 +2455,8 @@ static void i845_update_wm(struct intel_crtc *unused_crtc)
fwater_lo = I915_READ(FW_BLC) & ~0xfff;
fwater_lo |= (3<<8) | planea_wm;
- DRM_DEBUG_KMS("Setting FIFO watermarks - A: %d\n", planea_wm);
+ drm_dbg_kms(&dev_priv->drm,
+ "Setting FIFO watermarks - A: %d\n", planea_wm);
I915_WRITE(FW_BLC, fwater_lo);
}
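
i845_update_wm() ends with a classic read-modify-write: clear the low 12 bits of FW_BLC, then OR in (3<<8) and the plane A watermark. A standalone model of just that packing; the meaning of the 3<<8 field and the register value are assumptions for illustration only.

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t fw_blc = 0xdeadbfff; /* pretend I915_READ(FW_BLC) */
	int planea_wm = 0x25;

	fw_blc &= ~0xfff;             /* clear the watermark field */
	fw_blc |= (3 << 8) | planea_wm;

	printf("FW_BLC: 0x%08x (wm A: %d)\n", fw_blc, planea_wm);
	return 0;
}
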
@@ -2483,7 +2530,7 @@ static u32 ilk_compute_pri_wm(const struct intel_crtc_state *crtc_state,
if (!intel_wm_plane_visible(crtc_state, plane_state))
return 0;
- cpp = plane_state->base.fb->format->cpp[0];
+ cpp = plane_state->hw.fb->format->cpp[0];
method1 = ilk_wm_method1(crtc_state->pixel_rate, cpp, mem_value);
@@ -2491,8 +2538,8 @@ static u32 ilk_compute_pri_wm(const struct intel_crtc_state *crtc_state,
return method1;
method2 = ilk_wm_method2(crtc_state->pixel_rate,
- crtc_state->base.adjusted_mode.crtc_htotal,
- drm_rect_width(&plane_state->base.dst),
+ crtc_state->hw.adjusted_mode.crtc_htotal,
+ drm_rect_width(&plane_state->uapi.dst),
cpp, mem_value);
return min(method1, method2);
@@ -2515,12 +2562,12 @@ static u32 ilk_compute_spr_wm(const struct intel_crtc_state *crtc_state,
if (!intel_wm_plane_visible(crtc_state, plane_state))
return 0;
- cpp = plane_state->base.fb->format->cpp[0];
+ cpp = plane_state->hw.fb->format->cpp[0];
method1 = ilk_wm_method1(crtc_state->pixel_rate, cpp, mem_value);
method2 = ilk_wm_method2(crtc_state->pixel_rate,
- crtc_state->base.adjusted_mode.crtc_htotal,
- drm_rect_width(&plane_state->base.dst),
+ crtc_state->hw.adjusted_mode.crtc_htotal,
+ drm_rect_width(&plane_state->uapi.dst),
cpp, mem_value);
return min(method1, method2);
}
@@ -2541,11 +2588,11 @@ static u32 ilk_compute_cur_wm(const struct intel_crtc_state *crtc_state,
if (!intel_wm_plane_visible(crtc_state, plane_state))
return 0;
- cpp = plane_state->base.fb->format->cpp[0];
+ cpp = plane_state->hw.fb->format->cpp[0];
return ilk_wm_method2(crtc_state->pixel_rate,
- crtc_state->base.adjusted_mode.crtc_htotal,
- drm_rect_width(&plane_state->base.dst),
+ crtc_state->hw.adjusted_mode.crtc_htotal,
+ drm_rect_width(&plane_state->uapi.dst),
cpp, mem_value);
}
@@ -2559,9 +2606,10 @@ static u32 ilk_compute_fbc_wm(const struct intel_crtc_state *crtc_state,
if (!intel_wm_plane_visible(crtc_state, plane_state))
return 0;
- cpp = plane_state->base.fb->format->cpp[0];
+ cpp = plane_state->hw.fb->format->cpp[0];
- return ilk_wm_fbc(pri_val, drm_rect_width(&plane_state->base.dst), cpp);
+ return ilk_wm_fbc(pri_val, drm_rect_width(&plane_state->uapi.dst),
+ cpp);
}
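
The ilk_compute_pri_wm()/ilk_compute_spr_wm()/ilk_compute_cur_wm() trio above all resolve to min(method1, method2): method1 charges for raw bandwidth streamed during the latency window, while method2 rounds the latency up to whole scanlines and charges per line. A userspace sketch of that arithmetic with example numbers — latency in tenths of a microsecond, matching the /10000 divisors; treat all constants as illustrative.

#include <stdio.h>

static unsigned int method1(unsigned int pixel_rate, unsigned int cpp,
			    unsigned int latency)
{
	/* bytes streamed during the latency window */
	return (unsigned int)(((unsigned long long)pixel_rate * cpp * latency) / 10000);
}

static unsigned int method2(unsigned int pixel_rate, unsigned int htotal,
			    unsigned int width, unsigned int cpp,
			    unsigned int latency)
{
	/* whole scanlines elapsed during the latency window */
	unsigned int lines = latency * pixel_rate / (htotal * 10000);

	return (lines + 1) * width * cpp;
}

int main(void)
{
	unsigned int m1 = method1(148500, 4, 50); /* 5.0 us latency */
	unsigned int m2 = method2(148500, 2200, 1920, 4, 50);

	printf("method1=%u method2=%u -> wm=%u\n",
	       m1, m2, m1 < m2 ? m1 : m2);
	return 0;
}
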
static unsigned int
@@ -2766,12 +2814,12 @@ static u32
hsw_compute_linetime_wm(const struct intel_crtc_state *crtc_state)
{
const struct intel_atomic_state *intel_state =
- to_intel_atomic_state(crtc_state->base.state);
+ to_intel_atomic_state(crtc_state->uapi.state);
const struct drm_display_mode *adjusted_mode =
- &crtc_state->base.adjusted_mode;
+ &crtc_state->hw.adjusted_mode;
u32 linetime, ips_linetime;
- if (!crtc_state->base.active)
+ if (!crtc_state->hw.active)
return 0;
if (WARN_ON(adjusted_mode->crtc_clock == 0))
return 0;
@@ -2807,7 +2855,8 @@ static void intel_read_wm_latency(struct drm_i915_private *dev_priv,
&val, NULL);
if (ret) {
- DRM_ERROR("SKL Mailbox read error = %d\n", ret);
+ drm_err(&dev_priv->drm,
+ "SKL Mailbox read error = %d\n", ret);
return;
}
@@ -2825,7 +2874,8 @@ static void intel_read_wm_latency(struct drm_i915_private *dev_priv,
GEN9_PCODE_READ_MEM_LATENCY,
&val, NULL);
if (ret) {
- DRM_ERROR("SKL Mailbox read error = %d\n", ret);
+ drm_err(&dev_priv->drm,
+ "SKL Mailbox read error = %d\n", ret);
return;
}
@@ -2943,8 +2993,9 @@ static void intel_print_wm_latency(struct drm_i915_private *dev_priv,
unsigned int latency = wm[level];
if (latency == 0) {
- DRM_DEBUG_KMS("%s WM%d latency not provided\n",
- name, level);
+ drm_dbg_kms(&dev_priv->drm,
+ "%s WM%d latency not provided\n",
+ name, level);
continue;
}
@@ -2957,9 +3008,9 @@ static void intel_print_wm_latency(struct drm_i915_private *dev_priv,
else if (level > 0)
latency *= 5;
- DRM_DEBUG_KMS("%s WM%d latency %u (%u.%u usec)\n",
- name, level, wm[level],
- latency / 10, latency % 10);
+ drm_dbg_kms(&dev_priv->drm,
+ "%s WM%d latency %u (%u.%u usec)\n", name, level,
+ wm[level], latency / 10, latency % 10);
}
}
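
intel_print_wm_latency() stores latencies in hardware units and scales them to tenths of a microsecond for display. The visible hunk shows the *5 step for older LP levels (units of 0.5 us); on GEN9+ the driver multiplies by 10 instead (units of 1 us). A small model of that formatting, with the generation switch reduced to a boolean:

#include <stdbool.h>
#include <stdio.h>

static void print_latency(const char *name, int level, unsigned int raw,
			  bool gen9)
{
	unsigned int latency = raw;

	if (gen9)
		latency *= 10; /* GEN9+: units of 1 us */
	else if (level > 0)
		latency *= 5;  /* older LP levels: units of 0.5 us */

	printf("%s WM%d latency %u (%u.%u usec)\n",
	       name, level, raw, latency / 10, latency % 10);
}

int main(void)
{
	print_latency("Primary", 2, 4, false); /* 4 * 0.5 us = 2.0 us */
	return 0;
}
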
@@ -2993,7 +3044,8 @@ static void snb_wm_latency_quirk(struct drm_i915_private *dev_priv)
if (!changed)
return;
- DRM_DEBUG_KMS("WM latency values increased to avoid potential underruns\n");
+ drm_dbg_kms(&dev_priv->drm,
+ "WM latency values increased to avoid potential underruns\n");
intel_print_wm_latency(dev_priv, "Primary", dev_priv->wm.pri_latency);
intel_print_wm_latency(dev_priv, "Sprite", dev_priv->wm.spr_latency);
intel_print_wm_latency(dev_priv, "Cursor", dev_priv->wm.cur_latency);
@@ -3021,7 +3073,8 @@ static void snb_wm_lp3_irq_quirk(struct drm_i915_private *dev_priv)
dev_priv->wm.spr_latency[3] = 0;
dev_priv->wm.cur_latency[3] = 0;
- DRM_DEBUG_KMS("LP3 watermarks disabled due to potential for lost interrupts\n");
+ drm_dbg_kms(&dev_priv->drm,
+ "LP3 watermarks disabled due to potential for lost interrupts\n");
intel_print_wm_latency(dev_priv, "Primary", dev_priv->wm.pri_latency);
intel_print_wm_latency(dev_priv, "Sprite", dev_priv->wm.spr_latency);
intel_print_wm_latency(dev_priv, "Cursor", dev_priv->wm.cur_latency);
@@ -3071,7 +3124,7 @@ static bool ilk_validate_pipe_wm(const struct drm_i915_private *dev_priv,
/* At least LP0 must be valid */
if (!ilk_validate_wm_level(0, &max, &pipe_wm->wm[0])) {
- DRM_DEBUG_KMS("LP0 watermark invalid\n");
+ drm_dbg_kms(&dev_priv->drm, "LP0 watermark invalid\n");
return false;
}
@@ -3081,11 +3134,9 @@ static bool ilk_validate_pipe_wm(const struct drm_i915_private *dev_priv,
/* Compute new watermarks for the pipe */
static int ilk_compute_pipe_wm(struct intel_crtc_state *crtc_state)
{
- struct drm_atomic_state *state = crtc_state->base.state;
- struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->base.crtc);
+ struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->uapi.crtc);
struct intel_pipe_wm *pipe_wm;
- struct drm_device *dev = state->dev;
- const struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_plane *plane;
const struct intel_plane_state *plane_state;
const struct intel_plane_state *pristate = NULL;
@@ -3105,12 +3156,12 @@ static int ilk_compute_pipe_wm(struct intel_crtc_state *crtc_state)
curstate = plane_state;
}
- pipe_wm->pipe_enabled = crtc_state->base.active;
+ pipe_wm->pipe_enabled = crtc_state->hw.active;
if (sprstate) {
- pipe_wm->sprites_enabled = sprstate->base.visible;
- pipe_wm->sprites_scaled = sprstate->base.visible &&
- (drm_rect_width(&sprstate->base.dst) != drm_rect_width(&sprstate->base.src) >> 16 ||
- drm_rect_height(&sprstate->base.dst) != drm_rect_height(&sprstate->base.src) >> 16);
+ pipe_wm->sprites_enabled = sprstate->uapi.visible;
+ pipe_wm->sprites_scaled = sprstate->uapi.visible &&
+ (drm_rect_width(&sprstate->uapi.dst) != drm_rect_width(&sprstate->uapi.src) >> 16 ||
+ drm_rect_height(&sprstate->uapi.dst) != drm_rect_height(&sprstate->uapi.src) >> 16);
}
usable_level = max_level;
@@ -3162,11 +3213,11 @@ static int ilk_compute_pipe_wm(struct intel_crtc_state *crtc_state)
*/
static int ilk_compute_intermediate_wm(struct intel_crtc_state *newstate)
{
- struct intel_crtc *intel_crtc = to_intel_crtc(newstate->base.crtc);
+ struct intel_crtc *intel_crtc = to_intel_crtc(newstate->uapi.crtc);
struct drm_i915_private *dev_priv = to_i915(intel_crtc->base.dev);
struct intel_pipe_wm *a = &newstate->wm.ilk.intermediate;
struct intel_atomic_state *intel_state =
- to_intel_atomic_state(newstate->base.state);
+ to_intel_atomic_state(newstate->uapi.state);
const struct intel_crtc_state *oldstate =
intel_atomic_get_old_crtc_state(intel_state, intel_crtc);
const struct intel_pipe_wm *b = &oldstate->wm.ilk.optimal;
@@ -3178,7 +3229,7 @@ static int ilk_compute_intermediate_wm(struct intel_crtc_state *newstate)
* and after the vblank.
*/
*a = newstate->wm.ilk.optimal;
- if (!newstate->base.active || drm_atomic_crtc_needs_modeset(&newstate->base) ||
+ if (!newstate->hw.active || drm_atomic_crtc_needs_modeset(&newstate->uapi) ||
intel_state->skip_intermediate_wm)
return 0;
@@ -3588,10 +3639,8 @@ static void ilk_write_wm_values(struct drm_i915_private *dev_priv,
dev_priv->wm.hw = *results;
}
-bool ilk_disable_lp_wm(struct drm_device *dev)
+bool ilk_disable_lp_wm(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv = to_i915(dev);
-
return _ilk_disable_lp_wm(dev_priv, WM_DIRTY_LP_ALL);
}
@@ -3652,7 +3701,7 @@ skl_setup_sagv_block_time(struct drm_i915_private *dev_priv)
return;
}
- DRM_DEBUG_DRIVER("Couldn't read SAGV block time!\n");
+ drm_dbg(&dev_priv->drm, "Couldn't read SAGV block time!\n");
} else if (IS_GEN(dev_priv, 11)) {
dev_priv->sagv_block_time_us = 10;
return;
@@ -3692,7 +3741,7 @@ intel_enable_sagv(struct drm_i915_private *dev_priv)
if (dev_priv->sagv_status == I915_SAGV_ENABLED)
return 0;
- DRM_DEBUG_KMS("Enabling SAGV\n");
+ drm_dbg_kms(&dev_priv->drm, "Enabling SAGV\n");
ret = sandybridge_pcode_write(dev_priv, GEN9_PCODE_SAGV_CONTROL,
GEN9_SAGV_ENABLE);
@@ -3703,11 +3752,11 @@ intel_enable_sagv(struct drm_i915_private *dev_priv)
* don't actually have SAGV.
*/
if (IS_SKYLAKE(dev_priv) && ret == -ENXIO) {
- DRM_DEBUG_DRIVER("No SAGV found on system, ignoring\n");
+ drm_dbg(&dev_priv->drm, "No SAGV found on system, ignoring\n");
dev_priv->sagv_status = I915_SAGV_NOT_CONTROLLED;
return 0;
} else if (ret < 0) {
- DRM_ERROR("Failed to enable SAGV\n");
+ drm_err(&dev_priv->drm, "Failed to enable SAGV\n");
return ret;
}
@@ -3726,7 +3775,7 @@ intel_disable_sagv(struct drm_i915_private *dev_priv)
if (dev_priv->sagv_status == I915_SAGV_DISABLED)
return 0;
- DRM_DEBUG_KMS("Disabling SAGV\n");
+ drm_dbg_kms(&dev_priv->drm, "Disabling SAGV\n");
/* bspec says to keep retrying for at least 1 ms */
ret = skl_pcode_request(dev_priv, GEN9_PCODE_SAGV_CONTROL,
GEN9_SAGV_DISABLE,
@@ -3737,11 +3786,11 @@ intel_disable_sagv(struct drm_i915_private *dev_priv)
* don't actually have SAGV.
*/
if (IS_SKYLAKE(dev_priv) && ret == -ENXIO) {
- DRM_DEBUG_DRIVER("No SAGV found on system, ignoring\n");
+ drm_dbg(&dev_priv->drm, "No SAGV found on system, ignoring\n");
dev_priv->sagv_status = I915_SAGV_NOT_CONTROLLED;
return 0;
} else if (ret < 0) {
- DRM_ERROR("Failed to disable SAGV (%d)\n", ret);
+ drm_err(&dev_priv->drm, "Failed to disable SAGV (%d)\n", ret);
return ret;
}
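
Both SAGV paths wrap a pcode mailbox transaction, and the disable side explicitly retries ("bspec says to keep retrying for at least 1 ms"). The toy loop below models only the retry/timeout shape of skl_pcode_request(); the poll function and attempt budget are stand-ins, not the mailbox protocol.

#include <stdbool.h>
#include <stdio.h>

static bool query(int attempt)
{
	return attempt >= 3; /* pretend the HW acks on the third poll */
}

int main(void)
{
	int attempt, max_attempts = 10; /* ~1 ms worth of polls, say */

	for (attempt = 0; attempt < max_attempts; attempt++)
		if (query(attempt))
			break;

	if (attempt == max_attempts)
		printf("Failed to disable SAGV (-ETIMEDOUT)\n");
	else
		printf("SAGV disabled after %d polls\n", attempt + 1);
	return 0;
}
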
@@ -3780,7 +3829,7 @@ bool intel_can_enable_sagv(struct intel_atomic_state *state)
crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
crtc_state = to_intel_crtc_state(crtc->base.state);
- if (crtc->base.state->adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE)
+ if (crtc_state->hw.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE)
return false;
for_each_intel_plane_on_crtc(dev, crtc, plane) {
@@ -3830,7 +3879,7 @@ static u16 intel_get_ddb_size(struct drm_i915_private *dev_priv,
if (INTEL_GEN(dev_priv) < 11)
return ddb_size - 4; /* 4 blocks for bypass path allocation */
- adjusted_mode = &crtc_state->base.adjusted_mode;
+ adjusted_mode = &crtc_state->hw.adjusted_mode;
total_data_bw = total_data_rate * drm_mode_vrefresh(adjusted_mode);
/*
@@ -3859,16 +3908,16 @@ skl_ddb_get_pipe_allocation_limits(struct drm_i915_private *dev_priv,
struct skl_ddb_entry *alloc, /* out */
int *num_active /* out */)
{
- struct drm_atomic_state *state = crtc_state->base.state;
+ struct drm_atomic_state *state = crtc_state->uapi.state;
struct intel_atomic_state *intel_state = to_intel_atomic_state(state);
- struct drm_crtc *for_crtc = crtc_state->base.crtc;
+ struct drm_crtc *for_crtc = crtc_state->uapi.crtc;
const struct intel_crtc *crtc;
u32 pipe_width = 0, total_width = 0, width_before_pipe = 0;
enum pipe for_pipe = to_intel_crtc(for_crtc)->pipe;
u16 ddb_size;
u32 i;
- if (WARN_ON(!state) || !crtc_state->base.active) {
+ if (WARN_ON(!state) || !crtc_state->hw.active) {
alloc->start = 0;
alloc->end = 0;
*num_active = hweight8(dev_priv->active_pipes);
@@ -3907,11 +3956,11 @@ skl_ddb_get_pipe_allocation_limits(struct drm_i915_private *dev_priv,
*/
for_each_new_intel_crtc_in_state(intel_state, crtc, crtc_state, i) {
const struct drm_display_mode *adjusted_mode =
- &crtc_state->base.adjusted_mode;
+ &crtc_state->hw.adjusted_mode;
enum pipe pipe = crtc->pipe;
int hdisplay, vdisplay;
- if (!crtc_state->base.enable)
+ if (!crtc_state->hw.enable)
continue;
drm_mode_get_hv_timing(adjusted_mode, &hdisplay, &vdisplay);
@@ -3942,7 +3991,7 @@ static unsigned int
skl_cursor_allocation(const struct intel_crtc_state *crtc_state,
int num_active)
{
- struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev);
+ struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
int level, max_level = ilk_wm_max_level(dev_priv);
struct skl_wm_level wm = {};
int ret, min_ddb_alloc = 0;
@@ -4082,10 +4131,10 @@ skl_plane_downscale_amount(const struct intel_crtc_state *crtc_state,
*
* n.b., src is 16.16 fixed point, dst is whole integer.
*/
- src_w = drm_rect_width(&plane_state->base.src) >> 16;
- src_h = drm_rect_height(&plane_state->base.src) >> 16;
- dst_w = drm_rect_width(&plane_state->base.dst);
- dst_h = drm_rect_height(&plane_state->base.dst);
+ src_w = drm_rect_width(&plane_state->uapi.src) >> 16;
+ src_h = drm_rect_height(&plane_state->uapi.src) >> 16;
+ dst_w = drm_rect_width(&plane_state->uapi.dst);
+ dst_h = drm_rect_height(&plane_state->uapi.dst);
fp_w_ratio = div_fixed16(src_w, dst_w);
fp_h_ratio = div_fixed16(src_h, dst_h);
@@ -4100,21 +4149,21 @@ skl_plane_relative_data_rate(const struct intel_crtc_state *crtc_state,
const struct intel_plane_state *plane_state,
int color_plane)
{
- struct intel_plane *plane = to_intel_plane(plane_state->base.plane);
- const struct drm_framebuffer *fb = plane_state->base.fb;
+ struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
+ const struct drm_framebuffer *fb = plane_state->hw.fb;
u32 data_rate;
u32 width = 0, height = 0;
uint_fixed_16_16_t down_scale_amount;
u64 rate;
- if (!plane_state->base.visible)
+ if (!plane_state->uapi.visible)
return 0;
if (plane->id == PLANE_CURSOR)
return 0;
if (color_plane == 1 &&
- !drm_format_info_is_yuv_semiplanar(fb->format))
+ !intel_format_info_is_yuv_semiplanar(fb->format, fb->modifier))
return 0;
/*
@@ -4122,8 +4171,8 @@ skl_plane_relative_data_rate(const struct intel_crtc_state *crtc_state,
* the 90/270 degree plane rotation cases (to match the
* GTT mapping), hence no need to account for rotation here.
*/
- width = drm_rect_width(&plane_state->base.src) >> 16;
- height = drm_rect_height(&plane_state->base.src) >> 16;
+ width = drm_rect_width(&plane_state->uapi.src) >> 16;
+ height = drm_rect_height(&plane_state->uapi.src) >> 16;
/* UV plane does 1/2 pixel sub-sampling */
if (color_plane == 1) {
@@ -4146,7 +4195,7 @@ skl_get_total_relative_data_rate(struct intel_crtc_state *crtc_state,
u64 *plane_data_rate,
u64 *uv_plane_data_rate)
{
- struct drm_atomic_state *state = crtc_state->base.state;
+ struct drm_atomic_state *state = crtc_state->uapi.state;
struct intel_plane *plane;
const struct intel_plane_state *plane_state;
u64 total_data_rate = 0;
@@ -4181,7 +4230,7 @@ icl_get_total_relative_data_rate(struct intel_crtc_state *crtc_state,
const struct intel_plane_state *plane_state;
u64 total_data_rate = 0;
- if (WARN_ON(!crtc_state->base.state))
+ if (WARN_ON(!crtc_state->uapi.state))
return 0;
/* Calculate and cache data rate for each plane */
@@ -4225,8 +4274,8 @@ static int
skl_allocate_pipe_ddb(struct intel_crtc_state *crtc_state,
struct skl_ddb_allocation *ddb /* out */)
{
- struct drm_atomic_state *state = crtc_state->base.state;
- struct drm_crtc *crtc = crtc_state->base.crtc;
+ struct drm_atomic_state *state = crtc_state->uapi.state;
+ struct drm_crtc *crtc = crtc_state->uapi.crtc;
struct drm_i915_private *dev_priv = to_i915(crtc->dev);
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
struct skl_ddb_entry *alloc = &crtc_state->wm.skl.ddb;
@@ -4248,7 +4297,7 @@ skl_allocate_pipe_ddb(struct intel_crtc_state *crtc_state,
if (WARN_ON(!state))
return 0;
- if (!crtc_state->base.active) {
+ if (!crtc_state->hw.active) {
alloc->start = alloc->end = 0;
return 0;
}
@@ -4310,9 +4359,10 @@ skl_allocate_pipe_ddb(struct intel_crtc_state *crtc_state,
}
if (level < 0) {
- DRM_DEBUG_KMS("Requested display configuration exceeds system DDB limitations");
- DRM_DEBUG_KMS("minimum required %d/%d\n", blocks,
- alloc_size);
+ drm_dbg_kms(&dev_priv->drm,
+ "Requested display configuration exceeds system DDB limitations");
+ drm_dbg_kms(&dev_priv->drm, "minimum required %d/%d\n",
+ blocks, alloc_size);
return -EINVAL;
}
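
The -EINVAL path above fires when even watermark level 0 cannot fit the pipe's DDB allocation; the allocator walks down from the highest level until the per-level block demand fits. A self-contained model of that walk, with invented block counts:

#include <stdio.h>

int main(void)
{
	int min_blocks[4] = { 40, 80, 160, 320 }; /* per-level demand */
	int alloc_size = 150;
	int level;

	for (level = 3; level >= 0; level--)
		if (min_blocks[level] <= alloc_size)
			break;

	if (level < 0)
		printf("exceeds DDB: minimum required %d/%d\n",
		       min_blocks[0], alloc_size);
	else
		printf("highest usable level: %d\n", level);
	return 0;
}
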
@@ -4490,7 +4540,7 @@ intel_get_linetime_us(const struct intel_crtc_state *crtc_state)
u32 crtc_htotal;
uint_fixed_16_16_t linetime_us;
- if (!crtc_state->base.active)
+ if (!crtc_state->hw.active)
return u32_to_fixed16(0);
pixel_rate = crtc_state->pixel_rate;
@@ -4498,7 +4548,7 @@ intel_get_linetime_us(const struct intel_crtc_state *crtc_state)
if (WARN_ON(pixel_rate == 0))
return u32_to_fixed16(0);
- crtc_htotal = crtc_state->base.adjusted_mode.crtc_htotal;
+ crtc_htotal = crtc_state->hw.adjusted_mode.crtc_htotal;
linetime_us = div_fixed16(crtc_htotal * 1000, pixel_rate);
return linetime_us;
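
intel_get_linetime_us() is a one-liner in 16.16 fixed point: linetime_us = crtc_htotal * 1000 / pixel_rate, with pixel_rate in kHz. Worked example: an htotal of 2200 at 148500 kHz gives roughly 14.8 us per line. A minimal model of the fixed-point division follows; div_fixed16 here is a local stand-in, not the driver's helper.

#include <stdint.h>
#include <stdio.h>

static uint32_t div_fixed16(uint32_t num, uint32_t den)
{
	/* 16.16 fixed-point quotient */
	return (uint32_t)(((uint64_t)num << 16) / den);
}

int main(void)
{
	uint32_t lt = div_fixed16(2200 * 1000, 148500);

	printf("linetime = %u.%04u us\n",
	       lt >> 16, (lt & 0xffff) * 10000 >> 16);
	return 0;
}
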
@@ -4533,13 +4583,15 @@ skl_compute_wm_params(const struct intel_crtc_state *crtc_state,
u32 plane_pixel_rate, struct skl_wm_params *wp,
int color_plane)
{
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
u32 interm_pbpl;
/* only planar format has two planes */
- if (color_plane == 1 && !drm_format_info_is_yuv_semiplanar(format)) {
- DRM_DEBUG_KMS("Non planar format have single plane\n");
+ if (color_plane == 1 &&
+ !intel_format_info_is_yuv_semiplanar(format, modifier)) {
+ drm_dbg_kms(&dev_priv->drm,
+ "Non planar format have single plane\n");
return -EINVAL;
}
@@ -4550,7 +4602,7 @@ skl_compute_wm_params(const struct intel_crtc_state *crtc_state,
wp->x_tiled = modifier == I915_FORMAT_MOD_X_TILED;
wp->rc_surface = modifier == I915_FORMAT_MOD_Y_TILED_CCS ||
modifier == I915_FORMAT_MOD_Yf_TILED_CCS;
- wp->is_planar = drm_format_info_is_yuv_semiplanar(format);
+ wp->is_planar = intel_format_info_is_yuv_semiplanar(format, modifier);
wp->width = width;
if (color_plane == 1 && wp->is_planar)
@@ -4622,7 +4674,7 @@ skl_compute_plane_wm_params(const struct intel_crtc_state *crtc_state,
const struct intel_plane_state *plane_state,
struct skl_wm_params *wp, int color_plane)
{
- const struct drm_framebuffer *fb = plane_state->base.fb;
+ const struct drm_framebuffer *fb = plane_state->hw.fb;
int width;
/*
@@ -4630,11 +4682,11 @@ skl_compute_plane_wm_params(const struct intel_crtc_state *crtc_state,
* the 90/270 degree plane rotation cases (to match the
* GTT mapping), hence no need to account for rotation here.
*/
- width = drm_rect_width(&plane_state->base.src) >> 16;
+ width = drm_rect_width(&plane_state->uapi.src) >> 16;
return skl_compute_wm_params(crtc_state, width,
fb->format, fb->modifier,
- plane_state->base.rotation,
+ plane_state->hw.rotation,
skl_adjusted_plane_pixel_rate(crtc_state, plane_state),
wp, color_plane);
}
@@ -4654,7 +4706,7 @@ static void skl_compute_plane_wm(const struct intel_crtc_state *crtc_state,
const struct skl_wm_level *result_prev,
struct skl_wm_level *result /* out */)
{
- struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev);
+ struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
u32 latency = dev_priv->wm.skl_latency[level];
uint_fixed_16_16_t method1, method2;
uint_fixed_16_16_t selected_result;
@@ -4680,14 +4732,14 @@ static void skl_compute_plane_wm(const struct intel_crtc_state *crtc_state,
method1 = skl_wm_method1(dev_priv, wp->plane_pixel_rate,
wp->cpp, latency, wp->dbuf_block_size);
method2 = skl_wm_method2(wp->plane_pixel_rate,
- crtc_state->base.adjusted_mode.crtc_htotal,
+ crtc_state->hw.adjusted_mode.crtc_htotal,
latency,
wp->plane_blocks_per_line);
if (wp->y_tiled) {
selected_result = max_fixed16(method2, wp->y_tile_minimum);
} else {
- if ((wp->cpp * crtc_state->base.adjusted_mode.crtc_htotal /
+ if ((wp->cpp * crtc_state->hw.adjusted_mode.crtc_htotal /
wp->dbuf_block_size < 1) &&
(wp->plane_bytes_per_line / wp->dbuf_block_size < 1)) {
selected_result = method2;
@@ -4778,7 +4830,7 @@ skl_compute_wm_levels(const struct intel_crtc_state *crtc_state,
const struct skl_wm_params *wm_params,
struct skl_wm_level *levels)
{
- struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev);
+ struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
int level, max_level = ilk_wm_max_level(dev_priv);
struct skl_wm_level *result_prev = &levels[0];
@@ -4795,7 +4847,7 @@ skl_compute_wm_levels(const struct intel_crtc_state *crtc_state,
static u32
skl_compute_linetime_wm(const struct intel_crtc_state *crtc_state)
{
- struct drm_atomic_state *state = crtc_state->base.state;
+ struct drm_atomic_state *state = crtc_state->uapi.state;
struct drm_i915_private *dev_priv = to_i915(state->dev);
uint_fixed_16_16_t linetime_us;
u32 linetime_wm;
@@ -4814,7 +4866,7 @@ static void skl_compute_transition_wm(const struct intel_crtc_state *crtc_state,
const struct skl_wm_params *wp,
struct skl_plane_wm *wm)
{
- struct drm_device *dev = crtc_state->base.crtc->dev;
+ struct drm_device *dev = crtc_state->uapi.crtc->dev;
const struct drm_i915_private *dev_priv = to_i915(dev);
u16 trans_min, trans_y_tile_min;
const u16 trans_amount = 10; /* This is configurable amount */
@@ -4912,8 +4964,8 @@ static int skl_build_plane_wm_uv(struct intel_crtc_state *crtc_state,
static int skl_build_plane_wm(struct intel_crtc_state *crtc_state,
const struct intel_plane_state *plane_state)
{
- struct intel_plane *plane = to_intel_plane(plane_state->base.plane);
- const struct drm_framebuffer *fb = plane_state->base.fb;
+ struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
+ const struct drm_framebuffer *fb = plane_state->hw.fb;
enum plane_id plane_id = plane->id;
int ret;
@@ -4938,7 +4990,7 @@ static int skl_build_plane_wm(struct intel_crtc_state *crtc_state,
static int icl_build_plane_wm(struct intel_crtc_state *crtc_state,
const struct intel_plane_state *plane_state)
{
- enum plane_id plane_id = to_intel_plane(plane_state->base.plane)->id;
+ enum plane_id plane_id = to_intel_plane(plane_state->uapi.plane)->id;
int ret;
/* Watermarks calculated in master */
@@ -4946,7 +4998,7 @@ static int icl_build_plane_wm(struct intel_crtc_state *crtc_state,
return 0;
if (plane_state->planar_linked_plane) {
- const struct drm_framebuffer *fb = plane_state->base.fb;
+ const struct drm_framebuffer *fb = plane_state->hw.fb;
enum plane_id y_plane_id = plane_state->planar_linked_plane->id;
WARN_ON(!intel_wm_plane_visible(crtc_state, plane_state));
@@ -4974,7 +5026,7 @@ static int icl_build_plane_wm(struct intel_crtc_state *crtc_state,
static int skl_build_pipe_wm(struct intel_crtc_state *crtc_state)
{
- struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev);
+ struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
struct skl_pipe_wm *pipe_wm = &crtc_state->wm.skl.optimal;
struct intel_plane *plane;
const struct intel_plane_state *plane_state;
@@ -5151,8 +5203,8 @@ static int
skl_ddb_add_affected_planes(const struct intel_crtc_state *old_crtc_state,
struct intel_crtc_state *new_crtc_state)
{
- struct intel_atomic_state *state = to_intel_atomic_state(new_crtc_state->base.state);
- struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->base.crtc);
+ struct intel_atomic_state *state = to_intel_atomic_state(new_crtc_state->uapi.state);
+ struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->uapi.crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
struct intel_plane *plane;
@@ -5218,7 +5270,7 @@ skl_print_wm_changes(struct intel_atomic_state *state)
struct intel_crtc *crtc;
int i;
- if ((drm_debug & DRM_UT_KMS) == 0)
+ if (!drm_debug_enabled(DRM_UT_KMS))
return;
for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
@@ -5238,10 +5290,11 @@ skl_print_wm_changes(struct intel_atomic_state *state)
if (skl_ddb_entry_equal(old, new))
continue;
- DRM_DEBUG_KMS("[PLANE:%d:%s] ddb (%4d - %4d) -> (%4d - %4d), size %4d -> %4d\n",
- plane->base.base.id, plane->base.name,
- old->start, old->end, new->start, new->end,
- skl_ddb_entry_size(old), skl_ddb_entry_size(new));
+ drm_dbg_kms(&dev_priv->drm,
+ "[PLANE:%d:%s] ddb (%4d - %4d) -> (%4d - %4d), size %4d -> %4d\n",
+ plane->base.base.id, plane->base.name,
+ old->start, old->end, new->start, new->end,
+ skl_ddb_entry_size(old), skl_ddb_entry_size(new));
}
for_each_intel_plane_on_crtc(&dev_priv->drm, crtc, plane) {
@@ -5254,70 +5307,74 @@ skl_print_wm_changes(struct intel_atomic_state *state)
if (skl_plane_wm_equals(dev_priv, old_wm, new_wm))
continue;
- DRM_DEBUG_KMS("[PLANE:%d:%s] level %cwm0,%cwm1,%cwm2,%cwm3,%cwm4,%cwm5,%cwm6,%cwm7,%ctwm"
- " -> %cwm0,%cwm1,%cwm2,%cwm3,%cwm4,%cwm5,%cwm6,%cwm7,%ctwm\n",
- plane->base.base.id, plane->base.name,
- enast(old_wm->wm[0].plane_en), enast(old_wm->wm[1].plane_en),
- enast(old_wm->wm[2].plane_en), enast(old_wm->wm[3].plane_en),
- enast(old_wm->wm[4].plane_en), enast(old_wm->wm[5].plane_en),
- enast(old_wm->wm[6].plane_en), enast(old_wm->wm[7].plane_en),
- enast(old_wm->trans_wm.plane_en),
- enast(new_wm->wm[0].plane_en), enast(new_wm->wm[1].plane_en),
- enast(new_wm->wm[2].plane_en), enast(new_wm->wm[3].plane_en),
- enast(new_wm->wm[4].plane_en), enast(new_wm->wm[5].plane_en),
- enast(new_wm->wm[6].plane_en), enast(new_wm->wm[7].plane_en),
- enast(new_wm->trans_wm.plane_en));
-
- DRM_DEBUG_KMS("[PLANE:%d:%s] lines %c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d"
+ drm_dbg_kms(&dev_priv->drm,
+ "[PLANE:%d:%s] level %cwm0,%cwm1,%cwm2,%cwm3,%cwm4,%cwm5,%cwm6,%cwm7,%ctwm"
+ " -> %cwm0,%cwm1,%cwm2,%cwm3,%cwm4,%cwm5,%cwm6,%cwm7,%ctwm\n",
+ plane->base.base.id, plane->base.name,
+ enast(old_wm->wm[0].plane_en), enast(old_wm->wm[1].plane_en),
+ enast(old_wm->wm[2].plane_en), enast(old_wm->wm[3].plane_en),
+ enast(old_wm->wm[4].plane_en), enast(old_wm->wm[5].plane_en),
+ enast(old_wm->wm[6].plane_en), enast(old_wm->wm[7].plane_en),
+ enast(old_wm->trans_wm.plane_en),
+ enast(new_wm->wm[0].plane_en), enast(new_wm->wm[1].plane_en),
+ enast(new_wm->wm[2].plane_en), enast(new_wm->wm[3].plane_en),
+ enast(new_wm->wm[4].plane_en), enast(new_wm->wm[5].plane_en),
+ enast(new_wm->wm[6].plane_en), enast(new_wm->wm[7].plane_en),
+ enast(new_wm->trans_wm.plane_en));
+
+ drm_dbg_kms(&dev_priv->drm,
+ "[PLANE:%d:%s] lines %c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d"
" -> %c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d\n",
- plane->base.base.id, plane->base.name,
- enast(old_wm->wm[0].ignore_lines), old_wm->wm[0].plane_res_l,
- enast(old_wm->wm[1].ignore_lines), old_wm->wm[1].plane_res_l,
- enast(old_wm->wm[2].ignore_lines), old_wm->wm[2].plane_res_l,
- enast(old_wm->wm[3].ignore_lines), old_wm->wm[3].plane_res_l,
- enast(old_wm->wm[4].ignore_lines), old_wm->wm[4].plane_res_l,
- enast(old_wm->wm[5].ignore_lines), old_wm->wm[5].plane_res_l,
- enast(old_wm->wm[6].ignore_lines), old_wm->wm[6].plane_res_l,
- enast(old_wm->wm[7].ignore_lines), old_wm->wm[7].plane_res_l,
- enast(old_wm->trans_wm.ignore_lines), old_wm->trans_wm.plane_res_l,
-
- enast(new_wm->wm[0].ignore_lines), new_wm->wm[0].plane_res_l,
- enast(new_wm->wm[1].ignore_lines), new_wm->wm[1].plane_res_l,
- enast(new_wm->wm[2].ignore_lines), new_wm->wm[2].plane_res_l,
- enast(new_wm->wm[3].ignore_lines), new_wm->wm[3].plane_res_l,
- enast(new_wm->wm[4].ignore_lines), new_wm->wm[4].plane_res_l,
- enast(new_wm->wm[5].ignore_lines), new_wm->wm[5].plane_res_l,
- enast(new_wm->wm[6].ignore_lines), new_wm->wm[6].plane_res_l,
- enast(new_wm->wm[7].ignore_lines), new_wm->wm[7].plane_res_l,
- enast(new_wm->trans_wm.ignore_lines), new_wm->trans_wm.plane_res_l);
-
- DRM_DEBUG_KMS("[PLANE:%d:%s] blocks %4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d"
- " -> %4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d\n",
- plane->base.base.id, plane->base.name,
- old_wm->wm[0].plane_res_b, old_wm->wm[1].plane_res_b,
- old_wm->wm[2].plane_res_b, old_wm->wm[3].plane_res_b,
- old_wm->wm[4].plane_res_b, old_wm->wm[5].plane_res_b,
- old_wm->wm[6].plane_res_b, old_wm->wm[7].plane_res_b,
- old_wm->trans_wm.plane_res_b,
- new_wm->wm[0].plane_res_b, new_wm->wm[1].plane_res_b,
- new_wm->wm[2].plane_res_b, new_wm->wm[3].plane_res_b,
- new_wm->wm[4].plane_res_b, new_wm->wm[5].plane_res_b,
- new_wm->wm[6].plane_res_b, new_wm->wm[7].plane_res_b,
- new_wm->trans_wm.plane_res_b);
-
- DRM_DEBUG_KMS("[PLANE:%d:%s] min_ddb %4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d"
- " -> %4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d\n",
- plane->base.base.id, plane->base.name,
- old_wm->wm[0].min_ddb_alloc, old_wm->wm[1].min_ddb_alloc,
- old_wm->wm[2].min_ddb_alloc, old_wm->wm[3].min_ddb_alloc,
- old_wm->wm[4].min_ddb_alloc, old_wm->wm[5].min_ddb_alloc,
- old_wm->wm[6].min_ddb_alloc, old_wm->wm[7].min_ddb_alloc,
- old_wm->trans_wm.min_ddb_alloc,
- new_wm->wm[0].min_ddb_alloc, new_wm->wm[1].min_ddb_alloc,
- new_wm->wm[2].min_ddb_alloc, new_wm->wm[3].min_ddb_alloc,
- new_wm->wm[4].min_ddb_alloc, new_wm->wm[5].min_ddb_alloc,
- new_wm->wm[6].min_ddb_alloc, new_wm->wm[7].min_ddb_alloc,
- new_wm->trans_wm.min_ddb_alloc);
+ plane->base.base.id, plane->base.name,
+ enast(old_wm->wm[0].ignore_lines), old_wm->wm[0].plane_res_l,
+ enast(old_wm->wm[1].ignore_lines), old_wm->wm[1].plane_res_l,
+ enast(old_wm->wm[2].ignore_lines), old_wm->wm[2].plane_res_l,
+ enast(old_wm->wm[3].ignore_lines), old_wm->wm[3].plane_res_l,
+ enast(old_wm->wm[4].ignore_lines), old_wm->wm[4].plane_res_l,
+ enast(old_wm->wm[5].ignore_lines), old_wm->wm[5].plane_res_l,
+ enast(old_wm->wm[6].ignore_lines), old_wm->wm[6].plane_res_l,
+ enast(old_wm->wm[7].ignore_lines), old_wm->wm[7].plane_res_l,
+ enast(old_wm->trans_wm.ignore_lines), old_wm->trans_wm.plane_res_l,
+
+ enast(new_wm->wm[0].ignore_lines), new_wm->wm[0].plane_res_l,
+ enast(new_wm->wm[1].ignore_lines), new_wm->wm[1].plane_res_l,
+ enast(new_wm->wm[2].ignore_lines), new_wm->wm[2].plane_res_l,
+ enast(new_wm->wm[3].ignore_lines), new_wm->wm[3].plane_res_l,
+ enast(new_wm->wm[4].ignore_lines), new_wm->wm[4].plane_res_l,
+ enast(new_wm->wm[5].ignore_lines), new_wm->wm[5].plane_res_l,
+ enast(new_wm->wm[6].ignore_lines), new_wm->wm[6].plane_res_l,
+ enast(new_wm->wm[7].ignore_lines), new_wm->wm[7].plane_res_l,
+ enast(new_wm->trans_wm.ignore_lines), new_wm->trans_wm.plane_res_l);
+
+ drm_dbg_kms(&dev_priv->drm,
+ "[PLANE:%d:%s] blocks %4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d"
+ " -> %4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d\n",
+ plane->base.base.id, plane->base.name,
+ old_wm->wm[0].plane_res_b, old_wm->wm[1].plane_res_b,
+ old_wm->wm[2].plane_res_b, old_wm->wm[3].plane_res_b,
+ old_wm->wm[4].plane_res_b, old_wm->wm[5].plane_res_b,
+ old_wm->wm[6].plane_res_b, old_wm->wm[7].plane_res_b,
+ old_wm->trans_wm.plane_res_b,
+ new_wm->wm[0].plane_res_b, new_wm->wm[1].plane_res_b,
+ new_wm->wm[2].plane_res_b, new_wm->wm[3].plane_res_b,
+ new_wm->wm[4].plane_res_b, new_wm->wm[5].plane_res_b,
+ new_wm->wm[6].plane_res_b, new_wm->wm[7].plane_res_b,
+ new_wm->trans_wm.plane_res_b);
+
+ drm_dbg_kms(&dev_priv->drm,
+ "[PLANE:%d:%s] min_ddb %4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d"
+ " -> %4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d\n",
+ plane->base.base.id, plane->base.name,
+ old_wm->wm[0].min_ddb_alloc, old_wm->wm[1].min_ddb_alloc,
+ old_wm->wm[2].min_ddb_alloc, old_wm->wm[3].min_ddb_alloc,
+ old_wm->wm[4].min_ddb_alloc, old_wm->wm[5].min_ddb_alloc,
+ old_wm->wm[6].min_ddb_alloc, old_wm->wm[7].min_ddb_alloc,
+ old_wm->trans_wm.min_ddb_alloc,
+ new_wm->wm[0].min_ddb_alloc, new_wm->wm[1].min_ddb_alloc,
+ new_wm->wm[2].min_ddb_alloc, new_wm->wm[3].min_ddb_alloc,
+ new_wm->wm[4].min_ddb_alloc, new_wm->wm[5].min_ddb_alloc,
+ new_wm->wm[6].min_ddb_alloc, new_wm->wm[7].min_ddb_alloc,
+ new_wm->trans_wm.min_ddb_alloc);
}
}
}
@@ -5356,7 +5413,7 @@ skl_ddb_add_affected_pipes(struct intel_atomic_state *state)
if (ret)
return ret;
- state->active_pipe_changes = ~0;
+ state->active_pipe_changes = INTEL_INFO(dev_priv)->pipe_mask;
/*
* We usually only initialize state->active_pipes if we
@@ -5382,7 +5439,7 @@ skl_ddb_add_affected_pipes(struct intel_atomic_state *state)
* to grab the lock on *all* CRTC's.
*/
if (state->active_pipe_changes || state->modeset) {
- state->wm_results.dirty_pipes = ~0;
+ state->wm_results.dirty_pipes = INTEL_INFO(dev_priv)->pipe_mask;
ret = intel_add_all_pipes(state);
if (ret)
@@ -5436,7 +5493,7 @@ static int skl_wm_add_affected_planes(struct intel_atomic_state *state,
* power well the hardware state will go out of sync
* with the software state.
*/
- if (!drm_atomic_crtc_needs_modeset(&new_crtc_state->base) &&
+ if (!drm_atomic_crtc_needs_modeset(&new_crtc_state->uapi) &&
skl_plane_wm_equals(dev_priv,
&old_crtc_state->wm.skl.optimal.planes[plane_id],
&new_crtc_state->wm.skl.optimal.planes[plane_id]))
@@ -5500,11 +5557,12 @@ skl_compute_wm(struct intel_atomic_state *state)
}
static void skl_atomic_update_crtc_wm(struct intel_atomic_state *state,
- struct intel_crtc_state *crtc_state)
+ struct intel_crtc *crtc)
{
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
- struct drm_i915_private *dev_priv = to_i915(state->base.dev);
- struct skl_pipe_wm *pipe_wm = &crtc_state->wm.skl.optimal;
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ const struct intel_crtc_state *crtc_state =
+ intel_atomic_get_new_crtc_state(state, crtc);
+ const struct skl_pipe_wm *pipe_wm = &crtc_state->wm.skl.optimal;
enum pipe pipe = crtc->pipe;
if ((state->wm_results.dirty_pipes & BIT(crtc->pipe)) == 0)
@@ -5514,10 +5572,11 @@ static void skl_atomic_update_crtc_wm(struct intel_atomic_state *state,
}
static void skl_initial_wm(struct intel_atomic_state *state,
- struct intel_crtc_state *crtc_state)
+ struct intel_crtc *crtc)
{
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ const struct intel_crtc_state *crtc_state =
+ intel_atomic_get_new_crtc_state(state, crtc);
struct skl_ddb_values *results = &state->wm_results;
if ((results->dirty_pipes & BIT(crtc->pipe)) == 0)
@@ -5525,8 +5584,8 @@ static void skl_initial_wm(struct intel_atomic_state *state,
mutex_lock(&dev_priv->wm.wm_mutex);
- if (crtc_state->base.active_changed)
- skl_atomic_update_crtc_wm(state, crtc_state);
+ if (crtc_state->uapi.active_changed)
+ skl_atomic_update_crtc_wm(state, crtc);
mutex_unlock(&dev_priv->wm.wm_mutex);
}
@@ -5582,10 +5641,11 @@ static void ilk_program_watermarks(struct drm_i915_private *dev_priv)
}
static void ilk_initial_watermarks(struct intel_atomic_state *state,
- struct intel_crtc_state *crtc_state)
+ struct intel_crtc *crtc)
{
- struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev);
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ const struct intel_crtc_state *crtc_state =
+ intel_atomic_get_new_crtc_state(state, crtc);
mutex_lock(&dev_priv->wm.wm_mutex);
crtc->wm.active.ilk = crtc_state->wm.ilk.intermediate;
@@ -5594,10 +5654,11 @@ static void ilk_initial_watermarks(struct intel_atomic_state *state,
}
static void ilk_optimize_watermarks(struct intel_atomic_state *state,
- struct intel_crtc_state *crtc_state)
+ struct intel_crtc *crtc)
{
- struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev);
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ const struct intel_crtc_state *crtc_state =
+ intel_atomic_get_new_crtc_state(state, crtc);
if (!crtc_state->wm.need_postvbl_update)
return;
@@ -5905,19 +5966,22 @@ void g4x_wm_get_hw_state(struct drm_i915_private *dev_priv)
crtc_state->wm.g4x.optimal = *active;
crtc_state->wm.g4x.intermediate = *active;
- DRM_DEBUG_KMS("Initial watermarks: pipe %c, plane=%d, cursor=%d, sprite=%d\n",
- pipe_name(pipe),
- wm->pipe[pipe].plane[PLANE_PRIMARY],
- wm->pipe[pipe].plane[PLANE_CURSOR],
- wm->pipe[pipe].plane[PLANE_SPRITE0]);
+ drm_dbg_kms(&dev_priv->drm,
+ "Initial watermarks: pipe %c, plane=%d, cursor=%d, sprite=%d\n",
+ pipe_name(pipe),
+ wm->pipe[pipe].plane[PLANE_PRIMARY],
+ wm->pipe[pipe].plane[PLANE_CURSOR],
+ wm->pipe[pipe].plane[PLANE_SPRITE0]);
}
- DRM_DEBUG_KMS("Initial SR watermarks: plane=%d, cursor=%d fbc=%d\n",
- wm->sr.plane, wm->sr.cursor, wm->sr.fbc);
- DRM_DEBUG_KMS("Initial HPLL watermarks: plane=%d, SR cursor=%d fbc=%d\n",
- wm->hpll.plane, wm->hpll.cursor, wm->hpll.fbc);
- DRM_DEBUG_KMS("Initial SR=%s HPLL=%s FBC=%s\n",
- yesno(wm->cxsr), yesno(wm->hpll_en), yesno(wm->fbc_en));
+ drm_dbg_kms(&dev_priv->drm,
+ "Initial SR watermarks: plane=%d, cursor=%d fbc=%d\n",
+ wm->sr.plane, wm->sr.cursor, wm->sr.fbc);
+ drm_dbg_kms(&dev_priv->drm,
+ "Initial HPLL watermarks: plane=%d, SR cursor=%d fbc=%d\n",
+ wm->hpll.plane, wm->hpll.cursor, wm->hpll.fbc);
+ drm_dbg_kms(&dev_priv->drm, "Initial SR=%s HPLL=%s FBC=%s\n",
+ yesno(wm->cxsr), yesno(wm->hpll_en), yesno(wm->fbc_en));
}
void g4x_wm_sanitize(struct drm_i915_private *dev_priv)
@@ -5938,7 +6002,7 @@ void g4x_wm_sanitize(struct drm_i915_private *dev_priv)
enum plane_id plane_id = plane->id;
int level;
- if (plane_state->base.visible)
+ if (plane_state->uapi.visible)
continue;
for (level = 0; level < 3; level++) {
@@ -6009,8 +6073,9 @@ void vlv_wm_get_hw_state(struct drm_i915_private *dev_priv)
if (wait_for((vlv_punit_read(dev_priv, PUNIT_REG_DDR_SETUP2) &
FORCE_DDR_FREQ_REQ_ACK) == 0, 3)) {
- DRM_DEBUG_KMS("Punit not acking DDR DVFS request, "
- "assuming DDR DVFS is disabled\n");
+ drm_dbg_kms(&dev_priv->drm,
+ "Punit not acking DDR DVFS request, "
+ "assuming DDR DVFS is disabled\n");
dev_priv->wm.max_level = VLV_WM_LEVEL_PM5;
} else {
val = vlv_punit_read(dev_priv, PUNIT_REG_DDR_SETUP2);
@@ -6061,16 +6126,18 @@ void vlv_wm_get_hw_state(struct drm_i915_private *dev_priv)
crtc_state->wm.vlv.optimal = *active;
crtc_state->wm.vlv.intermediate = *active;
- DRM_DEBUG_KMS("Initial watermarks: pipe %c, plane=%d, cursor=%d, sprite0=%d, sprite1=%d\n",
- pipe_name(pipe),
- wm->pipe[pipe].plane[PLANE_PRIMARY],
- wm->pipe[pipe].plane[PLANE_CURSOR],
- wm->pipe[pipe].plane[PLANE_SPRITE0],
- wm->pipe[pipe].plane[PLANE_SPRITE1]);
+ drm_dbg_kms(&dev_priv->drm,
+ "Initial watermarks: pipe %c, plane=%d, cursor=%d, sprite0=%d, sprite1=%d\n",
+ pipe_name(pipe),
+ wm->pipe[pipe].plane[PLANE_PRIMARY],
+ wm->pipe[pipe].plane[PLANE_CURSOR],
+ wm->pipe[pipe].plane[PLANE_SPRITE0],
+ wm->pipe[pipe].plane[PLANE_SPRITE1]);
}
- DRM_DEBUG_KMS("Initial watermarks: SR plane=%d, SR cursor=%d level=%d cxsr=%d\n",
- wm->sr.plane, wm->sr.cursor, wm->level, wm->cxsr);
+ drm_dbg_kms(&dev_priv->drm,
+ "Initial watermarks: SR plane=%d, SR cursor=%d level=%d cxsr=%d\n",
+ wm->sr.plane, wm->sr.cursor, wm->level, wm->cxsr);
}
void vlv_wm_sanitize(struct drm_i915_private *dev_priv)
@@ -6093,7 +6160,7 @@ void vlv_wm_sanitize(struct drm_i915_private *dev_priv)
enum plane_id plane_id = plane->id;
int level;
- if (plane_state->base.visible)
+ if (plane_state->uapi.visible)
continue;
for (level = 0; level < wm_state->num_levels; level++) {
@@ -6369,7 +6436,6 @@ static void cpt_init_clock_gating(struct drm_i915_private *dev_priv)
val &= ~TRANS_CHICKEN2_FDI_POLARITY_REVERSED;
if (dev_priv->vbt.fdi_rx_polarity_inverted)
val |= TRANS_CHICKEN2_FDI_POLARITY_REVERSED;
- val &= ~TRANS_CHICKEN2_FRAME_START_DELAY_MASK;
val &= ~TRANS_CHICKEN2_DISABLE_DEEP_COLOR_COUNTER;
val &= ~TRANS_CHICKEN2_DISABLE_DEEP_COLOR_MODESWITCH;
I915_WRITE(TRANS_CHICKEN2(pipe), val);
@@ -6387,8 +6453,9 @@ static void gen6_check_mch_setup(struct drm_i915_private *dev_priv)
tmp = I915_READ(MCH_SSKPD);
if ((tmp & MCH_SSKPD_WM0_MASK) != MCH_SSKPD_WM0_VAL)
- DRM_DEBUG_KMS("Wrong MCH_SSKPD value: 0x%08x This can cause underruns.\n",
- tmp);
+ drm_dbg_kms(&dev_priv->drm,
+ "Wrong MCH_SSKPD value: 0x%08x This can cause underruns.\n",
+ tmp);
}
static void gen6_init_clock_gating(struct drm_i915_private *dev_priv)
@@ -6583,6 +6650,10 @@ static void tgl_init_clock_gating(struct drm_i915_private *dev_priv)
u32 vd_pg_enable = 0;
unsigned int i;
+ /* Wa_1408615072:tgl */
+ intel_uncore_rmw(&dev_priv->uncore, UNSLICE_UNIT_LEVEL_CLKGATE2,
+ 0, VSUNIT_CLKGATE_DIS_TGL);
+
/* This is not a WA. Enable VD HCP & MFX_ENC powergate */
for (i = 0; i < I915_MAX_VCS; i++) {
if (HAS_ENGINE(dev_priv, _VCS(i)))
@@ -7099,7 +7170,8 @@ void intel_suspend_hw(struct drm_i915_private *dev_priv)
static void nop_init_clock_gating(struct drm_i915_private *dev_priv)
{
- DRM_DEBUG_KMS("No clock gating settings or workarounds applied.\n");
+ drm_dbg_kms(&dev_priv->drm,
+ "No clock gating settings or workarounds applied.\n");
}
/**
@@ -7166,9 +7238,9 @@ void intel_init_pm(struct drm_i915_private *dev_priv)
{
/* For cxsr */
if (IS_PINEVIEW(dev_priv))
- i915_pineview_get_mem_freq(dev_priv);
+ pnv_get_mem_freq(dev_priv);
else if (IS_GEN(dev_priv, 5))
- i915_ironlake_get_mem_freq(dev_priv);
+ ilk_get_mem_freq(dev_priv);
if (intel_has_sagv(dev_priv))
skl_setup_sagv_block_time(dev_priv);
@@ -7194,8 +7266,9 @@ void intel_init_pm(struct drm_i915_private *dev_priv)
dev_priv->display.optimize_watermarks =
ilk_optimize_watermarks;
} else {
- DRM_DEBUG_KMS("Failed to read display plane latency. "
- "Disable CxSR\n");
+ drm_dbg_kms(&dev_priv->drm,
+ "Failed to read display plane latency. "
+ "Disable CxSR\n");
}
} else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
vlv_setup_wm_latency(dev_priv);
@@ -7215,7 +7288,8 @@ void intel_init_pm(struct drm_i915_private *dev_priv)
dev_priv->is_ddr3,
dev_priv->fsb_freq,
dev_priv->mem_freq)) {
- DRM_INFO("failed to find known CxSR latency "
+ drm_info(&dev_priv->drm,
+ "failed to find known CxSR latency "
"(found ddr%s fsb freq %d, mem freq %d), "
"disabling CxSR\n",
(dev_priv->is_ddr3 == 1) ? "3" : "2",
@@ -7224,7 +7298,7 @@ void intel_init_pm(struct drm_i915_private *dev_priv)
intel_set_memory_cxsr(dev_priv, false);
dev_priv->display.update_wm = NULL;
} else
- dev_priv->display.update_wm = pineview_update_wm;
+ dev_priv->display.update_wm = pnv_update_wm;
} else if (IS_GEN(dev_priv, 4)) {
dev_priv->display.update_wm = i965_update_wm;
} else if (IS_GEN(dev_priv, 3)) {
@@ -7239,7 +7313,8 @@ void intel_init_pm(struct drm_i915_private *dev_priv)
dev_priv->display.get_fifo_size = i830_get_fifo_size;
}
} else {
- DRM_ERROR("unexpected fall-through in intel_init_pm\n");
+ drm_err(&dev_priv->drm,
+ "unexpected fall-through in %s\n", __func__);
}
}
diff --git a/drivers/gpu/drm/i915/intel_pm.h b/drivers/gpu/drm/i915/intel_pm.h
index b579c724b915..c06c6a846d9a 100644
--- a/drivers/gpu/drm/i915/intel_pm.h
+++ b/drivers/gpu/drm/i915/intel_pm.h
@@ -54,7 +54,7 @@ void skl_write_plane_wm(struct intel_plane *plane,
const struct intel_crtc_state *crtc_state);
void skl_write_cursor_wm(struct intel_plane *plane,
const struct intel_crtc_state *crtc_state);
-bool ilk_disable_lp_wm(struct drm_device *dev);
+bool ilk_disable_lp_wm(struct drm_i915_private *dev_priv);
void intel_init_ipc(struct drm_i915_private *dev_priv);
void intel_enable_ipc(struct drm_i915_private *dev_priv);
diff --git a/drivers/gpu/drm/i915/intel_region_lmem.c b/drivers/gpu/drm/i915/intel_region_lmem.c
index 583118095635..14b59b899c9b 100644
--- a/drivers/gpu/drm/i915/intel_region_lmem.c
+++ b/drivers/gpu/drm/i915/intel_region_lmem.c
@@ -51,8 +51,10 @@ static int init_fake_lmem_bar(struct intel_memory_region *mem)
static void release_fake_lmem_bar(struct intel_memory_region *mem)
{
- if (drm_mm_node_allocated(&mem->fake_mappable))
- drm_mm_remove_node(&mem->fake_mappable);
+ if (!drm_mm_node_allocated(&mem->fake_mappable))
+ return;
+
+ drm_mm_remove_node(&mem->fake_mappable);
dma_unmap_resource(&mem->i915->drm.pdev->dev,
mem->remap_addr,
@@ -88,6 +90,8 @@ region_lmem_init(struct intel_memory_region *mem)
if (ret)
io_mapping_fini(&mem->iomap);
+ intel_memory_region_set_name(mem, "local");
+
return ret;
}
@@ -121,10 +125,12 @@ intel_setup_fake_lmem(struct drm_i915_private *i915)
io_start,
&intel_region_lmem_ops);
if (!IS_ERR(mem)) {
- DRM_INFO("Intel graphics fake LMEM: %pR\n", &mem->region);
- DRM_INFO("Intel graphics fake LMEM IO start: %llx\n",
- (u64)mem->io_start);
- DRM_INFO("Intel graphics fake LMEM size: %llx\n",
+ drm_info(&i915->drm, "Intel graphics fake LMEM: %pR\n",
+ &mem->region);
+ drm_info(&i915->drm,
+ "Intel graphics fake LMEM IO start: %llx\n",
+ (u64)mem->io_start);
+ drm_info(&i915->drm, "Intel graphics fake LMEM size: %llx\n",
(u64)resource_size(&mem->region));
}
diff --git a/drivers/gpu/drm/i915/intel_sideband.c b/drivers/gpu/drm/i915/intel_sideband.c
index e06b35b844a0..cbfb7171d62d 100644
--- a/drivers/gpu/drm/i915/intel_sideband.c
+++ b/drivers/gpu/drm/i915/intel_sideband.c
@@ -105,8 +105,8 @@ static int vlv_sideband_rw(struct drm_i915_private *i915,
if (intel_wait_for_register(uncore,
VLV_IOSF_DOORBELL_REQ, IOSF_SB_BUSY, 0,
5)) {
- DRM_DEBUG_DRIVER("IOSF sideband idle wait (%s) timed out\n",
- is_read ? "read" : "write");
+ drm_dbg(&i915->drm, "IOSF sideband idle wait (%s) timed out\n",
+ is_read ? "read" : "write");
return -EAGAIN;
}
@@ -129,8 +129,8 @@ static int vlv_sideband_rw(struct drm_i915_private *i915,
*val = intel_uncore_read_fw(uncore, VLV_IOSF_DATA);
err = 0;
} else {
- DRM_DEBUG_DRIVER("IOSF sideband finish wait (%s) timed out\n",
- is_read ? "read" : "write");
+ drm_dbg(&i915->drm, "IOSF sideband finish wait (%s) timed out\n",
+ is_read ? "read" : "write");
err = -ETIMEDOUT;
}
@@ -283,7 +283,8 @@ static int intel_sbi_rw(struct drm_i915_private *i915, u16 reg,
if (intel_wait_for_register_fw(uncore,
SBI_CTL_STAT, SBI_BUSY, 0,
100)) {
- DRM_ERROR("timeout waiting for SBI to become ready\n");
+ drm_err(&i915->drm,
+ "timeout waiting for SBI to become ready\n");
return -EBUSY;
}
@@ -301,12 +302,13 @@ static int intel_sbi_rw(struct drm_i915_private *i915, u16 reg,
if (__intel_wait_for_register_fw(uncore,
SBI_CTL_STAT, SBI_BUSY, 0,
100, 100, &cmd)) {
- DRM_ERROR("timeout waiting for SBI to complete read\n");
+ drm_err(&i915->drm,
+ "timeout waiting for SBI to complete read\n");
return -ETIMEDOUT;
}
if (cmd & SBI_RESPONSE_FAIL) {
- DRM_ERROR("error during SBI read of reg %x\n", reg);
+ drm_err(&i915->drm, "error during SBI read of reg %x\n", reg);
return -ENXIO;
}
@@ -426,8 +428,9 @@ int sandybridge_pcode_read(struct drm_i915_private *i915, u32 mbox,
mutex_unlock(&i915->sb_lock);
if (err) {
- DRM_DEBUG_DRIVER("warning: pcode (read from mbox %x) mailbox access failed for %ps: %d\n",
- mbox, __builtin_return_address(0), err);
+ drm_dbg(&i915->drm,
+ "warning: pcode (read from mbox %x) mailbox access failed for %ps: %d\n",
+ mbox, __builtin_return_address(0), err);
}
return err;
@@ -447,8 +450,9 @@ int sandybridge_pcode_write_timeout(struct drm_i915_private *i915,
mutex_unlock(&i915->sb_lock);
if (err) {
- DRM_DEBUG_DRIVER("warning: pcode (write of 0x%08x to mbox %x) mailbox access failed for %ps: %d\n",
- val, mbox, __builtin_return_address(0), err);
+ drm_dbg(&i915->drm,
+ "warning: pcode (write of 0x%08x to mbox %x) mailbox access failed for %ps: %d\n",
+ val, mbox, __builtin_return_address(0), err);
}
return err;
@@ -519,7 +523,8 @@ int skl_pcode_request(struct drm_i915_private *i915, u32 mbox, u32 request,
* requests, and for any quirks of the PCODE firmware that delays
* the request completion.
*/
- DRM_DEBUG_KMS("PCODE timeout, retrying with preemption disabled\n");
+ drm_dbg_kms(&i915->drm,
+ "PCODE timeout, retrying with preemption disabled\n");
WARN_ON_ONCE(timeout_base_ms > 3);
preempt_disable();
ret = wait_for_atomic(COND, 50);
diff --git a/drivers/gpu/drm/i915/intel_uncore.c b/drivers/gpu/drm/i915/intel_uncore.c
index 94a97bf8c021..5f2cf6f43b8b 100644
--- a/drivers/gpu/drm/i915/intel_uncore.c
+++ b/drivers/gpu/drm/i915/intel_uncore.c
@@ -359,7 +359,8 @@ static void __gen6_gt_wait_for_fifo(struct intel_uncore *uncore)
if (wait_for_atomic((n = fifo_free_entries(uncore)) >
GT_FIFO_NUM_RESERVED_ENTRIES,
GT_FIFO_TIMEOUT_MS)) {
- DRM_DEBUG("GT_FIFO timeout, entries: %u\n", n);
+ drm_dbg(&uncore->i915->drm,
+ "GT_FIFO timeout, entries: %u\n", n);
return;
}
}
@@ -432,7 +433,7 @@ intel_uncore_forcewake_reset(struct intel_uncore *uncore)
break;
if (--retry_count == 0) {
- DRM_ERROR("Timed out waiting for forcewake timers to finish\n");
+ drm_err(&uncore->i915->drm, "Timed out waiting for forcewake timers to finish\n");
break;
}
@@ -490,7 +491,7 @@ gen6_check_for_fifo_debug(struct intel_uncore *uncore)
fifodbg = __raw_uncore_read32(uncore, GTFIFODBG);
if (unlikely(fifodbg)) {
- DRM_DEBUG_DRIVER("GTFIFODBG = 0x08%x\n", fifodbg);
+ drm_dbg(&uncore->i915->drm, "GTFIFODBG = 0x08%x\n", fifodbg);
__raw_uncore_write32(uncore, GTFIFODBG, fifodbg);
}
@@ -562,7 +563,7 @@ void intel_uncore_resume_early(struct intel_uncore *uncore)
unsigned int restore_forcewake;
if (intel_uncore_unclaimed_mmio(uncore))
- DRM_DEBUG("unclaimed mmio detected on resume, clearing\n");
+ drm_dbg(&uncore->i915->drm, "unclaimed mmio detected on resume, clearing\n");
if (!intel_uncore_has_forcewake(uncore))
return;
@@ -1595,8 +1596,8 @@ static int intel_uncore_fw_domains_init(struct intel_uncore *uncore)
spin_unlock_irq(&uncore->lock);
if (!(ecobus & FORCEWAKE_MT_ENABLE)) {
- DRM_INFO("No MT forcewake available on Ivybridge, this can result in issues\n");
- DRM_INFO("when using vblank-synced partial screen updates.\n");
+ drm_info(&i915->drm, "No MT forcewake available on Ivybridge, this can result in issues\n");
+ drm_info(&i915->drm, "when using vblank-synced partial screen updates.\n");
fw_domain_fini(uncore, FW_DOMAIN_ID_RENDER);
fw_domain_init(uncore, FW_DOMAIN_ID_RENDER,
FORCEWAKE, FORCEWAKE_ACK);
@@ -1683,8 +1684,7 @@ static int uncore_mmio_setup(struct intel_uncore *uncore)
mmio_size = 2 * 1024 * 1024;
uncore->regs = pci_iomap(pdev, mmio_bar, mmio_size);
if (uncore->regs == NULL) {
- DRM_ERROR("failed to map registers\n");
-
+ drm_err(&i915->drm, "failed to map registers\n");
return -EIO;
}
@@ -1807,7 +1807,7 @@ int intel_uncore_init_mmio(struct intel_uncore *uncore)
/* clear out unclaimed reg detection bit */
if (intel_uncore_unclaimed_mmio(uncore))
- DRM_DEBUG("unclaimed mmio detected on uncore init, clearing\n");
+ drm_dbg(&i915->drm, "unclaimed mmio detected on uncore init, clearing\n");
return 0;
@@ -2072,9 +2072,10 @@ intel_uncore_arm_unclaimed_mmio_detection(struct intel_uncore *uncore)
if (unlikely(check_for_unclaimed_mmio(uncore))) {
if (!i915_modparams.mmio_debug) {
- DRM_DEBUG("Unclaimed register detected, "
- "enabling oneshot unclaimed register reporting. "
- "Please use i915.mmio_debug=N for more information.\n");
+ drm_dbg(&uncore->i915->drm,
+ "Unclaimed register detected, "
+ "enabling oneshot unclaimed register reporting. "
+ "Please use i915.mmio_debug=N for more information.\n");
i915_modparams.mmio_debug++;
}
uncore->debug->unclaimed_mmio_check--;
diff --git a/drivers/gpu/drm/i915/intel_wakeref.c b/drivers/gpu/drm/i915/intel_wakeref.c
index 59aa1b6f1827..8fbf6f4d3f26 100644
--- a/drivers/gpu/drm/i915/intel_wakeref.c
+++ b/drivers/gpu/drm/i915/intel_wakeref.c
@@ -95,16 +95,17 @@ static void __intel_wakeref_put_work(struct work_struct *wrk)
void __intel_wakeref_init(struct intel_wakeref *wf,
struct intel_runtime_pm *rpm,
const struct intel_wakeref_ops *ops,
- struct lock_class_key *key)
+ struct intel_wakeref_lockclass *key)
{
wf->rpm = rpm;
wf->ops = ops;
- __mutex_init(&wf->mutex, "wakeref", key);
+ __mutex_init(&wf->mutex, "wakeref.mutex", &key->mutex);
atomic_set(&wf->count, 0);
wf->wakeref = 0;
INIT_WORK(&wf->work, __intel_wakeref_put_work);
+ lockdep_init_map(&wf->work.lockdep_map, "wakeref.work", &key->work, 0);
}
int intel_wakeref_wait_for_idle(struct intel_wakeref *wf)
diff --git a/drivers/gpu/drm/i915/intel_wakeref.h b/drivers/gpu/drm/i915/intel_wakeref.h
index da6e8fd506e6..7d1e676b71ef 100644
--- a/drivers/gpu/drm/i915/intel_wakeref.h
+++ b/drivers/gpu/drm/i915/intel_wakeref.h
@@ -44,12 +44,17 @@ struct intel_wakeref {
struct work_struct work;
};
+struct intel_wakeref_lockclass {
+ struct lock_class_key mutex;
+ struct lock_class_key work;
+};
+
void __intel_wakeref_init(struct intel_wakeref *wf,
struct intel_runtime_pm *rpm,
const struct intel_wakeref_ops *ops,
- struct lock_class_key *key);
+ struct intel_wakeref_lockclass *key);
#define intel_wakeref_init(wf, rpm, ops) do { \
- static struct lock_class_key __key; \
+ static struct intel_wakeref_lockclass __key; \
\
__intel_wakeref_init((wf), (rpm), (ops), &__key); \
} while (0)
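Aside, not part of the patch: the do/while(0) macro above uses the standard
lockdep idiom of declaring one static lock_class_key per call site, so each
instantiation site gets its own lockdep class; the change merely widens the
key to cover both the mutex and the work item. A minimal sketch of the
idiom, with hypothetical "demo" names:

	struct demo {
		struct mutex lock;
	};

	/* one static key per call site => one lockdep class per site */
	#define demo_init(d) do {				\
		static struct lock_class_key __key;		\
		__mutex_init(&(d)->lock, "demo.lock", &__key);	\
	} while (0)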
@@ -59,9 +64,7 @@ void __intel_wakeref_put_last(struct intel_wakeref *wf, unsigned long flags);
/**
* intel_wakeref_get: Acquire the wakeref
- * @i915: the drm_i915_private device
* @wf: the wakeref
- * @fn: callback for acquired the wakeref, called only on first acquire.
*
* Acquire a hold on the wakeref. The first user to do so, will acquire
* the runtime pm wakeref and then call the @fn underneath the wakeref
@@ -76,6 +79,7 @@ void __intel_wakeref_put_last(struct intel_wakeref *wf, unsigned long flags);
static inline int
intel_wakeref_get(struct intel_wakeref *wf)
{
+ might_sleep();
if (unlikely(!atomic_inc_not_zero(&wf->count)))
return __intel_wakeref_get_first(wf);
@@ -83,6 +87,22 @@ intel_wakeref_get(struct intel_wakeref *wf)
}
/**
+ * __intel_wakeref_get: Acquire the wakeref, again
+ * @wf: the wakeref
+ *
+ * Increment the wakeref counter, only valid if it is already held by
+ * the caller.
+ *
+ * See intel_wakeref_get().
+ */
+static inline void
+__intel_wakeref_get(struct intel_wakeref *wf)
+{
+ INTEL_WAKEREF_BUG_ON(atomic_read(&wf->count) <= 0);
+ atomic_inc(&wf->count);
+}
+
+/**
* intel_wakeref_get_if_in_use: Acquire the wakeref
* @wf: the wakeref
*
diff --git a/drivers/gpu/drm/i915/oa/Makefile b/drivers/gpu/drm/i915/oa/Makefile
deleted file mode 100644
index df028e2b0d64..000000000000
--- a/drivers/gpu/drm/i915/oa/Makefile
+++ /dev/null
@@ -1,7 +0,0 @@
-# SPDX-License-Identifier: MIT
-
-# For building individual subdir files on the command line
-subdir-ccflags-y += -I$(srctree)/$(src)/..
-
-# Extra header tests
-header-test-pattern-$(CONFIG_DRM_I915_WERROR) := *.h
diff --git a/drivers/gpu/drm/i915/selftests/i915_active.c b/drivers/gpu/drm/i915/selftests/i915_active.c
index 260b0ee5d1e3..ef572a0c2566 100644
--- a/drivers/gpu/drm/i915/selftests/i915_active.c
+++ b/drivers/gpu/drm/i915/selftests/i915_active.c
@@ -99,7 +99,7 @@ __live_active_setup(struct drm_i915_private *i915)
for_each_uabi_engine(engine, i915) {
struct i915_request *rq;
- rq = i915_request_create(engine->kernel_context);
+ rq = intel_engine_create_kernel_request(engine);
if (IS_ERR(rq)) {
err = PTR_ERR(rq);
break;
@@ -155,7 +155,11 @@ static int live_active_wait(void *arg)
i915_active_wait(&active->base);
if (!READ_ONCE(active->retired)) {
+ struct drm_printer p = drm_err_printer(__func__);
+
pr_err("i915_active not retired after waiting!\n");
+ i915_active_print(&active->base, &p);
+
err = -EINVAL;
}
@@ -184,7 +188,11 @@ static int live_active_retire(void *arg)
err = -EIO;
if (!READ_ONCE(active->retired)) {
+ struct drm_printer p = drm_err_printer(__func__);
+
pr_err("i915_active not retired after flushing!\n");
+ i915_active_print(&active->base, &p);
+
err = -EINVAL;
}
@@ -250,3 +258,36 @@ void i915_active_print(struct i915_active *ref, struct drm_printer *m)
i915_active_release(ref);
}
}
+
+static void spin_unlock_wait(spinlock_t *lock)
+{
+ spin_lock_irq(lock);
+ spin_unlock_irq(lock);
+}
+
+void i915_active_unlock_wait(struct i915_active *ref)
+{
+ if (i915_active_acquire_if_busy(ref)) {
+ struct active_node *it, *n;
+
+ rcu_read_lock();
+ rbtree_postorder_for_each_entry_safe(it, n, &ref->tree, node) {
+ struct dma_fence *f;
+
+ /* Wait for all active callbacks */
+ f = rcu_dereference(it->base.fence);
+ if (f)
+ spin_unlock_wait(f->lock);
+ }
+ rcu_read_unlock();
+
+ i915_active_release(ref);
+ }
+
+ /* And wait for the retire callback */
+ spin_lock_irq(&ref->tree_lock);
+ spin_unlock_irq(&ref->tree_lock);
+
+ /* ... which may have been on a thread instead */
+ flush_work(&ref->work);
+}
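Aside: the spin_lock_irq()/spin_unlock_irq() pair in spin_unlock_wait()
above is the portable replacement for the removed core-kernel
spin_unlock_wait() primitive; once the lock has been acquired, every
critical section that was in flight when we started waiting must have
completed. The generic shape of the idiom, as a sketch:

	/* wait for all current holders of @lock to leave their sections */
	static void wait_for_holders(spinlock_t *lock)
	{
		spin_lock_irq(lock);	/* blocks until the holder is done */
		spin_unlock_irq(lock);	/* prior critical sections are over */
	}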
diff --git a/drivers/gpu/drm/i915/selftests/i915_gem.c b/drivers/gpu/drm/i915/selftests/i915_gem.c
index d83f6bf6d9d4..78f36faf2bbe 100644
--- a/drivers/gpu/drm/i915/selftests/i915_gem.c
+++ b/drivers/gpu/drm/i915/selftests/i915_gem.c
@@ -9,6 +9,7 @@
#include "gem/selftests/igt_gem_utils.h"
#include "gem/selftests/mock_context.h"
#include "gt/intel_gt.h"
+#include "gt/intel_gt_pm.h"
#include "i915_selftest.h"
@@ -123,8 +124,6 @@ static void pm_resume(struct drm_i915_private *i915)
* that runtime-pm just works.
*/
with_intel_runtime_pm(&i915->runtime_pm, wakeref) {
- intel_gt_sanitize(&i915->gt, false);
-
i915_gem_restore_gtt_mappings(i915);
i915_gem_restore_fences(&i915->ggtt);
@@ -136,7 +135,7 @@ static int igt_gem_suspend(void *arg)
{
struct drm_i915_private *i915 = arg;
struct i915_gem_context *ctx;
- struct drm_file *file;
+ struct file *file;
int err;
file = mock_file(i915);
@@ -163,7 +162,7 @@ static int igt_gem_suspend(void *arg)
err = switch_to_context(ctx);
out:
- mock_file_free(i915, file);
+ fput(file);
return err;
}
@@ -171,7 +170,7 @@ static int igt_gem_hibernate(void *arg)
{
struct drm_i915_private *i915 = arg;
struct i915_gem_context *ctx;
- struct drm_file *file;
+ struct file *file;
int err;
file = mock_file(i915);
@@ -198,7 +197,7 @@ static int igt_gem_hibernate(void *arg)
err = switch_to_context(ctx);
out:
- mock_file_free(i915, file);
+ fput(file);
return err;
}
diff --git a/drivers/gpu/drm/i915/selftests/i915_gem_evict.c b/drivers/gpu/drm/i915/selftests/i915_gem_evict.c
index 42e948144f1b..06ef88510209 100644
--- a/drivers/gpu/drm/i915/selftests/i915_gem_evict.c
+++ b/drivers/gpu/drm/i915/selftests/i915_gem_evict.c
@@ -198,8 +198,8 @@ static int igt_overcommit(void *arg)
quirk_add(obj, &objects);
vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0, 0);
- if (!IS_ERR(vma) || PTR_ERR(vma) != -ENOSPC) {
- pr_err("Failed to evict+insert, i915_gem_object_ggtt_pin returned err=%d\n", (int)PTR_ERR(vma));
+ if (vma != ERR_PTR(-ENOSPC)) {
+ pr_err("Failed to evict+insert, i915_gem_object_ggtt_pin returned err=%d\n", (int)PTR_ERR_OR_ZERO(vma));
err = -EINVAL;
goto cleanup;
}
@@ -466,7 +466,7 @@ static int igt_evict_contexts(void *arg)
/* Overfill the GGTT with context objects and so try to evict one. */
for_each_engine(engine, gt, id) {
struct i915_sw_fence fence;
- struct drm_file *file;
+ struct file *file;
file = mock_file(i915);
if (IS_ERR(file)) {
@@ -515,7 +515,7 @@ static int igt_evict_contexts(void *arg)
pr_info("Submitted %lu contexts/requests on %s\n",
count, engine->name);
- mock_file_free(i915, file);
+ fput(file);
if (err)
break;
}
diff --git a/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c b/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c
index 3f7e80fb3bbd..b342bef5e7c9 100644
--- a/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c
@@ -34,6 +34,7 @@
#include "mock_drm.h"
#include "mock_gem_device.h"
+#include "mock_gtt.h"
#include "igt_flush_test.h"
static void cleanup_freed_objects(struct drm_i915_private *i915)
@@ -151,7 +152,7 @@ static int igt_ppgtt_alloc(void *arg)
if (!HAS_PPGTT(dev_priv))
return 0;
- ppgtt = __ppgtt_create(dev_priv);
+ ppgtt = i915_ppgtt_create(&dev_priv->gt);
if (IS_ERR(ppgtt))
return PTR_ERR(ppgtt);
@@ -206,16 +207,17 @@ err_ppgtt_cleanup:
return err;
}
-static int lowlevel_hole(struct drm_i915_private *i915,
- struct i915_address_space *vm,
+static int lowlevel_hole(struct i915_address_space *vm,
u64 hole_start, u64 hole_end,
unsigned long end_time)
{
I915_RND_STATE(seed_prng);
+ struct i915_vma *mock_vma;
unsigned int size;
- struct i915_vma mock_vma;
- memset(&mock_vma, 0, sizeof(struct i915_vma));
+ mock_vma = kzalloc(sizeof(*mock_vma), GFP_KERNEL);
+ if (!mock_vma)
+ return -ENOMEM;
/* Keep creating larger objects until one cannot fit into the hole */
for (size = 12; (hole_end - hole_start) >> size; size++) {
@@ -239,8 +241,10 @@ static int lowlevel_hole(struct drm_i915_private *i915,
if (order)
break;
} while (count >>= 1);
- if (!count)
+ if (!count) {
+ kfree(mock_vma);
return -ENOMEM;
+ }
GEM_BUG_ON(!order);
GEM_BUG_ON(count * BIT_ULL(size) > vm->total);
@@ -252,7 +256,7 @@ static int lowlevel_hole(struct drm_i915_private *i915,
* memory. We expect to hit -ENOMEM.
*/
- obj = fake_dma_object(i915, BIT_ULL(size));
+ obj = fake_dma_object(vm->i915, BIT_ULL(size));
if (IS_ERR(obj)) {
kfree(order);
break;
@@ -283,12 +287,12 @@ static int lowlevel_hole(struct drm_i915_private *i915,
vm->allocate_va_range(vm, addr, BIT_ULL(size)))
break;
- mock_vma.pages = obj->mm.pages;
- mock_vma.node.size = BIT_ULL(size);
- mock_vma.node.start = addr;
+ mock_vma->pages = obj->mm.pages;
+ mock_vma->node.size = BIT_ULL(size);
+ mock_vma->node.start = addr;
- with_intel_runtime_pm(&i915->runtime_pm, wakeref)
- vm->insert_entries(vm, &mock_vma,
+ with_intel_runtime_pm(vm->gt->uncore->rpm, wakeref)
+ vm->insert_entries(vm, mock_vma,
I915_CACHE_NONE, 0);
}
count = n;
@@ -299,7 +303,7 @@ static int lowlevel_hole(struct drm_i915_private *i915,
intel_wakeref_t wakeref;
GEM_BUG_ON(addr + BIT_ULL(size) > vm->total);
- with_intel_runtime_pm(&i915->runtime_pm, wakeref)
+ with_intel_runtime_pm(vm->gt->uncore->rpm, wakeref)
vm->clear_range(vm, addr, BIT_ULL(size));
}
@@ -308,9 +312,10 @@ static int lowlevel_hole(struct drm_i915_private *i915,
kfree(order);
- cleanup_freed_objects(i915);
+ cleanup_freed_objects(vm->i915);
}
+ kfree(mock_vma);
return 0;
}
@@ -335,8 +340,7 @@ static void close_object_list(struct list_head *objects,
}
}
-static int fill_hole(struct drm_i915_private *i915,
- struct i915_address_space *vm,
+static int fill_hole(struct i915_address_space *vm,
u64 hole_start, u64 hole_end,
unsigned long end_time)
{
@@ -369,7 +373,7 @@ static int fill_hole(struct drm_i915_private *i915,
{ }
}, *p;
- obj = fake_dma_object(i915, full_size);
+ obj = fake_dma_object(vm->i915, full_size);
if (IS_ERR(obj))
break;
@@ -537,7 +541,7 @@ static int fill_hole(struct drm_i915_private *i915,
}
close_object_list(&objects, vm);
- cleanup_freed_objects(i915);
+ cleanup_freed_objects(vm->i915);
}
return 0;
@@ -547,8 +551,7 @@ err:
return err;
}
-static int walk_hole(struct drm_i915_private *i915,
- struct i915_address_space *vm,
+static int walk_hole(struct i915_address_space *vm,
u64 hole_start, u64 hole_end,
unsigned long end_time)
{
@@ -570,7 +573,7 @@ static int walk_hole(struct drm_i915_private *i915,
u64 addr;
int err = 0;
- obj = fake_dma_object(i915, size << PAGE_SHIFT);
+ obj = fake_dma_object(vm->i915, size << PAGE_SHIFT);
if (IS_ERR(obj))
break;
@@ -625,14 +628,13 @@ err_put:
if (err)
return err;
- cleanup_freed_objects(i915);
+ cleanup_freed_objects(vm->i915);
}
return 0;
}
-static int pot_hole(struct drm_i915_private *i915,
- struct i915_address_space *vm,
+static int pot_hole(struct i915_address_space *vm,
u64 hole_start, u64 hole_end,
unsigned long end_time)
{
@@ -646,7 +648,7 @@ static int pot_hole(struct drm_i915_private *i915,
if (i915_is_ggtt(vm))
flags |= PIN_GLOBAL;
- obj = i915_gem_object_create_internal(i915, 2 * I915_GTT_PAGE_SIZE);
+ obj = i915_gem_object_create_internal(vm->i915, 2 * I915_GTT_PAGE_SIZE);
if (IS_ERR(obj))
return PTR_ERR(obj);
@@ -707,8 +709,7 @@ err_obj:
return err;
}
-static int drunk_hole(struct drm_i915_private *i915,
- struct i915_address_space *vm,
+static int drunk_hole(struct i915_address_space *vm,
u64 hole_start, u64 hole_end,
unsigned long end_time)
{
@@ -753,7 +754,7 @@ static int drunk_hole(struct drm_i915_private *i915,
* memory. We expect to hit -ENOMEM.
*/
- obj = fake_dma_object(i915, BIT_ULL(size));
+ obj = fake_dma_object(vm->i915, BIT_ULL(size));
if (IS_ERR(obj)) {
kfree(order);
break;
@@ -811,14 +812,13 @@ err_obj:
if (err)
return err;
- cleanup_freed_objects(i915);
+ cleanup_freed_objects(vm->i915);
}
return 0;
}
-static int __shrink_hole(struct drm_i915_private *i915,
- struct i915_address_space *vm,
+static int __shrink_hole(struct i915_address_space *vm,
u64 hole_start, u64 hole_end,
unsigned long end_time)
{
@@ -835,7 +835,7 @@ static int __shrink_hole(struct drm_i915_private *i915,
u64 size = BIT_ULL(order++);
size = min(size, hole_end - addr);
- obj = fake_dma_object(i915, size);
+ obj = fake_dma_object(vm->i915, size);
if (IS_ERR(obj)) {
err = PTR_ERR(obj);
break;
@@ -889,12 +889,11 @@ static int __shrink_hole(struct drm_i915_private *i915,
}
close_object_list(&objects, vm);
- cleanup_freed_objects(i915);
+ cleanup_freed_objects(vm->i915);
return err;
}
-static int shrink_hole(struct drm_i915_private *i915,
- struct i915_address_space *vm,
+static int shrink_hole(struct i915_address_space *vm,
u64 hole_start, u64 hole_end,
unsigned long end_time)
{
@@ -906,7 +905,7 @@ static int shrink_hole(struct drm_i915_private *i915,
for_each_prime_number_from(prime, 0, ULONG_MAX - 1) {
vm->fault_attr.interval = prime;
- err = __shrink_hole(i915, vm, hole_start, hole_end, end_time);
+ err = __shrink_hole(vm, hole_start, hole_end, end_time);
if (err)
break;
}
@@ -916,8 +915,7 @@ static int shrink_hole(struct drm_i915_private *i915,
return err;
}
-static int shrink_boom(struct drm_i915_private *i915,
- struct i915_address_space *vm,
+static int shrink_boom(struct i915_address_space *vm,
u64 hole_start, u64 hole_end,
unsigned long end_time)
{
@@ -939,7 +937,7 @@ static int shrink_boom(struct drm_i915_private *i915,
unsigned int size = sizes[i];
struct i915_vma *vma;
- purge = fake_dma_object(i915, size);
+ purge = fake_dma_object(vm->i915, size);
if (IS_ERR(purge))
return PTR_ERR(purge);
@@ -956,7 +954,7 @@ static int shrink_boom(struct drm_i915_private *i915,
/* Should now be ripe for purging */
i915_vma_unpin(vma);
- explode = fake_dma_object(i915, size);
+ explode = fake_dma_object(vm->i915, size);
if (IS_ERR(explode)) {
err = PTR_ERR(explode);
goto err_purge;
@@ -982,7 +980,7 @@ static int shrink_boom(struct drm_i915_private *i915,
i915_gem_object_put(explode);
memset(&vm->fault_attr, 0, sizeof(vm->fault_attr));
- cleanup_freed_objects(i915);
+ cleanup_freed_objects(vm->i915);
}
return 0;
@@ -996,14 +994,13 @@ err_purge:
}
static int exercise_ppgtt(struct drm_i915_private *dev_priv,
- int (*func)(struct drm_i915_private *i915,
- struct i915_address_space *vm,
+ int (*func)(struct i915_address_space *vm,
u64 hole_start, u64 hole_end,
unsigned long end_time))
{
- struct drm_file *file;
struct i915_ppgtt *ppgtt;
IGT_TIMEOUT(end_time);
+ struct file *file;
int err;
if (!HAS_FULL_PPGTT(dev_priv))
@@ -1013,7 +1010,7 @@ static int exercise_ppgtt(struct drm_i915_private *dev_priv,
if (IS_ERR(file))
return PTR_ERR(file);
- ppgtt = i915_ppgtt_create(dev_priv);
+ ppgtt = i915_ppgtt_create(&dev_priv->gt);
if (IS_ERR(ppgtt)) {
err = PTR_ERR(ppgtt);
goto out_free;
@@ -1021,12 +1018,12 @@ static int exercise_ppgtt(struct drm_i915_private *dev_priv,
GEM_BUG_ON(offset_in_page(ppgtt->vm.total));
GEM_BUG_ON(!atomic_read(&ppgtt->vm.open));
- err = func(dev_priv, &ppgtt->vm, 0, ppgtt->vm.total, end_time);
+ err = func(&ppgtt->vm, 0, ppgtt->vm.total, end_time);
i915_vm_put(&ppgtt->vm);
out_free:
- mock_file_free(dev_priv, file);
+ fput(file);
return err;
}
@@ -1077,8 +1074,7 @@ static int sort_holes(void *priv, struct list_head *A, struct list_head *B)
}
static int exercise_ggtt(struct drm_i915_private *i915,
- int (*func)(struct drm_i915_private *i915,
- struct i915_address_space *vm,
+ int (*func)(struct i915_address_space *vm,
u64 hole_start, u64 hole_end,
unsigned long end_time))
{
@@ -1100,7 +1096,7 @@ restart:
if (hole_start >= hole_end)
continue;
- err = func(i915, &ggtt->vm, hole_start, hole_end, end_time);
+ err = func(&ggtt->vm, hole_start, hole_end, end_time);
if (err)
break;
@@ -1161,11 +1157,13 @@ static int igt_ggtt_page(void *arg)
goto out_free;
memset(&tmp, 0, sizeof(tmp));
+ mutex_lock(&ggtt->vm.mutex);
err = drm_mm_insert_node_in_range(&ggtt->vm.mm, &tmp,
count * PAGE_SIZE, 0,
I915_COLOR_UNEVICTABLE,
0, ggtt->mappable_end,
DRM_MM_INSERT_LOW);
+ mutex_unlock(&ggtt->vm.mutex);
if (err)
goto out_unpin;
@@ -1217,7 +1215,9 @@ static int igt_ggtt_page(void *arg)
out_remove:
ggtt->vm.clear_range(&ggtt->vm, tmp.start, tmp.size);
intel_runtime_pm_put(&i915->runtime_pm, wakeref);
+ mutex_lock(&ggtt->vm.mutex);
drm_mm_remove_node(&tmp);
+ mutex_unlock(&ggtt->vm.mutex);
out_unpin:
i915_gem_object_unpin_pages(obj);
out_free:
@@ -1243,8 +1243,7 @@ static void track_vma_bind(struct i915_vma *vma)
}
static int exercise_mock(struct drm_i915_private *i915,
- int (*func)(struct drm_i915_private *i915,
- struct i915_address_space *vm,
+ int (*func)(struct i915_address_space *vm,
u64 hole_start, u64 hole_end,
unsigned long end_time))
{
@@ -1259,7 +1258,7 @@ static int exercise_mock(struct drm_i915_private *i915,
return -ENOMEM;
vm = i915_gem_context_get_vm_rcu(ctx);
- err = func(i915, vm, 0, min(vm->total, limit), end_time);
+ err = func(vm, 0, min(vm->total, limit), end_time);
i915_vm_put(vm);
mock_context_close(ctx);
@@ -1782,9 +1781,9 @@ static int igt_cs_tlb(void *arg)
struct i915_address_space *vm;
struct i915_gem_context *ctx;
struct intel_context *ce;
- struct drm_file *file;
struct i915_vma *vma;
I915_RND_STATE(prng);
+ struct file *file;
unsigned int i;
u32 *result;
u32 *batch;
@@ -2022,7 +2021,7 @@ out_put_bbe:
out_vm:
i915_vm_put(vm);
out_unlock:
- mock_file_free(i915, file);
+ fput(file);
return err;
}
diff --git a/drivers/gpu/drm/i915/selftests/i915_live_selftests.h b/drivers/gpu/drm/i915/selftests/i915_live_selftests.h
index 4b3cac73e291..34138c7bdd15 100644
--- a/drivers/gpu/drm/i915/selftests/i915_live_selftests.h
+++ b/drivers/gpu/drm/i915/selftests/i915_live_selftests.h
@@ -1,5 +1,11 @@
/* SPDX-License-Identifier: GPL-2.0 */
-/* List each unit test as selftest(name, function)
+
+#ifndef selftest
+#define selftest(x, y)
+#endif
+
+/*
+ * List each unit test as selftest(name, function)
*
* The name is used as both an enum and expanded as subtest__name to create
* a module parameter. It must be unique and legal for a C identifier.
@@ -16,6 +22,7 @@ selftest(gt_engines, intel_engine_live_selftests)
selftest(gt_timelines, intel_timeline_live_selftests)
selftest(gt_contexts, intel_context_live_selftests)
selftest(gt_lrc, intel_lrc_live_selftests)
+selftest(gt_mocs, intel_mocs_live_selftests)
selftest(gt_pm, intel_gt_pm_live_selftests)
selftest(gt_heartbeat, intel_heartbeat_live_selftests)
selftest(requests, i915_request_live_selftests)
@@ -36,5 +43,6 @@ selftest(reset, intel_reset_live_selftests)
selftest(memory_region, intel_memory_region_live_selftests)
selftest(hangcheck, intel_hangcheck_live_selftests)
selftest(execlists, intel_execlists_live_selftests)
-selftest(guc, intel_guc_live_selftest)
selftest(perf, i915_perf_live_selftests)
+/* Here be dragons: keep last to run last! */
+selftest(late_gt_pm, intel_gt_pm_late_selftests)
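Aside: the "#ifndef selftest" guard added at the top of these list headers
makes them safe to include standalone; real consumers define selftest()
before inclusion to expand the list, as i915_selftest.c does below. A
simplified sketch of the X-macro pattern, with hypothetical names:

	/* list.h: nothing but selftest() invocations, no include guard */
	#ifndef selftest
	#define selftest(name, func)
	#endif
	selftest(foo, foo_run)
	selftest(bar, bar_run)

	/* user.c: expand the same list into an enum and a table */
	static int foo_run(void);
	static int bar_run(void);

	enum {
	#define selftest(name, func) id_##name,
	#include "list.h"
	#undef selftest
	};

	static int (* const runners[])(void) = {
	#define selftest(name, func) [id_##name] = func,
	#include "list.h"
	#undef selftest
	};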
diff --git a/drivers/gpu/drm/i915/selftests/i915_mock_selftests.h b/drivers/gpu/drm/i915/selftests/i915_mock_selftests.h
index aa5a0e7f5d9e..5b39bab4da1d 100644
--- a/drivers/gpu/drm/i915/selftests/i915_mock_selftests.h
+++ b/drivers/gpu/drm/i915/selftests/i915_mock_selftests.h
@@ -1,5 +1,11 @@
/* SPDX-License-Identifier: GPL-2.0 */
-/* List each unit test as selftest(name, function)
+
+#ifndef selftest
+#define selftest(x, y)
+#endif
+
+/*
+ * List each unit test as selftest(name, function)
*
* The name is used as both an enum and expanded as subtest__name to create
* a module parameter. It must be unique and legal for a C identifier.
diff --git a/drivers/gpu/drm/i915/selftests/i915_perf.c b/drivers/gpu/drm/i915/selftests/i915_perf.c
index aabd07f67e49..d1a1568c47ba 100644
--- a/drivers/gpu/drm/i915/selftests/i915_perf.c
+++ b/drivers/gpu/drm/i915/selftests/i915_perf.c
@@ -132,7 +132,7 @@ static int live_noa_delay(void *arg)
for (i = 0; i < 4; i++)
intel_write_status_page(stream->engine, 0x100 + i, 0);
- rq = i915_request_create(stream->engine->kernel_context);
+ rq = intel_engine_create_kernel_request(stream->engine);
if (IS_ERR(rq)) {
err = PTR_ERR(rq);
goto out;
diff --git a/drivers/gpu/drm/i915/selftests/i915_perf_selftests.h b/drivers/gpu/drm/i915/selftests/i915_perf_selftests.h
new file mode 100644
index 000000000000..5a577a1332f5
--- /dev/null
+++ b/drivers/gpu/drm/i915/selftests/i915_perf_selftests.h
@@ -0,0 +1,19 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#ifndef selftest
+#define selftest(x, y)
+#endif
+
+/*
+ * List each unit test as selftest(name, function)
+ *
+ * The name is used as both an enum and expanded as subtest__name to create
+ * a module parameter. It must be unique and legal for a C identifier.
+ *
+ * The function should be of type int function(void). It may be conditionally
+ * compiled using #if IS_ENABLED(DRM_I915_SELFTEST).
+ *
+ * Tests are executed in order by igt/i915_selftest
+ */
+selftest(engine_cs, intel_engine_cs_perf_selftests)
+selftest(blt, i915_gem_object_blt_perf_selftests)
diff --git a/drivers/gpu/drm/i915/selftests/i915_random.h b/drivers/gpu/drm/i915/selftests/i915_random.h
index 35cc69a3a1b9..05364eca20f7 100644
--- a/drivers/gpu/drm/i915/selftests/i915_random.h
+++ b/drivers/gpu/drm/i915/selftests/i915_random.h
@@ -25,6 +25,7 @@
#ifndef __I915_SELFTESTS_RANDOM_H__
#define __I915_SELFTESTS_RANDOM_H__
+#include <linux/math64.h>
#include <linux/random.h>
#include "../i915_selftest.h"
diff --git a/drivers/gpu/drm/i915/selftests/i915_request.c b/drivers/gpu/drm/i915/selftests/i915_request.c
index 8618a4dc0701..f89d9c42f1fa 100644
--- a/drivers/gpu/drm/i915/selftests/i915_request.c
+++ b/drivers/gpu/drm/i915/selftests/i915_request.c
@@ -27,11 +27,13 @@
#include "gem/i915_gem_pm.h"
#include "gem/selftests/mock_context.h"
+#include "gt/intel_engine_pm.h"
#include "gt/intel_gt.h"
#include "i915_random.h"
#include "i915_selftest.h"
#include "igt_live_test.h"
+#include "igt_spinner.h"
#include "lib_sw_fence.h"
#include "mock_drm.h"
@@ -540,6 +542,7 @@ static int live_nop_request(void *arg)
if (err)
return err;
+ intel_engine_pm_get(engine);
for_each_prime_number_from(prime, 1, 8192) {
struct i915_request *request = NULL;
@@ -578,6 +581,7 @@ static int live_nop_request(void *arg)
if (__igt_timeout(end_time, NULL))
break;
}
+ intel_engine_pm_put(engine);
err = igt_live_test_end(&t);
if (err)
@@ -692,10 +696,13 @@ static int live_empty_request(void *arg)
if (err)
goto out_batch;
+ intel_engine_pm_get(engine);
+
/* Warmup / preload */
request = empty_request(engine, batch);
if (IS_ERR(request)) {
err = PTR_ERR(request);
+ intel_engine_pm_put(engine);
goto out_batch;
}
i915_request_wait(request, 0, MAX_SCHEDULE_TIMEOUT);
@@ -708,6 +715,7 @@ static int live_empty_request(void *arg)
request = empty_request(engine, batch);
if (IS_ERR(request)) {
err = PTR_ERR(request);
+ intel_engine_pm_put(engine);
goto out_batch;
}
}
@@ -721,6 +729,7 @@ static int live_empty_request(void *arg)
break;
}
i915_request_put(request);
+ intel_engine_pm_put(engine);
err = igt_live_test_end(&t);
if (err)
@@ -740,10 +749,8 @@ out_batch:
static struct i915_vma *recursive_batch(struct drm_i915_private *i915)
{
- struct i915_gem_context *ctx = i915->kernel_context;
struct drm_i915_gem_object *obj;
const int gen = INTEL_GEN(i915);
- struct i915_address_space *vm;
struct i915_vma *vma;
u32 *cmd;
int err;
@@ -752,9 +759,7 @@ static struct i915_vma *recursive_batch(struct drm_i915_private *i915)
if (IS_ERR(obj))
return ERR_CAST(obj);
- vm = i915_gem_context_get_vm_rcu(ctx);
- vma = i915_vma_instance(obj, vm, NULL);
- i915_vm_put(vm);
+ vma = i915_vma_instance(obj, i915->gt.vm, NULL);
if (IS_ERR(vma)) {
err = PTR_ERR(vma);
goto err;
@@ -845,7 +850,7 @@ static int live_all_engines(void *arg)
idx = 0;
for_each_uabi_engine(engine, i915) {
- request[idx] = i915_request_create(engine->kernel_context);
+ request[idx] = intel_engine_create_kernel_request(engine);
if (IS_ERR(request[idx])) {
err = PTR_ERR(request[idx]);
pr_err("%s: Request allocation failed with err=%d\n",
@@ -962,7 +967,7 @@ static int live_sequential_engines(void *arg)
goto out_free;
}
- request[idx] = i915_request_create(engine->kernel_context);
+ request[idx] = intel_engine_create_kernel_request(engine);
if (IS_ERR(request[idx])) {
err = PTR_ERR(request[idx]);
pr_err("%s: Request allocation failed for %s with err=%d\n",
@@ -1067,15 +1072,18 @@ static int __live_parallel_engine1(void *arg)
struct intel_engine_cs *engine = arg;
IGT_TIMEOUT(end_time);
unsigned long count;
+ int err = 0;
count = 0;
+ intel_engine_pm_get(engine);
do {
struct i915_request *rq;
- int err;
rq = i915_request_create(engine->kernel_context);
- if (IS_ERR(rq))
- return PTR_ERR(rq);
+ if (IS_ERR(rq)) {
+ err = PTR_ERR(rq);
+ break;
+ }
i915_request_get(rq);
i915_request_add(rq);
@@ -1085,13 +1093,14 @@ static int __live_parallel_engine1(void *arg)
err = -ETIME;
i915_request_put(rq);
if (err)
- return err;
+ break;
count++;
} while (!__igt_timeout(end_time, NULL));
+ intel_engine_pm_put(engine);
pr_info("%s: %lu request + sync\n", engine->name, count);
- return 0;
+ return err;
}
static int __live_parallel_engineN(void *arg)
@@ -1099,21 +1108,100 @@ static int __live_parallel_engineN(void *arg)
struct intel_engine_cs *engine = arg;
IGT_TIMEOUT(end_time);
unsigned long count;
+ int err = 0;
count = 0;
+ intel_engine_pm_get(engine);
do {
struct i915_request *rq;
rq = i915_request_create(engine->kernel_context);
- if (IS_ERR(rq))
- return PTR_ERR(rq);
+ if (IS_ERR(rq)) {
+ err = PTR_ERR(rq);
+ break;
+ }
i915_request_add(rq);
count++;
} while (!__igt_timeout(end_time, NULL));
+ intel_engine_pm_put(engine);
pr_info("%s: %lu requests\n", engine->name, count);
- return 0;
+ return err;
+}
+
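+/*
+ * wake_all()/wait_for_all() below form a simple barrier between the
+ * per-engine test threads: each thread decrements the shared counter,
+ * and everyone sleeps until the last arrival drops it to zero and wakes
+ * the waiters (or the selftest timeout expires).
+ */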
+static bool wake_all(struct drm_i915_private *i915)
+{
+ if (atomic_dec_and_test(&i915->selftest.counter)) {
+ wake_up_var(&i915->selftest.counter);
+ return true;
+ }
+
+ return false;
+}
+
+static int wait_for_all(struct drm_i915_private *i915)
+{
+ if (wake_all(i915))
+ return 0;
+
+ if (wait_var_event_timeout(&i915->selftest.counter,
+ !atomic_read(&i915->selftest.counter),
+ i915_selftest.timeout_jiffies))
+ return 0;
+
+ return -ETIME;
+}
+
+static int __live_parallel_spin(void *arg)
+{
+ struct intel_engine_cs *engine = arg;
+ struct igt_spinner spin;
+ struct i915_request *rq;
+ int err = 0;
+
+ /*
+ * Create a spinner running for eternity on each engine. If a second
+ * spinner is incorrectly placed on the same engine, it will not be
+ * able to start in time.
+ */
+
+ if (igt_spinner_init(&spin, engine->gt)) {
+ wake_all(engine->i915);
+ return -ENOMEM;
+ }
+
+ intel_engine_pm_get(engine);
+ rq = igt_spinner_create_request(&spin,
+ engine->kernel_context,
+ MI_NOOP); /* no preemption */
+ intel_engine_pm_put(engine);
+ if (IS_ERR(rq)) {
+ err = PTR_ERR(rq);
+ if (err == -ENODEV)
+ err = 0;
+ wake_all(engine->i915);
+ goto out_spin;
+ }
+
+ i915_request_get(rq);
+ i915_request_add(rq);
+ if (igt_wait_for_spinner(&spin, rq)) {
+ /* Occupy this engine for the whole test */
+ err = wait_for_all(engine->i915);
+ } else {
+ pr_err("Failed to start spinner on %s\n", engine->name);
+ err = -EINVAL;
+ }
+ igt_spinner_end(&spin);
+
+ if (err == 0 && i915_request_wait(rq, 0, HZ / 5) < 0)
+ err = -EIO;
+ i915_request_put(rq);
+
+out_spin:
+ igt_spinner_fini(&spin);
+ return err;
}
static int live_parallel_engines(void *arg)
@@ -1122,6 +1210,7 @@ static int live_parallel_engines(void *arg)
static int (* const func[])(void *arg) = {
__live_parallel_engine1,
__live_parallel_engineN,
+ __live_parallel_spin,
NULL,
};
const unsigned int nengines = num_uabi_engines(i915);
@@ -1140,13 +1229,17 @@ static int live_parallel_engines(void *arg)
return -ENOMEM;
for (fn = func; !err && *fn; fn++) {
+ char name[KSYM_NAME_LEN];
struct igt_live_test t;
unsigned int idx;
- err = igt_live_test_begin(&t, i915, __func__, "");
+ snprintf(name, sizeof(name), "%ps", *fn);
+ err = igt_live_test_begin(&t, i915, __func__, name);
if (err)
break;
+ atomic_set(&i915->selftest.counter, nengines);
+
idx = 0;
for_each_uabi_engine(engine, i915) {
tsk[idx] = kthread_run(*fn, engine,
@@ -1230,9 +1323,9 @@ static int live_breadcrumbs_smoketest(void *arg)
struct task_struct **threads;
struct igt_live_test live;
intel_wakeref_t wakeref;
- struct drm_file *file;
struct smoketest *smoke;
unsigned int n, idx;
+ struct file *file;
int ret = 0;
/*
@@ -1354,7 +1447,7 @@ out_threads:
out_smoke:
kfree(smoke);
out_file:
- mock_file_free(i915, file);
+ fput(file);
out_rpm:
intel_runtime_pm_put(&i915->runtime_pm, wakeref);
diff --git a/drivers/gpu/drm/i915/selftests/i915_selftest.c b/drivers/gpu/drm/i915/selftests/i915_selftest.c
index a6cca4ad96f6..d3bf9eefb682 100644
--- a/drivers/gpu/drm/i915/selftests/i915_selftest.c
+++ b/drivers/gpu/drm/i915/selftests/i915_selftest.c
@@ -57,6 +57,12 @@ enum {
#undef selftest
};
+enum {
+#define selftest(name, func) perf_##name,
+#include "i915_perf_selftests.h"
+#undef selftest
+};
+
struct selftest {
bool enabled;
const char *name;
@@ -78,6 +84,12 @@ static struct selftest live_selftests[] = {
};
#undef selftest
+#define selftest(n, f) [perf_##n] = { .name = #n, { .live = f } },
+static struct selftest perf_selftests[] = {
+#include "i915_perf_selftests.h"
+};
+#undef selftest
+
/* Embed the line number into the parameter name so that we can order tests */
#define selftest(n, func) selftest_0(n, func, param(n))
#define param(n) __PASTE(igt__, __PASTE(__LINE__, __mock_##n))
@@ -93,6 +105,13 @@ module_param_named(id, live_selftests[live_##n].enabled, bool, 0400);
#include "i915_live_selftests.h"
#undef selftest_0
#undef param
+
+#define param(n) __PASTE(igt__, __PASTE(__LINE__, __perf_##n))
+#define selftest_0(n, func, id) \
+module_param_named(id, perf_selftests[perf_##n].enabled, bool, 0400);
+#include "i915_perf_selftests.h"
+#undef selftest_0
+#undef param
#undef selftest
static void set_default_test_all(struct selftest *st, unsigned int count)
@@ -200,6 +219,27 @@ int i915_live_selftests(struct pci_dev *pdev)
return 0;
}
+int i915_perf_selftests(struct pci_dev *pdev)
+{
+ int err;
+
+ if (!i915_selftest.perf)
+ return 0;
+
+ err = run_selftests(perf, pdev_to_i915(pdev));
+ if (err) {
+ i915_selftest.perf = err;
+ return err;
+ }
+
+ if (i915_selftest.perf < 0) {
+ i915_selftest.perf = -ENOTTY;
+ return 1;
+ }
+
+ return 0;
+}
+
static bool apply_subtest_filter(const char *caller, const char *name)
{
char *filter, *sep, *tok;
@@ -365,3 +405,6 @@ MODULE_PARM_DESC(mock_selftests, "Run selftests before loading, using mock hardw
module_param_named_unsafe(live_selftests, i915_selftest.live, int, 0400);
MODULE_PARM_DESC(live_selftests, "Run selftests after driver initialisation on the live system (0:disabled [default], 1:run tests then continue, -1:run tests then exit module)");
+
+module_param_named_unsafe(perf_selftests, i915_selftest.perf, int, 0400);
+MODULE_PARM_DESC(perf_selftests, "Run performance orientated selftests after driver initialisation on the live system (0:disabled [default], 1:run tests then continue, -1:run tests then exit module)");
diff --git a/drivers/gpu/drm/i915/selftests/igt_atomic.c b/drivers/gpu/drm/i915/selftests/igt_atomic.c
new file mode 100644
index 000000000000..fb506b699095
--- /dev/null
+++ b/drivers/gpu/drm/i915/selftests/igt_atomic.c
@@ -0,0 +1,47 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2018 Intel Corporation
+ */
+
+#include <linux/preempt.h>
+#include <linux/bottom_half.h>
+#include <linux/irqflags.h>
+
+#include "igt_atomic.h"
+
+static void __preempt_begin(void)
+{
+ preempt_disable();
+}
+
+static void __preempt_end(void)
+{
+ preempt_enable();
+}
+
+static void __softirq_begin(void)
+{
+ local_bh_disable();
+}
+
+static void __softirq_end(void)
+{
+ local_bh_enable();
+}
+
+static void __hardirq_begin(void)
+{
+ local_irq_disable();
+}
+
+static void __hardirq_end(void)
+{
+ local_irq_enable();
+}
+
+const struct igt_atomic_section igt_atomic_phases[] = {
+ { "preempt", __preempt_begin, __preempt_end },
+ { "softirq", __softirq_begin, __softirq_end },
+ { "hardirq", __hardirq_begin, __hardirq_end },
+ { }
+};
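Aside: the phase table is NULL-terminated (the empty "{ }" sentinel) so
callers can walk it without knowing its length. A sketch of the expected
usage:

	const struct igt_atomic_section *p;

	for (p = igt_atomic_phases; p->name; p++) {
		p->critical_section_begin();
		/* exercise the code under test in this atomic context */
		p->critical_section_end();
	}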
diff --git a/drivers/gpu/drm/i915/selftests/igt_atomic.h b/drivers/gpu/drm/i915/selftests/igt_atomic.h
index 93ec89f487ec..1991798abf4b 100644
--- a/drivers/gpu/drm/i915/selftests/igt_atomic.h
+++ b/drivers/gpu/drm/i915/selftests/igt_atomic.h
@@ -6,51 +6,12 @@
#ifndef IGT_ATOMIC_H
#define IGT_ATOMIC_H
-#include <linux/preempt.h>
-#include <linux/bottom_half.h>
-#include <linux/irqflags.h>
-
-static void __preempt_begin(void)
-{
- preempt_disable();
-}
-
-static void __preempt_end(void)
-{
- preempt_enable();
-}
-
-static void __softirq_begin(void)
-{
- local_bh_disable();
-}
-
-static void __softirq_end(void)
-{
- local_bh_enable();
-}
-
-static void __hardirq_begin(void)
-{
- local_irq_disable();
-}
-
-static void __hardirq_end(void)
-{
- local_irq_enable();
-}
-
struct igt_atomic_section {
const char *name;
void (*critical_section_begin)(void);
void (*critical_section_end)(void);
};
-static const struct igt_atomic_section igt_atomic_phases[] = {
- { "preempt", __preempt_begin, __preempt_end },
- { "softirq", __softirq_begin, __softirq_end },
- { "hardirq", __hardirq_begin, __hardirq_end },
- { }
-};
+extern const struct igt_atomic_section igt_atomic_phases[];
#endif /* IGT_ATOMIC_H */
diff --git a/drivers/gpu/drm/i915/selftests/igt_live_test.h b/drivers/gpu/drm/i915/selftests/igt_live_test.h
index c0e9f99d50de..36ed42736c52 100644
--- a/drivers/gpu/drm/i915/selftests/igt_live_test.h
+++ b/drivers/gpu/drm/i915/selftests/igt_live_test.h
@@ -7,7 +7,7 @@
#ifndef IGT_LIVE_TEST_H
#define IGT_LIVE_TEST_H
-#include "../i915_gem.h"
+#include "gt/intel_engine.h" /* for I915_NUM_ENGINES */
struct drm_i915_private;
diff --git a/drivers/gpu/drm/i915/selftests/igt_mmap.c b/drivers/gpu/drm/i915/selftests/igt_mmap.c
new file mode 100644
index 000000000000..583a4ff8b8c9
--- /dev/null
+++ b/drivers/gpu/drm/i915/selftests/igt_mmap.c
@@ -0,0 +1,39 @@
+/*
+ * SPDX-License-Identifier: MIT
+ *
+ * Copyright © 2019 Intel Corporation
+ */
+
+#include <drm/drm_file.h>
+
+#include "i915_drv.h"
+#include "igt_mmap.h"
+
+unsigned long igt_mmap_node(struct drm_i915_private *i915,
+ struct drm_vma_offset_node *node,
+ unsigned long addr,
+ unsigned long prot,
+ unsigned long flags)
+{
+ struct file *file;
+ int err;
+
+ /* Pretend to open("/dev/dri/card0") */
+ file = mock_drm_getfile(i915->drm.primary, O_RDWR);
+ if (IS_ERR(file))
+ return PTR_ERR(file);
+
+ err = drm_vma_node_allow(node, file->private_data);
+ if (err) {
+ addr = err;
+ goto out_file;
+ }
+
+ addr = vm_mmap(file, addr, drm_vma_node_size(node) << PAGE_SHIFT,
+ prot, flags, drm_vma_node_offset_addr(node));
+
+ drm_vma_node_revoke(node, file->private_data);
+out_file:
+ fput(file);
+ return addr;
+}
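Aside: a purely hypothetical caller of the helper above, mapping a GEM
object's fake-offset node ("obj" and the protection/flags values are
illustrative, not taken from this patch):

	unsigned long addr;

	addr = igt_mmap_node(i915, &obj->base.vma_node, 0,
			     PROT_READ | PROT_WRITE, MAP_SHARED);
	if (IS_ERR_VALUE(addr))
		return (int)addr;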
diff --git a/drivers/gpu/drm/i915/selftests/igt_mmap.h b/drivers/gpu/drm/i915/selftests/igt_mmap.h
new file mode 100644
index 000000000000..6e716cb59d7e
--- /dev/null
+++ b/drivers/gpu/drm/i915/selftests/igt_mmap.h
@@ -0,0 +1,19 @@
+/*
+ * SPDX-License-Identifier: MIT
+ *
+ * Copyright © 2019 Intel Corporation
+ */
+
+#ifndef IGT_MMAP_H
+#define IGT_MMAP_H
+
+struct drm_i915_private;
+struct drm_vma_offset_node;
+
+unsigned long igt_mmap_node(struct drm_i915_private *i915,
+ struct drm_vma_offset_node *node,
+ unsigned long addr,
+ unsigned long prot,
+ unsigned long flags);
+
+#endif /* IGT_MMAP_H */
diff --git a/drivers/gpu/drm/i915/selftests/igt_spinner.c b/drivers/gpu/drm/i915/selftests/igt_spinner.c
index ee8450b871da..e8a58fe49c39 100644
--- a/drivers/gpu/drm/i915/selftests/igt_spinner.c
+++ b/drivers/gpu/drm/i915/selftests/igt_spinner.c
@@ -15,8 +15,6 @@ int igt_spinner_init(struct igt_spinner *spin, struct intel_gt *gt)
void *vaddr;
int err;
- GEM_BUG_ON(INTEL_GEN(gt->i915) < 8);
-
memset(spin, 0, sizeof(*spin));
spin->gt = gt;
@@ -95,11 +93,15 @@ igt_spinner_create_request(struct igt_spinner *spin,
struct intel_engine_cs *engine = ce->engine;
struct i915_request *rq = NULL;
struct i915_vma *hws, *vma;
+ unsigned int flags;
u32 *batch;
int err;
GEM_BUG_ON(spin->gt != ce->vm->gt);
+ if (!intel_engine_can_store_dword(ce->engine))
+ return ERR_PTR(-ENODEV);
+
vma = i915_vma_instance(spin->obj, ce->vm, NULL);
if (IS_ERR(vma))
return ERR_CAST(vma);
@@ -132,16 +134,37 @@ igt_spinner_create_request(struct igt_spinner *spin,
batch = spin->batch;
- *batch++ = MI_STORE_DWORD_IMM_GEN4;
- *batch++ = lower_32_bits(hws_address(hws, rq));
- *batch++ = upper_32_bits(hws_address(hws, rq));
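+ /*
+ * The MI_STORE_DWORD_IMM and MI_BATCH_BUFFER_START encodings vary by
+ * generation, so emit the variant this gen understands.
+ */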
+ if (INTEL_GEN(rq->i915) >= 8) {
+ *batch++ = MI_STORE_DWORD_IMM_GEN4;
+ *batch++ = lower_32_bits(hws_address(hws, rq));
+ *batch++ = upper_32_bits(hws_address(hws, rq));
+ } else if (INTEL_GEN(rq->i915) >= 6) {
+ *batch++ = MI_STORE_DWORD_IMM_GEN4;
+ *batch++ = 0;
+ *batch++ = hws_address(hws, rq);
+ } else if (INTEL_GEN(rq->i915) >= 4) {
+ *batch++ = MI_STORE_DWORD_IMM_GEN4 | MI_USE_GGTT;
+ *batch++ = 0;
+ *batch++ = hws_address(hws, rq);
+ } else {
+ *batch++ = MI_STORE_DWORD_IMM | MI_MEM_VIRTUAL;
+ *batch++ = hws_address(hws, rq);
+ }
*batch++ = rq->fence.seqno;
*batch++ = arbitration_command;
- *batch++ = MI_BATCH_BUFFER_START | 1 << 8 | 1;
+ if (INTEL_GEN(rq->i915) >= 8)
+ *batch++ = MI_BATCH_BUFFER_START | BIT(8) | 1;
+ else if (IS_HASWELL(rq->i915))
+ *batch++ = MI_BATCH_BUFFER_START | MI_BATCH_PPGTT_HSW;
+ else if (INTEL_GEN(rq->i915) >= 6)
+ *batch++ = MI_BATCH_BUFFER_START;
+ else
+ *batch++ = MI_BATCH_BUFFER_START | MI_BATCH_GTT;
*batch++ = lower_32_bits(vma->node.start);
*batch++ = upper_32_bits(vma->node.start);
+
*batch++ = MI_BATCH_BUFFER_END; /* not reached */
intel_gt_chipset_flush(engine->gt);
@@ -153,7 +176,10 @@ igt_spinner_create_request(struct igt_spinner *spin,
goto cancel_rq;
}
- err = engine->emit_bb_start(rq, vma->node.start, PAGE_SIZE, 0);
+ flags = 0;
+ if (INTEL_GEN(rq->i915) <= 5)
+ flags |= I915_DISPATCH_SECURE;
+ err = engine->emit_bb_start(rq, vma->node.start, PAGE_SIZE, flags);
cancel_rq:
if (err) {
diff --git a/drivers/gpu/drm/i915/selftests/intel_memory_region.c b/drivers/gpu/drm/i915/selftests/intel_memory_region.c
index 19e1cca8f143..3ef3620e0da5 100644
--- a/drivers/gpu/drm/i915/selftests/intel_memory_region.c
+++ b/drivers/gpu/drm/i915/selftests/intel_memory_region.c
@@ -32,7 +32,7 @@ static void close_objects(struct intel_memory_region *mem,
if (i915_gem_object_has_pinned_pages(obj))
i915_gem_object_unpin_pages(obj);
/* No polluting the memory region between tests */
- __i915_gem_object_put_pages(obj, I915_MM_NORMAL);
+ __i915_gem_object_put_pages(obj);
list_del(&obj->st_link);
i915_gem_object_put(obj);
}
@@ -122,7 +122,7 @@ put:
static void igt_object_release(struct drm_i915_gem_object *obj)
{
i915_gem_object_unpin_pages(obj);
- __i915_gem_object_put_pages(obj, I915_MM_NORMAL);
+ __i915_gem_object_put_pages(obj);
list_del(&obj->st_link);
i915_gem_object_put(obj);
}
@@ -270,36 +270,31 @@ static int igt_gpu_write_dw(struct intel_context *ce,
static int igt_cpu_check(struct drm_i915_gem_object *obj, u32 dword, u32 val)
{
- unsigned long n;
+ unsigned long n = obj->base.size >> PAGE_SHIFT;
+ u32 *ptr;
int err;
- i915_gem_object_lock(obj);
- err = i915_gem_object_set_to_wc_domain(obj, false);
- i915_gem_object_unlock(obj);
- if (err)
- return err;
-
- err = i915_gem_object_pin_pages(obj);
+ err = i915_gem_object_wait(obj, 0, MAX_SCHEDULE_TIMEOUT);
if (err)
return err;
- for (n = 0; n < obj->base.size >> PAGE_SHIFT; ++n) {
- u32 __iomem *base;
- u32 read_val;
-
- base = i915_gem_object_lmem_io_map_page_atomic(obj, n);
+ ptr = i915_gem_object_pin_map(obj, I915_MAP_WC);
+ if (IS_ERR(ptr))
+ return PTR_ERR(ptr);
- read_val = ioread32(base + dword);
- io_mapping_unmap_atomic(base);
- if (read_val != val) {
- pr_err("n=%lu base[%u]=%u, val=%u\n",
- n, dword, read_val, val);
+ ptr += dword;
+ while (n--) {
+ if (*ptr != val) {
+ pr_err("base[%u]=%08x, val=%08x\n",
+ dword, *ptr, val);
err = -EINVAL;
break;
}
+
+ ptr += PAGE_SIZE / sizeof(*ptr);
}
- i915_gem_object_unpin_pages(obj);
+ i915_gem_object_unpin_map(obj);
return err;
}
@@ -404,7 +399,7 @@ static int igt_lmem_write_gpu(void *arg)
struct drm_i915_private *i915 = arg;
struct drm_i915_gem_object *obj;
struct i915_gem_context *ctx;
- struct drm_file *file;
+ struct file *file;
I915_RND_STATE(prng);
u32 sz;
int err;
@@ -439,7 +434,7 @@ static int igt_lmem_write_gpu(void *arg)
out_put:
i915_gem_object_put(obj);
out_file:
- mock_file_free(i915, file);
+ fput(file);
return err;
}
@@ -506,7 +501,9 @@ static int igt_lmem_write_cpu(void *arg)
}
/* Put the pages into a known state -- from the gpu for added fun */
+ intel_engine_pm_get(engine);
err = i915_gem_object_fill_blt(obj, engine->kernel_context, 0xdeadbeaf);
+ intel_engine_pm_put(engine);
if (err)
goto out_unpin;
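The rewritten igt_cpu_check() above samples one dword at the same offset in
every page of a WC mapping instead of io-mapping each page atomically. A
self-contained model of that loop, assuming 4 KiB pages:
#include <stdint.h>
#include <stddef.h>

/* Return 0 if the dword at index "dword" holds "val" in every page. */
static int check_dword_per_page(const uint32_t *map, size_t bytes,
				unsigned int dword, uint32_t val)
{
	const size_t words_per_page = 4096 / sizeof(*map);
	size_t n;

	for (n = 0; n < bytes / 4096; n++)
		if (map[n * words_per_page + dword] != val)
			return -1;	/* mirrors the -EINVAL above */
	return 0;
}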
diff --git a/drivers/gpu/drm/i915/selftests/mock_drm.c b/drivers/gpu/drm/i915/selftests/mock_drm.c
deleted file mode 100644
index 09c704153456..000000000000
--- a/drivers/gpu/drm/i915/selftests/mock_drm.c
+++ /dev/null
@@ -1,73 +0,0 @@
-/*
- * Copyright © 2017 Intel Corporation
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
- * IN THE SOFTWARE.
- *
- */
-
-#include "mock_drm.h"
-
-struct drm_file *mock_file(struct drm_i915_private *i915)
-{
- struct file *filp;
- struct inode *inode;
- struct drm_file *file;
- int err;
-
- inode = kzalloc(sizeof(*inode), GFP_KERNEL);
- if (!inode) {
- err = -ENOMEM;
- goto err;
- }
-
- inode->i_rdev = i915->drm.primary->index;
-
- filp = kzalloc(sizeof(*filp), GFP_KERNEL);
- if (!filp) {
- err = -ENOMEM;
- goto err_inode;
- }
-
- err = drm_open(inode, filp);
- if (err)
- goto err_filp;
-
- file = filp->private_data;
- memset(&file->filp, POISON_INUSE, sizeof(file->filp));
- file->authenticated = true;
-
- kfree(filp);
- kfree(inode);
- return file;
-
-err_filp:
- kfree(filp);
-err_inode:
- kfree(inode);
-err:
- return ERR_PTR(err);
-}
-
-void mock_file_free(struct drm_i915_private *i915, struct drm_file *file)
-{
- struct file filp = { .private_data = file };
-
- drm_release(NULL, &filp);
-}
diff --git a/drivers/gpu/drm/i915/selftests/mock_drm.h b/drivers/gpu/drm/i915/selftests/mock_drm.h
index b39beee9f8f6..9916b6f95526 100644
--- a/drivers/gpu/drm/i915/selftests/mock_drm.h
+++ b/drivers/gpu/drm/i915/selftests/mock_drm.h
@@ -25,7 +25,21 @@
#ifndef __MOCK_DRM_H
#define __MOCK_DRM_H
-struct drm_file *mock_file(struct drm_i915_private *i915);
-void mock_file_free(struct drm_i915_private *i915, struct drm_file *file);
+#include <drm/drm_file.h>
+
+#include "i915_drv.h"
+
+struct drm_file;
+struct file;
+
+static inline struct file *mock_file(struct drm_i915_private *i915)
+{
+ return mock_drm_getfile(i915->drm.primary, O_RDWR);
+}
+
+static inline struct drm_file *to_drm_file(struct file *f)
+{
+ return f->private_data;
+}
#endif /* !__MOCK_DRM_H */
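With mock_file() now returning a plain struct file, teardown is an ordinary
fput() instead of the removed mock_file_free(). A hedged sketch of the new
lifecycle in a test body (the test logic itself is elided):
static int example_live_test(struct drm_i915_private *i915)
{
	struct file *file = mock_file(i915);
	struct drm_file *fpriv;

	if (IS_ERR(file))
		return PTR_ERR(file);

	fpriv = to_drm_file(file);	/* i.e. file->private_data */
	/* ... exercise the device on behalf of fpriv ... */

	fput(file);			/* replaces mock_file_free() */
	return 0;
}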
diff --git a/drivers/gpu/drm/i915/selftests/mock_gem_device.c b/drivers/gpu/drm/i915/selftests/mock_gem_device.c
index 27ed3cee6a9b..3b8986983afc 100644
--- a/drivers/gpu/drm/i915/selftests/mock_gem_device.c
+++ b/drivers/gpu/drm/i915/selftests/mock_gem_device.c
@@ -54,25 +54,19 @@ void mock_device_flush(struct drm_i915_private *i915)
static void mock_device_release(struct drm_device *dev)
{
struct drm_i915_private *i915 = to_i915(dev);
- struct intel_engine_cs *engine;
- enum intel_engine_id id;
mock_device_flush(i915);
+ intel_gt_driver_remove(&i915->gt);
- i915_gem_drain_workqueue(i915);
-
- for_each_engine(engine, &i915->gt, id)
- mock_engine_free(engine);
i915_gem_driver_release__contexts(i915);
- intel_timelines_fini(i915);
-
- drain_workqueue(i915->wq);
+ i915_gem_drain_workqueue(i915);
i915_gem_drain_freed_objects(i915);
mock_fini_ggtt(&i915->ggtt);
destroy_workqueue(i915->wq);
+ intel_gt_driver_late_release(&i915->gt);
intel_memory_regions_driver_release(i915);
drm_mode_config_cleanup(&i915->drm);
@@ -180,9 +174,8 @@ struct drm_i915_private *mock_gem_device(void)
mock_init_contexts(i915);
- intel_timelines_init(i915);
-
mock_init_ggtt(i915, &i915->ggtt);
+ i915->gt.vm = i915_vm_get(&i915->ggtt.vm);
mkwrite_device_info(i915)->engine_mask = BIT(0);
@@ -190,25 +183,20 @@ struct drm_i915_private *mock_gem_device(void)
if (!i915->engine[RCS0])
goto err_unlock;
- i915->kernel_context = mock_context(i915, NULL);
- if (!i915->kernel_context)
- goto err_engine;
-
if (mock_engine_init(i915->engine[RCS0]))
goto err_context;
+ __clear_bit(I915_WEDGED, &i915->gt.reset.flags);
intel_engines_driver_register(i915);
return i915;
err_context:
- i915_gem_driver_release__contexts(i915);
-err_engine:
- mock_engine_free(i915->engine[RCS0]);
+ intel_gt_driver_remove(&i915->gt);
err_unlock:
- intel_timelines_fini(i915);
destroy_workqueue(i915->wq);
err_drv:
+ intel_gt_driver_late_release(&i915->gt);
intel_memory_regions_driver_release(i915);
drm_mode_config_cleanup(&i915->drm);
drm_dev_fini(&i915->drm);
diff --git a/drivers/gpu/drm/i915/selftests/mock_gtt.c b/drivers/gpu/drm/i915/selftests/mock_gtt.c
index 20ac3844edec..edc5e3dda8ca 100644
--- a/drivers/gpu/drm/i915/selftests/mock_gtt.c
+++ b/drivers/gpu/drm/i915/selftests/mock_gtt.c
@@ -55,6 +55,11 @@ static void mock_cleanup(struct i915_address_space *vm)
{
}
+static void mock_clear_range(struct i915_address_space *vm,
+ u64 start, u64 length)
+{
+}
+
struct i915_ppgtt *mock_ppgtt(struct drm_i915_private *i915, const char *name)
{
struct i915_ppgtt *ppgtt;
@@ -70,7 +75,7 @@ struct i915_ppgtt *mock_ppgtt(struct drm_i915_private *i915, const char *name)
i915_address_space_init(&ppgtt->vm, VM_CLASS_PPGTT);
- ppgtt->vm.clear_range = nop_clear_range;
+ ppgtt->vm.clear_range = mock_clear_range;
ppgtt->vm.insert_page = mock_insert_page;
ppgtt->vm.insert_entries = mock_insert_entries;
ppgtt->vm.cleanup = mock_cleanup;
@@ -107,7 +112,7 @@ void mock_init_ggtt(struct drm_i915_private *i915, struct i915_ggtt *ggtt)
ggtt->mappable_end = resource_size(&ggtt->gmadr);
ggtt->vm.total = 4096 * PAGE_SIZE;
- ggtt->vm.clear_range = nop_clear_range;
+ ggtt->vm.clear_range = mock_clear_range;
ggtt->vm.insert_page = mock_insert_page;
ggtt->vm.insert_entries = mock_insert_entries;
ggtt->vm.cleanup = mock_cleanup;
diff --git a/drivers/gpu/drm/i915/selftests/mock_gtt.h b/drivers/gpu/drm/i915/selftests/mock_gtt.h
index 3387393286de..e3f224f43beb 100644
--- a/drivers/gpu/drm/i915/selftests/mock_gtt.h
+++ b/drivers/gpu/drm/i915/selftests/mock_gtt.h
@@ -25,6 +25,9 @@
#ifndef __MOCK_GTT_H
#define __MOCK_GTT_H
+struct drm_i915_private;
+struct i915_ggtt;
+
void mock_init_ggtt(struct drm_i915_private *i915, struct i915_ggtt *ggtt);
void mock_fini_ggtt(struct i915_ggtt *ggtt);
diff --git a/drivers/gpu/drm/i915/selftests/mock_region.h b/drivers/gpu/drm/i915/selftests/mock_region.h
index 24608089d833..329bf74dfaca 100644
--- a/drivers/gpu/drm/i915/selftests/mock_region.h
+++ b/drivers/gpu/drm/i915/selftests/mock_region.h
@@ -6,6 +6,11 @@
#ifndef __MOCK_REGION_H
#define __MOCK_REGION_H
+#include <linux/types.h>
+
+struct drm_i915_private;
+struct intel_memory_region;
+
struct intel_memory_region *
mock_region_create(struct drm_i915_private *i915,
resource_size_t start,
diff --git a/drivers/gpu/drm/i915/selftests/mock_uncore.h b/drivers/gpu/drm/i915/selftests/mock_uncore.h
index 8a2cc553f466..7acf1ef4d488 100644
--- a/drivers/gpu/drm/i915/selftests/mock_uncore.h
+++ b/drivers/gpu/drm/i915/selftests/mock_uncore.h
@@ -25,6 +25,9 @@
#ifndef __MOCK_UNCORE_H
#define __MOCK_UNCORE_H
+struct drm_i915_private;
+struct intel_uncore;
+
void mock_uncore_init(struct intel_uncore *uncore,
struct drm_i915_private *i915);
diff --git a/drivers/gpu/drm/imx/imx-ldb.c b/drivers/gpu/drm/imx/imx-ldb.c
index 208069faf183..8cb2665b2c74 100644
--- a/drivers/gpu/drm/imx/imx-ldb.c
+++ b/drivers/gpu/drm/imx/imx-ldb.c
@@ -127,7 +127,7 @@ static int imx_ldb_connector_get_modes(struct drm_connector *connector)
struct imx_ldb_channel *imx_ldb_ch = con_to_imx_ldb_ch(connector);
int num_modes;
- num_modes = drm_panel_get_modes(imx_ldb_ch->panel);
+ num_modes = drm_panel_get_modes(imx_ldb_ch->panel, connector);
if (num_modes > 0)
return num_modes;
diff --git a/drivers/gpu/drm/imx/parallel-display.c b/drivers/gpu/drm/imx/parallel-display.c
index 35518e5de356..3dca424059f7 100644
--- a/drivers/gpu/drm/imx/parallel-display.c
+++ b/drivers/gpu/drm/imx/parallel-display.c
@@ -50,7 +50,7 @@ static int imx_pd_connector_get_modes(struct drm_connector *connector)
struct device_node *np = imxpd->dev->of_node;
int num_modes;
- num_modes = drm_panel_get_modes(imxpd->panel);
+ num_modes = drm_panel_get_modes(imxpd->panel, connector);
if (num_modes > 0)
return num_modes;
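Both imx call sites above follow the same drm_panel_get_modes() signature
change: the connector that the probed modes are attached to is now passed
explicitly. A generic sketch of the updated callback shape (the container
struct and its lookup helper are hypothetical):
static int example_connector_get_modes(struct drm_connector *connector)
{
	struct example_output *out = connector_to_output(connector);

	if (out->panel)
		return drm_panel_get_modes(out->panel, connector);

	return 0;	/* no panel bound yet */
}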
diff --git a/drivers/gpu/drm/ingenic/ingenic-drm.c b/drivers/gpu/drm/ingenic/ingenic-drm.c
index ec32e1c67335..6d47ef7b148c 100644
--- a/drivers/gpu/drm/ingenic/ingenic-drm.c
+++ b/drivers/gpu/drm/ingenic/ingenic-drm.c
@@ -153,6 +153,7 @@ struct ingenic_dma_hwdesc {
struct jz_soc_info {
bool needs_dev_clk;
+ unsigned int max_width, max_height;
};
struct ingenic_drm {
@@ -164,6 +165,7 @@ struct ingenic_drm {
struct device *dev;
struct regmap *map;
struct clk *lcd_clk, *pix_clk;
+ const struct jz_soc_info *soc_info;
struct ingenic_dma_hwdesc *dma_hwdesc;
dma_addr_t dma_hwdesc_phys;
@@ -326,6 +328,10 @@ static int ingenic_drm_crtc_atomic_check(struct drm_crtc *crtc,
if (!drm_atomic_crtc_needs_modeset(state))
return 0;
+ if (state->mode.hdisplay > priv->soc_info->max_width ||
+ state->mode.vdisplay > priv->soc_info->max_height)
+ return -EINVAL;
+
rate = clk_round_rate(priv->pix_clk,
state->adjusted_mode.clock * 1000);
if (rate < 0)
@@ -372,14 +378,18 @@ static void ingenic_drm_plane_atomic_update(struct drm_plane *plane,
struct ingenic_drm *priv = drm_plane_get_priv(plane);
struct drm_plane_state *state = plane->state;
unsigned int width, height, cpp;
+ dma_addr_t addr;
- width = state->crtc->state->adjusted_mode.hdisplay;
- height = state->crtc->state->adjusted_mode.vdisplay;
- cpp = state->fb->format->cpp[plane->index];
+ if (state && state->fb) {
+ addr = drm_fb_cma_get_gem_addr(state->fb, state, 0);
+ width = state->src_w >> 16;
+ height = state->src_h >> 16;
+ cpp = state->fb->format->cpp[plane->index];
- priv->dma_hwdesc->addr = drm_fb_cma_get_gem_addr(state->fb, state, 0);
- priv->dma_hwdesc->cmd = width * height * cpp / 4;
- priv->dma_hwdesc->cmd |= JZ_LCD_CMD_EOF_IRQ;
+ priv->dma_hwdesc->addr = addr;
+ priv->dma_hwdesc->cmd = width * height * cpp / 4;
+ priv->dma_hwdesc->cmd |= JZ_LCD_CMD_EOF_IRQ;
+ }
}
static void ingenic_drm_encoder_atomic_mode_set(struct drm_encoder *encoder,
@@ -617,6 +627,7 @@ static int ingenic_drm_probe(struct platform_device *pdev)
if (!priv)
return -ENOMEM;
+ priv->soc_info = soc_info;
priv->dev = dev;
drm = &priv->drm;
drm->dev_private = priv;
@@ -632,8 +643,8 @@ static int ingenic_drm_probe(struct platform_device *pdev)
drm_mode_config_init(drm);
drm->mode_config.min_width = 0;
drm->mode_config.min_height = 0;
- drm->mode_config.max_width = 800;
- drm->mode_config.max_height = 600;
+ drm->mode_config.max_width = soc_info->max_width;
+ drm->mode_config.max_height = 4095;
drm->mode_config.funcs = &ingenic_drm_mode_config_funcs;
base = devm_platform_ioremap_resource(pdev, 0);
@@ -810,15 +821,26 @@ static int ingenic_drm_remove(struct platform_device *pdev)
static const struct jz_soc_info jz4740_soc_info = {
.needs_dev_clk = true,
+ .max_width = 800,
+ .max_height = 600,
};
static const struct jz_soc_info jz4725b_soc_info = {
.needs_dev_clk = false,
+ .max_width = 800,
+ .max_height = 600,
+};
+
+static const struct jz_soc_info jz4770_soc_info = {
+ .needs_dev_clk = false,
+ .max_width = 1280,
+ .max_height = 720,
};
static const struct of_device_id ingenic_drm_of_match[] = {
{ .compatible = "ingenic,jz4740-lcd", .data = &jz4740_soc_info },
{ .compatible = "ingenic,jz4725b-lcd", .data = &jz4725b_soc_info },
+ { .compatible = "ingenic,jz4770-lcd", .data = &jz4770_soc_info },
{ /* sentinel */ },
};
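The per-SoC limits added above feed both the advertised mode_config bounds
and the atomic_check rejection path. Reduced to its essentials, the check
pairs hdisplay with max_width and vdisplay with max_height:
/* Reject modes the SoC's LCD controller cannot scan out. */
static bool ingenic_mode_fits(const struct jz_soc_info *info,
			      unsigned int hdisplay, unsigned int vdisplay)
{
	return hdisplay <= info->max_width && vdisplay <= info->max_height;
}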
diff --git a/drivers/gpu/drm/lima/Kconfig b/drivers/gpu/drm/lima/Kconfig
index 571dc369a7e9..d589f09d04d9 100644
--- a/drivers/gpu/drm/lima/Kconfig
+++ b/drivers/gpu/drm/lima/Kconfig
@@ -11,4 +11,4 @@ config DRM_LIMA
select DRM_SCHED
select DRM_GEM_SHMEM_HELPER
help
- DRM driver for ARM Mali 400/450 GPUs.
+ DRM driver for ARM Mali 400/450 GPUs.
diff --git a/drivers/gpu/drm/lima/lima_sched.c b/drivers/gpu/drm/lima/lima_sched.c
index f522c5f99729..b561dd05bd62 100644
--- a/drivers/gpu/drm/lima/lima_sched.c
+++ b/drivers/gpu/drm/lima/lima_sched.c
@@ -159,9 +159,10 @@ int lima_sched_context_init(struct lima_sched_pipe *pipe,
struct lima_sched_context *context,
atomic_t *guilty)
{
- struct drm_sched_rq *rq = pipe->base.sched_rq + DRM_SCHED_PRIORITY_NORMAL;
+ struct drm_gpu_scheduler *sched = &pipe->base;
- return drm_sched_entity_init(&context->base, &rq, 1, guilty);
+ return drm_sched_entity_init(&context->base, DRM_SCHED_PRIORITY_NORMAL,
+ &sched, 1, guilty);
}
void lima_sched_context_fini(struct lima_sched_pipe *pipe,
@@ -255,13 +256,17 @@ static struct dma_fence *lima_sched_run_job(struct drm_sched_job *job)
return task->fence;
}
-static void lima_sched_handle_error_task(struct lima_sched_pipe *pipe,
- struct lima_sched_task *task)
+static void lima_sched_timedout_job(struct drm_sched_job *job)
{
+ struct lima_sched_pipe *pipe = to_lima_pipe(job->sched);
+ struct lima_sched_task *task = to_lima_task(job);
+
+ if (!pipe->error)
+ DRM_ERROR("lima job timeout\n");
+
drm_sched_stop(&pipe->base, &task->base);
- if (task)
- drm_sched_increase_karma(&task->base);
+ drm_sched_increase_karma(&task->base);
pipe->task_error(pipe);
@@ -284,16 +289,6 @@ static void lima_sched_handle_error_task(struct lima_sched_pipe *pipe,
drm_sched_start(&pipe->base, true);
}
-static void lima_sched_timedout_job(struct drm_sched_job *job)
-{
- struct lima_sched_pipe *pipe = to_lima_pipe(job->sched);
- struct lima_sched_task *task = to_lima_task(job);
-
- DRM_ERROR("lima job timeout\n");
-
- lima_sched_handle_error_task(pipe, task);
-}
-
static void lima_sched_free_job(struct drm_sched_job *job)
{
struct lima_sched_task *task = to_lima_task(job);
@@ -318,15 +313,6 @@ static const struct drm_sched_backend_ops lima_sched_ops = {
.free_job = lima_sched_free_job,
};
-static void lima_sched_error_work(struct work_struct *work)
-{
- struct lima_sched_pipe *pipe =
- container_of(work, struct lima_sched_pipe, error_work);
- struct lima_sched_task *task = pipe->current_task;
-
- lima_sched_handle_error_task(pipe, task);
-}
-
int lima_sched_pipe_init(struct lima_sched_pipe *pipe, const char *name)
{
unsigned int timeout = lima_sched_timeout_ms > 0 ?
@@ -335,8 +321,6 @@ int lima_sched_pipe_init(struct lima_sched_pipe *pipe, const char *name)
pipe->fence_context = dma_fence_context_alloc(1);
spin_lock_init(&pipe->fence_lock);
- INIT_WORK(&pipe->error_work, lima_sched_error_work);
-
return drm_sched_init(&pipe->base, &lima_sched_ops, 1, 0,
msecs_to_jiffies(timeout), name);
}
@@ -349,7 +333,7 @@ void lima_sched_pipe_fini(struct lima_sched_pipe *pipe)
void lima_sched_pipe_task_done(struct lima_sched_pipe *pipe)
{
if (pipe->error)
- schedule_work(&pipe->error_work);
+ drm_sched_fault(&pipe->base);
else {
struct lima_sched_task *task = pipe->current_task;
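With the dedicated error worker gone, a hardware fault is reported through
drm_sched_fault() and recovery runs inside the scheduler's own timeout
handler. The common shape of such a handler in that era of drm/scheduler
looks roughly like this sketch (lima's real handler also resets its hardware
pipe between these steps; error handling is elided):
static void example_timedout_job(struct drm_sched_job *bad)
{
	struct drm_gpu_scheduler *sched = bad->sched;

	drm_sched_stop(sched, bad);	/* park the queue, pick out "bad" */
	drm_sched_increase_karma(bad);	/* blame the guilty context */
	/* ... reset the hardware ... */
	drm_sched_resubmit_jobs(sched);	/* re-queue surviving jobs */
	drm_sched_start(sched, true);	/* resume fence signalling */
}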
diff --git a/drivers/gpu/drm/lima/lima_sched.h b/drivers/gpu/drm/lima/lima_sched.h
index 928af91c1118..1d814fecbcc0 100644
--- a/drivers/gpu/drm/lima/lima_sched.h
+++ b/drivers/gpu/drm/lima/lima_sched.h
@@ -68,8 +68,6 @@ struct lima_sched_pipe {
void (*task_fini)(struct lima_sched_pipe *pipe);
void (*task_error)(struct lima_sched_pipe *pipe);
void (*task_mmu_error)(struct lima_sched_pipe *pipe);
-
- struct work_struct error_work;
};
int lima_sched_task_init(struct lima_sched_task *task,
diff --git a/drivers/gpu/drm/mcde/mcde_display.c b/drivers/gpu/drm/mcde/mcde_display.c
index 751454ae3cd1..e59907e68854 100644
--- a/drivers/gpu/drm/mcde/mcde_display.c
+++ b/drivers/gpu/drm/mcde/mcde_display.c
@@ -498,24 +498,20 @@ static void mcde_configure_channel(struct mcde *mcde, enum mcde_channel ch,
}
/* Set up channel 0 sync (based on chnl_update_registers()) */
- if (mcde->te_sync) {
- /*
- * Turn on hardware TE0 synchronization
- */
+ if (mcde->video_mode || mcde->te_sync)
val = MCDE_CHNLXSYNCHMOD_SRC_SYNCH_HARDWARE
<< MCDE_CHNLXSYNCHMOD_SRC_SYNCH_SHIFT;
- val |= MCDE_CHNLXSYNCHMOD_OUT_SYNCH_SRC_TE0
- << MCDE_CHNLXSYNCHMOD_OUT_SYNCH_SRC_SHIFT;
- } else {
- /*
- * Set up sync source to software, out sync formatter
- * Code mostly from mcde_hw.c chnl_update_registers()
- */
+ else
val = MCDE_CHNLXSYNCHMOD_SRC_SYNCH_SOFTWARE
<< MCDE_CHNLXSYNCHMOD_SRC_SYNCH_SHIFT;
+
+ if (mcde->te_sync)
+ val |= MCDE_CHNLXSYNCHMOD_OUT_SYNCH_SRC_TE0
+ << MCDE_CHNLXSYNCHMOD_OUT_SYNCH_SRC_SHIFT;
+ else
val |= MCDE_CHNLXSYNCHMOD_OUT_SYNCH_SRC_FORMATTER
<< MCDE_CHNLXSYNCHMOD_OUT_SYNCH_SRC_SHIFT;
- }
+
writel(val, mcde->regs + sync);
/* Set up pixels per line and lines per frame */
@@ -934,10 +930,17 @@ static void mcde_display_enable(struct drm_simple_display_pipe *pipe,
val = readl(mcde->regs + MCDE_CRC);
val |= MCDE_CRC_SYCEN0;
writel(val, mcde->regs + MCDE_CRC);
-
- drm_crtc_vblank_on(crtc);
}
+ drm_crtc_vblank_on(crtc);
+
+ if (mcde->video_mode)
+ /*
+ * Keep FIFO permanently enabled in video mode,
+ * otherwise MCDE will stop feeding data to the panel.
+ */
+ mcde_enable_fifo(mcde, MCDE_FIFO_A);
+
dev_info(drm->dev, "MCDE display is enabled\n");
}
@@ -946,13 +949,22 @@ static void mcde_display_disable(struct drm_simple_display_pipe *pipe)
struct drm_crtc *crtc = &pipe->crtc;
struct drm_device *drm = crtc->dev;
struct mcde *mcde = drm->dev_private;
+ struct drm_pending_vblank_event *event;
- if (mcde->te_sync)
- drm_crtc_vblank_off(crtc);
+ drm_crtc_vblank_off(crtc);
/* Disable FIFO A flow */
mcde_disable_fifo(mcde, MCDE_FIFO_A, true);
+ event = crtc->state->event;
+ if (event) {
+ crtc->state->event = NULL;
+
+ spin_lock_irq(&crtc->dev->event_lock);
+ drm_crtc_send_vblank_event(crtc, event);
+ spin_unlock_irq(&crtc->dev->event_lock);
+ }
+
dev_info(drm->dev, "MCDE display is disabled\n");
}
@@ -1048,8 +1060,9 @@ static void mcde_display_update(struct drm_simple_display_pipe *pipe,
*/
if (fb) {
mcde_set_extsrc(mcde, drm_fb_cma_get_gem_addr(fb, pstate, 0));
- /* Send a single frame using software sync */
- mcde_display_send_one_frame(mcde);
+ if (!mcde->video_mode)
+ /* Send a single frame using software sync */
+ mcde_display_send_one_frame(mcde);
dev_info_once(mcde->dev, "sent first display update\n");
} else {
/*
@@ -1097,6 +1110,8 @@ static struct drm_simple_display_pipe_funcs mcde_display_funcs = {
.enable = mcde_display_enable,
.disable = mcde_display_disable,
.update = mcde_display_update,
+ .enable_vblank = mcde_display_enable_vblank,
+ .disable_vblank = mcde_display_disable_vblank,
.prepare_fb = drm_gem_fb_simple_display_pipe_prepare_fb,
};
@@ -1123,12 +1138,6 @@ int mcde_display_init(struct drm_device *drm)
DRM_FORMAT_YUV422,
};
- /* Provide vblank only when we have TE enabled */
- if (mcde->te_sync) {
- mcde_display_funcs.enable_vblank = mcde_display_enable_vblank;
- mcde_display_funcs.disable_vblank = mcde_display_disable_vblank;
- }
-
ret = drm_simple_display_pipe_init(drm, &mcde->pipe,
&mcde_display_funcs,
formats, ARRAY_SIZE(formats),
diff --git a/drivers/gpu/drm/mcde/mcde_drm.h b/drivers/gpu/drm/mcde/mcde_drm.h
index dab4db021231..80edd6628979 100644
--- a/drivers/gpu/drm/mcde/mcde_drm.h
+++ b/drivers/gpu/drm/mcde/mcde_drm.h
@@ -19,6 +19,7 @@ struct mcde {
struct mipi_dsi_device *mdsi;
s16 stride;
bool te_sync;
+ bool video_mode;
bool oneshot_mode;
unsigned int flow_active;
spinlock_t flow_lock; /* Locks the channel flow control */
diff --git a/drivers/gpu/drm/mcde/mcde_drv.c b/drivers/gpu/drm/mcde/mcde_drv.c
index 5649887d2b90..9008ddcfc528 100644
--- a/drivers/gpu/drm/mcde/mcde_drv.c
+++ b/drivers/gpu/drm/mcde/mcde_drv.c
@@ -179,18 +179,10 @@ static int mcde_modeset_init(struct drm_device *drm)
mode_config->min_height = 1;
mode_config->max_height = 1080;
- /*
- * Currently we only support vblank handling on the DSI bridge, using
- * TE synchronization. If TE sync is not set up, it is still possible
- * to push out a single update on demand, but this is hard for DRM to
- * exploit.
- */
- if (mcde->te_sync) {
- ret = drm_vblank_init(drm, 1);
- if (ret) {
- dev_err(drm->dev, "failed to init vblank\n");
- goto out_config;
- }
+ ret = drm_vblank_init(drm, 1);
+ if (ret) {
+ dev_err(drm->dev, "failed to init vblank\n");
+ goto out_config;
}
ret = mcde_display_init(drm);
@@ -339,8 +331,6 @@ static int mcde_probe(struct platform_device *pdev)
drm->dev_private = mcde;
platform_set_drvdata(pdev, drm);
- /* Enable use of the TE signal and interrupt */
- mcde->te_sync = true;
/* Enable continuous updates: this is what Linux' framebuffer expects */
mcde->oneshot_mode = false;
drm->dev_private = mcde;
diff --git a/drivers/gpu/drm/mcde/mcde_dsi.c b/drivers/gpu/drm/mcde/mcde_dsi.c
index ef4c630afe3f..bb6528b01cd0 100644
--- a/drivers/gpu/drm/mcde/mcde_dsi.c
+++ b/drivers/gpu/drm/mcde/mcde_dsi.c
@@ -39,7 +39,6 @@ struct mcde_dsi {
struct device *dev;
struct mcde *mcde;
struct drm_bridge bridge;
- struct drm_connector connector;
struct drm_panel *panel;
struct drm_bridge *bridge_out;
struct mipi_dsi_host dsi_host;
@@ -64,11 +63,6 @@ static inline struct mcde_dsi *host_to_mcde_dsi(struct mipi_dsi_host *h)
return container_of(h, struct mcde_dsi, dsi_host);
}
-static inline struct mcde_dsi *connector_to_mcde_dsi(struct drm_connector *c)
-{
- return container_of(c, struct mcde_dsi, connector);
-}
-
bool mcde_dsi_irq(struct mipi_dsi_device *mdsi)
{
struct mcde_dsi *d;
@@ -124,12 +118,41 @@ bool mcde_dsi_irq(struct mipi_dsi_device *mdsi)
val = readl(d->regs + DSI_VID_MODE_STS_FLAG);
if (val)
- dev_err(d->dev, "some video mode error status\n");
+ dev_dbg(d->dev, "DSI_VID_MODE_STS_FLAG = %08x\n", val);
+ if (val & DSI_VID_MODE_STS_VSG_RUNNING)
+ dev_dbg(d->dev, "VID mode VSG running\n");
+ if (val & DSI_VID_MODE_STS_ERR_MISSING_DATA)
+ dev_err(d->dev, "VID mode missing data\n");
+ if (val & DSI_VID_MODE_STS_ERR_MISSING_HSYNC)
+ dev_err(d->dev, "VID mode missing HSYNC\n");
+ if (val & DSI_VID_MODE_STS_ERR_MISSING_VSYNC)
+ dev_err(d->dev, "VID mode missing VSYNC\n");
+ if (val & DSI_VID_MODE_STS_REG_ERR_SMALL_LENGTH)
+ dev_err(d->dev, "VID mode less bytes than expected between two HSYNC\n");
+ if (val & DSI_VID_MODE_STS_REG_ERR_SMALL_HEIGHT)
+ dev_err(d->dev, "VID mode less lines than expected between two VSYNC\n");
+ if (val & (DSI_VID_MODE_STS_ERR_BURSTWRITE |
+ DSI_VID_MODE_STS_ERR_LINEWRITE |
+ DSI_VID_MODE_STS_ERR_LONGREAD))
+ dev_err(d->dev, "VID mode read/write error\n");
+ if (val & DSI_VID_MODE_STS_ERR_VRS_WRONG_LENGTH)
+ dev_err(d->dev, "VID mode received packets differ from expected size\n");
+ if (val & DSI_VID_MODE_STS_VSG_RECOVERY)
+ dev_err(d->dev, "VID mode VSG in recovery mode\n");
writel(val, d->regs + DSI_VID_MODE_STS_CLR);
return te_received;
}
+static void mcde_dsi_attach_to_mcde(struct mcde_dsi *d)
+{
+ d->mcde->mdsi = d->mdsi;
+
+ d->mcde->video_mode = !!(d->mdsi->mode_flags & MIPI_DSI_MODE_VIDEO);
+ /* Enable use of the TE signal for all command mode panels */
+ d->mcde->te_sync = !d->mcde->video_mode;
+}
+
static int mcde_dsi_host_attach(struct mipi_dsi_host *host,
struct mipi_dsi_device *mdsi)
{
@@ -148,7 +171,7 @@ static int mcde_dsi_host_attach(struct mipi_dsi_host *host,
d->mdsi = mdsi;
if (d->mcde)
- d->mcde->mdsi = mdsi;
+ mcde_dsi_attach_to_mcde(d);
return 0;
}
@@ -223,25 +246,25 @@ static ssize_t mcde_dsi_host_transfer(struct mipi_dsi_host *host,
if (txlen > 0) {
val = 0;
for (i = 0; i < 4 && i < txlen; i++)
- val |= tx[i] << (i & 3) * 8;
+ val |= tx[i] << (i * 8);
}
writel(val, d->regs + DSI_DIRECT_CMD_WRDAT0);
if (txlen > 4) {
val = 0;
for (i = 0; i < 4 && (i + 4) < txlen; i++)
- val |= tx[i + 4] << (i & 3) * 8;
+ val |= tx[i + 4] << (i * 8);
writel(val, d->regs + DSI_DIRECT_CMD_WRDAT1);
}
if (txlen > 8) {
val = 0;
for (i = 0; i < 4 && (i + 8) < txlen; i++)
- val |= tx[i + 8] << (i & 3) * 8;
+ val |= tx[i + 8] << (i * 8);
writel(val, d->regs + DSI_DIRECT_CMD_WRDAT2);
}
if (txlen > 12) {
val = 0;
for (i = 0; i < 4 && (i + 12) < txlen; i++)
- val |= tx[i + 12] << (i & 3) * 8;
+ val |= tx[i + 12] << (i * 8);
writel(val, d->regs + DSI_DIRECT_CMD_WRDAT3);
}
@@ -336,7 +359,7 @@ void mcde_dsi_te_request(struct mipi_dsi_device *mdsi)
val |= 0 << DSI_DIRECT_CMD_MAIN_SETTINGS_CMD_ID_SHIFT;
val |= 2 << DSI_DIRECT_CMD_MAIN_SETTINGS_CMD_SIZE_SHIFT;
val |= DSI_DIRECT_CMD_MAIN_SETTINGS_CMD_LP_EN;
- val |= DSI_DIRECT_CMD_MAIN_SETTINGS_CMD_HEAD_DCS_SHORT_WRITE_1 <<
+ val |= MIPI_DSI_GENERIC_SHORT_WRITE_1_PARAM <<
DSI_DIRECT_CMD_MAIN_SETTINGS_CMD_HEAD_SHIFT;
writel(val, d->regs + DSI_DIRECT_CMD_MAIN_SETTINGS);
@@ -365,13 +388,14 @@ void mcde_dsi_te_request(struct mipi_dsi_device *mdsi)
static void mcde_dsi_setup_video_mode(struct mcde_dsi *d,
const struct drm_display_mode *mode)
{
- u8 bpp = mipi_dsi_pixel_format_to_bpp(d->mdsi->format);
+ /* cpp, characters per pixel, number of bytes per pixel */
+ u8 cpp = mipi_dsi_pixel_format_to_bpp(d->mdsi->format) / 8;
+ u64 pclk;
u64 bpl;
- u32 hfp;
- u32 hbp;
- u32 hsa;
+ int hfp;
+ int hbp;
+ int hsa;
u32 blkline_pck, line_duration;
- u32 blkeol_pck, blkeol_duration;
u32 val;
val = 0;
@@ -408,11 +432,21 @@ static void mcde_dsi_setup_video_mode(struct mcde_dsi *d,
return;
}
- /* TODO: TVG could be enabled here */
+ /* TODO: TVG (test video generator) could be enabled here */
- /* Send blanking packet */
+ /*
+ * During vertical blanking: go to LP mode
+ * Like with the EOL setting, if this is not set, the vblank area is
+ * instead filled with NULL or blanking packets.
+ * FIXME: some Samsung phones and display panels such as s6e63m0 use
+ * DSI_VID_MAIN_CTL_REG_BLKLINE_MODE_BLANKING here instead,
+ * figure out how to properly configure that from the panel.
+ */
val |= DSI_VID_MAIN_CTL_REG_BLKLINE_MODE_LP_0;
- /* Send EOL packet */
+ /*
+ * During EOL: go to LP mode. If this is not set, the EOL area will be
+ * filled with NULL or blanking packets.
+ */
val |= DSI_VID_MAIN_CTL_REG_BLKEOL_MODE_LP_0;
/* Recovery mode 1 */
val |= 1 << DSI_VID_MAIN_CTL_RECOVERY_MODE_SHIFT;
@@ -420,13 +454,13 @@ static void mcde_dsi_setup_video_mode(struct mcde_dsi *d,
writel(val, d->regs + DSI_VID_MAIN_CTL);
/* Vertical frame parameters are pretty straight-forward */
- val = mode->vdisplay << DSI_VID_VSIZE_VSA_LENGTH_SHIFT;
+ val = mode->vdisplay << DSI_VID_VSIZE_VACT_LENGTH_SHIFT;
/* vertical front porch */
val |= (mode->vsync_start - mode->vdisplay)
<< DSI_VID_VSIZE_VFP_LENGTH_SHIFT;
/* vertical sync active */
val |= (mode->vsync_end - mode->vsync_start)
- << DSI_VID_VSIZE_VACT_LENGTH_SHIFT;
+ << DSI_VID_VSIZE_VSA_LENGTH_SHIFT;
/* vertical back porch */
val |= (mode->vtotal - mode->vsync_end)
<< DSI_VID_VSIZE_VBP_LENGTH_SHIFT;
@@ -434,36 +468,54 @@ static void mcde_dsi_setup_video_mode(struct mcde_dsi *d,
/*
* Horizontal frame parameters:
- * horizontal resolution is given in pixels and must be re-calculated
- * into bytes since this is what the hardware expects.
+ * horizontal resolution is given in pixels but must be re-calculated
+ * into bytes since this is what the hardware expects, these registers
+ * define the payload size of the packet.
+ *
+ * hfp = horizontal front porch in bytes
+ * hbp = horizontal back porch in bytes
+ * hsa = horizontal sync active in bytes
*
* 6 + 2 is HFP header + checksum
*/
- hfp = (mode->hsync_start - mode->hdisplay) * bpp - 6 - 2;
+ hfp = (mode->hsync_start - mode->hdisplay) * cpp - 6 - 2;
if (d->mdsi->mode_flags & MIPI_DSI_MODE_VIDEO_SYNC_PULSE) {
/*
+ * Use sync pulse for sync: explicit HSA time
* 6 is HBP header + checksum
* 4 is RGB header + checksum
*/
- hbp = (mode->htotal - mode->hsync_end) * bpp - 4 - 6;
+ hbp = (mode->htotal - mode->hsync_end) * cpp - 4 - 6;
/*
* 6 is HBP header + checksum
* 4 is HSW packet bytes
* 4 is RGB header + checksum
*/
- hsa = (mode->hsync_end - mode->hsync_start) * bpp - 4 - 4 - 6;
+ hsa = (mode->hsync_end - mode->hsync_start) * cpp - 4 - 4 - 6;
} else {
/*
- * HBP includes both back porch and sync
+ * Use event for sync: HBP includes both back porch and sync
* 6 is HBP header + checksum
* 4 is HSW packet bytes
* 4 is RGB header + checksum
*/
- hbp = (mode->htotal - mode->hsync_start) * bpp - 4 - 4 - 6;
- /* HSA is not considered in this mode and set to 0 */
+ hbp = (mode->htotal - mode->hsync_start) * cpp - 4 - 4 - 6;
+ /* HSA is not present in this mode and is set to 0 */
hsa = 0;
}
- dev_dbg(d->dev, "hfp: %u, hbp: %u, hsa: %u\n",
+ if (hfp < 0) {
+ dev_info(d->dev, "hfp negative, set to 0\n");
+ hfp = 0;
+ }
+ if (hbp < 0) {
+ dev_info(d->dev, "hbp negative, set to 0\n");
+ hbp = 0;
+ }
+ if (hsa < 0) {
+ dev_info(d->dev, "hsa negative, set to 0\n");
+ hsa = 0;
+ }
+ dev_dbg(d->dev, "hfp: %u, hbp: %u, hsa: %u bytes\n",
hfp, hbp, hsa);
/* Frame parameters: horizontal sync active */
@@ -474,91 +526,185 @@ static void mcde_dsi_setup_video_mode(struct mcde_dsi *d,
val |= hfp << DSI_VID_HSIZE1_HFP_LENGTH_SHIFT;
writel(val, d->regs + DSI_VID_HSIZE1);
- /* RGB data length (bytes on one scanline) */
- val = mode->hdisplay * (bpp / 8);
+ /* RGB data length (visible bytes on one scanline) */
+ val = mode->hdisplay * cpp;
writel(val, d->regs + DSI_VID_HSIZE2);
+ dev_dbg(d->dev, "RGB length, visible area on a line: %u bytes\n", val);
- /* TODO: further adjustments for TVG mode here */
+ /*
+ * Calculate the time between two pixels in picoseconds using
+ * the supplied refresh rate and total resolution including
+ * porches and sync.
+ */
+ /* (ps/s) / (pixels/s) = ps/pixels */
+ pclk = DIV_ROUND_UP_ULL(1000000000000,
+ (mode->vrefresh * mode->htotal * mode->vtotal));
+ dev_dbg(d->dev, "picoseconds between two pixels: %llu\n",
+ pclk);
/*
- * EOL packet length from bits per line calculations: pixel clock
- * is given in kHz, calculate the time between two pixels in
- * picoseconds.
+ * How many bytes per line will this update frequency yield?
+ *
+ * Calculate the number of picoseconds for one scanline (1),
+ * multiply by the number of bytes per second one lane can
+ * carry (3), then divide by 1000000000000 (2) to convert
+ * picoseconds to seconds; the result is the number of bytes
+ * transferred while one line is scanned out. Notice that we
+ * use the frequency the display wants, not what we actually
+ * get from the DSI PLL, which is hs_freq.
+ *
+ * The arithmetic is done in this order to avoid overflow.
*/
- bpl = mode->clock * mode->htotal;
- bpl *= (d->hs_freq / 8);
- do_div(bpl, 1000000); /* microseconds */
- do_div(bpl, 1000000); /* seconds */
+ bpl = pclk * mode->htotal; /* (1) picoseconds per line */
+ dev_dbg(d->dev, "picoseconds per line: %llu\n", bpl);
+ /* Multiply by bytes per second on one lane (3) */
+ bpl *= (d->mdsi->hs_rate / 8);
+ /* Convert picoseconds to seconds (2) */
+ bpl = DIV_ROUND_DOWN_ULL(bpl, 1000000); /* microseconds */
+ bpl = DIV_ROUND_DOWN_ULL(bpl, 1000000); /* seconds */
+ /* parallel transactions in all lanes */
bpl *= d->mdsi->lanes;
- dev_dbg(d->dev, "calculated bytes per line: %llu\n", bpl);
+ dev_dbg(d->dev,
+ "calculated bytes per line: %llu @ %d Hz with HS %lu Hz\n",
+ bpl, mode->vrefresh, d->mdsi->hs_rate);
+
/*
* 6 is header + checksum, header = 4 bytes, checksum = 2 bytes
* 4 is short packet for vsync/hsync
*/
if (d->mdsi->mode_flags & MIPI_DSI_MODE_VIDEO_SYNC_PULSE) {
- /* Fixme: isn't the hsync width in pixels? */
+ /* Set the event packet size to 0 (not used) */
+ writel(0, d->regs + DSI_VID_BLKSIZE1);
+ /*
+ * FIXME: isn't the hsync width in pixels? The porch and
+ * sync area size is in pixels here, but this -6
+ * seems to be for bytes. It looks like this in the vendor
+ * code though. Is it completely untested?
+ */
blkline_pck = bpl - (mode->hsync_end - mode->hsync_start) - 6;
val = blkline_pck << DSI_VID_BLKSIZE2_BLKLINE_PULSE_PCK_SHIFT;
writel(val, d->regs + DSI_VID_BLKSIZE2);
} else {
+ /* Set the sync pulse packet size to 0 (not used) */
+ writel(0, d->regs + DSI_VID_BLKSIZE2);
+ /* Specifying payload size in bytes (-4-6 from manual) */
blkline_pck = bpl - 4 - 6;
+ if (blkline_pck > 0x1FFF)
+ dev_err(d->dev, "blkline_pck too big %d bytes\n",
+ blkline_pck);
val = blkline_pck << DSI_VID_BLKSIZE1_BLKLINE_EVENT_PCK_SHIFT;
+ val &= DSI_VID_BLKSIZE1_BLKLINE_EVENT_PCK_MASK;
writel(val, d->regs + DSI_VID_BLKSIZE1);
}
- line_duration = (blkline_pck + 6) / d->mdsi->lanes;
- dev_dbg(d->dev, "line duration %u\n", line_duration);
+ /*
+ * The line duration is used to scale back the frequency from
+ * the max frequency supported by the HS clock to the desired
+ * update frequency in vrefresh.
+ */
+ line_duration = blkline_pck + 6;
+ /*
+ * The datasheet contains this complex condition to decreasing
+ * the line duration by 1 under very specific circumstances.
+ * Here we also imply that LP is used during burst EOL.
+ */
+ if (d->mdsi->lanes == 2 && (hsa & 0x01) && (hfp & 0x01)
+ && (d->mdsi->mode_flags & MIPI_DSI_MODE_VIDEO_BURST))
+ line_duration--;
+ line_duration = DIV_ROUND_CLOSEST(line_duration, d->mdsi->lanes);
+ dev_dbg(d->dev, "line duration %u bytes\n", line_duration);
val = line_duration << DSI_VID_DPHY_TIME_REG_LINE_DURATION_SHIFT;
/*
* This is the time to perform LP->HS on D-PHY
* FIXME: nowhere to get this from: DT property on the DSI?
+ * The manual says this is "system dependent".
+ * values like 48 and 72 seen in the vendor code.
*/
- val |= 0 << DSI_VID_DPHY_TIME_REG_WAKEUP_TIME_SHIFT;
+ val |= 48 << DSI_VID_DPHY_TIME_REG_WAKEUP_TIME_SHIFT;
writel(val, d->regs + DSI_VID_DPHY_TIME);
- /* Calculate block end of line */
- blkeol_pck = bpl - mode->hdisplay * bpp - 6;
- blkeol_duration = (blkeol_pck + 6) / d->mdsi->lanes;
- dev_dbg(d->dev, "blkeol pck: %u, duration: %u\n",
- blkeol_pck, blkeol_duration);
-
+ /*
+ * See the manual figure 657 page 2203 for understanding the impact
+ * of the different burst mode settings.
+ */
if (d->mdsi->mode_flags & MIPI_DSI_MODE_VIDEO_BURST) {
- /* Set up EOL clock for burst mode */
+ int blkeol_pck, blkeol_duration;
+ /*
+ * Packet size at EOL for burst mode, this is only used
+ * if DSI_VID_MAIN_CTL_REG_BLKEOL_MODE_LP_0 is NOT set,
+ * but we instead send NULL or blanking packets at EOL.
+ * This is given in number of bytes.
+ *
+ * See the manual page 2198 for the 13 reg_blkeol_pck bits.
+ */
+ blkeol_pck = bpl - (mode->htotal * cpp) - 6;
+ if (blkeol_pck < 0) {
+ dev_err(d->dev, "video block does not fit on line!\n");
+ dev_err(d->dev,
+ "calculated bytes per line: %llu @ %d Hz\n",
+ bpl, mode->vrefresh);
+ dev_err(d->dev,
+ "bytes per line (blkline_pck) %u bytes\n",
+ blkline_pck);
+ dev_err(d->dev,
+ "blkeol_pck becomes %d bytes\n", blkeol_pck);
+ return;
+ }
+ dev_dbg(d->dev, "BLKEOL packet: %d bytes\n", blkeol_pck);
+
val = readl(d->regs + DSI_VID_BLKSIZE1);
+ val &= ~DSI_VID_BLKSIZE1_BLKEOL_PCK_MASK;
val |= blkeol_pck << DSI_VID_BLKSIZE1_BLKEOL_PCK_SHIFT;
writel(val, d->regs + DSI_VID_BLKSIZE1);
- writel(blkeol_pck, d->regs + DSI_VID_VCA_SETTING2);
-
- writel(blkeol_duration, d->regs + DSI_VID_PCK_TIME);
- writel(blkeol_duration - 6, d->regs + DSI_VID_VCA_SETTING1);
+ /* Use the same value for exact burst limit */
+ val = blkeol_pck <<
+ DSI_VID_VCA_SETTING2_EXACT_BURST_LIMIT_SHIFT;
+ val &= DSI_VID_VCA_SETTING2_EXACT_BURST_LIMIT_MASK;
+ writel(val, d->regs + DSI_VID_VCA_SETTING2);
+ /*
+ * This BLKEOL duration is claimed to be the duration in clock
+ * cycles of the BLLP end-of-line (EOL) period for each line if
+ * DSI_VID_MAIN_CTL_REG_BLKEOL_MODE_LP_0 is set.
+ *
+ * It is hard to trust the manual's claim that this is in clock
+ * cycles as we mimic the behaviour of the vendor code, which
+ * appears to write a number of bytes that would have been
+ * transferred on a single lane.
+ *
+ * See the manual figure 657 page 2203 and page 2198 for the 13
+ * reg_blkeol_duration bits.
+ *
+ * FIXME: should this also be set up for non-burst mode
+ * according to figure 565 page 2202?
+ */
+ blkeol_duration = DIV_ROUND_CLOSEST(blkeol_pck + 6,
+ d->mdsi->lanes);
+ dev_dbg(d->dev, "BLKEOL duration: %d clock cycles\n",
+ blkeol_duration);
+
+ val = readl(d->regs + DSI_VID_PCK_TIME);
+ val &= ~DSI_VID_PCK_TIME_BLKEOL_DURATION_MASK;
+ val |= blkeol_duration <<
+ DSI_VID_PCK_TIME_BLKEOL_DURATION_SHIFT;
+ writel(val, d->regs + DSI_VID_PCK_TIME);
+
+ /* Max burst limit, this is given in bytes */
+ val = readl(d->regs + DSI_VID_VCA_SETTING1);
+ val &= ~DSI_VID_VCA_SETTING1_MAX_BURST_LIMIT_MASK;
+ val |= (blkeol_pck - 6) <<
+ DSI_VID_VCA_SETTING1_MAX_BURST_LIMIT_SHIFT;
+ writel(val, d->regs + DSI_VID_VCA_SETTING1);
}
/* Maximum line limit */
val = readl(d->regs + DSI_VID_VCA_SETTING2);
- val |= blkline_pck <<
- DSI_VID_VCA_SETTING2_EXACT_BURST_LIMIT_SHIFT;
+ val &= ~DSI_VID_VCA_SETTING2_MAX_LINE_LIMIT_MASK;
+ val |= (blkline_pck - 6) <<
+ DSI_VID_VCA_SETTING2_MAX_LINE_LIMIT_SHIFT;
writel(val, d->regs + DSI_VID_VCA_SETTING2);
-
- /* Put IF1 into video mode */
- val = readl(d->regs + DSI_MCTL_MAIN_DATA_CTL);
- val |= DSI_MCTL_MAIN_DATA_CTL_IF1_MODE;
- writel(val, d->regs + DSI_MCTL_MAIN_DATA_CTL);
-
- /* Disable command mode on IF1 */
- val = readl(d->regs + DSI_CMD_MODE_CTL);
- val &= ~DSI_CMD_MODE_CTL_IF1_LP_EN;
- writel(val, d->regs + DSI_CMD_MODE_CTL);
-
- /* Enable some error interrupts */
- val = readl(d->regs + DSI_VID_MODE_STS_CTL);
- val |= DSI_VID_MODE_STS_CTL_ERR_MISSING_VSYNC;
- val |= DSI_VID_MODE_STS_CTL_ERR_MISSING_DATA;
- writel(val, d->regs + DSI_VID_MODE_STS_CTL);
-
- /* Enable video mode */
- val = readl(d->regs + DSI_MCTL_MAIN_DATA_CTL);
- val |= DSI_MCTL_MAIN_DATA_CTL_VID_EN;
- writel(val, d->regs + DSI_MCTL_MAIN_DATA_CTL);
+ dev_dbg(d->dev, "blkline pck: %d bytes\n", blkline_pck - 6);
}
static void mcde_dsi_start(struct mcde_dsi *d)
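To make the refresh-rate to bytes-per-line conversion in the hunk above
concrete, a worked example with assumed panel numbers (480x800 at 60 Hz,
htotal 520, vtotal 830, two lanes, 420 MHz HS clock; none of these figures
come from the patch). The program mirrors the driver's order of operations:
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t vrefresh = 60, htotal = 520, vtotal = 830;
	uint64_t lanes = 2, hs_rate = 420000000;	/* Hz, per lane */
	uint64_t pix = vrefresh * htotal * vtotal;	/* pixels per second */
	uint64_t pclk, bpl;

	pclk = (1000000000000ULL + pix - 1) / pix;	/* ps per pixel */
	bpl = pclk * htotal;		/* ps per line */
	bpl *= hs_rate / 8;		/* x bytes per second on one lane */
	bpl /= 1000000;			/* ps -> us */
	bpl /= 1000000;			/* us -> s */
	bpl *= lanes;

	/* Prints ps/pixel=38617 bytes/line=2108 for these numbers. */
	printf("ps/pixel=%llu bytes/line=%llu\n",
	       (unsigned long long)pclk, (unsigned long long)bpl);
	return 0;
}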
@@ -670,30 +816,25 @@ static void mcde_dsi_start(struct mcde_dsi *d)
static void mcde_dsi_bridge_enable(struct drm_bridge *bridge)
{
struct mcde_dsi *d = bridge_to_mcde_dsi(bridge);
+ u32 val;
+
+ if (d->mdsi->mode_flags & MIPI_DSI_MODE_VIDEO) {
+ /* Enable video mode */
+ val = readl(d->regs + DSI_MCTL_MAIN_DATA_CTL);
+ val |= DSI_MCTL_MAIN_DATA_CTL_VID_EN;
+ writel(val, d->regs + DSI_MCTL_MAIN_DATA_CTL);
+ }
dev_info(d->dev, "enable DSI master\n");
};
-static void mcde_dsi_bridge_mode_set(struct drm_bridge *bridge,
- const struct drm_display_mode *mode,
- const struct drm_display_mode *adj)
+static void mcde_dsi_bridge_pre_enable(struct drm_bridge *bridge)
{
struct mcde_dsi *d = bridge_to_mcde_dsi(bridge);
- unsigned long pixel_clock_hz = mode->clock * 1000;
unsigned long hs_freq, lp_freq;
u32 val;
int ret;
- if (!d->mdsi) {
- dev_err(d->dev, "no DSI device attached to encoder!\n");
- return;
- }
-
- dev_info(d->dev, "set DSI master to %dx%d %lu Hz %s mode\n",
- mode->hdisplay, mode->vdisplay, pixel_clock_hz,
- (d->mdsi->mode_flags & MIPI_DSI_MODE_VIDEO) ? "VIDEO" : "CMD"
- );
-
/* Copy maximum clock frequencies */
if (d->mdsi->lp_rate)
lp_freq = d->mdsi->lp_rate;
@@ -732,7 +873,21 @@ static void mcde_dsi_bridge_mode_set(struct drm_bridge *bridge,
d->hs_freq);
if (d->mdsi->mode_flags & MIPI_DSI_MODE_VIDEO) {
- mcde_dsi_setup_video_mode(d, mode);
+ /* Put IF1 into video mode */
+ val = readl(d->regs + DSI_MCTL_MAIN_DATA_CTL);
+ val |= DSI_MCTL_MAIN_DATA_CTL_IF1_MODE;
+ writel(val, d->regs + DSI_MCTL_MAIN_DATA_CTL);
+
+ /* Disable command mode on IF1 */
+ val = readl(d->regs + DSI_CMD_MODE_CTL);
+ val &= ~DSI_CMD_MODE_CTL_IF1_LP_EN;
+ writel(val, d->regs + DSI_CMD_MODE_CTL);
+
+ /* Enable some error interrupts */
+ val = readl(d->regs + DSI_VID_MODE_STS_CTL);
+ val |= DSI_VID_MODE_STS_CTL_ERR_MISSING_VSYNC;
+ val |= DSI_VID_MODE_STS_CTL_ERR_MISSING_DATA;
+ writel(val, d->regs + DSI_VID_MODE_STS_CTL);
} else {
/* Command mode, clear IF1 ID */
val = readl(d->regs + DSI_CMD_MODE_CTL);
@@ -746,6 +901,26 @@ static void mcde_dsi_bridge_mode_set(struct drm_bridge *bridge,
}
}
+static void mcde_dsi_bridge_mode_set(struct drm_bridge *bridge,
+ const struct drm_display_mode *mode,
+ const struct drm_display_mode *adj)
+{
+ struct mcde_dsi *d = bridge_to_mcde_dsi(bridge);
+
+ if (!d->mdsi) {
+ dev_err(d->dev, "no DSI device attached to encoder!\n");
+ return;
+ }
+
+ dev_info(d->dev, "set DSI master to %dx%d %u Hz %s mode\n",
+ mode->hdisplay, mode->vdisplay, mode->clock * 1000,
+ (d->mdsi->mode_flags & MIPI_DSI_MODE_VIDEO) ? "VIDEO" : "CMD"
+ );
+
+ if (d->mdsi->mode_flags & MIPI_DSI_MODE_VIDEO)
+ mcde_dsi_setup_video_mode(d, mode);
+}
+
static void mcde_dsi_wait_for_command_mode_stop(struct mcde_dsi *d)
{
u32 val;
@@ -811,67 +986,23 @@ static void mcde_dsi_bridge_disable(struct drm_bridge *bridge)
clk_disable_unprepare(d->lp_clk);
}
-/*
- * This connector needs no special handling, just use the default
- * helpers for everything. It's pretty dummy.
- */
-static const struct drm_connector_funcs mcde_dsi_connector_funcs = {
- .reset = drm_atomic_helper_connector_reset,
- .fill_modes = drm_helper_probe_single_connector_modes,
- .destroy = drm_connector_cleanup,
- .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
- .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
-};
-
-static int mcde_dsi_get_modes(struct drm_connector *connector)
-{
- struct mcde_dsi *d = connector_to_mcde_dsi(connector);
-
- /* Just pass the question to the panel */
- if (d->panel)
- return drm_panel_get_modes(d->panel);
-
- /* TODO: deal with bridges */
-
- return 0;
-}
-
-static const struct drm_connector_helper_funcs
-mcde_dsi_connector_helper_funcs = {
- .get_modes = mcde_dsi_get_modes,
-};
-
static int mcde_dsi_bridge_attach(struct drm_bridge *bridge)
{
struct mcde_dsi *d = bridge_to_mcde_dsi(bridge);
struct drm_device *drm = bridge->dev;
int ret;
- drm_connector_helper_add(&d->connector,
- &mcde_dsi_connector_helper_funcs);
-
if (!drm_core_check_feature(drm, DRIVER_ATOMIC)) {
dev_err(d->dev, "we need atomic updates\n");
return -ENOTSUPP;
}
- ret = drm_connector_init(drm, &d->connector,
- &mcde_dsi_connector_funcs,
- DRM_MODE_CONNECTOR_DSI);
- if (ret) {
- dev_err(d->dev, "failed to initialize DSI bridge connector\n");
- return ret;
- }
- d->connector.polled = DRM_CONNECTOR_POLL_CONNECT;
- /* The encoder in the bridge attached to the DSI bridge */
- drm_connector_attach_encoder(&d->connector, bridge->encoder);
- /* Then we attach the DSI bridge to the output (panel etc) bridge */
+ /* Attach the DSI bridge to the output (panel etc) bridge */
ret = drm_bridge_attach(bridge->encoder, d->bridge_out, bridge);
if (ret) {
dev_err(d->dev, "failed to attach the DSI bridge\n");
return ret;
}
- d->connector.status = connector_status_connected;
return 0;
}
@@ -881,6 +1012,7 @@ static const struct drm_bridge_funcs mcde_dsi_bridge_funcs = {
.mode_set = mcde_dsi_bridge_mode_set,
.disable = mcde_dsi_bridge_disable,
.enable = mcde_dsi_bridge_enable,
+ .pre_enable = mcde_dsi_bridge_pre_enable,
};
static int mcde_dsi_bind(struct device *dev, struct device *master,
@@ -901,7 +1033,7 @@ static int mcde_dsi_bind(struct device *dev, struct device *master,
d->mcde = mcde;
/* If the display attached before binding, set this up */
if (d->mdsi)
- d->mcde->mdsi = d->mdsi;
+ mcde_dsi_attach_to_mcde(d);
/* Obtain the clocks */
d->hs_clk = devm_clk_get(dev, "hs");
diff --git a/drivers/gpu/drm/mcde/mcde_dsi_regs.h b/drivers/gpu/drm/mcde/mcde_dsi_regs.h
index c9253321a3be..16551af1089e 100644
--- a/drivers/gpu/drm/mcde/mcde_dsi_regs.h
+++ b/drivers/gpu/drm/mcde/mcde_dsi_regs.h
@@ -123,17 +123,6 @@
#define DSI_DIRECT_CMD_MAIN_SETTINGS_CMD_LONGNOTSHORT BIT(3)
#define DSI_DIRECT_CMD_MAIN_SETTINGS_CMD_HEAD_SHIFT 8
#define DSI_DIRECT_CMD_MAIN_SETTINGS_CMD_HEAD_MASK 0x00003F00
-#define DSI_DIRECT_CMD_MAIN_SETTINGS_CMD_HEAD_TURN_ON_PERIPHERAL 50
-#define DSI_DIRECT_CMD_MAIN_SETTINGS_CMD_HEAD_SHUT_DOWN_PERIPHERAL 34
-#define DSI_DIRECT_CMD_MAIN_SETTINGS_CMD_HEAD_GENERIC_SHORT_WRITE_0 3
-#define DSI_DIRECT_CMD_MAIN_SETTINGS_CMD_HEAD_GENERIC_SHORT_WRITE_1 19
-#define DSI_DIRECT_CMD_MAIN_SETTINGS_CMD_HEAD_GENERIC_SHORT_WRITE_2 35
-#define DSI_DIRECT_CMD_MAIN_SETTINGS_CMD_HEAD_GENERIC_LONG_WRITE 41
-#define DSI_DIRECT_CMD_MAIN_SETTINGS_CMD_HEAD_DCS_SHORT_WRITE_0 5
-#define DSI_DIRECT_CMD_MAIN_SETTINGS_CMD_HEAD_DCS_SHORT_WRITE_1 21
-#define DSI_DIRECT_CMD_MAIN_SETTINGS_CMD_HEAD_DCS_LONG_WRITE 57
-#define DSI_DIRECT_CMD_MAIN_SETTINGS_CMD_HEAD_DCS_READ 6
-#define DSI_DIRECT_CMD_MAIN_SETTINGS_CMD_HEAD_SET_MAX_PKT_SIZE 55
#define DSI_DIRECT_CMD_MAIN_SETTINGS_CMD_ID_SHIFT 14
#define DSI_DIRECT_CMD_MAIN_SETTINGS_CMD_SIZE_SHIFT 16
#define DSI_DIRECT_CMD_MAIN_SETTINGS_CMD_LP_EN BIT(21)
@@ -239,6 +228,7 @@
#define DSI_VID_PCK_TIME 0x000000A8
#define DSI_VID_PCK_TIME_BLKEOL_DURATION_SHIFT 0
+#define DSI_VID_PCK_TIME_BLKEOL_DURATION_MASK 0x00000FFF
#define DSI_VID_DPHY_TIME 0x000000AC
#define DSI_VID_DPHY_TIME_REG_LINE_DURATION_SHIFT 0
@@ -248,6 +238,16 @@
#define DSI_VID_MODE_STS 0x000000BC
#define DSI_VID_MODE_STS_VSG_RUNNING BIT(0)
+#define DSI_VID_MODE_STS_ERR_MISSING_DATA BIT(1)
+#define DSI_VID_MODE_STS_ERR_MISSING_HSYNC BIT(2)
+#define DSI_VID_MODE_STS_ERR_MISSING_VSYNC BIT(3)
+#define DSI_VID_MODE_STS_REG_ERR_SMALL_LENGTH BIT(4)
+#define DSI_VID_MODE_STS_REG_ERR_SMALL_HEIGHT BIT(5)
+#define DSI_VID_MODE_STS_ERR_BURSTWRITE BIT(6)
+#define DSI_VID_MODE_STS_ERR_LINEWRITE BIT(7)
+#define DSI_VID_MODE_STS_ERR_LONGREAD BIT(8)
+#define DSI_VID_MODE_STS_ERR_VRS_WRONG_LENGTH BIT(9)
+#define DSI_VID_MODE_STS_VSG_RECOVERY BIT(10)
#define DSI_VID_VCA_SETTING1 0x000000C0
#define DSI_VID_VCA_SETTING1_MAX_BURST_LIMIT_SHIFT 0
diff --git a/drivers/gpu/drm/mediatek/Makefile b/drivers/gpu/drm/mediatek/Makefile
index 8067a4be8311..b7a82ed5788f 100644
--- a/drivers/gpu/drm/mediatek/Makefile
+++ b/drivers/gpu/drm/mediatek/Makefile
@@ -7,7 +7,6 @@ mediatek-drm-y := mtk_disp_color.o \
mtk_drm_ddp.o \
mtk_drm_ddp_comp.o \
mtk_drm_drv.o \
- mtk_drm_fb.o \
mtk_drm_gem.o \
mtk_drm_plane.o \
mtk_dsi.o \
@@ -21,7 +20,7 @@ obj-$(CONFIG_DRM_MEDIATEK) += mediatek-drm.o
mediatek-drm-hdmi-objs := mtk_cec.o \
mtk_hdmi.o \
mtk_hdmi_ddc.o \
- mtk_mt2701_hdmi_phy.o \
+ mtk_mt2701_hdmi_phy.o \
mtk_mt8173_hdmi_phy.o \
mtk_hdmi_phy.o
diff --git a/drivers/gpu/drm/mediatek/mtk_disp_color.c b/drivers/gpu/drm/mediatek/mtk_disp_color.c
index 59de2a46aa49..6fb0d6983a4a 100644
--- a/drivers/gpu/drm/mediatek/mtk_disp_color.c
+++ b/drivers/gpu/drm/mediatek/mtk_disp_color.c
@@ -9,6 +9,7 @@
#include <linux/of_device.h>
#include <linux/of_irq.h>
#include <linux/platform_device.h>
+#include <linux/soc/mediatek/mtk-cmdq.h>
#include "mtk_drm_crtc.h"
#include "mtk_drm_ddp_comp.h"
@@ -45,12 +46,12 @@ static inline struct mtk_disp_color *comp_to_color(struct mtk_ddp_comp *comp)
static void mtk_color_config(struct mtk_ddp_comp *comp, unsigned int w,
unsigned int h, unsigned int vrefresh,
- unsigned int bpc)
+ unsigned int bpc, struct cmdq_pkt *cmdq_pkt)
{
struct mtk_disp_color *color = comp_to_color(comp);
- writel(w, comp->regs + DISP_COLOR_WIDTH(color));
- writel(h, comp->regs + DISP_COLOR_HEIGHT(color));
+ mtk_ddp_write(cmdq_pkt, w, comp, DISP_COLOR_WIDTH(color));
+ mtk_ddp_write(cmdq_pkt, h, comp, DISP_COLOR_HEIGHT(color));
}
static void mtk_color_start(struct mtk_ddp_comp *comp)
diff --git a/drivers/gpu/drm/mediatek/mtk_disp_ovl.c b/drivers/gpu/drm/mediatek/mtk_disp_ovl.c
index 4a55bb6e2213..891d80c73e04 100644
--- a/drivers/gpu/drm/mediatek/mtk_disp_ovl.c
+++ b/drivers/gpu/drm/mediatek/mtk_disp_ovl.c
@@ -11,6 +11,7 @@
#include <linux/of_device.h>
#include <linux/of_irq.h>
#include <linux/platform_device.h>
+#include <linux/soc/mediatek/mtk-cmdq.h>
#include "mtk_drm_crtc.h"
#include "mtk_drm_ddp_comp.h"
@@ -124,14 +125,15 @@ static void mtk_ovl_stop(struct mtk_ddp_comp *comp)
static void mtk_ovl_config(struct mtk_ddp_comp *comp, unsigned int w,
unsigned int h, unsigned int vrefresh,
- unsigned int bpc)
+ unsigned int bpc, struct cmdq_pkt *cmdq_pkt)
{
if (w != 0 && h != 0)
- writel_relaxed(h << 16 | w, comp->regs + DISP_REG_OVL_ROI_SIZE);
- writel_relaxed(0x0, comp->regs + DISP_REG_OVL_ROI_BGCLR);
+ mtk_ddp_write_relaxed(cmdq_pkt, h << 16 | w, comp,
+ DISP_REG_OVL_ROI_SIZE);
+ mtk_ddp_write_relaxed(cmdq_pkt, 0x0, comp, DISP_REG_OVL_ROI_BGCLR);
- writel(0x1, comp->regs + DISP_REG_OVL_RST);
- writel(0x0, comp->regs + DISP_REG_OVL_RST);
+ mtk_ddp_write(cmdq_pkt, 0x1, comp, DISP_REG_OVL_RST);
+ mtk_ddp_write(cmdq_pkt, 0x0, comp, DISP_REG_OVL_RST);
}
static unsigned int mtk_ovl_layer_nr(struct mtk_ddp_comp *comp)
@@ -175,16 +177,16 @@ static int mtk_ovl_layer_check(struct mtk_ddp_comp *comp, unsigned int idx,
return 0;
}
-static void mtk_ovl_layer_on(struct mtk_ddp_comp *comp, unsigned int idx)
+static void mtk_ovl_layer_on(struct mtk_ddp_comp *comp, unsigned int idx,
+ struct cmdq_pkt *cmdq_pkt)
{
- unsigned int reg;
unsigned int gmc_thrshd_l;
unsigned int gmc_thrshd_h;
unsigned int gmc_value;
struct mtk_disp_ovl *ovl = comp_to_ovl(comp);
- writel(0x1, comp->regs + DISP_REG_OVL_RDMA_CTRL(idx));
-
+ mtk_ddp_write(cmdq_pkt, 0x1, comp,
+ DISP_REG_OVL_RDMA_CTRL(idx));
gmc_thrshd_l = GMC_THRESHOLD_LOW >>
(GMC_THRESHOLD_BITS - ovl->data->gmc_bits);
gmc_thrshd_h = GMC_THRESHOLD_HIGH >>
@@ -194,22 +196,19 @@ static void mtk_ovl_layer_on(struct mtk_ddp_comp *comp, unsigned int idx)
else
gmc_value = gmc_thrshd_l | gmc_thrshd_l << 8 |
gmc_thrshd_h << 16 | gmc_thrshd_h << 24;
- writel(gmc_value, comp->regs + DISP_REG_OVL_RDMA_GMC(idx));
-
- reg = readl(comp->regs + DISP_REG_OVL_SRC_CON);
- reg = reg | BIT(idx);
- writel(reg, comp->regs + DISP_REG_OVL_SRC_CON);
+ mtk_ddp_write(cmdq_pkt, gmc_value,
+ comp, DISP_REG_OVL_RDMA_GMC(idx));
+ mtk_ddp_write_mask(cmdq_pkt, BIT(idx), comp,
+ DISP_REG_OVL_SRC_CON, BIT(idx));
}
-static void mtk_ovl_layer_off(struct mtk_ddp_comp *comp, unsigned int idx)
+static void mtk_ovl_layer_off(struct mtk_ddp_comp *comp, unsigned int idx,
+ struct cmdq_pkt *cmdq_pkt)
{
- unsigned int reg;
-
- reg = readl(comp->regs + DISP_REG_OVL_SRC_CON);
- reg = reg & ~BIT(idx);
- writel(reg, comp->regs + DISP_REG_OVL_SRC_CON);
-
- writel(0x0, comp->regs + DISP_REG_OVL_RDMA_CTRL(idx));
+ mtk_ddp_write_mask(cmdq_pkt, 0, comp,
+ DISP_REG_OVL_SRC_CON, BIT(idx));
+ mtk_ddp_write(cmdq_pkt, 0, comp,
+ DISP_REG_OVL_RDMA_CTRL(idx));
}
static unsigned int ovl_fmt_convert(struct mtk_disp_ovl *ovl, unsigned int fmt)
@@ -249,7 +248,8 @@ static unsigned int ovl_fmt_convert(struct mtk_disp_ovl *ovl, unsigned int fmt)
}
static void mtk_ovl_layer_config(struct mtk_ddp_comp *comp, unsigned int idx,
- struct mtk_plane_state *state)
+ struct mtk_plane_state *state,
+ struct cmdq_pkt *cmdq_pkt)
{
struct mtk_disp_ovl *ovl = comp_to_ovl(comp);
struct mtk_plane_pending_state *pending = &state->pending;
@@ -260,11 +260,13 @@ static void mtk_ovl_layer_config(struct mtk_ddp_comp *comp, unsigned int idx,
unsigned int src_size = (pending->height << 16) | pending->width;
unsigned int con;
- if (!pending->enable)
- mtk_ovl_layer_off(comp, idx);
+ if (!pending->enable) {
+ mtk_ovl_layer_off(comp, idx, cmdq_pkt);
+ return;
+ }
con = ovl_fmt_convert(ovl, fmt);
- if (idx != 0)
+ if (state->base.fb->format->has_alpha)
con |= OVL_CON_AEN | OVL_CON_ALPHA;
if (pending->rotation & DRM_MODE_REFLECT_Y) {
@@ -277,14 +279,18 @@ static void mtk_ovl_layer_config(struct mtk_ddp_comp *comp, unsigned int idx,
addr += pending->pitch - 1;
}
- writel_relaxed(con, comp->regs + DISP_REG_OVL_CON(idx));
- writel_relaxed(pitch, comp->regs + DISP_REG_OVL_PITCH(idx));
- writel_relaxed(src_size, comp->regs + DISP_REG_OVL_SRC_SIZE(idx));
- writel_relaxed(offset, comp->regs + DISP_REG_OVL_OFFSET(idx));
- writel_relaxed(addr, comp->regs + DISP_REG_OVL_ADDR(ovl, idx));
-
- if (pending->enable)
- mtk_ovl_layer_on(comp, idx);
+ mtk_ddp_write_relaxed(cmdq_pkt, con, comp,
+ DISP_REG_OVL_CON(idx));
+ mtk_ddp_write_relaxed(cmdq_pkt, pitch, comp,
+ DISP_REG_OVL_PITCH(idx));
+ mtk_ddp_write_relaxed(cmdq_pkt, src_size, comp,
+ DISP_REG_OVL_SRC_SIZE(idx));
+ mtk_ddp_write_relaxed(cmdq_pkt, offset, comp,
+ DISP_REG_OVL_OFFSET(idx));
+ mtk_ddp_write_relaxed(cmdq_pkt, addr, comp,
+ DISP_REG_OVL_ADDR(ovl, idx));
+
+ mtk_ovl_layer_on(comp, idx, cmdq_pkt);
}
static void mtk_ovl_bgclr_in_on(struct mtk_ddp_comp *comp)
@@ -313,8 +319,6 @@ static const struct mtk_ddp_comp_funcs mtk_disp_ovl_funcs = {
.disable_vblank = mtk_ovl_disable_vblank,
.supported_rotations = mtk_ovl_supported_rotations,
.layer_nr = mtk_ovl_layer_nr,
- .layer_on = mtk_ovl_layer_on,
- .layer_off = mtk_ovl_layer_off,
.layer_check = mtk_ovl_layer_check,
.layer_config = mtk_ovl_layer_config,
.bgclr_in_on = mtk_ovl_bgclr_in_on,
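Every register access in these callbacks now goes through mtk_ddp_write*()
so the same code path can either batch writes into a GCE command packet or
hit the registers directly. The helpers themselves are introduced elsewhere
in this series; a plausible shape, assuming the regs_pa/subsys fields added
for CMDQ and a contemporaneous cmdq_pkt_write() argument order, is:
void mtk_ddp_write(struct cmdq_pkt *cmdq_pkt, unsigned int value,
		   struct mtk_ddp_comp *comp, unsigned int offset)
{
#if IS_REACHABLE(CONFIG_MTK_CMDQ)
	if (cmdq_pkt) {
		cmdq_pkt_write(cmdq_pkt, comp->subsys,
			       comp->regs_pa + offset, value);
		return;
	}
#endif
	writel(value, comp->regs + offset);
}

void mtk_ddp_write_mask(struct cmdq_pkt *cmdq_pkt, unsigned int value,
			struct mtk_ddp_comp *comp, unsigned int offset,
			unsigned int mask)
{
#if IS_REACHABLE(CONFIG_MTK_CMDQ)
	if (cmdq_pkt) {
		cmdq_pkt_write_mask(cmdq_pkt, comp->subsys,
				    comp->regs_pa + offset, value, mask);
		return;
	}
#endif
	/* read-modify-write fallback when no packet is supplied */
	writel((readl(comp->regs + offset) & ~mask) | (value & mask),
	       comp->regs + offset);
}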
diff --git a/drivers/gpu/drm/mediatek/mtk_disp_rdma.c b/drivers/gpu/drm/mediatek/mtk_disp_rdma.c
index 405afef31407..0cb848d64206 100644
--- a/drivers/gpu/drm/mediatek/mtk_disp_rdma.c
+++ b/drivers/gpu/drm/mediatek/mtk_disp_rdma.c
@@ -9,6 +9,7 @@
#include <linux/of_device.h>
#include <linux/of_irq.h>
#include <linux/platform_device.h>
+#include <linux/soc/mediatek/mtk-cmdq.h>
#include "mtk_drm_crtc.h"
#include "mtk_drm_ddp_comp.h"
@@ -125,14 +126,16 @@ static void mtk_rdma_stop(struct mtk_ddp_comp *comp)
static void mtk_rdma_config(struct mtk_ddp_comp *comp, unsigned int width,
unsigned int height, unsigned int vrefresh,
- unsigned int bpc)
+ unsigned int bpc, struct cmdq_pkt *cmdq_pkt)
{
unsigned int threshold;
unsigned int reg;
struct mtk_disp_rdma *rdma = comp_to_rdma(comp);
- rdma_update_bits(comp, DISP_REG_RDMA_SIZE_CON_0, 0xfff, width);
- rdma_update_bits(comp, DISP_REG_RDMA_SIZE_CON_1, 0xfffff, height);
+ mtk_ddp_write_mask(cmdq_pkt, width, comp,
+ DISP_REG_RDMA_SIZE_CON_0, 0xfff);
+ mtk_ddp_write_mask(cmdq_pkt, height, comp,
+ DISP_REG_RDMA_SIZE_CON_1, 0xfffff);
/*
* Enable FIFO underflow since DSI and DPI can't be blocked.
@@ -144,7 +147,7 @@ static void mtk_rdma_config(struct mtk_ddp_comp *comp, unsigned int width,
reg = RDMA_FIFO_UNDERFLOW_EN |
RDMA_FIFO_PSEUDO_SIZE(RDMA_FIFO_SIZE(rdma)) |
RDMA_OUTPUT_VALID_FIFO_THRESHOLD(threshold);
- writel(reg, comp->regs + DISP_REG_RDMA_FIFO_CON);
+ mtk_ddp_write(cmdq_pkt, reg, comp, DISP_REG_RDMA_FIFO_CON);
}
static unsigned int rdma_fmt_convert(struct mtk_disp_rdma *rdma,
@@ -190,7 +193,8 @@ static unsigned int mtk_rdma_layer_nr(struct mtk_ddp_comp *comp)
}
static void mtk_rdma_layer_config(struct mtk_ddp_comp *comp, unsigned int idx,
- struct mtk_plane_state *state)
+ struct mtk_plane_state *state,
+ struct cmdq_pkt *cmdq_pkt)
{
struct mtk_disp_rdma *rdma = comp_to_rdma(comp);
struct mtk_plane_pending_state *pending = &state->pending;
@@ -200,24 +204,27 @@ static void mtk_rdma_layer_config(struct mtk_ddp_comp *comp, unsigned int idx,
unsigned int con;
con = rdma_fmt_convert(rdma, fmt);
- writel_relaxed(con, comp->regs + DISP_RDMA_MEM_CON);
+ mtk_ddp_write_relaxed(cmdq_pkt, con, comp, DISP_RDMA_MEM_CON);
if (fmt == DRM_FORMAT_UYVY || fmt == DRM_FORMAT_YUYV) {
- rdma_update_bits(comp, DISP_REG_RDMA_SIZE_CON_0,
- RDMA_MATRIX_ENABLE, RDMA_MATRIX_ENABLE);
- rdma_update_bits(comp, DISP_REG_RDMA_SIZE_CON_0,
- RDMA_MATRIX_INT_MTX_SEL,
- RDMA_MATRIX_INT_MTX_BT601_to_RGB);
+ mtk_ddp_write_mask(cmdq_pkt, RDMA_MATRIX_ENABLE, comp,
+ DISP_REG_RDMA_SIZE_CON_0,
+ RDMA_MATRIX_ENABLE);
+ mtk_ddp_write_mask(cmdq_pkt, RDMA_MATRIX_INT_MTX_BT601_to_RGB,
+ comp, DISP_REG_RDMA_SIZE_CON_0,
+ RDMA_MATRIX_INT_MTX_SEL);
} else {
- rdma_update_bits(comp, DISP_REG_RDMA_SIZE_CON_0,
- RDMA_MATRIX_ENABLE, 0);
+ mtk_ddp_write_mask(cmdq_pkt, 0, comp,
+ DISP_REG_RDMA_SIZE_CON_0,
+ RDMA_MATRIX_ENABLE);
}
+ mtk_ddp_write_relaxed(cmdq_pkt, addr, comp, DISP_RDMA_MEM_START_ADDR);
+ mtk_ddp_write_relaxed(cmdq_pkt, pitch, comp, DISP_RDMA_MEM_SRC_PITCH);
+ mtk_ddp_write(cmdq_pkt, RDMA_MEM_GMC, comp,
+ DISP_RDMA_MEM_GMC_SETTING_0);
+ mtk_ddp_write_mask(cmdq_pkt, RDMA_MODE_MEMORY, comp,
+ DISP_REG_RDMA_GLOBAL_CON, RDMA_MODE_MEMORY);
- writel_relaxed(addr, comp->regs + DISP_RDMA_MEM_START_ADDR);
- writel_relaxed(pitch, comp->regs + DISP_RDMA_MEM_SRC_PITCH);
- writel(RDMA_MEM_GMC, comp->regs + DISP_RDMA_MEM_GMC_SETTING_0);
- rdma_update_bits(comp, DISP_REG_RDMA_GLOBAL_CON,
- RDMA_MODE_MEMORY, RDMA_MODE_MEMORY);
}
static const struct mtk_ddp_comp_funcs mtk_disp_rdma_funcs = {
diff --git a/drivers/gpu/drm/mediatek/mtk_drm_crtc.c b/drivers/gpu/drm/mediatek/mtk_drm_crtc.c
index 3305a94fc930..0dfcd1787e65 100644
--- a/drivers/gpu/drm/mediatek/mtk_drm_crtc.c
+++ b/drivers/gpu/drm/mediatek/mtk_drm_crtc.c
@@ -5,6 +5,7 @@
#include <linux/clk.h>
#include <linux/pm_runtime.h>
+#include <linux/soc/mediatek/mtk-cmdq.h>
#include <asm/barrier.h>
#include <soc/mediatek/smi.h>
@@ -42,11 +43,20 @@ struct mtk_drm_crtc {
struct drm_plane *planes;
unsigned int layer_nr;
bool pending_planes;
+ bool pending_async_planes;
+
+#if IS_REACHABLE(CONFIG_MTK_CMDQ)
+ struct cmdq_client *cmdq_client;
+ u32 cmdq_event;
+#endif
void __iomem *config_regs;
struct mtk_disp_mutex *mutex;
unsigned int ddp_comp_nr;
struct mtk_ddp_comp **ddp_comp;
+
+ /* lock for display hardware access */
+ struct mutex hw_lock;
};
struct mtk_crtc_state {
@@ -230,6 +240,13 @@ struct mtk_ddp_comp *mtk_drm_ddp_comp_for_plane(struct drm_crtc *crtc,
return NULL;
}
+#if IS_REACHABLE(CONFIG_MTK_CMDQ)
+static void ddp_cmdq_cb(struct cmdq_cb_data data)
+{
+ cmdq_pkt_destroy(data.data);
+}
+#endif
+
static int mtk_crtc_ddp_hw_init(struct mtk_drm_crtc *mtk_crtc)
{
struct drm_crtc *crtc = &mtk_crtc->base;
@@ -298,7 +315,7 @@ static int mtk_crtc_ddp_hw_init(struct mtk_drm_crtc *mtk_crtc)
if (i == 1)
mtk_ddp_comp_bgclr_in_on(comp);
- mtk_ddp_comp_config(comp, width, height, vrefresh, bpc);
+ mtk_ddp_comp_config(comp, width, height, vrefresh, bpc, NULL);
mtk_ddp_comp_start(comp);
}
@@ -313,7 +330,7 @@ static int mtk_crtc_ddp_hw_init(struct mtk_drm_crtc *mtk_crtc)
comp = mtk_drm_ddp_comp_for_plane(crtc, plane, &local_layer);
if (comp)
mtk_ddp_comp_layer_config(comp, local_layer,
- plane_state);
+ plane_state, NULL);
}
return 0;
@@ -328,6 +345,7 @@ err_pm_runtime_put:
static void mtk_crtc_ddp_hw_fini(struct mtk_drm_crtc *mtk_crtc)
{
struct drm_device *drm = mtk_crtc->base.dev;
+ struct drm_crtc *crtc = &mtk_crtc->base;
int i;
DRM_DEBUG_DRIVER("%s\n", __func__);
@@ -353,9 +371,17 @@ static void mtk_crtc_ddp_hw_fini(struct mtk_drm_crtc *mtk_crtc)
mtk_disp_mutex_unprepare(mtk_crtc->mutex);
pm_runtime_put(drm->dev);
+
+ if (crtc->state->event && !crtc->state->active) {
+ spin_lock_irq(&crtc->dev->event_lock);
+ drm_crtc_send_vblank_event(crtc, crtc->state->event);
+ crtc->state->event = NULL;
+ spin_unlock_irq(&crtc->dev->event_lock);
+ }
}
-static void mtk_crtc_ddp_config(struct drm_crtc *crtc)
+static void mtk_crtc_ddp_config(struct drm_crtc *crtc,
+ struct cmdq_pkt *cmdq_handle)
{
struct mtk_drm_crtc *mtk_crtc = to_mtk_crtc(crtc);
struct mtk_crtc_state *state = to_mtk_crtc_state(mtk_crtc->base.state);
@@ -371,7 +397,8 @@ static void mtk_crtc_ddp_config(struct drm_crtc *crtc)
if (state->pending_config) {
mtk_ddp_comp_config(comp, state->pending_width,
state->pending_height,
- state->pending_vrefresh, 0);
+ state->pending_vrefresh, 0,
+ cmdq_handle);
state->pending_config = false;
}
@@ -391,11 +418,82 @@ static void mtk_crtc_ddp_config(struct drm_crtc *crtc)
if (comp)
mtk_ddp_comp_layer_config(comp, local_layer,
- plane_state);
+ plane_state,
+ cmdq_handle);
plane_state->pending.config = false;
}
mtk_crtc->pending_planes = false;
}
+
+ if (mtk_crtc->pending_async_planes) {
+ for (i = 0; i < mtk_crtc->layer_nr; i++) {
+ struct drm_plane *plane = &mtk_crtc->planes[i];
+ struct mtk_plane_state *plane_state;
+
+ plane_state = to_mtk_plane_state(plane->state);
+
+ if (!plane_state->pending.async_config)
+ continue;
+
+ comp = mtk_drm_ddp_comp_for_plane(crtc, plane,
+ &local_layer);
+
+ if (comp)
+ mtk_ddp_comp_layer_config(comp, local_layer,
+ plane_state,
+ cmdq_handle);
+ plane_state->pending.async_config = false;
+ }
+ mtk_crtc->pending_async_planes = false;
+ }
+}
+
+static void mtk_drm_crtc_hw_config(struct mtk_drm_crtc *mtk_crtc)
+{
+#if IS_REACHABLE(CONFIG_MTK_CMDQ)
+ struct cmdq_pkt *cmdq_handle;
+#endif
+ struct drm_crtc *crtc = &mtk_crtc->base;
+ struct mtk_drm_private *priv = crtc->dev->dev_private;
+ unsigned int pending_planes = 0, pending_async_planes = 0;
+ int i;
+
+ mutex_lock(&mtk_crtc->hw_lock);
+ for (i = 0; i < mtk_crtc->layer_nr; i++) {
+ struct drm_plane *plane = &mtk_crtc->planes[i];
+ struct mtk_plane_state *plane_state;
+
+ plane_state = to_mtk_plane_state(plane->state);
+ if (plane_state->pending.dirty) {
+ plane_state->pending.config = true;
+ plane_state->pending.dirty = false;
+ pending_planes |= BIT(i);
+ } else if (plane_state->pending.async_dirty) {
+ plane_state->pending.async_config = true;
+ plane_state->pending.async_dirty = false;
+ pending_async_planes |= BIT(i);
+ }
+ }
+ if (pending_planes)
+ mtk_crtc->pending_planes = true;
+ if (pending_async_planes)
+ mtk_crtc->pending_async_planes = true;
+
+ if (priv->data->shadow_register) {
+ mtk_disp_mutex_acquire(mtk_crtc->mutex);
+ mtk_crtc_ddp_config(crtc, NULL);
+ mtk_disp_mutex_release(mtk_crtc->mutex);
+ }
+#if IS_REACHABLE(CONFIG_MTK_CMDQ)
+ if (mtk_crtc->cmdq_client) {
+ cmdq_handle = cmdq_pkt_create(mtk_crtc->cmdq_client, PAGE_SIZE);
+ cmdq_pkt_clear_event(cmdq_handle, mtk_crtc->cmdq_event);
+ cmdq_pkt_wfe(cmdq_handle, mtk_crtc->cmdq_event);
+ mtk_crtc_ddp_config(crtc, cmdq_handle);
+ cmdq_pkt_flush_async(cmdq_handle, ddp_cmdq_cb, cmdq_handle);
+ }
+#endif
+ mutex_unlock(&mtk_crtc->hw_lock);
}
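
For readers tracing the new GCE path: mtk_drm_crtc_hw_config() creates a packet, clears any stale frame event, waits for the next one so the writes land in the blanking period, records the register writes, then flushes asynchronously and frees the packet from the completion callback. A minimal sketch of that pattern, using only the mtk-cmdq calls already visible in this patch (error handling omitted, illustrative rather than definitive):

#include <linux/soc/mediatek/mtk-cmdq.h>

/* Sketch: cl and event are assumed to come from cmdq_mbox_create() and
 * the "mediatek,gce-events" property, as in mtk_drm_crtc_create(). */
static void flush_done(struct cmdq_cb_data data)
{
	cmdq_pkt_destroy(data.data);	/* packet passed back as callback data */
}

static void queue_reg_writes(struct cmdq_client *cl, u32 event)
{
	struct cmdq_pkt *pkt = cmdq_pkt_create(cl, PAGE_SIZE);

	cmdq_pkt_clear_event(pkt, event);	/* drop a stale frame-done event */
	cmdq_pkt_wfe(pkt, event);		/* wait for the next frame done */
	/* ... cmdq_pkt_write()/cmdq_pkt_write_mask() calls recorded here ... */
	cmdq_pkt_flush_async(pkt, flush_done, pkt);
}
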
int mtk_drm_crtc_plane_check(struct drm_crtc *crtc, struct drm_plane *plane,
@@ -410,6 +508,20 @@ int mtk_drm_crtc_plane_check(struct drm_crtc *crtc, struct drm_plane *plane,
return 0;
}
+void mtk_drm_crtc_async_update(struct drm_crtc *crtc, struct drm_plane *plane,
+ struct drm_plane_state *new_state)
+{
+ struct mtk_drm_crtc *mtk_crtc = to_mtk_crtc(crtc);
+ const struct drm_plane_helper_funcs *plane_helper_funcs =
+ plane->helper_private;
+
+ if (!mtk_crtc->enabled)
+ return;
+
+ plane_helper_funcs->atomic_update(plane, new_state);
+ mtk_drm_crtc_hw_config(mtk_crtc);
+}
+
static void mtk_drm_crtc_atomic_enable(struct drm_crtc *crtc,
struct drm_crtc_state *old_state)
{
@@ -457,6 +569,7 @@ static void mtk_drm_crtc_atomic_disable(struct drm_crtc *crtc,
}
mtk_crtc->pending_planes = true;
+ mtk_drm_crtc_hw_config(mtk_crtc);
/* Wait for planes to be disabled */
drm_crtc_wait_one_vblank(crtc);
@@ -488,34 +601,16 @@ static void mtk_drm_crtc_atomic_flush(struct drm_crtc *crtc,
struct drm_crtc_state *old_crtc_state)
{
struct mtk_drm_crtc *mtk_crtc = to_mtk_crtc(crtc);
- struct mtk_drm_private *priv = crtc->dev->dev_private;
- unsigned int pending_planes = 0;
int i;
if (mtk_crtc->event)
mtk_crtc->pending_needs_vblank = true;
- for (i = 0; i < mtk_crtc->layer_nr; i++) {
- struct drm_plane *plane = &mtk_crtc->planes[i];
- struct mtk_plane_state *plane_state;
-
- plane_state = to_mtk_plane_state(plane->state);
- if (plane_state->pending.dirty) {
- plane_state->pending.config = true;
- plane_state->pending.dirty = false;
- pending_planes |= BIT(i);
- }
- }
- if (pending_planes)
- mtk_crtc->pending_planes = true;
if (crtc->state->color_mgmt_changed)
- for (i = 0; i < mtk_crtc->ddp_comp_nr; i++)
+ for (i = 0; i < mtk_crtc->ddp_comp_nr; i++) {
mtk_ddp_gamma_set(mtk_crtc->ddp_comp[i], crtc->state);
-
- if (priv->data->shadow_register) {
- mtk_disp_mutex_acquire(mtk_crtc->mutex);
- mtk_crtc_ddp_config(crtc);
- mtk_disp_mutex_release(mtk_crtc->mutex);
- }
+ mtk_ddp_ctm_set(mtk_crtc->ddp_comp[i], crtc->state);
+ }
+ mtk_drm_crtc_hw_config(mtk_crtc);
}
static const struct drm_crtc_funcs mtk_crtc_funcs = {
@@ -565,8 +660,12 @@ void mtk_crtc_ddp_irq(struct drm_crtc *crtc, struct mtk_ddp_comp *comp)
struct mtk_drm_crtc *mtk_crtc = to_mtk_crtc(crtc);
struct mtk_drm_private *priv = crtc->dev->dev_private;
+#if IS_REACHABLE(CONFIG_MTK_CMDQ)
+ if (!priv->data->shadow_register && !mtk_crtc->cmdq_client)
+#else
if (!priv->data->shadow_register)
- mtk_crtc_ddp_config(crtc);
+#endif
+ mtk_crtc_ddp_config(crtc, NULL);
mtk_drm_finish_page_flip(mtk_crtc);
}
@@ -633,6 +732,8 @@ int mtk_drm_crtc_create(struct drm_device *drm_dev,
int pipe = priv->num_pipes;
int ret;
int i;
+ bool has_ctm = false;
+ uint gamma_lut_size = 0;
if (!path)
return 0;
@@ -683,6 +784,14 @@ int mtk_drm_crtc_create(struct drm_device *drm_dev,
}
mtk_crtc->ddp_comp[i] = comp;
+
+ if (comp->funcs) {
+ if (comp->funcs->gamma_set)
+ gamma_lut_size = MTK_LUT_SIZE;
+
+ if (comp->funcs->ctm_set)
+ has_ctm = true;
+ }
}
for (i = 0; i < mtk_crtc->ddp_comp_nr; i++)
@@ -703,9 +812,28 @@ int mtk_drm_crtc_create(struct drm_device *drm_dev,
NULL, pipe);
if (ret < 0)
return ret;
- drm_mode_crtc_set_gamma_size(&mtk_crtc->base, MTK_LUT_SIZE);
- drm_crtc_enable_color_mgmt(&mtk_crtc->base, 0, false, MTK_LUT_SIZE);
- priv->num_pipes++;
+ if (gamma_lut_size)
+ drm_mode_crtc_set_gamma_size(&mtk_crtc->base, gamma_lut_size);
+ drm_crtc_enable_color_mgmt(&mtk_crtc->base, 0, has_ctm, gamma_lut_size);
+ priv->num_pipes++;
+ mutex_init(&mtk_crtc->hw_lock);
+
+#if IS_REACHABLE(CONFIG_MTK_CMDQ)
+ mtk_crtc->cmdq_client =
+ cmdq_mbox_create(dev, drm_crtc_index(&mtk_crtc->base),
+ 2000);
+ if (IS_ERR(mtk_crtc->cmdq_client)) {
+ dev_dbg(dev, "mtk_crtc %d failed to create mailbox client, writing register by CPU now\n",
+ drm_crtc_index(&mtk_crtc->base));
+ mtk_crtc->cmdq_client = NULL;
+ }
+ ret = of_property_read_u32_index(dev->of_node, "mediatek,gce-events",
+ drm_crtc_index(&mtk_crtc->base),
+ &mtk_crtc->cmdq_event);
+ if (ret)
+ dev_dbg(dev, "mtk_crtc %d failed to get mediatek,gce-events property\n",
+ drm_crtc_index(&mtk_crtc->base));
+#endif
return 0;
}
diff --git a/drivers/gpu/drm/mediatek/mtk_drm_crtc.h b/drivers/gpu/drm/mediatek/mtk_drm_crtc.h
index 6afe1c19557a..a2b4677a451c 100644
--- a/drivers/gpu/drm/mediatek/mtk_drm_crtc.h
+++ b/drivers/gpu/drm/mediatek/mtk_drm_crtc.h
@@ -21,5 +21,7 @@ int mtk_drm_crtc_create(struct drm_device *drm_dev,
unsigned int path_len);
int mtk_drm_crtc_plane_check(struct drm_crtc *crtc, struct drm_plane *plane,
struct mtk_plane_state *state);
+void mtk_drm_crtc_async_update(struct drm_crtc *crtc, struct drm_plane *plane,
+ struct drm_plane_state *plane_state);
#endif /* MTK_DRM_CRTC_H */
diff --git a/drivers/gpu/drm/mediatek/mtk_drm_ddp_comp.c b/drivers/gpu/drm/mediatek/mtk_drm_ddp_comp.c
index 7f21307cda75..1f5a112bb034 100644
--- a/drivers/gpu/drm/mediatek/mtk_drm_ddp_comp.c
+++ b/drivers/gpu/drm/mediatek/mtk_drm_ddp_comp.c
@@ -12,7 +12,7 @@
#include <linux/of_irq.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>
-
+#include <linux/soc/mediatek/mtk-cmdq.h>
#include "mtk_drm_drv.h"
#include "mtk_drm_plane.h"
#include "mtk_drm_ddp_comp.h"
@@ -37,7 +37,15 @@
#define CCORR_EN BIT(0)
#define DISP_CCORR_CFG 0x0020
#define CCORR_RELAY_MODE BIT(0)
+#define CCORR_ENGINE_EN BIT(1)
+#define CCORR_GAMMA_OFF BIT(2)
+#define CCORR_WGAMUT_SRC_CLIP BIT(3)
#define DISP_CCORR_SIZE 0x0030
+#define DISP_CCORR_COEF_0 0x0080
+#define DISP_CCORR_COEF_1 0x0084
+#define DISP_CCORR_COEF_2 0x0088
+#define DISP_CCORR_COEF_3 0x008C
+#define DISP_CCORR_COEF_4 0x0090
#define DISP_DITHER_EN 0x0000
#define DITHER_EN BIT(0)
@@ -76,36 +84,84 @@
#define DITHER_ADD_LSHIFT_G(x) (((x) & 0x7) << 4)
#define DITHER_ADD_RSHIFT_G(x) (((x) & 0x7) << 0)
+void mtk_ddp_write(struct cmdq_pkt *cmdq_pkt, unsigned int value,
+ struct mtk_ddp_comp *comp, unsigned int offset)
+{
+#if IS_REACHABLE(CONFIG_MTK_CMDQ)
+ if (cmdq_pkt)
+ cmdq_pkt_write(cmdq_pkt, comp->subsys,
+ comp->regs_pa + offset, value);
+ else
+#endif
+ writel(value, comp->regs + offset);
+}
+
+void mtk_ddp_write_relaxed(struct cmdq_pkt *cmdq_pkt, unsigned int value,
+ struct mtk_ddp_comp *comp,
+ unsigned int offset)
+{
+#if IS_REACHABLE(CONFIG_MTK_CMDQ)
+ if (cmdq_pkt)
+ cmdq_pkt_write(cmdq_pkt, comp->subsys,
+ comp->regs_pa + offset, value);
+ else
+#endif
+ writel_relaxed(value, comp->regs + offset);
+}
+
+void mtk_ddp_write_mask(struct cmdq_pkt *cmdq_pkt,
+ unsigned int value,
+ struct mtk_ddp_comp *comp,
+ unsigned int offset,
+ unsigned int mask)
+{
+#if IS_REACHABLE(CONFIG_MTK_CMDQ)
+ if (cmdq_pkt) {
+ cmdq_pkt_write_mask(cmdq_pkt, comp->subsys,
+ comp->regs_pa + offset, value, mask);
+ } else {
+#endif
+ u32 tmp = readl(comp->regs + offset);
+
+ tmp = (tmp & ~mask) | (value & mask);
+ writel(tmp, comp->regs + offset);
+#if IS_REACHABLE(CONFIG_MTK_CMDQ)
+ }
+#endif
+}
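
Note that these helpers make the packet argument optional: a NULL cmdq_pkt means "write immediately via MMIO", while a non-NULL packet means "record the write for the GCE to replay". A hedged caller sketch (the register offset here is purely illustrative, not a real DISP register):

/* Illustrative only: program a hypothetical size register on either path. */
static void example_set_size(struct mtk_ddp_comp *comp, unsigned int w,
			     unsigned int h, struct cmdq_pkt *cmdq_pkt)
{
	/* Same call either way; the helper picks MMIO or GCE recording. */
	mtk_ddp_write(cmdq_pkt, h << 16 | w, comp, 0x0030 /* hypothetical */);
}
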
+
void mtk_dither_set(struct mtk_ddp_comp *comp, unsigned int bpc,
- unsigned int CFG)
+ unsigned int CFG, struct cmdq_pkt *cmdq_pkt)
{
/* If bpc is 0, dithering was not requested, so leave it disabled */
if (bpc == 0)
return;
if (bpc >= MTK_MIN_BPC) {
- writel(0, comp->regs + DISP_DITHER_5);
- writel(0, comp->regs + DISP_DITHER_7);
- writel(DITHER_LSB_ERR_SHIFT_R(MTK_MAX_BPC - bpc) |
- DITHER_ADD_LSHIFT_R(MTK_MAX_BPC - bpc) |
- DITHER_NEW_BIT_MODE,
- comp->regs + DISP_DITHER_15);
- writel(DITHER_LSB_ERR_SHIFT_B(MTK_MAX_BPC - bpc) |
- DITHER_ADD_LSHIFT_B(MTK_MAX_BPC - bpc) |
- DITHER_LSB_ERR_SHIFT_G(MTK_MAX_BPC - bpc) |
- DITHER_ADD_LSHIFT_G(MTK_MAX_BPC - bpc),
- comp->regs + DISP_DITHER_16);
- writel(DISP_DITHERING, comp->regs + CFG);
+ mtk_ddp_write(cmdq_pkt, 0, comp, DISP_DITHER_5);
+ mtk_ddp_write(cmdq_pkt, 0, comp, DISP_DITHER_7);
+ mtk_ddp_write(cmdq_pkt,
+ DITHER_LSB_ERR_SHIFT_R(MTK_MAX_BPC - bpc) |
+ DITHER_ADD_LSHIFT_R(MTK_MAX_BPC - bpc) |
+ DITHER_NEW_BIT_MODE,
+ comp, DISP_DITHER_15);
+ mtk_ddp_write(cmdq_pkt,
+ DITHER_LSB_ERR_SHIFT_B(MTK_MAX_BPC - bpc) |
+ DITHER_ADD_LSHIFT_B(MTK_MAX_BPC - bpc) |
+ DITHER_LSB_ERR_SHIFT_G(MTK_MAX_BPC - bpc) |
+ DITHER_ADD_LSHIFT_G(MTK_MAX_BPC - bpc),
+ comp, DISP_DITHER_16);
+ mtk_ddp_write(cmdq_pkt, DISP_DITHERING, comp, CFG);
}
}
static void mtk_od_config(struct mtk_ddp_comp *comp, unsigned int w,
unsigned int h, unsigned int vrefresh,
- unsigned int bpc)
+ unsigned int bpc, struct cmdq_pkt *cmdq_pkt)
{
- writel(w << 16 | h, comp->regs + DISP_OD_SIZE);
- writel(OD_RELAYMODE, comp->regs + DISP_OD_CFG);
- mtk_dither_set(comp, bpc, DISP_OD_CFG);
+ mtk_ddp_write(cmdq_pkt, w << 16 | h, comp, DISP_OD_SIZE);
+ mtk_ddp_write(cmdq_pkt, OD_RELAYMODE, comp, DISP_OD_CFG);
+ mtk_dither_set(comp, bpc, DISP_OD_CFG, cmdq_pkt);
}
static void mtk_od_start(struct mtk_ddp_comp *comp)
@@ -120,9 +176,9 @@ static void mtk_ufoe_start(struct mtk_ddp_comp *comp)
static void mtk_aal_config(struct mtk_ddp_comp *comp, unsigned int w,
unsigned int h, unsigned int vrefresh,
- unsigned int bpc)
+ unsigned int bpc, struct cmdq_pkt *cmdq_pkt)
{
- writel(h << 16 | w, comp->regs + DISP_AAL_SIZE);
+ mtk_ddp_write(cmdq_pkt, h << 16 | w, comp, DISP_AAL_SIZE);
}
static void mtk_aal_start(struct mtk_ddp_comp *comp)
@@ -137,10 +193,10 @@ static void mtk_aal_stop(struct mtk_ddp_comp *comp)
static void mtk_ccorr_config(struct mtk_ddp_comp *comp, unsigned int w,
unsigned int h, unsigned int vrefresh,
- unsigned int bpc)
+ unsigned int bpc, struct cmdq_pkt *cmdq_pkt)
{
- writel(h << 16 | w, comp->regs + DISP_CCORR_SIZE);
- writel(CCORR_RELAY_MODE, comp->regs + DISP_CCORR_CFG);
+ mtk_ddp_write(cmdq_pkt, h << 16 | w, comp, DISP_CCORR_SIZE);
+ mtk_ddp_write(cmdq_pkt, CCORR_ENGINE_EN, comp, DISP_CCORR_CFG);
}
static void mtk_ccorr_start(struct mtk_ddp_comp *comp)
@@ -153,12 +209,63 @@ static void mtk_ccorr_stop(struct mtk_ddp_comp *comp)
writel_relaxed(0x0, comp->regs + DISP_CCORR_EN);
}
+/* Converts a DRM S31.32 value to the HW S1.10 format. */
+static u16 mtk_ctm_s31_32_to_s1_10(u64 in)
+{
+ u16 r;
+
+ /* Sign bit. */
+ r = in & BIT_ULL(63) ? BIT(11) : 0;
+
+ if ((in & GENMASK_ULL(62, 33)) > 0) {
+ /* The identity value 0x100000000 maps to 0x400; */
+ /* anything larger saturates to the maximum, 0x7ff. */
+ r |= GENMASK(10, 0);
+ } else {
+ /* keep the 11 most significant bits, in[32:22]. */
+ r |= (in >> 22) & GENMASK(10, 0);
+ }
+
+ return r;
+}
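
A few worked values make the mapping concrete; recall that drm_color_ctm entries are S31.32 sign-magnitude, so these are editorial examples, not part of the patch:

/*
 * Worked examples for mtk_ctm_s31_32_to_s1_10():
 *   1.0 -> in = 1ULL << 32               -> (in >> 22) & 0x7ff = 0x400
 *   0.5 -> in = 1ULL << 31               -> 0x200
 *   2.0 -> in = 1ULL << 33 (bit 33 set)  -> saturates to 0x7ff
 *  -1.0 -> in = BIT_ULL(63) | 1ULL << 32 -> BIT(11) | 0x400 = 0xc00
 */
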
+
+static void mtk_ccorr_ctm_set(struct mtk_ddp_comp *comp,
+ struct drm_crtc_state *state)
+{
+ struct drm_property_blob *blob = state->ctm;
+ struct drm_color_ctm *ctm;
+ const u64 *input;
+ uint16_t coeffs[9] = { 0 };
+ int i;
+ struct cmdq_pkt *cmdq_pkt = NULL;
+
+ if (!blob)
+ return;
+
+ ctm = (struct drm_color_ctm *)blob->data;
+ input = ctm->matrix;
+
+ for (i = 0; i < ARRAY_SIZE(coeffs); i++)
+ coeffs[i] = mtk_ctm_s31_32_to_s1_10(input[i]);
+
+ mtk_ddp_write(cmdq_pkt, coeffs[0] << 16 | coeffs[1],
+ comp, DISP_CCORR_COEF_0);
+ mtk_ddp_write(cmdq_pkt, coeffs[2] << 16 | coeffs[3],
+ comp, DISP_CCORR_COEF_1);
+ mtk_ddp_write(cmdq_pkt, coeffs[4] << 16 | coeffs[5],
+ comp, DISP_CCORR_COEF_2);
+ mtk_ddp_write(cmdq_pkt, coeffs[6] << 16 | coeffs[7],
+ comp, DISP_CCORR_COEF_3);
+ mtk_ddp_write(cmdq_pkt, coeffs[8] << 16,
+ comp, DISP_CCORR_COEF_4);
+}
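
The matrix itself arrives from userspace as a CTM property blob. A minimal libdrm sketch that pushes an identity matrix (fd, crtc_id and ctm_prop_id are assumed to have been obtained elsewhere):

#include <stdint.h>
#include <xf86drmMode.h>

static void set_identity_ctm(int fd, uint32_t crtc_id, uint32_t ctm_prop_id)
{
	struct drm_color_ctm ctm = { 0 };
	uint32_t blob_id;
	int i;

	for (i = 0; i < 3; i++)
		ctm.matrix[i * 3 + i] = 1ULL << 32;	/* 1.0 in S31.32 */

	drmModeCreatePropertyBlob(fd, &ctm, sizeof(ctm), &blob_id);
	drmModeObjectSetProperty(fd, crtc_id, DRM_MODE_OBJECT_CRTC,
				 ctm_prop_id, blob_id);
}
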
+
static void mtk_dither_config(struct mtk_ddp_comp *comp, unsigned int w,
unsigned int h, unsigned int vrefresh,
- unsigned int bpc)
+ unsigned int bpc, struct cmdq_pkt *cmdq_pkt)
{
- writel(h << 16 | w, comp->regs + DISP_DITHER_SIZE);
- writel(DITHER_RELAY_MODE, comp->regs + DISP_DITHER_CFG);
+ mtk_ddp_write(cmdq_pkt, h << 16 | w, comp, DISP_DITHER_SIZE);
+ mtk_ddp_write(cmdq_pkt, DITHER_RELAY_MODE, comp, DISP_DITHER_CFG);
}
static void mtk_dither_start(struct mtk_ddp_comp *comp)
@@ -173,10 +280,10 @@ static void mtk_dither_stop(struct mtk_ddp_comp *comp)
static void mtk_gamma_config(struct mtk_ddp_comp *comp, unsigned int w,
unsigned int h, unsigned int vrefresh,
- unsigned int bpc)
+ unsigned int bpc, struct cmdq_pkt *cmdq_pkt)
{
- writel(h << 16 | w, comp->regs + DISP_GAMMA_SIZE);
- mtk_dither_set(comp, bpc, DISP_GAMMA_CFG);
+ mtk_ddp_write(cmdq_pkt, h << 16 | w, comp, DISP_GAMMA_SIZE);
+ mtk_dither_set(comp, bpc, DISP_GAMMA_CFG, cmdq_pkt);
}
static void mtk_gamma_start(struct mtk_ddp_comp *comp)
@@ -223,6 +330,7 @@ static const struct mtk_ddp_comp_funcs ddp_ccorr = {
.config = mtk_ccorr_config,
.start = mtk_ccorr_start,
.stop = mtk_ccorr_stop,
+ .ctm_set = mtk_ccorr_ctm_set,
};
static const struct mtk_ddp_comp_funcs ddp_dither = {
@@ -326,6 +434,11 @@ int mtk_ddp_comp_init(struct device *dev, struct device_node *node,
enum mtk_ddp_comp_type type;
struct device_node *larb_node;
struct platform_device *larb_pdev;
+#if IS_REACHABLE(CONFIG_MTK_CMDQ)
+ struct resource res;
+ struct cmdq_client_reg cmdq_reg;
+ int ret;
+#endif
if (comp_id < 0 || comp_id >= DDP_COMPONENT_ID_MAX)
return -EINVAL;
@@ -379,6 +492,19 @@ int mtk_ddp_comp_init(struct device *dev, struct device_node *node,
comp->larb_dev = &larb_pdev->dev;
+#if IS_REACHABLE(CONFIG_MTK_CMDQ)
+ if (of_address_to_resource(node, 0, &res) != 0) {
+ dev_err(dev, "Missing reg in %s node\n", node->full_name);
+ return -EINVAL;
+ }
+ comp->regs_pa = res.start;
+
+ ret = cmdq_dev_get_client_reg(dev, &cmdq_reg, 0);
+ if (ret)
+ dev_dbg(dev, "get mediatek,gce-client-reg fail!\n");
+ else
+ comp->subsys = cmdq_reg.subsys;
+#endif
return 0;
}
diff --git a/drivers/gpu/drm/mediatek/mtk_drm_ddp_comp.h b/drivers/gpu/drm/mediatek/mtk_drm_ddp_comp.h
index 2f1e9e75b8da..debe36395fe7 100644
--- a/drivers/gpu/drm/mediatek/mtk_drm_ddp_comp.h
+++ b/drivers/gpu/drm/mediatek/mtk_drm_ddp_comp.h
@@ -69,27 +69,29 @@ enum mtk_ddp_comp_id {
};
struct mtk_ddp_comp;
-
+struct cmdq_pkt;
struct mtk_ddp_comp_funcs {
void (*config)(struct mtk_ddp_comp *comp, unsigned int w,
- unsigned int h, unsigned int vrefresh, unsigned int bpc);
+ unsigned int h, unsigned int vrefresh,
+ unsigned int bpc, struct cmdq_pkt *cmdq_pkt);
void (*start)(struct mtk_ddp_comp *comp);
void (*stop)(struct mtk_ddp_comp *comp);
void (*enable_vblank)(struct mtk_ddp_comp *comp, struct drm_crtc *crtc);
void (*disable_vblank)(struct mtk_ddp_comp *comp);
unsigned int (*supported_rotations)(struct mtk_ddp_comp *comp);
unsigned int (*layer_nr)(struct mtk_ddp_comp *comp);
- void (*layer_on)(struct mtk_ddp_comp *comp, unsigned int idx);
- void (*layer_off)(struct mtk_ddp_comp *comp, unsigned int idx);
int (*layer_check)(struct mtk_ddp_comp *comp,
unsigned int idx,
struct mtk_plane_state *state);
void (*layer_config)(struct mtk_ddp_comp *comp, unsigned int idx,
- struct mtk_plane_state *state);
+ struct mtk_plane_state *state,
+ struct cmdq_pkt *cmdq_pkt);
void (*gamma_set)(struct mtk_ddp_comp *comp,
struct drm_crtc_state *state);
void (*bgclr_in_on)(struct mtk_ddp_comp *comp);
void (*bgclr_in_off)(struct mtk_ddp_comp *comp);
+ void (*ctm_set)(struct mtk_ddp_comp *comp,
+ struct drm_crtc_state *state);
};
struct mtk_ddp_comp {
@@ -99,14 +101,17 @@ struct mtk_ddp_comp {
struct device *larb_dev;
enum mtk_ddp_comp_id id;
const struct mtk_ddp_comp_funcs *funcs;
+ resource_size_t regs_pa;
+ u8 subsys;
};
static inline void mtk_ddp_comp_config(struct mtk_ddp_comp *comp,
unsigned int w, unsigned int h,
- unsigned int vrefresh, unsigned int bpc)
+ unsigned int vrefresh, unsigned int bpc,
+ struct cmdq_pkt *cmdq_pkt)
{
if (comp->funcs && comp->funcs->config)
- comp->funcs->config(comp, w, h, vrefresh, bpc);
+ comp->funcs->config(comp, w, h, vrefresh, bpc, cmdq_pkt);
}
static inline void mtk_ddp_comp_start(struct mtk_ddp_comp *comp)
@@ -151,20 +156,6 @@ static inline unsigned int mtk_ddp_comp_layer_nr(struct mtk_ddp_comp *comp)
return 0;
}
-static inline void mtk_ddp_comp_layer_on(struct mtk_ddp_comp *comp,
- unsigned int idx)
-{
- if (comp->funcs && comp->funcs->layer_on)
- comp->funcs->layer_on(comp, idx);
-}
-
-static inline void mtk_ddp_comp_layer_off(struct mtk_ddp_comp *comp,
- unsigned int idx)
-{
- if (comp->funcs && comp->funcs->layer_off)
- comp->funcs->layer_off(comp, idx);
-}
-
static inline int mtk_ddp_comp_layer_check(struct mtk_ddp_comp *comp,
unsigned int idx,
struct mtk_plane_state *state)
@@ -176,10 +167,11 @@ static inline int mtk_ddp_comp_layer_check(struct mtk_ddp_comp *comp,
static inline void mtk_ddp_comp_layer_config(struct mtk_ddp_comp *comp,
unsigned int idx,
- struct mtk_plane_state *state)
+ struct mtk_plane_state *state,
+ struct cmdq_pkt *cmdq_pkt)
{
if (comp->funcs && comp->funcs->layer_config)
- comp->funcs->layer_config(comp, idx, state);
+ comp->funcs->layer_config(comp, idx, state, cmdq_pkt);
}
static inline void mtk_ddp_gamma_set(struct mtk_ddp_comp *comp,
@@ -201,6 +193,13 @@ static inline void mtk_ddp_comp_bgclr_in_off(struct mtk_ddp_comp *comp)
comp->funcs->bgclr_in_off(comp);
}
+static inline void mtk_ddp_ctm_set(struct mtk_ddp_comp *comp,
+ struct drm_crtc_state *state)
+{
+ if (comp->funcs && comp->funcs->ctm_set)
+ comp->funcs->ctm_set(comp, state);
+}
+
int mtk_ddp_comp_get_id(struct device_node *node,
enum mtk_ddp_comp_type comp_type);
int mtk_ddp_comp_init(struct device *dev, struct device_node *comp_node,
@@ -209,6 +208,13 @@ int mtk_ddp_comp_init(struct device *dev, struct device_node *comp_node,
int mtk_ddp_comp_register(struct drm_device *drm, struct mtk_ddp_comp *comp);
void mtk_ddp_comp_unregister(struct drm_device *drm, struct mtk_ddp_comp *comp);
void mtk_dither_set(struct mtk_ddp_comp *comp, unsigned int bpc,
- unsigned int CFG);
-
+ unsigned int CFG, struct cmdq_pkt *cmdq_pkt);
+enum mtk_ddp_comp_type mtk_ddp_comp_get_type(enum mtk_ddp_comp_id comp_id);
+void mtk_ddp_write(struct cmdq_pkt *cmdq_pkt, unsigned int value,
+ struct mtk_ddp_comp *comp, unsigned int offset);
+void mtk_ddp_write_relaxed(struct cmdq_pkt *cmdq_pkt, unsigned int value,
+ struct mtk_ddp_comp *comp, unsigned int offset);
+void mtk_ddp_write_mask(struct cmdq_pkt *cmdq_pkt, unsigned int value,
+ struct mtk_ddp_comp *comp, unsigned int offset,
+ unsigned int mask);
#endif /* MTK_DRM_DDP_COMP_H */
diff --git a/drivers/gpu/drm/mediatek/mtk_drm_drv.c b/drivers/gpu/drm/mediatek/mtk_drm_drv.c
index 84d14213d992..0563c6813333 100644
--- a/drivers/gpu/drm/mediatek/mtk_drm_drv.c
+++ b/drivers/gpu/drm/mediatek/mtk_drm_drv.c
@@ -16,8 +16,10 @@
#include <drm/drm_atomic_helper.h>
#include <drm/drm_drv.h>
#include <drm/drm_fb_helper.h>
+#include <drm/drm_fourcc.h>
#include <drm/drm_gem.h>
#include <drm/drm_gem_cma_helper.h>
+#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/drm_of.h>
#include <drm/drm_probe_helper.h>
#include <drm/drm_vblank.h>
@@ -27,7 +29,6 @@
#include "mtk_drm_ddp.h"
#include "mtk_drm_ddp_comp.h"
#include "mtk_drm_drv.h"
-#include "mtk_drm_fb.h"
#include "mtk_drm_gem.h"
#define DRIVER_NAME "mediatek"
@@ -36,89 +37,27 @@
#define DRIVER_MAJOR 1
#define DRIVER_MINOR 0
-static void mtk_atomic_schedule(struct mtk_drm_private *private,
- struct drm_atomic_state *state)
-{
- private->commit.state = state;
- schedule_work(&private->commit.work);
-}
-
-static void mtk_atomic_complete(struct mtk_drm_private *private,
- struct drm_atomic_state *state)
-{
- struct drm_device *drm = private->drm;
-
- drm_atomic_helper_wait_for_fences(drm, state, false);
-
- /*
- * Mediatek drm supports runtime PM, so plane registers cannot be
- * written when their crtc is disabled.
- *
- * The comment for drm_atomic_helper_commit states:
- * For drivers supporting runtime PM the recommended sequence is
- *
- * drm_atomic_helper_commit_modeset_disables(dev, state);
- * drm_atomic_helper_commit_modeset_enables(dev, state);
- * drm_atomic_helper_commit_planes(dev, state,
- * DRM_PLANE_COMMIT_ACTIVE_ONLY);
- *
- * See the kerneldoc entries for these three functions for more details.
- */
- drm_atomic_helper_commit_modeset_disables(drm, state);
- drm_atomic_helper_commit_modeset_enables(drm, state);
- drm_atomic_helper_commit_planes(drm, state,
- DRM_PLANE_COMMIT_ACTIVE_ONLY);
-
- drm_atomic_helper_wait_for_vblanks(drm, state);
-
- drm_atomic_helper_cleanup_planes(drm, state);
- drm_atomic_state_put(state);
-}
-
-static void mtk_atomic_work(struct work_struct *work)
-{
- struct mtk_drm_private *private = container_of(work,
- struct mtk_drm_private, commit.work);
-
- mtk_atomic_complete(private, private->commit.state);
-}
+static const struct drm_mode_config_helper_funcs mtk_drm_mode_config_helpers = {
+ .atomic_commit_tail = drm_atomic_helper_commit_tail_rpm,
+};
-static int mtk_atomic_commit(struct drm_device *drm,
- struct drm_atomic_state *state,
- bool async)
+static struct drm_framebuffer *
+mtk_drm_mode_fb_create(struct drm_device *dev,
+ struct drm_file *file,
+ const struct drm_mode_fb_cmd2 *cmd)
{
- struct mtk_drm_private *private = drm->dev_private;
- int ret;
-
- ret = drm_atomic_helper_prepare_planes(drm, state);
- if (ret)
- return ret;
+ const struct drm_format_info *info = drm_get_format_info(dev, cmd);
- mutex_lock(&private->commit.lock);
- flush_work(&private->commit.work);
+ if (info->num_planes != 1)
+ return ERR_PTR(-EINVAL);
- ret = drm_atomic_helper_swap_state(state, true);
- if (ret) {
- mutex_unlock(&private->commit.lock);
- drm_atomic_helper_cleanup_planes(drm, state);
- return ret;
- }
-
- drm_atomic_state_get(state);
- if (async)
- mtk_atomic_schedule(private, state);
- else
- mtk_atomic_complete(private, state);
-
- mutex_unlock(&private->commit.lock);
-
- return 0;
+ return drm_gem_fb_create(dev, file, cmd);
}
static const struct drm_mode_config_funcs mtk_drm_mode_config_funcs = {
.fb_create = mtk_drm_mode_fb_create,
.atomic_check = drm_atomic_helper_check,
- .atomic_commit = mtk_atomic_commit,
+ .atomic_commit = drm_atomic_helper_commit,
};
static const enum mtk_ddp_comp_id mt2701_mtk_ddp_main[] = {
@@ -236,6 +175,7 @@ static int mtk_drm_kms_init(struct drm_device *drm)
drm->mode_config.max_width = 4096;
drm->mode_config.max_height = 4096;
drm->mode_config.funcs = &mtk_drm_mode_config_funcs;
+ drm->mode_config.helper_private = &mtk_drm_mode_config_helpers;
ret = component_bind_all(drm->dev, drm);
if (ret)
@@ -495,8 +435,6 @@ static int mtk_drm_probe(struct platform_device *pdev)
if (!private)
return -ENOMEM;
- mutex_init(&private->commit.lock);
- INIT_WORK(&private->commit.work, mtk_atomic_work);
private->data = of_device_get_match_data(dev);
mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
diff --git a/drivers/gpu/drm/mediatek/mtk_drm_drv.h b/drivers/gpu/drm/mediatek/mtk_drm_drv.h
index e03fea12ff59..17bc99b9f5d4 100644
--- a/drivers/gpu/drm/mediatek/mtk_drm_drv.h
+++ b/drivers/gpu/drm/mediatek/mtk_drm_drv.h
@@ -43,13 +43,6 @@ struct mtk_drm_private {
struct device_node *comp_node[DDP_COMPONENT_ID_MAX];
struct mtk_ddp_comp *ddp_comp[DDP_COMPONENT_ID_MAX];
const struct mtk_mmsys_driver_data *data;
-
- struct {
- struct drm_atomic_state *state;
- struct work_struct work;
- struct mutex lock;
- } commit;
-
struct drm_atomic_state *suspend_state;
bool dma_parms_allocated;
diff --git a/drivers/gpu/drm/mediatek/mtk_drm_fb.c b/drivers/gpu/drm/mediatek/mtk_drm_fb.c
deleted file mode 100644
index 3f230a28a2dc..000000000000
--- a/drivers/gpu/drm/mediatek/mtk_drm_fb.c
+++ /dev/null
@@ -1,92 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Copyright (c) 2015 MediaTek Inc.
- */
-
-#include <linux/dma-buf.h>
-#include <linux/dma-resv.h>
-
-#include <drm/drm_modeset_helper.h>
-#include <drm/drm_fb_helper.h>
-#include <drm/drm_fourcc.h>
-#include <drm/drm_gem.h>
-#include <drm/drm_gem_framebuffer_helper.h>
-
-#include "mtk_drm_drv.h"
-#include "mtk_drm_fb.h"
-#include "mtk_drm_gem.h"
-
-static const struct drm_framebuffer_funcs mtk_drm_fb_funcs = {
- .create_handle = drm_gem_fb_create_handle,
- .destroy = drm_gem_fb_destroy,
-};
-
-static struct drm_framebuffer *mtk_drm_framebuffer_init(struct drm_device *dev,
- const struct drm_mode_fb_cmd2 *mode,
- struct drm_gem_object *obj)
-{
- const struct drm_format_info *info = drm_get_format_info(dev, mode);
- struct drm_framebuffer *fb;
- int ret;
-
- if (info->num_planes != 1)
- return ERR_PTR(-EINVAL);
-
- fb = kzalloc(sizeof(*fb), GFP_KERNEL);
- if (!fb)
- return ERR_PTR(-ENOMEM);
-
- drm_helper_mode_fill_fb_struct(dev, fb, mode);
-
- fb->obj[0] = obj;
-
- ret = drm_framebuffer_init(dev, fb, &mtk_drm_fb_funcs);
- if (ret) {
- DRM_ERROR("failed to initialize framebuffer\n");
- kfree(fb);
- return ERR_PTR(ret);
- }
-
- return fb;
-}
-
-struct drm_framebuffer *mtk_drm_mode_fb_create(struct drm_device *dev,
- struct drm_file *file,
- const struct drm_mode_fb_cmd2 *cmd)
-{
- const struct drm_format_info *info = drm_get_format_info(dev, cmd);
- struct drm_framebuffer *fb;
- struct drm_gem_object *gem;
- unsigned int width = cmd->width;
- unsigned int height = cmd->height;
- unsigned int size, bpp;
- int ret;
-
- if (info->num_planes != 1)
- return ERR_PTR(-EINVAL);
-
- gem = drm_gem_object_lookup(file, cmd->handles[0]);
- if (!gem)
- return ERR_PTR(-ENOENT);
-
- bpp = info->cpp[0];
- size = (height - 1) * cmd->pitches[0] + width * bpp;
- size += cmd->offsets[0];
-
- if (gem->size < size) {
- ret = -EINVAL;
- goto unreference;
- }
-
- fb = mtk_drm_framebuffer_init(dev, cmd, gem);
- if (IS_ERR(fb)) {
- ret = PTR_ERR(fb);
- goto unreference;
- }
-
- return fb;
-
-unreference:
- drm_gem_object_put_unlocked(gem);
- return ERR_PTR(ret);
-}
diff --git a/drivers/gpu/drm/mediatek/mtk_drm_fb.h b/drivers/gpu/drm/mediatek/mtk_drm_fb.h
deleted file mode 100644
index eb64d26001c6..000000000000
--- a/drivers/gpu/drm/mediatek/mtk_drm_fb.h
+++ /dev/null
@@ -1,13 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * Copyright (c) 2015 MediaTek Inc.
- */
-
-#ifndef MTK_DRM_FB_H
-#define MTK_DRM_FB_H
-
-struct drm_framebuffer *mtk_drm_mode_fb_create(struct drm_device *dev,
- struct drm_file *file,
- const struct drm_mode_fb_cmd2 *cmd);
-
-#endif /* MTK_DRM_FB_H */
diff --git a/drivers/gpu/drm/mediatek/mtk_drm_plane.c b/drivers/gpu/drm/mediatek/mtk_drm_plane.c
index 3b0cc91c7023..914cc7619cd7 100644
--- a/drivers/gpu/drm/mediatek/mtk_drm_plane.c
+++ b/drivers/gpu/drm/mediatek/mtk_drm_plane.c
@@ -7,13 +7,13 @@
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_fourcc.h>
+#include <drm/drm_atomic_uapi.h>
#include <drm/drm_plane_helper.h>
#include <drm/drm_gem_framebuffer_helper.h>
#include "mtk_drm_crtc.h"
#include "mtk_drm_ddp_comp.h"
#include "mtk_drm_drv.h"
-#include "mtk_drm_fb.h"
#include "mtk_drm_gem.h"
#include "mtk_drm_plane.h"
@@ -76,6 +76,50 @@ static void mtk_drm_plane_destroy_state(struct drm_plane *plane,
kfree(to_mtk_plane_state(state));
}
+static int mtk_plane_atomic_async_check(struct drm_plane *plane,
+ struct drm_plane_state *state)
+{
+ struct drm_crtc_state *crtc_state;
+
+ if (plane != state->crtc->cursor)
+ return -EINVAL;
+
+ if (!plane->state)
+ return -EINVAL;
+
+ if (!plane->state->fb)
+ return -EINVAL;
+
+ if (state->state)
+ crtc_state = drm_atomic_get_existing_crtc_state(state->state,
+ state->crtc);
+ else /* Special case for asynchronous cursor updates. */
+ crtc_state = state->crtc->state;
+
+ return drm_atomic_helper_check_plane_state(plane->state, crtc_state,
+ DRM_PLANE_HELPER_NO_SCALING,
+ DRM_PLANE_HELPER_NO_SCALING,
+ true, true);
+}
+
+static void mtk_plane_atomic_async_update(struct drm_plane *plane,
+ struct drm_plane_state *new_state)
+{
+ struct mtk_plane_state *state = to_mtk_plane_state(plane->state);
+
+ plane->state->crtc_x = new_state->crtc_x;
+ plane->state->crtc_y = new_state->crtc_y;
+ plane->state->crtc_h = new_state->crtc_h;
+ plane->state->crtc_w = new_state->crtc_w;
+ plane->state->src_x = new_state->src_x;
+ plane->state->src_y = new_state->src_y;
+ plane->state->src_h = new_state->src_h;
+ plane->state->src_w = new_state->src_w;
+ state->pending.async_dirty = true;
+
+ mtk_drm_crtc_async_update(new_state->crtc, plane, new_state);
+}
+
static const struct drm_plane_funcs mtk_plane_funcs = {
.update_plane = drm_atomic_helper_update_plane,
.disable_plane = drm_atomic_helper_disable_plane,
@@ -95,7 +139,7 @@ static int mtk_plane_atomic_check(struct drm_plane *plane,
if (!fb)
return 0;
- if (!state->crtc)
+ if (WARN_ON(!state->crtc))
return 0;
ret = mtk_drm_crtc_plane_check(state->crtc, plane,
@@ -164,6 +208,8 @@ static const struct drm_plane_helper_funcs mtk_plane_helper_funcs = {
.atomic_check = mtk_plane_atomic_check,
.atomic_update = mtk_plane_atomic_update,
.atomic_disable = mtk_plane_atomic_disable,
+ .atomic_async_update = mtk_plane_atomic_async_update,
+ .atomic_async_check = mtk_plane_atomic_async_check,
};
int mtk_plane_init(struct drm_device *dev, struct drm_plane *plane,
diff --git a/drivers/gpu/drm/mediatek/mtk_drm_plane.h b/drivers/gpu/drm/mediatek/mtk_drm_plane.h
index 760885e35b27..d454bece9535 100644
--- a/drivers/gpu/drm/mediatek/mtk_drm_plane.h
+++ b/drivers/gpu/drm/mediatek/mtk_drm_plane.h
@@ -22,6 +22,8 @@ struct mtk_plane_pending_state {
unsigned int height;
unsigned int rotation;
bool dirty;
+ bool async_dirty;
+ bool async_config;
};
struct mtk_plane_state {
diff --git a/drivers/gpu/drm/mediatek/mtk_dsi.c b/drivers/gpu/drm/mediatek/mtk_dsi.c
index d77c9f484ce3..5fa1073cf26b 100644
--- a/drivers/gpu/drm/mediatek/mtk_dsi.c
+++ b/drivers/gpu/drm/mediatek/mtk_dsi.c
@@ -830,7 +830,7 @@ static int mtk_dsi_connector_get_modes(struct drm_connector *connector)
{
struct mtk_dsi *dsi = connector_to_dsi(connector);
- return drm_panel_get_modes(dsi->panel);
+ return drm_panel_get_modes(dsi->panel, connector);
}
static const struct drm_encoder_helper_funcs mtk_dsi_encoder_helper_funcs = {
diff --git a/drivers/gpu/drm/mediatek/mtk_hdmi.c b/drivers/gpu/drm/mediatek/mtk_hdmi.c
index c79b1f855d89..5e4a4dbda443 100644
--- a/drivers/gpu/drm/mediatek/mtk_hdmi.c
+++ b/drivers/gpu/drm/mediatek/mtk_hdmi.c
@@ -1238,17 +1238,19 @@ static int mtk_hdmi_conn_mode_valid(struct drm_connector *conn,
struct drm_display_mode *mode)
{
struct mtk_hdmi *hdmi = hdmi_ctx_from_conn(conn);
+ struct drm_bridge *next_bridge;
dev_dbg(hdmi->dev, "xres=%d, yres=%d, refresh=%d, intl=%d clock=%d\n",
mode->hdisplay, mode->vdisplay, mode->vrefresh,
!!(mode->flags & DRM_MODE_FLAG_INTERLACE), mode->clock * 1000);
- if (hdmi->bridge.next) {
+ next_bridge = drm_bridge_get_next_bridge(&hdmi->bridge);
+ if (next_bridge) {
struct drm_display_mode adjusted_mode;
drm_mode_copy(&adjusted_mode, mode);
- if (!drm_bridge_mode_fixup(hdmi->bridge.next, mode,
- &adjusted_mode))
+ if (!drm_bridge_chain_mode_fixup(next_bridge, mode,
+ &adjusted_mode))
return MODE_BAD;
}
@@ -1300,9 +1302,10 @@ static int mtk_hdmi_bridge_attach(struct drm_bridge *bridge)
struct mtk_hdmi *hdmi = hdmi_ctx_from_bridge(bridge);
int ret;
- ret = drm_connector_init(bridge->encoder->dev, &hdmi->conn,
- &mtk_hdmi_connector_funcs,
- DRM_MODE_CONNECTOR_HDMIA);
+ ret = drm_connector_init_with_ddc(bridge->encoder->dev, &hdmi->conn,
+ &mtk_hdmi_connector_funcs,
+ DRM_MODE_CONNECTOR_HDMIA,
+ hdmi->ddc_adpt);
if (ret) {
dev_err(hdmi->dev, "Failed to initialize connector: %d\n", ret);
return ret;
diff --git a/drivers/gpu/drm/meson/Makefile b/drivers/gpu/drm/meson/Makefile
index c389e2399133..28a519cdf66b 100644
--- a/drivers/gpu/drm/meson/Makefile
+++ b/drivers/gpu/drm/meson/Makefile
@@ -1,6 +1,7 @@
# SPDX-License-Identifier: GPL-2.0-only
meson-drm-y := meson_drv.o meson_plane.o meson_crtc.o meson_venc_cvbs.o
meson-drm-y += meson_viu.o meson_vpp.o meson_venc.o meson_vclk.o meson_overlay.o
+meson-drm-y += meson_rdma.o meson_osd_afbcd.o
obj-$(CONFIG_DRM_MESON) += meson-drm.o
obj-$(CONFIG_DRM_MESON_DW_HDMI) += meson_dw_hdmi.o
diff --git a/drivers/gpu/drm/meson/meson_crtc.c b/drivers/gpu/drm/meson/meson_crtc.c
index 57ae1c13d1e6..e66b6271ff58 100644
--- a/drivers/gpu/drm/meson/meson_crtc.c
+++ b/drivers/gpu/drm/meson/meson_crtc.c
@@ -23,7 +23,9 @@
#include "meson_registers.h"
#include "meson_venc.h"
#include "meson_viu.h"
+#include "meson_rdma.h"
#include "meson_vpp.h"
+#include "meson_osd_afbcd.h"
#define MESON_G12A_VIU_OFFSET 0x5ec0
@@ -35,7 +37,11 @@ struct meson_crtc {
struct meson_drm *priv;
void (*enable_osd1)(struct meson_drm *priv);
void (*enable_vd1)(struct meson_drm *priv);
+ void (*enable_osd1_afbc)(struct meson_drm *priv);
+ void (*disable_osd1_afbc)(struct meson_drm *priv);
unsigned int viu_offset;
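+	/* AFBC needs reprogramming on each vsync: vsync_forced keeps the
+	 * vsync IRQ running while AFBC is enabled, and vsync_disabled
+	 * suppresses vblank handling once userspace turned vblanks off */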
+ bool vsync_forced;
+ bool vsync_disabled;
};
#define to_meson_crtc(x) container_of(x, struct meson_crtc, base)
@@ -46,6 +52,7 @@ static int meson_crtc_enable_vblank(struct drm_crtc *crtc)
struct meson_crtc *meson_crtc = to_meson_crtc(crtc);
struct meson_drm *priv = meson_crtc->priv;
+ meson_crtc->vsync_disabled = false;
meson_venc_enable_vsync(priv);
return 0;
@@ -56,7 +63,10 @@ static void meson_crtc_disable_vblank(struct drm_crtc *crtc)
struct meson_crtc *meson_crtc = to_meson_crtc(crtc);
struct meson_drm *priv = meson_crtc->priv;
- meson_venc_disable_vsync(priv);
+ if (!meson_crtc->vsync_forced) {
+ meson_crtc->vsync_disabled = true;
+ meson_venc_disable_vsync(priv);
+ }
}
static const struct drm_crtc_funcs meson_crtc_funcs = {
@@ -236,6 +246,26 @@ static void meson_crtc_enable_osd1(struct meson_drm *priv)
priv->io_base + _REG(VPP_MISC));
}
+static void meson_crtc_g12a_enable_osd1_afbc(struct meson_drm *priv)
+{
+ writel_relaxed(priv->viu.osd1_blk2_cfg4,
+ priv->io_base + _REG(VIU_OSD1_BLK2_CFG_W4));
+
+ writel_bits_relaxed(OSD_MEM_LINEAR_ADDR, OSD_MEM_LINEAR_ADDR,
+ priv->io_base + _REG(VIU_OSD1_CTRL_STAT));
+
+ writel_relaxed(priv->viu.osd1_blk1_cfg4,
+ priv->io_base + _REG(VIU_OSD1_BLK1_CFG_W4));
+
+ meson_viu_g12a_enable_osd1_afbc(priv);
+
+ writel_bits_relaxed(OSD_MEM_LINEAR_ADDR, OSD_MEM_LINEAR_ADDR,
+ priv->io_base + _REG(VIU_OSD1_CTRL_STAT));
+
+ writel_bits_relaxed(OSD_MALI_SRC_EN, OSD_MALI_SRC_EN,
+ priv->io_base + _REG(VIU_OSD1_BLK0_CFG_W0));
+}
+
static void meson_g12a_crtc_enable_osd1(struct meson_drm *priv)
{
writel_relaxed(priv->viu.osd_blend_din0_scope_h,
@@ -281,6 +311,8 @@ void meson_crtc_irq(struct meson_drm *priv)
if (priv->viu.osd1_enabled && priv->viu.osd1_commit) {
writel_relaxed(priv->viu.osd1_ctrl_stat,
priv->io_base + _REG(VIU_OSD1_CTRL_STAT));
+ writel_relaxed(priv->viu.osd1_ctrl_stat2,
+ priv->io_base + _REG(VIU_OSD1_CTRL_STAT2));
writel_relaxed(priv->viu.osd1_blk0_cfg[0],
priv->io_base + _REG(VIU_OSD1_BLK0_CFG_W0));
writel_relaxed(priv->viu.osd1_blk0_cfg[1],
@@ -291,6 +323,20 @@ void meson_crtc_irq(struct meson_drm *priv)
priv->io_base + _REG(VIU_OSD1_BLK0_CFG_W3));
writel_relaxed(priv->viu.osd1_blk0_cfg[4],
priv->io_base + _REG(VIU_OSD1_BLK0_CFG_W4));
+
+ if (priv->viu.osd1_afbcd) {
+ if (meson_crtc->enable_osd1_afbc)
+ meson_crtc->enable_osd1_afbc(priv);
+ } else {
+ if (meson_crtc->disable_osd1_afbc)
+ meson_crtc->disable_osd1_afbc(priv);
+ if (priv->afbcd.ops) {
+ priv->afbcd.ops->reset(priv);
+ priv->afbcd.ops->disable(priv);
+ }
+ meson_crtc->vsync_forced = false;
+ }
+
writel_relaxed(priv->viu.osd_sc_ctrl0,
priv->io_base + _REG(VPP_OSD_SC_CTRL0));
writel_relaxed(priv->viu.osd_sc_i_wh_m1,
@@ -312,15 +358,25 @@ void meson_crtc_irq(struct meson_drm *priv)
writel_relaxed(priv->viu.osd_sc_v_ctrl0,
priv->io_base + _REG(VPP_OSD_VSC_CTRL0));
- meson_canvas_config(priv->canvas, priv->canvas_id_osd1,
- priv->viu.osd1_addr, priv->viu.osd1_stride,
- priv->viu.osd1_height, MESON_CANVAS_WRAP_NONE,
- MESON_CANVAS_BLKMODE_LINEAR, 0);
+ if (!priv->viu.osd1_afbcd)
+ meson_canvas_config(priv->canvas, priv->canvas_id_osd1,
+ priv->viu.osd1_addr,
+ priv->viu.osd1_stride,
+ priv->viu.osd1_height,
+ MESON_CANVAS_WRAP_NONE,
+ MESON_CANVAS_BLKMODE_LINEAR, 0);
/* Enable OSD1 */
if (meson_crtc->enable_osd1)
meson_crtc->enable_osd1(priv);
+ if (priv->viu.osd1_afbcd) {
+ priv->afbcd.ops->reset(priv);
+ priv->afbcd.ops->setup(priv);
+ priv->afbcd.ops->enable(priv);
+ meson_crtc->vsync_forced = true;
+ }
+
priv->viu.osd1_commit = false;
}
@@ -357,7 +413,7 @@ void meson_crtc_irq(struct meson_drm *priv)
MESON_CANVAS_WRAP_NONE,
MESON_CANVAS_BLKMODE_LINEAR,
MESON_CANVAS_ENDIAN_SWAP64);
- };
+ }
writel_relaxed(priv->viu.vd1_if0_gen_reg,
priv->io_base + meson_crtc->viu_offset +
@@ -543,6 +599,9 @@ void meson_crtc_irq(struct meson_drm *priv)
priv->viu.vd1_commit = false;
}
+ if (meson_crtc->vsync_disabled)
+ return;
+
drm_crtc_handle_vblank(priv->crtc);
spin_lock_irqsave(&priv->drm->event_lock, flags);
@@ -579,10 +638,20 @@ int meson_crtc_create(struct meson_drm *priv)
meson_crtc->enable_osd1 = meson_g12a_crtc_enable_osd1;
meson_crtc->enable_vd1 = meson_g12a_crtc_enable_vd1;
meson_crtc->viu_offset = MESON_G12A_VIU_OFFSET;
+ meson_crtc->enable_osd1_afbc =
+ meson_crtc_g12a_enable_osd1_afbc;
+ meson_crtc->disable_osd1_afbc =
+ meson_viu_g12a_disable_osd1_afbc;
drm_crtc_helper_add(crtc, &meson_g12a_crtc_helper_funcs);
} else {
meson_crtc->enable_osd1 = meson_crtc_enable_osd1;
meson_crtc->enable_vd1 = meson_crtc_enable_vd1;
+ if (meson_vpu_is_compatible(priv, VPU_COMPATIBLE_GXM)) {
+ meson_crtc->enable_osd1_afbc =
+ meson_viu_gxm_enable_osd1_afbc;
+ meson_crtc->disable_osd1_afbc =
+ meson_viu_gxm_disable_osd1_afbc;
+ }
drm_crtc_helper_add(crtc, &meson_crtc_helper_funcs);
}
diff --git a/drivers/gpu/drm/meson/meson_drv.c b/drivers/gpu/drm/meson/meson_drv.c
index 397c33182f4f..b5f5eb7b4bb9 100644
--- a/drivers/gpu/drm/meson/meson_drv.c
+++ b/drivers/gpu/drm/meson/meson_drv.c
@@ -28,10 +28,12 @@
#include "meson_drv.h"
#include "meson_overlay.h"
#include "meson_plane.h"
+#include "meson_osd_afbcd.h"
#include "meson_registers.h"
#include "meson_venc_cvbs.h"
#include "meson_viu.h"
#include "meson_vpp.h"
+#include "meson_rdma.h"
#define DRIVER_NAME "meson"
#define DRIVER_DESC "Amlogic Meson DRM driver"
@@ -184,6 +186,7 @@ static void meson_remove_framebuffers(void)
static int meson_drv_bind_master(struct device *dev, bool has_components)
{
struct platform_device *pdev = to_platform_device(dev);
+ const struct meson_drm_match_data *match;
struct meson_drm *priv;
struct drm_device *drm;
struct resource *res;
@@ -196,6 +199,10 @@ static int meson_drv_bind_master(struct device *dev, bool has_components)
return -ENODEV;
}
+ match = of_device_get_match_data(dev);
+ if (!match)
+ return -ENODEV;
+
drm = drm_dev_alloc(&meson_driver, dev);
if (IS_ERR(drm))
return PTR_ERR(drm);
@@ -208,8 +215,8 @@ static int meson_drv_bind_master(struct device *dev, bool has_components)
drm->dev_private = priv;
priv->drm = drm;
priv->dev = dev;
-
- priv->compat = (enum vpu_compatible)of_device_get_match_data(priv->dev);
+ priv->compat = match->compat;
+ priv->afbcd.ops = match->afbcd_ops;
res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "vpu");
regs = devm_ioremap_resource(dev, res);
@@ -289,6 +296,11 @@ static int meson_drv_bind_master(struct device *dev, bool has_components)
meson_venc_init(priv);
meson_vpp_init(priv);
meson_viu_init(priv);
+ if (priv->afbcd.ops) {
+ ret = priv->afbcd.ops->init(priv);
+ if (ret)
+ return ret;
+ }
/* Encoder Initialization */
@@ -359,12 +371,16 @@ static void meson_drv_unbind(struct device *dev)
meson_canvas_free(priv->canvas, priv->canvas_id_vd1_2);
}
+ if (priv->afbcd.ops) {
+ priv->afbcd.ops->reset(priv);
+ meson_rdma_free(priv);
+ }
+
drm_dev_unregister(drm);
drm_irq_uninstall(drm);
drm_kms_helper_poll_fini(drm);
drm_mode_config_cleanup(drm);
drm_dev_put(drm);
-
}
static const struct component_master_ops meson_drv_master_ops = {
@@ -393,6 +409,8 @@ static int __maybe_unused meson_drv_pm_resume(struct device *dev)
meson_venc_init(priv);
meson_vpp_init(priv);
meson_viu_init(priv);
+ if (priv->afbcd.ops)
+ priv->afbcd.ops->init(priv);
drm_mode_config_helper_resume(priv->drm);
@@ -481,15 +499,33 @@ static int meson_drv_probe(struct platform_device *pdev)
return 0;
};
+static struct meson_drm_match_data meson_drm_gxbb_data = {
+ .compat = VPU_COMPATIBLE_GXBB,
+};
+
+static struct meson_drm_match_data meson_drm_gxl_data = {
+ .compat = VPU_COMPATIBLE_GXL,
+};
+
+static struct meson_drm_match_data meson_drm_gxm_data = {
+ .compat = VPU_COMPATIBLE_GXM,
+ .afbcd_ops = &meson_afbcd_gxm_ops,
+};
+
+static struct meson_drm_match_data meson_drm_g12a_data = {
+ .compat = VPU_COMPATIBLE_G12A,
+ .afbcd_ops = &meson_afbcd_g12a_ops,
+};
+
static const struct of_device_id dt_match[] = {
{ .compatible = "amlogic,meson-gxbb-vpu",
- .data = (void *)VPU_COMPATIBLE_GXBB },
+ .data = (void *)&meson_drm_gxbb_data },
{ .compatible = "amlogic,meson-gxl-vpu",
- .data = (void *)VPU_COMPATIBLE_GXL },
+ .data = (void *)&meson_drm_gxl_data },
{ .compatible = "amlogic,meson-gxm-vpu",
- .data = (void *)VPU_COMPATIBLE_GXM },
+ .data = (void *)&meson_drm_gxm_data },
{ .compatible = "amlogic,meson-g12a-vpu",
- .data = (void *)VPU_COMPATIBLE_G12A },
+ .data = (void *)&meson_drm_g12a_data },
{}
};
MODULE_DEVICE_TABLE(of, dt_match);
diff --git a/drivers/gpu/drm/meson/meson_drv.h b/drivers/gpu/drm/meson/meson_drv.h
index 820d07bdd42a..04fdf3826643 100644
--- a/drivers/gpu/drm/meson/meson_drv.h
+++ b/drivers/gpu/drm/meson/meson_drv.h
@@ -16,6 +16,7 @@ struct drm_crtc;
struct drm_device;
struct drm_plane;
struct meson_drm;
+struct meson_afbcd_ops;
enum vpu_compatible {
VPU_COMPATIBLE_GXBB = 0,
@@ -24,6 +25,11 @@ enum vpu_compatible {
VPU_COMPATIBLE_G12A = 3,
};
+struct meson_drm_match_data {
+ enum vpu_compatible compat;
+ struct meson_afbcd_ops *afbcd_ops;
+};
+
struct meson_drm {
struct device *dev;
enum vpu_compatible compat;
@@ -47,11 +53,16 @@ struct meson_drm {
bool osd1_enabled;
bool osd1_interlace;
bool osd1_commit;
+ bool osd1_afbcd;
uint32_t osd1_ctrl_stat;
+ uint32_t osd1_ctrl_stat2;
uint32_t osd1_blk0_cfg[5];
+ uint32_t osd1_blk1_cfg4;
+ uint32_t osd1_blk2_cfg4;
uint32_t osd1_addr;
uint32_t osd1_stride;
uint32_t osd1_height;
+ uint32_t osd1_width;
uint32_t osd_sc_ctrl0;
uint32_t osd_sc_i_wh_m1;
uint32_t osd_sc_o_h_start_end;
@@ -122,6 +133,18 @@ struct meson_drm {
bool venc_repeat;
bool hdmi_use_enci;
} venc;
+
+ struct {
+ dma_addr_t addr_dma;
+ uint32_t *addr;
+ unsigned int offset;
+ } rdma;
+
+ struct {
+ struct meson_afbcd_ops *ops;
+ u64 modifier;
+ u32 format;
+ } afbcd;
};
static inline int meson_vpu_is_compatible(struct meson_drm *priv,
diff --git a/drivers/gpu/drm/meson/meson_osd_afbcd.c b/drivers/gpu/drm/meson/meson_osd_afbcd.c
new file mode 100644
index 000000000000..f12e0271f166
--- /dev/null
+++ b/drivers/gpu/drm/meson/meson_osd_afbcd.c
@@ -0,0 +1,389 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Copyright (C) 2019 BayLibre, SAS
+ * Author: Neil Armstrong <narmstrong@baylibre.com>
+ */
+
+#include <linux/bitfield.h>
+
+#include <drm/drm_print.h>
+#include <drm/drm_fourcc.h>
+
+#include "meson_drv.h"
+#include "meson_registers.h"
+#include "meson_viu.h"
+#include "meson_rdma.h"
+#include "meson_osd_afbcd.h"
+
+/*
+ * DOC: Driver for the ARM FrameBuffer Compression Decoders
+ *
+ * The Amlogic GXM and G12A SoC families embed an AFBC decoder
+ * to decode compressed buffers generated by the ARM Mali GPU.
+ *
+ * For the GXM Family, Amlogic designed their own Decoder, named in
+ * the vendor source as "MESON_AFBC", and a single decoder is available
+ * for the 2 OSD planes.
+ * This decoder is compatible with the AFBC 1.0 specifications and the
+ * Mali T820 GPU capabilities.
+ * It supports:
+ * - basic AFBC buffers for RGB32 only, so the YTR feature is mandatory
+ * - SPARSE layout and SPLIT layout
+ * - only 16x16 superblock
+ *
+ * The decoder reads the data from the SDRAM, decodes and sends the
+ * decoded pixel stream to the OSD1 Plane pixel composer.
+ *
+ * For the G12A Family, Amlogic integrated an ARM AFBC Decoder, named
+ * in the vendor source as "MALI_AFBC", and the decoder can decode up
+ * to 4 surfaces, one for each of the 4 available OSDs.
+ * This decoder is compatible with the AFBC 1.2 specifications for the
+ * Mali G31 and G52 GPUs.
+ * It supports:
+ * - basic AFBC buffers for multiple RGB and YUV pixel formats
+ * - SPARSE layout and SPLIT layout
+ * - 16x16 and 32x8 "wideblk" superblocks
+ * - Tiled header
+ *
+ * The ARM AFBC decoder is independent from the VPU pixel pipeline:
+ * it reads the data from SDRAM, then decodes it into a private
+ * internal physical address, from which the OSD1 plane pixel
+ * composer unpacks the decoded data.
+ */
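
The capabilities described above are exposed through the meson_afbcd_ops vtable defined later in this patch. A hypothetical caller (not part of this patch) gating a framebuffer on the decoder could look like:

/* Hypothetical helper: reject AFBC framebuffers the SoC's decoder
 * cannot handle. */
static int check_afbc_fb(struct meson_drm *priv, struct drm_framebuffer *fb)
{
	if (!priv->afbcd.ops)
		return -EINVAL;		/* SoC without an AFBC decoder */

	if (!priv->afbcd.ops->supported_fmt(fb->modifier, fb->format->format))
		return -EINVAL;

	return 0;
}
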
+
+/* Amlogic AFBC Decoder for GXM Family */
+
+#define OSD1_AFBCD_RGB32 0x15
+
+static int meson_gxm_afbcd_pixel_fmt(u64 modifier, uint32_t format)
+{
+ switch (format) {
+ case DRM_FORMAT_XBGR8888:
+ case DRM_FORMAT_ABGR8888:
+ return OSD1_AFBCD_RGB32;
+ /* TOFIX: support more formats */
+ default:
+ DRM_DEBUG("unsupported afbc format[%08x]\n", format);
+ return -EINVAL;
+ }
+}
+
+static bool meson_gxm_afbcd_supported_fmt(u64 modifier, uint32_t format)
+{
+ if (modifier & AFBC_FORMAT_MOD_BLOCK_SIZE_32x8)
+ return false;
+
+ if (!(modifier & AFBC_FORMAT_MOD_YTR))
+ return false;
+
+ return meson_gxm_afbcd_pixel_fmt(modifier, format) >= 0;
+}
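
Per the checks above, a GXM-acceptable modifier must use 16x16 superblocks and YTR, paired with an XBGR/ABGR8888 format. Building one with the standard drm_fourcc.h helpers (illustrative):

#include <drm/drm_fourcc.h>

/* Illustrative: passes meson_gxm_afbcd_supported_fmt() together with
 * DRM_FORMAT_ABGR8888. */
static const u64 gxm_afbc_modifier =
	DRM_FORMAT_MOD_ARM_AFBC(AFBC_FORMAT_MOD_BLOCK_SIZE_16x16 |
				AFBC_FORMAT_MOD_YTR |
				AFBC_FORMAT_MOD_SPARSE);
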
+
+static int meson_gxm_afbcd_init(struct meson_drm *priv)
+{
+ return 0;
+}
+
+static int meson_gxm_afbcd_reset(struct meson_drm *priv)
+{
+ writel_relaxed(VIU_SW_RESET_OSD1_AFBCD,
+ priv->io_base + _REG(VIU_SW_RESET));
+ writel_relaxed(0, priv->io_base + _REG(VIU_SW_RESET));
+
+ return 0;
+}
+
+static int meson_gxm_afbcd_enable(struct meson_drm *priv)
+{
+ writel_relaxed(FIELD_PREP(OSD1_AFBCD_ID_FIFO_THRD, 0x40) |
+ OSD1_AFBCD_DEC_ENABLE,
+ priv->io_base + _REG(OSD1_AFBCD_ENABLE));
+
+ return 0;
+}
+
+static int meson_gxm_afbcd_disable(struct meson_drm *priv)
+{
+ writel_bits_relaxed(OSD1_AFBCD_DEC_ENABLE, 0,
+ priv->io_base + _REG(OSD1_AFBCD_ENABLE));
+
+ return 0;
+}
+
+static int meson_gxm_afbcd_setup(struct meson_drm *priv)
+{
+ u32 conv_lbuf_len;
+ u32 mode = FIELD_PREP(OSD1_AFBCD_MIF_URGENT, 3) |
+ FIELD_PREP(OSD1_AFBCD_HOLD_LINE_NUM, 4) |
+ FIELD_PREP(OSD1_AFBCD_RGBA_EXCHAN_CTRL, 0x34) |
+ meson_gxm_afbcd_pixel_fmt(priv->afbcd.modifier,
+ priv->afbcd.format);
+
+ if (priv->afbcd.modifier & AFBC_FORMAT_MOD_SPARSE)
+ mode |= OSD1_AFBCD_HREG_HALF_BLOCK;
+
+ if (priv->afbcd.modifier & AFBC_FORMAT_MOD_SPLIT)
+ mode |= OSD1_AFBCD_HREG_BLOCK_SPLIT;
+
+ writel_relaxed(mode, priv->io_base + _REG(OSD1_AFBCD_MODE));
+
+ writel_relaxed(FIELD_PREP(OSD1_AFBCD_HREG_VSIZE_IN,
+ priv->viu.osd1_width) |
+ FIELD_PREP(OSD1_AFBCD_HREG_HSIZE_IN,
+ priv->viu.osd1_height),
+ priv->io_base + _REG(OSD1_AFBCD_SIZE_IN));
+
+ writel_relaxed(priv->viu.osd1_addr >> 4,
+ priv->io_base + _REG(OSD1_AFBCD_HDR_PTR));
+ writel_relaxed(priv->viu.osd1_addr >> 4,
+ priv->io_base + _REG(OSD1_AFBCD_FRAME_PTR));
+ /* TOFIX: bits 31:24 are not documented, nor the meaning of 0xe4 */
+ writel_relaxed((0xe4 << 24) | (priv->viu.osd1_addr & 0xffffff),
+ priv->io_base + _REG(OSD1_AFBCD_CHROMA_PTR));
+
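+	/* Conversion line buffer: roughly width / 4, in power-of-two steps */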
+ if (priv->viu.osd1_width <= 128)
+ conv_lbuf_len = 32;
+ else if (priv->viu.osd1_width <= 256)
+ conv_lbuf_len = 64;
+ else if (priv->viu.osd1_width <= 512)
+ conv_lbuf_len = 128;
+ else if (priv->viu.osd1_width <= 1024)
+ conv_lbuf_len = 256;
+ else if (priv->viu.osd1_width <= 2048)
+ conv_lbuf_len = 512;
+ else
+ conv_lbuf_len = 1024;
+
+ writel_relaxed(conv_lbuf_len,
+ priv->io_base + _REG(OSD1_AFBCD_CONV_CTRL));
+
+ writel_relaxed(FIELD_PREP(OSD1_AFBCD_DEC_PIXEL_BGN_H, 0) |
+ FIELD_PREP(OSD1_AFBCD_DEC_PIXEL_END_H,
+ priv->viu.osd1_width - 1),
+ priv->io_base + _REG(OSD1_AFBCD_PIXEL_HSCOPE));
+
+ writel_relaxed(FIELD_PREP(OSD1_AFBCD_DEC_PIXEL_BGN_V, 0) |
+ FIELD_PREP(OSD1_AFBCD_DEC_PIXEL_END_V,
+ priv->viu.osd1_height - 1),
+ priv->io_base + _REG(OSD1_AFBCD_PIXEL_VSCOPE));
+
+ return 0;
+}
+
+struct meson_afbcd_ops meson_afbcd_gxm_ops = {
+ .init = meson_gxm_afbcd_init,
+ .reset = meson_gxm_afbcd_reset,
+ .enable = meson_gxm_afbcd_enable,
+ .disable = meson_gxm_afbcd_disable,
+ .setup = meson_gxm_afbcd_setup,
+ .supported_fmt = meson_gxm_afbcd_supported_fmt,
+};
+
+/* ARM AFBC Decoder for G12A Family */
+
+/* Amlogic G12A Mali AFBC Decoder supported formats */
+enum {
+ MAFBC_FMT_RGB565 = 0,
+ MAFBC_FMT_RGBA5551,
+ MAFBC_FMT_RGBA1010102,
+ MAFBC_FMT_YUV420_10B,
+ MAFBC_FMT_RGB888,
+ MAFBC_FMT_RGBA8888,
+ MAFBC_FMT_RGBA4444,
+ MAFBC_FMT_R8,
+ MAFBC_FMT_RG88,
+ MAFBC_FMT_YUV420_8B,
+ MAFBC_FMT_YUV422_8B = 11,
+ MAFBC_FMT_YUV422_10B = 14,
+};
+
+static int meson_g12a_afbcd_pixel_fmt(u64 modifier, uint32_t format)
+{
+ switch (format) {
+ case DRM_FORMAT_XRGB8888:
+ case DRM_FORMAT_ARGB8888:
+ /* YTR is forbidden for non-XBGR formats */
+ if (modifier & AFBC_FORMAT_MOD_YTR)
+ return -EINVAL;
+ /* fall through */
+ case DRM_FORMAT_XBGR8888:
+ case DRM_FORMAT_ABGR8888:
+ return MAFBC_FMT_RGBA8888;
+ case DRM_FORMAT_RGB888:
+ /* YTR is forbidden for non-XBGR formats */
+ if (modifier & AFBC_FORMAT_MOD_YTR)
+ return -EINVAL;
+ return MAFBC_FMT_RGB888;
+ case DRM_FORMAT_RGB565:
+ /* YTR is forbidden for non-XBGR formats */
+ if (modifier & AFBC_FORMAT_MOD_YTR)
+ return -EINVAL;
+ return MAFBC_FMT_RGB565;
+ /* TOFIX: support more formats */
+ default:
+ DRM_DEBUG("unsupported afbc format[%08x]\n", format);
+ return -EINVAL;
+ }
+}
+
+static int meson_g12a_afbcd_bpp(uint32_t format)
+{
+ switch (format) {
+ case DRM_FORMAT_XRGB8888:
+ case DRM_FORMAT_ARGB8888:
+ case DRM_FORMAT_XBGR8888:
+ case DRM_FORMAT_ABGR8888:
+ return 32;
+ case DRM_FORMAT_RGB888:
+ return 24;
+ case DRM_FORMAT_RGB565:
+ return 16;
+ /* TOFIX: support more formats */
+ default:
+ DRM_ERROR("unsupported afbc format[%08x]\n", format);
+ return 0;
+ }
+}
+
+static int meson_g12a_afbcd_fmt_to_blk_mode(u64 modifier, uint32_t format)
+{
+ switch (format) {
+ case DRM_FORMAT_XRGB8888:
+ case DRM_FORMAT_ARGB8888:
+ case DRM_FORMAT_XBGR8888:
+ case DRM_FORMAT_ABGR8888:
+ return OSD_MALI_COLOR_MODE_RGBA8888;
+ case DRM_FORMAT_RGB888:
+ return OSD_MALI_COLOR_MODE_RGB888;
+ case DRM_FORMAT_RGB565:
+ return OSD_MALI_COLOR_MODE_RGB565;
+ /* TOFIX: support more formats */
+ default:
+ DRM_DEBUG("unsupported afbc format[%08x]\n", format);
+ return -EINVAL;
+ }
+}
+
+static bool meson_g12a_afbcd_supported_fmt(u64 modifier, uint32_t format)
+{
+ return meson_g12a_afbcd_pixel_fmt(modifier, format) >= 0;
+}
+
+static int meson_g12a_afbcd_init(struct meson_drm *priv)
+{
+ int ret;
+
+ ret = meson_rdma_init(priv);
+ if (ret)
+ return ret;
+
+ meson_rdma_setup(priv);
+
+ /* Handle AFBC Decoder reset manually */
+ writel_bits_relaxed(MALI_AFBCD_MANUAL_RESET, MALI_AFBCD_MANUAL_RESET,
+ priv->io_base + _REG(MALI_AFBCD_TOP_CTRL));
+
+ return 0;
+}
+
+static int meson_g12a_afbcd_reset(struct meson_drm *priv)
+{
+ meson_rdma_reset(priv);
+
+ meson_rdma_writel_sync(priv, VIU_SW_RESET_G12A_AFBC_ARB |
+ VIU_SW_RESET_G12A_OSD1_AFBCD,
+ VIU_SW_RESET);
+ meson_rdma_writel_sync(priv, 0, VIU_SW_RESET);
+
+ return 0;
+}
+
+static int meson_g12a_afbcd_enable(struct meson_drm *priv)
+{
+ meson_rdma_writel_sync(priv, VPU_MAFBC_IRQ_SURFACES_COMPLETED |
+ VPU_MAFBC_IRQ_CONFIGURATION_SWAPPED |
+ VPU_MAFBC_IRQ_DECODE_ERROR |
+ VPU_MAFBC_IRQ_DETILING_ERROR,
+ VPU_MAFBC_IRQ_MASK);
+
+ meson_rdma_writel_sync(priv, VPU_MAFBC_S0_ENABLE,
+ VPU_MAFBC_SURFACE_CFG);
+
+ meson_rdma_writel_sync(priv, VPU_MAFBC_DIRECT_SWAP,
+ VPU_MAFBC_COMMAND);
+
+ /* This enables the RDMA to replay the buffered register writes on vsync */
+ meson_rdma_flush(priv);
+
+ return 0;
+}
+
+static int meson_g12a_afbcd_disable(struct meson_drm *priv)
+{
+ writel_bits_relaxed(VPU_MAFBC_S0_ENABLE, 0,
+ priv->io_base + _REG(VPU_MAFBC_SURFACE_CFG));
+
+ return 0;
+}
+
+static int meson_g12a_afbcd_setup(struct meson_drm *priv)
+{
+ u32 format = meson_g12a_afbcd_pixel_fmt(priv->afbcd.modifier,
+ priv->afbcd.format);
+
+ if (priv->afbcd.modifier & AFBC_FORMAT_MOD_YTR)
+ format |= VPU_MAFBC_YUV_TRANSFORM;
+
+ if (priv->afbcd.modifier & AFBC_FORMAT_MOD_SPLIT)
+ format |= VPU_MAFBC_BLOCK_SPLIT;
+
+ if (priv->afbcd.modifier & AFBC_FORMAT_MOD_TILED)
+ format |= VPU_MAFBC_TILED_HEADER_EN;
+
+ if ((priv->afbcd.modifier & AFBC_FORMAT_MOD_BLOCK_SIZE_MASK) ==
+ AFBC_FORMAT_MOD_BLOCK_SIZE_32x8)
+ format |= FIELD_PREP(VPU_MAFBC_SUPER_BLOCK_ASPECT, 1);
+
+ meson_rdma_writel_sync(priv, format,
+ VPU_MAFBC_FORMAT_SPECIFIER_S0);
+
+ meson_rdma_writel_sync(priv, priv->viu.osd1_addr,
+ VPU_MAFBC_HEADER_BUF_ADDR_LOW_S0);
+ meson_rdma_writel_sync(priv, 0,
+ VPU_MAFBC_HEADER_BUF_ADDR_HIGH_S0);
+
+ meson_rdma_writel_sync(priv, priv->viu.osd1_width,
+ VPU_MAFBC_BUFFER_WIDTH_S0);
+ meson_rdma_writel_sync(priv, ALIGN(priv->viu.osd1_height, 32),
+ VPU_MAFBC_BUFFER_HEIGHT_S0);
+
+ meson_rdma_writel_sync(priv, 0,
+ VPU_MAFBC_BOUNDING_BOX_X_START_S0);
+ meson_rdma_writel_sync(priv, priv->viu.osd1_width - 1,
+ VPU_MAFBC_BOUNDING_BOX_X_END_S0);
+ meson_rdma_writel_sync(priv, 0,
+ VPU_MAFBC_BOUNDING_BOX_Y_START_S0);
+ meson_rdma_writel_sync(priv, priv->viu.osd1_height - 1,
+ VPU_MAFBC_BOUNDING_BOX_Y_END_S0);
+
+ meson_rdma_writel_sync(priv, MESON_G12A_AFBCD_OUT_ADDR,
+ VPU_MAFBC_OUTPUT_BUF_ADDR_LOW_S0);
+ meson_rdma_writel_sync(priv, 0,
+ VPU_MAFBC_OUTPUT_BUF_ADDR_HIGH_S0);
+
+ meson_rdma_writel_sync(priv, priv->viu.osd1_width *
+ (meson_g12a_afbcd_bpp(priv->afbcd.format) / 8),
+ VPU_MAFBC_OUTPUT_BUF_STRIDE_S0);
+
+ return 0;
+}
+
+struct meson_afbcd_ops meson_afbcd_g12a_ops = {
+ .init = meson_g12a_afbcd_init,
+ .reset = meson_g12a_afbcd_reset,
+ .enable = meson_g12a_afbcd_enable,
+ .disable = meson_g12a_afbcd_disable,
+ .setup = meson_g12a_afbcd_setup,
+ .fmt_to_blk_mode = meson_g12a_afbcd_fmt_to_blk_mode,
+ .supported_fmt = meson_g12a_afbcd_supported_fmt,
+};
diff --git a/drivers/gpu/drm/meson/meson_osd_afbcd.h b/drivers/gpu/drm/meson/meson_osd_afbcd.h
new file mode 100644
index 000000000000..5e5523304f42
--- /dev/null
+++ b/drivers/gpu/drm/meson/meson_osd_afbcd.h
@@ -0,0 +1,28 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * Copyright (C) 2019 BayLibre, SAS
+ * Author: Neil Armstrong <narmstrong@baylibre.com>
+ */
+
+#ifndef __MESON_OSD_AFBCD_H
+#define __MESON_OSD_AFBCD_H
+
+#include "meson_drv.h"
+
+/* This is an internal address used to transfer pixels from AFBC to the VIU */
+#define MESON_G12A_AFBCD_OUT_ADDR 0x1000000
+
+struct meson_afbcd_ops {
+ int (*init)(struct meson_drm *priv);
+ int (*reset)(struct meson_drm *priv);
+ int (*enable)(struct meson_drm *priv);
+ int (*disable)(struct meson_drm *priv);
+ int (*setup)(struct meson_drm *priv);
+ int (*fmt_to_blk_mode)(u64 modifier, uint32_t format);
+ bool (*supported_fmt)(u64 modifier, uint32_t format);
+};
+
+extern struct meson_afbcd_ops meson_afbcd_gxm_ops;
+extern struct meson_afbcd_ops meson_afbcd_g12a_ops;
+
+#endif /* __MESON_OSD_AFBCD_H */
diff --git a/drivers/gpu/drm/meson/meson_plane.c b/drivers/gpu/drm/meson/meson_plane.c
index ed543227b00d..d5cbc47835bf 100644
--- a/drivers/gpu/drm/meson/meson_plane.c
+++ b/drivers/gpu/drm/meson/meson_plane.c
@@ -23,6 +23,7 @@
#include "meson_plane.h"
#include "meson_registers.h"
#include "meson_viu.h"
+#include "meson_osd_afbcd.h"
/* OSD_SCI_WH_M1 */
#define SCI_WH_M1_W(w) FIELD_PREP(GENMASK(28, 16), w)
@@ -92,12 +93,38 @@ static int meson_plane_atomic_check(struct drm_plane *plane,
false, true);
}
+#define MESON_MOD_AFBC_VALID_BITS (AFBC_FORMAT_MOD_BLOCK_SIZE_16x16 | \
+ AFBC_FORMAT_MOD_BLOCK_SIZE_32x8 | \
+ AFBC_FORMAT_MOD_YTR | \
+ AFBC_FORMAT_MOD_SPARSE | \
+ AFBC_FORMAT_MOD_SPLIT)
+
/* Takes a fixed 16.16 number and converts it to integer. */
static inline int64_t fixed16_to_int(int64_t value)
{
return value >> 16;
}
+static u32 meson_g12a_afbcd_line_stride(struct meson_drm *priv)
+{
+ u32 line_stride = 0;
+
+ switch (priv->afbcd.format) {
+ case DRM_FORMAT_RGB565:
+ line_stride = ((priv->viu.osd1_width << 4) + 127) >> 7;
+ break;
+ case DRM_FORMAT_RGB888:
+ case DRM_FORMAT_XRGB8888:
+ case DRM_FORMAT_ARGB8888:
+ case DRM_FORMAT_XBGR8888:
+ case DRM_FORMAT_ABGR8888:
+ line_stride = ((priv->viu.osd1_width << 5) + 127) >> 7;
+ break;
+ }
+
+ return ((line_stride + 1) >> 1) << 1;
+}
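
As a sketch of the stride math above, assuming a hypothetical 1920-wide 32bpp buffer (the width and format are illustrative, not from the patch); the stride is expressed in 128-bit (16-byte) units and rounded up to an even count:

	#include <assert.h>
	#include <stdint.h>

	int main(void)
	{
		uint32_t width = 1920;                        /* pixels, hypothetical */
		uint32_t stride = ((width << 5) + 127) >> 7;  /* ceil(bits / 128) = 480 */

		stride = ((stride + 1) >> 1) << 1;            /* round up to even -> 480 */
		assert(stride == (width * 4) / 16);           /* bytes per line / 16-byte unit */
		return 0;
	}
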
+
static void meson_plane_atomic_update(struct drm_plane *plane,
struct drm_plane_state *old_state)
{
@@ -126,59 +153,91 @@ static void meson_plane_atomic_update(struct drm_plane *plane,
*/
spin_lock_irqsave(&priv->drm->event_lock, flags);
+ /* Check if AFBC decoder is required for this buffer */
+ if ((meson_vpu_is_compatible(priv, VPU_COMPATIBLE_GXM) ||
+ meson_vpu_is_compatible(priv, VPU_COMPATIBLE_G12A)) &&
+ fb->modifier & DRM_FORMAT_MOD_ARM_AFBC(MESON_MOD_AFBC_VALID_BITS))
+ priv->viu.osd1_afbcd = true;
+ else
+ priv->viu.osd1_afbcd = false;
+
/* Enable OSD and BLK0, set max global alpha */
priv->viu.osd1_ctrl_stat = OSD_ENABLE |
(0xFF << OSD_GLOBAL_ALPHA_SHIFT) |
OSD_BLK0_ENABLE;
+ priv->viu.osd1_ctrl_stat2 = readl(priv->io_base +
+ _REG(VIU_OSD1_CTRL_STAT2));
+
canvas_id_osd1 = priv->canvas_id_osd1;
/* Set up BLK0 to point to the right canvas */
- priv->viu.osd1_blk0_cfg[0] = ((canvas_id_osd1 << OSD_CANVAS_SEL) |
- OSD_ENDIANNESS_LE);
+ priv->viu.osd1_blk0_cfg[0] = canvas_id_osd1 << OSD_CANVAS_SEL;
+
+ if (priv->viu.osd1_afbcd) {
+ if (meson_vpu_is_compatible(priv, VPU_COMPATIBLE_G12A)) {
+ /* This is the internal decoding memory address */
+ priv->viu.osd1_blk1_cfg4 = MESON_G12A_AFBCD_OUT_ADDR;
+ priv->viu.osd1_blk0_cfg[0] |= OSD_ENDIANNESS_BE;
+ priv->viu.osd1_ctrl_stat2 |= OSD_PENDING_STAT_CLEAN;
+ priv->viu.osd1_ctrl_stat |= VIU_OSD1_CFG_SYN_EN;
+ }
+
+ if (meson_vpu_is_compatible(priv, VPU_COMPATIBLE_GXM)) {
+ priv->viu.osd1_blk0_cfg[0] |= OSD_ENDIANNESS_LE;
+ priv->viu.osd1_ctrl_stat2 |= OSD_DPATH_MALI_AFBCD;
+ }
+ } else {
+ priv->viu.osd1_blk0_cfg[0] |= OSD_ENDIANNESS_LE;
+
+ if (meson_vpu_is_compatible(priv, VPU_COMPATIBLE_GXM))
+ priv->viu.osd1_ctrl_stat2 &= ~OSD_DPATH_MALI_AFBCD;
+ }
/* On GXBB, Use the old non-HDR RGB2YUV converter */
if (meson_vpu_is_compatible(priv, VPU_COMPATIBLE_GXBB))
priv->viu.osd1_blk0_cfg[0] |= OSD_OUTPUT_COLOR_RGB;
+ if (priv->viu.osd1_afbcd &&
+ meson_vpu_is_compatible(priv, VPU_COMPATIBLE_G12A)) {
+ priv->viu.osd1_blk0_cfg[0] |= OSD_MALI_SRC_EN |
+ priv->afbcd.ops->fmt_to_blk_mode(fb->modifier,
+ fb->format->format);
+ } else {
+ switch (fb->format->format) {
+ case DRM_FORMAT_XRGB8888:
+ case DRM_FORMAT_ARGB8888:
+ priv->viu.osd1_blk0_cfg[0] |= OSD_BLK_MODE_32 |
+ OSD_COLOR_MATRIX_32_ARGB;
+ break;
+ case DRM_FORMAT_XBGR8888:
+ case DRM_FORMAT_ABGR8888:
+ priv->viu.osd1_blk0_cfg[0] |= OSD_BLK_MODE_32 |
+ OSD_COLOR_MATRIX_32_ABGR;
+ break;
+ case DRM_FORMAT_RGB888:
+ priv->viu.osd1_blk0_cfg[0] |= OSD_BLK_MODE_24 |
+ OSD_COLOR_MATRIX_24_RGB;
+ break;
+ case DRM_FORMAT_RGB565:
+ priv->viu.osd1_blk0_cfg[0] |= OSD_BLK_MODE_16 |
+ OSD_COLOR_MATRIX_16_RGB565;
+ break;
+ }
+ }
+
switch (fb->format->format) {
case DRM_FORMAT_XRGB8888:
- /* For XRGB, replace the pixel's alpha by 0xFF */
- writel_bits_relaxed(OSD_REPLACE_EN, OSD_REPLACE_EN,
- priv->io_base + _REG(VIU_OSD1_CTRL_STAT2));
- priv->viu.osd1_blk0_cfg[0] |= OSD_BLK_MODE_32 |
- OSD_COLOR_MATRIX_32_ARGB;
- break;
case DRM_FORMAT_XBGR8888:
/* For XRGB, replace the pixel's alpha by 0xFF */
- writel_bits_relaxed(OSD_REPLACE_EN, OSD_REPLACE_EN,
- priv->io_base + _REG(VIU_OSD1_CTRL_STAT2));
- priv->viu.osd1_blk0_cfg[0] |= OSD_BLK_MODE_32 |
- OSD_COLOR_MATRIX_32_ABGR;
+ priv->viu.osd1_ctrl_stat2 |= OSD_REPLACE_EN;
break;
case DRM_FORMAT_ARGB8888:
- /* For ARGB, use the pixel's alpha */
- writel_bits_relaxed(OSD_REPLACE_EN, 0,
- priv->io_base + _REG(VIU_OSD1_CTRL_STAT2));
- priv->viu.osd1_blk0_cfg[0] |= OSD_BLK_MODE_32 |
- OSD_COLOR_MATRIX_32_ARGB;
- break;
case DRM_FORMAT_ABGR8888:
/* For ARGB, use the pixel's alpha */
- writel_bits_relaxed(OSD_REPLACE_EN, 0,
- priv->io_base + _REG(VIU_OSD1_CTRL_STAT2));
- priv->viu.osd1_blk0_cfg[0] |= OSD_BLK_MODE_32 |
- OSD_COLOR_MATRIX_32_ABGR;
+ priv->viu.osd1_ctrl_stat2 &= ~OSD_REPLACE_EN;
break;
- case DRM_FORMAT_RGB888:
- priv->viu.osd1_blk0_cfg[0] |= OSD_BLK_MODE_24 |
- OSD_COLOR_MATRIX_24_RGB;
- break;
- case DRM_FORMAT_RGB565:
- priv->viu.osd1_blk0_cfg[0] |= OSD_BLK_MODE_16 |
- OSD_COLOR_MATRIX_16_RGB565;
- break;
- };
+ }
/* Default scaler parameters */
vsc_bot_rcv_num = 0;
@@ -305,6 +364,17 @@ static void meson_plane_atomic_update(struct drm_plane *plane,
priv->viu.osd1_addr = gem->paddr;
priv->viu.osd1_stride = fb->pitches[0];
priv->viu.osd1_height = fb->height;
+ priv->viu.osd1_width = fb->width;
+
+ if (priv->viu.osd1_afbcd) {
+ priv->afbcd.modifier = fb->modifier;
+ priv->afbcd.format = fb->format->format;
+
+ /* Calculate decoder write stride */
+ if (meson_vpu_is_compatible(priv, VPU_COMPATIBLE_G12A))
+ priv->viu.osd1_blk2_cfg4 =
+ meson_g12a_afbcd_line_stride(priv);
+ }
if (!meson_plane->enabled) {
/* Reset OSD1 before enabling it on GXL+ SoCs */
@@ -326,6 +396,11 @@ static void meson_plane_atomic_disable(struct drm_plane *plane,
struct meson_plane *meson_plane = to_meson_plane(plane);
struct meson_drm *priv = meson_plane->priv;
+ if (priv->afbcd.ops) {
+ priv->afbcd.ops->reset(priv);
+ priv->afbcd.ops->disable(priv);
+ }
+
/* Disable OSD1 */
if (meson_vpu_is_compatible(priv, VPU_COMPATIBLE_G12A))
writel_bits_relaxed(VIU_OSD1_POSTBLD_SRC_OSD1, 0,
@@ -345,6 +420,42 @@ static const struct drm_plane_helper_funcs meson_plane_helper_funcs = {
.prepare_fb = drm_gem_fb_prepare_fb,
};
+static bool meson_plane_format_mod_supported(struct drm_plane *plane,
+ u32 format, u64 modifier)
+{
+ struct meson_plane *meson_plane = to_meson_plane(plane);
+ struct meson_drm *priv = meson_plane->priv;
+ int i;
+
+ if (modifier == DRM_FORMAT_MOD_INVALID)
+ return false;
+
+ if (modifier == DRM_FORMAT_MOD_LINEAR)
+ return true;
+
+ if (!meson_vpu_is_compatible(priv, VPU_COMPATIBLE_GXM) &&
+ !meson_vpu_is_compatible(priv, VPU_COMPATIBLE_G12A))
+ return false;
+
+ if (modifier & ~DRM_FORMAT_MOD_ARM_AFBC(MESON_MOD_AFBC_VALID_BITS))
+ return false;
+
+ for (i = 0 ; i < plane->modifier_count ; ++i)
+ if (plane->modifiers[i] == modifier)
+ break;
+
+ if (i == plane->modifier_count) {
+ DRM_DEBUG_KMS("Unsupported modifier\n");
+ return false;
+ }
+
+ if (priv->afbcd.ops && priv->afbcd.ops->supported_fmt)
+ return priv->afbcd.ops->supported_fmt(modifier, format);
+
+ DRM_DEBUG_KMS("AFBC Unsupported\n");
+ return false;
+}
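
To make the check above concrete, here is a hedged sketch of a modifier a G12A client might pass (the AFBC macros are the standard ones from include/uapi/drm/drm_fourcc.h):

	u64 mod = DRM_FORMAT_MOD_ARM_AFBC(AFBC_FORMAT_MOD_BLOCK_SIZE_16x16 |
					  AFBC_FORMAT_MOD_SPARSE |
					  AFBC_FORMAT_MOD_SPLIT);

	/* All bits fall inside MESON_MOD_AFBC_VALID_BITS, the modifier is
	 * listed in format_modifiers_afbc_g12a[] below, and
	 * meson_g12a_afbcd_supported_fmt() accepts DRM_FORMAT_XRGB8888
	 * without YTR, so format_mod_supported() returns true. */
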
+
static const struct drm_plane_funcs meson_plane_funcs = {
.update_plane = drm_atomic_helper_update_plane,
.disable_plane = drm_atomic_helper_disable_plane,
@@ -352,6 +463,7 @@ static const struct drm_plane_funcs meson_plane_funcs = {
.reset = drm_atomic_helper_plane_reset,
.atomic_duplicate_state = drm_atomic_helper_plane_duplicate_state,
.atomic_destroy_state = drm_atomic_helper_plane_destroy_state,
+ .format_mod_supported = meson_plane_format_mod_supported,
};
static const uint32_t supported_drm_formats[] = {
@@ -363,10 +475,60 @@ static const uint32_t supported_drm_formats[] = {
DRM_FORMAT_RGB565,
};
+static const uint64_t format_modifiers_afbc_gxm[] = {
+ DRM_FORMAT_MOD_ARM_AFBC(AFBC_FORMAT_MOD_BLOCK_SIZE_16x16 |
+ AFBC_FORMAT_MOD_SPARSE |
+ AFBC_FORMAT_MOD_YTR),
+ /* SPLIT mandates SPARSE, RGB modes mandate YTR */
+ DRM_FORMAT_MOD_ARM_AFBC(AFBC_FORMAT_MOD_BLOCK_SIZE_16x16 |
+ AFBC_FORMAT_MOD_YTR |
+ AFBC_FORMAT_MOD_SPARSE |
+ AFBC_FORMAT_MOD_SPLIT),
+ DRM_FORMAT_MOD_LINEAR,
+ DRM_FORMAT_MOD_INVALID,
+};
+
+static const uint64_t format_modifiers_afbc_g12a[] = {
+ /*
+ * - TOFIX Support AFBC modifiers for YUV formats (16x16 + TILED)
+ * - SPLIT is mandatory for performance reasons when using the 16x16
+ * block size
+ * - 32x8 block size + SPLIT is mandatory with 4K frame sizes
+ * for performance reasons
+ */
+ DRM_FORMAT_MOD_ARM_AFBC(AFBC_FORMAT_MOD_BLOCK_SIZE_16x16 |
+ AFBC_FORMAT_MOD_SPARSE |
+ AFBC_FORMAT_MOD_SPLIT),
+ DRM_FORMAT_MOD_ARM_AFBC(AFBC_FORMAT_MOD_BLOCK_SIZE_16x16 |
+ AFBC_FORMAT_MOD_YTR |
+ AFBC_FORMAT_MOD_SPARSE |
+ AFBC_FORMAT_MOD_SPLIT),
+ DRM_FORMAT_MOD_ARM_AFBC(AFBC_FORMAT_MOD_BLOCK_SIZE_32x8 |
+ AFBC_FORMAT_MOD_SPARSE),
+ DRM_FORMAT_MOD_ARM_AFBC(AFBC_FORMAT_MOD_BLOCK_SIZE_32x8 |
+ AFBC_FORMAT_MOD_YTR |
+ AFBC_FORMAT_MOD_SPARSE),
+ DRM_FORMAT_MOD_ARM_AFBC(AFBC_FORMAT_MOD_BLOCK_SIZE_32x8 |
+ AFBC_FORMAT_MOD_SPARSE |
+ AFBC_FORMAT_MOD_SPLIT),
+ DRM_FORMAT_MOD_ARM_AFBC(AFBC_FORMAT_MOD_BLOCK_SIZE_32x8 |
+ AFBC_FORMAT_MOD_YTR |
+ AFBC_FORMAT_MOD_SPARSE |
+ AFBC_FORMAT_MOD_SPLIT),
+ DRM_FORMAT_MOD_LINEAR,
+ DRM_FORMAT_MOD_INVALID,
+};
+
+static const uint64_t format_modifiers_default[] = {
+ DRM_FORMAT_MOD_LINEAR,
+ DRM_FORMAT_MOD_INVALID,
+};
+
int meson_plane_create(struct meson_drm *priv)
{
struct meson_plane *meson_plane;
struct drm_plane *plane;
+ const uint64_t *format_modifiers = format_modifiers_default;
meson_plane = devm_kzalloc(priv->drm->dev, sizeof(*meson_plane),
GFP_KERNEL);
@@ -376,11 +538,16 @@ int meson_plane_create(struct meson_drm *priv)
meson_plane->priv = priv;
plane = &meson_plane->base;
+ if (meson_vpu_is_compatible(priv, VPU_COMPATIBLE_GXM))
+ format_modifiers = format_modifiers_afbc_gxm;
+ else if (meson_vpu_is_compatible(priv, VPU_COMPATIBLE_G12A))
+ format_modifiers = format_modifiers_afbc_g12a;
+
drm_universal_plane_init(priv->drm, plane, 0xFF,
&meson_plane_funcs,
supported_drm_formats,
ARRAY_SIZE(supported_drm_formats),
- NULL,
+ format_modifiers,
DRM_PLANE_TYPE_PRIMARY, "meson_primary_plane");
drm_plane_helper_add(plane, &meson_plane_helper_funcs);
diff --git a/drivers/gpu/drm/meson/meson_rdma.c b/drivers/gpu/drm/meson/meson_rdma.c
new file mode 100644
index 000000000000..130382178c63
--- /dev/null
+++ b/drivers/gpu/drm/meson/meson_rdma.c
@@ -0,0 +1,135 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Copyright (C) 2019 BayLibre, SAS
+ * Author: Neil Armstrong <narmstrong@baylibre.com>
+ */
+
+#include <linux/bitfield.h>
+#include <linux/dma-mapping.h>
+
+#include "meson_drv.h"
+#include "meson_registers.h"
+#include "meson_rdma.h"
+
+/*
+ * The VPU embeds a "Register DMA" that can write a sequence of registers
+ * on the VPU AHB bus, either manually or triggered by an internal IRQ
+ * event like VSYNC or a line input counter.
+ * The initial implementation handles a single channel (out of the 8
+ * available), triggered by the VSYNC irq, and does not handle the
+ * RDMA irq.
+ */
+
+#define RDMA_DESC_SIZE (sizeof(uint32_t) * 2)
+
+int meson_rdma_init(struct meson_drm *priv)
+{
+ if (!priv->rdma.addr) {
+ /* Allocate a PAGE buffer */
+ priv->rdma.addr =
+ dma_alloc_coherent(priv->dev, SZ_4K,
+ &priv->rdma.addr_dma,
+ GFP_KERNEL);
+ if (!priv->rdma.addr)
+ return -ENOMEM;
+ }
+
+ priv->rdma.offset = 0;
+
+ writel_relaxed(RDMA_CTRL_SW_RESET,
+ priv->io_base + _REG(RDMA_CTRL));
+ writel_relaxed(RDMA_DEFAULT_CONFIG |
+ FIELD_PREP(RDMA_CTRL_AHB_WR_BURST, 3) |
+ FIELD_PREP(RDMA_CTRL_AHB_RD_BURST, 0),
+ priv->io_base + _REG(RDMA_CTRL));
+
+ return 0;
+}
+
+void meson_rdma_free(struct meson_drm *priv)
+{
+ if (!priv->rdma.addr && !priv->rdma.addr_dma)
+ return;
+
+ meson_rdma_stop(priv);
+
+ dma_free_coherent(priv->dev, SZ_4K,
+ priv->rdma.addr, priv->rdma.addr_dma);
+
+ priv->rdma.addr = NULL;
+ priv->rdma.addr_dma = (dma_addr_t)0;
+}
+
+void meson_rdma_setup(struct meson_drm *priv)
+{
+ /* Channel 1: Write Flag, No Address Increment */
+ writel_bits_relaxed(RDMA_ACCESS_RW_FLAG_CHAN1 |
+ RDMA_ACCESS_ADDR_INC_CHAN1,
+ RDMA_ACCESS_RW_FLAG_CHAN1,
+ priv->io_base + _REG(RDMA_ACCESS_AUTO));
+}
+
+void meson_rdma_stop(struct meson_drm *priv)
+{
+ writel_bits_relaxed(RDMA_IRQ_CLEAR_CHAN1,
+ RDMA_IRQ_CLEAR_CHAN1,
+ priv->io_base + _REG(RDMA_CTRL));
+
+ /* Stop Channel 1 */
+ writel_bits_relaxed(RDMA_ACCESS_TRIGGER_CHAN1,
+ FIELD_PREP(RDMA_ACCESS_TRIGGER_CHAN1,
+ RDMA_ACCESS_TRIGGER_STOP),
+ priv->io_base + _REG(RDMA_ACCESS_AUTO));
+}
+
+void meson_rdma_reset(struct meson_drm *priv)
+{
+ meson_rdma_stop(priv);
+
+ priv->rdma.offset = 0;
+}
+
+static void meson_rdma_writel(struct meson_drm *priv, uint32_t val,
+ uint32_t reg)
+{
+ if (priv->rdma.offset >= (SZ_4K / RDMA_DESC_SIZE)) {
+ dev_warn_once(priv->dev, "%s: overflow\n", __func__);
+ return;
+ }
+
+ priv->rdma.addr[priv->rdma.offset++] = reg;
+ priv->rdma.addr[priv->rdma.offset++] = val;
+}
+
+/*
+ * This adds the register write to the RDMA buffer and also applies
+ * it to the hardware immediately.
+ * When meson_rdma_flush is called, the RDMA will replay the buffered
+ * register writes in order.
+ */
+void meson_rdma_writel_sync(struct meson_drm *priv, uint32_t val, uint32_t reg)
+{
+ meson_rdma_writel(priv, val, reg);
+
+ writel_relaxed(val, priv->io_base + _REG(reg));
+}
+
+void meson_rdma_flush(struct meson_drm *priv)
+{
+ meson_rdma_stop(priv);
+
+ /* Start of Channel 1 register writes buffer */
+ writel(priv->rdma.addr_dma,
+ priv->io_base + _REG(RDMA_AHB_START_ADDR_1));
+
+ /* Last byte on Channel 1 register writes buffer */
+ writel(priv->rdma.addr_dma + (priv->rdma.offset * RDMA_DESC_SIZE) - 1,
+ priv->io_base + _REG(RDMA_AHB_END_ADDR_1));
+
+ /* Trigger Channel 1 on VSYNC event */
+ writel_bits_relaxed(RDMA_ACCESS_TRIGGER_CHAN1,
+ FIELD_PREP(RDMA_ACCESS_TRIGGER_CHAN1,
+ RDMA_ACCESS_TRIGGER_VSYNC),
+ priv->io_base + _REG(RDMA_ACCESS_AUTO));
+
+ priv->rdma.offset = 0;
+}
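
Taken together, a caller of these helpers queues register writes and arms the replay roughly as follows (a minimal sketch in kernel context; SOME_VPU_REG and val are hypothetical placeholders, error handling elided):

	if (meson_rdma_init(priv))	/* one-time buffer allocation */
		return;
	meson_rdma_setup(priv);		/* configure channel 1 for writes */

	/* each call appends a (reg, value) pair to the descriptor buffer
	 * and also applies the write immediately */
	meson_rdma_writel_sync(priv, val, SOME_VPU_REG);

	meson_rdma_flush(priv);		/* replay the buffer on the next VSYNC */
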
diff --git a/drivers/gpu/drm/meson/meson_rdma.h b/drivers/gpu/drm/meson/meson_rdma.h
new file mode 100644
index 000000000000..3870bff7b47f
--- /dev/null
+++ b/drivers/gpu/drm/meson/meson_rdma.h
@@ -0,0 +1,21 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * Copyright (C) 2019 BayLibre, SAS
+ * Author: Neil Armstrong <narmstrong@baylibre.com>
+ */
+
+#ifndef __MESON_RDMA_H
+#define __MESON_RDMA_H
+
+#include "meson_drv.h"
+
+int meson_rdma_init(struct meson_drm *priv);
+void meson_rdma_free(struct meson_drm *priv);
+void meson_rdma_setup(struct meson_drm *priv);
+void meson_rdma_reset(struct meson_drm *priv);
+void meson_rdma_stop(struct meson_drm *priv);
+
+void meson_rdma_writel_sync(struct meson_drm *priv, uint32_t val, uint32_t reg);
+void meson_rdma_flush(struct meson_drm *priv);
+
+#endif /* __MESON_RDMA_H */
diff --git a/drivers/gpu/drm/meson/meson_registers.h b/drivers/gpu/drm/meson/meson_registers.h
index 05fce48ceee0..8ea00546cd4e 100644
--- a/drivers/gpu/drm/meson/meson_registers.h
+++ b/drivers/gpu/drm/meson/meson_registers.h
@@ -138,19 +138,25 @@
#define VIU_ADDR_START 0x1a00
#define VIU_ADDR_END 0x1aff
#define VIU_SW_RESET 0x1a01
+#define VIU_SW_RESET_OSD1_AFBCD BIT(31)
+#define VIU_SW_RESET_G12A_OSD1_AFBCD BIT(21)
+#define VIU_SW_RESET_G12A_AFBC_ARB BIT(19)
#define VIU_SW_RESET_OSD1 BIT(0)
#define VIU_MISC_CTRL0 0x1a06
#define VIU_CTRL0_VD1_AFBC_MASK 0x170000
#define VIU_MISC_CTRL1 0x1a07
+#define MALI_AFBC_MISC GENMASK(15, 8)
#define D2D3_INTF_LENGTH 0x1a08
#define D2D3_INTF_CTRL0 0x1a09
#define VIU_OSD1_CTRL_STAT 0x1a10
#define VIU_OSD1_OSD_BLK_ENABLE BIT(0)
+#define VIU_OSD1_OSD_MEM_MODE_LINEAR BIT(2)
#define VIU_OSD1_POSTBLD_SRC_VD1 (1 << 8)
#define VIU_OSD1_POSTBLD_SRC_VD2 (2 << 8)
#define VIU_OSD1_POSTBLD_SRC_OSD1 (3 << 8)
#define VIU_OSD1_POSTBLD_SRC_OSD2 (4 << 8)
#define VIU_OSD1_OSD_ENABLE BIT(21)
+#define VIU_OSD1_CFG_SYN_EN BIT(31)
#define VIU_OSD1_CTRL_STAT2 0x1a2d
#define VIU_OSD1_COLOR_ADDR 0x1a11
#define VIU_OSD1_COLOR 0x1a12
@@ -181,6 +187,16 @@
#define VIU_OSD1_FIFO_CTRL_STAT 0x1a2b
#define VIU_OSD1_TEST_RDDATA 0x1a2c
#define VIU_OSD1_PROT_CTRL 0x1a2e
+#define VIU_OSD1_MALI_UNPACK_CTRL 0x1a2f
+#define VIU_OSD1_MALI_UNPACK_EN BIT(31)
+#define VIU_OSD1_MALI_AFBCD_R_REORDER GENMASK(15, 12)
+#define VIU_OSD1_MALI_AFBCD_G_REORDER GENMASK(11, 8)
+#define VIU_OSD1_MALI_AFBCD_B_REORDER GENMASK(7, 4)
+#define VIU_OSD1_MALI_AFBCD_A_REORDER GENMASK(3, 0)
+#define VIU_OSD1_MALI_REORDER_R 1
+#define VIU_OSD1_MALI_REORDER_G 2
+#define VIU_OSD1_MALI_REORDER_B 3
+#define VIU_OSD1_MALI_REORDER_A 4
#define VIU_OSD2_CTRL_STAT 0x1a30
#define VIU_OSD2_CTRL_STAT2 0x1a4d
#define VIU_OSD2_COLOR_ADDR 0x1a31
@@ -1195,11 +1211,59 @@
#define RDMA_AHB_START_ADDR_7 0x110e
#define RDMA_AHB_END_ADDR_7 0x110f
#define RDMA_ACCESS_AUTO 0x1110
+#define RDMA_ACCESS_TRIGGER_CHAN3 GENMASK(31, 24)
+#define RDMA_ACCESS_TRIGGER_CHAN2 GENMASK(23, 16)
+#define RDMA_ACCESS_TRIGGER_CHAN1 GENMASK(15, 8)
+#define RDMA_ACCESS_TRIGGER_STOP 0
+#define RDMA_ACCESS_TRIGGER_VSYNC 1
+#define RDMA_ACCESS_TRIGGER_LINE 32
+#define RDMA_ACCESS_RW_FLAG_CHAN3 BIT(7)
+#define RDMA_ACCESS_RW_FLAG_CHAN2 BIT(6)
+#define RDMA_ACCESS_RW_FLAG_CHAN1 BIT(5)
+#define RDMA_ACCESS_ADDR_INC_CHAN3 BIT(3)
+#define RDMA_ACCESS_ADDR_INC_CHAN2 BIT(2)
+#define RDMA_ACCESS_ADDR_INC_CHAN1 BIT(1)
#define RDMA_ACCESS_AUTO2 0x1111
+#define RDMA_ACCESS_RW_FLAG_CHAN7 BIT(7)
+#define RDMA_ACCESS_RW_FLAG_CHAN6 BIT(6)
+#define RDMA_ACCESS_RW_FLAG_CHAN5 BIT(5)
+#define RDMA_ACCESS_RW_FLAG_CHAN4 BIT(4)
+#define RDMA_ACCESS_ADDR_INC_CHAN7 BIT(3)
+#define RDMA_ACCESS_ADDR_INC_CHAN6 BIT(2)
+#define RDMA_ACCESS_ADDR_INC_CHAN5 BIT(1)
+#define RDMA_ACCESS_ADDR_INC_CHAN4 BIT(0)
#define RDMA_ACCESS_AUTO3 0x1112
+#define RDMA_ACCESS_TRIGGER_CHAN7 GENMASK(31, 24)
+#define RDMA_ACCESS_TRIGGER_CHAN6 GENMASK(23, 16)
+#define RDMA_ACCESS_TRIGGER_CHAN5 GENMASK(15, 8)
+#define RDMA_ACCESS_TRIGGER_CHAN4 GENMASK(7, 0)
#define RDMA_ACCESS_MAN 0x1113
+#define RDMA_ACCESS_MAN_RW_FLAG BIT(2)
+#define RDMA_ACCESS_MAN_ADDR_INC BIT(1)
+#define RDMA_ACCESS_MAN_START BIT(0)
#define RDMA_CTRL 0x1114
+#define RDMA_IRQ_CLEAR_CHAN7 BIT(31)
+#define RDMA_IRQ_CLEAR_CHAN6 BIT(30)
+#define RDMA_IRQ_CLEAR_CHAN5 BIT(29)
+#define RDMA_IRQ_CLEAR_CHAN4 BIT(28)
+#define RDMA_IRQ_CLEAR_CHAN3 BIT(27)
+#define RDMA_IRQ_CLEAR_CHAN2 BIT(26)
+#define RDMA_IRQ_CLEAR_CHAN1 BIT(25)
+#define RDMA_IRQ_CLEAR_CHAN_MAN BIT(24)
+#define RDMA_DEFAULT_CONFIG (BIT(7) | BIT(6))
+#define RDMA_CTRL_AHB_WR_BURST GENMASK(5, 4)
+#define RDMA_CTRL_AHB_RD_BURST GENMASK(3, 2)
+#define RDMA_CTRL_SW_RESET BIT(1)
+#define RDMA_CTRL_FREE_CLK_EN BIT(0)
#define RDMA_STATUS 0x1115
+#define RDMA_IRQ_STAT_CHAN7 BIT(31)
+#define RDMA_IRQ_STAT_CHAN6 BIT(30)
+#define RDMA_IRQ_STAT_CHAN5 BIT(29)
+#define RDMA_IRQ_STAT_CHAN4 BIT(28)
+#define RDMA_IRQ_STAT_CHAN3 BIT(27)
+#define RDMA_IRQ_STAT_CHAN2 BIT(26)
+#define RDMA_IRQ_STAT_CHAN1 BIT(25)
+#define RDMA_IRQ_STAT_CHAN_MAN BIT(24)
#define RDMA_STATUS2 0x1116
#define RDMA_STATUS3 0x1117
#define L_GAMMA_CNTL_PORT 0x1400
@@ -1595,15 +1659,33 @@
/* osd afbcd on gxtvbb */
#define OSD1_AFBCD_ENABLE 0x31a0
+#define OSD1_AFBCD_ID_FIFO_THRD GENMASK(15, 9)
+#define OSD1_AFBCD_DEC_ENABLE BIT(8)
+#define OSD1_AFBCD_FRM_START BIT(0)
#define OSD1_AFBCD_MODE 0x31a1
+#define OSD1_AFBCD_SOFT_RESET BIT(31)
+#define OSD1_AFBCD_AXI_REORDER_MODE BIT(28)
+#define OSD1_AFBCD_MIF_URGENT GENMASK(25, 24)
+#define OSD1_AFBCD_HOLD_LINE_NUM GENMASK(22, 16)
+#define OSD1_AFBCD_RGBA_EXCHAN_CTRL GENMASK(15, 8)
+#define OSD1_AFBCD_HREG_BLOCK_SPLIT BIT(6)
+#define OSD1_AFBCD_HREG_HALF_BLOCK BIT(5)
+#define OSD1_AFBCD_HREG_PIXEL_PACKING_FMT GENMASK(4, 0)
#define OSD1_AFBCD_SIZE_IN 0x31a2
+#define OSD1_AFBCD_HREG_VSIZE_IN GENMASK(31, 16)
+#define OSD1_AFBCD_HREG_HSIZE_IN GENMASK(15, 0)
#define OSD1_AFBCD_HDR_PTR 0x31a3
#define OSD1_AFBCD_FRAME_PTR 0x31a4
#define OSD1_AFBCD_CHROMA_PTR 0x31a5
#define OSD1_AFBCD_CONV_CTRL 0x31a6
+#define OSD1_AFBCD_CONV_LBUF_LEN GENMASK(15, 0)
#define OSD1_AFBCD_STATUS 0x31a8
#define OSD1_AFBCD_PIXEL_HSCOPE 0x31a9
+#define OSD1_AFBCD_DEC_PIXEL_BGN_H GENMASK(31, 16)
+#define OSD1_AFBCD_DEC_PIXEL_END_H GENMASK(15, 0)
#define OSD1_AFBCD_PIXEL_VSCOPE 0x31aa
+#define OSD1_AFBCD_DEC_PIXEL_BGN_V GENMASK(31, 16)
+#define OSD1_AFBCD_DEC_PIXEL_END_V GENMASK(15, 0)
/* add for gxm and 962e dv core2 */
#define DOLBY_CORE2A_SWAP_CTRL1 0x3434
@@ -1615,12 +1697,34 @@
#define VPU_MAFBC_IRQ_CLEAR 0x3a02
#define VPU_MAFBC_IRQ_MASK 0x3a03
#define VPU_MAFBC_IRQ_STATUS 0x3a04
+#define VPU_MAFBC_IRQ_SECURE_ID_ERROR BIT(5)
+#define VPU_MAFBC_IRQ_AXI_ERROR BIT(4)
+#define VPU_MAFBC_IRQ_DETILING_ERROR BIT(3)
+#define VPU_MAFBC_IRQ_DECODE_ERROR BIT(2)
+#define VPU_MAFBC_IRQ_CONFIGURATION_SWAPPED BIT(1)
+#define VPU_MAFBC_IRQ_SURFACES_COMPLETED BIT(0)
#define VPU_MAFBC_COMMAND 0x3a05
+#define VPU_MAFBC_PENDING_SWAP BIT(1)
+#define VPU_MAFBC_DIRECT_SWAP BIT(0)
#define VPU_MAFBC_STATUS 0x3a06
+#define VPU_MAFBC_ERROR BIT(2)
+#define VPU_MAFBC_SWAPPING BIT(1)
+#define VPU_MAFBC_ACTIVE BIT(0)
#define VPU_MAFBC_SURFACE_CFG 0x3a07
+#define VPU_MAFBC_CONTINUOUS_DECODING_ENABLE BIT(16)
+#define VPU_MAFBC_S3_ENABLE BIT(3)
+#define VPU_MAFBC_S2_ENABLE BIT(2)
+#define VPU_MAFBC_S1_ENABLE BIT(1)
+#define VPU_MAFBC_S0_ENABLE BIT(0)
#define VPU_MAFBC_HEADER_BUF_ADDR_LOW_S0 0x3a10
#define VPU_MAFBC_HEADER_BUF_ADDR_HIGH_S0 0x3a11
#define VPU_MAFBC_FORMAT_SPECIFIER_S0 0x3a12
+#define VPU_MAFBC_PAYLOAD_LIMIT_EN BIT(19)
+#define VPU_MAFBC_TILED_HEADER_EN BIT(18)
+#define VPU_MAFBC_SUPER_BLOCK_ASPECT GENMASK(17, 16)
+#define VPU_MAFBC_BLOCK_SPLIT BIT(9)
+#define VPU_MAFBC_YUV_TRANSFORM BIT(8)
+#define VPU_MAFBC_PIXEL_FORMAT GENMASK(3, 0)
#define VPU_MAFBC_BUFFER_WIDTH_S0 0x3a13
#define VPU_MAFBC_BUFFER_HEIGHT_S0 0x3a14
#define VPU_MAFBC_BOUNDING_BOX_X_START_S0 0x3a15
@@ -1631,6 +1735,8 @@
#define VPU_MAFBC_OUTPUT_BUF_ADDR_HIGH_S0 0x3a1a
#define VPU_MAFBC_OUTPUT_BUF_STRIDE_S0 0x3a1b
#define VPU_MAFBC_PREFETCH_CFG_S0 0x3a1c
+#define VPU_MAFBC_PREFETCH_READ_DIRECTION_Y BIT(1)
+#define VPU_MAFBC_PREFETCH_READ_DIRECTION_X BIT(0)
#define VPU_MAFBC_HEADER_BUF_ADDR_LOW_S1 0x3a30
#define VPU_MAFBC_HEADER_BUF_ADDR_HIGH_S1 0x3a31
@@ -1677,7 +1783,11 @@
#define DOLBY_PATH_CTRL 0x1a0c
#define DOLBY_BYPASS_EN(val) (val & 0xf)
#define OSD_PATH_MISC_CTRL 0x1a0e
+#define OSD_PATH_OSD_AXI_SEL_OSD1_AFBCD BIT(4)
+#define OSD_PATH_OSD_AXI_SEL_OSD2_AFBCD BIT(5)
+#define OSD_PATH_OSD_AXI_SEL_OSD3_AFBCD BIT(6)
#define MALI_AFBCD_TOP_CTRL 0x1a0f
+#define MALI_AFBCD_MANUAL_RESET BIT(23)
#define VIU_OSD_BLEND_CTRL 0x39b0
#define VIU_OSD_BLEND_REORDER(dest, src) ((src) << (dest * 4))
diff --git a/drivers/gpu/drm/meson/meson_viu.c b/drivers/gpu/drm/meson/meson_viu.c
index 68cf2c2eca5f..304f8ff1339c 100644
--- a/drivers/gpu/drm/meson/meson_viu.c
+++ b/drivers/gpu/drm/meson/meson_viu.c
@@ -7,6 +7,9 @@
*/
#include <linux/export.h>
+#include <linux/bitfield.h>
+
+#include <drm/drm_fourcc.h>
#include "meson_drv.h"
#include "meson_viu.h"
@@ -335,6 +338,79 @@ void meson_viu_osd1_reset(struct meson_drm *priv)
meson_viu_load_matrix(priv);
}
+#define OSD1_MALI_ORDER_ABGR \
+ (FIELD_PREP(VIU_OSD1_MALI_AFBCD_A_REORDER, \
+ VIU_OSD1_MALI_REORDER_A) | \
+ FIELD_PREP(VIU_OSD1_MALI_AFBCD_B_REORDER, \
+ VIU_OSD1_MALI_REORDER_B) | \
+ FIELD_PREP(VIU_OSD1_MALI_AFBCD_G_REORDER, \
+ VIU_OSD1_MALI_REORDER_G) | \
+ FIELD_PREP(VIU_OSD1_MALI_AFBCD_R_REORDER, \
+ VIU_OSD1_MALI_REORDER_R))
+
+#define OSD1_MALI_ORDER_ARGB \
+ (FIELD_PREP(VIU_OSD1_MALI_AFBCD_A_REORDER, \
+ VIU_OSD1_MALI_REORDER_A) | \
+ FIELD_PREP(VIU_OSD1_MALI_AFBCD_B_REORDER, \
+ VIU_OSD1_MALI_REORDER_R) | \
+ FIELD_PREP(VIU_OSD1_MALI_AFBCD_G_REORDER, \
+ VIU_OSD1_MALI_REORDER_G) | \
+ FIELD_PREP(VIU_OSD1_MALI_AFBCD_R_REORDER, \
+ VIU_OSD1_MALI_REORDER_B))
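
For illustration, with the reorder fields at bits 15:12 (R), 11:8 (G), 7:4 (B), 3:0 (A) and channel codes R=1, G=2, B=3, A=4, the macros above expand to the register values 0x1234 for OSD1_MALI_ORDER_ABGR and 0x3214 for OSD1_MALI_ORDER_ARGB.
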
+
+void meson_viu_g12a_enable_osd1_afbc(struct meson_drm *priv)
+{
+ u32 afbc_order = OSD1_MALI_ORDER_ARGB;
+
+ /* Enable Mali AFBC Unpack */
+ writel_bits_relaxed(VIU_OSD1_MALI_UNPACK_EN,
+ VIU_OSD1_MALI_UNPACK_EN,
+ priv->io_base + _REG(VIU_OSD1_MALI_UNPACK_CTRL));
+
+ switch (priv->afbcd.format) {
+ case DRM_FORMAT_XBGR8888:
+ case DRM_FORMAT_ABGR8888:
+ afbc_order = OSD1_MALI_ORDER_ABGR;
+ break;
+ }
+
+ /* Setup RGBA Reordering */
+ writel_bits_relaxed(VIU_OSD1_MALI_AFBCD_A_REORDER |
+ VIU_OSD1_MALI_AFBCD_B_REORDER |
+ VIU_OSD1_MALI_AFBCD_G_REORDER |
+ VIU_OSD1_MALI_AFBCD_R_REORDER,
+ afbc_order,
+ priv->io_base + _REG(VIU_OSD1_MALI_UNPACK_CTRL));
+
+ /* Select AFBCD path for OSD1 */
+ writel_bits_relaxed(OSD_PATH_OSD_AXI_SEL_OSD1_AFBCD,
+ OSD_PATH_OSD_AXI_SEL_OSD1_AFBCD,
+ priv->io_base + _REG(OSD_PATH_MISC_CTRL));
+}
+
+void meson_viu_g12a_disable_osd1_afbc(struct meson_drm *priv)
+{
+ /* Disable AFBCD path for OSD1 */
+ writel_bits_relaxed(OSD_PATH_OSD_AXI_SEL_OSD1_AFBCD, 0,
+ priv->io_base + _REG(OSD_PATH_MISC_CTRL));
+
+ /* Disable AFBCD unpack */
+ writel_bits_relaxed(VIU_OSD1_MALI_UNPACK_EN, 0,
+ priv->io_base + _REG(VIU_OSD1_MALI_UNPACK_CTRL));
+}
+
+void meson_viu_gxm_enable_osd1_afbc(struct meson_drm *priv)
+{
+ writel_bits_relaxed(MALI_AFBC_MISC, FIELD_PREP(MALI_AFBC_MISC, 0x90),
+ priv->io_base + _REG(VIU_MISC_CTRL1));
+}
+
+void meson_viu_gxm_disable_osd1_afbc(struct meson_drm *priv)
+{
+ writel_bits_relaxed(MALI_AFBC_MISC, FIELD_PREP(MALI_AFBC_MISC, 0x00),
+ priv->io_base + _REG(VIU_MISC_CTRL1));
+}
+
static inline uint32_t meson_viu_osd_burst_length_reg(uint32_t length)
{
uint32_t val = (((length & 0x80) % 24) / 12);
@@ -362,7 +438,7 @@ void meson_viu_init(struct meson_drm *priv)
/* Initialize OSD1 fifo control register */
reg = VIU_OSD_DDR_PRIORITY_URGENT |
- VIU_OSD_HOLD_FIFO_LINES(4) |
+ VIU_OSD_HOLD_FIFO_LINES(31) |
VIU_OSD_FIFO_DEPTH_VAL(32) | /* fifo_depth_val: 32*8=256 */
VIU_OSD_WORDS_PER_BURST(4) | /* 4 words in 1 burst */
VIU_OSD_FIFO_LIMITS(2); /* fifo_lim: 2*16=32 */
@@ -420,8 +496,13 @@ void meson_viu_init(struct meson_drm *priv)
writel_bits_relaxed(DOLBY_BYPASS_EN(0xc), DOLBY_BYPASS_EN(0xc),
priv->io_base + _REG(DOLBY_PATH_CTRL));
+
+ meson_viu_g12a_disable_osd1_afbc(priv);
}
+ if (meson_vpu_is_compatible(priv, VPU_COMPATIBLE_GXM))
+ meson_viu_gxm_disable_osd1_afbc(priv);
+
priv->viu.osd1_enabled = false;
priv->viu.osd1_commit = false;
priv->viu.osd1_interlace = false;
diff --git a/drivers/gpu/drm/meson/meson_viu.h b/drivers/gpu/drm/meson/meson_viu.h
index a112e8d18850..e4a2f24d7c38 100644
--- a/drivers/gpu/drm/meson/meson_viu.h
+++ b/drivers/gpu/drm/meson/meson_viu.h
@@ -10,6 +10,8 @@
#define __MESON_VIU_H
/* OSDx_BLKx_CFG */
+#define OSD_MALI_SRC_EN BIT(30)
+
#define OSD_CANVAS_SEL 16
#define OSD_ENDIANNESS_LE BIT(15)
@@ -33,21 +35,38 @@
#define OSD_COLOR_MATRIX_16_RGB655 (0x00 << 2)
#define OSD_COLOR_MATRIX_16_RGB565 (0x04 << 2)
+#define OSD_MALI_COLOR_MODE_R8 (0 << 8)
+#define OSD_MALI_COLOR_MODE_YUV422 (1 << 8)
+#define OSD_MALI_COLOR_MODE_RGB565 (2 << 8)
+#define OSD_MALI_COLOR_MODE_RGBA5551 (3 << 8)
+#define OSD_MALI_COLOR_MODE_RGBA4444 (4 << 8)
+#define OSD_MALI_COLOR_MODE_RGBA8888 (5 << 8)
+#define OSD_MALI_COLOR_MODE_RGB888 (7 << 8)
+#define OSD_MALI_COLOR_MODE_YUV422_10B (8 << 8)
+#define OSD_MALI_COLOR_MODE_RGBA1010102 (9 << 8)
+
#define OSD_INTERLACE_ENABLED BIT(1)
#define OSD_INTERLACE_ODD BIT(0)
#define OSD_INTERLACE_EVEN (0)
/* OSDx_CTRL_STAT */
#define OSD_ENABLE BIT(21)
+#define OSD_MEM_LINEAR_ADDR BIT(2)
#define OSD_BLK0_ENABLE BIT(0)
#define OSD_GLOBAL_ALPHA_SHIFT 12
/* OSDx_CTRL_STAT2 */
+#define OSD_DPATH_MALI_AFBCD BIT(15)
#define OSD_REPLACE_EN BIT(14)
#define OSD_REPLACE_SHIFT 6
+#define OSD_PENDING_STAT_CLEAN BIT(1)
void meson_viu_osd1_reset(struct meson_drm *priv);
+void meson_viu_g12a_enable_osd1_afbc(struct meson_drm *priv);
+void meson_viu_g12a_disable_osd1_afbc(struct meson_drm *priv);
+void meson_viu_gxm_enable_osd1_afbc(struct meson_drm *priv);
+void meson_viu_gxm_disable_osd1_afbc(struct meson_drm *priv);
void meson_viu_init(struct meson_drm *priv);
#endif /* __MESON_VIU_H */
diff --git a/drivers/gpu/drm/mga/mga_drv.h b/drivers/gpu/drm/mga/mga_drv.h
index d5deecb93975..66df51607896 100644
--- a/drivers/gpu/drm/mga/mga_drv.h
+++ b/drivers/gpu/drm/mga/mga_drv.h
@@ -32,6 +32,7 @@
#define __MGA_DRV_H__
#include <linux/irqreturn.h>
+#include <linux/pci.h>
#include <linux/slab.h>
#include <drm/drm_agpsupport.h>
@@ -40,7 +41,6 @@
#include <drm/drm_ioctl.h>
#include <drm/drm_irq.h>
#include <drm/drm_legacy.h>
-#include <drm/drm_pci.h>
#include <drm/drm_print.h>
#include <drm/drm_sarea.h>
#include <drm/drm_vblank.h>
diff --git a/drivers/gpu/drm/mgag200/Kconfig b/drivers/gpu/drm/mgag200/Kconfig
index aed11f4f4c55..d60aa4b9ccd4 100644
--- a/drivers/gpu/drm/mgag200/Kconfig
+++ b/drivers/gpu/drm/mgag200/Kconfig
@@ -8,8 +8,8 @@ config DRM_MGAG200
select DRM_TTM_HELPER
help
This is a KMS driver for the MGA G200 server chips, it
- does not support the original MGA G200 or any of the desktop
- chips. It requires 0.3.0 of the modesetting userspace driver,
- and a version of mga driver that will fail on KMS enabled
- devices.
+ does not support the original MGA G200 or any of the desktop
+ chips. It requires 0.3.0 of the modesetting userspace driver,
+ and a version of mga driver that will fail on KMS enabled
+ devices.
diff --git a/drivers/gpu/drm/mgag200/mgag200_cursor.c b/drivers/gpu/drm/mgag200/mgag200_cursor.c
index 79711dbb5b03..d491edd317ff 100644
--- a/drivers/gpu/drm/mgag200/mgag200_cursor.c
+++ b/drivers/gpu/drm/mgag200/mgag200_cursor.c
@@ -5,7 +5,7 @@
* Author: Christopher Harvey <charvey@matrox.com>
*/
-#include <drm/drm_pci.h>
+#include <linux/pci.h>
#include "mgag200_drv.h"
@@ -208,8 +208,7 @@ int mgag200_cursor_init(struct mga_device *mdev)
return -ENOMEM;
for (i = 0; i < ncursors; ++i) {
- gbo = drm_gem_vram_create(dev, &dev->vram_mm->bdev,
- size, 0, false);
+ gbo = drm_gem_vram_create(dev, size, 0);
if (IS_ERR(gbo)) {
ret = PTR_ERR(gbo);
goto err_drm_gem_vram_put;
diff --git a/drivers/gpu/drm/mgag200/mgag200_drv.c b/drivers/gpu/drm/mgag200/mgag200_drv.c
index b113876c2428..7a5bad2f57d7 100644
--- a/drivers/gpu/drm/mgag200/mgag200_drv.c
+++ b/drivers/gpu/drm/mgag200/mgag200_drv.c
@@ -6,13 +6,13 @@
* Dave Airlie
*/
-#include <linux/module.h>
#include <linux/console.h>
+#include <linux/module.h>
+#include <linux/pci.h>
#include <drm/drm_drv.h>
#include <drm/drm_file.h>
#include <drm/drm_ioctl.h>
-#include <drm/drm_pci.h>
#include <drm/drm_pciids.h>
#include "mgag200_drv.h"
@@ -27,6 +27,10 @@ int mgag200_modeset = -1;
MODULE_PARM_DESC(modeset, "Disable/Enable modesetting");
module_param_named(modeset, mgag200_modeset, int, 0400);
+int mgag200_hw_bug_no_startadd = -1;
+MODULE_PARM_DESC(hw_bug_no_startadd, "HW does not interpret scanout-buffer start address correctly");
+module_param_named(hw_bug_no_startadd, mgag200_hw_bug_no_startadd, int, 0400);
+
static struct drm_driver driver;
static const struct pci_device_id pciidlist[] = {
@@ -47,22 +51,66 @@ MODULE_DEVICE_TABLE(pci, pciidlist);
static int mga_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
+ struct drm_device *dev;
+ int ret;
+
drm_fb_helper_remove_conflicting_pci_framebuffers(pdev, "mgag200drmfb");
- return drm_get_pci_dev(pdev, ent, &driver);
+ ret = pci_enable_device(pdev);
+ if (ret)
+ return ret;
+
+ dev = drm_dev_alloc(&driver, &pdev->dev);
+ if (IS_ERR(dev)) {
+ ret = PTR_ERR(dev);
+ goto err_pci_disable_device;
+ }
+
+ dev->pdev = pdev;
+ pci_set_drvdata(pdev, dev);
+
+ ret = mgag200_driver_load(dev, ent->driver_data);
+ if (ret)
+ goto err_drm_dev_put;
+
+ ret = drm_dev_register(dev, ent->driver_data);
+ if (ret)
+ goto err_mgag200_driver_unload;
+
+ return 0;
+
+err_mgag200_driver_unload:
+ mgag200_driver_unload(dev);
+err_drm_dev_put:
+ drm_dev_put(dev);
+err_pci_disable_device:
+ pci_disable_device(pdev);
+ return ret;
}
static void mga_pci_remove(struct pci_dev *pdev)
{
struct drm_device *dev = pci_get_drvdata(pdev);
- drm_put_dev(dev);
+ drm_dev_unregister(dev);
+ mgag200_driver_unload(dev);
+ drm_dev_put(dev);
}
DEFINE_DRM_GEM_FOPS(mgag200_driver_fops);
static bool mgag200_pin_bo_at_0(const struct mga_device *mdev)
{
+ if (mgag200_hw_bug_no_startadd > 0) {
+ DRM_WARN_ONCE("Option hw_bug_no_startradd is enabled. Please "
+ "report the output of 'lspci -vvnn' to "
+ "<dri-devel@lists.freedesktop.org> if this "
+ "option is required to make mgag200 work "
+ "correctly on your system.\n");
+ return true;
+ } else if (!mgag200_hw_bug_no_startadd) {
+ return false;
+ }
return mdev->flags & MGAG200_FLAG_HW_BUG_NO_STARTADD;
}
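
The new option is a tri-state: the default -1 defers to the per-device MGAG200_FLAG_HW_BUG_NO_STARTADD flag, 0 forces the workaround off, and any positive value forces it on, e.g. (hypothetical invocation):

	modprobe mgag200 hw_bug_no_startadd=1
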
@@ -86,14 +134,11 @@ int mgag200_driver_dumb_create(struct drm_file *file,
if (mgag200_pin_bo_at_0(mdev))
pg_align = PFN_UP(mdev->mc.vram_size);
- return drm_gem_vram_fill_create_dumb(file, dev, &dev->vram_mm->bdev,
- pg_align, false, args);
+ return drm_gem_vram_fill_create_dumb(file, dev, pg_align, 0, args);
}
static struct drm_driver driver = {
.driver_features = DRIVER_GEM | DRIVER_MODESET,
- .load = mgag200_driver_load,
- .unload = mgag200_driver_unload,
.fops = &mgag200_driver_fops,
.name = DRIVER_NAME,
.desc = DRIVER_DESC,
diff --git a/drivers/gpu/drm/mgag200/mgag200_i2c.c b/drivers/gpu/drm/mgag200/mgag200_i2c.c
index 51d4037f00d4..9f4635916d32 100644
--- a/drivers/gpu/drm/mgag200/mgag200_i2c.c
+++ b/drivers/gpu/drm/mgag200/mgag200_i2c.c
@@ -29,8 +29,7 @@
#include <linux/export.h>
#include <linux/i2c-algo-bit.h>
#include <linux/i2c.h>
-
-#include <drm/drm_pci.h>
+#include <linux/pci.h>
#include "mgag200_drv.h"
diff --git a/drivers/gpu/drm/mgag200/mgag200_main.c b/drivers/gpu/drm/mgag200/mgag200_main.c
index e1bc5b0aa774..e278b6a547bd 100644
--- a/drivers/gpu/drm/mgag200/mgag200_main.c
+++ b/drivers/gpu/drm/mgag200/mgag200_main.c
@@ -8,9 +8,10 @@
* Dave Airlie
*/
+#include <linux/pci.h>
+
#include <drm/drm_crtc_helper.h>
#include <drm/drm_gem_framebuffer_helper.h>
-#include <drm/drm_pci.h>
#include "mgag200_drv.h"
@@ -118,8 +119,11 @@ static int mgag200_device_init(struct drm_device *dev,
return -ENOMEM;
/* stash G200 SE model number for later use */
- if (IS_G200_SE(mdev))
+ if (IS_G200_SE(mdev)) {
mdev->unique_rev_id = RREG32(0x1e24);
+ DRM_DEBUG("G200 SE unique revision id is 0x%x\n",
+ mdev->unique_rev_id);
+ }
ret = mga_vram_init(mdev);
if (ret)
diff --git a/drivers/gpu/drm/mgag200/mgag200_mode.c b/drivers/gpu/drm/mgag200/mgag200_mode.c
index 5ec697148fc1..62a8e9ccb16d 100644
--- a/drivers/gpu/drm/mgag200/mgag200_mode.c
+++ b/drivers/gpu/drm/mgag200/mgag200_mode.c
@@ -9,10 +9,10 @@
*/
#include <linux/delay.h>
+#include <linux/pci.h>
#include <drm/drm_crtc_helper.h>
#include <drm/drm_fourcc.h>
-#include <drm/drm_pci.h>
#include <drm/drm_plane_helper.h>
#include <drm/drm_probe_helper.h>
diff --git a/drivers/gpu/drm/mgag200/mgag200_ttm.c b/drivers/gpu/drm/mgag200/mgag200_ttm.c
index 99997d737362..e89657630ea7 100644
--- a/drivers/gpu/drm/mgag200/mgag200_ttm.c
+++ b/drivers/gpu/drm/mgag200/mgag200_ttm.c
@@ -26,7 +26,7 @@
* Authors: Dave Airlie <airlied@redhat.com>
*/
-#include <drm/drm_pci.h>
+#include <linux/pci.h>
#include "mgag200_drv.h"
diff --git a/drivers/gpu/drm/msm/adreno/a3xx_gpu.c b/drivers/gpu/drm/msm/adreno/a3xx_gpu.c
index 7ad14937fcdf..b67f88872726 100644
--- a/drivers/gpu/drm/msm/adreno/a3xx_gpu.c
+++ b/drivers/gpu/drm/msm/adreno/a3xx_gpu.c
@@ -506,6 +506,14 @@ struct msm_gpu *a3xx_gpu_init(struct drm_device *dev)
goto fail;
}
+ /*
+ * Set the ICC path to maximum speed for now by multiplying the fastest
+ * frequency by the bus width (8). We'll want to scale this later on to
+ * improve battery life.
+ */
+ icc_set_bw(gpu->icc_path, 0, Bps_to_icc(gpu->fast_rate) * 8);
+ icc_set_bw(gpu->ocmem_icc_path, 0, Bps_to_icc(gpu->fast_rate) * 8);
+
return gpu;
fail:
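
As a rough sketch of the numbers involved (assuming a hypothetical fast_rate of 450 MHz): the request is 450,000,000 * 8 = 3.6e9 bytes/s, i.e. about 3.6 GB/s of peak bandwidth on each path; Bps_to_icc() only converts that figure to the interconnect framework's kBps units. The same reasoning applies to the identical a4xx hunk below.
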
diff --git a/drivers/gpu/drm/msm/adreno/a4xx_gpu.c b/drivers/gpu/drm/msm/adreno/a4xx_gpu.c
index b01388a9e89e..253d8d85daad 100644
--- a/drivers/gpu/drm/msm/adreno/a4xx_gpu.c
+++ b/drivers/gpu/drm/msm/adreno/a4xx_gpu.c
@@ -591,6 +591,14 @@ struct msm_gpu *a4xx_gpu_init(struct drm_device *dev)
goto fail;
}
+ /*
+ * Set the ICC path to maximum speed for now by multiplying the fastest
+ * frequency by the bus width (8). We'll want to scale this later on to
+ * improve battery life.
+ */
+ icc_set_bw(gpu->icc_path, 0, Bps_to_icc(gpu->fast_rate) * 8);
+ icc_set_bw(gpu->ocmem_icc_path, 0, Bps_to_icc(gpu->fast_rate) * 8);
+
return gpu;
fail:
diff --git a/drivers/gpu/drm/msm/adreno/a5xx_gpu.c b/drivers/gpu/drm/msm/adreno/a5xx_gpu.c
index b02e2042547f..7d9e63e20ded 100644
--- a/drivers/gpu/drm/msm/adreno/a5xx_gpu.c
+++ b/drivers/gpu/drm/msm/adreno/a5xx_gpu.c
@@ -753,11 +753,18 @@ static int a5xx_hw_init(struct msm_gpu *gpu)
gpu->funcs->flush(gpu, gpu->rb[0]);
if (!a5xx_idle(gpu, gpu->rb[0]))
return -EINVAL;
- } else {
- /* Print a warning so if we die, we know why */
+ } else if (ret == -ENODEV) {
+ /*
+ * This device does not use a zap shader (but print a warning
+ * just in case someone got their DT wrong; hopefully they
+ * have a debug UART to realize the error of their ways,
+ * because if you mess this up you are about to crash horribly)
+ */
dev_warn_once(gpu->dev->dev,
"Zap shader not enabled - using SECVID_TRUST_CNTL instead\n");
gpu_write(gpu, REG_A5XX_RBBM_SECVID_TRUST_CNTL, 0x0);
+ } else {
+ return ret;
}
/* Last step - yield the ringbuffer */
diff --git a/drivers/gpu/drm/msm/adreno/a6xx.xml.h b/drivers/gpu/drm/msm/adreno/a6xx.xml.h
index f44553ec3193..ed78fee2a262 100644
--- a/drivers/gpu/drm/msm/adreno/a6xx.xml.h
+++ b/drivers/gpu/drm/msm/adreno/a6xx.xml.h
@@ -16,11 +16,11 @@ The rules-ng-ng source files this header was generated from are:
- /home/robclark/src/envytools/rnndb/adreno/a3xx.xml ( 83840 bytes, from 2018-07-03 19:37:13)
- /home/robclark/src/envytools/rnndb/adreno/a4xx.xml ( 112086 bytes, from 2018-07-03 19:37:13)
- /home/robclark/src/envytools/rnndb/adreno/a5xx.xml ( 147240 bytes, from 2018-12-02 17:29:54)
-- /home/robclark/src/envytools/rnndb/adreno/a6xx.xml ( 140790 bytes, from 2018-12-02 17:29:54)
+- /home/smasetty/playarea/envytools/rnndb/adreno/a6xx.xml ( 161969 bytes, from 2019-11-29 07:18:16)
- /home/robclark/src/envytools/rnndb/adreno/a6xx_gmu.xml ( 10431 bytes, from 2018-09-14 13:03:07)
- /home/robclark/src/envytools/rnndb/adreno/ocmem.xml ( 1773 bytes, from 2018-07-03 19:37:13)
-Copyright (C) 2013-2018 by the following authors:
+Copyright (C) 2013-2019 by the following authors:
- Rob Clark <robdclark@gmail.com> (robclark)
- Ilia Mirkin <imirkin@alum.mit.edu> (imirkin)
@@ -2519,6 +2519,54 @@ static inline uint32_t A6XX_VBIF_TEST_BUS2_CTRL1_DATA_SEL(uint32_t val)
#define REG_A6XX_VBIF_PERF_PWR_CNT_HIGH2 0x0000311a
+#define REG_A6XX_GBIF_SCACHE_CNTL1 0x00003c02
+
+#define REG_A6XX_GBIF_QSB_SIDE0 0x00003c03
+
+#define REG_A6XX_GBIF_QSB_SIDE1 0x00003c04
+
+#define REG_A6XX_GBIF_QSB_SIDE2 0x00003c05
+
+#define REG_A6XX_GBIF_QSB_SIDE3 0x00003c06
+
+#define REG_A6XX_GBIF_HALT 0x00003c45
+
+#define REG_A6XX_GBIF_HALT_ACK 0x00003c46
+
+#define REG_A6XX_GBIF_PERF_PWR_CNT_EN 0x00003cc0
+
+#define REG_A6XX_GBIF_PERF_CNT_SEL 0x00003cc2
+
+#define REG_A6XX_GBIF_PERF_PWR_CNT_SEL 0x00003cc3
+
+#define REG_A6XX_GBIF_PERF_CNT_LOW0 0x00003cc4
+
+#define REG_A6XX_GBIF_PERF_CNT_LOW1 0x00003cc5
+
+#define REG_A6XX_GBIF_PERF_CNT_LOW2 0x00003cc6
+
+#define REG_A6XX_GBIF_PERF_CNT_LOW3 0x00003cc7
+
+#define REG_A6XX_GBIF_PERF_CNT_HIGH0 0x00003cc8
+
+#define REG_A6XX_GBIF_PERF_CNT_HIGH1 0x00003cc9
+
+#define REG_A6XX_GBIF_PERF_CNT_HIGH2 0x00003cca
+
+#define REG_A6XX_GBIF_PERF_CNT_HIGH3 0x00003ccb
+
+#define REG_A6XX_GBIF_PWR_CNT_LOW0 0x00003ccc
+
+#define REG_A6XX_GBIF_PWR_CNT_LOW1 0x00003ccd
+
+#define REG_A6XX_GBIF_PWR_CNT_LOW2 0x00003cce
+
+#define REG_A6XX_GBIF_PWR_CNT_HIGH0 0x00003ccf
+
+#define REG_A6XX_GBIF_PWR_CNT_HIGH1 0x00003cd0
+
+#define REG_A6XX_GBIF_PWR_CNT_HIGH2 0x00003cd1
+
#define REG_A6XX_RB_WINDOW_OFFSET2 0x000088d4
#define A6XX_RB_WINDOW_OFFSET2_WINDOW_OFFSET_DISABLE 0x80000000
#define A6XX_RB_WINDOW_OFFSET2_X__MASK 0x00007fff
diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gmu.c b/drivers/gpu/drm/msm/adreno/a6xx_gmu.c
index 85f14feafdec..983afeaee737 100644
--- a/drivers/gpu/drm/msm/adreno/a6xx_gmu.c
+++ b/drivers/gpu/drm/msm/adreno/a6xx_gmu.c
@@ -1,5 +1,5 @@
// SPDX-License-Identifier: GPL-2.0
-/* Copyright (c) 2017-2018 The Linux Foundation. All rights reserved. */
+/* Copyright (c) 2017-2019 The Linux Foundation. All rights reserved. */
#include <linux/clk.h>
#include <linux/interconnect.h>
@@ -149,6 +149,8 @@ void a6xx_gmu_set_freq(struct msm_gpu *gpu, unsigned long freq)
if (freq == gmu->gpu_freqs[perf_index])
break;
+ gmu->current_perf_index = perf_index;
+
__a6xx_gmu_set_freq(gmu, perf_index);
}
@@ -433,6 +435,8 @@ static void __iomem *a6xx_gmu_get_mmio(struct platform_device *pdev,
static void a6xx_gmu_rpmh_init(struct a6xx_gmu *gmu)
{
+ struct a6xx_gpu *a6xx_gpu = container_of(gmu, struct a6xx_gpu, gmu);
+ struct adreno_gpu *adreno_gpu = &a6xx_gpu->base;
struct platform_device *pdev = to_platform_device(gmu->dev);
void __iomem *pdcptr = a6xx_gmu_get_mmio(pdev, "gmu_pdc");
void __iomem *seqptr = a6xx_gmu_get_mmio(pdev, "gmu_pdc_seq");
@@ -480,20 +484,34 @@ static void a6xx_gmu_rpmh_init(struct a6xx_gmu *gmu)
pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS1_CMD0_MSGID + 4, 0x10108);
pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS1_CMD0_ADDR + 4, 0x30000);
pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS1_CMD0_DATA + 4, 0x0);
+
pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS1_CMD0_MSGID + 8, 0x10108);
- pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS1_CMD0_ADDR + 8, 0x30080);
+ if (adreno_is_a618(adreno_gpu))
+ pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS1_CMD0_ADDR + 8, 0x30090);
+ else
+ pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS1_CMD0_ADDR + 8, 0x30080);
pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS1_CMD0_DATA + 8, 0x0);
+
pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS3_CMD_ENABLE_BANK, 7);
pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS3_CMD_WAIT_FOR_CMPL_BANK, 0);
pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS3_CONTROL, 0);
pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS3_CMD0_MSGID, 0x10108);
pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS3_CMD0_ADDR, 0x30010);
pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS3_CMD0_DATA, 2);
+
pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS3_CMD0_MSGID + 4, 0x10108);
pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS3_CMD0_ADDR + 4, 0x30000);
- pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS3_CMD0_DATA + 4, 0x3);
+ if (adreno_is_a618(adreno_gpu))
+ pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS3_CMD0_DATA + 4, 0x2);
+ else
+ pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS3_CMD0_DATA + 4, 0x3);
+
pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS3_CMD0_MSGID + 8, 0x10108);
- pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS3_CMD0_ADDR + 8, 0x30080);
+ if (adreno_is_a618(adreno_gpu))
+ pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS3_CMD0_ADDR + 8, 0x30090);
+ else
+ pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS3_CMD0_ADDR + 8, 0x30080);
pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS3_CMD0_DATA + 8, 0x3);
/* Setup GPU PDC */
@@ -741,8 +759,8 @@ int a6xx_gmu_resume(struct a6xx_gpu *a6xx_gpu)
gmu_write(gmu, REG_A6XX_GMU_GMU2HOST_INTR_MASK, ~A6XX_HFI_IRQ_MASK);
enable_irq(gmu->hfi_irq);
- /* Set the GPU to the highest power frequency */
- __a6xx_gmu_set_freq(gmu, gmu->nr_gpu_freqs - 1);
+ /* Set the GPU to the current freq */
+ __a6xx_gmu_set_freq(gmu, gmu->current_perf_index);
/*
* "enable" the GX power domain which won't actually do anything but it
@@ -1166,6 +1184,8 @@ static int a6xx_gmu_pwrlevels_probe(struct a6xx_gmu *gmu)
gmu->nr_gpu_freqs = a6xx_gmu_build_freq_table(&gpu->pdev->dev,
gmu->gpu_freqs, ARRAY_SIZE(gmu->gpu_freqs));
+ gmu->current_perf_index = gmu->nr_gpu_freqs - 1;
+
/* Build the list of RPMh votes that we'll send to the GMU */
return a6xx_gmu_rpmh_votes_init(gmu);
}
diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gmu.h b/drivers/gpu/drm/msm/adreno/a6xx_gmu.h
index 39a26dd63674..2af91ed7ed0c 100644
--- a/drivers/gpu/drm/msm/adreno/a6xx_gmu.h
+++ b/drivers/gpu/drm/msm/adreno/a6xx_gmu.h
@@ -63,6 +63,9 @@ struct a6xx_gmu {
struct clk_bulk_data *clocks;
struct clk *core_clk;
+ /* current performance index set externally */
+ int current_perf_index;
+
int nr_gpu_freqs;
unsigned long gpu_freqs[16];
u32 gx_arc_votes[16];
diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gpu.c b/drivers/gpu/drm/msm/adreno/a6xx_gpu.c
index dc8ec2c94301..daf07800cde0 100644
--- a/drivers/gpu/drm/msm/adreno/a6xx_gpu.c
+++ b/drivers/gpu/drm/msm/adreno/a6xx_gpu.c
@@ -1,5 +1,5 @@
// SPDX-License-Identifier: GPL-2.0
-/* Copyright (c) 2017-2018 The Linux Foundation. All rights reserved. */
+/* Copyright (c) 2017-2019 The Linux Foundation. All rights reserved. */
#include "msm_gem.h"
@@ -378,6 +378,18 @@ static int a6xx_hw_init(struct msm_gpu *gpu)
struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu);
int ret;
+ /*
+ * During a previous slumber, GBIF halt is asserted to ensure
+ * no further transactions can go through the GPU before the
+ * GPU headswitch is turned off.
+ *
+ * This halt is deasserted once the headswitch goes off; but
+ * in case the headswitch doesn't go off, clear the GBIF halt
+ * here to ensure GPU wake-up doesn't fail because of
+ * halted GPU transactions.
+ */
+ gpu_write(gpu, REG_A6XX_GBIF_HALT, 0x0);
+
/* Make sure the GMU keeps the GPU on while we set it up */
a6xx_gmu_set_oob(&a6xx_gpu->gmu, GMU_OOB_GPU_SET);
@@ -406,12 +418,17 @@ static int a6xx_hw_init(struct msm_gpu *gpu)
gpu_write(gpu, REG_A6XX_TPL1_ADDR_MODE_CNTL, 0x1);
gpu_write(gpu, REG_A6XX_RBBM_SECVID_TSB_ADDR_MODE_CNTL, 0x1);
- /* enable hardware clockgating */
- a6xx_set_hwcg(gpu, true);
+ /*
+ * enable hardware clockgating
+ * For now enable clock gating only for a630
+ */
+ if (adreno_is_a630(adreno_gpu))
+ a6xx_set_hwcg(gpu, true);
- /* VBIF start */
- gpu_write(gpu, REG_A6XX_VBIF_GATE_OFF_WRREQ_EN, 0x00000009);
+ /* VBIF/GBIF start */
gpu_write(gpu, REG_A6XX_RBBM_VBIF_CLIENT_QOS_CNTL, 0x3);
+ if (adreno_is_a630(adreno_gpu))
+ gpu_write(gpu, REG_A6XX_VBIF_GATE_OFF_WRREQ_EN, 0x00000009);
/* Make all blocks contribute to the GPU BUSY perf counter */
gpu_write(gpu, REG_A6XX_RBBM_PERFCTR_GPU_BUSY_MASKED, 0xffffffff);
@@ -537,12 +554,19 @@ static int a6xx_hw_init(struct msm_gpu *gpu)
a6xx_flush(gpu, gpu->rb[0]);
if (!a6xx_idle(gpu, gpu->rb[0]))
return -EINVAL;
- } else {
- /* Print a warning so if we die, we know why */
+ } else if (ret == -ENODEV) {
+ /*
+ * This device does not use a zap shader (but print a warning
+ * just in case someone got their DT wrong; hopefully they
+ * have a debug UART to realize the error of their ways,
+ * because if you mess this up you are about to crash horribly)
+ */
dev_warn_once(gpu->dev->dev,
"Zap shader not enabled - using SECVID_TRUST_CNTL instead\n");
gpu_write(gpu, REG_A6XX_RBBM_SECVID_TRUST_CNTL, 0x0);
ret = 0;
+ } else {
+ return ret;
}
out:
@@ -724,6 +748,39 @@ static const u32 a6xx_register_offsets[REG_ADRENO_REGISTER_MAX] = {
REG_ADRENO_DEFINE(REG_ADRENO_CP_RB_CNTL, REG_A6XX_CP_RB_CNTL),
};
+#define GBIF_CLIENT_HALT_MASK BIT(0)
+#define GBIF_ARB_HALT_MASK BIT(1)
+
+static void a6xx_bus_clear_pending_transactions(struct adreno_gpu *adreno_gpu)
+{
+ struct msm_gpu *gpu = &adreno_gpu->base;
+
+ if (!a6xx_has_gbif(adreno_gpu)) {
+ gpu_write(gpu, REG_A6XX_VBIF_XIN_HALT_CTRL0, 0xf);
+ spin_until((gpu_read(gpu, REG_A6XX_VBIF_XIN_HALT_CTRL1) &
+ 0xf) == 0xf);
+ gpu_write(gpu, REG_A6XX_VBIF_XIN_HALT_CTRL0, 0);
+
+ return;
+ }
+
+ /* Halt new client requests on GBIF */
+ gpu_write(gpu, REG_A6XX_GBIF_HALT, GBIF_CLIENT_HALT_MASK);
+ spin_until((gpu_read(gpu, REG_A6XX_GBIF_HALT_ACK) &
+ (GBIF_CLIENT_HALT_MASK)) == GBIF_CLIENT_HALT_MASK);
+
+ /* Halt all AXI requests on GBIF */
+ gpu_write(gpu, REG_A6XX_GBIF_HALT, GBIF_ARB_HALT_MASK);
+ spin_until((gpu_read(gpu, REG_A6XX_GBIF_HALT_ACK) &
+ (GBIF_ARB_HALT_MASK)) == GBIF_ARB_HALT_MASK);
+
+ /*
+ * GMU needs DDR access in slumber path. Deassert GBIF halt now
+ * to allow for GMU to access system memory.
+ */
+ gpu_write(gpu, REG_A6XX_GBIF_HALT, 0x0);
+}
+
static int a6xx_pm_resume(struct msm_gpu *gpu)
{
struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
@@ -748,6 +805,16 @@ static int a6xx_pm_suspend(struct msm_gpu *gpu)
devfreq_suspend_device(gpu->devfreq.devfreq);
+ /*
+ * Make sure the GMU is idle before continuing (because some transitions
+ * may use VBIF)
+ */
+ a6xx_gmu_wait_for_idle(&a6xx_gpu->gmu);
+
+ /* Clear the VBIF pipe before shutting down */
+ /* FIXME: This accesses the GPU - do we need to make sure it is on? */
+ a6xx_bus_clear_pending_transactions(adreno_gpu);
+
return a6xx_gmu_stop(a6xx_gpu);
}
diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gpu.h b/drivers/gpu/drm/msm/adreno/a6xx_gpu.h
index 64399554f2dd..7239b8b60939 100644
--- a/drivers/gpu/drm/msm/adreno/a6xx_gpu.h
+++ b/drivers/gpu/drm/msm/adreno/a6xx_gpu.h
@@ -1,5 +1,5 @@
/* SPDX-License-Identifier: GPL-2.0 */
-/* Copyright (c) 2017 The Linux Foundation. All rights reserved. */
+/* Copyright (c) 2017, 2019 The Linux Foundation. All rights reserved. */
#ifndef __A6XX_GPU_H__
#define __A6XX_GPU_H__
@@ -42,6 +42,13 @@ struct a6xx_gpu {
#define A6XX_PROTECT_RDONLY(_reg, _len) \
((((_len) & 0x3FFF) << 18) | ((_reg) & 0x3FFFF))
+static inline bool a6xx_has_gbif(struct adreno_gpu *gpu)
+{
+ if (adreno_is_a630(gpu))
+ return false;
+
+ return true;
+}
int a6xx_gmu_resume(struct a6xx_gpu *gpu);
int a6xx_gmu_stop(struct a6xx_gpu *gpu);
diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gpu_state.c b/drivers/gpu/drm/msm/adreno/a6xx_gpu_state.c
index 691c1a277d91..d6023ba8033c 100644
--- a/drivers/gpu/drm/msm/adreno/a6xx_gpu_state.c
+++ b/drivers/gpu/drm/msm/adreno/a6xx_gpu_state.c
@@ -1,5 +1,5 @@
// SPDX-License-Identifier: GPL-2.0
-/* Copyright (c) 2018 The Linux Foundation. All rights reserved. */
+/* Copyright (c) 2018-2019 The Linux Foundation. All rights reserved. */
#include <linux/ascii85.h>
#include "msm_gem.h"
@@ -320,6 +320,7 @@ static void a6xx_get_debugbus(struct msm_gpu *gpu,
{
struct resource *res;
void __iomem *cxdbg = NULL;
+ int nr_debugbus_blocks;
/* Set up the GX debug bus */
@@ -374,9 +375,11 @@ static void a6xx_get_debugbus(struct msm_gpu *gpu,
cxdbg_write(cxdbg, REG_A6XX_CX_DBGC_CFG_DBGBUS_MASKL_3, 0);
}
- a6xx_state->debugbus = state_kcalloc(a6xx_state,
- ARRAY_SIZE(a6xx_debugbus_blocks),
- sizeof(*a6xx_state->debugbus));
+ nr_debugbus_blocks = ARRAY_SIZE(a6xx_debugbus_blocks) +
+ (a6xx_has_gbif(to_adreno_gpu(gpu)) ? 1 : 0);
+
+ a6xx_state->debugbus = state_kcalloc(a6xx_state, nr_debugbus_blocks,
+ sizeof(*a6xx_state->debugbus));
if (a6xx_state->debugbus) {
int i;
@@ -388,15 +391,31 @@ static void a6xx_get_debugbus(struct msm_gpu *gpu,
&a6xx_state->debugbus[i]);
a6xx_state->nr_debugbus = ARRAY_SIZE(a6xx_debugbus_blocks);
+
+ /*
+ * GBIF has the same debugbus as the other GPU blocks; fall back
+ * to the default path if the GPU uses GBIF. GBIF also uses
+ * exactly the same ID as VBIF.
+ */
+ if (a6xx_has_gbif(to_adreno_gpu(gpu))) {
+ a6xx_get_debugbus_block(gpu, a6xx_state,
+ &a6xx_gbif_debugbus_block,
+ &a6xx_state->debugbus[i]);
+
+ a6xx_state->nr_debugbus += 1;
+ }
}
- a6xx_state->vbif_debugbus =
- state_kcalloc(a6xx_state, 1,
- sizeof(*a6xx_state->vbif_debugbus));
+ /* Dump the VBIF debugbus on applicable targets */
+ if (!a6xx_has_gbif(to_adreno_gpu(gpu))) {
+ a6xx_state->vbif_debugbus =
+ state_kcalloc(a6xx_state, 1,
+ sizeof(*a6xx_state->vbif_debugbus));
- if (a6xx_state->vbif_debugbus)
- a6xx_get_vbif_debugbus_block(gpu, a6xx_state,
- a6xx_state->vbif_debugbus);
+ if (a6xx_state->vbif_debugbus)
+ a6xx_get_vbif_debugbus_block(gpu, a6xx_state,
+ a6xx_state->vbif_debugbus);
+ }
if (cxdbg) {
a6xx_state->cx_debugbus =
@@ -770,14 +789,16 @@ static void a6xx_get_gmu_registers(struct msm_gpu *gpu,
&a6xx_state->gmu_registers[1]);
}
+#define A6XX_GBIF_REGLIST_SIZE 1
static void a6xx_get_registers(struct msm_gpu *gpu,
struct a6xx_gpu_state *a6xx_state,
struct a6xx_crashdumper *dumper)
{
int i, count = ARRAY_SIZE(a6xx_ahb_reglist) +
ARRAY_SIZE(a6xx_reglist) +
- ARRAY_SIZE(a6xx_hlsq_reglist);
+ ARRAY_SIZE(a6xx_hlsq_reglist) + A6XX_GBIF_REGLIST_SIZE;
int index = 0;
+ struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
a6xx_state->registers = state_kcalloc(a6xx_state,
count, sizeof(*a6xx_state->registers));
@@ -792,6 +813,15 @@ static void a6xx_get_registers(struct msm_gpu *gpu,
a6xx_state, &a6xx_ahb_reglist[i],
&a6xx_state->registers[index++]);
+ if (a6xx_has_gbif(adreno_gpu))
+ a6xx_get_ahb_gpu_registers(gpu,
+ a6xx_state, &a6xx_gbif_reglist,
+ &a6xx_state->registers[index++]);
+ else
+ a6xx_get_ahb_gpu_registers(gpu,
+ a6xx_state, &a6xx_vbif_reglist,
+ &a6xx_state->registers[index++]);
+
for (i = 0; i < ARRAY_SIZE(a6xx_reglist); i++)
a6xx_get_crashdumper_registers(gpu,
a6xx_state, &a6xx_reglist[i],
diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gpu_state.h b/drivers/gpu/drm/msm/adreno/a6xx_gpu_state.h
index 68cccfa2870a..e67c20c415af 100644
--- a/drivers/gpu/drm/msm/adreno/a6xx_gpu_state.h
+++ b/drivers/gpu/drm/msm/adreno/a6xx_gpu_state.h
@@ -1,5 +1,5 @@
// SPDX-License-Identifier: GPL-2.0
-/* Copyright (c) 2018 The Linux Foundation. All rights reserved. */
+/* Copyright (c) 2018-2019 The Linux Foundation. All rights reserved. */
#ifndef _A6XX_CRASH_DUMP_H_
#define _A6XX_CRASH_DUMP_H_
@@ -307,11 +307,20 @@ static const u32 a6xx_vbif_registers[] = {
0x3410, 0x3410, 0x3800, 0x3801,
};
+static const u32 a6xx_gbif_registers[] = {
+	0x3C00, 0x3C0B, 0x3C40, 0x3C47, 0x3CC0, 0x3CD1, 0xE3A, 0xE3A,
+};
+
static const struct a6xx_registers a6xx_ahb_reglist[] = {
REGS(a6xx_ahb_registers, 0, 0),
- REGS(a6xx_vbif_registers, 0, 0),
};
+static const struct a6xx_registers a6xx_vbif_reglist =
+ REGS(a6xx_vbif_registers, 0, 0);
+
+static const struct a6xx_registers a6xx_gbif_reglist =
+ REGS(a6xx_gbif_registers, 0, 0);
+
static const u32 a6xx_gmu_gx_registers[] = {
/* GMU GX */
0x0000, 0x0000, 0x0010, 0x0013, 0x0016, 0x0016, 0x0018, 0x001b,
@@ -422,6 +431,9 @@ static const struct a6xx_debugbus_block {
DEBUGBUS(A6XX_DBGBUS_TPL1_3, 0x100),
};
+static const struct a6xx_debugbus_block a6xx_gbif_debugbus_block =
+ DEBUGBUS(A6XX_DBGBUS_VBIF, 0x100);
+
static const struct a6xx_debugbus_block a6xx_cx_debugbus_blocks[] = {
DEBUGBUS(A6XX_DBGBUS_GMU_CX, 0x100),
DEBUGBUS(A6XX_DBGBUS_CX, 0x100),
diff --git a/drivers/gpu/drm/msm/adreno/adreno_device.c b/drivers/gpu/drm/msm/adreno/adreno_device.c
index fbbdf86504f5..cb3a6e597d76 100644
--- a/drivers/gpu/drm/msm/adreno/adreno_device.c
+++ b/drivers/gpu/drm/msm/adreno/adreno_device.c
@@ -167,6 +167,17 @@ static const struct adreno_info gpulist[] = {
.init = a5xx_gpu_init,
.zapfw = "a540_zap.mdt",
}, {
+ .rev = ADRENO_REV(6, 1, 8, ANY_ID),
+ .revn = 618,
+ .name = "A618",
+ .fw = {
+ [ADRENO_FW_SQE] = "a630_sqe.fw",
+ [ADRENO_FW_GMU] = "a630_gmu.bin",
+ },
+ .gmem = SZ_512K,
+ .inactive_period = DRM_MSM_INACTIVE_PERIOD,
+ .init = a6xx_gpu_init,
+ }, {
.rev = ADRENO_REV(6, 3, 0, ANY_ID),
.revn = 630,
.name = "A630",
diff --git a/drivers/gpu/drm/msm/adreno/adreno_gpu.c b/drivers/gpu/drm/msm/adreno/adreno_gpu.c
index 0783e4b5486a..7fd29829b2fa 100644
--- a/drivers/gpu/drm/msm/adreno/adreno_gpu.c
+++ b/drivers/gpu/drm/msm/adreno/adreno_gpu.c
@@ -26,6 +26,7 @@ static int zap_shader_load_mdt(struct msm_gpu *gpu, const char *fwname,
{
struct device *dev = &gpu->pdev->dev;
const struct firmware *fw;
+ const char *signed_fwname = NULL;
struct device_node *np, *mem_np;
struct resource r;
phys_addr_t mem_phys;
@@ -58,8 +59,43 @@ static int zap_shader_load_mdt(struct msm_gpu *gpu, const char *fwname,
mem_phys = r.start;
- /* Request the MDT file for the firmware */
- fw = adreno_request_fw(to_adreno_gpu(gpu), fwname);
+ /*
+ * Check for a firmware-name property. This is the new scheme
+ * to handle firmware that may be signed with device specific
+ * keys, allowing us to have a different zap fw path for different
+ * devices.
+ *
+ * If the firmware-name property is found, we bypass the
+ * adreno_request_fw() mechanism, because we don't need to handle
+ * the /lib/firmware/qcom/... vs /lib/firmware/... case.
+ *
+ * If the firmware-name property is not found, for backwards
+ * compatibility we fall back to the fwname from the gpulist
+ * table.
+ */
+ of_property_read_string_index(np, "firmware-name", 0, &signed_fwname);
+ if (signed_fwname) {
+ fwname = signed_fwname;
+ ret = request_firmware_direct(&fw, fwname, gpu->dev->dev);
+ if (ret)
+ fw = ERR_PTR(ret);
+ } else if (fwname) {
+ /* Request the MDT file from the default location: */
+ fw = adreno_request_fw(to_adreno_gpu(gpu), fwname);
+ } else {
+ /*
+	 * For new targets that require a zap shader, we insist on the
+	 * firmware-name property rather than falling back to a firmware
+	 * name specified in gpulist.
+ *
+ * Because the firmware is signed with a (potentially)
+ * device specific key, having the name come from gpulist
+ * was a bad idea, and is only provided for backwards
+ * compatibility for older targets.
+ */
+ return -ENODEV;
+ }
+
if (IS_ERR(fw)) {
DRM_DEV_ERROR(dev, "Unable to load %s\n", fwname);
return PTR_ERR(fw);
@@ -95,7 +131,7 @@ static int zap_shader_load_mdt(struct msm_gpu *gpu, const char *fwname,
* not. But since we've already gotten through adreno_request_fw()
* we know which of the two cases it is:
*/
- if (to_adreno_gpu(gpu)->fwloc == FW_LOCATION_LEGACY) {
+ if (signed_fwname || (to_adreno_gpu(gpu)->fwloc == FW_LOCATION_LEGACY)) {
ret = qcom_mdt_load(dev, fw, fwname, pasid,
mem_region, mem_phys, mem_size, NULL);
} else {
@@ -146,14 +182,6 @@ int adreno_zap_shader_load(struct msm_gpu *gpu, u32 pasid)
return -EPROBE_DEFER;
}
- /* Each GPU has a target specific zap shader firmware name to use */
- if (!adreno_gpu->info->zapfw) {
- zap_available = false;
- DRM_DEV_ERROR(&pdev->dev,
- "Zap shader firmware file not specified for this target\n");
- return -ENODEV;
- }
-
return zap_shader_load_mdt(gpu, adreno_gpu->info->zapfw, pasid);
}
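The selection order in zap_shader_load_mdt() is: a DT firmware-name wins and is loaded verbatim, then the gpulist name goes through the legacy path-probing helper, else the load fails with -ENODEV. A minimal sketch of that decision, with stub loaders standing in for request_firmware_direct()/adreno_request_fw() (the example path is hypothetical):

#include <errno.h>
#include <stdio.h>

/* Stubs standing in for the kernel firmware loaders (assumption:
 * both return 0 on success, a negative errno otherwise). */
static int request_direct(const char *name) { printf("direct: %s\n", name); return 0; }
static int request_legacy(const char *name) { printf("legacy: %s\n", name); return 0; }

/* Mirror of the selection order above. */
static int load_zap(const char *dt_name, const char *table_name)
{
	if (dt_name)
		return request_direct(dt_name);	/* device-specific signed fw */
	if (table_name)
		return request_legacy(table_name);	/* backwards compat */
	return -ENODEV;
}

int main(void)
{
	load_zap("qcom/sc7180/a615_zap.mbn", NULL);	/* hypothetical path */
	load_zap(NULL, "a630_zap.mdt");
	printf("neither: %d\n", load_zap(NULL, NULL));
	return 0;
}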
@@ -826,7 +854,7 @@ static int adreno_get_legacy_pwrlevels(struct device *dev)
node = of_get_compatible_child(dev->of_node, "qcom,gpu-pwrlevels");
if (!node) {
- DRM_DEV_ERROR(dev, "Could not find the GPU powerlevels\n");
+ DRM_DEV_DEBUG(dev, "Could not find the GPU powerlevels\n");
return -ENXIO;
}
@@ -887,10 +915,21 @@ static int adreno_get_pwrlevels(struct device *dev,
DBG("fast_rate=%u, slow_rate=27000000", gpu->fast_rate);
/* Check for an interconnect path for the bus */
- gpu->icc_path = of_icc_get(dev, NULL);
+ gpu->icc_path = of_icc_get(dev, "gfx-mem");
+ if (!gpu->icc_path) {
+ /*
+		 * Keep compatibility with device trees that don't have an
+ * interconnect-names property.
+ */
+ gpu->icc_path = of_icc_get(dev, NULL);
+ }
if (IS_ERR(gpu->icc_path))
gpu->icc_path = NULL;
+ gpu->ocmem_icc_path = of_icc_get(dev, "ocmem");
+ if (IS_ERR(gpu->ocmem_icc_path))
+ gpu->ocmem_icc_path = NULL;
+
return 0;
}
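of_icc_get() returns NULL when no matching path is described in the DT (not an error) and an ERR_PTR() on real failures, which is what makes the named-then-anonymous fallback above safe for old device trees. A small model of that fallback, with a stub in place of of_icc_get():

#include <stdio.h>

/* Userspace stand-in for of_icc_get(): NULL means "no such path
 * described in DT" rather than an error (ERR_PTR cases omitted). */
struct icc_path { const char *name; };
static struct icc_path gfx = { "gfx-mem" };

static struct icc_path *icc_get_stub(const char *name, int dt_has_names)
{
	if (!name)
		return &gfx;	/* anonymous lookup: first path wins */
	if (!dt_has_names)
		return NULL;	/* named lookup fails on an old DT */
	return &gfx;
}

int main(void)
{
	int dt_has_names = 0;	/* older DT without interconnect-names */
	struct icc_path *p = icc_get_stub("gfx-mem", dt_has_names);

	/* Same fallback as the driver: try the named path first, then
	 * the anonymous one so old device trees keep working. */
	if (!p)
		p = icc_get_stub(NULL, dt_has_names);
	printf("using path: %s\n", p ? p->name : "(none)");
	return 0;
}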
@@ -977,6 +1016,7 @@ void adreno_gpu_cleanup(struct adreno_gpu *adreno_gpu)
release_firmware(adreno_gpu->fw[i]);
icc_put(gpu->icc_path);
+ icc_put(gpu->ocmem_icc_path);
msm_gpu_cleanup(&adreno_gpu->base);
}
diff --git a/drivers/gpu/drm/msm/adreno/adreno_gpu.h b/drivers/gpu/drm/msm/adreno/adreno_gpu.h
index e71a7570ef72..9ff4e550e7bd 100644
--- a/drivers/gpu/drm/msm/adreno/adreno_gpu.h
+++ b/drivers/gpu/drm/msm/adreno/adreno_gpu.h
@@ -3,7 +3,7 @@
* Copyright (C) 2013 Red Hat
* Author: Rob Clark <robdclark@gmail.com>
*
- * Copyright (c) 2014,2017 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2014,2017, 2019 The Linux Foundation. All rights reserved.
*/
#ifndef __ADRENO_GPU_H__
@@ -227,6 +227,16 @@ static inline int adreno_is_a540(struct adreno_gpu *gpu)
return gpu->revn == 540;
}
+static inline int adreno_is_a618(struct adreno_gpu *gpu)
+{
+ return gpu->revn == 618;
+}
+
+static inline int adreno_is_a630(struct adreno_gpu *gpu)
+{
+ return gpu->revn == 630;
+}
+
int adreno_get_param(struct msm_gpu *gpu, uint32_t param, uint64_t *value);
const struct firmware *adreno_request_fw(struct adreno_gpu *adreno_gpu,
const char *fwname);
@@ -330,10 +340,7 @@ OUT_PKT7(struct msm_ringbuffer *ring, uint8_t opcode, uint16_t cnt)
static inline bool adreno_reg_check(struct adreno_gpu *gpu,
enum adreno_regs offset_name)
{
- if (offset_name >= REG_ADRENO_REGISTER_MAX ||
- !gpu->reg_offsets[offset_name]) {
- BUG();
- }
+ BUG_ON(offset_name >= REG_ADRENO_REGISTER_MAX || !gpu->reg_offsets[offset_name]);
/*
* REG_SKIP is a special value that tell us that the register in
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c
index f197dce54576..bf513411b243 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c
@@ -197,10 +197,6 @@ static void _dpu_crtc_blend_setup(struct drm_crtc *crtc)
DPU_DEBUG("%s\n", dpu_crtc->name);
for (i = 0; i < cstate->num_mixers; i++) {
- if (!mixer[i].hw_lm || !mixer[i].lm_ctl) {
- DPU_ERROR("invalid lm or ctl assigned to mixer\n");
- return;
- }
mixer[i].mixer_op_mode = 0;
mixer[i].flush_mask = 0;
if (mixer[i].lm_ctl->ops.clear_all_blendstages)
@@ -1113,14 +1109,9 @@ static int _dpu_debugfs_status_show(struct seq_file *s, void *data)
for (i = 0; i < cstate->num_mixers; ++i) {
m = &cstate->mixers[i];
- if (!m->hw_lm)
- seq_printf(s, "\tmixer[%d] has no lm\n", i);
- else if (!m->lm_ctl)
- seq_printf(s, "\tmixer[%d] has no ctl\n", i);
- else
- seq_printf(s, "\tmixer:%d ctl:%d width:%d height:%d\n",
- m->hw_lm->idx - LM_0, m->lm_ctl->idx - CTL_0,
- out_width, mode->vdisplay);
+ seq_printf(s, "\tmixer:%d ctl:%d width:%d height:%d\n",
+ m->hw_lm->idx - LM_0, m->lm_ctl->idx - CTL_0,
+ out_width, mode->vdisplay);
}
seq_puts(s, "\n");
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c
index f96e142c4361..f8ac3bf60fd6 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c
@@ -58,7 +58,7 @@
#define IDLE_SHORT_TIMEOUT 1
-#define MAX_VDISPLAY_SPLIT 1080
+#define MAX_HDISPLAY_SPLIT 1080
/* timeout in frames waiting for frame done */
#define DPU_ENCODER_FRAME_DONE_TIMEOUT_FRAMES 5
@@ -233,7 +233,7 @@ int dpu_encoder_helper_wait_for_irq(struct dpu_encoder_phys *phys_enc,
u32 irq_status;
int ret;
- if (!phys_enc || !wait_info || intr_idx >= INTR_IDX_MAX) {
+ if (!wait_info || intr_idx >= INTR_IDX_MAX) {
DPU_ERROR("invalid params\n");
return -EINVAL;
}
@@ -308,7 +308,7 @@ int dpu_encoder_helper_register_irq(struct dpu_encoder_phys *phys_enc,
struct dpu_encoder_irq *irq;
int ret = 0;
- if (!phys_enc || intr_idx >= INTR_IDX_MAX) {
+ if (intr_idx >= INTR_IDX_MAX) {
DPU_ERROR("invalid params\n");
return -EINVAL;
}
@@ -363,10 +363,6 @@ int dpu_encoder_helper_unregister_irq(struct dpu_encoder_phys *phys_enc,
struct dpu_encoder_irq *irq;
int ret;
- if (!phys_enc) {
- DPU_ERROR("invalid encoder\n");
- return -EINVAL;
- }
irq = &phys_enc->irq[intr_idx];
/* silently skip irqs that weren't registered */
@@ -415,7 +411,7 @@ void dpu_encoder_get_hw_resources(struct drm_encoder *drm_enc,
for (i = 0; i < dpu_enc->num_phys_encs; i++) {
struct dpu_encoder_phys *phys = dpu_enc->phys_encs[i];
- if (phys && phys->ops.get_hw_resources)
+ if (phys->ops.get_hw_resources)
phys->ops.get_hw_resources(phys, hw_res);
}
}
@@ -438,7 +434,7 @@ static void dpu_encoder_destroy(struct drm_encoder *drm_enc)
for (i = 0; i < dpu_enc->num_phys_encs; i++) {
struct dpu_encoder_phys *phys = dpu_enc->phys_encs[i];
- if (phys && phys->ops.destroy) {
+ if (phys->ops.destroy) {
phys->ops.destroy(phys);
--dpu_enc->num_phys_encs;
dpu_enc->phys_encs[i] = NULL;
@@ -464,7 +460,7 @@ void dpu_encoder_helper_split_config(
struct dpu_hw_mdp *hw_mdptop;
struct msm_display_info *disp_info;
- if (!phys_enc || !phys_enc->hw_mdptop || !phys_enc->parent) {
+ if (!phys_enc->hw_mdptop || !phys_enc->parent) {
DPU_ERROR("invalid arg(s), encoder %d\n", phys_enc != 0);
return;
}
@@ -534,8 +530,23 @@ static struct msm_display_topology dpu_encoder_get_topology(
if (dpu_enc->phys_encs[i])
intf_count++;
- /* User split topology for width > 1080 */
- topology.num_lm = (mode->vdisplay > MAX_VDISPLAY_SPLIT) ? 2 : 1;
+ /* Datapath topology selection
+ *
+ * Dual display
+	 * 2 LM, 2 INTF (split display using two interfaces)
+	 *
+	 * Single display
+	 * 1 LM, 1 INTF
+	 * 2 LM, 1 INTF (stream merge to support high-resolution interfaces)
+ *
+ */
+ if (intf_count == 2)
+ topology.num_lm = 2;
+ else if (!dpu_kms->catalog->caps->has_3d_merge)
+ topology.num_lm = 1;
+ else
+ topology.num_lm = (mode->hdisplay > MAX_HDISPLAY_SPLIT) ? 2 : 1;
+
topology.num_enc = 0;
topology.num_intf = intf_count;
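The layer-mixer count now follows the decision table in the comment above rather than the display height. A self-contained sketch of the same logic, assuming MAX_HDISPLAY_SPLIT stays at 1080:

#include <stdio.h>
#include <stdbool.h>

#define MAX_HDISPLAY_SPLIT 1080

/* Same decision table as the comment above, as a pure function. */
static int pick_num_lm(int intf_count, bool has_3d_merge, int hdisplay)
{
	if (intf_count == 2)
		return 2;	/* dual display: split across two LMs */
	if (!has_3d_merge)
		return 1;	/* no merge block: a single LM per intf */
	return hdisplay > MAX_HDISPLAY_SPLIT ? 2 : 1;
}

int main(void)
{
	printf("dual display       -> %d LM\n", pick_num_lm(2, true, 2160));
	printf("no 3D merge, wide  -> %d LM\n", pick_num_lm(1, false, 2560));
	printf("merge, 1080 wide   -> %d LM\n", pick_num_lm(1, true, 1080));
	printf("merge, 2560 wide   -> %d LM\n", pick_num_lm(1, true, 2560));
	return 0;
}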
@@ -583,10 +594,10 @@ static int dpu_encoder_virt_atomic_check(
for (i = 0; i < dpu_enc->num_phys_encs; i++) {
struct dpu_encoder_phys *phys = dpu_enc->phys_encs[i];
- if (phys && phys->ops.atomic_check)
+ if (phys->ops.atomic_check)
ret = phys->ops.atomic_check(phys, crtc_state,
conn_state);
- else if (phys && phys->ops.mode_fixup)
+ else if (phys->ops.mode_fixup)
if (!phys->ops.mode_fixup(phys, mode, adj_mode))
ret = -EINVAL;
@@ -682,7 +693,7 @@ static void _dpu_encoder_irq_control(struct drm_encoder *drm_enc, bool enable)
for (i = 0; i < dpu_enc->num_phys_encs; i++) {
struct dpu_encoder_phys *phys = dpu_enc->phys_encs[i];
- if (phys && phys->ops.irq_control)
+ if (phys->ops.irq_control)
phys->ops.irq_control(phys, enable);
}
@@ -1032,46 +1043,43 @@ static void dpu_encoder_virt_mode_set(struct drm_encoder *drm_enc,
for (i = 0; i < dpu_enc->num_phys_encs; i++) {
struct dpu_encoder_phys *phys = dpu_enc->phys_encs[i];
- if (phys) {
- if (!dpu_enc->hw_pp[i]) {
- DPU_ERROR_ENC(dpu_enc, "no pp block assigned"
- "at idx: %d\n", i);
- goto error;
- }
+ if (!dpu_enc->hw_pp[i]) {
+ DPU_ERROR_ENC(dpu_enc,
+ "no pp block assigned at idx: %d\n", i);
+ goto error;
+ }
- if (!hw_ctl[i]) {
- DPU_ERROR_ENC(dpu_enc, "no ctl block assigned"
- "at idx: %d\n", i);
- goto error;
- }
+ if (!hw_ctl[i]) {
+ DPU_ERROR_ENC(dpu_enc,
+ "no ctl block assigned at idx: %d\n", i);
+ goto error;
+ }
- phys->hw_pp = dpu_enc->hw_pp[i];
- phys->hw_ctl = hw_ctl[i];
+ phys->hw_pp = dpu_enc->hw_pp[i];
+ phys->hw_ctl = hw_ctl[i];
- dpu_rm_init_hw_iter(&hw_iter, drm_enc->base.id,
- DPU_HW_BLK_INTF);
- for (j = 0; j < MAX_CHANNELS_PER_ENC; j++) {
- struct dpu_hw_intf *hw_intf;
+ dpu_rm_init_hw_iter(&hw_iter, drm_enc->base.id,
+ DPU_HW_BLK_INTF);
+ for (j = 0; j < MAX_CHANNELS_PER_ENC; j++) {
+ struct dpu_hw_intf *hw_intf;
- if (!dpu_rm_get_hw(&dpu_kms->rm, &hw_iter))
- break;
+ if (!dpu_rm_get_hw(&dpu_kms->rm, &hw_iter))
+ break;
- hw_intf = (struct dpu_hw_intf *)hw_iter.hw;
- if (hw_intf->idx == phys->intf_idx)
- phys->hw_intf = hw_intf;
- }
+ hw_intf = (struct dpu_hw_intf *)hw_iter.hw;
+ if (hw_intf->idx == phys->intf_idx)
+ phys->hw_intf = hw_intf;
+ }
- if (!phys->hw_intf) {
- DPU_ERROR_ENC(dpu_enc,
- "no intf block assigned at idx: %d\n",
- i);
+ if (!phys->hw_intf) {
+ DPU_ERROR_ENC(dpu_enc,
+ "no intf block assigned at idx: %d\n", i);
goto error;
- }
-
- phys->connector = conn->state->connector;
- if (phys->ops.mode_set)
- phys->ops.mode_set(phys, mode, adj_mode);
}
+
+ phys->connector = conn->state->connector;
+ if (phys->ops.mode_set)
+ phys->ops.mode_set(phys, mode, adj_mode);
}
dpu_enc->mode_set_complete = true;
@@ -1203,7 +1211,7 @@ static void dpu_encoder_virt_disable(struct drm_encoder *drm_enc)
for (i = 0; i < dpu_enc->num_phys_encs; i++) {
struct dpu_encoder_phys *phys = dpu_enc->phys_encs[i];
- if (phys && phys->ops.disable)
+ if (phys->ops.disable)
phys->ops.disable(phys);
}
@@ -1216,8 +1224,7 @@ static void dpu_encoder_virt_disable(struct drm_encoder *drm_enc)
dpu_encoder_resource_control(drm_enc, DPU_ENC_RC_EVENT_STOP);
for (i = 0; i < dpu_enc->num_phys_encs; i++) {
- if (dpu_enc->phys_encs[i])
- dpu_enc->phys_encs[i]->connector = NULL;
+ dpu_enc->phys_encs[i]->connector = NULL;
}
DPU_DEBUG_ENC(dpu_enc, "encoder disabled\n");
@@ -1307,7 +1314,7 @@ void dpu_encoder_toggle_vblank_for_crtc(struct drm_encoder *drm_enc,
for (i = 0; i < dpu_enc->num_phys_encs; i++) {
struct dpu_encoder_phys *phys = dpu_enc->phys_encs[i];
- if (phys && phys->ops.control_vblank_irq)
+ if (phys->ops.control_vblank_irq)
phys->ops.control_vblank_irq(phys, enable);
}
}
@@ -1419,7 +1426,7 @@ static void _dpu_encoder_trigger_flush(struct drm_encoder *drm_enc,
}
ctl = phys->hw_ctl;
- if (!ctl || !ctl->ops.trigger_flush) {
+ if (!ctl->ops.trigger_flush) {
DPU_ERROR("missing trigger cb\n");
return;
}
@@ -1463,13 +1470,8 @@ void dpu_encoder_helper_trigger_start(struct dpu_encoder_phys *phys_enc)
{
struct dpu_hw_ctl *ctl;
- if (!phys_enc) {
- DPU_ERROR("invalid encoder\n");
- return;
- }
-
ctl = phys_enc->hw_ctl;
- if (ctl && ctl->ops.trigger_start) {
+ if (ctl->ops.trigger_start) {
ctl->ops.trigger_start(ctl);
trace_dpu_enc_trigger_start(DRMID(phys_enc->parent), ctl->idx);
}
@@ -1506,14 +1508,10 @@ static void dpu_encoder_helper_hw_reset(struct dpu_encoder_phys *phys_enc)
struct dpu_hw_ctl *ctl;
int rc;
- if (!phys_enc) {
- DPU_ERROR("invalid encoder\n");
- return;
- }
dpu_enc = to_dpu_encoder_virt(phys_enc->parent);
ctl = phys_enc->hw_ctl;
- if (!ctl || !ctl->ops.reset)
+ if (!ctl->ops.reset)
return;
DRM_DEBUG_KMS("id:%u ctl %d reset\n", DRMID(phys_enc->parent),
@@ -1550,12 +1548,10 @@ static void _dpu_encoder_kickoff_phys(struct dpu_encoder_virt *dpu_enc)
for (i = 0; i < dpu_enc->num_phys_encs; i++) {
struct dpu_encoder_phys *phys = dpu_enc->phys_encs[i];
- if (!phys || phys->enable_state == DPU_ENC_DISABLED)
+ if (phys->enable_state == DPU_ENC_DISABLED)
continue;
ctl = phys->hw_ctl;
- if (!ctl)
- continue;
/*
* This is cleared in frame_done worker, which isn't invoked
@@ -1603,17 +1599,15 @@ void dpu_encoder_trigger_kickoff_pending(struct drm_encoder *drm_enc)
for (i = 0; i < dpu_enc->num_phys_encs; i++) {
phys = dpu_enc->phys_encs[i];
- if (phys && phys->hw_ctl) {
- ctl = phys->hw_ctl;
- if (ctl->ops.clear_pending_flush)
- ctl->ops.clear_pending_flush(ctl);
+ ctl = phys->hw_ctl;
+ if (ctl->ops.clear_pending_flush)
+ ctl->ops.clear_pending_flush(ctl);
- /* update only for command mode primary ctl */
- if ((phys == dpu_enc->cur_master) &&
- (disp_info->capabilities & MSM_DISPLAY_CAP_CMD_MODE)
- && ctl->ops.trigger_pending)
- ctl->ops.trigger_pending(ctl);
- }
+ /* update only for command mode primary ctl */
+ if ((phys == dpu_enc->cur_master) &&
+ (disp_info->capabilities & MSM_DISPLAY_CAP_CMD_MODE)
+ && ctl->ops.trigger_pending)
+ ctl->ops.trigger_pending(ctl);
}
}
@@ -1773,12 +1767,10 @@ void dpu_encoder_prepare_for_kickoff(struct drm_encoder *drm_enc)
DPU_ATRACE_BEGIN("enc_prepare_for_kickoff");
for (i = 0; i < dpu_enc->num_phys_encs; i++) {
phys = dpu_enc->phys_encs[i];
- if (phys) {
- if (phys->ops.prepare_for_kickoff)
- phys->ops.prepare_for_kickoff(phys);
- if (phys->enable_state == DPU_ENC_ERR_NEEDS_HW_RESET)
- needs_hw_reset = true;
- }
+ if (phys->ops.prepare_for_kickoff)
+ phys->ops.prepare_for_kickoff(phys);
+ if (phys->enable_state == DPU_ENC_ERR_NEEDS_HW_RESET)
+ needs_hw_reset = true;
}
DPU_ATRACE_END("enc_prepare_for_kickoff");
@@ -1819,7 +1811,7 @@ void dpu_encoder_kickoff(struct drm_encoder *drm_enc)
/* allow phys encs to handle any post-kickoff business */
for (i = 0; i < dpu_enc->num_phys_encs; i++) {
phys = dpu_enc->phys_encs[i];
- if (phys && phys->ops.handle_post_kickoff)
+ if (phys->ops.handle_post_kickoff)
phys->ops.handle_post_kickoff(phys);
}
@@ -1848,7 +1840,7 @@ void dpu_encoder_prepare_commit(struct drm_encoder *drm_enc)
for (i = 0; i < dpu_enc->num_phys_encs; i++) {
phys = dpu_enc->phys_encs[i];
- if (phys && phys->ops.prepare_commit)
+ if (phys->ops.prepare_commit)
phys->ops.prepare_commit(phys);
}
}
@@ -1863,9 +1855,6 @@ static int _dpu_encoder_status_show(struct seq_file *s, void *data)
for (i = 0; i < dpu_enc->num_phys_encs; i++) {
struct dpu_encoder_phys *phys = dpu_enc->phys_encs[i];
- if (!phys)
- continue;
-
seq_printf(s, "intf:%d vsync:%8d underrun:%8d ",
phys->intf_idx - INTF_0,
atomic_read(&phys->vsync_cnt),
@@ -1924,8 +1913,7 @@ static int _dpu_encoder_init_debugfs(struct drm_encoder *drm_enc)
dpu_enc->debugfs_root, dpu_enc, &debugfs_status_fops);
for (i = 0; i < dpu_enc->num_phys_encs; i++)
- if (dpu_enc->phys_encs[i] &&
- dpu_enc->phys_encs[i]->ops.late_register)
+ if (dpu_enc->phys_encs[i]->ops.late_register)
dpu_enc->phys_encs[i]->ops.late_register(
dpu_enc->phys_encs[i],
dpu_enc->debugfs_root);
@@ -2094,11 +2082,8 @@ static int dpu_encoder_setup_display(struct dpu_encoder_virt *dpu_enc,
for (i = 0; i < dpu_enc->num_phys_encs; i++) {
struct dpu_encoder_phys *phys = dpu_enc->phys_encs[i];
-
- if (phys) {
- atomic_set(&phys->vsync_cnt, 0);
- atomic_set(&phys->underrun_cnt, 0);
- }
+ atomic_set(&phys->vsync_cnt, 0);
+ atomic_set(&phys->underrun_cnt, 0);
}
mutex_unlock(&dpu_enc->enc_lock);
@@ -2240,8 +2225,6 @@ int dpu_encoder_wait_for_event(struct drm_encoder *drm_enc,
for (i = 0; i < dpu_enc->num_phys_encs; i++) {
struct dpu_encoder_phys *phys = dpu_enc->phys_encs[i];
- if (!phys)
- continue;
switch (event) {
case MSM_ENC_COMMIT_DONE:
@@ -2257,7 +2240,7 @@ int dpu_encoder_wait_for_event(struct drm_encoder *drm_enc,
DPU_ERROR_ENC(dpu_enc, "unknown wait event %d\n",
event);
return -EINVAL;
- };
+ }
if (fn_wait) {
DPU_ATRACE_BEGIN("wait_for_completion_event");
@@ -2274,7 +2257,6 @@ int dpu_encoder_wait_for_event(struct drm_encoder *drm_enc,
enum dpu_intf_mode dpu_encoder_get_intf_mode(struct drm_encoder *encoder)
{
struct dpu_encoder_virt *dpu_enc = NULL;
- int i;
if (!encoder) {
DPU_ERROR("invalid encoder\n");
@@ -2285,12 +2267,8 @@ enum dpu_intf_mode dpu_encoder_get_intf_mode(struct drm_encoder *encoder)
if (dpu_enc->cur_master)
return dpu_enc->cur_master->intf_mode;
- for (i = 0; i < dpu_enc->num_phys_encs; i++) {
- struct dpu_encoder_phys *phys = dpu_enc->phys_encs[i];
-
- if (phys)
- return phys->intf_mode;
- }
+ if (dpu_enc->num_phys_encs)
+ return dpu_enc->phys_encs[0]->intf_mode;
return INTF_MODE_NONE;
}
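The checks dropped throughout this file all rely on one invariant: once encoder setup succeeds, every phys_encs[] slot up to num_phys_encs is a valid object, so only the optional callbacks still need a test. A toy illustration of coding to that invariant (types are illustrative):

#include <stdio.h>

struct phys_ops { void (*prepare_commit)(int idx); };
struct phys { struct phys_ops ops; };

static void do_commit(int idx) { printf("commit phys %d\n", idx); }

int main(void)
{
	/* Invariant mirrored from dpu_encoder: each slot up to num_phys
	 * is a valid object after setup, so only the optional callback
	 * needs a check, not the element itself. */
	struct phys encs[2] = { { { do_commit } }, { { NULL } } };
	int num_phys = 2;

	for (int i = 0; i < num_phys; i++)
		if (encs[i].ops.prepare_commit)
			encs[i].ops.prepare_commit(i);
	return 0;
}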
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_cmd.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_cmd.c
index 047960949fbb..39e1e280ba44 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_cmd.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_cmd.c
@@ -45,8 +45,7 @@ static bool dpu_encoder_phys_cmd_mode_fixup(
const struct drm_display_mode *mode,
struct drm_display_mode *adj_mode)
{
- if (phys_enc)
- DPU_DEBUG_CMDENC(to_dpu_encoder_phys_cmd(phys_enc), "\n");
+ DPU_DEBUG_CMDENC(to_dpu_encoder_phys_cmd(phys_enc), "\n");
return true;
}
@@ -58,11 +57,8 @@ static void _dpu_encoder_phys_cmd_update_intf_cfg(
struct dpu_hw_ctl *ctl;
struct dpu_hw_intf_cfg intf_cfg = { 0 };
- if (!phys_enc)
- return;
-
ctl = phys_enc->hw_ctl;
- if (!ctl || !ctl->ops.setup_intf_cfg)
+ if (!ctl->ops.setup_intf_cfg)
return;
intf_cfg.intf = phys_enc->intf_idx;
@@ -79,7 +75,7 @@ static void dpu_encoder_phys_cmd_pp_tx_done_irq(void *arg, int irq_idx)
int new_cnt;
u32 event = DPU_ENCODER_FRAME_EVENT_DONE;
- if (!phys_enc || !phys_enc->hw_pp)
+ if (!phys_enc->hw_pp)
return;
DPU_ATRACE_BEGIN("pp_done_irq");
@@ -106,7 +102,7 @@ static void dpu_encoder_phys_cmd_pp_rd_ptr_irq(void *arg, int irq_idx)
struct dpu_encoder_phys *phys_enc = arg;
struct dpu_encoder_phys_cmd *cmd_enc;
- if (!phys_enc || !phys_enc->hw_pp)
+ if (!phys_enc->hw_pp)
return;
DPU_ATRACE_BEGIN("rd_ptr_irq");
@@ -125,9 +121,6 @@ static void dpu_encoder_phys_cmd_ctl_start_irq(void *arg, int irq_idx)
{
struct dpu_encoder_phys *phys_enc = arg;
- if (!phys_enc || !phys_enc->hw_ctl)
- return;
-
DPU_ATRACE_BEGIN("ctl_start_irq");
atomic_add_unless(&phys_enc->pending_ctlstart_cnt, -1, 0);
@@ -141,9 +134,6 @@ static void dpu_encoder_phys_cmd_underrun_irq(void *arg, int irq_idx)
{
struct dpu_encoder_phys *phys_enc = arg;
- if (!phys_enc)
- return;
-
if (phys_enc->parent_ops->handle_underrun_virt)
phys_enc->parent_ops->handle_underrun_virt(phys_enc->parent,
phys_enc);
@@ -179,7 +169,7 @@ static void dpu_encoder_phys_cmd_mode_set(
struct dpu_encoder_phys_cmd *cmd_enc =
to_dpu_encoder_phys_cmd(phys_enc);
- if (!phys_enc || !mode || !adj_mode) {
+ if (!mode || !adj_mode) {
DPU_ERROR("invalid args\n");
return;
}
@@ -198,7 +188,7 @@ static int _dpu_encoder_phys_cmd_handle_ppdone_timeout(
u32 frame_event = DPU_ENCODER_FRAME_EVENT_ERROR;
bool do_log = false;
- if (!phys_enc || !phys_enc->hw_pp || !phys_enc->hw_ctl)
+ if (!phys_enc->hw_pp)
return -EINVAL;
cmd_enc->pp_timeout_report_cnt++;
@@ -247,11 +237,6 @@ static int _dpu_encoder_phys_cmd_wait_for_idle(
struct dpu_encoder_wait_info wait_info;
int ret;
- if (!phys_enc) {
- DPU_ERROR("invalid encoder\n");
- return -EINVAL;
- }
-
wait_info.wq = &phys_enc->pending_kickoff_wq;
wait_info.atomic_cnt = &phys_enc->pending_kickoff_cnt;
wait_info.timeout_ms = KICKOFF_TIMEOUT_MS;
@@ -273,7 +258,7 @@ static int dpu_encoder_phys_cmd_control_vblank_irq(
int ret = 0;
int refcount;
- if (!phys_enc || !phys_enc->hw_pp) {
+ if (!phys_enc->hw_pp) {
DPU_ERROR("invalid encoder\n");
return -EINVAL;
}
@@ -314,9 +299,6 @@ end:
static void dpu_encoder_phys_cmd_irq_control(struct dpu_encoder_phys *phys_enc,
bool enable)
{
- if (!phys_enc)
- return;
-
trace_dpu_enc_phys_cmd_irq_ctrl(DRMID(phys_enc->parent),
phys_enc->hw_pp->idx - PINGPONG_0,
enable, atomic_read(&phys_enc->vblank_refcount));
@@ -351,7 +333,7 @@ static void dpu_encoder_phys_cmd_tearcheck_config(
u32 vsync_hz;
struct dpu_kms *dpu_kms;
- if (!phys_enc || !phys_enc->hw_pp) {
+ if (!phys_enc->hw_pp) {
DPU_ERROR("invalid encoder\n");
return;
}
@@ -428,8 +410,7 @@ static void _dpu_encoder_phys_cmd_pingpong_config(
struct dpu_encoder_phys_cmd *cmd_enc =
to_dpu_encoder_phys_cmd(phys_enc);
- if (!phys_enc || !phys_enc->hw_ctl || !phys_enc->hw_pp
- || !phys_enc->hw_ctl->ops.setup_intf_cfg) {
+ if (!phys_enc->hw_pp || !phys_enc->hw_ctl->ops.setup_intf_cfg) {
DPU_ERROR("invalid arg(s), enc %d\n", phys_enc != 0);
return;
}
@@ -458,7 +439,7 @@ static void dpu_encoder_phys_cmd_enable_helper(
struct dpu_hw_ctl *ctl;
u32 flush_mask = 0;
- if (!phys_enc || !phys_enc->hw_ctl || !phys_enc->hw_pp) {
+ if (!phys_enc->hw_pp) {
DPU_ERROR("invalid arg(s), encoder %d\n", phys_enc != 0);
return;
}
@@ -480,7 +461,7 @@ static void dpu_encoder_phys_cmd_enable(struct dpu_encoder_phys *phys_enc)
struct dpu_encoder_phys_cmd *cmd_enc =
to_dpu_encoder_phys_cmd(phys_enc);
- if (!phys_enc || !phys_enc->hw_pp) {
+ if (!phys_enc->hw_pp) {
DPU_ERROR("invalid phys encoder\n");
return;
}
@@ -499,8 +480,7 @@ static void dpu_encoder_phys_cmd_enable(struct dpu_encoder_phys *phys_enc)
static void _dpu_encoder_phys_cmd_connect_te(
struct dpu_encoder_phys *phys_enc, bool enable)
{
- if (!phys_enc || !phys_enc->hw_pp ||
- !phys_enc->hw_pp->ops.connect_external_te)
+ if (!phys_enc->hw_pp || !phys_enc->hw_pp->ops.connect_external_te)
return;
trace_dpu_enc_phys_cmd_connect_te(DRMID(phys_enc->parent), enable);
@@ -518,7 +498,7 @@ static int dpu_encoder_phys_cmd_get_line_count(
{
struct dpu_hw_pingpong *hw_pp;
- if (!phys_enc || !phys_enc->hw_pp)
+ if (!phys_enc->hw_pp)
return -EINVAL;
if (!dpu_encoder_phys_cmd_is_master(phys_enc))
@@ -536,7 +516,7 @@ static void dpu_encoder_phys_cmd_disable(struct dpu_encoder_phys *phys_enc)
struct dpu_encoder_phys_cmd *cmd_enc =
to_dpu_encoder_phys_cmd(phys_enc);
- if (!phys_enc || !phys_enc->hw_pp) {
+ if (!phys_enc->hw_pp) {
DPU_ERROR("invalid encoder\n");
return;
}
@@ -559,10 +539,6 @@ static void dpu_encoder_phys_cmd_destroy(struct dpu_encoder_phys *phys_enc)
struct dpu_encoder_phys_cmd *cmd_enc =
to_dpu_encoder_phys_cmd(phys_enc);
- if (!phys_enc) {
- DPU_ERROR("invalid encoder\n");
- return;
- }
kfree(cmd_enc);
}
@@ -580,7 +556,7 @@ static void dpu_encoder_phys_cmd_prepare_for_kickoff(
to_dpu_encoder_phys_cmd(phys_enc);
int ret;
- if (!phys_enc || !phys_enc->hw_pp) {
+ if (!phys_enc->hw_pp) {
DPU_ERROR("invalid encoder\n");
return;
}
@@ -614,11 +590,6 @@ static int _dpu_encoder_phys_cmd_wait_for_ctl_start(
struct dpu_encoder_wait_info wait_info;
int ret;
- if (!phys_enc || !phys_enc->hw_ctl) {
- DPU_ERROR("invalid argument(s)\n");
- return -EINVAL;
- }
-
wait_info.wq = &phys_enc->pending_kickoff_wq;
wait_info.atomic_cnt = &phys_enc->pending_ctlstart_cnt;
wait_info.timeout_ms = KICKOFF_TIMEOUT_MS;
@@ -639,9 +610,6 @@ static int dpu_encoder_phys_cmd_wait_for_tx_complete(
{
int rc;
- if (!phys_enc)
- return -EINVAL;
-
rc = _dpu_encoder_phys_cmd_wait_for_idle(phys_enc);
if (rc) {
DRM_ERROR("failed wait_for_idle: id:%u ret:%d intf:%d\n",
@@ -658,9 +626,6 @@ static int dpu_encoder_phys_cmd_wait_for_commit_done(
int rc = 0;
struct dpu_encoder_phys_cmd *cmd_enc;
- if (!phys_enc)
- return -EINVAL;
-
cmd_enc = to_dpu_encoder_phys_cmd(phys_enc);
/* only required for master controller */
@@ -681,9 +646,6 @@ static int dpu_encoder_phys_cmd_wait_for_vblank(
struct dpu_encoder_phys_cmd *cmd_enc;
struct dpu_encoder_wait_info wait_info;
- if (!phys_enc)
- return -EINVAL;
-
cmd_enc = to_dpu_encoder_phys_cmd(phys_enc);
/* only required for master controller */
@@ -715,9 +677,6 @@ static void dpu_encoder_phys_cmd_handle_post_kickoff(
static void dpu_encoder_phys_cmd_trigger_start(
struct dpu_encoder_phys *phys_enc)
{
- if (!phys_enc)
- return;
-
dpu_encoder_helper_trigger_start(phys_enc);
}
@@ -816,6 +775,4 @@ struct dpu_encoder_phys *dpu_encoder_phys_cmd_init(
DPU_DEBUG_CMDENC(cmd_enc, "created\n");
return phys_enc;
-
- return ERR_PTR(ret);
}
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_vid.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_vid.c
index 3123ef873cdf..c71c18de5966 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_vid.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_vid.c
@@ -220,8 +220,7 @@ static bool dpu_encoder_phys_vid_mode_fixup(
const struct drm_display_mode *mode,
struct drm_display_mode *adj_mode)
{
- if (phys_enc)
- DPU_DEBUG_VIDENC(phys_enc, "\n");
+ DPU_DEBUG_VIDENC(phys_enc, "\n");
/*
* Modifying mode has consequences when the mode comes back to us
@@ -239,7 +238,7 @@ static void dpu_encoder_phys_vid_setup_timing_engine(
unsigned long lock_flags;
struct dpu_hw_intf_cfg intf_cfg = { 0 };
- if (!phys_enc || !phys_enc->hw_ctl->ops.setup_intf_cfg) {
+ if (!phys_enc->hw_ctl->ops.setup_intf_cfg) {
DPU_ERROR("invalid encoder %d\n", phys_enc != 0);
return;
}
@@ -280,6 +279,14 @@ static void dpu_encoder_phys_vid_setup_timing_engine(
phys_enc->hw_intf->ops.setup_timing_gen(phys_enc->hw_intf,
&timing_params, fmt);
phys_enc->hw_ctl->ops.setup_intf_cfg(phys_enc->hw_ctl, &intf_cfg);
+
+ /* setup which pp blk will connect to this intf */
+ if (phys_enc->hw_intf->ops.bind_pingpong_blk)
+ phys_enc->hw_intf->ops.bind_pingpong_blk(
+ phys_enc->hw_intf,
+ true,
+ phys_enc->hw_pp->idx);
+
spin_unlock_irqrestore(phys_enc->enc_spinlock, lock_flags);
programmable_fetch_config(phys_enc, &timing_params);
@@ -293,12 +300,7 @@ static void dpu_encoder_phys_vid_vblank_irq(void *arg, int irq_idx)
u32 flush_register = 0;
int new_cnt = -1, old_cnt = -1;
- if (!phys_enc)
- return;
-
hw_ctl = phys_enc->hw_ctl;
- if (!hw_ctl)
- return;
DPU_ATRACE_BEGIN("vblank_irq");
@@ -314,7 +316,7 @@ static void dpu_encoder_phys_vid_vblank_irq(void *arg, int irq_idx)
* so we need to double-check with hw that it accepted the flush bits
*/
spin_lock_irqsave(phys_enc->enc_spinlock, lock_flags);
- if (hw_ctl && hw_ctl->ops.get_flush_register)
+ if (hw_ctl->ops.get_flush_register)
flush_register = hw_ctl->ops.get_flush_register(hw_ctl);
if (!(flush_register & hw_ctl->ops.get_pending_flush(hw_ctl)))
@@ -335,9 +337,6 @@ static void dpu_encoder_phys_vid_underrun_irq(void *arg, int irq_idx)
{
struct dpu_encoder_phys *phys_enc = arg;
- if (!phys_enc)
- return;
-
if (phys_enc->parent_ops->handle_underrun_virt)
phys_enc->parent_ops->handle_underrun_virt(phys_enc->parent,
phys_enc);
@@ -374,11 +373,6 @@ static void dpu_encoder_phys_vid_mode_set(
struct drm_display_mode *mode,
struct drm_display_mode *adj_mode)
{
- if (!phys_enc) {
- DPU_ERROR("invalid encoder/kms\n");
- return;
- }
-
if (adj_mode) {
phys_enc->cached_mode = *adj_mode;
drm_mode_debug_printmodeline(adj_mode);
@@ -395,11 +389,6 @@ static int dpu_encoder_phys_vid_control_vblank_irq(
int ret = 0;
int refcount;
- if (!phys_enc) {
- DPU_ERROR("invalid encoder\n");
- return -EINVAL;
- }
-
refcount = atomic_read(&phys_enc->vblank_refcount);
/* Slave encoders don't report vblank */
@@ -435,6 +424,7 @@ static void dpu_encoder_phys_vid_enable(struct dpu_encoder_phys *phys_enc)
{
struct dpu_hw_ctl *ctl;
u32 flush_mask = 0;
+ u32 intf_flush_mask = 0;
ctl = phys_enc->hw_ctl;
@@ -459,10 +449,18 @@ static void dpu_encoder_phys_vid_enable(struct dpu_encoder_phys *phys_enc)
ctl->ops.get_bitmask_intf(ctl, &flush_mask, phys_enc->hw_intf->idx);
ctl->ops.update_pending_flush(ctl, flush_mask);
+ if (ctl->ops.get_bitmask_active_intf)
+ ctl->ops.get_bitmask_active_intf(ctl, &intf_flush_mask,
+ phys_enc->hw_intf->idx);
+
+ if (ctl->ops.update_pending_intf_flush)
+ ctl->ops.update_pending_intf_flush(ctl, intf_flush_mask);
+
skip_flush:
DPU_DEBUG_VIDENC(phys_enc,
- "update pending flush ctl %d flush_mask %x\n",
- ctl->idx - CTL_0, flush_mask);
+ "update pending flush ctl %d flush_mask 0%x intf_mask 0x%x\n",
+ ctl->idx - CTL_0, flush_mask, intf_flush_mask);
+
/* ctl_flush & timing engine enable will be triggered by framework */
if (phys_enc->enable_state == DPU_ENC_DISABLED)
@@ -471,11 +469,6 @@ skip_flush:
static void dpu_encoder_phys_vid_destroy(struct dpu_encoder_phys *phys_enc)
{
- if (!phys_enc) {
- DPU_ERROR("invalid encoder\n");
- return;
- }
-
DPU_DEBUG_VIDENC(phys_enc, "\n");
kfree(phys_enc);
}
@@ -493,11 +486,6 @@ static int dpu_encoder_phys_vid_wait_for_vblank(
struct dpu_encoder_wait_info wait_info;
int ret;
- if (!phys_enc) {
- pr_err("invalid encoder\n");
- return -EINVAL;
- }
-
wait_info.wq = &phys_enc->pending_kickoff_wq;
wait_info.atomic_cnt = &phys_enc->pending_kickoff_cnt;
wait_info.timeout_ms = KICKOFF_TIMEOUT_MS;
@@ -543,13 +531,8 @@ static void dpu_encoder_phys_vid_prepare_for_kickoff(
struct dpu_hw_ctl *ctl;
int rc;
- if (!phys_enc) {
- DPU_ERROR("invalid encoder/parameters\n");
- return;
- }
-
ctl = phys_enc->hw_ctl;
- if (!ctl || !ctl->ops.wait_reset_status)
+ if (!ctl->ops.wait_reset_status)
return;
/*
@@ -569,12 +552,12 @@ static void dpu_encoder_phys_vid_disable(struct dpu_encoder_phys *phys_enc)
unsigned long lock_flags;
int ret;
- if (!phys_enc || !phys_enc->parent || !phys_enc->parent->dev) {
+ if (!phys_enc->parent || !phys_enc->parent->dev) {
DPU_ERROR("invalid encoder/device\n");
return;
}
- if (!phys_enc->hw_intf || !phys_enc->hw_ctl) {
+ if (!phys_enc->hw_intf) {
DPU_ERROR("invalid hw_intf %d hw_ctl %d\n",
phys_enc->hw_intf != 0, phys_enc->hw_ctl != 0);
return;
@@ -639,9 +622,6 @@ static void dpu_encoder_phys_vid_irq_control(struct dpu_encoder_phys *phys_enc,
{
int ret;
- if (!phys_enc)
- return;
-
trace_dpu_enc_phys_vid_irq_ctrl(DRMID(phys_enc->parent),
phys_enc->hw_intf->idx - INTF_0,
enable,
@@ -662,9 +642,6 @@ static void dpu_encoder_phys_vid_irq_control(struct dpu_encoder_phys *phys_enc,
static int dpu_encoder_phys_vid_get_line_count(
struct dpu_encoder_phys *phys_enc)
{
- if (!phys_enc)
- return -EINVAL;
-
if (!dpu_encoder_phys_vid_is_master(phys_enc))
return -EINVAL;
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_formats.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_formats.c
index 24ab6249083a..528632690f1e 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_formats.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_formats.c
@@ -489,12 +489,28 @@ static const struct dpu_format dpu_format_map_ubwc[] = {
true, 4, DPU_FORMAT_FLAG_COMPRESSED,
DPU_FETCH_UBWC, 2, DPU_TILE_HEIGHT_UBWC),
+ /* ARGB8888 and ABGR8888 purposely have the same color
+ * ordering. The hardware only supports ABGR8888 UBWC
+ * natively.
+ */
+ INTERLEAVED_RGB_FMT_TILED(ARGB8888,
+ COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
+ C2_R_Cr, C0_G_Y, C1_B_Cb, C3_ALPHA, 4,
+ true, 4, DPU_FORMAT_FLAG_COMPRESSED,
+ DPU_FETCH_UBWC, 2, DPU_TILE_HEIGHT_UBWC),
+
INTERLEAVED_RGB_FMT_TILED(XBGR8888,
COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
C2_R_Cr, C0_G_Y, C1_B_Cb, C3_ALPHA, 4,
false, 4, DPU_FORMAT_FLAG_COMPRESSED,
DPU_FETCH_UBWC, 2, DPU_TILE_HEIGHT_UBWC),
+ INTERLEAVED_RGB_FMT_TILED(XRGB8888,
+ COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
+ C2_R_Cr, C0_G_Y, C1_B_Cb, C3_ALPHA, 4,
+ false, 4, DPU_FORMAT_FLAG_COMPRESSED,
+ DPU_FETCH_UBWC, 2, DPU_TILE_HEIGHT_UBWC),
+
INTERLEAVED_RGB_FMT_TILED(ABGR2101010,
COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
C2_R_Cr, C0_G_Y, C1_B_Cb, C3_ALPHA, 4,
@@ -550,7 +566,9 @@ static int _dpu_format_get_media_color_ubwc(const struct dpu_format *fmt)
{
static const struct dpu_media_color_map dpu_media_ubwc_map[] = {
{DRM_FORMAT_ABGR8888, COLOR_FMT_RGBA8888_UBWC},
+ {DRM_FORMAT_ARGB8888, COLOR_FMT_RGBA8888_UBWC},
{DRM_FORMAT_XBGR8888, COLOR_FMT_RGBA8888_UBWC},
+ {DRM_FORMAT_XRGB8888, COLOR_FMT_RGBA8888_UBWC},
{DRM_FORMAT_ABGR2101010, COLOR_FMT_RGBA1010102_UBWC},
{DRM_FORMAT_XBGR2101010, COLOR_FMT_RGBA1010102_UBWC},
{DRM_FORMAT_BGR565, COLOR_FMT_RGB565_UBWC},
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.c
index 04c8c44f5b9c..c567917541e8 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.c
@@ -11,11 +11,17 @@
#include "dpu_hw_catalog_format.h"
#include "dpu_kms.h"
-#define VIG_SDM845_MASK \
- (BIT(DPU_SSPP_SRC) | BIT(DPU_SSPP_SCALER_QSEED3) | BIT(DPU_SSPP_QOS) |\
+#define VIG_MASK \
+ (BIT(DPU_SSPP_SRC) | BIT(DPU_SSPP_QOS) |\
BIT(DPU_SSPP_CSC_10BIT) | BIT(DPU_SSPP_CDP) | BIT(DPU_SSPP_QOS_8LVL) |\
BIT(DPU_SSPP_TS_PREFILL) | BIT(DPU_SSPP_EXCL_RECT))
+#define VIG_SDM845_MASK \
+ (VIG_MASK | BIT(DPU_SSPP_SCALER_QSEED3))
+
+#define VIG_SC7180_MASK \
+ (VIG_MASK | BIT(DPU_SSPP_SCALER_QSEED4))
+
#define DMA_SDM845_MASK \
(BIT(DPU_SSPP_SRC) | BIT(DPU_SSPP_QOS) | BIT(DPU_SSPP_QOS_8LVL) |\
BIT(DPU_SSPP_TS_PREFILL) | BIT(DPU_SSPP_TS_PREFILL_REC1) |\
@@ -27,6 +33,9 @@
#define MIXER_SDM845_MASK \
(BIT(DPU_MIXER_SOURCESPLIT) | BIT(DPU_DIM_LAYER))
+#define MIXER_SC7180_MASK \
+ (BIT(DPU_DIM_LAYER))
+
#define PINGPONG_SDM845_MASK BIT(DPU_PINGPONG_DITHER)
#define PINGPONG_SDM845_SPLIT_MASK \
@@ -58,9 +67,20 @@ static const struct dpu_caps sdm845_dpu_caps = {
.has_src_split = true,
.has_dim_layer = true,
.has_idle_pc = true,
+ .has_3d_merge = true,
+};
+
+static const struct dpu_caps sc7180_dpu_caps = {
+ .max_mixer_width = DEFAULT_DPU_OUTPUT_LINE_WIDTH,
+ .max_mixer_blendstages = 0x9,
+ .qseed_type = DPU_SSPP_SCALER_QSEED4,
+ .smart_dma_rev = DPU_SSPP_SMART_DMA_V2,
+ .ubwc_version = DPU_HW_UBWC_VER_20,
+ .has_dim_layer = true,
+ .has_idle_pc = true,
};
-static struct dpu_mdp_cfg sdm845_mdp[] = {
+static const struct dpu_mdp_cfg sdm845_mdp[] = {
{
.name = "top_0", .id = MDP_TOP,
.base = 0x0, .len = 0x45C,
@@ -85,10 +105,27 @@ static struct dpu_mdp_cfg sdm845_mdp[] = {
},
};
+static const struct dpu_mdp_cfg sc7180_mdp[] = {
+ {
+ .name = "top_0", .id = MDP_TOP,
+ .base = 0x0, .len = 0x494,
+ .features = 0,
+ .highest_bank_bit = 0x3,
+ .clk_ctrls[DPU_CLK_CTRL_VIG0] = {
+ .reg_off = 0x2AC, .bit_off = 0},
+ .clk_ctrls[DPU_CLK_CTRL_DMA0] = {
+ .reg_off = 0x2AC, .bit_off = 8},
+ .clk_ctrls[DPU_CLK_CTRL_DMA1] = {
+ .reg_off = 0x2B4, .bit_off = 8},
+ .clk_ctrls[DPU_CLK_CTRL_CURSOR0] = {
+ .reg_off = 0x2BC, .bit_off = 8},
+ },
+};
+
/*************************************************************
* CTL sub blocks config
*************************************************************/
-static struct dpu_ctl_cfg sdm845_ctl[] = {
+static const struct dpu_ctl_cfg sdm845_ctl[] = {
{
.name = "ctl_0", .id = CTL_0,
.base = 0x1000, .len = 0xE4,
@@ -116,6 +153,24 @@ static struct dpu_ctl_cfg sdm845_ctl[] = {
},
};
+static const struct dpu_ctl_cfg sc7180_ctl[] = {
+ {
+ .name = "ctl_0", .id = CTL_0,
+ .base = 0x1000, .len = 0xE4,
+ .features = BIT(DPU_CTL_ACTIVE_CFG)
+ },
+ {
+ .name = "ctl_1", .id = CTL_1,
+ .base = 0x1200, .len = 0xE4,
+ .features = BIT(DPU_CTL_ACTIVE_CFG)
+ },
+ {
+ .name = "ctl_2", .id = CTL_2,
+ .base = 0x1400, .len = 0xE4,
+ .features = BIT(DPU_CTL_ACTIVE_CFG)
+ },
+};
+
/*************************************************************
* SSPP sub blocks config
*************************************************************/
@@ -128,7 +183,7 @@ static const struct dpu_sspp_blks_common sdm845_sspp_common = {
.maxvdeciexp = MAX_VERT_DECIMATION,
};
-#define _VIG_SBLK(num, sdma_pri) \
+#define _VIG_SBLK(num, sdma_pri, qseed_ver) \
{ \
.common = &sdm845_sspp_common, \
.maxdwnscale = MAX_DOWNSCALE_RATIO, \
@@ -137,7 +192,7 @@ static const struct dpu_sspp_blks_common sdm845_sspp_common = {
.src_blk = {.name = STRCAT("sspp_src_", num), \
.id = DPU_SSPP_SRC, .base = 0x00, .len = 0x150,}, \
.scaler_blk = {.name = STRCAT("sspp_scaler", num), \
- .id = DPU_SSPP_SCALER_QSEED3, \
+ .id = qseed_ver, \
.base = 0xa00, .len = 0xa0,}, \
.csc_blk = {.name = STRCAT("sspp_csc", num), \
.id = DPU_SSPP_CSC_10BIT, \
@@ -162,10 +217,14 @@ static const struct dpu_sspp_blks_common sdm845_sspp_common = {
.virt_num_formats = ARRAY_SIZE(plane_formats), \
}
-static const struct dpu_sspp_sub_blks sdm845_vig_sblk_0 = _VIG_SBLK("0", 5);
-static const struct dpu_sspp_sub_blks sdm845_vig_sblk_1 = _VIG_SBLK("1", 6);
-static const struct dpu_sspp_sub_blks sdm845_vig_sblk_2 = _VIG_SBLK("2", 7);
-static const struct dpu_sspp_sub_blks sdm845_vig_sblk_3 = _VIG_SBLK("3", 8);
+static const struct dpu_sspp_sub_blks sdm845_vig_sblk_0 =
+ _VIG_SBLK("0", 5, DPU_SSPP_SCALER_QSEED3);
+static const struct dpu_sspp_sub_blks sdm845_vig_sblk_1 =
+ _VIG_SBLK("1", 6, DPU_SSPP_SCALER_QSEED3);
+static const struct dpu_sspp_sub_blks sdm845_vig_sblk_2 =
+ _VIG_SBLK("2", 7, DPU_SSPP_SCALER_QSEED3);
+static const struct dpu_sspp_sub_blks sdm845_vig_sblk_3 =
+ _VIG_SBLK("3", 8, DPU_SSPP_SCALER_QSEED3);
static const struct dpu_sspp_sub_blks sdm845_dma_sblk_0 = _DMA_SBLK("8", 1);
static const struct dpu_sspp_sub_blks sdm845_dma_sblk_1 = _DMA_SBLK("9", 2);
@@ -184,7 +243,7 @@ static const struct dpu_sspp_sub_blks sdm845_dma_sblk_3 = _DMA_SBLK("11", 4);
.clk_ctrl = _clkctrl \
}
-static struct dpu_sspp_cfg sdm845_sspp[] = {
+static const struct dpu_sspp_cfg sdm845_sspp[] = {
SSPP_BLK("sspp_0", SSPP_VIG0, 0x4000, VIG_SDM845_MASK,
sdm845_vig_sblk_0, 0, SSPP_TYPE_VIG, DPU_CLK_CTRL_VIG0),
SSPP_BLK("sspp_1", SSPP_VIG1, 0x6000, VIG_SDM845_MASK,
@@ -203,9 +262,26 @@ static struct dpu_sspp_cfg sdm845_sspp[] = {
sdm845_dma_sblk_3, 13, SSPP_TYPE_DMA, DPU_CLK_CTRL_CURSOR1),
};
+static const struct dpu_sspp_sub_blks sc7180_vig_sblk_0 =
+ _VIG_SBLK("0", 4, DPU_SSPP_SCALER_QSEED4);
+
+static const struct dpu_sspp_cfg sc7180_sspp[] = {
+ SSPP_BLK("sspp_0", SSPP_VIG0, 0x4000, VIG_SC7180_MASK,
+ sc7180_vig_sblk_0, 0, SSPP_TYPE_VIG, DPU_CLK_CTRL_VIG0),
+ SSPP_BLK("sspp_8", SSPP_DMA0, 0x24000, DMA_SDM845_MASK,
+ sdm845_dma_sblk_0, 1, SSPP_TYPE_DMA, DPU_CLK_CTRL_DMA0),
+ SSPP_BLK("sspp_9", SSPP_DMA1, 0x26000, DMA_SDM845_MASK,
+ sdm845_dma_sblk_1, 5, SSPP_TYPE_DMA, DPU_CLK_CTRL_DMA1),
+ SSPP_BLK("sspp_10", SSPP_DMA2, 0x28000, DMA_CURSOR_SDM845_MASK,
+ sdm845_dma_sblk_2, 9, SSPP_TYPE_DMA, DPU_CLK_CTRL_CURSOR0),
+};
+
/*************************************************************
* MIXER sub blocks config
*************************************************************/
+
+/* SDM845 */
+
static const struct dpu_lm_sub_blks sdm845_lm_sblk = {
.maxwidth = DEFAULT_DPU_OUTPUT_LINE_WIDTH,
.maxblendstages = 11, /* excluding base layer */
@@ -215,23 +291,46 @@ static const struct dpu_lm_sub_blks sdm845_lm_sblk = {
},
};
-#define LM_BLK(_name, _id, _base, _pp, _lmpair) \
+#define LM_BLK(_name, _id, _base, _fmask, _sblk, _pp, _lmpair) \
{ \
.name = _name, .id = _id, \
.base = _base, .len = 0x320, \
- .features = MIXER_SDM845_MASK, \
- .sblk = &sdm845_lm_sblk, \
+ .features = _fmask, \
+ .sblk = _sblk, \
.pingpong = _pp, \
.lm_pair_mask = (1 << _lmpair) \
}
-static struct dpu_lm_cfg sdm845_lm[] = {
- LM_BLK("lm_0", LM_0, 0x44000, PINGPONG_0, LM_1),
- LM_BLK("lm_1", LM_1, 0x45000, PINGPONG_1, LM_0),
- LM_BLK("lm_2", LM_2, 0x46000, PINGPONG_2, LM_5),
- LM_BLK("lm_3", LM_3, 0x0, PINGPONG_MAX, 0),
- LM_BLK("lm_4", LM_4, 0x0, PINGPONG_MAX, 0),
- LM_BLK("lm_5", LM_5, 0x49000, PINGPONG_3, LM_2),
+static const struct dpu_lm_cfg sdm845_lm[] = {
+ LM_BLK("lm_0", LM_0, 0x44000, MIXER_SDM845_MASK,
+ &sdm845_lm_sblk, PINGPONG_0, LM_1),
+ LM_BLK("lm_1", LM_1, 0x45000, MIXER_SDM845_MASK,
+ &sdm845_lm_sblk, PINGPONG_1, LM_0),
+ LM_BLK("lm_2", LM_2, 0x46000, MIXER_SDM845_MASK,
+ &sdm845_lm_sblk, PINGPONG_2, LM_5),
+ LM_BLK("lm_3", LM_3, 0x0, MIXER_SDM845_MASK,
+ &sdm845_lm_sblk, PINGPONG_MAX, 0),
+ LM_BLK("lm_4", LM_4, 0x0, MIXER_SDM845_MASK,
+ &sdm845_lm_sblk, PINGPONG_MAX, 0),
+ LM_BLK("lm_5", LM_5, 0x49000, MIXER_SDM845_MASK,
+ &sdm845_lm_sblk, PINGPONG_3, LM_2),
+};
+
+/* SC7180 */
+
+static const struct dpu_lm_sub_blks sc7180_lm_sblk = {
+ .maxwidth = DEFAULT_DPU_OUTPUT_LINE_WIDTH,
+ .maxblendstages = 7, /* excluding base layer */
+ .blendstage_base = { /* offsets relative to mixer base */
+ 0x20, 0x38, 0x50, 0x68, 0x80, 0x98, 0xb0
+ },
+};
+
+static const struct dpu_lm_cfg sc7180_lm[] = {
+ LM_BLK("lm_0", LM_0, 0x44000, MIXER_SC7180_MASK,
+ &sc7180_lm_sblk, PINGPONG_0, LM_1),
+ LM_BLK("lm_1", LM_1, 0x45000, MIXER_SC7180_MASK,
+ &sc7180_lm_sblk, PINGPONG_1, LM_0),
};
/*************************************************************
@@ -264,13 +363,18 @@ static const struct dpu_pingpong_sub_blks sdm845_pp_sblk = {
.sblk = &sdm845_pp_sblk \
}
-static struct dpu_pingpong_cfg sdm845_pp[] = {
+static const struct dpu_pingpong_cfg sdm845_pp[] = {
PP_BLK_TE("pingpong_0", PINGPONG_0, 0x70000),
PP_BLK_TE("pingpong_1", PINGPONG_1, 0x70800),
PP_BLK("pingpong_2", PINGPONG_2, 0x71000),
PP_BLK("pingpong_3", PINGPONG_3, 0x71800),
};
+static const struct dpu_pingpong_cfg sc7180_pp[] = {
+ PP_BLK_TE("pingpong_0", PINGPONG_0, 0x70000),
+ PP_BLK_TE("pingpong_1", PINGPONG_1, 0x70800),
+};
+
/*************************************************************
* INTF sub blocks config
*************************************************************/
@@ -278,26 +382,32 @@ static struct dpu_pingpong_cfg sdm845_pp[] = {
{\
.name = _name, .id = _id, \
.base = _base, .len = 0x280, \
+ .features = BIT(DPU_CTL_ACTIVE_CFG), \
.type = _type, \
.controller_id = _ctrl_id, \
.prog_fetch_lines_worst_case = 24 \
}
-static struct dpu_intf_cfg sdm845_intf[] = {
+static const struct dpu_intf_cfg sdm845_intf[] = {
INTF_BLK("intf_0", INTF_0, 0x6A000, INTF_DP, 0),
INTF_BLK("intf_1", INTF_1, 0x6A800, INTF_DSI, 0),
INTF_BLK("intf_2", INTF_2, 0x6B000, INTF_DSI, 1),
INTF_BLK("intf_3", INTF_3, 0x6B800, INTF_DP, 1),
};
+static const struct dpu_intf_cfg sc7180_intf[] = {
+ INTF_BLK("intf_0", INTF_0, 0x6A000, INTF_DP, 0),
+ INTF_BLK("intf_1", INTF_1, 0x6A800, INTF_DSI, 0),
+};
+
/*************************************************************
* VBIF sub blocks config
*************************************************************/
/* VBIF QOS remap */
-static u32 sdm845_rt_pri_lvl[] = {3, 3, 4, 4, 5, 5, 6, 6};
-static u32 sdm845_nrt_pri_lvl[] = {3, 3, 3, 3, 3, 3, 3, 3};
+static const u32 sdm845_rt_pri_lvl[] = {3, 3, 4, 4, 5, 5, 6, 6};
+static const u32 sdm845_nrt_pri_lvl[] = {3, 3, 3, 3, 3, 3, 3, 3};
-static struct dpu_vbif_cfg sdm845_vbif[] = {
+static const struct dpu_vbif_cfg sdm845_vbif[] = {
{
.name = "vbif_0", .id = VBIF_0,
.base = 0, .len = 0x1040,
@@ -316,7 +426,7 @@ static struct dpu_vbif_cfg sdm845_vbif[] = {
},
};
-static struct dpu_reg_dma_cfg sdm845_regdma = {
+static const struct dpu_reg_dma_cfg sdm845_regdma = {
.base = 0x0, .version = 0x1, .trigger_sel_off = 0x119c
};
@@ -325,7 +435,7 @@ static struct dpu_reg_dma_cfg sdm845_regdma = {
*************************************************************/
/* SSPP QOS LUTs */
-static struct dpu_qos_lut_entry sdm845_qos_linear[] = {
+static const struct dpu_qos_lut_entry sdm845_qos_linear[] = {
{.fl = 4, .lut = 0x357},
{.fl = 5, .lut = 0x3357},
{.fl = 6, .lut = 0x23357},
@@ -340,7 +450,11 @@ static struct dpu_qos_lut_entry sdm845_qos_linear[] = {
{.fl = 0, .lut = 0x11222222223357}
};
-static struct dpu_qos_lut_entry sdm845_qos_macrotile[] = {
+static const struct dpu_qos_lut_entry sc7180_qos_linear[] = {
+ {.fl = 0, .lut = 0x0011222222335777},
+};
+
+static const struct dpu_qos_lut_entry sdm845_qos_macrotile[] = {
{.fl = 10, .lut = 0x344556677},
{.fl = 11, .lut = 0x3344556677},
{.fl = 12, .lut = 0x23344556677},
@@ -349,11 +463,19 @@ static struct dpu_qos_lut_entry sdm845_qos_macrotile[] = {
{.fl = 0, .lut = 0x112233344556677},
};
-static struct dpu_qos_lut_entry sdm845_qos_nrt[] = {
+static const struct dpu_qos_lut_entry sc7180_qos_macrotile[] = {
+ {.fl = 0, .lut = 0x0011223344556677},
+};
+
+static const struct dpu_qos_lut_entry sdm845_qos_nrt[] = {
+ {.fl = 0, .lut = 0x0},
+};
+
+static const struct dpu_qos_lut_entry sc7180_qos_nrt[] = {
{.fl = 0, .lut = 0x0},
};
-static struct dpu_perf_cfg sdm845_perf_data = {
+static const struct dpu_perf_cfg sdm845_perf_data = {
.max_bw_low = 6800000,
.max_bw_high = 6800000,
.min_core_ib = 2400000,
@@ -392,6 +514,30 @@ static struct dpu_perf_cfg sdm845_perf_data = {
},
};
+static const struct dpu_perf_cfg sc7180_perf_data = {
+ .max_bw_low = 3900000,
+ .max_bw_high = 5500000,
+ .min_core_ib = 2400000,
+ .min_llcc_ib = 800000,
+ .min_dram_ib = 800000,
+ .danger_lut_tbl = {0xff, 0xffff, 0x0},
+ .qos_lut_tbl = {
+ {.nentry = ARRAY_SIZE(sc7180_qos_linear),
+ .entries = sc7180_qos_linear
+ },
+ {.nentry = ARRAY_SIZE(sc7180_qos_macrotile),
+ .entries = sc7180_qos_macrotile
+ },
+ {.nentry = ARRAY_SIZE(sc7180_qos_nrt),
+ .entries = sc7180_qos_nrt
+ },
+ },
+ .cdp_cfg = {
+ {.rd_enable = 1, .wr_enable = 1},
+ {.rd_enable = 1, .wr_enable = 0}
+ },
+};
+
/*************************************************************
* Hardware catalog init
*************************************************************/
@@ -421,12 +567,43 @@ static void sdm845_cfg_init(struct dpu_mdss_cfg *dpu_cfg)
.reg_dma_count = 1,
.dma_cfg = sdm845_regdma,
.perf = sdm845_perf_data,
+ .mdss_irqs = 0x3ff,
+ };
+}
+
+/*
+ * sc7180_cfg_init(): populate sc7180 dpu sub-blocks reg offsets
+ * and instance counts.
+ */
+static void sc7180_cfg_init(struct dpu_mdss_cfg *dpu_cfg)
+{
+ *dpu_cfg = (struct dpu_mdss_cfg){
+ .caps = &sc7180_dpu_caps,
+ .mdp_count = ARRAY_SIZE(sc7180_mdp),
+ .mdp = sc7180_mdp,
+ .ctl_count = ARRAY_SIZE(sc7180_ctl),
+ .ctl = sc7180_ctl,
+ .sspp_count = ARRAY_SIZE(sc7180_sspp),
+ .sspp = sc7180_sspp,
+ .mixer_count = ARRAY_SIZE(sc7180_lm),
+ .mixer = sc7180_lm,
+ .pingpong_count = ARRAY_SIZE(sc7180_pp),
+ .pingpong = sc7180_pp,
+ .intf_count = ARRAY_SIZE(sc7180_intf),
+ .intf = sc7180_intf,
+ .vbif_count = ARRAY_SIZE(sdm845_vbif),
+ .vbif = sdm845_vbif,
+ .reg_dma_count = 1,
+ .dma_cfg = sdm845_regdma,
+ .perf = sc7180_perf_data,
+ .mdss_irqs = 0x3f,
};
}
-static struct dpu_mdss_hw_cfg_handler cfg_handler[] = {
+static const struct dpu_mdss_hw_cfg_handler cfg_handler[] = {
{ .hw_rev = DPU_HW_VER_400, .cfg_init = sdm845_cfg_init},
{ .hw_rev = DPU_HW_VER_401, .cfg_init = sdm845_cfg_init},
+ { .hw_rev = DPU_HW_VER_620, .cfg_init = sc7180_cfg_init},
};
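Catalog selection is a revision-keyed dispatch table, so several close revisions can share one init routine (400 and 401 both map to sdm845 above). A compilable model of the cfg_handler[] lookup (the numeric revision encoding below is an assumption, not the actual DPU_HW_VER() layout):

#include <stdio.h>

struct cfg { int mixers; };
static void init_sdm845(struct cfg *c) { c->mixers = 6; }
static void init_sc7180(struct cfg *c) { c->mixers = 2; }

/* Revision-keyed dispatch, mirroring cfg_handler[]. */
static const struct {
	unsigned hw_rev;
	void (*cfg_init)(struct cfg *);
} handlers[] = {
	{ 0x40000000, init_sdm845 },
	{ 0x40000001, init_sdm845 },
	{ 0x62000000, init_sc7180 },	/* assumed encoding of ver 6.2.0 */
};

int main(void)
{
	unsigned rev = 0x62000000;
	struct cfg c = { 0 };

	for (size_t i = 0; i < sizeof(handlers) / sizeof(handlers[0]); i++)
		if (handlers[i].hw_rev == rev)
			handlers[i].cfg_init(&c);
	printf("mixers: %d\n", c.mixers);
	return 0;
}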
void dpu_hw_catalog_deinit(struct dpu_mdss_cfg *dpu_cfg)
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.h
index ec76b8687a98..09df7d87dd43 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.h
@@ -38,6 +38,7 @@
#define DPU_HW_VER_401 DPU_HW_VER(4, 0, 1) /* sdm845 v2.0 */
#define DPU_HW_VER_410 DPU_HW_VER(4, 1, 0) /* sdm670 v1.0 */
#define DPU_HW_VER_500 DPU_HW_VER(5, 0, 0) /* sdm855 v1.0 */
+#define DPU_HW_VER_620 DPU_HW_VER(6, 2, 0) /* sc7180 v1.0 */
#define IS_MSM8996_TARGET(rev) IS_DPU_MAJOR_MINOR_SAME((rev), DPU_HW_VER_170)
@@ -45,6 +46,7 @@
#define IS_SDM845_TARGET(rev) IS_DPU_MAJOR_MINOR_SAME((rev), DPU_HW_VER_400)
#define IS_SDM670_TARGET(rev) IS_DPU_MAJOR_MINOR_SAME((rev), DPU_HW_VER_410)
#define IS_SDM855_TARGET(rev) IS_DPU_MAJOR_MINOR_SAME((rev), DPU_HW_VER_500)
+#define IS_SC7180_TARGET(rev) IS_DPU_MAJOR_MINOR_SAME((rev), DPU_HW_VER_620)
#define DPU_HW_BLK_NAME_LEN 16
@@ -92,6 +94,7 @@ enum {
* @DPU_SSPP_SRC Src and fetch part of the pipes,
* @DPU_SSPP_SCALER_QSEED2, QSEED2 algorithm support
 * @DPU_SSPP_SCALER_QSEED3, QSEED3 algorithm support
+ * @DPU_SSPP_SCALER_QSEED4, QSEED4 algorithm support
* @DPU_SSPP_SCALER_RGB, RGB Scaler, supported by RGB pipes
 * @DPU_SSPP_CSC, Support of Color space conversion
* @DPU_SSPP_CSC_10BIT, Support of 10-bit Color space conversion
@@ -110,6 +113,7 @@ enum {
DPU_SSPP_SRC = 0x1,
DPU_SSPP_SCALER_QSEED2,
DPU_SSPP_SCALER_QSEED3,
+ DPU_SSPP_SCALER_QSEED4,
DPU_SSPP_SCALER_RGB,
DPU_SSPP_CSC,
DPU_SSPP_CSC_10BIT,
@@ -166,6 +170,7 @@ enum {
*/
enum {
DPU_CTL_SPLIT_DISPLAY = 0x1,
+ DPU_CTL_ACTIVE_CFG,
DPU_CTL_MAX
};
@@ -269,7 +274,7 @@ struct dpu_qos_lut_entry {
*/
struct dpu_qos_lut_tbl {
u32 nentry;
- struct dpu_qos_lut_entry *entries;
+ const struct dpu_qos_lut_entry *entries;
};
/**
@@ -283,6 +288,7 @@ struct dpu_qos_lut_tbl {
* @has_src_split source split feature status
* @has_dim_layer dim layer feature status
* @has_idle_pc indicate if idle power collapse feature is supported
+ * @has_3d_merge indicate if 3D merge is supported
*/
struct dpu_caps {
u32 max_mixer_width;
@@ -293,6 +299,7 @@ struct dpu_caps {
bool has_src_split;
bool has_dim_layer;
bool has_idle_pc;
+ bool has_3d_merge;
};
/**
@@ -320,6 +327,7 @@ struct dpu_sspp_blks_common {
* @maxupscale: maxupscale ratio supported
* @smart_dma_priority: hw priority of rect1 of multirect pipe
* @max_per_pipe_bw: maximum allowable bandwidth of this pipe in kBps
+ * @qseed_ver: qseed version
* @src_blk:
* @scaler_blk:
* @csc_blk:
@@ -340,6 +348,7 @@ struct dpu_sspp_sub_blks {
u32 maxupscale;
u32 smart_dma_priority;
u32 max_per_pipe_bw;
+ u32 qseed_ver;
struct dpu_src_blk src_blk;
struct dpu_scaler_blk scaler_blk;
struct dpu_pp_blk csc_blk;
@@ -511,7 +520,7 @@ struct dpu_vbif_dynamic_ot_cfg {
*/
struct dpu_vbif_dynamic_ot_tbl {
u32 count;
- struct dpu_vbif_dynamic_ot_cfg *cfg;
+ const struct dpu_vbif_dynamic_ot_cfg *cfg;
};
/**
@@ -521,7 +530,7 @@ struct dpu_vbif_dynamic_ot_tbl {
*/
struct dpu_vbif_qos_tbl {
u32 npriority_lvl;
- u32 *priority_lvl;
+ const u32 *priority_lvl;
};
/**
@@ -646,6 +655,7 @@ struct dpu_perf_cfg {
* @dma_formats Supported formats for dma pipe
* @cursor_formats Supported formats for cursor pipe
* @vig_formats Supported formats for vig pipe
+ * @mdss_irqs: Bitmap with the irqs supported by the target
*/
struct dpu_mdss_cfg {
u32 hwversion;
@@ -653,25 +663,25 @@ struct dpu_mdss_cfg {
const struct dpu_caps *caps;
u32 mdp_count;
- struct dpu_mdp_cfg *mdp;
+ const struct dpu_mdp_cfg *mdp;
u32 ctl_count;
- struct dpu_ctl_cfg *ctl;
+ const struct dpu_ctl_cfg *ctl;
u32 sspp_count;
- struct dpu_sspp_cfg *sspp;
+ const struct dpu_sspp_cfg *sspp;
u32 mixer_count;
- struct dpu_lm_cfg *mixer;
+ const struct dpu_lm_cfg *mixer;
u32 pingpong_count;
- struct dpu_pingpong_cfg *pingpong;
+ const struct dpu_pingpong_cfg *pingpong;
u32 intf_count;
- struct dpu_intf_cfg *intf;
+ const struct dpu_intf_cfg *intf;
u32 vbif_count;
- struct dpu_vbif_cfg *vbif;
+ const struct dpu_vbif_cfg *vbif;
u32 reg_dma_count;
struct dpu_reg_dma_cfg dma_cfg;
@@ -681,9 +691,11 @@ struct dpu_mdss_cfg {
/* Add additional block data structures here */
struct dpu_perf_cfg perf;
- struct dpu_format_extended *dma_formats;
- struct dpu_format_extended *cursor_formats;
- struct dpu_format_extended *vig_formats;
+ const struct dpu_format_extended *dma_formats;
+ const struct dpu_format_extended *cursor_formats;
+ const struct dpu_format_extended *vig_formats;
+
+ unsigned long mdss_irqs;
};
struct dpu_mdss_hw_cfg_handler {
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog_format.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog_format.h
index bb6112c949ae..3766f0fd0bf0 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog_format.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog_format.h
@@ -6,8 +6,12 @@
static const uint32_t qcom_compressed_supported_formats[] = {
DRM_FORMAT_ABGR8888,
+ DRM_FORMAT_ARGB8888,
DRM_FORMAT_XBGR8888,
+ DRM_FORMAT_XRGB8888,
DRM_FORMAT_BGR565,
+
+ DRM_FORMAT_NV12,
};
static const uint32_t plane_formats[] = {
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_ctl.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_ctl.c
index 179e8d52cadb..831e5f7a9b7f 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_ctl.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_ctl.c
@@ -22,14 +22,18 @@
#define CTL_PREPARE 0x0d0
#define CTL_SW_RESET 0x030
#define CTL_LAYER_EXTN_OFFSET 0x40
+#define CTL_INTF_ACTIVE 0x0F4
+#define CTL_INTF_FLUSH 0x110
+#define CTL_INTF_MASTER 0x134
#define CTL_MIXER_BORDER_OUT BIT(24)
#define CTL_FLUSH_MASK_CTL BIT(17)
#define DPU_REG_RESET_TIMEOUT_US 2000
+#define INTF_IDX 31
-static struct dpu_ctl_cfg *_ctl_offset(enum dpu_ctl ctl,
- struct dpu_mdss_cfg *m,
+static const struct dpu_ctl_cfg *_ctl_offset(enum dpu_ctl ctl,
+ const struct dpu_mdss_cfg *m,
void __iomem *addr,
struct dpu_hw_blk_reg_map *b)
{
@@ -100,11 +104,27 @@ static inline void dpu_hw_ctl_update_pending_flush(struct dpu_hw_ctl *ctx,
ctx->pending_flush_mask |= flushbits;
}
+static inline void dpu_hw_ctl_update_pending_intf_flush(struct dpu_hw_ctl *ctx,
+ u32 flushbits)
+{
+ ctx->pending_intf_flush_mask |= flushbits;
+}
+
static u32 dpu_hw_ctl_get_pending_flush(struct dpu_hw_ctl *ctx)
{
return ctx->pending_flush_mask;
}
+static inline void dpu_hw_ctl_trigger_flush_v1(struct dpu_hw_ctl *ctx)
+{
+ if (ctx->pending_flush_mask & BIT(INTF_IDX))
+ DPU_REG_WRITE(&ctx->hw, CTL_INTF_FLUSH,
+ ctx->pending_intf_flush_mask);
+
+ DPU_REG_WRITE(&ctx->hw, CTL_FLUSH, ctx->pending_flush_mask);
+}
+
static inline void dpu_hw_ctl_trigger_flush(struct dpu_hw_ctl *ctx)
{
trace_dpu_hw_ctl_trigger_pending_flush(ctx->pending_flush_mask,
@@ -222,6 +242,36 @@ static int dpu_hw_ctl_get_bitmask_intf(struct dpu_hw_ctl *ctx,
return 0;
}
+static int dpu_hw_ctl_get_bitmask_intf_v1(struct dpu_hw_ctl *ctx,
+ u32 *flushbits, enum dpu_intf intf)
+{
+ switch (intf) {
+ case INTF_0:
+ case INTF_1:
+ *flushbits |= BIT(31);
+ break;
+ default:
+ return 0;
+ }
+ return 0;
+}
+
+static int dpu_hw_ctl_active_get_bitmask_intf(struct dpu_hw_ctl *ctx,
+ u32 *flushbits, enum dpu_intf intf)
+{
+ switch (intf) {
+ case INTF_0:
+ *flushbits |= BIT(0);
+ break;
+ case INTF_1:
+ *flushbits |= BIT(1);
+ break;
+ default:
+ return 0;
+ }
+ return 0;
+}
+
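On active-CTL hardware the interfaces move out of the main flush word: bit 31 of CTL_FLUSH only signals that CTL_INTF_FLUSH carries the per-interface bits, which is why trigger_flush_v1 writes the second register first. A userspace model of the two-level flush (fake variables stand in for the MMIO registers):

#include <stdio.h>
#include <stdint.h>

#define INTF_IDX 31	/* bit in the main flush word gating the intf word */

/* Fake register file standing in for CTL_FLUSH / CTL_INTF_FLUSH. */
static uint32_t reg_flush, reg_intf_flush;

struct ctl { uint32_t pending_flush_mask, pending_intf_flush_mask; };

static void get_bitmask_intf_v1(struct ctl *c, int intf)
{
	c->pending_flush_mask |= 1u << INTF_IDX;	/* "an intf changed" */
	c->pending_intf_flush_mask |= 1u << intf;	/* which one */
}

static void trigger_flush_v1(struct ctl *c)
{
	if (c->pending_flush_mask & (1u << INTF_IDX))
		reg_intf_flush = c->pending_intf_flush_mask;
	reg_flush = c->pending_flush_mask;
}

int main(void)
{
	struct ctl c = { 0, 0 };

	get_bitmask_intf_v1(&c, 1);	/* flush INTF_1 */
	trigger_flush_v1(&c);
	printf("CTL_FLUSH=0x%08x CTL_INTF_FLUSH=0x%08x\n",
	       reg_flush, reg_intf_flush);
	return 0;
}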
static u32 dpu_hw_ctl_poll_reset_status(struct dpu_hw_ctl *ctx, u32 timeout_us)
{
struct dpu_hw_blk_reg_map *c = &ctx->hw;
@@ -422,6 +472,24 @@ exit:
DPU_REG_WRITE(c, CTL_LAYER_EXT3(lm), mixercfg_ext3);
}
+
+static void dpu_hw_ctl_intf_cfg_v1(struct dpu_hw_ctl *ctx,
+ struct dpu_hw_intf_cfg *cfg)
+{
+ struct dpu_hw_blk_reg_map *c = &ctx->hw;
+ u32 intf_active = 0;
+ u32 mode_sel = 0;
+
+ if (cfg->intf_mode_sel == DPU_CTL_MODE_SEL_CMD)
+ mode_sel |= BIT(17);
+
+ intf_active = DPU_REG_READ(c, CTL_INTF_ACTIVE);
+ intf_active |= BIT(cfg->intf - INTF_0);
+
+ DPU_REG_WRITE(c, CTL_TOP, mode_sel);
+ DPU_REG_WRITE(c, CTL_INTF_ACTIVE, intf_active);
+}
+
static void dpu_hw_ctl_intf_cfg(struct dpu_hw_ctl *ctx,
struct dpu_hw_intf_cfg *cfg)
{
@@ -455,31 +523,41 @@ static void dpu_hw_ctl_intf_cfg(struct dpu_hw_ctl *ctx,
static void _setup_ctl_ops(struct dpu_hw_ctl_ops *ops,
unsigned long cap)
{
+ if (cap & BIT(DPU_CTL_ACTIVE_CFG)) {
+ ops->trigger_flush = dpu_hw_ctl_trigger_flush_v1;
+ ops->setup_intf_cfg = dpu_hw_ctl_intf_cfg_v1;
+ ops->get_bitmask_intf = dpu_hw_ctl_get_bitmask_intf_v1;
+ ops->get_bitmask_active_intf =
+ dpu_hw_ctl_active_get_bitmask_intf;
+ ops->update_pending_intf_flush =
+ dpu_hw_ctl_update_pending_intf_flush;
+ } else {
+ ops->trigger_flush = dpu_hw_ctl_trigger_flush;
+ ops->setup_intf_cfg = dpu_hw_ctl_intf_cfg;
+ ops->get_bitmask_intf = dpu_hw_ctl_get_bitmask_intf;
+ }
ops->clear_pending_flush = dpu_hw_ctl_clear_pending_flush;
ops->update_pending_flush = dpu_hw_ctl_update_pending_flush;
ops->get_pending_flush = dpu_hw_ctl_get_pending_flush;
- ops->trigger_flush = dpu_hw_ctl_trigger_flush;
ops->get_flush_register = dpu_hw_ctl_get_flush_register;
ops->trigger_start = dpu_hw_ctl_trigger_start;
ops->trigger_pending = dpu_hw_ctl_trigger_pending;
- ops->setup_intf_cfg = dpu_hw_ctl_intf_cfg;
ops->reset = dpu_hw_ctl_reset_control;
ops->wait_reset_status = dpu_hw_ctl_wait_reset_status;
ops->clear_all_blendstages = dpu_hw_ctl_clear_all_blendstages;
ops->setup_blendstage = dpu_hw_ctl_setup_blendstage;
ops->get_bitmask_sspp = dpu_hw_ctl_get_bitmask_sspp;
ops->get_bitmask_mixer = dpu_hw_ctl_get_bitmask_mixer;
- ops->get_bitmask_intf = dpu_hw_ctl_get_bitmask_intf;
};
static struct dpu_hw_blk_ops dpu_hw_ops;
struct dpu_hw_ctl *dpu_hw_ctl_init(enum dpu_ctl idx,
void __iomem *addr,
- struct dpu_mdss_cfg *m)
+ const struct dpu_mdss_cfg *m)
{
struct dpu_hw_ctl *c;
- struct dpu_ctl_cfg *cfg;
+ const struct dpu_ctl_cfg *cfg;
c = kzalloc(sizeof(*c), GFP_KERNEL);
if (!c)
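
Taken together, the CTL changes above split flushing into two stages on DPU_CTL_ACTIVE_CFG hardware: per-interface bits are staged in pending_intf_flush_mask and land in CTL_INTF_FLUSH, while BIT(INTF_IDX) in CTL_FLUSH tells the hardware to consult that second register. A minimal caller-side sketch, assuming the ops were populated by _setup_ctl_ops(); the helper name and flow are illustrative, not part of the patch:

    static void example_flush_intf(struct dpu_hw_ctl *ctl, enum dpu_intf intf)
    {
        u32 flush = 0, intf_flush = 0;

        /* BIT(INTF_IDX) in CTL_FLUSH means "consult CTL_INTF_FLUSH" */
        ctl->ops.get_bitmask_intf(ctl, &flush, intf);
        /* per-interface bit destined for CTL_INTF_FLUSH */
        ctl->ops.get_bitmask_active_intf(ctl, &intf_flush, intf);

        ctl->ops.update_pending_flush(ctl, flush);
        ctl->ops.update_pending_intf_flush(ctl, intf_flush);

        /* the v1 path writes CTL_INTF_FLUSH first, then CTL_FLUSH */
        ctl->ops.trigger_flush(ctl);
    }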
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_ctl.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_ctl.h
index d3ae939ef9f8..09e1263c72e2 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_ctl.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_ctl.h
@@ -91,6 +91,15 @@ struct dpu_hw_ctl_ops {
u32 flushbits);
/**
+ * OR in the given flushbits to the cached pending_intf_flush_mask
+ * No effect on hardware
+ * @ctx : ctl path ctx pointer
+ * @flushbits : module flushmask
+ */
+ void (*update_pending_intf_flush)(struct dpu_hw_ctl *ctx,
+ u32 flushbits);
+
+ /**
* Write the value of the pending_flush_mask to hardware
* @ctx : ctl path ctx pointer
*/
@@ -130,11 +139,24 @@ struct dpu_hw_ctl_ops {
uint32_t (*get_bitmask_mixer)(struct dpu_hw_ctl *ctx,
enum dpu_lm blk);
+ /**
+ * Query the flush bit(s) to set in CTL_FLUSH for the given interface
+ * No effect on hardware
+ * @ctx : ctl path ctx pointer
+ */
int (*get_bitmask_intf)(struct dpu_hw_ctl *ctx,
u32 *flushbits,
enum dpu_intf blk);
/**
+ * Query the flush bit(s) to set in CTL_INTF_FLUSH for the given interface
+ * No effect on hardware
+ * @ctx : ctl path ctx pointer
+ */
+ int (*get_bitmask_active_intf)(struct dpu_hw_ctl *ctx,
+ u32 *flushbits, enum dpu_intf blk);
+
+ /**
* Set all blend stages to disabled
* @ctx : ctl path ctx pointer
*/
@@ -159,6 +181,7 @@ struct dpu_hw_ctl_ops {
* @mixer_count: number of mixers
* @mixer_hw_caps: mixer hardware capabilities
* @pending_flush_mask: storage for pending ctl_flush managed via ops
+ * @pending_intf_flush_mask: storage for pending ctl_intf_flush managed via ops
* @ops: operation list
*/
struct dpu_hw_ctl {
@@ -171,6 +194,7 @@ struct dpu_hw_ctl {
int mixer_count;
const struct dpu_lm_cfg *mixer_hw_caps;
u32 pending_flush_mask;
+ u32 pending_intf_flush_mask;
/* ops */
struct dpu_hw_ctl_ops ops;
@@ -195,7 +219,7 @@ static inline struct dpu_hw_ctl *to_dpu_hw_ctl(struct dpu_hw_blk *hw)
*/
struct dpu_hw_ctl *dpu_hw_ctl_init(enum dpu_ctl idx,
void __iomem *addr,
- struct dpu_mdss_cfg *m);
+ const struct dpu_mdss_cfg *m);
/**
* dpu_hw_ctl_destroy(): Destroys ctl driver context
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_interrupts.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_interrupts.c
index 8bfa7d0eede6..d84a84f7fe1a 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_interrupts.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_interrupts.c
@@ -800,8 +800,8 @@ static void dpu_hw_intr_dispatch_irq(struct dpu_hw_intr *intr,
start_idx = reg_idx * 32;
end_idx = start_idx + 32;
- if (start_idx >= ARRAY_SIZE(dpu_irq_map) ||
- end_idx > ARRAY_SIZE(dpu_irq_map))
+ if (!test_bit(reg_idx, &intr->irq_mask) ||
+ start_idx >= ARRAY_SIZE(dpu_irq_map))
continue;
/*
@@ -955,8 +955,11 @@ static int dpu_hw_intr_clear_irqs(struct dpu_hw_intr *intr)
if (!intr)
return -EINVAL;
- for (i = 0; i < ARRAY_SIZE(dpu_intr_set); i++)
- DPU_REG_WRITE(&intr->hw, dpu_intr_set[i].clr_off, 0xffffffff);
+ for (i = 0; i < ARRAY_SIZE(dpu_intr_set); i++) {
+ if (test_bit(i, &intr->irq_mask))
+ DPU_REG_WRITE(&intr->hw,
+ dpu_intr_set[i].clr_off, 0xffffffff);
+ }
/* ensure register writes go through */
wmb();
@@ -971,8 +974,11 @@ static int dpu_hw_intr_disable_irqs(struct dpu_hw_intr *intr)
if (!intr)
return -EINVAL;
- for (i = 0; i < ARRAY_SIZE(dpu_intr_set); i++)
- DPU_REG_WRITE(&intr->hw, dpu_intr_set[i].en_off, 0x00000000);
+ for (i = 0; i < ARRAY_SIZE(dpu_intr_set); i++) {
+ if (test_bit(i, &intr->irq_mask))
+ DPU_REG_WRITE(&intr->hw,
+ dpu_intr_set[i].en_off, 0x00000000);
+ }
/* ensure register writes go through */
wmb();
@@ -991,6 +997,9 @@ static void dpu_hw_intr_get_interrupt_statuses(struct dpu_hw_intr *intr)
spin_lock_irqsave(&intr->irq_lock, irq_flags);
for (i = 0; i < ARRAY_SIZE(dpu_intr_set); i++) {
+ if (!test_bit(i, &intr->irq_mask))
+ continue;
+
/* Read interrupt status */
intr->save_irq_status[i] = DPU_REG_READ(&intr->hw,
dpu_intr_set[i].status_off);
@@ -1115,6 +1124,7 @@ struct dpu_hw_intr *dpu_hw_intr_init(void __iomem *addr,
return ERR_PTR(-ENOMEM);
}
+ intr->irq_mask = m->mdss_irqs;
spin_lock_init(&intr->irq_lock);
return intr;
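
The irq_mask plumbing above lets a catalog advertise which interrupt register banks actually exist, so the clear/disable/status loops skip absent ones. A hedged sketch of the producing side; the bit positions are illustrative only (real catalogs would use the interrupt-register enum values as indices into dpu_intr_set[]):

    static const struct dpu_mdss_cfg example_cfg = {
        /* ...pipes, mixers, interfaces elided... */

        /* one bit per entry of dpu_intr_set[]; a clear bit = bank absent */
        .mdss_irqs = BIT(0) | BIT(1) | BIT(2) | BIT(7),
    };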
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_interrupts.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_interrupts.h
index 4edcf402dc46..fc9c98617281 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_interrupts.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_interrupts.h
@@ -187,6 +187,7 @@ struct dpu_hw_intr {
u32 *save_irq_status;
u32 irq_idx_tbl_size;
spinlock_t irq_lock;
+ unsigned long irq_mask;
};
/**
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_intf.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_intf.c
index dcd87cda13fe..efe9a5719c6b 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_intf.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_intf.c
@@ -56,8 +56,10 @@
#define INTF_FRAME_COUNT 0x0AC
#define INTF_LINE_COUNT 0x0B0
-static struct dpu_intf_cfg *_intf_offset(enum dpu_intf intf,
- struct dpu_mdss_cfg *m,
+#define INTF_MUX 0x25C
+
+static const struct dpu_intf_cfg *_intf_offset(enum dpu_intf intf,
+ const struct dpu_mdss_cfg *m,
void __iomem *addr,
struct dpu_hw_blk_reg_map *b)
{
@@ -218,6 +220,30 @@ static void dpu_hw_intf_setup_prg_fetch(
DPU_REG_WRITE(c, INTF_CONFIG, fetch_enable);
}
+static void dpu_hw_intf_bind_pingpong_blk(
+ struct dpu_hw_intf *intf,
+ bool enable,
+ const enum dpu_pingpong pp)
+{
+ struct dpu_hw_blk_reg_map *c;
+ u32 mux_cfg;
+
+ if (!intf)
+ return;
+
+ c = &intf->hw;
+
+ mux_cfg = DPU_REG_READ(c, INTF_MUX);
+ mux_cfg &= ~0xf;
+
+ if (enable)
+ mux_cfg |= (pp - PINGPONG_0) & 0x7;
+ else
+ mux_cfg |= 0xf;
+
+ DPU_REG_WRITE(c, INTF_MUX, mux_cfg);
+}
+
static void dpu_hw_intf_get_status(
struct dpu_hw_intf *intf,
struct intf_status *s)
@@ -254,16 +280,18 @@ static void _setup_intf_ops(struct dpu_hw_intf_ops *ops,
ops->get_status = dpu_hw_intf_get_status;
ops->enable_timing = dpu_hw_intf_enable_timing_engine;
ops->get_line_count = dpu_hw_intf_get_line_count;
+ if (cap & BIT(DPU_CTL_ACTIVE_CFG))
+ ops->bind_pingpong_blk = dpu_hw_intf_bind_pingpong_blk;
}
static struct dpu_hw_blk_ops dpu_hw_ops;
struct dpu_hw_intf *dpu_hw_intf_init(enum dpu_intf idx,
void __iomem *addr,
- struct dpu_mdss_cfg *m)
+ const struct dpu_mdss_cfg *m)
{
struct dpu_hw_intf *c;
- struct dpu_intf_cfg *cfg;
+ const struct dpu_intf_cfg *cfg;
c = kzalloc(sizeof(*c), GFP_KERNEL);
if (!c)
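
Per the helper above, INTF_MUX carries the pingpong selector in its low nibble, with 0xf meaning "disconnected". A hedged usage sketch; the caller shape is assumed, not taken from the patch:

    static void example_bind_pp(struct dpu_hw_intf *intf, enum dpu_pingpong pp)
    {
        /* only populated when the catalog sets DPU_CTL_ACTIVE_CFG */
        if (intf->ops.bind_pingpong_blk)
            intf->ops.bind_pingpong_blk(intf, true, pp);
    }

    static void example_unbind_pp(struct dpu_hw_intf *intf)
    {
        if (intf->ops.bind_pingpong_blk)
            intf->ops.bind_pingpong_blk(intf, false, PINGPONG_0);
    }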
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_intf.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_intf.h
index b03acc225c9b..85468981632d 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_intf.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_intf.h
@@ -52,6 +52,8 @@ struct intf_status {
* @ enable_timing: enable/disable timing engine
* @ get_status: returns if timing engine is enabled or not
* @ get_line_count: reads current vertical line counter
+ * @ bind_pingpong_blk: enable/disable the connection to the pingpong block
+ * that feeds pixels to this interface
*/
struct dpu_hw_intf_ops {
void (*setup_timing_gen)(struct dpu_hw_intf *intf,
@@ -68,6 +70,10 @@ struct dpu_hw_intf_ops {
struct intf_status *status);
u32 (*get_line_count)(struct dpu_hw_intf *intf);
+
+ void (*bind_pingpong_blk)(struct dpu_hw_intf *intf,
+ bool enable,
+ const enum dpu_pingpong pp);
};
struct dpu_hw_intf {
@@ -92,7 +98,7 @@ struct dpu_hw_intf {
*/
struct dpu_hw_intf *dpu_hw_intf_init(enum dpu_intf idx,
void __iomem *addr,
- struct dpu_mdss_cfg *m);
+ const struct dpu_mdss_cfg *m);
/**
* dpu_hw_intf_destroy(): Destroys INTF driver context
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_lm.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_lm.c
index 5bc39baa746a..37becd43bd54 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_lm.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_lm.c
@@ -24,8 +24,8 @@
#define LM_BLEND0_FG_ALPHA 0x04
#define LM_BLEND0_BG_ALPHA 0x08
-static struct dpu_lm_cfg *_lm_offset(enum dpu_lm mixer,
- struct dpu_mdss_cfg *m,
+static const struct dpu_lm_cfg *_lm_offset(enum dpu_lm mixer,
+ const struct dpu_mdss_cfg *m,
void __iomem *addr,
struct dpu_hw_blk_reg_map *b)
{
@@ -147,12 +147,13 @@ static void dpu_hw_lm_setup_color3(struct dpu_hw_mixer *ctx,
DPU_REG_WRITE(c, LM_OP_MODE, op_mode);
}
-static void _setup_mixer_ops(struct dpu_mdss_cfg *m,
+static void _setup_mixer_ops(const struct dpu_mdss_cfg *m,
struct dpu_hw_lm_ops *ops,
unsigned long features)
{
ops->setup_mixer_out = dpu_hw_lm_setup_out;
- if (IS_SDM845_TARGET(m->hwversion) || IS_SDM670_TARGET(m->hwversion))
+ if (IS_SDM845_TARGET(m->hwversion) || IS_SDM670_TARGET(m->hwversion) ||
+     IS_SC7180_TARGET(m->hwversion))
ops->setup_blend_config = dpu_hw_lm_setup_blend_config_sdm845;
else
ops->setup_blend_config = dpu_hw_lm_setup_blend_config;
@@ -164,10 +165,10 @@ static struct dpu_hw_blk_ops dpu_hw_ops;
struct dpu_hw_mixer *dpu_hw_lm_init(enum dpu_lm idx,
void __iomem *addr,
- struct dpu_mdss_cfg *m)
+ const struct dpu_mdss_cfg *m)
{
struct dpu_hw_mixer *c;
- struct dpu_lm_cfg *cfg;
+ const struct dpu_lm_cfg *cfg;
c = kzalloc(sizeof(*c), GFP_KERNEL);
if (!c)
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_lm.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_lm.h
index 147ace31cfc2..4a6b2de19ef6 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_lm.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_lm.h
@@ -91,7 +91,7 @@ static inline struct dpu_hw_mixer *to_dpu_hw_mixer(struct dpu_hw_blk *hw)
*/
struct dpu_hw_mixer *dpu_hw_lm_init(enum dpu_lm idx,
void __iomem *addr,
- struct dpu_mdss_cfg *m);
+ const struct dpu_mdss_cfg *m);
/**
* dpu_hw_lm_destroy(): Destroys layer mixer driver context
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_pingpong.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_pingpong.c
index 5dbaba9fd180..d110a40f0e73 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_pingpong.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_pingpong.c
@@ -28,8 +28,8 @@
#define PP_FBC_BUDGET_CTL 0x038
#define PP_FBC_LOSSY_MODE 0x03C
-static struct dpu_pingpong_cfg *_pingpong_offset(enum dpu_pingpong pp,
- struct dpu_mdss_cfg *m,
+static const struct dpu_pingpong_cfg *_pingpong_offset(enum dpu_pingpong pp,
+ const struct dpu_mdss_cfg *m,
void __iomem *addr,
struct dpu_hw_blk_reg_map *b)
{
@@ -195,10 +195,10 @@ static struct dpu_hw_blk_ops dpu_hw_ops;
struct dpu_hw_pingpong *dpu_hw_pingpong_init(enum dpu_pingpong idx,
void __iomem *addr,
- struct dpu_mdss_cfg *m)
+ const struct dpu_mdss_cfg *m)
{
struct dpu_hw_pingpong *c;
- struct dpu_pingpong_cfg *cfg;
+ const struct dpu_pingpong_cfg *cfg;
c = kzalloc(sizeof(*c), GFP_KERNEL);
if (!c)
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_pingpong.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_pingpong.h
index 58bdb9279aa8..3d6f46b1db30 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_pingpong.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_pingpong.h
@@ -106,7 +106,7 @@ struct dpu_hw_pingpong {
*/
struct dpu_hw_pingpong *dpu_hw_pingpong_init(enum dpu_pingpong idx,
void __iomem *addr,
- struct dpu_mdss_cfg *m);
+ const struct dpu_mdss_cfg *m);
/**
* dpu_hw_pingpong_destroy - destroys pingpong driver context
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_sspp.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_sspp.c
index 4f8b813aab81..82c5dbfdabc7 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_sspp.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_sspp.c
@@ -132,6 +132,7 @@
/* traffic shaper clock in Hz */
#define TS_CLK 19200000
static int _sspp_subblk_offset(struct dpu_hw_pipe *ctx,
int s_id,
u32 *idx)
@@ -657,7 +658,8 @@ static void _setup_layer_ops(struct dpu_hw_pipe *c,
test_bit(DPU_SSPP_SMART_DMA_V2, &c->cap->features))
c->ops.setup_multirect = dpu_hw_sspp_setup_multirect;
- if (test_bit(DPU_SSPP_SCALER_QSEED3, &features)) {
+ if (test_bit(DPU_SSPP_SCALER_QSEED3, &features) ||
+ test_bit(DPU_SSPP_SCALER_QSEED4, &features)) {
c->ops.setup_scaler = _dpu_hw_sspp_setup_scaler3;
c->ops.get_scaler_ver = _dpu_hw_sspp_get_scaler3_ver;
}
@@ -666,7 +668,7 @@ static void _setup_layer_ops(struct dpu_hw_pipe *c,
c->ops.setup_cdp = dpu_hw_sspp_setup_cdp;
}
-static struct dpu_sspp_cfg *_sspp_offset(enum dpu_sspp sspp,
+static const struct dpu_sspp_cfg *_sspp_offset(enum dpu_sspp sspp,
void __iomem *addr,
struct dpu_mdss_cfg *catalog,
struct dpu_hw_blk_reg_map *b)
@@ -696,7 +698,7 @@ struct dpu_hw_pipe *dpu_hw_sspp_init(enum dpu_sspp idx,
bool is_virtual_pipe)
{
struct dpu_hw_pipe *hw_pipe;
- struct dpu_sspp_cfg *cfg;
+ const struct dpu_sspp_cfg *cfg;
if (!addr || !catalog)
return ERR_PTR(-EINVAL);
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_sspp.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_sspp.h
index a3680b482b41..85b018a9b03c 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_sspp.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_sspp.h
@@ -27,7 +27,8 @@ struct dpu_hw_pipe;
*/
#define DPU_SSPP_SCALER ((1UL << DPU_SSPP_SCALER_RGB) | \
(1UL << DPU_SSPP_SCALER_QSEED2) | \
- (1UL << DPU_SSPP_SCALER_QSEED3))
+ (1UL << DPU_SSPP_SCALER_QSEED3) | \
+ (1UL << DPU_SSPP_SCALER_QSEED4))
/**
* Component indices
@@ -373,7 +374,7 @@ struct dpu_hw_pipe {
struct dpu_hw_blk base;
struct dpu_hw_blk_reg_map hw;
struct dpu_mdss_cfg *catalog;
- struct dpu_mdp_cfg *mdp;
+ const struct dpu_mdp_cfg *mdp;
/* Pipe */
enum dpu_sspp idx;
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_io_util.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_io_util.c
index 27fbeb504362..078afc5f5882 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_io_util.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_io_util.c
@@ -93,19 +93,12 @@ int msm_dss_enable_clk(struct dss_clk *clk_arry, int num_clk, int enable)
DEV_DBG("%pS->%s: enable '%s'\n",
__builtin_return_address(0), __func__,
clk_arry[i].clk_name);
- if (clk_arry[i].clk) {
- rc = clk_prepare_enable(clk_arry[i].clk);
- if (rc)
- DEV_ERR("%pS->%s: %s en fail. rc=%d\n",
- __builtin_return_address(0),
- __func__,
- clk_arry[i].clk_name, rc);
- } else {
- DEV_ERR("%pS->%s: '%s' is not available\n",
- __builtin_return_address(0), __func__,
- clk_arry[i].clk_name);
- rc = -EPERM;
- }
+ rc = clk_prepare_enable(clk_arry[i].clk);
+ if (rc)
+ DEV_ERR("%pS->%s: %s en fail. rc=%d\n",
+ __builtin_return_address(0),
+ __func__,
+ clk_arry[i].clk_name, rc);
if (rc && i) {
msm_dss_enable_clk(&clk_arry[i - 1],
@@ -119,12 +112,7 @@ int msm_dss_enable_clk(struct dss_clk *clk_arry, int num_clk, int enable)
__builtin_return_address(0), __func__,
clk_arry[i].clk_name);
- if (clk_arry[i].clk)
- clk_disable_unprepare(clk_arry[i].clk);
- else
- DEV_ERR("%pS->%s: '%s' is not available\n",
- __builtin_return_address(0), __func__,
- clk_arry[i].clk_name);
+ clk_disable_unprepare(clk_arry[i].clk);
}
}
@@ -187,6 +175,7 @@ int msm_dss_parse_clock(struct platform_device *pdev,
continue;
mp->clk_config[i].rate = rate;
mp->clk_config[i].type = DSS_CLK_PCLK;
+ mp->clk_config[i].max_rate = rate;
}
mp->num_clk = num_clk;
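
The deletions above lean on the common clock framework treating a NULL struct clk pointer as a no-op: clk_prepare_enable(NULL) returns 0 and clk_disable_unprepare(NULL) does nothing, so the per-clock availability checks were dead weight. A minimal sketch of the idiom (the clock name here is hypothetical):

    static int example_enable_iface_clk(struct platform_device *pdev)
    {
        /* returns NULL (not an error) when the clock is simply absent */
        struct clk *clk = devm_clk_get_optional(&pdev->dev, "iface");

        if (IS_ERR(clk))
            return PTR_ERR(clk);

        return clk_prepare_enable(clk); /* NULL-safe */
    }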
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.c
index 6c92f0fbeac9..cb08fafb1dc1 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.c
@@ -1059,6 +1059,7 @@ static const struct dev_pm_ops dpu_pm_ops = {
static const struct of_device_id dpu_dt_match[] = {
{ .compatible = "qcom,sdm845-dpu", },
+ { .compatible = "qcom,sc7180-dpu", },
{}
};
MODULE_DEVICE_TABLE(of, dpu_dt_match);
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_plane.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_plane.c
index 58d5acbcfc5c..3b9c33e694bf 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_plane.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_plane.c
@@ -53,8 +53,13 @@ enum {
R_MAX
};
+/*
+ * Default Preload Values
+ */
#define DPU_QSEED3_DEFAULT_PRELOAD_H 0x4
#define DPU_QSEED3_DEFAULT_PRELOAD_V 0x3
+#define DPU_QSEED4_DEFAULT_PRELOAD_V 0x2
+#define DPU_QSEED4_DEFAULT_PRELOAD_H 0x4
#define DEFAULT_REFRESH_RATE 60
@@ -477,8 +482,16 @@ static void _dpu_plane_setup_scaler3(struct dpu_plane *pdpu,
scale_cfg->src_width[i] /= chroma_subsmpl_h;
scale_cfg->src_height[i] /= chroma_subsmpl_v;
}
- scale_cfg->preload_x[i] = DPU_QSEED3_DEFAULT_PRELOAD_H;
- scale_cfg->preload_y[i] = DPU_QSEED3_DEFAULT_PRELOAD_V;
+
+ if (pdpu->pipe_hw->cap->features &
+ BIT(DPU_SSPP_SCALER_QSEED4)) {
+ scale_cfg->preload_x[i] = DPU_QSEED4_DEFAULT_PRELOAD_H;
+ scale_cfg->preload_y[i] = DPU_QSEED4_DEFAULT_PRELOAD_V;
+ } else {
+ scale_cfg->preload_x[i] = DPU_QSEED3_DEFAULT_PRELOAD_H;
+ scale_cfg->preload_y[i] = DPU_QSEED3_DEFAULT_PRELOAD_V;
+ }
+
pstate->pixel_ext.num_ext_pxls_top[i] =
scale_cfg->src_height[i];
pstate->pixel_ext.num_ext_pxls_left[i] =
@@ -738,7 +751,7 @@ done:
} else {
pstate[R0]->multirect_index = DPU_SSPP_RECT_0;
pstate[R1]->multirect_index = DPU_SSPP_RECT_1;
- };
+ }
DPU_DEBUG_PLANE(dpu_plane[R0], "R0: %d - %d\n",
pstate[R0]->multirect_mode, pstate[R0]->multirect_index);
@@ -858,7 +871,7 @@ static int dpu_plane_atomic_check(struct drm_plane *plane,
pdpu->pipe_sblk->maxupscale << 16,
true, true);
if (ret) {
- DPU_ERROR_PLANE(pdpu, "Check plane state failed (%d)\n", ret);
+ DPU_DEBUG_PLANE(pdpu, "Check plane state failed (%d)\n", ret);
return ret;
}
if (!state->visible)
@@ -884,13 +897,13 @@ static int dpu_plane_atomic_check(struct drm_plane *plane,
(!(pdpu->features & DPU_SSPP_SCALER) ||
!(pdpu->features & (BIT(DPU_SSPP_CSC)
| BIT(DPU_SSPP_CSC_10BIT))))) {
- DPU_ERROR_PLANE(pdpu,
+ DPU_DEBUG_PLANE(pdpu,
"plane doesn't have scaler/csc for yuv\n");
return -EINVAL;
/* check src bounds */
} else if (!dpu_plane_validate_src(&src, &fb_rect, min_src_size)) {
- DPU_ERROR_PLANE(pdpu, "invalid source " DRM_RECT_FMT "\n",
+ DPU_DEBUG_PLANE(pdpu, "invalid source " DRM_RECT_FMT "\n",
DRM_RECT_ARG(&src));
return -E2BIG;
@@ -899,19 +912,19 @@ static int dpu_plane_atomic_check(struct drm_plane *plane,
(src.x1 & 0x1 || src.y1 & 0x1 ||
drm_rect_width(&src) & 0x1 ||
drm_rect_height(&src) & 0x1)) {
- DPU_ERROR_PLANE(pdpu, "invalid yuv source " DRM_RECT_FMT "\n",
+ DPU_DEBUG_PLANE(pdpu, "invalid yuv source " DRM_RECT_FMT "\n",
DRM_RECT_ARG(&src));
return -EINVAL;
/* min dst support */
} else if (drm_rect_width(&dst) < 0x1 || drm_rect_height(&dst) < 0x1) {
- DPU_ERROR_PLANE(pdpu, "invalid dest rect " DRM_RECT_FMT "\n",
+ DPU_DEBUG_PLANE(pdpu, "invalid dest rect " DRM_RECT_FMT "\n",
DRM_RECT_ARG(&dst));
return -EINVAL;
/* check decimated source width */
} else if (drm_rect_width(&src) > max_linewidth) {
- DPU_ERROR_PLANE(pdpu, "invalid src " DRM_RECT_FMT " line:%u\n",
+ DPU_DEBUG_PLANE(pdpu, "invalid src " DRM_RECT_FMT " line:%u\n",
DRM_RECT_ARG(&src), max_linewidth);
return -E2BIG;
}
@@ -1337,7 +1350,8 @@ static int _dpu_plane_init_debugfs(struct drm_plane *plane)
pdpu->debugfs_root, &pdpu->debugfs_src);
if (cfg->features & BIT(DPU_SSPP_SCALER_QSEED3) ||
- cfg->features & BIT(DPU_SSPP_SCALER_QSEED2)) {
+ cfg->features & BIT(DPU_SSPP_SCALER_QSEED2) ||
+ cfg->features & BIT(DPU_SSPP_SCALER_QSEED4)) {
dpu_debugfs_setup_regset32(&pdpu->debugfs_scaler,
sblk->scaler_blk.base + cfg->base,
sblk->scaler_blk.len,
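
One pattern runs through the QSEED4 hunks above: the new scaler is programmed through the existing QSEED3 ops, and only the feature bit and preload defaults differ, so capability checks simply accept either bit. The test as used in _setup_layer_ops(), restated in isolation:

    static bool example_has_qseed3_compat_scaler(unsigned long features)
    {
        return test_bit(DPU_SSPP_SCALER_QSEED3, &features) ||
               test_bit(DPU_SSPP_SCALER_QSEED4, &features);
    }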
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_rm.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_rm.c
index ddc8412731af..23f5b1433b35 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_rm.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_rm.c
@@ -141,11 +141,11 @@ int dpu_rm_destroy(struct dpu_rm *rm)
static int _dpu_rm_hw_blk_create(
struct dpu_rm *rm,
- struct dpu_mdss_cfg *cat,
+ const struct dpu_mdss_cfg *cat,
void __iomem *mmio,
enum dpu_hw_blk_type type,
uint32_t id,
- void *hw_catalog_info)
+ const void *hw_catalog_info)
{
struct dpu_rm_hw_blk *blk;
void *hw;
@@ -215,7 +215,7 @@ int dpu_rm_init(struct dpu_rm *rm,
/* Interrogate HW catalog and create tracking items for hw blocks */
for (i = 0; i < cat->mixer_count; i++) {
- struct dpu_lm_cfg *lm = &cat->mixer[i];
+ const struct dpu_lm_cfg *lm = &cat->mixer[i];
if (lm->pingpong == PINGPONG_MAX) {
DPU_DEBUG("skip mixer %d without pingpong\n", lm->id);
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_vbif.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_vbif.c
index 991f4c8f8a12..93ab36bd8df3 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_vbif.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_vbif.c
@@ -299,7 +299,7 @@ void dpu_debugfs_vbif_init(struct dpu_kms *dpu_kms, struct dentry *debugfs_root)
entry = debugfs_create_dir("vbif", debugfs_root);
for (i = 0; i < dpu_kms->catalog->vbif_count; i++) {
- struct dpu_vbif_cfg *vbif = &dpu_kms->catalog->vbif[i];
+ const struct dpu_vbif_cfg *vbif = &dpu_kms->catalog->vbif[i];
snprintf(vbif_name, sizeof(vbif_name), "%d", vbif->id);
@@ -318,7 +318,7 @@ void dpu_debugfs_vbif_init(struct dpu_kms *dpu_kms, struct dentry *debugfs_root)
(u32 *)&vbif->default_ot_wr_limit);
for (j = 0; j < vbif->dynamic_ot_rd_tbl.count; j++) {
- struct dpu_vbif_dynamic_ot_cfg *cfg =
+ const struct dpu_vbif_dynamic_ot_cfg *cfg =
&vbif->dynamic_ot_rd_tbl.cfg[j];
snprintf(vbif_name, sizeof(vbif_name),
@@ -332,7 +332,7 @@ void dpu_debugfs_vbif_init(struct dpu_kms *dpu_kms, struct dentry *debugfs_root)
}
for (j = 0; j < vbif->dynamic_ot_wr_tbl.count; j++) {
- struct dpu_vbif_dynamic_ot_cfg *cfg =
+ const struct dpu_vbif_dynamic_ot_cfg *cfg =
&vbif->dynamic_ot_wr_tbl.cfg[j];
snprintf(vbif_name, sizeof(vbif_name),
diff --git a/drivers/gpu/drm/msm/disp/mdp4/mdp4_dsi_encoder.c b/drivers/gpu/drm/msm/disp/mdp4/mdp4_dsi_encoder.c
index 772f0753ed38..aaf2f26f8505 100644
--- a/drivers/gpu/drm/msm/disp/mdp4/mdp4_dsi_encoder.c
+++ b/drivers/gpu/drm/msm/disp/mdp4/mdp4_dsi_encoder.c
@@ -121,7 +121,7 @@ static void mdp4_dsi_encoder_enable(struct drm_encoder *encoder)
if (mdp4_dsi_encoder->enabled)
return;
- mdp4_crtc_set_config(encoder->crtc,
+ mdp4_crtc_set_config(encoder->crtc,
MDP4_DMA_CONFIG_PACK_ALIGN_MSB |
MDP4_DMA_CONFIG_DEFLKR_EN |
MDP4_DMA_CONFIG_DITHER_EN |
diff --git a/drivers/gpu/drm/msm/disp/mdp4/mdp4_lvds_connector.c b/drivers/gpu/drm/msm/disp/mdp4/mdp4_lvds_connector.c
index 9262ed2dc8c3..c7df71e2fafc 100644
--- a/drivers/gpu/drm/msm/disp/mdp4/mdp4_lvds_connector.c
+++ b/drivers/gpu/drm/msm/disp/mdp4/mdp4_lvds_connector.c
@@ -53,7 +53,7 @@ static int mdp4_lvds_connector_get_modes(struct drm_connector *connector)
if (panel) {
drm_panel_attach(panel, connector);
- ret = panel->funcs->get_modes(panel);
+ ret = drm_panel_get_modes(panel, connector);
drm_panel_detach(panel);
}
diff --git a/drivers/gpu/drm/msm/disp/mdp5/mdp5_cfg.c b/drivers/gpu/drm/msm/disp/mdp5/mdp5_cfg.c
index 1f48f64539a2..e3c4c250238b 100644
--- a/drivers/gpu/drm/msm/disp/mdp5/mdp5_cfg.c
+++ b/drivers/gpu/drm/msm/disp/mdp5/mdp5_cfg.c
@@ -902,7 +902,7 @@ struct mdp5_cfg_handler *mdp5_cfg_init(struct mdp5_kms *mdp5_kms,
major, minor);
ret = -ENXIO;
goto fail;
- };
+ }
/* only after mdp5_cfg global pointer's init can we access the hw */
for (i = 0; i < num_handlers; i++) {
diff --git a/drivers/gpu/drm/msm/dsi/dsi.h b/drivers/gpu/drm/msm/dsi/dsi.h
index eff1a4c61258..4de771d6f0be 100644
--- a/drivers/gpu/drm/msm/dsi/dsi.h
+++ b/drivers/gpu/drm/msm/dsi/dsi.h
@@ -178,6 +178,8 @@ int msm_dsi_host_modeset_init(struct mipi_dsi_host *host,
int msm_dsi_host_init(struct msm_dsi *msm_dsi);
int msm_dsi_runtime_suspend(struct device *dev);
int msm_dsi_runtime_resume(struct device *dev);
+int dsi_link_clk_set_rate_6g(struct msm_dsi_host *msm_host);
+int dsi_link_clk_set_rate_v2(struct msm_dsi_host *msm_host);
int dsi_link_clk_enable_6g(struct msm_dsi_host *msm_host);
int dsi_link_clk_enable_v2(struct msm_dsi_host *msm_host);
void dsi_link_clk_disable_6g(struct msm_dsi_host *msm_host);
diff --git a/drivers/gpu/drm/msm/dsi/dsi_cfg.c b/drivers/gpu/drm/msm/dsi/dsi_cfg.c
index 86ad3fdf207d..813d69deb5e8 100644
--- a/drivers/gpu/drm/msm/dsi/dsi_cfg.c
+++ b/drivers/gpu/drm/msm/dsi/dsi_cfg.c
@@ -153,6 +153,10 @@ static const char * const dsi_sdm845_bus_clk_names[] = {
"iface", "bus",
};
+static const char * const dsi_sc7180_bus_clk_names[] = {
+ "iface", "bus",
+};
+
static const struct msm_dsi_config sdm845_dsi_cfg = {
.io_offset = DSI_6G_REG_SHIFT,
.reg_cfg = {
@@ -167,7 +171,22 @@ static const struct msm_dsi_config sdm845_dsi_cfg = {
.num_dsi = 2,
};
+static const struct msm_dsi_config sc7180_dsi_cfg = {
+ .io_offset = DSI_6G_REG_SHIFT,
+ .reg_cfg = {
+ .num = 1,
+ .regs = {
+ {"vdda", 21800, 4 }, /* 1.2 V */
+ },
+ },
+ .bus_clk_names = dsi_sc7180_bus_clk_names,
+ .num_bus_clks = ARRAY_SIZE(dsi_sc7180_bus_clk_names),
+ .io_start = { 0xae94000 },
+ .num_dsi = 1,
+};
+
static const struct msm_dsi_host_cfg_ops msm_dsi_v2_host_ops = {
+ .link_clk_set_rate = dsi_link_clk_set_rate_v2,
.link_clk_enable = dsi_link_clk_enable_v2,
.link_clk_disable = dsi_link_clk_disable_v2,
.clk_init_ver = dsi_clk_init_v2,
@@ -179,6 +198,7 @@ static const struct msm_dsi_host_cfg_ops msm_dsi_v2_host_ops = {
};
static const struct msm_dsi_host_cfg_ops msm_dsi_6g_host_ops = {
+ .link_clk_set_rate = dsi_link_clk_set_rate_6g,
.link_clk_enable = dsi_link_clk_enable_6g,
.link_clk_disable = dsi_link_clk_disable_6g,
.clk_init_ver = NULL,
@@ -190,6 +210,7 @@ static const struct msm_dsi_host_cfg_ops msm_dsi_6g_host_ops = {
};
static const struct msm_dsi_host_cfg_ops msm_dsi_6g_v2_host_ops = {
+ .link_clk_set_rate = dsi_link_clk_set_rate_6g,
.link_clk_enable = dsi_link_clk_enable_6g,
.link_clk_disable = dsi_link_clk_disable_6g,
.clk_init_ver = dsi_clk_init_6g_v2,
@@ -223,6 +244,9 @@ static const struct msm_dsi_cfg_handler dsi_cfg_handlers[] = {
&msm8998_dsi_cfg, &msm_dsi_6g_v2_host_ops},
{MSM_DSI_VER_MAJOR_6G, MSM_DSI_6G_VER_MINOR_V2_2_1,
&sdm845_dsi_cfg, &msm_dsi_6g_v2_host_ops},
+ {MSM_DSI_VER_MAJOR_6G, MSM_DSI_6G_VER_MINOR_V2_4_1,
+ &sc7180_dsi_cfg, &msm_dsi_6g_v2_host_ops},
};
const struct msm_dsi_cfg_handler *msm_dsi_cfg_get(u32 major, u32 minor)
diff --git a/drivers/gpu/drm/msm/dsi/dsi_cfg.h b/drivers/gpu/drm/msm/dsi/dsi_cfg.h
index 50a37ceb6a25..217e24a65178 100644
--- a/drivers/gpu/drm/msm/dsi/dsi_cfg.h
+++ b/drivers/gpu/drm/msm/dsi/dsi_cfg.h
@@ -20,6 +20,7 @@
#define MSM_DSI_6G_VER_MINOR_V1_4_2 0x10040002
#define MSM_DSI_6G_VER_MINOR_V2_2_0 0x20000000
#define MSM_DSI_6G_VER_MINOR_V2_2_1 0x20020001
+#define MSM_DSI_6G_VER_MINOR_V2_4_1 0x20040001
#define MSM_DSI_V2_VER_MINOR_8064 0x0
@@ -35,6 +36,7 @@ struct msm_dsi_config {
};
struct msm_dsi_host_cfg_ops {
+ int (*link_clk_set_rate)(struct msm_dsi_host *msm_host);
int (*link_clk_enable)(struct msm_dsi_host *msm_host);
void (*link_clk_disable)(struct msm_dsi_host *msm_host);
int (*clk_init_ver)(struct msm_dsi_host *msm_host);
diff --git a/drivers/gpu/drm/msm/dsi/dsi_host.c b/drivers/gpu/drm/msm/dsi/dsi_host.c
index 458cec82ae13..11ae5b8444c3 100644
--- a/drivers/gpu/drm/msm/dsi/dsi_host.c
+++ b/drivers/gpu/drm/msm/dsi/dsi_host.c
@@ -505,7 +505,7 @@ int msm_dsi_runtime_resume(struct device *dev)
return dsi_bus_clk_enable(msm_host);
}
-int dsi_link_clk_enable_6g(struct msm_dsi_host *msm_host)
+int dsi_link_clk_set_rate_6g(struct msm_dsi_host *msm_host)
{
int ret;
@@ -515,13 +515,13 @@ int dsi_link_clk_enable_6g(struct msm_dsi_host *msm_host)
ret = clk_set_rate(msm_host->byte_clk, msm_host->byte_clk_rate);
if (ret) {
pr_err("%s: Failed to set rate byte clk, %d\n", __func__, ret);
- goto error;
+ return ret;
}
ret = clk_set_rate(msm_host->pixel_clk, msm_host->pixel_clk_rate);
if (ret) {
pr_err("%s: Failed to set rate pixel clk, %d\n", __func__, ret);
- goto error;
+ return ret;
}
if (msm_host->byte_intf_clk) {
@@ -530,10 +530,18 @@ int dsi_link_clk_enable_6g(struct msm_dsi_host *msm_host)
if (ret) {
pr_err("%s: Failed to set rate byte intf clk, %d\n",
__func__, ret);
- goto error;
+ return ret;
}
}
+ return 0;
+}
+
+int dsi_link_clk_enable_6g(struct msm_dsi_host *msm_host)
+{
+ int ret;
+
ret = clk_prepare_enable(msm_host->esc_clk);
if (ret) {
pr_err("%s: Failed to enable dsi esc clk\n", __func__);
@@ -573,7 +581,7 @@ error:
return ret;
}
-int dsi_link_clk_enable_v2(struct msm_dsi_host *msm_host)
+int dsi_link_clk_set_rate_v2(struct msm_dsi_host *msm_host)
{
int ret;
@@ -584,27 +592,34 @@ int dsi_link_clk_enable_v2(struct msm_dsi_host *msm_host)
ret = clk_set_rate(msm_host->byte_clk, msm_host->byte_clk_rate);
if (ret) {
pr_err("%s: Failed to set rate byte clk, %d\n", __func__, ret);
- goto error;
+ return ret;
}
ret = clk_set_rate(msm_host->esc_clk, msm_host->esc_clk_rate);
if (ret) {
pr_err("%s: Failed to set rate esc clk, %d\n", __func__, ret);
- goto error;
+ return ret;
}
ret = clk_set_rate(msm_host->src_clk, msm_host->src_clk_rate);
if (ret) {
pr_err("%s: Failed to set rate src clk, %d\n", __func__, ret);
- goto error;
+ return ret;
}
ret = clk_set_rate(msm_host->pixel_clk, msm_host->pixel_clk_rate);
if (ret) {
pr_err("%s: Failed to set rate pixel clk, %d\n", __func__, ret);
- goto error;
+ return ret;
}
+ return 0;
+}
+
+int dsi_link_clk_enable_v2(struct msm_dsi_host *msm_host)
+{
+ int ret;
+
ret = clk_prepare_enable(msm_host->byte_clk);
if (ret) {
pr_err("%s: Failed to enable dsi byte clk\n", __func__);
@@ -818,7 +833,7 @@ static void dsi_ctrl_config(struct msm_dsi_host *msm_host, bool enable,
u32 flags = msm_host->mode_flags;
enum mipi_dsi_pixel_format mipi_fmt = msm_host->format;
const struct msm_dsi_cfg_handler *cfg_hnd = msm_host->cfg_hnd;
- u32 data = 0;
+ u32 data = 0, lane_ctrl = 0;
if (!enable) {
dsi_write(msm_host, REG_DSI_CTRL, 0);
@@ -906,9 +921,11 @@ static void dsi_ctrl_config(struct msm_dsi_host *msm_host, bool enable,
dsi_write(msm_host, REG_DSI_LANE_SWAP_CTRL,
DSI_LANE_SWAP_CTRL_DLN_SWAP_SEL(msm_host->dlane_swap));
- if (!(flags & MIPI_DSI_CLOCK_NON_CONTINUOUS))
+ if (!(flags & MIPI_DSI_CLOCK_NON_CONTINUOUS)) {
+ lane_ctrl = dsi_read(msm_host, REG_DSI_LANE_CTRL);
dsi_write(msm_host, REG_DSI_LANE_CTRL,
- DSI_LANE_CTRL_CLKLN_HS_FORCE_REQUEST);
+ lane_ctrl | DSI_LANE_CTRL_CLKLN_HS_FORCE_REQUEST);
+ }
data |= DSI_CTRL_ENABLE;
@@ -1996,6 +2013,7 @@ int msm_dsi_host_xfer_prepare(struct mipi_dsi_host *host,
* mdp clock need to be enabled to receive dsi interrupt
*/
pm_runtime_get_sync(&msm_host->pdev->dev);
+ cfg_hnd->ops->link_clk_set_rate(msm_host);
cfg_hnd->ops->link_clk_enable(msm_host);
/* TODO: vote for bus bandwidth */
@@ -2344,7 +2362,9 @@ int msm_dsi_host_power_on(struct mipi_dsi_host *host,
}
pm_runtime_get_sync(&msm_host->pdev->dev);
- ret = cfg_hnd->ops->link_clk_enable(msm_host);
+ ret = cfg_hnd->ops->link_clk_set_rate(msm_host);
+ if (!ret)
+ ret = cfg_hnd->ops->link_clk_enable(msm_host);
if (ret) {
pr_err("%s: failed to enable link clocks. ret=%d\n",
__func__, ret);
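
With the split above, link-clock rate programming and clock gating become separate host ops, so callers can bail on a set_rate failure before any clock is enabled. The power-on call shape, per the hunk in msm_dsi_host_power_on() (error reporting trimmed):

    static int example_link_clks_on(struct msm_dsi_host *msm_host,
                                    const struct msm_dsi_host_cfg_ops *ops)
    {
        int ret;

        ret = ops->link_clk_set_rate(msm_host);
        if (!ret)
            ret = ops->link_clk_enable(msm_host);

        return ret;
    }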
diff --git a/drivers/gpu/drm/msm/dsi/dsi_manager.c b/drivers/gpu/drm/msm/dsi/dsi_manager.c
index 271aa7bbca92..104115d112eb 100644
--- a/drivers/gpu/drm/msm/dsi/dsi_manager.c
+++ b/drivers/gpu/drm/msm/dsi/dsi_manager.c
@@ -329,7 +329,7 @@ static int dsi_mgr_connector_get_modes(struct drm_connector *connector)
* attached to the drm_panel.
*/
drm_panel_attach(panel, connector);
- num = drm_panel_get_modes(panel);
+ num = drm_panel_get_modes(panel, connector);
if (!num)
return 0;
@@ -432,20 +432,8 @@ static void dsi_mgr_bridge_pre_enable(struct drm_bridge *bridge)
}
}
- if (panel) {
- ret = drm_panel_enable(panel);
- if (ret) {
- pr_err("%s: enable panel %d failed, %d\n", __func__, id,
- ret);
- goto panel_en_fail;
- }
- }
-
return;
-panel_en_fail:
- if (is_dual_dsi && msm_dsi1)
- msm_dsi_host_disable(msm_dsi1->host);
host1_en_fail:
msm_dsi_host_disable(host);
host_en_fail:
@@ -464,12 +452,51 @@ phy_en_fail:
static void dsi_mgr_bridge_enable(struct drm_bridge *bridge)
{
- DBG("");
+ int id = dsi_mgr_bridge_get_id(bridge);
+ struct msm_dsi *msm_dsi = dsi_mgr_get_dsi(id);
+ struct drm_panel *panel = msm_dsi->panel;
+ bool is_dual_dsi = IS_DUAL_DSI();
+ int ret;
+
+ DBG("id=%d", id);
+ if (!msm_dsi_device_connected(msm_dsi))
+ return;
+
+ /* Do nothing with the host if it is the slave DSI in a dual-DSI setup */
+ if (is_dual_dsi && !IS_MASTER_DSI_LINK(id))
+ return;
+
+ if (panel) {
+ ret = drm_panel_enable(panel);
+ if (ret) {
+ pr_err("%s: enable panel %d failed, %d\n", __func__, id,
+ ret);
+ }
+ }
}
static void dsi_mgr_bridge_disable(struct drm_bridge *bridge)
{
- DBG("");
+ int id = dsi_mgr_bridge_get_id(bridge);
+ struct msm_dsi *msm_dsi = dsi_mgr_get_dsi(id);
+ struct drm_panel *panel = msm_dsi->panel;
+ bool is_dual_dsi = IS_DUAL_DSI();
+ int ret;
+
+ DBG("id=%d", id);
+ if (!msm_dsi_device_connected(msm_dsi))
+ return;
+
+ /* Do nothing with the host if it is the slave DSI in a dual-DSI setup */
+ if (is_dual_dsi && !IS_MASTER_DSI_LINK(id))
+ return;
+
+ if (panel) {
+ ret = drm_panel_disable(panel);
+ if (ret)
+ pr_err("%s: Panel %d OFF failed, %d\n", __func__, id,
+ ret);
+ }
}
static void dsi_mgr_bridge_post_disable(struct drm_bridge *bridge)
@@ -495,13 +522,6 @@ static void dsi_mgr_bridge_post_disable(struct drm_bridge *bridge)
if (is_dual_dsi && !IS_MASTER_DSI_LINK(id))
goto disable_phy;
- if (panel) {
- ret = drm_panel_disable(panel);
- if (ret)
- pr_err("%s: Panel %d OFF failed, %d\n", __func__, id,
- ret);
- }
-
ret = msm_dsi_host_disable(host);
if (ret)
pr_err("%s: host %d disable failed, %d\n", __func__, id, ret);
diff --git a/drivers/gpu/drm/msm/edp/edp_bridge.c b/drivers/gpu/drm/msm/edp/edp_bridge.c
index 2950bba4aca9..b65b5cc2dba2 100644
--- a/drivers/gpu/drm/msm/edp/edp_bridge.c
+++ b/drivers/gpu/drm/msm/edp/edp_bridge.c
@@ -55,8 +55,14 @@ static void edp_bridge_mode_set(struct drm_bridge *bridge,
DBG("set mode: " DRM_MODE_FMT, DRM_MODE_ARG(mode));
list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
- if ((connector->encoder != NULL) &&
- (connector->encoder->bridge == bridge)) {
+ struct drm_encoder *encoder = connector->encoder;
+ struct drm_bridge *first_bridge;
+
+ if (!encoder)
+ continue;
+
+ first_bridge = drm_bridge_chain_get_first_bridge(encoder);
+ if (bridge == first_bridge) {
msm_edp_ctrl_timing_cfg(edp->ctrl,
adjusted_mode, &connector->display_info);
break;
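
Since encoder->bridge is gone in the bridge-chain world, "is this bridge the first one on the encoder" is now asked through the chain helper, as above. The test reduced to its core (function name illustrative):

    static bool example_is_first_bridge(struct drm_encoder *encoder,
                                        struct drm_bridge *bridge)
    {
        return encoder && drm_bridge_chain_get_first_bridge(encoder) == bridge;
    }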
diff --git a/drivers/gpu/drm/msm/hdmi/hdmi_connector.c b/drivers/gpu/drm/msm/hdmi/hdmi_connector.c
index 839822d894d0..58707a1f3878 100644
--- a/drivers/gpu/drm/msm/hdmi/hdmi_connector.c
+++ b/drivers/gpu/drm/msm/hdmi/hdmi_connector.c
@@ -101,7 +101,7 @@ static int gpio_config(struct hdmi *hdmi, bool on)
gpiod_set_value_cansleep(gpio.gpiod, value);
}
- };
+ }
DBG("gpio off");
}
@@ -433,8 +433,10 @@ struct drm_connector *msm_hdmi_connector_init(struct hdmi *hdmi)
connector = &hdmi_connector->base;
- drm_connector_init(hdmi->dev, connector, &hdmi_connector_funcs,
- DRM_MODE_CONNECTOR_HDMIA);
+ drm_connector_init_with_ddc(hdmi->dev, connector,
+ &hdmi_connector_funcs,
+ DRM_MODE_CONNECTOR_HDMIA,
+ hdmi->i2c);
drm_connector_helper_add(connector, &msm_hdmi_connector_helper_funcs);
connector->polled = DRM_CONNECTOR_POLL_CONNECT |
diff --git a/drivers/gpu/drm/msm/msm_drv.c b/drivers/gpu/drm/msm/msm_drv.c
index c84f0a8b3f2c..c26219c7a49f 100644
--- a/drivers/gpu/drm/msm/msm_drv.c
+++ b/drivers/gpu/drm/msm/msm_drv.c
@@ -138,7 +138,7 @@ void __iomem *msm_ioremap(struct platform_device *pdev, const char *name,
size = resource_size(res);
- ptr = devm_ioremap_nocache(&pdev->dev, res->start, size);
+ ptr = devm_ioremap(&pdev->dev, res->start, size);
if (!ptr) {
DRM_DEV_ERROR(&pdev->dev, "failed to ioremap: %s\n", name);
return ERR_PTR(-ENOMEM);
@@ -1192,7 +1192,8 @@ static int add_display_components(struct device *dev,
* the interfaces to our components list.
*/
if (of_device_is_compatible(dev->of_node, "qcom,mdss") ||
- of_device_is_compatible(dev->of_node, "qcom,sdm845-mdss")) {
+ of_device_is_compatible(dev->of_node, "qcom,sdm845-mdss") ||
+ of_device_is_compatible(dev->of_node, "qcom,sc7180-mdss")) {
ret = of_platform_populate(dev->of_node, NULL, NULL, dev);
if (ret) {
DRM_DEV_ERROR(dev, "failed to populate children devices\n");
@@ -1317,6 +1318,7 @@ static const struct of_device_id dt_match[] = {
{ .compatible = "qcom,mdp4", .data = (void *)KMS_MDP4 },
{ .compatible = "qcom,mdss", .data = (void *)KMS_MDP5 },
{ .compatible = "qcom,sdm845-mdss", .data = (void *)KMS_DPU },
+ { .compatible = "qcom,sc7180-mdss", .data = (void *)KMS_DPU },
{}
};
MODULE_DEVICE_TABLE(of, dt_match);
diff --git a/drivers/gpu/drm/msm/msm_drv.h b/drivers/gpu/drm/msm/msm_drv.h
index 71547e756e29..740bf7c70d8f 100644
--- a/drivers/gpu/drm/msm/msm_drv.h
+++ b/drivers/gpu/drm/msm/msm_drv.h
@@ -454,8 +454,7 @@ static inline unsigned long timeout_to_jiffies(const ktime_t *timeout)
remaining_jiffies = 0;
} else {
ktime_t rem = ktime_sub(*timeout, now);
- struct timespec ts = ktime_to_timespec(rem);
- remaining_jiffies = timespec_to_jiffies(&ts);
+ remaining_jiffies = ktime_divns(rem, NSEC_PER_SEC / HZ);
}
return remaining_jiffies;
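
The replacement arithmetic above avoids the deprecated struct timespec round-trip: a jiffy spans NSEC_PER_SEC / HZ nanoseconds, so dividing the remaining ktime by that span yields jiffies directly (e.g. with HZ=250, 100 ms left gives 100000000 / 4000000 = 25). Reduced to its core:

    static unsigned long example_ns_to_jiffies(s64 ns)
    {
        /* one jiffy = NSEC_PER_SEC / HZ nanoseconds */
        return ns / (NSEC_PER_SEC / HZ);
    }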
diff --git a/drivers/gpu/drm/msm/msm_fbdev.c b/drivers/gpu/drm/msm/msm_fbdev.c
index cff198b2f470..db48867df47d 100644
--- a/drivers/gpu/drm/msm/msm_fbdev.c
+++ b/drivers/gpu/drm/msm/msm_fbdev.c
@@ -26,7 +26,7 @@ struct msm_fbdev {
struct drm_framebuffer *fb;
};
-static struct fb_ops msm_fb_ops = {
+static const struct fb_ops msm_fb_ops = {
.owner = THIS_MODULE,
DRM_FB_HELPER_DEFAULT_OPS,
diff --git a/drivers/gpu/drm/msm/msm_gem_submit.c b/drivers/gpu/drm/msm/msm_gem_submit.c
index be5327af16fa..385d4965a8d0 100644
--- a/drivers/gpu/drm/msm/msm_gem_submit.c
+++ b/drivers/gpu/drm/msm/msm_gem_submit.c
@@ -54,7 +54,6 @@ static struct msm_gem_submit *submit_create(struct drm_device *dev,
INIT_LIST_HEAD(&submit->node);
INIT_LIST_HEAD(&submit->bo_list);
- ww_acquire_init(&submit->ticket, &reservation_ww_class);
return submit;
}
@@ -158,7 +157,7 @@ static void submit_unlock_unpin_bo(struct msm_gem_submit *submit,
msm_gem_unpin_iova(&msm_obj->base, submit->aspace);
if (submit->bos[i].flags & BO_LOCKED)
- ww_mutex_unlock(&msm_obj->base.resv->lock);
+ dma_resv_unlock(msm_obj->base.resv);
if (backoff && !(submit->bos[i].flags & BO_VALID))
submit->bos[i].iova = 0;
@@ -181,8 +180,8 @@ retry:
contended = i;
if (!(submit->bos[i].flags & BO_LOCKED)) {
- ret = ww_mutex_lock_interruptible(&msm_obj->base.resv->lock,
- &submit->ticket);
+ ret = dma_resv_lock_interruptible(msm_obj->base.resv,
+ &submit->ticket);
if (ret)
goto fail;
submit->bos[i].flags |= BO_LOCKED;
@@ -203,8 +202,8 @@ fail:
if (ret == -EDEADLK) {
struct msm_gem_object *msm_obj = submit->bos[contended].obj;
/* we lost out in a seqno race, lock and retry.. */
- ret = ww_mutex_lock_slow_interruptible(&msm_obj->base.resv->lock,
- &submit->ticket);
+ ret = dma_resv_lock_slow_interruptible(msm_obj->base.resv,
+ &submit->ticket);
if (!ret) {
submit->bos[contended].flags |= BO_LOCKED;
slow_locked = contended;
@@ -390,8 +389,6 @@ static void submit_cleanup(struct msm_gem_submit *submit)
list_del_init(&msm_obj->submit_entry);
drm_gem_object_put(&msm_obj->base);
}
-
- ww_acquire_fini(&submit->ticket);
}
int msm_ioctl_gem_submit(struct drm_device *dev, void *data,
@@ -408,6 +405,7 @@ int msm_ioctl_gem_submit(struct drm_device *dev, void *data,
struct msm_ringbuffer *ring;
int out_fence_fd = -1;
struct pid *pid = get_pid(task_pid(current));
+ bool has_ww_ticket = false;
unsigned i;
int ret, submitid;
if (!gpu)
@@ -489,6 +487,9 @@ int msm_ioctl_gem_submit(struct drm_device *dev, void *data,
if (ret)
goto out;
+ /* copy_*_user while holding a ww ticket upsets lockdep */
+ ww_acquire_init(&submit->ticket, &reservation_ww_class);
+ has_ww_ticket = true;
ret = submit_lock_objects(submit);
if (ret)
goto out;
@@ -588,6 +589,8 @@ int msm_ioctl_gem_submit(struct drm_device *dev, void *data,
out:
submit_cleanup(submit);
+ if (has_ww_ticket)
+ ww_acquire_fini(&submit->ticket);
if (ret)
msm_gem_submit_free(submit);
out_unlock:
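
The gem_submit reshuffle above narrows the ww acquire ticket to the locking phase only, because calling copy_from_user() while holding a ticket trips lockdep. A hedged outline of the resulting lifetime (control flow heavily simplified):

    static int example_submit_flow(struct msm_gem_submit *submit)
    {
        int ret;

        /* user copies happen before this point, ticket-free */
        ww_acquire_init(&submit->ticket, &reservation_ww_class);

        ret = submit_lock_objects(submit); /* dma_resv_lock on each BO */

        /* ... pin, relocate, queue ... */

        ww_acquire_fini(&submit->ticket); /* only if init actually ran */
        return ret;
    }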
diff --git a/drivers/gpu/drm/msm/msm_gpu.h b/drivers/gpu/drm/msm/msm_gpu.h
index ab8f0f9c9dc8..be5bc2e8425c 100644
--- a/drivers/gpu/drm/msm/msm_gpu.h
+++ b/drivers/gpu/drm/msm/msm_gpu.h
@@ -111,8 +111,15 @@ struct msm_gpu {
struct clk *ebi1_clk, *core_clk, *rbbmtimer_clk;
uint32_t fast_rate;
+ /* The gfx-mem interconnect path that's used by all GPU types. */
struct icc_path *icc_path;
+ /*
+ * Second interconnect path for some A3xx and all A4xx GPUs to the
+ * On Chip MEMory (OCMEM).
+ */
+ struct icc_path *ocmem_icc_path;
+
/* Hang and Inactivity Detection:
*/
#define DRM_MSM_INACTIVE_PERIOD 66 /* in ms (roughly four frames) */
diff --git a/drivers/gpu/drm/mxsfb/mxsfb_out.c b/drivers/gpu/drm/mxsfb/mxsfb_out.c
index 4eb94744c526..9eca1605d11d 100644
--- a/drivers/gpu/drm/mxsfb/mxsfb_out.c
+++ b/drivers/gpu/drm/mxsfb/mxsfb_out.c
@@ -31,7 +31,7 @@ static int mxsfb_panel_get_modes(struct drm_connector *connector)
drm_connector_to_mxsfb_drm_private(connector);
if (mxsfb->panel)
- return drm_panel_get_modes(mxsfb->panel);
+ return drm_panel_get_modes(mxsfb->panel, connector);
return 0;
}
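
The drm_panel_get_modes() signature change shows up in three drivers in this diff (mdp4 LVDS, the DSI manager, mxsfb): the probed connector is now passed in so the panel can attach modes to it directly. The call-site shape, as a sketch:

    static int example_connector_get_modes(struct drm_connector *connector,
                                           struct drm_panel *panel)
    {
        return panel ? drm_panel_get_modes(panel, connector) : 0;
    }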
diff --git a/drivers/gpu/drm/nouveau/Kconfig b/drivers/gpu/drm/nouveau/Kconfig
index 3558df043592..d6e4ae1ef705 100644
--- a/drivers/gpu/drm/nouveau/Kconfig
+++ b/drivers/gpu/drm/nouveau/Kconfig
@@ -2,7 +2,8 @@
config DRM_NOUVEAU
tristate "Nouveau (NVIDIA) cards"
depends on DRM && PCI && MMU
- select FW_LOADER
+ select IOMMU_API
+ select FW_LOADER
select DRM_KMS_HELPER
select DRM_TTM
select BACKLIGHT_CLASS_DEVICE if DRM_NOUVEAU_BACKLIGHT
@@ -16,6 +17,7 @@ config DRM_NOUVEAU
select INPUT if ACPI && X86
select THERMAL if ACPI && X86
select ACPI_VIDEO if ACPI && X86
+ select SND_HDA_COMPONENT if SND_HDA_CORE
help
Choose this option for open-source NVIDIA support.
diff --git a/drivers/gpu/drm/nouveau/dispnv04/arb.c b/drivers/gpu/drm/nouveau/dispnv04/arb.c
index 362495535e69..9d4a2d97507e 100644
--- a/drivers/gpu/drm/nouveau/dispnv04/arb.c
+++ b/drivers/gpu/drm/nouveau/dispnv04/arb.c
@@ -53,8 +53,8 @@ struct nv_sim_state {
static void
nv04_calc_arb(struct nv_fifo_info *fifo, struct nv_sim_state *arb)
{
- int pagemiss, cas, width, bpp;
- int nvclks, mclks, pclks, crtpagemiss;
+ int pagemiss, cas, bpp;
+ int nvclks, mclks, crtpagemiss;
int found, mclk_extra, mclk_loop, cbs, m1, p1;
int mclk_freq, pclk_freq, nvclk_freq;
int us_m, us_n, us_p, crtc_drain_rate;
@@ -65,11 +65,9 @@ nv04_calc_arb(struct nv_fifo_info *fifo, struct nv_sim_state *arb)
nvclk_freq = arb->nvclk_khz;
pagemiss = arb->mem_page_miss;
cas = arb->mem_latency;
- width = arb->memory_width >> 6;
bpp = arb->bpp;
cbs = 128;
- pclks = 2;
nvclks = 10;
mclks = 13 + cas;
mclk_extra = 3;
diff --git a/drivers/gpu/drm/nouveau/dispnv04/tvnv17.c b/drivers/gpu/drm/nouveau/dispnv04/tvnv17.c
index 03466f04c741..3a9489ed6544 100644
--- a/drivers/gpu/drm/nouveau/dispnv04/tvnv17.c
+++ b/drivers/gpu/drm/nouveau/dispnv04/tvnv17.c
@@ -644,16 +644,13 @@ static int nv17_tv_create_resources(struct drm_encoder *encoder,
int i;
if (nouveau_tv_norm) {
- for (i = 0; i < num_tv_norms; i++) {
- if (!strcmp(nv17_tv_norm_names[i], nouveau_tv_norm)) {
- tv_enc->tv_norm = i;
- break;
- }
- }
-
- if (i == num_tv_norms)
+ i = match_string(nv17_tv_norm_names, num_tv_norms,
+ nouveau_tv_norm);
+ if (i < 0)
NV_WARN(drm, "Invalid TV norm setting \"%s\"\n",
nouveau_tv_norm);
+ else
+ tv_enc->tv_norm = i;
}
drm_mode_create_tv_properties(dev, num_tv_norms, nv17_tv_norm_names);
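
match_string() folds the open-coded scan-and-sentinel loop above into one call: it returns the index of the first exact match, or -EINVAL when the string is absent. A self-contained example with invented norm names:

    static int example_pick_norm(const char *name)
    {
        static const char * const norms[] = { "PAL", "NTSC-M", "hd720p" };

        /* >= 0: index into norms[]; -EINVAL: keep the current default */
        return match_string(norms, ARRAY_SIZE(norms), name);
    }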
diff --git a/drivers/gpu/drm/nouveau/dispnv50/base907c.c b/drivers/gpu/drm/nouveau/dispnv50/base907c.c
index 5f2de77e0f32..224a34c340fe 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/base907c.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/base907c.c
@@ -75,12 +75,16 @@ base907c_xlut_set(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw)
}
}
-static void
-base907c_ilut(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw)
+static bool
+base907c_ilut(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw, int size)
{
- asyw->xlut.i.mode = 7;
+ if (size != 256 && size != 1024)
+ return false;
+
+ asyw->xlut.i.mode = size == 1024 ? 4 : 7;
asyw->xlut.i.enable = 2;
asyw->xlut.i.load = head907d_olut_load;
+ return true;
}
static inline u32
@@ -160,6 +164,7 @@ base907c = {
.csc_set = base907c_csc_set,
.csc_clr = base907c_csc_clr,
.olut_core = true,
+ .ilut_size = 1024,
.xlut_set = base907c_xlut_set,
.xlut_clr = base907c_xlut_clr,
.image_set = base907c_image_set,
diff --git a/drivers/gpu/drm/nouveau/dispnv50/disp.c b/drivers/gpu/drm/nouveau/dispnv50/disp.c
index 63425e246018..2f123082c85d 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/disp.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/disp.c
@@ -29,6 +29,7 @@
#include <linux/dma-mapping.h>
#include <linux/hdmi.h>
+#include <linux/component.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_dp_helper.h>
@@ -476,12 +477,113 @@ nv50_dac_create(struct drm_connector *connector, struct dcb_output *dcbe)
return 0;
}
+/*
+ * audio component binding for ELD notification
+ */
+static void
+nv50_audio_component_eld_notify(struct drm_audio_component *acomp, int port)
+{
+ if (acomp && acomp->audio_ops && acomp->audio_ops->pin_eld_notify)
+ acomp->audio_ops->pin_eld_notify(acomp->audio_ops->audio_ptr,
+ port, -1);
+}
+
+static int
+nv50_audio_component_get_eld(struct device *kdev, int port, int pipe,
+ bool *enabled, unsigned char *buf, int max_bytes)
+{
+ struct drm_device *drm_dev = dev_get_drvdata(kdev);
+ struct nouveau_drm *drm = nouveau_drm(drm_dev);
+ struct drm_encoder *encoder;
+ struct nouveau_encoder *nv_encoder;
+ struct nouveau_connector *nv_connector;
+ struct nouveau_crtc *nv_crtc;
+ int ret = 0;
+
+ *enabled = false;
+ drm_for_each_encoder(encoder, drm->dev) {
+ nv_encoder = nouveau_encoder(encoder);
+ nv_connector = nouveau_encoder_connector_get(nv_encoder);
+ nv_crtc = nouveau_crtc(encoder->crtc);
+ if (!nv_connector || !nv_crtc || nv_crtc->index != port)
+ continue;
+ *enabled = drm_detect_monitor_audio(nv_connector->edid);
+ if (*enabled) {
+ ret = drm_eld_size(nv_connector->base.eld);
+ memcpy(buf, nv_connector->base.eld,
+ min(max_bytes, ret));
+ }
+ break;
+ }
+ return ret;
+}
+
+static const struct drm_audio_component_ops nv50_audio_component_ops = {
+ .get_eld = nv50_audio_component_get_eld,
+};
+
+static int
+nv50_audio_component_bind(struct device *kdev, struct device *hda_kdev,
+ void *data)
+{
+ struct drm_device *drm_dev = dev_get_drvdata(kdev);
+ struct nouveau_drm *drm = nouveau_drm(drm_dev);
+ struct drm_audio_component *acomp = data;
+
+ if (WARN_ON(!device_link_add(hda_kdev, kdev, DL_FLAG_STATELESS)))
+ return -ENOMEM;
+
+ drm_modeset_lock_all(drm_dev);
+ acomp->ops = &nv50_audio_component_ops;
+ acomp->dev = kdev;
+ drm->audio.component = acomp;
+ drm_modeset_unlock_all(drm_dev);
+ return 0;
+}
+
+static void
+nv50_audio_component_unbind(struct device *kdev, struct device *hda_kdev,
+ void *data)
+{
+ struct drm_device *drm_dev = dev_get_drvdata(kdev);
+ struct nouveau_drm *drm = nouveau_drm(drm_dev);
+ struct drm_audio_component *acomp = data;
+
+ drm_modeset_lock_all(drm_dev);
+ drm->audio.component = NULL;
+ acomp->ops = NULL;
+ acomp->dev = NULL;
+ drm_modeset_unlock_all(drm_dev);
+}
+
+static const struct component_ops nv50_audio_component_bind_ops = {
+ .bind = nv50_audio_component_bind,
+ .unbind = nv50_audio_component_unbind,
+};
+
+static void
+nv50_audio_component_init(struct nouveau_drm *drm)
+{
+ if (!component_add(drm->dev->dev, &nv50_audio_component_bind_ops))
+ drm->audio.component_registered = true;
+}
+
+static void
+nv50_audio_component_fini(struct nouveau_drm *drm)
+{
+ if (drm->audio.component_registered) {
+ component_del(drm->dev->dev, &nv50_audio_component_bind_ops);
+ drm->audio.component_registered = false;
+ }
+}
+
/******************************************************************************
* Audio
*****************************************************************************/
static void
nv50_audio_disable(struct drm_encoder *encoder, struct nouveau_crtc *nv_crtc)
{
+ struct nouveau_drm *drm = nouveau_drm(encoder->dev);
struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
struct nv50_disp *disp = nv50_disp(encoder->dev);
struct {
@@ -496,11 +598,14 @@ nv50_audio_disable(struct drm_encoder *encoder, struct nouveau_crtc *nv_crtc)
};
nvif_mthd(&disp->disp->object, 0, &args, sizeof(args));
+
+ nv50_audio_component_eld_notify(drm->audio.component, nv_crtc->index);
}
static void
nv50_audio_enable(struct drm_encoder *encoder, struct drm_display_mode *mode)
{
+ struct nouveau_drm *drm = nouveau_drm(encoder->dev);
struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
struct nouveau_crtc *nv_crtc = nouveau_crtc(encoder->crtc);
struct nouveau_connector *nv_connector;
@@ -527,6 +632,8 @@ nv50_audio_enable(struct drm_encoder *encoder, struct drm_display_mode *mode)
nvif_mthd(&disp->disp->object, 0, &args,
sizeof(args.base) + drm_eld_size(args.data));
+
+ nv50_audio_component_eld_notify(drm->audio.component, nv_crtc->index);
}
/******************************************************************************
@@ -660,7 +767,6 @@ struct nv50_mstm {
struct nouveau_encoder *outp;
struct drm_dp_mst_topology_mgr mgr;
- struct nv50_msto *msto[4];
bool modified;
bool disabled;
@@ -726,7 +832,6 @@ nv50_msto_cleanup(struct nv50_msto *msto)
drm_dp_mst_deallocate_vcpi(&mstm->mgr, mstc->port);
msto->mstc = NULL;
- msto->head = NULL;
msto->disabled = false;
}
@@ -806,11 +911,11 @@ nv50_msto_atomic_check(struct drm_encoder *encoder,
* topology
*/
asyh->or.bpc = min(connector->display_info.bpc, 8U);
- asyh->dp.pbn = drm_dp_calc_pbn_mode(clock, asyh->or.bpc * 3);
+ asyh->dp.pbn = drm_dp_calc_pbn_mode(clock, asyh->or.bpc * 3, false);
}
slots = drm_dp_atomic_find_vcpi_slots(state, &mstm->mgr, mstc->port,
- asyh->dp.pbn);
+ asyh->dp.pbn, 0);
if (slots < 0)
return slots;
@@ -872,7 +977,6 @@ nv50_msto_enable(struct drm_encoder *encoder)
mstm->outp->update(mstm->outp, head->base.index, armh, proto,
nv50_dp_bpc_to_depth(armh->or.bpc));
- msto->head = head;
msto->mstc = mstc;
mstm->modified = true;
}
@@ -913,45 +1017,40 @@ nv50_msto = {
.destroy = nv50_msto_destroy,
};
-static int
-nv50_msto_new(struct drm_device *dev, u32 heads, const char *name, int id,
- struct nv50_msto **pmsto)
+static struct nv50_msto *
+nv50_msto_new(struct drm_device *dev, struct nv50_head *head, int id)
{
struct nv50_msto *msto;
int ret;
- if (!(msto = *pmsto = kzalloc(sizeof(*msto), GFP_KERNEL)))
- return -ENOMEM;
+ msto = kzalloc(sizeof(*msto), GFP_KERNEL);
+ if (!msto)
+ return ERR_PTR(-ENOMEM);
ret = drm_encoder_init(dev, &msto->encoder, &nv50_msto,
- DRM_MODE_ENCODER_DPMST, "%s-mst-%d", name, id);
+ DRM_MODE_ENCODER_DPMST, "mst-%d", id);
if (ret) {
- kfree(*pmsto);
- *pmsto = NULL;
- return ret;
+ kfree(msto);
+ return ERR_PTR(ret);
}
drm_encoder_helper_add(&msto->encoder, &nv50_msto_help);
- msto->encoder.possible_crtcs = heads;
- return 0;
+ msto->encoder.possible_crtcs = drm_crtc_mask(&head->base.base);
+ msto->head = head;
+ return msto;
}
static struct drm_encoder *
nv50_mstc_atomic_best_encoder(struct drm_connector *connector,
struct drm_connector_state *connector_state)
{
- struct nv50_head *head = nv50_head(connector_state->crtc);
struct nv50_mstc *mstc = nv50_mstc(connector);
+ struct drm_crtc *crtc = connector_state->crtc;
- return &mstc->mstm->msto[head->base.index]->encoder;
-}
-
-static struct drm_encoder *
-nv50_mstc_best_encoder(struct drm_connector *connector)
-{
- struct nv50_mstc *mstc = nv50_mstc(connector);
+ if (!(mstc->mstm->outp->dcb->heads & drm_crtc_mask(crtc)))
+ return NULL;
- return &mstc->mstm->msto[0]->encoder;
+ return &nv50_head(crtc)->msto->encoder;
}
static enum drm_mode_status
@@ -1038,7 +1137,6 @@ static const struct drm_connector_helper_funcs
nv50_mstc_help = {
.get_modes = nv50_mstc_get_modes,
.mode_valid = nv50_mstc_mode_valid,
- .best_encoder = nv50_mstc_best_encoder,
.atomic_best_encoder = nv50_mstc_atomic_best_encoder,
.atomic_check = nv50_mstc_atomic_check,
.detect_ctx = nv50_mstc_detect,
@@ -1071,8 +1169,9 @@ nv50_mstc_new(struct nv50_mstm *mstm, struct drm_dp_mst_port *port,
const char *path, struct nv50_mstc **pmstc)
{
struct drm_device *dev = mstm->outp->base.base.dev;
+ struct drm_crtc *crtc;
struct nv50_mstc *mstc;
- int ret, i;
+ int ret;
if (!(mstc = *pmstc = kzalloc(sizeof(*mstc), GFP_KERNEL)))
return -ENOMEM;
@@ -1092,8 +1191,13 @@ nv50_mstc_new(struct nv50_mstm *mstm, struct drm_dp_mst_port *port,
mstc->connector.funcs->reset(&mstc->connector);
nouveau_conn_attach_properties(&mstc->connector);
- for (i = 0; i < ARRAY_SIZE(mstm->msto) && mstm->msto[i]; i++)
- drm_connector_attach_encoder(&mstc->connector, &mstm->msto[i]->encoder);
+ drm_for_each_crtc(crtc, dev) {
+ if (!(mstm->outp->dcb->heads & drm_crtc_mask(crtc)))
+ continue;
+
+ drm_connector_attach_encoder(&mstc->connector,
+ &nv50_head(crtc)->msto->encoder);
+ }
drm_object_attach_property(&mstc->connector.base, dev->mode_config.path_property, 0);
drm_object_attach_property(&mstc->connector.base, dev->mode_config.tile_property, 0);
@@ -1367,7 +1471,7 @@ nv50_mstm_new(struct nouveau_encoder *outp, struct drm_dp_aux *aux, int aux_max,
const int max_payloads = hweight8(outp->dcb->heads);
struct drm_device *dev = outp->base.base.dev;
struct nv50_mstm *mstm;
- int ret, i;
+ int ret;
u8 dpcd;
/* This is a workaround for some monitors not functioning
@@ -1390,13 +1494,6 @@ nv50_mstm_new(struct nouveau_encoder *outp, struct drm_dp_aux *aux, int aux_max,
if (ret)
return ret;
- for (i = 0; i < max_payloads; i++) {
- ret = nv50_msto_new(dev, outp->dcb->heads, outp->base.base.name,
- i, &mstm->msto[i]);
- if (ret)
- return ret;
- }
-
return 0;
}
@@ -1569,17 +1666,24 @@ nv50_sor_func = {
.destroy = nv50_sor_destroy,
};
+static bool nv50_has_mst(struct nouveau_drm *drm)
+{
+ struct nvkm_bios *bios = nvxx_bios(&drm->client.device);
+ u32 data;
+ u8 ver, hdr, cnt, len;
+
+ data = nvbios_dp_table(bios, &ver, &hdr, &cnt, &len);
+ return data && ver >= 0x40 && (nvbios_rd08(bios, data + 0x08) & 0x04);
+}
+
static int
nv50_sor_create(struct drm_connector *connector, struct dcb_output *dcbe)
{
struct nouveau_connector *nv_connector = nouveau_connector(connector);
struct nouveau_drm *drm = nouveau_drm(connector->dev);
- struct nvkm_bios *bios = nvxx_bios(&drm->client.device);
struct nvkm_i2c *i2c = nvxx_i2c(&drm->client.device);
struct nouveau_encoder *nv_encoder;
struct drm_encoder *encoder;
- u8 ver, hdr, cnt, len;
- u32 data;
int type, ret;
switch (dcbe->type) {
@@ -1624,10 +1728,9 @@ nv50_sor_create(struct drm_connector *connector, struct dcb_output *dcbe)
}
if (nv_connector->type != DCB_CONNECTOR_eDP &&
- (data = nvbios_dp_table(bios, &ver, &hdr, &cnt, &len)) &&
- ver >= 0x40 && (nvbios_rd08(bios, data + 0x08) & 0x04)) {
- ret = nv50_mstm_new(nv_encoder, &nv_connector->aux, 16,
- nv_connector->base.base.id,
+ nv50_has_mst(drm)) {
+ ret = nv50_mstm_new(nv_encoder, &nv_connector->aux,
+ 16, nv_connector->base.base.id,
&nv_encoder->dp.mstm);
if (ret)
return ret;
@@ -1673,7 +1776,6 @@ nv50_pior_enable(struct drm_encoder *encoder)
{
struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
struct nouveau_crtc *nv_crtc = nouveau_crtc(encoder->crtc);
- struct nouveau_connector *nv_connector;
struct nv50_head_atom *asyh = nv50_head_atom(nv_crtc->base.state);
struct nv50_core *core = nv50_disp(encoder->dev)->core;
u8 owner = 1 << nv_crtc->index;
@@ -1681,7 +1783,6 @@ nv50_pior_enable(struct drm_encoder *encoder)
nv50_outp_acquire(nv_encoder);
- nv_connector = nouveau_encoder_connector_get(nv_encoder);
switch (asyh->or.bpc) {
case 10: asyh->or.depth = 0x6; break;
case 8: asyh->or.depth = 0x5; break;
@@ -2302,6 +2403,8 @@ nv50_display_destroy(struct drm_device *dev)
{
struct nv50_disp *disp = nv50_disp(dev);
+ nv50_audio_component_fini(nouveau_drm(dev));
+
nv50_core_del(&disp->core);
nouveau_bo_unmap(disp->sync);
@@ -2323,6 +2426,7 @@ nv50_display_create(struct drm_device *dev)
struct nv50_disp *disp;
struct dcb_output *dcbe;
int crtcs, ret, i;
+ bool has_mst = nv50_has_mst(drm);
disp = kzalloc(sizeof(*disp), GFP_KERNEL);
if (!disp)
@@ -2371,11 +2475,37 @@ nv50_display_create(struct drm_device *dev)
crtcs = 0x3;
for (i = 0; i < fls(crtcs); i++) {
+ struct nv50_head *head;
+
if (!(crtcs & (1 << i)))
continue;
- ret = nv50_head_create(dev, i);
- if (ret)
+
+ head = nv50_head_create(dev, i);
+ if (IS_ERR(head)) {
+ ret = PTR_ERR(head);
goto out;
+ }
+
+ if (has_mst) {
+ head->msto = nv50_msto_new(dev, head, i);
+ if (IS_ERR(head->msto)) {
+ ret = PTR_ERR(head->msto);
+ head->msto = NULL;
+ goto out;
+ }
+
+ /*
+ * FIXME: This is a hack to work around the following
+ * issues:
+ *
+ * https://gitlab.gnome.org/GNOME/mutter/issues/759
+ * https://gitlab.freedesktop.org/xorg/xserver/merge_requests/277
+ *
+ * Once these issues are closed, this should be
+ * removed
+ */
+ head->msto->encoder.possible_crtcs = crtcs;
+ }
}
/* create encoder/connector objects based on VBIOS DCB table */
@@ -2423,6 +2553,8 @@ nv50_display_create(struct drm_device *dev)
/* Disable vblank irqs aggressively for power-saving, safe on nv50+ */
dev->vblank_disable_immediate = true;
+ nv50_audio_component_init(drm);
+
out:
if (ret)
nv50_display_destroy(dev);
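As the FIXME above explains, every MST encoder temporarily advertises all CRTCs in possible_crtcs so that current mutter/xserver versions keep working. Once those userspace issues are resolved, the strict one-encoder-per-head mapping would presumably shrink the mask to the owning head only, along these lines (a hedged sketch, assuming the nv50_head layout from this series):

    /* strict mapping: the MST encoder for head i may only use CRTC i */
    head->msto->encoder.possible_crtcs = drm_crtc_mask(&head->base.base);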
diff --git a/drivers/gpu/drm/nouveau/dispnv50/disp.h b/drivers/gpu/drm/nouveau/dispnv50/disp.h
index 7c41b0599d1a..d54fe00ac3a3 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/disp.h
+++ b/drivers/gpu/drm/nouveau/dispnv50/disp.h
@@ -4,6 +4,8 @@
#include "nouveau_display.h"
+struct nv50_msto;
+
struct nv50_disp {
struct nvif_disp *disp;
struct nv50_core *core;
@@ -78,14 +80,14 @@ void evo_kick(u32 *, struct nv50_dmac *);
#define evo_mthd(p, m, s) do { \
const u32 _m = (m), _s = (s); \
- if (drm_debug & DRM_UT_KMS) \
+ if (drm_debug_enabled(DRM_UT_KMS)) \
pr_err("%04x %d %s\n", _m, _s, __func__); \
*((p)++) = ((_s << 18) | _m); \
} while(0)
#define evo_data(p, d) do { \
const u32 _d = (d); \
- if (drm_debug & DRM_UT_KMS) \
+ if (drm_debug_enabled(DRM_UT_KMS)) \
pr_err("\t%08x\n", _d); \
*((p)++) = _d; \
} while(0)
diff --git a/drivers/gpu/drm/nouveau/dispnv50/head.c b/drivers/gpu/drm/nouveau/dispnv50/head.c
index c9692df2b76c..d9d64602947d 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/head.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/head.c
@@ -213,6 +213,7 @@ nv50_head_atomic_check_lut(struct nv50_head *head,
{
struct nv50_disp *disp = nv50_disp(head->base.base.dev);
struct drm_property_blob *olut = asyh->state.gamma_lut;
+ int size;
/* Determine whether core output LUT should be enabled. */
if (olut) {
@@ -229,14 +230,23 @@ nv50_head_atomic_check_lut(struct nv50_head *head,
}
}
- if (!olut && !head->func->olut_identity) {
- asyh->olut.handle = 0;
- return 0;
+ if (!olut) {
+ if (!head->func->olut_identity) {
+ asyh->olut.handle = 0;
+ return 0;
+ }
+ size = 0;
+ } else {
+ size = drm_color_lut_size(olut);
}
+ if (!head->func->olut(head, asyh, size)) {
+ DRM_DEBUG_KMS("Invalid olut\n");
+ return -EINVAL;
+ }
asyh->olut.handle = disp->core->chan.vram.handle;
asyh->olut.buffer = !asyh->olut.buffer;
- head->func->olut(head, asyh);
+
return 0;
}
@@ -473,7 +483,7 @@ nv50_head_func = {
.atomic_destroy_state = nv50_head_atomic_destroy_state,
};
-int
+struct nv50_head *
nv50_head_create(struct drm_device *dev, int index)
{
struct nouveau_drm *drm = nouveau_drm(dev);
@@ -485,7 +495,7 @@ nv50_head_create(struct drm_device *dev, int index)
head = kzalloc(sizeof(*head), GFP_KERNEL);
if (!head)
- return -ENOMEM;
+ return ERR_PTR(-ENOMEM);
head->func = disp->core->func->head;
head->base.index = index;
@@ -503,27 +513,26 @@ nv50_head_create(struct drm_device *dev, int index)
ret = nv50_curs_new(drm, head->base.index, &curs);
if (ret) {
kfree(head);
- return ret;
+ return ERR_PTR(ret);
}
crtc = &head->base.base;
drm_crtc_init_with_planes(dev, crtc, &base->plane, &curs->plane,
&nv50_head_func, "head-%d", head->base.index);
drm_crtc_helper_add(crtc, &nv50_head_help);
+ /* Keep the legacy gamma size at 256 to avoid compatibility issues */
drm_mode_crtc_set_gamma_size(crtc, 256);
- if (disp->disp->object.oclass >= GF110_DISP)
- drm_crtc_enable_color_mgmt(crtc, 256, true, 256);
- else
- drm_crtc_enable_color_mgmt(crtc, 0, false, 256);
+ drm_crtc_enable_color_mgmt(crtc, base->func->ilut_size,
+ disp->disp->object.oclass >= GF110_DISP,
+ head->func->olut_size);
if (head->func->olut_set) {
ret = nv50_lut_init(disp, &drm->client.mmu, &head->olut);
- if (ret)
- goto out;
+ if (ret) {
+ nv50_head_destroy(crtc);
+ return ERR_PTR(ret);
+ }
}
-out:
- if (ret)
- nv50_head_destroy(crtc);
- return ret;
+ return head;
}
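nv50_head_create() now follows the usual kernel constructor idiom: return the object pointer on success or an errno encoded with ERR_PTR() on failure, which the caller unpacks with IS_ERR()/PTR_ERR(). The generic shape, for reference:

    struct foo *foo_create(void) /* hypothetical constructor */
    {
        struct foo *foo = kzalloc(sizeof(*foo), GFP_KERNEL);

        if (!foo)
            return ERR_PTR(-ENOMEM); /* encode errno as a pointer */
        return foo;
    }

    /* caller side: */
    struct foo *foo = foo_create();
    if (IS_ERR(foo))
        return PTR_ERR(foo); /* decode back to a negative errno */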
diff --git a/drivers/gpu/drm/nouveau/dispnv50/head.h b/drivers/gpu/drm/nouveau/dispnv50/head.h
index d1c002f534d4..c32b27cdaefc 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/head.h
+++ b/drivers/gpu/drm/nouveau/dispnv50/head.h
@@ -11,17 +11,19 @@ struct nv50_head {
const struct nv50_head_func *func;
struct nouveau_crtc base;
struct nv50_lut olut;
+ struct nv50_msto *msto;
};
-int nv50_head_create(struct drm_device *, int index);
+struct nv50_head *nv50_head_create(struct drm_device *, int index);
void nv50_head_flush_set(struct nv50_head *, struct nv50_head_atom *);
void nv50_head_flush_clr(struct nv50_head *, struct nv50_head_atom *, bool y);
struct nv50_head_func {
void (*view)(struct nv50_head *, struct nv50_head_atom *);
void (*mode)(struct nv50_head *, struct nv50_head_atom *);
- void (*olut)(struct nv50_head *, struct nv50_head_atom *);
+ bool (*olut)(struct nv50_head *, struct nv50_head_atom *, int);
bool olut_identity;
+ int olut_size;
void (*olut_set)(struct nv50_head *, struct nv50_head_atom *);
void (*olut_clr)(struct nv50_head *);
void (*core_calc)(struct nv50_head *, struct nv50_head_atom *);
@@ -43,7 +45,7 @@ struct nv50_head_func {
extern const struct nv50_head_func head507d;
void head507d_view(struct nv50_head *, struct nv50_head_atom *);
void head507d_mode(struct nv50_head *, struct nv50_head_atom *);
-void head507d_olut(struct nv50_head *, struct nv50_head_atom *);
+bool head507d_olut(struct nv50_head *, struct nv50_head_atom *, int);
void head507d_core_calc(struct nv50_head *, struct nv50_head_atom *);
void head507d_core_clr(struct nv50_head *);
int head507d_curs_layout(struct nv50_head *, struct nv50_wndw_atom *,
@@ -60,7 +62,7 @@ extern const struct nv50_head_func head827d;
extern const struct nv50_head_func head907d;
void head907d_view(struct nv50_head *, struct nv50_head_atom *);
void head907d_mode(struct nv50_head *, struct nv50_head_atom *);
-void head907d_olut(struct nv50_head *, struct nv50_head_atom *);
+bool head907d_olut(struct nv50_head *, struct nv50_head_atom *, int);
void head907d_olut_set(struct nv50_head *, struct nv50_head_atom *);
void head907d_olut_clr(struct nv50_head *);
void head907d_core_set(struct nv50_head *, struct nv50_head_atom *);
diff --git a/drivers/gpu/drm/nouveau/dispnv50/head507d.c b/drivers/gpu/drm/nouveau/dispnv50/head507d.c
index 7561be5ca707..66ccf36b56a2 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/head507d.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/head507d.c
@@ -271,15 +271,19 @@ head507d_olut_load(struct drm_color_lut *in, int size, void __iomem *mem)
writew(readw(mem - 4), mem + 4);
}
-void
-head507d_olut(struct nv50_head *head, struct nv50_head_atom *asyh)
+bool
+head507d_olut(struct nv50_head *head, struct nv50_head_atom *asyh, int size)
{
+ if (size != 256)
+ return false;
+
if (asyh->base.cpp == 1)
asyh->olut.mode = 0;
else
asyh->olut.mode = 1;
asyh->olut.load = head507d_olut_load;
+ return true;
}
void
@@ -328,6 +332,7 @@ head507d = {
.view = head507d_view,
.mode = head507d_mode,
.olut = head507d_olut,
+ .olut_size = 256,
.olut_set = head507d_olut_set,
.olut_clr = head507d_olut_clr,
.core_calc = head507d_core_calc,
diff --git a/drivers/gpu/drm/nouveau/dispnv50/head827d.c b/drivers/gpu/drm/nouveau/dispnv50/head827d.c
index af5e7bd5978b..11877119eea4 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/head827d.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/head827d.c
@@ -108,6 +108,7 @@ head827d = {
.view = head507d_view,
.mode = head507d_mode,
.olut = head507d_olut,
+ .olut_size = 256,
.olut_set = head827d_olut_set,
.olut_clr = head827d_olut_clr,
.core_calc = head507d_core_calc,
diff --git a/drivers/gpu/drm/nouveau/dispnv50/head907d.c b/drivers/gpu/drm/nouveau/dispnv50/head907d.c
index c2d09dd97b1f..3002ec23d7a6 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/head907d.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/head907d.c
@@ -230,11 +230,15 @@ head907d_olut_load(struct drm_color_lut *in, int size, void __iomem *mem)
writew(readw(mem - 4), mem + 4);
}
-void
-head907d_olut(struct nv50_head *head, struct nv50_head_atom *asyh)
+bool
+head907d_olut(struct nv50_head *head, struct nv50_head_atom *asyh, int size)
{
- asyh->olut.mode = 7;
+ if (size != 256 && size != 1024)
+ return false;
+
+ asyh->olut.mode = size == 1024 ? 4 : 7;
asyh->olut.load = head907d_olut_load;
+ return true;
}
void
@@ -285,6 +289,7 @@ head907d = {
.view = head907d_view,
.mode = head907d_mode,
.olut = head907d_olut,
+ .olut_size = 1024,
.olut_set = head907d_olut_set,
.olut_clr = head907d_olut_clr,
.core_calc = head507d_core_calc,
diff --git a/drivers/gpu/drm/nouveau/dispnv50/head917d.c b/drivers/gpu/drm/nouveau/dispnv50/head917d.c
index 303df8459ca8..76958cedd51f 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/head917d.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/head917d.c
@@ -83,6 +83,7 @@ head917d = {
.view = head907d_view,
.mode = head907d_mode,
.olut = head907d_olut,
+ .olut_size = 1024,
.olut_set = head907d_olut_set,
.olut_clr = head907d_olut_clr,
.core_calc = head507d_core_calc,
diff --git a/drivers/gpu/drm/nouveau/dispnv50/headc37d.c b/drivers/gpu/drm/nouveau/dispnv50/headc37d.c
index ef6a99d95a9c..00011ce109a6 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/headc37d.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/headc37d.c
@@ -148,14 +148,18 @@ headc37d_olut_set(struct nv50_head *head, struct nv50_head_atom *asyh)
}
}
-static void
-headc37d_olut(struct nv50_head *head, struct nv50_head_atom *asyh)
+static bool
+headc37d_olut(struct nv50_head *head, struct nv50_head_atom *asyh, int size)
{
+ if (size != 256 && size != 1024)
+ return false;
+
asyh->olut.mode = 2;
- asyh->olut.size = 0;
+ asyh->olut.size = size == 1024 ? 2 : 0;
asyh->olut.range = 0;
asyh->olut.output_mode = 1;
asyh->olut.load = head907d_olut_load;
+ return true;
}
static void
@@ -201,6 +205,7 @@ headc37d = {
.view = headc37d_view,
.mode = headc37d_mode,
.olut = headc37d_olut,
+ .olut_size = 1024,
.olut_set = headc37d_olut_set,
.olut_clr = headc37d_olut_clr,
.curs_layout = head917d_curs_layout,
diff --git a/drivers/gpu/drm/nouveau/dispnv50/headc57d.c b/drivers/gpu/drm/nouveau/dispnv50/headc57d.c
index 32a7f9e85fb0..938d910a1b1e 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/headc57d.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/headc57d.c
@@ -151,17 +151,20 @@ headc57d_olut_load(struct drm_color_lut *in, int size, void __iomem *mem)
writew(readw(mem - 4), mem + 4);
}
-void
-headc57d_olut(struct nv50_head *head, struct nv50_head_atom *asyh)
+bool
+headc57d_olut(struct nv50_head *head, struct nv50_head_atom *asyh, int size)
{
+ if (size != 0 && size != 256 && size != 1024)
+ return false;
+
asyh->olut.mode = 2; /* DIRECT10 */
asyh->olut.size = 4 /* VSS header. */ + 1024 + 1 /* Entries. */;
asyh->olut.output_mode = 1; /* INTERPOLATE_ENABLE. */
- if (asyh->state.gamma_lut &&
- asyh->state.gamma_lut->length / sizeof(struct drm_color_lut) == 256)
+ if (size == 256)
asyh->olut.load = headc57d_olut_load_8;
else
asyh->olut.load = headc57d_olut_load;
+ return true;
}
static void
@@ -194,6 +197,7 @@ headc57d = {
.mode = headc57d_mode,
.olut = headc57d_olut,
.olut_identity = true,
+ .olut_size = 1024,
.olut_set = headc57d_olut_set,
.olut_clr = headc57d_olut_clr,
.curs_layout = head917d_curs_layout,
diff --git a/drivers/gpu/drm/nouveau/dispnv50/lut.c b/drivers/gpu/drm/nouveau/dispnv50/lut.c
index 994def4fd51a..4e95ca5604ab 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/lut.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/lut.c
@@ -49,7 +49,7 @@ nv50_lut_load(struct nv50_lut *lut, int buffer, struct drm_property_blob *blob,
kvfree(in);
}
} else {
- load(in, blob->length / sizeof(*in), mem);
+ load(in, drm_color_lut_size(blob), mem);
}
return addr;
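Both LUT paths now use drm_color_lut_size() instead of open-coding the division. The helper is equivalent to the expression it replaces:

    /* include/drm/drm_color_mgmt.h: number of entries in a LUT blob */
    static inline int drm_color_lut_size(const struct drm_property_blob *blob)
    {
        return blob->length / sizeof(struct drm_color_lut);
    }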
diff --git a/drivers/gpu/drm/nouveau/dispnv50/wndw.c b/drivers/gpu/drm/nouveau/dispnv50/wndw.c
index 5193b6257061..890315291b01 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/wndw.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/wndw.c
@@ -318,7 +318,7 @@ nv50_wndw_atomic_check_acquire(struct nv50_wndw *wndw, bool modeset,
return wndw->func->acquire(wndw, asyw, asyh);
}
-static void
+static int
nv50_wndw_atomic_check_lut(struct nv50_wndw *wndw,
struct nv50_wndw_atom *armw,
struct nv50_wndw_atom *asyw,
@@ -340,7 +340,7 @@ nv50_wndw_atomic_check_lut(struct nv50_wndw *wndw,
*/
if (!(ilut = asyh->state.gamma_lut)) {
asyw->visible = false;
- return;
+ return 0;
}
if (wndw->func->ilut)
@@ -359,7 +359,10 @@ nv50_wndw_atomic_check_lut(struct nv50_wndw *wndw,
/* Recalculate LUT state. */
memset(&asyw->xlut, 0x00, sizeof(asyw->xlut));
if ((asyw->ilut = wndw->func->ilut ? ilut : NULL)) {
- wndw->func->ilut(wndw, asyw);
+ if (!wndw->func->ilut(wndw, asyw, drm_color_lut_size(ilut))) {
+ DRM_DEBUG_KMS("Invalid ilut\n");
+ return -EINVAL;
+ }
asyw->xlut.handle = wndw->wndw.vram.handle;
asyw->xlut.i.buffer = !asyw->xlut.i.buffer;
asyw->set.xlut = true;
@@ -384,6 +387,7 @@ nv50_wndw_atomic_check_lut(struct nv50_wndw *wndw,
/* Can't do an immediate flip while changing the LUT. */
asyh->state.async_flip = false;
+ return 0;
}
static int
@@ -424,8 +428,11 @@ nv50_wndw_atomic_check(struct drm_plane *plane, struct drm_plane_state *state)
(!armw->visible ||
asyh->state.color_mgmt_changed ||
asyw->state.fb->format->format !=
- armw->state.fb->format->format))
- nv50_wndw_atomic_check_lut(wndw, armw, asyw, asyh);
+ armw->state.fb->format->format)) {
+ ret = nv50_wndw_atomic_check_lut(wndw, armw, asyw, asyh);
+ if (ret)
+ return ret;
+ }
/* Calculate new window state. */
if (asyw->visible) {
diff --git a/drivers/gpu/drm/nouveau/dispnv50/wndw.h b/drivers/gpu/drm/nouveau/dispnv50/wndw.h
index c63bd3bdaf06..caf397475918 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/wndw.h
+++ b/drivers/gpu/drm/nouveau/dispnv50/wndw.h
@@ -64,12 +64,13 @@ struct nv50_wndw_func {
void (*ntfy_clr)(struct nv50_wndw *);
int (*ntfy_wait_begun)(struct nouveau_bo *, u32 offset,
struct nvif_device *);
- void (*ilut)(struct nv50_wndw *, struct nv50_wndw_atom *);
+ bool (*ilut)(struct nv50_wndw *, struct nv50_wndw_atom *, int);
void (*csc)(struct nv50_wndw *, struct nv50_wndw_atom *,
const struct drm_color_ctm *);
void (*csc_set)(struct nv50_wndw *, struct nv50_wndw_atom *);
void (*csc_clr)(struct nv50_wndw *);
bool ilut_identity;
+ int ilut_size;
bool olut_core;
void (*xlut_set)(struct nv50_wndw *, struct nv50_wndw_atom *);
void (*xlut_clr)(struct nv50_wndw *);
diff --git a/drivers/gpu/drm/nouveau/dispnv50/wndwc37e.c b/drivers/gpu/drm/nouveau/dispnv50/wndwc37e.c
index 0f9402162bde..b92dc3461bbd 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/wndwc37e.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/wndwc37e.c
@@ -71,14 +71,18 @@ wndwc37e_ilut_set(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw)
}
}
-static void
-wndwc37e_ilut(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw)
+static bool
+wndwc37e_ilut(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw, int size)
{
+ if (size != 256 && size != 1024)
+ return false;
+
asyw->xlut.i.mode = 2;
- asyw->xlut.i.size = 0;
+ asyw->xlut.i.size = size == 1024 ? 2 : 0;
asyw->xlut.i.range = 0;
asyw->xlut.i.output_mode = 1;
asyw->xlut.i.load = head907d_olut_load;
+ return true;
}
void
@@ -261,6 +265,7 @@ wndwc37e = {
.ntfy_reset = corec37d_ntfy_init,
.ntfy_wait_begun = base507c_ntfy_wait_begun,
.ilut = wndwc37e_ilut,
+ .ilut_size = 1024,
.xlut_set = wndwc37e_ilut_set,
.xlut_clr = wndwc37e_ilut_clr,
.csc = base907c_csc,
diff --git a/drivers/gpu/drm/nouveau/dispnv50/wndwc57e.c b/drivers/gpu/drm/nouveau/dispnv50/wndwc57e.c
index a311c79e5295..35c9c52fab26 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/wndwc57e.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/wndwc57e.c
@@ -156,19 +156,21 @@ wndwc57e_ilut_load(struct drm_color_lut *in, int size, void __iomem *mem)
writew(readw(mem - 4), mem + 4);
}
-static void
-wndwc57e_ilut(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw)
+static bool
+wndwc57e_ilut(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw, int size)
{
- u16 size = asyw->ilut->length / sizeof(struct drm_color_lut);
+ if (!size)
+ size = 1024;
+ if (size != 256 && size != 1024)
+ return false;
+
if (size == 256) {
asyw->xlut.i.mode = 1; /* DIRECT8. */
} else {
asyw->xlut.i.mode = 2; /* DIRECT10. */
- size = 1024;
}
asyw->xlut.i.size = 4 /* VSS header. */ + size + 1 /* Entries. */;
asyw->xlut.i.output_mode = 0; /* INTERPOLATE_DISABLE. */
asyw->xlut.i.load = wndwc57e_ilut_load;
+ return true;
}
static const struct nv50_wndw_func
@@ -183,6 +185,7 @@ wndwc57e = {
.ntfy_wait_begun = base507c_ntfy_wait_begun,
.ilut = wndwc57e_ilut,
.ilut_identity = true,
+ .ilut_size = 1024,
.xlut_set = wndwc57e_ilut_set,
.xlut_clr = wndwc57e_ilut_clr,
.csc = base907c_csc,
diff --git a/drivers/gpu/drm/nouveau/include/nvfw/acr.h b/drivers/gpu/drm/nouveau/include/nvfw/acr.h
new file mode 100644
index 000000000000..e65d6a8db104
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/include/nvfw/acr.h
@@ -0,0 +1,152 @@
+/* SPDX-License-Identifier: MIT */
+#ifndef __NVFW_ACR_H__
+#define __NVFW_ACR_H__
+
+struct wpr_header {
+#define WPR_HEADER_V0_FALCON_ID_INVALID 0xffffffff
+ u32 falcon_id;
+ u32 lsb_offset;
+ u32 bootstrap_owner;
+ u32 lazy_bootstrap;
+#define WPR_HEADER_V0_STATUS_NONE 0
+#define WPR_HEADER_V0_STATUS_COPY 1
+#define WPR_HEADER_V0_STATUS_VALIDATION_CODE_FAILED 2
+#define WPR_HEADER_V0_STATUS_VALIDATION_DATA_FAILED 3
+#define WPR_HEADER_V0_STATUS_VALIDATION_DONE 4
+#define WPR_HEADER_V0_STATUS_VALIDATION_SKIPPED 5
+#define WPR_HEADER_V0_STATUS_BOOTSTRAP_READY 6
+ u32 status;
+};
+
+void wpr_header_dump(struct nvkm_subdev *, const struct wpr_header *);
+
+struct wpr_header_v1 {
+#define WPR_HEADER_V1_FALCON_ID_INVALID 0xffffffff
+ u32 falcon_id;
+ u32 lsb_offset;
+ u32 bootstrap_owner;
+ u32 lazy_bootstrap;
+ u32 bin_version;
+#define WPR_HEADER_V1_STATUS_NONE 0
+#define WPR_HEADER_V1_STATUS_COPY 1
+#define WPR_HEADER_V1_STATUS_VALIDATION_CODE_FAILED 2
+#define WPR_HEADER_V1_STATUS_VALIDATION_DATA_FAILED 3
+#define WPR_HEADER_V1_STATUS_VALIDATION_DONE 4
+#define WPR_HEADER_V1_STATUS_VALIDATION_SKIPPED 5
+#define WPR_HEADER_V1_STATUS_BOOTSTRAP_READY 6
+#define WPR_HEADER_V1_STATUS_REVOCATION_CHECK_FAILED 7
+ u32 status;
+};
+
+void wpr_header_v1_dump(struct nvkm_subdev *, const struct wpr_header_v1 *);
+
+struct lsf_signature {
+ u8 prd_keys[2][16];
+ u8 dbg_keys[2][16];
+ u32 b_prd_present;
+ u32 b_dbg_present;
+ u32 falcon_id;
+};
+
+struct lsf_signature_v1 {
+ u8 prd_keys[2][16];
+ u8 dbg_keys[2][16];
+ u32 b_prd_present;
+ u32 b_dbg_present;
+ u32 falcon_id;
+ u32 supports_versioning;
+ u32 version;
+ u32 depmap_count;
+ u8 depmap[11/*LSF_LSB_DEPMAP_SIZE*/ * 2 * 4];
+ u8 kdf[16];
+};
+
+struct lsb_header_tail {
+ u32 ucode_off;
+ u32 ucode_size;
+ u32 data_size;
+ u32 bl_code_size;
+ u32 bl_imem_off;
+ u32 bl_data_off;
+ u32 bl_data_size;
+ u32 app_code_off;
+ u32 app_code_size;
+ u32 app_data_off;
+ u32 app_data_size;
+ u32 flags;
+};
+
+struct lsb_header {
+ struct lsf_signature signature;
+ struct lsb_header_tail tail;
+};
+
+void lsb_header_dump(struct nvkm_subdev *, struct lsb_header *);
+
+struct lsb_header_v1 {
+ struct lsf_signature_v1 signature;
+ struct lsb_header_tail tail;
+};
+
+void lsb_header_v1_dump(struct nvkm_subdev *, struct lsb_header_v1 *);
+
+struct flcn_acr_desc {
+ union {
+ u8 reserved_dmem[0x200];
+ u32 signatures[4];
+ } ucode_reserved_space;
+ u32 wpr_region_id;
+ u32 wpr_offset;
+ u32 mmu_mem_range;
+ struct {
+ u32 no_regions;
+ struct {
+ u32 start_addr;
+ u32 end_addr;
+ u32 region_id;
+ u32 read_mask;
+ u32 write_mask;
+ u32 client_mask;
+ } region_props[2];
+ } regions;
+ u32 ucode_blob_size;
+ u64 ucode_blob_base __aligned(8);
+ struct {
+ u32 vpr_enabled;
+ u32 vpr_start;
+ u32 vpr_end;
+ u32 hdcp_policies;
+ } vpr_desc;
+};
+
+void flcn_acr_desc_dump(struct nvkm_subdev *, struct flcn_acr_desc *);
+
+struct flcn_acr_desc_v1 {
+ u8 reserved_dmem[0x200];
+ u32 signatures[4];
+ u32 wpr_region_id;
+ u32 wpr_offset;
+ u32 mmu_memory_range;
+ struct {
+ u32 no_regions;
+ struct {
+ u32 start_addr;
+ u32 end_addr;
+ u32 region_id;
+ u32 read_mask;
+ u32 write_mask;
+ u32 client_mask;
+ u32 shadow_mem_start_addr;
+ } region_props[2];
+ } regions;
+ u32 ucode_blob_size;
+ u64 ucode_blob_base __aligned(8);
+ struct {
+ u32 vpr_enabled;
+ u32 vpr_start;
+ u32 vpr_end;
+ u32 hdcp_policies;
+ } vpr_desc;
+};
+
+void flcn_acr_desc_v1_dump(struct nvkm_subdev *, struct flcn_acr_desc_v1 *);
+#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvfw/flcn.h b/drivers/gpu/drm/nouveau/include/nvfw/flcn.h
new file mode 100644
index 000000000000..e090f347d220
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/include/nvfw/flcn.h
@@ -0,0 +1,97 @@
+/* SPDX-License-Identifier: MIT */
+#ifndef __NVFW_FLCN_H__
+#define __NVFW_FLCN_H__
+#include <core/os.h>
+struct nvkm_subdev;
+
+struct loader_config {
+ u32 dma_idx;
+ u32 code_dma_base;
+ u32 code_size_total;
+ u32 code_size_to_load;
+ u32 code_entry_point;
+ u32 data_dma_base;
+ u32 data_size;
+ u32 overlay_dma_base;
+ u32 argc;
+ u32 argv;
+ u32 code_dma_base1;
+ u32 data_dma_base1;
+ u32 overlay_dma_base1;
+};
+
+void
+loader_config_dump(struct nvkm_subdev *, const struct loader_config *);
+
+struct loader_config_v1 {
+ u32 reserved;
+ u32 dma_idx;
+ u64 code_dma_base;
+ u32 code_size_total;
+ u32 code_size_to_load;
+ u32 code_entry_point;
+ u64 data_dma_base;
+ u32 data_size;
+ u64 overlay_dma_base;
+ u32 argc;
+ u32 argv;
+} __packed;
+
+void
+loader_config_v1_dump(struct nvkm_subdev *, const struct loader_config_v1 *);
+
+struct flcn_bl_dmem_desc {
+ u32 reserved[4];
+ u32 signature[4];
+ u32 ctx_dma;
+ u32 code_dma_base;
+ u32 non_sec_code_off;
+ u32 non_sec_code_size;
+ u32 sec_code_off;
+ u32 sec_code_size;
+ u32 code_entry_point;
+ u32 data_dma_base;
+ u32 data_size;
+ u32 code_dma_base1;
+ u32 data_dma_base1;
+};
+
+void
+flcn_bl_dmem_desc_dump(struct nvkm_subdev *, const struct flcn_bl_dmem_desc *);
+
+struct flcn_bl_dmem_desc_v1 {
+ u32 reserved[4];
+ u32 signature[4];
+ u32 ctx_dma;
+ u64 code_dma_base;
+ u32 non_sec_code_off;
+ u32 non_sec_code_size;
+ u32 sec_code_off;
+ u32 sec_code_size;
+ u32 code_entry_point;
+ u64 data_dma_base;
+ u32 data_size;
+} __packed;
+
+void flcn_bl_dmem_desc_v1_dump(struct nvkm_subdev *,
+ const struct flcn_bl_dmem_desc_v1 *);
+
+struct flcn_bl_dmem_desc_v2 {
+ u32 reserved[4];
+ u32 signature[4];
+ u32 ctx_dma;
+ u64 code_dma_base;
+ u32 non_sec_code_off;
+ u32 non_sec_code_size;
+ u32 sec_code_off;
+ u32 sec_code_size;
+ u32 code_entry_point;
+ u64 data_dma_base;
+ u32 data_size;
+ u32 argc;
+ u32 argv;
+} __packed;
+
+void flcn_bl_dmem_desc_v2_dump(struct nvkm_subdev *,
+ const struct flcn_bl_dmem_desc_v2 *);
+#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvfw/fw.h b/drivers/gpu/drm/nouveau/include/nvfw/fw.h
new file mode 100644
index 000000000000..a7cf1188c9d6
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/include/nvfw/fw.h
@@ -0,0 +1,28 @@
+/* SPDX-License-Identifier: MIT */
+#ifndef __NVFW_FW_H__
+#define __NVFW_FW_H__
+#include <core/os.h>
+struct nvkm_subdev;
+
+struct nvfw_bin_hdr {
+ u32 bin_magic;
+ u32 bin_ver;
+ u32 bin_size;
+ u32 header_offset;
+ u32 data_offset;
+ u32 data_size;
+};
+
+const struct nvfw_bin_hdr *nvfw_bin_hdr(struct nvkm_subdev *, const void *);
+
+struct nvfw_bl_desc {
+ u32 start_tag;
+ u32 dmem_load_off;
+ u32 code_off;
+ u32 code_size;
+ u32 data_off;
+ u32 data_size;
+};
+
+const struct nvfw_bl_desc *nvfw_bl_desc(struct nvkm_subdev *, const void *);
+#endif
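A short usage sketch for the two parsers declared above, assuming fw holds an already-loaded firmware image and subdev the owning subdevice (both names are placeholders here):

    const struct nvfw_bin_hdr *hdr = nvfw_bin_hdr(subdev, fw->data);
    const struct nvfw_bl_desc *desc =
        nvfw_bl_desc(subdev, fw->data + hdr->header_offset);

    /* hdr->data_offset/hdr->data_size then locate the payload */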
diff --git a/drivers/gpu/drm/nouveau/include/nvfw/hs.h b/drivers/gpu/drm/nouveau/include/nvfw/hs.h
new file mode 100644
index 000000000000..64d0d32200c2
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/include/nvfw/hs.h
@@ -0,0 +1,31 @@
+/* SPDX-License-Identifier: MIT */
+#ifndef __NVFW_HS_H__
+#define __NVFW_HS_H__
+#include <core/os.h>
+struct nvkm_subdev;
+
+struct nvfw_hs_header {
+ u32 sig_dbg_offset;
+ u32 sig_dbg_size;
+ u32 sig_prod_offset;
+ u32 sig_prod_size;
+ u32 patch_loc;
+ u32 patch_sig;
+ u32 hdr_offset;
+ u32 hdr_size;
+};
+
+const struct nvfw_hs_header *nvfw_hs_header(struct nvkm_subdev *, const void *);
+
+struct nvfw_hs_load_header {
+ u32 non_sec_code_off;
+ u32 non_sec_code_size;
+ u32 data_dma_base;
+ u32 data_size;
+ u32 num_apps;
+ u32 apps[];
+};
+
+const struct nvfw_hs_load_header *
+nvfw_hs_load_header(struct nvkm_subdev *, const void *);
+#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvfw/ls.h b/drivers/gpu/drm/nouveau/include/nvfw/ls.h
new file mode 100644
index 000000000000..f63692a2a16c
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/include/nvfw/ls.h
@@ -0,0 +1,53 @@
+/* SPDX-License-Identifier: MIT */
+#ifndef __NVFW_LS_H__
+#define __NVFW_LS_H__
+#include <core/os.h>
+struct nvkm_subdev;
+
+struct nvfw_ls_desc_head {
+ u32 descriptor_size;
+ u32 image_size;
+ u32 tools_version;
+ u32 app_version;
+ char date[64];
+ u32 bootloader_start_offset;
+ u32 bootloader_size;
+ u32 bootloader_imem_offset;
+ u32 bootloader_entry_point;
+ u32 app_start_offset;
+ u32 app_size;
+ u32 app_imem_offset;
+ u32 app_imem_entry;
+ u32 app_dmem_offset;
+ u32 app_resident_code_offset;
+ u32 app_resident_code_size;
+ u32 app_resident_data_offset;
+ u32 app_resident_data_size;
+};
+
+struct nvfw_ls_desc {
+ struct nvfw_ls_desc_head head;
+ u32 nb_overlays;
+ struct {
+ u32 start;
+ u32 size;
+ } load_ovl[64];
+ u32 compressed;
+};
+
+const struct nvfw_ls_desc *nvfw_ls_desc(struct nvkm_subdev *, const void *);
+
+struct nvfw_ls_desc_v1 {
+ struct nvfw_ls_desc_head head;
+ u32 nb_imem_overlays;
+ u32 nb_dmem_overlays;
+ struct {
+ u32 start;
+ u32 size;
+ } load_ovl[64];
+ u32 compressed;
+};
+
+const struct nvfw_ls_desc_v1 *
+nvfw_ls_desc_v1(struct nvkm_subdev *, const void *);
+#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvfw/pmu.h b/drivers/gpu/drm/nouveau/include/nvfw/pmu.h
new file mode 100644
index 000000000000..452ed7d03827
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/include/nvfw/pmu.h
@@ -0,0 +1,98 @@
+/* SPDX-License-Identifier: MIT */
+#ifndef __NVFW_PMU_H__
+#define __NVFW_PMU_H__
+
+struct nv_pmu_args {
+ u32 reserved;
+ u32 freq_hz;
+ u32 trace_size;
+ u32 trace_dma_base;
+ u16 trace_dma_base1;
+ u8 trace_dma_offset;
+ u32 trace_dma_idx;
+ bool secure_mode;
+ bool raise_priv_sec;
+ struct {
+ u32 dma_base;
+ u16 dma_base1;
+ u8 dma_offset;
+ u16 fb_size;
+ u8 dma_idx;
+ } gc6_ctx;
+ u8 pad;
+};
+
+#define NV_PMU_UNIT_INIT 0x07
+#define NV_PMU_UNIT_ACR 0x0a
+
+struct nv_pmu_init_msg {
+ struct nv_falcon_msg hdr;
+#define NV_PMU_INIT_MSG_INIT 0x00
+ u8 msg_type;
+
+ u8 pad;
+ u16 os_debug_entry_point;
+
+ struct {
+ u16 size;
+ u16 offset;
+ u8 index;
+ u8 pad;
+ } queue_info[5];
+
+ u16 sw_managed_area_offset;
+ u16 sw_managed_area_size;
+};
+
+struct nv_pmu_acr_cmd {
+ struct nv_falcon_cmd hdr;
+#define NV_PMU_ACR_CMD_INIT_WPR_REGION 0x00
+#define NV_PMU_ACR_CMD_BOOTSTRAP_FALCON 0x01
+#define NV_PMU_ACR_CMD_BOOTSTRAP_MULTIPLE_FALCONS 0x03
+ u8 cmd_type;
+};
+
+struct nv_pmu_acr_msg {
+ struct nv_falcon_cmd hdr;
+ u8 msg_type;
+};
+
+struct nv_pmu_acr_init_wpr_region_cmd {
+ struct nv_pmu_acr_cmd cmd;
+ u32 region_id;
+ u32 wpr_offset;
+};
+
+struct nv_pmu_acr_init_wpr_region_msg {
+ struct nv_pmu_acr_msg msg;
+ u32 error_code;
+};
+
+struct nv_pmu_acr_bootstrap_falcon_cmd {
+ struct nv_pmu_acr_cmd cmd;
+#define NV_PMU_ACR_BOOTSTRAP_FALCON_FLAGS_RESET_YES 0x00000000
+#define NV_PMU_ACR_BOOTSTRAP_FALCON_FLAGS_RESET_NO 0x00000001
+ u32 flags;
+ u32 falcon_id;
+};
+
+struct nv_pmu_acr_bootstrap_falcon_msg {
+ struct nv_pmu_acr_msg msg;
+ u32 falcon_id;
+};
+
+struct nv_pmu_acr_bootstrap_multiple_falcons_cmd {
+ struct nv_pmu_acr_cmd cmd;
+#define NV_PMU_ACR_BOOTSTRAP_MULTIPLE_FALCONS_FLAGS_RESET_YES 0x00000000
+#define NV_PMU_ACR_BOOTSTRAP_MULTIPLE_FALCONS_FLAGS_RESET_NO 0x00000001
+ u32 flags;
+ u32 falcon_mask;
+ u32 use_va_mask;
+ u32 wpr_lo;
+ u32 wpr_hi;
+};
+
+struct nv_pmu_acr_bootstrap_multiple_falcons_msg {
+ struct nv_pmu_acr_msg msg;
+ u32 falcon_mask;
+};
+#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvfw/sec2.h b/drivers/gpu/drm/nouveau/include/nvfw/sec2.h
new file mode 100644
index 000000000000..03496558b775
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/include/nvfw/sec2.h
@@ -0,0 +1,60 @@
+/* SPDX-License-Identifier: MIT */
+#ifndef __NVFW_SEC2_H__
+#define __NVFW_SEC2_H__
+
+struct nv_sec2_args {
+ u32 freq_hz;
+ u32 falc_trace_size;
+ u32 falc_trace_dma_base;
+ u32 falc_trace_dma_idx;
+ bool secure_mode;
+};
+
+#define NV_SEC2_UNIT_INIT 0x01
+#define NV_SEC2_UNIT_ACR 0x08
+
+struct nv_sec2_init_msg {
+ struct nv_falcon_msg hdr;
+#define NV_SEC2_INIT_MSG_INIT 0x00
+ u8 msg_type;
+
+ u8 num_queues;
+ u16 os_debug_entry_point;
+
+ struct {
+ u32 offset;
+ u16 size;
+ u8 index;
+#define NV_SEC2_INIT_MSG_QUEUE_ID_CMDQ 0x00
+#define NV_SEC2_INIT_MSG_QUEUE_ID_MSGQ 0x01
+ u8 id;
+ } queue_info[2];
+
+ u32 sw_managed_area_offset;
+ u16 sw_managed_area_size;
+};
+
+struct nv_sec2_acr_cmd {
+ struct nv_falcon_cmd hdr;
+#define NV_SEC2_ACR_CMD_BOOTSTRAP_FALCON 0x00
+ u8 cmd_type;
+};
+
+struct nv_sec2_acr_msg {
+ struct nv_falcon_cmd hdr;
+ u8 msg_type;
+};
+
+struct nv_sec2_acr_bootstrap_falcon_cmd {
+ struct nv_sec2_acr_cmd cmd;
+#define NV_SEC2_ACR_BOOTSTRAP_FALCON_FLAGS_RESET_YES 0x00000000
+#define NV_SEC2_ACR_BOOTSTRAP_FALCON_FLAGS_RESET_NO 0x00000001
+ u32 flags;
+ u32 falcon_id;
+};
+
+struct nv_sec2_acr_bootstrap_falcon_msg {
+ struct nv_sec2_acr_msg msg;
+ u32 error_code;
+ u32 falcon_id;
+};
+#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvif/class.h b/drivers/gpu/drm/nouveau/include/nvif/class.h
index f704ae600e94..30659747ffe8 100644
--- a/drivers/gpu/drm/nouveau/include/nvif/class.h
+++ b/drivers/gpu/drm/nouveau/include/nvif/class.h
@@ -166,6 +166,8 @@
#define VOLTA_A /* cl9097.h */ 0x0000c397
+#define TURING_A /* cl9097.h */ 0x0000c597
+
#define NV74_BSP 0x000074b0
#define GT212_MSVLD 0x000085b1
@@ -207,6 +209,7 @@
#define PASCAL_COMPUTE_A 0x0000c0c0
#define PASCAL_COMPUTE_B 0x0000c1c0
#define VOLTA_COMPUTE_A 0x0000c3c0
+#define TURING_COMPUTE_A 0x0000c5c0
#define NV74_CIPHER 0x000074c1
#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvif/if0008.h b/drivers/gpu/drm/nouveau/include/nvif/if0008.h
index 8450127420f5..c21d09f04f1d 100644
--- a/drivers/gpu/drm/nouveau/include/nvif/if0008.h
+++ b/drivers/gpu/drm/nouveau/include/nvif/if0008.h
@@ -35,7 +35,7 @@ struct nvif_mmu_type_v0 {
struct nvif_mmu_kind_v0 {
__u8 version;
- __u8 pad01[1];
+ __u8 kind_inv;
__u16 count;
__u8 data[];
};
diff --git a/drivers/gpu/drm/nouveau/include/nvif/mmu.h b/drivers/gpu/drm/nouveau/include/nvif/mmu.h
index 747ecf67e403..cec1e88a0a05 100644
--- a/drivers/gpu/drm/nouveau/include/nvif/mmu.h
+++ b/drivers/gpu/drm/nouveau/include/nvif/mmu.h
@@ -7,6 +7,7 @@ struct nvif_mmu {
u8 dmabits;
u8 heap_nr;
u8 type_nr;
+ u8 kind_inv;
u16 kind_nr;
s32 mem;
@@ -36,9 +37,8 @@ void nvif_mmu_fini(struct nvif_mmu *);
static inline bool
nvif_mmu_kind_valid(struct nvif_mmu *mmu, u8 kind)
{
- const u8 invalid = mmu->kind_nr - 1;
if (kind) {
- if (kind >= mmu->kind_nr || mmu->kind[kind] == invalid)
+ if (kind >= mmu->kind_nr || mmu->kind[kind] == mmu->kind_inv)
return false;
}
return true;
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/core/device.h b/drivers/gpu/drm/nouveau/include/nvkm/core/device.h
index 6d55cd0476aa..5c007ce62fc3 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/core/device.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/core/device.h
@@ -23,13 +23,13 @@ enum nvkm_devidx {
NVKM_SUBDEV_MMU,
NVKM_SUBDEV_BAR,
NVKM_SUBDEV_FAULT,
+ NVKM_SUBDEV_ACR,
NVKM_SUBDEV_PMU,
NVKM_SUBDEV_VOLT,
NVKM_SUBDEV_ICCSENSE,
NVKM_SUBDEV_THERM,
NVKM_SUBDEV_CLK,
NVKM_SUBDEV_GSP,
- NVKM_SUBDEV_SECBOOT,
NVKM_ENGINE_BSP,
@@ -129,6 +129,7 @@ struct nvkm_device {
struct notifier_block nb;
} acpi;
+ struct nvkm_acr *acr;
struct nvkm_bar *bar;
struct nvkm_bios *bios;
struct nvkm_bus *bus;
@@ -149,7 +150,6 @@ struct nvkm_device {
struct nvkm_subdev *mxm;
struct nvkm_pci *pci;
struct nvkm_pmu *pmu;
- struct nvkm_secboot *secboot;
struct nvkm_therm *therm;
struct nvkm_timer *timer;
struct nvkm_top *top;
@@ -169,7 +169,7 @@ struct nvkm_device {
struct nvkm_engine *mspdec;
struct nvkm_engine *msppp;
struct nvkm_engine *msvld;
- struct nvkm_engine *nvenc[3];
+ struct nvkm_nvenc *nvenc[3];
struct nvkm_nvdec *nvdec[3];
struct nvkm_pm *pm;
struct nvkm_engine *sec;
@@ -202,6 +202,7 @@ struct nvkm_device_quirk {
struct nvkm_device_chip {
const char *name;
+ int (*acr )(struct nvkm_device *, int idx, struct nvkm_acr **);
int (*bar )(struct nvkm_device *, int idx, struct nvkm_bar **);
int (*bios )(struct nvkm_device *, int idx, struct nvkm_bios **);
int (*bus )(struct nvkm_device *, int idx, struct nvkm_bus **);
@@ -222,7 +223,6 @@ struct nvkm_device_chip {
int (*mxm )(struct nvkm_device *, int idx, struct nvkm_subdev **);
int (*pci )(struct nvkm_device *, int idx, struct nvkm_pci **);
int (*pmu )(struct nvkm_device *, int idx, struct nvkm_pmu **);
- int (*secboot )(struct nvkm_device *, int idx, struct nvkm_secboot **);
int (*therm )(struct nvkm_device *, int idx, struct nvkm_therm **);
int (*timer )(struct nvkm_device *, int idx, struct nvkm_timer **);
int (*top )(struct nvkm_device *, int idx, struct nvkm_top **);
@@ -242,7 +242,7 @@ struct nvkm_device_chip {
int (*mspdec )(struct nvkm_device *, int idx, struct nvkm_engine **);
int (*msppp )(struct nvkm_device *, int idx, struct nvkm_engine **);
int (*msvld )(struct nvkm_device *, int idx, struct nvkm_engine **);
- int (*nvenc[3])(struct nvkm_device *, int idx, struct nvkm_engine **);
+ int (*nvenc[3])(struct nvkm_device *, int idx, struct nvkm_nvenc **);
int (*nvdec[3])(struct nvkm_device *, int idx, struct nvkm_nvdec **);
int (*pm )(struct nvkm_device *, int idx, struct nvkm_pm **);
int (*sec )(struct nvkm_device *, int idx, struct nvkm_engine **);
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/core/falcon.h b/drivers/gpu/drm/nouveau/include/nvkm/core/falcon.h
new file mode 100644
index 000000000000..daa8e4bfb6bf
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/include/nvkm/core/falcon.h
@@ -0,0 +1,77 @@
+/* SPDX-License-Identifier: MIT */
+#ifndef __NVKM_FALCON_H__
+#define __NVKM_FALCON_H__
+#include <engine/falcon.h>
+
+int nvkm_falcon_ctor(const struct nvkm_falcon_func *, struct nvkm_subdev *owner,
+ const char *name, u32 addr, struct nvkm_falcon *);
+void nvkm_falcon_dtor(struct nvkm_falcon *);
+
+void nvkm_falcon_v1_load_imem(struct nvkm_falcon *,
+ void *, u32, u32, u16, u8, bool);
+void nvkm_falcon_v1_load_dmem(struct nvkm_falcon *, void *, u32, u32, u8);
+void nvkm_falcon_v1_read_dmem(struct nvkm_falcon *, u32, u32, u8, void *);
+void nvkm_falcon_v1_bind_context(struct nvkm_falcon *, struct nvkm_memory *);
+int nvkm_falcon_v1_wait_for_halt(struct nvkm_falcon *, u32);
+int nvkm_falcon_v1_clear_interrupt(struct nvkm_falcon *, u32);
+void nvkm_falcon_v1_set_start_addr(struct nvkm_falcon *, u32 start_addr);
+void nvkm_falcon_v1_start(struct nvkm_falcon *);
+int nvkm_falcon_v1_enable(struct nvkm_falcon *);
+void nvkm_falcon_v1_disable(struct nvkm_falcon *);
+
+void gp102_sec2_flcn_bind_context(struct nvkm_falcon *, struct nvkm_memory *);
+int gp102_sec2_flcn_enable(struct nvkm_falcon *);
+
+#define FLCN_PRINTK(t,f,fmt,a...) do { \
+ if (nvkm_subdev_name[(f)->owner->index] != (f)->name) \
+ nvkm_##t((f)->owner, "%s: "fmt"\n", (f)->name, ##a); \
+ else \
+ nvkm_##t((f)->owner, fmt"\n", ##a); \
+} while(0)
+#define FLCN_DBG(f,fmt,a...) FLCN_PRINTK(debug, (f), fmt, ##a)
+#define FLCN_ERR(f,fmt,a...) FLCN_PRINTK(error, (f), fmt, ##a)
+
+/**
+ * struct nv_falcon_msg - header for all messages
+ *
+ * @unit_id: id of firmware process that sent the message
+ * @size: total size of message
+ * @ctrl_flags: control flags
+ * @seq_id: used to match a message to its corresponding command
+ */
+struct nv_falcon_msg {
+ u8 unit_id;
+ u8 size;
+ u8 ctrl_flags;
+ u8 seq_id;
+};
+
+#define nv_falcon_cmd nv_falcon_msg
+#define NV_FALCON_CMD_UNIT_ID_REWIND 0x00
+
+struct nvkm_falcon_qmgr;
+int nvkm_falcon_qmgr_new(struct nvkm_falcon *, struct nvkm_falcon_qmgr **);
+void nvkm_falcon_qmgr_del(struct nvkm_falcon_qmgr **);
+
+typedef int
+(*nvkm_falcon_qmgr_callback)(void *priv, struct nv_falcon_msg *);
+
+struct nvkm_falcon_cmdq;
+int nvkm_falcon_cmdq_new(struct nvkm_falcon_qmgr *, const char *name,
+ struct nvkm_falcon_cmdq **);
+void nvkm_falcon_cmdq_del(struct nvkm_falcon_cmdq **);
+void nvkm_falcon_cmdq_init(struct nvkm_falcon_cmdq *,
+ u32 index, u32 offset, u32 size);
+void nvkm_falcon_cmdq_fini(struct nvkm_falcon_cmdq *);
+int nvkm_falcon_cmdq_send(struct nvkm_falcon_cmdq *, struct nv_falcon_cmd *,
+ nvkm_falcon_qmgr_callback, void *priv,
+ unsigned long timeout_jiffies);
+
+struct nvkm_falcon_msgq;
+int nvkm_falcon_msgq_new(struct nvkm_falcon_qmgr *, const char *name,
+ struct nvkm_falcon_msgq **);
+void nvkm_falcon_msgq_del(struct nvkm_falcon_msgq **);
+void nvkm_falcon_msgq_init(struct nvkm_falcon_msgq *,
+ u32 index, u32 offset, u32 size);
+int nvkm_falcon_msgq_recv_initmsg(struct nvkm_falcon_msgq *, void *, u32 size);
+void nvkm_falcon_msgq_recv(struct nvkm_falcon_msgq *);
+#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/core/firmware.h b/drivers/gpu/drm/nouveau/include/nvkm/core/firmware.h
index 383370c32428..d14b7fb07368 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/core/firmware.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/core/firmware.h
@@ -1,12 +1,55 @@
/* SPDX-License-Identifier: MIT */
#ifndef __NVKM_FIRMWARE_H__
#define __NVKM_FIRMWARE_H__
+#include <core/option.h>
#include <core/subdev.h>
-int nvkm_firmware_get_version(const struct nvkm_subdev *, const char *fwname,
- int min_version, int max_version,
- const struct firmware **);
-int nvkm_firmware_get(const struct nvkm_subdev *, const char *fwname,
+int nvkm_firmware_get(const struct nvkm_subdev *, const char *fwname, int ver,
const struct firmware **);
void nvkm_firmware_put(const struct firmware *);
+
+int nvkm_firmware_load_blob(const struct nvkm_subdev *subdev, const char *path,
+ const char *name, int ver, struct nvkm_blob *);
+int nvkm_firmware_load_name(const struct nvkm_subdev *subdev, const char *path,
+ const char *name, int ver,
+ const struct firmware **);
+
+#define nvkm_firmware_load(s,l,o,p...) ({ \
+ struct nvkm_subdev *_s = (s); \
+ const char *_opts = (o); \
+ char _option[32]; \
+ typeof(l[0]) *_list = (l), *_next, *_fwif = NULL; \
+ int _ver, _fwv, _ret = 0; \
+ \
+ snprintf(_option, sizeof(_option), "Nv%sFw", _opts); \
+ _ver = nvkm_longopt(_s->device->cfgopt, _option, -2); \
+ if (_ver >= -1) { \
+ for (_next = _list; !_fwif && _next->load; _next++) { \
+ if (_next->version == _ver) \
+ _fwif = _next; \
+ } \
+ _ret = _fwif ? 0 : -EINVAL; \
+ } \
+ \
+ if (_ret == 0) { \
+ snprintf(_option, sizeof(_option), "Nv%sFwVer", _opts); \
+ _fwv = _fwif ? _fwif->version : -1; \
+ _ver = nvkm_longopt(_s->device->cfgopt, _option, _fwv); \
+ for (_next = _fwif ? _fwif : _list; _next->load; _next++) { \
+ _fwv = (_ver >= 0) ? _ver : _next->version; \
+ _ret = _next->load(p, _fwv, _next); \
+ if (_ret == 0 || _ver >= 0) { \
+ _fwif = _next; \
+ break; \
+ } \
+ } \
+ } \
+ \
+ if (_ret) { \
+ nvkm_error(_s, "failed to load firmware\n"); \
+ _fwif = ERR_PTR(_ret); \
+ } \
+ \
+ _fwif; \
+})
#endif
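nvkm_firmware_load() walks a terminator-ended table of loader entries, honoring the Nv<Name>Fw and Nv<Name>FwVer config options, and returns the matching entry or an ERR_PTR(). A minimal sketch of a compatible table; the entry type and loader names are hypothetical, but the version/load members are what the macro expects:

    struct foo_fwif { /* hypothetical entry type */
        int version;
        int (*load)(struct foo *, int ver, const struct foo_fwif *);
    };

    static const struct foo_fwif foo_fwif[] = {
        { 1, foo_load_v1 },
        { 0, foo_load_v0 },
        {} /* terminator: .load == NULL stops the walk */
    };

    fwif = nvkm_firmware_load(&foo->subdev, foo_fwif, "Foo", foo);
    if (IS_ERR(fwif))
        return PTR_ERR(fwif);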
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/core/memory.h b/drivers/gpu/drm/nouveau/include/nvkm/core/memory.h
index b23bf6109f2d..74d3f1a809d7 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/core/memory.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/core/memory.h
@@ -84,6 +84,22 @@ void nvkm_memory_tags_put(struct nvkm_memory *, struct nvkm_device *,
nvkm_wo32((o), __a + 4, upper_32_bits(__d)); \
} while(0)
+#define nvkm_robj(o,a,p,s) do { \
+ u32 _addr = (a), _size = (s) >> 2, *_data = (void *)(p); \
+ while (_size--) { \
+ *(_data++) = nvkm_ro32((o), _addr); \
+ _addr += 4; \
+ } \
+} while(0)
+
+#define nvkm_wobj(o,a,p,s) do { \
+ u32 _addr = (a), _size = (s) >> 2, *_data = (void *)(p); \
+ while (_size--) { \
+ nvkm_wo32((o), _addr, *(_data++)); \
+ _addr += 4; \
+ } \
+} while(0)
+
#define nvkm_fill(t,s,o,a,d,c) do { \
u64 _a = (a), _c = (c), _d = (d), _o = _a >> s, _s = _c << s; \
u##t __iomem *_m = nvkm_kmap(o); \
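nvkm_robj()/nvkm_wobj() bulk-copy between a CPU buffer and a memory object in 32-bit words, so the byte size is assumed to be a multiple of four. A hedged usage sketch, wrapped in the usual kmap/done pair:

    u32 buf[4] = { 0x1000, 0x2000, 0x3000, 0x4000 };

    nvkm_kmap(memory);
    nvkm_wobj(memory, 0x100, buf, sizeof(buf)); /* write 16 bytes */
    nvkm_robj(memory, 0x100, buf, sizeof(buf)); /* read them back */
    nvkm_done(memory);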
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/core/os.h b/drivers/gpu/drm/nouveau/include/nvkm/core/os.h
index 029a416197db..d7ba3205207f 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/core/os.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/core/os.h
@@ -21,4 +21,17 @@
iowrite32_native(lower_32_bits(_v), &_p[0]); \
iowrite32_native(upper_32_bits(_v), &_p[1]); \
} while(0)
+
+struct nvkm_blob {
+ void *data;
+ u32 size;
+};
+
+static inline void
+nvkm_blob_dtor(struct nvkm_blob *blob)
+{
+ kfree(blob->data);
+ blob->data = NULL;
+ blob->size = 0;
+}
#endif
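struct nvkm_blob pairs with nvkm_firmware_load_blob() from the firmware header earlier in this patch; a sketch of the expected lifecycle, with illustrative path/name/version values:

    struct nvkm_blob blob = {};
    int ret = nvkm_firmware_load_blob(subdev, "nvdec/", "scrubber", 0, &blob);

    if (ret)
        return ret;
    /* ... consume blob.data / blob.size ... */
    nvkm_blob_dtor(&blob); /* frees data, zeroes the fields */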
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/engine/falcon.h b/drivers/gpu/drm/nouveau/include/nvkm/engine/falcon.h
index 23b582d696c6..27c1f868552c 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/engine/falcon.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/engine/falcon.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: MIT */
-#ifndef __NVKM_FALCON_H__
-#define __NVKM_FALCON_H__
+#ifndef __NVKM_FLCNEN_H__
+#define __NVKM_FLCNEN_H__
#define nvkm_falcon(p) container_of((p), struct nvkm_falcon, engine)
#include <core/engine.h>
struct nvkm_fifo_chan;
@@ -23,12 +23,13 @@ struct nvkm_falcon {
struct mutex mutex;
struct mutex dmem_mutex;
+ bool oneinit;
+
const struct nvkm_subdev *user;
u8 version;
u8 secret;
bool debug;
- bool has_emem;
struct nvkm_memory *core;
bool external;
@@ -76,9 +77,14 @@ struct nvkm_falcon_func {
} data;
void (*init)(struct nvkm_falcon *);
void (*intr)(struct nvkm_falcon *, struct nvkm_fifo_chan *);
+
+ u32 debug;
+ u32 fbif;
+
void (*load_imem)(struct nvkm_falcon *, void *, u32, u32, u16, u8, bool);
void (*load_dmem)(struct nvkm_falcon *, void *, u32, u32, u8);
void (*read_dmem)(struct nvkm_falcon *, u32, u32, u8, void *);
+ u32 emem_addr;
void (*bind_context)(struct nvkm_falcon *, struct nvkm_memory *);
int (*wait_for_halt)(struct nvkm_falcon *, u32);
int (*clear_interrupt)(struct nvkm_falcon *, u32);
@@ -86,6 +92,13 @@ struct nvkm_falcon_func {
void (*start)(struct nvkm_falcon *);
int (*enable)(struct nvkm_falcon *falcon);
void (*disable)(struct nvkm_falcon *falcon);
+ int (*reset)(struct nvkm_falcon *);
+
+ struct {
+ u32 head;
+ u32 tail;
+ u32 stride;
+ } cmdq, msgq;
struct nvkm_sclass sclass[];
};
@@ -122,5 +135,4 @@ int nvkm_falcon_clear_interrupt(struct nvkm_falcon *, u32);
int nvkm_falcon_enable(struct nvkm_falcon *);
void nvkm_falcon_disable(struct nvkm_falcon *);
int nvkm_falcon_reset(struct nvkm_falcon *);
-
#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/engine/gr.h b/drivers/gpu/drm/nouveau/include/nvkm/engine/gr.h
index 2cde36f3c064..1530c81f86a2 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/engine/gr.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/engine/gr.h
@@ -50,6 +50,8 @@ int gp100_gr_new(struct nvkm_device *, int, struct nvkm_gr **);
int gp102_gr_new(struct nvkm_device *, int, struct nvkm_gr **);
int gp104_gr_new(struct nvkm_device *, int, struct nvkm_gr **);
int gp107_gr_new(struct nvkm_device *, int, struct nvkm_gr **);
+int gp108_gr_new(struct nvkm_device *, int, struct nvkm_gr **);
int gp10b_gr_new(struct nvkm_device *, int, struct nvkm_gr **);
int gv100_gr_new(struct nvkm_device *, int, struct nvkm_gr **);
+int tu102_gr_new(struct nvkm_device *, int, struct nvkm_gr **);
#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/engine/nvdec.h b/drivers/gpu/drm/nouveau/include/nvkm/engine/nvdec.h
index 7c7d7f0abfcc..1b3183e31606 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/engine/nvdec.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/engine/nvdec.h
@@ -3,13 +3,13 @@
#define __NVKM_NVDEC_H__
#define nvkm_nvdec(p) container_of((p), struct nvkm_nvdec, engine)
#include <core/engine.h>
+#include <core/falcon.h>
struct nvkm_nvdec {
+ const struct nvkm_nvdec_func *func;
struct nvkm_engine engine;
- u32 addr;
-
- struct nvkm_falcon *falcon;
+ struct nvkm_falcon falcon;
};
-int gp102_nvdec_new(struct nvkm_device *, int, struct nvkm_nvdec **);
+int gm107_nvdec_new(struct nvkm_device *, int, struct nvkm_nvdec **);
#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/engine/nvenc.h b/drivers/gpu/drm/nouveau/include/nvkm/engine/nvenc.h
index 21624046d0a1..33e6ba8adc8d 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/engine/nvenc.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/engine/nvenc.h
@@ -1,5 +1,15 @@
/* SPDX-License-Identifier: MIT */
#ifndef __NVKM_NVENC_H__
#define __NVKM_NVENC_H__
+#define nvkm_nvenc(p) container_of((p), struct nvkm_nvenc, engine)
#include <core/engine.h>
+#include <core/falcon.h>
+
+struct nvkm_nvenc {
+ const struct nvkm_nvenc_func *func;
+ struct nvkm_engine engine;
+ struct nvkm_falcon falcon;
+};
+
+int gm107_nvenc_new(struct nvkm_device *, int, struct nvkm_nvenc **);
#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/engine/sec2.h b/drivers/gpu/drm/nouveau/include/nvkm/engine/sec2.h
index 33078f86c779..34dc765648d5 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/engine/sec2.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/engine/sec2.h
@@ -1,17 +1,24 @@
/* SPDX-License-Identifier: MIT */
#ifndef __NVKM_SEC2_H__
#define __NVKM_SEC2_H__
+#define nvkm_sec2(p) container_of((p), struct nvkm_sec2, engine)
#include <core/engine.h>
+#include <core/falcon.h>
struct nvkm_sec2 {
+ const struct nvkm_sec2_func *func;
struct nvkm_engine engine;
- u32 addr;
+ struct nvkm_falcon falcon;
+
+ struct nvkm_falcon_qmgr *qmgr;
+ struct nvkm_falcon_cmdq *cmdq;
+ struct nvkm_falcon_msgq *msgq;
- struct nvkm_falcon *falcon;
- struct nvkm_msgqueue *queue;
struct work_struct work;
+ bool initmsg_received;
};
int gp102_sec2_new(struct nvkm_device *, int, struct nvkm_sec2 **);
+int gp108_sec2_new(struct nvkm_device *, int, struct nvkm_sec2 **);
int tu102_sec2_new(struct nvkm_device *, int, struct nvkm_sec2 **);
#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/acr.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/acr.h
new file mode 100644
index 000000000000..5d9c3a966de6
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/acr.h
@@ -0,0 +1,126 @@
+/* SPDX-License-Identifier: MIT */
+#ifndef __NVKM_ACR_H__
+#define __NVKM_ACR_H__
+#define nvkm_acr(p) container_of((p), struct nvkm_acr, subdev)
+#include <core/subdev.h>
+#include <core/falcon.h>
+
+enum nvkm_acr_lsf_id {
+ NVKM_ACR_LSF_PMU = 0,
+ NVKM_ACR_LSF_GSPLITE = 1,
+ NVKM_ACR_LSF_FECS = 2,
+ NVKM_ACR_LSF_GPCCS = 3,
+ NVKM_ACR_LSF_NVDEC = 4,
+ NVKM_ACR_LSF_SEC2 = 7,
+ NVKM_ACR_LSF_MINION = 10,
+ NVKM_ACR_LSF_NUM
+};
+
+static inline const char *
+nvkm_acr_lsf_id(enum nvkm_acr_lsf_id id)
+{
+ switch (id) {
+ case NVKM_ACR_LSF_PMU : return "pmu";
+ case NVKM_ACR_LSF_GSPLITE: return "gsplite";
+ case NVKM_ACR_LSF_FECS : return "fecs";
+ case NVKM_ACR_LSF_GPCCS : return "gpccs";
+ case NVKM_ACR_LSF_NVDEC : return "nvdec";
+ case NVKM_ACR_LSF_SEC2 : return "sec2";
+ case NVKM_ACR_LSF_MINION : return "minion";
+ default:
+ return "unknown";
+ }
+}
+
+struct nvkm_acr {
+ const struct nvkm_acr_func *func;
+ struct nvkm_subdev subdev;
+
+ struct list_head hsfw, hsf;
+ struct list_head lsfw, lsf;
+
+ struct nvkm_memory *wpr;
+ u64 wpr_start;
+ u64 wpr_end;
+ u64 shadow_start;
+
+ struct nvkm_memory *inst;
+ struct nvkm_vmm *vmm;
+
+ bool done;
+
+ const struct firmware *wpr_fw;
+ bool wpr_comp;
+ u64 wpr_prev;
+};
+
+bool nvkm_acr_managed_falcon(struct nvkm_device *, enum nvkm_acr_lsf_id);
+int nvkm_acr_bootstrap_falcons(struct nvkm_device *, unsigned long mask);
+
+int gm200_acr_new(struct nvkm_device *, int, struct nvkm_acr **);
+int gm20b_acr_new(struct nvkm_device *, int, struct nvkm_acr **);
+int gp102_acr_new(struct nvkm_device *, int, struct nvkm_acr **);
+int gp108_acr_new(struct nvkm_device *, int, struct nvkm_acr **);
+int gp10b_acr_new(struct nvkm_device *, int, struct nvkm_acr **);
+int tu102_acr_new(struct nvkm_device *, int, struct nvkm_acr **);
+
+struct nvkm_acr_lsfw {
+ const struct nvkm_acr_lsf_func *func;
+ struct nvkm_falcon *falcon;
+ enum nvkm_acr_lsf_id id;
+
+ struct list_head head;
+
+ struct nvkm_blob img;
+
+ const struct firmware *sig;
+
+ u32 bootloader_size;
+ u32 bootloader_imem_offset;
+
+ u32 app_size;
+ u32 app_start_offset;
+ u32 app_imem_entry;
+ u32 app_resident_code_offset;
+ u32 app_resident_code_size;
+ u32 app_resident_data_offset;
+ u32 app_resident_data_size;
+
+ u32 ucode_size;
+ u32 data_size;
+
+ struct {
+ u32 lsb;
+ u32 img;
+ u32 bld;
+ } offset;
+ u32 bl_data_size;
+};
+
+struct nvkm_acr_lsf_func {
+/* These (currently) map directly to LSB header flags. */
+#define NVKM_ACR_LSF_LOAD_CODE_AT_0 0x00000001
+#define NVKM_ACR_LSF_DMACTL_REQ_CTX 0x00000004
+#define NVKM_ACR_LSF_FORCE_PRIV_LOAD 0x00000008
+ u32 flags;
+ u32 bld_size;
+ void (*bld_write)(struct nvkm_acr *, u32 bld, struct nvkm_acr_lsfw *);
+ void (*bld_patch)(struct nvkm_acr *, u32 bld, s64 adjust);
+ int (*boot)(struct nvkm_falcon *);
+ int (*bootstrap_falcon)(struct nvkm_falcon *, enum nvkm_acr_lsf_id);
+ int (*bootstrap_multiple_falcons)(struct nvkm_falcon *, u32 mask);
+};
+
+int
+nvkm_acr_lsfw_load_sig_image_desc(struct nvkm_subdev *, struct nvkm_falcon *,
+ enum nvkm_acr_lsf_id, const char *path,
+ int ver, const struct nvkm_acr_lsf_func *);
+int
+nvkm_acr_lsfw_load_sig_image_desc_v1(struct nvkm_subdev *, struct nvkm_falcon *,
+ enum nvkm_acr_lsf_id, const char *path,
+ int ver, const struct nvkm_acr_lsf_func *);
+int
+nvkm_acr_lsfw_load_bl_inst_data_sig(struct nvkm_subdev *, struct nvkm_falcon *,
+ enum nvkm_acr_lsf_id, const char *path,
+ int ver, const struct nvkm_acr_lsf_func *);
+#endif
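Callers are expected to gate LS falcon handling on nvkm_acr_managed_falcon() and request a reload through nvkm_acr_bootstrap_falcons() with a mask built from the enum above, for example (illustrative):

    if (nvkm_acr_managed_falcon(device, NVKM_ACR_LSF_FECS)) {
        int ret = nvkm_acr_bootstrap_falcons(device,
                                             BIT(NVKM_ACR_LSF_FECS) |
                                             BIT(NVKM_ACR_LSF_GPCCS));
        if (ret)
            return ret;
    }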
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/fault.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/fault.h
index 97322f95b3ee..a513c16ab105 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/fault.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/fault.h
@@ -31,6 +31,7 @@ struct nvkm_fault_data {
};
int gp100_fault_new(struct nvkm_device *, int, struct nvkm_fault **);
+int gp10b_fault_new(struct nvkm_device *, int, struct nvkm_fault **);
int gv100_fault_new(struct nvkm_device *, int, struct nvkm_fault **);
int tu102_fault_new(struct nvkm_device *, int, struct nvkm_fault **);
#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/fb.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/fb.h
index 239ad222b95a..34b56b10218a 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/fb.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/fb.h
@@ -33,6 +33,8 @@ struct nvkm_fb {
const struct nvkm_fb_func *func;
struct nvkm_subdev subdev;
+ struct nvkm_blob vpr_scrubber;
+
struct nvkm_ram *ram;
struct nvkm_mm tags;
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/gsp.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/gsp.h
index 4c672a5c4cd5..06db67610a50 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/gsp.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/gsp.h
@@ -2,12 +2,11 @@
#define __NVKM_GSP_H__
#define nvkm_gsp(p) container_of((p), struct nvkm_gsp, subdev)
#include <core/subdev.h>
+#include <core/falcon.h>
struct nvkm_gsp {
struct nvkm_subdev subdev;
- u32 addr;
-
- struct nvkm_falcon *falcon;
+ struct nvkm_falcon falcon;
};
int gv100_gsp_new(struct nvkm_device *, int, struct nvkm_gsp **);
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/ltc.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/ltc.h
index 644d527c3b96..d76f60d7d29a 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/ltc.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/ltc.h
@@ -40,4 +40,5 @@ int gm107_ltc_new(struct nvkm_device *, int, struct nvkm_ltc **);
int gm200_ltc_new(struct nvkm_device *, int, struct nvkm_ltc **);
int gp100_ltc_new(struct nvkm_device *, int, struct nvkm_ltc **);
int gp102_ltc_new(struct nvkm_device *, int, struct nvkm_ltc **);
+int gp10b_ltc_new(struct nvkm_device *, int, struct nvkm_ltc **);
#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/pmu.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/pmu.h
index 4752006880f3..da553089d2d8 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/pmu.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/pmu.h
@@ -2,13 +2,20 @@
#ifndef __NVKM_PMU_H__
#define __NVKM_PMU_H__
#include <core/subdev.h>
-#include <engine/falcon.h>
+#include <core/falcon.h>
struct nvkm_pmu {
const struct nvkm_pmu_func *func;
struct nvkm_subdev subdev;
- struct nvkm_falcon *falcon;
- struct nvkm_msgqueue *queue;
+ struct nvkm_falcon falcon;
+
+ struct nvkm_falcon_qmgr *qmgr;
+ struct nvkm_falcon_cmdq *hpq;
+ struct nvkm_falcon_cmdq *lpq;
+ struct nvkm_falcon_msgq *msgq;
+ bool initmsg_received;
+
+ struct completion wpr_ready;
struct {
u32 base;
@@ -43,6 +50,7 @@ int gm107_pmu_new(struct nvkm_device *, int, struct nvkm_pmu **);
int gm20b_pmu_new(struct nvkm_device *, int, struct nvkm_pmu **);
int gp100_pmu_new(struct nvkm_device *, int, struct nvkm_pmu **);
int gp102_pmu_new(struct nvkm_device *, int, struct nvkm_pmu **);
+int gp10b_pmu_new(struct nvkm_device *, int, struct nvkm_pmu **);
/* interface to MEMX process running on PMU */
struct nvkm_memx;
diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.c b/drivers/gpu/drm/nouveau/nouveau_bo.c
index f8015e0318d7..1b62ccc57aef 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bo.c
+++ b/drivers/gpu/drm/nouveau/nouveau_bo.c
@@ -1162,7 +1162,7 @@ nouveau_bo_move_m2mf(struct ttm_buffer_object *bo, int evict, bool intr,
void
nouveau_bo_move_init(struct nouveau_drm *drm)
{
- static const struct {
+ static const struct _method_table {
const char *name;
int engine;
s32 oclass;
@@ -1192,7 +1192,8 @@ nouveau_bo_move_init(struct nouveau_drm *drm)
{ "M2MF", 0, 0x0039, nv04_bo_move_m2mf, nv04_bo_move_init },
{},
{ "CRYPT", 0, 0x88b4, nv98_bo_move_exec, nv50_bo_move_init },
- }, *mthd = _methods;
+ };
+ const struct _method_table *mthd = _methods;
const char *name = "CPU";
int ret;
diff --git a/drivers/gpu/drm/nouveau/nouveau_chan.c b/drivers/gpu/drm/nouveau/nouveau_chan.c
index 282fd90b65e1..d9381a053169 100644
--- a/drivers/gpu/drm/nouveau/nouveau_chan.c
+++ b/drivers/gpu/drm/nouveau/nouveau_chan.c
@@ -55,6 +55,8 @@ nouveau_channel_killed(struct nvif_notify *ntfy)
struct nouveau_cli *cli = (void *)chan->user.client;
NV_PRINTK(warn, cli, "channel %d killed!\n", chan->chid);
atomic_set(&chan->killed, 1);
+ if (chan->fence)
+ nouveau_fence_context_kill(chan->fence, -ENODEV);
return NVIF_NOTIFY_DROP;
}
diff --git a/drivers/gpu/drm/nouveau/nouveau_dmem.c b/drivers/gpu/drm/nouveau/nouveau_dmem.c
index fa1439941596..0ad5d87b5a8e 100644
--- a/drivers/gpu/drm/nouveau/nouveau_dmem.c
+++ b/drivers/gpu/drm/nouveau/nouveau_dmem.c
@@ -635,10 +635,10 @@ nouveau_dmem_migrate_vma(struct nouveau_drm *drm,
unsigned long c, i;
int ret = -ENOMEM;
- args.src = kcalloc(max, sizeof(args.src), GFP_KERNEL);
+ args.src = kcalloc(max, sizeof(*args.src), GFP_KERNEL);
if (!args.src)
goto out;
- args.dst = kcalloc(max, sizeof(args.dst), GFP_KERNEL);
+ args.dst = kcalloc(max, sizeof(*args.dst), GFP_KERNEL);
if (!args.dst)
goto out_free_src;
diff --git a/drivers/gpu/drm/nouveau/nouveau_drm.c b/drivers/gpu/drm/nouveau/nouveau_drm.c
index 2cd83849600f..b65ae817eabf 100644
--- a/drivers/gpu/drm/nouveau/nouveau_drm.c
+++ b/drivers/gpu/drm/nouveau/nouveau_drm.c
@@ -715,7 +715,6 @@ fail_nvkm:
void
nouveau_drm_device_remove(struct drm_device *dev)
{
- struct pci_dev *pdev = dev->pdev;
struct nouveau_drm *drm = nouveau_drm(dev);
struct nvkm_client *client;
struct nvkm_device *device;
@@ -727,7 +726,6 @@ nouveau_drm_device_remove(struct drm_device *dev)
device = nvkm_device_find(client->device);
nouveau_drm_device_fini(dev);
- pci_disable_device(pdev);
drm_dev_put(dev);
nvkm_device_del(&device);
}
@@ -738,6 +736,7 @@ nouveau_drm_remove(struct pci_dev *pdev)
struct drm_device *dev = pci_get_drvdata(pdev);
nouveau_drm_device_remove(dev);
+ pci_disable_device(pdev);
}
static int
diff --git a/drivers/gpu/drm/nouveau/nouveau_drv.h b/drivers/gpu/drm/nouveau/nouveau_drv.h
index 70f34cacc552..c2c332fbde97 100644
--- a/drivers/gpu/drm/nouveau/nouveau_drv.h
+++ b/drivers/gpu/drm/nouveau/nouveau_drv.h
@@ -58,6 +58,8 @@
#include <drm/ttm/ttm_module.h>
#include <drm/ttm/ttm_page_alloc.h>
+#include <drm/drm_audio_component.h>
+
#include "uapi/drm/nouveau_drm.h"
struct nouveau_channel;
@@ -211,6 +213,11 @@ struct nouveau_drm {
struct nouveau_svm *svm;
struct nouveau_dmem *dmem;
+
+ struct {
+ struct drm_audio_component *component;
+ bool component_registered;
+ } audio;
};
static inline struct nouveau_drm *
@@ -248,11 +255,11 @@ void nouveau_drm_device_remove(struct drm_device *dev);
#define NV_INFO(drm,f,a...) NV_PRINTK(info, &(drm)->client, f, ##a)
#define NV_DEBUG(drm,f,a...) do { \
- if (unlikely(drm_debug & DRM_UT_DRIVER)) \
+ if (drm_debug_enabled(DRM_UT_DRIVER)) \
NV_PRINTK(info, &(drm)->client, f, ##a); \
} while(0)
#define NV_ATOMIC(drm,f,a...) do { \
- if (unlikely(drm_debug & DRM_UT_ATOMIC)) \
+ if (drm_debug_enabled(DRM_UT_ATOMIC)) \
NV_PRINTK(info, &(drm)->client, f, ##a); \
} while(0)
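
The NV_DEBUG/NV_ATOMIC rewrites above stop testing the drm_debug mask directly and go through drm_debug_enabled(), the DRM core helper for asking whether a debug category is active. A hedged sketch of the same pattern in driver code (the dump function is hypothetical):

#include <drm/drm_print.h>

/* Gate an expensive state dump on the DRIVER debug category. */
static void example_dump_state(struct drm_device *dev)
{
        if (!drm_debug_enabled(DRM_UT_DRIVER))
                return;

        drm_dbg(dev, "dumping channel state\n");
}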
diff --git a/drivers/gpu/drm/nouveau/nouveau_fbcon.c b/drivers/gpu/drm/nouveau/nouveau_fbcon.c
index f439f0a5b43a..0c5cdda3c336 100644
--- a/drivers/gpu/drm/nouveau/nouveau_fbcon.c
+++ b/drivers/gpu/drm/nouveau/nouveau_fbcon.c
@@ -203,7 +203,7 @@ nouveau_fbcon_release(struct fb_info *info, int user)
return 0;
}
-static struct fb_ops nouveau_fbcon_ops = {
+static const struct fb_ops nouveau_fbcon_ops = {
.owner = THIS_MODULE,
DRM_FB_HELPER_DEFAULT_OPS,
.fb_open = nouveau_fbcon_open,
@@ -214,7 +214,7 @@ static struct fb_ops nouveau_fbcon_ops = {
.fb_sync = nouveau_fbcon_sync,
};
-static struct fb_ops nouveau_fbcon_sw_ops = {
+static const struct fb_ops nouveau_fbcon_sw_ops = {
.owner = THIS_MODULE,
DRM_FB_HELPER_DEFAULT_OPS,
.fb_open = nouveau_fbcon_open,
diff --git a/drivers/gpu/drm/nouveau/nouveau_fence.c b/drivers/gpu/drm/nouveau/nouveau_fence.c
index 9118df035b28..666f2090d92b 100644
--- a/drivers/gpu/drm/nouveau/nouveau_fence.c
+++ b/drivers/gpu/drm/nouveau/nouveau_fence.c
@@ -87,7 +87,7 @@ nouveau_local_fence(struct dma_fence *fence, struct nouveau_drm *drm)
}
void
-nouveau_fence_context_del(struct nouveau_fence_chan *fctx)
+nouveau_fence_context_kill(struct nouveau_fence_chan *fctx, int error)
{
struct nouveau_fence *fence;
@@ -95,11 +95,19 @@ nouveau_fence_context_del(struct nouveau_fence_chan *fctx)
while (!list_empty(&fctx->pending)) {
fence = list_entry(fctx->pending.next, typeof(*fence), head);
+ if (error)
+ dma_fence_set_error(&fence->base, error);
+
if (nouveau_fence_signal(fence))
nvif_notify_put(&fctx->notify);
}
spin_unlock_irq(&fctx->lock);
+}
+void
+nouveau_fence_context_del(struct nouveau_fence_chan *fctx)
+{
+ nouveau_fence_context_kill(fctx, 0);
nvif_notify_fini(&fctx->notify);
fctx->dead = 1;
@@ -156,7 +164,7 @@ nouveau_fence_wait_uevent_handler(struct nvif_notify *notify)
fence = list_entry(fctx->pending.next, typeof(*fence), head);
chan = rcu_dereference_protected(fence->channel, lockdep_is_held(&fctx->lock));
- if (nouveau_fence_update(fence->channel, fctx))
+ if (nouveau_fence_update(chan, fctx))
ret = NVIF_NOTIFY_DROP;
}
spin_unlock_irqrestore(&fctx->lock, flags);
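
With the split above, nouveau_fence_context_del() becomes a wrapper that kills the context with error 0, signalling pending fences normally, while the channel-death path in nouveau_chan.c passes -ENODEV so each pending fence is tagged via dma_fence_set_error() before it is signalled; dma_fence_set_error() must be called before a fence is signalled for waiters to observe the error. An illustrative caller, assuming only the prototype added in nouveau_fence.h below:

/* On channel death, fail every pending fence so dma_fence_wait()
 * callers see an error rather than a normal completion. */
static void example_channel_dead(struct nouveau_fence_chan *fctx)
{
        nouveau_fence_context_kill(fctx, -ENODEV);
}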
diff --git a/drivers/gpu/drm/nouveau/nouveau_fence.h b/drivers/gpu/drm/nouveau/nouveau_fence.h
index c9e24baaaa4f..4887caa69c65 100644
--- a/drivers/gpu/drm/nouveau/nouveau_fence.h
+++ b/drivers/gpu/drm/nouveau/nouveau_fence.h
@@ -63,6 +63,7 @@ struct nouveau_fence_priv {
void nouveau_fence_context_new(struct nouveau_channel *, struct nouveau_fence_chan *);
void nouveau_fence_context_del(struct nouveau_fence_chan *);
void nouveau_fence_context_free(struct nouveau_fence_chan *);
+void nouveau_fence_context_kill(struct nouveau_fence_chan *, int error);
int nv04_fence_create(struct nouveau_drm *);
int nv04_fence_mthd(struct nouveau_channel *, u32, u32, u32);
diff --git a/drivers/gpu/drm/nouveau/nouveau_gem.c b/drivers/gpu/drm/nouveau/nouveau_gem.c
index 1324c19f4e5c..f5ece1f94973 100644
--- a/drivers/gpu/drm/nouveau/nouveau_gem.c
+++ b/drivers/gpu/drm/nouveau/nouveau_gem.c
@@ -484,12 +484,9 @@ retry:
static int
validate_list(struct nouveau_channel *chan, struct nouveau_cli *cli,
- struct list_head *list, struct drm_nouveau_gem_pushbuf_bo *pbbo,
- uint64_t user_pbbo_ptr)
+ struct list_head *list, struct drm_nouveau_gem_pushbuf_bo *pbbo)
{
struct nouveau_drm *drm = chan->drm;
- struct drm_nouveau_gem_pushbuf_bo __user *upbbo =
- (void __force __user *)(uintptr_t)user_pbbo_ptr;
struct nouveau_bo *nvbo;
int ret, relocs = 0;
@@ -533,10 +530,6 @@ validate_list(struct nouveau_channel *chan, struct nouveau_cli *cli,
b->presumed.offset = nvbo->bo.offset;
b->presumed.valid = 0;
relocs++;
-
- if (copy_to_user(&upbbo[nvbo->pbbo_index].presumed,
- &b->presumed, sizeof(b->presumed)))
- return -EFAULT;
}
}
@@ -547,8 +540,8 @@ static int
nouveau_gem_pushbuf_validate(struct nouveau_channel *chan,
struct drm_file *file_priv,
struct drm_nouveau_gem_pushbuf_bo *pbbo,
- uint64_t user_buffers, int nr_buffers,
- struct validate_op *op, int *apply_relocs)
+ int nr_buffers,
+ struct validate_op *op, bool *apply_relocs)
{
struct nouveau_cli *cli = nouveau_cli(file_priv);
int ret;
@@ -565,7 +558,7 @@ nouveau_gem_pushbuf_validate(struct nouveau_channel *chan,
return ret;
}
- ret = validate_list(chan, cli, &op->list, pbbo, user_buffers);
+ ret = validate_list(chan, cli, &op->list, pbbo);
if (unlikely(ret < 0)) {
if (ret != -ERESTARTSYS)
NV_PRINTK(err, cli, "validating bo list\n");
@@ -605,16 +598,12 @@ u_memcpya(uint64_t user, unsigned nmemb, unsigned size)
static int
nouveau_gem_pushbuf_reloc_apply(struct nouveau_cli *cli,
struct drm_nouveau_gem_pushbuf *req,
+ struct drm_nouveau_gem_pushbuf_reloc *reloc,
struct drm_nouveau_gem_pushbuf_bo *bo)
{
- struct drm_nouveau_gem_pushbuf_reloc *reloc = NULL;
int ret = 0;
unsigned i;
- reloc = u_memcpya(req->relocs, req->nr_relocs, sizeof(*reloc));
- if (IS_ERR(reloc))
- return PTR_ERR(reloc);
-
for (i = 0; i < req->nr_relocs; i++) {
struct drm_nouveau_gem_pushbuf_reloc *r = &reloc[i];
struct drm_nouveau_gem_pushbuf_bo *b;
@@ -693,11 +682,13 @@ nouveau_gem_ioctl_pushbuf(struct drm_device *dev, void *data,
struct nouveau_drm *drm = nouveau_drm(dev);
struct drm_nouveau_gem_pushbuf *req = data;
struct drm_nouveau_gem_pushbuf_push *push;
+ struct drm_nouveau_gem_pushbuf_reloc *reloc = NULL;
struct drm_nouveau_gem_pushbuf_bo *bo;
struct nouveau_channel *chan = NULL;
struct validate_op op;
struct nouveau_fence *fence = NULL;
- int i, j, ret = 0, do_reloc = 0;
+ int i, j, ret = 0;
+ bool do_reloc = false, sync = false;
if (unlikely(!abi16))
return -ENOMEM;
@@ -711,6 +702,10 @@ nouveau_gem_ioctl_pushbuf(struct drm_device *dev, void *data,
if (!chan)
return nouveau_abi16_put(abi16, -ENOENT);
+ if (unlikely(atomic_read(&chan->killed)))
+ return nouveau_abi16_put(abi16, -ENODEV);
+
+ sync = req->vram_available & NOUVEAU_GEM_PUSHBUF_SYNC;
req->vram_available = drm->gem.vram_available;
req->gart_available = drm->gem.gart_available;
@@ -755,7 +750,8 @@ nouveau_gem_ioctl_pushbuf(struct drm_device *dev, void *data,
}
/* Validate buffer list */
- ret = nouveau_gem_pushbuf_validate(chan, file_priv, bo, req->buffers,
+revalidate:
+ ret = nouveau_gem_pushbuf_validate(chan, file_priv, bo,
req->nr_buffers, &op, &do_reloc);
if (ret) {
if (ret != -ERESTARTSYS)
@@ -765,7 +761,18 @@ nouveau_gem_ioctl_pushbuf(struct drm_device *dev, void *data,
/* Apply any relocations that are required */
if (do_reloc) {
- ret = nouveau_gem_pushbuf_reloc_apply(cli, req, bo);
+ if (!reloc) {
+ validate_fini(&op, chan, NULL, bo);
+ reloc = u_memcpya(req->relocs, req->nr_relocs, sizeof(*reloc));
+ if (IS_ERR(reloc)) {
+ ret = PTR_ERR(reloc);
+ goto out_prevalid;
+ }
+
+ goto revalidate;
+ }
+
+ ret = nouveau_gem_pushbuf_reloc_apply(cli, req, reloc, bo);
if (ret) {
NV_PRINTK(err, cli, "reloc apply: %d\n", ret);
goto out;
@@ -847,10 +854,33 @@ nouveau_gem_ioctl_pushbuf(struct drm_device *dev, void *data,
goto out;
}
+ if (sync) {
+ if (!(ret = nouveau_fence_wait(fence, false, false))) {
+ if ((ret = dma_fence_get_status(&fence->base)) == 1)
+ ret = 0;
+ }
+ }
+
out:
validate_fini(&op, chan, fence, bo);
nouveau_fence_unref(&fence);
+ if (do_reloc) {
+ struct drm_nouveau_gem_pushbuf_bo __user *upbbo =
+ u64_to_user_ptr(req->buffers);
+
+ for (i = 0; i < req->nr_buffers; i++) {
+ if (bo[i].presumed.valid)
+ continue;
+
+ if (copy_to_user(&upbbo[i].presumed, &bo[i].presumed,
+ sizeof(bo[i].presumed))) {
+ ret = -EFAULT;
+ break;
+ }
+ }
+ u_free(reloc);
+ }
out_prevalid:
u_free(bo);
u_free(push);
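
The reworked ioctl above copies relocations from userspace at most once, and only after dropping the validated buffer list (validate_fini() before u_memcpya(), then the jump back to revalidate); presumed offsets are likewise written back with copy_to_user() only after validate_fini() at the end. Keeping user-memory accesses, which can fault, outside the section where buffer reservations are held appears to be the point of the restructuring. A minimal sketch of that ordering, under stated assumptions (all names below are hypothetical stand-ins, not the driver's helpers):

#include <linux/err.h>
#include <linux/types.h>

struct ctx;
struct reloc;

extern int do_validate(struct ctx *ctx);       /* takes reservation locks */
extern void do_unvalidate(struct ctx *ctx);    /* drops them */
extern bool need_relocs(struct ctx *ctx);
extern struct reloc *copy_relocs_from_user(u64 uptr, u32 nr); /* may fault */
extern int apply_relocs(struct ctx *ctx, struct reloc *relocs);

static int example_submit(struct ctx *ctx, u64 user_relocs, u32 nr)
{
        struct reloc *relocs = NULL;
        int ret;

revalidate:
        ret = do_validate(ctx);
        if (ret)
                return ret;

        if (need_relocs(ctx) && !relocs) {
                /* Drop all reservations before touching user memory,
                 * then retry validation with the relocs already in
                 * kernel memory. Freeing of relocs elided for brevity. */
                do_unvalidate(ctx);
                relocs = copy_relocs_from_user(user_relocs, nr);
                if (IS_ERR(relocs))
                        return PTR_ERR(relocs);
                goto revalidate;
        }

        return apply_relocs(ctx, relocs);
}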
diff --git a/drivers/gpu/drm/nouveau/nouveau_hwmon.c b/drivers/gpu/drm/nouveau/nouveau_hwmon.c
index d445c6f3fece..1c3104d20571 100644
--- a/drivers/gpu/drm/nouveau/nouveau_hwmon.c
+++ b/drivers/gpu/drm/nouveau/nouveau_hwmon.c
@@ -741,7 +741,7 @@ nouveau_hwmon_init(struct drm_device *dev)
special_groups[i++] = &pwm_fan_sensor_group;
}
- special_groups[i] = 0;
+ special_groups[i] = NULL;
hwmon_dev = hwmon_device_register_with_info(dev->dev, "nouveau", dev,
&nouveau_chip_info,
special_groups);
diff --git a/drivers/gpu/drm/nouveau/nouveau_ttm.c b/drivers/gpu/drm/nouveau/nouveau_ttm.c
index 77a0c6ad3cef..7ca0a2498532 100644
--- a/drivers/gpu/drm/nouveau/nouveau_ttm.c
+++ b/drivers/gpu/drm/nouveau/nouveau_ttm.c
@@ -63,14 +63,12 @@ nouveau_vram_manager_new(struct ttm_mem_type_manager *man,
{
struct nouveau_bo *nvbo = nouveau_bo(bo);
struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
- struct nouveau_mem *mem;
int ret;
if (drm->client.device.info.ram_size == 0)
return -ENOMEM;
ret = nouveau_mem_new(&drm->master, nvbo->kind, nvbo->comp, reg);
- mem = nouveau_mem(reg);
if (ret)
return ret;
@@ -103,11 +101,9 @@ nouveau_gart_manager_new(struct ttm_mem_type_manager *man,
{
struct nouveau_bo *nvbo = nouveau_bo(bo);
struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
- struct nouveau_mem *mem;
int ret;
ret = nouveau_mem_new(&drm->master, nvbo->kind, nvbo->comp, reg);
- mem = nouveau_mem(reg);
if (ret)
return ret;
diff --git a/drivers/gpu/drm/nouveau/nouveau_vmm.c b/drivers/gpu/drm/nouveau/nouveau_vmm.c
index 77061182a1cf..b28c7dc13ad6 100644
--- a/drivers/gpu/drm/nouveau/nouveau_vmm.c
+++ b/drivers/gpu/drm/nouveau/nouveau_vmm.c
@@ -69,8 +69,8 @@ nouveau_vma_del(struct nouveau_vma **pvma)
}
list_del(&vma->head);
kfree(*pvma);
- *pvma = NULL;
}
+ *pvma = NULL;
}
int
diff --git a/drivers/gpu/drm/nouveau/nvif/mmu.c b/drivers/gpu/drm/nouveau/nvif/mmu.c
index 5641bda2046d..47efc408efa6 100644
--- a/drivers/gpu/drm/nouveau/nvif/mmu.c
+++ b/drivers/gpu/drm/nouveau/nvif/mmu.c
@@ -121,6 +121,7 @@ nvif_mmu_init(struct nvif_object *parent, s32 oclass, struct nvif_mmu *mmu)
kind, argc);
if (ret == 0)
memcpy(mmu->kind, kind->data, kind->count);
+ mmu->kind_inv = kind->kind_inv;
kfree(kind);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/Kbuild b/drivers/gpu/drm/nouveau/nvkm/Kbuild
index b53de9ba8c73..db3ade125fa9 100644
--- a/drivers/gpu/drm/nouveau/nvkm/Kbuild
+++ b/drivers/gpu/drm/nouveau/nvkm/Kbuild
@@ -1,5 +1,6 @@
# SPDX-License-Identifier: MIT
include $(src)/nvkm/core/Kbuild
+include $(src)/nvkm/nvfw/Kbuild
include $(src)/nvkm/falcon/Kbuild
include $(src)/nvkm/subdev/Kbuild
include $(src)/nvkm/engine/Kbuild
diff --git a/drivers/gpu/drm/nouveau/nvkm/core/firmware.c b/drivers/gpu/drm/nouveau/nvkm/core/firmware.c
index 092acdec2c39..8b25367917ca 100644
--- a/drivers/gpu/drm/nouveau/nvkm/core/firmware.c
+++ b/drivers/gpu/drm/nouveau/nvkm/core/firmware.c
@@ -22,6 +22,40 @@
#include <core/device.h>
#include <core/firmware.h>
+int
+nvkm_firmware_load_name(const struct nvkm_subdev *subdev, const char *base,
+ const char *name, int ver, const struct firmware **pfw)
+{
+ char path[64];
+ int ret;
+
+ snprintf(path, sizeof(path), "%s%s", base, name);
+ ret = nvkm_firmware_get(subdev, path, ver, pfw);
+ if (ret < 0)
+ return ret;
+
+ return 0;
+}
+
+int
+nvkm_firmware_load_blob(const struct nvkm_subdev *subdev, const char *base,
+ const char *name, int ver, struct nvkm_blob *blob)
+{
+ const struct firmware *fw;
+ int ret;
+
+ ret = nvkm_firmware_load_name(subdev, base, name, ver, &fw);
+ if (ret == 0) {
+ blob->data = kmemdup(fw->data, fw->size, GFP_KERNEL);
+ blob->size = fw->size;
+ nvkm_firmware_put(fw);
+ if (!blob->data)
+ return -ENOMEM;
+ }
+
+ return ret;
+}
+
/**
* nvkm_firmware_get - load firmware from the official nvidia/chip/ directory
* @subdev: subdevice that will use that firmware
@@ -32,9 +66,8 @@
* Firmware files released by NVIDIA will always follow this format.
*/
int
-nvkm_firmware_get_version(const struct nvkm_subdev *subdev, const char *fwname,
- int min_version, int max_version,
- const struct firmware **fw)
+nvkm_firmware_get(const struct nvkm_subdev *subdev, const char *fwname, int ver,
+ const struct firmware **fw)
{
struct nvkm_device *device = subdev->device;
char f[64];
@@ -50,31 +83,21 @@ nvkm_firmware_get_version(const struct nvkm_subdev *subdev, const char *fwname,
cname[i] = tolower(cname[i]);
}
- for (i = max_version; i >= min_version; i--) {
- if (i != 0)
- snprintf(f, sizeof(f), "nvidia/%s/%s-%d.bin", cname, fwname, i);
- else
- snprintf(f, sizeof(f), "nvidia/%s/%s.bin", cname, fwname);
-
- if (!firmware_request_nowarn(fw, f, device->dev)) {
- nvkm_debug(subdev, "firmware \"%s\" loaded\n", f);
- return i;
- }
+ if (ver != 0)
+ snprintf(f, sizeof(f), "nvidia/%s/%s-%d.bin", cname, fwname, ver);
+ else
+ snprintf(f, sizeof(f), "nvidia/%s/%s.bin", cname, fwname);
- nvkm_debug(subdev, "firmware \"%s\" unavailable\n", f);
+ if (!firmware_request_nowarn(fw, f, device->dev)) {
+ nvkm_debug(subdev, "firmware \"%s\" loaded - %zu byte(s)\n",
+ f, (*fw)->size);
+ return 0;
}
- nvkm_error(subdev, "failed to load firmware \"%s\"", fwname);
+ nvkm_debug(subdev, "firmware \"%s\" unavailable\n", f);
return -ENOENT;
}
-int
-nvkm_firmware_get(const struct nvkm_subdev *subdev, const char *fwname,
- const struct firmware **fw)
-{
- return nvkm_firmware_get_version(subdev, fwname, 0, 0, fw);
-}
-
/**
* nvkm_firmware_put - release firmware loaded with nvkm_firmware_get
*/
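
With the versioned-lookup loop gone, nvkm_firmware_get() now probes exactly one file, "nvidia/<chipset>/<fwname>[-<ver>].bin", and callers iterate versions themselves; nvkm_firmware_load_name() prepends a base path, and nvkm_firmware_load_blob() additionally copies the image into an nvkm_blob. A hedged usage sketch built only from the signatures above (the subdev pointer, "acr/" base path, and "ucode_load" name are placeholders):

static int example_load(struct nvkm_subdev *subdev, struct nvkm_blob *blob)
{
        const struct firmware *fw;
        int ret;

        /* Raw firmware: caller releases it with nvkm_firmware_put(). */
        ret = nvkm_firmware_load_name(subdev, "acr/", "ucode_load", 0, &fw);
        if (ret)
                return ret;
        nvkm_firmware_put(fw);

        /* Or keep a kernel-memory copy around in a blob. */
        return nvkm_firmware_load_blob(subdev, "acr/", "ucode_load", 0, blob);
}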
diff --git a/drivers/gpu/drm/nouveau/nvkm/core/memory.c b/drivers/gpu/drm/nouveau/nvkm/core/memory.c
index e85a08ecd9da..4cc186262d34 100644
--- a/drivers/gpu/drm/nouveau/nvkm/core/memory.c
+++ b/drivers/gpu/drm/nouveau/nvkm/core/memory.c
@@ -91,8 +91,8 @@ nvkm_memory_tags_get(struct nvkm_memory *memory, struct nvkm_device *device,
}
refcount_set(&tags->refcount, 1);
+ *ptags = memory->tags = tags;
mutex_unlock(&fb->subdev.mutex);
- *ptags = tags;
return 0;
}
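
The memory.c hunk above publishes the freshly initialised tags through both memory->tags and the caller's *ptags while fb->subdev.mutex is still held, rather than filling in *ptags after the unlock; anything that takes the mutex and finds memory->tags set can then rely on the object being fully set up. The general shape, as a minimal sketch with hypothetical types:

#include <linux/mutex.h>
#include <linux/slab.h>

struct obj { int payload; };

struct cache {
        struct mutex lock;
        struct obj *cached;
};

static struct obj *example_get(struct cache *c)
{
        struct obj *o;

        mutex_lock(&c->lock);
        o = c->cached;
        if (!o) {
                o = kzalloc(sizeof(*o), GFP_KERNEL);
                c->cached = o;  /* publish while the lock is held */
        }
        mutex_unlock(&c->lock);
        return o;
}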
diff --git a/drivers/gpu/drm/nouveau/nvkm/core/subdev.c b/drivers/gpu/drm/nouveau/nvkm/core/subdev.c
index 245990de1e90..79a8f9d305c5 100644
--- a/drivers/gpu/drm/nouveau/nvkm/core/subdev.c
+++ b/drivers/gpu/drm/nouveau/nvkm/core/subdev.c
@@ -30,6 +30,7 @@ static struct lock_class_key nvkm_subdev_lock_class[NVKM_SUBDEV_NR];
const char *
nvkm_subdev_name[NVKM_SUBDEV_NR] = {
+ [NVKM_SUBDEV_ACR ] = "acr",
[NVKM_SUBDEV_BAR ] = "bar",
[NVKM_SUBDEV_VBIOS ] = "bios",
[NVKM_SUBDEV_BUS ] = "bus",
@@ -50,7 +51,6 @@ nvkm_subdev_name[NVKM_SUBDEV_NR] = {
[NVKM_SUBDEV_MXM ] = "mxm",
[NVKM_SUBDEV_PCI ] = "pci",
[NVKM_SUBDEV_PMU ] = "pmu",
- [NVKM_SUBDEV_SECBOOT ] = "secboot",
[NVKM_SUBDEV_THERM ] = "therm",
[NVKM_SUBDEV_TIMER ] = "tmr",
[NVKM_SUBDEV_TOP ] = "top",
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c b/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c
index c3c7159f3411..c7d700916eae 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c
@@ -1987,6 +1987,8 @@ nv117_chipset = {
.dma = gf119_dma_new,
.fifo = gm107_fifo_new,
.gr = gm107_gr_new,
+ .nvdec[0] = gm107_nvdec_new,
+ .nvenc[0] = gm107_nvenc_new,
.sw = gf100_sw_new,
};
@@ -2027,6 +2029,7 @@ nv118_chipset = {
static const struct nvkm_device_chip
nv120_chipset = {
.name = "GM200",
+ .acr = gm200_acr_new,
.bar = gm107_bar_new,
.bios = nvkm_bios_new,
.bus = gf100_bus_new,
@@ -2045,7 +2048,6 @@ nv120_chipset = {
.pci = gk104_pci_new,
.pmu = gm107_pmu_new,
.therm = gm200_therm_new,
- .secboot = gm200_secboot_new,
.timer = gk20a_timer_new,
.top = gk104_top_new,
.volt = gk104_volt_new,
@@ -2056,12 +2058,16 @@ nv120_chipset = {
.dma = gf119_dma_new,
.fifo = gm200_fifo_new,
.gr = gm200_gr_new,
+ .nvdec[0] = gm107_nvdec_new,
+ .nvenc[0] = gm107_nvenc_new,
+ .nvenc[1] = gm107_nvenc_new,
.sw = gf100_sw_new,
};
static const struct nvkm_device_chip
nv124_chipset = {
.name = "GM204",
+ .acr = gm200_acr_new,
.bar = gm107_bar_new,
.bios = nvkm_bios_new,
.bus = gf100_bus_new,
@@ -2080,7 +2086,6 @@ nv124_chipset = {
.pci = gk104_pci_new,
.pmu = gm107_pmu_new,
.therm = gm200_therm_new,
- .secboot = gm200_secboot_new,
.timer = gk20a_timer_new,
.top = gk104_top_new,
.volt = gk104_volt_new,
@@ -2091,12 +2096,16 @@ nv124_chipset = {
.dma = gf119_dma_new,
.fifo = gm200_fifo_new,
.gr = gm200_gr_new,
+ .nvdec[0] = gm107_nvdec_new,
+ .nvenc[0] = gm107_nvenc_new,
+ .nvenc[1] = gm107_nvenc_new,
.sw = gf100_sw_new,
};
static const struct nvkm_device_chip
nv126_chipset = {
.name = "GM206",
+ .acr = gm200_acr_new,
.bar = gm107_bar_new,
.bios = nvkm_bios_new,
.bus = gf100_bus_new,
@@ -2115,7 +2124,6 @@ nv126_chipset = {
.pci = gk104_pci_new,
.pmu = gm107_pmu_new,
.therm = gm200_therm_new,
- .secboot = gm200_secboot_new,
.timer = gk20a_timer_new,
.top = gk104_top_new,
.volt = gk104_volt_new,
@@ -2126,12 +2134,15 @@ nv126_chipset = {
.dma = gf119_dma_new,
.fifo = gm200_fifo_new,
.gr = gm200_gr_new,
+ .nvdec[0] = gm107_nvdec_new,
+ .nvenc[0] = gm107_nvenc_new,
.sw = gf100_sw_new,
};
static const struct nvkm_device_chip
nv12b_chipset = {
.name = "GM20B",
+ .acr = gm20b_acr_new,
.bar = gm20b_bar_new,
.bus = gf100_bus_new,
.clk = gm20b_clk_new,
@@ -2143,7 +2154,6 @@ nv12b_chipset = {
.mc = gk20a_mc_new,
.mmu = gm20b_mmu_new,
.pmu = gm20b_pmu_new,
- .secboot = gm20b_secboot_new,
.timer = gk20a_timer_new,
.top = gk104_top_new,
.ce[2] = gm200_ce_new,
@@ -2157,6 +2167,7 @@ nv12b_chipset = {
static const struct nvkm_device_chip
nv130_chipset = {
.name = "GP100",
+ .acr = gm200_acr_new,
.bar = gm107_bar_new,
.bios = nvkm_bios_new,
.bus = gf100_bus_new,
@@ -2172,7 +2183,6 @@ nv130_chipset = {
.mc = gp100_mc_new,
.mmu = gp100_mmu_new,
.therm = gp100_therm_new,
- .secboot = gm200_secboot_new,
.pci = gp100_pci_new,
.pmu = gp100_pmu_new,
.timer = gk20a_timer_new,
@@ -2187,12 +2197,17 @@ nv130_chipset = {
.disp = gp100_disp_new,
.fifo = gp100_fifo_new,
.gr = gp100_gr_new,
+ .nvdec[0] = gm107_nvdec_new,
+ .nvenc[0] = gm107_nvenc_new,
+ .nvenc[1] = gm107_nvenc_new,
+ .nvenc[2] = gm107_nvenc_new,
.sw = gf100_sw_new,
};
static const struct nvkm_device_chip
nv132_chipset = {
.name = "GP102",
+ .acr = gp102_acr_new,
.bar = gm107_bar_new,
.bios = nvkm_bios_new,
.bus = gf100_bus_new,
@@ -2208,7 +2223,6 @@ nv132_chipset = {
.mc = gp100_mc_new,
.mmu = gp100_mmu_new,
.therm = gp100_therm_new,
- .secboot = gp102_secboot_new,
.pci = gp100_pci_new,
.pmu = gp102_pmu_new,
.timer = gk20a_timer_new,
@@ -2221,7 +2235,9 @@ nv132_chipset = {
.dma = gf119_dma_new,
.fifo = gp100_fifo_new,
.gr = gp102_gr_new,
- .nvdec[0] = gp102_nvdec_new,
+ .nvdec[0] = gm107_nvdec_new,
+ .nvenc[0] = gm107_nvenc_new,
+ .nvenc[1] = gm107_nvenc_new,
.sec2 = gp102_sec2_new,
.sw = gf100_sw_new,
};
@@ -2229,6 +2245,7 @@ nv132_chipset = {
static const struct nvkm_device_chip
nv134_chipset = {
.name = "GP104",
+ .acr = gp102_acr_new,
.bar = gm107_bar_new,
.bios = nvkm_bios_new,
.bus = gf100_bus_new,
@@ -2244,7 +2261,6 @@ nv134_chipset = {
.mc = gp100_mc_new,
.mmu = gp100_mmu_new,
.therm = gp100_therm_new,
- .secboot = gp102_secboot_new,
.pci = gp100_pci_new,
.pmu = gp102_pmu_new,
.timer = gk20a_timer_new,
@@ -2257,7 +2273,9 @@ nv134_chipset = {
.dma = gf119_dma_new,
.fifo = gp100_fifo_new,
.gr = gp104_gr_new,
- .nvdec[0] = gp102_nvdec_new,
+ .nvdec[0] = gm107_nvdec_new,
+ .nvenc[0] = gm107_nvenc_new,
+ .nvenc[1] = gm107_nvenc_new,
.sec2 = gp102_sec2_new,
.sw = gf100_sw_new,
};
@@ -2265,6 +2283,7 @@ nv134_chipset = {
static const struct nvkm_device_chip
nv136_chipset = {
.name = "GP106",
+ .acr = gp102_acr_new,
.bar = gm107_bar_new,
.bios = nvkm_bios_new,
.bus = gf100_bus_new,
@@ -2280,7 +2299,6 @@ nv136_chipset = {
.mc = gp100_mc_new,
.mmu = gp100_mmu_new,
.therm = gp100_therm_new,
- .secboot = gp102_secboot_new,
.pci = gp100_pci_new,
.pmu = gp102_pmu_new,
.timer = gk20a_timer_new,
@@ -2293,7 +2311,8 @@ nv136_chipset = {
.dma = gf119_dma_new,
.fifo = gp100_fifo_new,
.gr = gp104_gr_new,
- .nvdec[0] = gp102_nvdec_new,
+ .nvdec[0] = gm107_nvdec_new,
+ .nvenc[0] = gm107_nvenc_new,
.sec2 = gp102_sec2_new,
.sw = gf100_sw_new,
};
@@ -2301,6 +2320,7 @@ nv136_chipset = {
static const struct nvkm_device_chip
nv137_chipset = {
.name = "GP107",
+ .acr = gp102_acr_new,
.bar = gm107_bar_new,
.bios = nvkm_bios_new,
.bus = gf100_bus_new,
@@ -2316,7 +2336,6 @@ nv137_chipset = {
.mc = gp100_mc_new,
.mmu = gp100_mmu_new,
.therm = gp100_therm_new,
- .secboot = gp102_secboot_new,
.pci = gp100_pci_new,
.pmu = gp102_pmu_new,
.timer = gk20a_timer_new,
@@ -2329,7 +2348,9 @@ nv137_chipset = {
.dma = gf119_dma_new,
.fifo = gp100_fifo_new,
.gr = gp107_gr_new,
- .nvdec[0] = gp102_nvdec_new,
+ .nvdec[0] = gm107_nvdec_new,
+ .nvenc[0] = gm107_nvenc_new,
+ .nvenc[1] = gm107_nvenc_new,
.sec2 = gp102_sec2_new,
.sw = gf100_sw_new,
};
@@ -2337,6 +2358,7 @@ nv137_chipset = {
static const struct nvkm_device_chip
nv138_chipset = {
.name = "GP108",
+ .acr = gp108_acr_new,
.bar = gm107_bar_new,
.bios = nvkm_bios_new,
.bus = gf100_bus_new,
@@ -2352,7 +2374,6 @@ nv138_chipset = {
.mc = gp100_mc_new,
.mmu = gp100_mmu_new,
.therm = gp100_therm_new,
- .secboot = gp108_secboot_new,
.pci = gp100_pci_new,
.pmu = gp102_pmu_new,
.timer = gk20a_timer_new,
@@ -2364,30 +2385,30 @@ nv138_chipset = {
.disp = gp102_disp_new,
.dma = gf119_dma_new,
.fifo = gp100_fifo_new,
- .gr = gp107_gr_new,
- .nvdec[0] = gp102_nvdec_new,
- .sec2 = gp102_sec2_new,
+ .gr = gp108_gr_new,
+ .nvdec[0] = gm107_nvdec_new,
+ .sec2 = gp108_sec2_new,
.sw = gf100_sw_new,
};
static const struct nvkm_device_chip
nv13b_chipset = {
.name = "GP10B",
+ .acr = gp10b_acr_new,
.bar = gm20b_bar_new,
.bus = gf100_bus_new,
- .fault = gp100_fault_new,
+ .fault = gp10b_fault_new,
.fb = gp10b_fb_new,
.fuse = gm107_fuse_new,
.ibus = gp10b_ibus_new,
.imem = gk20a_instmem_new,
- .ltc = gp102_ltc_new,
+ .ltc = gp10b_ltc_new,
.mc = gp10b_mc_new,
.mmu = gp10b_mmu_new,
- .secboot = gp10b_secboot_new,
- .pmu = gm20b_pmu_new,
+ .pmu = gp10b_pmu_new,
.timer = gk20a_timer_new,
.top = gk104_top_new,
- .ce[2] = gp102_ce_new,
+ .ce[0] = gp100_ce_new,
.dma = gf119_dma_new,
.fifo = gp10b_fifo_new,
.gr = gp10b_gr_new,
@@ -2397,6 +2418,7 @@ nv13b_chipset = {
static const struct nvkm_device_chip
nv140_chipset = {
.name = "GV100",
+ .acr = gp108_acr_new,
.bar = gm107_bar_new,
.bios = nvkm_bios_new,
.bus = gf100_bus_new,
@@ -2414,7 +2436,6 @@ nv140_chipset = {
.mmu = gv100_mmu_new,
.pci = gp100_pci_new,
.pmu = gp102_pmu_new,
- .secboot = gp108_secboot_new,
.therm = gp100_therm_new,
.timer = gk20a_timer_new,
.top = gk104_top_new,
@@ -2431,13 +2452,17 @@ nv140_chipset = {
.dma = gv100_dma_new,
.fifo = gv100_fifo_new,
.gr = gv100_gr_new,
- .nvdec[0] = gp102_nvdec_new,
- .sec2 = gp102_sec2_new,
+ .nvdec[0] = gm107_nvdec_new,
+ .nvenc[0] = gm107_nvenc_new,
+ .nvenc[1] = gm107_nvenc_new,
+ .nvenc[2] = gm107_nvenc_new,
+ .sec2 = gp108_sec2_new,
};
static const struct nvkm_device_chip
nv162_chipset = {
.name = "TU102",
+ .acr = tu102_acr_new,
.bar = tu102_bar_new,
.bios = nvkm_bios_new,
.bus = gf100_bus_new,
@@ -2466,13 +2491,16 @@ nv162_chipset = {
.disp = tu102_disp_new,
.dma = gv100_dma_new,
.fifo = tu102_fifo_new,
- .nvdec[0] = gp102_nvdec_new,
+ .gr = tu102_gr_new,
+ .nvdec[0] = gm107_nvdec_new,
+ .nvenc[0] = gm107_nvenc_new,
.sec2 = tu102_sec2_new,
};
static const struct nvkm_device_chip
nv164_chipset = {
.name = "TU104",
+ .acr = tu102_acr_new,
.bar = tu102_bar_new,
.bios = nvkm_bios_new,
.bus = gf100_bus_new,
@@ -2501,13 +2529,17 @@ nv164_chipset = {
.disp = tu102_disp_new,
.dma = gv100_dma_new,
.fifo = tu102_fifo_new,
- .nvdec[0] = gp102_nvdec_new,
+ .gr = tu102_gr_new,
+ .nvdec[0] = gm107_nvdec_new,
+ .nvdec[1] = gm107_nvdec_new,
+ .nvenc[0] = gm107_nvenc_new,
.sec2 = tu102_sec2_new,
};
static const struct nvkm_device_chip
nv166_chipset = {
.name = "TU106",
+ .acr = tu102_acr_new,
.bar = tu102_bar_new,
.bios = nvkm_bios_new,
.bus = gf100_bus_new,
@@ -2536,7 +2568,11 @@ nv166_chipset = {
.disp = tu102_disp_new,
.dma = gv100_dma_new,
.fifo = tu102_fifo_new,
- .nvdec[0] = gp102_nvdec_new,
+ .gr = tu102_gr_new,
+ .nvdec[0] = gm107_nvdec_new,
+ .nvdec[1] = gm107_nvdec_new,
+ .nvdec[2] = gm107_nvdec_new,
+ .nvenc[0] = gm107_nvenc_new,
.sec2 = tu102_sec2_new,
};
@@ -2571,7 +2607,8 @@ nv167_chipset = {
.disp = tu102_disp_new,
.dma = gv100_dma_new,
.fifo = tu102_fifo_new,
- .nvdec[0] = gp102_nvdec_new,
+ .nvdec[0] = gm107_nvdec_new,
+ .nvenc[0] = gm107_nvenc_new,
.sec2 = tu102_sec2_new,
};
@@ -2606,7 +2643,8 @@ nv168_chipset = {
.disp = tu102_disp_new,
.dma = gv100_dma_new,
.fifo = tu102_fifo_new,
- .nvdec[0] = gp102_nvdec_new,
+ .nvdec[0] = gm107_nvdec_new,
+ .nvenc[0] = gm107_nvenc_new,
.sec2 = tu102_sec2_new,
};
@@ -2638,6 +2676,7 @@ nvkm_device_subdev(struct nvkm_device *device, int index)
switch (index) {
#define _(n,p,m) case NVKM_SUBDEV_##n: if (p) return (m); break
+ _(ACR , device->acr , &device->acr->subdev);
_(BAR , device->bar , &device->bar->subdev);
_(VBIOS , device->bios , &device->bios->subdev);
_(BUS , device->bus , &device->bus->subdev);
@@ -2658,7 +2697,6 @@ nvkm_device_subdev(struct nvkm_device *device, int index)
_(MXM , device->mxm , device->mxm);
_(PCI , device->pci , &device->pci->subdev);
_(PMU , device->pmu , &device->pmu->subdev);
- _(SECBOOT , device->secboot , &device->secboot->subdev);
_(THERM , device->therm , &device->therm->subdev);
_(TIMER , device->timer , &device->timer->subdev);
_(TOP , device->top , &device->top->subdev);
@@ -2703,9 +2741,9 @@ nvkm_device_engine(struct nvkm_device *device, int index)
_(MSPDEC , device->mspdec , device->mspdec);
_(MSPPP , device->msppp , device->msppp);
_(MSVLD , device->msvld , device->msvld);
- _(NVENC0 , device->nvenc[0], device->nvenc[0]);
- _(NVENC1 , device->nvenc[1], device->nvenc[1]);
- _(NVENC2 , device->nvenc[2], device->nvenc[2]);
+ _(NVENC0 , device->nvenc[0], &device->nvenc[0]->engine);
+ _(NVENC1 , device->nvenc[1], &device->nvenc[1]->engine);
+ _(NVENC2 , device->nvenc[2], &device->nvenc[2]->engine);
_(NVDEC0 , device->nvdec[0], &device->nvdec[0]->engine);
_(NVDEC1 , device->nvdec[1], &device->nvdec[1]->engine);
_(NVDEC2 , device->nvdec[2], &device->nvdec[2]->engine);
@@ -3144,6 +3182,7 @@ nvkm_device_ctor(const struct nvkm_device_func *func,
} \
break
switch (i) {
+ _(NVKM_SUBDEV_ACR , acr);
_(NVKM_SUBDEV_BAR , bar);
_(NVKM_SUBDEV_VBIOS , bios);
_(NVKM_SUBDEV_BUS , bus);
@@ -3164,7 +3203,6 @@ nvkm_device_ctor(const struct nvkm_device_func *func,
_(NVKM_SUBDEV_MXM , mxm);
_(NVKM_SUBDEV_PCI , pci);
_(NVKM_SUBDEV_PMU , pmu);
- _(NVKM_SUBDEV_SECBOOT , secboot);
_(NVKM_SUBDEV_THERM , therm);
_(NVKM_SUBDEV_TIMER , timer);
_(NVKM_SUBDEV_TOP , top);
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/device/priv.h b/drivers/gpu/drm/nouveau/nvkm/engine/device/priv.h
index d8be2f77ac66..54eab5e04230 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/device/priv.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/device/priv.h
@@ -3,6 +3,7 @@
#define __NVKM_DEVICE_PRIV_H__
#include <core/device.h>
+#include <subdev/acr.h>
#include <subdev/bar.h>
#include <subdev/bios.h>
#include <subdev/bus.h>
@@ -27,7 +28,6 @@
#include <subdev/timer.h>
#include <subdev/top.h>
#include <subdev/volt.h>
-#include <subdev/secboot.h>
#include <engine/bsp.h>
#include <engine/ce.h>
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/device/tegra.c b/drivers/gpu/drm/nouveau/nvkm/engine/device/tegra.c
index 0e372a190d3f..d0d52c1d4aee 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/device/tegra.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/device/tegra.c
@@ -52,18 +52,18 @@ nvkm_device_tegra_power_up(struct nvkm_device_tegra *tdev)
clk_set_rate(tdev->clk_pwr, 204000000);
udelay(10);
- reset_control_assert(tdev->rst);
- udelay(10);
-
if (!tdev->pdev->dev.pm_domain) {
+ reset_control_assert(tdev->rst);
+ udelay(10);
+
ret = tegra_powergate_remove_clamping(TEGRA_POWERGATE_3D);
if (ret)
goto err_clamp;
udelay(10);
- }
- reset_control_deassert(tdev->rst);
- udelay(10);
+ reset_control_deassert(tdev->rst);
+ udelay(10);
+ }
return 0;
@@ -279,6 +279,7 @@ nvkm_device_tegra_new(const struct nvkm_device_tegra_func *func,
struct nvkm_device **pdevice)
{
struct nvkm_device_tegra *tdev;
+ unsigned long rate;
int ret;
if (!(tdev = kzalloc(sizeof(*tdev), GFP_KERNEL)))
@@ -307,6 +308,17 @@ nvkm_device_tegra_new(const struct nvkm_device_tegra_func *func,
goto free;
}
+ rate = clk_get_rate(tdev->clk);
+ if (rate == 0) {
+ ret = clk_set_rate(tdev->clk, ULONG_MAX);
+ if (ret < 0)
+ goto free;
+
+ rate = clk_get_rate(tdev->clk);
+
+ dev_dbg(&pdev->dev, "GPU clock set to %lu\n", rate);
+ }
+
if (func->require_ref_clk)
tdev->clk_ref = devm_clk_get(&pdev->dev, "ref");
if (IS_ERR(tdev->clk_ref)) {
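
On the Tegra path above, a clock that has never had a rate configured reports clk_get_rate() == 0; requesting ULONG_MAX lets the common clock framework round the request down to the highest rate the clock supports, after which the real rate is read back and logged. A hedged sketch of the same bring-up step (function and parameter names are illustrative):

#include <linux/clk.h>
#include <linux/device.h>
#include <linux/limits.h>

static int example_init_clk(struct device *dev, struct clk *clk)
{
        unsigned long rate = clk_get_rate(clk);
        int ret;

        if (rate == 0) {
                /* No rate set yet: ask for the maximum and let the
                 * clock framework pick the highest supported rate. */
                ret = clk_set_rate(clk, ULONG_MAX);
                if (ret < 0)
                        return ret;

                rate = clk_get_rate(clk);
                dev_dbg(dev, "clock set to %lu\n", rate);
        }
        return 0;
}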
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/channv50.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/channv50.c
index bcf32d92ee5a..50e3539f33d2 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/channv50.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/channv50.c
@@ -74,6 +74,8 @@ nv50_disp_chan_mthd(struct nv50_disp_chan *chan, int debug)
if (debug > subdev->debug)
return;
+ if (!mthd)
+ return;
for (i = 0; (list = mthd->data[i].mthd) != NULL; i++) {
u32 base = chan->head * mthd->addr;
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/dp.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/dp.c
index 818d21bd28d3..3800aeb507d0 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/dp.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/dp.c
@@ -365,7 +365,7 @@ nvkm_dp_train(struct nvkm_dp *dp, u32 dataKBps)
* and it's better to have a failed modeset than that.
*/
for (cfg = nvkm_dp_rates; cfg->rate; cfg++) {
- if (cfg->nr <= outp_nr && cfg->nr <= outp_bw) {
+ if (cfg->nr <= outp_nr && cfg->bw <= outp_bw) {
/* Try to respect sink limits too when selecting
* lowest link configuration.
*/
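
The one-character dp.c fix above matters: the loop walks nvkm_dp_rates looking for an acceptable link configuration, and the old condition compared the entry's lane count (cfg->nr) against the output's bandwidth cap (outp_bw), so an entry whose link rate exceeded what the output supports could pass the check. Each limit is now tested against its own field. A simplified restatement of the selection (the table values are made up, and the driver's real loop applies further sink-limit logic):

struct dp_rate { u32 rate; u8 nr; u8 bw; };

static const struct dp_rate rates[] = {
        { 162000, 1, 0x06 },    /* illustrative: RBR  x1 */
        { 540000, 2, 0x0a },    /* illustrative: HBR  x2 */
        { 2160000, 4, 0x14 },   /* illustrative: HBR2 x4 */
        {}
};

static const struct dp_rate *pick(u8 outp_nr, u8 outp_bw)
{
        const struct dp_rate *cfg;

        /* Table is ordered lowest-first, so the first entry whose
         * lane count AND link rate both fit the output wins. */
        for (cfg = rates; cfg->rate; cfg++) {
                if (cfg->nr <= outp_nr && cfg->bw <= outp_bw)
                        return cfg;
        }
        return NULL;
}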
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/gv100.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/gv100.c
index 892be6c9b76c..3aa2cc3af1e2 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/gv100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/gv100.c
@@ -101,15 +101,26 @@ gv100_disp_exception(struct nv50_disp *disp, int chid)
u32 stat = nvkm_rd32(device, 0x611020 + (chid * 12));
u32 type = (stat & 0x00007000) >> 12;
u32 mthd = (stat & 0x00000fff) << 2;
- u32 data = nvkm_rd32(device, 0x611024 + (chid * 12));
- u32 code = nvkm_rd32(device, 0x611028 + (chid * 12));
const struct nvkm_enum *reason =
nvkm_enum_find(nv50_disp_intr_error_type, type);
- nvkm_error(subdev, "chid %d stat %08x reason %d [%s] mthd %04x "
- "data %08x code %08x\n",
- chid, stat, type, reason ? reason->name : "",
- mthd, data, code);
+ /*TODO: Suspect 33->41 are for WRBK channel exceptions, but we
+ * don't support those currently.
+ *
+ * CORE+WIN CHIDs map directly to the FE_EXCEPT() slots.
+ */
+ if (chid <= 32) {
+ u32 data = nvkm_rd32(device, 0x611024 + (chid * 12));
+ u32 code = nvkm_rd32(device, 0x611028 + (chid * 12));
+ nvkm_error(subdev, "chid %d stat %08x reason %d [%s] "
+ "mthd %04x data %08x code %08x\n",
+ chid, stat, type, reason ? reason->name : "",
+ mthd, data, code);
+ } else {
+ nvkm_error(subdev, "chid %d stat %08x reason %d [%s] "
+ "mthd %04x\n",
+ chid, stat, type, reason ? reason->name : "", mthd);
+ }
if (chid < ARRAY_SIZE(disp->chan) && disp->chan[chid]) {
switch (mthd) {
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/Kbuild b/drivers/gpu/drm/nouveau/nvkm/engine/gr/Kbuild
index 73724a8cb861..558c86fd8e82 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/Kbuild
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/Kbuild
@@ -36,8 +36,10 @@ nvkm-y += nvkm/engine/gr/gp100.o
nvkm-y += nvkm/engine/gr/gp102.o
nvkm-y += nvkm/engine/gr/gp104.o
nvkm-y += nvkm/engine/gr/gp107.o
+nvkm-y += nvkm/engine/gr/gp108.o
nvkm-y += nvkm/engine/gr/gp10b.o
nvkm-y += nvkm/engine/gr/gv100.o
+nvkm-y += nvkm/engine/gr/tu102.o
nvkm-y += nvkm/engine/gr/ctxnv40.o
nvkm-y += nvkm/engine/gr/ctxnv50.o
@@ -60,3 +62,4 @@ nvkm-y += nvkm/engine/gr/ctxgp102.o
nvkm-y += nvkm/engine/gr/ctxgp104.o
nvkm-y += nvkm/engine/gr/ctxgp107.o
nvkm-y += nvkm/engine/gr/ctxgv100.o
+nvkm-y += nvkm/engine/gr/ctxtu102.o
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf100.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf100.c
index 85f2d1e950e8..297915719bf2 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf100.c
@@ -1324,10 +1324,8 @@ gf100_grctx_generate_sm_id(struct gf100_gr *gr, int gpc, int tpc, int sm)
void
gf100_grctx_generate_floorsweep(struct gf100_gr *gr)
{
- struct nvkm_device *device = gr->base.engine.subdev.device;
const struct gf100_grctx_func *func = gr->func->grctx;
- int gpc, sm, i, j;
- u32 data;
+ int sm;
for (sm = 0; sm < gr->sm_nr; sm++) {
func->sm_id(gr, gr->sm[sm].gpc, gr->sm[sm].tpc, sm);
@@ -1335,12 +1333,9 @@ gf100_grctx_generate_floorsweep(struct gf100_gr *gr)
func->tpc_nr(gr, gr->sm[sm].gpc);
}
- for (gpc = 0, i = 0; i < 4; i++) {
- for (data = 0, j = 0; j < 8 && gpc < gr->gpc_nr; j++, gpc++)
- data |= gr->tpc_nr[gpc] << (j * 4);
- nvkm_wr32(device, 0x406028 + (i * 4), data);
- nvkm_wr32(device, 0x405870 + (i * 4), data);
- }
+ gf100_gr_init_num_tpc_per_gpc(gr, false, true);
+ if (!func->skip_pd_num_tpc_per_gpc)
+ gf100_gr_init_num_tpc_per_gpc(gr, true, false);
if (func->r4060a8)
func->r4060a8(gr);
@@ -1374,7 +1369,7 @@ gf100_grctx_generate_main(struct gf100_gr *gr, struct gf100_grctx *info)
nvkm_mc_unk260(device, 0);
- if (!gr->fuc_sw_ctx) {
+ if (!gr->sw_ctx) {
gf100_gr_mmio(gr, grctx->hub);
gf100_gr_mmio(gr, grctx->gpc_0);
gf100_gr_mmio(gr, grctx->zcull);
@@ -1382,7 +1377,7 @@ gf100_grctx_generate_main(struct gf100_gr *gr, struct gf100_grctx *info)
gf100_gr_mmio(gr, grctx->tpc);
gf100_gr_mmio(gr, grctx->ppc);
} else {
- gf100_gr_mmio(gr, gr->fuc_sw_ctx);
+ gf100_gr_mmio(gr, gr->sw_ctx);
}
gf100_gr_wait_idle(gr);
@@ -1401,8 +1396,8 @@ gf100_grctx_generate_main(struct gf100_gr *gr, struct gf100_grctx *info)
gf100_gr_wait_idle(gr);
if (grctx->r400088) grctx->r400088(gr, false);
- if (gr->fuc_bundle)
- gf100_gr_icmd(gr, gr->fuc_bundle);
+ if (gr->bundle)
+ gf100_gr_icmd(gr, gr->bundle);
else
gf100_gr_icmd(gr, grctx->icmd);
if (grctx->sw_veid_bundle_init)
@@ -1411,8 +1406,8 @@ gf100_grctx_generate_main(struct gf100_gr *gr, struct gf100_grctx *info)
nvkm_wr32(device, 0x404154, idle_timeout);
- if (gr->fuc_method)
- gf100_gr_mthd(gr, gr->fuc_method);
+ if (gr->method)
+ gf100_gr_mthd(gr, gr->method);
else
gf100_gr_mthd(gr, grctx->mthd);
nvkm_mc_unk260(device, 1);
@@ -1431,6 +1426,8 @@ gf100_grctx_generate_main(struct gf100_gr *gr, struct gf100_grctx *info)
grctx->r419a3c(gr);
if (grctx->r408840)
grctx->r408840(gr);
+ if (grctx->r419c0c)
+ grctx->r419c0c(gr);
}
#define CB_RESERVED 0x80000
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf100.h b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf100.h
index 478b4723d0f9..32bbddc0993e 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf100.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf100.h
@@ -57,6 +57,7 @@ struct gf100_grctx_func {
/* floorsweeping */
void (*sm_id)(struct gf100_gr *, int gpc, int tpc, int sm);
void (*tpc_nr)(struct gf100_gr *, int gpc);
+ bool skip_pd_num_tpc_per_gpc;
void (*r4060a8)(struct gf100_gr *);
void (*rop_mapping)(struct gf100_gr *);
void (*alpha_beta_tables)(struct gf100_gr *);
@@ -76,6 +77,7 @@ struct gf100_grctx_func {
void (*r418e94)(struct gf100_gr *);
void (*r419a3c)(struct gf100_gr *);
void (*r408840)(struct gf100_gr *);
+ void (*r419c0c)(struct gf100_gr *);
};
extern const struct gf100_grctx_func gf100_grctx;
@@ -153,6 +155,14 @@ extern const struct gf100_grctx_func gp107_grctx;
extern const struct gf100_grctx_func gv100_grctx;
+extern const struct gf100_grctx_func tu102_grctx;
+void gv100_grctx_unkn88c(struct gf100_gr *, bool);
+void gv100_grctx_generate_unkn(struct gf100_gr *);
+extern const struct gf100_gr_init gv100_grctx_init_sw_veid_bundle_init_0[];
+void gv100_grctx_generate_attrib(struct gf100_grctx *);
+void gv100_grctx_generate_rop_mapping(struct gf100_gr *);
+void gv100_grctx_generate_r400088(struct gf100_gr *, bool);
+
/* context init value lists */
extern const struct gf100_gr_pack gf100_grctx_pack_icmd[];
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgk20a.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgk20a.c
index 896d473dcc0f..c0d36bc601f9 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgk20a.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgk20a.c
@@ -32,7 +32,7 @@ gk20a_grctx_generate_main(struct gf100_gr *gr, struct gf100_grctx *info)
u32 idle_timeout;
int i;
- gf100_gr_mmio(gr, gr->fuc_sw_ctx);
+ gf100_gr_mmio(gr, gr->sw_ctx);
gf100_gr_wait_idle(gr);
@@ -56,10 +56,10 @@ gk20a_grctx_generate_main(struct gf100_gr *gr, struct gf100_grctx *info)
nvkm_wr32(device, 0x404154, idle_timeout);
gf100_gr_wait_idle(gr);
- gf100_gr_mthd(gr, gr->fuc_method);
+ gf100_gr_mthd(gr, gr->method);
gf100_gr_wait_idle(gr);
- gf100_gr_icmd(gr, gr->fuc_bundle);
+ gf100_gr_icmd(gr, gr->bundle);
grctx->pagepool(info);
grctx->bundle(info);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgm20b.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgm20b.c
index a1d9e114ebeb..6b92f8aa18a3 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgm20b.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgm20b.c
@@ -29,7 +29,7 @@ gm20b_grctx_generate_main(struct gf100_gr *gr, struct gf100_grctx *info)
u32 idle_timeout;
int i, tmp;
- gf100_gr_mmio(gr, gr->fuc_sw_ctx);
+ gf100_gr_mmio(gr, gr->sw_ctx);
gf100_gr_wait_idle(gr);
@@ -59,10 +59,10 @@ gm20b_grctx_generate_main(struct gf100_gr *gr, struct gf100_grctx *info)
nvkm_wr32(device, 0x404154, idle_timeout);
gf100_gr_wait_idle(gr);
- gf100_gr_mthd(gr, gr->fuc_method);
+ gf100_gr_mthd(gr, gr->method);
gf100_gr_wait_idle(gr);
- gf100_gr_icmd(gr, gr->fuc_bundle);
+ gf100_gr_icmd(gr, gr->bundle);
grctx->pagepool(info);
grctx->bundle(info);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgv100.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgv100.c
index 0990765ef191..39553d55d3f3 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgv100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgv100.c
@@ -25,7 +25,7 @@
* PGRAPH context implementation
******************************************************************************/
-static const struct gf100_gr_init
+const struct gf100_gr_init
gv100_grctx_init_sw_veid_bundle_init_0[] = {
{ 0x00001000, 64, 0x00100000, 0x00000008 },
{ 0x00000941, 64, 0x00100000, 0x00000000 },
@@ -58,7 +58,7 @@ gv100_grctx_pack_sw_veid_bundle_init[] = {
{}
};
-static void
+void
gv100_grctx_generate_attrib(struct gf100_grctx *info)
{
struct gf100_gr *gr = info->gr;
@@ -67,14 +67,14 @@ gv100_grctx_generate_attrib(struct gf100_grctx *info)
const u32 attrib = grctx->attrib_nr;
const u32 gfxp = grctx->gfxp_nr;
const int s = 12;
- const int max_batches = 0xffff;
u32 size = grctx->alpha_nr_max * gr->tpc_total;
u32 ao = 0;
u32 bo = ao + size;
int gpc, ppc, b, n = 0;
- size += grctx->gfxp_nr * gr->tpc_total;
- size = ((size * 0x20) + 128) & ~127;
+ for (gpc = 0; gpc < gr->gpc_nr; gpc++)
+ size += grctx->gfxp_nr * gr->ppc_nr[gpc] * gr->ppc_tpc_max;
+ size = ((size * 0x20) + 127) & ~127;
b = mmio_vram(info, size, (1 << s), false);
mmio_refn(info, 0x418810, 0x80000000, s, b);
@@ -84,13 +84,12 @@ gv100_grctx_generate_attrib(struct gf100_grctx *info)
mmio_wr32(info, 0x419e04, 0x80000000 | size >> 7);
mmio_wr32(info, 0x405830, attrib);
mmio_wr32(info, 0x40585c, alpha);
- mmio_wr32(info, 0x4064c4, ((alpha / 4) << 16) | max_batches);
for (gpc = 0; gpc < gr->gpc_nr; gpc++) {
for (ppc = 0; ppc < gr->ppc_nr[gpc]; ppc++, n++) {
const u32 as = alpha * gr->ppc_tpc_nr[gpc][ppc];
- const u32 bs = attrib * gr->ppc_tpc_nr[gpc][ppc];
- const u32 gs = gfxp * gr->ppc_tpc_nr[gpc][ppc];
+ const u32 bs = attrib * gr->ppc_tpc_max;
+ const u32 gs = gfxp * gr->ppc_tpc_max;
const u32 u = 0x418ea0 + (n * 0x04);
const u32 o = PPC_UNIT(gpc, ppc, 0);
if (!(gr->ppc_mask[gpc] & (1 << ppc)))
@@ -110,7 +109,7 @@ gv100_grctx_generate_attrib(struct gf100_grctx *info)
mmio_wr32(info, 0x41befc, 0x00000100);
}
-static void
+void
gv100_grctx_generate_rop_mapping(struct gf100_gr *gr)
{
struct nvkm_device *device = gr->base.engine.subdev.device;
@@ -147,7 +146,7 @@ gv100_grctx_generate_rop_mapping(struct gf100_gr *gr)
gr->screen_tile_row_offset);
}
-static void
+void
gv100_grctx_generate_r400088(struct gf100_gr *gr, bool on)
{
struct nvkm_device *device = gr->base.engine.subdev.device;
@@ -163,7 +162,7 @@ gv100_grctx_generate_sm_id(struct gf100_gr *gr, int gpc, int tpc, int sm)
nvkm_wr32(device, TPC_UNIT(gpc, tpc, 0x088), sm);
}
-static void
+void
gv100_grctx_generate_unkn(struct gf100_gr *gr)
{
struct nvkm_device *device = gr->base.engine.subdev.device;
@@ -174,7 +173,7 @@ gv100_grctx_generate_unkn(struct gf100_gr *gr)
nvkm_mask(device, 0x419c00, 0x00000008, 0x00000008);
}
-static void
+void
gv100_grctx_unkn88c(struct gf100_gr *gr, bool on)
{
struct nvkm_device *device = gr->base.engine.subdev.device;
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxtu102.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxtu102.c
new file mode 100644
index 000000000000..2299ca07d04a
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxtu102.c
@@ -0,0 +1,95 @@
+/*
+ * Copyright 2019 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include "ctxgf100.h"
+
+static void
+tu102_grctx_generate_r419c0c(struct gf100_gr *gr)
+{
+ struct nvkm_device *device = gr->base.engine.subdev.device;
+ nvkm_mask(device, 0x419c0c, 0x80000000, 0x80000000);
+ nvkm_mask(device, 0x40584c, 0x00000008, 0x00000000);
+ nvkm_mask(device, 0x400080, 0x00000000, 0x00000000);
+}
+
+static void
+tu102_grctx_generate_sm_id(struct gf100_gr *gr, int gpc, int tpc, int sm)
+{
+ struct nvkm_device *device = gr->base.engine.subdev.device;
+ nvkm_wr32(device, TPC_UNIT(gpc, tpc, 0x608), sm);
+ nvkm_wr32(device, TPC_UNIT(gpc, tpc, 0x088), sm);
+}
+
+static const struct gf100_gr_init
+tu102_grctx_init_unknown_bundle_init_0[] = {
+ { 0x00001000, 1, 0x00000001, 0x00000004 },
+ { 0x00002020, 64, 0x00000001, 0x00000000 },
+ { 0x0001e100, 1, 0x00000001, 0x00000001 },
+ {}
+};
+
+static const struct gf100_gr_pack
+tu102_grctx_pack_sw_veid_bundle_init[] = {
+ { gv100_grctx_init_sw_veid_bundle_init_0 },
+ { tu102_grctx_init_unknown_bundle_init_0 },
+ {}
+};
+
+static void
+tu102_grctx_generate_attrib(struct gf100_grctx *info)
+{
+ const u64 size = 0x80000; /*XXX: educated guess */
+ const int s = 8;
+ const int b = mmio_vram(info, size, (1 << s), true);
+
+ gv100_grctx_generate_attrib(info);
+
+ mmio_refn(info, 0x408070, 0x00000000, s, b);
+ mmio_wr32(info, 0x408074, size >> s); /*XXX: guess */
+ mmio_refn(info, 0x419034, 0x00000000, s, b);
+ mmio_wr32(info, 0x408078, 0x00000000);
+}
+
+const struct gf100_grctx_func
+tu102_grctx = {
+ .unkn88c = gv100_grctx_unkn88c,
+ .main = gf100_grctx_generate_main,
+ .unkn = gv100_grctx_generate_unkn,
+ .sw_veid_bundle_init = tu102_grctx_pack_sw_veid_bundle_init,
+ .bundle = gm107_grctx_generate_bundle,
+ .bundle_size = 0x3000,
+ .bundle_min_gpm_fifo_depth = 0x180,
+ .bundle_token_limit = 0xa80,
+ .pagepool = gp100_grctx_generate_pagepool,
+ .pagepool_size = 0x20000,
+ .attrib = tu102_grctx_generate_attrib,
+ .attrib_nr_max = 0x800,
+ .attrib_nr = 0x700,
+ .alpha_nr_max = 0xc00,
+ .alpha_nr = 0x800,
+ .gfxp_nr = 0xfa8,
+ .sm_id = tu102_grctx_generate_sm_id,
+ .skip_pd_num_tpc_per_gpc = true,
+ .rop_mapping = gv100_grctx_generate_rop_mapping,
+ .r406500 = gm200_grctx_generate_r406500,
+ .r400088 = gv100_grctx_generate_r400088,
+ .r419c0c = tu102_grctx_generate_r419c0c,
+};
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/hubgk208.fuc5.h b/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/hubgk208.fuc5.h
index c24f35ad56a6..ae2d5b6891cb 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/hubgk208.fuc5.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/hubgk208.fuc5.h
@@ -441,7 +441,7 @@ static uint32_t gk208_grhub_code[] = {
0x020014fe,
0x12004002,
0xbd0002f6,
- 0x05c94104,
+ 0x05ca4104,
0xbd0010fe,
0x07004024,
0xbd0002f6,
@@ -460,423 +460,423 @@ static uint32_t gk208_grhub_code[] = {
0x01039204,
0x03090080,
0xbd0003f6,
- 0x87044204,
- 0xf6040040,
- 0x04bd0002,
- 0x00400402,
- 0x0002f603,
- 0x31f404bd,
- 0x96048e10,
- 0x00657e40,
- 0xc7feb200,
- 0x01b590f1,
- 0x1ff4f003,
- 0x01020fb5,
- 0x041fbb01,
- 0x800112b6,
- 0xf6010300,
- 0x04bd0001,
- 0x01040080,
+ 0x87048204,
+ 0x04004000,
+ 0xbd0002f6,
+ 0x40040204,
+ 0x02f60300,
+ 0xf404bd00,
+ 0x048e1031,
+ 0x657e4096,
+ 0xfeb20000,
+ 0xb590f1c7,
+ 0xf4f00301,
+ 0x020fb51f,
+ 0x1fbb0101,
+ 0x0112b604,
+ 0x01030080,
0xbd0001f6,
- 0x01004104,
- 0xac7e020f,
- 0xbb7e0006,
- 0x100f0006,
- 0x0006fd7e,
- 0x98000e98,
- 0x207e010f,
- 0x14950001,
- 0xc0008008,
- 0x0004f601,
- 0x008004bd,
- 0x04f601c1,
- 0xb704bd00,
- 0xbb130030,
- 0xf5b6001f,
- 0xd3008002,
- 0x000ff601,
- 0x15b604bd,
- 0x0110b608,
- 0xb20814b6,
- 0x02687e1f,
- 0x001fbb00,
- 0x84020398,
-/* 0x041f: init_gpc */
- 0xb8502000,
- 0x0008044e,
- 0x8f7e1fb2,
+ 0x04008004,
+ 0x0001f601,
+ 0x004104bd,
+ 0x7e020f01,
+ 0x7e0006ad,
+ 0x0f0006bc,
+ 0x06fe7e10,
+ 0x000e9800,
+ 0x7e010f98,
+ 0x95000120,
+ 0x00800814,
+ 0x04f601c0,
+ 0x8004bd00,
+ 0xf601c100,
+ 0x04bd0004,
+ 0x130030b7,
+ 0xb6001fbb,
+ 0x008002f5,
+ 0x0ff601d3,
+ 0xb604bd00,
+ 0x10b60815,
+ 0x0814b601,
+ 0x687e1fb2,
+ 0x1fbb0002,
+ 0x02039800,
+ 0x50200084,
+/* 0x0420: init_gpc */
+ 0x08044eb8,
+ 0x7e1fb200,
+ 0xb800008f,
+ 0x00010c4e,
+ 0x8f7ef4bd,
0x4eb80000,
- 0xbd00010c,
- 0x008f7ef4,
- 0x044eb800,
- 0x8f7e0001,
+ 0x7e000104,
+ 0xb800008f,
+ 0x0001004e,
+ 0x8f7e020f,
0x4eb80000,
- 0x0f000100,
- 0x008f7e02,
- 0x004eb800,
-/* 0x044e: init_gpc_wait */
+/* 0x044f: init_gpc_wait */
+ 0x7e000800,
+ 0xc8000065,
+ 0x0bf41fff,
+ 0x044eb8f9,
0x657e0008,
- 0xffc80000,
- 0xf90bf41f,
- 0x08044eb8,
- 0x00657e00,
- 0x001fbb00,
- 0x800040b7,
- 0xf40132b6,
- 0x000fb41b,
- 0x0006fd7e,
- 0xac7e000f,
- 0x00800006,
- 0x01f60201,
- 0xbd04bd00,
- 0x1f19f014,
- 0x02300080,
- 0xbd0001f6,
-/* 0x0491: wait */
- 0x0028f404,
-/* 0x0497: main */
- 0x0d0031f4,
- 0x00377e10,
- 0xf401f400,
- 0x4001e4b1,
- 0x00c71bf5,
- 0x99f094bd,
- 0x37008004,
- 0x0009f602,
- 0x008104bd,
- 0x11cf02c0,
- 0xc1008200,
- 0x0022cf02,
- 0xf41f13c8,
- 0x23c8770b,
- 0x550bf41f,
- 0x12b220f9,
- 0x99f094bd,
- 0x37008007,
- 0x0009f602,
- 0x32f404bd,
- 0x0231f401,
- 0x0008807e,
- 0x99f094bd,
- 0x17008007,
- 0x0009f602,
- 0x20fc04bd,
- 0x99f094bd,
- 0x37008006,
- 0x0009f602,
- 0x31f404bd,
- 0x08807e01,
+ 0x1fbb0000,
+ 0x0040b700,
+ 0x0132b680,
+ 0x0fb41bf4,
+ 0x06fe7e00,
+ 0x7e000f00,
+ 0x800006ad,
+ 0xf6020100,
+ 0x04bd0001,
+ 0x19f014bd,
+ 0x3000801f,
+ 0x0001f602,
+/* 0x0492: wait */
+ 0x28f404bd,
+ 0x0031f400,
+/* 0x0498: main */
+ 0x377e100d,
+ 0x01f40000,
+ 0x01e4b1f4,
+ 0xc71bf540,
0xf094bd00,
- 0x00800699,
+ 0x00800499,
+ 0x09f60237,
+ 0x8104bd00,
+ 0xcf02c000,
+ 0x00820011,
+ 0x22cf02c1,
+ 0x1f13c800,
+ 0xc8770bf4,
+ 0x0bf41f23,
+ 0xb220f955,
+ 0xf094bd12,
+ 0x00800799,
+ 0x09f60237,
+ 0xf404bd00,
+ 0x31f40132,
+ 0x08817e02,
+ 0xf094bd00,
+ 0x00800799,
0x09f60217,
+ 0xfc04bd00,
+ 0xf094bd20,
+ 0x00800699,
+ 0x09f60237,
0xf404bd00,
-/* 0x0522: chsw_prev_no_next */
- 0x20f92f0e,
- 0x32f412b2,
- 0x0232f401,
- 0x0008807e,
- 0x008020fc,
- 0x02f602c0,
+ 0x817e0131,
+ 0x94bd0008,
+ 0x800699f0,
+ 0xf6021700,
+ 0x04bd0009,
+/* 0x0523: chsw_prev_no_next */
+ 0xf92f0ef4,
+ 0xf412b220,
+ 0x32f40132,
+ 0x08817e02,
+ 0x8020fc00,
+ 0xf602c000,
+ 0x04bd0002,
+/* 0x053f: chsw_no_prev */
+ 0xc8130ef4,
+ 0x0bf41f23,
+ 0x0131f40d,
+ 0x7e0232f4,
+/* 0x054f: chsw_done */
+ 0x02000881,
+ 0xc3008001,
+ 0x0002f602,
+ 0x94bd04bd,
+ 0x800499f0,
+ 0xf6021700,
+ 0x04bd0009,
+ 0xff300ef5,
+/* 0x056c: main_not_ctx_switch */
+ 0xf401e4b0,
+ 0xf2b20c1b,
+ 0x0008217e,
+/* 0x057b: main_not_ctx_chan */
+ 0xb0400ef4,
+ 0x1bf402e4,
+ 0xf094bd2c,
+ 0x00800799,
+ 0x09f60237,
0xf404bd00,
-/* 0x053e: chsw_no_prev */
- 0x23c8130e,
- 0x0d0bf41f,
- 0xf40131f4,
- 0x807e0232,
-/* 0x054e: chsw_done */
- 0x01020008,
- 0x02c30080,
- 0xbd0002f6,
- 0xf094bd04,
- 0x00800499,
+ 0x32f40132,
+ 0x08817e02,
+ 0xf094bd00,
+ 0x00800799,
0x09f60217,
- 0xf504bd00,
-/* 0x056b: main_not_ctx_switch */
- 0xb0ff300e,
- 0x1bf401e4,
- 0x7ef2b20c,
- 0xf4000820,
-/* 0x057a: main_not_ctx_chan */
- 0xe4b0400e,
- 0x2c1bf402,
- 0x99f094bd,
- 0x37008007,
- 0x0009f602,
- 0x32f404bd,
- 0x0232f401,
- 0x0008807e,
- 0x99f094bd,
- 0x17008007,
- 0x0009f602,
- 0x0ef404bd,
-/* 0x05a9: main_not_ctx_save */
- 0x10ef9411,
- 0x7e01f5f0,
- 0xf50002f8,
-/* 0x05b7: main_done */
- 0xbdfee40e,
- 0x1f29f024,
- 0x02300080,
- 0xbd0002f6,
- 0xd20ef504,
-/* 0x05c9: ih */
- 0xf900f9fe,
- 0x0188fe80,
- 0x90f980f9,
- 0xb0f9a0f9,
- 0xe0f9d0f9,
- 0x04bdf0f9,
- 0xcf02004a,
- 0xabc400aa,
- 0x230bf404,
- 0x004e100d,
- 0x00eecf1a,
- 0xcf19004f,
- 0x047e00ff,
- 0xb0b70000,
- 0x010e0400,
- 0xf61d0040,
- 0x04bd000e,
-/* 0x060c: ih_no_fifo */
- 0x0100abe4,
- 0x0d0c0bf4,
- 0x40014e10,
- 0x0000047e,
-/* 0x061c: ih_no_ctxsw */
- 0x0400abe4,
- 0x8e560bf4,
- 0x7e400708,
+ 0xf404bd00,
+/* 0x05aa: main_not_ctx_save */
+ 0xef94110e,
+ 0x01f5f010,
+ 0x0002f87e,
+ 0xfee40ef5,
+/* 0x05b8: main_done */
+ 0x29f024bd,
+ 0x3000801f,
+ 0x0002f602,
+ 0x0ef504bd,
+/* 0x05ca: ih */
+ 0x00f9fed2,
+ 0x88fe80f9,
+ 0xf980f901,
+ 0xf9a0f990,
+ 0xf9d0f9b0,
+ 0xbdf0f9e0,
+ 0x02004a04,
+ 0xc400aacf,
+ 0x0bf404ab,
+ 0x4e100d23,
+ 0xeecf1a00,
+ 0x19004f00,
+ 0x7e00ffcf,
+ 0xb7000004,
+ 0x0e0400b0,
+ 0x1d004001,
+ 0xbd000ef6,
+/* 0x060d: ih_no_fifo */
+ 0x00abe404,
+ 0x0c0bf401,
+ 0x014e100d,
+ 0x00047e40,
+/* 0x061d: ih_no_ctxsw */
+ 0x00abe400,
+ 0x560bf404,
+ 0x4007088e,
+ 0x0000657e,
+ 0x0080ffb2,
+ 0x0ff60204,
+ 0x8e04bd00,
+ 0x7e400704,
0xb2000065,
- 0x040080ff,
+ 0x030080ff,
0x000ff602,
- 0x048e04bd,
- 0x657e4007,
- 0xffb20000,
- 0x02030080,
- 0xbd000ff6,
- 0x50fec704,
- 0x8f02ee94,
- 0xbb400700,
- 0x657e00ef,
- 0x00800000,
- 0x0ff60202,
+ 0xfec704bd,
+ 0x02ee9450,
+ 0x4007008f,
+ 0x7e00efbb,
+ 0x80000065,
+ 0xf6020200,
+ 0x04bd000f,
+ 0xf87e030f,
+ 0x004b0002,
+ 0x8ebfb201,
+ 0x7e400144,
+/* 0x0677: ih_no_fwmthd */
+ 0x4b00008f,
+ 0xb0bd0504,
+ 0xf4b4abff,
+ 0x00800c0b,
+ 0x0bf60307,
+/* 0x068b: ih_no_other */
+ 0x4004bd00,
+ 0x0af60100,
+ 0xfc04bd00,
+ 0xfce0fcf0,
+ 0xfcb0fcd0,
+ 0xfc90fca0,
+ 0x0088fe80,
+ 0x00fc80fc,
+ 0xf80032f4,
+/* 0x06ad: ctx_4170s */
+ 0x10f5f001,
+ 0x708effb2,
+ 0x8f7e4041,
+ 0x00f80000,
+/* 0x06bc: ctx_4170w */
+ 0x4041708e,
+ 0x0000657e,
+ 0xf4f0ffb2,
+ 0xf31bf410,
+/* 0x06ce: ctx_redswitch */
+ 0x004e00f8,
+ 0x40e5f002,
+ 0xf020e5f0,
+ 0x008010e5,
+ 0x0ef60185,
0x0f04bd00,
- 0x02f87e03,
- 0x01004b00,
- 0x448ebfb2,
- 0x8f7e4001,
-/* 0x0676: ih_no_fwmthd */
- 0x044b0000,
- 0xffb0bd05,
- 0x0bf4b4ab,
- 0x0700800c,
- 0x000bf603,
-/* 0x068a: ih_no_other */
- 0x004004bd,
- 0x000af601,
- 0xf0fc04bd,
- 0xd0fce0fc,
- 0xa0fcb0fc,
- 0x80fc90fc,
- 0xfc0088fe,
- 0xf400fc80,
- 0x01f80032,
-/* 0x06ac: ctx_4170s */
- 0xb210f5f0,
- 0x41708eff,
+/* 0x06e5: ctx_redswitch_delay */
+ 0x01f2b608,
+ 0xf1fd1bf4,
+ 0xf10400e5,
+ 0x800100e5,
+ 0xf6018500,
+ 0x04bd000e,
+/* 0x06fe: ctx_86c */
+ 0x008000f8,
+ 0x0ff60223,
+ 0xb204bd00,
+ 0x8a148eff,
0x008f7e40,
-/* 0x06bb: ctx_4170w */
- 0x8e00f800,
- 0x7e404170,
- 0xb2000065,
- 0x10f4f0ff,
- 0xf8f31bf4,
-/* 0x06cd: ctx_redswitch */
- 0x02004e00,
- 0xf040e5f0,
- 0xe5f020e5,
- 0x85008010,
- 0x000ef601,
- 0x080f04bd,
-/* 0x06e4: ctx_redswitch_delay */
- 0xf401f2b6,
- 0xe5f1fd1b,
- 0xe5f10400,
- 0x00800100,
- 0x0ef60185,
- 0xf804bd00,
-/* 0x06fd: ctx_86c */
- 0x23008000,
+ 0x8effb200,
+ 0x7e41a88c,
+ 0xf800008f,
+/* 0x071d: ctx_mem */
+ 0x84008000,
0x000ff602,
- 0xffb204bd,
- 0x408a148e,
- 0x00008f7e,
- 0x8c8effb2,
- 0x8f7e41a8,
- 0x00f80000,
-/* 0x071c: ctx_mem */
- 0x02840080,
- 0xbd000ff6,
-/* 0x0725: ctx_mem_wait */
- 0x84008f04,
- 0x00ffcf02,
- 0xf405fffd,
- 0x00f8f61b,
-/* 0x0734: ctx_load */
- 0x99f094bd,
- 0x37008005,
- 0x0009f602,
- 0x0c0a04bd,
- 0x0000b87e,
- 0x0080f4bd,
- 0x0ff60289,
- 0x8004bd00,
- 0xf602c100,
- 0x04bd0002,
- 0x02830080,
+/* 0x0726: ctx_mem_wait */
+ 0x008f04bd,
+ 0xffcf0284,
+ 0x05fffd00,
+ 0xf8f61bf4,
+/* 0x0735: ctx_load */
+ 0xf094bd00,
+ 0x00800599,
+ 0x09f60237,
+ 0x0a04bd00,
+ 0x00b87e0c,
+ 0x80f4bd00,
+ 0xf6028900,
+ 0x04bd000f,
+ 0x02c10080,
0xbd0002f6,
- 0x7e070f04,
- 0x8000071c,
- 0xf602c000,
- 0x04bd0002,
- 0xf0000bfe,
- 0x24b61f2a,
- 0x0220b604,
- 0x99f094bd,
- 0x37008008,
- 0x0009f602,
- 0x008004bd,
- 0x02f60281,
- 0xd204bd00,
- 0x80000000,
- 0x800225f0,
- 0xf6028800,
- 0x04bd0002,
- 0x00421001,
- 0x0223f002,
- 0xf80512fa,
- 0xf094bd03,
+ 0x83008004,
+ 0x0002f602,
+ 0x070f04bd,
+ 0x00071d7e,
+ 0x02c00080,
+ 0xbd0002f6,
+ 0x000bfe04,
+ 0xb61f2af0,
+ 0x20b60424,
+ 0xf094bd02,
0x00800899,
- 0x09f60217,
- 0x9804bd00,
- 0x14b68101,
- 0x80029818,
- 0xfd0825b6,
- 0x01b50512,
- 0xf094bd16,
- 0x00800999,
0x09f60237,
0x8004bd00,
0xf6028100,
- 0x04bd0001,
- 0x00800102,
- 0x02f60288,
- 0x4104bd00,
- 0x13f00100,
- 0x0501fa06,
+ 0x04bd0002,
+ 0x000000d2,
+ 0x0225f080,
+ 0x02880080,
+ 0xbd0002f6,
+ 0x42100104,
+ 0x23f00200,
+ 0x0512fa02,
0x94bd03f8,
- 0x800999f0,
+ 0x800899f0,
0xf6021700,
0x04bd0009,
- 0x99f094bd,
- 0x17008005,
- 0x0009f602,
- 0x00f804bd,
-/* 0x0820: ctx_chan */
- 0x0007347e,
- 0xb87e0c0a,
- 0x050f0000,
- 0x00071c7e,
-/* 0x0832: ctx_mmio_exec */
- 0x039800f8,
- 0x81008041,
- 0x0003f602,
- 0x34bd04bd,
-/* 0x0840: ctx_mmio_loop */
- 0xf4ff34c4,
- 0x00450e1b,
- 0x0653f002,
- 0xf80535fa,
-/* 0x0851: ctx_mmio_pull */
- 0x804e9803,
- 0x7e814f98,
- 0xb600008f,
- 0x12b60830,
- 0xdf1bf401,
-/* 0x0864: ctx_mmio_done */
- 0x80160398,
- 0xf6028100,
- 0x04bd0003,
- 0x414000b5,
- 0x13f00100,
- 0x0601fa06,
- 0x00f803f8,
-/* 0x0880: ctx_xfer */
- 0x0080040e,
- 0x0ef60302,
-/* 0x088b: ctx_xfer_idle */
- 0x8e04bd00,
- 0xcf030000,
- 0xe4f100ee,
- 0x1bf42000,
- 0x0611f4f5,
-/* 0x089f: ctx_xfer_pre */
- 0x0f0c02f4,
- 0x06fd7e10,
- 0x1b11f400,
-/* 0x08a8: ctx_xfer_pre_load */
- 0xac7e020f,
- 0xbb7e0006,
- 0xcd7e0006,
- 0xf4bd0006,
- 0x0006ac7e,
- 0x0007347e,
-/* 0x08c0: ctx_xfer_exec */
- 0xbd160198,
- 0x05008024,
- 0x0002f601,
- 0x1fb204bd,
- 0x41a5008e,
- 0x00008f7e,
- 0xf001fcf0,
- 0x24b6022c,
- 0x05f2fd01,
- 0x048effb2,
- 0x8f7e41a5,
- 0x167e0000,
- 0x24bd0002,
- 0x0247fc80,
- 0xbd0002f6,
- 0x012cf004,
- 0x800320b6,
- 0xf6024afc,
+ 0xb6810198,
+ 0x02981814,
+ 0x0825b680,
+ 0xb50512fd,
+ 0x94bd1601,
+ 0x800999f0,
+ 0xf6023700,
+ 0x04bd0009,
+ 0x02810080,
+ 0xbd0001f6,
+ 0x80010204,
+ 0xf6028800,
0x04bd0002,
- 0xf001acf0,
- 0x000b06a5,
- 0x98000c98,
- 0x000e010d,
- 0x00013d7e,
- 0xec7e080a,
- 0x0a7e0000,
- 0x01f40002,
- 0x7e0c0a12,
+ 0xf0010041,
+ 0x01fa0613,
+ 0xbd03f805,
+ 0x0999f094,
+ 0x02170080,
+ 0xbd0009f6,
+ 0xf094bd04,
+ 0x00800599,
+ 0x09f60217,
+ 0xf804bd00,
+/* 0x0821: ctx_chan */
+ 0x07357e00,
+ 0x7e0c0a00,
0x0f0000b8,
- 0x071c7e05,
- 0x2d02f400,
-/* 0x093c: ctx_xfer_post */
- 0xac7e020f,
- 0xf4bd0006,
- 0x0006fd7e,
- 0x0002277e,
- 0x0006bb7e,
- 0xac7ef4bd,
+ 0x071d7e05,
+/* 0x0833: ctx_mmio_exec */
+ 0x9800f800,
+ 0x00804103,
+ 0x03f60281,
+ 0xbd04bd00,
+/* 0x0841: ctx_mmio_loop */
+ 0xff34c434,
+ 0x450e1bf4,
+ 0x53f00200,
+ 0x0535fa06,
+/* 0x0852: ctx_mmio_pull */
+ 0x4e9803f8,
+ 0x814f9880,
+ 0x00008f7e,
+ 0xb60830b6,
+ 0x1bf40112,
+/* 0x0865: ctx_mmio_done */
+ 0x160398df,
+ 0x02810080,
+ 0xbd0003f6,
+ 0x4000b504,
+ 0xf0010041,
+ 0x01fa0613,
+ 0xf803f806,
+/* 0x0881: ctx_xfer */
+ 0x80040e00,
+ 0xf6030200,
+ 0x04bd000e,
+/* 0x088c: ctx_xfer_idle */
+ 0x0300008e,
+ 0xf100eecf,
+ 0xf42000e4,
+ 0x11f4f51b,
+ 0x0c02f406,
+/* 0x08a0: ctx_xfer_pre */
+ 0xfe7e100f,
0x11f40006,
- 0x40019810,
- 0xf40511fd,
- 0x327e070b,
-/* 0x0966: ctx_xfer_no_post_mmio */
-/* 0x0966: ctx_xfer_done */
- 0x00f80008,
+/* 0x08a9: ctx_xfer_pre_load */
+ 0x7e020f1b,
+ 0x7e0006ad,
+ 0x7e0006bc,
+ 0xbd0006ce,
+ 0x06ad7ef4,
+ 0x07357e00,
+/* 0x08c1: ctx_xfer_exec */
+ 0x16019800,
+ 0x008024bd,
+ 0x02f60105,
+ 0xb204bd00,
+ 0xa5008e1f,
+ 0x008f7e41,
+ 0x01fcf000,
+ 0xb6022cf0,
+ 0xf2fd0124,
+ 0x8effb205,
+ 0x7e41a504,
+ 0x7e00008f,
+ 0xbd000216,
+ 0x47fc8024,
+ 0x0002f602,
+ 0x2cf004bd,
+ 0x0320b601,
+ 0x024afc80,
+ 0xbd0002f6,
+ 0x01acf004,
+ 0x0b06a5f0,
+ 0x000c9800,
+ 0x0e010d98,
+ 0x013d7e00,
+ 0x7e080a00,
+ 0x7e0000ec,
+ 0xf400020a,
+ 0x0c0a1201,
+ 0x0000b87e,
+ 0x1d7e050f,
+ 0x02f40007,
+/* 0x093d: ctx_xfer_post */
+ 0x7e020f2d,
+ 0xbd0006ad,
+ 0x06fe7ef4,
+ 0x02277e00,
+ 0x06bc7e00,
+ 0x7ef4bd00,
+ 0xf40006ad,
+ 0x01981011,
+ 0x0511fd40,
+ 0x7e070bf4,
+/* 0x0967: ctx_xfer_no_post_mmio */
+/* 0x0967: ctx_xfer_done */
+ 0xf8000833,
0x00000000,
0x00000000,
0x00000000,
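The word arrays in these headers are assembler output for the graphics falcons; the change shifts the image by a single byte early on, so every later routine and its label comment move up by one (ctx_chan 0x0820 -> 0x0821, ih 0x05c9 -> 0x05ca) and the packed words around them are rewritten wholesale. A rough, self-contained sketch of how such an array reaches the falcon, streamed through an auto-incrementing IMEM window; the register offsets and bit layout here are assumptions for illustration, not the real falcon map:

#include <stdint.h>
#include <stddef.h>

#define FLCN_IMEMC	0x180	/* illustrative: transfer control window */
#define FLCN_IMEMD	0x184	/* illustrative: auto-incremented data port */

static void wr32(volatile uint32_t *mmio, uint32_t reg, uint32_t val)
{
	mmio[reg / 4] = val;
}

/* Stream an assembled image, e.g. gm107_grhub_code[], into falcon IMEM. */
static void falcon_load_imem(volatile uint32_t *mmio,
			     const uint32_t *code, size_t words)
{
	wr32(mmio, FLCN_IMEMC, 0 | (1u << 24));	/* base 0, auto-increment */
	while (words--)
		wr32(mmio, FLCN_IMEMD, *code++);
}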
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/hubgm107.fuc5.h b/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/hubgm107.fuc5.h
index 649a442b4390..449dae753203 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/hubgm107.fuc5.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/hubgm107.fuc5.h
@@ -441,7 +441,7 @@ static uint32_t gm107_grhub_code[] = {
0x020014fe,
0x12004002,
0xbd0002f6,
- 0x05c94104,
+ 0x05ca4104,
0xbd0010fe,
0x07004024,
0xbd0002f6,
@@ -460,423 +460,423 @@ static uint32_t gm107_grhub_code[] = {
0x01039204,
0x03090080,
0xbd0003f6,
- 0x87044204,
- 0xf6040040,
- 0x04bd0002,
- 0x00400402,
- 0x0002f603,
- 0x31f404bd,
- 0x96048e10,
- 0x00657e40,
- 0xc7feb200,
- 0x01b590f1,
- 0x1ff4f003,
- 0x01020fb5,
- 0x041fbb01,
- 0x800112b6,
- 0xf6010300,
- 0x04bd0001,
- 0x01040080,
+ 0x87048204,
+ 0x04004000,
+ 0xbd0002f6,
+ 0x40040204,
+ 0x02f60300,
+ 0xf404bd00,
+ 0x048e1031,
+ 0x657e4096,
+ 0xfeb20000,
+ 0xb590f1c7,
+ 0xf4f00301,
+ 0x020fb51f,
+ 0x1fbb0101,
+ 0x0112b604,
+ 0x01030080,
0xbd0001f6,
- 0x01004104,
- 0xac7e020f,
- 0xbb7e0006,
- 0x100f0006,
- 0x0006fd7e,
- 0x98000e98,
- 0x207e010f,
- 0x14950001,
- 0xc0008008,
- 0x0004f601,
- 0x008004bd,
- 0x04f601c1,
- 0xb704bd00,
- 0xbb130030,
- 0xf5b6001f,
- 0xd3008002,
- 0x000ff601,
- 0x15b604bd,
- 0x0110b608,
- 0xb20814b6,
- 0x02687e1f,
- 0x001fbb00,
- 0x84020398,
-/* 0x041f: init_gpc */
- 0xb8502000,
- 0x0008044e,
- 0x8f7e1fb2,
+ 0x04008004,
+ 0x0001f601,
+ 0x004104bd,
+ 0x7e020f01,
+ 0x7e0006ad,
+ 0x0f0006bc,
+ 0x06fe7e10,
+ 0x000e9800,
+ 0x7e010f98,
+ 0x95000120,
+ 0x00800814,
+ 0x04f601c0,
+ 0x8004bd00,
+ 0xf601c100,
+ 0x04bd0004,
+ 0x130030b7,
+ 0xb6001fbb,
+ 0x008002f5,
+ 0x0ff601d3,
+ 0xb604bd00,
+ 0x10b60815,
+ 0x0814b601,
+ 0x687e1fb2,
+ 0x1fbb0002,
+ 0x02039800,
+ 0x50200084,
+/* 0x0420: init_gpc */
+ 0x08044eb8,
+ 0x7e1fb200,
+ 0xb800008f,
+ 0x00010c4e,
+ 0x8f7ef4bd,
0x4eb80000,
- 0xbd00010c,
- 0x008f7ef4,
- 0x044eb800,
- 0x8f7e0001,
+ 0x7e000104,
+ 0xb800008f,
+ 0x0001004e,
+ 0x8f7e020f,
0x4eb80000,
- 0x0f000100,
- 0x008f7e02,
- 0x004eb800,
-/* 0x044e: init_gpc_wait */
+/* 0x044f: init_gpc_wait */
+ 0x7e000800,
+ 0xc8000065,
+ 0x0bf41fff,
+ 0x044eb8f9,
0x657e0008,
- 0xffc80000,
- 0xf90bf41f,
- 0x08044eb8,
- 0x00657e00,
- 0x001fbb00,
- 0x800040b7,
- 0xf40132b6,
- 0x000fb41b,
- 0x0006fd7e,
- 0xac7e000f,
- 0x00800006,
- 0x01f60201,
- 0xbd04bd00,
- 0x1f19f014,
- 0x02300080,
- 0xbd0001f6,
-/* 0x0491: wait */
- 0x0028f404,
-/* 0x0497: main */
- 0x0d0031f4,
- 0x00377e10,
- 0xf401f400,
- 0x4001e4b1,
- 0x00c71bf5,
- 0x99f094bd,
- 0x37008004,
- 0x0009f602,
- 0x008104bd,
- 0x11cf02c0,
- 0xc1008200,
- 0x0022cf02,
- 0xf41f13c8,
- 0x23c8770b,
- 0x550bf41f,
- 0x12b220f9,
- 0x99f094bd,
- 0x37008007,
- 0x0009f602,
- 0x32f404bd,
- 0x0231f401,
- 0x0008807e,
- 0x99f094bd,
- 0x17008007,
- 0x0009f602,
- 0x20fc04bd,
- 0x99f094bd,
- 0x37008006,
- 0x0009f602,
- 0x31f404bd,
- 0x08807e01,
+ 0x1fbb0000,
+ 0x0040b700,
+ 0x0132b680,
+ 0x0fb41bf4,
+ 0x06fe7e00,
+ 0x7e000f00,
+ 0x800006ad,
+ 0xf6020100,
+ 0x04bd0001,
+ 0x19f014bd,
+ 0x3000801f,
+ 0x0001f602,
+/* 0x0492: wait */
+ 0x28f404bd,
+ 0x0031f400,
+/* 0x0498: main */
+ 0x377e100d,
+ 0x01f40000,
+ 0x01e4b1f4,
+ 0xc71bf540,
0xf094bd00,
- 0x00800699,
+ 0x00800499,
+ 0x09f60237,
+ 0x8104bd00,
+ 0xcf02c000,
+ 0x00820011,
+ 0x22cf02c1,
+ 0x1f13c800,
+ 0xc8770bf4,
+ 0x0bf41f23,
+ 0xb220f955,
+ 0xf094bd12,
+ 0x00800799,
+ 0x09f60237,
+ 0xf404bd00,
+ 0x31f40132,
+ 0x08817e02,
+ 0xf094bd00,
+ 0x00800799,
0x09f60217,
+ 0xfc04bd00,
+ 0xf094bd20,
+ 0x00800699,
+ 0x09f60237,
0xf404bd00,
-/* 0x0522: chsw_prev_no_next */
- 0x20f92f0e,
- 0x32f412b2,
- 0x0232f401,
- 0x0008807e,
- 0x008020fc,
- 0x02f602c0,
+ 0x817e0131,
+ 0x94bd0008,
+ 0x800699f0,
+ 0xf6021700,
+ 0x04bd0009,
+/* 0x0523: chsw_prev_no_next */
+ 0xf92f0ef4,
+ 0xf412b220,
+ 0x32f40132,
+ 0x08817e02,
+ 0x8020fc00,
+ 0xf602c000,
+ 0x04bd0002,
+/* 0x053f: chsw_no_prev */
+ 0xc8130ef4,
+ 0x0bf41f23,
+ 0x0131f40d,
+ 0x7e0232f4,
+/* 0x054f: chsw_done */
+ 0x02000881,
+ 0xc3008001,
+ 0x0002f602,
+ 0x94bd04bd,
+ 0x800499f0,
+ 0xf6021700,
+ 0x04bd0009,
+ 0xff300ef5,
+/* 0x056c: main_not_ctx_switch */
+ 0xf401e4b0,
+ 0xf2b20c1b,
+ 0x0008217e,
+/* 0x057b: main_not_ctx_chan */
+ 0xb0400ef4,
+ 0x1bf402e4,
+ 0xf094bd2c,
+ 0x00800799,
+ 0x09f60237,
0xf404bd00,
-/* 0x053e: chsw_no_prev */
- 0x23c8130e,
- 0x0d0bf41f,
- 0xf40131f4,
- 0x807e0232,
-/* 0x054e: chsw_done */
- 0x01020008,
- 0x02c30080,
- 0xbd0002f6,
- 0xf094bd04,
- 0x00800499,
+ 0x32f40132,
+ 0x08817e02,
+ 0xf094bd00,
+ 0x00800799,
0x09f60217,
- 0xf504bd00,
-/* 0x056b: main_not_ctx_switch */
- 0xb0ff300e,
- 0x1bf401e4,
- 0x7ef2b20c,
- 0xf4000820,
-/* 0x057a: main_not_ctx_chan */
- 0xe4b0400e,
- 0x2c1bf402,
- 0x99f094bd,
- 0x37008007,
- 0x0009f602,
- 0x32f404bd,
- 0x0232f401,
- 0x0008807e,
- 0x99f094bd,
- 0x17008007,
- 0x0009f602,
- 0x0ef404bd,
-/* 0x05a9: main_not_ctx_save */
- 0x10ef9411,
- 0x7e01f5f0,
- 0xf50002f8,
-/* 0x05b7: main_done */
- 0xbdfee40e,
- 0x1f29f024,
- 0x02300080,
- 0xbd0002f6,
- 0xd20ef504,
-/* 0x05c9: ih */
- 0xf900f9fe,
- 0x0188fe80,
- 0x90f980f9,
- 0xb0f9a0f9,
- 0xe0f9d0f9,
- 0x04bdf0f9,
- 0xcf02004a,
- 0xabc400aa,
- 0x230bf404,
- 0x004e100d,
- 0x00eecf1a,
- 0xcf19004f,
- 0x047e00ff,
- 0xb0b70000,
- 0x010e0400,
- 0xf61d0040,
- 0x04bd000e,
-/* 0x060c: ih_no_fifo */
- 0x0100abe4,
- 0x0d0c0bf4,
- 0x40014e10,
- 0x0000047e,
-/* 0x061c: ih_no_ctxsw */
- 0x0400abe4,
- 0x8e560bf4,
- 0x7e400708,
+ 0xf404bd00,
+/* 0x05aa: main_not_ctx_save */
+ 0xef94110e,
+ 0x01f5f010,
+ 0x0002f87e,
+ 0xfee40ef5,
+/* 0x05b8: main_done */
+ 0x29f024bd,
+ 0x3000801f,
+ 0x0002f602,
+ 0x0ef504bd,
+/* 0x05ca: ih */
+ 0x00f9fed2,
+ 0x88fe80f9,
+ 0xf980f901,
+ 0xf9a0f990,
+ 0xf9d0f9b0,
+ 0xbdf0f9e0,
+ 0x02004a04,
+ 0xc400aacf,
+ 0x0bf404ab,
+ 0x4e100d23,
+ 0xeecf1a00,
+ 0x19004f00,
+ 0x7e00ffcf,
+ 0xb7000004,
+ 0x0e0400b0,
+ 0x1d004001,
+ 0xbd000ef6,
+/* 0x060d: ih_no_fifo */
+ 0x00abe404,
+ 0x0c0bf401,
+ 0x014e100d,
+ 0x00047e40,
+/* 0x061d: ih_no_ctxsw */
+ 0x00abe400,
+ 0x560bf404,
+ 0x4007088e,
+ 0x0000657e,
+ 0x0080ffb2,
+ 0x0ff60204,
+ 0x8e04bd00,
+ 0x7e400704,
0xb2000065,
- 0x040080ff,
+ 0x030080ff,
0x000ff602,
- 0x048e04bd,
- 0x657e4007,
- 0xffb20000,
- 0x02030080,
- 0xbd000ff6,
- 0x50fec704,
- 0x8f02ee94,
- 0xbb400700,
- 0x657e00ef,
- 0x00800000,
- 0x0ff60202,
+ 0xfec704bd,
+ 0x02ee9450,
+ 0x4007008f,
+ 0x7e00efbb,
+ 0x80000065,
+ 0xf6020200,
+ 0x04bd000f,
+ 0xf87e030f,
+ 0x004b0002,
+ 0x8ebfb201,
+ 0x7e400144,
+/* 0x0677: ih_no_fwmthd */
+ 0x4b00008f,
+ 0xb0bd0504,
+ 0xf4b4abff,
+ 0x00800c0b,
+ 0x0bf60307,
+/* 0x068b: ih_no_other */
+ 0x4004bd00,
+ 0x0af60100,
+ 0xfc04bd00,
+ 0xfce0fcf0,
+ 0xfcb0fcd0,
+ 0xfc90fca0,
+ 0x0088fe80,
+ 0x00fc80fc,
+ 0xf80032f4,
+/* 0x06ad: ctx_4170s */
+ 0x10f5f001,
+ 0x708effb2,
+ 0x8f7e4041,
+ 0x00f80000,
+/* 0x06bc: ctx_4170w */
+ 0x4041708e,
+ 0x0000657e,
+ 0xf4f0ffb2,
+ 0xf31bf410,
+/* 0x06ce: ctx_redswitch */
+ 0x004e00f8,
+ 0x40e5f002,
+ 0xf020e5f0,
+ 0x008010e5,
+ 0x0ef60185,
0x0f04bd00,
- 0x02f87e03,
- 0x01004b00,
- 0x448ebfb2,
- 0x8f7e4001,
-/* 0x0676: ih_no_fwmthd */
- 0x044b0000,
- 0xffb0bd05,
- 0x0bf4b4ab,
- 0x0700800c,
- 0x000bf603,
-/* 0x068a: ih_no_other */
- 0x004004bd,
- 0x000af601,
- 0xf0fc04bd,
- 0xd0fce0fc,
- 0xa0fcb0fc,
- 0x80fc90fc,
- 0xfc0088fe,
- 0xf400fc80,
- 0x01f80032,
-/* 0x06ac: ctx_4170s */
- 0xb210f5f0,
- 0x41708eff,
+/* 0x06e5: ctx_redswitch_delay */
+ 0x01f2b608,
+ 0xf1fd1bf4,
+ 0xf10400e5,
+ 0x800100e5,
+ 0xf6018500,
+ 0x04bd000e,
+/* 0x06fe: ctx_86c */
+ 0x008000f8,
+ 0x0ff60223,
+ 0xb204bd00,
+ 0x8a148eff,
0x008f7e40,
-/* 0x06bb: ctx_4170w */
- 0x8e00f800,
- 0x7e404170,
- 0xb2000065,
- 0x10f4f0ff,
- 0xf8f31bf4,
-/* 0x06cd: ctx_redswitch */
- 0x02004e00,
- 0xf040e5f0,
- 0xe5f020e5,
- 0x85008010,
- 0x000ef601,
- 0x080f04bd,
-/* 0x06e4: ctx_redswitch_delay */
- 0xf401f2b6,
- 0xe5f1fd1b,
- 0xe5f10400,
- 0x00800100,
- 0x0ef60185,
- 0xf804bd00,
-/* 0x06fd: ctx_86c */
- 0x23008000,
+ 0x8effb200,
+ 0x7e41a88c,
+ 0xf800008f,
+/* 0x071d: ctx_mem */
+ 0x84008000,
0x000ff602,
- 0xffb204bd,
- 0x408a148e,
- 0x00008f7e,
- 0x8c8effb2,
- 0x8f7e41a8,
- 0x00f80000,
-/* 0x071c: ctx_mem */
- 0x02840080,
- 0xbd000ff6,
-/* 0x0725: ctx_mem_wait */
- 0x84008f04,
- 0x00ffcf02,
- 0xf405fffd,
- 0x00f8f61b,
-/* 0x0734: ctx_load */
- 0x99f094bd,
- 0x37008005,
- 0x0009f602,
- 0x0c0a04bd,
- 0x0000b87e,
- 0x0080f4bd,
- 0x0ff60289,
- 0x8004bd00,
- 0xf602c100,
- 0x04bd0002,
- 0x02830080,
+/* 0x0726: ctx_mem_wait */
+ 0x008f04bd,
+ 0xffcf0284,
+ 0x05fffd00,
+ 0xf8f61bf4,
+/* 0x0735: ctx_load */
+ 0xf094bd00,
+ 0x00800599,
+ 0x09f60237,
+ 0x0a04bd00,
+ 0x00b87e0c,
+ 0x80f4bd00,
+ 0xf6028900,
+ 0x04bd000f,
+ 0x02c10080,
0xbd0002f6,
- 0x7e070f04,
- 0x8000071c,
- 0xf602c000,
- 0x04bd0002,
- 0xf0000bfe,
- 0x24b61f2a,
- 0x0220b604,
- 0x99f094bd,
- 0x37008008,
- 0x0009f602,
- 0x008004bd,
- 0x02f60281,
- 0xd204bd00,
- 0x80000000,
- 0x800225f0,
- 0xf6028800,
- 0x04bd0002,
- 0x00421001,
- 0x0223f002,
- 0xf80512fa,
- 0xf094bd03,
+ 0x83008004,
+ 0x0002f602,
+ 0x070f04bd,
+ 0x00071d7e,
+ 0x02c00080,
+ 0xbd0002f6,
+ 0x000bfe04,
+ 0xb61f2af0,
+ 0x20b60424,
+ 0xf094bd02,
0x00800899,
- 0x09f60217,
- 0x9804bd00,
- 0x14b68101,
- 0x80029818,
- 0xfd0825b6,
- 0x01b50512,
- 0xf094bd16,
- 0x00800999,
0x09f60237,
0x8004bd00,
0xf6028100,
- 0x04bd0001,
- 0x00800102,
- 0x02f60288,
- 0x4104bd00,
- 0x13f00100,
- 0x0501fa06,
+ 0x04bd0002,
+ 0x000000d2,
+ 0x0225f080,
+ 0x02880080,
+ 0xbd0002f6,
+ 0x42100104,
+ 0x23f00200,
+ 0x0512fa02,
0x94bd03f8,
- 0x800999f0,
+ 0x800899f0,
0xf6021700,
0x04bd0009,
- 0x99f094bd,
- 0x17008005,
- 0x0009f602,
- 0x00f804bd,
-/* 0x0820: ctx_chan */
- 0x0007347e,
- 0xb87e0c0a,
- 0x050f0000,
- 0x00071c7e,
-/* 0x0832: ctx_mmio_exec */
- 0x039800f8,
- 0x81008041,
- 0x0003f602,
- 0x34bd04bd,
-/* 0x0840: ctx_mmio_loop */
- 0xf4ff34c4,
- 0x00450e1b,
- 0x0653f002,
- 0xf80535fa,
-/* 0x0851: ctx_mmio_pull */
- 0x804e9803,
- 0x7e814f98,
- 0xb600008f,
- 0x12b60830,
- 0xdf1bf401,
-/* 0x0864: ctx_mmio_done */
- 0x80160398,
- 0xf6028100,
- 0x04bd0003,
- 0x414000b5,
- 0x13f00100,
- 0x0601fa06,
- 0x00f803f8,
-/* 0x0880: ctx_xfer */
- 0x0080040e,
- 0x0ef60302,
-/* 0x088b: ctx_xfer_idle */
- 0x8e04bd00,
- 0xcf030000,
- 0xe4f100ee,
- 0x1bf42000,
- 0x0611f4f5,
-/* 0x089f: ctx_xfer_pre */
- 0x0f0c02f4,
- 0x06fd7e10,
- 0x1b11f400,
-/* 0x08a8: ctx_xfer_pre_load */
- 0xac7e020f,
- 0xbb7e0006,
- 0xcd7e0006,
- 0xf4bd0006,
- 0x0006ac7e,
- 0x0007347e,
-/* 0x08c0: ctx_xfer_exec */
- 0xbd160198,
- 0x05008024,
- 0x0002f601,
- 0x1fb204bd,
- 0x41a5008e,
- 0x00008f7e,
- 0xf001fcf0,
- 0x24b6022c,
- 0x05f2fd01,
- 0x048effb2,
- 0x8f7e41a5,
- 0x167e0000,
- 0x24bd0002,
- 0x0247fc80,
- 0xbd0002f6,
- 0x012cf004,
- 0x800320b6,
- 0xf6024afc,
+ 0xb6810198,
+ 0x02981814,
+ 0x0825b680,
+ 0xb50512fd,
+ 0x94bd1601,
+ 0x800999f0,
+ 0xf6023700,
+ 0x04bd0009,
+ 0x02810080,
+ 0xbd0001f6,
+ 0x80010204,
+ 0xf6028800,
0x04bd0002,
- 0xf001acf0,
- 0x000b06a5,
- 0x98000c98,
- 0x000e010d,
- 0x00013d7e,
- 0xec7e080a,
- 0x0a7e0000,
- 0x01f40002,
- 0x7e0c0a12,
+ 0xf0010041,
+ 0x01fa0613,
+ 0xbd03f805,
+ 0x0999f094,
+ 0x02170080,
+ 0xbd0009f6,
+ 0xf094bd04,
+ 0x00800599,
+ 0x09f60217,
+ 0xf804bd00,
+/* 0x0821: ctx_chan */
+ 0x07357e00,
+ 0x7e0c0a00,
0x0f0000b8,
- 0x071c7e05,
- 0x2d02f400,
-/* 0x093c: ctx_xfer_post */
- 0xac7e020f,
- 0xf4bd0006,
- 0x0006fd7e,
- 0x0002277e,
- 0x0006bb7e,
- 0xac7ef4bd,
+ 0x071d7e05,
+/* 0x0833: ctx_mmio_exec */
+ 0x9800f800,
+ 0x00804103,
+ 0x03f60281,
+ 0xbd04bd00,
+/* 0x0841: ctx_mmio_loop */
+ 0xff34c434,
+ 0x450e1bf4,
+ 0x53f00200,
+ 0x0535fa06,
+/* 0x0852: ctx_mmio_pull */
+ 0x4e9803f8,
+ 0x814f9880,
+ 0x00008f7e,
+ 0xb60830b6,
+ 0x1bf40112,
+/* 0x0865: ctx_mmio_done */
+ 0x160398df,
+ 0x02810080,
+ 0xbd0003f6,
+ 0x4000b504,
+ 0xf0010041,
+ 0x01fa0613,
+ 0xf803f806,
+/* 0x0881: ctx_xfer */
+ 0x80040e00,
+ 0xf6030200,
+ 0x04bd000e,
+/* 0x088c: ctx_xfer_idle */
+ 0x0300008e,
+ 0xf100eecf,
+ 0xf42000e4,
+ 0x11f4f51b,
+ 0x0c02f406,
+/* 0x08a0: ctx_xfer_pre */
+ 0xfe7e100f,
0x11f40006,
- 0x40019810,
- 0xf40511fd,
- 0x327e070b,
-/* 0x0966: ctx_xfer_no_post_mmio */
-/* 0x0966: ctx_xfer_done */
- 0x00f80008,
+/* 0x08a9: ctx_xfer_pre_load */
+ 0x7e020f1b,
+ 0x7e0006ad,
+ 0x7e0006bc,
+ 0xbd0006ce,
+ 0x06ad7ef4,
+ 0x07357e00,
+/* 0x08c1: ctx_xfer_exec */
+ 0x16019800,
+ 0x008024bd,
+ 0x02f60105,
+ 0xb204bd00,
+ 0xa5008e1f,
+ 0x008f7e41,
+ 0x01fcf000,
+ 0xb6022cf0,
+ 0xf2fd0124,
+ 0x8effb205,
+ 0x7e41a504,
+ 0x7e00008f,
+ 0xbd000216,
+ 0x47fc8024,
+ 0x0002f602,
+ 0x2cf004bd,
+ 0x0320b601,
+ 0x024afc80,
+ 0xbd0002f6,
+ 0x01acf004,
+ 0x0b06a5f0,
+ 0x000c9800,
+ 0x0e010d98,
+ 0x013d7e00,
+ 0x7e080a00,
+ 0x7e0000ec,
+ 0xf400020a,
+ 0x0c0a1201,
+ 0x0000b87e,
+ 0x1d7e050f,
+ 0x02f40007,
+/* 0x093d: ctx_xfer_post */
+ 0x7e020f2d,
+ 0xbd0006ad,
+ 0x06fe7ef4,
+ 0x02277e00,
+ 0x06bc7e00,
+ 0x7ef4bd00,
+ 0xf40006ad,
+ 0x01981011,
+ 0x0511fd40,
+ 0x7e070bf4,
+/* 0x0967: ctx_xfer_no_post_mmio */
+/* 0x0967: ctx_xfer_done */
+ 0xf8000833,
0x00000000,
0x00000000,
0x00000000,
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c
index c578deb5867a..dd8f85b8b3a7 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c
@@ -26,9 +26,9 @@
#include "fuc/os.h"
#include <core/client.h>
-#include <core/option.h>
#include <core/firmware.h>
-#include <subdev/secboot.h>
+#include <core/option.h>
+#include <subdev/acr.h>
#include <subdev/fb.h>
#include <subdev/mc.h>
#include <subdev/pmu.h>
@@ -1636,7 +1636,7 @@ gf100_gr_intr(struct nvkm_gr *base)
static void
gf100_gr_init_fw(struct nvkm_falcon *falcon,
- struct gf100_gr_fuc *code, struct gf100_gr_fuc *data)
+ struct nvkm_blob *code, struct nvkm_blob *data)
{
nvkm_falcon_load_dmem(falcon, data->data, 0x0, data->size, 0);
nvkm_falcon_load_imem(falcon, code->data, 0x0, code->size, 0, 0, false);
@@ -1690,26 +1690,30 @@ gf100_gr_init_ctxctl_ext(struct gf100_gr *gr)
{
struct nvkm_subdev *subdev = &gr->base.engine.subdev;
struct nvkm_device *device = subdev->device;
- struct nvkm_secboot *sb = device->secboot;
- u32 secboot_mask = 0;
+ u32 lsf_mask = 0;
int ret;
/* load fuc microcode */
nvkm_mc_unk260(device, 0);
/* securely-managed falcons must be reset using secure boot */
- if (nvkm_secboot_is_managed(sb, NVKM_SECBOOT_FALCON_FECS))
- secboot_mask |= BIT(NVKM_SECBOOT_FALCON_FECS);
- else
- gf100_gr_init_fw(gr->fecs.falcon, &gr->fuc409c, &gr->fuc409d);
- if (nvkm_secboot_is_managed(sb, NVKM_SECBOOT_FALCON_GPCCS))
- secboot_mask |= BIT(NVKM_SECBOOT_FALCON_GPCCS);
- else
- gf100_gr_init_fw(gr->gpccs.falcon, &gr->fuc41ac, &gr->fuc41ad);
+ if (!nvkm_acr_managed_falcon(device, NVKM_ACR_LSF_FECS)) {
+ gf100_gr_init_fw(&gr->fecs.falcon, &gr->fecs.inst,
+ &gr->fecs.data);
+ } else {
+ lsf_mask |= BIT(NVKM_ACR_LSF_FECS);
+ }
- if (secboot_mask != 0) {
- int ret = nvkm_secboot_reset(sb, secboot_mask);
+ if (!nvkm_acr_managed_falcon(device, NVKM_ACR_LSF_GPCCS)) {
+ gf100_gr_init_fw(&gr->gpccs.falcon, &gr->gpccs.inst,
+ &gr->gpccs.data);
+ } else {
+ lsf_mask |= BIT(NVKM_ACR_LSF_GPCCS);
+ }
+
+ if (lsf_mask) {
+ ret = nvkm_acr_bootstrap_falcons(device, lsf_mask);
if (ret)
return ret;
}
@@ -1721,8 +1725,8 @@ gf100_gr_init_ctxctl_ext(struct gf100_gr *gr)
nvkm_wr32(device, 0x41a10c, 0x00000000);
nvkm_wr32(device, 0x40910c, 0x00000000);
- nvkm_falcon_start(gr->gpccs.falcon);
- nvkm_falcon_start(gr->fecs.falcon);
+ nvkm_falcon_start(&gr->gpccs.falcon);
+ nvkm_falcon_start(&gr->fecs.falcon);
if (nvkm_msec(device, 2000,
if (nvkm_rd32(device, 0x409800) & 0x00000001)
@@ -1784,18 +1788,18 @@ gf100_gr_init_ctxctl_int(struct gf100_gr *gr)
/* load HUB microcode */
nvkm_mc_unk260(device, 0);
- nvkm_falcon_load_dmem(gr->fecs.falcon,
+ nvkm_falcon_load_dmem(&gr->fecs.falcon,
gr->func->fecs.ucode->data.data, 0x0,
gr->func->fecs.ucode->data.size, 0);
- nvkm_falcon_load_imem(gr->fecs.falcon,
+ nvkm_falcon_load_imem(&gr->fecs.falcon,
gr->func->fecs.ucode->code.data, 0x0,
gr->func->fecs.ucode->code.size, 0, 0, false);
/* load GPC microcode */
- nvkm_falcon_load_dmem(gr->gpccs.falcon,
+ nvkm_falcon_load_dmem(&gr->gpccs.falcon,
gr->func->gpccs.ucode->data.data, 0x0,
gr->func->gpccs.ucode->data.size, 0);
- nvkm_falcon_load_imem(gr->gpccs.falcon,
+ nvkm_falcon_load_imem(&gr->gpccs.falcon,
gr->func->gpccs.ucode->code.data, 0x0,
gr->func->gpccs.ucode->code.size, 0, 0, false);
nvkm_mc_unk260(device, 1);
@@ -1941,17 +1945,6 @@ gf100_gr_oneinit(struct nvkm_gr *base)
struct nvkm_subdev *subdev = &gr->base.engine.subdev;
struct nvkm_device *device = subdev->device;
int i, j;
- int ret;
-
- ret = nvkm_falcon_v1_new(subdev, "FECS", 0x409000, &gr->fecs.falcon);
- if (ret)
- return ret;
-
- mutex_init(&gr->fecs.mutex);
-
- ret = nvkm_falcon_v1_new(subdev, "GPCCS", 0x41a000, &gr->gpccs.falcon);
- if (ret)
- return ret;
nvkm_pmu_pgob(device->pmu, false);
@@ -1992,11 +1985,11 @@ gf100_gr_init_(struct nvkm_gr *base)
nvkm_pmu_pgob(gr->base.engine.subdev.device->pmu, false);
- ret = nvkm_falcon_get(gr->fecs.falcon, subdev);
+ ret = nvkm_falcon_get(&gr->fecs.falcon, subdev);
if (ret)
return ret;
- ret = nvkm_falcon_get(gr->gpccs.falcon, subdev);
+ ret = nvkm_falcon_get(&gr->gpccs.falcon, subdev);
if (ret)
return ret;
@@ -2004,49 +1997,34 @@ gf100_gr_init_(struct nvkm_gr *base)
}
static int
-gf100_gr_fini_(struct nvkm_gr *base, bool suspend)
+gf100_gr_fini(struct nvkm_gr *base, bool suspend)
{
struct gf100_gr *gr = gf100_gr(base);
struct nvkm_subdev *subdev = &gr->base.engine.subdev;
- nvkm_falcon_put(gr->gpccs.falcon, subdev);
- nvkm_falcon_put(gr->fecs.falcon, subdev);
+ nvkm_falcon_put(&gr->gpccs.falcon, subdev);
+ nvkm_falcon_put(&gr->fecs.falcon, subdev);
return 0;
}
-void
-gf100_gr_dtor_fw(struct gf100_gr_fuc *fuc)
-{
- kfree(fuc->data);
- fuc->data = NULL;
-}
-
-static void
-gf100_gr_dtor_init(struct gf100_gr_pack *pack)
-{
- vfree(pack);
-}
-
void *
gf100_gr_dtor(struct nvkm_gr *base)
{
struct gf100_gr *gr = gf100_gr(base);
- if (gr->func->dtor)
- gr->func->dtor(gr);
kfree(gr->data);
- nvkm_falcon_del(&gr->gpccs.falcon);
- nvkm_falcon_del(&gr->fecs.falcon);
+ nvkm_falcon_dtor(&gr->gpccs.falcon);
+ nvkm_falcon_dtor(&gr->fecs.falcon);
- gf100_gr_dtor_fw(&gr->fuc409c);
- gf100_gr_dtor_fw(&gr->fuc409d);
- gf100_gr_dtor_fw(&gr->fuc41ac);
- gf100_gr_dtor_fw(&gr->fuc41ad);
+ nvkm_blob_dtor(&gr->fecs.inst);
+ nvkm_blob_dtor(&gr->fecs.data);
+ nvkm_blob_dtor(&gr->gpccs.inst);
+ nvkm_blob_dtor(&gr->gpccs.data);
- gf100_gr_dtor_init(gr->fuc_bundle);
- gf100_gr_dtor_init(gr->fuc_method);
- gf100_gr_dtor_init(gr->fuc_sw_ctx);
- gf100_gr_dtor_init(gr->fuc_sw_nonctx);
+ vfree(gr->bundle);
+ vfree(gr->method);
+ vfree(gr->sw_ctx);
+ vfree(gr->sw_nonctx);
return gr;
}
@@ -2056,7 +2034,7 @@ gf100_gr_ = {
.dtor = gf100_gr_dtor,
.oneinit = gf100_gr_oneinit,
.init = gf100_gr_init_,
- .fini = gf100_gr_fini_,
+ .fini = gf100_gr_fini,
.intr = gf100_gr_intr,
.units = gf100_gr_units,
.chan_new = gf100_gr_chan_new,
@@ -2067,87 +2045,24 @@ gf100_gr_ = {
.ctxsw.inst = gf100_gr_ctxsw_inst,
};
-int
-gf100_gr_ctor_fw_legacy(struct gf100_gr *gr, const char *fwname,
- struct gf100_gr_fuc *fuc, int ret)
-{
- struct nvkm_subdev *subdev = &gr->base.engine.subdev;
- struct nvkm_device *device = subdev->device;
- const struct firmware *fw;
- char f[32];
-
- /* see if this firmware has a legacy path */
- if (!strcmp(fwname, "fecs_inst"))
- fwname = "fuc409c";
- else if (!strcmp(fwname, "fecs_data"))
- fwname = "fuc409d";
- else if (!strcmp(fwname, "gpccs_inst"))
- fwname = "fuc41ac";
- else if (!strcmp(fwname, "gpccs_data"))
- fwname = "fuc41ad";
- else {
- /* nope, let's just return the error we got */
- nvkm_error(subdev, "failed to load %s\n", fwname);
- return ret;
- }
-
- /* yes, try to load from the legacy path */
- nvkm_debug(subdev, "%s: falling back to legacy path\n", fwname);
-
- snprintf(f, sizeof(f), "nouveau/nv%02x_%s", device->chipset, fwname);
- ret = request_firmware(&fw, f, device->dev);
- if (ret) {
- snprintf(f, sizeof(f), "nouveau/%s", fwname);
- ret = request_firmware(&fw, f, device->dev);
- if (ret) {
- nvkm_error(subdev, "failed to load %s\n", fwname);
- return ret;
- }
- }
-
- fuc->size = fw->size;
- fuc->data = kmemdup(fw->data, fuc->size, GFP_KERNEL);
- release_firmware(fw);
- return (fuc->data != NULL) ? 0 : -ENOMEM;
-}
-
-int
-gf100_gr_ctor_fw(struct gf100_gr *gr, const char *fwname,
- struct gf100_gr_fuc *fuc)
-{
- const struct firmware *fw;
- int ret;
-
- ret = nvkm_firmware_get(&gr->base.engine.subdev, fwname, &fw);
- if (ret) {
- ret = gf100_gr_ctor_fw_legacy(gr, fwname, fuc, ret);
- if (ret)
- return -ENODEV;
- return 0;
- }
-
- fuc->size = fw->size;
- fuc->data = kmemdup(fw->data, fuc->size, GFP_KERNEL);
- nvkm_firmware_put(fw);
- return (fuc->data != NULL) ? 0 : -ENOMEM;
-}
-
-int
-gf100_gr_ctor(const struct gf100_gr_func *func, struct nvkm_device *device,
- int index, struct gf100_gr *gr)
-{
- gr->func = func;
- gr->firmware = nvkm_boolopt(device->cfgopt, "NvGrUseFW",
- func->fecs.ucode == NULL);
-
- return nvkm_gr_ctor(&gf100_gr_, device, index,
- gr->firmware || func->fecs.ucode != NULL,
- &gr->base);
-}
+static const struct nvkm_falcon_func
+gf100_gr_flcn = {
+ .fbif = 0x600,
+ .load_imem = nvkm_falcon_v1_load_imem,
+ .load_dmem = nvkm_falcon_v1_load_dmem,
+ .read_dmem = nvkm_falcon_v1_read_dmem,
+ .bind_context = nvkm_falcon_v1_bind_context,
+ .wait_for_halt = nvkm_falcon_v1_wait_for_halt,
+ .clear_interrupt = nvkm_falcon_v1_clear_interrupt,
+ .set_start_addr = nvkm_falcon_v1_set_start_addr,
+ .start = nvkm_falcon_v1_start,
+ .enable = nvkm_falcon_v1_enable,
+ .disable = nvkm_falcon_v1_disable,
+};
int
-gf100_gr_new_(const struct gf100_gr_func *func, struct nvkm_device *device,
- int index, struct nvkm_gr **pgr)
+gf100_gr_new_(const struct gf100_gr_fwif *fwif,
+ struct nvkm_device *device, int index, struct nvkm_gr **pgr)
{
struct gf100_gr *gr;
int ret;
@@ -2156,22 +2071,49 @@ gf100_gr_new_(const struct gf100_gr_func *func, struct nvkm_device *device,
return -ENOMEM;
*pgr = &gr->base;
- ret = gf100_gr_ctor(func, device, index, gr);
+ ret = nvkm_gr_ctor(&gf100_gr_, device, index, true, &gr->base);
if (ret)
return ret;
- if (gr->firmware) {
- if (gf100_gr_ctor_fw(gr, "fecs_inst", &gr->fuc409c) ||
- gf100_gr_ctor_fw(gr, "fecs_data", &gr->fuc409d) ||
- gf100_gr_ctor_fw(gr, "gpccs_inst", &gr->fuc41ac) ||
- gf100_gr_ctor_fw(gr, "gpccs_data", &gr->fuc41ad))
- return -ENODEV;
- }
+ fwif = nvkm_firmware_load(&gr->base.engine.subdev, fwif, "Gr", gr);
+ if (IS_ERR(fwif))
+ return -ENODEV;
+
+ gr->func = fwif->func;
+
+ ret = nvkm_falcon_ctor(&gf100_gr_flcn, &gr->base.engine.subdev,
+ "fecs", 0x409000, &gr->fecs.falcon);
+ if (ret)
+ return ret;
+
+ mutex_init(&gr->fecs.mutex);
+
+ ret = nvkm_falcon_ctor(&gf100_gr_flcn, &gr->base.engine.subdev,
+ "gpccs", 0x41a000, &gr->gpccs.falcon);
+ if (ret)
+ return ret;
return 0;
}
void
+gf100_gr_init_num_tpc_per_gpc(struct gf100_gr *gr, bool pd, bool ds)
+{
+ struct nvkm_device *device = gr->base.engine.subdev.device;
+ int gpc, i, j;
+ u32 data;
+
+ for (gpc = 0, i = 0; i < 4; i++) {
+ for (data = 0, j = 0; j < 8 && gpc < gr->gpc_nr; j++, gpc++)
+ data |= gr->tpc_nr[gpc] << (j * 4);
+ if (pd)
+ nvkm_wr32(device, 0x406028 + (i * 4), data);
+ if (ds)
+ nvkm_wr32(device, 0x405870 + (i * 4), data);
+ }
+}
+
+void
gf100_gr_init_400054(struct gf100_gr *gr)
{
nvkm_wr32(gr->base.engine.subdev.device, 0x400054, 0x34ce3464);
@@ -2295,8 +2237,8 @@ gf100_gr_init(struct gf100_gr *gr)
gr->func->init_gpc_mmu(gr);
- if (gr->fuc_sw_nonctx)
- gf100_gr_mmio(gr, gr->fuc_sw_nonctx);
+ if (gr->sw_nonctx)
+ gf100_gr_mmio(gr, gr->sw_nonctx);
else
gf100_gr_mmio(gr, gr->func->mmio);
@@ -2320,6 +2262,8 @@ gf100_gr_init(struct gf100_gr *gr)
gr->func->init_bios_2(gr);
if (gr->func->init_swdx_pes_mask)
gr->func->init_swdx_pes_mask(gr);
+ if (gr->func->init_fs)
+ gr->func->init_fs(gr);
nvkm_wr32(device, 0x400500, 0x00010001);
@@ -2338,8 +2282,8 @@ gf100_gr_init(struct gf100_gr *gr)
if (gr->func->init_40601c)
gr->func->init_40601c(gr);
- nvkm_wr32(device, 0x404490, 0xc0000000);
nvkm_wr32(device, 0x406018, 0xc0000000);
+ nvkm_wr32(device, 0x404490, 0xc0000000);
if (gr->func->init_sked_hww_esr)
gr->func->init_sked_hww_esr(gr);
@@ -2454,7 +2398,66 @@ gf100_gr = {
};
int
+gf100_gr_nofw(struct gf100_gr *gr, int ver, const struct gf100_gr_fwif *fwif)
+{
+ gr->firmware = false;
+ return 0;
+}
+
+static int
+gf100_gr_load_fw(struct gf100_gr *gr, const char *name,
+ struct nvkm_blob *blob)
+{
+ struct nvkm_subdev *subdev = &gr->base.engine.subdev;
+ struct nvkm_device *device = subdev->device;
+ const struct firmware *fw;
+ char f[32];
+ int ret;
+
+ snprintf(f, sizeof(f), "nouveau/nv%02x_%s", device->chipset, name);
+ ret = request_firmware(&fw, f, device->dev);
+ if (ret) {
+ snprintf(f, sizeof(f), "nouveau/%s", name);
+ ret = request_firmware(&fw, f, device->dev);
+ if (ret) {
+ nvkm_error(subdev, "failed to load %s\n", name);
+ return ret;
+ }
+ }
+
+ blob->size = fw->size;
+ blob->data = kmemdup(fw->data, blob->size, GFP_KERNEL);
+ release_firmware(fw);
+ return (blob->data != NULL) ? 0 : -ENOMEM;
+}
+
+int
+gf100_gr_load(struct gf100_gr *gr, int ver, const struct gf100_gr_fwif *fwif)
+{
+ struct nvkm_device *device = gr->base.engine.subdev.device;
+
+ if (!nvkm_boolopt(device->cfgopt, "NvGrUseFW", false))
+ return -EINVAL;
+
+ if (gf100_gr_load_fw(gr, "fuc409c", &gr->fecs.inst) ||
+ gf100_gr_load_fw(gr, "fuc409d", &gr->fecs.data) ||
+ gf100_gr_load_fw(gr, "fuc41ac", &gr->gpccs.inst) ||
+ gf100_gr_load_fw(gr, "fuc41ad", &gr->gpccs.data))
+ return -ENOENT;
+
+ gr->firmware = true;
+ return 0;
+}
+
+static const struct gf100_gr_fwif
+gf100_gr_fwif[] = {
+ { -1, gf100_gr_load, &gf100_gr },
+ { -1, gf100_gr_nofw, &gf100_gr },
+ {}
+};
+
+int
gf100_gr_new(struct nvkm_device *device, int index, struct nvkm_gr **pgr)
{
- return gf100_gr_new_(&gf100_gr, device, index, pgr);
+ return gf100_gr_new_(gf100_gr_fwif, device, index, pgr);
}
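A minimal, hypothetical model of the gf100_gr_fwif[] pattern introduced above (names are illustrative, not the kernel's API): the table is walked in order, the first entry whose load() callback succeeds selects the gf100_gr_func variant, and version -1 acts as "no specific version required":

#include <stdio.h>

struct gr_fwif {
	int version;			/* -1: wildcard */
	int (*load)(int ver);		/* returns 0 on success */
	const char *func;		/* stands in for &gfXXX_gr */
};

static int load_fw(int ver)   { return -1; }	/* pretend fw files missing */
static int load_nofw(int ver) { return 0; }	/* built-in ucode, always ok */

static const struct gr_fwif gf100_fwif[] = {
	{ -1, load_fw,   "gf100_gr, external firmware" },
	{ -1, load_nofw, "gf100_gr, built-in ucode" },
	{ 0 }
};

int main(void)
{
	for (const struct gr_fwif *p = gf100_fwif; p->load; p++) {
		if (p->load(p->version) == 0) {
			printf("selected: %s\n", p->func);
			return 0;
		}
	}
	fprintf(stderr, "no variant matched (-ENODEV)\n");
	return 1;
}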
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.h b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.h
index fafdd0bbea9b..88bcb57c2e07 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.h
@@ -31,6 +31,8 @@
#include <subdev/mmu.h>
#include <engine/falcon.h>
+struct nvkm_acr_lsfw;
+
#define GPC_MAX 32
#define TPC_MAX_PER_GPC 8
#define TPC_MAX (GPC_MAX * TPC_MAX_PER_GPC)
@@ -55,11 +57,6 @@ struct gf100_gr_mmio {
int buffer;
};
-struct gf100_gr_fuc {
- u32 *data;
- u32 size;
-};
-
struct gf100_gr_zbc_color {
u32 format;
u32 ds[4];
@@ -83,29 +80,30 @@ struct gf100_gr {
struct nvkm_gr base;
struct {
- struct nvkm_falcon *falcon;
+ struct nvkm_falcon falcon;
+ struct nvkm_blob inst;
+ struct nvkm_blob data;
+
struct mutex mutex;
u32 disable;
} fecs;
struct {
- struct nvkm_falcon *falcon;
+ struct nvkm_falcon falcon;
+ struct nvkm_blob inst;
+ struct nvkm_blob data;
} gpccs;
- struct gf100_gr_fuc fuc409c;
- struct gf100_gr_fuc fuc409d;
- struct gf100_gr_fuc fuc41ac;
- struct gf100_gr_fuc fuc41ad;
bool firmware;
/*
* Used if the register packs are loaded from NVIDIA fw instead of
* using hardcoded arrays. To be allocated with vzalloc().
*/
- struct gf100_gr_pack *fuc_sw_nonctx;
- struct gf100_gr_pack *fuc_sw_ctx;
- struct gf100_gr_pack *fuc_bundle;
- struct gf100_gr_pack *fuc_method;
+ struct gf100_gr_pack *sw_nonctx;
+ struct gf100_gr_pack *sw_ctx;
+ struct gf100_gr_pack *bundle;
+ struct gf100_gr_pack *method;
struct gf100_gr_zbc_color zbc_color[NVKM_LTC_MAX_ZBC_CNT];
struct gf100_gr_zbc_depth zbc_depth[NVKM_LTC_MAX_ZBC_CNT];
@@ -140,12 +138,6 @@ struct gf100_gr {
u32 size_pm;
};
-int gf100_gr_ctor(const struct gf100_gr_func *, struct nvkm_device *,
- int, struct gf100_gr *);
-int gf100_gr_new_(const struct gf100_gr_func *, struct nvkm_device *,
- int, struct nvkm_gr **);
-void *gf100_gr_dtor(struct nvkm_gr *);
-
int gf100_gr_fecs_bind_pointer(struct gf100_gr *, u32 inst);
struct gf100_gr_func_zbc {
@@ -157,7 +149,6 @@ struct gf100_gr_func_zbc {
};
struct gf100_gr_func {
- void (*dtor)(struct gf100_gr *);
void (*oneinit_tiles)(struct gf100_gr *);
void (*oneinit_sm_id)(struct gf100_gr *);
int (*init)(struct gf100_gr *);
@@ -171,6 +162,7 @@ struct gf100_gr_func {
void (*init_rop_active_fbps)(struct gf100_gr *);
void (*init_bios_2)(struct gf100_gr *);
void (*init_swdx_pes_mask)(struct gf100_gr *);
+ void (*init_fs)(struct gf100_gr *);
void (*init_fecs_exceptions)(struct gf100_gr *);
void (*init_ds_hww_esr_2)(struct gf100_gr *);
void (*init_40601c)(struct gf100_gr *);
@@ -217,6 +209,7 @@ void gf100_gr_init_419eb4(struct gf100_gr *);
void gf100_gr_init_tex_hww_esr(struct gf100_gr *, int, int);
void gf100_gr_init_shader_exceptions(struct gf100_gr *, int, int);
void gf100_gr_init_400054(struct gf100_gr *);
+void gf100_gr_init_num_tpc_per_gpc(struct gf100_gr *, bool, bool);
extern const struct gf100_gr_func_zbc gf100_gr_zbc;
void gf117_gr_init_zcull(struct gf100_gr *);
@@ -245,10 +238,18 @@ void gp100_gr_init_fecs_exceptions(struct gf100_gr *);
void gp100_gr_init_shader_exceptions(struct gf100_gr *, int, int);
void gp100_gr_zbc_clear_color(struct gf100_gr *, int);
void gp100_gr_zbc_clear_depth(struct gf100_gr *, int);
+extern const struct gf100_gr_func_zbc gp100_gr_zbc;
void gp102_gr_init_swdx_pes_mask(struct gf100_gr *);
extern const struct gf100_gr_func_zbc gp102_gr_zbc;
+extern const struct gf100_gr_func gp107_gr;
+
+void gv100_gr_init_419bd8(struct gf100_gr *);
+void gv100_gr_init_504430(struct gf100_gr *, int, int);
+void gv100_gr_init_shader_exceptions(struct gf100_gr *, int, int);
+void gv100_gr_trap_mp(struct gf100_gr *, int, int);
+
#define gf100_gr_chan(p) container_of((p), struct gf100_gr_chan, object)
#include <core/object.h>
@@ -269,9 +270,6 @@ struct gf100_gr_chan {
void gf100_gr_ctxctl_debug(struct gf100_gr *);
-void gf100_gr_dtor_fw(struct gf100_gr_fuc *);
-int gf100_gr_ctor_fw(struct gf100_gr *, const char *,
- struct gf100_gr_fuc *);
u64 gf100_gr_units(struct nvkm_gr *);
void gf100_gr_zbc_init(struct gf100_gr *);
@@ -294,8 +292,8 @@ struct gf100_gr_pack {
for (init = pack->init; init && init->count; init++)
struct gf100_gr_ucode {
- struct gf100_gr_fuc code;
- struct gf100_gr_fuc data;
+ struct nvkm_blob code;
+ struct nvkm_blob data;
};
extern struct gf100_gr_ucode gf100_gr_fecs_ucode;
@@ -310,17 +308,6 @@ void gf100_gr_icmd(struct gf100_gr *, const struct gf100_gr_pack *);
void gf100_gr_mthd(struct gf100_gr *, const struct gf100_gr_pack *);
int gf100_gr_init_ctxctl(struct gf100_gr *);
-/* external bundles loading functions */
-int gk20a_gr_av_to_init(struct gf100_gr *, const char *,
- struct gf100_gr_pack **);
-int gk20a_gr_aiv_to_init(struct gf100_gr *, const char *,
- struct gf100_gr_pack **);
-int gk20a_gr_av_to_method(struct gf100_gr *, const char *,
- struct gf100_gr_pack **);
-
-int gm200_gr_new_(const struct gf100_gr_func *, struct nvkm_device *, int,
- struct nvkm_gr **);
-
/* register init value lists */
extern const struct gf100_gr_init gf100_gr_init_main_0[];
@@ -403,4 +390,31 @@ extern const struct gf100_gr_init gm107_gr_init_cbm_0[];
void gm107_gr_init_bios(struct gf100_gr *);
void gm200_gr_init_gpc_mmu(struct gf100_gr *);
+
+struct gf100_gr_fwif {
+ int version;
+ int (*load)(struct gf100_gr *, int ver, const struct gf100_gr_fwif *);
+ const struct gf100_gr_func *func;
+ const struct nvkm_acr_lsf_func *fecs;
+ const struct nvkm_acr_lsf_func *gpccs;
+};
+
+int gf100_gr_load(struct gf100_gr *, int, const struct gf100_gr_fwif *);
+int gf100_gr_nofw(struct gf100_gr *, int, const struct gf100_gr_fwif *);
+
+int gk20a_gr_load_sw(struct gf100_gr *, const char *path, int ver);
+
+int gm200_gr_load(struct gf100_gr *, int, const struct gf100_gr_fwif *);
+extern const struct nvkm_acr_lsf_func gm200_gr_gpccs_acr;
+extern const struct nvkm_acr_lsf_func gm200_gr_fecs_acr;
+
+extern const struct nvkm_acr_lsf_func gm20b_gr_fecs_acr;
+void gm20b_gr_acr_bld_write(struct nvkm_acr *, u32, struct nvkm_acr_lsfw *);
+void gm20b_gr_acr_bld_patch(struct nvkm_acr *, u32, s64);
+
+extern const struct nvkm_acr_lsf_func gp108_gr_gpccs_acr;
+extern const struct nvkm_acr_lsf_func gp108_gr_fecs_acr;
+
+int gf100_gr_new_(const struct gf100_gr_fwif *, struct nvkm_device *, int,
+ struct nvkm_gr **);
#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf104.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf104.c
index 42c2fd9fc04e..0536fe8b2b92 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf104.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf104.c
@@ -144,8 +144,15 @@ gf104_gr = {
}
};
+static const struct gf100_gr_fwif
+gf104_gr_fwif[] = {
+ { -1, gf100_gr_load, &gf104_gr },
+ { -1, gf100_gr_nofw, &gf104_gr },
+ {}
+};
+
int
gf104_gr_new(struct nvkm_device *device, int index, struct nvkm_gr **pgr)
{
- return gf100_gr_new_(&gf104_gr, device, index, pgr);
+ return gf100_gr_new_(gf104_gr_fwif, device, index, pgr);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf108.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf108.c
index 4731a460adc7..14284b06112f 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf108.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf108.c
@@ -143,8 +143,15 @@ gf108_gr = {
}
};
+const struct gf100_gr_fwif
+gf108_gr_fwif[] = {
+ { -1, gf100_gr_load, &gf108_gr },
+ { -1, gf100_gr_nofw, &gf108_gr },
+ {}
+};
+
int
gf108_gr_new(struct nvkm_device *device, int index, struct nvkm_gr **pgr)
{
- return gf100_gr_new_(&gf108_gr, device, index, pgr);
+ return gf100_gr_new_(gf108_gr_fwif, device, index, pgr);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf110.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf110.c
index cdf759c8cd7f..280752551a3a 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf110.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf110.c
@@ -119,8 +119,15 @@ gf110_gr = {
}
};
+static const struct gf100_gr_fwif
+gf110_gr_fwif[] = {
+ { -1, gf100_gr_load, &gf110_gr },
+ { -1, gf100_gr_nofw, &gf110_gr },
+ {}
+};
+
int
gf110_gr_new(struct nvkm_device *device, int index, struct nvkm_gr **pgr)
{
- return gf100_gr_new_(&gf110_gr, device, index, pgr);
+ return gf100_gr_new_(gf110_gr_fwif, device, index, pgr);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf117.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf117.c
index a4158f84c649..235c3fbe4b95 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf117.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf117.c
@@ -184,8 +184,15 @@ gf117_gr = {
}
};
+static const struct gf100_gr_fwif
+gf117_gr_fwif[] = {
+ { -1, gf100_gr_load, &gf117_gr },
+ { -1, gf100_gr_nofw, &gf117_gr },
+ {}
+};
+
int
gf117_gr_new(struct nvkm_device *device, int index, struct nvkm_gr **pgr)
{
- return gf100_gr_new_(&gf117_gr, device, index, pgr);
+ return gf100_gr_new_(gf117_gr_fwif, device, index, pgr);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf119.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf119.c
index 4197844870b3..7eac385ece97 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf119.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf119.c
@@ -210,8 +210,15 @@ gf119_gr = {
}
};
+static const struct gf100_gr_fwif
+gf119_gr_fwif[] = {
+ { -1, gf100_gr_load, &gf119_gr },
+ { -1, gf100_gr_nofw, &gf119_gr },
+ {}
+};
+
int
gf119_gr_new(struct nvkm_device *device, int index, struct nvkm_gr **pgr)
{
- return gf100_gr_new_(&gf119_gr, device, index, pgr);
+ return gf100_gr_new_(gf119_gr_fwif, device, index, pgr);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk104.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk104.c
index 477fee3e3715..89f51d76082b 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk104.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk104.c
@@ -489,8 +489,15 @@ gk104_gr = {
}
};
+static const struct gf100_gr_fwif
+gk104_gr_fwif[] = {
+ { -1, gf100_gr_load, &gk104_gr },
+ { -1, gf100_gr_nofw, &gk104_gr },
+ {}
+};
+
int
gk104_gr_new(struct nvkm_device *device, int index, struct nvkm_gr **pgr)
{
- return gf100_gr_new_(&gk104_gr, device, index, pgr);
+ return gf100_gr_new_(gk104_gr_fwif, device, index, pgr);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk110.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk110.c
index 7cd628c84e07..735f05e54d62 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk110.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk110.c
@@ -385,8 +385,15 @@ gk110_gr = {
}
};
+static const struct gf100_gr_fwif
+gk110_gr_fwif[] = {
+ { -1, gf100_gr_load, &gk110_gr },
+ { -1, gf100_gr_nofw, &gk110_gr },
+ {}
+};
+
int
gk110_gr_new(struct nvkm_device *device, int index, struct nvkm_gr **pgr)
{
- return gf100_gr_new_(&gk110_gr, device, index, pgr);
+ return gf100_gr_new_(gk110_gr_fwif, device, index, pgr);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk110b.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk110b.c
index a38faa215635..adc971be8f3b 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk110b.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk110b.c
@@ -136,8 +136,15 @@ gk110b_gr = {
}
};
+static const struct gf100_gr_fwif
+gk110b_gr_fwif[] = {
+ { -1, gf100_gr_load, &gk110b_gr },
+ { -1, gf100_gr_nofw, &gk110b_gr },
+ {}
+};
+
int
gk110b_gr_new(struct nvkm_device *device, int index, struct nvkm_gr **pgr)
{
- return gf100_gr_new_(&gk110b_gr, device, index, pgr);
+ return gf100_gr_new_(gk110b_gr_fwif, device, index, pgr);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk208.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk208.c
index 58456660e603..aa0eff6795ac 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk208.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk208.c
@@ -194,8 +194,15 @@ gk208_gr = {
}
};
+static const struct gf100_gr_fwif
+gk208_gr_fwif[] = {
+ { -1, gf100_gr_load, &gk208_gr },
+ { -1, gf100_gr_nofw, &gk208_gr },
+ {}
+};
+
int
gk208_gr_new(struct nvkm_device *device, int index, struct nvkm_gr **pgr)
{
- return gf100_gr_new_(&gk208_gr, device, index, pgr);
+ return gf100_gr_new_(gk208_gr_fwif, device, index, pgr);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk20a.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk20a.c
index 500cb08dd608..4209b24a46d7 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk20a.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk20a.c
@@ -22,6 +22,7 @@
#include "gf100.h"
#include "ctxgf100.h"
+#include <core/firmware.h>
#include <subdev/timer.h>
#include <nvif/class.h>
@@ -33,21 +34,22 @@ struct gk20a_fw_av
};
int
-gk20a_gr_av_to_init(struct gf100_gr *gr, const char *fw_name,
- struct gf100_gr_pack **ppack)
+gk20a_gr_av_to_init(struct gf100_gr *gr, const char *path, const char *name,
+ int ver, struct gf100_gr_pack **ppack)
{
- struct gf100_gr_fuc fuc;
+ struct nvkm_subdev *subdev = &gr->base.engine.subdev;
+ struct nvkm_blob blob;
struct gf100_gr_init *init;
struct gf100_gr_pack *pack;
int nent;
int ret;
int i;
- ret = gf100_gr_ctor_fw(gr, fw_name, &fuc);
+ ret = nvkm_firmware_load_blob(subdev, path, name, ver, &blob);
if (ret)
return ret;
- nent = (fuc.size / sizeof(struct gk20a_fw_av));
+ nent = (blob.size / sizeof(struct gk20a_fw_av));
pack = vzalloc((sizeof(*pack) * 2) + (sizeof(*init) * (nent + 1)));
if (!pack) {
ret = -ENOMEM;
@@ -59,7 +61,7 @@ gk20a_gr_av_to_init(struct gf100_gr *gr, const char *fw_name,
for (i = 0; i < nent; i++) {
struct gf100_gr_init *ent = &init[i];
- struct gk20a_fw_av *av = &((struct gk20a_fw_av *)fuc.data)[i];
+ struct gk20a_fw_av *av = &((struct gk20a_fw_av *)blob.data)[i];
ent->addr = av->addr;
ent->data = av->data;
@@ -70,7 +72,7 @@ gk20a_gr_av_to_init(struct gf100_gr *gr, const char *fw_name,
*ppack = pack;
end:
- gf100_gr_dtor_fw(&fuc);
+ nvkm_blob_dtor(&blob);
return ret;
}
@@ -82,21 +84,22 @@ struct gk20a_fw_aiv
};
int
-gk20a_gr_aiv_to_init(struct gf100_gr *gr, const char *fw_name,
- struct gf100_gr_pack **ppack)
+gk20a_gr_aiv_to_init(struct gf100_gr *gr, const char *path, const char *name,
+ int ver, struct gf100_gr_pack **ppack)
{
- struct gf100_gr_fuc fuc;
+ struct nvkm_subdev *subdev = &gr->base.engine.subdev;
+ struct nvkm_blob blob;
struct gf100_gr_init *init;
struct gf100_gr_pack *pack;
int nent;
int ret;
int i;
- ret = gf100_gr_ctor_fw(gr, fw_name, &fuc);
+ ret = nvkm_firmware_load_blob(subdev, path, name, ver, &blob);
if (ret)
return ret;
- nent = (fuc.size / sizeof(struct gk20a_fw_aiv));
+ nent = (blob.size / sizeof(struct gk20a_fw_aiv));
pack = vzalloc((sizeof(*pack) * 2) + (sizeof(*init) * (nent + 1)));
if (!pack) {
ret = -ENOMEM;
@@ -108,7 +111,7 @@ gk20a_gr_aiv_to_init(struct gf100_gr *gr, const char *fw_name,
for (i = 0; i < nent; i++) {
struct gf100_gr_init *ent = &init[i];
- struct gk20a_fw_aiv *av = &((struct gk20a_fw_aiv *)fuc.data)[i];
+ struct gk20a_fw_aiv *av = &((struct gk20a_fw_aiv *)blob.data)[i];
ent->addr = av->addr;
ent->data = av->data;
@@ -119,15 +122,16 @@ gk20a_gr_aiv_to_init(struct gf100_gr *gr, const char *fw_name,
*ppack = pack;
end:
- gf100_gr_dtor_fw(&fuc);
+ nvkm_blob_dtor(&blob);
return ret;
}
int
-gk20a_gr_av_to_method(struct gf100_gr *gr, const char *fw_name,
- struct gf100_gr_pack **ppack)
+gk20a_gr_av_to_method(struct gf100_gr *gr, const char *path, const char *name,
+ int ver, struct gf100_gr_pack **ppack)
{
- struct gf100_gr_fuc fuc;
+ struct nvkm_subdev *subdev = &gr->base.engine.subdev;
+ struct nvkm_blob blob;
struct gf100_gr_init *init;
struct gf100_gr_pack *pack;
/* We don't suppose we will initialize more than 16 classes here... */
@@ -137,29 +141,30 @@ gk20a_gr_av_to_method(struct gf100_gr *gr, const char *fw_name,
int ret;
int i;
- ret = gf100_gr_ctor_fw(gr, fw_name, &fuc);
+ ret = nvkm_firmware_load_blob(subdev, path, name, ver, &blob);
if (ret)
return ret;
- nent = (fuc.size / sizeof(struct gk20a_fw_av));
+ nent = (blob.size / sizeof(struct gk20a_fw_av));
- pack = vzalloc((sizeof(*pack) * max_classes) +
- (sizeof(*init) * (nent + 1)));
+ pack = vzalloc((sizeof(*pack) * (max_classes + 1)) +
+ (sizeof(*init) * (nent + max_classes + 1)));
if (!pack) {
ret = -ENOMEM;
goto end;
}
- init = (void *)(pack + max_classes);
+ init = (void *)(pack + max_classes + 1);
- for (i = 0; i < nent; i++) {
- struct gf100_gr_init *ent = &init[i];
- struct gk20a_fw_av *av = &((struct gk20a_fw_av *)fuc.data)[i];
+ for (i = 0; i < nent; i++, init++) {
+ struct gk20a_fw_av *av = &((struct gk20a_fw_av *)blob.data)[i];
u32 class = av->addr & 0xffff;
u32 addr = (av->addr & 0xffff0000) >> 14;
if (prevclass != class) {
- pack[classidx].init = ent;
+ if (prevclass) /* Add terminator to the method list. */
+ init++;
+ pack[classidx].init = init;
pack[classidx].type = class;
prevclass = class;
if (++classidx >= max_classes) {
@@ -169,16 +174,16 @@ gk20a_gr_av_to_method(struct gf100_gr *gr, const char *fw_name,
}
}
- ent->addr = addr;
- ent->data = av->data;
- ent->count = 1;
- ent->pitch = 1;
+ init->addr = addr;
+ init->data = av->data;
+ init->count = 1;
+ init->pitch = 1;
}
*ppack = pack;
end:
- gf100_gr_dtor_fw(&fuc);
+ nvkm_blob_dtor(&blob);
return ret;
}
@@ -224,7 +229,7 @@ gk20a_gr_init(struct gf100_gr *gr)
/* Clear SCC RAM */
nvkm_wr32(device, 0x40802c, 0x1);
- gf100_gr_mmio(gr, gr->fuc_sw_nonctx);
+ gf100_gr_mmio(gr, gr->sw_nonctx);
ret = gk20a_gr_wait_mem_scrubbing(gr);
if (ret)
@@ -303,40 +308,45 @@ gk20a_gr = {
};
int
-gk20a_gr_new(struct nvkm_device *device, int index, struct nvkm_gr **pgr)
+gk20a_gr_load_sw(struct gf100_gr *gr, const char *path, int ver)
{
- struct gf100_gr *gr;
- int ret;
+ if (gk20a_gr_av_to_init(gr, path, "sw_nonctx", ver, &gr->sw_nonctx) ||
+ gk20a_gr_aiv_to_init(gr, path, "sw_ctx", ver, &gr->sw_ctx) ||
+ gk20a_gr_av_to_init(gr, path, "sw_bundle_init", ver, &gr->bundle) ||
+ gk20a_gr_av_to_method(gr, path, "sw_method_init", ver, &gr->method))
+ return -ENOENT;
- if (!(gr = kzalloc(sizeof(*gr), GFP_KERNEL)))
- return -ENOMEM;
- *pgr = &gr->base;
-
- ret = gf100_gr_ctor(&gk20a_gr, device, index, gr);
- if (ret)
- return ret;
+ return 0;
+}
- if (gf100_gr_ctor_fw(gr, "fecs_inst", &gr->fuc409c) ||
- gf100_gr_ctor_fw(gr, "fecs_data", &gr->fuc409d) ||
- gf100_gr_ctor_fw(gr, "gpccs_inst", &gr->fuc41ac) ||
- gf100_gr_ctor_fw(gr, "gpccs_data", &gr->fuc41ad))
- return -ENODEV;
+static int
+gk20a_gr_load(struct gf100_gr *gr, int ver, const struct gf100_gr_fwif *fwif)
+{
+ struct nvkm_subdev *subdev = &gr->base.engine.subdev;
- ret = gk20a_gr_av_to_init(gr, "sw_nonctx", &gr->fuc_sw_nonctx);
- if (ret)
- return ret;
+ if (nvkm_firmware_load_blob(subdev, "", "fecs_inst", ver,
+ &gr->fecs.inst) ||
+ nvkm_firmware_load_blob(subdev, "", "fecs_data", ver,
+ &gr->fecs.data) ||
+ nvkm_firmware_load_blob(subdev, "", "gpccs_inst", ver,
+ &gr->gpccs.inst) ||
+ nvkm_firmware_load_blob(subdev, "", "gpccs_data", ver,
+ &gr->gpccs.data))
+ return -ENOENT;
- ret = gk20a_gr_aiv_to_init(gr, "sw_ctx", &gr->fuc_sw_ctx);
- if (ret)
- return ret;
+ gr->firmware = true;
- ret = gk20a_gr_av_to_init(gr, "sw_bundle_init", &gr->fuc_bundle);
- if (ret)
- return ret;
+ return gk20a_gr_load_sw(gr, "", ver);
+}
- ret = gk20a_gr_av_to_method(gr, "sw_method_init", &gr->fuc_method);
- if (ret)
- return ret;
+static const struct gf100_gr_fwif
+gk20a_gr_fwif[] = {
+ { -1, gk20a_gr_load, &gk20a_gr },
+ {}
+};
- return 0;
+int
+gk20a_gr_new(struct nvkm_device *device, int index, struct nvkm_gr **pgr)
+{
+ return gf100_gr_new_(gk20a_gr_fwif, device, index, pgr);
}
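The reworked gk20a_gr_av_to_method() above reserves a zeroed terminating entry after each class's method list, which is why the allocation grows to (max_classes + 1) packs and (nent + max_classes + 1) inits. A self-contained sketch of that repacking logic; the struct names mimic, but are not, the kernel's gf100_gr types:

#include <stdio.h>
#include <stdlib.h>

struct av   { unsigned addr, data; };		/* (addr, data) pair from fw */
struct init { unsigned addr, data, count; };	/* count == 0 terminates */
struct pack { unsigned type; const struct init *init; };

int main(void)
{
	/* class id in addr[15:0]; method offset in addr[31:16] >> 14 */
	static const struct av av[] = {
		{ 0x0100902d, 0x11 }, { 0x0104902d, 0x22 },  /* class 0x902d */
		{ 0x0200a297, 0x33 },			      /* class 0xa297 */
	};
	enum { NENT = 3, MAXCLS = 16 };

	struct pack *pack  = calloc(MAXCLS + 1, sizeof(*pack));
	struct init *inits = calloc(NENT + MAXCLS + 1, sizeof(*inits));
	struct init *it = inits;
	unsigned prevclass = 0;
	int classidx = 0;

	for (int i = 0; i < NENT; i++, it++) {
		unsigned class = av[i].addr & 0xffff;

		if (prevclass != class) {
			if (prevclass)	/* skip one: zeroed terminator */
				it++;
			pack[classidx].init = it;
			pack[classidx].type = class;
			prevclass = class;
			classidx++;	/* overflow check elided here */
		}
		it->addr  = (av[i].addr & 0xffff0000) >> 14;
		it->data  = av[i].data;
		it->count = 1;
	}

	for (int c = 0; c < classidx; c++)
		for (const struct init *e = pack[c].init; e->count; e++)
			printf("class %04x mthd %04x <- %#x\n",
			       pack[c].type, e->addr, e->data);

	free(pack);
	free(inits);
	return 0;
}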
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gm107.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gm107.c
index 92e31d397207..09bb78ba9d00 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gm107.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gm107.c
@@ -429,8 +429,15 @@ gm107_gr = {
}
};
+static const struct gf100_gr_fwif
+gm107_gr_fwif[] = {
+ { -1, gf100_gr_load, &gm107_gr },
+ { -1, gf100_gr_nofw, &gm107_gr },
+ {}
+};
+
int
gm107_gr_new(struct nvkm_device *device, int index, struct nvkm_gr **pgr)
{
- return gf100_gr_new_(&gm107_gr, device, index, pgr);
+ return gf100_gr_new_(gm107_gr_fwif, device, index, pgr);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gm200.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gm200.c
index eff30662b984..3d67cfb08395 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gm200.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gm200.c
@@ -24,14 +24,64 @@
#include "gf100.h"
#include "ctxgf100.h"
+#include <core/firmware.h>
+#include <subdev/acr.h>
#include <subdev/secboot.h>
+#include <nvfw/flcn.h>
+
#include <nvif/class.h>
/*******************************************************************************
* PGRAPH engine/subdev functions
******************************************************************************/
+static void
+gm200_gr_acr_bld_patch(struct nvkm_acr *acr, u32 bld, s64 adjust)
+{
+ struct flcn_bl_dmem_desc_v1 hdr;
+ nvkm_robj(acr->wpr, bld, &hdr, sizeof(hdr));
+ hdr.code_dma_base = hdr.code_dma_base + adjust;
+ hdr.data_dma_base = hdr.data_dma_base + adjust;
+ nvkm_wobj(acr->wpr, bld, &hdr, sizeof(hdr));
+ flcn_bl_dmem_desc_v1_dump(&acr->subdev, &hdr);
+}
+
+static void
+gm200_gr_acr_bld_write(struct nvkm_acr *acr, u32 bld,
+ struct nvkm_acr_lsfw *lsfw)
+{
+ const u64 base = lsfw->offset.img + lsfw->app_start_offset;
+ const u64 code = base + lsfw->app_resident_code_offset;
+ const u64 data = base + lsfw->app_resident_data_offset;
+ const struct flcn_bl_dmem_desc_v1 hdr = {
+ .ctx_dma = FALCON_DMAIDX_UCODE,
+ .code_dma_base = code,
+ .non_sec_code_off = lsfw->app_resident_code_offset,
+ .non_sec_code_size = lsfw->app_resident_code_size,
+ .code_entry_point = lsfw->app_imem_entry,
+ .data_dma_base = data,
+ .data_size = lsfw->app_resident_data_size,
+ };
+
+ nvkm_wobj(acr->wpr, bld, &hdr, sizeof(hdr));
+}
+
+const struct nvkm_acr_lsf_func
+gm200_gr_gpccs_acr = {
+ .flags = NVKM_ACR_LSF_FORCE_PRIV_LOAD,
+ .bld_size = sizeof(struct flcn_bl_dmem_desc_v1),
+ .bld_write = gm200_gr_acr_bld_write,
+ .bld_patch = gm200_gr_acr_bld_patch,
+};
+
+const struct nvkm_acr_lsf_func
+gm200_gr_fecs_acr = {
+ .bld_size = sizeof(struct flcn_bl_dmem_desc_v1),
+ .bld_write = gm200_gr_acr_bld_write,
+ .bld_patch = gm200_gr_acr_bld_patch,
+};
+
int
gm200_gr_rops(struct gf100_gr *gr)
{
@@ -124,44 +174,6 @@ gm200_gr_oneinit_tiles(struct gf100_gr *gr)
}
}
-int
-gm200_gr_new_(const struct gf100_gr_func *func, struct nvkm_device *device,
- int index, struct nvkm_gr **pgr)
-{
- struct gf100_gr *gr;
- int ret;
-
- if (!(gr = kzalloc(sizeof(*gr), GFP_KERNEL)))
- return -ENOMEM;
- *pgr = &gr->base;
-
- ret = gf100_gr_ctor(func, device, index, gr);
- if (ret)
- return ret;
-
- /* Load firmwares for non-secure falcons */
- if (!nvkm_secboot_is_managed(device->secboot,
- NVKM_SECBOOT_FALCON_FECS)) {
- if ((ret = gf100_gr_ctor_fw(gr, "gr/fecs_inst", &gr->fuc409c)) ||
- (ret = gf100_gr_ctor_fw(gr, "gr/fecs_data", &gr->fuc409d)))
- return ret;
- }
- if (!nvkm_secboot_is_managed(device->secboot,
- NVKM_SECBOOT_FALCON_GPCCS)) {
- if ((ret = gf100_gr_ctor_fw(gr, "gr/gpccs_inst", &gr->fuc41ac)) ||
- (ret = gf100_gr_ctor_fw(gr, "gr/gpccs_data", &gr->fuc41ad)))
- return ret;
- }
-
- if ((ret = gk20a_gr_av_to_init(gr, "gr/sw_nonctx", &gr->fuc_sw_nonctx)) ||
- (ret = gk20a_gr_aiv_to_init(gr, "gr/sw_ctx", &gr->fuc_sw_ctx)) ||
- (ret = gk20a_gr_av_to_init(gr, "gr/sw_bundle_init", &gr->fuc_bundle)) ||
- (ret = gk20a_gr_av_to_method(gr, "gr/sw_method_init", &gr->fuc_method)))
- return ret;
-
- return 0;
-}
-
static const struct gf100_gr_func
gm200_gr = {
.oneinit_tiles = gm200_gr_oneinit_tiles,
@@ -198,7 +210,77 @@ gm200_gr = {
};
int
+gm200_gr_load(struct gf100_gr *gr, int ver, const struct gf100_gr_fwif *fwif)
+{
+ int ret;
+
+ ret = nvkm_acr_lsfw_load_bl_inst_data_sig(&gr->base.engine.subdev,
+ &gr->fecs.falcon,
+ NVKM_ACR_LSF_FECS,
+ "gr/fecs_", ver, fwif->fecs);
+ if (ret)
+ return ret;
+
+ ret = nvkm_acr_lsfw_load_bl_inst_data_sig(&gr->base.engine.subdev,
+ &gr->gpccs.falcon,
+ NVKM_ACR_LSF_GPCCS,
+ "gr/gpccs_", ver,
+ fwif->gpccs);
+ if (ret)
+ return ret;
+
+ gr->firmware = true;
+
+ return gk20a_gr_load_sw(gr, "gr/", ver);
+}
+
+MODULE_FIRMWARE("nvidia/gm200/gr/fecs_bl.bin");
+MODULE_FIRMWARE("nvidia/gm200/gr/fecs_inst.bin");
+MODULE_FIRMWARE("nvidia/gm200/gr/fecs_data.bin");
+MODULE_FIRMWARE("nvidia/gm200/gr/fecs_sig.bin");
+MODULE_FIRMWARE("nvidia/gm200/gr/gpccs_bl.bin");
+MODULE_FIRMWARE("nvidia/gm200/gr/gpccs_inst.bin");
+MODULE_FIRMWARE("nvidia/gm200/gr/gpccs_data.bin");
+MODULE_FIRMWARE("nvidia/gm200/gr/gpccs_sig.bin");
+MODULE_FIRMWARE("nvidia/gm200/gr/sw_ctx.bin");
+MODULE_FIRMWARE("nvidia/gm200/gr/sw_nonctx.bin");
+MODULE_FIRMWARE("nvidia/gm200/gr/sw_bundle_init.bin");
+MODULE_FIRMWARE("nvidia/gm200/gr/sw_method_init.bin");
+
+MODULE_FIRMWARE("nvidia/gm204/gr/fecs_bl.bin");
+MODULE_FIRMWARE("nvidia/gm204/gr/fecs_inst.bin");
+MODULE_FIRMWARE("nvidia/gm204/gr/fecs_data.bin");
+MODULE_FIRMWARE("nvidia/gm204/gr/fecs_sig.bin");
+MODULE_FIRMWARE("nvidia/gm204/gr/gpccs_bl.bin");
+MODULE_FIRMWARE("nvidia/gm204/gr/gpccs_inst.bin");
+MODULE_FIRMWARE("nvidia/gm204/gr/gpccs_data.bin");
+MODULE_FIRMWARE("nvidia/gm204/gr/gpccs_sig.bin");
+MODULE_FIRMWARE("nvidia/gm204/gr/sw_ctx.bin");
+MODULE_FIRMWARE("nvidia/gm204/gr/sw_nonctx.bin");
+MODULE_FIRMWARE("nvidia/gm204/gr/sw_bundle_init.bin");
+MODULE_FIRMWARE("nvidia/gm204/gr/sw_method_init.bin");
+
+MODULE_FIRMWARE("nvidia/gm206/gr/fecs_bl.bin");
+MODULE_FIRMWARE("nvidia/gm206/gr/fecs_inst.bin");
+MODULE_FIRMWARE("nvidia/gm206/gr/fecs_data.bin");
+MODULE_FIRMWARE("nvidia/gm206/gr/fecs_sig.bin");
+MODULE_FIRMWARE("nvidia/gm206/gr/gpccs_bl.bin");
+MODULE_FIRMWARE("nvidia/gm206/gr/gpccs_inst.bin");
+MODULE_FIRMWARE("nvidia/gm206/gr/gpccs_data.bin");
+MODULE_FIRMWARE("nvidia/gm206/gr/gpccs_sig.bin");
+MODULE_FIRMWARE("nvidia/gm206/gr/sw_ctx.bin");
+MODULE_FIRMWARE("nvidia/gm206/gr/sw_nonctx.bin");
+MODULE_FIRMWARE("nvidia/gm206/gr/sw_bundle_init.bin");
+MODULE_FIRMWARE("nvidia/gm206/gr/sw_method_init.bin");
+
+static const struct gf100_gr_fwif
+gm200_gr_fwif[] = {
+ { 0, gm200_gr_load, &gm200_gr, &gm200_gr_fecs_acr, &gm200_gr_gpccs_acr },
+ {}
+};
+
+int
gm200_gr_new(struct nvkm_device *device, int index, struct nvkm_gr **pgr)
{
- return gm200_gr_new_(&gm200_gr, device, index, pgr);
+ return gf100_gr_new_(gm200_gr_fwif, device, index, pgr);
}
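For contrast with the gm20b variant later in this patch, the v1 descriptor used here carries full-width DMA bases, so gm200_gr_acr_bld_patch() can relocate with a plain add. A trimmed, hypothetical equivalent under that assumption:

#include <stdint.h>

struct bl_desc_v1 {			/* stand-in for flcn_bl_dmem_desc_v1 */
	uint64_t code_dma_base;
	uint64_t data_dma_base;
};

static void bld_patch_v1(struct bl_desc_v1 *hdr, int64_t adjust)
{
	/* full 64-bit fields: no reassembly needed, just shift both bases */
	hdr->code_dma_base += adjust;
	hdr->data_dma_base += adjust;
}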
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gm20b.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gm20b.c
index a667770ce3cb..09d8c5d5b000 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gm20b.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gm20b.c
@@ -22,10 +22,61 @@
#include "gf100.h"
#include "ctxgf100.h"
+#include <core/firmware.h>
+#include <subdev/acr.h>
#include <subdev/timer.h>
+#include <nvfw/flcn.h>
+
#include <nvif/class.h>
+void
+gm20b_gr_acr_bld_patch(struct nvkm_acr *acr, u32 bld, s64 adjust)
+{
+ struct flcn_bl_dmem_desc hdr;
+ u64 addr;
+
+ nvkm_robj(acr->wpr, bld, &hdr, sizeof(hdr));
+ addr = ((u64)hdr.code_dma_base1 << 40 | hdr.code_dma_base << 8);
+ hdr.code_dma_base = lower_32_bits((addr + adjust) >> 8);
+ hdr.code_dma_base1 = upper_32_bits((addr + adjust) >> 8);
+ addr = ((u64)hdr.data_dma_base1 << 40 | hdr.data_dma_base << 8);
+ hdr.data_dma_base = lower_32_bits((addr + adjust) >> 8);
+ hdr.data_dma_base1 = upper_32_bits((addr + adjust) >> 8);
+ nvkm_wobj(acr->wpr, bld, &hdr, sizeof(hdr));
+
+ flcn_bl_dmem_desc_dump(&acr->subdev, &hdr);
+}
+
+void
+gm20b_gr_acr_bld_write(struct nvkm_acr *acr, u32 bld,
+ struct nvkm_acr_lsfw *lsfw)
+{
+ const u64 base = lsfw->offset.img + lsfw->app_start_offset;
+ const u64 code = (base + lsfw->app_resident_code_offset) >> 8;
+ const u64 data = (base + lsfw->app_resident_data_offset) >> 8;
+ const struct flcn_bl_dmem_desc hdr = {
+ .ctx_dma = FALCON_DMAIDX_UCODE,
+ .code_dma_base = lower_32_bits(code),
+ .non_sec_code_off = lsfw->app_resident_code_offset,
+ .non_sec_code_size = lsfw->app_resident_code_size,
+ .code_entry_point = lsfw->app_imem_entry,
+ .data_dma_base = lower_32_bits(data),
+ .data_size = lsfw->app_resident_data_size,
+ .code_dma_base1 = upper_32_bits(code),
+ .data_dma_base1 = upper_32_bits(data),
+ };
+
+ nvkm_wobj(acr->wpr, bld, &hdr, sizeof(hdr));
+}
+
+const struct nvkm_acr_lsf_func
+gm20b_gr_fecs_acr = {
+ .bld_size = sizeof(struct flcn_bl_dmem_desc),
+ .bld_write = gm20b_gr_acr_bld_write,
+ .bld_patch = gm20b_gr_acr_bld_patch,
+};
+
static void
gm20b_gr_init_gpc_mmu(struct gf100_gr *gr)
{
@@ -33,7 +84,7 @@ gm20b_gr_init_gpc_mmu(struct gf100_gr *gr)
u32 val;
/* Bypass MMU check for non-secure boot */
- if (!device->secboot) {
+ if (!device->acr) {
nvkm_wr32(device, 0x100ce4, 0xffffffff);
if (nvkm_rd32(device, 0x100ce4) != 0xffffffff)
@@ -85,8 +136,51 @@ gm20b_gr = {
}
};
+static int
+gm20b_gr_load(struct gf100_gr *gr, int ver, const struct gf100_gr_fwif *fwif)
+{
+ struct nvkm_subdev *subdev = &gr->base.engine.subdev;
+ int ret;
+
+ ret = nvkm_acr_lsfw_load_bl_inst_data_sig(subdev, &gr->fecs.falcon,
+ NVKM_ACR_LSF_FECS,
+ "gr/fecs_", ver, fwif->fecs);
+ if (ret)
+ return ret;
+
+ if (nvkm_firmware_load_blob(subdev, "gr/", "gpccs_inst", ver,
+ &gr->gpccs.inst) ||
+ nvkm_firmware_load_blob(subdev, "gr/", "gpccs_data", ver,
+ &gr->gpccs.data))
+ return -ENOENT;
+
+ gr->firmware = true;
+
+ return gk20a_gr_load_sw(gr, "gr/", ver);
+}
+
+#if IS_ENABLED(CONFIG_ARCH_TEGRA_210_SOC)
+MODULE_FIRMWARE("nvidia/gm20b/gr/fecs_bl.bin");
+MODULE_FIRMWARE("nvidia/gm20b/gr/fecs_inst.bin");
+MODULE_FIRMWARE("nvidia/gm20b/gr/fecs_data.bin");
+MODULE_FIRMWARE("nvidia/gm20b/gr/fecs_sig.bin");
+MODULE_FIRMWARE("nvidia/gm20b/gr/gpccs_inst.bin");
+MODULE_FIRMWARE("nvidia/gm20b/gr/gpccs_data.bin");
+MODULE_FIRMWARE("nvidia/gm20b/gr/sw_ctx.bin");
+MODULE_FIRMWARE("nvidia/gm20b/gr/sw_nonctx.bin");
+MODULE_FIRMWARE("nvidia/gm20b/gr/sw_bundle_init.bin");
+MODULE_FIRMWARE("nvidia/gm20b/gr/sw_method_init.bin");
+#endif
+
+static const struct gf100_gr_fwif
+gm20b_gr_fwif[] = {
+ { 0, gm20b_gr_load, &gm20b_gr, &gm20b_gr_fecs_acr },
+ {}
+};
+
int
gm20b_gr_new(struct nvkm_device *device, int index, struct nvkm_gr **pgr)
{
- return gm200_gr_new_(&gm20b_gr, device, index, pgr);
+ return gf100_gr_new_(gm20b_gr_fwif, device, index, pgr);
}
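A standalone sketch (with assumed field widths) of the address juggling in gm20b_gr_acr_bld_patch() above: the descriptor keeps a 256-byte-aligned DMA base split across two 32-bit fields, base = addr[39:8] and base1 = the remaining high bits, so relocating the WPR image means reassemble, add the signed adjustment, split again:

#include <stdint.h>
#include <stdio.h>

static void bld_patch(uint32_t *base, uint32_t *base1, int64_t adjust)
{
	uint64_t addr = ((uint64_t)*base1 << 40) | ((uint64_t)*base << 8);

	addr += adjust;
	*base  = (uint32_t)(addr >> 8);		/* lower_32_bits(addr >> 8) */
	*base1 = (uint32_t)(addr >> 40);	/* upper_32_bits(addr >> 8) */
}

int main(void)
{
	uint32_t base = 0x00000021, base1 = 0x01;  /* addr 0x100'0000'2100 */

	bld_patch(&base, &base1, 0x100000);	   /* slide up by 1 MiB */
	printf("base=%#010x base1=%#x\n", base, base1);
	return 0;
}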
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gp100.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gp100.c
index 9d0521ce309a..33c8634ae567 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gp100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gp100.c
@@ -62,7 +62,7 @@ gp100_gr_zbc_clear_depth(struct gf100_gr *gr, int zbc)
gr->zbc_depth[zbc].format << ((znum % 4) * 7));
}
-static const struct gf100_gr_func_zbc
+const struct gf100_gr_func_zbc
gp100_gr_zbc = {
.clear_color = gp100_gr_zbc_clear_color,
.clear_depth = gp100_gr_zbc_clear_depth,
@@ -135,8 +135,27 @@ gp100_gr = {
}
};
+MODULE_FIRMWARE("nvidia/gp100/gr/fecs_bl.bin");
+MODULE_FIRMWARE("nvidia/gp100/gr/fecs_inst.bin");
+MODULE_FIRMWARE("nvidia/gp100/gr/fecs_data.bin");
+MODULE_FIRMWARE("nvidia/gp100/gr/fecs_sig.bin");
+MODULE_FIRMWARE("nvidia/gp100/gr/gpccs_bl.bin");
+MODULE_FIRMWARE("nvidia/gp100/gr/gpccs_inst.bin");
+MODULE_FIRMWARE("nvidia/gp100/gr/gpccs_data.bin");
+MODULE_FIRMWARE("nvidia/gp100/gr/gpccs_sig.bin");
+MODULE_FIRMWARE("nvidia/gp100/gr/sw_ctx.bin");
+MODULE_FIRMWARE("nvidia/gp100/gr/sw_nonctx.bin");
+MODULE_FIRMWARE("nvidia/gp100/gr/sw_bundle_init.bin");
+MODULE_FIRMWARE("nvidia/gp100/gr/sw_method_init.bin");
+
+static const struct gf100_gr_fwif
+gp100_gr_fwif[] = {
+ { 0, gm200_gr_load, &gp100_gr, &gm200_gr_fecs_acr, &gm200_gr_gpccs_acr },
+ {}
+};
+
int
gp100_gr_new(struct nvkm_device *device, int index, struct nvkm_gr **pgr)
{
- return gm200_gr_new_(&gp100_gr, device, index, pgr);
+ return gf100_gr_new_(gp100_gr_fwif, device, index, pgr);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gp102.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gp102.c
index 37f7d739bf80..7baf67f743f4 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gp102.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gp102.c
@@ -131,8 +131,27 @@ gp102_gr = {
}
};
+MODULE_FIRMWARE("nvidia/gp102/gr/fecs_bl.bin");
+MODULE_FIRMWARE("nvidia/gp102/gr/fecs_inst.bin");
+MODULE_FIRMWARE("nvidia/gp102/gr/fecs_data.bin");
+MODULE_FIRMWARE("nvidia/gp102/gr/fecs_sig.bin");
+MODULE_FIRMWARE("nvidia/gp102/gr/gpccs_bl.bin");
+MODULE_FIRMWARE("nvidia/gp102/gr/gpccs_inst.bin");
+MODULE_FIRMWARE("nvidia/gp102/gr/gpccs_data.bin");
+MODULE_FIRMWARE("nvidia/gp102/gr/gpccs_sig.bin");
+MODULE_FIRMWARE("nvidia/gp102/gr/sw_ctx.bin");
+MODULE_FIRMWARE("nvidia/gp102/gr/sw_nonctx.bin");
+MODULE_FIRMWARE("nvidia/gp102/gr/sw_bundle_init.bin");
+MODULE_FIRMWARE("nvidia/gp102/gr/sw_method_init.bin");
+
+static const struct gf100_gr_fwif
+gp102_gr_fwif[] = {
+ { 0, gm200_gr_load, &gp102_gr, &gm200_gr_fecs_acr, &gm200_gr_gpccs_acr },
+ {}
+};
+
int
gp102_gr_new(struct nvkm_device *device, int index, struct nvkm_gr **pgr)
{
- return gm200_gr_new_(&gp102_gr, device, index, pgr);
+ return gf100_gr_new_(gp102_gr_fwif, device, index, pgr);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gp104.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gp104.c
index 4573c914c021..d9b8ef875f8d 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gp104.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gp104.c
@@ -59,8 +59,40 @@ gp104_gr = {
}
};
+MODULE_FIRMWARE("nvidia/gp104/gr/fecs_bl.bin");
+MODULE_FIRMWARE("nvidia/gp104/gr/fecs_inst.bin");
+MODULE_FIRMWARE("nvidia/gp104/gr/fecs_data.bin");
+MODULE_FIRMWARE("nvidia/gp104/gr/fecs_sig.bin");
+MODULE_FIRMWARE("nvidia/gp104/gr/gpccs_bl.bin");
+MODULE_FIRMWARE("nvidia/gp104/gr/gpccs_inst.bin");
+MODULE_FIRMWARE("nvidia/gp104/gr/gpccs_data.bin");
+MODULE_FIRMWARE("nvidia/gp104/gr/gpccs_sig.bin");
+MODULE_FIRMWARE("nvidia/gp104/gr/sw_ctx.bin");
+MODULE_FIRMWARE("nvidia/gp104/gr/sw_nonctx.bin");
+MODULE_FIRMWARE("nvidia/gp104/gr/sw_bundle_init.bin");
+MODULE_FIRMWARE("nvidia/gp104/gr/sw_method_init.bin");
+
+MODULE_FIRMWARE("nvidia/gp106/gr/fecs_bl.bin");
+MODULE_FIRMWARE("nvidia/gp106/gr/fecs_inst.bin");
+MODULE_FIRMWARE("nvidia/gp106/gr/fecs_data.bin");
+MODULE_FIRMWARE("nvidia/gp106/gr/fecs_sig.bin");
+MODULE_FIRMWARE("nvidia/gp106/gr/gpccs_bl.bin");
+MODULE_FIRMWARE("nvidia/gp106/gr/gpccs_inst.bin");
+MODULE_FIRMWARE("nvidia/gp106/gr/gpccs_data.bin");
+MODULE_FIRMWARE("nvidia/gp106/gr/gpccs_sig.bin");
+MODULE_FIRMWARE("nvidia/gp106/gr/sw_ctx.bin");
+MODULE_FIRMWARE("nvidia/gp106/gr/sw_nonctx.bin");
+MODULE_FIRMWARE("nvidia/gp106/gr/sw_bundle_init.bin");
+MODULE_FIRMWARE("nvidia/gp106/gr/sw_method_init.bin");
+
+static const struct gf100_gr_fwif
+gp104_gr_fwif[] = {
+ { 0, gm200_gr_load, &gp104_gr, &gm200_gr_fecs_acr, &gm200_gr_gpccs_acr },
+ {}
+};
+
int
gp104_gr_new(struct nvkm_device *device, int index, struct nvkm_gr **pgr)
{
- return gm200_gr_new_(&gp104_gr, device, index, pgr);
+ return gf100_gr_new_(gp104_gr_fwif, device, index, pgr);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gp107.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gp107.c
index 812aba91653f..2b1ad5522184 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gp107.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gp107.c
@@ -26,7 +26,7 @@
#include <nvif/class.h>
-static const struct gf100_gr_func
+const struct gf100_gr_func
gp107_gr = {
.oneinit_tiles = gm200_gr_oneinit_tiles,
.oneinit_sm_id = gm200_gr_oneinit_sm_id,
@@ -61,8 +61,27 @@ gp107_gr = {
}
};
+MODULE_FIRMWARE("nvidia/gp107/gr/fecs_bl.bin");
+MODULE_FIRMWARE("nvidia/gp107/gr/fecs_inst.bin");
+MODULE_FIRMWARE("nvidia/gp107/gr/fecs_data.bin");
+MODULE_FIRMWARE("nvidia/gp107/gr/fecs_sig.bin");
+MODULE_FIRMWARE("nvidia/gp107/gr/gpccs_bl.bin");
+MODULE_FIRMWARE("nvidia/gp107/gr/gpccs_inst.bin");
+MODULE_FIRMWARE("nvidia/gp107/gr/gpccs_data.bin");
+MODULE_FIRMWARE("nvidia/gp107/gr/gpccs_sig.bin");
+MODULE_FIRMWARE("nvidia/gp107/gr/sw_ctx.bin");
+MODULE_FIRMWARE("nvidia/gp107/gr/sw_nonctx.bin");
+MODULE_FIRMWARE("nvidia/gp107/gr/sw_bundle_init.bin");
+MODULE_FIRMWARE("nvidia/gp107/gr/sw_method_init.bin");
+
+static const struct gf100_gr_fwif
+gp107_gr_fwif[] = {
+ { 0, gm200_gr_load, &gp107_gr, &gm200_gr_fecs_acr, &gm200_gr_gpccs_acr },
+ {}
+};
+
int
gp107_gr_new(struct nvkm_device *device, int index, struct nvkm_gr **pgr)
{
- return gm200_gr_new_(&gp107_gr, device, index, pgr);
+ return gf100_gr_new_(gp107_gr_fwif, device, index, pgr);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gp108.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gp108.c
new file mode 100644
index 000000000000..113e4c1ba9e8
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gp108.c
@@ -0,0 +1,97 @@
+/*
+ * Copyright 2019 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include "gf100.h"
+
+#include <subdev/acr.h>
+
+#include <nvfw/flcn.h>
+
+static void
+gp108_gr_acr_bld_patch(struct nvkm_acr *acr, u32 bld, s64 adjust)
+{
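+ /* v2 bootloader descriptors carry full 64-bit DMA addresses, so
+ * relocation is a plain offset add, with no 256-byte-unit split as
+ * in the v1 descriptor.
+ */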
+ struct flcn_bl_dmem_desc_v2 hdr;
+ nvkm_robj(acr->wpr, bld, &hdr, sizeof(hdr));
+ hdr.code_dma_base = hdr.code_dma_base + adjust;
+ hdr.data_dma_base = hdr.data_dma_base + adjust;
+ nvkm_wobj(acr->wpr, bld, &hdr, sizeof(hdr));
+ flcn_bl_dmem_desc_v2_dump(&acr->subdev, &hdr);
+}
+
+static void
+gp108_gr_acr_bld_write(struct nvkm_acr *acr, u32 bld,
+ struct nvkm_acr_lsfw *lsfw)
+{
+ const u64 base = lsfw->offset.img + lsfw->app_start_offset;
+ const u64 code = base + lsfw->app_resident_code_offset;
+ const u64 data = base + lsfw->app_resident_data_offset;
+ const struct flcn_bl_dmem_desc_v2 hdr = {
+ .ctx_dma = FALCON_DMAIDX_UCODE,
+ .code_dma_base = code,
+ .non_sec_code_off = lsfw->app_resident_code_offset,
+ .non_sec_code_size = lsfw->app_resident_code_size,
+ .code_entry_point = lsfw->app_imem_entry,
+ .data_dma_base = data,
+ .data_size = lsfw->app_resident_data_size,
+ };
+
+ nvkm_wobj(acr->wpr, bld, &hdr, sizeof(hdr));
+}
+
+const struct nvkm_acr_lsf_func
+gp108_gr_gpccs_acr = {
+ .flags = NVKM_ACR_LSF_FORCE_PRIV_LOAD,
+ .bld_size = sizeof(struct flcn_bl_dmem_desc_v2),
+ .bld_write = gp108_gr_acr_bld_write,
+ .bld_patch = gp108_gr_acr_bld_patch,
+};
+
+const struct nvkm_acr_lsf_func
+gp108_gr_fecs_acr = {
+ .bld_size = sizeof(struct flcn_bl_dmem_desc_v2),
+ .bld_write = gp108_gr_acr_bld_write,
+ .bld_patch = gp108_gr_acr_bld_patch,
+};
+
+MODULE_FIRMWARE("nvidia/gp108/gr/fecs_bl.bin");
+MODULE_FIRMWARE("nvidia/gp108/gr/fecs_inst.bin");
+MODULE_FIRMWARE("nvidia/gp108/gr/fecs_data.bin");
+MODULE_FIRMWARE("nvidia/gp108/gr/fecs_sig.bin");
+MODULE_FIRMWARE("nvidia/gp108/gr/gpccs_bl.bin");
+MODULE_FIRMWARE("nvidia/gp108/gr/gpccs_inst.bin");
+MODULE_FIRMWARE("nvidia/gp108/gr/gpccs_data.bin");
+MODULE_FIRMWARE("nvidia/gp108/gr/gpccs_sig.bin");
+MODULE_FIRMWARE("nvidia/gp108/gr/sw_ctx.bin");
+MODULE_FIRMWARE("nvidia/gp108/gr/sw_nonctx.bin");
+MODULE_FIRMWARE("nvidia/gp108/gr/sw_bundle_init.bin");
+MODULE_FIRMWARE("nvidia/gp108/gr/sw_method_init.bin");
+
+static const struct gf100_gr_fwif
+gp108_gr_fwif[] = {
+ { 0, gm200_gr_load, &gp107_gr, &gp108_gr_fecs_acr, &gp108_gr_gpccs_acr },
+ {}
+};
+
+int
+gp108_gr_new(struct nvkm_device *device, int index, struct nvkm_gr **pgr)
+{
+ return gf100_gr_new_(gp108_gr_fwif, device, index, pgr);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gp10b.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gp10b.c
index 303dceddd4a8..eaf913eb5aa3 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gp10b.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gp10b.c
@@ -23,8 +23,20 @@
#include "gf100.h"
#include "ctxgf100.h"
+#include <subdev/acr.h>
+
#include <nvif/class.h>
+#include <nvfw/flcn.h>
+
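+/* FORCE_PRIV_LOAD has the ACR bootstrap GPCCS by writing its ucode
+ * through PRIV registers rather than having the falcon DMA it in.
+ */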
+static const struct nvkm_acr_lsf_func
+gp10b_gr_gpccs_acr = {
+ .flags = NVKM_ACR_LSF_FORCE_PRIV_LOAD,
+ .bld_size = sizeof(struct flcn_bl_dmem_desc),
+ .bld_write = gm20b_gr_acr_bld_write,
+ .bld_patch = gm20b_gr_acr_bld_patch,
+};
+
static const struct gf100_gr_func
gp10b_gr = {
.oneinit_tiles = gm200_gr_oneinit_tiles,
@@ -48,8 +60,8 @@ gp10b_gr = {
.gpc_nr = 1,
.tpc_nr = 2,
.ppc_nr = 1,
- .grctx = &gp102_grctx,
- .zbc = &gp102_gr_zbc,
+ .grctx = &gp100_grctx,
+ .zbc = &gp100_gr_zbc,
.sclass = {
{ -1, -1, FERMI_TWOD_A },
{ -1, -1, KEPLER_INLINE_TO_MEMORY_B },
@@ -59,8 +71,29 @@ gp10b_gr = {
}
};
+#if IS_ENABLED(CONFIG_ARCH_TEGRA_186_SOC)
+MODULE_FIRMWARE("nvidia/gp10b/gr/fecs_bl.bin");
+MODULE_FIRMWARE("nvidia/gp10b/gr/fecs_inst.bin");
+MODULE_FIRMWARE("nvidia/gp10b/gr/fecs_data.bin");
+MODULE_FIRMWARE("nvidia/gp10b/gr/fecs_sig.bin");
+MODULE_FIRMWARE("nvidia/gp10b/gr/gpccs_bl.bin");
+MODULE_FIRMWARE("nvidia/gp10b/gr/gpccs_inst.bin");
+MODULE_FIRMWARE("nvidia/gp10b/gr/gpccs_data.bin");
+MODULE_FIRMWARE("nvidia/gp10b/gr/gpccs_sig.bin");
+MODULE_FIRMWARE("nvidia/gp10b/gr/sw_ctx.bin");
+MODULE_FIRMWARE("nvidia/gp10b/gr/sw_nonctx.bin");
+MODULE_FIRMWARE("nvidia/gp10b/gr/sw_bundle_init.bin");
+MODULE_FIRMWARE("nvidia/gp10b/gr/sw_method_init.bin");
+#endif
+
+static const struct gf100_gr_fwif
+gp10b_gr_fwif[] = {
+ { 0, gm200_gr_load, &gp10b_gr, &gm20b_gr_fecs_acr, &gp10b_gr_gpccs_acr },
+ {}
+};
+
int
gp10b_gr_new(struct nvkm_device *device, int index, struct nvkm_gr **pgr)
{
- return gm200_gr_new_(&gp10b_gr, device, index, pgr);
+ return gf100_gr_new_(gp10b_gr_fwif, device, index, pgr);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gv100.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gv100.c
index 3b3327789ae7..70639d88b8e6 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gv100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gv100.c
@@ -45,7 +45,7 @@ gv100_gr_trap_sm(struct gf100_gr *gr, int gpc, int tpc, int sm)
nvkm_wr32(device, TPC_UNIT(gpc, tpc, 0x734 + sm * 0x80), gerr);
}
-static void
+void
gv100_gr_trap_mp(struct gf100_gr *gr, int gpc, int tpc)
{
gv100_gr_trap_sm(gr, gpc, tpc, 0);
@@ -59,7 +59,7 @@ gv100_gr_init_4188a4(struct gf100_gr *gr)
nvkm_mask(device, 0x4188a4, 0x03000000, 0x03000000);
}
-static void
+void
gv100_gr_init_shader_exceptions(struct gf100_gr *gr, int gpc, int tpc)
{
struct nvkm_device *device = gr->base.engine.subdev.device;
@@ -71,14 +71,14 @@ gv100_gr_init_shader_exceptions(struct gf100_gr *gr, int gpc, int tpc)
}
}
-static void
+void
gv100_gr_init_504430(struct gf100_gr *gr, int gpc, int tpc)
{
struct nvkm_device *device = gr->base.engine.subdev.device;
nvkm_wr32(device, TPC_UNIT(gpc, tpc, 0x430), 0x403f0000);
}
-static void
+void
gv100_gr_init_419bd8(struct gf100_gr *gr)
{
struct nvkm_device *device = gr->base.engine.subdev.device;
@@ -120,8 +120,27 @@ gv100_gr = {
}
};
+MODULE_FIRMWARE("nvidia/gv100/gr/fecs_bl.bin");
+MODULE_FIRMWARE("nvidia/gv100/gr/fecs_inst.bin");
+MODULE_FIRMWARE("nvidia/gv100/gr/fecs_data.bin");
+MODULE_FIRMWARE("nvidia/gv100/gr/fecs_sig.bin");
+MODULE_FIRMWARE("nvidia/gv100/gr/gpccs_bl.bin");
+MODULE_FIRMWARE("nvidia/gv100/gr/gpccs_inst.bin");
+MODULE_FIRMWARE("nvidia/gv100/gr/gpccs_data.bin");
+MODULE_FIRMWARE("nvidia/gv100/gr/gpccs_sig.bin");
+MODULE_FIRMWARE("nvidia/gv100/gr/sw_ctx.bin");
+MODULE_FIRMWARE("nvidia/gv100/gr/sw_nonctx.bin");
+MODULE_FIRMWARE("nvidia/gv100/gr/sw_bundle_init.bin");
+MODULE_FIRMWARE("nvidia/gv100/gr/sw_method_init.bin");
+
+static const struct gf100_gr_fwif
+gv100_gr_fwif[] = {
+ { 0, gm200_gr_load, &gv100_gr, &gp108_gr_fecs_acr, &gp108_gr_gpccs_acr },
+ {}
+};
+
int
gv100_gr_new(struct nvkm_device *device, int index, struct nvkm_gr **pgr)
{
- return gm200_gr_new_(&gv100_gr, device, index, pgr);
+ return gf100_gr_new_(gv100_gr_fwif, device, index, pgr);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/tu102.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/tu102.c
new file mode 100644
index 000000000000..454668b1cf54
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/tu102.c
@@ -0,0 +1,177 @@
+/*
+ * Copyright 2019 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include "gf100.h"
+#include "ctxgf100.h"
+
+#include <nvif/class.h>
+
+static void
+tu102_gr_init_fecs_exceptions(struct gf100_gr *gr)
+{
+ nvkm_wr32(gr->base.engine.subdev.device, 0x409c24, 0x006f0002);
+}
+
+static void
+tu102_gr_init_fs(struct gf100_gr *gr)
+{
+ struct nvkm_device *device = gr->base.engine.subdev.device;
+ int sm;
+
+ gp100_grctx_generate_smid_config(gr);
+ gk104_grctx_generate_gpc_tpc_nr(gr);
+
+ for (sm = 0; sm < gr->sm_nr; sm++) {
+ nvkm_wr32(device, GPC_UNIT(gr->sm[sm].gpc, 0x0c10 +
+ gr->sm[sm].tpc * 4), sm);
+ }
+
+ gm200_grctx_generate_dist_skip_table(gr);
+ gf100_gr_init_num_tpc_per_gpc(gr, true, true);
+}
+
+static void
+tu102_gr_init_zcull(struct gf100_gr *gr)
+{
+ struct nvkm_device *device = gr->base.engine.subdev.device;
+ const u32 magicgpc918 = DIV_ROUND_UP(0x00800000, gr->tpc_total);
+ const u8 tile_nr = ALIGN(gr->tpc_total, 64);
+ u8 bank[GPC_MAX] = {}, gpc, i, j;
+ u32 data;
+
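+ /* Distribute the screen tiles round-robin across each GPC's zcull
+ * banks, packing eight 4-bit bank indices per register.
+ */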
+ for (i = 0; i < tile_nr; i += 8) {
+ for (data = 0, j = 0; j < 8 && i + j < gr->tpc_total; j++) {
+ data |= bank[gr->tile[i + j]] << (j * 4);
+ bank[gr->tile[i + j]]++;
+ }
+ nvkm_wr32(device, GPC_BCAST(0x0980 + ((i / 8) * 4)), data);
+ }
+
+ for (gpc = 0; gpc < gr->gpc_nr; gpc++) {
+ nvkm_wr32(device, GPC_UNIT(gpc, 0x0914),
+ gr->screen_tile_row_offset << 8 | gr->tpc_nr[gpc]);
+ nvkm_wr32(device, GPC_UNIT(gpc, 0x0910), 0x00040000 |
+ gr->tpc_total);
+ nvkm_wr32(device, GPC_UNIT(gpc, 0x0918), magicgpc918);
+ }
+
+ nvkm_wr32(device, GPC_BCAST(0x3fd4), magicgpc918);
+}
+
+static void
+tu102_gr_init_gpc_mmu(struct gf100_gr *gr)
+{
+ struct nvkm_device *device = gr->base.engine.subdev.device;
+
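+ /* Mirror the PFB MMU configuration into the GR copy of the GPC MMU
+ * registers.
+ */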
+ nvkm_wr32(device, 0x418880, nvkm_rd32(device, 0x100c80) & 0xf8001fff);
+ nvkm_wr32(device, 0x418890, 0x00000000);
+ nvkm_wr32(device, 0x418894, 0x00000000);
+
+ nvkm_wr32(device, 0x4188b4, nvkm_rd32(device, 0x100cc8));
+ nvkm_wr32(device, 0x4188b8, nvkm_rd32(device, 0x100ccc));
+ nvkm_wr32(device, 0x4188b0, nvkm_rd32(device, 0x100cc4));
+}
+
+static const struct gf100_gr_func
+tu102_gr = {
+ .oneinit_tiles = gm200_gr_oneinit_tiles,
+ .oneinit_sm_id = gm200_gr_oneinit_sm_id,
+ .init = gf100_gr_init,
+ .init_419bd8 = gv100_gr_init_419bd8,
+ .init_gpc_mmu = tu102_gr_init_gpc_mmu,
+ .init_vsc_stream_master = gk104_gr_init_vsc_stream_master,
+ .init_zcull = tu102_gr_init_zcull,
+ .init_num_active_ltcs = gf100_gr_init_num_active_ltcs,
+ .init_rop_active_fbps = gp100_gr_init_rop_active_fbps,
+ .init_swdx_pes_mask = gp102_gr_init_swdx_pes_mask,
+ .init_fs = tu102_gr_init_fs,
+ .init_fecs_exceptions = tu102_gr_init_fecs_exceptions,
+ .init_ds_hww_esr_2 = gm200_gr_init_ds_hww_esr_2,
+ .init_sked_hww_esr = gk104_gr_init_sked_hww_esr,
+ .init_ppc_exceptions = gk104_gr_init_ppc_exceptions,
+ .init_504430 = gv100_gr_init_504430,
+ .init_shader_exceptions = gv100_gr_init_shader_exceptions,
+ .trap_mp = gv100_gr_trap_mp,
+ .rops = gm200_gr_rops,
+ .gpc_nr = 6,
+ .tpc_nr = 5,
+ .ppc_nr = 3,
+ .grctx = &tu102_grctx,
+ .zbc = &gp102_gr_zbc,
+ .sclass = {
+ { -1, -1, FERMI_TWOD_A },
+ { -1, -1, KEPLER_INLINE_TO_MEMORY_B },
+ { -1, -1, TURING_A, &gf100_fermi },
+ { -1, -1, TURING_COMPUTE_A },
+ {}
+ }
+};
+
+MODULE_FIRMWARE("nvidia/tu102/gr/fecs_bl.bin");
+MODULE_FIRMWARE("nvidia/tu102/gr/fecs_inst.bin");
+MODULE_FIRMWARE("nvidia/tu102/gr/fecs_data.bin");
+MODULE_FIRMWARE("nvidia/tu102/gr/fecs_sig.bin");
+MODULE_FIRMWARE("nvidia/tu102/gr/gpccs_bl.bin");
+MODULE_FIRMWARE("nvidia/tu102/gr/gpccs_inst.bin");
+MODULE_FIRMWARE("nvidia/tu102/gr/gpccs_data.bin");
+MODULE_FIRMWARE("nvidia/tu102/gr/gpccs_sig.bin");
+MODULE_FIRMWARE("nvidia/tu102/gr/sw_ctx.bin");
+MODULE_FIRMWARE("nvidia/tu102/gr/sw_nonctx.bin");
+MODULE_FIRMWARE("nvidia/tu102/gr/sw_bundle_init.bin");
+MODULE_FIRMWARE("nvidia/tu102/gr/sw_method_init.bin");
+
+MODULE_FIRMWARE("nvidia/tu104/gr/fecs_bl.bin");
+MODULE_FIRMWARE("nvidia/tu104/gr/fecs_inst.bin");
+MODULE_FIRMWARE("nvidia/tu104/gr/fecs_data.bin");
+MODULE_FIRMWARE("nvidia/tu104/gr/fecs_sig.bin");
+MODULE_FIRMWARE("nvidia/tu104/gr/gpccs_bl.bin");
+MODULE_FIRMWARE("nvidia/tu104/gr/gpccs_inst.bin");
+MODULE_FIRMWARE("nvidia/tu104/gr/gpccs_data.bin");
+MODULE_FIRMWARE("nvidia/tu104/gr/gpccs_sig.bin");
+MODULE_FIRMWARE("nvidia/tu104/gr/sw_ctx.bin");
+MODULE_FIRMWARE("nvidia/tu104/gr/sw_nonctx.bin");
+MODULE_FIRMWARE("nvidia/tu104/gr/sw_bundle_init.bin");
+MODULE_FIRMWARE("nvidia/tu104/gr/sw_method_init.bin");
+
+MODULE_FIRMWARE("nvidia/tu106/gr/fecs_bl.bin");
+MODULE_FIRMWARE("nvidia/tu106/gr/fecs_inst.bin");
+MODULE_FIRMWARE("nvidia/tu106/gr/fecs_data.bin");
+MODULE_FIRMWARE("nvidia/tu106/gr/fecs_sig.bin");
+MODULE_FIRMWARE("nvidia/tu106/gr/gpccs_bl.bin");
+MODULE_FIRMWARE("nvidia/tu106/gr/gpccs_inst.bin");
+MODULE_FIRMWARE("nvidia/tu106/gr/gpccs_data.bin");
+MODULE_FIRMWARE("nvidia/tu106/gr/gpccs_sig.bin");
+MODULE_FIRMWARE("nvidia/tu106/gr/sw_ctx.bin");
+MODULE_FIRMWARE("nvidia/tu106/gr/sw_nonctx.bin");
+MODULE_FIRMWARE("nvidia/tu106/gr/sw_bundle_init.bin");
+MODULE_FIRMWARE("nvidia/tu106/gr/sw_method_init.bin");
+
+static const struct gf100_gr_fwif
+tu102_gr_fwif[] = {
+ { 0, gm200_gr_load, &tu102_gr, &gp108_gr_fecs_acr, &gp108_gr_gpccs_acr },
+ {}
+};
+
+int
+tu102_gr_new(struct nvkm_device *device, int index, struct nvkm_gr **pgr)
+{
+ return gf100_gr_new_(tu102_gr_fwif, device, index, pgr);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/nvdec/Kbuild b/drivers/gpu/drm/nouveau/nvkm/engine/nvdec/Kbuild
index cdf631822282..9a0fd9812750 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/nvdec/Kbuild
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/nvdec/Kbuild
@@ -1,3 +1,3 @@
# SPDX-License-Identifier: MIT
nvkm-y += nvkm/engine/nvdec/base.o
-nvkm-y += nvkm/engine/nvdec/gp102.o
+nvkm-y += nvkm/engine/nvdec/gm107.o
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/nvdec/base.c b/drivers/gpu/drm/nouveau/nvkm/engine/nvdec/base.c
index 4a63581bdd5e..9b23c1b70ebf 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/nvdec/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/nvdec/base.c
@@ -20,48 +20,42 @@
* DEALINGS IN THE SOFTWARE.
*/
#include "priv.h"
-
-#include <subdev/top.h>
-#include <engine/falcon.h>
-
-static int
-nvkm_nvdec_oneinit(struct nvkm_engine *engine)
-{
- struct nvkm_nvdec *nvdec = nvkm_nvdec(engine);
- struct nvkm_subdev *subdev = &nvdec->engine.subdev;
-
- nvdec->addr = nvkm_top_addr(subdev->device, subdev->index);
- if (!nvdec->addr)
- return -EINVAL;
-
- /*XXX: fix naming of this when adding support for multiple-NVDEC */
- return nvkm_falcon_v1_new(subdev, "NVDEC", nvdec->addr,
- &nvdec->falcon);
-}
+#include <core/firmware.h>
static void *
nvkm_nvdec_dtor(struct nvkm_engine *engine)
{
struct nvkm_nvdec *nvdec = nvkm_nvdec(engine);
- nvkm_falcon_del(&nvdec->falcon);
+ nvkm_falcon_dtor(&nvdec->falcon);
return nvdec;
}
static const struct nvkm_engine_func
nvkm_nvdec = {
.dtor = nvkm_nvdec_dtor,
- .oneinit = nvkm_nvdec_oneinit,
};
int
-nvkm_nvdec_new_(struct nvkm_device *device, int index,
- struct nvkm_nvdec **pnvdec)
+nvkm_nvdec_new_(const struct nvkm_nvdec_fwif *fwif, struct nvkm_device *device,
+ int index, struct nvkm_nvdec **pnvdec)
{
struct nvkm_nvdec *nvdec;
+ int ret;
if (!(nvdec = *pnvdec = kzalloc(sizeof(*nvdec), GFP_KERNEL)))
return -ENOMEM;
- return nvkm_engine_ctor(&nvkm_nvdec, device, index, true,
- &nvdec->engine);
+ ret = nvkm_engine_ctor(&nvkm_nvdec, device, index, true,
+ &nvdec->engine);
+ if (ret)
+ return ret;
+
+ fwif = nvkm_firmware_load(&nvdec->engine.subdev, fwif, "Nvdec", nvdec);
+ if (IS_ERR(fwif))
+ return -ENODEV;
+
+ nvdec->func = fwif->func;
+
+ return nvkm_falcon_ctor(nvdec->func->flcn, &nvdec->engine.subdev,
+ nvkm_subdev_name[index], 0, &nvdec->falcon);
};
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/core/msgqueue.h b/drivers/gpu/drm/nouveau/nvkm/engine/nvdec/gm107.c
index bf3e532665fb..0ab27ab4d8ee 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/core/msgqueue.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/nvdec/gm107.c
@@ -19,25 +19,45 @@
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
+#include "priv.h"
-#ifndef __NVKM_CORE_MSGQUEUE_H
-#define __NVKM_CORE_MSGQUEUE_H
-#include <subdev/secboot.h>
-struct nvkm_msgqueue;
+static const struct nvkm_falcon_func
+gm107_nvdec_flcn = {
+ .debug = 0xd00,
+ .fbif = 0x600,
+ .load_imem = nvkm_falcon_v1_load_imem,
+ .load_dmem = nvkm_falcon_v1_load_dmem,
+ .read_dmem = nvkm_falcon_v1_read_dmem,
+ .bind_context = nvkm_falcon_v1_bind_context,
+ .wait_for_halt = nvkm_falcon_v1_wait_for_halt,
+ .clear_interrupt = nvkm_falcon_v1_clear_interrupt,
+ .set_start_addr = nvkm_falcon_v1_set_start_addr,
+ .start = nvkm_falcon_v1_start,
+ .enable = nvkm_falcon_v1_enable,
+ .disable = nvkm_falcon_v1_disable,
+};
-/* Hopefully we will never have firmware arguments larger than that... */
-#define NVKM_MSGQUEUE_CMDLINE_SIZE 0x100
+static const struct nvkm_nvdec_func
+gm107_nvdec = {
+ .flcn = &gm107_nvdec_flcn,
+};
-int nvkm_msgqueue_new(u32, struct nvkm_falcon *, const struct nvkm_secboot *,
- struct nvkm_msgqueue **);
-void nvkm_msgqueue_del(struct nvkm_msgqueue **);
-void nvkm_msgqueue_recv(struct nvkm_msgqueue *);
-int nvkm_msgqueue_reinit(struct nvkm_msgqueue *);
+static int
+gm107_nvdec_nofw(struct nvkm_nvdec *nvdec, int ver,
+ const struct nvkm_nvdec_fwif *fwif)
+{
+ return 0;
+}
-/* useful if we run a NVIDIA-signed firmware */
-void nvkm_msgqueue_write_cmdline(struct nvkm_msgqueue *, void *);
+static const struct nvkm_nvdec_fwif
+gm107_nvdec_fwif[] = {
+ { -1, gm107_nvdec_nofw, &gm107_nvdec },
+ {}
+};
-/* interface to ACR unit running on falcon (NVIDIA signed firmware) */
-int nvkm_msgqueue_acr_boot_falcons(struct nvkm_msgqueue *, unsigned long);
-
-#endif
+int
+gm107_nvdec_new(struct nvkm_device *device, int index,
+ struct nvkm_nvdec **pnvdec)
+{
+ return nvkm_nvdec_new_(gm107_nvdec_fwif, device, index, pnvdec);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/nvdec/priv.h b/drivers/gpu/drm/nouveau/nvkm/engine/nvdec/priv.h
index 57bfa3aa1835..e14da8b000d0 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/nvdec/priv.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/nvdec/priv.h
@@ -3,5 +3,17 @@
#define __NVKM_NVDEC_PRIV_H__
#include <engine/nvdec.h>
-int nvkm_nvdec_new_(struct nvkm_device *, int, struct nvkm_nvdec **);
+struct nvkm_nvdec_func {
+ const struct nvkm_falcon_func *flcn;
+};
+
+struct nvkm_nvdec_fwif {
+ int version;
+ int (*load)(struct nvkm_nvdec *, int ver,
+ const struct nvkm_nvdec_fwif *);
+ const struct nvkm_nvdec_func *func;
+};
+
+int nvkm_nvdec_new_(const struct nvkm_nvdec_fwif *fwif,
+ struct nvkm_device *, int, struct nvkm_nvdec **);
#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/nvenc/Kbuild b/drivers/gpu/drm/nouveau/nvkm/engine/nvenc/Kbuild
index f316de8d45a8..75bf4436bf3f 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/nvenc/Kbuild
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/nvenc/Kbuild
@@ -1,2 +1,3 @@
# SPDX-License-Identifier: MIT
-#nvkm-y += nvkm/engine/nvenc/base.o
+nvkm-y += nvkm/engine/nvenc/base.o
+nvkm-y += nvkm/engine/nvenc/gm107.o
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/nvenc/base.c b/drivers/gpu/drm/nouveau/nvkm/engine/nvenc/base.c
new file mode 100644
index 000000000000..484100e15668
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/nvenc/base.c
@@ -0,0 +1,63 @@
+/*
+ * Copyright 2019 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include "priv.h"
+
+#include "priv.h"
+#include <core/firmware.h>
+
+static void *
+nvkm_nvenc_dtor(struct nvkm_engine *engine)
+{
+ struct nvkm_nvenc *nvenc = nvkm_nvenc(engine);
+ nvkm_falcon_dtor(&nvenc->falcon);
+ return nvenc;
+}
+
+static const struct nvkm_engine_func
+nvkm_nvenc = {
+ .dtor = nvkm_nvenc_dtor,
+};
+
+int
+nvkm_nvenc_new_(const struct nvkm_nvenc_fwif *fwif, struct nvkm_device *device,
+ int index, struct nvkm_nvenc **pnvenc)
+{
+ struct nvkm_nvenc *nvenc;
+ int ret;
+
+ if (!(nvenc = *pnvenc = kzalloc(sizeof(*nvenc), GFP_KERNEL)))
+ return -ENOMEM;
+
+ ret = nvkm_engine_ctor(&nvkm_nvenc, device, index, true,
+ &nvenc->engine);
+ if (ret)
+ return ret;
+
+ fwif = nvkm_firmware_load(&nvenc->engine.subdev, fwif, "Nvenc", nvenc);
+ if (IS_ERR(fwif))
+ return -ENODEV;
+
+ nvenc->func = fwif->func;
+
+ return nvkm_falcon_ctor(nvenc->func->flcn, &nvenc->engine.subdev,
+ nvkm_subdev_name[index], 0, &nvenc->falcon);
+};
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/nvenc/gm107.c b/drivers/gpu/drm/nouveau/nvkm/engine/nvenc/gm107.c
new file mode 100644
index 000000000000..d249c8ffb2d5
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/nvenc/gm107.c
@@ -0,0 +1,63 @@
+/*
+ * Copyright 2019 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#include "priv.h"
+
+static const struct nvkm_falcon_func
+gm107_nvenc_flcn = {
+ .fbif = 0x800,
+ .load_imem = nvkm_falcon_v1_load_imem,
+ .load_dmem = nvkm_falcon_v1_load_dmem,
+ .read_dmem = nvkm_falcon_v1_read_dmem,
+ .bind_context = nvkm_falcon_v1_bind_context,
+ .wait_for_halt = nvkm_falcon_v1_wait_for_halt,
+ .clear_interrupt = nvkm_falcon_v1_clear_interrupt,
+ .set_start_addr = nvkm_falcon_v1_set_start_addr,
+ .start = nvkm_falcon_v1_start,
+ .enable = nvkm_falcon_v1_enable,
+ .disable = nvkm_falcon_v1_disable,
+};
+
+static const struct nvkm_nvenc_func
+gm107_nvenc = {
+ .flcn = &gm107_nvenc_flcn,
+};
+
+static int
+gm107_nvenc_nofw(struct nvkm_nvenc *nvenc, int ver,
+ const struct nvkm_nvenc_fwif *fwif)
+{
+ return 0;
+}
+
+static const struct nvkm_nvenc_fwif
+gm107_nvenc_fwif[] = {
+ { -1, gm107_nvenc_nofw, &gm107_nvenc },
+ {}
+};
+
+int
+gm107_nvenc_new(struct nvkm_device *device, int index,
+ struct nvkm_nvenc **pnvenc)
+{
+ return nvkm_nvenc_new_(gm107_nvenc_fwif, device, index, pnvenc);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/nvenc/priv.h b/drivers/gpu/drm/nouveau/nvkm/engine/nvenc/priv.h
new file mode 100644
index 000000000000..100fa5ebbeef
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/nvenc/priv.h
@@ -0,0 +1,19 @@
+/* SPDX-License-Identifier: MIT */
+#ifndef __NVKM_NVENC_PRIV_H__
+#define __NVKM_NVENC_PRIV_H__
+#include <engine/nvenc.h>
+
+struct nvkm_nvenc_func {
+ const struct nvkm_falcon_func *flcn;
+};
+
+struct nvkm_nvenc_fwif {
+ int version;
+ int (*load)(struct nvkm_nvenc *, int ver,
+ const struct nvkm_nvenc_fwif *);
+ const struct nvkm_nvenc_func *func;
+};
+
+int nvkm_nvenc_new_(const struct nvkm_nvenc_fwif *, struct nvkm_device *,
+ int, struct nvkm_nvenc **pnvenc);
+#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/sec2/Kbuild b/drivers/gpu/drm/nouveau/nvkm/engine/sec2/Kbuild
index 97c4696171f0..63cd2be3de08 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/sec2/Kbuild
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/sec2/Kbuild
@@ -1,4 +1,5 @@
# SPDX-License-Identifier: MIT
nvkm-y += nvkm/engine/sec2/base.o
nvkm-y += nvkm/engine/sec2/gp102.o
+nvkm-y += nvkm/engine/sec2/gp108.o
nvkm-y += nvkm/engine/sec2/tu102.o
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/sec2/base.c b/drivers/gpu/drm/nouveau/nvkm/engine/sec2/base.c
index 1b49e5b6717f..41318aa0d481 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/sec2/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/sec2/base.c
@@ -21,97 +21,98 @@
*/
#include "priv.h"
-#include <core/msgqueue.h>
+#include <core/firmware.h>
#include <subdev/top.h>
-#include <engine/falcon.h>
-
-static void *
-nvkm_sec2_dtor(struct nvkm_engine *engine)
-{
- struct nvkm_sec2 *sec2 = nvkm_sec2(engine);
- nvkm_msgqueue_del(&sec2->queue);
- nvkm_falcon_del(&sec2->falcon);
- return sec2;
-}
static void
-nvkm_sec2_intr(struct nvkm_engine *engine)
+nvkm_sec2_recv(struct work_struct *work)
{
- struct nvkm_sec2 *sec2 = nvkm_sec2(engine);
- struct nvkm_subdev *subdev = &engine->subdev;
- struct nvkm_device *device = subdev->device;
- u32 disp = nvkm_rd32(device, sec2->addr + 0x01c);
- u32 intr = nvkm_rd32(device, sec2->addr + 0x008) & disp & ~(disp >> 16);
-
- if (intr & 0x00000040) {
- schedule_work(&sec2->work);
- nvkm_wr32(device, sec2->addr + 0x004, 0x00000040);
- intr &= ~0x00000040;
- }
+ struct nvkm_sec2 *sec2 = container_of(work, typeof(*sec2), work);
- if (intr) {
- nvkm_error(subdev, "unhandled intr %08x\n", intr);
- nvkm_wr32(device, sec2->addr + 0x004, intr);
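+ /* The falcon's first message after boot is the INIT message, which
+ * describes its command/message queues; it must be handled before
+ * any other message can be parsed.
+ */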
+ if (!sec2->initmsg_received) {
+ int ret = sec2->func->initmsg(sec2);
+ if (ret) {
+ nvkm_error(&sec2->engine.subdev,
+ "error parsing init message: %d\n", ret);
+ return;
+ }
+ sec2->initmsg_received = true;
}
+
+ nvkm_falcon_msgq_recv(sec2->msgq);
}
static void
-nvkm_sec2_recv(struct work_struct *work)
+nvkm_sec2_intr(struct nvkm_engine *engine)
{
- struct nvkm_sec2 *sec2 = container_of(work, typeof(*sec2), work);
-
- if (!sec2->queue) {
- nvkm_warn(&sec2->engine.subdev,
- "recv function called while no firmware set!\n");
- return;
- }
-
- nvkm_msgqueue_recv(sec2->queue);
+ struct nvkm_sec2 *sec2 = nvkm_sec2(engine);
+ sec2->func->intr(sec2);
}
-
static int
-nvkm_sec2_oneinit(struct nvkm_engine *engine)
+nvkm_sec2_fini(struct nvkm_engine *engine, bool suspend)
{
struct nvkm_sec2 *sec2 = nvkm_sec2(engine);
- struct nvkm_subdev *subdev = &sec2->engine.subdev;
- if (!sec2->addr) {
- sec2->addr = nvkm_top_addr(subdev->device, subdev->index);
- if (WARN_ON(!sec2->addr))
- return -EINVAL;
+ flush_work(&sec2->work);
+
+ if (suspend) {
+ nvkm_falcon_cmdq_fini(sec2->cmdq);
+ sec2->initmsg_received = false;
}
- return nvkm_falcon_v1_new(subdev, "SEC2", sec2->addr, &sec2->falcon);
+ return 0;
}
-static int
-nvkm_sec2_fini(struct nvkm_engine *engine, bool suspend)
+static void *
+nvkm_sec2_dtor(struct nvkm_engine *engine)
{
struct nvkm_sec2 *sec2 = nvkm_sec2(engine);
- flush_work(&sec2->work);
- return 0;
+ nvkm_falcon_msgq_del(&sec2->msgq);
+ nvkm_falcon_cmdq_del(&sec2->cmdq);
+ nvkm_falcon_qmgr_del(&sec2->qmgr);
+ nvkm_falcon_dtor(&sec2->falcon);
+ return sec2;
}
static const struct nvkm_engine_func
nvkm_sec2 = {
.dtor = nvkm_sec2_dtor,
- .oneinit = nvkm_sec2_oneinit,
.fini = nvkm_sec2_fini,
.intr = nvkm_sec2_intr,
};
int
-nvkm_sec2_new_(struct nvkm_device *device, int index, u32 addr,
- struct nvkm_sec2 **psec2)
+nvkm_sec2_new_(const struct nvkm_sec2_fwif *fwif, struct nvkm_device *device,
+ int index, u32 addr, struct nvkm_sec2 **psec2)
{
struct nvkm_sec2 *sec2;
+ int ret;
if (!(sec2 = *psec2 = kzalloc(sizeof(*sec2), GFP_KERNEL)))
return -ENOMEM;
- sec2->addr = addr;
- INIT_WORK(&sec2->work, nvkm_sec2_recv);
- return nvkm_engine_ctor(&nvkm_sec2, device, index, true, &sec2->engine);
+ ret = nvkm_engine_ctor(&nvkm_sec2, device, index, true, &sec2->engine);
+ if (ret)
+ return ret;
+
+ fwif = nvkm_firmware_load(&sec2->engine.subdev, fwif, "Sec2", sec2);
+ if (IS_ERR(fwif))
+ return PTR_ERR(fwif);
+
+ sec2->func = fwif->func;
+
+ ret = nvkm_falcon_ctor(sec2->func->flcn, &sec2->engine.subdev,
+ nvkm_subdev_name[index], addr, &sec2->falcon);
+ if (ret)
+ return ret;
+
+ if ((ret = nvkm_falcon_qmgr_new(&sec2->falcon, &sec2->qmgr)) ||
+ (ret = nvkm_falcon_cmdq_new(sec2->qmgr, "cmdq", &sec2->cmdq)) ||
+ (ret = nvkm_falcon_msgq_new(sec2->qmgr, "msgq", &sec2->msgq)))
+ return ret;
+
+ INIT_WORK(&sec2->work, nvkm_sec2_recv);
+ return 0;
};
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/sec2/gp102.c b/drivers/gpu/drm/nouveau/nvkm/engine/sec2/gp102.c
index 858cf27fa010..368f2a0042ff 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/sec2/gp102.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/sec2/gp102.c
@@ -19,12 +19,316 @@
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
-
#include "priv.h"
+#include <core/memory.h>
+#include <subdev/acr.h>
+#include <subdev/timer.h>
+
+#include <nvfw/flcn.h>
+#include <nvfw/sec2.h>
+
+static int
+gp102_sec2_acr_bootstrap_falcon_callback(void *priv, struct nv_falcon_msg *hdr)
+{
+ struct nv_sec2_acr_bootstrap_falcon_msg *msg =
+ container_of(hdr, typeof(*msg), msg.hdr);
+ struct nvkm_subdev *subdev = priv;
+ const char *name = nvkm_acr_lsf_id(msg->falcon_id);
+
+ if (msg->error_code) {
+ nvkm_error(subdev, "ACR_BOOTSTRAP_FALCON failed for "
+ "falcon %d [%s]: %08x\n",
+ msg->falcon_id, name, msg->error_code);
+ return -EINVAL;
+ }
+
+ nvkm_debug(subdev, "%s booted\n", name);
+ return 0;
+}
+
+static int
+gp102_sec2_acr_bootstrap_falcon(struct nvkm_falcon *falcon,
+ enum nvkm_acr_lsf_id id)
+{
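+ /* Ask the ACR unit running on the SEC2 RTOS to reset and bootstrap
+ * the given LS falcon, waiting up to a second for its reply.
+ */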
+ struct nvkm_sec2 *sec2 = container_of(falcon, typeof(*sec2), falcon);
+ struct nv_sec2_acr_bootstrap_falcon_cmd cmd = {
+ .cmd.hdr.unit_id = sec2->func->unit_acr,
+ .cmd.hdr.size = sizeof(cmd),
+ .cmd.cmd_type = NV_SEC2_ACR_CMD_BOOTSTRAP_FALCON,
+ .flags = NV_SEC2_ACR_BOOTSTRAP_FALCON_FLAGS_RESET_YES,
+ .falcon_id = id,
+ };
+
+ return nvkm_falcon_cmdq_send(sec2->cmdq, &cmd.cmd.hdr,
+ gp102_sec2_acr_bootstrap_falcon_callback,
+ &sec2->engine.subdev,
+ msecs_to_jiffies(1000));
+}
+
+static int
+gp102_sec2_acr_boot(struct nvkm_falcon *falcon)
+{
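+ /* Write a zeroed argument block to the start of EMEM before
+ * starting the falcon.
+ */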
+ struct nv_sec2_args args = {};
+ nvkm_falcon_load_dmem(falcon, &args,
+ falcon->func->emem_addr, sizeof(args), 0);
+ nvkm_falcon_start(falcon);
+ return 0;
+}
+
+static void
+gp102_sec2_acr_bld_patch(struct nvkm_acr *acr, u32 bld, s64 adjust)
+{
+ struct loader_config_v1 hdr;
+ nvkm_robj(acr->wpr, bld, &hdr, sizeof(hdr));
+ hdr.code_dma_base = hdr.code_dma_base + adjust;
+ hdr.data_dma_base = hdr.data_dma_base + adjust;
+ hdr.overlay_dma_base = hdr.overlay_dma_base + adjust;
+ nvkm_wobj(acr->wpr, bld, &hdr, sizeof(hdr));
+ loader_config_v1_dump(&acr->subdev, &hdr);
+}
+
+static void
+gp102_sec2_acr_bld_write(struct nvkm_acr *acr, u32 bld,
+ struct nvkm_acr_lsfw *lsfw)
+{
+ const struct loader_config_v1 hdr = {
+ .dma_idx = FALCON_SEC2_DMAIDX_UCODE,
+ .code_dma_base = lsfw->offset.img + lsfw->app_start_offset,
+ .code_size_total = lsfw->app_size,
+ .code_size_to_load = lsfw->app_resident_code_size,
+ .code_entry_point = lsfw->app_imem_entry,
+ .data_dma_base = lsfw->offset.img + lsfw->app_start_offset +
+ lsfw->app_resident_data_offset,
+ .data_size = lsfw->app_resident_data_size,
+ .overlay_dma_base = lsfw->offset.img + lsfw->app_start_offset,
+ .argc = 1,
+ .argv = lsfw->falcon->func->emem_addr,
+ };
+
+ nvkm_wobj(acr->wpr, bld, &hdr, sizeof(hdr));
+}
+
+static const struct nvkm_acr_lsf_func
+gp102_sec2_acr_0 = {
+ .bld_size = sizeof(struct loader_config_v1),
+ .bld_write = gp102_sec2_acr_bld_write,
+ .bld_patch = gp102_sec2_acr_bld_patch,
+ .boot = gp102_sec2_acr_boot,
+ .bootstrap_falcon = gp102_sec2_acr_bootstrap_falcon,
+};
+
+int
+gp102_sec2_initmsg(struct nvkm_sec2 *sec2)
+{
+ struct nv_sec2_init_msg msg;
+ int ret, i;
+
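+ /* Read the SEC2 RTOS's INIT message and initialise the cmdq/msgq
+ * wrappers from the queue offsets and sizes it reports.
+ */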
+ ret = nvkm_falcon_msgq_recv_initmsg(sec2->msgq, &msg, sizeof(msg));
+ if (ret)
+ return ret;
+
+ if (msg.hdr.unit_id != NV_SEC2_UNIT_INIT ||
+ msg.msg_type != NV_SEC2_INIT_MSG_INIT)
+ return -EINVAL;
+
+ for (i = 0; i < ARRAY_SIZE(msg.queue_info); i++) {
+ if (msg.queue_info[i].id == NV_SEC2_INIT_MSG_QUEUE_ID_MSGQ) {
+ nvkm_falcon_msgq_init(sec2->msgq,
+ msg.queue_info[i].index,
+ msg.queue_info[i].offset,
+ msg.queue_info[i].size);
+ } else {
+ nvkm_falcon_cmdq_init(sec2->cmdq,
+ msg.queue_info[i].index,
+ msg.queue_info[i].offset,
+ msg.queue_info[i].size);
+ }
+ }
+
+ return 0;
+}
+
+void
+gp102_sec2_intr(struct nvkm_sec2 *sec2)
+{
+ struct nvkm_subdev *subdev = &sec2->engine.subdev;
+ struct nvkm_falcon *falcon = &sec2->falcon;
+ u32 disp = nvkm_falcon_rd32(falcon, 0x01c);
+ u32 intr = nvkm_falcon_rd32(falcon, 0x008) & disp & ~(disp >> 16);
+
+ if (intr & 0x00000040) {
+ schedule_work(&sec2->work);
+ nvkm_falcon_wr32(falcon, 0x004, 0x00000040);
+ intr &= ~0x00000040;
+ }
+
+ if (intr) {
+ nvkm_error(subdev, "unhandled intr %08x\n", intr);
+ nvkm_falcon_wr32(falcon, 0x004, intr);
+ }
+}
+
+int
+gp102_sec2_flcn_enable(struct nvkm_falcon *falcon)
+{
+ nvkm_falcon_mask(falcon, 0x3c0, 0x00000001, 0x00000001);
+ udelay(10);
+ nvkm_falcon_mask(falcon, 0x3c0, 0x00000001, 0x00000000);
+ return nvkm_falcon_v1_enable(falcon);
+}
+
+void
+gp102_sec2_flcn_bind_context(struct nvkm_falcon *falcon,
+ struct nvkm_memory *ctx)
+{
+ struct nvkm_device *device = falcon->owner->device;
+
+ nvkm_falcon_v1_bind_context(falcon, ctx);
+ if (!ctx)
+ return;
+
+ /* Not sure if this is a WAR for a HW issue, or some additional
+ * programming sequence that's needed to properly complete the
+ * context switch we trigger above.
+ *
+ * Fixes unreliability of booting the SEC2 RTOS on Quadro P620,
+ * particularly when resuming from suspend.
+ *
+ * Also removes the need for an odd workaround where we needed
+ * to program SEC2's FALCON_CPUCTL_ALIAS_STARTCPU twice before
+ * the SEC2 RTOS would begin executing.
+ */
+ nvkm_msec(device, 10,
+ u32 irqstat = nvkm_falcon_rd32(falcon, 0x008);
+ u32 flcn0dc = nvkm_falcon_rd32(falcon, 0x0dc);
+ if ((irqstat & 0x00000008) &&
+ (flcn0dc & 0x00007000) == 0x00005000)
+ break;
+ );
+
+ nvkm_falcon_mask(falcon, 0x004, 0x00000008, 0x00000008);
+ nvkm_falcon_mask(falcon, 0x058, 0x00000002, 0x00000002);
+
+ nvkm_msec(device, 10,
+ u32 flcn0dc = nvkm_falcon_rd32(falcon, 0x0dc);
+ if ((flcn0dc & 0x00007000) == 0x00000000)
+ break;
+ );
+}
+
+static const struct nvkm_falcon_func
+gp102_sec2_flcn = {
+ .debug = 0x408,
+ .fbif = 0x600,
+ .load_imem = nvkm_falcon_v1_load_imem,
+ .load_dmem = nvkm_falcon_v1_load_dmem,
+ .read_dmem = nvkm_falcon_v1_read_dmem,
+ .emem_addr = 0x01000000,
+ .bind_context = gp102_sec2_flcn_bind_context,
+ .wait_for_halt = nvkm_falcon_v1_wait_for_halt,
+ .clear_interrupt = nvkm_falcon_v1_clear_interrupt,
+ .set_start_addr = nvkm_falcon_v1_set_start_addr,
+ .start = nvkm_falcon_v1_start,
+ .enable = gp102_sec2_flcn_enable,
+ .disable = nvkm_falcon_v1_disable,
+ .cmdq = { 0xa00, 0xa04, 8 },
+ .msgq = { 0xa30, 0xa34, 8 },
+};
+
+const struct nvkm_sec2_func
+gp102_sec2 = {
+ .flcn = &gp102_sec2_flcn,
+ .unit_acr = NV_SEC2_UNIT_ACR,
+ .intr = gp102_sec2_intr,
+ .initmsg = gp102_sec2_initmsg,
+};
+
+MODULE_FIRMWARE("nvidia/gp102/sec2/desc.bin");
+MODULE_FIRMWARE("nvidia/gp102/sec2/image.bin");
+MODULE_FIRMWARE("nvidia/gp102/sec2/sig.bin");
+MODULE_FIRMWARE("nvidia/gp104/sec2/desc.bin");
+MODULE_FIRMWARE("nvidia/gp104/sec2/image.bin");
+MODULE_FIRMWARE("nvidia/gp104/sec2/sig.bin");
+MODULE_FIRMWARE("nvidia/gp106/sec2/desc.bin");
+MODULE_FIRMWARE("nvidia/gp106/sec2/image.bin");
+MODULE_FIRMWARE("nvidia/gp106/sec2/sig.bin");
+MODULE_FIRMWARE("nvidia/gp107/sec2/desc.bin");
+MODULE_FIRMWARE("nvidia/gp107/sec2/image.bin");
+MODULE_FIRMWARE("nvidia/gp107/sec2/sig.bin");
+
+static void
+gp102_sec2_acr_bld_patch_1(struct nvkm_acr *acr, u32 bld, s64 adjust)
+{
+ struct flcn_bl_dmem_desc_v2 hdr;
+ nvkm_robj(acr->wpr, bld, &hdr, sizeof(hdr));
+ hdr.code_dma_base = hdr.code_dma_base + adjust;
+ hdr.data_dma_base = hdr.data_dma_base + adjust;
+ nvkm_wobj(acr->wpr, bld, &hdr, sizeof(hdr));
+ flcn_bl_dmem_desc_v2_dump(&acr->subdev, &hdr);
+}
+
+static void
+gp102_sec2_acr_bld_write_1(struct nvkm_acr *acr, u32 bld,
+ struct nvkm_acr_lsfw *lsfw)
+{
+ const struct flcn_bl_dmem_desc_v2 hdr = {
+ .ctx_dma = FALCON_SEC2_DMAIDX_UCODE,
+ .code_dma_base = lsfw->offset.img + lsfw->app_start_offset,
+ .non_sec_code_off = lsfw->app_resident_code_offset,
+ .non_sec_code_size = lsfw->app_resident_code_size,
+ .code_entry_point = lsfw->app_imem_entry,
+ .data_dma_base = lsfw->offset.img + lsfw->app_start_offset +
+ lsfw->app_resident_data_offset,
+ .data_size = lsfw->app_resident_data_size,
+ .argc = 1,
+ .argv = lsfw->falcon->func->emem_addr,
+ };
+
+ nvkm_wobj(acr->wpr, bld, &hdr, sizeof(hdr));
+}
+
+const struct nvkm_acr_lsf_func
+gp102_sec2_acr_1 = {
+ .bld_size = sizeof(struct flcn_bl_dmem_desc_v2),
+ .bld_write = gp102_sec2_acr_bld_write_1,
+ .bld_patch = gp102_sec2_acr_bld_patch_1,
+ .boot = gp102_sec2_acr_boot,
+ .bootstrap_falcon = gp102_sec2_acr_bootstrap_falcon,
+};
+
+int
+gp102_sec2_load(struct nvkm_sec2 *sec2, int ver,
+ const struct nvkm_sec2_fwif *fwif)
+{
+ return nvkm_acr_lsfw_load_sig_image_desc_v1(&sec2->engine.subdev,
+ &sec2->falcon,
+ NVKM_ACR_LSF_SEC2, "sec2/",
+ ver, fwif->acr);
+}
+
+MODULE_FIRMWARE("nvidia/gp102/sec2/desc-1.bin");
+MODULE_FIRMWARE("nvidia/gp102/sec2/image-1.bin");
+MODULE_FIRMWARE("nvidia/gp102/sec2/sig-1.bin");
+MODULE_FIRMWARE("nvidia/gp104/sec2/desc-1.bin");
+MODULE_FIRMWARE("nvidia/gp104/sec2/image-1.bin");
+MODULE_FIRMWARE("nvidia/gp104/sec2/sig-1.bin");
+MODULE_FIRMWARE("nvidia/gp106/sec2/desc-1.bin");
+MODULE_FIRMWARE("nvidia/gp106/sec2/image-1.bin");
+MODULE_FIRMWARE("nvidia/gp106/sec2/sig-1.bin");
+MODULE_FIRMWARE("nvidia/gp107/sec2/desc-1.bin");
+MODULE_FIRMWARE("nvidia/gp107/sec2/image-1.bin");
+MODULE_FIRMWARE("nvidia/gp107/sec2/sig-1.bin");
+
+static const struct nvkm_sec2_fwif
+gp102_sec2_fwif[] = {
+ { 1, gp102_sec2_load, &gp102_sec2, &gp102_sec2_acr_1 },
+ { 0, gp102_sec2_load, &gp102_sec2, &gp102_sec2_acr_0 },
+ {}
+};
+
int
-gp102_sec2_new(struct nvkm_device *device, int index,
- struct nvkm_sec2 **psec2)
+gp102_sec2_new(struct nvkm_device *device, int index, struct nvkm_sec2 **psec2)
{
- return nvkm_sec2_new_(device, index, 0, psec2);
+ return nvkm_sec2_new_(gp102_sec2_fwif, device, index, 0, psec2);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/sec2/gp108.c b/drivers/gpu/drm/nouveau/nvkm/engine/sec2/gp108.c
new file mode 100644
index 000000000000..232a9d7c51e5
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/sec2/gp108.c
@@ -0,0 +1,39 @@
+/*
+ * Copyright 2019 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include "priv.h"
+#include <subdev/acr.h>
+
+MODULE_FIRMWARE("nvidia/gp108/sec2/desc.bin");
+MODULE_FIRMWARE("nvidia/gp108/sec2/image.bin");
+MODULE_FIRMWARE("nvidia/gp108/sec2/sig.bin");
+
+static const struct nvkm_sec2_fwif
+gp108_sec2_fwif[] = {
+ { 0, gp102_sec2_load, &gp102_sec2, &gp102_sec2_acr_1 },
+ {}
+};
+
+int
+gp108_sec2_new(struct nvkm_device *device, int index, struct nvkm_sec2 **psec2)
+{
+ return nvkm_sec2_new_(gp108_sec2_fwif, device, index, 0, psec2);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/sec2/priv.h b/drivers/gpu/drm/nouveau/nvkm/engine/sec2/priv.h
index b331b00517e6..bb88117e018a 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/sec2/priv.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/sec2/priv.h
@@ -3,7 +3,27 @@
#define __NVKM_SEC2_PRIV_H__
#include <engine/sec2.h>
-#define nvkm_sec2(p) container_of((p), struct nvkm_sec2, engine)
+struct nvkm_sec2_func {
+ const struct nvkm_falcon_func *flcn;
+ u8 unit_acr;
+ void (*intr)(struct nvkm_sec2 *);
+ int (*initmsg)(struct nvkm_sec2 *);
+};
-int nvkm_sec2_new_(struct nvkm_device *, int, u32 addr, struct nvkm_sec2 **);
+void gp102_sec2_intr(struct nvkm_sec2 *);
+int gp102_sec2_initmsg(struct nvkm_sec2 *);
+
+struct nvkm_sec2_fwif {
+ int version;
+ int (*load)(struct nvkm_sec2 *, int ver, const struct nvkm_sec2_fwif *);
+ const struct nvkm_sec2_func *func;
+ const struct nvkm_acr_lsf_func *acr;
+};
+
+int gp102_sec2_load(struct nvkm_sec2 *, int, const struct nvkm_sec2_fwif *);
+extern const struct nvkm_sec2_func gp102_sec2;
+extern const struct nvkm_acr_lsf_func gp102_sec2_acr_1;
+
+int nvkm_sec2_new_(const struct nvkm_sec2_fwif *, struct nvkm_device *,
+ int, u32 addr, struct nvkm_sec2 **);
#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/sec2/tu102.c b/drivers/gpu/drm/nouveau/nvkm/engine/sec2/tu102.c
index d655576164b1..b6ebd95c9ba1 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/sec2/tu102.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/sec2/tu102.c
@@ -19,15 +19,54 @@
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*/
-
#include "priv.h"
+#include <subdev/acr.h>
+
+static const struct nvkm_falcon_func
+tu102_sec2_flcn = {
+ .debug = 0x408,
+ .fbif = 0x600,
+ .load_imem = nvkm_falcon_v1_load_imem,
+ .load_dmem = nvkm_falcon_v1_load_dmem,
+ .read_dmem = nvkm_falcon_v1_read_dmem,
+ .emem_addr = 0x01000000,
+ .bind_context = gp102_sec2_flcn_bind_context,
+ .wait_for_halt = nvkm_falcon_v1_wait_for_halt,
+ .clear_interrupt = nvkm_falcon_v1_clear_interrupt,
+ .set_start_addr = nvkm_falcon_v1_set_start_addr,
+ .start = nvkm_falcon_v1_start,
+ .enable = nvkm_falcon_v1_enable,
+ .disable = nvkm_falcon_v1_disable,
+ .cmdq = { 0xc00, 0xc04, 8 },
+ .msgq = { 0xc80, 0xc84, 8 },
+};
+
+static const struct nvkm_sec2_func
+tu102_sec2 = {
+ .flcn = &tu102_sec2_flcn,
+ .unit_acr = 0x07,
+ .intr = gp102_sec2_intr,
+ .initmsg = gp102_sec2_initmsg,
+};
+
+static int
+tu102_sec2_nofw(struct nvkm_sec2 *sec2, int ver,
+ const struct nvkm_sec2_fwif *fwif)
+{
+ return 0;
+}
+
+static const struct nvkm_sec2_fwif
+tu102_sec2_fwif[] = {
+ { 0, gp102_sec2_load, &tu102_sec2, &gp102_sec2_acr_1 },
+ { -1, tu102_sec2_nofw, &tu102_sec2 },
+ {}
+};
int
-tu102_sec2_new(struct nvkm_device *device, int index,
- struct nvkm_sec2 **psec2)
+tu102_sec2_new(struct nvkm_device *device, int index, struct nvkm_sec2 **psec2)
{
/* TOP info wasn't updated on Turing to reflect the PRI
* address change for some reason. We override it here.
*/
- return nvkm_sec2_new_(device, index, 0x840000, psec2);
+ return nvkm_sec2_new_(tu102_sec2_fwif, device, index, 0x840000, psec2);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/falcon/Kbuild b/drivers/gpu/drm/nouveau/nvkm/falcon/Kbuild
index b5665ada850a..d79d783904ee 100644
--- a/drivers/gpu/drm/nouveau/nvkm/falcon/Kbuild
+++ b/drivers/gpu/drm/nouveau/nvkm/falcon/Kbuild
@@ -1,6 +1,6 @@
# SPDX-License-Identifier: MIT
nvkm-y += nvkm/falcon/base.o
+nvkm-y += nvkm/falcon/cmdq.o
+nvkm-y += nvkm/falcon/msgq.o
+nvkm-y += nvkm/falcon/qmgr.o
nvkm-y += nvkm/falcon/v1.o
-nvkm-y += nvkm/falcon/msgqueue.o
-nvkm-y += nvkm/falcon/msgqueue_0137c63d.o
-nvkm-y += nvkm/falcon/msgqueue_0148cdec.o
diff --git a/drivers/gpu/drm/nouveau/nvkm/falcon/base.c b/drivers/gpu/drm/nouveau/nvkm/falcon/base.c
index 366c87de6e72..c6a3448180d6 100644
--- a/drivers/gpu/drm/nouveau/nvkm/falcon/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/falcon/base.c
@@ -22,6 +22,7 @@
#include "priv.h"
#include <subdev/mc.h>
+#include <subdev/top.h>
void
nvkm_falcon_load_imem(struct nvkm_falcon *falcon, void *data, u32 start,
@@ -134,6 +135,37 @@ nvkm_falcon_clear_interrupt(struct nvkm_falcon *falcon, u32 mask)
return falcon->func->clear_interrupt(falcon, mask);
}
+static int
+nvkm_falcon_oneinit(struct nvkm_falcon *falcon)
+{
+ const struct nvkm_falcon_func *func = falcon->func;
+ const struct nvkm_subdev *subdev = falcon->owner;
+ u32 reg;
+
+ if (!falcon->addr) {
+ falcon->addr = nvkm_top_addr(subdev->device, subdev->index);
+ if (WARN_ON(!falcon->addr))
+ return -ENODEV;
+ }
+
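+ /* Probe the falcon's hardware configuration: version, secret-mode
+ * level, the number of IMEM/DMEM ports, and the IMEM/DMEM limits.
+ */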
+ reg = nvkm_falcon_rd32(falcon, 0x12c);
+ falcon->version = reg & 0xf;
+ falcon->secret = (reg >> 4) & 0x3;
+ falcon->code.ports = (reg >> 8) & 0xf;
+ falcon->data.ports = (reg >> 12) & 0xf;
+
+ reg = nvkm_falcon_rd32(falcon, 0x108);
+ falcon->code.limit = (reg & 0x1ff) << 8;
+ falcon->data.limit = (reg & 0x3fe00) >> 1;
+
+ if (func->debug) {
+ u32 val = nvkm_falcon_rd32(falcon, func->debug);
+ falcon->debug = (val >> 20) & 0x1;
+ }
+
+ return 0;
+}
+
void
nvkm_falcon_put(struct nvkm_falcon *falcon, const struct nvkm_subdev *user)
{
@@ -151,6 +183,8 @@ nvkm_falcon_put(struct nvkm_falcon *falcon, const struct nvkm_subdev *user)
int
nvkm_falcon_get(struct nvkm_falcon *falcon, const struct nvkm_subdev *user)
{
+ int ret = 0;
+
mutex_lock(&falcon->mutex);
if (falcon->user) {
nvkm_error(user, "%s falcon already acquired by %s!\n",
@@ -160,70 +194,37 @@ nvkm_falcon_get(struct nvkm_falcon *falcon, const struct nvkm_subdev *user)
}
nvkm_debug(user, "acquired %s falcon\n", falcon->name);
+ if (!falcon->oneinit)
+ ret = nvkm_falcon_oneinit(falcon);
falcon->user = user;
mutex_unlock(&falcon->mutex);
- return 0;
+ return ret;
}
void
+nvkm_falcon_dtor(struct nvkm_falcon *falcon)
+{
+}
+
+int
nvkm_falcon_ctor(const struct nvkm_falcon_func *func,
struct nvkm_subdev *subdev, const char *name, u32 addr,
struct nvkm_falcon *falcon)
{
- u32 debug_reg;
- u32 reg;
-
falcon->func = func;
falcon->owner = subdev;
falcon->name = name;
falcon->addr = addr;
mutex_init(&falcon->mutex);
mutex_init(&falcon->dmem_mutex);
-
- reg = nvkm_falcon_rd32(falcon, 0x12c);
- falcon->version = reg & 0xf;
- falcon->secret = (reg >> 4) & 0x3;
- falcon->code.ports = (reg >> 8) & 0xf;
- falcon->data.ports = (reg >> 12) & 0xf;
-
- reg = nvkm_falcon_rd32(falcon, 0x108);
- falcon->code.limit = (reg & 0x1ff) << 8;
- falcon->data.limit = (reg & 0x3fe00) >> 1;
-
- switch (subdev->index) {
- case NVKM_ENGINE_GR:
- debug_reg = 0x0;
- break;
- case NVKM_SUBDEV_PMU:
- debug_reg = 0xc08;
- break;
- case NVKM_ENGINE_NVDEC0:
- debug_reg = 0xd00;
- break;
- case NVKM_ENGINE_SEC2:
- debug_reg = 0x408;
- falcon->has_emem = true;
- break;
- case NVKM_SUBDEV_GSP:
- debug_reg = 0x0; /*XXX*/
- break;
- default:
- nvkm_warn(subdev, "unsupported falcon %s!\n",
- nvkm_subdev_name[subdev->index]);
- debug_reg = 0;
- break;
- }
-
- if (debug_reg) {
- u32 val = nvkm_falcon_rd32(falcon, debug_reg);
- falcon->debug = (val >> 20) & 0x1;
- }
+ return 0;
}
void
nvkm_falcon_del(struct nvkm_falcon **pfalcon)
{
if (*pfalcon) {
+ nvkm_falcon_dtor(*pfalcon);
kfree(*pfalcon);
*pfalcon = NULL;
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/falcon/cmdq.c b/drivers/gpu/drm/nouveau/nvkm/falcon/cmdq.c
new file mode 100644
index 000000000000..40e3f3fc83ef
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/falcon/cmdq.c
@@ -0,0 +1,214 @@
+/*
+ * Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#include "qmgr.h"
+
+static bool
+nvkm_falcon_cmdq_has_room(struct nvkm_falcon_cmdq *cmdq, u32 size, bool *rewind)
+{
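+ /* The command queue is a ring in falcon DMEM; if the contiguous
+ * space between head and the end of the ring is too small for this
+ * command, the caller must emit a REWIND and wrap to the start.
+ */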
+ u32 head = nvkm_falcon_rd32(cmdq->qmgr->falcon, cmdq->head_reg);
+ u32 tail = nvkm_falcon_rd32(cmdq->qmgr->falcon, cmdq->tail_reg);
+ u32 free;
+
+ size = ALIGN(size, QUEUE_ALIGNMENT);
+
+ if (head >= tail) {
+ free = cmdq->offset + cmdq->size - head;
+ free -= HDR_SIZE;
+
+ if (size > free) {
+ *rewind = true;
+ head = cmdq->offset;
+ }
+ }
+
+ if (head < tail)
+ free = tail - head - 1;
+
+ return size <= free;
+}
+
+static void
+nvkm_falcon_cmdq_push(struct nvkm_falcon_cmdq *cmdq, void *data, u32 size)
+{
+ struct nvkm_falcon *falcon = cmdq->qmgr->falcon;
+ nvkm_falcon_load_dmem(falcon, data, cmdq->position, size, 0);
+ cmdq->position += ALIGN(size, QUEUE_ALIGNMENT);
+}
+
+static void
+nvkm_falcon_cmdq_rewind(struct nvkm_falcon_cmdq *cmdq)
+{
+ struct nv_falcon_cmd cmd;
+
+ cmd.unit_id = NV_FALCON_CMD_UNIT_ID_REWIND;
+ cmd.size = sizeof(cmd);
+ nvkm_falcon_cmdq_push(cmdq, &cmd, cmd.size);
+
+ cmdq->position = cmdq->offset;
+}
+
+static int
+nvkm_falcon_cmdq_open(struct nvkm_falcon_cmdq *cmdq, u32 size)
+{
+ struct nvkm_falcon *falcon = cmdq->qmgr->falcon;
+ bool rewind = false;
+
+ mutex_lock(&cmdq->mutex);
+
+ if (!nvkm_falcon_cmdq_has_room(cmdq, size, &rewind)) {
+ FLCNQ_DBG(cmdq, "queue full");
+ mutex_unlock(&cmdq->mutex);
+ return -EAGAIN;
+ }
+
+ cmdq->position = nvkm_falcon_rd32(falcon, cmdq->head_reg);
+
+ if (rewind)
+ nvkm_falcon_cmdq_rewind(cmdq);
+
+ return 0;
+}
+
+static void
+nvkm_falcon_cmdq_close(struct nvkm_falcon_cmdq *cmdq)
+{
+ nvkm_falcon_wr32(cmdq->qmgr->falcon, cmdq->head_reg, cmdq->position);
+ mutex_unlock(&cmdq->mutex);
+}
+
+static int
+nvkm_falcon_cmdq_write(struct nvkm_falcon_cmdq *cmdq, struct nv_falcon_cmd *cmd)
+{
+ static unsigned timeout = 2000;
+ unsigned long end_jiffies = jiffies + msecs_to_jiffies(timeout);
+ int ret = -EAGAIN;
+
+ while (ret == -EAGAIN && time_before(jiffies, end_jiffies))
+ ret = nvkm_falcon_cmdq_open(cmdq, cmd->size);
+ if (ret) {
+ FLCNQ_ERR(cmdq, "timeout waiting for queue space");
+ return ret;
+ }
+
+ nvkm_falcon_cmdq_push(cmdq, cmd, cmd->size);
+ nvkm_falcon_cmdq_close(cmdq);
+ return ret;
+}
+
+/* specifies that we want to know the command status in the answer message */
+#define CMD_FLAGS_STATUS BIT(0)
+/* specifies that we want an interrupt when the answer message is queued */
+#define CMD_FLAGS_INTR BIT(1)
+
+int
+nvkm_falcon_cmdq_send(struct nvkm_falcon_cmdq *cmdq, struct nv_falcon_cmd *cmd,
+ nvkm_falcon_qmgr_callback cb, void *priv,
+ unsigned long timeout)
+{
+ struct nvkm_falcon_qmgr_seq *seq;
+ int ret;
+
+ if (!wait_for_completion_timeout(&cmdq->ready,
+ msecs_to_jiffies(1000))) {
+ FLCNQ_ERR(cmdq, "timeout waiting for queue ready");
+ return -ETIMEDOUT;
+ }
+
+ seq = nvkm_falcon_qmgr_seq_acquire(cmdq->qmgr);
+ if (IS_ERR(seq))
+ return PTR_ERR(seq);
+
+ cmd->seq_id = seq->id;
+ cmd->ctrl_flags = CMD_FLAGS_STATUS | CMD_FLAGS_INTR;
+
+ seq->state = SEQ_STATE_USED;
+ seq->async = !timeout;
+ seq->callback = cb;
+ seq->priv = priv;
+
+ ret = nvkm_falcon_cmdq_write(cmdq, cmd);
+ if (ret) {
+ seq->state = SEQ_STATE_PENDING;
+ nvkm_falcon_qmgr_seq_release(cmdq->qmgr, seq);
+ return ret;
+ }
+
+ if (!seq->async) {
+ if (!wait_for_completion_timeout(&seq->done, timeout)) {
+ FLCNQ_ERR(cmdq, "timeout waiting for reply");
+ return -ETIMEDOUT;
+ }
+ ret = seq->result;
+ nvkm_falcon_qmgr_seq_release(cmdq->qmgr, seq);
+ }
+
+ return ret;
+}
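
nvkm_falcon_cmdq_send() is the producer-facing entry point: it gates on cmdq->ready (signalled once the firmware's init message has been processed), reserves a sequence ID so the eventual reply can be matched, then either blocks on seq->done and returns seq->result (timeout != 0), or marks the sequence async so the message handler releases it after running the callback (timeout == 0). A hedged usage sketch; the unit ID and command layout are hypothetical, only the nv_falcon_cmd header comes from this series:

	struct my_cmd {
		struct nv_falcon_cmd hdr;
		u8 cmd_type;
		u32 arg;
	};

	static int
	my_reply_cb(void *priv, struct nv_falcon_msg *hdr)
	{
		/* decode the reply; the return value becomes seq->result */
		return 0;
	}

	static int
	my_send(struct nvkm_falcon_cmdq *cmdq)
	{
		struct my_cmd cmd = {};

		cmd.hdr.unit_id = 0x07;		/* invented unit ID */
		cmd.hdr.size = sizeof(cmd);	/* seq_id/ctrl_flags set by send */
		cmd.cmd_type = 0x00;
		cmd.arg = 42;

		/* synchronous: wait up to a second for the matching message */
		return nvkm_falcon_cmdq_send(cmdq, &cmd.hdr, my_reply_cb, NULL,
					     msecs_to_jiffies(1000));
	}
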
+
+void
+nvkm_falcon_cmdq_fini(struct nvkm_falcon_cmdq *cmdq)
+{
+ reinit_completion(&cmdq->ready);
+}
+
+void
+nvkm_falcon_cmdq_init(struct nvkm_falcon_cmdq *cmdq,
+ u32 index, u32 offset, u32 size)
+{
+ const struct nvkm_falcon_func *func = cmdq->qmgr->falcon->func;
+
+ cmdq->head_reg = func->cmdq.head + index * func->cmdq.stride;
+ cmdq->tail_reg = func->cmdq.tail + index * func->cmdq.stride;
+ cmdq->offset = offset;
+ cmdq->size = size;
+ complete_all(&cmdq->ready);
+
+ FLCNQ_DBG(cmdq, "initialised @ index %d offset 0x%08x size 0x%08x",
+ index, cmdq->offset, cmdq->size);
+}
+
+void
+nvkm_falcon_cmdq_del(struct nvkm_falcon_cmdq **pcmdq)
+{
+ struct nvkm_falcon_cmdq *cmdq = *pcmdq;
+ if (cmdq) {
+ kfree(*pcmdq);
+ *pcmdq = NULL;
+ }
+}
+
+int
+nvkm_falcon_cmdq_new(struct nvkm_falcon_qmgr *qmgr, const char *name,
+ struct nvkm_falcon_cmdq **pcmdq)
+{
+ struct nvkm_falcon_cmdq *cmdq = *pcmdq;
+
+ if (!(cmdq = *pcmdq = kzalloc(sizeof(*cmdq), GFP_KERNEL)))
+ return -ENOMEM;
+
+ cmdq->qmgr = qmgr;
+ cmdq->name = name;
+ mutex_init(&cmdq->mutex);
+ init_completion(&cmdq->ready);
+ return 0;
+}
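
A command queue therefore has a two-phase lifetime: nvkm_falcon_cmdq_new() only allocates and sets up locking at probe time, while the queue geometry arrives later from the firmware's init message via nvkm_falcon_cmdq_init(), which also opens the ready gate; nvkm_falcon_cmdq_fini() re-arms that gate for the next firmware (re)load. A sketch of the expected call order, with the firmware-provided values faked:

	struct nvkm_falcon_cmdq *cmdq;

	if (nvkm_falcon_cmdq_new(qmgr, "hpq", &cmdq))	/* at probe */
		return -ENOMEM;

	/* from the init-message handler; index/offset/size are firmware
	 * data, the numbers here are invented for illustration */
	nvkm_falcon_cmdq_init(cmdq, 0, 0x1000, 0x100);

	/* ... nvkm_falcon_cmdq_send() as needed ... */

	nvkm_falcon_cmdq_fini(cmdq);	/* teardown/suspend: block senders */
	nvkm_falcon_cmdq_del(&cmdq);	/* at remove */
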
diff --git a/drivers/gpu/drm/nouveau/nvkm/falcon/msgq.c b/drivers/gpu/drm/nouveau/nvkm/falcon/msgq.c
new file mode 100644
index 000000000000..cbfe09a561a1
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/falcon/msgq.c
@@ -0,0 +1,213 @@
+/*
+ * Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#include "qmgr.h"
+
+static void
+nvkm_falcon_msgq_open(struct nvkm_falcon_msgq *msgq)
+{
+ mutex_lock(&msgq->mutex);
+ msgq->position = nvkm_falcon_rd32(msgq->qmgr->falcon, msgq->tail_reg);
+}
+
+static void
+nvkm_falcon_msgq_close(struct nvkm_falcon_msgq *msgq, bool commit)
+{
+ struct nvkm_falcon *falcon = msgq->qmgr->falcon;
+
+ if (commit)
+ nvkm_falcon_wr32(falcon, msgq->tail_reg, msgq->position);
+
+ mutex_unlock(&msgq->mutex);
+}
+
+static bool
+nvkm_falcon_msgq_empty(struct nvkm_falcon_msgq *msgq)
+{
+ u32 head = nvkm_falcon_rd32(msgq->qmgr->falcon, msgq->head_reg);
+ u32 tail = nvkm_falcon_rd32(msgq->qmgr->falcon, msgq->tail_reg);
+ return head == tail;
+}
+
+static int
+nvkm_falcon_msgq_pop(struct nvkm_falcon_msgq *msgq, void *data, u32 size)
+{
+ struct nvkm_falcon *falcon = msgq->qmgr->falcon;
+ u32 head, tail, available;
+
+ head = nvkm_falcon_rd32(falcon, msgq->head_reg);
+ /* has the buffer looped? */
+ if (head < msgq->position)
+ msgq->position = msgq->offset;
+
+ tail = msgq->position;
+
+ available = head - tail;
+ if (size > available) {
+ FLCNQ_ERR(msgq, "requested %d bytes, but only %d available",
+ size, available);
+ return -EINVAL;
+ }
+
+ nvkm_falcon_read_dmem(falcon, tail, size, 0, data);
+ msgq->position += ALIGN(size, QUEUE_ALIGNMENT);
+ return 0;
+}
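
The consumer detects a producer wrap without any explicit flag: the firmware only moves head forward inside [offset, offset + size), so seeing head below our cached read position can only mean it wrapped, and reading resumes from the queue base. With invented numbers (offset 0x800, size 0x100):

	/* cached msgq->position = 0x8f0, firmware head now reads 0x810 */
	if (head < msgq->position)		/* 0x810 < 0x8f0: wrapped */
		msgq->position = msgq->offset;	/* continue at 0x800 */
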
+
+static int
+nvkm_falcon_msgq_read(struct nvkm_falcon_msgq *msgq, struct nv_falcon_msg *hdr)
+{
+ int ret = 0;
+
+ nvkm_falcon_msgq_open(msgq);
+
+ if (nvkm_falcon_msgq_empty(msgq))
+ goto close;
+
+ ret = nvkm_falcon_msgq_pop(msgq, hdr, HDR_SIZE);
+ if (ret) {
+ FLCNQ_ERR(msgq, "failed to read message header");
+ goto close;
+ }
+
+ if (hdr->size > MSG_BUF_SIZE) {
+ FLCNQ_ERR(msgq, "message too big, %d bytes", hdr->size);
+ ret = -ENOSPC;
+ goto close;
+ }
+
+ if (hdr->size > HDR_SIZE) {
+ u32 read_size = hdr->size - HDR_SIZE;
+
+ ret = nvkm_falcon_msgq_pop(msgq, (hdr + 1), read_size);
+ if (ret) {
+ FLCNQ_ERR(msgq, "failed to read message data");
+ goto close;
+ }
+ }
+
+ ret = 1;
+close:
+ nvkm_falcon_msgq_close(msgq, (ret >= 0));
+ return ret;
+}
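
Messages are variable-length: a fixed 4-byte header whose size field gives the total length, followed by an optional payload, which is why the function pops the header first and only then, if size exceeds HDR_SIZE, the remainder. The same pattern against a plain memory buffer, independent of falcon DMEM (the struct mirrors the nv_falcon_msg header layout assumed by this series):

	#include <stdint.h>
	#include <string.h>

	struct msg_hdr {
		uint8_t unit_id;
		uint8_t size;		/* total size, header included */
		uint8_t ctrl_flags;
		uint8_t seq_id;
	};

	/* Copy one message out of a flat buffer: header, then payload. */
	static int
	read_one_msg(const uint8_t *src, uint8_t *dst, size_t dst_len)
	{
		const struct msg_hdr *hdr = (const void *)src;

		if (hdr->size < sizeof(*hdr) || hdr->size > dst_len)
			return -1;	/* malformed, or too big for caller */

		memcpy(dst, src, sizeof(*hdr));			/* header */
		memcpy(dst + sizeof(*hdr), src + sizeof(*hdr),
		       hdr->size - sizeof(*hdr));		/* payload */
		return hdr->size;
	}
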
+
+static int
+nvkm_falcon_msgq_exec(struct nvkm_falcon_msgq *msgq, struct nv_falcon_msg *hdr)
+{
+ struct nvkm_falcon_qmgr_seq *seq;
+
+ seq = &msgq->qmgr->seq.id[hdr->seq_id];
+ if (seq->state != SEQ_STATE_USED && seq->state != SEQ_STATE_CANCELLED) {
+ FLCNQ_ERR(msgq, "message for unknown sequence %08x", seq->id);
+ return -EINVAL;
+ }
+
+ if (seq->state == SEQ_STATE_USED) {
+ if (seq->callback)
+ seq->result = seq->callback(seq->priv, hdr);
+ }
+
+ if (seq->async) {
+ nvkm_falcon_qmgr_seq_release(msgq->qmgr, seq);
+ return 0;
+ }
+
+ complete_all(&seq->done);
+ return 0;
+}
+
+void
+nvkm_falcon_msgq_recv(struct nvkm_falcon_msgq *msgq)
+{
+ /*
+ * We are invoked from a worker thread, so normally we have plenty of
+ * stack space to work with.
+ */
+ u8 msg_buffer[MSG_BUF_SIZE];
+ struct nv_falcon_msg *hdr = (void *)msg_buffer;
+
+ while (nvkm_falcon_msgq_read(msgq, hdr) > 0)
+ nvkm_falcon_msgq_exec(msgq, hdr);
+}
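
nvkm_falcon_msgq_recv() is the message pump and is meant to run from process context (hence the on-stack MSG_BUF_SIZE buffer). A hedged sketch of wiring it to a workqueue so the falcon interrupt handler only schedules the drain (names invented):

	struct my_msgq_work {
		struct work_struct work;
		struct nvkm_falcon_msgq *msgq;
	};

	static void
	my_msgq_drain(struct work_struct *work)
	{
		struct my_msgq_work *w =
			container_of(work, struct my_msgq_work, work);

		nvkm_falcon_msgq_recv(w->msgq);
	}

	/* setup: INIT_WORK(&w->work, my_msgq_drain);
	 * from the falcon IRQ handler: schedule_work(&w->work); */
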
+
+int
+nvkm_falcon_msgq_recv_initmsg(struct nvkm_falcon_msgq *msgq,
+ void *data, u32 size)
+{
+ struct nvkm_falcon *falcon = msgq->qmgr->falcon;
+ struct nv_falcon_msg *hdr = data;
+ int ret;
+
+ msgq->head_reg = falcon->func->msgq.head;
+ msgq->tail_reg = falcon->func->msgq.tail;
+ msgq->offset = nvkm_falcon_rd32(falcon, falcon->func->msgq.tail);
+
+ nvkm_falcon_msgq_open(msgq);
+ ret = nvkm_falcon_msgq_pop(msgq, data, size);
+ if (ret == 0 && hdr->size != size) {
+ FLCN_ERR(falcon, "unexpected init message size %d vs %d",
+ hdr->size, size);
+ ret = -EINVAL;
+ }
+ nvkm_falcon_msgq_close(msgq, ret == 0);
+ return ret;
+}
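
recv_initmsg() handles the chicken-and-egg at boot: the queue geometry is only described inside the firmware's first message, so the head/tail registers are taken straight from the falcon function table and the read offset is seeded from the current tail. Note it insists the message size matches the caller's buffer exactly. The expected bootstrap order, sketched with invented values:

	u8 buf[64];	/* must equal the firmware's init message size */
	int ret;

	ret = nvkm_falcon_msgq_recv_initmsg(msgq, buf, sizeof(buf));
	if (ret)
		return ret;

	/* parse buf as the firmware-specific init message, then: */
	nvkm_falcon_msgq_init(msgq, 1, 0x900, 0x100);	/* values from buf */
	nvkm_falcon_cmdq_init(cmdq, 0, 0x800, 0x100);
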
+
+void
+nvkm_falcon_msgq_init(struct nvkm_falcon_msgq *msgq,
+ u32 index, u32 offset, u32 size)
+{
+ const struct nvkm_falcon_func *func = msgq->qmgr->falcon->func;
+
+ msgq->head_reg = func->msgq.head + index * func->msgq.stride;
+ msgq->tail_reg = func->msgq.tail + index * func->msgq.stride;
+ msgq->offset = offset;
+
+ FLCNQ_DBG(msgq, "initialised @ index %d offset 0x%08x size 0x%08x",
+ index, msgq->offset, size);
+}
+
+void
+nvkm_falcon_msgq_del(struct nvkm_falcon_msgq **pmsgq)
+{
+ struct nvkm_falcon_msgq *msgq = *pmsgq;
+ if (msgq) {
+ kfree(*pmsgq);
+ *pmsgq = NULL;
+ }
+}
+
+int
+nvkm_falcon_msgq_new(struct nvkm_falcon_qmgr *qmgr, const char *name,
+ struct nvkm_falcon_msgq **pmsgq)
+{
+ struct nvkm_falcon_msgq *msgq = *pmsgq;
+
+ if (!(msgq = *pmsgq = kzalloc(sizeof(*msgq), GFP_KERNEL)))
+ return -ENOMEM;
+
+ msgq->qmgr = qmgr;
+ msgq->name = name;
+ mutex_init(&msgq->mutex);
+ return 0;
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/falcon/msgqueue.c b/drivers/gpu/drm/nouveau/nvkm/falcon/msgqueue.c
deleted file mode 100644
index a8bee1e046aa..000000000000
--- a/drivers/gpu/drm/nouveau/nvkm/falcon/msgqueue.c
+++ /dev/null
@@ -1,577 +0,0 @@
-/*
- * Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- */
-
-#include "msgqueue.h"
-#include <engine/falcon.h>
-
-#include <subdev/secboot.h>
-
-
-#define HDR_SIZE sizeof(struct nvkm_msgqueue_hdr)
-#define QUEUE_ALIGNMENT 4
-/* max size of the messages we can receive */
-#define MSG_BUF_SIZE 128
-
-static int
-msg_queue_open(struct nvkm_msgqueue *priv, struct nvkm_msgqueue_queue *queue)
-{
- struct nvkm_falcon *falcon = priv->falcon;
-
- mutex_lock(&queue->mutex);
-
- queue->position = nvkm_falcon_rd32(falcon, queue->tail_reg);
-
- return 0;
-}
-
-static void
-msg_queue_close(struct nvkm_msgqueue *priv, struct nvkm_msgqueue_queue *queue,
- bool commit)
-{
- struct nvkm_falcon *falcon = priv->falcon;
-
- if (commit)
- nvkm_falcon_wr32(falcon, queue->tail_reg, queue->position);
-
- mutex_unlock(&queue->mutex);
-}
-
-static bool
-msg_queue_empty(struct nvkm_msgqueue *priv, struct nvkm_msgqueue_queue *queue)
-{
- struct nvkm_falcon *falcon = priv->falcon;
- u32 head, tail;
-
- head = nvkm_falcon_rd32(falcon, queue->head_reg);
- tail = nvkm_falcon_rd32(falcon, queue->tail_reg);
-
- return head == tail;
-}
-
-static int
-msg_queue_pop(struct nvkm_msgqueue *priv, struct nvkm_msgqueue_queue *queue,
- void *data, u32 size)
-{
- struct nvkm_falcon *falcon = priv->falcon;
- const struct nvkm_subdev *subdev = priv->falcon->owner;
- u32 head, tail, available;
-
- head = nvkm_falcon_rd32(falcon, queue->head_reg);
- /* has the buffer looped? */
- if (head < queue->position)
- queue->position = queue->offset;
-
- tail = queue->position;
-
- available = head - tail;
-
- if (available == 0) {
- nvkm_warn(subdev, "no message data available\n");
- return 0;
- }
-
- if (size > available) {
- nvkm_warn(subdev, "message data smaller than read request\n");
- size = available;
- }
-
- nvkm_falcon_read_dmem(priv->falcon, tail, size, 0, data);
- queue->position += ALIGN(size, QUEUE_ALIGNMENT);
-
- return size;
-}
-
-static int
-msg_queue_read(struct nvkm_msgqueue *priv, struct nvkm_msgqueue_queue *queue,
- struct nvkm_msgqueue_hdr *hdr)
-{
- const struct nvkm_subdev *subdev = priv->falcon->owner;
- int err;
-
- err = msg_queue_open(priv, queue);
- if (err) {
- nvkm_error(subdev, "fail to open queue %d\n", queue->index);
- return err;
- }
-
- if (msg_queue_empty(priv, queue)) {
- err = 0;
- goto close;
- }
-
- err = msg_queue_pop(priv, queue, hdr, HDR_SIZE);
- if (err >= 0 && err != HDR_SIZE)
- err = -EINVAL;
- if (err < 0) {
- nvkm_error(subdev, "failed to read message header: %d\n", err);
- goto close;
- }
-
- if (hdr->size > MSG_BUF_SIZE) {
- nvkm_error(subdev, "message too big (%d bytes)\n", hdr->size);
- err = -ENOSPC;
- goto close;
- }
-
- if (hdr->size > HDR_SIZE) {
- u32 read_size = hdr->size - HDR_SIZE;
-
- err = msg_queue_pop(priv, queue, (hdr + 1), read_size);
- if (err >= 0 && err != read_size)
- err = -EINVAL;
- if (err < 0) {
- nvkm_error(subdev, "failed to read message: %d\n", err);
- goto close;
- }
- }
-
-close:
- msg_queue_close(priv, queue, (err >= 0));
-
- return err;
-}
-
-static bool
-cmd_queue_has_room(struct nvkm_msgqueue *priv, struct nvkm_msgqueue_queue *queue,
- u32 size, bool *rewind)
-{
- struct nvkm_falcon *falcon = priv->falcon;
- u32 head, tail, free;
-
- size = ALIGN(size, QUEUE_ALIGNMENT);
-
- head = nvkm_falcon_rd32(falcon, queue->head_reg);
- tail = nvkm_falcon_rd32(falcon, queue->tail_reg);
-
- if (head >= tail) {
- free = queue->offset + queue->size - head;
- free -= HDR_SIZE;
-
- if (size > free) {
- *rewind = true;
- head = queue->offset;
- }
- }
-
- if (head < tail)
- free = tail - head - 1;
-
- return size <= free;
-}
-
-static int
-cmd_queue_push(struct nvkm_msgqueue *priv, struct nvkm_msgqueue_queue *queue,
- void *data, u32 size)
-{
- nvkm_falcon_load_dmem(priv->falcon, data, queue->position, size, 0);
- queue->position += ALIGN(size, QUEUE_ALIGNMENT);
-
- return 0;
-}
-
-/* REWIND unit is always 0x00 */
-#define MSGQUEUE_UNIT_REWIND 0x00
-
-static void
-cmd_queue_rewind(struct nvkm_msgqueue *priv, struct nvkm_msgqueue_queue *queue)
-{
- const struct nvkm_subdev *subdev = priv->falcon->owner;
- struct nvkm_msgqueue_hdr cmd;
- int err;
-
- cmd.unit_id = MSGQUEUE_UNIT_REWIND;
- cmd.size = sizeof(cmd);
- err = cmd_queue_push(priv, queue, &cmd, cmd.size);
- if (err)
- nvkm_error(subdev, "queue %d rewind failed\n", queue->index);
- else
- nvkm_error(subdev, "queue %d rewinded\n", queue->index);
-
- queue->position = queue->offset;
-}
-
-static int
-cmd_queue_open(struct nvkm_msgqueue *priv, struct nvkm_msgqueue_queue *queue,
- u32 size)
-{
- struct nvkm_falcon *falcon = priv->falcon;
- const struct nvkm_subdev *subdev = priv->falcon->owner;
- bool rewind = false;
-
- mutex_lock(&queue->mutex);
-
- if (!cmd_queue_has_room(priv, queue, size, &rewind)) {
- nvkm_error(subdev, "queue full\n");
- mutex_unlock(&queue->mutex);
- return -EAGAIN;
- }
-
- queue->position = nvkm_falcon_rd32(falcon, queue->head_reg);
-
- if (rewind)
- cmd_queue_rewind(priv, queue);
-
- return 0;
-}
-
-static void
-cmd_queue_close(struct nvkm_msgqueue *priv, struct nvkm_msgqueue_queue *queue,
- bool commit)
-{
- struct nvkm_falcon *falcon = priv->falcon;
-
- if (commit)
- nvkm_falcon_wr32(falcon, queue->head_reg, queue->position);
-
- mutex_unlock(&queue->mutex);
-}
-
-static int
-cmd_write(struct nvkm_msgqueue *priv, struct nvkm_msgqueue_hdr *cmd,
- struct nvkm_msgqueue_queue *queue)
-{
- const struct nvkm_subdev *subdev = priv->falcon->owner;
- static unsigned timeout = 2000;
- unsigned long end_jiffies = jiffies + msecs_to_jiffies(timeout);
- int ret = -EAGAIN;
- bool commit = true;
-
- while (ret == -EAGAIN && time_before(jiffies, end_jiffies))
- ret = cmd_queue_open(priv, queue, cmd->size);
- if (ret) {
- nvkm_error(subdev, "pmu_queue_open_write failed\n");
- return ret;
- }
-
- ret = cmd_queue_push(priv, queue, cmd, cmd->size);
- if (ret) {
- nvkm_error(subdev, "pmu_queue_push failed\n");
- commit = false;
- }
-
- cmd_queue_close(priv, queue, commit);
-
- return ret;
-}
-
-static struct nvkm_msgqueue_seq *
-msgqueue_seq_acquire(struct nvkm_msgqueue *priv)
-{
- const struct nvkm_subdev *subdev = priv->falcon->owner;
- struct nvkm_msgqueue_seq *seq;
- u32 index;
-
- mutex_lock(&priv->seq_lock);
-
- index = find_first_zero_bit(priv->seq_tbl, NVKM_MSGQUEUE_NUM_SEQUENCES);
-
- if (index >= NVKM_MSGQUEUE_NUM_SEQUENCES) {
- nvkm_error(subdev, "no free sequence available\n");
- mutex_unlock(&priv->seq_lock);
- return ERR_PTR(-EAGAIN);
- }
-
- set_bit(index, priv->seq_tbl);
-
- mutex_unlock(&priv->seq_lock);
-
- seq = &priv->seq[index];
- seq->state = SEQ_STATE_PENDING;
-
- return seq;
-}
-
-static void
-msgqueue_seq_release(struct nvkm_msgqueue *priv, struct nvkm_msgqueue_seq *seq)
-{
- /* no need to acquire seq_lock since clear_bit is atomic */
- seq->state = SEQ_STATE_FREE;
- seq->callback = NULL;
- seq->completion = NULL;
- clear_bit(seq->id, priv->seq_tbl);
-}
-
-/* specifies that we want to know the command status in the answer message */
-#define CMD_FLAGS_STATUS BIT(0)
-/* specifies that we want an interrupt when the answer message is queued */
-#define CMD_FLAGS_INTR BIT(1)
-
-int
-nvkm_msgqueue_post(struct nvkm_msgqueue *priv, enum msgqueue_msg_priority prio,
- struct nvkm_msgqueue_hdr *cmd, nvkm_msgqueue_callback cb,
- struct completion *completion, bool wait_init)
-{
- struct nvkm_msgqueue_seq *seq;
- struct nvkm_msgqueue_queue *queue;
- int ret;
-
- if (wait_init && !wait_for_completion_timeout(&priv->init_done,
- msecs_to_jiffies(1000)))
- return -ETIMEDOUT;
-
- queue = priv->func->cmd_queue(priv, prio);
- if (IS_ERR(queue))
- return PTR_ERR(queue);
-
- seq = msgqueue_seq_acquire(priv);
- if (IS_ERR(seq))
- return PTR_ERR(seq);
-
- cmd->seq_id = seq->id;
- cmd->ctrl_flags = CMD_FLAGS_STATUS | CMD_FLAGS_INTR;
-
- seq->callback = cb;
- seq->state = SEQ_STATE_USED;
- seq->completion = completion;
-
- ret = cmd_write(priv, cmd, queue);
- if (ret) {
- seq->state = SEQ_STATE_PENDING;
- msgqueue_seq_release(priv, seq);
- }
-
- return ret;
-}
-
-static int
-msgqueue_msg_handle(struct nvkm_msgqueue *priv, struct nvkm_msgqueue_hdr *hdr)
-{
- const struct nvkm_subdev *subdev = priv->falcon->owner;
- struct nvkm_msgqueue_seq *seq;
-
- seq = &priv->seq[hdr->seq_id];
- if (seq->state != SEQ_STATE_USED && seq->state != SEQ_STATE_CANCELLED) {
- nvkm_error(subdev, "msg for unknown sequence %d", seq->id);
- return -EINVAL;
- }
-
- if (seq->state == SEQ_STATE_USED) {
- if (seq->callback)
- seq->callback(priv, hdr);
- }
-
- if (seq->completion)
- complete(seq->completion);
-
- msgqueue_seq_release(priv, seq);
-
- return 0;
-}
-
-static int
-msgqueue_handle_init_msg(struct nvkm_msgqueue *priv,
- struct nvkm_msgqueue_hdr *hdr)
-{
- struct nvkm_falcon *falcon = priv->falcon;
- const struct nvkm_subdev *subdev = falcon->owner;
- u32 tail;
- u32 tail_reg;
- int ret;
-
- /*
- * Of course the message queue registers vary depending on the falcon
- * used...
- */
- switch (falcon->owner->index) {
- case NVKM_SUBDEV_PMU:
- tail_reg = 0x4cc;
- break;
- case NVKM_ENGINE_SEC2:
- tail_reg = 0xa34;
- break;
- default:
- nvkm_error(subdev, "falcon %s unsupported for msgqueue!\n",
- nvkm_subdev_name[falcon->owner->index]);
- return -EINVAL;
- }
-
- /*
- * Read the message - queues are not initialized yet so we cannot rely
- * on msg_queue_read()
- */
- tail = nvkm_falcon_rd32(falcon, tail_reg);
- nvkm_falcon_read_dmem(falcon, tail, HDR_SIZE, 0, hdr);
-
- if (hdr->size > MSG_BUF_SIZE) {
- nvkm_error(subdev, "message too big (%d bytes)\n", hdr->size);
- return -ENOSPC;
- }
-
- nvkm_falcon_read_dmem(falcon, tail + HDR_SIZE, hdr->size - HDR_SIZE, 0,
- (hdr + 1));
-
- tail += ALIGN(hdr->size, QUEUE_ALIGNMENT);
- nvkm_falcon_wr32(falcon, tail_reg, tail);
-
- ret = priv->func->init_func->init_callback(priv, hdr);
- if (ret)
- return ret;
-
- return 0;
-}
-
-void
-nvkm_msgqueue_process_msgs(struct nvkm_msgqueue *priv,
- struct nvkm_msgqueue_queue *queue)
-{
- /*
- * We are invoked from a worker thread, so normally we have plenty of
- * stack space to work with.
- */
- u8 msg_buffer[MSG_BUF_SIZE];
- struct nvkm_msgqueue_hdr *hdr = (void *)msg_buffer;
- int ret;
-
- /* the first message we receive must be the init message */
- if ((!priv->init_msg_received)) {
- ret = msgqueue_handle_init_msg(priv, hdr);
- if (!ret)
- priv->init_msg_received = true;
- } else {
- while (msg_queue_read(priv, queue, hdr) > 0)
- msgqueue_msg_handle(priv, hdr);
- }
-}
-
-void
-nvkm_msgqueue_write_cmdline(struct nvkm_msgqueue *queue, void *buf)
-{
- if (!queue || !queue->func || !queue->func->init_func)
- return;
-
- queue->func->init_func->gen_cmdline(queue, buf);
-}
-
-int
-nvkm_msgqueue_acr_boot_falcons(struct nvkm_msgqueue *queue,
- unsigned long falcon_mask)
-{
- unsigned long falcon;
-
- if (!queue || !queue->func->acr_func)
- return -ENODEV;
-
- /* Does the firmware support booting multiple falcons? */
- if (queue->func->acr_func->boot_multiple_falcons)
- return queue->func->acr_func->boot_multiple_falcons(queue,
- falcon_mask);
-
- /* Else boot all requested falcons individually */
- if (!queue->func->acr_func->boot_falcon)
- return -ENODEV;
-
- for_each_set_bit(falcon, &falcon_mask, NVKM_SECBOOT_FALCON_END) {
- int ret = queue->func->acr_func->boot_falcon(queue, falcon);
-
- if (ret)
- return ret;
- }
-
- return 0;
-}
-
-int
-nvkm_msgqueue_new(u32 version, struct nvkm_falcon *falcon,
- const struct nvkm_secboot *sb, struct nvkm_msgqueue **queue)
-{
- const struct nvkm_subdev *subdev = falcon->owner;
- int ret = -EINVAL;
-
- switch (version) {
- case 0x0137c63d:
- ret = msgqueue_0137c63d_new(falcon, sb, queue);
- break;
- case 0x0137bca5:
- ret = msgqueue_0137bca5_new(falcon, sb, queue);
- break;
- case 0x0148cdec:
- case 0x015ccf3e:
- case 0x0167d263:
- ret = msgqueue_0148cdec_new(falcon, sb, queue);
- break;
- default:
- nvkm_error(subdev, "unhandled firmware version 0x%08x\n",
- version);
- break;
- }
-
- if (ret == 0) {
- nvkm_debug(subdev, "firmware version: 0x%08x\n", version);
- (*queue)->fw_version = version;
- }
-
- return ret;
-}
-
-void
-nvkm_msgqueue_del(struct nvkm_msgqueue **queue)
-{
- if (*queue) {
- (*queue)->func->dtor(*queue);
- *queue = NULL;
- }
-}
-
-void
-nvkm_msgqueue_recv(struct nvkm_msgqueue *queue)
-{
- if (!queue->func || !queue->func->recv) {
- const struct nvkm_subdev *subdev = queue->falcon->owner;
-
- nvkm_warn(subdev, "missing msgqueue recv function\n");
- return;
- }
-
- queue->func->recv(queue);
-}
-
-int
-nvkm_msgqueue_reinit(struct nvkm_msgqueue *queue)
-{
- /* firmware not set yet... */
- if (!queue)
- return 0;
-
- queue->init_msg_received = false;
- reinit_completion(&queue->init_done);
-
- return 0;
-}
-
-void
-nvkm_msgqueue_ctor(const struct nvkm_msgqueue_func *func,
- struct nvkm_falcon *falcon,
- struct nvkm_msgqueue *queue)
-{
- int i;
-
- queue->func = func;
- queue->falcon = falcon;
- mutex_init(&queue->seq_lock);
- for (i = 0; i < NVKM_MSGQUEUE_NUM_SEQUENCES; i++)
- queue->seq[i].id = i;
-
- init_completion(&queue->init_done);
-
-
-}
diff --git a/drivers/gpu/drm/nouveau/nvkm/falcon/msgqueue.h b/drivers/gpu/drm/nouveau/nvkm/falcon/msgqueue.h
deleted file mode 100644
index 13b54f8d8e04..000000000000
--- a/drivers/gpu/drm/nouveau/nvkm/falcon/msgqueue.h
+++ /dev/null
@@ -1,213 +0,0 @@
-/*
- * Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- */
-
-#ifndef __NVKM_CORE_FALCON_MSGQUEUE_H
-#define __NVKM_CORE_FALCON_MSGQUEUE_H
-
-#include <core/msgqueue.h>
-
-/*
- * The struct nvkm_msgqueue (named so for lack of better candidate) manages
- * a firmware (typically, NVIDIA signed firmware) running under a given falcon.
- *
- * Such firmwares expect to receive commands (through one or several command
- * queues) and will reply to such command by sending messages (using one
- * message queue).
- *
- * Each firmware can support one or several units - ACR for managing secure
- * falcons, PMU for power management, etc. A unit can be seen as a class to
- * which commands can be sent.
- *
- * One usage example would be to send a command to the SEC falcon to ask it to
- * reset a secure falcon. The SEC falcon will receive the command, process it,
- * and send a message to signal success or failure. Only when the corresponding
- * message is received can the requester assume the request has been processed.
- *
- * Since we expect many variations between the firmwares NVIDIA will release
- * across GPU generations, this library is built in a very modular way. Message
- * formats and queue details (such as their number and usage) are left to
- * specializations of struct nvkm_msgqueue, while the functions in msgqueue.c
- * take care of posting commands and processing messages in a fashion that is
- * universal.
- *
- */
-
-enum msgqueue_msg_priority {
- MSGQUEUE_MSG_PRIORITY_HIGH,
- MSGQUEUE_MSG_PRIORITY_LOW,
-};
-
-/**
- * struct nvkm_msgqueue_hdr - header for all commands/messages
- * @unit_id: id of the firmware unit receiving the command/sending the message
- * @size: total size of command/message
- * @ctrl_flags: type of command/message
- * @seq_id: used to match a message from its corresponding command
- */
-struct nvkm_msgqueue_hdr {
- u8 unit_id;
- u8 size;
- u8 ctrl_flags;
- u8 seq_id;
-};
-
-/**
- * struct nvkm_msgqueue_msg - base message.
- *
- * This is just a header and a message (or command) type. Useful when
- * building command-specific structures.
- */
-struct nvkm_msgqueue_msg {
- struct nvkm_msgqueue_hdr hdr;
- u8 msg_type;
-};
-
-struct nvkm_msgqueue;
-typedef void
-(*nvkm_msgqueue_callback)(struct nvkm_msgqueue *, struct nvkm_msgqueue_hdr *);
-
-/**
- * struct nvkm_msgqueue_init_func - msgqueue functions related to initialization
- *
- * @gen_cmdline: build the commandline into a pre-allocated buffer
- * @init_callback: called to process the init message
- */
-struct nvkm_msgqueue_init_func {
- void (*gen_cmdline)(struct nvkm_msgqueue *, void *);
- int (*init_callback)(struct nvkm_msgqueue *, struct nvkm_msgqueue_hdr *);
-};
-
-/**
- * struct nvkm_msgqueue_acr_func - msgqueue functions related to ACR
- *
- * @boot_falcon: build and send the command to reset a given falcon
- * @boot_multiple_falcons: build and send the command to reset several falcons
- */
-struct nvkm_msgqueue_acr_func {
- int (*boot_falcon)(struct nvkm_msgqueue *, enum nvkm_secboot_falcon);
- int (*boot_multiple_falcons)(struct nvkm_msgqueue *, unsigned long);
-};
-
-struct nvkm_msgqueue_func {
- const struct nvkm_msgqueue_init_func *init_func;
- const struct nvkm_msgqueue_acr_func *acr_func;
- void (*dtor)(struct nvkm_msgqueue *);
- struct nvkm_msgqueue_queue *(*cmd_queue)(struct nvkm_msgqueue *,
- enum msgqueue_msg_priority);
- void (*recv)(struct nvkm_msgqueue *queue);
-};
-
-/**
- * struct nvkm_msgqueue_queue - information about a command or message queue
- *
- * The number of queues is firmware-dependent. All queues must have their
- * information filled by the init message handler.
- *
- * @mutex: to be acquired when the queue is being used
- * @index: physical queue index
- * @offset: DMEM offset where this queue begins
- * @size: size allocated to this queue in DMEM (in bytes)
- * @position: current write position
- * @head_reg: address of the HEAD register for this queue
- * @tail_reg: address of the TAIL register for this queue
- */
-struct nvkm_msgqueue_queue {
- struct mutex mutex;
- u32 index;
- u32 offset;
- u32 size;
- u32 position;
-
- u32 head_reg;
- u32 tail_reg;
-};
-
-/**
- * struct nvkm_msgqueue_seq - keep track of ongoing commands
- *
- * Every time a command is sent, a sequence is assigned to it so the
- * corresponding message can be matched. Upon receiving the message, a callback
- * can be called and/or a completion signaled.
- *
- * @id: sequence ID
- * @state: current state
- * @callback: callback to call upon receiving matching message
- * @completion: completion to signal after callback is called
- */
-struct nvkm_msgqueue_seq {
- u16 id;
- enum {
- SEQ_STATE_FREE = 0,
- SEQ_STATE_PENDING,
- SEQ_STATE_USED,
- SEQ_STATE_CANCELLED
- } state;
- nvkm_msgqueue_callback callback;
- struct completion *completion;
-};
-
-/*
- * We can have an arbitrary number of sequences, but realistically we will
- * probably not use that much simultaneously.
- */
-#define NVKM_MSGQUEUE_NUM_SEQUENCES 16
-
-/**
- * struct nvkm_msgqueue - manage a command/message based FW on a falcon
- *
- * @falcon: falcon to be managed
- * @func: implementation of the firmware to use
- * @init_msg_received: whether the init message has already been received
- * @init_done: whether all init is complete and commands can be processed
- * @seq_lock: protects seq and seq_tbl
- * @seq: sequences to match commands and messages
- * @seq_tbl: bitmap of sequences currently in use
- */
-struct nvkm_msgqueue {
- struct nvkm_falcon *falcon;
- const struct nvkm_msgqueue_func *func;
- u32 fw_version;
- bool init_msg_received;
- struct completion init_done;
-
- struct mutex seq_lock;
- struct nvkm_msgqueue_seq seq[NVKM_MSGQUEUE_NUM_SEQUENCES];
- unsigned long seq_tbl[BITS_TO_LONGS(NVKM_MSGQUEUE_NUM_SEQUENCES)];
-};
-
-void nvkm_msgqueue_ctor(const struct nvkm_msgqueue_func *, struct nvkm_falcon *,
- struct nvkm_msgqueue *);
-int nvkm_msgqueue_post(struct nvkm_msgqueue *, enum msgqueue_msg_priority,
- struct nvkm_msgqueue_hdr *, nvkm_msgqueue_callback,
- struct completion *, bool);
-void nvkm_msgqueue_process_msgs(struct nvkm_msgqueue *,
- struct nvkm_msgqueue_queue *);
-
-int msgqueue_0137c63d_new(struct nvkm_falcon *, const struct nvkm_secboot *,
- struct nvkm_msgqueue **);
-int msgqueue_0137bca5_new(struct nvkm_falcon *, const struct nvkm_secboot *,
- struct nvkm_msgqueue **);
-int msgqueue_0148cdec_new(struct nvkm_falcon *, const struct nvkm_secboot *,
- struct nvkm_msgqueue **);
-
-#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/falcon/msgqueue_0137c63d.c b/drivers/gpu/drm/nouveau/nvkm/falcon/msgqueue_0137c63d.c
deleted file mode 100644
index fec0273158f6..000000000000
--- a/drivers/gpu/drm/nouveau/nvkm/falcon/msgqueue_0137c63d.c
+++ /dev/null
@@ -1,436 +0,0 @@
-/*
- * Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- */
-#include "msgqueue.h"
-#include <engine/falcon.h>
-#include <subdev/secboot.h>
-
-/* Queues identifiers */
-enum {
- /* High Priority Command Queue for Host -> PMU communication */
- MSGQUEUE_0137C63D_COMMAND_QUEUE_HPQ = 0,
- /* Low Priority Command Queue for Host -> PMU communication */
- MSGQUEUE_0137C63D_COMMAND_QUEUE_LPQ = 1,
- /* Message queue for PMU -> Host communication */
- MSGQUEUE_0137C63D_MESSAGE_QUEUE = 4,
- MSGQUEUE_0137C63D_NUM_QUEUES = 5,
-};
-
-struct msgqueue_0137c63d {
- struct nvkm_msgqueue base;
-
- struct nvkm_msgqueue_queue queue[MSGQUEUE_0137C63D_NUM_QUEUES];
-};
-#define msgqueue_0137c63d(q) \
- container_of(q, struct msgqueue_0137c63d, base)
-
-struct msgqueue_0137bca5 {
- struct msgqueue_0137c63d base;
-
- u64 wpr_addr;
-};
-#define msgqueue_0137bca5(q) \
- container_of(container_of(q, struct msgqueue_0137c63d, base), \
- struct msgqueue_0137bca5, base);
-
-static struct nvkm_msgqueue_queue *
-msgqueue_0137c63d_cmd_queue(struct nvkm_msgqueue *queue,
- enum msgqueue_msg_priority priority)
-{
- struct msgqueue_0137c63d *priv = msgqueue_0137c63d(queue);
- const struct nvkm_subdev *subdev = priv->base.falcon->owner;
-
- switch (priority) {
- case MSGQUEUE_MSG_PRIORITY_HIGH:
- return &priv->queue[MSGQUEUE_0137C63D_COMMAND_QUEUE_HPQ];
- case MSGQUEUE_MSG_PRIORITY_LOW:
- return &priv->queue[MSGQUEUE_0137C63D_COMMAND_QUEUE_LPQ];
- default:
- nvkm_error(subdev, "invalid command queue!\n");
- return ERR_PTR(-EINVAL);
- }
-}
-
-static void
-msgqueue_0137c63d_process_msgs(struct nvkm_msgqueue *queue)
-{
- struct msgqueue_0137c63d *priv = msgqueue_0137c63d(queue);
- struct nvkm_msgqueue_queue *q_queue =
- &priv->queue[MSGQUEUE_0137C63D_MESSAGE_QUEUE];
-
- nvkm_msgqueue_process_msgs(&priv->base, q_queue);
-}
-
-/* Init unit */
-#define MSGQUEUE_0137C63D_UNIT_INIT 0x07
-
-enum {
- INIT_MSG_INIT = 0x0,
-};
-
-static void
-init_gen_cmdline(struct nvkm_msgqueue *queue, void *buf)
-{
- struct {
- u32 reserved;
- u32 freq_hz;
- u32 trace_size;
- u32 trace_dma_base;
- u16 trace_dma_base1;
- u8 trace_dma_offset;
- u32 trace_dma_idx;
- bool secure_mode;
- bool raise_priv_sec;
- struct {
- u32 dma_base;
- u16 dma_base1;
- u8 dma_offset;
- u16 fb_size;
- u8 dma_idx;
- } gc6_ctx;
- u8 pad;
- } *args = buf;
-
- args->secure_mode = 1;
-}
-
-/* forward declaration */
-static int acr_init_wpr(struct nvkm_msgqueue *queue);
-
-static int
-init_callback(struct nvkm_msgqueue *_queue, struct nvkm_msgqueue_hdr *hdr)
-{
- struct msgqueue_0137c63d *priv = msgqueue_0137c63d(_queue);
- struct {
- struct nvkm_msgqueue_msg base;
-
- u8 pad;
- u16 os_debug_entry_point;
-
- struct {
- u16 size;
- u16 offset;
- u8 index;
- u8 pad;
- } queue_info[MSGQUEUE_0137C63D_NUM_QUEUES];
-
- u16 sw_managed_area_offset;
- u16 sw_managed_area_size;
- } *init = (void *)hdr;
- const struct nvkm_subdev *subdev = _queue->falcon->owner;
- int i;
-
- if (init->base.hdr.unit_id != MSGQUEUE_0137C63D_UNIT_INIT) {
- nvkm_error(subdev, "expected message from init unit\n");
- return -EINVAL;
- }
-
- if (init->base.msg_type != INIT_MSG_INIT) {
- nvkm_error(subdev, "expected PMU init msg\n");
- return -EINVAL;
- }
-
- for (i = 0; i < MSGQUEUE_0137C63D_NUM_QUEUES; i++) {
- struct nvkm_msgqueue_queue *queue = &priv->queue[i];
-
- mutex_init(&queue->mutex);
-
- queue->index = init->queue_info[i].index;
- queue->offset = init->queue_info[i].offset;
- queue->size = init->queue_info[i].size;
-
- if (i != MSGQUEUE_0137C63D_MESSAGE_QUEUE) {
- queue->head_reg = 0x4a0 + (queue->index * 4);
- queue->tail_reg = 0x4b0 + (queue->index * 4);
- } else {
- queue->head_reg = 0x4c8;
- queue->tail_reg = 0x4cc;
- }
-
- nvkm_debug(subdev,
- "queue %d: index %d, offset 0x%08x, size 0x%08x\n",
- i, queue->index, queue->offset, queue->size);
- }
-
- /* Complete initialization by initializing WPR region */
- return acr_init_wpr(&priv->base);
-}
-
-static const struct nvkm_msgqueue_init_func
-msgqueue_0137c63d_init_func = {
- .gen_cmdline = init_gen_cmdline,
- .init_callback = init_callback,
-};
-
-
-
-/* ACR unit */
-#define MSGQUEUE_0137C63D_UNIT_ACR 0x0a
-
-enum {
- ACR_CMD_INIT_WPR_REGION = 0x00,
- ACR_CMD_BOOTSTRAP_FALCON = 0x01,
- ACR_CMD_BOOTSTRAP_MULTIPLE_FALCONS = 0x03,
-};
-
-static void
-acr_init_wpr_callback(struct nvkm_msgqueue *queue,
- struct nvkm_msgqueue_hdr *hdr)
-{
- struct {
- struct nvkm_msgqueue_msg base;
- u32 error_code;
- } *msg = (void *)hdr;
- const struct nvkm_subdev *subdev = queue->falcon->owner;
-
- if (msg->error_code) {
- nvkm_error(subdev, "ACR WPR init failure: %d\n",
- msg->error_code);
- return;
- }
-
- nvkm_debug(subdev, "ACR WPR init complete\n");
- complete_all(&queue->init_done);
-}
-
-static int
-acr_init_wpr(struct nvkm_msgqueue *queue)
-{
- /*
- * region_id: region ID in WPR region
- * wpr_offset: offset in WPR region
- */
- struct {
- struct nvkm_msgqueue_hdr hdr;
- u8 cmd_type;
- u32 region_id;
- u32 wpr_offset;
- } cmd;
- memset(&cmd, 0, sizeof(cmd));
-
- cmd.hdr.unit_id = MSGQUEUE_0137C63D_UNIT_ACR;
- cmd.hdr.size = sizeof(cmd);
- cmd.cmd_type = ACR_CMD_INIT_WPR_REGION;
- cmd.region_id = 0x01;
- cmd.wpr_offset = 0x00;
-
- nvkm_msgqueue_post(queue, MSGQUEUE_MSG_PRIORITY_HIGH, &cmd.hdr,
- acr_init_wpr_callback, NULL, false);
-
- return 0;
-}
-
-
-static void
-acr_boot_falcon_callback(struct nvkm_msgqueue *priv,
- struct nvkm_msgqueue_hdr *hdr)
-{
- struct acr_bootstrap_falcon_msg {
- struct nvkm_msgqueue_msg base;
-
- u32 falcon_id;
- } *msg = (void *)hdr;
- const struct nvkm_subdev *subdev = priv->falcon->owner;
- u32 falcon_id = msg->falcon_id;
-
- if (falcon_id >= NVKM_SECBOOT_FALCON_END) {
- nvkm_error(subdev, "in bootstrap falcon callback:\n");
- nvkm_error(subdev, "invalid falcon ID 0x%x\n", falcon_id);
- return;
- }
- nvkm_debug(subdev, "%s booted\n", nvkm_secboot_falcon_name[falcon_id]);
-}
-
-enum {
- ACR_CMD_BOOTSTRAP_FALCON_FLAGS_RESET_YES = 0,
- ACR_CMD_BOOTSTRAP_FALCON_FLAGS_RESET_NO = 1,
-};
-
-static int
-acr_boot_falcon(struct nvkm_msgqueue *priv, enum nvkm_secboot_falcon falcon)
-{
- DECLARE_COMPLETION_ONSTACK(completed);
- /*
- * flags - Flag specifying RESET or no RESET.
- * falcon id - Falcon id specifying falcon to bootstrap.
- */
- struct {
- struct nvkm_msgqueue_hdr hdr;
- u8 cmd_type;
- u32 flags;
- u32 falcon_id;
- } cmd;
-
- memset(&cmd, 0, sizeof(cmd));
-
- cmd.hdr.unit_id = MSGQUEUE_0137C63D_UNIT_ACR;
- cmd.hdr.size = sizeof(cmd);
- cmd.cmd_type = ACR_CMD_BOOTSTRAP_FALCON;
- cmd.flags = ACR_CMD_BOOTSTRAP_FALCON_FLAGS_RESET_YES;
- cmd.falcon_id = falcon;
- nvkm_msgqueue_post(priv, MSGQUEUE_MSG_PRIORITY_HIGH, &cmd.hdr,
- acr_boot_falcon_callback, &completed, true);
-
- if (!wait_for_completion_timeout(&completed, msecs_to_jiffies(1000)))
- return -ETIMEDOUT;
-
- return 0;
-}
-
-static void
-acr_boot_multiple_falcons_callback(struct nvkm_msgqueue *priv,
- struct nvkm_msgqueue_hdr *hdr)
-{
- struct acr_bootstrap_falcon_msg {
- struct nvkm_msgqueue_msg base;
-
- u32 falcon_mask;
- } *msg = (void *)hdr;
- const struct nvkm_subdev *subdev = priv->falcon->owner;
- unsigned long falcon_mask = msg->falcon_mask;
- u32 falcon_id, falcon_treated = 0;
-
- for_each_set_bit(falcon_id, &falcon_mask, NVKM_SECBOOT_FALCON_END) {
- nvkm_debug(subdev, "%s booted\n",
- nvkm_secboot_falcon_name[falcon_id]);
- falcon_treated |= BIT(falcon_id);
- }
-
- if (falcon_treated != msg->falcon_mask) {
- nvkm_error(subdev, "in bootstrap falcon callback:\n");
- nvkm_error(subdev, "invalid falcon mask 0x%x\n",
- msg->falcon_mask);
- return;
- }
-}
-
-static int
-acr_boot_multiple_falcons(struct nvkm_msgqueue *priv, unsigned long falcon_mask)
-{
- DECLARE_COMPLETION_ONSTACK(completed);
- /*
- * flags - Flag specifying RESET or no RESET.
- * falcon id - Falcon id specifying falcon to bootstrap.
- */
- struct {
- struct nvkm_msgqueue_hdr hdr;
- u8 cmd_type;
- u32 flags;
- u32 falcon_mask;
- u32 use_va_mask;
- u32 wpr_lo;
- u32 wpr_hi;
- } cmd;
- struct msgqueue_0137bca5 *queue = msgqueue_0137bca5(priv);
-
- memset(&cmd, 0, sizeof(cmd));
-
- cmd.hdr.unit_id = MSGQUEUE_0137C63D_UNIT_ACR;
- cmd.hdr.size = sizeof(cmd);
- cmd.cmd_type = ACR_CMD_BOOTSTRAP_MULTIPLE_FALCONS;
- cmd.flags = ACR_CMD_BOOTSTRAP_FALCON_FLAGS_RESET_YES;
- cmd.falcon_mask = falcon_mask;
- cmd.wpr_lo = lower_32_bits(queue->wpr_addr);
- cmd.wpr_hi = upper_32_bits(queue->wpr_addr);
- nvkm_msgqueue_post(priv, MSGQUEUE_MSG_PRIORITY_HIGH, &cmd.hdr,
- acr_boot_multiple_falcons_callback, &completed, true);
-
- if (!wait_for_completion_timeout(&completed, msecs_to_jiffies(1000)))
- return -ETIMEDOUT;
-
- return 0;
-}
-
-static const struct nvkm_msgqueue_acr_func
-msgqueue_0137c63d_acr_func = {
- .boot_falcon = acr_boot_falcon,
-};
-
-static const struct nvkm_msgqueue_acr_func
-msgqueue_0137bca5_acr_func = {
- .boot_falcon = acr_boot_falcon,
- .boot_multiple_falcons = acr_boot_multiple_falcons,
-};
-
-static void
-msgqueue_0137c63d_dtor(struct nvkm_msgqueue *queue)
-{
- kfree(msgqueue_0137c63d(queue));
-}
-
-static const struct nvkm_msgqueue_func
-msgqueue_0137c63d_func = {
- .init_func = &msgqueue_0137c63d_init_func,
- .acr_func = &msgqueue_0137c63d_acr_func,
- .cmd_queue = msgqueue_0137c63d_cmd_queue,
- .recv = msgqueue_0137c63d_process_msgs,
- .dtor = msgqueue_0137c63d_dtor,
-};
-
-int
-msgqueue_0137c63d_new(struct nvkm_falcon *falcon, const struct nvkm_secboot *sb,
- struct nvkm_msgqueue **queue)
-{
- struct msgqueue_0137c63d *ret;
-
- ret = kzalloc(sizeof(*ret), GFP_KERNEL);
- if (!ret)
- return -ENOMEM;
-
- *queue = &ret->base;
-
- nvkm_msgqueue_ctor(&msgqueue_0137c63d_func, falcon, &ret->base);
-
- return 0;
-}
-
-static const struct nvkm_msgqueue_func
-msgqueue_0137bca5_func = {
- .init_func = &msgqueue_0137c63d_init_func,
- .acr_func = &msgqueue_0137bca5_acr_func,
- .cmd_queue = msgqueue_0137c63d_cmd_queue,
- .recv = msgqueue_0137c63d_process_msgs,
- .dtor = msgqueue_0137c63d_dtor,
-};
-
-int
-msgqueue_0137bca5_new(struct nvkm_falcon *falcon, const struct nvkm_secboot *sb,
- struct nvkm_msgqueue **queue)
-{
- struct msgqueue_0137bca5 *ret;
-
- ret = kzalloc(sizeof(*ret), GFP_KERNEL);
- if (!ret)
- return -ENOMEM;
-
- *queue = &ret->base.base;
-
- /*
- * FIXME this must be set to the address of a *GPU* mapping within the
- * ACR address space!
- */
- /* ret->wpr_addr = sb->wpr_addr; */
-
- nvkm_msgqueue_ctor(&msgqueue_0137bca5_func, falcon, &ret->base.base);
-
- return 0;
-}
diff --git a/drivers/gpu/drm/nouveau/nvkm/falcon/msgqueue_0148cdec.c b/drivers/gpu/drm/nouveau/nvkm/falcon/msgqueue_0148cdec.c
deleted file mode 100644
index 9424803b9ef4..000000000000
--- a/drivers/gpu/drm/nouveau/nvkm/falcon/msgqueue_0148cdec.c
+++ /dev/null
@@ -1,264 +0,0 @@
-/*
- * Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- */
-
-#include "msgqueue.h"
-#include <engine/falcon.h>
-#include <subdev/secboot.h>
-
-/*
- * This firmware runs on the SEC falcon. It only has one command and one
- * message queue, and uses a different command line and init message.
- */
-
-enum {
- MSGQUEUE_0148CDEC_COMMAND_QUEUE = 0,
- MSGQUEUE_0148CDEC_MESSAGE_QUEUE = 1,
- MSGQUEUE_0148CDEC_NUM_QUEUES,
-};
-
-struct msgqueue_0148cdec {
- struct nvkm_msgqueue base;
-
- struct nvkm_msgqueue_queue queue[MSGQUEUE_0148CDEC_NUM_QUEUES];
-};
-#define msgqueue_0148cdec(q) \
- container_of(q, struct msgqueue_0148cdec, base)
-
-static struct nvkm_msgqueue_queue *
-msgqueue_0148cdec_cmd_queue(struct nvkm_msgqueue *queue,
- enum msgqueue_msg_priority priority)
-{
- struct msgqueue_0148cdec *priv = msgqueue_0148cdec(queue);
-
- return &priv->queue[MSGQUEUE_0148CDEC_COMMAND_QUEUE];
-}
-
-static void
-msgqueue_0148cdec_process_msgs(struct nvkm_msgqueue *queue)
-{
- struct msgqueue_0148cdec *priv = msgqueue_0148cdec(queue);
- struct nvkm_msgqueue_queue *q_queue =
- &priv->queue[MSGQUEUE_0148CDEC_MESSAGE_QUEUE];
-
- nvkm_msgqueue_process_msgs(&priv->base, q_queue);
-}
-
-
-/* Init unit */
-#define MSGQUEUE_0148CDEC_UNIT_INIT 0x01
-
-enum {
- INIT_MSG_INIT = 0x0,
-};
-
-static void
-init_gen_cmdline(struct nvkm_msgqueue *queue, void *buf)
-{
- struct {
- u32 freq_hz;
- u32 falc_trace_size;
- u32 falc_trace_dma_base;
- u32 falc_trace_dma_idx;
- bool secure_mode;
- } *args = buf;
-
- args->secure_mode = false;
-}
-
-static int
-init_callback(struct nvkm_msgqueue *_queue, struct nvkm_msgqueue_hdr *hdr)
-{
- struct msgqueue_0148cdec *priv = msgqueue_0148cdec(_queue);
- struct {
- struct nvkm_msgqueue_msg base;
-
- u8 num_queues;
- u16 os_debug_entry_point;
-
- struct {
- u32 offset;
- u16 size;
- u8 index;
- u8 id;
- } queue_info[MSGQUEUE_0148CDEC_NUM_QUEUES];
-
- u16 sw_managed_area_offset;
- u16 sw_managed_area_size;
- } *init = (void *)hdr;
- const struct nvkm_subdev *subdev = _queue->falcon->owner;
- int i;
-
- if (init->base.hdr.unit_id != MSGQUEUE_0148CDEC_UNIT_INIT) {
- nvkm_error(subdev, "expected message from init unit\n");
- return -EINVAL;
- }
-
- if (init->base.msg_type != INIT_MSG_INIT) {
- nvkm_error(subdev, "expected SEC init msg\n");
- return -EINVAL;
- }
-
- for (i = 0; i < MSGQUEUE_0148CDEC_NUM_QUEUES; i++) {
- u8 id = init->queue_info[i].id;
- struct nvkm_msgqueue_queue *queue = &priv->queue[id];
-
- mutex_init(&queue->mutex);
-
- queue->index = init->queue_info[i].index;
- queue->offset = init->queue_info[i].offset;
- queue->size = init->queue_info[i].size;
-
- if (id == MSGQUEUE_0148CDEC_MESSAGE_QUEUE) {
- queue->head_reg = 0xa30 + (queue->index * 8);
- queue->tail_reg = 0xa34 + (queue->index * 8);
- } else {
- queue->head_reg = 0xa00 + (queue->index * 8);
- queue->tail_reg = 0xa04 + (queue->index * 8);
- }
-
- nvkm_debug(subdev,
- "queue %d: index %d, offset 0x%08x, size 0x%08x\n",
- id, queue->index, queue->offset, queue->size);
- }
-
- complete_all(&_queue->init_done);
-
- return 0;
-}
-
-static const struct nvkm_msgqueue_init_func
-msgqueue_0148cdec_init_func = {
- .gen_cmdline = init_gen_cmdline,
- .init_callback = init_callback,
-};
-
-
-
-/* ACR unit */
-#define MSGQUEUE_0148CDEC_UNIT_ACR 0x08
-
-enum {
- ACR_CMD_BOOTSTRAP_FALCON = 0x00,
-};
-
-static void
-acr_boot_falcon_callback(struct nvkm_msgqueue *priv,
- struct nvkm_msgqueue_hdr *hdr)
-{
- struct acr_bootstrap_falcon_msg {
- struct nvkm_msgqueue_msg base;
-
- u32 error_code;
- u32 falcon_id;
- } *msg = (void *)hdr;
- const struct nvkm_subdev *subdev = priv->falcon->owner;
- u32 falcon_id = msg->falcon_id;
-
- if (msg->error_code) {
- nvkm_error(subdev, "in bootstrap falcon callback:\n");
- nvkm_error(subdev, "expected error code 0x%x\n",
- msg->error_code);
- return;
- }
-
- if (falcon_id >= NVKM_SECBOOT_FALCON_END) {
- nvkm_error(subdev, "in bootstrap falcon callback:\n");
- nvkm_error(subdev, "invalid falcon ID 0x%x\n", falcon_id);
- return;
- }
-
- nvkm_debug(subdev, "%s booted\n", nvkm_secboot_falcon_name[falcon_id]);
-}
-
-enum {
- ACR_CMD_BOOTSTRAP_FALCON_FLAGS_RESET_YES = 0,
- ACR_CMD_BOOTSTRAP_FALCON_FLAGS_RESET_NO = 1,
-};
-
-static int
-acr_boot_falcon(struct nvkm_msgqueue *priv, enum nvkm_secboot_falcon falcon)
-{
- DECLARE_COMPLETION_ONSTACK(completed);
- /*
- * flags - Flag specifying RESET or no RESET.
- * falcon id - Falcon id specifying falcon to bootstrap.
- */
- struct {
- struct nvkm_msgqueue_hdr hdr;
- u8 cmd_type;
- u32 flags;
- u32 falcon_id;
- } cmd;
-
- memset(&cmd, 0, sizeof(cmd));
-
- cmd.hdr.unit_id = MSGQUEUE_0148CDEC_UNIT_ACR;
- cmd.hdr.size = sizeof(cmd);
- cmd.cmd_type = ACR_CMD_BOOTSTRAP_FALCON;
- cmd.flags = ACR_CMD_BOOTSTRAP_FALCON_FLAGS_RESET_YES;
- cmd.falcon_id = falcon;
- nvkm_msgqueue_post(priv, MSGQUEUE_MSG_PRIORITY_HIGH, &cmd.hdr,
- acr_boot_falcon_callback, &completed, true);
-
- if (!wait_for_completion_timeout(&completed, msecs_to_jiffies(1000)))
- return -ETIMEDOUT;
-
- return 0;
-}
-
-const struct nvkm_msgqueue_acr_func
-msgqueue_0148cdec_acr_func = {
- .boot_falcon = acr_boot_falcon,
-};
-
-static void
-msgqueue_0148cdec_dtor(struct nvkm_msgqueue *queue)
-{
- kfree(msgqueue_0148cdec(queue));
-}
-
-const struct nvkm_msgqueue_func
-msgqueue_0148cdec_func = {
- .init_func = &msgqueue_0148cdec_init_func,
- .acr_func = &msgqueue_0148cdec_acr_func,
- .cmd_queue = msgqueue_0148cdec_cmd_queue,
- .recv = msgqueue_0148cdec_process_msgs,
- .dtor = msgqueue_0148cdec_dtor,
-};
-
-int
-msgqueue_0148cdec_new(struct nvkm_falcon *falcon, const struct nvkm_secboot *sb,
- struct nvkm_msgqueue **queue)
-{
- struct msgqueue_0148cdec *ret;
-
- ret = kzalloc(sizeof(*ret), GFP_KERNEL);
- if (!ret)
- return -ENOMEM;
-
- *queue = &ret->base;
-
- nvkm_msgqueue_ctor(&msgqueue_0148cdec_func, falcon, &ret->base);
-
- return 0;
-}
diff --git a/drivers/gpu/drm/nouveau/nvkm/falcon/priv.h b/drivers/gpu/drm/nouveau/nvkm/falcon/priv.h
index 900fe1d37b4d..466188752eb0 100644
--- a/drivers/gpu/drm/nouveau/nvkm/falcon/priv.h
+++ b/drivers/gpu/drm/nouveau/nvkm/falcon/priv.h
@@ -1,9 +1,5 @@
/* SPDX-License-Identifier: MIT */
#ifndef __NVKM_FALCON_PRIV_H__
#define __NVKM_FALCON_PRIV_H__
-#include <engine/falcon.h>
-
-void
-nvkm_falcon_ctor(const struct nvkm_falcon_func *, struct nvkm_subdev *,
- const char *, u32, struct nvkm_falcon *);
+#include <core/falcon.h>
#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/falcon/qmgr.c b/drivers/gpu/drm/nouveau/nvkm/falcon/qmgr.c
new file mode 100644
index 000000000000..a453de341a75
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/falcon/qmgr.c
@@ -0,0 +1,87 @@
+/*
+ * Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#include "qmgr.h"
+
+struct nvkm_falcon_qmgr_seq *
+nvkm_falcon_qmgr_seq_acquire(struct nvkm_falcon_qmgr *qmgr)
+{
+ const struct nvkm_subdev *subdev = qmgr->falcon->owner;
+ struct nvkm_falcon_qmgr_seq *seq;
+ u32 index;
+
+ mutex_lock(&qmgr->seq.mutex);
+ index = find_first_zero_bit(qmgr->seq.tbl, NVKM_FALCON_QMGR_SEQ_NUM);
+ if (index >= NVKM_FALCON_QMGR_SEQ_NUM) {
+ nvkm_error(subdev, "no free sequence available\n");
+ mutex_unlock(&qmgr->seq.mutex);
+ return ERR_PTR(-EAGAIN);
+ }
+
+ set_bit(index, qmgr->seq.tbl);
+ mutex_unlock(&qmgr->seq.mutex);
+
+ seq = &qmgr->seq.id[index];
+ seq->state = SEQ_STATE_PENDING;
+ return seq;
+}
+
+void
+nvkm_falcon_qmgr_seq_release(struct nvkm_falcon_qmgr *qmgr,
+ struct nvkm_falcon_qmgr_seq *seq)
+{
+ /* no need to acquire seq.mutex since clear_bit is atomic */
+ seq->state = SEQ_STATE_FREE;
+ seq->callback = NULL;
+ reinit_completion(&seq->done);
+ clear_bit(seq->id, qmgr->seq.tbl);
+}
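
Sequence IDs are handed out from a small bitmap: allocation scans for the first clear bit under seq.mutex, while release only needs the atomic clear_bit(). The same allocator in isolation (slot count assumed equal to NVKM_FALCON_QMGR_SEQ_NUM):

	#define MY_SEQ_NUM 16

	struct my_seq_alloc {
		struct mutex mutex;
		unsigned long tbl[BITS_TO_LONGS(MY_SEQ_NUM)];
	};

	static int
	my_seq_acquire(struct my_seq_alloc *a)
	{
		u32 index;

		mutex_lock(&a->mutex);
		index = find_first_zero_bit(a->tbl, MY_SEQ_NUM);
		if (index >= MY_SEQ_NUM) {
			mutex_unlock(&a->mutex);
			return -EAGAIN;		/* all slots in flight */
		}
		set_bit(index, a->tbl);
		mutex_unlock(&a->mutex);
		return index;
	}

	static void
	my_seq_release(struct my_seq_alloc *a, int index)
	{
		clear_bit(index, a->tbl);	/* atomic, no lock needed */
	}
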
+
+void
+nvkm_falcon_qmgr_del(struct nvkm_falcon_qmgr **pqmgr)
+{
+ struct nvkm_falcon_qmgr *qmgr = *pqmgr;
+ if (qmgr) {
+ kfree(*pqmgr);
+ *pqmgr = NULL;
+ }
+}
+
+int
+nvkm_falcon_qmgr_new(struct nvkm_falcon *falcon,
+ struct nvkm_falcon_qmgr **pqmgr)
+{
+ struct nvkm_falcon_qmgr *qmgr;
+ int i;
+
+ if (!(qmgr = *pqmgr = kzalloc(sizeof(*qmgr), GFP_KERNEL)))
+ return -ENOMEM;
+
+ qmgr->falcon = falcon;
+ mutex_init(&qmgr->seq.mutex);
+ for (i = 0; i < NVKM_FALCON_QMGR_SEQ_NUM; i++) {
+ qmgr->seq.id[i].id = i;
+ init_completion(&qmgr->seq.id[i].done);
+ }
+
+ return 0;
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/falcon/qmgr.h b/drivers/gpu/drm/nouveau/nvkm/falcon/qmgr.h
new file mode 100644
index 000000000000..a45cd705e4f7
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/falcon/qmgr.h
@@ -0,0 +1,89 @@
+/* SPDX-License-Identifier: MIT */
+#ifndef __NVKM_FALCON_QMGR_H__
+#define __NVKM_FALCON_QMGR_H__
+#include <core/falcon.h>
+
+#define HDR_SIZE sizeof(struct nv_falcon_msg)
+#define QUEUE_ALIGNMENT 4
+/* max size of the messages we can receive */
+#define MSG_BUF_SIZE 128
+
+/**
+ * struct nvkm_falcon_qmgr_seq - keep track of ongoing commands
+ *
+ * Every time a command is sent, a sequence is assigned to it so the
+ * corresponding message can be matched. Upon receiving the message, a callback
+ * can be called and/or a completion signaled.
+ *
+ * @id: sequence ID
+ * @state: current state
+ * @async: true when the sender does not wait for the reply; the sequence
+ *         is then released directly from the message handler
+ * @callback: callback to call upon receiving matching message
+ * @priv: opaque pointer passed to @callback
+ * @done: completion signaled once the matching message has been handled
+ * @result: return value of @callback, reported to synchronous senders
+ */
+struct nvkm_falcon_qmgr_seq {
+ u16 id;
+ enum {
+ SEQ_STATE_FREE = 0,
+ SEQ_STATE_PENDING,
+ SEQ_STATE_USED,
+ SEQ_STATE_CANCELLED
+ } state;
+ bool async;
+ nvkm_falcon_qmgr_callback callback;
+ void *priv;
+ struct completion done;
+ int result;
+};
+
+/*
+ * We can have an arbitrary number of sequences, but realistically we will
+ * probably not use that much simultaneously.
+ */
+#define NVKM_FALCON_QMGR_SEQ_NUM 16
+
+struct nvkm_falcon_qmgr {
+ struct nvkm_falcon *falcon;
+
+ struct {
+ struct mutex mutex;
+ struct nvkm_falcon_qmgr_seq id[NVKM_FALCON_QMGR_SEQ_NUM];
+ unsigned long tbl[BITS_TO_LONGS(NVKM_FALCON_QMGR_SEQ_NUM)];
+ } seq;
+};
+
+struct nvkm_falcon_qmgr_seq *
+nvkm_falcon_qmgr_seq_acquire(struct nvkm_falcon_qmgr *);
+void nvkm_falcon_qmgr_seq_release(struct nvkm_falcon_qmgr *,
+ struct nvkm_falcon_qmgr_seq *);
+
+struct nvkm_falcon_cmdq {
+ struct nvkm_falcon_qmgr *qmgr;
+ const char *name;
+ struct mutex mutex;
+ struct completion ready;
+
+ u32 head_reg;
+ u32 tail_reg;
+ u32 offset;
+ u32 size;
+
+ u32 position;
+};
+
+struct nvkm_falcon_msgq {
+ struct nvkm_falcon_qmgr *qmgr;
+ const char *name;
+ struct mutex mutex;
+
+ u32 head_reg;
+ u32 tail_reg;
+ u32 offset;
+
+ u32 position;
+};
+
+#define FLCNQ_PRINTK(t,q,f,a...) \
+ FLCN_PRINTK(t, (q)->qmgr->falcon, "%s: "f, (q)->name, ##a)
+#define FLCNQ_DBG(q,f,a...) FLCNQ_PRINTK(debug, (q), f, ##a)
+#define FLCNQ_ERR(q,f,a...) FLCNQ_PRINTK(error, (q), f, ##a)
+#endif
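
The FLCNQ_* wrappers simply prefix the queue's name onto the falcon-level FLCN_PRINTK output. A usage sketch, assuming a struct nvkm_falcon_cmdq *cmdq whose name is "cmdq":

	FLCNQ_ERR(cmdq, "timeout waiting for queue space");
	/* expands to roughly:
	 * FLCN_PRINTK(error, cmdq->qmgr->falcon, "%s: timeout waiting for
	 *             queue space", cmdq->name);
	 */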
diff --git a/drivers/gpu/drm/nouveau/nvkm/falcon/v1.c b/drivers/gpu/drm/nouveau/nvkm/falcon/v1.c
index 6d978feebbd7..1ff9b9c2e651 100644
--- a/drivers/gpu/drm/nouveau/nvkm/falcon/v1.c
+++ b/drivers/gpu/drm/nouveau/nvkm/falcon/v1.c
@@ -25,7 +25,7 @@
#include <core/memory.h>
#include <subdev/timer.h>
-static void
+void
nvkm_falcon_v1_load_imem(struct nvkm_falcon *falcon, void *data, u32 start,
u32 size, u16 tag, u8 port, bool secure)
{
@@ -89,18 +89,17 @@ nvkm_falcon_v1_load_emem(struct nvkm_falcon *falcon, void *data, u32 start,
}
}
-static const u32 EMEM_START_ADDR = 0x1000000;
-
-static void
+void
nvkm_falcon_v1_load_dmem(struct nvkm_falcon *falcon, void *data, u32 start,
- u32 size, u8 port)
+ u32 size, u8 port)
{
+ const struct nvkm_falcon_func *func = falcon->func;
u8 rem = size % 4;
int i;
- if (start >= EMEM_START_ADDR && falcon->has_emem)
+ if (func->emem_addr && start >= func->emem_addr)
return nvkm_falcon_v1_load_emem(falcon, data,
- start - EMEM_START_ADDR, size,
+ start - func->emem_addr, size,
port);
size -= rem;
@@ -148,15 +147,16 @@ nvkm_falcon_v1_read_emem(struct nvkm_falcon *falcon, u32 start, u32 size,
}
}
-static void
+void
nvkm_falcon_v1_read_dmem(struct nvkm_falcon *falcon, u32 start, u32 size,
u8 port, void *data)
{
+ const struct nvkm_falcon_func *func = falcon->func;
u8 rem = size % 4;
int i;
- if (start >= EMEM_START_ADDR && falcon->has_emem)
- return nvkm_falcon_v1_read_emem(falcon, start - EMEM_START_ADDR,
+ if (func->emem_addr && start >= func->emem_addr)
+ return nvkm_falcon_v1_read_emem(falcon, start - func->emem_addr,
size, port, data);
size -= rem;
@@ -179,12 +179,11 @@ nvkm_falcon_v1_read_dmem(struct nvkm_falcon *falcon, u32 start, u32 size,
}
}
-static void
+void
nvkm_falcon_v1_bind_context(struct nvkm_falcon *falcon, struct nvkm_memory *ctx)
{
- struct nvkm_device *device = falcon->owner->device;
+ const u32 fbif = falcon->func->fbif;
u32 inst_loc;
- u32 fbif;
/* disable instance block binding */
if (ctx == NULL) {
@@ -192,20 +191,6 @@ nvkm_falcon_v1_bind_context(struct nvkm_falcon *falcon, struct nvkm_memory *ctx)
return;
}
- switch (falcon->owner->index) {
- case NVKM_ENGINE_NVENC0:
- case NVKM_ENGINE_NVENC1:
- case NVKM_ENGINE_NVENC2:
- fbif = 0x800;
- break;
- case NVKM_SUBDEV_PMU:
- fbif = 0xe00;
- break;
- default:
- fbif = 0x600;
- break;
- }
-
nvkm_falcon_wr32(falcon, 0x10c, 0x1);
/* setup apertures - virtual */
@@ -234,50 +219,15 @@ nvkm_falcon_v1_bind_context(struct nvkm_falcon *falcon, struct nvkm_memory *ctx)
nvkm_falcon_mask(falcon, 0x090, 0x10000, 0x10000);
nvkm_falcon_mask(falcon, 0x0a4, 0x8, 0x8);
-
- /* Not sure if this is a WAR for a HW issue, or some additional
- * programming sequence that's needed to properly complete the
- * context switch we trigger above.
- *
- * Fixes unreliability of booting the SEC2 RTOS on Quadro P620,
- * particularly when resuming from suspend.
- *
- * Also removes the need for an odd workaround where we needed
- * to program SEC2's FALCON_CPUCTL_ALIAS_STARTCPU twice before
- * the SEC2 RTOS would begin executing.
- */
- switch (falcon->owner->index) {
- case NVKM_SUBDEV_GSP:
- case NVKM_ENGINE_SEC2:
- nvkm_msec(device, 10,
- u32 irqstat = nvkm_falcon_rd32(falcon, 0x008);
- u32 flcn0dc = nvkm_falcon_rd32(falcon, 0x0dc);
- if ((irqstat & 0x00000008) &&
- (flcn0dc & 0x00007000) == 0x00005000)
- break;
- );
-
- nvkm_falcon_mask(falcon, 0x004, 0x00000008, 0x00000008);
- nvkm_falcon_mask(falcon, 0x058, 0x00000002, 0x00000002);
-
- nvkm_msec(device, 10,
- u32 flcn0dc = nvkm_falcon_rd32(falcon, 0x0dc);
- if ((flcn0dc & 0x00007000) == 0x00000000)
- break;
- );
- break;
- default:
- break;
- }
}
-static void
+void
nvkm_falcon_v1_set_start_addr(struct nvkm_falcon *falcon, u32 start_addr)
{
nvkm_falcon_wr32(falcon, 0x104, start_addr);
}
-static void
+void
nvkm_falcon_v1_start(struct nvkm_falcon *falcon)
{
u32 reg = nvkm_falcon_rd32(falcon, 0x100);
@@ -288,7 +238,7 @@ nvkm_falcon_v1_start(struct nvkm_falcon *falcon)
nvkm_falcon_wr32(falcon, 0x100, 0x2);
}
-static int
+int
nvkm_falcon_v1_wait_for_halt(struct nvkm_falcon *falcon, u32 ms)
{
struct nvkm_device *device = falcon->owner->device;
@@ -301,7 +251,7 @@ nvkm_falcon_v1_wait_for_halt(struct nvkm_falcon *falcon, u32 ms)
return 0;
}
-static int
+int
nvkm_falcon_v1_clear_interrupt(struct nvkm_falcon *falcon, u32 mask)
{
struct nvkm_device *device = falcon->owner->device;
@@ -330,7 +280,7 @@ falcon_v1_wait_idle(struct nvkm_falcon *falcon)
return 0;
}
-static int
+int
nvkm_falcon_v1_enable(struct nvkm_falcon *falcon)
{
struct nvkm_device *device = falcon->owner->device;
@@ -352,7 +302,7 @@ nvkm_falcon_v1_enable(struct nvkm_falcon *falcon)
return 0;
}
-static void
+void
nvkm_falcon_v1_disable(struct nvkm_falcon *falcon)
{
/* disable IRQs and wait for any previous code to complete */
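
With EMEM_START_ADDR and the owner-index switch gone, each chip-specific falcon is now expected to supply these values through nvkm_falcon_func. A hedged sketch of such a table (the numeric values are taken from the constants the hunks above delete; the hook names assume the function-pointer layout this series introduces):

	static const struct nvkm_falcon_func
	example_falcon_sketch = {
		.fbif = 0x600,		/* old default from the deleted switch */
		.emem_addr = 0x1000000,	/* the deleted EMEM_START_ADDR */
		.load_imem = nvkm_falcon_v1_load_imem,
		.load_dmem = nvkm_falcon_v1_load_dmem,
		.read_dmem = nvkm_falcon_v1_read_dmem,
		.bind_context = nvkm_falcon_v1_bind_context,
		.start = nvkm_falcon_v1_start,
	};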
diff --git a/drivers/gpu/drm/nouveau/nvkm/nvfw/Kbuild b/drivers/gpu/drm/nouveau/nvkm/nvfw/Kbuild
new file mode 100644
index 000000000000..41d75f98e603
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/nvfw/Kbuild
@@ -0,0 +1,7 @@
+# SPDX-License-Identifier: MIT
+nvkm-y += nvkm/nvfw/fw.o
+nvkm-y += nvkm/nvfw/hs.o
+nvkm-y += nvkm/nvfw/ls.o
+
+nvkm-y += nvkm/nvfw/acr.o
+nvkm-y += nvkm/nvfw/flcn.o
diff --git a/drivers/gpu/drm/nouveau/nvkm/nvfw/acr.c b/drivers/gpu/drm/nouveau/nvkm/nvfw/acr.c
new file mode 100644
index 000000000000..0d063b8317f7
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/nvfw/acr.c
@@ -0,0 +1,165 @@
+/*
+ * Copyright 2019 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include <core/subdev.h>
+#include <nvfw/acr.h>
+
+void
+wpr_header_dump(struct nvkm_subdev *subdev, const struct wpr_header *hdr)
+{
+ nvkm_debug(subdev, "wprHeader\n");
+ nvkm_debug(subdev, "\tfalconID : %d\n", hdr->falcon_id);
+ nvkm_debug(subdev, "\tlsbOffset : 0x%x\n", hdr->lsb_offset);
+ nvkm_debug(subdev, "\tbootstrapOwner: %d\n", hdr->bootstrap_owner);
+ nvkm_debug(subdev, "\tlazyBootstrap : %d\n", hdr->lazy_bootstrap);
+ nvkm_debug(subdev, "\tstatus : %d\n", hdr->status);
+}
+
+void
+wpr_header_v1_dump(struct nvkm_subdev *subdev, const struct wpr_header_v1 *hdr)
+{
+ nvkm_debug(subdev, "wprHeader\n");
+ nvkm_debug(subdev, "\tfalconID : %d\n", hdr->falcon_id);
+ nvkm_debug(subdev, "\tlsbOffset : 0x%x\n", hdr->lsb_offset);
+ nvkm_debug(subdev, "\tbootstrapOwner: %d\n", hdr->bootstrap_owner);
+ nvkm_debug(subdev, "\tlazyBootstrap : %d\n", hdr->lazy_bootstrap);
+ nvkm_debug(subdev, "\tbinVersion : %d\n", hdr->bin_version);
+ nvkm_debug(subdev, "\tstatus : %d\n", hdr->status);
+}
+
+void
+lsb_header_tail_dump(struct nvkm_subdev *subdev,
+ struct lsb_header_tail *hdr)
+{
+ nvkm_debug(subdev, "lsbHeader\n");
+ nvkm_debug(subdev, "\tucodeOff : 0x%x\n", hdr->ucode_off);
+ nvkm_debug(subdev, "\tucodeSize : 0x%x\n", hdr->ucode_size);
+ nvkm_debug(subdev, "\tdataSize : 0x%x\n", hdr->data_size);
+ nvkm_debug(subdev, "\tblCodeSize : 0x%x\n", hdr->bl_code_size);
+ nvkm_debug(subdev, "\tblImemOff : 0x%x\n", hdr->bl_imem_off);
+ nvkm_debug(subdev, "\tblDataOff : 0x%x\n", hdr->bl_data_off);
+ nvkm_debug(subdev, "\tblDataSize : 0x%x\n", hdr->bl_data_size);
+ nvkm_debug(subdev, "\tappCodeOff : 0x%x\n", hdr->app_code_off);
+ nvkm_debug(subdev, "\tappCodeSize : 0x%x\n", hdr->app_code_size);
+ nvkm_debug(subdev, "\tappDataOff : 0x%x\n", hdr->app_data_off);
+ nvkm_debug(subdev, "\tappDataSize : 0x%x\n", hdr->app_data_size);
+ nvkm_debug(subdev, "\tflags : 0x%x\n", hdr->flags);
+}
+
+void
+lsb_header_dump(struct nvkm_subdev *subdev, struct lsb_header *hdr)
+{
+ lsb_header_tail_dump(subdev, &hdr->tail);
+}
+
+void
+lsb_header_v1_dump(struct nvkm_subdev *subdev, struct lsb_header_v1 *hdr)
+{
+ lsb_header_tail_dump(subdev, &hdr->tail);
+}
+
+void
+flcn_acr_desc_dump(struct nvkm_subdev *subdev, struct flcn_acr_desc *hdr)
+{
+ int i;
+
+ nvkm_debug(subdev, "acrDesc\n");
+ nvkm_debug(subdev, "\twprRegionId : %d\n", hdr->wpr_region_id);
+ nvkm_debug(subdev, "\twprOffset : 0x%x\n", hdr->wpr_offset);
+ nvkm_debug(subdev, "\tmmuMemRange : 0x%x\n",
+ hdr->mmu_mem_range);
+ nvkm_debug(subdev, "\tnoRegions : %d\n",
+ hdr->regions.no_regions);
+
+ for (i = 0; i < ARRAY_SIZE(hdr->regions.region_props); i++) {
+ nvkm_debug(subdev, "\tregion[%d] :\n", i);
+ nvkm_debug(subdev, "\t startAddr : 0x%x\n",
+ hdr->regions.region_props[i].start_addr);
+ nvkm_debug(subdev, "\t endAddr : 0x%x\n",
+ hdr->regions.region_props[i].end_addr);
+ nvkm_debug(subdev, "\t regionId : %d\n",
+ hdr->regions.region_props[i].region_id);
+ nvkm_debug(subdev, "\t readMask : 0x%x\n",
+ hdr->regions.region_props[i].read_mask);
+ nvkm_debug(subdev, "\t writeMask : 0x%x\n",
+ hdr->regions.region_props[i].write_mask);
+ nvkm_debug(subdev, "\t clientMask : 0x%x\n",
+ hdr->regions.region_props[i].client_mask);
+ }
+
+ nvkm_debug(subdev, "\tucodeBlobSize: %d\n",
+ hdr->ucode_blob_size);
+ nvkm_debug(subdev, "\tucodeBlobBase: 0x%llx\n",
+ hdr->ucode_blob_base);
+ nvkm_debug(subdev, "\tvprEnabled : %d\n",
+ hdr->vpr_desc.vpr_enabled);
+ nvkm_debug(subdev, "\tvprStart : 0x%x\n",
+ hdr->vpr_desc.vpr_start);
+ nvkm_debug(subdev, "\tvprEnd : 0x%x\n",
+ hdr->vpr_desc.vpr_end);
+ nvkm_debug(subdev, "\thdcpPolicies : 0x%x\n",
+ hdr->vpr_desc.hdcp_policies);
+}
+
+void
+flcn_acr_desc_v1_dump(struct nvkm_subdev *subdev, struct flcn_acr_desc_v1 *hdr)
+{
+ int i;
+
+ nvkm_debug(subdev, "acrDesc\n");
+ nvkm_debug(subdev, "\twprRegionId : %d\n", hdr->wpr_region_id);
+ nvkm_debug(subdev, "\twprOffset : 0x%x\n", hdr->wpr_offset);
+ nvkm_debug(subdev, "\tmmuMemoryRange : 0x%x\n",
+ hdr->mmu_memory_range);
+ nvkm_debug(subdev, "\tnoRegions : %d\n",
+ hdr->regions.no_regions);
+
+ for (i = 0; i < ARRAY_SIZE(hdr->regions.region_props); i++) {
+ nvkm_debug(subdev, "\tregion[%d] :\n", i);
+ nvkm_debug(subdev, "\t startAddr : 0x%x\n",
+ hdr->regions.region_props[i].start_addr);
+ nvkm_debug(subdev, "\t endAddr : 0x%x\n",
+ hdr->regions.region_props[i].end_addr);
+ nvkm_debug(subdev, "\t regionId : %d\n",
+ hdr->regions.region_props[i].region_id);
+ nvkm_debug(subdev, "\t readMask : 0x%x\n",
+ hdr->regions.region_props[i].read_mask);
+ nvkm_debug(subdev, "\t writeMask : 0x%x\n",
+ hdr->regions.region_props[i].write_mask);
+ nvkm_debug(subdev, "\t clientMask : 0x%x\n",
+ hdr->regions.region_props[i].client_mask);
+ nvkm_debug(subdev, "\t shadowMemStartAddr: 0x%x\n",
+ hdr->regions.region_props[i].shadow_mem_start_addr);
+ }
+
+ nvkm_debug(subdev, "\tucodeBlobSize : %d\n",
+ hdr->ucode_blob_size);
+ nvkm_debug(subdev, "\tucodeBlobBase : 0x%llx\n",
+ hdr->ucode_blob_base);
+ nvkm_debug(subdev, "\tvprEnabled : %d\n",
+ hdr->vpr_desc.vpr_enabled);
+ nvkm_debug(subdev, "\tvprStart : 0x%x\n",
+ hdr->vpr_desc.vpr_start);
+ nvkm_debug(subdev, "\tvprEnd : 0x%x\n",
+ hdr->vpr_desc.vpr_end);
+ nvkm_debug(subdev, "\thdcpPolicies : 0x%x\n",
+ hdr->vpr_desc.hdcp_policies);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/nvfw/flcn.c b/drivers/gpu/drm/nouveau/nvkm/nvfw/flcn.c
new file mode 100644
index 000000000000..00ec764e1aab
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/nvfw/flcn.c
@@ -0,0 +1,115 @@
+/*
+ * Copyright 2019 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include <core/subdev.h>
+#include <nvfw/flcn.h>
+
+void
+loader_config_dump(struct nvkm_subdev *subdev, const struct loader_config *hdr)
+{
+ nvkm_debug(subdev, "loaderConfig\n");
+ nvkm_debug(subdev, "\tdmaIdx : %d\n", hdr->dma_idx);
+ nvkm_debug(subdev, "\tcodeDmaBase : 0x%xx\n", hdr->code_dma_base);
+ nvkm_debug(subdev, "\tcodeSizeTotal : 0x%x\n", hdr->code_size_total);
+ nvkm_debug(subdev, "\tcodeSizeToLoad: 0x%x\n", hdr->code_size_to_load);
+ nvkm_debug(subdev, "\tcodeEntryPoint: 0x%x\n", hdr->code_entry_point);
+ nvkm_debug(subdev, "\tdataDmaBase : 0x%x\n", hdr->data_dma_base);
+ nvkm_debug(subdev, "\tdataSize : 0x%x\n", hdr->data_size);
+ nvkm_debug(subdev, "\toverlayDmaBase: 0x%x\n", hdr->overlay_dma_base);
+ nvkm_debug(subdev, "\targc : 0x%08x\n", hdr->argc);
+ nvkm_debug(subdev, "\targv : 0x%08x\n", hdr->argv);
+ nvkm_debug(subdev, "\tcodeDmaBase1 : 0x%x\n", hdr->code_dma_base1);
+ nvkm_debug(subdev, "\tdataDmaBase1 : 0x%x\n", hdr->data_dma_base1);
+ nvkm_debug(subdev, "\tovlyDmaBase1 : 0x%x\n", hdr->overlay_dma_base1);
+}
+
+void
+loader_config_v1_dump(struct nvkm_subdev *subdev,
+ const struct loader_config_v1 *hdr)
+{
+ nvkm_debug(subdev, "loaderConfig\n");
+ nvkm_debug(subdev, "\treserved : 0x%08x\n", hdr->reserved);
+ nvkm_debug(subdev, "\tdmaIdx : %d\n", hdr->dma_idx);
+ nvkm_debug(subdev, "\tcodeDmaBase : 0x%llxx\n", hdr->code_dma_base);
+ nvkm_debug(subdev, "\tcodeSizeTotal : 0x%x\n", hdr->code_size_total);
+ nvkm_debug(subdev, "\tcodeSizeToLoad: 0x%x\n", hdr->code_size_to_load);
+ nvkm_debug(subdev, "\tcodeEntryPoint: 0x%x\n", hdr->code_entry_point);
+ nvkm_debug(subdev, "\tdataDmaBase : 0x%llx\n", hdr->data_dma_base);
+ nvkm_debug(subdev, "\tdataSize : 0x%x\n", hdr->data_size);
+ nvkm_debug(subdev, "\toverlayDmaBase: 0x%llx\n", hdr->overlay_dma_base);
+ nvkm_debug(subdev, "\targc : 0x%08x\n", hdr->argc);
+ nvkm_debug(subdev, "\targv : 0x%08x\n", hdr->argv);
+}
+
+void
+flcn_bl_dmem_desc_dump(struct nvkm_subdev *subdev,
+ const struct flcn_bl_dmem_desc *hdr)
+{
+ nvkm_debug(subdev, "flcnBlDmemDesc\n");
+ nvkm_debug(subdev, "\treserved : 0x%08x 0x%08x 0x%08x 0x%08x\n",
+ hdr->reserved[0], hdr->reserved[1], hdr->reserved[2],
+ hdr->reserved[3]);
+ nvkm_debug(subdev, "\tsignature : 0x%08x 0x%08x 0x%08x 0x%08x\n",
+ hdr->signature[0], hdr->signature[1], hdr->signature[2],
+ hdr->signature[3]);
+ nvkm_debug(subdev, "\tctxDma : %d\n", hdr->ctx_dma);
+ nvkm_debug(subdev, "\tcodeDmaBase : 0x%x\n", hdr->code_dma_base);
+ nvkm_debug(subdev, "\tnonSecCodeOff : 0x%x\n", hdr->non_sec_code_off);
+ nvkm_debug(subdev, "\tnonSecCodeSize: 0x%x\n", hdr->non_sec_code_size);
+ nvkm_debug(subdev, "\tsecCodeOff : 0x%x\n", hdr->sec_code_off);
+ nvkm_debug(subdev, "\tsecCodeSize : 0x%x\n", hdr->sec_code_size);
+ nvkm_debug(subdev, "\tcodeEntryPoint: 0x%x\n", hdr->code_entry_point);
+ nvkm_debug(subdev, "\tdataDmaBase : 0x%x\n", hdr->data_dma_base);
+ nvkm_debug(subdev, "\tdataSize : 0x%x\n", hdr->data_size);
+ nvkm_debug(subdev, "\tcodeDmaBase1 : 0x%x\n", hdr->code_dma_base1);
+ nvkm_debug(subdev, "\tdataDmaBase1 : 0x%x\n", hdr->data_dma_base1);
+}
+
+void
+flcn_bl_dmem_desc_v1_dump(struct nvkm_subdev *subdev,
+ const struct flcn_bl_dmem_desc_v1 *hdr)
+{
+ nvkm_debug(subdev, "flcnBlDmemDesc\n");
+ nvkm_debug(subdev, "\treserved : 0x%08x 0x%08x 0x%08x 0x%08x\n",
+ hdr->reserved[0], hdr->reserved[1], hdr->reserved[2],
+ hdr->reserved[3]);
+ nvkm_debug(subdev, "\tsignature : 0x%08x 0x%08x 0x%08x 0x%08x\n",
+ hdr->signature[0], hdr->signature[1], hdr->signature[2],
+ hdr->signature[3]);
+ nvkm_debug(subdev, "\tctxDma : %d\n", hdr->ctx_dma);
+ nvkm_debug(subdev, "\tcodeDmaBase : 0x%llx\n", hdr->code_dma_base);
+ nvkm_debug(subdev, "\tnonSecCodeOff : 0x%x\n", hdr->non_sec_code_off);
+ nvkm_debug(subdev, "\tnonSecCodeSize: 0x%x\n", hdr->non_sec_code_size);
+ nvkm_debug(subdev, "\tsecCodeOff : 0x%x\n", hdr->sec_code_off);
+ nvkm_debug(subdev, "\tsecCodeSize : 0x%x\n", hdr->sec_code_size);
+ nvkm_debug(subdev, "\tcodeEntryPoint: 0x%x\n", hdr->code_entry_point);
+ nvkm_debug(subdev, "\tdataDmaBase : 0x%llx\n", hdr->data_dma_base);
+ nvkm_debug(subdev, "\tdataSize : 0x%x\n", hdr->data_size);
+}
+
+void
+flcn_bl_dmem_desc_v2_dump(struct nvkm_subdev *subdev,
+ const struct flcn_bl_dmem_desc_v2 *hdr)
+{
+ flcn_bl_dmem_desc_v1_dump(subdev, (void *)hdr);
+ nvkm_debug(subdev, "\targc : 0x%08x\n", hdr->argc);
+ nvkm_debug(subdev, "\targv : 0x%08x\n", hdr->argv);
+}
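
A note on the cast above: flcn_bl_dmem_desc_v2 evidently shares its leading layout with flcn_bl_dmem_desc_v1, so the v1 dumper handles the common fields and only the appended argc/argv need printing here.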
diff --git a/drivers/gpu/drm/nouveau/nvkm/nvfw/fw.c b/drivers/gpu/drm/nouveau/nvkm/nvfw/fw.c
new file mode 100644
index 000000000000..746803bd5318
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/nvfw/fw.c
@@ -0,0 +1,51 @@
+/*
+ * Copyright 2019 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include <core/subdev.h>
+#include <nvfw/fw.h>
+
+const struct nvfw_bin_hdr *
+nvfw_bin_hdr(struct nvkm_subdev *subdev, const void *data)
+{
+ const struct nvfw_bin_hdr *hdr = data;
+ nvkm_debug(subdev, "binHdr:\n");
+ nvkm_debug(subdev, "\tbinMagic : 0x%08x\n", hdr->bin_magic);
+ nvkm_debug(subdev, "\tbinVer : %d\n", hdr->bin_ver);
+ nvkm_debug(subdev, "\tbinSize : %d\n", hdr->bin_size);
+ nvkm_debug(subdev, "\theaderOffset : 0x%x\n", hdr->header_offset);
+ nvkm_debug(subdev, "\tdataOffset : 0x%x\n", hdr->data_offset);
+ nvkm_debug(subdev, "\tdataSize : 0x%x\n", hdr->data_size);
+ return hdr;
+}
+
+const struct nvfw_bl_desc *
+nvfw_bl_desc(struct nvkm_subdev *subdev, const void *data)
+{
+ const struct nvfw_bl_desc *hdr = data;
+ nvkm_debug(subdev, "blDesc\n");
+ nvkm_debug(subdev, "\tstartTag : 0x%x\n", hdr->start_tag);
+ nvkm_debug(subdev, "\tdmemLoadOff : 0x%x\n", hdr->dmem_load_off);
+ nvkm_debug(subdev, "\tcodeOff : 0x%x\n", hdr->code_off);
+ nvkm_debug(subdev, "\tcodeSize : 0x%x\n", hdr->code_size);
+ nvkm_debug(subdev, "\tdataOff : 0x%x\n", hdr->data_off);
+ nvkm_debug(subdev, "\tdataSize : 0x%x\n", hdr->data_size);
+ return hdr;
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/nvfw/hs.c b/drivers/gpu/drm/nouveau/nvkm/nvfw/hs.c
new file mode 100644
index 000000000000..04ed77cb2eba
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/nvfw/hs.c
@@ -0,0 +1,62 @@
+/*
+ * Copyright 2019 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include <core/subdev.h>
+#include <nvfw/hs.h>
+
+const struct nvfw_hs_header *
+nvfw_hs_header(struct nvkm_subdev *subdev, const void *data)
+{
+ const struct nvfw_hs_header *hdr = data;
+ nvkm_debug(subdev, "hsHeader:\n");
+ nvkm_debug(subdev, "\tsigDbgOffset : 0x%x\n", hdr->sig_dbg_offset);
+ nvkm_debug(subdev, "\tsigDbgSize : 0x%x\n", hdr->sig_dbg_size);
+ nvkm_debug(subdev, "\tsigProdOffset : 0x%x\n", hdr->sig_prod_offset);
+ nvkm_debug(subdev, "\tsigProdSize : 0x%x\n", hdr->sig_prod_size);
+ nvkm_debug(subdev, "\tpatchLoc : 0x%x\n", hdr->patch_loc);
+ nvkm_debug(subdev, "\tpatchSig : 0x%x\n", hdr->patch_sig);
+ nvkm_debug(subdev, "\thdrOffset : 0x%x\n", hdr->hdr_offset);
+ nvkm_debug(subdev, "\thdrSize : 0x%x\n", hdr->hdr_size);
+ return hdr;
+}
+
+const struct nvfw_hs_load_header *
+nvfw_hs_load_header(struct nvkm_subdev *subdev, const void *data)
+{
+ const struct nvfw_hs_load_header *hdr = data;
+ int i;
+
+ nvkm_debug(subdev, "hsLoadHeader:\n");
+ nvkm_debug(subdev, "\tnonSecCodeOff : 0x%x\n",
+ hdr->non_sec_code_off);
+ nvkm_debug(subdev, "\tnonSecCodeSize : 0x%x\n",
+ hdr->non_sec_code_size);
+ nvkm_debug(subdev, "\tdataDmaBase : 0x%x\n", hdr->data_dma_base);
+ nvkm_debug(subdev, "\tdataSize : 0x%x\n", hdr->data_size);
+ nvkm_debug(subdev, "\tnumApps : 0x%x\n", hdr->num_apps);
+ for (i = 0; i < hdr->num_apps; i++) {
+ nvkm_debug(subdev,
+ "\tApp[%d] : offset 0x%x size 0x%x\n", i,
+ hdr->apps[(i * 2) + 0], hdr->apps[(i * 2) + 1]);
+ }
+
+ return hdr;
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/nvfw/ls.c b/drivers/gpu/drm/nouveau/nvkm/nvfw/ls.c
new file mode 100644
index 000000000000..b847f281ce97
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/nvfw/ls.c
@@ -0,0 +1,108 @@
+/*
+ * Copyright 2019 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include <core/subdev.h>
+#include <nvfw/ls.h>
+
+static void
+nvfw_ls_desc_head(struct nvkm_subdev *subdev,
+ const struct nvfw_ls_desc_head *hdr)
+{
+ char *date;
+
+ nvkm_debug(subdev, "lsUcodeImgDesc:\n");
+ nvkm_debug(subdev, "\tdescriptorSize : %d\n",
+ hdr->descriptor_size);
+ nvkm_debug(subdev, "\timageSize : %d\n", hdr->image_size);
+ nvkm_debug(subdev, "\ttoolsVersion : 0x%x\n",
+ hdr->tools_version);
+ nvkm_debug(subdev, "\tappVersion : 0x%x\n", hdr->app_version);
+
+ date = kstrndup(hdr->date, sizeof(hdr->date), GFP_KERNEL);
+ nvkm_debug(subdev, "\tdate : %s\n", date);
+ kfree(date);
+
+ nvkm_debug(subdev, "\tbootloaderStartOffset: 0x%x\n",
+ hdr->bootloader_start_offset);
+ nvkm_debug(subdev, "\tbootloaderSize : 0x%x\n",
+ hdr->bootloader_size);
+ nvkm_debug(subdev, "\tbootloaderImemOffset : 0x%x\n",
+ hdr->bootloader_imem_offset);
+ nvkm_debug(subdev, "\tbootloaderEntryPoint : 0x%x\n",
+ hdr->bootloader_entry_point);
+
+ nvkm_debug(subdev, "\tappStartOffset : 0x%x\n",
+ hdr->app_start_offset);
+ nvkm_debug(subdev, "\tappSize : 0x%x\n", hdr->app_size);
+ nvkm_debug(subdev, "\tappImemOffset : 0x%x\n",
+ hdr->app_imem_offset);
+ nvkm_debug(subdev, "\tappImemEntry : 0x%x\n",
+ hdr->app_imem_entry);
+ nvkm_debug(subdev, "\tappDmemOffset : 0x%x\n",
+ hdr->app_dmem_offset);
+ nvkm_debug(subdev, "\tappResidentCodeOffset: 0x%x\n",
+ hdr->app_resident_code_offset);
+ nvkm_debug(subdev, "\tappResidentCodeSize : 0x%x\n",
+ hdr->app_resident_code_size);
+ nvkm_debug(subdev, "\tappResidentDataOffset: 0x%x\n",
+ hdr->app_resident_data_offset);
+ nvkm_debug(subdev, "\tappResidentDataSize : 0x%x\n",
+ hdr->app_resident_data_size);
+}
+
+const struct nvfw_ls_desc *
+nvfw_ls_desc(struct nvkm_subdev *subdev, const void *data)
+{
+ const struct nvfw_ls_desc *hdr = data;
+ int i;
+
+ nvfw_ls_desc_head(subdev, &hdr->head);
+
+ nvkm_debug(subdev, "\tnbOverlays : %d\n", hdr->nb_overlays);
+ for (i = 0; i < ARRAY_SIZE(hdr->load_ovl); i++) {
+ nvkm_debug(subdev, "\tloadOvl[%d] : 0x%x %d\n", i,
+ hdr->load_ovl[i].start, hdr->load_ovl[i].size);
+ }
+ nvkm_debug(subdev, "\tcompressed : %d\n", hdr->compressed);
+
+ return hdr;
+}
+
+const struct nvfw_ls_desc_v1 *
+nvfw_ls_desc_v1(struct nvkm_subdev *subdev, const void *data)
+{
+ const struct nvfw_ls_desc_v1 *hdr = data;
+ int i;
+
+ nvfw_ls_desc_head(subdev, &hdr->head);
+
+ nvkm_debug(subdev, "\tnbImemOverlays : %d\n",
+ hdr->nb_imem_overlays);
+ nvkm_debug(subdev, "\tnbDmemOverlays : %d\n",
+ hdr->nb_imem_overlays);
+ for (i = 0; i < ARRAY_SIZE(hdr->load_ovl); i++) {
+ nvkm_debug(subdev, "\tloadOvl[%2d] : 0x%x %d\n", i,
+ hdr->load_ovl[i].start, hdr->load_ovl[i].size);
+ }
+ nvkm_debug(subdev, "\tcompressed : %d\n", hdr->compressed);
+
+ return hdr;
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/Kbuild b/drivers/gpu/drm/nouveau/nvkm/subdev/Kbuild
index 4e136f3d7c28..fb4fff1222af 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/Kbuild
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/Kbuild
@@ -1,4 +1,5 @@
# SPDX-License-Identifier: MIT
+include $(src)/nvkm/subdev/acr/Kbuild
include $(src)/nvkm/subdev/bar/Kbuild
include $(src)/nvkm/subdev/bios/Kbuild
include $(src)/nvkm/subdev/bus/Kbuild
@@ -19,7 +20,6 @@ include $(src)/nvkm/subdev/mmu/Kbuild
include $(src)/nvkm/subdev/mxm/Kbuild
include $(src)/nvkm/subdev/pci/Kbuild
include $(src)/nvkm/subdev/pmu/Kbuild
-include $(src)/nvkm/subdev/secboot/Kbuild
include $(src)/nvkm/subdev/therm/Kbuild
include $(src)/nvkm/subdev/timer/Kbuild
include $(src)/nvkm/subdev/top/Kbuild
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/acr/Kbuild b/drivers/gpu/drm/nouveau/nvkm/subdev/acr/Kbuild
new file mode 100644
index 000000000000..5b9f64a8957f
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/acr/Kbuild
@@ -0,0 +1,10 @@
+# SPDX-License-Identifier: MIT
+nvkm-y += nvkm/subdev/acr/base.o
+nvkm-y += nvkm/subdev/acr/hsfw.o
+nvkm-y += nvkm/subdev/acr/lsfw.o
+nvkm-y += nvkm/subdev/acr/gm200.o
+nvkm-y += nvkm/subdev/acr/gm20b.o
+nvkm-y += nvkm/subdev/acr/gp102.o
+nvkm-y += nvkm/subdev/acr/gp108.o
+nvkm-y += nvkm/subdev/acr/gp10b.o
+nvkm-y += nvkm/subdev/acr/tu102.o
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/acr/base.c b/drivers/gpu/drm/nouveau/nvkm/subdev/acr/base.c
new file mode 100644
index 000000000000..8eb2a930a9b5
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/acr/base.c
@@ -0,0 +1,411 @@
+/*
+ * Copyright 2019 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include "priv.h"
+
+#include <core/firmware.h>
+#include <core/memory.h>
+#include <subdev/mmu.h>
+
+static struct nvkm_acr_hsf *
+nvkm_acr_hsf_find(struct nvkm_acr *acr, const char *name)
+{
+ struct nvkm_acr_hsf *hsf;
+ list_for_each_entry(hsf, &acr->hsf, head) {
+ if (!strcmp(hsf->name, name))
+ return hsf;
+ }
+ return NULL;
+}
+
+int
+nvkm_acr_hsf_boot(struct nvkm_acr *acr, const char *name)
+{
+ struct nvkm_subdev *subdev = &acr->subdev;
+ struct nvkm_acr_hsf *hsf;
+ int ret;
+
+ hsf = nvkm_acr_hsf_find(acr, name);
+ if (!hsf)
+ return -EINVAL;
+
+ nvkm_debug(subdev, "executing %s binary\n", hsf->name);
+ ret = nvkm_falcon_get(hsf->falcon, subdev);
+ if (ret)
+ return ret;
+
+ ret = hsf->func->boot(acr, hsf);
+ nvkm_falcon_put(hsf->falcon, subdev);
+ if (ret) {
+ nvkm_error(subdev, "%s binary failed\n", hsf->name);
+ return ret;
+ }
+
+ nvkm_debug(subdev, "%s binary completed successfully\n", hsf->name);
+ return 0;
+}
+
+static void
+nvkm_acr_unload(struct nvkm_acr *acr)
+{
+ if (acr->done) {
+ nvkm_acr_hsf_boot(acr, "unload");
+ acr->done = false;
+ }
+}
+
+static int
+nvkm_acr_load(struct nvkm_acr *acr)
+{
+ struct nvkm_subdev *subdev = &acr->subdev;
+ struct nvkm_acr_lsf *lsf;
+ u64 start, limit;
+ int ret;
+
+ if (list_empty(&acr->lsf)) {
+ nvkm_debug(subdev, "No LSF(s) present.\n");
+ return 0;
+ }
+
+ ret = acr->func->init(acr);
+ if (ret)
+ return ret;
+
+ acr->func->wpr_check(acr, &start, &limit);
+
+ if (start != acr->wpr_start || limit != acr->wpr_end) {
+ nvkm_error(subdev, "WPR not configured as expected: "
+ "%016llx-%016llx vs %016llx-%016llx\n",
+ acr->wpr_start, acr->wpr_end, start, limit);
+ return -EIO;
+ }
+
+ acr->done = true;
+
+ list_for_each_entry(lsf, &acr->lsf, head) {
+ if (lsf->func->boot) {
+ ret = lsf->func->boot(lsf->falcon);
+ if (ret)
+ break;
+ }
+ }
+
+ return ret;
+}
+
+static int
+nvkm_acr_reload(struct nvkm_acr *acr)
+{
+ nvkm_acr_unload(acr);
+ return nvkm_acr_load(acr);
+}
+
+static struct nvkm_acr_lsf *
+nvkm_acr_falcon(struct nvkm_device *device)
+{
+ struct nvkm_acr *acr = device->acr;
+ struct nvkm_acr_lsf *lsf;
+
+ if (acr) {
+ list_for_each_entry(lsf, &acr->lsf, head) {
+ if (lsf->func->bootstrap_falcon)
+ return lsf;
+ }
+ }
+
+ return NULL;
+}
+
+int
+nvkm_acr_bootstrap_falcons(struct nvkm_device *device, unsigned long mask)
+{
+ struct nvkm_acr_lsf *acrflcn = nvkm_acr_falcon(device);
+ struct nvkm_acr *acr = device->acr;
+ unsigned long id;
+
+ if (!acrflcn) {
+ int ret = nvkm_acr_reload(acr);
+ if (ret)
+ return ret;
+
+ return acr->done ? 0 : -EINVAL;
+ }
+
+ if (acrflcn->func->bootstrap_multiple_falcons) {
+ return acrflcn->func->
+ bootstrap_multiple_falcons(acrflcn->falcon, mask);
+ }
+
+ for_each_set_bit(id, &mask, NVKM_ACR_LSF_NUM) {
+ int ret = acrflcn->func->bootstrap_falcon(acrflcn->falcon, id);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+bool
+nvkm_acr_managed_falcon(struct nvkm_device *device, enum nvkm_acr_lsf_id id)
+{
+ struct nvkm_acr *acr = device->acr;
+ struct nvkm_acr_lsf *lsf;
+
+ if (acr) {
+ list_for_each_entry(lsf, &acr->lsf, head) {
+ if (lsf->id == id)
+ return true;
+ }
+ }
+
+ return false;
+}
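
A caller-side sketch of the two helpers above (hedged: the NVKM_ACR_LSF_* ids are assumed from this series' <subdev/acr.h>, and an engine would run something like this after resetting its falcons):

	int ret;

	if (nvkm_acr_managed_falcon(device, NVKM_ACR_LSF_FECS)) {
		ret = nvkm_acr_bootstrap_falcons(device,
						 BIT(NVKM_ACR_LSF_FECS) |
						 BIT(NVKM_ACR_LSF_GPCCS));
		if (ret)
			return ret;
	}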
+
+static int
+nvkm_acr_fini(struct nvkm_subdev *subdev, bool suspend)
+{
+ nvkm_acr_unload(nvkm_acr(subdev));
+ return 0;
+}
+
+static int
+nvkm_acr_init(struct nvkm_subdev *subdev)
+{
+ if (!nvkm_acr_falcon(subdev->device))
+ return 0;
+
+ return nvkm_acr_load(nvkm_acr(subdev));
+}
+
+static void
+nvkm_acr_cleanup(struct nvkm_acr *acr)
+{
+ nvkm_acr_lsfw_del_all(acr);
+ nvkm_acr_hsfw_del_all(acr);
+ nvkm_firmware_put(acr->wpr_fw);
+ acr->wpr_fw = NULL;
+}
+
+static int
+nvkm_acr_oneinit(struct nvkm_subdev *subdev)
+{
+ struct nvkm_device *device = subdev->device;
+ struct nvkm_acr *acr = nvkm_acr(subdev);
+ struct nvkm_acr_hsfw *hsfw;
+ struct nvkm_acr_lsfw *lsfw, *lsft;
+ struct nvkm_acr_lsf *lsf;
+ u32 wpr_size = 0;
+ int ret, i;
+
+ if (list_empty(&acr->hsfw)) {
+ nvkm_debug(subdev, "No HSFW(s)\n");
+ nvkm_acr_cleanup(acr);
+ return 0;
+ }
+
+ /* Determine layout/size of WPR image up-front, as we need to know
+ * it to allocate memory before we begin constructing it.
+ */
+ list_for_each_entry_safe(lsfw, lsft, &acr->lsfw, head) {
+		/* Cull unknown falcons that are present in the WPR image. */
+ if (acr->wpr_fw) {
+ if (!lsfw->func) {
+ nvkm_acr_lsfw_del(lsfw);
+ continue;
+ }
+
+ wpr_size = acr->wpr_fw->size;
+ }
+
+ /* Ensure we've fetched falcon configuration. */
+ ret = nvkm_falcon_get(lsfw->falcon, subdev);
+ if (ret)
+ return ret;
+
+ nvkm_falcon_put(lsfw->falcon, subdev);
+
+ if (!(lsf = kmalloc(sizeof(*lsf), GFP_KERNEL)))
+ return -ENOMEM;
+ lsf->func = lsfw->func;
+ lsf->falcon = lsfw->falcon;
+ lsf->id = lsfw->id;
+ list_add_tail(&lsf->head, &acr->lsf);
+ }
+
+ if (!acr->wpr_fw || acr->wpr_comp)
+ wpr_size = acr->func->wpr_layout(acr);
+
+ /* Allocate/Locate WPR + fill ucode blob pointer.
+ *
+ * dGPU: allocate WPR + shadow blob
+ * Tegra: locate WPR with regs, ensure size is sufficient,
+ * allocate ucode blob.
+ */
+ ret = acr->func->wpr_alloc(acr, wpr_size);
+ if (ret)
+ return ret;
+
+ nvkm_debug(subdev, "WPR region is from 0x%llx-0x%llx (shadow 0x%llx)\n",
+ acr->wpr_start, acr->wpr_end, acr->shadow_start);
+
+ /* Write WPR to ucode blob. */
+ nvkm_kmap(acr->wpr);
+ if (acr->wpr_fw && !acr->wpr_comp)
+ nvkm_wobj(acr->wpr, 0, acr->wpr_fw->data, acr->wpr_fw->size);
+
+ if (!acr->wpr_fw || acr->wpr_comp)
+ acr->func->wpr_build(acr, nvkm_acr_falcon(device));
+ acr->func->wpr_patch(acr, (s64)acr->wpr_start - acr->wpr_prev);
+
+ if (acr->wpr_fw && acr->wpr_comp) {
+ nvkm_kmap(acr->wpr);
+ for (i = 0; i < acr->wpr_fw->size; i += 4) {
+ u32 us = nvkm_ro32(acr->wpr, i);
+ u32 fw = ((u32 *)acr->wpr_fw->data)[i/4];
+ if (fw != us) {
+ nvkm_warn(subdev, "%08x: %08x %08x\n",
+ i, us, fw);
+ }
+ }
+ return -EINVAL;
+ }
+ nvkm_done(acr->wpr);
+
+ /* Allocate instance block for ACR-related stuff. */
+ ret = nvkm_memory_new(device, NVKM_MEM_TARGET_INST, 0x1000, 0, true,
+ &acr->inst);
+ if (ret)
+ return ret;
+
+ ret = nvkm_vmm_new(device, 0, 0, NULL, 0, NULL, "acr", &acr->vmm);
+ if (ret)
+ return ret;
+
+ acr->vmm->debug = acr->subdev.debug;
+
+ ret = nvkm_vmm_join(acr->vmm, acr->inst);
+ if (ret)
+ return ret;
+
+ /* Load HS firmware blobs into ACR VMM. */
+ list_for_each_entry(hsfw, &acr->hsfw, head) {
+ nvkm_debug(subdev, "loading %s fw\n", hsfw->name);
+ ret = hsfw->func->load(acr, hsfw);
+ if (ret)
+ return ret;
+ }
+
+ /* Kill temporary data. */
+ nvkm_acr_cleanup(acr);
+ return 0;
+}
+
+static void *
+nvkm_acr_dtor(struct nvkm_subdev *subdev)
+{
+ struct nvkm_acr *acr = nvkm_acr(subdev);
+ struct nvkm_acr_hsf *hsf, *hst;
+ struct nvkm_acr_lsf *lsf, *lst;
+
+ list_for_each_entry_safe(hsf, hst, &acr->hsf, head) {
+ nvkm_vmm_put(acr->vmm, &hsf->vma);
+ nvkm_memory_unref(&hsf->ucode);
+ kfree(hsf->imem);
+ list_del(&hsf->head);
+ kfree(hsf);
+ }
+
+ nvkm_vmm_part(acr->vmm, acr->inst);
+ nvkm_vmm_unref(&acr->vmm);
+ nvkm_memory_unref(&acr->inst);
+
+ nvkm_memory_unref(&acr->wpr);
+
+ list_for_each_entry_safe(lsf, lst, &acr->lsf, head) {
+ list_del(&lsf->head);
+ kfree(lsf);
+ }
+
+ nvkm_acr_cleanup(acr);
+ return acr;
+}
+
+static const struct nvkm_subdev_func
+nvkm_acr = {
+ .dtor = nvkm_acr_dtor,
+ .oneinit = nvkm_acr_oneinit,
+ .init = nvkm_acr_init,
+ .fini = nvkm_acr_fini,
+};
+
+static int
+nvkm_acr_ctor_wpr(struct nvkm_acr *acr, int ver)
+{
+ struct nvkm_subdev *subdev = &acr->subdev;
+ struct nvkm_device *device = subdev->device;
+ int ret;
+
+ ret = nvkm_firmware_get(subdev, "acr/wpr", ver, &acr->wpr_fw);
+ if (ret < 0)
+ return ret;
+
+ /* Pre-add LSFs in the order they appear in the FW WPR image so that
+ * we're able to do a binary comparison with our own generator.
+ */
+ ret = acr->func->wpr_parse(acr);
+ if (ret)
+ return ret;
+
+ acr->wpr_comp = nvkm_boolopt(device->cfgopt, "NvAcrWprCompare", false);
+ acr->wpr_prev = nvkm_longopt(device->cfgopt, "NvAcrWprPrevAddr", 0);
+ return 0;
+}
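
The three NvAcr* options read above are development aids: NvAcrWpr selects a version of an externally captured "acr/wpr" firmware image, NvAcrWprCompare asks oneinit to diff that image against the WPR blob we generate ourselves, and NvAcrWprPrevAddr gives the WPR base the captured image was built for, so the patch step can relocate it. A hedged example using nouveau's usual NvXxx=value config syntax:

	nouveau.config=NvAcrWpr=0,NvAcrWprCompare=1,NvAcrWprPrevAddr=0x1234560000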
+
+int
+nvkm_acr_new_(const struct nvkm_acr_fwif *fwif, struct nvkm_device *device,
+ int index, struct nvkm_acr **pacr)
+{
+ struct nvkm_acr *acr;
+ long wprfw;
+
+ if (!(acr = *pacr = kzalloc(sizeof(*acr), GFP_KERNEL)))
+ return -ENOMEM;
+ nvkm_subdev_ctor(&nvkm_acr, device, index, &acr->subdev);
+ INIT_LIST_HEAD(&acr->hsfw);
+ INIT_LIST_HEAD(&acr->lsfw);
+ INIT_LIST_HEAD(&acr->hsf);
+ INIT_LIST_HEAD(&acr->lsf);
+
+ fwif = nvkm_firmware_load(&acr->subdev, fwif, "Acr", acr);
+ if (IS_ERR(fwif))
+ return PTR_ERR(fwif);
+
+ acr->func = fwif->func;
+
+ wprfw = nvkm_longopt(device->cfgopt, "NvAcrWpr", -1);
+ if (wprfw >= 0) {
+ int ret = nvkm_acr_ctor_wpr(acr, wprfw);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/acr/gm200.c b/drivers/gpu/drm/nouveau/nvkm/subdev/acr/gm200.c
new file mode 100644
index 000000000000..9a6394085cf0
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/acr/gm200.c
@@ -0,0 +1,470 @@
+/*
+ * Copyright 2019 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include "priv.h"
+
+#include <core/falcon.h>
+#include <core/firmware.h>
+#include <core/memory.h>
+#include <subdev/mc.h>
+#include <subdev/mmu.h>
+#include <subdev/pmu.h>
+#include <subdev/timer.h>
+
+#include <nvfw/acr.h>
+#include <nvfw/flcn.h>
+
+int
+gm200_acr_init(struct nvkm_acr *acr)
+{
+ return nvkm_acr_hsf_boot(acr, "load");
+}
+
+void
+gm200_acr_wpr_check(struct nvkm_acr *acr, u64 *start, u64 *limit)
+{
+ struct nvkm_device *device = acr->subdev.device;
+
+ nvkm_wr32(device, 0x100cd4, 2);
+ *start = (u64)(nvkm_rd32(device, 0x100cd4) & 0xffffff00) << 8;
+ nvkm_wr32(device, 0x100cd4, 3);
+ *limit = (u64)(nvkm_rd32(device, 0x100cd4) & 0xffffff00) << 8;
+ *limit = *limit + 0x20000;
+}
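
The 0x100cd4 index register returns address bits 31..8, hence the << 8; the reported limit appears to name the last 128KiB unit of the region, so 0x20000 is added to form an exclusive end. A worked example with illustrative raw values:

	/* index 2 reads 0x00c00000 -> start = 0x00c00000ULL << 8 = 0xc0000000
	 * index 3 reads 0x00c3fe00 -> limit = (0x00c3fe00ULL << 8) + 0x20000
	 *                                   = 0xc4000000 (a 64MiB WPR)
	 */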
+
+void
+gm200_acr_wpr_patch(struct nvkm_acr *acr, s64 adjust)
+{
+ struct nvkm_subdev *subdev = &acr->subdev;
+ struct wpr_header hdr;
+ struct lsb_header lsb;
+ struct nvkm_acr_lsf *lsfw;
+ u32 offset = 0;
+
+ do {
+ nvkm_robj(acr->wpr, offset, &hdr, sizeof(hdr));
+ wpr_header_dump(subdev, &hdr);
+
+ list_for_each_entry(lsfw, &acr->lsfw, head) {
+ if (lsfw->id != hdr.falcon_id)
+ continue;
+
+ nvkm_robj(acr->wpr, hdr.lsb_offset, &lsb, sizeof(lsb));
+ lsb_header_dump(subdev, &lsb);
+
+ lsfw->func->bld_patch(acr, lsb.tail.bl_data_off, adjust);
+ break;
+ }
+ offset += sizeof(hdr);
+ } while (hdr.falcon_id != WPR_HEADER_V0_FALCON_ID_INVALID);
+}
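
The ->bld_patch hook invoked here receives the WPR-relative offset of a falcon's bootloader descriptor plus a signed byte delta, and must relocate any absolute DMA addresses inside it. A hypothetical shape (the real descriptor type and address shifting vary per falcon):

	static void
	example_bld_patch(struct nvkm_acr *acr, u32 bld, s64 adjust)
	{
		struct flcn_bl_dmem_desc_v1 desc;

		nvkm_robj(acr->wpr, bld, &desc, sizeof(desc));
		desc.code_dma_base += adjust;
		desc.data_dma_base += adjust;
		nvkm_wobj(acr->wpr, bld, &desc, sizeof(desc));
	}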
+
+void
+gm200_acr_wpr_build_lsb_tail(struct nvkm_acr_lsfw *lsfw,
+ struct lsb_header_tail *hdr)
+{
+ hdr->ucode_off = lsfw->offset.img;
+ hdr->ucode_size = lsfw->ucode_size;
+ hdr->data_size = lsfw->data_size;
+ hdr->bl_code_size = lsfw->bootloader_size;
+ hdr->bl_imem_off = lsfw->bootloader_imem_offset;
+ hdr->bl_data_off = lsfw->offset.bld;
+ hdr->bl_data_size = lsfw->bl_data_size;
+ hdr->app_code_off = lsfw->app_start_offset +
+ lsfw->app_resident_code_offset;
+ hdr->app_code_size = lsfw->app_resident_code_size;
+ hdr->app_data_off = lsfw->app_start_offset +
+ lsfw->app_resident_data_offset;
+ hdr->app_data_size = lsfw->app_resident_data_size;
+ hdr->flags = lsfw->func->flags;
+}
+
+static int
+gm200_acr_wpr_build_lsb(struct nvkm_acr *acr, struct nvkm_acr_lsfw *lsfw)
+{
+ struct lsb_header hdr;
+
+ if (WARN_ON(lsfw->sig->size != sizeof(hdr.signature)))
+ return -EINVAL;
+
+ memcpy(&hdr.signature, lsfw->sig->data, lsfw->sig->size);
+ gm200_acr_wpr_build_lsb_tail(lsfw, &hdr.tail);
+
+ nvkm_wobj(acr->wpr, lsfw->offset.lsb, &hdr, sizeof(hdr));
+ return 0;
+}
+
+int
+gm200_acr_wpr_build(struct nvkm_acr *acr, struct nvkm_acr_lsf *rtos)
+{
+ struct nvkm_acr_lsfw *lsfw;
+ u32 offset = 0;
+ int ret;
+
+ /* Fill per-LSF structures. */
+ list_for_each_entry(lsfw, &acr->lsfw, head) {
+ struct wpr_header hdr = {
+ .falcon_id = lsfw->id,
+ .lsb_offset = lsfw->offset.lsb,
+ .bootstrap_owner = NVKM_ACR_LSF_PMU,
+ .lazy_bootstrap = rtos && lsfw->id != rtos->id,
+ .status = WPR_HEADER_V0_STATUS_COPY,
+ };
+
+ /* Write WPR header. */
+ nvkm_wobj(acr->wpr, offset, &hdr, sizeof(hdr));
+ offset += sizeof(hdr);
+
+ /* Write LSB header. */
+ ret = gm200_acr_wpr_build_lsb(acr, lsfw);
+ if (ret)
+ return ret;
+
+ /* Write ucode image. */
+ nvkm_wobj(acr->wpr, lsfw->offset.img,
+ lsfw->img.data,
+ lsfw->img.size);
+
+ /* Write bootloader data. */
+ lsfw->func->bld_write(acr, lsfw->offset.bld, lsfw);
+ }
+
+ /* Finalise WPR. */
+ nvkm_wo32(acr->wpr, offset, WPR_HEADER_V0_FALCON_ID_INVALID);
+ return 0;
+}
+
+static int
+gm200_acr_wpr_alloc(struct nvkm_acr *acr, u32 wpr_size)
+{
+ int ret = nvkm_memory_new(acr->subdev.device, NVKM_MEM_TARGET_INST,
+ ALIGN(wpr_size, 0x40000), 0x40000, true,
+ &acr->wpr);
+ if (ret)
+ return ret;
+
+ acr->wpr_start = nvkm_memory_addr(acr->wpr);
+ acr->wpr_end = acr->wpr_start + nvkm_memory_size(acr->wpr);
+ return 0;
+}
+
+u32
+gm200_acr_wpr_layout(struct nvkm_acr *acr)
+{
+ struct nvkm_acr_lsfw *lsfw;
+ u32 wpr = 0;
+
+ wpr += 11 /* MAX_LSF */ * sizeof(struct wpr_header);
+
+ list_for_each_entry(lsfw, &acr->lsfw, head) {
+ wpr = ALIGN(wpr, 256);
+ lsfw->offset.lsb = wpr;
+ wpr += sizeof(struct lsb_header);
+
+ wpr = ALIGN(wpr, 4096);
+ lsfw->offset.img = wpr;
+ wpr += lsfw->img.size;
+
+ wpr = ALIGN(wpr, 256);
+ lsfw->offset.bld = wpr;
+ lsfw->bl_data_size = ALIGN(lsfw->func->bld_size, 256);
+ wpr += lsfw->bl_data_size;
+ }
+
+ return wpr;
+}
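
The offsets recorded here are consumed verbatim by gm200_acr_wpr_build(), and the final cursor becomes the wpr_size handed to ->wpr_alloc. A worked example for a single LSF, assuming struct wpr_header is the five u32s dumped in nvfw/acr.c (20 bytes) and sizeof(struct lsb_header) is well under a page:

	/* header table : 11 * 20 = 0xdc
	 * offset.lsb   = ALIGN(0xdc, 256) = 0x100
	 * offset.img   = ALIGN(0x100 + sizeof(struct lsb_header), 4096) = 0x1000
	 * offset.bld   = ALIGN(0x1000 + img.size, 256)
	 */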
+
+int
+gm200_acr_wpr_parse(struct nvkm_acr *acr)
+{
+ const struct wpr_header *hdr = (void *)acr->wpr_fw->data;
+
+ while (hdr->falcon_id != WPR_HEADER_V0_FALCON_ID_INVALID) {
+ wpr_header_dump(&acr->subdev, hdr);
+ if (!nvkm_acr_lsfw_add(NULL, acr, NULL, (hdr++)->falcon_id))
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+void
+gm200_acr_hsfw_bld(struct nvkm_acr *acr, struct nvkm_acr_hsf *hsf)
+{
+ struct flcn_bl_dmem_desc_v1 hsdesc = {
+ .ctx_dma = FALCON_DMAIDX_VIRT,
+ .code_dma_base = hsf->vma->addr,
+ .non_sec_code_off = hsf->non_sec_addr,
+ .non_sec_code_size = hsf->non_sec_size,
+ .sec_code_off = hsf->sec_addr,
+ .sec_code_size = hsf->sec_size,
+ .code_entry_point = 0,
+ .data_dma_base = hsf->vma->addr + hsf->data_addr,
+ .data_size = hsf->data_size,
+ };
+
+ flcn_bl_dmem_desc_v1_dump(&acr->subdev, &hsdesc);
+
+ nvkm_falcon_load_dmem(hsf->falcon, &hsdesc, 0, sizeof(hsdesc), 0);
+}
+
+int
+gm200_acr_hsfw_boot(struct nvkm_acr *acr, struct nvkm_acr_hsf *hsf,
+ u32 intr_clear, u32 mbox0_ok)
+{
+ struct nvkm_subdev *subdev = &acr->subdev;
+ struct nvkm_device *device = subdev->device;
+ struct nvkm_falcon *falcon = hsf->falcon;
+ u32 mbox0, mbox1;
+ int ret;
+
+ /* Reset falcon. */
+ nvkm_falcon_reset(falcon);
+ nvkm_falcon_bind_context(falcon, acr->inst);
+
+ /* Load bootloader into IMEM. */
+ nvkm_falcon_load_imem(falcon, hsf->imem,
+ falcon->code.limit - hsf->imem_size,
+ hsf->imem_size,
+ hsf->imem_tag,
+ 0, false);
+
+ /* Load bootloader data into DMEM. */
+ hsf->func->bld(acr, hsf);
+
+ /* Boot the falcon. */
+ nvkm_mc_intr_mask(device, falcon->owner->index, false);
+
+ nvkm_falcon_wr32(falcon, 0x040, 0xdeada5a5);
+ nvkm_falcon_set_start_addr(falcon, hsf->imem_tag << 8);
+ nvkm_falcon_start(falcon);
+ ret = nvkm_falcon_wait_for_halt(falcon, 100);
+ if (ret)
+ return ret;
+
+ /* Check for successful completion. */
+ mbox0 = nvkm_falcon_rd32(falcon, 0x040);
+ mbox1 = nvkm_falcon_rd32(falcon, 0x044);
+ nvkm_debug(subdev, "mailbox %08x %08x\n", mbox0, mbox1);
+ if (mbox0 && mbox0 != mbox0_ok)
+ return -EIO;
+
+ nvkm_falcon_clear_interrupt(falcon, intr_clear);
+ nvkm_mc_intr_mask(device, falcon->owner->index, true);
+ return ret;
+}
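
The 0xdeada5a5 write poisons mailbox 0 so that a falcon which never ran cannot pass as success: the HS binary must overwrite it, and only 0 or the caller-supplied mbox0_ok (e.g. 0x1d for the "unload" blob below) counts as a clean exit.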
+
+int
+gm200_acr_hsfw_load(struct nvkm_acr *acr, struct nvkm_acr_hsfw *hsfw,
+ struct nvkm_falcon *falcon)
+{
+ struct nvkm_subdev *subdev = &acr->subdev;
+ struct nvkm_acr_hsf *hsf;
+ int ret;
+
+ /* Patch the appropriate signature (production/debug) into the FW
+ * image, as determined by the mode the falcon is in.
+ */
+ ret = nvkm_falcon_get(falcon, subdev);
+ if (ret)
+ return ret;
+
+ if (hsfw->sig.patch_loc) {
+ if (!falcon->debug) {
+ nvkm_debug(subdev, "patching production signature\n");
+ memcpy(hsfw->image + hsfw->sig.patch_loc,
+ hsfw->sig.prod.data,
+ hsfw->sig.prod.size);
+ } else {
+ nvkm_debug(subdev, "patching debug signature\n");
+ memcpy(hsfw->image + hsfw->sig.patch_loc,
+ hsfw->sig.dbg.data,
+ hsfw->sig.dbg.size);
+ }
+ }
+
+ nvkm_falcon_put(falcon, subdev);
+
+ if (!(hsf = kzalloc(sizeof(*hsf), GFP_KERNEL)))
+ return -ENOMEM;
+ hsf->func = hsfw->func;
+ hsf->name = hsfw->name;
+ list_add_tail(&hsf->head, &acr->hsf);
+
+ hsf->imem_size = hsfw->imem_size;
+ hsf->imem_tag = hsfw->imem_tag;
+ hsf->imem = kmemdup(hsfw->imem, hsfw->imem_size, GFP_KERNEL);
+ if (!hsf->imem)
+ return -ENOMEM;
+
+ hsf->non_sec_addr = hsfw->non_sec_addr;
+ hsf->non_sec_size = hsfw->non_sec_size;
+ hsf->sec_addr = hsfw->sec_addr;
+ hsf->sec_size = hsfw->sec_size;
+ hsf->data_addr = hsfw->data_addr;
+ hsf->data_size = hsfw->data_size;
+
+ /* Make the FW image accessible to the HS bootloader. */
+ ret = nvkm_memory_new(subdev->device, NVKM_MEM_TARGET_INST,
+ hsfw->image_size, 0x1000, false, &hsf->ucode);
+ if (ret)
+ return ret;
+
+ nvkm_kmap(hsf->ucode);
+ nvkm_wobj(hsf->ucode, 0, hsfw->image, hsfw->image_size);
+ nvkm_done(hsf->ucode);
+
+ ret = nvkm_vmm_get(acr->vmm, 12, nvkm_memory_size(hsf->ucode),
+ &hsf->vma);
+ if (ret)
+ return ret;
+
+ ret = nvkm_memory_map(hsf->ucode, 0, acr->vmm, hsf->vma, NULL, 0);
+ if (ret)
+ return ret;
+
+ hsf->falcon = falcon;
+ return 0;
+}
+
+int
+gm200_acr_unload_boot(struct nvkm_acr *acr, struct nvkm_acr_hsf *hsf)
+{
+ return gm200_acr_hsfw_boot(acr, hsf, 0, 0x1d);
+}
+
+int
+gm200_acr_unload_load(struct nvkm_acr *acr, struct nvkm_acr_hsfw *hsfw)
+{
+ return gm200_acr_hsfw_load(acr, hsfw, &acr->subdev.device->pmu->falcon);
+}
+
+const struct nvkm_acr_hsf_func
+gm200_acr_unload_0 = {
+ .load = gm200_acr_unload_load,
+ .boot = gm200_acr_unload_boot,
+ .bld = gm200_acr_hsfw_bld,
+};
+
+MODULE_FIRMWARE("nvidia/gm200/acr/ucode_unload.bin");
+MODULE_FIRMWARE("nvidia/gm204/acr/ucode_unload.bin");
+MODULE_FIRMWARE("nvidia/gm206/acr/ucode_unload.bin");
+MODULE_FIRMWARE("nvidia/gp100/acr/ucode_unload.bin");
+
+static const struct nvkm_acr_hsf_fwif
+gm200_acr_unload_fwif[] = {
+ { 0, nvkm_acr_hsfw_load, &gm200_acr_unload_0 },
+ {}
+};
+
+int
+gm200_acr_load_boot(struct nvkm_acr *acr, struct nvkm_acr_hsf *hsf)
+{
+ return gm200_acr_hsfw_boot(acr, hsf, 0x10, 0);
+}
+
+static int
+gm200_acr_load_load(struct nvkm_acr *acr, struct nvkm_acr_hsfw *hsfw)
+{
+ struct flcn_acr_desc *desc = (void *)&hsfw->image[hsfw->data_addr];
+
+ desc->wpr_region_id = 1;
+ desc->regions.no_regions = 2;
+ desc->regions.region_props[0].start_addr = acr->wpr_start >> 8;
+ desc->regions.region_props[0].end_addr = acr->wpr_end >> 8;
+ desc->regions.region_props[0].region_id = 1;
+ desc->regions.region_props[0].read_mask = 0xf;
+ desc->regions.region_props[0].write_mask = 0xc;
+ desc->regions.region_props[0].client_mask = 0x2;
+ flcn_acr_desc_dump(&acr->subdev, desc);
+
+ return gm200_acr_hsfw_load(acr, hsfw, &acr->subdev.device->pmu->falcon);
+}
+
+static const struct nvkm_acr_hsf_func
+gm200_acr_load_0 = {
+ .load = gm200_acr_load_load,
+ .boot = gm200_acr_load_boot,
+ .bld = gm200_acr_hsfw_bld,
+};
+
+MODULE_FIRMWARE("nvidia/gm200/acr/bl.bin");
+MODULE_FIRMWARE("nvidia/gm200/acr/ucode_load.bin");
+
+MODULE_FIRMWARE("nvidia/gm204/acr/bl.bin");
+MODULE_FIRMWARE("nvidia/gm204/acr/ucode_load.bin");
+
+MODULE_FIRMWARE("nvidia/gm206/acr/bl.bin");
+MODULE_FIRMWARE("nvidia/gm206/acr/ucode_load.bin");
+
+MODULE_FIRMWARE("nvidia/gp100/acr/bl.bin");
+MODULE_FIRMWARE("nvidia/gp100/acr/ucode_load.bin");
+
+static const struct nvkm_acr_hsf_fwif
+gm200_acr_load_fwif[] = {
+ { 0, nvkm_acr_hsfw_load, &gm200_acr_load_0 },
+ {}
+};
+
+static const struct nvkm_acr_func
+gm200_acr = {
+ .load = gm200_acr_load_fwif,
+ .unload = gm200_acr_unload_fwif,
+ .wpr_parse = gm200_acr_wpr_parse,
+ .wpr_layout = gm200_acr_wpr_layout,
+ .wpr_alloc = gm200_acr_wpr_alloc,
+ .wpr_build = gm200_acr_wpr_build,
+ .wpr_patch = gm200_acr_wpr_patch,
+ .wpr_check = gm200_acr_wpr_check,
+ .init = gm200_acr_init,
+};
+
+static int
+gm200_acr_load(struct nvkm_acr *acr, int ver, const struct nvkm_acr_fwif *fwif)
+{
+ struct nvkm_subdev *subdev = &acr->subdev;
+ const struct nvkm_acr_hsf_fwif *hsfwif;
+
+ hsfwif = nvkm_firmware_load(subdev, fwif->func->load, "AcrLoad",
+ acr, "acr/bl", "acr/ucode_load", "load");
+ if (IS_ERR(hsfwif))
+ return PTR_ERR(hsfwif);
+
+ hsfwif = nvkm_firmware_load(subdev, fwif->func->unload, "AcrUnload",
+ acr, "acr/bl", "acr/ucode_unload",
+ "unload");
+ if (IS_ERR(hsfwif))
+ return PTR_ERR(hsfwif);
+
+ return 0;
+}
+
+static const struct nvkm_acr_fwif
+gm200_acr_fwif[] = {
+ { 0, gm200_acr_load, &gm200_acr },
+ {}
+};
+
+int
+gm200_acr_new(struct nvkm_device *device, int index, struct nvkm_acr **pacr)
+{
+ return nvkm_acr_new_(gm200_acr_fwif, device, index, pacr);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/acr/gm20b.c b/drivers/gpu/drm/nouveau/nvkm/subdev/acr/gm20b.c
new file mode 100644
index 000000000000..034a6ede70c7
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/acr/gm20b.c
@@ -0,0 +1,134 @@
+/*
+ * Copyright 2019 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include "priv.h"
+
+#include <core/firmware.h>
+#include <core/memory.h>
+#include <subdev/mmu.h>
+#include <subdev/pmu.h>
+
+#include <nvfw/acr.h>
+#include <nvfw/flcn.h>
+
+int
+gm20b_acr_wpr_alloc(struct nvkm_acr *acr, u32 wpr_size)
+{
+ struct nvkm_subdev *subdev = &acr->subdev;
+
+ acr->func->wpr_check(acr, &acr->wpr_start, &acr->wpr_end);
+
+ if ((acr->wpr_end - acr->wpr_start) < wpr_size) {
+ nvkm_error(subdev, "WPR image too big for WPR!\n");
+ return -ENOSPC;
+ }
+
+ return nvkm_memory_new(subdev->device, NVKM_MEM_TARGET_INST,
+ wpr_size, 0, true, &acr->wpr);
+}
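
Per the Allocate/Locate comment in nvkm_acr_oneinit(), this is the Tegra path: the WPR region itself is pre-carved by the secure world, so wpr_check only reads its bounds back, the generated image is checked to fit, and the nvkm_memory allocated here is just the ucode blob staged in normal memory, presumably for the HS binary to copy into place.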
+
+static void
+gm20b_acr_load_bld(struct nvkm_acr *acr, struct nvkm_acr_hsf *hsf)
+{
+ struct flcn_bl_dmem_desc hsdesc = {
+ .ctx_dma = FALCON_DMAIDX_VIRT,
+ .code_dma_base = hsf->vma->addr >> 8,
+ .non_sec_code_off = hsf->non_sec_addr,
+ .non_sec_code_size = hsf->non_sec_size,
+ .sec_code_off = hsf->sec_addr,
+ .sec_code_size = hsf->sec_size,
+ .code_entry_point = 0,
+ .data_dma_base = (hsf->vma->addr + hsf->data_addr) >> 8,
+ .data_size = hsf->data_size,
+ };
+
+ flcn_bl_dmem_desc_dump(&acr->subdev, &hsdesc);
+
+ nvkm_falcon_load_dmem(hsf->falcon, &hsdesc, 0, sizeof(hsdesc), 0);
+}
+
+static int
+gm20b_acr_load_load(struct nvkm_acr *acr, struct nvkm_acr_hsfw *hsfw)
+{
+ struct flcn_acr_desc *desc = (void *)&hsfw->image[hsfw->data_addr];
+
+ desc->ucode_blob_base = nvkm_memory_addr(acr->wpr);
+ desc->ucode_blob_size = nvkm_memory_size(acr->wpr);
+ flcn_acr_desc_dump(&acr->subdev, desc);
+
+ return gm200_acr_hsfw_load(acr, hsfw, &acr->subdev.device->pmu->falcon);
+}
+
+const struct nvkm_acr_hsf_func
+gm20b_acr_load_0 = {
+ .load = gm20b_acr_load_load,
+ .boot = gm200_acr_load_boot,
+ .bld = gm20b_acr_load_bld,
+};
+
+#if IS_ENABLED(CONFIG_ARCH_TEGRA_210_SOC)
+MODULE_FIRMWARE("nvidia/gm20b/acr/bl.bin");
+MODULE_FIRMWARE("nvidia/gm20b/acr/ucode_load.bin");
+#endif
+
+static const struct nvkm_acr_hsf_fwif
+gm20b_acr_load_fwif[] = {
+ { 0, nvkm_acr_hsfw_load, &gm20b_acr_load_0 },
+ {}
+};
+
+static const struct nvkm_acr_func
+gm20b_acr = {
+ .load = gm20b_acr_load_fwif,
+ .wpr_parse = gm200_acr_wpr_parse,
+ .wpr_layout = gm200_acr_wpr_layout,
+ .wpr_alloc = gm20b_acr_wpr_alloc,
+ .wpr_build = gm200_acr_wpr_build,
+ .wpr_patch = gm200_acr_wpr_patch,
+ .wpr_check = gm200_acr_wpr_check,
+ .init = gm200_acr_init,
+};
+
+int
+gm20b_acr_load(struct nvkm_acr *acr, int ver, const struct nvkm_acr_fwif *fwif)
+{
+ struct nvkm_subdev *subdev = &acr->subdev;
+ const struct nvkm_acr_hsf_fwif *hsfwif;
+
+ hsfwif = nvkm_firmware_load(subdev, fwif->func->load, "AcrLoad",
+ acr, "acr/bl", "acr/ucode_load", "load");
+ if (IS_ERR(hsfwif))
+ return PTR_ERR(hsfwif);
+
+ return 0;
+}
+
+static const struct nvkm_acr_fwif
+gm20b_acr_fwif[] = {
+ { 0, gm20b_acr_load, &gm20b_acr },
+ {}
+};
+
+int
+gm20b_acr_new(struct nvkm_device *device, int index, struct nvkm_acr **pacr)
+{
+ return nvkm_acr_new_(gm20b_acr_fwif, device, index, pacr);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/acr/gp102.c b/drivers/gpu/drm/nouveau/nvkm/subdev/acr/gp102.c
new file mode 100644
index 000000000000..49e11c46d525
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/acr/gp102.c
@@ -0,0 +1,281 @@
+/*
+ * Copyright 2019 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include "priv.h"
+
+#include <core/firmware.h>
+#include <core/memory.h>
+#include <subdev/mmu.h>
+#include <engine/sec2.h>
+
+#include <nvfw/acr.h>
+#include <nvfw/flcn.h>
+
+void
+gp102_acr_wpr_patch(struct nvkm_acr *acr, s64 adjust)
+{
+ struct wpr_header_v1 hdr;
+ struct lsb_header_v1 lsb;
+ struct nvkm_acr_lsfw *lsfw;
+ u32 offset = 0;
+
+ do {
+ nvkm_robj(acr->wpr, offset, &hdr, sizeof(hdr));
+ wpr_header_v1_dump(&acr->subdev, &hdr);
+
+ list_for_each_entry(lsfw, &acr->lsfw, head) {
+ if (lsfw->id != hdr.falcon_id)
+ continue;
+
+ nvkm_robj(acr->wpr, hdr.lsb_offset, &lsb, sizeof(lsb));
+ lsb_header_v1_dump(&acr->subdev, &lsb);
+
+ lsfw->func->bld_patch(acr, lsb.tail.bl_data_off, adjust);
+ break;
+ }
+
+ offset += sizeof(hdr);
+ } while (hdr.falcon_id != WPR_HEADER_V1_FALCON_ID_INVALID);
+}
+
+int
+gp102_acr_wpr_build_lsb(struct nvkm_acr *acr, struct nvkm_acr_lsfw *lsfw)
+{
+ struct lsb_header_v1 hdr;
+
+ if (WARN_ON(lsfw->sig->size != sizeof(hdr.signature)))
+ return -EINVAL;
+
+ memcpy(&hdr.signature, lsfw->sig->data, lsfw->sig->size);
+ gm200_acr_wpr_build_lsb_tail(lsfw, &hdr.tail);
+
+ nvkm_wobj(acr->wpr, lsfw->offset.lsb, &hdr, sizeof(hdr));
+ return 0;
+}
+
+int
+gp102_acr_wpr_build(struct nvkm_acr *acr, struct nvkm_acr_lsf *rtos)
+{
+ struct nvkm_acr_lsfw *lsfw;
+ u32 offset = 0;
+ int ret;
+
+ /* Fill per-LSF structures. */
+ list_for_each_entry(lsfw, &acr->lsfw, head) {
+ struct lsf_signature_v1 *sig = (void *)lsfw->sig->data;
+ struct wpr_header_v1 hdr = {
+ .falcon_id = lsfw->id,
+ .lsb_offset = lsfw->offset.lsb,
+ .bootstrap_owner = NVKM_ACR_LSF_SEC2,
+ .lazy_bootstrap = rtos && lsfw->id != rtos->id,
+ .bin_version = sig->version,
+ .status = WPR_HEADER_V1_STATUS_COPY,
+ };
+
+ /* Write WPR header. */
+ nvkm_wobj(acr->wpr, offset, &hdr, sizeof(hdr));
+ offset += sizeof(hdr);
+
+ /* Write LSB header. */
+ ret = gp102_acr_wpr_build_lsb(acr, lsfw);
+ if (ret)
+ return ret;
+
+ /* Write ucode image. */
+ nvkm_wobj(acr->wpr, lsfw->offset.img,
+ lsfw->img.data,
+ lsfw->img.size);
+
+ /* Write bootloader data. */
+ lsfw->func->bld_write(acr, lsfw->offset.bld, lsfw);
+ }
+
+ /* Finalise WPR. */
+ nvkm_wo32(acr->wpr, offset, WPR_HEADER_V1_FALCON_ID_INVALID);
+ return 0;
+}
+
+int
+gp102_acr_wpr_alloc(struct nvkm_acr *acr, u32 wpr_size)
+{
+ int ret = nvkm_memory_new(acr->subdev.device, NVKM_MEM_TARGET_INST,
+ ALIGN(wpr_size, 0x40000) << 1, 0x40000, true,
+ &acr->wpr);
+ if (ret)
+ return ret;
+
+ acr->shadow_start = nvkm_memory_addr(acr->wpr);
+ acr->wpr_start = acr->shadow_start + (nvkm_memory_size(acr->wpr) >> 1);
+ acr->wpr_end = acr->wpr_start + (nvkm_memory_size(acr->wpr) >> 1);
+ return 0;
+}
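A worked example of the split this allocation creates, with an assumed 0x54000-byte WPR image; the lower half of the doubled allocation is the shadow copy and the upper half is the region the WPR unit actually protects:

    #include <assert.h>

    #define ALIGN(x, a) (((x) + (a) - 1) & ~((unsigned long long)(a) - 1))

    int main(void)
    {
        unsigned long long wpr_size = 0x54000;    /* assumed image size */
        unsigned long long size = ALIGN(wpr_size, 0x40000) << 1;
        unsigned long long base = 0x100000000ULL; /* assumed allocation */

        unsigned long long shadow_start = base;
        unsigned long long wpr_start = shadow_start + (size >> 1);
        unsigned long long wpr_end = wpr_start + (size >> 1);

        assert(size == 0x100000);                 /* 0x80000, doubled */
        assert(wpr_start == base + 0x80000);
        assert(wpr_end == base + 0x100000);
        return 0;
    }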
+
+u32
+gp102_acr_wpr_layout(struct nvkm_acr *acr)
+{
+ struct nvkm_acr_lsfw *lsfw;
+ u32 wpr = 0;
+
+ wpr += 11 /* MAX_LSF */ * sizeof(struct wpr_header_v1);
+ wpr = ALIGN(wpr, 256);
+
+ wpr += 0x100; /* Shared sub-WPR headers. */
+
+ list_for_each_entry(lsfw, &acr->lsfw, head) {
+ wpr = ALIGN(wpr, 256);
+ lsfw->offset.lsb = wpr;
+ wpr += sizeof(struct lsb_header_v1);
+
+ wpr = ALIGN(wpr, 4096);
+ lsfw->offset.img = wpr;
+ wpr += lsfw->img.size;
+
+ wpr = ALIGN(wpr, 256);
+ lsfw->offset.bld = wpr;
+ lsfw->bl_data_size = ALIGN(lsfw->func->bld_size, 256);
+ wpr += lsfw->bl_data_size;
+ }
+
+ return wpr;
+}
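A worked pass over the fixed header area, assuming sizeof(struct wpr_header_v1) is 24 bytes (six u32 fields): 11 headers take 0x108 bytes, which aligns up to 0x200, and the 0x100-byte shared sub-WPR block then ends at 0x300. That is consistent with tu102_acr_wpr_build() later in this patch writing its sub-WPR terminator at offset 0x200. A standalone calculator for the same arithmetic:

    #include <stdio.h>

    #define ALIGN(x, a) (((x) + (a) - 1) & ~((unsigned)(a) - 1))

    int main(void)
    {
        unsigned wpr = 0;

        wpr += 11 * 24;        /* assumes sizeof(wpr_header_v1) == 24 */
        wpr = ALIGN(wpr, 256); /* 0x200 */
        wpr += 0x100;          /* shared sub-WPR headers */
        wpr = ALIGN(wpr, 256); /* first LSB header: 0x300 */

        printf("first LSB header at %#x\n", wpr);
        return 0;
    }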
+
+int
+gp102_acr_wpr_parse(struct nvkm_acr *acr)
+{
+ const struct wpr_header_v1 *hdr = (void *)acr->wpr_fw->data;
+
+ while (hdr->falcon_id != WPR_HEADER_V1_FALCON_ID_INVALID) {
+ wpr_header_v1_dump(&acr->subdev, hdr);
+ if (IS_ERR(nvkm_acr_lsfw_add(NULL, acr, NULL, (hdr++)->falcon_id)))
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+MODULE_FIRMWARE("nvidia/gp102/acr/unload_bl.bin");
+MODULE_FIRMWARE("nvidia/gp102/acr/ucode_unload.bin");
+
+MODULE_FIRMWARE("nvidia/gp104/acr/unload_bl.bin");
+MODULE_FIRMWARE("nvidia/gp104/acr/ucode_unload.bin");
+
+MODULE_FIRMWARE("nvidia/gp106/acr/unload_bl.bin");
+MODULE_FIRMWARE("nvidia/gp106/acr/ucode_unload.bin");
+
+MODULE_FIRMWARE("nvidia/gp107/acr/unload_bl.bin");
+MODULE_FIRMWARE("nvidia/gp107/acr/ucode_unload.bin");
+
+static const struct nvkm_acr_hsf_fwif
+gp102_acr_unload_fwif[] = {
+ { 0, nvkm_acr_hsfw_load, &gm200_acr_unload_0 },
+ {}
+};
+
+int
+gp102_acr_load_load(struct nvkm_acr *acr, struct nvkm_acr_hsfw *hsfw)
+{
+ struct flcn_acr_desc_v1 *desc = (void *)&hsfw->image[hsfw->data_addr];
+
+ desc->wpr_region_id = 1;
+ desc->regions.no_regions = 2;
+ desc->regions.region_props[0].start_addr = acr->wpr_start >> 8;
+ desc->regions.region_props[0].end_addr = acr->wpr_end >> 8;
+ desc->regions.region_props[0].region_id = 1;
+ desc->regions.region_props[0].read_mask = 0xf;
+ desc->regions.region_props[0].write_mask = 0xc;
+ desc->regions.region_props[0].client_mask = 0x2;
+ desc->regions.region_props[0].shadow_mem_start_addr =
+ acr->shadow_start >> 8;
+ flcn_acr_desc_v1_dump(&acr->subdev, desc);
+
+ return gm200_acr_hsfw_load(acr, hsfw,
+ &acr->subdev.device->sec2->falcon);
+}
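Note that every address field in the descriptor above is programmed in 256-byte units, hence the >> 8. A quick check with an assumed address shows the shift is lossless because the WPR bounds are at least 256-byte aligned:

    #include <assert.h>

    int main(void)
    {
        unsigned long long wpr_start = 0x100080000ULL; /* assumed */
        unsigned int start_addr = wpr_start >> 8;      /* 0x01000800 */

        assert(((unsigned long long)start_addr << 8) == wpr_start);
        return 0;
    }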
+
+static const struct nvkm_acr_hsf_func
+gp102_acr_load_0 = {
+ .load = gp102_acr_load_load,
+ .boot = gm200_acr_load_boot,
+ .bld = gm200_acr_hsfw_bld,
+};
+
+MODULE_FIRMWARE("nvidia/gp102/acr/bl.bin");
+MODULE_FIRMWARE("nvidia/gp102/acr/ucode_load.bin");
+
+MODULE_FIRMWARE("nvidia/gp104/acr/bl.bin");
+MODULE_FIRMWARE("nvidia/gp104/acr/ucode_load.bin");
+
+MODULE_FIRMWARE("nvidia/gp106/acr/bl.bin");
+MODULE_FIRMWARE("nvidia/gp106/acr/ucode_load.bin");
+
+MODULE_FIRMWARE("nvidia/gp107/acr/bl.bin");
+MODULE_FIRMWARE("nvidia/gp107/acr/ucode_load.bin");
+
+static const struct nvkm_acr_hsf_fwif
+gp102_acr_load_fwif[] = {
+ { 0, nvkm_acr_hsfw_load, &gp102_acr_load_0 },
+ {}
+};
+
+static const struct nvkm_acr_func
+gp102_acr = {
+ .load = gp102_acr_load_fwif,
+ .unload = gp102_acr_unload_fwif,
+ .wpr_parse = gp102_acr_wpr_parse,
+ .wpr_layout = gp102_acr_wpr_layout,
+ .wpr_alloc = gp102_acr_wpr_alloc,
+ .wpr_build = gp102_acr_wpr_build,
+ .wpr_patch = gp102_acr_wpr_patch,
+ .wpr_check = gm200_acr_wpr_check,
+ .init = gm200_acr_init,
+};
+
+int
+gp102_acr_load(struct nvkm_acr *acr, int ver, const struct nvkm_acr_fwif *fwif)
+{
+ struct nvkm_subdev *subdev = &acr->subdev;
+ const struct nvkm_acr_hsf_fwif *hsfwif;
+
+ hsfwif = nvkm_firmware_load(subdev, fwif->func->load, "AcrLoad",
+ acr, "acr/bl", "acr/ucode_load", "load");
+ if (IS_ERR(hsfwif))
+ return PTR_ERR(hsfwif);
+
+ hsfwif = nvkm_firmware_load(subdev, fwif->func->unload, "AcrUnload",
+ acr, "acr/unload_bl", "acr/ucode_unload",
+ "unload");
+ if (IS_ERR(hsfwif))
+ return PTR_ERR(hsfwif);
+
+ return 0;
+}
+
+static const struct nvkm_acr_fwif
+gp102_acr_fwif[] = {
+ { 0, gp102_acr_load, &gp102_acr },
+ {}
+};
+
+int
+gp102_acr_new(struct nvkm_device *device, int index, struct nvkm_acr **pacr)
+{
+ return nvkm_acr_new_(gp102_acr_fwif, device, index, pacr);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/acr/gp108.c b/drivers/gpu/drm/nouveau/nvkm/subdev/acr/gp108.c
new file mode 100644
index 000000000000..f10dc9112678
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/acr/gp108.c
@@ -0,0 +1,111 @@
+/*
+ * Copyright 2019 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include "priv.h"
+
+#include <subdev/mmu.h>
+
+#include <nvfw/flcn.h>
+
+void
+gp108_acr_hsfw_bld(struct nvkm_acr *acr, struct nvkm_acr_hsf *hsf)
+{
+ struct flcn_bl_dmem_desc_v2 hsdesc = {
+ .ctx_dma = FALCON_DMAIDX_VIRT,
+ .code_dma_base = hsf->vma->addr,
+ .non_sec_code_off = hsf->non_sec_addr,
+ .non_sec_code_size = hsf->non_sec_size,
+ .sec_code_off = hsf->sec_addr,
+ .sec_code_size = hsf->sec_size,
+ .code_entry_point = 0,
+ .data_dma_base = hsf->vma->addr + hsf->data_addr,
+ .data_size = hsf->data_size,
+ .argc = 0,
+ .argv = 0,
+ };
+
+ flcn_bl_dmem_desc_v2_dump(&acr->subdev, &hsdesc);
+
+ nvkm_falcon_load_dmem(hsf->falcon, &hsdesc, 0, sizeof(hsdesc), 0);
+}
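Unlike the v1 descriptor that gm20b_acr_load_bld() fills earlier in this patch, the v2 layout used here carries code_dma_base and data_dma_base as full byte addresses, so no >> 8 packing is needed. A small comparison with an assumed VMA address:

    #include <assert.h>

    int main(void)
    {
        unsigned long long vma = 0x20000100ULL; /* assumed falcon VMA */
        unsigned int v1_base = vma >> 8;        /* v1: 256-byte units */
        unsigned long long v2_base = vma;       /* v2: byte address */

        assert(((unsigned long long)v1_base << 8) == v2_base);
        return 0;
    }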
+
+const struct nvkm_acr_hsf_func
+gp108_acr_unload_0 = {
+ .load = gm200_acr_unload_load,
+ .boot = gm200_acr_unload_boot,
+ .bld = gp108_acr_hsfw_bld,
+};
+
+MODULE_FIRMWARE("nvidia/gp108/acr/unload_bl.bin");
+MODULE_FIRMWARE("nvidia/gp108/acr/ucode_unload.bin");
+
+MODULE_FIRMWARE("nvidia/gv100/acr/unload_bl.bin");
+MODULE_FIRMWARE("nvidia/gv100/acr/ucode_unload.bin");
+
+static const struct nvkm_acr_hsf_fwif
+gp108_acr_unload_fwif[] = {
+ { 0, nvkm_acr_hsfw_load, &gp108_acr_unload_0 },
+ {}
+};
+
+static const struct nvkm_acr_hsf_func
+gp108_acr_load_0 = {
+ .load = gp102_acr_load_load,
+ .boot = gm200_acr_load_boot,
+ .bld = gp108_acr_hsfw_bld,
+};
+
+MODULE_FIRMWARE("nvidia/gp108/acr/bl.bin");
+MODULE_FIRMWARE("nvidia/gp108/acr/ucode_load.bin");
+
+MODULE_FIRMWARE("nvidia/gv100/acr/bl.bin");
+MODULE_FIRMWARE("nvidia/gv100/acr/ucode_load.bin");
+
+static const struct nvkm_acr_hsf_fwif
+gp108_acr_load_fwif[] = {
+ { 0, nvkm_acr_hsfw_load, &gp108_acr_load_0 },
+ {}
+};
+
+static const struct nvkm_acr_func
+gp108_acr = {
+ .load = gp108_acr_load_fwif,
+ .unload = gp108_acr_unload_fwif,
+ .wpr_parse = gp102_acr_wpr_parse,
+ .wpr_layout = gp102_acr_wpr_layout,
+ .wpr_alloc = gp102_acr_wpr_alloc,
+ .wpr_build = gp102_acr_wpr_build,
+ .wpr_patch = gp102_acr_wpr_patch,
+ .wpr_check = gm200_acr_wpr_check,
+ .init = gm200_acr_init,
+};
+
+static const struct nvkm_acr_fwif
+gp108_acr_fwif[] = {
+ { 0, gp102_acr_load, &gp108_acr },
+ {}
+};
+
+int
+gp108_acr_new(struct nvkm_device *device, int index, struct nvkm_acr **pacr)
+{
+ return nvkm_acr_new_(gp108_acr_fwif, device, index, pacr);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/acr/gp10b.c b/drivers/gpu/drm/nouveau/nvkm/subdev/acr/gp10b.c
new file mode 100644
index 000000000000..39de64292a41
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/acr/gp10b.c
@@ -0,0 +1,57 @@
+/*
+ * Copyright 2019 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include "priv.h"
+
+#if IS_ENABLED(CONFIG_ARCH_TEGRA_186_SOC)
+MODULE_FIRMWARE("nvidia/gp10b/acr/bl.bin");
+MODULE_FIRMWARE("nvidia/gp10b/acr/ucode_load.bin");
+#endif
+
+static const struct nvkm_acr_hsf_fwif
+gp10b_acr_load_fwif[] = {
+ { 0, nvkm_acr_hsfw_load, &gm20b_acr_load_0 },
+ {}
+};
+
+static const struct nvkm_acr_func
+gp10b_acr = {
+ .load = gp10b_acr_load_fwif,
+ .wpr_parse = gm200_acr_wpr_parse,
+ .wpr_layout = gm200_acr_wpr_layout,
+ .wpr_alloc = gm20b_acr_wpr_alloc,
+ .wpr_build = gm200_acr_wpr_build,
+ .wpr_patch = gm200_acr_wpr_patch,
+ .wpr_check = gm200_acr_wpr_check,
+ .init = gm200_acr_init,
+};
+
+static const struct nvkm_acr_fwif
+gp10b_acr_fwif[] = {
+ { 0, gm20b_acr_load, &gp10b_acr },
+ {}
+};
+
+int
+gp10b_acr_new(struct nvkm_device *device, int index, struct nvkm_acr **pacr)
+{
+ return nvkm_acr_new_(gp10b_acr_fwif, device, index, pacr);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/acr/hsfw.c b/drivers/gpu/drm/nouveau/nvkm/subdev/acr/hsfw.c
new file mode 100644
index 000000000000..aecce2dac558
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/acr/hsfw.c
@@ -0,0 +1,180 @@
+/*
+ * Copyright 2019 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include "priv.h"
+
+#include <core/firmware.h>
+
+#include <nvfw/fw.h>
+#include <nvfw/hs.h>
+
+static void
+nvkm_acr_hsfw_del(struct nvkm_acr_hsfw *hsfw)
+{
+ list_del(&hsfw->head);
+ kfree(hsfw->imem);
+ kfree(hsfw->image);
+ kfree(hsfw->sig.prod.data);
+ kfree(hsfw->sig.dbg.data);
+ kfree(hsfw);
+}
+
+void
+nvkm_acr_hsfw_del_all(struct nvkm_acr *acr)
+{
+ struct nvkm_acr_hsfw *hsfw, *hsft;
+ list_for_each_entry_safe(hsfw, hsft, &acr->hsfw, head) {
+ nvkm_acr_hsfw_del(hsfw);
+ }
+}
+
+static int
+nvkm_acr_hsfw_load_image(struct nvkm_acr *acr, const char *name, int ver,
+ struct nvkm_acr_hsfw *hsfw)
+{
+ struct nvkm_subdev *subdev = &acr->subdev;
+ const struct firmware *fw;
+ const struct nvfw_bin_hdr *hdr;
+ const struct nvfw_hs_header *fwhdr;
+ const struct nvfw_hs_load_header *lhdr;
+ u32 loc, sig;
+ int ret;
+
+ ret = nvkm_firmware_get(subdev, name, ver, &fw);
+ if (ret < 0)
+ return ret;
+
+ hdr = nvfw_bin_hdr(subdev, fw->data);
+ fwhdr = nvfw_hs_header(subdev, fw->data + hdr->header_offset);
+
+ /* Earlier FW releases by NVIDIA for Nouveau's use aren't in NVIDIA's
+ * standard format, and don't have the indirection seen in the 0x10de
+ * case.
+ */
+ switch (hdr->bin_magic) {
+ case 0x000010de:
+ loc = *(u32 *)(fw->data + fwhdr->patch_loc);
+ sig = *(u32 *)(fw->data + fwhdr->patch_sig);
+ break;
+ case 0x3b1d14f0:
+ loc = fwhdr->patch_loc;
+ sig = fwhdr->patch_sig;
+ break;
+ default:
+ ret = -EINVAL;
+ goto done;
+ }
+
+ lhdr = nvfw_hs_load_header(subdev, fw->data + fwhdr->hdr_offset);
+
+ if (!(hsfw->image = kmalloc(hdr->data_size, GFP_KERNEL))) {
+ ret = -ENOMEM;
+ goto done;
+ }
+
+ memcpy(hsfw->image, fw->data + hdr->data_offset, hdr->data_size);
+ hsfw->image_size = hdr->data_size;
+ hsfw->non_sec_addr = lhdr->non_sec_code_off;
+ hsfw->non_sec_size = lhdr->non_sec_code_size;
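+ /* apps[] holds num_apps code offsets followed by num_apps sizes,
+  * so [0] and [num_apps] select app 0's offset and size. */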
+ hsfw->sec_addr = lhdr->apps[0];
+ hsfw->sec_size = lhdr->apps[lhdr->num_apps];
+ hsfw->data_addr = lhdr->data_dma_base;
+ hsfw->data_size = lhdr->data_size;
+
+ hsfw->sig.prod.size = fwhdr->sig_prod_size;
+ hsfw->sig.prod.data = kmalloc(hsfw->sig.prod.size, GFP_KERNEL);
+ if (!hsfw->sig.prod.data) {
+ ret = -ENOMEM;
+ goto done;
+ }
+
+ memcpy(hsfw->sig.prod.data, fw->data + fwhdr->sig_prod_offset + sig,
+ hsfw->sig.prod.size);
+
+ hsfw->sig.dbg.size = fwhdr->sig_dbg_size;
+ hsfw->sig.dbg.data = kmalloc(hsfw->sig.dbg.size, GFP_KERNEL);
+ if (!hsfw->sig.dbg.data) {
+ ret = -ENOMEM;
+ goto done;
+ }
+
+ memcpy(hsfw->sig.dbg.data, fw->data + fwhdr->sig_dbg_offset + sig,
+ hsfw->sig.dbg.size);
+
+ hsfw->sig.patch_loc = loc;
+done:
+ nvkm_firmware_put(fw);
+ return ret;
+}
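The signature patching above hinges on the bin_magic switch: in the 0x10de container, patch_loc and patch_sig are file offsets to u32s holding the real values, while the older 0x3b1d14f0 layout stores the values inline. A hedged standalone sketch of the same normalization; the struct here is a simplification of nvfw_hs_header, and memcpy() is used instead of the driver's direct u32 casts to keep the sketch alignment-safe on any host:

    #include <stdint.h>
    #include <string.h>

    /* Simplified stand-in for the header; illustrative field subset. */
    struct hs_header_sketch {
        uint32_t patch_loc;
        uint32_t patch_sig;
    };

    static int
    patch_info(uint32_t bin_magic, const uint8_t *fw,
               const struct hs_header_sketch *hdr,
               uint32_t *loc, uint32_t *sig)
    {
        switch (bin_magic) {
        case 0x000010de: /* standard container: one level of indirection */
            memcpy(loc, fw + hdr->patch_loc, sizeof(*loc));
            memcpy(sig, fw + hdr->patch_sig, sizeof(*sig));
            return 0;
        case 0x3b1d14f0: /* earlier Nouveau-specific layout: inline values */
            *loc = hdr->patch_loc;
            *sig = hdr->patch_sig;
            return 0;
        default:
            return -1;
        }
    }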
+
+static int
+nvkm_acr_hsfw_load_bl(struct nvkm_acr *acr, const char *name, int ver,
+ struct nvkm_acr_hsfw *hsfw)
+{
+ struct nvkm_subdev *subdev = &acr->subdev;
+ const struct nvfw_bin_hdr *hdr;
+ const struct nvfw_bl_desc *desc;
+ const struct firmware *fw;
+ u8 *data;
+ int ret;
+
+ ret = nvkm_firmware_get(subdev, name, ver, &fw);
+ if (ret)
+ return ret;
+
+ hdr = nvfw_bin_hdr(subdev, fw->data);
+ desc = nvfw_bl_desc(subdev, fw->data + hdr->header_offset);
+ data = (void *)fw->data + hdr->data_offset;
+
+ hsfw->imem_size = desc->code_size;
+ hsfw->imem_tag = desc->start_tag;
+ hsfw->imem = kmalloc(desc->code_size, GFP_KERNEL);
+ if (!hsfw->imem) {
+ nvkm_firmware_put(fw);
+ return -ENOMEM;
+ }
+
+ memcpy(hsfw->imem, data + desc->code_off, desc->code_size);
+
+ nvkm_firmware_put(fw);
+ return 0;
+}
+
+int
+nvkm_acr_hsfw_load(struct nvkm_acr *acr, const char *bl, const char *fw,
+ const char *name, int version,
+ const struct nvkm_acr_hsf_fwif *fwif)
+{
+ struct nvkm_acr_hsfw *hsfw;
+ int ret;
+
+ if (!(hsfw = kzalloc(sizeof(*hsfw), GFP_KERNEL)))
+ return -ENOMEM;
+
+ hsfw->func = fwif->func;
+ hsfw->name = name;
+ list_add_tail(&hsfw->head, &acr->hsfw);
+
+ ret = nvkm_acr_hsfw_load_bl(acr, bl, version, hsfw);
+ if (ret)
+ goto done;
+
+ ret = nvkm_acr_hsfw_load_image(acr, fw, version, hsfw);
+done:
+ if (ret)
+ nvkm_acr_hsfw_del(hsfw);
+ return ret;
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/acr/lsfw.c b/drivers/gpu/drm/nouveau/nvkm/subdev/acr/lsfw.c
new file mode 100644
index 000000000000..07d1830126ab
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/acr/lsfw.c
@@ -0,0 +1,253 @@
+/*
+ * Copyright 2019 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include "priv.h"
+#include <core/falcon.h>
+#include <core/firmware.h>
+#include <nvfw/fw.h>
+#include <nvfw/ls.h>
+
+void
+nvkm_acr_lsfw_del(struct nvkm_acr_lsfw *lsfw)
+{
+ nvkm_blob_dtor(&lsfw->img);
+ nvkm_firmware_put(lsfw->sig);
+ list_del(&lsfw->head);
+ kfree(lsfw);
+}
+
+void
+nvkm_acr_lsfw_del_all(struct nvkm_acr *acr)
+{
+ struct nvkm_acr_lsfw *lsfw, *lsft;
+ list_for_each_entry_safe(lsfw, lsft, &acr->lsfw, head) {
+ nvkm_acr_lsfw_del(lsfw);
+ }
+}
+
+static struct nvkm_acr_lsfw *
+nvkm_acr_lsfw_get(struct nvkm_acr *acr, enum nvkm_acr_lsf_id id)
+{
+ struct nvkm_acr_lsfw *lsfw;
+ list_for_each_entry(lsfw, &acr->lsfw, head) {
+ if (lsfw->id == id)
+ return lsfw;
+ }
+ return NULL;
+}
+
+struct nvkm_acr_lsfw *
+nvkm_acr_lsfw_add(const struct nvkm_acr_lsf_func *func, struct nvkm_acr *acr,
+ struct nvkm_falcon *falcon, enum nvkm_acr_lsf_id id)
+{
+ struct nvkm_acr_lsfw *lsfw;
+
+ if (!acr)
+ return ERR_PTR(-ENOSYS);
+
+ lsfw = nvkm_acr_lsfw_get(acr, id);
+ if (lsfw && lsfw->func) {
+ nvkm_error(&acr->subdev, "LSFW %d redefined\n", id);
+ return ERR_PTR(-EEXIST);
+ }
+
+ if (!lsfw) {
+ if (!(lsfw = kzalloc(sizeof(*lsfw), GFP_KERNEL)))
+ return ERR_PTR(-ENOMEM);
+
+ lsfw->id = id;
+ list_add_tail(&lsfw->head, &acr->lsfw);
+ }
+
+ lsfw->func = func;
+ lsfw->falcon = falcon;
+ return lsfw;
+}
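The two-phase protocol matters here: gp102_acr_wpr_parse() earlier pre-creates entries by ID alone (func == NULL), and the owning engine later calls back with its function table and falcon to claim the slot. A hedged caller sketch; my_lsf_func and my_falcon are stand-ins for whatever the engine provides, and only NVKM_ACR_LSF_SEC2 is taken from this patch:

    /* Hypothetical engine-side registration. */
    struct nvkm_acr_lsfw *lsfw;

    lsfw = nvkm_acr_lsfw_add(&my_lsf_func, subdev->device->acr,
                             &my_falcon, NVKM_ACR_LSF_SEC2);
    if (IS_ERR(lsfw))
        return PTR_ERR(lsfw);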
+
+static struct nvkm_acr_lsfw *
+nvkm_acr_lsfw_load_sig_image_desc_(struct nvkm_subdev *subdev,
+ struct nvkm_falcon *falcon,
+ enum nvkm_acr_lsf_id id,
+ const char *path, int ver,
+ const struct nvkm_acr_lsf_func *func,
+ const struct firmware **pdesc)
+{
+ struct nvkm_acr *acr = subdev->device->acr;
+ struct nvkm_acr_lsfw *lsfw;
+ int ret;
+
+ if (IS_ERR((lsfw = nvkm_acr_lsfw_add(func, acr, falcon, id))))
+ return lsfw;
+
+ ret = nvkm_firmware_load_name(subdev, path, "sig", ver, &lsfw->sig);
+ if (ret)
+ goto done;
+
+ ret = nvkm_firmware_load_blob(subdev, path, "image", ver, &lsfw->img);
+ if (ret)
+ goto done;
+
+ ret = nvkm_firmware_load_name(subdev, path, "desc", ver, pdesc);
+done:
+ if (ret) {
+ nvkm_acr_lsfw_del(lsfw);
+ return ERR_PTR(ret);
+ }
+
+ return lsfw;
+}
+
+static void
+nvkm_acr_lsfw_from_desc(const struct nvfw_ls_desc_head *desc,
+ struct nvkm_acr_lsfw *lsfw)
+{
+ lsfw->bootloader_size = ALIGN(desc->bootloader_size, 256);
+ lsfw->bootloader_imem_offset = desc->bootloader_imem_offset;
+
+ lsfw->app_size = ALIGN(desc->app_size, 256);
+ lsfw->app_start_offset = desc->app_start_offset;
+ lsfw->app_imem_entry = desc->app_imem_entry;
+ lsfw->app_resident_code_offset = desc->app_resident_code_offset;
+ lsfw->app_resident_code_size = desc->app_resident_code_size;
+ lsfw->app_resident_data_offset = desc->app_resident_data_offset;
+ lsfw->app_resident_data_size = desc->app_resident_data_size;
+
+ lsfw->ucode_size = ALIGN(lsfw->app_resident_data_offset, 256) +
+ lsfw->bootloader_size;
+ lsfw->data_size = lsfw->app_size + lsfw->bootloader_size -
+ lsfw->ucode_size;
+}
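The closing size computation is easier to see with numbers: since the bootloader term appears on both sides, data_size reduces to the app's data segment. A worked example with assumed descriptor values that are already 256-byte aligned, as the code above guarantees:

    #include <assert.h>

    #define ALIGN(x, a) (((x) + (a) - 1) & ~((a) - 1u))

    int main(void)
    {
        /* Assumed, pre-aligned descriptor values. */
        unsigned bootloader_size = 0x100;
        unsigned app_size = 0x4200;
        unsigned app_resident_data_offset = 0x3000;

        unsigned ucode_size = ALIGN(app_resident_data_offset, 256)
                            + bootloader_size;   /* 0x3100 */
        unsigned data_size = app_size + bootloader_size
                           - ucode_size;         /* 0x1200 */

        /* The bootloader terms cancel: data covers the data segment. */
        assert(data_size ==
               app_size - ALIGN(app_resident_data_offset, 256));
        return 0;
    }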
+
+int
+nvkm_acr_lsfw_load_sig_image_desc(struct nvkm_subdev *subdev,
+ struct nvkm_falcon *falcon,
+ enum nvkm_acr_lsf_id id,
+ const char *path, int ver,
+ const struct nvkm_acr_lsf_func *func)
+{
+ const struct firmware *fw;
+ struct nvkm_acr_lsfw *lsfw;
+
+ lsfw = nvkm_acr_lsfw_load_sig_image_desc_(subdev, falcon, id, path, ver,
+ func, &fw);
+ if (IS_ERR(lsfw))
+ return PTR_ERR(lsfw);
+
+ nvkm_acr_lsfw_from_desc(&nvfw_ls_desc(subdev, fw->data)->head, lsfw);
+ nvkm_firmware_put(fw);
+ return 0;
+}
+
+int
+nvkm_acr_lsfw_load_sig_image_desc_v1(struct nvkm_subdev *subdev,
+ struct nvkm_falcon *falcon,
+ enum nvkm_acr_lsf_id id,
+ const char *path, int ver,
+ const struct nvkm_acr_lsf_func *func)
+{
+ const struct firmware *fw;
+ struct nvkm_acr_lsfw *lsfw;
+
+ lsfw = nvkm_acr_lsfw_load_sig_image_desc_(subdev, falcon, id, path, ver,
+ func, &fw);
+ if (IS_ERR(lsfw))
+ return PTR_ERR(lsfw);
+
+ nvkm_acr_lsfw_from_desc(&nvfw_ls_desc_v1(subdev, fw->data)->head, lsfw);
+ nvkm_firmware_put(fw);
+ return 0;
+}
+
+int
+nvkm_acr_lsfw_load_bl_inst_data_sig(struct nvkm_subdev *subdev,
+ struct nvkm_falcon *falcon,
+ enum nvkm_acr_lsf_id id,
+ const char *path, int ver,
+ const struct nvkm_acr_lsf_func *func)
+{
+ struct nvkm_acr *acr = subdev->device->acr;
+ struct nvkm_acr_lsfw *lsfw;
+ const struct firmware *bl = NULL, *inst = NULL, *data = NULL;
+ const struct nvfw_bin_hdr *hdr;
+ const struct nvfw_bl_desc *desc;
+ u32 *bldata;
+ int ret;
+
+ if (IS_ERR((lsfw = nvkm_acr_lsfw_add(func, acr, falcon, id))))
+ return PTR_ERR(lsfw);
+
+ ret = nvkm_firmware_load_name(subdev, path, "bl", ver, &bl);
+ if (ret)
+ goto done;
+
+ hdr = nvfw_bin_hdr(subdev, bl->data);
+ desc = nvfw_bl_desc(subdev, bl->data + hdr->header_offset);
+ bldata = (void *)(bl->data + hdr->data_offset);
+
+ ret = nvkm_firmware_load_name(subdev, path, "inst", ver, &inst);
+ if (ret)
+ goto done;
+
+ ret = nvkm_firmware_load_name(subdev, path, "data", ver, &data);
+ if (ret)
+ goto done;
+
+ ret = nvkm_firmware_load_name(subdev, path, "sig", ver, &lsfw->sig);
+ if (ret)
+ goto done;
+
+ lsfw->bootloader_size = ALIGN(desc->code_size, 256);
+ lsfw->bootloader_imem_offset = desc->start_tag << 8;
+
+ lsfw->app_start_offset = lsfw->bootloader_size;
+ lsfw->app_imem_entry = 0;
+ lsfw->app_resident_code_offset = 0;
+ lsfw->app_resident_code_size = ALIGN(inst->size, 256);
+ lsfw->app_resident_data_offset = lsfw->app_resident_code_size;
+ lsfw->app_resident_data_size = ALIGN(data->size, 256);
+ lsfw->app_size = lsfw->app_resident_code_size +
+ lsfw->app_resident_data_size;
+
+ lsfw->img.size = lsfw->bootloader_size + lsfw->app_size;
+ if (!(lsfw->img.data = kzalloc(lsfw->img.size, GFP_KERNEL))) {
+ ret = -ENOMEM;
+ goto done;
+ }
+
+ memcpy(lsfw->img.data, bldata, lsfw->bootloader_size);
+ memcpy(lsfw->img.data + lsfw->app_start_offset +
+ lsfw->app_resident_code_offset, inst->data, inst->size);
+ memcpy(lsfw->img.data + lsfw->app_start_offset +
+ lsfw->app_resident_data_offset, data->data, data->size);
+
+ lsfw->ucode_size = ALIGN(lsfw->app_resident_data_offset, 256) +
+ lsfw->bootloader_size;
+ lsfw->data_size = lsfw->app_size + lsfw->bootloader_size -
+ lsfw->ucode_size;
+
+done:
+ if (ret)
+ nvkm_acr_lsfw_del(lsfw);
+ nvkm_firmware_put(data);
+ nvkm_firmware_put(inst);
+ nvkm_firmware_put(bl);
+ return ret;
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/acr/priv.h b/drivers/gpu/drm/nouveau/nvkm/subdev/acr/priv.h
new file mode 100644
index 000000000000..d8ba72806d39
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/acr/priv.h
@@ -0,0 +1,151 @@
+/* SPDX-License-Identifier: MIT */
+#ifndef __NVKM_ACR_PRIV_H__
+#define __NVKM_ACR_PRIV_H__
+#include <subdev/acr.h>
+struct lsb_header_tail;
+
+struct nvkm_acr_fwif {
+ int version;
+ int (*load)(struct nvkm_acr *, int version,
+ const struct nvkm_acr_fwif *);
+ const struct nvkm_acr_func *func;
+};
+
+int gm20b_acr_load(struct nvkm_acr *, int, const struct nvkm_acr_fwif *);
+int gp102_acr_load(struct nvkm_acr *, int, const struct nvkm_acr_fwif *);
+
+struct nvkm_acr_lsf;
+struct nvkm_acr_func {
+ const struct nvkm_acr_hsf_fwif *load;
+ const struct nvkm_acr_hsf_fwif *ahesasc;
+ const struct nvkm_acr_hsf_fwif *asb;
+ const struct nvkm_acr_hsf_fwif *unload;
+ int (*wpr_parse)(struct nvkm_acr *);
+ u32 (*wpr_layout)(struct nvkm_acr *);
+ int (*wpr_alloc)(struct nvkm_acr *, u32 wpr_size);
+ int (*wpr_build)(struct nvkm_acr *, struct nvkm_acr_lsf *rtos);
+ void (*wpr_patch)(struct nvkm_acr *, s64 adjust);
+ void (*wpr_check)(struct nvkm_acr *, u64 *start, u64 *limit);
+ int (*init)(struct nvkm_acr *);
+ void (*fini)(struct nvkm_acr *);
+};
+
+int gm200_acr_wpr_parse(struct nvkm_acr *);
+u32 gm200_acr_wpr_layout(struct nvkm_acr *);
+int gm200_acr_wpr_build(struct nvkm_acr *, struct nvkm_acr_lsf *);
+void gm200_acr_wpr_patch(struct nvkm_acr *, s64);
+void gm200_acr_wpr_check(struct nvkm_acr *, u64 *, u64 *);
+void gm200_acr_wpr_build_lsb_tail(struct nvkm_acr_lsfw *,
+ struct lsb_header_tail *);
+int gm200_acr_init(struct nvkm_acr *);
+
+int gm20b_acr_wpr_alloc(struct nvkm_acr *, u32 wpr_size);
+
+int gp102_acr_wpr_parse(struct nvkm_acr *);
+u32 gp102_acr_wpr_layout(struct nvkm_acr *);
+int gp102_acr_wpr_alloc(struct nvkm_acr *, u32 wpr_size);
+int gp102_acr_wpr_build(struct nvkm_acr *, struct nvkm_acr_lsf *);
+int gp102_acr_wpr_build_lsb(struct nvkm_acr *, struct nvkm_acr_lsfw *);
+void gp102_acr_wpr_patch(struct nvkm_acr *, s64);
+
+struct nvkm_acr_hsfw {
+ const struct nvkm_acr_hsf_func *func;
+ const char *name;
+ struct list_head head;
+
+ u32 imem_size;
+ u32 imem_tag;
+ u32 *imem;
+
+ u8 *image;
+ u32 image_size;
+ u32 non_sec_addr;
+ u32 non_sec_size;
+ u32 sec_addr;
+ u32 sec_size;
+ u32 data_addr;
+ u32 data_size;
+
+ struct {
+ struct {
+ void *data;
+ u32 size;
+ } prod, dbg;
+ u32 patch_loc;
+ } sig;
+};
+
+struct nvkm_acr_hsf_fwif {
+ int version;
+ int (*load)(struct nvkm_acr *, const char *bl, const char *fw,
+ const char *name, int version,
+ const struct nvkm_acr_hsf_fwif *);
+ const struct nvkm_acr_hsf_func *func;
+};
+
+int nvkm_acr_hsfw_load(struct nvkm_acr *, const char *, const char *,
+ const char *, int, const struct nvkm_acr_hsf_fwif *);
+void nvkm_acr_hsfw_del_all(struct nvkm_acr *);
+
+struct nvkm_acr_hsf {
+ const struct nvkm_acr_hsf_func *func;
+ const char *name;
+ struct list_head head;
+
+ u32 imem_size;
+ u32 imem_tag;
+ u32 *imem;
+
+ u32 non_sec_addr;
+ u32 non_sec_size;
+ u32 sec_addr;
+ u32 sec_size;
+ u32 data_addr;
+ u32 data_size;
+
+ struct nvkm_memory *ucode;
+ struct nvkm_vma *vma;
+ struct nvkm_falcon *falcon;
+};
+
+struct nvkm_acr_hsf_func {
+ int (*load)(struct nvkm_acr *, struct nvkm_acr_hsfw *);
+ int (*boot)(struct nvkm_acr *, struct nvkm_acr_hsf *);
+ void (*bld)(struct nvkm_acr *, struct nvkm_acr_hsf *);
+};
+
+int gm200_acr_hsfw_load(struct nvkm_acr *, struct nvkm_acr_hsfw *,
+ struct nvkm_falcon *);
+int gm200_acr_hsfw_boot(struct nvkm_acr *, struct nvkm_acr_hsf *,
+ u32 clear_intr, u32 mbox0_ok);
+
+int gm200_acr_load_boot(struct nvkm_acr *, struct nvkm_acr_hsf *);
+
+extern const struct nvkm_acr_hsf_func gm200_acr_unload_0;
+int gm200_acr_unload_load(struct nvkm_acr *, struct nvkm_acr_hsfw *);
+int gm200_acr_unload_boot(struct nvkm_acr *, struct nvkm_acr_hsf *);
+void gm200_acr_hsfw_bld(struct nvkm_acr *, struct nvkm_acr_hsf *);
+
+extern const struct nvkm_acr_hsf_func gm20b_acr_load_0;
+
+int gp102_acr_load_load(struct nvkm_acr *, struct nvkm_acr_hsfw *);
+
+extern const struct nvkm_acr_hsf_func gp108_acr_unload_0;
+void gp108_acr_hsfw_bld(struct nvkm_acr *, struct nvkm_acr_hsf *);
+
+int nvkm_acr_new_(const struct nvkm_acr_fwif *, struct nvkm_device *, int,
+ struct nvkm_acr **);
+int nvkm_acr_hsf_boot(struct nvkm_acr *, const char *name);
+
+struct nvkm_acr_lsf {
+ const struct nvkm_acr_lsf_func *func;
+ struct nvkm_falcon *falcon;
+ enum nvkm_acr_lsf_id id;
+ struct list_head head;
+};
+
+struct nvkm_acr_lsfw *nvkm_acr_lsfw_add(const struct nvkm_acr_lsf_func *,
+ struct nvkm_acr *, struct nvkm_falcon *,
+ enum nvkm_acr_lsf_id);
+void nvkm_acr_lsfw_del(struct nvkm_acr_lsfw *);
+void nvkm_acr_lsfw_del_all(struct nvkm_acr *);
+#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/acr/tu102.c b/drivers/gpu/drm/nouveau/nvkm/subdev/acr/tu102.c
new file mode 100644
index 000000000000..7f4b89d82d32
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/acr/tu102.c
@@ -0,0 +1,215 @@
+/*
+ * Copyright 2019 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include "priv.h"
+
+#include <core/firmware.h>
+#include <core/memory.h>
+#include <subdev/gsp.h>
+#include <subdev/pmu.h>
+#include <engine/sec2.h>
+
+#include <nvfw/acr.h>
+
+static int
+tu102_acr_init(struct nvkm_acr *acr)
+{
+ int ret = nvkm_acr_hsf_boot(acr, "AHESASC");
+ if (ret)
+ return ret;
+
+ return nvkm_acr_hsf_boot(acr, "ASB");
+}
+
+static int
+tu102_acr_wpr_build(struct nvkm_acr *acr, struct nvkm_acr_lsf *rtos)
+{
+ struct nvkm_acr_lsfw *lsfw;
+ u32 offset = 0;
+ int ret;
+
+ /*XXX: shared sub-WPR headers, fill terminator for now. */
+ nvkm_wo32(acr->wpr, 0x200, 0xffffffff);
+
+ /* Fill per-LSF structures. */
+ list_for_each_entry(lsfw, &acr->lsfw, head) {
+ struct lsf_signature_v1 *sig = (void *)lsfw->sig->data;
+ struct wpr_header_v1 hdr = {
+ .falcon_id = lsfw->id,
+ .lsb_offset = lsfw->offset.lsb,
+ .bootstrap_owner = NVKM_ACR_LSF_GSPLITE,
+ .lazy_bootstrap = 1,
+ .bin_version = sig->version,
+ .status = WPR_HEADER_V1_STATUS_COPY,
+ };
+
+ /* Write WPR header. */
+ nvkm_wobj(acr->wpr, offset, &hdr, sizeof(hdr));
+ offset += sizeof(hdr);
+
+ /* Write LSB header. */
+ ret = gp102_acr_wpr_build_lsb(acr, lsfw);
+ if (ret)
+ return ret;
+
+ /* Write ucode image. */
+ nvkm_wobj(acr->wpr, lsfw->offset.img,
+ lsfw->img.data,
+ lsfw->img.size);
+
+ /* Write bootloader data. */
+ lsfw->func->bld_write(acr, lsfw->offset.bld, lsfw);
+ }
+
+ /* Finalise WPR. */
+ nvkm_wo32(acr->wpr, offset, WPR_HEADER_V1_FALCON_ID_INVALID);
+ return 0;
+}
+
+static int
+tu102_acr_hsfw_boot(struct nvkm_acr *acr, struct nvkm_acr_hsf *hsf)
+{
+ return gm200_acr_hsfw_boot(acr, hsf, 0, 0);
+}
+
+static int
+tu102_acr_hsfw_nofw(struct nvkm_acr *acr, const char *bl, const char *fw,
+ const char *name, int version,
+ const struct nvkm_acr_hsf_fwif *fwif)
+{
+ return 0;
+}
+
+MODULE_FIRMWARE("nvidia/tu102/acr/unload_bl.bin");
+MODULE_FIRMWARE("nvidia/tu102/acr/ucode_unload.bin");
+
+MODULE_FIRMWARE("nvidia/tu104/acr/unload_bl.bin");
+MODULE_FIRMWARE("nvidia/tu104/acr/ucode_unload.bin");
+
+MODULE_FIRMWARE("nvidia/tu106/acr/unload_bl.bin");
+MODULE_FIRMWARE("nvidia/tu106/acr/ucode_unload.bin");
+
+static const struct nvkm_acr_hsf_fwif
+tu102_acr_unload_fwif[] = {
+ { 0, nvkm_acr_hsfw_load, &gp108_acr_unload_0 },
+ { -1, tu102_acr_hsfw_nofw },
+ {}
+};
+
+static int
+tu102_acr_asb_load(struct nvkm_acr *acr, struct nvkm_acr_hsfw *hsfw)
+{
+ return gm200_acr_hsfw_load(acr, hsfw, &acr->subdev.device->gsp->falcon);
+}
+
+static const struct nvkm_acr_hsf_func
+tu102_acr_asb_0 = {
+ .load = tu102_acr_asb_load,
+ .boot = tu102_acr_hsfw_boot,
+ .bld = gp108_acr_hsfw_bld,
+};
+
+MODULE_FIRMWARE("nvidia/tu102/acr/ucode_asb.bin");
+MODULE_FIRMWARE("nvidia/tu104/acr/ucode_asb.bin");
+MODULE_FIRMWARE("nvidia/tu106/acr/ucode_asb.bin");
+
+static const struct nvkm_acr_hsf_fwif
+tu102_acr_asb_fwif[] = {
+ { 0, nvkm_acr_hsfw_load, &tu102_acr_asb_0 },
+ { -1, tu102_acr_hsfw_nofw },
+ {}
+};
+
+static const struct nvkm_acr_hsf_func
+tu102_acr_ahesasc_0 = {
+ .load = gp102_acr_load_load,
+ .boot = tu102_acr_hsfw_boot,
+ .bld = gp108_acr_hsfw_bld,
+};
+
+MODULE_FIRMWARE("nvidia/tu102/acr/bl.bin");
+MODULE_FIRMWARE("nvidia/tu102/acr/ucode_ahesasc.bin");
+
+MODULE_FIRMWARE("nvidia/tu104/acr/bl.bin");
+MODULE_FIRMWARE("nvidia/tu104/acr/ucode_ahesasc.bin");
+
+MODULE_FIRMWARE("nvidia/tu106/acr/bl.bin");
+MODULE_FIRMWARE("nvidia/tu106/acr/ucode_ahesasc.bin");
+
+static const struct nvkm_acr_hsf_fwif
+tu102_acr_ahesasc_fwif[] = {
+ { 0, nvkm_acr_hsfw_load, &tu102_acr_ahesasc_0 },
+ { -1, tu102_acr_hsfw_nofw },
+ {}
+};
+
+static const struct nvkm_acr_func
+tu102_acr = {
+ .ahesasc = tu102_acr_ahesasc_fwif,
+ .asb = tu102_acr_asb_fwif,
+ .unload = tu102_acr_unload_fwif,
+ .wpr_parse = gp102_acr_wpr_parse,
+ .wpr_layout = gp102_acr_wpr_layout,
+ .wpr_alloc = gp102_acr_wpr_alloc,
+ .wpr_patch = gp102_acr_wpr_patch,
+ .wpr_build = tu102_acr_wpr_build,
+ .wpr_check = gm200_acr_wpr_check,
+ .init = tu102_acr_init,
+};
+
+static int
+tu102_acr_load(struct nvkm_acr *acr, int version,
+ const struct nvkm_acr_fwif *fwif)
+{
+ struct nvkm_subdev *subdev = &acr->subdev;
+ const struct nvkm_acr_hsf_fwif *hsfwif;
+
+ hsfwif = nvkm_firmware_load(subdev, fwif->func->ahesasc, "AcrAHESASC",
+ acr, "acr/bl", "acr/ucode_ahesasc",
+ "AHESASC");
+ if (IS_ERR(hsfwif))
+ return PTR_ERR(hsfwif);
+
+ hsfwif = nvkm_firmware_load(subdev, fwif->func->asb, "AcrASB",
+ acr, "acr/bl", "acr/ucode_asb", "ASB");
+ if (IS_ERR(hsfwif))
+ return PTR_ERR(hsfwif);
+
+ hsfwif = nvkm_firmware_load(subdev, fwif->func->unload, "AcrUnload",
+ acr, "acr/unload_bl", "acr/ucode_unload",
+ "unload");
+ if (IS_ERR(hsfwif))
+ return PTR_ERR(hsfwif);
+
+ return 0;
+}
+
+static const struct nvkm_acr_fwif
+tu102_acr_fwif[] = {
+ { 0, tu102_acr_load, &tu102_acr },
+ {}
+};
+
+int
+tu102_acr_new(struct nvkm_device *device, int index, struct nvkm_acr **pacr)
+{
+ return nvkm_acr_new_(tu102_acr_fwif, device, index, pacr);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fault/Kbuild b/drivers/gpu/drm/nouveau/nvkm/subdev/fault/Kbuild
index 53b9d638f2c8..d65ec719f153 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fault/Kbuild
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fault/Kbuild
@@ -2,5 +2,6 @@
nvkm-y += nvkm/subdev/fault/base.o
nvkm-y += nvkm/subdev/fault/user.o
nvkm-y += nvkm/subdev/fault/gp100.o
+nvkm-y += nvkm/subdev/fault/gp10b.o
nvkm-y += nvkm/subdev/fault/gv100.o
nvkm-y += nvkm/subdev/fault/tu102.o
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fault/base.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fault/base.c
index ca251560d3e0..f6dca97140d6 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fault/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fault/base.c
@@ -108,7 +108,7 @@ nvkm_fault_oneinit_buffer(struct nvkm_fault *fault, int id)
return ret;
/* Pin fault buffer in BAR2. */
- buffer->addr = nvkm_memory_bar2(buffer->mem);
+ buffer->addr = fault->func->buffer.pin(buffer);
if (buffer->addr == ~0ULL)
return -EFAULT;
@@ -146,6 +146,7 @@ nvkm_fault_dtor(struct nvkm_subdev *subdev)
struct nvkm_fault *fault = nvkm_fault(subdev);
int i;
+ nvkm_notify_fini(&fault->nrpfb);
nvkm_event_fini(&fault->event);
for (i = 0; i < fault->buffer_nr; i++) {
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fault/gp100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fault/gp100.c
index 4f3c4e091117..f6b189cc4330 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fault/gp100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fault/gp100.c
@@ -21,25 +21,26 @@
*/
#include "priv.h"
+#include <core/memory.h>
#include <subdev/mc.h>
#include <nvif/class.h>
-static void
+void
gp100_fault_buffer_intr(struct nvkm_fault_buffer *buffer, bool enable)
{
struct nvkm_device *device = buffer->fault->subdev.device;
nvkm_mc_intr_mask(device, NVKM_SUBDEV_FAULT, enable);
}
-static void
+void
gp100_fault_buffer_fini(struct nvkm_fault_buffer *buffer)
{
struct nvkm_device *device = buffer->fault->subdev.device;
nvkm_mask(device, 0x002a70, 0x00000001, 0x00000000);
}
-static void
+void
gp100_fault_buffer_init(struct nvkm_fault_buffer *buffer)
{
struct nvkm_device *device = buffer->fault->subdev.device;
@@ -48,7 +49,12 @@ gp100_fault_buffer_init(struct nvkm_fault_buffer *buffer)
nvkm_mask(device, 0x002a70, 0x00000001, 0x00000001);
}
-static void
+u64
+gp100_fault_buffer_pin(struct nvkm_fault_buffer *buffer)
+{
+ return nvkm_memory_bar2(buffer->mem);
+}
+
+void
gp100_fault_buffer_info(struct nvkm_fault_buffer *buffer)
{
buffer->entries = nvkm_rd32(buffer->fault->subdev.device, 0x002a78);
@@ -56,7 +62,7 @@ gp100_fault_buffer_info(struct nvkm_fault_buffer *buffer)
buffer->put = 0x002a80;
}
-static void
+void
gp100_fault_intr(struct nvkm_fault *fault)
{
nvkm_event_send(&fault->event, 1, 0, NULL, 0);
@@ -68,6 +74,7 @@ gp100_fault = {
.buffer.nr = 1,
.buffer.entry_size = 32,
.buffer.info = gp100_fault_buffer_info,
+ .buffer.pin = gp100_fault_buffer_pin,
.buffer.init = gp100_fault_buffer_init,
.buffer.fini = gp100_fault_buffer_fini,
.buffer.intr = gp100_fault_buffer_intr,
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue_cik.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fault/gp10b.c
index 19e54acb4125..9e66d1f7654d 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue_cik.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fault/gp10b.c
@@ -1,5 +1,5 @@
/*
- * Copyright 2014 Advanced Micro Devices, Inc.
+ * Copyright (c) 2019 NVIDIA Corporation.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
@@ -18,36 +18,36 @@
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
- *
*/
-#include "kfd_kernel_queue.h"
+#include "priv.h"
-static bool initialize_cik(struct kernel_queue *kq, struct kfd_dev *dev,
- enum kfd_queue_type type, unsigned int queue_size);
-static void uninitialize_cik(struct kernel_queue *kq);
-static void submit_packet_cik(struct kernel_queue *kq);
+#include <core/memory.h>
-void kernel_queue_init_cik(struct kernel_queue_ops *ops)
-{
- ops->initialize = initialize_cik;
- ops->uninitialize = uninitialize_cik;
- ops->submit_packet = submit_packet_cik;
-}
+#include <nvif/class.h>
-static bool initialize_cik(struct kernel_queue *kq, struct kfd_dev *dev,
- enum kfd_queue_type type, unsigned int queue_size)
+u64
+gp10b_fault_buffer_pin(struct nvkm_fault_buffer *buffer)
{
- return true;
+ return nvkm_memory_addr(buffer->mem);
}
-static void uninitialize_cik(struct kernel_queue *kq)
-{
-}
+static const struct nvkm_fault_func
+gp10b_fault = {
+ .intr = gp100_fault_intr,
+ .buffer.nr = 1,
+ .buffer.entry_size = 32,
+ .buffer.info = gp100_fault_buffer_info,
+ .buffer.pin = gp10b_fault_buffer_pin,
+ .buffer.init = gp100_fault_buffer_init,
+ .buffer.fini = gp100_fault_buffer_fini,
+ .buffer.intr = gp100_fault_buffer_intr,
+ .user = { { 0, 0, MAXWELL_FAULT_BUFFER_A }, 0 },
+};
-static void submit_packet_cik(struct kernel_queue *kq)
+int
+gp10b_fault_new(struct nvkm_device *device, int index,
+ struct nvkm_fault **pfault)
{
- *kq->wptr_kernel = kq->pending_wptr;
- write_kernel_doorbell(kq->queue->properties.doorbell_ptr,
- kq->pending_wptr);
+ return nvkm_fault_new_(&gp10b_fault, device, index, pfault);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fault/gv100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fault/gv100.c
index 6747f09c2dc3..2707be4ffabc 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fault/gv100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fault/gv100.c
@@ -214,6 +214,7 @@ gv100_fault = {
.buffer.nr = 2,
.buffer.entry_size = 32,
.buffer.info = gv100_fault_buffer_info,
+ .buffer.pin = gp100_fault_buffer_pin,
.buffer.init = gv100_fault_buffer_init,
.buffer.fini = gv100_fault_buffer_fini,
.buffer.intr = gv100_fault_buffer_intr,
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fault/priv.h b/drivers/gpu/drm/nouveau/nvkm/subdev/fault/priv.h
index 975e66ac6344..f6f1dd7eee1f 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fault/priv.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fault/priv.h
@@ -30,6 +30,7 @@ struct nvkm_fault_func {
int nr;
u32 entry_size;
void (*info)(struct nvkm_fault_buffer *);
+ u64 (*pin)(struct nvkm_fault_buffer *);
void (*init)(struct nvkm_fault_buffer *);
void (*fini)(struct nvkm_fault_buffer *);
void (*intr)(struct nvkm_fault_buffer *, bool enable);
@@ -40,6 +41,15 @@ struct nvkm_fault_func {
} user;
};
+void gp100_fault_buffer_intr(struct nvkm_fault_buffer *, bool enable);
+void gp100_fault_buffer_fini(struct nvkm_fault_buffer *);
+void gp100_fault_buffer_init(struct nvkm_fault_buffer *);
+u64 gp100_fault_buffer_pin(struct nvkm_fault_buffer *);
+void gp100_fault_buffer_info(struct nvkm_fault_buffer *);
+void gp100_fault_intr(struct nvkm_fault *);
+
+u64 gp10b_fault_buffer_pin(struct nvkm_fault_buffer *);
+
int gv100_fault_oneinit(struct nvkm_fault *);
int nvkm_ufault_new(struct nvkm_device *, const struct nvkm_oclass *,
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fault/tu102.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fault/tu102.c
index fa1dfe5692b0..45a6a68b9f48 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fault/tu102.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fault/tu102.c
@@ -154,6 +154,7 @@ tu102_fault = {
.buffer.nr = 2,
.buffer.entry_size = 32,
.buffer.info = tu102_fault_buffer_info,
+ .buffer.pin = gp100_fault_buffer_pin,
.buffer.init = tu102_fault_buffer_init,
.buffer.fini = tu102_fault_buffer_fini,
.buffer.intr = tu102_fault_buffer_intr,
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/base.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/base.c
index b2bb5a3ccb02..5940e0dea2f8 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/base.c
@@ -126,6 +126,34 @@ nvkm_fb_oneinit(struct nvkm_subdev *subdev)
}
static int
+nvkm_fb_init_scrub_vpr(struct nvkm_fb *fb)
+{
+ struct nvkm_subdev *subdev = &fb->subdev;
+ int ret;
+
+ nvkm_debug(subdev, "VPR locked, running scrubber binary\n");
+
+ if (!fb->vpr_scrubber.size) {
+ nvkm_warn(subdev, "VPR locked, but no scrubber binary!\n");
+ return 0;
+ }
+
+ ret = fb->func->vpr.scrub(fb);
+ if (ret) {
+ nvkm_error(subdev, "VPR scrubber binary failed\n");
+ return ret;
+ }
+
+ if (fb->func->vpr.scrub_required(fb)) {
+ nvkm_error(subdev, "VPR still locked after scrub!\n");
+ return -EIO;
+ }
+
+ nvkm_debug(subdev, "VPR scrubber binary successful\n");
+ return 0;
+}
+
+static int
nvkm_fb_init(struct nvkm_subdev *subdev)
{
struct nvkm_fb *fb = nvkm_fb(subdev);
@@ -154,6 +182,14 @@ nvkm_fb_init(struct nvkm_subdev *subdev)
if (fb->func->init_unkn)
fb->func->init_unkn(fb);
+
+ if (fb->func->vpr.scrub_required &&
+ fb->func->vpr.scrub_required(fb)) {
+ ret = nvkm_fb_init_scrub_vpr(fb);
+ if (ret)
+ return ret;
+ }
+
return 0;
}
@@ -172,6 +208,8 @@ nvkm_fb_dtor(struct nvkm_subdev *subdev)
nvkm_mm_fini(&fb->tags);
nvkm_ram_del(&fb->ram);
+ nvkm_blob_dtor(&fb->vpr_scrubber);
+
if (fb->func->dtor)
return fb->func->dtor(fb);
return fb;
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gp102.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gp102.c
index b4d74e815674..fc8c93aa3da5 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gp102.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gp102.c
@@ -24,7 +24,81 @@
#include "gf100.h"
#include "ram.h"
+#include <core/firmware.h>
#include <core/memory.h>
+#include <nvfw/fw.h>
+#include <nvfw/hs.h>
+#include <engine/nvdec.h>
+
+int
+gp102_fb_vpr_scrub(struct nvkm_fb *fb)
+{
+ struct nvkm_subdev *subdev = &fb->subdev;
+ struct nvkm_device *device = subdev->device;
+ struct nvkm_falcon *falcon = &device->nvdec[0]->falcon;
+ struct nvkm_blob *blob = &fb->vpr_scrubber;
+ const struct nvfw_bin_hdr *hsbin_hdr;
+ const struct nvfw_hs_header *fw_hdr;
+ const struct nvfw_hs_load_header *lhdr;
+ void *scrub_data;
+ u32 patch_loc, patch_sig;
+ int ret;
+
+ nvkm_falcon_get(falcon, subdev);
+
+ hsbin_hdr = nvfw_bin_hdr(subdev, blob->data);
+ fw_hdr = nvfw_hs_header(subdev, blob->data + hsbin_hdr->header_offset);
+ lhdr = nvfw_hs_load_header(subdev, blob->data + fw_hdr->hdr_offset);
+ scrub_data = blob->data + hsbin_hdr->data_offset;
+
+ patch_loc = *(u32 *)(blob->data + fw_hdr->patch_loc);
+ patch_sig = *(u32 *)(blob->data + fw_hdr->patch_sig);
+ if (falcon->debug) {
+ memcpy(scrub_data + patch_loc,
+ blob->data + fw_hdr->sig_dbg_offset + patch_sig,
+ fw_hdr->sig_dbg_size);
+ } else {
+ memcpy(scrub_data + patch_loc,
+ blob->data + fw_hdr->sig_prod_offset + patch_sig,
+ fw_hdr->sig_prod_size);
+ }
+
+ nvkm_falcon_reset(falcon);
+ nvkm_falcon_bind_context(falcon, NULL);
+
+ nvkm_falcon_load_imem(falcon, scrub_data, lhdr->non_sec_code_off,
+ lhdr->non_sec_code_size,
+ lhdr->non_sec_code_off >> 8, 0, false);
+ nvkm_falcon_load_imem(falcon, scrub_data + lhdr->apps[0],
+ ALIGN(lhdr->apps[0], 0x100),
+ lhdr->apps[1],
+ lhdr->apps[0] >> 8, 0, true);
+ nvkm_falcon_load_dmem(falcon, scrub_data + lhdr->data_dma_base, 0,
+ lhdr->data_size, 0);
+
+ nvkm_falcon_set_start_addr(falcon, 0x0);
+ nvkm_falcon_start(falcon);
+
+ ret = nvkm_falcon_wait_for_halt(falcon, 500);
+ if (ret < 0) {
+ ret = -ETIMEDOUT;
+ goto end;
+ }
+
+ /* put nvdec in clean state - without reset it will remain in HS mode */
+ nvkm_falcon_reset(falcon);
+end:
+ nvkm_falcon_put(falcon, subdev);
+ return ret;
+}
+
+bool
+gp102_fb_vpr_scrub_required(struct nvkm_fb *fb)
+{
+ struct nvkm_device *device = fb->subdev.device;
+ nvkm_wr32(device, 0x100cd0, 0x2);
+ return (nvkm_rd32(device, 0x100cd0) & 0x00000010) != 0;
+}
static const struct nvkm_fb_func
gp102_fb = {
@@ -33,11 +107,32 @@ gp102_fb = {
.init = gp100_fb_init,
.init_remapper = gp100_fb_init_remapper,
.init_page = gm200_fb_init_page,
+ .vpr.scrub_required = gp102_fb_vpr_scrub_required,
+ .vpr.scrub = gp102_fb_vpr_scrub,
.ram_new = gp100_ram_new,
};
int
+gp102_fb_new_(const struct nvkm_fb_func *func, struct nvkm_device *device,
+ int index, struct nvkm_fb **pfb)
+{
+ int ret = gf100_fb_new_(func, device, index, pfb);
+ if (ret)
+ return ret;
+
+ nvkm_firmware_load_blob(&(*pfb)->subdev, "nvdec/scrubber", "", 0,
+ &(*pfb)->vpr_scrubber);
+ return 0;
+}
+
+int
gp102_fb_new(struct nvkm_device *device, int index, struct nvkm_fb **pfb)
{
- return gf100_fb_new_(&gp102_fb, device, index, pfb);
+ return gp102_fb_new_(&gp102_fb, device, index, pfb);
}
+
+MODULE_FIRMWARE("nvidia/gp102/nvdec/scrubber.bin");
+MODULE_FIRMWARE("nvidia/gp104/nvdec/scrubber.bin");
+MODULE_FIRMWARE("nvidia/gp106/nvdec/scrubber.bin");
+MODULE_FIRMWARE("nvidia/gp107/nvdec/scrubber.bin");
+MODULE_FIRMWARE("nvidia/gp108/nvdec/scrubber.bin");
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gv100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gv100.c
index 3c5e02e9794a..389bad312bf2 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gv100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gv100.c
@@ -35,6 +35,8 @@ gv100_fb = {
.init = gp100_fb_init,
.init_page = gv100_fb_init_page,
.init_unkn = gp100_fb_init_unkn,
+ .vpr.scrub_required = gp102_fb_vpr_scrub_required,
+ .vpr.scrub = gp102_fb_vpr_scrub,
.ram_new = gp100_ram_new,
.default_bigpage = 16,
};
@@ -42,5 +44,10 @@ gv100_fb = {
int
gv100_fb_new(struct nvkm_device *device, int index, struct nvkm_fb **pfb)
{
- return gf100_fb_new_(&gv100_fb, device, index, pfb);
+ return gp102_fb_new_(&gv100_fb, device, index, pfb);
}
+
+MODULE_FIRMWARE("nvidia/gv100/nvdec/scrubber.bin");
+MODULE_FIRMWARE("nvidia/tu102/nvdec/scrubber.bin");
+MODULE_FIRMWARE("nvidia/tu104/nvdec/scrubber.bin");
+MODULE_FIRMWARE("nvidia/tu106/nvdec/scrubber.bin");
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/priv.h b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/priv.h
index c4e9f55af283..5be9c563350d 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/priv.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/priv.h
@@ -17,6 +17,11 @@ struct nvkm_fb_func {
void (*intr)(struct nvkm_fb *);
struct {
+ bool (*scrub_required)(struct nvkm_fb *);
+ int (*scrub)(struct nvkm_fb *);
+ } vpr;
+
+ struct {
int regions;
void (*init)(struct nvkm_fb *, int i, u32 addr, u32 size,
u32 pitch, u32 flags, struct nvkm_fb_tile *);
@@ -72,4 +77,9 @@ int gm200_fb_init_page(struct nvkm_fb *);
void gp100_fb_init_remapper(struct nvkm_fb *);
void gp100_fb_init_unkn(struct nvkm_fb *);
+
+int gp102_fb_new_(const struct nvkm_fb_func *, struct nvkm_device *, int,
+ struct nvkm_fb **);
+bool gp102_fb_vpr_scrub_required(struct nvkm_fb *);
+int gp102_fb_vpr_scrub(struct nvkm_fb *);
#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgf100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgf100.c
index ac87a3b6b7c9..ba43fe158b22 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgf100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgf100.c
@@ -655,7 +655,7 @@ gf100_ram_new_(const struct nvkm_ram_func *func,
static const struct nvkm_ram_func
gf100_ram = {
- .upper = 0x0200000000,
+ .upper = 0x0200000000ULL,
.probe_fbp = gf100_ram_probe_fbp,
.probe_fbp_amount = gf100_ram_probe_fbp_amount,
.probe_fbpa_amount = gf100_ram_probe_fbpa_amount,
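The ULL suffixes added in this and the following ram hunks make the 64-bit width of these constants explicit; on a 32-bit target, where unsigned long is 32 bits, routing an unsuffixed constant through an unsigned long silently truncates it to zero. A minimal illustration of the hazard; the truncation branch assumes a 32-bit unsigned long:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint64_t upper = 0x0200000000ULL;       /* explicit 64-bit */
        unsigned long narrowed = (unsigned long)upper;

        if (sizeof(unsigned long) == 4)         /* 32-bit targets */
            printf("narrowed = %#lx\n", narrowed); /* prints 0 */
        printf("upper = %#llx\n", (unsigned long long)upper);
        return 0;
    }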
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgf108.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgf108.c
index 70a06e3cd55a..d97fa43efb91 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgf108.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgf108.c
@@ -43,7 +43,7 @@ gf108_ram_probe_fbp_amount(const struct nvkm_ram_func *func, u32 fbpao,
static const struct nvkm_ram_func
gf108_ram = {
- .upper = 0x0200000000,
+ .upper = 0x0200000000ULL,
.probe_fbp = gf100_ram_probe_fbp,
.probe_fbp_amount = gf108_ram_probe_fbp_amount,
.probe_fbpa_amount = gf100_ram_probe_fbpa_amount,
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgk104.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgk104.c
index 456aed1f2a02..d350d92852d2 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgk104.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgk104.c
@@ -1698,7 +1698,7 @@ gk104_ram_new_(const struct nvkm_ram_func *func, struct nvkm_fb *fb,
static const struct nvkm_ram_func
gk104_ram = {
- .upper = 0x0200000000,
+ .upper = 0x0200000000ULL,
.probe_fbp = gf100_ram_probe_fbp,
.probe_fbp_amount = gf108_ram_probe_fbp_amount,
.probe_fbpa_amount = gf100_ram_probe_fbpa_amount,
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgm107.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgm107.c
index 27c68e3f9772..be91da854dca 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgm107.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgm107.c
@@ -33,7 +33,7 @@ gm107_ram_probe_fbp(const struct nvkm_ram_func *func,
static const struct nvkm_ram_func
gm107_ram = {
- .upper = 0x1000000000,
+ .upper = 0x1000000000ULL,
.probe_fbp = gm107_ram_probe_fbp,
.probe_fbp_amount = gf108_ram_probe_fbp_amount,
.probe_fbpa_amount = gf100_ram_probe_fbpa_amount,
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgm200.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgm200.c
index 6b0cac1fe7b4..8f91ea91ee25 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgm200.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgm200.c
@@ -48,7 +48,7 @@ gm200_ram_probe_fbp_amount(const struct nvkm_ram_func *func, u32 fbpao,
static const struct nvkm_ram_func
gm200_ram = {
- .upper = 0x1000000000,
+ .upper = 0x1000000000ULL,
.probe_fbp = gm107_ram_probe_fbp,
.probe_fbp_amount = gm200_ram_probe_fbp_amount,
.probe_fbpa_amount = gf100_ram_probe_fbpa_amount,
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgp100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgp100.c
index adb62a6beb63..378f6fb70990 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgp100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgp100.c
@@ -79,7 +79,7 @@ gp100_ram_probe_fbpa(struct nvkm_device *device, int fbpa)
static const struct nvkm_ram_func
gp100_ram = {
- .upper = 0x1000000000,
+ .upper = 0x1000000000ULL,
.probe_fbp = gm107_ram_probe_fbp,
.probe_fbp_amount = gm200_ram_probe_fbp_amount,
.probe_fbpa_amount = gp100_ram_probe_fbpa,
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/Kbuild b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/Kbuild
index e7c4f068936e..67cc3b320169 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/Kbuild
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/Kbuild
@@ -1,2 +1,3 @@
# SPDX-License-Identifier: MIT
+nvkm-y += nvkm/subdev/gsp/base.o
nvkm-y += nvkm/subdev/gsp/gv100.o
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/base.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/base.c
new file mode 100644
index 000000000000..5a32df0f9992
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/base.c
@@ -0,0 +1,59 @@
+/*
+ * Copyright 2019 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include "priv.h"
+#include <core/falcon.h>
+#include <core/firmware.h>
+#include <subdev/acr.h>
+#include <subdev/top.h>
+
+static void *
+nvkm_gsp_dtor(struct nvkm_subdev *subdev)
+{
+ struct nvkm_gsp *gsp = nvkm_gsp(subdev);
+ nvkm_falcon_dtor(&gsp->falcon);
+ return gsp;
+}
+
+static const struct nvkm_subdev_func
+nvkm_gsp = {
+ .dtor = nvkm_gsp_dtor,
+};
+
+int
+nvkm_gsp_new_(const struct nvkm_gsp_fwif *fwif, struct nvkm_device *device,
+ int index, struct nvkm_gsp **pgsp)
+{
+ struct nvkm_gsp *gsp;
+
+ if (!(gsp = *pgsp = kzalloc(sizeof(*gsp), GFP_KERNEL)))
+ return -ENOMEM;
+
+ nvkm_subdev_ctor(&nvkm_gsp, device, index, &gsp->subdev);
+
+ fwif = nvkm_firmware_load(&gsp->subdev, fwif, "Gsp", gsp);
+ if (IS_ERR(fwif))
+ return PTR_ERR(fwif);
+
+ return nvkm_falcon_ctor(fwif->flcn, &gsp->subdev,
+ nvkm_subdev_name[gsp->subdev.index], 0,
+ &gsp->falcon);
+}
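
The constructor above shows the fwif ("firmware interface") pattern this series rolls out across subdevices: nvkm_firmware_load() walks a {}-terminated table of nvkm_gsp_fwif entries and hands back the first entry whose load callback succeeds, with version -1 conventionally meaning "no external firmware required". Below is a minimal, self-contained model of that selection loop; the types and the probing logic are illustrative stand-ins, not the real nvkm_firmware_load() internals.

#include <stdio.h>

/* Stand-in for a per-chipset firmware interface entry. */
struct fwif {
	int version;          /* -1: always matches (no firmware needed) */
	int (*load)(int ver); /* 0 on success, negative errno on failure */
};

static int load_signed(int ver) { return -2; } /* pretend files are absent */
static int nofw(int ver) { return 0; }         /* nothing to fetch */

/* First entry whose load() succeeds wins; the table is {}-terminated. */
static const struct fwif *
fwif_select(const struct fwif *fwif)
{
	for (; fwif->load; fwif++) {
		if (fwif->load(fwif->version) == 0)
			return fwif;
	}
	return NULL;
}

int main(void)
{
	const struct fwif table[] = {
		{  0, load_signed }, /* prefer versioned, signed firmware */
		{ -1, nofw },        /* fall back to the no-firmware path */
		{}
	};
	printf("selected version %d\n", fwif_select(table)->version);
	return 0;
}
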
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/gv100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/gv100.c
index dccfaf1162e2..2114f9b00a28 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/gv100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/gv100.c
@@ -19,44 +19,37 @@
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*/
-#include <subdev/gsp.h>
-#include <subdev/top.h>
-#include <engine/falcon.h>
+#include "priv.h"
+
+static const struct nvkm_falcon_func
+gv100_gsp_flcn = {
+ .fbif = 0x600,
+ .load_imem = nvkm_falcon_v1_load_imem,
+ .load_dmem = nvkm_falcon_v1_load_dmem,
+ .read_dmem = nvkm_falcon_v1_read_dmem,
+ .bind_context = gp102_sec2_flcn_bind_context,
+ .wait_for_halt = nvkm_falcon_v1_wait_for_halt,
+ .clear_interrupt = nvkm_falcon_v1_clear_interrupt,
+ .set_start_addr = nvkm_falcon_v1_set_start_addr,
+ .start = nvkm_falcon_v1_start,
+ .enable = gp102_sec2_flcn_enable,
+ .disable = nvkm_falcon_v1_disable,
+};
static int
-gv100_gsp_oneinit(struct nvkm_subdev *subdev)
-{
- struct nvkm_gsp *gsp = nvkm_gsp(subdev);
-
- gsp->addr = nvkm_top_addr(subdev->device, subdev->index);
- if (!gsp->addr)
- return -EINVAL;
-
- return nvkm_falcon_v1_new(subdev, "GSP", gsp->addr, &gsp->falcon);
-}
-
-static void *
-gv100_gsp_dtor(struct nvkm_subdev *subdev)
+gv100_gsp_nofw(struct nvkm_gsp *gsp, int ver, const struct nvkm_gsp_fwif *fwif)
{
- struct nvkm_gsp *gsp = nvkm_gsp(subdev);
- nvkm_falcon_del(&gsp->falcon);
- return gsp;
+ return 0;
}
-static const struct nvkm_subdev_func
-gv100_gsp = {
- .dtor = gv100_gsp_dtor,
- .oneinit = gv100_gsp_oneinit,
+struct nvkm_gsp_fwif
+gv100_gsp[] = {
+ { -1, gv100_gsp_nofw, &gv100_gsp_flcn },
+ {}
};
int
gv100_gsp_new(struct nvkm_device *device, int index, struct nvkm_gsp **pgsp)
{
- struct nvkm_gsp *gsp;
-
- if (!(gsp = *pgsp = kzalloc(sizeof(*gsp), GFP_KERNEL)))
- return -ENOMEM;
-
- nvkm_subdev_ctor(&gv100_gsp, device, index, &gsp->subdev);
- return 0;
+ return nvkm_gsp_new_(gv100_gsp, device, index, pgsp);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/priv.h b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/priv.h
new file mode 100644
index 000000000000..92820fb997c1
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/priv.h
@@ -0,0 +1,15 @@
+/* SPDX-License-Identifier: MIT */
+#ifndef __NVKM_GSP_PRIV_H__
+#define __NVKM_GSP_PRIV_H__
+#include <subdev/gsp.h>
+enum nvkm_acr_lsf_id;
+
+struct nvkm_gsp_fwif {
+ int version;
+ int (*load)(struct nvkm_gsp *, int ver, const struct nvkm_gsp_fwif *);
+ const struct nvkm_falcon_func *flcn;
+};
+
+int nvkm_gsp_new_(const struct nvkm_gsp_fwif *, struct nvkm_device *, int,
+ struct nvkm_gsp **);
+#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/ltc/Kbuild b/drivers/gpu/drm/nouveau/nvkm/subdev/ltc/Kbuild
index 2b6d36ea7067..728d75010847 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/ltc/Kbuild
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/ltc/Kbuild
@@ -6,3 +6,4 @@ nvkm-y += nvkm/subdev/ltc/gm107.o
nvkm-y += nvkm/subdev/ltc/gm200.o
nvkm-y += nvkm/subdev/ltc/gp100.o
nvkm-y += nvkm/subdev/ltc/gp102.o
+nvkm-y += nvkm/subdev/ltc/gp10b.o
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/ltc/gp10b.c b/drivers/gpu/drm/nouveau/nvkm/subdev/ltc/gp10b.c
new file mode 100644
index 000000000000..c0063c7caa50
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/ltc/gp10b.c
@@ -0,0 +1,65 @@
+/*
+ * Copyright (c) 2019 NVIDIA Corporation.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Thierry Reding
+ */
+
+#include "priv.h"
+
+static void
+gp10b_ltc_init(struct nvkm_ltc *ltc)
+{
+ struct nvkm_device *device = ltc->subdev.device;
+ struct iommu_fwspec *spec;
+
+ nvkm_wr32(device, 0x17e27c, ltc->ltc_nr);
+ nvkm_wr32(device, 0x17e000, ltc->ltc_nr);
+ nvkm_wr32(device, 0x100800, ltc->ltc_nr);
+
+ spec = dev_iommu_fwspec_get(device->dev);
+ if (spec) {
+ u32 sid = spec->ids[0] & 0xffff;
+
+ /* stream ID */
+ nvkm_wr32(device, 0x160000, sid << 2);
+ }
+}
+
+static const struct nvkm_ltc_func
+gp10b_ltc = {
+ .oneinit = gp100_ltc_oneinit,
+ .init = gp10b_ltc_init,
+ .intr = gp100_ltc_intr,
+ .cbc_clear = gm107_ltc_cbc_clear,
+ .cbc_wait = gm107_ltc_cbc_wait,
+ .zbc = 16,
+ .zbc_clear_color = gm107_ltc_zbc_clear_color,
+ .zbc_clear_depth = gm107_ltc_zbc_clear_depth,
+ .zbc_clear_stencil = gp102_ltc_zbc_clear_stencil,
+ .invalidate = gf100_ltc_invalidate,
+ .flush = gf100_ltc_flush,
+};
+
+int
+gp10b_ltc_new(struct nvkm_device *device, int index, struct nvkm_ltc **pltc)
+{
+ return nvkm_ltc_new_(&gp10b_ltc, device, index, pltc);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/ltc/priv.h b/drivers/gpu/drm/nouveau/nvkm/subdev/ltc/priv.h
index 2fcf18e46ce3..eca5a711b1b8 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/ltc/priv.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/ltc/priv.h
@@ -46,4 +46,6 @@ void gm107_ltc_zbc_clear_depth(struct nvkm_ltc *, int, const u32);
int gp100_ltc_oneinit(struct nvkm_ltc *);
void gp100_ltc_init(struct nvkm_ltc *);
void gp100_ltc_intr(struct nvkm_ltc *);
+
+void gp102_ltc_zbc_clear_stencil(struct nvkm_ltc *, int, const u32);
#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/gf100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/gf100.c
index 2d075246dc46..2cd5ec81c0d0 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/gf100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/gf100.c
@@ -30,7 +30,7 @@
* The value 0xff represents an invalid storage type.
*/
const u8 *
-gf100_mmu_kind(struct nvkm_mmu *mmu, int *count)
+gf100_mmu_kind(struct nvkm_mmu *mmu, int *count, u8 *invalid)
{
static const u8
kind[256] = {
@@ -69,6 +69,7 @@ gf100_mmu_kind(struct nvkm_mmu *mmu, int *count)
};
*count = ARRAY_SIZE(kind);
+ *invalid = 0xff;
return kind;
}
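
The ->kind() signature change here (and in the gm200/nv50/tu102 hunks below) replaces a hard-coded sentinel with an out-parameter: pre-Turing kind maps flag unsupported storage types with 0xff (0x7f on nv50), while the Turing table added later in this series uses 0x07, so callers can no longer assume one fixed value. A self-contained sketch of the resulting caller-side validity check, with stand-in types:

#include <stdio.h>
#include <stdint.h>

/* Stand-in for a chipset's ->kind() hook: returns the kind map and
 * reports its length plus the chipset-specific "invalid" sentinel. */
static const uint8_t *
kind_map(int *count, uint8_t *invalid)
{
	static const uint8_t kind[4] = { 0x00, 0x01, 0xff, 0x03 };
	*count = 4;
	*invalid = 0xff; /* 0x07 on Turing, 0x7f on nv50 */
	return kind;
}

/* Mirrors the gf100_vmm_valid()-style check after this change. */
static int kind_valid(uint8_t k)
{
	uint8_t invalid;
	int count;
	const uint8_t *map = kind_map(&count, &invalid);
	return k < count && map[k] != invalid;
}

int main(void)
{
	printf("kind 0x01 valid: %d\n", kind_valid(0x01)); /* 1 */
	printf("kind 0x02 valid: %d\n", kind_valid(0x02)); /* 0: sentinel */
	return 0;
}
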
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/gm200.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/gm200.c
index dbf644ebac97..83990c83f9f8 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/gm200.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/gm200.c
@@ -27,7 +27,7 @@
#include <nvif/class.h>
const u8 *
-gm200_mmu_kind(struct nvkm_mmu *mmu, int *count)
+gm200_mmu_kind(struct nvkm_mmu *mmu, int *count, u8 *invalid)
{
static const u8
kind[256] = {
@@ -65,6 +65,7 @@ gm200_mmu_kind(struct nvkm_mmu *mmu, int *count)
0xfe, 0xfe, 0xfe, 0xfe, 0xff, 0xfd, 0xfe, 0xff
};
*count = ARRAY_SIZE(kind);
+ *invalid = 0xff;
return kind;
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/nv50.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/nv50.c
index db3dfbbb2aa0..c0083ddda65a 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/nv50.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/nv50.c
@@ -27,7 +27,7 @@
#include <nvif/class.h>
const u8 *
-nv50_mmu_kind(struct nvkm_mmu *base, int *count)
+nv50_mmu_kind(struct nvkm_mmu *base, int *count, u8 *invalid)
{
/* 0x01: no bank swizzle
* 0x02: bank swizzled
@@ -57,6 +57,7 @@ nv50_mmu_kind(struct nvkm_mmu *base, int *count)
0x01, 0x01, 0x02, 0x02, 0x01, 0x01, 0x7f, 0x7f
};
*count = ARRAY_SIZE(kind);
+ *invalid = 0x7f;
return kind;
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/priv.h b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/priv.h
index 07f2fcd18f3d..479b02344271 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/priv.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/priv.h
@@ -35,17 +35,17 @@ struct nvkm_mmu_func {
u32 pd_offset;
} vmm;
- const u8 *(*kind)(struct nvkm_mmu *, int *count);
+ const u8 *(*kind)(struct nvkm_mmu *, int *count, u8 *invalid);
bool kind_sys;
};
extern const struct nvkm_mmu_func nv04_mmu;
-const u8 *nv50_mmu_kind(struct nvkm_mmu *, int *count);
+const u8 *nv50_mmu_kind(struct nvkm_mmu *, int *count, u8 *invalid);
-const u8 *gf100_mmu_kind(struct nvkm_mmu *, int *count);
+const u8 *gf100_mmu_kind(struct nvkm_mmu *, int *count, u8 *invalid);
-const u8 *gm200_mmu_kind(struct nvkm_mmu *, int *);
+const u8 *gm200_mmu_kind(struct nvkm_mmu *, int *, u8 *);
struct nvkm_mmu_pt {
union {
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/tu102.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/tu102.c
index c0db0ce10cba..b21e82eb0916 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/tu102.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/tu102.c
@@ -1,5 +1,6 @@
/*
* Copyright 2018 Red Hat Inc.
+ * Copyright 2019 NVIDIA Corporation.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
@@ -26,13 +27,26 @@
#include <nvif/class.h>
+const u8 *
+tu102_mmu_kind(struct nvkm_mmu *mmu, int *count, u8 *invalid)
+{
+ static const u8
+ kind[16] = {
+ 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, /* 0x00 */
+ 0x06, 0x06, 0x02, 0x01, 0x03, 0x04, 0x05, 0x07,
+ };
+ *count = ARRAY_SIZE(kind);
+ *invalid = 0x07;
+ return kind;
+}
+
static const struct nvkm_mmu_func
tu102_mmu = {
.dma_bits = 47,
.mmu = {{ -1, -1, NVIF_CLASS_MMU_GF100}},
.mem = {{ -1, 0, NVIF_CLASS_MEM_GF100}, gf100_mem_new, gf100_mem_map },
.vmm = {{ -1, 0, NVIF_CLASS_VMM_GP100}, tu102_vmm_new },
- .kind = gm200_mmu_kind,
+ .kind = tu102_mmu_kind,
.kind_sys = true,
};
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/ummu.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/ummu.c
index 353f10f92b77..0e4b8941da37 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/ummu.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/ummu.c
@@ -111,15 +111,17 @@ nvkm_ummu_kind(struct nvkm_ummu *ummu, void *argv, u32 argc)
} *args = argv;
const u8 *kind = NULL;
int ret = -ENOSYS, count = 0;
+ u8 kind_inv = 0;
if (mmu->func->kind)
- kind = mmu->func->kind(mmu, &count);
+ kind = mmu->func->kind(mmu, &count, &kind_inv);
if (!(ret = nvif_unpack(ret, &argv, &argc, args->v0, 0, 0, true))) {
if (argc != args->v0.count * sizeof(*args->v0.data))
return -EINVAL;
if (args->v0.count > count)
return -EINVAL;
+ args->v0.kind_inv = kind_inv;
memcpy(args->v0.data, kind, args->v0.count);
} else
return ret;
@@ -157,9 +159,10 @@ nvkm_ummu_new(struct nvkm_device *device, const struct nvkm_oclass *oclass,
struct nvkm_mmu *mmu = device->mmu;
struct nvkm_ummu *ummu;
int ret = -ENOSYS, kinds = 0;
+ u8 unused = 0;
if (mmu->func->kind)
- mmu->func->kind(mmu, &kinds);
+ mmu->func->kind(mmu, &kinds, &unused);
if (!(ret = nvif_unpack(ret, &argv, &argc, args->v0, 0, 0, false))) {
args->v0.dmabits = mmu->dma_bits;
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgf100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgf100.c
index ab6424faf84c..6a2d9eb8e1ea 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgf100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgf100.c
@@ -247,7 +247,7 @@ gf100_vmm_valid(struct nvkm_vmm *vmm, void *argv, u32 argc,
} *args = argv;
struct nvkm_device *device = vmm->mmu->subdev.device;
struct nvkm_memory *memory = map->memory;
- u8 kind, priv, ro, vol;
+ u8 kind, kind_inv, priv, ro, vol;
int kindn, aper, ret = -ENOSYS;
const u8 *kindm;
@@ -274,8 +274,8 @@ gf100_vmm_valid(struct nvkm_vmm *vmm, void *argv, u32 argc,
if (WARN_ON(aper < 0))
return aper;
- kindm = vmm->mmu->func->kind(vmm->mmu, &kindn);
- if (kind >= kindn || kindm[kind] == 0xff) {
+ kindm = vmm->mmu->func->kind(vmm->mmu, &kindn, &kind_inv);
+ if (kind >= kindn || kindm[kind] == kind_inv) {
VMM_DEBUG(vmm, "kind %02x", kind);
return -EINVAL;
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgp100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgp100.c
index b4f519768d5e..d86287565542 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgp100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgp100.c
@@ -320,7 +320,7 @@ gp100_vmm_valid(struct nvkm_vmm *vmm, void *argv, u32 argc,
} *args = argv;
struct nvkm_device *device = vmm->mmu->subdev.device;
struct nvkm_memory *memory = map->memory;
- u8 kind, priv, ro, vol;
+ u8 kind, kind_inv, priv, ro, vol;
int kindn, aper, ret = -ENOSYS;
const u8 *kindm;
@@ -347,8 +347,8 @@ gp100_vmm_valid(struct nvkm_vmm *vmm, void *argv, u32 argc,
if (WARN_ON(aper < 0))
return aper;
- kindm = vmm->mmu->func->kind(vmm->mmu, &kindn);
- if (kind >= kindn || kindm[kind] == 0xff) {
+ kindm = vmm->mmu->func->kind(vmm->mmu, &kindn, &kind_inv);
+ if (kind >= kindn || kindm[kind] == kind_inv) {
VMM_DEBUG(vmm, "kind %02x", kind);
return -EINVAL;
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmnv50.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmnv50.c
index c98afe3134ee..2d89e27e8e9e 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmnv50.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmnv50.c
@@ -235,7 +235,7 @@ nv50_vmm_valid(struct nvkm_vmm *vmm, void *argv, u32 argc,
struct nvkm_device *device = vmm->mmu->subdev.device;
struct nvkm_ram *ram = device->fb->ram;
struct nvkm_memory *memory = map->memory;
- u8 aper, kind, comp, priv, ro;
+ u8 aper, kind, kind_inv, comp, priv, ro;
int kindn, ret = -ENOSYS;
const u8 *kindm;
@@ -278,8 +278,8 @@ nv50_vmm_valid(struct nvkm_vmm *vmm, void *argv, u32 argc,
return -EINVAL;
}
- kindm = vmm->mmu->func->kind(vmm->mmu, &kindn);
- if (kind >= kindn || kindm[kind] == 0x7f) {
+ kindm = vmm->mmu->func->kind(vmm->mmu, &kindn, &kind_inv);
+ if (kind >= kindn || kindm[kind] == kind_inv) {
VMM_DEBUG(vmm, "kind %02x", kind);
return -EINVAL;
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/Kbuild b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/Kbuild
index e37b6e45eaa2..a76c2a7bd696 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/Kbuild
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/Kbuild
@@ -12,3 +12,4 @@ nvkm-y += nvkm/subdev/pmu/gm107.o
nvkm-y += nvkm/subdev/pmu/gm20b.o
nvkm-y += nvkm/subdev/pmu/gp100.o
nvkm-y += nvkm/subdev/pmu/gp102.o
+nvkm-y += nvkm/subdev/pmu/gp10b.o
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/base.c b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/base.c
index ea2e11771bca..a0fe607c9c07 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/base.c
@@ -23,7 +23,7 @@
*/
#include "priv.h"
-#include <core/msgqueue.h>
+#include <core/firmware.h>
#include <subdev/timer.h>
bool
@@ -85,6 +85,12 @@ nvkm_pmu_fini(struct nvkm_subdev *subdev, bool suspend)
pmu->func->fini(pmu);
flush_work(&pmu->recv.work);
+
+ reinit_completion(&pmu->wpr_ready);
+
+ nvkm_falcon_cmdq_fini(pmu->lpq);
+ nvkm_falcon_cmdq_fini(pmu->hpq);
+ pmu->initmsg_received = false;
return 0;
}
@@ -133,19 +139,15 @@ nvkm_pmu_init(struct nvkm_subdev *subdev)
return ret;
}
-static int
-nvkm_pmu_oneinit(struct nvkm_subdev *subdev)
-{
- struct nvkm_pmu *pmu = nvkm_pmu(subdev);
- return nvkm_falcon_v1_new(&pmu->subdev, "PMU", 0x10a000, &pmu->falcon);
-}
-
static void *
nvkm_pmu_dtor(struct nvkm_subdev *subdev)
{
struct nvkm_pmu *pmu = nvkm_pmu(subdev);
- nvkm_msgqueue_del(&pmu->queue);
- nvkm_falcon_del(&pmu->falcon);
+ nvkm_falcon_msgq_del(&pmu->msgq);
+ nvkm_falcon_cmdq_del(&pmu->lpq);
+ nvkm_falcon_cmdq_del(&pmu->hpq);
+ nvkm_falcon_qmgr_del(&pmu->qmgr);
+ nvkm_falcon_dtor(&pmu->falcon);
return nvkm_pmu(subdev);
}
@@ -153,29 +155,50 @@ static const struct nvkm_subdev_func
nvkm_pmu = {
.dtor = nvkm_pmu_dtor,
.preinit = nvkm_pmu_preinit,
- .oneinit = nvkm_pmu_oneinit,
.init = nvkm_pmu_init,
.fini = nvkm_pmu_fini,
.intr = nvkm_pmu_intr,
};
int
-nvkm_pmu_ctor(const struct nvkm_pmu_func *func, struct nvkm_device *device,
+nvkm_pmu_ctor(const struct nvkm_pmu_fwif *fwif, struct nvkm_device *device,
int index, struct nvkm_pmu *pmu)
{
+ int ret;
+
nvkm_subdev_ctor(&nvkm_pmu, device, index, &pmu->subdev);
- pmu->func = func;
+
INIT_WORK(&pmu->recv.work, nvkm_pmu_recv);
init_waitqueue_head(&pmu->recv.wait);
+
+ fwif = nvkm_firmware_load(&pmu->subdev, fwif, "Pmu", pmu);
+ if (IS_ERR(fwif))
+ return PTR_ERR(fwif);
+
+ pmu->func = fwif->func;
+
+ ret = nvkm_falcon_ctor(pmu->func->flcn, &pmu->subdev,
+ nvkm_subdev_name[pmu->subdev.index], 0x10a000,
+ &pmu->falcon);
+ if (ret)
+ return ret;
+
+ if ((ret = nvkm_falcon_qmgr_new(&pmu->falcon, &pmu->qmgr)) ||
+ (ret = nvkm_falcon_cmdq_new(pmu->qmgr, "hpq", &pmu->hpq)) ||
+ (ret = nvkm_falcon_cmdq_new(pmu->qmgr, "lpq", &pmu->lpq)) ||
+ (ret = nvkm_falcon_msgq_new(pmu->qmgr, "msgq", &pmu->msgq)))
+ return ret;
+
+ init_completion(&pmu->wpr_ready);
return 0;
}
int
-nvkm_pmu_new_(const struct nvkm_pmu_func *func, struct nvkm_device *device,
+nvkm_pmu_new_(const struct nvkm_pmu_fwif *fwif, struct nvkm_device *device,
int index, struct nvkm_pmu **ppmu)
{
struct nvkm_pmu *pmu;
if (!(pmu = *ppmu = kzalloc(sizeof(*pmu), GFP_KERNEL)))
return -ENOMEM;
- return nvkm_pmu_ctor(func, device, index, *ppmu);
+ return nvkm_pmu_ctor(fwif, device, index, *ppmu);
}
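
The reworked PMU constructor resolves a fwif entry, constructs the falcon, then creates a queue manager, two command queues (high and low priority) and a message queue. Its error paths return early without unwinding because the object is published through *ppmu before construction finishes, and the subdev core will call nvkm_pmu_dtor(), which deletes whatever was created in reverse order. A compact model of this "publish first, let the destructor unwind" shape; all names here are hypothetical:

#include <stdlib.h>

/* Hypothetical two-stage object: the container is published (*pobj set)
 * before construction completes, so one destructor covers every partial
 * failure path and no ctor-side unwinding is needed. */
struct obj {
	void *qmgr, *hpq, *lpq, *msgq;
};

static void obj_del(struct obj **pobj)
{
	struct obj *obj = *pobj;
	if (obj) {
		free(obj->msgq); /* reverse order of creation */
		free(obj->lpq);
		free(obj->hpq);
		free(obj->qmgr);
		free(obj);
		*pobj = NULL;
	}
}

static int obj_new(struct obj **pobj)
{
	struct obj *obj;
	if (!(obj = *pobj = calloc(1, sizeof(*obj))))
		return -12; /* -ENOMEM */
	if (!(obj->qmgr = malloc(16)) || !(obj->hpq = malloc(16)) ||
	    !(obj->lpq = malloc(16)) || !(obj->msgq = malloc(16)))
		return -12; /* caller invokes obj_del() on failure too */
	return 0;
}

int main(void)
{
	struct obj *obj;
	int ret = obj_new(&obj);
	obj_del(&obj); /* safe for both complete and partial objects */
	return ret ? 1 : 0;
}
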
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gf100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gf100.c
index 0b458656e870..3ecb3d9cbcf2 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gf100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gf100.c
@@ -42,6 +42,7 @@ gf100_pmu_enabled(struct nvkm_pmu *pmu)
static const struct nvkm_pmu_func
gf100_pmu = {
+ .flcn = &gt215_pmu_flcn,
.code.data = gf100_pmu_code,
.code.size = sizeof(gf100_pmu_code),
.data.data = gf100_pmu_data,
@@ -56,7 +57,19 @@ gf100_pmu = {
};
int
+gf100_pmu_nofw(struct nvkm_pmu *pmu, int ver, const struct nvkm_pmu_fwif *fwif)
+{
+ return 0;
+}
+
+static const struct nvkm_pmu_fwif
+gf100_pmu_fwif[] = {
+ { -1, gf100_pmu_nofw, &gf100_pmu },
+ {}
+};
+
+int
gf100_pmu_new(struct nvkm_device *device, int index, struct nvkm_pmu **ppmu)
{
- return nvkm_pmu_new_(&gf100_pmu, device, index, ppmu);
+ return nvkm_pmu_new_(gf100_pmu_fwif, device, index, ppmu);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gf119.c b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gf119.c
index 3dfa79d4fb13..8dd0271aaaee 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gf119.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gf119.c
@@ -26,6 +26,7 @@
static const struct nvkm_pmu_func
gf119_pmu = {
+ .flcn = &gt215_pmu_flcn,
.code.data = gf119_pmu_code,
.code.size = sizeof(gf119_pmu_code),
.data.data = gf119_pmu_data,
@@ -39,8 +40,14 @@ gf119_pmu = {
.recv = gt215_pmu_recv,
};
+static const struct nvkm_pmu_fwif
+gf119_pmu_fwif[] = {
+ { -1, gf100_pmu_nofw, &gf119_pmu },
+ {}
+};
+
int
gf119_pmu_new(struct nvkm_device *device, int index, struct nvkm_pmu **ppmu)
{
- return nvkm_pmu_new_(&gf119_pmu, device, index, ppmu);
+ return nvkm_pmu_new_(gf119_pmu_fwif, device, index, ppmu);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gk104.c b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gk104.c
index 8f7ec10fd2a4..8b70cc17a634 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gk104.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gk104.c
@@ -105,6 +105,7 @@ gk104_pmu_pgob(struct nvkm_pmu *pmu, bool enable)
static const struct nvkm_pmu_func
gk104_pmu = {
+ .flcn = &gt215_pmu_flcn,
.code.data = gk104_pmu_code,
.code.size = sizeof(gk104_pmu_code),
.data.data = gk104_pmu_data,
@@ -119,8 +120,14 @@ gk104_pmu = {
.pgob = gk104_pmu_pgob,
};
+static const struct nvkm_pmu_fwif
+gk104_pmu_fwif[] = {
+ { -1, gf100_pmu_nofw, &gk104_pmu },
+ {}
+};
+
int
gk104_pmu_new(struct nvkm_device *device, int index, struct nvkm_pmu **ppmu)
{
- return nvkm_pmu_new_(&gk104_pmu, device, index, ppmu);
+ return nvkm_pmu_new_(gk104_pmu_fwif, device, index, ppmu);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gk110.c b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gk110.c
index 345741d55a56..0081f2141b10 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gk110.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gk110.c
@@ -84,6 +84,7 @@ gk110_pmu_pgob(struct nvkm_pmu *pmu, bool enable)
static const struct nvkm_pmu_func
gk110_pmu = {
+ .flcn = &gt215_pmu_flcn,
.code.data = gk110_pmu_code,
.code.size = sizeof(gk110_pmu_code),
.data.data = gk110_pmu_data,
@@ -98,8 +99,14 @@ gk110_pmu = {
.pgob = gk110_pmu_pgob,
};
+static const struct nvkm_pmu_fwif
+gk110_pmu_fwif[] = {
+ { -1, gf100_pmu_nofw, &gk110_pmu },
+ {}
+};
+
int
gk110_pmu_new(struct nvkm_device *device, int index, struct nvkm_pmu **ppmu)
{
- return nvkm_pmu_new_(&gk110_pmu, device, index, ppmu);
+ return nvkm_pmu_new_(gk110_pmu_fwif, device, index, ppmu);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gk208.c b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gk208.c
index e4acf7876ea1..b227c701a5e7 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gk208.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gk208.c
@@ -26,6 +26,7 @@
static const struct nvkm_pmu_func
gk208_pmu = {
+ .flcn = &gt215_pmu_flcn,
.code.data = gk208_pmu_code,
.code.size = sizeof(gk208_pmu_code),
.data.data = gk208_pmu_data,
@@ -40,8 +41,14 @@ gk208_pmu = {
.pgob = gk110_pmu_pgob,
};
+static const struct nvkm_pmu_fwif
+gk208_pmu_fwif[] = {
+ { -1, gf100_pmu_nofw, &gk208_pmu },
+ {}
+};
+
int
gk208_pmu_new(struct nvkm_device *device, int index, struct nvkm_pmu **ppmu)
{
- return nvkm_pmu_new_(&gk208_pmu, device, index, ppmu);
+ return nvkm_pmu_new_(gk208_pmu_fwif, device, index, ppmu);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gk20a.c b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gk20a.c
index 05e81855c367..26c1adf8f44c 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gk20a.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gk20a.c
@@ -95,7 +95,7 @@ static void
gk20a_pmu_dvfs_get_dev_status(struct gk20a_pmu *pmu,
struct gk20a_pmu_dvfs_dev_status *status)
{
- struct nvkm_falcon *falcon = pmu->base.falcon;
+ struct nvkm_falcon *falcon = &pmu->base.falcon;
status->busy = nvkm_falcon_rd32(falcon, 0x508 + (BUSY_SLOT * 0x10));
status->total= nvkm_falcon_rd32(falcon, 0x508 + (CLK_SLOT * 0x10));
@@ -104,7 +104,7 @@ gk20a_pmu_dvfs_get_dev_status(struct gk20a_pmu *pmu,
static void
gk20a_pmu_dvfs_reset_dev_status(struct gk20a_pmu *pmu)
{
- struct nvkm_falcon *falcon = pmu->base.falcon;
+ struct nvkm_falcon *falcon = &pmu->base.falcon;
nvkm_falcon_wr32(falcon, 0x508 + (BUSY_SLOT * 0x10), 0x80000000);
nvkm_falcon_wr32(falcon, 0x508 + (CLK_SLOT * 0x10), 0x80000000);
@@ -160,7 +160,7 @@ gk20a_pmu_fini(struct nvkm_pmu *pmu)
struct gk20a_pmu *gpmu = gk20a_pmu(pmu);
nvkm_timer_alarm(pmu->subdev.device->timer, 0, &gpmu->alarm);
- nvkm_falcon_put(pmu->falcon, &pmu->subdev);
+ nvkm_falcon_put(&pmu->falcon, &pmu->subdev);
}
static int
@@ -169,7 +169,7 @@ gk20a_pmu_init(struct nvkm_pmu *pmu)
struct gk20a_pmu *gpmu = gk20a_pmu(pmu);
struct nvkm_subdev *subdev = &pmu->subdev;
struct nvkm_device *device = pmu->subdev.device;
- struct nvkm_falcon *falcon = pmu->falcon;
+ struct nvkm_falcon *falcon = &pmu->falcon;
int ret;
ret = nvkm_falcon_get(falcon, subdev);
@@ -196,25 +196,34 @@ gk20a_dvfs_data= {
static const struct nvkm_pmu_func
gk20a_pmu = {
+ .flcn = &gt215_pmu_flcn,
.enabled = gf100_pmu_enabled,
.init = gk20a_pmu_init,
.fini = gk20a_pmu_fini,
.reset = gf100_pmu_reset,
};
+static const struct nvkm_pmu_fwif
+gk20a_pmu_fwif[] = {
+ { -1, gf100_pmu_nofw, &gk20a_pmu },
+ {}
+};
+
int
gk20a_pmu_new(struct nvkm_device *device, int index, struct nvkm_pmu **ppmu)
{
struct gk20a_pmu *pmu;
+ int ret;
if (!(pmu = kzalloc(sizeof(*pmu), GFP_KERNEL)))
return -ENOMEM;
*ppmu = &pmu->base;
- nvkm_pmu_ctor(&gk20a_pmu, device, index, &pmu->base);
+ ret = nvkm_pmu_ctor(gk20a_pmu_fwif, device, index, &pmu->base);
+ if (ret)
+ return ret;
pmu->data = &gk20a_dvfs_data;
nvkm_alarm_init(&pmu->alarm, gk20a_pmu_dvfs_work);
-
return 0;
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gm107.c b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gm107.c
index 459df1ef9e70..5afb55e58b51 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gm107.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gm107.c
@@ -28,6 +28,7 @@
static const struct nvkm_pmu_func
gm107_pmu = {
+ .flcn = &gt215_pmu_flcn,
.code.data = gm107_pmu_code,
.code.size = sizeof(gm107_pmu_code),
.data.data = gm107_pmu_data,
@@ -41,8 +42,14 @@ gm107_pmu = {
.recv = gt215_pmu_recv,
};
+static const struct nvkm_pmu_fwif
+gm107_pmu_fwif[] = {
+ { -1, gf100_pmu_nofw, &gm107_pmu },
+ {}
+};
+
int
gm107_pmu_new(struct nvkm_device *device, int index, struct nvkm_pmu **ppmu)
{
- return nvkm_pmu_new_(&gm107_pmu, device, index, ppmu);
+ return nvkm_pmu_new_(gm107_pmu_fwif, device, index, ppmu);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gm20b.c b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gm20b.c
index 31c843145c7a..82571032a07d 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gm20b.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gm20b.c
@@ -19,38 +19,224 @@
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
-
-#include <engine/falcon.h>
-#include <core/msgqueue.h>
#include "priv.h"
-static void
+#include <core/memory.h>
+#include <subdev/acr.h>
+
+#include <nvfw/flcn.h>
+#include <nvfw/pmu.h>
+
+static int
+gm20b_pmu_acr_bootstrap_falcon_cb(void *priv, struct nv_falcon_msg *hdr)
+{
+ struct nv_pmu_acr_bootstrap_falcon_msg *msg =
+ container_of(hdr, typeof(*msg), msg.hdr);
+ return msg->falcon_id;
+}
+
+int
+gm20b_pmu_acr_bootstrap_falcon(struct nvkm_falcon *falcon,
+ enum nvkm_acr_lsf_id id)
+{
+ struct nvkm_pmu *pmu = container_of(falcon, typeof(*pmu), falcon);
+ struct nv_pmu_acr_bootstrap_falcon_cmd cmd = {
+ .cmd.hdr.unit_id = NV_PMU_UNIT_ACR,
+ .cmd.hdr.size = sizeof(cmd),
+ .cmd.cmd_type = NV_PMU_ACR_CMD_BOOTSTRAP_FALCON,
+ .flags = NV_PMU_ACR_BOOTSTRAP_FALCON_FLAGS_RESET_YES,
+ .falcon_id = id,
+ };
+ int ret;
+
+ ret = nvkm_falcon_cmdq_send(pmu->hpq, &cmd.cmd.hdr,
+ gm20b_pmu_acr_bootstrap_falcon_cb,
+ &pmu->subdev, msecs_to_jiffies(1000));
+ if (ret >= 0) {
+ if (ret != cmd.falcon_id)
+ ret = -EIO;
+ else
+ ret = 0;
+ }
+
+ return ret;
+}
+
+int
+gm20b_pmu_acr_boot(struct nvkm_falcon *falcon)
+{
+ struct nv_pmu_args args = { .secure_mode = true };
+ const u32 addr_args = falcon->data.limit - sizeof(struct nv_pmu_args);
+ nvkm_falcon_load_dmem(falcon, &args, addr_args, sizeof(args), 0);
+ nvkm_falcon_start(falcon);
+ return 0;
+}
+
+void
+gm20b_pmu_acr_bld_patch(struct nvkm_acr *acr, u32 bld, s64 adjust)
+{
+ struct loader_config hdr;
+ u64 addr;
+
+ nvkm_robj(acr->wpr, bld, &hdr, sizeof(hdr));
+ addr = ((u64)hdr.code_dma_base1 << 40 | hdr.code_dma_base << 8);
+ hdr.code_dma_base = lower_32_bits((addr + adjust) >> 8);
+ hdr.code_dma_base1 = upper_32_bits((addr + adjust) >> 8);
+ addr = ((u64)hdr.data_dma_base1 << 40 | hdr.data_dma_base << 8);
+ hdr.data_dma_base = lower_32_bits((addr + adjust) >> 8);
+ hdr.data_dma_base1 = upper_32_bits((addr + adjust) >> 8);
+ addr = ((u64)hdr.overlay_dma_base1 << 40 | hdr.overlay_dma_base << 8);
+ hdr.overlay_dma_base = lower_32_bits((addr + adjust) >> 8);
+ hdr.overlay_dma_base1 = upper_32_bits((addr + adjust) >> 8);
+ nvkm_wobj(acr->wpr, bld, &hdr, sizeof(hdr));
+
+ loader_config_dump(&acr->subdev, &hdr);
+}
+
+void
+gm20b_pmu_acr_bld_write(struct nvkm_acr *acr, u32 bld,
+ struct nvkm_acr_lsfw *lsfw)
+{
+ const u64 base = lsfw->offset.img + lsfw->app_start_offset;
+ const u64 code = (base + lsfw->app_resident_code_offset) >> 8;
+ const u64 data = (base + lsfw->app_resident_data_offset) >> 8;
+ const struct loader_config hdr = {
+ .dma_idx = FALCON_DMAIDX_UCODE,
+ .code_dma_base = lower_32_bits(code),
+ .code_size_total = lsfw->app_size,
+ .code_size_to_load = lsfw->app_resident_code_size,
+ .code_entry_point = lsfw->app_imem_entry,
+ .data_dma_base = lower_32_bits(data),
+ .data_size = lsfw->app_resident_data_size,
+ .overlay_dma_base = lower_32_bits(code),
+ .argc = 1,
+ .argv = lsfw->falcon->data.limit - sizeof(struct nv_pmu_args),
+ .code_dma_base1 = upper_32_bits(code),
+ .data_dma_base1 = upper_32_bits(data),
+ .overlay_dma_base1 = upper_32_bits(code),
+ };
+
+ nvkm_wobj(acr->wpr, bld, &hdr, sizeof(hdr));
+}
+
+static const struct nvkm_acr_lsf_func
+gm20b_pmu_acr = {
+ .flags = NVKM_ACR_LSF_DMACTL_REQ_CTX,
+ .bld_size = sizeof(struct loader_config),
+ .bld_write = gm20b_pmu_acr_bld_write,
+ .bld_patch = gm20b_pmu_acr_bld_patch,
+ .boot = gm20b_pmu_acr_boot,
+ .bootstrap_falcon = gm20b_pmu_acr_bootstrap_falcon,
+};
+
+static int
+gm20b_pmu_acr_init_wpr_callback(void *priv, struct nv_falcon_msg *hdr)
+{
+ struct nv_pmu_acr_init_wpr_region_msg *msg =
+ container_of(hdr, typeof(*msg), msg.hdr);
+ struct nvkm_pmu *pmu = priv;
+ struct nvkm_subdev *subdev = &pmu->subdev;
+
+ if (msg->error_code) {
+ nvkm_error(subdev, "ACR WPR init failure: %d\n",
+ msg->error_code);
+ return -EINVAL;
+ }
+
+ nvkm_debug(subdev, "ACR WPR init complete\n");
+ complete_all(&pmu->wpr_ready);
+ return 0;
+}
+
+static int
+gm20b_pmu_acr_init_wpr(struct nvkm_pmu *pmu)
+{
+ struct nv_pmu_acr_init_wpr_region_cmd cmd = {
+ .cmd.hdr.unit_id = NV_PMU_UNIT_ACR,
+ .cmd.hdr.size = sizeof(cmd),
+ .cmd.cmd_type = NV_PMU_ACR_CMD_INIT_WPR_REGION,
+ .region_id = 1,
+ .wpr_offset = 0,
+ };
+
+ return nvkm_falcon_cmdq_send(pmu->hpq, &cmd.cmd.hdr,
+ gm20b_pmu_acr_init_wpr_callback, pmu, 0);
+}
+
+int
+gm20b_pmu_initmsg(struct nvkm_pmu *pmu)
+{
+ struct nv_pmu_init_msg msg;
+ int ret;
+
+ ret = nvkm_falcon_msgq_recv_initmsg(pmu->msgq, &msg, sizeof(msg));
+ if (ret)
+ return ret;
+
+ if (msg.hdr.unit_id != NV_PMU_UNIT_INIT ||
+ msg.msg_type != NV_PMU_INIT_MSG_INIT)
+ return -EINVAL;
+
+ nvkm_falcon_cmdq_init(pmu->hpq, msg.queue_info[0].index,
+ msg.queue_info[0].offset,
+ msg.queue_info[0].size);
+ nvkm_falcon_cmdq_init(pmu->lpq, msg.queue_info[1].index,
+ msg.queue_info[1].offset,
+ msg.queue_info[1].size);
+ nvkm_falcon_msgq_init(pmu->msgq, msg.queue_info[4].index,
+ msg.queue_info[4].offset,
+ msg.queue_info[4].size);
+ return gm20b_pmu_acr_init_wpr(pmu);
+}
+
+void
gm20b_pmu_recv(struct nvkm_pmu *pmu)
{
- if (!pmu->queue) {
- nvkm_warn(&pmu->subdev,
- "recv function called while no firmware set!\n");
- return;
+ if (!pmu->initmsg_received) {
+ int ret = pmu->func->initmsg(pmu);
+ if (ret) {
+ nvkm_error(&pmu->subdev,
+ "error parsing init message: %d\n", ret);
+ return;
+ }
+
+ pmu->initmsg_received = true;
}
- nvkm_msgqueue_recv(pmu->queue);
+ nvkm_falcon_msgq_recv(pmu->msgq);
}
static const struct nvkm_pmu_func
gm20b_pmu = {
+ .flcn = &gt215_pmu_flcn,
.enabled = gf100_pmu_enabled,
.intr = gt215_pmu_intr,
.recv = gm20b_pmu_recv,
+ .initmsg = gm20b_pmu_initmsg,
};
+#if IS_ENABLED(CONFIG_ARCH_TEGRA_210_SOC)
+MODULE_FIRMWARE("nvidia/gm20b/pmu/desc.bin");
+MODULE_FIRMWARE("nvidia/gm20b/pmu/image.bin");
+MODULE_FIRMWARE("nvidia/gm20b/pmu/sig.bin");
+#endif
+
int
-gm20b_pmu_new(struct nvkm_device *device, int index, struct nvkm_pmu **ppmu)
+gm20b_pmu_load(struct nvkm_pmu *pmu, int ver, const struct nvkm_pmu_fwif *fwif)
{
- int ret;
+ return nvkm_acr_lsfw_load_sig_image_desc(&pmu->subdev, &pmu->falcon,
+ NVKM_ACR_LSF_PMU, "pmu/",
+ ver, fwif->acr);
+}
- ret = nvkm_pmu_new_(&gm20b_pmu, device, index, ppmu);
- if (ret)
- return ret;
+static const struct nvkm_pmu_fwif
+gm20b_pmu_fwif[] = {
+ { 0, gm20b_pmu_load, &gm20b_pmu, &gm20b_pmu_acr },
+ {}
+};
- return 0;
+int
+gm20b_pmu_new(struct nvkm_device *device, int index, struct nvkm_pmu **ppmu)
+{
+ return nvkm_pmu_new_(gm20b_pmu_fwif, device, index, ppmu);
}
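
gm20b_pmu_acr_bld_patch() above relocates the loader_config DMA addresses when the WPR image moves: each address is kept right-shifted by 8 (everything is 256-byte aligned) and split across a 32-bit base field plus a base1 field carrying the bits from 40 upward. The round-trip below models that pack/adjust/unpack with stand-in fields:

#include <stdint.h>
#include <stdio.h>
#include <assert.h>

/* Stand-ins for the split loader_config fields. */
struct bld { uint32_t base, base1; };

/* Store a 256-byte-aligned address as (addr >> 8), split into two words. */
static void pack(struct bld *b, uint64_t addr)
{
	b->base  = (uint32_t)(addr >> 8);  /* address bits [8..39]  */
	b->base1 = (uint32_t)(addr >> 40); /* address bits [40..63] */
}

/* Reconstruct: base1 carries bits [40..], base carries bits [8..39]. */
static uint64_t unpack(const struct bld *b)
{
	return ((uint64_t)b->base1 << 40) | ((uint64_t)b->base << 8);
}

int main(void)
{
	struct bld b;
	uint64_t addr = 0x123456789a00ull; /* 256-byte aligned */
	int64_t adjust = 0x1000;           /* WPR relocation delta */

	pack(&b, addr);
	pack(&b, unpack(&b) + adjust);     /* the bld_patch round-trip */
	assert(unpack(&b) == addr + adjust);
	printf("patched to %#llx\n", (unsigned long long)unpack(&b));
	return 0;
}
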
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gp100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gp100.c
index e210cd6af816..09e05db21ff5 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gp100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gp100.c
@@ -25,12 +25,19 @@
static const struct nvkm_pmu_func
gp100_pmu = {
+ .flcn = &gt215_pmu_flcn,
.enabled = gf100_pmu_enabled,
.reset = gf100_pmu_reset,
};
+static const struct nvkm_pmu_fwif
+gp100_pmu_fwif[] = {
+ { -1, gf100_pmu_nofw, &gp100_pmu },
+ {}
+};
+
int
gp100_pmu_new(struct nvkm_device *device, int index, struct nvkm_pmu **ppmu)
{
- return nvkm_pmu_new_(&gp100_pmu, device, index, ppmu);
+ return nvkm_pmu_new_(gp100_pmu_fwif, device, index, ppmu);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gp102.c b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gp102.c
index 98c7a2a8afc4..262b8a3dd507 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gp102.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gp102.c
@@ -39,12 +39,19 @@ gp102_pmu_enabled(struct nvkm_pmu *pmu)
static const struct nvkm_pmu_func
gp102_pmu = {
+ .flcn = &gt215_pmu_flcn,
.enabled = gp102_pmu_enabled,
.reset = gp102_pmu_reset,
};
+static const struct nvkm_pmu_fwif
+gp102_pmu_fwif[] = {
+ { -1, gf100_pmu_nofw, &gp102_pmu },
+ {}
+};
+
int
gp102_pmu_new(struct nvkm_device *device, int index, struct nvkm_pmu **ppmu)
{
- return nvkm_pmu_new_(&gp102_pmu, device, index, ppmu);
+ return nvkm_pmu_new_(gp102_pmu_fwif, device, index, ppmu);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gp10b.c b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gp10b.c
new file mode 100644
index 000000000000..5b81c7320479
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gp10b.c
@@ -0,0 +1,101 @@
+/*
+ * Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+#include "priv.h"
+
+#include <subdev/acr.h>
+
+#include <nvfw/flcn.h>
+#include <nvfw/pmu.h>
+
+static int
+gp10b_pmu_acr_bootstrap_multiple_falcons_cb(void *priv,
+ struct nv_falcon_msg *hdr)
+{
+ struct nv_pmu_acr_bootstrap_multiple_falcons_msg *msg =
+ container_of(hdr, typeof(*msg), msg.hdr);
+ return msg->falcon_mask;
+}
+static int
+gp10b_pmu_acr_bootstrap_multiple_falcons(struct nvkm_falcon *falcon, u32 mask)
+{
+ struct nvkm_pmu *pmu = container_of(falcon, typeof(*pmu), falcon);
+ struct nv_pmu_acr_bootstrap_multiple_falcons_cmd cmd = {
+ .cmd.hdr.unit_id = NV_PMU_UNIT_ACR,
+ .cmd.hdr.size = sizeof(cmd),
+ .cmd.cmd_type = NV_PMU_ACR_CMD_BOOTSTRAP_MULTIPLE_FALCONS,
+ .flags = NV_PMU_ACR_BOOTSTRAP_MULTIPLE_FALCONS_FLAGS_RESET_YES,
+ .falcon_mask = mask,
+ .wpr_lo = 0, /*XXX*/
+ .wpr_hi = 0, /*XXX*/
+ };
+ int ret;
+
+ ret = nvkm_falcon_cmdq_send(pmu->hpq, &cmd.cmd.hdr,
+ gp10b_pmu_acr_bootstrap_multiple_falcons_cb,
+ &pmu->subdev, msecs_to_jiffies(1000));
+ if (ret >= 0) {
+ if (ret != cmd.falcon_mask)
+ ret = -EIO;
+ else
+ ret = 0;
+ }
+
+ return ret;
+}
+
+static const struct nvkm_acr_lsf_func
+gp10b_pmu_acr = {
+ .flags = NVKM_ACR_LSF_DMACTL_REQ_CTX,
+ .bld_size = sizeof(struct loader_config),
+ .bld_write = gm20b_pmu_acr_bld_write,
+ .bld_patch = gm20b_pmu_acr_bld_patch,
+ .boot = gm20b_pmu_acr_boot,
+ .bootstrap_falcon = gm20b_pmu_acr_bootstrap_falcon,
+ .bootstrap_multiple_falcons = gp10b_pmu_acr_bootstrap_multiple_falcons,
+};
+
+static const struct nvkm_pmu_func
+gp10b_pmu = {
+ .flcn = &gt215_pmu_flcn,
+ .enabled = gf100_pmu_enabled,
+ .intr = gt215_pmu_intr,
+ .recv = gm20b_pmu_recv,
+ .initmsg = gm20b_pmu_initmsg,
+};
+
+#if IS_ENABLED(CONFIG_ARCH_TEGRA_186_SOC)
+MODULE_FIRMWARE("nvidia/gp10b/pmu/desc.bin");
+MODULE_FIRMWARE("nvidia/gp10b/pmu/image.bin");
+MODULE_FIRMWARE("nvidia/gp10b/pmu/sig.bin");
+#endif
+
+static const struct nvkm_pmu_fwif
+gp10b_pmu_fwif[] = {
+ { 0, gm20b_pmu_load, &gp10b_pmu, &gp10b_pmu_acr },
+ {}
+};
+
+int
+gp10b_pmu_new(struct nvkm_device *device, int index, struct nvkm_pmu **ppmu)
+{
+ return nvkm_pmu_new_(gp10b_pmu_fwif, device, index, ppmu);
+}
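
gp10b's bootstrap-multiple-falcons command reuses the RPC shape introduced for gm20b: build a fixed-size command, submit it on the high-priority queue, and let the completion callback return the echoed falcon mask so the sender can verify the PMU acted on exactly what was requested. A stripped-down, synchronous model of that request/verify flow, with hypothetical names:

#include <stdio.h>

/* Hypothetical command/reply pair: the reply echoes the request mask. */
struct cmd { unsigned mask; };
struct msg { unsigned mask; };

/* Stand-in for the firmware side: pretend it bootstrapped everything. */
static void pmu_execute(const struct cmd *cmd, struct msg *msg)
{
	msg->mask = cmd->mask;
}

/* Mirrors the cmdq_send + callback pattern: the callback's return value
 * is handed back to the sender, which compares it with what it sent. */
static int callback(const struct msg *msg)
{
	return (int)msg->mask;
}

static int bootstrap_falcons(unsigned mask)
{
	struct cmd cmd = { .mask = mask };
	struct msg msg;
	int ret;

	pmu_execute(&cmd, &msg);
	ret = callback(&msg);
	if (ret >= 0)
		ret = (ret != (int)cmd.mask) ? -5 /* -EIO */ : 0;
	return ret;
}

int main(void)
{
	printf("bootstrap: %d\n", bootstrap_falcons(0x6)); /* 0 on success */
	return 0;
}
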
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gt215.c b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gt215.c
index e04216daea58..88b909913ff9 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gt215.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gt215.c
@@ -241,8 +241,27 @@ gt215_pmu_init(struct nvkm_pmu *pmu)
return 0;
}
+const struct nvkm_falcon_func
+gt215_pmu_flcn = {
+ .debug = 0xc08,
+ .fbif = 0xe00,
+ .load_imem = nvkm_falcon_v1_load_imem,
+ .load_dmem = nvkm_falcon_v1_load_dmem,
+ .read_dmem = nvkm_falcon_v1_read_dmem,
+ .bind_context = nvkm_falcon_v1_bind_context,
+ .wait_for_halt = nvkm_falcon_v1_wait_for_halt,
+ .clear_interrupt = nvkm_falcon_v1_clear_interrupt,
+ .set_start_addr = nvkm_falcon_v1_set_start_addr,
+ .start = nvkm_falcon_v1_start,
+ .enable = nvkm_falcon_v1_enable,
+ .disable = nvkm_falcon_v1_disable,
+ .cmdq = { 0x4a0, 0x4b0, 4 },
+ .msgq = { 0x4c8, 0x4cc, 0 },
+};
+
static const struct nvkm_pmu_func
gt215_pmu = {
+ .flcn = &gt215_pmu_flcn,
.code.data = gt215_pmu_code,
.code.size = sizeof(gt215_pmu_code),
.data.data = gt215_pmu_data,
@@ -256,8 +275,14 @@ gt215_pmu = {
.recv = gt215_pmu_recv,
};
+static const struct nvkm_pmu_fwif
+gt215_pmu_fwif[] = {
+ { -1, gf100_pmu_nofw, &gt215_pmu },
+ {}
+};
+
int
gt215_pmu_new(struct nvkm_device *device, int index, struct nvkm_pmu **ppmu)
{
- return nvkm_pmu_new_(&gt215_pmu, device, index, ppmu);
+ return nvkm_pmu_new_(gt215_pmu_fwif, device, index, ppmu);
}
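
gt215_pmu_flcn parameterises the falcon's queue registers as { head, tail, stride } triples: 0x4a0/0x4b0 with stride 4 for the command queues, 0x4c8/0x4cc for the single message queue. That lets shared queue code address queue N arithmetically instead of via per-chip register tables. A sketch of that resolution; the exact nvkm_falcon_func field names are assumptions here:

#include <stdint.h>
#include <stdio.h>

/* Assumed layout of a queue-register triple: base offsets for the head
 * and tail registers, plus the per-queue stride. */
struct q_regs { uint32_t head, tail, stride; };

static uint32_t q_head(const struct q_regs *q, int index)
{
	return q->head + index * q->stride;
}

static uint32_t q_tail(const struct q_regs *q, int index)
{
	return q->tail + index * q->stride;
}

int main(void)
{
	const struct q_regs cmdq = { 0x4a0, 0x4b0, 4 }; /* from gt215_pmu_flcn */

	/* The HPQ and LPQ land on adjacent register pairs. */
	printf("q0 head/tail: %#x/%#x\n", q_head(&cmdq, 0), q_tail(&cmdq, 0));
	printf("q1 head/tail: %#x/%#x\n", q_head(&cmdq, 1), q_tail(&cmdq, 1));
	return 0;
}
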
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/priv.h b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/priv.h
index 26d73f9cd6d3..f470859244de 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/priv.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/priv.h
@@ -4,13 +4,12 @@
#define nvkm_pmu(p) container_of((p), struct nvkm_pmu, subdev)
#include <subdev/pmu.h>
#include <subdev/pmu/fuc/os.h>
-
-int nvkm_pmu_ctor(const struct nvkm_pmu_func *, struct nvkm_device *,
- int index, struct nvkm_pmu *);
-int nvkm_pmu_new_(const struct nvkm_pmu_func *, struct nvkm_device *,
- int index, struct nvkm_pmu **);
+enum nvkm_acr_lsf_id;
+struct nvkm_acr_lsfw;
struct nvkm_pmu_func {
+ const struct nvkm_falcon_func *flcn;
+
struct {
u32 *data;
u32 size;
@@ -29,9 +28,11 @@ struct nvkm_pmu_func {
int (*send)(struct nvkm_pmu *, u32 reply[2], u32 process,
u32 message, u32 data0, u32 data1);
void (*recv)(struct nvkm_pmu *);
+ int (*initmsg)(struct nvkm_pmu *);
void (*pgob)(struct nvkm_pmu *, bool);
};
+extern const struct nvkm_falcon_func gt215_pmu_flcn;
int gt215_pmu_init(struct nvkm_pmu *);
void gt215_pmu_fini(struct nvkm_pmu *);
void gt215_pmu_intr(struct nvkm_pmu *);
@@ -42,4 +43,26 @@ bool gf100_pmu_enabled(struct nvkm_pmu *);
void gf100_pmu_reset(struct nvkm_pmu *);
void gk110_pmu_pgob(struct nvkm_pmu *, bool);
+
+void gm20b_pmu_acr_bld_patch(struct nvkm_acr *, u32, s64);
+void gm20b_pmu_acr_bld_write(struct nvkm_acr *, u32, struct nvkm_acr_lsfw *);
+int gm20b_pmu_acr_boot(struct nvkm_falcon *);
+int gm20b_pmu_acr_bootstrap_falcon(struct nvkm_falcon *, enum nvkm_acr_lsf_id);
+void gm20b_pmu_recv(struct nvkm_pmu *);
+int gm20b_pmu_initmsg(struct nvkm_pmu *);
+
+struct nvkm_pmu_fwif {
+ int version;
+ int (*load)(struct nvkm_pmu *, int ver, const struct nvkm_pmu_fwif *);
+ const struct nvkm_pmu_func *func;
+ const struct nvkm_acr_lsf_func *acr;
+};
+
+int gf100_pmu_nofw(struct nvkm_pmu *, int, const struct nvkm_pmu_fwif *);
+int gm20b_pmu_load(struct nvkm_pmu *, int, const struct nvkm_pmu_fwif *);
+
+int nvkm_pmu_ctor(const struct nvkm_pmu_fwif *, struct nvkm_device *,
+ int index, struct nvkm_pmu *);
+int nvkm_pmu_new_(const struct nvkm_pmu_fwif *, struct nvkm_device *,
+ int index, struct nvkm_pmu **);
#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/Kbuild b/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/Kbuild
deleted file mode 100644
index f3dee2693c79..000000000000
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/Kbuild
+++ /dev/null
@@ -1,17 +0,0 @@
-# SPDX-License-Identifier: MIT
-nvkm-y += nvkm/subdev/secboot/base.o
-nvkm-y += nvkm/subdev/secboot/hs_ucode.o
-nvkm-y += nvkm/subdev/secboot/ls_ucode_gr.o
-nvkm-y += nvkm/subdev/secboot/ls_ucode_msgqueue.o
-nvkm-y += nvkm/subdev/secboot/acr.o
-nvkm-y += nvkm/subdev/secboot/acr_r352.o
-nvkm-y += nvkm/subdev/secboot/acr_r361.o
-nvkm-y += nvkm/subdev/secboot/acr_r364.o
-nvkm-y += nvkm/subdev/secboot/acr_r367.o
-nvkm-y += nvkm/subdev/secboot/acr_r370.o
-nvkm-y += nvkm/subdev/secboot/acr_r375.o
-nvkm-y += nvkm/subdev/secboot/gm200.o
-nvkm-y += nvkm/subdev/secboot/gm20b.o
-nvkm-y += nvkm/subdev/secboot/gp102.o
-nvkm-y += nvkm/subdev/secboot/gp108.o
-nvkm-y += nvkm/subdev/secboot/gp10b.o
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr.c b/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr.c
deleted file mode 100644
index dc80985cf093..000000000000
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr.c
+++ /dev/null
@@ -1,54 +0,0 @@
-/*
- * Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
- * DEALINGS IN THE SOFTWARE.
- */
-
-#include "acr.h"
-
-#include <core/firmware.h>
-
-/**
- * Convenience function to duplicate a firmware file in memory and check that
- * it has the required minimum size.
- */
-void *
-nvkm_acr_load_firmware(const struct nvkm_subdev *subdev, const char *name,
- size_t min_size)
-{
- const struct firmware *fw;
- void *blob;
- int ret;
-
- ret = nvkm_firmware_get(subdev, name, &fw);
- if (ret)
- return ERR_PTR(ret);
- if (fw->size < min_size) {
- nvkm_error(subdev, "%s is smaller than expected size %zu\n",
- name, min_size);
- nvkm_firmware_put(fw);
- return ERR_PTR(-EINVAL);
- }
- blob = kmemdup(fw->data, fw->size, GFP_KERNEL);
- nvkm_firmware_put(fw);
- if (!blob)
- return ERR_PTR(-ENOMEM);
-
- return blob;
-}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr.h b/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr.h
deleted file mode 100644
index 73a2ac81ac69..000000000000
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr.h
+++ /dev/null
@@ -1,70 +0,0 @@
-/*
- * Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
- * DEALINGS IN THE SOFTWARE.
- */
-#ifndef __NVKM_SECBOOT_ACR_H__
-#define __NVKM_SECBOOT_ACR_H__
-
-#include "priv.h"
-
-struct nvkm_acr;
-
-/**
- * struct nvkm_acr_func - properties and functions specific to an ACR
- *
- * @load: make the ACR ready to run on the given secboot device
- * @reset: reset the specified falcon
- * @start: start the specified falcon (assumed to have been reset)
- */
-struct nvkm_acr_func {
- void (*dtor)(struct nvkm_acr *);
- int (*oneinit)(struct nvkm_acr *, struct nvkm_secboot *);
- int (*fini)(struct nvkm_acr *, struct nvkm_secboot *, bool);
- int (*load)(struct nvkm_acr *, struct nvkm_falcon *,
- struct nvkm_gpuobj *, u64);
- int (*reset)(struct nvkm_acr *, struct nvkm_secboot *, unsigned long);
-};
-
-/**
- * struct nvkm_acr - instance of an ACR
- *
- * @boot_falcon: ID of the falcon that will perform secure boot
- * @managed_falcons: bitfield of falcons managed by this ACR
- * @optional_falcons: bitfield of falcons we can live without
- */
-struct nvkm_acr {
- const struct nvkm_acr_func *func;
- const struct nvkm_subdev *subdev;
-
- enum nvkm_secboot_falcon boot_falcon;
- unsigned long managed_falcons;
- unsigned long optional_falcons;
-};
-
-void *nvkm_acr_load_firmware(const struct nvkm_subdev *, const char *, size_t);
-
-struct nvkm_acr *acr_r352_new(unsigned long);
-struct nvkm_acr *acr_r361_new(unsigned long);
-struct nvkm_acr *acr_r364_new(unsigned long);
-struct nvkm_acr *acr_r367_new(enum nvkm_secboot_falcon, unsigned long);
-struct nvkm_acr *acr_r370_new(enum nvkm_secboot_falcon, unsigned long);
-struct nvkm_acr *acr_r375_new(enum nvkm_secboot_falcon, unsigned long);
-
-#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr_r352.c b/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr_r352.c
deleted file mode 100644
index 7af971db91bc..000000000000
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr_r352.c
+++ /dev/null
@@ -1,1241 +0,0 @@
-/*
- * Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
- * DEALINGS IN THE SOFTWARE.
- */
-
-#include "acr_r352.h"
-#include "hs_ucode.h"
-
-#include <core/gpuobj.h>
-#include <core/firmware.h>
-#include <engine/falcon.h>
-#include <subdev/pmu.h>
-#include <core/msgqueue.h>
-#include <engine/sec2.h>
-
-/**
- * struct acr_r352_flcn_bl_desc - DMEM bootloader descriptor
- * @signature: 16B signature for secure code. 0s if no secure code
- * @ctx_dma: DMA context to be used by BL while loading code/data
- * @code_dma_base: 256B-aligned Physical FB Address where code is located
- * (falcon's $xcbase register)
- * @non_sec_code_off: offset from code_dma_base where the non-secure code is
- * located. The offset must be multiple of 256 to help perf
- * @non_sec_code_size: the size of the nonSecure code part.
- * @sec_code_off: offset from code_dma_base where the secure code is
- * located. The offset must be multiple of 256 to help perf
- * @sec_code_size: the size of the secure code part.
- * @code_entry_point: code entry point which will be invoked by BL after
- * code is loaded.
- * @data_dma_base: 256B aligned Physical FB Address where data is located.
- * (falcon's $xdbase register)
- * @data_size: size of data block. Should be multiple of 256B
- *
- * Structure used by the bootloader to load the rest of the code. This has
- * to be filled by host and copied into DMEM at offset provided in the
- * hsflcn_bl_desc.bl_desc_dmem_load_off.
- */
-struct acr_r352_flcn_bl_desc {
- u32 reserved[4];
- u32 signature[4];
- u32 ctx_dma;
- u32 code_dma_base;
- u32 non_sec_code_off;
- u32 non_sec_code_size;
- u32 sec_code_off;
- u32 sec_code_size;
- u32 code_entry_point;
- u32 data_dma_base;
- u32 data_size;
- u32 code_dma_base1;
- u32 data_dma_base1;
-};
-
-/**
- * acr_r352_generate_flcn_bl_desc - generate generic BL descriptor for LS image
- */
-static void
-acr_r352_generate_flcn_bl_desc(const struct nvkm_acr *acr,
- const struct ls_ucode_img *img, u64 wpr_addr,
- void *_desc)
-{
- struct acr_r352_flcn_bl_desc *desc = _desc;
- const struct ls_ucode_img_desc *pdesc = &img->ucode_desc;
- u64 base, addr_code, addr_data;
-
- base = wpr_addr + img->ucode_off + pdesc->app_start_offset;
- addr_code = (base + pdesc->app_resident_code_offset) >> 8;
- addr_data = (base + pdesc->app_resident_data_offset) >> 8;
-
- desc->ctx_dma = FALCON_DMAIDX_UCODE;
- desc->code_dma_base = lower_32_bits(addr_code);
- desc->code_dma_base1 = upper_32_bits(addr_code);
- desc->non_sec_code_off = pdesc->app_resident_code_offset;
- desc->non_sec_code_size = pdesc->app_resident_code_size;
- desc->code_entry_point = pdesc->app_imem_entry;
- desc->data_dma_base = lower_32_bits(addr_data);
- desc->data_dma_base1 = upper_32_bits(addr_data);
- desc->data_size = pdesc->app_resident_data_size;
-}
-
-
-/**
- * struct hsflcn_acr_desc - data section of the HS firmware
- *
- * This header is to be copied at the beginning of DMEM by the HS bootloader.
- *
- * @signature: signature of ACR ucode
- * @wpr_region_id: region ID holding the WPR header and its details
- * @wpr_offset: offset from the WPR region holding the wpr header
- * @regions: region descriptors
- * @nonwpr_ucode_blob_size: size of LS blob
- * @nonwpr_ucode_blob_start: FB location of the LS blob
- */
-struct hsflcn_acr_desc {
- union {
- u8 reserved_dmem[0x200];
- u32 signatures[4];
- } ucode_reserved_space;
- u32 wpr_region_id;
- u32 wpr_offset;
- u32 mmu_mem_range;
-#define FLCN_ACR_MAX_REGIONS 2
- struct {
- u32 no_regions;
- struct {
- u32 start_addr;
- u32 end_addr;
- u32 region_id;
- u32 read_mask;
- u32 write_mask;
- u32 client_mask;
- } region_props[FLCN_ACR_MAX_REGIONS];
- } regions;
- u32 ucode_blob_size;
- u64 ucode_blob_base __aligned(8);
- struct {
- u32 vpr_enabled;
- u32 vpr_start;
- u32 vpr_end;
- u32 hdcp_policies;
- } vpr_desc;
-};
-
-
-/*
- * Low-secure blob creation
- */
-
-/**
- * struct acr_r352_lsf_lsb_header - LS firmware header
- * @signature: signature to verify the firmware against
- * @ucode_off: offset of the ucode blob in the WPR region. The ucode
- * blob contains the bootloader, code and data of the
- * LS falcon
- * @ucode_size: size of the ucode blob, including bootloader
- * @data_size: size of the ucode blob data
- * @bl_code_size: size of the bootloader code
- * @bl_imem_off: offset in imem of the bootloader
- * @bl_data_off: offset of the bootloader data in WPR region
- * @bl_data_size: size of the bootloader data
- * @app_code_off: offset of the app code relative to ucode_off
- * @app_code_size: size of the app code
- * @app_data_off: offset of the app data relative to ucode_off
- * @app_data_size: size of the app data
- * @flags: flags for the secure bootloader
- *
- * This structure is written into the WPR region for each managed falcon. Each
- * instance is referenced by the lsb_offset member of the corresponding
- * lsf_wpr_header.
- */
-struct acr_r352_lsf_lsb_header {
- /**
- * LS falcon signatures
- * @prd_keys: signature to use in production mode
- * @dbg_keys: signature to use in debug mode
- * @b_prd_present: whether the production key is present
- * @b_dbg_present: whether the debug key is present
- * @falcon_id: ID of the falcon the ucode applies to
- */
- struct {
- u8 prd_keys[2][16];
- u8 dbg_keys[2][16];
- u32 b_prd_present;
- u32 b_dbg_present;
- u32 falcon_id;
- } signature;
- u32 ucode_off;
- u32 ucode_size;
- u32 data_size;
- u32 bl_code_size;
- u32 bl_imem_off;
- u32 bl_data_off;
- u32 bl_data_size;
- u32 app_code_off;
- u32 app_code_size;
- u32 app_data_off;
- u32 app_data_size;
- u32 flags;
-};
-
-/**
- * struct acr_r352_lsf_wpr_header - LS blob WPR Header
- * @falcon_id: LS falcon ID
- * @lsb_offset: offset of the lsb_lsf_header in the WPR region
- * @bootstrap_owner: secure falcon responsible for bootstrapping the LS falcon
- * @lazy_bootstrap: skip bootstrapping by ACR
- * @status: bootstrapping status
- *
- * An array of these is written at the beginning of the WPR region, one for
- * each managed falcon. The array is terminated by an instance whose
- * falcon_id is LSF_FALCON_ID_INVALID.
- */
-struct acr_r352_lsf_wpr_header {
- u32 falcon_id;
- u32 lsb_offset;
- u32 bootstrap_owner;
- u32 lazy_bootstrap;
- u32 status;
-#define LSF_IMAGE_STATUS_NONE 0
-#define LSF_IMAGE_STATUS_COPY 1
-#define LSF_IMAGE_STATUS_VALIDATION_CODE_FAILED 2
-#define LSF_IMAGE_STATUS_VALIDATION_DATA_FAILED 3
-#define LSF_IMAGE_STATUS_VALIDATION_DONE 4
-#define LSF_IMAGE_STATUS_VALIDATION_SKIPPED 5
-#define LSF_IMAGE_STATUS_BOOTSTRAP_READY 6
-};
-
-/**
- * struct ls_ucode_img_r352 - ucode image augmented with r352 headers
- */
-struct ls_ucode_img_r352 {
- struct ls_ucode_img base;
-
- const struct acr_r352_lsf_func *func;
-
- struct acr_r352_lsf_wpr_header wpr_header;
- struct acr_r352_lsf_lsb_header lsb_header;
-};
-#define ls_ucode_img_r352(i) container_of(i, struct ls_ucode_img_r352, base)
-
-/**
- * ls_ucode_img_load() - create a lsf_ucode_img and load it
- */
-struct ls_ucode_img *
-acr_r352_ls_ucode_img_load(const struct acr_r352 *acr,
- const struct nvkm_secboot *sb,
- enum nvkm_secboot_falcon falcon_id)
-{
- const struct nvkm_subdev *subdev = acr->base.subdev;
- const struct acr_r352_ls_func *func = acr->func->ls_func[falcon_id];
- struct ls_ucode_img_r352 *img;
- int ret;
-
- img = kzalloc(sizeof(*img), GFP_KERNEL);
- if (!img)
- return ERR_PTR(-ENOMEM);
-
- img->base.falcon_id = falcon_id;
-
- ret = func->load(sb, func->version_max, &img->base);
- if (ret < 0) {
- kfree(img->base.ucode_data);
- kfree(img->base.sig);
- kfree(img);
- return ERR_PTR(ret);
- }
-
- img->func = func->version[ret];
-
- /* Check that the signature size matches our expectations... */
- if (img->base.sig_size != sizeof(img->lsb_header.signature)) {
- nvkm_error(subdev, "invalid signature size for %s falcon!\n",
- nvkm_secboot_falcon_name[falcon_id]);
- return ERR_PTR(-EINVAL);
- }
-
- /* Copy signature to the right place */
- memcpy(&img->lsb_header.signature, img->base.sig, img->base.sig_size);
-
- /* not needed? the signature should already have the right value */
- img->lsb_header.signature.falcon_id = falcon_id;
-
- return &img->base;
-}
-
-#define LSF_LSB_HEADER_ALIGN 256
-#define LSF_BL_DATA_ALIGN 256
-#define LSF_BL_DATA_SIZE_ALIGN 256
-#define LSF_BL_CODE_SIZE_ALIGN 256
-#define LSF_UCODE_DATA_ALIGN 4096
-
-/**
- * acr_r352_ls_img_fill_headers - fill the WPR and LSB headers of an image
- * @acr: ACR to use
- * @img: image to generate for
- * @offset: offset in the WPR region where this image starts
- *
- * Allocate space in the WPR area from offset and write the WPR and LSB headers
- * accordingly.
- *
- * Return: offset at the end of this image.
- */
-static u32
-acr_r352_ls_img_fill_headers(struct acr_r352 *acr,
- struct ls_ucode_img_r352 *img, u32 offset)
-{
- struct ls_ucode_img *_img = &img->base;
- struct acr_r352_lsf_wpr_header *whdr = &img->wpr_header;
- struct acr_r352_lsf_lsb_header *lhdr = &img->lsb_header;
- struct ls_ucode_img_desc *desc = &_img->ucode_desc;
- const struct acr_r352_lsf_func *func = img->func;
-
- /* Fill WPR header */
- whdr->falcon_id = _img->falcon_id;
- whdr->bootstrap_owner = acr->base.boot_falcon;
- whdr->status = LSF_IMAGE_STATUS_COPY;
-
- /* Skip bootstrapping falcons started by something other than the ACR */
- if (acr->lazy_bootstrap & BIT(_img->falcon_id))
- whdr->lazy_bootstrap = 1;
-
- /* Align, save off, and include an LSB header size */
- offset = ALIGN(offset, LSF_LSB_HEADER_ALIGN);
- whdr->lsb_offset = offset;
- offset += sizeof(*lhdr);
-
- /*
- * Align, save off, and include the original (static) ucode
- * image size
- */
- offset = ALIGN(offset, LSF_UCODE_DATA_ALIGN);
- _img->ucode_off = lhdr->ucode_off = offset;
- offset += _img->ucode_size;
-
- /*
- * For falcons that use a boot loader (BL), we append a loader
- * desc structure on the end of the ucode image and consider
- * this the boot loader data. The host will then copy the loader
- * desc args to this space within the WPR region (before locking
- * down) and the HS bin will then copy them to DMEM 0 for the
- * loader.
- */
- lhdr->bl_code_size = ALIGN(desc->bootloader_size,
- LSF_BL_CODE_SIZE_ALIGN);
- lhdr->ucode_size = ALIGN(desc->app_resident_data_offset,
- LSF_BL_CODE_SIZE_ALIGN) + lhdr->bl_code_size;
- lhdr->data_size = ALIGN(desc->app_size, LSF_BL_CODE_SIZE_ALIGN) +
- lhdr->bl_code_size - lhdr->ucode_size;
- /*
- * Though the BL is located at offset 0 of the image, the VA is
- * different to make sure that it doesn't collide with the actual
- * OS VA range.
- */
- lhdr->bl_imem_off = desc->bootloader_imem_offset;
- lhdr->app_code_off = desc->app_start_offset +
- desc->app_resident_code_offset;
- lhdr->app_code_size = desc->app_resident_code_size;
- lhdr->app_data_off = desc->app_start_offset +
- desc->app_resident_data_offset;
- lhdr->app_data_size = desc->app_resident_data_size;
-
- lhdr->flags = func->lhdr_flags;
- if (_img->falcon_id == acr->base.boot_falcon)
- lhdr->flags |= LSF_FLAG_DMACTL_REQ_CTX;
-
- /* Align and save off BL descriptor size */
- lhdr->bl_data_size = ALIGN(func->bl_desc_size, LSF_BL_DATA_SIZE_ALIGN);
-
- /*
- * Align, save off, and include the additional BL data
- */
- offset = ALIGN(offset, LSF_BL_DATA_ALIGN);
- lhdr->bl_data_off = offset;
- offset += lhdr->bl_data_size;
-
- return offset;
-}
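-
-/*
- * Illustrative sketch: the offset arithmetic above walked with concrete
- * numbers, for a single managed falcon. Assumes ALIGN(x, a) rounds x up to
- * the next multiple of the power-of-two a; the ucode size is made up for
- * the example.
- */
-#if 0
-#define SKETCH_ALIGN(x, a) (((x) + (a) - 1) & ~((a) - 1))
-
-static unsigned int sketch_wpr_layout(void)
-{
- unsigned int offset = 40; /* (1 + 1) * sizeof(wpr_header) */
-
- offset = SKETCH_ALIGN(offset, 256); /* 0x100: LSB header */
- offset += 0x7c; /* sizeof(struct acr_r352_lsf_lsb_header) */
- offset = SKETCH_ALIGN(offset, 4096); /* 0x1000: ucode image */
- offset += 0x5000; /* hypothetical ucode size */
- offset = SKETCH_ALIGN(offset, 256); /* 0x6000: BL data */
- offset += 0x100; /* ALIGN(bl_desc_size, 256) */
-
- return offset; /* 0x6100: where the next image starts */
-}
-#endif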
-
-/**
- * acr_r352_ls_fill_headers - fill WPR and LSB headers of all managed images
- */
-int
-acr_r352_ls_fill_headers(struct acr_r352 *acr, struct list_head *imgs)
-{
- struct ls_ucode_img_r352 *img;
- struct list_head *l;
- u32 count = 0;
- u32 offset;
-
- /* Count the number of images to manage */
- list_for_each(l, imgs)
- count++;
-
- /*
- * Start with an array of WPR headers at the base of the WPR.
- * The expectation here is that the secure falcon will do a single DMA
- * read of this array and cache it internally so it's ok to pack these.
- * Also, we add 1 to the falcon count to indicate the end of the array.
- */
- offset = sizeof(img->wpr_header) * (count + 1);
-
- /*
- * Walk the managed falcons, accounting for the LSB structs
- * as well as the ucode images.
- */
- list_for_each_entry(img, imgs, base.node) {
- offset = acr_r352_ls_img_fill_headers(acr, img, offset);
- }
-
- return offset;
-}
-
-/**
- * acr_r352_ls_write_wpr - write the WPR blob contents
- */
-int
-acr_r352_ls_write_wpr(struct acr_r352 *acr, struct list_head *imgs,
- struct nvkm_gpuobj *wpr_blob, u64 wpr_addr)
-{
- struct ls_ucode_img *_img;
- u32 pos = 0;
- u32 max_desc_size = 0;
- u8 *gdesc;
-
- /* Figure out how large we need gdesc to be. */
- list_for_each_entry(_img, imgs, node) {
- struct ls_ucode_img_r352 *img = ls_ucode_img_r352(_img);
- const struct acr_r352_lsf_func *ls_func = img->func;
-
- max_desc_size = max(max_desc_size, ls_func->bl_desc_size);
- }
-
- gdesc = kmalloc(max_desc_size, GFP_KERNEL);
- if (!gdesc)
- return -ENOMEM;
-
- nvkm_kmap(wpr_blob);
-
- list_for_each_entry(_img, imgs, node) {
- struct ls_ucode_img_r352 *img = ls_ucode_img_r352(_img);
- const struct acr_r352_lsf_func *ls_func = img->func;
-
- nvkm_gpuobj_memcpy_to(wpr_blob, pos, &img->wpr_header,
- sizeof(img->wpr_header));
-
- nvkm_gpuobj_memcpy_to(wpr_blob, img->wpr_header.lsb_offset,
- &img->lsb_header, sizeof(img->lsb_header));
-
- /* Generate and write BL descriptor */
- memset(gdesc, 0, ls_func->bl_desc_size);
- ls_func->generate_bl_desc(&acr->base, _img, wpr_addr, gdesc);
-
- nvkm_gpuobj_memcpy_to(wpr_blob, img->lsb_header.bl_data_off,
- gdesc, ls_func->bl_desc_size);
-
- /* Copy ucode */
- nvkm_gpuobj_memcpy_to(wpr_blob, img->lsb_header.ucode_off,
- _img->ucode_data, _img->ucode_size);
-
- pos += sizeof(img->wpr_header);
- }
-
- nvkm_wo32(wpr_blob, pos, NVKM_SECBOOT_FALCON_INVALID);
-
- nvkm_done(wpr_blob);
-
- kfree(gdesc);
-
- return 0;
-}
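-
-/*
- * Illustrative sketch: why the single 32-bit write above terminates the
- * WPR header array. falcon_id is the first member of the WPR header, so
- * writing an invalid ID into the slot after the last header (space that
- * ls_fill_headers reserved as count + 1) is enough for the secure falcon
- * to stop iterating. Standalone approximation; the invalid-ID value is a
- * placeholder:
- */
-#if 0
-#include <stdint.h>
-#include <string.h>
-
-#define SKETCH_FALCON_INVALID 0xffffffffu /* assumed sentinel value */
-
-struct sketch_wpr_header { uint32_t falcon_id; uint32_t lsb_offset; };
-
-static void sketch_terminate(uint8_t *wpr, unsigned int count)
-{
- uint32_t invalid = SKETCH_FALCON_INVALID;
-
- memcpy(wpr + count * sizeof(struct sketch_wpr_header),
- &invalid, sizeof(invalid));
-}
-#endif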
-
-/* Both the size and address of the WPR region need to be 256KB-aligned */
-#define WPR_ALIGNMENT 0x40000
-/**
- * acr_r352_prepare_ls_blob() - prepare the LS blob
- *
- * For each securely managed falcon, load the FW, signatures and bootloaders and
- * prepare a ucode blob. Then, compute the offsets in the WPR region for each
- * blob, and finally write the headers and ucode blobs into a GPU object that
- * will be copied into the WPR region by the HS firmware.
- */
-static int
-acr_r352_prepare_ls_blob(struct acr_r352 *acr, struct nvkm_secboot *sb)
-{
- const struct nvkm_subdev *subdev = acr->base.subdev;
- struct list_head imgs;
- struct ls_ucode_img *img, *t;
- unsigned long managed_falcons = acr->base.managed_falcons;
- u64 wpr_addr = sb->wpr_addr;
- u32 wpr_size = sb->wpr_size;
- int managed_count = 0;
- u32 image_wpr_size, ls_blob_size;
- int falcon_id;
- int ret;
-
- INIT_LIST_HEAD(&imgs);
-
- /* Load all LS blobs */
- for_each_set_bit(falcon_id, &managed_falcons, NVKM_SECBOOT_FALCON_END) {
- struct ls_ucode_img *img;
-
- img = acr->func->ls_ucode_img_load(acr, sb, falcon_id);
- if (IS_ERR(img)) {
- if (acr->base.optional_falcons & BIT(falcon_id)) {
- managed_falcons &= ~BIT(falcon_id);
- nvkm_info(subdev, "skipping %s falcon...\n",
- nvkm_secboot_falcon_name[falcon_id]);
- continue;
- }
- ret = PTR_ERR(img);
- goto cleanup;
- }
-
- list_add_tail(&img->node, &imgs);
- managed_count++;
- }
-
- /* Commit the actual list of falcons we will manage from now on */
- acr->base.managed_falcons = managed_falcons;
-
- /*
- * If the boot falcon has a firmware, let it manage the bootstrapping
- * of the other falcons.
- */
- if (acr->func->ls_func[acr->base.boot_falcon] &&
- (managed_falcons & BIT(acr->base.boot_falcon))) {
- for_each_set_bit(falcon_id, &managed_falcons,
- NVKM_SECBOOT_FALCON_END) {
- if (falcon_id == acr->base.boot_falcon)
- continue;
-
- acr->lazy_bootstrap |= BIT(falcon_id);
- }
- }
-
- /*
- * Fill the WPR and LSF headers with the right offsets and compute
- * required WPR size
- */
- image_wpr_size = acr->func->ls_fill_headers(acr, &imgs);
- image_wpr_size = ALIGN(image_wpr_size, WPR_ALIGNMENT);
-
- ls_blob_size = image_wpr_size;
-
- /*
- * If we need a shadow area, allocate twice the size and use the
- * upper half as WPR
- */
- if (wpr_size == 0 && acr->func->shadow_blob)
- ls_blob_size *= 2;
-
- /* Allocate GPU object that will contain the WPR region */
- ret = nvkm_gpuobj_new(subdev->device, ls_blob_size, WPR_ALIGNMENT,
- false, NULL, &acr->ls_blob);
- if (ret)
- goto cleanup;
-
- nvkm_debug(subdev, "%d managed LS falcons, WPR size is %d bytes\n",
- managed_count, image_wpr_size);
-
- /* If WPR address and size are not fixed, set them to fit the LS blob */
- if (wpr_size == 0) {
- wpr_addr = acr->ls_blob->addr;
- if (acr->func->shadow_blob)
- wpr_addr += acr->ls_blob->size / 2;
-
- wpr_size = image_wpr_size;
- /*
- * But if the WPR region is set by the bootloader, it is illegal for
- * the LS blob to be larger than this region.
- */
- } else if (image_wpr_size > wpr_size) {
- nvkm_error(subdev, "WPR region too small for FW blob!\n");
- nvkm_error(subdev, "required: %dB\n", image_wpr_size);
- nvkm_error(subdev, "available: %dB\n", wpr_size);
- ret = -ENOSPC;
- goto cleanup;
- }
-
- /* Write LS blob */
- ret = acr->func->ls_write_wpr(acr, &imgs, acr->ls_blob, wpr_addr);
- if (ret)
- nvkm_gpuobj_del(&acr->ls_blob);
-
-cleanup:
- list_for_each_entry_safe(img, t, &imgs, node) {
- kfree(img->ucode_data);
- kfree(img->sig);
- kfree(img);
- }
-
- return ret;
-}
-
-
-
-
-void
-acr_r352_fixup_hs_desc(struct acr_r352 *acr, struct nvkm_secboot *sb,
- void *_desc)
-{
- struct hsflcn_acr_desc *desc = _desc;
- struct nvkm_gpuobj *ls_blob = acr->ls_blob;
-
- /* WPR region information if WPR is not fixed */
- if (sb->wpr_size == 0) {
- u64 wpr_start = ls_blob->addr;
- u64 wpr_end = wpr_start + ls_blob->size;
-
- desc->wpr_region_id = 1;
- desc->regions.no_regions = 2;
- desc->regions.region_props[0].start_addr = wpr_start >> 8;
- desc->regions.region_props[0].end_addr = wpr_end >> 8;
- desc->regions.region_props[0].region_id = 1;
- desc->regions.region_props[0].read_mask = 0xf;
- desc->regions.region_props[0].write_mask = 0xc;
- desc->regions.region_props[0].client_mask = 0x2;
- } else {
- desc->ucode_blob_base = ls_blob->addr;
- desc->ucode_blob_size = ls_blob->size;
- }
-}
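-
-/*
- * Illustrative sketch: the >> 8 shifts above store the WPR bounds in
- * 256-byte units, which is how these descriptor fields express FB
- * addresses. Minimal round-trip, assuming the bounds are 256-byte
- * aligned:
- */
-#if 0
-#include <stdint.h>
-
-static uint32_t sketch_to_units(uint64_t addr) { return addr >> 8; }
-static uint64_t sketch_to_bytes(uint32_t units) { return (uint64_t)units << 8; }
-/* sketch_to_bytes(sketch_to_units(a)) == a whenever (a & 0xff) == 0 */
-#endif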
-
-static void
-acr_r352_generate_hs_bl_desc(const struct hsf_load_header *hdr, void *_bl_desc,
- u64 offset)
-{
- struct acr_r352_flcn_bl_desc *bl_desc = _bl_desc;
- u64 addr_code, addr_data;
-
- addr_code = offset >> 8;
- addr_data = (offset + hdr->data_dma_base) >> 8;
-
- bl_desc->ctx_dma = FALCON_DMAIDX_VIRT;
- bl_desc->code_dma_base = lower_32_bits(addr_code);
- bl_desc->non_sec_code_off = hdr->non_sec_code_off;
- bl_desc->non_sec_code_size = hdr->non_sec_code_size;
- bl_desc->sec_code_off = hsf_load_header_app_off(hdr, 0);
- bl_desc->sec_code_size = hsf_load_header_app_size(hdr, 0);
- bl_desc->code_entry_point = 0;
- bl_desc->data_dma_base = lower_32_bits(addr_data);
- bl_desc->data_size = hdr->data_size;
-}
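-
-/*
- * Illustrative sketch: this descriptor variant keeps only the low 32 bits
- * of the shifted addresses, so it can express at most 2^40 bytes of FB
- * (32 bits worth of 256-byte units). A local equivalent of the kernel's
- * lower_32_bits() helper:
- */
-#if 0
-#include <stdint.h>
-
-static uint32_t sketch_lower_32_bits(uint64_t v)
-{
- return (uint32_t)v;
-}
-/* e.g. offset 0x123456700 >> 8 == 0x01234567, which fits in 32 bits */
-#endif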
-
-/**
- * acr_r352_prepare_hs_blob - load and prepare a HS blob and BL descriptor
- *
- * @acr: ACR instance to prepare for
- * @sb: secure boot instance to prepare for
- * @fw: name of the HS firmware to load
- * @blob: pointer to gpuobj that will be allocated to receive the HS FW payload
- * @load_header: pointer to the HS load header to fill for this firmware
- * @patch: whether we should patch the HS descriptor (only for HS loaders)
- */
-static int
-acr_r352_prepare_hs_blob(struct acr_r352 *acr, struct nvkm_secboot *sb,
- const char *fw, struct nvkm_gpuobj **blob,
- struct hsf_load_header *load_header, bool patch)
-{
- struct nvkm_subdev *subdev = &sb->subdev;
- void *acr_image;
- struct fw_bin_header *hsbin_hdr;
- struct hsf_fw_header *fw_hdr;
- struct hsf_load_header *load_hdr;
- void *acr_data;
- int ret;
-
- acr_image = hs_ucode_load_blob(subdev, sb->boot_falcon, fw);
- if (IS_ERR(acr_image))
- return PTR_ERR(acr_image);
-
- hsbin_hdr = acr_image;
- fw_hdr = acr_image + hsbin_hdr->header_offset;
- load_hdr = acr_image + fw_hdr->hdr_offset;
- acr_data = acr_image + hsbin_hdr->data_offset;
-
- /* Patch descriptor with WPR information? */
- if (patch) {
- struct hsflcn_acr_desc *desc;
-
- desc = acr_data + load_hdr->data_dma_base;
- acr->func->fixup_hs_desc(acr, sb, desc);
- }
-
- if (load_hdr->num_apps > ACR_R352_MAX_APPS) {
- nvkm_error(subdev, "more apps (%d) than supported (%d)!",
- load_hdr->num_apps, ACR_R352_MAX_APPS);
- ret = -EINVAL;
- goto cleanup;
- }
- memcpy(load_header, load_hdr, sizeof(*load_header) +
- (sizeof(load_hdr->apps[0]) * 2 * load_hdr->num_apps));
-
- /* Create ACR blob and copy HS data to it */
- ret = nvkm_gpuobj_new(subdev->device, ALIGN(hsbin_hdr->data_size, 256),
- 0x1000, false, NULL, blob);
- if (ret)
- goto cleanup;
-
- nvkm_kmap(*blob);
- nvkm_gpuobj_memcpy_to(*blob, 0, acr_data, hsbin_hdr->data_size);
- nvkm_done(*blob);
-
-cleanup:
- kfree(acr_image);
-
- return ret;
-}
-
-/**
- * acr_r352_load_blobs - load blobs common to all ACR V1 versions.
- *
- * This includes the LS blob, HS ucode loading blob, and HS bootloader.
- *
- * The HS ucode unload blob is only used on dGPU if the WPR region is variable.
- */
-int
-acr_r352_load_blobs(struct acr_r352 *acr, struct nvkm_secboot *sb)
-{
- struct nvkm_subdev *subdev = &sb->subdev;
- int ret;
-
- /* Firmware already loaded? */
- if (acr->firmware_ok)
- return 0;
-
- /* Load and prepare the managed falcon's firmwares */
- ret = acr_r352_prepare_ls_blob(acr, sb);
- if (ret)
- return ret;
-
- /* Load the HS firmware that will load the LS firmwares */
- if (!acr->load_blob) {
- ret = acr_r352_prepare_hs_blob(acr, sb, "acr/ucode_load",
- &acr->load_blob,
- &acr->load_bl_header, true);
- if (ret)
- return ret;
- }
-
- /* If the ACR region is dynamically programmed, we need an unload FW */
- if (sb->wpr_size == 0) {
- ret = acr_r352_prepare_hs_blob(acr, sb, "acr/ucode_unload",
- &acr->unload_blob,
- &acr->unload_bl_header, false);
- if (ret)
- return ret;
- }
-
- /* Load the HS firmware bootloader */
- if (!acr->hsbl_blob) {
- acr->hsbl_blob = nvkm_acr_load_firmware(subdev, "acr/bl", 0);
- if (IS_ERR(acr->hsbl_blob)) {
- ret = PTR_ERR(acr->hsbl_blob);
- acr->hsbl_blob = NULL;
- return ret;
- }
-
- if (acr->base.boot_falcon != NVKM_SECBOOT_FALCON_PMU) {
- acr->hsbl_unload_blob = nvkm_acr_load_firmware(subdev,
- "acr/unload_bl", 0);
- if (IS_ERR(acr->hsbl_unload_blob)) {
- ret = PTR_ERR(acr->hsbl_unload_blob);
- acr->hsbl_unload_blob = NULL;
- return ret;
- }
- } else {
- acr->hsbl_unload_blob = acr->hsbl_blob;
- }
- }
-
- acr->firmware_ok = true;
- nvkm_debug(&sb->subdev, "LS blob successfully created\n");
-
- return 0;
-}
-
-/**
- * acr_r352_load() - prepare the HS falcon to run the specified blob, mapped at @offset.
- *
- * Returns the start address to use, or a negative error value.
- */
-static int
-acr_r352_load(struct nvkm_acr *_acr, struct nvkm_falcon *falcon,
- struct nvkm_gpuobj *blob, u64 offset)
-{
- struct acr_r352 *acr = acr_r352(_acr);
- const u32 bl_desc_size = acr->func->hs_bl_desc_size;
- const struct hsf_load_header *load_hdr;
- struct fw_bin_header *bl_hdr;
- struct fw_bl_desc *hsbl_desc;
- void *bl, *blob_data, *hsbl_code, *hsbl_data;
- u32 code_size;
- u8 *bl_desc;
-
- bl_desc = kzalloc(bl_desc_size, GFP_KERNEL);
- if (!bl_desc)
- return -ENOMEM;
-
- /* Find the bootloader descriptor for our blob and copy it */
- if (blob == acr->load_blob) {
- load_hdr = &acr->load_bl_header;
- bl = acr->hsbl_blob;
- } else if (blob == acr->unload_blob) {
- load_hdr = &acr->unload_bl_header;
- bl = acr->hsbl_unload_blob;
- } else {
- nvkm_error(_acr->subdev, "invalid secure boot blob!\n");
- kfree(bl_desc);
- return -EINVAL;
- }
-
- bl_hdr = bl;
- hsbl_desc = bl + bl_hdr->header_offset;
- blob_data = bl + bl_hdr->data_offset;
- hsbl_code = blob_data + hsbl_desc->code_off;
- hsbl_data = blob_data + hsbl_desc->data_off;
- code_size = ALIGN(hsbl_desc->code_size, 256);
-
- /*
- * Copy HS bootloader data
- */
- nvkm_falcon_load_dmem(falcon, hsbl_data, 0x0, hsbl_desc->data_size, 0);
-
- /* Copy HS bootloader code to end of IMEM */
- nvkm_falcon_load_imem(falcon, hsbl_code, falcon->code.limit - code_size,
- code_size, hsbl_desc->start_tag, 0, false);
-
- /* Generate the BL header */
- acr->func->generate_hs_bl_desc(load_hdr, bl_desc, offset);
-
- /*
- * Copy HS BL header where the HS descriptor expects it to be
- */
- nvkm_falcon_load_dmem(falcon, bl_desc, hsbl_desc->dmem_load_off,
- bl_desc_size, 0);
-
- kfree(bl_desc);
- return hsbl_desc->start_tag << 8;
-}
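-
-/*
- * Illustrative sketch: falcon IMEM is tagged in 256-byte pages, so the
- * boot vector returned above is the bootloader's start tag scaled back to
- * a byte address.
- */
-#if 0
-#include <stdint.h>
-
-static uint32_t sketch_boot_vector(uint16_t start_tag)
-{
- return (uint32_t)start_tag << 8; /* tag index -> IMEM byte address */
-}
-#endif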
-
-static int
-acr_r352_shutdown(struct acr_r352 *acr, struct nvkm_secboot *sb)
-{
- struct nvkm_subdev *subdev = &sb->subdev;
- int i;
-
- /* Run the unload blob to unprotect the WPR region */
- if (acr->unload_blob && sb->wpr_set) {
- int ret;
-
- nvkm_debug(subdev, "running HS unload blob\n");
- ret = sb->func->run_blob(sb, acr->unload_blob, sb->halt_falcon);
- if (ret < 0)
- return ret;
- /*
- * The unload blob is expected to return this error code; RM
- * behaves the same way, so it is not treated as a failure.
- */
- if (ret && ret != 0x1d) {
- nvkm_error(subdev, "HS unload failed, ret 0x%08x\n", ret);
- return -EINVAL;
- }
- nvkm_debug(subdev, "HS unload blob completed\n");
- }
-
- for (i = 0; i < NVKM_SECBOOT_FALCON_END; i++)
- acr->falcon_state[i] = NON_SECURE;
-
- sb->wpr_set = false;
-
- return 0;
-}
-
-/**
- * acr_r352_wpr_is_set() - check whether the WPR region has indeed been set
- * by the ACR firmware and matches where it should be.
- */
-static bool
-acr_r352_wpr_is_set(const struct acr_r352 *acr, const struct nvkm_secboot *sb)
-{
- const struct nvkm_subdev *subdev = &sb->subdev;
- const struct nvkm_device *device = subdev->device;
- u64 wpr_lo, wpr_hi;
- u64 wpr_range_lo, wpr_range_hi;
-
- nvkm_wr32(device, 0x100cd4, 0x2);
- wpr_lo = (nvkm_rd32(device, 0x100cd4) & ~0xff);
- wpr_lo <<= 8;
- nvkm_wr32(device, 0x100cd4, 0x3);
- wpr_hi = (nvkm_rd32(device, 0x100cd4) & ~0xff);
- wpr_hi <<= 8;
-
- if (sb->wpr_size != 0) {
- wpr_range_lo = sb->wpr_addr;
- wpr_range_hi = wpr_range_lo + sb->wpr_size;
- } else {
- wpr_range_lo = acr->ls_blob->addr;
- wpr_range_hi = wpr_range_lo + acr->ls_blob->size;
- }
-
- return (wpr_lo >= wpr_range_lo && wpr_lo < wpr_range_hi &&
- wpr_hi > wpr_range_lo && wpr_hi <= wpr_range_hi);
-}
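-
-/*
- * Illustrative sketch: 0x100cd4 behaves as an indexed register here;
- * writing 0x2 or 0x3 selects the WPR low or high bound, and the readback
- * is masked and scaled by 256 to yield a byte address. Standalone
- * rendering with injected register accessors (hypothetical signatures):
- */
-#if 0
-#include <stdint.h>
-
-static uint64_t sketch_read_wpr_bound(void (*wr32)(uint32_t, uint32_t),
- uint32_t (*rd32)(uint32_t), uint32_t index)
-{
- uint64_t v;
-
- wr32(0x100cd4, index); /* 0x2 = low bound, 0x3 = high bound */
- v = rd32(0x100cd4) & ~(uint64_t)0xff; /* drop the low byte */
- return v << 8; /* scale to a byte address */
-}
-#endif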
-
-static int
-acr_r352_bootstrap(struct acr_r352 *acr, struct nvkm_secboot *sb)
-{
- const struct nvkm_subdev *subdev = &sb->subdev;
- unsigned long managed_falcons = acr->base.managed_falcons;
- int falcon_id;
- int ret;
-
- if (sb->wpr_set)
- return 0;
-
- /* Make sure all blobs are ready */
- ret = acr_r352_load_blobs(acr, sb);
- if (ret)
- return ret;
-
- nvkm_debug(subdev, "running HS load blob\n");
- ret = sb->func->run_blob(sb, acr->load_blob, sb->boot_falcon);
- /* clear halt interrupt */
- nvkm_falcon_clear_interrupt(sb->boot_falcon, 0x10);
- sb->wpr_set = acr_r352_wpr_is_set(acr, sb);
- if (ret < 0) {
- return ret;
- } else if (ret > 0) {
- nvkm_error(subdev, "HS load failed, ret 0x%08x\n", ret);
- return -EINVAL;
- }
- nvkm_debug(subdev, "HS load blob completed\n");
- /* WPR must be set at this point */
- if (!sb->wpr_set) {
- nvkm_error(subdev, "ACR blob completed but WPR not set!\n");
- return -EINVAL;
- }
-
- /* Run LS firmwares post_run hooks */
- for_each_set_bit(falcon_id, &managed_falcons, NVKM_SECBOOT_FALCON_END) {
- const struct acr_r352_ls_func *func =
- acr->func->ls_func[falcon_id];
-
- if (func->post_run) {
- ret = func->post_run(&acr->base, sb);
- if (ret)
- return ret;
- }
- }
-
- return 0;
-}
-
-/**
- * acr_r352_reset_nopmu - dummy reset method when no PMU firmware is loaded
- *
- * Reset is done by re-executing secure boot from scratch, with lazy bootstrap
- * disabled. This has the effect of making all managed falcons ready-to-run.
- */
-static int
-acr_r352_reset_nopmu(struct acr_r352 *acr, struct nvkm_secboot *sb,
- unsigned long falcon_mask)
-{
- int falcon;
- int ret;
-
- /*
- * Perform secure boot each time we are called on FECS. Since only FECS
- * and GPCCS are managed and started together, this ought to be safe.
- */
- if (!(falcon_mask & BIT(NVKM_SECBOOT_FALCON_FECS)))
- goto end;
-
- ret = acr_r352_shutdown(acr, sb);
- if (ret)
- return ret;
-
- ret = acr_r352_bootstrap(acr, sb);
- if (ret)
- return ret;
-
-end:
- for_each_set_bit(falcon, &falcon_mask, NVKM_SECBOOT_FALCON_END) {
- acr->falcon_state[falcon] = RESET;
- }
- return 0;
-}
-
-/**
- * acr_r352_reset() - execute secure boot from the prepared state
- *
- * Load the HS bootloader and ask the falcon to run it. This will in turn
- * load the HS firmware and run it, so once the falcon stops all the managed
- * falcons should have their LS firmware loaded and be ready to run.
- */
-static int
-acr_r352_reset(struct nvkm_acr *_acr, struct nvkm_secboot *sb,
- unsigned long falcon_mask)
-{
- struct acr_r352 *acr = acr_r352(_acr);
- struct nvkm_msgqueue *queue;
- int falcon;
- bool wpr_already_set = sb->wpr_set;
- int ret;
-
- /* Make sure secure boot is performed */
- ret = acr_r352_bootstrap(acr, sb);
- if (ret)
- return ret;
-
- /* No PMU interface? */
- if (!nvkm_secboot_is_managed(sb, _acr->boot_falcon)) {
- /* Redo secure boot entirely if it was already done */
- if (wpr_already_set)
- return acr_r352_reset_nopmu(acr, sb, falcon_mask);
- /* Else return the result of the initial invocation */
- else
- return ret;
- }
-
- switch (_acr->boot_falcon) {
- case NVKM_SECBOOT_FALCON_PMU:
- queue = sb->subdev.device->pmu->queue;
- break;
- case NVKM_SECBOOT_FALCON_SEC2:
- queue = sb->subdev.device->sec2->queue;
- break;
- default:
- return -EINVAL;
- }
-
- /* Otherwise just ask the LS firmware to reset the falcon */
- for_each_set_bit(falcon, &falcon_mask, NVKM_SECBOOT_FALCON_END)
- nvkm_debug(&sb->subdev, "resetting %s falcon\n",
- nvkm_secboot_falcon_name[falcon]);
- ret = nvkm_msgqueue_acr_boot_falcons(queue, falcon_mask);
- if (ret) {
- nvkm_error(&sb->subdev, "error during falcon reset: %d\n", ret);
- return ret;
- }
- nvkm_debug(&sb->subdev, "falcon reset done\n");
-
- return 0;
-}
-
-static int
-acr_r352_fini(struct nvkm_acr *_acr, struct nvkm_secboot *sb, bool suspend)
-{
- struct acr_r352 *acr = acr_r352(_acr);
-
- return acr_r352_shutdown(acr, sb);
-}
-
-static void
-acr_r352_dtor(struct nvkm_acr *_acr)
-{
- struct acr_r352 *acr = acr_r352(_acr);
-
- nvkm_gpuobj_del(&acr->unload_blob);
-
- if (_acr->boot_falcon != NVKM_SECBOOT_FALCON_PMU)
- kfree(acr->hsbl_unload_blob);
- kfree(acr->hsbl_blob);
- nvkm_gpuobj_del(&acr->load_blob);
- nvkm_gpuobj_del(&acr->ls_blob);
-
- kfree(acr);
-}
-
-static const struct acr_r352_lsf_func
-acr_r352_ls_fecs_func_0 = {
- .generate_bl_desc = acr_r352_generate_flcn_bl_desc,
- .bl_desc_size = sizeof(struct acr_r352_flcn_bl_desc),
-};
-
-const struct acr_r352_ls_func
-acr_r352_ls_fecs_func = {
- .load = acr_ls_ucode_load_fecs,
- .version_max = 0,
- .version = {
- &acr_r352_ls_fecs_func_0,
- }
-};
-
-static const struct acr_r352_lsf_func
-acr_r352_ls_gpccs_func_0 = {
- .generate_bl_desc = acr_r352_generate_flcn_bl_desc,
- .bl_desc_size = sizeof(struct acr_r352_flcn_bl_desc),
- /* GPCCS will be loaded using PRI */
- .lhdr_flags = LSF_FLAG_FORCE_PRIV_LOAD,
-};
-
-static const struct acr_r352_ls_func
-acr_r352_ls_gpccs_func = {
- .load = acr_ls_ucode_load_gpccs,
- .version_max = 0,
- .version = {
- &acr_r352_ls_gpccs_func_0,
- }
-};
-
-
-
-/**
- * struct acr_r352_pmu_bl_desc - PMU DMEM bootloader descriptor
- * @dma_idx: DMA context to be used by BL while loading code/data
- * @code_dma_base: 256B-aligned Physical FB Address where code is located
- * @code_size_total: total size of the code part in the ucode
- * @code_size_to_load: size of the code part to load into PMU IMEM
- * @code_entry_point: entry point in the code
- * @data_dma_base: physical FB address where the data part of the ucode is located
- * @data_size: total size of the data portion
- * @overlay_dma_base: physical FB address of the resident code present in the ucode
- * @argc: total number of args
- * @argv: offset where the args are copied into the PMU's DMEM
- * @code_dma_base1: upper bits of code_dma_base
- * @data_dma_base1: upper bits of data_dma_base
- * @overlay_dma_base1: upper bits of overlay_dma_base
- *
- * Structure used by the PMU bootloader to load the rest of the code
- */
-struct acr_r352_pmu_bl_desc {
- u32 dma_idx;
- u32 code_dma_base;
- u32 code_size_total;
- u32 code_size_to_load;
- u32 code_entry_point;
- u32 data_dma_base;
- u32 data_size;
- u32 overlay_dma_base;
- u32 argc;
- u32 argv;
- u16 code_dma_base1;
- u16 data_dma_base1;
- u16 overlay_dma_base1;
-};
-
-/**
- * acr_r352_generate_pmu_bl_desc() - populate a DMEM BL descriptor for a PMU LS image
- */
-static void
-acr_r352_generate_pmu_bl_desc(const struct nvkm_acr *acr,
- const struct ls_ucode_img *img, u64 wpr_addr,
- void *_desc)
-{
- const struct ls_ucode_img_desc *pdesc = &img->ucode_desc;
- const struct nvkm_pmu *pmu = acr->subdev->device->pmu;
- struct acr_r352_pmu_bl_desc *desc = _desc;
- u64 base;
- u64 addr_code;
- u64 addr_data;
- u32 addr_args;
-
- base = wpr_addr + img->ucode_off + pdesc->app_start_offset;
- addr_code = (base + pdesc->app_resident_code_offset) >> 8;
- addr_data = (base + pdesc->app_resident_data_offset) >> 8;
- addr_args = pmu->falcon->data.limit;
- addr_args -= NVKM_MSGQUEUE_CMDLINE_SIZE;
-
- desc->dma_idx = FALCON_DMAIDX_UCODE;
- desc->code_dma_base = lower_32_bits(addr_code);
- desc->code_dma_base1 = upper_32_bits(addr_code);
- desc->code_size_total = pdesc->app_size;
- desc->code_size_to_load = pdesc->app_resident_code_size;
- desc->code_entry_point = pdesc->app_imem_entry;
- desc->data_dma_base = lower_32_bits(addr_data);
- desc->data_dma_base1 = upper_32_bits(addr_data);
- desc->data_size = pdesc->app_resident_data_size;
- desc->overlay_dma_base = lower_32_bits(addr_code);
- desc->overlay_dma_base1 = upper_32_bits(addr_code);
- desc->argc = 1;
- desc->argv = addr_args;
-}
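-
-/*
- * Illustrative sketch: the *_dma_base/*_dma_base1 pairs filled above split
- * an address (already in 256-byte units) into a low 32-bit word plus a
- * 16-bit high word, reaching beyond what a single u32 can hold. Local
- * stand-ins for the kernel's lower_32_bits()/upper_32_bits():
- */
-#if 0
-#include <stdint.h>
-
-struct sketch_dma_split { uint32_t base; uint16_t base1; };
-
-static struct sketch_dma_split sketch_split_dma(uint64_t addr_units)
-{
- struct sketch_dma_split s = {
- .base = (uint32_t)addr_units, /* lower_32_bits() */
- .base1 = (uint16_t)(addr_units >> 32), /* upper_32_bits(), truncated to u16 */
- };
- return s;
-}
-#endif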
-
-static const struct acr_r352_lsf_func
-acr_r352_ls_pmu_func_0 = {
- .generate_bl_desc = acr_r352_generate_pmu_bl_desc,
- .bl_desc_size = sizeof(struct acr_r352_pmu_bl_desc),
-};
-
-static const struct acr_r352_ls_func
-acr_r352_ls_pmu_func = {
- .load = acr_ls_ucode_load_pmu,
- .post_run = acr_ls_pmu_post_run,
- .version_max = 0,
- .version = {
- &acr_r352_ls_pmu_func_0,
- }
-};
-
-const struct acr_r352_func
-acr_r352_func = {
- .fixup_hs_desc = acr_r352_fixup_hs_desc,
- .generate_hs_bl_desc = acr_r352_generate_hs_bl_desc,
- .hs_bl_desc_size = sizeof(struct acr_r352_flcn_bl_desc),
- .ls_ucode_img_load = acr_r352_ls_ucode_img_load,
- .ls_fill_headers = acr_r352_ls_fill_headers,
- .ls_write_wpr = acr_r352_ls_write_wpr,
- .ls_func = {
- [NVKM_SECBOOT_FALCON_FECS] = &acr_r352_ls_fecs_func,
- [NVKM_SECBOOT_FALCON_GPCCS] = &acr_r352_ls_gpccs_func,
- [NVKM_SECBOOT_FALCON_PMU] = &acr_r352_ls_pmu_func,
- },
-};
-
-static const struct nvkm_acr_func
-acr_r352_base_func = {
- .dtor = acr_r352_dtor,
- .fini = acr_r352_fini,
- .load = acr_r352_load,
- .reset = acr_r352_reset,
-};
-
-struct nvkm_acr *
-acr_r352_new_(const struct acr_r352_func *func,
- enum nvkm_secboot_falcon boot_falcon,
- unsigned long managed_falcons)
-{
- struct acr_r352 *acr;
- int i;
-
- /* Check that all requested falcons are supported */
- for_each_set_bit(i, &managed_falcons, NVKM_SECBOOT_FALCON_END) {
- if (!func->ls_func[i])
- return ERR_PTR(-ENOTSUPP);
- }
-
- acr = kzalloc(sizeof(*acr), GFP_KERNEL);
- if (!acr)
- return ERR_PTR(-ENOMEM);
-
- acr->base.boot_falcon = boot_falcon;
- acr->base.managed_falcons = managed_falcons;
- acr->base.func = &acr_r352_base_func;
- acr->func = func;
-
- return &acr->base;
-}
-
-struct nvkm_acr *
-acr_r352_new(unsigned long managed_falcons)
-{
- return acr_r352_new_(&acr_r352_func, NVKM_SECBOOT_FALCON_PMU,
- managed_falcons);
-}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr_r352.h b/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr_r352.h
deleted file mode 100644
index e516cab849dd..000000000000
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr_r352.h
+++ /dev/null
@@ -1,167 +0,0 @@
-/*
- * Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
- * DEALINGS IN THE SOFTWARE.
- */
-#ifndef __NVKM_SECBOOT_ACR_R352_H__
-#define __NVKM_SECBOOT_ACR_R352_H__
-
-#include "acr.h"
-#include "ls_ucode.h"
-#include "hs_ucode.h"
-
-struct ls_ucode_img;
-
-#define ACR_R352_MAX_APPS 8
-
-#define LSF_FLAG_LOAD_CODE_AT_0 1
-#define LSF_FLAG_DMACTL_REQ_CTX 4
-#define LSF_FLAG_FORCE_PRIV_LOAD 8
-
-static inline u32
-hsf_load_header_app_off(const struct hsf_load_header *hdr, u32 app)
-{
- return hdr->apps[app];
-}
-
-static inline u32
-hsf_load_header_app_size(const struct hsf_load_header *hdr, u32 app)
-{
- return hdr->apps[hdr->num_apps + app];
-}
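-
-/*
- * Illustrative sketch of the apps[] layout the two helpers above assume:
- * a flexible array carrying num_apps offsets followed by num_apps sizes.
- * This is also why acr_r352_prepare_hs_blob() copies
- * sizeof(apps[0]) * 2 * num_apps bytes of trailing data. Standalone
- * approximation with a hypothetical header type:
- */
-#if 0
-#include <stdint.h>
-
-struct sketch_load_header {
- uint32_t num_apps;
- uint32_t apps[]; /* [0..num_apps): offsets, [num_apps..2*num_apps): sizes */
-};
-
-static uint32_t sketch_app_off(const struct sketch_load_header *h, uint32_t i)
-{
- return h->apps[i];
-}
-
-static uint32_t sketch_app_size(const struct sketch_load_header *h, uint32_t i)
-{
- return h->apps[h->num_apps + i];
-}
-#endif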
-
-/**
- * struct acr_r352_lsf_func - manages a specific LS firmware version
- *
- * @generate_bl_desc: function called on a block of bl_desc_size to generate the
- * proper bootloader descriptor for this LS firmware
- * @bl_desc_size: size of the bootloader descriptor
- * @lhdr_flags: LS flags
- */
-struct acr_r352_lsf_func {
- void (*generate_bl_desc)(const struct nvkm_acr *,
- const struct ls_ucode_img *, u64, void *);
- u32 bl_desc_size;
- u32 lhdr_flags;
-};
-
-/**
- * struct acr_r352_ls_func - manages a single LS falcon
- *
- * @load: load the external firmware into a ls_ucode_img
- * @post_run: hook called right after the ACR is executed
- */
-struct acr_r352_ls_func {
- int (*load)(const struct nvkm_secboot *, int maxver,
- struct ls_ucode_img *);
- int (*post_run)(const struct nvkm_acr *, const struct nvkm_secboot *);
- int version_max;
- const struct acr_r352_lsf_func *version[];
-};
-
-struct acr_r352;
-
-/**
- * struct acr_r352_func - manages nuances between ACR versions
- *
- * @generate_hs_bl_desc: function called on a block of bl_desc_size to generate
- * the proper HS bootloader descriptor
- * @hs_bl_desc_size: size of the HS bootloader descriptor
- */
-struct acr_r352_func {
- void (*generate_hs_bl_desc)(const struct hsf_load_header *, void *,
- u64);
- void (*fixup_hs_desc)(struct acr_r352 *, struct nvkm_secboot *, void *);
- u32 hs_bl_desc_size;
- bool shadow_blob;
-
- struct ls_ucode_img *(*ls_ucode_img_load)(const struct acr_r352 *,
- const struct nvkm_secboot *,
- enum nvkm_secboot_falcon);
- int (*ls_fill_headers)(struct acr_r352 *, struct list_head *);
- int (*ls_write_wpr)(struct acr_r352 *, struct list_head *,
- struct nvkm_gpuobj *, u64);
-
- const struct acr_r352_ls_func *ls_func[NVKM_SECBOOT_FALCON_END];
-};
-
-/**
- * struct acr_r352 - ACR data for driver release 352 (and beyond)
- */
-struct acr_r352 {
- struct nvkm_acr base;
- const struct acr_r352_func *func;
-
- /*
- * HS FW - lock WPR region (dGPU only) and load LS FWs
- * on Tegra the HS FW copies the LS blob into the fixed WPR instead
- */
- struct nvkm_gpuobj *load_blob;
- struct {
- struct hsf_load_header load_bl_header;
- u32 __load_apps[ACR_R352_MAX_APPS * 2];
- };
-
- /* HS FW - unlock WPR region (dGPU only) */
- struct nvkm_gpuobj *unload_blob;
- struct {
- struct hsf_load_header unload_bl_header;
- u32 __unload_apps[ACR_R352_MAX_APPS * 2];
- };
-
- /* HS bootloader */
- void *hsbl_blob;
-
- /* HS bootloader for unload blob, if using a different falcon */
- void *hsbl_unload_blob;
-
- /* LS FWs, to be loaded by the HS ACR */
- struct nvkm_gpuobj *ls_blob;
-
- /* Firmware already loaded? */
- bool firmware_ok;
-
- /* Falcons to lazy-bootstrap */
- u32 lazy_bootstrap;
-
- /* To keep track of the state of all managed falcons */
- enum {
- /* In non-secure state, no firmware loaded, no privileges */
- NON_SECURE = 0,
- /* In low-secure mode and ready to be started */
- RESET,
- /* In low-secure mode and running */
- RUNNING,
- } falcon_state[NVKM_SECBOOT_FALCON_END];
-};
-#define acr_r352(acr) container_of(acr, struct acr_r352, base)
-
-struct nvkm_acr *acr_r352_new_(const struct acr_r352_func *,
- enum nvkm_secboot_falcon, unsigned long);
-
-struct ls_ucode_img *acr_r352_ls_ucode_img_load(const struct acr_r352 *,
- const struct nvkm_secboot *,
- enum nvkm_secboot_falcon);
-int acr_r352_ls_fill_headers(struct acr_r352 *, struct list_head *);
-int acr_r352_ls_write_wpr(struct acr_r352 *, struct list_head *,
- struct nvkm_gpuobj *, u64);
-
-void acr_r352_fixup_hs_desc(struct acr_r352 *, struct nvkm_secboot *, void *);
-
-#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr_r361.c b/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr_r361.c
deleted file mode 100644
index f6b2d20d7fc3..000000000000
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr_r361.c
+++ /dev/null
@@ -1,229 +0,0 @@
-/*
- * Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
- * DEALINGS IN THE SOFTWARE.
- */
-
-#include "acr_r361.h"
-
-#include <engine/falcon.h>
-#include <core/msgqueue.h>
-#include <subdev/pmu.h>
-#include <engine/sec2.h>
-
-static void
-acr_r361_generate_flcn_bl_desc(const struct nvkm_acr *acr,
- const struct ls_ucode_img *img, u64 wpr_addr,
- void *_desc)
-{
- struct acr_r361_flcn_bl_desc *desc = _desc;
- const struct ls_ucode_img_desc *pdesc = &img->ucode_desc;
- u64 base, addr_code, addr_data;
-
- base = wpr_addr + img->ucode_off + pdesc->app_start_offset;
- addr_code = base + pdesc->app_resident_code_offset;
- addr_data = base + pdesc->app_resident_data_offset;
-
- desc->ctx_dma = FALCON_DMAIDX_UCODE;
- desc->code_dma_base = u64_to_flcn64(addr_code);
- desc->non_sec_code_off = pdesc->app_resident_code_offset;
- desc->non_sec_code_size = pdesc->app_resident_code_size;
- desc->code_entry_point = pdesc->app_imem_entry;
- desc->data_dma_base = u64_to_flcn64(addr_data);
- desc->data_size = pdesc->app_resident_data_size;
-}
-
-void
-acr_r361_generate_hs_bl_desc(const struct hsf_load_header *hdr, void *_bl_desc,
- u64 offset)
-{
- struct acr_r361_flcn_bl_desc *bl_desc = _bl_desc;
-
- bl_desc->ctx_dma = FALCON_DMAIDX_VIRT;
- bl_desc->code_dma_base = u64_to_flcn64(offset);
- bl_desc->non_sec_code_off = hdr->non_sec_code_off;
- bl_desc->non_sec_code_size = hdr->non_sec_code_size;
- bl_desc->sec_code_off = hsf_load_header_app_off(hdr, 0);
- bl_desc->sec_code_size = hsf_load_header_app_size(hdr, 0);
- bl_desc->code_entry_point = 0;
- bl_desc->data_dma_base = u64_to_flcn64(offset + hdr->data_dma_base);
- bl_desc->data_size = hdr->data_size;
-}
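-
-/*
- * Illustrative sketch: unlike the r352 descriptor, this one carries full
- * 64-bit DMA addresses. Assuming flcn_u64 packs low/high 32-bit halves
- * (consistent with how u64_to_flcn64() is used here), a local equivalent:
- */
-#if 0
-#include <stdint.h>
-
-struct sketch_flcn_u64 { uint32_t lo; uint32_t hi; };
-
-static struct sketch_flcn_u64 sketch_u64_to_flcn64(uint64_t v)
-{
- struct sketch_flcn_u64 r = { .lo = (uint32_t)v, .hi = (uint32_t)(v >> 32) };
- return r;
-}
-#endif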
-
-static const struct acr_r352_lsf_func
-acr_r361_ls_fecs_func_0 = {
- .generate_bl_desc = acr_r361_generate_flcn_bl_desc,
- .bl_desc_size = sizeof(struct acr_r361_flcn_bl_desc),
-};
-
-const struct acr_r352_ls_func
-acr_r361_ls_fecs_func = {
- .load = acr_ls_ucode_load_fecs,
- .version_max = 0,
- .version = {
- &acr_r361_ls_fecs_func_0,
- }
-};
-
-static const struct acr_r352_lsf_func
-acr_r361_ls_gpccs_func_0 = {
- .generate_bl_desc = acr_r361_generate_flcn_bl_desc,
- .bl_desc_size = sizeof(struct acr_r361_flcn_bl_desc),
- /* GPCCS will be loaded using PRI */
- .lhdr_flags = LSF_FLAG_FORCE_PRIV_LOAD,
-};
-
-const struct acr_r352_ls_func
-acr_r361_ls_gpccs_func = {
- .load = acr_ls_ucode_load_gpccs,
- .version_max = 0,
- .version = {
- &acr_r361_ls_gpccs_func_0,
- }
-};
-
-struct acr_r361_pmu_bl_desc {
- u32 reserved;
- u32 dma_idx;
- struct flcn_u64 code_dma_base;
- u32 total_code_size;
- u32 code_size_to_load;
- u32 code_entry_point;
- struct flcn_u64 data_dma_base;
- u32 data_size;
- struct flcn_u64 overlay_dma_base;
- u32 argc;
- u32 argv;
-};
-
-static void
-acr_r361_generate_pmu_bl_desc(const struct nvkm_acr *acr,
- const struct ls_ucode_img *img, u64 wpr_addr,
- void *_desc)
-{
- const struct ls_ucode_img_desc *pdesc = &img->ucode_desc;
- const struct nvkm_pmu *pmu = acr->subdev->device->pmu;
- struct acr_r361_pmu_bl_desc *desc = _desc;
- u64 base, addr_code, addr_data;
- u32 addr_args;
-
- base = wpr_addr + img->ucode_off + pdesc->app_start_offset;
- addr_code = base + pdesc->app_resident_code_offset;
- addr_data = base + pdesc->app_resident_data_offset;
- addr_args = pmu->falcon->data.limit;
- addr_args -= NVKM_MSGQUEUE_CMDLINE_SIZE;
-
- desc->dma_idx = FALCON_DMAIDX_UCODE;
- desc->code_dma_base = u64_to_flcn64(addr_code);
- desc->total_code_size = pdesc->app_size;
- desc->code_size_to_load = pdesc->app_resident_code_size;
- desc->code_entry_point = pdesc->app_imem_entry;
- desc->data_dma_base = u64_to_flcn64(addr_data);
- desc->data_size = pdesc->app_resident_data_size;
- desc->overlay_dma_base = u64_to_flcn64(addr_code);
- desc->argc = 1;
- desc->argv = addr_args;
-}
-
-static const struct acr_r352_lsf_func
-acr_r361_ls_pmu_func_0 = {
- .generate_bl_desc = acr_r361_generate_pmu_bl_desc,
- .bl_desc_size = sizeof(struct acr_r361_pmu_bl_desc),
-};
-
-const struct acr_r352_ls_func
-acr_r361_ls_pmu_func = {
- .load = acr_ls_ucode_load_pmu,
- .post_run = acr_ls_pmu_post_run,
- .version_max = 0,
- .version = {
- &acr_r361_ls_pmu_func_0,
- }
-};
-
-static void
-acr_r361_generate_sec2_bl_desc(const struct nvkm_acr *acr,
- const struct ls_ucode_img *img, u64 wpr_addr,
- void *_desc)
-{
- const struct ls_ucode_img_desc *pdesc = &img->ucode_desc;
- const struct nvkm_sec2 *sec = acr->subdev->device->sec2;
- struct acr_r361_pmu_bl_desc *desc = _desc;
- u64 base, addr_code, addr_data;
- u32 addr_args;
-
- base = wpr_addr + img->ucode_off + pdesc->app_start_offset;
- /* For some reason we should not add app_resident_code_offset here */
- addr_code = base;
- addr_data = base + pdesc->app_resident_data_offset;
- addr_args = sec->falcon->data.limit;
- addr_args -= NVKM_MSGQUEUE_CMDLINE_SIZE;
-
- desc->dma_idx = FALCON_SEC2_DMAIDX_UCODE;
- desc->code_dma_base = u64_to_flcn64(addr_code);
- desc->total_code_size = pdesc->app_size;
- desc->code_size_to_load = pdesc->app_resident_code_size;
- desc->code_entry_point = pdesc->app_imem_entry;
- desc->data_dma_base = u64_to_flcn64(addr_data);
- desc->data_size = pdesc->app_resident_data_size;
- desc->overlay_dma_base = u64_to_flcn64(addr_code);
- desc->argc = 1;
- /* args are stored at the beginning of EMEM */
- desc->argv = 0x01000000;
-}
-
-const struct acr_r352_lsf_func
-acr_r361_ls_sec2_func_0 = {
- .generate_bl_desc = acr_r361_generate_sec2_bl_desc,
- .bl_desc_size = sizeof(struct acr_r361_pmu_bl_desc),
-};
-
-static const struct acr_r352_ls_func
-acr_r361_ls_sec2_func = {
- .load = acr_ls_ucode_load_sec2,
- .post_run = acr_ls_sec2_post_run,
- .version_max = 0,
- .version = {
- &acr_r361_ls_sec2_func_0,
- }
-};
-
-
-const struct acr_r352_func
-acr_r361_func = {
- .fixup_hs_desc = acr_r352_fixup_hs_desc,
- .generate_hs_bl_desc = acr_r361_generate_hs_bl_desc,
- .hs_bl_desc_size = sizeof(struct acr_r361_flcn_bl_desc),
- .ls_ucode_img_load = acr_r352_ls_ucode_img_load,
- .ls_fill_headers = acr_r352_ls_fill_headers,
- .ls_write_wpr = acr_r352_ls_write_wpr,
- .ls_func = {
- [NVKM_SECBOOT_FALCON_FECS] = &acr_r361_ls_fecs_func,
- [NVKM_SECBOOT_FALCON_GPCCS] = &acr_r361_ls_gpccs_func,
- [NVKM_SECBOOT_FALCON_PMU] = &acr_r361_ls_pmu_func,
- [NVKM_SECBOOT_FALCON_SEC2] = &acr_r361_ls_sec2_func,
- },
-};
-
-struct nvkm_acr *
-acr_r361_new(unsigned long managed_falcons)
-{
- return acr_r352_new_(&acr_r361_func, NVKM_SECBOOT_FALCON_PMU,
- managed_falcons);
-}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr_r361.h b/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr_r361.h
deleted file mode 100644
index 38dec93779c8..000000000000
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr_r361.h
+++ /dev/null
@@ -1,71 +0,0 @@
-/*
- * Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
- * DEALINGS IN THE SOFTWARE.
- */
-
-#ifndef __NVKM_SECBOOT_ACR_R361_H__
-#define __NVKM_SECBOOT_ACR_R361_H__
-
-#include "acr_r352.h"
-
-/**
- * struct acr_r361_flcn_bl_desc - DMEM bootloader descriptor
- * @signature: 16B signature for secure code. 0s if no secure code
- * @ctx_dma: DMA context to be used by BL while loading code/data
- * @code_dma_base: 256B-aligned Physical FB Address where code is located
- * (falcon's $xcbase register)
- * @non_sec_code_off: offset from code_dma_base where the non-secure code is
- * located. The offset must be a multiple of 256 to help perf
- * @non_sec_code_size: the size of the non-secure code part
- * @sec_code_off: offset from code_dma_base where the secure code is
- * located. The offset must be a multiple of 256 to help perf
- * @sec_code_size: the size of the secure code part
- * @code_entry_point: code entry point which will be invoked by BL after
- * code is loaded.
- * @data_dma_base: 256B aligned Physical FB Address where data is located.
- * (falcon's $xdbase register)
- * @data_size: size of the data block. Should be a multiple of 256B
- *
- * Structure used by the bootloader to load the rest of the code. This has
- * to be filled by host and copied into DMEM at offset provided in the
- * hsflcn_bl_desc.bl_desc_dmem_load_off.
- */
-struct acr_r361_flcn_bl_desc {
- u32 reserved[4];
- u32 signature[4];
- u32 ctx_dma;
- struct flcn_u64 code_dma_base;
- u32 non_sec_code_off;
- u32 non_sec_code_size;
- u32 sec_code_off;
- u32 sec_code_size;
- u32 code_entry_point;
- struct flcn_u64 data_dma_base;
- u32 data_size;
-};
-
-void acr_r361_generate_hs_bl_desc(const struct hsf_load_header *, void *, u64);
-
-extern const struct acr_r352_ls_func acr_r361_ls_fecs_func;
-extern const struct acr_r352_ls_func acr_r361_ls_gpccs_func;
-extern const struct acr_r352_ls_func acr_r361_ls_pmu_func;
-extern const struct acr_r352_lsf_func acr_r361_ls_sec2_func_0;
-#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr_r364.c b/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr_r364.c
deleted file mode 100644
index 30cf04109991..000000000000
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr_r364.c
+++ /dev/null
@@ -1,117 +0,0 @@
-/*
- * Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
- * DEALINGS IN THE SOFTWARE.
- */
-
-#include "acr_r361.h"
-
-#include <core/gpuobj.h>
-
-/*
- * r364 ACR: hsflcn_desc structure has changed to introduce the shadow_mem
- * parameter.
- */
-
-struct acr_r364_hsflcn_desc {
- union {
- u8 reserved_dmem[0x200];
- u32 signatures[4];
- } ucode_reserved_space;
- u32 wpr_region_id;
- u32 wpr_offset;
- u32 mmu_memory_range;
- struct {
- u32 no_regions;
- struct {
- u32 start_addr;
- u32 end_addr;
- u32 region_id;
- u32 read_mask;
- u32 write_mask;
- u32 client_mask;
- u32 shadow_mem_start_addr;
- } region_props[2];
- } regions;
- u32 ucode_blob_size;
- u64 ucode_blob_base __aligned(8);
- struct {
- u32 vpr_enabled;
- u32 vpr_start;
- u32 vpr_end;
- u32 hdcp_policies;
- } vpr_desc;
-};
-
-static void
-acr_r364_fixup_hs_desc(struct acr_r352 *acr, struct nvkm_secboot *sb,
- void *_desc)
-{
- struct acr_r364_hsflcn_desc *desc = _desc;
- struct nvkm_gpuobj *ls_blob = acr->ls_blob;
-
- /* WPR region information if WPR is not fixed */
- if (sb->wpr_size == 0) {
- u64 wpr_start = ls_blob->addr;
- u64 wpr_end = ls_blob->addr + ls_blob->size;
-
- if (acr->func->shadow_blob)
- wpr_start += ls_blob->size / 2;
-
- desc->wpr_region_id = 1;
- desc->regions.no_regions = 2;
- desc->regions.region_props[0].start_addr = wpr_start >> 8;
- desc->regions.region_props[0].end_addr = wpr_end >> 8;
- desc->regions.region_props[0].region_id = 1;
- desc->regions.region_props[0].read_mask = 0xf;
- desc->regions.region_props[0].write_mask = 0xc;
- desc->regions.region_props[0].client_mask = 0x2;
- if (acr->func->shadow_blob)
- desc->regions.region_props[0].shadow_mem_start_addr =
- ls_blob->addr >> 8;
- else
- desc->regions.region_props[0].shadow_mem_start_addr = 0;
- } else {
- desc->ucode_blob_base = ls_blob->addr;
- desc->ucode_blob_size = ls_blob->size;
- }
-}
-
-const struct acr_r352_func
-acr_r364_func = {
- .fixup_hs_desc = acr_r364_fixup_hs_desc,
- .generate_hs_bl_desc = acr_r361_generate_hs_bl_desc,
- .hs_bl_desc_size = sizeof(struct acr_r361_flcn_bl_desc),
- .ls_ucode_img_load = acr_r352_ls_ucode_img_load,
- .ls_fill_headers = acr_r352_ls_fill_headers,
- .ls_write_wpr = acr_r352_ls_write_wpr,
- .ls_func = {
- [NVKM_SECBOOT_FALCON_FECS] = &acr_r361_ls_fecs_func,
- [NVKM_SECBOOT_FALCON_GPCCS] = &acr_r361_ls_gpccs_func,
- [NVKM_SECBOOT_FALCON_PMU] = &acr_r361_ls_pmu_func,
- },
-};
-
-
-struct nvkm_acr *
-acr_r364_new(unsigned long managed_falcons)
-{
- return acr_r352_new_(&acr_r364_func, NVKM_SECBOOT_FALCON_PMU,
- managed_falcons);
-}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr_r367.c b/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr_r367.c
deleted file mode 100644
index 472ced29da7e..000000000000
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr_r367.c
+++ /dev/null
@@ -1,418 +0,0 @@
-/*
- * Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
- * DEALINGS IN THE SOFTWARE.
- */
-
-#include "acr_r367.h"
-#include "acr_r361.h"
-#include "acr_r370.h"
-
-#include <core/gpuobj.h>
-
-/*
- * r367 ACR: new LS signature format requires a rewrite of LS firmware and
- * blob creation functions. Also the hsflcn_desc layout has changed slightly.
- */
-
-#define LSF_LSB_DEPMAP_SIZE 11
-
-/**
- * struct acr_r367_lsf_lsb_header - LS firmware header
- *
- * See also struct acr_r352_lsf_lsb_header for documentation.
- */
-struct acr_r367_lsf_lsb_header {
- /**
- * LS falcon signatures
- * @prd_keys: signature to use in production mode
- * @dbg_keys: signature to use in debug mode
- * @b_prd_present: whether the production key is present
- * @b_dbg_present: whether the debug key is present
- * @falcon_id: ID of the falcon the ucode applies to
- */
- struct {
- u8 prd_keys[2][16];
- u8 dbg_keys[2][16];
- u32 b_prd_present;
- u32 b_dbg_present;
- u32 falcon_id;
- u32 supports_versioning;
- u32 version;
- u32 depmap_count;
- u8 depmap[LSF_LSB_DEPMAP_SIZE * 2 * 4];
- u8 kdf[16];
- } signature;
- u32 ucode_off;
- u32 ucode_size;
- u32 data_size;
- u32 bl_code_size;
- u32 bl_imem_off;
- u32 bl_data_off;
- u32 bl_data_size;
- u32 app_code_off;
- u32 app_code_size;
- u32 app_data_off;
- u32 app_data_size;
- u32 flags;
-};
-
-/**
- * struct acr_r367_lsf_wpr_header - LS blob WPR Header
- *
- * See also struct acr_r352_lsf_wpr_header for documentation.
- */
-struct acr_r367_lsf_wpr_header {
- u32 falcon_id;
- u32 lsb_offset;
- u32 bootstrap_owner;
- u32 lazy_bootstrap;
- u32 bin_version;
- u32 status;
-#define LSF_IMAGE_STATUS_NONE 0
-#define LSF_IMAGE_STATUS_COPY 1
-#define LSF_IMAGE_STATUS_VALIDATION_CODE_FAILED 2
-#define LSF_IMAGE_STATUS_VALIDATION_DATA_FAILED 3
-#define LSF_IMAGE_STATUS_VALIDATION_DONE 4
-#define LSF_IMAGE_STATUS_VALIDATION_SKIPPED 5
-#define LSF_IMAGE_STATUS_BOOTSTRAP_READY 6
-#define LSF_IMAGE_STATUS_REVOCATION_CHECK_FAILED 7
-};
-
-/**
- * struct ls_ucode_img_r367 - ucode image augmented with r367 headers
- */
-struct ls_ucode_img_r367 {
- struct ls_ucode_img base;
-
- const struct acr_r352_lsf_func *func;
-
- struct acr_r367_lsf_wpr_header wpr_header;
- struct acr_r367_lsf_lsb_header lsb_header;
-};
-#define ls_ucode_img_r367(i) container_of(i, struct ls_ucode_img_r367, base)
-
-struct ls_ucode_img *
-acr_r367_ls_ucode_img_load(const struct acr_r352 *acr,
- const struct nvkm_secboot *sb,
- enum nvkm_secboot_falcon falcon_id)
-{
- const struct nvkm_subdev *subdev = acr->base.subdev;
- const struct acr_r352_ls_func *func = acr->func->ls_func[falcon_id];
- struct ls_ucode_img_r367 *img;
- int ret;
-
- img = kzalloc(sizeof(*img), GFP_KERNEL);
- if (!img)
- return ERR_PTR(-ENOMEM);
-
- img->base.falcon_id = falcon_id;
-
- ret = func->load(sb, func->version_max, &img->base);
- if (ret < 0) {
- kfree(img->base.ucode_data);
- kfree(img->base.sig);
- kfree(img);
- return ERR_PTR(ret);
- }
-
- img->func = func->version[ret];
-
- /* Check that the signature size matches our expectations... */
- if (img->base.sig_size != sizeof(img->lsb_header.signature)) {
- nvkm_error(subdev, "invalid signature size for %s falcon!\n",
- nvkm_secboot_falcon_name[falcon_id]);
- return ERR_PTR(-EINVAL);
- }
-
- /* Copy signature to the right place */
- memcpy(&img->lsb_header.signature, img->base.sig, img->base.sig_size);
-
- /* not needed? the signature should already have the right value */
- img->lsb_header.signature.falcon_id = falcon_id;
-
- return &img->base;
-}
-
-#define LSF_LSB_HEADER_ALIGN 256
-#define LSF_BL_DATA_ALIGN 256
-#define LSF_BL_DATA_SIZE_ALIGN 256
-#define LSF_BL_CODE_SIZE_ALIGN 256
-#define LSF_UCODE_DATA_ALIGN 4096
-
-static u32
-acr_r367_ls_img_fill_headers(struct acr_r352 *acr,
- struct ls_ucode_img_r367 *img, u32 offset)
-{
- struct ls_ucode_img *_img = &img->base;
- struct acr_r367_lsf_wpr_header *whdr = &img->wpr_header;
- struct acr_r367_lsf_lsb_header *lhdr = &img->lsb_header;
- struct ls_ucode_img_desc *desc = &_img->ucode_desc;
- const struct acr_r352_lsf_func *func = img->func;
-
- /* Fill WPR header */
- whdr->falcon_id = _img->falcon_id;
- whdr->bootstrap_owner = acr->base.boot_falcon;
- whdr->bin_version = lhdr->signature.version;
- whdr->status = LSF_IMAGE_STATUS_COPY;
-
- /* Skip bootstrapping falcons started by something other than the ACR */
- if (acr->lazy_bootstrap & BIT(_img->falcon_id))
- whdr->lazy_bootstrap = 1;
-
- /* Align, save off, and include an LSB header size */
- offset = ALIGN(offset, LSF_LSB_HEADER_ALIGN);
- whdr->lsb_offset = offset;
- offset += sizeof(*lhdr);
-
- /*
- * Align, save off, and include the original (static) ucode
- * image size
- */
- offset = ALIGN(offset, LSF_UCODE_DATA_ALIGN);
- _img->ucode_off = lhdr->ucode_off = offset;
- offset += _img->ucode_size;
-
- /*
- * For falcons that use a boot loader (BL), we append a loader
- * desc structure on the end of the ucode image and consider
- * this the boot loader data. The host will then copy the loader
- * desc args to this space within the WPR region (before locking
- * down) and the HS bin will then copy them to DMEM 0 for the
- * loader.
- */
- lhdr->bl_code_size = ALIGN(desc->bootloader_size,
- LSF_BL_CODE_SIZE_ALIGN);
- lhdr->ucode_size = ALIGN(desc->app_resident_data_offset,
- LSF_BL_CODE_SIZE_ALIGN) + lhdr->bl_code_size;
- lhdr->data_size = ALIGN(desc->app_size, LSF_BL_CODE_SIZE_ALIGN) +
- lhdr->bl_code_size - lhdr->ucode_size;
- /*
- * Though the BL is located at offset 0 of the image, the VA is
- * different to make sure that it doesn't collide with the actual
- * OS VA range.
- */
- lhdr->bl_imem_off = desc->bootloader_imem_offset;
- lhdr->app_code_off = desc->app_start_offset +
- desc->app_resident_code_offset;
- lhdr->app_code_size = desc->app_resident_code_size;
- lhdr->app_data_off = desc->app_start_offset +
- desc->app_resident_data_offset;
- lhdr->app_data_size = desc->app_resident_data_size;
-
- lhdr->flags = func->lhdr_flags;
- if (_img->falcon_id == acr->base.boot_falcon)
- lhdr->flags |= LSF_FLAG_DMACTL_REQ_CTX;
-
- /* Align and save off BL descriptor size */
- lhdr->bl_data_size = ALIGN(func->bl_desc_size, LSF_BL_DATA_SIZE_ALIGN);
-
- /*
- * Align, save off, and include the additional BL data
- */
- offset = ALIGN(offset, LSF_BL_DATA_ALIGN);
- lhdr->bl_data_off = offset;
- offset += lhdr->bl_data_size;
-
- return offset;
-}
-
-int
-acr_r367_ls_fill_headers(struct acr_r352 *acr, struct list_head *imgs)
-{
- struct ls_ucode_img_r367 *img;
- struct list_head *l;
- u32 count = 0;
- u32 offset;
-
- /* Count the number of images to manage */
- list_for_each(l, imgs)
- count++;
-
- /*
- * Start with an array of WPR headers at the base of the WPR.
- * The expectation here is that the secure falcon will do a single DMA
- * read of this array and cache it internally so it's ok to pack these.
- * Also, we add 1 to the falcon count to indicate the end of the array.
- */
- offset = sizeof(img->wpr_header) * (count + 1);
-
- /*
- * Walk the managed falcons, accounting for the LSB structs
- * as well as the ucode images.
- */
- list_for_each_entry(img, imgs, base.node) {
- offset = acr_r367_ls_img_fill_headers(acr, img, offset);
- }
-
- return offset;
-}
-
-int
-acr_r367_ls_write_wpr(struct acr_r352 *acr, struct list_head *imgs,
- struct nvkm_gpuobj *wpr_blob, u64 wpr_addr)
-{
- struct ls_ucode_img *_img;
- u32 pos = 0;
- u32 max_desc_size = 0;
- u8 *gdesc;
-
- list_for_each_entry(_img, imgs, node) {
- struct ls_ucode_img_r367 *img = ls_ucode_img_r367(_img);
- const struct acr_r352_lsf_func *ls_func = img->func;
-
- max_desc_size = max(max_desc_size, ls_func->bl_desc_size);
- }
-
- gdesc = kmalloc(max_desc_size, GFP_KERNEL);
- if (!gdesc)
- return -ENOMEM;
-
- nvkm_kmap(wpr_blob);
-
- list_for_each_entry(_img, imgs, node) {
- struct ls_ucode_img_r367 *img = ls_ucode_img_r367(_img);
- const struct acr_r352_lsf_func *ls_func = img->func;
-
- nvkm_gpuobj_memcpy_to(wpr_blob, pos, &img->wpr_header,
- sizeof(img->wpr_header));
-
- nvkm_gpuobj_memcpy_to(wpr_blob, img->wpr_header.lsb_offset,
- &img->lsb_header, sizeof(img->lsb_header));
-
- /* Generate and write BL descriptor */
- memset(gdesc, 0, ls_func->bl_desc_size);
- ls_func->generate_bl_desc(&acr->base, _img, wpr_addr, gdesc);
-
- nvkm_gpuobj_memcpy_to(wpr_blob, img->lsb_header.bl_data_off,
- gdesc, ls_func->bl_desc_size);
-
- /* Copy ucode */
- nvkm_gpuobj_memcpy_to(wpr_blob, img->lsb_header.ucode_off,
- _img->ucode_data, _img->ucode_size);
-
- pos += sizeof(img->wpr_header);
- }
-
- nvkm_wo32(wpr_blob, pos, NVKM_SECBOOT_FALCON_INVALID);
-
- nvkm_done(wpr_blob);
-
- kfree(gdesc);
-
- return 0;
-}
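
The write loop above follows a common two-pass pattern: a first pass sizes one scratch buffer for the largest BL descriptor, a second pass regenerates each descriptor into that buffer before copying it out. A generic sketch of the pattern, with hypothetical types rather than the nvkm API:

#include <stdlib.h>
#include <string.h>

struct item {
	size_t desc_size;
	/* ... payload ... */
};

/* One scratch buffer, sized once for the largest descriptor. */
static int
process_all(struct item *items, int n, void (*generate)(struct item *, void *))
{
	size_t max = 0;
	void *scratch;
	int i;

	if (n == 0)
		return 0;

	for (i = 0; i < n; i++)
		if (items[i].desc_size > max)
			max = items[i].desc_size;

	scratch = malloc(max);
	if (!scratch)
		return -1;

	for (i = 0; i < n; i++) {
		memset(scratch, 0, items[i].desc_size);
		generate(&items[i], scratch);
		/* ...copy scratch out, as ls_write_wpr does into wpr_blob... */
	}

	free(scratch);
	return 0;
}
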
-
-struct acr_r367_hsflcn_desc {
- u8 reserved_dmem[0x200];
- u32 signatures[4];
- u32 wpr_region_id;
- u32 wpr_offset;
- u32 mmu_memory_range;
-#define FLCN_ACR_MAX_REGIONS 2
- struct {
- u32 no_regions;
- struct {
- u32 start_addr;
- u32 end_addr;
- u32 region_id;
- u32 read_mask;
- u32 write_mask;
- u32 client_mask;
- u32 shadow_mem_start_addr;
- } region_props[FLCN_ACR_MAX_REGIONS];
- } regions;
- u32 ucode_blob_size;
- u64 ucode_blob_base __aligned(8);
- struct {
- u32 vpr_enabled;
- u32 vpr_start;
- u32 vpr_end;
- u32 hdcp_policies;
- } vpr_desc;
-};
-
-void
-acr_r367_fixup_hs_desc(struct acr_r352 *acr, struct nvkm_secboot *sb,
- void *_desc)
-{
- struct acr_r367_hsflcn_desc *desc = _desc;
- struct nvkm_gpuobj *ls_blob = acr->ls_blob;
-
- /* WPR region information if WPR is not fixed */
- if (sb->wpr_size == 0) {
- u64 wpr_start = ls_blob->addr;
- u64 wpr_end = ls_blob->addr + ls_blob->size;
-
- if (acr->func->shadow_blob)
- wpr_start += ls_blob->size / 2;
-
- desc->wpr_region_id = 1;
- desc->regions.no_regions = 2;
- desc->regions.region_props[0].start_addr = wpr_start >> 8;
- desc->regions.region_props[0].end_addr = wpr_end >> 8;
- desc->regions.region_props[0].region_id = 1;
- desc->regions.region_props[0].read_mask = 0xf;
- desc->regions.region_props[0].write_mask = 0xc;
- desc->regions.region_props[0].client_mask = 0x2;
- if (acr->func->shadow_blob)
- desc->regions.region_props[0].shadow_mem_start_addr =
- ls_blob->addr >> 8;
- else
- desc->regions.region_props[0].shadow_mem_start_addr = 0;
- } else {
- desc->ucode_blob_base = ls_blob->addr;
- desc->ucode_blob_size = ls_blob->size;
- }
-}
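
Note the >> 8 in the region bounds above: the addresses are passed to the falcon in 256-byte units, so a wide physical address fits in a 32-bit descriptor field. A minimal illustration, assuming only that the addresses are 256-byte aligned:

#include <stdint.h>

/* Pack/unpack a 256-byte-aligned address into a 32-bit field. */
static uint32_t pack_addr256(uint64_t addr)    { return (uint32_t)(addr >> 8); }
static uint64_t unpack_addr256(uint32_t field) { return (uint64_t)field << 8; }

/* e.g. pack_addr256(0x123456700ull) == 0x01234567 */
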
-
-static const struct acr_r352_ls_func
-acr_r367_ls_sec2_func = {
- .load = acr_ls_ucode_load_sec2,
- .post_run = acr_ls_sec2_post_run,
- .version_max = 1,
- .version = {
- &acr_r361_ls_sec2_func_0,
- &acr_r370_ls_sec2_func_0,
- }
-};
-
-const struct acr_r352_func
-acr_r367_func = {
- .fixup_hs_desc = acr_r367_fixup_hs_desc,
- .generate_hs_bl_desc = acr_r361_generate_hs_bl_desc,
- .hs_bl_desc_size = sizeof(struct acr_r361_flcn_bl_desc),
- .shadow_blob = true,
- .ls_ucode_img_load = acr_r367_ls_ucode_img_load,
- .ls_fill_headers = acr_r367_ls_fill_headers,
- .ls_write_wpr = acr_r367_ls_write_wpr,
- .ls_func = {
- [NVKM_SECBOOT_FALCON_FECS] = &acr_r361_ls_fecs_func,
- [NVKM_SECBOOT_FALCON_GPCCS] = &acr_r361_ls_gpccs_func,
- [NVKM_SECBOOT_FALCON_PMU] = &acr_r361_ls_pmu_func,
- [NVKM_SECBOOT_FALCON_SEC2] = &acr_r367_ls_sec2_func,
- },
-};
-
-struct nvkm_acr *
-acr_r367_new(enum nvkm_secboot_falcon boot_falcon,
- unsigned long managed_falcons)
-{
- return acr_r352_new_(&acr_r367_func, boot_falcon, managed_falcons);
-}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr_r370.c b/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr_r370.c
deleted file mode 100644
index e821d0fd6217..000000000000
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr_r370.c
+++ /dev/null
@@ -1,168 +0,0 @@
-/*
- * Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
- * DEALINGS IN THE SOFTWARE.
- */
-
-#include "acr_r370.h"
-#include "acr_r367.h"
-
-#include <core/msgqueue.h>
-#include <engine/falcon.h>
-#include <engine/sec2.h>
-
-static void
-acr_r370_generate_flcn_bl_desc(const struct nvkm_acr *acr,
- const struct ls_ucode_img *img, u64 wpr_addr,
- void *_desc)
-{
- struct acr_r370_flcn_bl_desc *desc = _desc;
- const struct ls_ucode_img_desc *pdesc = &img->ucode_desc;
- u64 base, addr_code, addr_data;
-
- base = wpr_addr + img->ucode_off + pdesc->app_start_offset;
- addr_code = base + pdesc->app_resident_code_offset;
- addr_data = base + pdesc->app_resident_data_offset;
-
- desc->ctx_dma = FALCON_DMAIDX_UCODE;
- desc->code_dma_base = u64_to_flcn64(addr_code);
- desc->non_sec_code_off = pdesc->app_resident_code_offset;
- desc->non_sec_code_size = pdesc->app_resident_code_size;
- desc->code_entry_point = pdesc->app_imem_entry;
- desc->data_dma_base = u64_to_flcn64(addr_data);
- desc->data_size = pdesc->app_resident_data_size;
-}
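
The code/data DMA bases above are 64-bit WPR addresses split into the falcon's two-word format. A plausible sketch of what u64_to_flcn64 does, assuming the lo/hi layout of struct flcn_u64 declared in acr_r370.h below (the real helper is defined elsewhere in nvkm):

#include <stdint.h>

struct flcn_u64 {
	uint32_t lo;
	uint32_t hi;
};

static struct flcn_u64
u64_to_flcn64_sketch(uint64_t u)
{
	struct flcn_u64 ret = {
		.lo = (uint32_t)u,
		.hi = (uint32_t)(u >> 32),
	};
	return ret;
}
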
-
-static const struct acr_r352_lsf_func
-acr_r370_ls_fecs_func_0 = {
- .generate_bl_desc = acr_r370_generate_flcn_bl_desc,
- .bl_desc_size = sizeof(struct acr_r370_flcn_bl_desc),
-};
-
-const struct acr_r352_ls_func
-acr_r370_ls_fecs_func = {
- .load = acr_ls_ucode_load_fecs,
- .version_max = 0,
- .version = {
- &acr_r370_ls_fecs_func_0,
- }
-};
-
-static const struct acr_r352_lsf_func
-acr_r370_ls_gpccs_func_0 = {
- .generate_bl_desc = acr_r370_generate_flcn_bl_desc,
- .bl_desc_size = sizeof(struct acr_r370_flcn_bl_desc),
- /* GPCCS will be loaded using PRI */
- .lhdr_flags = LSF_FLAG_FORCE_PRIV_LOAD,
-};
-
-const struct acr_r352_ls_func
-acr_r370_ls_gpccs_func = {
- .load = acr_ls_ucode_load_gpccs,
- .version_max = 0,
- .version = {
- &acr_r370_ls_gpccs_func_0,
- }
-};
-
-static void
-acr_r370_generate_sec2_bl_desc(const struct nvkm_acr *acr,
- const struct ls_ucode_img *img, u64 wpr_addr,
- void *_desc)
-{
- const struct ls_ucode_img_desc *pdesc = &img->ucode_desc;
- const struct nvkm_sec2 *sec = acr->subdev->device->sec2;
- struct acr_r370_flcn_bl_desc *desc = _desc;
- u64 base, addr_code, addr_data;
- u32 addr_args;
-
- base = wpr_addr + img->ucode_off + pdesc->app_start_offset;
- /* For some reason we should not add app_resident_code_offset here */
- addr_code = base;
- addr_data = base + pdesc->app_resident_data_offset;
- addr_args = sec->falcon->data.limit;
- addr_args -= NVKM_MSGQUEUE_CMDLINE_SIZE;
-
- desc->ctx_dma = FALCON_SEC2_DMAIDX_UCODE;
- desc->code_dma_base = u64_to_flcn64(addr_code);
- desc->non_sec_code_off = pdesc->app_resident_code_offset;
- desc->non_sec_code_size = pdesc->app_resident_code_size;
- desc->code_entry_point = pdesc->app_imem_entry;
- desc->data_dma_base = u64_to_flcn64(addr_data);
- desc->data_size = pdesc->app_resident_data_size;
- desc->argc = 1;
- /* args are stored at the beginning of EMEM */
- desc->argv = 0x01000000;
-}
-
-const struct acr_r352_lsf_func
-acr_r370_ls_sec2_func_0 = {
- .generate_bl_desc = acr_r370_generate_sec2_bl_desc,
- .bl_desc_size = sizeof(struct acr_r370_flcn_bl_desc),
-};
-
-const struct acr_r352_ls_func
-acr_r370_ls_sec2_func = {
- .load = acr_ls_ucode_load_sec2,
- .post_run = acr_ls_sec2_post_run,
- .version_max = 0,
- .version = {
- &acr_r370_ls_sec2_func_0,
- }
-};
-
-void
-acr_r370_generate_hs_bl_desc(const struct hsf_load_header *hdr, void *_bl_desc,
- u64 offset)
-{
- struct acr_r370_flcn_bl_desc *bl_desc = _bl_desc;
-
- bl_desc->ctx_dma = FALCON_DMAIDX_VIRT;
- bl_desc->non_sec_code_off = hdr->non_sec_code_off;
- bl_desc->non_sec_code_size = hdr->non_sec_code_size;
- bl_desc->sec_code_off = hsf_load_header_app_off(hdr, 0);
- bl_desc->sec_code_size = hsf_load_header_app_size(hdr, 0);
- bl_desc->code_entry_point = 0;
- bl_desc->code_dma_base = u64_to_flcn64(offset);
- bl_desc->data_dma_base = u64_to_flcn64(offset + hdr->data_dma_base);
- bl_desc->data_size = hdr->data_size;
-}
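
hsf_load_header_app_off() and hsf_load_header_app_size() used above index into a variable-length apps[] array at the end of the load header. A plausible reading, stated as an assumption since the real definitions live in the secboot headers: offsets occupy the first num_apps entries and sizes the following num_apps.

#include <stdint.h>

struct hsf_load_header_sketch {
	/* ...fixed fields elided... */
	uint32_t num_apps;
	uint32_t apps[];	/* num_apps offsets, then num_apps sizes */
};

static uint32_t
app_off(const struct hsf_load_header_sketch *hdr, uint32_t app)
{
	return hdr->apps[app];
}

static uint32_t
app_size(const struct hsf_load_header_sketch *hdr, uint32_t app)
{
	return hdr->apps[hdr->num_apps + app];
}
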
-
-const struct acr_r352_func
-acr_r370_func = {
- .fixup_hs_desc = acr_r367_fixup_hs_desc,
- .generate_hs_bl_desc = acr_r370_generate_hs_bl_desc,
- .hs_bl_desc_size = sizeof(struct acr_r370_flcn_bl_desc),
- .shadow_blob = true,
- .ls_ucode_img_load = acr_r367_ls_ucode_img_load,
- .ls_fill_headers = acr_r367_ls_fill_headers,
- .ls_write_wpr = acr_r367_ls_write_wpr,
- .ls_func = {
- [NVKM_SECBOOT_FALCON_SEC2] = &acr_r370_ls_sec2_func,
- [NVKM_SECBOOT_FALCON_FECS] = &acr_r370_ls_fecs_func,
- [NVKM_SECBOOT_FALCON_GPCCS] = &acr_r370_ls_gpccs_func,
- },
-};
-
-struct nvkm_acr *
-acr_r370_new(enum nvkm_secboot_falcon boot_falcon,
- unsigned long managed_falcons)
-{
- return acr_r352_new_(&acr_r370_func, boot_falcon, managed_falcons);
-}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr_r370.h b/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr_r370.h
deleted file mode 100644
index 2efed6f995ad..000000000000
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr_r370.h
+++ /dev/null
@@ -1,50 +0,0 @@
-/*
- * Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
- * DEALINGS IN THE SOFTWARE.
- */
-
-#ifndef __NVKM_SECBOOT_ACR_R370_H__
-#define __NVKM_SECBOOT_ACR_R370_H__
-
-#include "priv.h"
-struct hsf_load_header;
-
-/* Same as acr_r361_flcn_bl_desc, plus argc/argv */
-struct acr_r370_flcn_bl_desc {
- u32 reserved[4];
- u32 signature[4];
- u32 ctx_dma;
- struct flcn_u64 code_dma_base;
- u32 non_sec_code_off;
- u32 non_sec_code_size;
- u32 sec_code_off;
- u32 sec_code_size;
- u32 code_entry_point;
- struct flcn_u64 data_dma_base;
- u32 data_size;
- u32 argc;
- u32 argv;
-};
-
-void acr_r370_generate_hs_bl_desc(const struct hsf_load_header *, void *, u64);
-extern const struct acr_r352_ls_func acr_r370_ls_fecs_func;
-extern const struct acr_r352_ls_func acr_r370_ls_gpccs_func;
-extern const struct acr_r352_lsf_func acr_r370_ls_sec2_func_0;
-#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr_r375.c b/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr_r375.c
deleted file mode 100644
index 8f0647766038..000000000000
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr_r375.c
+++ /dev/null
@@ -1,94 +0,0 @@
-/*
- * Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
- * DEALINGS IN THE SOFTWARE.
- */
-
-#include "acr_r370.h"
-#include "acr_r367.h"
-
-#include <core/msgqueue.h>
-#include <subdev/pmu.h>
-
-static void
-acr_r375_generate_pmu_bl_desc(const struct nvkm_acr *acr,
- const struct ls_ucode_img *img, u64 wpr_addr,
- void *_desc)
-{
- const struct ls_ucode_img_desc *pdesc = &img->ucode_desc;
- const struct nvkm_pmu *pmu = acr->subdev->device->pmu;
- struct acr_r370_flcn_bl_desc *desc = _desc;
- u64 base, addr_code, addr_data;
- u32 addr_args;
-
- base = wpr_addr + img->ucode_off + pdesc->app_start_offset;
- addr_code = base + pdesc->app_resident_code_offset;
- addr_data = base + pdesc->app_resident_data_offset;
- addr_args = pmu->falcon->data.limit;
- addr_args -= NVKM_MSGQUEUE_CMDLINE_SIZE;
-
- desc->ctx_dma = FALCON_DMAIDX_UCODE;
- desc->code_dma_base = u64_to_flcn64(addr_code);
- desc->non_sec_code_off = pdesc->app_resident_code_offset;
- desc->non_sec_code_size = pdesc->app_resident_code_size;
- desc->code_entry_point = pdesc->app_imem_entry;
- desc->data_dma_base = u64_to_flcn64(addr_data);
- desc->data_size = pdesc->app_resident_data_size;
- desc->argc = 1;
- desc->argv = addr_args;
-}
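
As in the SEC2 variant, addr_args places the boot arguments at the very top of the falcon's DMEM, reserving NVKM_MSGQUEUE_CMDLINE_SIZE bytes just below the limit. A trivial standalone restatement with hypothetical parameter names:

#include <stdint.h>
#include <assert.h>

/* dmem_limit / cmdline_size stand in for falcon->data.limit and
 * NVKM_MSGQUEUE_CMDLINE_SIZE; the args live at the very top of DMEM. */
static uint32_t
args_addr(uint32_t dmem_limit, uint32_t cmdline_size)
{
	assert(cmdline_size <= dmem_limit);
	return dmem_limit - cmdline_size;
}
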
-
-static const struct acr_r352_lsf_func
-acr_r375_ls_pmu_func_0 = {
- .generate_bl_desc = acr_r375_generate_pmu_bl_desc,
- .bl_desc_size = sizeof(struct acr_r370_flcn_bl_desc),
-};
-
-const struct acr_r352_ls_func
-acr_r375_ls_pmu_func = {
- .load = acr_ls_ucode_load_pmu,
- .post_run = acr_ls_pmu_post_run,
- .version_max = 0,
- .version = {
- &acr_r375_ls_pmu_func_0,
- }
-};
-
-const struct acr_r352_func
-acr_r375_func = {
- .fixup_hs_desc = acr_r367_fixup_hs_desc,
- .generate_hs_bl_desc = acr_r370_generate_hs_bl_desc,
- .hs_bl_desc_size = sizeof(struct acr_r370_flcn_bl_desc),
- .shadow_blob = true,
- .ls_ucode_img_load = acr_r367_ls_ucode_img_load,
- .ls_fill_headers = acr_r367_ls_fill_headers,
- .ls_write_wpr = acr_r367_ls_write_wpr,
- .ls_func = {
- [NVKM_SECBOOT_FALCON_FECS] = &acr_r370_ls_fecs_func,
- [NVKM_SECBOOT_FALCON_GPCCS] = &acr_r370_ls_gpccs_func,
- [NVKM_SECBOOT_FALCON_PMU] = &acr_r375_ls_pmu_func,
- },
-};
-
-struct nvkm_acr *
-acr_r375_new(enum nvkm_secboot_falcon boot_falcon,
- unsigned long managed_falcons)
-{
- return acr_r352_new_(&acr_r375_func, boot_falcon, managed_falcons);
-}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/base.c b/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/base.c
deleted file mode 100644
index ee29c6c11afd..000000000000
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/base.c
+++ /dev/null
@@ -1,213 +0,0 @@
-/*
- * Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
- * DEALINGS IN THE SOFTWARE.
- */
-
-/*
- * Secure boot is the process by which NVIDIA-signed firmware is loaded into
- * some of the falcons of a GPU. For production devices this is the only way
- * for the firmware to access useful (but sensitive) registers.
- *
- * A Falcon microprocessor supporting advanced security modes can run in one of
- * three modes:
- *
- * - Non-secure (NS). In this mode, functionality is similar to Falcon
- * architectures before security modes were introduced (pre-Maxwell), but
- * capability is restricted. In particular, certain registers may be
- * inaccessible for reads and/or writes, and physical memory access may be
- * disabled (on certain Falcon instances). This is the only possible mode that
- * can be used if you don't have microcode cryptographically signed by NVIDIA.
- *
- * - Heavy Secure (HS). In this mode, the microprocessor is a black box - it's
- * not possible to read or write any Falcon internal state or Falcon registers
- * from outside the Falcon (for example, from the host system). The only way
- * to enable this mode is by loading microcode that has been signed by NVIDIA.
- * (The loading process involves tagging the IMEM block as secure, writing the
- * signature into a Falcon register, and starting execution. The hardware will
- * validate the signature, and if valid, grant HS privileges.)
- *
- * - Light Secure (LS). In this mode, the microprocessor has more privileges
- * than NS but fewer than HS. Some of the microprocessor state is visible to
- * host software to ease debugging. The only way to enable this mode is by HS
- * microcode enabling LS mode. Some privileges available to HS mode are not
- * available here. LS mode is introduced in GM20x.
- *
- * Secure boot consists of temporarily switching an HS-capable falcon
- * (typically the PMU) into HS mode in order to validate the LS firmwares of
- * the managed falcons, load them, and switch the managed falcons into LS
- * mode. Once secure boot completes, no falcon remains in HS mode.
- *
- * Secure boot requires a write-protected memory region (WPR) which can only be
- * written by the secure falcon. On dGPU, the driver sets up the WPR region in
- * video memory. On Tegra, it is set up by the bootloader and its location and
- * size written into memory controller registers.
- *
- * The secure boot process takes place as follows:
- *
- * 1) A LS blob is constructed that contains all the LS firmwares we want to
- * load, along with their signatures and bootloaders.
- *
- * 2) A HS blob (also called ACR) is created that contains the signed HS
- * firmware in charge of loading the LS firmwares into their respective
- * falcons.
- *
- * 3) The HS blob is loaded (via its own bootloader) and executed on the
- * HS-capable falcon. It authenticates itself, switches the secure falcon to
- * HS mode, and sets up the WPR region around the LS blob (dGPU) or copies
- * the LS blob into the WPR region (Tegra).
- *
- * 4) The LS blob is now secure from all external tampering. The HS falcon
- * checks the signatures of the LS firmwares and, if valid, switches the
- * managed falcons to LS mode and makes them ready to run the LS firmware.
- *
- * 5) The managed falcons remain in LS mode and can be started.
- *
- */
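
Reduced to driver-side pseudocode, the five steps look like the outline below. None of these helper names exist in nvkm; they are purely illustrative stubs.

/* Illustrative stubs only; none of these helpers exist in nvkm. */
static void build_ls_blob(void) { /* 1) LS firmwares + sigs + BLs */ }
static void build_hs_blob(void) { /* 2) signed ACR (HS) ucode */ }
static void run_hs_blob(void)   { /* 3) HS falcon self-authenticates, sets up
				   *    (dGPU) or fills (Tegra) the WPR region
				   * 4) verifies LS signatures, puts managed
				   *    falcons in LS mode */ }

int main(void)
{
	build_ls_blob();
	build_hs_blob();
	run_hs_blob();
	return 0;	/* 5) managed falcons can now be started */
}
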
-
-#include "priv.h"
-#include "acr.h"
-
-#include <subdev/mc.h>
-#include <subdev/timer.h>
-#include <subdev/pmu.h>
-#include <engine/sec2.h>
-
-const char *
-nvkm_secboot_falcon_name[] = {
- [NVKM_SECBOOT_FALCON_PMU] = "PMU",
- [NVKM_SECBOOT_FALCON_RESERVED] = "<reserved>",
- [NVKM_SECBOOT_FALCON_FECS] = "FECS",
- [NVKM_SECBOOT_FALCON_GPCCS] = "GPCCS",
- [NVKM_SECBOOT_FALCON_SEC2] = "SEC2",
- [NVKM_SECBOOT_FALCON_END] = "<invalid>",
-};
-/**
- * nvkm_secboot_reset() - reset specified falcon
- */
-int
-nvkm_secboot_reset(struct nvkm_secboot *sb, unsigned long falcon_mask)
-{
- /* Unmanaged falcon? */
- if ((falcon_mask | sb->acr->managed_falcons) != sb->acr->managed_falcons) {
- nvkm_error(&sb->subdev, "cannot reset unmanaged falcon!\n");
- return -EINVAL;
- }
-
- return sb->acr->func->reset(sb->acr, sb, falcon_mask);
-}
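
The unmanaged-falcon test above is a bitmask subset check: (mask | managed) != managed holds exactly when mask has a bit outside managed. An equivalent spelling, shown as a standalone helper:

#include <stdbool.h>

/* true if 'mask' requests any falcon outside 'managed' */
static bool
has_unmanaged(unsigned long mask, unsigned long managed)
{
	return (mask & ~managed) != 0;	/* == ((mask | managed) != managed) */
}
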
-
-/**
- * nvkm_secboot_is_managed() - check whether a given falcon is securely-managed
- */
-bool
-nvkm_secboot_is_managed(struct nvkm_secboot *sb, enum nvkm_secboot_falcon fid)
-{
- if (!sb)
- return false;
-
- return sb->acr->managed_falcons & BIT(fid);
-}
-
-static int
-nvkm_secboot_oneinit(struct nvkm_subdev *subdev)
-{
- struct nvkm_secboot *sb = nvkm_secboot(subdev);
- int ret = 0;
-
- switch (sb->acr->boot_falcon) {
- case NVKM_SECBOOT_FALCON_PMU:
- sb->halt_falcon = sb->boot_falcon = subdev->device->pmu->falcon;
- break;
- case NVKM_SECBOOT_FALCON_SEC2:
- /* we must keep SEC2 alive forever since ACR will run on it */
- nvkm_engine_ref(&subdev->device->sec2->engine);
- sb->boot_falcon = subdev->device->sec2->falcon;
- sb->halt_falcon = subdev->device->pmu->falcon;
- break;
- default:
- nvkm_error(subdev, "Unmanaged boot falcon %s!\n",
- nvkm_secboot_falcon_name[sb->acr->boot_falcon]);
- return -EINVAL;
- }
- nvkm_debug(subdev, "using %s falcon for ACR\n", sb->boot_falcon->name);
-
- /* Call chip-specific init function */
- if (sb->func->oneinit)
- ret = sb->func->oneinit(sb);
- if (ret) {
- nvkm_error(subdev, "Secure Boot initialization failed: %d\n",
- ret);
- return ret;
- }
-
- return 0;
-}
-
-static int
-nvkm_secboot_fini(struct nvkm_subdev *subdev, bool suspend)
-{
- struct nvkm_secboot *sb = nvkm_secboot(subdev);
- int ret = 0;
-
- if (sb->func->fini)
- ret = sb->func->fini(sb, suspend);
-
- return ret;
-}
-
-static void *
-nvkm_secboot_dtor(struct nvkm_subdev *subdev)
-{
- struct nvkm_secboot *sb = nvkm_secboot(subdev);
- void *ret = NULL;
-
- if (sb->func->dtor)
- ret = sb->func->dtor(sb);
-
- return ret;
-}
-
-static const struct nvkm_subdev_func
-nvkm_secboot = {
- .oneinit = nvkm_secboot_oneinit,
- .fini = nvkm_secboot_fini,
- .dtor = nvkm_secboot_dtor,
-};
-
-int
-nvkm_secboot_ctor(const struct nvkm_secboot_func *func, struct nvkm_acr *acr,
- struct nvkm_device *device, int index,
- struct nvkm_secboot *sb)
-{
- unsigned long fid;
-
- nvkm_subdev_ctor(&nvkm_secboot, device, index, &sb->subdev);
- sb->func = func;
- sb->acr = acr;
- acr->subdev = &sb->subdev;
-
- nvkm_debug(&sb->subdev, "securely managed falcons:\n");
- for_each_set_bit(fid, &sb->acr->managed_falcons,
- NVKM_SECBOOT_FALCON_END)
- nvkm_debug(&sb->subdev, "- %s\n",
- nvkm_secboot_falcon_name[fid]);
-
- return 0;
-}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/gm200.c b/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/gm200.c
deleted file mode 100644
index 5e91b3f90065..000000000000
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/gm200.c
+++ /dev/null
@@ -1,262 +0,0 @@
-/*
- * Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
- * DEALINGS IN THE SOFTWARE.
- */
-
-#include "acr.h"
-#include "gm200.h"
-
-#include <core/gpuobj.h>
-#include <subdev/fb.h>
-#include <engine/falcon.h>
-#include <subdev/mc.h>
-
-/**
- * gm200_secboot_run_blob() - run the given high-secure blob
- */
-int
-gm200_secboot_run_blob(struct nvkm_secboot *sb, struct nvkm_gpuobj *blob,
- struct nvkm_falcon *falcon)
-{
- struct gm200_secboot *gsb = gm200_secboot(sb);
- struct nvkm_subdev *subdev = &gsb->base.subdev;
- struct nvkm_vma *vma = NULL;
- u32 start_address;
- int ret;
-
- ret = nvkm_falcon_get(falcon, subdev);
- if (ret)
- return ret;
-
- /* Map the HS firmware so the HS bootloader can see it */
- ret = nvkm_vmm_get(gsb->vmm, 12, blob->size, &vma);
- if (ret) {
- nvkm_falcon_put(falcon, subdev);
- return ret;
- }
-
- ret = nvkm_memory_map(blob, 0, gsb->vmm, vma, NULL, 0);
- if (ret)
- goto end;
-
- /* Reset and set the falcon up */
- ret = nvkm_falcon_reset(falcon);
- if (ret)
- goto end;
- nvkm_falcon_bind_context(falcon, gsb->inst);
-
- /* Load the HS bootloader into the falcon's IMEM/DMEM */
- ret = sb->acr->func->load(sb->acr, falcon, blob, vma->addr);
- if (ret < 0)
- goto end;
-
- start_address = ret;
-
- /* Disable interrupts as we will poll for the HALT bit */
- nvkm_mc_intr_mask(sb->subdev.device, falcon->owner->index, false);
-
- /* Set default error value in mailbox register */
- nvkm_falcon_wr32(falcon, 0x040, 0xdeada5a5);
-
- /* Start the HS bootloader */
- nvkm_falcon_set_start_addr(falcon, start_address);
- nvkm_falcon_start(falcon);
- ret = nvkm_falcon_wait_for_halt(falcon, 100);
- if (ret)
- goto end;
-
- /*
- * The mailbox register contains the (positive) error code - return this
- * to the caller
- */
- ret = nvkm_falcon_rd32(falcon, 0x040);
-
-end:
- /* Reenable interrupts */
- nvkm_mc_intr_mask(sb->subdev.device, falcon->owner->index, true);
-
- /* We don't need the ACR firmware anymore */
- nvkm_vmm_put(gsb->vmm, &vma);
- nvkm_falcon_put(falcon, subdev);
-
- return ret;
-}
-
-int
-gm200_secboot_oneinit(struct nvkm_secboot *sb)
-{
- struct gm200_secboot *gsb = gm200_secboot(sb);
- struct nvkm_device *device = sb->subdev.device;
- int ret;
-
- /* Allocate instance block and VM */
- ret = nvkm_memory_new(device, NVKM_MEM_TARGET_INST, 0x1000, 0, true,
- &gsb->inst);
- if (ret)
- return ret;
-
- ret = nvkm_vmm_new(device, 0, 600 * 1024, NULL, 0, NULL, "acr",
- &gsb->vmm);
- if (ret)
- return ret;
-
- atomic_inc(&gsb->vmm->engref[NVKM_SUBDEV_PMU]);
- gsb->vmm->debug = gsb->base.subdev.debug;
-
- ret = nvkm_vmm_join(gsb->vmm, gsb->inst);
- if (ret)
- return ret;
-
- if (sb->acr->func->oneinit) {
- ret = sb->acr->func->oneinit(sb->acr, sb);
- if (ret)
- return ret;
- }
-
- return 0;
-}
-
-int
-gm200_secboot_fini(struct nvkm_secboot *sb, bool suspend)
-{
- int ret = 0;
-
- if (sb->acr->func->fini)
- ret = sb->acr->func->fini(sb->acr, sb, suspend);
-
- return ret;
-}
-
-void *
-gm200_secboot_dtor(struct nvkm_secboot *sb)
-{
- struct gm200_secboot *gsb = gm200_secboot(sb);
-
- sb->acr->func->dtor(sb->acr);
-
- nvkm_vmm_part(gsb->vmm, gsb->inst);
- nvkm_vmm_unref(&gsb->vmm);
- nvkm_memory_unref(&gsb->inst);
-
- return gsb;
-}
-
-static const struct nvkm_secboot_func
-gm200_secboot = {
- .dtor = gm200_secboot_dtor,
- .oneinit = gm200_secboot_oneinit,
- .fini = gm200_secboot_fini,
- .run_blob = gm200_secboot_run_blob,
-};
-
-int
-gm200_secboot_new(struct nvkm_device *device, int index,
- struct nvkm_secboot **psb)
-{
- int ret;
- struct gm200_secboot *gsb;
- struct nvkm_acr *acr;
-
- acr = acr_r361_new(BIT(NVKM_SECBOOT_FALCON_FECS) |
- BIT(NVKM_SECBOOT_FALCON_GPCCS));
- if (IS_ERR(acr))
- return PTR_ERR(acr);
-
- gsb = kzalloc(sizeof(*gsb), GFP_KERNEL);
- if (!gsb) {
-		*psb = NULL;
- return -ENOMEM;
- }
- *psb = &gsb->base;
-
- ret = nvkm_secboot_ctor(&gm200_secboot, acr, device, index, &gsb->base);
- if (ret)
- return ret;
-
- return 0;
-}
-
-MODULE_FIRMWARE("nvidia/gm200/acr/bl.bin");
-MODULE_FIRMWARE("nvidia/gm200/acr/ucode_load.bin");
-MODULE_FIRMWARE("nvidia/gm200/acr/ucode_unload.bin");
-MODULE_FIRMWARE("nvidia/gm200/gr/fecs_bl.bin");
-MODULE_FIRMWARE("nvidia/gm200/gr/fecs_inst.bin");
-MODULE_FIRMWARE("nvidia/gm200/gr/fecs_data.bin");
-MODULE_FIRMWARE("nvidia/gm200/gr/fecs_sig.bin");
-MODULE_FIRMWARE("nvidia/gm200/gr/gpccs_bl.bin");
-MODULE_FIRMWARE("nvidia/gm200/gr/gpccs_inst.bin");
-MODULE_FIRMWARE("nvidia/gm200/gr/gpccs_data.bin");
-MODULE_FIRMWARE("nvidia/gm200/gr/gpccs_sig.bin");
-MODULE_FIRMWARE("nvidia/gm200/gr/sw_ctx.bin");
-MODULE_FIRMWARE("nvidia/gm200/gr/sw_nonctx.bin");
-MODULE_FIRMWARE("nvidia/gm200/gr/sw_bundle_init.bin");
-MODULE_FIRMWARE("nvidia/gm200/gr/sw_method_init.bin");
-
-MODULE_FIRMWARE("nvidia/gm204/acr/bl.bin");
-MODULE_FIRMWARE("nvidia/gm204/acr/ucode_load.bin");
-MODULE_FIRMWARE("nvidia/gm204/acr/ucode_unload.bin");
-MODULE_FIRMWARE("nvidia/gm204/gr/fecs_bl.bin");
-MODULE_FIRMWARE("nvidia/gm204/gr/fecs_inst.bin");
-MODULE_FIRMWARE("nvidia/gm204/gr/fecs_data.bin");
-MODULE_FIRMWARE("nvidia/gm204/gr/fecs_sig.bin");
-MODULE_FIRMWARE("nvidia/gm204/gr/gpccs_bl.bin");
-MODULE_FIRMWARE("nvidia/gm204/gr/gpccs_inst.bin");
-MODULE_FIRMWARE("nvidia/gm204/gr/gpccs_data.bin");
-MODULE_FIRMWARE("nvidia/gm204/gr/gpccs_sig.bin");
-MODULE_FIRMWARE("nvidia/gm204/gr/sw_ctx.bin");
-MODULE_FIRMWARE("nvidia/gm204/gr/sw_nonctx.bin");
-MODULE_FIRMWARE("nvidia/gm204/gr/sw_bundle_init.bin");
-MODULE_FIRMWARE("nvidia/gm204/gr/sw_method_init.bin");
-
-MODULE_FIRMWARE("nvidia/gm206/acr/bl.bin");
-MODULE_FIRMWARE("nvidia/gm206/acr/ucode_load.bin");
-MODULE_FIRMWARE("nvidia/gm206/acr/ucode_unload.bin");
-MODULE_FIRMWARE("nvidia/gm206/gr/fecs_bl.bin");
-MODULE_FIRMWARE("nvidia/gm206/gr/fecs_inst.bin");
-MODULE_FIRMWARE("nvidia/gm206/gr/fecs_data.bin");
-MODULE_FIRMWARE("nvidia/gm206/gr/fecs_sig.bin");
-MODULE_FIRMWARE("nvidia/gm206/gr/gpccs_bl.bin");
-MODULE_FIRMWARE("nvidia/gm206/gr/gpccs_inst.bin");
-MODULE_FIRMWARE("nvidia/gm206/gr/gpccs_data.bin");
-MODULE_FIRMWARE("nvidia/gm206/gr/gpccs_sig.bin");
-MODULE_FIRMWARE("nvidia/gm206/gr/sw_ctx.bin");
-MODULE_FIRMWARE("nvidia/gm206/gr/sw_nonctx.bin");
-MODULE_FIRMWARE("nvidia/gm206/gr/sw_bundle_init.bin");
-MODULE_FIRMWARE("nvidia/gm206/gr/sw_method_init.bin");
-
-MODULE_FIRMWARE("nvidia/gp100/acr/bl.bin");
-MODULE_FIRMWARE("nvidia/gp100/acr/ucode_load.bin");
-MODULE_FIRMWARE("nvidia/gp100/acr/ucode_unload.bin");
-MODULE_FIRMWARE("nvidia/gp100/gr/fecs_bl.bin");
-MODULE_FIRMWARE("nvidia/gp100/gr/fecs_inst.bin");
-MODULE_FIRMWARE("nvidia/gp100/gr/fecs_data.bin");
-MODULE_FIRMWARE("nvidia/gp100/gr/fecs_sig.bin");
-MODULE_FIRMWARE("nvidia/gp100/gr/gpccs_bl.bin");
-MODULE_FIRMWARE("nvidia/gp100/gr/gpccs_inst.bin");
-MODULE_FIRMWARE("nvidia/gp100/gr/gpccs_data.bin");
-MODULE_FIRMWARE("nvidia/gp100/gr/gpccs_sig.bin");
-MODULE_FIRMWARE("nvidia/gp100/gr/sw_ctx.bin");
-MODULE_FIRMWARE("nvidia/gp100/gr/sw_nonctx.bin");
-MODULE_FIRMWARE("nvidia/gp100/gr/sw_bundle_init.bin");
-MODULE_FIRMWARE("nvidia/gp100/gr/sw_method_init.bin");
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/gm200.h b/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/gm200.h
deleted file mode 100644
index 62c5e162099a..000000000000
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/gm200.h
+++ /dev/null
@@ -1,46 +0,0 @@
-/*
- * Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
- * DEALINGS IN THE SOFTWARE.
- */
-
-#ifndef __NVKM_SECBOOT_GM200_H__
-#define __NVKM_SECBOOT_GM200_H__
-
-#include "priv.h"
-
-struct gm200_secboot {
- struct nvkm_secboot base;
-
- /* Instance block & address space used for HS FW execution */
- struct nvkm_memory *inst;
- struct nvkm_vmm *vmm;
-};
-#define gm200_secboot(sb) container_of(sb, struct gm200_secboot, base)
-
-int gm200_secboot_oneinit(struct nvkm_secboot *);
-int gm200_secboot_fini(struct nvkm_secboot *, bool);
-void *gm200_secboot_dtor(struct nvkm_secboot *);
-int gm200_secboot_run_blob(struct nvkm_secboot *, struct nvkm_gpuobj *,
- struct nvkm_falcon *);
-
-/* Tegra-only */
-int gm20b_secboot_tegra_read_wpr(struct gm200_secboot *, u32);
-
-#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/gm20b.c b/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/gm20b.c
deleted file mode 100644
index df8b919dcf09..000000000000
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/gm20b.c
+++ /dev/null
@@ -1,148 +0,0 @@
-/*
- * Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
- * DEALINGS IN THE SOFTWARE.
- */
-
-#include "acr.h"
-#include "gm200.h"
-
-#define TEGRA210_MC_BASE 0x70019000
-
-#ifdef CONFIG_ARCH_TEGRA
-#define MC_SECURITY_CARVEOUT2_CFG0 0xc58
-#define MC_SECURITY_CARVEOUT2_BOM_0 0xc5c
-#define MC_SECURITY_CARVEOUT2_BOM_HI_0 0xc60
-#define MC_SECURITY_CARVEOUT2_SIZE_128K 0xc64
-#define TEGRA_MC_SECURITY_CARVEOUT_CFG_LOCKED (1 << 1)
-/**
- * gm20b_secboot_tegra_read_wpr() - read the WPR registers on Tegra
- *
- * On dGPU, we can manage the WPR region ourselves, but on Tegra the WPR region
- * is reserved from system memory by the bootloader and irreversibly locked.
- * This function reads the address and size of the pre-configured WPR region.
- */
-int
-gm20b_secboot_tegra_read_wpr(struct gm200_secboot *gsb, u32 mc_base)
-{
- struct nvkm_secboot *sb = &gsb->base;
- void __iomem *mc;
- u32 cfg;
-
- mc = ioremap(mc_base, 0xd00);
- if (!mc) {
- nvkm_error(&sb->subdev, "Cannot map Tegra MC registers\n");
- return -ENOMEM;
- }
- sb->wpr_addr = ioread32_native(mc + MC_SECURITY_CARVEOUT2_BOM_0) |
- ((u64)ioread32_native(mc + MC_SECURITY_CARVEOUT2_BOM_HI_0) << 32);
- sb->wpr_size = ioread32_native(mc + MC_SECURITY_CARVEOUT2_SIZE_128K)
- << 17;
- cfg = ioread32_native(mc + MC_SECURITY_CARVEOUT2_CFG0);
- iounmap(mc);
-
- /* Check that WPR settings are valid */
- if (sb->wpr_size == 0) {
- nvkm_error(&sb->subdev, "WPR region is empty\n");
- return -EINVAL;
- }
-
- if (!(cfg & TEGRA_MC_SECURITY_CARVEOUT_CFG_LOCKED)) {
- nvkm_error(&sb->subdev, "WPR region not locked\n");
- return -EINVAL;
- }
-
- return 0;
-}
-#else
-int
-gm20b_secboot_tegra_read_wpr(struct gm200_secboot *gsb, u32 mc_base)
-{
- nvkm_error(&gsb->base.subdev, "Tegra support not compiled in\n");
- return -EINVAL;
-}
-#endif
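
The carveout registers encode the WPR bounds compactly: the base is split across two 32-bit registers and the size register counts 128 KiB units, hence the << 17. A standalone sketch of the decoding with example values (not real hardware reads):

#include <stdint.h>
#include <stdio.h>

static uint64_t wpr_addr(uint32_t bom_lo, uint32_t bom_hi)
{
	return (uint64_t)bom_lo | ((uint64_t)bom_hi << 32);
}

static uint64_t wpr_size(uint32_t size_128k)
{
	return (uint64_t)size_128k << 17;	/* units of 128 KiB */
}

int main(void)
{
	/* example values only, not real register reads */
	printf("addr=%#llx size=%#llx\n",
	       (unsigned long long)wpr_addr(0x80000000u, 0x1),
	       (unsigned long long)wpr_size(2));	/* 256 KiB */
	return 0;
}
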
-
-static int
-gm20b_secboot_oneinit(struct nvkm_secboot *sb)
-{
- struct gm200_secboot *gsb = gm200_secboot(sb);
- int ret;
-
- ret = gm20b_secboot_tegra_read_wpr(gsb, TEGRA210_MC_BASE);
- if (ret)
- return ret;
-
- return gm200_secboot_oneinit(sb);
-}
-
-static const struct nvkm_secboot_func
-gm20b_secboot = {
- .dtor = gm200_secboot_dtor,
- .oneinit = gm20b_secboot_oneinit,
- .fini = gm200_secboot_fini,
- .run_blob = gm200_secboot_run_blob,
-};
-
-int
-gm20b_secboot_new(struct nvkm_device *device, int index,
- struct nvkm_secboot **psb)
-{
- int ret;
- struct gm200_secboot *gsb;
- struct nvkm_acr *acr;
-
- acr = acr_r352_new(BIT(NVKM_SECBOOT_FALCON_FECS) |
- BIT(NVKM_SECBOOT_FALCON_PMU));
- if (IS_ERR(acr))
- return PTR_ERR(acr);
- /* Support the initial GM20B firmware release without PMU */
- acr->optional_falcons = BIT(NVKM_SECBOOT_FALCON_PMU);
-
- gsb = kzalloc(sizeof(*gsb), GFP_KERNEL);
- if (!gsb) {
-		*psb = NULL;
- return -ENOMEM;
- }
- *psb = &gsb->base;
-
- ret = nvkm_secboot_ctor(&gm20b_secboot, acr, device, index, &gsb->base);
- if (ret)
- return ret;
-
- return 0;
-}
-
-#if IS_ENABLED(CONFIG_ARCH_TEGRA_210_SOC)
-MODULE_FIRMWARE("nvidia/gm20b/acr/bl.bin");
-MODULE_FIRMWARE("nvidia/gm20b/acr/ucode_load.bin");
-MODULE_FIRMWARE("nvidia/gm20b/gr/fecs_bl.bin");
-MODULE_FIRMWARE("nvidia/gm20b/gr/fecs_inst.bin");
-MODULE_FIRMWARE("nvidia/gm20b/gr/fecs_data.bin");
-MODULE_FIRMWARE("nvidia/gm20b/gr/fecs_sig.bin");
-MODULE_FIRMWARE("nvidia/gm20b/gr/gpccs_inst.bin");
-MODULE_FIRMWARE("nvidia/gm20b/gr/gpccs_data.bin");
-MODULE_FIRMWARE("nvidia/gm20b/gr/sw_ctx.bin");
-MODULE_FIRMWARE("nvidia/gm20b/gr/sw_nonctx.bin");
-MODULE_FIRMWARE("nvidia/gm20b/gr/sw_bundle_init.bin");
-MODULE_FIRMWARE("nvidia/gm20b/gr/sw_method_init.bin");
-MODULE_FIRMWARE("nvidia/gm20b/pmu/desc.bin");
-MODULE_FIRMWARE("nvidia/gm20b/pmu/image.bin");
-MODULE_FIRMWARE("nvidia/gm20b/pmu/sig.bin");
-#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/gp102.c b/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/gp102.c
deleted file mode 100644
index 4695f1c8e33f..000000000000
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/gp102.c
+++ /dev/null
@@ -1,264 +0,0 @@
-/*
- * Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
- * DEALINGS IN THE SOFTWARE.
- */
-
-#include "acr.h"
-#include "gm200.h"
-
-#include "ls_ucode.h"
-#include "hs_ucode.h"
-#include <subdev/mc.h>
-#include <subdev/timer.h>
-#include <engine/falcon.h>
-#include <engine/nvdec.h>
-
-static bool
-gp102_secboot_scrub_required(struct nvkm_secboot *sb)
-{
- struct nvkm_subdev *subdev = &sb->subdev;
- struct nvkm_device *device = subdev->device;
- u32 reg;
-
- nvkm_wr32(device, 0x100cd0, 0x2);
- reg = nvkm_rd32(device, 0x100cd0);
-
- return (reg & BIT(4));
-}
-
-static int
-gp102_run_secure_scrub(struct nvkm_secboot *sb)
-{
- struct nvkm_subdev *subdev = &sb->subdev;
- struct nvkm_device *device = subdev->device;
- struct nvkm_engine *engine;
- struct nvkm_falcon *falcon;
- void *scrub_image;
- struct fw_bin_header *hsbin_hdr;
- struct hsf_fw_header *fw_hdr;
- struct hsf_load_header *lhdr;
- void *scrub_data;
- int ret;
-
- nvkm_debug(subdev, "running VPR scrubber binary on NVDEC...\n");
-
- engine = nvkm_engine_ref(&device->nvdec[0]->engine);
- if (IS_ERR(engine))
- return PTR_ERR(engine);
- falcon = device->nvdec[0]->falcon;
-
- nvkm_falcon_get(falcon, &sb->subdev);
-
- scrub_image = hs_ucode_load_blob(subdev, falcon, "nvdec/scrubber");
- if (IS_ERR(scrub_image))
- return PTR_ERR(scrub_image);
-
- nvkm_falcon_reset(falcon);
- nvkm_falcon_bind_context(falcon, NULL);
-
- hsbin_hdr = scrub_image;
- fw_hdr = scrub_image + hsbin_hdr->header_offset;
- lhdr = scrub_image + fw_hdr->hdr_offset;
- scrub_data = scrub_image + hsbin_hdr->data_offset;
-
- nvkm_falcon_load_imem(falcon, scrub_data, lhdr->non_sec_code_off,
- lhdr->non_sec_code_size,
- lhdr->non_sec_code_off >> 8, 0, false);
- nvkm_falcon_load_imem(falcon, scrub_data + lhdr->apps[0],
- ALIGN(lhdr->apps[0], 0x100),
- lhdr->apps[1],
- lhdr->apps[0] >> 8, 0, true);
- nvkm_falcon_load_dmem(falcon, scrub_data + lhdr->data_dma_base, 0,
- lhdr->data_size, 0);
-
- kfree(scrub_image);
-
- nvkm_falcon_set_start_addr(falcon, 0x0);
- nvkm_falcon_start(falcon);
-
- ret = nvkm_falcon_wait_for_halt(falcon, 500);
- if (ret < 0) {
- nvkm_error(subdev, "failed to run VPR scrubber binary!\n");
- ret = -ETIMEDOUT;
- goto end;
- }
-
- /* put nvdec in clean state - without reset it will remain in HS mode */
- nvkm_falcon_reset(falcon);
-
- if (gp102_secboot_scrub_required(sb)) {
- nvkm_error(subdev, "VPR scrubber binary failed!\n");
- ret = -EINVAL;
- goto end;
- }
-
- nvkm_debug(subdev, "VPR scrub successfully completed\n");
-
-end:
- nvkm_falcon_put(falcon, &sb->subdev);
- nvkm_engine_unref(&engine);
- return ret;
-}
-
-static int
-gp102_secboot_run_blob(struct nvkm_secboot *sb, struct nvkm_gpuobj *blob,
- struct nvkm_falcon *falcon)
-{
- int ret;
-
- /* make sure the VPR region is unlocked */
- if (gp102_secboot_scrub_required(sb)) {
- ret = gp102_run_secure_scrub(sb);
- if (ret)
- return ret;
- }
-
- return gm200_secboot_run_blob(sb, blob, falcon);
-}
-
-const struct nvkm_secboot_func
-gp102_secboot = {
- .dtor = gm200_secboot_dtor,
- .oneinit = gm200_secboot_oneinit,
- .fini = gm200_secboot_fini,
- .run_blob = gp102_secboot_run_blob,
-};
-
-int
-gp102_secboot_new(struct nvkm_device *device, int index,
- struct nvkm_secboot **psb)
-{
- int ret;
- struct gm200_secboot *gsb;
- struct nvkm_acr *acr;
-
- acr = acr_r367_new(NVKM_SECBOOT_FALCON_SEC2,
- BIT(NVKM_SECBOOT_FALCON_FECS) |
- BIT(NVKM_SECBOOT_FALCON_GPCCS) |
- BIT(NVKM_SECBOOT_FALCON_SEC2));
- if (IS_ERR(acr))
- return PTR_ERR(acr);
-
- gsb = kzalloc(sizeof(*gsb), GFP_KERNEL);
- if (!gsb) {
-		*psb = NULL;
- return -ENOMEM;
- }
- *psb = &gsb->base;
-
- ret = nvkm_secboot_ctor(&gp102_secboot, acr, device, index, &gsb->base);
- if (ret)
- return ret;
-
- return 0;
-}
-
-MODULE_FIRMWARE("nvidia/gp102/acr/bl.bin");
-MODULE_FIRMWARE("nvidia/gp102/acr/unload_bl.bin");
-MODULE_FIRMWARE("nvidia/gp102/acr/ucode_load.bin");
-MODULE_FIRMWARE("nvidia/gp102/acr/ucode_unload.bin");
-MODULE_FIRMWARE("nvidia/gp102/gr/fecs_bl.bin");
-MODULE_FIRMWARE("nvidia/gp102/gr/fecs_inst.bin");
-MODULE_FIRMWARE("nvidia/gp102/gr/fecs_data.bin");
-MODULE_FIRMWARE("nvidia/gp102/gr/fecs_sig.bin");
-MODULE_FIRMWARE("nvidia/gp102/gr/gpccs_bl.bin");
-MODULE_FIRMWARE("nvidia/gp102/gr/gpccs_inst.bin");
-MODULE_FIRMWARE("nvidia/gp102/gr/gpccs_data.bin");
-MODULE_FIRMWARE("nvidia/gp102/gr/gpccs_sig.bin");
-MODULE_FIRMWARE("nvidia/gp102/gr/sw_ctx.bin");
-MODULE_FIRMWARE("nvidia/gp102/gr/sw_nonctx.bin");
-MODULE_FIRMWARE("nvidia/gp102/gr/sw_bundle_init.bin");
-MODULE_FIRMWARE("nvidia/gp102/gr/sw_method_init.bin");
-MODULE_FIRMWARE("nvidia/gp102/nvdec/scrubber.bin");
-MODULE_FIRMWARE("nvidia/gp102/sec2/desc.bin");
-MODULE_FIRMWARE("nvidia/gp102/sec2/image.bin");
-MODULE_FIRMWARE("nvidia/gp102/sec2/sig.bin");
-MODULE_FIRMWARE("nvidia/gp102/sec2/desc-1.bin");
-MODULE_FIRMWARE("nvidia/gp102/sec2/image-1.bin");
-MODULE_FIRMWARE("nvidia/gp102/sec2/sig-1.bin");
-MODULE_FIRMWARE("nvidia/gp104/acr/bl.bin");
-MODULE_FIRMWARE("nvidia/gp104/acr/unload_bl.bin");
-MODULE_FIRMWARE("nvidia/gp104/acr/ucode_load.bin");
-MODULE_FIRMWARE("nvidia/gp104/acr/ucode_unload.bin");
-MODULE_FIRMWARE("nvidia/gp104/gr/fecs_bl.bin");
-MODULE_FIRMWARE("nvidia/gp104/gr/fecs_inst.bin");
-MODULE_FIRMWARE("nvidia/gp104/gr/fecs_data.bin");
-MODULE_FIRMWARE("nvidia/gp104/gr/fecs_sig.bin");
-MODULE_FIRMWARE("nvidia/gp104/gr/gpccs_bl.bin");
-MODULE_FIRMWARE("nvidia/gp104/gr/gpccs_inst.bin");
-MODULE_FIRMWARE("nvidia/gp104/gr/gpccs_data.bin");
-MODULE_FIRMWARE("nvidia/gp104/gr/gpccs_sig.bin");
-MODULE_FIRMWARE("nvidia/gp104/gr/sw_ctx.bin");
-MODULE_FIRMWARE("nvidia/gp104/gr/sw_nonctx.bin");
-MODULE_FIRMWARE("nvidia/gp104/gr/sw_bundle_init.bin");
-MODULE_FIRMWARE("nvidia/gp104/gr/sw_method_init.bin");
-MODULE_FIRMWARE("nvidia/gp104/nvdec/scrubber.bin");
-MODULE_FIRMWARE("nvidia/gp104/sec2/desc.bin");
-MODULE_FIRMWARE("nvidia/gp104/sec2/image.bin");
-MODULE_FIRMWARE("nvidia/gp104/sec2/sig.bin");
-MODULE_FIRMWARE("nvidia/gp104/sec2/desc-1.bin");
-MODULE_FIRMWARE("nvidia/gp104/sec2/image-1.bin");
-MODULE_FIRMWARE("nvidia/gp104/sec2/sig-1.bin");
-MODULE_FIRMWARE("nvidia/gp106/acr/bl.bin");
-MODULE_FIRMWARE("nvidia/gp106/acr/unload_bl.bin");
-MODULE_FIRMWARE("nvidia/gp106/acr/ucode_load.bin");
-MODULE_FIRMWARE("nvidia/gp106/acr/ucode_unload.bin");
-MODULE_FIRMWARE("nvidia/gp106/gr/fecs_bl.bin");
-MODULE_FIRMWARE("nvidia/gp106/gr/fecs_inst.bin");
-MODULE_FIRMWARE("nvidia/gp106/gr/fecs_data.bin");
-MODULE_FIRMWARE("nvidia/gp106/gr/fecs_sig.bin");
-MODULE_FIRMWARE("nvidia/gp106/gr/gpccs_bl.bin");
-MODULE_FIRMWARE("nvidia/gp106/gr/gpccs_inst.bin");
-MODULE_FIRMWARE("nvidia/gp106/gr/gpccs_data.bin");
-MODULE_FIRMWARE("nvidia/gp106/gr/gpccs_sig.bin");
-MODULE_FIRMWARE("nvidia/gp106/gr/sw_ctx.bin");
-MODULE_FIRMWARE("nvidia/gp106/gr/sw_nonctx.bin");
-MODULE_FIRMWARE("nvidia/gp106/gr/sw_bundle_init.bin");
-MODULE_FIRMWARE("nvidia/gp106/gr/sw_method_init.bin");
-MODULE_FIRMWARE("nvidia/gp106/nvdec/scrubber.bin");
-MODULE_FIRMWARE("nvidia/gp106/sec2/desc.bin");
-MODULE_FIRMWARE("nvidia/gp106/sec2/image.bin");
-MODULE_FIRMWARE("nvidia/gp106/sec2/sig.bin");
-MODULE_FIRMWARE("nvidia/gp106/sec2/desc-1.bin");
-MODULE_FIRMWARE("nvidia/gp106/sec2/image-1.bin");
-MODULE_FIRMWARE("nvidia/gp106/sec2/sig-1.bin");
-MODULE_FIRMWARE("nvidia/gp107/acr/bl.bin");
-MODULE_FIRMWARE("nvidia/gp107/acr/unload_bl.bin");
-MODULE_FIRMWARE("nvidia/gp107/acr/ucode_load.bin");
-MODULE_FIRMWARE("nvidia/gp107/acr/ucode_unload.bin");
-MODULE_FIRMWARE("nvidia/gp107/gr/fecs_bl.bin");
-MODULE_FIRMWARE("nvidia/gp107/gr/fecs_inst.bin");
-MODULE_FIRMWARE("nvidia/gp107/gr/fecs_data.bin");
-MODULE_FIRMWARE("nvidia/gp107/gr/fecs_sig.bin");
-MODULE_FIRMWARE("nvidia/gp107/gr/gpccs_bl.bin");
-MODULE_FIRMWARE("nvidia/gp107/gr/gpccs_inst.bin");
-MODULE_FIRMWARE("nvidia/gp107/gr/gpccs_data.bin");
-MODULE_FIRMWARE("nvidia/gp107/gr/gpccs_sig.bin");
-MODULE_FIRMWARE("nvidia/gp107/gr/sw_ctx.bin");
-MODULE_FIRMWARE("nvidia/gp107/gr/sw_nonctx.bin");
-MODULE_FIRMWARE("nvidia/gp107/gr/sw_bundle_init.bin");
-MODULE_FIRMWARE("nvidia/gp107/gr/sw_method_init.bin");
-MODULE_FIRMWARE("nvidia/gp107/nvdec/scrubber.bin");
-MODULE_FIRMWARE("nvidia/gp107/sec2/desc.bin");
-MODULE_FIRMWARE("nvidia/gp107/sec2/image.bin");
-MODULE_FIRMWARE("nvidia/gp107/sec2/sig.bin");
-MODULE_FIRMWARE("nvidia/gp107/sec2/desc-1.bin");
-MODULE_FIRMWARE("nvidia/gp107/sec2/image-1.bin");
-MODULE_FIRMWARE("nvidia/gp107/sec2/sig-1.bin");
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/gp108.c b/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/gp108.c
deleted file mode 100644
index 737a8d50a1f2..000000000000
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/gp108.c
+++ /dev/null
@@ -1,88 +0,0 @@
-/*
- * Copyright 2017 Red Hat Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- */
-#include "gm200.h"
-#include "acr.h"
-
-int
-gp108_secboot_new(struct nvkm_device *device, int index,
- struct nvkm_secboot **psb)
-{
- struct gm200_secboot *gsb;
- struct nvkm_acr *acr;
-
- acr = acr_r370_new(NVKM_SECBOOT_FALCON_SEC2,
- BIT(NVKM_SECBOOT_FALCON_FECS) |
- BIT(NVKM_SECBOOT_FALCON_GPCCS) |
- BIT(NVKM_SECBOOT_FALCON_SEC2));
- if (IS_ERR(acr))
- return PTR_ERR(acr);
-
- if (!(gsb = kzalloc(sizeof(*gsb), GFP_KERNEL))) {
- acr->func->dtor(acr);
- return -ENOMEM;
- }
- *psb = &gsb->base;
-
- return nvkm_secboot_ctor(&gp102_secboot, acr, device, index, &gsb->base);
-}
-
-MODULE_FIRMWARE("nvidia/gp108/acr/bl.bin");
-MODULE_FIRMWARE("nvidia/gp108/acr/unload_bl.bin");
-MODULE_FIRMWARE("nvidia/gp108/acr/ucode_load.bin");
-MODULE_FIRMWARE("nvidia/gp108/acr/ucode_unload.bin");
-MODULE_FIRMWARE("nvidia/gp108/gr/fecs_bl.bin");
-MODULE_FIRMWARE("nvidia/gp108/gr/fecs_inst.bin");
-MODULE_FIRMWARE("nvidia/gp108/gr/fecs_data.bin");
-MODULE_FIRMWARE("nvidia/gp108/gr/fecs_sig.bin");
-MODULE_FIRMWARE("nvidia/gp108/gr/gpccs_bl.bin");
-MODULE_FIRMWARE("nvidia/gp108/gr/gpccs_inst.bin");
-MODULE_FIRMWARE("nvidia/gp108/gr/gpccs_data.bin");
-MODULE_FIRMWARE("nvidia/gp108/gr/gpccs_sig.bin");
-MODULE_FIRMWARE("nvidia/gp108/gr/sw_ctx.bin");
-MODULE_FIRMWARE("nvidia/gp108/gr/sw_nonctx.bin");
-MODULE_FIRMWARE("nvidia/gp108/gr/sw_bundle_init.bin");
-MODULE_FIRMWARE("nvidia/gp108/gr/sw_method_init.bin");
-MODULE_FIRMWARE("nvidia/gp108/nvdec/scrubber.bin");
-MODULE_FIRMWARE("nvidia/gp108/sec2/desc.bin");
-MODULE_FIRMWARE("nvidia/gp108/sec2/image.bin");
-MODULE_FIRMWARE("nvidia/gp108/sec2/sig.bin");
-
-MODULE_FIRMWARE("nvidia/gv100/acr/bl.bin");
-MODULE_FIRMWARE("nvidia/gv100/acr/unload_bl.bin");
-MODULE_FIRMWARE("nvidia/gv100/acr/ucode_load.bin");
-MODULE_FIRMWARE("nvidia/gv100/acr/ucode_unload.bin");
-MODULE_FIRMWARE("nvidia/gv100/gr/fecs_bl.bin");
-MODULE_FIRMWARE("nvidia/gv100/gr/fecs_inst.bin");
-MODULE_FIRMWARE("nvidia/gv100/gr/fecs_data.bin");
-MODULE_FIRMWARE("nvidia/gv100/gr/fecs_sig.bin");
-MODULE_FIRMWARE("nvidia/gv100/gr/gpccs_bl.bin");
-MODULE_FIRMWARE("nvidia/gv100/gr/gpccs_inst.bin");
-MODULE_FIRMWARE("nvidia/gv100/gr/gpccs_data.bin");
-MODULE_FIRMWARE("nvidia/gv100/gr/gpccs_sig.bin");
-MODULE_FIRMWARE("nvidia/gv100/gr/sw_ctx.bin");
-MODULE_FIRMWARE("nvidia/gv100/gr/sw_nonctx.bin");
-MODULE_FIRMWARE("nvidia/gv100/gr/sw_bundle_init.bin");
-MODULE_FIRMWARE("nvidia/gv100/gr/sw_method_init.bin");
-MODULE_FIRMWARE("nvidia/gv100/nvdec/scrubber.bin");
-MODULE_FIRMWARE("nvidia/gv100/sec2/desc.bin");
-MODULE_FIRMWARE("nvidia/gv100/sec2/image.bin");
-MODULE_FIRMWARE("nvidia/gv100/sec2/sig.bin");
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/gp10b.c b/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/gp10b.c
deleted file mode 100644
index 28ca29d0eeee..000000000000
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/gp10b.c
+++ /dev/null
@@ -1,95 +0,0 @@
-/*
- * Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
- * DEALINGS IN THE SOFTWARE.
- */
-
-#include "acr.h"
-#include "gm200.h"
-
-#define TEGRA186_MC_BASE 0x02c10000
-
-static int
-gp10b_secboot_oneinit(struct nvkm_secboot *sb)
-{
- struct gm200_secboot *gsb = gm200_secboot(sb);
- int ret;
-
- ret = gm20b_secboot_tegra_read_wpr(gsb, TEGRA186_MC_BASE);
- if (ret)
- return ret;
-
- return gm200_secboot_oneinit(sb);
-}
-
-static const struct nvkm_secboot_func
-gp10b_secboot = {
- .dtor = gm200_secboot_dtor,
- .oneinit = gp10b_secboot_oneinit,
- .fini = gm200_secboot_fini,
- .run_blob = gm200_secboot_run_blob,
-};
-
-int
-gp10b_secboot_new(struct nvkm_device *device, int index,
- struct nvkm_secboot **psb)
-{
- int ret;
- struct gm200_secboot *gsb;
- struct nvkm_acr *acr;
-
- acr = acr_r352_new(BIT(NVKM_SECBOOT_FALCON_FECS) |
- BIT(NVKM_SECBOOT_FALCON_GPCCS) |
- BIT(NVKM_SECBOOT_FALCON_PMU));
- if (IS_ERR(acr))
- return PTR_ERR(acr);
-
- gsb = kzalloc(sizeof(*gsb), GFP_KERNEL);
- if (!gsb) {
-		*psb = NULL;
- return -ENOMEM;
- }
- *psb = &gsb->base;
-
- ret = nvkm_secboot_ctor(&gp10b_secboot, acr, device, index, &gsb->base);
- if (ret)
- return ret;
-
- return 0;
-}
-
-#if IS_ENABLED(CONFIG_ARCH_TEGRA_186_SOC)
-MODULE_FIRMWARE("nvidia/gp10b/acr/bl.bin");
-MODULE_FIRMWARE("nvidia/gp10b/acr/ucode_load.bin");
-MODULE_FIRMWARE("nvidia/gp10b/gr/fecs_bl.bin");
-MODULE_FIRMWARE("nvidia/gp10b/gr/fecs_inst.bin");
-MODULE_FIRMWARE("nvidia/gp10b/gr/fecs_data.bin");
-MODULE_FIRMWARE("nvidia/gp10b/gr/fecs_sig.bin");
-MODULE_FIRMWARE("nvidia/gp10b/gr/gpccs_bl.bin");
-MODULE_FIRMWARE("nvidia/gp10b/gr/gpccs_inst.bin");
-MODULE_FIRMWARE("nvidia/gp10b/gr/gpccs_data.bin");
-MODULE_FIRMWARE("nvidia/gp10b/gr/gpccs_sig.bin");
-MODULE_FIRMWARE("nvidia/gp10b/gr/sw_ctx.bin");
-MODULE_FIRMWARE("nvidia/gp10b/gr/sw_nonctx.bin");
-MODULE_FIRMWARE("nvidia/gp10b/gr/sw_bundle_init.bin");
-MODULE_FIRMWARE("nvidia/gp10b/gr/sw_method_init.bin");
-MODULE_FIRMWARE("nvidia/gp10b/pmu/desc.bin");
-MODULE_FIRMWARE("nvidia/gp10b/pmu/image.bin");
-MODULE_FIRMWARE("nvidia/gp10b/pmu/sig.bin");
-#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/hs_ucode.c b/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/hs_ucode.c
deleted file mode 100644
index 6b33182ddc2f..000000000000
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/hs_ucode.c
+++ /dev/null
@@ -1,97 +0,0 @@
-/*
- * Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
- * DEALINGS IN THE SOFTWARE.
- */
-
-#include "hs_ucode.h"
-#include "ls_ucode.h"
-#include "acr.h"
-
-#include <engine/falcon.h>
-
-/**
- * hs_ucode_patch_signature() - patch HS blob with the correct signature for
- * the specified falcon
- * @falcon: falcon the blob is being patched for
- * @acr_image: HS blob to patch
- * @new_format: true if patch_loc/patch_sig are direct values (new format)
- */
-static void
-hs_ucode_patch_signature(const struct nvkm_falcon *falcon, void *acr_image,
- bool new_format)
-{
- struct fw_bin_header *hsbin_hdr = acr_image;
- struct hsf_fw_header *fw_hdr = acr_image + hsbin_hdr->header_offset;
- void *hs_data = acr_image + hsbin_hdr->data_offset;
- void *sig;
- u32 sig_size;
- u32 patch_loc, patch_sig;
-
- /*
- * I had the brilliant idea to "improve" the binary format by
- * removing this useless indirection. However, to keep NVIDIA files
- * directly compatible, let's support both formats.
- */
- if (new_format) {
- patch_loc = fw_hdr->patch_loc;
- patch_sig = fw_hdr->patch_sig;
- } else {
- patch_loc = *(u32 *)(acr_image + fw_hdr->patch_loc);
- patch_sig = *(u32 *)(acr_image + fw_hdr->patch_sig);
- }
-
- /* Falcon in debug or production mode? */
- if (falcon->debug) {
- sig = acr_image + fw_hdr->sig_dbg_offset;
- sig_size = fw_hdr->sig_dbg_size;
- } else {
- sig = acr_image + fw_hdr->sig_prod_offset;
- sig_size = fw_hdr->sig_prod_size;
- }
-
- /* Patch signature */
- memcpy(hs_data + patch_loc, sig + patch_sig, sig_size);
-}
-
-void *
-hs_ucode_load_blob(struct nvkm_subdev *subdev, const struct nvkm_falcon *falcon,
- const char *fw)
-{
- void *acr_image;
- bool new_format;
-
- acr_image = nvkm_acr_load_firmware(subdev, fw, 0);
- if (IS_ERR(acr_image))
- return acr_image;
-
-	/* detect the format to decide how the signature should be patched */
- switch (((u32 *)acr_image)[0]) {
- case 0x3b1d14f0:
- new_format = true;
- break;
- case 0x000010de:
- new_format = false;
- break;
- default:
- nvkm_error(subdev, "unknown header for HS blob %s\n", fw);
- return ERR_PTR(-EINVAL);
- }
-
- hs_ucode_patch_signature(falcon, acr_image, new_format);
-
- return acr_image;
-}
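For context, a hedged usage sketch of the loader above; the firmware name is
illustrative only, not something this patch defines:

	void *blob;

	blob = hs_ucode_load_blob(subdev, falcon, "acr/ucode_load");
	if (IS_ERR(blob))
		return PTR_ERR(blob);
	/* blob is now patched with the debug or production signature */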
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/hs_ucode.h b/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/hs_ucode.h
deleted file mode 100644
index d8cfc6f7752a..000000000000
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/hs_ucode.h
+++ /dev/null
@@ -1,81 +0,0 @@
-/*
- * Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
- * DEALINGS IN THE SOFTWARE.
- */
-
-#ifndef __NVKM_SECBOOT_HS_UCODE_H__
-#define __NVKM_SECBOOT_HS_UCODE_H__
-
-#include <core/os.h>
-#include <core/subdev.h>
-
-struct nvkm_falcon;
-
-/**
- * struct hsf_fw_header - HS firmware descriptor
- * @sig_dbg_offset: offset of the debug signature
- * @sig_dbg_size: size of the debug signature
- * @sig_prod_offset: offset of the production signature
- * @sig_prod_size: size of the production signature
- * @patch_loc: offset of the offset (sic) of where the signature is
- * @patch_sig: offset of the offset (sic) to add to sig_*_offset
- * @hdr_offset: offset of the load header (see struct hs_load_header)
- * @hdr_size: size of above header
- *
- * This structure is embedded in the HS firmware image at
- * hs_bin_hdr.header_offset.
- */
-struct hsf_fw_header {
- u32 sig_dbg_offset;
- u32 sig_dbg_size;
- u32 sig_prod_offset;
- u32 sig_prod_size;
- u32 patch_loc;
- u32 patch_sig;
- u32 hdr_offset;
- u32 hdr_size;
-};
-
-/**
- * struct hsf_load_header - HS firmware load header
- */
-struct hsf_load_header {
- u32 non_sec_code_off;
- u32 non_sec_code_size;
- u32 data_dma_base;
- u32 data_size;
- u32 num_apps;
- /*
- * Organized as follows:
- * - app0_code_off
- * - app1_code_off
- * - ...
- * - appn_code_off
- * - app0_code_size
- * - app1_code_size
- * - ...
- */
- u32 apps[0];
-};
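The flattened apps[] layout above is easy to misread: the first num_apps words
are code offsets, and the next num_apps words are the matching code sizes. A
minimal sketch of accessors under that assumption (hypothetical helpers, not
part of the original header):

	static inline u32 hsf_app_code_off(const struct hsf_load_header *hdr, u32 i)
	{
		return hdr->apps[i];
	}

	static inline u32 hsf_app_code_size(const struct hsf_load_header *hdr, u32 i)
	{
		return hdr->apps[hdr->num_apps + i];
	}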
-
-void *hs_ucode_load_blob(struct nvkm_subdev *, const struct nvkm_falcon *,
- const char *);
-
-#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/ls_ucode.h b/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/ls_ucode.h
deleted file mode 100644
index d43f906da3a7..000000000000
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/ls_ucode.h
+++ /dev/null
@@ -1,161 +0,0 @@
-/*
- * Copyright (c) 2014, NVIDIA CORPORATION. All rights reserved.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
- * DEALINGS IN THE SOFTWARE.
- */
-
-#ifndef __NVKM_SECBOOT_LS_UCODE_H__
-#define __NVKM_SECBOOT_LS_UCODE_H__
-
-#include <core/os.h>
-#include <core/subdev.h>
-#include <subdev/secboot.h>
-
-struct nvkm_acr;
-
-/**
- * struct ls_ucode_img_desc - descriptor of firmware image
- * @descriptor_size: size of this descriptor
- * @image_size: size of the whole image
- * @bootloader_start_offset: start offset of the bootloader in ucode image
- * @bootloader_size: size of the bootloader
- * @bootloader_imem_offset: start offset of the bootloader in IMEM
- * @bootloader_entry_point: entry point of the bootloader in IMEM
- * @app_start_offset: start offset of the LS firmware
- * @app_size: size of the LS firmware's code and data
- * @app_imem_offset: offset of the app in IMEM
- * @app_imem_entry: entry point of the app in IMEM
- * @app_dmem_offset: offset of the data in DMEM
- * @app_resident_code_offset: offset of app code from app_start_offset
- * @app_resident_code_size: size of the code
- * @app_resident_data_offset: offset of data from app_start_offset
- * @app_resident_data_size: size of data
- *
- * A firmware image contains the code, data, and bootloader of a given LS
- * falcon in a single blob. This structure describes where everything is.
- *
- * This can be generated from a (bootloader, code, data) set if they have
- * been loaded separately, or come directly from a file.
- */
-struct ls_ucode_img_desc {
- u32 descriptor_size;
- u32 image_size;
- u32 tools_version;
- u32 app_version;
- char date[64];
- u32 bootloader_start_offset;
- u32 bootloader_size;
- u32 bootloader_imem_offset;
- u32 bootloader_entry_point;
- u32 app_start_offset;
- u32 app_size;
- u32 app_imem_offset;
- u32 app_imem_entry;
- u32 app_dmem_offset;
- u32 app_resident_code_offset;
- u32 app_resident_code_size;
- u32 app_resident_data_offset;
- u32 app_resident_data_size;
- u32 nb_overlays;
-	struct { u32 start; u32 size; } load_ovl[64];
- u32 compressed;
-};
-
-/**
- * struct ls_ucode_img - temporary storage for loaded LS firmwares
- * @node: to link within lsf_ucode_mgr
- * @falcon_id: ID of the falcon this LS firmware is for
- * @ucode_desc: loaded or generated map of ucode_data
- * @ucode_data: firmware payload (code and data)
- * @ucode_size: size in bytes of data in ucode_data
- * @ucode_off: offset of the ucode in ucode_data
- * @sig: signature for this firmware
- * @sig_size: size of the signature in bytes
- *
- * Preparing the WPR LS blob requires information about all the LS firmwares
- * (size, etc) to be known. This structure contains all the data of one LS
- * firmware.
- */
-struct ls_ucode_img {
- struct list_head node;
- enum nvkm_secboot_falcon falcon_id;
-
- struct ls_ucode_img_desc ucode_desc;
- u8 *ucode_data;
- u32 ucode_size;
- u32 ucode_off;
-
- u8 *sig;
- u32 sig_size;
-};
-
-/**
- * struct fw_bin_header - header of firmware files
- * @bin_magic: always 0x3b1d14f0
- * @bin_ver: version of the bin format
- * @bin_size: entire image size including this header
- * @header_offset: offset of the firmware/bootloader header in the file
- * @data_offset: offset of the firmware/bootloader payload in the file
- * @data_size: size of the payload
- *
- * This header is located at the beginning of the HS firmware and HS bootloader
- * files, to describe where the headers and data can be found.
- */
-struct fw_bin_header {
- u32 bin_magic;
- u32 bin_ver;
- u32 bin_size;
- u32 header_offset;
- u32 data_offset;
- u32 data_size;
-};
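Note the tension with hs_ucode_load_blob() above, which also accepts the
legacy first word 0x000010de in addition to the 0x3b1d14f0 magic documented
here. A hedged sanity-check sketch (fw_bin_header_valid() is a hypothetical
helper, not part of this patch):

	static bool fw_bin_header_valid(const struct fw_bin_header *hdr, size_t blob_size)
	{
		/* 0x3b1d14f0 is the new-format magic, 0x000010de the legacy one */
		if (hdr->bin_magic != 0x3b1d14f0 && hdr->bin_magic != 0x000010de)
			return false;
		/* payload must lie within the advertised image size */
		return hdr->bin_size <= blob_size &&
		       hdr->data_offset + hdr->data_size <= hdr->bin_size;
	}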
-
-/**
- * struct fw_bl_desc - firmware bootloader descriptor
- * @start_tag: starting tag of bootloader
- * @desc_dmem_load_off: DMEM offset of flcn_bl_dmem_desc
- * @code_off: offset of code section
- * @code_size: size of code section
- * @data_off: offset of data section
- * @data_size: size of data section
- *
- * This structure is embedded in bootloader firmware files to describe the
- * IMEM and DMEM layout expected by the bootloader.
- */
-struct fw_bl_desc {
- u32 start_tag;
- u32 dmem_load_off;
- u32 code_off;
- u32 code_size;
- u32 data_off;
- u32 data_size;
-};
-
-int acr_ls_ucode_load_fecs(const struct nvkm_secboot *, int,
- struct ls_ucode_img *);
-int acr_ls_ucode_load_gpccs(const struct nvkm_secboot *, int,
- struct ls_ucode_img *);
-int acr_ls_ucode_load_pmu(const struct nvkm_secboot *, int,
- struct ls_ucode_img *);
-int acr_ls_pmu_post_run(const struct nvkm_acr *, const struct nvkm_secboot *);
-int acr_ls_ucode_load_sec2(const struct nvkm_secboot *, int,
- struct ls_ucode_img *);
-int acr_ls_sec2_post_run(const struct nvkm_acr *, const struct nvkm_secboot *);
-
-#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/ls_ucode_gr.c b/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/ls_ucode_gr.c
deleted file mode 100644
index 821d3b2bdb1f..000000000000
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/ls_ucode_gr.c
+++ /dev/null
@@ -1,160 +0,0 @@
-/*
- * Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
- * DEALINGS IN THE SOFTWARE.
- */
-
-
-#include "ls_ucode.h"
-#include "acr.h"
-
-#include <core/firmware.h>
-
-#define BL_DESC_BLK_SIZE 256
-/**
- * ls_ucode_img_build() - build a ucode image and descriptor from the
- * provided bootloader, code and data
- *
- * @bl: bootloader image, including 16-byte descriptor
- * @code: LS firmware code segment
- * @data: LS firmware data segment
- * @desc: ucode descriptor to be written
- *
- * Return: the allocated ucode image with corresponding descriptor information.
- * @desc is also updated to contain the right offsets within the returned image.
- */
-static void *
-ls_ucode_img_build(const struct firmware *bl, const struct firmware *code,
- const struct firmware *data, struct ls_ucode_img_desc *desc)
-{
- struct fw_bin_header *bin_hdr = (void *)bl->data;
- struct fw_bl_desc *bl_desc = (void *)bl->data + bin_hdr->header_offset;
- void *bl_data = (void *)bl->data + bin_hdr->data_offset;
- u32 pos = 0;
- void *image;
-
- desc->bootloader_start_offset = pos;
- desc->bootloader_size = ALIGN(bl_desc->code_size, sizeof(u32));
- desc->bootloader_imem_offset = bl_desc->start_tag * 256;
- desc->bootloader_entry_point = bl_desc->start_tag * 256;
-
- pos = ALIGN(pos + desc->bootloader_size, BL_DESC_BLK_SIZE);
- desc->app_start_offset = pos;
- desc->app_size = ALIGN(code->size, BL_DESC_BLK_SIZE) +
- ALIGN(data->size, BL_DESC_BLK_SIZE);
- desc->app_imem_offset = 0;
- desc->app_imem_entry = 0;
- desc->app_dmem_offset = 0;
- desc->app_resident_code_offset = 0;
- desc->app_resident_code_size = ALIGN(code->size, BL_DESC_BLK_SIZE);
-
- pos = ALIGN(pos + desc->app_resident_code_size, BL_DESC_BLK_SIZE);
- desc->app_resident_data_offset = pos - desc->app_start_offset;
- desc->app_resident_data_size = ALIGN(data->size, BL_DESC_BLK_SIZE);
-
- desc->image_size = ALIGN(bl_desc->code_size, BL_DESC_BLK_SIZE) +
- desc->app_size;
-
- image = kzalloc(desc->image_size, GFP_KERNEL);
- if (!image)
- return ERR_PTR(-ENOMEM);
-
- memcpy(image + desc->bootloader_start_offset, bl_data,
- bl_desc->code_size);
- memcpy(image + desc->app_start_offset, code->data, code->size);
- memcpy(image + desc->app_start_offset + desc->app_resident_data_offset,
- data->data, data->size);
-
- return image;
-}
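To make the ALIGN arithmetic above concrete, take some hypothetical sizes:
bl_desc->code_size = 0x180, code->size = 0x5000, data->size = 0x800, with
BL_DESC_BLK_SIZE = 256 (0x100). The descriptor then comes out as:

	bootloader_size          = ALIGN(0x180, 4)                = 0x180
	app_start_offset         = ALIGN(0x180, 0x100)            = 0x200
	app_resident_code_size   = ALIGN(0x5000, 0x100)           = 0x5000
	app_resident_data_offset = ALIGN(0x5200, 0x100) - 0x200   = 0x5000
	app_size                 = 0x5000 + ALIGN(0x800, 0x100)   = 0x5800
	image_size               = ALIGN(0x180, 0x100) + 0x5800   = 0x5a00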
-
-/**
- * ls_ucode_img_load_gr() - load and prepare an LS GR ucode image
- *
- * Load the LS microcode, bootloader and signature and pack them into a single
- * blob. Also generate the corresponding ucode descriptor.
- */
-static int
-ls_ucode_img_load_gr(const struct nvkm_subdev *subdev, int maxver,
- struct ls_ucode_img *img, const char *falcon_name)
-{
- const struct firmware *bl, *code, *data, *sig;
- char f[64];
- int ret;
-
- snprintf(f, sizeof(f), "gr/%s_bl", falcon_name);
- ret = nvkm_firmware_get(subdev, f, &bl);
- if (ret)
- goto error;
-
- snprintf(f, sizeof(f), "gr/%s_inst", falcon_name);
- ret = nvkm_firmware_get(subdev, f, &code);
- if (ret)
- goto free_bl;
-
- snprintf(f, sizeof(f), "gr/%s_data", falcon_name);
- ret = nvkm_firmware_get(subdev, f, &data);
- if (ret)
- goto free_inst;
-
- snprintf(f, sizeof(f), "gr/%s_sig", falcon_name);
- ret = nvkm_firmware_get(subdev, f, &sig);
- if (ret)
- goto free_data;
-
- img->sig = kmemdup(sig->data, sig->size, GFP_KERNEL);
- if (!img->sig) {
- ret = -ENOMEM;
- goto free_sig;
- }
- img->sig_size = sig->size;
-
- img->ucode_data = ls_ucode_img_build(bl, code, data,
- &img->ucode_desc);
- if (IS_ERR(img->ucode_data)) {
- kfree(img->sig);
- ret = PTR_ERR(img->ucode_data);
- goto free_sig;
- }
- img->ucode_size = img->ucode_desc.image_size;
-
-free_sig:
- nvkm_firmware_put(sig);
-free_data:
- nvkm_firmware_put(data);
-free_inst:
- nvkm_firmware_put(code);
-free_bl:
- nvkm_firmware_put(bl);
-error:
- return ret;
-}
-
-int
-acr_ls_ucode_load_fecs(const struct nvkm_secboot *sb, int maxver,
- struct ls_ucode_img *img)
-{
- return ls_ucode_img_load_gr(&sb->subdev, maxver, img, "fecs");
-}
-
-int
-acr_ls_ucode_load_gpccs(const struct nvkm_secboot *sb, int maxver,
- struct ls_ucode_img *img)
-{
- return ls_ucode_img_load_gr(&sb->subdev, maxver, img, "gpccs");
-}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/ls_ucode_msgqueue.c b/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/ls_ucode_msgqueue.c
deleted file mode 100644
index a84a999445bb..000000000000
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/ls_ucode_msgqueue.c
+++ /dev/null
@@ -1,177 +0,0 @@
-/*
- * Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
- * DEALINGS IN THE SOFTWARE.
- */
-
-
-#include "ls_ucode.h"
-#include "acr.h"
-
-#include <core/firmware.h>
-#include <core/msgqueue.h>
-#include <subdev/pmu.h>
-#include <engine/sec2.h>
-#include <subdev/mc.h>
-#include <subdev/timer.h>
-
-/**
- * acr_ls_ucode_load_msgqueue() - load and prepare a ucode img for a msgqueue fw
- *
- * Load the LS microcode, desc and signature and pack them into a single
- * blob.
- */
-static int
-acr_ls_ucode_load_msgqueue(const struct nvkm_subdev *subdev, const char *name,
- int maxver, struct ls_ucode_img *img)
-{
- const struct firmware *image, *desc, *sig;
- char f[64];
- int ver, ret;
-
- snprintf(f, sizeof(f), "%s/image", name);
- ver = nvkm_firmware_get_version(subdev, f, 0, maxver, &image);
- if (ver < 0)
- return ver;
- img->ucode_data = kmemdup(image->data, image->size, GFP_KERNEL);
- nvkm_firmware_put(image);
- if (!img->ucode_data)
- return -ENOMEM;
-
- snprintf(f, sizeof(f), "%s/desc", name);
- ret = nvkm_firmware_get_version(subdev, f, ver, ver, &desc);
- if (ret < 0)
- return ret;
- memcpy(&img->ucode_desc, desc->data, sizeof(img->ucode_desc));
- img->ucode_size = ALIGN(img->ucode_desc.app_start_offset + img->ucode_desc.app_size, 256);
- nvkm_firmware_put(desc);
-
- snprintf(f, sizeof(f), "%s/sig", name);
- ret = nvkm_firmware_get_version(subdev, f, ver, ver, &sig);
- if (ret < 0)
- return ret;
- img->sig_size = sig->size;
- img->sig = kmemdup(sig->data, sig->size, GFP_KERNEL);
- nvkm_firmware_put(sig);
- if (!img->sig)
- return -ENOMEM;
-
- return ver;
-}
-
-static int
-acr_ls_msgqueue_post_run(struct nvkm_msgqueue *queue,
- struct nvkm_falcon *falcon, u32 addr_args)
-{
- struct nvkm_device *device = falcon->owner->device;
- u8 buf[NVKM_MSGQUEUE_CMDLINE_SIZE];
-
- memset(buf, 0, sizeof(buf));
- nvkm_msgqueue_write_cmdline(queue, buf);
- nvkm_falcon_load_dmem(falcon, buf, addr_args, sizeof(buf), 0);
- /* rearm the queue so it will wait for the init message */
- nvkm_msgqueue_reinit(queue);
-
- /* Enable interrupts */
- nvkm_falcon_wr32(falcon, 0x10, 0xff);
- nvkm_mc_intr_mask(device, falcon->owner->index, true);
-
- /* Start LS firmware on boot falcon */
- nvkm_falcon_start(falcon);
-
- return 0;
-}
-
-int
-acr_ls_ucode_load_pmu(const struct nvkm_secboot *sb, int maxver,
- struct ls_ucode_img *img)
-{
- struct nvkm_pmu *pmu = sb->subdev.device->pmu;
- int ret;
-
- ret = acr_ls_ucode_load_msgqueue(&sb->subdev, "pmu", maxver, img);
- if (ret)
- return ret;
-
- /* Allocate the PMU queue corresponding to the FW version */
- ret = nvkm_msgqueue_new(img->ucode_desc.app_version, pmu->falcon,
- sb, &pmu->queue);
- if (ret)
- return ret;
-
- return 0;
-}
-
-int
-acr_ls_pmu_post_run(const struct nvkm_acr *acr, const struct nvkm_secboot *sb)
-{
- struct nvkm_device *device = sb->subdev.device;
- struct nvkm_pmu *pmu = device->pmu;
- u32 addr_args = pmu->falcon->data.limit - NVKM_MSGQUEUE_CMDLINE_SIZE;
- int ret;
-
- ret = acr_ls_msgqueue_post_run(pmu->queue, pmu->falcon, addr_args);
- if (ret)
- return ret;
-
- nvkm_debug(&sb->subdev, "%s started\n",
- nvkm_secboot_falcon_name[acr->boot_falcon]);
-
- return 0;
-}
-
-int
-acr_ls_ucode_load_sec2(const struct nvkm_secboot *sb, int maxver,
- struct ls_ucode_img *img)
-{
- struct nvkm_sec2 *sec = sb->subdev.device->sec2;
- int ver, ret;
-
- ver = acr_ls_ucode_load_msgqueue(&sb->subdev, "sec2", maxver, img);
- if (ver < 0)
- return ver;
-
-	/* Allocate the SEC2 queue corresponding to the FW version */
- ret = nvkm_msgqueue_new(img->ucode_desc.app_version, sec->falcon,
- sb, &sec->queue);
- if (ret)
- return ret;
-
- return ver;
-}
-
-int
-acr_ls_sec2_post_run(const struct nvkm_acr *acr, const struct nvkm_secboot *sb)
-{
- const struct nvkm_subdev *subdev = &sb->subdev;
- struct nvkm_device *device = subdev->device;
- struct nvkm_sec2 *sec = device->sec2;
-	/* on SEC2, arguments are always at the beginning of EMEM */
- const u32 addr_args = 0x01000000;
- int ret;
-
- ret = acr_ls_msgqueue_post_run(sec->queue, sec->falcon, addr_args);
- if (ret)
- return ret;
-
- nvkm_debug(&sb->subdev, "%s started\n",
- nvkm_secboot_falcon_name[acr->boot_falcon]);
-
- return 0;
-}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/priv.h b/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/priv.h
deleted file mode 100644
index 959a7b2dbdc9..000000000000
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/priv.h
+++ /dev/null
@@ -1,65 +0,0 @@
-/*
- * Copyright (c) 2015, NVIDIA CORPORATION. All rights reserved.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
- * DEALINGS IN THE SOFTWARE.
- */
-
-#ifndef __NVKM_SECBOOT_PRIV_H__
-#define __NVKM_SECBOOT_PRIV_H__
-
-#include <subdev/secboot.h>
-#include <subdev/mmu.h>
-struct nvkm_gpuobj;
-
-struct nvkm_secboot_func {
- int (*oneinit)(struct nvkm_secboot *);
- int (*fini)(struct nvkm_secboot *, bool suspend);
- void *(*dtor)(struct nvkm_secboot *);
- int (*run_blob)(struct nvkm_secboot *, struct nvkm_gpuobj *,
- struct nvkm_falcon *);
-};
-
-int nvkm_secboot_ctor(const struct nvkm_secboot_func *, struct nvkm_acr *,
- struct nvkm_device *, int, struct nvkm_secboot *);
-int nvkm_secboot_falcon_reset(struct nvkm_secboot *);
-int nvkm_secboot_falcon_run(struct nvkm_secboot *);
-
-extern const struct nvkm_secboot_func gp102_secboot;
-
-struct flcn_u64 {
- u32 lo;
- u32 hi;
-};
-
-static inline u64 flcn64_to_u64(const struct flcn_u64 f)
-{
- return ((u64)f.hi) << 32 | f.lo;
-}
-
-static inline struct flcn_u64 u64_to_flcn64(u64 u)
-{
- struct flcn_u64 ret;
-
- ret.hi = upper_32_bits(u);
- ret.lo = lower_32_bits(u);
-
- return ret;
-}
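As a quick usage check of the helpers above: u64_to_flcn64(0x123456789abcdef0ULL)
yields { .lo = 0x9abcdef0, .hi = 0x12345678 }, and passing that value back
through flcn64_to_u64() round-trips to the original 64-bit address.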
-
-#endif
diff --git a/drivers/gpu/drm/omapdrm/displays/Kconfig b/drivers/gpu/drm/omapdrm/displays/Kconfig
index 240dda102845..b562a8cd61bf 100644
--- a/drivers/gpu/drm/omapdrm/displays/Kconfig
+++ b/drivers/gpu/drm/omapdrm/displays/Kconfig
@@ -8,18 +8,18 @@ config DRM_OMAP_ENCODER_OPA362
through a GPIO.
config DRM_OMAP_ENCODER_TPD12S015
- tristate "TPD12S015 HDMI ESD protection and level shifter"
+ tristate "TPD12S015 HDMI ESD protection and level shifter"
help
Driver for TPD12S015, which offers HDMI ESD protection and level
shifting.
config DRM_OMAP_CONNECTOR_HDMI
- tristate "HDMI Connector"
+ tristate "HDMI Connector"
help
Driver for a generic HDMI connector.
config DRM_OMAP_CONNECTOR_ANALOG_TV
- tristate "Analog TV Connector"
+ tristate "Analog TV Connector"
help
Driver for a generic analog TV connector.
diff --git a/drivers/gpu/drm/omapdrm/dss/Kconfig b/drivers/gpu/drm/omapdrm/dss/Kconfig
index 956f23e1452d..72ae79c0c9b4 100644
--- a/drivers/gpu/drm/omapdrm/dss/Kconfig
+++ b/drivers/gpu/drm/omapdrm/dss/Kconfig
@@ -6,12 +6,12 @@ config OMAP_DSS_BASE
tristate
menuconfig OMAP2_DSS
- tristate "OMAP2+ Display Subsystem support"
+ tristate "OMAP2+ Display Subsystem support"
select OMAP_DSS_BASE
select VIDEOMODE_HELPERS
select OMAP2_DSS_INIT
select HDMI
- help
+ help
OMAP2+ Display Subsystem support.
if OMAP2_DSS
@@ -52,7 +52,7 @@ config OMAP2_DSS_DPI
config OMAP2_DSS_VENC
bool "VENC support"
- default y
+ default y
help
OMAP Video Encoder support for S-Video and composite TV-out.
@@ -61,7 +61,7 @@ config OMAP2_DSS_HDMI_COMMON
config OMAP4_DSS_HDMI
bool "HDMI support for OMAP4"
- default y
+ default y
select OMAP2_DSS_HDMI_COMMON
help
HDMI support for OMAP4 based SoCs.
@@ -85,7 +85,7 @@ config OMAP5_DSS_HDMI
config OMAP2_DSS_SDI
bool "SDI support"
- default n
+ default n
help
SDI (Serial Display Interface) support.
@@ -94,7 +94,7 @@ config OMAP2_DSS_SDI
config OMAP2_DSS_DSI
bool "DSI support"
- default n
+ default n
help
MIPI DSI (Display Serial Interface) support.
diff --git a/drivers/gpu/drm/omapdrm/dss/dispc.c b/drivers/gpu/drm/omapdrm/dss/dispc.c
index 413dbdd1771e..dbb90f2d2ccd 100644
--- a/drivers/gpu/drm/omapdrm/dss/dispc.c
+++ b/drivers/gpu/drm/omapdrm/dss/dispc.c
@@ -393,8 +393,7 @@ static void dispc_get_reg_field(struct dispc_device *dispc,
enum dispc_feat_reg_field id,
u8 *start, u8 *end)
{
- if (id >= dispc->feat->num_reg_fields)
- BUG();
+ BUG_ON(id >= dispc->feat->num_reg_fields);
*start = dispc->feat->reg_fields[id].start;
*end = dispc->feat->reg_fields[id].end;
diff --git a/drivers/gpu/drm/omapdrm/omap_connector.c b/drivers/gpu/drm/omapdrm/omap_connector.c
index 5b8799c69f68..94cded387174 100644
--- a/drivers/gpu/drm/omapdrm/omap_connector.c
+++ b/drivers/gpu/drm/omapdrm/omap_connector.c
@@ -229,7 +229,8 @@ static int omap_connector_get_modes(struct drm_connector *connector)
* operation to the panel API.
*/
if (omap_connector->output->panel)
- return drm_panel_get_modes(omap_connector->output->panel);
+ return drm_panel_get_modes(omap_connector->output->panel,
+ connector);
/*
* We can't retrieve modes, which can happen for instance for a DVI or
diff --git a/drivers/gpu/drm/omapdrm/omap_drv.c b/drivers/gpu/drm/omapdrm/omap_drv.c
index b3e22c890c51..d2750f60f519 100644
--- a/drivers/gpu/drm/omapdrm/omap_drv.c
+++ b/drivers/gpu/drm/omapdrm/omap_drv.c
@@ -217,8 +217,8 @@ static int omap_display_id(struct omap_dss_device *output)
} else if (output->bridge) {
struct drm_bridge *bridge = output->bridge;
- while (bridge->next)
- bridge = bridge->next;
+ while (drm_bridge_get_next_bridge(bridge))
+ bridge = drm_bridge_get_next_bridge(bridge);
node = bridge->of_node;
} else if (output->panel) {
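The loop above is the new idiom for finding the last bridge in a chain now
that bridge->next is being retired. A minimal sketch of the same walk,
assuming only that drm_bridge_get_next_bridge() returns the next bridge or
NULL at the end of the chain (last_bridge() is a hypothetical helper, not
part of this patch):

	static struct drm_bridge *last_bridge(struct drm_bridge *bridge)
	{
		struct drm_bridge *next;

		while ((next = drm_bridge_get_next_bridge(bridge)))
			bridge = next;

		return bridge;
	}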
diff --git a/drivers/gpu/drm/omapdrm/omap_encoder.c b/drivers/gpu/drm/omapdrm/omap_encoder.c
index 24bbe9f2a32e..4f2165a37795 100644
--- a/drivers/gpu/drm/omapdrm/omap_encoder.c
+++ b/drivers/gpu/drm/omapdrm/omap_encoder.c
@@ -126,7 +126,8 @@ static void omap_encoder_mode_set(struct drm_encoder *encoder,
for (dssdev = output; dssdev; dssdev = dssdev->next)
omap_encoder_update_videomode_flags(&vm, dssdev->bus_flags);
- for (bridge = output->bridge; bridge; bridge = bridge->next) {
+ for (bridge = output->bridge; bridge;
+ bridge = drm_bridge_get_next_bridge(bridge)) {
if (!bridge->timings)
continue;
diff --git a/drivers/gpu/drm/omapdrm/omap_fbdev.c b/drivers/gpu/drm/omapdrm/omap_fbdev.c
index 58f53946ee4d..b06e5cbfd03a 100644
--- a/drivers/gpu/drm/omapdrm/omap_fbdev.c
+++ b/drivers/gpu/drm/omapdrm/omap_fbdev.c
@@ -70,7 +70,7 @@ fallback:
return drm_fb_helper_pan_display(var, fbi);
}
-static struct fb_ops omap_fb_ops = {
+static const struct fb_ops omap_fb_ops = {
.owner = THIS_MODULE,
.fb_check_var = drm_fb_helper_check_var,
diff --git a/drivers/gpu/drm/omapdrm/omap_gem_dmabuf.c b/drivers/gpu/drm/omapdrm/omap_gem_dmabuf.c
index 7344bb61936c..b319fe7f2371 100644
--- a/drivers/gpu/drm/omapdrm/omap_gem_dmabuf.c
+++ b/drivers/gpu/drm/omapdrm/omap_gem_dmabuf.c
@@ -85,25 +85,6 @@ static int omap_gem_dmabuf_end_cpu_access(struct dma_buf *buffer,
return 0;
}
-static void *omap_gem_dmabuf_kmap(struct dma_buf *buffer,
- unsigned long page_num)
-{
- struct drm_gem_object *obj = buffer->priv;
- struct page **pages;
- omap_gem_get_pages(obj, &pages, false);
- omap_gem_cpu_sync_page(obj, page_num);
- return kmap(pages[page_num]);
-}
-
-static void omap_gem_dmabuf_kunmap(struct dma_buf *buffer,
- unsigned long page_num, void *addr)
-{
- struct drm_gem_object *obj = buffer->priv;
- struct page **pages;
- omap_gem_get_pages(obj, &pages, false);
- kunmap(pages[page_num]);
-}
-
static int omap_gem_dmabuf_mmap(struct dma_buf *buffer,
struct vm_area_struct *vma)
{
@@ -123,8 +104,6 @@ static const struct dma_buf_ops omap_dmabuf_ops = {
.release = drm_gem_dmabuf_release,
.begin_cpu_access = omap_gem_dmabuf_begin_cpu_access,
.end_cpu_access = omap_gem_dmabuf_end_cpu_access,
- .map = omap_gem_dmabuf_kmap,
- .unmap = omap_gem_dmabuf_kunmap,
.mmap = omap_gem_dmabuf_mmap,
};
diff --git a/drivers/gpu/drm/panel/Kconfig b/drivers/gpu/drm/panel/Kconfig
index f152bc4eeb53..ae44ac2ec106 100644
--- a/drivers/gpu/drm/panel/Kconfig
+++ b/drivers/gpu/drm/panel/Kconfig
@@ -18,6 +18,17 @@ config DRM_PANEL_ARM_VERSATILE
reference designs. The panel is detected using special registers
in the Versatile family syscon registers.
+config DRM_PANEL_BOE_HIMAX8279D
+ tristate "Boe Himax8279d panel"
+ depends on OF
+ depends on DRM_MIPI_DSI
+ depends on BACKLIGHT_CLASS_DEVICE
+ help
+ Say Y here if you want to enable support for Boe Himax8279d
+ TFT-LCD modules. The panel has a 1200x1920 resolution and uses
+ 24 bit RGB per pixel. It provides a MIPI DSI interface to
+ the host and has a built-in LED backlight.
+
config DRM_PANEL_LVDS
tristate "Generic LVDS panel driver"
depends on OF
@@ -98,6 +109,17 @@ config DRM_PANEL_KINGDISPLAY_KD097D04
24 bit RGB per pixel. It provides a MIPI DSI interface to
the host and has a built-in LED backlight.
+config DRM_PANEL_LEADTEK_LTK500HD1829
+ tristate "Leadtek LTK500HD1829 panel"
+ depends on OF
+ depends on DRM_MIPI_DSI
+ depends on BACKLIGHT_CLASS_DEVICE
+ help
+	  Say Y here if you want to enable support for Leadtek LTK500HD1829
+	  TFT-LCD modules. The panel has a 720x1280 resolution and uses
+ 24 bit RGB per pixel. It provides a MIPI DSI interface to
+ the host and has a built-in LED backlight.
+
config DRM_PANEL_SAMSUNG_LD9040
tristate "Samsung LD9040 RGB/SPI panel"
depends on OF && SPI
@@ -316,6 +338,17 @@ config DRM_PANEL_SITRONIX_ST7789V
Say Y here if you want to enable support for the Sitronix
ST7789V controller for 240x320 LCD panels
+config DRM_PANEL_SONY_ACX424AKP
+ tristate "Sony ACX424AKP DSI command mode panel"
+ depends on OF
+ depends on DRM_MIPI_DSI
+ depends on BACKLIGHT_CLASS_DEVICE
+ select VIDEOMODE_HELPERS
+ help
+ Say Y here if you want to enable the Sony ACX424 display
+ panel. This panel supports DSI in both command and video
+ mode.
+
config DRM_PANEL_SONY_ACX565AKM
tristate "Sony ACX565AKM panel"
depends on GPIOLIB && OF && SPI
@@ -355,4 +388,14 @@ config DRM_PANEL_TRULY_NT35597_WQXGA
help
Say Y here if you want to enable support for Truly NT35597 WQXGA Dual DSI
Video Mode panel
+
+config DRM_PANEL_XINPENG_XPP055C272
+ tristate "Xinpeng XPP055C272 panel driver"
+ depends on OF
+ depends on DRM_MIPI_DSI
+ depends on BACKLIGHT_CLASS_DEVICE
+ help
+ Say Y here if you want to enable support for the Xinpeng
+ XPP055C272 controller for 720x1280 LCD panels with MIPI/RGB/SPI
+ system interfaces.
endmenu
diff --git a/drivers/gpu/drm/panel/Makefile b/drivers/gpu/drm/panel/Makefile
index b6cd39fe0f20..7c4d3c581fd4 100644
--- a/drivers/gpu/drm/panel/Makefile
+++ b/drivers/gpu/drm/panel/Makefile
@@ -1,5 +1,6 @@
# SPDX-License-Identifier: GPL-2.0
obj-$(CONFIG_DRM_PANEL_ARM_VERSATILE) += panel-arm-versatile.o
+obj-$(CONFIG_DRM_PANEL_BOE_HIMAX8279D) += panel-boe-himax8279d.o
obj-$(CONFIG_DRM_PANEL_LVDS) += panel-lvds.o
obj-$(CONFIG_DRM_PANEL_SIMPLE) += panel-simple.o
obj-$(CONFIG_DRM_PANEL_FEIYANG_FY07024DI26A30D) += panel-feiyang-fy07024di26a30d.o
@@ -8,6 +9,7 @@ obj-$(CONFIG_DRM_PANEL_ILITEK_ILI9881C) += panel-ilitek-ili9881c.o
obj-$(CONFIG_DRM_PANEL_INNOLUX_P079ZCA) += panel-innolux-p079zca.o
obj-$(CONFIG_DRM_PANEL_JDI_LT070ME05000) += panel-jdi-lt070me05000.o
obj-$(CONFIG_DRM_PANEL_KINGDISPLAY_KD097D04) += panel-kingdisplay-kd097d04.o
+obj-$(CONFIG_DRM_PANEL_LEADTEK_LTK500HD1829) += panel-leadtek-ltk500hd1829.o
obj-$(CONFIG_DRM_PANEL_LG_LB035Q02) += panel-lg-lb035q02.o
obj-$(CONFIG_DRM_PANEL_LG_LG4573) += panel-lg-lg4573.o
obj-$(CONFIG_DRM_PANEL_NEC_NL8048HL11) += panel-nec-nl8048hl11.o
@@ -33,8 +35,10 @@ obj-$(CONFIG_DRM_PANEL_SHARP_LS037V7DW01) += panel-sharp-ls037v7dw01.o
obj-$(CONFIG_DRM_PANEL_SHARP_LS043T1LE01) += panel-sharp-ls043t1le01.o
obj-$(CONFIG_DRM_PANEL_SITRONIX_ST7701) += panel-sitronix-st7701.o
obj-$(CONFIG_DRM_PANEL_SITRONIX_ST7789V) += panel-sitronix-st7789v.o
+obj-$(CONFIG_DRM_PANEL_SONY_ACX424AKP) += panel-sony-acx424akp.o
obj-$(CONFIG_DRM_PANEL_SONY_ACX565AKM) += panel-sony-acx565akm.o
obj-$(CONFIG_DRM_PANEL_TPO_TD028TTEC1) += panel-tpo-td028ttec1.o
obj-$(CONFIG_DRM_PANEL_TPO_TD043MTEA1) += panel-tpo-td043mtea1.o
obj-$(CONFIG_DRM_PANEL_TPO_TPG110) += panel-tpo-tpg110.o
obj-$(CONFIG_DRM_PANEL_TRULY_NT35597_WQXGA) += panel-truly-nt35597.o
+obj-$(CONFIG_DRM_PANEL_XINPENG_XPP055C272) += panel-xinpeng-xpp055c272.o
diff --git a/drivers/gpu/drm/panel/panel-arm-versatile.c b/drivers/gpu/drm/panel/panel-arm-versatile.c
index a0574dc03e16..41444a73c980 100644
--- a/drivers/gpu/drm/panel/panel-arm-versatile.c
+++ b/drivers/gpu/drm/panel/panel-arm-versatile.c
@@ -260,9 +260,9 @@ static int versatile_panel_enable(struct drm_panel *panel)
return 0;
}
-static int versatile_panel_get_modes(struct drm_panel *panel)
+static int versatile_panel_get_modes(struct drm_panel *panel,
+ struct drm_connector *connector)
{
- struct drm_connector *connector = panel->connector;
struct versatile_panel *vpanel = to_versatile_panel(panel);
struct drm_display_mode *mode;
@@ -270,7 +270,7 @@ static int versatile_panel_get_modes(struct drm_panel *panel)
connector->display_info.height_mm = vpanel->panel_type->height_mm;
connector->display_info.bus_flags = vpanel->panel_type->bus_flags;
- mode = drm_mode_duplicate(panel->drm, &vpanel->panel_type->mode);
+ mode = drm_mode_duplicate(connector->dev, &vpanel->panel_type->mode);
drm_mode_set_name(mode);
mode->type = DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED;
diff --git a/drivers/gpu/drm/panel/panel-boe-himax8279d.c b/drivers/gpu/drm/panel/panel-boe-himax8279d.c
new file mode 100644
index 000000000000..74d58ee7d04c
--- /dev/null
+++ b/drivers/gpu/drm/panel/panel-boe-himax8279d.c
@@ -0,0 +1,978 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2019, Huaqin Telecom Technology Co., Ltd
+ *
+ * Author: Jerry Han <jerry.han.hq@gmail.com>
+ *
+ */
+
+#include <linux/delay.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+
+#include <linux/gpio/consumer.h>
+#include <linux/regulator/consumer.h>
+
+#include <drm/drm_device.h>
+#include <drm/drm_mipi_dsi.h>
+#include <drm/drm_modes.h>
+#include <drm/drm_panel.h>
+#include <drm/drm_print.h>
+
+#include <video/mipi_display.h>
+
+struct panel_cmd {
+ char cmd;
+ char data;
+};
+
+struct panel_desc {
+ const struct drm_display_mode *display_mode;
+ unsigned int bpc;
+ unsigned int width_mm;
+ unsigned int height_mm;
+
+ unsigned long mode_flags;
+ enum mipi_dsi_pixel_format format;
+ unsigned int lanes;
+ const struct panel_cmd *on_cmds;
+ unsigned int on_cmds_num;
+};
+
+struct panel_info {
+ struct drm_panel base;
+ struct mipi_dsi_device *link;
+ const struct panel_desc *desc;
+
+ struct gpio_desc *enable_gpio;
+ struct gpio_desc *pp33_gpio;
+ struct gpio_desc *pp18_gpio;
+
+ bool prepared;
+ bool enabled;
+};
+
+static inline struct panel_info *to_panel_info(struct drm_panel *panel)
+{
+ return container_of(panel, struct panel_info, base);
+}
+
+static void disable_gpios(struct panel_info *pinfo)
+{
+ gpiod_set_value(pinfo->enable_gpio, 0);
+ gpiod_set_value(pinfo->pp33_gpio, 0);
+ gpiod_set_value(pinfo->pp18_gpio, 0);
+}
+
+static int send_mipi_cmds(struct drm_panel *panel, const struct panel_cmd *cmds)
+{
+ struct panel_info *pinfo = to_panel_info(panel);
+ unsigned int i = 0;
+ int err;
+
+ for (i = 0; i < pinfo->desc->on_cmds_num; i++) {
+ err = mipi_dsi_dcs_write_buffer(pinfo->link, &cmds[i],
+ sizeof(struct panel_cmd));
+
+ if (err < 0)
+ return err;
+ }
+
+ return 0;
+}
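Each { cmd, data } entry in the tables below therefore goes out as a single
two-byte DCS write: the first byte selects the panel register, the second
carries the value written to it.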
+
+static int boe_panel_disable(struct drm_panel *panel)
+{
+ struct panel_info *pinfo = to_panel_info(panel);
+ int err;
+
+ if (!pinfo->enabled)
+ return 0;
+
+ err = mipi_dsi_dcs_set_display_off(pinfo->link);
+ if (err < 0) {
+ DRM_DEV_ERROR(panel->dev, "failed to set display off: %d\n",
+ err);
+ return err;
+ }
+
+ pinfo->enabled = false;
+
+ return 0;
+}
+
+static int boe_panel_unprepare(struct drm_panel *panel)
+{
+ struct panel_info *pinfo = to_panel_info(panel);
+ int err;
+
+ if (!pinfo->prepared)
+ return 0;
+
+ err = mipi_dsi_dcs_set_display_off(pinfo->link);
+ if (err < 0)
+ DRM_DEV_ERROR(panel->dev, "failed to set display off: %d\n",
+ err);
+
+ err = mipi_dsi_dcs_enter_sleep_mode(pinfo->link);
+ if (err < 0)
+ DRM_DEV_ERROR(panel->dev, "failed to enter sleep mode: %d\n",
+ err);
+
+ /* sleep_mode_delay: 1ms - 2ms */
+ usleep_range(1000, 2000);
+
+ disable_gpios(pinfo);
+
+ pinfo->prepared = false;
+
+ return 0;
+}
+
+static int boe_panel_prepare(struct drm_panel *panel)
+{
+ struct panel_info *pinfo = to_panel_info(panel);
+ int err;
+
+ if (pinfo->prepared)
+ return 0;
+
+ gpiod_set_value(pinfo->pp18_gpio, 1);
+ /* T1: 5ms - 6ms */
+ usleep_range(5000, 6000);
+ gpiod_set_value(pinfo->pp33_gpio, 1);
+
+ /* reset sequence */
+ /* T2: 14ms - 15ms */
+ usleep_range(14000, 15000);
+ gpiod_set_value(pinfo->enable_gpio, 1);
+
+ /* T3: 1ms - 2ms */
+ usleep_range(1000, 2000);
+ gpiod_set_value(pinfo->enable_gpio, 0);
+
+ /* T4: 1ms - 2ms */
+ usleep_range(1000, 2000);
+ gpiod_set_value(pinfo->enable_gpio, 1);
+
+ /* T5: 5ms - 6ms */
+ usleep_range(5000, 6000);
+
+ /* send init code */
+ err = send_mipi_cmds(panel, pinfo->desc->on_cmds);
+ if (err < 0) {
+ DRM_DEV_ERROR(panel->dev, "failed to send DCS Init Code: %d\n",
+ err);
+ goto poweroff;
+ }
+
+ err = mipi_dsi_dcs_exit_sleep_mode(pinfo->link);
+ if (err < 0) {
+ DRM_DEV_ERROR(panel->dev, "failed to exit sleep mode: %d\n",
+ err);
+ goto poweroff;
+ }
+
+ /* T6: 120ms - 121ms */
+ usleep_range(120000, 121000);
+
+ err = mipi_dsi_dcs_set_display_on(pinfo->link);
+ if (err < 0) {
+ DRM_DEV_ERROR(panel->dev, "failed to set display on: %d\n",
+ err);
+ goto poweroff;
+ }
+
+ /* T7: 20ms - 21ms */
+ usleep_range(20000, 21000);
+
+ pinfo->prepared = true;
+
+ return 0;
+
+poweroff:
+ disable_gpios(pinfo);
+ return err;
+}
+
+static int boe_panel_enable(struct drm_panel *panel)
+{
+ struct panel_info *pinfo = to_panel_info(panel);
+ int ret;
+
+ if (pinfo->enabled)
+ return 0;
+
+ usleep_range(120000, 121000);
+
+ ret = mipi_dsi_dcs_set_display_on(pinfo->link);
+ if (ret < 0) {
+ DRM_DEV_ERROR(panel->dev, "failed to set display on: %d\n",
+ ret);
+ return ret;
+ }
+
+ pinfo->enabled = true;
+
+ return 0;
+}
+
+static int boe_panel_get_modes(struct drm_panel *panel,
+ struct drm_connector *connector)
+{
+ struct panel_info *pinfo = to_panel_info(panel);
+ const struct drm_display_mode *m = pinfo->desc->display_mode;
+ struct drm_display_mode *mode;
+
+ mode = drm_mode_duplicate(connector->dev, m);
+ if (!mode) {
+ DRM_DEV_ERROR(pinfo->base.dev, "failed to add mode %ux%u@%u\n",
+ m->hdisplay, m->vdisplay, m->vrefresh);
+ return -ENOMEM;
+ }
+
+ drm_mode_set_name(mode);
+
+ drm_mode_probed_add(connector, mode);
+
+ connector->display_info.width_mm = pinfo->desc->width_mm;
+ connector->display_info.height_mm = pinfo->desc->height_mm;
+ connector->display_info.bpc = pinfo->desc->bpc;
+
+ return 1;
+}
+
+static const struct drm_panel_funcs panel_funcs = {
+ .disable = boe_panel_disable,
+ .unprepare = boe_panel_unprepare,
+ .prepare = boe_panel_prepare,
+ .enable = boe_panel_enable,
+ .get_modes = boe_panel_get_modes,
+};
+
+static const struct drm_display_mode default_display_mode = {
+ .clock = 159420,
+ .hdisplay = 1200,
+ .hsync_start = 1200 + 80,
+ .hsync_end = 1200 + 80 + 60,
+ .htotal = 1200 + 80 + 60 + 24,
+ .vdisplay = 1920,
+ .vsync_start = 1920 + 10,
+ .vsync_end = 1920 + 10 + 14,
+ .vtotal = 1920 + 10 + 14 + 4,
+ .vrefresh = 60,
+};
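As a sanity check on the timings above: htotal = 1200 + 80 + 60 + 24 = 1364
and vtotal = 1920 + 10 + 14 + 4 = 1948, so 159420 kHz / (1364 * 1948) is
approximately 60.0 Hz, matching .vrefresh = 60.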
+
+/* 8 inch */
+static const struct panel_cmd boe_himax8279d8p_on_cmds[] = {
+ { 0xB0, 0x05 },
+ { 0xB1, 0xE5 },
+ { 0xB3, 0x52 },
+ { 0xC0, 0x00 },
+ { 0xC2, 0x57 },
+ { 0xD9, 0x85 },
+ { 0xB0, 0x01 },
+ { 0xC8, 0x00 },
+ { 0xC9, 0x00 },
+ { 0xCC, 0x26 },
+ { 0xCD, 0x26 },
+ { 0xDC, 0x00 },
+ { 0xDD, 0x00 },
+ { 0xE0, 0x26 },
+ { 0xE1, 0x26 },
+ { 0xB0, 0x03 },
+ { 0xC3, 0x2A },
+ { 0xE7, 0x2A },
+ { 0xC5, 0x2A },
+ { 0xDE, 0x2A },
+ { 0xBC, 0x02 },
+ { 0xCB, 0x02 },
+ { 0xB0, 0x00 },
+ { 0xB6, 0x03 },
+ { 0xBA, 0x8B },
+ { 0xBF, 0x15 },
+ { 0xC0, 0x18 },
+ { 0xC2, 0x14 },
+ { 0xC3, 0x02 },
+ { 0xC4, 0x14 },
+ { 0xC5, 0x02 },
+ { 0xCC, 0x0A },
+ { 0xB0, 0x06 },
+ { 0xC0, 0xA5 },
+ { 0xD5, 0x20 },
+ { 0xC0, 0x00 },
+ { 0xB0, 0x02 },
+ { 0xC0, 0x00 },
+ { 0xC1, 0x02 },
+ { 0xC2, 0x06 },
+ { 0xC3, 0x16 },
+ { 0xC4, 0x0E },
+ { 0xC5, 0x18 },
+ { 0xC6, 0x26 },
+ { 0xC7, 0x32 },
+ { 0xC8, 0x3F },
+ { 0xC9, 0x3F },
+ { 0xCA, 0x3F },
+ { 0xCB, 0x3F },
+ { 0xCC, 0x3D },
+ { 0xCD, 0x2F },
+ { 0xCE, 0x2F },
+ { 0xCF, 0x2F },
+ { 0xD0, 0x07 },
+ { 0xD2, 0x00 },
+ { 0xD3, 0x02 },
+ { 0xD4, 0x06 },
+ { 0xD5, 0x12 },
+ { 0xD6, 0x0A },
+ { 0xD7, 0x14 },
+ { 0xD8, 0x22 },
+ { 0xD9, 0x2E },
+ { 0xDA, 0x3D },
+ { 0xDB, 0x3F },
+ { 0xDC, 0x3F },
+ { 0xDD, 0x3F },
+ { 0xDE, 0x3D },
+ { 0xDF, 0x2F },
+ { 0xE0, 0x2F },
+ { 0xE1, 0x2F },
+ { 0xE2, 0x07 },
+ { 0xB0, 0x07 },
+ { 0xB1, 0x18 },
+ { 0xB2, 0x19 },
+ { 0xB3, 0x2E },
+ { 0xB4, 0x52 },
+ { 0xB5, 0x72 },
+ { 0xB6, 0x8C },
+ { 0xB7, 0xBD },
+ { 0xB8, 0xEB },
+ { 0xB9, 0x47 },
+ { 0xBA, 0x96 },
+ { 0xBB, 0x1E },
+ { 0xBC, 0x90 },
+ { 0xBD, 0x93 },
+ { 0xBE, 0xFA },
+ { 0xBF, 0x56 },
+ { 0xC0, 0x8C },
+ { 0xC1, 0xB7 },
+ { 0xC2, 0xCC },
+ { 0xC3, 0xDF },
+ { 0xC4, 0xE8 },
+ { 0xC5, 0xF0 },
+ { 0xC6, 0xF8 },
+ { 0xC7, 0xFA },
+ { 0xC8, 0xFC },
+ { 0xC9, 0x00 },
+ { 0xCA, 0x00 },
+ { 0xCB, 0x5A },
+ { 0xCC, 0xAF },
+ { 0xCD, 0xFF },
+ { 0xCE, 0xFF },
+ { 0xB0, 0x08 },
+ { 0xB1, 0x04 },
+ { 0xB2, 0x15 },
+ { 0xB3, 0x2D },
+ { 0xB4, 0x51 },
+ { 0xB5, 0x72 },
+ { 0xB6, 0x8D },
+ { 0xB7, 0xBE },
+ { 0xB8, 0xED },
+ { 0xB9, 0x4A },
+ { 0xBA, 0x9A },
+ { 0xBB, 0x23 },
+ { 0xBC, 0x95 },
+ { 0xBD, 0x98 },
+ { 0xBE, 0xFF },
+ { 0xBF, 0x59 },
+ { 0xC0, 0x8E },
+ { 0xC1, 0xB9 },
+ { 0xC2, 0xCD },
+ { 0xC3, 0xDF },
+ { 0xC4, 0xE8 },
+ { 0xC5, 0xF0 },
+ { 0xC6, 0xF8 },
+ { 0xC7, 0xFA },
+ { 0xC8, 0xFC },
+ { 0xC9, 0x00 },
+ { 0xCA, 0x00 },
+ { 0xCB, 0x5A },
+ { 0xCC, 0xAF },
+ { 0xCD, 0xFF },
+ { 0xCE, 0xFF },
+ { 0xB0, 0x09 },
+ { 0xB1, 0x04 },
+ { 0xB2, 0x2C },
+ { 0xB3, 0x36 },
+ { 0xB4, 0x53 },
+ { 0xB5, 0x73 },
+ { 0xB6, 0x8E },
+ { 0xB7, 0xC0 },
+ { 0xB8, 0xEF },
+ { 0xB9, 0x4C },
+ { 0xBA, 0x9D },
+ { 0xBB, 0x25 },
+ { 0xBC, 0x96 },
+ { 0xBD, 0x9A },
+ { 0xBE, 0x01 },
+ { 0xBF, 0x59 },
+ { 0xC0, 0x8E },
+ { 0xC1, 0xB9 },
+ { 0xC2, 0xCD },
+ { 0xC3, 0xDF },
+ { 0xC4, 0xE8 },
+ { 0xC5, 0xF0 },
+ { 0xC6, 0xF8 },
+ { 0xC7, 0xFA },
+ { 0xC8, 0xFC },
+ { 0xC9, 0x00 },
+ { 0xCA, 0x00 },
+ { 0xCB, 0x5A },
+ { 0xCC, 0xBF },
+ { 0xCD, 0xFF },
+ { 0xCE, 0xFF },
+ { 0xB0, 0x0A },
+ { 0xB1, 0x18 },
+ { 0xB2, 0x19 },
+ { 0xB3, 0x2E },
+ { 0xB4, 0x52 },
+ { 0xB5, 0x72 },
+ { 0xB6, 0x8C },
+ { 0xB7, 0xBD },
+ { 0xB8, 0xEB },
+ { 0xB9, 0x47 },
+ { 0xBA, 0x96 },
+ { 0xBB, 0x1E },
+ { 0xBC, 0x90 },
+ { 0xBD, 0x93 },
+ { 0xBE, 0xFA },
+ { 0xBF, 0x56 },
+ { 0xC0, 0x8C },
+ { 0xC1, 0xB7 },
+ { 0xC2, 0xCC },
+ { 0xC3, 0xDF },
+ { 0xC4, 0xE8 },
+ { 0xC5, 0xF0 },
+ { 0xC6, 0xF8 },
+ { 0xC7, 0xFA },
+ { 0xC8, 0xFC },
+ { 0xC9, 0x00 },
+ { 0xCA, 0x00 },
+ { 0xCB, 0x5A },
+ { 0xCC, 0xAF },
+ { 0xCD, 0xFF },
+ { 0xCE, 0xFF },
+ { 0xB0, 0x0B },
+ { 0xB1, 0x04 },
+ { 0xB2, 0x15 },
+ { 0xB3, 0x2D },
+ { 0xB4, 0x51 },
+ { 0xB5, 0x72 },
+ { 0xB6, 0x8D },
+ { 0xB7, 0xBE },
+ { 0xB8, 0xED },
+ { 0xB9, 0x4A },
+ { 0xBA, 0x9A },
+ { 0xBB, 0x23 },
+ { 0xBC, 0x95 },
+ { 0xBD, 0x98 },
+ { 0xBE, 0xFF },
+ { 0xBF, 0x59 },
+ { 0xC0, 0x8E },
+ { 0xC1, 0xB9 },
+ { 0xC2, 0xCD },
+ { 0xC3, 0xDF },
+ { 0xC4, 0xE8 },
+ { 0xC5, 0xF0 },
+ { 0xC6, 0xF8 },
+ { 0xC7, 0xFA },
+ { 0xC8, 0xFC },
+ { 0xC9, 0x00 },
+ { 0xCA, 0x00 },
+ { 0xCB, 0x5A },
+ { 0xCC, 0xAF },
+ { 0xCD, 0xFF },
+ { 0xCE, 0xFF },
+ { 0xB0, 0x0C },
+ { 0xB1, 0x04 },
+ { 0xB2, 0x2C },
+ { 0xB3, 0x36 },
+ { 0xB4, 0x53 },
+ { 0xB5, 0x73 },
+ { 0xB6, 0x8E },
+ { 0xB7, 0xC0 },
+ { 0xB8, 0xEF },
+ { 0xB9, 0x4C },
+ { 0xBA, 0x9D },
+ { 0xBB, 0x25 },
+ { 0xBC, 0x96 },
+ { 0xBD, 0x9A },
+ { 0xBE, 0x01 },
+ { 0xBF, 0x59 },
+ { 0xC0, 0x8E },
+ { 0xC1, 0xB9 },
+ { 0xC2, 0xCD },
+ { 0xC3, 0xDF },
+ { 0xC4, 0xE8 },
+ { 0xC5, 0xF0 },
+ { 0xC6, 0xF8 },
+ { 0xC7, 0xFA },
+ { 0xC8, 0xFC },
+ { 0xC9, 0x00 },
+ { 0xCA, 0x00 },
+ { 0xCB, 0x5A },
+ { 0xCC, 0xBF },
+ { 0xCD, 0xFF },
+ { 0xCE, 0xFF },
+ { 0xB0, 0x04 },
+ { 0xB5, 0x02 },
+ { 0xB6, 0x01 },
+};
+
+static const struct panel_desc boe_himax8279d8p_panel_desc = {
+ .display_mode = &default_display_mode,
+ .bpc = 8,
+ .width_mm = 107,
+ .height_mm = 172,
+ .mode_flags = MIPI_DSI_MODE_VIDEO | MIPI_DSI_MODE_VIDEO_SYNC_PULSE |
+ MIPI_DSI_CLOCK_NON_CONTINUOUS | MIPI_DSI_MODE_LPM,
+ .format = MIPI_DSI_FMT_RGB888,
+ .lanes = 4,
+ .on_cmds = boe_himax8279d8p_on_cmds,
+	.on_cmds_num = ARRAY_SIZE(boe_himax8279d8p_on_cmds),
+};
+
+/* 10 inch */
+static const struct panel_cmd boe_himax8279d10p_on_cmds[] = {
+ { 0xB0, 0x05 },
+ { 0xB1, 0xE5 },
+ { 0xB3, 0x52 },
+ { 0xB0, 0x00 },
+ { 0xB6, 0x03 },
+ { 0xBA, 0x8B },
+ { 0xBF, 0x1A },
+ { 0xC0, 0x0F },
+ { 0xC2, 0x0C },
+ { 0xC3, 0x02 },
+ { 0xC4, 0x0C },
+ { 0xC5, 0x02 },
+ { 0xB0, 0x01 },
+ { 0xE0, 0x26 },
+ { 0xE1, 0x26 },
+ { 0xDC, 0x00 },
+ { 0xDD, 0x00 },
+ { 0xCC, 0x26 },
+ { 0xCD, 0x26 },
+ { 0xC8, 0x00 },
+ { 0xC9, 0x00 },
+ { 0xD2, 0x03 },
+ { 0xD3, 0x03 },
+ { 0xE6, 0x04 },
+ { 0xE7, 0x04 },
+ { 0xC4, 0x09 },
+ { 0xC5, 0x09 },
+ { 0xD8, 0x0A },
+ { 0xD9, 0x0A },
+ { 0xC2, 0x0B },
+ { 0xC3, 0x0B },
+ { 0xD6, 0x0C },
+ { 0xD7, 0x0C },
+ { 0xC0, 0x05 },
+ { 0xC1, 0x05 },
+ { 0xD4, 0x06 },
+ { 0xD5, 0x06 },
+ { 0xCA, 0x07 },
+ { 0xCB, 0x07 },
+ { 0xDE, 0x08 },
+ { 0xDF, 0x08 },
+ { 0xB0, 0x02 },
+ { 0xC0, 0x00 },
+ { 0xC1, 0x0D },
+ { 0xC2, 0x17 },
+ { 0xC3, 0x26 },
+ { 0xC4, 0x31 },
+ { 0xC5, 0x1C },
+ { 0xC6, 0x2C },
+ { 0xC7, 0x33 },
+ { 0xC8, 0x31 },
+ { 0xC9, 0x37 },
+ { 0xCA, 0x37 },
+ { 0xCB, 0x37 },
+ { 0xCC, 0x39 },
+ { 0xCD, 0x2E },
+ { 0xCE, 0x2F },
+ { 0xCF, 0x2F },
+ { 0xD0, 0x07 },
+ { 0xD2, 0x00 },
+ { 0xD3, 0x0D },
+ { 0xD4, 0x17 },
+ { 0xD5, 0x26 },
+ { 0xD6, 0x31 },
+ { 0xD7, 0x3F },
+ { 0xD8, 0x3F },
+ { 0xD9, 0x3F },
+ { 0xDA, 0x3F },
+ { 0xDB, 0x37 },
+ { 0xDC, 0x37 },
+ { 0xDD, 0x37 },
+ { 0xDE, 0x39 },
+ { 0xDF, 0x2E },
+ { 0xE0, 0x2F },
+ { 0xE1, 0x2F },
+ { 0xE2, 0x07 },
+ { 0xB0, 0x03 },
+ { 0xC8, 0x0B },
+ { 0xC9, 0x07 },
+ { 0xC3, 0x00 },
+ { 0xE7, 0x00 },
+ { 0xC5, 0x2A },
+ { 0xDE, 0x2A },
+ { 0xCA, 0x43 },
+ { 0xC9, 0x07 },
+ { 0xE4, 0xC0 },
+ { 0xE5, 0x0D },
+ { 0xCB, 0x01 },
+ { 0xBC, 0x01 },
+ { 0xB0, 0x06 },
+ { 0xB8, 0xA5 },
+ { 0xC0, 0xA5 },
+ { 0xC7, 0x0F },
+ { 0xD5, 0x32 },
+ { 0xB8, 0x00 },
+ { 0xC0, 0x00 },
+ { 0xBC, 0x00 },
+ { 0xB0, 0x07 },
+ { 0xB1, 0x00 },
+ { 0xB2, 0x05 },
+ { 0xB3, 0x10 },
+ { 0xB4, 0x22 },
+ { 0xB5, 0x36 },
+ { 0xB6, 0x4A },
+ { 0xB7, 0x6C },
+ { 0xB8, 0x9A },
+ { 0xB9, 0xD7 },
+ { 0xBA, 0x17 },
+ { 0xBB, 0x92 },
+ { 0xBC, 0x15 },
+ { 0xBD, 0x18 },
+ { 0xBE, 0x8C },
+ { 0xBF, 0x00 },
+ { 0xC0, 0x3A },
+ { 0xC1, 0x72 },
+ { 0xC2, 0x8C },
+ { 0xC3, 0xA5 },
+ { 0xC4, 0xB1 },
+ { 0xC5, 0xBE },
+ { 0xC6, 0xCA },
+ { 0xC7, 0xD1 },
+ { 0xC8, 0xD4 },
+ { 0xC9, 0x00 },
+ { 0xCA, 0x00 },
+ { 0xCB, 0x16 },
+ { 0xCC, 0xAF },
+ { 0xCD, 0xFF },
+ { 0xCE, 0xFF },
+ { 0xB0, 0x08 },
+ { 0xB1, 0x04 },
+ { 0xB2, 0x05 },
+ { 0xB3, 0x11 },
+ { 0xB4, 0x24 },
+ { 0xB5, 0x39 },
+ { 0xB6, 0x4E },
+ { 0xB7, 0x72 },
+ { 0xB8, 0xA3 },
+ { 0xB9, 0xE1 },
+ { 0xBA, 0x25 },
+ { 0xBB, 0xA8 },
+ { 0xBC, 0x2E },
+ { 0xBD, 0x32 },
+ { 0xBE, 0xAD },
+ { 0xBF, 0x28 },
+ { 0xC0, 0x63 },
+ { 0xC1, 0x9B },
+ { 0xC2, 0xB5 },
+ { 0xC3, 0xCF },
+ { 0xC4, 0xDB },
+ { 0xC5, 0xE8 },
+ { 0xC6, 0xF5 },
+ { 0xC7, 0xFA },
+ { 0xC8, 0xFC },
+ { 0xC9, 0x00 },
+ { 0xCA, 0x00 },
+ { 0xCB, 0x16 },
+ { 0xCC, 0xAF },
+ { 0xCD, 0xFF },
+ { 0xCE, 0xFF },
+ { 0xB0, 0x09 },
+ { 0xB1, 0x04 },
+ { 0xB2, 0x04 },
+ { 0xB3, 0x0F },
+ { 0xB4, 0x22 },
+ { 0xB5, 0x37 },
+ { 0xB6, 0x4D },
+ { 0xB7, 0x71 },
+ { 0xB8, 0xA2 },
+ { 0xB9, 0xE1 },
+ { 0xBA, 0x26 },
+ { 0xBB, 0xA9 },
+ { 0xBC, 0x2F },
+ { 0xBD, 0x33 },
+ { 0xBE, 0xAC },
+ { 0xBF, 0x24 },
+ { 0xC0, 0x5D },
+ { 0xC1, 0x94 },
+ { 0xC2, 0xAC },
+ { 0xC3, 0xC5 },
+ { 0xC4, 0xD1 },
+ { 0xC5, 0xDC },
+ { 0xC6, 0xE8 },
+ { 0xC7, 0xED },
+ { 0xC8, 0xF0 },
+ { 0xC9, 0x00 },
+ { 0xCA, 0x00 },
+ { 0xCB, 0x16 },
+ { 0xCC, 0xAF },
+ { 0xCD, 0xFF },
+ { 0xCE, 0xFF },
+ { 0xB0, 0x0A },
+ { 0xB1, 0x00 },
+ { 0xB2, 0x05 },
+ { 0xB3, 0x10 },
+ { 0xB4, 0x22 },
+ { 0xB5, 0x36 },
+ { 0xB6, 0x4A },
+ { 0xB7, 0x6C },
+ { 0xB8, 0x9A },
+ { 0xB9, 0xD7 },
+ { 0xBA, 0x17 },
+ { 0xBB, 0x92 },
+ { 0xBC, 0x15 },
+ { 0xBD, 0x18 },
+ { 0xBE, 0x8C },
+ { 0xBF, 0x00 },
+ { 0xC0, 0x3A },
+ { 0xC1, 0x72 },
+ { 0xC2, 0x8C },
+ { 0xC3, 0xA5 },
+ { 0xC4, 0xB1 },
+ { 0xC5, 0xBE },
+ { 0xC6, 0xCA },
+ { 0xC7, 0xD1 },
+ { 0xC8, 0xD4 },
+ { 0xC9, 0x00 },
+ { 0xCA, 0x00 },
+ { 0xCB, 0x16 },
+ { 0xCC, 0xAF },
+ { 0xCD, 0xFF },
+ { 0xCE, 0xFF },
+ { 0xB0, 0x0B },
+ { 0xB1, 0x04 },
+ { 0xB2, 0x05 },
+ { 0xB3, 0x11 },
+ { 0xB4, 0x24 },
+ { 0xB5, 0x39 },
+ { 0xB6, 0x4E },
+ { 0xB7, 0x72 },
+ { 0xB8, 0xA3 },
+ { 0xB9, 0xE1 },
+ { 0xBA, 0x25 },
+ { 0xBB, 0xA8 },
+ { 0xBC, 0x2E },
+ { 0xBD, 0x32 },
+ { 0xBE, 0xAD },
+ { 0xBF, 0x28 },
+ { 0xC0, 0x63 },
+ { 0xC1, 0x9B },
+ { 0xC2, 0xB5 },
+ { 0xC3, 0xCF },
+ { 0xC4, 0xDB },
+ { 0xC5, 0xE8 },
+ { 0xC6, 0xF5 },
+ { 0xC7, 0xFA },
+ { 0xC8, 0xFC },
+ { 0xC9, 0x00 },
+ { 0xCA, 0x00 },
+ { 0xCB, 0x16 },
+ { 0xCC, 0xAF },
+ { 0xCD, 0xFF },
+ { 0xCE, 0xFF },
+ { 0xB0, 0x0C },
+ { 0xB1, 0x04 },
+ { 0xB2, 0x04 },
+ { 0xB3, 0x0F },
+ { 0xB4, 0x22 },
+ { 0xB5, 0x37 },
+ { 0xB6, 0x4D },
+ { 0xB7, 0x71 },
+ { 0xB8, 0xA2 },
+ { 0xB9, 0xE1 },
+ { 0xBA, 0x26 },
+ { 0xBB, 0xA9 },
+ { 0xBC, 0x2F },
+ { 0xBD, 0x33 },
+ { 0xBE, 0xAC },
+ { 0xBF, 0x24 },
+ { 0xC0, 0x5D },
+ { 0xC1, 0x94 },
+ { 0xC2, 0xAC },
+ { 0xC3, 0xC5 },
+ { 0xC4, 0xD1 },
+ { 0xC5, 0xDC },
+ { 0xC6, 0xE8 },
+ { 0xC7, 0xED },
+ { 0xC8, 0xF0 },
+ { 0xC9, 0x00 },
+ { 0xCA, 0x00 },
+ { 0xCB, 0x16 },
+ { 0xCC, 0xAF },
+ { 0xCD, 0xFF },
+ { 0xCE, 0xFF },
+};
+
+static const struct panel_desc boe_himax8279d10p_panel_desc = {
+ .display_mode = &default_display_mode,
+ .bpc = 8,
+ .width_mm = 135,
+ .height_mm = 216,
+ .mode_flags = MIPI_DSI_MODE_VIDEO | MIPI_DSI_MODE_VIDEO_SYNC_PULSE |
+ MIPI_DSI_CLOCK_NON_CONTINUOUS | MIPI_DSI_MODE_LPM,
+ .format = MIPI_DSI_FMT_RGB888,
+ .lanes = 4,
+ .on_cmds = boe_himax8279d10p_on_cmds,
+	.on_cmds_num = ARRAY_SIZE(boe_himax8279d10p_on_cmds),
+};
+
+static const struct of_device_id panel_of_match[] = {
+ {
+ .compatible = "boe,himax8279d8p",
+ .data = &boe_himax8279d8p_panel_desc,
+ },
+ {
+ .compatible = "boe,himax8279d10p",
+ .data = &boe_himax8279d10p_panel_desc,
+ },
+ {
+ /* sentinel */
+ }
+};
+MODULE_DEVICE_TABLE(of, panel_of_match);
+
+static int panel_add(struct panel_info *pinfo)
+{
+ struct device *dev = &pinfo->link->dev;
+ int ret;
+
+ pinfo->pp18_gpio = devm_gpiod_get(dev, "pp18", GPIOD_OUT_HIGH);
+ if (IS_ERR(pinfo->pp18_gpio)) {
+ ret = PTR_ERR(pinfo->pp18_gpio);
+ if (ret != -EPROBE_DEFER)
+ DRM_DEV_ERROR(dev, "failed to get pp18 gpio: %d\n",
+ ret);
+ return ret;
+ }
+
+ pinfo->pp33_gpio = devm_gpiod_get(dev, "pp33", GPIOD_OUT_HIGH);
+ if (IS_ERR(pinfo->pp33_gpio)) {
+ ret = PTR_ERR(pinfo->pp33_gpio);
+ if (ret != -EPROBE_DEFER)
+ DRM_DEV_ERROR(dev, "failed to get pp33 gpio: %d\n",
+ ret);
+ return ret;
+ }
+
+ pinfo->enable_gpio = devm_gpiod_get(dev, "enable", GPIOD_OUT_HIGH);
+ if (IS_ERR(pinfo->enable_gpio)) {
+ ret = PTR_ERR(pinfo->enable_gpio);
+ if (ret != -EPROBE_DEFER)
+ DRM_DEV_ERROR(dev, "failed to get enable gpio: %d\n",
+ ret);
+ return ret;
+ }
+
+ drm_panel_init(&pinfo->base, dev, &panel_funcs,
+ DRM_MODE_CONNECTOR_DSI);
+
+ ret = drm_panel_of_backlight(&pinfo->base);
+ if (ret)
+ return ret;
+
+ return drm_panel_add(&pinfo->base);
+}
+
+static int panel_probe(struct mipi_dsi_device *dsi)
+{
+ struct panel_info *pinfo;
+ const struct panel_desc *desc;
+ int err;
+
+ pinfo = devm_kzalloc(&dsi->dev, sizeof(*pinfo), GFP_KERNEL);
+ if (!pinfo)
+ return -ENOMEM;
+
+ desc = of_device_get_match_data(&dsi->dev);
+ dsi->mode_flags = desc->mode_flags;
+ dsi->format = desc->format;
+ dsi->lanes = desc->lanes;
+ pinfo->desc = desc;
+
+ pinfo->link = dsi;
+ mipi_dsi_set_drvdata(dsi, pinfo);
+
+ err = panel_add(pinfo);
+ if (err < 0)
+ return err;
+
+ err = mipi_dsi_attach(dsi);
+ if (err < 0)
+ drm_panel_remove(&pinfo->base);
+
+ return err;
+}
+
+static int panel_remove(struct mipi_dsi_device *dsi)
+{
+ struct panel_info *pinfo = mipi_dsi_get_drvdata(dsi);
+ int err;
+
+ err = boe_panel_disable(&pinfo->base);
+ if (err < 0)
+ DRM_DEV_ERROR(&dsi->dev, "failed to disable panel: %d\n",
+ err);
+
+ err = boe_panel_unprepare(&pinfo->base);
+ if (err < 0)
+ DRM_DEV_ERROR(&dsi->dev, "failed to unprepare panel: %d\n",
+ err);
+
+ err = mipi_dsi_detach(dsi);
+ if (err < 0)
+ DRM_DEV_ERROR(&dsi->dev, "failed to detach from DSI host: %d\n",
+ err);
+
+ drm_panel_remove(&pinfo->base);
+
+ return 0;
+}
+
+static void panel_shutdown(struct mipi_dsi_device *dsi)
+{
+ struct panel_info *pinfo = mipi_dsi_get_drvdata(dsi);
+
+ boe_panel_disable(&pinfo->base);
+ boe_panel_unprepare(&pinfo->base);
+}
+
+static struct mipi_dsi_driver panel_driver = {
+ .driver = {
+ .name = "panel-boe-himax8279d",
+ .of_match_table = panel_of_match,
+ },
+ .probe = panel_probe,
+ .remove = panel_remove,
+ .shutdown = panel_shutdown,
+};
+module_mipi_dsi_driver(panel_driver);
+
+MODULE_AUTHOR("Jerry Han <jerry.han.hq@gmail.com>");
+MODULE_DESCRIPTION("Boe Himax8279d driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/gpu/drm/panel/panel-feiyang-fy07024di26a30d.c b/drivers/gpu/drm/panel/panel-feiyang-fy07024di26a30d.c
index 98f184b81187..95b789ab9d29 100644
--- a/drivers/gpu/drm/panel/panel-feiyang-fy07024di26a30d.c
+++ b/drivers/gpu/drm/panel/panel-feiyang-fy07024di26a30d.c
@@ -9,7 +9,6 @@
#include <drm/drm_panel.h>
#include <drm/drm_print.h>
-#include <linux/backlight.h>
#include <linux/gpio/consumer.h>
#include <linux/delay.h>
#include <linux/module.h>
@@ -22,7 +21,6 @@ struct feiyang {
struct drm_panel panel;
struct mipi_dsi_device *dsi;
- struct backlight_device *backlight;
struct regulator *dvdd;
struct regulator *avdd;
struct gpio_desc *reset;
@@ -102,7 +100,6 @@ static int feiyang_enable(struct drm_panel *panel)
msleep(200);
mipi_dsi_dcs_set_display_on(ctx->dsi);
- backlight_enable(ctx->backlight);
return 0;
}
@@ -111,7 +108,6 @@ static int feiyang_disable(struct drm_panel *panel)
{
struct feiyang *ctx = panel_to_feiyang(panel);
- backlight_disable(ctx->backlight);
return mipi_dsi_dcs_set_display_off(ctx->dsi);
}
@@ -162,13 +158,13 @@ static const struct drm_display_mode feiyang_default_mode = {
.type = DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED,
};
-static int feiyang_get_modes(struct drm_panel *panel)
+static int feiyang_get_modes(struct drm_panel *panel,
+ struct drm_connector *connector)
{
- struct drm_connector *connector = panel->connector;
struct feiyang *ctx = panel_to_feiyang(panel);
struct drm_display_mode *mode;
- mode = drm_mode_duplicate(panel->drm, &feiyang_default_mode);
+ mode = drm_mode_duplicate(connector->dev, &feiyang_default_mode);
if (!mode) {
DRM_DEV_ERROR(&ctx->dsi->dev, "failed to add mode %ux%ux@%u\n",
feiyang_default_mode.hdisplay,
@@ -225,9 +221,9 @@ static int feiyang_dsi_probe(struct mipi_dsi_device *dsi)
return PTR_ERR(ctx->reset);
}
- ctx->backlight = devm_of_find_backlight(&dsi->dev);
- if (IS_ERR(ctx->backlight))
- return PTR_ERR(ctx->backlight);
+ ret = drm_panel_of_backlight(&ctx->panel);
+ if (ret)
+ return ret;
ret = drm_panel_add(&ctx->panel);
if (ret < 0)
diff --git a/drivers/gpu/drm/panel/panel-ilitek-ili9322.c b/drivers/gpu/drm/panel/panel-ilitek-ili9322.c
index 24955bec1958..f394d53a7da4 100644
--- a/drivers/gpu/drm/panel/panel-ilitek-ili9322.c
+++ b/drivers/gpu/drm/panel/panel-ilitek-ili9322.c
@@ -641,10 +641,11 @@ static const struct drm_display_mode itu_r_bt_656_720_mode = {
.flags = 0,
};
-static int ili9322_get_modes(struct drm_panel *panel)
+static int ili9322_get_modes(struct drm_panel *panel,
+ struct drm_connector *connector)
{
- struct drm_connector *connector = panel->connector;
struct ili9322 *ili = panel_to_ili9322(panel);
+ struct drm_device *drm = connector->dev;
struct drm_display_mode *mode;
struct drm_display_info *info;
@@ -663,26 +664,26 @@ static int ili9322_get_modes(struct drm_panel *panel)
switch (ili->input) {
case ILI9322_INPUT_SRGB_DUMMY_320X240:
- mode = drm_mode_duplicate(panel->drm, &srgb_320x240_mode);
+ mode = drm_mode_duplicate(drm, &srgb_320x240_mode);
break;
case ILI9322_INPUT_SRGB_DUMMY_360X240:
- mode = drm_mode_duplicate(panel->drm, &srgb_360x240_mode);
+ mode = drm_mode_duplicate(drm, &srgb_360x240_mode);
break;
case ILI9322_INPUT_PRGB_THROUGH:
case ILI9322_INPUT_PRGB_ALIGNED:
- mode = drm_mode_duplicate(panel->drm, &prgb_320x240_mode);
+ mode = drm_mode_duplicate(drm, &prgb_320x240_mode);
break;
case ILI9322_INPUT_YUV_640X320_YCBCR:
- mode = drm_mode_duplicate(panel->drm, &yuv_640x320_mode);
+ mode = drm_mode_duplicate(drm, &yuv_640x320_mode);
break;
case ILI9322_INPUT_YUV_720X360_YCBCR:
- mode = drm_mode_duplicate(panel->drm, &yuv_720x360_mode);
+ mode = drm_mode_duplicate(drm, &yuv_720x360_mode);
break;
case ILI9322_INPUT_ITU_R_BT656_720X360_YCBCR:
- mode = drm_mode_duplicate(panel->drm, &itu_r_bt_656_720_mode);
+ mode = drm_mode_duplicate(drm, &itu_r_bt_656_720_mode);
break;
case ILI9322_INPUT_ITU_R_BT656_640X320_YCBCR:
- mode = drm_mode_duplicate(panel->drm, &itu_r_bt_656_640_mode);
+ mode = drm_mode_duplicate(drm, &itu_r_bt_656_640_mode);
break;
default:
mode = NULL;
diff --git a/drivers/gpu/drm/panel/panel-ilitek-ili9881c.c b/drivers/gpu/drm/panel/panel-ilitek-ili9881c.c
index e8789e460a16..f54077c216a3 100644
--- a/drivers/gpu/drm/panel/panel-ilitek-ili9881c.c
+++ b/drivers/gpu/drm/panel/panel-ilitek-ili9881c.c
@@ -3,7 +3,6 @@
* Copyright (C) 2017-2018, Bootlin
*/
-#include <linux/backlight.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/err.h>
@@ -25,7 +24,6 @@ struct ili9881c {
struct drm_panel panel;
struct mipi_dsi_device *dsi;
- struct backlight_device *backlight;
struct regulator *power;
struct gpio_desc *reset;
};
@@ -348,7 +346,6 @@ static int ili9881c_enable(struct drm_panel *panel)
msleep(120);
mipi_dsi_dcs_set_display_on(ctx->dsi);
- backlight_enable(ctx->backlight);
return 0;
}
@@ -357,7 +354,6 @@ static int ili9881c_disable(struct drm_panel *panel)
{
struct ili9881c *ctx = panel_to_ili9881c(panel);
- backlight_disable(ctx->backlight);
return mipi_dsi_dcs_set_display_off(ctx->dsi);
}
@@ -387,13 +383,13 @@ static const struct drm_display_mode bananapi_default_mode = {
.vtotal = 1280 + 10 + 10 + 20,
};
-static int ili9881c_get_modes(struct drm_panel *panel)
+static int ili9881c_get_modes(struct drm_panel *panel,
+ struct drm_connector *connector)
{
- struct drm_connector *connector = panel->connector;
struct ili9881c *ctx = panel_to_ili9881c(panel);
struct drm_display_mode *mode;
- mode = drm_mode_duplicate(panel->drm, &bananapi_default_mode);
+ mode = drm_mode_duplicate(connector->dev, &bananapi_default_mode);
if (!mode) {
dev_err(&ctx->dsi->dev, "failed to add mode %ux%ux@%u\n",
bananapi_default_mode.hdisplay,
@@ -407,8 +403,8 @@ static int ili9881c_get_modes(struct drm_panel *panel)
mode->type = DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED;
drm_mode_probed_add(connector, mode);
- panel->connector->display_info.width_mm = 62;
- panel->connector->display_info.height_mm = 110;
+ connector->display_info.width_mm = 62;
+ connector->display_info.height_mm = 110;
return 1;
}
@@ -423,7 +419,6 @@ static const struct drm_panel_funcs ili9881c_funcs = {
static int ili9881c_dsi_probe(struct mipi_dsi_device *dsi)
{
- struct device_node *np;
struct ili9881c *ctx;
int ret;
@@ -448,14 +443,9 @@ static int ili9881c_dsi_probe(struct mipi_dsi_device *dsi)
return PTR_ERR(ctx->reset);
}
- np = of_parse_phandle(dsi->dev.of_node, "backlight", 0);
- if (np) {
- ctx->backlight = of_find_backlight_by_node(np);
- of_node_put(np);
-
- if (!ctx->backlight)
- return -EPROBE_DEFER;
- }
+ ret = drm_panel_of_backlight(&ctx->panel);
+ if (ret)
+ return ret;
ret = drm_panel_add(&ctx->panel);
if (ret < 0)
@@ -475,9 +465,6 @@ static int ili9881c_dsi_remove(struct mipi_dsi_device *dsi)
mipi_dsi_detach(dsi);
drm_panel_remove(&ctx->panel);
- if (ctx->backlight)
- put_device(&ctx->backlight->dev);
-
return 0;
}
diff --git a/drivers/gpu/drm/panel/panel-innolux-p079zca.c b/drivers/gpu/drm/panel/panel-innolux-p079zca.c
index 83df1ac4211f..7419f1f0acee 100644
--- a/drivers/gpu/drm/panel/panel-innolux-p079zca.c
+++ b/drivers/gpu/drm/panel/panel-innolux-p079zca.c
@@ -3,7 +3,6 @@
* Copyright (c) 2017, Fuzhou Rockchip Electronics Co., Ltd
*/
-#include <linux/backlight.h>
#include <linux/delay.h>
#include <linux/gpio/consumer.h>
#include <linux/module.h>
@@ -52,7 +51,6 @@ struct innolux_panel {
struct mipi_dsi_device *link;
const struct panel_desc *desc;
- struct backlight_device *backlight;
struct regulator_bulk_data *supplies;
struct gpio_desc *enable_gpio;
@@ -72,8 +70,6 @@ static int innolux_panel_disable(struct drm_panel *panel)
if (!innolux->enabled)
return 0;
- backlight_disable(innolux->backlight);
-
innolux->enabled = false;
return 0;
@@ -204,18 +200,10 @@ poweroff:
static int innolux_panel_enable(struct drm_panel *panel)
{
struct innolux_panel *innolux = to_innolux_panel(panel);
- int ret;
if (innolux->enabled)
return 0;
- ret = backlight_enable(innolux->backlight);
- if (ret) {
- DRM_DEV_ERROR(panel->drm->dev,
- "Failed to enable backlight %d\n", ret);
- return ret;
- }
-
innolux->enabled = true;
return 0;
@@ -403,28 +391,27 @@ static const struct panel_desc innolux_p097pfg_panel_desc = {
.sleep_mode_delay = 100, /* T15 */
};
-static int innolux_panel_get_modes(struct drm_panel *panel)
+static int innolux_panel_get_modes(struct drm_panel *panel,
+ struct drm_connector *connector)
{
struct innolux_panel *innolux = to_innolux_panel(panel);
const struct drm_display_mode *m = innolux->desc->mode;
struct drm_display_mode *mode;
- mode = drm_mode_duplicate(panel->drm, m);
+ mode = drm_mode_duplicate(connector->dev, m);
if (!mode) {
- DRM_DEV_ERROR(panel->drm->dev, "failed to add mode %ux%ux@%u\n",
+ DRM_DEV_ERROR(panel->dev, "failed to add mode %ux%u@%u\n",
m->hdisplay, m->vdisplay, m->vrefresh);
return -ENOMEM;
}
drm_mode_set_name(mode);
- drm_mode_probed_add(panel->connector, mode);
+ drm_mode_probed_add(connector, mode);
- panel->connector->display_info.width_mm =
- innolux->desc->size.width;
- panel->connector->display_info.height_mm =
- innolux->desc->size.height;
- panel->connector->display_info.bpc = innolux->desc->bpc;
+ connector->display_info.width_mm = innolux->desc->size.width;
+ connector->display_info.height_mm = innolux->desc->size.height;
+ connector->display_info.bpc = innolux->desc->bpc;
return 1;
}
@@ -483,13 +470,13 @@ static int innolux_panel_add(struct mipi_dsi_device *dsi,
innolux->enable_gpio = NULL;
}
- innolux->backlight = devm_of_find_backlight(dev);
- if (IS_ERR(innolux->backlight))
- return PTR_ERR(innolux->backlight);
-
drm_panel_init(&innolux->base, dev, &innolux_panel_funcs,
DRM_MODE_CONNECTOR_DSI);
+ err = drm_panel_of_backlight(&innolux->base);
+ if (err)
+ return err;
+
err = drm_panel_add(&innolux->base);
if (err < 0)
return err;
@@ -527,12 +514,12 @@ static int innolux_panel_remove(struct mipi_dsi_device *dsi)
struct innolux_panel *innolux = mipi_dsi_get_drvdata(dsi);
int err;
- err = innolux_panel_unprepare(&innolux->base);
+ err = drm_panel_unprepare(&innolux->base);
if (err < 0)
DRM_DEV_ERROR(&dsi->dev, "failed to unprepare panel: %d\n",
err);
- err = innolux_panel_disable(&innolux->base);
+ err = drm_panel_disable(&innolux->base);
if (err < 0)
DRM_DEV_ERROR(&dsi->dev, "failed to disable panel: %d\n", err);
@@ -550,8 +537,8 @@ static void innolux_panel_shutdown(struct mipi_dsi_device *dsi)
{
struct innolux_panel *innolux = mipi_dsi_get_drvdata(dsi);
- innolux_panel_unprepare(&innolux->base);
- innolux_panel_disable(&innolux->base);
+ drm_panel_unprepare(&innolux->base);
+ drm_panel_disable(&innolux->base);
}
static struct mipi_dsi_driver innolux_panel_driver = {
diff --git a/drivers/gpu/drm/panel/panel-jdi-lt070me05000.c b/drivers/gpu/drm/panel/panel-jdi-lt070me05000.c
index 56364a93f0b8..4bfd8c877c8e 100644
--- a/drivers/gpu/drm/panel/panel-jdi-lt070me05000.c
+++ b/drivers/gpu/drm/panel/panel-jdi-lt070me05000.c
@@ -300,13 +300,14 @@ static const struct drm_display_mode default_mode = {
.flags = 0,
};
-static int jdi_panel_get_modes(struct drm_panel *panel)
+static int jdi_panel_get_modes(struct drm_panel *panel,
+ struct drm_connector *connector)
{
struct drm_display_mode *mode;
struct jdi_panel *jdi = to_jdi_panel(panel);
struct device *dev = &jdi->dsi->dev;
- mode = drm_mode_duplicate(panel->drm, &default_mode);
+ mode = drm_mode_duplicate(connector->dev, &default_mode);
if (!mode) {
dev_err(dev, "failed to add mode %ux%ux@%u\n",
default_mode.hdisplay, default_mode.vdisplay,
@@ -316,10 +317,10 @@ static int jdi_panel_get_modes(struct drm_panel *panel)
drm_mode_set_name(mode);
- drm_mode_probed_add(panel->connector, mode);
+ drm_mode_probed_add(connector, mode);
- panel->connector->display_info.width_mm = 95;
- panel->connector->display_info.height_mm = 151;
+ connector->display_info.width_mm = 95;
+ connector->display_info.height_mm = 151;
return 1;
}
diff --git a/drivers/gpu/drm/panel/panel-kingdisplay-kd097d04.c b/drivers/gpu/drm/panel/panel-kingdisplay-kd097d04.c
index 45f96556ec8c..bac1a2a06c92 100644
--- a/drivers/gpu/drm/panel/panel-kingdisplay-kd097d04.c
+++ b/drivers/gpu/drm/panel/panel-kingdisplay-kd097d04.c
@@ -3,7 +3,6 @@
* Copyright (c) 2017, Fuzhou Rockchip Electronics Co., Ltd
*/
-#include <linux/backlight.h>
#include <linux/delay.h>
#include <linux/gpio/consumer.h>
#include <linux/module.h>
@@ -23,7 +22,6 @@ struct kingdisplay_panel {
struct drm_panel base;
struct mipi_dsi_device *link;
- struct backlight_device *backlight;
struct regulator *supply;
struct gpio_desc *enable_gpio;
@@ -191,8 +189,6 @@ static int kingdisplay_panel_disable(struct drm_panel *panel)
if (!kingdisplay->enabled)
return 0;
- backlight_disable(kingdisplay->backlight);
-
err = mipi_dsi_dcs_set_display_off(kingdisplay->link);
if (err < 0)
DRM_DEV_ERROR(panel->dev, "failed to set display off: %d\n",
@@ -303,18 +299,10 @@ poweroff:
static int kingdisplay_panel_enable(struct drm_panel *panel)
{
struct kingdisplay_panel *kingdisplay = to_kingdisplay_panel(panel);
- int ret;
if (kingdisplay->enabled)
return 0;
- ret = backlight_enable(kingdisplay->backlight);
- if (ret) {
- DRM_DEV_ERROR(panel->drm->dev,
- "Failed to enable backlight %d\n", ret);
- return ret;
- }
-
kingdisplay->enabled = true;
return 0;
@@ -333,13 +321,14 @@ static const struct drm_display_mode default_mode = {
.vrefresh = 60,
};
-static int kingdisplay_panel_get_modes(struct drm_panel *panel)
+static int kingdisplay_panel_get_modes(struct drm_panel *panel,
+ struct drm_connector *connector)
{
struct drm_display_mode *mode;
- mode = drm_mode_duplicate(panel->drm, &default_mode);
+ mode = drm_mode_duplicate(connector->dev, &default_mode);
if (!mode) {
- DRM_DEV_ERROR(panel->drm->dev, "failed to add mode %ux%ux@%u\n",
+ DRM_DEV_ERROR(panel->dev, "failed to add mode %ux%u@%u\n",
default_mode.hdisplay, default_mode.vdisplay,
default_mode.vrefresh);
return -ENOMEM;
@@ -347,11 +336,11 @@ static int kingdisplay_panel_get_modes(struct drm_panel *panel)
drm_mode_set_name(mode);
- drm_mode_probed_add(panel->connector, mode);
+ drm_mode_probed_add(connector, mode);
- panel->connector->display_info.width_mm = 147;
- panel->connector->display_info.height_mm = 196;
- panel->connector->display_info.bpc = 8;
+ connector->display_info.width_mm = 147;
+ connector->display_info.height_mm = 196;
+ connector->display_info.bpc = 8;
return 1;
}
@@ -387,13 +376,13 @@ static int kingdisplay_panel_add(struct kingdisplay_panel *kingdisplay)
kingdisplay->enable_gpio = NULL;
}
- kingdisplay->backlight = devm_of_find_backlight(dev);
- if (IS_ERR(kingdisplay->backlight))
- return PTR_ERR(kingdisplay->backlight);
-
drm_panel_init(&kingdisplay->base, &kingdisplay->link->dev,
&kingdisplay_panel_funcs, DRM_MODE_CONNECTOR_DSI);
+ err = drm_panel_of_backlight(&kingdisplay->base);
+ if (err)
+ return err;
+
return drm_panel_add(&kingdisplay->base);
}
@@ -431,12 +420,12 @@ static int kingdisplay_panel_remove(struct mipi_dsi_device *dsi)
struct kingdisplay_panel *kingdisplay = mipi_dsi_get_drvdata(dsi);
int err;
- err = kingdisplay_panel_unprepare(&kingdisplay->base);
+ err = drm_panel_unprepare(&kingdisplay->base);
if (err < 0)
DRM_DEV_ERROR(&dsi->dev, "failed to unprepare panel: %d\n",
err);
- err = kingdisplay_panel_disable(&kingdisplay->base);
+ err = drm_panel_disable(&kingdisplay->base);
if (err < 0)
DRM_DEV_ERROR(&dsi->dev, "failed to disable panel: %d\n", err);
@@ -454,8 +443,8 @@ static void kingdisplay_panel_shutdown(struct mipi_dsi_device *dsi)
{
struct kingdisplay_panel *kingdisplay = mipi_dsi_get_drvdata(dsi);
- kingdisplay_panel_unprepare(&kingdisplay->base);
- kingdisplay_panel_disable(&kingdisplay->base);
+ drm_panel_unprepare(&kingdisplay->base);
+ drm_panel_disable(&kingdisplay->base);
}
static struct mipi_dsi_driver kingdisplay_panel_driver = {
diff --git a/drivers/gpu/drm/panel/panel-leadtek-ltk500hd1829.c b/drivers/gpu/drm/panel/panel-leadtek-ltk500hd1829.c
new file mode 100644
index 000000000000..76ecf2de9c44
--- /dev/null
+++ b/drivers/gpu/drm/panel/panel-leadtek-ltk500hd1829.c
@@ -0,0 +1,534 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Copyright (c) 2019 Theobroma Systems Design und Consulting GmbH
+ *
+ * based on panel-kingdisplay-kd097d04.c
+ * Copyright (c) 2017, Fuzhou Rockchip Electronics Co., Ltd
+ */
+
+#include <linux/backlight.h>
+#include <linux/delay.h>
+#include <linux/gpio/consumer.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/regulator/consumer.h>
+
+#include <video/mipi_display.h>
+
+#include <drm/drm_crtc.h>
+#include <drm/drm_device.h>
+#include <drm/drm_mipi_dsi.h>
+#include <drm/drm_modes.h>
+#include <drm/drm_panel.h>
+#include <drm/drm_print.h>
+
+struct ltk500hd1829 {
+ struct device *dev;
+ struct drm_panel panel;
+ struct gpio_desc *reset_gpio;
+ struct regulator *vcc;
+ struct regulator *iovcc;
+ bool prepared;
+};
+
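+/* one entry of the vendor init sequence, sent as a 2-byte generic write */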
+struct ltk500hd1829_cmd {
+ char cmd;
+ char data;
+};
+
+/*
+ * The Reference Manual does not describe these commands; we received
+ * them from the vendor, so use them as-is.
+ */
+static const struct ltk500hd1829_cmd init_code[] = {
+ { 0xE0, 0x00 },
+ { 0xE1, 0x93 },
+ { 0xE2, 0x65 },
+ { 0xE3, 0xF8 },
+ { 0x80, 0x03 },
+ { 0xE0, 0x04 },
+ { 0x2D, 0x03 },
+ { 0xE0, 0x01 },
+ { 0x00, 0x00 },
+ { 0x01, 0xB6 },
+ { 0x03, 0x00 },
+ { 0x04, 0xC5 },
+ { 0x17, 0x00 },
+ { 0x18, 0xBF },
+ { 0x19, 0x01 },
+ { 0x1A, 0x00 },
+ { 0x1B, 0xBF },
+ { 0x1C, 0x01 },
+ { 0x1F, 0x7C },
+ { 0x20, 0x26 },
+ { 0x21, 0x26 },
+ { 0x22, 0x4E },
+ { 0x37, 0x09 },
+ { 0x38, 0x04 },
+ { 0x39, 0x08 },
+ { 0x3A, 0x1F },
+ { 0x3B, 0x1F },
+ { 0x3C, 0x78 },
+ { 0x3D, 0xFF },
+ { 0x3E, 0xFF },
+ { 0x3F, 0x00 },
+ { 0x40, 0x04 },
+ { 0x41, 0xA0 },
+ { 0x43, 0x0F },
+ { 0x44, 0x0A },
+ { 0x45, 0x24 },
+ { 0x55, 0x01 },
+ { 0x56, 0x01 },
+ { 0x57, 0xA5 },
+ { 0x58, 0x0A },
+ { 0x59, 0x4A },
+ { 0x5A, 0x38 },
+ { 0x5B, 0x10 },
+ { 0x5C, 0x19 },
+ { 0x5D, 0x7C },
+ { 0x5E, 0x64 },
+ { 0x5F, 0x54 },
+ { 0x60, 0x48 },
+ { 0x61, 0x44 },
+ { 0x62, 0x35 },
+ { 0x63, 0x3A },
+ { 0x64, 0x24 },
+ { 0x65, 0x3B },
+ { 0x66, 0x39 },
+ { 0x67, 0x37 },
+ { 0x68, 0x56 },
+ { 0x69, 0x41 },
+ { 0x6A, 0x47 },
+ { 0x6B, 0x2F },
+ { 0x6C, 0x23 },
+ { 0x6D, 0x13 },
+ { 0x6E, 0x02 },
+ { 0x6F, 0x08 },
+ { 0x70, 0x7C },
+ { 0x71, 0x64 },
+ { 0x72, 0x54 },
+ { 0x73, 0x48 },
+ { 0x74, 0x44 },
+ { 0x75, 0x35 },
+ { 0x76, 0x3A },
+ { 0x77, 0x22 },
+ { 0x78, 0x3B },
+ { 0x79, 0x39 },
+ { 0x7A, 0x38 },
+ { 0x7B, 0x52 },
+ { 0x7C, 0x41 },
+ { 0x7D, 0x47 },
+ { 0x7E, 0x2F },
+ { 0x7F, 0x23 },
+ { 0x80, 0x13 },
+ { 0x81, 0x02 },
+ { 0x82, 0x08 },
+ { 0xE0, 0x02 },
+ { 0x00, 0x57 },
+ { 0x01, 0x77 },
+ { 0x02, 0x44 },
+ { 0x03, 0x46 },
+ { 0x04, 0x48 },
+ { 0x05, 0x4A },
+ { 0x06, 0x4C },
+ { 0x07, 0x4E },
+ { 0x08, 0x50 },
+ { 0x09, 0x55 },
+ { 0x0A, 0x52 },
+ { 0x0B, 0x55 },
+ { 0x0C, 0x55 },
+ { 0x0D, 0x55 },
+ { 0x0E, 0x55 },
+ { 0x0F, 0x55 },
+ { 0x10, 0x55 },
+ { 0x11, 0x55 },
+ { 0x12, 0x55 },
+ { 0x13, 0x40 },
+ { 0x14, 0x55 },
+ { 0x15, 0x55 },
+ { 0x16, 0x57 },
+ { 0x17, 0x77 },
+ { 0x18, 0x45 },
+ { 0x19, 0x47 },
+ { 0x1A, 0x49 },
+ { 0x1B, 0x4B },
+ { 0x1C, 0x4D },
+ { 0x1D, 0x4F },
+ { 0x1E, 0x51 },
+ { 0x1F, 0x55 },
+ { 0x20, 0x53 },
+ { 0x21, 0x55 },
+ { 0x22, 0x55 },
+ { 0x23, 0x55 },
+ { 0x24, 0x55 },
+ { 0x25, 0x55 },
+ { 0x26, 0x55 },
+ { 0x27, 0x55 },
+ { 0x28, 0x55 },
+ { 0x29, 0x41 },
+ { 0x2A, 0x55 },
+ { 0x2B, 0x55 },
+ { 0x2C, 0x57 },
+ { 0x2D, 0x77 },
+ { 0x2E, 0x4F },
+ { 0x2F, 0x4D },
+ { 0x30, 0x4B },
+ { 0x31, 0x49 },
+ { 0x32, 0x47 },
+ { 0x33, 0x45 },
+ { 0x34, 0x41 },
+ { 0x35, 0x55 },
+ { 0x36, 0x53 },
+ { 0x37, 0x55 },
+ { 0x38, 0x55 },
+ { 0x39, 0x55 },
+ { 0x3A, 0x55 },
+ { 0x3B, 0x55 },
+ { 0x3C, 0x55 },
+ { 0x3D, 0x55 },
+ { 0x3E, 0x55 },
+ { 0x3F, 0x51 },
+ { 0x40, 0x55 },
+ { 0x41, 0x55 },
+ { 0x42, 0x57 },
+ { 0x43, 0x77 },
+ { 0x44, 0x4E },
+ { 0x45, 0x4C },
+ { 0x46, 0x4A },
+ { 0x47, 0x48 },
+ { 0x48, 0x46 },
+ { 0x49, 0x44 },
+ { 0x4A, 0x40 },
+ { 0x4B, 0x55 },
+ { 0x4C, 0x52 },
+ { 0x4D, 0x55 },
+ { 0x4E, 0x55 },
+ { 0x4F, 0x55 },
+ { 0x50, 0x55 },
+ { 0x51, 0x55 },
+ { 0x52, 0x55 },
+ { 0x53, 0x55 },
+ { 0x54, 0x55 },
+ { 0x55, 0x50 },
+ { 0x56, 0x55 },
+ { 0x57, 0x55 },
+ { 0x58, 0x40 },
+ { 0x59, 0x00 },
+ { 0x5A, 0x00 },
+ { 0x5B, 0x10 },
+ { 0x5C, 0x09 },
+ { 0x5D, 0x30 },
+ { 0x5E, 0x01 },
+ { 0x5F, 0x02 },
+ { 0x60, 0x30 },
+ { 0x61, 0x03 },
+ { 0x62, 0x04 },
+ { 0x63, 0x06 },
+ { 0x64, 0x6A },
+ { 0x65, 0x75 },
+ { 0x66, 0x0F },
+ { 0x67, 0xB3 },
+ { 0x68, 0x0B },
+ { 0x69, 0x06 },
+ { 0x6A, 0x6A },
+ { 0x6B, 0x10 },
+ { 0x6C, 0x00 },
+ { 0x6D, 0x04 },
+ { 0x6E, 0x04 },
+ { 0x6F, 0x88 },
+ { 0x70, 0x00 },
+ { 0x71, 0x00 },
+ { 0x72, 0x06 },
+ { 0x73, 0x7B },
+ { 0x74, 0x00 },
+ { 0x75, 0xBC },
+ { 0x76, 0x00 },
+ { 0x77, 0x05 },
+ { 0x78, 0x2E },
+ { 0x79, 0x00 },
+ { 0x7A, 0x00 },
+ { 0x7B, 0x00 },
+ { 0x7C, 0x00 },
+ { 0x7D, 0x03 },
+ { 0x7E, 0x7B },
+ { 0xE0, 0x04 },
+ { 0x09, 0x10 },
+ { 0x2B, 0x2B },
+ { 0x2E, 0x44 },
+ { 0xE0, 0x00 },
+ { 0xE6, 0x02 },
+ { 0xE7, 0x02 },
+ { 0x35, 0x00 },
+};
+
+static inline
+struct ltk500hd1829 *panel_to_ltk500hd1829(struct drm_panel *panel)
+{
+ return container_of(panel, struct ltk500hd1829, panel);
+}
+
+static int ltk500hd1829_unprepare(struct drm_panel *panel)
+{
+ struct ltk500hd1829 *ctx = panel_to_ltk500hd1829(panel);
+ struct mipi_dsi_device *dsi = to_mipi_dsi_device(ctx->dev);
+ int ret;
+
+ if (!ctx->prepared)
+ return 0;
+
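+ /* reverse of prepare: display off, enter sleep, then drop the supplies */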
+ ret = mipi_dsi_dcs_set_display_off(dsi);
+ if (ret < 0)
+ DRM_DEV_ERROR(panel->dev, "failed to set display off: %d\n",
+ ret);
+
+ ret = mipi_dsi_dcs_enter_sleep_mode(dsi);
+ if (ret < 0)
+ DRM_DEV_ERROR(panel->dev, "failed to enter sleep mode: %d\n",
+ ret);
+
+ /* 120ms to enter sleep mode */
+ msleep(120);
+
+ regulator_disable(ctx->iovcc);
+ regulator_disable(ctx->vcc);
+
+ ctx->prepared = false;
+
+ return 0;
+}
+
+static int ltk500hd1829_prepare(struct drm_panel *panel)
+{
+ struct ltk500hd1829 *ctx = panel_to_ltk500hd1829(panel);
+ struct mipi_dsi_device *dsi = to_mipi_dsi_device(ctx->dev);
+ unsigned int i;
+ int ret;
+
+ if (ctx->prepared)
+ return 0;
+
+ ret = regulator_enable(ctx->vcc);
+ if (ret < 0) {
+ DRM_DEV_ERROR(ctx->dev,
+ "Failed to enable vci supply: %d\n", ret);
+ return ret;
+ }
+ ret = regulator_enable(ctx->iovcc);
+ if (ret < 0) {
+ DRM_DEV_ERROR(ctx->dev,
+ "Failed to enable iovcc supply: %d\n", ret);
+ goto disable_vcc;
+ }
+
+ gpiod_set_value_cansleep(ctx->reset_gpio, 1);
+ /* tRW: 10us */
+ usleep_range(10, 20);
+ gpiod_set_value_cansleep(ctx->reset_gpio, 0);
+
+ /* tRT: >= 5ms */
+ usleep_range(5000, 6000);
+
+ for (i = 0; i < ARRAY_SIZE(init_code); i++) {
+ ret = mipi_dsi_generic_write(dsi, &init_code[i],
+ sizeof(struct ltk500hd1829_cmd));
+ if (ret < 0) {
+ DRM_DEV_ERROR(panel->dev,
+ "failed to write init cmds: %d\n", ret);
+ goto disable_iovcc;
+ }
+ }
+
+ ret = mipi_dsi_dcs_exit_sleep_mode(dsi);
+ if (ret < 0) {
+ DRM_DEV_ERROR(panel->dev, "failed to exit sleep mode: %d\n",
+ ret);
+ goto disable_iovcc;
+ }
+
+ /* 120ms to exit sleep mode */
+ msleep(120);
+
+ ret = mipi_dsi_dcs_set_display_on(dsi);
+ if (ret < 0) {
+ DRM_DEV_ERROR(panel->dev, "failed to set display on: %d\n",
+ ret);
+ goto disable_iovcc;
+ }
+
+ ctx->prepared = true;
+
+ return 0;
+
+disable_iovcc:
+ regulator_disable(ctx->iovcc);
+disable_vcc:
+ regulator_disable(ctx->vcc);
+ return ret;
+}
+
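+/* the panel's single supported mode: 720x1280 portrait */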
+static const struct drm_display_mode default_mode = {
+ .hdisplay = 720,
+ .hsync_start = 720 + 50,
+ .hsync_end = 720 + 50 + 50,
+ .htotal = 720 + 50 + 50 + 50,
+ .vdisplay = 1280,
+ .vsync_start = 1280 + 30,
+ .vsync_end = 1280 + 30 + 4,
+ .vtotal = 1280 + 30 + 4 + 12,
+ .vrefresh = 60,
+ .clock = 41600,
+ .width_mm = 62,
+ .height_mm = 110,
+};
+
+static int ltk500hd1829_get_modes(struct drm_panel *panel,
+ struct drm_connector *connector)
+{
+ struct ltk500hd1829 *ctx = panel_to_ltk500hd1829(panel);
+ struct drm_display_mode *mode;
+
+ mode = drm_mode_duplicate(connector->dev, &default_mode);
+ if (!mode) {
+ DRM_DEV_ERROR(ctx->dev, "failed to add mode %ux%u@%u\n",
+ default_mode.hdisplay, default_mode.vdisplay,
+ default_mode.vrefresh);
+ return -ENOMEM;
+ }
+
+ drm_mode_set_name(mode);
+
+ mode->type = DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED;
+ connector->display_info.width_mm = mode->width_mm;
+ connector->display_info.height_mm = mode->height_mm;
+ drm_mode_probed_add(connector, mode);
+
+ return 1;
+}
+
+static const struct drm_panel_funcs ltk500hd1829_funcs = {
+ .unprepare = ltk500hd1829_unprepare,
+ .prepare = ltk500hd1829_prepare,
+ .get_modes = ltk500hd1829_get_modes,
+};
+
+static int ltk500hd1829_probe(struct mipi_dsi_device *dsi)
+{
+ struct ltk500hd1829 *ctx;
+ struct device *dev = &dsi->dev;
+ int ret;
+
+ ctx = devm_kzalloc(&dsi->dev, sizeof(*ctx), GFP_KERNEL);
+ if (!ctx)
+ return -ENOMEM;
+
+ ctx->reset_gpio = devm_gpiod_get_optional(dev, "reset", GPIOD_OUT_LOW);
+ if (IS_ERR(ctx->reset_gpio)) {
+ DRM_DEV_ERROR(dev, "cannot get reset gpio\n");
+ return PTR_ERR(ctx->reset_gpio);
+ }
+
+ ctx->vcc = devm_regulator_get(dev, "vcc");
+ if (IS_ERR(ctx->vcc)) {
+ ret = PTR_ERR(ctx->vcc);
+ if (ret != -EPROBE_DEFER)
+ DRM_DEV_ERROR(dev,
+ "Failed to request vcc regulator: %d\n",
+ ret);
+ return ret;
+ }
+
+ ctx->iovcc = devm_regulator_get(dev, "iovcc");
+ if (IS_ERR(ctx->iovcc)) {
+ ret = PTR_ERR(ctx->iovcc);
+ if (ret != -EPROBE_DEFER)
+ DRM_DEV_ERROR(dev,
+ "Failed to request iovcc regulator: %d\n",
+ ret);
+ return ret;
+ }
+
+ mipi_dsi_set_drvdata(dsi, ctx);
+
+ ctx->dev = dev;
+
+ dsi->lanes = 4;
+ dsi->format = MIPI_DSI_FMT_RGB888;
+ dsi->mode_flags = MIPI_DSI_MODE_VIDEO | MIPI_DSI_MODE_VIDEO_BURST |
+ MIPI_DSI_MODE_LPM | MIPI_DSI_MODE_EOT_PACKET;
+
+ drm_panel_init(&ctx->panel, &dsi->dev, &ltk500hd1829_funcs,
+ DRM_MODE_CONNECTOR_DSI);
+
+ ret = drm_panel_of_backlight(&ctx->panel);
+ if (ret)
+ return ret;
+
+ drm_panel_add(&ctx->panel);
+
+ ret = mipi_dsi_attach(dsi);
+ if (ret < 0) {
+ DRM_DEV_ERROR(dev, "mipi_dsi_attach failed: %d\n", ret);
+ drm_panel_remove(&ctx->panel);
+ return ret;
+ }
+
+ return 0;
+}
+
+static void ltk500hd1829_shutdown(struct mipi_dsi_device *dsi)
+{
+ struct ltk500hd1829 *ctx = mipi_dsi_get_drvdata(dsi);
+ int ret;
+
+ ret = drm_panel_unprepare(&ctx->panel);
+ if (ret < 0)
+ DRM_DEV_ERROR(&dsi->dev, "Failed to unprepare panel: %d\n",
+ ret);
+
+ ret = drm_panel_disable(&ctx->panel);
+ if (ret < 0)
+ DRM_DEV_ERROR(&dsi->dev, "Failed to disable panel: %d\n",
+ ret);
+}
+
+static int ltk500hd1829_remove(struct mipi_dsi_device *dsi)
+{
+ struct ltk500hd1829 *ctx = mipi_dsi_get_drvdata(dsi);
+ int ret;
+
+ ltk500hd1829_shutdown(dsi);
+
+ ret = mipi_dsi_detach(dsi);
+ if (ret < 0)
+ DRM_DEV_ERROR(&dsi->dev, "failed to detach from DSI host: %d\n",
+ ret);
+
+ drm_panel_remove(&ctx->panel);
+
+ return 0;
+}
+
+static const struct of_device_id ltk500hd1829_of_match[] = {
+ { .compatible = "leadtek,ltk500hd1829", },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, ltk500hd1829_of_match);
+
+static struct mipi_dsi_driver ltk500hd1829_driver = {
+ .driver = {
+ .name = "panel-leadtek-ltk500hd1829",
+ .of_match_table = ltk500hd1829_of_match,
+ },
+ .probe = ltk500hd1829_probe,
+ .remove = ltk500hd1829_remove,
+ .shutdown = ltk500hd1829_shutdown,
+};
+module_mipi_dsi_driver(ltk500hd1829_driver);
+
+MODULE_AUTHOR("Heiko Stuebner <heiko.stuebner@theobroma-systems.com>");
+MODULE_DESCRIPTION("Leadtek LTK500HD1829 panel driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/gpu/drm/panel/panel-lg-lb035q02.c b/drivers/gpu/drm/panel/panel-lg-lb035q02.c
index 7a1385e834f0..e90efeaba4ad 100644
--- a/drivers/gpu/drm/panel/panel-lg-lb035q02.c
+++ b/drivers/gpu/drm/panel/panel-lg-lb035q02.c
@@ -141,12 +141,12 @@ static const struct drm_display_mode lb035q02_mode = {
.height_mm = 53,
};
-static int lb035q02_get_modes(struct drm_panel *panel)
+static int lb035q02_get_modes(struct drm_panel *panel,
+ struct drm_connector *connector)
{
- struct drm_connector *connector = panel->connector;
struct drm_display_mode *mode;
- mode = drm_mode_duplicate(panel->drm, &lb035q02_mode);
+ mode = drm_mode_duplicate(connector->dev, &lb035q02_mode);
if (!mode)
return -ENOMEM;
diff --git a/drivers/gpu/drm/panel/panel-lg-lg4573.c b/drivers/gpu/drm/panel/panel-lg-lg4573.c
index db4865a4c2b9..b262b53dbd85 100644
--- a/drivers/gpu/drm/panel/panel-lg-lg4573.c
+++ b/drivers/gpu/drm/panel/panel-lg-lg4573.c
@@ -42,7 +42,7 @@ static int lg4573_spi_write_u16(struct lg4573 *ctx, u16 data)
struct spi_transfer xfer = {
.len = 2,
};
- u16 temp = cpu_to_be16(data);
+ __be16 temp = cpu_to_be16(data);
struct spi_message msg;
dev_dbg(ctx->panel.dev, "writing data: %x\n", data);
@@ -209,14 +209,14 @@ static const struct drm_display_mode default_mode = {
.vrefresh = 60,
};
-static int lg4573_get_modes(struct drm_panel *panel)
+static int lg4573_get_modes(struct drm_panel *panel,
+ struct drm_connector *connector)
{
- struct drm_connector *connector = panel->connector;
struct drm_display_mode *mode;
- mode = drm_mode_duplicate(panel->drm, &default_mode);
+ mode = drm_mode_duplicate(connector->dev, &default_mode);
if (!mode) {
- dev_err(panel->drm->dev, "failed to add mode %ux%ux@%u\n",
+ dev_err(panel->dev, "failed to add mode %ux%u@%u\n",
default_mode.hdisplay, default_mode.vdisplay,
default_mode.vrefresh);
return -ENOMEM;
@@ -227,8 +227,8 @@ static int lg4573_get_modes(struct drm_panel *panel)
mode->type = DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED;
drm_mode_probed_add(connector, mode);
- panel->connector->display_info.width_mm = 61;
- panel->connector->display_info.height_mm = 103;
+ connector->display_info.width_mm = 61;
+ connector->display_info.height_mm = 103;
return 1;
}
diff --git a/drivers/gpu/drm/panel/panel-lvds.c b/drivers/gpu/drm/panel/panel-lvds.c
index 2405f26e5d31..5ce3f4a2b7a1 100644
--- a/drivers/gpu/drm/panel/panel-lvds.c
+++ b/drivers/gpu/drm/panel/panel-lvds.c
@@ -8,7 +8,6 @@
* Contact: Laurent Pinchart (laurent.pinchart@ideasonboard.com)
*/
-#include <linux/backlight.h>
#include <linux/gpio/consumer.h>
#include <linux/module.h>
#include <linux/of_platform.h>
@@ -34,7 +33,6 @@ struct panel_lvds {
unsigned int bus_format;
bool data_mirror;
- struct backlight_device *backlight;
struct regulator *supply;
struct gpio_desc *enable_gpio;
@@ -46,19 +44,6 @@ static inline struct panel_lvds *to_panel_lvds(struct drm_panel *panel)
return container_of(panel, struct panel_lvds, panel);
}
-static int panel_lvds_disable(struct drm_panel *panel)
-{
- struct panel_lvds *lvds = to_panel_lvds(panel);
-
- if (lvds->backlight) {
- lvds->backlight->props.power = FB_BLANK_POWERDOWN;
- lvds->backlight->props.state |= BL_CORE_FBBLANK;
- backlight_update_status(lvds->backlight);
- }
-
- return 0;
-}
-
static int panel_lvds_unprepare(struct drm_panel *panel)
{
struct panel_lvds *lvds = to_panel_lvds(panel);
@@ -93,26 +78,13 @@ static int panel_lvds_prepare(struct drm_panel *panel)
return 0;
}
-static int panel_lvds_enable(struct drm_panel *panel)
+static int panel_lvds_get_modes(struct drm_panel *panel,
+ struct drm_connector *connector)
{
struct panel_lvds *lvds = to_panel_lvds(panel);
-
- if (lvds->backlight) {
- lvds->backlight->props.state &= ~BL_CORE_FBBLANK;
- lvds->backlight->props.power = FB_BLANK_UNBLANK;
- backlight_update_status(lvds->backlight);
- }
-
- return 0;
-}
-
-static int panel_lvds_get_modes(struct drm_panel *panel)
-{
- struct panel_lvds *lvds = to_panel_lvds(panel);
- struct drm_connector *connector = lvds->panel.connector;
struct drm_display_mode *mode;
- mode = drm_mode_create(lvds->panel.drm);
+ mode = drm_mode_create(connector->dev);
if (!mode)
return 0;
@@ -132,10 +104,8 @@ static int panel_lvds_get_modes(struct drm_panel *panel)
}
static const struct drm_panel_funcs panel_lvds_funcs = {
- .disable = panel_lvds_disable,
.unprepare = panel_lvds_unprepare,
.prepare = panel_lvds_prepare,
- .enable = panel_lvds_enable,
.get_modes = panel_lvds_get_modes,
};
@@ -242,10 +212,6 @@ static int panel_lvds_probe(struct platform_device *pdev)
return ret;
}
- lvds->backlight = devm_of_find_backlight(lvds->dev);
- if (IS_ERR(lvds->backlight))
- return PTR_ERR(lvds->backlight);
-
/*
* TODO: Handle all power supplies specified in the DT node in a generic
* way for panels that don't care about power supply ordering. LVDS
@@ -257,6 +223,10 @@ static int panel_lvds_probe(struct platform_device *pdev)
drm_panel_init(&lvds->panel, lvds->dev, &panel_lvds_funcs,
DRM_MODE_CONNECTOR_LVDS);
+ ret = drm_panel_of_backlight(&lvds->panel);
+ if (ret)
+ return ret;
+
ret = drm_panel_add(&lvds->panel);
if (ret < 0)
return ret;
@@ -271,7 +241,7 @@ static int panel_lvds_remove(struct platform_device *pdev)
drm_panel_remove(&lvds->panel);
- panel_lvds_disable(&lvds->panel);
+ drm_panel_disable(&lvds->panel);
return 0;
}
diff --git a/drivers/gpu/drm/panel/panel-nec-nl8048hl11.c b/drivers/gpu/drm/panel/panel-nec-nl8048hl11.c
index fd593532ab23..c4f83f6384e1 100644
--- a/drivers/gpu/drm/panel/panel-nec-nl8048hl11.c
+++ b/drivers/gpu/drm/panel/panel-nec-nl8048hl11.c
@@ -123,12 +123,12 @@ static const struct drm_display_mode nl8048_mode = {
.height_mm = 53,
};
-static int nl8048_get_modes(struct drm_panel *panel)
+static int nl8048_get_modes(struct drm_panel *panel,
+ struct drm_connector *connector)
{
- struct drm_connector *connector = panel->connector;
struct drm_display_mode *mode;
- mode = drm_mode_duplicate(panel->drm, &nl8048_mode);
+ mode = drm_mode_duplicate(connector->dev, &nl8048_mode);
if (!mode)
return -ENOMEM;
diff --git a/drivers/gpu/drm/panel/panel-novatek-nt39016.c b/drivers/gpu/drm/panel/panel-novatek-nt39016.c
index 60ccedce530c..a470810f7dbe 100644
--- a/drivers/gpu/drm/panel/panel-novatek-nt39016.c
+++ b/drivers/gpu/drm/panel/panel-novatek-nt39016.c
@@ -206,14 +206,14 @@ static int nt39016_disable(struct drm_panel *drm_panel)
return 0;
}
-static int nt39016_get_modes(struct drm_panel *drm_panel)
+static int nt39016_get_modes(struct drm_panel *drm_panel,
+ struct drm_connector *connector)
{
struct nt39016 *panel = to_nt39016(drm_panel);
const struct nt39016_panel_info *panel_info = panel->panel_info;
- struct drm_connector *connector = drm_panel->connector;
struct drm_display_mode *mode;
- mode = drm_mode_duplicate(drm_panel->drm, &panel_info->display_mode);
+ mode = drm_mode_duplicate(connector->dev, &panel_info->display_mode);
if (!mode)
return -ENOMEM;
diff --git a/drivers/gpu/drm/panel/panel-olimex-lcd-olinuxino.c b/drivers/gpu/drm/panel/panel-olimex-lcd-olinuxino.c
index f2a72ee6ee07..09deb99981a4 100644
--- a/drivers/gpu/drm/panel/panel-olimex-lcd-olinuxino.c
+++ b/drivers/gpu/drm/panel/panel-olimex-lcd-olinuxino.c
@@ -6,7 +6,6 @@
* Author: Stefan Mavrodiev <stefan@olimex.com>
*/
-#include <linux/backlight.h>
#include <linux/crc32.h>
#include <linux/gpio/consumer.h>
#include <linux/i2c.h>
@@ -68,7 +67,6 @@ struct lcd_olinuxino {
bool prepared;
bool enabled;
- struct backlight_device *backlight;
struct regulator *supply;
struct gpio_desc *enable_gpio;
@@ -87,8 +85,6 @@ static int lcd_olinuxino_disable(struct drm_panel *panel)
if (!lcd->enabled)
return 0;
- backlight_disable(lcd->backlight);
-
lcd->enabled = false;
return 0;
@@ -134,19 +130,16 @@ static int lcd_olinuxino_enable(struct drm_panel *panel)
if (lcd->enabled)
return 0;
- backlight_enable(lcd->backlight);
-
lcd->enabled = true;
return 0;
}
-static int lcd_olinuxino_get_modes(struct drm_panel *panel)
+static int lcd_olinuxino_get_modes(struct drm_panel *panel,
+ struct drm_connector *connector)
{
struct lcd_olinuxino *lcd = to_lcd_olinuxino(panel);
- struct drm_connector *connector = lcd->panel.connector;
struct lcd_olinuxino_info *lcd_info = &lcd->eeprom.info;
- struct drm_device *drm = lcd->panel.drm;
struct lcd_olinuxino_mode *lcd_mode;
struct drm_display_mode *mode;
u32 i, num = 0;
@@ -155,13 +148,13 @@ static int lcd_olinuxino_get_modes(struct drm_panel *panel)
lcd_mode = (struct lcd_olinuxino_mode *)
&lcd->eeprom.reserved[i * sizeof(*lcd_mode)];
- mode = drm_mode_create(drm);
+ mode = drm_mode_create(connector->dev);
if (!mode) {
- dev_err(drm->dev, "failed to add mode %ux%u@%u\n",
+ dev_err(panel->dev, "failed to add mode %ux%u@%u\n",
lcd_mode->hactive,
lcd_mode->vactive,
lcd_mode->refresh);
- continue;
+ continue;
}
mode->clock = lcd_mode->pixelclock;
@@ -284,13 +277,13 @@ static int lcd_olinuxino_probe(struct i2c_client *client,
if (IS_ERR(lcd->enable_gpio))
return PTR_ERR(lcd->enable_gpio);
- lcd->backlight = devm_of_find_backlight(dev);
- if (IS_ERR(lcd->backlight))
- return PTR_ERR(lcd->backlight);
-
drm_panel_init(&lcd->panel, dev, &lcd_olinuxino_funcs,
DRM_MODE_CONNECTOR_DPI);
+ ret = drm_panel_of_backlight(&lcd->panel);
+ if (ret)
+ return ret;
+
return drm_panel_add(&lcd->panel);
}
@@ -300,8 +293,8 @@ static int lcd_olinuxino_remove(struct i2c_client *client)
drm_panel_remove(&panel->panel);
- lcd_olinuxino_disable(&panel->panel);
- lcd_olinuxino_unprepare(&panel->panel);
+ drm_panel_disable(&panel->panel);
+ drm_panel_unprepare(&panel->panel);
return 0;
}
diff --git a/drivers/gpu/drm/panel/panel-orisetech-otm8009a.c b/drivers/gpu/drm/panel/panel-orisetech-otm8009a.c
index bf1f928b215f..bb0c992171e8 100644
--- a/drivers/gpu/drm/panel/panel-orisetech-otm8009a.c
+++ b/drivers/gpu/drm/panel/panel-orisetech-otm8009a.c
@@ -349,11 +349,12 @@ static int otm8009a_enable(struct drm_panel *panel)
return 0;
}
-static int otm8009a_get_modes(struct drm_panel *panel)
+static int otm8009a_get_modes(struct drm_panel *panel,
+ struct drm_connector *connector)
{
struct drm_display_mode *mode;
- mode = drm_mode_duplicate(panel->drm, &default_mode);
+ mode = drm_mode_duplicate(connector->dev, &default_mode);
if (!mode) {
DRM_ERROR("failed to add mode %ux%ux@%u\n",
default_mode.hdisplay, default_mode.vdisplay,
@@ -364,10 +365,10 @@ static int otm8009a_get_modes(struct drm_panel *panel)
drm_mode_set_name(mode);
mode->type = DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED;
- drm_mode_probed_add(panel->connector, mode);
+ drm_mode_probed_add(connector, mode);
- panel->connector->display_info.width_mm = mode->width_mm;
- panel->connector->display_info.height_mm = mode->height_mm;
+ connector->display_info.width_mm = mode->width_mm;
+ connector->display_info.height_mm = mode->height_mm;
return 1;
}
diff --git a/drivers/gpu/drm/panel/panel-osd-osd101t2587-53ts.c b/drivers/gpu/drm/panel/panel-osd-osd101t2587-53ts.c
index 2b40913899d8..3a0229d60095 100644
--- a/drivers/gpu/drm/panel/panel-osd-osd101t2587-53ts.c
+++ b/drivers/gpu/drm/panel/panel-osd-osd101t2587-53ts.c
@@ -4,7 +4,6 @@
* Author: Peter Ujfalusi <peter.ujfalusi@ti.com>
*/
-#include <linux/backlight.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/regulator/consumer.h>
@@ -20,7 +19,6 @@ struct osd101t2587_panel {
struct drm_panel base;
struct mipi_dsi_device *dsi;
- struct backlight_device *backlight;
struct regulator *supply;
bool prepared;
@@ -42,8 +40,6 @@ static int osd101t2587_panel_disable(struct drm_panel *panel)
if (!osd101t2587->enabled)
return 0;
- backlight_disable(osd101t2587->backlight);
-
ret = mipi_dsi_shutdown_peripheral(osd101t2587->dsi);
osd101t2587->enabled = false;
@@ -91,8 +87,6 @@ static int osd101t2587_panel_enable(struct drm_panel *panel)
if (ret)
return ret;
- backlight_enable(osd101t2587->backlight);
-
osd101t2587->enabled = true;
return ret;
@@ -112,14 +106,15 @@ static const struct drm_display_mode default_mode_osd101t2587 = {
.flags = DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC,
};
-static int osd101t2587_panel_get_modes(struct drm_panel *panel)
+static int osd101t2587_panel_get_modes(struct drm_panel *panel,
+ struct drm_connector *connector)
{
struct osd101t2587_panel *osd101t2587 = ti_osd_panel(panel);
struct drm_display_mode *mode;
- mode = drm_mode_duplicate(panel->drm, osd101t2587->default_mode);
+ mode = drm_mode_duplicate(connector->dev, osd101t2587->default_mode);
if (!mode) {
- dev_err(panel->drm->dev, "failed to add mode %ux%ux@%u\n",
+ dev_err(panel->dev, "failed to add mode %ux%u@%u\n",
osd101t2587->default_mode->hdisplay,
osd101t2587->default_mode->vdisplay,
osd101t2587->default_mode->vrefresh);
@@ -128,10 +123,10 @@ static int osd101t2587_panel_get_modes(struct drm_panel *panel)
drm_mode_set_name(mode);
- drm_mode_probed_add(panel->connector, mode);
+ drm_mode_probed_add(connector, mode);
- panel->connector->display_info.width_mm = 217;
- panel->connector->display_info.height_mm = 136;
+ connector->display_info.width_mm = 217;
+ connector->display_info.height_mm = 136;
return 1;
}
@@ -157,18 +152,19 @@ MODULE_DEVICE_TABLE(of, osd101t2587_of_match);
static int osd101t2587_panel_add(struct osd101t2587_panel *osd101t2587)
{
struct device *dev = &osd101t2587->dsi->dev;
+ int ret;
osd101t2587->supply = devm_regulator_get(dev, "power");
if (IS_ERR(osd101t2587->supply))
return PTR_ERR(osd101t2587->supply);
- osd101t2587->backlight = devm_of_find_backlight(dev);
- if (IS_ERR(osd101t2587->backlight))
- return PTR_ERR(osd101t2587->backlight);
-
drm_panel_init(&osd101t2587->base, &osd101t2587->dsi->dev,
&osd101t2587_panel_funcs, DRM_MODE_CONNECTOR_DSI);
+ ret = drm_panel_of_backlight(&osd101t2587->base);
+ if (ret)
+ return ret;
+
return drm_panel_add(&osd101t2587->base);
}
@@ -214,12 +210,11 @@ static int osd101t2587_panel_remove(struct mipi_dsi_device *dsi)
struct osd101t2587_panel *osd101t2587 = mipi_dsi_get_drvdata(dsi);
int ret;
- ret = osd101t2587_panel_disable(&osd101t2587->base);
+ ret = drm_panel_disable(&osd101t2587->base);
if (ret < 0)
dev_warn(&dsi->dev, "failed to disable panel: %d\n", ret);
- osd101t2587_panel_unprepare(&osd101t2587->base);
-
+ drm_panel_unprepare(&osd101t2587->base);
drm_panel_remove(&osd101t2587->base);
ret = mipi_dsi_detach(dsi);
@@ -233,8 +228,8 @@ static void osd101t2587_panel_shutdown(struct mipi_dsi_device *dsi)
{
struct osd101t2587_panel *osd101t2587 = mipi_dsi_get_drvdata(dsi);
- osd101t2587_panel_disable(&osd101t2587->base);
- osd101t2587_panel_unprepare(&osd101t2587->base);
+ drm_panel_disable(&osd101t2587->base);
+ drm_panel_unprepare(&osd101t2587->base);
}
static struct mipi_dsi_driver osd101t2587_panel_driver = {
diff --git a/drivers/gpu/drm/panel/panel-panasonic-vvx10f034n00.c b/drivers/gpu/drm/panel/panel-panasonic-vvx10f034n00.c
index 664605071d34..69693451462e 100644
--- a/drivers/gpu/drm/panel/panel-panasonic-vvx10f034n00.c
+++ b/drivers/gpu/drm/panel/panel-panasonic-vvx10f034n00.c
@@ -7,7 +7,6 @@
* Based on AUO panel driver by Rob Clark <robdclark@gmail.com>
*/
-#include <linux/backlight.h>
#include <linux/delay.h>
#include <linux/module.h>
#include <linux/of.h>
@@ -31,7 +30,6 @@ struct wuxga_nt_panel {
struct drm_panel base;
struct mipi_dsi_device *dsi;
- struct backlight_device *backlight;
struct regulator *supply;
bool prepared;
@@ -62,12 +60,6 @@ static int wuxga_nt_panel_disable(struct drm_panel *panel)
mipi_ret = mipi_dsi_shutdown_peripheral(wuxga_nt->dsi);
- if (wuxga_nt->backlight) {
- wuxga_nt->backlight->props.power = FB_BLANK_POWERDOWN;
- wuxga_nt->backlight->props.state |= BL_CORE_FBBLANK;
- bl_ret = backlight_update_status(wuxga_nt->backlight);
- }
-
wuxga_nt->enabled = false;
return mipi_ret ? mipi_ret : bl_ret;
@@ -142,12 +134,6 @@ static int wuxga_nt_panel_enable(struct drm_panel *panel)
if (wuxga_nt->enabled)
return 0;
- if (wuxga_nt->backlight) {
- wuxga_nt->backlight->props.power = FB_BLANK_UNBLANK;
- wuxga_nt->backlight->props.state &= ~BL_CORE_FBBLANK;
- backlight_update_status(wuxga_nt->backlight);
- }
-
wuxga_nt->enabled = true;
return 0;
@@ -166,24 +152,25 @@ static const struct drm_display_mode default_mode = {
.vrefresh = 60,
};
-static int wuxga_nt_panel_get_modes(struct drm_panel *panel)
+static int wuxga_nt_panel_get_modes(struct drm_panel *panel,
+ struct drm_connector *connector)
{
struct drm_display_mode *mode;
- mode = drm_mode_duplicate(panel->drm, &default_mode);
+ mode = drm_mode_duplicate(connector->dev, &default_mode);
if (!mode) {
- dev_err(panel->drm->dev, "failed to add mode %ux%ux@%u\n",
- default_mode.hdisplay, default_mode.vdisplay,
- default_mode.vrefresh);
+ dev_err(panel->dev, "failed to add mode %ux%u@%u\n",
+ default_mode.hdisplay, default_mode.vdisplay,
+ default_mode.vrefresh);
return -ENOMEM;
}
drm_mode_set_name(mode);
- drm_mode_probed_add(panel->connector, mode);
+ drm_mode_probed_add(connector, mode);
- panel->connector->display_info.width_mm = 217;
- panel->connector->display_info.height_mm = 136;
+ connector->display_info.width_mm = 217;
+ connector->display_info.height_mm = 136;
return 1;
}
@@ -205,7 +192,6 @@ MODULE_DEVICE_TABLE(of, wuxga_nt_of_match);
static int wuxga_nt_panel_add(struct wuxga_nt_panel *wuxga_nt)
{
struct device *dev = &wuxga_nt->dsi->dev;
- struct device_node *np;
int ret;
wuxga_nt->mode = &default_mode;
@@ -214,38 +200,20 @@ static int wuxga_nt_panel_add(struct wuxga_nt_panel *wuxga_nt)
if (IS_ERR(wuxga_nt->supply))
return PTR_ERR(wuxga_nt->supply);
- np = of_parse_phandle(dev->of_node, "backlight", 0);
- if (np) {
- wuxga_nt->backlight = of_find_backlight_by_node(np);
- of_node_put(np);
-
- if (!wuxga_nt->backlight)
- return -EPROBE_DEFER;
- }
-
drm_panel_init(&wuxga_nt->base, &wuxga_nt->dsi->dev,
&wuxga_nt_panel_funcs, DRM_MODE_CONNECTOR_DSI);
- ret = drm_panel_add(&wuxga_nt->base);
- if (ret < 0)
- goto put_backlight;
-
- return 0;
-
-put_backlight:
- if (wuxga_nt->backlight)
- put_device(&wuxga_nt->backlight->dev);
+ ret = drm_panel_of_backlight(&wuxga_nt->base);
+ if (ret)
+ return ret;
- return ret;
+ return drm_panel_add(&wuxga_nt->base);
}
static void wuxga_nt_panel_del(struct wuxga_nt_panel *wuxga_nt)
{
if (wuxga_nt->base.dev)
drm_panel_remove(&wuxga_nt->base);
-
- if (wuxga_nt->backlight)
- put_device(&wuxga_nt->backlight->dev);
}
static int wuxga_nt_panel_probe(struct mipi_dsi_device *dsi)
@@ -280,7 +248,7 @@ static int wuxga_nt_panel_remove(struct mipi_dsi_device *dsi)
struct wuxga_nt_panel *wuxga_nt = mipi_dsi_get_drvdata(dsi);
int ret;
- ret = wuxga_nt_panel_disable(&wuxga_nt->base);
+ ret = drm_panel_disable(&wuxga_nt->base);
if (ret < 0)
dev_err(&dsi->dev, "failed to disable panel: %d\n", ret);
@@ -297,7 +265,7 @@ static void wuxga_nt_panel_shutdown(struct mipi_dsi_device *dsi)
{
struct wuxga_nt_panel *wuxga_nt = mipi_dsi_get_drvdata(dsi);
- wuxga_nt_panel_disable(&wuxga_nt->base);
+ drm_panel_disable(&wuxga_nt->base);
}
static struct mipi_dsi_driver wuxga_nt_panel_driver = {
diff --git a/drivers/gpu/drm/panel/panel-raspberrypi-touchscreen.c b/drivers/gpu/drm/panel/panel-raspberrypi-touchscreen.c
index 09824e92fc78..8f078b7dd89e 100644
--- a/drivers/gpu/drm/panel/panel-raspberrypi-touchscreen.c
+++ b/drivers/gpu/drm/panel/panel-raspberrypi-touchscreen.c
@@ -44,8 +44,6 @@
#include <linux/delay.h>
#include <linux/err.h>
#include <linux/fb.h>
-#include <linux/gpio.h>
-#include <linux/gpio/consumer.h>
#include <linux/i2c.h>
#include <linux/module.h>
#include <linux/of.h>
@@ -311,10 +309,9 @@ static int rpi_touchscreen_enable(struct drm_panel *panel)
return 0;
}
-static int rpi_touchscreen_get_modes(struct drm_panel *panel)
+static int rpi_touchscreen_get_modes(struct drm_panel *panel,
+ struct drm_connector *connector)
{
- struct drm_connector *connector = panel->connector;
- struct drm_device *drm = panel->drm;
unsigned int i, num = 0;
static const u32 bus_format = MEDIA_BUS_FMT_RGB888_1X24;
@@ -322,9 +319,9 @@ static int rpi_touchscreen_get_modes(struct drm_panel *panel)
const struct drm_display_mode *m = &rpi_touchscreen_modes[i];
struct drm_display_mode *mode;
- mode = drm_mode_duplicate(drm, m);
+ mode = drm_mode_duplicate(connector->dev, m);
if (!mode) {
- dev_err(drm->dev, "failed to add mode %ux%u@%u\n",
+ dev_err(panel->dev, "failed to add mode %ux%u@%u\n",
m->hdisplay, m->vdisplay, m->vrefresh);
continue;
}
diff --git a/drivers/gpu/drm/panel/panel-raydium-rm67191.c b/drivers/gpu/drm/panel/panel-raydium-rm67191.c
index fd67fc6185c4..313637d53d28 100644
--- a/drivers/gpu/drm/panel/panel-raydium-rm67191.c
+++ b/drivers/gpu/drm/panel/panel-raydium-rm67191.c
@@ -436,12 +436,12 @@ static int rad_panel_disable(struct drm_panel *panel)
return 0;
}
-static int rad_panel_get_modes(struct drm_panel *panel)
+static int rad_panel_get_modes(struct drm_panel *panel,
+ struct drm_connector *connector)
{
- struct drm_connector *connector = panel->connector;
struct drm_display_mode *mode;
- mode = drm_mode_duplicate(panel->drm, &default_mode);
+ mode = drm_mode_duplicate(connector->dev, &default_mode);
if (!mode) {
DRM_DEV_ERROR(panel->dev, "failed to add mode %ux%ux@%u\n",
default_mode.hdisplay, default_mode.vdisplay,
@@ -451,7 +451,7 @@ static int rad_panel_get_modes(struct drm_panel *panel)
drm_mode_set_name(mode);
mode->type = DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED;
- drm_mode_probed_add(panel->connector, mode);
+ drm_mode_probed_add(connector, mode);
connector->display_info.width_mm = mode->width_mm;
connector->display_info.height_mm = mode->height_mm;
diff --git a/drivers/gpu/drm/panel/panel-raydium-rm68200.c b/drivers/gpu/drm/panel/panel-raydium-rm68200.c
index 994e855721f4..e8982948e0ea 100644
--- a/drivers/gpu/drm/panel/panel-raydium-rm68200.c
+++ b/drivers/gpu/drm/panel/panel-raydium-rm68200.c
@@ -6,9 +6,9 @@
* Yannick Fertre <yannick.fertre@st.com>
*/
-#include <linux/backlight.h>
#include <linux/delay.h>
#include <linux/gpio/consumer.h>
+#include <linux/mod_devicetable.h>
#include <linux/module.h>
#include <linux/regulator/consumer.h>
@@ -78,7 +78,6 @@ struct rm68200 {
struct drm_panel panel;
struct gpio_desc *reset_gpio;
struct regulator *supply;
- struct backlight_device *backlight;
bool prepared;
bool enabled;
};
@@ -242,8 +241,6 @@ static int rm68200_disable(struct drm_panel *panel)
if (!ctx->enabled)
return 0;
- backlight_disable(ctx->backlight);
-
ctx->enabled = false;
return 0;
@@ -328,18 +325,17 @@ static int rm68200_enable(struct drm_panel *panel)
if (ctx->enabled)
return 0;
- backlight_enable(ctx->backlight);
-
ctx->enabled = true;
return 0;
}
-static int rm68200_get_modes(struct drm_panel *panel)
+static int rm68200_get_modes(struct drm_panel *panel,
+ struct drm_connector *connector)
{
struct drm_display_mode *mode;
- mode = drm_mode_duplicate(panel->drm, &default_mode);
+ mode = drm_mode_duplicate(connector->dev, &default_mode);
if (!mode) {
DRM_ERROR("failed to add mode %ux%ux@%u\n",
default_mode.hdisplay, default_mode.vdisplay,
@@ -350,10 +346,10 @@ static int rm68200_get_modes(struct drm_panel *panel)
drm_mode_set_name(mode);
mode->type = DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED;
- drm_mode_probed_add(panel->connector, mode);
+ drm_mode_probed_add(connector, mode);
- panel->connector->display_info.width_mm = mode->width_mm;
- panel->connector->display_info.height_mm = mode->height_mm;
+ connector->display_info.width_mm = mode->width_mm;
+ connector->display_info.height_mm = mode->height_mm;
return 1;
}
@@ -391,10 +387,6 @@ static int rm68200_probe(struct mipi_dsi_device *dsi)
return ret;
}
- ctx->backlight = devm_of_find_backlight(dev);
- if (IS_ERR(ctx->backlight))
- return PTR_ERR(ctx->backlight);
-
mipi_dsi_set_drvdata(dsi, ctx);
ctx->dev = dev;
@@ -407,6 +399,10 @@ static int rm68200_probe(struct mipi_dsi_device *dsi)
drm_panel_init(&ctx->panel, dev, &rm68200_drm_funcs,
DRM_MODE_CONNECTOR_DSI);
+ ret = drm_panel_of_backlight(&ctx->panel);
+ if (ret)
+ return ret;
+
drm_panel_add(&ctx->panel);
ret = mipi_dsi_attach(dsi);
diff --git a/drivers/gpu/drm/panel/panel-rocktech-jh057n00900.c b/drivers/gpu/drm/panel/panel-rocktech-jh057n00900.c
index 31234b79d3b1..38ff742bc120 100644
--- a/drivers/gpu/drm/panel/panel-rocktech-jh057n00900.c
+++ b/drivers/gpu/drm/panel/panel-rocktech-jh057n00900.c
@@ -5,20 +5,22 @@
* Copyright (C) Purism SPC 2019
*/
-#include <drm/drm_mipi_dsi.h>
-#include <drm/drm_modes.h>
-#include <drm/drm_panel.h>
-#include <drm/drm_print.h>
-#include <linux/backlight.h>
#include <linux/debugfs.h>
#include <linux/delay.h>
#include <linux/gpio/consumer.h>
#include <linux/media-bus-format.h>
+#include <linux/mod_devicetable.h>
#include <linux/module.h>
#include <linux/regulator/consumer.h>
+
#include <video/display_timing.h>
#include <video/mipi_display.h>
+#include <drm/drm_mipi_dsi.h>
+#include <drm/drm_modes.h>
+#include <drm/drm_panel.h>
+#include <drm/drm_print.h>
+
#define DRV_NAME "panel-rocktech-jh057n00900"
/* Manufacturer specific Commands send via DSI */
@@ -47,7 +49,6 @@ struct jh057n {
struct device *dev;
struct drm_panel panel;
struct gpio_desc *reset_gpio;
- struct backlight_device *backlight;
struct regulator *vcc;
struct regulator *iovcc;
bool prepared;
@@ -152,7 +153,7 @@ static int jh057n_enable(struct drm_panel *panel)
return ret;
}
- return backlight_enable(ctx->backlight);
+ return 0;
}
static int jh057n_disable(struct drm_panel *panel)
@@ -160,7 +161,6 @@ static int jh057n_disable(struct drm_panel *panel)
struct jh057n *ctx = panel_to_jh057n(panel);
struct mipi_dsi_device *dsi = to_mipi_dsi_device(ctx->dev);
- backlight_disable(ctx->backlight);
return mipi_dsi_dcs_set_display_off(dsi);
}
@@ -230,12 +230,13 @@ static const struct drm_display_mode default_mode = {
.height_mm = 130,
};
-static int jh057n_get_modes(struct drm_panel *panel)
+static int jh057n_get_modes(struct drm_panel *panel,
+ struct drm_connector *connector)
{
struct jh057n *ctx = panel_to_jh057n(panel);
struct drm_display_mode *mode;
- mode = drm_mode_duplicate(panel->drm, &default_mode);
+ mode = drm_mode_duplicate(connector->dev, &default_mode);
if (!mode) {
DRM_DEV_ERROR(ctx->dev, "Failed to add mode %ux%u@%u\n",
default_mode.hdisplay, default_mode.vdisplay,
@@ -246,9 +247,9 @@ static int jh057n_get_modes(struct drm_panel *panel)
drm_mode_set_name(mode);
mode->type = DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED;
- panel->connector->display_info.width_mm = mode->width_mm;
- panel->connector->display_info.height_mm = mode->height_mm;
- drm_mode_probed_add(panel->connector, mode);
+ connector->display_info.width_mm = mode->width_mm;
+ connector->display_info.height_mm = mode->height_mm;
+ drm_mode_probed_add(connector, mode);
return 1;
}
@@ -320,10 +321,6 @@ static int jh057n_probe(struct mipi_dsi_device *dsi)
dsi->mode_flags = MIPI_DSI_MODE_VIDEO |
MIPI_DSI_MODE_VIDEO_BURST | MIPI_DSI_MODE_VIDEO_SYNC_PULSE;
- ctx->backlight = devm_of_find_backlight(dev);
- if (IS_ERR(ctx->backlight))
- return PTR_ERR(ctx->backlight);
-
ctx->vcc = devm_regulator_get(dev, "vcc");
if (IS_ERR(ctx->vcc)) {
ret = PTR_ERR(ctx->vcc);
@@ -346,6 +343,10 @@ static int jh057n_probe(struct mipi_dsi_device *dsi)
drm_panel_init(&ctx->panel, dev, &jh057n_drm_funcs,
DRM_MODE_CONNECTOR_DSI);
+ ret = drm_panel_of_backlight(&ctx->panel);
+ if (ret)
+ return ret;
+
drm_panel_add(&ctx->panel);
ret = mipi_dsi_attach(dsi);
diff --git a/drivers/gpu/drm/panel/panel-ronbo-rb070d30.c b/drivers/gpu/drm/panel/panel-ronbo-rb070d30.c
index 170a5cda21b9..ef18559e237e 100644
--- a/drivers/gpu/drm/panel/panel-ronbo-rb070d30.c
+++ b/drivers/gpu/drm/panel/panel-ronbo-rb070d30.c
@@ -7,7 +7,6 @@
* This file based on panel-ilitek-ili9881c.c
*/
-#include <linux/backlight.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/err.h>
@@ -29,7 +28,6 @@
struct rb070d30_panel {
struct drm_panel panel;
struct mipi_dsi_device *dsi;
- struct backlight_device *backlight;
struct regulator *supply;
struct {
@@ -84,22 +82,13 @@ static int rb070d30_panel_enable(struct drm_panel *panel)
if (ret)
return ret;
- ret = backlight_enable(ctx->backlight);
- if (ret)
- goto out;
-
return 0;
-
-out:
- mipi_dsi_dcs_enter_sleep_mode(ctx->dsi);
- return ret;
}
static int rb070d30_panel_disable(struct drm_panel *panel)
{
struct rb070d30_panel *ctx = panel_to_rb070d30_panel(panel);
- backlight_disable(ctx->backlight);
return mipi_dsi_dcs_enter_sleep_mode(ctx->dsi);
}
@@ -120,14 +109,14 @@ static const struct drm_display_mode default_mode = {
.height_mm = 85,
};
-static int rb070d30_panel_get_modes(struct drm_panel *panel)
+static int rb070d30_panel_get_modes(struct drm_panel *panel,
+ struct drm_connector *connector)
{
- struct drm_connector *connector = panel->connector;
struct rb070d30_panel *ctx = panel_to_rb070d30_panel(panel);
struct drm_display_mode *mode;
static const u32 bus_format = MEDIA_BUS_FMT_RGB888_1X24;
- mode = drm_mode_duplicate(panel->drm, &default_mode);
+ mode = drm_mode_duplicate(connector->dev, &default_mode);
if (!mode) {
DRM_DEV_ERROR(&ctx->dsi->dev,
"Failed to add mode " DRM_MODE_FMT "\n",
@@ -140,9 +129,9 @@ static int rb070d30_panel_get_modes(struct drm_panel *panel)
mode->type = DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED;
drm_mode_probed_add(connector, mode);
- panel->connector->display_info.bpc = 8;
- panel->connector->display_info.width_mm = mode->width_mm;
- panel->connector->display_info.height_mm = mode->height_mm;
+ connector->display_info.bpc = 8;
+ connector->display_info.width_mm = mode->width_mm;
+ connector->display_info.height_mm = mode->height_mm;
drm_display_info_set_bus_formats(&connector->display_info,
&bus_format, 1);
@@ -208,11 +197,9 @@ static int rb070d30_panel_dsi_probe(struct mipi_dsi_device *dsi)
return PTR_ERR(ctx->gpios.shlr);
}
- ctx->backlight = devm_of_find_backlight(&dsi->dev);
- if (IS_ERR(ctx->backlight)) {
- DRM_DEV_ERROR(&dsi->dev, "Couldn't get our backlight\n");
- return PTR_ERR(ctx->backlight);
- }
+ ret = drm_panel_of_backlight(&ctx->panel);
+ if (ret)
+ return ret;
ret = drm_panel_add(&ctx->panel);
if (ret < 0)
diff --git a/drivers/gpu/drm/panel/panel-samsung-ld9040.c b/drivers/gpu/drm/panel/panel-samsung-ld9040.c
index 250809ba37c7..3c52f15f7a1c 100644
--- a/drivers/gpu/drm/panel/panel-samsung-ld9040.c
+++ b/drivers/gpu/drm/panel/panel-samsung-ld9040.c
@@ -261,9 +261,9 @@ static int ld9040_enable(struct drm_panel *panel)
return 0;
}
-static int ld9040_get_modes(struct drm_panel *panel)
+static int ld9040_get_modes(struct drm_panel *panel,
+ struct drm_connector *connector)
{
- struct drm_connector *connector = panel->connector;
struct ld9040 *ctx = panel_to_ld9040(panel);
struct drm_display_mode *mode;
diff --git a/drivers/gpu/drm/panel/panel-samsung-s6d16d0.c b/drivers/gpu/drm/panel/panel-samsung-s6d16d0.c
index e3a0397e953e..2150043dcf6b 100644
--- a/drivers/gpu/drm/panel/panel-samsung-s6d16d0.c
+++ b/drivers/gpu/drm/panel/panel-samsung-s6d16d0.c
@@ -143,12 +143,12 @@ static int s6d16d0_disable(struct drm_panel *panel)
return 0;
}
-static int s6d16d0_get_modes(struct drm_panel *panel)
+static int s6d16d0_get_modes(struct drm_panel *panel,
+ struct drm_connector *connector)
{
- struct drm_connector *connector = panel->connector;
struct drm_display_mode *mode;
- mode = drm_mode_duplicate(panel->drm, &samsung_s6d16d0_mode);
+ mode = drm_mode_duplicate(connector->dev, &samsung_s6d16d0_mode);
if (!mode) {
DRM_ERROR("bad mode or failed to add mode\n");
return -EINVAL;
diff --git a/drivers/gpu/drm/panel/panel-samsung-s6e3ha2.c b/drivers/gpu/drm/panel/panel-samsung-s6e3ha2.c
index 938ab72c5540..36ebd5a4ac7b 100644
--- a/drivers/gpu/drm/panel/panel-samsung-s6e3ha2.c
+++ b/drivers/gpu/drm/panel/panel-samsung-s6e3ha2.c
@@ -645,13 +645,13 @@ static const struct s6e3ha2_panel_desc samsung_s6e3hf2 = {
.type = HF2_TYPE,
};
-static int s6e3ha2_get_modes(struct drm_panel *panel)
+static int s6e3ha2_get_modes(struct drm_panel *panel,
+ struct drm_connector *connector)
{
- struct drm_connector *connector = panel->connector;
struct s6e3ha2 *ctx = container_of(panel, struct s6e3ha2, panel);
struct drm_display_mode *mode;
- mode = drm_mode_duplicate(panel->drm, ctx->desc->mode);
+ mode = drm_mode_duplicate(connector->dev, ctx->desc->mode);
if (!mode) {
DRM_ERROR("failed to add mode %ux%ux@%u\n",
ctx->desc->mode->hdisplay, ctx->desc->mode->vdisplay,
diff --git a/drivers/gpu/drm/panel/panel-samsung-s6e63j0x03.c b/drivers/gpu/drm/panel/panel-samsung-s6e63j0x03.c
index a60635e9226d..a3570e0a90a8 100644
--- a/drivers/gpu/drm/panel/panel-samsung-s6e63j0x03.c
+++ b/drivers/gpu/drm/panel/panel-samsung-s6e63j0x03.c
@@ -400,12 +400,12 @@ static int s6e63j0x03_enable(struct drm_panel *panel)
return 0;
}
-static int s6e63j0x03_get_modes(struct drm_panel *panel)
+static int s6e63j0x03_get_modes(struct drm_panel *panel,
+ struct drm_connector *connector)
{
- struct drm_connector *connector = panel->connector;
struct drm_display_mode *mode;
- mode = drm_mode_duplicate(panel->drm, &default_mode);
+ mode = drm_mode_duplicate(connector->dev, &default_mode);
if (!mode) {
DRM_ERROR("failed to add mode %ux%ux@%u\n",
default_mode.hdisplay, default_mode.vdisplay,
diff --git a/drivers/gpu/drm/panel/panel-samsung-s6e63m0.c b/drivers/gpu/drm/panel/panel-samsung-s6e63m0.c
index ba01af0b14fd..a5f76eb4fa25 100644
--- a/drivers/gpu/drm/panel/panel-samsung-s6e63m0.c
+++ b/drivers/gpu/drm/panel/panel-samsung-s6e63m0.c
@@ -362,12 +362,12 @@ static int s6e63m0_enable(struct drm_panel *panel)
return 0;
}
-static int s6e63m0_get_modes(struct drm_panel *panel)
+static int s6e63m0_get_modes(struct drm_panel *panel,
+ struct drm_connector *connector)
{
- struct drm_connector *connector = panel->connector;
struct drm_display_mode *mode;
- mode = drm_mode_duplicate(panel->drm, &default_mode);
+ mode = drm_mode_duplicate(connector->dev, &default_mode);
if (!mode) {
DRM_ERROR("failed to add mode %ux%ux@%u\n",
default_mode.hdisplay, default_mode.vdisplay,
diff --git a/drivers/gpu/drm/panel/panel-samsung-s6e8aa0.c b/drivers/gpu/drm/panel/panel-samsung-s6e8aa0.c
index dbced6501204..8a028d2bd0d6 100644
--- a/drivers/gpu/drm/panel/panel-samsung-s6e8aa0.c
+++ b/drivers/gpu/drm/panel/panel-samsung-s6e8aa0.c
@@ -920,9 +920,9 @@ static int s6e8aa0_enable(struct drm_panel *panel)
return 0;
}
-static int s6e8aa0_get_modes(struct drm_panel *panel)
+static int s6e8aa0_get_modes(struct drm_panel *panel,
+ struct drm_connector *connector)
{
- struct drm_connector *connector = panel->connector;
struct s6e8aa0 *ctx = panel_to_s6e8aa0(panel);
struct drm_display_mode *mode;
diff --git a/drivers/gpu/drm/panel/panel-seiko-43wvf1g.c b/drivers/gpu/drm/panel/panel-seiko-43wvf1g.c
index b3619ba443bd..40fcbbbacb2c 100644
--- a/drivers/gpu/drm/panel/panel-seiko-43wvf1g.c
+++ b/drivers/gpu/drm/panel/panel-seiko-43wvf1g.c
@@ -6,7 +6,6 @@
* Based on Panel Simple driver by Thierry Reding <treding@nvidia.com>
*/
-#include <linux/backlight.h>
#include <linux/delay.h>
#include <linux/module.h>
#include <linux/of.h>
@@ -46,7 +45,6 @@ struct seiko_panel {
bool prepared;
bool enabled;
const struct seiko_panel_desc *desc;
- struct backlight_device *backlight;
struct regulator *dvdd;
struct regulator *avdd;
};
@@ -56,10 +54,9 @@ static inline struct seiko_panel *to_seiko_panel(struct drm_panel *panel)
return container_of(panel, struct seiko_panel, base);
}
-static int seiko_panel_get_fixed_modes(struct seiko_panel *panel)
+static int seiko_panel_get_fixed_modes(struct seiko_panel *panel,
+ struct drm_connector *connector)
{
- struct drm_connector *connector = panel->base.connector;
- struct drm_device *drm = panel->base.drm;
struct drm_display_mode *mode;
unsigned int i, num = 0;
@@ -71,9 +68,9 @@ static int seiko_panel_get_fixed_modes(struct seiko_panel *panel)
struct videomode vm;
videomode_from_timing(dt, &vm);
- mode = drm_mode_create(drm);
+ mode = drm_mode_create(connector->dev);
if (!mode) {
- dev_err(drm->dev, "failed to add mode %ux%u\n",
+ dev_err(panel->base.dev, "failed to add mode %ux%u\n",
dt->hactive.typ, dt->vactive.typ);
continue;
}
@@ -92,9 +89,9 @@ static int seiko_panel_get_fixed_modes(struct seiko_panel *panel)
for (i = 0; i < panel->desc->num_modes; i++) {
const struct drm_display_mode *m = &panel->desc->modes[i];
- mode = drm_mode_duplicate(drm, m);
+ mode = drm_mode_duplicate(connector->dev, m);
if (!mode) {
- dev_err(drm->dev, "failed to add mode %ux%u@%u\n",
+ dev_err(panel->base.dev, "failed to add mode %ux%u@%u\n",
m->hdisplay, m->vdisplay, m->vrefresh);
continue;
}
@@ -128,12 +125,6 @@ static int seiko_panel_disable(struct drm_panel *panel)
if (!p->enabled)
return 0;
- if (p->backlight) {
- p->backlight->props.power = FB_BLANK_POWERDOWN;
- p->backlight->props.state |= BL_CORE_FBBLANK;
- backlight_update_status(p->backlight);
- }
-
p->enabled = false;
return 0;
@@ -197,23 +188,18 @@ static int seiko_panel_enable(struct drm_panel *panel)
if (p->enabled)
return 0;
- if (p->backlight) {
- p->backlight->props.state &= ~BL_CORE_FBBLANK;
- p->backlight->props.power = FB_BLANK_UNBLANK;
- backlight_update_status(p->backlight);
- }
-
p->enabled = true;
return 0;
}
-static int seiko_panel_get_modes(struct drm_panel *panel)
+static int seiko_panel_get_modes(struct drm_panel *panel,
+ struct drm_connector *connector)
{
struct seiko_panel *p = to_seiko_panel(panel);
/* add hard-coded panel modes */
- return seiko_panel_get_fixed_modes(p);
+ return seiko_panel_get_fixed_modes(p, connector);
}
static int seiko_panel_get_timings(struct drm_panel *panel,
@@ -245,7 +231,6 @@ static const struct drm_panel_funcs seiko_panel_funcs = {
static int seiko_panel_probe(struct device *dev,
const struct seiko_panel_desc *desc)
{
- struct device_node *backlight;
struct seiko_panel *panel;
int err;
@@ -265,18 +250,13 @@ static int seiko_panel_probe(struct device *dev,
if (IS_ERR(panel->avdd))
return PTR_ERR(panel->avdd);
- backlight = of_parse_phandle(dev->of_node, "backlight", 0);
- if (backlight) {
- panel->backlight = of_find_backlight_by_node(backlight);
- of_node_put(backlight);
-
- if (!panel->backlight)
- return -EPROBE_DEFER;
- }
-
drm_panel_init(&panel->base, dev, &seiko_panel_funcs,
DRM_MODE_CONNECTOR_DPI);
+ err = drm_panel_of_backlight(&panel->base);
+ if (err)
+ return err;
+
err = drm_panel_add(&panel->base);
if (err < 0)
return err;
@@ -291,11 +271,7 @@ static int seiko_panel_remove(struct platform_device *pdev)
struct seiko_panel *panel = dev_get_drvdata(&pdev->dev);
drm_panel_remove(&panel->base);
-
- seiko_panel_disable(&panel->base);
-
- if (panel->backlight)
- put_device(&panel->backlight->dev);
+ drm_panel_disable(&panel->base);
return 0;
}
@@ -304,7 +280,7 @@ static void seiko_panel_shutdown(struct platform_device *pdev)
{
struct seiko_panel *panel = dev_get_drvdata(&pdev->dev);
- seiko_panel_disable(&panel->base);
+ drm_panel_disable(&panel->base);
}
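Replacing seiko_panel_disable() with drm_panel_disable() in the remove/shutdown paths works because the core helper now blanks any attached backlight before calling the driver hook; roughly (paraphrased from drm_panel.c of this kernel generation, not verbatim):

int drm_panel_disable(struct drm_panel *panel)
{
	int ret;

	if (!panel)
		return -EINVAL;

	/* A no-op when drm_panel_of_backlight() attached no backlight */
	ret = backlight_disable(panel->backlight);
	if (ret < 0)
		DRM_DEV_INFO(panel->dev, "failed to disable backlight: %d\n",
			     ret);

	if (panel->funcs && panel->funcs->disable)
		return panel->funcs->disable(panel);

	return 0;
}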
static const struct display_timing seiko_43wvf1g_timing = {
diff --git a/drivers/gpu/drm/panel/panel-sharp-lq101r1sx01.c b/drivers/gpu/drm/panel/panel-sharp-lq101r1sx01.c
index 5e136c3ba185..b5d1977221a7 100644
--- a/drivers/gpu/drm/panel/panel-sharp-lq101r1sx01.c
+++ b/drivers/gpu/drm/panel/panel-sharp-lq101r1sx01.c
@@ -3,7 +3,6 @@
* Copyright (C) 2014 NVIDIA Corporation
*/
-#include <linux/backlight.h>
#include <linux/delay.h>
#include <linux/gpio/consumer.h>
#include <linux/module.h>
@@ -23,7 +22,6 @@ struct sharp_panel {
struct mipi_dsi_device *link1;
struct mipi_dsi_device *link2;
- struct backlight_device *backlight;
struct regulator *supply;
bool prepared;
@@ -94,8 +92,6 @@ static int sharp_panel_disable(struct drm_panel *panel)
if (!sharp->enabled)
return 0;
- backlight_disable(sharp->backlight);
-
sharp->enabled = false;
return 0;
@@ -258,8 +254,6 @@ static int sharp_panel_enable(struct drm_panel *panel)
if (sharp->enabled)
return 0;
- backlight_enable(sharp->backlight);
-
sharp->enabled = true;
return 0;
@@ -278,13 +272,14 @@ static const struct drm_display_mode default_mode = {
.vrefresh = 60,
};
-static int sharp_panel_get_modes(struct drm_panel *panel)
+static int sharp_panel_get_modes(struct drm_panel *panel,
+ struct drm_connector *connector)
{
struct drm_display_mode *mode;
- mode = drm_mode_duplicate(panel->drm, &default_mode);
+ mode = drm_mode_duplicate(connector->dev, &default_mode);
if (!mode) {
- dev_err(panel->drm->dev, "failed to add mode %ux%ux@%u\n",
+ dev_err(panel->dev, "failed to add mode %ux%u@%u\n",
default_mode.hdisplay, default_mode.vdisplay,
default_mode.vrefresh);
return -ENOMEM;
@@ -292,10 +287,10 @@ static int sharp_panel_get_modes(struct drm_panel *panel)
drm_mode_set_name(mode);
- drm_mode_probed_add(panel->connector, mode);
+ drm_mode_probed_add(connector, mode);
- panel->connector->display_info.width_mm = 217;
- panel->connector->display_info.height_mm = 136;
+ connector->display_info.width_mm = 217;
+ connector->display_info.height_mm = 136;
return 1;
}
@@ -316,7 +311,7 @@ MODULE_DEVICE_TABLE(of, sharp_of_match);
static int sharp_panel_add(struct sharp_panel *sharp)
{
- struct device *dev = &sharp->link1->dev;
+ int ret;
sharp->mode = &default_mode;
@@ -324,14 +319,13 @@ static int sharp_panel_add(struct sharp_panel *sharp)
if (IS_ERR(sharp->supply))
return PTR_ERR(sharp->supply);
- sharp->backlight = devm_of_find_backlight(dev);
-
- if (IS_ERR(sharp->backlight))
- return PTR_ERR(sharp->backlight);
-
drm_panel_init(&sharp->base, &sharp->link1->dev, &sharp_panel_funcs,
DRM_MODE_CONNECTOR_DSI);
+ ret = drm_panel_of_backlight(&sharp->base);
+ if (ret)
+ return ret;
+
return drm_panel_add(&sharp->base);
}
@@ -407,7 +401,7 @@ static int sharp_panel_remove(struct mipi_dsi_device *dsi)
return 0;
}
- err = sharp_panel_disable(&sharp->base);
+ err = drm_panel_disable(&sharp->base);
if (err < 0)
dev_err(&dsi->dev, "failed to disable panel: %d\n", err);
@@ -428,7 +422,7 @@ static void sharp_panel_shutdown(struct mipi_dsi_device *dsi)
if (!sharp)
return;
- sharp_panel_disable(&sharp->base);
+ drm_panel_disable(&sharp->base);
}
static struct mipi_dsi_driver sharp_panel_driver = {
diff --git a/drivers/gpu/drm/panel/panel-sharp-ls037v7dw01.c b/drivers/gpu/drm/panel/panel-sharp-ls037v7dw01.c
index eeab7998c7de..1cf3f02435c1 100644
--- a/drivers/gpu/drm/panel/panel-sharp-ls037v7dw01.c
+++ b/drivers/gpu/drm/panel/panel-sharp-ls037v7dw01.c
@@ -100,12 +100,12 @@ static const struct drm_display_mode ls037v7dw01_mode = {
.height_mm = 75,
};
-static int ls037v7dw01_get_modes(struct drm_panel *panel)
+static int ls037v7dw01_get_modes(struct drm_panel *panel,
+ struct drm_connector *connector)
{
- struct drm_connector *connector = panel->connector;
struct drm_display_mode *mode;
- mode = drm_mode_duplicate(panel->drm, &ls037v7dw01_mode);
+ mode = drm_mode_duplicate(connector->dev, &ls037v7dw01_mode);
if (!mode)
return -ENOMEM;
diff --git a/drivers/gpu/drm/panel/panel-sharp-ls043t1le01.c b/drivers/gpu/drm/panel/panel-sharp-ls043t1le01.c
index b963ba4ab589..ce586c6d70c7 100644
--- a/drivers/gpu/drm/panel/panel-sharp-ls043t1le01.c
+++ b/drivers/gpu/drm/panel/panel-sharp-ls043t1le01.c
@@ -7,7 +7,6 @@
* Based on AUO panel driver by Rob Clark <robdclark@gmail.com>
*/
-#include <linux/backlight.h>
#include <linux/delay.h>
#include <linux/gpio/consumer.h>
#include <linux/module.h>
@@ -25,7 +24,6 @@ struct sharp_nt_panel {
struct drm_panel base;
struct mipi_dsi_device *dsi;
- struct backlight_device *backlight;
struct regulator *supply;
struct gpio_desc *reset_gpio;
@@ -107,8 +105,6 @@ static int sharp_nt_panel_disable(struct drm_panel *panel)
if (!sharp_nt->enabled)
return 0;
- backlight_disable(sharp_nt->backlight);
-
sharp_nt->enabled = false;
return 0;
@@ -190,8 +186,6 @@ static int sharp_nt_panel_enable(struct drm_panel *panel)
if (sharp_nt->enabled)
return 0;
- backlight_enable(sharp_nt->backlight);
-
sharp_nt->enabled = true;
return 0;
@@ -210,24 +204,25 @@ static const struct drm_display_mode default_mode = {
.vrefresh = 60,
};
-static int sharp_nt_panel_get_modes(struct drm_panel *panel)
+static int sharp_nt_panel_get_modes(struct drm_panel *panel,
+ struct drm_connector *connector)
{
struct drm_display_mode *mode;
- mode = drm_mode_duplicate(panel->drm, &default_mode);
+ mode = drm_mode_duplicate(connector->dev, &default_mode);
if (!mode) {
- dev_err(panel->drm->dev, "failed to add mode %ux%ux@%u\n",
- default_mode.hdisplay, default_mode.vdisplay,
- default_mode.vrefresh);
+ dev_err(panel->dev, "failed to add mode %ux%u@%u\n",
+ default_mode.hdisplay, default_mode.vdisplay,
+ default_mode.vrefresh);
return -ENOMEM;
}
drm_mode_set_name(mode);
- drm_mode_probed_add(panel->connector, mode);
+ drm_mode_probed_add(connector, mode);
- panel->connector->display_info.width_mm = 54;
- panel->connector->display_info.height_mm = 95;
+ connector->display_info.width_mm = 54;
+ connector->display_info.height_mm = 95;
return 1;
}
@@ -243,6 +238,7 @@ static const struct drm_panel_funcs sharp_nt_panel_funcs = {
static int sharp_nt_panel_add(struct sharp_nt_panel *sharp_nt)
{
struct device *dev = &sharp_nt->dsi->dev;
+ int ret;
sharp_nt->mode = &default_mode;
@@ -259,14 +255,13 @@ static int sharp_nt_panel_add(struct sharp_nt_panel *sharp_nt)
gpiod_set_value(sharp_nt->reset_gpio, 0);
}
- sharp_nt->backlight = devm_of_find_backlight(dev);
-
- if (IS_ERR(sharp_nt->backlight))
- return PTR_ERR(sharp_nt->backlight);
-
drm_panel_init(&sharp_nt->base, &sharp_nt->dsi->dev,
&sharp_nt_panel_funcs, DRM_MODE_CONNECTOR_DSI);
+ ret = drm_panel_of_backlight(&sharp_nt->base);
+ if (ret)
+ return ret;
+
return drm_panel_add(&sharp_nt->base);
}
@@ -308,7 +303,7 @@ static int sharp_nt_panel_remove(struct mipi_dsi_device *dsi)
struct sharp_nt_panel *sharp_nt = mipi_dsi_get_drvdata(dsi);
int ret;
- ret = sharp_nt_panel_disable(&sharp_nt->base);
+ ret = drm_panel_disable(&sharp_nt->base);
if (ret < 0)
dev_err(&dsi->dev, "failed to disable panel: %d\n", ret);
@@ -325,7 +320,7 @@ static void sharp_nt_panel_shutdown(struct mipi_dsi_device *dsi)
{
struct sharp_nt_panel *sharp_nt = mipi_dsi_get_drvdata(dsi);
- sharp_nt_panel_disable(&sharp_nt->base);
+ drm_panel_disable(&sharp_nt->base);
}
static const struct of_device_id sharp_nt_of_match[] = {
diff --git a/drivers/gpu/drm/panel/panel-simple.c b/drivers/gpu/drm/panel/panel-simple.c
index 5d487686d25c..e14c14ac62b5 100644
--- a/drivers/gpu/drm/panel/panel-simple.c
+++ b/drivers/gpu/drm/panel/panel-simple.c
@@ -21,7 +21,6 @@
* DEALINGS IN THE SOFTWARE.
*/
-#include <linux/backlight.h>
#include <linux/delay.h>
#include <linux/gpio/consumer.h>
#include <linux/module.h>
@@ -105,7 +104,6 @@ struct panel_simple {
const struct panel_desc *desc;
- struct backlight_device *backlight;
struct regulator *supply;
struct i2c_adapter *ddc;
@@ -119,10 +117,9 @@ static inline struct panel_simple *to_panel_simple(struct drm_panel *panel)
return container_of(panel, struct panel_simple, base);
}
-static unsigned int panel_simple_get_timings_modes(struct panel_simple *panel)
+static unsigned int panel_simple_get_timings_modes(struct panel_simple *panel,
+ struct drm_connector *connector)
{
- struct drm_connector *connector = panel->base.connector;
- struct drm_device *drm = panel->base.drm;
struct drm_display_mode *mode;
unsigned int i, num = 0;
@@ -131,9 +128,9 @@ static unsigned int panel_simple_get_timings_modes(struct panel_simple *panel)
struct videomode vm;
videomode_from_timing(dt, &vm);
- mode = drm_mode_create(drm);
+ mode = drm_mode_create(connector->dev);
if (!mode) {
- dev_err(drm->dev, "failed to add mode %ux%u\n",
+ dev_err(panel->base.dev, "failed to add mode %ux%u\n",
dt->hactive.typ, dt->vactive.typ);
continue;
}
@@ -152,19 +149,18 @@ static unsigned int panel_simple_get_timings_modes(struct panel_simple *panel)
return num;
}
-static unsigned int panel_simple_get_display_modes(struct panel_simple *panel)
+static unsigned int panel_simple_get_display_modes(struct panel_simple *panel,
+ struct drm_connector *connector)
{
- struct drm_connector *connector = panel->base.connector;
- struct drm_device *drm = panel->base.drm;
struct drm_display_mode *mode;
unsigned int i, num = 0;
for (i = 0; i < panel->desc->num_modes; i++) {
const struct drm_display_mode *m = &panel->desc->modes[i];
- mode = drm_mode_duplicate(drm, m);
+ mode = drm_mode_duplicate(connector->dev, m);
if (!mode) {
- dev_err(drm->dev, "failed to add mode %ux%u@%u\n",
+ dev_err(panel->base.dev, "failed to add mode %ux%u@%u\n",
m->hdisplay, m->vdisplay, m->vrefresh);
continue;
}
@@ -183,10 +179,9 @@ static unsigned int panel_simple_get_display_modes(struct panel_simple *panel)
return num;
}
-static int panel_simple_get_non_edid_modes(struct panel_simple *panel)
+static int panel_simple_get_non_edid_modes(struct panel_simple *panel,
+ struct drm_connector *connector)
{
- struct drm_connector *connector = panel->base.connector;
- struct drm_device *drm = panel->base.drm;
struct drm_display_mode *mode;
bool has_override = panel->override_mode.type;
unsigned int num = 0;
@@ -195,18 +190,19 @@ static int panel_simple_get_non_edid_modes(struct panel_simple *panel)
return 0;
if (has_override) {
- mode = drm_mode_duplicate(drm, &panel->override_mode);
+ mode = drm_mode_duplicate(connector->dev,
+ &panel->override_mode);
if (mode) {
drm_mode_probed_add(connector, mode);
num = 1;
} else {
- dev_err(drm->dev, "failed to add override mode\n");
+ dev_err(panel->base.dev, "failed to add override mode\n");
}
}
/* Only add timings if override was not there or failed to validate */
if (num == 0 && panel->desc->num_timings)
- num = panel_simple_get_timings_modes(panel);
+ num = panel_simple_get_timings_modes(panel, connector);
/*
* Only add fixed modes if timings/override added no mode.
@@ -216,7 +212,7 @@ static int panel_simple_get_non_edid_modes(struct panel_simple *panel)
*/
WARN_ON(panel->desc->num_timings && panel->desc->num_modes);
if (num == 0)
- num = panel_simple_get_display_modes(panel);
+ num = panel_simple_get_display_modes(panel, connector);
connector->display_info.bpc = panel->desc->bpc;
connector->display_info.width_mm = panel->desc->size.width;
@@ -236,12 +232,6 @@ static int panel_simple_disable(struct drm_panel *panel)
if (!p->enabled)
return 0;
- if (p->backlight) {
- p->backlight->props.power = FB_BLANK_POWERDOWN;
- p->backlight->props.state |= BL_CORE_FBBLANK;
- backlight_update_status(p->backlight);
- }
-
if (p->desc->delay.disable)
msleep(p->desc->delay.disable);
@@ -307,34 +297,30 @@ static int panel_simple_enable(struct drm_panel *panel)
if (p->desc->delay.enable)
msleep(p->desc->delay.enable);
- if (p->backlight) {
- p->backlight->props.state &= ~BL_CORE_FBBLANK;
- p->backlight->props.power = FB_BLANK_UNBLANK;
- backlight_update_status(p->backlight);
- }
-
p->enabled = true;
return 0;
}
-static int panel_simple_get_modes(struct drm_panel *panel)
+static int panel_simple_get_modes(struct drm_panel *panel,
+ struct drm_connector *connector)
{
struct panel_simple *p = to_panel_simple(panel);
int num = 0;
/* probe EDID if a DDC bus is available */
if (p->ddc) {
- struct edid *edid = drm_get_edid(panel->connector, p->ddc);
- drm_connector_update_edid_property(panel->connector, edid);
+ struct edid *edid = drm_get_edid(connector, p->ddc);
+
+ drm_connector_update_edid_property(connector, edid);
if (edid) {
- num += drm_add_edid_modes(panel->connector, edid);
+ num += drm_add_edid_modes(connector, edid);
kfree(edid);
}
}
/* add hard-coded panel modes */
- num += panel_simple_get_non_edid_modes(p);
+ num += panel_simple_get_non_edid_modes(p, connector);
return num;
}
@@ -414,9 +400,9 @@ static void panel_simple_parse_panel_timing_node(struct device *dev,
static int panel_simple_probe(struct device *dev, const struct panel_desc *desc)
{
- struct device_node *backlight, *ddc;
struct panel_simple *panel;
struct display_timing dt;
+ struct device_node *ddc;
int err;
panel = devm_kzalloc(dev, sizeof(*panel), GFP_KERNEL);
@@ -442,24 +428,13 @@ static int panel_simple_probe(struct device *dev, const struct panel_desc *desc)
return err;
}
- backlight = of_parse_phandle(dev->of_node, "backlight", 0);
- if (backlight) {
- panel->backlight = of_find_backlight_by_node(backlight);
- of_node_put(backlight);
-
- if (!panel->backlight)
- return -EPROBE_DEFER;
- }
-
ddc = of_parse_phandle(dev->of_node, "ddc-i2c-bus", 0);
if (ddc) {
panel->ddc = of_find_i2c_adapter_by_node(ddc);
of_node_put(ddc);
- if (!panel->ddc) {
- err = -EPROBE_DEFER;
- goto free_backlight;
- }
+ if (!panel->ddc)
+ return -EPROBE_DEFER;
}
if (!of_get_display_timing(dev->of_node, "panel-timing", &dt))
@@ -468,6 +443,10 @@ static int panel_simple_probe(struct device *dev, const struct panel_desc *desc)
drm_panel_init(&panel->base, dev, &panel_simple_funcs,
desc->connector_type);
+ err = drm_panel_of_backlight(&panel->base);
+ if (err)
+ goto free_ddc;
+
err = drm_panel_add(&panel->base);
if (err < 0)
goto free_ddc;
@@ -479,9 +458,6 @@ static int panel_simple_probe(struct device *dev, const struct panel_desc *desc)
free_ddc:
if (panel->ddc)
put_device(&panel->ddc->dev);
-free_backlight:
- if (panel->backlight)
- put_device(&panel->backlight->dev);
return err;
}
@@ -491,16 +467,12 @@ static int panel_simple_remove(struct device *dev)
struct panel_simple *panel = dev_get_drvdata(dev);
drm_panel_remove(&panel->base);
-
- panel_simple_disable(&panel->base);
- panel_simple_unprepare(&panel->base);
+ drm_panel_disable(&panel->base);
+ drm_panel_unprepare(&panel->base);
if (panel->ddc)
put_device(&panel->ddc->dev);
- if (panel->backlight)
- put_device(&panel->backlight->dev);
-
return 0;
}
@@ -508,8 +480,8 @@ static void panel_simple_shutdown(struct device *dev)
{
struct panel_simple *panel = dev_get_drvdata(dev);
- panel_simple_disable(&panel->base);
- panel_simple_unprepare(&panel->base);
+ drm_panel_disable(&panel->base);
+ drm_panel_unprepare(&panel->base);
}
static const struct drm_display_mode ampire_am_480272h3tmqw_t01h_mode = {
@@ -657,6 +629,35 @@ static const struct panel_desc auo_b101xtn01 = {
},
};
+static const struct drm_display_mode auo_b116xak01_mode = {
+ .clock = 69300,
+ .hdisplay = 1366,
+ .hsync_start = 1366 + 48,
+ .hsync_end = 1366 + 48 + 32,
+ .htotal = 1366 + 48 + 32 + 10,
+ .vdisplay = 768,
+ .vsync_start = 768 + 4,
+ .vsync_end = 768 + 4 + 6,
+ .vtotal = 768 + 4 + 6 + 15,
+ .vrefresh = 60,
+ .flags = DRM_MODE_FLAG_NVSYNC | DRM_MODE_FLAG_NHSYNC,
+};
+
+static const struct panel_desc auo_b116xak01 = {
+ .modes = &auo_b116xak01_mode,
+ .num_modes = 1,
+ .bpc = 6,
+ .size = {
+ .width = 256,
+ .height = 144,
+ },
+ .delay = {
+ .hpd_absent_delay = 200,
+ },
+ .bus_format = MEDIA_BUS_FMT_RGB666_1X18,
+ .connector_type = DRM_MODE_CONNECTOR_eDP,
+};
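+/* Cross-check: htotal = 1366+48+32+10 = 1456, vtotal = 768+4+6+15 = 793,
+ * so 69300 kHz / (1456 * 793) is roughly 60 Hz, matching .vrefresh. */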
+
static const struct drm_display_mode auo_b116xw03_mode = {
.clock = 70589,
.hdisplay = 1366,
@@ -1036,6 +1037,38 @@ static const struct panel_desc boe_nv101wxmn51 = {
},
};
+static const struct drm_display_mode boe_nv140fhmn49_modes[] = {
+ {
+ .clock = 148500,
+ .hdisplay = 1920,
+ .hsync_start = 1920 + 48,
+ .hsync_end = 1920 + 48 + 32,
+ .htotal = 2200,
+ .vdisplay = 1080,
+ .vsync_start = 1080 + 3,
+ .vsync_end = 1080 + 3 + 5,
+ .vtotal = 1125,
+ .vrefresh = 60,
+ },
+};
+
+static const struct panel_desc boe_nv140fhmn49 = {
+ .modes = boe_nv140fhmn49_modes,
+ .num_modes = ARRAY_SIZE(boe_nv140fhmn49_modes),
+ .bpc = 6,
+ .size = {
+ .width = 309,
+ .height = 174,
+ },
+ .delay = {
+ .prepare = 210,
+ .enable = 50,
+ .unprepare = 160,
+ },
+ .bus_format = MEDIA_BUS_FMT_RGB666_1X18,
+ .connector_type = DRM_MODE_CONNECTOR_eDP,
+};
+
static const struct drm_display_mode cdtech_s043wq26h_ct7_mode = {
.clock = 9000,
.hdisplay = 480,
@@ -2061,6 +2094,40 @@ static const struct drm_display_mode mitsubishi_aa070mc01_mode = {
.flags = DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC,
};
+static const struct drm_display_mode logicpd_type_28_mode = {
+ .clock = 9000,
+ .hdisplay = 480,
+ .hsync_start = 480 + 3,
+ .hsync_end = 480 + 3 + 42,
+ .htotal = 480 + 3 + 42 + 2,
+
+ .vdisplay = 272,
+ .vsync_start = 272 + 2,
+ .vsync_end = 272 + 2 + 11,
+ .vtotal = 272 + 2 + 11 + 3,
+ .vrefresh = 60,
+ .flags = DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC,
+};
+
+static const struct panel_desc logicpd_type_28 = {
+ .modes = &logicpd_type_28_mode,
+ .num_modes = 1,
+ .bpc = 8,
+ .size = {
+ .width = 105,
+ .height = 67,
+ },
+ .delay = {
+ .prepare = 200,
+ .enable = 200,
+ .unprepare = 200,
+ .disable = 200,
+ },
+ .bus_format = MEDIA_BUS_FMT_RGB888_1X24,
+ .bus_flags = DRM_BUS_FLAG_DE_HIGH | DRM_BUS_FLAG_PIXDATA_DRIVE_POSEDGE |
+ DRM_BUS_FLAG_SYNC_DRIVE_NEGEDGE,
+};
+
static const struct panel_desc mitsubishi_aa070mc01 = {
.modes = &mitsubishi_aa070mc01_mode,
.num_modes = 1,
@@ -2547,6 +2614,30 @@ static const struct panel_desc samsung_ltn140at29_301 = {
},
};
+static const struct display_timing satoz_sat050at40h12r2_timing = {
+ .pixelclock = {33300000, 33300000, 50000000},
+ .hactive = {800, 800, 800},
+ .hfront_porch = {16, 210, 354},
+ .hback_porch = {46, 46, 46},
+ .hsync_len = {1, 1, 40},
+ .vactive = {480, 480, 480},
+ .vfront_porch = {7, 22, 147},
+ .vback_porch = {23, 23, 23},
+ .vsync_len = {1, 1, 20},
+};
+
+static const struct panel_desc satoz_sat050at40h12r2 = {
+ .timings = &satoz_sat050at40h12r2_timing,
+ .num_timings = 1,
+ .bpc = 8,
+ .size = {
+ .width = 108,
+ .height = 65,
+ },
+ .bus_format = MEDIA_BUS_FMT_RGB888_1X24,
+ .connector_type = DRM_MODE_CONNECTOR_LVDS,
+};
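+/* Using the typical column: htotal = 800+210+46+1 = 1057,
+ * vtotal = 480+22+23+1 = 526, so 33.3 MHz / (1057 * 526) is ~59.9 Hz. */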
+
static const struct drm_display_mode sharp_ld_d5116z01b_mode = {
.clock = 168480,
.hdisplay = 1920,
@@ -3120,6 +3211,9 @@ static const struct of_device_id platform_of_match[] = {
.compatible = "auo,b101xtn01",
.data = &auo_b101xtn01,
}, {
+ .compatible = "auo,b116xa01",
+ .data = &auo_b116xak01,
+ }, {
.compatible = "auo,b116xw03",
.data = &auo_b116xw03,
}, {
@@ -3162,6 +3256,9 @@ static const struct of_device_id platform_of_match[] = {
.compatible = "boe,nv101wxmn51",
.data = &boe_nv101wxmn51,
}, {
+ .compatible = "boe,nv140fhmn49",
+ .data = &boe_nv140fhmn49,
+ }, {
.compatible = "cdtech,s043wq26h-ct7",
.data = &cdtech_s043wq26h_ct7,
}, {
@@ -3288,6 +3385,9 @@ static const struct of_device_id platform_of_match[] = {
.compatible = "lg,lp129qe",
.data = &lg_lp129qe,
}, {
+ .compatible = "logicpd,type28",
+ .data = &logicpd_type_28,
+ }, {
.compatible = "mitsubishi,aa070mc01-ca1",
.data = &mitsubishi_aa070mc01,
}, {
@@ -3348,6 +3448,9 @@ static const struct of_device_id platform_of_match[] = {
.compatible = "samsung,ltn140at29-301",
.data = &samsung_ltn140at29_301,
}, {
+ .compatible = "satoz,sat050at40h12r2",
+ .data = &satoz_sat050at40h12r2,
+ }, {
.compatible = "sharp,ld-d5116z01b",
.data = &sharp_ld_d5116z01b,
}, {
diff --git a/drivers/gpu/drm/panel/panel-sitronix-st7701.c b/drivers/gpu/drm/panel/panel-sitronix-st7701.c
index ee3f23f45755..4b4f2558e3b4 100644
--- a/drivers/gpu/drm/panel/panel-sitronix-st7701.c
+++ b/drivers/gpu/drm/panel/panel-sitronix-st7701.c
@@ -9,7 +9,6 @@
#include <drm/drm_panel.h>
#include <drm/drm_print.h>
-#include <linux/backlight.h>
#include <linux/gpio/consumer.h>
#include <linux/delay.h>
#include <linux/module.h>
@@ -103,7 +102,6 @@ struct st7701 {
struct mipi_dsi_device *dsi;
const struct st7701_panel_desc *desc;
- struct backlight_device *backlight;
struct regulator_bulk_data *supplies;
struct gpio_desc *reset;
unsigned int sleep_delay;
@@ -223,7 +221,6 @@ static int st7701_enable(struct drm_panel *panel)
struct st7701 *st7701 = panel_to_st7701(panel);
ST7701_DSI(st7701, MIPI_DCS_SET_DISPLAY_ON, 0x00);
- backlight_enable(st7701->backlight);
return 0;
}
@@ -232,7 +229,6 @@ static int st7701_disable(struct drm_panel *panel)
{
struct st7701 *st7701 = panel_to_st7701(panel);
- backlight_disable(st7701->backlight);
ST7701_DSI(st7701, MIPI_DCS_SET_DISPLAY_OFF, 0x00);
return 0;
@@ -264,13 +260,14 @@ static int st7701_unprepare(struct drm_panel *panel)
return 0;
}
-static int st7701_get_modes(struct drm_panel *panel)
+static int st7701_get_modes(struct drm_panel *panel,
+ struct drm_connector *connector)
{
struct st7701 *st7701 = panel_to_st7701(panel);
const struct drm_display_mode *desc_mode = st7701->desc->mode;
struct drm_display_mode *mode;
- mode = drm_mode_duplicate(panel->drm, desc_mode);
+ mode = drm_mode_duplicate(connector->dev, desc_mode);
if (!mode) {
DRM_DEV_ERROR(&st7701->dsi->dev,
"failed to add mode %ux%ux@%u\n",
@@ -280,10 +277,10 @@ static int st7701_get_modes(struct drm_panel *panel)
}
drm_mode_set_name(mode);
- drm_mode_probed_add(panel->connector, mode);
+ drm_mode_probed_add(connector, mode);
- panel->connector->display_info.width_mm = desc_mode->width_mm;
- panel->connector->display_info.height_mm = desc_mode->height_mm;
+ connector->display_info.width_mm = desc_mode->width_mm;
+ connector->display_info.height_mm = desc_mode->height_mm;
return 1;
}
@@ -365,10 +362,6 @@ static int st7701_dsi_probe(struct mipi_dsi_device *dsi)
return PTR_ERR(st7701->reset);
}
- st7701->backlight = devm_of_find_backlight(&dsi->dev);
- if (IS_ERR(st7701->backlight))
- return PTR_ERR(st7701->backlight);
-
drm_panel_init(&st7701->panel, &dsi->dev, &st7701_funcs,
DRM_MODE_CONNECTOR_DSI);
@@ -383,6 +376,10 @@ static int st7701_dsi_probe(struct mipi_dsi_device *dsi)
*/
st7701->sleep_delay = 120 + desc->panel_sleep_delay;
+ ret = drm_panel_of_backlight(&st7701->panel);
+ if (ret)
+ return ret;
+
ret = drm_panel_add(&st7701->panel);
if (ret < 0)
return ret;
diff --git a/drivers/gpu/drm/panel/panel-sitronix-st7789v.c b/drivers/gpu/drm/panel/panel-sitronix-st7789v.c
index 108a85bb6667..cc02c54c1b2e 100644
--- a/drivers/gpu/drm/panel/panel-sitronix-st7789v.c
+++ b/drivers/gpu/drm/panel/panel-sitronix-st7789v.c
@@ -3,7 +3,6 @@
* Copyright (C) 2017 Free Electrons
*/
-#include <linux/backlight.h>
#include <linux/delay.h>
#include <linux/gpio/consumer.h>
#include <linux/module.h>
@@ -116,7 +115,6 @@ struct st7789v {
struct drm_panel panel;
struct spi_device *spi;
struct gpio_desc *reset;
- struct backlight_device *backlight;
struct regulator *power;
};
@@ -170,14 +168,14 @@ static const struct drm_display_mode default_mode = {
.vrefresh = 60,
};
-static int st7789v_get_modes(struct drm_panel *panel)
+static int st7789v_get_modes(struct drm_panel *panel,
+ struct drm_connector *connector)
{
- struct drm_connector *connector = panel->connector;
struct drm_display_mode *mode;
- mode = drm_mode_duplicate(panel->drm, &default_mode);
+ mode = drm_mode_duplicate(connector->dev, &default_mode);
if (!mode) {
- dev_err(panel->drm->dev, "failed to add mode %ux%ux@%u\n",
+ dev_err(panel->dev, "failed to add mode %ux%u@%u\n",
default_mode.hdisplay, default_mode.vdisplay,
default_mode.vrefresh);
return -ENOMEM;
@@ -188,8 +186,8 @@ static int st7789v_get_modes(struct drm_panel *panel)
mode->type = DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED;
drm_mode_probed_add(connector, mode);
- panel->connector->display_info.width_mm = 61;
- panel->connector->display_info.height_mm = 103;
+ connector->display_info.width_mm = 61;
+ connector->display_info.height_mm = 103;
return 1;
}
@@ -323,12 +321,6 @@ static int st7789v_enable(struct drm_panel *panel)
{
struct st7789v *ctx = panel_to_st7789v(panel);
- if (ctx->backlight) {
- ctx->backlight->props.state &= ~BL_CORE_FBBLANK;
- ctx->backlight->props.power = FB_BLANK_UNBLANK;
- backlight_update_status(ctx->backlight);
- }
-
return st7789v_write_command(ctx, MIPI_DCS_SET_DISPLAY_ON);
}
@@ -339,12 +331,6 @@ static int st7789v_disable(struct drm_panel *panel)
ST7789V_TEST(ret, st7789v_write_command(ctx, MIPI_DCS_SET_DISPLAY_OFF));
- if (ctx->backlight) {
- ctx->backlight->props.power = FB_BLANK_POWERDOWN;
- ctx->backlight->props.state |= BL_CORE_FBBLANK;
- backlight_update_status(ctx->backlight);
- }
-
return 0;
}
@@ -370,7 +356,6 @@ static const struct drm_panel_funcs st7789v_drm_funcs = {
static int st7789v_probe(struct spi_device *spi)
{
- struct device_node *backlight;
struct st7789v *ctx;
int ret;
@@ -394,26 +379,15 @@ static int st7789v_probe(struct spi_device *spi)
return PTR_ERR(ctx->reset);
}
- backlight = of_parse_phandle(spi->dev.of_node, "backlight", 0);
- if (backlight) {
- ctx->backlight = of_find_backlight_by_node(backlight);
- of_node_put(backlight);
-
- if (!ctx->backlight)
- return -EPROBE_DEFER;
- }
+ ret = drm_panel_of_backlight(&ctx->panel);
+ if (ret)
+ return ret;
ret = drm_panel_add(&ctx->panel);
if (ret < 0)
- goto err_free_backlight;
+ return ret;
return 0;
-
-err_free_backlight:
- if (ctx->backlight)
- put_device(&ctx->backlight->dev);
-
- return ret;
}
static int st7789v_remove(struct spi_device *spi)
@@ -422,9 +396,6 @@ static int st7789v_remove(struct spi_device *spi)
drm_panel_remove(&ctx->panel);
- if (ctx->backlight)
- put_device(&ctx->backlight->dev);
-
return 0;
}
diff --git a/drivers/gpu/drm/panel/panel-sony-acx424akp.c b/drivers/gpu/drm/panel/panel-sony-acx424akp.c
new file mode 100644
index 000000000000..de0abf76ae6f
--- /dev/null
+++ b/drivers/gpu/drm/panel/panel-sony-acx424akp.c
@@ -0,0 +1,550 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * MIPI-DSI Sony ACX424AKP panel driver. This is a 480x864
+ * AMOLED panel with a command-only DSI interface.
+ *
+ * Copyright (C) Linaro Ltd. 2019
+ * Author: Linus Walleij
+ * Based on code and know-how from Marcus Lorentzon
+ * Copyright (C) ST-Ericsson SA 2010
+ */
+#include <linux/backlight.h>
+#include <linux/delay.h>
+#include <linux/gpio/consumer.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/regulator/consumer.h>
+
+#include <video/mipi_display.h>
+
+#include <drm/drm_mipi_dsi.h>
+#include <drm/drm_modes.h>
+#include <drm/drm_panel.h>
+#include <drm/drm_print.h>
+
+#define ACX424_DCS_READ_ID1 0xDA
+#define ACX424_DCS_READ_ID2 0xDB
+#define ACX424_DCS_READ_ID3 0xDC
+#define ACX424_DCS_SET_MDDI 0xAE
+
+/*
+ * Sony seems to use vendor ID 0x81
+ */
+#define DISPLAY_SONY_ACX424AKP_ID1 0x811b
+#define DISPLAY_SONY_ACX424AKP_ID2 0x811a
+/*
+ * The third ID looks like a bug: vendor IDs begin at 0x80,
+ * and a panel ID of 00 looks like a default value.
+ */
+#define DISPLAY_SONY_ACX424AKP_ID3 0x8000
+
+struct acx424akp {
+ struct drm_panel panel;
+ struct device *dev;
+ struct backlight_device *bl;
+ struct regulator *supply;
+ struct gpio_desc *reset_gpio;
+ bool video_mode;
+};
+
+static const struct drm_display_mode sony_acx424akp_vid_mode = {
+ .clock = 330000,
+ .hdisplay = 480,
+ .hsync_start = 480 + 15,
+ .hsync_end = 480 + 15 + 0,
+ .htotal = 480 + 15 + 0 + 15,
+ .vdisplay = 864,
+ .vsync_start = 864 + 14,
+ .vsync_end = 864 + 14 + 1,
+ .vtotal = 864 + 14 + 1 + 11,
+ .vrefresh = 60,
+ .width_mm = 48,
+ .height_mm = 84,
+ .flags = DRM_MODE_FLAG_PVSYNC,
+};
+
+/*
+ * The timings are not very helpful as the display is used in
+ * command mode using the maximum HS frequency.
+ */
+static const struct drm_display_mode sony_acx424akp_cmd_mode = {
+ .clock = 420160,
+ .hdisplay = 480,
+ .hsync_start = 480 + 154,
+ .hsync_end = 480 + 154 + 16,
+ .htotal = 480 + 154 + 16 + 32,
+ .vdisplay = 864,
+ .vsync_start = 864 + 1,
+ .vsync_end = 864 + 1 + 1,
+ .vtotal = 864 + 1 + 1 + 1,
+ /*
+ * A nominal refresh rate; experiments at the maximum "pixel"
+ * clock speed (HS clock 420 MHz) yield around 117 Hz.
+ */
+ .vrefresh = 60,
+ .width_mm = 48,
+ .height_mm = 84,
+};
+
+static inline struct acx424akp *panel_to_acx424akp(struct drm_panel *panel)
+{
+ return container_of(panel, struct acx424akp, panel);
+}
+
+#define FOSC 20 /* 20 MHz */
+#define SCALE_FACTOR_NS_DIV_MHZ 1000
+
+static int acx424akp_set_brightness(struct backlight_device *bl)
+{
+ struct acx424akp *acx = bl_get_data(bl);
+ struct mipi_dsi_device *dsi = to_mipi_dsi_device(acx->dev);
+ int period_ns = 1023;
+ int duty_ns = bl->props.brightness;
+ u8 pwm_ratio;
+ u8 pwm_div;
+ u8 par;
+ int ret;
+
+ /* Calculate the PWM duty cycle in n/256's */
+ pwm_ratio = max(((duty_ns * 256) / period_ns) - 1, 1);
+ pwm_div = max(1,
+ ((FOSC * period_ns) / 256) /
+ SCALE_FACTOR_NS_DIV_MHZ);
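+	/*
+	 * Example: brightness 512 -> pwm_ratio = (512 * 256) / 1023 - 1 = 127
+	 * (about half duty in n/256ths); pwm_div = ((20 * 1023) / 256) / 1000
+	 * truncates to 0 and is clamped to 1 by the max() above.
+	 */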
+
+ /* Set up PWM dutycycle ONE byte (differs from the standard) */
+ DRM_DEV_DEBUG(acx->dev, "calculated duty cycle %02x\n", pwm_ratio);
+ ret = mipi_dsi_dcs_write(dsi, MIPI_DCS_SET_DISPLAY_BRIGHTNESS,
+ &pwm_ratio, 1);
+ if (ret < 0) {
+ DRM_DEV_ERROR(acx->dev,
+ "failed to set display PWM ratio (%d)\n",
+ ret);
+ return ret;
+ }
+
+ /*
+ * Sequence to write PWMDIV:
+ * address data
+ * 0xF3 0xAA CMD2 Unlock
+ * 0x00 0x01 Enter CMD2 page 0
+ * 0X7D 0x01 No reload MTP of CMD2 P1
+ * 0x22 PWMDIV
+ * 0x7F 0xAA CMD2 page 1 lock
+ */
+ par = 0xaa;
+ ret = mipi_dsi_dcs_write(dsi, 0xf3, &par, 1);
+ if (ret < 0) {
+ DRM_DEV_ERROR(acx->dev,
+ "failed to unlock CMD 2 (%d)\n",
+ ret);
+ return ret;
+ }
+ par = 0x01;
+ ret = mipi_dsi_dcs_write(dsi, 0x00, &par, 1);
+ if (ret < 0) {
+ DRM_DEV_ERROR(acx->dev,
+ "failed to enter page 1 (%d)\n",
+ ret);
+ return ret;
+ }
+ par = 0x01;
+ ret = mipi_dsi_dcs_write(dsi, 0x7d, &par, 1);
+ if (ret < 0) {
+ DRM_DEV_ERROR(acx->dev,
+ "failed to disable MTP reload (%d)\n",
+ ret);
+ return ret;
+ }
+ ret = mipi_dsi_dcs_write(dsi, 0x22, &pwm_div, 1);
+ if (ret < 0) {
+ DRM_DEV_ERROR(acx->dev,
+ "failed to set PWM divisor (%d)\n",
+ ret);
+ return ret;
+ }
+ par = 0xaa;
+ ret = mipi_dsi_dcs_write(dsi, 0x7f, &par, 1);
+ if (ret < 0) {
+ DRM_DEV_ERROR(acx->dev,
+ "failed to lock CMD 2 (%d)\n",
+ ret);
+ return ret;
+ }
+
+ /* Enable backlight */
+ par = 0x24;
+ ret = mipi_dsi_dcs_write(dsi, MIPI_DCS_WRITE_CONTROL_DISPLAY,
+ &par, 1);
+ if (ret < 0) {
+ DRM_DEV_ERROR(acx->dev,
+ "failed to enable display backlight (%d)\n",
+ ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+static const struct backlight_ops acx424akp_bl_ops = {
+ .update_status = acx424akp_set_brightness,
+};
+
+static int acx424akp_read_id(struct acx424akp *acx)
+{
+ struct mipi_dsi_device *dsi = to_mipi_dsi_device(acx->dev);
+ u8 vendor, version, panel;
+ u16 val;
+ int ret;
+
+ ret = mipi_dsi_dcs_read(dsi, ACX424_DCS_READ_ID1, &vendor, 1);
+ if (ret < 0) {
+ DRM_DEV_ERROR(acx->dev, "could not vendor ID byte\n");
+ return ret;
+ }
+ ret = mipi_dsi_dcs_read(dsi, ACX424_DCS_READ_ID2, &version, 1);
+ if (ret < 0) {
+ DRM_DEV_ERROR(acx->dev, "could not read device version byte\n");
+ return ret;
+ }
+ ret = mipi_dsi_dcs_read(dsi, ACX424_DCS_READ_ID3, &panel, 1);
+ if (ret < 0) {
+ DRM_DEV_ERROR(acx->dev, "could not read panel ID byte\n");
+ return ret;
+ }
+
+ if (vendor == 0x00) {
+ DRM_DEV_ERROR(acx->dev, "device vendor ID is zero\n");
+ return -ENODEV;
+ }
+
+ val = (vendor << 8) | panel;
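+	/* e.g. vendor 0x81, panel 0x1b -> 0x811b, DISPLAY_SONY_ACX424AKP_ID1 */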
+ switch (val) {
+ case DISPLAY_SONY_ACX424AKP_ID1:
+ case DISPLAY_SONY_ACX424AKP_ID2:
+ case DISPLAY_SONY_ACX424AKP_ID3:
+ DRM_DEV_INFO(acx->dev,
+ "MTP vendor: %02x, version: %02x, panel: %02x\n",
+ vendor, version, panel);
+ break;
+ default:
+ DRM_DEV_INFO(acx->dev,
+ "unknown vendor: %02x, version: %02x, panel: %02x\n",
+ vendor, version, panel);
+ break;
+ }
+
+ return 0;
+}
+
+static int acx424akp_power_on(struct acx424akp *acx)
+{
+ int ret;
+
+ ret = regulator_enable(acx->supply);
+ if (ret) {
+ DRM_DEV_ERROR(acx->dev, "failed to enable supply (%d)\n", ret);
+ return ret;
+ }
+
+ /* Assert RESET */
+ gpiod_set_value_cansleep(acx->reset_gpio, 1);
+ udelay(20);
+ /* De-assert RESET */
+ gpiod_set_value_cansleep(acx->reset_gpio, 0);
+ usleep_range(11000, 20000);
+
+ return 0;
+}
+
+static void acx424akp_power_off(struct acx424akp *acx)
+{
+ /* Assert RESET */
+ gpiod_set_value_cansleep(acx->reset_gpio, 1);
+ usleep_range(11000, 20000);
+
+ regulator_disable(acx->supply);
+}
+
+static int acx424akp_prepare(struct drm_panel *panel)
+{
+ struct acx424akp *acx = panel_to_acx424akp(panel);
+ struct mipi_dsi_device *dsi = to_mipi_dsi_device(acx->dev);
+ const u8 mddi = 3;
+ int ret;
+
+ ret = acx424akp_power_on(acx);
+ if (ret)
+ return ret;
+
+ ret = acx424akp_read_id(acx);
+ if (ret) {
+ DRM_DEV_ERROR(acx->dev, "failed to read panel ID (%d)\n", ret);
+ goto err_power_off;
+ }
+
+ /* Enable tearing mode: send TE (tearing effect) at VBLANK */
+ ret = mipi_dsi_dcs_set_tear_on(dsi,
+ MIPI_DSI_DCS_TEAR_MODE_VBLANK);
+ if (ret) {
+ DRM_DEV_ERROR(acx->dev, "failed to enable vblank TE (%d)\n",
+ ret);
+ goto err_power_off;
+ }
+
+ /*
+ * Set MDDI
+ *
+ * This presumably deactivates the Qualcomm MDDI interface and
+ * selects DSI; similar code is found in other drivers such as the
+ * Sharp LS043T1LE01, which makes us suspect that this panel may be
+ * using a Novatek NT35565 or similar display driver chip that shares
+ * this command. Due to the lack of documentation we cannot know for
+ * sure.
+ */
+ ret = mipi_dsi_dcs_write(dsi, ACX424_DCS_SET_MDDI,
+ &mddi, sizeof(mddi));
+ if (ret < 0) {
+ DRM_DEV_ERROR(acx->dev, "failed to set MDDI (%d)\n", ret);
+ goto err_power_off;
+ }
+
+ /* Exit sleep mode */
+ ret = mipi_dsi_dcs_exit_sleep_mode(dsi);
+ if (ret) {
+ DRM_DEV_ERROR(acx->dev, "failed to exit sleep mode (%d)\n",
+ ret);
+ goto err_power_off;
+ }
+ msleep(140);
+
+ ret = mipi_dsi_dcs_set_display_on(dsi);
+ if (ret) {
+ DRM_DEV_ERROR(acx->dev, "failed to turn display on (%d)\n",
+ ret);
+ goto err_power_off;
+ }
+ if (acx->video_mode) {
+ /* In video mode turn peripheral on */
+ ret = mipi_dsi_turn_on_peripheral(dsi);
+ if (ret) {
+ dev_err(acx->dev, "failed to turn on peripheral\n");
+ goto err_power_off;
+ }
+ }
+
+ acx->bl->props.power = FB_BLANK_NORMAL;
+
+ return 0;
+
+err_power_off:
+ acx424akp_power_off(acx);
+ return ret;
+}
+
+static int acx424akp_unprepare(struct drm_panel *panel)
+{
+ struct acx424akp *acx = panel_to_acx424akp(panel);
+ struct mipi_dsi_device *dsi = to_mipi_dsi_device(acx->dev);
+ u8 par;
+ int ret;
+
+ /* Disable backlight */
+ par = 0x00;
+ ret = mipi_dsi_dcs_write(dsi, MIPI_DCS_WRITE_CONTROL_DISPLAY,
+ &par, 1);
+ if (ret) {
+ DRM_DEV_ERROR(acx->dev,
+ "failed to disable display backlight (%d)\n",
+ ret);
+ return ret;
+ }
+
+ ret = mipi_dsi_dcs_set_display_off(dsi);
+ if (ret) {
+ DRM_DEV_ERROR(acx->dev, "failed to turn display off (%d)\n",
+ ret);
+ return ret;
+ }
+
+ /* Enter sleep mode */
+ ret = mipi_dsi_dcs_enter_sleep_mode(dsi);
+ if (ret) {
+ DRM_DEV_ERROR(acx->dev, "failed to enter sleep mode (%d)\n",
+ ret);
+ return ret;
+ }
+ msleep(85);
+
+ acx424akp_power_off(acx);
+ acx->bl->props.power = FB_BLANK_POWERDOWN;
+
+ return 0;
+}
+
+static int acx424akp_enable(struct drm_panel *panel)
+{
+ struct acx424akp *acx = panel_to_acx424akp(panel);
+
+ /*
+ * The backlight is on as long as the display is on,
+ * so there is no need to call backlight_enable() here.
+ */
+ acx->bl->props.power = FB_BLANK_UNBLANK;
+
+ return 0;
+}
+
+static int acx424akp_disable(struct drm_panel *panel)
+{
+ struct acx424akp *acx = panel_to_acx424akp(panel);
+
+ /*
+ * The backlight is on as long as the display is on,
+ * so there is no need to call backlight_disable() here.
+ */
+ acx->bl->props.power = FB_BLANK_NORMAL;
+
+ return 0;
+}
+
+static int acx424akp_get_modes(struct drm_panel *panel,
+ struct drm_connector *connector)
+{
+ struct acx424akp *acx = panel_to_acx424akp(panel);
+ struct drm_display_mode *mode;
+
+ if (acx->video_mode)
+ mode = drm_mode_duplicate(connector->dev,
+ &sony_acx424akp_vid_mode);
+ else
+ mode = drm_mode_duplicate(connector->dev,
+ &sony_acx424akp_cmd_mode);
+ if (!mode) {
+ DRM_ERROR("bad mode or failed to add mode\n");
+ return -EINVAL;
+ }
+ drm_mode_set_name(mode);
+ mode->type = DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED;
+
+ connector->display_info.width_mm = mode->width_mm;
+ connector->display_info.height_mm = mode->height_mm;
+
+ drm_mode_probed_add(connector, mode);
+
+ return 1; /* Number of modes */
+}
+
+static const struct drm_panel_funcs acx424akp_drm_funcs = {
+ .disable = acx424akp_disable,
+ .unprepare = acx424akp_unprepare,
+ .prepare = acx424akp_prepare,
+ .enable = acx424akp_enable,
+ .get_modes = acx424akp_get_modes,
+};
+
+static int acx424akp_probe(struct mipi_dsi_device *dsi)
+{
+ struct device *dev = &dsi->dev;
+ struct acx424akp *acx;
+ int ret;
+
+ acx = devm_kzalloc(dev, sizeof(struct acx424akp), GFP_KERNEL);
+ if (!acx)
+ return -ENOMEM;
+ acx->video_mode = of_property_read_bool(dev->of_node,
+ "enforce-video-mode");
+
+ mipi_dsi_set_drvdata(dsi, acx);
+ acx->dev = dev;
+
+ dsi->lanes = 2;
+ dsi->format = MIPI_DSI_FMT_RGB888;
+ /*
+ * FIXME: these come from the ST-Ericsson vendor driver for the
+ * HREF520 and seem to reflect limitations in the PLLs on that
+ * platform, if you have the datasheet, please cross-check the
+ * actual max rates.
+ */
+ dsi->lp_rate = 19200000;
+ dsi->hs_rate = 420160000;
+
+ if (acx->video_mode)
+ /* Burst mode using event for sync */
+ dsi->mode_flags =
+ MIPI_DSI_MODE_VIDEO |
+ MIPI_DSI_MODE_VIDEO_BURST;
+ else
+ dsi->mode_flags =
+ MIPI_DSI_CLOCK_NON_CONTINUOUS |
+ MIPI_DSI_MODE_EOT_PACKET;
+
+ acx->supply = devm_regulator_get(dev, "vddi");
+ if (IS_ERR(acx->supply))
+ return PTR_ERR(acx->supply);
+
+ /* This asserts RESET by default */
+ acx->reset_gpio = devm_gpiod_get_optional(dev, "reset",
+ GPIOD_OUT_HIGH);
+ if (IS_ERR(acx->reset_gpio)) {
+ ret = PTR_ERR(acx->reset_gpio);
+ if (ret != -EPROBE_DEFER)
+ DRM_DEV_ERROR(dev, "failed to request GPIO (%d)\n",
+ ret);
+ return ret;
+ }
+
+ drm_panel_init(&acx->panel, dev, &acx424akp_drm_funcs,
+ DRM_MODE_CONNECTOR_DSI);
+
+ acx->bl = devm_backlight_device_register(dev, "acx424akp", dev, acx,
+ &acx424akp_bl_ops, NULL);
+ if (IS_ERR(acx->bl)) {
+ DRM_DEV_ERROR(dev, "failed to register backlight device\n");
+ return PTR_ERR(acx->bl);
+ }
+ acx->bl->props.max_brightness = 1023;
+ acx->bl->props.brightness = 512;
+ acx->bl->props.power = FB_BLANK_POWERDOWN;
+
+ ret = drm_panel_add(&acx->panel);
+ if (ret < 0)
+ return ret;
+
+ ret = mipi_dsi_attach(dsi);
+ if (ret < 0) {
+ drm_panel_remove(&acx->panel);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int acx424akp_remove(struct mipi_dsi_device *dsi)
+{
+ struct acx424akp *acx = mipi_dsi_get_drvdata(dsi);
+
+ mipi_dsi_detach(dsi);
+ drm_panel_remove(&acx->panel);
+
+ return 0;
+}
+
+static const struct of_device_id acx424akp_of_match[] = {
+ { .compatible = "sony,acx424akp" },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, acx424akp_of_match);
+
+static struct mipi_dsi_driver acx424akp_driver = {
+ .probe = acx424akp_probe,
+ .remove = acx424akp_remove,
+ .driver = {
+ .name = "panel-sony-acx424akp",
+ .of_match_table = acx424akp_of_match,
+ },
+};
+module_mipi_dsi_driver(acx424akp_driver);
+
+MODULE_AUTHOR("Linus Wallei <linus.walleij@linaro.org>");
+MODULE_DESCRIPTION("MIPI-DSI Sony acx424akp Panel Driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/gpu/drm/panel/panel-sony-acx565akm.c b/drivers/gpu/drm/panel/panel-sony-acx565akm.c
index d6387d8f88a3..5c4b6f6e5c2d 100644
--- a/drivers/gpu/drm/panel/panel-sony-acx565akm.c
+++ b/drivers/gpu/drm/panel/panel-sony-acx565akm.c
@@ -521,12 +521,12 @@ static const struct drm_display_mode acx565akm_mode = {
.height_mm = 46,
};
-static int acx565akm_get_modes(struct drm_panel *panel)
+static int acx565akm_get_modes(struct drm_panel *panel,
+ struct drm_connector *connector)
{
- struct drm_connector *connector = panel->connector;
struct drm_display_mode *mode;
- mode = drm_mode_duplicate(panel->drm, &acx565akm_mode);
+ mode = drm_mode_duplicate(connector->dev, &acx565akm_mode);
if (!mode)
return -ENOMEM;
diff --git a/drivers/gpu/drm/panel/panel-tpo-td028ttec1.c b/drivers/gpu/drm/panel/panel-tpo-td028ttec1.c
index c44d6a65c0aa..cf29405a2dbe 100644
--- a/drivers/gpu/drm/panel/panel-tpo-td028ttec1.c
+++ b/drivers/gpu/drm/panel/panel-tpo-td028ttec1.c
@@ -17,7 +17,6 @@
* H. Nikolaus Schaller <hns@goldelico.com>
*/
-#include <linux/backlight.h>
#include <linux/delay.h>
#include <linux/module.h>
#include <linux/spi/spi.h>
@@ -83,7 +82,6 @@ struct td028ttec1_panel {
struct drm_panel panel;
struct spi_device *spi;
- struct backlight_device *backlight;
};
#define to_td028ttec1_device(p) container_of(p, struct td028ttec1_panel, panel)
@@ -243,8 +241,6 @@ static int td028ttec1_enable(struct drm_panel *panel)
if (ret)
return ret;
- backlight_enable(lcd->backlight);
-
return 0;
}
@@ -252,8 +248,6 @@ static int td028ttec1_disable(struct drm_panel *panel)
{
struct td028ttec1_panel *lcd = to_td028ttec1_device(panel);
- backlight_disable(lcd->backlight);
-
jbt_ret_write_0(lcd, JBT_REG_DISPLAY_OFF, NULL);
return 0;
@@ -287,12 +281,12 @@ static const struct drm_display_mode td028ttec1_mode = {
.height_mm = 58,
};
-static int td028ttec1_get_modes(struct drm_panel *panel)
+static int td028ttec1_get_modes(struct drm_panel *panel,
+ struct drm_connector *connector)
{
- struct drm_connector *connector = panel->connector;
struct drm_display_mode *mode;
- mode = drm_mode_duplicate(panel->drm, &td028ttec1_mode);
+ mode = drm_mode_duplicate(connector->dev, &td028ttec1_mode);
if (!mode)
return -ENOMEM;
@@ -334,10 +328,6 @@ static int td028ttec1_probe(struct spi_device *spi)
spi_set_drvdata(spi, lcd);
lcd->spi = spi;
- lcd->backlight = devm_of_find_backlight(&spi->dev);
- if (IS_ERR(lcd->backlight))
- return PTR_ERR(lcd->backlight);
-
spi->mode = SPI_MODE_3;
spi->bits_per_word = 9;
@@ -350,6 +340,10 @@ static int td028ttec1_probe(struct spi_device *spi)
drm_panel_init(&lcd->panel, &lcd->spi->dev, &td028ttec1_funcs,
DRM_MODE_CONNECTOR_DPI);
+ ret = drm_panel_of_backlight(&lcd->panel);
+ if (ret)
+ return ret;
+
return drm_panel_add(&lcd->panel);
}
diff --git a/drivers/gpu/drm/panel/panel-tpo-td043mtea1.c b/drivers/gpu/drm/panel/panel-tpo-td043mtea1.c
index 621b65feec07..75f1f1f1b6de 100644
--- a/drivers/gpu/drm/panel/panel-tpo-td043mtea1.c
+++ b/drivers/gpu/drm/panel/panel-tpo-td043mtea1.c
@@ -346,12 +346,12 @@ static const struct drm_display_mode td043mtea1_mode = {
.height_mm = 56,
};
-static int td043mtea1_get_modes(struct drm_panel *panel)
+static int td043mtea1_get_modes(struct drm_panel *panel,
+ struct drm_connector *connector)
{
- struct drm_connector *connector = panel->connector;
struct drm_display_mode *mode;
- mode = drm_mode_duplicate(panel->drm, &td043mtea1_mode);
+ mode = drm_mode_duplicate(connector->dev, &td043mtea1_mode);
if (!mode)
return -ENOMEM;
diff --git a/drivers/gpu/drm/panel/panel-tpo-tpg110.c b/drivers/gpu/drm/panel/panel-tpo-tpg110.c
index 1a5418ae2ccf..8472d018c16f 100644
--- a/drivers/gpu/drm/panel/panel-tpo-tpg110.c
+++ b/drivers/gpu/drm/panel/panel-tpo-tpg110.c
@@ -14,13 +14,13 @@
#include <drm/drm_panel.h>
#include <drm/drm_print.h>
-#include <linux/backlight.h>
#include <linux/bitops.h>
#include <linux/delay.h>
#include <linux/gpio/consumer.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
+#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/spi/spi.h>
@@ -77,10 +77,6 @@ struct tpg110 {
*/
struct drm_panel panel;
/**
- * @backlight: backlight for this panel
- */
- struct backlight_device *backlight;
- /**
* @panel_type: the panel mode as detected
*/
const struct tpg110_panel_mode *panel_mode;
@@ -356,8 +352,6 @@ static int tpg110_disable(struct drm_panel *panel)
val &= ~TPG110_CTRL2_PM;
tpg110_write_reg(tpg, TPG110_CTRL2_PM, val);
- backlight_disable(tpg->backlight);
-
return 0;
}
@@ -366,8 +360,6 @@ static int tpg110_enable(struct drm_panel *panel)
struct tpg110 *tpg = to_tpg110(panel);
u8 val;
- backlight_enable(tpg->backlight);
-
/* Take chip out of standby */
val = tpg110_read_reg(tpg, TPG110_CTRL2_PM);
val |= TPG110_CTRL2_PM;
@@ -384,9 +376,9 @@ static int tpg110_enable(struct drm_panel *panel)
* presents the mode that is configured for the system under use,
* and which is detected by reading the registers of the display.
*/
-static int tpg110_get_modes(struct drm_panel *panel)
+static int tpg110_get_modes(struct drm_panel *panel,
+ struct drm_connector *connector)
{
- struct drm_connector *connector = panel->connector;
struct tpg110 *tpg = to_tpg110(panel);
struct drm_display_mode *mode;
@@ -394,7 +386,7 @@ static int tpg110_get_modes(struct drm_panel *panel)
connector->display_info.height_mm = tpg->height;
connector->display_info.bus_flags = tpg->panel_mode->bus_flags;
- mode = drm_mode_duplicate(panel->drm, &tpg->panel_mode->mode);
+ mode = drm_mode_duplicate(connector->dev, &tpg->panel_mode->mode);
drm_mode_set_name(mode);
mode->type = DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED;
@@ -432,11 +424,6 @@ static int tpg110_probe(struct spi_device *spi)
if (ret)
DRM_DEV_ERROR(dev, "no panel height specified\n");
- /* Look for some optional backlight */
- tpg->backlight = devm_of_find_backlight(dev);
- if (IS_ERR(tpg->backlight))
- return PTR_ERR(tpg->backlight);
-
/* This asserts the GRESTB signal, putting the display into reset */
tpg->grestb = devm_gpiod_get(dev, "grestb", GPIOD_OUT_HIGH);
if (IS_ERR(tpg->grestb)) {
@@ -459,6 +446,11 @@ static int tpg110_probe(struct spi_device *spi)
drm_panel_init(&tpg->panel, dev, &tpg110_drm_funcs,
DRM_MODE_CONNECTOR_DPI);
+
+ ret = drm_panel_of_backlight(&tpg->panel);
+ if (ret)
+ return ret;
+
spi_set_drvdata(spi, tpg);
return drm_panel_add(&tpg->panel);
diff --git a/drivers/gpu/drm/panel/panel-truly-nt35597.c b/drivers/gpu/drm/panel/panel-truly-nt35597.c
index 0feea2456e14..012ca62bf30e 100644
--- a/drivers/gpu/drm/panel/panel-truly-nt35597.c
+++ b/drivers/gpu/drm/panel/panel-truly-nt35597.c
@@ -454,9 +454,9 @@ static int truly_nt35597_enable(struct drm_panel *panel)
return 0;
}
-static int truly_nt35597_get_modes(struct drm_panel *panel)
+static int truly_nt35597_get_modes(struct drm_panel *panel,
+ struct drm_connector *connector)
{
- struct drm_connector *connector = panel->connector;
struct truly_nt35597 *ctx = panel_to_ctx(panel);
struct drm_display_mode *mode;
const struct nt35597_config *config;
diff --git a/drivers/gpu/drm/panel/panel-xinpeng-xpp055c272.c b/drivers/gpu/drm/panel/panel-xinpeng-xpp055c272.c
new file mode 100644
index 000000000000..1645aceab597
--- /dev/null
+++ b/drivers/gpu/drm/panel/panel-xinpeng-xpp055c272.c
@@ -0,0 +1,398 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Xinpeng xpp055c272 5.5" MIPI-DSI panel driver
+ * Copyright (C) 2019 Theobroma Systems Design und Consulting GmbH
+ *
+ * based on
+ *
+ * Rocktech jh057n00900 5.5" MIPI-DSI panel driver
+ * Copyright (C) Purism SPC 2019
+ */
+
+#include <drm/drm_mipi_dsi.h>
+#include <drm/drm_modes.h>
+#include <drm/drm_panel.h>
+#include <drm/drm_print.h>
+
+#include <video/display_timing.h>
+#include <video/mipi_display.h>
+
+#include <linux/delay.h>
+#include <linux/gpio/consumer.h>
+#include <linux/media-bus-format.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/regulator/consumer.h>
+
+/* Manufacturer-specific commands sent via DSI */
+#define XPP055C272_CMD_ALL_PIXEL_OFF 0x22
+#define XPP055C272_CMD_ALL_PIXEL_ON 0x23
+#define XPP055C272_CMD_SETDISP 0xb2
+#define XPP055C272_CMD_SETRGBIF 0xb3
+#define XPP055C272_CMD_SETCYC 0xb4
+#define XPP055C272_CMD_SETBGP 0xb5
+#define XPP055C272_CMD_SETVCOM 0xb6
+#define XPP055C272_CMD_SETOTP 0xb7
+#define XPP055C272_CMD_SETPOWER_EXT 0xb8
+#define XPP055C272_CMD_SETEXTC 0xb9
+#define XPP055C272_CMD_SETMIPI 0xbA
+#define XPP055C272_CMD_SETVDC 0xbc
+#define XPP055C272_CMD_SETPCR 0xbf
+#define XPP055C272_CMD_SETSCR 0xc0
+#define XPP055C272_CMD_SETPOWER 0xc1
+#define XPP055C272_CMD_SETECO 0xc6
+#define XPP055C272_CMD_SETPANEL 0xcc
+#define XPP055C272_CMD_SETGAMMA 0xe0
+#define XPP055C272_CMD_SETEQ 0xe3
+#define XPP055C272_CMD_SETGIP1 0xe9
+#define XPP055C272_CMD_SETGIP2 0xea
+
+struct xpp055c272 {
+ struct device *dev;
+ struct drm_panel panel;
+ struct gpio_desc *reset_gpio;
+ struct regulator *vci;
+ struct regulator *iovcc;
+ bool prepared;
+};
+
+static inline struct xpp055c272 *panel_to_xpp055c272(struct drm_panel *panel)
+{
+ return container_of(panel, struct xpp055c272, panel);
+}
+
+#define dsi_generic_write_seq(dsi, cmd, seq...) do { \
+ static const u8 d[] = { seq }; \
+ int ret; \
+ ret = mipi_dsi_dcs_write(dsi, cmd, d, ARRAY_SIZE(d)); \
+ if (ret < 0) \
+ return ret; \
+ } while (0)
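+/*
+ * Despite the "generic" in the name, the sequence goes out as a DCS write
+ * (mipi_dsi_dcs_write() with cmd as the DCS command byte); note that the
+ * macro returns from the enclosing function on error.
+ */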
+
+static int xpp055c272_init_sequence(struct xpp055c272 *ctx)
+{
+ struct mipi_dsi_device *dsi = to_mipi_dsi_device(ctx->dev);
+ struct device *dev = ctx->dev;
+
+ /*
+ * Init sequence was supplied by the panel vendor without much
+ * documentation.
+ */
+ dsi_generic_write_seq(dsi, XPP055C272_CMD_SETEXTC, 0xf1, 0x12, 0x83);
+ dsi_generic_write_seq(dsi, XPP055C272_CMD_SETMIPI,
+ 0x33, 0x81, 0x05, 0xf9, 0x0e, 0x0e, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x44, 0x25,
+ 0x00, 0x91, 0x0a, 0x00, 0x00, 0x02, 0x4f, 0x01,
+ 0x00, 0x00, 0x37);
+ dsi_generic_write_seq(dsi, XPP055C272_CMD_SETPOWER_EXT, 0x25);
+ dsi_generic_write_seq(dsi, XPP055C272_CMD_SETPCR, 0x02, 0x11, 0x00);
+ dsi_generic_write_seq(dsi, XPP055C272_CMD_SETRGBIF,
+ 0x0c, 0x10, 0x0a, 0x50, 0x03, 0xff, 0x00, 0x00,
+ 0x00, 0x00);
+ dsi_generic_write_seq(dsi, XPP055C272_CMD_SETSCR,
+ 0x73, 0x73, 0x50, 0x50, 0x00, 0x00, 0x08, 0x70,
+ 0x00);
+ dsi_generic_write_seq(dsi, XPP055C272_CMD_SETVDC, 0x46);
+ dsi_generic_write_seq(dsi, XPP055C272_CMD_SETPANEL, 0x0b);
+ dsi_generic_write_seq(dsi, XPP055C272_CMD_SETCYC, 0x80);
+ dsi_generic_write_seq(dsi, XPP055C272_CMD_SETDISP, 0xc8, 0x12, 0x30);
+ dsi_generic_write_seq(dsi, XPP055C272_CMD_SETEQ,
+			      0x07, 0x07, 0x0b, 0x0b, 0x03, 0x0b, 0x00, 0x00,
+			      0x00, 0x00, 0xff, 0x00, 0xc0, 0x10);
+ dsi_generic_write_seq(dsi, XPP055C272_CMD_SETPOWER,
+ 0x53, 0x00, 0x1e, 0x1e, 0x77, 0xe1, 0xcc, 0xdd,
+ 0x67, 0x77, 0x33, 0x33);
+ dsi_generic_write_seq(dsi, XPP055C272_CMD_SETECO, 0x00, 0x00, 0xff,
+ 0xff, 0x01, 0xff);
+ dsi_generic_write_seq(dsi, XPP055C272_CMD_SETBGP, 0x09, 0x09);
+ msleep(20);
+
+ dsi_generic_write_seq(dsi, XPP055C272_CMD_SETVCOM, 0x87, 0x95);
+ dsi_generic_write_seq(dsi, XPP055C272_CMD_SETGIP1,
+ 0xc2, 0x10, 0x05, 0x05, 0x10, 0x05, 0xa0, 0x12,
+ 0x31, 0x23, 0x3f, 0x81, 0x0a, 0xa0, 0x37, 0x18,
+ 0x00, 0x80, 0x01, 0x00, 0x00, 0x00, 0x00, 0x80,
+ 0x01, 0x00, 0x00, 0x00, 0x48, 0xf8, 0x86, 0x42,
+ 0x08, 0x88, 0x88, 0x80, 0x88, 0x88, 0x88, 0x58,
+ 0xf8, 0x87, 0x53, 0x18, 0x88, 0x88, 0x81, 0x88,
+ 0x88, 0x88, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00);
+ dsi_generic_write_seq(dsi, XPP055C272_CMD_SETGIP2,
+ 0x00, 0x1a, 0x00, 0x00, 0x00, 0x00, 0x02, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x1f, 0x88, 0x81, 0x35,
+ 0x78, 0x88, 0x88, 0x85, 0x88, 0x88, 0x88, 0x0f,
+ 0x88, 0x80, 0x24, 0x68, 0x88, 0x88, 0x84, 0x88,
+ 0x88, 0x88, 0x23, 0x10, 0x00, 0x00, 0x1c, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x30, 0x05,
+ 0xa0, 0x00, 0x00, 0x00, 0x00);
+ dsi_generic_write_seq(dsi, XPP055C272_CMD_SETGAMMA,
+ 0x00, 0x06, 0x08, 0x2a, 0x31, 0x3f, 0x38, 0x36,
+ 0x07, 0x0c, 0x0d, 0x11, 0x13, 0x12, 0x13, 0x11,
+ 0x18, 0x00, 0x06, 0x08, 0x2a, 0x31, 0x3f, 0x38,
+ 0x36, 0x07, 0x0c, 0x0d, 0x11, 0x13, 0x12, 0x13,
+ 0x11, 0x18);
+
+ msleep(60);
+
+ DRM_DEV_DEBUG_DRIVER(dev, "Panel init sequence done\n");
+ return 0;
+}
+
+static int xpp055c272_unprepare(struct drm_panel *panel)
+{
+ struct xpp055c272 *ctx = panel_to_xpp055c272(panel);
+ struct mipi_dsi_device *dsi = to_mipi_dsi_device(ctx->dev);
+ int ret;
+
+ if (!ctx->prepared)
+ return 0;
+
+ ret = mipi_dsi_dcs_set_display_off(dsi);
+ if (ret < 0)
+ DRM_DEV_ERROR(ctx->dev, "failed to set display off: %d\n",
+ ret);
+
+	ret = mipi_dsi_dcs_enter_sleep_mode(dsi);
+ if (ret < 0) {
+ DRM_DEV_ERROR(ctx->dev, "failed to enter sleep mode: %d\n",
+ ret);
+ return ret;
+ }
+
+ regulator_disable(ctx->iovcc);
+ regulator_disable(ctx->vci);
+
+ ctx->prepared = false;
+
+ return 0;
+}
+
+static int xpp055c272_prepare(struct drm_panel *panel)
+{
+ struct xpp055c272 *ctx = panel_to_xpp055c272(panel);
+ struct mipi_dsi_device *dsi = to_mipi_dsi_device(ctx->dev);
+ int ret;
+
+ if (ctx->prepared)
+ return 0;
+
+ DRM_DEV_DEBUG_DRIVER(ctx->dev, "Resetting the panel\n");
+ ret = regulator_enable(ctx->vci);
+ if (ret < 0) {
+ DRM_DEV_ERROR(ctx->dev,
+ "Failed to enable vci supply: %d\n", ret);
+ return ret;
+ }
+ ret = regulator_enable(ctx->iovcc);
+ if (ret < 0) {
+ DRM_DEV_ERROR(ctx->dev,
+ "Failed to enable iovcc supply: %d\n", ret);
+ goto disable_vci;
+ }
+
+ gpiod_set_value_cansleep(ctx->reset_gpio, 1);
+ /* T6: 10us */
+ usleep_range(10, 20);
+ gpiod_set_value_cansleep(ctx->reset_gpio, 0);
+
+ /* T8: 20ms */
+ msleep(20);
+
+ ret = xpp055c272_init_sequence(ctx);
+ if (ret < 0) {
+ DRM_DEV_ERROR(ctx->dev, "Panel init sequence failed: %d\n",
+ ret);
+ goto disable_iovcc;
+ }
+
+ ret = mipi_dsi_dcs_exit_sleep_mode(dsi);
+ if (ret < 0) {
+ DRM_DEV_ERROR(ctx->dev, "Failed to exit sleep mode: %d\n", ret);
+ goto disable_iovcc;
+ }
+
+ /* T9: 120ms */
+ msleep(120);
+
+ ret = mipi_dsi_dcs_set_display_on(dsi);
+ if (ret < 0) {
+ DRM_DEV_ERROR(ctx->dev, "Failed to set display on: %d\n", ret);
+ goto disable_iovcc;
+ }
+
+ msleep(50);
+
+ ctx->prepared = true;
+
+ return 0;
+
+disable_iovcc:
+ regulator_disable(ctx->iovcc);
+disable_vci:
+ regulator_disable(ctx->vci);
+ return ret;
+}
+
+static const struct drm_display_mode default_mode = {
+ .hdisplay = 720,
+ .hsync_start = 720 + 40,
+ .hsync_end = 720 + 40 + 10,
+ .htotal = 720 + 40 + 10 + 40,
+ .vdisplay = 1280,
+ .vsync_start = 1280 + 22,
+ .vsync_end = 1280 + 22 + 4,
+ .vtotal = 1280 + 22 + 4 + 11,
+ .vrefresh = 60,
+ .clock = 64000,
+ .width_mm = 68,
+ .height_mm = 121,
+};
+
+static int xpp055c272_get_modes(struct drm_panel *panel,
+ struct drm_connector *connector)
+{
+ struct xpp055c272 *ctx = panel_to_xpp055c272(panel);
+ struct drm_display_mode *mode;
+
+ mode = drm_mode_duplicate(connector->dev, &default_mode);
+ if (!mode) {
+ DRM_DEV_ERROR(ctx->dev, "Failed to add mode %ux%u@%u\n",
+ default_mode.hdisplay, default_mode.vdisplay,
+ default_mode.vrefresh);
+ return -ENOMEM;
+ }
+
+ drm_mode_set_name(mode);
+
+ mode->type = DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED;
+ connector->display_info.width_mm = mode->width_mm;
+ connector->display_info.height_mm = mode->height_mm;
+ drm_mode_probed_add(connector, mode);
+
+ return 1;
+}
+
+static const struct drm_panel_funcs xpp055c272_funcs = {
+ .unprepare = xpp055c272_unprepare,
+ .prepare = xpp055c272_prepare,
+ .get_modes = xpp055c272_get_modes,
+};
+
+static int xpp055c272_probe(struct mipi_dsi_device *dsi)
+{
+ struct device *dev = &dsi->dev;
+ struct xpp055c272 *ctx;
+ int ret;
+
+ ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL);
+ if (!ctx)
+ return -ENOMEM;
+
+ ctx->reset_gpio = devm_gpiod_get_optional(dev, "reset", GPIOD_OUT_LOW);
+ if (IS_ERR(ctx->reset_gpio)) {
+ DRM_DEV_ERROR(dev, "cannot get reset gpio\n");
+ return PTR_ERR(ctx->reset_gpio);
+ }
+
+ ctx->vci = devm_regulator_get(dev, "vci");
+ if (IS_ERR(ctx->vci)) {
+ ret = PTR_ERR(ctx->vci);
+ if (ret != -EPROBE_DEFER)
+ DRM_DEV_ERROR(dev,
+ "Failed to request vci regulator: %d\n",
+ ret);
+ return ret;
+ }
+
+ ctx->iovcc = devm_regulator_get(dev, "iovcc");
+ if (IS_ERR(ctx->iovcc)) {
+ ret = PTR_ERR(ctx->iovcc);
+ if (ret != -EPROBE_DEFER)
+ DRM_DEV_ERROR(dev,
+ "Failed to request iovcc regulator: %d\n",
+ ret);
+ return ret;
+ }
+
+ mipi_dsi_set_drvdata(dsi, ctx);
+
+ ctx->dev = dev;
+
+ dsi->lanes = 4;
+ dsi->format = MIPI_DSI_FMT_RGB888;
+ dsi->mode_flags = MIPI_DSI_MODE_VIDEO | MIPI_DSI_MODE_VIDEO_BURST |
+ MIPI_DSI_MODE_LPM | MIPI_DSI_MODE_EOT_PACKET;
+
+ drm_panel_init(&ctx->panel, &dsi->dev, &xpp055c272_funcs,
+ DRM_MODE_CONNECTOR_DSI);
+
+ ret = drm_panel_of_backlight(&ctx->panel);
+ if (ret)
+ return ret;
+
+ drm_panel_add(&ctx->panel);
+
+ ret = mipi_dsi_attach(dsi);
+ if (ret < 0) {
+ DRM_DEV_ERROR(dev, "mipi_dsi_attach failed: %d\n", ret);
+ drm_panel_remove(&ctx->panel);
+ return ret;
+ }
+
+ return 0;
+}
+
+static void xpp055c272_shutdown(struct mipi_dsi_device *dsi)
+{
+ struct xpp055c272 *ctx = mipi_dsi_get_drvdata(dsi);
+ int ret;
+
+ ret = drm_panel_unprepare(&ctx->panel);
+ if (ret < 0)
+ DRM_DEV_ERROR(&dsi->dev, "Failed to unprepare panel: %d\n",
+ ret);
+
+ ret = drm_panel_disable(&ctx->panel);
+ if (ret < 0)
+ DRM_DEV_ERROR(&dsi->dev, "Failed to disable panel: %d\n",
+ ret);
+}
+
+static int xpp055c272_remove(struct mipi_dsi_device *dsi)
+{
+ struct xpp055c272 *ctx = mipi_dsi_get_drvdata(dsi);
+ int ret;
+
+ xpp055c272_shutdown(dsi);
+
+ ret = mipi_dsi_detach(dsi);
+ if (ret < 0)
+ DRM_DEV_ERROR(&dsi->dev, "Failed to detach from DSI host: %d\n",
+ ret);
+
+ drm_panel_remove(&ctx->panel);
+
+ return 0;
+}
+
+static const struct of_device_id xpp055c272_of_match[] = {
+ { .compatible = "xinpeng,xpp055c272" },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, xpp055c272_of_match);
+
+static struct mipi_dsi_driver xpp055c272_driver = {
+ .driver = {
+ .name = "panel-xinpeng-xpp055c272",
+ .of_match_table = xpp055c272_of_match,
+ },
+ .probe = xpp055c272_probe,
+ .remove = xpp055c272_remove,
+ .shutdown = xpp055c272_shutdown,
+};
+module_mipi_dsi_driver(xpp055c272_driver);
+
+MODULE_AUTHOR("Heiko Stuebner <heiko.stuebner@theobroma-systems.com>");
+MODULE_DESCRIPTION("DRM driver for Xinpeng xpp055c272 MIPI DSI panel");
+MODULE_LICENSE("GPL v2");
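The tpg110 hunk above and this new xpp055c272 driver land on the same probe shape: bind the panel to its funcs and connector type with drm_panel_init(), resolve the backlight via drm_panel_of_backlight() (which replaces the open-coded devm_of_find_backlight() lookups and can defer probing), then register. A minimal sketch of that skeleton — my_panel and my_panel_funcs are hypothetical stand-ins, not code from this series:

static int my_panel_probe(struct device *dev)
{
	struct my_panel *ctx;
	int ret;

	ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL);
	if (!ctx)
		return -ENOMEM;

	/* Bind funcs and connector type first... */
	drm_panel_init(&ctx->panel, dev, &my_panel_funcs,
		       DRM_MODE_CONNECTOR_DPI);

	/*
	 * ...then resolve the optional backlight phandle; this may
	 * return -EPROBE_DEFER if the backlight is not bound yet.
	 */
	ret = drm_panel_of_backlight(&ctx->panel);
	if (ret)
		return ret;

	return drm_panel_add(&ctx->panel);
}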
diff --git a/drivers/gpu/drm/panfrost/panfrost_devfreq.c b/drivers/gpu/drm/panfrost/panfrost_devfreq.c
index 536ba93b0f46..413987038fbf 100644
--- a/drivers/gpu/drm/panfrost/panfrost_devfreq.c
+++ b/drivers/gpu/drm/panfrost/panfrost_devfreq.c
@@ -1,6 +1,7 @@
// SPDX-License-Identifier: GPL-2.0
/* Copyright 2019 Collabora ltd. */
#include <linux/devfreq.h>
+#include <linux/devfreq_cooling.h>
#include <linux/platform_device.h>
#include <linux/pm_opp.h>
#include <linux/clk.h>
@@ -74,8 +75,11 @@ int panfrost_devfreq_init(struct panfrost_device *pfdev)
int ret;
struct dev_pm_opp *opp;
unsigned long cur_freq;
+ struct device *dev = &pfdev->pdev->dev;
+ struct devfreq *devfreq;
+ struct thermal_cooling_device *cooling;
- ret = dev_pm_opp_of_add_table(&pfdev->pdev->dev);
+ ret = dev_pm_opp_of_add_table(dev);
if (ret == -ENODEV) /* Optional, continue without devfreq */
return 0;
else if (ret)
@@ -85,29 +89,35 @@ int panfrost_devfreq_init(struct panfrost_device *pfdev)
cur_freq = clk_get_rate(pfdev->clock);
- opp = devfreq_recommended_opp(&pfdev->pdev->dev, &cur_freq, 0);
+ opp = devfreq_recommended_opp(dev, &cur_freq, 0);
if (IS_ERR(opp))
return PTR_ERR(opp);
panfrost_devfreq_profile.initial_freq = cur_freq;
dev_pm_opp_put(opp);
- pfdev->devfreq.devfreq = devm_devfreq_add_device(&pfdev->pdev->dev,
- &panfrost_devfreq_profile, DEVFREQ_GOV_SIMPLE_ONDEMAND,
- NULL);
- if (IS_ERR(pfdev->devfreq.devfreq)) {
- DRM_DEV_ERROR(&pfdev->pdev->dev, "Couldn't initialize GPU devfreq\n");
- ret = PTR_ERR(pfdev->devfreq.devfreq);
- pfdev->devfreq.devfreq = NULL;
- dev_pm_opp_of_remove_table(&pfdev->pdev->dev);
- return ret;
+ devfreq = devm_devfreq_add_device(dev, &panfrost_devfreq_profile,
+ DEVFREQ_GOV_SIMPLE_ONDEMAND, NULL);
+ if (IS_ERR(devfreq)) {
+ DRM_DEV_ERROR(dev, "Couldn't initialize GPU devfreq\n");
+ dev_pm_opp_of_remove_table(dev);
+ return PTR_ERR(devfreq);
}
+ pfdev->devfreq.devfreq = devfreq;
+
+ cooling = of_devfreq_cooling_register(dev->of_node, devfreq);
+ if (IS_ERR(cooling))
+ DRM_DEV_INFO(dev, "Failed to register cooling device\n");
+ else
+ pfdev->devfreq.cooling = cooling;
return 0;
}
void panfrost_devfreq_fini(struct panfrost_device *pfdev)
{
+ if (pfdev->devfreq.cooling)
+ devfreq_cooling_unregister(pfdev->devfreq.cooling);
dev_pm_opp_of_remove_table(&pfdev->pdev->dev);
}
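The cooling hookup is intentionally best-effort: devfreq itself is optional (-ENODEV from the OPP table just skips it), and a failed of_devfreq_cooling_register() only logs an info message. Condensed into one place, the paired init/fini lifecycle from the two hunks above reads:

	/* init: best-effort registration as a devfreq cooling device */
	cooling = of_devfreq_cooling_register(dev->of_node, devfreq);
	if (IS_ERR(cooling))
		DRM_DEV_INFO(dev, "Failed to register cooling device\n");
	else
		pfdev->devfreq.cooling = cooling;

	/* fini: unregister the cooling device before the OPP table goes away */
	if (pfdev->devfreq.cooling)
		devfreq_cooling_unregister(pfdev->devfreq.cooling);
	dev_pm_opp_of_remove_table(&pfdev->pdev->dev);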
diff --git a/drivers/gpu/drm/panfrost/panfrost_drv.c b/drivers/gpu/drm/panfrost/panfrost_drv.c
index f61364f7c471..6da59f476aba 100644
--- a/drivers/gpu/drm/panfrost/panfrost_drv.c
+++ b/drivers/gpu/drm/panfrost/panfrost_drv.c
@@ -78,8 +78,10 @@ static int panfrost_ioctl_get_param(struct drm_device *ddev, void *data, struct
static int panfrost_ioctl_create_bo(struct drm_device *dev, void *data,
struct drm_file *file)
{
+ struct panfrost_file_priv *priv = file->driver_priv;
struct panfrost_gem_object *bo;
struct drm_panfrost_create_bo *args = data;
+ struct panfrost_gem_mapping *mapping;
if (!args->size || args->pad ||
(args->flags & ~(PANFROST_BO_NOEXEC | PANFROST_BO_HEAP)))
@@ -95,7 +97,14 @@ static int panfrost_ioctl_create_bo(struct drm_device *dev, void *data,
if (IS_ERR(bo))
return PTR_ERR(bo);
- args->offset = bo->node.start << PAGE_SHIFT;
+ mapping = panfrost_gem_mapping_get(bo, priv);
+ if (!mapping) {
+ drm_gem_object_put_unlocked(&bo->base.base);
+ return -EINVAL;
+ }
+
+ args->offset = mapping->mmnode.start << PAGE_SHIFT;
+ panfrost_gem_mapping_put(mapping);
return 0;
}
@@ -119,6 +128,11 @@ panfrost_lookup_bos(struct drm_device *dev,
struct drm_panfrost_submit *args,
struct panfrost_job *job)
{
+ struct panfrost_file_priv *priv = file_priv->driver_priv;
+ struct panfrost_gem_object *bo;
+ unsigned int i;
+ int ret;
+
job->bo_count = args->bo_handle_count;
if (!job->bo_count)
@@ -130,9 +144,32 @@ panfrost_lookup_bos(struct drm_device *dev,
if (!job->implicit_fences)
return -ENOMEM;
- return drm_gem_objects_lookup(file_priv,
- (void __user *)(uintptr_t)args->bo_handles,
- job->bo_count, &job->bos);
+ ret = drm_gem_objects_lookup(file_priv,
+ (void __user *)(uintptr_t)args->bo_handles,
+ job->bo_count, &job->bos);
+ if (ret)
+ return ret;
+
+ job->mappings = kvmalloc_array(job->bo_count,
+ sizeof(struct panfrost_gem_mapping *),
+ GFP_KERNEL | __GFP_ZERO);
+ if (!job->mappings)
+ return -ENOMEM;
+
+ for (i = 0; i < job->bo_count; i++) {
+ struct panfrost_gem_mapping *mapping;
+
+ bo = to_panfrost_bo(job->bos[i]);
+ mapping = panfrost_gem_mapping_get(bo, priv);
+ if (!mapping) {
+ ret = -EINVAL;
+ break;
+ }
+
+ job->mappings[i] = mapping;
+ }
+
+ return ret;
}
/**
@@ -320,7 +357,9 @@ out:
static int panfrost_ioctl_get_bo_offset(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
+ struct panfrost_file_priv *priv = file_priv->driver_priv;
struct drm_panfrost_get_bo_offset *args = data;
+ struct panfrost_gem_mapping *mapping;
struct drm_gem_object *gem_obj;
struct panfrost_gem_object *bo;
@@ -331,18 +370,26 @@ static int panfrost_ioctl_get_bo_offset(struct drm_device *dev, void *data,
}
bo = to_panfrost_bo(gem_obj);
- args->offset = bo->node.start << PAGE_SHIFT;
-
+ mapping = panfrost_gem_mapping_get(bo, priv);
drm_gem_object_put_unlocked(gem_obj);
+
+ if (!mapping)
+ return -EINVAL;
+
+ args->offset = mapping->mmnode.start << PAGE_SHIFT;
+ panfrost_gem_mapping_put(mapping);
return 0;
}
static int panfrost_ioctl_madvise(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
+ struct panfrost_file_priv *priv = file_priv->driver_priv;
struct drm_panfrost_madvise *args = data;
struct panfrost_device *pfdev = dev->dev_private;
struct drm_gem_object *gem_obj;
+ struct panfrost_gem_object *bo;
+ int ret = 0;
gem_obj = drm_gem_object_lookup(file_priv, args->handle);
if (!gem_obj) {
@@ -350,22 +397,48 @@ static int panfrost_ioctl_madvise(struct drm_device *dev, void *data,
return -ENOENT;
}
+ bo = to_panfrost_bo(gem_obj);
+
mutex_lock(&pfdev->shrinker_lock);
+ mutex_lock(&bo->mappings.lock);
+ if (args->madv == PANFROST_MADV_DONTNEED) {
+ struct panfrost_gem_mapping *first;
+
+ first = list_first_entry(&bo->mappings.list,
+ struct panfrost_gem_mapping,
+ node);
+
+ /*
+ * If we want to mark the BO purgeable, there must be only one
+ * user: the caller FD.
+ * We could do something smarter and mark the BO purgeable only
+ * when all its users have marked it purgeable, but globally
+ * visible/shared BOs are likely to never be marked purgeable
+ * anyway, so let's not bother.
+ */
+ if (!list_is_singular(&bo->mappings.list) ||
+ WARN_ON_ONCE(first->mmu != &priv->mmu)) {
+ ret = -EINVAL;
+ goto out_unlock_mappings;
+ }
+ }
+
args->retained = drm_gem_shmem_madvise(gem_obj, args->madv);
if (args->retained) {
- struct panfrost_gem_object *bo = to_panfrost_bo(gem_obj);
-
if (args->madv == PANFROST_MADV_DONTNEED)
list_add_tail(&bo->base.madv_list,
&pfdev->shrinker_list);
else if (args->madv == PANFROST_MADV_WILLNEED)
list_del_init(&bo->base.madv_list);
}
+
+out_unlock_mappings:
+ mutex_unlock(&bo->mappings.lock);
mutex_unlock(&pfdev->shrinker_lock);
drm_gem_object_put_unlocked(gem_obj);
- return 0;
+ return ret;
}
int panfrost_unstable_ioctl_check(void)
@@ -453,15 +526,11 @@ panfrost_postclose(struct drm_device *dev, struct drm_file *file)
kfree(panfrost_priv);
}
-/* DRM_AUTH is required on SUBMIT for now, while all clients share a single
- * address space. Note that render nodes would be able to submit jobs that
- * could access BOs from clients authenticated with the master node.
- */
static const struct drm_ioctl_desc panfrost_drm_driver_ioctls[] = {
#define PANFROST_IOCTL(n, func, flags) \
DRM_IOCTL_DEF_DRV(PANFROST_##n, panfrost_ioctl_##func, flags)
- PANFROST_IOCTL(SUBMIT, submit, DRM_RENDER_ALLOW | DRM_AUTH),
+ PANFROST_IOCTL(SUBMIT, submit, DRM_RENDER_ALLOW),
PANFROST_IOCTL(WAIT_BO, wait_bo, DRM_RENDER_ALLOW),
PANFROST_IOCTL(CREATE_BO, create_bo, DRM_RENDER_ALLOW),
PANFROST_IOCTL(MMAP_BO, mmap_bo, DRM_RENDER_ALLOW),
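Every ioctl that used to read bo->node.start now goes through the same take/translate/release sequence against the per-FD mapping. The invariant, in isolation (sketch drawn from the hunks above):

	struct panfrost_gem_mapping *mapping;

	mapping = panfrost_gem_mapping_get(bo, priv);	/* takes a kref */
	if (!mapping)
		return -EINVAL;	/* BO was never opened in this FD's AS */

	args->offset = mapping->mmnode.start << PAGE_SHIFT;
	panfrost_gem_mapping_put(mapping);		/* drop the kref */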
diff --git a/drivers/gpu/drm/panfrost/panfrost_gem.c b/drivers/gpu/drm/panfrost/panfrost_gem.c
index fd766b1395fb..17b654e1eb94 100644
--- a/drivers/gpu/drm/panfrost/panfrost_gem.c
+++ b/drivers/gpu/drm/panfrost/panfrost_gem.c
@@ -29,6 +29,12 @@ static void panfrost_gem_free_object(struct drm_gem_object *obj)
list_del_init(&bo->base.madv_list);
mutex_unlock(&pfdev->shrinker_lock);
+ /*
+ * If we still have mappings attached to the BO, there's a problem in
+ * our refcounting.
+ */
+ WARN_ON_ONCE(!list_empty(&bo->mappings.list));
+
if (bo->sgts) {
int i;
int n_sgt = bo->base.base.size / SZ_2M;
@@ -46,6 +52,69 @@ static void panfrost_gem_free_object(struct drm_gem_object *obj)
drm_gem_shmem_free_object(obj);
}
+struct panfrost_gem_mapping *
+panfrost_gem_mapping_get(struct panfrost_gem_object *bo,
+ struct panfrost_file_priv *priv)
+{
+ struct panfrost_gem_mapping *iter, *mapping = NULL;
+
+ mutex_lock(&bo->mappings.lock);
+ list_for_each_entry(iter, &bo->mappings.list, node) {
+ if (iter->mmu == &priv->mmu) {
+ kref_get(&iter->refcount);
+ mapping = iter;
+ break;
+ }
+ }
+ mutex_unlock(&bo->mappings.lock);
+
+ return mapping;
+}
+
+static void
+panfrost_gem_teardown_mapping(struct panfrost_gem_mapping *mapping)
+{
+ struct panfrost_file_priv *priv;
+
+ if (mapping->active)
+ panfrost_mmu_unmap(mapping);
+
+ priv = container_of(mapping->mmu, struct panfrost_file_priv, mmu);
+ spin_lock(&priv->mm_lock);
+ if (drm_mm_node_allocated(&mapping->mmnode))
+ drm_mm_remove_node(&mapping->mmnode);
+ spin_unlock(&priv->mm_lock);
+}
+
+static void panfrost_gem_mapping_release(struct kref *kref)
+{
+ struct panfrost_gem_mapping *mapping;
+
+ mapping = container_of(kref, struct panfrost_gem_mapping, refcount);
+
+ panfrost_gem_teardown_mapping(mapping);
+ drm_gem_object_put_unlocked(&mapping->obj->base.base);
+ kfree(mapping);
+}
+
+void panfrost_gem_mapping_put(struct panfrost_gem_mapping *mapping)
+{
+ if (!mapping)
+ return;
+
+ kref_put(&mapping->refcount, panfrost_gem_mapping_release);
+}
+
+void panfrost_gem_teardown_mappings(struct panfrost_gem_object *bo)
+{
+ struct panfrost_gem_mapping *mapping;
+
+ mutex_lock(&bo->mappings.lock);
+ list_for_each_entry(mapping, &bo->mappings.list, node)
+ panfrost_gem_teardown_mapping(mapping);
+ mutex_unlock(&bo->mappings.lock);
+}
+
int panfrost_gem_open(struct drm_gem_object *obj, struct drm_file *file_priv)
{
int ret;
@@ -54,6 +123,16 @@ int panfrost_gem_open(struct drm_gem_object *obj, struct drm_file *file_priv)
struct panfrost_gem_object *bo = to_panfrost_bo(obj);
unsigned long color = bo->noexec ? PANFROST_BO_NOEXEC : 0;
struct panfrost_file_priv *priv = file_priv->driver_priv;
+ struct panfrost_gem_mapping *mapping;
+
+ mapping = kzalloc(sizeof(*mapping), GFP_KERNEL);
+ if (!mapping)
+ return -ENOMEM;
+
+ INIT_LIST_HEAD(&mapping->node);
+ kref_init(&mapping->refcount);
+ drm_gem_object_get(obj);
+ mapping->obj = bo;
/*
* Executable buffers cannot cross a 16MB boundary as the program
@@ -66,37 +145,48 @@ int panfrost_gem_open(struct drm_gem_object *obj, struct drm_file *file_priv)
else
align = size >= SZ_2M ? SZ_2M >> PAGE_SHIFT : 0;
- bo->mmu = &priv->mmu;
+ mapping->mmu = &priv->mmu;
spin_lock(&priv->mm_lock);
- ret = drm_mm_insert_node_generic(&priv->mm, &bo->node,
+ ret = drm_mm_insert_node_generic(&priv->mm, &mapping->mmnode,
size >> PAGE_SHIFT, align, color, 0);
spin_unlock(&priv->mm_lock);
if (ret)
- return ret;
+ goto err;
if (!bo->is_heap) {
- ret = panfrost_mmu_map(bo);
- if (ret) {
- spin_lock(&priv->mm_lock);
- drm_mm_remove_node(&bo->node);
- spin_unlock(&priv->mm_lock);
- }
+ ret = panfrost_mmu_map(mapping);
+ if (ret)
+ goto err;
}
+
+ mutex_lock(&bo->mappings.lock);
+ WARN_ON(bo->base.madv != PANFROST_MADV_WILLNEED);
+ list_add_tail(&mapping->node, &bo->mappings.list);
+ mutex_unlock(&bo->mappings.lock);
+
+err:
+ if (ret)
+ panfrost_gem_mapping_put(mapping);
return ret;
}
void panfrost_gem_close(struct drm_gem_object *obj, struct drm_file *file_priv)
{
- struct panfrost_gem_object *bo = to_panfrost_bo(obj);
struct panfrost_file_priv *priv = file_priv->driver_priv;
+ struct panfrost_gem_object *bo = to_panfrost_bo(obj);
+ struct panfrost_gem_mapping *mapping = NULL, *iter;
- if (bo->is_mapped)
- panfrost_mmu_unmap(bo);
+ mutex_lock(&bo->mappings.lock);
+ list_for_each_entry(iter, &bo->mappings.list, node) {
+ if (iter->mmu == &priv->mmu) {
+ mapping = iter;
+ list_del(&iter->node);
+ break;
+ }
+ }
+ mutex_unlock(&bo->mappings.lock);
- spin_lock(&priv->mm_lock);
- if (drm_mm_node_allocated(&bo->node))
- drm_mm_remove_node(&bo->node);
- spin_unlock(&priv->mm_lock);
+ panfrost_gem_mapping_put(mapping);
}
static int panfrost_gem_pin(struct drm_gem_object *obj)
@@ -136,6 +226,8 @@ struct drm_gem_object *panfrost_gem_create_object(struct drm_device *dev, size_t
if (!obj)
return NULL;
+ INIT_LIST_HEAD(&obj->mappings.list);
+ mutex_init(&obj->mappings.lock);
obj->base.base.funcs = &panfrost_gem_funcs;
return &obj->base.base;
diff --git a/drivers/gpu/drm/panfrost/panfrost_gem.h b/drivers/gpu/drm/panfrost/panfrost_gem.h
index 4b17e7308764..ca1bc9019600 100644
--- a/drivers/gpu/drm/panfrost/panfrost_gem.h
+++ b/drivers/gpu/drm/panfrost/panfrost_gem.h
@@ -13,23 +13,46 @@ struct panfrost_gem_object {
struct drm_gem_shmem_object base;
struct sg_table *sgts;
- struct panfrost_mmu *mmu;
- struct drm_mm_node node;
- bool is_mapped :1;
+ /*
+ * Use a list for now. If searching a mapping ever becomes the
+ * bottleneck, we should consider using an RB-tree, or even better,
+ * let the core store drm_gem_object_mapping entries (where we
+ * could place driver specific data) instead of drm_gem_object ones
+ * in its drm_file->object_idr table.
+ *
+ * struct drm_gem_object_mapping {
+ * struct drm_gem_object *obj;
+ * void *driver_priv;
+ * };
+ */
+ struct {
+ struct list_head list;
+ struct mutex lock;
+ } mappings;
+
bool noexec :1;
bool is_heap :1;
};
+struct panfrost_gem_mapping {
+ struct list_head node;
+ struct kref refcount;
+ struct panfrost_gem_object *obj;
+ struct drm_mm_node mmnode;
+ struct panfrost_mmu *mmu;
+ bool active :1;
+};
+
static inline
struct panfrost_gem_object *to_panfrost_bo(struct drm_gem_object *obj)
{
return container_of(to_drm_gem_shmem_obj(obj), struct panfrost_gem_object, base);
}
-static inline
-struct panfrost_gem_object *drm_mm_node_to_panfrost_bo(struct drm_mm_node *node)
+static inline struct panfrost_gem_mapping *
+drm_mm_node_to_panfrost_mapping(struct drm_mm_node *node)
{
- return container_of(node, struct panfrost_gem_object, node);
+ return container_of(node, struct panfrost_gem_mapping, mmnode);
}
struct drm_gem_object *panfrost_gem_create_object(struct drm_device *dev, size_t size);
@@ -49,6 +72,12 @@ int panfrost_gem_open(struct drm_gem_object *obj, struct drm_file *file_priv);
void panfrost_gem_close(struct drm_gem_object *obj,
struct drm_file *file_priv);
+struct panfrost_gem_mapping *
+panfrost_gem_mapping_get(struct panfrost_gem_object *bo,
+ struct panfrost_file_priv *priv);
+void panfrost_gem_mapping_put(struct panfrost_gem_mapping *mapping);
+void panfrost_gem_teardown_mappings(struct panfrost_gem_object *bo);
+
void panfrost_gem_shrinker_init(struct drm_device *dev);
void panfrost_gem_shrinker_cleanup(struct drm_device *dev);
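The new panfrost_gem_mapping is a textbook kref object: one long-lived reference held by the BO's mappings list from open to close, plus short-lived references taken by ioctls and the fault handler. The generic shape of that pattern, as a standalone sketch (obj and obj_release are illustrative, not panfrost code):

struct obj {
	struct kref refcount;
	/* ... payload ... */
};

static void obj_release(struct kref *kref)
{
	struct obj *o = container_of(kref, struct obj, refcount);

	kfree(o);				/* runs only on the final put */
}

	kref_init(&o->refcount);		/* creation: first reference */
	kref_get(&o->refcount);			/* each additional user */
	kref_put(&o->refcount, obj_release);	/* drop; frees on last ref */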
diff --git a/drivers/gpu/drm/panfrost/panfrost_gem_shrinker.c b/drivers/gpu/drm/panfrost/panfrost_gem_shrinker.c
index 458f0fa68111..f5dd7b29bc95 100644
--- a/drivers/gpu/drm/panfrost/panfrost_gem_shrinker.c
+++ b/drivers/gpu/drm/panfrost/panfrost_gem_shrinker.c
@@ -39,11 +39,12 @@ panfrost_gem_shrinker_count(struct shrinker *shrinker, struct shrink_control *sc
static bool panfrost_gem_purge(struct drm_gem_object *obj)
{
struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);
+ struct panfrost_gem_object *bo = to_panfrost_bo(obj);
if (!mutex_trylock(&shmem->pages_lock))
return false;
- panfrost_mmu_unmap(to_panfrost_bo(obj));
+ panfrost_gem_teardown_mappings(bo);
drm_gem_shmem_purge_locked(obj);
mutex_unlock(&shmem->pages_lock);
diff --git a/drivers/gpu/drm/panfrost/panfrost_job.c b/drivers/gpu/drm/panfrost/panfrost_job.c
index d411eb6c8eb9..7c36ec675b73 100644
--- a/drivers/gpu/drm/panfrost/panfrost_job.c
+++ b/drivers/gpu/drm/panfrost/panfrost_job.c
@@ -268,9 +268,20 @@ static void panfrost_job_cleanup(struct kref *ref)
dma_fence_put(job->done_fence);
dma_fence_put(job->render_done_fence);
- if (job->bos) {
+ if (job->mappings) {
for (i = 0; i < job->bo_count; i++)
+ panfrost_gem_mapping_put(job->mappings[i]);
+ kvfree(job->mappings);
+ }
+
+ if (job->bos) {
+ struct panfrost_gem_object *bo;
+
+ for (i = 0; i < job->bo_count; i++) {
+ bo = to_panfrost_bo(job->bos[i]);
drm_gem_object_put_unlocked(job->bos[i]);
+ }
+
kvfree(job->bos);
}
@@ -542,12 +553,14 @@ int panfrost_job_open(struct panfrost_file_priv *panfrost_priv)
{
struct panfrost_device *pfdev = panfrost_priv->pfdev;
struct panfrost_job_slot *js = pfdev->js;
- struct drm_sched_rq *rq;
+ struct drm_gpu_scheduler *sched;
int ret, i;
for (i = 0; i < NUM_JOB_SLOTS; i++) {
- rq = &js->queue[i].sched.sched_rq[DRM_SCHED_PRIORITY_NORMAL];
- ret = drm_sched_entity_init(&panfrost_priv->sched_entity[i], &rq, 1, NULL);
+ sched = &js->queue[i].sched;
+ ret = drm_sched_entity_init(&panfrost_priv->sched_entity[i],
+ DRM_SCHED_PRIORITY_NORMAL, &sched,
+ 1, NULL);
if (WARN_ON(ret))
return ret;
}
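The drm_sched_entity_init() conversion reflects the scheduler API change: the priority is now passed directly and the run-queue array is replaced by a list of schedulers to pick from. The shape of the new call, using the names from the hunk above:

	struct drm_gpu_scheduler *sched = &js->queue[i].sched;

	ret = drm_sched_entity_init(&panfrost_priv->sched_entity[i],
				    DRM_SCHED_PRIORITY_NORMAL,
				    &sched, 1,	/* one scheduler to choose from */
				    NULL);	/* no guilty pointer */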
diff --git a/drivers/gpu/drm/panfrost/panfrost_job.h b/drivers/gpu/drm/panfrost/panfrost_job.h
index 62454128a792..bbd3ba97ff67 100644
--- a/drivers/gpu/drm/panfrost/panfrost_job.h
+++ b/drivers/gpu/drm/panfrost/panfrost_job.h
@@ -32,6 +32,7 @@ struct panfrost_job {
/* Exclusive fences we have taken from the BOs to wait for */
struct dma_fence **implicit_fences;
+ struct panfrost_gem_mapping **mappings;
struct drm_gem_object **bos;
u32 bo_count;
diff --git a/drivers/gpu/drm/panfrost/panfrost_mmu.c b/drivers/gpu/drm/panfrost/panfrost_mmu.c
index a3ed64a1f15e..763cfca886a7 100644
--- a/drivers/gpu/drm/panfrost/panfrost_mmu.c
+++ b/drivers/gpu/drm/panfrost/panfrost_mmu.c
@@ -269,14 +269,15 @@ static int mmu_map_sg(struct panfrost_device *pfdev, struct panfrost_mmu *mmu,
return 0;
}
-int panfrost_mmu_map(struct panfrost_gem_object *bo)
+int panfrost_mmu_map(struct panfrost_gem_mapping *mapping)
{
+ struct panfrost_gem_object *bo = mapping->obj;
struct drm_gem_object *obj = &bo->base.base;
struct panfrost_device *pfdev = to_panfrost_device(obj->dev);
struct sg_table *sgt;
int prot = IOMMU_READ | IOMMU_WRITE;
- if (WARN_ON(bo->is_mapped))
+ if (WARN_ON(mapping->active))
return 0;
if (bo->noexec)
@@ -286,25 +287,28 @@ int panfrost_mmu_map(struct panfrost_gem_object *bo)
if (WARN_ON(IS_ERR(sgt)))
return PTR_ERR(sgt);
- mmu_map_sg(pfdev, bo->mmu, bo->node.start << PAGE_SHIFT, prot, sgt);
- bo->is_mapped = true;
+ mmu_map_sg(pfdev, mapping->mmu, mapping->mmnode.start << PAGE_SHIFT,
+ prot, sgt);
+ mapping->active = true;
return 0;
}
-void panfrost_mmu_unmap(struct panfrost_gem_object *bo)
+void panfrost_mmu_unmap(struct panfrost_gem_mapping *mapping)
{
+ struct panfrost_gem_object *bo = mapping->obj;
struct drm_gem_object *obj = &bo->base.base;
struct panfrost_device *pfdev = to_panfrost_device(obj->dev);
- struct io_pgtable_ops *ops = bo->mmu->pgtbl_ops;
- u64 iova = bo->node.start << PAGE_SHIFT;
- size_t len = bo->node.size << PAGE_SHIFT;
+ struct io_pgtable_ops *ops = mapping->mmu->pgtbl_ops;
+ u64 iova = mapping->mmnode.start << PAGE_SHIFT;
+ size_t len = mapping->mmnode.size << PAGE_SHIFT;
size_t unmapped_len = 0;
- if (WARN_ON(!bo->is_mapped))
+ if (WARN_ON(!mapping->active))
return;
- dev_dbg(pfdev->dev, "unmap: as=%d, iova=%llx, len=%zx", bo->mmu->as, iova, len);
+ dev_dbg(pfdev->dev, "unmap: as=%d, iova=%llx, len=%zx",
+ mapping->mmu->as, iova, len);
while (unmapped_len < len) {
size_t unmapped_page;
@@ -318,8 +322,9 @@ void panfrost_mmu_unmap(struct panfrost_gem_object *bo)
unmapped_len += pgsize;
}
- panfrost_mmu_flush_range(pfdev, bo->mmu, bo->node.start << PAGE_SHIFT, len);
- bo->is_mapped = false;
+ panfrost_mmu_flush_range(pfdev, mapping->mmu,
+ mapping->mmnode.start << PAGE_SHIFT, len);
+ mapping->active = false;
}
static void mmu_tlb_inv_context_s1(void *cookie)
@@ -394,10 +399,10 @@ void panfrost_mmu_pgtable_free(struct panfrost_file_priv *priv)
free_io_pgtable_ops(mmu->pgtbl_ops);
}
-static struct panfrost_gem_object *
-addr_to_drm_mm_node(struct panfrost_device *pfdev, int as, u64 addr)
+static struct panfrost_gem_mapping *
+addr_to_mapping(struct panfrost_device *pfdev, int as, u64 addr)
{
- struct panfrost_gem_object *bo = NULL;
+ struct panfrost_gem_mapping *mapping = NULL;
struct panfrost_file_priv *priv;
struct drm_mm_node *node;
u64 offset = addr >> PAGE_SHIFT;
@@ -418,8 +423,9 @@ found_mmu:
drm_mm_for_each_node(node, &priv->mm) {
if (offset >= node->start &&
offset < (node->start + node->size)) {
- bo = drm_mm_node_to_panfrost_bo(node);
- drm_gem_object_get(&bo->base.base);
+ mapping = drm_mm_node_to_panfrost_mapping(node);
+
+ kref_get(&mapping->refcount);
break;
}
}
@@ -427,7 +433,7 @@ found_mmu:
spin_unlock(&priv->mm_lock);
out:
spin_unlock(&pfdev->as_lock);
- return bo;
+ return mapping;
}
#define NUM_FAULT_PAGES (SZ_2M / PAGE_SIZE)
@@ -436,28 +442,30 @@ static int panfrost_mmu_map_fault_addr(struct panfrost_device *pfdev, int as,
u64 addr)
{
int ret, i;
+ struct panfrost_gem_mapping *bomapping;
struct panfrost_gem_object *bo;
struct address_space *mapping;
pgoff_t page_offset;
struct sg_table *sgt;
struct page **pages;
- bo = addr_to_drm_mm_node(pfdev, as, addr);
- if (!bo)
+ bomapping = addr_to_mapping(pfdev, as, addr);
+ if (!bomapping)
return -ENOENT;
+ bo = bomapping->obj;
if (!bo->is_heap) {
dev_WARN(pfdev->dev, "matching BO is not heap type (GPU VA = %llx)",
- bo->node.start << PAGE_SHIFT);
+ bomapping->mmnode.start << PAGE_SHIFT);
ret = -EINVAL;
goto err_bo;
}
- WARN_ON(bo->mmu->as != as);
+ WARN_ON(bomapping->mmu->as != as);
/* Assume 2MB alignment and size multiple */
addr &= ~((u64)SZ_2M - 1);
page_offset = addr >> PAGE_SHIFT;
- page_offset -= bo->node.start;
+ page_offset -= bomapping->mmnode.start;
mutex_lock(&bo->base.pages_lock);
@@ -509,13 +517,14 @@ static int panfrost_mmu_map_fault_addr(struct panfrost_device *pfdev, int as,
goto err_map;
}
- mmu_map_sg(pfdev, bo->mmu, addr, IOMMU_WRITE | IOMMU_READ | IOMMU_NOEXEC, sgt);
+ mmu_map_sg(pfdev, bomapping->mmu, addr,
+ IOMMU_WRITE | IOMMU_READ | IOMMU_NOEXEC, sgt);
- bo->is_mapped = true;
+ bomapping->active = true;
dev_dbg(pfdev->dev, "mapped page fault @ AS%d %llx", as, addr);
- drm_gem_object_put_unlocked(&bo->base.base);
+ panfrost_gem_mapping_put(bomapping);
return 0;
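addr_to_mapping() resolves a faulting GPU VA by walking the per-FD drm_mm and testing which node's [start, start + size) range contains the page offset, then pinning the mapping before the locks drop. The containment test in isolation (sketch drawn from the hunk above):

	u64 offset = addr >> PAGE_SHIFT;
	struct drm_mm_node *node;

	drm_mm_for_each_node(node, &priv->mm) {
		if (offset >= node->start &&
		    offset < (node->start + node->size)) {
			mapping = drm_mm_node_to_panfrost_mapping(node);
			/* keep the mapping alive past the locked section */
			kref_get(&mapping->refcount);
			break;
		}
	}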
diff --git a/drivers/gpu/drm/panfrost/panfrost_mmu.h b/drivers/gpu/drm/panfrost/panfrost_mmu.h
index 7c5b6775ae23..44fc2edf63ce 100644
--- a/drivers/gpu/drm/panfrost/panfrost_mmu.h
+++ b/drivers/gpu/drm/panfrost/panfrost_mmu.h
@@ -4,12 +4,12 @@
#ifndef __PANFROST_MMU_H__
#define __PANFROST_MMU_H__
-struct panfrost_gem_object;
+struct panfrost_gem_mapping;
struct panfrost_file_priv;
struct panfrost_mmu;
-int panfrost_mmu_map(struct panfrost_gem_object *bo);
-void panfrost_mmu_unmap(struct panfrost_gem_object *bo);
+int panfrost_mmu_map(struct panfrost_gem_mapping *mapping);
+void panfrost_mmu_unmap(struct panfrost_gem_mapping *mapping);
int panfrost_mmu_init(struct panfrost_device *pfdev);
void panfrost_mmu_fini(struct panfrost_device *pfdev);
diff --git a/drivers/gpu/drm/panfrost/panfrost_perfcnt.c b/drivers/gpu/drm/panfrost/panfrost_perfcnt.c
index 2c04e858c50a..684820448be3 100644
--- a/drivers/gpu/drm/panfrost/panfrost_perfcnt.c
+++ b/drivers/gpu/drm/panfrost/panfrost_perfcnt.c
@@ -25,7 +25,7 @@
#define V4_SHADERS_PER_COREGROUP 4
struct panfrost_perfcnt {
- struct panfrost_gem_object *bo;
+ struct panfrost_gem_mapping *mapping;
size_t bosize;
void *buf;
struct panfrost_file_priv *user;
@@ -49,7 +49,7 @@ static int panfrost_perfcnt_dump_locked(struct panfrost_device *pfdev)
int ret;
reinit_completion(&pfdev->perfcnt->dump_comp);
- gpuva = pfdev->perfcnt->bo->node.start << PAGE_SHIFT;
+ gpuva = pfdev->perfcnt->mapping->mmnode.start << PAGE_SHIFT;
gpu_write(pfdev, GPU_PERFCNT_BASE_LO, gpuva);
gpu_write(pfdev, GPU_PERFCNT_BASE_HI, gpuva >> 32);
gpu_write(pfdev, GPU_INT_CLEAR,
@@ -89,17 +89,22 @@ static int panfrost_perfcnt_enable_locked(struct panfrost_device *pfdev,
if (IS_ERR(bo))
return PTR_ERR(bo);
- perfcnt->bo = to_panfrost_bo(&bo->base);
-
/* Map the perfcnt buf in the address space attached to file_priv. */
- ret = panfrost_gem_open(&perfcnt->bo->base.base, file_priv);
+ ret = panfrost_gem_open(&bo->base, file_priv);
if (ret)
goto err_put_bo;
+ perfcnt->mapping = panfrost_gem_mapping_get(to_panfrost_bo(&bo->base),
+ user);
+ if (!perfcnt->mapping) {
+ ret = -EINVAL;
+ goto err_close_bo;
+ }
+
perfcnt->buf = drm_gem_shmem_vmap(&bo->base);
if (IS_ERR(perfcnt->buf)) {
ret = PTR_ERR(perfcnt->buf);
- goto err_close_bo;
+ goto err_put_mapping;
}
/*
@@ -154,12 +159,17 @@ static int panfrost_perfcnt_enable_locked(struct panfrost_device *pfdev,
if (panfrost_has_hw_issue(pfdev, HW_ISSUE_8186))
gpu_write(pfdev, GPU_PRFCNT_TILER_EN, 0xffffffff);
+ /* The BO ref is retained by the mapping. */
+ drm_gem_object_put_unlocked(&bo->base);
+
return 0;
err_vunmap:
- drm_gem_shmem_vunmap(&perfcnt->bo->base.base, perfcnt->buf);
+ drm_gem_shmem_vunmap(&bo->base, perfcnt->buf);
+err_put_mapping:
+ panfrost_gem_mapping_put(perfcnt->mapping);
err_close_bo:
- panfrost_gem_close(&perfcnt->bo->base.base, file_priv);
+ panfrost_gem_close(&bo->base, file_priv);
err_put_bo:
drm_gem_object_put_unlocked(&bo->base);
return ret;
@@ -182,11 +192,11 @@ static int panfrost_perfcnt_disable_locked(struct panfrost_device *pfdev,
GPU_PERFCNT_CFG_MODE(GPU_PERFCNT_CFG_MODE_OFF));
perfcnt->user = NULL;
- drm_gem_shmem_vunmap(&perfcnt->bo->base.base, perfcnt->buf);
+ drm_gem_shmem_vunmap(&perfcnt->mapping->obj->base.base, perfcnt->buf);
perfcnt->buf = NULL;
- panfrost_gem_close(&perfcnt->bo->base.base, file_priv);
- drm_gem_object_put_unlocked(&perfcnt->bo->base.base);
- perfcnt->bo = NULL;
+ panfrost_gem_close(&perfcnt->mapping->obj->base.base, file_priv);
+ panfrost_gem_mapping_put(perfcnt->mapping);
+ perfcnt->mapping = NULL;
pm_runtime_mark_last_busy(pfdev->dev);
pm_runtime_put_autosuspend(pfdev->dev);
diff --git a/drivers/gpu/drm/pl111/pl111_drv.c b/drivers/gpu/drm/pl111/pl111_drv.c
index 63dfcda04147..aa8aa8d9e405 100644
--- a/drivers/gpu/drm/pl111/pl111_drv.c
+++ b/drivers/gpu/drm/pl111/pl111_drv.c
@@ -166,7 +166,7 @@ static int pl111_modeset_init(struct drm_device *dev)
priv->bridge = bridge;
if (panel) {
priv->panel = panel;
- priv->connector = panel->connector;
+ priv->connector = drm_panel_bridge_connector(bridge);
}
ret = pl111_display_init(dev);
diff --git a/drivers/gpu/drm/qxl/qxl_kms.c b/drivers/gpu/drm/qxl/qxl_kms.c
index 611cbe7aee69..bfc1631093e9 100644
--- a/drivers/gpu/drm/qxl/qxl_kms.c
+++ b/drivers/gpu/drm/qxl/qxl_kms.c
@@ -184,7 +184,7 @@ int qxl_device_init(struct qxl_device *qdev,
if (!qxl_check_device(qdev)) {
r = -ENODEV;
- goto surface_mapping_free;
+ goto rom_unmap;
}
r = qxl_bo_init(qdev);
diff --git a/drivers/gpu/drm/r128/Makefile b/drivers/gpu/drm/r128/Makefile
index ae8a1860c6b8..c07a069533ef 100644
--- a/drivers/gpu/drm/r128/Makefile
+++ b/drivers/gpu/drm/r128/Makefile
@@ -3,7 +3,7 @@
# Makefile for the drm device driver. This driver provides support for the
# Direct Rendering Infrastructure (DRI) in XFree86 4.1.0 and higher.
-r128-y := r128_drv.o r128_cce.o r128_state.o r128_irq.o
+r128-y := r128_drv.o r128_cce.o r128_state.o r128_irq.o ati_pcigart.o
r128-$(CONFIG_COMPAT) += r128_ioc32.o
diff --git a/drivers/gpu/drm/ati_pcigart.c b/drivers/gpu/drm/r128/ati_pcigart.c
index 580aa2676358..9b4072f97215 100644
--- a/drivers/gpu/drm/ati_pcigart.c
+++ b/drivers/gpu/drm/r128/ati_pcigart.c
@@ -33,11 +33,12 @@
#include <linux/export.h>
-#include <drm/ati_pcigart.h>
#include <drm/drm_device.h>
#include <drm/drm_pci.h>
#include <drm/drm_print.h>
+#include "ati_pcigart.h"
+
# define ATI_PCIGART_PAGE_SIZE 4096 /**< PCI GART page size */
static int drm_ati_alloc_pcigart_table(struct drm_device *dev,
@@ -95,7 +96,6 @@ int drm_ati_pcigart_cleanup(struct drm_device *dev, struct drm_ati_pcigart_info
return 1;
}
-EXPORT_SYMBOL(drm_ati_pcigart_cleanup);
int drm_ati_pcigart_init(struct drm_device *dev, struct drm_ati_pcigart_info *gart_info)
{
@@ -207,4 +207,3 @@ int drm_ati_pcigart_init(struct drm_device *dev, struct drm_ati_pcigart_info *ga
gart_info->bus_addr = bus_address;
return ret;
}
-EXPORT_SYMBOL(drm_ati_pcigart_init);
diff --git a/drivers/gpu/drm/r128/ati_pcigart.h b/drivers/gpu/drm/r128/ati_pcigart.h
new file mode 100644
index 000000000000..a728a1364e66
--- /dev/null
+++ b/drivers/gpu/drm/r128/ati_pcigart.h
@@ -0,0 +1,31 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef DRM_ATI_PCIGART_H
+#define DRM_ATI_PCIGART_H
+
+#include <drm/drm_legacy.h>
+
+/* location of GART table */
+#define DRM_ATI_GART_MAIN 1
+#define DRM_ATI_GART_FB 2
+
+#define DRM_ATI_GART_PCI 1
+#define DRM_ATI_GART_PCIE 2
+#define DRM_ATI_GART_IGP 3
+
+struct drm_ati_pcigart_info {
+ int gart_table_location;
+ int gart_reg_if;
+ void *addr;
+ dma_addr_t bus_addr;
+ dma_addr_t table_mask;
+ struct drm_dma_handle *table_handle;
+ struct drm_local_map mapping;
+ int table_size;
+};
+
+extern int drm_ati_pcigart_init(struct drm_device *dev,
+				struct drm_ati_pcigart_info *gart_info);
+extern int drm_ati_pcigart_cleanup(struct drm_device *dev,
+				   struct drm_ati_pcigart_info *gart_info);
+
+#endif
diff --git a/drivers/gpu/drm/r128/r128_drv.c b/drivers/gpu/drm/r128/r128_drv.c
index fd74f744604f..b7a5f162ebae 100644
--- a/drivers/gpu/drm/r128/r128_drv.c
+++ b/drivers/gpu/drm/r128/r128_drv.c
@@ -30,10 +30,10 @@
*/
#include <linux/module.h>
+#include <linux/pci.h>
#include <drm/drm_drv.h>
#include <drm/drm_file.h>
-#include <drm/drm_pci.h>
#include <drm/drm_pciids.h>
#include <drm/drm_vblank.h>
#include <drm/r128_drm.h>
diff --git a/drivers/gpu/drm/r128/r128_drv.h b/drivers/gpu/drm/r128/r128_drv.h
index ba8c30ed91d1..8b256123cf2b 100644
--- a/drivers/gpu/drm/r128/r128_drv.h
+++ b/drivers/gpu/drm/r128/r128_drv.h
@@ -39,11 +39,12 @@
#include <linux/io.h>
#include <linux/irqreturn.h>
-#include <drm/ati_pcigart.h>
#include <drm/drm_ioctl.h>
#include <drm/drm_legacy.h>
#include <drm/r128_drm.h>
+#include "ati_pcigart.h"
+
/* General customization:
*/
#define DRIVER_AUTHOR "Gareth Hughes, VA Linux Systems Inc."
diff --git a/drivers/gpu/drm/radeon/atom.h b/drivers/gpu/drm/radeon/atom.h
index 364b895e7ebb..1bf06c91cd95 100644
--- a/drivers/gpu/drm/radeon/atom.h
+++ b/drivers/gpu/drm/radeon/atom.h
@@ -25,6 +25,7 @@
#ifndef ATOM_H
#define ATOM_H
+#include <linux/mutex.h>
#include <linux/types.h>
#define ATOM_BIOS_MAGIC 0xAA55
diff --git a/drivers/gpu/drm/radeon/atombios_crtc.c b/drivers/gpu/drm/radeon/atombios_crtc.c
index da2c9e295408..be583695427a 100644
--- a/drivers/gpu/drm/radeon/atombios_crtc.c
+++ b/drivers/gpu/drm/radeon/atombios_crtc.c
@@ -244,9 +244,8 @@ static void atombios_blank_crtc(struct drm_crtc *crtc, int state)
atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
- if (ASIC_IS_DCE8(rdev)) {
+ if (ASIC_IS_DCE8(rdev))
WREG32(vga_control_regs[radeon_crtc->crtc_id], vga_control);
- }
}
static void atombios_powergate_crtc(struct drm_crtc *crtc, int state)
diff --git a/drivers/gpu/drm/radeon/atombios_dp.c b/drivers/gpu/drm/radeon/atombios_dp.c
index 6f38375c77c8..15b00a347560 100644
--- a/drivers/gpu/drm/radeon/atombios_dp.c
+++ b/drivers/gpu/drm/radeon/atombios_dp.c
@@ -412,7 +412,6 @@ int radeon_dp_get_panel_mode(struct drm_encoder *encoder,
struct drm_device *dev = encoder->dev;
struct radeon_device *rdev = dev->dev_private;
struct radeon_connector *radeon_connector = to_radeon_connector(connector);
- struct radeon_connector_atom_dig *dig_connector;
int panel_mode = DP_PANEL_MODE_EXTERNAL_DP_MODE;
u16 dp_bridge = radeon_connector_encoder_get_dp_bridge_encoder_id(connector);
u8 tmp;
@@ -423,8 +422,6 @@ int radeon_dp_get_panel_mode(struct drm_encoder *encoder,
if (!radeon_connector->con_priv)
return panel_mode;
- dig_connector = radeon_connector->con_priv;
-
if (dp_bridge != ENCODER_OBJECT_ID_NONE) {
/* DP bridge chips */
if (drm_dp_dpcd_readb(&radeon_connector->ddc_bus->aux,
@@ -816,9 +813,8 @@ void radeon_dp_link_train(struct drm_encoder *encoder,
dp_info.use_dpencoder = true;
index = GetIndexIntoMasterTable(COMMAND, DPEncoderService);
if (atom_parse_cmd_header(rdev->mode_info.atom_context, index, &frev, &crev)) {
- if (crev > 1) {
+ if (crev > 1)
dp_info.use_dpencoder = false;
- }
}
dp_info.enc_id = 0;
diff --git a/drivers/gpu/drm/radeon/atombios_encoders.c b/drivers/gpu/drm/radeon/atombios_encoders.c
index cc8f32a1b03c..cc5ee1b3af84 100644
--- a/drivers/gpu/drm/radeon/atombios_encoders.c
+++ b/drivers/gpu/drm/radeon/atombios_encoders.c
@@ -26,10 +26,10 @@
#include <linux/backlight.h>
#include <linux/dmi.h>
+#include <linux/pci.h>
#include <drm/drm_crtc_helper.h>
#include <drm/drm_file.h>
-#include <drm/drm_pci.h>
#include <drm/radeon_drm.h>
#include "atom.h"
@@ -1885,11 +1885,10 @@ atombios_set_encoder_crtc_source(struct drm_encoder *encoder)
if (ASIC_IS_AVIVO(rdev))
args.v1.ucCRTC = radeon_crtc->crtc_id;
else {
- if (radeon_encoder->encoder_id == ENCODER_OBJECT_ID_INTERNAL_DAC1) {
+ if (radeon_encoder->encoder_id == ENCODER_OBJECT_ID_INTERNAL_DAC1)
args.v1.ucCRTC = radeon_crtc->crtc_id;
- } else {
+ else
args.v1.ucCRTC = radeon_crtc->crtc_id << 2;
- }
}
switch (radeon_encoder->encoder_id) {
case ENCODER_OBJECT_ID_INTERNAL_TMDS1:
@@ -2234,9 +2233,9 @@ assigned:
DRM_ERROR("Got encoder index incorrect - returning 0\n");
return 0;
}
- if (rdev->mode_info.active_encoders & (1 << enc_idx)) {
+ if (rdev->mode_info.active_encoders & (1 << enc_idx))
DRM_ERROR("chosen encoder in use %d\n", enc_idx);
- }
+
rdev->mode_info.active_encoders |= (1 << enc_idx);
return enc_idx;
}
diff --git a/drivers/gpu/drm/radeon/atombios_i2c.c b/drivers/gpu/drm/radeon/atombios_i2c.c
index a570ce40af19..ab4d21072191 100644
--- a/drivers/gpu/drm/radeon/atombios_i2c.c
+++ b/drivers/gpu/drm/radeon/atombios_i2c.c
@@ -68,11 +68,6 @@ static int radeon_process_i2c_ch(struct radeon_i2c_chan *chan,
memcpy(&out, &buf[1], num);
args.lpI2CDataOut = cpu_to_le16(out);
} else {
- if (num > ATOM_MAX_HW_I2C_READ) {
- DRM_ERROR("hw i2c: tried to read too many bytes (%d vs 255)\n", num);
- r = -EINVAL;
- goto done;
- }
args.ucRegIndex = 0;
args.lpI2CDataOut = 0;
}
diff --git a/drivers/gpu/drm/radeon/btc_dpm.c b/drivers/gpu/drm/radeon/btc_dpm.c
index ce37de020b91..d1d8aaf8323c 100644
--- a/drivers/gpu/drm/radeon/btc_dpm.c
+++ b/drivers/gpu/drm/radeon/btc_dpm.c
@@ -22,10 +22,9 @@
* Authors: Alex Deucher
*/
+#include <linux/pci.h>
#include <linux/seq_file.h>
-#include <drm/drm_pci.h>
-
#include "atom.h"
#include "btc_dpm.h"
#include "btcd.h"
diff --git a/drivers/gpu/drm/radeon/ci_dpm.c b/drivers/gpu/drm/radeon/ci_dpm.c
index c6fd123f60b5..a9257bed3484 100644
--- a/drivers/gpu/drm/radeon/ci_dpm.c
+++ b/drivers/gpu/drm/radeon/ci_dpm.c
@@ -22,10 +22,9 @@
*/
#include <linux/firmware.h>
+#include <linux/pci.h>
#include <linux/seq_file.h>
-#include <drm/drm_pci.h>
-
#include "atom.h"
#include "ci_dpm.h"
#include "cikd.h"
diff --git a/drivers/gpu/drm/radeon/cik.c b/drivers/gpu/drm/radeon/cik.c
index 40a7e702c2a9..5c42877fd6fb 100644
--- a/drivers/gpu/drm/radeon/cik.c
+++ b/drivers/gpu/drm/radeon/cik.c
@@ -23,10 +23,10 @@
*/
#include <linux/firmware.h>
-#include <linux/slab.h>
#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/slab.h>
-#include <drm/drm_pci.h>
#include <drm/drm_vblank.h>
#include "atom.h"
@@ -8137,7 +8137,7 @@ static void cik_uvd_init(struct radeon_device *rdev)
* there. So it is pointless to try to go through that code
* hence why we disable uvd here.
*/
- rdev->has_uvd = 0;
+ rdev->has_uvd = false;
return;
}
rdev->ring[R600_RING_TYPE_UVD_INDEX].ring_obj = NULL;
@@ -8209,7 +8209,7 @@ static void cik_vce_init(struct radeon_device *rdev)
* there. So it is pointless to try to go through that code
* hence why we disable vce here.
*/
- rdev->has_vce = 0;
+ rdev->has_vce = false;
return;
}
rdev->ring[TN_RING_TYPE_VCE1_INDEX].ring_obj = NULL;
diff --git a/drivers/gpu/drm/radeon/cik_sdma.c b/drivers/gpu/drm/radeon/cik_sdma.c
index 35b9dc6ce46a..68403e77756d 100644
--- a/drivers/gpu/drm/radeon/cik_sdma.c
+++ b/drivers/gpu/drm/radeon/cik_sdma.c
@@ -333,7 +333,7 @@ void cik_sdma_enable(struct radeon_device *rdev, bool enable)
u32 me_cntl, reg_offset;
int i;
- if (enable == false) {
+ if (!enable) {
cik_sdma_gfx_stop(rdev);
cik_sdma_rlc_stop(rdev);
}
diff --git a/drivers/gpu/drm/radeon/cypress_dpm.c b/drivers/gpu/drm/radeon/cypress_dpm.c
index 32ed60f1048b..35b177d77791 100644
--- a/drivers/gpu/drm/radeon/cypress_dpm.c
+++ b/drivers/gpu/drm/radeon/cypress_dpm.c
@@ -22,7 +22,7 @@
* Authors: Alex Deucher
*/
-#include <drm/drm_pci.h>
+#include <linux/pci.h>
#include "atom.h"
#include "cypress_dpm.h"
diff --git a/drivers/gpu/drm/radeon/evergreen.c b/drivers/gpu/drm/radeon/evergreen.c
index 1d978a3d9c82..14d90dc376e7 100644
--- a/drivers/gpu/drm/radeon/evergreen.c
+++ b/drivers/gpu/drm/radeon/evergreen.c
@@ -23,9 +23,9 @@
*/
#include <linux/firmware.h>
+#include <linux/pci.h>
#include <linux/slab.h>
-#include <drm/drm_pci.h>
#include <drm/drm_vblank.h>
#include <drm/radeon_drm.h>
@@ -4945,7 +4945,7 @@ static void evergreen_uvd_init(struct radeon_device *rdev)
* there. So it is pointless to try to go through that code
* hence why we disable uvd here.
*/
- rdev->has_uvd = 0;
+ rdev->has_uvd = false;
return;
}
rdev->ring[R600_RING_TYPE_UVD_INDEX].ring_obj = NULL;
diff --git a/drivers/gpu/drm/radeon/kv_dpm.c b/drivers/gpu/drm/radeon/kv_dpm.c
index 0d8d30b78f95..5e6086eb1807 100644
--- a/drivers/gpu/drm/radeon/kv_dpm.c
+++ b/drivers/gpu/drm/radeon/kv_dpm.c
@@ -21,10 +21,9 @@
*
*/
+#include <linux/pci.h>
#include <linux/seq_file.h>
-#include <drm/drm_pci.h>
-
#include "cikd.h"
#include "kv_dpm.h"
#include "r600_dpm.h"
diff --git a/drivers/gpu/drm/radeon/ni.c b/drivers/gpu/drm/radeon/ni.c
index 410f626a39d4..02feb0801fd3 100644
--- a/drivers/gpu/drm/radeon/ni.c
+++ b/drivers/gpu/drm/radeon/ni.c
@@ -23,10 +23,10 @@
*/
#include <linux/firmware.h>
-#include <linux/slab.h>
#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/slab.h>
-#include <drm/drm_pci.h>
#include <drm/radeon_drm.h>
#include "atom.h"
@@ -2017,7 +2017,7 @@ static void cayman_uvd_init(struct radeon_device *rdev)
* there. So it is pointless to try to go through that code
* hence why we disable uvd here.
*/
- rdev->has_uvd = 0;
+ rdev->has_uvd = false;
return;
}
rdev->ring[R600_RING_TYPE_UVD_INDEX].ring_obj = NULL;
@@ -2085,7 +2085,7 @@ static void cayman_vce_init(struct radeon_device *rdev)
* there. So it is pointless to try to go through that code
* hence why we disable vce here.
*/
- rdev->has_vce = 0;
+ rdev->has_vce = false;
return;
}
rdev->ring[TN_RING_TYPE_VCE1_INDEX].ring_obj = NULL;
diff --git a/drivers/gpu/drm/radeon/ni_dpm.c b/drivers/gpu/drm/radeon/ni_dpm.c
index d9e62ca65ab8..b57c37ddd164 100644
--- a/drivers/gpu/drm/radeon/ni_dpm.c
+++ b/drivers/gpu/drm/radeon/ni_dpm.c
@@ -22,10 +22,9 @@
*/
#include <linux/math64.h>
+#include <linux/pci.h>
#include <linux/seq_file.h>
-#include <drm/drm_pci.h>
-
#include "atom.h"
#include "ni_dpm.h"
#include "nid.h"
diff --git a/drivers/gpu/drm/radeon/r100.c b/drivers/gpu/drm/radeon/r100.c
index 110fb38004b1..24c8db673931 100644
--- a/drivers/gpu/drm/radeon/r100.c
+++ b/drivers/gpu/drm/radeon/r100.c
@@ -26,16 +26,16 @@
* Jerome Glisse
*/
-#include <linux/seq_file.h>
-#include <linux/slab.h>
#include <linux/firmware.h>
#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/seq_file.h>
+#include <linux/slab.h>
#include <drm/drm_debugfs.h>
#include <drm/drm_device.h>
#include <drm/drm_file.h>
#include <drm/drm_fourcc.h>
-#include <drm/drm_pci.h>
#include <drm/drm_vblank.h>
#include <drm/radeon_drm.h>
@@ -1823,9 +1823,9 @@ static int r100_packet0_check(struct radeon_cs_parser *p,
case RADEON_PP_TXFORMAT_2:
i = (reg - RADEON_PP_TXFORMAT_0) / 24;
if (idx_value & RADEON_TXFORMAT_NON_POWER2) {
- track->textures[i].use_pitch = 1;
+ track->textures[i].use_pitch = true;
} else {
- track->textures[i].use_pitch = 0;
+ track->textures[i].use_pitch = false;
track->textures[i].width = 1 << ((idx_value & RADEON_TXFORMAT_WIDTH_MASK) >> RADEON_TXFORMAT_WIDTH_SHIFT);
track->textures[i].height = 1 << ((idx_value & RADEON_TXFORMAT_HEIGHT_MASK) >> RADEON_TXFORMAT_HEIGHT_SHIFT);
}
@@ -2387,12 +2387,12 @@ void r100_cs_track_clear(struct radeon_device *rdev, struct r100_cs_track *track
else
track->num_texture = 6;
track->maxy = 2048;
- track->separate_cube = 1;
+ track->separate_cube = true;
} else {
track->num_cb = 4;
track->num_texture = 16;
track->maxy = 4096;
- track->separate_cube = 0;
+ track->separate_cube = false;
track->aaresolve = false;
track->aa.robj = NULL;
}
@@ -2815,7 +2815,7 @@ void r100_vga_set_state(struct radeon_device *rdev, bool state)
uint32_t temp;
temp = RREG32(RADEON_CONFIG_CNTL);
- if (state == false) {
+ if (!state) {
temp &= ~RADEON_CFG_VGA_RAM_EN;
temp |= RADEON_CFG_VGA_IO_DIS;
} else {
diff --git a/drivers/gpu/drm/radeon/r300.c b/drivers/gpu/drm/radeon/r300.c
index 44856e3a7108..3b7ead5be5bf 100644
--- a/drivers/gpu/drm/radeon/r300.c
+++ b/drivers/gpu/drm/radeon/r300.c
@@ -26,6 +26,7 @@
* Jerome Glisse
*/
+#include <linux/pci.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
@@ -34,7 +35,6 @@
#include <drm/drm_debugfs.h>
#include <drm/drm_device.h>
#include <drm/drm_file.h>
-#include <drm/drm_pci.h>
#include <drm/radeon_drm.h>
#include "r100_track.h"
diff --git a/drivers/gpu/drm/radeon/r420.c b/drivers/gpu/drm/radeon/r420.c
index 83282ee2bde0..1d4c04e0a449 100644
--- a/drivers/gpu/drm/radeon/r420.c
+++ b/drivers/gpu/drm/radeon/r420.c
@@ -26,13 +26,13 @@
* Jerome Glisse
*/
+#include <linux/pci.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <drm/drm_debugfs.h>
#include <drm/drm_device.h>
#include <drm/drm_file.h>
-#include <drm/drm_pci.h>
#include "atom.h"
#include "r100d.h"
diff --git a/drivers/gpu/drm/radeon/r600.c b/drivers/gpu/drm/radeon/r600.c
index 033bc466a862..d9a33ca768f3 100644
--- a/drivers/gpu/drm/radeon/r600.c
+++ b/drivers/gpu/drm/radeon/r600.c
@@ -26,14 +26,14 @@
* Jerome Glisse
*/
-#include <linux/slab.h>
-#include <linux/seq_file.h>
#include <linux/firmware.h>
#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/slab.h>
+#include <linux/seq_file.h>
#include <drm/drm_debugfs.h>
#include <drm/drm_device.h>
-#include <drm/drm_pci.h>
#include <drm/drm_vblank.h>
#include <drm/radeon_drm.h>
@@ -3053,7 +3053,7 @@ static void r600_uvd_init(struct radeon_device *rdev)
* there. So it is pointless to try to go through that code
* hence why we disable uvd here.
*/
- rdev->has_uvd = 0;
+ rdev->has_uvd = false;
return;
}
rdev->ring[R600_RING_TYPE_UVD_INDEX].ring_obj = NULL;
@@ -3191,7 +3191,7 @@ void r600_vga_set_state(struct radeon_device *rdev, bool state)
uint32_t temp;
temp = RREG32(CONFIG_CNTL);
- if (state == false) {
+ if (!state) {
temp &= ~(1<<0);
temp |= (1<<1);
} else {
diff --git a/drivers/gpu/drm/radeon/r600_cs.c b/drivers/gpu/drm/radeon/r600_cs.c
index d6c28a5d77ab..49e8266461f8 100644
--- a/drivers/gpu/drm/radeon/r600_cs.c
+++ b/drivers/gpu/drm/radeon/r600_cs.c
@@ -350,7 +350,7 @@ static void r600_cs_track_init(struct r600_cs_track *track)
static int r600_cs_track_validate_cb(struct radeon_cs_parser *p, int i)
{
struct r600_cs_track *track = p->track;
- u32 slice_tile_max, size, tmp;
+ u32 slice_tile_max, tmp;
u32 height, height_align, pitch, pitch_align, depth_align;
u64 base_offset, base_align;
struct array_mode_checker array_check;
@@ -360,7 +360,6 @@ static int r600_cs_track_validate_cb(struct radeon_cs_parser *p, int i)
/* When resolve is used, the second colorbuffer has always 1 sample. */
unsigned nsamples = track->is_resolve && i == 1 ? 1 : track->nsamples;
- size = radeon_bo_size(track->cb_color_bo[i]) - track->cb_color_bo_offset[i];
format = G_0280A0_FORMAT(track->cb_color_info[i]);
if (!r600_fmt_is_valid_color(format)) {
dev_warn(p->dev, "%s:%d cb invalid format %d for %d (0x%08X)\n",
@@ -517,7 +516,7 @@ static int r600_cs_track_validate_cb(struct radeon_cs_parser *p, int i)
static int r600_cs_track_validate_db(struct radeon_cs_parser *p)
{
struct r600_cs_track *track = p->track;
- u32 nviews, bpe, ntiles, size, slice_tile_max, tmp;
+ u32 nviews, bpe, ntiles, slice_tile_max, tmp;
u32 height_align, pitch_align, depth_align;
u32 pitch = 8192;
u32 height = 8192;
@@ -564,7 +563,6 @@ static int r600_cs_track_validate_db(struct radeon_cs_parser *p)
}
ib[track->db_depth_size_idx] = S_028000_SLICE_TILE_MAX(tmp - 1) | (track->db_depth_size & 0x3FF);
} else {
- size = radeon_bo_size(track->db_bo);
/* pitch in pixels */
pitch = (G_028000_PITCH_TILE_MAX(track->db_depth_size) + 1) * 8;
slice_tile_max = G_028000_SLICE_TILE_MAX(track->db_depth_size) + 1;
@@ -2342,7 +2340,6 @@ int r600_cs_parse(struct radeon_cs_parser *p)
int r600_dma_cs_next_reloc(struct radeon_cs_parser *p,
struct radeon_bo_list **cs_reloc)
{
- struct radeon_cs_chunk *relocs_chunk;
unsigned idx;
*cs_reloc = NULL;
@@ -2350,7 +2347,6 @@ int r600_dma_cs_next_reloc(struct radeon_cs_parser *p,
DRM_ERROR("No relocation chunk !\n");
return -EINVAL;
}
- relocs_chunk = p->chunk_relocs;
idx = p->dma_reloc_idx;
if (idx >= p->nrelocs) {
DRM_ERROR("Relocs at %d after relocations chunk end %d !\n",
diff --git a/drivers/gpu/drm/radeon/radeon_agp.c b/drivers/gpu/drm/radeon/radeon_agp.c
index 4de16f3badb4..0aca7bdf54c7 100644
--- a/drivers/gpu/drm/radeon/radeon_agp.c
+++ b/drivers/gpu/drm/radeon/radeon_agp.c
@@ -25,9 +25,10 @@
* Jerome Glisse <glisse@freedesktop.org>
*/
+#include <linux/pci.h>
+
#include <drm/drm_agpsupport.h>
#include <drm/drm_device.h>
-#include <drm/drm_pci.h>
#include <drm/radeon_drm.h>
#include "radeon.h"
diff --git a/drivers/gpu/drm/radeon/radeon_asic.c b/drivers/gpu/drm/radeon/radeon_asic.c
index dc3c2227e06a..495700d16fc9 100644
--- a/drivers/gpu/drm/radeon/radeon_asic.c
+++ b/drivers/gpu/drm/radeon/radeon_asic.c
@@ -27,10 +27,10 @@
*/
#include <linux/console.h>
+#include <linux/pci.h>
#include <linux/vgaarb.h>
#include <drm/drm_crtc_helper.h>
-#include <drm/drm_pci.h>
#include <drm/radeon_drm.h>
#include "atom.h"
diff --git a/drivers/gpu/drm/radeon/radeon_atombios.c b/drivers/gpu/drm/radeon/radeon_atombios.c
index 226a7bf0eb7a..848ef68d9086 100644
--- a/drivers/gpu/drm/radeon/radeon_atombios.c
+++ b/drivers/gpu/drm/radeon/radeon_atombios.c
@@ -24,8 +24,9 @@
* Alex Deucher
*/
+#include <linux/pci.h>
+
#include <drm/drm_device.h>
-#include <drm/drm_pci.h>
#include <drm/radeon_drm.h>
#include "radeon.h"
@@ -569,7 +570,7 @@ bool radeon_get_atom_connector_info_from_object_table(struct drm_device *dev)
path_size += le16_to_cpu(path->usSize);
if (device_support & le16_to_cpu(path->usDeviceTag)) {
- uint8_t con_obj_id, con_obj_num, con_obj_type;
+ uint8_t con_obj_id, con_obj_num;
con_obj_id =
(le16_to_cpu(path->usConnObjectId) & OBJECT_ID_MASK)
@@ -577,9 +578,6 @@ bool radeon_get_atom_connector_info_from_object_table(struct drm_device *dev)
con_obj_num =
(le16_to_cpu(path->usConnObjectId) & ENUM_ID_MASK)
>> ENUM_ID_SHIFT;
- con_obj_type =
- (le16_to_cpu(path->usConnObjectId) &
- OBJECT_TYPE_MASK) >> OBJECT_TYPE_SHIFT;
/* TODO CV support */
if (le16_to_cpu(path->usDeviceTag) ==
@@ -647,15 +645,7 @@ bool radeon_get_atom_connector_info_from_object_table(struct drm_device *dev)
router.ddc_valid = false;
router.cd_valid = false;
for (j = 0; j < ((le16_to_cpu(path->usSize) - 8) / 2); j++) {
- uint8_t grph_obj_id, grph_obj_num, grph_obj_type;
-
- grph_obj_id =
- (le16_to_cpu(path->usGraphicObjIds[j]) &
- OBJECT_ID_MASK) >> OBJECT_ID_SHIFT;
- grph_obj_num =
- (le16_to_cpu(path->usGraphicObjIds[j]) &
- ENUM_ID_MASK) >> ENUM_ID_SHIFT;
- grph_obj_type =
+ uint8_t grph_obj_type =
(le16_to_cpu(path->usGraphicObjIds[j]) &
OBJECT_TYPE_MASK) >> OBJECT_TYPE_SHIFT;
diff --git a/drivers/gpu/drm/radeon/radeon_audio.c b/drivers/gpu/drm/radeon/radeon_audio.c
index 72db2b41e96d..8c63ccb8b623 100644
--- a/drivers/gpu/drm/radeon/radeon_audio.c
+++ b/drivers/gpu/drm/radeon/radeon_audio.c
@@ -288,7 +288,7 @@ static void radeon_audio_interface_init(struct radeon_device *rdev)
} else {
rdev->audio.funcs = &r600_funcs;
rdev->audio.hdmi_funcs = &r600_hdmi_funcs;
- rdev->audio.dp_funcs = 0;
+ rdev->audio.dp_funcs = NULL;
}
}
diff --git a/drivers/gpu/drm/radeon/radeon_bios.c b/drivers/gpu/drm/radeon/radeon_bios.c
index 4d1490fbb075..c42f73fad3e3 100644
--- a/drivers/gpu/drm/radeon/radeon_bios.c
+++ b/drivers/gpu/drm/radeon/radeon_bios.c
@@ -26,11 +26,11 @@
* Jerome Glisse
*/
-#include <linux/slab.h>
#include <linux/acpi.h>
+#include <linux/pci.h>
+#include <linux/slab.h>
#include <drm/drm_device.h>
-#include <drm/drm_pci.h>
#include "atom.h"
#include "radeon.h"
@@ -664,17 +664,17 @@ bool radeon_get_bios(struct radeon_device *rdev)
uint16_t tmp;
r = radeon_atrm_get_bios(rdev);
- if (r == false)
+ if (!r)
r = radeon_acpi_vfct_bios(rdev);
- if (r == false)
+ if (!r)
r = igp_read_bios_from_vram(rdev);
- if (r == false)
+ if (!r)
r = radeon_read_bios(rdev);
- if (r == false)
+ if (!r)
r = radeon_read_disabled_bios(rdev);
- if (r == false)
+ if (!r)
r = radeon_read_platform_bios(rdev);
- if (r == false || rdev->bios == NULL) {
+ if (!r || rdev->bios == NULL) {
DRM_ERROR("Unable to locate a BIOS ROM\n");
rdev->bios = NULL;
return false;
diff --git a/drivers/gpu/drm/radeon/radeon_clocks.c b/drivers/gpu/drm/radeon/radeon_clocks.c
index 9057b32f4498..c594ca68e3a7 100644
--- a/drivers/gpu/drm/radeon/radeon_clocks.c
+++ b/drivers/gpu/drm/radeon/radeon_clocks.c
@@ -26,8 +26,9 @@
* Jerome Glisse
*/
+#include <linux/pci.h>
+
#include <drm/drm_device.h>
-#include <drm/drm_pci.h>
#include <drm/radeon_drm.h>
#include "atom.h"
diff --git a/drivers/gpu/drm/radeon/radeon_combios.c b/drivers/gpu/drm/radeon/radeon_combios.c
index c18ae15189f3..c3e49c973812 100644
--- a/drivers/gpu/drm/radeon/radeon_combios.c
+++ b/drivers/gpu/drm/radeon/radeon_combios.c
@@ -25,8 +25,9 @@
* Alex Deucher
*/
+#include <linux/pci.h>
+
#include <drm/drm_device.h>
-#include <drm/drm_pci.h>
#include <drm/radeon_drm.h>
#include "radeon.h"
@@ -2638,7 +2639,7 @@ void radeon_combios_get_power_modes(struct radeon_device *rdev)
{
struct drm_device *dev = rdev->ddev;
u16 offset, misc, misc2 = 0;
- u8 rev, blocks, tmp;
+ u8 rev, tmp;
int state_index = 0;
struct radeon_i2c_bus_rec i2c_bus;
@@ -2731,7 +2732,6 @@ void radeon_combios_get_power_modes(struct radeon_device *rdev)
offset = combios_get_table_offset(dev, COMBIOS_POWERPLAY_INFO_TABLE);
if (offset) {
rev = RBIOS8(offset);
- blocks = RBIOS8(offset + 0x2);
/* power mode 0 tends to be the only valid one */
rdev->pm.power_state[state_index].num_clock_modes = 1;
rdev->pm.power_state[state_index].clock_info[0].mclk = RBIOS32(offset + 0x5 + 0x2);
diff --git a/drivers/gpu/drm/radeon/radeon_connectors.c b/drivers/gpu/drm/radeon/radeon_connectors.c
index c07427d3c199..fe12d9d91d7a 100644
--- a/drivers/gpu/drm/radeon/radeon_connectors.c
+++ b/drivers/gpu/drm/radeon/radeon_connectors.c
@@ -440,7 +440,7 @@ radeon_connector_analog_encoder_conflict_solve(struct drm_connector *connector,
if (radeon_conflict->use_digital)
continue;
- if (priority == true) {
+ if (priority) {
DRM_DEBUG_KMS("1: conflicting encoders switching off %s\n",
conflict->name);
DRM_DEBUG_KMS("in favor of %s\n",
@@ -700,9 +700,9 @@ static int radeon_connector_set_property(struct drm_connector *connector, struct
else
ret = radeon_legacy_get_tmds_info_from_combios(radeon_encoder, tmds);
}
- if (val == 1 || ret == false) {
+ if (val == 1 || !ret)
radeon_legacy_get_tmds_info_from_table(radeon_encoder, tmds);
- }
+
radeon_property_change_mode(&radeon_encoder->base);
}
@@ -1861,6 +1861,7 @@ radeon_add_atom_connector(struct drm_device *dev,
struct radeon_connector_atom_dig *radeon_dig_connector;
struct drm_encoder *encoder;
struct radeon_encoder *radeon_encoder;
+ struct i2c_adapter *ddc = NULL;
uint32_t subpixel_order = SubPixelNone;
bool shared_ddc = false;
bool is_dp_bridge = false;
@@ -1938,17 +1939,21 @@ radeon_add_atom_connector(struct drm_device *dev,
radeon_connector->con_priv = radeon_dig_connector;
if (i2c_bus->valid) {
radeon_connector->ddc_bus = radeon_i2c_lookup(rdev, i2c_bus);
- if (radeon_connector->ddc_bus)
+ if (radeon_connector->ddc_bus) {
has_aux = true;
- else
+ ddc = &radeon_connector->ddc_bus->adapter;
+ } else {
DRM_ERROR("DP: Failed to assign ddc bus! Check dmesg for i2c errors.\n");
+ }
}
switch (connector_type) {
case DRM_MODE_CONNECTOR_VGA:
case DRM_MODE_CONNECTOR_DVIA:
default:
- drm_connector_init(dev, &radeon_connector->base,
- &radeon_dp_connector_funcs, connector_type);
+ drm_connector_init_with_ddc(dev, &radeon_connector->base,
+ &radeon_dp_connector_funcs,
+ connector_type,
+ ddc);
drm_connector_helper_add(&radeon_connector->base,
&radeon_dp_connector_helper_funcs);
connector->interlace_allowed = true;
@@ -1970,8 +1975,10 @@ radeon_add_atom_connector(struct drm_device *dev,
case DRM_MODE_CONNECTOR_HDMIA:
case DRM_MODE_CONNECTOR_HDMIB:
case DRM_MODE_CONNECTOR_DisplayPort:
- drm_connector_init(dev, &radeon_connector->base,
- &radeon_dp_connector_funcs, connector_type);
+ drm_connector_init_with_ddc(dev, &radeon_connector->base,
+ &radeon_dp_connector_funcs,
+ connector_type,
+ ddc);
drm_connector_helper_add(&radeon_connector->base,
&radeon_dp_connector_helper_funcs);
drm_object_attach_property(&radeon_connector->base.base,
@@ -2018,8 +2025,10 @@ radeon_add_atom_connector(struct drm_device *dev,
break;
case DRM_MODE_CONNECTOR_LVDS:
case DRM_MODE_CONNECTOR_eDP:
- drm_connector_init(dev, &radeon_connector->base,
- &radeon_lvds_bridge_connector_funcs, connector_type);
+ drm_connector_init_with_ddc(dev, &radeon_connector->base,
+ &radeon_lvds_bridge_connector_funcs,
+ connector_type,
+ ddc);
drm_connector_helper_add(&radeon_connector->base,
&radeon_dp_connector_helper_funcs);
drm_object_attach_property(&radeon_connector->base.base,
@@ -2033,13 +2042,18 @@ radeon_add_atom_connector(struct drm_device *dev,
} else {
switch (connector_type) {
case DRM_MODE_CONNECTOR_VGA:
- drm_connector_init(dev, &radeon_connector->base, &radeon_vga_connector_funcs, connector_type);
- drm_connector_helper_add(&radeon_connector->base, &radeon_vga_connector_helper_funcs);
if (i2c_bus->valid) {
radeon_connector->ddc_bus = radeon_i2c_lookup(rdev, i2c_bus);
if (!radeon_connector->ddc_bus)
DRM_ERROR("VGA: Failed to assign ddc bus! Check dmesg for i2c errors.\n");
+ else
+ ddc = &radeon_connector->ddc_bus->adapter;
}
+ drm_connector_init_with_ddc(dev, &radeon_connector->base,
+ &radeon_vga_connector_funcs,
+ connector_type,
+ ddc);
+ drm_connector_helper_add(&radeon_connector->base, &radeon_vga_connector_helper_funcs);
radeon_connector->dac_load_detect = true;
drm_object_attach_property(&radeon_connector->base.base,
rdev->mode_info.load_detect_property,
@@ -2058,13 +2072,18 @@ radeon_add_atom_connector(struct drm_device *dev,
connector->doublescan_allowed = true;
break;
case DRM_MODE_CONNECTOR_DVIA:
- drm_connector_init(dev, &radeon_connector->base, &radeon_vga_connector_funcs, connector_type);
- drm_connector_helper_add(&radeon_connector->base, &radeon_vga_connector_helper_funcs);
if (i2c_bus->valid) {
radeon_connector->ddc_bus = radeon_i2c_lookup(rdev, i2c_bus);
if (!radeon_connector->ddc_bus)
DRM_ERROR("DVIA: Failed to assign ddc bus! Check dmesg for i2c errors.\n");
+ else
+ ddc = &radeon_connector->ddc_bus->adapter;
}
+ drm_connector_init_with_ddc(dev, &radeon_connector->base,
+ &radeon_vga_connector_funcs,
+ connector_type,
+ ddc);
+ drm_connector_helper_add(&radeon_connector->base, &radeon_vga_connector_helper_funcs);
radeon_connector->dac_load_detect = true;
drm_object_attach_property(&radeon_connector->base.base,
rdev->mode_info.load_detect_property,
@@ -2089,13 +2108,18 @@ radeon_add_atom_connector(struct drm_device *dev,
goto failed;
radeon_dig_connector->igp_lane_info = igp_lane_info;
radeon_connector->con_priv = radeon_dig_connector;
- drm_connector_init(dev, &radeon_connector->base, &radeon_dvi_connector_funcs, connector_type);
- drm_connector_helper_add(&radeon_connector->base, &radeon_dvi_connector_helper_funcs);
if (i2c_bus->valid) {
radeon_connector->ddc_bus = radeon_i2c_lookup(rdev, i2c_bus);
if (!radeon_connector->ddc_bus)
DRM_ERROR("DVI: Failed to assign ddc bus! Check dmesg for i2c errors.\n");
+ else
+ ddc = &radeon_connector->ddc_bus->adapter;
}
+ drm_connector_init_with_ddc(dev, &radeon_connector->base,
+ &radeon_dvi_connector_funcs,
+ connector_type,
+ ddc);
+ drm_connector_helper_add(&radeon_connector->base, &radeon_dvi_connector_helper_funcs);
subpixel_order = SubPixelHorizontalRGB;
drm_object_attach_property(&radeon_connector->base.base,
rdev->mode_info.coherent_mode_property,
@@ -2146,13 +2170,18 @@ radeon_add_atom_connector(struct drm_device *dev,
goto failed;
radeon_dig_connector->igp_lane_info = igp_lane_info;
radeon_connector->con_priv = radeon_dig_connector;
- drm_connector_init(dev, &radeon_connector->base, &radeon_dvi_connector_funcs, connector_type);
- drm_connector_helper_add(&radeon_connector->base, &radeon_dvi_connector_helper_funcs);
if (i2c_bus->valid) {
radeon_connector->ddc_bus = radeon_i2c_lookup(rdev, i2c_bus);
if (!radeon_connector->ddc_bus)
DRM_ERROR("HDMI: Failed to assign ddc bus! Check dmesg for i2c errors.\n");
+ else
+ ddc = &radeon_connector->ddc_bus->adapter;
}
+ drm_connector_init_with_ddc(dev, &radeon_connector->base,
+ &radeon_dvi_connector_funcs,
+ connector_type,
+ ddc);
+ drm_connector_helper_add(&radeon_connector->base, &radeon_dvi_connector_helper_funcs);
drm_object_attach_property(&radeon_connector->base.base,
rdev->mode_info.coherent_mode_property,
1);
@@ -2196,15 +2225,20 @@ radeon_add_atom_connector(struct drm_device *dev,
goto failed;
radeon_dig_connector->igp_lane_info = igp_lane_info;
radeon_connector->con_priv = radeon_dig_connector;
- drm_connector_init(dev, &radeon_connector->base, &radeon_dp_connector_funcs, connector_type);
- drm_connector_helper_add(&radeon_connector->base, &radeon_dp_connector_helper_funcs);
if (i2c_bus->valid) {
radeon_connector->ddc_bus = radeon_i2c_lookup(rdev, i2c_bus);
- if (radeon_connector->ddc_bus)
+ if (radeon_connector->ddc_bus) {
has_aux = true;
- else
+ ddc = &radeon_connector->ddc_bus->adapter;
+ } else {
DRM_ERROR("DP: Failed to assign ddc bus! Check dmesg for i2c errors.\n");
+ }
}
+ drm_connector_init_with_ddc(dev, &radeon_connector->base,
+ &radeon_dp_connector_funcs,
+ connector_type,
+ ddc);
+ drm_connector_helper_add(&radeon_connector->base, &radeon_dp_connector_helper_funcs);
subpixel_order = SubPixelHorizontalRGB;
drm_object_attach_property(&radeon_connector->base.base,
rdev->mode_info.coherent_mode_property,
@@ -2246,15 +2280,20 @@ radeon_add_atom_connector(struct drm_device *dev,
goto failed;
radeon_dig_connector->igp_lane_info = igp_lane_info;
radeon_connector->con_priv = radeon_dig_connector;
- drm_connector_init(dev, &radeon_connector->base, &radeon_edp_connector_funcs, connector_type);
- drm_connector_helper_add(&radeon_connector->base, &radeon_dp_connector_helper_funcs);
if (i2c_bus->valid) {
radeon_connector->ddc_bus = radeon_i2c_lookup(rdev, i2c_bus);
- if (radeon_connector->ddc_bus)
+ if (radeon_connector->ddc_bus) {
has_aux = true;
- else
+ ddc = &radeon_connector->ddc_bus->adapter;
+ } else {
DRM_ERROR("DP: Failed to assign ddc bus! Check dmesg for i2c errors.\n");
+ }
}
+ drm_connector_init_with_ddc(dev, &radeon_connector->base,
+ &radeon_edp_connector_funcs,
+ connector_type,
+ ddc);
+ drm_connector_helper_add(&radeon_connector->base, &radeon_dp_connector_helper_funcs);
drm_object_attach_property(&radeon_connector->base.base,
dev->mode_config.scaling_mode_property,
DRM_MODE_SCALE_FULLSCREEN);
@@ -2265,7 +2304,10 @@ radeon_add_atom_connector(struct drm_device *dev,
case DRM_MODE_CONNECTOR_SVIDEO:
case DRM_MODE_CONNECTOR_Composite:
case DRM_MODE_CONNECTOR_9PinDIN:
- drm_connector_init(dev, &radeon_connector->base, &radeon_tv_connector_funcs, connector_type);
+ drm_connector_init_with_ddc(dev, &radeon_connector->base,
+ &radeon_tv_connector_funcs,
+ connector_type,
+ ddc);
drm_connector_helper_add(&radeon_connector->base, &radeon_tv_connector_helper_funcs);
radeon_connector->dac_load_detect = true;
drm_object_attach_property(&radeon_connector->base.base,
@@ -2285,13 +2327,18 @@ radeon_add_atom_connector(struct drm_device *dev,
goto failed;
radeon_dig_connector->igp_lane_info = igp_lane_info;
radeon_connector->con_priv = radeon_dig_connector;
- drm_connector_init(dev, &radeon_connector->base, &radeon_lvds_connector_funcs, connector_type);
- drm_connector_helper_add(&radeon_connector->base, &radeon_lvds_connector_helper_funcs);
if (i2c_bus->valid) {
radeon_connector->ddc_bus = radeon_i2c_lookup(rdev, i2c_bus);
if (!radeon_connector->ddc_bus)
DRM_ERROR("LVDS: Failed to assign ddc bus! Check dmesg for i2c errors.\n");
+ else
+ ddc = &radeon_connector->ddc_bus->adapter;
}
+ drm_connector_init_with_ddc(dev, &radeon_connector->base,
+ &radeon_lvds_connector_funcs,
+ connector_type,
+ ddc);
+ drm_connector_helper_add(&radeon_connector->base, &radeon_lvds_connector_helper_funcs);
drm_object_attach_property(&radeon_connector->base.base,
dev->mode_config.scaling_mode_property,
DRM_MODE_SCALE_FULLSCREEN);
@@ -2335,6 +2382,7 @@ radeon_add_legacy_connector(struct drm_device *dev,
struct radeon_device *rdev = dev->dev_private;
struct drm_connector *connector;
struct radeon_connector *radeon_connector;
+ struct i2c_adapter *ddc = NULL;
uint32_t subpixel_order = SubPixelNone;
if (connector_type == DRM_MODE_CONNECTOR_Unknown)
@@ -2369,13 +2417,18 @@ radeon_add_legacy_connector(struct drm_device *dev,
switch (connector_type) {
case DRM_MODE_CONNECTOR_VGA:
- drm_connector_init(dev, &radeon_connector->base, &radeon_vga_connector_funcs, connector_type);
- drm_connector_helper_add(&radeon_connector->base, &radeon_vga_connector_helper_funcs);
if (i2c_bus->valid) {
radeon_connector->ddc_bus = radeon_i2c_lookup(rdev, i2c_bus);
if (!radeon_connector->ddc_bus)
DRM_ERROR("VGA: Failed to assign ddc bus! Check dmesg for i2c errors.\n");
+ else
+ ddc = &radeon_connector->ddc_bus->adapter;
}
+ drm_connector_init_with_ddc(dev, &radeon_connector->base,
+ &radeon_vga_connector_funcs,
+ connector_type,
+ ddc);
+ drm_connector_helper_add(&radeon_connector->base, &radeon_vga_connector_helper_funcs);
radeon_connector->dac_load_detect = true;
drm_object_attach_property(&radeon_connector->base.base,
rdev->mode_info.load_detect_property,
@@ -2386,13 +2439,18 @@ radeon_add_legacy_connector(struct drm_device *dev,
connector->doublescan_allowed = true;
break;
case DRM_MODE_CONNECTOR_DVIA:
- drm_connector_init(dev, &radeon_connector->base, &radeon_vga_connector_funcs, connector_type);
- drm_connector_helper_add(&radeon_connector->base, &radeon_vga_connector_helper_funcs);
if (i2c_bus->valid) {
radeon_connector->ddc_bus = radeon_i2c_lookup(rdev, i2c_bus);
if (!radeon_connector->ddc_bus)
DRM_ERROR("DVIA: Failed to assign ddc bus! Check dmesg for i2c errors.\n");
+ else
+ ddc = &radeon_connector->ddc_bus->adapter;
}
+ drm_connector_init_with_ddc(dev, &radeon_connector->base,
+ &radeon_vga_connector_funcs,
+ connector_type,
+ ddc);
+ drm_connector_helper_add(&radeon_connector->base, &radeon_vga_connector_helper_funcs);
radeon_connector->dac_load_detect = true;
drm_object_attach_property(&radeon_connector->base.base,
rdev->mode_info.load_detect_property,
@@ -2404,13 +2462,18 @@ radeon_add_legacy_connector(struct drm_device *dev,
break;
case DRM_MODE_CONNECTOR_DVII:
case DRM_MODE_CONNECTOR_DVID:
- drm_connector_init(dev, &radeon_connector->base, &radeon_dvi_connector_funcs, connector_type);
- drm_connector_helper_add(&radeon_connector->base, &radeon_dvi_connector_helper_funcs);
if (i2c_bus->valid) {
radeon_connector->ddc_bus = radeon_i2c_lookup(rdev, i2c_bus);
if (!radeon_connector->ddc_bus)
DRM_ERROR("DVI: Failed to assign ddc bus! Check dmesg for i2c errors.\n");
+ else
+ ddc = &radeon_connector->ddc_bus->adapter;
}
+ drm_connector_init_with_ddc(dev, &radeon_connector->base,
+ &radeon_dvi_connector_funcs,
+ connector_type,
+ ddc);
+ drm_connector_helper_add(&radeon_connector->base, &radeon_dvi_connector_helper_funcs);
if (connector_type == DRM_MODE_CONNECTOR_DVII) {
radeon_connector->dac_load_detect = true;
drm_object_attach_property(&radeon_connector->base.base,
@@ -2427,7 +2490,10 @@ radeon_add_legacy_connector(struct drm_device *dev,
case DRM_MODE_CONNECTOR_SVIDEO:
case DRM_MODE_CONNECTOR_Composite:
case DRM_MODE_CONNECTOR_9PinDIN:
- drm_connector_init(dev, &radeon_connector->base, &radeon_tv_connector_funcs, connector_type);
+ drm_connector_init_with_ddc(dev, &radeon_connector->base,
+ &radeon_tv_connector_funcs,
+ connector_type,
+ ddc);
drm_connector_helper_add(&radeon_connector->base, &radeon_tv_connector_helper_funcs);
radeon_connector->dac_load_detect = true;
/* RS400,RC410,RS480 chipset seems to report a lot
@@ -2449,13 +2515,18 @@ radeon_add_legacy_connector(struct drm_device *dev,
connector->doublescan_allowed = false;
break;
case DRM_MODE_CONNECTOR_LVDS:
- drm_connector_init(dev, &radeon_connector->base, &radeon_lvds_connector_funcs, connector_type);
- drm_connector_helper_add(&radeon_connector->base, &radeon_lvds_connector_helper_funcs);
if (i2c_bus->valid) {
radeon_connector->ddc_bus = radeon_i2c_lookup(rdev, i2c_bus);
if (!radeon_connector->ddc_bus)
DRM_ERROR("LVDS: Failed to assign ddc bus! Check dmesg for i2c errors.\n");
+ else
+ ddc = &radeon_connector->ddc_bus->adapter;
}
+ drm_connector_init_with_ddc(dev, &radeon_connector->base,
+ &radeon_lvds_connector_funcs,
+ connector_type,
+ ddc);
+ drm_connector_helper_add(&radeon_connector->base, &radeon_lvds_connector_helper_funcs);
drm_object_attach_property(&radeon_connector->base.base,
dev->mode_config.scaling_mode_property,
DRM_MODE_SCALE_FULLSCREEN);
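
The radeon_connectors.c hunks above all apply one pattern: the DDC bus lookup is hoisted ahead of connector registration so the resolved i2c adapter can be handed to drm_connector_init_with_ddc(), letting the DRM core associate the connector with its DDC device. A minimal sketch of the pattern, distilled from the hunks (the funcs table shown is just one of the variants used):

	/* Resolve the DDC i2c adapter first, then register the connector
	 * with it so the core can expose the connector/ddc link. */
	struct i2c_adapter *ddc = NULL;

	if (i2c_bus->valid) {
		radeon_connector->ddc_bus = radeon_i2c_lookup(rdev, i2c_bus);
		if (radeon_connector->ddc_bus)
			ddc = &radeon_connector->ddc_bus->adapter;
	}
	drm_connector_init_with_ddc(dev, &radeon_connector->base,
				    &radeon_vga_connector_funcs,
				    connector_type, ddc);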
diff --git a/drivers/gpu/drm/radeon/radeon_cs.c b/drivers/gpu/drm/radeon/radeon_cs.c
index 7b5460678382..0d0ab8e0ff3b 100644
--- a/drivers/gpu/drm/radeon/radeon_cs.c
+++ b/drivers/gpu/drm/radeon/radeon_cs.c
@@ -26,11 +26,11 @@
*/
#include <linux/list_sort.h>
+#include <linux/pci.h>
#include <linux/uaccess.h>
#include <drm/drm_device.h>
#include <drm/drm_file.h>
-#include <drm/drm_pci.h>
#include <drm/radeon_drm.h>
#include "radeon.h"
diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c
index 5d017f0aec66..a522e092038b 100644
--- a/drivers/gpu/drm/radeon/radeon_device.c
+++ b/drivers/gpu/drm/radeon/radeon_device.c
@@ -28,6 +28,7 @@
#include <linux/console.h>
#include <linux/efi.h>
+#include <linux/pci.h>
#include <linux/pm_runtime.h>
#include <linux/slab.h>
#include <linux/vga_switcheroo.h>
@@ -38,7 +39,6 @@
#include <drm/drm_debugfs.h>
#include <drm/drm_device.h>
#include <drm/drm_file.h>
-#include <drm/drm_pci.h>
#include <drm/drm_probe_helper.h>
#include <drm/radeon_drm.h>
diff --git a/drivers/gpu/drm/radeon/radeon_display.c b/drivers/gpu/drm/radeon/radeon_display.c
index e81b01f8db90..856526cb2caf 100644
--- a/drivers/gpu/drm/radeon/radeon_display.c
+++ b/drivers/gpu/drm/radeon/radeon_display.c
@@ -24,6 +24,7 @@
* Alex Deucher
*/
+#include <linux/pci.h>
#include <linux/pm_runtime.h>
#include <linux/gcd.h>
@@ -36,7 +37,6 @@
#include <drm/drm_fb_helper.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_gem_framebuffer_helper.h>
-#include <drm/drm_pci.h>
#include <drm/drm_plane_helper.h>
#include <drm/drm_probe_helper.h>
#include <drm/drm_vblank.h>
@@ -847,11 +847,11 @@ static bool radeon_setup_enc_conn(struct drm_device *dev)
if (rdev->bios) {
if (rdev->is_atom_bios) {
ret = radeon_get_atom_connector_info_from_supported_devices_table(dev);
- if (ret == false)
+ if (!ret)
ret = radeon_get_atom_connector_info_from_object_table(dev);
} else {
ret = radeon_get_legacy_connector_info_from_bios(dev);
- if (ret == false)
+ if (!ret)
ret = radeon_get_legacy_connector_info_from_table(dev);
}
} else {
@@ -1687,7 +1687,6 @@ bool radeon_crtc_scaling_mode_fixup(struct drm_crtc *crtc,
struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
struct radeon_encoder *radeon_encoder;
struct drm_connector *connector;
- struct radeon_connector *radeon_connector;
bool first = true;
u32 src_v = 1, dst_v = 1;
u32 src_h = 1, dst_h = 1;
@@ -1700,7 +1699,6 @@ bool radeon_crtc_scaling_mode_fixup(struct drm_crtc *crtc,
continue;
radeon_encoder = to_radeon_encoder(encoder);
connector = radeon_get_connector_for_encoder(encoder);
- radeon_connector = to_radeon_connector(connector);
if (first) {
/* set scaling */
diff --git a/drivers/gpu/drm/radeon/radeon_dp_mst.c b/drivers/gpu/drm/radeon/radeon_dp_mst.c
index ee28f5b3785e..28eef9282874 100644
--- a/drivers/gpu/drm/radeon/radeon_dp_mst.c
+++ b/drivers/gpu/drm/radeon/radeon_dp_mst.c
@@ -518,7 +518,7 @@ static bool radeon_mst_mode_fixup(struct drm_encoder *encoder,
mst_enc = radeon_encoder->enc_priv;
- mst_enc->pbn = drm_dp_calc_pbn_mode(adjusted_mode->clock, bpp);
+ mst_enc->pbn = drm_dp_calc_pbn_mode(adjusted_mode->clock, bpp, false);
mst_enc->primary->active_device = mst_enc->primary->devices & mst_enc->connector->devices;
DRM_DEBUG_KMS("setting active device to %08x from %08x %08x for encoder %d\n",
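
drm_dp_calc_pbn_mode() takes a third argument in this kernel generation, a DSC flag: when true, the helper treats bpp as being in 1/16 bpp units to model Display Stream Compression. Radeon does not drive DSC sinks, hence the literal false. A sketch of both call shapes, assuming the caller already has a valid adjusted mode and bpp:

	/* Uncompressed stream: bpp in whole bits per pixel. */
	pbn = drm_dp_calc_pbn_mode(adjusted_mode->clock, bpp, false);
	/* DSC stream: bpp scaled by 16 before the call. */
	pbn = drm_dp_calc_pbn_mode(adjusted_mode->clock, bpp * 16, true);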
diff --git a/drivers/gpu/drm/radeon/radeon_encoders.c b/drivers/gpu/drm/radeon/radeon_encoders.c
index a0c99087034a..ced022fae19d 100644
--- a/drivers/gpu/drm/radeon/radeon_encoders.c
+++ b/drivers/gpu/drm/radeon/radeon_encoders.c
@@ -24,9 +24,10 @@
* Alex Deucher
*/
+#include <linux/pci.h>
+
#include <drm/drm_crtc_helper.h>
#include <drm/drm_device.h>
-#include <drm/drm_pci.h>
#include <drm/radeon_drm.h>
#include "radeon.h"
diff --git a/drivers/gpu/drm/radeon/radeon_fb.c b/drivers/gpu/drm/radeon/radeon_fb.c
index 2c564f4f3468..ec0b7d6c994d 100644
--- a/drivers/gpu/drm/radeon/radeon_fb.c
+++ b/drivers/gpu/drm/radeon/radeon_fb.c
@@ -25,6 +25,7 @@
*/
#include <linux/module.h>
+#include <linux/pci.h>
#include <linux/pm_runtime.h>
#include <linux/slab.h>
#include <linux/vga_switcheroo.h>
@@ -33,7 +34,6 @@
#include <drm/drm_crtc_helper.h>
#include <drm/drm_fb_helper.h>
#include <drm/drm_fourcc.h>
-#include <drm/drm_pci.h>
#include <drm/radeon_drm.h>
#include "radeon.h"
@@ -73,7 +73,7 @@ radeonfb_release(struct fb_info *info, int user)
return 0;
}
-static struct fb_ops radeonfb_ops = {
+static const struct fb_ops radeonfb_ops = {
.owner = THIS_MODULE,
DRM_FB_HELPER_DEFAULT_OPS,
.fb_open = radeonfb_open,
diff --git a/drivers/gpu/drm/radeon/radeon_gart.c b/drivers/gpu/drm/radeon/radeon_gart.c
index d4d3778d0a98..f178ba321715 100644
--- a/drivers/gpu/drm/radeon/radeon_gart.c
+++ b/drivers/gpu/drm/radeon/radeon_gart.c
@@ -26,9 +26,9 @@
* Jerome Glisse
*/
+#include <linux/pci.h>
#include <linux/vmalloc.h>
-#include <drm/drm_pci.h>
#include <drm/radeon_drm.h>
#ifdef CONFIG_X86
#include <asm/set_memory.h>
diff --git a/drivers/gpu/drm/radeon/radeon_gem.c b/drivers/gpu/drm/radeon/radeon_gem.c
index 67298a0739cb..068c3e5da173 100644
--- a/drivers/gpu/drm/radeon/radeon_gem.c
+++ b/drivers/gpu/drm/radeon/radeon_gem.c
@@ -26,10 +26,11 @@
* Jerome Glisse
*/
+#include <linux/pci.h>
+
#include <drm/drm_debugfs.h>
#include <drm/drm_device.h>
#include <drm/drm_file.h>
-#include <drm/drm_pci.h>
#include <drm/radeon_drm.h>
#include "radeon.h"
diff --git a/drivers/gpu/drm/radeon/radeon_i2c.c b/drivers/gpu/drm/radeon/radeon_i2c.c
index d465a3de7732..545e31e6cc3a 100644
--- a/drivers/gpu/drm/radeon/radeon_i2c.c
+++ b/drivers/gpu/drm/radeon/radeon_i2c.c
@@ -25,10 +25,10 @@
*/
#include <linux/export.h>
+#include <linux/pci.h>
#include <drm/drm_device.h>
#include <drm/drm_edid.h>
-#include <drm/drm_pci.h>
#include <drm/radeon_drm.h>
#include "radeon.h"
diff --git a/drivers/gpu/drm/radeon/radeon_irq_kms.c b/drivers/gpu/drm/radeon/radeon_irq_kms.c
index d9613638f9cc..b86bc88ad430 100644
--- a/drivers/gpu/drm/radeon/radeon_irq_kms.c
+++ b/drivers/gpu/drm/radeon/radeon_irq_kms.c
@@ -26,12 +26,12 @@
* Jerome Glisse
*/
+#include <linux/pci.h>
#include <linux/pm_runtime.h>
#include <drm/drm_crtc_helper.h>
#include <drm/drm_device.h>
#include <drm/drm_irq.h>
-#include <drm/drm_pci.h>
#include <drm/drm_probe_helper.h>
#include <drm/drm_vblank.h>
#include <drm/radeon_drm.h>
diff --git a/drivers/gpu/drm/radeon/radeon_kms.c b/drivers/gpu/drm/radeon/radeon_kms.c
index e85c554eeaa9..d24f23a81656 100644
--- a/drivers/gpu/drm/radeon/radeon_kms.c
+++ b/drivers/gpu/drm/radeon/radeon_kms.c
@@ -26,6 +26,7 @@
* Jerome Glisse
*/
+#include <linux/pci.h>
#include <linux/pm_runtime.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
@@ -34,7 +35,6 @@
#include <drm/drm_fb_helper.h>
#include <drm/drm_file.h>
#include <drm/drm_ioctl.h>
-#include <drm/drm_pci.h>
#include <drm/radeon_drm.h>
#include "radeon.h"
diff --git a/drivers/gpu/drm/radeon/radeon_legacy_encoders.c b/drivers/gpu/drm/radeon/radeon_legacy_encoders.c
index ef100b790463..44d060f75318 100644
--- a/drivers/gpu/drm/radeon/radeon_legacy_encoders.c
+++ b/drivers/gpu/drm/radeon/radeon_legacy_encoders.c
@@ -25,11 +25,11 @@
*/
#include <linux/backlight.h>
+#include <linux/pci.h>
#include <drm/drm_crtc_helper.h>
#include <drm/drm_device.h>
#include <drm/drm_file.h>
-#include <drm/drm_pci.h>
#include <drm/drm_util.h>
#include <drm/radeon_drm.h>
@@ -1712,7 +1712,7 @@ static struct radeon_encoder_int_tmds *radeon_legacy_get_tmds_info(struct radeon
else
ret = radeon_legacy_get_tmds_info_from_combios(encoder, tmds);
- if (ret == false)
+ if (!ret)
radeon_legacy_get_tmds_info_from_table(encoder, tmds);
return tmds;
@@ -1735,7 +1735,7 @@ static struct radeon_encoder_ext_tmds *radeon_legacy_get_ext_tmds_info(struct ra
ret = radeon_legacy_get_ext_tmds_info_from_combios(encoder, tmds);
- if (ret == false)
+ if (!ret)
radeon_legacy_get_ext_tmds_info_from_table(encoder, tmds);
return tmds;
diff --git a/drivers/gpu/drm/radeon/radeon_legacy_tv.c b/drivers/gpu/drm/radeon/radeon_legacy_tv.c
index f132eec737ad..d9df7f311e76 100644
--- a/drivers/gpu/drm/radeon/radeon_legacy_tv.c
+++ b/drivers/gpu/drm/radeon/radeon_legacy_tv.c
@@ -537,7 +537,7 @@ void radeon_legacy_tv_mode_set(struct drm_encoder *encoder,
uint32_t tv_master_cntl, tv_rgb_cntl, tv_dac_cntl;
uint32_t tv_modulator_cntl1, tv_modulator_cntl2;
uint32_t tv_vscaler_cntl1, tv_vscaler_cntl2;
- uint32_t tv_pll_cntl, tv_pll_cntl1, tv_ftotal;
+ uint32_t tv_pll_cntl, tv_ftotal;
uint32_t tv_y_fall_cntl, tv_y_rise_cntl, tv_y_saw_tooth_cntl;
uint32_t m, n, p;
const uint16_t *hor_timing;
@@ -709,12 +709,6 @@ void radeon_legacy_tv_mode_set(struct drm_encoder *encoder,
(((n >> 9) & RADEON_TV_N0HI_MASK) << RADEON_TV_N0HI_SHIFT) |
((p & RADEON_TV_P_MASK) << RADEON_TV_P_SHIFT);
- tv_pll_cntl1 = (((4 & RADEON_TVPCP_MASK) << RADEON_TVPCP_SHIFT) |
- ((4 & RADEON_TVPVG_MASK) << RADEON_TVPVG_SHIFT) |
- ((1 & RADEON_TVPDC_MASK) << RADEON_TVPDC_SHIFT) |
- RADEON_TVCLK_SRC_SEL_TVPLL |
- RADEON_TVPLL_TEST_DIS);
-
tv_dac->tv.tv_uv_adr = 0xc8;
if (tv_dac->tv_std == TV_STD_NTSC ||
diff --git a/drivers/gpu/drm/radeon/radeon_pm.c b/drivers/gpu/drm/radeon/radeon_pm.c
index 5d10e11a9225..8c5d6fda0d75 100644
--- a/drivers/gpu/drm/radeon/radeon_pm.c
+++ b/drivers/gpu/drm/radeon/radeon_pm.c
@@ -23,10 +23,10 @@
#include <linux/hwmon-sysfs.h>
#include <linux/hwmon.h>
+#include <linux/pci.h>
#include <linux/power_supply.h>
#include <drm/drm_debugfs.h>
-#include <drm/drm_pci.h>
#include <drm/drm_vblank.h>
#include "atom.h"
@@ -1789,7 +1789,7 @@ static bool radeon_pm_debug_check_in_vbl(struct radeon_device *rdev, bool finish
u32 stat_crtc = 0;
bool in_vbl = radeon_pm_in_vbl(rdev);
- if (in_vbl == false)
+ if (!in_vbl)
DRM_DEBUG_DRIVER("not in vbl for pm change %08x at %s\n", stat_crtc,
finish ? "exit" : "entry");
return in_vbl;
diff --git a/drivers/gpu/drm/radeon/radeon_ttm.c b/drivers/gpu/drm/radeon/radeon_ttm.c
index 098bc9f40b98..3b92311d30b9 100644
--- a/drivers/gpu/drm/radeon/radeon_ttm.c
+++ b/drivers/gpu/drm/radeon/radeon_ttm.c
@@ -32,6 +32,7 @@
#include <linux/dma-mapping.h>
#include <linux/pagemap.h>
+#include <linux/pci.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/swap.h>
@@ -41,7 +42,6 @@
#include <drm/drm_debugfs.h>
#include <drm/drm_device.h>
#include <drm/drm_file.h>
-#include <drm/drm_pci.h>
#include <drm/drm_prime.h>
#include <drm/radeon_drm.h>
#include <drm/ttm/ttm_bo_api.h>
@@ -443,7 +443,7 @@ static int radeon_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_mem_
mem->bus.size);
else
mem->bus.addr =
- ioremap_nocache(mem->bus.base + mem->bus.offset,
+ ioremap(mem->bus.base + mem->bus.offset,
mem->bus.size);
if (!mem->bus.addr)
return -ENOMEM;
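
ioremap_nocache() was being retired at this point in the tree: plain ioremap() already returns an uncached mapping on every architecture, so conversions like the one above are one-for-one with no behavioral change. The equivalent call over the same bus window:

	/* Uncached MMIO mapping; ioremap_nocache() was an alias for this. */
	mem->bus.addr = ioremap(mem->bus.base + mem->bus.offset,
				mem->bus.size);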
@@ -881,9 +881,6 @@ void radeon_ttm_set_active_vram_size(struct radeon_device *rdev, u64 size)
man->size = size >> PAGE_SHIFT;
}
-static struct vm_operations_struct radeon_ttm_vm_ops;
-static const struct vm_operations_struct *ttm_vm_ops = NULL;
-
static vm_fault_t radeon_ttm_fault(struct vm_fault *vmf)
{
struct ttm_buffer_object *bo;
@@ -891,34 +888,36 @@ static vm_fault_t radeon_ttm_fault(struct vm_fault *vmf)
vm_fault_t ret;
bo = (struct ttm_buffer_object *)vmf->vma->vm_private_data;
- if (bo == NULL) {
+ if (bo == NULL)
return VM_FAULT_NOPAGE;
- }
+
rdev = radeon_get_rdev(bo->bdev);
down_read(&rdev->pm.mclk_lock);
- ret = ttm_vm_ops->fault(vmf);
+ ret = ttm_bo_vm_fault(vmf);
up_read(&rdev->pm.mclk_lock);
return ret;
}
+static struct vm_operations_struct radeon_ttm_vm_ops = {
+ .fault = radeon_ttm_fault,
+ .open = ttm_bo_vm_open,
+ .close = ttm_bo_vm_close,
+ .access = ttm_bo_vm_access
+};
+
int radeon_mmap(struct file *filp, struct vm_area_struct *vma)
{
int r;
struct drm_file *file_priv = filp->private_data;
struct radeon_device *rdev = file_priv->minor->dev->dev_private;
- if (rdev == NULL) {
+ if (rdev == NULL)
return -EINVAL;
- }
+
r = ttm_bo_mmap(filp, vma, &rdev->mman.bdev);
- if (unlikely(r != 0)) {
+ if (unlikely(r != 0))
return r;
- }
- if (unlikely(ttm_vm_ops == NULL)) {
- ttm_vm_ops = vma->vm_ops;
- radeon_ttm_vm_ops = *ttm_vm_ops;
- radeon_ttm_vm_ops.fault = &radeon_ttm_fault;
- }
+
vma->vm_ops = &radeon_ttm_vm_ops;
return 0;
}
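
The radeon_ttm.c rewrite above drops the old trick of copying TTM's private vm_operations_struct at first mmap time and patching its .fault pointer. Since TTM now exports ttm_bo_vm_fault(), ttm_bo_vm_open(), ttm_bo_vm_close() and ttm_bo_vm_access(), a driver can declare its table statically and wrap only the fault path it cares about. A sketch of the same pattern for a hypothetical driver (the mydrv_* names and lock are illustrative, not from this patch):

	/* Wrap the generic TTM fault handler with a driver-side lock. */
	static vm_fault_t mydrv_ttm_fault(struct vm_fault *vmf)
	{
		struct ttm_buffer_object *bo = vmf->vma->vm_private_data;
		struct mydrv_device *mdev = mydrv_get_dev(bo->bdev);
		vm_fault_t ret;

		down_read(&mdev->reclock);	/* hypothetical driver lock */
		ret = ttm_bo_vm_fault(vmf);	/* generic TTM fault path */
		up_read(&mdev->reclock);
		return ret;
	}

	static const struct vm_operations_struct mydrv_ttm_vm_ops = {
		.fault = mydrv_ttm_fault,
		.open = ttm_bo_vm_open,
		.close = ttm_bo_vm_close,
		.access = ttm_bo_vm_access,
	};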
diff --git a/drivers/gpu/drm/radeon/radeon_vce.c b/drivers/gpu/drm/radeon/radeon_vce.c
index 59db54ace428..5e8006444704 100644
--- a/drivers/gpu/drm/radeon/radeon_vce.c
+++ b/drivers/gpu/drm/radeon/radeon_vce.c
@@ -388,9 +388,9 @@ int radeon_vce_get_create_msg(struct radeon_device *rdev, int ring,
ib.ptr[i] = cpu_to_le32(0x0);
r = radeon_ib_schedule(rdev, &ib, NULL, false);
- if (r) {
+ if (r)
DRM_ERROR("radeon: failed to schedule ib (%d).\n", r);
- }
+
if (fence)
*fence = radeon_fence_ref(ib.fence);
diff --git a/drivers/gpu/drm/radeon/radeon_vm.c b/drivers/gpu/drm/radeon/radeon_vm.c
index e0ad547786e8..f60fae0aed11 100644
--- a/drivers/gpu/drm/radeon/radeon_vm.c
+++ b/drivers/gpu/drm/radeon/radeon_vm.c
@@ -296,9 +296,9 @@ struct radeon_bo_va *radeon_vm_bo_find(struct radeon_vm *vm,
struct radeon_bo_va *bo_va;
list_for_each_entry(bo_va, &bo->va, bo_list) {
- if (bo_va->vm == vm) {
+ if (bo_va->vm == vm)
return bo_va;
- }
+
}
return NULL;
}
@@ -323,9 +323,9 @@ struct radeon_bo_va *radeon_vm_bo_add(struct radeon_device *rdev,
struct radeon_bo_va *bo_va;
bo_va = kzalloc(sizeof(struct radeon_bo_va), GFP_KERNEL);
- if (bo_va == NULL) {
+ if (bo_va == NULL)
return NULL;
- }
+
bo_va->vm = vm;
bo_va->bo = bo;
bo_va->it.start = 0;
@@ -947,9 +947,9 @@ int radeon_vm_bo_update(struct radeon_device *rdev,
if (mem) {
addr = (u64)mem->start << PAGE_SHIFT;
- if (mem->mem_type != TTM_PL_SYSTEM) {
+ if (mem->mem_type != TTM_PL_SYSTEM)
bo_va->flags |= RADEON_VM_PAGE_VALID;
- }
+
if (mem->mem_type == TTM_PL_TT) {
bo_va->flags |= RADEON_VM_PAGE_SYSTEM;
if (!(bo_va->bo->flags & (RADEON_GEM_GTT_WC | RADEON_GEM_GTT_UC)))
@@ -1233,9 +1233,9 @@ void radeon_vm_fini(struct radeon_device *rdev, struct radeon_vm *vm)
struct radeon_bo_va *bo_va, *tmp;
int i, r;
- if (!RB_EMPTY_ROOT(&vm->va.rb_root)) {
+ if (!RB_EMPTY_ROOT(&vm->va.rb_root))
dev_err(rdev->dev, "still active bo inside vm\n");
- }
+
rbtree_postorder_for_each_entry_safe(bo_va, tmp,
&vm->va.rb_root, it.rb) {
interval_tree_remove(&bo_va->it, &vm->va);
diff --git a/drivers/gpu/drm/radeon/rs600.c b/drivers/gpu/drm/radeon/rs600.c
index 2f8ff089f7b1..c88b4906f7bc 100644
--- a/drivers/gpu/drm/radeon/rs600.c
+++ b/drivers/gpu/drm/radeon/rs600.c
@@ -37,9 +37,9 @@
*/
#include <linux/io-64-nonatomic-lo-hi.h>
+#include <linux/pci.h>
#include <drm/drm_device.h>
-#include <drm/drm_pci.h>
#include <drm/drm_vblank.h>
#include "atom.h"
diff --git a/drivers/gpu/drm/radeon/rs690.c b/drivers/gpu/drm/radeon/rs690.c
index 267d8a9134c8..c296f94f9700 100644
--- a/drivers/gpu/drm/radeon/rs690.c
+++ b/drivers/gpu/drm/radeon/rs690.c
@@ -26,7 +26,7 @@
* Jerome Glisse
*/
-#include <drm/drm_pci.h>
+#include <linux/pci.h>
#include "atom.h"
#include "radeon.h"
diff --git a/drivers/gpu/drm/radeon/rs780_dpm.c b/drivers/gpu/drm/radeon/rs780_dpm.c
index 72dbf3251c53..17390074277a 100644
--- a/drivers/gpu/drm/radeon/rs780_dpm.c
+++ b/drivers/gpu/drm/radeon/rs780_dpm.c
@@ -22,10 +22,9 @@
* Authors: Alex Deucher
*/
+#include <linux/pci.h>
#include <linux/seq_file.h>
-#include <drm/drm_pci.h>
-
#include "atom.h"
#include "r600_dpm.h"
#include "radeon.h"
diff --git a/drivers/gpu/drm/radeon/rv770.c b/drivers/gpu/drm/radeon/rv770.c
index 7a6fc66d6a40..21f653ae1e1b 100644
--- a/drivers/gpu/drm/radeon/rv770.c
+++ b/drivers/gpu/drm/radeon/rv770.c
@@ -27,10 +27,10 @@
*/
#include <linux/firmware.h>
+#include <linux/pci.h>
#include <linux/slab.h>
#include <drm/drm_device.h>
-#include <drm/drm_pci.h>
#include <drm/radeon_drm.h>
#include "atom.h"
@@ -1703,7 +1703,7 @@ static void rv770_uvd_init(struct radeon_device *rdev)
* there. So it is pointless to try to go through that code
* hence why we disable uvd here.
*/
- rdev->has_uvd = 0;
+ rdev->has_uvd = false;
return;
}
rdev->ring[R600_RING_TYPE_UVD_INDEX].ring_obj = NULL;
diff --git a/drivers/gpu/drm/radeon/si.c b/drivers/gpu/drm/radeon/si.c
index d7eea75b2c27..93dcab548a83 100644
--- a/drivers/gpu/drm/radeon/si.c
+++ b/drivers/gpu/drm/radeon/si.c
@@ -23,10 +23,10 @@
*/
#include <linux/firmware.h>
-#include <linux/slab.h>
#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/slab.h>
-#include <drm/drm_pci.h>
#include <drm/drm_vblank.h>
#include <drm/radeon_drm.h>
@@ -6472,7 +6472,7 @@ static void si_uvd_init(struct radeon_device *rdev)
* there. So it is pointless to try to go through that code
* hence why we disable uvd here.
*/
- rdev->has_uvd = 0;
+ rdev->has_uvd = false;
return;
}
rdev->ring[R600_RING_TYPE_UVD_INDEX].ring_obj = NULL;
@@ -6539,7 +6539,7 @@ static void si_vce_init(struct radeon_device *rdev)
* there. So it is pointless to try to go through that code
* hence why we disable vce here.
*/
- rdev->has_vce = 0;
+ rdev->has_vce = false;
return;
}
rdev->ring[TN_RING_TYPE_VCE1_INDEX].ring_obj = NULL;
diff --git a/drivers/gpu/drm/radeon/si_dpm.c b/drivers/gpu/drm/radeon/si_dpm.c
index a0b382a637a6..05e8b4d0af3f 100644
--- a/drivers/gpu/drm/radeon/si_dpm.c
+++ b/drivers/gpu/drm/radeon/si_dpm.c
@@ -22,10 +22,9 @@
*/
#include <linux/math64.h>
+#include <linux/pci.h>
#include <linux/seq_file.h>
-#include <drm/drm_pci.h>
-
#include "atom.h"
#include "r600_dpm.h"
#include "radeon.h"
@@ -3640,14 +3639,13 @@ static int si_notify_smc_display_change(struct radeon_device *rdev,
static void si_program_response_times(struct radeon_device *rdev)
{
- u32 voltage_response_time, backbias_response_time, acpi_delay_time, vbi_time_out;
+ u32 voltage_response_time, acpi_delay_time, vbi_time_out;
u32 vddc_dly, acpi_dly, vbi_dly;
u32 reference_clock;
si_write_smc_soft_register(rdev, SI_SMC_SOFT_REGISTER_mvdd_chg_time, 1);
voltage_response_time = (u32)rdev->pm.dpm.voltage_response_time;
- backbias_response_time = (u32)rdev->pm.dpm.backbias_response_time;
if (voltage_response_time == 0)
voltage_response_time = 1000;
@@ -5900,7 +5898,7 @@ static int si_patch_single_dependency_table_based_on_leakage(struct radeon_devic
static int si_patch_dependency_tables_based_on_leakage(struct radeon_device *rdev)
{
- int ret = 0;
+ int ret;
ret = si_patch_single_dependency_table_based_on_leakage(rdev,
&rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk);
diff --git a/drivers/gpu/drm/radeon/trinity_dpm.c b/drivers/gpu/drm/radeon/trinity_dpm.c
index 65302f9d025e..4d93b84aa739 100644
--- a/drivers/gpu/drm/radeon/trinity_dpm.c
+++ b/drivers/gpu/drm/radeon/trinity_dpm.c
@@ -21,10 +21,9 @@
*
*/
+#include <linux/pci.h>
#include <linux/seq_file.h>
-#include <drm/drm_pci.h>
-
#include "r600_dpm.h"
#include "radeon.h"
#include "radeon_asic.h"
diff --git a/drivers/gpu/drm/rcar-du/Kconfig b/drivers/gpu/drm/rcar-du/Kconfig
index 1529849e217e..0919f1f159a4 100644
--- a/drivers/gpu/drm/rcar-du/Kconfig
+++ b/drivers/gpu/drm/rcar-du/Kconfig
@@ -4,6 +4,7 @@ config DRM_RCAR_DU
depends on DRM && OF
depends on ARM || ARM64
depends on ARCH_RENESAS || COMPILE_TEST
+ imply DRM_RCAR_CMM
imply DRM_RCAR_LVDS
select DRM_KMS_HELPER
select DRM_KMS_CMA_HELPER
@@ -13,6 +14,13 @@ config DRM_RCAR_DU
Choose this option if you have an R-Car chipset.
If M is selected the module will be called rcar-du-drm.
+config DRM_RCAR_CMM
+ tristate "R-Car DU Color Management Module (CMM) Support"
+ depends on DRM && OF
+ depends on DRM_RCAR_DU
+ help
+ Enable support for R-Car Color Management Module (CMM).
+
config DRM_RCAR_DW_HDMI
tristate "R-Car DU Gen3 HDMI Encoder Support"
depends on DRM && OF
diff --git a/drivers/gpu/drm/rcar-du/Makefile b/drivers/gpu/drm/rcar-du/Makefile
index 6c2ed9c46467..4d1187ccc3e5 100644
--- a/drivers/gpu/drm/rcar-du/Makefile
+++ b/drivers/gpu/drm/rcar-du/Makefile
@@ -15,6 +15,7 @@ rcar-du-drm-$(CONFIG_DRM_RCAR_LVDS) += rcar_du_of.o \
rcar-du-drm-$(CONFIG_DRM_RCAR_VSP) += rcar_du_vsp.o
rcar-du-drm-$(CONFIG_DRM_RCAR_WRITEBACK) += rcar_du_writeback.o
+obj-$(CONFIG_DRM_RCAR_CMM) += rcar_cmm.o
obj-$(CONFIG_DRM_RCAR_DU) += rcar-du-drm.o
obj-$(CONFIG_DRM_RCAR_DW_HDMI) += rcar_dw_hdmi.o
obj-$(CONFIG_DRM_RCAR_LVDS) += rcar_lvds.o
diff --git a/drivers/gpu/drm/rcar-du/rcar_cmm.c b/drivers/gpu/drm/rcar-du/rcar_cmm.c
new file mode 100644
index 000000000000..c578095b09a5
--- /dev/null
+++ b/drivers/gpu/drm/rcar-du/rcar_cmm.c
@@ -0,0 +1,217 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * rcar_cmm.c -- R-Car Display Unit Color Management Module
+ *
+ * Copyright (C) 2019 Jacopo Mondi <jacopo+renesas@jmondi.org>
+ */
+
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+
+#include <drm/drm_color_mgmt.h>
+
+#include "rcar_cmm.h"
+
+#define CM2_LUT_CTRL 0x0000
+#define CM2_LUT_CTRL_LUT_EN BIT(0)
+#define CM2_LUT_TBL_BASE 0x0600
+#define CM2_LUT_TBL(__i) (CM2_LUT_TBL_BASE + (__i) * 4)
+
+struct rcar_cmm {
+ void __iomem *base;
+
+ /*
+ * @lut: 1D-LUT state
+ * @lut.enabled: 1D-LUT enabled flag
+ */
+ struct {
+ bool enabled;
+ } lut;
+};
+
+static inline int rcar_cmm_read(struct rcar_cmm *rcmm, u32 reg)
+{
+ return ioread32(rcmm->base + reg);
+}
+
+static inline void rcar_cmm_write(struct rcar_cmm *rcmm, u32 reg, u32 data)
+{
+ iowrite32(data, rcmm->base + reg);
+}
+
+/*
+ * rcar_cmm_lut_write() - Scale the DRM LUT table entries to hardware precision
+ * and write to the CMM registers
+ * @rcmm: Pointer to the CMM device
+ * @drm_lut: Pointer to the DRM LUT table
+ */
+static void rcar_cmm_lut_write(struct rcar_cmm *rcmm,
+ const struct drm_color_lut *drm_lut)
+{
+ unsigned int i;
+
+ for (i = 0; i < CM2_LUT_SIZE; ++i) {
+ u32 entry = drm_color_lut_extract(drm_lut[i].red, 8) << 16
+ | drm_color_lut_extract(drm_lut[i].green, 8) << 8
+ | drm_color_lut_extract(drm_lut[i].blue, 8);
+
+ rcar_cmm_write(rcmm, CM2_LUT_TBL(i), entry);
+ }
+}
+
+/*
+ * rcar_cmm_setup() - Configure the CMM unit
+ * @pdev: The platform device associated with the CMM instance
+ * @config: The CMM unit configuration
+ *
+ * Configure the CMM unit with the given configuration. Currently enabling,
+ * disabling and programming of the 1-D LUT unit are supported.
+ *
+ * As rcar_cmm_setup() accesses the CMM registers the unit should be powered
+ * and its functional clock enabled. To guarantee this, before any call to
+ * this function is made, the CMM unit has to be enabled by calling
+ * rcar_cmm_enable() first.
+ *
+ * TODO: Add support for LUT double buffer operations to avoid updating the
+ * LUT table entries while a frame is being displayed.
+ */
+int rcar_cmm_setup(struct platform_device *pdev,
+ const struct rcar_cmm_config *config)
+{
+ struct rcar_cmm *rcmm = platform_get_drvdata(pdev);
+
+ /* Disable LUT if no table is provided. */
+ if (!config->lut.table) {
+ if (rcmm->lut.enabled) {
+ rcar_cmm_write(rcmm, CM2_LUT_CTRL, 0);
+ rcmm->lut.enabled = false;
+ }
+
+ return 0;
+ }
+
+ /* Enable LUT and program the new gamma table values. */
+ if (!rcmm->lut.enabled) {
+ rcar_cmm_write(rcmm, CM2_LUT_CTRL, CM2_LUT_CTRL_LUT_EN);
+ rcmm->lut.enabled = true;
+ }
+
+ rcar_cmm_lut_write(rcmm, config->lut.table);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(rcar_cmm_setup);
+
+/*
+ * rcar_cmm_enable() - Enable the CMM unit
+ * @pdev: The platform device associated with the CMM instance
+ *
+ * When the output of the corresponding DU channel is routed to the CMM unit,
+ * the unit shall be enabled before the DU channel is started, and remain
+ * enabled until the channel is stopped. The CMM unit shall be disabled with
+ * rcar_cmm_disable().
+ *
+ * Calls to rcar_cmm_enable() and rcar_cmm_disable() are not reference-counted.
+ * It is an error to attempt to enable an already enabled CMM unit, or to
+ * attempt to disable a disabled unit.
+ */
+int rcar_cmm_enable(struct platform_device *pdev)
+{
+ int ret;
+
+ ret = pm_runtime_get_sync(&pdev->dev);
+ if (ret < 0)
+ return ret;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(rcar_cmm_enable);
+
+/*
+ * rcar_cmm_disable() - Disable the CMM unit
+ * @pdev: The platform device associated with the CMM instance
+ *
+ * See rcar_cmm_enable() for usage information.
+ *
+ * Disabling the CMM unit disables all the internal processing blocks. The CMM
+ * state shall thus be restored with rcar_cmm_setup() when re-enabling the CMM
+ * unit after the next rcar_cmm_enable() call.
+ */
+void rcar_cmm_disable(struct platform_device *pdev)
+{
+ struct rcar_cmm *rcmm = platform_get_drvdata(pdev);
+
+ rcar_cmm_write(rcmm, CM2_LUT_CTRL, 0);
+ rcmm->lut.enabled = false;
+
+ pm_runtime_put(&pdev->dev);
+}
+EXPORT_SYMBOL_GPL(rcar_cmm_disable);
+
+/*
+ * rcar_cmm_init() - Initialize the CMM unit
+ * @pdev: The platform device associated with the CMM instance
+ *
+ * Return: 0 on success, -EPROBE_DEFER if the CMM is not available yet,
+ * -ENODEV if the DRM_RCAR_CMM config option is disabled
+ */
+int rcar_cmm_init(struct platform_device *pdev)
+{
+ struct rcar_cmm *rcmm = platform_get_drvdata(pdev);
+
+ if (!rcmm)
+ return -EPROBE_DEFER;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(rcar_cmm_init);
+
+static int rcar_cmm_probe(struct platform_device *pdev)
+{
+ struct rcar_cmm *rcmm;
+
+ rcmm = devm_kzalloc(&pdev->dev, sizeof(*rcmm), GFP_KERNEL);
+ if (!rcmm)
+ return -ENOMEM;
+ platform_set_drvdata(pdev, rcmm);
+
+ rcmm->base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(rcmm->base))
+ return PTR_ERR(rcmm->base);
+
+ pm_runtime_enable(&pdev->dev);
+
+ return 0;
+}
+
+static int rcar_cmm_remove(struct platform_device *pdev)
+{
+ pm_runtime_disable(&pdev->dev);
+
+ return 0;
+}
+
+static const struct of_device_id rcar_cmm_of_table[] = {
+ { .compatible = "renesas,rcar-gen3-cmm", },
+ { .compatible = "renesas,rcar-gen2-cmm", },
+ { },
+};
+MODULE_DEVICE_TABLE(of, rcar_cmm_of_table);
+
+static struct platform_driver rcar_cmm_platform_driver = {
+ .probe = rcar_cmm_probe,
+ .remove = rcar_cmm_remove,
+ .driver = {
+ .name = "rcar-cmm",
+ .of_match_table = rcar_cmm_of_table,
+ },
+};
+
+module_platform_driver(rcar_cmm_platform_driver);
+
+MODULE_AUTHOR("Jacopo Mondi <jacopo+renesas@jmondi.org>");
+MODULE_DESCRIPTION("Renesas R-Car CMM Driver");
+MODULE_LICENSE("GPL v2");
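
Taken together, the API this new file exports is meant to be driven in a fixed sequence: rcar_cmm_enable() before the DU channel starts, rcar_cmm_setup() while the unit is powered, rcar_cmm_disable() after the channel stops. A sketch of a caller, assuming a fully populated CM2_LUT_SIZE-entry table (the function name is illustrative, error handling elided):

	/* Program the 1D-LUT through the CMM API defined above. */
	static int example_cmm_gamma(struct platform_device *cmm_pdev,
				     struct drm_color_lut *table)
	{
		struct rcar_cmm_config config = { .lut.table = table };
		int ret;

		ret = rcar_cmm_enable(cmm_pdev);	/* power + clock the unit */
		if (ret < 0)
			return ret;

		rcar_cmm_setup(cmm_pdev, &config);	/* enable and fill the LUT */
		/* ... display runs ... */
		rcar_cmm_disable(cmm_pdev);	/* LUT state is lost here */
		return 0;
	}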
diff --git a/drivers/gpu/drm/rcar-du/rcar_cmm.h b/drivers/gpu/drm/rcar-du/rcar_cmm.h
new file mode 100644
index 000000000000..b5f7ec6db04a
--- /dev/null
+++ b/drivers/gpu/drm/rcar-du/rcar_cmm.h
@@ -0,0 +1,58 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * rcar_cmm.h -- R-Car Display Unit Color Management Module
+ *
+ * Copyright (C) 2019 Jacopo Mondi <jacopo+renesas@jmondi.org>
+ */
+
+#ifndef __RCAR_CMM_H__
+#define __RCAR_CMM_H__
+
+#define CM2_LUT_SIZE 256
+
+struct drm_color_lut;
+struct platform_device;
+
+/**
+ * struct rcar_cmm_config - CMM configuration
+ *
+ * @lut: 1D-LUT configuration
+ * @lut.table: 1D-LUT table entries. Disable LUT operations when NULL
+ */
+struct rcar_cmm_config {
+ struct {
+ struct drm_color_lut *table;
+ } lut;
+};
+
+#if IS_ENABLED(CONFIG_DRM_RCAR_CMM)
+int rcar_cmm_init(struct platform_device *pdev);
+
+int rcar_cmm_enable(struct platform_device *pdev);
+void rcar_cmm_disable(struct platform_device *pdev);
+
+int rcar_cmm_setup(struct platform_device *pdev,
+ const struct rcar_cmm_config *config);
+#else
+static inline int rcar_cmm_init(struct platform_device *pdev)
+{
+ return -ENODEV;
+}
+
+static inline int rcar_cmm_enable(struct platform_device *pdev)
+{
+ return 0;
+}
+
+static inline void rcar_cmm_disable(struct platform_device *pdev)
+{
+}
+
+static inline int rcar_cmm_setup(struct platform_device *pdev,
+ const struct rcar_cmm_config *config)
+{
+ return 0;
+}
+#endif /* IS_ENABLED(CONFIG_DRM_RCAR_CMM) */
+
+#endif /* __RCAR_CMM_H__ */
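
The IS_ENABLED() stubs let callers treat a compiled-out CMM as an ordinary runtime condition instead of sprinkling #ifdefs: rcar_cmm_init() is the only stub that fails, and it fails with -ENODEV precisely so the DU can translate that into "no CMM, keep probing". The intended caller logic, as used later in rcar_du_kms.c:

	/* Probe for CMM support with no preprocessor conditionals. */
	ret = rcar_cmm_init(pdev);
	if (ret == -ENODEV)
		return 0;	/* CONFIG_DRM_RCAR_CMM=n: run without CMM */
	if (ret)
		return ret;	/* -EPROBE_DEFER or a real error */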
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_crtc.c b/drivers/gpu/drm/rcar-du/rcar_du_crtc.c
index 2da46e3dc4ae..d73e88ddecd0 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_crtc.c
+++ b/drivers/gpu/drm/rcar-du/rcar_du_crtc.c
@@ -14,6 +14,7 @@
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
+#include <drm/drm_bridge.h>
#include <drm/drm_crtc.h>
#include <drm/drm_device.h>
#include <drm/drm_fb_cma_helper.h>
@@ -21,6 +22,7 @@
#include <drm/drm_plane_helper.h>
#include <drm/drm_vblank.h>
+#include "rcar_cmm.h"
#include "rcar_du_crtc.h"
#include "rcar_du_drv.h"
#include "rcar_du_encoder.h"
@@ -475,6 +477,45 @@ static void rcar_du_crtc_wait_page_flip(struct rcar_du_crtc *rcrtc)
}
/* -----------------------------------------------------------------------------
+ * Color Management Module (CMM)
+ */
+
+static int rcar_du_cmm_check(struct drm_crtc *crtc,
+ struct drm_crtc_state *state)
+{
+ struct drm_property_blob *drm_lut = state->gamma_lut;
+ struct rcar_du_crtc *rcrtc = to_rcar_crtc(crtc);
+ struct device *dev = rcrtc->dev->dev;
+
+ if (!drm_lut)
+ return 0;
+
+ /* We only accept fully populated LUT tables. */
+ if (drm_color_lut_size(drm_lut) != CM2_LUT_SIZE) {
+ dev_err(dev, "invalid gamma lut size: %zu bytes\n",
+ drm_lut->length);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static void rcar_du_cmm_setup(struct drm_crtc *crtc)
+{
+ struct drm_property_blob *drm_lut = crtc->state->gamma_lut;
+ struct rcar_du_crtc *rcrtc = to_rcar_crtc(crtc);
+ struct rcar_cmm_config cmm_config = {};
+
+ if (!rcrtc->cmm)
+ return;
+
+ if (drm_lut)
+ cmm_config.lut.table = (struct drm_color_lut *)drm_lut->data;
+
+ rcar_cmm_setup(rcrtc->cmm, &cmm_config);
+}
+
+/* -----------------------------------------------------------------------------
* Start/Stop and Suspend/Resume
*/
@@ -619,6 +660,9 @@ static void rcar_du_crtc_stop(struct rcar_du_crtc *rcrtc)
if (rcar_du_has(rcrtc->dev, RCAR_DU_FEATURE_VSP1_SOURCE))
rcar_du_vsp_disable(rcrtc);
+ if (rcrtc->cmm)
+ rcar_cmm_disable(rcrtc->cmm);
+
/*
* Select switch sync mode. This stops display operation and configures
* the HSYNC and VSYNC signals as inputs.
@@ -642,6 +686,11 @@ static int rcar_du_crtc_atomic_check(struct drm_crtc *crtc,
{
struct rcar_du_crtc_state *rstate = to_rcar_crtc_state(state);
struct drm_encoder *encoder;
+ int ret;
+
+ ret = rcar_du_cmm_check(crtc, state);
+ if (ret)
+ return ret;
/* Store the routes from the CRTC output to the DU outputs. */
rstate->outputs = 0;
@@ -667,6 +716,8 @@ static void rcar_du_crtc_atomic_enable(struct drm_crtc *crtc,
struct rcar_du_crtc_state *rstate = to_rcar_crtc_state(crtc->state);
struct rcar_du_device *rcdu = rcrtc->dev;
+ if (rcrtc->cmm)
+ rcar_cmm_enable(rcrtc->cmm);
rcar_du_crtc_get(rcrtc);
/*
@@ -680,12 +731,20 @@ static void rcar_du_crtc_atomic_enable(struct drm_crtc *crtc,
rcdu->encoders[RCAR_DU_OUTPUT_LVDS0 + rcrtc->index];
const struct drm_display_mode *mode =
&crtc->state->adjusted_mode;
+ struct drm_bridge *bridge;
- rcar_lvds_clk_enable(encoder->base.bridge,
- mode->clock * 1000);
+ bridge = drm_bridge_chain_get_first_bridge(&encoder->base);
+ rcar_lvds_clk_enable(bridge, mode->clock * 1000);
}
rcar_du_crtc_start(rcrtc);
+
+ /*
+ * TODO: The chip manual indicates that CMM tables should be written
+ * after the DU channel has been activated. Investigate the impact
+ * of this restriction on the first displayed frame.
+ */
+ rcar_du_cmm_setup(crtc);
}
static void rcar_du_crtc_atomic_disable(struct drm_crtc *crtc,
@@ -702,12 +761,14 @@ static void rcar_du_crtc_atomic_disable(struct drm_crtc *crtc,
rstate->outputs == BIT(RCAR_DU_OUTPUT_DPAD0)) {
struct rcar_du_encoder *encoder =
rcdu->encoders[RCAR_DU_OUTPUT_LVDS0 + rcrtc->index];
+ struct drm_bridge *bridge;
/*
* Disable the LVDS clock output, see
* rcar_du_crtc_atomic_enable().
*/
- rcar_lvds_clk_disable(encoder->base.bridge);
+ bridge = drm_bridge_chain_get_first_bridge(&encoder->base);
+ rcar_lvds_clk_disable(bridge);
}
spin_lock_irq(&crtc->dev->event_lock);
@@ -739,6 +800,10 @@ static void rcar_du_crtc_atomic_begin(struct drm_crtc *crtc,
*/
rcar_du_crtc_get(rcrtc);
+ /* If the active state changed, we let .atomic_enable handle CMM. */
+ if (crtc->state->color_mgmt_changed && !crtc->state->active_changed)
+ rcar_du_cmm_setup(crtc);
+
if (rcar_du_has(rcrtc->dev, RCAR_DU_FEATURE_VSP1_SOURCE))
rcar_du_vsp_atomic_begin(rcrtc);
}
@@ -1075,6 +1140,7 @@ static const struct drm_crtc_funcs crtc_funcs_gen3 = {
.set_crc_source = rcar_du_crtc_set_crc_source,
.verify_crc_source = rcar_du_crtc_verify_crc_source,
.get_crc_sources = rcar_du_crtc_get_crc_sources,
+ .gamma_set = drm_atomic_helper_legacy_gamma_set,
};
/* -----------------------------------------------------------------------------
@@ -1194,6 +1260,15 @@ int rcar_du_crtc_create(struct rcar_du_group *rgrp, unsigned int swindex,
if (ret < 0)
return ret;
+ /* CMM might be disabled for this CRTC. */
+ if (rcdu->cmms[swindex]) {
+ rcrtc->cmm = rcdu->cmms[swindex];
+ rgrp->cmms_mask |= BIT(hwindex % 2);
+
+ drm_mode_crtc_set_gamma_size(crtc, CM2_LUT_SIZE);
+ drm_crtc_enable_color_mgmt(crtc, 0, false, CM2_LUT_SIZE);
+ }
+
drm_crtc_helper_add(crtc, &crtc_helper_funcs);
/* Start with vertical blanking interrupt reporting disabled. */
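
With drm_mode_crtc_set_gamma_size() and drm_crtc_enable_color_mgmt() wired up above, userspace reaches the CMM either through the GAMMA_LUT atomic property or through the legacy gamma ioctl, which the new .gamma_set = drm_atomic_helper_legacy_gamma_set hook converts into an atomic commit. A userspace sketch using libdrm, assuming an open DRM fd and a known crtc_id:

	/* Upload an identity gamma ramp via the legacy ioctl. */
	uint16_t r[256], g[256], b[256];
	int i;

	for (i = 0; i < 256; i++)
		r[i] = g[i] = b[i] = i << 8;	/* 8-bit ramp in 16-bit values */
	drmModeCrtcSetGamma(fd, crtc_id, 256, r, g, b);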
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_crtc.h b/drivers/gpu/drm/rcar-du/rcar_du_crtc.h
index 3b7fc668996f..5f2940c42225 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_crtc.h
+++ b/drivers/gpu/drm/rcar-du/rcar_du_crtc.h
@@ -39,6 +39,7 @@ struct rcar_du_vsp;
* @vblank_wait: wait queue used to signal vertical blanking
* @vblank_count: number of vertical blanking interrupts to wait for
* @group: CRTC group this CRTC belongs to
+ * @cmm: CMM associated with this CRTC
* @vsp: VSP feeding video to this CRTC
* @vsp_pipe: index of the VSP pipeline feeding video to this CRTC
* @writeback: the writeback connector
@@ -64,6 +65,7 @@ struct rcar_du_crtc {
unsigned int vblank_count;
struct rcar_du_group *group;
+ struct platform_device *cmm;
struct rcar_du_vsp *vsp;
unsigned int vsp_pipe;
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_drv.c b/drivers/gpu/drm/rcar-du/rcar_du_drv.c
index f266c17b907a..654e2dd08146 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_drv.c
+++ b/drivers/gpu/drm/rcar-du/rcar_du_drv.c
@@ -399,7 +399,10 @@ static const struct rcar_du_device_info rcar_du_r8a77970_info = {
| RCAR_DU_FEATURE_TVM_SYNC,
.channels_mask = BIT(0),
.routes = {
- /* R8A77970 has one RGB output and one LVDS output. */
+ /*
+ * R8A77970 and R8A77980 have one RGB output and one LVDS
+ * output.
+ */
[RCAR_DU_OUTPUT_DPAD0] = {
.possible_crtcs = BIT(0),
.port = 0,
@@ -457,6 +460,7 @@ static const struct of_device_id rcar_du_of_table[] = {
{ .compatible = "renesas,du-r8a7796", .data = &rcar_du_r8a7796_info },
{ .compatible = "renesas,du-r8a77965", .data = &rcar_du_r8a77965_info },
{ .compatible = "renesas,du-r8a77970", .data = &rcar_du_r8a77970_info },
+ { .compatible = "renesas,du-r8a77980", .data = &rcar_du_r8a77970_info },
{ .compatible = "renesas,du-r8a77990", .data = &rcar_du_r8a7799x_info },
{ .compatible = "renesas,du-r8a77995", .data = &rcar_du_r8a7799x_info },
{ }
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_drv.h b/drivers/gpu/drm/rcar-du/rcar_du_drv.h
index 1327cd0df90a..61504c54e2ec 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_drv.h
+++ b/drivers/gpu/drm/rcar-du/rcar_du_drv.h
@@ -13,6 +13,7 @@
#include <linux/kernel.h>
#include <linux/wait.h>
+#include "rcar_cmm.h"
#include "rcar_du_crtc.h"
#include "rcar_du_group.h"
#include "rcar_du_vsp.h"
@@ -85,6 +86,7 @@ struct rcar_du_device {
struct rcar_du_encoder *encoders[RCAR_DU_OUTPUT_MAX];
struct rcar_du_group groups[RCAR_DU_MAX_GROUPS];
+ struct platform_device *cmms[RCAR_DU_MAX_CRTCS];
struct rcar_du_vsp vsps[RCAR_DU_MAX_VSPS];
struct {
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_group.c b/drivers/gpu/drm/rcar-du/rcar_du_group.c
index 9eee47969e77..88a783ceb3e9 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_group.c
+++ b/drivers/gpu/drm/rcar-du/rcar_du_group.c
@@ -135,6 +135,7 @@ static void rcar_du_group_setup_didsr(struct rcar_du_group *rgrp)
static void rcar_du_group_setup(struct rcar_du_group *rgrp)
{
struct rcar_du_device *rcdu = rgrp->dev;
+ u32 defr7 = DEFR7_CODE;
/* Enable extended features */
rcar_du_group_write(rgrp, DEFR, DEFR_CODE | DEFR_DEFE);
@@ -147,6 +148,15 @@ static void rcar_du_group_setup(struct rcar_du_group *rgrp)
rcar_du_group_setup_pins(rgrp);
+ /*
+ * TODO: Handle routing of the DU output to CMM dynamically, as we
+ * should bypass CMM completely when no color management feature is
+ * used.
+ */
+ defr7 |= (rgrp->cmms_mask & BIT(1) ? DEFR7_CMME1 : 0) |
+ (rgrp->cmms_mask & BIT(0) ? DEFR7_CMME0 : 0);
+ rcar_du_group_write(rgrp, DEFR7, defr7);
+
if (rcdu->info->gen >= 2) {
rcar_du_group_setup_defr8(rgrp);
rcar_du_group_setup_didsr(rgrp);
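
The DEFR7 write above folds the group's cmms_mask into the two per-channel CMM enable bits. Unrolled, the ternary-OR expression reads:

	/* Equivalent form of the DEFR7 CMM routing bits. */
	u32 defr7 = DEFR7_CODE;

	if (rgrp->cmms_mask & BIT(0))
		defr7 |= DEFR7_CMME0;	/* route first DU channel through CMM */
	if (rgrp->cmms_mask & BIT(1))
		defr7 |= DEFR7_CMME1;	/* route second DU channel through CMM */
	rcar_du_group_write(rgrp, DEFR7, defr7);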
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_group.h b/drivers/gpu/drm/rcar-du/rcar_du_group.h
index 87950c1f6a52..e9906609c635 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_group.h
+++ b/drivers/gpu/drm/rcar-du/rcar_du_group.h
@@ -22,6 +22,7 @@ struct rcar_du_device;
* @mmio_offset: registers offset in the device memory map
* @index: group index
* @channels_mask: bitmask of populated DU channels in this group
+ * @cmms_mask: bitmask of available CMMs in this group
* @num_crtcs: number of CRTCs in this group (1 or 2)
* @use_count: number of users of the group (rcar_du_group_(get|put))
* @used_crtcs: number of CRTCs currently in use
@@ -37,6 +38,7 @@ struct rcar_du_group {
unsigned int index;
unsigned int channels_mask;
+ unsigned int cmms_mask;
unsigned int num_crtcs;
unsigned int use_count;
unsigned int used_crtcs;
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_kms.c b/drivers/gpu/drm/rcar-du/rcar_du_kms.c
index 0d59f390de19..fcfd916227d1 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_kms.c
+++ b/drivers/gpu/drm/rcar-du/rcar_du_kms.c
@@ -17,7 +17,9 @@
#include <drm/drm_probe_helper.h>
#include <drm/drm_vblank.h>
+#include <linux/device.h>
#include <linux/of_graph.h>
+#include <linux/of_platform.h>
#include <linux/wait.h>
#include "rcar_du_crtc.h"
@@ -542,6 +544,7 @@ static int rcar_du_properties_init(struct rcar_du_device *rcdu)
static int rcar_du_vsps_init(struct rcar_du_device *rcdu)
{
const struct device_node *np = rcdu->dev->of_node;
+ const char *vsps_prop_name = "renesas,vsps";
struct of_phandle_args args;
struct {
struct device_node *np;
@@ -557,15 +560,21 @@ static int rcar_du_vsps_init(struct rcar_du_device *rcdu)
* entry contains a pointer to the VSP DT node and a bitmask of the
* connected DU CRTCs.
*/
- cells = of_property_count_u32_elems(np, "vsps") / rcdu->num_crtcs - 1;
+ ret = of_property_count_u32_elems(np, vsps_prop_name);
+ if (ret < 0) {
+ /* Backward compatibility with old DTBs. */
+ vsps_prop_name = "vsps";
+ ret = of_property_count_u32_elems(np, vsps_prop_name);
+ }
+ cells = ret / rcdu->num_crtcs - 1;
if (cells > 1)
return -EINVAL;
for (i = 0; i < rcdu->num_crtcs; ++i) {
unsigned int j;
- ret = of_parse_phandle_with_fixed_args(np, "vsps", cells, i,
- &args);
+ ret = of_parse_phandle_with_fixed_args(np, vsps_prop_name,
+ cells, i, &args);
if (ret < 0)
goto error;
@@ -587,8 +596,8 @@ static int rcar_du_vsps_init(struct rcar_du_device *rcdu)
/*
* Store the VSP pointer and pipe index in the CRTC. If the
- * second cell of the 'vsps' specifier isn't present, default
- * to 0 to remain compatible with older DT bindings.
+ * second cell of the 'renesas,vsps' specifier isn't present,
+ * default to 0 to remain compatible with older DT bindings.
*/
rcdu->crtcs[i].vsp = &rcdu->vsps[j];
rcdu->crtcs[i].vsp_pipe = cells >= 1 ? args.args[0] : 0;
@@ -618,6 +627,75 @@ error:
return ret;
}
+static int rcar_du_cmm_init(struct rcar_du_device *rcdu)
+{
+ const struct device_node *np = rcdu->dev->of_node;
+ unsigned int i;
+ int cells;
+
+ cells = of_property_count_u32_elems(np, "renesas,cmms");
+ if (cells == -EINVAL)
+ return 0;
+
+ if (cells > rcdu->num_crtcs) {
+ dev_err(rcdu->dev,
+ "Invalid number of entries in 'renesas,cmms'\n");
+ return -EINVAL;
+ }
+
+ for (i = 0; i < cells; ++i) {
+ struct platform_device *pdev;
+ struct device_link *link;
+ struct device_node *cmm;
+ int ret;
+
+ cmm = of_parse_phandle(np, "renesas,cmms", i);
+ if (!cmm) {
+ dev_err(rcdu->dev,
+ "Failed to parse 'renesas,cmms' property\n");
+ return -EINVAL;
+ }
+
+ if (!of_device_is_available(cmm)) {
+ /* It's fine to have a phandle to a non-enabled CMM. */
+ of_node_put(cmm);
+ continue;
+ }
+
+ pdev = of_find_device_by_node(cmm);
+ if (!pdev) {
+ dev_err(rcdu->dev, "No device found for CMM%u\n", i);
+ of_node_put(cmm);
+ return -ENODEV;
+ }
+
+ of_node_put(cmm);
+
+ /*
+ * -ENODEV is used to report that the CMM config option is
+ * disabled: return 0 and let the DU continue probing.
+ */
+ ret = rcar_cmm_init(pdev);
+ if (ret)
+ return ret == -ENODEV ? 0 : ret;
+
+ /*
+ * Enforce suspend/resume ordering by making the CMM a provider
+ * of the DU: CMM is suspended after and resumed before the DU.
+ */
+ link = device_link_add(rcdu->dev, &pdev->dev, DL_FLAG_STATELESS);
+ if (!link) {
+ dev_err(rcdu->dev,
+ "Failed to create device link to CMM%u\n", i);
+ return -EINVAL;
+ }
+
+ rcdu->cmms[i] = pdev;
+ }
+
+ return 0;
+}
+
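For reference, a minimal sketch of the suspend/resume ordering trick used above, assuming generic 'consumer' and 'supplier' devices (placeholders, not from this patch). With DL_FLAG_STATELESS the PM core suspends the consumer before the supplier and resumes the supplier first:

	/* Hedged sketch: 'consumer' and 'supplier' are illustrative. */
	struct device_link *link;

	link = device_link_add(consumer, supplier, DL_FLAG_STATELESS);
	if (!link)
		return -EINVAL;	/* link creation failed */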
int rcar_du_modeset_init(struct rcar_du_device *rcdu)
{
static const unsigned int mmio_offsets[] = {
@@ -708,6 +786,11 @@ int rcar_du_modeset_init(struct rcar_du_device *rcdu)
return ret;
}
+ /* Initialize the Color Management Modules. */
+ ret = rcar_du_cmm_init(rcdu);
+ if (ret)
+ return ret;
+
/* Create the CRTCs. */
for (swindex = 0, hwindex = 0; swindex < rcdu->num_crtcs; ++hwindex) {
struct rcar_du_group *rgrp;
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_regs.h b/drivers/gpu/drm/rcar-du/rcar_du_regs.h
index bc87f080b170..fb9964949368 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_regs.h
+++ b/drivers/gpu/drm/rcar-du/rcar_du_regs.h
@@ -197,6 +197,11 @@
#define DEFR6_MLOS1 (1 << 2)
#define DEFR6_DEFAULT (DEFR6_CODE | DEFR6_TCNE1)
+#define DEFR7 0x000ec
+#define DEFR7_CODE (0x7779 << 16)
+#define DEFR7_CMME1 BIT(6)
+#define DEFR7_CMME0 BIT(4)
+
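/*
 * Note (hedged, not part of the patch): like the other DEFRx registers,
 * DEFR7 writes appear to require the CODE value in the upper half-word
 * for the write to take effect. Illustrative full-enable write, assuming
 * both CMM channels are present in the group:
 *
 *	rcar_du_group_write(rgrp, DEFR7,
 *			    DEFR7_CODE | DEFR7_CMME1 | DEFR7_CMME0);
 */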
/* -----------------------------------------------------------------------------
* R8A7790-only Control Registers
*/
diff --git a/drivers/gpu/drm/rcar-du/rcar_lvds.c b/drivers/gpu/drm/rcar-du/rcar_lvds.c
index 8c6c172bbf2e..8ffa4fbbdeb3 100644
--- a/drivers/gpu/drm/rcar-du/rcar_lvds.c
+++ b/drivers/gpu/drm/rcar-du/rcar_lvds.c
@@ -21,6 +21,7 @@
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_bridge.h>
+#include <drm/drm_of.h>
#include <drm/drm_panel.h>
#include <drm/drm_probe_helper.h>
@@ -36,6 +37,12 @@ enum rcar_lvds_mode {
RCAR_LVDS_MODE_VESA = 4,
};
+enum rcar_lvds_link_type {
+ RCAR_LVDS_SINGLE_LINK = 0,
+ RCAR_LVDS_DUAL_LINK_EVEN_ODD_PIXELS = 1,
+ RCAR_LVDS_DUAL_LINK_ODD_EVEN_PIXELS = 2,
+};
+
#define RCAR_LVDS_QUIRK_LANES BIT(0) /* LVDS lanes 1 and 3 inverted */
#define RCAR_LVDS_QUIRK_GEN3_LVEN BIT(1) /* LVEN bit needs to be set on R8A77970/R8A7799x */
#define RCAR_LVDS_QUIRK_PWD BIT(2) /* PWD bit available (all of Gen3 but E3) */
@@ -65,11 +72,8 @@ struct rcar_lvds {
struct clk *dotclkin[2]; /* External DU clocks */
} clocks;
- struct drm_display_mode display_mode;
- enum rcar_lvds_mode mode;
-
struct drm_bridge *companion;
- bool dual_link;
+ enum rcar_lvds_link_type link_type;
};
#define bridge_to_rcar_lvds(b) \
@@ -91,7 +95,7 @@ static int rcar_lvds_connector_get_modes(struct drm_connector *connector)
{
struct rcar_lvds *lvds = connector_to_rcar_lvds(connector);
- return drm_panel_get_modes(lvds->panel);
+ return drm_panel_get_modes(lvds->panel, connector);
}
static int rcar_lvds_connector_atomic_check(struct drm_connector *connector,
@@ -402,10 +406,53 @@ EXPORT_SYMBOL_GPL(rcar_lvds_clk_disable);
* Bridge
*/
-static void rcar_lvds_enable(struct drm_bridge *bridge)
+static enum rcar_lvds_mode rcar_lvds_get_lvds_mode(struct rcar_lvds *lvds,
+ const struct drm_connector *connector)
+{
+ const struct drm_display_info *info;
+ enum rcar_lvds_mode mode;
+
+ /*
+ * There is no API yet to retrieve LVDS mode from a bridge, only panels
+ * are supported.
+ */
+ if (!lvds->panel)
+ return RCAR_LVDS_MODE_JEIDA;
+
+ info = &connector->display_info;
+ if (!info->num_bus_formats || !info->bus_formats) {
+ dev_warn(lvds->dev,
+ "no LVDS bus format reported, using JEIDA\n");
+ return RCAR_LVDS_MODE_JEIDA;
+ }
+
+ switch (info->bus_formats[0]) {
+ case MEDIA_BUS_FMT_RGB666_1X7X3_SPWG:
+ case MEDIA_BUS_FMT_RGB888_1X7X4_JEIDA:
+ mode = RCAR_LVDS_MODE_JEIDA;
+ break;
+ case MEDIA_BUS_FMT_RGB888_1X7X4_SPWG:
+ mode = RCAR_LVDS_MODE_VESA;
+ break;
+ default:
+ dev_warn(lvds->dev,
+ "unsupported LVDS bus format 0x%04x, using JEIDA\n",
+ info->bus_formats[0]);
+ return RCAR_LVDS_MODE_JEIDA;
+ }
+
+ if (info->bus_flags & DRM_BUS_FLAG_DATA_LSB_TO_MSB)
+ mode |= RCAR_LVDS_MODE_MIRROR;
+
+ return mode;
+}
+
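/*
 * Hedged sketch (names illustrative, not from this patch): the mode
 * lookup above consumes whatever bus format the panel advertised,
 * typically from the panel driver's ->get_modes() hook:
 *
 *	static const u32 formats[] = { MEDIA_BUS_FMT_RGB888_1X7X4_SPWG };
 *
 *	drm_display_info_set_bus_formats(&connector->display_info,
 *					 formats, ARRAY_SIZE(formats));
 */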
+static void __rcar_lvds_atomic_enable(struct drm_bridge *bridge,
+ struct drm_atomic_state *state,
+ struct drm_crtc *crtc,
+ struct drm_connector *connector)
{
struct rcar_lvds *lvds = bridge_to_rcar_lvds(bridge);
- const struct drm_display_mode *mode = &lvds->display_mode;
u32 lvdhcr;
u32 lvdcr0;
int ret;
@@ -415,8 +462,9 @@ static void rcar_lvds_enable(struct drm_bridge *bridge)
return;
/* Enable the companion LVDS encoder in dual-link mode. */
- if (lvds->dual_link && lvds->companion)
- lvds->companion->funcs->enable(lvds->companion);
+ if (lvds->link_type != RCAR_LVDS_SINGLE_LINK && lvds->companion)
+ __rcar_lvds_atomic_enable(lvds->companion, state, crtc,
+ connector);
/*
* Hardcode the channels and control signals routing for now.
@@ -440,30 +488,51 @@ static void rcar_lvds_enable(struct drm_bridge *bridge)
rcar_lvds_write(lvds, LVDCHCR, lvdhcr);
if (lvds->info->quirks & RCAR_LVDS_QUIRK_DUAL_LINK) {
- /*
- * Configure vertical stripe based on the mode of operation of
- * the connected device.
- */
- rcar_lvds_write(lvds, LVDSTRIPE,
- lvds->dual_link ? LVDSTRIPE_ST_ON : 0);
+ u32 lvdstripe = 0;
+
+ if (lvds->link_type != RCAR_LVDS_SINGLE_LINK) {
+ /*
+ * By default we generate even pixels from the primary
+ * encoder and odd pixels from the companion encoder.
+ * Swap pixels around if the sink requires odd pixels
+ * from the primary encoder and even pixels from the
+ * companion encoder.
+ */
+ bool swap_pixels = lvds->link_type ==
+ RCAR_LVDS_DUAL_LINK_ODD_EVEN_PIXELS;
+
+ /*
+ * Configure vertical stripe since we are dealing with
+ * an LVDS dual-link connection.
+ *
+ * ST_SWAP is reserved for the companion encoder, only
+ * set it in the primary encoder.
+ */
+ lvdstripe = LVDSTRIPE_ST_ON
+ | (lvds->companion && swap_pixels ?
+ LVDSTRIPE_ST_SWAP : 0);
+ }
+ rcar_lvds_write(lvds, LVDSTRIPE, lvdstripe);
}
/*
* PLL clock configuration on all instances but the companion in
* dual-link mode.
*/
- if (!lvds->dual_link || lvds->companion)
+ if (lvds->link_type == RCAR_LVDS_SINGLE_LINK || lvds->companion) {
+ const struct drm_crtc_state *crtc_state =
+ drm_atomic_get_new_crtc_state(state, crtc);
+ const struct drm_display_mode *mode =
+ &crtc_state->adjusted_mode;
+
lvds->info->pll_setup(lvds, mode->clock * 1000);
+ }
/* Set the LVDS mode and select the input. */
- lvdcr0 = lvds->mode << LVDCR0_LVMD_SHIFT;
+ lvdcr0 = rcar_lvds_get_lvds_mode(lvds, connector) << LVDCR0_LVMD_SHIFT;
if (lvds->bridge.encoder) {
- /*
- * FIXME: We should really retrieve the CRTC through the state,
- * but how do we get a state pointer?
- */
- if (drm_crtc_index(lvds->bridge.encoder->crtc) == 2)
+ if (drm_crtc_index(crtc) == 2)
lvdcr0 |= LVDCR0_DUSEL;
}
@@ -520,7 +589,21 @@ static void rcar_lvds_enable(struct drm_bridge *bridge)
}
}
-static void rcar_lvds_disable(struct drm_bridge *bridge)
+static void rcar_lvds_atomic_enable(struct drm_bridge *bridge,
+ struct drm_atomic_state *state)
+{
+ struct drm_connector *connector;
+ struct drm_crtc *crtc;
+
+ connector = drm_atomic_get_new_connector_for_encoder(state,
+ bridge->encoder);
+ crtc = drm_atomic_get_new_connector_state(state, connector)->crtc;
+
+ __rcar_lvds_atomic_enable(bridge, state, crtc, connector);
+}
+
+static void rcar_lvds_atomic_disable(struct drm_bridge *bridge,
+ struct drm_atomic_state *state)
{
struct rcar_lvds *lvds = bridge_to_rcar_lvds(bridge);
@@ -534,8 +617,8 @@ static void rcar_lvds_disable(struct drm_bridge *bridge)
rcar_lvds_write(lvds, LVDPLLCR, 0);
/* Disable the companion LVDS encoder in dual-link mode. */
- if (lvds->dual_link && lvds->companion)
- lvds->companion->funcs->disable(lvds->companion);
+ if (lvds->link_type != RCAR_LVDS_SINGLE_LINK && lvds->companion)
+ lvds->companion->funcs->atomic_disable(lvds->companion, state);
clk_disable_unprepare(lvds->clocks.mod);
}
@@ -558,54 +641,6 @@ static bool rcar_lvds_mode_fixup(struct drm_bridge *bridge,
return true;
}
-static void rcar_lvds_get_lvds_mode(struct rcar_lvds *lvds)
-{
- struct drm_display_info *info = &lvds->connector.display_info;
- enum rcar_lvds_mode mode;
-
- /*
- * There is no API yet to retrieve LVDS mode from a bridge, only panels
- * are supported.
- */
- if (!lvds->panel)
- return;
-
- if (!info->num_bus_formats || !info->bus_formats) {
- dev_err(lvds->dev, "no LVDS bus format reported\n");
- return;
- }
-
- switch (info->bus_formats[0]) {
- case MEDIA_BUS_FMT_RGB666_1X7X3_SPWG:
- case MEDIA_BUS_FMT_RGB888_1X7X4_JEIDA:
- mode = RCAR_LVDS_MODE_JEIDA;
- break;
- case MEDIA_BUS_FMT_RGB888_1X7X4_SPWG:
- mode = RCAR_LVDS_MODE_VESA;
- break;
- default:
- dev_err(lvds->dev, "unsupported LVDS bus format 0x%04x\n",
- info->bus_formats[0]);
- return;
- }
-
- if (info->bus_flags & DRM_BUS_FLAG_DATA_LSB_TO_MSB)
- mode |= RCAR_LVDS_MODE_MIRROR;
-
- lvds->mode = mode;
-}
-
-static void rcar_lvds_mode_set(struct drm_bridge *bridge,
- const struct drm_display_mode *mode,
- const struct drm_display_mode *adjusted_mode)
-{
- struct rcar_lvds *lvds = bridge_to_rcar_lvds(bridge);
-
- lvds->display_mode = *adjusted_mode;
-
- rcar_lvds_get_lvds_mode(lvds);
-}
-
static int rcar_lvds_attach(struct drm_bridge *bridge)
{
struct rcar_lvds *lvds = bridge_to_rcar_lvds(bridge);
@@ -647,17 +682,16 @@ static void rcar_lvds_detach(struct drm_bridge *bridge)
static const struct drm_bridge_funcs rcar_lvds_bridge_ops = {
.attach = rcar_lvds_attach,
.detach = rcar_lvds_detach,
- .enable = rcar_lvds_enable,
- .disable = rcar_lvds_disable,
+ .atomic_enable = rcar_lvds_atomic_enable,
+ .atomic_disable = rcar_lvds_atomic_disable,
.mode_fixup = rcar_lvds_mode_fixup,
- .mode_set = rcar_lvds_mode_set,
};
bool rcar_lvds_dual_link(struct drm_bridge *bridge)
{
struct rcar_lvds *lvds = bridge_to_rcar_lvds(bridge);
- return lvds->dual_link;
+ return lvds->link_type != RCAR_LVDS_SINGLE_LINK;
}
EXPORT_SYMBOL_GPL(rcar_lvds_dual_link);
@@ -669,7 +703,10 @@ static int rcar_lvds_parse_dt_companion(struct rcar_lvds *lvds)
{
const struct of_device_id *match;
struct device_node *companion;
+ struct device_node *port0, *port1;
+ struct rcar_lvds *companion_lvds;
struct device *dev = lvds->dev;
+ int dual_link;
int ret = 0;
/* Locate the companion LVDS encoder for dual-link operation, if any. */
@@ -688,13 +725,68 @@ static int rcar_lvds_parse_dt_companion(struct rcar_lvds *lvds)
goto done;
}
+ /*
+ * We need to work out if the sink is expecting us to function in
+ * dual-link mode. We do this by looking at the DT port nodes we are
+ * connected to: if they are marked as expecting even pixels and
+ * odd pixels, then we need to enable vertical stripe output.
+ */
+ port0 = of_graph_get_port_by_id(dev->of_node, 1);
+ port1 = of_graph_get_port_by_id(companion, 1);
+ dual_link = drm_of_lvds_get_dual_link_pixel_order(port0, port1);
+ of_node_put(port0);
+ of_node_put(port1);
+
+ switch (dual_link) {
+ case DRM_LVDS_DUAL_LINK_ODD_EVEN_PIXELS:
+ lvds->link_type = RCAR_LVDS_DUAL_LINK_ODD_EVEN_PIXELS;
+ break;
+ case DRM_LVDS_DUAL_LINK_EVEN_ODD_PIXELS:
+ lvds->link_type = RCAR_LVDS_DUAL_LINK_EVEN_ODD_PIXELS;
+ break;
+ default:
+ /*
+ * Early dual-link bridge specific implementations populate the
+ * timings field of drm_bridge. If the flag is set, we assume
+ * that we are expected to generate even pixels from the primary
+ * encoder, and odd pixels from the companion encoder.
+ */
+ if (lvds->next_bridge && lvds->next_bridge->timings &&
+ lvds->next_bridge->timings->dual_link)
+ lvds->link_type = RCAR_LVDS_DUAL_LINK_EVEN_ODD_PIXELS;
+ else
+ lvds->link_type = RCAR_LVDS_SINGLE_LINK;
+ }
+
+ if (lvds->link_type == RCAR_LVDS_SINGLE_LINK) {
+ dev_dbg(dev, "Single-link configuration detected\n");
+ goto done;
+ }
+
lvds->companion = of_drm_find_bridge(companion);
if (!lvds->companion) {
ret = -EPROBE_DEFER;
goto done;
}
- dev_dbg(dev, "Found companion encoder %pOF\n", companion);
+ dev_dbg(dev,
+ "Dual-link configuration detected (companion encoder %pOF)\n",
+ companion);
+
+ if (lvds->link_type == RCAR_LVDS_DUAL_LINK_ODD_EVEN_PIXELS)
+ dev_dbg(dev, "Data swapping required\n");
+
+ /*
+ * FIXME: We should not be messing with the companion encoder private
+ * data from the primary encoder, we should rather let the companion
+ * encoder work things out on its own. However, the companion encoder
+ * doesn't hold a reference to the primary encoder, and
+ * drm_of_lvds_get_dual_link_pixel_order needs to be given references
+ * to the output ports of both encoders, therefore leave it like this
+ * for the time being.
+ */
+ companion_lvds = bridge_to_rcar_lvds(lvds->companion);
+ companion_lvds->link_type = lvds->link_type;
done:
of_node_put(companion);
@@ -704,79 +796,17 @@ done:
static int rcar_lvds_parse_dt(struct rcar_lvds *lvds)
{
- struct device_node *local_output = NULL;
- struct device_node *remote_input = NULL;
- struct device_node *remote = NULL;
- struct device_node *node;
- bool is_bridge = false;
- int ret = 0;
-
- local_output = of_graph_get_endpoint_by_regs(lvds->dev->of_node, 1, 0);
- if (!local_output) {
- dev_dbg(lvds->dev, "unconnected port@1\n");
- ret = -ENODEV;
- goto done;
- }
-
- /*
- * Locate the connected entity and infer its type from the number of
- * endpoints.
- */
- remote = of_graph_get_remote_port_parent(local_output);
- if (!remote) {
- dev_dbg(lvds->dev, "unconnected endpoint %pOF\n", local_output);
- ret = -ENODEV;
- goto done;
- }
+ int ret;
- if (!of_device_is_available(remote)) {
- dev_dbg(lvds->dev, "connected entity %pOF is disabled\n",
- remote);
- ret = -ENODEV;
+ ret = drm_of_find_panel_or_bridge(lvds->dev->of_node, 1, 0,
+ &lvds->panel, &lvds->next_bridge);
+ if (ret)
goto done;
- }
-
- remote_input = of_graph_get_remote_endpoint(local_output);
- for_each_endpoint_of_node(remote, node) {
- if (node != remote_input) {
- /*
- * We've found one endpoint other than the input, this
- * must be a bridge.
- */
- is_bridge = true;
- of_node_put(node);
- break;
- }
- }
-
- if (is_bridge) {
- lvds->next_bridge = of_drm_find_bridge(remote);
- if (!lvds->next_bridge) {
- ret = -EPROBE_DEFER;
- goto done;
- }
-
- if (lvds->info->quirks & RCAR_LVDS_QUIRK_DUAL_LINK)
- lvds->dual_link = lvds->next_bridge->timings
- ? lvds->next_bridge->timings->dual_link
- : false;
- } else {
- lvds->panel = of_drm_find_panel(remote);
- if (IS_ERR(lvds->panel)) {
- ret = PTR_ERR(lvds->panel);
- goto done;
- }
- }
-
- if (lvds->dual_link)
+ if (lvds->info->quirks & RCAR_LVDS_QUIRK_DUAL_LINK)
ret = rcar_lvds_parse_dt_companion(lvds);
done:
- of_node_put(local_output);
- of_node_put(remote_input);
- of_node_put(remote);
-
/*
* On D3/E3 the LVDS encoder provides a clock to the DU, which can be
* used for the DPAD output even when the LVDS output is not connected.
diff --git a/drivers/gpu/drm/rockchip/Kconfig b/drivers/gpu/drm/rockchip/Kconfig
index 6f4222f8beeb..310aa1546893 100644
--- a/drivers/gpu/drm/rockchip/Kconfig
+++ b/drivers/gpu/drm/rockchip/Kconfig
@@ -28,17 +28,17 @@ config ROCKCHIP_ANALOGIX_DP
on RK3288 or RK3399 based SoC, you should select this option.
config ROCKCHIP_CDN_DP
- bool "Rockchip cdn DP"
+ bool "Rockchip cdn DP"
depends on EXTCON=y || (EXTCON=m && DRM_ROCKCHIP=m)
- help
+ help
This selects support for Rockchip SoC specific extensions
for the cdn DP driver. If you want to enable DP on
RK3399 based SoC, you should select this
option.
config ROCKCHIP_DW_HDMI
- bool "Rockchip specific extensions for Synopsys DW HDMI"
- help
+ bool "Rockchip specific extensions for Synopsys DW HDMI"
+ help
This selects support for Rockchip SoC specific extensions
for the Synopsys DesignWare HDMI driver. If you want to
enable HDMI on RK3288 or RK3399 based SoC, you should select
@@ -46,6 +46,7 @@ config ROCKCHIP_DW_HDMI
config ROCKCHIP_DW_MIPI_DSI
bool "Rockchip specific extensions for Synopsys DW MIPI DSI"
+ select GENERIC_PHY_MIPI_DPHY
help
This selects support for Rockchip SoC specific extensions
for the Synopsys DesignWare MIPI DSI driver. If you want to
diff --git a/drivers/gpu/drm/rockchip/cdn-dp-core.h b/drivers/gpu/drm/rockchip/cdn-dp-core.h
index 83c4586665b4..81ac9b658a70 100644
--- a/drivers/gpu/drm/rockchip/cdn-dp-core.h
+++ b/drivers/gpu/drm/rockchip/cdn-dp-core.h
@@ -95,7 +95,7 @@ struct cdn_dp_device {
struct cdn_dp_port *port[MAX_PHY];
u8 ports;
u8 max_lanes;
- u8 max_rate;
+ unsigned int max_rate;
u8 lanes;
int active_port;
diff --git a/drivers/gpu/drm/rockchip/dw-mipi-dsi-rockchip.c b/drivers/gpu/drm/rockchip/dw-mipi-dsi-rockchip.c
index bc073ec5c183..6e1270e45f97 100644
--- a/drivers/gpu/drm/rockchip/dw-mipi-dsi-rockchip.c
+++ b/drivers/gpu/drm/rockchip/dw-mipi-dsi-rockchip.c
@@ -12,6 +12,7 @@
#include <linux/mfd/syscon.h>
#include <linux/module.h>
#include <linux/of_device.h>
+#include <linux/phy/phy.h>
#include <linux/pm_runtime.h>
#include <linux/regmap.h>
@@ -139,6 +140,12 @@
#define DW_MIPI_NEEDS_PHY_CFG_CLK BIT(0)
#define DW_MIPI_NEEDS_GRF_CLK BIT(1)
+#define PX30_GRF_PD_VO_CON1 0x0438
+#define PX30_DSI_FORCETXSTOPMODE (0xf << 7)
+#define PX30_DSI_FORCERXMODE BIT(6)
+#define PX30_DSI_TURNDISABLE BIT(5)
+#define PX30_DSI_LCDC_SEL BIT(0)
+
#define RK3288_GRF_SOC_CON6 0x025c
#define RK3288_DSI0_LCDC_SEL BIT(6)
#define RK3288_DSI1_LCDC_SEL BIT(9)
@@ -223,6 +230,10 @@ struct dw_mipi_dsi_rockchip {
bool is_slave;
struct dw_mipi_dsi_rockchip *slave;
+ /* optional external dphy */
+ struct phy *phy;
+ union phy_configure_opts phy_opts;
+
unsigned int lane_mbps; /* per lane */
u16 input_div;
u16 feedback_div;
@@ -359,6 +370,9 @@ static int dw_mipi_dsi_phy_init(void *priv_data)
struct dw_mipi_dsi_rockchip *dsi = priv_data;
int ret, i, vco;
+ if (dsi->phy)
+ return 0;
+
/*
* Get vco from frequency(lane_mbps)
* vco frequency table
@@ -467,6 +481,28 @@ static int dw_mipi_dsi_phy_init(void *priv_data)
return ret;
}
+static void dw_mipi_dsi_phy_power_on(void *priv_data)
+{
+ struct dw_mipi_dsi_rockchip *dsi = priv_data;
+ int ret;
+
+ ret = phy_set_mode(dsi->phy, PHY_MODE_MIPI_DPHY);
+ if (ret) {
+ DRM_DEV_ERROR(dsi->dev, "failed to set phy mode: %d\n", ret);
+ return;
+ }
+
+ phy_configure(dsi->phy, &dsi->phy_opts);
+ phy_power_on(dsi->phy);
+}
+
+static void dw_mipi_dsi_phy_power_off(void *priv_data)
+{
+ struct dw_mipi_dsi_rockchip *dsi = priv_data;
+
+ phy_power_off(dsi->phy);
+}
+
static int
dw_mipi_dsi_get_lane_mbps(void *priv_data, const struct drm_display_mode *mode,
unsigned long mode_flags, u32 lanes, u32 format,
@@ -504,6 +540,17 @@ dw_mipi_dsi_get_lane_mbps(void *priv_data, const struct drm_display_mode *mode,
"DPHY clock frequency is out of range\n");
}
+ /* For an external PHY, only the mipi_dphy configuration is necessary. */
+ if (dsi->phy) {
+ phy_mipi_dphy_get_default_config(mode->clock * 1000 * 10 / 8,
+ bpp, lanes,
+ &dsi->phy_opts.mipi_dphy);
+ dsi->lane_mbps = target_mbps;
+ *lane_mbps = dsi->lane_mbps;
+
+ return 0;
+ }
+
fin = clk_get_rate(dsi->pllref_clk);
fout = target_mbps * USEC_PER_SEC;
@@ -559,9 +606,89 @@ dw_mipi_dsi_get_lane_mbps(void *priv_data, const struct drm_display_mode *mode,
return 0;
}
+struct hstt {
+ unsigned int maxfreq;
+ struct dw_mipi_dsi_dphy_timing timing;
+};
+
+#define HSTT(_maxfreq, _c_lp2hs, _c_hs2lp, _d_lp2hs, _d_hs2lp) \
+{ \
+ .maxfreq = _maxfreq, \
+ .timing = { \
+ .clk_lp2hs = _c_lp2hs, \
+ .clk_hs2lp = _c_hs2lp, \
+ .data_lp2hs = _d_lp2hs, \
+ .data_hs2lp = _d_hs2lp, \
+ } \
+}
+
+/* Table A-3 High-Speed Transition Times */
+static const struct hstt hstt_table[] = {
+ HSTT( 90, 32, 20, 26, 13),
+ HSTT( 100, 35, 23, 28, 14),
+ HSTT( 110, 32, 22, 26, 13),
+ HSTT( 130, 31, 20, 27, 13),
+ HSTT( 140, 33, 22, 26, 14),
+ HSTT( 150, 33, 21, 26, 14),
+ HSTT( 170, 32, 20, 27, 13),
+ HSTT( 180, 36, 23, 30, 15),
+ HSTT( 200, 40, 22, 33, 15),
+ HSTT( 220, 40, 22, 33, 15),
+ HSTT( 240, 44, 24, 36, 16),
+ HSTT( 250, 48, 24, 38, 17),
+ HSTT( 270, 48, 24, 38, 17),
+ HSTT( 300, 50, 27, 41, 18),
+ HSTT( 330, 56, 28, 45, 18),
+ HSTT( 360, 59, 28, 48, 19),
+ HSTT( 400, 61, 30, 50, 20),
+ HSTT( 450, 67, 31, 55, 21),
+ HSTT( 500, 73, 31, 59, 22),
+ HSTT( 550, 79, 36, 63, 24),
+ HSTT( 600, 83, 37, 68, 25),
+ HSTT( 650, 90, 38, 73, 27),
+ HSTT( 700, 95, 40, 77, 28),
+ HSTT( 750, 102, 40, 84, 28),
+ HSTT( 800, 106, 42, 87, 30),
+ HSTT( 850, 113, 44, 93, 31),
+ HSTT( 900, 118, 47, 98, 32),
+ HSTT( 950, 124, 47, 102, 34),
+ HSTT(1000, 130, 49, 107, 35),
+ HSTT(1050, 135, 51, 111, 37),
+ HSTT(1100, 139, 51, 114, 38),
+ HSTT(1150, 146, 54, 120, 40),
+ HSTT(1200, 153, 57, 125, 41),
+ HSTT(1250, 158, 58, 130, 42),
+ HSTT(1300, 163, 58, 135, 44),
+ HSTT(1350, 168, 60, 140, 45),
+ HSTT(1400, 172, 64, 144, 47),
+ HSTT(1450, 176, 65, 148, 48),
+ HSTT(1500, 181, 66, 153, 50)
+};
+
+static int
+dw_mipi_dsi_phy_get_timing(void *priv_data, unsigned int lane_mbps,
+ struct dw_mipi_dsi_dphy_timing *timing)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(hstt_table); i++)
+ if (lane_mbps < hstt_table[i].maxfreq)
+ break;
+
+ if (i == ARRAY_SIZE(hstt_table))
+ i--;
+
+ *timing = hstt_table[i].timing;
+
+ return 0;
+}
+
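/*
 * Lookup semantics of the helper above (illustrative values read off the
 * table, derived directly from the code):
 *   lane_mbps = 499  -> row 500 (first maxfreq strictly greater)
 *   lane_mbps = 500  -> row 550 (the comparison is '<', not '<=')
 *   lane_mbps = 1600 -> row 1500 (clamped to the last row by the i-- fixup)
 */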
static const struct dw_mipi_dsi_phy_ops dw_mipi_dsi_rockchip_phy_ops = {
.init = dw_mipi_dsi_phy_init,
+ .power_on = dw_mipi_dsi_phy_power_on,
+ .power_off = dw_mipi_dsi_phy_power_off,
.get_lane_mbps = dw_mipi_dsi_get_lane_mbps,
+ .get_timing = dw_mipi_dsi_phy_get_timing,
};
static void dw_mipi_dsi_rockchip_config(struct dw_mipi_dsi_rockchip *dsi,
@@ -916,16 +1043,33 @@ static int dw_mipi_dsi_rockchip_probe(struct platform_device *pdev)
}
if (!dsi->cdata) {
- dev_err(dev, "no dsi-config for %s node\n", np->name);
+ DRM_DEV_ERROR(dev, "no dsi-config for %s node\n", np->name);
return -EINVAL;
}
+ /* Try to get an optional external DPHY. */
+ dsi->phy = devm_phy_optional_get(dev, "dphy");
+ if (IS_ERR(dsi->phy)) {
+ ret = PTR_ERR(dsi->phy);
+ DRM_DEV_ERROR(dev, "failed to get mipi dphy: %d\n", ret);
+ return ret;
+ }
+
dsi->pllref_clk = devm_clk_get(dev, "ref");
if (IS_ERR(dsi->pllref_clk)) {
- ret = PTR_ERR(dsi->pllref_clk);
- DRM_DEV_ERROR(dev,
- "Unable to get pll reference clock: %d\n", ret);
- return ret;
+ if (dsi->phy) {
+ /*
+ * If an external PHY is present, the PLL
+ * will be generated there.
+ */
+ dsi->pllref_clk = NULL;
+ } else {
+ ret = PTR_ERR(dsi->pllref_clk);
+ DRM_DEV_ERROR(dev,
+ "Unable to get pll reference clock: %d\n",
+ ret);
+ return ret;
+ }
}
if (dsi->cdata->flags & DW_MIPI_NEEDS_PHY_CFG_CLK) {
@@ -989,6 +1133,24 @@ static int dw_mipi_dsi_rockchip_remove(struct platform_device *pdev)
return 0;
}
+static const struct rockchip_dw_dsi_chip_data px30_chip_data[] = {
+ {
+ .reg = 0xff450000,
+ .lcdsel_grf_reg = PX30_GRF_PD_VO_CON1,
+ .lcdsel_big = HIWORD_UPDATE(0, PX30_DSI_LCDC_SEL),
+ .lcdsel_lit = HIWORD_UPDATE(PX30_DSI_LCDC_SEL,
+ PX30_DSI_LCDC_SEL),
+
+ .lanecfg1_grf_reg = PX30_GRF_PD_VO_CON1,
+ .lanecfg1 = HIWORD_UPDATE(0, PX30_DSI_TURNDISABLE |
+ PX30_DSI_FORCERXMODE |
+ PX30_DSI_FORCETXSTOPMODE),
+
+ .max_data_lanes = 4,
+ },
+ { /* sentinel */ }
+};
+
static const struct rockchip_dw_dsi_chip_data rk3288_chip_data[] = {
{
.reg = 0xff960000,
@@ -1057,6 +1219,9 @@ static const struct rockchip_dw_dsi_chip_data rk3399_chip_data[] = {
static const struct of_device_id dw_mipi_dsi_rockchip_dt_ids[] = {
{
+ .compatible = "rockchip,px30-mipi-dsi",
+ .data = &px30_chip_data,
+ }, {
.compatible = "rockchip,rk3288-mipi-dsi",
.data = &rk3288_chip_data,
}, {
diff --git a/drivers/gpu/drm/rockchip/inno_hdmi.c b/drivers/gpu/drm/rockchip/inno_hdmi.c
index ed344a795b4d..e5864e823020 100644
--- a/drivers/gpu/drm/rockchip/inno_hdmi.c
+++ b/drivers/gpu/drm/rockchip/inno_hdmi.c
@@ -624,8 +624,10 @@ static int inno_hdmi_register(struct drm_device *drm, struct inno_hdmi *hdmi)
drm_connector_helper_add(&hdmi->connector,
&inno_hdmi_connector_helper_funcs);
- drm_connector_init(drm, &hdmi->connector, &inno_hdmi_connector_funcs,
- DRM_MODE_CONNECTOR_HDMIA);
+ drm_connector_init_with_ddc(drm, &hdmi->connector,
+ &inno_hdmi_connector_funcs,
+ DRM_MODE_CONNECTOR_HDMIA,
+ hdmi->ddc);
drm_connector_attach_encoder(&hdmi->connector, encoder);
diff --git a/drivers/gpu/drm/rockchip/rk3066_hdmi.c b/drivers/gpu/drm/rockchip/rk3066_hdmi.c
index cdb401f4283d..fe203d38664e 100644
--- a/drivers/gpu/drm/rockchip/rk3066_hdmi.c
+++ b/drivers/gpu/drm/rockchip/rk3066_hdmi.c
@@ -564,9 +564,10 @@ rk3066_hdmi_register(struct drm_device *drm, struct rk3066_hdmi *hdmi)
drm_connector_helper_add(&hdmi->connector,
&rk3066_hdmi_connector_helper_funcs);
- drm_connector_init(drm, &hdmi->connector,
- &rk3066_hdmi_connector_funcs,
- DRM_MODE_CONNECTOR_HDMIA);
+ drm_connector_init_with_ddc(drm, &hdmi->connector,
+ &rk3066_hdmi_connector_funcs,
+ DRM_MODE_CONNECTOR_HDMIA,
+ hdmi->ddc);
drm_connector_attach_encoder(&hdmi->connector, encoder);
@@ -640,6 +641,9 @@ static int rk3066_hdmi_i2c_write(struct rk3066_hdmi *hdmi, struct i2c_msg *msgs)
if (msgs->addr == DDC_ADDR)
hdmi->i2c->ddc_addr = msgs->buf[0];
+ /* Set the EDID FIFO start address. */
+ hdmi_writeb(hdmi, HDMI_EDID_FIFO_ADDR, 0x00);
+
/* Set edid word address 0x00/0x80. */
hdmi_writeb(hdmi, HDMI_EDID_WORD_ADDR, hdmi->i2c->ddc_addr);
diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_fb.c b/drivers/gpu/drm/rockchip/rockchip_drm_fb.c
index ca01234c037c..221e72e71432 100644
--- a/drivers/gpu/drm/rockchip/rockchip_drm_fb.c
+++ b/drivers/gpu/drm/rockchip/rockchip_drm_fb.c
@@ -53,64 +53,12 @@ rockchip_fb_alloc(struct drm_device *dev, const struct drm_mode_fb_cmd2 *mode_cm
return fb;
}
-static struct drm_framebuffer *
-rockchip_user_fb_create(struct drm_device *dev, struct drm_file *file_priv,
- const struct drm_mode_fb_cmd2 *mode_cmd)
-{
- const struct drm_format_info *info = drm_get_format_info(dev,
- mode_cmd);
- struct drm_framebuffer *fb;
- struct drm_gem_object *objs[ROCKCHIP_MAX_FB_BUFFER];
- struct drm_gem_object *obj;
- int num_planes = min_t(int, info->num_planes, ROCKCHIP_MAX_FB_BUFFER);
- int ret;
- int i;
-
- for (i = 0; i < num_planes; i++) {
- unsigned int width = mode_cmd->width / (i ? info->hsub : 1);
- unsigned int height = mode_cmd->height / (i ? info->vsub : 1);
- unsigned int min_size;
-
- obj = drm_gem_object_lookup(file_priv, mode_cmd->handles[i]);
- if (!obj) {
- DRM_DEV_ERROR(dev->dev,
- "Failed to lookup GEM object\n");
- ret = -ENXIO;
- goto err_gem_object_unreference;
- }
-
- min_size = (height - 1) * mode_cmd->pitches[i] +
- mode_cmd->offsets[i] +
- width * info->cpp[i];
-
- if (obj->size < min_size) {
- drm_gem_object_put_unlocked(obj);
- ret = -EINVAL;
- goto err_gem_object_unreference;
- }
- objs[i] = obj;
- }
-
- fb = rockchip_fb_alloc(dev, mode_cmd, objs, i);
- if (IS_ERR(fb)) {
- ret = PTR_ERR(fb);
- goto err_gem_object_unreference;
- }
-
- return fb;
-
-err_gem_object_unreference:
- for (i--; i >= 0; i--)
- drm_gem_object_put_unlocked(objs[i]);
- return ERR_PTR(ret);
-}
-
static const struct drm_mode_config_helper_funcs rockchip_mode_config_helpers = {
.atomic_commit_tail = drm_atomic_helper_commit_tail_rpm,
};
static const struct drm_mode_config_funcs rockchip_drm_mode_config_funcs = {
- .fb_create = rockchip_user_fb_create,
+ .fb_create = drm_gem_fb_create_with_dirty,
.output_poll_changed = drm_fb_helper_output_poll_changed,
.atomic_check = drm_atomic_helper_check,
.atomic_commit = drm_atomic_helper_commit,
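A hedged summary of the helper standing in for the removed code: drm_gem_fb_create_with_dirty() performs the same per-plane GEM object lookup and minimum-size validation that the hand-rolled fb_create did, and additionally installs drm_atomic_helper_dirtyfb() as the framebuffer's ->dirty hook, which the removed version never provided.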
diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_fbdev.c b/drivers/gpu/drm/rockchip/rockchip_drm_fbdev.c
index 02be6c5ff857..521fe42ac5e2 100644
--- a/drivers/gpu/drm/rockchip/rockchip_drm_fbdev.c
+++ b/drivers/gpu/drm/rockchip/rockchip_drm_fbdev.c
@@ -27,7 +27,7 @@ static int rockchip_fbdev_mmap(struct fb_info *info,
return rockchip_gem_mmap_buf(private->fbdev_bo, vma);
}
-static struct fb_ops rockchip_drm_fbdev_ops = {
+static const struct fb_ops rockchip_drm_fbdev_ops = {
.owner = THIS_MODULE,
DRM_FB_HELPER_DEFAULT_OPS,
.fb_mmap = rockchip_fbdev_mmap,
diff --git a/drivers/gpu/drm/rockchip/rockchip_lvds.c b/drivers/gpu/drm/rockchip/rockchip_lvds.c
index 8a4c9af0ba73..f25a36743cbd 100644
--- a/drivers/gpu/drm/rockchip/rockchip_lvds.c
+++ b/drivers/gpu/drm/rockchip/rockchip_lvds.c
@@ -10,6 +10,7 @@
#include <linux/component.h>
#include <linux/mfd/syscon.h>
#include <linux/of_graph.h>
+#include <linux/phy/phy.h>
#include <linux/pinctrl/devinfo.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
@@ -31,6 +32,8 @@
#define DISPLAY_OUTPUT_LVDS 1
#define DISPLAY_OUTPUT_DUAL_LVDS 2
+struct rockchip_lvds;
+
#define connector_to_lvds(c) \
container_of(c, struct rockchip_lvds, connector)
@@ -39,16 +42,12 @@
/**
* rockchip_lvds_soc_data - rockchip lvds Soc private data
- * @ch1_offset: lvds channel 1 registe offset
- * grf_soc_con6: general registe offset for LVDS contrl
- * grf_soc_con7: general registe offset for LVDS contrl
- * has_vop_sel: to indicate whether need to choose from different VOP.
+ * @probe: LVDS platform probe function
+ * @helper_funcs: LVDS connector helper functions
*/
struct rockchip_lvds_soc_data {
- u32 ch1_offset;
- int grf_soc_con6;
- int grf_soc_con7;
- bool has_vop_sel;
+ int (*probe)(struct platform_device *pdev, struct rockchip_lvds *lvds);
+ const struct drm_encoder_helper_funcs *helper_funcs;
};
struct rockchip_lvds {
@@ -56,6 +55,7 @@ struct rockchip_lvds {
void __iomem *regs;
struct regmap *grf;
struct clk *pclk;
+ struct phy *dphy;
const struct rockchip_lvds_soc_data *soc_data;
int output; /* rgb lvds or dual lvds output */
int format; /* vesa or jeida format */
@@ -67,15 +67,16 @@ struct rockchip_lvds {
struct dev_pin_info *pins;
};
-static inline void lvds_writel(struct rockchip_lvds *lvds, u32 offset, u32 val)
+static inline void rk3288_writel(struct rockchip_lvds *lvds, u32 offset,
+ u32 val)
{
writel_relaxed(val, lvds->regs + offset);
if (lvds->output == DISPLAY_OUTPUT_LVDS)
return;
- writel_relaxed(val, lvds->regs + offset + lvds->soc_data->ch1_offset);
+ writel_relaxed(val, lvds->regs + offset + RK3288_LVDS_CH1_OFFSET);
}
-static inline int lvds_name_to_format(const char *s)
+static inline int rockchip_lvds_name_to_format(const char *s)
{
if (strncmp(s, "jeida-18", 8) == 0)
return LVDS_JEIDA_18;
@@ -87,7 +88,7 @@ static inline int lvds_name_to_format(const char *s)
return -EINVAL;
}
-static inline int lvds_name_to_output(const char *s)
+static inline int rockchip_lvds_name_to_output(const char *s)
{
if (strncmp(s, "rgb", 3) == 0)
return DISPLAY_OUTPUT_RGB;
@@ -99,7 +100,41 @@ static inline int lvds_name_to_output(const char *s)
return -EINVAL;
}
-static int rockchip_lvds_poweron(struct rockchip_lvds *lvds)
+static const struct drm_connector_funcs rockchip_lvds_connector_funcs = {
+ .fill_modes = drm_helper_probe_single_connector_modes,
+ .destroy = drm_connector_cleanup,
+ .reset = drm_atomic_helper_connector_reset,
+ .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
+ .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
+};
+
+static int rockchip_lvds_connector_get_modes(struct drm_connector *connector)
+{
+ struct rockchip_lvds *lvds = connector_to_lvds(connector);
+ struct drm_panel *panel = lvds->panel;
+
+ return drm_panel_get_modes(panel, connector);
+}
+
+static const
+struct drm_connector_helper_funcs rockchip_lvds_connector_helper_funcs = {
+ .get_modes = rockchip_lvds_connector_get_modes,
+};
+
+static int
+rockchip_lvds_encoder_atomic_check(struct drm_encoder *encoder,
+ struct drm_crtc_state *crtc_state,
+ struct drm_connector_state *conn_state)
+{
+ struct rockchip_crtc_state *s = to_rockchip_crtc_state(crtc_state);
+
+ s->output_mode = ROCKCHIP_OUT_MODE_P888;
+ s->output_type = DRM_MODE_CONNECTOR_LVDS;
+
+ return 0;
+}
+
+static int rk3288_lvds_poweron(struct rockchip_lvds *lvds)
{
int ret;
u32 val;
@@ -121,66 +156,73 @@ static int rockchip_lvds_poweron(struct rockchip_lvds *lvds)
if (lvds->output == DISPLAY_OUTPUT_RGB) {
val |= RK3288_LVDS_CH0_REG0_TTL_EN |
RK3288_LVDS_CH0_REG0_LANECK_EN;
- lvds_writel(lvds, RK3288_LVDS_CH0_REG0, val);
- lvds_writel(lvds, RK3288_LVDS_CH0_REG2,
- RK3288_LVDS_PLL_FBDIV_REG2(0x46));
- lvds_writel(lvds, RK3288_LVDS_CH0_REG4,
- RK3288_LVDS_CH0_REG4_LANECK_TTL_MODE |
- RK3288_LVDS_CH0_REG4_LANE4_TTL_MODE |
- RK3288_LVDS_CH0_REG4_LANE3_TTL_MODE |
- RK3288_LVDS_CH0_REG4_LANE2_TTL_MODE |
- RK3288_LVDS_CH0_REG4_LANE1_TTL_MODE |
- RK3288_LVDS_CH0_REG4_LANE0_TTL_MODE);
- lvds_writel(lvds, RK3288_LVDS_CH0_REG5,
- RK3288_LVDS_CH0_REG5_LANECK_TTL_DATA |
- RK3288_LVDS_CH0_REG5_LANE4_TTL_DATA |
- RK3288_LVDS_CH0_REG5_LANE3_TTL_DATA |
- RK3288_LVDS_CH0_REG5_LANE2_TTL_DATA |
- RK3288_LVDS_CH0_REG5_LANE1_TTL_DATA |
- RK3288_LVDS_CH0_REG5_LANE0_TTL_DATA);
+ rk3288_writel(lvds, RK3288_LVDS_CH0_REG0, val);
+ rk3288_writel(lvds, RK3288_LVDS_CH0_REG2,
+ RK3288_LVDS_PLL_FBDIV_REG2(0x46));
+ rk3288_writel(lvds, RK3288_LVDS_CH0_REG4,
+ RK3288_LVDS_CH0_REG4_LANECK_TTL_MODE |
+ RK3288_LVDS_CH0_REG4_LANE4_TTL_MODE |
+ RK3288_LVDS_CH0_REG4_LANE3_TTL_MODE |
+ RK3288_LVDS_CH0_REG4_LANE2_TTL_MODE |
+ RK3288_LVDS_CH0_REG4_LANE1_TTL_MODE |
+ RK3288_LVDS_CH0_REG4_LANE0_TTL_MODE);
+ rk3288_writel(lvds, RK3288_LVDS_CH0_REG5,
+ RK3288_LVDS_CH0_REG5_LANECK_TTL_DATA |
+ RK3288_LVDS_CH0_REG5_LANE4_TTL_DATA |
+ RK3288_LVDS_CH0_REG5_LANE3_TTL_DATA |
+ RK3288_LVDS_CH0_REG5_LANE2_TTL_DATA |
+ RK3288_LVDS_CH0_REG5_LANE1_TTL_DATA |
+ RK3288_LVDS_CH0_REG5_LANE0_TTL_DATA);
} else {
val |= RK3288_LVDS_CH0_REG0_LVDS_EN |
RK3288_LVDS_CH0_REG0_LANECK_EN;
- lvds_writel(lvds, RK3288_LVDS_CH0_REG0, val);
- lvds_writel(lvds, RK3288_LVDS_CH0_REG1,
- RK3288_LVDS_CH0_REG1_LANECK_BIAS |
- RK3288_LVDS_CH0_REG1_LANE4_BIAS |
- RK3288_LVDS_CH0_REG1_LANE3_BIAS |
- RK3288_LVDS_CH0_REG1_LANE2_BIAS |
- RK3288_LVDS_CH0_REG1_LANE1_BIAS |
- RK3288_LVDS_CH0_REG1_LANE0_BIAS);
- lvds_writel(lvds, RK3288_LVDS_CH0_REG2,
- RK3288_LVDS_CH0_REG2_RESERVE_ON |
- RK3288_LVDS_CH0_REG2_LANECK_LVDS_MODE |
- RK3288_LVDS_CH0_REG2_LANE4_LVDS_MODE |
- RK3288_LVDS_CH0_REG2_LANE3_LVDS_MODE |
- RK3288_LVDS_CH0_REG2_LANE2_LVDS_MODE |
- RK3288_LVDS_CH0_REG2_LANE1_LVDS_MODE |
- RK3288_LVDS_CH0_REG2_LANE0_LVDS_MODE |
- RK3288_LVDS_PLL_FBDIV_REG2(0x46));
- lvds_writel(lvds, RK3288_LVDS_CH0_REG4, 0x00);
- lvds_writel(lvds, RK3288_LVDS_CH0_REG5, 0x00);
+ rk3288_writel(lvds, RK3288_LVDS_CH0_REG0, val);
+ rk3288_writel(lvds, RK3288_LVDS_CH0_REG1,
+ RK3288_LVDS_CH0_REG1_LANECK_BIAS |
+ RK3288_LVDS_CH0_REG1_LANE4_BIAS |
+ RK3288_LVDS_CH0_REG1_LANE3_BIAS |
+ RK3288_LVDS_CH0_REG1_LANE2_BIAS |
+ RK3288_LVDS_CH0_REG1_LANE1_BIAS |
+ RK3288_LVDS_CH0_REG1_LANE0_BIAS);
+ rk3288_writel(lvds, RK3288_LVDS_CH0_REG2,
+ RK3288_LVDS_CH0_REG2_RESERVE_ON |
+ RK3288_LVDS_CH0_REG2_LANECK_LVDS_MODE |
+ RK3288_LVDS_CH0_REG2_LANE4_LVDS_MODE |
+ RK3288_LVDS_CH0_REG2_LANE3_LVDS_MODE |
+ RK3288_LVDS_CH0_REG2_LANE2_LVDS_MODE |
+ RK3288_LVDS_CH0_REG2_LANE1_LVDS_MODE |
+ RK3288_LVDS_CH0_REG2_LANE0_LVDS_MODE |
+ RK3288_LVDS_PLL_FBDIV_REG2(0x46));
+ rk3288_writel(lvds, RK3288_LVDS_CH0_REG4, 0x00);
+ rk3288_writel(lvds, RK3288_LVDS_CH0_REG5, 0x00);
}
- lvds_writel(lvds, RK3288_LVDS_CH0_REG3, RK3288_LVDS_PLL_FBDIV_REG3(0x46));
- lvds_writel(lvds, RK3288_LVDS_CH0_REGD, RK3288_LVDS_PLL_PREDIV_REGD(0x0a));
- lvds_writel(lvds, RK3288_LVDS_CH0_REG20, RK3288_LVDS_CH0_REG20_LSB);
-
- lvds_writel(lvds, RK3288_LVDS_CFG_REGC, RK3288_LVDS_CFG_REGC_PLL_ENABLE);
- lvds_writel(lvds, RK3288_LVDS_CFG_REG21, RK3288_LVDS_CFG_REG21_TX_ENABLE);
+ rk3288_writel(lvds, RK3288_LVDS_CH0_REG3,
+ RK3288_LVDS_PLL_FBDIV_REG3(0x46));
+ rk3288_writel(lvds, RK3288_LVDS_CH0_REGD,
+ RK3288_LVDS_PLL_PREDIV_REGD(0x0a));
+ rk3288_writel(lvds, RK3288_LVDS_CH0_REG20,
+ RK3288_LVDS_CH0_REG20_LSB);
+
+ rk3288_writel(lvds, RK3288_LVDS_CFG_REGC,
+ RK3288_LVDS_CFG_REGC_PLL_ENABLE);
+ rk3288_writel(lvds, RK3288_LVDS_CFG_REG21,
+ RK3288_LVDS_CFG_REG21_TX_ENABLE);
return 0;
}
-static void rockchip_lvds_poweroff(struct rockchip_lvds *lvds)
+static void rk3288_lvds_poweroff(struct rockchip_lvds *lvds)
{
int ret;
u32 val;
- lvds_writel(lvds, RK3288_LVDS_CFG_REG21, RK3288_LVDS_CFG_REG21_TX_ENABLE);
- lvds_writel(lvds, RK3288_LVDS_CFG_REGC, RK3288_LVDS_CFG_REGC_PLL_ENABLE);
+ rk3288_writel(lvds, RK3288_LVDS_CFG_REG21,
+ RK3288_LVDS_CFG_REG21_TX_ENABLE);
+ rk3288_writel(lvds, RK3288_LVDS_CFG_REGC,
+ RK3288_LVDS_CFG_REGC_PLL_ENABLE);
val = LVDS_DUAL | LVDS_TTL_EN | LVDS_CH0_EN | LVDS_CH1_EN | LVDS_PWRDN;
val |= val << 16;
- ret = regmap_write(lvds->grf, lvds->soc_data->grf_soc_con7, val);
+ ret = regmap_write(lvds->grf, RK3288_LVDS_GRF_SOC_CON7, val);
if (ret != 0)
DRM_DEV_ERROR(lvds->dev, "Could not write to GRF: %d\n", ret);
@@ -188,29 +230,8 @@ static void rockchip_lvds_poweroff(struct rockchip_lvds *lvds)
clk_disable(lvds->pclk);
}
-static const struct drm_connector_funcs rockchip_lvds_connector_funcs = {
- .fill_modes = drm_helper_probe_single_connector_modes,
- .destroy = drm_connector_cleanup,
- .reset = drm_atomic_helper_connector_reset,
- .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
- .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
-};
-
-static int rockchip_lvds_connector_get_modes(struct drm_connector *connector)
-{
- struct rockchip_lvds *lvds = connector_to_lvds(connector);
- struct drm_panel *panel = lvds->panel;
-
- return drm_panel_get_modes(panel);
-}
-
-static const
-struct drm_connector_helper_funcs rockchip_lvds_connector_helper_funcs = {
- .get_modes = rockchip_lvds_connector_get_modes,
-};
-
-static void rockchip_lvds_grf_config(struct drm_encoder *encoder,
- struct drm_display_mode *mode)
+static int rk3288_lvds_grf_config(struct drm_encoder *encoder,
+ struct drm_display_mode *mode)
{
struct rockchip_lvds *lvds = encoder_to_lvds(encoder);
u8 pin_hsync = (mode->flags & DRM_MODE_FLAG_PHSYNC) ? 1 : 0;
@@ -234,22 +255,19 @@ static void rockchip_lvds_grf_config(struct drm_encoder *encoder,
val |= (pin_dclk << 8) | (pin_hsync << 9);
val |= (0xffff << 16);
- ret = regmap_write(lvds->grf, lvds->soc_data->grf_soc_con7, val);
- if (ret != 0) {
+ ret = regmap_write(lvds->grf, RK3288_LVDS_GRF_SOC_CON7, val);
+ if (ret)
DRM_DEV_ERROR(lvds->dev, "Could not write to GRF: %d\n", ret);
- return;
- }
+
+ return ret;
}
-static int rockchip_lvds_set_vop_source(struct rockchip_lvds *lvds,
- struct drm_encoder *encoder)
+static int rk3288_lvds_set_vop_source(struct rockchip_lvds *lvds,
+ struct drm_encoder *encoder)
{
u32 val;
int ret;
- if (!lvds->soc_data->has_vop_sel)
- return 0;
-
ret = drm_of_encoder_active_endpoint_id(lvds->dev->of_node, encoder);
if (ret < 0)
return ret;
@@ -258,56 +276,162 @@ static int rockchip_lvds_set_vop_source(struct rockchip_lvds *lvds,
if (ret)
val |= RK3288_LVDS_SOC_CON6_SEL_VOP_LIT;
- ret = regmap_write(lvds->grf, lvds->soc_data->grf_soc_con6, val);
+ ret = regmap_write(lvds->grf, RK3288_LVDS_GRF_SOC_CON6, val);
if (ret < 0)
return ret;
return 0;
}
-static int
-rockchip_lvds_encoder_atomic_check(struct drm_encoder *encoder,
- struct drm_crtc_state *crtc_state,
- struct drm_connector_state *conn_state)
+static void rk3288_lvds_encoder_enable(struct drm_encoder *encoder)
{
- struct rockchip_crtc_state *s = to_rockchip_crtc_state(crtc_state);
+ struct rockchip_lvds *lvds = encoder_to_lvds(encoder);
+ struct drm_display_mode *mode = &encoder->crtc->state->adjusted_mode;
+ int ret;
- s->output_mode = ROCKCHIP_OUT_MODE_P888;
- s->output_type = DRM_MODE_CONNECTOR_LVDS;
+ drm_panel_prepare(lvds->panel);
- return 0;
+ ret = rk3288_lvds_poweron(lvds);
+ if (ret < 0) {
+ DRM_DEV_ERROR(lvds->dev, "failed to power on LVDS: %d\n", ret);
+ drm_panel_unprepare(lvds->panel);
+ return;
+ }
+
+ ret = rk3288_lvds_grf_config(encoder, mode);
+ if (ret) {
+ DRM_DEV_ERROR(lvds->dev, "failed to configure LVDS: %d\n", ret);
+ drm_panel_unprepare(lvds->panel);
+ return;
+ }
+
+ ret = rk3288_lvds_set_vop_source(lvds, encoder);
+ if (ret) {
+ DRM_DEV_ERROR(lvds->dev, "failed to set VOP source: %d\n", ret);
+ drm_panel_unprepare(lvds->panel);
+ return;
+ }
+
+ drm_panel_enable(lvds->panel);
+}
+
+static void rk3288_lvds_encoder_disable(struct drm_encoder *encoder)
+{
+ struct rockchip_lvds *lvds = encoder_to_lvds(encoder);
+
+ drm_panel_disable(lvds->panel);
+ rk3288_lvds_poweroff(lvds);
+ drm_panel_unprepare(lvds->panel);
+}
+
+static int px30_lvds_poweron(struct rockchip_lvds *lvds)
+{
+ int ret;
+
+ ret = pm_runtime_get_sync(lvds->dev);
+ if (ret < 0) {
+ DRM_DEV_ERROR(lvds->dev, "failed to get pm runtime: %d\n", ret);
+ return ret;
+ }
+
+ /* Enable LVDS mode */
+ return regmap_update_bits(lvds->grf, PX30_LVDS_GRF_PD_VO_CON1,
+ PX30_LVDS_MODE_EN(1) | PX30_LVDS_P2S_EN(1),
+ PX30_LVDS_MODE_EN(1) | PX30_LVDS_P2S_EN(1));
+}
+
+static void px30_lvds_poweroff(struct rockchip_lvds *lvds)
+{
+ regmap_update_bits(lvds->grf, PX30_LVDS_GRF_PD_VO_CON1,
+ PX30_LVDS_MODE_EN(1) | PX30_LVDS_P2S_EN(1),
+ PX30_LVDS_MODE_EN(0) | PX30_LVDS_P2S_EN(0));
+
+ pm_runtime_put(lvds->dev);
+}
+
+static int px30_lvds_grf_config(struct drm_encoder *encoder,
+ struct drm_display_mode *mode)
+{
+ struct rockchip_lvds *lvds = encoder_to_lvds(encoder);
+
+ if (lvds->output != DISPLAY_OUTPUT_LVDS) {
+ DRM_DEV_ERROR(lvds->dev, "Unsupported display output %d\n",
+ lvds->output);
+ return -EINVAL;
+ }
+
+ /* Set format */
+ return regmap_update_bits(lvds->grf, PX30_LVDS_GRF_PD_VO_CON1,
+ PX30_LVDS_FORMAT(lvds->format),
+ PX30_LVDS_FORMAT(lvds->format));
+}
+
+static int px30_lvds_set_vop_source(struct rockchip_lvds *lvds,
+ struct drm_encoder *encoder)
+{
+ int vop;
+
+ vop = drm_of_encoder_active_endpoint_id(lvds->dev->of_node, encoder);
+ if (vop < 0)
+ return vop;
+
+ return regmap_update_bits(lvds->grf, PX30_LVDS_GRF_PD_VO_CON1,
+ PX30_LVDS_VOP_SEL(1),
+ PX30_LVDS_VOP_SEL(vop));
}
-static void rockchip_lvds_encoder_enable(struct drm_encoder *encoder)
+static void px30_lvds_encoder_enable(struct drm_encoder *encoder)
{
struct rockchip_lvds *lvds = encoder_to_lvds(encoder);
struct drm_display_mode *mode = &encoder->crtc->state->adjusted_mode;
int ret;
drm_panel_prepare(lvds->panel);
- ret = rockchip_lvds_poweron(lvds);
- if (ret < 0) {
- DRM_DEV_ERROR(lvds->dev, "failed to power on lvds: %d\n", ret);
+
+ ret = px30_lvds_poweron(lvds);
+ if (ret) {
+ DRM_DEV_ERROR(lvds->dev, "failed to power on LVDS: %d\n", ret);
drm_panel_unprepare(lvds->panel);
+ return;
+ }
+
+ ret = px30_lvds_grf_config(encoder, mode);
+ if (ret) {
+ DRM_DEV_ERROR(lvds->dev, "failed to configure LVDS: %d\n", ret);
+ drm_panel_unprepare(lvds->panel);
+ return;
}
- rockchip_lvds_grf_config(encoder, mode);
- rockchip_lvds_set_vop_source(lvds, encoder);
+
+ ret = px30_lvds_set_vop_source(lvds, encoder);
+ if (ret) {
+ DRM_DEV_ERROR(lvds->dev, "failed to set VOP source: %d\n", ret);
+ drm_panel_unprepare(lvds->panel);
+ return;
+ }
+
drm_panel_enable(lvds->panel);
}
-static void rockchip_lvds_encoder_disable(struct drm_encoder *encoder)
+static void px30_lvds_encoder_disable(struct drm_encoder *encoder)
{
struct rockchip_lvds *lvds = encoder_to_lvds(encoder);
drm_panel_disable(lvds->panel);
- rockchip_lvds_poweroff(lvds);
+ px30_lvds_poweroff(lvds);
drm_panel_unprepare(lvds->panel);
}
static const
-struct drm_encoder_helper_funcs rockchip_lvds_encoder_helper_funcs = {
- .enable = rockchip_lvds_encoder_enable,
- .disable = rockchip_lvds_encoder_disable,
+struct drm_encoder_helper_funcs rk3288_lvds_encoder_helper_funcs = {
+ .enable = rk3288_lvds_encoder_enable,
+ .disable = rk3288_lvds_encoder_disable,
+ .atomic_check = rockchip_lvds_encoder_atomic_check,
+};
+
+static const
+struct drm_encoder_helper_funcs px30_lvds_encoder_helper_funcs = {
+ .enable = px30_lvds_encoder_enable,
+ .disable = px30_lvds_encoder_disable,
.atomic_check = rockchip_lvds_encoder_atomic_check,
};
@@ -315,11 +439,88 @@ static const struct drm_encoder_funcs rockchip_lvds_encoder_funcs = {
.destroy = drm_encoder_cleanup,
};
+static int rk3288_lvds_probe(struct platform_device *pdev,
+ struct rockchip_lvds *lvds)
+{
+ struct resource *res;
+ int ret;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ lvds->regs = devm_ioremap_resource(lvds->dev, res);
+ if (IS_ERR(lvds->regs))
+ return PTR_ERR(lvds->regs);
+
+ lvds->pclk = devm_clk_get(lvds->dev, "pclk_lvds");
+ if (IS_ERR(lvds->pclk)) {
+ DRM_DEV_ERROR(lvds->dev, "could not get pclk_lvds\n");
+ return PTR_ERR(lvds->pclk);
+ }
+
+ lvds->pins = devm_kzalloc(lvds->dev, sizeof(*lvds->pins),
+ GFP_KERNEL);
+ if (!lvds->pins)
+ return -ENOMEM;
+
+ lvds->pins->p = devm_pinctrl_get(lvds->dev);
+ if (IS_ERR(lvds->pins->p)) {
+ DRM_DEV_ERROR(lvds->dev, "no pinctrl handle\n");
+ devm_kfree(lvds->dev, lvds->pins);
+ lvds->pins = NULL;
+ } else {
+ lvds->pins->default_state =
+ pinctrl_lookup_state(lvds->pins->p, "lcdc");
+ if (IS_ERR(lvds->pins->default_state)) {
+ DRM_DEV_ERROR(lvds->dev, "no default pinctrl state\n");
+ devm_kfree(lvds->dev, lvds->pins);
+ lvds->pins = NULL;
+ }
+ }
+
+ ret = clk_prepare(lvds->pclk);
+ if (ret < 0) {
+ DRM_DEV_ERROR(lvds->dev, "failed to prepare pclk_lvds\n");
+ return ret;
+ }
+
+ return 0;
+}
+
+static int px30_lvds_probe(struct platform_device *pdev,
+ struct rockchip_lvds *lvds)
+{
+ int ret;
+
+ /* MSB */
+ ret = regmap_update_bits(lvds->grf, PX30_LVDS_GRF_PD_VO_CON1,
+ PX30_LVDS_MSBSEL(1),
+ PX30_LVDS_MSBSEL(1));
+ if (ret)
+ return ret;
+
+ /* PHY */
+ lvds->dphy = devm_phy_get(&pdev->dev, "dphy");
+ if (IS_ERR(lvds->dphy))
+ return PTR_ERR(lvds->dphy);
+
+ ret = phy_init(lvds->dphy);
+ if (ret)
+ return ret;
+
+ ret = phy_set_mode(lvds->dphy, PHY_MODE_LVDS);
+ if (ret)
+ return ret;
+
+ return phy_power_on(lvds->dphy);
+}
+
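/*
 * Hedged note, not in this patch: the mirror-image teardown for the
 * bring-up above would be phy_power_off(lvds->dphy) followed by
 * phy_exit(lvds->dphy).
 */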
static const struct rockchip_lvds_soc_data rk3288_lvds_data = {
- .ch1_offset = 0x100,
- .grf_soc_con6 = 0x025c,
- .grf_soc_con7 = 0x0260,
- .has_vop_sel = true,
+ .probe = rk3288_lvds_probe,
+ .helper_funcs = &rk3288_lvds_encoder_helper_funcs,
+};
+
+static const struct rockchip_lvds_soc_data px30_lvds_data = {
+ .probe = px30_lvds_probe,
+ .helper_funcs = &px30_lvds_encoder_helper_funcs,
};
static const struct of_device_id rockchip_lvds_dt_ids[] = {
@@ -327,6 +528,10 @@ static const struct of_device_id rockchip_lvds_dt_ids[] = {
.compatible = "rockchip,rk3288-lvds",
.data = &rk3288_lvds_data
},
+ {
+ .compatible = "rockchip,px30-lvds",
+ .data = &px30_lvds_data
+ },
{}
};
MODULE_DEVICE_TABLE(of, rockchip_lvds_dt_ids);
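The per-SoC ops travel through the match table's .data pointer; a hedged sketch of the retrieval at probe time (the driver itself goes through of_match_device(); this helper-based form is equivalent):

	lvds->soc_data = of_device_get_match_data(dev);
	if (!lvds->soc_data)
		return -ENODEV;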
@@ -378,7 +583,7 @@ static int rockchip_lvds_bind(struct device *dev, struct device *master,
/* default set it as output rgb */
lvds->output = DISPLAY_OUTPUT_RGB;
else
- lvds->output = lvds_name_to_output(name);
+ lvds->output = rockchip_lvds_name_to_output(name);
if (lvds->output < 0) {
DRM_DEV_ERROR(dev, "invalid output type [%s]\n", name);
@@ -390,7 +595,7 @@ static int rockchip_lvds_bind(struct device *dev, struct device *master,
/* default set it as format vesa 18 */
lvds->format = LVDS_VESA_18;
else
- lvds->format = lvds_name_to_format(name);
+ lvds->format = rockchip_lvds_name_to_format(name);
if (lvds->format < 0) {
DRM_DEV_ERROR(dev, "invalid data-mapping format [%s]\n", name);
@@ -410,7 +615,7 @@ static int rockchip_lvds_bind(struct device *dev, struct device *master,
goto err_put_remote;
}
- drm_encoder_helper_add(encoder, &rockchip_lvds_encoder_helper_funcs);
+ drm_encoder_helper_add(encoder, lvds->soc_data->helper_funcs);
if (lvds->panel) {
connector = &lvds->connector;
@@ -471,8 +676,10 @@ static void rockchip_lvds_unbind(struct device *dev, struct device *master,
void *data)
{
struct rockchip_lvds *lvds = dev_get_drvdata(dev);
+ const struct drm_encoder_helper_funcs *encoder_funcs;
- rockchip_lvds_encoder_disable(&lvds->encoder);
+ encoder_funcs = lvds->soc_data->helper_funcs;
+ encoder_funcs->disable(&lvds->encoder);
if (lvds->panel)
drm_panel_detach(lvds->panel);
pm_runtime_disable(dev);
@@ -490,7 +697,6 @@ static int rockchip_lvds_probe(struct platform_device *pdev)
struct device *dev = &pdev->dev;
struct rockchip_lvds *lvds;
const struct of_device_id *match;
- struct resource *res;
int ret;
if (!dev->of_node)
@@ -506,37 +712,6 @@ static int rockchip_lvds_probe(struct platform_device *pdev)
return -ENODEV;
lvds->soc_data = match->data;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- lvds->regs = devm_ioremap_resource(&pdev->dev, res);
- if (IS_ERR(lvds->regs))
- return PTR_ERR(lvds->regs);
-
- lvds->pclk = devm_clk_get(&pdev->dev, "pclk_lvds");
- if (IS_ERR(lvds->pclk)) {
- DRM_DEV_ERROR(dev, "could not get pclk_lvds\n");
- return PTR_ERR(lvds->pclk);
- }
-
- lvds->pins = devm_kzalloc(lvds->dev, sizeof(*lvds->pins),
- GFP_KERNEL);
- if (!lvds->pins)
- return -ENOMEM;
-
- lvds->pins->p = devm_pinctrl_get(lvds->dev);
- if (IS_ERR(lvds->pins->p)) {
- DRM_DEV_ERROR(dev, "no pinctrl handle\n");
- devm_kfree(lvds->dev, lvds->pins);
- lvds->pins = NULL;
- } else {
- lvds->pins->default_state =
- pinctrl_lookup_state(lvds->pins->p, "lcdc");
- if (IS_ERR(lvds->pins->default_state)) {
- DRM_DEV_ERROR(dev, "no default pinctrl state\n");
- devm_kfree(lvds->dev, lvds->pins);
- lvds->pins = NULL;
- }
- }
-
lvds->grf = syscon_regmap_lookup_by_phandle(dev->of_node,
"rockchip,grf");
if (IS_ERR(lvds->grf)) {
@@ -544,13 +719,14 @@ static int rockchip_lvds_probe(struct platform_device *pdev)
return PTR_ERR(lvds->grf);
}
- dev_set_drvdata(dev, lvds);
-
- ret = clk_prepare(lvds->pclk);
- if (ret < 0) {
- DRM_DEV_ERROR(dev, "failed to prepare pclk_lvds\n");
+ ret = lvds->soc_data->probe(pdev, lvds);
+ if (ret) {
+ DRM_DEV_ERROR(dev, "Platform initialization failed\n");
return ret;
}
+
+ dev_set_drvdata(dev, lvds);
+
ret = component_add(&pdev->dev, &rockchip_lvds_component_ops);
if (ret < 0) {
DRM_DEV_ERROR(dev, "failed to add component\n");
diff --git a/drivers/gpu/drm/rockchip/rockchip_lvds.h b/drivers/gpu/drm/rockchip/rockchip_lvds.h
index 029bad8e1a14..4ce967d23813 100644
--- a/drivers/gpu/drm/rockchip/rockchip_lvds.h
+++ b/drivers/gpu/drm/rockchip/rockchip_lvds.h
@@ -70,7 +70,10 @@
#define RK3288_LVDS_CFG_REG21 0x84
#define RK3288_LVDS_CFG_REG21_TX_ENABLE 0x92
#define RK3288_LVDS_CFG_REG21_TX_DISABLE 0x00
-#define RK3288_LVDS_CH1_OFFSET 0x100
+#define RK3288_LVDS_CH1_OFFSET 0x100
+
+#define RK3288_LVDS_GRF_SOC_CON6 0x025C
+#define RK3288_LVDS_GRF_SOC_CON7 0x0260
/* fbdiv value is split over 2 registers, with bit8 in reg2 */
#define RK3288_LVDS_PLL_FBDIV_REG2(_fbd) \
@@ -103,4 +106,18 @@
#define LVDS_VESA_18 2
#define LVDS_JEIDA_18 3
+#define HIWORD_UPDATE(v, h, l) ((GENMASK(h, l) << 16) | ((v) << (l)))
+
+#define PX30_LVDS_GRF_PD_VO_CON0 0x434
+#define PX30_LVDS_TIE_CLKS(val) HIWORD_UPDATE(val, 8, 8)
+#define PX30_LVDS_INVERT_CLKS(val) HIWORD_UPDATE(val, 9, 9)
+#define PX30_LVDS_INVERT_DCLK(val) HIWORD_UPDATE(val, 5, 5)
+
+#define PX30_LVDS_GRF_PD_VO_CON1 0x438
+#define PX30_LVDS_FORMAT(val) HIWORD_UPDATE(val, 14, 13)
+#define PX30_LVDS_MODE_EN(val) HIWORD_UPDATE(val, 12, 12)
+#define PX30_LVDS_MSBSEL(val) HIWORD_UPDATE(val, 11, 11)
+#define PX30_LVDS_P2S_EN(val) HIWORD_UPDATE(val, 6, 6)
+#define PX30_LVDS_VOP_SEL(val) HIWORD_UPDATE(val, 1, 1)
+
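/*
 * Worked expansion of the hi-word write-mask convention above (arithmetic
 * only, no new code): Rockchip GRF registers use the upper 16 bits as a
 * write-enable mask for the lower 16 bits, so
 *
 *	PX30_LVDS_MODE_EN(1)
 *	  = (GENMASK(12, 12) << 16) | (1 << 12)
 *	  = 0x10000000 | 0x00001000 = 0x10001000
 *
 * Bit 28 unlocks bit 12 for this write; fields whose mask bits are clear
 * keep their current value.
 */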
#endif /* _ROCKCHIP_LVDS_ */
diff --git a/drivers/gpu/drm/savage/savage_drv.c b/drivers/gpu/drm/savage/savage_drv.c
index 2966fcfd9548..799bd11adb9c 100644
--- a/drivers/gpu/drm/savage/savage_drv.c
+++ b/drivers/gpu/drm/savage/savage_drv.c
@@ -24,10 +24,10 @@
*/
#include <linux/module.h>
+#include <linux/pci.h>
#include <drm/drm_drv.h>
#include <drm/drm_file.h>
-#include <drm/drm_pci.h>
#include <drm/drm_pciids.h>
#include "savage_drv.h"
diff --git a/drivers/gpu/drm/scheduler/sched_entity.c b/drivers/gpu/drm/scheduler/sched_entity.c
index 461a7a8129f4..ec79e8e5ad3c 100644
--- a/drivers/gpu/drm/scheduler/sched_entity.c
+++ b/drivers/gpu/drm/scheduler/sched_entity.c
@@ -38,46 +38,40 @@
* submit to HW ring.
*
* @entity: scheduler entity to init
- * @rq_list: the list of run queue on which jobs from this
+ * @priority: priority of the entity
+ * @sched_list: the list of drm scheds on which jobs from this
* entity can be submitted
- * @num_rq_list: number of run queue in rq_list
+ * @num_sched_list: number of drm scheds in sched_list
* @guilty: atomic_t set to 1 when a job on this queue
* is found to be guilty causing a timeout
*
- * Note: the rq_list should have atleast one element to schedule
+ * Note: the sched_list should have at least one element to schedule
* the entity
*
* Returns 0 on success or a negative error code on failure.
*/
int drm_sched_entity_init(struct drm_sched_entity *entity,
- struct drm_sched_rq **rq_list,
- unsigned int num_rq_list,
+ enum drm_sched_priority priority,
+ struct drm_gpu_scheduler **sched_list,
+ unsigned int num_sched_list,
atomic_t *guilty)
{
- int i;
-
- if (!(entity && rq_list && (num_rq_list == 0 || rq_list[0])))
+ if (!(entity && sched_list && (num_sched_list == 0 || sched_list[0])))
return -EINVAL;
memset(entity, 0, sizeof(struct drm_sched_entity));
INIT_LIST_HEAD(&entity->list);
entity->rq = NULL;
entity->guilty = guilty;
- entity->num_rq_list = num_rq_list;
- entity->rq_list = kcalloc(num_rq_list, sizeof(struct drm_sched_rq *),
- GFP_KERNEL);
- if (!entity->rq_list)
- return -ENOMEM;
-
- init_completion(&entity->entity_idle);
-
- for (i = 0; i < num_rq_list; ++i)
- entity->rq_list[i] = rq_list[i];
+ entity->num_sched_list = num_sched_list;
+ entity->priority = priority;
+ entity->sched_list = num_sched_list > 1 ? sched_list : NULL;
+ entity->last_scheduled = NULL;
- if (num_rq_list)
- entity->rq = rq_list[0];
+ if (num_sched_list)
+ entity->rq = &sched_list[0]->sched_rq[entity->priority];
- entity->last_scheduled = NULL;
+ init_completion(&entity->entity_idle);
spin_lock_init(&entity->rq_lock);
spsc_queue_init(&entity->job_queue);
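With the new signature, callers pass a scheduler array and an explicit priority instead of per-priority run queues; a hedged usage sketch ('ring' and 'entity' are placeholders, not from this patch):

	struct drm_gpu_scheduler *sched_list[] = { &ring->sched };
	int r;

	r = drm_sched_entity_init(&entity, DRM_SCHED_PRIORITY_NORMAL,
				  sched_list, ARRAY_SIZE(sched_list), NULL);
	if (r)
		return r;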
@@ -136,21 +130,21 @@ static struct drm_sched_rq *
drm_sched_entity_get_free_sched(struct drm_sched_entity *entity)
{
struct drm_sched_rq *rq = NULL;
- unsigned int min_jobs = UINT_MAX, num_jobs;
+ unsigned int min_score = UINT_MAX, num_score;
int i;
- for (i = 0; i < entity->num_rq_list; ++i) {
- struct drm_gpu_scheduler *sched = entity->rq_list[i]->sched;
+ for (i = 0; i < entity->num_sched_list; ++i) {
+ struct drm_gpu_scheduler *sched = entity->sched_list[i];
- if (!entity->rq_list[i]->sched->ready) {
+ if (!entity->sched_list[i]->ready) {
DRM_WARN("sched%s is not ready, skipping", sched->name);
continue;
}
- num_jobs = atomic_read(&sched->num_jobs);
- if (num_jobs < min_jobs) {
- min_jobs = num_jobs;
- rq = entity->rq_list[i];
+ num_score = atomic_read(&sched->score);
+ if (num_score < min_score) {
+ min_score = num_score;
+ rq = &entity->sched_list[i]->sched_rq[entity->priority];
}
}
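For orientation, where the renamed 'score' counter moves elsewhere in this patch (summary of the hunks below, no new code):

	drm_sched_rq_add_entity()    -> atomic_inc(&rq->sched->score)
	drm_sched_entity_push_job()  -> atomic_inc(&entity->rq->sched->score)
	drm_sched_rq_remove_entity() -> atomic_dec(&rq->sched->score)
	drm_sched_process_job()      -> atomic_dec(&sched->score)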
@@ -308,7 +302,6 @@ void drm_sched_entity_fini(struct drm_sched_entity *entity)
dma_fence_put(entity->last_scheduled);
entity->last_scheduled = NULL;
- kfree(entity->rq_list);
}
EXPORT_SYMBOL(drm_sched_entity_fini);
@@ -354,15 +347,6 @@ static void drm_sched_entity_wakeup(struct dma_fence *f,
}
/**
- * drm_sched_entity_set_rq_priority - helper for drm_sched_entity_set_priority
- */
-static void drm_sched_entity_set_rq_priority(struct drm_sched_rq **rq,
- enum drm_sched_priority priority)
-{
- *rq = &(*rq)->sched->sched_rq[priority];
-}
-
-/**
* drm_sched_entity_set_priority - Sets priority of the entity
*
* @entity: scheduler entity
@@ -373,19 +357,8 @@ static void drm_sched_entity_set_rq_priority(struct drm_sched_rq **rq,
void drm_sched_entity_set_priority(struct drm_sched_entity *entity,
enum drm_sched_priority priority)
{
- unsigned int i;
-
spin_lock(&entity->rq_lock);
-
- for (i = 0; i < entity->num_rq_list; ++i)
- drm_sched_entity_set_rq_priority(&entity->rq_list[i], priority);
-
- if (entity->rq) {
- drm_sched_rq_remove_entity(entity->rq, entity);
- drm_sched_entity_set_rq_priority(&entity->rq, priority);
- drm_sched_rq_add_entity(entity->rq, entity);
- }
-
+ entity->priority = priority;
spin_unlock(&entity->rq_lock);
}
EXPORT_SYMBOL(drm_sched_entity_set_priority);
@@ -490,20 +463,20 @@ void drm_sched_entity_select_rq(struct drm_sched_entity *entity)
struct dma_fence *fence;
struct drm_sched_rq *rq;
- if (spsc_queue_count(&entity->job_queue) || entity->num_rq_list <= 1)
+ if (spsc_queue_count(&entity->job_queue) || entity->num_sched_list <= 1)
return;
fence = READ_ONCE(entity->last_scheduled);
if (fence && !dma_fence_is_signaled(fence))
return;
+ spin_lock(&entity->rq_lock);
rq = drm_sched_entity_get_free_sched(entity);
- if (rq == entity->rq)
- return;
+ if (rq != entity->rq) {
+ drm_sched_rq_remove_entity(entity->rq, entity);
+ entity->rq = rq;
+ }
- spin_lock(&entity->rq_lock);
- drm_sched_rq_remove_entity(entity->rq, entity);
- entity->rq = rq;
spin_unlock(&entity->rq_lock);
}
@@ -525,7 +498,7 @@ void drm_sched_entity_push_job(struct drm_sched_job *sched_job,
bool first;
trace_drm_sched_job(sched_job, entity);
- atomic_inc(&entity->rq->sched->num_jobs);
+ atomic_inc(&entity->rq->sched->score);
WRITE_ONCE(entity->last_user, current->group_leader);
first = spsc_queue_push(&entity->job_queue, &sched_job->queue_node);
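The entity changes above replace the per-entity run-queue array with a scheduler list plus a stored priority, so a run queue can always be derived as &sched->sched_rq[priority] and load balancing reduces to picking the scheduler with the lowest score. A condensed sketch of that pick, using only the fields this patch introduces (the helper name is illustrative, not part of the kernel):

#include <linux/atomic.h>
#include <linux/limits.h>

#include <drm/gpu_scheduler.h>

static struct drm_sched_rq *
pick_least_loaded_rq(struct drm_sched_entity *entity)
{
	struct drm_sched_rq *rq = NULL;
	unsigned int min_score = UINT_MAX;
	int i;

	for (i = 0; i < entity->num_sched_list; ++i) {
		struct drm_gpu_scheduler *sched = entity->sched_list[i];
		unsigned int score;

		if (!sched->ready)
			continue;	/* scheduler not up yet, skip it */

		/* score counts queued jobs plus entities attached to the rqs */
		score = atomic_read(&sched->score);
		if (score < min_score) {
			min_score = score;
			rq = &sched->sched_rq[entity->priority];
		}
	}

	return rq;	/* NULL if no scheduler was ready */
}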
diff --git a/drivers/gpu/drm/scheduler/sched_main.c b/drivers/gpu/drm/scheduler/sched_main.c
index 3c57e84222ca..71ce6215956f 100644
--- a/drivers/gpu/drm/scheduler/sched_main.c
+++ b/drivers/gpu/drm/scheduler/sched_main.c
@@ -92,6 +92,7 @@ void drm_sched_rq_add_entity(struct drm_sched_rq *rq,
if (!list_empty(&entity->list))
return;
spin_lock(&rq->lock);
+ atomic_inc(&rq->sched->score);
list_add_tail(&entity->list, &rq->entities);
spin_unlock(&rq->lock);
}
@@ -110,6 +111,7 @@ void drm_sched_rq_remove_entity(struct drm_sched_rq *rq,
if (list_empty(&entity->list))
return;
spin_lock(&rq->lock);
+ atomic_dec(&rq->sched->score);
list_del_init(&entity->list);
if (rq->current_entity == entity)
rq->current_entity = NULL;
@@ -287,10 +289,21 @@ static void drm_sched_job_timedout(struct work_struct *work)
unsigned long flags;
sched = container_of(work, struct drm_gpu_scheduler, work_tdr.work);
+
+ /* Protects against concurrent deletion in drm_sched_get_cleanup_job */
+ spin_lock_irqsave(&sched->job_list_lock, flags);
job = list_first_entry_or_null(&sched->ring_mirror_list,
struct drm_sched_job, node);
if (job) {
+ /*
+ * Remove the bad job so it cannot be freed by concurrent
+ * drm_sched_cleanup_jobs. It will be reinserted after sched->thread
+ * is parked, at which point it's safe.
+ */
+ list_del_init(&job->node);
+ spin_unlock_irqrestore(&sched->job_list_lock, flags);
+
job->sched->ops->timedout_job(job);
/*
@@ -301,6 +314,8 @@ static void drm_sched_job_timedout(struct work_struct *work)
job->sched->ops->free_job(job);
sched->free_guilty = false;
}
+ } else {
+ spin_unlock_irqrestore(&sched->job_list_lock, flags);
}
spin_lock_irqsave(&sched->job_list_lock, flags);
@@ -373,6 +388,20 @@ void drm_sched_stop(struct drm_gpu_scheduler *sched, struct drm_sched_job *bad)
kthread_park(sched->thread);
/*
+ * Reinsert the bad job here - now it's safe, as
+ * drm_sched_get_cleanup_job cannot race against us and release the
+ * bad job at this point: we parked (waited for) any in-progress
+ * (earlier) cleanups, and drm_sched_get_cleanup_job will not be
+ * called again until the scheduler thread is unparked.
+ */
+ if (bad && bad->sched == sched)
+ /*
+ * Add at the head of the queue to reflect it was the earliest
+ * job extracted.
+ */
+ list_add(&bad->node, &sched->ring_mirror_list);
+
+ /*
* Iterate the job list from later to earlier one and either deactivate
* their HW callbacks or remove them from mirror list if they already
* signaled.
@@ -628,7 +657,7 @@ static void drm_sched_process_job(struct dma_fence *f, struct dma_fence_cb *cb)
struct drm_gpu_scheduler *sched = s_fence->sched;
atomic_dec(&sched->hw_rq_count);
- atomic_dec(&sched->num_jobs);
+ atomic_dec(&sched->score);
trace_drm_sched_process_job(s_fence);
@@ -803,7 +832,7 @@ int drm_sched_init(struct drm_gpu_scheduler *sched,
spin_lock_init(&sched->job_list_lock);
atomic_set(&sched->hw_rq_count, 0);
INIT_DELAYED_WORK(&sched->work_tdr, drm_sched_job_timedout);
- atomic_set(&sched->num_jobs, 0);
+ atomic_set(&sched->score, 0);
atomic64_set(&sched->job_id_count, 0);
/* Each scheduler will run on a separate kernel thread */
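Taken together, the sched_main.c hunks above close a use-after-free race between the timeout handler and job cleanup: the bad job is unlinked under job_list_lock before the driver's timeout callback runs, and it is only reinserted once the scheduler thread is parked. A condensed, illustrative sketch of that ordering (function names are placeholders):

#include <linux/kthread.h>
#include <linux/list.h>
#include <linux/spinlock.h>

#include <drm/gpu_scheduler.h>

static void timedout_sketch(struct drm_gpu_scheduler *sched)
{
	struct drm_sched_job *job;
	unsigned long flags;

	/* Unlink the job under the lock so concurrent cleanup
	 * cannot free it while the timeout callback runs. */
	spin_lock_irqsave(&sched->job_list_lock, flags);
	job = list_first_entry_or_null(&sched->ring_mirror_list,
				       struct drm_sched_job, node);
	if (job)
		list_del_init(&job->node);
	spin_unlock_irqrestore(&sched->job_list_lock, flags);

	if (job)
		job->sched->ops->timedout_job(job);
}

static void stop_sketch(struct drm_gpu_scheduler *sched,
			struct drm_sched_job *bad)
{
	/* Parking waits out any in-flight cleanup ... */
	kthread_park(sched->thread);

	/* ... so the bad job can safely go back at the head. */
	if (bad && bad->sched == sched)
		list_add(&bad->node, &sched->ring_mirror_list);
}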
diff --git a/drivers/gpu/drm/selftests/Makefile b/drivers/gpu/drm/selftests/Makefile
index d2137342b371..0856e4b12f70 100644
--- a/drivers/gpu/drm/selftests/Makefile
+++ b/drivers/gpu/drm/selftests/Makefile
@@ -1,6 +1,7 @@
# SPDX-License-Identifier: GPL-2.0-only
test-drm_modeset-y := test-drm_modeset_common.o test-drm_plane_helper.o \
test-drm_format.o test-drm_framebuffer.o \
- test-drm_damage_helper.o test-drm_dp_mst_helper.o
+ test-drm_damage_helper.o test-drm_dp_mst_helper.o \
+ test-drm_rect.o
obj-$(CONFIG_DRM_DEBUG_SELFTEST) += test-drm_mm.o test-drm_modeset.o test-drm_cmdline_parser.o
diff --git a/drivers/gpu/drm/selftests/drm_cmdline_selftests.h b/drivers/gpu/drm/selftests/drm_cmdline_selftests.h
index 6d61a0eb5d64..ceac7af9a172 100644
--- a/drivers/gpu/drm/selftests/drm_cmdline_selftests.h
+++ b/drivers/gpu/drm/selftests/drm_cmdline_selftests.h
@@ -60,3 +60,8 @@ cmdline_test(drm_cmdline_test_vmirror)
cmdline_test(drm_cmdline_test_margin_options)
cmdline_test(drm_cmdline_test_multiple_options)
cmdline_test(drm_cmdline_test_invalid_option)
+cmdline_test(drm_cmdline_test_bpp_extra_and_option)
+cmdline_test(drm_cmdline_test_extra_and_option)
+cmdline_test(drm_cmdline_test_freestanding_options)
+cmdline_test(drm_cmdline_test_freestanding_force_e_and_options)
+cmdline_test(drm_cmdline_test_panel_orientation)
diff --git a/drivers/gpu/drm/selftests/drm_modeset_selftests.h b/drivers/gpu/drm/selftests/drm_modeset_selftests.h
index 1898de0b4a4d..782e285ca383 100644
--- a/drivers/gpu/drm/selftests/drm_modeset_selftests.h
+++ b/drivers/gpu/drm/selftests/drm_modeset_selftests.h
@@ -6,6 +6,10 @@
*
* Tests are executed in order by igt/drm_selftests_helper
*/
+selftest(drm_rect_clip_scaled_div_by_zero, igt_drm_rect_clip_scaled_div_by_zero)
+selftest(drm_rect_clip_scaled_not_clipped, igt_drm_rect_clip_scaled_not_clipped)
+selftest(drm_rect_clip_scaled_clipped, igt_drm_rect_clip_scaled_clipped)
+selftest(drm_rect_clip_scaled_signed_vs_unsigned, igt_drm_rect_clip_scaled_signed_vs_unsigned)
selftest(check_plane_state, igt_check_plane_state)
selftest(check_drm_format_block_width, igt_check_drm_format_block_width)
selftest(check_drm_format_block_height, igt_check_drm_format_block_height)
diff --git a/drivers/gpu/drm/selftests/test-drm_cmdline_parser.c b/drivers/gpu/drm/selftests/test-drm_cmdline_parser.c
index 013de9d27c35..520f3e66a384 100644
--- a/drivers/gpu/drm/selftests/test-drm_cmdline_parser.c
+++ b/drivers/gpu/drm/selftests/test-drm_cmdline_parser.c
@@ -992,6 +992,128 @@ static int drm_cmdline_test_invalid_option(void *ignored)
return 0;
}
+static int drm_cmdline_test_bpp_extra_and_option(void *ignored)
+{
+ struct drm_cmdline_mode mode = { };
+
+ FAIL_ON(!drm_mode_parse_command_line_for_connector("720x480-24e,rotate=180",
+ &no_connector,
+ &mode));
+ FAIL_ON(!mode.specified);
+ FAIL_ON(mode.xres != 720);
+ FAIL_ON(mode.yres != 480);
+ FAIL_ON(mode.rotation_reflection != DRM_MODE_ROTATE_180);
+
+ FAIL_ON(mode.refresh_specified);
+
+ FAIL_ON(!mode.bpp_specified);
+ FAIL_ON(mode.bpp != 24);
+
+ FAIL_ON(mode.rb);
+ FAIL_ON(mode.cvt);
+ FAIL_ON(mode.interlace);
+ FAIL_ON(mode.margins);
+ FAIL_ON(mode.force != DRM_FORCE_ON);
+
+ return 0;
+}
+
+static int drm_cmdline_test_extra_and_option(void *ignored)
+{
+ struct drm_cmdline_mode mode = { };
+
+ FAIL_ON(!drm_mode_parse_command_line_for_connector("720x480e,rotate=180",
+ &no_connector,
+ &mode));
+ FAIL_ON(!mode.specified);
+ FAIL_ON(mode.xres != 720);
+ FAIL_ON(mode.yres != 480);
+ FAIL_ON(mode.rotation_reflection != DRM_MODE_ROTATE_180);
+
+ FAIL_ON(mode.refresh_specified);
+ FAIL_ON(mode.bpp_specified);
+
+ FAIL_ON(mode.rb);
+ FAIL_ON(mode.cvt);
+ FAIL_ON(mode.interlace);
+ FAIL_ON(mode.margins);
+ FAIL_ON(mode.force != DRM_FORCE_ON);
+
+ return 0;
+}
+
+static int drm_cmdline_test_freestanding_options(void *ignored)
+{
+ struct drm_cmdline_mode mode = { };
+
+ FAIL_ON(!drm_mode_parse_command_line_for_connector("margin_right=14,margin_left=24,margin_bottom=36,margin_top=42",
+ &no_connector,
+ &mode));
+ FAIL_ON(mode.specified);
+ FAIL_ON(mode.refresh_specified);
+ FAIL_ON(mode.bpp_specified);
+
+ FAIL_ON(mode.tv_margins.right != 14);
+ FAIL_ON(mode.tv_margins.left != 24);
+ FAIL_ON(mode.tv_margins.bottom != 36);
+ FAIL_ON(mode.tv_margins.top != 42);
+
+ FAIL_ON(mode.rb);
+ FAIL_ON(mode.cvt);
+ FAIL_ON(mode.interlace);
+ FAIL_ON(mode.margins);
+ FAIL_ON(mode.force != DRM_FORCE_UNSPECIFIED);
+
+ return 0;
+}
+
+static int drm_cmdline_test_freestanding_force_e_and_options(void *ignored)
+{
+ struct drm_cmdline_mode mode = { };
+
+ FAIL_ON(!drm_mode_parse_command_line_for_connector("e,margin_right=14,margin_left=24,margin_bottom=36,margin_top=42",
+ &no_connector,
+ &mode));
+ FAIL_ON(mode.specified);
+ FAIL_ON(mode.refresh_specified);
+ FAIL_ON(mode.bpp_specified);
+
+ FAIL_ON(mode.tv_margins.right != 14);
+ FAIL_ON(mode.tv_margins.left != 24);
+ FAIL_ON(mode.tv_margins.bottom != 36);
+ FAIL_ON(mode.tv_margins.top != 42);
+
+ FAIL_ON(mode.rb);
+ FAIL_ON(mode.cvt);
+ FAIL_ON(mode.interlace);
+ FAIL_ON(mode.margins);
+ FAIL_ON(mode.force != DRM_FORCE_ON);
+
+ return 0;
+}
+
+static int drm_cmdline_test_panel_orientation(void *ignored)
+{
+ struct drm_cmdline_mode mode = { };
+
+ FAIL_ON(!drm_mode_parse_command_line_for_connector("panel_orientation=upside_down",
+ &no_connector,
+ &mode));
+ FAIL_ON(mode.specified);
+ FAIL_ON(mode.refresh_specified);
+ FAIL_ON(mode.bpp_specified);
+
+ FAIL_ON(mode.panel_orientation != DRM_MODE_PANEL_ORIENTATION_BOTTOM_UP);
+
+ FAIL_ON(mode.rb);
+ FAIL_ON(mode.cvt);
+ FAIL_ON(mode.interlace);
+ FAIL_ON(mode.margins);
+ FAIL_ON(mode.force != DRM_FORCE_UNSPECIFIED);
+
+ return 0;
+}
+
#include "drm_selftest.c"
static int __init test_drm_cmdline_init(void)
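For orientation, the strings the new tests feed to drm_mode_parse_command_line_for_connector() correspond to kernel command line fragments of roughly this shape (the connector name here is illustrative):

video=HDMI-A-1:720x480-24e,rotate=180
video=HDMI-A-1:e,margin_right=14,margin_left=24,margin_bottom=36,margin_top=42
video=HDMI-A-1:panel_orientation=upside_down

The last two show the newly supported freestanding forms, where options appear without a leading mode.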
diff --git a/drivers/gpu/drm/selftests/test-drm_dp_mst_helper.c b/drivers/gpu/drm/selftests/test-drm_dp_mst_helper.c
index af2b2de65316..bd990d178765 100644
--- a/drivers/gpu/drm/selftests/test-drm_dp_mst_helper.c
+++ b/drivers/gpu/drm/selftests/test-drm_dp_mst_helper.c
@@ -18,15 +18,19 @@ int igt_dp_mst_calc_pbn_mode(void *ignored)
int rate;
int bpp;
int expected;
+ bool dsc;
} test_params[] = {
- { 154000, 30, 689 },
- { 234000, 30, 1047 },
- { 297000, 24, 1063 },
+ { 154000, 30, 689, false },
+ { 234000, 30, 1047, false },
+ { 297000, 24, 1063, false },
+ { 332880, 24, 50, true },
+ { 324540, 24, 49, true },
};
for (i = 0; i < ARRAY_SIZE(test_params); i++) {
pbn = drm_dp_calc_pbn_mode(test_params[i].rate,
- test_params[i].bpp);
+ test_params[i].bpp,
+ test_params[i].dsc);
FAIL(pbn != test_params[i].expected,
"Expected PBN %d for clock %d bpp %d, got %d\n",
test_params[i].expected, test_params[i].rate,
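The two new rows exercise the DSC branch of drm_dp_calc_pbn_mode(), whose third parameter this series adds. Judging by the expected values, the bpp argument is interpreted as a compressed rate in 1/16 bpp units when dsc is true; treat the sketch below as a reading aid inferred from the test data, not authoritative documentation:

#include <drm/drm_dp_mst_helper.h>

static void pbn_usage_sketch(void)
{
	/* Uncompressed: bpp is whole bits per pixel. */
	int pbn_plain = drm_dp_calc_pbn_mode(154000, 30, false);	/* 689 */

	/* DSC: bpp appears to be in 1/16 bpp units, so 24 means 1.5 bpp. */
	int pbn_dsc = drm_dp_calc_pbn_mode(332880, 24, true);		/* 50 */

	(void)pbn_plain;
	(void)pbn_dsc;
}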
diff --git a/drivers/gpu/drm/selftests/test-drm_modeset_common.h b/drivers/gpu/drm/selftests/test-drm_modeset_common.h
index 0fcb8bbc6a1b..cfb51d8da2bc 100644
--- a/drivers/gpu/drm/selftests/test-drm_modeset_common.h
+++ b/drivers/gpu/drm/selftests/test-drm_modeset_common.h
@@ -3,6 +3,9 @@
#ifndef __TEST_DRM_MODESET_COMMON_H__
#define __TEST_DRM_MODESET_COMMON_H__
+#include <linux/errno.h>
+#include <linux/printk.h>
+
#define FAIL(test, msg, ...) \
do { \
if (test) { \
@@ -13,6 +16,10 @@
#define FAIL_ON(x) FAIL((x), "%s", "FAIL_ON(" __stringify(x) ")\n")
+int igt_drm_rect_clip_scaled_div_by_zero(void *ignored);
+int igt_drm_rect_clip_scaled_not_clipped(void *ignored);
+int igt_drm_rect_clip_scaled_clipped(void *ignored);
+int igt_drm_rect_clip_scaled_signed_vs_unsigned(void *ignored);
int igt_check_plane_state(void *ignored);
int igt_check_drm_format_block_width(void *ignored);
int igt_check_drm_format_block_height(void *ignored);
diff --git a/drivers/gpu/drm/selftests/test-drm_rect.c b/drivers/gpu/drm/selftests/test-drm_rect.c
new file mode 100644
index 000000000000..3a5ff38321f4
--- /dev/null
+++ b/drivers/gpu/drm/selftests/test-drm_rect.c
@@ -0,0 +1,223 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Test cases for the drm_rect functions
+ */
+
+#define pr_fmt(fmt) "drm_rect: " fmt
+
+#include <linux/limits.h>
+
+#include <drm/drm_rect.h>
+
+#include "test-drm_modeset_common.h"
+
+int igt_drm_rect_clip_scaled_div_by_zero(void *ignored)
+{
+ struct drm_rect src, dst, clip;
+ bool visible;
+
+ /*
+ * Make sure we don't divide by zero when dst
+ * width/height is zero and dst and clip do not intersect.
+ */
+ drm_rect_init(&src, 0, 0, 0, 0);
+ drm_rect_init(&dst, 0, 0, 0, 0);
+ drm_rect_init(&clip, 1, 1, 1, 1);
+ visible = drm_rect_clip_scaled(&src, &dst, &clip);
+ FAIL(visible, "Destination should not be visible\n");
+ FAIL(drm_rect_visible(&src), "Source should not be visible\n");
+
+ drm_rect_init(&src, 0, 0, 0, 0);
+ drm_rect_init(&dst, 3, 3, 0, 0);
+ drm_rect_init(&clip, 1, 1, 1, 1);
+ visible = drm_rect_clip_scaled(&src, &dst, &clip);
+ FAIL(visible, "Destination should not be visible\n");
+ FAIL(drm_rect_visible(&src), "Source should not be visible\n");
+
+ return 0;
+}
+
+int igt_drm_rect_clip_scaled_not_clipped(void *ignored)
+{
+ struct drm_rect src, dst, clip;
+ bool visible;
+
+ /* 1:1 scaling */
+ drm_rect_init(&src, 0, 0, 1 << 16, 1 << 16);
+ drm_rect_init(&dst, 0, 0, 1, 1);
+ drm_rect_init(&clip, 0, 0, 1, 1);
+
+ visible = drm_rect_clip_scaled(&src, &dst, &clip);
+
+ FAIL(src.x1 != 0 || src.x2 != 1 << 16 ||
+ src.y1 != 0 || src.y2 != 1 << 16,
+ "Source badly clipped\n");
+ FAIL(dst.x1 != 0 || dst.x2 != 1 ||
+ dst.y1 != 0 || dst.y2 != 1,
+ "Destination badly clipped\n");
+ FAIL(!visible, "Destination should be visible\n");
+ FAIL(!drm_rect_visible(&src), "Source should be visible\n");
+
+ /* 2:1 scaling */
+ drm_rect_init(&src, 0, 0, 2 << 16, 2 << 16);
+ drm_rect_init(&dst, 0, 0, 1, 1);
+ drm_rect_init(&clip, 0, 0, 1, 1);
+
+ visible = drm_rect_clip_scaled(&src, &dst, &clip);
+
+ FAIL(src.x1 != 0 || src.x2 != 2 << 16 ||
+ src.y1 != 0 || src.y2 != 2 << 16,
+ "Source badly clipped\n");
+ FAIL(dst.x1 != 0 || dst.x2 != 1 ||
+ dst.y1 != 0 || dst.y2 != 1,
+ "Destination badly clipped\n");
+ FAIL(!visible, "Destination should be visible\n");
+ FAIL(!drm_rect_visible(&src), "Source should be visible\n");
+
+ /* 1:2 scaling */
+ drm_rect_init(&src, 0, 0, 1 << 16, 1 << 16);
+ drm_rect_init(&dst, 0, 0, 2, 2);
+ drm_rect_init(&clip, 0, 0, 2, 2);
+
+ visible = drm_rect_clip_scaled(&src, &dst, &clip);
+
+ FAIL(src.x1 != 0 || src.x2 != 1 << 16 ||
+ src.y1 != 0 || src.y2 != 1 << 16,
+ "Source badly clipped\n");
+ FAIL(dst.x1 != 0 || dst.x2 != 2 ||
+ dst.y1 != 0 || dst.y2 != 2,
+ "Destination badly clipped\n");
+ FAIL(!visible, "Destination should be visible\n");
+ FAIL(!drm_rect_visible(&src), "Source should be visible\n");
+
+ return 0;
+}
+
+int igt_drm_rect_clip_scaled_clipped(void *ignored)
+{
+ struct drm_rect src, dst, clip;
+ bool visible;
+
+ /* 1:1 scaling top/left clip */
+ drm_rect_init(&src, 0, 0, 2 << 16, 2 << 16);
+ drm_rect_init(&dst, 0, 0, 2, 2);
+ drm_rect_init(&clip, 0, 0, 1, 1);
+
+ visible = drm_rect_clip_scaled(&src, &dst, &clip);
+
+ FAIL(src.x1 != 0 || src.x2 != 1 << 16 ||
+ src.y1 != 0 || src.y2 != 1 << 16,
+ "Source badly clipped\n");
+ FAIL(dst.x1 != 0 || dst.x2 != 1 ||
+ dst.y1 != 0 || dst.y2 != 1,
+ "Destination badly clipped\n");
+ FAIL(!visible, "Destination should be visible\n");
+ FAIL(!drm_rect_visible(&src), "Source should be visible\n");
+
+ /* 1:1 scaling bottom/right clip */
+ drm_rect_init(&src, 0, 0, 2 << 16, 2 << 16);
+ drm_rect_init(&dst, 0, 0, 2, 2);
+ drm_rect_init(&clip, 1, 1, 1, 1);
+
+ visible = drm_rect_clip_scaled(&src, &dst, &clip);
+
+ FAIL(src.x1 != 1 << 16 || src.x2 != 2 << 16 ||
+ src.y1 != 1 << 16 || src.y2 != 2 << 16,
+ "Source badly clipped\n");
+ FAIL(dst.x1 != 1 || dst.x2 != 2 ||
+ dst.y1 != 1 || dst.y2 != 2,
+ "Destination badly clipped\n");
+ FAIL(!visible, "Destination should be visible\n");
+ FAIL(!drm_rect_visible(&src), "Source should be visible\n");
+
+ /* 2:1 scaling top/left clip */
+ drm_rect_init(&src, 0, 0, 4 << 16, 4 << 16);
+ drm_rect_init(&dst, 0, 0, 2, 2);
+ drm_rect_init(&clip, 0, 0, 1, 1);
+
+ visible = drm_rect_clip_scaled(&src, &dst, &clip);
+
+ FAIL(src.x1 != 0 || src.x2 != 2 << 16 ||
+ src.y1 != 0 || src.y2 != 2 << 16,
+ "Source badly clipped\n");
+ FAIL(dst.x1 != 0 || dst.x2 != 1 ||
+ dst.y1 != 0 || dst.y2 != 1,
+ "Destination badly clipped\n");
+ FAIL(!visible, "Destination should be visible\n");
+ FAIL(!drm_rect_visible(&src), "Source should be visible\n");
+
+ /* 2:1 scaling bottom/right clip */
+ drm_rect_init(&src, 0, 0, 4 << 16, 4 << 16);
+ drm_rect_init(&dst, 0, 0, 2, 2);
+ drm_rect_init(&clip, 1, 1, 1, 1);
+
+ visible = drm_rect_clip_scaled(&src, &dst, &clip);
+
+ FAIL(src.x1 != 2 << 16 || src.x2 != 4 << 16 ||
+ src.y1 != 2 << 16 || src.y2 != 4 << 16,
+ "Source badly clipped\n");
+ FAIL(dst.x1 != 1 || dst.x2 != 2 ||
+ dst.y1 != 1 || dst.y2 != 2,
+ "Destination badly clipped\n");
+ FAIL(!visible, "Destination should be visible\n");
+ FAIL(!drm_rect_visible(&src), "Source should be visible\n");
+
+ /* 1:2 scaling top/left clip */
+ drm_rect_init(&src, 0, 0, 2 << 16, 2 << 16);
+ drm_rect_init(&dst, 0, 0, 4, 4);
+ drm_rect_init(&clip, 0, 0, 2, 2);
+
+ visible = drm_rect_clip_scaled(&src, &dst, &clip);
+
+ FAIL(src.x1 != 0 || src.x2 != 1 << 16 ||
+ src.y1 != 0 || src.y2 != 1 << 16,
+ "Source badly clipped\n");
+ FAIL(dst.x1 != 0 || dst.x2 != 2 ||
+ dst.y1 != 0 || dst.y2 != 2,
+ "Destination badly clipped\n");
+ FAIL(!visible, "Destination should be visible\n");
+ FAIL(!drm_rect_visible(&src), "Source should be visible\n");
+
+ /* 1:2 scaling bottom/right clip */
+ drm_rect_init(&src, 0, 0, 2 << 16, 2 << 16);
+ drm_rect_init(&dst, 0, 0, 4, 4);
+ drm_rect_init(&clip, 2, 2, 2, 2);
+
+ visible = drm_rect_clip_scaled(&src, &dst, &clip);
+
+ FAIL(src.x1 != 1 << 16 || src.x2 != 2 << 16 ||
+ src.y1 != 1 << 16 || src.y2 != 2 << 16,
+ "Source badly clipped\n");
+ FAIL(dst.x1 != 2 || dst.x2 != 4 ||
+ dst.y1 != 2 || dst.y2 != 4,
+ "Destination badly clipped\n");
+ FAIL(!visible, "Destination should be visible\n");
+ FAIL(!drm_rect_visible(&src), "Source should be visible\n");
+
+ return 0;
+}
+
+int igt_drm_rect_clip_scaled_signed_vs_unsigned(void *ignored)
+{
+ struct drm_rect src, dst, clip;
+ bool visible;
+
+ /*
+ * 'clip.x2 - dst.x1 >= dst width' could result in a negative
+ * src rectangle width, which is no longer expected by the
+ * code as it uses unsigned types. This could lead to
+ * the clipped source rectangle appearing visible when it
+ * should have been fully clipped. Make sure both rectangles
+ * end up invisible.
+ */
+ drm_rect_init(&src, 0, 0, INT_MAX, INT_MAX);
+ drm_rect_init(&dst, 0, 0, 2, 2);
+ drm_rect_init(&clip, 3, 3, 1, 1);
+
+ visible = drm_rect_clip_scaled(&src, &dst, &clip);
+
+ FAIL(visible, "Destination should not be visible\n");
+ FAIL(drm_rect_visible(&src), "Source should not be visible\n");
+
+ return 0;
+}
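The new file also pins down the drm_rect_clip_scaled() contract: the source rectangle is in 16.16 fixed point, destination and clip are in whole pixels, and clipping the destination shrinks the source by the current scale factor. A minimal usage sketch distilled from the cases above:

#include <drm/drm_rect.h>

static bool clip_to_top_left_pixel(void)
{
	struct drm_rect src, dst, clip;

	drm_rect_init(&src, 0, 0, 2 << 16, 2 << 16);	/* 2x2 in 16.16 fixed point */
	drm_rect_init(&dst, 0, 0, 2, 2);		/* 2x2 pixels, so 1:1 scale */
	drm_rect_init(&clip, 0, 0, 1, 1);		/* keep only the top-left pixel */

	/* On return, src has been scaled down to 1x1 (in 16.16) to match dst. */
	return drm_rect_clip_scaled(&src, &dst, &clip);
}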
diff --git a/drivers/gpu/drm/sis/sis_drv.c b/drivers/gpu/drm/sis/sis_drv.c
index ee3801201ecc..2c54b33abb54 100644
--- a/drivers/gpu/drm/sis/sis_drv.c
+++ b/drivers/gpu/drm/sis/sis_drv.c
@@ -26,10 +26,10 @@
*/
#include <linux/module.h>
+#include <linux/pci.h>
#include <drm/drm_drv.h>
#include <drm/drm_file.h>
-#include <drm/drm_pci.h>
#include <drm/drm_pciids.h>
#include <drm/sis_drm.h>
diff --git a/drivers/gpu/drm/sti/sti_dvo.c b/drivers/gpu/drm/sti/sti_dvo.c
index 68289b0b063a..b2778ec1cdd7 100644
--- a/drivers/gpu/drm/sti/sti_dvo.c
+++ b/drivers/gpu/drm/sti/sti_dvo.c
@@ -339,7 +339,7 @@ static int sti_dvo_connector_get_modes(struct drm_connector *connector)
struct sti_dvo *dvo = dvo_connector->dvo;
if (dvo->panel)
- return drm_panel_get_modes(dvo->panel);
+ return drm_panel_get_modes(dvo->panel, connector);
return 0;
}
@@ -534,7 +534,7 @@ static int sti_dvo_probe(struct platform_device *pdev)
DRM_ERROR("Invalid dvo resource\n");
return -ENOMEM;
}
- dvo->regs = devm_ioremap_nocache(dev, res->start,
+ dvo->regs = devm_ioremap(dev, res->start,
resource_size(res));
if (!dvo->regs)
return -ENOMEM;
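The same mechanical conversion repeats across the sti files below: devm_ioremap_nocache() is going away because devm_ioremap() already maps MMIO uncached on all architectures, so the substitution is behavior-preserving. The shared probe pattern, as a sketch:

#include <linux/io.h>
#include <linux/platform_device.h>

static void __iomem *map_mmio(struct platform_device *pdev)
{
	struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0);

	if (!res)
		return NULL;

	/* Same semantics as the removed devm_ioremap_nocache(). */
	return devm_ioremap(&pdev->dev, res->start, resource_size(res));
}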
diff --git a/drivers/gpu/drm/sti/sti_hda.c b/drivers/gpu/drm/sti/sti_hda.c
index 8f7bf33815fd..2bb32009d117 100644
--- a/drivers/gpu/drm/sti/sti_hda.c
+++ b/drivers/gpu/drm/sti/sti_hda.c
@@ -759,14 +759,14 @@ static int sti_hda_probe(struct platform_device *pdev)
DRM_ERROR("Invalid hda resource\n");
return -ENOMEM;
}
- hda->regs = devm_ioremap_nocache(dev, res->start, resource_size(res));
+ hda->regs = devm_ioremap(dev, res->start, resource_size(res));
if (!hda->regs)
return -ENOMEM;
res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
"video-dacs-ctrl");
if (res) {
- hda->video_dacs_ctrl = devm_ioremap_nocache(dev, res->start,
+ hda->video_dacs_ctrl = devm_ioremap(dev, res->start,
resource_size(res));
if (!hda->video_dacs_ctrl)
return -ENOMEM;
diff --git a/drivers/gpu/drm/sti/sti_hdmi.c b/drivers/gpu/drm/sti/sti_hdmi.c
index 814560ead4e1..64ed102033c8 100644
--- a/drivers/gpu/drm/sti/sti_hdmi.c
+++ b/drivers/gpu/drm/sti/sti_hdmi.c
@@ -1393,7 +1393,7 @@ static int sti_hdmi_probe(struct platform_device *pdev)
ret = -ENOMEM;
goto release_adapter;
}
- hdmi->regs = devm_ioremap_nocache(dev, res->start, resource_size(res));
+ hdmi->regs = devm_ioremap(dev, res->start, resource_size(res));
if (!hdmi->regs) {
ret = -ENOMEM;
goto release_adapter;
diff --git a/drivers/gpu/drm/sti/sti_tvout.c b/drivers/gpu/drm/sti/sti_tvout.c
index 5767e93dd1cd..c36a8da373cb 100644
--- a/drivers/gpu/drm/sti/sti_tvout.c
+++ b/drivers/gpu/drm/sti/sti_tvout.c
@@ -860,7 +860,7 @@ static int sti_tvout_probe(struct platform_device *pdev)
DRM_ERROR("Invalid glue resource\n");
return -ENOMEM;
}
- tvout->regs = devm_ioremap_nocache(dev, res->start, resource_size(res));
+ tvout->regs = devm_ioremap(dev, res->start, resource_size(res));
if (!tvout->regs)
return -ENOMEM;
diff --git a/drivers/gpu/drm/sti/sti_vtg.c b/drivers/gpu/drm/sti/sti_vtg.c
index 0b17ac8a3faa..5e5f82b6a5d9 100644
--- a/drivers/gpu/drm/sti/sti_vtg.c
+++ b/drivers/gpu/drm/sti/sti_vtg.c
@@ -393,7 +393,7 @@ static int vtg_probe(struct platform_device *pdev)
DRM_ERROR("Get memory resource failed\n");
return -ENOMEM;
}
- vtg->regs = devm_ioremap_nocache(dev, res->start, resource_size(res));
+ vtg->regs = devm_ioremap(dev, res->start, resource_size(res));
if (!vtg->regs) {
DRM_ERROR("failed to remap I/O memory\n");
return -ENOMEM;
diff --git a/drivers/gpu/drm/stm/dw_mipi_dsi-stm.c b/drivers/gpu/drm/stm/dw_mipi_dsi-stm.c
index 514efefb0016..4b165635b2d4 100644
--- a/drivers/gpu/drm/stm/dw_mipi_dsi-stm.c
+++ b/drivers/gpu/drm/stm/dw_mipi_dsi-stm.c
@@ -309,11 +309,24 @@ dw_mipi_dsi_get_lane_mbps(void *priv_data, const struct drm_display_mode *mode,
return 0;
}
+static int
+dw_mipi_dsi_phy_get_timing(void *priv_data, unsigned int lane_mbps,
+ struct dw_mipi_dsi_dphy_timing *timing)
+{
+ timing->clk_hs2lp = 0x40;
+ timing->clk_lp2hs = 0x40;
+ timing->data_hs2lp = 0x40;
+ timing->data_lp2hs = 0x40;
+
+ return 0;
+}
+
static const struct dw_mipi_dsi_phy_ops dw_mipi_dsi_stm_phy_ops = {
.init = dw_mipi_dsi_phy_init,
.power_on = dw_mipi_dsi_phy_power_on,
.power_off = dw_mipi_dsi_phy_power_off,
.get_lane_mbps = dw_mipi_dsi_get_lane_mbps,
+ .get_timing = dw_mipi_dsi_phy_get_timing,
};
static struct dw_mipi_dsi_plat_data dw_mipi_dsi_stm_plat_data = {
diff --git a/drivers/gpu/drm/stm/ltdc.c b/drivers/gpu/drm/stm/ltdc.c
index 5b51298921cf..c2815e8ae1da 100644
--- a/drivers/gpu/drm/stm/ltdc.c
+++ b/drivers/gpu/drm/stm/ltdc.c
@@ -437,9 +437,6 @@ static void ltdc_crtc_atomic_enable(struct drm_crtc *crtc,
/* Commit shadow registers = update planes at next vblank */
reg_set(ldev->regs, LTDC_SRCR, SRCR_VBR);
- /* Enable LTDC */
- reg_set(ldev->regs, LTDC_GCR, GCR_LTDCEN);
-
drm_crtc_vblank_on(crtc);
}
@@ -453,9 +450,6 @@ static void ltdc_crtc_atomic_disable(struct drm_crtc *crtc,
drm_crtc_vblank_off(crtc);
- /* disable LTDC */
- reg_clear(ldev->regs, LTDC_GCR, GCR_LTDCEN);
-
/* disable IRQ */
reg_clear(ldev->regs, LTDC_IER, IER_RRIE | IER_FUIE | IER_TERRIE);
@@ -1044,9 +1038,13 @@ static const struct drm_encoder_funcs ltdc_encoder_funcs = {
static void ltdc_encoder_disable(struct drm_encoder *encoder)
{
struct drm_device *ddev = encoder->dev;
+ struct ltdc_device *ldev = ddev->dev_private;
DRM_DEBUG_DRIVER("\n");
+ /* Disable LTDC */
+ reg_clear(ldev->regs, LTDC_GCR, GCR_LTDCEN);
+
/* Set to sleep state the pinctrl whatever type of encoder */
pinctrl_pm_select_sleep_state(ddev->dev);
}
@@ -1054,6 +1052,19 @@ static void ltdc_encoder_disable(struct drm_encoder *encoder)
static void ltdc_encoder_enable(struct drm_encoder *encoder)
{
struct drm_device *ddev = encoder->dev;
+ struct ltdc_device *ldev = ddev->dev_private;
+
+ DRM_DEBUG_DRIVER("\n");
+
+ /* Enable LTDC */
+ reg_set(ldev->regs, LTDC_GCR, GCR_LTDCEN);
+}
+
+static void ltdc_encoder_mode_set(struct drm_encoder *encoder,
+ struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
+{
+ struct drm_device *ddev = encoder->dev;
DRM_DEBUG_DRIVER("\n");
@@ -1069,6 +1080,7 @@ static void ltdc_encoder_enable(struct drm_encoder *encoder)
static const struct drm_encoder_helper_funcs ltdc_encoder_helper_funcs = {
.disable = ltdc_encoder_disable,
.enable = ltdc_encoder_enable,
+ .mode_set = ltdc_encoder_mode_set,
};
static int ltdc_encoder_init(struct drm_device *ddev, struct drm_bridge *bridge)
diff --git a/drivers/gpu/drm/sun4i/Kconfig b/drivers/gpu/drm/sun4i/Kconfig
index 37e90e42943f..5755f0432e77 100644
--- a/drivers/gpu/drm/sun4i/Kconfig
+++ b/drivers/gpu/drm/sun4i/Kconfig
@@ -17,18 +17,18 @@ config DRM_SUN4I
if DRM_SUN4I
config DRM_SUN4I_HDMI
- tristate "Allwinner A10 HDMI Controller Support"
- default DRM_SUN4I
- help
+ tristate "Allwinner A10 HDMI Controller Support"
+ default DRM_SUN4I
+ help
Choose this option if you have an Allwinner SoC with an HDMI
controller.
config DRM_SUN4I_HDMI_CEC
- bool "Allwinner A10 HDMI CEC Support"
- depends on DRM_SUN4I_HDMI
- select CEC_CORE
- select CEC_PIN
- help
+ bool "Allwinner A10 HDMI CEC Support"
+ depends on DRM_SUN4I_HDMI
+ select CEC_CORE
+ select CEC_PIN
+ help
Choose this option if you have an Allwinner SoC with an HDMI
controller and want to use CEC.
diff --git a/drivers/gpu/drm/sun4i/sun4i_backend.c b/drivers/gpu/drm/sun4i/sun4i_backend.c
index 4e29f4fe4a05..072ea113e6be 100644
--- a/drivers/gpu/drm/sun4i/sun4i_backend.c
+++ b/drivers/gpu/drm/sun4i/sun4i_backend.c
@@ -856,6 +856,13 @@ static int sun4i_backend_bind(struct device *dev, struct device *master,
ret = PTR_ERR(backend->mod_clk);
goto err_disable_bus_clk;
}
+
+ ret = clk_set_rate_exclusive(backend->mod_clk, 300000000);
+ if (ret) {
+ dev_err(dev, "Couldn't set the module clock frequency\n");
+ goto err_disable_bus_clk;
+ }
+
clk_prepare_enable(backend->mod_clk);
backend->ram_clk = devm_clk_get(dev, "ram");
@@ -932,6 +939,7 @@ static int sun4i_backend_bind(struct device *dev, struct device *master,
err_disable_ram_clk:
clk_disable_unprepare(backend->ram_clk);
err_disable_mod_clk:
+ clk_rate_exclusive_put(backend->mod_clk);
clk_disable_unprepare(backend->mod_clk);
err_disable_bus_clk:
clk_disable_unprepare(backend->bus_clk);
@@ -952,6 +960,7 @@ static void sun4i_backend_unbind(struct device *dev, struct device *master,
sun4i_backend_free_sat(dev);
clk_disable_unprepare(backend->ram_clk);
+ clk_rate_exclusive_put(backend->mod_clk);
clk_disable_unprepare(backend->mod_clk);
clk_disable_unprepare(backend->bus_clk);
reset_control_assert(backend->reset);
diff --git a/drivers/gpu/drm/sun4i/sun4i_drv.c b/drivers/gpu/drm/sun4i/sun4i_drv.c
index a5757b11b730..5ae67d526b1d 100644
--- a/drivers/gpu/drm/sun4i/sun4i_drv.c
+++ b/drivers/gpu/drm/sun4i/sun4i_drv.c
@@ -346,6 +346,27 @@ static int sun4i_drv_add_endpoints(struct device *dev,
return count;
}
+#ifdef CONFIG_PM_SLEEP
+static int sun4i_drv_drm_sys_suspend(struct device *dev)
+{
+ struct drm_device *drm = dev_get_drvdata(dev);
+
+ return drm_mode_config_helper_suspend(drm);
+}
+
+static int sun4i_drv_drm_sys_resume(struct device *dev)
+{
+ struct drm_device *drm = dev_get_drvdata(dev);
+
+ return drm_mode_config_helper_resume(drm);
+}
+#endif
+
+static const struct dev_pm_ops sun4i_drv_drm_pm_ops = {
+ SET_SYSTEM_SLEEP_PM_OPS(sun4i_drv_drm_sys_suspend,
+ sun4i_drv_drm_sys_resume)
+};
+
static int sun4i_drv_probe(struct platform_device *pdev)
{
struct component_match *match = NULL;
@@ -418,6 +439,7 @@ static struct platform_driver sun4i_drv_platform_driver = {
.driver = {
.name = "sun4i-drm",
.of_match_table = sun4i_drv_of_table,
+ .pm = &sun4i_drv_drm_pm_ops,
},
};
module_platform_driver(sun4i_drv_platform_driver);
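The suspend/resume pair added here is the stock pattern for an atomic KMS driver that keeps its drm_device in drvdata; everything is delegated to the mode config helpers. A generic sketch (the my_* names are placeholders):

#include <linux/device.h>
#include <linux/pm.h>

#include <drm/drm_modeset_helper.h>

#ifdef CONFIG_PM_SLEEP
static int my_sys_suspend(struct device *dev)
{
	return drm_mode_config_helper_suspend(dev_get_drvdata(dev));
}

static int my_sys_resume(struct device *dev)
{
	return drm_mode_config_helper_resume(dev_get_drvdata(dev));
}
#endif

static const struct dev_pm_ops my_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(my_sys_suspend, my_sys_resume)
};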
diff --git a/drivers/gpu/drm/sun4i/sun4i_layer.c b/drivers/gpu/drm/sun4i/sun4i_layer.c
index c04f4ba0d69d..acfbfd4463a1 100644
--- a/drivers/gpu/drm/sun4i/sun4i_layer.c
+++ b/drivers/gpu/drm/sun4i/sun4i_layer.c
@@ -250,11 +250,11 @@ struct drm_plane **sun4i_layers_init(struct drm_device *drm,
dev_err(drm->dev, "Couldn't initialize %s plane\n",
i ? "overlay" : "primary");
return ERR_CAST(layer);
- };
+ }
layer->id = i;
planes[i] = &layer->plane;
- };
+ }
return planes;
}
diff --git a/drivers/gpu/drm/sun4i/sun4i_lvds.c b/drivers/gpu/drm/sun4i/sun4i_lvds.c
index 25ab2ef6d545..65b7a8739666 100644
--- a/drivers/gpu/drm/sun4i/sun4i_lvds.c
+++ b/drivers/gpu/drm/sun4i/sun4i_lvds.c
@@ -43,7 +43,7 @@ static int sun4i_lvds_get_modes(struct drm_connector *connector)
struct sun4i_lvds *lvds =
drm_connector_to_sun4i_lvds(connector);
- return drm_panel_get_modes(lvds->panel);
+ return drm_panel_get_modes(lvds->panel, connector);
}
static struct drm_connector_helper_funcs sun4i_lvds_con_helper_funcs = {
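drm_panel_get_modes() now takes the connector the modes are probed for, decoupling panel drivers from any stored connector pointer; the identical one-line change appears in sti_dvo above and in sun4i_rgb and sun6i_mipi_dsi below. The new call shape, sketched with a hypothetical driver struct:

#include <drm/drm_connector.h>
#include <drm/drm_panel.h>

struct my_output {			/* placeholder driver state */
	struct drm_panel *panel;
};

static int my_get_modes(struct my_output *out, struct drm_connector *connector)
{
	/* The connector is passed through to the panel driver. */
	return drm_panel_get_modes(out->panel, connector);
}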
diff --git a/drivers/gpu/drm/sun4i/sun4i_rgb.c b/drivers/gpu/drm/sun4i/sun4i_rgb.c
index e74b9eddca01..b27f16af50f5 100644
--- a/drivers/gpu/drm/sun4i/sun4i_rgb.c
+++ b/drivers/gpu/drm/sun4i/sun4i_rgb.c
@@ -47,7 +47,7 @@ static int sun4i_rgb_get_modes(struct drm_connector *connector)
struct sun4i_rgb *rgb =
drm_connector_to_sun4i_rgb(connector);
- return drm_panel_get_modes(rgb->panel);
+ return drm_panel_get_modes(rgb->panel, connector);
}
/*
diff --git a/drivers/gpu/drm/sun4i/sun6i_drc.c b/drivers/gpu/drm/sun4i/sun6i_drc.c
index f7ab72244796..4fbe9a6b5182 100644
--- a/drivers/gpu/drm/sun4i/sun6i_drc.c
+++ b/drivers/gpu/drm/sun4i/sun6i_drc.c
@@ -56,6 +56,13 @@ static int sun6i_drc_bind(struct device *dev, struct device *master,
ret = PTR_ERR(drc->mod_clk);
goto err_disable_bus_clk;
}
+
+ ret = clk_set_rate_exclusive(drc->mod_clk, 300000000);
+ if (ret) {
+ dev_err(dev, "Couldn't set the module clock frequency\n");
+ goto err_disable_bus_clk;
+ }
+
clk_prepare_enable(drc->mod_clk);
return 0;
@@ -72,6 +79,7 @@ static void sun6i_drc_unbind(struct device *dev, struct device *master,
{
struct sun6i_drc *drc = dev_get_drvdata(dev);
+ clk_rate_exclusive_put(drc->mod_clk);
clk_disable_unprepare(drc->mod_clk);
clk_disable_unprepare(drc->bus_clk);
reset_control_assert(drc->reset);
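clk_set_rate_exclusive() both sets the 300 MHz rate and locks it against other consumers, and every successful call must eventually be balanced by clk_rate_exclusive_put(); that is why the unbind and error paths in sun4i_backend and sun6i_drc each gain a put. The pairing, sketched with placeholder names:

#include <linux/clk.h>
#include <linux/device.h>

static int grab_mod_clk(struct device *dev, struct clk *clk)
{
	int ret;

	/* Set 300 MHz and forbid other consumers from changing it. */
	ret = clk_set_rate_exclusive(clk, 300000000);
	if (ret) {
		dev_err(dev, "Couldn't set the module clock frequency\n");
		return ret;
	}

	ret = clk_prepare_enable(clk);
	if (ret)
		clk_rate_exclusive_put(clk);	/* balance on failure */

	return ret;
}

static void release_mod_clk(struct clk *clk)
{
	clk_rate_exclusive_put(clk);
	clk_disable_unprepare(clk);
}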
diff --git a/drivers/gpu/drm/sun4i/sun6i_mipi_dsi.c b/drivers/gpu/drm/sun4i/sun6i_mipi_dsi.c
index c958ca9bae63..a75fcb113172 100644
--- a/drivers/gpu/drm/sun4i/sun6i_mipi_dsi.c
+++ b/drivers/gpu/drm/sun4i/sun6i_mipi_dsi.c
@@ -795,7 +795,7 @@ static int sun6i_dsi_get_modes(struct drm_connector *connector)
{
struct sun6i_dsi *dsi = connector_to_sun6i_dsi(connector);
- return drm_panel_get_modes(dsi->panel);
+ return drm_panel_get_modes(dsi->panel, connector);
}
static struct drm_connector_helper_funcs sun6i_dsi_connector_helper_funcs = {
@@ -1081,6 +1081,7 @@ static const struct component_ops sun6i_dsi_ops = {
static int sun6i_dsi_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
+ const char *bus_clk_name = NULL;
struct sun6i_dsi *dsi;
struct resource *res;
void __iomem *base;
@@ -1094,6 +1095,10 @@ static int sun6i_dsi_probe(struct platform_device *pdev)
dsi->host.ops = &sun6i_dsi_host_ops;
dsi->host.dev = dev;
+ if (of_device_is_compatible(dev->of_node,
+ "allwinner,sun6i-a31-mipi-dsi"))
+ bus_clk_name = "bus";
+
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
base = devm_ioremap_resource(dev, res);
if (IS_ERR(base)) {
@@ -1107,23 +1112,36 @@ static int sun6i_dsi_probe(struct platform_device *pdev)
return PTR_ERR(dsi->regulator);
}
- dsi->regs = devm_regmap_init_mmio_clk(dev, "bus", base,
- &sun6i_dsi_regmap_config);
- if (IS_ERR(dsi->regs)) {
- dev_err(dev, "Couldn't create the DSI encoder regmap\n");
- return PTR_ERR(dsi->regs);
- }
-
dsi->reset = devm_reset_control_get_shared(dev, NULL);
if (IS_ERR(dsi->reset)) {
dev_err(dev, "Couldn't get our reset line\n");
return PTR_ERR(dsi->reset);
}
- dsi->mod_clk = devm_clk_get(dev, "mod");
- if (IS_ERR(dsi->mod_clk)) {
- dev_err(dev, "Couldn't get the DSI mod clock\n");
- return PTR_ERR(dsi->mod_clk);
+ dsi->regs = devm_regmap_init_mmio(dev, base, &sun6i_dsi_regmap_config);
+ if (IS_ERR(dsi->regs)) {
+ dev_err(dev, "Couldn't init regmap\n");
+ return PTR_ERR(dsi->regs);
+ }
+
+ dsi->bus_clk = devm_clk_get(dev, bus_clk_name);
+ if (IS_ERR(dsi->bus_clk)) {
+ dev_err(dev, "Couldn't get the DSI bus clock\n");
+ return PTR_ERR(dsi->bus_clk);
+ }
+
+ ret = regmap_mmio_attach_clk(dsi->regs, dsi->bus_clk);
+ if (ret)
+ return ret;
+
+ if (of_device_is_compatible(dev->of_node,
+ "allwinner,sun6i-a31-mipi-dsi")) {
+ dsi->mod_clk = devm_clk_get(dev, "mod");
+ if (IS_ERR(dsi->mod_clk)) {
+ dev_err(dev, "Couldn't get the DSI mod clock\n");
+ ret = PTR_ERR(dsi->mod_clk);
+ goto err_attach_clk;
+ }
}
/*
@@ -1161,6 +1179,9 @@ err_pm_disable:
pm_runtime_disable(dev);
err_unprotect_clk:
clk_rate_exclusive_put(dsi->mod_clk);
+err_attach_clk:
+ if (!IS_ERR(dsi->bus_clk))
+ regmap_mmio_detach_clk(dsi->regs);
return ret;
}
@@ -1174,6 +1195,9 @@ static int sun6i_dsi_remove(struct platform_device *pdev)
pm_runtime_disable(dev);
clk_rate_exclusive_put(dsi->mod_clk);
+ if (!IS_ERR(dsi->bus_clk))
+ regmap_mmio_detach_clk(dsi->regs);
+
return 0;
}
@@ -1232,6 +1256,7 @@ static const struct dev_pm_ops sun6i_dsi_pm_ops = {
static const struct of_device_id sun6i_dsi_of_table[] = {
{ .compatible = "allwinner,sun6i-a31-mipi-dsi" },
+ { .compatible = "allwinner,sun50i-a64-mipi-dsi" },
{ }
};
MODULE_DEVICE_TABLE(of, sun6i_dsi_of_table);
diff --git a/drivers/gpu/drm/sun4i/sun8i_mixer.c b/drivers/gpu/drm/sun4i/sun8i_mixer.c
index 8b803eb903b8..7c24f8f832a5 100644
--- a/drivers/gpu/drm/sun4i/sun8i_mixer.c
+++ b/drivers/gpu/drm/sun4i/sun8i_mixer.c
@@ -286,10 +286,10 @@ static struct drm_plane **sun8i_layers_init(struct drm_device *drm,
dev_err(drm->dev,
"Couldn't initialize overlay plane\n");
return ERR_CAST(layer);
- };
+ }
planes[i] = &layer->plane;
- };
+ }
for (i = 0; i < mixer->cfg->ui_num; i++) {
struct sun8i_ui_layer *layer;
@@ -299,10 +299,10 @@ static struct drm_plane **sun8i_layers_init(struct drm_device *drm,
dev_err(drm->dev, "Couldn't initialize %s plane\n",
i ? "overlay" : "primary");
return ERR_CAST(layer);
- };
+ }
planes[mixer->cfg->vi_num + i] = &layer->plane;
- };
+ }
return planes;
}
diff --git a/drivers/gpu/drm/tdfx/tdfx_drv.c b/drivers/gpu/drm/tdfx/tdfx_drv.c
index c243af156ee7..ab699bf0ac5c 100644
--- a/drivers/gpu/drm/tdfx/tdfx_drv.c
+++ b/drivers/gpu/drm/tdfx/tdfx_drv.c
@@ -31,12 +31,12 @@
*/
#include <linux/module.h>
+#include <linux/pci.h>
#include <drm/drm_drv.h>
#include <drm/drm_file.h>
#include <drm/drm_ioctl.h>
#include <drm/drm_legacy.h>
-#include <drm/drm_pci.h>
#include <drm/drm_pciids.h>
#include "tdfx_drv.h"
diff --git a/drivers/gpu/drm/tegra/dc.c b/drivers/gpu/drm/tegra/dc.c
index 714af052fbef..7c70fd31a4c2 100644
--- a/drivers/gpu/drm/tegra/dc.c
+++ b/drivers/gpu/drm/tegra/dc.c
@@ -1727,6 +1727,7 @@ static void tegra_crtc_atomic_disable(struct drm_crtc *crtc,
{
struct tegra_dc *dc = to_tegra_dc(crtc);
u32 value;
+ int err;
if (!tegra_dc_idle(dc)) {
tegra_dc_stop(dc);
@@ -1773,7 +1774,9 @@ static void tegra_crtc_atomic_disable(struct drm_crtc *crtc,
spin_unlock_irq(&crtc->dev->event_lock);
- pm_runtime_put_sync(dc->dev);
+ err = host1x_client_suspend(&dc->client);
+ if (err < 0)
+ dev_err(dc->dev, "failed to suspend: %d\n", err);
}
static void tegra_crtc_atomic_enable(struct drm_crtc *crtc,
@@ -1783,8 +1786,13 @@ static void tegra_crtc_atomic_enable(struct drm_crtc *crtc,
struct tegra_dc_state *state = to_dc_state(crtc->state);
struct tegra_dc *dc = to_tegra_dc(crtc);
u32 value;
+ int err;
- pm_runtime_get_sync(dc->dev);
+ err = host1x_client_resume(&dc->client);
+ if (err < 0) {
+ dev_err(dc->dev, "failed to resume: %d\n", err);
+ return;
+ }
/* initialize display controller */
if (dc->syncpt) {
@@ -1996,7 +2004,7 @@ static bool tegra_dc_has_window_groups(struct tegra_dc *dc)
static int tegra_dc_init(struct host1x_client *client)
{
- struct drm_device *drm = dev_get_drvdata(client->parent);
+ struct drm_device *drm = dev_get_drvdata(client->host);
unsigned long flags = HOST1X_SYNCPT_CLIENT_MANAGED;
struct tegra_dc *dc = host1x_client_to_dc(client);
struct tegra_drm *tegra = drm->dev_private;
@@ -2012,6 +2020,15 @@ static int tegra_dc_init(struct host1x_client *client)
if (!tegra_dc_has_window_groups(dc))
return 0;
+ /*
+ * Set the display hub as the host1x client parent for the display
+ * controller. This is needed for the runtime reference counting that
+ * ensures the display hub is always powered when any of the display
+ * controllers are.
+ */
+ if (dc->soc->has_nvdisplay)
+ client->parent = &tegra->hub->client;
+
dc->syncpt = host1x_syncpt_request(client, flags);
if (!dc->syncpt)
dev_warn(dc->dev, "failed to allocate syncpoint\n");
@@ -2077,9 +2094,9 @@ static int tegra_dc_init(struct host1x_client *client)
/*
* Inherit the DMA parameters (such as maximum segment size) from the
- * parent device.
+ * parent host1x device.
*/
- client->dev->dma_parms = client->parent->dma_parms;
+ client->dev->dma_parms = client->host->dma_parms;
return 0;
@@ -2121,9 +2138,74 @@ static int tegra_dc_exit(struct host1x_client *client)
return 0;
}
+static int tegra_dc_runtime_suspend(struct host1x_client *client)
+{
+ struct tegra_dc *dc = host1x_client_to_dc(client);
+ struct device *dev = client->dev;
+ int err;
+
+ err = reset_control_assert(dc->rst);
+ if (err < 0) {
+ dev_err(dev, "failed to assert reset: %d\n", err);
+ return err;
+ }
+
+ if (dc->soc->has_powergate)
+ tegra_powergate_power_off(dc->powergate);
+
+ clk_disable_unprepare(dc->clk);
+ pm_runtime_put_sync(dev);
+
+ return 0;
+}
+
+static int tegra_dc_runtime_resume(struct host1x_client *client)
+{
+ struct tegra_dc *dc = host1x_client_to_dc(client);
+ struct device *dev = client->dev;
+ int err;
+
+ err = pm_runtime_get_sync(dev);
+ if (err < 0) {
+ dev_err(dev, "failed to get runtime PM: %d\n", err);
+ return err;
+ }
+
+ if (dc->soc->has_powergate) {
+ err = tegra_powergate_sequence_power_up(dc->powergate, dc->clk,
+ dc->rst);
+ if (err < 0) {
+ dev_err(dev, "failed to power partition: %d\n", err);
+ goto put_rpm;
+ }
+ } else {
+ err = clk_prepare_enable(dc->clk);
+ if (err < 0) {
+ dev_err(dev, "failed to enable clock: %d\n", err);
+ goto put_rpm;
+ }
+
+ err = reset_control_deassert(dc->rst);
+ if (err < 0) {
+ dev_err(dev, "failed to deassert reset: %d\n", err);
+ goto disable_clk;
+ }
+ }
+
+ return 0;
+
+disable_clk:
+ clk_disable_unprepare(dc->clk);
+put_rpm:
+ pm_runtime_put_sync(dev);
+ return err;
+}
+
static const struct host1x_client_ops dc_client_ops = {
.init = tegra_dc_init,
.exit = tegra_dc_exit,
+ .suspend = tegra_dc_runtime_suspend,
+ .resume = tegra_dc_runtime_resume,
};
static const struct tegra_dc_soc_info tegra20_dc_soc_info = {
@@ -2535,65 +2617,10 @@ static int tegra_dc_remove(struct platform_device *pdev)
return 0;
}
-#ifdef CONFIG_PM
-static int tegra_dc_suspend(struct device *dev)
-{
- struct tegra_dc *dc = dev_get_drvdata(dev);
- int err;
-
- err = reset_control_assert(dc->rst);
- if (err < 0) {
- dev_err(dev, "failed to assert reset: %d\n", err);
- return err;
- }
-
- if (dc->soc->has_powergate)
- tegra_powergate_power_off(dc->powergate);
-
- clk_disable_unprepare(dc->clk);
-
- return 0;
-}
-
-static int tegra_dc_resume(struct device *dev)
-{
- struct tegra_dc *dc = dev_get_drvdata(dev);
- int err;
-
- if (dc->soc->has_powergate) {
- err = tegra_powergate_sequence_power_up(dc->powergate, dc->clk,
- dc->rst);
- if (err < 0) {
- dev_err(dev, "failed to power partition: %d\n", err);
- return err;
- }
- } else {
- err = clk_prepare_enable(dc->clk);
- if (err < 0) {
- dev_err(dev, "failed to enable clock: %d\n", err);
- return err;
- }
-
- err = reset_control_deassert(dc->rst);
- if (err < 0) {
- dev_err(dev, "failed to deassert reset: %d\n", err);
- return err;
- }
- }
-
- return 0;
-}
-#endif
-
-static const struct dev_pm_ops tegra_dc_pm_ops = {
- SET_RUNTIME_PM_OPS(tegra_dc_suspend, tegra_dc_resume, NULL)
-};
-
struct platform_driver tegra_dc_driver = {
.driver = {
.name = "tegra-dc",
.of_match_table = tegra_dc_of_match,
- .pm = &tegra_dc_pm_ops,
},
.probe = tegra_dc_probe,
.remove = tegra_dc_remove,
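Across dc, dsi and hdmi this series moves the runtime PM callbacks out of dev_pm_ops and into host1x_client_ops, letting host1x reference-count power along the client hierarchy (the display hub is made the DC's parent above for exactly this reason); call sites switch from pm_runtime_get_sync()/pm_runtime_put() to host1x_client_resume()/host1x_client_suspend(). The shape of the conversion, with stub placeholders:

#include <linux/host1x.h>

static int my_init(struct host1x_client *client) { return 0; }
static int my_exit(struct host1x_client *client) { return 0; }
static int my_runtime_suspend(struct host1x_client *client) { return 0; }
static int my_runtime_resume(struct host1x_client *client) { return 0; }

static const struct host1x_client_ops my_client_ops = {
	.init = my_init,
	.exit = my_exit,
	.suspend = my_runtime_suspend,	/* was dev_pm_ops.runtime_suspend */
	.resume = my_runtime_resume,	/* was dev_pm_ops.runtime_resume */
};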
diff --git a/drivers/gpu/drm/tegra/dpaux.c b/drivers/gpu/drm/tegra/dpaux.c
index 622cdf1ad246..7dfb50f65067 100644
--- a/drivers/gpu/drm/tegra/dpaux.c
+++ b/drivers/gpu/drm/tegra/dpaux.c
@@ -588,7 +588,7 @@ static int tegra_dpaux_remove(struct platform_device *pdev)
/* make sure pads are powered down when not in use */
tegra_dpaux_pad_power_down(dpaux);
- pm_runtime_put(&pdev->dev);
+ pm_runtime_put_sync(&pdev->dev);
pm_runtime_disable(&pdev->dev);
drm_dp_aux_unregister(&dpaux->aux);
diff --git a/drivers/gpu/drm/tegra/drm.c b/drivers/gpu/drm/tegra/drm.c
index f455ce71e85d..aa9e49f04988 100644
--- a/drivers/gpu/drm/tegra/drm.c
+++ b/drivers/gpu/drm/tegra/drm.c
@@ -905,7 +905,7 @@ int tegra_drm_unregister_client(struct tegra_drm *tegra,
int host1x_client_iommu_attach(struct host1x_client *client)
{
struct iommu_domain *domain = iommu_get_domain_for_dev(client->dev);
- struct drm_device *drm = dev_get_drvdata(client->parent);
+ struct drm_device *drm = dev_get_drvdata(client->host);
struct tegra_drm *tegra = drm->dev_private;
struct iommu_group *group = NULL;
int err;
@@ -941,7 +941,7 @@ int host1x_client_iommu_attach(struct host1x_client *client)
void host1x_client_iommu_detach(struct host1x_client *client)
{
- struct drm_device *drm = dev_get_drvdata(client->parent);
+ struct drm_device *drm = dev_get_drvdata(client->host);
struct tegra_drm *tegra = drm->dev_private;
struct iommu_domain *domain;
diff --git a/drivers/gpu/drm/tegra/drm.h b/drivers/gpu/drm/tegra/drm.h
index d941553f7a3d..ed99b67deb29 100644
--- a/drivers/gpu/drm/tegra/drm.h
+++ b/drivers/gpu/drm/tegra/drm.h
@@ -144,6 +144,8 @@ int tegra_output_init(struct drm_device *drm, struct tegra_output *output);
void tegra_output_exit(struct tegra_output *output);
void tegra_output_find_possible_crtcs(struct tegra_output *output,
struct drm_device *drm);
+int tegra_output_suspend(struct tegra_output *output);
+int tegra_output_resume(struct tegra_output *output);
int tegra_output_connector_get_modes(struct drm_connector *connector);
enum drm_connector_status
diff --git a/drivers/gpu/drm/tegra/dsi.c b/drivers/gpu/drm/tegra/dsi.c
index a5d47e301c5f..88b9d64c77bf 100644
--- a/drivers/gpu/drm/tegra/dsi.c
+++ b/drivers/gpu/drm/tegra/dsi.c
@@ -840,7 +840,9 @@ static void tegra_dsi_unprepare(struct tegra_dsi *dsi)
dev_err(dsi->dev, "failed to disable MIPI calibration: %d\n",
err);
- pm_runtime_put(dsi->dev);
+ err = host1x_client_suspend(&dsi->client);
+ if (err < 0)
+ dev_err(dsi->dev, "failed to suspend: %d\n", err);
}
static void tegra_dsi_encoder_disable(struct drm_encoder *encoder)
@@ -882,11 +884,15 @@ static void tegra_dsi_encoder_disable(struct drm_encoder *encoder)
tegra_dsi_unprepare(dsi);
}
-static void tegra_dsi_prepare(struct tegra_dsi *dsi)
+static int tegra_dsi_prepare(struct tegra_dsi *dsi)
{
int err;
- pm_runtime_get_sync(dsi->dev);
+ err = host1x_client_resume(&dsi->client);
+ if (err < 0) {
+ dev_err(dsi->dev, "failed to resume: %d\n", err);
+ return err;
+ }
err = tegra_mipi_enable(dsi->mipi);
if (err < 0)
@@ -899,6 +905,8 @@ static void tegra_dsi_prepare(struct tegra_dsi *dsi)
if (dsi->slave)
tegra_dsi_prepare(dsi->slave);
+
+ return 0;
}
static void tegra_dsi_encoder_enable(struct drm_encoder *encoder)
@@ -909,8 +917,13 @@ static void tegra_dsi_encoder_enable(struct drm_encoder *encoder)
struct tegra_dsi *dsi = to_dsi(output);
struct tegra_dsi_state *state;
u32 value;
+ int err;
- tegra_dsi_prepare(dsi);
+ err = tegra_dsi_prepare(dsi);
+ if (err < 0) {
+ dev_err(dsi->dev, "failed to prepare: %d\n", err);
+ return;
+ }
state = tegra_dsi_get_state(dsi);
@@ -1030,7 +1043,7 @@ static const struct drm_encoder_helper_funcs tegra_dsi_encoder_helper_funcs = {
static int tegra_dsi_init(struct host1x_client *client)
{
- struct drm_device *drm = dev_get_drvdata(client->parent);
+ struct drm_device *drm = dev_get_drvdata(client->host);
struct tegra_dsi *dsi = host1x_client_to_dsi(client);
int err;
@@ -1075,9 +1088,89 @@ static int tegra_dsi_exit(struct host1x_client *client)
return 0;
}
+static int tegra_dsi_runtime_suspend(struct host1x_client *client)
+{
+ struct tegra_dsi *dsi = host1x_client_to_dsi(client);
+ struct device *dev = client->dev;
+ int err;
+
+ if (dsi->rst) {
+ err = reset_control_assert(dsi->rst);
+ if (err < 0) {
+ dev_err(dev, "failed to assert reset: %d\n", err);
+ return err;
+ }
+ }
+
+ usleep_range(1000, 2000);
+
+ clk_disable_unprepare(dsi->clk_lp);
+ clk_disable_unprepare(dsi->clk);
+
+ regulator_disable(dsi->vdd);
+ pm_runtime_put_sync(dev);
+
+ return 0;
+}
+
+static int tegra_dsi_runtime_resume(struct host1x_client *client)
+{
+ struct tegra_dsi *dsi = host1x_client_to_dsi(client);
+ struct device *dev = client->dev;
+ int err;
+
+ err = pm_runtime_get_sync(dev);
+ if (err < 0) {
+ dev_err(dev, "failed to get runtime PM: %d\n", err);
+ return err;
+ }
+
+ err = regulator_enable(dsi->vdd);
+ if (err < 0) {
+ dev_err(dev, "failed to enable VDD supply: %d\n", err);
+ goto put_rpm;
+ }
+
+ err = clk_prepare_enable(dsi->clk);
+ if (err < 0) {
+ dev_err(dev, "cannot enable DSI clock: %d\n", err);
+ goto disable_vdd;
+ }
+
+ err = clk_prepare_enable(dsi->clk_lp);
+ if (err < 0) {
+ dev_err(dev, "cannot enable low-power clock: %d\n", err);
+ goto disable_clk;
+ }
+
+ usleep_range(1000, 2000);
+
+ if (dsi->rst) {
+ err = reset_control_deassert(dsi->rst);
+ if (err < 0) {
+ dev_err(dev, "cannot assert reset: %d\n", err);
+ goto disable_clk_lp;
+ }
+ }
+
+ return 0;
+
+disable_clk_lp:
+ clk_disable_unprepare(dsi->clk_lp);
+disable_clk:
+ clk_disable_unprepare(dsi->clk);
+disable_vdd:
+ regulator_disable(dsi->vdd);
+put_rpm:
+ pm_runtime_put_sync(dev);
+ return err;
+}
+
static const struct host1x_client_ops dsi_client_ops = {
.init = tegra_dsi_init,
.exit = tegra_dsi_exit,
+ .suspend = tegra_dsi_runtime_suspend,
+ .resume = tegra_dsi_runtime_resume,
};
static int tegra_dsi_setup_clocks(struct tegra_dsi *dsi)
@@ -1596,79 +1689,6 @@ static int tegra_dsi_remove(struct platform_device *pdev)
return 0;
}
-#ifdef CONFIG_PM
-static int tegra_dsi_suspend(struct device *dev)
-{
- struct tegra_dsi *dsi = dev_get_drvdata(dev);
- int err;
-
- if (dsi->rst) {
- err = reset_control_assert(dsi->rst);
- if (err < 0) {
- dev_err(dev, "failed to assert reset: %d\n", err);
- return err;
- }
- }
-
- usleep_range(1000, 2000);
-
- clk_disable_unprepare(dsi->clk_lp);
- clk_disable_unprepare(dsi->clk);
-
- regulator_disable(dsi->vdd);
-
- return 0;
-}
-
-static int tegra_dsi_resume(struct device *dev)
-{
- struct tegra_dsi *dsi = dev_get_drvdata(dev);
- int err;
-
- err = regulator_enable(dsi->vdd);
- if (err < 0) {
- dev_err(dsi->dev, "failed to enable VDD supply: %d\n", err);
- return err;
- }
-
- err = clk_prepare_enable(dsi->clk);
- if (err < 0) {
- dev_err(dev, "cannot enable DSI clock: %d\n", err);
- goto disable_vdd;
- }
-
- err = clk_prepare_enable(dsi->clk_lp);
- if (err < 0) {
- dev_err(dev, "cannot enable low-power clock: %d\n", err);
- goto disable_clk;
- }
-
- usleep_range(1000, 2000);
-
- if (dsi->rst) {
- err = reset_control_deassert(dsi->rst);
- if (err < 0) {
- dev_err(dev, "cannot assert reset: %d\n", err);
- goto disable_clk_lp;
- }
- }
-
- return 0;
-
-disable_clk_lp:
- clk_disable_unprepare(dsi->clk_lp);
-disable_clk:
- clk_disable_unprepare(dsi->clk);
-disable_vdd:
- regulator_disable(dsi->vdd);
- return err;
-}
-#endif
-
-static const struct dev_pm_ops tegra_dsi_pm_ops = {
- SET_RUNTIME_PM_OPS(tegra_dsi_suspend, tegra_dsi_resume, NULL)
-};
-
static const struct of_device_id tegra_dsi_of_match[] = {
{ .compatible = "nvidia,tegra210-dsi", },
{ .compatible = "nvidia,tegra132-dsi", },
@@ -1682,7 +1702,6 @@ struct platform_driver tegra_dsi_driver = {
.driver = {
.name = "tegra-dsi",
.of_match_table = tegra_dsi_of_match,
- .pm = &tegra_dsi_pm_ops,
},
.probe = tegra_dsi_probe,
.remove = tegra_dsi_remove,
diff --git a/drivers/gpu/drm/tegra/fb.c b/drivers/gpu/drm/tegra/fb.c
index 7cea89f29a5c..84f0e01e3428 100644
--- a/drivers/gpu/drm/tegra/fb.c
+++ b/drivers/gpu/drm/tegra/fb.c
@@ -192,7 +192,7 @@ static int tegra_fb_mmap(struct fb_info *info, struct vm_area_struct *vma)
return __tegra_gem_mmap(&bo->gem, vma);
}
-static struct fb_ops tegra_fb_ops = {
+static const struct fb_ops tegra_fb_ops = {
.owner = THIS_MODULE,
DRM_FB_HELPER_DEFAULT_OPS,
.fb_fillrect = drm_fb_helper_sys_fillrect,
diff --git a/drivers/gpu/drm/tegra/gem.c b/drivers/gpu/drm/tegra/gem.c
index bc15b430156d..1237df157e05 100644
--- a/drivers/gpu/drm/tegra/gem.c
+++ b/drivers/gpu/drm/tegra/gem.c
@@ -146,32 +146,6 @@ static void tegra_bo_munmap(struct host1x_bo *bo, void *addr)
vunmap(addr);
}
-static void *tegra_bo_kmap(struct host1x_bo *bo, unsigned int page)
-{
- struct tegra_bo *obj = host1x_to_tegra_bo(bo);
-
- if (obj->vaddr)
- return obj->vaddr + page * PAGE_SIZE;
- else if (obj->gem.import_attach)
- return dma_buf_kmap(obj->gem.import_attach->dmabuf, page);
- else
- return vmap(obj->pages + page, 1, VM_MAP,
- pgprot_writecombine(PAGE_KERNEL));
-}
-
-static void tegra_bo_kunmap(struct host1x_bo *bo, unsigned int page,
- void *addr)
-{
- struct tegra_bo *obj = host1x_to_tegra_bo(bo);
-
- if (obj->vaddr)
- return;
- else if (obj->gem.import_attach)
- dma_buf_kunmap(obj->gem.import_attach->dmabuf, page, addr);
- else
- vunmap(addr);
-}
-
static struct host1x_bo *tegra_bo_get(struct host1x_bo *bo)
{
struct tegra_bo *obj = host1x_to_tegra_bo(bo);
@@ -188,8 +162,6 @@ static const struct host1x_bo_ops tegra_bo_ops = {
.unpin = tegra_bo_unpin,
.mmap = tegra_bo_mmap,
.munmap = tegra_bo_munmap,
- .kmap = tegra_bo_kmap,
- .kunmap = tegra_bo_kunmap,
};
static int tegra_bo_iommu_map(struct tegra_drm *tegra, struct tegra_bo *bo)
@@ -649,16 +621,6 @@ static int tegra_gem_prime_end_cpu_access(struct dma_buf *buf,
return 0;
}
-static void *tegra_gem_prime_kmap(struct dma_buf *buf, unsigned long page)
-{
- return NULL;
-}
-
-static void tegra_gem_prime_kunmap(struct dma_buf *buf, unsigned long page,
- void *addr)
-{
-}
-
static int tegra_gem_prime_mmap(struct dma_buf *buf, struct vm_area_struct *vma)
{
struct drm_gem_object *gem = buf->priv;
@@ -689,8 +651,6 @@ static const struct dma_buf_ops tegra_gem_prime_dmabuf_ops = {
.release = tegra_gem_prime_release,
.begin_cpu_access = tegra_gem_prime_begin_cpu_access,
.end_cpu_access = tegra_gem_prime_end_cpu_access,
- .map = tegra_gem_prime_kmap,
- .unmap = tegra_gem_prime_kunmap,
.mmap = tegra_gem_prime_mmap,
.vmap = tegra_gem_prime_vmap,
.vunmap = tegra_gem_prime_vunmap,
diff --git a/drivers/gpu/drm/tegra/gr2d.c b/drivers/gpu/drm/tegra/gr2d.c
index 1fc4e56c7cc5..48363f744bb9 100644
--- a/drivers/gpu/drm/tegra/gr2d.c
+++ b/drivers/gpu/drm/tegra/gr2d.c
@@ -34,7 +34,7 @@ static inline struct gr2d *to_gr2d(struct tegra_drm_client *client)
static int gr2d_init(struct host1x_client *client)
{
struct tegra_drm_client *drm = host1x_to_drm_client(client);
- struct drm_device *dev = dev_get_drvdata(client->parent);
+ struct drm_device *dev = dev_get_drvdata(client->host);
unsigned long flags = HOST1X_SYNCPT_HAS_BASE;
struct gr2d *gr2d = to_gr2d(drm);
int err;
@@ -76,7 +76,7 @@ put:
static int gr2d_exit(struct host1x_client *client)
{
struct tegra_drm_client *drm = host1x_to_drm_client(client);
- struct drm_device *dev = dev_get_drvdata(client->parent);
+ struct drm_device *dev = dev_get_drvdata(client->host);
struct tegra_drm *tegra = dev->dev_private;
struct gr2d *gr2d = to_gr2d(drm);
int err;
diff --git a/drivers/gpu/drm/tegra/gr3d.c b/drivers/gpu/drm/tegra/gr3d.c
index 24fae0f64032..c0a528be0369 100644
--- a/drivers/gpu/drm/tegra/gr3d.c
+++ b/drivers/gpu/drm/tegra/gr3d.c
@@ -43,7 +43,7 @@ static inline struct gr3d *to_gr3d(struct tegra_drm_client *client)
static int gr3d_init(struct host1x_client *client)
{
struct tegra_drm_client *drm = host1x_to_drm_client(client);
- struct drm_device *dev = dev_get_drvdata(client->parent);
+ struct drm_device *dev = dev_get_drvdata(client->host);
unsigned long flags = HOST1X_SYNCPT_HAS_BASE;
struct gr3d *gr3d = to_gr3d(drm);
int err;
@@ -85,7 +85,7 @@ put:
static int gr3d_exit(struct host1x_client *client)
{
struct tegra_drm_client *drm = host1x_to_drm_client(client);
- struct drm_device *dev = dev_get_drvdata(client->parent);
+ struct drm_device *dev = dev_get_drvdata(client->host);
struct gr3d *gr3d = to_gr3d(drm);
int err;
diff --git a/drivers/gpu/drm/tegra/hdmi.c b/drivers/gpu/drm/tegra/hdmi.c
index 50269ffbcb6b..6f117628f257 100644
--- a/drivers/gpu/drm/tegra/hdmi.c
+++ b/drivers/gpu/drm/tegra/hdmi.c
@@ -1146,6 +1146,7 @@ static void tegra_hdmi_encoder_disable(struct drm_encoder *encoder)
struct tegra_dc *dc = to_tegra_dc(encoder->crtc);
struct tegra_hdmi *hdmi = to_hdmi(output);
u32 value;
+ int err;
/*
* The following accesses registers of the display controller, so make
@@ -1171,7 +1172,9 @@ static void tegra_hdmi_encoder_disable(struct drm_encoder *encoder)
tegra_hdmi_writel(hdmi, 0, HDMI_NV_PDISP_INT_ENABLE);
tegra_hdmi_writel(hdmi, 0, HDMI_NV_PDISP_INT_MASK);
- pm_runtime_put(hdmi->dev);
+ err = host1x_client_suspend(&hdmi->client);
+ if (err < 0)
+ dev_err(hdmi->dev, "failed to suspend: %d\n", err);
}
static void tegra_hdmi_encoder_enable(struct drm_encoder *encoder)
@@ -1186,7 +1189,11 @@ static void tegra_hdmi_encoder_enable(struct drm_encoder *encoder)
u32 value;
int err;
- pm_runtime_get_sync(hdmi->dev);
+ err = host1x_client_resume(&hdmi->client);
+ if (err < 0) {
+ dev_err(hdmi->dev, "failed to resume: %d\n", err);
+ return;
+ }
/*
* Enable and unmask the HDA codec SCRATCH0 register interrupt. This
@@ -1424,15 +1431,16 @@ static const struct drm_encoder_helper_funcs tegra_hdmi_encoder_helper_funcs = {
static int tegra_hdmi_init(struct host1x_client *client)
{
- struct drm_device *drm = dev_get_drvdata(client->parent);
struct tegra_hdmi *hdmi = host1x_client_to_hdmi(client);
+ struct drm_device *drm = dev_get_drvdata(client->host);
int err;
hdmi->output.dev = client->dev;
- drm_connector_init(drm, &hdmi->output.connector,
- &tegra_hdmi_connector_funcs,
- DRM_MODE_CONNECTOR_HDMIA);
+ drm_connector_init_with_ddc(drm, &hdmi->output.connector,
+ &tegra_hdmi_connector_funcs,
+ DRM_MODE_CONNECTOR_HDMIA,
+ hdmi->output.ddc);
drm_connector_helper_add(&hdmi->output.connector,
&tegra_hdmi_connector_helper_funcs);
hdmi->output.connector.dpms = DRM_MODE_DPMS_OFF;
@@ -1489,9 +1497,66 @@ static int tegra_hdmi_exit(struct host1x_client *client)
return 0;
}
+static int tegra_hdmi_runtime_suspend(struct host1x_client *client)
+{
+ struct tegra_hdmi *hdmi = host1x_client_to_hdmi(client);
+ struct device *dev = client->dev;
+ int err;
+
+ err = reset_control_assert(hdmi->rst);
+ if (err < 0) {
+ dev_err(dev, "failed to assert reset: %d\n", err);
+ return err;
+ }
+
+ usleep_range(1000, 2000);
+
+ clk_disable_unprepare(hdmi->clk);
+ pm_runtime_put_sync(dev);
+
+ return 0;
+}
+
+static int tegra_hdmi_runtime_resume(struct host1x_client *client)
+{
+ struct tegra_hdmi *hdmi = host1x_client_to_hdmi(client);
+ struct device *dev = client->dev;
+ int err;
+
+ err = pm_runtime_get_sync(dev);
+ if (err < 0) {
+ dev_err(dev, "failed to get runtime PM: %d\n", err);
+ return err;
+ }
+
+ err = clk_prepare_enable(hdmi->clk);
+ if (err < 0) {
+ dev_err(dev, "failed to enable clock: %d\n", err);
+ goto put_rpm;
+ }
+
+ usleep_range(1000, 2000);
+
+ err = reset_control_deassert(hdmi->rst);
+ if (err < 0) {
+ dev_err(dev, "failed to deassert reset: %d\n", err);
+ goto disable_clk;
+ }
+
+ return 0;
+
+disable_clk:
+ clk_disable_unprepare(hdmi->clk);
+put_rpm:
+ pm_runtime_put_sync(dev);
+ return err;
+}
+
static const struct host1x_client_ops hdmi_client_ops = {
.init = tegra_hdmi_init,
.exit = tegra_hdmi_exit,
+ .suspend = tegra_hdmi_runtime_suspend,
+ .resume = tegra_hdmi_runtime_resume,
};
static const struct tegra_hdmi_config tegra20_hdmi_config = {
@@ -1699,58 +1764,10 @@ static int tegra_hdmi_remove(struct platform_device *pdev)
return 0;
}
-#ifdef CONFIG_PM
-static int tegra_hdmi_suspend(struct device *dev)
-{
- struct tegra_hdmi *hdmi = dev_get_drvdata(dev);
- int err;
-
- err = reset_control_assert(hdmi->rst);
- if (err < 0) {
- dev_err(dev, "failed to assert reset: %d\n", err);
- return err;
- }
-
- usleep_range(1000, 2000);
-
- clk_disable_unprepare(hdmi->clk);
-
- return 0;
-}
-
-static int tegra_hdmi_resume(struct device *dev)
-{
- struct tegra_hdmi *hdmi = dev_get_drvdata(dev);
- int err;
-
- err = clk_prepare_enable(hdmi->clk);
- if (err < 0) {
- dev_err(dev, "failed to enable clock: %d\n", err);
- return err;
- }
-
- usleep_range(1000, 2000);
-
- err = reset_control_deassert(hdmi->rst);
- if (err < 0) {
- dev_err(dev, "failed to deassert reset: %d\n", err);
- clk_disable_unprepare(hdmi->clk);
- return err;
- }
-
- return 0;
-}
-#endif
-
-static const struct dev_pm_ops tegra_hdmi_pm_ops = {
- SET_RUNTIME_PM_OPS(tegra_hdmi_suspend, tegra_hdmi_resume, NULL)
-};
-
struct platform_driver tegra_hdmi_driver = {
.driver = {
.name = "tegra-hdmi",
.of_match_table = tegra_hdmi_of_match,
- .pm = &tegra_hdmi_pm_ops,
},
.probe = tegra_hdmi_probe,
.remove = tegra_hdmi_remove,
diff --git a/drivers/gpu/drm/tegra/hub.c b/drivers/gpu/drm/tegra/hub.c
index 47d985ac7cd7..8183e617bf6b 100644
--- a/drivers/gpu/drm/tegra/hub.c
+++ b/drivers/gpu/drm/tegra/hub.c
@@ -95,17 +95,25 @@ static inline void tegra_plane_writel(struct tegra_plane *plane, u32 value,
static int tegra_windowgroup_enable(struct tegra_windowgroup *wgrp)
{
+ int err = 0;
+
mutex_lock(&wgrp->lock);
if (wgrp->usecount == 0) {
- pm_runtime_get_sync(wgrp->parent);
+ err = host1x_client_resume(wgrp->parent);
+ if (err < 0) {
+ dev_err(wgrp->parent->dev, "failed to resume: %d\n", err);
+ goto unlock;
+ }
+
reset_control_deassert(wgrp->rst);
}
wgrp->usecount++;
- mutex_unlock(&wgrp->lock);
- return 0;
+unlock:
+ mutex_unlock(&wgrp->lock);
+ return err;
}
static void tegra_windowgroup_disable(struct tegra_windowgroup *wgrp)
@@ -121,7 +129,7 @@ static void tegra_windowgroup_disable(struct tegra_windowgroup *wgrp)
wgrp->index);
}
- pm_runtime_put(wgrp->parent);
+ host1x_client_suspend(wgrp->parent);
}
wgrp->usecount--;
@@ -379,6 +387,7 @@ static void tegra_shared_plane_atomic_disable(struct drm_plane *plane,
struct tegra_plane *p = to_tegra_plane(plane);
struct tegra_dc *dc;
u32 value;
+ int err;
/* rien ne va plus ("no more bets") */
if (!old_state || !old_state->crtc)
@@ -386,6 +395,12 @@ static void tegra_shared_plane_atomic_disable(struct drm_plane *plane,
dc = to_tegra_dc(old_state->crtc);
+ err = host1x_client_resume(&dc->client);
+ if (err < 0) {
+ dev_err(dc->dev, "failed to resume: %d\n", err);
+ return;
+ }
+
/*
* XXX Legacy helpers seem to sometimes call ->atomic_disable() even
* on planes that are already disabled. Make sure we fall back to the
@@ -394,15 +409,13 @@ static void tegra_shared_plane_atomic_disable(struct drm_plane *plane,
if (WARN_ON(p->dc == NULL))
p->dc = dc;
- pm_runtime_get_sync(dc->dev);
-
value = tegra_plane_readl(p, DC_WIN_WIN_OPTIONS);
value &= ~WIN_ENABLE;
tegra_plane_writel(p, value, DC_WIN_WIN_OPTIONS);
tegra_dc_remove_shared_plane(dc, p);
- pm_runtime_put(dc->dev);
+ host1x_client_suspend(&dc->client);
}
static void tegra_shared_plane_atomic_update(struct drm_plane *plane,
@@ -415,6 +428,7 @@ static void tegra_shared_plane_atomic_update(struct drm_plane *plane,
struct tegra_plane *p = to_tegra_plane(plane);
dma_addr_t base;
u32 value;
+ int err;
/* rien ne va plus ("no more bets") */
if (!plane->state->crtc || !plane->state->fb)
@@ -425,7 +439,11 @@ static void tegra_shared_plane_atomic_update(struct drm_plane *plane,
return;
}
- pm_runtime_get_sync(dc->dev);
+ err = host1x_client_resume(&dc->client);
+ if (err < 0) {
+ dev_err(dc->dev, "failed to resume: %d\n", err);
+ return;
+ }
tegra_dc_assign_shared_plane(dc, p);
@@ -515,7 +533,7 @@ static void tegra_shared_plane_atomic_update(struct drm_plane *plane,
value &= ~CONTROL_CSC_ENABLE;
tegra_plane_writel(p, value, DC_WIN_WINDOW_SET_CONTROL);
- pm_runtime_put(dc->dev);
+ host1x_client_suspend(&dc->client);
}
static const struct drm_plane_helper_funcs tegra_shared_plane_helper_funcs = {
@@ -551,7 +569,7 @@ struct drm_plane *tegra_shared_plane_create(struct drm_device *drm,
plane->base.index = index;
plane->wgrp = &hub->wgrps[wgrp];
- plane->wgrp->parent = dc->dev;
+ plane->wgrp->parent = &dc->client;
p = &plane->base.base;
@@ -656,8 +674,13 @@ int tegra_display_hub_atomic_check(struct drm_device *drm,
static void tegra_display_hub_update(struct tegra_dc *dc)
{
u32 value;
+ int err;
- pm_runtime_get_sync(dc->dev);
+ err = host1x_client_resume(&dc->client);
+ if (err < 0) {
+ dev_err(dc->dev, "failed to resume: %d\n", err);
+ return;
+ }
value = tegra_dc_readl(dc, DC_CMD_IHUB_COMMON_MISC_CTL);
value &= ~LATENCY_EVENT;
@@ -672,7 +695,7 @@ static void tegra_display_hub_update(struct tegra_dc *dc)
tegra_dc_writel(dc, COMMON_ACTREQ, DC_CMD_STATE_CONTROL);
tegra_dc_readl(dc, DC_CMD_STATE_CONTROL);
- pm_runtime_put(dc->dev);
+ host1x_client_suspend(&dc->client);
}
void tegra_display_hub_atomic_commit(struct drm_device *drm,
@@ -705,7 +728,7 @@ void tegra_display_hub_atomic_commit(struct drm_device *drm,
static int tegra_display_hub_init(struct host1x_client *client)
{
struct tegra_display_hub *hub = to_tegra_display_hub(client);
- struct drm_device *drm = dev_get_drvdata(client->parent);
+ struct drm_device *drm = dev_get_drvdata(client->host);
struct tegra_drm *tegra = drm->dev_private;
struct tegra_display_hub_state *state;
@@ -723,7 +746,7 @@ static int tegra_display_hub_init(struct host1x_client *client)
static int tegra_display_hub_exit(struct host1x_client *client)
{
- struct drm_device *drm = dev_get_drvdata(client->parent);
+ struct drm_device *drm = dev_get_drvdata(client->host);
struct tegra_drm *tegra = drm->dev_private;
drm_atomic_private_obj_fini(&tegra->hub->base);
@@ -732,9 +755,85 @@ static int tegra_display_hub_exit(struct host1x_client *client)
return 0;
}
+static int tegra_display_hub_runtime_suspend(struct host1x_client *client)
+{
+ struct tegra_display_hub *hub = to_tegra_display_hub(client);
+ struct device *dev = client->dev;
+ unsigned int i = hub->num_heads;
+ int err;
+
+ err = reset_control_assert(hub->rst);
+ if (err < 0)
+ return err;
+
+ while (i--)
+ clk_disable_unprepare(hub->clk_heads[i]);
+
+ clk_disable_unprepare(hub->clk_hub);
+ clk_disable_unprepare(hub->clk_dsc);
+ clk_disable_unprepare(hub->clk_disp);
+
+ pm_runtime_put_sync(dev);
+
+ return 0;
+}
+
+static int tegra_display_hub_runtime_resume(struct host1x_client *client)
+{
+ struct tegra_display_hub *hub = to_tegra_display_hub(client);
+ struct device *dev = client->dev;
+ unsigned int i;
+ int err;
+
+ err = pm_runtime_get_sync(dev);
+ if (err < 0) {
+ dev_err(dev, "failed to get runtime PM: %d\n", err);
+ return err;
+ }
+
+ err = clk_prepare_enable(hub->clk_disp);
+ if (err < 0)
+ goto put_rpm;
+
+ err = clk_prepare_enable(hub->clk_dsc);
+ if (err < 0)
+ goto disable_disp;
+
+ err = clk_prepare_enable(hub->clk_hub);
+ if (err < 0)
+ goto disable_dsc;
+
+ for (i = 0; i < hub->num_heads; i++) {
+ err = clk_prepare_enable(hub->clk_heads[i]);
+ if (err < 0)
+ goto disable_heads;
+ }
+
+ err = reset_control_deassert(hub->rst);
+ if (err < 0)
+ goto disable_heads;
+
+ return 0;
+
+disable_heads:
+ while (i--)
+ clk_disable_unprepare(hub->clk_heads[i]);
+
+ clk_disable_unprepare(hub->clk_hub);
+disable_dsc:
+ clk_disable_unprepare(hub->clk_dsc);
+disable_disp:
+ clk_disable_unprepare(hub->clk_disp);
+put_rpm:
+ pm_runtime_put_sync(dev);
+ return err;
+}
+
static const struct host1x_client_ops tegra_display_hub_ops = {
.init = tegra_display_hub_init,
.exit = tegra_display_hub_exit,
+ .suspend = tegra_display_hub_runtime_suspend,
+ .resume = tegra_display_hub_runtime_resume,
};
static int tegra_display_hub_probe(struct platform_device *pdev)
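
tegra_display_hub_runtime_resume() above is a textbook staged unwind: each acquired resource gets its own error label, and the per-head clock loop reuses its counter so that only the clocks actually enabled get disabled again, in reverse order. The idiom in isolation (illustrative names, needs <linux/clk.h>):

	static int enable_head_clocks(struct clk **clks, unsigned int count)
	{
		unsigned int i;
		int err;

		for (i = 0; i < count; i++) {
			err = clk_prepare_enable(clks[i]);
			if (err < 0)
				goto unwind;
		}

		return 0;

	unwind:
		/* clks[i] failed; undo clks[i-1]..clks[0] */
		while (i--)
			clk_disable_unprepare(clks[i]);

		return err;
	}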
@@ -851,6 +950,7 @@ static int tegra_display_hub_probe(struct platform_device *pdev)
static int tegra_display_hub_remove(struct platform_device *pdev)
{
struct tegra_display_hub *hub = platform_get_drvdata(pdev);
+ unsigned int i;
int err;
err = host1x_client_unregister(&hub->client);
@@ -859,78 +959,17 @@ static int tegra_display_hub_remove(struct platform_device *pdev)
err);
}
- pm_runtime_disable(&pdev->dev);
-
- return err;
-}
-
-static int __maybe_unused tegra_display_hub_suspend(struct device *dev)
-{
- struct tegra_display_hub *hub = dev_get_drvdata(dev);
- unsigned int i = hub->num_heads;
- int err;
-
- err = reset_control_assert(hub->rst);
- if (err < 0)
- return err;
-
- while (i--)
- clk_disable_unprepare(hub->clk_heads[i]);
-
- clk_disable_unprepare(hub->clk_hub);
- clk_disable_unprepare(hub->clk_dsc);
- clk_disable_unprepare(hub->clk_disp);
-
- return 0;
-}
-
-static int __maybe_unused tegra_display_hub_resume(struct device *dev)
-{
- struct tegra_display_hub *hub = dev_get_drvdata(dev);
- unsigned int i;
- int err;
-
- err = clk_prepare_enable(hub->clk_disp);
- if (err < 0)
- return err;
-
- err = clk_prepare_enable(hub->clk_dsc);
- if (err < 0)
- goto disable_disp;
-
- err = clk_prepare_enable(hub->clk_hub);
- if (err < 0)
- goto disable_dsc;
+ for (i = 0; i < hub->soc->num_wgrps; i++) {
+ struct tegra_windowgroup *wgrp = &hub->wgrps[i];
- for (i = 0; i < hub->num_heads; i++) {
- err = clk_prepare_enable(hub->clk_heads[i]);
- if (err < 0)
- goto disable_heads;
+ mutex_destroy(&wgrp->lock);
}
- err = reset_control_deassert(hub->rst);
- if (err < 0)
- goto disable_heads;
-
- return 0;
-
-disable_heads:
- while (i--)
- clk_disable_unprepare(hub->clk_heads[i]);
+ pm_runtime_disable(&pdev->dev);
- clk_disable_unprepare(hub->clk_hub);
-disable_dsc:
- clk_disable_unprepare(hub->clk_dsc);
-disable_disp:
- clk_disable_unprepare(hub->clk_disp);
return err;
}
-static const struct dev_pm_ops tegra_display_hub_pm_ops = {
- SET_RUNTIME_PM_OPS(tegra_display_hub_suspend,
- tegra_display_hub_resume, NULL)
-};
-
static const struct tegra_display_hub_soc tegra186_display_hub = {
.num_wgrps = 6,
.supports_dsc = true,
@@ -958,7 +997,6 @@ struct platform_driver tegra_display_hub_driver = {
.driver = {
.name = "tegra-display-hub",
.of_match_table = tegra_display_hub_of_match,
- .pm = &tegra_display_hub_pm_ops,
},
.probe = tegra_display_hub_probe,
.remove = tegra_display_hub_remove,
diff --git a/drivers/gpu/drm/tegra/hub.h b/drivers/gpu/drm/tegra/hub.h
index 767a60d9313c..3efa1be07ff8 100644
--- a/drivers/gpu/drm/tegra/hub.h
+++ b/drivers/gpu/drm/tegra/hub.h
@@ -17,7 +17,7 @@ struct tegra_windowgroup {
struct mutex lock;
unsigned int index;
- struct device *parent;
+ struct host1x_client *parent;
struct reset_control *rst;
};
diff --git a/drivers/gpu/drm/tegra/output.c b/drivers/gpu/drm/tegra/output.c
index 34373734ff68..a264259b97a2 100644
--- a/drivers/gpu/drm/tegra/output.c
+++ b/drivers/gpu/drm/tegra/output.c
@@ -23,7 +23,7 @@ int tegra_output_connector_get_modes(struct drm_connector *connector)
* ignore any other means of obtaining a mode.
*/
if (output->panel) {
- err = output->panel->funcs->get_modes(output->panel);
+ err = drm_panel_get_modes(output->panel, connector);
if (err > 0)
return err;
}
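
drm_panel_get_modes() gained a connector argument in this cycle, replacing direct calls through panel->funcs now that struct drm_panel no longer tracks its own connector. The prototype this call assumes:

	int drm_panel_get_modes(struct drm_panel *panel,
				struct drm_connector *connector);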
@@ -250,3 +250,19 @@ void tegra_output_find_possible_crtcs(struct tegra_output *output,
output->encoder.possible_crtcs = mask;
}
+
+int tegra_output_suspend(struct tegra_output *output)
+{
+ if (output->hpd_irq)
+ disable_irq(output->hpd_irq);
+
+ return 0;
+}
+
+int tegra_output_resume(struct tegra_output *output)
+{
+ if (output->hpd_irq)
+ enable_irq(output->hpd_irq);
+
+ return 0;
+}
diff --git a/drivers/gpu/drm/tegra/sor.c b/drivers/gpu/drm/tegra/sor.c
index a68d3b36b972..41d24949478e 100644
--- a/drivers/gpu/drm/tegra/sor.c
+++ b/drivers/gpu/drm/tegra/sor.c
@@ -2255,7 +2255,7 @@ static void tegra_sor_hdmi_disable(struct drm_encoder *encoder)
if (err < 0)
dev_err(sor->dev, "failed to power off I/O pad: %d\n", err);
- pm_runtime_put(sor->dev);
+ host1x_client_suspend(&sor->client);
}
static void tegra_sor_hdmi_enable(struct drm_encoder *encoder)
@@ -2276,7 +2276,11 @@ static void tegra_sor_hdmi_enable(struct drm_encoder *encoder)
mode = &encoder->crtc->state->adjusted_mode;
pclk = mode->clock * 1000;
- pm_runtime_get_sync(sor->dev);
+ err = host1x_client_resume(&sor->client);
+ if (err < 0) {
+ dev_err(sor->dev, "failed to resume: %d\n", err);
+ return;
+ }
/* switch to safe parent clock */
err = tegra_sor_set_parent_clock(sor, sor->clk_safe);
@@ -2722,7 +2726,7 @@ static void tegra_sor_dp_disable(struct drm_encoder *encoder)
if (output->panel)
drm_panel_unprepare(output->panel);
- pm_runtime_put(sor->dev);
+ host1x_client_suspend(&sor->client);
}
static void tegra_sor_dp_enable(struct drm_encoder *encoder)
@@ -2742,7 +2746,11 @@ static void tegra_sor_dp_enable(struct drm_encoder *encoder)
mode = &encoder->crtc->state->adjusted_mode;
info = &output->connector.display_info;
- pm_runtime_get_sync(sor->dev);
+ err = host1x_client_resume(&sor->client);
+ if (err < 0) {
+ dev_err(sor->dev, "failed to resume: %d\n", err);
+ return;
+ }
/* switch to safe parent clock */
err = tegra_sor_set_parent_clock(sor, sor->clk_safe);
@@ -3053,7 +3061,7 @@ static const struct tegra_sor_ops tegra_sor_dp_ops = {
static int tegra_sor_init(struct host1x_client *client)
{
- struct drm_device *drm = dev_get_drvdata(client->parent);
+ struct drm_device *drm = dev_get_drvdata(client->host);
const struct drm_encoder_helper_funcs *helpers = NULL;
struct tegra_sor *sor = host1x_client_to_sor(client);
int connector = DRM_MODE_CONNECTOR_Unknown;
@@ -3086,9 +3094,10 @@ static int tegra_sor_init(struct host1x_client *client)
sor->output.dev = sor->dev;
- drm_connector_init(drm, &sor->output.connector,
- &tegra_sor_connector_funcs,
- connector);
+ drm_connector_init_with_ddc(drm, &sor->output.connector,
+ &tegra_sor_connector_funcs,
+ connector,
+ sor->output.ddc);
drm_connector_helper_add(&sor->output.connector,
&tegra_sor_connector_helper_funcs);
sor->output.connector.dpms = DRM_MODE_DPMS_OFF;
@@ -3189,9 +3198,80 @@ static int tegra_sor_exit(struct host1x_client *client)
return 0;
}
+static int tegra_sor_runtime_suspend(struct host1x_client *client)
+{
+ struct tegra_sor *sor = host1x_client_to_sor(client);
+ struct device *dev = client->dev;
+ int err;
+
+ if (sor->rst) {
+ err = reset_control_assert(sor->rst);
+ if (err < 0) {
+ dev_err(dev, "failed to assert reset: %d\n", err);
+ return err;
+ }
+
+ reset_control_release(sor->rst);
+ }
+
+ usleep_range(1000, 2000);
+
+ clk_disable_unprepare(sor->clk);
+ pm_runtime_put_sync(dev);
+
+ return 0;
+}
+
+static int tegra_sor_runtime_resume(struct host1x_client *client)
+{
+ struct tegra_sor *sor = host1x_client_to_sor(client);
+ struct device *dev = client->dev;
+ int err;
+
+ err = pm_runtime_get_sync(dev);
+ if (err < 0) {
+ dev_err(dev, "failed to get runtime PM: %d\n", err);
+ return err;
+ }
+
+ err = clk_prepare_enable(sor->clk);
+ if (err < 0) {
+ dev_err(dev, "failed to enable clock: %d\n", err);
+ goto put_rpm;
+ }
+
+ usleep_range(1000, 2000);
+
+ if (sor->rst) {
+ err = reset_control_acquire(sor->rst);
+ if (err < 0) {
+ dev_err(dev, "failed to acquire reset: %d\n", err);
+ goto disable_clk;
+ }
+
+ err = reset_control_deassert(sor->rst);
+ if (err < 0) {
+ dev_err(dev, "failed to deassert reset: %d\n", err);
+ goto release_reset;
+ }
+ }
+
+ return 0;
+
+release_reset:
+ reset_control_release(sor->rst);
+disable_clk:
+ clk_disable_unprepare(sor->clk);
+put_rpm:
+ pm_runtime_put_sync(dev);
+ return err;
+}
+
static const struct host1x_client_ops sor_client_ops = {
.init = tegra_sor_init,
.exit = tegra_sor_exit,
+ .suspend = tegra_sor_runtime_suspend,
+ .resume = tegra_sor_runtime_resume,
};
static const u8 tegra124_sor_xbar_cfg[5] = {
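
Unlike the HDMI block, the SOR reset line can be controlled by more than one party, so it is modeled as an acquired/released reset control: resume must take exclusive ownership before deasserting, and suspend hands ownership back after asserting. The pairing, reduced to a sketch:

	static int sor_reset_bringup(struct reset_control *rst)
	{
		int err;

		err = reset_control_acquire(rst);	/* take exclusive ownership */
		if (err < 0)
			return err;

		err = reset_control_deassert(rst);
		if (err < 0)
			reset_control_release(rst);	/* give it back on failure */

		return err;
	}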
@@ -3842,10 +3922,9 @@ static int tegra_sor_probe(struct platform_device *pdev)
if (!sor->clk_pad) {
char *name;
- err = pm_runtime_get_sync(&pdev->dev);
+ err = host1x_client_resume(&sor->client);
if (err < 0) {
- dev_err(&pdev->dev, "failed to get runtime PM: %d\n",
- err);
+ dev_err(sor->dev, "failed to resume: %d\n", err);
goto remove;
}
@@ -3856,7 +3935,7 @@ static int tegra_sor_probe(struct platform_device *pdev)
}
sor->clk_pad = tegra_clk_sor_pad_register(sor, name);
- pm_runtime_put(&pdev->dev);
+ host1x_client_suspend(&sor->client);
}
if (IS_ERR(sor->clk_pad)) {
@@ -3912,54 +3991,21 @@ static int tegra_sor_remove(struct platform_device *pdev)
return 0;
}
-static int tegra_sor_runtime_suspend(struct device *dev)
-{
- struct tegra_sor *sor = dev_get_drvdata(dev);
- int err;
-
- if (sor->rst) {
- err = reset_control_assert(sor->rst);
- if (err < 0) {
- dev_err(dev, "failed to assert reset: %d\n", err);
- return err;
- }
-
- reset_control_release(sor->rst);
- }
-
- usleep_range(1000, 2000);
-
- clk_disable_unprepare(sor->clk);
-
- return 0;
-}
-
-static int tegra_sor_runtime_resume(struct device *dev)
+static int __maybe_unused tegra_sor_suspend(struct device *dev)
{
struct tegra_sor *sor = dev_get_drvdata(dev);
int err;
- err = clk_prepare_enable(sor->clk);
+ err = tegra_output_suspend(&sor->output);
if (err < 0) {
- dev_err(dev, "failed to enable clock: %d\n", err);
+ dev_err(dev, "failed to suspend output: %d\n", err);
return err;
}
- usleep_range(1000, 2000);
-
- if (sor->rst) {
- err = reset_control_acquire(sor->rst);
- if (err < 0) {
- dev_err(dev, "failed to acquire reset: %d\n", err);
- clk_disable_unprepare(sor->clk);
- return err;
- }
-
- err = reset_control_deassert(sor->rst);
+ if (sor->hdmi_supply) {
+ err = regulator_disable(sor->hdmi_supply);
if (err < 0) {
- dev_err(dev, "failed to deassert reset: %d\n", err);
- reset_control_release(sor->rst);
- clk_disable_unprepare(sor->clk);
+ tegra_output_resume(&sor->output);
return err;
}
}
@@ -3967,37 +4013,31 @@ static int tegra_sor_runtime_resume(struct device *dev)
return 0;
}
-static int tegra_sor_suspend(struct device *dev)
+static int __maybe_unused tegra_sor_resume(struct device *dev)
{
struct tegra_sor *sor = dev_get_drvdata(dev);
int err;
if (sor->hdmi_supply) {
- err = regulator_disable(sor->hdmi_supply);
+ err = regulator_enable(sor->hdmi_supply);
if (err < 0)
return err;
}
- return 0;
-}
+ err = tegra_output_resume(&sor->output);
+ if (err < 0) {
+ dev_err(dev, "failed to resume output: %d\n", err);
-static int tegra_sor_resume(struct device *dev)
-{
- struct tegra_sor *sor = dev_get_drvdata(dev);
- int err;
+ if (sor->hdmi_supply)
+ regulator_disable(sor->hdmi_supply);
- if (sor->hdmi_supply) {
- err = regulator_enable(sor->hdmi_supply);
- if (err < 0)
- return err;
+ return err;
}
return 0;
}
static const struct dev_pm_ops tegra_sor_pm_ops = {
- SET_RUNTIME_PM_OPS(tegra_sor_runtime_suspend, tegra_sor_runtime_resume,
- NULL)
SET_SYSTEM_SLEEP_PM_OPS(tegra_sor_suspend, tegra_sor_resume)
};
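
The interleaved -/+ lines make the final shape hard to see; reassembled from the additions and context above, the system-sleep resume path ends up as:

	static int __maybe_unused tegra_sor_resume(struct device *dev)
	{
		struct tegra_sor *sor = dev_get_drvdata(dev);
		int err;

		if (sor->hdmi_supply) {
			err = regulator_enable(sor->hdmi_supply);
			if (err < 0)
				return err;
		}

		err = tegra_output_resume(&sor->output);
		if (err < 0) {
			dev_err(dev, "failed to resume output: %d\n", err);

			if (sor->hdmi_supply)
				regulator_disable(sor->hdmi_supply);

			return err;
		}

		return 0;
	}

Suspend runs the mirror image: the HPD interrupt is quiesced via tegra_output_suspend() before the HDMI supply is cut, and re-enabled if the regulator refuses to turn off.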
diff --git a/drivers/gpu/drm/tegra/vic.c b/drivers/gpu/drm/tegra/vic.c
index 3526c2892ddb..ade56b860cf9 100644
--- a/drivers/gpu/drm/tegra/vic.c
+++ b/drivers/gpu/drm/tegra/vic.c
@@ -161,7 +161,7 @@ static int vic_boot(struct vic *vic)
static int vic_init(struct host1x_client *client)
{
struct tegra_drm_client *drm = host1x_to_drm_client(client);
- struct drm_device *dev = dev_get_drvdata(client->parent);
+ struct drm_device *dev = dev_get_drvdata(client->host);
struct tegra_drm *tegra = dev->dev_private;
struct vic *vic = to_vic(drm);
int err;
@@ -190,9 +190,9 @@ static int vic_init(struct host1x_client *client)
/*
* Inherit the DMA parameters (such as maximum segment size) from the
- * parent device.
+ * parent host1x device.
*/
- client->dev->dma_parms = client->parent->dma_parms;
+ client->dev->dma_parms = client->host->dma_parms;
return 0;
@@ -209,7 +209,7 @@ detach:
static int vic_exit(struct host1x_client *client)
{
struct tegra_drm_client *drm = host1x_to_drm_client(client);
- struct drm_device *dev = dev_get_drvdata(client->parent);
+ struct drm_device *dev = dev_get_drvdata(client->host);
struct tegra_drm *tegra = dev->dev_private;
struct vic *vic = to_vic(drm);
int err;
diff --git a/drivers/gpu/drm/tilcdc/Makefile b/drivers/gpu/drm/tilcdc/Makefile
index 87f9480e43b0..662bf3a348c9 100644
--- a/drivers/gpu/drm/tilcdc/Makefile
+++ b/drivers/gpu/drm/tilcdc/Makefile
@@ -6,7 +6,6 @@ endif
tilcdc-y := \
tilcdc_plane.o \
tilcdc_crtc.o \
- tilcdc_tfp410.o \
tilcdc_panel.o \
tilcdc_external.o \
tilcdc_drv.o
diff --git a/drivers/gpu/drm/tilcdc/tilcdc_drv.c b/drivers/gpu/drm/tilcdc/tilcdc_drv.c
index 2a9e67597375..0791a0200cc3 100644
--- a/drivers/gpu/drm/tilcdc/tilcdc_drv.c
+++ b/drivers/gpu/drm/tilcdc/tilcdc_drv.c
@@ -30,7 +30,6 @@
#include "tilcdc_external.h"
#include "tilcdc_panel.h"
#include "tilcdc_regs.h"
-#include "tilcdc_tfp410.h"
static LIST_HEAD(module_list);
@@ -64,12 +63,6 @@ void tilcdc_module_cleanup(struct tilcdc_module *mod)
static struct of_device_id tilcdc_of_match[];
-static struct drm_framebuffer *tilcdc_fb_create(struct drm_device *dev,
- struct drm_file *file_priv, const struct drm_mode_fb_cmd2 *mode_cmd)
-{
- return drm_gem_fb_create(dev, file_priv, mode_cmd);
-}
-
static int tilcdc_atomic_check(struct drm_device *dev,
struct drm_atomic_state *state)
{
@@ -140,7 +133,7 @@ static int tilcdc_commit(struct drm_device *dev,
}
static const struct drm_mode_config_funcs mode_config_funcs = {
- .fb_create = tilcdc_fb_create,
+ .fb_create = drm_gem_fb_create,
.atomic_check = tilcdc_atomic_check,
.atomic_commit = tilcdc_commit,
};
@@ -256,7 +249,7 @@ static int tilcdc_init(struct drm_driver *ddrv, struct device *dev)
goto init_failed;
}
- priv->mmio = ioremap_nocache(res->start, resource_size(res));
+ priv->mmio = ioremap(res->start, resource_size(res));
if (!priv->mmio) {
dev_err(dev, "failed to ioremap\n");
ret = -ENOMEM;
@@ -649,7 +642,6 @@ static struct platform_driver tilcdc_platform_driver = {
static int __init tilcdc_drm_init(void)
{
DBG("init");
- tilcdc_tfp410_init();
tilcdc_panel_init();
return platform_driver_register(&tilcdc_platform_driver);
}
@@ -659,7 +651,6 @@ static void __exit tilcdc_drm_fini(void)
DBG("fini");
platform_driver_unregister(&tilcdc_platform_driver);
tilcdc_panel_fini();
- tilcdc_tfp410_fini();
}
module_init(tilcdc_drm_init);
diff --git a/drivers/gpu/drm/tilcdc/tilcdc_tfp410.c b/drivers/gpu/drm/tilcdc/tilcdc_tfp410.c
deleted file mode 100644
index 530edb3b51cc..000000000000
--- a/drivers/gpu/drm/tilcdc/tilcdc_tfp410.c
+++ /dev/null
@@ -1,379 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Copyright (C) 2012 Texas Instruments
- * Author: Rob Clark <robdclark@gmail.com>
- */
-
-#include <linux/gpio.h>
-#include <linux/mod_devicetable.h>
-#include <linux/of_gpio.h>
-#include <linux/pinctrl/consumer.h>
-#include <linux/platform_device.h>
-
-#include <drm/drm_atomic_helper.h>
-#include <drm/drm_encoder.h>
-#include <drm/drm_modeset_helper_vtables.h>
-#include <drm/drm_probe_helper.h>
-
-#include "tilcdc_drv.h"
-#include "tilcdc_tfp410.h"
-
-struct tfp410_module {
- struct tilcdc_module base;
- struct i2c_adapter *i2c;
- int gpio;
-};
-#define to_tfp410_module(x) container_of(x, struct tfp410_module, base)
-
-
-static const struct tilcdc_panel_info dvi_info = {
- .ac_bias = 255,
- .ac_bias_intrpt = 0,
- .dma_burst_sz = 16,
- .bpp = 16,
- .fdd = 0x80,
- .tft_alt_mode = 0,
- .sync_edge = 0,
- .sync_ctrl = 1,
- .raster_order = 0,
-};
-
-/*
- * Encoder:
- */
-
-struct tfp410_encoder {
- struct drm_encoder base;
- struct tfp410_module *mod;
- int dpms;
-};
-#define to_tfp410_encoder(x) container_of(x, struct tfp410_encoder, base)
-
-static void tfp410_encoder_dpms(struct drm_encoder *encoder, int mode)
-{
- struct tfp410_encoder *tfp410_encoder = to_tfp410_encoder(encoder);
-
- if (tfp410_encoder->dpms == mode)
- return;
-
- if (mode == DRM_MODE_DPMS_ON) {
- DBG("Power on");
- gpio_direction_output(tfp410_encoder->mod->gpio, 1);
- } else {
- DBG("Power off");
- gpio_direction_output(tfp410_encoder->mod->gpio, 0);
- }
-
- tfp410_encoder->dpms = mode;
-}
-
-static void tfp410_encoder_prepare(struct drm_encoder *encoder)
-{
- tfp410_encoder_dpms(encoder, DRM_MODE_DPMS_OFF);
-}
-
-static void tfp410_encoder_commit(struct drm_encoder *encoder)
-{
- tfp410_encoder_dpms(encoder, DRM_MODE_DPMS_ON);
-}
-
-static void tfp410_encoder_mode_set(struct drm_encoder *encoder,
- struct drm_display_mode *mode,
- struct drm_display_mode *adjusted_mode)
-{
- /* nothing needed */
-}
-
-static const struct drm_encoder_funcs tfp410_encoder_funcs = {
- .destroy = drm_encoder_cleanup,
-};
-
-static const struct drm_encoder_helper_funcs tfp410_encoder_helper_funcs = {
- .dpms = tfp410_encoder_dpms,
- .prepare = tfp410_encoder_prepare,
- .commit = tfp410_encoder_commit,
- .mode_set = tfp410_encoder_mode_set,
-};
-
-static struct drm_encoder *tfp410_encoder_create(struct drm_device *dev,
- struct tfp410_module *mod)
-{
- struct tfp410_encoder *tfp410_encoder;
- struct drm_encoder *encoder;
- int ret;
-
- tfp410_encoder = devm_kzalloc(dev->dev, sizeof(*tfp410_encoder),
- GFP_KERNEL);
- if (!tfp410_encoder)
- return NULL;
-
- tfp410_encoder->dpms = DRM_MODE_DPMS_OFF;
- tfp410_encoder->mod = mod;
-
- encoder = &tfp410_encoder->base;
- encoder->possible_crtcs = 1;
-
- ret = drm_encoder_init(dev, encoder, &tfp410_encoder_funcs,
- DRM_MODE_ENCODER_TMDS, NULL);
- if (ret < 0)
- goto fail;
-
- drm_encoder_helper_add(encoder, &tfp410_encoder_helper_funcs);
-
- return encoder;
-
-fail:
- drm_encoder_cleanup(encoder);
- return NULL;
-}
-
-/*
- * Connector:
- */
-
-struct tfp410_connector {
- struct drm_connector base;
-
- struct drm_encoder *encoder; /* our connected encoder */
- struct tfp410_module *mod;
-};
-#define to_tfp410_connector(x) container_of(x, struct tfp410_connector, base)
-
-
-static void tfp410_connector_destroy(struct drm_connector *connector)
-{
- drm_connector_unregister(connector);
- drm_connector_cleanup(connector);
-}
-
-static enum drm_connector_status tfp410_connector_detect(
- struct drm_connector *connector,
- bool force)
-{
- struct tfp410_connector *tfp410_connector = to_tfp410_connector(connector);
-
- if (drm_probe_ddc(tfp410_connector->mod->i2c))
- return connector_status_connected;
-
- return connector_status_unknown;
-}
-
-static int tfp410_connector_get_modes(struct drm_connector *connector)
-{
- struct tfp410_connector *tfp410_connector = to_tfp410_connector(connector);
- struct edid *edid;
- int ret = 0;
-
- edid = drm_get_edid(connector, tfp410_connector->mod->i2c);
-
- drm_connector_update_edid_property(connector, edid);
-
- if (edid) {
- ret = drm_add_edid_modes(connector, edid);
- kfree(edid);
- }
-
- return ret;
-}
-
-static struct drm_encoder *tfp410_connector_best_encoder(
- struct drm_connector *connector)
-{
- struct tfp410_connector *tfp410_connector = to_tfp410_connector(connector);
- return tfp410_connector->encoder;
-}
-
-static const struct drm_connector_funcs tfp410_connector_funcs = {
- .destroy = tfp410_connector_destroy,
- .detect = tfp410_connector_detect,
- .fill_modes = drm_helper_probe_single_connector_modes,
- .reset = drm_atomic_helper_connector_reset,
- .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
- .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
-};
-
-static const struct drm_connector_helper_funcs tfp410_connector_helper_funcs = {
- .get_modes = tfp410_connector_get_modes,
- .best_encoder = tfp410_connector_best_encoder,
-};
-
-static struct drm_connector *tfp410_connector_create(struct drm_device *dev,
- struct tfp410_module *mod, struct drm_encoder *encoder)
-{
- struct tfp410_connector *tfp410_connector;
- struct drm_connector *connector;
- int ret;
-
- tfp410_connector = devm_kzalloc(dev->dev, sizeof(*tfp410_connector),
- GFP_KERNEL);
- if (!tfp410_connector)
- return NULL;
-
- tfp410_connector->encoder = encoder;
- tfp410_connector->mod = mod;
-
- connector = &tfp410_connector->base;
-
- drm_connector_init(dev, connector, &tfp410_connector_funcs,
- DRM_MODE_CONNECTOR_DVID);
- drm_connector_helper_add(connector, &tfp410_connector_helper_funcs);
-
- connector->polled = DRM_CONNECTOR_POLL_CONNECT |
- DRM_CONNECTOR_POLL_DISCONNECT;
-
- connector->interlace_allowed = 0;
- connector->doublescan_allowed = 0;
-
- ret = drm_connector_attach_encoder(connector, encoder);
- if (ret)
- goto fail;
-
- return connector;
-
-fail:
- tfp410_connector_destroy(connector);
- return NULL;
-}
-
-/*
- * Module:
- */
-
-static int tfp410_modeset_init(struct tilcdc_module *mod, struct drm_device *dev)
-{
- struct tfp410_module *tfp410_mod = to_tfp410_module(mod);
- struct tilcdc_drm_private *priv = dev->dev_private;
- struct drm_encoder *encoder;
- struct drm_connector *connector;
-
- encoder = tfp410_encoder_create(dev, tfp410_mod);
- if (!encoder)
- return -ENOMEM;
-
- connector = tfp410_connector_create(dev, tfp410_mod, encoder);
- if (!connector)
- return -ENOMEM;
-
- priv->encoders[priv->num_encoders++] = encoder;
- priv->connectors[priv->num_connectors++] = connector;
-
- tilcdc_crtc_set_panel_info(priv->crtc, &dvi_info);
- return 0;
-}
-
-static const struct tilcdc_module_ops tfp410_module_ops = {
- .modeset_init = tfp410_modeset_init,
-};
-
-/*
- * Device:
- */
-
-static int tfp410_probe(struct platform_device *pdev)
-{
- struct device_node *node = pdev->dev.of_node;
- struct device_node *i2c_node;
- struct tfp410_module *tfp410_mod;
- struct tilcdc_module *mod;
- struct pinctrl *pinctrl;
- uint32_t i2c_phandle;
- int ret = -EINVAL;
-
- /* bail out early if no DT data: */
- if (!node) {
- dev_err(&pdev->dev, "device-tree data is missing\n");
- return -ENXIO;
- }
-
- tfp410_mod = devm_kzalloc(&pdev->dev, sizeof(*tfp410_mod), GFP_KERNEL);
- if (!tfp410_mod)
- return -ENOMEM;
-
- mod = &tfp410_mod->base;
- pdev->dev.platform_data = mod;
-
- tilcdc_module_init(mod, "tfp410", &tfp410_module_ops);
-
- pinctrl = devm_pinctrl_get_select_default(&pdev->dev);
- if (IS_ERR(pinctrl))
- dev_warn(&pdev->dev, "pins are not configured\n");
-
- if (of_property_read_u32(node, "i2c", &i2c_phandle)) {
- dev_err(&pdev->dev, "could not get i2c bus phandle\n");
- goto fail;
- }
-
- i2c_node = of_find_node_by_phandle(i2c_phandle);
- if (!i2c_node) {
- dev_err(&pdev->dev, "could not get i2c bus node\n");
- goto fail;
- }
-
- tfp410_mod->i2c = of_find_i2c_adapter_by_node(i2c_node);
- if (!tfp410_mod->i2c) {
- dev_err(&pdev->dev, "could not get i2c\n");
- of_node_put(i2c_node);
- goto fail;
- }
-
- of_node_put(i2c_node);
-
- tfp410_mod->gpio = of_get_named_gpio_flags(node, "powerdn-gpio",
- 0, NULL);
- if (tfp410_mod->gpio < 0) {
- dev_warn(&pdev->dev, "No power down GPIO\n");
- } else {
- ret = gpio_request(tfp410_mod->gpio, "DVI_PDn");
- if (ret) {
- dev_err(&pdev->dev, "could not get DVI_PDn gpio\n");
- goto fail_adapter;
- }
- }
-
- return 0;
-
-fail_adapter:
- i2c_put_adapter(tfp410_mod->i2c);
-
-fail:
- tilcdc_module_cleanup(mod);
- return ret;
-}
-
-static int tfp410_remove(struct platform_device *pdev)
-{
- struct tilcdc_module *mod = dev_get_platdata(&pdev->dev);
- struct tfp410_module *tfp410_mod = to_tfp410_module(mod);
-
- i2c_put_adapter(tfp410_mod->i2c);
- gpio_free(tfp410_mod->gpio);
-
- tilcdc_module_cleanup(mod);
-
- return 0;
-}
-
-static const struct of_device_id tfp410_of_match[] = {
- { .compatible = "ti,tilcdc,tfp410", },
- { },
-};
-
-struct platform_driver tfp410_driver = {
- .probe = tfp410_probe,
- .remove = tfp410_remove,
- .driver = {
- .owner = THIS_MODULE,
- .name = "tfp410",
- .of_match_table = tfp410_of_match,
- },
-};
-
-int __init tilcdc_tfp410_init(void)
-{
- return platform_driver_register(&tfp410_driver);
-}
-
-void __exit tilcdc_tfp410_fini(void)
-{
- platform_driver_unregister(&tfp410_driver);
-}
diff --git a/drivers/gpu/drm/tilcdc/tilcdc_tfp410.h b/drivers/gpu/drm/tilcdc/tilcdc_tfp410.h
deleted file mode 100644
index f9aaf6911ffc..000000000000
--- a/drivers/gpu/drm/tilcdc/tilcdc_tfp410.h
+++ /dev/null
@@ -1,15 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * Copyright (C) 2012 Texas Instruments
- * Author: Rob Clark <robdclark@gmail.com>
- */
-
-#ifndef __TILCDC_TFP410_H__
-#define __TILCDC_TFP410_H__
-
-/* sub-module for tfp410 dvi adaptor */
-
-int tilcdc_tfp410_init(void);
-void tilcdc_tfp410_fini(void);
-
-#endif /* __TILCDC_TFP410_H__ */
diff --git a/drivers/gpu/drm/tiny/st7586.c b/drivers/gpu/drm/tiny/st7586.c
index 3cc21a1b30c8..060cc756194f 100644
--- a/drivers/gpu/drm/tiny/st7586.c
+++ b/drivers/gpu/drm/tiny/st7586.c
@@ -240,7 +240,7 @@ static void st7586_pipe_enable(struct drm_simple_display_pipe *pipe,
mipi_dbi_command(dbi, ST7586_SET_DISP_DUTY, 0x7f);
mipi_dbi_command(dbi, ST7586_SET_PART_DISP, 0xa0);
- mipi_dbi_command(dbi, MIPI_DCS_SET_PARTIAL_AREA, 0x00, 0x00, 0x00, 0x77);
+ mipi_dbi_command(dbi, MIPI_DCS_SET_PARTIAL_ROWS, 0x00, 0x00, 0x00, 0x77);
mipi_dbi_command(dbi, MIPI_DCS_EXIT_INVERT_MODE);
msleep(100);
diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c
index 8d91b0428af1..5df596fb0280 100644
--- a/drivers/gpu/drm/ttm/ttm_bo.c
+++ b/drivers/gpu/drm/ttm/ttm_bo.c
@@ -161,7 +161,6 @@ static void ttm_bo_release_list(struct kref *list_kref)
dma_fence_put(bo->moving);
if (!ttm_bo_uses_embedded_gem_object(bo))
dma_resv_fini(&bo->base._resv);
- mutex_destroy(&bo->wu_mutex);
bo->destroy(bo);
ttm_mem_global_free(&ttm_mem_glob, acc_size);
}
@@ -1299,7 +1298,6 @@ int ttm_bo_init_reserved(struct ttm_bo_device *bdev,
INIT_LIST_HEAD(&bo->ddestroy);
INIT_LIST_HEAD(&bo->swap);
INIT_LIST_HEAD(&bo->io_reserve_lru);
- mutex_init(&bo->wu_mutex);
bo->bdev = bdev;
bo->type = type;
bo->num_pages = num_pages;
@@ -1903,37 +1901,3 @@ void ttm_bo_swapout_all(struct ttm_bo_device *bdev)
while (ttm_bo_swapout(&ttm_bo_glob, &ctx) == 0);
}
EXPORT_SYMBOL(ttm_bo_swapout_all);
-
-/**
- * ttm_bo_wait_unreserved - interruptible wait for a buffer object to become
- * unreserved
- *
- * @bo: Pointer to buffer
- */
-int ttm_bo_wait_unreserved(struct ttm_buffer_object *bo)
-{
- int ret;
-
- /*
- * In the absence of a wait_unlocked API,
- * use the bo::wu_mutex to avoid triggering livelocks due to
- * concurrent use of this function. Note that this use of
- * bo::wu_mutex can go away if we change locking order to
- * mmap_sem -> bo::reserve.
- */
- ret = mutex_lock_interruptible(&bo->wu_mutex);
- if (unlikely(ret != 0))
- return -ERESTARTSYS;
- if (!dma_resv_is_locked(bo->base.resv))
- goto out_unlock;
- ret = dma_resv_lock_interruptible(bo->base.resv, NULL);
- if (ret == -EINTR)
- ret = -ERESTARTSYS;
- if (unlikely(ret != 0))
- goto out_unlock;
- dma_resv_unlock(bo->base.resv);
-
-out_unlock:
- mutex_unlock(&bo->wu_mutex);
- return ret;
-}
diff --git a/drivers/gpu/drm/ttm/ttm_bo_util.c b/drivers/gpu/drm/ttm/ttm_bo_util.c
index 6b0883a1776e..49ed55779128 100644
--- a/drivers/gpu/drm/ttm/ttm_bo_util.c
+++ b/drivers/gpu/drm/ttm/ttm_bo_util.c
@@ -218,7 +218,7 @@ static int ttm_mem_reg_ioremap(struct ttm_bo_device *bdev, struct ttm_mem_reg *m
if (mem->placement & TTM_PL_FLAG_WC)
addr = ioremap_wc(mem->bus.base + mem->bus.offset, mem->bus.size);
else
- addr = ioremap_nocache(mem->bus.base + mem->bus.offset, mem->bus.size);
+ addr = ioremap(mem->bus.base + mem->bus.offset, mem->bus.size);
if (!addr) {
(void) ttm_mem_io_lock(man, false);
ttm_mem_io_free(bdev, mem);
@@ -504,7 +504,6 @@ static int ttm_buffer_object_transfer(struct ttm_buffer_object *bo,
INIT_LIST_HEAD(&fbo->base.lru);
INIT_LIST_HEAD(&fbo->base.swap);
INIT_LIST_HEAD(&fbo->base.io_reserve_lru);
- mutex_init(&fbo->base.wu_mutex);
fbo->base.moving = NULL;
drm_vma_node_reset(&fbo->base.base.vma_node);
@@ -565,7 +564,7 @@ static int ttm_bo_ioremap(struct ttm_buffer_object *bo,
map->virtual = ioremap_wc(bo->mem.bus.base + bo->mem.bus.offset + offset,
size);
else
- map->virtual = ioremap_nocache(bo->mem.bus.base + bo->mem.bus.offset + offset,
+ map->virtual = ioremap(bo->mem.bus.base + bo->mem.bus.offset + offset,
size);
}
return (!map->virtual) ? -ENOMEM : 0;
diff --git a/drivers/gpu/drm/ttm/ttm_bo_vm.c b/drivers/gpu/drm/ttm/ttm_bo_vm.c
index 11863fbdd5d6..eebb4c06c04d 100644
--- a/drivers/gpu/drm/ttm/ttm_bo_vm.c
+++ b/drivers/gpu/drm/ttm/ttm_bo_vm.c
@@ -139,19 +139,17 @@ vm_fault_t ttm_bo_vm_reserve(struct ttm_buffer_object *bo,
if (!(vmf->flags & FAULT_FLAG_RETRY_NOWAIT)) {
ttm_bo_get(bo);
up_read(&vmf->vma->vm_mm->mmap_sem);
- (void) ttm_bo_wait_unreserved(bo);
+ if (!dma_resv_lock_interruptible(bo->base.resv,
+ NULL))
+ dma_resv_unlock(bo->base.resv);
ttm_bo_put(bo);
}
return VM_FAULT_RETRY;
}
- /*
- * If we'd want to change locking order to
- * mmap_sem -> bo::reserve, we'd use a blocking reserve here
- * instead of retrying the fault...
- */
- return VM_FAULT_NOPAGE;
+ if (dma_resv_lock_interruptible(bo->base.resv, NULL))
+ return VM_FAULT_NOPAGE;
}
return 0;
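
The open-coded replacement for ttm_bo_wait_unreserved() makes the retry dance explicit: drop mmap_sem, block until the reservation becomes free (an interruptible lock followed by an immediate unlock), then return VM_FAULT_RETRY so the fault is re-run with the lock probably available. Condensed into one helper (sketch only):

	static vm_fault_t ttm_wait_and_retry(struct ttm_buffer_object *bo,
					     struct vm_fault *vmf)
	{
		ttm_bo_get(bo);
		up_read(&vmf->vma->vm_mm->mmap_sem);

		/* wait for whoever holds the reservation, then let go */
		if (!dma_resv_lock_interruptible(bo->base.resv, NULL))
			dma_resv_unlock(bo->base.resv);

		ttm_bo_put(bo);
		return VM_FAULT_RETRY;
	}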
@@ -316,7 +314,7 @@ out_io_unlock:
}
EXPORT_SYMBOL(ttm_bo_vm_fault_reserved);
-static vm_fault_t ttm_bo_vm_fault(struct vm_fault *vmf)
+vm_fault_t ttm_bo_vm_fault(struct vm_fault *vmf)
{
struct vm_area_struct *vma = vmf->vma;
pgprot_t prot;
@@ -336,6 +334,7 @@ static vm_fault_t ttm_bo_vm_fault(struct vm_fault *vmf)
return ret;
}
+EXPORT_SYMBOL(ttm_bo_vm_fault);
void ttm_bo_vm_open(struct vm_area_struct *vma)
{
@@ -395,8 +394,8 @@ static int ttm_bo_vm_access_kmap(struct ttm_buffer_object *bo,
return len;
}
-static int ttm_bo_vm_access(struct vm_area_struct *vma, unsigned long addr,
- void *buf, int len, int write)
+int ttm_bo_vm_access(struct vm_area_struct *vma, unsigned long addr,
+ void *buf, int len, int write)
{
unsigned long offset = (addr) - vma->vm_start;
struct ttm_buffer_object *bo = vma->vm_private_data;
@@ -432,6 +431,7 @@ static int ttm_bo_vm_access(struct vm_area_struct *vma, unsigned long addr,
return ret;
}
+EXPORT_SYMBOL(ttm_bo_vm_access);
static const struct vm_operations_struct ttm_bo_vm_ops = {
.fault = ttm_bo_vm_fault,
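
Dropping static and exporting ttm_bo_vm_fault() and ttm_bo_vm_access() turns them into building blocks: a TTM driver can now install them in its own vm_operations_struct instead of being forced through ttm_bo_vm_ops. Hypothetical use, assuming ttm_bo_vm_open()/close() are exported alongside:

	static const struct vm_operations_struct mydrv_ttm_vm_ops = {
		.fault = ttm_bo_vm_fault,
		.open = ttm_bo_vm_open,
		.close = ttm_bo_vm_close,
		.access = ttm_bo_vm_access,
	};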
@@ -520,13 +520,6 @@ EXPORT_SYMBOL(ttm_bo_mmap);
int ttm_bo_mmap_obj(struct vm_area_struct *vma, struct ttm_buffer_object *bo)
{
ttm_bo_get(bo);
-
- /*
- * FIXME: &drm_gem_object_funcs.mmap is called with the fake offset
- * removed. Add it back here until the rest of TTM works without it.
- */
- vma->vm_pgoff += drm_vma_node_start(&bo->base.vma_node);
-
ttm_bo_mmap_vma_setup(bo, vma);
return 0;
}
diff --git a/drivers/gpu/drm/ttm/ttm_tt.c b/drivers/gpu/drm/ttm/ttm_tt.c
index e0e9b4f69db6..2ec448e1d663 100644
--- a/drivers/gpu/drm/ttm/ttm_tt.c
+++ b/drivers/gpu/drm/ttm/ttm_tt.c
@@ -223,8 +223,9 @@ void ttm_tt_destroy(struct ttm_tt *ttm)
ttm->func->destroy(ttm);
}
-void ttm_tt_init_fields(struct ttm_tt *ttm, struct ttm_buffer_object *bo,
- uint32_t page_flags)
+static void ttm_tt_init_fields(struct ttm_tt *ttm,
+ struct ttm_buffer_object *bo,
+ uint32_t page_flags)
{
ttm->bdev = bo->bdev;
ttm->num_pages = bo->num_pages;
diff --git a/drivers/gpu/drm/tve200/tve200_drv.c b/drivers/gpu/drm/tve200/tve200_drv.c
index 954b09c948eb..00ba9e5ce130 100644
--- a/drivers/gpu/drm/tve200/tve200_drv.c
+++ b/drivers/gpu/drm/tve200/tve200_drv.c
@@ -110,7 +110,7 @@ static int tve200_modeset_init(struct drm_device *dev)
}
priv->panel = panel;
- priv->connector = panel->connector;
+ priv->connector = drm_panel_bridge_connector(bridge);
priv->bridge = bridge;
dev_info(dev->dev, "attached to panel %s\n",
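
struct drm_panel no longer carries a connector backpointer, so drivers that read panel->connector now ask the panel-bridge glue instead. The helper assumed here:

	struct drm_connector *drm_panel_bridge_connector(struct drm_bridge *bridge);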
diff --git a/drivers/gpu/drm/udl/Kconfig b/drivers/gpu/drm/udl/Kconfig
index b4d179b87f01..1f497d8f1ae5 100644
--- a/drivers/gpu/drm/udl/Kconfig
+++ b/drivers/gpu/drm/udl/Kconfig
@@ -2,10 +2,10 @@
config DRM_UDL
tristate "DisplayLink"
depends on DRM
- depends on USB_SUPPORT
+ depends on USB
depends on USB_ARCH_HAS_HCD
- select USB
+ select DRM_GEM_SHMEM_HELPER
select DRM_KMS_HELPER
help
This is a KMS driver for the USB displaylink video adapters.
- Say M/Y to add support for these devices via drm/kms interfaces.
+ Say M/Y to add support for these devices via drm/kms interfaces.
diff --git a/drivers/gpu/drm/udl/Makefile b/drivers/gpu/drm/udl/Makefile
index e5bb6f757e11..b50179bb4de0 100644
--- a/drivers/gpu/drm/udl/Makefile
+++ b/drivers/gpu/drm/udl/Makefile
@@ -1,4 +1,4 @@
# SPDX-License-Identifier: GPL-2.0-only
-udl-y := udl_drv.o udl_modeset.o udl_connector.o udl_encoder.o udl_main.o udl_fb.o udl_transfer.o udl_gem.o udl_dmabuf.o
+udl-y := udl_drv.o udl_modeset.o udl_connector.o udl_main.o udl_transfer.o udl_gem.o
obj-$(CONFIG_DRM_UDL) := udl.o
diff --git a/drivers/gpu/drm/udl/udl_connector.c b/drivers/gpu/drm/udl/udl_connector.c
index b4ae3e89a7b4..e9671d38b4a0 100644
--- a/drivers/gpu/drm/udl/udl_connector.c
+++ b/drivers/gpu/drm/udl/udl_connector.c
@@ -7,6 +7,7 @@
* Copyright (C) 2009 Bernie Thompson <bernie@plugable.com>
*/
+#include <drm/drm_atomic_state_helper.h>
#include <drm/drm_crtc_helper.h>
#include <drm/drm_probe_helper.h>
@@ -90,13 +91,6 @@ udl_detect(struct drm_connector *connector, bool force)
return connector_status_connected;
}
-static int udl_connector_set_property(struct drm_connector *connector,
- struct drm_property *property,
- uint64_t val)
-{
- return 0;
-}
-
static void udl_connector_destroy(struct drm_connector *connector)
{
struct udl_drm_connector *udl_connector =
@@ -104,7 +98,6 @@ static void udl_connector_destroy(struct drm_connector *connector)
struct udl_drm_connector,
connector);
- drm_connector_unregister(connector);
drm_connector_cleanup(connector);
kfree(udl_connector->edid);
kfree(connector);
@@ -117,30 +110,30 @@ static const struct drm_connector_helper_funcs udl_connector_helper_funcs = {
static const struct drm_connector_funcs udl_connector_funcs = {
.dpms = drm_helper_connector_dpms,
+ .reset = drm_atomic_helper_connector_reset,
.detect = udl_detect,
.fill_modes = drm_helper_probe_single_connector_modes,
.destroy = udl_connector_destroy,
- .set_property = udl_connector_set_property,
+ .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
+ .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
};
-int udl_connector_init(struct drm_device *dev, struct drm_encoder *encoder)
+struct drm_connector *udl_connector_init(struct drm_device *dev)
{
struct udl_drm_connector *udl_connector;
struct drm_connector *connector;
udl_connector = kzalloc(sizeof(struct udl_drm_connector), GFP_KERNEL);
if (!udl_connector)
- return -ENOMEM;
+ return ERR_PTR(-ENOMEM);
connector = &udl_connector->connector;
drm_connector_init(dev, connector, &udl_connector_funcs,
DRM_MODE_CONNECTOR_DVII);
drm_connector_helper_add(connector, &udl_connector_helper_funcs);
- drm_connector_register(connector);
- drm_connector_attach_encoder(connector, encoder);
connector->polled = DRM_CONNECTOR_POLL_HPD |
DRM_CONNECTOR_POLL_CONNECT | DRM_CONNECTOR_POLL_DISCONNECT;
- return 0;
+ return connector;
}
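
Returning the connector (or an ERR_PTR) moves registration and encoder attachment out of this function and into the modeset code. A hypothetical caller, in the simple-display-pipe style the driver is converted to (the funcs/formats table names are placeholders):

	connector = udl_connector_init(dev);
	if (IS_ERR(connector))
		return PTR_ERR(connector);

	err = drm_simple_display_pipe_init(dev, &udl->display_pipe,
					   &udl_pipe_funcs, udl_formats,
					   ARRAY_SIZE(udl_formats), NULL,
					   connector);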
diff --git a/drivers/gpu/drm/udl/udl_dmabuf.c b/drivers/gpu/drm/udl/udl_dmabuf.c
deleted file mode 100644
index 3108e9a9234b..000000000000
--- a/drivers/gpu/drm/udl/udl_dmabuf.c
+++ /dev/null
@@ -1,255 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-/*
- * udl_dmabuf.c
- *
- * Copyright (c) 2014 The Chromium OS Authors
- */
-
-#include <linux/shmem_fs.h>
-#include <linux/dma-buf.h>
-
-#include <drm/drm_prime.h>
-
-#include "udl_drv.h"
-
-struct udl_drm_dmabuf_attachment {
- struct sg_table sgt;
- enum dma_data_direction dir;
- bool is_mapped;
-};
-
-static int udl_attach_dma_buf(struct dma_buf *dmabuf,
- struct dma_buf_attachment *attach)
-{
- struct udl_drm_dmabuf_attachment *udl_attach;
-
- DRM_DEBUG_PRIME("[DEV:%s] size:%zd\n", dev_name(attach->dev),
- attach->dmabuf->size);
-
- udl_attach = kzalloc(sizeof(*udl_attach), GFP_KERNEL);
- if (!udl_attach)
- return -ENOMEM;
-
- udl_attach->dir = DMA_NONE;
- attach->priv = udl_attach;
-
- return 0;
-}
-
-static void udl_detach_dma_buf(struct dma_buf *dmabuf,
- struct dma_buf_attachment *attach)
-{
- struct udl_drm_dmabuf_attachment *udl_attach = attach->priv;
- struct sg_table *sgt;
-
- if (!udl_attach)
- return;
-
- DRM_DEBUG_PRIME("[DEV:%s] size:%zd\n", dev_name(attach->dev),
- attach->dmabuf->size);
-
- sgt = &udl_attach->sgt;
-
- if (udl_attach->dir != DMA_NONE)
- dma_unmap_sg(attach->dev, sgt->sgl, sgt->nents,
- udl_attach->dir);
-
- sg_free_table(sgt);
- kfree(udl_attach);
- attach->priv = NULL;
-}
-
-static struct sg_table *udl_map_dma_buf(struct dma_buf_attachment *attach,
- enum dma_data_direction dir)
-{
- struct udl_drm_dmabuf_attachment *udl_attach = attach->priv;
- struct udl_gem_object *obj = to_udl_bo(attach->dmabuf->priv);
- struct drm_device *dev = obj->base.dev;
- struct udl_device *udl = dev->dev_private;
- struct scatterlist *rd, *wr;
- struct sg_table *sgt = NULL;
- unsigned int i;
- int page_count;
- int nents, ret;
-
- DRM_DEBUG_PRIME("[DEV:%s] size:%zd dir=%d\n", dev_name(attach->dev),
- attach->dmabuf->size, dir);
-
- /* just return current sgt if already requested. */
- if (udl_attach->dir == dir && udl_attach->is_mapped)
- return &udl_attach->sgt;
-
- if (!obj->pages) {
- ret = udl_gem_get_pages(obj);
- if (ret) {
- DRM_ERROR("failed to map pages.\n");
- return ERR_PTR(ret);
- }
- }
-
- page_count = obj->base.size / PAGE_SIZE;
- obj->sg = drm_prime_pages_to_sg(obj->pages, page_count);
- if (IS_ERR(obj->sg)) {
- DRM_ERROR("failed to allocate sgt.\n");
- return ERR_CAST(obj->sg);
- }
-
- sgt = &udl_attach->sgt;
-
- ret = sg_alloc_table(sgt, obj->sg->orig_nents, GFP_KERNEL);
- if (ret) {
- DRM_ERROR("failed to alloc sgt.\n");
- return ERR_PTR(-ENOMEM);
- }
-
- mutex_lock(&udl->gem_lock);
-
- rd = obj->sg->sgl;
- wr = sgt->sgl;
- for (i = 0; i < sgt->orig_nents; ++i) {
- sg_set_page(wr, sg_page(rd), rd->length, rd->offset);
- rd = sg_next(rd);
- wr = sg_next(wr);
- }
-
- if (dir != DMA_NONE) {
- nents = dma_map_sg(attach->dev, sgt->sgl, sgt->orig_nents, dir);
- if (!nents) {
- DRM_ERROR("failed to map sgl with iommu.\n");
- sg_free_table(sgt);
- sgt = ERR_PTR(-EIO);
- goto err_unlock;
- }
- }
-
- udl_attach->is_mapped = true;
- udl_attach->dir = dir;
- attach->priv = udl_attach;
-
-err_unlock:
- mutex_unlock(&udl->gem_lock);
- return sgt;
-}
-
-static void udl_unmap_dma_buf(struct dma_buf_attachment *attach,
- struct sg_table *sgt,
- enum dma_data_direction dir)
-{
- /* Nothing to do. */
- DRM_DEBUG_PRIME("[DEV:%s] size:%zd dir:%d\n", dev_name(attach->dev),
- attach->dmabuf->size, dir);
-}
-
-static void *udl_dmabuf_kmap(struct dma_buf *dma_buf, unsigned long page_num)
-{
- /* TODO */
-
- return NULL;
-}
-
-static void udl_dmabuf_kunmap(struct dma_buf *dma_buf,
- unsigned long page_num, void *addr)
-{
- /* TODO */
-}
-
-static int udl_dmabuf_mmap(struct dma_buf *dma_buf,
- struct vm_area_struct *vma)
-{
- /* TODO */
-
- return -EINVAL;
-}
-
-static const struct dma_buf_ops udl_dmabuf_ops = {
- .attach = udl_attach_dma_buf,
- .detach = udl_detach_dma_buf,
- .map_dma_buf = udl_map_dma_buf,
- .unmap_dma_buf = udl_unmap_dma_buf,
- .map = udl_dmabuf_kmap,
- .unmap = udl_dmabuf_kunmap,
- .mmap = udl_dmabuf_mmap,
- .release = drm_gem_dmabuf_release,
-};
-
-struct dma_buf *udl_gem_prime_export(struct drm_gem_object *obj, int flags)
-{
- DEFINE_DMA_BUF_EXPORT_INFO(exp_info);
-
- exp_info.ops = &udl_dmabuf_ops;
- exp_info.size = obj->size;
- exp_info.flags = flags;
- exp_info.priv = obj;
-
- return drm_gem_dmabuf_export(obj->dev, &exp_info);
-}
-
-static int udl_prime_create(struct drm_device *dev,
- size_t size,
- struct sg_table *sg,
- struct udl_gem_object **obj_p)
-{
- struct udl_gem_object *obj;
- int npages;
-
- npages = size / PAGE_SIZE;
-
- *obj_p = NULL;
- obj = udl_gem_alloc_object(dev, npages * PAGE_SIZE);
- if (!obj)
- return -ENOMEM;
-
- obj->sg = sg;
- obj->pages = kvmalloc_array(npages, sizeof(struct page *), GFP_KERNEL);
- if (obj->pages == NULL) {
- DRM_ERROR("obj pages is NULL %d\n", npages);
- return -ENOMEM;
- }
-
- drm_prime_sg_to_page_addr_arrays(sg, obj->pages, NULL, npages);
-
- *obj_p = obj;
- return 0;
-}
-
-struct drm_gem_object *udl_gem_prime_import(struct drm_device *dev,
- struct dma_buf *dma_buf)
-{
- struct dma_buf_attachment *attach;
- struct sg_table *sg;
- struct udl_gem_object *uobj;
- int ret;
-
- /* need to attach */
- get_device(dev->dev);
- attach = dma_buf_attach(dma_buf, dev->dev);
- if (IS_ERR(attach)) {
- put_device(dev->dev);
- return ERR_CAST(attach);
- }
-
- get_dma_buf(dma_buf);
-
- sg = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL);
- if (IS_ERR(sg)) {
- ret = PTR_ERR(sg);
- goto fail_detach;
- }
-
- ret = udl_prime_create(dev, dma_buf->size, sg, &uobj);
- if (ret)
- goto fail_unmap;
-
- uobj->base.import_attach = attach;
- uobj->flags = UDL_BO_WC;
-
- return &uobj->base;
-
-fail_unmap:
- dma_buf_unmap_attachment(attach, sg, DMA_BIDIRECTIONAL);
-fail_detach:
- dma_buf_detach(dma_buf, attach);
- dma_buf_put(dma_buf);
- put_device(dev->dev);
- return ERR_PTR(ret);
-}
diff --git a/drivers/gpu/drm/udl/udl_drv.c b/drivers/gpu/drm/udl/udl_drv.c
index 8426669433e4..e6c1cd77d4d4 100644
--- a/drivers/gpu/drm/udl/udl_drv.c
+++ b/drivers/gpu/drm/udl/udl_drv.c
@@ -7,7 +7,9 @@
#include <drm/drm_crtc_helper.h>
#include <drm/drm_drv.h>
+#include <drm/drm_fb_helper.h>
#include <drm/drm_file.h>
+#include <drm/drm_gem_shmem_helper.h>
#include <drm/drm_ioctl.h>
#include <drm/drm_probe_helper.h>
#include <drm/drm_print.h>
@@ -19,36 +21,17 @@ static int udl_usb_suspend(struct usb_interface *interface,
{
struct drm_device *dev = usb_get_intfdata(interface);
- drm_kms_helper_poll_disable(dev);
- return 0;
+ return drm_mode_config_helper_suspend(dev);
}
static int udl_usb_resume(struct usb_interface *interface)
{
struct drm_device *dev = usb_get_intfdata(interface);
- drm_kms_helper_poll_enable(dev);
- udl_modeset_restore(dev);
- return 0;
+ return drm_mode_config_helper_resume(dev);
}
-static const struct vm_operations_struct udl_gem_vm_ops = {
- .fault = udl_gem_fault,
- .open = drm_gem_vm_open,
- .close = drm_gem_vm_close,
-};
-
-static const struct file_operations udl_driver_fops = {
- .owner = THIS_MODULE,
- .open = drm_open,
- .mmap = udl_drm_gem_mmap,
- .poll = drm_poll,
- .read = drm_read,
- .unlocked_ioctl = drm_ioctl,
- .release = drm_release,
- .compat_ioctl = drm_compat_ioctl,
- .llseek = noop_llseek,
-};
+DEFINE_DRM_GEM_FOPS(udl_driver_fops);
static void udl_driver_release(struct drm_device *dev)
{
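
DEFINE_DRM_GEM_FOPS() stands in for the table deleted above; its expansion is roughly the removed block with the generic drm_gem_mmap() replacing the driver's hand-rolled udl_drm_gem_mmap() (paraphrased from include/drm/drm_gem.h):

	static const struct file_operations udl_driver_fops = {
		.owner		= THIS_MODULE,
		.open		= drm_open,
		.release	= drm_release,
		.unlocked_ioctl	= drm_ioctl,
		.compat_ioctl	= drm_compat_ioctl,
		.poll		= drm_poll,
		.read		= drm_read,
		.llseek		= noop_llseek,
		.mmap		= drm_gem_mmap,
	};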
@@ -59,21 +42,14 @@ static void udl_driver_release(struct drm_device *dev)
}
static struct drm_driver driver = {
- .driver_features = DRIVER_MODESET | DRIVER_GEM,
+ .driver_features = DRIVER_ATOMIC | DRIVER_GEM | DRIVER_MODESET,
.release = udl_driver_release,
/* gem hooks */
- .gem_free_object_unlocked = udl_gem_free_object,
- .gem_vm_ops = &udl_gem_vm_ops,
+ .gem_create_object = udl_driver_gem_create_object,
- .dumb_create = udl_dumb_create,
- .dumb_map_offset = udl_gem_mmap,
.fops = &udl_driver_fops,
-
- .prime_handle_to_fd = drm_gem_prime_handle_to_fd,
- .prime_fd_to_handle = drm_gem_prime_fd_to_handle,
- .gem_prime_export = udl_gem_prime_export,
- .gem_prime_import = udl_gem_prime_import,
+ DRM_GEM_SHMEM_DRIVER_OPS,
.name = DRIVER_NAME,
.desc = DRIVER_DESC,
@@ -129,8 +105,14 @@ static int udl_usb_probe(struct usb_interface *interface,
DRM_INFO("Initialized udl on minor %d\n", udl->drm.primary->index);
+ r = drm_fbdev_generic_setup(&udl->drm, 0);
+ if (r)
+ goto err_drm_dev_unregister;
+
return 0;
+err_drm_dev_unregister:
+ drm_dev_unregister(&udl->drm);
err_free:
drm_dev_put(&udl->drm);
return r;
@@ -141,7 +123,6 @@ static void udl_usb_disconnect(struct usb_interface *interface)
struct drm_device *dev = usb_get_intfdata(interface);
drm_kms_helper_poll_disable(dev);
- udl_fbdev_unplug(dev);
udl_drop_usb(dev);
drm_dev_unplug(dev);
drm_dev_put(dev);
diff --git a/drivers/gpu/drm/udl/udl_drv.h b/drivers/gpu/drm/udl/udl_drv.h
index 12a970fd9a87..e67227c44cc4 100644
--- a/drivers/gpu/drm/udl/udl_drv.h
+++ b/drivers/gpu/drm/udl/udl_drv.h
@@ -17,8 +17,8 @@
#include <drm/drm_device.h>
#include <drm/drm_framebuffer.h>
#include <drm/drm_gem.h>
+#include <drm/drm_simple_kms_helper.h>
-struct drm_encoder;
struct drm_mode_create_dumb;
#define DRIVER_NAME "udl"
@@ -29,9 +29,6 @@ struct drm_mode_create_dumb;
#define DRIVER_MINOR 0
#define DRIVER_PATCHLEVEL 1
-#define UDL_BO_CACHEABLE (1 << 0)
-#define UDL_BO_WC (1 << 1)
-
struct udl_device;
struct urb_node {
@@ -50,57 +47,29 @@ struct urb_list {
size_t size;
};
-struct udl_fbdev;
-
struct udl_device {
struct drm_device drm;
struct device *dev;
struct usb_device *udev;
- struct drm_crtc *crtc;
+
+ struct drm_simple_display_pipe display_pipe;
struct mutex gem_lock;
int sku_pixel_limit;
struct urb_list urbs;
- atomic_t lost_pixels; /* 1 = a render op failed. Need screen refresh */
- struct udl_fbdev *fbdev;
char mode_buf[1024];
uint32_t mode_buf_len;
- atomic_t bytes_rendered; /* raw pixel-bytes driver asked to render */
- atomic_t bytes_identical; /* saved effort with backbuffer comparison */
- atomic_t bytes_sent; /* to usb, after compression including overhead */
- atomic_t cpu_kcycles_used; /* transpired during pixel processing */
};
#define to_udl(x) container_of(x, struct udl_device, drm)
-struct udl_gem_object {
- struct drm_gem_object base;
- struct page **pages;
- void *vmapping;
- struct sg_table *sg;
- unsigned int flags;
-};
-
-#define to_udl_bo(x) container_of(x, struct udl_gem_object, base)
-
-struct udl_framebuffer {
- struct drm_framebuffer base;
- struct udl_gem_object *obj;
- bool active_16; /* active on the 16-bit channel */
-};
-
-#define to_udl_fb(x) container_of(x, struct udl_framebuffer, base)
-
/* modeset */
int udl_modeset_init(struct drm_device *dev);
-void udl_modeset_restore(struct drm_device *dev);
void udl_modeset_cleanup(struct drm_device *dev);
-int udl_connector_init(struct drm_device *dev, struct drm_encoder *encoder);
-
-struct drm_encoder *udl_encoder_init(struct drm_device *dev);
+struct drm_connector *udl_connector_init(struct drm_device *dev);
struct urb *udl_get_urb(struct drm_device *dev);
@@ -110,41 +79,12 @@ void udl_urb_completion(struct urb *urb);
int udl_init(struct udl_device *udl);
void udl_fini(struct drm_device *dev);
-int udl_fbdev_init(struct drm_device *dev);
-void udl_fbdev_cleanup(struct drm_device *dev);
-void udl_fbdev_unplug(struct drm_device *dev);
-struct drm_framebuffer *
-udl_fb_user_fb_create(struct drm_device *dev,
- struct drm_file *file,
- const struct drm_mode_fb_cmd2 *mode_cmd);
-
int udl_render_hline(struct drm_device *dev, int log_bpp, struct urb **urb_ptr,
const char *front, char **urb_buf_ptr,
- u32 byte_offset, u32 device_byte_offset, u32 byte_width,
- int *ident_ptr, int *sent_ptr);
-
-int udl_dumb_create(struct drm_file *file_priv,
- struct drm_device *dev,
- struct drm_mode_create_dumb *args);
-int udl_gem_mmap(struct drm_file *file_priv, struct drm_device *dev,
- uint32_t handle, uint64_t *offset);
-
-void udl_gem_free_object(struct drm_gem_object *gem_obj);
-struct udl_gem_object *udl_gem_alloc_object(struct drm_device *dev,
- size_t size);
-struct dma_buf *udl_gem_prime_export(struct drm_gem_object *obj, int flags);
-struct drm_gem_object *udl_gem_prime_import(struct drm_device *dev,
- struct dma_buf *dma_buf);
-
-int udl_gem_get_pages(struct udl_gem_object *obj);
-void udl_gem_put_pages(struct udl_gem_object *obj);
-int udl_gem_vmap(struct udl_gem_object *obj);
-void udl_gem_vunmap(struct udl_gem_object *obj);
-int udl_drm_gem_mmap(struct file *filp, struct vm_area_struct *vma);
-vm_fault_t udl_gem_fault(struct vm_fault *vmf);
-
-int udl_handle_damage(struct udl_framebuffer *fb, int x, int y,
- int width, int height);
+ u32 byte_offset, u32 device_byte_offset, u32 byte_width);
+
+struct drm_gem_object *udl_driver_gem_create_object(struct drm_device *dev,
+ size_t size);
int udl_drop_usb(struct drm_device *dev);
@@ -158,4 +98,13 @@ int udl_drop_usb(struct drm_device *dev);
#define CMD_WRITE_COPY16 "\xAF\x6A" /**< 16 bit copy command. */
#define CMD_WRITE_RLX16 "\xAF\x6B" /**< 16 bit extended run length command. */
+/* On/Off for driving the DisplayLink framebuffer to the display */
+#define UDL_REG_BLANK_MODE 0x1f
+
+#define UDL_BLANK_MODE_ON 0x00 /* hsync and vsync on, visible */
+#define UDL_BLANK_MODE_BLANKED 0x01 /* hsync and vsync on, blanked */
+#define UDL_BLANK_MODE_VSYNC_OFF 0x03 /* vsync off, blanked */
+#define UDL_BLANK_MODE_HSYNC_OFF 0x05 /* hsync off, blanked */
+#define UDL_BLANK_MODE_POWERDOWN 0x07 /* powered off; requires modeset */
+
#endif
diff --git a/drivers/gpu/drm/udl/udl_encoder.c b/drivers/gpu/drm/udl/udl_encoder.c
deleted file mode 100644
index 203f041e737c..000000000000
--- a/drivers/gpu/drm/udl/udl_encoder.c
+++ /dev/null
@@ -1,70 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Copyright (C) 2012 Red Hat
- * based in parts on udlfb.c:
- * Copyright (C) 2009 Roberto De Ioris <roberto@unbit.it>
- * Copyright (C) 2009 Jaya Kumar <jayakumar.lkml@gmail.com>
- * Copyright (C) 2009 Bernie Thompson <bernie@plugable.com>
- */
-
-#include <drm/drm_encoder.h>
-#include <drm/drm_modeset_helper_vtables.h>
-
-#include "udl_drv.h"
-
-/* dummy encoder */
-static void udl_enc_destroy(struct drm_encoder *encoder)
-{
- drm_encoder_cleanup(encoder);
- kfree(encoder);
-}
-
-static void udl_encoder_disable(struct drm_encoder *encoder)
-{
-}
-
-static void udl_encoder_prepare(struct drm_encoder *encoder)
-{
-}
-
-static void udl_encoder_commit(struct drm_encoder *encoder)
-{
-}
-
-static void udl_encoder_mode_set(struct drm_encoder *encoder,
- struct drm_display_mode *mode,
- struct drm_display_mode *adjusted_mode)
-{
-}
-
-static void
-udl_encoder_dpms(struct drm_encoder *encoder, int mode)
-{
-}
-
-static const struct drm_encoder_helper_funcs udl_helper_funcs = {
- .dpms = udl_encoder_dpms,
- .prepare = udl_encoder_prepare,
- .mode_set = udl_encoder_mode_set,
- .commit = udl_encoder_commit,
- .disable = udl_encoder_disable,
-};
-
-static const struct drm_encoder_funcs udl_enc_funcs = {
- .destroy = udl_enc_destroy,
-};
-
-struct drm_encoder *udl_encoder_init(struct drm_device *dev)
-{
- struct drm_encoder *encoder;
-
- encoder = kzalloc(sizeof(struct drm_encoder), GFP_KERNEL);
- if (!encoder)
- return NULL;
-
- drm_encoder_init(dev, encoder, &udl_enc_funcs, DRM_MODE_ENCODER_TMDS,
- NULL);
- drm_encoder_helper_add(encoder, &udl_helper_funcs);
- encoder->possible_crtcs = 1;
- return encoder;
-}
diff --git a/drivers/gpu/drm/udl/udl_fb.c b/drivers/gpu/drm/udl/udl_fb.c
deleted file mode 100644
index ef3504d06343..000000000000
--- a/drivers/gpu/drm/udl/udl_fb.c
+++ /dev/null
@@ -1,527 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Copyright (C) 2012 Red Hat
- *
- * based in parts on udlfb.c:
- * Copyright (C) 2009 Roberto De Ioris <roberto@unbit.it>
- * Copyright (C) 2009 Jaya Kumar <jayakumar.lkml@gmail.com>
- * Copyright (C) 2009 Bernie Thompson <bernie@plugable.com>
- */
-
-#include <linux/moduleparam.h>
-#include <linux/dma-buf.h>
-
-#include <drm/drm_crtc_helper.h>
-#include <drm/drm_drv.h>
-#include <drm/drm_fb_helper.h>
-#include <drm/drm_fourcc.h>
-#include <drm/drm_modeset_helper.h>
-
-#include "udl_drv.h"
-
-#define DL_DEFIO_WRITE_DELAY (HZ/20) /* fb_deferred_io.delay in jiffies */
-
-static int fb_defio = 0; /* Optionally enable experimental fb_defio mmap support */
-static int fb_bpp = 16;
-
-module_param(fb_bpp, int, S_IWUSR | S_IRUSR | S_IWGRP | S_IRGRP);
-module_param(fb_defio, int, S_IWUSR | S_IRUSR | S_IWGRP | S_IRGRP);
-
-struct udl_fbdev {
- struct drm_fb_helper helper; /* must be first */
- struct udl_framebuffer ufb;
- int fb_count;
-};
-
-#define DL_ALIGN_UP(x, a) ALIGN(x, a)
-#define DL_ALIGN_DOWN(x, a) ALIGN_DOWN(x, a)
-
-/** Read the red component (0..255) of a 32 bpp colour. */
-#define DLO_RGB_GETRED(col) (uint8_t)((col) & 0xFF)
-
-/** Read the green component (0..255) of a 32 bpp colour. */
-#define DLO_RGB_GETGRN(col) (uint8_t)(((col) >> 8) & 0xFF)
-
-/** Read the blue component (0..255) of a 32 bpp colour. */
-#define DLO_RGB_GETBLU(col) (uint8_t)(((col) >> 16) & 0xFF)
-
-/** Return red/green component of a 16 bpp colour number. */
-#define DLO_RG16(red, grn) (uint8_t)((((red) & 0xF8) | ((grn) >> 5)) & 0xFF)
-
-/** Return green/blue component of a 16 bpp colour number. */
-#define DLO_GB16(grn, blu) (uint8_t)(((((grn) & 0x1C) << 3) | ((blu) >> 3)) & 0xFF)
-
-/** Return 8 bpp colour number from red, green and blue components. */
-#define DLO_RGB8(red, grn, blu) ((((red) << 5) | (((grn) & 3) << 3) | ((blu) & 7)) & 0xFF)
-
-#if 0
-static uint8_t rgb8(uint32_t col)
-{
- uint8_t red = DLO_RGB_GETRED(col);
- uint8_t grn = DLO_RGB_GETGRN(col);
- uint8_t blu = DLO_RGB_GETBLU(col);
-
- return DLO_RGB8(red, grn, blu);
-}
-
-static uint16_t rgb16(uint32_t col)
-{
- uint8_t red = DLO_RGB_GETRED(col);
- uint8_t grn = DLO_RGB_GETGRN(col);
- uint8_t blu = DLO_RGB_GETBLU(col);
-
- return (DLO_RG16(red, grn) << 8) + DLO_GB16(grn, blu);
-}
-#endif
-
-int udl_handle_damage(struct udl_framebuffer *fb, int x, int y,
- int width, int height)
-{
- struct drm_device *dev = fb->base.dev;
- struct udl_device *udl = to_udl(dev);
- int i, ret;
- char *cmd;
- cycles_t start_cycles, end_cycles;
- int bytes_sent = 0;
- int bytes_identical = 0;
- struct urb *urb;
- int aligned_x;
- int log_bpp;
-
- BUG_ON(!is_power_of_2(fb->base.format->cpp[0]));
- log_bpp = __ffs(fb->base.format->cpp[0]);
-
- if (!fb->active_16)
- return 0;
-
- if (!fb->obj->vmapping) {
- ret = udl_gem_vmap(fb->obj);
- if (ret == -ENOMEM) {
- DRM_ERROR("failed to vmap fb\n");
- return 0;
- }
- if (!fb->obj->vmapping) {
- DRM_ERROR("failed to vmapping\n");
- return 0;
- }
- }
-
- aligned_x = DL_ALIGN_DOWN(x, sizeof(unsigned long));
- width = DL_ALIGN_UP(width + (x-aligned_x), sizeof(unsigned long));
- x = aligned_x;
-
- if ((width <= 0) ||
- (x + width > fb->base.width) ||
- (y + height > fb->base.height))
- return -EINVAL;
-
- start_cycles = get_cycles();
-
- urb = udl_get_urb(dev);
- if (!urb)
- return 0;
- cmd = urb->transfer_buffer;
-
- for (i = y; i < y + height ; i++) {
- const int line_offset = fb->base.pitches[0] * i;
- const int byte_offset = line_offset + (x << log_bpp);
- const int dev_byte_offset = (fb->base.width * i + x) << log_bpp;
- if (udl_render_hline(dev, log_bpp, &urb,
- (char *) fb->obj->vmapping,
- &cmd, byte_offset, dev_byte_offset,
- width << log_bpp,
- &bytes_identical, &bytes_sent))
- goto error;
- }
-
- if (cmd > (char *) urb->transfer_buffer) {
- /* Send partial buffer remaining before exiting */
- int len;
- if (cmd < (char *) urb->transfer_buffer + urb->transfer_buffer_length)
- *cmd++ = 0xAF;
- len = cmd - (char *) urb->transfer_buffer;
- ret = udl_submit_urb(dev, urb, len);
- bytes_sent += len;
- } else
- udl_urb_completion(urb);
-
-error:
- atomic_add(bytes_sent, &udl->bytes_sent);
- atomic_add(bytes_identical, &udl->bytes_identical);
- atomic_add((width * height) << log_bpp, &udl->bytes_rendered);
- end_cycles = get_cycles();
- atomic_add(((unsigned int) ((end_cycles - start_cycles)
- >> 10)), /* Kcycles */
- &udl->cpu_kcycles_used);
-
- return 0;
-}
-
-static int udl_fb_mmap(struct fb_info *info, struct vm_area_struct *vma)
-{
- unsigned long start = vma->vm_start;
- unsigned long size = vma->vm_end - vma->vm_start;
- unsigned long offset;
- unsigned long page, pos;
-
- if (vma->vm_pgoff > (~0UL >> PAGE_SHIFT))
- return -EINVAL;
-
- offset = vma->vm_pgoff << PAGE_SHIFT;
-
- if (offset > info->fix.smem_len || size > info->fix.smem_len - offset)
- return -EINVAL;
-
- pos = (unsigned long)info->fix.smem_start + offset;
-
- pr_debug("mmap() framebuffer addr:%lu size:%lu\n",
- pos, size);
-
- /* We don't want the framebuffer to be mapped encrypted */
- vma->vm_page_prot = pgprot_decrypted(vma->vm_page_prot);
-
- while (size > 0) {
- page = vmalloc_to_pfn((void *)pos);
- if (remap_pfn_range(vma, start, page, PAGE_SIZE, PAGE_SHARED))
- return -EAGAIN;
-
- start += PAGE_SIZE;
- pos += PAGE_SIZE;
- if (size > PAGE_SIZE)
- size -= PAGE_SIZE;
- else
- size = 0;
- }
-
- /* VM_IO | VM_DONTEXPAND | VM_DONTDUMP are set by remap_pfn_range() */
- return 0;
-}
-
-/*
- * It's common for several clients to have framebuffer open simultaneously.
- * e.g. both fbcon and X. Makes things interesting.
- * Assumes caller is holding info->lock (for open and release at least)
- */
-static int udl_fb_open(struct fb_info *info, int user)
-{
- struct udl_fbdev *ufbdev = info->par;
- struct drm_device *dev = ufbdev->ufb.base.dev;
- struct udl_device *udl = to_udl(dev);
-
- /* If the USB device is gone, we don't accept new opens */
- if (drm_dev_is_unplugged(&udl->drm))
- return -ENODEV;
-
- ufbdev->fb_count++;
-
-#ifdef CONFIG_DRM_FBDEV_EMULATION
- if (fb_defio && (info->fbdefio == NULL)) {
- /* enable defio at last moment if not disabled by client */
-
- struct fb_deferred_io *fbdefio;
-
- fbdefio = kzalloc(sizeof(struct fb_deferred_io), GFP_KERNEL);
-
- if (fbdefio) {
- fbdefio->delay = DL_DEFIO_WRITE_DELAY;
- fbdefio->deferred_io = drm_fb_helper_deferred_io;
- }
-
- info->fbdefio = fbdefio;
- fb_deferred_io_init(info);
- }
-#endif
-
- pr_debug("open /dev/fb%d user=%d fb_info=%p count=%d\n",
- info->node, user, info, ufbdev->fb_count);
-
- return 0;
-}
-
-
-/*
- * Assumes caller is holding info->lock mutex (for open and release at least)
- */
-static int udl_fb_release(struct fb_info *info, int user)
-{
- struct udl_fbdev *ufbdev = info->par;
-
- ufbdev->fb_count--;
-
-#ifdef CONFIG_DRM_FBDEV_EMULATION
- if ((ufbdev->fb_count == 0) && (info->fbdefio)) {
- fb_deferred_io_cleanup(info);
- kfree(info->fbdefio);
- info->fbdefio = NULL;
- info->fbops->fb_mmap = udl_fb_mmap;
- }
-#endif
-
- pr_debug("released /dev/fb%d user=%d count=%d\n",
- info->node, user, ufbdev->fb_count);
-
- return 0;
-}
-
-static struct fb_ops udlfb_ops = {
- .owner = THIS_MODULE,
- DRM_FB_HELPER_DEFAULT_OPS,
- .fb_fillrect = drm_fb_helper_sys_fillrect,
- .fb_copyarea = drm_fb_helper_sys_copyarea,
- .fb_imageblit = drm_fb_helper_sys_imageblit,
- .fb_mmap = udl_fb_mmap,
- .fb_open = udl_fb_open,
- .fb_release = udl_fb_release,
-};
-
-static int udl_user_framebuffer_dirty(struct drm_framebuffer *fb,
- struct drm_file *file,
- unsigned flags, unsigned color,
- struct drm_clip_rect *clips,
- unsigned num_clips)
-{
- struct udl_framebuffer *ufb = to_udl_fb(fb);
- int i;
- int ret = 0;
-
- drm_modeset_lock_all(fb->dev);
-
- if (!ufb->active_16)
- goto unlock;
-
- if (ufb->obj->base.import_attach) {
- ret = dma_buf_begin_cpu_access(ufb->obj->base.import_attach->dmabuf,
- DMA_FROM_DEVICE);
- if (ret)
- goto unlock;
- }
-
- for (i = 0; i < num_clips; i++) {
- ret = udl_handle_damage(ufb, clips[i].x1, clips[i].y1,
- clips[i].x2 - clips[i].x1,
- clips[i].y2 - clips[i].y1);
- if (ret)
- break;
- }
-
- if (ufb->obj->base.import_attach) {
- ret = dma_buf_end_cpu_access(ufb->obj->base.import_attach->dmabuf,
- DMA_FROM_DEVICE);
- }
-
- unlock:
- drm_modeset_unlock_all(fb->dev);
-
- return ret;
-}
-
-static void udl_user_framebuffer_destroy(struct drm_framebuffer *fb)
-{
- struct udl_framebuffer *ufb = to_udl_fb(fb);
-
- if (ufb->obj)
- drm_gem_object_put_unlocked(&ufb->obj->base);
-
- drm_framebuffer_cleanup(fb);
- kfree(ufb);
-}
-
-static const struct drm_framebuffer_funcs udlfb_funcs = {
- .destroy = udl_user_framebuffer_destroy,
- .dirty = udl_user_framebuffer_dirty,
-};
-
-
-static int
-udl_framebuffer_init(struct drm_device *dev,
- struct udl_framebuffer *ufb,
- const struct drm_mode_fb_cmd2 *mode_cmd,
- struct udl_gem_object *obj)
-{
- int ret;
-
- ufb->obj = obj;
- drm_helper_mode_fill_fb_struct(dev, &ufb->base, mode_cmd);
- ret = drm_framebuffer_init(dev, &ufb->base, &udlfb_funcs);
- return ret;
-}
-
-
-static int udlfb_create(struct drm_fb_helper *helper,
- struct drm_fb_helper_surface_size *sizes)
-{
- struct udl_fbdev *ufbdev =
- container_of(helper, struct udl_fbdev, helper);
- struct drm_device *dev = ufbdev->helper.dev;
- struct fb_info *info;
- struct drm_framebuffer *fb;
- struct drm_mode_fb_cmd2 mode_cmd;
- struct udl_gem_object *obj;
- uint32_t size;
- int ret = 0;
-
- if (sizes->surface_bpp == 24)
- sizes->surface_bpp = 32;
-
- mode_cmd.width = sizes->surface_width;
- mode_cmd.height = sizes->surface_height;
- mode_cmd.pitches[0] = mode_cmd.width * ((sizes->surface_bpp + 7) / 8);
-
- mode_cmd.pixel_format = drm_mode_legacy_fb_format(sizes->surface_bpp,
- sizes->surface_depth);
-
- size = mode_cmd.pitches[0] * mode_cmd.height;
- size = ALIGN(size, PAGE_SIZE);
-
- obj = udl_gem_alloc_object(dev, size);
- if (!obj)
- goto out;
-
- ret = udl_gem_vmap(obj);
- if (ret) {
- DRM_ERROR("failed to vmap fb\n");
- goto out_gfree;
- }
-
- info = drm_fb_helper_alloc_fbi(helper);
- if (IS_ERR(info)) {
- ret = PTR_ERR(info);
- goto out_gfree;
- }
-
- ret = udl_framebuffer_init(dev, &ufbdev->ufb, &mode_cmd, obj);
- if (ret)
- goto out_gfree;
-
- fb = &ufbdev->ufb.base;
-
- ufbdev->helper.fb = fb;
-
- info->screen_base = ufbdev->ufb.obj->vmapping;
- info->fix.smem_len = size;
- info->fix.smem_start = (unsigned long)ufbdev->ufb.obj->vmapping;
-
- info->fbops = &udlfb_ops;
- drm_fb_helper_fill_info(info, &ufbdev->helper, sizes);
-
- DRM_DEBUG_KMS("allocated %dx%d vmal %p\n",
- fb->width, fb->height,
- ufbdev->ufb.obj->vmapping);
-
- return ret;
-out_gfree:
- drm_gem_object_put_unlocked(&ufbdev->ufb.obj->base);
-out:
- return ret;
-}
-
-static const struct drm_fb_helper_funcs udl_fb_helper_funcs = {
- .fb_probe = udlfb_create,
-};
-
-static void udl_fbdev_destroy(struct drm_device *dev,
- struct udl_fbdev *ufbdev)
-{
- drm_fb_helper_unregister_fbi(&ufbdev->helper);
- drm_fb_helper_fini(&ufbdev->helper);
- if (ufbdev->ufb.obj) {
- drm_framebuffer_unregister_private(&ufbdev->ufb.base);
- drm_framebuffer_cleanup(&ufbdev->ufb.base);
- drm_gem_object_put_unlocked(&ufbdev->ufb.obj->base);
- }
-}
-
-int udl_fbdev_init(struct drm_device *dev)
-{
- struct udl_device *udl = to_udl(dev);
- int bpp_sel = fb_bpp;
- struct udl_fbdev *ufbdev;
- int ret;
-
- ufbdev = kzalloc(sizeof(struct udl_fbdev), GFP_KERNEL);
- if (!ufbdev)
- return -ENOMEM;
-
- udl->fbdev = ufbdev;
-
- drm_fb_helper_prepare(dev, &ufbdev->helper, &udl_fb_helper_funcs);
-
- ret = drm_fb_helper_init(dev, &ufbdev->helper, 1);
- if (ret)
- goto free;
-
- ret = drm_fb_helper_single_add_all_connectors(&ufbdev->helper);
- if (ret)
- goto fini;
-
- /* disable all the possible outputs/crtcs before entering KMS mode */
- drm_helper_disable_unused_functions(dev);
-
- ret = drm_fb_helper_initial_config(&ufbdev->helper, bpp_sel);
- if (ret)
- goto fini;
-
- return 0;
-
-fini:
- drm_fb_helper_fini(&ufbdev->helper);
-free:
- kfree(ufbdev);
- return ret;
-}
-
-void udl_fbdev_cleanup(struct drm_device *dev)
-{
- struct udl_device *udl = to_udl(dev);
- if (!udl->fbdev)
- return;
-
- udl_fbdev_destroy(dev, udl->fbdev);
- kfree(udl->fbdev);
- udl->fbdev = NULL;
-}
-
-void udl_fbdev_unplug(struct drm_device *dev)
-{
- struct udl_device *udl = to_udl(dev);
- struct udl_fbdev *ufbdev;
- if (!udl->fbdev)
- return;
-
- ufbdev = udl->fbdev;
- drm_fb_helper_unlink_fbi(&ufbdev->helper);
-}
-
-struct drm_framebuffer *
-udl_fb_user_fb_create(struct drm_device *dev,
- struct drm_file *file,
- const struct drm_mode_fb_cmd2 *mode_cmd)
-{
- struct drm_gem_object *obj;
- struct udl_framebuffer *ufb;
- int ret;
- uint32_t size;
-
- obj = drm_gem_object_lookup(file, mode_cmd->handles[0]);
- if (obj == NULL)
- return ERR_PTR(-ENOENT);
-
- size = mode_cmd->pitches[0] * mode_cmd->height;
- size = ALIGN(size, PAGE_SIZE);
-
- if (size > obj->size) {
- DRM_ERROR("object size not sufficient for fb %d %zu %d %d\n", size, obj->size, mode_cmd->pitches[0], mode_cmd->height);
- return ERR_PTR(-ENOMEM);
- }
-
- ufb = kzalloc(sizeof(*ufb), GFP_KERNEL);
- if (ufb == NULL)
- return ERR_PTR(-ENOMEM);
-
- ret = udl_framebuffer_init(dev, ufb, mode_cmd, to_udl_bo(obj));
- if (ret) {
- kfree(ufb);
- return ERR_PTR(-EINVAL);
- }
- return &ufb->base;
-}
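
With udl_fb.c deleted, the driver sheds its hand-rolled fbdev emulation: udl_fbdev_init(), the deferred-io hooks, and the private fb_ops all go away. The customary replacement once a driver is atomic is the generic fbdev emulation; a hedged sketch of the probe-side hook-up, assuming drm_fbdev_generic_setup() from <drm/drm_fb_helper.h> (the call is not part of this hunk, and error unwinding is elided):

	ret = drm_dev_register(&udl->drm, 0);
	if (ret)
		return ret;
	/* 16 bpp matches the preferred_depth chosen in udl_modeset_init() */
	drm_fbdev_generic_setup(&udl->drm, 16);
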
diff --git a/drivers/gpu/drm/udl/udl_gem.c b/drivers/gpu/drm/udl/udl_gem.c
index b23a5c2fcd80..b6e26f98aa0a 100644
--- a/drivers/gpu/drm/udl/udl_gem.c
+++ b/drivers/gpu/drm/udl/udl_gem.c
@@ -6,226 +6,101 @@
#include <linux/dma-buf.h>
#include <linux/vmalloc.h>
+#include <drm/drm_drv.h>
+#include <drm/drm_gem_shmem_helper.h>
#include <drm/drm_mode.h>
#include <drm/drm_prime.h>
#include "udl_drv.h"
-struct udl_gem_object *udl_gem_alloc_object(struct drm_device *dev,
- size_t size)
-{
- struct udl_gem_object *obj;
-
- obj = kzalloc(sizeof(*obj), GFP_KERNEL);
- if (obj == NULL)
- return NULL;
-
- if (drm_gem_object_init(dev, &obj->base, size) != 0) {
- kfree(obj);
- return NULL;
- }
-
- obj->flags = UDL_BO_CACHEABLE;
- return obj;
-}
-
-static int
-udl_gem_create(struct drm_file *file,
- struct drm_device *dev,
- uint64_t size,
- uint32_t *handle_p)
-{
- struct udl_gem_object *obj;
- int ret;
- u32 handle;
-
- size = roundup(size, PAGE_SIZE);
-
- obj = udl_gem_alloc_object(dev, size);
- if (obj == NULL)
- return -ENOMEM;
-
- ret = drm_gem_handle_create(file, &obj->base, &handle);
- if (ret) {
- drm_gem_object_release(&obj->base);
- kfree(obj);
- return ret;
- }
-
- drm_gem_object_put_unlocked(&obj->base);
- *handle_p = handle;
- return 0;
-}
-
-static void update_vm_cache_attr(struct udl_gem_object *obj,
- struct vm_area_struct *vma)
-{
- DRM_DEBUG_KMS("flags = 0x%x\n", obj->flags);
-
- /* non-cacheable as default. */
- if (obj->flags & UDL_BO_CACHEABLE) {
- vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
- } else if (obj->flags & UDL_BO_WC) {
- vma->vm_page_prot =
- pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
- } else {
- vma->vm_page_prot =
- pgprot_noncached(vm_get_page_prot(vma->vm_flags));
- }
-}
-
-int udl_dumb_create(struct drm_file *file,
- struct drm_device *dev,
- struct drm_mode_create_dumb *args)
-{
- args->pitch = args->width * DIV_ROUND_UP(args->bpp, 8);
- args->size = args->pitch * args->height;
- return udl_gem_create(file, dev,
- args->size, &args->handle);
-}
+/*
+ * GEM object funcs
+ */
-int udl_drm_gem_mmap(struct file *filp, struct vm_area_struct *vma)
+static int udl_gem_object_mmap(struct drm_gem_object *obj,
+ struct vm_area_struct *vma)
{
int ret;
- ret = drm_gem_mmap(filp, vma);
+ ret = drm_gem_shmem_mmap(obj, vma);
if (ret)
return ret;
- vma->vm_flags &= ~VM_PFNMAP;
- vma->vm_flags |= VM_MIXEDMAP;
-
- update_vm_cache_attr(to_udl_bo(vma->vm_private_data), vma);
-
- return ret;
-}
-
-vm_fault_t udl_gem_fault(struct vm_fault *vmf)
-{
- struct vm_area_struct *vma = vmf->vma;
- struct udl_gem_object *obj = to_udl_bo(vma->vm_private_data);
- struct page *page;
- unsigned int page_offset;
-
- page_offset = (vmf->address - vma->vm_start) >> PAGE_SHIFT;
-
- if (!obj->pages)
- return VM_FAULT_SIGBUS;
-
- page = obj->pages[page_offset];
- return vmf_insert_page(vma, vmf->address, page);
-}
-
-int udl_gem_get_pages(struct udl_gem_object *obj)
-{
- struct page **pages;
-
- if (obj->pages)
- return 0;
-
- pages = drm_gem_get_pages(&obj->base);
- if (IS_ERR(pages))
- return PTR_ERR(pages);
-
- obj->pages = pages;
+ vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
+ if (obj->import_attach)
+ vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
+ vma->vm_page_prot = pgprot_decrypted(vma->vm_page_prot);
return 0;
}
-void udl_gem_put_pages(struct udl_gem_object *obj)
-{
- if (obj->base.import_attach) {
- kvfree(obj->pages);
- obj->pages = NULL;
- return;
- }
-
- drm_gem_put_pages(&obj->base, obj->pages, false, false);
- obj->pages = NULL;
-}
-
-int udl_gem_vmap(struct udl_gem_object *obj)
+static void *udl_gem_object_vmap(struct drm_gem_object *obj)
{
- int page_count = obj->base.size / PAGE_SIZE;
+ struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);
int ret;
- if (obj->base.import_attach) {
- obj->vmapping = dma_buf_vmap(obj->base.import_attach->dmabuf);
- if (!obj->vmapping)
- return -ENOMEM;
- return 0;
- }
-
- ret = udl_gem_get_pages(obj);
+ ret = mutex_lock_interruptible(&shmem->vmap_lock);
if (ret)
- return ret;
+ return ERR_PTR(ret);
- obj->vmapping = vmap(obj->pages, page_count, 0, PAGE_KERNEL);
- if (!obj->vmapping)
- return -ENOMEM;
- return 0;
-}
+ if (shmem->vmap_use_count++ > 0)
+ goto out;
-void udl_gem_vunmap(struct udl_gem_object *obj)
-{
- if (obj->base.import_attach) {
- dma_buf_vunmap(obj->base.import_attach->dmabuf, obj->vmapping);
- return;
+ ret = drm_gem_shmem_get_pages(shmem);
+ if (ret)
+ goto err_zero_use;
+
+ if (obj->import_attach)
+ shmem->vaddr = dma_buf_vmap(obj->import_attach->dmabuf);
+ else
+ shmem->vaddr = vmap(shmem->pages, obj->size >> PAGE_SHIFT,
+ VM_MAP, PAGE_KERNEL);
+
+ if (!shmem->vaddr) {
+ DRM_DEBUG_KMS("Failed to vmap pages\n");
+ ret = -ENOMEM;
+ goto err_put_pages;
}
- vunmap(obj->vmapping);
-
- udl_gem_put_pages(obj);
+out:
+ mutex_unlock(&shmem->vmap_lock);
+ return shmem->vaddr;
+
+err_put_pages:
+ drm_gem_shmem_put_pages(shmem);
+err_zero_use:
+ shmem->vmap_use_count = 0;
+ mutex_unlock(&shmem->vmap_lock);
+ return ERR_PTR(ret);
}
-void udl_gem_free_object(struct drm_gem_object *gem_obj)
-{
- struct udl_gem_object *obj = to_udl_bo(gem_obj);
+static const struct drm_gem_object_funcs udl_gem_object_funcs = {
+ .free = drm_gem_shmem_free_object,
+ .print_info = drm_gem_shmem_print_info,
+ .pin = drm_gem_shmem_pin,
+ .unpin = drm_gem_shmem_unpin,
+ .get_sg_table = drm_gem_shmem_get_sg_table,
+ .vmap = udl_gem_object_vmap,
+ .vunmap = drm_gem_shmem_vunmap,
+ .mmap = udl_gem_object_mmap,
+};
- if (obj->vmapping)
- udl_gem_vunmap(obj);
-
- if (gem_obj->import_attach) {
- drm_prime_gem_destroy(gem_obj, obj->sg);
- put_device(gem_obj->dev->dev);
- }
-
- if (obj->pages)
- udl_gem_put_pages(obj);
-
- drm_gem_free_mmap_offset(gem_obj);
-}
+/*
+ * Helpers for struct drm_driver
+ */
-/* the dumb interface doesn't work with the GEM straight MMAP
- interface, it expects to do MMAP on the drm fd, like normal */
-int udl_gem_mmap(struct drm_file *file, struct drm_device *dev,
- uint32_t handle, uint64_t *offset)
+struct drm_gem_object *udl_driver_gem_create_object(struct drm_device *dev,
+ size_t size)
{
- struct udl_gem_object *gobj;
+ struct drm_gem_shmem_object *shmem;
struct drm_gem_object *obj;
- struct udl_device *udl = to_udl(dev);
- int ret = 0;
-
- mutex_lock(&udl->gem_lock);
- obj = drm_gem_object_lookup(file, handle);
- if (obj == NULL) {
- ret = -ENOENT;
- goto unlock;
- }
- gobj = to_udl_bo(obj);
- ret = udl_gem_get_pages(gobj);
- if (ret)
- goto out;
- ret = drm_gem_create_mmap_offset(obj);
- if (ret)
- goto out;
+ shmem = kzalloc(sizeof(*shmem), GFP_KERNEL);
+ if (!shmem)
+ return NULL;
- *offset = drm_vma_node_offset_addr(&gobj->base.vma_node);
+ obj = &shmem->base;
+ obj->funcs = &udl_gem_object_funcs;
-out:
- drm_gem_object_put_unlocked(&gobj->base);
-unlock:
- mutex_unlock(&udl->gem_lock);
- return ret;
+ return obj;
}
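
udl_driver_gem_create_object() only takes effect once it is wired into the driver's struct drm_driver, so that every shmem object the helpers allocate picks up udl_gem_object_funcs. A sketch of that wiring, assuming the DRM_GEM_SHMEM_DRIVER_OPS convenience macro from <drm/drm_gem_shmem_helper.h>; the surrounding fields are illustrative, not taken from this diff:

	static struct drm_driver driver = {
		.driver_features = DRIVER_ATOMIC | DRIVER_GEM | DRIVER_MODESET,

		/* Route allocation through the local hook so every BO gets
		 * udl_gem_object_funcs (and thus the custom vmap/mmap). */
		.gem_create_object = udl_driver_gem_create_object,
		DRM_GEM_SHMEM_DRIVER_OPS,

		/* fops, name/desc/date and version fields elided */
	};
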
diff --git a/drivers/gpu/drm/udl/udl_main.c b/drivers/gpu/drm/udl/udl_main.c
index 4e854e017390..538718919916 100644
--- a/drivers/gpu/drm/udl/udl_main.c
+++ b/drivers/gpu/drm/udl/udl_main.c
@@ -140,7 +140,6 @@ void udl_urb_completion(struct urb *urb)
urb->status == -ESHUTDOWN)) {
DRM_ERROR("%s - nonzero write bulk status received: %d\n",
__func__, urb->status);
- atomic_set(&udl->lost_pixels, 1);
}
}
@@ -271,7 +270,6 @@ struct urb *udl_get_urb(struct drm_device *dev)
/* Wait for an in-flight buffer to complete and get re-queued */
ret = down_timeout(&udl->urbs.limit_sem, GET_URB_TIMEOUT);
if (ret) {
- atomic_set(&udl->lost_pixels, 1);
DRM_INFO("wait for urb interrupted: %x available: %d\n",
ret, udl->urbs.available);
goto error;
@@ -304,7 +302,6 @@ int udl_submit_urb(struct drm_device *dev, struct urb *urb, size_t len)
ret = usb_submit_urb(urb, GFP_ATOMIC);
if (ret) {
udl_urb_completion(urb); /* because no one else will */
- atomic_set(&udl->lost_pixels, 1);
DRM_ERROR("usb_submit_urb error %x\n", ret);
}
return ret;
@@ -338,10 +335,6 @@ int udl_init(struct udl_device *udl)
if (ret)
goto err;
- ret = udl_fbdev_init(dev);
- if (ret)
- goto err;
-
drm_kms_helper_poll_init(dev);
return 0;
@@ -367,6 +360,4 @@ void udl_fini(struct drm_device *dev)
if (udl->urbs.count)
udl_free_urb_list(dev);
-
- udl_fbdev_cleanup(dev);
}
diff --git a/drivers/gpu/drm/udl/udl_modeset.c b/drivers/gpu/drm/udl/udl_modeset.c
index bc1ab6060dc6..22af17959053 100644
--- a/drivers/gpu/drm/udl/udl_modeset.c
+++ b/drivers/gpu/drm/udl/udl_modeset.c
@@ -9,12 +9,21 @@
*/
+#include <linux/dma-buf.h>
+
+#include <drm/drm_atomic_helper.h>
#include <drm/drm_crtc_helper.h>
+#include <drm/drm_damage_helper.h>
+#include <drm/drm_fourcc.h>
+#include <drm/drm_gem_framebuffer_helper.h>
+#include <drm/drm_gem_shmem_helper.h>
#include <drm/drm_modeset_helper_vtables.h>
#include <drm/drm_vblank.h>
#include "udl_drv.h"
+#define UDL_COLOR_DEPTH_16BPP 0
+
/*
* All DisplayLink bulk operations start with 0xAF, followed by specific code
* All operations are written to buffers which then later get sent to device
@@ -38,31 +47,9 @@ static char *udl_vidreg_unlock(char *buf)
return udl_set_register(buf, 0xFF, 0xFF);
}
-/*
- * On/Off for driving the DisplayLink framebuffer to the display
- * 0x00 H and V sync on
- * 0x01 H and V sync off (screen blank but powered)
- * 0x07 DPMS powerdown (requires modeset to come back)
- */
-static char *udl_set_blank(char *buf, int dpms_mode)
+static char *udl_set_blank_mode(char *buf, u8 mode)
{
- u8 reg;
- switch (dpms_mode) {
- case DRM_MODE_DPMS_OFF:
- reg = 0x07;
- break;
- case DRM_MODE_DPMS_STANDBY:
- reg = 0x05;
- break;
- case DRM_MODE_DPMS_SUSPEND:
- reg = 0x01;
- break;
- case DRM_MODE_DPMS_ON:
- reg = 0x00;
- break;
- }
-
- return udl_set_register(buf, 0x1f, reg);
+ return udl_set_register(buf, UDL_REG_BLANK_MODE, mode);
}
static char *udl_set_color_depth(char *buf, u8 selection)
@@ -233,6 +220,11 @@ static int udl_crtc_write_mode_to_hw(struct drm_crtc *crtc)
char *buf;
int retval;
+ if (udl->mode_buf_len == 0) {
+ DRM_ERROR("No mode set\n");
+ return -EINVAL;
+ }
+
urb = udl_get_urb(dev);
if (!urb)
return -ENOMEM;
@@ -245,80 +237,152 @@ static int udl_crtc_write_mode_to_hw(struct drm_crtc *crtc)
return retval;
}
+static long udl_log_cpp(unsigned int cpp)
+{
+ if (WARN_ON(!is_power_of_2(cpp)))
+ return -EINVAL;
+ return __ffs(cpp);
+}
-static void udl_crtc_dpms(struct drm_crtc *crtc, int mode)
+static int udl_aligned_damage_clip(struct drm_rect *clip, int x, int y,
+ int width, int height)
{
- struct drm_device *dev = crtc->dev;
- struct udl_device *udl = dev->dev_private;
- int retval;
+ int x1, x2;
- if (mode == DRM_MODE_DPMS_OFF) {
- char *buf;
- struct urb *urb;
- urb = udl_get_urb(dev);
- if (!urb)
- return;
-
- buf = (char *)urb->transfer_buffer;
- buf = udl_vidreg_lock(buf);
- buf = udl_set_blank(buf, mode);
- buf = udl_vidreg_unlock(buf);
-
- buf = udl_dummy_render(buf);
- retval = udl_submit_urb(dev, urb, buf - (char *)
- urb->transfer_buffer);
- } else {
- if (udl->mode_buf_len == 0) {
- DRM_ERROR("Trying to enable DPMS with no mode\n");
- return;
- }
- udl_crtc_write_mode_to_hw(crtc);
- }
+ if (WARN_ON_ONCE(x < 0) ||
+ WARN_ON_ONCE(y < 0) ||
+ WARN_ON_ONCE(width < 0) ||
+ WARN_ON_ONCE(height < 0))
+ return -EINVAL;
-}
+ x1 = ALIGN_DOWN(x, sizeof(unsigned long));
+ x2 = ALIGN(width + (x - x1), sizeof(unsigned long)) + x1;
+
+ clip->x1 = x1;
+ clip->y1 = y;
+ clip->x2 = x2;
+ clip->y2 = y + height;
-#if 0
-static int
-udl_pipe_set_base_atomic(struct drm_crtc *crtc, struct drm_framebuffer *fb,
- int x, int y, enum mode_set_atomic state)
-{
return 0;
}
-static int
-udl_pipe_set_base(struct drm_crtc *crtc, int x, int y,
- struct drm_framebuffer *old_fb)
+int udl_handle_damage(struct drm_framebuffer *fb, int x, int y,
+ int width, int height)
{
- return 0;
+ struct drm_device *dev = fb->dev;
+ struct dma_buf_attachment *import_attach = fb->obj[0]->import_attach;
+ int i, ret, tmp_ret;
+ char *cmd;
+ struct urb *urb;
+ struct drm_rect clip;
+ int log_bpp;
+ void *vaddr;
+
+ ret = udl_log_cpp(fb->format->cpp[0]);
+ if (ret < 0)
+ return ret;
+ log_bpp = ret;
+
+ ret = udl_aligned_damage_clip(&clip, x, y, width, height);
+ if (ret)
+ return ret;
+ else if ((clip.x2 > fb->width) || (clip.y2 > fb->height))
+ return -EINVAL;
+
+ if (import_attach) {
+ ret = dma_buf_begin_cpu_access(import_attach->dmabuf,
+ DMA_FROM_DEVICE);
+ if (ret)
+ return ret;
+ }
+
+ vaddr = drm_gem_shmem_vmap(fb->obj[0]);
+ if (IS_ERR(vaddr)) {
+ DRM_ERROR("failed to vmap fb\n");
+ goto out_dma_buf_end_cpu_access;
+ }
+
+ urb = udl_get_urb(dev);
+ if (!urb)
+ goto out_drm_gem_shmem_vunmap;
+ cmd = urb->transfer_buffer;
+
+ for (i = clip.y1; i < clip.y2; i++) {
+ const int line_offset = fb->pitches[0] * i;
+ const int byte_offset = line_offset + (clip.x1 << log_bpp);
+ const int dev_byte_offset = (fb->width * i + clip.x1) << log_bpp;
+ const int byte_width = (clip.x2 - clip.x1) << log_bpp;
+ ret = udl_render_hline(dev, log_bpp, &urb, (char *)vaddr,
+ &cmd, byte_offset, dev_byte_offset,
+ byte_width);
+ if (ret)
+ goto out_drm_gem_shmem_vunmap;
+ }
+
+ if (cmd > (char *)urb->transfer_buffer) {
+ /* Send partial buffer remaining before exiting */
+ int len;
+ if (cmd < (char *)urb->transfer_buffer + urb->transfer_buffer_length)
+ *cmd++ = 0xAF;
+ len = cmd - (char *)urb->transfer_buffer;
+ ret = udl_submit_urb(dev, urb, len);
+ } else {
+ udl_urb_completion(urb);
+ }
+
+ ret = 0;
+
+out_drm_gem_shmem_vunmap:
+ drm_gem_shmem_vunmap(fb->obj[0], vaddr);
+out_dma_buf_end_cpu_access:
+ if (import_attach) {
+ tmp_ret = dma_buf_end_cpu_access(import_attach->dmabuf,
+ DMA_FROM_DEVICE);
+ if (tmp_ret && !ret)
+ ret = tmp_ret; /* only update ret if not set yet */
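+ /* i.e. a begin/end_cpu_access failure never masks a render error */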
+ }
+
+ return ret;
}
-#endif
-static int udl_crtc_mode_set(struct drm_crtc *crtc,
- struct drm_display_mode *mode,
- struct drm_display_mode *adjusted_mode,
- int x, int y,
- struct drm_framebuffer *old_fb)
+/*
+ * Simple display pipeline
+ */
+static const uint32_t udl_simple_display_pipe_formats[] = {
+ DRM_FORMAT_RGB565,
+ DRM_FORMAT_XRGB8888,
+};
+
+static enum drm_mode_status
+udl_simple_display_pipe_mode_valid(struct drm_simple_display_pipe *pipe,
+ const struct drm_display_mode *mode)
{
+ return MODE_OK;
+}
+
+static void
+udl_simple_display_pipe_enable(struct drm_simple_display_pipe *pipe,
+ struct drm_crtc_state *crtc_state,
+ struct drm_plane_state *plane_state)
+{
+ struct drm_crtc *crtc = &pipe->crtc;
struct drm_device *dev = crtc->dev;
- struct udl_framebuffer *ufb = to_udl_fb(crtc->primary->fb);
+ struct drm_framebuffer *fb = plane_state->fb;
struct udl_device *udl = dev->dev_private;
+ struct drm_display_mode *mode = &crtc_state->mode;
char *buf;
char *wrptr;
- int color_depth = 0;
+ int color_depth = UDL_COLOR_DEPTH_16BPP;
- udl->crtc = crtc;
+ crtc_state->no_vblank = true;
buf = (char *)udl->mode_buf;
- /* for now we just clip 24 -> 16 - if we fix that fix this */
- /*if (crtc->fb->bits_per_pixel != 16)
- color_depth = 1; */
-
/* This first section has to do with setting the base address on the
- * controller * associated with the display. There are 2 base
- * pointers, currently, we only * use the 16 bpp segment.
- */
+ * controller associated with the display. There are 2 base
+ * pointers, currently, we only use the 16 bpp segment.
+ */
wrptr = udl_vidreg_lock(buf);
wrptr = udl_set_color_depth(wrptr, color_depth);
/* set base for 16bpp segment to 0 */
@@ -326,108 +390,95 @@ static int udl_crtc_mode_set(struct drm_crtc *crtc,
/* set base for 8bpp segment to end of fb */
wrptr = udl_set_base8bpp(wrptr, 2 * mode->vdisplay * mode->hdisplay);
- wrptr = udl_set_vid_cmds(wrptr, adjusted_mode);
- wrptr = udl_set_blank(wrptr, DRM_MODE_DPMS_ON);
+ wrptr = udl_set_vid_cmds(wrptr, mode);
+ wrptr = udl_set_blank_mode(wrptr, UDL_BLANK_MODE_ON);
wrptr = udl_vidreg_unlock(wrptr);
wrptr = udl_dummy_render(wrptr);
- if (old_fb) {
- struct udl_framebuffer *uold_fb = to_udl_fb(old_fb);
- uold_fb->active_16 = false;
- }
- ufb->active_16 = true;
udl->mode_buf_len = wrptr - buf;
- /* damage all of it */
- udl_handle_damage(ufb, 0, 0, ufb->base.width, ufb->base.height);
- return 0;
-}
-
+ udl_handle_damage(fb, 0, 0, fb->width, fb->height);
-static void udl_crtc_disable(struct drm_crtc *crtc)
-{
- udl_crtc_dpms(crtc, DRM_MODE_DPMS_OFF);
-}
+ if (!crtc_state->mode_changed)
+ return;
-static void udl_crtc_destroy(struct drm_crtc *crtc)
-{
- drm_crtc_cleanup(crtc);
- kfree(crtc);
+ /* enable display */
+ udl_crtc_write_mode_to_hw(crtc);
}
-static int udl_crtc_page_flip(struct drm_crtc *crtc,
- struct drm_framebuffer *fb,
- struct drm_pending_vblank_event *event,
- uint32_t page_flip_flags,
- struct drm_modeset_acquire_ctx *ctx)
+static void
+udl_simple_display_pipe_disable(struct drm_simple_display_pipe *pipe)
{
- struct udl_framebuffer *ufb = to_udl_fb(fb);
+ struct drm_crtc *crtc = &pipe->crtc;
struct drm_device *dev = crtc->dev;
+ struct urb *urb;
+ char *buf;
- struct drm_framebuffer *old_fb = crtc->primary->fb;
- if (old_fb) {
- struct udl_framebuffer *uold_fb = to_udl_fb(old_fb);
- uold_fb->active_16 = false;
- }
- ufb->active_16 = true;
-
- udl_handle_damage(ufb, 0, 0, fb->width, fb->height);
+ urb = udl_get_urb(dev);
+ if (!urb)
+ return;
- spin_lock_irq(&dev->event_lock);
- if (event)
- drm_crtc_send_vblank_event(crtc, event);
- spin_unlock_irq(&dev->event_lock);
- crtc->primary->fb = fb;
+ buf = (char *)urb->transfer_buffer;
+ buf = udl_vidreg_lock(buf);
+ buf = udl_set_blank_mode(buf, UDL_BLANK_MODE_POWERDOWN);
+ buf = udl_vidreg_unlock(buf);
+ buf = udl_dummy_render(buf);
- return 0;
+ udl_submit_urb(dev, urb, buf - (char *)urb->transfer_buffer);
}
-static void udl_crtc_prepare(struct drm_crtc *crtc)
+static int
+udl_simple_display_pipe_check(struct drm_simple_display_pipe *pipe,
+ struct drm_plane_state *plane_state,
+ struct drm_crtc_state *crtc_state)
{
+ return 0;
}
-static void udl_crtc_commit(struct drm_crtc *crtc)
+static void
+udl_simple_display_pipe_update(struct drm_simple_display_pipe *pipe,
+ struct drm_plane_state *old_plane_state)
{
- udl_crtc_dpms(crtc, DRM_MODE_DPMS_ON);
-}
-
-static const struct drm_crtc_helper_funcs udl_helper_funcs = {
- .dpms = udl_crtc_dpms,
- .mode_set = udl_crtc_mode_set,
- .prepare = udl_crtc_prepare,
- .commit = udl_crtc_commit,
- .disable = udl_crtc_disable,
-};
+ struct drm_plane_state *state = pipe->plane.state;
+ struct drm_framebuffer *fb = state->fb;
+ struct drm_rect rect;
-static const struct drm_crtc_funcs udl_crtc_funcs = {
- .set_config = drm_crtc_helper_set_config,
- .destroy = udl_crtc_destroy,
- .page_flip = udl_crtc_page_flip,
-};
-
-static int udl_crtc_init(struct drm_device *dev)
-{
- struct drm_crtc *crtc;
+ if (!fb)
+ return;
- crtc = kzalloc(sizeof(struct drm_crtc) + sizeof(struct drm_connector *), GFP_KERNEL);
- if (crtc == NULL)
- return -ENOMEM;
+ if (drm_atomic_helper_damage_merged(old_plane_state, state, &rect))
+ udl_handle_damage(fb, rect.x1, rect.y1, rect.x2 - rect.x1,
+ rect.y2 - rect.y1);
+}
- drm_crtc_init(dev, crtc, &udl_crtc_funcs);
- drm_crtc_helper_add(crtc, &udl_helper_funcs);
+static const
+struct drm_simple_display_pipe_funcs udl_simple_display_pipe_funcs = {
+ .mode_valid = udl_simple_display_pipe_mode_valid,
+ .enable = udl_simple_display_pipe_enable,
+ .disable = udl_simple_display_pipe_disable,
+ .check = udl_simple_display_pipe_check,
+ .update = udl_simple_display_pipe_update,
+ .prepare_fb = drm_gem_fb_simple_display_pipe_prepare_fb,
+};
- return 0;
-}
+/*
+ * Modesetting
+ */
static const struct drm_mode_config_funcs udl_mode_funcs = {
- .fb_create = udl_fb_user_fb_create,
- .output_poll_changed = NULL,
+ .fb_create = drm_gem_fb_create_with_dirty,
+ .atomic_check = drm_atomic_helper_check,
+ .atomic_commit = drm_atomic_helper_commit,
};
int udl_modeset_init(struct drm_device *dev)
{
- struct drm_encoder *encoder;
+ size_t format_count = ARRAY_SIZE(udl_simple_display_pipe_formats);
+ struct udl_device *udl = dev->dev_private;
+ struct drm_connector *connector;
+ int ret;
+
drm_mode_config_init(dev);
dev->mode_config.min_width = 640;
@@ -437,29 +488,32 @@ int udl_modeset_init(struct drm_device *dev)
dev->mode_config.max_height = 2048;
dev->mode_config.prefer_shadow = 0;
- dev->mode_config.preferred_depth = 24;
+ dev->mode_config.preferred_depth = 16;
dev->mode_config.funcs = &udl_mode_funcs;
- udl_crtc_init(dev);
+ connector = udl_connector_init(dev);
+ if (IS_ERR(connector)) {
+ ret = PTR_ERR(connector);
+ goto err_drm_mode_config_cleanup;
+ }
- encoder = udl_encoder_init(dev);
+ format_count = ARRAY_SIZE(udl_simple_display_pipe_formats);
- udl_connector_init(dev, encoder);
+ ret = drm_simple_display_pipe_init(dev, &udl->display_pipe,
+ &udl_simple_display_pipe_funcs,
+ udl_simple_display_pipe_formats,
+ format_count, NULL, connector);
+ if (ret)
+ goto err_drm_mode_config_cleanup;
- return 0;
-}
+ drm_mode_config_reset(dev);
-void udl_modeset_restore(struct drm_device *dev)
-{
- struct udl_device *udl = dev->dev_private;
- struct udl_framebuffer *ufb;
+ return 0;
- if (!udl->crtc || !udl->crtc->primary->fb)
- return;
- udl_crtc_commit(udl->crtc);
- ufb = to_udl_fb(udl->crtc->primary->fb);
- udl_handle_damage(ufb, 0, 0, ufb->base.width, ufb->base.height);
+err_drm_mode_config_cleanup:
+ drm_mode_config_cleanup(dev);
+ return ret;
}
void udl_modeset_cleanup(struct drm_device *dev)
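
The damage path above leans on udl_aligned_damage_clip() widening every rectangle to unsigned-long-aligned pixel columns, presumably so udl_render_hline() reads aligned spans from the shadow buffer. A worked example of the arithmetic:

	/* 64-bit build, sizeof(unsigned long) == 8:
	 * damage reported at x = 5, width = 10. */
	int x = 5, width = 10;
	int x1 = ALIGN_DOWN(x, sizeof(unsigned long));			/* 0  */
	int x2 = ALIGN(width + (x - x1), sizeof(unsigned long)) + x1;	/* 16 */
	/* The clip widens to [0, 16): never narrower than requested,
	 * and both edges sit on 8-pixel boundaries. */
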
diff --git a/drivers/gpu/drm/udl/udl_transfer.c b/drivers/gpu/drm/udl/udl_transfer.c
index 1973a4c1e358..971927669d6b 100644
--- a/drivers/gpu/drm/udl/udl_transfer.c
+++ b/drivers/gpu/drm/udl/udl_transfer.c
@@ -212,8 +212,7 @@ static void udl_compress_hline16(
int udl_render_hline(struct drm_device *dev, int log_bpp, struct urb **urb_ptr,
const char *front, char **urb_buf_ptr,
u32 byte_offset, u32 device_byte_offset,
- u32 byte_width,
- int *ident_ptr, int *sent_ptr)
+ u32 byte_width)
{
const u8 *line_start, *line_end, *next_pixel;
u32 base16 = 0 + (device_byte_offset >> log_bpp) * 2;
@@ -235,12 +234,12 @@ int udl_render_hline(struct drm_device *dev, int log_bpp, struct urb **urb_ptr,
if (cmd >= cmd_end) {
int len = cmd - (u8 *) urb->transfer_buffer;
- if (udl_submit_urb(dev, urb, len))
- return 1; /* lost pixels is set */
- *sent_ptr += len;
+ int ret = udl_submit_urb(dev, urb, len);
+ if (ret)
+ return ret;
urb = udl_get_urb(dev);
if (!urb)
- return 1; /* lost_pixels is set */
+ return -EAGAIN;
*urb_ptr = urb;
cmd = urb->transfer_buffer;
cmd_end = &cmd[urb->transfer_buffer_length];
@@ -251,4 +250,3 @@ int udl_render_hline(struct drm_device *dev, int log_bpp, struct urb **urb_ptr,
return 0;
}
-
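
udl_render_hline() used to return 1 and rely on the caller noticing the (now removed) lost_pixels flag; it now returns a negative errno that callers propagate directly. The call site in the new udl_handle_damage() reduces to:

	ret = udl_render_hline(dev, log_bpp, &urb, (char *)vaddr, &cmd,
			       byte_offset, dev_byte_offset, byte_width);
	if (ret)	/* -EAGAIN when no URB is available, or the USB submit error */
		goto out_drm_gem_shmem_vunmap;
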
diff --git a/drivers/gpu/drm/v3d/v3d_drv.c b/drivers/gpu/drm/v3d/v3d_drv.c
index 1a07462b4528..eaa8e9682373 100644
--- a/drivers/gpu/drm/v3d/v3d_drv.c
+++ b/drivers/gpu/drm/v3d/v3d_drv.c
@@ -140,7 +140,7 @@ v3d_open(struct drm_device *dev, struct drm_file *file)
{
struct v3d_dev *v3d = to_v3d_dev(dev);
struct v3d_file_priv *v3d_priv;
- struct drm_sched_rq *rq;
+ struct drm_gpu_scheduler *sched;
int i;
v3d_priv = kzalloc(sizeof(*v3d_priv), GFP_KERNEL);
@@ -150,8 +150,10 @@ v3d_open(struct drm_device *dev, struct drm_file *file)
v3d_priv->v3d = v3d;
for (i = 0; i < V3D_MAX_QUEUES; i++) {
- rq = &v3d->queue[i].sched.sched_rq[DRM_SCHED_PRIORITY_NORMAL];
- drm_sched_entity_init(&v3d_priv->sched_entity[i], &rq, 1, NULL);
+ sched = &v3d->queue[i].sched;
+ drm_sched_entity_init(&v3d_priv->sched_entity[i],
+ DRM_SCHED_PRIORITY_NORMAL, &sched,
+ 1, NULL);
}
file->driver_priv = v3d_priv;
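
This follows a scheduler API change: drm_sched_entity_init() no longer takes pointers into a scheduler's internal run-queue array but a priority plus a list of candidate schedulers, from which it resolves the run queue itself. The per-queue setup therefore becomes (as in the hunk above; the int return value is ignored here, as it was before):

	struct drm_gpu_scheduler *sched = &v3d->queue[i].sched;

	drm_sched_entity_init(&v3d_priv->sched_entity[i],
			      DRM_SCHED_PRIORITY_NORMAL,
			      &sched, 1,	/* one-element scheduler list */
			      NULL);		/* no "guilty" flag to track */
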
diff --git a/drivers/gpu/drm/vc4/Kconfig b/drivers/gpu/drm/vc4/Kconfig
index 7c2317efd5b7..118e8a426b1a 100644
--- a/drivers/gpu/drm/vc4/Kconfig
+++ b/drivers/gpu/drm/vc4/Kconfig
@@ -22,9 +22,9 @@ config DRM_VC4
our display setup.
config DRM_VC4_HDMI_CEC
- bool "Broadcom VC4 HDMI CEC Support"
- depends on DRM_VC4
- select CEC_CORE
- help
+ bool "Broadcom VC4 HDMI CEC Support"
+ depends on DRM_VC4
+ select CEC_CORE
+ help
Choose this option if you have a Broadcom VC4 GPU
and want to use CEC.
diff --git a/drivers/gpu/drm/vc4/vc4_dsi.c b/drivers/gpu/drm/vc4/vc4_dsi.c
index c9ba83ed49b9..fd8a2eb60505 100644
--- a/drivers/gpu/drm/vc4/vc4_dsi.c
+++ b/drivers/gpu/drm/vc4/vc4_dsi.c
@@ -499,6 +499,7 @@ struct vc4_dsi {
struct mipi_dsi_host dsi_host;
struct drm_encoder *encoder;
struct drm_bridge *bridge;
+ struct list_head bridge_chain;
void __iomem *regs;
@@ -752,10 +753,19 @@ static void vc4_dsi_encoder_disable(struct drm_encoder *encoder)
struct vc4_dsi_encoder *vc4_encoder = to_vc4_dsi_encoder(encoder);
struct vc4_dsi *dsi = vc4_encoder->dsi;
struct device *dev = &dsi->pdev->dev;
+ struct drm_bridge *iter;
+
+ list_for_each_entry_reverse(iter, &dsi->bridge_chain, chain_node) {
+ if (iter->funcs->disable)
+ iter->funcs->disable(iter);
+ }
- drm_bridge_disable(dsi->bridge);
vc4_dsi_ulps(dsi, true);
- drm_bridge_post_disable(dsi->bridge);
+
+ list_for_each_entry_from(iter, &dsi->bridge_chain, chain_node) {
+ if (iter->funcs->post_disable)
+ iter->funcs->post_disable(iter);
+ }
clk_disable_unprepare(dsi->pll_phy_clock);
clk_disable_unprepare(dsi->escape_clock);
@@ -823,6 +833,7 @@ static void vc4_dsi_encoder_enable(struct drm_encoder *encoder)
struct vc4_dsi *dsi = vc4_encoder->dsi;
struct device *dev = &dsi->pdev->dev;
bool debug_dump_regs = false;
+ struct drm_bridge *iter;
unsigned long hs_clock;
u32 ui_ns;
/* Minimum LP state duration in escape clock cycles. */
@@ -1055,7 +1066,10 @@ static void vc4_dsi_encoder_enable(struct drm_encoder *encoder)
vc4_dsi_ulps(dsi, false);
- drm_bridge_pre_enable(dsi->bridge);
+ list_for_each_entry_reverse(iter, &dsi->bridge_chain, chain_node) {
+ if (iter->funcs->pre_enable)
+ iter->funcs->pre_enable(iter);
+ }
if (dsi->mode_flags & MIPI_DSI_MODE_VIDEO) {
DSI_PORT_WRITE(DISP0_CTRL,
@@ -1072,7 +1086,10 @@ static void vc4_dsi_encoder_enable(struct drm_encoder *encoder)
DSI_DISP0_ENABLE);
}
- drm_bridge_enable(dsi->bridge);
+ list_for_each_entry(iter, &dsi->bridge_chain, chain_node) {
+ if (iter->funcs->enable)
+ iter->funcs->enable(iter);
+ }
if (debug_dump_regs) {
struct drm_printer p = drm_info_printer(&dsi->pdev->dev);
@@ -1460,6 +1477,8 @@ static int vc4_dsi_bind(struct device *dev, struct device *master, void *data)
GFP_KERNEL);
if (!vc4_dsi_encoder)
return -ENOMEM;
+
+ INIT_LIST_HEAD(&dsi->bridge_chain);
vc4_dsi_encoder->base.type = VC4_ENCODER_TYPE_DSI1;
vc4_dsi_encoder->dsi = dsi;
dsi->encoder = &vc4_dsi_encoder->base.base;
@@ -1610,7 +1629,7 @@ static int vc4_dsi_bind(struct device *dev, struct device *master, void *data)
* from our driver, since we need to sequence them within the
* encoder's enable/disable paths.
*/
- dsi->encoder->bridge = NULL;
+ list_splice_init(&dsi->encoder->bridge_chain, &dsi->bridge_chain);
if (dsi->port == 0)
vc4_debugfs_add_regset32(drm, "dsi0_regs", &dsi->regset);
@@ -1632,6 +1651,11 @@ static void vc4_dsi_unbind(struct device *dev, struct device *master,
if (dsi->bridge)
pm_runtime_disable(dev);
+ /*
+ * Restore the bridge_chain so the bridge detach procedure can happen
+ * normally.
+ */
+ list_splice_init(&dsi->bridge_chain, &dsi->encoder->bridge_chain);
vc4_dsi_encoder_destroy(dsi->encoder);
if (dsi->port == 1)
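
The open-coded walks rely on list_splice_init(), which moves every entry from the source list onto the destination and leaves the source empty and reusable. That lets vc4_dsi take the bridges away from the encoder at bind time and hand them back at unbind, roughly:

	/* bind: take ownership so the DRM core won't call the bridges */
	list_splice_init(&dsi->encoder->bridge_chain, &dsi->bridge_chain);

	/* enable path: invoke the hooks by hand, in chain order */
	list_for_each_entry(iter, &dsi->bridge_chain, chain_node) {
		if (iter->funcs->enable)
			iter->funcs->enable(iter);
	}

	/* unbind: return the chain so bridge detach proceeds normally */
	list_splice_init(&dsi->bridge_chain, &dsi->encoder->bridge_chain);
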
diff --git a/drivers/gpu/drm/vc4/vc4_gem.c b/drivers/gpu/drm/vc4/vc4_gem.c
index 7a06cb6e31c5..e1cfc3ccd05a 100644
--- a/drivers/gpu/drm/vc4/vc4_gem.c
+++ b/drivers/gpu/drm/vc4/vc4_gem.c
@@ -568,7 +568,7 @@ vc4_unlock_bo_reservations(struct drm_device *dev,
for (i = 0; i < exec->bo_count; i++) {
struct drm_gem_object *bo = &exec->bo[i]->base;
- ww_mutex_unlock(&bo->resv->lock);
+ dma_resv_unlock(bo->resv);
}
ww_acquire_fini(acquire_ctx);
@@ -595,8 +595,7 @@ vc4_lock_bo_reservations(struct drm_device *dev,
retry:
if (contended_lock != -1) {
bo = &exec->bo[contended_lock]->base;
- ret = ww_mutex_lock_slow_interruptible(&bo->resv->lock,
- acquire_ctx);
+ ret = dma_resv_lock_slow_interruptible(bo->resv, acquire_ctx);
if (ret) {
ww_acquire_done(acquire_ctx);
return ret;
@@ -609,19 +608,19 @@ retry:
bo = &exec->bo[i]->base;
- ret = ww_mutex_lock_interruptible(&bo->resv->lock, acquire_ctx);
+ ret = dma_resv_lock_interruptible(bo->resv, acquire_ctx);
if (ret) {
int j;
for (j = 0; j < i; j++) {
bo = &exec->bo[j]->base;
- ww_mutex_unlock(&bo->resv->lock);
+ dma_resv_unlock(bo->resv);
}
if (contended_lock != -1 && contended_lock >= i) {
bo = &exec->bo[contended_lock]->base;
- ww_mutex_unlock(&bo->resv->lock);
+ dma_resv_unlock(bo->resv);
}
if (ret == -EDEADLK) {
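
These conversions are mechanical: the dma_resv lock helpers are thin wrappers around the ww_mutex embedded in struct dma_resv, so each call maps one-to-one (sketch, assuming <linux/dma-resv.h>):

	ww_mutex_lock_interruptible(&bo->resv->lock, ctx);      /* old */
	dma_resv_lock_interruptible(bo->resv, ctx);             /* new */

	ww_mutex_lock_slow_interruptible(&bo->resv->lock, ctx); /* old */
	dma_resv_lock_slow_interruptible(bo->resv, ctx);        /* new */

	ww_mutex_unlock(&bo->resv->lock);                       /* old */
	dma_resv_unlock(bo->resv);                              /* new */
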
diff --git a/drivers/gpu/drm/vc4/vc4_hdmi.c b/drivers/gpu/drm/vc4/vc4_hdmi.c
index 1c62c6c9244b..cea18dc15f77 100644
--- a/drivers/gpu/drm/vc4/vc4_hdmi.c
+++ b/drivers/gpu/drm/vc4/vc4_hdmi.c
@@ -267,7 +267,8 @@ static const struct drm_connector_helper_funcs vc4_hdmi_connector_helper_funcs =
};
static struct drm_connector *vc4_hdmi_connector_init(struct drm_device *dev,
- struct drm_encoder *encoder)
+ struct drm_encoder *encoder,
+ struct i2c_adapter *ddc)
{
struct drm_connector *connector;
struct vc4_hdmi_connector *hdmi_connector;
@@ -281,8 +282,10 @@ static struct drm_connector *vc4_hdmi_connector_init(struct drm_device *dev,
hdmi_connector->encoder = encoder;
- drm_connector_init(dev, connector, &vc4_hdmi_connector_funcs,
- DRM_MODE_CONNECTOR_HDMIA);
+ drm_connector_init_with_ddc(dev, connector,
+ &vc4_hdmi_connector_funcs,
+ DRM_MODE_CONNECTOR_HDMIA,
+ ddc);
drm_connector_helper_add(connector, &vc4_hdmi_connector_helper_funcs);
/* Create and attach TV margin props to this connector. */
@@ -1395,7 +1398,8 @@ static int vc4_hdmi_bind(struct device *dev, struct device *master, void *data)
DRM_MODE_ENCODER_TMDS, NULL);
drm_encoder_helper_add(hdmi->encoder, &vc4_hdmi_encoder_helper_funcs);
- hdmi->connector = vc4_hdmi_connector_init(drm, hdmi->encoder);
+ hdmi->connector =
+ vc4_hdmi_connector_init(drm, hdmi->encoder, hdmi->ddc);
if (IS_ERR(hdmi->connector)) {
ret = PTR_ERR(hdmi->connector);
goto err_destroy_encoder;
diff --git a/drivers/gpu/drm/via/via_dmablit.c b/drivers/gpu/drm/via/via_dmablit.c
index 3db000aacd26..551fa31629af 100644
--- a/drivers/gpu/drm/via/via_dmablit.c
+++ b/drivers/gpu/drm/via/via_dmablit.c
@@ -35,11 +35,11 @@
*/
#include <linux/pagemap.h>
+#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <drm/drm_device.h>
-#include <drm/drm_pci.h>
#include <drm/via_drm.h>
#include "via_dmablit.h"
@@ -188,8 +188,8 @@ via_free_sg_info(struct pci_dev *pdev, drm_via_sg_info_t *vsg)
kfree(vsg->desc_pages);
/* fall through */
case dr_via_pages_locked:
- put_user_pages_dirty_lock(vsg->pages, vsg->num_pages,
- (vsg->direction == DMA_FROM_DEVICE));
+ unpin_user_pages_dirty_lock(vsg->pages, vsg->num_pages,
+ (vsg->direction == DMA_FROM_DEVICE));
/* fall through */
case dr_via_pages_alloc:
vfree(vsg->pages);
@@ -239,7 +239,7 @@ via_lock_all_dma_pages(drm_via_sg_info_t *vsg, drm_via_dmablit_t *xfer)
vsg->pages = vzalloc(array_size(sizeof(struct page *), vsg->num_pages));
if (NULL == vsg->pages)
return -ENOMEM;
- ret = get_user_pages_fast((unsigned long)xfer->mem_addr,
+ ret = pin_user_pages_fast((unsigned long)xfer->mem_addr,
vsg->num_pages,
vsg->direction == DMA_FROM_DEVICE ? FOLL_WRITE : 0,
vsg->pages);
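
pin_user_pages_fast() and unpin_user_pages_dirty_lock() form a pair: pages pinned through the pin_user_pages*() API carry an elevated pin count that must be dropped with unpin_user_pages*(), never with plain put_page(). The shape of the via usage, reduced to a sketch (uaddr, num_pages and write are illustrative, and a short pin count would need unwinding in real code):

	ret = pin_user_pages_fast(uaddr, num_pages,
				  write ? FOLL_WRITE : 0, pages);
	if (ret < 0)
		return ret;

	/* ... program and run the DMA blit over the pinned pages ... */

	unpin_user_pages_dirty_lock(pages, ret, write); /* mark dirty, drop pins */
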
diff --git a/drivers/gpu/drm/via/via_drv.c b/drivers/gpu/drm/via/via_drv.c
index 666a16de84f9..5da38082821f 100644
--- a/drivers/gpu/drm/via/via_drv.c
+++ b/drivers/gpu/drm/via/via_drv.c
@@ -23,10 +23,10 @@
*/
#include <linux/module.h>
+#include <linux/pci.h>
#include <drm/drm_drv.h>
#include <drm/drm_file.h>
-#include <drm/drm_pci.h>
#include <drm/drm_pciids.h>
#include <drm/via_drm.h>
diff --git a/drivers/gpu/drm/via/via_map.c b/drivers/gpu/drm/via/via_map.c
index 431c150df014..255c5066a939 100644
--- a/drivers/gpu/drm/via/via_map.c
+++ b/drivers/gpu/drm/via/via_map.c
@@ -22,8 +22,9 @@
* DEALINGS IN THE SOFTWARE.
*/
+#include <linux/pci.h>
+
#include <drm/drm_device.h>
-#include <drm/drm_pci.h>
#include <drm/drm_vblank.h>
#include <drm/via_drm.h>
diff --git a/drivers/gpu/drm/virtio/virtgpu_display.c b/drivers/gpu/drm/virtio/virtgpu_display.c
index e622485ae826..0966208ec30d 100644
--- a/drivers/gpu/drm/virtio/virtgpu_display.c
+++ b/drivers/gpu/drm/virtio/virtgpu_display.c
@@ -43,6 +43,9 @@
#define XRES_MAX 8192
#define YRES_MAX 8192
+#define drm_connector_to_virtio_gpu_output(x) \
+ container_of(x, struct virtio_gpu_output, conn)
+
static const struct drm_crtc_funcs virtio_gpu_crtc_funcs = {
.set_config = drm_atomic_helper_set_config,
.destroy = drm_crtc_cleanup,
@@ -59,7 +62,7 @@ static const struct drm_framebuffer_funcs virtio_gpu_fb_funcs = {
.dirty = drm_atomic_helper_dirtyfb,
};
-int
+static int
virtio_gpu_framebuffer_init(struct drm_device *dev,
struct virtio_gpu_framebuffer *vgfb,
const struct drm_mode_fb_cmd2 *mode_cmd,
diff --git a/drivers/gpu/drm/virtio/virtgpu_drv.c b/drivers/gpu/drm/virtio/virtgpu_drv.c
index 8dee698c90ff..8cf27af3ad53 100644
--- a/drivers/gpu/drm/virtio/virtgpu_drv.c
+++ b/drivers/gpu/drm/virtio/virtgpu_drv.c
@@ -137,7 +137,7 @@ static void virtio_gpu_remove(struct virtio_device *vdev)
drm_dev_unregister(dev);
virtio_gpu_deinit(dev);
- drm_put_dev(dev);
+ drm_dev_put(dev);
}
static void virtio_gpu_config_changed(struct virtio_device *vdev)
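
drm_put_dev() bundled unregistration with the final reference drop and has been deprecated in favour of spelling the two steps out, which makes the refcounting explicit in the remove path:

	drm_dev_unregister(dev);	/* tear down userspace-visible interfaces */
	virtio_gpu_deinit(dev);
	drm_dev_put(dev);		/* drop the last reference; may free dev */
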
diff --git a/drivers/gpu/drm/virtio/virtgpu_drv.h b/drivers/gpu/drm/virtio/virtgpu_drv.h
index 0b56ba005e25..7e69c06e168e 100644
--- a/drivers/gpu/drm/virtio/virtgpu_drv.h
+++ b/drivers/gpu/drm/virtio/virtgpu_drv.h
@@ -38,6 +38,7 @@
#include <drm/drm_gem_shmem_helper.h>
#include <drm/drm_ioctl.h>
#include <drm/drm_probe_helper.h>
+#include <drm/virtgpu_drm.h>
#define DRIVER_NAME "virtio_gpu"
#define DRIVER_DESC "virtio GPU"
@@ -102,8 +103,6 @@ struct virtio_gpu_fence {
struct virtio_gpu_fence_driver *drv;
struct list_head node;
};
-#define to_virtio_fence(x) \
- container_of(x, struct virtio_gpu_fence, f)
struct virtio_gpu_vbuffer {
char *buf;
@@ -134,10 +133,6 @@ struct virtio_gpu_output {
};
#define drm_crtc_to_virtio_gpu_output(x) \
container_of(x, struct virtio_gpu_output, crtc)
-#define drm_connector_to_virtio_gpu_output(x) \
- container_of(x, struct virtio_gpu_output, conn)
-#define drm_encoder_to_virtio_gpu_output(x) \
- container_of(x, struct virtio_gpu_output, enc)
struct virtio_gpu_framebuffer {
struct drm_framebuffer base;
@@ -182,6 +177,9 @@ struct virtio_gpu_device {
struct kmem_cache *vbufs;
bool vqs_ready;
+ bool disable_notify;
+ bool pending_notify;
+
struct ida resource_ida;
wait_queue_head_t resp_wq;
@@ -312,13 +310,13 @@ void virtio_gpu_cmd_submit(struct virtio_gpu_device *vgdev,
void virtio_gpu_cmd_transfer_from_host_3d(struct virtio_gpu_device *vgdev,
uint32_t ctx_id,
uint64_t offset, uint32_t level,
- struct virtio_gpu_box *box,
+ struct drm_virtgpu_3d_box *box,
struct virtio_gpu_object_array *objs,
struct virtio_gpu_fence *fence);
void virtio_gpu_cmd_transfer_to_host_3d(struct virtio_gpu_device *vgdev,
uint32_t ctx_id,
uint64_t offset, uint32_t level,
- struct virtio_gpu_box *box,
+ struct drm_virtgpu_3d_box *box,
struct virtio_gpu_object_array *objs,
struct virtio_gpu_fence *fence);
void
@@ -334,11 +332,10 @@ void virtio_gpu_dequeue_ctrl_func(struct work_struct *work);
void virtio_gpu_dequeue_cursor_func(struct work_struct *work);
void virtio_gpu_dequeue_fence_func(struct work_struct *work);
+void virtio_gpu_disable_notify(struct virtio_gpu_device *vgdev);
+void virtio_gpu_enable_notify(struct virtio_gpu_device *vgdev);
+
/* virtio_gpu_display.c */
-int virtio_gpu_framebuffer_init(struct drm_device *dev,
- struct virtio_gpu_framebuffer *vgfb,
- const struct drm_mode_fb_cmd2 *mode_cmd,
- struct drm_gem_object *obj);
void virtio_gpu_modeset_init(struct virtio_gpu_device *vgdev);
void virtio_gpu_modeset_fini(struct virtio_gpu_device *vgdev);
@@ -349,7 +346,6 @@ struct drm_plane *virtio_gpu_plane_init(struct virtio_gpu_device *vgdev,
int index);
/* virtio_gpu_fence.c */
-bool virtio_fence_signaled(struct dma_fence *f);
struct virtio_gpu_fence *virtio_gpu_fence_alloc(
struct virtio_gpu_device *vgdev);
void virtio_gpu_fence_emit(struct virtio_gpu_device *vgdev,
@@ -365,18 +361,12 @@ int virtio_gpu_object_create(struct virtio_gpu_device *vgdev,
struct virtio_gpu_object_params *params,
struct virtio_gpu_object **bo_ptr,
struct virtio_gpu_fence *fence);
-
/* virtgpu_prime.c */
struct drm_gem_object *virtgpu_gem_prime_import_sg_table(
struct drm_device *dev, struct dma_buf_attachment *attach,
struct sg_table *sgt);
-static inline u64 virtio_gpu_object_mmap_offset(struct virtio_gpu_object *bo)
-{
- return drm_vma_node_offset_addr(&bo->base.base.vma_node);
-}
-
-/* virgl debufs */
+/* virgl debugfs */
int virtio_gpu_debugfs_init(struct drm_minor *minor);
#endif
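
The new disable_notify/pending_notify pair implements notification batching: while notifications are disabled, queued control buffers are not kicked to the host, and a single virtqueue_notify() is issued when they are re-enabled. The intended usage, mirroring the plane-update path later in this patch (variable names taken from there):

	virtio_gpu_disable_notify(vgdev);

	virtio_gpu_cmd_transfer_to_host_2d(vgdev, off, w, h, x, y, objs, NULL);
	virtio_gpu_cmd_set_scanout(vgdev, output->index, bo->hw_res_handle,
				   src_w, src_h, src_x, src_y);
	virtio_gpu_cmd_resource_flush(vgdev, bo->hw_res_handle, x, y, w, h);

	virtio_gpu_enable_notify(vgdev);	/* one host kick, if anything is pending */
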
diff --git a/drivers/gpu/drm/virtio/virtgpu_fence.c b/drivers/gpu/drm/virtio/virtgpu_fence.c
index a4b9881ca1d3..5b2a4146c5bd 100644
--- a/drivers/gpu/drm/virtio/virtgpu_fence.c
+++ b/drivers/gpu/drm/virtio/virtgpu_fence.c
@@ -27,6 +27,9 @@
#include "virtgpu_drv.h"
+#define to_virtio_fence(x) \
+ container_of(x, struct virtio_gpu_fence, f)
+
static const char *virtio_get_driver_name(struct dma_fence *f)
{
return "virtio_gpu";
@@ -37,7 +40,7 @@ static const char *virtio_get_timeline_name(struct dma_fence *f)
return "controlq";
}
-bool virtio_fence_signaled(struct dma_fence *f)
+static bool virtio_fence_signaled(struct dma_fence *f)
{
struct virtio_gpu_fence *fence = to_virtio_fence(f);
diff --git a/drivers/gpu/drm/virtio/virtgpu_gem.c b/drivers/gpu/drm/virtio/virtgpu_gem.c
index 4c1f579edfb3..0a2b62279647 100644
--- a/drivers/gpu/drm/virtio/virtgpu_gem.c
+++ b/drivers/gpu/drm/virtio/virtgpu_gem.c
@@ -96,14 +96,12 @@ int virtio_gpu_mode_dumb_mmap(struct drm_file *file_priv,
uint32_t handle, uint64_t *offset_p)
{
struct drm_gem_object *gobj;
- struct virtio_gpu_object *obj;
BUG_ON(!offset_p);
gobj = drm_gem_object_lookup(file_priv, handle);
if (gobj == NULL)
return -ENOENT;
- obj = gem_to_virtio_gpu_obj(gobj);
- *offset_p = virtio_gpu_object_mmap_offset(obj);
+ *offset_p = drm_vma_node_offset_addr(&gobj->vma_node);
drm_gem_object_put_unlocked(gobj);
return 0;
}
diff --git a/drivers/gpu/drm/virtio/virtgpu_ioctl.c b/drivers/gpu/drm/virtio/virtgpu_ioctl.c
index 9af1ec62434f..205ec4abae2b 100644
--- a/drivers/gpu/drm/virtio/virtgpu_ioctl.c
+++ b/drivers/gpu/drm/virtio/virtgpu_ioctl.c
@@ -33,17 +33,6 @@
#include "virtgpu_drv.h"
-static void convert_to_hw_box(struct virtio_gpu_box *dst,
- const struct drm_virtgpu_3d_box *src)
-{
- dst->x = cpu_to_le32(src->x);
- dst->y = cpu_to_le32(src->y);
- dst->z = cpu_to_le32(src->z);
- dst->w = cpu_to_le32(src->w);
- dst->h = cpu_to_le32(src->h);
- dst->d = cpu_to_le32(src->d);
-}
-
static int virtio_gpu_map_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
@@ -304,7 +293,6 @@ static int virtio_gpu_transfer_from_host_ioctl(struct drm_device *dev,
struct virtio_gpu_fence *fence;
int ret;
u32 offset = args->offset;
- struct virtio_gpu_box box;
if (vgdev->has_virgl_3d == false)
return -ENOSYS;
@@ -317,8 +305,6 @@ static int virtio_gpu_transfer_from_host_ioctl(struct drm_device *dev,
if (ret != 0)
goto err_put_free;
- convert_to_hw_box(&box, &args->box);
-
fence = virtio_gpu_fence_alloc(vgdev);
if (!fence) {
ret = -ENOMEM;
@@ -326,7 +312,7 @@ static int virtio_gpu_transfer_from_host_ioctl(struct drm_device *dev,
}
virtio_gpu_cmd_transfer_from_host_3d
(vgdev, vfpriv->ctx_id, offset, args->level,
- &box, objs, fence);
+ &args->box, objs, fence);
dma_fence_put(&fence->f);
return 0;
@@ -345,7 +331,6 @@ static int virtio_gpu_transfer_to_host_ioctl(struct drm_device *dev, void *data,
struct drm_virtgpu_3d_transfer_to_host *args = data;
struct virtio_gpu_object_array *objs;
struct virtio_gpu_fence *fence;
- struct virtio_gpu_box box;
int ret;
u32 offset = args->offset;
@@ -353,11 +338,10 @@ static int virtio_gpu_transfer_to_host_ioctl(struct drm_device *dev, void *data,
if (objs == NULL)
return -ENOENT;
- convert_to_hw_box(&box, &args->box);
if (!vgdev->has_virgl_3d) {
virtio_gpu_cmd_transfer_to_host_2d
(vgdev, offset,
- box.w, box.h, box.x, box.y,
+ args->box.w, args->box.h, args->box.x, args->box.y,
objs, NULL);
} else {
ret = virtio_gpu_array_lock_resv(objs);
@@ -372,7 +356,7 @@ static int virtio_gpu_transfer_to_host_ioctl(struct drm_device *dev, void *data,
virtio_gpu_cmd_transfer_to_host_3d
(vgdev,
vfpriv ? vfpriv->ctx_id : 0, offset,
- args->level, &box, objs, fence);
+ args->level, &args->box, objs, fence);
dma_fence_put(&fence->f);
}
return 0;
diff --git a/drivers/gpu/drm/virtio/virtgpu_plane.c b/drivers/gpu/drm/virtio/virtgpu_plane.c
index 390524143139..d1c3f5fbfee4 100644
--- a/drivers/gpu/drm/virtio/virtgpu_plane.c
+++ b/drivers/gpu/drm/virtio/virtgpu_plane.c
@@ -24,6 +24,7 @@
*/
#include <drm/drm_atomic_helper.h>
+#include <drm/drm_damage_helper.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_plane_helper.h>
@@ -88,7 +89,7 @@ static int virtio_gpu_plane_atomic_check(struct drm_plane *plane,
struct drm_crtc_state *crtc_state;
int ret;
- if (!state->fb || !state->crtc)
+ if (!state->fb || WARN_ON(!state->crtc))
return 0;
crtc_state = drm_atomic_get_crtc_state(state->state, state->crtc);
@@ -102,15 +103,37 @@ static int virtio_gpu_plane_atomic_check(struct drm_plane *plane,
return ret;
}
+static void virtio_gpu_update_dumb_bo(struct virtio_gpu_device *vgdev,
+ struct drm_plane_state *state,
+ struct drm_rect *rect)
+{
+ struct virtio_gpu_object *bo =
+ gem_to_virtio_gpu_obj(state->fb->obj[0]);
+ struct virtio_gpu_object_array *objs;
+ uint32_t w = rect->x2 - rect->x1;
+ uint32_t h = rect->y2 - rect->y1;
+ uint32_t x = rect->x1;
+ uint32_t y = rect->y1;
+ uint32_t off = x * state->fb->format->cpp[0] +
+ y * state->fb->pitches[0];
+
+ objs = virtio_gpu_array_alloc(1);
+ if (!objs)
+ return;
+ virtio_gpu_array_add_obj(objs, &bo->base.base);
+
+ virtio_gpu_cmd_transfer_to_host_2d(vgdev, off, w, h, x, y,
+ objs, NULL);
+}
+
static void virtio_gpu_primary_plane_update(struct drm_plane *plane,
struct drm_plane_state *old_state)
{
struct drm_device *dev = plane->dev;
struct virtio_gpu_device *vgdev = dev->dev_private;
struct virtio_gpu_output *output = NULL;
- struct virtio_gpu_framebuffer *vgfb;
struct virtio_gpu_object *bo;
- uint32_t handle;
+ struct drm_rect rect;
if (plane->state->crtc)
output = drm_crtc_to_virtio_gpu_output(plane->state->crtc);
@@ -119,47 +142,52 @@ static void virtio_gpu_primary_plane_update(struct drm_plane *plane,
if (WARN_ON(!output))
return;
- if (plane->state->fb && output->enabled) {
- vgfb = to_virtio_gpu_framebuffer(plane->state->fb);
- bo = gem_to_virtio_gpu_obj(vgfb->base.obj[0]);
- handle = bo->hw_res_handle;
- if (bo->dumb) {
- struct virtio_gpu_object_array *objs;
-
- objs = virtio_gpu_array_alloc(1);
- if (!objs)
- return;
- virtio_gpu_array_add_obj(objs, vgfb->base.obj[0]);
- virtio_gpu_cmd_transfer_to_host_2d
- (vgdev, 0,
- plane->state->src_w >> 16,
- plane->state->src_h >> 16,
- plane->state->src_x >> 16,
- plane->state->src_y >> 16,
- objs, NULL);
- }
- } else {
- handle = 0;
+ if (!plane->state->fb || !output->enabled) {
+ DRM_DEBUG("nofb\n");
+ virtio_gpu_cmd_set_scanout(vgdev, output->index, 0,
+ plane->state->src_w >> 16,
+ plane->state->src_h >> 16,
+ 0, 0);
+ return;
}
- DRM_DEBUG("handle 0x%x, crtc %dx%d+%d+%d, src %dx%d+%d+%d\n", handle,
- plane->state->crtc_w, plane->state->crtc_h,
- plane->state->crtc_x, plane->state->crtc_y,
- plane->state->src_w >> 16,
- plane->state->src_h >> 16,
- plane->state->src_x >> 16,
- plane->state->src_y >> 16);
- virtio_gpu_cmd_set_scanout(vgdev, output->index, handle,
- plane->state->src_w >> 16,
- plane->state->src_h >> 16,
- plane->state->src_x >> 16,
- plane->state->src_y >> 16);
- if (handle)
- virtio_gpu_cmd_resource_flush(vgdev, handle,
- plane->state->src_x >> 16,
- plane->state->src_y >> 16,
- plane->state->src_w >> 16,
- plane->state->src_h >> 16);
+ if (!drm_atomic_helper_damage_merged(old_state, plane->state, &rect))
+ return;
+
+ virtio_gpu_disable_notify(vgdev);
+
+ bo = gem_to_virtio_gpu_obj(plane->state->fb->obj[0]);
+ if (bo->dumb)
+ virtio_gpu_update_dumb_bo(vgdev, plane->state, &rect);
+
+ if (plane->state->fb != old_state->fb ||
+ plane->state->src_w != old_state->src_w ||
+ plane->state->src_h != old_state->src_h ||
+ plane->state->src_x != old_state->src_x ||
+ plane->state->src_y != old_state->src_y) {
+ DRM_DEBUG("handle 0x%x, crtc %dx%d+%d+%d, src %dx%d+%d+%d\n",
+ bo->hw_res_handle,
+ plane->state->crtc_w, plane->state->crtc_h,
+ plane->state->crtc_x, plane->state->crtc_y,
+ plane->state->src_w >> 16,
+ plane->state->src_h >> 16,
+ plane->state->src_x >> 16,
+ plane->state->src_y >> 16);
+ virtio_gpu_cmd_set_scanout(vgdev, output->index,
+ bo->hw_res_handle,
+ plane->state->src_w >> 16,
+ plane->state->src_h >> 16,
+ plane->state->src_x >> 16,
+ plane->state->src_y >> 16);
+ }
+
+ virtio_gpu_cmd_resource_flush(vgdev, bo->hw_res_handle,
+ rect.x1,
+ rect.y1,
+ rect.x2 - rect.x1,
+ rect.y2 - rect.y1);
+
+ virtio_gpu_enable_notify(vgdev);
}
static int virtio_gpu_cursor_prepare_fb(struct drm_plane *plane,
@@ -232,6 +260,7 @@ static void virtio_gpu_cursor_plane_update(struct drm_plane *plane,
if (!objs)
return;
virtio_gpu_array_add_obj(objs, vgfb->base.obj[0]);
+ virtio_gpu_array_lock_resv(objs);
virtio_gpu_cmd_transfer_to_host_2d
(vgdev, 0,
plane->state->crtc_w,
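A note on the dumb-BO transfer math in virtio_gpu_update_dumb_bo() above: with a linear framebuffer, the byte offset of the damage rectangle's origin is x * cpp + y * pitch, and only (x2 - x1) x (y2 - y1) pixels are uploaded instead of the whole plane. A minimal stand-alone sketch with assumed example values (not from the patch):

#include <stdint.h>
#include <stdio.h>

struct demo_rect { int x1, y1, x2, y2; };

int main(void)
{
	/* Assumed example: 64x16 damage at (8,4) on an XRGB8888
	 * framebuffer (cpp = 4) with a 4096-byte pitch. */
	struct demo_rect r = { 8, 4, 72, 20 };
	uint32_t cpp = 4, pitch = 4096;
	uint32_t off = (uint32_t)r.x1 * cpp + (uint32_t)r.y1 * pitch;

	printf("transfer %dx%d pixels at (%d,%d), byte offset %u\n",
	       r.x2 - r.x1, r.y2 - r.y1, r.x1, r.y1, off);
	return 0;	/* prints: transfer 64x16 pixels at (8,4), byte offset 16416 */
}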
diff --git a/drivers/gpu/drm/virtio/virtgpu_vq.c b/drivers/gpu/drm/virtio/virtgpu_vq.c
index 74ad3bc3ebe8..5914e79d3429 100644
--- a/drivers/gpu/drm/virtio/virtgpu_vq.c
+++ b/drivers/gpu/drm/virtio/virtgpu_vq.c
@@ -40,6 +40,17 @@
+ MAX_INLINE_CMD_SIZE \
+ MAX_INLINE_RESP_SIZE)
+static void convert_to_hw_box(struct virtio_gpu_box *dst,
+ const struct drm_virtgpu_3d_box *src)
+{
+ dst->x = cpu_to_le32(src->x);
+ dst->y = cpu_to_le32(src->y);
+ dst->z = cpu_to_le32(src->z);
+ dst->w = cpu_to_le32(src->w);
+ dst->h = cpu_to_le32(src->h);
+ dst->d = cpu_to_le32(src->d);
+}
+
void virtio_gpu_ctrl_ack(struct virtqueue *vq)
{
struct drm_device *dev = vq->vdev->priv;
@@ -393,8 +404,12 @@ again:
}
notify = virtio_gpu_queue_ctrl_buffer_locked(vgdev, vbuf, vout);
spin_unlock(&vgdev->ctrlq.qlock);
- if (notify)
- virtqueue_notify(vgdev->ctrlq.vq);
+ if (notify) {
+ if (vgdev->disable_notify)
+ vgdev->pending_notify = true;
+ else
+ virtqueue_notify(vgdev->ctrlq.vq);
+ }
if (sgt) {
sg_free_table(sgt);
@@ -402,6 +417,21 @@ again:
}
}
+void virtio_gpu_disable_notify(struct virtio_gpu_device *vgdev)
+{
+ vgdev->disable_notify = true;
+}
+
+void virtio_gpu_enable_notify(struct virtio_gpu_device *vgdev)
+{
+ vgdev->disable_notify = false;
+
+ if (!vgdev->pending_notify)
+ return;
+ vgdev->pending_notify = false;
+ virtqueue_notify(vgdev->ctrlq.vq);
+}
+
static void virtio_gpu_queue_ctrl_buffer(struct virtio_gpu_device *vgdev,
struct virtio_gpu_vbuffer *vbuf)
{
@@ -965,7 +995,7 @@ virtio_gpu_cmd_resource_create_3d(struct virtio_gpu_device *vgdev,
void virtio_gpu_cmd_transfer_to_host_3d(struct virtio_gpu_device *vgdev,
uint32_t ctx_id,
uint64_t offset, uint32_t level,
- struct virtio_gpu_box *box,
+ struct drm_virtgpu_3d_box *box,
struct virtio_gpu_object_array *objs,
struct virtio_gpu_fence *fence)
{
@@ -987,7 +1017,7 @@ void virtio_gpu_cmd_transfer_to_host_3d(struct virtio_gpu_device *vgdev,
cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_TRANSFER_TO_HOST_3D);
cmd_p->hdr.ctx_id = cpu_to_le32(ctx_id);
cmd_p->resource_id = cpu_to_le32(bo->hw_res_handle);
- cmd_p->box = *box;
+ convert_to_hw_box(&cmd_p->box, box);
cmd_p->offset = cpu_to_le64(offset);
cmd_p->level = cpu_to_le32(level);
@@ -997,7 +1027,7 @@ void virtio_gpu_cmd_transfer_to_host_3d(struct virtio_gpu_device *vgdev,
void virtio_gpu_cmd_transfer_from_host_3d(struct virtio_gpu_device *vgdev,
uint32_t ctx_id,
uint64_t offset, uint32_t level,
- struct virtio_gpu_box *box,
+ struct drm_virtgpu_3d_box *box,
struct virtio_gpu_object_array *objs,
struct virtio_gpu_fence *fence)
{
@@ -1013,7 +1043,7 @@ void virtio_gpu_cmd_transfer_from_host_3d(struct virtio_gpu_device *vgdev,
cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_TRANSFER_FROM_HOST_3D);
cmd_p->hdr.ctx_id = cpu_to_le32(ctx_id);
cmd_p->resource_id = cpu_to_le32(bo->hw_res_handle);
- cmd_p->box = *box;
+ convert_to_hw_box(&cmd_p->box, box);
cmd_p->offset = cpu_to_le64(offset);
cmd_p->level = cpu_to_le32(level);
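The disable/enable pair added above latches notifications so that a burst of control-queue commands (the transfer, set_scanout and flush issued by the plane update) results in a single virtqueue kick. A toy user-space model of the latch, for illustration only:

#include <stdbool.h>
#include <stdio.h>

/* Toy model (not the driver code) of the notify-coalescing latch. */
struct toy_dev { bool disable_notify, pending_notify; };

static void toy_notify(struct toy_dev *d)
{
	if (d->disable_notify)
		d->pending_notify = true;	/* latch instead of kicking */
	else
		puts("virtqueue kicked");
}

static void toy_enable(struct toy_dev *d)
{
	d->disable_notify = false;
	if (d->pending_notify) {
		d->pending_notify = false;
		puts("virtqueue kicked (coalesced)");
	}
}

int main(void)
{
	struct toy_dev d = { .disable_notify = true };

	toy_notify(&d);	/* latched */
	toy_notify(&d);	/* latched */
	toy_enable(&d);	/* one kick for both commands */
	return 0;
}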
diff --git a/drivers/gpu/drm/vkms/vkms_composer.c b/drivers/gpu/drm/vkms/vkms_composer.c
index d5585695c64d..4af2f19480f4 100644
--- a/drivers/gpu/drm/vkms/vkms_composer.c
+++ b/drivers/gpu/drm/vkms/vkms_composer.c
@@ -43,18 +43,18 @@ static uint32_t compute_crc(void *vaddr_out, struct vkms_composer *composer)
}
/**
- * blend - belnd value at vaddr_src with value at vaddr_dst
+ * blend - blend value at vaddr_src with value at vaddr_dst
* @vaddr_dst: destination address
* @vaddr_src: source address
* @dest_composer: destination framebuffer's metadata
* @src_composer: source framebuffer's metadata
*
* Blend value at vaddr_src with value at vaddr_dst.
- * Currently, this function write value at vaddr_src on value
+ * Currently, this function writes the value of vaddr_src on the value
* at vaddr_dst using buffer's metadata to locate the new values
- * from vaddr_src and their distenation at vaddr_dst.
+ * from vaddr_src and their destination at vaddr_dst.
*
- * Todo: Use the alpha value to blend vaddr_src with vaddr_dst
+ * TODO: Use the alpha value to blend vaddr_src with vaddr_dst
* instead of overwriting it.
*/
static void blend(void *vaddr_dst, void *vaddr_src,
diff --git a/drivers/gpu/drm/vkms/vkms_drv.c b/drivers/gpu/drm/vkms/vkms_drv.c
index d1fe144aa289..25bd7519295f 100644
--- a/drivers/gpu/drm/vkms/vkms_drv.c
+++ b/drivers/gpu/drm/vkms/vkms_drv.c
@@ -3,10 +3,10 @@
/**
* DOC: vkms (Virtual Kernel Modesetting)
*
- * vkms is a software-only model of a kms driver that is useful for testing,
- * or for running X (or similar) on headless machines and be able to still
- * use the GPU. vkms aims to enable a virtual display without the need for
- * a hardware display capability.
+ * VKMS is a software-only model of a KMS driver that is useful for testing
+ * and for running X (or similar) on headless machines. VKMS aims to enable
+ * a virtual display with no need for a hardware display capability, freeing
+ * the GPU during DRM API tests.
*/
#include <linux/module.h>
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf_res.c b/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf_res.c
index 4ac55fc2bf97..44d858ce4ce7 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf_res.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf_res.c
@@ -209,8 +209,10 @@ int vmw_cmdbuf_res_add(struct vmw_cmdbuf_res_manager *man,
cres->hash.key = user_key | (res_type << 24);
ret = drm_ht_insert_item(&man->resources, &cres->hash);
- if (unlikely(ret != 0))
+ if (unlikely(ret != 0)) {
+ kfree(cres);
goto out_invalid_key;
+ }
cres->state = VMW_CMDBUF_RES_ADD;
cres->res = vmw_resource_reference(res);
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
index e962048f65d2..827458f49112 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
@@ -28,10 +28,10 @@
#include <linux/console.h>
#include <linux/dma-mapping.h>
#include <linux/module.h>
+#include <linux/pci.h>
#include <drm/drm_drv.h>
#include <drm/drm_ioctl.h>
-#include <drm/drm_pci.h>
#include <drm/drm_sysfs.h>
#include <drm/ttm/ttm_bo_driver.h>
#include <drm/ttm/ttm_module.h>
@@ -150,6 +150,9 @@
#define DRM_IOCTL_VMW_GB_SURFACE_REF_EXT \
DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_GB_SURFACE_REF_EXT, \
union drm_vmw_gb_surface_reference_ext_arg)
+#define DRM_IOCTL_VMW_MSG \
+ DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_MSG, \
+ struct drm_vmw_msg_arg)
/**
* The core DRM version of this macro doesn't account for
@@ -165,9 +168,9 @@
static const struct drm_ioctl_desc vmw_ioctls[] = {
VMW_IOCTL_DEF(VMW_GET_PARAM, vmw_getparam_ioctl,
- DRM_AUTH | DRM_RENDER_ALLOW),
+ DRM_RENDER_ALLOW),
VMW_IOCTL_DEF(VMW_ALLOC_DMABUF, vmw_bo_alloc_ioctl,
- DRM_AUTH | DRM_RENDER_ALLOW),
+ DRM_RENDER_ALLOW),
VMW_IOCTL_DEF(VMW_UNREF_DMABUF, vmw_bo_unref_ioctl,
DRM_RENDER_ALLOW),
VMW_IOCTL_DEF(VMW_CURSOR_BYPASS,
@@ -182,16 +185,16 @@ static const struct drm_ioctl_desc vmw_ioctls[] = {
DRM_MASTER),
VMW_IOCTL_DEF(VMW_CREATE_CONTEXT, vmw_context_define_ioctl,
- DRM_AUTH | DRM_RENDER_ALLOW),
+ DRM_RENDER_ALLOW),
VMW_IOCTL_DEF(VMW_UNREF_CONTEXT, vmw_context_destroy_ioctl,
DRM_RENDER_ALLOW),
VMW_IOCTL_DEF(VMW_CREATE_SURFACE, vmw_surface_define_ioctl,
- DRM_AUTH | DRM_RENDER_ALLOW),
+ DRM_RENDER_ALLOW),
VMW_IOCTL_DEF(VMW_UNREF_SURFACE, vmw_surface_destroy_ioctl,
DRM_RENDER_ALLOW),
VMW_IOCTL_DEF(VMW_REF_SURFACE, vmw_surface_reference_ioctl,
- DRM_AUTH | DRM_RENDER_ALLOW),
- VMW_IOCTL_DEF(VMW_EXECBUF, vmw_execbuf_ioctl, DRM_AUTH |
+ DRM_RENDER_ALLOW),
+ VMW_IOCTL_DEF(VMW_EXECBUF, vmw_execbuf_ioctl,
DRM_RENDER_ALLOW),
VMW_IOCTL_DEF(VMW_FENCE_WAIT, vmw_fence_obj_wait_ioctl,
DRM_RENDER_ALLOW),
@@ -201,9 +204,9 @@ static const struct drm_ioctl_desc vmw_ioctls[] = {
VMW_IOCTL_DEF(VMW_FENCE_UNREF, vmw_fence_obj_unref_ioctl,
DRM_RENDER_ALLOW),
VMW_IOCTL_DEF(VMW_FENCE_EVENT, vmw_fence_event_ioctl,
- DRM_AUTH | DRM_RENDER_ALLOW),
+ DRM_RENDER_ALLOW),
VMW_IOCTL_DEF(VMW_GET_3D_CAP, vmw_get_cap_3d_ioctl,
- DRM_AUTH | DRM_RENDER_ALLOW),
+ DRM_RENDER_ALLOW),
/* these allow direct access to the framebuffers; mark as master only */
VMW_IOCTL_DEF(VMW_PRESENT, vmw_present_ioctl,
@@ -221,28 +224,31 @@ static const struct drm_ioctl_desc vmw_ioctls[] = {
DRM_RENDER_ALLOW),
VMW_IOCTL_DEF(VMW_CREATE_SHADER,
vmw_shader_define_ioctl,
- DRM_AUTH | DRM_RENDER_ALLOW),
+ DRM_RENDER_ALLOW),
VMW_IOCTL_DEF(VMW_UNREF_SHADER,
vmw_shader_destroy_ioctl,
DRM_RENDER_ALLOW),
VMW_IOCTL_DEF(VMW_GB_SURFACE_CREATE,
vmw_gb_surface_define_ioctl,
- DRM_AUTH | DRM_RENDER_ALLOW),
+ DRM_RENDER_ALLOW),
VMW_IOCTL_DEF(VMW_GB_SURFACE_REF,
vmw_gb_surface_reference_ioctl,
- DRM_AUTH | DRM_RENDER_ALLOW),
+ DRM_RENDER_ALLOW),
VMW_IOCTL_DEF(VMW_SYNCCPU,
vmw_user_bo_synccpu_ioctl,
DRM_RENDER_ALLOW),
VMW_IOCTL_DEF(VMW_CREATE_EXTENDED_CONTEXT,
vmw_extended_context_define_ioctl,
- DRM_AUTH | DRM_RENDER_ALLOW),
+ DRM_RENDER_ALLOW),
VMW_IOCTL_DEF(VMW_GB_SURFACE_CREATE_EXT,
vmw_gb_surface_define_ext_ioctl,
- DRM_AUTH | DRM_RENDER_ALLOW),
+ DRM_RENDER_ALLOW),
VMW_IOCTL_DEF(VMW_GB_SURFACE_REF_EXT,
vmw_gb_surface_reference_ext_ioctl,
- DRM_AUTH | DRM_RENDER_ALLOW),
+ DRM_RENDER_ALLOW),
+ VMW_IOCTL_DEF(VMW_MSG,
+ vmw_msg_ioctl,
+ DRM_RENDER_ALLOW),
};
static const struct pci_device_id vmw_pci_id_list[] = {
@@ -1211,8 +1217,10 @@ static void vmw_remove(struct pci_dev *pdev)
{
struct drm_device *dev = pci_get_drvdata(pdev);
+ drm_dev_unregister(dev);
+ vmw_driver_unload(dev);
+ drm_dev_put(dev);
pci_disable_device(pdev);
- drm_put_dev(dev);
}
static int vmwgfx_pm_notifier(struct notifier_block *nb, unsigned long val,
@@ -1391,8 +1399,6 @@ static const struct file_operations vmwgfx_driver_fops = {
static struct drm_driver driver = {
.driver_features =
DRIVER_MODESET | DRIVER_RENDER | DRIVER_ATOMIC,
- .load = vmw_driver_load,
- .unload = vmw_driver_unload,
.get_vblank_counter = vmw_get_vblank_counter,
.enable_vblank = vmw_enable_vblank,
.disable_vblank = vmw_disable_vblank,
@@ -1431,7 +1437,39 @@ static struct pci_driver vmw_pci_driver = {
static int vmw_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
- return drm_get_pci_dev(pdev, ent, &driver);
+ struct drm_device *dev;
+ int ret;
+
+ ret = pci_enable_device(pdev);
+ if (ret)
+ return ret;
+
+ dev = drm_dev_alloc(&driver, &pdev->dev);
+ if (IS_ERR(dev)) {
+ ret = PTR_ERR(dev);
+ goto err_pci_disable_device;
+ }
+
+ dev->pdev = pdev;
+ pci_set_drvdata(pdev, dev);
+
+ ret = vmw_driver_load(dev, ent->driver_data);
+ if (ret)
+ goto err_drm_dev_put;
+
+ ret = drm_dev_register(dev, ent->driver_data);
+ if (ret)
+ goto err_vmw_driver_unload;
+
+ return 0;
+
+err_vmw_driver_unload:
+ vmw_driver_unload(dev);
+err_drm_dev_put:
+ drm_dev_put(dev);
+err_pci_disable_device:
+ pci_disable_device(pdev);
+ return ret;
}
static int __init vmwgfx_init(void)
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
index a31e726d6d71..86b69397d166 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
@@ -56,9 +56,9 @@
#define VMWGFX_DRIVER_NAME "vmwgfx"
-#define VMWGFX_DRIVER_DATE "20190328"
+#define VMWGFX_DRIVER_DATE "20200114"
#define VMWGFX_DRIVER_MAJOR 2
-#define VMWGFX_DRIVER_MINOR 16
+#define VMWGFX_DRIVER_MINOR 17
#define VMWGFX_DRIVER_PATCHLEVEL 0
#define VMWGFX_FIFO_STATIC_SIZE (1024*1024)
#define VMWGFX_MAX_RELOCATIONS 2048
@@ -1403,6 +1403,8 @@ int vmw_bo_cpu_blit(struct ttm_buffer_object *dst,
int vmw_host_get_guestinfo(const char *guest_info_param,
char *buffer, size_t *length);
int vmw_host_log(const char *log);
+int vmw_msg_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv);
/* VMW logging */
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
index 934ad7c0c342..73489a45decb 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
@@ -2377,9 +2377,12 @@ static int vmw_cmd_dx_clear_rendertarget_view(struct vmw_private *dev_priv,
{
VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXClearRenderTargetView) =
container_of(header, typeof(*cmd), header);
+ struct vmw_resource *ret;
- return PTR_RET(vmw_view_id_val_add(sw_context, vmw_view_rt,
- cmd->body.renderTargetViewId));
+ ret = vmw_view_id_val_add(sw_context, vmw_view_rt,
+ cmd->body.renderTargetViewId);
+
+ return PTR_ERR_OR_ZERO(ret);
}
/**
@@ -2396,9 +2399,12 @@ static int vmw_cmd_dx_clear_depthstencil_view(struct vmw_private *dev_priv,
{
VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXClearDepthStencilView) =
container_of(header, typeof(*cmd), header);
+ struct vmw_resource *ret;
+
+ ret = vmw_view_id_val_add(sw_context, vmw_view_ds,
+ cmd->body.depthStencilViewId);
- return PTR_RET(vmw_view_id_val_add(sw_context, vmw_view_ds,
- cmd->body.depthStencilViewId));
+ return PTR_ERR_OR_ZERO(ret);
}
static int vmw_cmd_dx_view_define(struct vmw_private *dev_priv,
@@ -2741,9 +2747,12 @@ static int vmw_cmd_dx_genmips(struct vmw_private *dev_priv,
{
VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXGenMips) =
container_of(header, typeof(*cmd), header);
+ struct vmw_resource *ret;
+
+ ret = vmw_view_id_val_add(sw_context, vmw_view_sr,
+ cmd->body.shaderResourceViewId);
- return PTR_RET(vmw_view_id_val_add(sw_context, vmw_view_sr,
- cmd->body.shaderResourceViewId));
+ return PTR_ERR_OR_ZERO(ret);
}
/**
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c
index ea29953e0b08..c59806d40e15 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c
@@ -624,7 +624,7 @@ out_unlock:
}
-static struct fb_ops vmw_fb_ops = {
+static const struct fb_ops vmw_fb_ops = {
.owner = THIS_MODULE,
.fb_check_var = vmw_fb_check_var,
.fb_set_par = vmw_fb_set_par,
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_msg.c b/drivers/gpu/drm/vmwgfx/vmwgfx_msg.c
index b6c5e4c2ac3c..e9f448a5ebb3 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_msg.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_msg.c
@@ -28,6 +28,7 @@
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/slab.h>
+#include <linux/mem_encrypt.h>
#include <asm/hypervisor.h>
@@ -56,6 +57,8 @@
#define HIGH_WORD(X) ((X & 0xFFFF0000) >> 16)
+#define MAX_USER_MSG_LENGTH PAGE_SIZE
+
static u32 vmw_msg_enabled = 1;
enum rpc_msg_type {
@@ -148,7 +151,8 @@ static unsigned long vmw_port_hb_out(struct rpc_channel *channel,
unsigned long si, di, eax, ebx, ecx, edx;
unsigned long msg_len = strlen(msg);
- if (hb) {
+ /* HB port can't access encrypted memory. */
+ if (hb && !mem_encrypt_active()) {
unsigned long bp = channel->cookie_high;
si = (uintptr_t) msg;
@@ -202,7 +206,8 @@ static unsigned long vmw_port_hb_in(struct rpc_channel *channel, char *reply,
{
unsigned long si, di, eax, ebx, ecx, edx;
- if (hb) {
+ /* HB port can't access encrypted memory. */
+ if (hb && !mem_encrypt_active()) {
unsigned long bp = channel->cookie_low;
si = channel->cookie_high;
@@ -514,3 +519,84 @@ out_open:
return -EINVAL;
}
+
+
+/**
+ * vmw_msg_ioctl: Sends and receives a message to/from the host on behalf of user-space
+ *
+ * Sends a message from user-space to host.
+ * Can also receive a result from host and return that to user-space.
+ *
+ * @dev: Identifies the drm device.
+ * @data: Pointer to the ioctl argument.
+ * @file_priv: Identifies the caller.
+ * Return: Zero on success, negative error code on error.
+ */
+
+int vmw_msg_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
+{
+ struct drm_vmw_msg_arg *arg =
+ (struct drm_vmw_msg_arg *) data;
+ struct rpc_channel channel;
+ char *msg;
+ int length;
+
+ msg = kmalloc(MAX_USER_MSG_LENGTH, GFP_KERNEL);
+ if (!msg) {
+ DRM_ERROR("Cannot allocate memory for log message.\n");
+ return -ENOMEM;
+ }
+
+ length = strncpy_from_user(msg, (void __user *)((unsigned long)arg->send),
+ MAX_USER_MSG_LENGTH);
+ if (length < 0 || length >= MAX_USER_MSG_LENGTH) {
+ DRM_ERROR("Userspace message access failure.\n");
+ kfree(msg);
+ return -EINVAL;
+ }
+
+
+ if (vmw_open_channel(&channel, RPCI_PROTOCOL_NUM)) {
+ DRM_ERROR("Failed to open channel.\n");
+ goto out_open;
+ }
+
+ if (vmw_send_msg(&channel, msg)) {
+ DRM_ERROR("Failed to send message to host.\n");
+ goto out_msg;
+ }
+
+ if (!arg->send_only) {
+ char *reply = NULL;
+ size_t reply_len = 0;
+
+ if (vmw_recv_msg(&channel, (void *) &reply, &reply_len)) {
+ DRM_ERROR("Failed to receive message from host.\n");
+ goto out_msg;
+ }
+ if (reply && reply_len > 0) {
+ if (copy_to_user((void __user *)((unsigned long)arg->receive),
+ reply, reply_len)) {
+ DRM_ERROR("Failed to copy message to userspace.\n");
+ kfree(reply);
+ goto out_msg;
+ }
+ arg->receive_len = (__u32)reply_len;
+ }
+ kfree(reply);
+ }
+
+ vmw_close_channel(&channel);
+ kfree(msg);
+
+ return 0;
+
+out_msg:
+ vmw_close_channel(&channel);
+out_open:
+ kfree(msg);
+
+ return -EINVAL;
+}
+
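A hypothetical user-space caller of the new ioctl might look as follows. The field layout of struct drm_vmw_msg_arg (send/receive as user pointers packed in __u64, plus send_only and receive_len) is inferred from the driver code above, and the RPC command string is only an example:

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>
#include <xf86drm.h>		/* drmIoctl() */
#include <drm/drm.h>
#include <drm/vmwgfx_drm.h>	/* assumed: DRM_VMW_MSG, struct drm_vmw_msg_arg */

#define DRM_IOCTL_VMW_MSG \
	DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_MSG, struct drm_vmw_msg_arg)

int main(void)
{
	char reply[4096] = "";
	struct drm_vmw_msg_arg arg = {
		/* example guest RPC command; layout assumed from the driver */
		.send = (unsigned long)"info-get guestinfo.ip",
		.receive = (unsigned long)reply,
		.send_only = 0,
	};
	int fd = open("/dev/dri/renderD128", O_RDWR);

	if (fd < 0 || drmIoctl(fd, DRM_IOCTL_VMW_MSG, &arg))
		return 1;

	printf("host replied (%u bytes): %.*s\n",
	       arg.receive_len, (int)arg.receive_len, reply);
	close(fd);
	return 0;
}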
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_prime.c b/drivers/gpu/drm/vmwgfx/vmwgfx_prime.c
index e420675e8db3..d9552a1efd13 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_prime.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_prime.c
@@ -62,45 +62,12 @@ static void vmw_prime_unmap_dma_buf(struct dma_buf_attachment *attach,
{
}
-static void *vmw_prime_dmabuf_vmap(struct dma_buf *dma_buf)
-{
- return NULL;
-}
-
-static void vmw_prime_dmabuf_vunmap(struct dma_buf *dma_buf, void *vaddr)
-{
-}
-
-static void *vmw_prime_dmabuf_kmap(struct dma_buf *dma_buf,
- unsigned long page_num)
-{
- return NULL;
-}
-
-static void vmw_prime_dmabuf_kunmap(struct dma_buf *dma_buf,
- unsigned long page_num, void *addr)
-{
-
-}
-
-static int vmw_prime_dmabuf_mmap(struct dma_buf *dma_buf,
- struct vm_area_struct *vma)
-{
- WARN_ONCE(true, "Attempted use of dmabuf mmap. Bad.\n");
- return -ENOSYS;
-}
-
const struct dma_buf_ops vmw_prime_dmabuf_ops = {
.attach = vmw_prime_map_attach,
.detach = vmw_prime_map_detach,
.map_dma_buf = vmw_prime_map_dma_buf,
.unmap_dma_buf = vmw_prime_unmap_dma_buf,
.release = NULL,
- .map = vmw_prime_dmabuf_kmap,
- .unmap = vmw_prime_dmabuf_kunmap,
- .mmap = vmw_prime_dmabuf_mmap,
- .vmap = vmw_prime_dmabuf_vmap,
- .vunmap = vmw_prime_dmabuf_vunmap,
};
int vmw_prime_fd_to_handle(struct drm_device *dev,
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c b/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c
index 32b9131b2bae..3ce630aa4fde 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c
@@ -934,16 +934,12 @@ vmw_surface_handle_reference(struct vmw_private *dev_priv,
uint32_t handle;
struct ttm_base_object *base;
int ret;
- bool require_exist = false;
if (handle_type == DRM_VMW_HANDLE_PRIME) {
ret = ttm_prime_fd_to_handle(tfile, u_handle, &handle);
if (unlikely(ret != 0))
return ret;
} else {
- if (unlikely(drm_is_render_client(file_priv)))
- require_exist = true;
-
handle = u_handle;
}
@@ -960,9 +956,18 @@ vmw_surface_handle_reference(struct vmw_private *dev_priv,
}
if (handle_type != DRM_VMW_HANDLE_PRIME) {
+ bool require_exist = false;
+
user_srf = container_of(base, struct vmw_user_surface,
prime.base);
+ /* Error out if we are an unauthenticated primary client */
+ if (drm_is_primary_client(file_priv) &&
+ !file_priv->authenticated) {
+ ret = -EACCES;
+ goto out_bad_resource;
+ }
+
/*
* Make sure the surface creator has the same
* authenticating master, or is already registered with us.
@@ -971,6 +976,9 @@ vmw_surface_handle_reference(struct vmw_private *dev_priv,
user_srf->master != file_priv->master)
require_exist = true;
+ if (unlikely(drm_is_render_client(file_priv)))
+ require_exist = true;
+
ret = ttm_ref_object_add(tfile, base, TTM_REF_USAGE, NULL,
require_exist);
if (unlikely(ret != 0)) {
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_glue.c b/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_glue.c
index ce288756531b..aa7e50f63b94 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_glue.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_glue.c
@@ -45,6 +45,10 @@ int vmw_mmap(struct file *filp, struct vm_area_struct *vma)
vma->vm_ops = &vmw_vm_ops;
+ /* Use VM_PFNMAP rather than VM_MIXEDMAP if not a COW mapping */
+ if ((vma->vm_flags & (VM_SHARED | VM_MAYWRITE)) != VM_MAYWRITE)
+ vma->vm_flags = (vma->vm_flags & ~VM_MIXEDMAP) | VM_PFNMAP;
+
return 0;
}
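The flag test above reads as "not a private writable mapping": only a mapping that is VM_MAYWRITE without VM_SHARED needs copy-on-write semantics and must keep VM_MIXEDMAP. A stand-alone restatement of the predicate, with toy flag values rather than the kernel's:

#include <stdbool.h>

#define DEMO_VM_SHARED   0x1u
#define DEMO_VM_MAYWRITE 0x2u

/* Toy re-statement of the check above: a mapping may use VM_PFNMAP
 * unless it is private (not shared) yet writable, i.e. subject to COW.
 * Shared r/w, shared r/o and private r/o mappings all qualify. */
static bool demo_can_use_pfnmap(unsigned int vm_flags)
{
	return (vm_flags & (DEMO_VM_SHARED | DEMO_VM_MAYWRITE)) !=
	       DEMO_VM_MAYWRITE;
}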
diff --git a/drivers/gpu/drm/xen/xen_drm_front_kms.c b/drivers/gpu/drm/xen/xen_drm_front_kms.c
index ff506bc99414..4f34c5208180 100644
--- a/drivers/gpu/drm/xen/xen_drm_front_kms.c
+++ b/drivers/gpu/drm/xen/xen_drm_front_kms.c
@@ -63,14 +63,7 @@ fb_create(struct drm_device *dev, struct drm_file *filp,
if (IS_ERR_OR_NULL(fb))
return fb;
- gem_obj = drm_gem_object_lookup(filp, mode_cmd->handles[0]);
- if (!gem_obj) {
- DRM_ERROR("Failed to lookup GEM object\n");
- ret = -ENOENT;
- goto fail;
- }
-
- drm_gem_object_put_unlocked(gem_obj);
+ gem_obj = fb->obj[0];
ret = xen_drm_front_fb_attach(drm_info->front_info,
xen_drm_front_dbuf_to_cookie(gem_obj),
diff --git a/drivers/gpu/drm/zte/zx_hdmi.c b/drivers/gpu/drm/zte/zx_hdmi.c
index a50f5a1f09b8..b98a1420dcd3 100644
--- a/drivers/gpu/drm/zte/zx_hdmi.c
+++ b/drivers/gpu/drm/zte/zx_hdmi.c
@@ -319,8 +319,10 @@ static int zx_hdmi_register(struct drm_device *drm, struct zx_hdmi *hdmi)
hdmi->connector.polled = DRM_CONNECTOR_POLL_HPD;
- drm_connector_init(drm, &hdmi->connector, &zx_hdmi_connector_funcs,
- DRM_MODE_CONNECTOR_HDMIA);
+ drm_connector_init_with_ddc(drm, &hdmi->connector,
+ &zx_hdmi_connector_funcs,
+ DRM_MODE_CONNECTOR_HDMIA,
+ &hdmi->ddc->adap);
drm_connector_helper_add(&hdmi->connector,
&zx_hdmi_connector_helper_funcs);
diff --git a/drivers/gpu/drm/zte/zx_vga.c b/drivers/gpu/drm/zte/zx_vga.c
index 9b67e419280c..c4fa3bbaba78 100644
--- a/drivers/gpu/drm/zte/zx_vga.c
+++ b/drivers/gpu/drm/zte/zx_vga.c
@@ -165,8 +165,10 @@ static int zx_vga_register(struct drm_device *drm, struct zx_vga *vga)
vga->connector.polled = DRM_CONNECTOR_POLL_HPD;
- ret = drm_connector_init(drm, connector, &zx_vga_connector_funcs,
- DRM_MODE_CONNECTOR_VGA);
+ ret = drm_connector_init_with_ddc(drm, connector,
+ &zx_vga_connector_funcs,
+ DRM_MODE_CONNECTOR_VGA,
+ &vga->ddc->adap);
if (ret) {
DRM_DEV_ERROR(dev, "failed to init connector: %d\n", ret);
goto clean_encoder;
diff --git a/drivers/gpu/host1x/bus.c b/drivers/gpu/host1x/bus.c
index 2c8559ff3481..6a995db51d6d 100644
--- a/drivers/gpu/host1x/bus.c
+++ b/drivers/gpu/host1x/bus.c
@@ -120,7 +120,7 @@ static void host1x_subdev_register(struct host1x_device *device,
mutex_lock(&device->clients_lock);
list_move_tail(&client->list, &device->clients);
list_move_tail(&subdev->list, &device->active);
- client->parent = &device->dev;
+ client->host = &device->dev;
subdev->client = client;
mutex_unlock(&device->clients_lock);
mutex_unlock(&device->subdevs_lock);
@@ -156,7 +156,7 @@ static void __host1x_subdev_unregister(struct host1x_device *device,
*/
mutex_lock(&device->clients_lock);
subdev->client = NULL;
- client->parent = NULL;
+ client->host = NULL;
list_move_tail(&subdev->list, &device->subdevs);
/*
* XXX: Perhaps don't do this here, but rather explicitly remove it
@@ -710,6 +710,10 @@ int host1x_client_register(struct host1x_client *client)
struct host1x *host1x;
int err;
+ INIT_LIST_HEAD(&client->list);
+ mutex_init(&client->lock);
+ client->usecount = 0;
+
mutex_lock(&devices_lock);
list_for_each_entry(host1x, &devices, list) {
@@ -768,3 +772,74 @@ int host1x_client_unregister(struct host1x_client *client)
return 0;
}
EXPORT_SYMBOL(host1x_client_unregister);
+
+int host1x_client_suspend(struct host1x_client *client)
+{
+ int err = 0;
+
+ mutex_lock(&client->lock);
+
+ if (client->usecount == 1) {
+ if (client->ops && client->ops->suspend) {
+ err = client->ops->suspend(client);
+ if (err < 0)
+ goto unlock;
+ }
+ }
+
+ client->usecount--;
+ dev_dbg(client->dev, "use count: %u\n", client->usecount);
+
+ if (client->parent) {
+ err = host1x_client_suspend(client->parent);
+ if (err < 0)
+ goto resume;
+ }
+
+ goto unlock;
+
+resume:
+ if (client->usecount == 0)
+ if (client->ops && client->ops->resume)
+ client->ops->resume(client);
+
+ client->usecount++;
+unlock:
+ mutex_unlock(&client->lock);
+ return err;
+}
+EXPORT_SYMBOL(host1x_client_suspend);
+
+int host1x_client_resume(struct host1x_client *client)
+{
+ int err = 0;
+
+ mutex_lock(&client->lock);
+
+ if (client->parent) {
+ err = host1x_client_resume(client->parent);
+ if (err < 0)
+ goto unlock;
+ }
+
+ if (client->usecount == 0) {
+ if (client->ops && client->ops->resume) {
+ err = client->ops->resume(client);
+ if (err < 0)
+ goto suspend;
+ }
+ }
+
+ client->usecount++;
+ dev_dbg(client->dev, "use count: %u\n", client->usecount);
+
+ goto unlock;
+
+suspend:
+ if (client->parent)
+ host1x_client_suspend(client->parent);
+unlock:
+ mutex_unlock(&client->lock);
+ return err;
+}
+EXPORT_SYMBOL(host1x_client_resume);
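The new helpers implement runtime-PM-style reference counting: the first resume powers up the client and, recursively, its parent chain; the last suspend powers it down again, with rollback on failure. A hypothetical client driver would use them in balanced pairs, e.g.:

#include <linux/host1x.h>

/* Sketch only (hypothetical client code, not part of the patch). */
static int demo_channel_open(struct host1x_client *client)
{
	int err;

	err = host1x_client_resume(client);	/* use count 0 -> 1 */
	if (err < 0)
		return err;

	/* hardware may be accessed here */
	return 0;
}

static void demo_channel_close(struct host1x_client *client)
{
	host1x_client_suspend(client);		/* use count 1 -> 0 */
}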
diff --git a/drivers/gpu/host1x/dev.c b/drivers/gpu/host1x/dev.c
index a738ea55e407..388bcc2889aa 100644
--- a/drivers/gpu/host1x/dev.c
+++ b/drivers/gpu/host1x/dev.c
@@ -339,10 +339,8 @@ static int host1x_probe(struct platform_device *pdev)
}
syncpt_irq = platform_get_irq(pdev, 0);
- if (syncpt_irq < 0) {
- dev_err(&pdev->dev, "failed to get IRQ: %d\n", syncpt_irq);
+ if (syncpt_irq < 0)
return syncpt_irq;
- }
mutex_init(&host->devices_lock);
INIT_LIST_HEAD(&host->devices);
diff --git a/drivers/gpu/host1x/job.c b/drivers/gpu/host1x/job.c
index 25ca54de8fc5..60b2fedd0061 100644
--- a/drivers/gpu/host1x/job.c
+++ b/drivers/gpu/host1x/job.c
@@ -244,8 +244,7 @@ unpin:
static int do_relocs(struct host1x_job *job, struct host1x_job_gather *g)
{
- u32 last_page = ~0;
- void *cmdbuf_page_addr = NULL;
+ void *cmdbuf_addr = NULL;
struct host1x_bo *cmdbuf = g->bo;
unsigned int i;
@@ -267,28 +266,22 @@ static int do_relocs(struct host1x_job *job, struct host1x_job_gather *g)
goto patch_reloc;
}
- if (last_page != reloc->cmdbuf.offset >> PAGE_SHIFT) {
- if (cmdbuf_page_addr)
- host1x_bo_kunmap(cmdbuf, last_page,
- cmdbuf_page_addr);
+ if (!cmdbuf_addr) {
+ cmdbuf_addr = host1x_bo_mmap(cmdbuf);
- cmdbuf_page_addr = host1x_bo_kmap(cmdbuf,
- reloc->cmdbuf.offset >> PAGE_SHIFT);
- last_page = reloc->cmdbuf.offset >> PAGE_SHIFT;
-
- if (unlikely(!cmdbuf_page_addr)) {
+ if (unlikely(!cmdbuf_addr)) {
pr_err("Could not map cmdbuf for relocation\n");
return -ENOMEM;
}
}
- target = cmdbuf_page_addr + (reloc->cmdbuf.offset & ~PAGE_MASK);
+ target = cmdbuf_addr + reloc->cmdbuf.offset;
patch_reloc:
*target = reloc_addr;
}
- if (cmdbuf_page_addr)
- host1x_bo_kunmap(cmdbuf, last_page, cmdbuf_page_addr);
+ if (cmdbuf_addr)
+ host1x_bo_munmap(cmdbuf, cmdbuf_addr);
return 0;
}
diff --git a/drivers/gpu/host1x/syncpt.c b/drivers/gpu/host1x/syncpt.c
index dd1cd0142941..fce7892d5137 100644
--- a/drivers/gpu/host1x/syncpt.c
+++ b/drivers/gpu/host1x/syncpt.c
@@ -421,7 +421,7 @@ int host1x_syncpt_init(struct host1x *host)
struct host1x_syncpt *host1x_syncpt_request(struct host1x_client *client,
unsigned long flags)
{
- struct host1x *host = dev_get_drvdata(client->parent->parent);
+ struct host1x *host = dev_get_drvdata(client->host->parent);
return host1x_syncpt_alloc(host, client, flags);
}
diff --git a/drivers/gpu/vga/Kconfig b/drivers/gpu/vga/Kconfig
index c8c770b05ed9..1ad4c4ef0b5e 100644
--- a/drivers/gpu/vga/Kconfig
+++ b/drivers/gpu/vga/Kconfig
@@ -28,6 +28,6 @@ config VGA_SWITCHEROO
help
Many laptops released in 2008/9/10 have two GPUs with a multiplexer
to switch between them. This adds support for dynamic switching when
- X isn't running and delayed switching until the next logoff. This
+ X isn't running and delayed switching until the next logoff. This
feature is called hybrid graphics, ATI PowerXpress, and Nvidia
HybridPower.
diff --git a/drivers/hid/hid-logitech-hidpp.c b/drivers/hid/hid-logitech-hidpp.c
index cd9193078525..70e1cb928bf0 100644
--- a/drivers/hid/hid-logitech-hidpp.c
+++ b/drivers/hid/hid-logitech-hidpp.c
@@ -49,6 +49,10 @@ MODULE_PARM_DESC(disable_tap_to_click,
#define HIDPP_REPORT_LONG_LENGTH 20
#define HIDPP_REPORT_VERY_LONG_MAX_LENGTH 64
+#define HIDPP_REPORT_SHORT_SUPPORTED BIT(0)
+#define HIDPP_REPORT_LONG_SUPPORTED BIT(1)
+#define HIDPP_REPORT_VERY_LONG_SUPPORTED BIT(2)
+
#define HIDPP_SUB_ID_CONSUMER_VENDOR_KEYS 0x03
#define HIDPP_SUB_ID_ROLLER 0x05
#define HIDPP_SUB_ID_MOUSE_EXTRA_BTNS 0x06
@@ -87,6 +91,7 @@ MODULE_PARM_DESC(disable_tap_to_click,
#define HIDPP_CAPABILITY_HIDPP20_BATTERY BIT(1)
#define HIDPP_CAPABILITY_BATTERY_MILEAGE BIT(2)
#define HIDPP_CAPABILITY_BATTERY_LEVEL_STATUS BIT(3)
+#define HIDPP_CAPABILITY_BATTERY_VOLTAGE BIT(4)
/*
* There are two hidpp protocols in use, the first version hidpp10 is known
@@ -135,12 +140,15 @@ struct hidpp_report {
struct hidpp_battery {
u8 feature_index;
u8 solar_feature_index;
+ u8 voltage_feature_index;
struct power_supply_desc desc;
struct power_supply *ps;
char name[64];
int status;
int capacity;
int level;
+ int voltage;
+ int charge_type;
bool online;
};
@@ -183,9 +191,12 @@ struct hidpp_device {
unsigned long quirks;
unsigned long capabilities;
+ u8 supported_reports;
struct hidpp_battery battery;
struct hidpp_scroll_counter vertical_wheel_counter;
+
+ u8 wireless_feature_index;
};
/* HID++ 1.0 error codes */
@@ -340,6 +351,11 @@ static int hidpp_send_rap_command_sync(struct hidpp_device *hidpp_dev,
struct hidpp_report *message;
int ret, max_count;
+ /* Send as a long report if short reports are not supported. */
+ if (report_id == REPORT_ID_HIDPP_SHORT &&
+ !(hidpp_dev->supported_reports & HIDPP_REPORT_SHORT_SUPPORTED))
+ report_id = REPORT_ID_HIDPP_LONG;
+
switch (report_id) {
case REPORT_ID_HIDPP_SHORT:
max_count = HIDPP_REPORT_SHORT_LENGTH - 4;
@@ -393,10 +409,13 @@ static inline bool hidpp_match_error(struct hidpp_report *question,
(answer->fap.params[0] == question->fap.funcindex_clientid);
}
-static inline bool hidpp_report_is_connect_event(struct hidpp_report *report)
+static inline bool hidpp_report_is_connect_event(struct hidpp_device *hidpp,
+ struct hidpp_report *report)
{
- return (report->report_id == REPORT_ID_HIDPP_SHORT) &&
- (report->rap.sub_id == 0x41);
+ return (hidpp->wireless_feature_index &&
+ (report->fap.feature_index == hidpp->wireless_feature_index)) ||
+ ((report->report_id == REPORT_ID_HIDPP_SHORT) &&
+ (report->rap.sub_id == 0x41));
}
/**
@@ -1222,6 +1241,144 @@ static int hidpp20_battery_event(struct hidpp_device *hidpp,
return 0;
}
+/* -------------------------------------------------------------------------- */
+/* 0x1001: Battery voltage */
+/* -------------------------------------------------------------------------- */
+
+#define HIDPP_PAGE_BATTERY_VOLTAGE 0x1001
+
+#define CMD_BATTERY_VOLTAGE_GET_BATTERY_VOLTAGE 0x00
+
+#define EVENT_BATTERY_VOLTAGE_STATUS_BROADCAST 0x00
+
+static int hidpp20_battery_map_status_voltage(u8 data[3], int *voltage,
+ int *level, int *charge_type)
+{
+ int status;
+
+ long charge_sts = (long)data[2];
+
+ *level = POWER_SUPPLY_CAPACITY_LEVEL_UNKNOWN;
+ switch (data[2] & 0xe0) {
+ case 0x00:
+ status = POWER_SUPPLY_STATUS_CHARGING;
+ break;
+ case 0x20:
+ status = POWER_SUPPLY_STATUS_FULL;
+ *level = POWER_SUPPLY_CAPACITY_LEVEL_FULL;
+ break;
+ case 0x40:
+ status = POWER_SUPPLY_STATUS_DISCHARGING;
+ break;
+ case 0xe0:
+ status = POWER_SUPPLY_STATUS_NOT_CHARGING;
+ break;
+ default:
+ status = POWER_SUPPLY_STATUS_UNKNOWN;
+ }
+
+ *charge_type = POWER_SUPPLY_CHARGE_TYPE_STANDARD;
+ if (test_bit(3, &charge_sts)) {
+ *charge_type = POWER_SUPPLY_CHARGE_TYPE_FAST;
+ }
+ if (test_bit(4, &charge_sts)) {
+ *charge_type = POWER_SUPPLY_CHARGE_TYPE_TRICKLE;
+ }
+
+ if (test_bit(5, &charge_sts)) {
+ *level = POWER_SUPPLY_CAPACITY_LEVEL_CRITICAL;
+ }
+
+ *voltage = get_unaligned_be16(data);
+
+ return status;
+}
+
+static int hidpp20_battery_get_battery_voltage(struct hidpp_device *hidpp,
+ u8 feature_index,
+ int *status, int *voltage,
+ int *level, int *charge_type)
+{
+ struct hidpp_report response;
+ int ret;
+ u8 *params = (u8 *)response.fap.params;
+
+ ret = hidpp_send_fap_command_sync(hidpp, feature_index,
+ CMD_BATTERY_VOLTAGE_GET_BATTERY_VOLTAGE,
+ NULL, 0, &response);
+
+ if (ret > 0) {
+ hid_err(hidpp->hid_dev, "%s: received protocol error 0x%02x\n",
+ __func__, ret);
+ return -EPROTO;
+ }
+ if (ret)
+ return ret;
+
+ hidpp->capabilities |= HIDPP_CAPABILITY_BATTERY_VOLTAGE;
+
+ *status = hidpp20_battery_map_status_voltage(params, voltage,
+ level, charge_type);
+
+ return 0;
+}
+
+static int hidpp20_query_battery_voltage_info(struct hidpp_device *hidpp)
+{
+ u8 feature_type;
+ int ret;
+ int status, voltage, level, charge_type;
+
+ if (hidpp->battery.voltage_feature_index == 0xff) {
+ ret = hidpp_root_get_feature(hidpp, HIDPP_PAGE_BATTERY_VOLTAGE,
+ &hidpp->battery.voltage_feature_index,
+ &feature_type);
+ if (ret)
+ return ret;
+ }
+
+ ret = hidpp20_battery_get_battery_voltage(hidpp,
+ hidpp->battery.voltage_feature_index,
+ &status, &voltage, &level, &charge_type);
+
+ if (ret)
+ return ret;
+
+ hidpp->battery.status = status;
+ hidpp->battery.voltage = voltage;
+ hidpp->battery.level = level;
+ hidpp->battery.charge_type = charge_type;
+ hidpp->battery.online = status != POWER_SUPPLY_STATUS_NOT_CHARGING;
+
+ return 0;
+}
+
+static int hidpp20_battery_voltage_event(struct hidpp_device *hidpp,
+ u8 *data, int size)
+{
+ struct hidpp_report *report = (struct hidpp_report *)data;
+ int status, voltage, level, charge_type;
+
+ if (report->fap.feature_index != hidpp->battery.voltage_feature_index ||
+ report->fap.funcindex_clientid != EVENT_BATTERY_VOLTAGE_STATUS_BROADCAST)
+ return 0;
+
+ status = hidpp20_battery_map_status_voltage(report->fap.params, &voltage,
+ &level, &charge_type);
+
+ hidpp->battery.online = status != POWER_SUPPLY_STATUS_NOT_CHARGING;
+
+ if (voltage != hidpp->battery.voltage || status != hidpp->battery.status) {
+ hidpp->battery.voltage = voltage;
+ hidpp->battery.status = status;
+ hidpp->battery.level = level;
+ hidpp->battery.charge_type = charge_type;
+ if (hidpp->battery.ps)
+ power_supply_changed(hidpp->battery.ps);
+ }
+ return 0;
+}
+
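For reference, the voltage report packs a big-endian millivolt value in the first two parameter bytes and status/charge bits in the third. A stand-alone decode of an assumed sample report { 0x0f, 0xa0, 0x08 } (4000 mV, charging, fast charge), mirroring the mapping function above:

#include <stdint.h>
#include <stdio.h>

/* Toy re-implementation of the decode, for illustration only. */
int main(void)
{
	uint8_t data[3] = { 0x0f, 0xa0, 0x08 };	/* assumed sample */
	unsigned int voltage_mv = (data[0] << 8) | data[1];
	const char *status =
		(data[2] & 0xe0) == 0x00 ? "charging" :
		(data[2] & 0xe0) == 0x20 ? "full" :
		(data[2] & 0xe0) == 0x40 ? "discharging" :
		(data[2] & 0xe0) == 0xe0 ? "not charging" : "unknown";

	printf("%u mV, %s%s%s\n", voltage_mv, status,
	       data[2] & 0x08 ? ", fast charge" : "",
	       data[2] & 0x10 ? ", trickle charge" : "");
	return 0;	/* prints: 4000 mV, charging, fast charge */
}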
static enum power_supply_property hidpp_battery_props[] = {
POWER_SUPPLY_PROP_ONLINE,
POWER_SUPPLY_PROP_STATUS,
@@ -1231,6 +1388,7 @@ static enum power_supply_property hidpp_battery_props[] = {
POWER_SUPPLY_PROP_SERIAL_NUMBER,
0, /* placeholder for POWER_SUPPLY_PROP_CAPACITY, */
0, /* placeholder for POWER_SUPPLY_PROP_CAPACITY_LEVEL, */
+ 0, /* placeholder for POWER_SUPPLY_PROP_VOLTAGE_NOW, */
};
static int hidpp_battery_get_property(struct power_supply *psy,
@@ -1268,6 +1426,13 @@ static int hidpp_battery_get_property(struct power_supply *psy,
case POWER_SUPPLY_PROP_SERIAL_NUMBER:
val->strval = hidpp->hid_dev->uniq;
break;
+ case POWER_SUPPLY_PROP_VOLTAGE_NOW:
+ /* hardware reports voltage in mV; sysfs expects uV */
+ val->intval = hidpp->battery.voltage * 1000;
+ break;
+ case POWER_SUPPLY_PROP_CHARGE_TYPE:
+ val->intval = hidpp->battery.charge_type;
+ break;
default:
ret = -EINVAL;
break;
@@ -1277,6 +1442,24 @@ static int hidpp_battery_get_property(struct power_supply *psy,
}
/* -------------------------------------------------------------------------- */
+/* 0x1d4b: Wireless device status */
+/* -------------------------------------------------------------------------- */
+#define HIDPP_PAGE_WIRELESS_DEVICE_STATUS 0x1d4b
+
+static int hidpp_set_wireless_feature_index(struct hidpp_device *hidpp)
+{
+ u8 feature_type;
+ int ret;
+
+ ret = hidpp_root_get_feature(hidpp,
+ HIDPP_PAGE_WIRELESS_DEVICE_STATUS,
+ &hidpp->wireless_feature_index,
+ &feature_type);
+
+ return ret;
+}
+
+/* -------------------------------------------------------------------------- */
/* 0x2120: Hi-resolution scrolling */
/* -------------------------------------------------------------------------- */
@@ -3091,7 +3274,7 @@ static int hidpp_raw_hidpp_event(struct hidpp_device *hidpp, u8 *data,
}
}
- if (unlikely(hidpp_report_is_connect_event(report))) {
+ if (unlikely(hidpp_report_is_connect_event(hidpp, report))) {
atomic_set(&hidpp->connected,
!(report->rap.params[0] & (1 << 6)));
if (schedule_work(&hidpp->work) == 0)
@@ -3106,6 +3289,9 @@ static int hidpp_raw_hidpp_event(struct hidpp_device *hidpp, u8 *data,
ret = hidpp_solar_battery_event(hidpp, data, size);
if (ret != 0)
return ret;
+ ret = hidpp20_battery_voltage_event(hidpp, data, size);
+ if (ret != 0)
+ return ret;
}
if (hidpp->capabilities & HIDPP_CAPABILITY_HIDPP10_BATTERY) {
@@ -3227,12 +3413,16 @@ static int hidpp_initialize_battery(struct hidpp_device *hidpp)
hidpp->battery.feature_index = 0xff;
hidpp->battery.solar_feature_index = 0xff;
+ hidpp->battery.voltage_feature_index = 0xff;
if (hidpp->protocol_major >= 2) {
if (hidpp->quirks & HIDPP_QUIRK_CLASS_K750)
ret = hidpp_solar_request_battery_event(hidpp);
- else
- ret = hidpp20_query_battery_info(hidpp);
+ else {
+ ret = hidpp20_query_battery_voltage_info(hidpp);
+ if (ret)
+ ret = hidpp20_query_battery_info(hidpp);
+ }
if (ret)
return ret;
@@ -3257,7 +3447,7 @@ static int hidpp_initialize_battery(struct hidpp_device *hidpp)
if (!battery_props)
return -ENOMEM;
- num_battery_props = ARRAY_SIZE(hidpp_battery_props) - 2;
+ num_battery_props = ARRAY_SIZE(hidpp_battery_props) - 3;
if (hidpp->capabilities & HIDPP_CAPABILITY_BATTERY_MILEAGE)
battery_props[num_battery_props++] =
@@ -3267,6 +3457,10 @@ static int hidpp_initialize_battery(struct hidpp_device *hidpp)
battery_props[num_battery_props++] =
POWER_SUPPLY_PROP_CAPACITY_LEVEL;
+ if (hidpp->capabilities & HIDPP_CAPABILITY_BATTERY_VOLTAGE)
+ battery_props[num_battery_props++] =
+ POWER_SUPPLY_PROP_VOLTAGE_NOW;
+
battery = &hidpp->battery;
n = atomic_inc_return(&battery_no) - 1;
@@ -3430,7 +3624,10 @@ static void hidpp_connect_event(struct hidpp_device *hidpp)
else
hidpp10_query_battery_status(hidpp);
} else if (hidpp->capabilities & HIDPP_CAPABILITY_HIDPP20_BATTERY) {
- hidpp20_query_battery_info(hidpp);
+ if (hidpp->capabilities & HIDPP_CAPABILITY_BATTERY_VOLTAGE)
+ hidpp20_query_battery_voltage_info(hidpp);
+ else
+ hidpp20_query_battery_info(hidpp);
}
if (hidpp->battery.ps)
power_supply_changed(hidpp->battery.ps);
@@ -3481,10 +3678,11 @@ static int hidpp_get_report_length(struct hid_device *hdev, int id)
return report->field[0]->report_count + 1;
}
-static bool hidpp_validate_device(struct hid_device *hdev)
+static u8 hidpp_validate_device(struct hid_device *hdev)
{
struct hidpp_device *hidpp = hid_get_drvdata(hdev);
- int id, report_length, supported_reports = 0;
+ int id, report_length;
+ u8 supported_reports = 0;
id = REPORT_ID_HIDPP_SHORT;
report_length = hidpp_get_report_length(hdev, id);
@@ -3492,7 +3690,7 @@ static bool hidpp_validate_device(struct hid_device *hdev)
if (report_length < HIDPP_REPORT_SHORT_LENGTH)
goto bad_device;
- supported_reports++;
+ supported_reports |= HIDPP_REPORT_SHORT_SUPPORTED;
}
id = REPORT_ID_HIDPP_LONG;
@@ -3501,7 +3699,7 @@ static bool hidpp_validate_device(struct hid_device *hdev)
if (report_length < HIDPP_REPORT_LONG_LENGTH)
goto bad_device;
- supported_reports++;
+ supported_reports |= HIDPP_REPORT_LONG_SUPPORTED;
}
id = REPORT_ID_HIDPP_VERY_LONG;
@@ -3511,7 +3709,7 @@ static bool hidpp_validate_device(struct hid_device *hdev)
report_length > HIDPP_REPORT_VERY_LONG_MAX_LENGTH)
goto bad_device;
- supported_reports++;
+ supported_reports |= HIDPP_REPORT_VERY_LONG_SUPPORTED;
hidpp->very_long_report_length = report_length;
}
@@ -3560,7 +3758,9 @@ static int hidpp_probe(struct hid_device *hdev, const struct hid_device_id *id)
/*
* Make sure the device is HID++ capable, otherwise treat as generic HID
*/
- if (!hidpp_validate_device(hdev)) {
+ hidpp->supported_reports = hidpp_validate_device(hdev);
+
+ if (!hidpp->supported_reports) {
hid_set_drvdata(hdev, NULL);
devm_kfree(&hdev->dev, hidpp);
return hid_hw_start(hdev, HID_CONNECT_DEFAULT);
@@ -3617,7 +3817,6 @@ static int hidpp_probe(struct hid_device *hdev, const struct hid_device_id *id)
if (ret < 0) {
dev_err(&hdev->dev, "%s:hid_hw_open returned error:%d\n",
__func__, ret);
- hid_hw_stop(hdev);
goto hid_hw_open_fail;
}
@@ -3639,6 +3838,14 @@ static int hidpp_probe(struct hid_device *hdev, const struct hid_device_id *id)
hidpp_overwrite_name(hdev);
}
+ if (connected && hidpp->protocol_major >= 2) {
+ ret = hidpp_set_wireless_feature_index(hidpp);
+ if (ret == -ENOENT)
+ hidpp->wireless_feature_index = 0;
+ else if (ret)
+ goto hid_hw_init_fail;
+ }
+
if (connected && (hidpp->quirks & HIDPP_QUIRK_CLASS_WTP)) {
ret = wtp_get_config(hidpp);
if (ret)
@@ -3752,6 +3959,8 @@ static const struct hid_device_id hidpp_devices[] = {
{ LDJ_DEVICE(0x4071), .driver_data = HIDPP_QUIRK_HI_RES_SCROLL_X2121 },
{ /* Mouse Logitech MX Master 2S */
LDJ_DEVICE(0x4069), .driver_data = HIDPP_QUIRK_HI_RES_SCROLL_X2121 },
+ { /* Mouse Logitech MX Master 3 */
+ LDJ_DEVICE(0x4082), .driver_data = HIDPP_QUIRK_HI_RES_SCROLL_X2121 },
{ /* Mouse Logitech Performance MX */
LDJ_DEVICE(0x101a), .driver_data = HIDPP_QUIRK_HI_RES_SCROLL_1P0 },
{ /* Keyboard logitech K400 */
@@ -3808,6 +4017,14 @@ static const struct hid_device_id hidpp_devices[] = {
{ /* MX5500 keyboard over Bluetooth */
HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_LOGITECH, 0xb30b),
.driver_data = HIDPP_QUIRK_HIDPP_CONSUMER_VENDOR_KEYS },
+ { /* MX Master mouse over Bluetooth */
+ HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_LOGITECH, 0xb012),
+ .driver_data = HIDPP_QUIRK_HI_RES_SCROLL_X2121 },
+ { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_LOGITECH, 0xb01e),
+ .driver_data = HIDPP_QUIRK_HI_RES_SCROLL_X2121 },
+ { /* MX Master 3 mouse over Bluetooth */
+ HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_LOGITECH, 0xb023),
+ .driver_data = HIDPP_QUIRK_HI_RES_SCROLL_X2121 },
{}
};
diff --git a/drivers/hid/hid-picolcd_fb.c b/drivers/hid/hid-picolcd_fb.c
index e162a668fb7e..a549c42e8c90 100644
--- a/drivers/hid/hid-picolcd_fb.c
+++ b/drivers/hid/hid-picolcd_fb.c
@@ -417,8 +417,7 @@ static int picolcd_set_par(struct fb_info *info)
return 0;
}
-/* Note this can't be const because of struct fb_info definition */
-static struct fb_ops picolcdfb_ops = {
+static const struct fb_ops picolcdfb_ops = {
.owner = THIS_MODULE,
.fb_destroy = picolcd_fb_destroy,
.fb_read = fb_sys_read,
diff --git a/drivers/hid/hidraw.c b/drivers/hid/hidraw.c
index 7a75aff78388..2eee5e31c2b7 100644
--- a/drivers/hid/hidraw.c
+++ b/drivers/hid/hidraw.c
@@ -451,6 +451,15 @@ static long hidraw_ioctl(struct file *file, unsigned int cmd,
-EFAULT : len;
break;
}
+
+ if (_IOC_NR(cmd) == _IOC_NR(HIDIOCGRAWUNIQ(0))) {
+ int len = strlen(hid->uniq) + 1;
+ if (len > _IOC_SIZE(cmd))
+ len = _IOC_SIZE(cmd);
+ ret = copy_to_user(user_arg, hid->uniq, len) ?
+ -EFAULT : len;
+ break;
+ }
}
ret = -ENOTTY;
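User space would query the new value like the existing HIDIOCGRAWNAME/HIDIOCGRAWPHYS ioctls, with the buffer size encoded in the ioctl number. A hypothetical sketch, assuming HIDIOCGRAWUNIQ is exported via <linux/hidraw.h> by the same series:

#include <fcntl.h>
#include <linux/hidraw.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>

int main(void)
{
	char uniq[64] = "";
	int fd = open("/dev/hidraw0", O_RDWR);	/* example device node */

	/* on success the ioctl returns the copied length */
	if (fd < 0 || ioctl(fd, HIDIOCGRAWUNIQ(sizeof(uniq)), uniq) < 0)
		return 1;

	printf("uniq: %s\n", uniq);	/* e.g. a Bluetooth address */
	close(fd);
	return 0;
}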
diff --git a/drivers/hv/hv_util.c b/drivers/hv/hv_util.c
index 766bd8457346..296f9098c9e4 100644
--- a/drivers/hv/hv_util.c
+++ b/drivers/hv/hv_util.c
@@ -211,7 +211,7 @@ static struct timespec64 hv_get_adj_host_time(void)
unsigned long flags;
spin_lock_irqsave(&host_ts.lock, flags);
- reftime = hyperv_cs->read(hyperv_cs);
+ reftime = hv_read_reference_counter();
newtime = host_ts.host_time + (reftime - host_ts.ref_time);
ts = ns_to_timespec64((newtime - WLTIMEDELTA) * 100);
spin_unlock_irqrestore(&host_ts.lock, flags);
@@ -250,7 +250,7 @@ static inline void adj_guesttime(u64 hosttime, u64 reftime, u8 adj_flags)
*/
spin_lock_irqsave(&host_ts.lock, flags);
- cur_reftime = hyperv_cs->read(hyperv_cs);
+ cur_reftime = hv_read_reference_counter();
host_ts.host_time = hosttime;
host_ts.ref_time = cur_reftime;
@@ -315,7 +315,7 @@ static void timesync_onchannelcallback(void *context)
sizeof(struct vmbuspipe_hdr) +
sizeof(struct icmsg_hdr)];
adj_guesttime(timedatap->parenttime,
- hyperv_cs->read(hyperv_cs),
+ hv_read_reference_counter(),
timedatap->flags);
}
}
@@ -524,7 +524,7 @@ static struct ptp_clock *hv_ptp_clock;
static int hv_timesync_init(struct hv_util_service *srv)
{
/* TimeSync requires Hyper-V clocksource. */
- if (!hyperv_cs)
+ if (!hv_read_reference_counter)
return -ENODEV;
spin_lock_init(&host_ts.lock);
diff --git a/drivers/hwmon/Kconfig b/drivers/hwmon/Kconfig
index 23dfe848979a..47ac20aee06f 100644
--- a/drivers/hwmon/Kconfig
+++ b/drivers/hwmon/Kconfig
@@ -164,6 +164,16 @@ config SENSORS_ADM1031
This driver can also be built as a module. If so, the module
will be called adm1031.
+config SENSORS_ADM1177
+ tristate "Analog Devices ADM1177 and compatibles"
+ depends on I2C
+ help
+ If you say yes here you get support for Analog Devices ADM1177
+ sensor chips.
+
+ This driver can also be built as a module. If so, the module
+ will be called adm1177.
+
config SENSORS_ADM9240
tristate "Analog Devices ADM9240 and compatibles"
depends on I2C
@@ -385,6 +395,16 @@ config SENSORS_ATXP1
This driver can also be built as a module. If so, the module
will be called atxp1.
+config SENSORS_DRIVETEMP
+ tristate "Hard disk drives with temperature sensors"
+ depends on SCSI && ATA
+ help
+ If you say yes here you get support for the temperature sensor on
+ hard disk drives.
+
+ This driver can also be built as a module. If so, the module
+ will be called drivetemp.
+
config SENSORS_DS620
tristate "Dallas Semiconductor DS620"
depends on I2C
@@ -889,7 +909,7 @@ config SENSORS_MAX197
will be called max197.
config SENSORS_MAX31722
-tristate "MAX31722 temperature sensor"
+ tristate "MAX31722 temperature sensor"
depends on SPI
help
Support for the Maxim Integrated MAX31722/MAX31723 digital
@@ -898,6 +918,16 @@ tristate "MAX31722 temperature sensor"
This driver can also be built as a module. If so, the module
will be called max31722.
+config SENSORS_MAX31730
+ tristate "MAX31730 temperature sensor"
+ depends on I2C
+ help
+ Support for the Maxim Integrated MAX31730 3-Channel Remote
+ Temperature Sensor.
+
+ This driver can also be built as a module. If so, the module
+ will be called max31730.
+
config SENSORS_MAX6621
tristate "Maxim MAX6621 sensor chip"
depends on I2C
@@ -1905,7 +1935,7 @@ config SENSORS_W83627HF
will be called w83627hf.
config SENSORS_W83627EHF
- tristate "Winbond W83627EHF/EHG/DHG/UHG, W83667HG, NCT6775F, NCT6776F"
+ tristate "Winbond W83627EHF/EHG/DHG/UHG, W83667HG"
depends on !PPC
select HWMON_VID
help
@@ -1918,8 +1948,7 @@ config SENSORS_W83627EHF
the Core 2 Duo. And also the W83627UHG, which is a stripped down
version of the W83627DHG (as far as hardware monitoring goes.)
- This driver also supports Nuvoton W83667HG, W83667HG-B, NCT6775F
- (also known as W83667HG-I), and NCT6776F.
+ This driver also supports Nuvoton W83667HG and W83667HG-B.
This driver can also be built as a module. If so, the module
will be called w83627ehf.
diff --git a/drivers/hwmon/Makefile b/drivers/hwmon/Makefile
index 6db5db9cdc29..613f50987965 100644
--- a/drivers/hwmon/Makefile
+++ b/drivers/hwmon/Makefile
@@ -34,6 +34,7 @@ obj-$(CONFIG_SENSORS_ADM1025) += adm1025.o
obj-$(CONFIG_SENSORS_ADM1026) += adm1026.o
obj-$(CONFIG_SENSORS_ADM1029) += adm1029.o
obj-$(CONFIG_SENSORS_ADM1031) += adm1031.o
+obj-$(CONFIG_SENSORS_ADM1177) += adm1177.o
obj-$(CONFIG_SENSORS_ADM9240) += adm9240.o
obj-$(CONFIG_SENSORS_ADS7828) += ads7828.o
obj-$(CONFIG_SENSORS_ADS7871) += ads7871.o
@@ -56,6 +57,7 @@ obj-$(CONFIG_SENSORS_DA9052_ADC)+= da9052-hwmon.o
obj-$(CONFIG_SENSORS_DA9055)+= da9055-hwmon.o
obj-$(CONFIG_SENSORS_DELL_SMM) += dell-smm-hwmon.o
obj-$(CONFIG_SENSORS_DME1737) += dme1737.o
+obj-$(CONFIG_SENSORS_DRIVETEMP) += drivetemp.o
obj-$(CONFIG_SENSORS_DS620) += ds620.o
obj-$(CONFIG_SENSORS_DS1621) += ds1621.o
obj-$(CONFIG_SENSORS_EMC1403) += emc1403.o
@@ -123,6 +125,7 @@ obj-$(CONFIG_SENSORS_MAX1619) += max1619.o
obj-$(CONFIG_SENSORS_MAX1668) += max1668.o
obj-$(CONFIG_SENSORS_MAX197) += max197.o
obj-$(CONFIG_SENSORS_MAX31722) += max31722.o
+obj-$(CONFIG_SENSORS_MAX31730) += max31730.o
obj-$(CONFIG_SENSORS_MAX6621) += max6621.o
obj-$(CONFIG_SENSORS_MAX6639) += max6639.o
obj-$(CONFIG_SENSORS_MAX6642) += max6642.o
diff --git a/drivers/hwmon/adm1177.c b/drivers/hwmon/adm1177.c
new file mode 100644
index 000000000000..d314223a404a
--- /dev/null
+++ b/drivers/hwmon/adm1177.c
@@ -0,0 +1,288 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * ADM1177 Hot Swap Controller and Digital Power Monitor with Soft Start Pin
+ *
+ * Copyright 2015-2019 Analog Devices Inc.
+ */
+
+#include <linux/bits.h>
+#include <linux/device.h>
+#include <linux/hwmon.h>
+#include <linux/i2c.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/regulator/consumer.h>
+
+/* Command Byte Operations */
+#define ADM1177_CMD_V_CONT BIT(0)
+#define ADM1177_CMD_I_CONT BIT(2)
+#define ADM1177_CMD_VRANGE BIT(4)
+
+/* Extended Register */
+#define ADM1177_REG_ALERT_TH 2
+
+#define ADM1177_BITS 12
+
+/**
+ * struct adm1177_state - driver instance specific data
+ * @client: pointer to i2c client
+ * @reg: regulator info for the power supply of the device
+ * @r_sense_uohm: current sense resistor value
+ * @alert_threshold_ua: current limit for shutdown
+ * @vrange_high: internal voltage divider
+ */
+struct adm1177_state {
+ struct i2c_client *client;
+ struct regulator *reg;
+ u32 r_sense_uohm;
+ u32 alert_threshold_ua;
+ bool vrange_high;
+};
+
+static int adm1177_read_raw(struct adm1177_state *st, u8 num, u8 *data)
+{
+ return i2c_master_recv(st->client, data, num);
+}
+
+static int adm1177_write_cmd(struct adm1177_state *st, u8 cmd)
+{
+ return i2c_smbus_write_byte(st->client, cmd);
+}
+
+static int adm1177_write_alert_thr(struct adm1177_state *st,
+ u32 alert_threshold_ua)
+{
+ u64 val;
+ int ret;
+
+ val = 0xFFULL * alert_threshold_ua * st->r_sense_uohm;
+ val = div_u64(val, 105840000U);
+ val = div_u64(val, 1000U);
+ if (val > 0xFF)
+ val = 0xFF;
+
+ ret = i2c_smbus_write_byte_data(st->client, ADM1177_REG_ALERT_TH,
+ val);
+ if (ret)
+ return ret;
+
+ st->alert_threshold_ua = alert_threshold_ua;
+ return 0;
+}
+
+static int adm1177_read(struct device *dev, enum hwmon_sensor_types type,
+ u32 attr, int channel, long *val)
+{
+ struct adm1177_state *st = dev_get_drvdata(dev);
+ u8 data[3];
+ long dummy;
+ int ret;
+
+ switch (type) {
+ case hwmon_curr:
+ switch (attr) {
+ case hwmon_curr_input:
+ ret = adm1177_read_raw(st, 3, data);
+ if (ret < 0)
+ return ret;
+ dummy = (data[1] << 4) | (data[2] & 0xF);
+ /*
+ * convert to milliamperes
+ * ((105.84mV / 4096) x raw) / senseResistor(ohm)
+ */
+ *val = div_u64((105840000ull * dummy),
+ 4096 * st->r_sense_uohm);
+ return 0;
+ case hwmon_curr_max_alarm:
+ *val = st->alert_threshold_ua;
+ return 0;
+ default:
+ return -EOPNOTSUPP;
+ }
+ case hwmon_in:
+ ret = adm1177_read_raw(st, 3, data);
+ if (ret < 0)
+ return ret;
+ dummy = (data[0] << 4) | (data[2] >> 4);
+ /*
+ * convert to millivolts based on resistor division
+ * (V_fullscale / 4096) * raw
+ */
+ if (st->vrange_high)
+ dummy *= 26350;
+ else
+ dummy *= 6650;
+
+ *val = DIV_ROUND_CLOSEST(dummy, 4096);
+ return 0;
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static int adm1177_write(struct device *dev, enum hwmon_sensor_types type,
+ u32 attr, int channel, long val)
+{
+ struct adm1177_state *st = dev_get_drvdata(dev);
+
+ switch (type) {
+ case hwmon_curr:
+ switch (attr) {
+ case hwmon_curr_max_alarm:
+ adm1177_write_alert_thr(st, val);
+ return 0;
+ default:
+ return -EOPNOTSUPP;
+ }
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static umode_t adm1177_is_visible(const void *data,
+ enum hwmon_sensor_types type,
+ u32 attr, int channel)
+{
+ const struct adm1177_state *st = data;
+
+ switch (type) {
+ case hwmon_in:
+ switch (attr) {
+ case hwmon_in_input:
+ return 0444;
+ }
+ break;
+ case hwmon_curr:
+ switch (attr) {
+ case hwmon_curr_input:
+ if (st->r_sense_uohm)
+ return 0444;
+ return 0;
+ case hwmon_curr_max_alarm:
+ if (st->r_sense_uohm)
+ return 0644;
+ return 0;
+ }
+ break;
+ default:
+ break;
+ }
+ return 0;
+}
+
+static const struct hwmon_channel_info *adm1177_info[] = {
+ HWMON_CHANNEL_INFO(curr,
+ HWMON_C_INPUT | HWMON_C_MAX_ALARM),
+ HWMON_CHANNEL_INFO(in,
+ HWMON_I_INPUT),
+ NULL
+};
+
+static const struct hwmon_ops adm1177_hwmon_ops = {
+ .is_visible = adm1177_is_visible,
+ .read = adm1177_read,
+ .write = adm1177_write,
+};
+
+static const struct hwmon_chip_info adm1177_chip_info = {
+ .ops = &adm1177_hwmon_ops,
+ .info = adm1177_info,
+};
+
+static void adm1177_remove(void *data)
+{
+ struct adm1177_state *st = data;
+
+ regulator_disable(st->reg);
+}
+
+static int adm1177_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ struct device *dev = &client->dev;
+ struct device *hwmon_dev;
+ struct adm1177_state *st;
+ u32 alert_threshold_ua;
+ int ret;
+
+ st = devm_kzalloc(dev, sizeof(*st), GFP_KERNEL);
+ if (!st)
+ return -ENOMEM;
+
+ st->client = client;
+
+ st->reg = devm_regulator_get_optional(&client->dev, "vref");
+ if (IS_ERR(st->reg)) {
+ if (PTR_ERR(st->reg) == -EPROBE_DEFER)
+ return -EPROBE_DEFER;
+
+ st->reg = NULL;
+ } else {
+ ret = regulator_enable(st->reg);
+ if (ret)
+ return ret;
+ ret = devm_add_action_or_reset(&client->dev, adm1177_remove,
+ st);
+ if (ret)
+ return ret;
+ }
+
+ if (device_property_read_u32(dev, "shunt-resistor-micro-ohms",
+ &st->r_sense_uohm))
+ st->r_sense_uohm = 0;
+ if (device_property_read_u32(dev, "adi,shutdown-threshold-microamp",
+ &alert_threshold_ua)) {
+ if (st->r_sense_uohm)
+ /*
+ * set maximum default value from datasheet based on
+ * shunt-resistor
+ */
+ alert_threshold_ua = div_u64(105840000000,
+ st->r_sense_uohm);
+ else
+ alert_threshold_ua = 0;
+ }
+ st->vrange_high = device_property_read_bool(dev,
+ "adi,vrange-high-enable");
+ if (alert_threshold_ua && st->r_sense_uohm)
+ adm1177_write_alert_thr(st, alert_threshold_ua);
+
+ ret = adm1177_write_cmd(st, ADM1177_CMD_V_CONT |
+ ADM1177_CMD_I_CONT |
+ (st->vrange_high ? 0 : ADM1177_CMD_VRANGE));
+ if (ret)
+ return ret;
+
+ hwmon_dev =
+ devm_hwmon_device_register_with_info(dev, client->name, st,
+ &adm1177_chip_info, NULL);
+ return PTR_ERR_OR_ZERO(hwmon_dev);
+}
+
+static const struct i2c_device_id adm1177_id[] = {
+ {"adm1177", 0},
+ {}
+};
+MODULE_DEVICE_TABLE(i2c, adm1177_id);
+
+static const struct of_device_id adm1177_dt_ids[] = {
+ { .compatible = "adi,adm1177" },
+ {},
+};
+MODULE_DEVICE_TABLE(of, adm1177_dt_ids);
+
+static struct i2c_driver adm1177_driver = {
+ .class = I2C_CLASS_HWMON,
+ .driver = {
+ .name = "adm1177",
+ .of_match_table = adm1177_dt_ids,
+ },
+ .probe = adm1177_probe,
+ .id_table = adm1177_id,
+};
+module_i2c_driver(adm1177_driver);
+
+MODULE_AUTHOR("Beniamin Bia <beniamin.bia@analog.com>");
+MODULE_AUTHOR("Michael Hennerich <michael.hennerich@analog.com>");
+MODULE_DESCRIPTION("Analog Devices ADM1177 ADC driver");
+MODULE_LICENSE("GPL v2");
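To sanity-check the current conversion in adm1177_read(): full scale is 105.84 mV across the shunt at 12-bit resolution. With assumed values raw = 2048 and a 10 mOhm shunt (r_sense_uohm = 10000):

	I = (105.84 mV * 2048 / 4096) / 10 mOhm = 52.92 mV / 10 mOhm = 5292 mA

which matches div_u64(105840000 * 2048, 4096 * 10000) = 5292; i.e. the driver reports milliamperes when the sense resistor is given in micro-ohms.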
diff --git a/drivers/hwmon/adt7475.c b/drivers/hwmon/adt7475.c
index 6c64d50c9aae..01c2eeb02aa9 100644
--- a/drivers/hwmon/adt7475.c
+++ b/drivers/hwmon/adt7475.c
@@ -294,9 +294,10 @@ static inline u16 volt2reg(int channel, long volt, u8 bypass_attn)
long reg;
if (bypass_attn & (1 << channel))
- reg = (volt * 1024) / 2250;
+ reg = DIV_ROUND_CLOSEST(volt * 1024, 2250);
else
- reg = (volt * r[1] * 1024) / ((r[0] + r[1]) * 2250);
+ reg = DIV_ROUND_CLOSEST(volt * r[1] * 1024,
+ (r[0] + r[1]) * 2250);
return clamp_val(reg, 0, 1023) & (0xff << 2);
}
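The switch to DIV_ROUND_CLOSEST() removes a systematic downward bias from the register encoding. A worked example with assumed inputs, a bypassed channel and volt = 1100:

	truncating division:  1100 * 1024 / 2250 = 500   (exact value 500.62)
	DIV_ROUND_CLOSEST():  1100 * 1024 / 2250 = 501

so the encoded value is now at most half an LSB away from the requested voltage.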
diff --git a/drivers/hwmon/drivetemp.c b/drivers/hwmon/drivetemp.c
new file mode 100644
index 000000000000..370d0c74eb01
--- /dev/null
+++ b/drivers/hwmon/drivetemp.c
@@ -0,0 +1,574 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Hwmon client for disk and solid state drives with temperature sensors
+ * Copyright (C) 2019 Zodiac Inflight Innovations
+ *
+ * With input from:
+ * Hwmon client for S.M.A.R.T. hard disk drives with temperature sensors.
+ * (C) 2018 Linus Walleij
+ *
+ * hwmon: Driver for SCSI/ATA temperature sensors
+ * by Constantin Baranov <const@mimas.ru>, submitted September 2009
+ *
+ * This driver supports reporting the temperature of SATA drives. It can be
+ * easily extended to report the temperature of SCSI drives.
+ *
+ * The primary means to read drive temperatures and temperature limits
+ * for ATA drives is the SCT Command Transport feature set as specified in
+ * ATA8-ACS.
+ * It can be used to read the current drive temperature, temperature limits,
+ * and historic minimum and maximum temperatures. The SCT Command Transport
+ * feature set is documented in "AT Attachment 8 - ATA/ATAPI Command Set
+ * (ATA8-ACS)".
+ *
+ * If the SCT Command Transport feature set is not available, drive temperatures
+ * may be readable through SMART attributes. Since SMART attributes are not well
+ * defined, this method is only used as a fallback mechanism.
+ *
+ * There are three SMART attributes which may report drive temperatures.
+ * Those are defined as follows (from
+ * http://www.cropel.com/library/smart-attribute-list.aspx).
+ *
+ * 190 Temperature Temperature, monitored by a sensor somewhere inside
+ * the drive. Raw value typically holds the actual
+ * temperature (hexadecimal) in its rightmost two digits.
+ *
+ * 194 Temperature Temperature, monitored by a sensor somewhere inside
+ * the drive. Raw value typically holds the actual
+ * temperature (hexadecimal) in its rightmost two digits.
+ *
+ * 231 Temperature Temperature, monitored by a sensor somewhere inside
+ * the drive. Raw value typically holds the actual
+ * temperature (hexadecimal) in its rightmost two digits.
+ *
+ * Wikipedia defines attributes a bit differently.
+ *
+ * 190 Temperature Value is equal to (100-temp. °C), allowing manufacturer
+ * Difference or to set a minimum threshold which corresponds to a
+ * Airflow maximum temperature. This also follows the convention of
+ * Temperature 100 being a best-case value and lower values being
+ * undesirable. However, some older drives may instead
+ * report raw Temperature (identical to 0xC2) or
+ * Temperature minus 50 here.
+ * 194 Temperature or Indicates the device temperature, if the appropriate
+ * Temperature sensor is fitted. Lowest byte of the raw value contains
+ * Celsius the exact temperature value (Celsius degrees).
+ * 231 Life Left Indicates the approximate SSD life left, in terms of
+ * (SSDs) or program/erase cycles or available reserved blocks.
+ * Temperature A normalized value of 100 represents a new drive, with
+ * a threshold value at 10 indicating a need for
+ * replacement. A value of 0 may mean that the drive is
+ * operating in read-only mode to allow data recovery.
+ * Previously (pre-2010) occasionally used for Drive
+ * Temperature (more typically reported at 0xC2).
+ *
+ * The common denominator is that the first raw byte reports the temperature
+ * in degrees C on almost all drives. Some drives may report a fractional
+ * temperature in the second raw byte.
+ *
+ * Known exceptions (from libatasmart):
+ * - SAMSUNG SV0412H and SAMSUNG SV1204H report the temperature in 10th
+ * degrees C in the first two raw bytes.
+ * - A few Maxtor drives report an unknown or bad value in attribute 194.
+ * - Certain Apple SSD drives report an unknown value in attribute 190.
+ * Only certain firmware versions are affected.
+ *
+ * Those exceptions affect older ATA drives and are currently ignored.
+ * Also, the second raw byte (possibly reporting the fractional temperature)
+ * is currently ignored.
+ *
+ * Many drives also report temperature limits in additional SMART data raw
+ * bytes. The format of those is not well defined and varies widely.
+ * The driver does not currently attempt to report those limits.
+ *
+ * According to data in smartmontools, attribute 231 is rarely used to report
+ * drive temperatures. At the same time, several drives report SSD life left
+ * in attribute 231, but do not support temperature sensors. For this reason,
+ * attribute 231 is currently ignored.
+ *
+ * Given the above definitions, temperatures are reported as follows.
+ * - If SCT Command Transport is supported, it is used to read the
+ *   temperature and, if available, temperature limits.
+ * - Otherwise, if SMART attribute 194 is supported, it is used to read
+ * the temperature.
+ * - Otherwise, if SMART attribute 190 is supported, it is used to read
+ * the temperature.
+ */
+
+#include <linux/ata.h>
+#include <linux/bits.h>
+#include <linux/device.h>
+#include <linux/hwmon.h>
+#include <linux/kernel.h>
+#include <linux/list.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <scsi/scsi_cmnd.h>
+#include <scsi/scsi_device.h>
+#include <scsi/scsi_driver.h>
+#include <scsi/scsi_proto.h>
+
+struct drivetemp_data {
+ struct list_head list; /* list of instantiated devices */
+ struct mutex lock; /* protect data buffer accesses */
+ struct scsi_device *sdev; /* SCSI device */
+ struct device *dev; /* instantiating device */
+ struct device *hwdev; /* hardware monitoring device */
+ u8 smartdata[ATA_SECT_SIZE]; /* local buffer */
+ int (*get_temp)(struct drivetemp_data *st, u32 attr, long *val);
+ bool have_temp_lowest; /* lowest temp in SCT status */
+ bool have_temp_highest; /* highest temp in SCT status */
+ bool have_temp_min; /* have min temp */
+ bool have_temp_max; /* have max temp */
+ bool have_temp_lcrit; /* have lower critical limit */
+ bool have_temp_crit; /* have critical limit */
+ int temp_min; /* min temp */
+ int temp_max; /* max temp */
+ int temp_lcrit; /* lower critical limit */
+ int temp_crit; /* critical limit */
+};
+
+static LIST_HEAD(drivetemp_devlist);
+
+#define ATA_MAX_SMART_ATTRS 30
+#define SMART_TEMP_PROP_190 190
+#define SMART_TEMP_PROP_194 194
+
+#define SCT_STATUS_REQ_ADDR 0xe0
+#define SCT_STATUS_VERSION_LOW 0 /* log byte offsets */
+#define SCT_STATUS_VERSION_HIGH 1
+#define SCT_STATUS_TEMP 200
+#define SCT_STATUS_TEMP_LOWEST 201
+#define SCT_STATUS_TEMP_HIGHEST 202
+#define SCT_READ_LOG_ADDR 0xe1
+#define SMART_READ_LOG 0xd5
+#define SMART_WRITE_LOG 0xd6
+
+#define INVALID_TEMP 0x80
+
+#define temp_is_valid(temp) ((temp) != INVALID_TEMP)
+#define temp_from_sct(temp) (((s8)(temp)) * 1000)
+
+static inline bool ata_id_smart_supported(u16 *id)
+{
+ return id[ATA_ID_COMMAND_SET_1] & BIT(0);
+}
+
+static inline bool ata_id_smart_enabled(u16 *id)
+{
+ return id[ATA_ID_CFS_ENABLE_1] & BIT(0);
+}
+
+static int drivetemp_scsi_command(struct drivetemp_data *st,
+ u8 ata_command, u8 feature,
+ u8 lba_low, u8 lba_mid, u8 lba_high)
+{
+ u8 scsi_cmd[MAX_COMMAND_SIZE];
+ int data_dir;
+
+ memset(scsi_cmd, 0, sizeof(scsi_cmd));
+ scsi_cmd[0] = ATA_16;
+ if (ata_command == ATA_CMD_SMART && feature == SMART_WRITE_LOG) {
+ scsi_cmd[1] = (5 << 1); /* PIO Data-out */
+ /*
+ * No off.line or cc, write to dev, block count in sector count
+ * field.
+ */
+ scsi_cmd[2] = 0x06;
+ data_dir = DMA_TO_DEVICE;
+ } else {
+ scsi_cmd[1] = (4 << 1); /* PIO Data-in */
+ /*
+ * No off.line or cc, read from dev, block count in sector count
+ * field.
+ */
+ scsi_cmd[2] = 0x0e;
+ data_dir = DMA_FROM_DEVICE;
+ }
+ scsi_cmd[4] = feature;
+ scsi_cmd[6] = 1; /* 1 sector */
+ scsi_cmd[8] = lba_low;
+ scsi_cmd[10] = lba_mid;
+ scsi_cmd[12] = lba_high;
+ scsi_cmd[14] = ata_command;
+
+ return scsi_execute_req(st->sdev, scsi_cmd, data_dir,
+ st->smartdata, ATA_SECT_SIZE, NULL, HZ, 5,
+ NULL);
+}
+
+static int drivetemp_ata_command(struct drivetemp_data *st, u8 feature,
+ u8 select)
+{
+ return drivetemp_scsi_command(st, ATA_CMD_SMART, feature, select,
+ ATA_SMART_LBAM_PASS, ATA_SMART_LBAH_PASS);
+}
+
+static int drivetemp_get_smarttemp(struct drivetemp_data *st, u32 attr,
+ long *temp)
+{
+ u8 *buf = st->smartdata;
+ bool have_temp = false;
+ u8 temp_raw;
+ u8 csum;
+ int err;
+ int i;
+
+ err = drivetemp_ata_command(st, ATA_SMART_READ_VALUES, 0);
+ if (err)
+ return err;
+
+ /* Checksum the read value table */
+ csum = 0;
+ for (i = 0; i < ATA_SECT_SIZE; i++)
+ csum += buf[i];
+ if (csum) {
+ dev_dbg(&st->sdev->sdev_gendev,
+ "checksum error reading SMART values\n");
+ return -EIO;
+ }
+
+ for (i = 0; i < ATA_MAX_SMART_ATTRS; i++) {
+ u8 *attr = buf + i * 12;
+ int id = attr[2];
+
+ if (!id)
+ continue;
+
+ if (id == SMART_TEMP_PROP_190) {
+ temp_raw = attr[7];
+ have_temp = true;
+ }
+ if (id == SMART_TEMP_PROP_194) {
+ temp_raw = attr[7];
+ have_temp = true;
+ break;
+ }
+ }
+
+ if (have_temp) {
+ *temp = temp_raw * 1000;
+ return 0;
+ }
+
+ return -ENXIO;
+}
+
+static int drivetemp_get_scttemp(struct drivetemp_data *st, u32 attr, long *val)
+{
+ u8 *buf = st->smartdata;
+ int err;
+
+ err = drivetemp_ata_command(st, SMART_READ_LOG, SCT_STATUS_REQ_ADDR);
+ if (err)
+ return err;
+ switch (attr) {
+ case hwmon_temp_input:
+ *val = temp_from_sct(buf[SCT_STATUS_TEMP]);
+ break;
+ case hwmon_temp_lowest:
+ *val = temp_from_sct(buf[SCT_STATUS_TEMP_LOWEST]);
+ break;
+ case hwmon_temp_highest:
+ *val = temp_from_sct(buf[SCT_STATUS_TEMP_HIGHEST]);
+ break;
+ default:
+ err = -EINVAL;
+ break;
+ }
+ return err;
+}
+
+static int drivetemp_identify_sata(struct drivetemp_data *st)
+{
+ struct scsi_device *sdev = st->sdev;
+ u8 *buf = st->smartdata;
+ struct scsi_vpd *vpd;
+ bool is_ata, is_sata;
+ bool have_sct_data_table;
+ bool have_sct_temp;
+ bool have_smart;
+ bool have_sct;
+ u16 *ata_id;
+ u16 version;
+ long temp;
+ int err;
+
+ /* SCSI-ATA Translation present? */
+ rcu_read_lock();
+ vpd = rcu_dereference(sdev->vpd_pg89);
+
+ /*
+ * Verify that ATA IDENTIFY DEVICE data is included in ATA Information
+ * VPD and that the drive implements the SATA protocol.
+ */
+ if (!vpd || vpd->len < 572 || vpd->data[56] != ATA_CMD_ID_ATA ||
+ vpd->data[36] != 0x34) {
+ rcu_read_unlock();
+ return -ENODEV;
+ }
+ ata_id = (u16 *)&vpd->data[60];
+ is_ata = ata_id_is_ata(ata_id);
+ is_sata = ata_id_is_sata(ata_id);
+ have_sct = ata_id_sct_supported(ata_id);
+ have_sct_data_table = ata_id_sct_data_tables(ata_id);
+ have_smart = ata_id_smart_supported(ata_id) &&
+ ata_id_smart_enabled(ata_id);
+
+ rcu_read_unlock();
+
+ /* bail out if this is not a SATA device */
+ if (!is_ata || !is_sata)
+ return -ENODEV;
+ if (!have_sct)
+ goto skip_sct;
+
+ err = drivetemp_ata_command(st, SMART_READ_LOG, SCT_STATUS_REQ_ADDR);
+ if (err)
+ goto skip_sct;
+
+ version = (buf[SCT_STATUS_VERSION_HIGH] << 8) |
+ buf[SCT_STATUS_VERSION_LOW];
+ if (version != 2 && version != 3)
+ goto skip_sct;
+
+ have_sct_temp = temp_is_valid(buf[SCT_STATUS_TEMP]);
+ if (!have_sct_temp)
+ goto skip_sct;
+
+ st->have_temp_lowest = temp_is_valid(buf[SCT_STATUS_TEMP_LOWEST]);
+ st->have_temp_highest = temp_is_valid(buf[SCT_STATUS_TEMP_HIGHEST]);
+
+ if (!have_sct_data_table)
+ goto skip_sct;
+
+ /* Request and read temperature history table */
+ memset(buf, '\0', sizeof(st->smartdata));
+ buf[0] = 5; /* data table command */
+ buf[2] = 1; /* read table */
+ buf[4] = 2; /* temperature history table */
+
+ err = drivetemp_ata_command(st, SMART_WRITE_LOG, SCT_STATUS_REQ_ADDR);
+ if (err)
+ goto skip_sct_data;
+
+ err = drivetemp_ata_command(st, SMART_READ_LOG, SCT_READ_LOG_ADDR);
+ if (err)
+ goto skip_sct_data;
+
+ /*
+ * Temperature limits per AT Attachment 8 -
+ * ATA/ATAPI Command Set (ATA8-ACS)
+ */
+ st->have_temp_max = temp_is_valid(buf[6]);
+ st->have_temp_crit = temp_is_valid(buf[7]);
+ st->have_temp_min = temp_is_valid(buf[8]);
+ st->have_temp_lcrit = temp_is_valid(buf[9]);
+
+ st->temp_max = temp_from_sct(buf[6]);
+ st->temp_crit = temp_from_sct(buf[7]);
+ st->temp_min = temp_from_sct(buf[8]);
+ st->temp_lcrit = temp_from_sct(buf[9]);
+
+skip_sct_data:
+ if (have_sct_temp) {
+ st->get_temp = drivetemp_get_scttemp;
+ return 0;
+ }
+skip_sct:
+ if (!have_smart)
+ return -ENODEV;
+ st->get_temp = drivetemp_get_smarttemp;
+ return drivetemp_get_smarttemp(st, hwmon_temp_input, &temp);
+}
+
+static int drivetemp_identify(struct drivetemp_data *st)
+{
+ struct scsi_device *sdev = st->sdev;
+
+ /* Bail out immediately if there is no inquiry data */
+ if (!sdev->inquiry || sdev->inquiry_len < 16)
+ return -ENODEV;
+
+ /* Disk device? */
+ if (sdev->type != TYPE_DISK && sdev->type != TYPE_ZBC)
+ return -ENODEV;
+
+ return drivetemp_identify_sata(st);
+}
+
+static int drivetemp_read(struct device *dev, enum hwmon_sensor_types type,
+ u32 attr, int channel, long *val)
+{
+ struct drivetemp_data *st = dev_get_drvdata(dev);
+ int err = 0;
+
+ if (type != hwmon_temp)
+ return -EINVAL;
+
+ switch (attr) {
+ case hwmon_temp_input:
+ case hwmon_temp_lowest:
+ case hwmon_temp_highest:
+ mutex_lock(&st->lock);
+ err = st->get_temp(st, attr, val);
+ mutex_unlock(&st->lock);
+ break;
+ case hwmon_temp_lcrit:
+ *val = st->temp_lcrit;
+ break;
+ case hwmon_temp_min:
+ *val = st->temp_min;
+ break;
+ case hwmon_temp_max:
+ *val = st->temp_max;
+ break;
+ case hwmon_temp_crit:
+ *val = st->temp_crit;
+ break;
+ default:
+ err = -EINVAL;
+ break;
+ }
+ return err;
+}
+
+static umode_t drivetemp_is_visible(const void *data,
+ enum hwmon_sensor_types type,
+ u32 attr, int channel)
+{
+ const struct drivetemp_data *st = data;
+
+ switch (type) {
+ case hwmon_temp:
+ switch (attr) {
+ case hwmon_temp_input:
+ return 0444;
+ case hwmon_temp_lowest:
+ if (st->have_temp_lowest)
+ return 0444;
+ break;
+ case hwmon_temp_highest:
+ if (st->have_temp_highest)
+ return 0444;
+ break;
+ case hwmon_temp_min:
+ if (st->have_temp_min)
+ return 0444;
+ break;
+ case hwmon_temp_max:
+ if (st->have_temp_max)
+ return 0444;
+ break;
+ case hwmon_temp_lcrit:
+ if (st->have_temp_lcrit)
+ return 0444;
+ break;
+ case hwmon_temp_crit:
+ if (st->have_temp_crit)
+ return 0444;
+ break;
+ default:
+ break;
+ }
+ break;
+ default:
+ break;
+ }
+ return 0;
+}
+
+static const struct hwmon_channel_info *drivetemp_info[] = {
+ HWMON_CHANNEL_INFO(chip,
+ HWMON_C_REGISTER_TZ),
+ HWMON_CHANNEL_INFO(temp, HWMON_T_INPUT |
+ HWMON_T_LOWEST | HWMON_T_HIGHEST |
+ HWMON_T_MIN | HWMON_T_MAX |
+ HWMON_T_LCRIT | HWMON_T_CRIT),
+ NULL
+};
+
+static const struct hwmon_ops drivetemp_ops = {
+ .is_visible = drivetemp_is_visible,
+ .read = drivetemp_read,
+};
+
+static const struct hwmon_chip_info drivetemp_chip_info = {
+ .ops = &drivetemp_ops,
+ .info = drivetemp_info,
+};
+
+/*
+ * The device argument points to sdev->sdev_dev. Its parent is
+ * sdev->sdev_gendev, which we can use to get the scsi_device pointer.
+ */
+static int drivetemp_add(struct device *dev, struct class_interface *intf)
+{
+ struct scsi_device *sdev = to_scsi_device(dev->parent);
+ struct drivetemp_data *st;
+ int err;
+
+ st = kzalloc(sizeof(*st), GFP_KERNEL);
+ if (!st)
+ return -ENOMEM;
+
+ st->sdev = sdev;
+ st->dev = dev;
+ mutex_init(&st->lock);
+
+ if (drivetemp_identify(st)) {
+ err = -ENODEV;
+ goto abort;
+ }
+
+ st->hwdev = hwmon_device_register_with_info(dev->parent, "drivetemp",
+ st, &drivetemp_chip_info,
+ NULL);
+ if (IS_ERR(st->hwdev)) {
+ err = PTR_ERR(st->hwdev);
+ goto abort;
+ }
+
+ list_add(&st->list, &drivetemp_devlist);
+ return 0;
+
+abort:
+ kfree(st);
+ return err;
+}
+
+static void drivetemp_remove(struct device *dev, struct class_interface *intf)
+{
+ struct drivetemp_data *st, *tmp;
+
+ list_for_each_entry_safe(st, tmp, &drivetemp_devlist, list) {
+ if (st->dev == dev) {
+ list_del(&st->list);
+ hwmon_device_unregister(st->hwdev);
+ kfree(st);
+ break;
+ }
+ }
+}
+
+static struct class_interface drivetemp_interface = {
+ .add_dev = drivetemp_add,
+ .remove_dev = drivetemp_remove,
+};
+
+static int __init drivetemp_init(void)
+{
+ return scsi_register_interface(&drivetemp_interface);
+}
+
+static void __exit drivetemp_exit(void)
+{
+ scsi_unregister_interface(&drivetemp_interface);
+}
+
+module_init(drivetemp_init);
+module_exit(drivetemp_exit);
+
+MODULE_AUTHOR("Guenter Roeck <linus@roeck-us.net>");
+MODULE_DESCRIPTION("Hard drive temperature monitor");
+MODULE_LICENSE("GPL");
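For reference, the SCT status bytes used above are signed 8-bit Celsius
values with 0x80 reserved as the "invalid" marker; the (s8) cast sign-extends
the raw byte before scaling to millidegrees. A standalone sketch of the
conversion, not part of the patch; the sample raw bytes are assumed:

	#include <stdint.h>
	#include <stdio.h>

	#define INVALID_TEMP	 0x80
	#define temp_is_valid(t) ((t) != INVALID_TEMP)
	#define temp_from_sct(t) (((int8_t)(t)) * 1000)

	int main(void)
	{
		uint8_t raw[] = { 0x45, 0xfe, 0x80 };

		for (int i = 0; i < 3; i++)
			printf("0x%02x -> %svalid, %d mdegC\n", raw[i],
			       temp_is_valid(raw[i]) ? "" : "in",
			       temp_from_sct(raw[i]));
		/* prints 69000, -2000, and the invalid marker (-128000) */
		return 0;
	}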
diff --git a/drivers/hwmon/hwmon.c b/drivers/hwmon/hwmon.c
index 1f3b30b085b9..6a30fb453f7a 100644
--- a/drivers/hwmon/hwmon.c
+++ b/drivers/hwmon/hwmon.c
@@ -51,6 +51,7 @@ struct hwmon_device_attribute {
#define to_hwmon_attr(d) \
container_of(d, struct hwmon_device_attribute, dev_attr)
+#define to_dev_attr(a) container_of(a, struct device_attribute, attr)
/*
* Thermal zone information
@@ -58,7 +59,7 @@ struct hwmon_device_attribute {
* also provides the sensor index.
*/
struct hwmon_thermal_data {
- struct hwmon_device *hwdev; /* Reference to hwmon device */
+ struct device *dev; /* Reference to hwmon device */
int index; /* sensor index */
};
@@ -95,9 +96,27 @@ static const struct attribute_group *hwmon_dev_attr_groups[] = {
NULL
};
+static void hwmon_free_attrs(struct attribute **attrs)
+{
+ int i;
+
+ for (i = 0; attrs[i]; i++) {
+ struct device_attribute *dattr = to_dev_attr(attrs[i]);
+ struct hwmon_device_attribute *hattr = to_hwmon_attr(dattr);
+
+ kfree(hattr);
+ }
+ kfree(attrs);
+}
+
static void hwmon_dev_release(struct device *dev)
{
- kfree(to_hwmon_device(dev));
+ struct hwmon_device *hwdev = to_hwmon_device(dev);
+
+ if (hwdev->group.attrs)
+ hwmon_free_attrs(hwdev->group.attrs);
+ kfree(hwdev->groups);
+ kfree(hwdev);
}
static struct class hwmon_class = {
@@ -119,11 +138,11 @@ static DEFINE_IDA(hwmon_ida);
static int hwmon_thermal_get_temp(void *data, int *temp)
{
struct hwmon_thermal_data *tdata = data;
- struct hwmon_device *hwdev = tdata->hwdev;
+ struct hwmon_device *hwdev = to_hwmon_device(tdata->dev);
int ret;
long t;
- ret = hwdev->chip->ops->read(&hwdev->dev, hwmon_temp, hwmon_temp_input,
+ ret = hwdev->chip->ops->read(tdata->dev, hwmon_temp, hwmon_temp_input,
tdata->index, &t);
if (ret < 0)
return ret;
@@ -137,8 +156,7 @@ static const struct thermal_zone_of_device_ops hwmon_thermal_ops = {
.get_temp = hwmon_thermal_get_temp,
};
-static int hwmon_thermal_add_sensor(struct device *dev,
- struct hwmon_device *hwdev, int index)
+static int hwmon_thermal_add_sensor(struct device *dev, int index)
{
struct hwmon_thermal_data *tdata;
struct thermal_zone_device *tzd;
@@ -147,10 +165,10 @@ static int hwmon_thermal_add_sensor(struct device *dev,
if (!tdata)
return -ENOMEM;
- tdata->hwdev = hwdev;
+ tdata->dev = dev;
tdata->index = index;
- tzd = devm_thermal_zone_of_sensor_register(&hwdev->dev, index, tdata,
+ tzd = devm_thermal_zone_of_sensor_register(dev, index, tdata,
&hwmon_thermal_ops);
/*
* If CONFIG_THERMAL_OF is disabled, this returns -ENODEV,
@@ -162,8 +180,7 @@ static int hwmon_thermal_add_sensor(struct device *dev,
return 0;
}
#else
-static int hwmon_thermal_add_sensor(struct device *dev,
- struct hwmon_device *hwdev, int index)
+static int hwmon_thermal_add_sensor(struct device *dev, int index)
{
return 0;
}
@@ -171,7 +188,7 @@ static int hwmon_thermal_add_sensor(struct device *dev,
static int hwmon_attr_base(enum hwmon_sensor_types type)
{
- if (type == hwmon_in)
+ if (type == hwmon_in || type == hwmon_intrusion)
return 0;
return 1;
}
@@ -250,8 +267,7 @@ static bool is_string_attr(enum hwmon_sensor_types type, u32 attr)
(type == hwmon_fan && attr == hwmon_fan_label);
}
-static struct attribute *hwmon_genattr(struct device *dev,
- const void *drvdata,
+static struct attribute *hwmon_genattr(const void *drvdata,
enum hwmon_sensor_types type,
u32 attr,
int index,
@@ -279,7 +295,7 @@ static struct attribute *hwmon_genattr(struct device *dev,
if ((mode & 0222) && !ops->write)
return ERR_PTR(-EINVAL);
- hattr = devm_kzalloc(dev, sizeof(*hattr), GFP_KERNEL);
+ hattr = kzalloc(sizeof(*hattr), GFP_KERNEL);
if (!hattr)
return ERR_PTR(-ENOMEM);
@@ -327,6 +343,7 @@ static const char * const hwmon_chip_attrs[] = {
};
static const char * const hwmon_temp_attr_templates[] = {
+ [hwmon_temp_enable] = "temp%d_enable",
[hwmon_temp_input] = "temp%d_input",
[hwmon_temp_type] = "temp%d_type",
[hwmon_temp_lcrit] = "temp%d_lcrit",
@@ -354,6 +371,7 @@ static const char * const hwmon_temp_attr_templates[] = {
};
static const char * const hwmon_in_attr_templates[] = {
+ [hwmon_in_enable] = "in%d_enable",
[hwmon_in_input] = "in%d_input",
[hwmon_in_min] = "in%d_min",
[hwmon_in_max] = "in%d_max",
@@ -369,10 +387,10 @@ static const char * const hwmon_in_attr_templates[] = {
[hwmon_in_max_alarm] = "in%d_max_alarm",
[hwmon_in_lcrit_alarm] = "in%d_lcrit_alarm",
[hwmon_in_crit_alarm] = "in%d_crit_alarm",
- [hwmon_in_enable] = "in%d_enable",
};
static const char * const hwmon_curr_attr_templates[] = {
+ [hwmon_curr_enable] = "curr%d_enable",
[hwmon_curr_input] = "curr%d_input",
[hwmon_curr_min] = "curr%d_min",
[hwmon_curr_max] = "curr%d_max",
@@ -391,6 +409,7 @@ static const char * const hwmon_curr_attr_templates[] = {
};
static const char * const hwmon_power_attr_templates[] = {
+ [hwmon_power_enable] = "power%d_enable",
[hwmon_power_average] = "power%d_average",
[hwmon_power_average_interval] = "power%d_average_interval",
[hwmon_power_average_interval_max] = "power%d_interval_max",
@@ -422,11 +441,13 @@ static const char * const hwmon_power_attr_templates[] = {
};
static const char * const hwmon_energy_attr_templates[] = {
+ [hwmon_energy_enable] = "energy%d_enable",
[hwmon_energy_input] = "energy%d_input",
[hwmon_energy_label] = "energy%d_label",
};
static const char * const hwmon_humidity_attr_templates[] = {
+ [hwmon_humidity_enable] = "humidity%d_enable",
[hwmon_humidity_input] = "humidity%d_input",
[hwmon_humidity_label] = "humidity%d_label",
[hwmon_humidity_min] = "humidity%d_min",
@@ -438,6 +459,7 @@ static const char * const hwmon_humidity_attr_templates[] = {
};
static const char * const hwmon_fan_attr_templates[] = {
+ [hwmon_fan_enable] = "fan%d_enable",
[hwmon_fan_input] = "fan%d_input",
[hwmon_fan_label] = "fan%d_label",
[hwmon_fan_min] = "fan%d_min",
@@ -458,6 +480,11 @@ static const char * const hwmon_pwm_attr_templates[] = {
[hwmon_pwm_freq] = "pwm%d_freq",
};
+static const char * const hwmon_intrusion_attr_templates[] = {
+ [hwmon_intrusion_alarm] = "intrusion%d_alarm",
+ [hwmon_intrusion_beep] = "intrusion%d_beep",
+};
+
static const char * const *__templates[] = {
[hwmon_chip] = hwmon_chip_attrs,
[hwmon_temp] = hwmon_temp_attr_templates,
@@ -468,6 +495,7 @@ static const char * const *__templates[] = {
[hwmon_humidity] = hwmon_humidity_attr_templates,
[hwmon_fan] = hwmon_fan_attr_templates,
[hwmon_pwm] = hwmon_pwm_attr_templates,
+ [hwmon_intrusion] = hwmon_intrusion_attr_templates,
};
static const int __templates_size[] = {
@@ -480,6 +508,7 @@ static const int __templates_size[] = {
[hwmon_humidity] = ARRAY_SIZE(hwmon_humidity_attr_templates),
[hwmon_fan] = ARRAY_SIZE(hwmon_fan_attr_templates),
[hwmon_pwm] = ARRAY_SIZE(hwmon_pwm_attr_templates),
+ [hwmon_intrusion] = ARRAY_SIZE(hwmon_intrusion_attr_templates),
};
static int hwmon_num_channel_attrs(const struct hwmon_channel_info *info)
@@ -492,8 +521,7 @@ static int hwmon_num_channel_attrs(const struct hwmon_channel_info *info)
return n;
}
-static int hwmon_genattrs(struct device *dev,
- const void *drvdata,
+static int hwmon_genattrs(const void *drvdata,
struct attribute **attrs,
const struct hwmon_ops *ops,
const struct hwmon_channel_info *info)
@@ -519,7 +547,7 @@ static int hwmon_genattrs(struct device *dev,
attr_mask &= ~BIT(attr);
if (attr >= template_size)
return -EINVAL;
- a = hwmon_genattr(dev, drvdata, info->type, attr, i,
+ a = hwmon_genattr(drvdata, info->type, attr, i,
templates[attr], ops);
if (IS_ERR(a)) {
if (PTR_ERR(a) != -ENOENT)
@@ -533,8 +561,7 @@ static int hwmon_genattrs(struct device *dev,
}
static struct attribute **
-__hwmon_create_attrs(struct device *dev, const void *drvdata,
- const struct hwmon_chip_info *chip)
+__hwmon_create_attrs(const void *drvdata, const struct hwmon_chip_info *chip)
{
int ret, i, aindex = 0, nattrs = 0;
struct attribute **attrs;
@@ -545,15 +572,17 @@ __hwmon_create_attrs(struct device *dev, const void *drvdata,
if (nattrs == 0)
return ERR_PTR(-EINVAL);
- attrs = devm_kcalloc(dev, nattrs + 1, sizeof(*attrs), GFP_KERNEL);
+ attrs = kcalloc(nattrs + 1, sizeof(*attrs), GFP_KERNEL);
if (!attrs)
return ERR_PTR(-ENOMEM);
for (i = 0; chip->info[i]; i++) {
- ret = hwmon_genattrs(dev, drvdata, &attrs[aindex], chip->ops,
+ ret = hwmon_genattrs(drvdata, &attrs[aindex], chip->ops,
chip->info[i]);
- if (ret < 0)
+ if (ret < 0) {
+ hwmon_free_attrs(attrs);
return ERR_PTR(ret);
+ }
aindex += ret;
}
@@ -595,14 +624,13 @@ __hwmon_device_register(struct device *dev, const char *name, void *drvdata,
for (i = 0; groups[i]; i++)
ngroups++;
- hwdev->groups = devm_kcalloc(dev, ngroups, sizeof(*groups),
- GFP_KERNEL);
+ hwdev->groups = kcalloc(ngroups, sizeof(*groups), GFP_KERNEL);
if (!hwdev->groups) {
err = -ENOMEM;
goto free_hwmon;
}
- attrs = __hwmon_create_attrs(dev, drvdata, chip);
+ attrs = __hwmon_create_attrs(drvdata, chip);
if (IS_ERR(attrs)) {
err = PTR_ERR(attrs);
goto free_hwmon;
@@ -647,8 +675,7 @@ __hwmon_device_register(struct device *dev, const char *name, void *drvdata,
hwmon_temp_input, j))
continue;
if (info[i]->config[j] & HWMON_T_INPUT) {
- err = hwmon_thermal_add_sensor(dev,
- hwdev, j);
+ err = hwmon_thermal_add_sensor(hdev, j);
if (err) {
device_unregister(hdev);
/*
@@ -667,7 +694,7 @@ __hwmon_device_register(struct device *dev, const char *name, void *drvdata,
return hdev;
free_hwmon:
- kfree(hwdev);
+ hwmon_dev_release(hdev);
ida_remove:
ida_simple_remove(&hwmon_ida, id);
return ERR_PTR(err);
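The conversion from devm_* to plain allocations above changes ownership:
attribute memory must live exactly as long as the hwmon device whose sysfs
files reference it, not as long as the possibly shorter-lived parent that
registered it, so it is now freed in hwmon_dev_release(). A toy plain-C
sketch of that ownership rule; the names are illustrative, not kernel API:

	#include <stdlib.h>

	struct toy_dev {
		char **attrs;	/* owned by the device itself */
	};

	/* release hook: runs on the final reference drop, so readers of
	 * the attribute files can never outlive the memory backing them */
	static void toy_release(struct toy_dev *d)
	{
		free(d->attrs);
		free(d);
	}

	int main(void)
	{
		struct toy_dev *d = calloc(1, sizeof(*d));

		if (!d)
			return 1;
		d->attrs = calloc(4, sizeof(*d->attrs));
		toy_release(d);	/* what the last put would trigger */
		return 0;
	}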
diff --git a/drivers/hwmon/i5k_amb.c b/drivers/hwmon/i5k_amb.c
index b09c39abd3a8..eeac4b04df27 100644
--- a/drivers/hwmon/i5k_amb.c
+++ b/drivers/hwmon/i5k_amb.c
@@ -528,7 +528,7 @@ static int i5k_amb_probe(struct platform_device *pdev)
goto err;
}
- data->amb_mmio = ioremap_nocache(data->amb_base, data->amb_len);
+ data->amb_mmio = ioremap(data->amb_base, data->amb_len);
if (!data->amb_mmio) {
res = -EBUSY;
goto err_map_failed;
diff --git a/drivers/hwmon/k10temp.c b/drivers/hwmon/k10temp.c
index 5c1dddde193c..e39354ffe973 100644
--- a/drivers/hwmon/k10temp.c
+++ b/drivers/hwmon/k10temp.c
@@ -1,13 +1,29 @@
// SPDX-License-Identifier: GPL-2.0-or-later
/*
- * k10temp.c - AMD Family 10h/11h/12h/14h/15h/16h processor hardware monitoring
+ * k10temp.c - AMD Family 10h/11h/12h/14h/15h/16h/17h
+ * processor hardware monitoring
*
* Copyright (c) 2009 Clemens Ladisch <clemens@ladisch.de>
+ * Copyright (c) 2020 Guenter Roeck <linux@roeck-us.net>
+ *
+ * Implementation notes:
+ * - CCD register address information as well as the calculation to
+ * convert raw register values is from https://github.com/ocerman/zenpower.
+ * The information is not confirmed from chip datasheets, but experiments
+ * suggest that it provides reasonable temperature values.
+ * - Register addresses to read chip voltage and current are also from
+ * https://github.com/ocerman/zenpower, and not confirmed from chip
+ * datasheets. Current calibration is board specific and not typically
+ * shared by board vendors. For this reason, current values are
+ * normalized to report 1A/LSB for core current and 0.25A/LSB for SoC
+ * current. Reported values can be adjusted using the sensors configuration
+ * file.
*/
+#include <linux/bitops.h>
+#include <linux/debugfs.h>
#include <linux/err.h>
#include <linux/hwmon.h>
-#include <linux/hwmon-sysfs.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/pci.h>
@@ -31,22 +47,22 @@ static DEFINE_MUTEX(nb_smu_ind_mutex);
#endif
/* CPUID function 0x80000001, ebx */
-#define CPUID_PKGTYPE_MASK 0xf0000000
+#define CPUID_PKGTYPE_MASK GENMASK(31, 28)
#define CPUID_PKGTYPE_F 0x00000000
#define CPUID_PKGTYPE_AM2R2_AM3 0x10000000
/* DRAM controller (PCI function 2) */
#define REG_DCT0_CONFIG_HIGH 0x094
-#define DDR3_MODE 0x00000100
+#define DDR3_MODE BIT(8)
/* miscellaneous (PCI function 3) */
#define REG_HARDWARE_THERMAL_CONTROL 0x64
-#define HTC_ENABLE 0x00000001
+#define HTC_ENABLE BIT(0)
#define REG_REPORTED_TEMPERATURE 0xa4
#define REG_NORTHBRIDGE_CAPABILITIES 0xe8
-#define NB_CAP_HTC 0x00000400
+#define NB_CAP_HTC BIT(10)
/*
* For F15h M60h and M70h, REG_HARDWARE_THERMAL_CONTROL
@@ -60,6 +76,20 @@ static DEFINE_MUTEX(nb_smu_ind_mutex);
/* F17h M01h Access through SMN */
#define F17H_M01H_REPORTED_TEMP_CTRL_OFFSET 0x00059800
+#define F17H_M70H_CCD_TEMP(x) (0x00059954 + ((x) * 4))
+#define F17H_M70H_CCD_TEMP_VALID BIT(11)
+#define F17H_M70H_CCD_TEMP_MASK GENMASK(10, 0)
+
+#define F17H_M01H_SVI 0x0005A000
+#define F17H_M01H_SVI_TEL_PLANE0 (F17H_M01H_SVI + 0xc)
+#define F17H_M01H_SVI_TEL_PLANE1 (F17H_M01H_SVI + 0x10)
+
+#define CUR_TEMP_SHIFT 21
+#define CUR_TEMP_RANGE_SEL_MASK BIT(19)
+
+#define CFACTOR_ICORE 1000000 /* 1A / LSB */
+#define CFACTOR_ISOC 250000 /* 0.25A / LSB */
+
struct k10temp_data {
struct pci_dev *pdev;
void (*read_htcreg)(struct pci_dev *pdev, u32 *regval);
@@ -67,6 +97,10 @@ struct k10temp_data {
int temp_offset;
u32 temp_adjust_mask;
bool show_tdie;
+ u32 show_tccd;
+ u32 svi_addr[2];
+ bool show_current;
+ int cfactor[2];
};
struct tctl_offset {
@@ -84,6 +118,16 @@ static const struct tctl_offset tctl_offset_table[] = {
{ 0x17, "AMD Ryzen Threadripper 29", 27000 }, /* 29{20,50,70,90}[W]X */
};
+static bool is_threadripper(void)
+{
+ return strstr(boot_cpu_data.x86_model_id, "Threadripper");
+}
+
+static bool is_epyc(void)
+{
+ return strstr(boot_cpu_data.x86_model_id, "EPYC");
+}
+
static void read_htcreg_pci(struct pci_dev *pdev, u32 *regval)
{
pci_read_config_dword(pdev, REG_HARDWARE_THERMAL_CONTROL, regval);
@@ -123,130 +167,237 @@ static void read_tempreg_nb_f17(struct pci_dev *pdev, u32 *regval)
F17H_M01H_REPORTED_TEMP_CTRL_OFFSET, regval);
}
-static unsigned int get_raw_temp(struct k10temp_data *data)
+static long get_raw_temp(struct k10temp_data *data)
{
- unsigned int temp;
u32 regval;
+ long temp;
data->read_tempreg(data->pdev, &regval);
- temp = (regval >> 21) * 125;
+ temp = (regval >> CUR_TEMP_SHIFT) * 125;
if (regval & data->temp_adjust_mask)
temp -= 49000;
return temp;
}
-static ssize_t temp1_input_show(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- struct k10temp_data *data = dev_get_drvdata(dev);
- unsigned int temp = get_raw_temp(data);
+static const char *k10temp_temp_label[] = {
+ "Tdie",
+ "Tctl",
+ "Tccd1",
+ "Tccd2",
+ "Tccd3",
+ "Tccd4",
+ "Tccd5",
+ "Tccd6",
+ "Tccd7",
+ "Tccd8",
+};
- if (temp > data->temp_offset)
- temp -= data->temp_offset;
- else
- temp = 0;
+static const char *k10temp_in_label[] = {
+ "Vcore",
+ "Vsoc",
+};
- return sprintf(buf, "%u\n", temp);
-}
+static const char *k10temp_curr_label[] = {
+ "Icore",
+ "Isoc",
+};
-static ssize_t temp2_input_show(struct device *dev,
- struct device_attribute *devattr, char *buf)
+static int k10temp_read_labels(struct device *dev,
+ enum hwmon_sensor_types type,
+ u32 attr, int channel, const char **str)
{
- struct k10temp_data *data = dev_get_drvdata(dev);
- unsigned int temp = get_raw_temp(data);
-
- return sprintf(buf, "%u\n", temp);
+ switch (type) {
+ case hwmon_temp:
+ *str = k10temp_temp_label[channel];
+ break;
+ case hwmon_in:
+ *str = k10temp_in_label[channel];
+ break;
+ case hwmon_curr:
+ *str = k10temp_curr_label[channel];
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+ return 0;
}
-static ssize_t temp_label_show(struct device *dev,
- struct device_attribute *devattr, char *buf)
+static int k10temp_read_curr(struct device *dev, u32 attr, int channel,
+ long *val)
{
- struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
+ struct k10temp_data *data = dev_get_drvdata(dev);
+ u32 regval;
- return sprintf(buf, "%s\n", attr->index ? "Tctl" : "Tdie");
+ switch (attr) {
+ case hwmon_curr_input:
+ amd_smn_read(amd_pci_dev_to_node_id(data->pdev),
+ data->svi_addr[channel], &regval);
+ *val = DIV_ROUND_CLOSEST(data->cfactor[channel] *
+ (regval & 0xff),
+ 1000);
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+ return 0;
}
-static ssize_t temp1_max_show(struct device *dev,
- struct device_attribute *attr, char *buf)
+static int k10temp_read_in(struct device *dev, u32 attr, int channel, long *val)
{
- return sprintf(buf, "%d\n", 70 * 1000);
+ struct k10temp_data *data = dev_get_drvdata(dev);
+ u32 regval;
+
+ switch (attr) {
+ case hwmon_in_input:
+ amd_smn_read(amd_pci_dev_to_node_id(data->pdev),
+ data->svi_addr[channel], &regval);
+ regval = (regval >> 16) & 0xff;
+ *val = DIV_ROUND_CLOSEST(155000 - regval * 625, 100);
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+ return 0;
}
-static ssize_t temp_crit_show(struct device *dev,
- struct device_attribute *devattr, char *buf)
+static int k10temp_read_temp(struct device *dev, u32 attr, int channel,
+ long *val)
{
- struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
struct k10temp_data *data = dev_get_drvdata(dev);
- int show_hyst = attr->index;
u32 regval;
- int value;
- data->read_htcreg(data->pdev, &regval);
- value = ((regval >> 16) & 0x7f) * 500 + 52000;
- if (show_hyst)
- value -= ((regval >> 24) & 0xf) * 500;
- return sprintf(buf, "%d\n", value);
+ switch (attr) {
+ case hwmon_temp_input:
+ switch (channel) {
+ case 0: /* Tdie */
+ *val = get_raw_temp(data) - data->temp_offset;
+ if (*val < 0)
+ *val = 0;
+ break;
+ case 1: /* Tctl */
+ *val = get_raw_temp(data);
+ if (*val < 0)
+ *val = 0;
+ break;
+ case 2 ... 9: /* Tccd{1-8} */
+ amd_smn_read(amd_pci_dev_to_node_id(data->pdev),
+ F17H_M70H_CCD_TEMP(channel - 2), &regval);
+ *val = (regval & F17H_M70H_CCD_TEMP_MASK) * 125 - 49000;
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+ break;
+ case hwmon_temp_max:
+ *val = 70 * 1000;
+ break;
+ case hwmon_temp_crit:
+ data->read_htcreg(data->pdev, &regval);
+ *val = ((regval >> 16) & 0x7f) * 500 + 52000;
+ break;
+ case hwmon_temp_crit_hyst:
+ data->read_htcreg(data->pdev, &regval);
+ *val = (((regval >> 16) & 0x7f)
+ - ((regval >> 24) & 0xf)) * 500 + 52000;
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+ return 0;
}
-static DEVICE_ATTR_RO(temp1_input);
-static DEVICE_ATTR_RO(temp1_max);
-static SENSOR_DEVICE_ATTR_RO(temp1_crit, temp_crit, 0);
-static SENSOR_DEVICE_ATTR_RO(temp1_crit_hyst, temp_crit, 1);
-
-static SENSOR_DEVICE_ATTR_RO(temp1_label, temp_label, 0);
-static DEVICE_ATTR_RO(temp2_input);
-static SENSOR_DEVICE_ATTR_RO(temp2_label, temp_label, 1);
+static int k10temp_read(struct device *dev, enum hwmon_sensor_types type,
+ u32 attr, int channel, long *val)
+{
+ switch (type) {
+ case hwmon_temp:
+ return k10temp_read_temp(dev, attr, channel, val);
+ case hwmon_in:
+ return k10temp_read_in(dev, attr, channel, val);
+ case hwmon_curr:
+ return k10temp_read_curr(dev, attr, channel, val);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
-static umode_t k10temp_is_visible(struct kobject *kobj,
- struct attribute *attr, int index)
+static umode_t k10temp_is_visible(const void *_data,
+ enum hwmon_sensor_types type,
+ u32 attr, int channel)
{
- struct device *dev = container_of(kobj, struct device, kobj);
- struct k10temp_data *data = dev_get_drvdata(dev);
+ const struct k10temp_data *data = _data;
struct pci_dev *pdev = data->pdev;
u32 reg;
- switch (index) {
- case 0 ... 1: /* temp1_input, temp1_max */
- default:
- break;
- case 2 ... 3: /* temp1_crit, temp1_crit_hyst */
- if (!data->read_htcreg)
- return 0;
-
- pci_read_config_dword(pdev, REG_NORTHBRIDGE_CAPABILITIES,
- &reg);
- if (!(reg & NB_CAP_HTC))
- return 0;
-
- data->read_htcreg(data->pdev, &reg);
- if (!(reg & HTC_ENABLE))
+ switch (type) {
+ case hwmon_temp:
+ switch (attr) {
+ case hwmon_temp_input:
+ switch (channel) {
+ case 0: /* Tdie, or Tctl if we don't show it */
+ break;
+ case 1: /* Tctl */
+ if (!data->show_tdie)
+ return 0;
+ break;
+ case 2 ... 9: /* Tccd{1-8} */
+ if (!(data->show_tccd & BIT(channel - 2)))
+ return 0;
+ break;
+ default:
+ return 0;
+ }
+ break;
+ case hwmon_temp_max:
+ if (channel || data->show_tdie)
+ return 0;
+ break;
+ case hwmon_temp_crit:
+ case hwmon_temp_crit_hyst:
+ if (channel || !data->read_htcreg)
+ return 0;
+
+ pci_read_config_dword(pdev,
+ REG_NORTHBRIDGE_CAPABILITIES,
+ &reg);
+ if (!(reg & NB_CAP_HTC))
+ return 0;
+
+ data->read_htcreg(data->pdev, &reg);
+ if (!(reg & HTC_ENABLE))
+ return 0;
+ break;
+ case hwmon_temp_label:
+ /* No labels if we don't show the die temperature */
+ if (!data->show_tdie)
+ return 0;
+ switch (channel) {
+ case 0: /* Tdie */
+ case 1: /* Tctl */
+ break;
+ case 2 ... 9: /* Tccd{1-8} */
+ if (!(data->show_tccd & BIT(channel - 2)))
+ return 0;
+ break;
+ default:
+ return 0;
+ }
+ break;
+ default:
return 0;
+ }
break;
- case 4 ... 6: /* temp1_label, temp2_input, temp2_label */
- if (!data->show_tdie)
+ case hwmon_in:
+ case hwmon_curr:
+ if (!data->show_current)
return 0;
break;
+ default:
+ return 0;
}
- return attr->mode;
+ return 0444;
}
-static struct attribute *k10temp_attrs[] = {
- &dev_attr_temp1_input.attr,
- &dev_attr_temp1_max.attr,
- &sensor_dev_attr_temp1_crit.dev_attr.attr,
- &sensor_dev_attr_temp1_crit_hyst.dev_attr.attr,
- &sensor_dev_attr_temp1_label.dev_attr.attr,
- &dev_attr_temp2_input.attr,
- &sensor_dev_attr_temp2_label.dev_attr.attr,
- NULL
-};
-
-static const struct attribute_group k10temp_group = {
- .attrs = k10temp_attrs,
- .is_visible = k10temp_is_visible,
-};
-__ATTRIBUTE_GROUPS(k10temp);
-
static bool has_erratum_319(struct pci_dev *pdev)
{
u32 pkg_type, reg_dram_cfg;
@@ -281,8 +432,125 @@ static bool has_erratum_319(struct pci_dev *pdev)
(boot_cpu_data.x86_model == 4 && boot_cpu_data.x86_stepping <= 2);
}
-static int k10temp_probe(struct pci_dev *pdev,
- const struct pci_device_id *id)
+#ifdef CONFIG_DEBUG_FS
+
+static void k10temp_smn_regs_show(struct seq_file *s, struct pci_dev *pdev,
+ u32 addr, int count)
+{
+ u32 reg;
+ int i;
+
+ for (i = 0; i < count; i++) {
+ if (!(i & 3))
+ seq_printf(s, "0x%06x: ", addr + i * 4);
+ amd_smn_read(amd_pci_dev_to_node_id(pdev), addr + i * 4, &reg);
+ seq_printf(s, "%08x ", reg);
+ if ((i & 3) == 3)
+ seq_puts(s, "\n");
+ }
+}
+
+static int svi_show(struct seq_file *s, void *unused)
+{
+ struct k10temp_data *data = s->private;
+
+ k10temp_smn_regs_show(s, data->pdev, F17H_M01H_SVI, 32);
+ return 0;
+}
+DEFINE_SHOW_ATTRIBUTE(svi);
+
+static int thm_show(struct seq_file *s, void *unused)
+{
+ struct k10temp_data *data = s->private;
+
+ k10temp_smn_regs_show(s, data->pdev,
+ F17H_M01H_REPORTED_TEMP_CTRL_OFFSET, 256);
+ return 0;
+}
+DEFINE_SHOW_ATTRIBUTE(thm);
+
+static void k10temp_debugfs_cleanup(void *ddir)
+{
+ debugfs_remove_recursive(ddir);
+}
+
+static void k10temp_init_debugfs(struct k10temp_data *data)
+{
+ struct dentry *debugfs;
+ char name[32];
+
+ /* Only show debugfs data for Family 17h/18h CPUs */
+ if (!data->show_tdie)
+ return;
+
+ scnprintf(name, sizeof(name), "k10temp-%s", pci_name(data->pdev));
+
+ debugfs = debugfs_create_dir(name, NULL);
+ if (debugfs) {
+ debugfs_create_file("svi", 0444, debugfs, data, &svi_fops);
+ debugfs_create_file("thm", 0444, debugfs, data, &thm_fops);
+ devm_add_action_or_reset(&data->pdev->dev,
+ k10temp_debugfs_cleanup, debugfs);
+ }
+}
+
+#else
+
+static void k10temp_init_debugfs(struct k10temp_data *data)
+{
+}
+
+#endif
+
+static const struct hwmon_channel_info *k10temp_info[] = {
+ HWMON_CHANNEL_INFO(temp,
+ HWMON_T_INPUT | HWMON_T_MAX |
+ HWMON_T_CRIT | HWMON_T_CRIT_HYST |
+ HWMON_T_LABEL,
+ HWMON_T_INPUT | HWMON_T_LABEL,
+ HWMON_T_INPUT | HWMON_T_LABEL,
+ HWMON_T_INPUT | HWMON_T_LABEL,
+ HWMON_T_INPUT | HWMON_T_LABEL,
+ HWMON_T_INPUT | HWMON_T_LABEL,
+ HWMON_T_INPUT | HWMON_T_LABEL,
+ HWMON_T_INPUT | HWMON_T_LABEL,
+ HWMON_T_INPUT | HWMON_T_LABEL,
+ HWMON_T_INPUT | HWMON_T_LABEL),
+ HWMON_CHANNEL_INFO(in,
+ HWMON_I_INPUT | HWMON_I_LABEL,
+ HWMON_I_INPUT | HWMON_I_LABEL),
+ HWMON_CHANNEL_INFO(curr,
+ HWMON_C_INPUT | HWMON_C_LABEL,
+ HWMON_C_INPUT | HWMON_C_LABEL),
+ NULL
+};
+
+static const struct hwmon_ops k10temp_hwmon_ops = {
+ .is_visible = k10temp_is_visible,
+ .read = k10temp_read,
+ .read_string = k10temp_read_labels,
+};
+
+static const struct hwmon_chip_info k10temp_chip_info = {
+ .ops = &k10temp_hwmon_ops,
+ .info = k10temp_info,
+};
+
+static void k10temp_get_ccd_support(struct pci_dev *pdev,
+ struct k10temp_data *data, int limit)
+{
+ u32 regval;
+ int i;
+
+ for (i = 0; i < limit; i++) {
+ amd_smn_read(amd_pci_dev_to_node_id(pdev),
+ F17H_M70H_CCD_TEMP(i), &regval);
+ if (regval & F17H_M70H_CCD_TEMP_VALID)
+ data->show_tccd |= BIT(i);
+ }
+}
+
+static int k10temp_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
int unreliable = has_erratum_319(pdev);
struct device *dev = &pdev->dev;
@@ -312,9 +580,32 @@ static int k10temp_probe(struct pci_dev *pdev,
data->read_htcreg = read_htcreg_nb_f15;
data->read_tempreg = read_tempreg_nb_f15;
} else if (boot_cpu_data.x86 == 0x17 || boot_cpu_data.x86 == 0x18) {
- data->temp_adjust_mask = 0x80000;
+ data->temp_adjust_mask = CUR_TEMP_RANGE_SEL_MASK;
data->read_tempreg = read_tempreg_nb_f17;
data->show_tdie = true;
+
+ switch (boot_cpu_data.x86_model) {
+ case 0x1: /* Zen */
+ case 0x8: /* Zen+ */
+ case 0x11: /* Zen APU */
+ case 0x18: /* Zen+ APU */
+ data->show_current = !is_threadripper() && !is_epyc();
+ data->svi_addr[0] = F17H_M01H_SVI_TEL_PLANE0;
+ data->svi_addr[1] = F17H_M01H_SVI_TEL_PLANE1;
+ data->cfactor[0] = CFACTOR_ICORE;
+ data->cfactor[1] = CFACTOR_ISOC;
+ k10temp_get_ccd_support(pdev, data, 4);
+ break;
+ case 0x31: /* Zen2 Threadripper */
+ case 0x71: /* Zen2 */
+ data->show_current = !is_threadripper() && !is_epyc();
+ data->cfactor[0] = CFACTOR_ICORE;
+ data->cfactor[1] = CFACTOR_ISOC;
+ data->svi_addr[0] = F17H_M01H_SVI_TEL_PLANE1;
+ data->svi_addr[1] = F17H_M01H_SVI_TEL_PLANE0;
+ k10temp_get_ccd_support(pdev, data, 8);
+ break;
+ }
} else {
data->read_htcreg = read_htcreg_pci;
data->read_tempreg = read_tempreg_pci;
@@ -330,9 +621,15 @@ static int k10temp_probe(struct pci_dev *pdev,
}
}
- hwmon_dev = devm_hwmon_device_register_with_groups(dev, "k10temp", data,
- k10temp_groups);
- return PTR_ERR_OR_ZERO(hwmon_dev);
+ hwmon_dev = devm_hwmon_device_register_with_info(dev, "k10temp", data,
+ &k10temp_chip_info,
+ NULL);
+ if (IS_ERR(hwmon_dev))
+ return PTR_ERR(hwmon_dev);
+
+ k10temp_init_debugfs(data);
+
+ return 0;
}
static const struct pci_device_id k10temp_id_table[] = {
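The raw register decoding used throughout the new read callbacks: Tctl/Tdie
keep the temperature in the top 11 bits at 0.125 degC per LSB, with the
range-select bit shifting the scale down by 49 degC, and the SVI telemetry
VID starts at 1.55 V and drops 6.25 mV per step (per the zenpower-derived
formulas the patch cites). A standalone sketch, not part of the patch, with
assumed register values:

	#include <stdint.h>
	#include <stdio.h>

	#define CUR_TEMP_SHIFT	21
	#define RANGE_SEL	(1u << 19)

	static long tctl_mc(uint32_t regval)
	{
		long t = (regval >> CUR_TEMP_SHIFT) * 125;

		if (regval & RANGE_SEL)
			t -= 49000;	/* high-range parts */
		return t;
	}

	static long svi_mv(uint32_t regval)
	{
		/* VID byte in bits 23:16; round to millivolts */
		return (155000 - ((regval >> 16) & 0xff) * 625 + 50) / 100;
	}

	int main(void)
	{
		uint32_t thm = (600u << CUR_TEMP_SHIFT) | RANGE_SEL;
		uint32_t svi = 0x300000;	/* VID byte 0x30 */

		printf("Tctl=%ld mdegC Vcore=%ld mV\n",
		       tctl_mc(thm), svi_mv(svi));	/* 26000, 1250 */
		return 0;
	}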
diff --git a/drivers/hwmon/max31730.c b/drivers/hwmon/max31730.c
new file mode 100644
index 000000000000..eb22a34dc36b
--- /dev/null
+++ b/drivers/hwmon/max31730.c
@@ -0,0 +1,440 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Driver for MAX31730 3-Channel Remote Temperature Sensor
+ *
+ * Copyright (c) 2019 Guenter Roeck <linux@roeck-us.net>
+ */
+
+#include <linux/bits.h>
+#include <linux/err.h>
+#include <linux/i2c.h>
+#include <linux/init.h>
+#include <linux/hwmon.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/of.h>
+#include <linux/slab.h>
+
+/* Addresses scanned */
+static const unsigned short normal_i2c[] = { 0x1c, 0x1d, 0x1e, 0x1f, 0x4c,
+ 0x4d, 0x4e, 0x4f, I2C_CLIENT_END };
+
+/* The MAX31730 registers */
+#define MAX31730_REG_TEMP 0x00
+#define MAX31730_REG_CONF 0x13
+#define MAX31730_STOP BIT(7)
+#define MAX31730_EXTRANGE BIT(1)
+#define MAX31730_REG_TEMP_OFFSET 0x16
+#define MAX31730_TEMP_OFFSET_BASELINE 0x77
+#define MAX31730_REG_OFFSET_ENABLE 0x17
+#define MAX31730_REG_TEMP_MAX 0x20
+#define MAX31730_REG_TEMP_MIN 0x30
+#define MAX31730_REG_STATUS_HIGH 0x32
+#define MAX31730_REG_STATUS_LOW 0x33
+#define MAX31730_REG_CHANNEL_ENABLE 0x35
+#define MAX31730_REG_TEMP_FAULT 0x36
+
+#define MAX31730_REG_MFG_ID 0x50
+#define MAX31730_MFG_ID 0x4d
+#define MAX31730_REG_MFG_REV 0x51
+#define MAX31730_MFG_REV 0x01
+
+#define MAX31730_TEMP_MIN (-128000)
+#define MAX31730_TEMP_MAX 127937
+
+/* Each client has this additional data */
+struct max31730_data {
+ struct i2c_client *client;
+ u8 orig_conf;
+ u8 current_conf;
+ u8 offset_enable;
+ u8 channel_enable;
+};
+
+/*-----------------------------------------------------------------------*/
+
+static inline long max31730_reg_to_mc(s16 temp)
+{
+ return DIV_ROUND_CLOSEST((temp >> 4) * 1000, 16);
+}
+
+static int max31730_write_config(struct max31730_data *data, u8 set_mask,
+ u8 clr_mask)
+{
+ u8 value;
+
+ clr_mask |= MAX31730_EXTRANGE;
+ value = data->current_conf & ~clr_mask;
+ value |= set_mask;
+
+ if (data->current_conf != value) {
+ s32 err;
+
+ err = i2c_smbus_write_byte_data(data->client, MAX31730_REG_CONF,
+ value);
+ if (err)
+ return err;
+ data->current_conf = value;
+ }
+ return 0;
+}
+
+static int max31730_set_enable(struct i2c_client *client, int reg,
+ u8 *confdata, int channel, bool enable)
+{
+ u8 regval = *confdata;
+ int err;
+
+ if (enable)
+ regval |= BIT(channel);
+ else
+ regval &= ~BIT(channel);
+
+ if (regval != *confdata) {
+ err = i2c_smbus_write_byte_data(client, reg, regval);
+ if (err)
+ return err;
+ *confdata = regval;
+ }
+ return 0;
+}
+
+static int max31730_set_offset_enable(struct max31730_data *data, int channel,
+ bool enable)
+{
+ return max31730_set_enable(data->client, MAX31730_REG_OFFSET_ENABLE,
+ &data->offset_enable, channel, enable);
+}
+
+static int max31730_set_channel_enable(struct max31730_data *data, int channel,
+ bool enable)
+{
+ return max31730_set_enable(data->client, MAX31730_REG_CHANNEL_ENABLE,
+ &data->channel_enable, channel, enable);
+}
+
+static int max31730_read(struct device *dev, enum hwmon_sensor_types type,
+ u32 attr, int channel, long *val)
+{
+ struct max31730_data *data = dev_get_drvdata(dev);
+ int regval, reg, offset;
+
+ if (type != hwmon_temp)
+ return -EINVAL;
+
+ switch (attr) {
+ case hwmon_temp_input:
+ if (!(data->channel_enable & BIT(channel)))
+ return -ENODATA;
+ reg = MAX31730_REG_TEMP + (channel * 2);
+ break;
+ case hwmon_temp_max:
+ reg = MAX31730_REG_TEMP_MAX + (channel * 2);
+ break;
+ case hwmon_temp_min:
+ reg = MAX31730_REG_TEMP_MIN;
+ break;
+ case hwmon_temp_enable:
+ *val = !!(data->channel_enable & BIT(channel));
+ return 0;
+ case hwmon_temp_offset:
+ if (!channel)
+ return -EINVAL;
+ if (!(data->offset_enable & BIT(channel))) {
+ *val = 0;
+ return 0;
+ }
+ offset = i2c_smbus_read_byte_data(data->client,
+ MAX31730_REG_TEMP_OFFSET);
+ if (offset < 0)
+ return offset;
+ *val = (offset - MAX31730_TEMP_OFFSET_BASELINE) * 125;
+ return 0;
+ case hwmon_temp_fault:
+ regval = i2c_smbus_read_byte_data(data->client,
+ MAX31730_REG_TEMP_FAULT);
+ if (regval < 0)
+ return regval;
+ *val = !!(regval & BIT(channel));
+ return 0;
+ case hwmon_temp_min_alarm:
+ regval = i2c_smbus_read_byte_data(data->client,
+ MAX31730_REG_STATUS_LOW);
+ if (regval < 0)
+ return regval;
+ *val = !!(regval & BIT(channel));
+ return 0;
+ case hwmon_temp_max_alarm:
+ regval = i2c_smbus_read_byte_data(data->client,
+ MAX31730_REG_STATUS_HIGH);
+ if (regval < 0)
+ return regval;
+ *val = !!(regval & BIT(channel));
+ return 0;
+ default:
+ return -EINVAL;
+ }
+ regval = i2c_smbus_read_word_swapped(data->client, reg);
+ if (regval < 0)
+ return regval;
+
+ *val = max31730_reg_to_mc(regval);
+
+ return 0;
+}
+
+static int max31730_write(struct device *dev, enum hwmon_sensor_types type,
+ u32 attr, int channel, long val)
+{
+ struct max31730_data *data = dev_get_drvdata(dev);
+ int reg, err;
+
+ if (type != hwmon_temp)
+ return -EINVAL;
+
+ switch (attr) {
+ case hwmon_temp_max:
+ reg = MAX31730_REG_TEMP_MAX + channel * 2;
+ break;
+ case hwmon_temp_min:
+ reg = MAX31730_REG_TEMP_MIN;
+ break;
+ case hwmon_temp_enable:
+ if (val != 0 && val != 1)
+ return -EINVAL;
+ return max31730_set_channel_enable(data, channel, val);
+ case hwmon_temp_offset:
+ val = clamp_val(val, -14875, 17000) + 14875;
+ val = DIV_ROUND_CLOSEST(val, 125);
+ err = max31730_set_offset_enable(data, channel,
+ val != MAX31730_TEMP_OFFSET_BASELINE);
+ if (err)
+ return err;
+ return i2c_smbus_write_byte_data(data->client,
+ MAX31730_REG_TEMP_OFFSET, val);
+ default:
+ return -EINVAL;
+ }
+
+ val = clamp_val(val, MAX31730_TEMP_MIN, MAX31730_TEMP_MAX);
+ val = DIV_ROUND_CLOSEST(val << 4, 1000) << 4;
+
+ return i2c_smbus_write_word_swapped(data->client, reg, (u16)val);
+}
+
+static umode_t max31730_is_visible(const void *data,
+ enum hwmon_sensor_types type,
+ u32 attr, int channel)
+{
+ switch (type) {
+ case hwmon_temp:
+ switch (attr) {
+ case hwmon_temp_input:
+ case hwmon_temp_min_alarm:
+ case hwmon_temp_max_alarm:
+ case hwmon_temp_fault:
+ return 0444;
+ case hwmon_temp_min:
+ return channel ? 0444 : 0644;
+ case hwmon_temp_offset:
+ case hwmon_temp_enable:
+ case hwmon_temp_max:
+ return 0644;
+ }
+ break;
+ default:
+ break;
+ }
+ return 0;
+}
+
+static const struct hwmon_channel_info *max31730_info[] = {
+ HWMON_CHANNEL_INFO(chip,
+ HWMON_C_REGISTER_TZ),
+ HWMON_CHANNEL_INFO(temp,
+ HWMON_T_INPUT | HWMON_T_MIN | HWMON_T_MAX |
+ HWMON_T_ENABLE |
+ HWMON_T_MIN_ALARM | HWMON_T_MAX_ALARM,
+ HWMON_T_INPUT | HWMON_T_MIN | HWMON_T_MAX |
+ HWMON_T_OFFSET | HWMON_T_ENABLE |
+ HWMON_T_MIN_ALARM | HWMON_T_MAX_ALARM |
+ HWMON_T_FAULT,
+ HWMON_T_INPUT | HWMON_T_MIN | HWMON_T_MAX |
+ HWMON_T_OFFSET | HWMON_T_ENABLE |
+ HWMON_T_MIN_ALARM | HWMON_T_MAX_ALARM |
+ HWMON_T_FAULT,
+ HWMON_T_INPUT | HWMON_T_MIN | HWMON_T_MAX |
+ HWMON_T_OFFSET | HWMON_T_ENABLE |
+ HWMON_T_MIN_ALARM | HWMON_T_MAX_ALARM |
+ HWMON_T_FAULT
+ ),
+ NULL
+};
+
+static const struct hwmon_ops max31730_hwmon_ops = {
+ .is_visible = max31730_is_visible,
+ .read = max31730_read,
+ .write = max31730_write,
+};
+
+static const struct hwmon_chip_info max31730_chip_info = {
+ .ops = &max31730_hwmon_ops,
+ .info = max31730_info,
+};
+
+static void max31730_remove(void *data)
+{
+ struct max31730_data *max31730 = data;
+ struct i2c_client *client = max31730->client;
+
+ i2c_smbus_write_byte_data(client, MAX31730_REG_CONF,
+ max31730->orig_conf);
+}
+
+static int
+max31730_probe(struct i2c_client *client, const struct i2c_device_id *id)
+{
+ struct device *dev = &client->dev;
+ struct device *hwmon_dev;
+ struct max31730_data *data;
+ int status, err;
+
+ if (!i2c_check_functionality(client->adapter,
+ I2C_FUNC_SMBUS_BYTE_DATA | I2C_FUNC_SMBUS_WORD_DATA))
+ return -EIO;
+
+ data = devm_kzalloc(dev, sizeof(struct max31730_data), GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
+ data->client = client;
+
+ /* Cache original configuration and enable status */
+ status = i2c_smbus_read_byte_data(client, MAX31730_REG_CHANNEL_ENABLE);
+ if (status < 0)
+ return status;
+ data->channel_enable = status;
+
+ status = i2c_smbus_read_byte_data(client, MAX31730_REG_OFFSET_ENABLE);
+ if (status < 0)
+ return status;
+ data->offset_enable = status;
+
+ status = i2c_smbus_read_byte_data(client, MAX31730_REG_CONF);
+ if (status < 0)
+ return status;
+ data->orig_conf = status;
+ data->current_conf = status;
+
+ err = max31730_write_config(data,
+ data->channel_enable ? 0 : MAX31730_STOP,
+ data->channel_enable ? MAX31730_STOP : 0);
+ if (err)
+ return err;
+
+ dev_set_drvdata(dev, data);
+
+ err = devm_add_action_or_reset(dev, max31730_remove, data);
+ if (err)
+ return err;
+
+ hwmon_dev = devm_hwmon_device_register_with_info(dev, client->name,
+ data,
+ &max31730_chip_info,
+ NULL);
+ return PTR_ERR_OR_ZERO(hwmon_dev);
+}
+
+static const struct i2c_device_id max31730_ids[] = {
+ { "max31730", 0, },
+ { }
+};
+MODULE_DEVICE_TABLE(i2c, max31730_ids);
+
+static const struct of_device_id __maybe_unused max31730_of_match[] = {
+ {
+ .compatible = "maxim,max31730",
+ },
+ { },
+};
+MODULE_DEVICE_TABLE(of, max31730_of_match);
+
+static bool max31730_check_reg_temp(struct i2c_client *client,
+ int reg)
+{
+ int regval;
+
+ regval = i2c_smbus_read_byte_data(client, reg + 1);
+ return regval < 0 || (regval & 0x0f);
+}
+
+/* Return 0 if detection is successful, -ENODEV otherwise */
+static int max31730_detect(struct i2c_client *client,
+ struct i2c_board_info *info)
+{
+ struct i2c_adapter *adapter = client->adapter;
+ int regval;
+ int i;
+
+ if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_BYTE_DATA |
+ I2C_FUNC_SMBUS_WORD_DATA))
+ return -ENODEV;
+
+ regval = i2c_smbus_read_byte_data(client, MAX31730_REG_MFG_ID);
+ if (regval != MAX31730_MFG_ID)
+ return -ENODEV;
+ regval = i2c_smbus_read_byte_data(client, MAX31730_REG_MFG_REV);
+ if (regval != MAX31730_MFG_REV)
+ return -ENODEV;
+
+ /* lower 4 bits of temperature and limit registers must be 0 */
+ if (max31730_check_reg_temp(client, MAX31730_REG_TEMP_MIN))
+ return -ENODEV;
+
+ for (i = 0; i < 4; i++) {
+ if (max31730_check_reg_temp(client, MAX31730_REG_TEMP + i * 2))
+ return -ENODEV;
+ if (max31730_check_reg_temp(client,
+ MAX31730_REG_TEMP_MAX + i * 2))
+ return -ENODEV;
+ }
+
+ strlcpy(info->type, "max31730", I2C_NAME_SIZE);
+
+ return 0;
+}
+
+static int __maybe_unused max31730_suspend(struct device *dev)
+{
+ struct max31730_data *data = dev_get_drvdata(dev);
+
+ return max31730_write_config(data, MAX31730_STOP, 0);
+}
+
+static int __maybe_unused max31730_resume(struct device *dev)
+{
+ struct max31730_data *data = dev_get_drvdata(dev);
+
+ return max31730_write_config(data, 0, MAX31730_STOP);
+}
+
+static SIMPLE_DEV_PM_OPS(max31730_pm_ops, max31730_suspend, max31730_resume);
+
+static struct i2c_driver max31730_driver = {
+ .class = I2C_CLASS_HWMON,
+ .driver = {
+ .name = "max31730",
+ .of_match_table = of_match_ptr(max31730_of_match),
+ .pm = &max31730_pm_ops,
+ },
+ .probe = max31730_probe,
+ .id_table = max31730_ids,
+ .detect = max31730_detect,
+ .address_list = normal_i2c,
+};
+
+module_i2c_driver(max31730_driver);
+
+MODULE_AUTHOR("Guenter Roeck <linux@roeck-us.net>");
+MODULE_DESCRIPTION("MAX31730 driver");
+MODULE_LICENSE("GPL");
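The register format above packs a signed temperature into the upper 12 bits
of the 16-bit word at 0.0625 degC per LSB; max31730_reg_to_mc() shifts the
unused low nibble away and scales to millidegrees with rounding. A
positive-value plain-C sketch, not part of the patch; the raw word is an
assumed example:

	#include <stdint.h>
	#include <stdio.h>

	#define DIV_ROUND_CLOSEST(x, d) (((x) + ((d) / 2)) / (d))

	static long reg_to_mc(int16_t temp)
	{
		/* arithmetic shift keeps the sign, as in the driver */
		return DIV_ROUND_CLOSEST((temp >> 4) * 1000L, 16);
	}

	int main(void)
	{
		printf("%ld mdegC\n", reg_to_mc(0x1910));  /* 25063 */
		return 0;
	}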
diff --git a/drivers/hwmon/nct7802.c b/drivers/hwmon/nct7802.c
index f3dd2a17bd42..2e97e56c72c7 100644
--- a/drivers/hwmon/nct7802.c
+++ b/drivers/hwmon/nct7802.c
@@ -23,8 +23,8 @@
static const u8 REG_VOLTAGE[5] = { 0x09, 0x0a, 0x0c, 0x0d, 0x0e };
static const u8 REG_VOLTAGE_LIMIT_LSB[2][5] = {
- { 0x40, 0x00, 0x42, 0x44, 0x46 },
- { 0x3f, 0x00, 0x41, 0x43, 0x45 },
+ { 0x46, 0x00, 0x40, 0x42, 0x44 },
+ { 0x45, 0x00, 0x3f, 0x41, 0x43 },
};
static const u8 REG_VOLTAGE_LIMIT_MSB[5] = { 0x48, 0x00, 0x47, 0x47, 0x48 };
@@ -58,6 +58,8 @@ static const u8 REG_VOLTAGE_LIMIT_MSB_SHIFT[2][5] = {
struct nct7802_data {
struct regmap *regmap;
struct mutex access_lock; /* for multi-byte read and write operations */
+ u8 in_status;
+ struct mutex in_alarm_lock;
};
static ssize_t temp_type_show(struct device *dev,
@@ -368,6 +370,66 @@ static ssize_t in_store(struct device *dev, struct device_attribute *attr,
return err ? : count;
}
+static ssize_t in_alarm_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct sensor_device_attribute_2 *sattr = to_sensor_dev_attr_2(attr);
+ struct nct7802_data *data = dev_get_drvdata(dev);
+ int volt, min, max, ret;
+ unsigned int val;
+
+ mutex_lock(&data->in_alarm_lock);
+
+ /*
+ * The SMI Voltage status register is the only register giving a status
+ * for voltages. A bit is set for each input crossing a threshold, in
+ * both direction, but the "inside" or "outside" limits info is not
+ * available. Also this register is cleared on read.
+ * Note: this is not explicitly spelled out in the datasheet, but
+ * from experiment.
+ * To deal with this we use a status cache with one validity bit and
+ * one status bit for each input. Validity is cleared at startup and
+ * each time the register reports a change, and the status is processed
+ * by software based on current input value and limits.
+ */
+ ret = regmap_read(data->regmap, 0x1e, &val); /* SMI Voltage status */
+ if (ret < 0)
+ goto abort;
+
+ /* invalidate cached status for all inputs crossing a threshold */
+ data->in_status &= ~((val & 0x0f) << 4);
+
+ /* if cached status for requested input is invalid, update it */
+ if (!(data->in_status & (0x10 << sattr->index))) {
+ ret = nct7802_read_voltage(data, sattr->nr, 0);
+ if (ret < 0)
+ goto abort;
+ volt = ret;
+
+ ret = nct7802_read_voltage(data, sattr->nr, 1);
+ if (ret < 0)
+ goto abort;
+ min = ret;
+
+ ret = nct7802_read_voltage(data, sattr->nr, 2);
+ if (ret < 0)
+ goto abort;
+ max = ret;
+
+ if (volt < min || volt > max)
+ data->in_status |= (1 << sattr->index);
+ else
+ data->in_status &= ~(1 << sattr->index);
+
+ data->in_status |= 0x10 << sattr->index;
+ }
+
+ ret = sprintf(buf, "%u\n", !!(data->in_status & (1 << sattr->index)));
+abort:
+ mutex_unlock(&data->in_alarm_lock);
+ return ret;
+}
+
static ssize_t temp_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
@@ -660,7 +722,7 @@ static const struct attribute_group nct7802_temp_group = {
static SENSOR_DEVICE_ATTR_2_RO(in0_input, in, 0, 0);
static SENSOR_DEVICE_ATTR_2_RW(in0_min, in, 0, 1);
static SENSOR_DEVICE_ATTR_2_RW(in0_max, in, 0, 2);
-static SENSOR_DEVICE_ATTR_2_RO(in0_alarm, alarm, 0x1e, 3);
+static SENSOR_DEVICE_ATTR_2_RO(in0_alarm, in_alarm, 0, 3);
static SENSOR_DEVICE_ATTR_2_RW(in0_beep, beep, 0x5a, 3);
static SENSOR_DEVICE_ATTR_2_RO(in1_input, in, 1, 0);
@@ -668,19 +730,19 @@ static SENSOR_DEVICE_ATTR_2_RO(in1_input, in, 1, 0);
static SENSOR_DEVICE_ATTR_2_RO(in2_input, in, 2, 0);
static SENSOR_DEVICE_ATTR_2_RW(in2_min, in, 2, 1);
static SENSOR_DEVICE_ATTR_2_RW(in2_max, in, 2, 2);
-static SENSOR_DEVICE_ATTR_2_RO(in2_alarm, alarm, 0x1e, 0);
+static SENSOR_DEVICE_ATTR_2_RO(in2_alarm, in_alarm, 2, 0);
static SENSOR_DEVICE_ATTR_2_RW(in2_beep, beep, 0x5a, 0);
static SENSOR_DEVICE_ATTR_2_RO(in3_input, in, 3, 0);
static SENSOR_DEVICE_ATTR_2_RW(in3_min, in, 3, 1);
static SENSOR_DEVICE_ATTR_2_RW(in3_max, in, 3, 2);
-static SENSOR_DEVICE_ATTR_2_RO(in3_alarm, alarm, 0x1e, 1);
+static SENSOR_DEVICE_ATTR_2_RO(in3_alarm, in_alarm, 3, 1);
static SENSOR_DEVICE_ATTR_2_RW(in3_beep, beep, 0x5a, 1);
static SENSOR_DEVICE_ATTR_2_RO(in4_input, in, 4, 0);
static SENSOR_DEVICE_ATTR_2_RW(in4_min, in, 4, 1);
static SENSOR_DEVICE_ATTR_2_RW(in4_max, in, 4, 2);
-static SENSOR_DEVICE_ATTR_2_RO(in4_alarm, alarm, 0x1e, 2);
+static SENSOR_DEVICE_ATTR_2_RO(in4_alarm, in_alarm, 4, 2);
static SENSOR_DEVICE_ATTR_2_RW(in4_beep, beep, 0x5a, 2);
static struct attribute *nct7802_in_attrs[] = {
@@ -1011,6 +1073,7 @@ static int nct7802_probe(struct i2c_client *client,
return PTR_ERR(data->regmap);
mutex_init(&data->access_lock);
+ mutex_init(&data->in_alarm_lock);
ret = nct7802_init_chip(data);
if (ret < 0)
diff --git a/drivers/hwmon/pmbus/Kconfig b/drivers/hwmon/pmbus/Kconfig
index 59859979571d..a9ea06204767 100644
--- a/drivers/hwmon/pmbus/Kconfig
+++ b/drivers/hwmon/pmbus/Kconfig
@@ -20,8 +20,8 @@ config SENSORS_PMBUS
help
If you say yes here you get hardware monitoring support for generic
PMBus devices, including but not limited to ADP4000, BMR453, BMR454,
- MDT040, NCP4200, NCP4208, PDT003, PDT006, PDT012, TPS40400, TPS544B20,
- TPS544B25, TPS544C20, TPS544C25, and UDT020.
+ MAX20796, MDT040, NCP4200, NCP4208, PDT003, PDT006, PDT012, TPS40400,
+ TPS544B20, TPS544B25, TPS544C20, TPS544C25, and UDT020.
This driver can also be built as a module. If so, the module will
be called pmbus.
@@ -145,6 +145,15 @@ config SENSORS_MAX16064
This driver can also be built as a module. If so, the module will
be called max16064.
+config SENSORS_MAX20730
+ tristate "Maxim MAX20730, MAX20734, MAX20743"
+ help
+ If you say yes here you get hardware monitoring support for Maxim
+ MAX20730, MAX20734, and MAX20743.
+
+ This driver can also be built as a module. If so, the module will
+ be called max20730.
+
config SENSORS_MAX20751
tristate "Maxim MAX20751"
help
@@ -200,20 +209,20 @@ config SENSORS_TPS40422
be called tps40422.
config SENSORS_TPS53679
- tristate "TI TPS53679"
+ tristate "TI TPS53679, TPS53688"
help
If you say yes here you get hardware monitoring support for TI
- TPS53679.
+ TPS53679 and TPS53688.
This driver can also be built as a module. If so, the module will
be called tps53679.
config SENSORS_UCD9000
- tristate "TI UCD90120, UCD90124, UCD90160, UCD9090, UCD90910"
+ tristate "TI UCD90120, UCD90124, UCD90160, UCD90320, UCD9090, UCD90910"
help
If you say yes here you get hardware monitoring support for TI
- UCD90120, UCD90124, UCD90160, UCD9090, UCD90910, Sequencer and System
- Health Controllers.
+ UCD90120, UCD90124, UCD90160, UCD90320, UCD9090, UCD90910, Sequencer
+ and System Health Controllers.
This driver can also be built as a module. If so, the module will
be called ucd9000.
@@ -228,6 +237,15 @@ config SENSORS_UCD9200
This driver can also be built as a module. If so, the module will
be called ucd9200.
+config SENSORS_XDPE122
+ tristate "Infineon XDPE122 family"
+ help
+ If you say yes here you get hardware monitoring support for Infineon
+ XDPE12254 and XDPE12284 devices.
+
+ This driver can also be built as a module. If so, the module will
+ be called xdpe12284.
+
config SENSORS_ZL6100
tristate "Intersil ZL6100 and compatibles"
help
diff --git a/drivers/hwmon/pmbus/Makefile b/drivers/hwmon/pmbus/Makefile
index 3f8c1014938b..5feb45806123 100644
--- a/drivers/hwmon/pmbus/Makefile
+++ b/drivers/hwmon/pmbus/Makefile
@@ -17,6 +17,7 @@ obj-$(CONFIG_SENSORS_LM25066) += lm25066.o
obj-$(CONFIG_SENSORS_LTC2978) += ltc2978.o
obj-$(CONFIG_SENSORS_LTC3815) += ltc3815.o
obj-$(CONFIG_SENSORS_MAX16064) += max16064.o
+obj-$(CONFIG_SENSORS_MAX20730) += max20730.o
obj-$(CONFIG_SENSORS_MAX20751) += max20751.o
obj-$(CONFIG_SENSORS_MAX31785) += max31785.o
obj-$(CONFIG_SENSORS_MAX34440) += max34440.o
@@ -26,4 +27,5 @@ obj-$(CONFIG_SENSORS_TPS40422) += tps40422.o
obj-$(CONFIG_SENSORS_TPS53679) += tps53679.o
obj-$(CONFIG_SENSORS_UCD9000) += ucd9000.o
obj-$(CONFIG_SENSORS_UCD9200) += ucd9200.o
+obj-$(CONFIG_SENSORS_XDPE122) += xdpe12284.o
obj-$(CONFIG_SENSORS_ZL6100) += zl6100.o
diff --git a/drivers/hwmon/pmbus/ibm-cffps.c b/drivers/hwmon/pmbus/ibm-cffps.c
index d359b76bcb36..3795fe55b84f 100644
--- a/drivers/hwmon/pmbus/ibm-cffps.c
+++ b/drivers/hwmon/pmbus/ibm-cffps.c
@@ -20,12 +20,15 @@
#define CFFPS_FRU_CMD 0x9A
#define CFFPS_PN_CMD 0x9B
+#define CFFPS_HEADER_CMD 0x9C
#define CFFPS_SN_CMD 0x9E
+#define CFFPS_MAX_POWER_OUT_CMD 0xA7
#define CFFPS_CCIN_CMD 0xBD
#define CFFPS_FW_CMD 0xFA
#define CFFPS1_FW_NUM_BYTES 4
#define CFFPS2_FW_NUM_WORDS 3
#define CFFPS_SYS_CONFIG_CMD 0xDA
+#define CFFPS_12VCS_VOUT_CMD 0xDE
#define CFFPS_INPUT_HISTORY_CMD 0xD6
#define CFFPS_INPUT_HISTORY_SIZE 100
@@ -44,22 +47,21 @@
#define CFFPS_MFR_VAUX_FAULT BIT(6)
#define CFFPS_MFR_CURRENT_SHARE_WARNING BIT(7)
-/*
- * LED off state actually relinquishes LED control to PSU firmware, so it can
- * turn on the LED for faults.
- */
-#define CFFPS_LED_OFF 0
#define CFFPS_LED_BLINK BIT(0)
#define CFFPS_LED_ON BIT(1)
+#define CFFPS_LED_OFF BIT(2)
#define CFFPS_BLINK_RATE_MS 250
enum {
CFFPS_DEBUGFS_INPUT_HISTORY = 0,
CFFPS_DEBUGFS_FRU,
CFFPS_DEBUGFS_PN,
+ CFFPS_DEBUGFS_HEADER,
CFFPS_DEBUGFS_SN,
+ CFFPS_DEBUGFS_MAX_POWER_OUT,
CFFPS_DEBUGFS_CCIN,
CFFPS_DEBUGFS_FW,
+ CFFPS_DEBUGFS_ON_OFF_CONFIG,
CFFPS_DEBUGFS_NUM_ENTRIES
};
@@ -136,15 +138,15 @@ static ssize_t ibm_cffps_read_input_history(struct ibm_cffps *psu,
psu->input_history.byte_count);
}
-static ssize_t ibm_cffps_debugfs_op(struct file *file, char __user *buf,
- size_t count, loff_t *ppos)
+static ssize_t ibm_cffps_debugfs_read(struct file *file, char __user *buf,
+ size_t count, loff_t *ppos)
{
u8 cmd;
int i, rc;
int *idxp = file->private_data;
int idx = *idxp;
struct ibm_cffps *psu = to_psu(idxp, idx);
- char data[I2C_SMBUS_BLOCK_MAX] = { 0 };
+ char data[I2C_SMBUS_BLOCK_MAX + 2] = { 0 };
pmbus_set_page(psu->client, 0);
@@ -157,9 +159,20 @@ static ssize_t ibm_cffps_debugfs_op(struct file *file, char __user *buf,
case CFFPS_DEBUGFS_PN:
cmd = CFFPS_PN_CMD;
break;
+ case CFFPS_DEBUGFS_HEADER:
+ cmd = CFFPS_HEADER_CMD;
+ break;
case CFFPS_DEBUGFS_SN:
cmd = CFFPS_SN_CMD;
break;
+ case CFFPS_DEBUGFS_MAX_POWER_OUT:
+ rc = i2c_smbus_read_word_swapped(psu->client,
+ CFFPS_MAX_POWER_OUT_CMD);
+ if (rc < 0)
+ return rc;
+
+ rc = snprintf(data, I2C_SMBUS_BLOCK_MAX, "%d", rc);
+ goto done;
case CFFPS_DEBUGFS_CCIN:
rc = i2c_smbus_read_word_swapped(psu->client, CFFPS_CCIN_CMD);
if (rc < 0)
@@ -199,6 +212,14 @@ static ssize_t ibm_cffps_debugfs_op(struct file *file, char __user *buf,
return -EOPNOTSUPP;
}
goto done;
+ case CFFPS_DEBUGFS_ON_OFF_CONFIG:
+ rc = i2c_smbus_read_byte_data(psu->client,
+ PMBUS_ON_OFF_CONFIG);
+ if (rc < 0)
+ return rc;
+
+ rc = snprintf(data, 3, "%02x", rc);
+ goto done;
default:
return -EINVAL;
}
@@ -214,9 +235,42 @@ done:
return simple_read_from_buffer(buf, count, ppos, data, rc);
}
+static ssize_t ibm_cffps_debugfs_write(struct file *file,
+ const char __user *buf, size_t count,
+ loff_t *ppos)
+{
+ u8 data;
+ ssize_t rc;
+ int *idxp = file->private_data;
+ int idx = *idxp;
+ struct ibm_cffps *psu = to_psu(idxp, idx);
+
+ switch (idx) {
+ case CFFPS_DEBUGFS_ON_OFF_CONFIG:
+ pmbus_set_page(psu->client, 0);
+
+ rc = simple_write_to_buffer(&data, 1, ppos, buf, count);
+ if (rc <= 0)
+ return rc;
+
+ rc = i2c_smbus_write_byte_data(psu->client,
+ PMBUS_ON_OFF_CONFIG, data);
+ if (rc)
+ return rc;
+
+ rc = 1;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return rc;
+}
+
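As a usage illustration, the new on_off_config entry behaves like any debugfs file: reads return two hex digits, and the write handler consumes one raw byte. A minimal user-space sketch follows; the path is an assumption, since the actual debugfs directory name depends on the device instance:

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

/* hypothetical debugfs path; the real directory name is device specific */
#define CFG_PATH "/sys/kernel/debug/ibm-cffps0/on_off_config"

int main(void)
{
	char buf[4] = { 0 };
	unsigned char raw = 0x1a;	/* example ON_OFF_CONFIG value */
	int fd;

	fd = open(CFG_PATH, O_RDONLY);
	if (fd < 0)
		return 1;
	if (read(fd, buf, 2) == 2)	/* handler formats two hex digits */
		printf("ON_OFF_CONFIG: %s\n", buf);
	close(fd);

	/* the write handler expects one raw byte, not hex text */
	fd = open(CFG_PATH, O_WRONLY);
	if (fd < 0)
		return 1;
	write(fd, &raw, 1);
	close(fd);
	return 0;
}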
static const struct file_operations ibm_cffps_fops = {
.llseek = noop_llseek,
- .read = ibm_cffps_debugfs_op,
+ .read = ibm_cffps_debugfs_read,
+ .write = ibm_cffps_debugfs_write,
.open = simple_open,
};
@@ -293,6 +347,9 @@ static int ibm_cffps_read_word_data(struct i2c_client *client, int page,
if (mfr & CFFPS_MFR_PS_KILL)
rc |= PB_STATUS_OFF;
break;
+ case PMBUS_VIRT_READ_VMON:
+ rc = pmbus_read_word_data(client, page, CFFPS_12VCS_VOUT_CMD);
+ break;
default:
rc = -ENODATA;
break;
@@ -375,6 +432,9 @@ static void ibm_cffps_create_led_class(struct ibm_cffps *psu)
rc = devm_led_classdev_register(dev, &psu->led);
if (rc)
dev_warn(dev, "failed to register led class: %d\n", rc);
+ else
+ i2c_smbus_write_byte_data(client, CFFPS_SYS_CONFIG_CMD,
+ CFFPS_LED_OFF);
}
static struct pmbus_driver_info ibm_cffps_info[] = {
@@ -396,7 +456,7 @@ static struct pmbus_driver_info ibm_cffps_info[] = {
PMBUS_HAVE_TEMP2 | PMBUS_HAVE_TEMP3 |
PMBUS_HAVE_STATUS_VOUT | PMBUS_HAVE_STATUS_IOUT |
PMBUS_HAVE_STATUS_INPUT | PMBUS_HAVE_STATUS_TEMP |
- PMBUS_HAVE_STATUS_FAN12,
+ PMBUS_HAVE_STATUS_FAN12 | PMBUS_HAVE_VMON,
.func[1] = PMBUS_HAVE_VOUT | PMBUS_HAVE_IOUT |
PMBUS_HAVE_TEMP | PMBUS_HAVE_TEMP2 | PMBUS_HAVE_TEMP3 |
PMBUS_HAVE_STATUS_VOUT | PMBUS_HAVE_STATUS_IOUT,
@@ -486,15 +546,24 @@ static int ibm_cffps_probe(struct i2c_client *client,
debugfs_create_file("part_number", 0444, ibm_cffps_dir,
&psu->debugfs_entries[CFFPS_DEBUGFS_PN],
&ibm_cffps_fops);
+ debugfs_create_file("header", 0444, ibm_cffps_dir,
+ &psu->debugfs_entries[CFFPS_DEBUGFS_HEADER],
+ &ibm_cffps_fops);
debugfs_create_file("serial_number", 0444, ibm_cffps_dir,
&psu->debugfs_entries[CFFPS_DEBUGFS_SN],
&ibm_cffps_fops);
+ debugfs_create_file("max_power_out", 0444, ibm_cffps_dir,
+ &psu->debugfs_entries[CFFPS_DEBUGFS_MAX_POWER_OUT],
+ &ibm_cffps_fops);
debugfs_create_file("ccin", 0444, ibm_cffps_dir,
&psu->debugfs_entries[CFFPS_DEBUGFS_CCIN],
&ibm_cffps_fops);
debugfs_create_file("fw_version", 0444, ibm_cffps_dir,
&psu->debugfs_entries[CFFPS_DEBUGFS_FW],
&ibm_cffps_fops);
+ debugfs_create_file("on_off_config", 0644, ibm_cffps_dir,
+ &psu->debugfs_entries[CFFPS_DEBUGFS_ON_OFF_CONFIG],
+ &ibm_cffps_fops);
return 0;
}
diff --git a/drivers/hwmon/pmbus/max20730.c b/drivers/hwmon/pmbus/max20730.c
new file mode 100644
index 000000000000..294e2212f61e
--- /dev/null
+++ b/drivers/hwmon/pmbus/max20730.c
@@ -0,0 +1,372 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Driver for MAX20730, MAX20734, and MAX20743 Integrated, Step-Down
+ * Switching Regulators
+ *
+ * Copyright 2019 Google LLC.
+ */
+
+#include <linux/bits.h>
+#include <linux/err.h>
+#include <linux/i2c.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/of_device.h>
+#include <linux/pmbus.h>
+#include <linux/util_macros.h>
+#include "pmbus.h"
+
+enum chips {
+ max20730,
+ max20734,
+ max20743
+};
+
+struct max20730_data {
+ enum chips id;
+ struct pmbus_driver_info info;
+ struct mutex lock; /* Used to protect against parallel writes */
+ u16 mfr_devset1;
+};
+
+#define to_max20730_data(x) container_of(x, struct max20730_data, info)
+
+#define MAX20730_MFR_DEVSET1 0xd2
+
+/*
+ * Convert a discrete value to direct data format. Strictly speaking, all
+ * passed values are constants, so we could do the calculation manually. On
+ * the downside, that would make the driver more difficult to maintain, so
+ * let's use this approach.
+ */
+static u16 val_to_direct(int v, enum pmbus_sensor_classes class,
+ const struct pmbus_driver_info *info)
+{
+ int R = info->R[class] - 3; /* take milli-units into account */
+ int b = info->b[class] * 1000;
+ long d;
+
+ d = v * info->m[class] + b;
+ /*
+ * R < 0 is true for all callers, so we don't need to bother
+ * about the R > 0 case.
+ */
+ while (R < 0) {
+ d = DIV_ROUND_CLOSEST(d, 10);
+ R++;
+ }
+ return (u16)d;
+}
+
+static long direct_to_val(u16 w, enum pmbus_sensor_classes class,
+ const struct pmbus_driver_info *info)
+{
+ int R = info->R[class] - 3;
+ int b = info->b[class] * 1000;
+ int m = info->m[class];
+ long d = (s16)w;
+
+ if (m == 0)
+ return 0;
+
+ while (R < 0) {
+ d *= 10;
+ R++;
+ }
+ d = (d - b) / m;
+ return d;
+}
+
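As a sanity check on the two helpers above, here is a stand-alone sketch that applies the MAX20730 temperature coefficients from the info table further down (m = 21, b = 5887, R = -1) to a 150 degC limit. The extra R - 3 accounts for the milli-unit scaling, exactly as in the driver; expect a register value of 904, with the round trip landing near 150000 mdegC:

#include <stdio.h>

#define DIV_ROUND_CLOSEST(x, d)	(((x) + (d) / 2) / (d))

/* MAX20730 temperature coefficients: m = 21, b = 5887, R = -1 */
static const int m = 21, b = 5887, R = -1;

static unsigned short val_to_direct(long v)
{
	int r = R - 3;			/* take milli-units into account */
	long d = v * m + (long)b * 1000;

	while (r < 0) {
		d = DIV_ROUND_CLOSEST(d, 10);
		r++;
	}
	return (unsigned short)d;
}

static long direct_to_val(unsigned short w)
{
	int r = R - 3;
	long d = (short)w;

	while (r < 0) {
		d *= 10;
		r++;
	}
	return (d - (long)b * 1000) / m;
}

int main(void)
{
	unsigned short reg = val_to_direct(150000);	/* 150 degC in mdegC */

	printf("reg=%u back=%ld mdegC\n", reg, direct_to_val(reg));
	return 0;
}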
+static u32 max_current[][5] = {
+ [max20730] = { 13000, 16600, 20100, 23600 },
+ [max20734] = { 21000, 27000, 32000, 38000 },
+ [max20743] = { 18900, 24100, 29200, 34100 },
+};
+
+static int max20730_read_word_data(struct i2c_client *client, int page, int reg)
+{
+ const struct pmbus_driver_info *info = pmbus_get_driver_info(client);
+ const struct max20730_data *data = to_max20730_data(info);
+ int ret = 0;
+ u32 max_c;
+
+ switch (reg) {
+ case PMBUS_OT_FAULT_LIMIT:
+ switch ((data->mfr_devset1 >> 11) & 0x3) {
+ case 0x0:
+ ret = val_to_direct(150000, PSC_TEMPERATURE, info);
+ break;
+ case 0x1:
+ ret = val_to_direct(130000, PSC_TEMPERATURE, info);
+ break;
+ default:
+ ret = -ENODATA;
+ break;
+ }
+ break;
+ case PMBUS_IOUT_OC_FAULT_LIMIT:
+ max_c = max_current[data->id][(data->mfr_devset1 >> 5) & 0x3];
+ ret = val_to_direct(max_c, PSC_CURRENT_OUT, info);
+ break;
+ default:
+ ret = -ENODATA;
+ break;
+ }
+ return ret;
+}
+
+static int max20730_write_word_data(struct i2c_client *client, int page,
+ int reg, u16 word)
+{
+ struct pmbus_driver_info *info;
+ struct max20730_data *data;
+ u16 devset1;
+ int ret = 0;
+ int idx;
+
+ info = (struct pmbus_driver_info *)pmbus_get_driver_info(client);
+ data = to_max20730_data(info);
+
+ mutex_lock(&data->lock);
+ devset1 = data->mfr_devset1;
+
+ switch (reg) {
+ case PMBUS_OT_FAULT_LIMIT:
+ devset1 &= ~(BIT(11) | BIT(12));
+ if (direct_to_val(word, PSC_TEMPERATURE, info) < 140000)
+ devset1 |= BIT(11);
+ break;
+ case PMBUS_IOUT_OC_FAULT_LIMIT:
+ devset1 &= ~(BIT(5) | BIT(6));
+
+ idx = find_closest(direct_to_val(word, PSC_CURRENT_OUT, info),
+ max_current[data->id], 4);
+ devset1 |= (idx << 5);
+ break;
+ default:
+ ret = -ENODATA;
+ break;
+ }
+
+ if (!ret && devset1 != data->mfr_devset1) {
+ ret = i2c_smbus_write_word_data(client, MAX20730_MFR_DEVSET1,
+ devset1);
+ if (!ret) {
+ data->mfr_devset1 = devset1;
+ pmbus_clear_cache(client);
+ }
+ }
+ mutex_unlock(&data->lock);
+ return ret;
+}
+
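The MFR_DEVSET1 fields used by the two functions above can be summarized with a small decode sketch. The bit positions and limit values mirror the driver code (bits 12:11 select the over-temperature limit, bits 6:5 index the max_current table); the example register value is arbitrary:

#include <stdio.h>

static const unsigned int max20730_max_current[] = {
	13000, 16600, 20100, 23600,	/* mA, from max_current[max20730] */
};

int main(void)
{
	unsigned short devset1 = 0x0820;	/* example register value */
	unsigned int ot_sel = (devset1 >> 11) & 0x3;
	unsigned int oc_sel = (devset1 >> 5) & 0x3;

	printf("OT limit: %s\n", ot_sel == 0 ? "150 degC" :
	       ot_sel == 1 ? "130 degC" : "reserved");
	printf("OC limit: %u mA\n", max20730_max_current[oc_sel]);
	return 0;
}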
+static const struct pmbus_driver_info max20730_info[] = {
+ [max20730] = {
+ .pages = 1,
+ .read_word_data = max20730_read_word_data,
+ .write_word_data = max20730_write_word_data,
+
+ /* Source: Maxim AN6042 */
+ .format[PSC_TEMPERATURE] = direct,
+ .m[PSC_TEMPERATURE] = 21,
+ .b[PSC_TEMPERATURE] = 5887,
+ .R[PSC_TEMPERATURE] = -1,
+
+ .format[PSC_VOLTAGE_IN] = direct,
+ .m[PSC_VOLTAGE_IN] = 3609,
+ .b[PSC_VOLTAGE_IN] = 0,
+ .R[PSC_VOLTAGE_IN] = -2,
+
+ /*
+ * Values in the datasheet are adjusted for temperature and
+ * for the relationship between Vin and Vout.
+ * Unfortunately, the datasheet suggests that Vout measurement
+ * may be scaled with a resistor array. This is indeed the case
+ * at least on the evaluation boards. As a result, any in-driver
+ * adjustments would either be wrong or require elaborate means
+ * to configure the scaling. Instead of doing that, just report
+ * raw values and let userspace handle adjustments.
+ */
+ .format[PSC_CURRENT_OUT] = direct,
+ .m[PSC_CURRENT_OUT] = 153,
+ .b[PSC_CURRENT_OUT] = 4976,
+ .R[PSC_CURRENT_OUT] = -1,
+
+ .format[PSC_VOLTAGE_OUT] = linear,
+
+ .func[0] = PMBUS_HAVE_VIN |
+ PMBUS_HAVE_VOUT | PMBUS_HAVE_STATUS_VOUT |
+ PMBUS_HAVE_IOUT | PMBUS_HAVE_STATUS_IOUT |
+ PMBUS_HAVE_TEMP | PMBUS_HAVE_STATUS_TEMP,
+ },
+ [max20734] = {
+ .pages = 1,
+ .read_word_data = max20730_read_word_data,
+ .write_word_data = max20730_write_word_data,
+
+ /* Source: Maxim AN6209 */
+ .format[PSC_TEMPERATURE] = direct,
+ .m[PSC_TEMPERATURE] = 21,
+ .b[PSC_TEMPERATURE] = 5887,
+ .R[PSC_TEMPERATURE] = -1,
+
+ .format[PSC_VOLTAGE_IN] = direct,
+ .m[PSC_VOLTAGE_IN] = 3592,
+ .b[PSC_VOLTAGE_IN] = 0,
+ .R[PSC_VOLTAGE_IN] = -2,
+
+ .format[PSC_CURRENT_OUT] = direct,
+ .m[PSC_CURRENT_OUT] = 111,
+ .b[PSC_CURRENT_OUT] = 3461,
+ .R[PSC_CURRENT_OUT] = -1,
+
+ .format[PSC_VOLTAGE_OUT] = linear,
+
+ .func[0] = PMBUS_HAVE_VIN |
+ PMBUS_HAVE_VOUT | PMBUS_HAVE_STATUS_VOUT |
+ PMBUS_HAVE_IOUT | PMBUS_HAVE_STATUS_IOUT |
+ PMBUS_HAVE_TEMP | PMBUS_HAVE_STATUS_TEMP,
+ },
+ [max20743] = {
+ .pages = 1,
+ .read_word_data = max20730_read_word_data,
+ .write_word_data = max20730_write_word_data,
+
+ /* Source: Maxim AN6042 */
+ .format[PSC_TEMPERATURE] = direct,
+ .m[PSC_TEMPERATURE] = 21,
+ .b[PSC_TEMPERATURE] = 5887,
+ .R[PSC_TEMPERATURE] = -1,
+
+ .format[PSC_VOLTAGE_IN] = direct,
+ .m[PSC_VOLTAGE_IN] = 3597,
+ .b[PSC_VOLTAGE_IN] = 0,
+ .R[PSC_VOLTAGE_IN] = -2,
+
+ .format[PSC_CURRENT_OUT] = direct,
+ .m[PSC_CURRENT_OUT] = 95,
+ .b[PSC_CURRENT_OUT] = 5014,
+ .R[PSC_CURRENT_OUT] = -1,
+
+ .format[PSC_VOLTAGE_OUT] = linear,
+
+ .func[0] = PMBUS_HAVE_VIN |
+ PMBUS_HAVE_VOUT | PMBUS_HAVE_STATUS_VOUT |
+ PMBUS_HAVE_IOUT | PMBUS_HAVE_STATUS_IOUT |
+ PMBUS_HAVE_TEMP | PMBUS_HAVE_STATUS_TEMP,
+ },
+};
+
+static int max20730_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ struct device *dev = &client->dev;
+ u8 buf[I2C_SMBUS_BLOCK_MAX + 1];
+ struct max20730_data *data;
+ enum chips chip_id;
+ int ret;
+
+ if (!i2c_check_functionality(client->adapter,
+ I2C_FUNC_SMBUS_READ_BYTE_DATA |
+ I2C_FUNC_SMBUS_READ_WORD_DATA |
+ I2C_FUNC_SMBUS_BLOCK_DATA))
+ return -ENODEV;
+
+ ret = i2c_smbus_read_block_data(client, PMBUS_MFR_ID, buf);
+ if (ret < 0) {
+ dev_err(&client->dev, "Failed to read Manufacturer ID\n");
+ return ret;
+ }
+ if (ret != 5 || strncmp(buf, "MAXIM", 5)) {
+ buf[ret] = '\0';
+ dev_err(dev, "Unsupported Manufacturer ID '%s'\n", buf);
+ return -ENODEV;
+ }
+
+ /*
+ * The chips support reading PMBUS_MFR_MODEL. On both MAX20730
+ * and MAX20734, reading it returns M20743. Presumably that is
+ * the reason why the command is not documented. Unfortunately,
+ * that means that there is no reliable means to detect the chip.
+ * However, we can at least detect the chip series. Compare
+ * the returned value against 'M20743' and bail out if there is
+ * a mismatch. If that doesn't work for all chips, we may have
+ * to remove this check.
+ */
+ ret = i2c_smbus_read_block_data(client, PMBUS_MFR_MODEL, buf);
+ if (ret < 0) {
+ dev_err(dev, "Failed to read Manufacturer Model\n");
+ return ret;
+ }
+ if (ret != 6 || strncmp(buf, "M20743", 6)) {
+ buf[ret] = '\0';
+ dev_err(dev, "Unsupported Manufacturer Model '%s'\n", buf);
+ return -ENODEV;
+ }
+
+ ret = i2c_smbus_read_block_data(client, PMBUS_MFR_REVISION, buf);
+ if (ret < 0) {
+ dev_err(dev, "Failed to read Manufacturer Revision\n");
+ return ret;
+ }
+ if (ret != 1 || buf[0] != 'F') {
+ buf[ret] = '\0';
+ dev_err(dev, "Unsupported Manufacturer Revision '%s'\n", buf);
+ return -ENODEV;
+ }
+
+ if (client->dev.of_node)
+ chip_id = (enum chips)of_device_get_match_data(dev);
+ else
+ chip_id = id->driver_data;
+
+ data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+ data->id = chip_id;
+ mutex_init(&data->lock);
+ memcpy(&data->info, &max20730_info[chip_id], sizeof(data->info));
+
+ ret = i2c_smbus_read_word_data(client, MAX20730_MFR_DEVSET1);
+ if (ret < 0)
+ return ret;
+ data->mfr_devset1 = ret;
+
+ return pmbus_do_probe(client, id, &data->info);
+}
+
+static const struct i2c_device_id max20730_id[] = {
+ { "max20730", max20730 },
+ { "max20734", max20734 },
+ { "max20743", max20743 },
+ { },
+};
+
+MODULE_DEVICE_TABLE(i2c, max20730_id);
+
+static const struct of_device_id max20730_of_match[] = {
+ { .compatible = "maxim,max20730", .data = (void *)max20730 },
+ { .compatible = "maxim,max20734", .data = (void *)max20734 },
+ { .compatible = "maxim,max20743", .data = (void *)max20743 },
+ { },
+};
+
+MODULE_DEVICE_TABLE(of, max20730_of_match);
+
+static struct i2c_driver max20730_driver = {
+ .driver = {
+ .name = "max20730",
+ .of_match_table = max20730_of_match,
+ },
+ .probe = max20730_probe,
+ .remove = pmbus_do_remove,
+ .id_table = max20730_id,
+};
+
+module_i2c_driver(max20730_driver);
+
+MODULE_AUTHOR("Guenter Roeck <linux@roeck-us.net>");
+MODULE_DESCRIPTION("PMBus driver for Maxim MAX20730 / MAX20734 / MAX20743");
+MODULE_LICENSE("GPL");
diff --git a/drivers/hwmon/pmbus/max20751.c b/drivers/hwmon/pmbus/max20751.c
index ee5f0cdbde06..da3c38cb9a5c 100644
--- a/drivers/hwmon/pmbus/max20751.c
+++ b/drivers/hwmon/pmbus/max20751.c
@@ -16,7 +16,7 @@ static struct pmbus_driver_info max20751_info = {
.pages = 1,
.format[PSC_VOLTAGE_IN] = linear,
.format[PSC_VOLTAGE_OUT] = vid,
- .vrm_version = vr12,
+ .vrm_version[0] = vr12,
.format[PSC_TEMPERATURE] = linear,
.format[PSC_CURRENT_OUT] = linear,
.format[PSC_POWER] = linear,
diff --git a/drivers/hwmon/pmbus/pmbus.c b/drivers/hwmon/pmbus/pmbus.c
index c0bc43d01018..51e8312b6c2d 100644
--- a/drivers/hwmon/pmbus/pmbus.c
+++ b/drivers/hwmon/pmbus/pmbus.c
@@ -115,7 +115,7 @@ static int pmbus_identify(struct i2c_client *client,
}
if (pmbus_check_byte_register(client, 0, PMBUS_VOUT_MODE)) {
- int vout_mode;
+ int vout_mode, i;
vout_mode = pmbus_read_byte_data(client, 0, PMBUS_VOUT_MODE);
if (vout_mode >= 0 && vout_mode != 0xff) {
@@ -124,7 +124,8 @@ static int pmbus_identify(struct i2c_client *client,
break;
case 1:
info->format[PSC_VOLTAGE_OUT] = vid;
- info->vrm_version = vr11;
+ for (i = 0; i < info->pages; i++)
+ info->vrm_version[i] = vr11;
break;
case 2:
info->format[PSC_VOLTAGE_OUT] = direct;
@@ -210,6 +211,7 @@ static const struct i2c_device_id pmbus_id[] = {
{"dps460", (kernel_ulong_t)&pmbus_info_one_skip},
{"dps650ab", (kernel_ulong_t)&pmbus_info_one_skip},
{"dps800", (kernel_ulong_t)&pmbus_info_one_skip},
+ {"max20796", (kernel_ulong_t)&pmbus_info_one},
{"mdt040", (kernel_ulong_t)&pmbus_info_one},
{"ncp4200", (kernel_ulong_t)&pmbus_info_one},
{"ncp4208", (kernel_ulong_t)&pmbus_info_one},
diff --git a/drivers/hwmon/pmbus/pmbus.h b/drivers/hwmon/pmbus/pmbus.h
index d198af3a92b6..13b34bd67f23 100644
--- a/drivers/hwmon/pmbus/pmbus.h
+++ b/drivers/hwmon/pmbus/pmbus.h
@@ -22,6 +22,8 @@ enum pmbus_regs {
PMBUS_CLEAR_FAULTS = 0x03,
PMBUS_PHASE = 0x04,
+ PMBUS_WRITE_PROTECT = 0x10,
+
PMBUS_CAPABILITY = 0x19,
PMBUS_QUERY = 0x1A,
@@ -226,6 +228,15 @@ enum pmbus_regs {
#define PB_OPERATION_CONTROL_ON BIT(7)
/*
+ * WRITE_PROTECT
+ */
+#define PB_WP_ALL BIT(7) /* all but WRITE_PROTECT */
+#define PB_WP_OP BIT(6) /* all but WP, OPERATION, PAGE */
+#define PB_WP_VOUT BIT(5) /* all but WP, OPERATION, PAGE, VOUT, ON_OFF */
+
+#define PB_WP_ANY (PB_WP_ALL | PB_WP_OP | PB_WP_VOUT)
+
+/*
* CAPABILITY
*/
#define PB_CAPABILITY_SMBALERT BIT(4)
@@ -377,12 +388,12 @@ enum pmbus_sensor_classes {
#define PMBUS_PAGE_VIRTUAL BIT(31)
enum pmbus_data_format { linear = 0, direct, vid };
-enum vrm_version { vr11 = 0, vr12, vr13 };
+enum vrm_version { vr11 = 0, vr12, vr13, imvp9, amd625mv };
struct pmbus_driver_info {
int pages; /* Total number of pages */
enum pmbus_data_format format[PSC_NUM_CLASSES];
- enum vrm_version vrm_version;
+ enum vrm_version vrm_version[PMBUS_PAGES]; /* vrm version per page */
/*
* Support one set of coefficients for each sensor type
* Used for chips providing data in direct mode.
diff --git a/drivers/hwmon/pmbus/pmbus_core.c b/drivers/hwmon/pmbus/pmbus_core.c
index 8470097907bc..d9c17feb7b4a 100644
--- a/drivers/hwmon/pmbus/pmbus_core.c
+++ b/drivers/hwmon/pmbus/pmbus_core.c
@@ -696,7 +696,7 @@ static long pmbus_reg2data_vid(struct pmbus_data *data,
long val = sensor->data;
long rv = 0;
- switch (data->info->vrm_version) {
+ switch (data->info->vrm_version[sensor->page]) {
case vr11:
if (val >= 0x02 && val <= 0xb2)
rv = DIV_ROUND_CLOSEST(160000 - (val - 2) * 625, 100);
@@ -709,6 +709,14 @@ static long pmbus_reg2data_vid(struct pmbus_data *data,
if (val >= 0x01)
rv = 500 + (val - 1) * 10;
break;
+ case imvp9:
+ if (val >= 0x01)
+ rv = 200 + (val - 1) * 10;
+ break;
+ case amd625mv:
+ if (val >= 0x0 && val <= 0xd8)
+ rv = DIV_ROUND_CLOSEST(155000 - val * 625, 100);
+ break;
}
return rv;
}
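To make the two new VID tables concrete, a stand-alone sketch of the added branches (results in millivolts, matching the conversions above; the sample codes are chosen to hit the endpoints):

#include <stdio.h>

#define DIV_ROUND_CLOSEST(x, d)	(((x) + (d) / 2) / (d))

/* IMVP9: 10 mV per step starting at 200 mV for code 0x01 */
static long imvp9_to_mv(unsigned char val)
{
	return val >= 0x01 ? 200 + (val - 1) * 10 : 0;
}

/* AMD 6.25 mV mode: 1550 mV at code 0, stepping down 6.25 mV per code */
static long amd625mv_to_mv(unsigned char val)
{
	return val <= 0xd8 ? DIV_ROUND_CLOSEST(155000L - val * 625L, 100) : 0;
}

int main(void)
{
	/* expect 200 mV, 210 mV, 1550 mV, 200 mV */
	printf("%ld %ld %ld %ld\n", imvp9_to_mv(0x01), imvp9_to_mv(0x02),
	       amd625mv_to_mv(0x00), amd625mv_to_mv(0xd8));
	return 0;
}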
@@ -1088,6 +1096,9 @@ static struct pmbus_sensor *pmbus_add_sensor(struct pmbus_data *data,
snprintf(sensor->name, sizeof(sensor->name), "%s%d",
name, seq);
+ if (data->flags & PMBUS_WRITE_PROTECTED)
+ readonly = true;
+
sensor->page = page;
sensor->reg = reg;
sensor->class = class;
@@ -2141,6 +2152,15 @@ static int pmbus_init_common(struct i2c_client *client, struct pmbus_data *data,
if (ret >= 0 && (ret & PB_CAPABILITY_ERROR_CHECK))
client->flags |= I2C_CLIENT_PEC;
+ /*
+ * Check if the chip is write protected. If it is, we can not clear
+ * faults, and we should not try it. Also, in that case, writes into
+ * limit registers need to be disabled.
+ */
+ ret = i2c_smbus_read_byte_data(client, PMBUS_WRITE_PROTECT);
+ if (ret > 0 && (ret & PB_WP_ANY))
+ data->flags |= PMBUS_WRITE_PROTECTED | PMBUS_SKIP_STATUS_CHECK;
+
if (data->info->pages)
pmbus_clear_faults(client);
else
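The three WRITE_PROTECT levels defined in pmbus.h above form a strict hierarchy. A small sketch of how a status byte could be classified, mirroring the PB_WP_* definitions and the PB_WP_ANY test in pmbus_init_common() (illustration only, not driver code):

#include <stdio.h>

#define BIT(n)		(1u << (n))
#define PB_WP_ALL	BIT(7)	/* all writes disabled but WRITE_PROTECT */
#define PB_WP_OP	BIT(6)	/* all but WP, OPERATION, PAGE */
#define PB_WP_VOUT	BIT(5)	/* all but WP, OPERATION, PAGE, VOUT, ON_OFF */
#define PB_WP_ANY	(PB_WP_ALL | PB_WP_OP | PB_WP_VOUT)

static const char *wp_level(unsigned int wp)
{
	if (wp & PB_WP_ALL)
		return "everything but WRITE_PROTECT is read-only";
	if (wp & PB_WP_OP)
		return "only WP, OPERATION and PAGE are writable";
	if (wp & PB_WP_VOUT)
		return "WP, OPERATION, PAGE, VOUT_COMMAND, ON_OFF writable";
	return "no write protection";
}

int main(void)
{
	unsigned int ret = PB_WP_VOUT;	/* as read from PMBUS_WRITE_PROTECT */

	if (ret & PB_WP_ANY)
		printf("write protected: %s\n", wp_level(ret));
	return 0;
}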
diff --git a/drivers/hwmon/pmbus/pxe1610.c b/drivers/hwmon/pmbus/pxe1610.c
index ebe3f023f840..517584cff3de 100644
--- a/drivers/hwmon/pmbus/pxe1610.c
+++ b/drivers/hwmon/pmbus/pxe1610.c
@@ -19,26 +19,30 @@
static int pxe1610_identify(struct i2c_client *client,
struct pmbus_driver_info *info)
{
- if (pmbus_check_byte_register(client, 0, PMBUS_VOUT_MODE)) {
- u8 vout_mode;
- int ret;
-
- /* Read the register with VOUT scaling value.*/
- ret = pmbus_read_byte_data(client, 0, PMBUS_VOUT_MODE);
- if (ret < 0)
- return ret;
-
- vout_mode = ret & GENMASK(4, 0);
-
- switch (vout_mode) {
- case 1:
- info->vrm_version = vr12;
- break;
- case 2:
- info->vrm_version = vr13;
- break;
- default:
- return -ENODEV;
+ int i;
+
+ for (i = 0; i < PXE1610_NUM_PAGES; i++) {
+ if (pmbus_check_byte_register(client, i, PMBUS_VOUT_MODE)) {
+ u8 vout_mode;
+ int ret;
+
+ /* Read the register with the VOUT scaling value. */
+ ret = pmbus_read_byte_data(client, i, PMBUS_VOUT_MODE);
+ if (ret < 0)
+ return ret;
+
+ vout_mode = ret & GENMASK(4, 0);
+
+ switch (vout_mode) {
+ case 1:
+ info->vrm_version[i] = vr12;
+ break;
+ case 2:
+ info->vrm_version[i] = vr13;
+ break;
+ default:
+ return -ENODEV;
+ }
}
}
diff --git a/drivers/hwmon/pmbus/tps53679.c b/drivers/hwmon/pmbus/tps53679.c
index 86bb3aca09ed..9c22e9013dd7 100644
--- a/drivers/hwmon/pmbus/tps53679.c
+++ b/drivers/hwmon/pmbus/tps53679.c
@@ -24,27 +24,29 @@ static int tps53679_identify(struct i2c_client *client,
struct pmbus_driver_info *info)
{
u8 vout_params;
- int ret;
-
- /* Read the register with VOUT scaling value.*/
- ret = pmbus_read_byte_data(client, 0, PMBUS_VOUT_MODE);
- if (ret < 0)
- return ret;
-
- vout_params = ret & GENMASK(4, 0);
-
- switch (vout_params) {
- case TPS53679_PROT_VR13_10MV:
- case TPS53679_PROT_VR12_5_10MV:
- info->vrm_version = vr13;
- break;
- case TPS53679_PROT_VR13_5MV:
- case TPS53679_PROT_VR12_5MV:
- case TPS53679_PROT_IMVP8_5MV:
- info->vrm_version = vr12;
- break;
- default:
- return -EINVAL;
+ int i, ret;
+
+ for (i = 0; i < TPS53679_PAGE_NUM; i++) {
+ /* Read the register with the VOUT scaling value. */
+ ret = pmbus_read_byte_data(client, i, PMBUS_VOUT_MODE);
+ if (ret < 0)
+ return ret;
+
+ vout_params = ret & GENMASK(4, 0);
+
+ switch (vout_params) {
+ case TPS53679_PROT_VR13_10MV:
+ case TPS53679_PROT_VR12_5_10MV:
+ info->vrm_version[i] = vr13;
+ break;
+ case TPS53679_PROT_VR13_5MV:
+ case TPS53679_PROT_VR12_5MV:
+ case TPS53679_PROT_IMVP8_5MV:
+ info->vrm_version[i] = vr12;
+ break;
+ default:
+ return -EINVAL;
+ }
}
return 0;
@@ -83,6 +85,7 @@ static int tps53679_probe(struct i2c_client *client,
static const struct i2c_device_id tps53679_id[] = {
{"tps53679", 0},
+ {"tps53688", 0},
{}
};
@@ -90,6 +93,7 @@ MODULE_DEVICE_TABLE(i2c, tps53679_id);
static const struct of_device_id __maybe_unused tps53679_of_match[] = {
{.compatible = "ti,tps53679"},
+ {.compatible = "ti,tps53688"},
{}
};
MODULE_DEVICE_TABLE(of, tps53679_of_match);
diff --git a/drivers/hwmon/pmbus/ucd9000.c b/drivers/hwmon/pmbus/ucd9000.c
index a9229c6b0e84..23ea3415f166 100644
--- a/drivers/hwmon/pmbus/ucd9000.c
+++ b/drivers/hwmon/pmbus/ucd9000.c
@@ -18,7 +18,8 @@
#include <linux/gpio/driver.h>
#include "pmbus.h"
-enum chips { ucd9000, ucd90120, ucd90124, ucd90160, ucd9090, ucd90910 };
+enum chips { ucd9000, ucd90120, ucd90124, ucd90160, ucd90320, ucd9090,
+ ucd90910 };
#define UCD9000_MONITOR_CONFIG 0xd5
#define UCD9000_NUM_PAGES 0xd6
@@ -38,7 +39,7 @@ enum chips { ucd9000, ucd90120, ucd90124, ucd90160, ucd9090, ucd90910 };
#define UCD9000_GPIO_OUTPUT 1
#define UCD9000_MON_TYPE(x) (((x) >> 5) & 0x07)
-#define UCD9000_MON_PAGE(x) ((x) & 0x0f)
+#define UCD9000_MON_PAGE(x) ((x) & 0x1f)
#define UCD9000_MON_VOLTAGE 1
#define UCD9000_MON_TEMPERATURE 2
@@ -50,10 +51,12 @@ enum chips { ucd9000, ucd90120, ucd90124, ucd90160, ucd9090, ucd90910 };
#define UCD9000_GPIO_NAME_LEN 16
#define UCD9090_NUM_GPIOS 23
#define UCD901XX_NUM_GPIOS 26
+#define UCD90320_NUM_GPIOS 84
#define UCD90910_NUM_GPIOS 26
#define UCD9000_DEBUGFS_NAME_LEN 24
#define UCD9000_GPI_COUNT 8
+#define UCD90320_GPI_COUNT 32
struct ucd9000_data {
u8 fan_data[UCD9000_NUM_FAN][I2C_SMBUS_BLOCK_MAX];
@@ -131,6 +134,7 @@ static const struct i2c_device_id ucd9000_id[] = {
{"ucd90120", ucd90120},
{"ucd90124", ucd90124},
{"ucd90160", ucd90160},
+ {"ucd90320", ucd90320},
{"ucd9090", ucd9090},
{"ucd90910", ucd90910},
{}
@@ -155,6 +159,10 @@ static const struct of_device_id __maybe_unused ucd9000_of_match[] = {
.data = (void *)ucd90160
},
{
+ .compatible = "ti,ucd90320",
+ .data = (void *)ucd90320
+ },
+ {
.compatible = "ti,ucd9090",
.data = (void *)ucd9090
},
@@ -322,6 +330,9 @@ static void ucd9000_probe_gpio(struct i2c_client *client,
case ucd90160:
data->gpio.ngpio = UCD901XX_NUM_GPIOS;
break;
+ case ucd90320:
+ data->gpio.ngpio = UCD90320_NUM_GPIOS;
+ break;
case ucd90910:
data->gpio.ngpio = UCD90910_NUM_GPIOS;
break;
@@ -372,17 +383,18 @@ static int ucd9000_debugfs_show_mfr_status_bit(void *data, u64 *val)
struct ucd9000_debugfs_entry *entry = data;
struct i2c_client *client = entry->client;
u8 buffer[I2C_SMBUS_BLOCK_MAX];
- int ret;
+ int ret, i;
ret = ucd9000_get_mfr_status(client, buffer);
if (ret < 0)
return ret;
/*
- * Attribute only created for devices with gpi fault bits at bits
- * 16-23, which is the second byte of the response.
+ * GPI fault bits are packed in sets of 8, starting two bytes from
+ * the end of the response.
*/
- *val = !!(buffer[1] & BIT(entry->index));
+ i = ret - 3 - entry->index / 8;
+ if (i >= 0)
+ *val = !!(buffer[i] & BIT(entry->index % 8));
return 0;
}
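The new indexing arithmetic maps a GPI number onto the right byte of a variable-length MFR_STATUS response. A quick stand-alone check of the formula; the 4-byte case reproduces the old buffer[1] behavior, while the 8-byte response length is illustrative, not taken from a datasheet:

#include <stdio.h>

/* buffer index for a GPI, per ucd9000_debugfs_show_mfr_status_bit() */
static int gpi_byte(int resp_len, int gpi)
{
	return resp_len - 3 - gpi / 8;
}

int main(void)
{
	/* 4-byte response: GPI 0..7 land in buffer[1], as before */
	printf("len=4 gpi=0  -> byte %d bit %d\n", gpi_byte(4, 0), 0 % 8);
	/* longer response: GPI 8..15 land one byte earlier */
	printf("len=8 gpi=12 -> byte %d bit %d\n", gpi_byte(8, 12), 12 % 8);
	return 0;
}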
@@ -422,7 +434,7 @@ static int ucd9000_init_debugfs(struct i2c_client *client,
{
struct dentry *debugfs;
struct ucd9000_debugfs_entry *entries;
- int i;
+ int i, gpi_count;
char name[UCD9000_DEBUGFS_NAME_LEN];
debugfs = pmbus_get_debugfs_dir(client);
@@ -435,18 +447,21 @@ static int ucd9000_init_debugfs(struct i2c_client *client,
/*
* Of the chips this driver supports, only the UCD9090, UCD90160,
- * and UCD90910 report GPI faults in their MFR_STATUS register, so only
- * create the GPI fault debugfs attributes for those chips.
+ * UCD90320, and UCD90910 report GPI faults in their MFR_STATUS
+ * register, so only create the GPI fault debugfs attributes for those
+ * chips.
*/
if (mid->driver_data == ucd9090 || mid->driver_data == ucd90160 ||
- mid->driver_data == ucd90910) {
+ mid->driver_data == ucd90320 || mid->driver_data == ucd90910) {
+ gpi_count = mid->driver_data == ucd90320 ? UCD90320_GPI_COUNT
+ : UCD9000_GPI_COUNT;
entries = devm_kcalloc(&client->dev,
- UCD9000_GPI_COUNT, sizeof(*entries),
+ gpi_count, sizeof(*entries),
GFP_KERNEL);
if (!entries)
return -ENOMEM;
- for (i = 0; i < UCD9000_GPI_COUNT; i++) {
+ for (i = 0; i < gpi_count; i++) {
entries[i].client = client;
entries[i].index = i;
scnprintf(name, UCD9000_DEBUGFS_NAME_LEN,
diff --git a/drivers/hwmon/pmbus/xdpe12284.c b/drivers/hwmon/pmbus/xdpe12284.c
new file mode 100644
index 000000000000..3d47806ff4d3
--- /dev/null
+++ b/drivers/hwmon/pmbus/xdpe12284.c
@@ -0,0 +1,117 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Hardware monitoring driver for Infineon Multi-phase Digital VR Controllers
+ *
+ * Copyright (c) 2020 Mellanox Technologies. All rights reserved.
+ */
+
+#include <linux/err.h>
+#include <linux/i2c.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include "pmbus.h"
+
+#define XDPE122_PROT_VR12_5MV 0x01 /* VR12.0 mode, 5-mV DAC */
+#define XDPE122_PROT_VR12_5_10MV 0x02 /* VR12.5 mode, 10-mV DAC */
+#define XDPE122_PROT_IMVP9_10MV 0x03 /* IMVP9 mode, 10-mV DAC */
+#define XDPE122_AMD_625MV 0x10 /* AMD mode, 6.25-mV DAC */
+#define XDPE122_PAGE_NUM 2
+
+static int xdpe122_identify(struct i2c_client *client,
+ struct pmbus_driver_info *info)
+{
+ u8 vout_params;
+ int i, ret;
+
+ for (i = 0; i < XDPE122_PAGE_NUM; i++) {
+ /* Read the register with the VOUT scaling value. */
+ ret = pmbus_read_byte_data(client, i, PMBUS_VOUT_MODE);
+ if (ret < 0)
+ return ret;
+
+ vout_params = ret & GENMASK(4, 0);
+
+ switch (vout_params) {
+ case XDPE122_PROT_VR12_5_10MV:
+ info->vrm_version[i] = vr13;
+ break;
+ case XDPE122_PROT_VR12_5MV:
+ info->vrm_version[i] = vr12;
+ break;
+ case XDPE122_PROT_IMVP9_10MV:
+ info->vrm_version[i] = imvp9;
+ break;
+ case XDPE122_AMD_625MV:
+ info->vrm_version[i] = amd625mv;
+ break;
+ default:
+ return -EINVAL;
+ }
+ }
+
+ return 0;
+}
+
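Since three drivers in this series (pxe1610, tps53679, xdpe12284) now decode VOUT_MODE per page the same way, a condensed sketch of the decode may help; the mode values follow the XDPE122 defines above, with GENMASK(4, 0) expanded for user space:

#include <stdio.h>

#define VOUT_MODE_PARAM_MASK	0x1f	/* GENMASK(4, 0) */

int main(void)
{
	unsigned char vout_mode = 0x03;	/* example PMBUS_VOUT_MODE byte */

	switch (vout_mode & VOUT_MODE_PARAM_MASK) {
	case 0x01: puts("vr12 (5-mV DAC)"); break;
	case 0x02: puts("vr13 (10-mV DAC)"); break;
	case 0x03: puts("imvp9 (10-mV DAC)"); break;
	case 0x10: puts("amd625mv (6.25-mV steps)"); break;
	default:   puts("unsupported"); break;
	}
	return 0;
}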
+static struct pmbus_driver_info xdpe122_info = {
+ .pages = XDPE122_PAGE_NUM,
+ .format[PSC_VOLTAGE_IN] = linear,
+ .format[PSC_VOLTAGE_OUT] = vid,
+ .format[PSC_TEMPERATURE] = linear,
+ .format[PSC_CURRENT_IN] = linear,
+ .format[PSC_CURRENT_OUT] = linear,
+ .format[PSC_POWER] = linear,
+ .func[0] = PMBUS_HAVE_VIN | PMBUS_HAVE_VOUT | PMBUS_HAVE_STATUS_VOUT |
+ PMBUS_HAVE_IIN | PMBUS_HAVE_IOUT | PMBUS_HAVE_STATUS_IOUT |
+ PMBUS_HAVE_TEMP | PMBUS_HAVE_STATUS_TEMP |
+ PMBUS_HAVE_POUT | PMBUS_HAVE_PIN | PMBUS_HAVE_STATUS_INPUT,
+ .func[1] = PMBUS_HAVE_VIN | PMBUS_HAVE_VOUT | PMBUS_HAVE_STATUS_VOUT |
+ PMBUS_HAVE_IIN | PMBUS_HAVE_IOUT | PMBUS_HAVE_STATUS_IOUT |
+ PMBUS_HAVE_TEMP | PMBUS_HAVE_STATUS_TEMP |
+ PMBUS_HAVE_POUT | PMBUS_HAVE_PIN | PMBUS_HAVE_STATUS_INPUT,
+ .identify = xdpe122_identify,
+};
+
+static int xdpe122_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ struct pmbus_driver_info *info;
+
+ info = devm_kmemdup(&client->dev, &xdpe122_info, sizeof(*info),
+ GFP_KERNEL);
+ if (!info)
+ return -ENOMEM;
+
+ return pmbus_do_probe(client, id, info);
+}
+
+static const struct i2c_device_id xdpe122_id[] = {
+ {"xdpe12254", 0},
+ {"xdpe12284", 0},
+ {}
+};
+
+MODULE_DEVICE_TABLE(i2c, xdpe122_id);
+
+static const struct of_device_id __maybe_unused xdpe122_of_match[] = {
+ {.compatible = "infineon,xdpe12254"},
+ {.compatible = "infineon,xdpe12284"},
+ {}
+};
+MODULE_DEVICE_TABLE(of, xdpe122_of_match);
+
+static struct i2c_driver xdpe122_driver = {
+ .driver = {
+ .name = "xdpe12284",
+ .of_match_table = of_match_ptr(xdpe122_of_match),
+ },
+ .probe = xdpe122_probe,
+ .remove = pmbus_do_remove,
+ .id_table = xdpe122_id,
+};
+
+module_i2c_driver(xdpe122_driver);
+
+MODULE_AUTHOR("Vadim Pasternak <vadimp@mellanox.com>");
+MODULE_DESCRIPTION("PMBus driver for Infineon XDPE122 family");
+MODULE_LICENSE("GPL");
diff --git a/drivers/hwmon/pwm-fan.c b/drivers/hwmon/pwm-fan.c
index 42ffd2e5182d..30b7b3ea8836 100644
--- a/drivers/hwmon/pwm-fan.c
+++ b/drivers/hwmon/pwm-fan.c
@@ -390,8 +390,7 @@ static int pwm_fan_probe(struct platform_device *pdev)
return 0;
}
-#ifdef CONFIG_PM_SLEEP
-static int pwm_fan_suspend(struct device *dev)
+static int pwm_fan_disable(struct device *dev)
{
struct pwm_fan_ctx *ctx = dev_get_drvdata(dev);
struct pwm_args args;
@@ -418,6 +417,17 @@ static int pwm_fan_suspend(struct device *dev)
return 0;
}
+static void pwm_fan_shutdown(struct platform_device *pdev)
+{
+ pwm_fan_disable(&pdev->dev);
+}
+
+#ifdef CONFIG_PM_SLEEP
+static int pwm_fan_suspend(struct device *dev)
+{
+ return pwm_fan_disable(dev);
+}
+
static int pwm_fan_resume(struct device *dev)
{
struct pwm_fan_ctx *ctx = dev_get_drvdata(dev);
@@ -455,6 +465,7 @@ MODULE_DEVICE_TABLE(of, of_pwm_fan_match);
static struct platform_driver pwm_fan_driver = {
.probe = pwm_fan_probe,
+ .shutdown = pwm_fan_shutdown,
.driver = {
.name = "pwm-fan",
.pm = &pwm_fan_pm,
diff --git a/drivers/hwmon/w83627ehf.c b/drivers/hwmon/w83627ehf.c
index eb171d15ac48..7ffadc2da57b 100644
--- a/drivers/hwmon/w83627ehf.c
+++ b/drivers/hwmon/w83627ehf.c
@@ -28,8 +28,6 @@
* w83627uhg 8 2 2 3 0xa230 0xc1 0x5ca3
* w83667hg 9 5 3 3 0xa510 0xc1 0x5ca3
* w83667hg-b 9 5 3 4 0xb350 0xc1 0x5ca3
- * nct6775f 9 4 3 9 0xb470 0xc1 0x5ca3
- * nct6776f 9 5 3 9 0xC330 0xc1 0x5ca3
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
@@ -50,7 +48,7 @@
enum kinds {
w83627ehf, w83627dhg, w83627dhg_p, w83627uhg,
- w83667hg, w83667hg_b, nct6775, nct6776,
+ w83667hg, w83667hg_b,
};
/* used to set data->name = w83627ehf_device_names[data->sio_kind] */
@@ -61,18 +59,12 @@ static const char * const w83627ehf_device_names[] = {
"w83627uhg",
"w83667hg",
"w83667hg",
- "nct6775",
- "nct6776",
};
static unsigned short force_id;
module_param(force_id, ushort, 0);
MODULE_PARM_DESC(force_id, "Override the detected device ID");
-static unsigned short fan_debounce;
-module_param(fan_debounce, ushort, 0);
-MODULE_PARM_DESC(fan_debounce, "Enable debouncing for fan RPM signal");
-
#define DRVNAME "w83627ehf"
/*
@@ -97,8 +89,6 @@ MODULE_PARM_DESC(fan_debounce, "Enable debouncing for fan RPM signal");
#define SIO_W83627UHG_ID 0xa230
#define SIO_W83667HG_ID 0xa510
#define SIO_W83667HG_B_ID 0xb350
-#define SIO_NCT6775_ID 0xb470
-#define SIO_NCT6776_ID 0xc330
#define SIO_ID_MASK 0xFFF0
static inline void
@@ -187,11 +177,6 @@ static const u16 W83627EHF_REG_TEMP_CONFIG[] = { 0, 0x152, 0x252, 0 };
#define W83627EHF_REG_DIODE 0x59
#define W83627EHF_REG_SMI_OVT 0x4C
-/* NCT6775F has its own fan divider registers */
-#define NCT6775_REG_FANDIV1 0x506
-#define NCT6775_REG_FANDIV2 0x507
-#define NCT6775_REG_FAN_DEBOUNCE 0xf0
-
#define W83627EHF_REG_ALARM1 0x459
#define W83627EHF_REG_ALARM2 0x45A
#define W83627EHF_REG_ALARM3 0x45B
@@ -235,28 +220,6 @@ static const u16 W83627EHF_REG_FAN_STEP_OUTPUT_W83667_B[]
static const u16 W83627EHF_REG_TEMP_OFFSET[] = { 0x454, 0x455, 0x456 };
-static const u16 NCT6775_REG_TARGET[] = { 0x101, 0x201, 0x301 };
-static const u16 NCT6775_REG_FAN_MODE[] = { 0x102, 0x202, 0x302 };
-static const u16 NCT6775_REG_FAN_STOP_OUTPUT[] = { 0x105, 0x205, 0x305 };
-static const u16 NCT6775_REG_FAN_START_OUTPUT[] = { 0x106, 0x206, 0x306 };
-static const u16 NCT6775_REG_FAN_STOP_TIME[] = { 0x107, 0x207, 0x307 };
-static const u16 NCT6775_REG_PWM[] = { 0x109, 0x209, 0x309 };
-static const u16 NCT6775_REG_FAN_MAX_OUTPUT[] = { 0x10a, 0x20a, 0x30a };
-static const u16 NCT6775_REG_FAN_STEP_OUTPUT[] = { 0x10b, 0x20b, 0x30b };
-static const u16 NCT6775_REG_FAN[] = { 0x630, 0x632, 0x634, 0x636, 0x638 };
-static const u16 NCT6776_REG_FAN_MIN[] = { 0x63a, 0x63c, 0x63e, 0x640, 0x642};
-
-static const u16 NCT6775_REG_TEMP[]
- = { 0x27, 0x150, 0x250, 0x73, 0x75, 0x77, 0x62b, 0x62c, 0x62d };
-static const u16 NCT6775_REG_TEMP_CONFIG[]
- = { 0, 0x152, 0x252, 0, 0, 0, 0x628, 0x629, 0x62A };
-static const u16 NCT6775_REG_TEMP_HYST[]
- = { 0x3a, 0x153, 0x253, 0, 0, 0, 0x673, 0x678, 0x67D };
-static const u16 NCT6775_REG_TEMP_OVER[]
- = { 0x39, 0x155, 0x255, 0, 0, 0, 0x672, 0x677, 0x67C };
-static const u16 NCT6775_REG_TEMP_SOURCE[]
- = { 0x621, 0x622, 0x623, 0x100, 0x200, 0x300, 0x624, 0x625, 0x626 };
-
static const char *const w83667hg_b_temp_label[] = {
"SYSTIN",
"CPUTIN",
@@ -268,57 +231,7 @@ static const char *const w83667hg_b_temp_label[] = {
"PECI Agent 4"
};
-static const char *const nct6775_temp_label[] = {
- "",
- "SYSTIN",
- "CPUTIN",
- "AUXTIN",
- "AMD SB-TSI",
- "PECI Agent 0",
- "PECI Agent 1",
- "PECI Agent 2",
- "PECI Agent 3",
- "PECI Agent 4",
- "PECI Agent 5",
- "PECI Agent 6",
- "PECI Agent 7",
- "PCH_CHIP_CPU_MAX_TEMP",
- "PCH_CHIP_TEMP",
- "PCH_CPU_TEMP",
- "PCH_MCH_TEMP",
- "PCH_DIM0_TEMP",
- "PCH_DIM1_TEMP",
- "PCH_DIM2_TEMP",
- "PCH_DIM3_TEMP"
-};
-
-static const char *const nct6776_temp_label[] = {
- "",
- "SYSTIN",
- "CPUTIN",
- "AUXTIN",
- "SMBUSMASTER 0",
- "SMBUSMASTER 1",
- "SMBUSMASTER 2",
- "SMBUSMASTER 3",
- "SMBUSMASTER 4",
- "SMBUSMASTER 5",
- "SMBUSMASTER 6",
- "SMBUSMASTER 7",
- "PECI Agent 0",
- "PECI Agent 1",
- "PCH_CHIP_CPU_MAX_TEMP",
- "PCH_CHIP_TEMP",
- "PCH_CPU_TEMP",
- "PCH_MCH_TEMP",
- "PCH_DIM0_TEMP",
- "PCH_DIM1_TEMP",
- "PCH_DIM2_TEMP",
- "PCH_DIM3_TEMP",
- "BYTE_TEMP"
-};
-
-#define NUM_REG_TEMP ARRAY_SIZE(NCT6775_REG_TEMP)
+#define NUM_REG_TEMP ARRAY_SIZE(W83627EHF_REG_TEMP)
static int is_word_sized(u16 reg)
{
@@ -358,31 +271,6 @@ static unsigned int fan_from_reg8(u16 reg, unsigned int divreg)
return 1350000U / (reg << divreg);
}
-static unsigned int fan_from_reg13(u16 reg, unsigned int divreg)
-{
- if ((reg & 0xff1f) == 0xff1f)
- return 0;
-
- reg = (reg & 0x1f) | ((reg & 0xff00) >> 3);
-
- if (reg == 0)
- return 0;
-
- return 1350000U / reg;
-}
-
-static unsigned int fan_from_reg16(u16 reg, unsigned int divreg)
-{
- if (reg == 0 || reg == 0xffff)
- return 0;
-
- /*
- * Even though the registers are 16 bit wide, the fan divisor
- * still applies.
- */
- return 1350000U / (reg << divreg);
-}
-
static inline unsigned int
div_from_reg(u8 reg)
{
@@ -418,7 +306,6 @@ struct w83627ehf_data {
int addr; /* IO base of hw monitor block */
const char *name;
- struct device *hwmon_dev;
struct mutex lock;
u16 reg_temp[NUM_REG_TEMP];
@@ -428,20 +315,10 @@ struct w83627ehf_data {
u8 temp_src[NUM_REG_TEMP];
const char * const *temp_label;
- const u16 *REG_PWM;
- const u16 *REG_TARGET;
- const u16 *REG_FAN;
- const u16 *REG_FAN_MIN;
- const u16 *REG_FAN_START_OUTPUT;
- const u16 *REG_FAN_STOP_OUTPUT;
- const u16 *REG_FAN_STOP_TIME;
const u16 *REG_FAN_MAX_OUTPUT;
const u16 *REG_FAN_STEP_OUTPUT;
const u16 *scale_in;
- unsigned int (*fan_from_reg)(u16 reg, unsigned int divreg);
- unsigned int (*fan_from_reg_min)(u16 reg, unsigned int divreg);
-
struct mutex update_lock;
char valid; /* !=0 if following fields are valid */
unsigned long last_updated; /* In jiffies */
@@ -457,7 +334,6 @@ struct w83627ehf_data {
u8 fan_div[5];
u8 has_fan; /* some fan inputs can be disabled */
u8 has_fan_min; /* some fans don't have min register */
- bool has_fan_div;
u8 temp_type[3];
s8 temp_offset[3];
s16 temp[9];
@@ -494,6 +370,7 @@ struct w83627ehf_data {
u16 have_temp_offset;
u8 in6_skip:1;
u8 temp3_val_only:1;
+ u8 have_vid:1;
#ifdef CONFIG_PM
/* Remember extra register values over suspend/resume */
@@ -584,35 +461,6 @@ static int w83627ehf_write_temp(struct w83627ehf_data *data, u16 reg,
}
/* This function assumes that the caller holds data->update_lock */
-static void nct6775_write_fan_div(struct w83627ehf_data *data, int nr)
-{
- u8 reg;
-
- switch (nr) {
- case 0:
- reg = (w83627ehf_read_value(data, NCT6775_REG_FANDIV1) & 0x70)
- | (data->fan_div[0] & 0x7);
- w83627ehf_write_value(data, NCT6775_REG_FANDIV1, reg);
- break;
- case 1:
- reg = (w83627ehf_read_value(data, NCT6775_REG_FANDIV1) & 0x7)
- | ((data->fan_div[1] << 4) & 0x70);
- w83627ehf_write_value(data, NCT6775_REG_FANDIV1, reg);
- break;
- case 2:
- reg = (w83627ehf_read_value(data, NCT6775_REG_FANDIV2) & 0x70)
- | (data->fan_div[2] & 0x7);
- w83627ehf_write_value(data, NCT6775_REG_FANDIV2, reg);
- break;
- case 3:
- reg = (w83627ehf_read_value(data, NCT6775_REG_FANDIV2) & 0x7)
- | ((data->fan_div[3] << 4) & 0x70);
- w83627ehf_write_value(data, NCT6775_REG_FANDIV2, reg);
- break;
- }
-}
-
-/* This function assumes that the caller holds data->update_lock */
static void w83627ehf_write_fan_div(struct w83627ehf_data *data, int nr)
{
u8 reg;
@@ -663,32 +511,6 @@ static void w83627ehf_write_fan_div(struct w83627ehf_data *data, int nr)
}
}
-static void w83627ehf_write_fan_div_common(struct device *dev,
- struct w83627ehf_data *data, int nr)
-{
- struct w83627ehf_sio_data *sio_data = dev_get_platdata(dev);
-
- if (sio_data->kind == nct6776)
- ; /* no dividers, do nothing */
- else if (sio_data->kind == nct6775)
- nct6775_write_fan_div(data, nr);
- else
- w83627ehf_write_fan_div(data, nr);
-}
-
-static void nct6775_update_fan_div(struct w83627ehf_data *data)
-{
- u8 i;
-
- i = w83627ehf_read_value(data, NCT6775_REG_FANDIV1);
- data->fan_div[0] = i & 0x7;
- data->fan_div[1] = (i & 0x70) >> 4;
- i = w83627ehf_read_value(data, NCT6775_REG_FANDIV2);
- data->fan_div[2] = i & 0x7;
- if (data->has_fan & (1<<3))
- data->fan_div[3] = (i & 0x70) >> 4;
-}
-
static void w83627ehf_update_fan_div(struct w83627ehf_data *data)
{
int i;
@@ -714,37 +536,6 @@ static void w83627ehf_update_fan_div(struct w83627ehf_data *data)
}
}
-static void w83627ehf_update_fan_div_common(struct device *dev,
- struct w83627ehf_data *data)
-{
- struct w83627ehf_sio_data *sio_data = dev_get_platdata(dev);
-
- if (sio_data->kind == nct6776)
- ; /* no dividers, do nothing */
- else if (sio_data->kind == nct6775)
- nct6775_update_fan_div(data);
- else
- w83627ehf_update_fan_div(data);
-}
-
-static void nct6775_update_pwm(struct w83627ehf_data *data)
-{
- int i;
- int pwmcfg, fanmodecfg;
-
- for (i = 0; i < data->pwm_num; i++) {
- pwmcfg = w83627ehf_read_value(data,
- W83627EHF_REG_PWM_ENABLE[i]);
- fanmodecfg = w83627ehf_read_value(data,
- NCT6775_REG_FAN_MODE[i]);
- data->pwm_mode[i] =
- ((pwmcfg >> W83627EHF_PWM_MODE_SHIFT[i]) & 1) ? 0 : 1;
- data->pwm_enable[i] = ((fanmodecfg >> 4) & 7) + 1;
- data->tolerance[i] = fanmodecfg & 0x0f;
- data->pwm[i] = w83627ehf_read_value(data, data->REG_PWM[i]);
- }
-}
-
static void w83627ehf_update_pwm(struct w83627ehf_data *data)
{
int i;
@@ -765,28 +556,15 @@ static void w83627ehf_update_pwm(struct w83627ehf_data *data)
((pwmcfg >> W83627EHF_PWM_MODE_SHIFT[i]) & 1) ? 0 : 1;
data->pwm_enable[i] = ((pwmcfg >> W83627EHF_PWM_ENABLE_SHIFT[i])
& 3) + 1;
- data->pwm[i] = w83627ehf_read_value(data, data->REG_PWM[i]);
+ data->pwm[i] = w83627ehf_read_value(data, W83627EHF_REG_PWM[i]);
data->tolerance[i] = (tolerance >> (i == 1 ? 4 : 0)) & 0x0f;
}
}
-static void w83627ehf_update_pwm_common(struct device *dev,
- struct w83627ehf_data *data)
-{
- struct w83627ehf_sio_data *sio_data = dev_get_platdata(dev);
-
- if (sio_data->kind == nct6775 || sio_data->kind == nct6776)
- nct6775_update_pwm(data);
- else
- w83627ehf_update_pwm(data);
-}
-
static struct w83627ehf_data *w83627ehf_update_device(struct device *dev)
{
struct w83627ehf_data *data = dev_get_drvdata(dev);
- struct w83627ehf_sio_data *sio_data = dev_get_platdata(dev);
-
int i;
mutex_lock(&data->update_lock);
@@ -794,7 +572,7 @@ static struct w83627ehf_data *w83627ehf_update_device(struct device *dev)
if (time_after(jiffies, data->last_updated + HZ + HZ/2)
|| !data->valid) {
/* Fan clock dividers */
- w83627ehf_update_fan_div_common(dev, data);
+ w83627ehf_update_fan_div(data);
/* Measured voltages and limits */
for (i = 0; i < data->in_num; i++) {
@@ -816,40 +594,36 @@ static struct w83627ehf_data *w83627ehf_update_device(struct device *dev)
if (!(data->has_fan & (1 << i)))
continue;
- reg = w83627ehf_read_value(data, data->REG_FAN[i]);
- data->rpm[i] = data->fan_from_reg(reg,
- data->fan_div[i]);
+ reg = w83627ehf_read_value(data, W83627EHF_REG_FAN[i]);
+ data->rpm[i] = fan_from_reg8(reg, data->fan_div[i]);
if (data->has_fan_min & (1 << i))
data->fan_min[i] = w83627ehf_read_value(data,
- data->REG_FAN_MIN[i]);
+ W83627EHF_REG_FAN_MIN[i]);
/*
* If we failed to measure the fan speed and clock
* divider can be increased, let's try that for next
* time
*/
- if (data->has_fan_div
- && (reg >= 0xff || (sio_data->kind == nct6775
- && reg == 0x00))
- && data->fan_div[i] < 0x07) {
+ if (reg >= 0xff && data->fan_div[i] < 0x07) {
dev_dbg(dev,
"Increasing fan%d clock divider from %u to %u\n",
i + 1, div_from_reg(data->fan_div[i]),
div_from_reg(data->fan_div[i] + 1));
data->fan_div[i]++;
- w83627ehf_write_fan_div_common(dev, data, i);
+ w83627ehf_write_fan_div(data, i);
/* Preserve min limit if possible */
if ((data->has_fan_min & (1 << i))
&& data->fan_min[i] >= 2
&& data->fan_min[i] != 255)
w83627ehf_write_value(data,
- data->REG_FAN_MIN[i],
+ W83627EHF_REG_FAN_MIN[i],
(data->fan_min[i] /= 2));
}
}
- w83627ehf_update_pwm_common(dev, data);
+ w83627ehf_update_pwm(data);
for (i = 0; i < data->pwm_num; i++) {
if (!(data->has_fan & (1 << i)))
@@ -857,13 +631,13 @@ static struct w83627ehf_data *w83627ehf_update_device(struct device *dev)
data->fan_start_output[i] =
w83627ehf_read_value(data,
- data->REG_FAN_START_OUTPUT[i]);
+ W83627EHF_REG_FAN_START_OUTPUT[i]);
data->fan_stop_output[i] =
w83627ehf_read_value(data,
- data->REG_FAN_STOP_OUTPUT[i]);
+ W83627EHF_REG_FAN_STOP_OUTPUT[i]);
data->fan_stop_time[i] =
w83627ehf_read_value(data,
- data->REG_FAN_STOP_TIME[i]);
+ W83627EHF_REG_FAN_STOP_TIME[i]);
if (data->REG_FAN_MAX_OUTPUT &&
data->REG_FAN_MAX_OUTPUT[i] != 0xff)
@@ -879,7 +653,7 @@ static struct w83627ehf_data *w83627ehf_update_device(struct device *dev)
data->target_temp[i] =
w83627ehf_read_value(data,
- data->REG_TARGET[i]) &
+ W83627EHF_REG_TARGET[i]) &
(data->pwm_mode[i] == 1 ? 0x7f : 0xff);
}
@@ -923,199 +697,61 @@ static struct w83627ehf_data *w83627ehf_update_device(struct device *dev)
return data;
}
-/*
- * Sysfs callback functions
- */
-#define show_in_reg(reg) \
-static ssize_t \
-show_##reg(struct device *dev, struct device_attribute *attr, \
- char *buf) \
-{ \
- struct w83627ehf_data *data = w83627ehf_update_device(dev); \
- struct sensor_device_attribute *sensor_attr = \
- to_sensor_dev_attr(attr); \
- int nr = sensor_attr->index; \
- return sprintf(buf, "%ld\n", in_from_reg(data->reg[nr], nr, \
- data->scale_in)); \
-}
-show_in_reg(in)
-show_in_reg(in_min)
-show_in_reg(in_max)
-
#define store_in_reg(REG, reg) \
-static ssize_t \
-store_in_##reg(struct device *dev, struct device_attribute *attr, \
- const char *buf, size_t count) \
+static int \
+store_in_##reg(struct device *dev, struct w83627ehf_data *data, int channel, \
+ long val) \
{ \
- struct w83627ehf_data *data = dev_get_drvdata(dev); \
- struct sensor_device_attribute *sensor_attr = \
- to_sensor_dev_attr(attr); \
- int nr = sensor_attr->index; \
- unsigned long val; \
- int err; \
- err = kstrtoul(buf, 10, &val); \
- if (err < 0) \
- return err; \
+ if (val < 0) \
+ return -EINVAL; \
mutex_lock(&data->update_lock); \
- data->in_##reg[nr] = in_to_reg(val, nr, data->scale_in); \
- w83627ehf_write_value(data, W83627EHF_REG_IN_##REG(nr), \
- data->in_##reg[nr]); \
+ data->in_##reg[channel] = in_to_reg(val, channel, data->scale_in); \
+ w83627ehf_write_value(data, W83627EHF_REG_IN_##REG(channel), \
+ data->in_##reg[channel]); \
mutex_unlock(&data->update_lock); \
- return count; \
+ return 0; \
}
store_in_reg(MIN, min)
store_in_reg(MAX, max)
-static ssize_t show_alarm(struct device *dev, struct device_attribute *attr,
- char *buf)
+static int
+store_fan_min(struct device *dev, struct w83627ehf_data *data, int channel,
+ long val)
{
- struct w83627ehf_data *data = w83627ehf_update_device(dev);
- struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr);
- int nr = sensor_attr->index;
- return sprintf(buf, "%u\n", (data->alarms >> nr) & 0x01);
-}
-
-static struct sensor_device_attribute sda_in_input[] = {
- SENSOR_ATTR(in0_input, S_IRUGO, show_in, NULL, 0),
- SENSOR_ATTR(in1_input, S_IRUGO, show_in, NULL, 1),
- SENSOR_ATTR(in2_input, S_IRUGO, show_in, NULL, 2),
- SENSOR_ATTR(in3_input, S_IRUGO, show_in, NULL, 3),
- SENSOR_ATTR(in4_input, S_IRUGO, show_in, NULL, 4),
- SENSOR_ATTR(in5_input, S_IRUGO, show_in, NULL, 5),
- SENSOR_ATTR(in6_input, S_IRUGO, show_in, NULL, 6),
- SENSOR_ATTR(in7_input, S_IRUGO, show_in, NULL, 7),
- SENSOR_ATTR(in8_input, S_IRUGO, show_in, NULL, 8),
- SENSOR_ATTR(in9_input, S_IRUGO, show_in, NULL, 9),
-};
-
-static struct sensor_device_attribute sda_in_alarm[] = {
- SENSOR_ATTR(in0_alarm, S_IRUGO, show_alarm, NULL, 0),
- SENSOR_ATTR(in1_alarm, S_IRUGO, show_alarm, NULL, 1),
- SENSOR_ATTR(in2_alarm, S_IRUGO, show_alarm, NULL, 2),
- SENSOR_ATTR(in3_alarm, S_IRUGO, show_alarm, NULL, 3),
- SENSOR_ATTR(in4_alarm, S_IRUGO, show_alarm, NULL, 8),
- SENSOR_ATTR(in5_alarm, S_IRUGO, show_alarm, NULL, 21),
- SENSOR_ATTR(in6_alarm, S_IRUGO, show_alarm, NULL, 20),
- SENSOR_ATTR(in7_alarm, S_IRUGO, show_alarm, NULL, 16),
- SENSOR_ATTR(in8_alarm, S_IRUGO, show_alarm, NULL, 17),
- SENSOR_ATTR(in9_alarm, S_IRUGO, show_alarm, NULL, 19),
-};
-
-static struct sensor_device_attribute sda_in_min[] = {
- SENSOR_ATTR(in0_min, S_IWUSR | S_IRUGO, show_in_min, store_in_min, 0),
- SENSOR_ATTR(in1_min, S_IWUSR | S_IRUGO, show_in_min, store_in_min, 1),
- SENSOR_ATTR(in2_min, S_IWUSR | S_IRUGO, show_in_min, store_in_min, 2),
- SENSOR_ATTR(in3_min, S_IWUSR | S_IRUGO, show_in_min, store_in_min, 3),
- SENSOR_ATTR(in4_min, S_IWUSR | S_IRUGO, show_in_min, store_in_min, 4),
- SENSOR_ATTR(in5_min, S_IWUSR | S_IRUGO, show_in_min, store_in_min, 5),
- SENSOR_ATTR(in6_min, S_IWUSR | S_IRUGO, show_in_min, store_in_min, 6),
- SENSOR_ATTR(in7_min, S_IWUSR | S_IRUGO, show_in_min, store_in_min, 7),
- SENSOR_ATTR(in8_min, S_IWUSR | S_IRUGO, show_in_min, store_in_min, 8),
- SENSOR_ATTR(in9_min, S_IWUSR | S_IRUGO, show_in_min, store_in_min, 9),
-};
-
-static struct sensor_device_attribute sda_in_max[] = {
- SENSOR_ATTR(in0_max, S_IWUSR | S_IRUGO, show_in_max, store_in_max, 0),
- SENSOR_ATTR(in1_max, S_IWUSR | S_IRUGO, show_in_max, store_in_max, 1),
- SENSOR_ATTR(in2_max, S_IWUSR | S_IRUGO, show_in_max, store_in_max, 2),
- SENSOR_ATTR(in3_max, S_IWUSR | S_IRUGO, show_in_max, store_in_max, 3),
- SENSOR_ATTR(in4_max, S_IWUSR | S_IRUGO, show_in_max, store_in_max, 4),
- SENSOR_ATTR(in5_max, S_IWUSR | S_IRUGO, show_in_max, store_in_max, 5),
- SENSOR_ATTR(in6_max, S_IWUSR | S_IRUGO, show_in_max, store_in_max, 6),
- SENSOR_ATTR(in7_max, S_IWUSR | S_IRUGO, show_in_max, store_in_max, 7),
- SENSOR_ATTR(in8_max, S_IWUSR | S_IRUGO, show_in_max, store_in_max, 8),
- SENSOR_ATTR(in9_max, S_IWUSR | S_IRUGO, show_in_max, store_in_max, 9),
-};
-
-static ssize_t
-show_fan(struct device *dev, struct device_attribute *attr, char *buf)
-{
- struct w83627ehf_data *data = w83627ehf_update_device(dev);
- struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr);
- int nr = sensor_attr->index;
- return sprintf(buf, "%d\n", data->rpm[nr]);
-}
-
-static ssize_t
-show_fan_min(struct device *dev, struct device_attribute *attr, char *buf)
-{
- struct w83627ehf_data *data = w83627ehf_update_device(dev);
- struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr);
- int nr = sensor_attr->index;
- return sprintf(buf, "%d\n",
- data->fan_from_reg_min(data->fan_min[nr],
- data->fan_div[nr]));
-}
-
-static ssize_t
-show_fan_div(struct device *dev, struct device_attribute *attr,
- char *buf)
-{
- struct w83627ehf_data *data = w83627ehf_update_device(dev);
- struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr);
- int nr = sensor_attr->index;
- return sprintf(buf, "%u\n", div_from_reg(data->fan_div[nr]));
-}
-
-static ssize_t
-store_fan_min(struct device *dev, struct device_attribute *attr,
- const char *buf, size_t count)
-{
- struct w83627ehf_data *data = dev_get_drvdata(dev);
- struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr);
- int nr = sensor_attr->index;
- unsigned long val;
- int err;
unsigned int reg;
u8 new_div;
- err = kstrtoul(buf, 10, &val);
- if (err < 0)
- return err;
+ if (val < 0)
+ return -EINVAL;
mutex_lock(&data->update_lock);
- if (!data->has_fan_div) {
- /*
- * Only NCT6776F for now, so we know that this is a 13 bit
- * register
- */
- if (!val) {
- val = 0xff1f;
- } else {
- if (val > 1350000U)
- val = 135000U;
- val = 1350000U / val;
- val = (val & 0x1f) | ((val << 3) & 0xff00);
- }
- data->fan_min[nr] = val;
- goto done; /* Leave fan divider alone */
- }
if (!val) {
/* No min limit, alarm disabled */
- data->fan_min[nr] = 255;
- new_div = data->fan_div[nr]; /* No change */
- dev_info(dev, "fan%u low limit and alarm disabled\n", nr + 1);
+ data->fan_min[channel] = 255;
+ new_div = data->fan_div[channel]; /* No change */
+ dev_info(dev, "fan%u low limit and alarm disabled\n",
+ channel + 1);
} else if ((reg = 1350000U / val) >= 128 * 255) {
/*
* Speed below this value cannot possibly be represented,
* even with the highest divider (128)
*/
- data->fan_min[nr] = 254;
+ data->fan_min[channel] = 254;
new_div = 7; /* 128 == (1 << 7) */
dev_warn(dev,
"fan%u low limit %lu below minimum %u, set to minimum\n",
- nr + 1, val, data->fan_from_reg_min(254, 7));
+ channel + 1, val, fan_from_reg8(254, 7));
} else if (!reg) {
/*
* Speed above this value cannot possibly be represented,
* even with the lowest divider (1)
*/
- data->fan_min[nr] = 1;
+ data->fan_min[channel] = 1;
new_div = 0; /* 1 == (1 << 0) */
dev_warn(dev,
"fan%u low limit %lu above maximum %u, set to maximum\n",
- nr + 1, val, data->fan_from_reg_min(1, 0));
+ channel + 1, val, fan_from_reg8(1, 0));
} else {
/*
* Automatically pick the best divider, i.e. the one such
@@ -1127,362 +763,117 @@ store_fan_min(struct device *dev, struct device_attribute *attr,
reg >>= 1;
new_div++;
}
- data->fan_min[nr] = reg;
+ data->fan_min[channel] = reg;
}
/*
* Write both the fan clock divider (if it changed) and the new
* fan min (unconditionally)
*/
- if (new_div != data->fan_div[nr]) {
+ if (new_div != data->fan_div[channel]) {
dev_dbg(dev, "fan%u clock divider changed from %u to %u\n",
- nr + 1, div_from_reg(data->fan_div[nr]),
+ channel + 1, div_from_reg(data->fan_div[channel]),
div_from_reg(new_div));
- data->fan_div[nr] = new_div;
- w83627ehf_write_fan_div_common(dev, data, nr);
+ data->fan_div[channel] = new_div;
+ w83627ehf_write_fan_div(data, channel);
/* Give the chip time to sample a new speed value */
data->last_updated = jiffies;
}
-done:
- w83627ehf_write_value(data, data->REG_FAN_MIN[nr],
- data->fan_min[nr]);
- mutex_unlock(&data->update_lock);
- return count;
-}
-
-static struct sensor_device_attribute sda_fan_input[] = {
- SENSOR_ATTR(fan1_input, S_IRUGO, show_fan, NULL, 0),
- SENSOR_ATTR(fan2_input, S_IRUGO, show_fan, NULL, 1),
- SENSOR_ATTR(fan3_input, S_IRUGO, show_fan, NULL, 2),
- SENSOR_ATTR(fan4_input, S_IRUGO, show_fan, NULL, 3),
- SENSOR_ATTR(fan5_input, S_IRUGO, show_fan, NULL, 4),
-};
-
-static struct sensor_device_attribute sda_fan_alarm[] = {
- SENSOR_ATTR(fan1_alarm, S_IRUGO, show_alarm, NULL, 6),
- SENSOR_ATTR(fan2_alarm, S_IRUGO, show_alarm, NULL, 7),
- SENSOR_ATTR(fan3_alarm, S_IRUGO, show_alarm, NULL, 11),
- SENSOR_ATTR(fan4_alarm, S_IRUGO, show_alarm, NULL, 10),
- SENSOR_ATTR(fan5_alarm, S_IRUGO, show_alarm, NULL, 23),
-};
-
-static struct sensor_device_attribute sda_fan_min[] = {
- SENSOR_ATTR(fan1_min, S_IWUSR | S_IRUGO, show_fan_min,
- store_fan_min, 0),
- SENSOR_ATTR(fan2_min, S_IWUSR | S_IRUGO, show_fan_min,
- store_fan_min, 1),
- SENSOR_ATTR(fan3_min, S_IWUSR | S_IRUGO, show_fan_min,
- store_fan_min, 2),
- SENSOR_ATTR(fan4_min, S_IWUSR | S_IRUGO, show_fan_min,
- store_fan_min, 3),
- SENSOR_ATTR(fan5_min, S_IWUSR | S_IRUGO, show_fan_min,
- store_fan_min, 4),
-};
-
-static struct sensor_device_attribute sda_fan_div[] = {
- SENSOR_ATTR(fan1_div, S_IRUGO, show_fan_div, NULL, 0),
- SENSOR_ATTR(fan2_div, S_IRUGO, show_fan_div, NULL, 1),
- SENSOR_ATTR(fan3_div, S_IRUGO, show_fan_div, NULL, 2),
- SENSOR_ATTR(fan4_div, S_IRUGO, show_fan_div, NULL, 3),
- SENSOR_ATTR(fan5_div, S_IRUGO, show_fan_div, NULL, 4),
-};
-
-static ssize_t
-show_temp_label(struct device *dev, struct device_attribute *attr, char *buf)
-{
- struct w83627ehf_data *data = w83627ehf_update_device(dev);
- struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr);
- int nr = sensor_attr->index;
- return sprintf(buf, "%s\n", data->temp_label[data->temp_src[nr]]);
-}
+ w83627ehf_write_value(data, W83627EHF_REG_FAN_MIN[channel],
+ data->fan_min[channel]);
+ mutex_unlock(&data->update_lock);
-#define show_temp_reg(addr, reg) \
-static ssize_t \
-show_##reg(struct device *dev, struct device_attribute *attr, \
- char *buf) \
-{ \
- struct w83627ehf_data *data = w83627ehf_update_device(dev); \
- struct sensor_device_attribute *sensor_attr = \
- to_sensor_dev_attr(attr); \
- int nr = sensor_attr->index; \
- return sprintf(buf, "%d\n", LM75_TEMP_FROM_REG(data->reg[nr])); \
+ return 0;
}
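For reference, the divider-selection loop rewritten above keeps halving the raw count until it fits the 8-bit register, picking the smallest divider that works. A standalone C sketch of that arithmetic, with the 1350000 count clock and the 192 threshold taken from the driver and the requested RPM invented for illustration:

/* Fan-min divider selection, userspace mock-up of the loop above. */
#include <stdio.h>

int main(void)
{
	unsigned long val = 1200;		/* requested fan min, RPM (made up) */
	unsigned int reg = 1350000U / val;	/* raw count at divider 1 */
	unsigned int new_div = 0;		/* divider = 1 << new_div */

	while (reg > 192 && new_div < 7) {	/* halve until the count fits */
		reg >>= 1;
		new_div++;
	}
	printf("fan_min reg %u, divider %u, effective min %u RPM\n",
	       reg, 1U << new_div, 1350000U / (reg << new_div));
	return 0;
}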
-show_temp_reg(reg_temp, temp);
-show_temp_reg(reg_temp_over, temp_max);
-show_temp_reg(reg_temp_hyst, temp_max_hyst);
#define store_temp_reg(addr, reg) \
-static ssize_t \
-store_##reg(struct device *dev, struct device_attribute *attr, \
- const char *buf, size_t count) \
+static int \
+store_##reg(struct device *dev, struct w83627ehf_data *data, int channel, \
+ long val) \
{ \
- struct w83627ehf_data *data = dev_get_drvdata(dev); \
- struct sensor_device_attribute *sensor_attr = \
- to_sensor_dev_attr(attr); \
- int nr = sensor_attr->index; \
- int err; \
- long val; \
- err = kstrtol(buf, 10, &val); \
- if (err < 0) \
- return err; \
mutex_lock(&data->update_lock); \
- data->reg[nr] = LM75_TEMP_TO_REG(val); \
- w83627ehf_write_temp(data, data->addr[nr], data->reg[nr]); \
+ data->reg[channel] = LM75_TEMP_TO_REG(val); \
+ w83627ehf_write_temp(data, data->addr[channel], data->reg[channel]); \
mutex_unlock(&data->update_lock); \
- return count; \
+ return 0; \
}
store_temp_reg(reg_temp_over, temp_max);
store_temp_reg(reg_temp_hyst, temp_max_hyst);
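Both instantiations above go through the LM75 half-degree format: 0.5 degC per LSB, left-justified in a 16-bit register. A userspace sketch of the conversion pair, with the clamp range and rounding assumed from the kernel's lm75.h rather than copied verbatim:

/* LM75-style temperature conversions; constants are assumptions. */
#include <stdio.h>

static unsigned short temp_to_reg(long mdeg)		/* millidegrees C */
{
	if (mdeg < -55000) mdeg = -55000;		/* assumed clamp range */
	if (mdeg > 125000) mdeg = 125000;
	mdeg += (mdeg < 0) ? -250 : 250;		/* round to nearest 0.5 degC */
	return (unsigned short)((mdeg / 500) << 7);	/* left-justified */
}

static long temp_from_reg(short reg)
{
	return reg / 128 * 500;				/* back to millidegrees */
}

int main(void)
{
	unsigned short reg = temp_to_reg(52300);	/* 52.3 degC */

	printf("0x%04x -> %ld mdegC\n", reg, temp_from_reg((short)reg));
	return 0;
}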
-static ssize_t
-show_temp_offset(struct device *dev, struct device_attribute *attr, char *buf)
+static int
+store_temp_offset(struct device *dev, struct w83627ehf_data *data, int channel,
+ long val)
{
- struct w83627ehf_data *data = w83627ehf_update_device(dev);
- struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr);
-
- return sprintf(buf, "%d\n",
- data->temp_offset[sensor_attr->index] * 1000);
-}
-
-static ssize_t
-store_temp_offset(struct device *dev, struct device_attribute *attr,
- const char *buf, size_t count)
-{
- struct w83627ehf_data *data = dev_get_drvdata(dev);
- struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr);
- int nr = sensor_attr->index;
- long val;
- int err;
-
- err = kstrtol(buf, 10, &val);
- if (err < 0)
- return err;
-
val = clamp_val(DIV_ROUND_CLOSEST(val, 1000), -128, 127);
mutex_lock(&data->update_lock);
- data->temp_offset[nr] = val;
- w83627ehf_write_value(data, W83627EHF_REG_TEMP_OFFSET[nr], val);
+ data->temp_offset[channel] = val;
+ w83627ehf_write_value(data, W83627EHF_REG_TEMP_OFFSET[channel], val);
mutex_unlock(&data->update_lock);
- return count;
-}
-
-static ssize_t
-show_temp_type(struct device *dev, struct device_attribute *attr, char *buf)
-{
- struct w83627ehf_data *data = w83627ehf_update_device(dev);
- struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr);
- int nr = sensor_attr->index;
- return sprintf(buf, "%d\n", (int)data->temp_type[nr]);
-}
-
-static struct sensor_device_attribute sda_temp_input[] = {
- SENSOR_ATTR(temp1_input, S_IRUGO, show_temp, NULL, 0),
- SENSOR_ATTR(temp2_input, S_IRUGO, show_temp, NULL, 1),
- SENSOR_ATTR(temp3_input, S_IRUGO, show_temp, NULL, 2),
- SENSOR_ATTR(temp4_input, S_IRUGO, show_temp, NULL, 3),
- SENSOR_ATTR(temp5_input, S_IRUGO, show_temp, NULL, 4),
- SENSOR_ATTR(temp6_input, S_IRUGO, show_temp, NULL, 5),
- SENSOR_ATTR(temp7_input, S_IRUGO, show_temp, NULL, 6),
- SENSOR_ATTR(temp8_input, S_IRUGO, show_temp, NULL, 7),
- SENSOR_ATTR(temp9_input, S_IRUGO, show_temp, NULL, 8),
-};
-
-static struct sensor_device_attribute sda_temp_label[] = {
- SENSOR_ATTR(temp1_label, S_IRUGO, show_temp_label, NULL, 0),
- SENSOR_ATTR(temp2_label, S_IRUGO, show_temp_label, NULL, 1),
- SENSOR_ATTR(temp3_label, S_IRUGO, show_temp_label, NULL, 2),
- SENSOR_ATTR(temp4_label, S_IRUGO, show_temp_label, NULL, 3),
- SENSOR_ATTR(temp5_label, S_IRUGO, show_temp_label, NULL, 4),
- SENSOR_ATTR(temp6_label, S_IRUGO, show_temp_label, NULL, 5),
- SENSOR_ATTR(temp7_label, S_IRUGO, show_temp_label, NULL, 6),
- SENSOR_ATTR(temp8_label, S_IRUGO, show_temp_label, NULL, 7),
- SENSOR_ATTR(temp9_label, S_IRUGO, show_temp_label, NULL, 8),
-};
-
-static struct sensor_device_attribute sda_temp_max[] = {
- SENSOR_ATTR(temp1_max, S_IRUGO | S_IWUSR, show_temp_max,
- store_temp_max, 0),
- SENSOR_ATTR(temp2_max, S_IRUGO | S_IWUSR, show_temp_max,
- store_temp_max, 1),
- SENSOR_ATTR(temp3_max, S_IRUGO | S_IWUSR, show_temp_max,
- store_temp_max, 2),
- SENSOR_ATTR(temp4_max, S_IRUGO | S_IWUSR, show_temp_max,
- store_temp_max, 3),
- SENSOR_ATTR(temp5_max, S_IRUGO | S_IWUSR, show_temp_max,
- store_temp_max, 4),
- SENSOR_ATTR(temp6_max, S_IRUGO | S_IWUSR, show_temp_max,
- store_temp_max, 5),
- SENSOR_ATTR(temp7_max, S_IRUGO | S_IWUSR, show_temp_max,
- store_temp_max, 6),
- SENSOR_ATTR(temp8_max, S_IRUGO | S_IWUSR, show_temp_max,
- store_temp_max, 7),
- SENSOR_ATTR(temp9_max, S_IRUGO | S_IWUSR, show_temp_max,
- store_temp_max, 8),
-};
-
-static struct sensor_device_attribute sda_temp_max_hyst[] = {
- SENSOR_ATTR(temp1_max_hyst, S_IRUGO | S_IWUSR, show_temp_max_hyst,
- store_temp_max_hyst, 0),
- SENSOR_ATTR(temp2_max_hyst, S_IRUGO | S_IWUSR, show_temp_max_hyst,
- store_temp_max_hyst, 1),
- SENSOR_ATTR(temp3_max_hyst, S_IRUGO | S_IWUSR, show_temp_max_hyst,
- store_temp_max_hyst, 2),
- SENSOR_ATTR(temp4_max_hyst, S_IRUGO | S_IWUSR, show_temp_max_hyst,
- store_temp_max_hyst, 3),
- SENSOR_ATTR(temp5_max_hyst, S_IRUGO | S_IWUSR, show_temp_max_hyst,
- store_temp_max_hyst, 4),
- SENSOR_ATTR(temp6_max_hyst, S_IRUGO | S_IWUSR, show_temp_max_hyst,
- store_temp_max_hyst, 5),
- SENSOR_ATTR(temp7_max_hyst, S_IRUGO | S_IWUSR, show_temp_max_hyst,
- store_temp_max_hyst, 6),
- SENSOR_ATTR(temp8_max_hyst, S_IRUGO | S_IWUSR, show_temp_max_hyst,
- store_temp_max_hyst, 7),
- SENSOR_ATTR(temp9_max_hyst, S_IRUGO | S_IWUSR, show_temp_max_hyst,
- store_temp_max_hyst, 8),
-};
-
-static struct sensor_device_attribute sda_temp_alarm[] = {
- SENSOR_ATTR(temp1_alarm, S_IRUGO, show_alarm, NULL, 4),
- SENSOR_ATTR(temp2_alarm, S_IRUGO, show_alarm, NULL, 5),
- SENSOR_ATTR(temp3_alarm, S_IRUGO, show_alarm, NULL, 13),
-};
-
-static struct sensor_device_attribute sda_temp_type[] = {
- SENSOR_ATTR(temp1_type, S_IRUGO, show_temp_type, NULL, 0),
- SENSOR_ATTR(temp2_type, S_IRUGO, show_temp_type, NULL, 1),
- SENSOR_ATTR(temp3_type, S_IRUGO, show_temp_type, NULL, 2),
-};
-
-static struct sensor_device_attribute sda_temp_offset[] = {
- SENSOR_ATTR(temp1_offset, S_IRUGO | S_IWUSR, show_temp_offset,
- store_temp_offset, 0),
- SENSOR_ATTR(temp2_offset, S_IRUGO | S_IWUSR, show_temp_offset,
- store_temp_offset, 1),
- SENSOR_ATTR(temp3_offset, S_IRUGO | S_IWUSR, show_temp_offset,
- store_temp_offset, 2),
-};
-
-#define show_pwm_reg(reg) \
-static ssize_t show_##reg(struct device *dev, struct device_attribute *attr, \
- char *buf) \
-{ \
- struct w83627ehf_data *data = w83627ehf_update_device(dev); \
- struct sensor_device_attribute *sensor_attr = \
- to_sensor_dev_attr(attr); \
- int nr = sensor_attr->index; \
- return sprintf(buf, "%d\n", data->reg[nr]); \
+ return 0;
}
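The offset path stores whole degrees in a signed 8-bit register while userspace writes millidegrees; DIV_ROUND_CLOSEST() plus clamp_val() do the rounding and range limiting. A userspace re-implementation of that step, with an invented input value:

/* Offset conversion sketch: millidegrees in, signed 8-bit degrees out. */
#include <stdio.h>

static long div_round_closest(long x, long d)	/* DIV_ROUND_CLOSEST() stand-in */
{
	return (x >= 0) ? (x + d / 2) / d : (x - d / 2) / d;
}

int main(void)
{
	long val = -2600;			/* -2.6 degC requested (made up) */
	long deg = div_round_closest(val, 1000);

	if (deg < -128) deg = -128;		/* clamp_val(.., -128, 127) */
	if (deg > 127) deg = 127;
	printf("stored offset = %ld degC (reg 0x%02x)\n", deg, (unsigned char)deg);
	return 0;
}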
-show_pwm_reg(pwm_mode)
-show_pwm_reg(pwm_enable)
-show_pwm_reg(pwm)
-
-static ssize_t
-store_pwm_mode(struct device *dev, struct device_attribute *attr,
- const char *buf, size_t count)
+static int
+store_pwm_mode(struct device *dev, struct w83627ehf_data *data, int channel,
+ long val)
{
- struct w83627ehf_data *data = dev_get_drvdata(dev);
- struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr);
- struct w83627ehf_sio_data *sio_data = dev_get_platdata(dev);
- int nr = sensor_attr->index;
- unsigned long val;
- int err;
u16 reg;
- err = kstrtoul(buf, 10, &val);
- if (err < 0)
- return err;
-
- if (val > 1)
- return -EINVAL;
-
- /* On NCT6776F, DC mode is only supported for pwm1 */
- if (sio_data->kind == nct6776 && nr && val != 1)
+ if (val < 0 || val > 1)
return -EINVAL;
mutex_lock(&data->update_lock);
- reg = w83627ehf_read_value(data, W83627EHF_REG_PWM_ENABLE[nr]);
- data->pwm_mode[nr] = val;
- reg &= ~(1 << W83627EHF_PWM_MODE_SHIFT[nr]);
+ reg = w83627ehf_read_value(data, W83627EHF_REG_PWM_ENABLE[channel]);
+ data->pwm_mode[channel] = val;
+ reg &= ~(1 << W83627EHF_PWM_MODE_SHIFT[channel]);
if (!val)
- reg |= 1 << W83627EHF_PWM_MODE_SHIFT[nr];
- w83627ehf_write_value(data, W83627EHF_REG_PWM_ENABLE[nr], reg);
+ reg |= 1 << W83627EHF_PWM_MODE_SHIFT[channel];
+ w83627ehf_write_value(data, W83627EHF_REG_PWM_ENABLE[channel], reg);
mutex_unlock(&data->update_lock);
- return count;
+ return 0;
}
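The mode bit works inverted relative to the sysfs value: a set register bit selects DC output, a clear bit selects PWM, so writing 1 (PWM) clears it. A minimal read-modify-write mock-up, with MODE_SHIFT standing in for W83627EHF_PWM_MODE_SHIFT[channel] and an invented register value:

/* Read-modify-write sketch of the PWM/DC mode-bit update above. */
#include <stdio.h>

#define MODE_SHIFT 0

int main(void)
{
	unsigned short reg = 0x05;	/* pretend chip register contents */
	long val = 1;			/* 1 = PWM output, 0 = DC output */

	reg &= ~(1 << MODE_SHIFT);	/* clear the mode bit */
	if (!val)
		reg |= 1 << MODE_SHIFT;	/* set it again only for DC mode */
	printf("new register value: 0x%02x\n", reg);
	return 0;
}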
-static ssize_t
-store_pwm(struct device *dev, struct device_attribute *attr,
- const char *buf, size_t count)
+static int
+store_pwm(struct device *dev, struct w83627ehf_data *data, int channel,
+ long val)
{
- struct w83627ehf_data *data = dev_get_drvdata(dev);
- struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr);
- int nr = sensor_attr->index;
- unsigned long val;
- int err;
-
- err = kstrtoul(buf, 10, &val);
- if (err < 0)
- return err;
-
val = clamp_val(val, 0, 255);
mutex_lock(&data->update_lock);
- data->pwm[nr] = val;
- w83627ehf_write_value(data, data->REG_PWM[nr], val);
+ data->pwm[channel] = val;
+ w83627ehf_write_value(data, W83627EHF_REG_PWM[channel], val);
mutex_unlock(&data->update_lock);
- return count;
+ return 0;
}
-static ssize_t
-store_pwm_enable(struct device *dev, struct device_attribute *attr,
- const char *buf, size_t count)
+static int
+store_pwm_enable(struct device *dev, struct w83627ehf_data *data, int channel,
+ long val)
{
- struct w83627ehf_data *data = dev_get_drvdata(dev);
- struct w83627ehf_sio_data *sio_data = dev_get_platdata(dev);
- struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr);
- int nr = sensor_attr->index;
- unsigned long val;
- int err;
u16 reg;
- err = kstrtoul(buf, 10, &val);
- if (err < 0)
- return err;
-
- if (!val || (val > 4 && val != data->pwm_enable_orig[nr]))
- return -EINVAL;
- /* SmartFan III mode is not supported on NCT6776F */
- if (sio_data->kind == nct6776 && val == 4)
+ if (!val || val < 0 ||
+ (val > 4 && val != data->pwm_enable_orig[channel]))
return -EINVAL;
mutex_lock(&data->update_lock);
- data->pwm_enable[nr] = val;
- if (sio_data->kind == nct6775 || sio_data->kind == nct6776) {
- reg = w83627ehf_read_value(data,
- NCT6775_REG_FAN_MODE[nr]);
- reg &= 0x0f;
- reg |= (val - 1) << 4;
- w83627ehf_write_value(data,
- NCT6775_REG_FAN_MODE[nr], reg);
- } else {
- reg = w83627ehf_read_value(data, W83627EHF_REG_PWM_ENABLE[nr]);
- reg &= ~(0x03 << W83627EHF_PWM_ENABLE_SHIFT[nr]);
- reg |= (val - 1) << W83627EHF_PWM_ENABLE_SHIFT[nr];
- w83627ehf_write_value(data, W83627EHF_REG_PWM_ENABLE[nr], reg);
- }
+ data->pwm_enable[channel] = val;
+ reg = w83627ehf_read_value(data,
+ W83627EHF_REG_PWM_ENABLE[channel]);
+ reg &= ~(0x03 << W83627EHF_PWM_ENABLE_SHIFT[channel]);
+ reg |= (val - 1) << W83627EHF_PWM_ENABLE_SHIFT[channel];
+ w83627ehf_write_value(data, W83627EHF_REG_PWM_ENABLE[channel],
+ reg);
mutex_unlock(&data->update_lock);
- return count;
+ return 0;
}
-
#define show_tol_temp(reg) \
static ssize_t show_##reg(struct device *dev, struct device_attribute *attr, \
char *buf) \
{ \
- struct w83627ehf_data *data = w83627ehf_update_device(dev); \
+ struct w83627ehf_data *data = w83627ehf_update_device(dev->parent); \
struct sensor_device_attribute *sensor_attr = \
to_sensor_dev_attr(attr); \
int nr = sensor_attr->index; \
@@ -1510,7 +901,7 @@ store_target_temp(struct device *dev, struct device_attribute *attr,
mutex_lock(&data->update_lock);
data->target_temp[nr] = val;
- w83627ehf_write_value(data, data->REG_TARGET[nr], val);
+ w83627ehf_write_value(data, W83627EHF_REG_TARGET[nr], val);
mutex_unlock(&data->update_lock);
return count;
}
@@ -1520,7 +911,6 @@ store_tolerance(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
{
struct w83627ehf_data *data = dev_get_drvdata(dev);
- struct w83627ehf_sio_data *sio_data = dev_get_platdata(dev);
struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr);
int nr = sensor_attr->index;
u16 reg;
@@ -1535,76 +925,34 @@ store_tolerance(struct device *dev, struct device_attribute *attr,
val = clamp_val(DIV_ROUND_CLOSEST(val, 1000), 0, 15);
mutex_lock(&data->update_lock);
- if (sio_data->kind == nct6775 || sio_data->kind == nct6776) {
- /* Limit tolerance further for NCT6776F */
- if (sio_data->kind == nct6776 && val > 7)
- val = 7;
- reg = w83627ehf_read_value(data, NCT6775_REG_FAN_MODE[nr]);
+ reg = w83627ehf_read_value(data, W83627EHF_REG_TOLERANCE[nr]);
+ if (nr == 1)
+ reg = (reg & 0x0f) | (val << 4);
+ else
reg = (reg & 0xf0) | val;
- w83627ehf_write_value(data, NCT6775_REG_FAN_MODE[nr], reg);
- } else {
- reg = w83627ehf_read_value(data, W83627EHF_REG_TOLERANCE[nr]);
- if (nr == 1)
- reg = (reg & 0x0f) | (val << 4);
- else
- reg = (reg & 0xf0) | val;
- w83627ehf_write_value(data, W83627EHF_REG_TOLERANCE[nr], reg);
- }
+ w83627ehf_write_value(data, W83627EHF_REG_TOLERANCE[nr], reg);
data->tolerance[nr] = val;
mutex_unlock(&data->update_lock);
return count;
}
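Two tolerance values share each register, one per nibble, with nr == 1 landing in the high nibble. A sketch of the packing (the nibble layout is taken from the code above; which channels pair up in one register, and the register contents, are illustrative):

/* Tolerance nibble packing, mirroring the branch above. */
#include <stdio.h>

static unsigned char pack_tolerance(unsigned char reg, int nr, unsigned val)
{
	if (nr == 1)
		return (reg & 0x0f) | (val << 4);	/* high nibble */
	return (reg & 0xf0) | val;			/* low nibble */
}

int main(void)
{
	unsigned char reg = 0x52;			/* invented contents */

	printf("0x%02x\n", pack_tolerance(reg, 1, 7));	/* -> 0x72 */
	printf("0x%02x\n", pack_tolerance(reg, 0, 3));	/* -> 0x53 */
	return 0;
}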
-static struct sensor_device_attribute sda_pwm[] = {
- SENSOR_ATTR(pwm1, S_IWUSR | S_IRUGO, show_pwm, store_pwm, 0),
- SENSOR_ATTR(pwm2, S_IWUSR | S_IRUGO, show_pwm, store_pwm, 1),
- SENSOR_ATTR(pwm3, S_IWUSR | S_IRUGO, show_pwm, store_pwm, 2),
- SENSOR_ATTR(pwm4, S_IWUSR | S_IRUGO, show_pwm, store_pwm, 3),
-};
-
-static struct sensor_device_attribute sda_pwm_mode[] = {
- SENSOR_ATTR(pwm1_mode, S_IWUSR | S_IRUGO, show_pwm_mode,
- store_pwm_mode, 0),
- SENSOR_ATTR(pwm2_mode, S_IWUSR | S_IRUGO, show_pwm_mode,
- store_pwm_mode, 1),
- SENSOR_ATTR(pwm3_mode, S_IWUSR | S_IRUGO, show_pwm_mode,
- store_pwm_mode, 2),
- SENSOR_ATTR(pwm4_mode, S_IWUSR | S_IRUGO, show_pwm_mode,
- store_pwm_mode, 3),
-};
-
-static struct sensor_device_attribute sda_pwm_enable[] = {
- SENSOR_ATTR(pwm1_enable, S_IWUSR | S_IRUGO, show_pwm_enable,
- store_pwm_enable, 0),
- SENSOR_ATTR(pwm2_enable, S_IWUSR | S_IRUGO, show_pwm_enable,
- store_pwm_enable, 1),
- SENSOR_ATTR(pwm3_enable, S_IWUSR | S_IRUGO, show_pwm_enable,
- store_pwm_enable, 2),
- SENSOR_ATTR(pwm4_enable, S_IWUSR | S_IRUGO, show_pwm_enable,
- store_pwm_enable, 3),
-};
-
-static struct sensor_device_attribute sda_target_temp[] = {
- SENSOR_ATTR(pwm1_target, S_IWUSR | S_IRUGO, show_target_temp,
- store_target_temp, 0),
- SENSOR_ATTR(pwm2_target, S_IWUSR | S_IRUGO, show_target_temp,
- store_target_temp, 1),
- SENSOR_ATTR(pwm3_target, S_IWUSR | S_IRUGO, show_target_temp,
- store_target_temp, 2),
- SENSOR_ATTR(pwm4_target, S_IWUSR | S_IRUGO, show_target_temp,
- store_target_temp, 3),
-};
-
-static struct sensor_device_attribute sda_tolerance[] = {
- SENSOR_ATTR(pwm1_tolerance, S_IWUSR | S_IRUGO, show_tolerance,
- store_tolerance, 0),
- SENSOR_ATTR(pwm2_tolerance, S_IWUSR | S_IRUGO, show_tolerance,
- store_tolerance, 1),
- SENSOR_ATTR(pwm3_tolerance, S_IWUSR | S_IRUGO, show_tolerance,
- store_tolerance, 2),
- SENSOR_ATTR(pwm4_tolerance, S_IWUSR | S_IRUGO, show_tolerance,
- store_tolerance, 3),
-};
+static SENSOR_DEVICE_ATTR(pwm1_target, 0644, show_target_temp,
+ store_target_temp, 0);
+static SENSOR_DEVICE_ATTR(pwm2_target, 0644, show_target_temp,
+ store_target_temp, 1);
+static SENSOR_DEVICE_ATTR(pwm3_target, 0644, show_target_temp,
+ store_target_temp, 2);
+static SENSOR_DEVICE_ATTR(pwm4_target, 0644, show_target_temp,
+ store_target_temp, 3);
+
+static SENSOR_DEVICE_ATTR(pwm1_tolerance, 0644, show_tolerance,
+ store_tolerance, 0);
+static SENSOR_DEVICE_ATTR(pwm2_tolerance, 0644, show_tolerance,
+ store_tolerance, 1);
+static SENSOR_DEVICE_ATTR(pwm3_tolerance, 0644, show_tolerance,
+ store_tolerance, 2);
+static SENSOR_DEVICE_ATTR(pwm4_tolerance, 0644, show_tolerance,
+ store_tolerance, 3);
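The conversions above also replace the symbolic S_IWUSR | S_IRUGO modes with the octal literal 0644. A quick userspace check of the equivalence (S_IRUGO is kernel-only, so its expansion is spelled out):

/* Sanity check: S_IWUSR | S_IRUGO == 0644. */
#include <stdio.h>
#include <sys/stat.h>

int main(void)
{
	unsigned mode = S_IWUSR | S_IRUSR | S_IRGRP | S_IROTH;

	printf("%04o\n", mode);		/* prints 0644 */
	return 0;
}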
/* Smart Fan registers */
@@ -1612,7 +960,7 @@ static struct sensor_device_attribute sda_tolerance[] = {
static ssize_t show_##reg(struct device *dev, struct device_attribute *attr, \
char *buf) \
{ \
- struct w83627ehf_data *data = w83627ehf_update_device(dev); \
+ struct w83627ehf_data *data = w83627ehf_update_device(dev->parent); \
struct sensor_device_attribute *sensor_attr = \
to_sensor_dev_attr(attr); \
int nr = sensor_attr->index; \
@@ -1634,21 +982,21 @@ store_##reg(struct device *dev, struct device_attribute *attr, \
val = clamp_val(val, 1, 255); \
mutex_lock(&data->update_lock); \
data->reg[nr] = val; \
- w83627ehf_write_value(data, data->REG_##REG[nr], val); \
+ w83627ehf_write_value(data, REG[nr], val); \
mutex_unlock(&data->update_lock); \
return count; \
}
-fan_functions(fan_start_output, FAN_START_OUTPUT)
-fan_functions(fan_stop_output, FAN_STOP_OUTPUT)
-fan_functions(fan_max_output, FAN_MAX_OUTPUT)
-fan_functions(fan_step_output, FAN_STEP_OUTPUT)
+fan_functions(fan_start_output, W83627EHF_REG_FAN_START_OUTPUT)
+fan_functions(fan_stop_output, W83627EHF_REG_FAN_STOP_OUTPUT)
+fan_functions(fan_max_output, data->REG_FAN_MAX_OUTPUT)
+fan_functions(fan_step_output, data->REG_FAN_STEP_OUTPUT)
#define fan_time_functions(reg, REG) \
static ssize_t show_##reg(struct device *dev, struct device_attribute *attr, \
char *buf) \
{ \
- struct w83627ehf_data *data = w83627ehf_update_device(dev); \
+ struct w83627ehf_data *data = w83627ehf_update_device(dev->parent); \
struct sensor_device_attribute *sensor_attr = \
to_sensor_dev_attr(attr); \
int nr = sensor_attr->index; \
@@ -1673,78 +1021,61 @@ store_##reg(struct device *dev, struct device_attribute *attr, \
val = step_time_to_reg(val, data->pwm_mode[nr]); \
mutex_lock(&data->update_lock); \
data->reg[nr] = val; \
- w83627ehf_write_value(data, data->REG_##REG[nr], val); \
+ w83627ehf_write_value(data, REG[nr], val); \
mutex_unlock(&data->update_lock); \
return count; \
} \
-fan_time_functions(fan_stop_time, FAN_STOP_TIME)
-
-static ssize_t name_show(struct device *dev, struct device_attribute *attr,
- char *buf)
-{
- struct w83627ehf_data *data = dev_get_drvdata(dev);
-
- return sprintf(buf, "%s\n", data->name);
-}
-static DEVICE_ATTR_RO(name);
-
-static struct sensor_device_attribute sda_sf3_arrays_fan4[] = {
- SENSOR_ATTR(pwm4_stop_time, S_IWUSR | S_IRUGO, show_fan_stop_time,
- store_fan_stop_time, 3),
- SENSOR_ATTR(pwm4_start_output, S_IWUSR | S_IRUGO, show_fan_start_output,
- store_fan_start_output, 3),
- SENSOR_ATTR(pwm4_stop_output, S_IWUSR | S_IRUGO, show_fan_stop_output,
- store_fan_stop_output, 3),
- SENSOR_ATTR(pwm4_max_output, S_IWUSR | S_IRUGO, show_fan_max_output,
- store_fan_max_output, 3),
- SENSOR_ATTR(pwm4_step_output, S_IWUSR | S_IRUGO, show_fan_step_output,
- store_fan_step_output, 3),
-};
-
-static struct sensor_device_attribute sda_sf3_arrays_fan3[] = {
- SENSOR_ATTR(pwm3_stop_time, S_IWUSR | S_IRUGO, show_fan_stop_time,
- store_fan_stop_time, 2),
- SENSOR_ATTR(pwm3_start_output, S_IWUSR | S_IRUGO, show_fan_start_output,
- store_fan_start_output, 2),
- SENSOR_ATTR(pwm3_stop_output, S_IWUSR | S_IRUGO, show_fan_stop_output,
- store_fan_stop_output, 2),
-};
-
-static struct sensor_device_attribute sda_sf3_arrays[] = {
- SENSOR_ATTR(pwm1_stop_time, S_IWUSR | S_IRUGO, show_fan_stop_time,
- store_fan_stop_time, 0),
- SENSOR_ATTR(pwm2_stop_time, S_IWUSR | S_IRUGO, show_fan_stop_time,
- store_fan_stop_time, 1),
- SENSOR_ATTR(pwm1_start_output, S_IWUSR | S_IRUGO, show_fan_start_output,
- store_fan_start_output, 0),
- SENSOR_ATTR(pwm2_start_output, S_IWUSR | S_IRUGO, show_fan_start_output,
- store_fan_start_output, 1),
- SENSOR_ATTR(pwm1_stop_output, S_IWUSR | S_IRUGO, show_fan_stop_output,
- store_fan_stop_output, 0),
- SENSOR_ATTR(pwm2_stop_output, S_IWUSR | S_IRUGO, show_fan_stop_output,
- store_fan_stop_output, 1),
-};
+fan_time_functions(fan_stop_time, W83627EHF_REG_FAN_STOP_TIME)
+
+static SENSOR_DEVICE_ATTR(pwm4_stop_time, 0644, show_fan_stop_time,
+ store_fan_stop_time, 3);
+static SENSOR_DEVICE_ATTR(pwm4_start_output, 0644, show_fan_start_output,
+ store_fan_start_output, 3);
+static SENSOR_DEVICE_ATTR(pwm4_stop_output, 0644, show_fan_stop_output,
+ store_fan_stop_output, 3);
+static SENSOR_DEVICE_ATTR(pwm4_max_output, 0644, show_fan_max_output,
+ store_fan_max_output, 3);
+static SENSOR_DEVICE_ATTR(pwm4_step_output, 0644, show_fan_step_output,
+ store_fan_step_output, 3);
+
+static SENSOR_DEVICE_ATTR(pwm3_stop_time, 0644, show_fan_stop_time,
+ store_fan_stop_time, 2);
+static SENSOR_DEVICE_ATTR(pwm3_start_output, 0644, show_fan_start_output,
+ store_fan_start_output, 2);
+static SENSOR_DEVICE_ATTR(pwm3_stop_output, 0644, show_fan_stop_output,
+ store_fan_stop_output, 2);
+
+static SENSOR_DEVICE_ATTR(pwm1_stop_time, 0644, show_fan_stop_time,
+ store_fan_stop_time, 0);
+static SENSOR_DEVICE_ATTR(pwm2_stop_time, 0644, show_fan_stop_time,
+ store_fan_stop_time, 1);
+static SENSOR_DEVICE_ATTR(pwm1_start_output, 0644, show_fan_start_output,
+ store_fan_start_output, 0);
+static SENSOR_DEVICE_ATTR(pwm2_start_output, 0644, show_fan_start_output,
+ store_fan_start_output, 1);
+static SENSOR_DEVICE_ATTR(pwm1_stop_output, 0644, show_fan_stop_output,
+ store_fan_stop_output, 0);
+static SENSOR_DEVICE_ATTR(pwm2_stop_output, 0644, show_fan_stop_output,
+ store_fan_stop_output, 1);
/*
* pwm1 and pwm3 don't support max and step settings on all chips.
* Need to check support while generating/removing attribute files.
*/
-static struct sensor_device_attribute sda_sf3_max_step_arrays[] = {
- SENSOR_ATTR(pwm1_max_output, S_IWUSR | S_IRUGO, show_fan_max_output,
- store_fan_max_output, 0),
- SENSOR_ATTR(pwm1_step_output, S_IWUSR | S_IRUGO, show_fan_step_output,
- store_fan_step_output, 0),
- SENSOR_ATTR(pwm2_max_output, S_IWUSR | S_IRUGO, show_fan_max_output,
- store_fan_max_output, 1),
- SENSOR_ATTR(pwm2_step_output, S_IWUSR | S_IRUGO, show_fan_step_output,
- store_fan_step_output, 1),
- SENSOR_ATTR(pwm3_max_output, S_IWUSR | S_IRUGO, show_fan_max_output,
- store_fan_max_output, 2),
- SENSOR_ATTR(pwm3_step_output, S_IWUSR | S_IRUGO, show_fan_step_output,
- store_fan_step_output, 2),
-};
+static SENSOR_DEVICE_ATTR(pwm1_max_output, 0644, show_fan_max_output,
+ store_fan_max_output, 0);
+static SENSOR_DEVICE_ATTR(pwm1_step_output, 0644, show_fan_step_output,
+ store_fan_step_output, 0);
+static SENSOR_DEVICE_ATTR(pwm2_max_output, 0644, show_fan_max_output,
+ store_fan_max_output, 1);
+static SENSOR_DEVICE_ATTR(pwm2_step_output, 0644, show_fan_step_output,
+ store_fan_step_output, 1);
+static SENSOR_DEVICE_ATTR(pwm3_max_output, 0644, show_fan_max_output,
+ store_fan_max_output, 2);
+static SENSOR_DEVICE_ATTR(pwm3_step_output, 0644, show_fan_step_output,
+ store_fan_step_output, 2);
static ssize_t
cpu0_vid_show(struct device *dev, struct device_attribute *attr, char *buf)
@@ -1752,33 +1083,20 @@ cpu0_vid_show(struct device *dev, struct device_attribute *attr, char *buf)
struct w83627ehf_data *data = dev_get_drvdata(dev);
return sprintf(buf, "%d\n", vid_from_reg(data->vid, data->vrm));
}
-static DEVICE_ATTR_RO(cpu0_vid);
+DEVICE_ATTR_RO(cpu0_vid);
/* Case open detection */
-
-static ssize_t
-show_caseopen(struct device *dev, struct device_attribute *attr, char *buf)
+static int
+clear_caseopen(struct device *dev, struct w83627ehf_data *data, int channel,
+ long val)
{
- struct w83627ehf_data *data = w83627ehf_update_device(dev);
-
- return sprintf(buf, "%d\n",
- !!(data->caseopen & to_sensor_dev_attr_2(attr)->index));
-}
-
-static ssize_t
-clear_caseopen(struct device *dev, struct device_attribute *attr,
- const char *buf, size_t count)
-{
- struct w83627ehf_data *data = dev_get_drvdata(dev);
- unsigned long val;
- u16 reg, mask;
+ const u16 mask = 0x80;
+ u16 reg;
- if (kstrtoul(buf, 10, &val) || val != 0)
+ if (val != 0 || channel != 0)
return -EINVAL;
- mask = to_sensor_dev_attr_2(attr)->nr;
-
mutex_lock(&data->update_lock);
reg = w83627ehf_read_value(data, W83627EHF_REG_CASEOPEN_CLR);
w83627ehf_write_value(data, W83627EHF_REG_CASEOPEN_CLR, reg | mask);
@@ -1786,85 +1104,116 @@ clear_caseopen(struct device *dev, struct device_attribute *attr,
data->valid = 0; /* Force cache refresh */
mutex_unlock(&data->update_lock);
- return count;
+ return 0;
}
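After the conversion, the latch is still cleared from userspace by writing 0 to the intrusion attribute; anything else (or any channel other than 0) is rejected with -EINVAL. A hedged example, noting that the hwmon index in the path varies per system:

/* Userspace sketch: clearing the intrusion latch via hwmon sysfs. */
#include <stdio.h>

int main(void)
{
	FILE *f = fopen("/sys/class/hwmon/hwmon0/intrusion0_alarm", "w");

	if (!f) {
		perror("fopen");
		return 1;
	}
	fputs("0", f);		/* anything other than "0" yields EINVAL */
	fclose(f);
	return 0;
}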
-static struct sensor_device_attribute_2 sda_caseopen[] = {
- SENSOR_ATTR_2(intrusion0_alarm, S_IWUSR | S_IRUGO, show_caseopen,
- clear_caseopen, 0x80, 0x10),
- SENSOR_ATTR_2(intrusion1_alarm, S_IWUSR | S_IRUGO, show_caseopen,
- clear_caseopen, 0x40, 0x40),
-};
-
-/*
- * Driver and device management
- */
-
-static void w83627ehf_device_remove_files(struct device *dev)
+static umode_t w83627ehf_attrs_visible(struct kobject *kobj,
+ struct attribute *a, int n)
{
- /*
- * some entries in the following arrays may not have been used in
- * device_create_file(), but device_remove_file() will ignore them
- */
- int i;
+ struct device *dev = container_of(kobj, struct device, kobj);
struct w83627ehf_data *data = dev_get_drvdata(dev);
+ struct device_attribute *devattr;
+ struct sensor_device_attribute *sda;
+
+ devattr = container_of(a, struct device_attribute, attr);
+
+ /* Not a sensor attribute */
+ if (devattr->show == cpu0_vid_show && data->have_vid)
+ return a->mode;
+
+ sda = (struct sensor_device_attribute *)devattr;
+
+ if (sda->index < 2 &&
+ (devattr->show == show_fan_stop_time ||
+ devattr->show == show_fan_start_output ||
+ devattr->show == show_fan_stop_output))
+ return a->mode;
+
+ if (sda->index < 3 &&
+ (devattr->show == show_fan_max_output ||
+ devattr->show == show_fan_step_output) &&
+ data->REG_FAN_STEP_OUTPUT &&
+ data->REG_FAN_STEP_OUTPUT[sda->index] != 0xff)
+ return a->mode;
+
+ /* If fan3 and fan4 are enabled, create the files for them */
+ if (sda->index == 2 &&
+ (data->has_fan & (1 << 2)) && data->pwm_num >= 3 &&
+ (devattr->show == show_fan_stop_time ||
+ devattr->show == show_fan_start_output ||
+ devattr->show == show_fan_stop_output))
+ return a->mode;
+
+ if (sda->index == 3 &&
+ (data->has_fan & (1 << 3)) && data->pwm_num >= 4 &&
+ (devattr->show == show_fan_stop_time ||
+ devattr->show == show_fan_start_output ||
+ devattr->show == show_fan_stop_output ||
+ devattr->show == show_fan_max_output ||
+ devattr->show == show_fan_step_output))
+ return a->mode;
+
+ if ((devattr->show == show_target_temp ||
+ devattr->show == show_tolerance) &&
+ (data->has_fan & (1 << sda->index)) &&
+ sda->index < data->pwm_num)
+ return a->mode;
- for (i = 0; i < ARRAY_SIZE(sda_sf3_arrays); i++)
- device_remove_file(dev, &sda_sf3_arrays[i].dev_attr);
- for (i = 0; i < ARRAY_SIZE(sda_sf3_max_step_arrays); i++) {
- struct sensor_device_attribute *attr =
- &sda_sf3_max_step_arrays[i];
- if (data->REG_FAN_STEP_OUTPUT &&
- data->REG_FAN_STEP_OUTPUT[attr->index] != 0xff)
- device_remove_file(dev, &attr->dev_attr);
- }
- for (i = 0; i < ARRAY_SIZE(sda_sf3_arrays_fan3); i++)
- device_remove_file(dev, &sda_sf3_arrays_fan3[i].dev_attr);
- for (i = 0; i < ARRAY_SIZE(sda_sf3_arrays_fan4); i++)
- device_remove_file(dev, &sda_sf3_arrays_fan4[i].dev_attr);
- for (i = 0; i < data->in_num; i++) {
- if ((i == 6) && data->in6_skip)
- continue;
- device_remove_file(dev, &sda_in_input[i].dev_attr);
- device_remove_file(dev, &sda_in_alarm[i].dev_attr);
- device_remove_file(dev, &sda_in_min[i].dev_attr);
- device_remove_file(dev, &sda_in_max[i].dev_attr);
- }
- for (i = 0; i < 5; i++) {
- device_remove_file(dev, &sda_fan_input[i].dev_attr);
- device_remove_file(dev, &sda_fan_alarm[i].dev_attr);
- device_remove_file(dev, &sda_fan_div[i].dev_attr);
- device_remove_file(dev, &sda_fan_min[i].dev_attr);
- }
- for (i = 0; i < data->pwm_num; i++) {
- device_remove_file(dev, &sda_pwm[i].dev_attr);
- device_remove_file(dev, &sda_pwm_mode[i].dev_attr);
- device_remove_file(dev, &sda_pwm_enable[i].dev_attr);
- device_remove_file(dev, &sda_target_temp[i].dev_attr);
- device_remove_file(dev, &sda_tolerance[i].dev_attr);
- }
- for (i = 0; i < NUM_REG_TEMP; i++) {
- if (!(data->have_temp & (1 << i)))
- continue;
- device_remove_file(dev, &sda_temp_input[i].dev_attr);
- device_remove_file(dev, &sda_temp_label[i].dev_attr);
- if (i == 2 && data->temp3_val_only)
- continue;
- device_remove_file(dev, &sda_temp_max[i].dev_attr);
- device_remove_file(dev, &sda_temp_max_hyst[i].dev_attr);
- if (i > 2)
- continue;
- device_remove_file(dev, &sda_temp_alarm[i].dev_attr);
- device_remove_file(dev, &sda_temp_type[i].dev_attr);
- device_remove_file(dev, &sda_temp_offset[i].dev_attr);
- }
+ return 0;
+}
+
+/* These groups handle non-standard attributes used in this device */
+static struct attribute *w83627ehf_attrs[] = {
+
+ &sensor_dev_attr_pwm1_stop_time.dev_attr.attr,
+ &sensor_dev_attr_pwm1_start_output.dev_attr.attr,
+ &sensor_dev_attr_pwm1_stop_output.dev_attr.attr,
+ &sensor_dev_attr_pwm1_max_output.dev_attr.attr,
+ &sensor_dev_attr_pwm1_step_output.dev_attr.attr,
+ &sensor_dev_attr_pwm1_target.dev_attr.attr,
+ &sensor_dev_attr_pwm1_tolerance.dev_attr.attr,
+
+ &sensor_dev_attr_pwm2_stop_time.dev_attr.attr,
+ &sensor_dev_attr_pwm2_start_output.dev_attr.attr,
+ &sensor_dev_attr_pwm2_stop_output.dev_attr.attr,
+ &sensor_dev_attr_pwm2_max_output.dev_attr.attr,
+ &sensor_dev_attr_pwm2_step_output.dev_attr.attr,
+ &sensor_dev_attr_pwm2_target.dev_attr.attr,
+ &sensor_dev_attr_pwm2_tolerance.dev_attr.attr,
+
+ &sensor_dev_attr_pwm3_stop_time.dev_attr.attr,
+ &sensor_dev_attr_pwm3_start_output.dev_attr.attr,
+ &sensor_dev_attr_pwm3_stop_output.dev_attr.attr,
+ &sensor_dev_attr_pwm3_max_output.dev_attr.attr,
+ &sensor_dev_attr_pwm3_step_output.dev_attr.attr,
+ &sensor_dev_attr_pwm3_target.dev_attr.attr,
+ &sensor_dev_attr_pwm3_tolerance.dev_attr.attr,
+
+ &sensor_dev_attr_pwm4_stop_time.dev_attr.attr,
+ &sensor_dev_attr_pwm4_start_output.dev_attr.attr,
+ &sensor_dev_attr_pwm4_stop_output.dev_attr.attr,
+ &sensor_dev_attr_pwm4_max_output.dev_attr.attr,
+ &sensor_dev_attr_pwm4_step_output.dev_attr.attr,
+ &sensor_dev_attr_pwm4_target.dev_attr.attr,
+ &sensor_dev_attr_pwm4_tolerance.dev_attr.attr,
+
+ &dev_attr_cpu0_vid.attr,
+ NULL
+};
- device_remove_file(dev, &sda_caseopen[0].dev_attr);
- device_remove_file(dev, &sda_caseopen[1].dev_attr);
+static const struct attribute_group w83627ehf_group = {
+ .attrs = w83627ehf_attrs,
+ .is_visible = w83627ehf_attrs_visible,
+};
- device_remove_file(dev, &dev_attr_name);
- device_remove_file(dev, &dev_attr_cpu0_vid);
-}
+static const struct attribute_group *w83627ehf_groups[] = {
+ &w83627ehf_group,
+ NULL
+};
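The is_visible() callback above is invoked by the sysfs core once per attribute when the group is created; returning 0 hides the file, returning a->mode exposes it. A userspace analogue of that filtering, with invented attribute names and a hard-coded pwm count:

/* Attribute-visibility filtering, modeled on the group above. */
#include <stdio.h>
#include <string.h>

struct attr { const char *name; unsigned mode; };

static unsigned visible(const struct attr *a, int pwm_num)
{
	/* hide pwm4_* files when the chip only has 3 PWM outputs */
	if (!strncmp(a->name, "pwm4", 4) && pwm_num < 4)
		return 0;
	return a->mode;
}

int main(void)
{
	const struct attr attrs[] = {
		{ "pwm3_target", 0644 },
		{ "pwm4_target", 0644 },
	};
	unsigned i;

	for (i = 0; i < sizeof(attrs) / sizeof(attrs[0]); i++)
		if (visible(&attrs[i], 3))
			printf("creating %s (%04o)\n", attrs[i].name, attrs[i].mode);
	return 0;
}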
+
+/*
+ * Driver and device management
+ */
/* Get the monitoring functions started */
static inline void w83627ehf_init_device(struct w83627ehf_data *data,
@@ -1927,16 +1276,6 @@ static inline void w83627ehf_init_device(struct w83627ehf_data *data,
}
}
-static void w82627ehf_swap_tempreg(struct w83627ehf_data *data,
- int r1, int r2)
-{
- swap(data->temp_src[r1], data->temp_src[r2]);
- swap(data->reg_temp[r1], data->reg_temp[r2]);
- swap(data->reg_temp_over[r1], data->reg_temp_over[r2]);
- swap(data->reg_temp_hyst[r1], data->reg_temp_hyst[r2]);
- swap(data->reg_temp_config[r1], data->reg_temp_config[r2]);
-}
-
static void
w83627ehf_set_temp_reg_ehf(struct w83627ehf_data *data, int n_temp)
{
@@ -1954,7 +1293,7 @@ static void
w83627ehf_check_fan_inputs(const struct w83627ehf_sio_data *sio_data,
struct w83627ehf_data *data)
{
- int fan3pin, fan4pin, fan4min, fan5pin, regval;
+ int fan3pin, fan4pin, fan5pin, regval;
/* The W83627UHG is simple, only two fan inputs, no config */
if (sio_data->kind == w83627uhg) {
@@ -1964,77 +1303,392 @@ w83627ehf_check_fan_inputs(const struct w83627ehf_sio_data *sio_data,
}
/* fan4 and fan5 share some pins with the GPIO and serial flash */
- if (sio_data->kind == nct6775) {
- /* On NCT6775, fan4 shares pins with the fdc interface */
- fan3pin = 1;
- fan4pin = !(superio_inb(sio_data->sioreg, 0x2A) & 0x80);
- fan4min = 0;
- fan5pin = 0;
- } else if (sio_data->kind == nct6776) {
- bool gpok = superio_inb(sio_data->sioreg, 0x27) & 0x80;
-
- superio_select(sio_data->sioreg, W83627EHF_LD_HWM);
- regval = superio_inb(sio_data->sioreg, SIO_REG_ENABLE);
-
- if (regval & 0x80)
- fan3pin = gpok;
- else
- fan3pin = !(superio_inb(sio_data->sioreg, 0x24) & 0x40);
-
- if (regval & 0x40)
- fan4pin = gpok;
- else
- fan4pin = !!(superio_inb(sio_data->sioreg, 0x1C) & 0x01);
-
- if (regval & 0x20)
- fan5pin = gpok;
- else
- fan5pin = !!(superio_inb(sio_data->sioreg, 0x1C) & 0x02);
-
- fan4min = fan4pin;
- } else if (sio_data->kind == w83667hg || sio_data->kind == w83667hg_b) {
+ if (sio_data->kind == w83667hg || sio_data->kind == w83667hg_b) {
fan3pin = 1;
fan4pin = superio_inb(sio_data->sioreg, 0x27) & 0x40;
fan5pin = superio_inb(sio_data->sioreg, 0x27) & 0x20;
- fan4min = fan4pin;
} else {
fan3pin = 1;
fan4pin = !(superio_inb(sio_data->sioreg, 0x29) & 0x06);
fan5pin = !(superio_inb(sio_data->sioreg, 0x24) & 0x02);
- fan4min = fan4pin;
}
data->has_fan = data->has_fan_min = 0x03; /* fan1 and fan2 */
data->has_fan |= (fan3pin << 2);
data->has_fan_min |= (fan3pin << 2);
- if (sio_data->kind == nct6775 || sio_data->kind == nct6776) {
- /*
- * NCT6775F and NCT6776F don't have the W83627EHF_REG_FANDIV1
- * register
- */
- data->has_fan |= (fan4pin << 3) | (fan5pin << 4);
- data->has_fan_min |= (fan4min << 3) | (fan5pin << 4);
- } else {
- /*
- * It looks like fan4 and fan5 pins can be alternatively used
- * as fan on/off switches, but fan5 control is write only :/
- * We assume that if the serial interface is disabled, designers
- * connected fan5 as input unless they are emitting logic 1, which
- * is not the default.
- */
- regval = w83627ehf_read_value(data, W83627EHF_REG_FANDIV1);
- if ((regval & (1 << 2)) && fan4pin) {
- data->has_fan |= (1 << 3);
- data->has_fan_min |= (1 << 3);
+ /*
+ * It looks like fan4 and fan5 pins can be alternatively used
+ * as fan on/off switches, but fan5 control is write only :/
+ * We assume that if the serial interface is disabled, designers
+ * connected fan5 as input unless they are emitting log 1, which
+ * is not the default.
+ */
+ regval = w83627ehf_read_value(data, W83627EHF_REG_FANDIV1);
+ if ((regval & (1 << 2)) && fan4pin) {
+ data->has_fan |= (1 << 3);
+ data->has_fan_min |= (1 << 3);
+ }
+ if (!(regval & (1 << 1)) && fan5pin) {
+ data->has_fan |= (1 << 4);
+ data->has_fan_min |= (1 << 4);
+ }
+}
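The probing above builds has_fan as a bitmap: fan1/fan2 are always present (0x03) and fan3..fan5 bits are OR-ed in from pin probing and the FANDIV1 register. A mock-up of the bitmap construction with invented pin states and register contents:

/* has_fan bitmap construction, mirroring the logic above. */
#include <stdio.h>

int main(void)
{
	int fan3pin = 1, fan4pin = 1, fan5pin = 0;	/* pretend pin probing */
	unsigned regval = 0x04;				/* pretend FANDIV1 value */
	unsigned has_fan = 0x03;			/* fan1 and fan2 */

	has_fan |= fan3pin << 2;
	if ((regval & (1 << 2)) && fan4pin)		/* fan4 routed as input */
		has_fan |= 1 << 3;
	if (!(regval & (1 << 1)) && fan5pin)		/* fan5 unless serial iface */
		has_fan |= 1 << 4;
	printf("has_fan bitmap: 0x%02x\n", has_fan);	/* -> 0x0f here */
	return 0;
}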
+
+static umode_t
+w83627ehf_is_visible(const void *drvdata, enum hwmon_sensor_types type,
+ u32 attr, int channel)
+{
+ const struct w83627ehf_data *data = drvdata;
+
+ switch (type) {
+ case hwmon_temp:
+ /* channel 0.., name 1.. */
+ if (!(data->have_temp & (1 << channel)))
+ return 0;
+ if (attr == hwmon_temp_input || attr == hwmon_temp_label)
+ return 0444;
+ if (channel == 2 && data->temp3_val_only)
+ return 0;
+ if (attr == hwmon_temp_max) {
+ if (data->reg_temp_over[channel])
+ return 0644;
+ else
+ return 0;
+ }
+ if (attr == hwmon_temp_max_hyst) {
+ if (data->reg_temp_hyst[channel])
+ return 0644;
+ else
+ return 0;
+ }
+ if (channel > 2)
+ return 0;
+ if (attr == hwmon_temp_alarm || attr == hwmon_temp_type)
+ return 0444;
+ if (attr == hwmon_temp_offset) {
+ if (data->have_temp_offset & (1 << channel))
+ return 0644;
+ else
+ return 0;
+ }
+ break;
+
+ case hwmon_fan:
+ /* channel 0.., name 1.. */
+ if (!(data->has_fan & (1 << channel)))
+ return 0;
+ if (attr == hwmon_fan_input || attr == hwmon_fan_alarm)
+ return 0444;
+ if (attr == hwmon_fan_div) {
+ return 0444;
}
- if (!(regval & (1 << 1)) && fan5pin) {
- data->has_fan |= (1 << 4);
- data->has_fan_min |= (1 << 4);
+ if (attr == hwmon_fan_min) {
+ if (data->has_fan_min & (1 << channel))
+ return 0644;
+ else
+ return 0;
}
+ break;
+
+ case hwmon_in:
+ /* channel 0.., name 0.. */
+ if (channel >= data->in_num)
+ return 0;
+ if (channel == 6 && data->in6_skip)
+ return 0;
+ if (attr == hwmon_in_alarm || attr == hwmon_in_input)
+ return 0444;
+ if (attr == hwmon_in_min || attr == hwmon_in_max)
+ return 0644;
+ break;
+
+ case hwmon_pwm:
+ /* channel 0.., name 1.. */
+ if (!(data->has_fan & (1 << channel)) ||
+ channel >= data->pwm_num)
+ return 0;
+ if (attr == hwmon_pwm_mode || attr == hwmon_pwm_enable ||
+ attr == hwmon_pwm_input)
+ return 0644;
+ break;
+
+ case hwmon_intrusion:
+ return 0644;
+
+ default: /* Shouldn't happen */
+ return 0;
}
+
+ return 0; /* Shouldn't happen */
}
+static int
+w83627ehf_do_read_temp(struct w83627ehf_data *data, u32 attr,
+ int channel, long *val)
+{
+ switch (attr) {
+ case hwmon_temp_input:
+ *val = LM75_TEMP_FROM_REG(data->temp[channel]);
+ return 0;
+ case hwmon_temp_max:
+ *val = LM75_TEMP_FROM_REG(data->temp_max[channel]);
+ return 0;
+ case hwmon_temp_max_hyst:
+ *val = LM75_TEMP_FROM_REG(data->temp_max_hyst[channel]);
+ return 0;
+ case hwmon_temp_offset:
+ *val = data->temp_offset[channel] * 1000;
+ return 0;
+ case hwmon_temp_type:
+ *val = (int)data->temp_type[channel];
+ return 0;
+ case hwmon_temp_alarm:
+ if (channel < 3) {
+ int bit[] = { 4, 5, 13 };
+ *val = (data->alarms >> bit[channel]) & 1;
+ return 0;
+ }
+ break;
+
+ default:
+ break;
+ }
+
+ return -EOPNOTSUPP;
+}
+
+static int
+w83627ehf_do_read_in(struct w83627ehf_data *data, u32 attr,
+ int channel, long *val)
+{
+ switch (attr) {
+ case hwmon_in_input:
+ *val = in_from_reg(data->in[channel], channel, data->scale_in);
+ return 0;
+ case hwmon_in_min:
+ *val = in_from_reg(data->in_min[channel], channel,
+ data->scale_in);
+ return 0;
+ case hwmon_in_max:
+ *val = in_from_reg(data->in_max[channel], channel,
+ data->scale_in);
+ return 0;
+ case hwmon_in_alarm:
+ if (channel < 10) {
+ int bit[] = { 0, 1, 2, 3, 8, 21, 20, 16, 17, 19 };
+ *val = (data->alarms >> bit[channel]) & 1;
+ return 0;
+ }
+ break;
+ default:
+ break;
+ }
+ return -EOPNOTSUPP;
+}
+
+static int
+w83627ehf_do_read_fan(struct w83627ehf_data *data, u32 attr,
+ int channel, long *val)
+{
+ switch (attr) {
+ case hwmon_fan_input:
+ *val = data->rpm[channel];
+ return 0;
+ case hwmon_fan_min:
+ *val = fan_from_reg8(data->fan_min[channel],
+ data->fan_div[channel]);
+ return 0;
+ case hwmon_fan_div:
+ *val = div_from_reg(data->fan_div[channel]);
+ return 0;
+ case hwmon_fan_alarm:
+ if (channel < 5) {
+ int bit[] = { 6, 7, 11, 10, 23 };
+ *val = (data->alarms >> bit[channel]) & 1;
+ return 0;
+ }
+ break;
+ default:
+ break;
+ }
+ return -EOPNOTSUPP;
+}
+
+static int
+w83627ehf_do_read_pwm(struct w83627ehf_data *data, u32 attr,
+ int channel, long *val)
+{
+ switch (attr) {
+ case hwmon_pwm_input:
+ *val = data->pwm[channel];
+ return 0;
+ case hwmon_pwm_enable:
+ *val = data->pwm_enable[channel];
+ return 0;
+ case hwmon_pwm_mode:
+ *val = data->pwm_enable[channel];
+ return 0;
+ default:
+ break;
+ }
+ return -EOPNOTSUPP;
+}
+
+static int
+w83627ehf_do_read_intrusion(struct w83627ehf_data *data, u32 attr,
+ int channel, long *val)
+{
+ if (attr != hwmon_intrusion_alarm || channel != 0)
+ return -EOPNOTSUPP; /* shouldn't happen */
+
+ *val = !!(data->caseopen & 0x10);
+ return 0;
+}
+
+static int
+w83627ehf_read(struct device *dev, enum hwmon_sensor_types type,
+ u32 attr, int channel, long *val)
+{
+ struct w83627ehf_data *data = w83627ehf_update_device(dev->parent);
+
+ switch (type) {
+ case hwmon_fan:
+ return w83627ehf_do_read_fan(data, attr, channel, val);
+
+ case hwmon_in:
+ return w83627ehf_do_read_in(data, attr, channel, val);
+
+ case hwmon_pwm:
+ return w83627ehf_do_read_pwm(data, attr, channel, val);
+
+ case hwmon_temp:
+ return w83627ehf_do_read_temp(data, attr, channel, val);
+
+ case hwmon_intrusion:
+ return w83627ehf_do_read_intrusion(data, attr, channel, val);
+
+ default:
+ break;
+ }
+
+ return -EOPNOTSUPP;
+}
+
+static int
+w83627ehf_read_string(struct device *dev, enum hwmon_sensor_types type,
+ u32 attr, int channel, const char **str)
+{
+ struct w83627ehf_data *data = dev_get_drvdata(dev);
+
+ switch (type) {
+ case hwmon_temp:
+ if (attr == hwmon_temp_label) {
+ *str = data->temp_label[data->temp_src[channel]];
+ return 0;
+ }
+ break;
+
+ default:
+ break;
+ }
+ /* Nothing else should be read as a string */
+ return -EOPNOTSUPP;
+}
+
+static int
+w83627ehf_write(struct device *dev, enum hwmon_sensor_types type,
+ u32 attr, int channel, long val)
+{
+ struct w83627ehf_data *data = dev_get_drvdata(dev);
+
+ if (type == hwmon_in && attr == hwmon_in_min)
+ return store_in_min(dev, data, channel, val);
+ if (type == hwmon_in && attr == hwmon_in_max)
+ return store_in_max(dev, data, channel, val);
+
+ if (type == hwmon_fan && attr == hwmon_fan_min)
+ return store_fan_min(dev, data, channel, val);
+
+ if (type == hwmon_temp && attr == hwmon_temp_max)
+ return store_temp_max(dev, data, channel, val);
+ if (type == hwmon_temp && attr == hwmon_temp_max_hyst)
+ return store_temp_max_hyst(dev, data, channel, val);
+ if (type == hwmon_temp && attr == hwmon_temp_offset)
+ return store_temp_offset(dev, data, channel, val);
+
+ if (type == hwmon_pwm && attr == hwmon_pwm_mode)
+ return store_pwm_mode(dev, data, channel, val);
+ if (type == hwmon_pwm && attr == hwmon_pwm_enable)
+ return store_pwm_enable(dev, data, channel, val);
+ if (type == hwmon_pwm && attr == hwmon_pwm_input)
+ return store_pwm(dev, data, channel, val);
+
+ if (type == hwmon_intrusion && attr == hwmon_intrusion_alarm)
+ return clear_caseopen(dev, data, channel, val);
+
+ return -EOPNOTSUPP;
+}
+
+static const struct hwmon_ops w83627ehf_ops = {
+ .is_visible = w83627ehf_is_visible,
+ .read = w83627ehf_read,
+ .read_string = w83627ehf_read_string,
+ .write = w83627ehf_write,
+};
+
+static const struct hwmon_channel_info *w83627ehf_info[] = {
+ HWMON_CHANNEL_INFO(fan,
+ HWMON_F_ALARM | HWMON_F_DIV | HWMON_F_INPUT | HWMON_F_MIN,
+ HWMON_F_ALARM | HWMON_F_DIV | HWMON_F_INPUT | HWMON_F_MIN,
+ HWMON_F_ALARM | HWMON_F_DIV | HWMON_F_INPUT | HWMON_F_MIN,
+ HWMON_F_ALARM | HWMON_F_DIV | HWMON_F_INPUT | HWMON_F_MIN,
+ HWMON_F_ALARM | HWMON_F_DIV | HWMON_F_INPUT | HWMON_F_MIN),
+ HWMON_CHANNEL_INFO(in,
+ HWMON_I_ALARM | HWMON_I_INPUT | HWMON_I_MAX | HWMON_I_MIN,
+ HWMON_I_ALARM | HWMON_I_INPUT | HWMON_I_MAX | HWMON_I_MIN,
+ HWMON_I_ALARM | HWMON_I_INPUT | HWMON_I_MAX | HWMON_I_MIN,
+ HWMON_I_ALARM | HWMON_I_INPUT | HWMON_I_MAX | HWMON_I_MIN,
+ HWMON_I_ALARM | HWMON_I_INPUT | HWMON_I_MAX | HWMON_I_MIN,
+ HWMON_I_ALARM | HWMON_I_INPUT | HWMON_I_MAX | HWMON_I_MIN,
+ HWMON_I_ALARM | HWMON_I_INPUT | HWMON_I_MAX | HWMON_I_MIN,
+ HWMON_I_ALARM | HWMON_I_INPUT | HWMON_I_MAX | HWMON_I_MIN,
+ HWMON_I_ALARM | HWMON_I_INPUT | HWMON_I_MAX | HWMON_I_MIN,
+ HWMON_I_ALARM | HWMON_I_INPUT | HWMON_I_MAX | HWMON_I_MIN),
+ HWMON_CHANNEL_INFO(pwm,
+ HWMON_PWM_ENABLE | HWMON_PWM_INPUT | HWMON_PWM_MODE,
+ HWMON_PWM_ENABLE | HWMON_PWM_INPUT | HWMON_PWM_MODE,
+ HWMON_PWM_ENABLE | HWMON_PWM_INPUT | HWMON_PWM_MODE,
+ HWMON_PWM_ENABLE | HWMON_PWM_INPUT | HWMON_PWM_MODE),
+ HWMON_CHANNEL_INFO(temp,
+ HWMON_T_ALARM | HWMON_T_INPUT | HWMON_T_LABEL | HWMON_T_MAX |
+ HWMON_T_MAX_HYST | HWMON_T_OFFSET | HWMON_T_TYPE,
+ HWMON_T_ALARM | HWMON_T_INPUT | HWMON_T_LABEL | HWMON_T_MAX |
+ HWMON_T_MAX_HYST | HWMON_T_OFFSET | HWMON_T_TYPE,
+ HWMON_T_ALARM | HWMON_T_INPUT | HWMON_T_LABEL | HWMON_T_MAX |
+ HWMON_T_MAX_HYST | HWMON_T_OFFSET | HWMON_T_TYPE,
+ HWMON_T_ALARM | HWMON_T_INPUT | HWMON_T_LABEL | HWMON_T_MAX |
+ HWMON_T_MAX_HYST | HWMON_T_OFFSET | HWMON_T_TYPE,
+ HWMON_T_ALARM | HWMON_T_INPUT | HWMON_T_LABEL | HWMON_T_MAX |
+ HWMON_T_MAX_HYST | HWMON_T_OFFSET | HWMON_T_TYPE,
+ HWMON_T_ALARM | HWMON_T_INPUT | HWMON_T_LABEL | HWMON_T_MAX |
+ HWMON_T_MAX_HYST | HWMON_T_OFFSET | HWMON_T_TYPE,
+ HWMON_T_ALARM | HWMON_T_INPUT | HWMON_T_LABEL | HWMON_T_MAX |
+ HWMON_T_MAX_HYST | HWMON_T_OFFSET | HWMON_T_TYPE,
+ HWMON_T_ALARM | HWMON_T_INPUT | HWMON_T_LABEL | HWMON_T_MAX |
+ HWMON_T_MAX_HYST | HWMON_T_OFFSET | HWMON_T_TYPE,
+ HWMON_T_ALARM | HWMON_T_INPUT | HWMON_T_LABEL | HWMON_T_MAX |
+ HWMON_T_MAX_HYST | HWMON_T_OFFSET | HWMON_T_TYPE),
+ HWMON_CHANNEL_INFO(intrusion,
+ HWMON_INTRUSION_ALARM),
+ NULL
+};
+
+static const struct hwmon_chip_info w83627ehf_chip_info = {
+ .ops = &w83627ehf_ops,
+ .info = w83627ehf_info,
+};
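With the ops, the channel tables, and the chip info in place, registration collapses to a single devm call, as the probe hunk further down shows. A minimal sketch of the pattern (kernel driver context assumed, not buildable standalone; example_probe() is a hypothetical wrapper, not code from this commit):

/* Registration sketch for the with_info interface. */
#include <linux/err.h>
#include <linux/hwmon.h>

static int example_probe(struct device *dev, struct w83627ehf_data *data)
{
	struct device *hwmon_dev;

	hwmon_dev = devm_hwmon_device_register_with_info(dev, data->name,
							 data,
							 &w83627ehf_chip_info,
							 w83627ehf_groups);
	return PTR_ERR_OR_ZERO(hwmon_dev);
}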
+
static int w83627ehf_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
@@ -2043,6 +1697,7 @@ static int w83627ehf_probe(struct platform_device *pdev)
struct resource *res;
u8 en_vrm10;
int i, err = 0;
+ struct device *hwmon_dev;
res = platform_get_resource(pdev, IORESOURCE_IO, 0);
if (!request_region(res->start, IOREGION_LENGTH, DRVNAME)) {
@@ -2069,15 +1724,13 @@ static int w83627ehf_probe(struct platform_device *pdev)
/* 627EHG and 627EHF have 10 voltage inputs; 627DHG and 667HG have 9 */
data->in_num = (sio_data->kind == w83627ehf) ? 10 : 9;
- /* 667HG, NCT6775F, and NCT6776F have 3 pwms, and 627UHG has only 2 */
+ /* 667HG has 3 pwms, and 627UHG has only 2 */
switch (sio_data->kind) {
default:
data->pwm_num = 4;
break;
case w83667hg:
case w83667hg_b:
- case nct6775:
- case nct6776:
data->pwm_num = 3;
break;
case w83627uhg:
@@ -2089,83 +1742,7 @@ static int w83627ehf_probe(struct platform_device *pdev)
data->have_temp = 0x07;
/* Deal with temperature register setup first. */
- if (sio_data->kind == nct6775 || sio_data->kind == nct6776) {
- int mask = 0;
-
- /*
- * Display temperature sensor output only if it monitors
- * a source other than one already reported. Always display
- * first three temperature registers, though.
- */
- for (i = 0; i < NUM_REG_TEMP; i++) {
- u8 src;
-
- data->reg_temp[i] = NCT6775_REG_TEMP[i];
- data->reg_temp_over[i] = NCT6775_REG_TEMP_OVER[i];
- data->reg_temp_hyst[i] = NCT6775_REG_TEMP_HYST[i];
- data->reg_temp_config[i] = NCT6775_REG_TEMP_CONFIG[i];
-
- src = w83627ehf_read_value(data,
- NCT6775_REG_TEMP_SOURCE[i]);
- src &= 0x1f;
- if (src && !(mask & (1 << src))) {
- data->have_temp |= 1 << i;
- mask |= 1 << src;
- }
-
- data->temp_src[i] = src;
-
- /*
- * Now do some register swapping if index 0..2 don't
- * point to SYSTIN(1), CPUIN(2), and AUXIN(3).
- * Idea is to have the first three attributes
- * report SYSTIN, CPUIN, and AUXIN if possible
- * without overriding the basic system configuration.
- */
- if (i > 0 && data->temp_src[0] != 1
- && data->temp_src[i] == 1)
- w82627ehf_swap_tempreg(data, 0, i);
- if (i > 1 && data->temp_src[1] != 2
- && data->temp_src[i] == 2)
- w82627ehf_swap_tempreg(data, 1, i);
- if (i > 2 && data->temp_src[2] != 3
- && data->temp_src[i] == 3)
- w82627ehf_swap_tempreg(data, 2, i);
- }
- if (sio_data->kind == nct6776) {
- /*
- * On NCT6776, AUXTIN and VIN3 pins are shared.
- * Only way to detect it is to check if AUXTIN is used
- * as a temperature source, and if that source is
- * enabled.
- *
- * If that is the case, disable in6, which reports VIN3.
- * Otherwise disable temp3.
- */
- if (data->temp_src[2] == 3) {
- u8 reg;
-
- if (data->reg_temp_config[2])
- reg = w83627ehf_read_value(data,
- data->reg_temp_config[2]);
- else
- reg = 0; /* Assume AUXTIN is used */
-
- if (reg & 0x01)
- data->have_temp &= ~(1 << 2);
- else
- data->in6_skip = 1;
- }
- data->temp_label = nct6776_temp_label;
- } else {
- data->temp_label = nct6775_temp_label;
- }
- data->have_temp_offset = data->have_temp & 0x07;
- for (i = 0; i < 3; i++) {
- if (data->temp_src[i] > 3)
- data->have_temp_offset &= ~(1 << i);
- }
- } else if (sio_data->kind == w83667hg_b) {
+ if (sio_data->kind == w83667hg_b) {
u8 reg;
w83627ehf_set_temp_reg_ehf(data, 4);
@@ -2275,56 +1852,12 @@ static int w83627ehf_probe(struct platform_device *pdev)
data->have_temp_offset = data->have_temp & 0x07;
}
- if (sio_data->kind == nct6775) {
- data->has_fan_div = true;
- data->fan_from_reg = fan_from_reg16;
- data->fan_from_reg_min = fan_from_reg8;
- data->REG_PWM = NCT6775_REG_PWM;
- data->REG_TARGET = NCT6775_REG_TARGET;
- data->REG_FAN = NCT6775_REG_FAN;
- data->REG_FAN_MIN = W83627EHF_REG_FAN_MIN;
- data->REG_FAN_START_OUTPUT = NCT6775_REG_FAN_START_OUTPUT;
- data->REG_FAN_STOP_OUTPUT = NCT6775_REG_FAN_STOP_OUTPUT;
- data->REG_FAN_STOP_TIME = NCT6775_REG_FAN_STOP_TIME;
- data->REG_FAN_MAX_OUTPUT = NCT6775_REG_FAN_MAX_OUTPUT;
- data->REG_FAN_STEP_OUTPUT = NCT6775_REG_FAN_STEP_OUTPUT;
- } else if (sio_data->kind == nct6776) {
- data->has_fan_div = false;
- data->fan_from_reg = fan_from_reg13;
- data->fan_from_reg_min = fan_from_reg13;
- data->REG_PWM = NCT6775_REG_PWM;
- data->REG_TARGET = NCT6775_REG_TARGET;
- data->REG_FAN = NCT6775_REG_FAN;
- data->REG_FAN_MIN = NCT6776_REG_FAN_MIN;
- data->REG_FAN_START_OUTPUT = NCT6775_REG_FAN_START_OUTPUT;
- data->REG_FAN_STOP_OUTPUT = NCT6775_REG_FAN_STOP_OUTPUT;
- data->REG_FAN_STOP_TIME = NCT6775_REG_FAN_STOP_TIME;
- } else if (sio_data->kind == w83667hg_b) {
- data->has_fan_div = true;
- data->fan_from_reg = fan_from_reg8;
- data->fan_from_reg_min = fan_from_reg8;
- data->REG_PWM = W83627EHF_REG_PWM;
- data->REG_TARGET = W83627EHF_REG_TARGET;
- data->REG_FAN = W83627EHF_REG_FAN;
- data->REG_FAN_MIN = W83627EHF_REG_FAN_MIN;
- data->REG_FAN_START_OUTPUT = W83627EHF_REG_FAN_START_OUTPUT;
- data->REG_FAN_STOP_OUTPUT = W83627EHF_REG_FAN_STOP_OUTPUT;
- data->REG_FAN_STOP_TIME = W83627EHF_REG_FAN_STOP_TIME;
+ if (sio_data->kind == w83667hg_b) {
data->REG_FAN_MAX_OUTPUT =
W83627EHF_REG_FAN_MAX_OUTPUT_W83667_B;
data->REG_FAN_STEP_OUTPUT =
W83627EHF_REG_FAN_STEP_OUTPUT_W83667_B;
} else {
- data->has_fan_div = true;
- data->fan_from_reg = fan_from_reg8;
- data->fan_from_reg_min = fan_from_reg8;
- data->REG_PWM = W83627EHF_REG_PWM;
- data->REG_TARGET = W83627EHF_REG_TARGET;
- data->REG_FAN = W83627EHF_REG_FAN;
- data->REG_FAN_MIN = W83627EHF_REG_FAN_MIN;
- data->REG_FAN_START_OUTPUT = W83627EHF_REG_FAN_START_OUTPUT;
- data->REG_FAN_STOP_OUTPUT = W83627EHF_REG_FAN_STOP_OUTPUT;
- data->REG_FAN_STOP_TIME = W83627EHF_REG_FAN_STOP_TIME;
data->REG_FAN_MAX_OUTPUT =
W83627EHF_REG_FAN_MAX_OUTPUT_COMMON;
data->REG_FAN_STEP_OUTPUT =
@@ -2347,8 +1880,7 @@ static int w83627ehf_probe(struct platform_device *pdev)
goto exit_release;
/* Read VID value */
- if (sio_data->kind == w83667hg || sio_data->kind == w83667hg_b ||
- sio_data->kind == nct6775 || sio_data->kind == nct6776) {
+ if (sio_data->kind == w83667hg || sio_data->kind == w83667hg_b) {
/*
* W83667HG has different pins for VID input and output, so
* we can get the VID input values directly at logical device D
@@ -2356,11 +1888,7 @@ static int w83627ehf_probe(struct platform_device *pdev)
*/
superio_select(sio_data->sioreg, W83667HG_LD_VID);
data->vid = superio_inb(sio_data->sioreg, 0xe3);
- err = device_create_file(dev, &dev_attr_cpu0_vid);
- if (err) {
- superio_exit(sio_data->sioreg);
- goto exit_release;
- }
+ data->have_vid = true;
} else if (sio_data->kind != w83627uhg) {
superio_select(sio_data->sioreg, W83627EHF_LD_HWM);
if (superio_inb(sio_data->sioreg, SIO_REG_VID_CTRL) & 0x80) {
@@ -2394,190 +1922,33 @@ static int w83627ehf_probe(struct platform_device *pdev)
SIO_REG_VID_DATA);
if (sio_data->kind == w83627ehf) /* 6 VID pins only */
data->vid &= 0x3f;
-
- err = device_create_file(dev, &dev_attr_cpu0_vid);
- if (err) {
- superio_exit(sio_data->sioreg);
- goto exit_release;
- }
+ data->have_vid = true;
} else {
dev_info(dev,
"VID pins in output mode, CPU VID not available\n");
}
}
- if (fan_debounce &&
- (sio_data->kind == nct6775 || sio_data->kind == nct6776)) {
- u8 tmp;
-
- superio_select(sio_data->sioreg, W83627EHF_LD_HWM);
- tmp = superio_inb(sio_data->sioreg, NCT6775_REG_FAN_DEBOUNCE);
- if (sio_data->kind == nct6776)
- superio_outb(sio_data->sioreg, NCT6775_REG_FAN_DEBOUNCE,
- 0x3e | tmp);
- else
- superio_outb(sio_data->sioreg, NCT6775_REG_FAN_DEBOUNCE,
- 0x1e | tmp);
- pr_info("Enabled fan debounce for chip %s\n", data->name);
- }
-
w83627ehf_check_fan_inputs(sio_data, data);
superio_exit(sio_data->sioreg);
/* Read fan clock dividers immediately */
- w83627ehf_update_fan_div_common(dev, data);
+ w83627ehf_update_fan_div(data);
/* Read pwm data to save original values */
- w83627ehf_update_pwm_common(dev, data);
+ w83627ehf_update_pwm(data);
for (i = 0; i < data->pwm_num; i++)
data->pwm_enable_orig[i] = data->pwm_enable[i];
- /* Register sysfs hooks */
- for (i = 0; i < ARRAY_SIZE(sda_sf3_arrays); i++) {
- err = device_create_file(dev, &sda_sf3_arrays[i].dev_attr);
- if (err)
- goto exit_remove;
- }
-
- for (i = 0; i < ARRAY_SIZE(sda_sf3_max_step_arrays); i++) {
- struct sensor_device_attribute *attr =
- &sda_sf3_max_step_arrays[i];
- if (data->REG_FAN_STEP_OUTPUT &&
- data->REG_FAN_STEP_OUTPUT[attr->index] != 0xff) {
- err = device_create_file(dev, &attr->dev_attr);
- if (err)
- goto exit_remove;
- }
- }
- /* if fan3 and fan4 are enabled create the sf3 files for them */
- if ((data->has_fan & (1 << 2)) && data->pwm_num >= 3)
- for (i = 0; i < ARRAY_SIZE(sda_sf3_arrays_fan3); i++) {
- err = device_create_file(dev,
- &sda_sf3_arrays_fan3[i].dev_attr);
- if (err)
- goto exit_remove;
- }
- if ((data->has_fan & (1 << 3)) && data->pwm_num >= 4)
- for (i = 0; i < ARRAY_SIZE(sda_sf3_arrays_fan4); i++) {
- err = device_create_file(dev,
- &sda_sf3_arrays_fan4[i].dev_attr);
- if (err)
- goto exit_remove;
- }
-
- for (i = 0; i < data->in_num; i++) {
- if ((i == 6) && data->in6_skip)
- continue;
- if ((err = device_create_file(dev, &sda_in_input[i].dev_attr))
- || (err = device_create_file(dev,
- &sda_in_alarm[i].dev_attr))
- || (err = device_create_file(dev,
- &sda_in_min[i].dev_attr))
- || (err = device_create_file(dev,
- &sda_in_max[i].dev_attr)))
- goto exit_remove;
- }
-
- for (i = 0; i < 5; i++) {
- if (data->has_fan & (1 << i)) {
- if ((err = device_create_file(dev,
- &sda_fan_input[i].dev_attr))
- || (err = device_create_file(dev,
- &sda_fan_alarm[i].dev_attr)))
- goto exit_remove;
- if (sio_data->kind != nct6776) {
- err = device_create_file(dev,
- &sda_fan_div[i].dev_attr);
- if (err)
- goto exit_remove;
- }
- if (data->has_fan_min & (1 << i)) {
- err = device_create_file(dev,
- &sda_fan_min[i].dev_attr);
- if (err)
- goto exit_remove;
- }
- if (i < data->pwm_num &&
- ((err = device_create_file(dev,
- &sda_pwm[i].dev_attr))
- || (err = device_create_file(dev,
- &sda_pwm_mode[i].dev_attr))
- || (err = device_create_file(dev,
- &sda_pwm_enable[i].dev_attr))
- || (err = device_create_file(dev,
- &sda_target_temp[i].dev_attr))
- || (err = device_create_file(dev,
- &sda_tolerance[i].dev_attr))))
- goto exit_remove;
- }
- }
+ hwmon_dev = devm_hwmon_device_register_with_info(&pdev->dev,
+ data->name,
+ data,
+ &w83627ehf_chip_info,
+ w83627ehf_groups);
- for (i = 0; i < NUM_REG_TEMP; i++) {
- if (!(data->have_temp & (1 << i)))
- continue;
- err = device_create_file(dev, &sda_temp_input[i].dev_attr);
- if (err)
- goto exit_remove;
- if (data->temp_label) {
- err = device_create_file(dev,
- &sda_temp_label[i].dev_attr);
- if (err)
- goto exit_remove;
- }
- if (i == 2 && data->temp3_val_only)
- continue;
- if (data->reg_temp_over[i]) {
- err = device_create_file(dev,
- &sda_temp_max[i].dev_attr);
- if (err)
- goto exit_remove;
- }
- if (data->reg_temp_hyst[i]) {
- err = device_create_file(dev,
- &sda_temp_max_hyst[i].dev_attr);
- if (err)
- goto exit_remove;
- }
- if (i > 2)
- continue;
- if ((err = device_create_file(dev,
- &sda_temp_alarm[i].dev_attr))
- || (err = device_create_file(dev,
- &sda_temp_type[i].dev_attr)))
- goto exit_remove;
- if (data->have_temp_offset & (1 << i)) {
- err = device_create_file(dev,
- &sda_temp_offset[i].dev_attr);
- if (err)
- goto exit_remove;
- }
- }
-
- err = device_create_file(dev, &sda_caseopen[0].dev_attr);
- if (err)
- goto exit_remove;
-
- if (sio_data->kind == nct6776) {
- err = device_create_file(dev, &sda_caseopen[1].dev_attr);
- if (err)
- goto exit_remove;
- }
-
- err = device_create_file(dev, &dev_attr_name);
- if (err)
- goto exit_remove;
-
- data->hwmon_dev = hwmon_device_register(dev);
- if (IS_ERR(data->hwmon_dev)) {
- err = PTR_ERR(data->hwmon_dev);
- goto exit_remove;
- }
+ return PTR_ERR_OR_ZERO(hwmon_dev);
- return 0;
-
-exit_remove:
- w83627ehf_device_remove_files(dev);
exit_release:
release_region(res->start, IOREGION_LENGTH);
exit:
@@ -2588,8 +1959,6 @@ static int w83627ehf_remove(struct platform_device *pdev)
{
struct w83627ehf_data *data = platform_get_drvdata(pdev);
- hwmon_device_unregister(data->hwmon_dev);
- w83627ehf_device_remove_files(&pdev->dev);
release_region(data->addr, IOREGION_LENGTH);
return 0;
@@ -2599,14 +1968,9 @@ static int w83627ehf_remove(struct platform_device *pdev)
static int w83627ehf_suspend(struct device *dev)
{
struct w83627ehf_data *data = w83627ehf_update_device(dev);
- struct w83627ehf_sio_data *sio_data = dev_get_platdata(dev);
mutex_lock(&data->update_lock);
data->vbat = w83627ehf_read_value(data, W83627EHF_REG_VBAT);
- if (sio_data->kind == nct6775) {
- data->fandiv1 = w83627ehf_read_value(data, NCT6775_REG_FANDIV1);
- data->fandiv2 = w83627ehf_read_value(data, NCT6775_REG_FANDIV2);
- }
mutex_unlock(&data->update_lock);
return 0;
@@ -2615,7 +1979,6 @@ static int w83627ehf_suspend(struct device *dev)
static int w83627ehf_resume(struct device *dev)
{
struct w83627ehf_data *data = dev_get_drvdata(dev);
- struct w83627ehf_sio_data *sio_data = dev_get_platdata(dev);
int i;
mutex_lock(&data->update_lock);
@@ -2636,7 +1999,7 @@ static int w83627ehf_resume(struct device *dev)
if (!(data->has_fan_min & (1 << i)))
continue;
- w83627ehf_write_value(data, data->REG_FAN_MIN[i],
+ w83627ehf_write_value(data, W83627EHF_REG_FAN_MIN[i],
data->fan_min[i]);
}
@@ -2660,10 +2023,6 @@ static int w83627ehf_resume(struct device *dev)
/* Restore other settings */
w83627ehf_write_value(data, W83627EHF_REG_VBAT, data->vbat);
- if (sio_data->kind == nct6775) {
- w83627ehf_write_value(data, NCT6775_REG_FANDIV1, data->fandiv1);
- w83627ehf_write_value(data, NCT6775_REG_FANDIV2, data->fandiv2);
- }
/* Force re-reading all values */
data->valid = 0;
@@ -2704,8 +2063,6 @@ static int __init w83627ehf_find(int sioaddr, unsigned short *addr,
static const char sio_name_W83627UHG[] __initconst = "W83627UHG";
static const char sio_name_W83667HG[] __initconst = "W83667HG";
static const char sio_name_W83667HG_B[] __initconst = "W83667HG-B";
- static const char sio_name_NCT6775[] __initconst = "NCT6775F";
- static const char sio_name_NCT6776[] __initconst = "NCT6776F";
u16 val;
const char *sio_name;
@@ -2749,14 +2106,6 @@ static int __init w83627ehf_find(int sioaddr, unsigned short *addr,
sio_data->kind = w83667hg_b;
sio_name = sio_name_W83667HG_B;
break;
- case SIO_NCT6775_ID:
- sio_data->kind = nct6775;
- sio_name = sio_name_NCT6775;
- break;
- case SIO_NCT6776_ID:
- sio_data->kind = nct6776;
- sio_name = sio_name_NCT6776;
- break;
default:
if (val != 0xffff)
pr_debug("unsupported chip ID: 0x%04x\n", val);
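The registration call added above is the heart of this hunk: instead of creating every sysfs attribute by hand and unwinding on failure, the driver hands devm_hwmon_device_register_with_info() a chip_info descriptor plus extra attribute groups, and the hwmon core derives attribute visibility while devm handles cleanup. A minimal sketch of the pattern; all demo_* names are hypothetical stand-ins for the driver's real tables:

#include <linux/err.h>
#include <linux/hwmon.h>
#include <linux/platform_device.h>

static umode_t demo_is_visible(const void *data, enum hwmon_sensor_types type,
			       u32 attr, int channel)
{
	return 0444;			/* everything read-only in this sketch */
}

static int demo_read(struct device *dev, enum hwmon_sensor_types type,
		     u32 attr, int channel, long *val)
{
	*val = 0;			/* a real driver reads hardware here */
	return 0;
}

static const struct hwmon_ops demo_hwmon_ops = {
	.is_visible = demo_is_visible,
	.read = demo_read,
};

static const struct hwmon_channel_info *demo_info[] = {
	HWMON_CHANNEL_INFO(fan, HWMON_F_INPUT | HWMON_F_MIN),
	NULL
};

static const struct hwmon_chip_info demo_chip_info = {
	.ops = &demo_hwmon_ops,
	.info = demo_info,
};

static int demo_probe(struct platform_device *pdev)
{
	struct device *hwmon_dev;

	/* No device_create_file() loops, no error unwinding: devm
	 * unregisters the hwmon device automatically on detach.
	 */
	hwmon_dev = devm_hwmon_device_register_with_info(&pdev->dev, "demo",
							 NULL, &demo_chip_info,
							 NULL);
	return PTR_ERR_OR_ZERO(hwmon_dev);
}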
diff --git a/drivers/hwtracing/coresight/coresight-etm4x.c b/drivers/hwtracing/coresight/coresight-etm4x.c
index dc3f507e7562..a90d757f7043 100644
--- a/drivers/hwtracing/coresight/coresight-etm4x.c
+++ b/drivers/hwtracing/coresight/coresight-etm4x.c
@@ -1132,7 +1132,6 @@ static void etm4_init_trace_id(struct etmv4_drvdata *drvdata)
drvdata->trcid = coresight_get_trace_id(drvdata->cpu);
}
-#ifdef CONFIG_CPU_PM
static int etm4_cpu_save(struct etmv4_drvdata *drvdata)
{
int i, ret = 0;
@@ -1402,17 +1401,17 @@ static struct notifier_block etm4_cpu_pm_nb = {
static int etm4_cpu_pm_register(void)
{
- return cpu_pm_register_notifier(&etm4_cpu_pm_nb);
+ if (IS_ENABLED(CONFIG_CPU_PM))
+ return cpu_pm_register_notifier(&etm4_cpu_pm_nb);
+
+ return 0;
}
static void etm4_cpu_pm_unregister(void)
{
- cpu_pm_unregister_notifier(&etm4_cpu_pm_nb);
+ if (IS_ENABLED(CONFIG_CPU_PM))
+ cpu_pm_unregister_notifier(&etm4_cpu_pm_nb);
}
-#else
-static int etm4_cpu_pm_register(void) { return 0; }
-static void etm4_cpu_pm_unregister(void) { }
-#endif
static int etm4_probe(struct amba_device *adev, const struct amba_id *id)
{
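The coresight change above trades an #ifdef CONFIG_CPU_PM block (with empty stubs in the #else branch) for IS_ENABLED() tests inside single function bodies, so both paths are parsed and type-checked on every build while the optimizer discards the dead one. A generic sketch of the idiom, using a made-up CONFIG_FOO:

#include <linux/kconfig.h>	/* IS_ENABLED() */

static int do_register(void)	/* stand-in for cpu_pm_register_notifier() */
{
	return 0;
}

/* One definition replaces the old #ifdef pair of stubs: the branch for
 * a disabled CONFIG_FOO still compiles, then folds away as dead code.
 */
static int foo_register(void)
{
	if (IS_ENABLED(CONFIG_FOO))
		return do_register();

	return 0;
}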
diff --git a/drivers/i2c/busses/i2c-highlander.c b/drivers/i2c/busses/i2c-highlander.c
index ff340d7ae2e5..abfe3094c047 100644
--- a/drivers/i2c/busses/i2c-highlander.c
+++ b/drivers/i2c/busses/i2c-highlander.c
@@ -369,7 +369,7 @@ static int highlander_i2c_probe(struct platform_device *pdev)
if (unlikely(!dev))
return -ENOMEM;
- dev->base = ioremap_nocache(res->start, resource_size(res));
+ dev->base = ioremap(res->start, resource_size(res));
if (unlikely(!dev->base)) {
ret = -ENXIO;
goto err;
diff --git a/drivers/i2c/busses/i2c-iop3xx.c b/drivers/i2c/busses/i2c-iop3xx.c
index 38556381f4ca..2f8b8050a223 100644
--- a/drivers/i2c/busses/i2c-iop3xx.c
+++ b/drivers/i2c/busses/i2c-iop3xx.c
@@ -433,13 +433,17 @@ iop3xx_i2c_probe(struct platform_device *pdev)
adapter_data->gpio_scl = devm_gpiod_get_optional(&pdev->dev,
"scl",
GPIOD_ASIS);
- if (IS_ERR(adapter_data->gpio_scl))
- return PTR_ERR(adapter_data->gpio_scl);
+ if (IS_ERR(adapter_data->gpio_scl)) {
+ ret = PTR_ERR(adapter_data->gpio_scl);
+ goto free_both;
+ }
adapter_data->gpio_sda = devm_gpiod_get_optional(&pdev->dev,
"sda",
GPIOD_ASIS);
- if (IS_ERR(adapter_data->gpio_sda))
- return PTR_ERR(adapter_data->gpio_sda);
+ if (IS_ERR(adapter_data->gpio_sda)) {
+ ret = PTR_ERR(adapter_data->gpio_sda);
+ goto free_both;
+ }
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (!res) {
diff --git a/drivers/i2c/busses/i2c-pmcmsp.c b/drivers/i2c/busses/i2c-pmcmsp.c
index 0829cb696d9d..4fde74eb34a7 100644
--- a/drivers/i2c/busses/i2c-pmcmsp.c
+++ b/drivers/i2c/busses/i2c-pmcmsp.c
@@ -281,7 +281,7 @@ static int pmcmsptwi_probe(struct platform_device *pldev)
}
/* remap the memory */
- pmcmsptwi_data.iobase = ioremap_nocache(res->start,
+ pmcmsptwi_data.iobase = ioremap(res->start,
resource_size(res));
if (!pmcmsptwi_data.iobase) {
dev_err(&pldev->dev,
diff --git a/drivers/i2c/busses/i2c-tegra.c b/drivers/i2c/busses/i2c-tegra.c
index a98bf31d0e5c..61339c665ebd 100644
--- a/drivers/i2c/busses/i2c-tegra.c
+++ b/drivers/i2c/busses/i2c-tegra.c
@@ -1608,14 +1608,18 @@ static int tegra_i2c_probe(struct platform_device *pdev)
}
pm_runtime_enable(&pdev->dev);
- if (!pm_runtime_enabled(&pdev->dev))
+ if (!pm_runtime_enabled(&pdev->dev)) {
ret = tegra_i2c_runtime_resume(&pdev->dev);
- else
+ if (ret < 0) {
+ dev_err(&pdev->dev, "runtime resume failed\n");
+ goto unprepare_div_clk;
+ }
+ } else {
ret = pm_runtime_get_sync(i2c_dev->dev);
-
- if (ret < 0) {
- dev_err(&pdev->dev, "runtime resume failed\n");
- goto unprepare_div_clk;
+ if (ret < 0) {
+ dev_err(&pdev->dev, "runtime resume failed\n");
+ goto disable_rpm;
+ }
}
if (i2c_dev->is_multimaster_mode) {
@@ -1623,7 +1627,7 @@ static int tegra_i2c_probe(struct platform_device *pdev)
if (ret < 0) {
dev_err(i2c_dev->dev, "div_clk enable failed %d\n",
ret);
- goto disable_rpm;
+ goto put_rpm;
}
}
@@ -1671,11 +1675,16 @@ disable_div_clk:
if (i2c_dev->is_multimaster_mode)
clk_disable(i2c_dev->div_clk);
-disable_rpm:
- pm_runtime_disable(&pdev->dev);
- if (!pm_runtime_status_suspended(&pdev->dev))
+put_rpm:
+ if (pm_runtime_enabled(&pdev->dev))
+ pm_runtime_put_sync(&pdev->dev);
+ else
tegra_i2c_runtime_suspend(&pdev->dev);
+disable_rpm:
+ if (pm_runtime_enabled(&pdev->dev))
+ pm_runtime_disable(&pdev->dev);
+
unprepare_div_clk:
clk_unprepare(i2c_dev->div_clk);
@@ -1710,9 +1719,14 @@ static int tegra_i2c_remove(struct platform_device *pdev)
static int __maybe_unused tegra_i2c_suspend(struct device *dev)
{
struct tegra_i2c_dev *i2c_dev = dev_get_drvdata(dev);
+ int err;
i2c_mark_adapter_suspended(&i2c_dev->adapter);
+ err = pm_runtime_force_suspend(dev);
+ if (err < 0)
+ return err;
+
return 0;
}
@@ -1733,6 +1747,10 @@ static int __maybe_unused tegra_i2c_resume(struct device *dev)
if (err)
return err;
+ err = pm_runtime_force_resume(dev);
+ if (err < 0)
+ return err;
+
i2c_mark_adapter_resumed(&i2c_dev->adapter);
return 0;
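The tegra-i2c suspend/resume hooks above lean on the runtime-PM core: pm_runtime_force_suspend() powers the controller down through the driver's runtime callbacks during system sleep, and pm_runtime_force_resume() restores whatever runtime state was in effect before. A condensed sketch of the pairing; demo_quiesce()/demo_unquiesce() are hypothetical helpers:

#include <linux/pm_runtime.h>

static void demo_quiesce(struct device *dev) { }	/* hypothetical */
static void demo_unquiesce(struct device *dev) { }	/* hypothetical */

static int __maybe_unused demo_suspend(struct device *dev)
{
	/* Stop accepting work first, then let runtime PM power down. */
	demo_quiesce(dev);
	return pm_runtime_force_suspend(dev);
}

static int __maybe_unused demo_resume(struct device *dev)
{
	int err = pm_runtime_force_resume(dev);

	if (err < 0)
		return err;

	demo_unquiesce(dev);
	return 0;
}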
diff --git a/drivers/i3c/master.c b/drivers/i3c/master.c
index 043691656245..7f8f896fa0c3 100644
--- a/drivers/i3c/master.c
+++ b/drivers/i3c/master.c
@@ -527,8 +527,8 @@ static const struct device_type i3c_masterdev_type = {
.groups = i3c_masterdev_groups,
};
-int i3c_bus_set_mode(struct i3c_bus *i3cbus, enum i3c_bus_mode mode,
- unsigned long max_i2c_scl_rate)
+static int i3c_bus_set_mode(struct i3c_bus *i3cbus, enum i3c_bus_mode mode,
+ unsigned long max_i2c_scl_rate)
{
struct i3c_master_controller *master = i3c_bus_to_i3c_master(i3cbus);
diff --git a/drivers/i3c/master/dw-i3c-master.c b/drivers/i3c/master/dw-i3c-master.c
index b0ff0e12d84c..bd26c3b9634e 100644
--- a/drivers/i3c/master/dw-i3c-master.c
+++ b/drivers/i3c/master/dw-i3c-master.c
@@ -899,6 +899,22 @@ static int dw_i3c_master_reattach_i3c_dev(struct i3c_dev_desc *dev,
struct dw_i3c_i2c_dev_data *data = i3c_dev_get_master_data(dev);
struct i3c_master_controller *m = i3c_dev_get_master(dev);
struct dw_i3c_master *master = to_dw_i3c_master(m);
+ int pos;
+
+ pos = dw_i3c_master_get_free_pos(master);
+
+ if (data->index > pos && pos > 0) {
+ writel(0,
+ master->regs +
+ DEV_ADDR_TABLE_LOC(master->datstartaddr, data->index));
+
+ master->addrs[data->index] = 0;
+ master->free_pos |= BIT(data->index);
+
+ data->index = pos;
+ master->addrs[pos] = dev->info.dyn_addr;
+ master->free_pos &= ~BIT(pos);
+ }
writel(DEV_ADDR_TABLE_DYNAMIC_ADDR(dev->info.dyn_addr),
master->regs +
@@ -1100,15 +1116,13 @@ static const struct i3c_master_controller_ops dw_mipi_i3c_ops = {
static int dw_i3c_probe(struct platform_device *pdev)
{
struct dw_i3c_master *master;
- struct resource *res;
int ret, irq;
master = devm_kzalloc(&pdev->dev, sizeof(*master), GFP_KERNEL);
if (!master)
return -ENOMEM;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- master->regs = devm_ioremap_resource(&pdev->dev, res);
+ master->regs = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(master->regs))
return PTR_ERR(master->regs);
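The reattach hunk above migrates a device toward the front of the device address table: it zeroes the stale entry, returns that slot to the free_pos bitmask, and claims the lower free slot for the dynamic address (assuming dw_i3c_master_get_free_pos() yields the lowest free index). A toy userspace model of the bitmask bookkeeping, with made-up slot numbers:

#include <stdint.h>
#include <stdio.h>

#define BIT(n)	(1U << (n))

/* free_pos has a 1 bit for every free table slot. */
static int get_free_pos(uint32_t free_pos)
{
	return free_pos ? __builtin_ctz(free_pos) : -1;
}

int main(void)
{
	uint32_t free_pos = BIT(1) | BIT(3);	/* slots 1 and 3 free */
	int old = 5, new_pos = get_free_pos(free_pos);

	if (new_pos > 0 && old > new_pos) {	/* move toward slot 0 */
		free_pos |= BIT(old);		/* release the old slot */
		free_pos &= ~BIT(new_pos);	/* claim the new slot */
	}
	printf("moved %d -> %d, free_pos=0x%x\n", old, new_pos, free_pos);
	return 0;
}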
diff --git a/drivers/i3c/master/i3c-master-cdns.c b/drivers/i3c/master/i3c-master-cdns.c
index 10db0bf0655a..54712793709e 100644
--- a/drivers/i3c/master/i3c-master-cdns.c
+++ b/drivers/i3c/master/i3c-master-cdns.c
@@ -22,6 +22,7 @@
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/workqueue.h>
+#include <linux/of_device.h>
#define DEV_ID 0x0
#define DEV_ID_I3C_MASTER 0x5034
@@ -60,6 +61,7 @@
#define CTRL_HALT_EN BIT(30)
#define CTRL_MCS BIT(29)
#define CTRL_MCS_EN BIT(28)
+#define CTRL_THD_DELAY(x) (((x) << 24) & GENMASK(25, 24))
#define CTRL_HJ_DISEC BIT(8)
#define CTRL_MST_ACK BIT(7)
#define CTRL_HJ_ACK BIT(6)
@@ -70,6 +72,7 @@
#define CTRL_MIXED_FAST_BUS_MODE 2
#define CTRL_MIXED_SLOW_BUS_MODE 3
#define CTRL_BUS_MODE_MASK GENMASK(1, 0)
+#define THD_DELAY_MAX 3
#define PRESCL_CTRL0 0x14
#define PRESCL_CTRL0_I2C(x) ((x) << 16)
@@ -388,6 +391,10 @@ struct cdns_i3c_xfer {
struct cdns_i3c_cmd cmds[0];
};
+struct cdns_i3c_data {
+ u8 thd_delay_ns;
+};
+
struct cdns_i3c_master {
struct work_struct hj_work;
struct i3c_master_controller base;
@@ -408,6 +415,7 @@ struct cdns_i3c_master {
struct clk *pclk;
struct cdns_i3c_master_caps caps;
unsigned long i3c_scl_lim;
+ const struct cdns_i3c_data *devdata;
};
static inline struct cdns_i3c_master *
@@ -1181,6 +1189,20 @@ static int cdns_i3c_master_do_daa(struct i3c_master_controller *m)
return 0;
}
+static u8 cdns_i3c_master_calculate_thd_delay(struct cdns_i3c_master *master)
+{
+ unsigned long sysclk_rate = clk_get_rate(master->sysclk);
+ u8 thd_delay = DIV_ROUND_UP(master->devdata->thd_delay_ns,
+ (NSEC_PER_SEC / sysclk_rate));
+
+ /* Values greater than THD_DELAY_MAX are not valid. */
+ if (thd_delay > THD_DELAY_MAX)
+ thd_delay = THD_DELAY_MAX;
+
+ /* The CTRL_THD_DELAY field is encoded as (THD_DELAY_MAX - delay). */
+ return (THD_DELAY_MAX - thd_delay);
+}
+
static int cdns_i3c_master_bus_init(struct i3c_master_controller *m)
{
struct cdns_i3c_master *master = to_cdns_i3c_master(m);
@@ -1264,6 +1286,15 @@ static int cdns_i3c_master_bus_init(struct i3c_master_controller *m)
* We will issue ENTDAA afterwards from the threaded IRQ handler.
*/
ctrl |= CTRL_HJ_ACK | CTRL_HJ_DISEC | CTRL_HALT_EN | CTRL_MCS_EN;
+
+ /*
+ * Configure data hold delay based on device-specific data.
+ *
+ * MIPI I3C Specification 1.0 defines non-zero minimal tHD_PP timing on
+ * master output. This setting makes it possible to meet this timing on the
+ * master's SoC outputs, regardless of PCB balancing.
+ */
+ ctrl |= CTRL_THD_DELAY(cdns_i3c_master_calculate_thd_delay(master));
writel(ctrl, master->regs + CTRL);
cdns_i3c_master_enable(master);
@@ -1521,10 +1552,18 @@ static void cdns_i3c_master_hj(struct work_struct *work)
i3c_master_do_daa(&master->base);
}
+static struct cdns_i3c_data cdns_i3c_devdata = {
+ .thd_delay_ns = 10,
+};
+
+static const struct of_device_id cdns_i3c_master_of_ids[] = {
+ { .compatible = "cdns,i3c-master", .data = &cdns_i3c_devdata },
+ { /* sentinel */ },
+};
+
static int cdns_i3c_master_probe(struct platform_device *pdev)
{
struct cdns_i3c_master *master;
- struct resource *res;
int ret, irq;
u32 val;
@@ -1532,8 +1571,11 @@ static int cdns_i3c_master_probe(struct platform_device *pdev)
if (!master)
return -ENOMEM;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- master->regs = devm_ioremap_resource(&pdev->dev, res);
+ master->devdata = of_device_get_match_data(&pdev->dev);
+ if (!master->devdata)
+ return -EINVAL;
+
+ master->regs = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(master->regs))
return PTR_ERR(master->regs);
@@ -1631,11 +1673,6 @@ static int cdns_i3c_master_remove(struct platform_device *pdev)
return 0;
}
-static const struct of_device_id cdns_i3c_master_of_ids[] = {
- { .compatible = "cdns,i3c-master" },
- { /* sentinel */ },
-};
-
static struct platform_driver cdns_i3c_master = {
.probe = cdns_i3c_master_probe,
.remove = cdns_i3c_master_remove,
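For the tHD_PP computation above, a worked example: with an assumed 166 MHz sysclk the period is 6 ns, so the 10 ns target from devdata needs DIV_ROUND_UP(10, 6) = 2 cycles, and the register field stores THD_DELAY_MAX - 2 = 1. A standalone check of that arithmetic:

#include <stdio.h>

#define THD_DELAY_MAX	3
#define NSEC_PER_SEC	1000000000UL
#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

int main(void)
{
	unsigned long sysclk_rate = 166000000UL;	/* assumed rate */
	unsigned int thd_delay_ns = 10;			/* from devdata */
	unsigned int thd_delay = DIV_ROUND_UP(thd_delay_ns,
					      NSEC_PER_SEC / sysclk_rate);

	if (thd_delay > THD_DELAY_MAX)
		thd_delay = THD_DELAY_MAX;

	/* Encoded as (max - cycles): 3 - 2 = 1 here. */
	printf("encoded THD delay = %u\n", THD_DELAY_MAX - thd_delay);
	return 0;
}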
diff --git a/drivers/ide/Makefile b/drivers/ide/Makefile
index cac02db4098d..d4f4409cfb8b 100644
--- a/drivers/ide/Makefile
+++ b/drivers/ide/Makefile
@@ -3,8 +3,6 @@
# link order is important here
#
-ccflags-y := -Idrivers/ide
-
ide-core-y += ide.o ide-ioctls.o ide-io.o ide-iops.o ide-lib.o ide-probe.o \
ide-taskfile.o ide-pm.o ide-park.o ide-sysfs.o ide-devsets.o \
ide-io-std.o ide-eh.o
diff --git a/drivers/ide/cmd64x.c b/drivers/ide/cmd64x.c
index a1898e11b04e..943bf944bf72 100644
--- a/drivers/ide/cmd64x.c
+++ b/drivers/ide/cmd64x.c
@@ -66,6 +66,9 @@ static void cmd64x_program_timings(ide_drive_t *drive, u8 mode)
struct ide_timing t;
u8 arttim = 0;
+ if (drive->dn >= ARRAY_SIZE(drwtim_regs))
+ return;
+
ide_timing_compute(drive, mode, &t, T, 0);
/*
diff --git a/drivers/ide/ht6560b.c b/drivers/ide/ht6560b.c
index 0dae65ac7d6d..743bc3693ac8 100644
--- a/drivers/ide/ht6560b.c
+++ b/drivers/ide/ht6560b.c
@@ -310,7 +310,7 @@ static void __init ht6560b_init_dev(ide_drive_t *drive)
{
ide_hwif_t *hwif = drive->hwif;
/* Setting default configurations for drives. */
- int t = (HT_CONFIG_DEFAULT << 8) | HT_TIMING_DEFAULT;
+ unsigned long t = (HT_CONFIG_DEFAULT << 8) | HT_TIMING_DEFAULT;
if (hwif->channel)
t |= (HT_SECONDARY_IF << 8);
diff --git a/drivers/ide/ide-cd.c b/drivers/ide/ide-cd.c
index 9d117936bee1..dcf8b51b47fd 100644
--- a/drivers/ide/ide-cd.c
+++ b/drivers/ide/ide-cd.c
@@ -25,6 +25,7 @@
#define IDECD_VERSION "5.00"
+#include <linux/compat.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
@@ -1710,6 +1711,41 @@ static int idecd_ioctl(struct block_device *bdev, fmode_t mode,
return ret;
}
+static int idecd_locked_compat_ioctl(struct block_device *bdev, fmode_t mode,
+ unsigned int cmd, unsigned long arg)
+{
+ struct cdrom_info *info = ide_drv_g(bdev->bd_disk, cdrom_info);
+ void __user *argp = compat_ptr(arg);
+ int err;
+
+ switch (cmd) {
+ case CDROMSETSPINDOWN:
+ return idecd_set_spindown(&info->devinfo, (unsigned long)argp);
+ case CDROMGETSPINDOWN:
+ return idecd_get_spindown(&info->devinfo, (unsigned long)argp);
+ default:
+ break;
+ }
+
+ err = generic_ide_ioctl(info->drive, bdev, cmd, arg);
+ if (err == -EINVAL)
+ err = cdrom_ioctl(&info->devinfo, bdev, mode, cmd,
+ (unsigned long)argp);
+
+ return err;
+}
+
+static int idecd_compat_ioctl(struct block_device *bdev, fmode_t mode,
+ unsigned int cmd, unsigned long arg)
+{
+ int ret;
+
+ mutex_lock(&ide_cd_mutex);
+ ret = idecd_locked_compat_ioctl(bdev, mode, cmd, arg);
+ mutex_unlock(&ide_cd_mutex);
+
+ return ret;
+}
static unsigned int idecd_check_events(struct gendisk *disk,
unsigned int clearing)
@@ -1732,6 +1768,8 @@ static const struct block_device_operations idecd_ops = {
.open = idecd_open,
.release = idecd_release,
.ioctl = idecd_ioctl,
+ .compat_ioctl = IS_ENABLED(CONFIG_COMPAT) ?
+ idecd_compat_ioctl : NULL,
.check_events = idecd_check_events,
.revalidate_disk = idecd_revalidate_disk
};
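The compat path added above differs from the native ioctl only in widening the 32-bit user pointer with compat_ptr(); the IS_ENABLED() ternary keeps the handler compiled on kernels without CONFIG_COMPAT while leaving the operation NULL. A reduced sketch of the wiring, assuming compat_ptr() is available unconditionally as in this series:

#include <linux/blkdev.h>
#include <linux/compat.h>

static int demo_ioctl(struct block_device *bdev, fmode_t mode,
		      unsigned int cmd, unsigned long arg)
{
	return -ENOTTY;			/* a real handler dispatches on cmd */
}

/* Identical dispatch; only the pointer argument is converted. */
static int demo_compat_ioctl(struct block_device *bdev, fmode_t mode,
			     unsigned int cmd, unsigned long arg)
{
	return demo_ioctl(bdev, mode, cmd, (unsigned long)compat_ptr(arg));
}

static const struct block_device_operations demo_ops = {
	.ioctl		= demo_ioctl,
	.compat_ioctl	= IS_ENABLED(CONFIG_COMPAT) ? demo_compat_ioctl : NULL,
};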
diff --git a/drivers/ide/ide-disk.c b/drivers/ide/ide-disk.c
index 197912af5c2f..1d3407d7e095 100644
--- a/drivers/ide/ide-disk.c
+++ b/drivers/ide/ide-disk.c
@@ -794,4 +794,5 @@ const struct ide_disk_ops ide_ata_disk_ops = {
.set_doorlock = ide_disk_set_doorlock,
.do_request = ide_do_rw_disk,
.ioctl = ide_disk_ioctl,
+ .compat_ioctl = ide_disk_ioctl,
};
diff --git a/drivers/ide/ide-floppy.c b/drivers/ide/ide-floppy.c
index 1ea2f9e82bf8..1fe1f9d37a51 100644
--- a/drivers/ide/ide-floppy.c
+++ b/drivers/ide/ide-floppy.c
@@ -19,6 +19,7 @@
#include <linux/types.h>
#include <linux/string.h>
#include <linux/kernel.h>
+#include <linux/compat.h>
#include <linux/delay.h>
#include <linux/timer.h>
#include <linux/mm.h>
@@ -546,4 +547,7 @@ const struct ide_disk_ops ide_atapi_disk_ops = {
.set_doorlock = ide_set_media_lock,
.do_request = ide_floppy_do_request,
.ioctl = ide_floppy_ioctl,
+#ifdef CONFIG_COMPAT
+ .compat_ioctl = ide_floppy_compat_ioctl,
+#endif
};
diff --git a/drivers/ide/ide-floppy.h b/drivers/ide/ide-floppy.h
index 13c9b4b6d75e..8505a5f58f4e 100644
--- a/drivers/ide/ide-floppy.h
+++ b/drivers/ide/ide-floppy.h
@@ -26,6 +26,8 @@ void ide_floppy_create_read_capacity_cmd(struct ide_atapi_pc *);
/* ide-floppy_ioctl.c */
int ide_floppy_ioctl(ide_drive_t *, struct block_device *, fmode_t,
unsigned int, unsigned long);
+int ide_floppy_compat_ioctl(ide_drive_t *, struct block_device *, fmode_t,
+ unsigned int, unsigned long);
#ifdef CONFIG_IDE_PROC_FS
/* ide-floppy_proc.c */
diff --git a/drivers/ide/ide-floppy_ioctl.c b/drivers/ide/ide-floppy_ioctl.c
index 40a2ebe34e1d..39a790ac6cc3 100644
--- a/drivers/ide/ide-floppy_ioctl.c
+++ b/drivers/ide/ide-floppy_ioctl.c
@@ -5,6 +5,7 @@
#include <linux/kernel.h>
#include <linux/ide.h>
+#include <linux/compat.h>
#include <linux/cdrom.h>
#include <linux/mutex.h>
@@ -302,3 +303,37 @@ out:
mutex_unlock(&ide_floppy_ioctl_mutex);
return err;
}
+
+#ifdef CONFIG_COMPAT
+int ide_floppy_compat_ioctl(ide_drive_t *drive, struct block_device *bdev,
+ fmode_t mode, unsigned int cmd, unsigned long arg)
+{
+ struct ide_atapi_pc pc;
+ void __user *argp = compat_ptr(arg);
+ int err;
+
+ mutex_lock(&ide_floppy_ioctl_mutex);
+ if (cmd == CDROMEJECT || cmd == CDROM_LOCKDOOR) {
+ err = ide_floppy_lockdoor(drive, &pc, arg, cmd);
+ goto out;
+ }
+
+ err = ide_floppy_format_ioctl(drive, &pc, mode, cmd, argp);
+ if (err != -ENOTTY)
+ goto out;
+
+ /*
+ * skip SCSI_IOCTL_SEND_COMMAND (deprecated)
+ * and CDROM_SEND_PACKET (legacy) ioctls
+ */
+ if (cmd != CDROM_SEND_PACKET && cmd != SCSI_IOCTL_SEND_COMMAND)
+ err = scsi_cmd_blk_ioctl(bdev, mode, cmd, argp);
+
+ if (err == -ENOTTY)
+ err = generic_ide_ioctl(drive, bdev, cmd, arg);
+
+out:
+ mutex_unlock(&ide_floppy_ioctl_mutex);
+ return err;
+}
+#endif
diff --git a/drivers/ide/ide-gd.c b/drivers/ide/ide-gd.c
index dba9ad5c97b3..1bb99b556393 100644
--- a/drivers/ide/ide-gd.c
+++ b/drivers/ide/ide-gd.c
@@ -341,11 +341,28 @@ static int ide_gd_ioctl(struct block_device *bdev, fmode_t mode,
return drive->disk_ops->ioctl(drive, bdev, mode, cmd, arg);
}
+#ifdef CONFIG_COMPAT
+static int ide_gd_compat_ioctl(struct block_device *bdev, fmode_t mode,
+ unsigned int cmd, unsigned long arg)
+{
+ struct ide_disk_obj *idkp = ide_drv_g(bdev->bd_disk, ide_disk_obj);
+ ide_drive_t *drive = idkp->drive;
+
+ if (!drive->disk_ops->compat_ioctl)
+ return -ENOIOCTLCMD;
+
+ return drive->disk_ops->compat_ioctl(drive, bdev, mode, cmd, arg);
+}
+#endif
+
static const struct block_device_operations ide_gd_ops = {
.owner = THIS_MODULE,
.open = ide_gd_unlocked_open,
.release = ide_gd_release,
.ioctl = ide_gd_ioctl,
+#ifdef CONFIG_COMPAT
+ .compat_ioctl = ide_gd_compat_ioctl,
+#endif
.getgeo = ide_gd_getgeo,
.check_events = ide_gd_check_events,
.unlock_native_capacity = ide_gd_unlock_native_capacity,
diff --git a/drivers/ide/ide-ioctls.c b/drivers/ide/ide-ioctls.c
index d48c17003874..09491098047b 100644
--- a/drivers/ide/ide-ioctls.c
+++ b/drivers/ide/ide-ioctls.c
@@ -3,11 +3,20 @@
* IDE ioctls handling.
*/
+#include <linux/compat.h>
#include <linux/export.h>
#include <linux/hdreg.h>
#include <linux/ide.h>
#include <linux/slab.h>
+static int put_user_long(long val, unsigned long arg)
+{
+ if (in_compat_syscall())
+ return put_user(val, (compat_long_t __user *)compat_ptr(arg));
+
+ return put_user(val, (long __user *)arg);
+}
+
static const struct ide_ioctl_devset ide_ioctl_settings[] = {
{ HDIO_GET_32BIT, HDIO_SET_32BIT, &ide_devset_io_32bit },
{ HDIO_GET_KEEPSETTINGS, HDIO_SET_KEEPSETTINGS, &ide_devset_keepsettings },
@@ -37,7 +46,7 @@ read_val:
mutex_lock(&ide_setting_mtx);
err = ds->get(drive);
mutex_unlock(&ide_setting_mtx);
- return err >= 0 ? put_user(err, (long __user *)arg) : err;
+ return err >= 0 ? put_user_long(err, arg) : err;
set_val:
if (bdev != bdev->bd_contains)
@@ -56,7 +65,7 @@ set_val:
EXPORT_SYMBOL_GPL(ide_setting_ioctl);
static int ide_get_identity_ioctl(ide_drive_t *drive, unsigned int cmd,
- unsigned long arg)
+ void __user *argp)
{
u16 *id = NULL;
int size = (cmd == HDIO_GET_IDENTITY) ? (ATA_ID_WORDS * 2) : 142;
@@ -77,7 +86,7 @@ static int ide_get_identity_ioctl(ide_drive_t *drive, unsigned int cmd,
memcpy(id, drive->id, size);
ata_id_to_hd_driveid(id);
- if (copy_to_user((void __user *)arg, id, size))
+ if (copy_to_user(argp, id, size))
rc = -EFAULT;
kfree(id);
@@ -87,10 +96,10 @@ out:
static int ide_get_nice_ioctl(ide_drive_t *drive, unsigned long arg)
{
- return put_user((!!(drive->dev_flags & IDE_DFLAG_DSC_OVERLAP)
+ return put_user_long((!!(drive->dev_flags & IDE_DFLAG_DSC_OVERLAP)
<< IDE_NICE_DSC_OVERLAP) |
(!!(drive->dev_flags & IDE_DFLAG_NICE1)
- << IDE_NICE_1), (long __user *)arg);
+ << IDE_NICE_1), arg);
}
static int ide_set_nice_ioctl(ide_drive_t *drive, unsigned long arg)
@@ -115,7 +124,7 @@ static int ide_set_nice_ioctl(ide_drive_t *drive, unsigned long arg)
return 0;
}
-static int ide_cmd_ioctl(ide_drive_t *drive, unsigned long arg)
+static int ide_cmd_ioctl(ide_drive_t *drive, void __user *argp)
{
u8 *buf = NULL;
int bufsize = 0, err = 0;
@@ -123,7 +132,7 @@ static int ide_cmd_ioctl(ide_drive_t *drive, unsigned long arg)
struct ide_cmd cmd;
struct ide_taskfile *tf = &cmd.tf;
- if (NULL == (void *) arg) {
+ if (NULL == argp) {
struct request *rq;
rq = blk_get_request(drive->queue, REQ_OP_DRV_IN, 0);
@@ -135,7 +144,7 @@ static int ide_cmd_ioctl(ide_drive_t *drive, unsigned long arg)
return err;
}
- if (copy_from_user(args, (void __user *)arg, 4))
+ if (copy_from_user(args, argp, 4))
return -EFAULT;
memset(&cmd, 0, sizeof(cmd));
@@ -181,19 +190,18 @@ static int ide_cmd_ioctl(ide_drive_t *drive, unsigned long arg)
args[1] = tf->error;
args[2] = tf->nsect;
abort:
- if (copy_to_user((void __user *)arg, &args, 4))
+ if (copy_to_user(argp, &args, 4))
err = -EFAULT;
if (buf) {
- if (copy_to_user((void __user *)(arg + 4), buf, bufsize))
+ if (copy_to_user((argp + 4), buf, bufsize))
err = -EFAULT;
kfree(buf);
}
return err;
}
-static int ide_task_ioctl(ide_drive_t *drive, unsigned long arg)
+static int ide_task_ioctl(ide_drive_t *drive, void __user *p)
{
- void __user *p = (void __user *)arg;
int err = 0;
u8 args[7];
struct ide_cmd cmd;
@@ -237,6 +245,10 @@ int generic_ide_ioctl(ide_drive_t *drive, struct block_device *bdev,
unsigned int cmd, unsigned long arg)
{
int err;
+ void __user *argp = (void __user *)arg;
+
+ if (in_compat_syscall())
+ argp = compat_ptr(arg);
err = ide_setting_ioctl(drive, bdev, cmd, arg, ide_ioctl_settings);
if (err != -EOPNOTSUPP)
@@ -247,7 +259,7 @@ int generic_ide_ioctl(ide_drive_t *drive, struct block_device *bdev,
case HDIO_GET_IDENTITY:
if (bdev != bdev->bd_contains)
return -EINVAL;
- return ide_get_identity_ioctl(drive, cmd, arg);
+ return ide_get_identity_ioctl(drive, cmd, argp);
case HDIO_GET_NICE:
return ide_get_nice_ioctl(drive, arg);
case HDIO_SET_NICE:
@@ -258,6 +270,9 @@ int generic_ide_ioctl(ide_drive_t *drive, struct block_device *bdev,
case HDIO_DRIVE_TASKFILE:
if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SYS_RAWIO))
return -EACCES;
+ /* missing compat handler for HDIO_DRIVE_TASKFILE */
+ if (in_compat_syscall())
+ return -ENOTTY;
if (drive->media == ide_disk)
return ide_taskfile_ioctl(drive, arg);
return -ENOMSG;
@@ -265,11 +280,11 @@ int generic_ide_ioctl(ide_drive_t *drive, struct block_device *bdev,
case HDIO_DRIVE_CMD:
if (!capable(CAP_SYS_RAWIO))
return -EACCES;
- return ide_cmd_ioctl(drive, arg);
+ return ide_cmd_ioctl(drive, argp);
case HDIO_DRIVE_TASK:
if (!capable(CAP_SYS_RAWIO))
return -EACCES;
- return ide_task_ioctl(drive, arg);
+ return ide_task_ioctl(drive, argp);
case HDIO_DRIVE_RESET:
if (!capable(CAP_SYS_ADMIN))
return -EACCES;
@@ -277,7 +292,7 @@ int generic_ide_ioctl(ide_drive_t *drive, struct block_device *bdev,
case HDIO_GET_BUSSTATE:
if (!capable(CAP_SYS_ADMIN))
return -EACCES;
- if (put_user(BUSSTATE_ON, (long __user *)arg))
+ if (put_user_long(BUSSTATE_ON, arg))
return -EFAULT;
return 0;
case HDIO_SET_BUSSTATE:
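The helper introduced at the top of this file is what lets one implementation serve both ABIs: a 'long' result is stored as 4 bytes for a compat (32-bit) caller and 8 bytes for a native one, chosen at run time with in_compat_syscall(); the same test selects compat_ptr() for pointer arguments in generic_ide_ioctl(). A condensed restatement of the helper:

#include <linux/compat.h>
#include <linux/uaccess.h>

/* Store a long-sized ioctl result with the calling task's ABI width. */
static int demo_put_user_long(long val, unsigned long arg)
{
	if (in_compat_syscall())
		return put_user(val, (compat_long_t __user *)compat_ptr(arg));

	return put_user(val, (long __user *)arg);
}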
diff --git a/drivers/ide/ide-iops.c b/drivers/ide/ide-iops.c
index d1445d74e9c3..f2be127ee96e 100644
--- a/drivers/ide/ide-iops.c
+++ b/drivers/ide/ide-iops.c
@@ -530,7 +530,6 @@ int ide_wait_not_busy(ide_hwif_t *hwif, unsigned long timeout)
*/
if (stat == 0xff)
return -ENODEV;
- touch_softlockup_watchdog();
touch_nmi_watchdog();
}
return -EBUSY;
diff --git a/drivers/ide/ide-proc.c b/drivers/ide/ide-proc.c
index 0363d73b0be0..e73016cbd406 100644
--- a/drivers/ide/ide-proc.c
+++ b/drivers/ide/ide-proc.c
@@ -206,7 +206,7 @@ static int set_xfer_rate (ide_drive_t *drive, int arg)
ide_devset_rw(current_speed, xfer_rate);
ide_devset_rw_field(init_speed, init_speed);
ide_devset_rw_flag(nice1, IDE_DFLAG_NICE1);
-ide_devset_rw_field(number, dn);
+ide_devset_ro_field(number, dn);
static const struct ide_proc_devset ide_generic_settings[] = {
IDE_PROC_DEVSET(current_speed, 0, 70),
diff --git a/drivers/ide/ide-tape.c b/drivers/ide/ide-tape.c
index 3e7482695f77..6f26634b22bb 100644
--- a/drivers/ide/ide-tape.c
+++ b/drivers/ide/ide-tape.c
@@ -1945,11 +1945,22 @@ static int idetape_ioctl(struct block_device *bdev, fmode_t mode,
return err;
}
+static int idetape_compat_ioctl(struct block_device *bdev, fmode_t mode,
+ unsigned int cmd, unsigned long arg)
+{
+ if (cmd == 0x0340 || cmd == 0x350)
+ arg = (unsigned long)compat_ptr(arg);
+
+ return idetape_ioctl(bdev, mode, cmd, arg);
+}
+
static const struct block_device_operations idetape_block_ops = {
.owner = THIS_MODULE,
.open = idetape_open,
.release = idetape_release,
.ioctl = idetape_ioctl,
+ .compat_ioctl = IS_ENABLED(CONFIG_COMPAT) ?
+ idetape_compat_ioctl : NULL,
};
static int ide_tape_probe(ide_drive_t *drive)
diff --git a/drivers/ide/pmac.c b/drivers/ide/pmac.c
index b5647e34e74e..ea0b064b5f56 100644
--- a/drivers/ide/pmac.c
+++ b/drivers/ide/pmac.c
@@ -1019,7 +1019,6 @@ static int pmac_ide_setup_device(pmac_ide_hwif_t *pmif, struct ide_hw *hw)
struct device_node *np = pmif->node;
const int *bidp;
struct ide_host *host;
- ide_hwif_t *hwif;
struct ide_hw *hws[] = { hw };
struct ide_port_info d = pmac_port_info;
int rc;
@@ -1075,7 +1074,7 @@ static int pmac_ide_setup_device(pmac_ide_hwif_t *pmif, struct ide_hw *hw)
rc = -ENOMEM;
goto bail;
}
- hwif = pmif->hwif = host->ports[0];
+ pmif->hwif = host->ports[0];
if (on_media_bay(pmif)) {
/* Fixup bus ID for media bay */
diff --git a/drivers/ide/qd65xx.c b/drivers/ide/qd65xx.c
index 6ce318ebd0cc..ab79b6289464 100644
--- a/drivers/ide/qd65xx.c
+++ b/drivers/ide/qd65xx.c
@@ -299,7 +299,7 @@ static void __init qd6500_init_dev(ide_drive_t *drive)
static void __init qd6580_init_dev(ide_drive_t *drive)
{
ide_hwif_t *hwif = drive->hwif;
- u16 t1, t2;
+ unsigned long t1, t2;
u8 base = (hwif->config_data & 0xff00) >> 8;
u8 config = QD_CONFIG(hwif);
diff --git a/drivers/ide/serverworks.c b/drivers/ide/serverworks.c
index ac6fc3fffa0d..458e72e034b0 100644
--- a/drivers/ide/serverworks.c
+++ b/drivers/ide/serverworks.c
@@ -115,6 +115,9 @@ static void svwks_set_pio_mode(ide_hwif_t *hwif, ide_drive_t *drive)
struct pci_dev *dev = to_pci_dev(hwif->dev);
const u8 pio = drive->pio_mode - XFER_PIO_0;
+ if (drive->dn >= ARRAY_SIZE(drive_pci))
+ return;
+
pci_write_config_byte(dev, drive_pci[drive->dn], pio_modes[pio]);
if (svwks_csb_check(dev)) {
@@ -141,6 +144,9 @@ static void svwks_set_dma_mode(ide_hwif_t *hwif, ide_drive_t *drive)
u8 ultra_enable = 0, ultra_timing = 0, dma_timing = 0;
+ if (drive->dn >= ARRAY_SIZE(drive_pci2))
+ return;
+
pci_read_config_byte(dev, (0x56|hwif->channel), &ultra_timing);
pci_read_config_byte(dev, 0x54, &ultra_enable);
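Both the cmd64x and serverworks hunks add the same hardening: drive->dn indexes small fixed register tables, so it is checked against ARRAY_SIZE() before use rather than reading past the array. A generic sketch of the guard with a hypothetical table:

#include <linux/kernel.h>	/* ARRAY_SIZE() */
#include <linux/types.h>

static const u8 demo_regs[4] = { 0x40, 0x41, 0x42, 0x43 };

static void demo_write(u8 reg) { }	/* hypothetical register write */

static void demo_set_mode(unsigned int dn)
{
	/* Refuse units beyond the table instead of indexing past it. */
	if (dn >= ARRAY_SIZE(demo_regs))
		return;

	demo_write(demo_regs[dn]);
}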
diff --git a/drivers/ide/siimage.c b/drivers/ide/siimage.c
index 57eea5a9047f..c4b20f350b84 100644
--- a/drivers/ide/siimage.c
+++ b/drivers/ide/siimage.c
@@ -648,8 +648,7 @@ static void sil_quirkproc(ide_drive_t *drive)
static void init_iops_siimage(ide_hwif_t *hwif)
{
- struct pci_dev *dev = to_pci_dev(hwif->dev);
- struct ide_host *host = pci_get_drvdata(dev);
+ struct ide_host *host = dev_get_drvdata(hwif->dev);
hwif->hwif_data = NULL;
diff --git a/drivers/ide/tx4939ide.c b/drivers/ide/tx4939ide.c
index d5e871fe840d..b1bbf807bb3d 100644
--- a/drivers/ide/tx4939ide.c
+++ b/drivers/ide/tx4939ide.c
@@ -549,7 +549,7 @@ static int __init tx4939ide_probe(struct platform_device *pdev)
return -ENODEV;
if (!devm_request_mem_region(&pdev->dev, res->start,
- resource_size(res), "tx4938ide"))
+ resource_size(res), MODNAME))
return -EBUSY;
mapbase = (unsigned long)devm_ioremap(&pdev->dev, res->start,
resource_size(res));
diff --git a/drivers/ide/via82cxxx.c b/drivers/ide/via82cxxx.c
index 977cb00398b0..63a3aca506fc 100644
--- a/drivers/ide/via82cxxx.c
+++ b/drivers/ide/via82cxxx.c
@@ -175,8 +175,7 @@ static void via_set_speed(ide_hwif_t *hwif, u8 dn, struct ide_timing *timing)
static void via_set_drive(ide_hwif_t *hwif, ide_drive_t *drive)
{
ide_drive_t *peer = ide_get_pair_dev(drive);
- struct pci_dev *dev = to_pci_dev(hwif->dev);
- struct ide_host *host = pci_get_drvdata(dev);
+ struct ide_host *host = dev_get_drvdata(hwif->dev);
struct via82cxxx_dev *vdev = host->host_priv;
struct ide_timing t, p;
unsigned int T, UT;
diff --git a/drivers/idle/intel_idle.c b/drivers/idle/intel_idle.c
index 75fd2a7b0842..7833e650789f 100644
--- a/drivers/idle/intel_idle.c
+++ b/drivers/idle/intel_idle.c
@@ -41,6 +41,7 @@
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+#include <linux/acpi.h>
#include <linux/kernel.h>
#include <linux/cpuidle.h>
#include <linux/tick.h>
@@ -79,6 +80,7 @@ struct idle_cpu {
unsigned long auto_demotion_disable_flags;
bool byt_auto_demotion_disable_flag;
bool disable_promotion_to_c1e;
+ bool use_acpi;
};
static const struct idle_cpu *icpu;
@@ -90,6 +92,11 @@ static void intel_idle_s2idle(struct cpuidle_device *dev,
static struct cpuidle_state *cpuidle_state_table;
/*
+ * Enable this state by default even if the ACPI _CST does not list it.
+ */
+#define CPUIDLE_FLAG_ALWAYS_ENABLE BIT(15)
+
+/*
* Set this flag for states where the HW flushes the TLB for us
* and so we don't need cross-calls to keep it consistent.
* If this flag is set, SW flushes the TLB, so even if the
@@ -124,7 +131,7 @@ static struct cpuidle_state nehalem_cstates[] = {
{
.name = "C1E",
.desc = "MWAIT 0x01",
- .flags = MWAIT2flg(0x01),
+ .flags = MWAIT2flg(0x01) | CPUIDLE_FLAG_ALWAYS_ENABLE,
.exit_latency = 10,
.target_residency = 20,
.enter = &intel_idle,
@@ -161,7 +168,7 @@ static struct cpuidle_state snb_cstates[] = {
{
.name = "C1E",
.desc = "MWAIT 0x01",
- .flags = MWAIT2flg(0x01),
+ .flags = MWAIT2flg(0x01) | CPUIDLE_FLAG_ALWAYS_ENABLE,
.exit_latency = 10,
.target_residency = 20,
.enter = &intel_idle,
@@ -296,7 +303,7 @@ static struct cpuidle_state ivb_cstates[] = {
{
.name = "C1E",
.desc = "MWAIT 0x01",
- .flags = MWAIT2flg(0x01),
+ .flags = MWAIT2flg(0x01) | CPUIDLE_FLAG_ALWAYS_ENABLE,
.exit_latency = 10,
.target_residency = 20,
.enter = &intel_idle,
@@ -341,7 +348,7 @@ static struct cpuidle_state ivt_cstates[] = {
{
.name = "C1E",
.desc = "MWAIT 0x01",
- .flags = MWAIT2flg(0x01),
+ .flags = MWAIT2flg(0x01) | CPUIDLE_FLAG_ALWAYS_ENABLE,
.exit_latency = 10,
.target_residency = 80,
.enter = &intel_idle,
@@ -378,7 +385,7 @@ static struct cpuidle_state ivt_cstates_4s[] = {
{
.name = "C1E",
.desc = "MWAIT 0x01",
- .flags = MWAIT2flg(0x01),
+ .flags = MWAIT2flg(0x01) | CPUIDLE_FLAG_ALWAYS_ENABLE,
.exit_latency = 10,
.target_residency = 250,
.enter = &intel_idle,
@@ -415,7 +422,7 @@ static struct cpuidle_state ivt_cstates_8s[] = {
{
.name = "C1E",
.desc = "MWAIT 0x01",
- .flags = MWAIT2flg(0x01),
+ .flags = MWAIT2flg(0x01) | CPUIDLE_FLAG_ALWAYS_ENABLE,
.exit_latency = 10,
.target_residency = 500,
.enter = &intel_idle,
@@ -452,7 +459,7 @@ static struct cpuidle_state hsw_cstates[] = {
{
.name = "C1E",
.desc = "MWAIT 0x01",
- .flags = MWAIT2flg(0x01),
+ .flags = MWAIT2flg(0x01) | CPUIDLE_FLAG_ALWAYS_ENABLE,
.exit_latency = 10,
.target_residency = 20,
.enter = &intel_idle,
@@ -520,7 +527,7 @@ static struct cpuidle_state bdw_cstates[] = {
{
.name = "C1E",
.desc = "MWAIT 0x01",
- .flags = MWAIT2flg(0x01),
+ .flags = MWAIT2flg(0x01) | CPUIDLE_FLAG_ALWAYS_ENABLE,
.exit_latency = 10,
.target_residency = 20,
.enter = &intel_idle,
@@ -589,7 +596,7 @@ static struct cpuidle_state skl_cstates[] = {
{
.name = "C1E",
.desc = "MWAIT 0x01",
- .flags = MWAIT2flg(0x01),
+ .flags = MWAIT2flg(0x01) | CPUIDLE_FLAG_ALWAYS_ENABLE,
.exit_latency = 10,
.target_residency = 20,
.enter = &intel_idle,
@@ -658,7 +665,7 @@ static struct cpuidle_state skx_cstates[] = {
{
.name = "C1E",
.desc = "MWAIT 0x01",
- .flags = MWAIT2flg(0x01),
+ .flags = MWAIT2flg(0x01) | CPUIDLE_FLAG_ALWAYS_ENABLE,
.exit_latency = 10,
.target_residency = 20,
.enter = &intel_idle,
@@ -808,7 +815,7 @@ static struct cpuidle_state bxt_cstates[] = {
{
.name = "C1E",
.desc = "MWAIT 0x01",
- .flags = MWAIT2flg(0x01),
+ .flags = MWAIT2flg(0x01) | CPUIDLE_FLAG_ALWAYS_ENABLE,
.exit_latency = 10,
.target_residency = 20,
.enter = &intel_idle,
@@ -869,7 +876,7 @@ static struct cpuidle_state dnv_cstates[] = {
{
.name = "C1E",
.desc = "MWAIT 0x01",
- .flags = MWAIT2flg(0x01),
+ .flags = MWAIT2flg(0x01) | CPUIDLE_FLAG_ALWAYS_ENABLE,
.exit_latency = 10,
.target_residency = 20,
.enter = &intel_idle,
@@ -944,37 +951,19 @@ static void intel_idle_s2idle(struct cpuidle_device *dev,
mwait_idle_with_hints(eax, ecx);
}
-static void __setup_broadcast_timer(bool on)
-{
- if (on)
- tick_broadcast_enable();
- else
- tick_broadcast_disable();
-}
-
-static void auto_demotion_disable(void)
-{
- unsigned long long msr_bits;
-
- rdmsrl(MSR_PKG_CST_CONFIG_CONTROL, msr_bits);
- msr_bits &= ~(icpu->auto_demotion_disable_flags);
- wrmsrl(MSR_PKG_CST_CONFIG_CONTROL, msr_bits);
-}
-static void c1e_promotion_disable(void)
-{
- unsigned long long msr_bits;
-
- rdmsrl(MSR_IA32_POWER_CTL, msr_bits);
- msr_bits &= ~0x2;
- wrmsrl(MSR_IA32_POWER_CTL, msr_bits);
-}
-
static const struct idle_cpu idle_cpu_nehalem = {
.state_table = nehalem_cstates,
.auto_demotion_disable_flags = NHM_C1_AUTO_DEMOTE | NHM_C3_AUTO_DEMOTE,
.disable_promotion_to_c1e = true,
};
+static const struct idle_cpu idle_cpu_nhx = {
+ .state_table = nehalem_cstates,
+ .auto_demotion_disable_flags = NHM_C1_AUTO_DEMOTE | NHM_C3_AUTO_DEMOTE,
+ .disable_promotion_to_c1e = true,
+ .use_acpi = true,
+};
+
static const struct idle_cpu idle_cpu_atom = {
.state_table = atom_cstates,
};
@@ -993,6 +982,12 @@ static const struct idle_cpu idle_cpu_snb = {
.disable_promotion_to_c1e = true,
};
+static const struct idle_cpu idle_cpu_snx = {
+ .state_table = snb_cstates,
+ .disable_promotion_to_c1e = true,
+ .use_acpi = true,
+};
+
static const struct idle_cpu idle_cpu_byt = {
.state_table = byt_cstates,
.disable_promotion_to_c1e = true,
@@ -1013,6 +1008,7 @@ static const struct idle_cpu idle_cpu_ivb = {
static const struct idle_cpu idle_cpu_ivt = {
.state_table = ivt_cstates,
.disable_promotion_to_c1e = true,
+ .use_acpi = true,
};
static const struct idle_cpu idle_cpu_hsw = {
@@ -1020,11 +1016,23 @@ static const struct idle_cpu idle_cpu_hsw = {
.disable_promotion_to_c1e = true,
};
+static const struct idle_cpu idle_cpu_hsx = {
+ .state_table = hsw_cstates,
+ .disable_promotion_to_c1e = true,
+ .use_acpi = true,
+};
+
static const struct idle_cpu idle_cpu_bdw = {
.state_table = bdw_cstates,
.disable_promotion_to_c1e = true,
};
+static const struct idle_cpu idle_cpu_bdx = {
+ .state_table = bdw_cstates,
+ .disable_promotion_to_c1e = true,
+ .use_acpi = true,
+};
+
static const struct idle_cpu idle_cpu_skl = {
.state_table = skl_cstates,
.disable_promotion_to_c1e = true,
@@ -1033,15 +1041,18 @@ static const struct idle_cpu idle_cpu_skl = {
static const struct idle_cpu idle_cpu_skx = {
.state_table = skx_cstates,
.disable_promotion_to_c1e = true,
+ .use_acpi = true,
};
static const struct idle_cpu idle_cpu_avn = {
.state_table = avn_cstates,
.disable_promotion_to_c1e = true,
+ .use_acpi = true,
};
static const struct idle_cpu idle_cpu_knl = {
.state_table = knl_cstates,
+ .use_acpi = true,
};
static const struct idle_cpu idle_cpu_bxt = {
@@ -1052,20 +1063,21 @@ static const struct idle_cpu idle_cpu_bxt = {
static const struct idle_cpu idle_cpu_dnv = {
.state_table = dnv_cstates,
.disable_promotion_to_c1e = true,
+ .use_acpi = true,
};
static const struct x86_cpu_id intel_idle_ids[] __initconst = {
- INTEL_CPU_FAM6(NEHALEM_EP, idle_cpu_nehalem),
+ INTEL_CPU_FAM6(NEHALEM_EP, idle_cpu_nhx),
INTEL_CPU_FAM6(NEHALEM, idle_cpu_nehalem),
INTEL_CPU_FAM6(NEHALEM_G, idle_cpu_nehalem),
INTEL_CPU_FAM6(WESTMERE, idle_cpu_nehalem),
- INTEL_CPU_FAM6(WESTMERE_EP, idle_cpu_nehalem),
- INTEL_CPU_FAM6(NEHALEM_EX, idle_cpu_nehalem),
+ INTEL_CPU_FAM6(WESTMERE_EP, idle_cpu_nhx),
+ INTEL_CPU_FAM6(NEHALEM_EX, idle_cpu_nhx),
INTEL_CPU_FAM6(ATOM_BONNELL, idle_cpu_atom),
INTEL_CPU_FAM6(ATOM_BONNELL_MID, idle_cpu_lincroft),
- INTEL_CPU_FAM6(WESTMERE_EX, idle_cpu_nehalem),
+ INTEL_CPU_FAM6(WESTMERE_EX, idle_cpu_nhx),
INTEL_CPU_FAM6(SANDYBRIDGE, idle_cpu_snb),
- INTEL_CPU_FAM6(SANDYBRIDGE_X, idle_cpu_snb),
+ INTEL_CPU_FAM6(SANDYBRIDGE_X, idle_cpu_snx),
INTEL_CPU_FAM6(ATOM_SALTWELL, idle_cpu_atom),
INTEL_CPU_FAM6(ATOM_SILVERMONT, idle_cpu_byt),
INTEL_CPU_FAM6(ATOM_SILVERMONT_MID, idle_cpu_tangier),
@@ -1073,14 +1085,14 @@ static const struct x86_cpu_id intel_idle_ids[] __initconst = {
INTEL_CPU_FAM6(IVYBRIDGE, idle_cpu_ivb),
INTEL_CPU_FAM6(IVYBRIDGE_X, idle_cpu_ivt),
INTEL_CPU_FAM6(HASWELL, idle_cpu_hsw),
- INTEL_CPU_FAM6(HASWELL_X, idle_cpu_hsw),
+ INTEL_CPU_FAM6(HASWELL_X, idle_cpu_hsx),
INTEL_CPU_FAM6(HASWELL_L, idle_cpu_hsw),
INTEL_CPU_FAM6(HASWELL_G, idle_cpu_hsw),
INTEL_CPU_FAM6(ATOM_SILVERMONT_D, idle_cpu_avn),
INTEL_CPU_FAM6(BROADWELL, idle_cpu_bdw),
INTEL_CPU_FAM6(BROADWELL_G, idle_cpu_bdw),
- INTEL_CPU_FAM6(BROADWELL_X, idle_cpu_bdw),
- INTEL_CPU_FAM6(BROADWELL_D, idle_cpu_bdw),
+ INTEL_CPU_FAM6(BROADWELL_X, idle_cpu_bdx),
+ INTEL_CPU_FAM6(BROADWELL_D, idle_cpu_bdx),
INTEL_CPU_FAM6(SKYLAKE_L, idle_cpu_skl),
INTEL_CPU_FAM6(SKYLAKE, idle_cpu_skl),
INTEL_CPU_FAM6(KABYLAKE_L, idle_cpu_skl),
@@ -1095,76 +1107,169 @@ static const struct x86_cpu_id intel_idle_ids[] __initconst = {
{}
};
-/*
- * intel_idle_probe()
+#define INTEL_CPU_FAM6_MWAIT \
+ { X86_VENDOR_INTEL, 6, X86_MODEL_ANY, X86_FEATURE_MWAIT, 0 }
+
+static const struct x86_cpu_id intel_mwait_ids[] __initconst = {
+ INTEL_CPU_FAM6_MWAIT,
+ {}
+};
+
+static bool __init intel_idle_max_cstate_reached(int cstate)
+{
+ if (cstate + 1 > max_cstate) {
+ pr_info("max_cstate %d reached\n", max_cstate);
+ return true;
+ }
+ return false;
+}
+
+#ifdef CONFIG_ACPI_PROCESSOR_CSTATE
+#include <acpi/processor.h>
+
+static bool no_acpi __read_mostly;
+module_param(no_acpi, bool, 0444);
+MODULE_PARM_DESC(no_acpi, "Do not use ACPI _CST for building the idle states list");
+
+static struct acpi_processor_power acpi_state_table __initdata;
+
+/**
+ * intel_idle_cst_usable - Check if the _CST information can be used.
+ *
+ * Check if all of the C-states listed by _CST in the max_cstate range are
+ * ACPI_CSTATE_FFH, which means that they should be entered via MWAIT.
*/
-static int __init intel_idle_probe(void)
+static bool __init intel_idle_cst_usable(void)
{
- unsigned int eax, ebx, ecx;
- const struct x86_cpu_id *id;
+ int cstate, limit;
- if (max_cstate == 0) {
- pr_debug("disabled\n");
- return -EPERM;
- }
+ limit = min_t(int, min_t(int, CPUIDLE_STATE_MAX, max_cstate + 1),
+ acpi_state_table.count);
- id = x86_match_cpu(intel_idle_ids);
- if (!id) {
- if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL &&
- boot_cpu_data.x86 == 6)
- pr_debug("does not run on family %d model %d\n",
- boot_cpu_data.x86, boot_cpu_data.x86_model);
- return -ENODEV;
+ for (cstate = 1; cstate < limit; cstate++) {
+ struct acpi_processor_cx *cx = &acpi_state_table.states[cstate];
+
+ if (cx->entry_method != ACPI_CSTATE_FFH)
+ return false;
}
- if (!boot_cpu_has(X86_FEATURE_MWAIT)) {
- pr_debug("Please enable MWAIT in BIOS SETUP\n");
- return -ENODEV;
+ return true;
+}
+
+static bool __init intel_idle_acpi_cst_extract(void)
+{
+ unsigned int cpu;
+
+ if (no_acpi) {
+ pr_debug("Not allowed to use ACPI _CST\n");
+ return false;
}
- if (boot_cpu_data.cpuid_level < CPUID_MWAIT_LEAF)
- return -ENODEV;
+ for_each_possible_cpu(cpu) {
+ struct acpi_processor *pr = per_cpu(processors, cpu);
- cpuid(CPUID_MWAIT_LEAF, &eax, &ebx, &ecx, &mwait_substates);
+ if (!pr)
+ continue;
- if (!(ecx & CPUID5_ECX_EXTENSIONS_SUPPORTED) ||
- !(ecx & CPUID5_ECX_INTERRUPT_BREAK) ||
- !mwait_substates)
- return -ENODEV;
+ if (acpi_processor_evaluate_cst(pr->handle, cpu, &acpi_state_table))
+ continue;
- pr_debug("MWAIT substates: 0x%x\n", mwait_substates);
+ acpi_state_table.count++;
- icpu = (const struct idle_cpu *)id->driver_data;
- cpuidle_state_table = icpu->state_table;
+ if (!intel_idle_cst_usable())
+ continue;
- pr_debug("v" INTEL_IDLE_VERSION " model 0x%X\n",
- boot_cpu_data.x86_model);
+ if (!acpi_processor_claim_cst_control()) {
+ acpi_state_table.count = 0;
+ return false;
+ }
- return 0;
+ return true;
+ }
+
+ pr_debug("ACPI _CST not found or not usable\n");
+ return false;
}
-/*
- * intel_idle_cpuidle_devices_uninit()
- * Unregisters the cpuidle devices.
- */
-static void intel_idle_cpuidle_devices_uninit(void)
+static void __init intel_idle_init_cstates_acpi(struct cpuidle_driver *drv)
{
- int i;
- struct cpuidle_device *dev;
+ int cstate, limit = min_t(int, CPUIDLE_STATE_MAX, acpi_state_table.count);
+
+ /*
+ * If limit > 0, intel_idle_cst_usable() has returned 'true', so all of
+ * the interesting states are ACPI_CSTATE_FFH.
+ */
+ for (cstate = 1; cstate < limit; cstate++) {
+ struct acpi_processor_cx *cx;
+ struct cpuidle_state *state;
- for_each_online_cpu(i) {
- dev = per_cpu_ptr(intel_idle_cpuidle_devices, i);
- cpuidle_unregister_device(dev);
+ if (intel_idle_max_cstate_reached(cstate))
+ break;
+
+ cx = &acpi_state_table.states[cstate];
+
+ state = &drv->states[drv->state_count++];
+
+ snprintf(state->name, CPUIDLE_NAME_LEN, "C%d_ACPI", cstate);
+ strlcpy(state->desc, cx->desc, CPUIDLE_DESC_LEN);
+ state->exit_latency = cx->latency;
+ /*
+ * For C1-type C-states use the same number for both the exit
+ * latency and target residency, because that is the case for
+ * C1 in the majority of the static C-states tables above.
+ * For the other types of C-states, however, set the target
+ * residency to 3 times the exit latency which should lead to
+ * a reasonable balance between energy-efficiency and
+ * performance in the majority of interesting cases.
+ */
+ state->target_residency = cx->latency;
+ if (cx->type > ACPI_STATE_C1)
+ state->target_residency *= 3;
+
+ state->flags = MWAIT2flg(cx->address);
+ if (cx->type > ACPI_STATE_C2)
+ state->flags |= CPUIDLE_FLAG_TLB_FLUSHED;
+
+ state->enter = intel_idle;
+ state->enter_s2idle = intel_idle_s2idle;
}
}
+static bool __init intel_idle_off_by_default(u32 mwait_hint)
+{
+ int cstate, limit;
+
+ /*
+ * If there are no _CST C-states, do not disable any C-states by
+ * default.
+ */
+ if (!acpi_state_table.count)
+ return false;
+
+ limit = min_t(int, CPUIDLE_STATE_MAX, acpi_state_table.count);
+ /*
+ * If limit > 0, intel_idle_cst_usable() has returned 'true', so all of
+ * the interesting states are ACPI_CSTATE_FFH.
+ */
+ for (cstate = 1; cstate < limit; cstate++) {
+ if (acpi_state_table.states[cstate].address == mwait_hint)
+ return false;
+ }
+ return true;
+}
+#else /* !CONFIG_ACPI_PROCESSOR_CSTATE */
+static inline bool intel_idle_acpi_cst_extract(void) { return false; }
+static inline void intel_idle_init_cstates_acpi(struct cpuidle_driver *drv) { }
+static inline bool intel_idle_off_by_default(u32 mwait_hint) { return false; }
+#endif /* !CONFIG_ACPI_PROCESSOR_CSTATE */
+
/*
* ivt_idle_state_table_update(void)
*
* Tune IVT multi-socket targets
* Assumption: num_sockets == (max_package_num + 1)
*/
-static void ivt_idle_state_table_update(void)
+static void __init ivt_idle_state_table_update(void)
{
/* IVT uses a different table for 1-2, 3-4, and > 4 sockets */
int cpu, package_num, num_sockets = 1;
@@ -1187,15 +1292,17 @@ static void ivt_idle_state_table_update(void)
/* else, 1 and 2 socket systems use default ivt_cstates */
}
-/*
- * Translate IRTL (Interrupt Response Time Limit) MSR to usec
+/**
+ * irtl_2_usec - IRTL to microseconds conversion.
+ * @irtl: IRTL MSR value.
+ *
+ * Translate the IRTL (Interrupt Response Time Limit) MSR value to microseconds.
*/
-
-static unsigned int irtl_ns_units[] = {
- 1, 32, 1024, 32768, 1048576, 33554432, 0, 0 };
-
-static unsigned long long irtl_2_usec(unsigned long long irtl)
+static unsigned long long __init irtl_2_usec(unsigned long long irtl)
{
+ static const unsigned int irtl_ns_units[] __initconst = {
+ 1, 32, 1024, 32768, 1048576, 33554432, 0, 0
+ };
unsigned long long ns;
if (!irtl)
@@ -1203,15 +1310,16 @@ static unsigned long long irtl_2_usec(unsigned long long irtl)
ns = irtl_ns_units[(irtl >> 10) & 0x7];
- return div64_u64((irtl & 0x3FF) * ns, 1000);
+ return div_u64((irtl & 0x3FF) * ns, NSEC_PER_USEC);
}
+
/*
* bxt_idle_state_table_update(void)
*
* On BXT, we trust the IRTL to show the definitive maximum latency
* We use the same value for target_residency.
*/
-static void bxt_idle_state_table_update(void)
+static void __init bxt_idle_state_table_update(void)
{
unsigned long long msr;
unsigned int usec;
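A worked example of the conversion above: bits [12:10] of the IRTL select a time unit and bits [9:0] a multiplier, so an assumed MSR value of 0x883 has unit index 2 (1024 ns) and multiplier 0x83 (131), giving 131 * 1024 / 1000 = 134 us:

#include <stdio.h>

static const unsigned int irtl_ns_units[] = {
	1, 32, 1024, 32768, 1048576, 33554432, 0, 0
};

int main(void)
{
	unsigned long long irtl = 0x883;	/* assumed sample value */
	unsigned long long ns = irtl_ns_units[(irtl >> 10) & 0x7];

	/* 131 * 1024 ns = 134144 ns -> 134 us */
	printf("%llu us\n", (irtl & 0x3FF) * ns / 1000);
	return 0;
}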
@@ -1258,7 +1366,7 @@ static void bxt_idle_state_table_update(void)
* On SKL-H (model 0x5e) disable C8 and C9 if:
* C10 is enabled and SGX disabled
*/
-static void sklh_idle_state_table_update(void)
+static void __init sklh_idle_state_table_update(void)
{
unsigned long long msr;
unsigned int eax, ebx, ecx, edx;
@@ -1284,7 +1392,7 @@ static void sklh_idle_state_table_update(void)
/* if SGX is present */
if (ebx & (1 << 2)) {
- rdmsrl(MSR_IA32_FEATURE_CONTROL, msr);
+ rdmsrl(MSR_IA32_FEAT_CTL, msr);
/* if SGX is enabled */
if (msr & (1 << 18))
@@ -1294,16 +1402,28 @@ static void sklh_idle_state_table_update(void)
skl_cstates[5].flags |= CPUIDLE_FLAG_UNUSABLE; /* C8-SKL */
skl_cstates[6].flags |= CPUIDLE_FLAG_UNUSABLE; /* C9-SKL */
}
-/*
- * intel_idle_state_table_update()
- *
- * Update the default state_table for this CPU-id
- */
-static void intel_idle_state_table_update(void)
+static bool __init intel_idle_verify_cstate(unsigned int mwait_hint)
{
- switch (boot_cpu_data.x86_model) {
+ unsigned int mwait_cstate = MWAIT_HINT2CSTATE(mwait_hint) + 1;
+ unsigned int num_substates = (mwait_substates >> mwait_cstate * 4) &
+ MWAIT_SUBSTATE_MASK;
+
+ /* Ignore the C-state if there are NO sub-states in CPUID for it. */
+ if (num_substates == 0)
+ return false;
+
+ if (mwait_cstate > 2 && !boot_cpu_has(X86_FEATURE_NONSTOP_TSC))
+ mark_tsc_unstable("TSC halts in idle states deeper than C2");
+ return true;
+}
+
+static void __init intel_idle_init_cstates_icpu(struct cpuidle_driver *drv)
+{
+ int cstate;
+
+ switch (boot_cpu_data.x86_model) {
case INTEL_FAM6_IVYBRIDGE_X:
ivt_idle_state_table_update();
break;
@@ -1315,62 +1435,36 @@ static void intel_idle_state_table_update(void)
sklh_idle_state_table_update();
break;
}
-}
-
-/*
- * intel_idle_cpuidle_driver_init()
- * allocate, initialize cpuidle_states
- */
-static void __init intel_idle_cpuidle_driver_init(void)
-{
- int cstate;
- struct cpuidle_driver *drv = &intel_idle_driver;
-
- intel_idle_state_table_update();
-
- cpuidle_poll_state_init(drv);
- drv->state_count = 1;
for (cstate = 0; cstate < CPUIDLE_STATE_MAX; ++cstate) {
- int num_substates, mwait_hint, mwait_cstate;
+ unsigned int mwait_hint;
- if ((cpuidle_state_table[cstate].enter == NULL) &&
- (cpuidle_state_table[cstate].enter_s2idle == NULL))
+ if (intel_idle_max_cstate_reached(cstate))
break;
- if (cstate + 1 > max_cstate) {
- pr_info("max_cstate %d reached\n", max_cstate);
+ if (!cpuidle_state_table[cstate].enter &&
+ !cpuidle_state_table[cstate].enter_s2idle)
break;
- }
-
- mwait_hint = flg2MWAIT(cpuidle_state_table[cstate].flags);
- mwait_cstate = MWAIT_HINT2CSTATE(mwait_hint);
-
- /* number of sub-states for this state in CPUID.MWAIT */
- num_substates = (mwait_substates >> ((mwait_cstate + 1) * 4))
- & MWAIT_SUBSTATE_MASK;
-
- /* if NO sub-states for this state in CPUID, skip it */
- if (num_substates == 0)
- continue;
- /* if state marked as disabled, skip it */
+ /* If marked as unusable, skip this state. */
if (cpuidle_state_table[cstate].flags & CPUIDLE_FLAG_UNUSABLE) {
pr_debug("state %s is disabled\n",
cpuidle_state_table[cstate].name);
continue;
}
+ mwait_hint = flg2MWAIT(cpuidle_state_table[cstate].flags);
+ if (!intel_idle_verify_cstate(mwait_hint))
+ continue;
- if (((mwait_cstate + 1) > 2) &&
- !boot_cpu_has(X86_FEATURE_NONSTOP_TSC))
- mark_tsc_unstable("TSC halts in idle"
- " states deeper than C2");
+ /* Structure copy. */
+ drv->states[drv->state_count] = cpuidle_state_table[cstate];
- drv->states[drv->state_count] = /* structure copy */
- cpuidle_state_table[cstate];
+ if (icpu->use_acpi && intel_idle_off_by_default(mwait_hint) &&
+ !(cpuidle_state_table[cstate].flags & CPUIDLE_FLAG_ALWAYS_ENABLE))
+ drv->states[drv->state_count].flags |= CPUIDLE_FLAG_OFF;
- drv->state_count += 1;
+ drv->state_count++;
}
if (icpu->byt_auto_demotion_disable_flag) {
@@ -1379,6 +1473,38 @@ static void __init intel_idle_cpuidle_driver_init(void)
}
}
+/*
+ * intel_idle_cpuidle_driver_init()
+ * allocate, initialize cpuidle_states
+ */
+static void __init intel_idle_cpuidle_driver_init(struct cpuidle_driver *drv)
+{
+ cpuidle_poll_state_init(drv);
+ drv->state_count = 1;
+
+ if (icpu)
+ intel_idle_init_cstates_icpu(drv);
+ else
+ intel_idle_init_cstates_acpi(drv);
+}
+
+static void auto_demotion_disable(void)
+{
+ unsigned long long msr_bits;
+
+ rdmsrl(MSR_PKG_CST_CONFIG_CONTROL, msr_bits);
+ msr_bits &= ~(icpu->auto_demotion_disable_flags);
+ wrmsrl(MSR_PKG_CST_CONFIG_CONTROL, msr_bits);
+}
+
+static void c1e_promotion_disable(void)
+{
+ unsigned long long msr_bits;
+
+ rdmsrl(MSR_IA32_POWER_CTL, msr_bits);
+ msr_bits &= ~0x2;
+ wrmsrl(MSR_IA32_POWER_CTL, msr_bits);
+}
/*
* intel_idle_cpu_init()
@@ -1397,6 +1523,9 @@ static int intel_idle_cpu_init(unsigned int cpu)
return -EIO;
}
+ if (!icpu)
+ return 0;
+
if (icpu->auto_demotion_disable_flags)
auto_demotion_disable();
@@ -1411,7 +1540,7 @@ static int intel_idle_cpu_online(unsigned int cpu)
struct cpuidle_device *dev;
if (lapic_timer_reliable_states != LAPIC_TIMER_ALWAYS_RELIABLE)
- __setup_broadcast_timer(true);
+ tick_broadcast_enable();
/*
* Some systems can hotplug a cpu at runtime after
@@ -1425,23 +1554,74 @@ static int intel_idle_cpu_online(unsigned int cpu)
return 0;
}
+/**
+ * intel_idle_cpuidle_devices_uninit - Unregister all cpuidle devices.
+ */
+static void __init intel_idle_cpuidle_devices_uninit(void)
+{
+ int i;
+
+ for_each_online_cpu(i)
+ cpuidle_unregister_device(per_cpu_ptr(intel_idle_cpuidle_devices, i));
+}
+
static int __init intel_idle_init(void)
{
+ const struct x86_cpu_id *id;
+ unsigned int eax, ebx, ecx;
int retval;
/* Do not load intel_idle at all for now if idle= is passed */
if (boot_option_idle_override != IDLE_NO_OVERRIDE)
return -ENODEV;
- retval = intel_idle_probe();
- if (retval)
- return retval;
+ if (max_cstate == 0) {
+ pr_debug("disabled\n");
+ return -EPERM;
+ }
+
+ id = x86_match_cpu(intel_idle_ids);
+ if (id) {
+ if (!boot_cpu_has(X86_FEATURE_MWAIT)) {
+ pr_debug("Please enable MWAIT in BIOS SETUP\n");
+ return -ENODEV;
+ }
+ } else {
+ id = x86_match_cpu(intel_mwait_ids);
+ if (!id)
+ return -ENODEV;
+ }
+
+ if (boot_cpu_data.cpuid_level < CPUID_MWAIT_LEAF)
+ return -ENODEV;
+
+ cpuid(CPUID_MWAIT_LEAF, &eax, &ebx, &ecx, &mwait_substates);
+
+ if (!(ecx & CPUID5_ECX_EXTENSIONS_SUPPORTED) ||
+ !(ecx & CPUID5_ECX_INTERRUPT_BREAK) ||
+ !mwait_substates)
+ return -ENODEV;
+
+ pr_debug("MWAIT substates: 0x%x\n", mwait_substates);
+
+ icpu = (const struct idle_cpu *)id->driver_data;
+ if (icpu) {
+ cpuidle_state_table = icpu->state_table;
+ if (icpu->use_acpi)
+ intel_idle_acpi_cst_extract();
+ } else if (!intel_idle_acpi_cst_extract()) {
+ return -ENODEV;
+ }
+
+ pr_debug("v" INTEL_IDLE_VERSION " model 0x%X\n",
+ boot_cpu_data.x86_model);
intel_idle_cpuidle_devices = alloc_percpu(struct cpuidle_device);
- if (intel_idle_cpuidle_devices == NULL)
+ if (!intel_idle_cpuidle_devices)
return -ENOMEM;
- intel_idle_cpuidle_driver_init();
+ intel_idle_cpuidle_driver_init(&intel_idle_driver);
+
retval = cpuidle_register_driver(&intel_idle_driver);
if (retval) {
struct cpuidle_driver *drv = cpuidle_get_driver();
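Taken together, the intel_idle rework above selects states from the built-in tables when the CPU is recognized, falls back to ACPI _CST on any MWAIT-capable CPU otherwise, and, on table-driven platforms with use_acpi set, starts a state disabled when _CST omits its MWAIT hint unless the table marks it ALWAYS_ENABLE. A standalone model of that gating decision (FLAG_OFF is an illustrative bit, not the real CPUIDLE_FLAG_OFF value):

#include <stdbool.h>
#include <stdio.h>

#define FLAG_ALWAYS_ENABLE	(1U << 15)	/* matches the patch */
#define FLAG_OFF		(1U << 4)	/* illustrative only */

/* Disabled by default iff ACPI gating applies, _CST does not list the
 * state's MWAIT hint, and the table did not mark it ALWAYS_ENABLE.
 */
static unsigned int gate_flags(bool use_acpi, bool in_cst, unsigned int flags)
{
	if (use_acpi && !in_cst && !(flags & FLAG_ALWAYS_ENABLE))
		flags |= FLAG_OFF;

	return flags;
}

int main(void)
{
	/* C1E: absent from _CST but ALWAYS_ENABLE, so it stays on. */
	printf("C1E off? %d\n",
	       !!(gate_flags(true, false, FLAG_ALWAYS_ENABLE) & FLAG_OFF));
	/* Deep state missing from _CST: starts disabled. */
	printf("C6  off? %d\n", !!(gate_flags(true, false, 0) & FLAG_OFF));
	return 0;
}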
diff --git a/drivers/iio/accel/Kconfig b/drivers/iio/accel/Kconfig
index d4ef35aeb579..5d91a6dda894 100644
--- a/drivers/iio/accel/Kconfig
+++ b/drivers/iio/accel/Kconfig
@@ -89,13 +89,13 @@ config ADXL372_I2C
module will be called adxl372_i2c.
config BMA180
- tristate "Bosch BMA180/BMA250 3-Axis Accelerometer Driver"
+ tristate "Bosch BMA180/BMA25x 3-Axis Accelerometer Driver"
depends on I2C
select IIO_BUFFER
select IIO_TRIGGERED_BUFFER
help
Say Y here if you want to build a driver for the Bosch BMA180 or
- BMA250 triaxial acceleration sensor.
+ BMA25x triaxial acceleration sensor.
To compile this driver as a module, choose M here: the
module will be called bma180.
@@ -112,6 +112,22 @@ config BMA220
To compile this driver as a module, choose M here: the
module will be called bma220_spi.
+config BMA400
+ tristate "Bosch BMA400 3-Axis Accelerometer Driver"
+ select REGMAP
+ select BMA400_I2C if I2C
+ help
+ Say Y here if you want to build a driver for the Bosch BMA400
+ triaxial acceleration sensor.
+
+ To compile this driver as a module, choose M here: the
+ module will be called bma400_core and you will also get
+ bma400_i2c if I2C is enabled.
+
+config BMA400_I2C
+ tristate
+ depends on BMA400
+
config BMC150_ACCEL
tristate "Bosch BMC150 Accelerometer Driver"
select IIO_BUFFER
diff --git a/drivers/iio/accel/Makefile b/drivers/iio/accel/Makefile
index 56bd0215e0d4..3a051cf37f40 100644
--- a/drivers/iio/accel/Makefile
+++ b/drivers/iio/accel/Makefile
@@ -14,6 +14,8 @@ obj-$(CONFIG_ADXL372_I2C) += adxl372_i2c.o
obj-$(CONFIG_ADXL372_SPI) += adxl372_spi.o
obj-$(CONFIG_BMA180) += bma180.o
obj-$(CONFIG_BMA220) += bma220_spi.o
+obj-$(CONFIG_BMA400) += bma400_core.o
+obj-$(CONFIG_BMA400_I2C) += bma400_i2c.o
obj-$(CONFIG_BMC150_ACCEL) += bmc150-accel-core.o
obj-$(CONFIG_BMC150_ACCEL_I2C) += bmc150-accel-i2c.o
obj-$(CONFIG_BMC150_ACCEL_SPI) += bmc150-accel-spi.o
diff --git a/drivers/iio/accel/adis16201.c b/drivers/iio/accel/adis16201.c
index c4810c73b2a2..0f0f27a8184e 100644
--- a/drivers/iio/accel/adis16201.c
+++ b/drivers/iio/accel/adis16201.c
@@ -233,6 +233,12 @@ static const char * const adis16201_status_error_msgs[] = {
[ADIS16201_DIAG_STAT_POWER_LOW_BIT] = "Power supply below 2.975V",
};
+static const struct adis_timeout adis16201_timeouts = {
+ .reset_ms = ADIS16201_STARTUP_DELAY_MS,
+ .sw_reset_ms = ADIS16201_STARTUP_DELAY_MS,
+ .self_test_ms = ADIS16201_STARTUP_DELAY_MS,
+};
+
static const struct adis_data adis16201_data = {
.read_delay = 20,
.msc_ctrl_reg = ADIS16201_MSC_CTRL_REG,
@@ -241,7 +247,7 @@ static const struct adis_data adis16201_data = {
.self_test_mask = ADIS16201_MSC_CTRL_SELF_TEST_EN,
.self_test_no_autoclear = true,
- .startup_delay = ADIS16201_STARTUP_DELAY_MS,
+ .timeouts = &adis16201_timeouts,
.status_error_msgs = adis16201_status_error_msgs,
.status_error_mask = BIT(ADIS16201_DIAG_STAT_SPI_FAIL_BIT) |
diff --git a/drivers/iio/accel/adis16209.c b/drivers/iio/accel/adis16209.c
index 98d77af8a2b0..c6dbd2424e10 100644
--- a/drivers/iio/accel/adis16209.c
+++ b/drivers/iio/accel/adis16209.c
@@ -243,6 +243,12 @@ static const char * const adis16209_status_error_msgs[] = {
[ADIS16209_STAT_POWER_LOW_BIT] = "Power supply below 2.975V",
};
+static const struct adis_timeout adis16209_timeouts = {
+ .reset_ms = ADIS16209_STARTUP_DELAY_MS,
+ .self_test_ms = ADIS16209_STARTUP_DELAY_MS,
+ .sw_reset_ms = ADIS16209_STARTUP_DELAY_MS,
+};
+
static const struct adis_data adis16209_data = {
.read_delay = 30,
.msc_ctrl_reg = ADIS16209_MSC_CTRL_REG,
@@ -251,7 +257,7 @@ static const struct adis_data adis16209_data = {
.self_test_mask = ADIS16209_MSC_CTRL_SELF_TEST_EN,
.self_test_no_autoclear = true,
- .startup_delay = ADIS16209_STARTUP_DELAY_MS,
+ .timeouts = &adis16209_timeouts,
.status_error_msgs = adis16209_status_error_msgs,
.status_error_mask = BIT(ADIS16209_STAT_SELFTEST_FAIL_BIT) |
diff --git a/drivers/iio/accel/bma180.c b/drivers/iio/accel/bma180.c
index 1574e4604a4f..fcd91d5f05fd 100644
--- a/drivers/iio/accel/bma180.c
+++ b/drivers/iio/accel/bma180.c
@@ -9,6 +9,7 @@
* SPI is not supported by driver
* BMA180: 7-bit I2C slave address 0x40 or 0x41
* BMA250: 7-bit I2C slave address 0x18 or 0x19
+ * BMA254: 7-bit I2C slave address 0x18 or 0x19
*/
#include <linux/module.h>
@@ -18,6 +19,7 @@
#include <linux/of_device.h>
#include <linux/of.h>
#include <linux/bitops.h>
+#include <linux/regulator/consumer.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/iio/iio.h>
@@ -33,17 +35,20 @@
enum chip_ids {
BMA180,
BMA250,
+ BMA254,
};
struct bma180_data;
struct bma180_part_info {
+ u8 chip_id;
const struct iio_chan_spec *channels;
unsigned int num_channels;
const int *scale_table;
unsigned int num_scales;
const int *bw_table;
unsigned int num_bw;
+ int center_temp;
u8 int_reset_reg, int_reset_mask;
u8 sleep_reg, sleep_mask;
@@ -51,6 +56,7 @@ struct bma180_part_info {
u8 scale_reg, scale_mask;
u8 power_reg, power_mask, lowpower_val;
u8 int_enable_reg, int_enable_mask;
+ u8 int_map_reg, int_enable_dataready_int1_mask;
u8 softreset_reg;
int (*chip_config)(struct bma180_data *data);
@@ -89,6 +95,8 @@ struct bma180_part_info {
#define BMA180_RESET_VAL 0xb6
#define BMA180_ID_REG_VAL 0x03
+#define BMA250_ID_REG_VAL 0x03
+#define BMA254_ID_REG_VAL 0xfa /* 250 decimal */
/* Chip power modes */
#define BMA180_LOW_POWER 0x03
@@ -109,7 +117,26 @@ struct bma180_part_info {
#define BMA250_INT1_DATA_MASK BIT(0)
#define BMA250_INT_RESET_MASK BIT(7) /* Reset pending interrupts */
+#define BMA254_RANGE_REG 0x0f
+#define BMA254_BW_REG 0x10
+#define BMA254_POWER_REG 0x11
+#define BMA254_RESET_REG 0x14
+#define BMA254_INT_ENABLE_REG 0x17
+#define BMA254_INT_MAP_REG 0x1a
+#define BMA254_INT_RESET_REG 0x21
+
+#define BMA254_RANGE_MASK GENMASK(3, 0) /* Range of accel values */
+#define BMA254_BW_MASK GENMASK(4, 0) /* Accel bandwidth */
+#define BMA254_SUSPEND_MASK BIT(7) /* chip will sleep */
+#define BMA254_LOWPOWER_MASK BIT(6)
+#define BMA254_DATA_INTEN_MASK BIT(4)
+#define BMA254_INT2_DATA_MASK BIT(7)
+#define BMA254_INT1_DATA_MASK BIT(0)
+#define BMA254_INT_RESET_MASK BIT(7) /* Reset pending interrupts */
+
struct bma180_data {
+ struct regulator *vdd_supply;
+ struct regulator *vddio_supply;
struct i2c_client *client;
struct iio_trigger *trig;
const struct bma180_part_info *part_info;
@@ -132,8 +159,8 @@ enum bma180_chan {
static int bma180_bw_table[] = { 10, 20, 40, 75, 150, 300 }; /* Hz */
static int bma180_scale_table[] = { 1275, 1863, 2452, 3727, 4903, 9709, 19417 };
-static int bma250_bw_table[] = { 8, 16, 31, 63, 125, 250 }; /* Hz */
-static int bma250_scale_table[] = { 0, 0, 0, 38344, 0, 76590, 0, 0, 153180, 0,
+static int bma25x_bw_table[] = { 8, 16, 31, 63, 125, 250 }; /* Hz */
+static int bma25x_scale_table[] = { 0, 0, 0, 38344, 0, 76590, 0, 0, 153180, 0,
0, 0, 306458 };
static int bma180_get_data_reg(struct bma180_data *data, enum bma180_chan chan)
@@ -307,8 +334,11 @@ static int bma180_chip_init(struct bma180_data *data)
if (ret < 0)
return ret;
- if (ret != BMA180_ID_REG_VAL)
+ if (ret != data->part_info->chip_id) {
+ dev_err(&data->client->dev, "wrong chip ID %d expected %d\n",
+ ret, data->part_info->chip_id);
return -ENODEV;
+ }
ret = bma180_soft_reset(data);
if (ret)
@@ -355,7 +385,7 @@ err:
return ret;
}
-static int bma250_chip_config(struct bma180_data *data)
+static int bma25x_chip_config(struct bma180_data *data)
{
int ret = bma180_chip_init(data);
@@ -367,8 +397,12 @@ static int bma250_chip_config(struct bma180_data *data)
ret = bma180_set_scale(data, 38344); /* 2 G */
if (ret)
goto err;
- ret = bma180_set_bits(data, BMA250_INT_MAP_REG,
- BMA250_INT1_DATA_MASK, 1);
+ /*
+	 * This enables the dataready interrupt on the INT1 pin
+ * FIXME: support using the INT2 pin
+ */
+ ret = bma180_set_bits(data, data->part_info->int_map_reg,
+ data->part_info->int_enable_dataready_int1_mask, 1);
if (ret)
goto err;
@@ -394,7 +428,7 @@ err:
dev_err(&data->client->dev, "failed to disable the chip\n");
}
-static void bma250_chip_disable(struct bma180_data *data)
+static void bma25x_chip_disable(struct bma180_data *data)
{
if (bma180_set_new_data_intr_state(data, false))
goto err;
@@ -497,7 +531,7 @@ static int bma180_read_raw(struct iio_dev *indio_dev,
return -EINVAL;
}
case IIO_CHAN_INFO_OFFSET:
- *val = 48; /* 0 LSB @ 24 degree C */
+ *val = data->part_info->center_temp;
return IIO_VAL_INT;
default:
return -EINVAL;
@@ -627,34 +661,96 @@ static const struct iio_chan_spec bma250_channels[] = {
IIO_CHAN_SOFT_TIMESTAMP(4),
};
+static const struct iio_chan_spec bma254_channels[] = {
+ BMA180_ACC_CHANNEL(X, 12),
+ BMA180_ACC_CHANNEL(Y, 12),
+ BMA180_ACC_CHANNEL(Z, 12),
+ BMA180_TEMP_CHANNEL,
+ IIO_CHAN_SOFT_TIMESTAMP(4),
+};
+
static const struct bma180_part_info bma180_part_info[] = {
[BMA180] = {
- bma180_channels, ARRAY_SIZE(bma180_channels),
- bma180_scale_table, ARRAY_SIZE(bma180_scale_table),
- bma180_bw_table, ARRAY_SIZE(bma180_bw_table),
- BMA180_CTRL_REG0, BMA180_RESET_INT,
- BMA180_CTRL_REG0, BMA180_SLEEP,
- BMA180_BW_TCS, BMA180_BW,
- BMA180_OFFSET_LSB1, BMA180_RANGE,
- BMA180_TCO_Z, BMA180_MODE_CONFIG, BMA180_LOW_POWER,
- BMA180_CTRL_REG3, BMA180_NEW_DATA_INT,
- BMA180_RESET,
- bma180_chip_config,
- bma180_chip_disable,
+ .chip_id = BMA180_ID_REG_VAL,
+ .channels = bma180_channels,
+ .num_channels = ARRAY_SIZE(bma180_channels),
+ .scale_table = bma180_scale_table,
+ .num_scales = ARRAY_SIZE(bma180_scale_table),
+ .bw_table = bma180_bw_table,
+ .num_bw = ARRAY_SIZE(bma180_bw_table),
+ .center_temp = 48, /* 0 LSB @ 24 degree C */
+ .int_reset_reg = BMA180_CTRL_REG0,
+ .int_reset_mask = BMA180_RESET_INT,
+ .sleep_reg = BMA180_CTRL_REG0,
+ .sleep_mask = BMA180_SLEEP,
+ .bw_reg = BMA180_BW_TCS,
+ .bw_mask = BMA180_BW,
+ .scale_reg = BMA180_OFFSET_LSB1,
+ .scale_mask = BMA180_RANGE,
+ .power_reg = BMA180_TCO_Z,
+ .power_mask = BMA180_MODE_CONFIG,
+ .lowpower_val = BMA180_LOW_POWER,
+ .int_enable_reg = BMA180_CTRL_REG3,
+ .int_enable_mask = BMA180_NEW_DATA_INT,
+ .softreset_reg = BMA180_RESET,
+ .chip_config = bma180_chip_config,
+ .chip_disable = bma180_chip_disable,
},
[BMA250] = {
- bma250_channels, ARRAY_SIZE(bma250_channels),
- bma250_scale_table, ARRAY_SIZE(bma250_scale_table),
- bma250_bw_table, ARRAY_SIZE(bma250_bw_table),
- BMA250_INT_RESET_REG, BMA250_INT_RESET_MASK,
- BMA250_POWER_REG, BMA250_SUSPEND_MASK,
- BMA250_BW_REG, BMA250_BW_MASK,
- BMA250_RANGE_REG, BMA250_RANGE_MASK,
- BMA250_POWER_REG, BMA250_LOWPOWER_MASK, 1,
- BMA250_INT_ENABLE_REG, BMA250_DATA_INTEN_MASK,
- BMA250_RESET_REG,
- bma250_chip_config,
- bma250_chip_disable,
+ .chip_id = BMA250_ID_REG_VAL,
+ .channels = bma250_channels,
+ .num_channels = ARRAY_SIZE(bma250_channels),
+ .scale_table = bma25x_scale_table,
+ .num_scales = ARRAY_SIZE(bma25x_scale_table),
+ .bw_table = bma25x_bw_table,
+ .num_bw = ARRAY_SIZE(bma25x_bw_table),
+ .center_temp = 48, /* 0 LSB @ 24 degree C */
+ .int_reset_reg = BMA250_INT_RESET_REG,
+ .int_reset_mask = BMA250_INT_RESET_MASK,
+ .sleep_reg = BMA250_POWER_REG,
+ .sleep_mask = BMA250_SUSPEND_MASK,
+ .bw_reg = BMA250_BW_REG,
+ .bw_mask = BMA250_BW_MASK,
+ .scale_reg = BMA250_RANGE_REG,
+ .scale_mask = BMA250_RANGE_MASK,
+ .power_reg = BMA250_POWER_REG,
+ .power_mask = BMA250_LOWPOWER_MASK,
+ .lowpower_val = 1,
+ .int_enable_reg = BMA250_INT_ENABLE_REG,
+ .int_enable_mask = BMA250_DATA_INTEN_MASK,
+ .int_map_reg = BMA250_INT_MAP_REG,
+ .int_enable_dataready_int1_mask = BMA250_INT1_DATA_MASK,
+ .softreset_reg = BMA250_RESET_REG,
+ .chip_config = bma25x_chip_config,
+ .chip_disable = bma25x_chip_disable,
+ },
+ [BMA254] = {
+ .chip_id = BMA254_ID_REG_VAL,
+ .channels = bma254_channels,
+ .num_channels = ARRAY_SIZE(bma254_channels),
+ .scale_table = bma25x_scale_table,
+ .num_scales = ARRAY_SIZE(bma25x_scale_table),
+ .bw_table = bma25x_bw_table,
+ .num_bw = ARRAY_SIZE(bma25x_bw_table),
+ .center_temp = 46, /* 0 LSB @ 23 degree C */
+ .int_reset_reg = BMA254_INT_RESET_REG,
+ .int_reset_mask = BMA254_INT_RESET_MASK,
+ .sleep_reg = BMA254_POWER_REG,
+ .sleep_mask = BMA254_SUSPEND_MASK,
+ .bw_reg = BMA254_BW_REG,
+ .bw_mask = BMA254_BW_MASK,
+ .scale_reg = BMA254_RANGE_REG,
+ .scale_mask = BMA254_RANGE_MASK,
+ .power_reg = BMA254_POWER_REG,
+ .power_mask = BMA254_LOWPOWER_MASK,
+ .lowpower_val = 1,
+ .int_enable_reg = BMA254_INT_ENABLE_REG,
+ .int_enable_mask = BMA254_DATA_INTEN_MASK,
+ .int_map_reg = BMA254_INT_MAP_REG,
+ .int_enable_dataready_int1_mask = BMA254_INT1_DATA_MASK,
+ .softreset_reg = BMA254_RESET_REG,
+ .chip_config = bma25x_chip_config,
+ .chip_disable = bma25x_chip_disable,
},
};
@@ -712,12 +808,13 @@ static const struct iio_trigger_ops bma180_trigger_ops = {
static int bma180_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
+ struct device *dev = &client->dev;
struct bma180_data *data;
struct iio_dev *indio_dev;
enum chip_ids chip;
int ret;
- indio_dev = devm_iio_device_alloc(&client->dev, sizeof(*data));
+ indio_dev = devm_iio_device_alloc(dev, sizeof(*data));
if (!indio_dev)
return -ENOMEM;
@@ -725,22 +822,56 @@ static int bma180_probe(struct i2c_client *client,
i2c_set_clientdata(client, indio_dev);
data->client = client;
if (client->dev.of_node)
- chip = (enum chip_ids)of_device_get_match_data(&client->dev);
+ chip = (enum chip_ids)of_device_get_match_data(dev);
else
chip = id->driver_data;
data->part_info = &bma180_part_info[chip];
- ret = iio_read_mount_matrix(&client->dev, "mount-matrix",
+ ret = iio_read_mount_matrix(dev, "mount-matrix",
&data->orientation);
if (ret)
return ret;
+ data->vdd_supply = devm_regulator_get(dev, "vdd");
+ if (IS_ERR(data->vdd_supply)) {
+ if (PTR_ERR(data->vdd_supply) != -EPROBE_DEFER)
+ dev_err(dev, "Failed to get vdd regulator %d\n",
+ (int)PTR_ERR(data->vdd_supply));
+ return PTR_ERR(data->vdd_supply);
+ }
+ data->vddio_supply = devm_regulator_get(dev, "vddio");
+ if (IS_ERR(data->vddio_supply)) {
+ if (PTR_ERR(data->vddio_supply) != -EPROBE_DEFER)
+ dev_err(dev, "Failed to get vddio regulator %d\n",
+ (int)PTR_ERR(data->vddio_supply));
+ return PTR_ERR(data->vddio_supply);
+ }
+	/* Typical voltage is 2.4 V; these are the allowed min and max */
+ ret = regulator_set_voltage(data->vdd_supply, 1620000, 3600000);
+ if (ret)
+ return ret;
+ ret = regulator_set_voltage(data->vddio_supply, 1200000, 3600000);
+ if (ret)
+ return ret;
+ ret = regulator_enable(data->vdd_supply);
+ if (ret) {
+ dev_err(dev, "Failed to enable vdd regulator: %d\n", ret);
+ return ret;
+ }
+ ret = regulator_enable(data->vddio_supply);
+ if (ret) {
+ dev_err(dev, "Failed to enable vddio regulator: %d\n", ret);
+ goto err_disable_vdd;
+ }
+ /* Wait to make sure we started up properly (3 ms at least) */
+ usleep_range(3000, 5000);
+
ret = data->part_info->chip_config(data);
if (ret < 0)
goto err_chip_disable;
mutex_init(&data->mutex);
- indio_dev->dev.parent = &client->dev;
+ indio_dev->dev.parent = dev;
indio_dev->channels = data->part_info->channels;
indio_dev->num_channels = data->part_info->num_channels;
indio_dev->name = id->name;
@@ -755,15 +886,15 @@ static int bma180_probe(struct i2c_client *client,
goto err_chip_disable;
}
- ret = devm_request_irq(&client->dev, client->irq,
+ ret = devm_request_irq(dev, client->irq,
iio_trigger_generic_data_rdy_poll, IRQF_TRIGGER_RISING,
"bma180_event", data->trig);
if (ret) {
- dev_err(&client->dev, "unable to request IRQ\n");
+ dev_err(dev, "unable to request IRQ\n");
goto err_trigger_free;
}
- data->trig->dev.parent = &client->dev;
+ data->trig->dev.parent = dev;
data->trig->ops = &bma180_trigger_ops;
iio_trigger_set_drvdata(data->trig, indio_dev);
indio_dev->trig = iio_trigger_get(data->trig);
@@ -776,13 +907,13 @@ static int bma180_probe(struct i2c_client *client,
ret = iio_triggered_buffer_setup(indio_dev, NULL,
bma180_trigger_handler, NULL);
if (ret < 0) {
- dev_err(&client->dev, "unable to setup iio triggered buffer\n");
+ dev_err(dev, "unable to setup iio triggered buffer\n");
goto err_trigger_unregister;
}
ret = iio_device_register(indio_dev);
if (ret < 0) {
- dev_err(&client->dev, "unable to register iio device\n");
+ dev_err(dev, "unable to register iio device\n");
goto err_buffer_cleanup;
}
@@ -797,6 +928,9 @@ err_trigger_free:
iio_trigger_free(data->trig);
err_chip_disable:
data->part_info->chip_disable(data);
+ regulator_disable(data->vddio_supply);
+err_disable_vdd:
+ regulator_disable(data->vdd_supply);
return ret;
}
@@ -816,6 +950,8 @@ static int bma180_remove(struct i2c_client *client)
mutex_lock(&data->mutex);
data->part_info->chip_disable(data);
mutex_unlock(&data->mutex);
+ regulator_disable(data->vddio_supply);
+ regulator_disable(data->vdd_supply);
return 0;
}
@@ -856,6 +992,7 @@ static SIMPLE_DEV_PM_OPS(bma180_pm_ops, bma180_suspend, bma180_resume);
static const struct i2c_device_id bma180_ids[] = {
{ "bma180", BMA180 },
{ "bma250", BMA250 },
+ { "bma254", BMA254 },
{ }
};
@@ -870,6 +1007,10 @@ static const struct of_device_id bma180_of_match[] = {
.compatible = "bosch,bma250",
.data = (void *)BMA250
},
+ {
+ .compatible = "bosch,bma254",
+ .data = (void *)BMA254
+ },
{ }
};
MODULE_DEVICE_TABLE(of, bma180_of_match);
@@ -889,5 +1030,5 @@ module_i2c_driver(bma180_driver);
MODULE_AUTHOR("Kravchenko Oleksandr <x0199363@ti.com>");
MODULE_AUTHOR("Texas Instruments, Inc.");
-MODULE_DESCRIPTION("Bosch BMA180/BMA250 triaxial acceleration sensor");
+MODULE_DESCRIPTION("Bosch BMA180/BMA25x triaxial acceleration sensor");
MODULE_LICENSE("GPL");
diff --git a/drivers/iio/accel/bma400.h b/drivers/iio/accel/bma400.h
new file mode 100644
index 000000000000..5ad10db9819f
--- /dev/null
+++ b/drivers/iio/accel/bma400.h
@@ -0,0 +1,99 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Register constants and other forward declarations needed by the bma400
+ * sources.
+ *
+ * Copyright 2019 Dan Robertson <dan@dlrobertson.com>
+ */
+
+#ifndef _BMA400_H_
+#define _BMA400_H_
+
+#include <linux/bits.h>
+#include <linux/regmap.h>
+
+/*
+ * Read-Only Registers
+ */
+
+/* Status and ID registers */
+#define BMA400_CHIP_ID_REG 0x00
+#define BMA400_ERR_REG 0x02
+#define BMA400_STATUS_REG 0x03
+
+/* Acceleration registers */
+#define BMA400_X_AXIS_LSB_REG 0x04
+#define BMA400_X_AXIS_MSB_REG 0x05
+#define BMA400_Y_AXIS_LSB_REG 0x06
+#define BMA400_Y_AXIS_MSB_REG 0x07
+#define BMA400_Z_AXIS_LSB_REG 0x08
+#define BMA400_Z_AXIS_MSB_REG 0x09
+
+/* Sensor time registers */
+#define BMA400_SENSOR_TIME0 0x0a
+#define BMA400_SENSOR_TIME1 0x0b
+#define BMA400_SENSOR_TIME2 0x0c
+
+/* Event and interrupt registers */
+#define BMA400_EVENT_REG 0x0d
+#define BMA400_INT_STAT0_REG 0x0e
+#define BMA400_INT_STAT1_REG 0x0f
+#define BMA400_INT_STAT2_REG 0x10
+
+/* Temperature register */
+#define BMA400_TEMP_DATA_REG 0x11
+
+/* FIFO length and data registers */
+#define BMA400_FIFO_LENGTH0_REG 0x12
+#define BMA400_FIFO_LENGTH1_REG 0x13
+#define BMA400_FIFO_DATA_REG 0x14
+
+/* Step count registers */
+#define BMA400_STEP_CNT0_REG 0x15
+#define BMA400_STEP_CNT1_REG 0x16
+#define BMA400_STEP_CNT3_REG 0x17
+#define BMA400_STEP_STAT_REG 0x18
+
+/*
+ * Read-write configuration registers
+ */
+#define BMA400_ACC_CONFIG0_REG 0x19
+#define BMA400_ACC_CONFIG1_REG 0x1a
+#define BMA400_ACC_CONFIG2_REG 0x1b
+#define BMA400_CMD_REG 0x7e
+
+/* Chip ID of BMA400 devices found in the chip ID register. */
+#define BMA400_ID_REG_VAL 0x90
+
+#define BMA400_LP_OSR_SHIFT 5
+#define BMA400_NP_OSR_SHIFT 4
+#define BMA400_SCALE_SHIFT 6
+
+#define BMA400_TWO_BITS_MASK GENMASK(1, 0)
+#define BMA400_LP_OSR_MASK GENMASK(6, 5)
+#define BMA400_NP_OSR_MASK GENMASK(5, 4)
+#define BMA400_ACC_ODR_MASK GENMASK(3, 0)
+#define BMA400_ACC_SCALE_MASK GENMASK(7, 6)
+
+#define BMA400_ACC_ODR_MIN_RAW 0x05
+#define BMA400_ACC_ODR_LP_RAW 0x06
+#define BMA400_ACC_ODR_MAX_RAW 0x0b
+
+#define BMA400_ACC_ODR_MAX_HZ 800
+#define BMA400_ACC_ODR_MIN_WHOLE_HZ 25
+#define BMA400_ACC_ODR_MIN_HZ 12
+
+#define BMA400_SCALE_MIN 38357
+#define BMA400_SCALE_MAX 306864
+
+#define BMA400_NUM_REGULATORS 2
+#define BMA400_VDD_REGULATOR 0
+#define BMA400_VDDIO_REGULATOR 1
+
+extern const struct regmap_config bma400_regmap_config;
+
+int bma400_probe(struct device *dev, struct regmap *regmap, const char *name);
+
+int bma400_remove(struct device *dev);
+
+#endif
diff --git a/drivers/iio/accel/bma400_core.c b/drivers/iio/accel/bma400_core.c
new file mode 100644
index 000000000000..cc77f89c048b
--- /dev/null
+++ b/drivers/iio/accel/bma400_core.c
@@ -0,0 +1,853 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Core IIO driver for Bosch BMA400 triaxial acceleration sensor.
+ *
+ * Copyright 2019 Dan Robertson <dan@dlrobertson.com>
+ *
+ * TODO:
+ * - Support for power management
+ * - Support events and interrupts
+ * - Create channel for step count
+ * - Create channel for sensor time
+ */
+
+#include <linux/bitops.h>
+#include <linux/device.h>
+#include <linux/iio/iio.h>
+#include <linux/iio/sysfs.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/regmap.h>
+#include <linux/regulator/consumer.h>
+
+#include "bma400.h"
+
+/*
+ * The G-range selection may be one of 2g, 4g, 8g, or 16g. The scale may
+ * be selected with the acc_range bits of the ACC_CONFIG1 register.
+ * NB: This buffer is populated in the device init.
+ */
+static int bma400_scales[8];
+
+/*
+ * See the ACC_CONFIG1 section of the datasheet.
+ * NB: This buffer is populated in the device init.
+ */
+static int bma400_sample_freqs[14];
+
+static const int bma400_osr_range[] = { 0, 1, 3 };
+
+/* See the ACC_CONFIG0 section of the datasheet */
+enum bma400_power_mode {
+ POWER_MODE_SLEEP = 0x00,
+ POWER_MODE_LOW = 0x01,
+ POWER_MODE_NORMAL = 0x02,
+ POWER_MODE_INVALID = 0x03,
+};
+
+struct bma400_sample_freq {
+ int hz;
+ int uhz;
+};
+
+struct bma400_data {
+ struct device *dev;
+ struct regmap *regmap;
+ struct regulator_bulk_data regulators[BMA400_NUM_REGULATORS];
+ struct mutex mutex; /* data register lock */
+ struct iio_mount_matrix orientation;
+ enum bma400_power_mode power_mode;
+ struct bma400_sample_freq sample_freq;
+ int oversampling_ratio;
+ int scale;
+};
+
+static bool bma400_is_writable_reg(struct device *dev, unsigned int reg)
+{
+ switch (reg) {
+ case BMA400_CHIP_ID_REG:
+ case BMA400_ERR_REG:
+ case BMA400_STATUS_REG:
+ case BMA400_X_AXIS_LSB_REG:
+ case BMA400_X_AXIS_MSB_REG:
+ case BMA400_Y_AXIS_LSB_REG:
+ case BMA400_Y_AXIS_MSB_REG:
+ case BMA400_Z_AXIS_LSB_REG:
+ case BMA400_Z_AXIS_MSB_REG:
+ case BMA400_SENSOR_TIME0:
+ case BMA400_SENSOR_TIME1:
+ case BMA400_SENSOR_TIME2:
+ case BMA400_EVENT_REG:
+ case BMA400_INT_STAT0_REG:
+ case BMA400_INT_STAT1_REG:
+ case BMA400_INT_STAT2_REG:
+ case BMA400_TEMP_DATA_REG:
+ case BMA400_FIFO_LENGTH0_REG:
+ case BMA400_FIFO_LENGTH1_REG:
+ case BMA400_FIFO_DATA_REG:
+ case BMA400_STEP_CNT0_REG:
+ case BMA400_STEP_CNT1_REG:
+ case BMA400_STEP_CNT3_REG:
+ case BMA400_STEP_STAT_REG:
+ return false;
+ default:
+ return true;
+ }
+}
+
+static bool bma400_is_volatile_reg(struct device *dev, unsigned int reg)
+{
+ switch (reg) {
+ case BMA400_ERR_REG:
+ case BMA400_STATUS_REG:
+ case BMA400_X_AXIS_LSB_REG:
+ case BMA400_X_AXIS_MSB_REG:
+ case BMA400_Y_AXIS_LSB_REG:
+ case BMA400_Y_AXIS_MSB_REG:
+ case BMA400_Z_AXIS_LSB_REG:
+ case BMA400_Z_AXIS_MSB_REG:
+ case BMA400_SENSOR_TIME0:
+ case BMA400_SENSOR_TIME1:
+ case BMA400_SENSOR_TIME2:
+ case BMA400_EVENT_REG:
+ case BMA400_INT_STAT0_REG:
+ case BMA400_INT_STAT1_REG:
+ case BMA400_INT_STAT2_REG:
+ case BMA400_TEMP_DATA_REG:
+ case BMA400_FIFO_LENGTH0_REG:
+ case BMA400_FIFO_LENGTH1_REG:
+ case BMA400_FIFO_DATA_REG:
+ case BMA400_STEP_CNT0_REG:
+ case BMA400_STEP_CNT1_REG:
+ case BMA400_STEP_CNT3_REG:
+ case BMA400_STEP_STAT_REG:
+ return true;
+ default:
+ return false;
+ }
+}
+
+const struct regmap_config bma400_regmap_config = {
+ .reg_bits = 8,
+ .val_bits = 8,
+ .max_register = BMA400_CMD_REG,
+ .cache_type = REGCACHE_RBTREE,
+ .writeable_reg = bma400_is_writable_reg,
+ .volatile_reg = bma400_is_volatile_reg,
+};
+EXPORT_SYMBOL(bma400_regmap_config);
+
+static const struct iio_mount_matrix *
+bma400_accel_get_mount_matrix(const struct iio_dev *indio_dev,
+ const struct iio_chan_spec *chan)
+{
+ struct bma400_data *data = iio_priv(indio_dev);
+
+ return &data->orientation;
+}
+
+static const struct iio_chan_spec_ext_info bma400_ext_info[] = {
+ IIO_MOUNT_MATRIX(IIO_SHARED_BY_DIR, bma400_accel_get_mount_matrix),
+ { }
+};
+
+#define BMA400_ACC_CHANNEL(_axis) { \
+ .type = IIO_ACCEL, \
+ .modified = 1, \
+ .channel2 = IIO_MOD_##_axis, \
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW), \
+ .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SAMP_FREQ) | \
+ BIT(IIO_CHAN_INFO_SCALE) | \
+ BIT(IIO_CHAN_INFO_OVERSAMPLING_RATIO), \
+ .info_mask_shared_by_type_available = BIT(IIO_CHAN_INFO_SAMP_FREQ) | \
+ BIT(IIO_CHAN_INFO_SCALE) | \
+ BIT(IIO_CHAN_INFO_OVERSAMPLING_RATIO), \
+ .ext_info = bma400_ext_info, \
+}
+
+static const struct iio_chan_spec bma400_channels[] = {
+ BMA400_ACC_CHANNEL(X),
+ BMA400_ACC_CHANNEL(Y),
+ BMA400_ACC_CHANNEL(Z),
+ {
+ .type = IIO_TEMP,
+ .info_mask_separate = BIT(IIO_CHAN_INFO_PROCESSED),
+ .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SAMP_FREQ),
+ },
+};
+
+static int bma400_get_temp_reg(struct bma400_data *data, int *val, int *val2)
+{
+ unsigned int raw_temp;
+ int host_temp;
+ int ret;
+
+ if (data->power_mode == POWER_MODE_SLEEP)
+ return -EBUSY;
+
+ ret = regmap_read(data->regmap, BMA400_TEMP_DATA_REG, &raw_temp);
+ if (ret)
+ return ret;
+
+ host_temp = sign_extend32(raw_temp, 7);
+ /*
+ * The formula for the TEMP_DATA register in the datasheet
+	 * is: x * 0.5 + 23, in degrees Celsius
+ */
+ *val = (host_temp >> 1) + 23;
+ *val2 = (host_temp & 0x1) * 500000;
+ return IIO_VAL_INT_PLUS_MICRO;
+}
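A standalone sketch of the TEMP_DATA conversion above (x * 0.5 + 23 degrees
Celsius), splitting the result into the {integer, micro} pair that
IIO_VAL_INT_PLUS_MICRO expects; sign_extend32() is modeled with a plain cast
so the example runs outside the kernel:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	const uint8_t samples[] = { 0x00, 0x01, 0xff, 0x80 };
	unsigned int i;

	for (i = 0; i < sizeof(samples); i++) {
		/* sign-extend the 8-bit register value */
		int32_t t = (int32_t)(int8_t)samples[i];
		/* arithmetic right shift halves and floors, as in the driver */
		int val = (t >> 1) + 23;
		int val2 = (t & 0x1) * 500000;

		printf("raw 0x%02x -> %d degC + %d udegC\n",
		       samples[i], val, val2);
	}
	return 0;
}

For example, raw 0xff is -1, giving 22 degrees plus 500000 micro-degrees,
i.e. 22.5 degrees Celsius.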
+
+static int bma400_get_accel_reg(struct bma400_data *data,
+ const struct iio_chan_spec *chan,
+ int *val)
+{
+ __le16 raw_accel;
+ int lsb_reg;
+ int ret;
+
+ if (data->power_mode == POWER_MODE_SLEEP)
+ return -EBUSY;
+
+ switch (chan->channel2) {
+ case IIO_MOD_X:
+ lsb_reg = BMA400_X_AXIS_LSB_REG;
+ break;
+ case IIO_MOD_Y:
+ lsb_reg = BMA400_Y_AXIS_LSB_REG;
+ break;
+ case IIO_MOD_Z:
+ lsb_reg = BMA400_Z_AXIS_LSB_REG;
+ break;
+ default:
+ dev_err(data->dev, "invalid axis channel modifier\n");
+ return -EINVAL;
+ }
+
+ /* bulk read two registers, with the base being the LSB register */
+ ret = regmap_bulk_read(data->regmap, lsb_reg, &raw_accel,
+ sizeof(raw_accel));
+ if (ret)
+ return ret;
+
+ *val = sign_extend32(le16_to_cpu(raw_accel), 11);
+ return IIO_VAL_INT;
+}
+
+static void bma400_output_data_rate_from_raw(int raw, unsigned int *val,
+ unsigned int *val2)
+{
+ *val = BMA400_ACC_ODR_MAX_HZ >> (BMA400_ACC_ODR_MAX_RAW - raw);
+ if (raw > BMA400_ACC_ODR_MIN_RAW)
+ *val2 = 0;
+ else
+ *val2 = 500000;
+}
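The raw-to-rate mapping above doubles the output data rate with each raw
step, from 12.5 Hz at code 0x05 up to 800 Hz at code 0x0b; only the lowest
code carries a fractional half hertz. A minimal sketch that prints the whole
table (constants copied from bma400.h):

#include <stdio.h>

#define ODR_MIN_RAW	0x05
#define ODR_MAX_RAW	0x0b
#define ODR_MAX_HZ	800

int main(void)
{
	int raw;

	for (raw = ODR_MIN_RAW; raw <= ODR_MAX_RAW; raw++) {
		unsigned int hz = ODR_MAX_HZ >> (ODR_MAX_RAW - raw);
		unsigned int uhz = raw > ODR_MIN_RAW ? 0 : 500000;

		printf("raw 0x%02x -> %u.%06u Hz\n", raw, hz, uhz);
	}
	return 0;
}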
+
+static int bma400_get_accel_output_data_rate(struct bma400_data *data)
+{
+ unsigned int val;
+ unsigned int odr;
+ int ret;
+
+ switch (data->power_mode) {
+ case POWER_MODE_LOW:
+ /*
+ * Runs at a fixed rate in low-power mode. See section 4.3
+ * in the datasheet.
+ */
+ bma400_output_data_rate_from_raw(BMA400_ACC_ODR_LP_RAW,
+ &data->sample_freq.hz,
+ &data->sample_freq.uhz);
+ return 0;
+ case POWER_MODE_NORMAL:
+ /*
+ * In normal mode the ODR can be found in the ACC_CONFIG1
+ * register.
+ */
+ ret = regmap_read(data->regmap, BMA400_ACC_CONFIG1_REG, &val);
+ if (ret)
+ goto error;
+
+ odr = val & BMA400_ACC_ODR_MASK;
+ if (odr < BMA400_ACC_ODR_MIN_RAW ||
+ odr > BMA400_ACC_ODR_MAX_RAW) {
+ ret = -EINVAL;
+ goto error;
+ }
+
+ bma400_output_data_rate_from_raw(odr, &data->sample_freq.hz,
+ &data->sample_freq.uhz);
+ return 0;
+ case POWER_MODE_SLEEP:
+ data->sample_freq.hz = 0;
+ data->sample_freq.uhz = 0;
+ return 0;
+ default:
+ ret = 0;
+ goto error;
+ }
+error:
+ data->sample_freq.hz = -1;
+ data->sample_freq.uhz = -1;
+ return ret;
+}
+
+static int bma400_set_accel_output_data_rate(struct bma400_data *data,
+ int hz, int uhz)
+{
+ unsigned int idx;
+ unsigned int odr;
+ unsigned int val;
+ int ret;
+
+ if (hz >= BMA400_ACC_ODR_MIN_WHOLE_HZ) {
+ if (uhz || hz > BMA400_ACC_ODR_MAX_HZ)
+ return -EINVAL;
+
+ /* Note this works because MIN_WHOLE_HZ is odd */
+ idx = __ffs(hz);
+
+ if (hz >> idx != BMA400_ACC_ODR_MIN_WHOLE_HZ)
+ return -EINVAL;
+
+ idx += BMA400_ACC_ODR_MIN_RAW + 1;
+ } else if (hz == BMA400_ACC_ODR_MIN_HZ && uhz == 500000) {
+ idx = BMA400_ACC_ODR_MIN_RAW;
+ } else {
+ return -EINVAL;
+ }
+
+ ret = regmap_read(data->regmap, BMA400_ACC_CONFIG1_REG, &val);
+ if (ret)
+ return ret;
+
+ /* preserve the range and normal mode osr */
+ odr = (~BMA400_ACC_ODR_MASK & val) | idx;
+
+ ret = regmap_write(data->regmap, BMA400_ACC_CONFIG1_REG, odr);
+ if (ret)
+ return ret;
+
+ bma400_output_data_rate_from_raw(idx, &data->sample_freq.hz,
+ &data->sample_freq.uhz);
+ return 0;
+}
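The "MIN_WHOLE_HZ is odd" note above is doing real work: every valid whole-Hz
rate is 25 << n, and because 25 is odd, the count of trailing zero bits in a
requested rate is exactly n, so a single shift-back comparison both recovers
the step and rejects invalid rates. A standalone sketch (__ffs() is modeled
with __builtin_ctz(), which must not be called with 0):

#include <stdio.h>

/* returns n for rates of the form 25 << n, -1 otherwise */
static int rate_to_step(unsigned int hz)
{
	unsigned int n = __builtin_ctz(hz);

	return (hz >> n) == 25 ? (int)n : -1;
}

int main(void)
{
	const unsigned int tests[] = { 25, 50, 100, 200, 400, 800, 30, 75 };
	unsigned int i;

	for (i = 0; i < sizeof(tests) / sizeof(tests[0]); i++)
		printf("%u Hz -> step %d\n", tests[i], rate_to_step(tests[i]));
	return 0;
}

Rates like 30 or 75 Hz fail the check (-1), exactly as the driver returns
-EINVAL for them.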
+
+static int bma400_get_accel_oversampling_ratio(struct bma400_data *data)
+{
+ unsigned int val;
+ unsigned int osr;
+ int ret;
+
+ /*
+ * The oversampling ratio is stored in a different register
+ * based on the power-mode. In normal mode the OSR is stored
+ * in ACC_CONFIG1. In low-power mode it is stored in
+ * ACC_CONFIG0.
+ */
+ switch (data->power_mode) {
+ case POWER_MODE_LOW:
+ ret = regmap_read(data->regmap, BMA400_ACC_CONFIG0_REG, &val);
+ if (ret) {
+ data->oversampling_ratio = -1;
+ return ret;
+ }
+
+ osr = (val & BMA400_LP_OSR_MASK) >> BMA400_LP_OSR_SHIFT;
+
+ data->oversampling_ratio = osr;
+ return 0;
+ case POWER_MODE_NORMAL:
+ ret = regmap_read(data->regmap, BMA400_ACC_CONFIG1_REG, &val);
+ if (ret) {
+ data->oversampling_ratio = -1;
+ return ret;
+ }
+
+ osr = (val & BMA400_NP_OSR_MASK) >> BMA400_NP_OSR_SHIFT;
+
+ data->oversampling_ratio = osr;
+ return 0;
+ case POWER_MODE_SLEEP:
+ data->oversampling_ratio = 0;
+ return 0;
+ default:
+ data->oversampling_ratio = -1;
+ return -EINVAL;
+ }
+}
+
+static int bma400_set_accel_oversampling_ratio(struct bma400_data *data,
+ int val)
+{
+ unsigned int acc_config;
+ int ret;
+
+ if (val & ~BMA400_TWO_BITS_MASK)
+ return -EINVAL;
+
+ /*
+ * The oversampling ratio is stored in a different register
+ * based on the power-mode.
+ */
+ switch (data->power_mode) {
+ case POWER_MODE_LOW:
+ ret = regmap_read(data->regmap, BMA400_ACC_CONFIG0_REG,
+ &acc_config);
+ if (ret)
+ return ret;
+
+ ret = regmap_write(data->regmap, BMA400_ACC_CONFIG0_REG,
+ (acc_config & ~BMA400_LP_OSR_MASK) |
+ (val << BMA400_LP_OSR_SHIFT));
+ if (ret) {
+ dev_err(data->dev, "Failed to write out OSR\n");
+ return ret;
+ }
+
+ data->oversampling_ratio = val;
+ return 0;
+ case POWER_MODE_NORMAL:
+ ret = regmap_read(data->regmap, BMA400_ACC_CONFIG1_REG,
+ &acc_config);
+ if (ret)
+ return ret;
+
+ ret = regmap_write(data->regmap, BMA400_ACC_CONFIG1_REG,
+ (acc_config & ~BMA400_NP_OSR_MASK) |
+ (val << BMA400_NP_OSR_SHIFT));
+ if (ret) {
+ dev_err(data->dev, "Failed to write out OSR\n");
+ return ret;
+ }
+
+ data->oversampling_ratio = val;
+ return 0;
+ default:
+ return -EINVAL;
+ }
+ return ret;
+}
+
+static int bma400_accel_scale_to_raw(struct bma400_data *data,
+ unsigned int val)
+{
+ int raw;
+
+ if (val == 0)
+ return -EINVAL;
+
+ /* Note this works because BMA400_SCALE_MIN is odd */
+ raw = __ffs(val);
+
+ if (val >> raw != BMA400_SCALE_MIN)
+ return -EINVAL;
+
+ return raw;
+}
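The same odd-base trick reappears here with BMA400_SCALE_MIN (38357, odd):
the four valid scales are 38357 << raw for raw 0 through 3, one per G-range
listed in the comment at the top of the file (the 2 << raw range association
below follows the usual Bosch acc_range encoding and is illustrative). A
quick check:

#include <stdio.h>

int main(void)
{
	int raw;

	for (raw = 0; raw <= 3; raw++)
		printf("+/-%dg -> scale %u\n", 2 << raw, 38357u << raw);
	return 0;
}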
+
+static int bma400_get_accel_scale(struct bma400_data *data)
+{
+ unsigned int raw_scale;
+ unsigned int val;
+ int ret;
+
+ ret = regmap_read(data->regmap, BMA400_ACC_CONFIG1_REG, &val);
+ if (ret)
+ return ret;
+
+ raw_scale = (val & BMA400_ACC_SCALE_MASK) >> BMA400_SCALE_SHIFT;
+ if (raw_scale > BMA400_TWO_BITS_MASK)
+ return -EINVAL;
+
+ data->scale = BMA400_SCALE_MIN << raw_scale;
+
+ return 0;
+}
+
+static int bma400_set_accel_scale(struct bma400_data *data, unsigned int val)
+{
+ unsigned int acc_config;
+ int raw;
+ int ret;
+
+ ret = regmap_read(data->regmap, BMA400_ACC_CONFIG1_REG, &acc_config);
+ if (ret)
+ return ret;
+
+ raw = bma400_accel_scale_to_raw(data, val);
+ if (raw < 0)
+ return raw;
+
+ ret = regmap_write(data->regmap, BMA400_ACC_CONFIG1_REG,
+ (acc_config & ~BMA400_ACC_SCALE_MASK) |
+ (raw << BMA400_SCALE_SHIFT));
+ if (ret)
+ return ret;
+
+ data->scale = val;
+ return 0;
+}
+
+static int bma400_get_power_mode(struct bma400_data *data)
+{
+ unsigned int val;
+ int ret;
+
+ ret = regmap_read(data->regmap, BMA400_STATUS_REG, &val);
+ if (ret) {
+ dev_err(data->dev, "Failed to read status register\n");
+ return ret;
+ }
+
+ data->power_mode = (val >> 1) & BMA400_TWO_BITS_MASK;
+ return 0;
+}
+
+static int bma400_set_power_mode(struct bma400_data *data,
+ enum bma400_power_mode mode)
+{
+ unsigned int val;
+ int ret;
+
+ ret = regmap_read(data->regmap, BMA400_ACC_CONFIG0_REG, &val);
+ if (ret)
+ return ret;
+
+ if (data->power_mode == mode)
+ return 0;
+
+ if (mode == POWER_MODE_INVALID)
+ return -EINVAL;
+
+	/* Preserve the low-power oversample ratio, etc. */
+ ret = regmap_write(data->regmap, BMA400_ACC_CONFIG0_REG,
+ mode | (val & ~BMA400_TWO_BITS_MASK));
+ if (ret) {
+ dev_err(data->dev, "Failed to write to power-mode\n");
+ return ret;
+ }
+
+ data->power_mode = mode;
+
+ /*
+ * Update our cached osr and odr based on the new
+ * power-mode.
+ */
+ bma400_get_accel_output_data_rate(data);
+ bma400_get_accel_oversampling_ratio(data);
+ return 0;
+}
+
+static void bma400_init_tables(void)
+{
+ int raw;
+ int i;
+
+ for (i = 0; i + 1 < ARRAY_SIZE(bma400_sample_freqs); i += 2) {
+ raw = (i / 2) + 5;
+ bma400_output_data_rate_from_raw(raw, &bma400_sample_freqs[i],
+ &bma400_sample_freqs[i + 1]);
+ }
+
+ for (i = 0; i + 1 < ARRAY_SIZE(bma400_scales); i += 2) {
+ raw = i / 2;
+ bma400_scales[i] = 0;
+ bma400_scales[i + 1] = BMA400_SCALE_MIN << raw;
+ }
+}
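The tables built here follow the IIO convention for "available" lists with
IIO_VAL_INT_PLUS_MICRO: a flat array of {integer, micro} pairs, so the seven
sample rates occupy 14 slots and the four scales eight. A standalone sketch
of the resulting contents:

#include <stdio.h>

int main(void)
{
	int freqs[14], scales[8], i;

	for (i = 0; i + 1 < 14; i += 2) {
		int raw = i / 2 + 5;			/* codes 0x05..0x0b */

		freqs[i] = 800 >> (0x0b - raw);
		freqs[i + 1] = raw > 5 ? 0 : 500000;
	}
	for (i = 0; i + 1 < 8; i += 2) {
		scales[i] = 0;				/* integer part */
		scales[i + 1] = 38357 << (i / 2);	/* micro part */
	}

	for (i = 0; i < 14; i += 2)
		printf("%d.%06d Hz\n", freqs[i], freqs[i + 1]);
	for (i = 0; i < 8; i += 2)
		printf("scale 0.%06d\n", scales[i + 1]);
	return 0;
}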
+
+static int bma400_init(struct bma400_data *data)
+{
+ unsigned int val;
+ int ret;
+
+ /* Try to read chip_id register. It must return 0x90. */
+ ret = regmap_read(data->regmap, BMA400_CHIP_ID_REG, &val);
+ if (ret) {
+ dev_err(data->dev, "Failed to read chip id register\n");
+ goto out;
+ }
+
+ if (val != BMA400_ID_REG_VAL) {
+ dev_err(data->dev, "Chip ID mismatch\n");
+ ret = -ENODEV;
+ goto out;
+ }
+
+ data->regulators[BMA400_VDD_REGULATOR].supply = "vdd";
+ data->regulators[BMA400_VDDIO_REGULATOR].supply = "vddio";
+ ret = devm_regulator_bulk_get(data->dev,
+ ARRAY_SIZE(data->regulators),
+ data->regulators);
+ if (ret) {
+ if (ret != -EPROBE_DEFER)
+ dev_err(data->dev,
+ "Failed to get regulators: %d\n",
+ ret);
+
+ goto out;
+ }
+ ret = regulator_bulk_enable(ARRAY_SIZE(data->regulators),
+ data->regulators);
+ if (ret) {
+ dev_err(data->dev, "Failed to enable regulators: %d\n",
+ ret);
+ goto out;
+ }
+
+ ret = bma400_get_power_mode(data);
+ if (ret) {
+ dev_err(data->dev, "Failed to get the initial power-mode\n");
+ goto err_reg_disable;
+ }
+
+ if (data->power_mode != POWER_MODE_NORMAL) {
+ ret = bma400_set_power_mode(data, POWER_MODE_NORMAL);
+ if (ret) {
+ dev_err(data->dev, "Failed to wake up the device\n");
+ goto err_reg_disable;
+ }
+ /*
+ * TODO: The datasheet waits 1500us here in the example, but
+ * lists 2/ODR as the wakeup time.
+ */
+ usleep_range(1500, 2000);
+ }
+
+ bma400_init_tables();
+
+ ret = bma400_get_accel_output_data_rate(data);
+ if (ret)
+ goto err_reg_disable;
+
+ ret = bma400_get_accel_oversampling_ratio(data);
+ if (ret)
+ goto err_reg_disable;
+
+ ret = bma400_get_accel_scale(data);
+ if (ret)
+ goto err_reg_disable;
+
+ /*
+ * Once the interrupt engine is supported we might use the
+	 * data_src_reg, but for now keep this set to the variable-ODR
+	 * filter, which is selectable via the sampling frequency
+	 * channel.
+ */
+ return regmap_write(data->regmap, BMA400_ACC_CONFIG2_REG, 0x00);
+
+err_reg_disable:
+ regulator_bulk_disable(ARRAY_SIZE(data->regulators),
+ data->regulators);
+out:
+ return ret;
+}
+
+static int bma400_read_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan, int *val,
+ int *val2, long mask)
+{
+ struct bma400_data *data = iio_priv(indio_dev);
+ int ret;
+
+ switch (mask) {
+ case IIO_CHAN_INFO_PROCESSED:
+ mutex_lock(&data->mutex);
+ ret = bma400_get_temp_reg(data, val, val2);
+ mutex_unlock(&data->mutex);
+ return ret;
+ case IIO_CHAN_INFO_RAW:
+ mutex_lock(&data->mutex);
+ ret = bma400_get_accel_reg(data, chan, val);
+ mutex_unlock(&data->mutex);
+ return ret;
+ case IIO_CHAN_INFO_SAMP_FREQ:
+ switch (chan->type) {
+ case IIO_ACCEL:
+ if (data->sample_freq.hz < 0)
+ return -EINVAL;
+
+ *val = data->sample_freq.hz;
+ *val2 = data->sample_freq.uhz;
+ return IIO_VAL_INT_PLUS_MICRO;
+ case IIO_TEMP:
+ /*
+ * Runs at a fixed sampling frequency. See Section 4.4
+ * of the datasheet.
+ */
+ *val = 6;
+ *val2 = 250000;
+ return IIO_VAL_INT_PLUS_MICRO;
+ default:
+ return -EINVAL;
+ }
+ case IIO_CHAN_INFO_SCALE:
+ *val = 0;
+ *val2 = data->scale;
+ return IIO_VAL_INT_PLUS_MICRO;
+ case IIO_CHAN_INFO_OVERSAMPLING_RATIO:
+ /*
+ * TODO: We could avoid this logic and returning -EINVAL here if
+ * we set both the low-power and normal mode OSR registers when
+ * we configure the device.
+ */
+ if (data->oversampling_ratio < 0)
+ return -EINVAL;
+
+ *val = data->oversampling_ratio;
+ return IIO_VAL_INT;
+ default:
+ return -EINVAL;
+ }
+}
+
+static int bma400_read_avail(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ const int **vals, int *type, int *length,
+ long mask)
+{
+ switch (mask) {
+ case IIO_CHAN_INFO_SCALE:
+ *type = IIO_VAL_INT_PLUS_MICRO;
+ *vals = bma400_scales;
+ *length = ARRAY_SIZE(bma400_scales);
+ return IIO_AVAIL_LIST;
+ case IIO_CHAN_INFO_OVERSAMPLING_RATIO:
+ *type = IIO_VAL_INT;
+ *vals = bma400_osr_range;
+ *length = ARRAY_SIZE(bma400_osr_range);
+ return IIO_AVAIL_RANGE;
+ case IIO_CHAN_INFO_SAMP_FREQ:
+ *type = IIO_VAL_INT_PLUS_MICRO;
+ *vals = bma400_sample_freqs;
+ *length = ARRAY_SIZE(bma400_sample_freqs);
+ return IIO_AVAIL_LIST;
+ default:
+ return -EINVAL;
+ }
+}
+
+static int bma400_write_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan, int val, int val2,
+ long mask)
+{
+ struct bma400_data *data = iio_priv(indio_dev);
+ int ret;
+
+ switch (mask) {
+ case IIO_CHAN_INFO_SAMP_FREQ:
+ /*
+		 * The sample frequency is read-only for the temperature
+ * register and a fixed value in low-power mode.
+ */
+ if (chan->type != IIO_ACCEL)
+ return -EINVAL;
+
+ mutex_lock(&data->mutex);
+ ret = bma400_set_accel_output_data_rate(data, val, val2);
+ mutex_unlock(&data->mutex);
+ return ret;
+ case IIO_CHAN_INFO_SCALE:
+ if (val != 0 ||
+ val2 < BMA400_SCALE_MIN || val2 > BMA400_SCALE_MAX)
+ return -EINVAL;
+
+ mutex_lock(&data->mutex);
+ ret = bma400_set_accel_scale(data, val2);
+ mutex_unlock(&data->mutex);
+ return ret;
+ case IIO_CHAN_INFO_OVERSAMPLING_RATIO:
+ mutex_lock(&data->mutex);
+ ret = bma400_set_accel_oversampling_ratio(data, val);
+ mutex_unlock(&data->mutex);
+ return ret;
+ default:
+ return -EINVAL;
+ }
+}
+
+static int bma400_write_raw_get_fmt(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ long mask)
+{
+ switch (mask) {
+ case IIO_CHAN_INFO_SAMP_FREQ:
+ return IIO_VAL_INT_PLUS_MICRO;
+ case IIO_CHAN_INFO_SCALE:
+ return IIO_VAL_INT_PLUS_MICRO;
+ case IIO_CHAN_INFO_OVERSAMPLING_RATIO:
+ return IIO_VAL_INT;
+ default:
+ return -EINVAL;
+ }
+}
+
+static const struct iio_info bma400_info = {
+ .read_raw = bma400_read_raw,
+ .read_avail = bma400_read_avail,
+ .write_raw = bma400_write_raw,
+ .write_raw_get_fmt = bma400_write_raw_get_fmt,
+};
+
+int bma400_probe(struct device *dev, struct regmap *regmap, const char *name)
+{
+ struct iio_dev *indio_dev;
+ struct bma400_data *data;
+ int ret;
+
+ indio_dev = devm_iio_device_alloc(dev, sizeof(*data));
+ if (!indio_dev)
+ return -ENOMEM;
+
+ data = iio_priv(indio_dev);
+ data->regmap = regmap;
+ data->dev = dev;
+
+ ret = bma400_init(data);
+ if (ret)
+ return ret;
+
+ ret = iio_read_mount_matrix(dev, "mount-matrix", &data->orientation);
+ if (ret)
+ return ret;
+
+ mutex_init(&data->mutex);
+ indio_dev->dev.parent = dev;
+ indio_dev->name = name;
+ indio_dev->info = &bma400_info;
+ indio_dev->channels = bma400_channels;
+ indio_dev->num_channels = ARRAY_SIZE(bma400_channels);
+ indio_dev->modes = INDIO_DIRECT_MODE;
+
+ dev_set_drvdata(dev, indio_dev);
+
+ return iio_device_register(indio_dev);
+}
+EXPORT_SYMBOL(bma400_probe);
+
+int bma400_remove(struct device *dev)
+{
+ struct iio_dev *indio_dev = dev_get_drvdata(dev);
+ struct bma400_data *data = iio_priv(indio_dev);
+ int ret;
+
+ mutex_lock(&data->mutex);
+ ret = bma400_set_power_mode(data, POWER_MODE_SLEEP);
+ mutex_unlock(&data->mutex);
+
+ regulator_bulk_disable(ARRAY_SIZE(data->regulators),
+ data->regulators);
+
+ iio_device_unregister(indio_dev);
+
+ return ret;
+}
+EXPORT_SYMBOL(bma400_remove);
+
+MODULE_AUTHOR("Dan Robertson <dan@dlrobertson.com>");
+MODULE_DESCRIPTION("Bosch BMA400 triaxial acceleration sensor core");
+MODULE_LICENSE("GPL");
diff --git a/drivers/iio/accel/bma400_i2c.c b/drivers/iio/accel/bma400_i2c.c
new file mode 100644
index 000000000000..9dcb7cc9996e
--- /dev/null
+++ b/drivers/iio/accel/bma400_i2c.c
@@ -0,0 +1,61 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * I2C IIO driver for Bosch BMA400 triaxial acceleration sensor.
+ *
+ * Copyright 2019 Dan Robertson <dan@dlrobertson.com>
+ *
+ * The I2C address is either 0x14 or 0x15 depending on the SDO pin
+ */
+#include <linux/i2c.h>
+#include <linux/mod_devicetable.h>
+#include <linux/module.h>
+#include <linux/regmap.h>
+
+#include "bma400.h"
+
+static int bma400_i2c_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ struct regmap *regmap;
+
+ regmap = devm_regmap_init_i2c(client, &bma400_regmap_config);
+ if (IS_ERR(regmap)) {
+ dev_err(&client->dev, "failed to create regmap\n");
+ return PTR_ERR(regmap);
+ }
+
+ return bma400_probe(&client->dev, regmap, id->name);
+}
+
+static int bma400_i2c_remove(struct i2c_client *client)
+{
+ return bma400_remove(&client->dev);
+}
+
+static const struct i2c_device_id bma400_i2c_ids[] = {
+ { "bma400", 0 },
+ { }
+};
+MODULE_DEVICE_TABLE(i2c, bma400_i2c_ids);
+
+static const struct of_device_id bma400_of_i2c_match[] = {
+ { .compatible = "bosch,bma400" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, bma400_of_i2c_match);
+
+static struct i2c_driver bma400_i2c_driver = {
+ .driver = {
+ .name = "bma400",
+ .of_match_table = bma400_of_i2c_match,
+ },
+ .probe = bma400_i2c_probe,
+ .remove = bma400_i2c_remove,
+ .id_table = bma400_i2c_ids,
+};
+
+module_i2c_driver(bma400_i2c_driver);
+
+MODULE_AUTHOR("Dan Robertson <dan@dlrobertson.com>");
+MODULE_DESCRIPTION("Bosch BMA400 triaxial acceleration sensor (I2C)");
+MODULE_LICENSE("GPL");
diff --git a/drivers/iio/accel/kxcjk-1013.c b/drivers/iio/accel/kxcjk-1013.c
index fee535d6e45b..c9924a65c32a 100644
--- a/drivers/iio/accel/kxcjk-1013.c
+++ b/drivers/iio/accel/kxcjk-1013.c
@@ -130,6 +130,7 @@ struct kxcjk1013_data {
struct i2c_client *client;
struct iio_trigger *dready_trig;
struct iio_trigger *motion_trig;
+ struct iio_mount_matrix orientation;
struct mutex mutex;
s16 buffer[8];
u8 odr_bits;
@@ -983,6 +984,20 @@ static const struct iio_event_spec kxcjk1013_event = {
BIT(IIO_EV_INFO_PERIOD)
};
+static const struct iio_mount_matrix *
+kxcjk1013_get_mount_matrix(const struct iio_dev *indio_dev,
+ const struct iio_chan_spec *chan)
+{
+ struct kxcjk1013_data *data = iio_priv(indio_dev);
+
+ return &data->orientation;
+}
+
+static const struct iio_chan_spec_ext_info kxcjk1013_ext_info[] = {
+ IIO_MOUNT_MATRIX(IIO_SHARED_BY_TYPE, kxcjk1013_get_mount_matrix),
+ { }
+};
+
#define KXCJK1013_CHANNEL(_axis) { \
.type = IIO_ACCEL, \
.modified = 1, \
@@ -999,6 +1014,7 @@ static const struct iio_event_spec kxcjk1013_event = {
.endianness = IIO_LE, \
}, \
.event_spec = &kxcjk1013_event, \
+ .ext_info = kxcjk1013_ext_info, \
.num_event_specs = 1 \
}
@@ -1267,11 +1283,18 @@ static int kxcjk1013_probe(struct i2c_client *client,
data->client = client;
pdata = dev_get_platdata(&client->dev);
- if (pdata)
+ if (pdata) {
data->active_high_intr = pdata->active_high_intr;
- else
+ data->orientation = pdata->orientation;
+ } else {
data->active_high_intr = true; /* default polarity */
+ ret = iio_read_mount_matrix(&client->dev, "mount-matrix",
+ &data->orientation);
+ if (ret)
+ return ret;
+ }
+
if (id) {
data->chipset = (enum kx_chipset)(id->driver_data);
name = id->name;
diff --git a/drivers/iio/accel/st_accel.h b/drivers/iio/accel/st_accel.h
index af09943f38c9..5b13e293cade 100644
--- a/drivers/iio/accel/st_accel.h
+++ b/drivers/iio/accel/st_accel.h
@@ -64,7 +64,7 @@ enum st_accel_type {
* struct st_sensors_platform_data - default accel platform data
* @drdy_int_pin: default accel DRDY is available on INT1 pin.
*/
-static const struct st_sensors_platform_data default_accel_pdata = {
+static __maybe_unused const struct st_sensors_platform_data default_accel_pdata = {
.drdy_int_pin = 1,
};
diff --git a/drivers/iio/accel/st_accel_i2c.c b/drivers/iio/accel/st_accel_i2c.c
index 50fa0fc32baa..633955d764cc 100644
--- a/drivers/iio/accel/st_accel_i2c.c
+++ b/drivers/iio/accel/st_accel_i2c.c
@@ -18,7 +18,6 @@
#include <linux/iio/common/st_sensors_i2c.h>
#include "st_accel.h"
-#ifdef CONFIG_OF
static const struct of_device_id st_accel_of_match[] = {
{
/* An older compatible */
@@ -108,9 +107,6 @@ static const struct of_device_id st_accel_of_match[] = {
{},
};
MODULE_DEVICE_TABLE(of, st_accel_of_match);
-#else
-#define st_accel_of_match NULL
-#endif
#ifdef CONFIG_ACPI
static const struct acpi_device_id st_accel_acpi_match[] = {
@@ -119,8 +115,6 @@ static const struct acpi_device_id st_accel_acpi_match[] = {
{ },
};
MODULE_DEVICE_TABLE(acpi, st_accel_acpi_match);
-#else
-#define st_accel_acpi_match NULL
#endif
static const struct i2c_device_id st_accel_id_table[] = {
@@ -195,7 +189,7 @@ static int st_accel_i2c_remove(struct i2c_client *client)
static struct i2c_driver st_accel_driver = {
.driver = {
.name = "st-accel-i2c",
- .of_match_table = of_match_ptr(st_accel_of_match),
+ .of_match_table = st_accel_of_match,
.acpi_match_table = ACPI_PTR(st_accel_acpi_match),
},
.probe_new = st_accel_i2c_probe,
diff --git a/drivers/iio/accel/st_accel_spi.c b/drivers/iio/accel/st_accel_spi.c
index 8af7027d5598..568ff1bae0ee 100644
--- a/drivers/iio/accel/st_accel_spi.c
+++ b/drivers/iio/accel/st_accel_spi.c
@@ -17,7 +17,6 @@
#include <linux/iio/common/st_sensors_spi.h>
#include "st_accel.h"
-#ifdef CONFIG_OF
/*
* For new single-chip sensors use <device_name> as compatible string.
* For old single-chip devices keep <device_name>-accel to maintain
@@ -96,9 +95,6 @@ static const struct of_device_id st_accel_of_match[] = {
{}
};
MODULE_DEVICE_TABLE(of, st_accel_of_match);
-#else
-#define st_accel_of_match NULL
-#endif
static int st_accel_spi_probe(struct spi_device *spi)
{
@@ -107,8 +103,7 @@ static int st_accel_spi_probe(struct spi_device *spi)
struct iio_dev *indio_dev;
int err;
- st_sensors_of_name_probe(&spi->dev, st_accel_of_match,
- spi->modalias, sizeof(spi->modalias));
+ st_sensors_dev_name_probe(&spi->dev, spi->modalias, sizeof(spi->modalias));
settings = st_accel_get_settings(spi->modalias);
if (!settings) {
@@ -166,7 +161,7 @@ MODULE_DEVICE_TABLE(spi, st_accel_id_table);
static struct spi_driver st_accel_driver = {
.driver = {
.name = "st-accel-spi",
- .of_match_table = of_match_ptr(st_accel_of_match),
+ .of_match_table = st_accel_of_match,
},
.probe = st_accel_spi_probe,
.remove = st_accel_spi_remove,
diff --git a/drivers/iio/adc/Kconfig b/drivers/iio/adc/Kconfig
index 5d8540b7b427..82e33082958c 100644
--- a/drivers/iio/adc/Kconfig
+++ b/drivers/iio/adc/Kconfig
@@ -21,6 +21,13 @@ config AD_SIGMA_DELTA
select IIO_BUFFER
select IIO_TRIGGERED_BUFFER
+config AD7091R5
+ tristate "Analog Devices AD7091R5 ADC Driver"
+ depends on I2C
+ select REGMAP_I2C
+ help
+ Say yes here to build support for Analog Devices AD7091R-5 ADC.
+
config AD7124
tristate "Analog Devices AD7124 and similar sigma-delta ADCs driver"
depends on SPI_MASTER
@@ -523,6 +530,16 @@ config LTC2485
To compile this driver as a module, choose M here: the module will be
called ltc2485.
+config LTC2496
+ tristate "Linear Technology LTC2496 ADC driver"
+ depends on SPI
+ help
+ Say yes here to build support for Linear Technology LTC2496
+ 16-Bit 8-/16-Channel Delta Sigma ADC.
+
+ To compile this driver as a module, choose M here: the module will be
+ called ltc2496.
+
config LTC2497
tristate "Linear Technology LTC2497 ADC driver"
depends on I2C
diff --git a/drivers/iio/adc/Makefile b/drivers/iio/adc/Makefile
index a1f1fbec0f87..919228900df9 100644
--- a/drivers/iio/adc/Makefile
+++ b/drivers/iio/adc/Makefile
@@ -6,6 +6,7 @@
# When adding new entries keep the list in alphabetical order
obj-$(CONFIG_AB8500_GPADC) += ab8500-gpadc.o
obj-$(CONFIG_AD_SIGMA_DELTA) += ad_sigma_delta.o
+obj-$(CONFIG_AD7091R5) += ad7091r5.o ad7091r-base.o
obj-$(CONFIG_AD7124) += ad7124.o
obj-$(CONFIG_AD7266) += ad7266.o
obj-$(CONFIG_AD7291) += ad7291.o
@@ -50,7 +51,8 @@ obj-$(CONFIG_LPC18XX_ADC) += lpc18xx_adc.o
obj-$(CONFIG_LPC32XX_ADC) += lpc32xx_adc.o
obj-$(CONFIG_LTC2471) += ltc2471.o
obj-$(CONFIG_LTC2485) += ltc2485.o
-obj-$(CONFIG_LTC2497) += ltc2497.o
+obj-$(CONFIG_LTC2496) += ltc2496.o ltc2497-core.o
+obj-$(CONFIG_LTC2497) += ltc2497.o ltc2497-core.o
obj-$(CONFIG_MAX1027) += max1027.o
obj-$(CONFIG_MAX11100) += max11100.o
obj-$(CONFIG_MAX1118) += max1118.o
diff --git a/drivers/iio/adc/ad7091r-base.c b/drivers/iio/adc/ad7091r-base.c
new file mode 100644
index 000000000000..33c40357bd5e
--- /dev/null
+++ b/drivers/iio/adc/ad7091r-base.c
@@ -0,0 +1,298 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * AD7091RX Analog to Digital converter driver
+ *
+ * Copyright 2014-2019 Analog Devices Inc.
+ */
+
+#include <linux/bitops.h>
+#include <linux/iio/events.h>
+#include <linux/iio/iio.h>
+#include <linux/interrupt.h>
+#include <linux/module.h>
+#include <linux/regmap.h>
+#include <linux/regulator/consumer.h>
+
+#include "ad7091r-base.h"
+
+#define AD7091R_REG_RESULT 0
+#define AD7091R_REG_CHANNEL 1
+#define AD7091R_REG_CONF 2
+#define AD7091R_REG_ALERT 3
+#define AD7091R_REG_CH_LOW_LIMIT(ch) ((ch) * 3 + 4)
+#define AD7091R_REG_CH_HIGH_LIMIT(ch) ((ch) * 3 + 5)
+#define AD7091R_REG_CH_HYSTERESIS(ch) ((ch) * 3 + 6)
+
+/* AD7091R_REG_RESULT */
+#define AD7091R_REG_RESULT_CH_ID(x) (((x) >> 13) & 0x3)
+#define AD7091R_REG_RESULT_CONV_RESULT(x) ((x) & 0xfff)
+
+/* AD7091R_REG_CONF */
+#define AD7091R_REG_CONF_AUTO BIT(8)
+#define AD7091R_REG_CONF_CMD BIT(10)
+
+#define AD7091R_REG_CONF_MODE_MASK \
+ (AD7091R_REG_CONF_AUTO | AD7091R_REG_CONF_CMD)
+
+enum ad7091r_mode {
+ AD7091R_MODE_SAMPLE,
+ AD7091R_MODE_COMMAND,
+ AD7091R_MODE_AUTOCYCLE,
+};
+
+struct ad7091r_state {
+ struct device *dev;
+ struct regmap *map;
+ struct regulator *vref;
+ const struct ad7091r_chip_info *chip_info;
+ enum ad7091r_mode mode;
+	struct mutex lock; /* lock to prevent concurrent reads */
+};
+
+static int ad7091r_set_mode(struct ad7091r_state *st, enum ad7091r_mode mode)
+{
+ int ret, conf;
+
+ switch (mode) {
+ case AD7091R_MODE_SAMPLE:
+ conf = 0;
+ break;
+ case AD7091R_MODE_COMMAND:
+ conf = AD7091R_REG_CONF_CMD;
+ break;
+ case AD7091R_MODE_AUTOCYCLE:
+ conf = AD7091R_REG_CONF_AUTO;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ ret = regmap_update_bits(st->map, AD7091R_REG_CONF,
+ AD7091R_REG_CONF_MODE_MASK, conf);
+ if (ret)
+ return ret;
+
+ st->mode = mode;
+
+ return 0;
+}
+
+static int ad7091r_set_channel(struct ad7091r_state *st, unsigned int channel)
+{
+ unsigned int dummy;
+ int ret;
+
+	/* AD7091R_REG_CHANNEL specifies which channels are to be converted */
+ ret = regmap_write(st->map, AD7091R_REG_CHANNEL,
+ BIT(channel) | (BIT(channel) << 8));
+ if (ret)
+ return ret;
+
+ /*
+ * There is a latency of one conversion before the channel conversion
+ * sequence is updated
+ */
+ return regmap_read(st->map, AD7091R_REG_RESULT, &dummy);
+}
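The channel-select value mirrors the channel mask into both bytes of the
16-bit register value, so selecting channel 2 writes 0x0404. A quick
standalone check of the encoding:

#include <stdio.h>

int main(void)
{
	unsigned int ch;

	for (ch = 0; ch < 4; ch++)
		printf("ch %u -> 0x%04x\n",
		       ch, (1u << ch) | ((1u << ch) << 8));
	return 0;
}

Note also the dummy read of the result register afterwards: per the comment
above it, one conversion of latency elapses before the new channel selection
takes effect, so the first result after switching is thrown away.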
+
+static int ad7091r_read_one(struct iio_dev *iio_dev,
+ unsigned int channel, unsigned int *read_val)
+{
+ struct ad7091r_state *st = iio_priv(iio_dev);
+ unsigned int val;
+ int ret;
+
+ ret = ad7091r_set_channel(st, channel);
+ if (ret)
+ return ret;
+
+ ret = regmap_read(st->map, AD7091R_REG_RESULT, &val);
+ if (ret)
+ return ret;
+
+ if (AD7091R_REG_RESULT_CH_ID(val) != channel)
+ return -EIO;
+
+ *read_val = AD7091R_REG_RESULT_CONV_RESULT(val);
+
+ return 0;
+}
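The result register packs the sampled channel's ID into bits [14:13] and the
12-bit conversion code into bits [11:0], which is what the two macros near
the top of the file extract. A standalone decode of a hypothetical readback:

#include <stdio.h>
#include <stdint.h>

#define CH_ID(x)	(((x) >> 13) & 0x3)
#define CONV_RESULT(x)	((x) & 0xfff)

int main(void)
{
	uint16_t raw = 0x4abc;	/* hypothetical AD7091R_REG_RESULT value */

	printf("channel %u, code %u\n", CH_ID(raw), CONV_RESULT(raw));
	return 0;
}

This prints "channel 2, code 2748"; the driver returns -EIO when the decoded
channel does not match the one it just selected.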
+
+static int ad7091r_read_raw(struct iio_dev *iio_dev,
+ struct iio_chan_spec const *chan,
+ int *val, int *val2, long m)
+{
+ struct ad7091r_state *st = iio_priv(iio_dev);
+ unsigned int read_val;
+ int ret;
+
+ mutex_lock(&st->lock);
+
+ switch (m) {
+ case IIO_CHAN_INFO_RAW:
+ if (st->mode != AD7091R_MODE_COMMAND) {
+ ret = -EBUSY;
+ goto unlock;
+ }
+
+ ret = ad7091r_read_one(iio_dev, chan->channel, &read_val);
+ if (ret)
+ goto unlock;
+
+ *val = read_val;
+ ret = IIO_VAL_INT;
+ break;
+
+ case IIO_CHAN_INFO_SCALE:
+ if (st->vref) {
+ ret = regulator_get_voltage(st->vref);
+ if (ret < 0)
+ goto unlock;
+
+ *val = ret / 1000;
+ } else {
+ *val = st->chip_info->vref_mV;
+ }
+
+ *val2 = chan->scan_type.realbits;
+ ret = IIO_VAL_FRACTIONAL_LOG2;
+ break;
+
+ default:
+ ret = -EINVAL;
+ break;
+ }
+
+unlock:
+ mutex_unlock(&st->lock);
+ return ret;
+}
+
+static const struct iio_info ad7091r_info = {
+ .read_raw = ad7091r_read_raw,
+};
+
+static irqreturn_t ad7091r_event_handler(int irq, void *private)
+{
+ struct ad7091r_state *st = (struct ad7091r_state *) private;
+ struct iio_dev *iio_dev = dev_get_drvdata(st->dev);
+ unsigned int i, read_val;
+ int ret;
+ s64 timestamp = iio_get_time_ns(iio_dev);
+
+ ret = regmap_read(st->map, AD7091R_REG_ALERT, &read_val);
+ if (ret)
+ return IRQ_HANDLED;
+
+ for (i = 0; i < st->chip_info->num_channels; i++) {
+ if (read_val & BIT(i * 2))
+ iio_push_event(iio_dev,
+ IIO_UNMOD_EVENT_CODE(IIO_VOLTAGE, i,
+ IIO_EV_TYPE_THRESH,
+ IIO_EV_DIR_RISING), timestamp);
+ if (read_val & BIT(i * 2 + 1))
+ iio_push_event(iio_dev,
+ IIO_UNMOD_EVENT_CODE(IIO_VOLTAGE, i,
+ IIO_EV_TYPE_THRESH,
+ IIO_EV_DIR_FALLING), timestamp);
+ }
+
+ return IRQ_HANDLED;
+}
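The alert register interleaves two flag bits per channel: bit 2*i signals the
rising (high-limit) threshold for channel i and bit 2*i+1 the falling
(low-limit) one, which is exactly how the loop above fans events out. A
standalone decode of a hypothetical alert readback:

#include <stdio.h>

int main(void)
{
	unsigned int alert = 0x46;	/* hypothetical ALERT value */
	unsigned int i;

	for (i = 0; i < 4; i++) {
		if (alert & (1u << (i * 2)))
			printf("ch %u: high threshold crossed\n", i);
		if (alert & (1u << (i * 2 + 1)))
			printf("ch %u: low threshold crossed\n", i);
	}
	return 0;
}

0x46 decodes to a low-threshold event on channel 0 and high-threshold events
on channels 1 and 3.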
+
+static void ad7091r_remove(void *data)
+{
+ struct ad7091r_state *st = data;
+
+ regulator_disable(st->vref);
+}
+
+int ad7091r_probe(struct device *dev, const char *name,
+ const struct ad7091r_chip_info *chip_info,
+ struct regmap *map, int irq)
+{
+ struct iio_dev *iio_dev;
+ struct ad7091r_state *st;
+ int ret;
+
+ iio_dev = devm_iio_device_alloc(dev, sizeof(*st));
+ if (!iio_dev)
+ return -ENOMEM;
+
+ st = iio_priv(iio_dev);
+ st->dev = dev;
+ st->chip_info = chip_info;
+ st->map = map;
+
+ iio_dev->dev.parent = dev;
+ iio_dev->name = name;
+ iio_dev->info = &ad7091r_info;
+ iio_dev->modes = INDIO_DIRECT_MODE;
+
+ iio_dev->num_channels = chip_info->num_channels;
+ iio_dev->channels = chip_info->channels;
+
+ if (irq) {
+ ret = devm_request_threaded_irq(dev, irq, NULL,
+ ad7091r_event_handler,
+ IRQF_TRIGGER_FALLING | IRQF_ONESHOT, name, st);
+ if (ret)
+ return ret;
+ }
+
+ st->vref = devm_regulator_get_optional(dev, "vref");
+ if (IS_ERR(st->vref)) {
+ if (PTR_ERR(st->vref) == -EPROBE_DEFER)
+ return -EPROBE_DEFER;
+ st->vref = NULL;
+ } else {
+ ret = regulator_enable(st->vref);
+ if (ret)
+ return ret;
+ ret = devm_add_action_or_reset(dev, ad7091r_remove, st);
+ if (ret)
+ return ret;
+ }
+
+	/* Use command mode by default to convert only the desired channels */
+ ret = ad7091r_set_mode(st, AD7091R_MODE_COMMAND);
+ if (ret)
+ return ret;
+
+ return devm_iio_device_register(dev, iio_dev);
+}
+EXPORT_SYMBOL_GPL(ad7091r_probe);
+
+static bool ad7091r_writeable_reg(struct device *dev, unsigned int reg)
+{
+ switch (reg) {
+ case AD7091R_REG_RESULT:
+ case AD7091R_REG_ALERT:
+ return false;
+ default:
+ return true;
+ }
+}
+
+static bool ad7091r_volatile_reg(struct device *dev, unsigned int reg)
+{
+ switch (reg) {
+ case AD7091R_REG_RESULT:
+ case AD7091R_REG_ALERT:
+ return true;
+ default:
+ return false;
+ }
+}
+
+const struct regmap_config ad7091r_regmap_config = {
+ .reg_bits = 8,
+ .val_bits = 16,
+ .writeable_reg = ad7091r_writeable_reg,
+ .volatile_reg = ad7091r_volatile_reg,
+};
+EXPORT_SYMBOL_GPL(ad7091r_regmap_config);
+
+MODULE_AUTHOR("Beniamin Bia <beniamin.bia@analog.com>");
+MODULE_DESCRIPTION("Analog Devices AD7091Rx multi-channel converters");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/iio/adc/ad7091r-base.h b/drivers/iio/adc/ad7091r-base.h
new file mode 100644
index 000000000000..509748aef9b1
--- /dev/null
+++ b/drivers/iio/adc/ad7091r-base.h
@@ -0,0 +1,26 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * AD7091RX Analog to Digital converter driver
+ *
+ * Copyright 2014-2019 Analog Devices Inc.
+ */
+
+#ifndef __DRIVERS_IIO_ADC_AD7091R_BASE_H__
+#define __DRIVERS_IIO_ADC_AD7091R_BASE_H__
+
+struct device;
+struct ad7091r_state;
+
+struct ad7091r_chip_info {
+ unsigned int num_channels;
+ const struct iio_chan_spec *channels;
+ unsigned int vref_mV;
+};
+
+extern const struct regmap_config ad7091r_regmap_config;
+
+int ad7091r_probe(struct device *dev, const char *name,
+ const struct ad7091r_chip_info *chip_info,
+ struct regmap *map, int irq);
+
+#endif /* __DRIVERS_IIO_ADC_AD7091R_BASE_H__ */
diff --git a/drivers/iio/adc/ad7091r5.c b/drivers/iio/adc/ad7091r5.c
new file mode 100644
index 000000000000..9665679c3ea6
--- /dev/null
+++ b/drivers/iio/adc/ad7091r5.c
@@ -0,0 +1,113 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * AD7091R5 Analog to Digital converter driver
+ *
+ * Copyright 2014-2019 Analog Devices Inc.
+ */
+
+#include <linux/i2c.h>
+#include <linux/iio/iio.h>
+#include <linux/module.h>
+#include <linux/regmap.h>
+
+#include "ad7091r-base.h"
+
+static const struct iio_event_spec ad7091r5_events[] = {
+ {
+ .type = IIO_EV_TYPE_THRESH,
+ .dir = IIO_EV_DIR_RISING,
+ .mask_separate = BIT(IIO_EV_INFO_VALUE) |
+ BIT(IIO_EV_INFO_ENABLE),
+ },
+ {
+ .type = IIO_EV_TYPE_THRESH,
+ .dir = IIO_EV_DIR_FALLING,
+ .mask_separate = BIT(IIO_EV_INFO_VALUE) |
+ BIT(IIO_EV_INFO_ENABLE),
+ },
+ {
+ .type = IIO_EV_TYPE_THRESH,
+ .dir = IIO_EV_DIR_EITHER,
+ .mask_separate = BIT(IIO_EV_INFO_HYSTERESIS),
+ },
+};
+
+#define AD7091R_CHANNEL(idx, bits, ev, num_ev) { \
+ .type = IIO_VOLTAGE, \
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW), \
+ .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE), \
+ .indexed = 1, \
+ .channel = idx, \
+ .event_spec = ev, \
+ .num_event_specs = num_ev, \
+ .scan_type.storagebits = 16, \
+ .scan_type.realbits = bits, \
+}
+static const struct iio_chan_spec ad7091r5_channels_irq[] = {
+ AD7091R_CHANNEL(0, 12, ad7091r5_events, ARRAY_SIZE(ad7091r5_events)),
+ AD7091R_CHANNEL(1, 12, ad7091r5_events, ARRAY_SIZE(ad7091r5_events)),
+ AD7091R_CHANNEL(2, 12, ad7091r5_events, ARRAY_SIZE(ad7091r5_events)),
+ AD7091R_CHANNEL(3, 12, ad7091r5_events, ARRAY_SIZE(ad7091r5_events)),
+};
+
+static const struct iio_chan_spec ad7091r5_channels_noirq[] = {
+ AD7091R_CHANNEL(0, 12, NULL, 0),
+ AD7091R_CHANNEL(1, 12, NULL, 0),
+ AD7091R_CHANNEL(2, 12, NULL, 0),
+ AD7091R_CHANNEL(3, 12, NULL, 0),
+};
+
+static const struct ad7091r_chip_info ad7091r5_chip_info_irq = {
+ .channels = ad7091r5_channels_irq,
+ .num_channels = ARRAY_SIZE(ad7091r5_channels_irq),
+ .vref_mV = 2500,
+};
+
+static const struct ad7091r_chip_info ad7091r5_chip_info_noirq = {
+ .channels = ad7091r5_channels_noirq,
+ .num_channels = ARRAY_SIZE(ad7091r5_channels_noirq),
+ .vref_mV = 2500,
+};
+
+static int ad7091r5_i2c_probe(struct i2c_client *i2c,
+ const struct i2c_device_id *id)
+{
+ const struct ad7091r_chip_info *chip_info;
+ struct regmap *map = devm_regmap_init_i2c(i2c, &ad7091r_regmap_config);
+
+ if (IS_ERR(map))
+ return PTR_ERR(map);
+
+ if (i2c->irq)
+ chip_info = &ad7091r5_chip_info_irq;
+ else
+ chip_info = &ad7091r5_chip_info_noirq;
+
+ return ad7091r_probe(&i2c->dev, id->name, chip_info, map, i2c->irq);
+}
+
+static const struct of_device_id ad7091r5_dt_ids[] = {
+ { .compatible = "adi,ad7091r5" },
+ {},
+};
+MODULE_DEVICE_TABLE(of, ad7091r5_dt_ids);
+
+static const struct i2c_device_id ad7091r5_i2c_ids[] = {
+ {"ad7091r5", 0},
+ {}
+};
+MODULE_DEVICE_TABLE(i2c, ad7091r5_i2c_ids);
+
+static struct i2c_driver ad7091r5_driver = {
+ .driver = {
+ .name = "ad7091r5",
+ .of_match_table = ad7091r5_dt_ids,
+ },
+ .probe = ad7091r5_i2c_probe,
+ .id_table = ad7091r5_i2c_ids,
+};
+module_i2c_driver(ad7091r5_driver);
+
+MODULE_AUTHOR("Beniamin Bia <beniamin.bia@analog.com>");
+MODULE_DESCRIPTION("Analog Devices AD7091R5 multi-channel ADC driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/iio/adc/ad7124.c b/drivers/iio/adc/ad7124.c
index 3f03abf100b5..d9915dc71d1e 100644
--- a/drivers/iio/adc/ad7124.c
+++ b/drivers/iio/adc/ad7124.c
@@ -9,6 +9,7 @@
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/err.h>
+#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/regulator/consumer.h>
@@ -224,6 +225,7 @@ static const struct ad_sigma_delta_info ad7124_sigma_delta_info = {
.addr_shift = 0,
.read_mask = BIT(6),
.data_reg = AD7124_DATA,
+ .irq_flags = IRQF_TRIGGER_FALLING,
};
static int ad7124_set_channel_odr(struct ad7124_state *st,
@@ -494,13 +496,11 @@ static int ad7124_of_parse_channel_config(struct iio_dev *indio_dev,
st->channel_config[channel].buf_negative =
of_property_read_bool(child, "adi,buffered-negative");
- *chan = ad7124_channel_template;
- chan->address = channel;
- chan->scan_index = channel;
- chan->channel = ain[0];
- chan->channel2 = ain[1];
-
- chan++;
+ chan[channel] = ad7124_channel_template;
+ chan[channel].address = channel;
+ chan[channel].scan_index = channel;
+ chan[channel].channel = ain[0];
+ chan[channel].channel2 = ain[1];
}
return 0;
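The indexing change above is the functional fix: each channel's table slot now follows the devicetree "reg" value instead of parse order, so sparse or out-of-order child nodes land in the right iio_chan_spec entry. A minimal sketch of the difference, names as in the hunk:

/* Sketch: children described as reg = <2>, <0> used to fill slots
 * 0 and 1 via the advancing pointer; indexed assignment puts them
 * in slots 2 and 0 as intended.
 */
chan[channel] = ad7124_channel_template;	/* slot picked by "reg" */
chan[channel].scan_index = channel;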
diff --git a/drivers/iio/adc/ad7266.c b/drivers/iio/adc/ad7266.c
index c31b8eabb894..c8524f098883 100644
--- a/drivers/iio/adc/ad7266.c
+++ b/drivers/iio/adc/ad7266.c
@@ -11,7 +11,7 @@
#include <linux/spi/spi.h>
#include <linux/regulator/consumer.h>
#include <linux/err.h>
-#include <linux/gpio.h>
+#include <linux/gpio/consumer.h>
#include <linux/module.h>
#include <linux/interrupt.h>
@@ -34,7 +34,7 @@ struct ad7266_state {
enum ad7266_range range;
enum ad7266_mode mode;
bool fixed_addr;
- struct gpio gpios[3];
+ struct gpio_desc *gpios[3];
/*
* DMA (thus cache coherency maintenance) requires the
@@ -117,7 +117,7 @@ static void ad7266_select_input(struct ad7266_state *st, unsigned int nr)
}
for (i = 0; i < 3; ++i)
- gpio_set_value(st->gpios[i].gpio, (bool)(nr & BIT(i)));
+ gpiod_set_value(st->gpios[i], (bool)(nr & BIT(i)));
}
static int ad7266_update_scan_mode(struct iio_dev *indio_dev,
@@ -376,7 +376,7 @@ static void ad7266_init_channels(struct iio_dev *indio_dev)
}
static const char * const ad7266_gpio_labels[] = {
- "AD0", "AD1", "AD2",
+ "ad0", "ad1", "ad2",
};
static int ad7266_probe(struct spi_device *spi)
@@ -419,14 +419,14 @@ static int ad7266_probe(struct spi_device *spi)
if (!st->fixed_addr) {
for (i = 0; i < ARRAY_SIZE(st->gpios); ++i) {
- st->gpios[i].gpio = pdata->addr_gpios[i];
- st->gpios[i].flags = GPIOF_OUT_INIT_LOW;
- st->gpios[i].label = ad7266_gpio_labels[i];
+ st->gpios[i] = devm_gpiod_get(&spi->dev,
+ ad7266_gpio_labels[i],
+ GPIOD_OUT_LOW);
+ if (IS_ERR(st->gpios[i])) {
+ ret = PTR_ERR(st->gpios[i]);
+ goto error_disable_reg;
+ }
}
- ret = gpio_request_array(st->gpios,
- ARRAY_SIZE(st->gpios));
- if (ret)
- goto error_disable_reg;
}
} else {
st->fixed_addr = true;
@@ -465,7 +465,7 @@ static int ad7266_probe(struct spi_device *spi)
ret = iio_triggered_buffer_setup(indio_dev, &iio_pollfunc_store_time,
&ad7266_trigger_handler, &iio_triggered_buffer_setup_ops);
if (ret)
- goto error_free_gpios;
+ goto error_disable_reg;
ret = iio_device_register(indio_dev);
if (ret)
@@ -475,9 +475,6 @@ static int ad7266_probe(struct spi_device *spi)
error_buffer_cleanup:
iio_triggered_buffer_cleanup(indio_dev);
-error_free_gpios:
- if (!st->fixed_addr)
- gpio_free_array(st->gpios, ARRAY_SIZE(st->gpios));
error_disable_reg:
if (!IS_ERR(st->reg))
regulator_disable(st->reg);
@@ -492,8 +489,6 @@ static int ad7266_remove(struct spi_device *spi)
iio_device_unregister(indio_dev);
iio_triggered_buffer_cleanup(indio_dev);
- if (!st->fixed_addr)
- gpio_free_array(st->gpios, ARRAY_SIZE(st->gpios));
if (!IS_ERR(st->reg))
regulator_disable(st->reg);
diff --git a/drivers/iio/adc/ad7780.c b/drivers/iio/adc/ad7780.c
index 217a5a5c3c6d..291c1a898129 100644
--- a/drivers/iio/adc/ad7780.c
+++ b/drivers/iio/adc/ad7780.c
@@ -203,6 +203,7 @@ static const struct ad_sigma_delta_info ad7780_sigma_delta_info = {
.set_mode = ad7780_set_mode,
.postprocess_sample = ad7780_postprocess_sample,
.has_registers = false,
+ .irq_flags = IRQF_TRIGGER_LOW,
};
#define AD7780_CHANNEL(bits, wordsize) \
diff --git a/drivers/iio/adc/ad7791.c b/drivers/iio/adc/ad7791.c
index 54025ea10239..abb239392631 100644
--- a/drivers/iio/adc/ad7791.c
+++ b/drivers/iio/adc/ad7791.c
@@ -205,6 +205,7 @@ static const struct ad_sigma_delta_info ad7791_sigma_delta_info = {
.has_registers = true,
.addr_shift = 4,
.read_mask = BIT(3),
+ .irq_flags = IRQF_TRIGGER_LOW,
};
static int ad7791_read_raw(struct iio_dev *indio_dev,
diff --git a/drivers/iio/adc/ad7793.c b/drivers/iio/adc/ad7793.c
index bbc41ecf0d2f..b747db97f78a 100644
--- a/drivers/iio/adc/ad7793.c
+++ b/drivers/iio/adc/ad7793.c
@@ -206,6 +206,7 @@ static const struct ad_sigma_delta_info ad7793_sigma_delta_info = {
.has_registers = true,
.addr_shift = 3,
.read_mask = BIT(6),
+ .irq_flags = IRQF_TRIGGER_LOW,
};
static const struct ad_sd_calib_data ad7793_calib_arr[6] = {
diff --git a/drivers/iio/adc/ad7887.c b/drivers/iio/adc/ad7887.c
index 6223043e432b..c6a3428e950a 100644
--- a/drivers/iio/adc/ad7887.c
+++ b/drivers/iio/adc/ad7887.c
@@ -43,11 +43,17 @@ enum ad7887_channels {
/**
 * struct ad7887_chip_info - chip specific information
* @int_vref_mv: the internal reference voltage
- * @channel: channel specification
+ * @channels: channels specification
+ * @num_channels: number of channels
+ * @dual_channels: channels specification in dual mode
+ * @num_dual_channels: number of channels in dual mode
*/
struct ad7887_chip_info {
u16 int_vref_mv;
- struct iio_chan_spec channel[3];
+ const struct iio_chan_spec *channels;
+ unsigned int num_channels;
+ const struct iio_chan_spec *dual_channels;
+ unsigned int num_dual_channels;
};
struct ad7887_state {
@@ -183,45 +189,43 @@ static int ad7887_read_raw(struct iio_dev *indio_dev,
return -EINVAL;
}
+#define AD7887_CHANNEL(x) { \
+ .type = IIO_VOLTAGE, \
+ .indexed = 1, \
+ .channel = (x), \
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW), \
+ .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE), \
+ .address = (x), \
+ .scan_index = (x), \
+ .scan_type = { \
+ .sign = 'u', \
+ .realbits = 12, \
+ .storagebits = 16, \
+ .shift = 0, \
+ .endianness = IIO_BE, \
+ }, \
+}
+
+static const struct iio_chan_spec ad7887_channels[] = {
+ AD7887_CHANNEL(0),
+ IIO_CHAN_SOFT_TIMESTAMP(1),
+};
+
+static const struct iio_chan_spec ad7887_dual_channels[] = {
+ AD7887_CHANNEL(0),
+ AD7887_CHANNEL(1),
+ IIO_CHAN_SOFT_TIMESTAMP(2),
+};
static const struct ad7887_chip_info ad7887_chip_info_tbl[] = {
/*
* More devices added in future
*/
[ID_AD7887] = {
- .channel[0] = {
- .type = IIO_VOLTAGE,
- .indexed = 1,
- .channel = 1,
- .info_mask_separate = BIT(IIO_CHAN_INFO_RAW),
- .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE),
- .address = 1,
- .scan_index = 1,
- .scan_type = {
- .sign = 'u',
- .realbits = 12,
- .storagebits = 16,
- .shift = 0,
- .endianness = IIO_BE,
- },
- },
- .channel[1] = {
- .type = IIO_VOLTAGE,
- .indexed = 1,
- .channel = 0,
- .info_mask_separate = BIT(IIO_CHAN_INFO_RAW),
- .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE),
- .address = 0,
- .scan_index = 0,
- .scan_type = {
- .sign = 'u',
- .realbits = 12,
- .storagebits = 16,
- .shift = 0,
- .endianness = IIO_BE,
- },
- },
- .channel[2] = IIO_CHAN_SOFT_TIMESTAMP(2),
+ .channels = ad7887_channels,
+ .num_channels = ARRAY_SIZE(ad7887_channels),
+ .dual_channels = ad7887_dual_channels,
+ .num_dual_channels = ARRAY_SIZE(ad7887_dual_channels),
.int_vref_mv = 2500,
},
};
@@ -306,11 +310,11 @@ static int ad7887_probe(struct spi_device *spi)
spi_message_init(&st->msg[AD7887_CH1]);
spi_message_add_tail(&st->xfer[3], &st->msg[AD7887_CH1]);
- indio_dev->channels = st->chip_info->channel;
- indio_dev->num_channels = 3;
+ indio_dev->channels = st->chip_info->dual_channels;
+ indio_dev->num_channels = st->chip_info->num_dual_channels;
} else {
- indio_dev->channels = &st->chip_info->channel[1];
- indio_dev->num_channels = 2;
+ indio_dev->channels = st->chip_info->channels;
+ indio_dev->num_channels = st->chip_info->num_channels;
}
ret = iio_triggered_buffer_setup(indio_dev, &iio_pollfunc_store_time,
diff --git a/drivers/iio/adc/ad7923.c b/drivers/iio/adc/ad7923.c
index 3212eb4c0f25..1d124c87c6ac 100644
--- a/drivers/iio/adc/ad7923.c
+++ b/drivers/iio/adc/ad7923.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
- * AD7904/AD7914/AD7923/AD7924 SPI ADC driver
+ * AD7904/AD7914/AD7923/AD7924/AD7908/AD7918/AD7928 SPI ADC driver
*
* Copyright 2011 Analog Devices Inc (from AD7923 Driver)
* Copyright 2012 CS Systemes d'Information
@@ -29,15 +29,10 @@
#define AD7923_PM_MODE_AS (1) /* auto shutdown */
#define AD7923_PM_MODE_FS (2) /* full shutdown */
#define AD7923_PM_MODE_OPS (3) /* normal operation */
-#define AD7923_CHANNEL_0 (0) /* analog input 0 */
-#define AD7923_CHANNEL_1 (1) /* analog input 1 */
-#define AD7923_CHANNEL_2 (2) /* analog input 2 */
-#define AD7923_CHANNEL_3 (3) /* analog input 3 */
 #define AD7923_SEQUENCE_OFF (0) /* no sequence function */
#define AD7923_SEQUENCE_PROTECT (2) /* no interrupt write cycle */
#define AD7923_SEQUENCE_ON (3) /* continuous sequence */
-#define AD7923_MAX_CHAN 4
#define AD7923_PM_MODE_WRITE(mode) ((mode) << 4) /* write mode */
#define AD7923_CHANNEL_WRITE(channel) ((channel) << 6) /* write channel */
@@ -78,6 +73,9 @@ enum ad7923_id {
AD7904,
AD7914,
AD7924,
+ AD7908,
+ AD7918,
+ AD7928
};
#define AD7923_V_CHAN(index, bits) \
@@ -106,9 +104,25 @@ const struct iio_chan_spec name ## _channels[] = { \
IIO_CHAN_SOFT_TIMESTAMP(4), \
}
+#define DECLARE_AD7908_CHANNELS(name, bits) \
+const struct iio_chan_spec name ## _channels[] = { \
+ AD7923_V_CHAN(0, bits), \
+ AD7923_V_CHAN(1, bits), \
+ AD7923_V_CHAN(2, bits), \
+ AD7923_V_CHAN(3, bits), \
+ AD7923_V_CHAN(4, bits), \
+ AD7923_V_CHAN(5, bits), \
+ AD7923_V_CHAN(6, bits), \
+ AD7923_V_CHAN(7, bits), \
+ IIO_CHAN_SOFT_TIMESTAMP(8), \
+}
+
static DECLARE_AD7923_CHANNELS(ad7904, 8);
static DECLARE_AD7923_CHANNELS(ad7914, 10);
static DECLARE_AD7923_CHANNELS(ad7924, 12);
+static DECLARE_AD7908_CHANNELS(ad7908, 8);
+static DECLARE_AD7908_CHANNELS(ad7918, 10);
+static DECLARE_AD7908_CHANNELS(ad7928, 12);
static const struct ad7923_chip_info ad7923_chip_info[] = {
[AD7904] = {
@@ -123,6 +137,18 @@ static const struct ad7923_chip_info ad7923_chip_info[] = {
.channels = ad7924_channels,
.num_channels = ARRAY_SIZE(ad7924_channels),
},
+ [AD7908] = {
+ .channels = ad7908_channels,
+ .num_channels = ARRAY_SIZE(ad7908_channels),
+ },
+ [AD7918] = {
+ .channels = ad7918_channels,
+ .num_channels = ARRAY_SIZE(ad7918_channels),
+ },
+ [AD7928] = {
+ .channels = ad7928_channels,
+ .num_channels = ARRAY_SIZE(ad7928_channels),
+ },
};
/**
@@ -135,7 +161,11 @@ static int ad7923_update_scan_mode(struct iio_dev *indio_dev,
int i, cmd, len;
len = 0;
- for_each_set_bit(i, active_scan_mask, AD7923_MAX_CHAN) {
+ /*
+ * For this driver the last channel is always the software timestamp so
+ * skip that one.
+ */
+ for_each_set_bit(i, active_scan_mask, indio_dev->num_channels - 1) {
cmd = AD7923_WRITE_CR | AD7923_CHANNEL_WRITE(i) |
AD7923_SEQUENCE_WRITE(AD7923_SEQUENCE_OFF) |
st->settings;
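With the channel count now varying per chip, the fixed AD7923_MAX_CHAN bound is gone and the loop limit is derived from num_channels, minus one for the soft timestamp. An illustrative reduction of the arithmetic:

/* Illustrative: an 8-channel part registers 9 entries (8 voltage
 * + 1 soft timestamp), so valid hardware scan bits are 0..7 and
 * the bound below excludes the timestamp.
 */
unsigned int hw_channels = indio_dev->num_channels - 1;

for_each_set_bit(i, active_scan_mask, hw_channels)
	;	/* program the sequencer for channel i */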
@@ -188,7 +218,7 @@ done:
return IRQ_HANDLED;
}
-static int ad7923_scan_direct(struct ad7923_state *st, unsigned ch)
+static int ad7923_scan_direct(struct ad7923_state *st, unsigned int ch)
{
int ret, cmd;
@@ -348,13 +378,29 @@ static const struct spi_device_id ad7923_id[] = {
{"ad7914", AD7914},
{"ad7923", AD7924},
{"ad7924", AD7924},
+ {"ad7908", AD7908},
+ {"ad7918", AD7918},
+ {"ad7928", AD7928},
{}
};
MODULE_DEVICE_TABLE(spi, ad7923_id);
+static const struct of_device_id ad7923_of_match[] = {
+ { .compatible = "adi,ad7904", },
+ { .compatible = "adi,ad7914", },
+ { .compatible = "adi,ad7923", },
+ { .compatible = "adi,ad7924", },
+ { .compatible = "adi,ad7908", },
+ { .compatible = "adi,ad7918", },
+ { .compatible = "adi,ad7928", },
+ { },
+};
+MODULE_DEVICE_TABLE(of, ad7923_of_match);
+
static struct spi_driver ad7923_driver = {
.driver = {
.name = "ad7923",
+ .of_match_table = ad7923_of_match,
},
.probe = ad7923_probe,
.remove = ad7923_remove,
@@ -364,5 +410,5 @@ module_spi_driver(ad7923_driver);
MODULE_AUTHOR("Michael Hennerich <michael.hennerich@analog.com>");
MODULE_AUTHOR("Patrick Vasseur <patrick.vasseur@c-s.fr>");
-MODULE_DESCRIPTION("Analog Devices AD7904/AD7914/AD7923/AD7924 ADC");
+MODULE_DESCRIPTION("Analog Devices AD7923 and similar ADC");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/iio/adc/ad799x.c b/drivers/iio/adc/ad799x.c
index f658012baad8..ef013af1aec0 100644
--- a/drivers/iio/adc/ad799x.c
+++ b/drivers/iio/adc/ad799x.c
@@ -167,6 +167,21 @@ static int ad799x_read_config(struct ad799x_state *st)
}
}
+static int ad799x_update_config(struct ad799x_state *st, u16 config)
+{
+ int ret;
+
+ ret = ad799x_write_config(st, config);
+ if (ret < 0)
+ return ret;
+ ret = ad799x_read_config(st);
+ if (ret < 0)
+ return ret;
+ st->config = ret;
+
+ return 0;
+}
+
/**
 * ad799x_trigger_handler() - bottom half of trigger-launched polling to the ring buffer
*
@@ -808,13 +823,9 @@ static int ad799x_probe(struct i2c_client *client,
indio_dev->channels = st->chip_config->channel;
indio_dev->num_channels = chip_info->num_channels;
- ret = ad799x_write_config(st, st->chip_config->default_config);
- if (ret < 0)
- goto error_disable_vref;
- ret = ad799x_read_config(st);
- if (ret < 0)
+ ret = ad799x_update_config(st, st->chip_config->default_config);
+ if (ret)
goto error_disable_vref;
- st->config = ret;
ret = iio_triggered_buffer_setup(indio_dev, NULL,
&ad799x_trigger_handler, NULL);
@@ -864,6 +875,48 @@ static int ad799x_remove(struct i2c_client *client)
return 0;
}
+static int __maybe_unused ad799x_suspend(struct device *dev)
+{
+ struct iio_dev *indio_dev = i2c_get_clientdata(to_i2c_client(dev));
+ struct ad799x_state *st = iio_priv(indio_dev);
+
+ regulator_disable(st->vref);
+ regulator_disable(st->reg);
+
+ return 0;
+}
+
+static int __maybe_unused ad799x_resume(struct device *dev)
+{
+ struct iio_dev *indio_dev = i2c_get_clientdata(to_i2c_client(dev));
+ struct ad799x_state *st = iio_priv(indio_dev);
+ int ret;
+
+ ret = regulator_enable(st->reg);
+ if (ret) {
+ dev_err(dev, "Unable to enable vcc regulator\n");
+ return ret;
+ }
+ ret = regulator_enable(st->vref);
+ if (ret) {
+ regulator_disable(st->reg);
+ dev_err(dev, "Unable to enable vref regulator\n");
+ return ret;
+ }
+
+ /* resync config */
+ ret = ad799x_update_config(st, st->config);
+ if (ret) {
+ regulator_disable(st->vref);
+ regulator_disable(st->reg);
+ return ret;
+ }
+
+ return 0;
+}
+
+static SIMPLE_DEV_PM_OPS(ad799x_pm_ops, ad799x_suspend, ad799x_resume);
+
static const struct i2c_device_id ad799x_id[] = {
{ "ad7991", ad7991 },
{ "ad7995", ad7995 },
@@ -881,6 +934,7 @@ MODULE_DEVICE_TABLE(i2c, ad799x_id);
static struct i2c_driver ad799x_driver = {
.driver = {
.name = "ad799x",
+ .pm = &ad799x_pm_ops,
},
.probe = ad799x_probe,
.remove = ad799x_remove,
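Suspend powers both supplies down; resume re-enables them in order and rewrites the configuration register through ad799x_update_config(), since the part loses its setup without power. The __maybe_unused plus SIMPLE_DEV_PM_OPS pairing is the stock idiom, reduced here to a sketch:

/* Sketch: SIMPLE_DEV_PM_OPS always emits the ops table, so the
 * callbacks carry __maybe_unused to stay warning-free when
 * CONFIG_PM_SLEEP is disabled.
 */
static int __maybe_unused my_suspend(struct device *dev)
{
	return 0;	/* cut power here */
}

static int __maybe_unused my_resume(struct device *dev)
{
	return 0;	/* restore power, then resync device registers */
}

static SIMPLE_DEV_PM_OPS(my_pm_ops, my_suspend, my_resume);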
diff --git a/drivers/iio/adc/ad_sigma_delta.c b/drivers/iio/adc/ad_sigma_delta.c
index 8ba90486c787..8115b6de1d6c 100644
--- a/drivers/iio/adc/ad_sigma_delta.c
+++ b/drivers/iio/adc/ad_sigma_delta.c
@@ -500,7 +500,7 @@ static int ad_sd_probe_trigger(struct iio_dev *indio_dev)
ret = request_irq(sigma_delta->spi->irq,
ad_sd_data_rdy_trig_poll,
- IRQF_TRIGGER_LOW,
+ sigma_delta->info->irq_flags,
indio_dev->name,
sigma_delta);
if (ret)
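This is the consumer side of the .irq_flags fields added in the ad7124, ad7780, ad7791 and ad7793 hunks: the shared sigma-delta core stops hardcoding a level-low interrupt and takes the polarity from the chip description. What a client driver supplies, sketched with an illustrative name:

static const struct ad_sigma_delta_info my_sd_info = {
	.has_registers	= true,
	.irq_flags	= IRQF_TRIGGER_FALLING,	/* RDY falls when data is ready */
};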
diff --git a/drivers/iio/adc/at91-sama5d2_adc.c b/drivers/iio/adc/at91-sama5d2_adc.c
index e1850f3d5cf3..a5c7771227d5 100644
--- a/drivers/iio/adc/at91-sama5d2_adc.c
+++ b/drivers/iio/adc/at91-sama5d2_adc.c
@@ -1444,10 +1444,10 @@ static void at91_adc_dma_init(struct platform_device *pdev)
if (st->dma_st.dma_chan)
return;
- st->dma_st.dma_chan = dma_request_slave_channel(&pdev->dev, "rx");
-
- if (!st->dma_st.dma_chan) {
+ st->dma_st.dma_chan = dma_request_chan(&pdev->dev, "rx");
+ if (IS_ERR(st->dma_st.dma_chan)) {
dev_info(&pdev->dev, "can't get DMA channel\n");
+ st->dma_st.dma_chan = NULL;
goto dma_exit;
}
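Unlike dma_request_slave_channel(), dma_request_chan() returns an ERR_PTR, so callers can distinguish a genuinely absent channel from a transient failure such as probe deferral. For drivers where DMA is optional, the series settles on roughly this idiom (sketch):

chan = dma_request_chan(dev, "rx");
if (IS_ERR(chan)) {
	ret = PTR_ERR(chan);
	if (ret != -ENODEV)
		return ret;	/* real error, including -EPROBE_DEFER */
	chan = NULL;		/* no channel described: fall back to IRQ mode */
}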
diff --git a/drivers/iio/adc/ltc2496.c b/drivers/iio/adc/ltc2496.c
new file mode 100644
index 000000000000..88a30156a849
--- /dev/null
+++ b/drivers/iio/adc/ltc2496.c
@@ -0,0 +1,108 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * ltc2496.c - Driver for Analog Devices/Linear Technology LTC2496 ADC
+ *
+ * Based on ltc2497.c which has
+ * Copyright (C) 2017 Analog Devices Inc.
+ *
+ * Licensed under the GPL-2.
+ *
+ * Datasheet: https://www.analog.com/media/en/technical-documentation/data-sheets/2496fc.pdf
+ */
+
+#include <linux/spi/spi.h>
+#include <linux/iio/iio.h>
+#include <linux/iio/driver.h>
+#include <linux/module.h>
+#include <linux/of.h>
+
+#include "ltc2497.h"
+
+struct ltc2496_driverdata {
+ /* this must be the first member */
+ struct ltc2497core_driverdata common_ddata;
+ struct spi_device *spi;
+
+ /*
+ * DMA (thus cache coherency maintenance) requires the
+ * transfer buffers to live in their own cache lines.
+ */
+ unsigned char rxbuf[3] ____cacheline_aligned;
+ unsigned char txbuf[3];
+};
+
+static int ltc2496_result_and_measure(struct ltc2497core_driverdata *ddata,
+ u8 address, int *val)
+{
+ struct ltc2496_driverdata *st =
+ container_of(ddata, struct ltc2496_driverdata, common_ddata);
+ struct spi_transfer t = {
+ .tx_buf = st->txbuf,
+ .rx_buf = st->rxbuf,
+ .len = sizeof(st->txbuf),
+ };
+ int ret;
+
+ st->txbuf[0] = LTC2497_ENABLE | address;
+
+ ret = spi_sync_transfer(st->spi, &t, 1);
+ if (ret < 0) {
+ dev_err(&st->spi->dev, "spi_sync_transfer failed: %pe\n",
+ ERR_PTR(ret));
+ return ret;
+ }
+
+ if (val)
+ *val = ((st->rxbuf[0] & 0x3f) << 12 |
+ st->rxbuf[1] << 4 | st->rxbuf[2] >> 4) -
+ (1 << 17);
+
+ return 0;
+}
+
+static int ltc2496_probe(struct spi_device *spi)
+{
+ struct iio_dev *indio_dev;
+ struct ltc2496_driverdata *st;
+ struct device *dev = &spi->dev;
+
+ indio_dev = devm_iio_device_alloc(dev, sizeof(*st));
+ if (!indio_dev)
+ return -ENOMEM;
+
+ st = iio_priv(indio_dev);
+ spi_set_drvdata(spi, indio_dev);
+ st->spi = spi;
+ st->common_ddata.result_and_measure = ltc2496_result_and_measure;
+
+ return ltc2497core_probe(dev, indio_dev);
+}
+
+static int ltc2496_remove(struct spi_device *spi)
+{
+ struct iio_dev *indio_dev = spi_get_drvdata(spi);
+
+ ltc2497core_remove(indio_dev);
+
+ return 0;
+}
+
+static const struct of_device_id ltc2496_of_match[] = {
+ { .compatible = "lltc,ltc2496", },
+ {},
+};
+MODULE_DEVICE_TABLE(of, ltc2496_of_match);
+
+static struct spi_driver ltc2496_driver = {
+ .driver = {
+ .name = "ltc2496",
+ .of_match_table = of_match_ptr(ltc2496_of_match),
+ },
+ .probe = ltc2496_probe,
+ .remove = ltc2496_remove,
+};
+module_spi_driver(ltc2496_driver);
+
+MODULE_AUTHOR("Uwe Kleine-König <u.kleine-könig@pengutronix.de>");
+MODULE_DESCRIPTION("Linear Technology LTC2496 ADC driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/iio/adc/ltc2497-core.c b/drivers/iio/adc/ltc2497-core.c
new file mode 100644
index 000000000000..f5f7039caacc
--- /dev/null
+++ b/drivers/iio/adc/ltc2497-core.c
@@ -0,0 +1,243 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * ltc2497-core.c - Common code for Analog Devices/Linear Technology
+ * LTC2496 and LTC2497 ADCs
+ *
+ * Copyright (C) 2017 Analog Devices Inc.
+ */
+
+#include <linux/delay.h>
+#include <linux/iio/iio.h>
+#include <linux/iio/driver.h>
+#include <linux/module.h>
+#include <linux/regulator/consumer.h>
+
+#include "ltc2497.h"
+
+#define LTC2497_SGL BIT(4)
+#define LTC2497_DIFF 0
+#define LTC2497_SIGN BIT(3)
+
+static int ltc2497core_wait_conv(struct ltc2497core_driverdata *ddata)
+{
+ s64 time_elapsed;
+
+ time_elapsed = ktime_ms_delta(ktime_get(), ddata->time_prev);
+
+ if (time_elapsed < LTC2497_CONVERSION_TIME_MS) {
+ /* delay if conversion time not passed
+ * since last read or write
+ */
+ if (msleep_interruptible(
+ LTC2497_CONVERSION_TIME_MS - time_elapsed))
+ return -ERESTARTSYS;
+
+ return 0;
+ }
+
+ if (time_elapsed - LTC2497_CONVERSION_TIME_MS <= 0) {
+ /* We're in automatic mode -
+ * so the last reading is still not outdated
+ */
+ return 0;
+ }
+
+ return 1;
+}
+
+static int ltc2497core_read(struct ltc2497core_driverdata *ddata, u8 address, int *val)
+{
+ int ret;
+
+ ret = ltc2497core_wait_conv(ddata);
+ if (ret < 0)
+ return ret;
+
+ if (ret || ddata->addr_prev != address) {
+ ret = ddata->result_and_measure(ddata, address, NULL);
+ if (ret < 0)
+ return ret;
+ ddata->addr_prev = address;
+
+ if (msleep_interruptible(LTC2497_CONVERSION_TIME_MS))
+ return -ERESTARTSYS;
+ }
+
+ ret = ddata->result_and_measure(ddata, address, val);
+ if (ret < 0)
+ return ret;
+
+ ddata->time_prev = ktime_get();
+
+ return ret;
+}
+
+static int ltc2497core_read_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ int *val, int *val2, long mask)
+{
+ struct ltc2497core_driverdata *ddata = iio_priv(indio_dev);
+ int ret;
+
+ switch (mask) {
+ case IIO_CHAN_INFO_RAW:
+ mutex_lock(&indio_dev->mlock);
+ ret = ltc2497core_read(ddata, chan->address, val);
+ mutex_unlock(&indio_dev->mlock);
+ if (ret < 0)
+ return ret;
+
+ return IIO_VAL_INT;
+
+ case IIO_CHAN_INFO_SCALE:
+ ret = regulator_get_voltage(ddata->ref);
+ if (ret < 0)
+ return ret;
+
+ *val = ret / 1000;
+ *val2 = 17;
+
+ return IIO_VAL_FRACTIONAL_LOG2;
+
+ default:
+ return -EINVAL;
+ }
+}
+
+#define LTC2497_CHAN(_chan, _addr, _ds_name) { \
+ .type = IIO_VOLTAGE, \
+ .indexed = 1, \
+ .channel = (_chan), \
+ .address = (_addr | (_chan / 2) | ((_chan & 1) ? LTC2497_SIGN : 0)), \
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW), \
+ .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE), \
+ .datasheet_name = (_ds_name), \
+}
+
+#define LTC2497_CHAN_DIFF(_chan, _addr) { \
+ .type = IIO_VOLTAGE, \
+ .indexed = 1, \
+ .channel = (_chan) * 2 + ((_addr) & LTC2497_SIGN ? 1 : 0), \
+ .channel2 = (_chan) * 2 + ((_addr) & LTC2497_SIGN ? 0 : 1),\
+ .address = (_addr | _chan), \
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW), \
+ .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE), \
+ .differential = 1, \
+}
+
+static const struct iio_chan_spec ltc2497core_channel[] = {
+ LTC2497_CHAN(0, LTC2497_SGL, "CH0"),
+ LTC2497_CHAN(1, LTC2497_SGL, "CH1"),
+ LTC2497_CHAN(2, LTC2497_SGL, "CH2"),
+ LTC2497_CHAN(3, LTC2497_SGL, "CH3"),
+ LTC2497_CHAN(4, LTC2497_SGL, "CH4"),
+ LTC2497_CHAN(5, LTC2497_SGL, "CH5"),
+ LTC2497_CHAN(6, LTC2497_SGL, "CH6"),
+ LTC2497_CHAN(7, LTC2497_SGL, "CH7"),
+ LTC2497_CHAN(8, LTC2497_SGL, "CH8"),
+ LTC2497_CHAN(9, LTC2497_SGL, "CH9"),
+ LTC2497_CHAN(10, LTC2497_SGL, "CH10"),
+ LTC2497_CHAN(11, LTC2497_SGL, "CH11"),
+ LTC2497_CHAN(12, LTC2497_SGL, "CH12"),
+ LTC2497_CHAN(13, LTC2497_SGL, "CH13"),
+ LTC2497_CHAN(14, LTC2497_SGL, "CH14"),
+ LTC2497_CHAN(15, LTC2497_SGL, "CH15"),
+ LTC2497_CHAN_DIFF(0, LTC2497_DIFF),
+ LTC2497_CHAN_DIFF(1, LTC2497_DIFF),
+ LTC2497_CHAN_DIFF(2, LTC2497_DIFF),
+ LTC2497_CHAN_DIFF(3, LTC2497_DIFF),
+ LTC2497_CHAN_DIFF(4, LTC2497_DIFF),
+ LTC2497_CHAN_DIFF(5, LTC2497_DIFF),
+ LTC2497_CHAN_DIFF(6, LTC2497_DIFF),
+ LTC2497_CHAN_DIFF(7, LTC2497_DIFF),
+ LTC2497_CHAN_DIFF(0, LTC2497_DIFF | LTC2497_SIGN),
+ LTC2497_CHAN_DIFF(1, LTC2497_DIFF | LTC2497_SIGN),
+ LTC2497_CHAN_DIFF(2, LTC2497_DIFF | LTC2497_SIGN),
+ LTC2497_CHAN_DIFF(3, LTC2497_DIFF | LTC2497_SIGN),
+ LTC2497_CHAN_DIFF(4, LTC2497_DIFF | LTC2497_SIGN),
+ LTC2497_CHAN_DIFF(5, LTC2497_DIFF | LTC2497_SIGN),
+ LTC2497_CHAN_DIFF(6, LTC2497_DIFF | LTC2497_SIGN),
+ LTC2497_CHAN_DIFF(7, LTC2497_DIFF | LTC2497_SIGN),
+};
+
+static const struct iio_info ltc2497core_info = {
+ .read_raw = ltc2497core_read_raw,
+};
+
+int ltc2497core_probe(struct device *dev, struct iio_dev *indio_dev)
+{
+ struct ltc2497core_driverdata *ddata = iio_priv(indio_dev);
+ int ret;
+
+ indio_dev->dev.parent = dev;
+ indio_dev->name = dev_name(dev);
+ indio_dev->info = &ltc2497core_info;
+ indio_dev->modes = INDIO_DIRECT_MODE;
+ indio_dev->channels = ltc2497core_channel;
+ indio_dev->num_channels = ARRAY_SIZE(ltc2497core_channel);
+
+ ret = ddata->result_and_measure(ddata, LTC2497_CONFIG_DEFAULT, NULL);
+ if (ret < 0)
+ return ret;
+
+ ddata->ref = devm_regulator_get(dev, "vref");
+ if (IS_ERR(ddata->ref)) {
+ if (PTR_ERR(ddata->ref) != -EPROBE_DEFER)
+ dev_err(dev, "Failed to get vref regulator: %pe\n",
+ ddata->ref);
+
+ return PTR_ERR(ddata->ref);
+ }
+
+ ret = regulator_enable(ddata->ref);
+ if (ret < 0) {
+ dev_err(dev, "Failed to enable vref regulator: %pe\n",
+ ERR_PTR(ret));
+ return ret;
+ }
+
+ if (dev->platform_data) {
+ struct iio_map *plat_data;
+
+ plat_data = (struct iio_map *)dev->platform_data;
+
+ ret = iio_map_array_register(indio_dev, plat_data);
+ if (ret) {
+ dev_err(&indio_dev->dev, "iio map err: %d\n", ret);
+ goto err_regulator_disable;
+ }
+ }
+
+ ddata->addr_prev = LTC2497_CONFIG_DEFAULT;
+ ddata->time_prev = ktime_get();
+
+ ret = iio_device_register(indio_dev);
+ if (ret < 0)
+ goto err_array_unregister;
+
+ return 0;
+
+err_array_unregister:
+ iio_map_array_unregister(indio_dev);
+
+err_regulator_disable:
+ regulator_disable(ddata->ref);
+
+ return ret;
+}
+EXPORT_SYMBOL_NS(ltc2497core_probe, LTC2497);
+
+void ltc2497core_remove(struct iio_dev *indio_dev)
+{
+ struct ltc2497core_driverdata *ddata = iio_priv(indio_dev);
+
+ iio_device_unregister(indio_dev);
+
+ iio_map_array_unregister(indio_dev);
+
+ regulator_disable(ddata->ref);
+}
+EXPORT_SYMBOL_NS(ltc2497core_remove, LTC2497);
+
+MODULE_DESCRIPTION("common code for LTC2496/LTC2497 drivers");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/iio/adc/ltc2497.c b/drivers/iio/adc/ltc2497.c
index 470406032720..5db63d7c6bc5 100644
--- a/drivers/iio/adc/ltc2497.c
+++ b/drivers/iio/adc/ltc2497.c
@@ -7,27 +7,18 @@
* Datasheet: http://cds.linear.com/docs/en/datasheet/2497fd.pdf
*/
-#include <linux/delay.h>
#include <linux/i2c.h>
#include <linux/iio/iio.h>
#include <linux/iio/driver.h>
-#include <linux/iio/sysfs.h>
#include <linux/module.h>
#include <linux/of.h>
-#include <linux/regulator/consumer.h>
-#define LTC2497_ENABLE 0xA0
-#define LTC2497_SGL BIT(4)
-#define LTC2497_DIFF 0
-#define LTC2497_SIGN BIT(3)
-#define LTC2497_CONFIG_DEFAULT LTC2497_ENABLE
-#define LTC2497_CONVERSION_TIME_MS 150ULL
+#include "ltc2497.h"
-struct ltc2497_st {
+struct ltc2497_driverdata {
+ /* this must be the first member */
+ struct ltc2497core_driverdata common_ddata;
struct i2c_client *client;
- struct regulator *ref;
- ktime_t time_prev;
- u8 addr_prev;
/*
* DMA (thus cache coherency maintenance) requires the
* transfer buffers to live in their own cache lines.
@@ -35,232 +26,59 @@ struct ltc2497_st {
__be32 buf ____cacheline_aligned;
};
-static int ltc2497_wait_conv(struct ltc2497_st *st)
+static int ltc2497_result_and_measure(struct ltc2497core_driverdata *ddata,
+ u8 address, int *val)
{
- s64 time_elapsed;
-
- time_elapsed = ktime_ms_delta(ktime_get(), st->time_prev);
-
- if (time_elapsed < LTC2497_CONVERSION_TIME_MS) {
- /* delay if conversion time not passed
- * since last read or write
- */
- if (msleep_interruptible(
- LTC2497_CONVERSION_TIME_MS - time_elapsed))
- return -ERESTARTSYS;
-
- return 0;
- }
-
- if (time_elapsed - LTC2497_CONVERSION_TIME_MS <= 0) {
- /* We're in automatic mode -
- * so the last reading is stil not outdated
- */
- return 0;
- }
-
- return 1;
-}
-
-static int ltc2497_read(struct ltc2497_st *st, u8 address, int *val)
-{
- struct i2c_client *client = st->client;
- int ret;
-
- ret = ltc2497_wait_conv(st);
- if (ret < 0)
- return ret;
-
- if (ret || st->addr_prev != address) {
- ret = i2c_smbus_write_byte(st->client,
- LTC2497_ENABLE | address);
- if (ret < 0)
- return ret;
- st->addr_prev = address;
- if (msleep_interruptible(LTC2497_CONVERSION_TIME_MS))
- return -ERESTARTSYS;
- }
- ret = i2c_master_recv(client, (char *)&st->buf, 3);
- if (ret < 0) {
- dev_err(&client->dev, "i2c_master_recv failed\n");
- return ret;
- }
- st->time_prev = ktime_get();
-
- /* convert and shift the result,
- * and finally convert from offset binary to signed integer
- */
- *val = (be32_to_cpu(st->buf) >> 14) - (1 << 17);
-
- return ret;
-}
-
-static int ltc2497_read_raw(struct iio_dev *indio_dev,
- struct iio_chan_spec const *chan,
- int *val, int *val2, long mask)
-{
- struct ltc2497_st *st = iio_priv(indio_dev);
+ struct ltc2497_driverdata *st =
+ container_of(ddata, struct ltc2497_driverdata, common_ddata);
int ret;
- switch (mask) {
- case IIO_CHAN_INFO_RAW:
- mutex_lock(&indio_dev->mlock);
- ret = ltc2497_read(st, chan->address, val);
- mutex_unlock(&indio_dev->mlock);
- if (ret < 0)
- return ret;
-
- return IIO_VAL_INT;
-
- case IIO_CHAN_INFO_SCALE:
- ret = regulator_get_voltage(st->ref);
- if (ret < 0)
+ if (val) {
+ ret = i2c_master_recv(st->client, (char *)&st->buf, 3);
+ if (ret < 0) {
+ dev_err(&st->client->dev, "i2c_master_recv failed\n");
return ret;
+ }
- *val = ret / 1000;
- *val2 = 17;
-
- return IIO_VAL_FRACTIONAL_LOG2;
-
- default:
- return -EINVAL;
+ *val = (be32_to_cpu(st->buf) >> 14) - (1 << 17);
}
-}
-#define LTC2497_CHAN(_chan, _addr, _ds_name) { \
- .type = IIO_VOLTAGE, \
- .indexed = 1, \
- .channel = (_chan), \
- .address = (_addr | (_chan / 2) | ((_chan & 1) ? LTC2497_SIGN : 0)), \
- .info_mask_separate = BIT(IIO_CHAN_INFO_RAW), \
- .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE), \
- .datasheet_name = (_ds_name), \
-}
-
-#define LTC2497_CHAN_DIFF(_chan, _addr) { \
- .type = IIO_VOLTAGE, \
- .indexed = 1, \
- .channel = (_chan) * 2 + ((_addr) & LTC2497_SIGN ? 1 : 0), \
- .channel2 = (_chan) * 2 + ((_addr) & LTC2497_SIGN ? 0 : 1),\
- .address = (_addr | _chan), \
- .info_mask_separate = BIT(IIO_CHAN_INFO_RAW), \
- .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE), \
- .differential = 1, \
+ ret = i2c_smbus_write_byte(st->client,
+ LTC2497_ENABLE | address);
+ if (ret)
+ dev_err(&st->client->dev, "i2c transfer failed: %pe\n",
+ ERR_PTR(ret));
+ return ret;
}
-static const struct iio_chan_spec ltc2497_channel[] = {
- LTC2497_CHAN(0, LTC2497_SGL, "CH0"),
- LTC2497_CHAN(1, LTC2497_SGL, "CH1"),
- LTC2497_CHAN(2, LTC2497_SGL, "CH2"),
- LTC2497_CHAN(3, LTC2497_SGL, "CH3"),
- LTC2497_CHAN(4, LTC2497_SGL, "CH4"),
- LTC2497_CHAN(5, LTC2497_SGL, "CH5"),
- LTC2497_CHAN(6, LTC2497_SGL, "CH6"),
- LTC2497_CHAN(7, LTC2497_SGL, "CH7"),
- LTC2497_CHAN(8, LTC2497_SGL, "CH8"),
- LTC2497_CHAN(9, LTC2497_SGL, "CH9"),
- LTC2497_CHAN(10, LTC2497_SGL, "CH10"),
- LTC2497_CHAN(11, LTC2497_SGL, "CH11"),
- LTC2497_CHAN(12, LTC2497_SGL, "CH12"),
- LTC2497_CHAN(13, LTC2497_SGL, "CH13"),
- LTC2497_CHAN(14, LTC2497_SGL, "CH14"),
- LTC2497_CHAN(15, LTC2497_SGL, "CH15"),
- LTC2497_CHAN_DIFF(0, LTC2497_DIFF),
- LTC2497_CHAN_DIFF(1, LTC2497_DIFF),
- LTC2497_CHAN_DIFF(2, LTC2497_DIFF),
- LTC2497_CHAN_DIFF(3, LTC2497_DIFF),
- LTC2497_CHAN_DIFF(4, LTC2497_DIFF),
- LTC2497_CHAN_DIFF(5, LTC2497_DIFF),
- LTC2497_CHAN_DIFF(6, LTC2497_DIFF),
- LTC2497_CHAN_DIFF(7, LTC2497_DIFF),
- LTC2497_CHAN_DIFF(0, LTC2497_DIFF | LTC2497_SIGN),
- LTC2497_CHAN_DIFF(1, LTC2497_DIFF | LTC2497_SIGN),
- LTC2497_CHAN_DIFF(2, LTC2497_DIFF | LTC2497_SIGN),
- LTC2497_CHAN_DIFF(3, LTC2497_DIFF | LTC2497_SIGN),
- LTC2497_CHAN_DIFF(4, LTC2497_DIFF | LTC2497_SIGN),
- LTC2497_CHAN_DIFF(5, LTC2497_DIFF | LTC2497_SIGN),
- LTC2497_CHAN_DIFF(6, LTC2497_DIFF | LTC2497_SIGN),
- LTC2497_CHAN_DIFF(7, LTC2497_DIFF | LTC2497_SIGN),
-};
-
-static const struct iio_info ltc2497_info = {
- .read_raw = ltc2497_read_raw,
-};
-
static int ltc2497_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
struct iio_dev *indio_dev;
- struct ltc2497_st *st;
- struct iio_map *plat_data;
- int ret;
+ struct ltc2497_driverdata *st;
+ struct device *dev = &client->dev;
if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C |
I2C_FUNC_SMBUS_WRITE_BYTE))
return -EOPNOTSUPP;
- indio_dev = devm_iio_device_alloc(&client->dev, sizeof(*st));
+ indio_dev = devm_iio_device_alloc(dev, sizeof(*st));
if (!indio_dev)
return -ENOMEM;
st = iio_priv(indio_dev);
i2c_set_clientdata(client, indio_dev);
st->client = client;
+ st->common_ddata.result_and_measure = ltc2497_result_and_measure;
- indio_dev->dev.parent = &client->dev;
- indio_dev->name = id->name;
- indio_dev->info = &ltc2497_info;
- indio_dev->modes = INDIO_DIRECT_MODE;
- indio_dev->channels = ltc2497_channel;
- indio_dev->num_channels = ARRAY_SIZE(ltc2497_channel);
-
- st->ref = devm_regulator_get(&client->dev, "vref");
- if (IS_ERR(st->ref))
- return PTR_ERR(st->ref);
-
- ret = regulator_enable(st->ref);
- if (ret < 0)
- return ret;
-
- if (client->dev.platform_data) {
- plat_data = ((struct iio_map *)client->dev.platform_data);
- ret = iio_map_array_register(indio_dev, plat_data);
- if (ret) {
- dev_err(&indio_dev->dev, "iio map err: %d\n", ret);
- goto err_regulator_disable;
- }
- }
-
- ret = i2c_smbus_write_byte(st->client, LTC2497_CONFIG_DEFAULT);
- if (ret < 0)
- goto err_array_unregister;
-
- st->addr_prev = LTC2497_CONFIG_DEFAULT;
- st->time_prev = ktime_get();
-
- ret = iio_device_register(indio_dev);
- if (ret < 0)
- goto err_array_unregister;
-
- return 0;
-
-err_array_unregister:
- iio_map_array_unregister(indio_dev);
-
-err_regulator_disable:
- regulator_disable(st->ref);
-
- return ret;
+ return ltc2497core_probe(dev, indio_dev);
}
static int ltc2497_remove(struct i2c_client *client)
{
struct iio_dev *indio_dev = i2c_get_clientdata(client);
- struct ltc2497_st *st = iio_priv(indio_dev);
- iio_map_array_unregister(indio_dev);
- iio_device_unregister(indio_dev);
- regulator_disable(st->ref);
+ ltc2497core_remove(indio_dev);
return 0;
}
diff --git a/drivers/iio/adc/ltc2497.h b/drivers/iio/adc/ltc2497.h
new file mode 100644
index 000000000000..d0b42dd6b8ad
--- /dev/null
+++ b/drivers/iio/adc/ltc2497.h
@@ -0,0 +1,18 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+
+#define LTC2497_ENABLE 0xA0
+#define LTC2497_CONFIG_DEFAULT LTC2497_ENABLE
+#define LTC2497_CONVERSION_TIME_MS 150ULL
+
+struct ltc2497core_driverdata {
+ struct regulator *ref;
+ ktime_t time_prev;
+ u8 addr_prev;
+ int (*result_and_measure)(struct ltc2497core_driverdata *ddata,
+ u8 address, int *val);
+};
+
+int ltc2497core_probe(struct device *dev, struct iio_dev *indio_dev);
+void ltc2497core_remove(struct iio_dev *indio_dev);
+
+MODULE_IMPORT_NS(LTC2497);
diff --git a/drivers/iio/adc/max9611.c b/drivers/iio/adc/max9611.c
index e480529b3f04..04d5ff7d2c8e 100644
--- a/drivers/iio/adc/max9611.c
+++ b/drivers/iio/adc/max9611.c
@@ -115,22 +115,17 @@ enum max9611_conf_ids {
* where data shall be read from
*/
static const unsigned int max9611_mux_conf[][2] = {
- /* CONF_SENSE_1x */
- { MAX9611_MUX_SENSE_1x, MAX9611_REG_CSA_DATA },
- /* CONF_SENSE_4x */
- { MAX9611_MUX_SENSE_4x, MAX9611_REG_CSA_DATA },
- /* CONF_SENSE_8x */
- { MAX9611_MUX_SENSE_8x, MAX9611_REG_CSA_DATA },
- /* CONF_IN_VOLT */
- { MAX9611_INPUT_VOLT, MAX9611_REG_RS_DATA },
- /* CONF_TEMP */
- { MAX9611_MUX_TEMP, MAX9611_REG_TEMP_DATA },
+ [CONF_SENSE_1x] = { MAX9611_MUX_SENSE_1x, MAX9611_REG_CSA_DATA },
+ [CONF_SENSE_4x] = { MAX9611_MUX_SENSE_4x, MAX9611_REG_CSA_DATA },
+ [CONF_SENSE_8x] = { MAX9611_MUX_SENSE_8x, MAX9611_REG_CSA_DATA },
+ [CONF_IN_VOLT] = { MAX9611_INPUT_VOLT, MAX9611_REG_RS_DATA },
+ [CONF_TEMP] = { MAX9611_MUX_TEMP, MAX9611_REG_TEMP_DATA },
};
enum max9611_csa_gain {
- CSA_GAIN_1x,
- CSA_GAIN_4x,
- CSA_GAIN_8x,
+ CSA_GAIN_1x = CONF_SENSE_1x,
+ CSA_GAIN_4x = CONF_SENSE_4x,
+ CSA_GAIN_8x = CONF_SENSE_8x,
};
enum max9611_csa_gain_params {
@@ -148,18 +143,9 @@ enum max9611_csa_gain_params {
* value; use this structure to retrieve the correct LSB and offset values.
*/
static const unsigned int max9611_gain_conf[][2] = {
- { /* [0] CSA_GAIN_1x */
- MAX9611_CSA_1X_LSB_nV,
- MAX9611_CSA_1X_OFFS_RAW,
- },
- { /* [1] CSA_GAIN_4x */
- MAX9611_CSA_4X_LSB_nV,
- MAX9611_CSA_4X_OFFS_RAW,
- },
- { /* [2] CSA_GAIN_8x */
- MAX9611_CSA_8X_LSB_nV,
- MAX9611_CSA_8X_OFFS_RAW,
- },
+ [CSA_GAIN_1x] = { MAX9611_CSA_1X_LSB_nV, MAX9611_CSA_1X_OFFS_RAW, },
+ [CSA_GAIN_4x] = { MAX9611_CSA_4X_LSB_nV, MAX9611_CSA_4X_OFFS_RAW, },
+ [CSA_GAIN_8x] = { MAX9611_CSA_8X_LSB_nV, MAX9611_CSA_8X_OFFS_RAW, },
};
enum max9611_chan_addrs {
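Anchoring the gain enum to the mux-configuration enum and switching both tables to designated initializers means a reordered enum can no longer silently shift table rows. The underlying C idiom, with invented values:

enum gain { GAIN_1X, GAIN_4X };

static const unsigned int lsb_nv[] = {
	[GAIN_1X] = 107500,	/* values invented for illustration */
	[GAIN_4X] = 26880,
};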
diff --git a/drivers/iio/adc/qcom-vadc-common.c b/drivers/iio/adc/qcom-vadc-common.c
index dcd7fb5b9fb2..2bb78d1c4daa 100644
--- a/drivers/iio/adc/qcom-vadc-common.c
+++ b/drivers/iio/adc/qcom-vadc-common.c
@@ -6,6 +6,7 @@
#include <linux/log2.h>
#include <linux/err.h>
#include <linux/module.h>
+#include <linux/units.h>
#include "qcom-vadc-common.h"
@@ -236,8 +237,7 @@ static int qcom_vadc_scale_die_temp(const struct vadc_linear_graph *calib_graph,
voltage = 0;
}
- voltage -= KELVINMIL_CELSIUSMIL;
- *result_mdec = voltage;
+ *result_mdec = milli_kelvin_to_millicelsius(voltage);
return 0;
}
@@ -325,7 +325,7 @@ static int qcom_vadc_scale_hw_calib_die_temp(
{
*result_mdec = qcom_vadc_scale_code_voltage_factor(adc_code,
prescale, data, 2);
- *result_mdec -= KELVINMIL_CELSIUSMIL;
+ *result_mdec = milli_kelvin_to_millicelsius(*result_mdec);
return 0;
}
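The open-coded KELVINMIL_CELSIUSMIL subtraction becomes milli_kelvin_to_millicelsius() from <linux/units.h>. For reference, the conversion reduces to (sketch):

static inline long mk_to_mc(long millikelvin)
{
	return millikelvin - 273150;	/* 0 degC = 273.15 K, in milli-units */
}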
diff --git a/drivers/iio/adc/qcom-vadc-common.h b/drivers/iio/adc/qcom-vadc-common.h
index bbb1fa02b382..e074902a24cc 100644
--- a/drivers/iio/adc/qcom-vadc-common.h
+++ b/drivers/iio/adc/qcom-vadc-common.h
@@ -38,7 +38,6 @@
#define VADC_AVG_SAMPLES_MAX 512
#define ADC5_AVG_SAMPLES_MAX 16
-#define KELVINMIL_CELSIUSMIL 273150
#define PMIC5_CHG_TEMP_SCALE_FACTOR 377500
#define PMIC5_SMB_TEMP_CONSTANT 419400
#define PMIC5_SMB_TEMP_SCALE_FACTOR 356
diff --git a/drivers/iio/adc/stm32-adc-core.c b/drivers/iio/adc/stm32-adc-core.c
index 6537f4f776c5..2df88d2b880a 100644
--- a/drivers/iio/adc/stm32-adc-core.c
+++ b/drivers/iio/adc/stm32-adc-core.c
@@ -280,21 +280,21 @@ out:
static const struct stm32_adc_common_regs stm32f4_adc_common_regs = {
.csr = STM32F4_ADC_CSR,
.ccr = STM32F4_ADC_CCR,
- .eoc1_msk = STM32F4_EOC1,
- .eoc2_msk = STM32F4_EOC2,
- .eoc3_msk = STM32F4_EOC3,
+ .eoc1_msk = STM32F4_EOC1 | STM32F4_OVR1,
+ .eoc2_msk = STM32F4_EOC2 | STM32F4_OVR2,
+ .eoc3_msk = STM32F4_EOC3 | STM32F4_OVR3,
.ier = STM32F4_ADC_CR1,
- .eocie_msk = STM32F4_EOCIE,
+ .eocie_msk = STM32F4_EOCIE | STM32F4_OVRIE,
};
/* STM32H7 common registers definitions */
static const struct stm32_adc_common_regs stm32h7_adc_common_regs = {
.csr = STM32H7_ADC_CSR,
.ccr = STM32H7_ADC_CCR,
- .eoc1_msk = STM32H7_EOC_MST,
- .eoc2_msk = STM32H7_EOC_SLV,
+ .eoc1_msk = STM32H7_EOC_MST | STM32H7_OVR_MST,
+ .eoc2_msk = STM32H7_EOC_SLV | STM32H7_OVR_SLV,
.ier = STM32H7_ADC_IER,
- .eocie_msk = STM32H7_EOCIE,
+ .eocie_msk = STM32H7_EOCIE | STM32H7_OVRIE,
};
static const unsigned int stm32_adc_offset[STM32_ADC_MAX_ADCS] = {
@@ -688,7 +688,8 @@ static int stm32_adc_probe(struct platform_device *pdev)
priv->vref = devm_regulator_get(&pdev->dev, "vref");
if (IS_ERR(priv->vref)) {
ret = PTR_ERR(priv->vref);
- dev_err(&pdev->dev, "vref get failed, %d\n", ret);
+ if (ret != -EPROBE_DEFER)
+ dev_err(&pdev->dev, "vref get failed, %d\n", ret);
return ret;
}
@@ -696,7 +697,8 @@ static int stm32_adc_probe(struct platform_device *pdev)
if (IS_ERR(priv->aclk)) {
ret = PTR_ERR(priv->aclk);
if (ret != -ENOENT) {
- dev_err(&pdev->dev, "Can't get 'adc' clock\n");
+ if (ret != -EPROBE_DEFER)
+ dev_err(&pdev->dev, "Can't get 'adc' clock\n");
return ret;
}
priv->aclk = NULL;
@@ -706,7 +708,8 @@ static int stm32_adc_probe(struct platform_device *pdev)
if (IS_ERR(priv->bclk)) {
ret = PTR_ERR(priv->bclk);
if (ret != -ENOENT) {
- dev_err(&pdev->dev, "Can't get 'bus' clock\n");
+ if (ret != -EPROBE_DEFER)
+ dev_err(&pdev->dev, "Can't get 'bus' clock\n");
return ret;
}
priv->bclk = NULL;
diff --git a/drivers/iio/adc/stm32-adc-core.h b/drivers/iio/adc/stm32-adc-core.h
index 2579d514c2a3..2322809bfd2f 100644
--- a/drivers/iio/adc/stm32-adc-core.h
+++ b/drivers/iio/adc/stm32-adc-core.h
@@ -51,10 +51,12 @@
#define STM32F4_ADC_CCR (STM32_ADCX_COMN_OFFSET + 0x04)
/* STM32F4_ADC_SR - bit fields */
+#define STM32F4_OVR BIT(5)
#define STM32F4_STRT BIT(4)
#define STM32F4_EOC BIT(1)
/* STM32F4_ADC_CR1 - bit fields */
+#define STM32F4_OVRIE BIT(26)
#define STM32F4_RES_SHIFT 24
#define STM32F4_RES_MASK GENMASK(25, 24)
#define STM32F4_SCAN BIT(8)
@@ -72,8 +74,11 @@
#define STM32F4_ADON BIT(0)
/* STM32F4_ADC_CSR - bit fields */
+#define STM32F4_OVR3 BIT(21)
#define STM32F4_EOC3 BIT(17)
+#define STM32F4_OVR2 BIT(13)
#define STM32F4_EOC2 BIT(9)
+#define STM32F4_OVR1 BIT(5)
#define STM32F4_EOC1 BIT(1)
/* STM32F4_ADC_CCR - bit fields */
@@ -103,10 +108,12 @@
/* STM32H7_ADC_ISR - bit fields */
#define STM32MP1_VREGREADY BIT(12)
+#define STM32H7_OVR BIT(4)
#define STM32H7_EOC BIT(2)
#define STM32H7_ADRDY BIT(0)
/* STM32H7_ADC_IER - bit fields */
+#define STM32H7_OVRIE STM32H7_OVR
#define STM32H7_EOCIE STM32H7_EOC
/* STM32H7_ADC_CR - bit fields */
@@ -155,7 +162,9 @@ enum stm32h7_adc_dmngt {
#define STM32H7_LINCALFACT_MASK GENMASK(29, 0)
/* STM32H7_ADC_CSR - bit fields */
+#define STM32H7_OVR_SLV BIT(20)
#define STM32H7_EOC_SLV BIT(18)
+#define STM32H7_OVR_MST BIT(4)
#define STM32H7_EOC_MST BIT(2)
/* STM32H7_ADC_CCR - bit fields */
diff --git a/drivers/iio/adc/stm32-adc.c b/drivers/iio/adc/stm32-adc.c
index 3b291d72701c..80c3f963527b 100644
--- a/drivers/iio/adc/stm32-adc.c
+++ b/drivers/iio/adc/stm32-adc.c
@@ -117,7 +117,9 @@ struct stm32_adc_regs {
* struct stm32_adc_regspec - stm32 registers definition
* @dr: data register offset
* @ier_eoc: interrupt enable register & eocie bitfield
+ * @ier_ovr: interrupt enable register & overrun bitfield
* @isr_eoc: interrupt status register & eoc bitfield
+ * @isr_ovr: interrupt status register & overrun bitfield
* @sqr: reference to sequence registers array
* @exten: trigger control register & bitfield
* @extsel: trigger selection register & bitfield
@@ -128,7 +130,9 @@ struct stm32_adc_regs {
struct stm32_adc_regspec {
const u32 dr;
const struct stm32_adc_regs ier_eoc;
+ const struct stm32_adc_regs ier_ovr;
const struct stm32_adc_regs isr_eoc;
+ const struct stm32_adc_regs isr_ovr;
const struct stm32_adc_regs *sqr;
const struct stm32_adc_regs exten;
const struct stm32_adc_regs extsel;
@@ -337,7 +341,9 @@ static const unsigned int stm32f4_adc_smp_cycles[STM32_ADC_MAX_SMP + 1] = {
static const struct stm32_adc_regspec stm32f4_adc_regspec = {
.dr = STM32F4_ADC_DR,
.ier_eoc = { STM32F4_ADC_CR1, STM32F4_EOCIE },
+ .ier_ovr = { STM32F4_ADC_CR1, STM32F4_OVRIE },
.isr_eoc = { STM32F4_ADC_SR, STM32F4_EOC },
+ .isr_ovr = { STM32F4_ADC_SR, STM32F4_OVR },
.sqr = stm32f4_sq,
.exten = { STM32F4_ADC_CR2, STM32F4_EXTEN_MASK, STM32F4_EXTEN_SHIFT },
.extsel = { STM32F4_ADC_CR2, STM32F4_EXTSEL_MASK,
@@ -429,7 +435,9 @@ static const unsigned int stm32h7_adc_smp_cycles[STM32_ADC_MAX_SMP + 1] = {
static const struct stm32_adc_regspec stm32h7_adc_regspec = {
.dr = STM32H7_ADC_DR,
.ier_eoc = { STM32H7_ADC_IER, STM32H7_EOCIE },
+ .ier_ovr = { STM32H7_ADC_IER, STM32H7_OVRIE },
.isr_eoc = { STM32H7_ADC_ISR, STM32H7_EOC },
+ .isr_ovr = { STM32H7_ADC_ISR, STM32H7_OVR },
.sqr = stm32h7_sq,
.exten = { STM32H7_ADC_CFGR, STM32H7_EXTEN_MASK, STM32H7_EXTEN_SHIFT },
.extsel = { STM32H7_ADC_CFGR, STM32H7_EXTSEL_MASK,
@@ -506,6 +514,18 @@ static void stm32_adc_conv_irq_disable(struct stm32_adc *adc)
adc->cfg->regs->ier_eoc.mask);
}
+static void stm32_adc_ovr_irq_enable(struct stm32_adc *adc)
+{
+ stm32_adc_set_bits(adc, adc->cfg->regs->ier_ovr.reg,
+ adc->cfg->regs->ier_ovr.mask);
+}
+
+static void stm32_adc_ovr_irq_disable(struct stm32_adc *adc)
+{
+ stm32_adc_clr_bits(adc, adc->cfg->regs->ier_ovr.reg,
+ adc->cfg->regs->ier_ovr.mask);
+}
+
static void stm32_adc_set_res(struct stm32_adc *adc)
{
const struct stm32_adc_regs *res = &adc->cfg->regs->res;
@@ -1205,6 +1225,19 @@ static int stm32_adc_read_raw(struct iio_dev *indio_dev,
}
}
+static irqreturn_t stm32_adc_threaded_isr(int irq, void *data)
+{
+ struct stm32_adc *adc = data;
+ struct iio_dev *indio_dev = iio_priv_to_dev(adc);
+ const struct stm32_adc_regspec *regs = adc->cfg->regs;
+ u32 status = stm32_adc_readl(adc, regs->isr_eoc.reg);
+
+ if (status & regs->isr_ovr.mask)
+ dev_err(&indio_dev->dev, "Overrun, stopping: restart needed\n");
+
+ return IRQ_HANDLED;
+}
+
static irqreturn_t stm32_adc_isr(int irq, void *data)
{
struct stm32_adc *adc = data;
@@ -1212,6 +1245,19 @@ static irqreturn_t stm32_adc_isr(int irq, void *data)
const struct stm32_adc_regspec *regs = adc->cfg->regs;
u32 status = stm32_adc_readl(adc, regs->isr_eoc.reg);
+ if (status & regs->isr_ovr.mask) {
+ /*
+ * Overrun occurred on regular conversions: data for wrong
+ * channel may be read. Unconditionally disable interrupts
+ * to stop processing data and print error message.
+ * Restarting the capture can be done by disabling, then
+ * re-enabling it (e.g. write 0, then 1 to buffer/enable).
+ */
+ stm32_adc_ovr_irq_disable(adc);
+ stm32_adc_conv_irq_disable(adc);
+ return IRQ_WAKE_THREAD;
+ }
+
if (status & regs->isr_eoc.mask) {
/* Reading DR also clears EOC status flag */
adc->buffer[adc->bufi] = stm32_adc_readw(adc, regs->dr);
@@ -1441,6 +1487,8 @@ static int __stm32_adc_buffer_postenable(struct iio_dev *indio_dev)
/* Reset adc buffer index */
adc->bufi = 0;
+ stm32_adc_ovr_irq_enable(adc);
+
if (!adc->dma_chan)
stm32_adc_conv_irq_enable(adc);
@@ -1481,6 +1529,8 @@ static void __stm32_adc_buffer_predisable(struct iio_dev *indio_dev)
if (!adc->dma_chan)
stm32_adc_conv_irq_disable(adc);
+ stm32_adc_ovr_irq_disable(adc);
+
if (adc->dma_chan)
dmaengine_terminate_sync(adc->dma_chan);
@@ -1746,9 +1796,21 @@ static int stm32_adc_dma_request(struct iio_dev *indio_dev)
struct dma_slave_config config;
int ret;
- adc->dma_chan = dma_request_slave_channel(&indio_dev->dev, "rx");
- if (!adc->dma_chan)
+ adc->dma_chan = dma_request_chan(&indio_dev->dev, "rx");
+ if (IS_ERR(adc->dma_chan)) {
+ ret = PTR_ERR(adc->dma_chan);
+ if (ret != -ENODEV) {
+ if (ret != -EPROBE_DEFER)
+ dev_err(&indio_dev->dev,
+ "DMA channel request failed with %d\n",
+ ret);
+ return ret;
+ }
+
+ /* DMA is optional: fall back to IRQ mode */
+ adc->dma_chan = NULL;
return 0;
+ }
adc->rx_buf = dma_alloc_coherent(adc->dma_chan->device->dev,
STM32_DMA_BUFFER_SIZE,
@@ -1818,8 +1880,9 @@ static int stm32_adc_probe(struct platform_device *pdev)
if (adc->irq < 0)
return adc->irq;
- ret = devm_request_irq(&pdev->dev, adc->irq, stm32_adc_isr,
- 0, pdev->name, adc);
+ ret = devm_request_threaded_irq(&pdev->dev, adc->irq, stm32_adc_isr,
+ stm32_adc_threaded_isr,
+ 0, pdev->name, adc);
if (ret) {
dev_err(&pdev->dev, "failed to request IRQ\n");
return ret;
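On overrun the hard handler masks both interrupt sources and defers to the threaded handler, which only logs; acquisition stays stopped until userspace restarts it by toggling the buffer, exactly as the in-code comment says. A userspace sketch of that restart, path illustrative:

#include <stdio.h>

/* Write 0 then 1 to the IIO buffer enable attribute, e.g.
 * /sys/bus/iio/devices/iio:device0/buffer/enable
 */
static void restart_capture(const char *enable_path)
{
	FILE *f;

	f = fopen(enable_path, "w");
	if (f) {
		fputs("0", f);	/* stop the stalled capture */
		fclose(f);
	}

	f = fopen(enable_path, "w");
	if (f) {
		fputs("1", f);	/* start it again */
		fclose(f);
	}
}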
diff --git a/drivers/iio/adc/stm32-dfsdm-adc.c b/drivers/iio/adc/stm32-dfsdm-adc.c
index e493242c266e..2aad2cda6943 100644
--- a/drivers/iio/adc/stm32-dfsdm-adc.c
+++ b/drivers/iio/adc/stm32-dfsdm-adc.c
@@ -1204,6 +1204,8 @@ static int stm32_dfsdm_single_conv(struct iio_dev *indio_dev,
stm32_dfsdm_stop_conv(adc);
+ stm32_dfsdm_process_data(adc, res);
+
stop_dfsdm:
stm32_dfsdm_stop_dfsdm(adc->dfsdm);
@@ -1219,14 +1221,32 @@ static int stm32_dfsdm_write_raw(struct iio_dev *indio_dev,
unsigned int spi_freq;
int ret = -EINVAL;
+ switch (ch->src) {
+ case DFSDM_CHANNEL_SPI_CLOCK_INTERNAL:
+ spi_freq = adc->dfsdm->spi_master_freq;
+ break;
+ case DFSDM_CHANNEL_SPI_CLOCK_INTERNAL_DIV2_FALLING:
+ case DFSDM_CHANNEL_SPI_CLOCK_INTERNAL_DIV2_RISING:
+ spi_freq = adc->dfsdm->spi_master_freq / 2;
+ break;
+ default:
+ spi_freq = adc->spi_freq;
+ }
+
switch (mask) {
case IIO_CHAN_INFO_OVERSAMPLING_RATIO:
ret = iio_device_claim_direct_mode(indio_dev);
if (ret)
return ret;
+
ret = stm32_dfsdm_compute_all_osrs(indio_dev, val);
- if (!ret)
+ if (!ret) {
+ dev_dbg(&indio_dev->dev,
+ "Sampling rate changed from (%u) to (%u)\n",
+ adc->sample_freq, spi_freq / val);
adc->oversamp = val;
+ adc->sample_freq = spi_freq / val;
+ }
iio_device_release_direct_mode(indio_dev);
return ret;
@@ -1238,18 +1258,6 @@ static int stm32_dfsdm_write_raw(struct iio_dev *indio_dev,
if (ret)
return ret;
- switch (ch->src) {
- case DFSDM_CHANNEL_SPI_CLOCK_INTERNAL:
- spi_freq = adc->dfsdm->spi_master_freq;
- break;
- case DFSDM_CHANNEL_SPI_CLOCK_INTERNAL_DIV2_FALLING:
- case DFSDM_CHANNEL_SPI_CLOCK_INTERNAL_DIV2_RISING:
- spi_freq = adc->dfsdm->spi_master_freq / 2;
- break;
- default:
- spi_freq = adc->spi_freq;
- }
-
ret = dfsdm_adc_set_samp_freq(indio_dev, val, spi_freq);
iio_device_release_direct_mode(indio_dev);
return ret;
@@ -1383,9 +1391,13 @@ static int stm32_dfsdm_dma_request(struct iio_dev *indio_dev)
{
struct stm32_dfsdm_adc *adc = iio_priv(indio_dev);
- adc->dma_chan = dma_request_slave_channel(&indio_dev->dev, "rx");
- if (!adc->dma_chan)
- return -EINVAL;
+ adc->dma_chan = dma_request_chan(&indio_dev->dev, "rx");
+ if (IS_ERR(adc->dma_chan)) {
+ int ret = PTR_ERR(adc->dma_chan);
+
+ adc->dma_chan = NULL;
+ return ret;
+ }
adc->rx_buf = dma_alloc_coherent(adc->dma_chan->device->dev,
DFSDM_DMA_BUFFER_SIZE,
@@ -1509,7 +1521,16 @@ static int stm32_dfsdm_adc_init(struct iio_dev *indio_dev)
init_completion(&adc->completion);
/* Optionally request DMA */
- if (stm32_dfsdm_dma_request(indio_dev)) {
+ ret = stm32_dfsdm_dma_request(indio_dev);
+ if (ret) {
+ if (ret != -ENODEV) {
+ if (ret != -EPROBE_DEFER)
+ dev_err(&indio_dev->dev,
+ "DMA channel request failed with %d\n",
+ ret);
+ return ret;
+ }
+
dev_dbg(&indio_dev->dev, "No DMA support\n");
return 0;
}
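Hoisting the spi_freq lookup ahead of the mask switch lets the oversampling path log the old and new rates, which follow directly from the clocking (sketch):

/* Effective output rate after the filter; the serial clock is already
 * halved for the DIV2 channel sources.
 */
sample_freq = spi_freq / oversampling_ratio;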
diff --git a/drivers/iio/adc/ti-ads1015.c b/drivers/iio/adc/ti-ads1015.c
index a550b132cfb7..5ea4f45d6bad 100644
--- a/drivers/iio/adc/ti-ads1015.c
+++ b/drivers/iio/adc/ti-ads1015.c
@@ -12,17 +12,15 @@
*/
#include <linux/module.h>
-#include <linux/of_device.h>
#include <linux/init.h>
#include <linux/irq.h>
#include <linux/i2c.h>
+#include <linux/property.h>
#include <linux/regmap.h>
#include <linux/pm_runtime.h>
#include <linux/mutex.h>
#include <linux/delay.h>
-#include <linux/platform_data/ads1015.h>
-
#include <linux/iio/iio.h>
#include <linux/iio/types.h>
#include <linux/iio/sysfs.h>
@@ -33,6 +31,8 @@
#define ADS1015_DRV_NAME "ads1015"
+#define ADS1015_CHANNELS 8
+
#define ADS1015_CONV_REG 0x00
#define ADS1015_CFG_REG 0x01
#define ADS1015_LO_THRESH_REG 0x02
@@ -77,6 +77,7 @@
#define ADS1015_DEFAULT_CHAN 0
enum chip_ids {
+ ADSXXXX = 0,
ADS1015,
ADS1115,
};
@@ -219,6 +220,12 @@ static const struct iio_event_spec ads1015_events[] = {
.datasheet_name = "AIN"#_chan"-AIN"#_chan2, \
}
+struct ads1015_channel_data {
+ bool enabled;
+ unsigned int pga;
+ unsigned int data_rate;
+};
+
struct ads1015_thresh_data {
unsigned int comp_queue;
int high_thresh;
@@ -837,65 +844,58 @@ static const struct iio_info ads1115_info = {
.attrs = &ads1115_attribute_group,
};
-#ifdef CONFIG_OF
-static int ads1015_get_channels_config_of(struct i2c_client *client)
+static int ads1015_client_get_channels_config(struct i2c_client *client)
{
struct iio_dev *indio_dev = i2c_get_clientdata(client);
struct ads1015_data *data = iio_priv(indio_dev);
- struct device_node *node;
+ struct device *dev = &client->dev;
+ struct fwnode_handle *node;
+ int i = -1;
- if (!client->dev.of_node ||
- !of_get_next_child(client->dev.of_node, NULL))
- return -EINVAL;
-
- for_each_child_of_node(client->dev.of_node, node) {
+ device_for_each_child_node(dev, node) {
u32 pval;
unsigned int channel;
unsigned int pga = ADS1015_DEFAULT_PGA;
unsigned int data_rate = ADS1015_DEFAULT_DATA_RATE;
- if (of_property_read_u32(node, "reg", &pval)) {
- dev_err(&client->dev, "invalid reg on %pOF\n",
- node);
+ if (fwnode_property_read_u32(node, "reg", &pval)) {
+ dev_err(dev, "invalid reg on %pfw\n", node);
continue;
}
channel = pval;
if (channel >= ADS1015_CHANNELS) {
- dev_err(&client->dev,
- "invalid channel index %d on %pOF\n",
+ dev_err(dev, "invalid channel index %d on %pfw\n",
channel, node);
continue;
}
- if (!of_property_read_u32(node, "ti,gain", &pval)) {
+ if (!fwnode_property_read_u32(node, "ti,gain", &pval)) {
pga = pval;
if (pga > 6) {
- dev_err(&client->dev, "invalid gain on %pOF\n",
- node);
- of_node_put(node);
+ dev_err(dev, "invalid gain on %pfw\n", node);
+ fwnode_handle_put(node);
return -EINVAL;
}
}
- if (!of_property_read_u32(node, "ti,datarate", &pval)) {
+ if (!fwnode_property_read_u32(node, "ti,datarate", &pval)) {
data_rate = pval;
if (data_rate > 7) {
- dev_err(&client->dev,
- "invalid data_rate on %pOF\n",
- node);
- of_node_put(node);
+ dev_err(dev, "invalid data_rate on %pfw\n", node);
+ fwnode_handle_put(node);
return -EINVAL;
}
}
data->channel_data[channel].pga = pga;
data->channel_data[channel].data_rate = data_rate;
+
+ i++;
}
- return 0;
+ return i < 0 ? -EINVAL : 0;
}
-#endif
static void ads1015_get_channels_config(struct i2c_client *client)
{
@@ -903,19 +903,10 @@ static void ads1015_get_channels_config(struct i2c_client *client)
struct iio_dev *indio_dev = i2c_get_clientdata(client);
struct ads1015_data *data = iio_priv(indio_dev);
- struct ads1015_platform_data *pdata = dev_get_platdata(&client->dev);
- /* prefer platform data */
- if (pdata) {
- memcpy(data->channel_data, pdata->channel_data,
- sizeof(data->channel_data));
+ if (!ads1015_client_get_channels_config(client))
return;
- }
-#ifdef CONFIG_OF
- if (!ads1015_get_channels_config_of(client))
- return;
-#endif
/* fallback on default configuration */
for (k = 0; k < ADS1015_CHANNELS; ++k) {
data->channel_data[k].pga = ADS1015_DEFAULT_PGA;
@@ -953,9 +944,8 @@ static int ads1015_probe(struct i2c_client *client,
indio_dev->name = ADS1015_DRV_NAME;
indio_dev->modes = INDIO_DIRECT_MODE;
- if (client->dev.of_node)
- chip = (enum chip_ids)of_device_get_match_data(&client->dev);
- else
+ chip = (enum chip_ids)device_get_match_data(&client->dev);
+ if (chip == ADSXXXX)
chip = id->driver_data;
switch (chip) {
case ADS1015:
@@ -970,6 +960,9 @@ static int ads1015_probe(struct i2c_client *client,
indio_dev->info = &ads1115_info;
data->data_rate = (unsigned int *) &ads1115_data_rate;
break;
+ default:
+ dev_err(&client->dev, "Unknown chip %d\n", chip);
+ return -EINVAL;
}
data->event_channel = ADS1015_CHANNELS;
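Replacing the of_* calls with the fwnode property API makes the per-channel child parsing work for ACPI as well as devicetree descriptions, and retires the platform-data path. The walk now takes the standard shape (sketch):

struct fwnode_handle *child;
u32 reg;

device_for_each_child_node(dev, child) {
	if (fwnode_property_read_u32(child, "reg", &reg))
		continue;	/* child without a channel index */
	/* validate reg, then record per-channel pga/data_rate */
}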
diff --git a/drivers/iio/adc/ti-ads7950.c b/drivers/iio/adc/ti-ads7950.c
index 2e66e4d586ff..f9edc1207f75 100644
--- a/drivers/iio/adc/ti-ads7950.c
+++ b/drivers/iio/adc/ti-ads7950.c
@@ -602,7 +602,7 @@ static int ti_ads7950_probe(struct spi_device *spi)
st->reg = devm_regulator_get(&spi->dev, "vref");
if (IS_ERR(st->reg)) {
- dev_err(&spi->dev, "Failed get get regulator \"vref\"\n");
+ dev_err(&spi->dev, "Failed to get regulator \"vref\"\n");
ret = PTR_ERR(st->reg);
goto error_destroy_mutex;
}
diff --git a/drivers/iio/buffer/industrialio-buffer-dma.c b/drivers/iio/buffer/industrialio-buffer-dma.c
index 90cf6e586b10..a74bd9c0587c 100644
--- a/drivers/iio/buffer/industrialio-buffer-dma.c
+++ b/drivers/iio/buffer/industrialio-buffer-dma.c
@@ -476,7 +476,7 @@ static struct iio_dma_buffer_block *iio_dma_buffer_dequeue(
* @n: Number of bytes to read
* @user_buffer: Userspace buffer to copy the data to
*
- * Should be used as the read_first_n callback for iio_buffer_access_ops
+ * Should be used as the read callback for iio_buffer_access_ops
* struct for DMA buffers.
*/
int iio_dma_buffer_read(struct iio_buffer *buffer, size_t n,
diff --git a/drivers/iio/buffer/industrialio-buffer-dmaengine.c b/drivers/iio/buffer/industrialio-buffer-dmaengine.c
index bea4a75e92f1..b129693af0fd 100644
--- a/drivers/iio/buffer/industrialio-buffer-dmaengine.c
+++ b/drivers/iio/buffer/industrialio-buffer-dmaengine.c
@@ -10,8 +10,10 @@
#include <linux/dma-mapping.h>
#include <linux/spinlock.h>
#include <linux/err.h>
+#include <linux/module.h>
#include <linux/iio/iio.h>
+#include <linux/iio/sysfs.h>
#include <linux/iio/buffer.h>
#include <linux/iio/buffer_impl.h>
#include <linux/iio/buffer-dma.h>
@@ -107,7 +109,7 @@ static void iio_dmaengine_buffer_release(struct iio_buffer *buf)
}
static const struct iio_buffer_access_funcs iio_dmaengine_buffer_ops = {
- .read_first_n = iio_dma_buffer_read,
+ .read = iio_dma_buffer_read,
.set_bytes_per_datum = iio_dma_buffer_set_bytes_per_datum,
.set_length = iio_dma_buffer_set_length,
.request_update = iio_dma_buffer_request_update,
@@ -125,6 +127,24 @@ static const struct iio_dma_buffer_ops iio_dmaengine_default_ops = {
.abort = iio_dmaengine_buffer_abort,
};
+static ssize_t iio_dmaengine_buffer_get_length_align(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct iio_dev *indio_dev = dev_to_iio_dev(dev);
+ struct dmaengine_buffer *dmaengine_buffer =
+ iio_buffer_to_dmaengine_buffer(indio_dev->buffer);
+
+ return sprintf(buf, "%u\n", dmaengine_buffer->align);
+}
+
+static IIO_DEVICE_ATTR(length_align_bytes, 0444,
+ iio_dmaengine_buffer_get_length_align, NULL, 0);
+
+static const struct attribute *iio_dmaengine_buffer_attrs[] = {
+ &iio_dev_attr_length_align_bytes.dev_attr.attr,
+ NULL,
+};
+
/**
* iio_dmaengine_buffer_alloc() - Allocate new buffer which uses DMAengine
* @dev: Parent device for the buffer
@@ -150,7 +170,7 @@ struct iio_buffer *iio_dmaengine_buffer_alloc(struct device *dev,
if (!dmaengine_buffer)
return ERR_PTR(-ENOMEM);
- chan = dma_request_slave_channel_reason(dev, channel);
+ chan = dma_request_chan(dev, channel);
if (IS_ERR(chan)) {
ret = PTR_ERR(chan);
goto err_free;
@@ -178,6 +198,8 @@ struct iio_buffer *iio_dmaengine_buffer_alloc(struct device *dev,
iio_dma_buffer_init(&dmaengine_buffer->queue, chan->device->dev,
&iio_dmaengine_default_ops);
+ iio_buffer_set_attrs(&dmaengine_buffer->queue.buffer,
+ iio_dmaengine_buffer_attrs);
dmaengine_buffer->queue.buffer.access = &iio_dmaengine_buffer_ops;
@@ -206,3 +228,7 @@ void iio_dmaengine_buffer_free(struct iio_buffer *buffer)
iio_buffer_put(buffer);
}
EXPORT_SYMBOL_GPL(iio_dmaengine_buffer_free);
+
+MODULE_AUTHOR("Lars-Peter Clausen <lars@metafoo.de>");
+MODULE_DESCRIPTION("DMA buffer for the IIO framework");
+MODULE_LICENSE("GPL");
diff --git a/drivers/iio/buffer/kfifo_buf.c b/drivers/iio/buffer/kfifo_buf.c
index e78fc0834e6b..3150f8ab984b 100644
--- a/drivers/iio/buffer/kfifo_buf.c
+++ b/drivers/iio/buffer/kfifo_buf.c
@@ -98,8 +98,7 @@ static int iio_store_to_kfifo(struct iio_buffer *r,
return 0;
}
-static int iio_read_first_n_kfifo(struct iio_buffer *r,
- size_t n, char __user *buf)
+static int iio_read_kfifo(struct iio_buffer *r, size_t n, char __user *buf)
{
int ret, copied;
struct iio_kfifo *kf = iio_to_kfifo(r);
@@ -141,7 +140,7 @@ static void iio_kfifo_buffer_release(struct iio_buffer *buffer)
static const struct iio_buffer_access_funcs kfifo_access_funcs = {
.store_to = &iio_store_to_kfifo,
- .read_first_n = &iio_read_first_n_kfifo,
+ .read = &iio_read_kfifo,
.data_available = iio_kfifo_buf_data_available,
.request_update = &iio_request_update_kfifo,
.set_bytes_per_datum = &iio_set_bytes_per_datum_kfifo,
diff --git a/drivers/iio/chemical/Kconfig b/drivers/iio/chemical/Kconfig
index fa4586037bb8..0b91de4df8f4 100644
--- a/drivers/iio/chemical/Kconfig
+++ b/drivers/iio/chemical/Kconfig
@@ -65,6 +65,7 @@ config IAQCORE
config PMS7003
tristate "Plantower PMS7003 particulate matter sensor"
depends on SERIAL_DEV_BUS
+ select IIO_BUFFER
select IIO_TRIGGERED_BUFFER
help
Say Y here to build support for the Plantower PMS7003 particulate
diff --git a/drivers/iio/chemical/Makefile b/drivers/iio/chemical/Makefile
index f97270bc4034..33d3a595dda9 100644
--- a/drivers/iio/chemical/Makefile
+++ b/drivers/iio/chemical/Makefile
@@ -4,7 +4,7 @@
#
# When adding new entries keep the list in alphabetical order
-obj-$(CONFIG_ATLAS_PH_SENSOR) += atlas-ph-sensor.o
+obj-$(CONFIG_ATLAS_PH_SENSOR) += atlas-sensor.o
obj-$(CONFIG_BME680) += bme680_core.o
obj-$(CONFIG_BME680_I2C) += bme680_i2c.o
obj-$(CONFIG_BME680_SPI) += bme680_spi.o
diff --git a/drivers/iio/chemical/atlas-ph-sensor.c b/drivers/iio/chemical/atlas-sensor.c
index 6c175eb1c7a7..2f0a6fed2589 100644
--- a/drivers/iio/chemical/atlas-ph-sensor.c
+++ b/drivers/iio/chemical/atlas-sensor.c
@@ -1,8 +1,8 @@
// SPDX-License-Identifier: GPL-2.0+
/*
- * atlas-ph-sensor.c - Support for Atlas Scientific OEM pH-SM sensor
+ * atlas-sensor.c - Support for Atlas Scientific OEM SM sensors
*
- * Copyright (C) 2015-2018 Matt Ranostay
+ * Copyright (C) 2015-2019 Konsulko Group
* Author: Matt Ranostay <matt.ranostay@konsulko.com>
*/
@@ -14,7 +14,6 @@
#include <linux/err.h>
#include <linux/irq.h>
#include <linux/irq_work.h>
-#include <linux/gpio.h>
#include <linux/i2c.h>
#include <linux/of_device.h>
#include <linux/regmap.h>
@@ -25,8 +24,8 @@
#include <linux/iio/triggered_buffer.h>
#include <linux/pm_runtime.h>
-#define ATLAS_REGMAP_NAME "atlas_ph_regmap"
-#define ATLAS_DRV_NAME "atlas_ph"
+#define ATLAS_REGMAP_NAME "atlas_regmap"
+#define ATLAS_DRV_NAME "atlas"
#define ATLAS_REG_DEV_TYPE 0x00
#define ATLAS_REG_DEV_VERSION 0x01
@@ -87,6 +86,16 @@ static const struct regmap_config atlas_regmap_config = {
.val_bits = 8,
};
+static int atlas_buffer_num_channels(const struct iio_chan_spec *spec)
+{
+ int idx = 0;
+
+ for (; spec->type != IIO_TIMESTAMP; spec++)
+ idx++;
+
+ return idx;
+};
+
static const struct iio_chan_spec atlas_ph_channels[] = {
{
.type = IIO_PH,
@@ -355,11 +364,12 @@ static irqreturn_t atlas_trigger_handler(int irq, void *private)
struct iio_poll_func *pf = private;
struct iio_dev *indio_dev = pf->indio_dev;
struct atlas_data *data = iio_priv(indio_dev);
+ int channels = atlas_buffer_num_channels(data->chip->channels);
int ret;
ret = regmap_bulk_read(data->regmap, data->chip->data_reg,
(u8 *) &data->buffer,
- sizeof(__be32) * (data->chip->num_channels - 2));
+ sizeof(__be32) * channels);
if (!ret)
iio_push_to_buffers_with_timestamp(indio_dev, data->buffer,
@@ -681,5 +691,5 @@ static struct i2c_driver atlas_driver = {
module_i2c_driver(atlas_driver);
MODULE_AUTHOR("Matt Ranostay <matt.ranostay@konsulko.com>");
-MODULE_DESCRIPTION("Atlas Scientific pH-SM sensor");
+MODULE_DESCRIPTION("Atlas Scientific SM sensors");
MODULE_LICENSE("GPL");
diff --git a/drivers/iio/common/ssp_sensors/ssp.h b/drivers/iio/common/ssp_sensors/ssp.h
index 0a381bb1ae6f..abb832795619 100644
--- a/drivers/iio/common/ssp_sensors/ssp.h
+++ b/drivers/iio/common/ssp_sensors/ssp.h
@@ -7,7 +7,7 @@
#define __SSP_SENSORHUB_H__
#include <linux/delay.h>
-#include <linux/gpio.h>
+#include <linux/gpio/consumer.h>
#include <linux/iio/common/ssp_sensors.h>
#include <linux/iio/iio.h>
#include <linux/spi/spi.h>
@@ -168,9 +168,9 @@ struct ssp_sensorhub_info {
* @fw_dl_state: firmware download state
* @comm_lock: lock protecting the handshake
* @pending_lock: lock protecting pending list and completion
- * @mcu_reset_gpio: mcu reset line
- * @ap_mcu_gpio: ap to mcu gpio line
- * @mcu_ap_gpio: mcu to ap gpio line
+ * @mcu_reset_gpiod: mcu reset line
+ * @ap_mcu_gpiod: ap to mcu gpio line
+ * @mcu_ap_gpiod: mcu to ap gpio line
* @pending_list: pending list for messages queued to be sent/read
* @sensor_devs: registered IIO devices table
* @enable_refcount: enable reference count for wdt (watchdog timer)
@@ -212,9 +212,9 @@ struct ssp_data {
struct mutex comm_lock;
struct mutex pending_lock;
- int mcu_reset_gpio;
- int ap_mcu_gpio;
- int mcu_ap_gpio;
+ struct gpio_desc *mcu_reset_gpiod;
+ struct gpio_desc *ap_mcu_gpiod;
+ struct gpio_desc *mcu_ap_gpiod;
struct list_head pending_list;
diff --git a/drivers/iio/common/ssp_sensors/ssp_dev.c b/drivers/iio/common/ssp_sensors/ssp_dev.c
index 9c70553994c6..a94dbcf491ce 100644
--- a/drivers/iio/common/ssp_sensors/ssp_dev.c
+++ b/drivers/iio/common/ssp_sensors/ssp_dev.c
@@ -9,7 +9,6 @@
#include <linux/mfd/core.h>
#include <linux/module.h>
#include <linux/of.h>
-#include <linux/of_gpio.h>
#include <linux/of_platform.h>
#include "ssp.h"
@@ -61,9 +60,9 @@ static const struct mfd_cell sensorhub_sensor_devs[] = {
static void ssp_toggle_mcu_reset_gpio(struct ssp_data *data)
{
- gpio_set_value(data->mcu_reset_gpio, 0);
+ gpiod_set_value(data->mcu_reset_gpiod, 0);
usleep_range(1000, 1200);
- gpio_set_value(data->mcu_reset_gpio, 1);
+ gpiod_set_value(data->mcu_reset_gpiod, 1);
msleep(50);
}
@@ -441,7 +440,6 @@ MODULE_DEVICE_TABLE(of, ssp_of_match);
static struct ssp_data *ssp_parse_dt(struct device *dev)
{
- int ret;
struct ssp_data *data;
struct device_node *node = dev->of_node;
const struct of_device_id *match;
@@ -450,26 +448,17 @@ static struct ssp_data *ssp_parse_dt(struct device *dev)
if (!data)
return NULL;
- data->mcu_ap_gpio = of_get_named_gpio(node, "mcu-ap-gpios", 0);
- if (data->mcu_ap_gpio < 0)
- return NULL;
-
- data->ap_mcu_gpio = of_get_named_gpio(node, "ap-mcu-gpios", 0);
- if (data->ap_mcu_gpio < 0)
- return NULL;
-
- data->mcu_reset_gpio = of_get_named_gpio(node, "mcu-reset-gpios", 0);
- if (data->mcu_reset_gpio < 0)
+ data->mcu_ap_gpiod = devm_gpiod_get(dev, "mcu-ap", GPIOD_IN);
+ if (IS_ERR(data->mcu_ap_gpiod))
return NULL;
- ret = devm_gpio_request_one(dev, data->ap_mcu_gpio, GPIOF_OUT_INIT_HIGH,
- "ap-mcu-gpios");
- if (ret)
+ data->ap_mcu_gpiod = devm_gpiod_get(dev, "ap-mcu", GPIOD_OUT_HIGH);
+ if (IS_ERR(data->ap_mcu_gpiod))
return NULL;
- ret = devm_gpio_request_one(dev, data->mcu_reset_gpio,
- GPIOF_OUT_INIT_HIGH, "mcu-reset-gpios");
- if (ret)
+ data->mcu_reset_gpiod = devm_gpiod_get(dev, "mcu-reset",
+ GPIOD_OUT_HIGH);
+ if (IS_ERR(data->mcu_reset_gpiod))
return NULL;
match = of_match_node(ssp_of_match, node);
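
The parse path above is the standard legacy-GPIO to descriptor conversion: devm_gpiod_get(dev, "mcu-reset", GPIOD_OUT_HIGH) looks up the "mcu-reset-gpios" property, requests the line and programs its initial direction in one call, and the gpiod_* accessors used in ssp_dev.c and ssp_spi.c then operate on logical levels, so any active-low flag in the device tree is honoured automatically. A minimal sketch of the pattern:

    #include <linux/delay.h>
    #include <linux/gpio/consumer.h>

    static int demo_toggle_reset(struct device *dev)
    {
        struct gpio_desc *rst;

        /* con_id "mcu-reset" resolves against "mcu-reset-gpios" */
        rst = devm_gpiod_get(dev, "mcu-reset", GPIOD_OUT_HIGH);
        if (IS_ERR(rst))
            return PTR_ERR(rst);

        gpiod_set_value(rst, 0);      /* logical, not raw, line level */
        usleep_range(1000, 1200);
        gpiod_set_value(rst, 1);
        return 0;
    }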
diff --git a/drivers/iio/common/ssp_sensors/ssp_spi.c b/drivers/iio/common/ssp_sensors/ssp_spi.c
index 7db3d5886e3e..4864c38b8d1c 100644
--- a/drivers/iio/common/ssp_sensors/ssp_spi.c
+++ b/drivers/iio/common/ssp_sensors/ssp_spi.c
@@ -155,9 +155,9 @@ static int ssp_check_lines(struct ssp_data *data, bool state)
{
int delay_cnt = 0;
- gpio_set_value_cansleep(data->ap_mcu_gpio, state);
+ gpiod_set_value_cansleep(data->ap_mcu_gpiod, state);
- while (gpio_get_value_cansleep(data->mcu_ap_gpio) != state) {
+ while (gpiod_get_value_cansleep(data->mcu_ap_gpiod) != state) {
usleep_range(3000, 3500);
if (data->shut_down || delay_cnt++ > 500) {
@@ -165,7 +165,7 @@ static int ssp_check_lines(struct ssp_data *data, bool state)
__func__, state);
if (!state)
- gpio_set_value_cansleep(data->ap_mcu_gpio, 1);
+ gpiod_set_value_cansleep(data->ap_mcu_gpiod, 1);
return -ETIMEDOUT;
}
@@ -197,7 +197,7 @@ static int ssp_do_transfer(struct ssp_data *data, struct ssp_msg *msg,
status = spi_write(data->spi, msg->buffer, SSP_HEADER_SIZE);
if (status < 0) {
- gpio_set_value_cansleep(data->ap_mcu_gpio, 1);
+ gpiod_set_value_cansleep(data->ap_mcu_gpiod, 1);
dev_err(SSP_DEV, "%s spi_write fail\n", __func__);
goto _error_locked;
}
diff --git a/drivers/iio/common/st_sensors/st_sensors_core.c b/drivers/iio/common/st_sensors/st_sensors_core.c
index 4a3064fb6cd9..e051edbc43c1 100644
--- a/drivers/iio/common/st_sensors/st_sensors_core.c
+++ b/drivers/iio/common/st_sensors/st_sensors_core.c
@@ -12,9 +12,8 @@
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/iio/iio.h>
+#include <linux/property.h>
#include <linux/regulator/consumer.h>
-#include <linux/of.h>
-#include <linux/of_device.h>
#include <linux/regmap.h>
#include <asm/unaligned.h>
#include <linux/iio/common/st_sensors.h>
@@ -319,63 +318,49 @@ static int st_sensors_set_drdy_int_pin(struct iio_dev *indio_dev,
return 0;
}
-#ifdef CONFIG_OF
-static struct st_sensors_platform_data *st_sensors_of_probe(struct device *dev,
+static struct st_sensors_platform_data *st_sensors_dev_probe(struct device *dev,
struct st_sensors_platform_data *defdata)
{
struct st_sensors_platform_data *pdata;
- struct device_node *np = dev->of_node;
u32 val;
- if (!np)
+ if (!dev_fwnode(dev))
return NULL;
pdata = devm_kzalloc(dev, sizeof(*pdata), GFP_KERNEL);
- if (!of_property_read_u32(np, "st,drdy-int-pin", &val) && (val <= 2))
+ if (!device_property_read_u32(dev, "st,drdy-int-pin", &val) && (val <= 2))
pdata->drdy_int_pin = (u8) val;
else
pdata->drdy_int_pin = defdata ? defdata->drdy_int_pin : 0;
- pdata->open_drain = of_property_read_bool(np, "drive-open-drain");
+ pdata->open_drain = device_property_read_bool(dev, "drive-open-drain");
return pdata;
}
/**
- * st_sensors_of_name_probe() - device tree probe for ST sensor name
+ * st_sensors_dev_name_probe() - device probe for ST sensor name
* @dev: driver model representation of the device.
- * @match: the OF match table for the device, containing compatible strings
- * but also a .data field with the corresponding internal kernel name
- * used by this sensor.
* @name: device name buffer reference.
* @len: device name buffer length.
*
- * In effect this function matches a compatible string to an internal kernel
+ * In effect this function matches an ID to an internal kernel
* name for a certain sensor device, so that the rest of the autodetection can
* rely on that name from this point on. I2C/SPI devices will be renamed
* to match the internal kernel convention.
*/
-void st_sensors_of_name_probe(struct device *dev,
- const struct of_device_id *match,
- char *name, int len)
+void st_sensors_dev_name_probe(struct device *dev, char *name, int len)
{
- const struct of_device_id *of_id;
+ const void *match;
- of_id = of_match_device(match, dev);
- if (!of_id || !of_id->data)
+ match = device_get_match_data(dev);
+ if (!match)
return;
- /* The name from the OF match takes precedence if present */
- strlcpy(name, of_id->data, len);
+ /* The name from the match takes precedence if present */
+ strlcpy(name, match, len);
}
-EXPORT_SYMBOL(st_sensors_of_name_probe);
-#else
-static struct st_sensors_platform_data *st_sensors_of_probe(struct device *dev,
- struct st_sensors_platform_data *defdata)
-{
- return NULL;
-}
-#endif
+EXPORT_SYMBOL(st_sensors_dev_name_probe);
int st_sensors_init_sensor(struct iio_dev *indio_dev,
struct st_sensors_platform_data *pdata)
@@ -385,7 +370,7 @@ int st_sensors_init_sensor(struct iio_dev *indio_dev,
int err = 0;
/* If OF/DT pdata exists, it will take precedence of anything else */
- of_pdata = st_sensors_of_probe(indio_dev->dev.parent, pdata);
+ of_pdata = st_sensors_dev_probe(indio_dev->dev.parent, pdata);
if (of_pdata)
pdata = of_pdata;
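
Switching st_sensors_dev_probe() and st_sensors_dev_name_probe() to the unified property API is what lets the CONFIG_OF stub and the ACPI-specific match helper (removed from st_sensors_i2c.c below) disappear: device_property_read_u32() and device_property_read_bool() parse DT and ACPI firmware nodes alike, and device_get_match_data() hands back the .data cargo of whichever match table fired. A sketch reusing the property parsed above:

    #include <linux/property.h>

    static int demo_get_drdy_pin(struct device *dev, u8 *pin)
    {
        u32 val;

        /* Reads from DT or ACPI _DSD properties alike */
        if (device_property_read_u32(dev, "st,drdy-int-pin", &val))
            return -ENOENT;
        if (val > 2)
            return -EINVAL;

        *pin = val;
        return 0;
    }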
diff --git a/drivers/iio/common/st_sensors/st_sensors_i2c.c b/drivers/iio/common/st_sensors/st_sensors_i2c.c
index aa89d54a7c59..286830fb5d35 100644
--- a/drivers/iio/common/st_sensors/st_sensors_i2c.c
+++ b/drivers/iio/common/st_sensors/st_sensors_i2c.c
@@ -11,8 +11,6 @@
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/iio/iio.h>
-#include <linux/of_device.h>
-#include <linux/acpi.h>
#include <linux/regmap.h>
#include <linux/iio/common/st_sensors_i2c.h>
@@ -68,25 +66,6 @@ int st_sensors_i2c_configure(struct iio_dev *indio_dev,
}
EXPORT_SYMBOL(st_sensors_i2c_configure);
-#ifdef CONFIG_ACPI
-int st_sensors_match_acpi_device(struct device *dev)
-{
- const struct acpi_device_id *acpi_id;
- kernel_ulong_t driver_data = 0;
-
- if (ACPI_HANDLE(dev)) {
- acpi_id = acpi_match_device(dev->driver->acpi_match_table, dev);
- if (!acpi_id) {
- dev_err(dev, "No driver data\n");
- return -EINVAL;
- }
- driver_data = acpi_id->driver_data;
- }
- return driver_data;
-}
-EXPORT_SYMBOL(st_sensors_match_acpi_device);
-#endif
-
MODULE_AUTHOR("Denis Ciocca <denis.ciocca@st.com>");
MODULE_DESCRIPTION("STMicroelectronics ST-sensors i2c driver");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/iio/common/st_sensors/st_sensors_spi.c b/drivers/iio/common/st_sensors/st_sensors_spi.c
index 2262f81b07c2..1275fb0eda31 100644
--- a/drivers/iio/common/st_sensors/st_sensors_spi.c
+++ b/drivers/iio/common/st_sensors/st_sensors_spi.c
@@ -11,6 +11,7 @@
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/iio/iio.h>
+#include <linux/property.h>
#include <linux/regmap.h>
#include <linux/iio/common/st_sensors_spi.h>
@@ -37,14 +38,15 @@ static const struct regmap_config st_sensors_spi_regmap_multiread_bit_config = {
*/
static bool st_sensors_is_spi_3_wire(struct spi_device *spi)
{
- struct device_node *np = spi->dev.of_node;
struct st_sensors_platform_data *pdata;
+ struct device *dev = &spi->dev;
- pdata = (struct st_sensors_platform_data *)spi->dev.platform_data;
- if ((np && of_property_read_bool(np, "spi-3wire")) ||
- (pdata && pdata->spi_3wire)) {
+ if (device_property_read_bool(dev, "spi-3wire"))
+ return true;
+
+ pdata = (struct st_sensors_platform_data *)dev->platform_data;
+ if (pdata && pdata->spi_3wire)
return true;
- }
return false;
}
diff --git a/drivers/iio/common/st_sensors/st_sensors_trigger.c b/drivers/iio/common/st_sensors/st_sensors_trigger.c
index 4a2efa00f7f2..e817537cdfb5 100644
--- a/drivers/iio/common/st_sensors/st_sensors_trigger.c
+++ b/drivers/iio/common/st_sensors/st_sensors_trigger.c
@@ -19,6 +19,9 @@
/**
* st_sensors_new_samples_available() - check if more samples came in
+ * @indio_dev: IIO device reference.
+ * @sdata: Sensor data.
+ *
* returns:
* 0 - no new samples available
* 1 - new samples available
diff --git a/drivers/iio/dac/ad5592r-base.c b/drivers/iio/dac/ad5592r-base.c
index 2d897e64c6a9..e2110113e884 100644
--- a/drivers/iio/dac/ad5592r-base.c
+++ b/drivers/iio/dac/ad5592r-base.c
@@ -15,7 +15,6 @@
#include <linux/regulator/consumer.h>
#include <linux/gpio/consumer.h>
#include <linux/gpio/driver.h>
-#include <linux/gpio.h>
#include <linux/property.h>
#include <dt-bindings/iio/adi,ad5592r.h>
diff --git a/drivers/iio/dac/ad7303.c b/drivers/iio/dac/ad7303.c
index 14bbac6bee98..15af8a1cce3e 100644
--- a/drivers/iio/dac/ad7303.c
+++ b/drivers/iio/dac/ad7303.c
@@ -12,7 +12,6 @@
#include <linux/slab.h>
#include <linux/sysfs.h>
#include <linux/regulator/consumer.h>
-#include <linux/of.h>
#include <linux/iio/iio.h>
#include <linux/iio/sysfs.h>
@@ -202,7 +201,6 @@ static int ad7303_probe(struct spi_device *spi)
const struct spi_device_id *id = spi_get_device_id(spi);
struct iio_dev *indio_dev;
struct ad7303_state *st;
- bool ext_ref;
int ret;
indio_dev = devm_iio_device_alloc(&spi->dev, sizeof(*st));
@@ -224,24 +222,15 @@ static int ad7303_probe(struct spi_device *spi)
if (ret)
return ret;
- if (spi->dev.of_node) {
- ext_ref = of_property_read_bool(spi->dev.of_node,
- "REF-supply");
- } else {
- struct ad7303_platform_data *pdata = spi->dev.platform_data;
- if (pdata && pdata->use_external_ref)
- ext_ref = true;
- else
- ext_ref = false;
- }
-
- if (ext_ref) {
- st->vref_reg = devm_regulator_get(&spi->dev, "REF");
- if (IS_ERR(st->vref_reg)) {
- ret = PTR_ERR(st->vref_reg);
+ st->vref_reg = devm_regulator_get_optional(&spi->dev, "REF");
+ if (IS_ERR(st->vref_reg)) {
+ ret = PTR_ERR(st->vref_reg);
+ if (ret != -ENODEV)
goto err_disable_vdd_reg;
- }
+ st->vref_reg = NULL;
+ }
+ if (st->vref_reg) {
ret = regulator_enable(st->vref_reg);
if (ret)
goto err_disable_vdd_reg;
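
The ad7303 rework leans on a behavioural detail of devm_regulator_get_optional(): unlike plain devm_regulator_get() it never substitutes a dummy regulator, so an absent "REF" supply comes back as -ENODEV and can be folded into the internal-reference path, while any other error (including -EPROBE_DEFER) still aborts the probe. The shape of the pattern in isolation, as a minimal sketch:

    #include <linux/regulator/consumer.h>

    static int demo_get_vref(struct device *dev, struct regulator **out)
    {
        struct regulator *reg;

        reg = devm_regulator_get_optional(dev, "REF");
        if (IS_ERR(reg)) {
            if (PTR_ERR(reg) != -ENODEV)
                return PTR_ERR(reg);  /* real failure, incl. -EPROBE_DEFER */
            reg = NULL;               /* supply absent: internal reference */
        }

        *out = reg;
        return reg ? regulator_enable(reg) : 0;
    }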
diff --git a/drivers/iio/dac/stm32-dac-core.c b/drivers/iio/dac/stm32-dac-core.c
index 9e6b4cd0a5cc..7e5809ba0dee 100644
--- a/drivers/iio/dac/stm32-dac-core.c
+++ b/drivers/iio/dac/stm32-dac-core.c
@@ -20,13 +20,11 @@
/**
* struct stm32_dac_priv - stm32 DAC core private data
* @pclk: peripheral clock common for all DACs
- * @rst: peripheral reset control
* @vref: regulator reference
* @common: Common data for all DAC instances
*/
struct stm32_dac_priv {
struct clk *pclk;
- struct reset_control *rst;
struct regulator *vref;
struct stm32_dac_common common;
};
@@ -94,6 +92,7 @@ static int stm32_dac_probe(struct platform_device *pdev)
struct regmap *regmap;
struct resource *res;
void __iomem *mmio;
+ struct reset_control *rst;
int ret;
if (!dev->of_node)
@@ -148,11 +147,19 @@ static int stm32_dac_probe(struct platform_device *pdev)
priv->common.vref_mv = ret / 1000;
dev_dbg(dev, "vref+=%dmV\n", priv->common.vref_mv);
- priv->rst = devm_reset_control_get_exclusive(dev, NULL);
- if (!IS_ERR(priv->rst)) {
- reset_control_assert(priv->rst);
+ rst = devm_reset_control_get_optional_exclusive(dev, NULL);
+ if (rst) {
+ if (IS_ERR(rst)) {
+ ret = PTR_ERR(rst);
+ if (ret != -EPROBE_DEFER)
+ dev_err(dev, "reset get failed, %d\n", ret);
+
+ goto err_hw_stop;
+ }
+
+ reset_control_assert(rst);
udelay(2);
- reset_control_deassert(priv->rst);
+ reset_control_deassert(rst);
}
if (cfg && cfg->has_hfsel) {
diff --git a/drivers/iio/frequency/adf4350.c b/drivers/iio/frequency/adf4350.c
index ae0ca09ae062..1c2dc9b00f31 100644
--- a/drivers/iio/frequency/adf4350.c
+++ b/drivers/iio/frequency/adf4350.c
@@ -14,11 +14,10 @@
#include <linux/err.h>
#include <linux/module.h>
#include <linux/gcd.h>
-#include <linux/gpio.h>
+#include <linux/gpio/consumer.h>
#include <asm/div64.h>
#include <linux/clk.h>
#include <linux/of.h>
-#include <linux/of_gpio.h>
#include <linux/iio/iio.h>
#include <linux/iio/sysfs.h>
@@ -34,6 +33,7 @@ enum {
struct adf4350_state {
struct spi_device *spi;
struct regulator *reg;
+ struct gpio_desc *lock_detect_gpiod;
struct adf4350_platform_data *pdata;
struct clk *clk;
unsigned long clkin;
@@ -61,7 +61,6 @@ static struct adf4350_platform_data default_pdata = {
.r3_user_settings = ADF4350_REG3_12BIT_CLKDIV_MODE(0),
.r4_user_settings = ADF4350_REG4_OUTPUT_PWR(3) |
ADF4350_REG4_MUTE_TILL_LOCK_EN,
- .gpio_lock_detect = -1,
};
static int adf4350_sync_config(struct adf4350_state *st)
@@ -317,8 +316,8 @@ static ssize_t adf4350_read(struct iio_dev *indio_dev,
(u64)st->fpfd;
do_div(val, st->r1_mod * (1 << st->r4_rf_div_sel));
/* PLL unlocked? return error */
- if (gpio_is_valid(st->pdata->gpio_lock_detect))
- if (!gpio_get_value(st->pdata->gpio_lock_detect)) {
+ if (st->lock_detect_gpiod)
+ if (!gpiod_get_value(st->lock_detect_gpiod)) {
dev_dbg(&st->spi->dev, "PLL un-locked\n");
ret = -EBUSY;
}
@@ -381,7 +380,6 @@ static struct adf4350_platform_data *adf4350_parse_dt(struct device *dev)
struct device_node *np = dev->of_node;
struct adf4350_platform_data *pdata;
unsigned int tmp;
- int ret;
pdata = devm_kzalloc(dev, sizeof(*pdata), GFP_KERNEL);
if (!pdata)
@@ -401,12 +399,6 @@ static struct adf4350_platform_data *adf4350_parse_dt(struct device *dev)
of_property_read_u32(np, "adi,reference-div-factor", &tmp);
pdata->ref_div_factor = tmp;
- ret = of_get_gpio(np, 0);
- if (ret < 0)
- pdata->gpio_lock_detect = -1;
- else
- pdata->gpio_lock_detect = ret;
-
pdata->ref_doubler_en = of_property_read_bool(np,
"adi,reference-doubler-enable");
pdata->ref_div2_en = of_property_read_bool(np,
@@ -561,16 +553,10 @@ static int adf4350_probe(struct spi_device *spi)
memset(st->regs_hw, 0xFF, sizeof(st->regs_hw));
- if (gpio_is_valid(pdata->gpio_lock_detect)) {
- ret = devm_gpio_request(&spi->dev, pdata->gpio_lock_detect,
- indio_dev->name);
- if (ret) {
- dev_err(&spi->dev, "fail to request lock detect GPIO-%d",
- pdata->gpio_lock_detect);
- goto error_disable_reg;
- }
- gpio_direction_input(pdata->gpio_lock_detect);
- }
+ st->lock_detect_gpiod = devm_gpiod_get_optional(&spi->dev, NULL,
+ GPIOD_IN);
+ if (IS_ERR(st->lock_detect_gpiod))
+ return PTR_ERR(st->lock_detect_gpiod);
if (pdata->power_up_frequency) {
ret = adf4350_set_freq(st, pdata->power_up_frequency);
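
The adf4350 lock-detect line uses the optional descriptor variant with a NULL con_id, which matches the anonymous "gpios" device-tree property (the same line the removed of_get_gpio(np, 0) lookup found) and returns NULL when the property is absent, so the descriptor pointer itself replaces the old gpio_is_valid() presence test. Reduced to its essentials:

    /* NULL con_id: matches the bare "gpios" property */
    st->lock_detect_gpiod = devm_gpiod_get_optional(&spi->dev, NULL,
                                                    GPIOD_IN);
    if (IS_ERR(st->lock_detect_gpiod))
        return PTR_ERR(st->lock_detect_gpiod);

    /* later: a non-NULL descriptor doubles as the presence flag */
    if (st->lock_detect_gpiod && !gpiod_get_value(st->lock_detect_gpiod))
        dev_dbg(&st->spi->dev, "PLL un-locked\n");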
diff --git a/drivers/iio/gyro/Kconfig b/drivers/iio/gyro/Kconfig
index 95e6f96d4529..7eaf77707b0b 100644
--- a/drivers/iio/gyro/Kconfig
+++ b/drivers/iio/gyro/Kconfig
@@ -75,26 +75,26 @@ config BMG160_SPI
select REGMAP_SPI
config FXAS21002C
- tristate "NXP FXAS21002C Gyro Sensor"
- select IIO_BUFFER
- select IIO_TRIGGERED_BUFFER
- select FXAS21002C_I2C if (I2C)
- select FXAS21002C_SPI if (SPI)
- depends on (I2C || SPI_MASTER)
- help
- Say yes here to build support for NXP FXAS21002C Tri-axis Gyro
- Sensor driver connected via I2C or SPI.
-
- This driver can also be built as a module. If so, the module
- will be called fxas21002c_i2c or fxas21002c_spi.
+ tristate "NXP FXAS21002C Gyro Sensor"
+ select IIO_BUFFER
+ select IIO_TRIGGERED_BUFFER
+ select FXAS21002C_I2C if (I2C)
+ select FXAS21002C_SPI if (SPI)
+ depends on (I2C || SPI_MASTER)
+ help
+ Say yes here to build support for NXP FXAS21002C Tri-axis Gyro
+ Sensor driver connected via I2C or SPI.
+
+ This driver can also be built as a module. If so, the module
+ will be called fxas21002c_i2c or fxas21002c_spi.
config FXAS21002C_I2C
- tristate
- select REGMAP_I2C
+ tristate
+ select REGMAP_I2C
config FXAS21002C_SPI
- tristate
- select REGMAP_SPI
+ tristate
+ select REGMAP_SPI
config HID_SENSOR_GYRO_3D
depends on HID_SENSOR_HUB
diff --git a/drivers/iio/gyro/adis16136.c b/drivers/iio/gyro/adis16136.c
index d637d52d051a..d5e03a406d4a 100644
--- a/drivers/iio/gyro/adis16136.c
+++ b/drivers/iio/gyro/adis16136.c
@@ -59,6 +59,7 @@
struct adis16136_chip_info {
unsigned int precision;
unsigned int fullscale;
+ const struct adis_timeout *timeouts;
};
struct adis16136 {
@@ -185,12 +186,12 @@ static int adis16136_set_freq(struct adis16136 *adis16136, unsigned int freq)
return adis_write_reg_16(&adis16136->adis, ADIS16136_REG_SMPL_PRD, t);
}
-static int adis16136_get_freq(struct adis16136 *adis16136, unsigned int *freq)
+static int __adis16136_get_freq(struct adis16136 *adis16136, unsigned int *freq)
{
uint16_t t;
int ret;
- ret = adis_read_reg_16(&adis16136->adis, ADIS16136_REG_SMPL_PRD, &t);
+ ret = __adis_read_reg_16(&adis16136->adis, ADIS16136_REG_SMPL_PRD, &t);
if (ret)
return ret;
@@ -224,10 +225,13 @@ static ssize_t adis16136_read_frequency(struct device *dev,
{
struct iio_dev *indio_dev = dev_to_iio_dev(dev);
struct adis16136 *adis16136 = iio_priv(indio_dev);
+ struct mutex *slock = &adis16136->adis.state_lock;
unsigned int freq;
int ret;
- ret = adis16136_get_freq(adis16136, &freq);
+ mutex_lock(slock);
+ ret = __adis16136_get_freq(adis16136, &freq);
+ mutex_unlock(slock);
if (ret)
return ret;
@@ -252,42 +256,50 @@ static const unsigned adis16136_3db_divisors[] = {
static int adis16136_set_filter(struct iio_dev *indio_dev, int val)
{
struct adis16136 *adis16136 = iio_priv(indio_dev);
+ struct mutex *slock = &adis16136->adis.state_lock;
unsigned int freq;
int i, ret;
- ret = adis16136_get_freq(adis16136, &freq);
+ mutex_lock(slock);
+ ret = __adis16136_get_freq(adis16136, &freq);
if (ret)
- return ret;
+ goto out_unlock;
for (i = ARRAY_SIZE(adis16136_3db_divisors) - 1; i >= 1; i--) {
if (freq / adis16136_3db_divisors[i] >= val)
break;
}
- return adis_write_reg_16(&adis16136->adis, ADIS16136_REG_AVG_CNT, i);
+ ret = __adis_write_reg_16(&adis16136->adis, ADIS16136_REG_AVG_CNT, i);
+out_unlock:
+ mutex_unlock(slock);
+
+ return ret;
}
static int adis16136_get_filter(struct iio_dev *indio_dev, int *val)
{
struct adis16136 *adis16136 = iio_priv(indio_dev);
+ struct mutex *slock = &adis16136->adis.state_lock;
unsigned int freq;
uint16_t val16;
int ret;
- mutex_lock(&indio_dev->mlock);
+ mutex_lock(slock);
- ret = adis_read_reg_16(&adis16136->adis, ADIS16136_REG_AVG_CNT, &val16);
+ ret = __adis_read_reg_16(&adis16136->adis, ADIS16136_REG_AVG_CNT,
+ &val16);
if (ret)
goto err_unlock;
- ret = adis16136_get_freq(adis16136, &freq);
+ ret = __adis16136_get_freq(adis16136, &freq);
if (ret)
goto err_unlock;
*val = freq / adis16136_3db_divisors[val16 & 0x07];
err_unlock:
- mutex_unlock(&indio_dev->mlock);
+ mutex_unlock(slock);
return ret ? ret : IIO_VAL_INT;
}
@@ -460,7 +472,6 @@ static const struct adis_data adis16136_data = {
.msc_ctrl_reg = ADIS16136_REG_MSC_CTRL,
.self_test_mask = ADIS16136_MSC_CTRL_SELF_TEST,
- .startup_delay = 80,
.read_delay = 10,
.write_delay = 10,
@@ -479,30 +490,63 @@ enum adis16136_id {
ID_ADIS16137,
};
+static const struct adis_timeout adis16133_timeouts = {
+ .reset_ms = 75,
+ .sw_reset_ms = 75,
+ .self_test_ms = 50,
+};
+
+static const struct adis_timeout adis16136_timeouts = {
+ .reset_ms = 128,
+ .sw_reset_ms = 75,
+ .self_test_ms = 245,
+};
+
static const struct adis16136_chip_info adis16136_chip_info[] = {
[ID_ADIS16133] = {
.precision = IIO_DEGREE_TO_RAD(1200),
.fullscale = 24000,
+ .timeouts = &adis16133_timeouts,
},
[ID_ADIS16135] = {
.precision = IIO_DEGREE_TO_RAD(300),
.fullscale = 24000,
+ .timeouts = &adis16133_timeouts,
},
[ID_ADIS16136] = {
.precision = IIO_DEGREE_TO_RAD(450),
.fullscale = 24623,
+ .timeouts = &adis16136_timeouts,
},
[ID_ADIS16137] = {
.precision = IIO_DEGREE_TO_RAD(1000),
.fullscale = 24609,
+ .timeouts = &adis16136_timeouts,
},
};
+static struct adis_data *adis16136_adis_data_alloc(struct adis16136 *st,
+ struct device *dev)
+{
+ struct adis_data *data;
+
+ data = devm_kmalloc(dev, sizeof(struct adis_data), GFP_KERNEL);
+ if (!data)
+ return ERR_PTR(-ENOMEM);
+
+ memcpy(data, &adis16136_data, sizeof(*data));
+
+ data->timeouts = st->chip_info->timeouts;
+
+ return data;
+}
+
static int adis16136_probe(struct spi_device *spi)
{
const struct spi_device_id *id = spi_get_device_id(spi);
struct adis16136 *adis16136;
struct iio_dev *indio_dev;
+ const struct adis_data *adis16136_data;
int ret;
indio_dev = devm_iio_device_alloc(&spi->dev, sizeof(*adis16136));
@@ -521,7 +565,11 @@ static int adis16136_probe(struct spi_device *spi)
indio_dev->info = &adis16136_info;
indio_dev->modes = INDIO_DIRECT_MODE;
- ret = adis_init(&adis16136->adis, indio_dev, spi, &adis16136_data);
+ adis16136_data = adis16136_adis_data_alloc(adis16136, &spi->dev);
+ if (IS_ERR(adis16136_data))
+ return PTR_ERR(adis16136_data);
+
+ ret = adis_init(&adis16136->adis, indio_dev, spi, adis16136_data);
if (ret)
return ret;
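
Because adis_data is a shared const template but the timeouts are now per-chip, the probe clones the template into devm-managed memory and patches in the chip's timeout table before calling adis_init(); the same helper shape reappears in adis16400 below. A generic sketch of the copy-and-specialize step:

    static struct adis_data *demo_data_alloc(struct device *dev,
                                             const struct adis_data *tmpl,
                                             const struct adis_timeout *to)
    {
        struct adis_data *data;

        data = devm_kmalloc(dev, sizeof(*data), GFP_KERNEL);
        if (!data)
            return ERR_PTR(-ENOMEM);

        memcpy(data, tmpl, sizeof(*data));
        data->timeouts = to;       /* per-chip specialization */

        return data;
    }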
diff --git a/drivers/iio/gyro/adis16260.c b/drivers/iio/gyro/adis16260.c
index 207a0ce13439..be09b3e5910c 100644
--- a/drivers/iio/gyro/adis16260.c
+++ b/drivers/iio/gyro/adis16260.c
@@ -293,7 +293,7 @@ static int adis16260_write_raw(struct iio_dev *indio_dev,
addr = adis16260_addresses[chan->scan_index][1];
return adis_write_reg_16(adis, addr, val);
case IIO_CHAN_INFO_SAMP_FREQ:
- mutex_lock(&indio_dev->mlock);
+ mutex_lock(&adis->state_lock);
if (spi_get_device_id(adis->spi)->driver_data)
t = 256 / val;
else
@@ -308,9 +308,9 @@ static int adis16260_write_raw(struct iio_dev *indio_dev,
adis->spi->max_speed_hz = ADIS16260_SPI_SLOW;
else
adis->spi->max_speed_hz = ADIS16260_SPI_FAST;
- ret = adis_write_reg_8(adis, ADIS16260_SMPL_PRD, t);
+ ret = __adis_write_reg_8(adis, ADIS16260_SMPL_PRD, t);
- mutex_unlock(&indio_dev->mlock);
+ mutex_unlock(&adis->state_lock);
return ret;
}
return -EINVAL;
@@ -332,6 +332,12 @@ static const char * const adis1620_status_error_msgs[] = {
[ADIS16260_DIAG_STAT_POWER_LOW_BIT] = "Power supply below 4.75",
};
+static const struct adis_timeout adis16260_timeouts = {
+ .reset_ms = ADIS16260_STARTUP_DELAY,
+ .sw_reset_ms = ADIS16260_STARTUP_DELAY,
+ .self_test_ms = ADIS16260_STARTUP_DELAY,
+};
+
static const struct adis_data adis16260_data = {
.write_delay = 30,
.read_delay = 30,
@@ -340,7 +346,7 @@ static const struct adis_data adis16260_data = {
.diag_stat_reg = ADIS16260_DIAG_STAT,
.self_test_mask = ADIS16260_MSC_CTRL_MEM_TEST,
- .startup_delay = ADIS16260_STARTUP_DELAY,
+ .timeouts = &adis16260_timeouts,
.status_error_msgs = adis1620_status_error_msgs,
.status_error_mask = BIT(ADIS16260_DIAG_STAT_FLASH_CHK_BIT) |
diff --git a/drivers/iio/gyro/itg3200_core.c b/drivers/iio/gyro/itg3200_core.c
index 981ae2291505..b3afa556f973 100644
--- a/drivers/iio/gyro/itg3200_core.c
+++ b/drivers/iio/gyro/itg3200_core.c
@@ -15,7 +15,6 @@
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/i2c.h>
-#include <linux/gpio.h>
#include <linux/slab.h>
#include <linux/stat.h>
#include <linux/module.h>
diff --git a/drivers/iio/gyro/st_gyro.h b/drivers/iio/gyro/st_gyro.h
index 592f6b34e987..fd9171cc3aba 100644
--- a/drivers/iio/gyro/st_gyro.h
+++ b/drivers/iio/gyro/st_gyro.h
@@ -28,7 +28,7 @@
* struct st_sensors_platform_data - gyro platform data
* @drdy_int_pin: DRDY on gyros is available only on INT2 pin.
*/
-static const struct st_sensors_platform_data gyro_pdata = {
+static __maybe_unused const struct st_sensors_platform_data gyro_pdata = {
.drdy_int_pin = 2,
};
diff --git a/drivers/iio/gyro/st_gyro_core.c b/drivers/iio/gyro/st_gyro_core.c
index 57be68b291fa..26c50b24bc08 100644
--- a/drivers/iio/gyro/st_gyro_core.c
+++ b/drivers/iio/gyro/st_gyro_core.c
@@ -138,7 +138,6 @@ static const struct st_sensor_settings st_gyro_sensors_settings[] = {
[2] = LSM330DLC_GYRO_DEV_NAME,
[3] = L3G4IS_GYRO_DEV_NAME,
[4] = LSM330_GYRO_DEV_NAME,
- [5] = LSM9DS0_GYRO_DEV_NAME,
},
.ch = (struct iio_chan_spec *)st_gyro_16bit_channels,
.odr = {
@@ -209,6 +208,80 @@ static const struct st_sensor_settings st_gyro_sensors_settings[] = {
.bootime = 2,
},
{
+ .wai = 0xd4,
+ .wai_addr = ST_SENSORS_DEFAULT_WAI_ADDRESS,
+ .sensors_supported = {
+ [0] = LSM9DS0_GYRO_DEV_NAME,
+ },
+ .ch = (struct iio_chan_spec *)st_gyro_16bit_channels,
+ .odr = {
+ .addr = 0x20,
+ .mask = GENMASK(7, 6),
+ .odr_avl = {
+ { .hz = 95, .value = 0x00, },
+ { .hz = 190, .value = 0x01, },
+ { .hz = 380, .value = 0x02, },
+ { .hz = 760, .value = 0x03, },
+ },
+ },
+ .pw = {
+ .addr = 0x20,
+ .mask = BIT(3),
+ .value_on = ST_SENSORS_DEFAULT_POWER_ON_VALUE,
+ .value_off = ST_SENSORS_DEFAULT_POWER_OFF_VALUE,
+ },
+ .enable_axis = {
+ .addr = ST_SENSORS_DEFAULT_AXIS_ADDR,
+ .mask = ST_SENSORS_DEFAULT_AXIS_MASK,
+ },
+ .fs = {
+ .addr = 0x23,
+ .mask = GENMASK(5, 4),
+ .fs_avl = {
+ [0] = {
+ .num = ST_GYRO_FS_AVL_245DPS,
+ .value = 0x00,
+ .gain = IIO_DEGREE_TO_RAD(8750),
+ },
+ [1] = {
+ .num = ST_GYRO_FS_AVL_500DPS,
+ .value = 0x01,
+ .gain = IIO_DEGREE_TO_RAD(17500),
+ },
+ [2] = {
+ .num = ST_GYRO_FS_AVL_2000DPS,
+ .value = 0x02,
+ .gain = IIO_DEGREE_TO_RAD(70000),
+ },
+ },
+ },
+ .bdu = {
+ .addr = 0x23,
+ .mask = BIT(7),
+ },
+ .drdy_irq = {
+ .int2 = {
+ .addr = 0x22,
+ .mask = BIT(3),
+ },
+ /*
+ * The sensor has IHL (active low) and open
+ * drain settings, but only for INT1 and not
+ * for the DRDY line on INT2.
+ */
+ .stat_drdy = {
+ .addr = ST_SENSORS_DEFAULT_STAT_ADDR,
+ .mask = GENMASK(2, 0),
+ },
+ },
+ .sim = {
+ .addr = 0x23,
+ .value = BIT(0),
+ },
+ .multi_read_bit = true,
+ .bootime = 2,
+ },
+ {
.wai = 0xd7,
.wai_addr = ST_SENSORS_DEFAULT_WAI_ADDRESS,
.sensors_supported = {
diff --git a/drivers/iio/gyro/st_gyro_i2c.c b/drivers/iio/gyro/st_gyro_i2c.c
index 05a1a0874bd5..8190966e6ff0 100644
--- a/drivers/iio/gyro/st_gyro_i2c.c
+++ b/drivers/iio/gyro/st_gyro_i2c.c
@@ -17,7 +17,6 @@
#include <linux/iio/common/st_sensors_i2c.h>
#include "st_gyro.h"
-#ifdef CONFIG_OF
static const struct of_device_id st_gyro_of_match[] = {
{
.compatible = "st,l3g4200d-gyro",
@@ -58,9 +57,6 @@ static const struct of_device_id st_gyro_of_match[] = {
{},
};
MODULE_DEVICE_TABLE(of, st_gyro_of_match);
-#else
-#define st_gyro_of_match NULL
-#endif
static int st_gyro_i2c_probe(struct i2c_client *client,
const struct i2c_device_id *id)
@@ -70,8 +66,7 @@ static int st_gyro_i2c_probe(struct i2c_client *client,
struct iio_dev *indio_dev;
int err;
- st_sensors_of_name_probe(&client->dev, st_gyro_of_match,
- client->name, sizeof(client->name));
+ st_sensors_dev_name_probe(&client->dev, client->name, sizeof(client->name));
settings = st_gyro_get_settings(client->name);
if (!settings) {
@@ -122,7 +117,7 @@ MODULE_DEVICE_TABLE(i2c, st_gyro_id_table);
static struct i2c_driver st_gyro_driver = {
.driver = {
.name = "st-gyro-i2c",
- .of_match_table = of_match_ptr(st_gyro_of_match),
+ .of_match_table = st_gyro_of_match,
},
.probe = st_gyro_i2c_probe,
.remove = st_gyro_i2c_remove,
diff --git a/drivers/iio/gyro/st_gyro_spi.c b/drivers/iio/gyro/st_gyro_spi.c
index b5c624251231..efb862763ca3 100644
--- a/drivers/iio/gyro/st_gyro_spi.c
+++ b/drivers/iio/gyro/st_gyro_spi.c
@@ -17,7 +17,6 @@
#include <linux/iio/common/st_sensors_spi.h>
#include "st_gyro.h"
-#ifdef CONFIG_OF
/*
* For new single-chip sensors use <device_name> as compatible string.
* For old single-chip devices keep <device_name>-gyro to maintain
@@ -63,9 +62,6 @@ static const struct of_device_id st_gyro_of_match[] = {
{},
};
MODULE_DEVICE_TABLE(of, st_gyro_of_match);
-#else
-#define st_gyro_of_match NULL
-#endif
static int st_gyro_spi_probe(struct spi_device *spi)
{
@@ -74,8 +70,7 @@ static int st_gyro_spi_probe(struct spi_device *spi)
struct iio_dev *indio_dev;
int err;
- st_sensors_of_name_probe(&spi->dev, st_gyro_of_match,
- spi->modalias, sizeof(spi->modalias));
+ st_sensors_dev_name_probe(&spi->dev, spi->modalias, sizeof(spi->modalias));
settings = st_gyro_get_settings(spi->modalias);
if (!settings) {
@@ -126,7 +121,7 @@ MODULE_DEVICE_TABLE(spi, st_gyro_id_table);
static struct spi_driver st_gyro_driver = {
.driver = {
.name = "st-gyro-spi",
- .of_match_table = of_match_ptr(st_gyro_of_match),
+ .of_match_table = st_gyro_of_match,
},
.probe = st_gyro_spi_probe,
.remove = st_gyro_spi_remove,
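
Dropping the #ifdef CONFIG_OF guards and the of_match_ptr() wrapper from both st_gyro transports means the OF match table is always built and always referenced, which fits the new device_get_match_data() based name probing and, as a likely side effect, lets ACPI systems bind the driver through the PRP0001 compatible-string mechanism. The driver declaration simply points at the table directly:

    static struct spi_driver st_gyro_driver = {
        .driver = {
            .name = "st-gyro-spi",
            /* no of_match_ptr(): table exists even without CONFIG_OF */
            .of_match_table = st_gyro_of_match,
        },
        .probe = st_gyro_spi_probe,
        .remove = st_gyro_spi_remove,
        .id_table = st_gyro_id_table,
    };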
diff --git a/drivers/iio/humidity/dht11.c b/drivers/iio/humidity/dht11.c
index b459600e1a33..d05c6fdb758b 100644
--- a/drivers/iio/humidity/dht11.c
+++ b/drivers/iio/humidity/dht11.c
@@ -174,7 +174,6 @@ static irqreturn_t dht11_handle_irq(int irq, void *data)
struct iio_dev *iio = data;
struct dht11 *dht11 = iio_priv(iio);
- /* TODO: Consider making the handler safe for IRQ sharing */
if (dht11->num_edges < DHT11_EDGES_PER_READ && dht11->num_edges >= 0) {
dht11->edges[dht11->num_edges].ts = ktime_get_boottime_ns();
dht11->edges[dht11->num_edges++].value =
diff --git a/drivers/iio/humidity/hts221_core.c b/drivers/iio/humidity/hts221_core.c
index 4922444771c6..9003671f14fb 100644
--- a/drivers/iio/humidity/hts221_core.c
+++ b/drivers/iio/humidity/hts221_core.c
@@ -24,13 +24,6 @@
#define HTS221_REG_CNTRL1_ADDR 0x20
#define HTS221_REG_CNTRL2_ADDR 0x21
-#define HTS221_REG_AVG_ADDR 0x10
-#define HTS221_REG_H_OUT_L 0x28
-#define HTS221_REG_T_OUT_L 0x2a
-
-#define HTS221_HUMIDITY_AVG_MASK 0x07
-#define HTS221_TEMP_AVG_MASK 0x38
-
#define HTS221_ODR_MASK 0x03
#define HTS221_BDU_MASK BIT(2)
#define HTS221_ENABLE_MASK BIT(7)
@@ -66,8 +59,8 @@ static const struct hts221_odr hts221_odr_table[] = {
static const struct hts221_avg hts221_avg_list[] = {
{
- .addr = HTS221_REG_AVG_ADDR,
- .mask = HTS221_HUMIDITY_AVG_MASK,
+ .addr = 0x10,
+ .mask = 0x07,
.avg_avl = {
4, /* 0.4 %RH */
8, /* 0.3 %RH */
@@ -80,8 +73,8 @@ static const struct hts221_avg hts221_avg_list[] = {
},
},
{
- .addr = HTS221_REG_AVG_ADDR,
- .mask = HTS221_TEMP_AVG_MASK,
+ .addr = 0x10,
+ .mask = 0x38,
.avg_avl = {
2, /* 0.08 degC */
4, /* 0.05 degC */
@@ -98,7 +91,7 @@ static const struct hts221_avg hts221_avg_list[] = {
static const struct iio_chan_spec hts221_channels[] = {
{
.type = IIO_HUMIDITYRELATIVE,
- .address = HTS221_REG_H_OUT_L,
+ .address = 0x28,
.info_mask_separate = BIT(IIO_CHAN_INFO_RAW) |
BIT(IIO_CHAN_INFO_OFFSET) |
BIT(IIO_CHAN_INFO_SCALE) |
@@ -114,7 +107,7 @@ static const struct iio_chan_spec hts221_channels[] = {
},
{
.type = IIO_TEMP,
- .address = HTS221_REG_T_OUT_L,
+ .address = 0x2a,
.info_mask_separate = BIT(IIO_CHAN_INFO_RAW) |
BIT(IIO_CHAN_INFO_OFFSET) |
BIT(IIO_CHAN_INFO_SCALE) |
diff --git a/drivers/iio/iio_core.h b/drivers/iio/iio_core.h
index 159ea3f8c02b..fd9a5f1d5e51 100644
--- a/drivers/iio/iio_core.h
+++ b/drivers/iio/iio_core.h
@@ -42,14 +42,14 @@ struct poll_table_struct;
__poll_t iio_buffer_poll(struct file *filp,
struct poll_table_struct *wait);
-ssize_t iio_buffer_read_first_n_outer(struct file *filp, char __user *buf,
- size_t n, loff_t *f_ps);
+ssize_t iio_buffer_read_outer(struct file *filp, char __user *buf,
+ size_t n, loff_t *f_ps);
int iio_buffer_alloc_sysfs_and_mask(struct iio_dev *indio_dev);
void iio_buffer_free_sysfs_and_mask(struct iio_dev *indio_dev);
#define iio_buffer_poll_addr (&iio_buffer_poll)
-#define iio_buffer_read_first_n_outer_addr (&iio_buffer_read_first_n_outer)
+#define iio_buffer_read_outer_addr (&iio_buffer_read_outer)
void iio_disable_all_buffers(struct iio_dev *indio_dev);
void iio_buffer_wakeup_poll(struct iio_dev *indio_dev);
@@ -57,7 +57,7 @@ void iio_buffer_wakeup_poll(struct iio_dev *indio_dev);
#else
#define iio_buffer_poll_addr NULL
-#define iio_buffer_read_first_n_outer_addr NULL
+#define iio_buffer_read_outer_addr NULL
static inline int iio_buffer_alloc_sysfs_and_mask(struct iio_dev *indio_dev)
{
diff --git a/drivers/iio/imu/adis.c b/drivers/iio/imu/adis.c
index e14c8536fd09..022bb54fb748 100644
--- a/drivers/iio/imu/adis.c
+++ b/drivers/iio/imu/adis.c
@@ -26,7 +26,14 @@
#define ADIS_MSC_CTRL_DATA_RDY_DIO2 BIT(0)
#define ADIS_GLOB_CMD_SW_RESET BIT(7)
-int adis_write_reg(struct adis *adis, unsigned int reg,
+/**
+ * __adis_write_reg() - write N bytes to register (unlocked version)
+ * @adis: The adis device
+ * @reg: The address of the lower of the two registers
+ * @value: The value to write to device (up to 4 bytes)
+ * @size: The size of the @value (in bytes)
+ */
+int __adis_write_reg(struct adis *adis, unsigned int reg,
unsigned int value, unsigned int size)
{
unsigned int page = reg / ADIS_PAGE_SIZE;
@@ -38,7 +45,8 @@ int adis_write_reg(struct adis *adis, unsigned int reg,
.bits_per_word = 8,
.len = 2,
.cs_change = 1,
- .delay_usecs = adis->data->write_delay,
+ .delay.value = adis->data->write_delay,
+ .delay.unit = SPI_DELAY_UNIT_USECS,
.cs_change_delay.value = adis->data->cs_change_delay,
.cs_change_delay.unit = SPI_DELAY_UNIT_USECS,
}, {
@@ -46,7 +54,8 @@ int adis_write_reg(struct adis *adis, unsigned int reg,
.bits_per_word = 8,
.len = 2,
.cs_change = 1,
- .delay_usecs = adis->data->write_delay,
+ .delay.value = adis->data->write_delay,
+ .delay.unit = SPI_DELAY_UNIT_USECS,
.cs_change_delay.value = adis->data->cs_change_delay,
.cs_change_delay.unit = SPI_DELAY_UNIT_USECS,
}, {
@@ -54,24 +63,25 @@ int adis_write_reg(struct adis *adis, unsigned int reg,
.bits_per_word = 8,
.len = 2,
.cs_change = 1,
- .delay_usecs = adis->data->write_delay,
+ .delay.value = adis->data->write_delay,
+ .delay.unit = SPI_DELAY_UNIT_USECS,
.cs_change_delay.value = adis->data->cs_change_delay,
.cs_change_delay.unit = SPI_DELAY_UNIT_USECS,
}, {
.tx_buf = adis->tx + 6,
.bits_per_word = 8,
.len = 2,
- .delay_usecs = adis->data->write_delay,
+ .delay.value = adis->data->write_delay,
+ .delay.unit = SPI_DELAY_UNIT_USECS,
}, {
.tx_buf = adis->tx + 8,
.bits_per_word = 8,
.len = 2,
- .delay_usecs = adis->data->write_delay,
+ .delay.value = adis->data->write_delay,
+ .delay.unit = SPI_DELAY_UNIT_USECS,
},
};
- mutex_lock(&adis->txrx_lock);
-
spi_message_init(&msg);
if (adis->current_page != page) {
@@ -96,8 +106,7 @@ int adis_write_reg(struct adis *adis, unsigned int reg,
adis->tx[3] = value & 0xff;
break;
default:
- ret = -EINVAL;
- goto out_unlock;
+ return -EINVAL;
}
xfers[size].cs_change = 0;
@@ -113,20 +122,18 @@ int adis_write_reg(struct adis *adis, unsigned int reg,
adis->current_page = page;
}
-out_unlock:
- mutex_unlock(&adis->txrx_lock);
-
return ret;
}
-EXPORT_SYMBOL_GPL(adis_write_reg);
+EXPORT_SYMBOL_GPL(__adis_write_reg);
/**
- * adis_read_reg() - read 2 bytes from a 16-bit register
+ * __adis_read_reg() - read N bytes from register (unlocked version)
* @adis: The adis device
* @reg: The address of the lower of the two registers
* @val: The value read back from the device
+ * @size: The size of the @val buffer
*/
-int adis_read_reg(struct adis *adis, unsigned int reg,
+int __adis_read_reg(struct adis *adis, unsigned int reg,
unsigned int *val, unsigned int size)
{
unsigned int page = reg / ADIS_PAGE_SIZE;
@@ -138,7 +145,8 @@ int adis_read_reg(struct adis *adis, unsigned int reg,
.bits_per_word = 8,
.len = 2,
.cs_change = 1,
- .delay_usecs = adis->data->write_delay,
+ .delay.value = adis->data->write_delay,
+ .delay.unit = SPI_DELAY_UNIT_USECS,
.cs_change_delay.value = adis->data->cs_change_delay,
.cs_change_delay.unit = SPI_DELAY_UNIT_USECS,
}, {
@@ -146,7 +154,8 @@ int adis_read_reg(struct adis *adis, unsigned int reg,
.bits_per_word = 8,
.len = 2,
.cs_change = 1,
- .delay_usecs = adis->data->read_delay,
+ .delay.value = adis->data->read_delay,
+ .delay.unit = SPI_DELAY_UNIT_USECS,
.cs_change_delay.value = adis->data->cs_change_delay,
.cs_change_delay.unit = SPI_DELAY_UNIT_USECS,
}, {
@@ -155,18 +164,19 @@ int adis_read_reg(struct adis *adis, unsigned int reg,
.bits_per_word = 8,
.len = 2,
.cs_change = 1,
- .delay_usecs = adis->data->read_delay,
+ .delay.value = adis->data->read_delay,
+ .delay.unit = SPI_DELAY_UNIT_USECS,
.cs_change_delay.value = adis->data->cs_change_delay,
.cs_change_delay.unit = SPI_DELAY_UNIT_USECS,
}, {
.rx_buf = adis->rx + 2,
.bits_per_word = 8,
.len = 2,
- .delay_usecs = adis->data->read_delay,
+ .delay.value = adis->data->read_delay,
+ .delay.unit = SPI_DELAY_UNIT_USECS,
},
};
- mutex_lock(&adis->txrx_lock);
spi_message_init(&msg);
if (adis->current_page != page) {
@@ -188,15 +198,14 @@ int adis_read_reg(struct adis *adis, unsigned int reg,
spi_message_add_tail(&xfers[3], &msg);
break;
default:
- ret = -EINVAL;
- goto out_unlock;
+ return -EINVAL;
}
ret = spi_sync(adis->spi, &msg);
if (ret) {
dev_err(&adis->spi->dev, "Failed to read register 0x%02X: %d\n",
reg, ret);
- goto out_unlock;
+ return ret;
} else {
adis->current_page = page;
}
@@ -210,12 +219,9 @@ int adis_read_reg(struct adis *adis, unsigned int reg,
break;
}
-out_unlock:
- mutex_unlock(&adis->txrx_lock);
-
return ret;
}
-EXPORT_SYMBOL_GPL(adis_read_reg);
+EXPORT_SYMBOL_GPL(__adis_read_reg);
#ifdef CONFIG_DEBUG_FS
@@ -253,12 +259,16 @@ int adis_enable_irq(struct adis *adis, bool enable)
int ret = 0;
uint16_t msc;
- if (adis->data->enable_irq)
- return adis->data->enable_irq(adis, enable);
+ mutex_lock(&adis->state_lock);
+
+ if (adis->data->enable_irq) {
+ ret = adis->data->enable_irq(adis, enable);
+ goto out_unlock;
+ }
- ret = adis_read_reg_16(adis, adis->data->msc_ctrl_reg, &msc);
+ ret = __adis_read_reg_16(adis, adis->data->msc_ctrl_reg, &msc);
if (ret)
- goto error_ret;
+ goto out_unlock;
msc |= ADIS_MSC_CTRL_DATA_RDY_POL_HIGH;
msc &= ~ADIS_MSC_CTRL_DATA_RDY_DIO2;
@@ -267,26 +277,27 @@ int adis_enable_irq(struct adis *adis, bool enable)
else
msc &= ~ADIS_MSC_CTRL_DATA_RDY_EN;
- ret = adis_write_reg_16(adis, adis->data->msc_ctrl_reg, msc);
+ ret = __adis_write_reg_16(adis, adis->data->msc_ctrl_reg, msc);
-error_ret:
+out_unlock:
+ mutex_unlock(&adis->state_lock);
return ret;
}
EXPORT_SYMBOL(adis_enable_irq);
/**
- * adis_check_status() - Check the device for error conditions
+ * __adis_check_status() - Check the device for error conditions (unlocked)
* @adis: The adis device
*
* Returns 0 on success, a negative error code otherwise
*/
-int adis_check_status(struct adis *adis)
+int __adis_check_status(struct adis *adis)
{
uint16_t status;
int ret;
int i;
- ret = adis_read_reg_16(adis, adis->data->diag_stat_reg, &status);
+ ret = __adis_read_reg_16(adis, adis->data->diag_stat_reg, &status);
if (ret)
return ret;
@@ -304,32 +315,38 @@ int adis_check_status(struct adis *adis)
return -EIO;
}
-EXPORT_SYMBOL_GPL(adis_check_status);
+EXPORT_SYMBOL_GPL(__adis_check_status);
/**
- * adis_reset() - Reset the device
+ * __adis_reset() - Reset the device (unlocked version)
* @adis: The adis device
*
* Returns 0 on success, a negative error code otherwise
*/
-int adis_reset(struct adis *adis)
+int __adis_reset(struct adis *adis)
{
int ret;
+ const struct adis_timeout *timeouts = adis->data->timeouts;
- ret = adis_write_reg_8(adis, adis->data->glob_cmd_reg,
+ ret = __adis_write_reg_8(adis, adis->data->glob_cmd_reg,
ADIS_GLOB_CMD_SW_RESET);
- if (ret)
+ if (ret) {
dev_err(&adis->spi->dev, "Failed to reset device: %d\n", ret);
+ return ret;
+ }
- return ret;
+ msleep(timeouts->sw_reset_ms);
+
+ return 0;
}
-EXPORT_SYMBOL_GPL(adis_reset);
+EXPORT_SYMBOL_GPL(__adis_reset);
static int adis_self_test(struct adis *adis)
{
int ret;
+ const struct adis_timeout *timeouts = adis->data->timeouts;
- ret = adis_write_reg_16(adis, adis->data->msc_ctrl_reg,
+ ret = __adis_write_reg_16(adis, adis->data->msc_ctrl_reg,
adis->data->self_test_mask);
if (ret) {
dev_err(&adis->spi->dev, "Failed to initiate self test: %d\n",
@@ -337,12 +354,12 @@ static int adis_self_test(struct adis *adis)
return ret;
}
- msleep(adis->data->startup_delay);
+ msleep(timeouts->self_test_ms);
- ret = adis_check_status(adis);
+ ret = __adis_check_status(adis);
if (adis->data->self_test_no_autoclear)
- adis_write_reg_16(adis, adis->data->msc_ctrl_reg, 0x00);
+ __adis_write_reg_16(adis, adis->data->msc_ctrl_reg, 0x00);
return ret;
}
@@ -360,19 +377,22 @@ int adis_initial_startup(struct adis *adis)
{
int ret;
+ mutex_lock(&adis->state_lock);
+
ret = adis_self_test(adis);
if (ret) {
dev_err(&adis->spi->dev, "Self-test failed, trying reset.\n");
- adis_reset(adis);
- msleep(adis->data->startup_delay);
+ __adis_reset(adis);
ret = adis_self_test(adis);
if (ret) {
dev_err(&adis->spi->dev, "Second self-test failed, giving up.\n");
- return ret;
+ goto out_unlock;
}
}
- return 0;
+out_unlock:
+ mutex_unlock(&adis->state_lock);
+ return ret;
}
EXPORT_SYMBOL_GPL(adis_initial_startup);
@@ -398,15 +418,15 @@ int adis_single_conversion(struct iio_dev *indio_dev,
unsigned int uval;
int ret;
- mutex_lock(&indio_dev->mlock);
+ mutex_lock(&adis->state_lock);
- ret = adis_read_reg(adis, chan->address, &uval,
+ ret = __adis_read_reg(adis, chan->address, &uval,
chan->scan_type.storagebits / 8);
if (ret)
goto err_unlock;
if (uval & error_mask) {
- ret = adis_check_status(adis);
+ ret = __adis_check_status(adis);
if (ret)
goto err_unlock;
}
@@ -418,7 +438,7 @@ int adis_single_conversion(struct iio_dev *indio_dev,
ret = IIO_VAL_INT;
err_unlock:
- mutex_unlock(&indio_dev->mlock);
+ mutex_unlock(&adis->state_lock);
return ret;
}
EXPORT_SYMBOL_GPL(adis_single_conversion);
@@ -438,7 +458,12 @@ EXPORT_SYMBOL_GPL(adis_single_conversion);
int adis_init(struct adis *adis, struct iio_dev *indio_dev,
struct spi_device *spi, const struct adis_data *data)
{
- mutex_init(&adis->txrx_lock);
+ if (!data || !data->timeouts) {
+ dev_err(&spi->dev, "No config data or timeouts not defined!\n");
+ return -EINVAL;
+ }
+
+ mutex_init(&adis->state_lock);
adis->spi = spi;
adis->data = data;
iio_device_set_drvdata(indio_dev, adis);
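
The core rework pairs every register accessor with an unlocked __adis_* variant guarded by the new state_lock (which replaces both txrx_lock and the drivers' use of indio_dev->mlock), so multi-register sequences such as read-modify-write or the frequency-plus-filter updates in the gyro drivers can hold a single lock across the whole operation; the spi_transfer edits are the mechanical move from the legacy delay_usecs field to the delay.value/delay.unit pair. A sketch of a composed operation under the new scheme:

    #include <linux/mutex.h>
    #include <linux/iio/imu/adis.h>

    /* Hypothetical read-modify-write kept atomic via state_lock */
    static int demo_set_bits(struct adis *adis, unsigned int reg, u16 bits)
    {
        u16 val;
        int ret;

        mutex_lock(&adis->state_lock);

        ret = __adis_read_reg_16(adis, reg, &val);
        if (ret)
            goto out_unlock;

        ret = __adis_write_reg_16(adis, reg, val | bits);

    out_unlock:
        mutex_unlock(&adis->state_lock);
        return ret;
    }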
diff --git a/drivers/iio/imu/adis16400.c b/drivers/iio/imu/adis16400.c
index 44e46dc96e00..cfb1c19eb930 100644
--- a/drivers/iio/imu/adis16400.c
+++ b/drivers/iio/imu/adis16400.c
@@ -156,12 +156,14 @@ struct adis16400_state;
struct adis16400_chip_info {
const struct iio_chan_spec *channels;
+ const struct adis_timeout *timeouts;
const int num_channels;
const long flags;
unsigned int gyro_scale_micro;
unsigned int accel_scale_micro;
int temp_scale_nano;
int temp_offset;
+ /* set_freq() & get_freq() need to avoid using ADIS lib's state lock */
int (*set_freq)(struct adis16400_state *st, unsigned int freq);
int (*get_freq)(struct adis16400_state *st);
};
@@ -326,7 +328,7 @@ static int adis16334_get_freq(struct adis16400_state *st)
int ret;
uint16_t t;
- ret = adis_read_reg_16(&st->adis, ADIS16400_SMPL_PRD, &t);
+ ret = __adis_read_reg_16(&st->adis, ADIS16400_SMPL_PRD, &t);
if (ret)
return ret;
@@ -350,7 +352,7 @@ static int adis16334_set_freq(struct adis16400_state *st, unsigned int freq)
t <<= ADIS16334_RATE_DIV_SHIFT;
t |= ADIS16334_RATE_INT_CLK;
- return adis_write_reg_16(&st->adis, ADIS16400_SMPL_PRD, t);
+ return __adis_write_reg_16(&st->adis, ADIS16400_SMPL_PRD, t);
}
static int adis16400_get_freq(struct adis16400_state *st)
@@ -358,7 +360,7 @@ static int adis16400_get_freq(struct adis16400_state *st)
int sps, ret;
uint16_t t;
- ret = adis_read_reg_16(&st->adis, ADIS16400_SMPL_PRD, &t);
+ ret = __adis_read_reg_16(&st->adis, ADIS16400_SMPL_PRD, &t);
if (ret)
return ret;
@@ -390,7 +392,7 @@ static int adis16400_set_freq(struct adis16400_state *st, unsigned int freq)
else
st->adis.spi->max_speed_hz = ADIS16400_SPI_FAST;
- return adis_write_reg_8(&st->adis, ADIS16400_SMPL_PRD, val);
+ return __adis_write_reg_8(&st->adis, ADIS16400_SMPL_PRD, val);
}
static const unsigned int adis16400_3db_divisors[] = {
@@ -404,7 +406,7 @@ static const unsigned int adis16400_3db_divisors[] = {
[7] = 200, /* Not a valid setting */
};
-static int adis16400_set_filter(struct iio_dev *indio_dev, int sps, int val)
+static int __adis16400_set_filter(struct iio_dev *indio_dev, int sps, int val)
{
struct adis16400_state *st = iio_priv(indio_dev);
uint16_t val16;
@@ -415,11 +417,11 @@ static int adis16400_set_filter(struct iio_dev *indio_dev, int sps, int val)
break;
}
- ret = adis_read_reg_16(&st->adis, ADIS16400_SENS_AVG, &val16);
+ ret = __adis_read_reg_16(&st->adis, ADIS16400_SENS_AVG, &val16);
if (ret)
return ret;
- ret = adis_write_reg_16(&st->adis, ADIS16400_SENS_AVG,
+ ret = __adis_write_reg_16(&st->adis, ADIS16400_SENS_AVG,
(val16 & ~0x07) | i);
return ret;
}
@@ -507,32 +509,31 @@ static int adis16400_write_raw(struct iio_dev *indio_dev,
struct iio_chan_spec const *chan, int val, int val2, long info)
{
struct adis16400_state *st = iio_priv(indio_dev);
+ struct mutex *slock = &st->adis.state_lock;
int ret, sps;
switch (info) {
case IIO_CHAN_INFO_CALIBBIAS:
- mutex_lock(&indio_dev->mlock);
ret = adis_write_reg_16(&st->adis,
adis16400_addresses[chan->scan_index], val);
- mutex_unlock(&indio_dev->mlock);
return ret;
case IIO_CHAN_INFO_LOW_PASS_FILTER_3DB_FREQUENCY:
/*
* Need to cache values so we can update if the frequency
* changes.
*/
- mutex_lock(&indio_dev->mlock);
+ mutex_lock(slock);
st->filt_int = val;
/* Work out update to current value */
sps = st->variant->get_freq(st);
if (sps < 0) {
- mutex_unlock(&indio_dev->mlock);
+ mutex_unlock(slock);
return sps;
}
- ret = adis16400_set_filter(indio_dev, sps,
+ ret = __adis16400_set_filter(indio_dev, sps,
val * 1000 + val2 / 1000);
- mutex_unlock(&indio_dev->mlock);
+ mutex_unlock(slock);
return ret;
case IIO_CHAN_INFO_SAMP_FREQ:
sps = val * 1000 + val2 / 1000;
@@ -540,9 +541,9 @@ static int adis16400_write_raw(struct iio_dev *indio_dev,
if (sps <= 0)
return -EINVAL;
- mutex_lock(&indio_dev->mlock);
+ mutex_lock(slock);
ret = st->variant->set_freq(st, sps);
- mutex_unlock(&indio_dev->mlock);
+ mutex_unlock(slock);
return ret;
default:
return -EINVAL;
@@ -553,6 +554,7 @@ static int adis16400_read_raw(struct iio_dev *indio_dev,
struct iio_chan_spec const *chan, int *val, int *val2, long info)
{
struct adis16400_state *st = iio_priv(indio_dev);
+ struct mutex *slock = &st->adis.state_lock;
int16_t val16;
int ret;
@@ -596,10 +598,8 @@ static int adis16400_read_raw(struct iio_dev *indio_dev,
return -EINVAL;
}
case IIO_CHAN_INFO_CALIBBIAS:
- mutex_lock(&indio_dev->mlock);
ret = adis_read_reg_16(&st->adis,
adis16400_addresses[chan->scan_index], &val16);
- mutex_unlock(&indio_dev->mlock);
if (ret)
return ret;
val16 = sign_extend32(val16, 11);
@@ -610,27 +610,27 @@ static int adis16400_read_raw(struct iio_dev *indio_dev,
*val = st->variant->temp_offset;
return IIO_VAL_INT;
case IIO_CHAN_INFO_LOW_PASS_FILTER_3DB_FREQUENCY:
- mutex_lock(&indio_dev->mlock);
+ mutex_lock(slock);
/* Need both the number of taps and the sampling frequency */
- ret = adis_read_reg_16(&st->adis,
+ ret = __adis_read_reg_16(&st->adis,
ADIS16400_SENS_AVG,
&val16);
if (ret) {
- mutex_unlock(&indio_dev->mlock);
+ mutex_unlock(slock);
return ret;
}
ret = st->variant->get_freq(st);
- if (ret >= 0) {
- ret /= adis16400_3db_divisors[val16 & 0x07];
- *val = ret / 1000;
- *val2 = (ret % 1000) * 1000;
- }
- mutex_unlock(&indio_dev->mlock);
+ mutex_unlock(slock);
if (ret)
return ret;
+ ret /= adis16400_3db_divisors[val16 & 0x07];
+ *val = ret / 1000;
+ *val2 = (ret % 1000) * 1000;
return IIO_VAL_INT_PLUS_MICRO;
case IIO_CHAN_INFO_SAMP_FREQ:
+ mutex_lock(slock);
ret = st->variant->get_freq(st);
+ mutex_unlock(slock);
if (ret)
return ret;
*val = ret / 1000;
@@ -930,6 +930,36 @@ static const struct iio_chan_spec adis16334_channels[] = {
IIO_CHAN_SOFT_TIMESTAMP(ADIS16400_SCAN_TIMESTAMP),
};
+static const struct adis_timeout adis16300_timeouts = {
+ .reset_ms = ADIS16400_STARTUP_DELAY,
+ .sw_reset_ms = ADIS16400_STARTUP_DELAY,
+ .self_test_ms = ADIS16400_STARTUP_DELAY,
+};
+
+static const struct adis_timeout adis16362_timeouts = {
+ .reset_ms = 130,
+ .sw_reset_ms = 130,
+ .self_test_ms = 12,
+};
+
+static const struct adis_timeout adis16400_timeouts = {
+ .reset_ms = 170,
+ .sw_reset_ms = 170,
+ .self_test_ms = 12,
+};
+
+static const struct adis_timeout adis16445_timeouts = {
+ .reset_ms = 55,
+ .sw_reset_ms = 55,
+ .self_test_ms = 16,
+};
+
+static const struct adis_timeout adis16448_timeouts = {
+ .reset_ms = 90,
+ .sw_reset_ms = 90,
+ .self_test_ms = 45,
+};
+
static struct adis16400_chip_info adis16400_chips[] = {
[ADIS16300] = {
.channels = adis16300_channels,
@@ -942,6 +972,7 @@ static struct adis16400_chip_info adis16400_chips[] = {
.temp_offset = 25000000 / 140000, /* 25 C = 0x00 */
.set_freq = adis16400_set_freq,
.get_freq = adis16400_get_freq,
+ .timeouts = &adis16300_timeouts,
},
[ADIS16334] = {
.channels = adis16334_channels,
@@ -965,6 +996,7 @@ static struct adis16400_chip_info adis16400_chips[] = {
.flags = ADIS16400_NO_BURST | ADIS16400_HAS_SLOW_MODE,
.set_freq = adis16400_set_freq,
.get_freq = adis16400_get_freq,
+ .timeouts = &adis16300_timeouts,
},
[ADIS16360] = {
.channels = adis16350_channels,
@@ -977,6 +1009,7 @@ static struct adis16400_chip_info adis16400_chips[] = {
.temp_offset = 25000000 / 136000, /* 25 C = 0x00 */
.set_freq = adis16400_set_freq,
.get_freq = adis16400_get_freq,
+ .timeouts = &adis16300_timeouts,
},
[ADIS16362] = {
.channels = adis16350_channels,
@@ -989,6 +1022,7 @@ static struct adis16400_chip_info adis16400_chips[] = {
.temp_offset = 25000000 / 136000, /* 25 C = 0x00 */
.set_freq = adis16400_set_freq,
.get_freq = adis16400_get_freq,
+ .timeouts = &adis16362_timeouts,
},
[ADIS16364] = {
.channels = adis16350_channels,
@@ -1001,6 +1035,7 @@ static struct adis16400_chip_info adis16400_chips[] = {
.temp_offset = 25000000 / 136000, /* 25 C = 0x00 */
.set_freq = adis16400_set_freq,
.get_freq = adis16400_get_freq,
+ .timeouts = &adis16362_timeouts,
},
[ADIS16367] = {
.channels = adis16350_channels,
@@ -1013,6 +1048,7 @@ static struct adis16400_chip_info adis16400_chips[] = {
.temp_offset = 25000000 / 136000, /* 25 C = 0x00 */
.set_freq = adis16400_set_freq,
.get_freq = adis16400_get_freq,
+ .timeouts = &adis16300_timeouts,
},
[ADIS16400] = {
.channels = adis16400_channels,
@@ -1024,6 +1060,7 @@ static struct adis16400_chip_info adis16400_chips[] = {
.temp_offset = 25000000 / 140000, /* 25 C = 0x00 */
.set_freq = adis16400_set_freq,
.get_freq = adis16400_get_freq,
+ .timeouts = &adis16400_timeouts,
},
[ADIS16445] = {
.channels = adis16445_channels,
@@ -1037,6 +1074,7 @@ static struct adis16400_chip_info adis16400_chips[] = {
.temp_offset = 31000000 / 73860, /* 31 C = 0x00 */
.set_freq = adis16334_set_freq,
.get_freq = adis16334_get_freq,
+ .timeouts = &adis16445_timeouts,
},
[ADIS16448] = {
.channels = adis16448_channels,
@@ -1050,6 +1088,7 @@ static struct adis16400_chip_info adis16400_chips[] = {
.temp_offset = 31000000 / 73860, /* 31 C = 0x00 */
.set_freq = adis16334_set_freq,
.get_freq = adis16334_get_freq,
+ .timeouts = &adis16448_timeouts,
}
};
@@ -1087,7 +1126,6 @@ static const struct adis_data adis16400_data = {
.write_delay = 50,
.self_test_mask = ADIS16400_MSC_CTRL_MEM_TEST,
- .startup_delay = ADIS16400_STARTUP_DELAY,
.status_error_msgs = adis16400_status_error_msgs,
.status_error_mask = BIT(ADIS16400_DIAG_STAT_ZACCL_FAIL) |
@@ -1121,11 +1159,28 @@ static void adis16400_setup_chan_mask(struct adis16400_state *st)
}
}
+static struct adis_data *adis16400_adis_data_alloc(struct adis16400_state *st,
+ struct device *dev)
+{
+ struct adis_data *data;
+
+ data = devm_kmalloc(dev, sizeof(struct adis_data), GFP_KERNEL);
+ if (!data)
+ return ERR_PTR(-ENOMEM);
+
+ memcpy(data, &adis16400_data, sizeof(*data));
+
+ data->timeouts = st->variant->timeouts;
+
+ return data;
+}
+
static int adis16400_probe(struct spi_device *spi)
{
struct adis16400_state *st;
struct iio_dev *indio_dev;
int ret;
+ const struct adis_data *adis16400_data;
indio_dev = devm_iio_device_alloc(&spi->dev, sizeof(*st));
if (indio_dev == NULL)
@@ -1152,7 +1207,11 @@ static int adis16400_probe(struct spi_device *spi)
st->adis.burst->extra_len = sizeof(u16);
}
- ret = adis_init(&st->adis, indio_dev, spi, &adis16400_data);
+ adis16400_data = adis16400_adis_data_alloc(st, &spi->dev);
+ if (IS_ERR(adis16400_data))
+ return PTR_ERR(adis16400_data);
+
+ ret = adis_init(&st->adis, indio_dev, spi, adis16400_data);
if (ret)
return ret;
diff --git a/drivers/iio/imu/adis16460.c b/drivers/iio/imu/adis16460.c
index b55812521537..9539cfe4a259 100644
--- a/drivers/iio/imu/adis16460.c
+++ b/drivers/iio/imu/adis16460.c
@@ -383,6 +383,12 @@ static const char * const adis16460_status_error_msgs[] = {
[ADIS16460_DIAG_STAT_FLASH_UPT] = "Flash update failure",
};
+static const struct adis_timeout adis16460_timeouts = {
+ .reset_ms = 225,
+ .sw_reset_ms = 225,
+ .self_test_ms = 10,
+};
+
static const struct adis_data adis16460_data = {
.diag_stat_reg = ADIS16460_REG_DIAG_STAT,
.glob_cmd_reg = ADIS16460_REG_GLOB_CMD,
@@ -398,6 +404,7 @@ static const struct adis_data adis16460_data = {
BIT(ADIS16460_DIAG_STAT_SPI_COMM) |
BIT(ADIS16460_DIAG_STAT_FLASH_UPT),
.enable_irq = adis16460_enable_irq,
+ .timeouts = &adis16460_timeouts,
};
static int adis16460_probe(struct spi_device *spi)
diff --git a/drivers/iio/imu/adis16480.c b/drivers/iio/imu/adis16480.c
index 748f8bbf184d..dac87f1001fd 100644
--- a/drivers/iio/imu/adis16480.c
+++ b/drivers/iio/imu/adis16480.c
@@ -138,6 +138,7 @@ struct adis16480_chip_info {
unsigned int max_dec_rate;
const unsigned int *filter_freqs;
bool has_pps_clk_mode;
+ const struct adis_timeout *timeouts;
};
enum adis16480_int_pin {
@@ -555,6 +556,7 @@ static int adis16480_set_filter_freq(struct iio_dev *indio_dev,
const struct iio_chan_spec *chan, unsigned int freq)
{
struct adis16480 *st = iio_priv(indio_dev);
+ struct mutex *slock = &st->adis.state_lock;
unsigned int enable_mask, offset, reg;
unsigned int diff, best_diff;
unsigned int i, best_freq;
@@ -565,9 +567,11 @@ static int adis16480_set_filter_freq(struct iio_dev *indio_dev,
offset = ad16480_filter_data[chan->scan_index][1];
enable_mask = BIT(offset + 2);
- ret = adis_read_reg_16(&st->adis, reg, &val);
+ mutex_lock(slock);
+
+ ret = __adis_read_reg_16(&st->adis, reg, &val);
if (ret)
- return ret;
+ goto out_unlock;
if (freq == 0) {
val &= ~enable_mask;
@@ -589,7 +593,11 @@ static int adis16480_set_filter_freq(struct iio_dev *indio_dev,
val |= enable_mask;
}
- return adis_write_reg_16(&st->adis, reg, val);
+ ret = __adis_write_reg_16(&st->adis, reg, val);
+out_unlock:
+ mutex_unlock(slock);
+
+ return ret;
}
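The switch from adis_read_reg_16()/adis_write_reg_16() to the double-underscore variants is the point of this hunk: the unlocked accessors let the driver hold state_lock across the whole sequence, so the read-modify-write of the filter register cannot interleave with other register traffic. The pattern in isolation, as a hedged sketch (example_update_bits is not a real helper):

static int example_update_bits(struct adis *adis, unsigned int reg,
			       u16 clear, u16 set)
{
	u16 val;
	int ret;

	mutex_lock(&adis->state_lock);

	ret = __adis_read_reg_16(adis, reg, &val);
	if (ret)
		goto out_unlock;

	/* modify under the lock so no other access can slip in between */
	val = (val & ~clear) | set;

	ret = __adis_write_reg_16(adis, reg, val);
out_unlock:
	mutex_unlock(&adis->state_lock);

	return ret;
}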
static int adis16480_read_raw(struct iio_dev *indio_dev,
@@ -779,6 +787,7 @@ enum adis16480_variant {
ADIS16480,
ADIS16485,
ADIS16488,
+ ADIS16490,
ADIS16495_1,
ADIS16495_2,
ADIS16495_3,
@@ -787,6 +796,30 @@ enum adis16480_variant {
ADIS16497_3,
};
+static const struct adis_timeout adis16485_timeouts = {
+ .reset_ms = 560,
+ .sw_reset_ms = 120,
+ .self_test_ms = 12,
+};
+
+static const struct adis_timeout adis16480_timeouts = {
+ .reset_ms = 560,
+ .sw_reset_ms = 560,
+ .self_test_ms = 12,
+};
+
+static const struct adis_timeout adis16495_timeouts = {
+ .reset_ms = 170,
+ .sw_reset_ms = 130,
+ .self_test_ms = 40,
+};
+
+static const struct adis_timeout adis16495_1_timeouts = {
+ .reset_ms = 250,
+ .sw_reset_ms = 210,
+ .self_test_ms = 20,
+};
+
static const struct adis16480_chip_info adis16480_chip_info[] = {
[ADIS16375] = {
.channels = adis16485_channels,
@@ -805,6 +838,7 @@ static const struct adis16480_chip_info adis16480_chip_info[] = {
.int_clk = 2460000,
.max_dec_rate = 2048,
.filter_freqs = adis16480_def_filter_freqs,
+ .timeouts = &adis16485_timeouts,
},
[ADIS16480] = {
.channels = adis16480_channels,
@@ -817,6 +851,7 @@ static const struct adis16480_chip_info adis16480_chip_info[] = {
.int_clk = 2460000,
.max_dec_rate = 2048,
.filter_freqs = adis16480_def_filter_freqs,
+ .timeouts = &adis16480_timeouts,
},
[ADIS16485] = {
.channels = adis16485_channels,
@@ -829,6 +864,7 @@ static const struct adis16480_chip_info adis16480_chip_info[] = {
.int_clk = 2460000,
.max_dec_rate = 2048,
.filter_freqs = adis16480_def_filter_freqs,
+ .timeouts = &adis16485_timeouts,
},
[ADIS16488] = {
.channels = adis16480_channels,
@@ -841,6 +877,21 @@ static const struct adis16480_chip_info adis16480_chip_info[] = {
.int_clk = 2460000,
.max_dec_rate = 2048,
.filter_freqs = adis16480_def_filter_freqs,
+ .timeouts = &adis16485_timeouts,
+ },
+ [ADIS16490] = {
+ .channels = adis16485_channels,
+ .num_channels = ARRAY_SIZE(adis16485_channels),
+ .gyro_max_val = 20000 << 16,
+ .gyro_max_scale = IIO_DEGREE_TO_RAD(100),
+ .accel_max_val = IIO_M_S_2_TO_G(16000 << 16),
+ .accel_max_scale = 8,
+ .temp_scale = 14285, /* 14.285 milli degree Celsius */
+ .int_clk = 4250000,
+ .max_dec_rate = 4250,
+ .filter_freqs = adis16495_def_filter_freqs,
+ .has_pps_clk_mode = true,
+ .timeouts = &adis16495_timeouts,
},
[ADIS16495_1] = {
.channels = adis16485_channels,
@@ -854,6 +905,7 @@ static const struct adis16480_chip_info adis16480_chip_info[] = {
.max_dec_rate = 4250,
.filter_freqs = adis16495_def_filter_freqs,
.has_pps_clk_mode = true,
+ .timeouts = &adis16495_1_timeouts,
},
[ADIS16495_2] = {
.channels = adis16485_channels,
@@ -867,6 +919,7 @@ static const struct adis16480_chip_info adis16480_chip_info[] = {
.max_dec_rate = 4250,
.filter_freqs = adis16495_def_filter_freqs,
.has_pps_clk_mode = true,
+ .timeouts = &adis16495_1_timeouts,
},
[ADIS16495_3] = {
.channels = adis16485_channels,
@@ -880,6 +933,7 @@ static const struct adis16480_chip_info adis16480_chip_info[] = {
.max_dec_rate = 4250,
.filter_freqs = adis16495_def_filter_freqs,
.has_pps_clk_mode = true,
+ .timeouts = &adis16495_1_timeouts,
},
[ADIS16497_1] = {
.channels = adis16485_channels,
@@ -893,6 +947,7 @@ static const struct adis16480_chip_info adis16480_chip_info[] = {
.max_dec_rate = 4250,
.filter_freqs = adis16495_def_filter_freqs,
.has_pps_clk_mode = true,
+ .timeouts = &adis16495_1_timeouts,
},
[ADIS16497_2] = {
.channels = adis16485_channels,
@@ -906,6 +961,7 @@ static const struct adis16480_chip_info adis16480_chip_info[] = {
.max_dec_rate = 4250,
.filter_freqs = adis16495_def_filter_freqs,
.has_pps_clk_mode = true,
+ .timeouts = &adis16495_1_timeouts,
},
[ADIS16497_3] = {
.channels = adis16485_channels,
@@ -919,6 +975,7 @@ static const struct adis16480_chip_info adis16480_chip_info[] = {
.max_dec_rate = 4250,
.filter_freqs = adis16495_def_filter_freqs,
.has_pps_clk_mode = true,
+ .timeouts = &adis16495_1_timeouts,
},
};
@@ -947,14 +1004,14 @@ static int adis16480_enable_irq(struct adis *adis, bool enable)
uint16_t val;
int ret;
- ret = adis_read_reg_16(adis, ADIS16480_REG_FNCTIO_CTRL, &val);
+ ret = __adis_read_reg_16(adis, ADIS16480_REG_FNCTIO_CTRL, &val);
if (ret)
return ret;
val &= ~ADIS16480_DRDY_EN_MSK;
val |= ADIS16480_DRDY_EN(enable);
- return adis_write_reg_16(adis, ADIS16480_REG_FNCTIO_CTRL, val);
+ return __adis_write_reg_16(adis, ADIS16480_REG_FNCTIO_CTRL, val);
}
static int adis16480_initial_setup(struct iio_dev *indio_dev)
@@ -1188,9 +1245,26 @@ static int adis16480_get_ext_clocks(struct adis16480 *st)
return 0;
}
+static struct adis_data *adis16480_adis_data_alloc(struct adis16480 *st,
+ struct device *dev)
+{
+ struct adis_data *data;
+
+ data = devm_kmalloc(dev, sizeof(struct adis_data), GFP_KERNEL);
+ if (!data)
+ return ERR_PTR(-ENOMEM);
+
+ memcpy(data, &adis16480_data, sizeof(*data));
+
+ data->timeouts = st->chip_info->timeouts;
+
+ return data;
+}
+
static int adis16480_probe(struct spi_device *spi)
{
const struct spi_device_id *id = spi_get_device_id(spi);
+ const struct adis_data *adis16480_data;
struct iio_dev *indio_dev;
struct adis16480 *st;
int ret;
@@ -1211,7 +1285,11 @@ static int adis16480_probe(struct spi_device *spi)
indio_dev->info = &adis16480_info;
indio_dev->modes = INDIO_DIRECT_MODE;
- ret = adis_init(&st->adis, indio_dev, spi, &adis16480_data);
+ adis16480_data = adis16480_adis_data_alloc(st, &spi->dev);
+ if (IS_ERR(adis16480_data))
+ return PTR_ERR(adis16480_data);
+
+ ret = adis_init(&st->adis, indio_dev, spi, adis16480_data);
if (ret)
return ret;
@@ -1278,6 +1356,7 @@ static const struct spi_device_id adis16480_ids[] = {
{ "adis16480", ADIS16480 },
{ "adis16485", ADIS16485 },
{ "adis16488", ADIS16488 },
+ { "adis16490", ADIS16490 },
{ "adis16495-1", ADIS16495_1 },
{ "adis16495-2", ADIS16495_2 },
{ "adis16495-3", ADIS16495_3 },
@@ -1293,6 +1372,7 @@ static const struct of_device_id adis16480_of_match[] = {
{ .compatible = "adi,adis16480" },
{ .compatible = "adi,adis16485" },
{ .compatible = "adi,adis16488" },
+ { .compatible = "adi,adis16490" },
{ .compatible = "adi,adis16495-1" },
{ .compatible = "adi,adis16495-2" },
{ .compatible = "adi,adis16495-3" },
diff --git a/drivers/iio/imu/adis_buffer.c b/drivers/iio/imu/adis_buffer.c
index 4998a89d083d..3f4dd5c00b03 100644
--- a/drivers/iio/imu/adis_buffer.c
+++ b/drivers/iio/imu/adis_buffer.c
@@ -129,7 +129,7 @@ static irqreturn_t adis_trigger_handler(int irq, void *p)
return -ENOMEM;
if (adis->data->has_paging) {
- mutex_lock(&adis->txrx_lock);
+ mutex_lock(&adis->state_lock);
if (adis->current_page != 0) {
adis->tx[0] = ADIS_WRITE_REG(ADIS_REG_PAGE_ID);
adis->tx[1] = 0;
@@ -144,7 +144,7 @@ static irqreturn_t adis_trigger_handler(int irq, void *p)
if (adis->data->has_paging) {
adis->current_page = 0;
- mutex_unlock(&adis->txrx_lock);
+ mutex_unlock(&adis->state_lock);
}
iio_push_to_buffers_with_timestamp(indio_dev, adis->buffer,
diff --git a/drivers/iio/imu/inv_mpu6050/Kconfig b/drivers/iio/imu/inv_mpu6050/Kconfig
index e4c4c12236a7..017bc0fcc365 100644
--- a/drivers/iio/imu/inv_mpu6050/Kconfig
+++ b/drivers/iio/imu/inv_mpu6050/Kconfig
@@ -10,11 +10,12 @@ config INV_MPU6050_IIO
config INV_MPU6050_I2C
tristate "Invensense MPU6050 devices (I2C)"
- depends on I2C_MUX
+ depends on I2C
+ select I2C_MUX
select INV_MPU6050_IIO
select REGMAP_I2C
help
- This driver supports the Invensense MPU6000/6050/6500/6515,
+ This driver supports the Invensense MPU6050/6500/6515,
MPU9150/9250/9255 and ICM20608/20602 motion tracking devices
over I2C.
This driver can be built as a module. The module will be called
@@ -26,8 +27,8 @@ config INV_MPU6050_SPI
select INV_MPU6050_IIO
select REGMAP_SPI
help
- This driver supports the Invensense MPU6000/6050/6500/6515,
- MPU9150/9250/9255 and ICM20608/20602 motion tracking devices
+ This driver supports the Invensense MPU6000/6500/6515,
+ MPU9250/9255 and ICM20608/20602 motion tracking devices
over SPI.
This driver can be built as a module. The module will be called
inv-mpu6050-spi.
diff --git a/drivers/iio/imu/inv_mpu6050/inv_mpu_core.c b/drivers/iio/imu/inv_mpu6050/inv_mpu_core.c
index 0686e41bb8a1..5096fc49012d 100644
--- a/drivers/iio/imu/inv_mpu6050/inv_mpu_core.c
+++ b/drivers/iio/imu/inv_mpu6050/inv_mpu_core.c
@@ -104,6 +104,7 @@ static const struct inv_mpu6050_chip_config chip_config_6050 = {
.divider = INV_MPU6050_FIFO_RATE_TO_DIVIDER(INV_MPU6050_INIT_FIFO_RATE),
.gyro_fifo_enable = false,
.accl_fifo_enable = false,
+ .temp_fifo_enable = false,
.magn_fifo_enable = false,
.accl_fs = INV_MPU6050_FS_02G,
.user_ctrl = 0,
@@ -856,19 +857,27 @@ static const struct iio_chan_spec_ext_info inv_ext_info[] = {
.ext_info = inv_ext_info, \
}
+#define INV_MPU6050_TEMP_CHAN(_index) \
+ { \
+ .type = IIO_TEMP, \
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) \
+ | BIT(IIO_CHAN_INFO_OFFSET) \
+ | BIT(IIO_CHAN_INFO_SCALE), \
+ .scan_index = _index, \
+ .scan_type = { \
+ .sign = 's', \
+ .realbits = 16, \
+ .storagebits = 16, \
+ .shift = 0, \
+ .endianness = IIO_BE, \
+ }, \
+ }
+
static const struct iio_chan_spec inv_mpu_channels[] = {
IIO_CHAN_SOFT_TIMESTAMP(INV_MPU6050_SCAN_TIMESTAMP),
- /*
- * Note that temperature should only be via polled reading only,
- * not the final scan elements output.
- */
- {
- .type = IIO_TEMP,
- .info_mask_separate = BIT(IIO_CHAN_INFO_RAW)
- | BIT(IIO_CHAN_INFO_OFFSET)
- | BIT(IIO_CHAN_INFO_SCALE),
- .scan_index = -1,
- },
+
+ INV_MPU6050_TEMP_CHAN(INV_MPU6050_SCAN_TEMP),
+
INV_MPU6050_CHAN(IIO_ANGL_VEL, IIO_MOD_X, INV_MPU6050_SCAN_GYRO_X),
INV_MPU6050_CHAN(IIO_ANGL_VEL, IIO_MOD_Y, INV_MPU6050_SCAN_GYRO_Y),
INV_MPU6050_CHAN(IIO_ANGL_VEL, IIO_MOD_Z, INV_MPU6050_SCAN_GYRO_Z),
@@ -878,22 +887,29 @@ static const struct iio_chan_spec inv_mpu_channels[] = {
INV_MPU6050_CHAN(IIO_ACCEL, IIO_MOD_Z, INV_MPU6050_SCAN_ACCL_Z),
};
+#define INV_MPU6050_SCAN_MASK_3AXIS_ACCEL \
+ (BIT(INV_MPU6050_SCAN_ACCL_X) \
+ | BIT(INV_MPU6050_SCAN_ACCL_Y) \
+ | BIT(INV_MPU6050_SCAN_ACCL_Z))
+
+#define INV_MPU6050_SCAN_MASK_3AXIS_GYRO \
+ (BIT(INV_MPU6050_SCAN_GYRO_X) \
+ | BIT(INV_MPU6050_SCAN_GYRO_Y) \
+ | BIT(INV_MPU6050_SCAN_GYRO_Z))
+
+#define INV_MPU6050_SCAN_MASK_TEMP (BIT(INV_MPU6050_SCAN_TEMP))
+
static const unsigned long inv_mpu_scan_masks[] = {
/* 3-axis accel */
- BIT(INV_MPU6050_SCAN_ACCL_X)
- | BIT(INV_MPU6050_SCAN_ACCL_Y)
- | BIT(INV_MPU6050_SCAN_ACCL_Z),
+ INV_MPU6050_SCAN_MASK_3AXIS_ACCEL,
+ INV_MPU6050_SCAN_MASK_3AXIS_ACCEL | INV_MPU6050_SCAN_MASK_TEMP,
/* 3-axis gyro */
- BIT(INV_MPU6050_SCAN_GYRO_X)
- | BIT(INV_MPU6050_SCAN_GYRO_Y)
- | BIT(INV_MPU6050_SCAN_GYRO_Z),
+ INV_MPU6050_SCAN_MASK_3AXIS_GYRO,
+ INV_MPU6050_SCAN_MASK_3AXIS_GYRO | INV_MPU6050_SCAN_MASK_TEMP,
/* 6-axis accel + gyro */
- BIT(INV_MPU6050_SCAN_ACCL_X)
- | BIT(INV_MPU6050_SCAN_ACCL_Y)
- | BIT(INV_MPU6050_SCAN_ACCL_Z)
- | BIT(INV_MPU6050_SCAN_GYRO_X)
- | BIT(INV_MPU6050_SCAN_GYRO_Y)
- | BIT(INV_MPU6050_SCAN_GYRO_Z),
+ INV_MPU6050_SCAN_MASK_3AXIS_ACCEL | INV_MPU6050_SCAN_MASK_3AXIS_GYRO,
+ INV_MPU6050_SCAN_MASK_3AXIS_ACCEL | INV_MPU6050_SCAN_MASK_3AXIS_GYRO
+ | INV_MPU6050_SCAN_MASK_TEMP,
0,
};
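Every accel/gyro combination needs a with-temperature twin because the IIO core does not compose scan masks: when the buffer is enabled it walks available_scan_masks in order and takes the first entry that covers all requested channels. A simplified single-word sketch of that matching (the core's real version operates on multi-word bitmaps via bitmap_subset()):

static const unsigned long *example_match_mask(const unsigned long *masks,
					       unsigned long requested)
{
	const unsigned long *m;

	for (m = masks; *m; m++) {
		/* first superset of the requested channels wins */
		if ((*m & requested) == requested)
			return m;
	}

	return NULL;	/* no hardware mode covers the request */
}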
@@ -915,19 +931,30 @@ static const unsigned long inv_mpu_scan_masks[] = {
.ext_info = inv_ext_info, \
}
+static const struct iio_chan_spec inv_mpu9150_channels[] = {
+ IIO_CHAN_SOFT_TIMESTAMP(INV_MPU9X50_SCAN_TIMESTAMP),
+
+ INV_MPU6050_TEMP_CHAN(INV_MPU6050_SCAN_TEMP),
+
+ INV_MPU6050_CHAN(IIO_ANGL_VEL, IIO_MOD_X, INV_MPU6050_SCAN_GYRO_X),
+ INV_MPU6050_CHAN(IIO_ANGL_VEL, IIO_MOD_Y, INV_MPU6050_SCAN_GYRO_Y),
+ INV_MPU6050_CHAN(IIO_ANGL_VEL, IIO_MOD_Z, INV_MPU6050_SCAN_GYRO_Z),
+
+ INV_MPU6050_CHAN(IIO_ACCEL, IIO_MOD_X, INV_MPU6050_SCAN_ACCL_X),
+ INV_MPU6050_CHAN(IIO_ACCEL, IIO_MOD_Y, INV_MPU6050_SCAN_ACCL_Y),
+ INV_MPU6050_CHAN(IIO_ACCEL, IIO_MOD_Z, INV_MPU6050_SCAN_ACCL_Z),
+
+ /* Magnetometer resolution is 13 bits */
+ INV_MPU9X50_MAGN_CHAN(IIO_MOD_X, 13, INV_MPU9X50_SCAN_MAGN_X),
+ INV_MPU9X50_MAGN_CHAN(IIO_MOD_Y, 13, INV_MPU9X50_SCAN_MAGN_Y),
+ INV_MPU9X50_MAGN_CHAN(IIO_MOD_Z, 13, INV_MPU9X50_SCAN_MAGN_Z),
+};
+
static const struct iio_chan_spec inv_mpu9250_channels[] = {
IIO_CHAN_SOFT_TIMESTAMP(INV_MPU9X50_SCAN_TIMESTAMP),
- /*
- * Note that temperature should only be via polled reading only,
- * not the final scan elements output.
- */
- {
- .type = IIO_TEMP,
- .info_mask_separate = BIT(IIO_CHAN_INFO_RAW)
- | BIT(IIO_CHAN_INFO_OFFSET)
- | BIT(IIO_CHAN_INFO_SCALE),
- .scan_index = -1,
- },
+
+ INV_MPU6050_TEMP_CHAN(INV_MPU6050_SCAN_TEMP),
+
INV_MPU6050_CHAN(IIO_ANGL_VEL, IIO_MOD_X, INV_MPU6050_SCAN_GYRO_X),
INV_MPU6050_CHAN(IIO_ANGL_VEL, IIO_MOD_Y, INV_MPU6050_SCAN_GYRO_Y),
INV_MPU6050_CHAN(IIO_ANGL_VEL, IIO_MOD_Z, INV_MPU6050_SCAN_GYRO_Z),
@@ -942,98 +969,50 @@ static const struct iio_chan_spec inv_mpu9250_channels[] = {
INV_MPU9X50_MAGN_CHAN(IIO_MOD_Z, 16, INV_MPU9X50_SCAN_MAGN_Z),
};
+#define INV_MPU9X50_SCAN_MASK_3AXIS_MAGN \
+ (BIT(INV_MPU9X50_SCAN_MAGN_X) \
+ | BIT(INV_MPU9X50_SCAN_MAGN_Y) \
+ | BIT(INV_MPU9X50_SCAN_MAGN_Z))
+
static const unsigned long inv_mpu9x50_scan_masks[] = {
/* 3-axis accel */
- BIT(INV_MPU6050_SCAN_ACCL_X)
- | BIT(INV_MPU6050_SCAN_ACCL_Y)
- | BIT(INV_MPU6050_SCAN_ACCL_Z),
+ INV_MPU6050_SCAN_MASK_3AXIS_ACCEL,
+ INV_MPU6050_SCAN_MASK_3AXIS_ACCEL | INV_MPU6050_SCAN_MASK_TEMP,
/* 3-axis gyro */
- BIT(INV_MPU6050_SCAN_GYRO_X)
- | BIT(INV_MPU6050_SCAN_GYRO_Y)
- | BIT(INV_MPU6050_SCAN_GYRO_Z),
+ INV_MPU6050_SCAN_MASK_3AXIS_GYRO,
+ INV_MPU6050_SCAN_MASK_3AXIS_GYRO | INV_MPU6050_SCAN_MASK_TEMP,
/* 3-axis magn */
- BIT(INV_MPU9X50_SCAN_MAGN_X)
- | BIT(INV_MPU9X50_SCAN_MAGN_Y)
- | BIT(INV_MPU9X50_SCAN_MAGN_Z),
+ INV_MPU9X50_SCAN_MASK_3AXIS_MAGN,
+ INV_MPU9X50_SCAN_MASK_3AXIS_MAGN | INV_MPU6050_SCAN_MASK_TEMP,
/* 6-axis accel + gyro */
- BIT(INV_MPU6050_SCAN_ACCL_X)
- | BIT(INV_MPU6050_SCAN_ACCL_Y)
- | BIT(INV_MPU6050_SCAN_ACCL_Z)
- | BIT(INV_MPU6050_SCAN_GYRO_X)
- | BIT(INV_MPU6050_SCAN_GYRO_Y)
- | BIT(INV_MPU6050_SCAN_GYRO_Z),
+ INV_MPU6050_SCAN_MASK_3AXIS_ACCEL | INV_MPU6050_SCAN_MASK_3AXIS_GYRO,
+ INV_MPU6050_SCAN_MASK_3AXIS_ACCEL | INV_MPU6050_SCAN_MASK_3AXIS_GYRO
+ | INV_MPU6050_SCAN_MASK_TEMP,
/* 6-axis accel + magn */
- BIT(INV_MPU6050_SCAN_ACCL_X)
- | BIT(INV_MPU6050_SCAN_ACCL_Y)
- | BIT(INV_MPU6050_SCAN_ACCL_Z)
- | BIT(INV_MPU9X50_SCAN_MAGN_X)
- | BIT(INV_MPU9X50_SCAN_MAGN_Y)
- | BIT(INV_MPU9X50_SCAN_MAGN_Z),
+ INV_MPU6050_SCAN_MASK_3AXIS_ACCEL | INV_MPU9X50_SCAN_MASK_3AXIS_MAGN,
+ INV_MPU6050_SCAN_MASK_3AXIS_ACCEL | INV_MPU9X50_SCAN_MASK_3AXIS_MAGN
+ | INV_MPU6050_SCAN_MASK_TEMP,
/* 6-axis gyro + magn */
- BIT(INV_MPU6050_SCAN_GYRO_X)
- | BIT(INV_MPU6050_SCAN_GYRO_Y)
- | BIT(INV_MPU6050_SCAN_GYRO_Z)
- | BIT(INV_MPU9X50_SCAN_MAGN_X)
- | BIT(INV_MPU9X50_SCAN_MAGN_Y)
- | BIT(INV_MPU9X50_SCAN_MAGN_Z),
+ INV_MPU6050_SCAN_MASK_3AXIS_GYRO | INV_MPU9X50_SCAN_MASK_3AXIS_MAGN,
+ INV_MPU6050_SCAN_MASK_3AXIS_GYRO | INV_MPU9X50_SCAN_MASK_3AXIS_MAGN
+ | INV_MPU6050_SCAN_MASK_TEMP,
/* 9-axis accel + gyro + magn */
- BIT(INV_MPU6050_SCAN_ACCL_X)
- | BIT(INV_MPU6050_SCAN_ACCL_Y)
- | BIT(INV_MPU6050_SCAN_ACCL_Z)
- | BIT(INV_MPU6050_SCAN_GYRO_X)
- | BIT(INV_MPU6050_SCAN_GYRO_Y)
- | BIT(INV_MPU6050_SCAN_GYRO_Z)
- | BIT(INV_MPU9X50_SCAN_MAGN_X)
- | BIT(INV_MPU9X50_SCAN_MAGN_Y)
- | BIT(INV_MPU9X50_SCAN_MAGN_Z),
+ INV_MPU6050_SCAN_MASK_3AXIS_ACCEL | INV_MPU6050_SCAN_MASK_3AXIS_GYRO
+ | INV_MPU9X50_SCAN_MASK_3AXIS_MAGN,
+ INV_MPU6050_SCAN_MASK_3AXIS_ACCEL | INV_MPU6050_SCAN_MASK_3AXIS_GYRO
+ | INV_MPU9X50_SCAN_MASK_3AXIS_MAGN
+ | INV_MPU6050_SCAN_MASK_TEMP,
0,
};
-static const struct iio_chan_spec inv_icm20602_channels[] = {
- IIO_CHAN_SOFT_TIMESTAMP(INV_ICM20602_SCAN_TIMESTAMP),
- {
- .type = IIO_TEMP,
- .info_mask_separate = BIT(IIO_CHAN_INFO_RAW)
- | BIT(IIO_CHAN_INFO_OFFSET)
- | BIT(IIO_CHAN_INFO_SCALE),
- .scan_index = INV_ICM20602_SCAN_TEMP,
- .scan_type = {
- .sign = 's',
- .realbits = 16,
- .storagebits = 16,
- .shift = 0,
- .endianness = IIO_BE,
- },
- },
-
- INV_MPU6050_CHAN(IIO_ANGL_VEL, IIO_MOD_X, INV_ICM20602_SCAN_GYRO_X),
- INV_MPU6050_CHAN(IIO_ANGL_VEL, IIO_MOD_Y, INV_ICM20602_SCAN_GYRO_Y),
- INV_MPU6050_CHAN(IIO_ANGL_VEL, IIO_MOD_Z, INV_ICM20602_SCAN_GYRO_Z),
-
- INV_MPU6050_CHAN(IIO_ACCEL, IIO_MOD_Y, INV_ICM20602_SCAN_ACCL_Y),
- INV_MPU6050_CHAN(IIO_ACCEL, IIO_MOD_X, INV_ICM20602_SCAN_ACCL_X),
- INV_MPU6050_CHAN(IIO_ACCEL, IIO_MOD_Z, INV_ICM20602_SCAN_ACCL_Z),
-};
-
static const unsigned long inv_icm20602_scan_masks[] = {
/* 3-axis accel + temp (mandatory) */
- BIT(INV_ICM20602_SCAN_ACCL_X)
- | BIT(INV_ICM20602_SCAN_ACCL_Y)
- | BIT(INV_ICM20602_SCAN_ACCL_Z)
- | BIT(INV_ICM20602_SCAN_TEMP),
+ INV_MPU6050_SCAN_MASK_3AXIS_ACCEL | INV_MPU6050_SCAN_MASK_TEMP,
/* 3-axis gyro + temp (mandatory) */
- BIT(INV_ICM20602_SCAN_GYRO_X)
- | BIT(INV_ICM20602_SCAN_GYRO_Y)
- | BIT(INV_ICM20602_SCAN_GYRO_Z)
- | BIT(INV_ICM20602_SCAN_TEMP),
+ INV_MPU6050_SCAN_MASK_3AXIS_GYRO | INV_MPU6050_SCAN_MASK_TEMP,
/* 6-axis accel + gyro + temp (mandatory) */
- BIT(INV_ICM20602_SCAN_ACCL_X)
- | BIT(INV_ICM20602_SCAN_ACCL_Y)
- | BIT(INV_ICM20602_SCAN_ACCL_Z)
- | BIT(INV_ICM20602_SCAN_GYRO_X)
- | BIT(INV_ICM20602_SCAN_GYRO_Y)
- | BIT(INV_ICM20602_SCAN_GYRO_Z)
- | BIT(INV_ICM20602_SCAN_TEMP),
+ INV_MPU6050_SCAN_MASK_3AXIS_ACCEL | INV_MPU6050_SCAN_MASK_3AXIS_GYRO
+ | INV_MPU6050_SCAN_MASK_TEMP,
0,
};
@@ -1241,7 +1220,7 @@ int inv_mpu_core_probe(struct regmap *regmap, int irq, const char *name,
irq_type = irqd_get_trigger_type(desc);
if (!irq_type)
irq_type = IRQF_TRIGGER_RISING;
- if (irq_type == IRQF_TRIGGER_RISING)
+ if (irq_type & IRQF_TRIGGER_RISING) /* rising or both-edge */
st->irq_mask = INV_MPU6050_ACTIVE_HIGH;
else if (irq_type == IRQF_TRIGGER_FALLING)
st->irq_mask = INV_MPU6050_ACTIVE_LOW;
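The bitwise test matters for boards that request both-edge interrupts: IRQF_TRIGGER_RISING is 0x1 and IRQF_TRIGGER_FALLING is 0x2, so a both-edge request arrives as 0x3. A strict equality check would never match it, whereas irq_type & IRQF_TRIGGER_RISING keeps the data-ready line active-high whenever a rising edge is involved.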
@@ -1324,25 +1303,20 @@ int inv_mpu_core_probe(struct regmap *regmap, int irq, const char *name,
inv_mpu_bus_setup(indio_dev);
switch (chip_type) {
+ case INV_MPU9150:
+ indio_dev->channels = inv_mpu9150_channels;
+ indio_dev->num_channels = ARRAY_SIZE(inv_mpu9150_channels);
+ indio_dev->available_scan_masks = inv_mpu9x50_scan_masks;
+ break;
case INV_MPU9250:
case INV_MPU9255:
- /*
- * Use magnetometer inside the chip only if there is no i2c
- * auxiliary device in use.
- */
- if (!st->magn_disabled) {
- indio_dev->channels = inv_mpu9250_channels;
- indio_dev->num_channels = ARRAY_SIZE(inv_mpu9250_channels);
- indio_dev->available_scan_masks = inv_mpu9x50_scan_masks;
- } else {
- indio_dev->channels = inv_mpu_channels;
- indio_dev->num_channels = ARRAY_SIZE(inv_mpu_channels);
- indio_dev->available_scan_masks = inv_mpu_scan_masks;
- }
+ indio_dev->channels = inv_mpu9250_channels;
+ indio_dev->num_channels = ARRAY_SIZE(inv_mpu9250_channels);
+ indio_dev->available_scan_masks = inv_mpu9x50_scan_masks;
break;
case INV_ICM20602:
- indio_dev->channels = inv_icm20602_channels;
- indio_dev->num_channels = ARRAY_SIZE(inv_icm20602_channels);
+ indio_dev->channels = inv_mpu_channels;
+ indio_dev->num_channels = ARRAY_SIZE(inv_mpu_channels);
indio_dev->available_scan_masks = inv_icm20602_scan_masks;
break;
default:
@@ -1351,6 +1325,15 @@ int inv_mpu_core_probe(struct regmap *regmap, int irq, const char *name,
indio_dev->available_scan_masks = inv_mpu_scan_masks;
break;
}
+ /*
+ * Use the magnetometer inside the chip only if there is no i2c
+ * auxiliary device in use. Otherwise, fall back to 6-axis only.
+ */
+ if (st->magn_disabled) {
+ indio_dev->channels = inv_mpu_channels;
+ indio_dev->num_channels = ARRAY_SIZE(inv_mpu_channels);
+ indio_dev->available_scan_masks = inv_mpu_scan_masks;
+ }
indio_dev->info = &mpu_info;
indio_dev->modes = INDIO_BUFFER_TRIGGERED;
diff --git a/drivers/iio/imu/inv_mpu6050/inv_mpu_i2c.c b/drivers/iio/imu/inv_mpu6050/inv_mpu_i2c.c
index 389cc8505e0e..f47a28b4be23 100644
--- a/drivers/iio/imu/inv_mpu6050/inv_mpu_i2c.c
+++ b/drivers/iio/imu/inv_mpu6050/inv_mpu_i2c.c
@@ -77,6 +77,7 @@ static bool inv_mpu_i2c_aux_bus(struct device *dev)
case INV_ICM20602:
/* no i2c auxiliary bus on the chip */
return false;
+ case INV_MPU9150:
case INV_MPU9250:
case INV_MPU9255:
if (st->magn_disabled)
@@ -102,6 +103,7 @@ static int inv_mpu_magn_disable(struct iio_dev *indio_dev)
struct device_node *mux_node;
switch (st->chip_type) {
+ case INV_MPU9150:
case INV_MPU9250:
case INV_MPU9255:
mux_node = of_get_child_by_name(dev->of_node, "i2c-gate");
diff --git a/drivers/iio/imu/inv_mpu6050/inv_mpu_iio.h b/drivers/iio/imu/inv_mpu6050/inv_mpu_iio.h
index b096e010d4ee..6158fca7f70e 100644
--- a/drivers/iio/imu/inv_mpu6050/inv_mpu_iio.h
+++ b/drivers/iio/imu/inv_mpu6050/inv_mpu_iio.h
@@ -86,6 +86,7 @@ enum inv_devices {
* @accl_fs: accel full scale range.
* @accl_fifo_enable: enable accel data output
* @gyro_fifo_enable: enable gyro data output
+ * @temp_fifo_enable: enable temp data output
* @magn_fifo_enable: enable magn data output
* @divider: chip sample rate divider (sample rate divider - 1)
*/
@@ -95,6 +96,7 @@ struct inv_mpu6050_chip_config {
unsigned int accl_fs:2;
unsigned int accl_fifo_enable:1;
unsigned int gyro_fifo_enable:1;
+ unsigned int temp_fifo_enable:1;
unsigned int magn_fifo_enable:1;
u8 divider;
u8 user_ctrl;
@@ -184,6 +186,7 @@ struct inv_mpu6050_state {
#define INV_MPU6050_BIT_SLAVE_2 0x04
#define INV_MPU6050_BIT_ACCEL_OUT 0x08
#define INV_MPU6050_BITS_GYRO_OUT 0x70
+#define INV_MPU6050_BIT_TEMP_OUT 0x80
#define INV_MPU6050_REG_I2C_MST_CTRL 0x24
#define INV_MPU6050_BITS_I2C_MST_CLK_400KHZ 0x0D
@@ -268,8 +271,8 @@ struct inv_mpu6050_state {
/* MPU9X50 9-axis magnetometer */
#define INV_MPU9X50_BYTES_MAGN 7
-/* ICM20602 FIFO samples include temperature readings */
-#define INV_ICM20602_BYTES_PER_TEMP_SENSOR 2
+/* FIFO temperature sample size */
+#define INV_MPU6050_BYTES_PER_TEMP_SENSOR 2
/* mpu6500 registers */
#define INV_MPU6500_REG_ACCEL_CONFIG_2 0x1D
@@ -298,7 +301,7 @@ struct inv_mpu6050_state {
#define INV_ICM20608_TEMP_OFFSET 8170
#define INV_ICM20608_TEMP_SCALE 3059976
-/* 6 + 6 + 7 (for MPU9x50) = 19 round up to 24 and plus 8 */
+/* 6 + 6 + 2 + 7 (for MPU9x50) = 21, rounded up to 24, plus 8 */
#define INV_MPU6050_OUTPUT_DATA_SIZE 32
#define INV_MPU6050_REG_INT_PIN_CFG 0x37
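The updated size comment is easiest to verify with the same accumulation inv_mpu6050_read_fifo() performs further down in this series; the sketch below mirrors it (example_bytes_per_datum is illustrative, not driver code):

static int example_bytes_per_datum(const struct inv_mpu6050_chip_config *cfg)
{
	int bytes = 0;

	if (cfg->accl_fifo_enable)
		bytes += INV_MPU6050_BYTES_PER_3AXIS_SENSOR;	/* 6 */
	if (cfg->gyro_fifo_enable)
		bytes += INV_MPU6050_BYTES_PER_3AXIS_SENSOR;	/* 6 */
	if (cfg->temp_fifo_enable)
		bytes += INV_MPU6050_BYTES_PER_TEMP_SENSOR;	/* 2 */
	if (cfg->magn_fifo_enable)
		bytes += INV_MPU9X50_BYTES_MAGN;		/* 7 */

	return bytes;	/* at most 6 + 6 + 2 + 7 = 21 bytes */
}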
@@ -344,6 +347,7 @@ enum inv_mpu6050_scan {
INV_MPU6050_SCAN_ACCL_X,
INV_MPU6050_SCAN_ACCL_Y,
INV_MPU6050_SCAN_ACCL_Z,
+ INV_MPU6050_SCAN_TEMP,
INV_MPU6050_SCAN_GYRO_X,
INV_MPU6050_SCAN_GYRO_Y,
INV_MPU6050_SCAN_GYRO_Z,
@@ -355,18 +359,6 @@ enum inv_mpu6050_scan {
INV_MPU9X50_SCAN_TIMESTAMP,
};
-/* scan element definition for ICM20602, which includes temperature */
-enum inv_icm20602_scan {
- INV_ICM20602_SCAN_ACCL_X,
- INV_ICM20602_SCAN_ACCL_Y,
- INV_ICM20602_SCAN_ACCL_Z,
- INV_ICM20602_SCAN_TEMP,
- INV_ICM20602_SCAN_GYRO_X,
- INV_ICM20602_SCAN_GYRO_Y,
- INV_ICM20602_SCAN_GYRO_Z,
- INV_ICM20602_SCAN_TIMESTAMP,
-};
-
enum inv_mpu6050_filter_e {
INV_MPU6050_FILTER_256HZ_NOLPF2 = 0,
INV_MPU6050_FILTER_188HZ,
diff --git a/drivers/iio/imu/inv_mpu6050/inv_mpu_magn.c b/drivers/iio/imu/inv_mpu6050/inv_mpu_magn.c
index 02735af152c8..4f192352521e 100644
--- a/drivers/iio/imu/inv_mpu6050/inv_mpu_magn.c
+++ b/drivers/iio/imu/inv_mpu6050/inv_mpu_magn.c
@@ -12,7 +12,9 @@
#include "inv_mpu_magn.h"
/*
- * MPU9250 magnetometer is an AKM AK8963 chip on I2C aux bus
+ * MPU9xxx magnetometers are AKM chips on the I2C aux bus
+ * MPU9150 is AK8975
+ * MPU9250 is AK8963
*/
#define INV_MPU_MAGN_I2C_ADDR 0x0C
@@ -33,10 +35,10 @@
#define INV_MPU_MAGN_BITS_MODE_PWDN 0x00
#define INV_MPU_MAGN_BITS_MODE_SINGLE 0x01
#define INV_MPU_MAGN_BITS_MODE_FUSE 0x0F
-#define INV_MPU_MAGN_BIT_OUTPUT_BIT 0x10
+#define INV_MPU9250_MAGN_BIT_OUTPUT_BIT 0x10
-#define INV_MPU_MAGN_REG_CNTL2 0x0B
-#define INV_MPU_MAGN_BIT_SRST 0x01
+#define INV_MPU9250_MAGN_REG_CNTL2 0x0B
+#define INV_MPU9250_MAGN_BIT_SRST 0x01
#define INV_MPU_MAGN_REG_ASAX 0x10
#define INV_MPU_MAGN_REG_ASAY 0x11
@@ -48,6 +50,7 @@
static bool inv_magn_supported(const struct inv_mpu6050_state *st)
{
switch (st->chip_type) {
+ case INV_MPU9150:
case INV_MPU9250:
case INV_MPU9255:
return true;
@@ -61,6 +64,7 @@ static int inv_magn_init(struct inv_mpu6050_state *st)
{
uint8_t val;
uint8_t asa[3];
+ int32_t sensitivity;
int ret;
/* check whoami */
@@ -71,12 +75,19 @@ static int inv_magn_init(struct inv_mpu6050_state *st)
if (val != INV_MPU_MAGN_BITS_WIA)
return -ENODEV;
- /* reset chip */
- ret = inv_mpu_aux_write(st, INV_MPU_MAGN_I2C_ADDR,
- INV_MPU_MAGN_REG_CNTL2,
- INV_MPU_MAGN_BIT_SRST);
- if (ret)
- return ret;
+ /* software reset for MPU925x only */
+ switch (st->chip_type) {
+ case INV_MPU9250:
+ case INV_MPU9255:
+ ret = inv_mpu_aux_write(st, INV_MPU_MAGN_I2C_ADDR,
+ INV_MPU9250_MAGN_REG_CNTL2,
+ INV_MPU9250_MAGN_BIT_SRST);
+ if (ret)
+ return ret;
+ break;
+ default:
+ break;
+ }
/* read fuse ROM data */
ret = inv_mpu_aux_write(st, INV_MPU_MAGN_I2C_ADDR,
@@ -98,22 +109,36 @@ static int inv_magn_init(struct inv_mpu6050_state *st)
return ret;
/*
+ * Sensor sensitivity
+ * 1 uT = 0.01 G and the value is scaled by 1e6 (micro units)
+ * sensitivity = x uT * 0.01 * 1e6
+ */
+ switch (st->chip_type) {
+ case INV_MPU9150:
+ /* sensor sensitivity is 0.3 uT */
+ sensitivity = 3000;
+ break;
+ case INV_MPU9250:
+ case INV_MPU9255:
+ /* sensor sensitivity in 16 bits mode: 0.15 uT */
+ sensitivity = 1500;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ /*
* Sensitivity adjustment and scale to Gauss
*
* Hadj = H * (((ASA - 128) * 0.5 / 128) + 1)
* Factor simplification:
* Hadj = H * ((ASA + 128) / 256)
*
- * Sensor sentivity
- * 0.15 uT in 16 bits mode
- * 1 uT = 0.01 G and value is in micron (1e6)
- * sensitvity = 0.15 uT * 0.01 * 1e6
- *
- * raw_to_gauss = Hadj * 1500
+ * raw_to_gauss = Hadj * sensitivity
*/
- st->magn_raw_to_gauss[0] = (((int32_t)asa[0] + 128) * 1500) / 256;
- st->magn_raw_to_gauss[1] = (((int32_t)asa[1] + 128) * 1500) / 256;
- st->magn_raw_to_gauss[2] = (((int32_t)asa[2] + 128) * 1500) / 256;
+ st->magn_raw_to_gauss[0] = (((int32_t)asa[0] + 128) * sensitivity) / 256;
+ st->magn_raw_to_gauss[1] = (((int32_t)asa[1] + 128) * sensitivity) / 256;
+ st->magn_raw_to_gauss[2] = (((int32_t)asa[2] + 128) * sensitivity) / 256;
return 0;
}
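A quick sanity check of the scaling above, factored into a hypothetical helper: with the mid-scale fuse ROM value ASA = 128 the adjustment (ASA + 128) / 256 is exactly 1, so raw_to_gauss equals the bare sensitivity (3000 for the MPU9150's 0.3 uT/LSB, 1500 for an MPU925x in 16-bit mode); at the maximum ASA = 255 the MPU925x value becomes (255 + 128) * 1500 / 256 = 2244.

static int32_t example_raw_to_gauss(int32_t asa, int32_t sensitivity)
{
	/* Hadj = H * ((ASA + 128) / 256), folded into the scale factor */
	return ((asa + 128) * sensitivity) / 256;
}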
@@ -129,6 +154,7 @@ static int inv_magn_init(struct inv_mpu6050_state *st)
*/
int inv_mpu_magn_probe(struct inv_mpu6050_state *st)
{
+ uint8_t val;
int ret;
/* quit if chip is not supported */
@@ -179,10 +205,17 @@ int inv_mpu_magn_probe(struct inv_mpu6050_state *st)
if (ret)
return ret;
- /* add 16 bits mode */
- ret = regmap_write(st->map, INV_MPU6050_REG_I2C_SLV_DO(1),
- INV_MPU_MAGN_BITS_MODE_SINGLE |
- INV_MPU_MAGN_BIT_OUTPUT_BIT);
+ /* add 16 bits mode for MPU925x */
+ val = INV_MPU_MAGN_BITS_MODE_SINGLE;
+ switch (st->chip_type) {
+ case INV_MPU9250:
+ case INV_MPU9255:
+ val |= INV_MPU9250_MAGN_BIT_OUTPUT_BIT;
+ break;
+ default:
+ break;
+ }
+ ret = regmap_write(st->map, INV_MPU6050_REG_I2C_SLV_DO(1), val);
if (ret)
return ret;
@@ -237,6 +270,7 @@ int inv_mpu_magn_set_orient(struct inv_mpu6050_state *st)
/* fill magnetometer orientation */
switch (st->chip_type) {
+ case INV_MPU9150:
case INV_MPU9250:
case INV_MPU9255:
/* x <- y */
diff --git a/drivers/iio/imu/inv_mpu6050/inv_mpu_ring.c b/drivers/iio/imu/inv_mpu6050/inv_mpu_ring.c
index 10d16ec5104b..f9fdf4302a91 100644
--- a/drivers/iio/imu/inv_mpu6050/inv_mpu_ring.c
+++ b/drivers/iio/imu/inv_mpu6050/inv_mpu_ring.c
@@ -142,6 +142,8 @@ int inv_reset_fifo(struct iio_dev *indio_dev)
d |= INV_MPU6050_BITS_GYRO_OUT;
if (st->chip_config.accl_fifo_enable)
d |= INV_MPU6050_BIT_ACCEL_OUT;
+ if (st->chip_config.temp_fifo_enable)
+ d |= INV_MPU6050_BIT_TEMP_OUT;
if (st->chip_config.magn_fifo_enable)
d |= INV_MPU6050_BIT_SLAVE_0;
result = regmap_write(st->map, st->reg->fifo_en, d);
@@ -183,11 +185,8 @@ irqreturn_t inv_mpu6050_read_fifo(int irq, void *p)
"failed to ack interrupt\n");
goto flush_fifo;
}
- if (!(int_status & INV_MPU6050_BIT_RAW_DATA_RDY_INT)) {
- dev_warn(regmap_get_device(st->map),
- "spurious interrupt with status 0x%x\n", int_status);
+ if (!(int_status & INV_MPU6050_BIT_RAW_DATA_RDY_INT))
goto end_session;
- }
if (!(st->chip_config.accl_fifo_enable |
st->chip_config.gyro_fifo_enable |
@@ -200,8 +199,8 @@ irqreturn_t inv_mpu6050_read_fifo(int irq, void *p)
if (st->chip_config.gyro_fifo_enable)
bytes_per_datum += INV_MPU6050_BYTES_PER_3AXIS_SENSOR;
- if (st->chip_type == INV_ICM20602)
- bytes_per_datum += INV_ICM20602_BYTES_PER_TEMP_SENSOR;
+ if (st->chip_config.temp_fifo_enable)
+ bytes_per_datum += INV_MPU6050_BYTES_PER_TEMP_SENSOR;
if (st->chip_config.magn_fifo_enable)
bytes_per_datum += INV_MPU9X50_BYTES_MAGN;
diff --git a/drivers/iio/imu/inv_mpu6050/inv_mpu_spi.c b/drivers/iio/imu/inv_mpu6050/inv_mpu_spi.c
index 142692fc0758..ec102d5a5c77 100644
--- a/drivers/iio/imu/inv_mpu6050/inv_mpu_spi.c
+++ b/drivers/iio/imu/inv_mpu6050/inv_mpu_spi.c
@@ -74,7 +74,6 @@ static int inv_mpu_probe(struct spi_device *spi)
static const struct spi_device_id inv_mpu_id[] = {
{"mpu6000", INV_MPU6000},
{"mpu6500", INV_MPU6500},
- {"mpu9150", INV_MPU9150},
{"mpu9250", INV_MPU9250},
{"mpu9255", INV_MPU9255},
{"icm20608", INV_ICM20608},
diff --git a/drivers/iio/imu/inv_mpu6050/inv_mpu_trigger.c b/drivers/iio/imu/inv_mpu6050/inv_mpu_trigger.c
index d7d951927a44..5199fe790c30 100644
--- a/drivers/iio/imu/inv_mpu6050/inv_mpu_trigger.c
+++ b/drivers/iio/imu/inv_mpu6050/inv_mpu_trigger.c
@@ -24,6 +24,9 @@ static void inv_scan_query_mpu6050(struct iio_dev *indio_dev)
indio_dev->active_scan_mask) ||
test_bit(INV_MPU6050_SCAN_ACCL_Z,
indio_dev->active_scan_mask);
+
+ st->chip_config.temp_fifo_enable =
+ test_bit(INV_MPU6050_SCAN_TEMP, indio_dev->active_scan_mask);
}
static void inv_scan_query_mpu9x50(struct iio_dev *indio_dev)
@@ -50,6 +53,7 @@ static void inv_scan_query(struct iio_dev *indio_dev)
struct inv_mpu6050_state *st = iio_priv(indio_dev);
switch (st->chip_type) {
+ case INV_MPU9150:
case INV_MPU9250:
case INV_MPU9255:
return inv_scan_query_mpu9x50(indio_dev);
diff --git a/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx.h b/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx.h
index dc55d7dff3eb..9c3486a8134f 100644
--- a/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx.h
+++ b/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx.h
@@ -76,6 +76,7 @@ enum st_lsm6dsx_hw_id {
.endianness = IIO_LE, \
}, \
.event_spec = &st_lsm6dsx_event, \
+ .ext_info = st_lsm6dsx_accel_ext_info, \
.num_event_specs = 1, \
}
@@ -176,21 +177,38 @@ struct st_lsm6dsx_hw_ts_settings {
* @pullup_en: i2c controller pull-up register info (addr + mask).
* @aux_sens: aux sensor register info (addr + mask).
* @wr_once: write_once register info (addr + mask).
+ * @emb_func: embedded function register info (addr + mask).
+ * @num_ext_dev: max number of slave devices.
* @shub_out: sensor hub first output register info.
* @slv0_addr: slave0 address in secondary page.
* @dw_slv0_addr: slave0 write register address in secondary page.
* @batch_en: Enable/disable FIFO batching.
+ * @pause: controller pause value.
*/
struct st_lsm6dsx_shub_settings {
struct st_lsm6dsx_reg page_mux;
- struct st_lsm6dsx_reg master_en;
- struct st_lsm6dsx_reg pullup_en;
+ struct {
+ bool sec_page;
+ u8 addr;
+ u8 mask;
+ } master_en;
+ struct {
+ bool sec_page;
+ u8 addr;
+ u8 mask;
+ } pullup_en;
struct st_lsm6dsx_reg aux_sens;
struct st_lsm6dsx_reg wr_once;
- u8 shub_out;
+ struct st_lsm6dsx_reg emb_func;
+ u8 num_ext_dev;
+ struct {
+ bool sec_page;
+ u8 addr;
+ } shub_out;
u8 slv0_addr;
u8 dw_slv0_addr;
u8 batch_en;
+ u8 pause;
};
struct st_lsm6dsx_event_settings {
@@ -361,6 +379,7 @@ struct st_lsm6dsx_sensor {
* @enable_event: enabled event bitmask.
* @iio_devs: Pointers to acc/gyro iio_dev instances.
* @settings: Pointer to the specific sensor settings in use.
+ * @orientation: sensor chip orientation relative to main hardware.
*/
struct st_lsm6dsx_hw {
struct device *dev;
@@ -387,16 +406,21 @@ struct st_lsm6dsx_hw {
struct iio_dev *iio_devs[ST_LSM6DSX_ID_MAX];
const struct st_lsm6dsx_settings *settings;
+
+ struct iio_mount_matrix orientation;
};
-static const struct iio_event_spec st_lsm6dsx_event = {
+static __maybe_unused const struct iio_event_spec st_lsm6dsx_event = {
.type = IIO_EV_TYPE_THRESH,
.dir = IIO_EV_DIR_EITHER,
.mask_separate = BIT(IIO_EV_INFO_VALUE) |
BIT(IIO_EV_INFO_ENABLE)
};
-static const unsigned long st_lsm6dsx_available_scan_masks[] = {0x7, 0x0};
+static __maybe_unused const unsigned long st_lsm6dsx_available_scan_masks[] = {
+ 0x7, 0x0,
+};
+
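Both objects need __maybe_unused because they are definitions in a header: every translation unit that includes st_lsm6dsx.h gets its own copy, and any unit that does not happen to reference them would otherwise warn under -Wunused-const-variable.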
extern const struct dev_pm_ops st_lsm6dsx_pm_ops;
int st_lsm6dsx_probe(struct device *dev, int irq, int hw_id,
@@ -457,4 +481,19 @@ st_lsm6dsx_write_locked(struct st_lsm6dsx_hw *hw, unsigned int addr,
return err;
}
+static inline const struct iio_mount_matrix *
+st_lsm6dsx_get_mount_matrix(const struct iio_dev *iio_dev,
+ const struct iio_chan_spec *chan)
+{
+ struct st_lsm6dsx_sensor *sensor = iio_priv(iio_dev);
+ struct st_lsm6dsx_hw *hw = sensor->hw;
+
+ return &hw->orientation;
+}
+
+static const struct iio_chan_spec_ext_info st_lsm6dsx_accel_ext_info[] = {
+ IIO_MOUNT_MATRIX(IIO_SHARED_BY_ALL, st_lsm6dsx_get_mount_matrix),
+ { }
+};
+
#endif /* ST_LSM6DSX_H */
diff --git a/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_buffer.c b/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_buffer.c
index cb536b81a1c2..bb899345f2bb 100644
--- a/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_buffer.c
+++ b/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_buffer.c
@@ -336,12 +336,13 @@ static inline int st_lsm6dsx_read_block(struct st_lsm6dsx_hw *hw, u8 addr,
*/
int st_lsm6dsx_read_fifo(struct st_lsm6dsx_hw *hw)
{
+ struct st_lsm6dsx_sensor *acc_sensor, *gyro_sensor, *ext_sensor = NULL;
+ int err, acc_sip, gyro_sip, ts_sip, ext_sip, read_len, offset;
u16 fifo_len, pattern_len = hw->sip * ST_LSM6DSX_SAMPLE_SIZE;
u16 fifo_diff_mask = hw->settings->fifo_ops.fifo_diff.mask;
- int err, acc_sip, gyro_sip, ts_sip, read_len, offset;
- struct st_lsm6dsx_sensor *acc_sensor, *gyro_sensor;
u8 gyro_buff[ST_LSM6DSX_IIO_BUFF_SIZE];
u8 acc_buff[ST_LSM6DSX_IIO_BUFF_SIZE];
+ u8 ext_buff[ST_LSM6DSX_IIO_BUFF_SIZE];
bool reset_ts = false;
__le16 fifo_status;
s64 ts = 0;
@@ -364,6 +365,8 @@ int st_lsm6dsx_read_fifo(struct st_lsm6dsx_hw *hw)
acc_sensor = iio_priv(hw->iio_devs[ST_LSM6DSX_ID_ACC]);
gyro_sensor = iio_priv(hw->iio_devs[ST_LSM6DSX_ID_GYRO]);
+ if (hw->iio_devs[ST_LSM6DSX_ID_EXT0])
+ ext_sensor = iio_priv(hw->iio_devs[ST_LSM6DSX_ID_EXT0]);
for (read_len = 0; read_len < fifo_len; read_len += pattern_len) {
err = st_lsm6dsx_read_block(hw, ST_LSM6DSX_REG_FIFO_OUTL_ADDR,
@@ -391,12 +394,13 @@ int st_lsm6dsx_read_fifo(struct st_lsm6dsx_hw *hw)
* following pattern is repeated every 9 samples:
* - Gx, Gy, Gz, Ax, Ay, Az, Ts, Gx, Gy, Gz, Ts, Gx, ..
*/
+ ext_sip = ext_sensor ? ext_sensor->sip : 0;
gyro_sip = gyro_sensor->sip;
acc_sip = acc_sensor->sip;
ts_sip = hw->ts_sip;
offset = 0;
- while (acc_sip > 0 || gyro_sip > 0) {
+ while (acc_sip > 0 || gyro_sip > 0 || ext_sip > 0) {
if (gyro_sip > 0) {
memcpy(gyro_buff, &hw->buff[offset],
ST_LSM6DSX_SAMPLE_SIZE);
@@ -407,6 +411,11 @@ int st_lsm6dsx_read_fifo(struct st_lsm6dsx_hw *hw)
ST_LSM6DSX_SAMPLE_SIZE);
offset += ST_LSM6DSX_SAMPLE_SIZE;
}
+ if (ext_sip > 0) {
+ memcpy(ext_buff, &hw->buff[offset],
+ ST_LSM6DSX_SAMPLE_SIZE);
+ offset += ST_LSM6DSX_SAMPLE_SIZE;
+ }
if (ts_sip-- > 0) {
u8 data[ST_LSM6DSX_SAMPLE_SIZE];
@@ -440,6 +449,10 @@ int st_lsm6dsx_read_fifo(struct st_lsm6dsx_hw *hw)
iio_push_to_buffers_with_timestamp(
hw->iio_devs[ST_LSM6DSX_ID_ACC],
acc_buff, acc_sensor->ts_ref + ts);
+ if (ext_sip-- > 0)
+ iio_push_to_buffers_with_timestamp(
+ hw->iio_devs[ST_LSM6DSX_ID_EXT0],
+ ext_buff, ext_sensor->ts_ref + ts);
}
}
@@ -638,12 +651,12 @@ int st_lsm6dsx_update_fifo(struct st_lsm6dsx_sensor *sensor, bool enable)
err = st_lsm6dsx_sensor_set_enable(sensor, enable);
if (err < 0)
goto out;
-
- err = st_lsm6dsx_set_fifo_odr(sensor, enable);
- if (err < 0)
- goto out;
}
+ err = st_lsm6dsx_set_fifo_odr(sensor, enable);
+ if (err < 0)
+ goto out;
+
err = st_lsm6dsx_update_decimators(hw);
if (err < 0)
goto out;
diff --git a/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_core.c b/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_core.c
index a7d40c02ce6b..84d219ae6aee 100644
--- a/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_core.c
+++ b/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_core.c
@@ -54,6 +54,7 @@
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/pm.h>
+#include <linux/property.h>
#include <linux/regmap.h>
#include <linux/bitfield.h>
@@ -655,6 +656,10 @@ static const struct st_lsm6dsx_settings st_lsm6dsx_sensor_settings[] = {
.addr = 0x08,
.mask = GENMASK(5, 3),
},
+ [ST_LSM6DSX_ID_EXT0] = {
+ .addr = 0x09,
+ .mask = GENMASK(2, 0),
+ },
},
.fifo_ops = {
.update_fifo = st_lsm6dsx_update_fifo,
@@ -687,6 +692,39 @@ static const struct st_lsm6dsx_settings st_lsm6dsx_sensor_settings[] = {
.mask = GENMASK(5, 3),
},
},
+ .shub_settings = {
+ .page_mux = {
+ .addr = 0x01,
+ .mask = BIT(7),
+ },
+ .master_en = {
+ .addr = 0x1a,
+ .mask = BIT(0),
+ },
+ .pullup_en = {
+ .addr = 0x1a,
+ .mask = BIT(3),
+ },
+ .aux_sens = {
+ .addr = 0x04,
+ .mask = GENMASK(5, 4),
+ },
+ .wr_once = {
+ .addr = 0x07,
+ .mask = BIT(5),
+ },
+ .emb_func = {
+ .addr = 0x19,
+ .mask = BIT(2),
+ },
+ .num_ext_dev = 1,
+ .shub_out = {
+ .addr = 0x2e,
+ },
+ .slv0_addr = 0x02,
+ .dw_slv0_addr = 0x0e,
+ .pause = 0x7,
+ },
.event_settings = {
.enable_reg = {
.addr = 0x58,
@@ -867,10 +905,12 @@ static const struct st_lsm6dsx_settings st_lsm6dsx_sensor_settings[] = {
.mask = BIT(6),
},
.master_en = {
+ .sec_page = true,
.addr = 0x14,
.mask = BIT(2),
},
.pullup_en = {
+ .sec_page = true,
.addr = 0x14,
.mask = BIT(3),
},
@@ -882,7 +922,11 @@ static const struct st_lsm6dsx_settings st_lsm6dsx_sensor_settings[] = {
.addr = 0x14,
.mask = BIT(6),
},
- .shub_out = 0x02,
+ .num_ext_dev = 3,
+ .shub_out = {
+ .sec_page = true,
+ .addr = 0x02,
+ },
.slv0_addr = 0x15,
.dw_slv0_addr = 0x21,
.batch_en = BIT(3),
@@ -1241,10 +1285,12 @@ static const struct st_lsm6dsx_settings st_lsm6dsx_sensor_settings[] = {
.mask = BIT(6),
},
.master_en = {
+ .sec_page = true,
.addr = 0x14,
.mask = BIT(2),
},
.pullup_en = {
+ .sec_page = true,
.addr = 0x14,
.mask = BIT(3),
},
@@ -1256,7 +1302,11 @@ static const struct st_lsm6dsx_settings st_lsm6dsx_sensor_settings[] = {
.addr = 0x14,
.mask = BIT(6),
},
- .shub_out = 0x02,
+ .num_ext_dev = 3,
+ .shub_out = {
+ .sec_page = true,
+ .addr = 0x02,
+ },
.slv0_addr = 0x15,
.dw_slv0_addr = 0x21,
.batch_en = BIT(3),
@@ -1301,7 +1351,8 @@ static int st_lsm6dsx_check_whoami(struct st_lsm6dsx_hw *hw, int id,
for (i = 0; i < ARRAY_SIZE(st_lsm6dsx_sensor_settings); i++) {
for (j = 0; j < ST_LSM6DSX_MAX_ID; j++) {
- if (id == st_lsm6dsx_sensor_settings[i].id[j].hw_id)
+ if (st_lsm6dsx_sensor_settings[i].id[j].name &&
+ id == st_lsm6dsx_sensor_settings[i].id[j].hw_id)
break;
}
if (j < ST_LSM6DSX_MAX_ID)
@@ -1505,8 +1556,11 @@ static int st_lsm6dsx_read_oneshot(struct st_lsm6dsx_sensor *sensor,
if (err < 0)
return err;
- if (!hw->enable_event)
- st_lsm6dsx_sensor_set_enable(sensor, false);
+ if (!hw->enable_event) {
+ err = st_lsm6dsx_sensor_set_enable(sensor, false);
+ if (err < 0)
+ return err;
+ }
*val = (s16)le16_to_cpu(data);
@@ -1608,11 +1662,11 @@ static int st_lsm6dsx_event_setup(struct st_lsm6dsx_hw *hw, int state)
}
static int st_lsm6dsx_read_event(struct iio_dev *iio_dev,
- const struct iio_chan_spec *chan,
- enum iio_event_type type,
- enum iio_event_direction dir,
- enum iio_event_info info,
- int *val, int *val2)
+ const struct iio_chan_spec *chan,
+ enum iio_event_type type,
+ enum iio_event_direction dir,
+ enum iio_event_info info,
+ int *val, int *val2)
{
struct st_lsm6dsx_sensor *sensor = iio_priv(iio_dev);
struct st_lsm6dsx_hw *hw = sensor->hw;
@@ -1826,14 +1880,14 @@ static const struct iio_info st_lsm6dsx_gyro_info = {
.hwfifo_set_watermark = st_lsm6dsx_set_watermark,
};
-static int st_lsm6dsx_of_get_drdy_pin(struct st_lsm6dsx_hw *hw, int *drdy_pin)
+static int st_lsm6dsx_get_drdy_pin(struct st_lsm6dsx_hw *hw, int *drdy_pin)
{
- struct device_node *np = hw->dev->of_node;
+ struct device *dev = hw->dev;
- if (!np)
+ if (!dev_fwnode(dev))
return -EINVAL;
- return of_property_read_u32(np, "st,drdy-int-pin", drdy_pin);
+ return device_property_read_u32(dev, "st,drdy-int-pin", drdy_pin);
}
static int
@@ -1842,7 +1896,7 @@ st_lsm6dsx_get_drdy_reg(struct st_lsm6dsx_hw *hw,
{
int err = 0, drdy_pin;
- if (st_lsm6dsx_of_get_drdy_pin(hw, &drdy_pin) < 0) {
+ if (st_lsm6dsx_get_drdy_pin(hw, &drdy_pin) < 0) {
struct st_sensors_platform_data *pdata;
struct device *dev = hw->dev;
@@ -1871,26 +1925,29 @@ st_lsm6dsx_get_drdy_reg(struct st_lsm6dsx_hw *hw,
static int st_lsm6dsx_init_shub(struct st_lsm6dsx_hw *hw)
{
const struct st_lsm6dsx_shub_settings *hub_settings;
- struct device_node *np = hw->dev->of_node;
struct st_sensors_platform_data *pdata;
+ struct device *dev = hw->dev;
unsigned int data;
int err = 0;
hub_settings = &hw->settings->shub_settings;
- pdata = (struct st_sensors_platform_data *)hw->dev->platform_data;
- if ((np && of_property_read_bool(np, "st,pullups")) ||
+ pdata = (struct st_sensors_platform_data *)dev->platform_data;
+ if ((dev_fwnode(dev) && device_property_read_bool(dev, "st,pullups")) ||
(pdata && pdata->pullups)) {
- err = st_lsm6dsx_set_page(hw, true);
- if (err < 0)
- return err;
+ if (hub_settings->pullup_en.sec_page) {
+ err = st_lsm6dsx_set_page(hw, true);
+ if (err < 0)
+ return err;
+ }
data = ST_LSM6DSX_SHIFT_VAL(1, hub_settings->pullup_en.mask);
err = regmap_update_bits(hw->regmap,
hub_settings->pullup_en.addr,
hub_settings->pullup_en.mask, data);
- st_lsm6dsx_set_page(hw, false);
+ if (hub_settings->pullup_en.sec_page)
+ st_lsm6dsx_set_page(hw, false);
if (err < 0)
return err;
@@ -1908,6 +1965,16 @@ static int st_lsm6dsx_init_shub(struct st_lsm6dsx_hw *hw)
hub_settings->aux_sens.mask, data);
st_lsm6dsx_set_page(hw, false);
+
+ if (err < 0)
+ return err;
+ }
+
+ if (hub_settings->emb_func.addr) {
+ data = ST_LSM6DSX_SHIFT_VAL(1, hub_settings->emb_func.mask);
+ err = regmap_update_bits(hw->regmap,
+ hub_settings->emb_func.addr,
+ hub_settings->emb_func.mask, data);
}
return err;
@@ -2157,9 +2224,9 @@ static irqreturn_t st_lsm6dsx_handler_thread(int irq, void *private)
static int st_lsm6dsx_irq_setup(struct st_lsm6dsx_hw *hw)
{
- struct device_node *np = hw->dev->of_node;
struct st_sensors_platform_data *pdata;
const struct st_lsm6dsx_reg *reg;
+ struct device *dev = hw->dev;
unsigned long irq_type;
bool irq_active_low;
int err;
@@ -2187,8 +2254,8 @@ static int st_lsm6dsx_irq_setup(struct st_lsm6dsx_hw *hw)
if (err < 0)
return err;
- pdata = (struct st_sensors_platform_data *)hw->dev->platform_data;
- if ((np && of_property_read_bool(np, "drive-open-drain")) ||
+ pdata = (struct st_sensors_platform_data *)dev->platform_data;
+ if ((dev_fwnode(dev) && device_property_read_bool(dev, "drive-open-drain")) ||
(pdata && pdata->open_drain)) {
reg = &hw->settings->irq_config.od;
err = regmap_update_bits(hw->regmap, reg->addr, reg->mask,
@@ -2218,7 +2285,6 @@ int st_lsm6dsx_probe(struct device *dev, int irq, int hw_id,
{
struct st_sensors_platform_data *pdata = dev->platform_data;
const struct st_lsm6dsx_shub_settings *hub_settings;
- struct device_node *np = dev->of_node;
struct st_lsm6dsx_hw *hw;
const char *name = NULL;
int i, err;
@@ -2272,6 +2338,10 @@ int st_lsm6dsx_probe(struct device *dev, int irq, int hw_id,
return err;
}
+ err = iio_read_mount_matrix(hw->dev, "mount-matrix", &hw->orientation);
+ if (err)
+ return err;
+
for (i = 0; i < ST_LSM6DSX_ID_MAX; i++) {
if (!hw->iio_devs[i])
continue;
@@ -2281,7 +2351,7 @@ int st_lsm6dsx_probe(struct device *dev, int irq, int hw_id,
return err;
}
- if ((np && of_property_read_bool(np, "wakeup-source")) ||
+ if ((dev_fwnode(dev) && device_property_read_bool(dev, "wakeup-source")) ||
(pdata && pdata->wakeup_source))
device_init_wakeup(dev, true);
diff --git a/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_i2c.c b/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_i2c.c
index cd47ec1fedcb..0fb32131afce 100644
--- a/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_i2c.c
+++ b/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_i2c.c
@@ -12,7 +12,6 @@
#include <linux/module.h>
#include <linux/i2c.h>
#include <linux/slab.h>
-#include <linux/of.h>
#include <linux/regmap.h>
#include "st_lsm6dsx.h"
@@ -122,7 +121,7 @@ static struct i2c_driver st_lsm6dsx_driver = {
.driver = {
.name = "st_lsm6dsx_i2c",
.pm = &st_lsm6dsx_pm_ops,
- .of_match_table = of_match_ptr(st_lsm6dsx_i2c_of_match),
+ .of_match_table = st_lsm6dsx_i2c_of_match,
},
.probe = st_lsm6dsx_i2c_probe,
.id_table = st_lsm6dsx_i2c_id_table,
diff --git a/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_shub.c b/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_shub.c
index fa5d1001a46c..eea555617d4a 100644
--- a/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_shub.c
+++ b/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_shub.c
@@ -30,7 +30,6 @@
#include "st_lsm6dsx.h"
-#define ST_LSM6DSX_MAX_SLV_NUM 3
#define ST_LSM6DSX_SLV_ADDR(n, base) ((base) + (n) * 3)
#define ST_LSM6DSX_SLV_SUB_ADDR(n, base) ((base) + 1 + (n) * 3)
#define ST_LSM6DSX_SLV_CONFIG(n, base) ((base) + 2 + (n) * 3)
@@ -102,24 +101,31 @@ static void st_lsm6dsx_shub_wait_complete(struct st_lsm6dsx_hw *hw)
}
/**
- * st_lsm6dsx_shub_read_reg - read i2c controller register
+ * st_lsm6dsx_shub_read_output - read i2c controller register
*
* Read st_lsm6dsx i2c controller register
*/
-static int st_lsm6dsx_shub_read_reg(struct st_lsm6dsx_hw *hw, u8 addr,
- u8 *data, int len)
+static int
+st_lsm6dsx_shub_read_output(struct st_lsm6dsx_hw *hw, u8 *data,
+ int len)
{
+ const struct st_lsm6dsx_shub_settings *hub_settings;
int err;
mutex_lock(&hw->page_lock);
- err = st_lsm6dsx_set_page(hw, true);
- if (err < 0)
- goto out;
+ hub_settings = &hw->settings->shub_settings;
+ if (hub_settings->shub_out.sec_page) {
+ err = st_lsm6dsx_set_page(hw, true);
+ if (err < 0)
+ goto out;
+ }
- err = regmap_bulk_read(hw->regmap, addr, data, len);
+ err = regmap_bulk_read(hw->regmap, hub_settings->shub_out.addr,
+ data, len);
- st_lsm6dsx_set_page(hw, false);
+ if (hub_settings->shub_out.sec_page)
+ st_lsm6dsx_set_page(hw, false);
out:
mutex_unlock(&hw->page_lock);
@@ -186,15 +192,18 @@ static int st_lsm6dsx_shub_master_enable(struct st_lsm6dsx_sensor *sensor,
mutex_lock(&hw->page_lock);
hub_settings = &hw->settings->shub_settings;
- err = st_lsm6dsx_set_page(hw, true);
- if (err < 0)
- goto out;
+ if (hub_settings->master_en.sec_page) {
+ err = st_lsm6dsx_set_page(hw, true);
+ if (err < 0)
+ goto out;
+ }
data = ST_LSM6DSX_SHIFT_VAL(enable, hub_settings->master_en.mask);
err = regmap_update_bits(hw->regmap, hub_settings->master_en.addr,
hub_settings->master_en.mask, data);
- st_lsm6dsx_set_page(hw, false);
+ if (hub_settings->master_en.sec_page)
+ st_lsm6dsx_set_page(hw, false);
out:
mutex_unlock(&hw->page_lock);
@@ -212,16 +221,21 @@ st_lsm6dsx_shub_read(struct st_lsm6dsx_sensor *sensor, u8 addr,
u8 *data, int len)
{
const struct st_lsm6dsx_shub_settings *hub_settings;
+ u8 config[3], slv_addr, slv_config = 0;
struct st_lsm6dsx_hw *hw = sensor->hw;
- u8 config[3], slv_addr;
+ const struct st_lsm6dsx_reg *aux_sens;
int err;
hub_settings = &hw->settings->shub_settings;
slv_addr = ST_LSM6DSX_SLV_ADDR(0, hub_settings->slv0_addr);
+ aux_sens = &hw->settings->shub_settings.aux_sens;
+ /* do not overwrite aux_sens */
+ if (slv_addr + 2 == aux_sens->addr)
+ slv_config = ST_LSM6DSX_SHIFT_VAL(3, aux_sens->mask);
config[0] = (sensor->ext_info.addr << 1) | 1;
config[1] = addr;
- config[2] = len & ST_LS6DSX_READ_OP_MASK;
+ config[2] = (len & ST_LS6DSX_READ_OP_MASK) | slv_config;
err = st_lsm6dsx_shub_write_reg(hw, slv_addr, config,
sizeof(config));
@@ -234,12 +248,14 @@ st_lsm6dsx_shub_read(struct st_lsm6dsx_sensor *sensor, u8 addr,
st_lsm6dsx_shub_wait_complete(hw);
- err = st_lsm6dsx_shub_read_reg(hw, hub_settings->shub_out, data,
- len & ST_LS6DSX_READ_OP_MASK);
+ err = st_lsm6dsx_shub_read_output(hw, data,
+ len & ST_LS6DSX_READ_OP_MASK);
st_lsm6dsx_shub_master_enable(sensor, false);
- memset(config, 0, sizeof(config));
+ config[0] = hub_settings->pause;
+ config[1] = 0;
+ config[2] = slv_config;
return st_lsm6dsx_shub_write_reg(hw, slv_addr, config,
sizeof(config));
}
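The "do not overwrite aux_sens" guard is plain address arithmetic: each slave n owns the register triplet at slv0_addr + n * 3 (address, sub-address, config), so SLV0_CONFIG sits at slv0_addr + 2. On the settings block added earlier in this patch, slv0_addr = 0x02 and aux_sens.addr = 0x04, so 0x02 + 2 aliases the register holding the enabled-slave count, and every SLV0_CONFIG write has to re-pack those bits via slv_config.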
@@ -296,7 +312,8 @@ st_lsm6dsx_shub_write(struct st_lsm6dsx_sensor *sensor, u8 addr,
st_lsm6dsx_shub_master_enable(sensor, false);
}
- memset(config, 0, sizeof(config));
+ config[0] = hub_settings->pause;
+ config[1] = 0;
return st_lsm6dsx_shub_write_reg(hw, slv_addr, config, sizeof(config));
}
@@ -688,14 +705,19 @@ st_lsm6dsx_shub_check_wai(struct st_lsm6dsx_hw *hw, u8 *i2c_addr,
const struct st_lsm6dsx_ext_dev_settings *settings)
{
const struct st_lsm6dsx_shub_settings *hub_settings;
+ u8 config[3], data, slv_addr, slv_config = 0;
+ const struct st_lsm6dsx_reg *aux_sens;
struct st_lsm6dsx_sensor *sensor;
- u8 config[3], data, slv_addr;
bool found = false;
int i, err;
+ sensor = iio_priv(hw->iio_devs[ST_LSM6DSX_ID_ACC]);
hub_settings = &hw->settings->shub_settings;
+ aux_sens = &hw->settings->shub_settings.aux_sens;
slv_addr = ST_LSM6DSX_SLV_ADDR(0, hub_settings->slv0_addr);
- sensor = iio_priv(hw->iio_devs[ST_LSM6DSX_ID_ACC]);
+ /* do not overwrite aux_sens */
+ if (slv_addr + 2 == aux_sens->addr)
+ slv_config = ST_LSM6DSX_SHIFT_VAL(3, aux_sens->mask);
for (i = 0; i < ARRAY_SIZE(settings->i2c_addr); i++) {
if (!settings->i2c_addr[i])
@@ -704,7 +726,7 @@ st_lsm6dsx_shub_check_wai(struct st_lsm6dsx_hw *hw, u8 *i2c_addr,
/* read wai slave register */
config[0] = (settings->i2c_addr[i] << 1) | 0x1;
config[1] = settings->wai.addr;
- config[2] = 0x1;
+ config[2] = 0x1 | slv_config;
err = st_lsm6dsx_shub_write_reg(hw, slv_addr, config,
sizeof(config));
@@ -717,9 +739,7 @@ st_lsm6dsx_shub_check_wai(struct st_lsm6dsx_hw *hw, u8 *i2c_addr,
st_lsm6dsx_shub_wait_complete(hw);
- err = st_lsm6dsx_shub_read_reg(hw,
- hub_settings->shub_out,
- &data, sizeof(data));
+ err = st_lsm6dsx_shub_read_output(hw, &data, sizeof(data));
st_lsm6dsx_shub_master_enable(sensor, false);
@@ -735,7 +755,9 @@ st_lsm6dsx_shub_check_wai(struct st_lsm6dsx_hw *hw, u8 *i2c_addr,
}
/* reset SLV0 channel */
- memset(config, 0, sizeof(config));
+ config[0] = hub_settings->pause;
+ config[1] = 0;
+ config[2] = slv_config;
err = st_lsm6dsx_shub_write_reg(hw, slv_addr, config,
sizeof(config));
if (err < 0)
@@ -770,7 +792,7 @@ int st_lsm6dsx_shub_probe(struct st_lsm6dsx_hw *hw, const char *name)
if (err < 0)
return err;
- if (++num_ext_dev >= ST_LSM6DSX_MAX_SLV_NUM)
+ if (++num_ext_dev >= hw->settings->shub_settings.num_ext_dev)
break;
id++;
}
diff --git a/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_spi.c b/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_spi.c
index 67ff36eac247..eb1086e4a951 100644
--- a/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_spi.c
+++ b/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_spi.c
@@ -12,7 +12,6 @@
#include <linux/module.h>
#include <linux/spi/spi.h>
#include <linux/slab.h>
-#include <linux/of.h>
#include <linux/regmap.h>
#include "st_lsm6dsx.h"
@@ -122,7 +121,7 @@ static struct spi_driver st_lsm6dsx_driver = {
.driver = {
.name = "st_lsm6dsx_spi",
.pm = &st_lsm6dsx_pm_ops,
- .of_match_table = of_match_ptr(st_lsm6dsx_spi_of_match),
+ .of_match_table = st_lsm6dsx_spi_of_match,
},
.probe = st_lsm6dsx_spi_probe,
.id_table = st_lsm6dsx_spi_id_table,
diff --git a/drivers/iio/industrialio-buffer.c b/drivers/iio/industrialio-buffer.c
index c193d64e5217..4ada5592aa2b 100644
--- a/drivers/iio/industrialio-buffer.c
+++ b/drivers/iio/industrialio-buffer.c
@@ -87,7 +87,7 @@ static bool iio_buffer_ready(struct iio_dev *indio_dev, struct iio_buffer *buf,
}
/**
- * iio_buffer_read_first_n_outer() - chrdev read for buffer access
+ * iio_buffer_read_outer() - chrdev read for buffer access
* @filp: File structure pointer for the char device
* @buf: Destination buffer for iio buffer read
* @n: First n bytes to read
@@ -99,8 +99,8 @@ static bool iio_buffer_ready(struct iio_dev *indio_dev, struct iio_buffer *buf,
* Return: negative values corresponding to error codes or ret != 0
* for ending the reading activity
**/
-ssize_t iio_buffer_read_first_n_outer(struct file *filp, char __user *buf,
- size_t n, loff_t *f_ps)
+ssize_t iio_buffer_read_outer(struct file *filp, char __user *buf,
+ size_t n, loff_t *f_ps)
{
struct iio_dev *indio_dev = filp->private_data;
struct iio_buffer *rb = indio_dev->buffer;
@@ -112,7 +112,7 @@ ssize_t iio_buffer_read_first_n_outer(struct file *filp, char __user *buf,
if (!indio_dev->info)
return -ENODEV;
- if (!rb || !rb->access->read_first_n)
+ if (!rb || !rb->access->read)
return -EINVAL;
datum_size = rb->bytes_per_datum;
@@ -147,7 +147,7 @@ ssize_t iio_buffer_read_first_n_outer(struct file *filp, char __user *buf,
continue;
}
- ret = rb->access->read_first_n(rb, n, buf);
+ ret = rb->access->read(rb, n, buf);
if (ret == 0 && (filp->f_flags & O_NONBLOCK))
ret = -EAGAIN;
} while (ret == 0);
@@ -566,7 +566,7 @@ static int iio_compute_scan_bytes(struct iio_dev *indio_dev,
const unsigned long *mask, bool timestamp)
{
unsigned bytes = 0;
- int length, i;
+ int length, i, largest = 0;
/* How much space will the demuxed element take? */
for_each_set_bit(i, mask,
@@ -574,13 +574,17 @@ static int iio_compute_scan_bytes(struct iio_dev *indio_dev,
length = iio_storage_bytes_for_si(indio_dev, i);
bytes = ALIGN(bytes, length);
bytes += length;
+ largest = max(largest, length);
}
if (timestamp) {
length = iio_storage_bytes_for_timestamp(indio_dev);
bytes = ALIGN(bytes, length);
bytes += length;
+ largest = max(largest, length);
}
+
+ bytes = ALIGN(bytes, largest);
return bytes;
}
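Worked example of what the trailing ALIGN fixes: a scan holding an s32 channel followed by an s16 channel packs to 4 + 2 = 6 bytes with largest = 4. Scans are stored back to back in the buffer, so under the old arithmetic the second scan's s32 started at offset 6, off its natural alignment; padding each scan to ALIGN(6, 4) = 8 keeps every element of every repetition naturally aligned, and a scan ending in an s64 timestamp likewise stays on an 8-byte boundary.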
diff --git a/drivers/iio/industrialio-core.c b/drivers/iio/industrialio-core.c
index a46cdf2d8833..65ff0d067018 100644
--- a/drivers/iio/industrialio-core.c
+++ b/drivers/iio/industrialio-core.c
@@ -161,6 +161,7 @@ static const char * const iio_chan_info_postfix[] = {
[IIO_CHAN_INFO_DEBOUNCE_TIME] = "debounce_time",
[IIO_CHAN_INFO_CALIBEMISSIVITY] = "calibemissivity",
[IIO_CHAN_INFO_OVERSAMPLING_RATIO] = "oversampling_ratio",
+ [IIO_CHAN_INFO_THERMOCOUPLE_TYPE] = "thermocouple_type",
};
/**
@@ -596,6 +597,8 @@ static ssize_t __iio_format_value(char *buf, size_t len, unsigned int type,
}
return l;
}
+ case IIO_VAL_CHAR:
+ return snprintf(buf, len, "%c", (char)vals[0]);
default:
return 0;
}
@@ -837,7 +840,8 @@ static ssize_t iio_write_channel_info(struct device *dev,
struct iio_dev *indio_dev = dev_to_iio_dev(dev);
struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
int ret, fract_mult = 100000;
- int integer, fract;
+ int integer, fract = 0;
+ bool is_char = false;
/* Assumes decimal - precision based on number of digits */
if (!indio_dev->info->write_raw)
@@ -855,13 +859,24 @@ static ssize_t iio_write_channel_info(struct device *dev,
case IIO_VAL_INT_PLUS_NANO:
fract_mult = 100000000;
break;
+ case IIO_VAL_CHAR:
+ is_char = true;
+ break;
default:
return -EINVAL;
}
- ret = iio_str_to_fixpoint(buf, fract_mult, &integer, &fract);
- if (ret)
- return ret;
+ if (is_char) {
+ char ch;
+
+ if (sscanf(buf, "%c", &ch) != 1)
+ return -EINVAL;
+ integer = ch;
+ } else {
+ ret = iio_str_to_fixpoint(buf, fract_mult, &integer, &fract);
+ if (ret)
+ return ret;
+ }
ret = indio_dev->info->write_raw(indio_dev, this_attr->c,
integer, fract, this_attr->address);
@@ -1617,7 +1632,7 @@ static long iio_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
}
static const struct file_operations iio_buffer_fileops = {
- .read = iio_buffer_read_first_n_outer_addr,
+ .read = iio_buffer_read_outer_addr,
.release = iio_chrdev_release,
.open = iio_chrdev_open,
.poll = iio_buffer_poll_addr,
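
A hedged, standalone illustration of the new IIO_VAL_CHAR write path (parse_attr() is a hypothetical stand-in for iio_write_channel_info(); the real handler calls iio_str_to_fixpoint() for the numeric formats):

#include <stdio.h>

static int parse_attr(const char *buf, int is_char, int *integer)
{
	char ch;

	if (is_char) {
		if (sscanf(buf, "%c", &ch) != 1)
			return -1;	/* -EINVAL in the kernel */
		*integer = ch;
		return 0;
	}
	/* numeric path, stand-in for iio_str_to_fixpoint() */
	return sscanf(buf, "%d", integer) == 1 ? 0 : -1;
}

int main(void)
{
	int v;

	if (parse_attr("K\n", 1, &v) == 0)
		printf("%d\n", v);	/* 75, the ASCII code of 'K' */
	return 0;
}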
diff --git a/drivers/iio/light/apds9960.c b/drivers/iio/light/apds9960.c
index c5dfb9a6b5a1..52f86bc777dd 100644
--- a/drivers/iio/light/apds9960.c
+++ b/drivers/iio/light/apds9960.c
@@ -15,7 +15,6 @@
#include <linux/mutex.h>
#include <linux/err.h>
#include <linux/irq.h>
-#include <linux/gpio.h>
#include <linux/i2c.h>
#include <linux/pm_runtime.h>
#include <linux/regmap.h>
@@ -24,7 +23,6 @@
#include <linux/iio/events.h>
#include <linux/iio/kfifo_buf.h>
#include <linux/iio/sysfs.h>
-#include <linux/of_gpio.h>
#define APDS9960_REGMAP_NAME "apds9960_regmap"
#define APDS9960_DRV_NAME "apds9960"
diff --git a/drivers/iio/light/lm3533-als.c b/drivers/iio/light/lm3533-als.c
index 6733b52c22df..bc196c212881 100644
--- a/drivers/iio/light/lm3533-als.c
+++ b/drivers/iio/light/lm3533-als.c
@@ -742,7 +742,7 @@ static int lm3533_als_set_resistor(struct lm3533_als *als, u8 val)
if (val < LM3533_ALS_RESISTOR_MIN || val > LM3533_ALS_RESISTOR_MAX) {
dev_err(&als->pdev->dev, "invalid resistor value\n");
return -EINVAL;
- };
+ }
ret = lm3533_write(als->lm3533, LM3533_REG_ALS_RESISTOR_SELECT, val);
if (ret) {
diff --git a/drivers/iio/light/si1145.c b/drivers/iio/light/si1145.c
index 982bba0c54e7..0476c2bc8138 100644
--- a/drivers/iio/light/si1145.c
+++ b/drivers/iio/light/si1145.c
@@ -17,7 +17,6 @@
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/irq.h>
-#include <linux/gpio.h>
#include <linux/iio/iio.h>
#include <linux/iio/sysfs.h>
diff --git a/drivers/iio/light/st_uvis25_i2c.c b/drivers/iio/light/st_uvis25_i2c.c
index dacbac6a6662..4889bbeb0c73 100644
--- a/drivers/iio/light/st_uvis25_i2c.c
+++ b/drivers/iio/light/st_uvis25_i2c.c
@@ -9,7 +9,6 @@
#include <linux/kernel.h>
#include <linux/module.h>
-#include <linux/acpi.h>
#include <linux/i2c.h>
#include <linux/slab.h>
#include <linux/regmap.h>
diff --git a/drivers/iio/light/vcnl4000.c b/drivers/iio/light/vcnl4000.c
index 16dacea9eadf..b0e241aaefb4 100644
--- a/drivers/iio/light/vcnl4000.c
+++ b/drivers/iio/light/vcnl4000.c
@@ -163,7 +163,6 @@ static int vcnl4200_init(struct vcnl4000_data *data)
if (ret < 0)
return ret;
- data->al_scale = 24000;
data->vcnl4200_al.reg = VCNL4200_AL_DATA;
data->vcnl4200_ps.reg = VCNL4200_PS_DATA;
switch (id) {
@@ -172,11 +171,13 @@ static int vcnl4200_init(struct vcnl4000_data *data)
/* show 54ms in total. */
data->vcnl4200_al.sampling_rate = ktime_set(0, 54000 * 1000);
data->vcnl4200_ps.sampling_rate = ktime_set(0, 4200 * 1000);
+ data->al_scale = 24000;
break;
case VCNL4040_PROD_ID:
/* Integration time is 80ms, add 10ms. */
data->vcnl4200_al.sampling_rate = ktime_set(0, 100000 * 1000);
data->vcnl4200_ps.sampling_rate = ktime_set(0, 100000 * 1000);
+ data->al_scale = 120000;
break;
}
data->vcnl4200_al.last_measurement = ktime_set(0, 0);
diff --git a/drivers/iio/magnetometer/ak8975.c b/drivers/iio/magnetometer/ak8975.c
index 893bec5a0312..3c881541ae72 100644
--- a/drivers/iio/magnetometer/ak8975.c
+++ b/drivers/iio/magnetometer/ak8975.c
@@ -16,8 +16,7 @@
#include <linux/mutex.h>
#include <linux/delay.h>
#include <linux/bitops.h>
-#include <linux/gpio.h>
-#include <linux/of_gpio.h>
+#include <linux/gpio/consumer.h>
#include <linux/acpi.h>
#include <linux/regulator/consumer.h>
#include <linux/pm_runtime.h>
@@ -29,8 +28,6 @@
#include <linux/iio/trigger_consumer.h>
#include <linux/iio/triggered_buffer.h>
-#include <linux/iio/magnetometer/ak8975.h>
-
/*
* Register definitions, as well as various shifts and masks to get at the
* individual fields of the registers.
@@ -206,11 +203,11 @@ static long ak09912_raw_to_gauss(u16 data)
/* Compatible Asahi Kasei Compass parts */
enum asahi_compass_chipset {
+ AKXXXX = 0,
AK8975,
AK8963,
AK09911,
AK09912,
- AK_MAX_TYPE
};
enum ak_ctrl_reg_addr {
@@ -248,7 +245,7 @@ struct ak_def {
u8 data_regs[3];
};
-static const struct ak_def ak_def_array[AK_MAX_TYPE] = {
+static const struct ak_def ak_def_array[] = {
{
.type = AK8975,
.raw_to_gauss = ak8975_raw_to_gauss,
@@ -360,7 +357,7 @@ struct ak8975_data {
struct mutex lock;
u8 asa[3];
long raw_to_gauss[3];
- int eoc_gpio;
+ struct gpio_desc *eoc_gpiod;
int eoc_irq;
wait_queue_head_t data_ready_queue;
unsigned long flags;
@@ -498,15 +495,13 @@ static int ak8975_setup_irq(struct ak8975_data *data)
if (client->irq)
irq = client->irq;
else
- irq = gpio_to_irq(data->eoc_gpio);
+ irq = gpiod_to_irq(data->eoc_gpiod);
rc = devm_request_irq(&client->dev, irq, ak8975_irq_handler,
IRQF_TRIGGER_RISING | IRQF_ONESHOT,
dev_name(&client->dev), data);
if (rc < 0) {
- dev_err(&client->dev,
- "irq %d request failed, (gpio %d): %d\n",
- irq, data->eoc_gpio, rc);
+ dev_err(&client->dev, "irq %d request failed: %d\n", irq, rc);
return rc;
}
@@ -549,7 +544,7 @@ static int ak8975_setup(struct i2c_client *client)
return ret;
}
- if (data->eoc_gpio > 0 || client->irq > 0) {
+ if (data->eoc_gpiod || client->irq > 0) {
ret = ak8975_setup_irq(data);
if (ret < 0) {
dev_err(&client->dev,
@@ -574,7 +569,7 @@ static int wait_conversion_complete_gpio(struct ak8975_data *data)
/* Wait for the conversion to complete. */
while (timeout_ms) {
msleep(AK8975_CONVERSION_DONE_POLL_TIME);
- if (gpio_get_value(data->eoc_gpio))
+ if (gpiod_get_value(data->eoc_gpiod))
break;
timeout_ms -= AK8975_CONVERSION_DONE_POLL_TIME;
}
@@ -646,7 +641,7 @@ static int ak8975_start_read_axis(struct ak8975_data *data,
/* Wait for the conversion to complete. */
if (data->eoc_irq)
ret = wait_conversion_complete_interrupt(data);
- else if (gpio_is_valid(data->eoc_gpio))
+ else if (data->eoc_gpiod)
ret = wait_conversion_complete_gpio(data);
else
ret = wait_conversion_complete_polled(data);
@@ -786,19 +781,6 @@ static const struct acpi_device_id ak_acpi_match[] = {
MODULE_DEVICE_TABLE(acpi, ak_acpi_match);
#endif
-static const char *ak8975_match_acpi_device(struct device *dev,
- enum asahi_compass_chipset *chipset)
-{
- const struct acpi_device_id *id;
-
- id = acpi_match_device(dev->driver->acpi_match_table, dev);
- if (!id)
- return NULL;
- *chipset = (int)id->driver_data;
-
- return dev_name(dev);
-}
-
static void ak8975_fill_buffer(struct iio_dev *indio_dev)
{
struct ak8975_data *data = iio_priv(indio_dev);
@@ -856,36 +838,23 @@ static int ak8975_probe(struct i2c_client *client,
{
struct ak8975_data *data;
struct iio_dev *indio_dev;
- int eoc_gpio;
+ struct gpio_desc *eoc_gpiod;
+ const void *match;
+ unsigned int i;
int err;
+ enum asahi_compass_chipset chipset;
const char *name = NULL;
- enum asahi_compass_chipset chipset = AK_MAX_TYPE;
- const struct ak8975_platform_data *pdata =
- dev_get_platdata(&client->dev);
-
- /* Grab and set up the supplied GPIO. */
- if (pdata)
- eoc_gpio = pdata->eoc_gpio;
- else if (client->dev.of_node)
- eoc_gpio = of_get_gpio(client->dev.of_node, 0);
- else
- eoc_gpio = -1;
- if (eoc_gpio == -EPROBE_DEFER)
- return -EPROBE_DEFER;
-
- /* We may not have a GPIO based IRQ to scan, that is fine, we will
- poll if so */
- if (gpio_is_valid(eoc_gpio)) {
- err = devm_gpio_request_one(&client->dev, eoc_gpio,
- GPIOF_IN, "ak_8975");
- if (err < 0) {
- dev_err(&client->dev,
- "failed to request GPIO %d, error %d\n",
- eoc_gpio, err);
- return err;
- }
- }
+ /*
+ * Grab and set up the supplied GPIO.
+ * We may not have a GPIO based IRQ to scan, that is fine, we will
+ * poll if so.
+ */
+ eoc_gpiod = devm_gpiod_get_optional(&client->dev, NULL, GPIOD_IN);
+ if (IS_ERR(eoc_gpiod))
+ return PTR_ERR(eoc_gpiod);
+ if (eoc_gpiod)
+ gpiod_set_consumer_name(eoc_gpiod, "ak_8975");
/* Register with IIO */
indio_dev = devm_iio_device_alloc(&client->dev, sizeof(*data));
@@ -896,35 +865,35 @@ static int ak8975_probe(struct i2c_client *client,
i2c_set_clientdata(client, indio_dev);
data->client = client;
- data->eoc_gpio = eoc_gpio;
+ data->eoc_gpiod = eoc_gpiod;
data->eoc_irq = 0;
- if (!pdata) {
- err = iio_read_mount_matrix(&client->dev, "mount-matrix",
- &data->orientation);
- if (err)
- return err;
- } else
- data->orientation = pdata->orientation;
+ err = iio_read_mount_matrix(&client->dev, "mount-matrix", &data->orientation);
+ if (err)
+ return err;
/* id will be NULL when enumerated via ACPI */
- if (id) {
+ match = device_get_match_data(&client->dev);
+ if (match) {
+ chipset = (enum asahi_compass_chipset)(uintptr_t)match;
+ name = dev_name(&client->dev);
+ } else if (id) {
chipset = (enum asahi_compass_chipset)(id->driver_data);
name = id->name;
- } else if (ACPI_HANDLE(&client->dev)) {
- name = ak8975_match_acpi_device(&client->dev, &chipset);
- if (!name)
- return -ENODEV;
} else
return -ENOSYS;
- if (chipset >= AK_MAX_TYPE) {
+ for (i = 0; i < ARRAY_SIZE(ak_def_array); i++)
+ if (ak_def_array[i].type == chipset)
+ break;
+
+ if (i == ARRAY_SIZE(ak_def_array)) {
dev_err(&client->dev, "AKM device type unsupported: %d\n",
chipset);
return -ENODEV;
}
- data->def = &ak_def_array[chipset];
+ data->def = &ak_def_array[i];
/* Fetch the regulators */
data->vdd = devm_regulator_get(&client->dev, "vdd");
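
A minimal sketch of the descriptor-based GPIO pattern the conversion above adopts (standalone, with an assumed device context; not the driver itself): request the line optionally, then branch on its presence instead of on integer validity:

#include <linux/device.h>
#include <linux/err.h>
#include <linux/gpio/consumer.h>

static int eoc_gpio_example(struct device *dev)
{
	struct gpio_desc *eoc;
	int irq;

	/* NULL con_id: take the first GPIO bound to the device */
	eoc = devm_gpiod_get_optional(dev, NULL, GPIOD_IN);
	if (IS_ERR(eoc))
		return PTR_ERR(eoc);	/* covers -EPROBE_DEFER */

	if (eoc) {
		gpiod_set_consumer_name(eoc, "ak_8975");
		irq = gpiod_to_irq(eoc);
		if (irq < 0)
			return irq;
		/* request the IRQ here; with no GPIO the driver polls */
	}
	return 0;
}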
diff --git a/drivers/iio/magnetometer/st_magn_i2c.c b/drivers/iio/magnetometer/st_magn_i2c.c
index fdba480a12be..c6bb4ce77594 100644
--- a/drivers/iio/magnetometer/st_magn_i2c.c
+++ b/drivers/iio/magnetometer/st_magn_i2c.c
@@ -17,7 +17,6 @@
#include <linux/iio/common/st_sensors_i2c.h>
#include "st_magn.h"
-#ifdef CONFIG_OF
static const struct of_device_id st_magn_of_match[] = {
{
.compatible = "st,lsm303dlh-magn",
@@ -50,9 +49,6 @@ static const struct of_device_id st_magn_of_match[] = {
{},
};
MODULE_DEVICE_TABLE(of, st_magn_of_match);
-#else
-#define st_magn_of_match NULL
-#endif
static int st_magn_i2c_probe(struct i2c_client *client,
const struct i2c_device_id *id)
@@ -62,8 +58,7 @@ static int st_magn_i2c_probe(struct i2c_client *client,
struct iio_dev *indio_dev;
int err;
- st_sensors_of_name_probe(&client->dev, st_magn_of_match,
- client->name, sizeof(client->name));
+ st_sensors_dev_name_probe(&client->dev, client->name, sizeof(client->name));
settings = st_magn_get_settings(client->name);
if (!settings) {
@@ -113,7 +108,7 @@ MODULE_DEVICE_TABLE(i2c, st_magn_id_table);
static struct i2c_driver st_magn_driver = {
.driver = {
.name = "st-magn-i2c",
- .of_match_table = of_match_ptr(st_magn_of_match),
+ .of_match_table = st_magn_of_match,
},
.probe = st_magn_i2c_probe,
.remove = st_magn_i2c_remove,
diff --git a/drivers/iio/magnetometer/st_magn_spi.c b/drivers/iio/magnetometer/st_magn_spi.c
index fbf909bde841..3d08d74c367d 100644
--- a/drivers/iio/magnetometer/st_magn_spi.c
+++ b/drivers/iio/magnetometer/st_magn_spi.c
@@ -17,7 +17,6 @@
#include <linux/iio/common/st_sensors_spi.h>
#include "st_magn.h"
-#ifdef CONFIG_OF
/*
* For new single-chip sensors use <device_name> as compatible string.
* For old single-chip devices keep <device_name>-magn to maintain
@@ -45,9 +44,6 @@ static const struct of_device_id st_magn_of_match[] = {
{}
};
MODULE_DEVICE_TABLE(of, st_magn_of_match);
-#else
-#define st_magn_of_match NULL
-#endif
static int st_magn_spi_probe(struct spi_device *spi)
{
@@ -56,8 +52,7 @@ static int st_magn_spi_probe(struct spi_device *spi)
struct iio_dev *indio_dev;
int err;
- st_sensors_of_name_probe(&spi->dev, st_magn_of_match,
- spi->modalias, sizeof(spi->modalias));
+ st_sensors_dev_name_probe(&spi->dev, spi->modalias, sizeof(spi->modalias));
settings = st_magn_get_settings(spi->modalias);
if (!settings) {
@@ -104,7 +99,7 @@ MODULE_DEVICE_TABLE(spi, st_magn_id_table);
static struct spi_driver st_magn_driver = {
.driver = {
.name = "st-magn-spi",
- .of_match_table = of_match_ptr(st_magn_of_match),
+ .of_match_table = st_magn_of_match,
},
.probe = st_magn_spi_probe,
.remove = st_magn_spi_remove,
diff --git a/drivers/iio/pressure/Kconfig b/drivers/iio/pressure/Kconfig
index ba420e484787..9c2d9bf8f100 100644
--- a/drivers/iio/pressure/Kconfig
+++ b/drivers/iio/pressure/Kconfig
@@ -53,6 +53,18 @@ config IIO_CROS_EC_BARO
To compile this driver as a module, choose M here: the module
will be called cros_ec_baro.
+config DLHL60D
+ tristate "All Sensors DLHL60D and DLHL60G low voltage digital pressure sensors"
+ depends on I2C
+ select IIO_BUFFER
+ select IIO_TRIGGERED_BUFFER
+ help
+ Say yes here to build support for the All Sensors DLH series
+ pressure sensor driver.
+
+ To compile this driver as a module, choose M here: the module
+ will be called dlhl60d.
+
config DPS310
tristate "Infineon DPS310 pressure and temperature sensor"
depends on I2C
diff --git a/drivers/iio/pressure/Makefile b/drivers/iio/pressure/Makefile
index d8f5ace1f25d..5a79192d8cb5 100644
--- a/drivers/iio/pressure/Makefile
+++ b/drivers/iio/pressure/Makefile
@@ -9,6 +9,7 @@ obj-$(CONFIG_BMP280) += bmp280.o
bmp280-objs := bmp280-core.o bmp280-regmap.o
obj-$(CONFIG_BMP280_I2C) += bmp280-i2c.o
obj-$(CONFIG_BMP280_SPI) += bmp280-spi.o
+obj-$(CONFIG_DLHL60D) += dlhl60d.o
obj-$(CONFIG_DPS310) += dps310.o
obj-$(CONFIG_IIO_CROS_EC_BARO) += cros_ec_baro.o
obj-$(CONFIG_HID_SENSOR_PRESS) += hid-sensor-press.o
diff --git a/drivers/iio/pressure/bmp280-i2c.c b/drivers/iio/pressure/bmp280-i2c.c
index 3109c8e2cc11..8b03ea15c0d0 100644
--- a/drivers/iio/pressure/bmp280-i2c.c
+++ b/drivers/iio/pressure/bmp280-i2c.c
@@ -1,8 +1,6 @@
// SPDX-License-Identifier: GPL-2.0-only
#include <linux/module.h>
#include <linux/i2c.h>
-#include <linux/acpi.h>
-#include <linux/of.h>
#include <linux/regmap.h>
#include "bmp280.h"
@@ -38,16 +36,6 @@ static int bmp280_i2c_probe(struct i2c_client *client,
client->irq);
}
-static const struct acpi_device_id bmp280_acpi_i2c_match[] = {
- {"BMP0280", BMP280_CHIP_ID },
- {"BMP0180", BMP180_CHIP_ID },
- {"BMP0085", BMP180_CHIP_ID },
- {"BME0280", BME280_CHIP_ID },
- { },
-};
-MODULE_DEVICE_TABLE(acpi, bmp280_acpi_i2c_match);
-
-#ifdef CONFIG_OF
static const struct of_device_id bmp280_of_i2c_match[] = {
{ .compatible = "bosch,bme280", .data = (void *)BME280_CHIP_ID },
{ .compatible = "bosch,bmp280", .data = (void *)BMP280_CHIP_ID },
@@ -56,9 +44,6 @@ static const struct of_device_id bmp280_of_i2c_match[] = {
{ },
};
MODULE_DEVICE_TABLE(of, bmp280_of_i2c_match);
-#else
-#define bmp280_of_i2c_match NULL
-#endif
static const struct i2c_device_id bmp280_i2c_id[] = {
{"bmp280", BMP280_CHIP_ID },
@@ -72,8 +57,7 @@ MODULE_DEVICE_TABLE(i2c, bmp280_i2c_id);
static struct i2c_driver bmp280_i2c_driver = {
.driver = {
.name = "bmp280",
- .acpi_match_table = ACPI_PTR(bmp280_acpi_i2c_match),
- .of_match_table = of_match_ptr(bmp280_of_i2c_match),
+ .of_match_table = bmp280_of_i2c_match,
.pm = &bmp280_dev_pm_ops,
},
.probe = bmp280_i2c_probe,
diff --git a/drivers/iio/pressure/dlhl60d.c b/drivers/iio/pressure/dlhl60d.c
new file mode 100644
index 000000000000..b8c99e7bd6cf
--- /dev/null
+++ b/drivers/iio/pressure/dlhl60d.c
@@ -0,0 +1,375 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * All Sensors DLH series low voltage digital pressure sensors
+ *
+ * Copyright (c) 2019 AVL DiTEST GmbH
+ * Tomislav Denis <tomislav.denis@avl.com>
+ *
+ * Datasheet: http://www.allsensors.com/cad/DS-0355_Rev_B.PDF
+ */
+
+#include <linux/module.h>
+#include <linux/delay.h>
+#include <linux/i2c.h>
+#include <linux/iio/iio.h>
+#include <linux/iio/buffer.h>
+#include <linux/iio/trigger_consumer.h>
+#include <linux/iio/triggered_buffer.h>
+#include <asm/unaligned.h>
+
+/* Commands */
+#define DLH_START_SINGLE 0xAA
+
+/* Status bits */
+#define DLH_STATUS_OK 0x40
+
+/* DLH data format */
+#define DLH_NUM_READ_BYTES 7
+#define DLH_NUM_DATA_BYTES 3
+#define DLH_NUM_PR_BITS 24
+#define DLH_NUM_TEMP_BITS 24
+
+/* DLH timings */
+#define DLH_SINGLE_DUT_MS 5
+
+enum dlh_ids {
+ dlhl60d,
+ dlhl60g,
+};
+
+struct dlh_info {
+ u8 osdig; /* digital offset factor */
+ unsigned int fss; /* full scale span (inch H2O) */
+};
+
+struct dlh_state {
+ struct i2c_client *client;
+ struct dlh_info info;
+ bool use_interrupt;
+ struct completion completion;
+ u8 rx_buf[DLH_NUM_READ_BYTES] ____cacheline_aligned;
+};
+
+static struct dlh_info dlh_info_tbl[] = {
+ [dlhl60d] = {
+ .osdig = 2,
+ .fss = 120,
+ },
+ [dlhl60g] = {
+ .osdig = 10,
+ .fss = 60,
+ },
+};
+
+static int dlh_cmd_start_single(struct dlh_state *st)
+{
+ int ret;
+
+ ret = i2c_smbus_write_byte(st->client, DLH_START_SINGLE);
+ if (ret)
+ dev_err(&st->client->dev,
+ "%s: I2C write byte failed\n", __func__);
+
+ return ret;
+}
+
+static int dlh_cmd_read_data(struct dlh_state *st)
+{
+ int ret;
+
+ ret = i2c_master_recv(st->client, st->rx_buf, DLH_NUM_READ_BYTES);
+ if (ret < 0) {
+ dev_err(&st->client->dev,
+ "%s: I2C read block failed\n", __func__);
+ return ret;
+ }
+
+ if (st->rx_buf[0] != DLH_STATUS_OK) {
+ dev_err(&st->client->dev,
+ "%s: invalid status 0x%02x\n", __func__, st->rx_buf[0]);
+ return -EBUSY;
+ }
+
+ return 0;
+}
+
+static int dlh_start_capture_and_read(struct dlh_state *st)
+{
+ int ret;
+
+ if (st->use_interrupt)
+ reinit_completion(&st->completion);
+
+ ret = dlh_cmd_start_single(st);
+ if (ret)
+ return ret;
+
+ if (st->use_interrupt) {
+ ret = wait_for_completion_timeout(&st->completion,
+ msecs_to_jiffies(DLH_SINGLE_DUT_MS));
+ if (!ret) {
+ dev_err(&st->client->dev,
+ "%s: conversion timed out\n", __func__);
+ return -ETIMEDOUT;
+ }
+ } else {
+ mdelay(DLH_SINGLE_DUT_MS);
+ }
+
+ return dlh_cmd_read_data(st);
+}
+
+static int dlh_read_direct(struct dlh_state *st,
+ unsigned int *pressure, unsigned int *temperature)
+{
+ int ret;
+
+ ret = dlh_start_capture_and_read(st);
+ if (ret)
+ return ret;
+
+ *pressure = get_unaligned_be32(&st->rx_buf[1]) >> 8;
+ *temperature = get_unaligned_be32(&st->rx_buf[3]) &
+ GENMASK(DLH_NUM_TEMP_BITS - 1, 0);
+
+ return 0;
+}
+
+static int dlh_read_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *channel, int *value,
+ int *value2, long mask)
+{
+ struct dlh_state *st = iio_priv(indio_dev);
+ unsigned int pressure, temperature;
+ int ret;
+ s64 tmp;
+ s32 rem;
+
+ switch (mask) {
+ case IIO_CHAN_INFO_RAW:
+ ret = iio_device_claim_direct_mode(indio_dev);
+ if (ret)
+ return ret;
+
+ ret = dlh_read_direct(st, &pressure, &temperature);
+ iio_device_release_direct_mode(indio_dev);
+ if (ret)
+ return ret;
+
+ switch (channel->type) {
+ case IIO_PRESSURE:
+ *value = pressure;
+ return IIO_VAL_INT;
+
+ case IIO_TEMP:
+ *value = temperature;
+ return IIO_VAL_INT;
+
+ default:
+ return -EINVAL;
+ }
+ case IIO_CHAN_INFO_SCALE:
+ switch (channel->type) {
+ case IIO_PRESSURE:
+ tmp = div_s64(125LL * st->info.fss * 24909 * 100,
+ 1 << DLH_NUM_PR_BITS);
+ tmp = div_s64_rem(tmp, 1000000000LL, &rem);
+ *value = tmp;
+ *value2 = rem;
+ return IIO_VAL_INT_PLUS_NANO;
+
+ case IIO_TEMP:
+ *value = 125 * 1000;
+ *value2 = DLH_NUM_TEMP_BITS;
+ return IIO_VAL_FRACTIONAL_LOG2;
+
+ default:
+ return -EINVAL;
+ }
+ case IIO_CHAN_INFO_OFFSET:
+ switch (channel->type) {
+ case IIO_PRESSURE:
+ *value = -125 * st->info.fss * 24909;
+ *value2 = 100 * st->info.osdig * 100000;
+ return IIO_VAL_FRACTIONAL;
+
+ case IIO_TEMP:
+ *value = -40 * 1000;
+ return IIO_VAL_INT;
+
+ default:
+ return -EINVAL;
+ }
+ }
+
+ return -EINVAL;
+}
+
+static const struct iio_info dlh_info = {
+ .read_raw = dlh_read_raw,
+};
+
+static const struct iio_chan_spec dlh_channels[] = {
+ {
+ .type = IIO_PRESSURE,
+ .indexed = 1,
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW),
+ .info_mask_shared_by_type =
+ BIT(IIO_CHAN_INFO_SCALE) |
+ BIT(IIO_CHAN_INFO_OFFSET),
+ .scan_index = 0,
+ .scan_type = {
+ .sign = 'u',
+ .realbits = DLH_NUM_PR_BITS,
+ .storagebits = 32,
+ .shift = 8,
+ .endianness = IIO_BE,
+ },
+ }, {
+ .type = IIO_TEMP,
+ .indexed = 1,
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW),
+ .info_mask_shared_by_type =
+ BIT(IIO_CHAN_INFO_SCALE) |
+ BIT(IIO_CHAN_INFO_OFFSET),
+ .scan_index = 1,
+ .scan_type = {
+ .sign = 'u',
+ .realbits = DLH_NUM_TEMP_BITS,
+ .storagebits = 32,
+ .shift = 8,
+ .endianness = IIO_BE,
+ },
+ }
+};
+
+static irqreturn_t dlh_trigger_handler(int irq, void *private)
+{
+ struct iio_poll_func *pf = private;
+ struct iio_dev *indio_dev = pf->indio_dev;
+ struct dlh_state *st = iio_priv(indio_dev);
+ int ret;
+ unsigned int chn, i = 0;
+ __be32 tmp_buf[2];
+
+ ret = dlh_start_capture_and_read(st);
+ if (ret)
+ goto out;
+
+ for_each_set_bit(chn, indio_dev->active_scan_mask,
+ indio_dev->masklength) {
+ memcpy(tmp_buf + i,
+ &st->rx_buf[1] + chn * DLH_NUM_DATA_BYTES,
+ DLH_NUM_DATA_BYTES);
+ i++;
+ }
+
+ iio_push_to_buffers(indio_dev, tmp_buf);
+
+out:
+ iio_trigger_notify_done(indio_dev->trig);
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t dlh_interrupt(int irq, void *private)
+{
+ struct iio_dev *indio_dev = private;
+ struct dlh_state *st = iio_priv(indio_dev);
+
+ complete(&st->completion);
+
+ return IRQ_HANDLED;
+}
+
+static int dlh_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ struct dlh_state *st;
+ struct iio_dev *indio_dev;
+ int ret;
+
+ if (!i2c_check_functionality(client->adapter,
+ I2C_FUNC_I2C | I2C_FUNC_SMBUS_WRITE_BYTE)) {
+ dev_err(&client->dev,
+ "adapter doesn't support required i2c functionality\n");
+ return -EOPNOTSUPP;
+ }
+
+ indio_dev = devm_iio_device_alloc(&client->dev, sizeof(*st));
+ if (!indio_dev) {
+ dev_err(&client->dev, "failed to allocate iio device\n");
+ return -ENOMEM;
+ }
+
+ i2c_set_clientdata(client, indio_dev);
+
+ st = iio_priv(indio_dev);
+ st->info = dlh_info_tbl[id->driver_data];
+ st->client = client;
+ st->use_interrupt = false;
+
+ indio_dev->name = id->name;
+ indio_dev->dev.parent = &client->dev;
+ indio_dev->dev.of_node = client->dev.of_node;
+ indio_dev->info = &dlh_info;
+ indio_dev->modes = INDIO_DIRECT_MODE;
+ indio_dev->channels = dlh_channels;
+ indio_dev->num_channels = ARRAY_SIZE(dlh_channels);
+
+ if (client->irq > 0) {
+ ret = devm_request_threaded_irq(&client->dev, client->irq,
+ dlh_interrupt, NULL,
+ IRQF_TRIGGER_RISING | IRQF_ONESHOT,
+ id->name, indio_dev);
+ if (ret) {
+ dev_err(&client->dev, "failed to allocate threaded irq");
+ return ret;
+ }
+
+ st->use_interrupt = true;
+ init_completion(&st->completion);
+ }
+
+ ret = devm_iio_triggered_buffer_setup(&client->dev, indio_dev,
+ NULL, &dlh_trigger_handler, NULL);
+ if (ret) {
+ dev_err(&client->dev, "failed to setup iio buffer\n");
+ return ret;
+ }
+
+ ret = devm_iio_device_register(&client->dev, indio_dev);
+ if (ret)
+ dev_err(&client->dev, "failed to register iio device\n");
+
+ return ret;
+}
+
+static const struct of_device_id dlh_of_match[] = {
+ { .compatible = "asc,dlhl60d" },
+ { .compatible = "asc,dlhl60g" },
+ {}
+};
+MODULE_DEVICE_TABLE(of, dlh_of_match);
+
+static const struct i2c_device_id dlh_id[] = {
+ { "dlhl60d", dlhl60d },
+ { "dlhl60g", dlhl60g },
+ {}
+};
+MODULE_DEVICE_TABLE(i2c, dlh_id);
+
+static struct i2c_driver dlh_driver = {
+ .driver = {
+ .name = "dlhl60d",
+ .of_match_table = dlh_of_match,
+ },
+ .probe = dlh_probe,
+ .id_table = dlh_id,
+};
+module_i2c_driver(dlh_driver);
+
+MODULE_AUTHOR("Tomislav Denis <tomislav.denis@avl.com>");
+MODULE_DESCRIPTION("Driver for All Sensors DLH series pressure sensors");
+MODULE_LICENSE("GPL v2");
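
A worked check of the pressure scale computed in dlh_read_raw() above (userspace sketch, assuming the DLHL60D's 120 inH2O span): the constant 125 * 24909 * 100 encodes 1.25 * FSS * 249.09 Pa scaled by 10^6, so dividing by 2^24 counts yields the scale in nano-kilopascal per LSB, which is why the driver splits it with div_s64_rem(..., 1000000000LL, ...) for IIO_VAL_INT_PLUS_NANO:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	unsigned int fss = 120;	/* DLHL60D full-scale span in inch H2O */
	int64_t tmp = 125LL * fss * 24909 * 100 / (1 << 24);

	/* ~2226 nano-kPa per LSB, i.e. roughly 0.0022 Pa of resolution */
	printf("%lld nkPa/LSB\n", (long long)tmp);
	return 0;
}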
diff --git a/drivers/iio/pressure/st_pressure.h b/drivers/iio/pressure/st_pressure.h
index c2e47a6c3118..5c746ff6087e 100644
--- a/drivers/iio/pressure/st_pressure.h
+++ b/drivers/iio/pressure/st_pressure.h
@@ -37,7 +37,7 @@ enum st_press_type {
* struct st_sensors_platform_data - default press platform data
* @drdy_int_pin: default press DRDY is available on INT1 pin.
*/
-static const struct st_sensors_platform_data default_press_pdata = {
+static __maybe_unused const struct st_sensors_platform_data default_press_pdata = {
.drdy_int_pin = 1,
};
diff --git a/drivers/iio/pressure/st_pressure_i2c.c b/drivers/iio/pressure/st_pressure_i2c.c
index 71d2ed6b4948..09c6903f99b8 100644
--- a/drivers/iio/pressure/st_pressure_i2c.c
+++ b/drivers/iio/pressure/st_pressure_i2c.c
@@ -10,7 +10,6 @@
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/slab.h>
-#include <linux/acpi.h>
#include <linux/i2c.h>
#include <linux/iio/iio.h>
@@ -18,7 +17,6 @@
#include <linux/iio/common/st_sensors_i2c.h>
#include "st_pressure.h"
-#ifdef CONFIG_OF
static const struct of_device_id st_press_of_match[] = {
{
.compatible = "st,lps001wp-press",
@@ -51,9 +49,6 @@ static const struct of_device_id st_press_of_match[] = {
{},
};
MODULE_DEVICE_TABLE(of, st_press_of_match);
-#else
-#define st_press_of_match NULL
-#endif
#ifdef CONFIG_ACPI
static const struct acpi_device_id st_press_acpi_match[] = {
@@ -61,8 +56,6 @@ static const struct acpi_device_id st_press_acpi_match[] = {
{ },
};
MODULE_DEVICE_TABLE(acpi, st_press_acpi_match);
-#else
-#define st_press_acpi_match NULL
#endif
static const struct i2c_device_id st_press_id_table[] = {
@@ -85,18 +78,7 @@ static int st_press_i2c_probe(struct i2c_client *client,
struct iio_dev *indio_dev;
int ret;
- if (client->dev.of_node) {
- st_sensors_of_name_probe(&client->dev, st_press_of_match,
- client->name, sizeof(client->name));
- } else if (ACPI_HANDLE(&client->dev)) {
- ret = st_sensors_match_acpi_device(&client->dev);
- if ((ret < 0) || (ret >= ST_PRESS_MAX))
- return -ENODEV;
-
- strlcpy(client->name, st_press_id_table[ret].name,
- sizeof(client->name));
- } else if (!id)
- return -ENODEV;
+ st_sensors_dev_name_probe(&client->dev, client->name, sizeof(client->name));
settings = st_press_get_settings(client->name);
if (!settings) {
@@ -133,7 +115,7 @@ static int st_press_i2c_remove(struct i2c_client *client)
static struct i2c_driver st_press_driver = {
.driver = {
.name = "st-press-i2c",
- .of_match_table = of_match_ptr(st_press_of_match),
+ .of_match_table = st_press_of_match,
.acpi_match_table = ACPI_PTR(st_press_acpi_match),
},
.probe = st_press_i2c_probe,
diff --git a/drivers/iio/pressure/st_pressure_spi.c b/drivers/iio/pressure/st_pressure_spi.c
index 7c8b70221e70..b5ee3ec2764f 100644
--- a/drivers/iio/pressure/st_pressure_spi.c
+++ b/drivers/iio/pressure/st_pressure_spi.c
@@ -17,7 +17,6 @@
#include <linux/iio/common/st_sensors_spi.h>
#include "st_pressure.h"
-#ifdef CONFIG_OF
/*
* For new single-chip sensors use <device_name> as compatible string.
* For old single-chip devices keep <device_name>-press to maintain
@@ -55,9 +54,6 @@ static const struct of_device_id st_press_of_match[] = {
{},
};
MODULE_DEVICE_TABLE(of, st_press_of_match);
-#else
-#define st_press_of_match NULL
-#endif
static int st_press_spi_probe(struct spi_device *spi)
{
@@ -66,8 +62,7 @@ static int st_press_spi_probe(struct spi_device *spi)
struct iio_dev *indio_dev;
int err;
- st_sensors_of_name_probe(&spi->dev, st_press_of_match,
- spi->modalias, sizeof(spi->modalias));
+ st_sensors_dev_name_probe(&spi->dev, spi->modalias, sizeof(spi->modalias));
settings = st_press_get_settings(spi->modalias);
if (!settings) {
@@ -116,7 +111,7 @@ MODULE_DEVICE_TABLE(spi, st_press_id_table);
static struct spi_driver st_press_driver = {
.driver = {
.name = "st-press-spi",
- .of_match_table = of_match_ptr(st_press_of_match),
+ .of_match_table = st_press_of_match,
},
.probe = st_press_spi_probe,
.remove = st_press_spi_remove,
diff --git a/drivers/iio/proximity/Kconfig b/drivers/iio/proximity/Kconfig
index d53601447da4..37606d400805 100644
--- a/drivers/iio/proximity/Kconfig
+++ b/drivers/iio/proximity/Kconfig
@@ -58,6 +58,21 @@ config MB1232
To compile this driver as a module, choose M here: the
module will be called mb1232.
+config PING
+ tristate "Parallax GPIO bitbanged ranger sensors"
+ depends on GPIOLIB
+ help
+ Say Y here to build a driver for GPIO bitbanged ranger sensors
+ with just one GPIO for the trigger and echo. This driver can be
+ used to measure the distance of objects.
+
+ Currently supported are:
+ - Parallax PING))) (ultrasonic)
+ - Parallax LaserPING (time-of-flight)
+
+ To compile this driver as a module, choose M here: the
+ module will be called ping.
+
config RFD77402
tristate "RFD77402 ToF sensor"
depends on I2C
diff --git a/drivers/iio/proximity/Makefile b/drivers/iio/proximity/Makefile
index 0bb5f9de13d6..c591b019304e 100644
--- a/drivers/iio/proximity/Makefile
+++ b/drivers/iio/proximity/Makefile
@@ -8,6 +8,7 @@ obj-$(CONFIG_AS3935) += as3935.o
obj-$(CONFIG_ISL29501) += isl29501.o
obj-$(CONFIG_LIDAR_LITE_V2) += pulsedlight-lidar-lite-v2.o
obj-$(CONFIG_MB1232) += mb1232.o
+obj-$(CONFIG_PING) += ping.o
obj-$(CONFIG_RFD77402) += rfd77402.o
obj-$(CONFIG_SRF04) += srf04.o
obj-$(CONFIG_SRF08) += srf08.o
diff --git a/drivers/iio/proximity/as3935.c b/drivers/iio/proximity/as3935.c
index b591c63bd6c4..bac9a433dd19 100644
--- a/drivers/iio/proximity/as3935.c
+++ b/drivers/iio/proximity/as3935.c
@@ -14,7 +14,6 @@
#include <linux/mutex.h>
#include <linux/err.h>
#include <linux/irq.h>
-#include <linux/gpio.h>
#include <linux/spi/spi.h>
#include <linux/iio/iio.h>
#include <linux/iio/sysfs.h>
@@ -22,8 +21,6 @@
#include <linux/iio/trigger_consumer.h>
#include <linux/iio/buffer.h>
#include <linux/iio/triggered_buffer.h>
-#include <linux/of_gpio.h>
-
#define AS3935_AFE_GAIN 0x00
#define AS3935_AFE_MASK 0x3F
diff --git a/drivers/iio/proximity/ping.c b/drivers/iio/proximity/ping.c
new file mode 100644
index 000000000000..34aff108dff5
--- /dev/null
+++ b/drivers/iio/proximity/ping.c
@@ -0,0 +1,335 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * PING: ultrasonic sensor for distance measuring using only one GPIO
+ *
+ * Copyright (c) 2019 Andreas Klinger <ak@it-klinger.de>
+ *
+ * For details about the devices see:
+ * http://parallax.com/sites/default/files/downloads/28041-LaserPING-2m-Rangefinder-Guide.pdf
+ * http://parallax.com/sites/default/files/downloads/28015-PING-Documentation-v1.6.pdf
+ *
+ * the measurement cycle as a timing diagram looks like:
+ *
+ * GPIO ___ ________________________
+ * ping: __/ \____________/ \________________
+ * ^ ^ ^ ^
+ * |<->| interrupt interrupt
+ * udelay(5) (ts_rising) (ts_falling)
+ * |<---------------------->|
+ * . pulse time measured .
+ * . --> one round trip of ultrasonic waves
+ * ultra . .
+ * sonic _ _ _. .
+ * burst: _________/ \_/ \_/ \_________________________________________
+ * .
+ * ultra .
+ * sonic _ _ _.
+ * echo: __________________________________/ \_/ \_/ \________________
+ */
+#include <linux/err.h>
+#include <linux/gpio/consumer.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/property.h>
+#include <linux/sched.h>
+#include <linux/interrupt.h>
+#include <linux/delay.h>
+#include <linux/iio/iio.h>
+#include <linux/iio/sysfs.h>
+
+struct ping_cfg {
+ unsigned long trigger_pulse_us; /* length of trigger pulse */
+ int laserping_error; /* support error codes encoded */
+ /* in the echo pulse width */
+ /* (LaserPING sensors only) */
+ s64 timeout_ns; /* timeout in ns */
+};
+
+struct ping_data {
+ struct device *dev;
+ struct gpio_desc *gpiod_ping;
+ struct mutex lock;
+ int irqnr;
+ ktime_t ts_rising;
+ ktime_t ts_falling;
+ struct completion rising;
+ struct completion falling;
+ const struct ping_cfg *cfg;
+};
+
+static const struct ping_cfg pa_ping_cfg = {
+ .trigger_pulse_us = 5,
+ .laserping_error = 0,
+ .timeout_ns = 18500000, /* 3 meters */
+};
+
+static const struct ping_cfg pa_laser_ping_cfg = {
+ .trigger_pulse_us = 5,
+ .laserping_error = 1,
+ .timeout_ns = 15500000, /* 2 meters plus error codes */
+};
+
+static irqreturn_t ping_handle_irq(int irq, void *dev_id)
+{
+ struct iio_dev *indio_dev = dev_id;
+ struct ping_data *data = iio_priv(indio_dev);
+ ktime_t now = ktime_get();
+
+ if (gpiod_get_value(data->gpiod_ping)) {
+ data->ts_rising = now;
+ complete(&data->rising);
+ } else {
+ data->ts_falling = now;
+ complete(&data->falling);
+ }
+
+ return IRQ_HANDLED;
+}
+
+static int ping_read(struct ping_data *data)
+{
+ int ret;
+ ktime_t ktime_dt;
+ s64 dt_ns;
+ u32 time_ns, distance_mm;
+ struct platform_device *pdev = to_platform_device(data->dev);
+ struct iio_dev *indio_dev = iio_priv_to_dev(data);
+
+ /*
+ * just one read-echo-cycle can take place at a time
+ * ==> lock against concurrent reading calls
+ */
+ mutex_lock(&data->lock);
+
+ reinit_completion(&data->rising);
+ reinit_completion(&data->falling);
+
+ gpiod_set_value(data->gpiod_ping, 1);
+ udelay(data->cfg->trigger_pulse_us);
+ gpiod_set_value(data->gpiod_ping, 0);
+
+ ret = gpiod_direction_input(data->gpiod_ping);
+ if (ret < 0) {
+ mutex_unlock(&data->lock);
+ return ret;
+ }
+
+ data->irqnr = gpiod_to_irq(data->gpiod_ping);
+ if (data->irqnr < 0) {
+ dev_err(data->dev, "gpiod_to_irq: %d\n", data->irqnr);
+ mutex_unlock(&data->lock);
+ return data->irqnr;
+ }
+
+ ret = request_irq(data->irqnr, ping_handle_irq,
+ IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING,
+ pdev->name, indio_dev);
+ if (ret < 0) {
+ dev_err(data->dev, "request_irq: %d\n", ret);
+ mutex_unlock(&data->lock);
+ return ret;
+ }
+
+ /* it should not take more than 20 ms until echo is rising */
+ ret = wait_for_completion_killable_timeout(&data->rising, HZ/50);
+ if (ret < 0)
+ goto err_reset_direction;
+ else if (ret == 0) {
+ ret = -ETIMEDOUT;
+ goto err_reset_direction;
+ }
+
+ /* it cannot take more than 50 ms until echo is falling */
+ ret = wait_for_completion_killable_timeout(&data->falling, HZ/20);
+ if (ret < 0)
+ goto err_reset_direction;
+ else if (ret == 0) {
+ ret = -ETIMEDOUT;
+ goto err_reset_direction;
+ }
+
+ ktime_dt = ktime_sub(data->ts_falling, data->ts_rising);
+
+ free_irq(data->irqnr, indio_dev);
+
+ ret = gpiod_direction_output(data->gpiod_ping, GPIOD_OUT_LOW);
+ if (ret < 0) {
+ mutex_unlock(&data->lock);
+ return ret;
+ }
+
+ mutex_unlock(&data->lock);
+
+ dt_ns = ktime_to_ns(ktime_dt);
+ if (dt_ns > data->cfg->timeout_ns) {
+ dev_dbg(data->dev, "distance out of range: dt=%lldns\n",
+ dt_ns);
+ return -EIO;
+ }
+
+ time_ns = dt_ns;
+
+ /*
+ * read the error code of the LaserPING sensor and give users a
+ * chance to figure out the error by using dynamic debugging
+ */
+ if (data->cfg->laserping_error) {
+ if ((time_ns > 12500000) && (time_ns <= 13500000)) {
+ dev_dbg(data->dev, "target too close or to far\n");
+ return -EIO;
+ }
+ if ((time_ns > 13500000) && (time_ns <= 14500000)) {
+ dev_dbg(data->dev, "internal sensor error\n");
+ return -EIO;
+ }
+ if ((time_ns > 14500000) && (time_ns <= 15500000)) {
+ dev_dbg(data->dev, "internal sensor timeout\n");
+ return -EIO;
+ }
+ }
+
+ /*
+ * the speed as a function of temperature is approximately:
+ *
+ * speed = 331.5 + 0.6 * Temp
+ * with Temp in °C
+ * and speed in m/s
+ *
+ * use 343.5 m/s as the ultrasonic speed at 20 °C here, in the
+ * absence of a temperature reading
+ *
+ * therefore:
+ * time 343.5 time * 232
+ * distance = ------ * ------- = ------------
+ * 10^6 2 1350800
+ * with time in ns
+ * and distance in mm (one way)
+ *
+ * because we limit the range to 3 meters, the multiplication
+ * by 232 just fits into 32 bits
+ */
+ distance_mm = time_ns * 232 / 1350800;
+
+ return distance_mm;
+
+err_reset_direction:
+ free_irq(data->irqnr, indio_dev);
+ mutex_unlock(&data->lock);
+
+ if (gpiod_direction_output(data->gpiod_ping, GPIOD_OUT_LOW))
+ dev_dbg(data->dev, "error in gpiod_direction_output\n");
+ return ret;
+}
+
+static int ping_read_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *channel, int *val,
+ int *val2, long info)
+{
+ struct ping_data *data = iio_priv(indio_dev);
+ int ret;
+
+ if (channel->type != IIO_DISTANCE)
+ return -EINVAL;
+
+ switch (info) {
+ case IIO_CHAN_INFO_RAW:
+ ret = ping_read(data);
+ if (ret < 0)
+ return ret;
+ *val = ret;
+ return IIO_VAL_INT;
+ case IIO_CHAN_INFO_SCALE:
+ /*
+ * maximum resolution in datasheet is 1 mm
+ * 1 LSB is 1 mm
+ */
+ *val = 0;
+ *val2 = 1000;
+ return IIO_VAL_INT_PLUS_MICRO;
+ default:
+ return -EINVAL;
+ }
+}
+
+static const struct iio_info ping_iio_info = {
+ .read_raw = ping_read_raw,
+};
+
+static const struct iio_chan_spec ping_chan_spec[] = {
+ {
+ .type = IIO_DISTANCE,
+ .info_mask_separate =
+ BIT(IIO_CHAN_INFO_RAW) |
+ BIT(IIO_CHAN_INFO_SCALE),
+ },
+};
+
+static const struct of_device_id of_ping_match[] = {
+ { .compatible = "parallax,ping", .data = &pa_ping_cfg},
+ { .compatible = "parallax,laserping", .data = &pa_ping_cfg},
+ {},
+};
+
+MODULE_DEVICE_TABLE(of, of_ping_match);
+
+static int ping_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct ping_data *data;
+ struct iio_dev *indio_dev;
+
+ indio_dev = devm_iio_device_alloc(dev, sizeof(struct ping_data));
+ if (!indio_dev) {
+ dev_err(dev, "failed to allocate IIO device\n");
+ return -ENOMEM;
+ }
+
+ data = iio_priv(indio_dev);
+ data->dev = dev;
+ data->cfg = of_device_get_match_data(dev);
+
+ mutex_init(&data->lock);
+ init_completion(&data->rising);
+ init_completion(&data->falling);
+
+ data->gpiod_ping = devm_gpiod_get(dev, "ping", GPIOD_OUT_LOW);
+ if (IS_ERR(data->gpiod_ping)) {
+ dev_err(dev, "failed to get ping-gpios: err=%ld\n",
+ PTR_ERR(data->gpiod_ping));
+ return PTR_ERR(data->gpiod_ping);
+ }
+
+ if (gpiod_cansleep(data->gpiod_ping)) {
+ dev_err(data->dev, "cansleep-GPIOs not supported\n");
+ return -ENODEV;
+ }
+
+ platform_set_drvdata(pdev, indio_dev);
+
+ indio_dev->name = "ping";
+ indio_dev->dev.parent = &pdev->dev;
+ indio_dev->info = &ping_iio_info;
+ indio_dev->modes = INDIO_DIRECT_MODE;
+ indio_dev->channels = ping_chan_spec;
+ indio_dev->num_channels = ARRAY_SIZE(ping_chan_spec);
+
+ return devm_iio_device_register(dev, indio_dev);
+}
+
+static struct platform_driver ping_driver = {
+ .probe = ping_probe,
+ .driver = {
+ .name = "ping-gpio",
+ .of_match_table = of_ping_match,
+ },
+};
+
+module_platform_driver(ping_driver);
+
+MODULE_AUTHOR("Andreas Klinger <ak@it-klinger.de>");
+MODULE_DESCRIPTION("PING sensors for distance measuring using one GPIOs");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:ping");
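
A worked example of the echo-time conversion in ping_read() above (userspace sketch with a hypothetical measurement). The factor 232 / 1350800 equals 343.5 m/s divided by 2 * 10^6, and because time_ns is capped at 18500000 the product time_ns * 232 stays just below 2^32:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint32_t time_ns = 5827000;	/* ~5.8 ms round trip to a 1 m target */
	uint32_t distance_mm = time_ns * 232 / 1350800;

	printf("%u mm\n", distance_mm);	/* prints 1000 */
	return 0;
}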
diff --git a/drivers/iio/resolver/ad2s1200.c b/drivers/iio/resolver/ad2s1200.c
index 17b89623418c..a391f46ee06b 100644
--- a/drivers/iio/resolver/ad2s1200.c
+++ b/drivers/iio/resolver/ad2s1200.c
@@ -10,7 +10,6 @@
#include <linux/bitops.h>
#include <linux/delay.h>
#include <linux/device.h>
-#include <linux/gpio.h>
#include <linux/gpio/consumer.h>
#include <linux/module.h>
#include <linux/mutex.h>
diff --git a/drivers/iio/temperature/max31856.c b/drivers/iio/temperature/max31856.c
index 73ed550e3fc9..b4cb21ab2e85 100644
--- a/drivers/iio/temperature/max31856.c
+++ b/drivers/iio/temperature/max31856.c
@@ -6,12 +6,14 @@
* Copyright (C) 2018-2019 Rockwell Collins
*/
+#include <linux/ctype.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/err.h>
#include <linux/spi/spi.h>
#include <linux/iio/iio.h>
#include <linux/iio/sysfs.h>
+#include <linux/util_macros.h>
#include <dt-bindings/iio/temperature/thermocouple.h>
/*
* The MSB of the register value determines whether the following byte will
@@ -23,6 +25,9 @@
#define MAX31856_CR0_1SHOT BIT(6)
#define MAX31856_CR0_OCFAULT BIT(4)
#define MAX31856_CR0_OCFAULT_MASK GENMASK(5, 4)
+#define MAX31856_CR0_FILTER_50HZ BIT(0)
+#define MAX31856_AVERAGING_MASK GENMASK(6, 4)
+#define MAX31856_AVERAGING_SHIFT 4
#define MAX31856_TC_TYPE_MASK GENMASK(3, 0)
#define MAX31856_FAULT_OVUV BIT(1)
#define MAX31856_FAULT_OPEN BIT(0)
@@ -49,7 +54,10 @@ static const struct iio_chan_spec max31856_channels[] = {
{ /* Thermocouple Temperature */
.type = IIO_TEMP,
.info_mask_separate =
- BIT(IIO_CHAN_INFO_RAW) | BIT(IIO_CHAN_INFO_SCALE),
+ BIT(IIO_CHAN_INFO_RAW) | BIT(IIO_CHAN_INFO_SCALE) |
+ BIT(IIO_CHAN_INFO_THERMOCOUPLE_TYPE),
+ .info_mask_shared_by_type =
+ BIT(IIO_CHAN_INFO_OVERSAMPLING_RATIO)
},
{ /* Cold Junction Temperature */
.type = IIO_TEMP,
@@ -57,12 +65,20 @@ static const struct iio_chan_spec max31856_channels[] = {
.modified = 1,
.info_mask_separate =
BIT(IIO_CHAN_INFO_RAW) | BIT(IIO_CHAN_INFO_SCALE),
+ .info_mask_shared_by_type =
+ BIT(IIO_CHAN_INFO_OVERSAMPLING_RATIO)
},
};
struct max31856_data {
struct spi_device *spi;
u32 thermocouple_type;
+ bool filter_50hz;
+ int averaging;
+};
+
+static const char max31856_tc_types[] = {
+ 'B', 'E', 'J', 'K', 'N', 'R', 'S', 'T'
};
static int max31856_read(struct max31856_data *data, u8 reg,
@@ -107,6 +123,10 @@ static int max31856_init(struct max31856_data *data)
reg_cr1_val &= ~MAX31856_TC_TYPE_MASK;
reg_cr1_val |= data->thermocouple_type;
+
+ reg_cr1_val &= ~MAX31856_AVERAGING_MASK;
+ reg_cr1_val |= data->averaging << MAX31856_AVERAGING_SHIFT;
+
ret = max31856_write(data, MAX31856_CR1_REG, reg_cr1_val);
if (ret)
return ret;
@@ -123,6 +143,11 @@ static int max31856_init(struct max31856_data *data)
reg_cr0_val &= ~MAX31856_CR0_1SHOT;
reg_cr0_val |= MAX31856_CR0_AUTOCONVERT;
+ if (data->filter_50hz)
+ reg_cr0_val |= MAX31856_CR0_FILTER_50HZ;
+ else
+ reg_cr0_val &= ~MAX31856_CR0_FILTER_50HZ;
+
return max31856_write(data, MAX31856_CR0_REG, reg_cr0_val);
}
@@ -210,6 +235,12 @@ static int max31856_read_raw(struct iio_dev *indio_dev,
return IIO_VAL_INT_PLUS_MICRO;
}
break;
+ case IIO_CHAN_INFO_OVERSAMPLING_RATIO:
+ *val = 1 << data->averaging;
+ return IIO_VAL_INT;
+ case IIO_CHAN_INFO_THERMOCOUPLE_TYPE:
+ *val = max31856_tc_types[data->thermocouple_type];
+ return IIO_VAL_CHAR;
default:
ret = -EINVAL;
break;
@@ -218,6 +249,62 @@ static int max31856_read_raw(struct iio_dev *indio_dev,
return ret;
}
+static int max31856_write_raw_get_fmt(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ long mask)
+{
+ switch (mask) {
+ case IIO_CHAN_INFO_THERMOCOUPLE_TYPE:
+ return IIO_VAL_CHAR;
+ default:
+ return IIO_VAL_INT;
+ }
+}
+
+static int max31856_write_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ int val, int val2, long mask)
+{
+ struct max31856_data *data = iio_priv(indio_dev);
+ int msb;
+
+ switch (mask) {
+ case IIO_CHAN_INFO_OVERSAMPLING_RATIO:
+ if (val > 16 || val < 1)
+ return -EINVAL;
+ msb = fls(val) - 1;
+ /* Round up to next 2pow if needed */
+ if (BIT(msb) < val)
+ msb++;
+
+ data->averaging = msb;
+ max31856_init(data);
+ break;
+ case IIO_CHAN_INFO_THERMOCOUPLE_TYPE:
+ {
+ int tc_type = -1;
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(max31856_tc_types); i++) {
+ if (max31856_tc_types[i] == toupper(val)) {
+ tc_type = i;
+ break;
+ }
+ }
+ if (tc_type < 0)
+ return -EINVAL;
+
+ data->thermocouple_type = tc_type;
+ max31856_init(data);
+ break;
+ }
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
static ssize_t show_fault(struct device *dev, u8 faultbit, char *buf)
{
struct iio_dev *indio_dev = dev_to_iio_dev(dev);
@@ -249,12 +336,54 @@ static ssize_t show_fault_oc(struct device *dev,
return show_fault(dev, MAX31856_FAULT_OPEN, buf);
}
+static ssize_t show_filter(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct iio_dev *indio_dev = dev_to_iio_dev(dev);
+ struct max31856_data *data = iio_priv(indio_dev);
+
+ return sprintf(buf, "%d\n", data->filter_50hz ? 50 : 60);
+}
+
+static ssize_t set_filter(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf,
+ size_t len)
+{
+ struct iio_dev *indio_dev = dev_to_iio_dev(dev);
+ struct max31856_data *data = iio_priv(indio_dev);
+ unsigned int freq;
+ int ret;
+
+ ret = kstrtouint(buf, 10, &freq);
+ if (ret)
+ return ret;
+
+ switch (freq) {
+ case 50:
+ data->filter_50hz = true;
+ break;
+ case 60:
+ data->filter_50hz = false;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ max31856_init(data);
+ return len;
+}
+
static IIO_DEVICE_ATTR(fault_ovuv, 0444, show_fault_ovuv, NULL, 0);
static IIO_DEVICE_ATTR(fault_oc, 0444, show_fault_oc, NULL, 0);
+static IIO_DEVICE_ATTR(in_temp_filter_notch_center_frequency, 0644,
+ show_filter, set_filter, 0);
static struct attribute *max31856_attributes[] = {
&iio_dev_attr_fault_ovuv.dev_attr.attr,
&iio_dev_attr_fault_oc.dev_attr.attr,
+ &iio_dev_attr_in_temp_filter_notch_center_frequency.dev_attr.attr,
NULL,
};
@@ -264,6 +393,8 @@ static const struct attribute_group max31856_group = {
static const struct iio_info max31856_info = {
.read_raw = max31856_read_raw,
+ .write_raw = max31856_write_raw,
+ .write_raw_get_fmt = max31856_write_raw_get_fmt,
.attrs = &max31856_group,
};
@@ -280,6 +411,7 @@ static int max31856_probe(struct spi_device *spi)
data = iio_priv(indio_dev);
data->spi = spi;
+ data->filter_50hz = false;
spi_set_drvdata(spi, indio_dev);
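
A standalone check of the oversampling rounding in max31856_write_raw() above (userspace sketch; __builtin_clz stands in for the kernel's fls()). Requested ratios 1..16 round up to the next power of two, and the stored exponent goes into the CR1 averaging field:

#include <stdio.h>

static int round_to_exponent(int val)
{
	int msb = 31 - __builtin_clz(val);	/* fls(val) - 1 */

	if ((1 << msb) < val)	/* round up if not a power of two */
		msb++;
	return msb;
}

int main(void)
{
	/* prints 0 3 4: averaging of 1, 8 and 16 samples */
	printf("%d %d %d\n", round_to_exponent(1),
	       round_to_exponent(5), round_to_exponent(16));
	return 0;
}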
diff --git a/drivers/iio/temperature/maxim_thermocouple.c b/drivers/iio/temperature/maxim_thermocouple.c
index d1360605209c..8d21116c7a22 100644
--- a/drivers/iio/temperature/maxim_thermocouple.c
+++ b/drivers/iio/temperature/maxim_thermocouple.c
@@ -14,6 +14,7 @@
#include <linux/of_device.h>
#include <linux/spi/spi.h>
#include <linux/iio/iio.h>
+#include <linux/iio/sysfs.h>
#include <linux/iio/trigger.h>
#include <linux/iio/buffer.h>
#include <linux/iio/triggered_buffer.h>
@@ -24,13 +25,25 @@
enum {
MAX6675,
MAX31855,
+ MAX31855K,
+ MAX31855J,
+ MAX31855N,
+ MAX31855S,
+ MAX31855T,
+ MAX31855E,
+ MAX31855R,
+};
+
+static const char maxim_tc_types[] = {
+ 'K', '?', 'K', 'J', 'N', 'S', 'T', 'E', 'R'
};
static const struct iio_chan_spec max6675_channels[] = {
{ /* thermocouple temperature */
.type = IIO_TEMP,
.info_mask_separate =
- BIT(IIO_CHAN_INFO_RAW) | BIT(IIO_CHAN_INFO_SCALE),
+ BIT(IIO_CHAN_INFO_RAW) | BIT(IIO_CHAN_INFO_SCALE) |
+ BIT(IIO_CHAN_INFO_THERMOCOUPLE_TYPE),
.scan_index = 0,
.scan_type = {
.sign = 's',
@@ -48,7 +61,8 @@ static const struct iio_chan_spec max31855_channels[] = {
.type = IIO_TEMP,
.address = 2,
.info_mask_separate =
- BIT(IIO_CHAN_INFO_RAW) | BIT(IIO_CHAN_INFO_SCALE),
+ BIT(IIO_CHAN_INFO_RAW) | BIT(IIO_CHAN_INFO_SCALE) |
+ BIT(IIO_CHAN_INFO_THERMOCOUPLE_TYPE),
.scan_index = 0,
.scan_type = {
.sign = 's',
@@ -110,6 +124,7 @@ struct maxim_thermocouple_data {
const struct maxim_thermocouple_chip *chip;
u8 buffer[16] ____cacheline_aligned;
+ char tc_type;
};
static int maxim_thermocouple_read(struct maxim_thermocouple_data *data,
@@ -196,6 +211,10 @@ static int maxim_thermocouple_read_raw(struct iio_dev *indio_dev,
ret = IIO_VAL_INT;
}
break;
+ case IIO_CHAN_INFO_THERMOCOUPLE_TYPE:
+ *val = data->tc_type;
+ ret = IIO_VAL_CHAR;
+ break;
}
return ret;
@@ -210,8 +229,9 @@ static int maxim_thermocouple_probe(struct spi_device *spi)
const struct spi_device_id *id = spi_get_device_id(spi);
struct iio_dev *indio_dev;
struct maxim_thermocouple_data *data;
+ const int chip_type = (id->driver_data == MAX6675) ? MAX6675 : MAX31855;
const struct maxim_thermocouple_chip *chip =
- &maxim_thermocouple_chips[id->driver_data];
+ &maxim_thermocouple_chips[chip_type];
int ret;
indio_dev = devm_iio_device_alloc(&spi->dev, sizeof(*data));
@@ -229,6 +249,7 @@ static int maxim_thermocouple_probe(struct spi_device *spi)
data = iio_priv(indio_dev);
data->spi = spi;
data->chip = chip;
+ data->tc_type = maxim_tc_types[id->driver_data];
ret = devm_iio_triggered_buffer_setup(&spi->dev,
indio_dev, NULL,
@@ -236,12 +257,22 @@ static int maxim_thermocouple_probe(struct spi_device *spi)
if (ret)
return ret;
+ if (id->driver_data == MAX31855)
+ dev_warn(&spi->dev, "generic max31855 ID is deprecated\nplease use more specific part type");
+
return devm_iio_device_register(&spi->dev, indio_dev);
}
static const struct spi_device_id maxim_thermocouple_id[] = {
{"max6675", MAX6675},
{"max31855", MAX31855},
+ {"max31855k", MAX31855K},
+ {"max31855j", MAX31855J},
+ {"max31855n", MAX31855N},
+ {"max31855s", MAX31855S},
+ {"max31855t", MAX31855T},
+ {"max31855e", MAX31855E},
+ {"max31855r", MAX31855R},
{},
};
MODULE_DEVICE_TABLE(spi, maxim_thermocouple_id);
@@ -249,6 +280,13 @@ MODULE_DEVICE_TABLE(spi, maxim_thermocouple_id);
static const struct of_device_id maxim_thermocouple_of_match[] = {
{ .compatible = "maxim,max6675" },
{ .compatible = "maxim,max31855" },
+ { .compatible = "maxim,max31855k" },
+ { .compatible = "maxim,max31855j" },
+ { .compatible = "maxim,max31855n" },
+ { .compatible = "maxim,max31855s" },
+ { .compatible = "maxim,max31855t" },
+ { .compatible = "maxim,max31855e" },
+ { .compatible = "maxim,max31855r" },
{ },
};
MODULE_DEVICE_TABLE(of, maxim_thermocouple_of_match);
diff --git a/drivers/iio/trigger/stm32-timer-trigger.c b/drivers/iio/trigger/stm32-timer-trigger.c
index a5dfe65cd9b9..2e0d32aa8436 100644
--- a/drivers/iio/trigger/stm32-timer-trigger.c
+++ b/drivers/iio/trigger/stm32-timer-trigger.c
@@ -297,9 +297,6 @@ static ssize_t stm32_tt_store_master_mode(struct device *dev,
strlen(master_mode_table[i]))) {
regmap_update_bits(priv->regmap, TIM_CR2, mask,
i << shift);
- /* Make sure that registers are updated */
- regmap_update_bits(priv->regmap, TIM_EGR,
- TIM_EGR_UG, TIM_EGR_UG);
return len;
}
}
diff --git a/drivers/infiniband/core/umem.c b/drivers/infiniband/core/umem.c
index ac4738d2e0dd..06b6125b5ae1 100644
--- a/drivers/infiniband/core/umem.c
+++ b/drivers/infiniband/core/umem.c
@@ -54,7 +54,7 @@ static void __ib_umem_release(struct ib_device *dev, struct ib_umem *umem, int d
for_each_sg_page(umem->sg_head.sgl, &sg_iter, umem->sg_nents, 0) {
page = sg_page_iter_page(&sg_iter);
- put_user_pages_dirty_lock(&page, 1, umem->writable && dirty);
+ unpin_user_pages_dirty_lock(&page, 1, umem->writable && dirty);
}
sg_free_table(&umem->sg_head);
@@ -260,16 +260,13 @@ struct ib_umem *ib_umem_get(struct ib_device *device, unsigned long addr,
sg = umem->sg_head.sgl;
while (npages) {
- down_read(&mm->mmap_sem);
- ret = get_user_pages(cur_base,
- min_t(unsigned long, npages,
- PAGE_SIZE / sizeof (struct page *)),
- gup_flags | FOLL_LONGTERM,
- page_list, NULL);
- if (ret < 0) {
- up_read(&mm->mmap_sem);
+ ret = pin_user_pages_fast(cur_base,
+ min_t(unsigned long, npages,
+ PAGE_SIZE /
+ sizeof(struct page *)),
+ gup_flags | FOLL_LONGTERM, page_list);
+ if (ret < 0)
goto umem_release;
- }
cur_base += ret * PAGE_SIZE;
npages -= ret;
@@ -277,8 +274,6 @@ struct ib_umem *ib_umem_get(struct ib_device *device, unsigned long addr,
sg = ib_umem_add_sg_table(sg, page_list, ret,
dma_get_max_seg_size(device->dma_device),
&umem->sg_nents);
-
- up_read(&mm->mmap_sem);
}
sg_mark_end(sg);
diff --git a/drivers/infiniband/core/umem_odp.c b/drivers/infiniband/core/umem_odp.c
index cb3b17a7b9b0..b8c657b28380 100644
--- a/drivers/infiniband/core/umem_odp.c
+++ b/drivers/infiniband/core/umem_odp.c
@@ -281,9 +281,8 @@ EXPORT_SYMBOL(ib_umem_odp_release);
* The function returns -EFAULT if the DMA mapping operation fails. It returns
* -EAGAIN if a concurrent invalidation prevents us from updating the page.
*
- * The page is released via put_user_page even if the operation failed. For
- * on-demand pinning, the page is released whenever it isn't stored in the
- * umem.
+ * The page is released via put_page even if the operation failed. For on-demand
+ * pinning, the page is released whenever it isn't stored in the umem.
*/
static int ib_umem_odp_map_dma_single_page(
struct ib_umem_odp *umem_odp,
@@ -336,7 +335,7 @@ static int ib_umem_odp_map_dma_single_page(
}
out:
- put_user_page(page);
+ put_page(page);
return ret;
}
@@ -446,7 +445,7 @@ int ib_umem_odp_map_dma_pages(struct ib_umem_odp *umem_odp, u64 user_virt,
ret = -EFAULT;
break;
}
- put_user_page(local_page_list[j]);
+ put_page(local_page_list[j]);
continue;
}
@@ -473,8 +472,8 @@ int ib_umem_odp_map_dma_pages(struct ib_umem_odp *umem_odp, u64 user_virt,
* ib_umem_odp_map_dma_single_page().
*/
if (npages - (j + 1) > 0)
- put_user_pages(&local_page_list[j+1],
- npages - (j + 1));
+ release_pages(&local_page_list[j+1],
+ npages - (j + 1));
break;
}
}
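
A minimal sketch of the get-to-pin conversion pattern these umem hunks apply (assumed FOLL_LONGTERM DMA context, not the driver code): pages taken with pin_user_pages*() must be released with the unpin_user_page*() family, never plain put_page():

#include <linux/mm.h>

static int pin_example(unsigned long addr, int npages,
		       struct page **pages, bool writable)
{
	unsigned int gup_flags = FOLL_LONGTERM |
				 (writable ? FOLL_WRITE : 0);
	int ret;

	ret = pin_user_pages_fast(addr, npages, gup_flags, pages);
	if (ret < 0)
		return ret;

	/* ... DMA-map and use the first 'ret' pages ... */

	unpin_user_pages_dirty_lock(pages, ret, writable);
	return 0;
}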
diff --git a/drivers/infiniband/hw/bnxt_re/qplib_fp.c b/drivers/infiniband/hw/bnxt_re/qplib_fp.c
index 4d07d22bfa7b..020f70e6865e 100644
--- a/drivers/infiniband/hw/bnxt_re/qplib_fp.c
+++ b/drivers/infiniband/hw/bnxt_re/qplib_fp.c
@@ -442,7 +442,7 @@ int bnxt_qplib_enable_nq(struct pci_dev *pdev, struct bnxt_qplib_nq *nq,
goto fail;
}
/* Unconditionally map 8 bytes to support 57500 series */
- nq->bar_reg_iomem = ioremap_nocache(nq_base + nq->bar_reg_off, 8);
+ nq->bar_reg_iomem = ioremap(nq_base + nq->bar_reg_off, 8);
if (!nq->bar_reg_iomem) {
rc = -ENOMEM;
goto fail;
diff --git a/drivers/infiniband/hw/bnxt_re/qplib_rcfw.c b/drivers/infiniband/hw/bnxt_re/qplib_rcfw.c
index 5cdfa84faf85..1291b12287a5 100644
--- a/drivers/infiniband/hw/bnxt_re/qplib_rcfw.c
+++ b/drivers/infiniband/hw/bnxt_re/qplib_rcfw.c
@@ -717,7 +717,7 @@ int bnxt_qplib_enable_rcfw_channel(struct pci_dev *pdev,
if (!res_base)
return -ENOMEM;
- rcfw->cmdq_bar_reg_iomem = ioremap_nocache(res_base +
+ rcfw->cmdq_bar_reg_iomem = ioremap(res_base +
RCFW_COMM_BASE_OFFSET,
RCFW_COMM_SIZE);
if (!rcfw->cmdq_bar_reg_iomem) {
@@ -739,7 +739,7 @@ int bnxt_qplib_enable_rcfw_channel(struct pci_dev *pdev,
"CREQ BAR region %d resc start is 0!\n",
rcfw->creq_bar_reg);
/* Unconditionally map 8 bytes to support 57500 series */
- rcfw->creq_bar_reg_iomem = ioremap_nocache(res_base + cp_bar_reg_off,
+ rcfw->creq_bar_reg_iomem = ioremap(res_base + cp_bar_reg_off,
8);
if (!rcfw->creq_bar_reg_iomem) {
dev_err(&rcfw->pdev->dev, "CREQ BAR region %d mapping failed\n",
diff --git a/drivers/infiniband/hw/bnxt_re/qplib_res.c b/drivers/infiniband/hw/bnxt_re/qplib_res.c
index bdbde8e22420..60ea1b924b67 100644
--- a/drivers/infiniband/hw/bnxt_re/qplib_res.c
+++ b/drivers/infiniband/hw/bnxt_re/qplib_res.c
@@ -704,7 +704,7 @@ static int bnxt_qplib_alloc_dpi_tbl(struct bnxt_qplib_res *res,
return -ENOMEM;
}
- dpit->dbr_bar_reg_iomem = ioremap_nocache(bar_reg_base + dbr_offset,
+ dpit->dbr_bar_reg_iomem = ioremap(bar_reg_base + dbr_offset,
dbr_len);
if (!dpit->dbr_bar_reg_iomem) {
dev_err(&res->pdev->dev,
diff --git a/drivers/infiniband/hw/hfi1/pcie.c b/drivers/infiniband/hw/hfi1/pcie.c
index 61362bd6d3ce..1a6268d61977 100644
--- a/drivers/infiniband/hw/hfi1/pcie.c
+++ b/drivers/infiniband/hw/hfi1/pcie.c
@@ -161,7 +161,7 @@ int hfi1_pcie_ddinit(struct hfi1_devdata *dd, struct pci_dev *pdev)
return -EINVAL;
}
- dd->kregbase1 = ioremap_nocache(addr, RCV_ARRAY);
+ dd->kregbase1 = ioremap(addr, RCV_ARRAY);
if (!dd->kregbase1) {
dd_dev_err(dd, "UC mapping of kregbase1 failed\n");
return -ENOMEM;
@@ -179,7 +179,7 @@ int hfi1_pcie_ddinit(struct hfi1_devdata *dd, struct pci_dev *pdev)
dd_dev_info(dd, "RcvArray count: %u\n", rcv_array_count);
dd->base2_start = RCV_ARRAY + rcv_array_count * 8;
- dd->kregbase2 = ioremap_nocache(
+ dd->kregbase2 = ioremap(
addr + dd->base2_start,
TXE_PIO_SEND - dd->base2_start);
if (!dd->kregbase2) {
diff --git a/drivers/infiniband/hw/hfi1/trace_tid.h b/drivers/infiniband/hw/hfi1/trace_tid.h
index 343fb9894a82..985ffa9cc958 100644
--- a/drivers/infiniband/hw/hfi1/trace_tid.h
+++ b/drivers/infiniband/hw/hfi1/trace_tid.h
@@ -138,10 +138,10 @@ TRACE_EVENT(/* put_tid */
TP_ARGS(dd, index, type, pa, order),
TP_STRUCT__entry(/* entry */
DD_DEV_ENTRY(dd)
- __field(unsigned long, pa);
- __field(u32, index);
- __field(u32, type);
- __field(u16, order);
+ __field(unsigned long, pa)
+ __field(u32, index)
+ __field(u32, type)
+ __field(u16, order)
),
TP_fast_assign(/* assign */
DD_DEV_ASSIGN(dd);
diff --git a/drivers/infiniband/hw/hfi1/trace_tx.h b/drivers/infiniband/hw/hfi1/trace_tx.h
index 09eb0c9ada00..769e5e4710c6 100644
--- a/drivers/infiniband/hw/hfi1/trace_tx.h
+++ b/drivers/infiniband/hw/hfi1/trace_tx.h
@@ -588,7 +588,7 @@ TRACE_EVENT(hfi1_sdma_user_reqinfo,
TP_PROTO(struct hfi1_devdata *dd, u16 ctxt, u8 subctxt, u16 *i),
TP_ARGS(dd, ctxt, subctxt, i),
TP_STRUCT__entry(
- DD_DEV_ENTRY(dd);
+ DD_DEV_ENTRY(dd)
__field(u16, ctxt)
__field(u8, subctxt)
__field(u8, ver_opcode)
diff --git a/drivers/infiniband/hw/hfi1/user_pages.c b/drivers/infiniband/hw/hfi1/user_pages.c
index 469acb961fbd..3b505006c0a6 100644
--- a/drivers/infiniband/hw/hfi1/user_pages.c
+++ b/drivers/infiniband/hw/hfi1/user_pages.c
@@ -106,7 +106,7 @@ int hfi1_acquire_user_pages(struct mm_struct *mm, unsigned long vaddr, size_t np
int ret;
unsigned int gup_flags = FOLL_LONGTERM | (writable ? FOLL_WRITE : 0);
- ret = get_user_pages_fast(vaddr, npages, gup_flags, pages);
+ ret = pin_user_pages_fast(vaddr, npages, gup_flags, pages);
if (ret < 0)
return ret;
@@ -118,7 +118,7 @@ int hfi1_acquire_user_pages(struct mm_struct *mm, unsigned long vaddr, size_t np
void hfi1_release_user_pages(struct mm_struct *mm, struct page **p,
size_t npages, bool dirty)
{
- put_user_pages_dirty_lock(p, npages, dirty);
+ unpin_user_pages_dirty_lock(p, npages, dirty);
if (mm) { /* during close after signal, mm can be NULL */
atomic64_sub(npages, &mm->pinned_vm);
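This and the matching hunks in mthca, qib, usnic and siw below convert long-term get_user_pages*()/put_user_page*() users to pin_user_pages*()/unpin_user_page*(). The pin variants mark pages with FOLL_PIN internally so the MM can distinguish DMA-style pins from ordinary references; the pairing rule is that pages obtained via pin_user_pages*() must be released with unpin_user_page() or unpin_user_pages_dirty_lock(), never put_page(). A hedged sketch of the pairing, with a hypothetical helper:

	#include <linux/mm.h>

	static int example_pin_range(unsigned long uaddr, int npages,
				     struct page **pages)
	{
		int pinned = pin_user_pages_fast(uaddr, npages,
						 FOLL_WRITE | FOLL_LONGTERM,
						 pages);
		if (pinned < 0)
			return pinned;
		if (pinned != npages) {
			unpin_user_pages(pages, pinned);	/* partial pin */
			return -EFAULT;
		}
		return 0;
	}

	static void example_unpin_range(struct page **pages, int npages,
					bool dirty)
	{
		unpin_user_pages_dirty_lock(pages, npages, dirty);
	}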
diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c
index 0ca958143280..e874d688d040 100644
--- a/drivers/infiniband/hw/mlx5/main.c
+++ b/drivers/infiniband/hw/mlx5/main.c
@@ -40,7 +40,7 @@
#include <linux/slab.h>
#include <linux/bitmap.h>
#if defined(CONFIG_X86)
-#include <asm/pat.h>
+#include <asm/memtype.h>
#endif
#include <linux/sched.h>
#include <linux/sched/mm.h>
@@ -3311,12 +3311,14 @@ static struct mlx5_ib_flow_prio *_get_prio(struct mlx5_flow_namespace *ns,
int num_entries, int num_groups,
u32 flags)
{
+ struct mlx5_flow_table_attr ft_attr = {};
struct mlx5_flow_table *ft;
- ft = mlx5_create_auto_grouped_flow_table(ns, priority,
- num_entries,
- num_groups,
- 0, flags);
+ ft_attr.prio = priority;
+ ft_attr.max_fte = num_entries;
+ ft_attr.flags = flags;
+ ft_attr.autogroup.max_num_groups = num_groups;
+ ft = mlx5_create_auto_grouped_flow_table(ns, &ft_attr);
if (IS_ERR(ft))
return ERR_CAST(ft);
diff --git a/drivers/infiniband/hw/mthca/mthca_memfree.c b/drivers/infiniband/hw/mthca/mthca_memfree.c
index edccfd6e178f..78a48aea3faf 100644
--- a/drivers/infiniband/hw/mthca/mthca_memfree.c
+++ b/drivers/infiniband/hw/mthca/mthca_memfree.c
@@ -472,7 +472,7 @@ int mthca_map_user_db(struct mthca_dev *dev, struct mthca_uar *uar,
goto out;
}
- ret = get_user_pages_fast(uaddr & PAGE_MASK, 1,
+ ret = pin_user_pages_fast(uaddr & PAGE_MASK, 1,
FOLL_WRITE | FOLL_LONGTERM, pages);
if (ret < 0)
goto out;
@@ -482,7 +482,7 @@ int mthca_map_user_db(struct mthca_dev *dev, struct mthca_uar *uar,
ret = pci_map_sg(dev->pdev, &db_tab->page[i].mem, 1, PCI_DMA_TODEVICE);
if (ret < 0) {
- put_user_page(pages[0]);
+ unpin_user_page(pages[0]);
goto out;
}
@@ -490,7 +490,7 @@ int mthca_map_user_db(struct mthca_dev *dev, struct mthca_uar *uar,
mthca_uarc_virt(dev, uar, i));
if (ret) {
pci_unmap_sg(dev->pdev, &db_tab->page[i].mem, 1, PCI_DMA_TODEVICE);
- put_user_page(sg_page(&db_tab->page[i].mem));
+ unpin_user_page(sg_page(&db_tab->page[i].mem));
goto out;
}
@@ -556,7 +556,7 @@ void mthca_cleanup_user_db_tab(struct mthca_dev *dev, struct mthca_uar *uar,
if (db_tab->page[i].uvirt) {
mthca_UNMAP_ICM(dev, mthca_uarc_virt(dev, uar, i), 1);
pci_unmap_sg(dev->pdev, &db_tab->page[i].mem, 1, PCI_DMA_TODEVICE);
- put_user_page(sg_page(&db_tab->page[i].mem));
+ unpin_user_page(sg_page(&db_tab->page[i].mem));
}
}
diff --git a/drivers/infiniband/hw/qib/qib_iba7322.c b/drivers/infiniband/hw/qib/qib_iba7322.c
index dd4843379f51..91d64dd71a8a 100644
--- a/drivers/infiniband/hw/qib/qib_iba7322.c
+++ b/drivers/infiniband/hw/qib/qib_iba7322.c
@@ -6630,7 +6630,7 @@ static int qib_init_7322_variables(struct qib_devdata *dd)
/* vl15 buffers start just after the 4k buffers */
vl15off = dd->physaddr + (dd->piobufbase >> 32) +
dd->piobcnt4k * dd->align4k;
- dd->piovl15base = ioremap_nocache(vl15off,
+ dd->piovl15base = ioremap(vl15off,
NUM_VL15_BUFS * dd->align4k);
if (!dd->piovl15base) {
ret = -ENOMEM;
diff --git a/drivers/infiniband/hw/qib/qib_init.c b/drivers/infiniband/hw/qib/qib_init.c
index d4fd8a6cff7b..43c8ee1f46e0 100644
--- a/drivers/infiniband/hw/qib/qib_init.c
+++ b/drivers/infiniband/hw/qib/qib_init.c
@@ -1759,7 +1759,7 @@ int init_chip_wc_pat(struct qib_devdata *dd, u32 vl15buflen)
qib_userlen = dd->ureg_align * dd->cfgctxts;
/* Sanity checks passed, now create the new mappings */
- qib_kregbase = ioremap_nocache(qib_physaddr, qib_kreglen);
+ qib_kregbase = ioremap(qib_physaddr, qib_kreglen);
if (!qib_kregbase)
goto bail;
@@ -1768,7 +1768,7 @@ int init_chip_wc_pat(struct qib_devdata *dd, u32 vl15buflen)
goto bail_kregbase;
if (qib_userlen) {
- qib_userbase = ioremap_nocache(qib_physaddr + dd->uregbase,
+ qib_userbase = ioremap(qib_physaddr + dd->uregbase,
qib_userlen);
if (!qib_userbase)
goto bail_piobase;
diff --git a/drivers/infiniband/hw/qib/qib_pcie.c b/drivers/infiniband/hw/qib/qib_pcie.c
index 864f2af171f7..3dc6ce033319 100644
--- a/drivers/infiniband/hw/qib/qib_pcie.c
+++ b/drivers/infiniband/hw/qib/qib_pcie.c
@@ -145,7 +145,7 @@ int qib_pcie_ddinit(struct qib_devdata *dd, struct pci_dev *pdev,
addr = pci_resource_start(pdev, 0);
len = pci_resource_len(pdev, 0);
- dd->kregbase = ioremap_nocache(addr, len);
+ dd->kregbase = ioremap(addr, len);
if (!dd->kregbase)
return -ENOMEM;
diff --git a/drivers/infiniband/hw/qib/qib_user_pages.c b/drivers/infiniband/hw/qib/qib_user_pages.c
index 6bf764e41891..342e3172ca40 100644
--- a/drivers/infiniband/hw/qib/qib_user_pages.c
+++ b/drivers/infiniband/hw/qib/qib_user_pages.c
@@ -40,7 +40,7 @@
static void __qib_release_user_pages(struct page **p, size_t num_pages,
int dirty)
{
- put_user_pages_dirty_lock(p, num_pages, dirty);
+ unpin_user_pages_dirty_lock(p, num_pages, dirty);
}
/**
@@ -108,7 +108,7 @@ int qib_get_user_pages(unsigned long start_page, size_t num_pages,
down_read(&current->mm->mmap_sem);
for (got = 0; got < num_pages; got += ret) {
- ret = get_user_pages(start_page + got * PAGE_SIZE,
+ ret = pin_user_pages(start_page + got * PAGE_SIZE,
num_pages - got,
FOLL_LONGTERM | FOLL_WRITE | FOLL_FORCE,
p + got, NULL);
diff --git a/drivers/infiniband/hw/qib/qib_user_sdma.c b/drivers/infiniband/hw/qib/qib_user_sdma.c
index 05190edc2611..a67599b5a550 100644
--- a/drivers/infiniband/hw/qib/qib_user_sdma.c
+++ b/drivers/infiniband/hw/qib/qib_user_sdma.c
@@ -317,7 +317,7 @@ static int qib_user_sdma_page_to_frags(const struct qib_devdata *dd,
* the caller can ignore this page.
*/
if (put) {
- put_user_page(page);
+ unpin_user_page(page);
} else {
/* coalesce case */
kunmap(page);
@@ -631,7 +631,7 @@ static void qib_user_sdma_free_pkt_frag(struct device *dev,
kunmap(pkt->addr[i].page);
if (pkt->addr[i].put_page)
- put_user_page(pkt->addr[i].page);
+ unpin_user_page(pkt->addr[i].page);
else
__free_page(pkt->addr[i].page);
} else if (pkt->addr[i].kvaddr) {
@@ -670,7 +670,7 @@ static int qib_user_sdma_pin_pages(const struct qib_devdata *dd,
else
j = npages;
- ret = get_user_pages_fast(addr, j, FOLL_LONGTERM, pages);
+ ret = pin_user_pages_fast(addr, j, FOLL_LONGTERM, pages);
if (ret != j) {
i = 0;
j = ret;
@@ -706,7 +706,7 @@ static int qib_user_sdma_pin_pages(const struct qib_devdata *dd,
/* if error, return all pages not managed by pkt */
free_pages:
while (i < j)
- put_user_page(pages[i++]);
+ unpin_user_page(pages[i++]);
done:
return ret;
diff --git a/drivers/infiniband/hw/usnic/usnic_uiom.c b/drivers/infiniband/hw/usnic/usnic_uiom.c
index 62e6ffa9ad78..bd9f944b68fc 100644
--- a/drivers/infiniband/hw/usnic/usnic_uiom.c
+++ b/drivers/infiniband/hw/usnic/usnic_uiom.c
@@ -75,7 +75,7 @@ static void usnic_uiom_put_pages(struct list_head *chunk_list, int dirty)
for_each_sg(chunk->page_list, sg, chunk->nents, i) {
page = sg_page(sg);
pa = sg_phys(sg);
- put_user_pages_dirty_lock(&page, 1, dirty);
+ unpin_user_pages_dirty_lock(&page, 1, dirty);
usnic_dbg("pa: %pa\n", &pa);
}
kfree(chunk);
@@ -141,7 +141,7 @@ static int usnic_uiom_get_pages(unsigned long addr, size_t size, int writable,
ret = 0;
while (npages) {
- ret = get_user_pages(cur_base,
+ ret = pin_user_pages(cur_base,
min_t(unsigned long, npages,
PAGE_SIZE / sizeof(struct page *)),
gup_flags | FOLL_LONGTERM,
diff --git a/drivers/infiniband/sw/siw/siw_mem.c b/drivers/infiniband/sw/siw/siw_mem.c
index e99983f07663..e2061dc0b043 100644
--- a/drivers/infiniband/sw/siw/siw_mem.c
+++ b/drivers/infiniband/sw/siw/siw_mem.c
@@ -63,7 +63,7 @@ struct siw_mem *siw_mem_id2obj(struct siw_device *sdev, int stag_index)
static void siw_free_plist(struct siw_page_chunk *chunk, int num_pages,
bool dirty)
{
- put_user_pages_dirty_lock(chunk->plist, num_pages, dirty);
+ unpin_user_pages_dirty_lock(chunk->plist, num_pages, dirty);
}
void siw_umem_release(struct siw_umem *umem, bool dirty)
@@ -426,7 +426,7 @@ struct siw_umem *siw_umem_get(u64 start, u64 len, bool writable)
while (nents) {
struct page **plist = &umem->page_chunk[i].plist[got];
- rv = get_user_pages(first_page_va, nents,
+ rv = pin_user_pages(first_page_va, nents,
foll_flags | FOLL_LONGTERM,
plist, NULL);
if (rv < 0)
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_main.c b/drivers/infiniband/ulp/ipoib/ipoib_main.c
index e5f438ab716c..4a0d3a9e72e1 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_main.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_main.c
@@ -1182,7 +1182,7 @@ unref:
return NETDEV_TX_OK;
}
-static void ipoib_timeout(struct net_device *dev)
+static void ipoib_timeout(struct net_device *dev, unsigned int txqueue)
{
struct ipoib_dev_priv *priv = ipoib_priv(dev);
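The extra parameter follows the v5.6 change to the ndo_tx_timeout netdev op, which now receives the index of the stalled TX queue; single-queue drivers such as ipoib simply ignore it. A hedged sketch of the updated hook, with a hypothetical driver:

	#include <linux/netdevice.h>

	static void example_tx_timeout(struct net_device *dev,
				       unsigned int txqueue)
	{
		netdev_warn(dev, "TX queue %u timed out\n", txqueue);
	}

	static const struct net_device_ops example_netdev_ops = {
		.ndo_tx_timeout = example_tx_timeout,
	};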
diff --git a/drivers/infiniband/ulp/isert/ib_isert.c b/drivers/infiniband/ulp/isert/ib_isert.c
index a1a035270cab..b273e421e910 100644
--- a/drivers/infiniband/ulp/isert/ib_isert.c
+++ b/drivers/infiniband/ulp/isert/ib_isert.c
@@ -2575,17 +2575,6 @@ isert_wait4logout(struct isert_conn *isert_conn)
}
}
-static void
-isert_wait4cmds(struct iscsi_conn *conn)
-{
- isert_info("iscsi_conn %p\n", conn);
-
- if (conn->sess) {
- target_sess_cmd_list_set_waiting(conn->sess->se_sess);
- target_wait_for_sess_cmds(conn->sess->se_sess);
- }
-}
-
/**
* isert_put_unsol_pending_cmds() - Drop commands waiting for
* unsolicitate dataout
@@ -2633,7 +2622,6 @@ static void isert_wait_conn(struct iscsi_conn *conn)
ib_drain_qp(isert_conn->qp);
isert_put_unsol_pending_cmds(conn);
- isert_wait4cmds(conn);
isert_wait4logout(isert_conn);
queue_work(isert_release_wq, &isert_conn->release_work);
diff --git a/drivers/input/evdev.c b/drivers/input/evdev.c
index f918fca9ada3..cb6e3a5f509c 100644
--- a/drivers/input/evdev.c
+++ b/drivers/input/evdev.c
@@ -484,10 +484,7 @@ static int evdev_open(struct inode *inode, struct file *file)
struct evdev_client *client;
int error;
- client = kzalloc(struct_size(client, buffer, bufsize),
- GFP_KERNEL | __GFP_NOWARN);
- if (!client)
- client = vzalloc(struct_size(client, buffer, bufsize));
+ client = kvzalloc(struct_size(client, buffer, bufsize), GFP_KERNEL);
if (!client)
return -ENOMEM;
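kvzalloc() folds the open-coded "try kzalloc, fall back to vzalloc" dance into one call; it suppresses the allocation-failure warning on the kmalloc attempt itself, so the explicit __GFP_NOWARN goes away too. The matching release side is kvfree(), which handles either backing allocation. A hedged sketch:

	#include <linux/mm.h>

	static void *example_alloc(size_t n)
	{
		return kvzalloc(n, GFP_KERNEL);	/* kmalloc, then vmalloc fallback */
	}

	static void example_free(void *p)
	{
		kvfree(p);	/* correct for either backing store */
	}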
diff --git a/drivers/input/keyboard/pxa930_rotary.c b/drivers/input/keyboard/pxa930_rotary.c
index f7414091d94e..2fe9dcfe0a6f 100644
--- a/drivers/input/keyboard/pxa930_rotary.c
+++ b/drivers/input/keyboard/pxa930_rotary.c
@@ -107,7 +107,7 @@ static int pxa930_rotary_probe(struct platform_device *pdev)
if (!r)
return -ENOMEM;
- r->mmio_base = ioremap_nocache(res->start, resource_size(res));
+ r->mmio_base = ioremap(res->start, resource_size(res));
if (r->mmio_base == NULL) {
dev_err(&pdev->dev, "failed to remap IO memory\n");
err = -ENXIO;
diff --git a/drivers/input/keyboard/sh_keysc.c b/drivers/input/keyboard/sh_keysc.c
index 27ad73f43451..c155adebf96e 100644
--- a/drivers/input/keyboard/sh_keysc.c
+++ b/drivers/input/keyboard/sh_keysc.c
@@ -195,7 +195,7 @@ static int sh_keysc_probe(struct platform_device *pdev)
memcpy(&priv->pdata, dev_get_platdata(&pdev->dev), sizeof(priv->pdata));
pdata = &priv->pdata;
- priv->iomem_base = ioremap_nocache(res->start, resource_size(res));
+ priv->iomem_base = ioremap(res->start, resource_size(res));
if (priv->iomem_base == NULL) {
dev_err(&pdev->dev, "failed to remap I/O memory\n");
error = -ENXIO;
diff --git a/drivers/input/misc/keyspan_remote.c b/drivers/input/misc/keyspan_remote.c
index 83368f1e7c4e..4650f4a94989 100644
--- a/drivers/input/misc/keyspan_remote.c
+++ b/drivers/input/misc/keyspan_remote.c
@@ -336,7 +336,8 @@ static int keyspan_setup(struct usb_device* dev)
int retval = 0;
retval = usb_control_msg(dev, usb_sndctrlpipe(dev, 0),
- 0x11, 0x40, 0x5601, 0x0, NULL, 0, 0);
+ 0x11, 0x40, 0x5601, 0x0, NULL, 0,
+ USB_CTRL_SET_TIMEOUT);
if (retval) {
dev_dbg(&dev->dev, "%s - failed to set bit rate due to error: %d\n",
__func__, retval);
@@ -344,7 +345,8 @@ static int keyspan_setup(struct usb_device* dev)
}
retval = usb_control_msg(dev, usb_sndctrlpipe(dev, 0),
- 0x44, 0x40, 0x0, 0x0, NULL, 0, 0);
+ 0x44, 0x40, 0x0, 0x0, NULL, 0,
+ USB_CTRL_SET_TIMEOUT);
if (retval) {
dev_dbg(&dev->dev, "%s - failed to set resume sensitivity due to error: %d\n",
__func__, retval);
@@ -352,7 +354,8 @@ static int keyspan_setup(struct usb_device* dev)
}
retval = usb_control_msg(dev, usb_sndctrlpipe(dev, 0),
- 0x22, 0x40, 0x0, 0x0, NULL, 0, 0);
+ 0x22, 0x40, 0x0, 0x0, NULL, 0,
+ USB_CTRL_SET_TIMEOUT);
if (retval) {
dev_dbg(&dev->dev, "%s - failed to turn receive on due to error: %d\n",
__func__, retval);
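usb_control_msg() treats a timeout of 0 as "wait forever", so an unresponsive device could block the driver indefinitely; USB_CTRL_SET_TIMEOUT (5000 ms, from <linux/usb.h>) bounds each request instead. A hedged sketch with hypothetical request values:

	#include <linux/usb.h>

	static int example_vendor_cmd(struct usb_device *udev)
	{
		return usb_control_msg(udev, usb_sndctrlpipe(udev, 0),
				       0x11,			/* bRequest */
				       USB_TYPE_VENDOR | USB_RECIP_DEVICE, /* 0x40 */
				       0x5601, 0x0,		/* wValue, wIndex */
				       NULL, 0,			/* no data stage */
				       USB_CTRL_SET_TIMEOUT);
	}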
diff --git a/drivers/input/misc/max77650-onkey.c b/drivers/input/misc/max77650-onkey.c
index 4d875f2ac13d..ee55f22dbca5 100644
--- a/drivers/input/misc/max77650-onkey.c
+++ b/drivers/input/misc/max77650-onkey.c
@@ -108,9 +108,16 @@ static int max77650_onkey_probe(struct platform_device *pdev)
return input_register_device(onkey->input);
}
+static const struct of_device_id max77650_onkey_of_match[] = {
+ { .compatible = "maxim,max77650-onkey" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, max77650_onkey_of_match);
+
static struct platform_driver max77650_onkey_driver = {
.driver = {
.name = "max77650-onkey",
+ .of_match_table = max77650_onkey_of_match,
},
.probe = max77650_onkey_probe,
};
diff --git a/drivers/input/misc/pm8xxx-vibrator.c b/drivers/input/misc/pm8xxx-vibrator.c
index ecd762f93732..53ad25eaf1a2 100644
--- a/drivers/input/misc/pm8xxx-vibrator.c
+++ b/drivers/input/misc/pm8xxx-vibrator.c
@@ -90,7 +90,7 @@ static int pm8xxx_vib_set(struct pm8xxx_vib *vib, bool on)
if (regs->enable_mask)
rc = regmap_update_bits(vib->regmap, regs->enable_addr,
- on ? regs->enable_mask : 0, val);
+ regs->enable_mask, on ? ~0 : 0);
return rc;
}
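The old call passed "on ? regs->enable_mask : 0" as the mask argument, so switching the vibrator off made the call a no-op and the enable bit was never cleared. regmap_update_bits(map, reg, mask, val) computes new = (old & ~mask) | (val & mask): the mask must stay fixed while the value varies. A hedged sketch of toggling a bit field, with a hypothetical register:

	#include <linux/regmap.h>

	static int example_set_enable(struct regmap *map, unsigned int reg,
				      unsigned int mask, bool on)
	{
		/* val is ANDed with mask, so ~0 and mask are equivalent here */
		return regmap_update_bits(map, reg, mask, on ? mask : 0);
	}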
diff --git a/drivers/input/mouse/pxa930_trkball.c b/drivers/input/mouse/pxa930_trkball.c
index 41acde60b60f..3332b77eef2a 100644
--- a/drivers/input/mouse/pxa930_trkball.c
+++ b/drivers/input/mouse/pxa930_trkball.c
@@ -167,7 +167,7 @@ static int pxa930_trkball_probe(struct platform_device *pdev)
goto failed;
}
- trkball->mmio_base = ioremap_nocache(res->start, resource_size(res));
+ trkball->mmio_base = ioremap(res->start, resource_size(res));
if (!trkball->mmio_base) {
dev_err(&pdev->dev, "failed to ioremap registers\n");
error = -ENXIO;
diff --git a/drivers/input/rmi4/rmi_f54.c b/drivers/input/rmi4/rmi_f54.c
index 0bc01cfc2b51..6b23e679606e 100644
--- a/drivers/input/rmi4/rmi_f54.c
+++ b/drivers/input/rmi4/rmi_f54.c
@@ -24,6 +24,12 @@
#define F54_NUM_TX_OFFSET 1
#define F54_NUM_RX_OFFSET 0
+/*
+ * The smbus protocol can read only 32 bytes max at a time.
+ * But this should be fine for i2c/spi as well.
+ */
+#define F54_REPORT_DATA_SIZE 32
+
/* F54 commands */
#define F54_GET_REPORT 1
#define F54_FORCE_CAL 2
@@ -526,6 +532,7 @@ static void rmi_f54_work(struct work_struct *work)
int report_size;
u8 command;
int error;
+ int i;
report_size = rmi_f54_get_report_size(f54);
if (report_size == 0) {
@@ -558,23 +565,27 @@ static void rmi_f54_work(struct work_struct *work)
rmi_dbg(RMI_DEBUG_FN, &fn->dev, "Get report command completed, reading data\n");
- fifo[0] = 0;
- fifo[1] = 0;
- error = rmi_write_block(fn->rmi_dev,
- fn->fd.data_base_addr + F54_FIFO_OFFSET,
- fifo, sizeof(fifo));
- if (error) {
- dev_err(&fn->dev, "Failed to set fifo start offset\n");
- goto abort;
- }
+ for (i = 0; i < report_size; i += F54_REPORT_DATA_SIZE) {
+ int size = min(F54_REPORT_DATA_SIZE, report_size - i);
+
+ fifo[0] = i & 0xff;
+ fifo[1] = i >> 8;
+ error = rmi_write_block(fn->rmi_dev,
+ fn->fd.data_base_addr + F54_FIFO_OFFSET,
+ fifo, sizeof(fifo));
+ if (error) {
+ dev_err(&fn->dev, "Failed to set fifo start offset\n");
+ goto abort;
+ }
- error = rmi_read_block(fn->rmi_dev, fn->fd.data_base_addr +
- F54_REPORT_DATA_OFFSET, f54->report_data,
- report_size);
- if (error) {
- dev_err(&fn->dev, "%s: read [%d bytes] returned %d\n",
- __func__, report_size, error);
- goto abort;
+ error = rmi_read_block(fn->rmi_dev, fn->fd.data_base_addr +
+ F54_REPORT_DATA_OFFSET,
+ f54->report_data + i, size);
+ if (error) {
+ dev_err(&fn->dev, "%s: read [%d bytes] returned %d\n",
+ __func__, size, error);
+ goto abort;
+ }
}
abort:
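SMBus block transfers cap at 32 bytes, so the report is now pulled in F54_REPORT_DATA_SIZE chunks, rewriting the 16-bit FIFO read offset (little-endian, low byte first) before each block; the rmi_smbus.c hunks just below apply the complementary transport-side fix, advancing rmiaddr together with the buffer pointer when a transfer is split into SMB_MAX_COUNT blocks. The two byte stores above are the open-coded form of put_unaligned_le16(); a hedged sketch with a hypothetical helper:

	#include <asm/unaligned.h>

	static void example_encode_fifo_offset(u8 fifo[2], u16 offset)
	{
		put_unaligned_le16(offset, fifo);	/* fifo[0]=lo, fifo[1]=hi */
	}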
diff --git a/drivers/input/rmi4/rmi_smbus.c b/drivers/input/rmi4/rmi_smbus.c
index b313c579914f..2407ea43de59 100644
--- a/drivers/input/rmi4/rmi_smbus.c
+++ b/drivers/input/rmi4/rmi_smbus.c
@@ -163,6 +163,7 @@ static int rmi_smb_write_block(struct rmi_transport_dev *xport, u16 rmiaddr,
/* prepare to write next block of bytes */
cur_len -= SMB_MAX_COUNT;
databuff += SMB_MAX_COUNT;
+ rmiaddr += SMB_MAX_COUNT;
}
exit:
mutex_unlock(&rmi_smb->page_mutex);
@@ -214,6 +215,7 @@ static int rmi_smb_read_block(struct rmi_transport_dev *xport, u16 rmiaddr,
/* prepare to read next block of bytes */
cur_len -= SMB_MAX_COUNT;
databuff += SMB_MAX_COUNT;
+ rmiaddr += SMB_MAX_COUNT;
}
retval = 0;
diff --git a/drivers/input/serio/gscps2.c b/drivers/input/serio/gscps2.c
index 96f9b5397367..2f9775de3c5b 100644
--- a/drivers/input/serio/gscps2.c
+++ b/drivers/input/serio/gscps2.c
@@ -349,7 +349,7 @@ static int __init gscps2_probe(struct parisc_device *dev)
ps2port->port = serio;
ps2port->padev = dev;
- ps2port->addr = ioremap_nocache(hpa, GSC_STATUS + 4);
+ ps2port->addr = ioremap(hpa, GSC_STATUS + 4);
spin_lock_init(&ps2port->lock);
gscps2_reset(ps2port);
diff --git a/drivers/input/tablet/aiptek.c b/drivers/input/tablet/aiptek.c
index 2ca586fb914f..e08b0ef078e8 100644
--- a/drivers/input/tablet/aiptek.c
+++ b/drivers/input/tablet/aiptek.c
@@ -1713,7 +1713,7 @@ aiptek_probe(struct usb_interface *intf, const struct usb_device_id *id)
aiptek->inputdev = inputdev;
aiptek->intf = intf;
- aiptek->ifnum = intf->altsetting[0].desc.bInterfaceNumber;
+ aiptek->ifnum = intf->cur_altsetting->desc.bInterfaceNumber;
aiptek->inDelay = 0;
aiptek->endDelay = 0;
aiptek->previousJitterable = 0;
@@ -1802,14 +1802,14 @@ aiptek_probe(struct usb_interface *intf, const struct usb_device_id *id)
input_set_abs_params(inputdev, ABS_WHEEL, AIPTEK_WHEEL_MIN, AIPTEK_WHEEL_MAX - 1, 0, 0);
/* Verify that a device really has an endpoint */
- if (intf->altsetting[0].desc.bNumEndpoints < 1) {
+ if (intf->cur_altsetting->desc.bNumEndpoints < 1) {
dev_err(&intf->dev,
"interface has %d endpoints, but must have minimum 1\n",
- intf->altsetting[0].desc.bNumEndpoints);
+ intf->cur_altsetting->desc.bNumEndpoints);
err = -EINVAL;
goto fail3;
}
- endpoint = &intf->altsetting[0].endpoint[0].desc;
+ endpoint = &intf->cur_altsetting->endpoint[0].desc;
/* Go set up our URB, which is called when the tablet receives
* input.
diff --git a/drivers/input/tablet/gtco.c b/drivers/input/tablet/gtco.c
index 35031228a6d0..96d65575f75a 100644
--- a/drivers/input/tablet/gtco.c
+++ b/drivers/input/tablet/gtco.c
@@ -875,18 +875,14 @@ static int gtco_probe(struct usb_interface *usbinterface,
}
/* Sanity check that a device has an endpoint */
- if (usbinterface->altsetting[0].desc.bNumEndpoints < 1) {
+ if (usbinterface->cur_altsetting->desc.bNumEndpoints < 1) {
dev_err(&usbinterface->dev,
"Invalid number of endpoints\n");
error = -EINVAL;
goto err_free_urb;
}
- /*
- * The endpoint is always altsetting 0, we know this since we know
- * this device only has one interrupt endpoint
- */
- endpoint = &usbinterface->altsetting[0].endpoint[0].desc;
+ endpoint = &usbinterface->cur_altsetting->endpoint[0].desc;
/* Some debug */
dev_dbg(&usbinterface->dev, "gtco # interfaces: %d\n", usbinterface->num_altsetting);
@@ -896,7 +892,8 @@ static int gtco_probe(struct usb_interface *usbinterface,
if (usb_endpoint_xfer_int(endpoint))
dev_dbg(&usbinterface->dev, "endpoint: we have interrupt endpoint\n");
- dev_dbg(&usbinterface->dev, "endpoint extra len:%d\n", usbinterface->altsetting[0].extralen);
+ dev_dbg(&usbinterface->dev, "interface extra len:%d\n",
+ usbinterface->cur_altsetting->extralen);
/*
* Find the HID descriptor so we can find out the size of the
@@ -973,8 +970,6 @@ static int gtco_probe(struct usb_interface *usbinterface,
input_dev->dev.parent = &usbinterface->dev;
/* Setup the URB, it will be posted later on open of input device */
- endpoint = &usbinterface->altsetting[0].endpoint[0].desc;
-
usb_fill_int_urb(gtco->urbinfo,
udev,
usb_rcvintpipe(udev,
udev,
diff --git a/drivers/input/tablet/pegasus_notetaker.c b/drivers/input/tablet/pegasus_notetaker.c
index a1f3a0cb197e..38f087404f7a 100644
--- a/drivers/input/tablet/pegasus_notetaker.c
+++ b/drivers/input/tablet/pegasus_notetaker.c
@@ -275,7 +275,7 @@ static int pegasus_probe(struct usb_interface *intf,
return -ENODEV;
/* Sanity check that the device has an endpoint */
- if (intf->altsetting[0].desc.bNumEndpoints < 1) {
+ if (intf->cur_altsetting->desc.bNumEndpoints < 1) {
dev_err(&intf->dev, "Invalid number of endpoints\n");
return -EINVAL;
}
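The aiptek, gtco and pegasus hunks above (and the sur40 one below) share one pattern: intf->altsetting[] is ordered by position in the configuration descriptor, not by bAlternateSetting, and entry 0 need not be the setting that is actually installed, while intf->cur_altsetting always is. Validating endpoints against the active setting closes crashes triggered by devices (or fuzzers) whose first-listed setting differs from the active one. A hedged sketch of the check, with a hypothetical helper:

	#include <linux/usb.h>

	static int example_check_endpoint(struct usb_interface *intf)
	{
		struct usb_host_interface *alt = intf->cur_altsetting;

		if (alt->desc.bNumEndpoints < 1)
			return -ENODEV;
		if (!usb_endpoint_is_int_in(&alt->endpoint[0].desc))
			return -ENODEV;
		return 0;
	}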
diff --git a/drivers/input/touchscreen/sun4i-ts.c b/drivers/input/touchscreen/sun4i-ts.c
index 0af0fe8c40d7..742a7e96c1b5 100644
--- a/drivers/input/touchscreen/sun4i-ts.c
+++ b/drivers/input/touchscreen/sun4i-ts.c
@@ -237,6 +237,7 @@ static int sun4i_ts_probe(struct platform_device *pdev)
struct device *dev = &pdev->dev;
struct device_node *np = dev->of_node;
struct device *hwmon;
+ struct thermal_zone_device *thermal;
int error;
u32 reg;
bool ts_attached;
@@ -355,7 +356,10 @@ static int sun4i_ts_probe(struct platform_device *pdev)
if (IS_ERR(hwmon))
return PTR_ERR(hwmon);
- devm_thermal_zone_of_sensor_register(ts->dev, 0, ts, &sun4i_ts_tz_ops);
+ thermal = devm_thermal_zone_of_sensor_register(ts->dev, 0, ts,
+ &sun4i_ts_tz_ops);
+ if (IS_ERR(thermal))
+ return PTR_ERR(thermal);
writel(TEMP_IRQ_EN(1), ts->base + TP_INT_FIFOC);
diff --git a/drivers/input/touchscreen/sur40.c b/drivers/input/touchscreen/sur40.c
index 1dd47dda71cd..34d31c7ec8ba 100644
--- a/drivers/input/touchscreen/sur40.c
+++ b/drivers/input/touchscreen/sur40.c
@@ -661,7 +661,7 @@ static int sur40_probe(struct usb_interface *interface,
int error;
/* Check if we really have the right interface. */
- iface_desc = &interface->altsetting[0];
+ iface_desc = interface->cur_altsetting;
if (iface_desc->desc.bInterfaceClass != 0xFF)
return -ENODEV;
diff --git a/drivers/interconnect/Makefile b/drivers/interconnect/Makefile
index 28f2ab0824d5..725029ae7a2c 100644
--- a/drivers/interconnect/Makefile
+++ b/drivers/interconnect/Makefile
@@ -1,5 +1,6 @@
# SPDX-License-Identifier: GPL-2.0
+CFLAGS_core.o := -I$(src)
icc-core-objs := core.o
obj-$(CONFIG_INTERCONNECT) += icc-core.o
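The -I$(src) is needed because core.c now defines the framework tracepoints (CREATE_TRACE_POINTS plus #include "trace.h" in the next hunk): <trace/define_trace.h> re-includes the header named by TRACE_INCLUDE_FILE through the ordinary include path, so the directory holding trace.h must be on it. A local tracepoint header conventionally ends with the trailer below; this is a sketch of the usual boilerplate, assuming the new trace.h follows the standard pattern:

	/* tail of a local tracepoint header -- customary boilerplate */
	#undef TRACE_INCLUDE_PATH
	#define TRACE_INCLUDE_PATH .
	#undef TRACE_INCLUDE_FILE
	#define TRACE_INCLUDE_FILE trace
	#include <trace/define_trace.h>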
diff --git a/drivers/interconnect/core.c b/drivers/interconnect/core.c
index c498796adc07..f277e467156f 100644
--- a/drivers/interconnect/core.c
+++ b/drivers/interconnect/core.c
@@ -19,45 +19,22 @@
#include <linux/of.h>
#include <linux/overflow.h>
+#include "internal.h"
+
+#define CREATE_TRACE_POINTS
+#include "trace.h"
+
static DEFINE_IDR(icc_idr);
static LIST_HEAD(icc_providers);
static DEFINE_MUTEX(icc_lock);
static struct dentry *icc_debugfs_dir;
-/**
- * struct icc_req - constraints that are attached to each node
- * @req_node: entry in list of requests for the particular @node
- * @node: the interconnect node to which this constraint applies
- * @dev: reference to the device that sets the constraints
- * @tag: path tag (optional)
- * @avg_bw: an integer describing the average bandwidth in kBps
- * @peak_bw: an integer describing the peak bandwidth in kBps
- */
-struct icc_req {
- struct hlist_node req_node;
- struct icc_node *node;
- struct device *dev;
- u32 tag;
- u32 avg_bw;
- u32 peak_bw;
-};
-
-/**
- * struct icc_path - interconnect path structure
- * @num_nodes: number of hops (nodes)
- * @reqs: array of the requests applicable to this path of nodes
- */
-struct icc_path {
- size_t num_nodes;
- struct icc_req reqs[];
-};
-
static void icc_summary_show_one(struct seq_file *s, struct icc_node *n)
{
if (!n)
return;
- seq_printf(s, "%-30s %12u %12u\n",
+ seq_printf(s, "%-42s %12u %12u\n",
n->name, n->avg_bw, n->peak_bw);
}
@@ -65,8 +42,8 @@ static int icc_summary_show(struct seq_file *s, void *data)
{
struct icc_provider *provider;
- seq_puts(s, " node avg peak\n");
- seq_puts(s, "--------------------------------------------------------\n");
+ seq_puts(s, " node tag avg peak\n");
+ seq_puts(s, "--------------------------------------------------------------------\n");
mutex_lock(&icc_lock);
@@ -81,8 +58,8 @@ static int icc_summary_show(struct seq_file *s, void *data)
if (!r->dev)
continue;
- seq_printf(s, " %-26s %12u %12u\n",
- dev_name(r->dev), r->avg_bw,
+ seq_printf(s, " %-27s %12u %12u %12u\n",
+ dev_name(r->dev), r->tag, r->avg_bw,
r->peak_bw);
}
}
@@ -94,6 +71,70 @@ static int icc_summary_show(struct seq_file *s, void *data)
}
DEFINE_SHOW_ATTRIBUTE(icc_summary);
+static void icc_graph_show_link(struct seq_file *s, int level,
+ struct icc_node *n, struct icc_node *m)
+{
+ seq_printf(s, "%s\"%d:%s\" -> \"%d:%s\"\n",
+ level == 2 ? "\t\t" : "\t",
+ n->id, n->name, m->id, m->name);
+}
+
+static void icc_graph_show_node(struct seq_file *s, struct icc_node *n)
+{
+ seq_printf(s, "\t\t\"%d:%s\" [label=\"%d:%s",
+ n->id, n->name, n->id, n->name);
+ seq_printf(s, "\n\t\t\t|avg_bw=%ukBps", n->avg_bw);
+ seq_printf(s, "\n\t\t\t|peak_bw=%ukBps", n->peak_bw);
+ seq_puts(s, "\"]\n");
+}
+
+static int icc_graph_show(struct seq_file *s, void *data)
+{
+ struct icc_provider *provider;
+ struct icc_node *n;
+ int cluster_index = 0;
+ int i;
+
+ seq_puts(s, "digraph {\n\trankdir = LR\n\tnode [shape = record]\n");
+ mutex_lock(&icc_lock);
+
+ /* draw providers as cluster subgraphs */
+ cluster_index = 0;
+ list_for_each_entry(provider, &icc_providers, provider_list) {
+ seq_printf(s, "\tsubgraph cluster_%d {\n", ++cluster_index);
+ if (provider->dev)
+ seq_printf(s, "\t\tlabel = \"%s\"\n",
+ dev_name(provider->dev));
+
+ /* draw nodes */
+ list_for_each_entry(n, &provider->nodes, node_list)
+ icc_graph_show_node(s, n);
+
+ /* draw internal links */
+ list_for_each_entry(n, &provider->nodes, node_list)
+ for (i = 0; i < n->num_links; ++i)
+ if (n->provider == n->links[i]->provider)
+ icc_graph_show_link(s, 2, n,
+ n->links[i]);
+
+ seq_puts(s, "\t}\n");
+ }
+
+ /* draw external links */
+ list_for_each_entry(provider, &icc_providers, provider_list)
+ list_for_each_entry(n, &provider->nodes, node_list)
+ for (i = 0; i < n->num_links; ++i)
+ if (n->provider != n->links[i]->provider)
+ icc_graph_show_link(s, 1, n,
+ n->links[i]);
+
+ mutex_unlock(&icc_lock);
+ seq_puts(s, "}");
+
+ return 0;
+}
+DEFINE_SHOW_ATTRIBUTE(icc_graph);
+
static struct icc_node *node_find(const int id)
{
return idr_find(&icc_idr, id);
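icc_graph_show() above emits the whole topology as Graphviz: one cluster subgraph per provider, record-shaped nodes annotated with their aggregated bandwidths, intra-provider edges drawn inside the cluster and inter-provider edges outside it. Reading /sys/kernel/debug/interconnect/interconnect_graph (registered further down) and piping it through dot -Tsvg renders the picture. Following the format strings above, the output for a hypothetical one-provider, two-node topology would look like:

	digraph {
		rankdir = LR
		node [shape = record]
		subgraph cluster_1 {
			label = "example_provider"
			"1:node_a" [label="1:node_a
				|avg_bw=0kBps
				|peak_bw=0kBps"]
			"2:node_b" [label="2:node_b
				|avg_bw=0kBps
				|peak_bw=0kBps"]
			"1:node_a" -> "2:node_b"
		}
	}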
@@ -244,6 +285,16 @@ out:
return ret;
}
+int icc_std_aggregate(struct icc_node *node, u32 tag, u32 avg_bw,
+ u32 peak_bw, u32 *agg_avg, u32 *agg_peak)
+{
+ *agg_avg += avg_bw;
+ *agg_peak = max(*agg_peak, peak_bw);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(icc_std_aggregate);
+
/* of_icc_xlate_onecell() - Translate function using a single index.
* @spec: OF phandle args to map into an interconnect node.
* @data: private data (pointer to struct icc_onecell_data)
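icc_std_aggregate() becomes the stock aggregation policy that several provider drivers below (msm8974, qcs404, and the new msm8916) had each open-coded: average bandwidths add up across requests, peaks take the maximum. A hedged numeric check of what it computes, with a hypothetical node (the helper ignores its node and tag arguments):

	static void example_aggregate(struct icc_node *node)
	{
		u32 agg_avg = 0, agg_peak = 0;

		icc_std_aggregate(node, 0, 100, 200, &agg_avg, &agg_peak);
		icc_std_aggregate(node, 0, 50, 500, &agg_avg, &agg_peak);
		/* agg_avg == 150, agg_peak == 500 */
	}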
@@ -382,9 +433,17 @@ struct icc_path *of_icc_get(struct device *dev, const char *name)
mutex_lock(&icc_lock);
path = path_find(dev, src_node, dst_node);
- if (IS_ERR(path))
- dev_err(dev, "%s: invalid path=%ld\n", __func__, PTR_ERR(path));
mutex_unlock(&icc_lock);
+ if (IS_ERR(path)) {
+ dev_err(dev, "%s: invalid path=%ld\n", __func__, PTR_ERR(path));
+ return path;
+ }
+
+ if (name)
+ path->name = kstrdup_const(name, GFP_KERNEL);
+ else
+ path->name = kasprintf(GFP_KERNEL, "%s-%s",
+ src_node->name, dst_node->name);
return path;
}
@@ -436,9 +495,12 @@ int icc_set_bw(struct icc_path *path, u32 avg_bw, u32 peak_bw)
size_t i;
int ret;
- if (!path || !path->num_nodes)
+ if (!path)
return 0;
+ if (WARN_ON(IS_ERR(path) || !path->num_nodes))
+ return -EINVAL;
+
mutex_lock(&icc_lock);
old_avg = path->reqs[0].avg_bw;
@@ -453,6 +515,8 @@ int icc_set_bw(struct icc_path *path, u32 avg_bw, u32 peak_bw)
/* aggregate requests for this node */
aggregate_requests(node);
+
+ trace_icc_set_bw(path, node, i, avg_bw, peak_bw);
}
ret = apply_constraints(path);
@@ -471,6 +535,8 @@ int icc_set_bw(struct icc_path *path, u32 avg_bw, u32 peak_bw)
mutex_unlock(&icc_lock);
+ trace_icc_set_bw_end(path, ret);
+
return ret;
}
EXPORT_SYMBOL_GPL(icc_set_bw);
@@ -507,9 +573,12 @@ struct icc_path *icc_get(struct device *dev, const int src_id, const int dst_id)
goto out;
path = path_find(dev, src, dst);
- if (IS_ERR(path))
+ if (IS_ERR(path)) {
dev_err(dev, "%s: invalid path=%ld\n", __func__, PTR_ERR(path));
+ goto out;
+ }
+ path->name = kasprintf(GFP_KERNEL, "%s-%s", src->name, dst->name);
out:
mutex_unlock(&icc_lock);
return path;
@@ -545,6 +614,7 @@ void icc_put(struct icc_path *path)
}
mutex_unlock(&icc_lock);
+ kfree_const(path->name);
kfree(path);
}
EXPORT_SYMBOL_GPL(icc_put);
@@ -743,6 +813,28 @@ void icc_node_del(struct icc_node *node)
EXPORT_SYMBOL_GPL(icc_node_del);
/**
+ * icc_nodes_remove() - remove all previously added nodes from provider
+ * @provider: the interconnect provider we are removing nodes from
+ *
+ * Return: 0 on success, or an error code otherwise
+ */
+int icc_nodes_remove(struct icc_provider *provider)
+{
+ struct icc_node *n, *tmp;
+
+ if (WARN_ON(IS_ERR_OR_NULL(provider)))
+ return -EINVAL;
+
+ list_for_each_entry_safe_reverse(n, tmp, &provider->nodes, node_list) {
+ icc_node_del(n);
+ icc_node_destroy(n->id);
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(icc_nodes_remove);
+
+/**
* icc_provider_add() - add a new interconnect provider
* @provider: the interconnect provider that will be added into topology
*
@@ -802,6 +894,8 @@ static int __init icc_init(void)
icc_debugfs_dir = debugfs_create_dir("interconnect", NULL);
debugfs_create_file("interconnect_summary", 0444,
icc_debugfs_dir, NULL, &icc_summary_fops);
+ debugfs_create_file("interconnect_graph", 0444,
+ icc_debugfs_dir, NULL, &icc_graph_fops);
return 0;
}
diff --git a/drivers/interconnect/internal.h b/drivers/interconnect/internal.h
new file mode 100644
index 000000000000..bf18cb7239df
--- /dev/null
+++ b/drivers/interconnect/internal.h
@@ -0,0 +1,42 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Interconnect framework internal structs
+ *
+ * Copyright (c) 2019, Linaro Ltd.
+ * Author: Georgi Djakov <georgi.djakov@linaro.org>
+ */
+
+#ifndef __DRIVERS_INTERCONNECT_INTERNAL_H
+#define __DRIVERS_INTERCONNECT_INTERNAL_H
+
+/**
+ * struct icc_req - constraints that are attached to each node
+ * @req_node: entry in list of requests for the particular @node
+ * @node: the interconnect node to which this constraint applies
+ * @dev: reference to the device that sets the constraints
+ * @tag: path tag (optional)
+ * @avg_bw: an integer describing the average bandwidth in kBps
+ * @peak_bw: an integer describing the peak bandwidth in kBps
+ */
+struct icc_req {
+ struct hlist_node req_node;
+ struct icc_node *node;
+ struct device *dev;
+ u32 tag;
+ u32 avg_bw;
+ u32 peak_bw;
+};
+
+/**
+ * struct icc_path - interconnect path structure
+ * @name: a string name of the path (useful for ftrace)
+ * @num_nodes: number of hops (nodes)
+ * @reqs: array of the requests applicable to this path of nodes
+ */
+struct icc_path {
+ const char *name;
+ size_t num_nodes;
+ struct icc_req reqs[];
+};
+
+#endif
diff --git a/drivers/interconnect/qcom/Kconfig b/drivers/interconnect/qcom/Kconfig
index 2f9304d1db49..76938ece1658 100644
--- a/drivers/interconnect/qcom/Kconfig
+++ b/drivers/interconnect/qcom/Kconfig
@@ -5,6 +5,15 @@ config INTERCONNECT_QCOM
help
Support for Qualcomm's Network-on-Chip interconnect hardware.
+config INTERCONNECT_QCOM_MSM8916
+ tristate "Qualcomm MSM8916 interconnect driver"
+ depends on INTERCONNECT_QCOM
+ depends on QCOM_SMD_RPM
+ select INTERCONNECT_QCOM_SMD_RPM
+ help
+ This is a driver for the Qualcomm Network-on-Chip on msm8916-based
+ platforms.
+
config INTERCONNECT_QCOM_MSM8974
tristate "Qualcomm MSM8974 interconnect driver"
depends on INTERCONNECT_QCOM
diff --git a/drivers/interconnect/qcom/Makefile b/drivers/interconnect/qcom/Makefile
index 9adf9e380545..e8271575e3d8 100644
--- a/drivers/interconnect/qcom/Makefile
+++ b/drivers/interconnect/qcom/Makefile
@@ -1,10 +1,12 @@
# SPDX-License-Identifier: GPL-2.0
+qnoc-msm8916-objs := msm8916.o
qnoc-msm8974-objs := msm8974.o
qnoc-qcs404-objs := qcs404.o
qnoc-sdm845-objs := sdm845.o
icc-smd-rpm-objs := smd-rpm.o
+obj-$(CONFIG_INTERCONNECT_QCOM_MSM8916) += qnoc-msm8916.o
obj-$(CONFIG_INTERCONNECT_QCOM_MSM8974) += qnoc-msm8974.o
obj-$(CONFIG_INTERCONNECT_QCOM_QCS404) += qnoc-qcs404.o
obj-$(CONFIG_INTERCONNECT_QCOM_SDM845) += qnoc-sdm845.o
diff --git a/drivers/interconnect/qcom/msm8916.c b/drivers/interconnect/qcom/msm8916.c
new file mode 100644
index 000000000000..e94f3c5228b7
--- /dev/null
+++ b/drivers/interconnect/qcom/msm8916.c
@@ -0,0 +1,554 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2018-2020 Linaro Ltd
+ * Author: Georgi Djakov <georgi.djakov@linaro.org>
+ */
+
+#include <linux/clk.h>
+#include <linux/device.h>
+#include <linux/interconnect-provider.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/of_device.h>
+
+#include <dt-bindings/interconnect/qcom,msm8916.h>
+
+#include "smd-rpm.h"
+
+#define RPM_BUS_MASTER_REQ 0x73616d62
+#define RPM_BUS_SLAVE_REQ 0x766c7362
+
+enum {
+ MSM8916_BIMC_SNOC_MAS = 1,
+ MSM8916_BIMC_SNOC_SLV,
+ MSM8916_MASTER_AMPSS_M0,
+ MSM8916_MASTER_LPASS,
+ MSM8916_MASTER_BLSP_1,
+ MSM8916_MASTER_DEHR,
+ MSM8916_MASTER_GRAPHICS_3D,
+ MSM8916_MASTER_JPEG,
+ MSM8916_MASTER_MDP_PORT0,
+ MSM8916_MASTER_CRYPTO_CORE0,
+ MSM8916_MASTER_SDCC_1,
+ MSM8916_MASTER_SDCC_2,
+ MSM8916_MASTER_QDSS_BAM,
+ MSM8916_MASTER_QDSS_ETR,
+ MSM8916_MASTER_SNOC_CFG,
+ MSM8916_MASTER_SPDM,
+ MSM8916_MASTER_TCU0,
+ MSM8916_MASTER_TCU1,
+ MSM8916_MASTER_USB_HS,
+ MSM8916_MASTER_VFE,
+ MSM8916_MASTER_VIDEO_P0,
+ MSM8916_SNOC_MM_INT_0,
+ MSM8916_SNOC_MM_INT_1,
+ MSM8916_SNOC_MM_INT_2,
+ MSM8916_SNOC_MM_INT_BIMC,
+ MSM8916_PNOC_INT_0,
+ MSM8916_PNOC_INT_1,
+ MSM8916_PNOC_MAS_0,
+ MSM8916_PNOC_MAS_1,
+ MSM8916_PNOC_SLV_0,
+ MSM8916_PNOC_SLV_1,
+ MSM8916_PNOC_SLV_2,
+ MSM8916_PNOC_SLV_3,
+ MSM8916_PNOC_SLV_4,
+ MSM8916_PNOC_SLV_8,
+ MSM8916_PNOC_SLV_9,
+ MSM8916_PNOC_SNOC_MAS,
+ MSM8916_PNOC_SNOC_SLV,
+ MSM8916_SNOC_QDSS_INT,
+ MSM8916_SLAVE_AMPSS_L2,
+ MSM8916_SLAVE_APSS,
+ MSM8916_SLAVE_LPASS,
+ MSM8916_SLAVE_BIMC_CFG,
+ MSM8916_SLAVE_BLSP_1,
+ MSM8916_SLAVE_BOOT_ROM,
+ MSM8916_SLAVE_CAMERA_CFG,
+ MSM8916_SLAVE_CATS_128,
+ MSM8916_SLAVE_OCMEM_64,
+ MSM8916_SLAVE_CLK_CTL,
+ MSM8916_SLAVE_CRYPTO_0_CFG,
+ MSM8916_SLAVE_DEHR_CFG,
+ MSM8916_SLAVE_DISPLAY_CFG,
+ MSM8916_SLAVE_EBI_CH0,
+ MSM8916_SLAVE_GRAPHICS_3D_CFG,
+ MSM8916_SLAVE_IMEM_CFG,
+ MSM8916_SLAVE_IMEM,
+ MSM8916_SLAVE_MPM,
+ MSM8916_SLAVE_MSG_RAM,
+ MSM8916_SLAVE_MSS,
+ MSM8916_SLAVE_PDM,
+ MSM8916_SLAVE_PMIC_ARB,
+ MSM8916_SLAVE_PNOC_CFG,
+ MSM8916_SLAVE_PRNG,
+ MSM8916_SLAVE_QDSS_CFG,
+ MSM8916_SLAVE_QDSS_STM,
+ MSM8916_SLAVE_RBCPR_CFG,
+ MSM8916_SLAVE_SDCC_1,
+ MSM8916_SLAVE_SDCC_2,
+ MSM8916_SLAVE_SECURITY,
+ MSM8916_SLAVE_SNOC_CFG,
+ MSM8916_SLAVE_SPDM,
+ MSM8916_SLAVE_SRVC_SNOC,
+ MSM8916_SLAVE_TCSR,
+ MSM8916_SLAVE_TLMM,
+ MSM8916_SLAVE_USB_HS,
+ MSM8916_SLAVE_VENUS_CFG,
+ MSM8916_SNOC_BIMC_0_MAS,
+ MSM8916_SNOC_BIMC_0_SLV,
+ MSM8916_SNOC_BIMC_1_MAS,
+ MSM8916_SNOC_BIMC_1_SLV,
+ MSM8916_SNOC_INT_0,
+ MSM8916_SNOC_INT_1,
+ MSM8916_SNOC_INT_BIMC,
+ MSM8916_SNOC_PNOC_MAS,
+ MSM8916_SNOC_PNOC_SLV,
+};
+
+#define to_msm8916_provider(_provider) \
+ container_of(_provider, struct msm8916_icc_provider, provider)
+
+static const struct clk_bulk_data msm8916_bus_clocks[] = {
+ { .id = "bus" },
+ { .id = "bus_a" },
+};
+
+/**
+ * struct msm8916_icc_provider - Qualcomm specific interconnect provider
+ * @provider: generic interconnect provider
+ * @bus_clks: the clk_bulk_data table of bus clocks
+ * @num_clks: the total number of clk_bulk_data entries
+ */
+struct msm8916_icc_provider {
+ struct icc_provider provider;
+ struct clk_bulk_data *bus_clks;
+ int num_clks;
+};
+
+#define MSM8916_MAX_LINKS 8
+
+/**
+ * struct msm8916_icc_node - Qualcomm specific interconnect nodes
+ * @name: the node name used in debugfs
+ * @id: a unique node identifier
+ * @links: an array of nodes where we can go next while traversing
+ * @num_links: the total number of @links
+ * @buswidth: width of the interconnect between a node and the bus (bytes)
+ * @mas_rpm_id: RPM ID for devices that are bus masters
+ * @slv_rpm_id: RPM ID for devices that are bus slaves
+ * @rate: current bus clock rate in Hz
+ */
+struct msm8916_icc_node {
+ unsigned char *name;
+ u16 id;
+ u16 links[MSM8916_MAX_LINKS];
+ u16 num_links;
+ u16 buswidth;
+ int mas_rpm_id;
+ int slv_rpm_id;
+ u64 rate;
+};
+
+struct msm8916_icc_desc {
+ struct msm8916_icc_node **nodes;
+ size_t num_nodes;
+};
+
+#define DEFINE_QNODE(_name, _id, _buswidth, _mas_rpm_id, _slv_rpm_id, \
+ ...) \
+ static struct msm8916_icc_node _name = { \
+ .name = #_name, \
+ .id = _id, \
+ .buswidth = _buswidth, \
+ .mas_rpm_id = _mas_rpm_id, \
+ .slv_rpm_id = _slv_rpm_id, \
+ .num_links = ARRAY_SIZE(((int[]){ __VA_ARGS__ })), \
+ .links = { __VA_ARGS__ }, \
+ }
+
+DEFINE_QNODE(bimc_snoc_mas, MSM8916_BIMC_SNOC_MAS, 8, -1, -1, MSM8916_BIMC_SNOC_SLV);
+DEFINE_QNODE(bimc_snoc_slv, MSM8916_BIMC_SNOC_SLV, 8, -1, -1, MSM8916_SNOC_INT_0, MSM8916_SNOC_INT_1);
+DEFINE_QNODE(mas_apss, MSM8916_MASTER_AMPSS_M0, 8, -1, -1, MSM8916_SLAVE_EBI_CH0, MSM8916_BIMC_SNOC_MAS, MSM8916_SLAVE_AMPSS_L2);
+DEFINE_QNODE(mas_audio, MSM8916_MASTER_LPASS, 4, -1, -1, MSM8916_PNOC_MAS_0);
+DEFINE_QNODE(mas_blsp_1, MSM8916_MASTER_BLSP_1, 4, -1, -1, MSM8916_PNOC_MAS_1);
+DEFINE_QNODE(mas_dehr, MSM8916_MASTER_DEHR, 4, -1, -1, MSM8916_PNOC_MAS_0);
+DEFINE_QNODE(mas_gfx, MSM8916_MASTER_GRAPHICS_3D, 8, -1, -1, MSM8916_SLAVE_EBI_CH0, MSM8916_BIMC_SNOC_MAS, MSM8916_SLAVE_AMPSS_L2);
+DEFINE_QNODE(mas_jpeg, MSM8916_MASTER_JPEG, 16, -1, -1, MSM8916_SNOC_MM_INT_0, MSM8916_SNOC_MM_INT_2);
+DEFINE_QNODE(mas_mdp, MSM8916_MASTER_MDP_PORT0, 16, -1, -1, MSM8916_SNOC_MM_INT_0, MSM8916_SNOC_MM_INT_2);
+DEFINE_QNODE(mas_pcnoc_crypto_0, MSM8916_MASTER_CRYPTO_CORE0, 8, -1, -1, MSM8916_PNOC_INT_1);
+DEFINE_QNODE(mas_pcnoc_sdcc_1, MSM8916_MASTER_SDCC_1, 8, -1, -1, MSM8916_PNOC_INT_1);
+DEFINE_QNODE(mas_pcnoc_sdcc_2, MSM8916_MASTER_SDCC_2, 8, -1, -1, MSM8916_PNOC_INT_1);
+DEFINE_QNODE(mas_qdss_bam, MSM8916_MASTER_QDSS_BAM, 8, -1, -1, MSM8916_SNOC_QDSS_INT);
+DEFINE_QNODE(mas_qdss_etr, MSM8916_MASTER_QDSS_ETR, 8, -1, -1, MSM8916_SNOC_QDSS_INT);
+DEFINE_QNODE(mas_snoc_cfg, MSM8916_MASTER_SNOC_CFG, 4, 20, -1, MSM8916_SNOC_QDSS_INT);
+DEFINE_QNODE(mas_spdm, MSM8916_MASTER_SPDM, 4, -1, -1, MSM8916_PNOC_MAS_0);
+DEFINE_QNODE(mas_tcu0, MSM8916_MASTER_TCU0, 8, -1, -1, MSM8916_SLAVE_EBI_CH0, MSM8916_BIMC_SNOC_MAS, MSM8916_SLAVE_AMPSS_L2);
+DEFINE_QNODE(mas_tcu1, MSM8916_MASTER_TCU1, 8, -1, -1, MSM8916_SLAVE_EBI_CH0, MSM8916_BIMC_SNOC_MAS, MSM8916_SLAVE_AMPSS_L2);
+DEFINE_QNODE(mas_usb_hs, MSM8916_MASTER_USB_HS, 4, -1, -1, MSM8916_PNOC_MAS_1);
+DEFINE_QNODE(mas_vfe, MSM8916_MASTER_VFE, 16, -1, -1, MSM8916_SNOC_MM_INT_1, MSM8916_SNOC_MM_INT_2);
+DEFINE_QNODE(mas_video, MSM8916_MASTER_VIDEO_P0, 16, -1, -1, MSM8916_SNOC_MM_INT_0, MSM8916_SNOC_MM_INT_2);
+DEFINE_QNODE(mm_int_0, MSM8916_SNOC_MM_INT_0, 16, -1, -1, MSM8916_SNOC_MM_INT_BIMC);
+DEFINE_QNODE(mm_int_1, MSM8916_SNOC_MM_INT_1, 16, -1, -1, MSM8916_SNOC_MM_INT_BIMC);
+DEFINE_QNODE(mm_int_2, MSM8916_SNOC_MM_INT_2, 16, -1, -1, MSM8916_SNOC_INT_0);
+DEFINE_QNODE(mm_int_bimc, MSM8916_SNOC_MM_INT_BIMC, 16, -1, -1, MSM8916_SNOC_BIMC_1_MAS);
+DEFINE_QNODE(pcnoc_int_0, MSM8916_PNOC_INT_0, 8, -1, -1, MSM8916_PNOC_SNOC_MAS, MSM8916_PNOC_SLV_0, MSM8916_PNOC_SLV_1, MSM8916_PNOC_SLV_2, MSM8916_PNOC_SLV_3, MSM8916_PNOC_SLV_4, MSM8916_PNOC_SLV_8, MSM8916_PNOC_SLV_9);
+DEFINE_QNODE(pcnoc_int_1, MSM8916_PNOC_INT_1, 8, -1, -1, MSM8916_PNOC_SNOC_MAS);
+DEFINE_QNODE(pcnoc_m_0, MSM8916_PNOC_MAS_0, 8, -1, -1, MSM8916_PNOC_INT_0);
+DEFINE_QNODE(pcnoc_m_1, MSM8916_PNOC_MAS_1, 8, -1, -1, MSM8916_PNOC_SNOC_MAS);
+DEFINE_QNODE(pcnoc_s_0, MSM8916_PNOC_SLV_0, 8, -1, -1, MSM8916_SLAVE_CLK_CTL, MSM8916_SLAVE_TLMM, MSM8916_SLAVE_TCSR, MSM8916_SLAVE_SECURITY, MSM8916_SLAVE_MSS);
+DEFINE_QNODE(pcnoc_s_1, MSM8916_PNOC_SLV_1, 8, -1, -1, MSM8916_SLAVE_IMEM_CFG, MSM8916_SLAVE_CRYPTO_0_CFG, MSM8916_SLAVE_MSG_RAM, MSM8916_SLAVE_PDM, MSM8916_SLAVE_PRNG);
+DEFINE_QNODE(pcnoc_s_2, MSM8916_PNOC_SLV_2, 8, -1, -1, MSM8916_SLAVE_SPDM, MSM8916_SLAVE_BOOT_ROM, MSM8916_SLAVE_BIMC_CFG, MSM8916_SLAVE_PNOC_CFG, MSM8916_SLAVE_PMIC_ARB);
+DEFINE_QNODE(pcnoc_s_3, MSM8916_PNOC_SLV_3, 8, -1, -1, MSM8916_SLAVE_MPM, MSM8916_SLAVE_SNOC_CFG, MSM8916_SLAVE_RBCPR_CFG, MSM8916_SLAVE_QDSS_CFG, MSM8916_SLAVE_DEHR_CFG);
+DEFINE_QNODE(pcnoc_s_4, MSM8916_PNOC_SLV_4, 8, -1, -1, MSM8916_SLAVE_VENUS_CFG, MSM8916_SLAVE_CAMERA_CFG, MSM8916_SLAVE_DISPLAY_CFG);
+DEFINE_QNODE(pcnoc_s_8, MSM8916_PNOC_SLV_8, 8, -1, -1, MSM8916_SLAVE_USB_HS, MSM8916_SLAVE_SDCC_1, MSM8916_SLAVE_BLSP_1);
+DEFINE_QNODE(pcnoc_s_9, MSM8916_PNOC_SLV_9, 8, -1, -1, MSM8916_SLAVE_SDCC_2, MSM8916_SLAVE_LPASS, MSM8916_SLAVE_GRAPHICS_3D_CFG);
+DEFINE_QNODE(pcnoc_snoc_mas, MSM8916_PNOC_SNOC_MAS, 8, 29, -1, MSM8916_PNOC_SNOC_SLV);
+DEFINE_QNODE(pcnoc_snoc_slv, MSM8916_PNOC_SNOC_SLV, 8, -1, 45, MSM8916_SNOC_INT_0, MSM8916_SNOC_INT_BIMC, MSM8916_SNOC_INT_1);
+DEFINE_QNODE(qdss_int, MSM8916_SNOC_QDSS_INT, 8, -1, -1, MSM8916_SNOC_INT_0, MSM8916_SNOC_INT_BIMC);
+DEFINE_QNODE(slv_apps_l2, MSM8916_SLAVE_AMPSS_L2, 8, -1, -1, 0);
+DEFINE_QNODE(slv_apss, MSM8916_SLAVE_APSS, 4, -1, 20, 0);
+DEFINE_QNODE(slv_audio, MSM8916_SLAVE_LPASS, 4, -1, -1, 0);
+DEFINE_QNODE(slv_bimc_cfg, MSM8916_SLAVE_BIMC_CFG, 4, -1, -1, 0);
+DEFINE_QNODE(slv_blsp_1, MSM8916_SLAVE_BLSP_1, 4, -1, -1, 0);
+DEFINE_QNODE(slv_boot_rom, MSM8916_SLAVE_BOOT_ROM, 4, -1, -1, 0);
+DEFINE_QNODE(slv_camera_cfg, MSM8916_SLAVE_CAMERA_CFG, 4, -1, -1, 0);
+DEFINE_QNODE(slv_cats_0, MSM8916_SLAVE_CATS_128, 16, -1, 106, 0);
+DEFINE_QNODE(slv_cats_1, MSM8916_SLAVE_OCMEM_64, 8, -1, 107, 0);
+DEFINE_QNODE(slv_clk_ctl, MSM8916_SLAVE_CLK_CTL, 4, -1, -1, 0);
+DEFINE_QNODE(slv_crypto_0_cfg, MSM8916_SLAVE_CRYPTO_0_CFG, 4, -1, -1, 0);
+DEFINE_QNODE(slv_dehr_cfg, MSM8916_SLAVE_DEHR_CFG, 4, -1, -1, 0);
+DEFINE_QNODE(slv_display_cfg, MSM8916_SLAVE_DISPLAY_CFG, 4, -1, -1, 0);
+DEFINE_QNODE(slv_ebi_ch0, MSM8916_SLAVE_EBI_CH0, 8, -1, 0, 0);
+DEFINE_QNODE(slv_gfx_cfg, MSM8916_SLAVE_GRAPHICS_3D_CFG, 4, -1, -1, 0);
+DEFINE_QNODE(slv_imem_cfg, MSM8916_SLAVE_IMEM_CFG, 4, -1, -1, 0);
+DEFINE_QNODE(slv_imem, MSM8916_SLAVE_IMEM, 8, -1, 26, 0);
+DEFINE_QNODE(slv_mpm, MSM8916_SLAVE_MPM, 4, -1, -1, 0);
+DEFINE_QNODE(slv_msg_ram, MSM8916_SLAVE_MSG_RAM, 4, -1, -1, 0);
+DEFINE_QNODE(slv_mss, MSM8916_SLAVE_MSS, 4, -1, -1, 0);
+DEFINE_QNODE(slv_pdm, MSM8916_SLAVE_PDM, 4, -1, -1, 0);
+DEFINE_QNODE(slv_pmic_arb, MSM8916_SLAVE_PMIC_ARB, 4, -1, -1, 0);
+DEFINE_QNODE(slv_pcnoc_cfg, MSM8916_SLAVE_PNOC_CFG, 4, -1, -1, 0);
+DEFINE_QNODE(slv_prng, MSM8916_SLAVE_PRNG, 4, -1, -1, 0);
+DEFINE_QNODE(slv_qdss_cfg, MSM8916_SLAVE_QDSS_CFG, 4, -1, -1, 0);
+DEFINE_QNODE(slv_qdss_stm, MSM8916_SLAVE_QDSS_STM, 4, -1, 30, 0);
+DEFINE_QNODE(slv_rbcpr_cfg, MSM8916_SLAVE_RBCPR_CFG, 4, -1, -1, 0);
+DEFINE_QNODE(slv_sdcc_1, MSM8916_SLAVE_SDCC_1, 4, -1, -1, 0);
+DEFINE_QNODE(slv_sdcc_2, MSM8916_SLAVE_SDCC_2, 4, -1, -1, 0);
+DEFINE_QNODE(slv_security, MSM8916_SLAVE_SECURITY, 4, -1, -1, 0);
+DEFINE_QNODE(slv_snoc_cfg, MSM8916_SLAVE_SNOC_CFG, 4, -1, -1, 0);
+DEFINE_QNODE(slv_spdm, MSM8916_SLAVE_SPDM, 4, -1, -1, 0);
+DEFINE_QNODE(slv_srvc_snoc, MSM8916_SLAVE_SRVC_SNOC, 8, -1, 29, 0);
+DEFINE_QNODE(slv_tcsr, MSM8916_SLAVE_TCSR, 4, -1, -1, 0);
+DEFINE_QNODE(slv_tlmm, MSM8916_SLAVE_TLMM, 4, -1, -1, 0);
+DEFINE_QNODE(slv_usb_hs, MSM8916_SLAVE_USB_HS, 4, -1, -1, 0);
+DEFINE_QNODE(slv_venus_cfg, MSM8916_SLAVE_VENUS_CFG, 4, -1, -1, 0);
+DEFINE_QNODE(snoc_bimc_0_mas, MSM8916_SNOC_BIMC_0_MAS, 8, 3, -1, MSM8916_SNOC_BIMC_0_SLV);
+DEFINE_QNODE(snoc_bimc_0_slv, MSM8916_SNOC_BIMC_0_SLV, 8, -1, 24, MSM8916_SLAVE_EBI_CH0);
+DEFINE_QNODE(snoc_bimc_1_mas, MSM8916_SNOC_BIMC_1_MAS, 16, -1, -1, MSM8916_SNOC_BIMC_1_SLV);
+DEFINE_QNODE(snoc_bimc_1_slv, MSM8916_SNOC_BIMC_1_SLV, 8, -1, -1, MSM8916_SLAVE_EBI_CH0);
+DEFINE_QNODE(snoc_int_0, MSM8916_SNOC_INT_0, 8, 99, 130, MSM8916_SLAVE_QDSS_STM, MSM8916_SLAVE_IMEM, MSM8916_SNOC_PNOC_MAS);
+DEFINE_QNODE(snoc_int_1, MSM8916_SNOC_INT_1, 8, 100, 131, MSM8916_SLAVE_APSS, MSM8916_SLAVE_CATS_128, MSM8916_SLAVE_OCMEM_64);
+DEFINE_QNODE(snoc_int_bimc, MSM8916_SNOC_INT_BIMC, 8, 101, 132, MSM8916_SNOC_BIMC_0_MAS);
+DEFINE_QNODE(snoc_pcnoc_mas, MSM8916_SNOC_PNOC_MAS, 8, -1, -1, MSM8916_SNOC_PNOC_SLV);
+DEFINE_QNODE(snoc_pcnoc_slv, MSM8916_SNOC_PNOC_SLV, 8, -1, -1, MSM8916_PNOC_INT_0);
+
+static struct msm8916_icc_node *msm8916_snoc_nodes[] = {
+ [BIMC_SNOC_SLV] = &bimc_snoc_slv,
+ [MASTER_JPEG] = &mas_jpeg,
+ [MASTER_MDP_PORT0] = &mas_mdp,
+ [MASTER_QDSS_BAM] = &mas_qdss_bam,
+ [MASTER_QDSS_ETR] = &mas_qdss_etr,
+ [MASTER_SNOC_CFG] = &mas_snoc_cfg,
+ [MASTER_VFE] = &mas_vfe,
+ [MASTER_VIDEO_P0] = &mas_video,
+ [SNOC_MM_INT_0] = &mm_int_0,
+ [SNOC_MM_INT_1] = &mm_int_1,
+ [SNOC_MM_INT_2] = &mm_int_2,
+ [SNOC_MM_INT_BIMC] = &mm_int_bimc,
+ [PCNOC_SNOC_SLV] = &pcnoc_snoc_slv,
+ [SLAVE_APSS] = &slv_apss,
+ [SLAVE_CATS_128] = &slv_cats_0,
+ [SLAVE_OCMEM_64] = &slv_cats_1,
+ [SLAVE_IMEM] = &slv_imem,
+ [SLAVE_QDSS_STM] = &slv_qdss_stm,
+ [SLAVE_SRVC_SNOC] = &slv_srvc_snoc,
+ [SNOC_BIMC_0_MAS] = &snoc_bimc_0_mas,
+ [SNOC_BIMC_1_MAS] = &snoc_bimc_1_mas,
+ [SNOC_INT_0] = &snoc_int_0,
+ [SNOC_INT_1] = &snoc_int_1,
+ [SNOC_INT_BIMC] = &snoc_int_bimc,
+ [SNOC_PCNOC_MAS] = &snoc_pcnoc_mas,
+ [SNOC_QDSS_INT] = &qdss_int,
+};
+
+static struct msm8916_icc_desc msm8916_snoc = {
+ .nodes = msm8916_snoc_nodes,
+ .num_nodes = ARRAY_SIZE(msm8916_snoc_nodes),
+};
+
+static struct msm8916_icc_node *msm8916_bimc_nodes[] = {
+ [BIMC_SNOC_MAS] = &bimc_snoc_mas,
+ [MASTER_AMPSS_M0] = &mas_apss,
+ [MASTER_GRAPHICS_3D] = &mas_gfx,
+ [MASTER_TCU0] = &mas_tcu0,
+ [MASTER_TCU1] = &mas_tcu1,
+ [SLAVE_AMPSS_L2] = &slv_apps_l2,
+ [SLAVE_EBI_CH0] = &slv_ebi_ch0,
+ [SNOC_BIMC_0_SLV] = &snoc_bimc_0_slv,
+ [SNOC_BIMC_1_SLV] = &snoc_bimc_1_slv,
+};
+
+static struct msm8916_icc_desc msm8916_bimc = {
+ .nodes = msm8916_bimc_nodes,
+ .num_nodes = ARRAY_SIZE(msm8916_bimc_nodes),
+};
+
+static struct msm8916_icc_node *msm8916_pcnoc_nodes[] = {
+ [MASTER_BLSP_1] = &mas_blsp_1,
+ [MASTER_DEHR] = &mas_dehr,
+ [MASTER_LPASS] = &mas_audio,
+ [MASTER_CRYPTO_CORE0] = &mas_pcnoc_crypto_0,
+ [MASTER_SDCC_1] = &mas_pcnoc_sdcc_1,
+ [MASTER_SDCC_2] = &mas_pcnoc_sdcc_2,
+ [MASTER_SPDM] = &mas_spdm,
+ [MASTER_USB_HS] = &mas_usb_hs,
+ [PCNOC_INT_0] = &pcnoc_int_0,
+ [PCNOC_INT_1] = &pcnoc_int_1,
+ [PCNOC_MAS_0] = &pcnoc_m_0,
+ [PCNOC_MAS_1] = &pcnoc_m_1,
+ [PCNOC_SLV_0] = &pcnoc_s_0,
+ [PCNOC_SLV_1] = &pcnoc_s_1,
+ [PCNOC_SLV_2] = &pcnoc_s_2,
+ [PCNOC_SLV_3] = &pcnoc_s_3,
+ [PCNOC_SLV_4] = &pcnoc_s_4,
+ [PCNOC_SLV_8] = &pcnoc_s_8,
+ [PCNOC_SLV_9] = &pcnoc_s_9,
+ [PCNOC_SNOC_MAS] = &pcnoc_snoc_mas,
+ [SLAVE_BIMC_CFG] = &slv_bimc_cfg,
+ [SLAVE_BLSP_1] = &slv_blsp_1,
+ [SLAVE_BOOT_ROM] = &slv_boot_rom,
+ [SLAVE_CAMERA_CFG] = &slv_camera_cfg,
+ [SLAVE_CLK_CTL] = &slv_clk_ctl,
+ [SLAVE_CRYPTO_0_CFG] = &slv_crypto_0_cfg,
+ [SLAVE_DEHR_CFG] = &slv_dehr_cfg,
+ [SLAVE_DISPLAY_CFG] = &slv_display_cfg,
+ [SLAVE_GRAPHICS_3D_CFG] = &slv_gfx_cfg,
+ [SLAVE_IMEM_CFG] = &slv_imem_cfg,
+ [SLAVE_LPASS] = &slv_audio,
+ [SLAVE_MPM] = &slv_mpm,
+ [SLAVE_MSG_RAM] = &slv_msg_ram,
+ [SLAVE_MSS] = &slv_mss,
+ [SLAVE_PDM] = &slv_pdm,
+ [SLAVE_PMIC_ARB] = &slv_pmic_arb,
+ [SLAVE_PCNOC_CFG] = &slv_pcnoc_cfg,
+ [SLAVE_PRNG] = &slv_prng,
+ [SLAVE_QDSS_CFG] = &slv_qdss_cfg,
+ [SLAVE_RBCPR_CFG] = &slv_rbcpr_cfg,
+ [SLAVE_SDCC_1] = &slv_sdcc_1,
+ [SLAVE_SDCC_2] = &slv_sdcc_2,
+ [SLAVE_SECURITY] = &slv_security,
+ [SLAVE_SNOC_CFG] = &slv_snoc_cfg,
+ [SLAVE_SPDM] = &slv_spdm,
+ [SLAVE_TCSR] = &slv_tcsr,
+ [SLAVE_TLMM] = &slv_tlmm,
+ [SLAVE_USB_HS] = &slv_usb_hs,
+ [SLAVE_VENUS_CFG] = &slv_venus_cfg,
+ [SNOC_PCNOC_SLV] = &snoc_pcnoc_slv,
+};
+
+static struct msm8916_icc_desc msm8916_pcnoc = {
+ .nodes = msm8916_pcnoc_nodes,
+ .num_nodes = ARRAY_SIZE(msm8916_pcnoc_nodes),
+};
+
+static int msm8916_icc_set(struct icc_node *src, struct icc_node *dst)
+{
+ struct msm8916_icc_provider *qp;
+ struct msm8916_icc_node *qn;
+ u64 sum_bw, max_peak_bw, rate;
+ u32 agg_avg = 0, agg_peak = 0;
+ struct icc_provider *provider;
+ struct icc_node *n;
+ int ret, i;
+
+ qn = src->data;
+ provider = src->provider;
+ qp = to_msm8916_provider(provider);
+
+ list_for_each_entry(n, &provider->nodes, node_list)
+ provider->aggregate(n, 0, n->avg_bw, n->peak_bw,
+ &agg_avg, &agg_peak);
+
+ sum_bw = icc_units_to_bps(agg_avg);
+ max_peak_bw = icc_units_to_bps(agg_peak);
+
+ /* send bandwidth request message to the RPM processor */
+ if (qn->mas_rpm_id != -1) {
+ ret = qcom_icc_rpm_smd_send(QCOM_SMD_RPM_ACTIVE_STATE,
+ RPM_BUS_MASTER_REQ,
+ qn->mas_rpm_id,
+ sum_bw);
+ if (ret) {
+ pr_err("qcom_icc_rpm_smd_send mas %d error %d\n",
+ qn->mas_rpm_id, ret);
+ return ret;
+ }
+ }
+
+ if (qn->slv_rpm_id != -1) {
+ ret = qcom_icc_rpm_smd_send(QCOM_SMD_RPM_ACTIVE_STATE,
+ RPM_BUS_SLAVE_REQ,
+ qn->slv_rpm_id,
+ sum_bw);
+ if (ret) {
+ pr_err("qcom_icc_rpm_smd_send slv error %d\n",
+ ret);
+ return ret;
+ }
+ }
+
+ rate = max(sum_bw, max_peak_bw);
+
+ do_div(rate, qn->buswidth);
+
+ if (qn->rate == rate)
+ return 0;
+
+ for (i = 0; i < qp->num_clks; i++) {
+ ret = clk_set_rate(qp->bus_clks[i].clk, rate);
+ if (ret) {
+ pr_err("%s clk_set_rate error: %d\n",
+ qp->bus_clks[i].id, ret);
+ return ret;
+ }
+ }
+
+ qn->rate = rate;
+
+ return 0;
+}
+
+static int msm8916_qnoc_probe(struct platform_device *pdev)
+{
+ const struct msm8916_icc_desc *desc;
+ struct msm8916_icc_node **qnodes;
+ struct msm8916_icc_provider *qp;
+ struct device *dev = &pdev->dev;
+ struct icc_onecell_data *data;
+ struct icc_provider *provider;
+ struct icc_node *node;
+ size_t num_nodes, i;
+ int ret;
+
+ /* wait for the RPM proxy */
+ if (!qcom_icc_rpm_smd_available())
+ return -EPROBE_DEFER;
+
+ desc = of_device_get_match_data(dev);
+ if (!desc)
+ return -EINVAL;
+
+ qnodes = desc->nodes;
+ num_nodes = desc->num_nodes;
+
+ qp = devm_kzalloc(dev, sizeof(*qp), GFP_KERNEL);
+ if (!qp)
+ return -ENOMEM;
+
+ data = devm_kzalloc(dev, struct_size(data, nodes, num_nodes),
+ GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
+ qp->bus_clks = devm_kmemdup(dev, msm8916_bus_clocks,
+ sizeof(msm8916_bus_clocks), GFP_KERNEL);
+ if (!qp->bus_clks)
+ return -ENOMEM;
+
+ qp->num_clks = ARRAY_SIZE(msm8916_bus_clocks);
+ ret = devm_clk_bulk_get(dev, qp->num_clks, qp->bus_clks);
+ if (ret)
+ return ret;
+
+ ret = clk_bulk_prepare_enable(qp->num_clks, qp->bus_clks);
+ if (ret)
+ return ret;
+
+ provider = &qp->provider;
+ INIT_LIST_HEAD(&provider->nodes);
+ provider->dev = dev;
+ provider->set = msm8916_icc_set;
+ provider->aggregate = icc_std_aggregate;
+ provider->xlate = of_icc_xlate_onecell;
+ provider->data = data;
+
+ ret = icc_provider_add(provider);
+ if (ret) {
+ dev_err(dev, "error adding interconnect provider: %d\n", ret);
+ clk_bulk_disable_unprepare(qp->num_clks, qp->bus_clks);
+ return ret;
+ }
+
+ for (i = 0; i < num_nodes; i++) {
+ size_t j;
+
+ node = icc_node_create(qnodes[i]->id);
+ if (IS_ERR(node)) {
+ ret = PTR_ERR(node);
+ goto err;
+ }
+
+ node->name = qnodes[i]->name;
+ node->data = qnodes[i];
+ icc_node_add(node, provider);
+
+ for (j = 0; j < qnodes[i]->num_links; j++)
+ icc_link_create(node, qnodes[i]->links[j]);
+
+ data->nodes[i] = node;
+ }
+ data->num_nodes = num_nodes;
+
+ platform_set_drvdata(pdev, qp);
+
+ return 0;
+
+err:
+ icc_nodes_remove(provider);
+ icc_provider_del(provider);
+ clk_bulk_disable_unprepare(qp->num_clks, qp->bus_clks);
+
+ return ret;
+}
+
+static int msm8916_qnoc_remove(struct platform_device *pdev)
+{
+ struct msm8916_icc_provider *qp = platform_get_drvdata(pdev);
+
+ icc_nodes_remove(&qp->provider);
+ clk_bulk_disable_unprepare(qp->num_clks, qp->bus_clks);
+ return icc_provider_del(&qp->provider);
+}
+
+static const struct of_device_id msm8916_noc_of_match[] = {
+ { .compatible = "qcom,msm8916-bimc", .data = &msm8916_bimc },
+ { .compatible = "qcom,msm8916-pcnoc", .data = &msm8916_pcnoc },
+ { .compatible = "qcom,msm8916-snoc", .data = &msm8916_snoc },
+ { }
+};
+MODULE_DEVICE_TABLE(of, msm8916_noc_of_match);
+
+static struct platform_driver msm8916_noc_driver = {
+ .probe = msm8916_qnoc_probe,
+ .remove = msm8916_qnoc_remove,
+ .driver = {
+ .name = "qnoc-msm8916",
+ .of_match_table = msm8916_noc_of_match,
+ },
+};
+module_platform_driver(msm8916_noc_driver);
+MODULE_AUTHOR("Georgi Djakov <georgi.djakov@linaro.org>");
+MODULE_DESCRIPTION("Qualcomm MSM8916 NoC driver");
+MODULE_LICENSE("GPL v2");
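One detail worth calling out in the new driver above: DEFINE_QNODE() counts its variadic link list at compile time by building a throwaway compound literal and taking ARRAY_SIZE() of it. A hedged standalone illustration of the trick, with a hypothetical macro name:

	#include <linux/kernel.h>	/* ARRAY_SIZE() */

	#define COUNT_ARGS_DEMO(...)	ARRAY_SIZE(((int[]){ __VA_ARGS__ }))

	/*
	 * COUNT_ARGS_DEMO(3, 5, 8) == 3
	 * COUNT_ARGS_DEMO(7)       == 1 -- which is why the leaf nodes above
	 * pass a lone 0: num_links still comes out as 1, with links[0] == 0.
	 */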
diff --git a/drivers/interconnect/qcom/msm8974.c b/drivers/interconnect/qcom/msm8974.c
index bf8bd1aee358..3a313e11e73d 100644
--- a/drivers/interconnect/qcom/msm8974.c
+++ b/drivers/interconnect/qcom/msm8974.c
@@ -550,15 +550,6 @@ static struct msm8974_icc_desc msm8974_snoc = {
.num_nodes = ARRAY_SIZE(msm8974_snoc_nodes),
};
-static int msm8974_icc_aggregate(struct icc_node *node, u32 tag, u32 avg_bw,
- u32 peak_bw, u32 *agg_avg, u32 *agg_peak)
-{
- *agg_avg += avg_bw;
- *agg_peak = max(*agg_peak, peak_bw);
-
- return 0;
-}
-
static void msm8974_icc_rpm_smd_send(struct device *dev, int rsc_type,
char *name, int id, u64 val)
{
@@ -603,8 +594,8 @@ static int msm8974_icc_set(struct icc_node *src, struct icc_node *dst)
qp = to_msm8974_icc_provider(provider);
list_for_each_entry(n, &provider->nodes, node_list)
- msm8974_icc_aggregate(n, 0, n->avg_bw, n->peak_bw,
- &agg_avg, &agg_peak);
+ provider->aggregate(n, 0, n->avg_bw, n->peak_bw,
+ &agg_avg, &agg_peak);
sum_bw = icc_units_to_bps(agg_avg);
max_peak_bw = icc_units_to_bps(agg_peak);
@@ -652,7 +643,7 @@ static int msm8974_icc_probe(struct platform_device *pdev)
struct device *dev = &pdev->dev;
struct icc_onecell_data *data;
struct icc_provider *provider;
- struct icc_node *node, *tmp;
+ struct icc_node *node;
size_t num_nodes, i;
int ret;
@@ -694,7 +685,7 @@ static int msm8974_icc_probe(struct platform_device *pdev)
INIT_LIST_HEAD(&provider->nodes);
provider->dev = dev;
provider->set = msm8974_icc_set;
- provider->aggregate = msm8974_icc_aggregate;
+ provider->aggregate = icc_std_aggregate;
provider->xlate = of_icc_xlate_onecell;
provider->data = data;
@@ -732,10 +723,7 @@ static int msm8974_icc_probe(struct platform_device *pdev)
return 0;
err_del_icc:
- list_for_each_entry_safe(node, tmp, &provider->nodes, node_list) {
- icc_node_del(node);
- icc_node_destroy(node->id);
- }
+ icc_nodes_remove(provider);
icc_provider_del(provider);
err_disable_clks:
@@ -747,16 +735,10 @@ err_disable_clks:
static int msm8974_icc_remove(struct platform_device *pdev)
{
struct msm8974_icc_provider *qp = platform_get_drvdata(pdev);
- struct icc_provider *provider = &qp->provider;
- struct icc_node *n, *tmp;
- list_for_each_entry_safe(n, tmp, &provider->nodes, node_list) {
- icc_node_del(n);
- icc_node_destroy(n->id);
- }
+ icc_nodes_remove(&qp->provider);
clk_bulk_disable_unprepare(qp->num_clks, qp->bus_clks);
-
- return icc_provider_del(provider);
+ return icc_provider_del(&qp->provider);
}
static const struct of_device_id msm8974_noc_of_match[] = {
diff --git a/drivers/interconnect/qcom/qcs404.c b/drivers/interconnect/qcom/qcs404.c
index 8e0735a87040..d4769a5ea182 100644
--- a/drivers/interconnect/qcom/qcs404.c
+++ b/drivers/interconnect/qcom/qcs404.c
@@ -327,15 +327,6 @@ static struct qcom_icc_desc qcs404_snoc = {
.num_nodes = ARRAY_SIZE(qcs404_snoc_nodes),
};
-static int qcom_icc_aggregate(struct icc_node *node, u32 tag, u32 avg_bw,
- u32 peak_bw, u32 *agg_avg, u32 *agg_peak)
-{
- *agg_avg += avg_bw;
- *agg_peak = max(*agg_peak, peak_bw);
-
- return 0;
-}
-
static int qcom_icc_set(struct icc_node *src, struct icc_node *dst)
{
struct qcom_icc_provider *qp;
@@ -354,8 +345,8 @@ static int qcom_icc_set(struct icc_node *src, struct icc_node *dst)
qp = to_qcom_provider(provider);
list_for_each_entry(n, &provider->nodes, node_list)
- qcom_icc_aggregate(n, 0, n->avg_bw, n->peak_bw,
- &agg_avg, &agg_peak);
+ provider->aggregate(n, 0, n->avg_bw, n->peak_bw,
+ &agg_avg, &agg_peak);
sum_bw = icc_units_to_bps(agg_avg);
max_peak_bw = icc_units_to_bps(agg_peak);
@@ -414,7 +405,7 @@ static int qnoc_probe(struct platform_device *pdev)
struct icc_provider *provider;
struct qcom_icc_node **qnodes;
struct qcom_icc_provider *qp;
- struct icc_node *node, *tmp;
+ struct icc_node *node;
size_t num_nodes, i;
int ret;
@@ -456,7 +447,7 @@ static int qnoc_probe(struct platform_device *pdev)
INIT_LIST_HEAD(&provider->nodes);
provider->dev = dev;
provider->set = qcom_icc_set;
- provider->aggregate = qcom_icc_aggregate;
+ provider->aggregate = icc_std_aggregate;
provider->xlate = of_icc_xlate_onecell;
provider->data = data;
@@ -494,10 +485,7 @@ static int qnoc_probe(struct platform_device *pdev)
return 0;
err:
- list_for_each_entry_safe(node, tmp, &provider->nodes, node_list) {
- icc_node_del(node);
- icc_node_destroy(node->id);
- }
+ icc_nodes_remove(provider);
clk_bulk_disable_unprepare(qp->num_clks, qp->bus_clks);
icc_provider_del(provider);
@@ -507,16 +495,10 @@ err:
static int qnoc_remove(struct platform_device *pdev)
{
struct qcom_icc_provider *qp = platform_get_drvdata(pdev);
- struct icc_provider *provider = &qp->provider;
- struct icc_node *n, *tmp;
- list_for_each_entry_safe(n, tmp, &provider->nodes, node_list) {
- icc_node_del(n);
- icc_node_destroy(n->id);
- }
+ icc_nodes_remove(&qp->provider);
clk_bulk_disable_unprepare(qp->num_clks, qp->bus_clks);
-
- return icc_provider_del(provider);
+ return icc_provider_del(&qp->provider);
}
static const struct of_device_id qcs404_noc_of_match[] = {
diff --git a/drivers/interconnect/qcom/sdm845.c b/drivers/interconnect/qcom/sdm845.c
index 387267ee9648..f078cf0fce56 100644
--- a/drivers/interconnect/qcom/sdm845.c
+++ b/drivers/interconnect/qcom/sdm845.c
@@ -855,11 +855,7 @@ static int qnoc_probe(struct platform_device *pdev)
return ret;
err:
- list_for_each_entry(node, &provider->nodes, node_list) {
- icc_node_del(node);
- icc_node_destroy(node->id);
- }
-
+ icc_nodes_remove(provider);
icc_provider_del(provider);
return ret;
}
@@ -867,15 +863,9 @@ err:
static int qnoc_remove(struct platform_device *pdev)
{
struct qcom_icc_provider *qp = platform_get_drvdata(pdev);
- struct icc_provider *provider = &qp->provider;
- struct icc_node *n, *tmp;
-
- list_for_each_entry_safe(n, tmp, &provider->nodes, node_list) {
- icc_node_del(n);
- icc_node_destroy(n->id);
- }
- return icc_provider_del(provider);
+ icc_nodes_remove(&qp->provider);
+ return icc_provider_del(&qp->provider);
}
static const struct of_device_id qnoc_of_match[] = {
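Note: the three Qualcomm providers above (msm8974, qcs404, sdm845) each carried a private copy of the same aggregation policy (sum the average requests, keep the largest peak), which is what lets them all switch to the framework's icc_std_aggregate() and icc_nodes_remove() helpers. A minimal standalone sketch of that policy follows; the function and variable names are illustrative, not the framework's internals.

#include <stdint.h>
#include <stdio.h>

/* Sum the average bandwidth requests; keep the largest peak request. */
static int std_aggregate(uint32_t avg_bw, uint32_t peak_bw,
			 uint32_t *agg_avg, uint32_t *agg_peak)
{
	*agg_avg += avg_bw;
	if (peak_bw > *agg_peak)
		*agg_peak = peak_bw;
	return 0;
}

int main(void)
{
	uint32_t agg_avg = 0, agg_peak = 0;
	uint32_t reqs[][2] = { { 100, 200 }, { 300, 150 }, { 50, 400 } };

	for (unsigned int i = 0; i < 3; i++)
		std_aggregate(reqs[i][0], reqs[i][1], &agg_avg, &agg_peak);

	/* Expect agg_avg = 450 (sum), agg_peak = 400 (max). */
	printf("agg_avg=%u agg_peak=%u\n", agg_avg, agg_peak);
	return 0;
}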
diff --git a/drivers/interconnect/trace.h b/drivers/interconnect/trace.h
new file mode 100644
index 000000000000..3d668ff566bf
--- /dev/null
+++ b/drivers/interconnect/trace.h
@@ -0,0 +1,88 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Interconnect framework tracepoints
+ * Copyright (c) 2019, Linaro Ltd.
+ * Author: Georgi Djakov <georgi.djakov@linaro.org>
+ */
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM interconnect
+
+#if !defined(_TRACE_INTERCONNECT_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_INTERCONNECT_H
+
+#include <linux/interconnect.h>
+#include <linux/tracepoint.h>
+
+TRACE_EVENT(icc_set_bw,
+
+ TP_PROTO(struct icc_path *p, struct icc_node *n, int i,
+ u32 avg_bw, u32 peak_bw),
+
+ TP_ARGS(p, n, i, avg_bw, peak_bw),
+
+ TP_STRUCT__entry(
+ __string(path_name, p->name)
+ __string(dev, dev_name(p->reqs[i].dev))
+ __string(node_name, n->name)
+ __field(u32, avg_bw)
+ __field(u32, peak_bw)
+ __field(u32, node_avg_bw)
+ __field(u32, node_peak_bw)
+ ),
+
+ TP_fast_assign(
+ __assign_str(path_name, p->name);
+ __assign_str(dev, dev_name(p->reqs[i].dev));
+ __assign_str(node_name, n->name);
+ __entry->avg_bw = avg_bw;
+ __entry->peak_bw = peak_bw;
+ __entry->node_avg_bw = n->avg_bw;
+ __entry->node_peak_bw = n->peak_bw;
+ ),
+
+ TP_printk("path=%s dev=%s node=%s avg_bw=%u peak_bw=%u agg_avg=%u agg_peak=%u",
+ __get_str(path_name),
+ __get_str(dev),
+ __get_str(node_name),
+ __entry->avg_bw,
+ __entry->peak_bw,
+ __entry->node_avg_bw,
+ __entry->node_peak_bw)
+);
+
+TRACE_EVENT(icc_set_bw_end,
+
+ TP_PROTO(struct icc_path *p, int ret),
+
+ TP_ARGS(p, ret),
+
+ TP_STRUCT__entry(
+ __string(path_name, p->name)
+ __string(dev, dev_name(p->reqs[0].dev))
+ __field(int, ret)
+ ),
+
+ TP_fast_assign(
+ __assign_str(path_name, p->name);
+ __assign_str(dev, dev_name(p->reqs[0].dev));
+ __entry->ret = ret;
+ ),
+
+ TP_printk("path=%s dev=%s ret=%d",
+ __get_str(path_name),
+ __get_str(dev),
+ __entry->ret)
+);
+
+#endif /* _TRACE_INTERCONNECT_H */
+
+/* This part must be outside protection */
+
+#undef TRACE_INCLUDE_PATH
+#define TRACE_INCLUDE_PATH .
+
+#undef TRACE_INCLUDE_FILE
+#define TRACE_INCLUDE_FILE trace
+
+#include <trace/define_trace.h>
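The new trace.h follows the usual TRACE_EVENT() recipe: TRACE_INCLUDE_PATH/TRACE_INCLUDE_FILE point define_trace.h back at this header, exactly one compilation unit defines CREATE_TRACE_POINTS to instantiate the events, and everything else just calls the generated trace_* functions. A hedged sketch of that wiring; the call site shown is illustrative (the real ones live in the interconnect core, not in this patch):

/* In exactly one compilation unit, instantiate the tracepoints ... */
#define CREATE_TRACE_POINTS
#include "trace.h"

/* ... then fire them around the point where requests hit the hardware. */
static int example_apply(struct icc_path *path, struct icc_node *node,
			 int i, u32 avg_bw, u32 peak_bw)
{
	int ret;

	trace_icc_set_bw(path, node, i, avg_bw, peak_bw);
	ret = 0;		/* program the provider here */
	trace_icc_set_bw_end(path, ret);
	return ret;
}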
diff --git a/drivers/iommu/amd_iommu_init.c b/drivers/iommu/amd_iommu_init.c
index 568c52317757..823cc4ef51fd 100644
--- a/drivers/iommu/amd_iommu_init.c
+++ b/drivers/iommu/amd_iommu_init.c
@@ -440,7 +440,7 @@ static u8 __iomem * __init iommu_map_mmio_space(u64 address, u64 end)
return NULL;
}
- return (u8 __iomem *)ioremap_nocache(address, end);
+ return (u8 __iomem *)ioremap(address, end);
}
static void __init iommu_unmap_mmio_space(struct amd_iommu *iommu)
@@ -1655,27 +1655,39 @@ static int iommu_pc_get_set_reg(struct amd_iommu *iommu, u8 bank, u8 cntr,
static void init_iommu_perf_ctr(struct amd_iommu *iommu)
{
struct pci_dev *pdev = iommu->dev;
- u64 val = 0xabcd, val2 = 0;
+ u64 val = 0xabcd, val2 = 0, save_reg = 0;
if (!iommu_feature(iommu, FEATURE_PC))
return;
amd_iommu_pc_present = true;
+ /* save the value to restore, if writable */
+ if (iommu_pc_get_set_reg(iommu, 0, 0, 0, &save_reg, false))
+ goto pc_false;
+
/* Check if the performance counters can be written to */
if ((iommu_pc_get_set_reg(iommu, 0, 0, 0, &val, true)) ||
(iommu_pc_get_set_reg(iommu, 0, 0, 0, &val2, false)) ||
- (val != val2)) {
- pci_err(pdev, "Unable to write to IOMMU perf counter.\n");
- amd_iommu_pc_present = false;
- return;
- }
+ (val != val2))
+ goto pc_false;
+
+ /* restore */
+ if (iommu_pc_get_set_reg(iommu, 0, 0, 0, &save_reg, true))
+ goto pc_false;
pci_info(pdev, "IOMMU performance counters supported\n");
val = readl(iommu->mmio_base + MMIO_CNTR_CONF_OFFSET);
iommu->max_banks = (u8) ((val >> 12) & 0x3f);
iommu->max_counters = (u8) ((val >> 7) & 0xf);
+
+ return;
+
+pc_false:
+ pci_err(pdev, "Unable to read/write to IOMMU perf counter.\n");
+ amd_iommu_pc_present = false;
+ return;
}
static ssize_t amd_iommu_show_cap(struct device *dev,
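The init_iommu_perf_ctr() rework above turns a destructive writability probe into save/probe/restore, so counter bank 0 is left exactly as firmware configured it, and a failed read now also lands on the common pc_false error path. The shape of the pattern, as a runnable sketch where reg_read()/reg_write() stand in for iommu_pc_get_set_reg():

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static uint64_t fake_reg;            /* stands in for the perf counter */

static bool reg_read(uint64_t *val)  { *val = fake_reg; return true; }
static bool reg_write(uint64_t val)  { fake_reg = val;  return true; }

/* Probe whether the register is writable without clobbering it. */
static bool probe_writable(void)
{
	uint64_t save, val = 0xabcd, val2 = 0;

	if (!reg_read(&save))            /* save the current contents */
		return false;
	if (!reg_write(val) || !reg_read(&val2) || val != val2)
		return false;            /* not writable, or an I/O error */
	return reg_write(save);          /* restore what firmware left */
}

int main(void)
{
	fake_reg = 0x1234;
	printf("writable=%d, reg restored to %#llx\n",
	       probe_writable(), (unsigned long long)fake_reg);
	return 0;
}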
diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c
index 1801f0aaf013..932267f49f9a 100644
--- a/drivers/iommu/intel-iommu.c
+++ b/drivers/iommu/intel-iommu.c
@@ -5163,7 +5163,8 @@ static void dmar_remove_one_dev_info(struct device *dev)
spin_lock_irqsave(&device_domain_lock, flags);
info = dev->archdata.iommu;
- if (info)
+ if (info && info != DEFER_DEVICE_DOMAIN_INFO
+ && info != DUMMY_DEVICE_DOMAIN_INFO)
__dmar_remove_one_dev_info(info);
spin_unlock_irqrestore(&device_domain_lock, flags);
}
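dev->archdata.iommu may hold sentinel cookies (deferred or dummy attachments) rather than a pointer to a real device_domain_info, so dmar_remove_one_dev_info() must now screen them out before dereferencing. A minimal sketch of the sentinel-pointer idiom; the cookie values below are illustrative, not Intel's actual definitions:

#include <stdio.h>

/* Distinct non-NULL cookies that are never valid pointers to dereference. */
#define DEFER_INFO  ((struct dev_info *)(-1L))
#define DUMMY_INFO  ((struct dev_info *)(-2L))

struct dev_info { int id; };

static void teardown(struct dev_info *info)
{
	/* Only a real allocation may be dereferenced and freed. */
	if (info && info != DEFER_INFO && info != DUMMY_INFO)
		printf("tearing down dev %d\n", info->id);
	else
		printf("skipping sentinel/NULL\n");
}

int main(void)
{
	struct dev_info real = { .id = 7 };

	teardown(&real);       /* torn down */
	teardown(DEFER_INFO);  /* skipped */
	teardown(NULL);        /* skipped */
	return 0;
}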
diff --git a/drivers/ipack/carriers/tpci200.c b/drivers/ipack/carriers/tpci200.c
index d246d74ec3a5..23445ebfda5c 100644
--- a/drivers/ipack/carriers/tpci200.c
+++ b/drivers/ipack/carriers/tpci200.c
@@ -298,7 +298,7 @@ static int tpci200_register(struct tpci200_board *tpci200)
/* Map internal tpci200 driver user space */
tpci200->info->interface_regs =
- ioremap_nocache(pci_resource_start(tpci200->info->pdev,
+ ioremap(pci_resource_start(tpci200->info->pdev,
TPCI200_IP_INTERFACE_BAR),
TPCI200_IFACE_SIZE);
if (!tpci200->info->interface_regs) {
@@ -541,7 +541,7 @@ static int tpci200_pci_probe(struct pci_dev *pdev,
ret = -EBUSY;
goto out_err_pci_request;
}
- tpci200->info->cfg_regs = ioremap_nocache(
+ tpci200->info->cfg_regs = ioremap(
pci_resource_start(pdev, TPCI200_CFG_MEM_BAR),
pci_resource_len(pdev, TPCI200_CFG_MEM_BAR));
if (!tpci200->info->cfg_regs) {
diff --git a/drivers/ipack/devices/ipoctal.c b/drivers/ipack/devices/ipoctal.c
index 9c2a4b5d30cf..d480a514c983 100644
--- a/drivers/ipack/devices/ipoctal.c
+++ b/drivers/ipack/devices/ipoctal.c
@@ -276,7 +276,7 @@ static int ipoctal_inst_slot(struct ipoctal *ipoctal, unsigned int bus_nr,
ipoctal->board_id = ipoctal->dev->id_device;
region = &ipoctal->dev->region[IPACK_IO_SPACE];
- addr = devm_ioremap_nocache(&ipoctal->dev->dev,
+ addr = devm_ioremap(&ipoctal->dev->dev,
region->start, region->size);
if (!addr) {
dev_err(&ipoctal->dev->dev,
@@ -292,7 +292,7 @@ static int ipoctal_inst_slot(struct ipoctal *ipoctal, unsigned int bus_nr,
region = &ipoctal->dev->region[IPACK_INT_SPACE];
ipoctal->int_space =
- devm_ioremap_nocache(&ipoctal->dev->dev,
+ devm_ioremap(&ipoctal->dev->dev,
region->start, region->size);
if (!ipoctal->int_space) {
dev_err(&ipoctal->dev->dev,
@@ -303,7 +303,7 @@ static int ipoctal_inst_slot(struct ipoctal *ipoctal, unsigned int bus_nr,
region = &ipoctal->dev->region[IPACK_MEM8_SPACE];
ipoctal->mem8_space =
- devm_ioremap_nocache(&ipoctal->dev->dev,
+ devm_ioremap(&ipoctal->dev->dev,
region->start, 0x8000);
if (!ipoctal->mem8_space) {
dev_err(&ipoctal->dev->dev,
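These hunks are part of the tree-wide retirement of ioremap_nocache(): plain ioremap() already returns an uncached mapping on the architectures this hardware runs on, so the conversion is mechanical, and the devm_ioremap() form additionally unmaps automatically on detach. A hedged probe() sketch of the managed pattern; the function name and BAR index are illustrative:

/* Sketch only: map a MMIO region with the managed API so no explicit
 * iounmap() is needed on remove or on error paths. */
static int example_probe(struct platform_device *pdev)
{
	struct resource *res;
	void __iomem *regs;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!res)
		return -ENODEV;

	regs = devm_ioremap(&pdev->dev, res->start, resource_size(res));
	if (!regs)
		return -ENOMEM;

	writel(0, regs);	/* device registers are now accessible */
	return 0;
}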
diff --git a/drivers/irqchip/Kconfig b/drivers/irqchip/Kconfig
index 697e6a8ccaae..1006c694d9fb 100644
--- a/drivers/irqchip/Kconfig
+++ b/drivers/irqchip/Kconfig
@@ -457,6 +457,12 @@ config IMX_IRQSTEER
help
Support for the i.MX IRQSTEER interrupt multiplexer/remapper.
+config IMX_INTMUX
+ def_bool y if ARCH_MXC
+ select IRQ_DOMAIN
+ help
+ Support for the i.MX INTMUX interrupt multiplexer.
+
config LS1X_IRQ
bool "Loongson-1 Interrupt Controller"
depends on MACH_LOONGSON32
@@ -490,6 +496,7 @@ config TI_SCI_INTA_IRQCHIP
config SIFIVE_PLIC
bool "SiFive Platform-Level Interrupt Controller"
depends on RISCV
+ select IRQ_DOMAIN_HIERARCHY
help
This enables support for the PLIC chip found in SiFive (and
potentially other) RISC-V systems. The PLIC controls devices
@@ -499,4 +506,11 @@ config SIFIVE_PLIC
If you don't know what to do here, say Y.
+config EXYNOS_IRQ_COMBINER
+ bool "Samsung Exynos IRQ combiner support" if COMPILE_TEST
+ depends on (ARCH_EXYNOS && ARM) || COMPILE_TEST
+ help
+ Say yes here to add support for the IRQ combiner devices embedded
+ in Samsung Exynos chips.
+
endmenu
diff --git a/drivers/irqchip/Makefile b/drivers/irqchip/Makefile
index e806dda690ea..eae0d78cbf22 100644
--- a/drivers/irqchip/Makefile
+++ b/drivers/irqchip/Makefile
@@ -9,7 +9,7 @@ obj-$(CONFIG_ARCH_BCM2835) += irq-bcm2835.o
obj-$(CONFIG_ARCH_BCM2835) += irq-bcm2836.o
obj-$(CONFIG_DAVINCI_AINTC) += irq-davinci-aintc.o
obj-$(CONFIG_DAVINCI_CP_INTC) += irq-davinci-cp-intc.o
-obj-$(CONFIG_ARCH_EXYNOS) += exynos-combiner.o
+obj-$(CONFIG_EXYNOS_IRQ_COMBINER) += exynos-combiner.o
obj-$(CONFIG_FARADAY_FTINTC010) += irq-ftintc010.o
obj-$(CONFIG_ARCH_HIP04) += irq-hip04.o
obj-$(CONFIG_ARCH_LPC32XX) += irq-lpc32xx.o
@@ -87,7 +87,7 @@ obj-$(CONFIG_MVEBU_SEI) += irq-mvebu-sei.o
obj-$(CONFIG_LS_EXTIRQ) += irq-ls-extirq.o
obj-$(CONFIG_LS_SCFG_MSI) += irq-ls-scfg-msi.o
obj-$(CONFIG_EZNPS_GIC) += irq-eznps.o
-obj-$(CONFIG_ARCH_ASPEED) += irq-aspeed-vic.o irq-aspeed-i2c-ic.o
+obj-$(CONFIG_ARCH_ASPEED) += irq-aspeed-vic.o irq-aspeed-i2c-ic.o irq-aspeed-scu-ic.o
obj-$(CONFIG_STM32_EXTI) += irq-stm32-exti.o
obj-$(CONFIG_QCOM_IRQ_COMBINER) += qcom-irq-combiner.o
obj-$(CONFIG_IRQ_UNIPHIER_AIDET) += irq-uniphier-aidet.o
@@ -100,6 +100,7 @@ obj-$(CONFIG_CSKY_MPINTC) += irq-csky-mpintc.o
obj-$(CONFIG_CSKY_APB_INTC) += irq-csky-apb-intc.o
obj-$(CONFIG_SIFIVE_PLIC) += irq-sifive-plic.o
obj-$(CONFIG_IMX_IRQSTEER) += irq-imx-irqsteer.o
+obj-$(CONFIG_IMX_INTMUX) += irq-imx-intmux.o
obj-$(CONFIG_MADERA_IRQ) += irq-madera.o
obj-$(CONFIG_LS1X_IRQ) += irq-ls1x.o
obj-$(CONFIG_TI_SCI_INTR_IRQCHIP) += irq-ti-sci-intr.o
diff --git a/drivers/irqchip/irq-aspeed-scu-ic.c b/drivers/irqchip/irq-aspeed-scu-ic.c
new file mode 100644
index 000000000000..c90a3346b985
--- /dev/null
+++ b/drivers/irqchip/irq-aspeed-scu-ic.c
@@ -0,0 +1,239 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Aspeed AST24XX, AST25XX, and AST26XX SCU Interrupt Controller
+ * Copyright 2019 IBM Corporation
+ *
+ * Eddie James <eajames@linux.ibm.com>
+ */
+
+#include <linux/bitops.h>
+#include <linux/irq.h>
+#include <linux/irqchip.h>
+#include <linux/irqchip/chained_irq.h>
+#include <linux/irqdomain.h>
+#include <linux/mfd/syscon.h>
+#include <linux/of_irq.h>
+#include <linux/regmap.h>
+
+#define ASPEED_SCU_IC_REG 0x018
+#define ASPEED_SCU_IC_SHIFT 0
+#define ASPEED_SCU_IC_ENABLE GENMASK(6, ASPEED_SCU_IC_SHIFT)
+#define ASPEED_SCU_IC_NUM_IRQS 7
+#define ASPEED_SCU_IC_STATUS_SHIFT 16
+
+#define ASPEED_AST2600_SCU_IC0_REG 0x560
+#define ASPEED_AST2600_SCU_IC0_SHIFT 0
+#define ASPEED_AST2600_SCU_IC0_ENABLE \
+ GENMASK(5, ASPEED_AST2600_SCU_IC0_SHIFT)
+#define ASPEED_AST2600_SCU_IC0_NUM_IRQS 6
+
+#define ASPEED_AST2600_SCU_IC1_REG 0x570
+#define ASPEED_AST2600_SCU_IC1_SHIFT 4
+#define ASPEED_AST2600_SCU_IC1_ENABLE \
+ GENMASK(5, ASPEED_AST2600_SCU_IC1_SHIFT)
+#define ASPEED_AST2600_SCU_IC1_NUM_IRQS 2
+
+struct aspeed_scu_ic {
+ unsigned long irq_enable;
+ unsigned long irq_shift;
+ unsigned int num_irqs;
+ unsigned int reg;
+ struct regmap *scu;
+ struct irq_domain *irq_domain;
+};
+
+static void aspeed_scu_ic_irq_handler(struct irq_desc *desc)
+{
+ unsigned int irq;
+ unsigned int sts;
+ unsigned long bit;
+ unsigned long enabled;
+ unsigned long max;
+ unsigned long status;
+ struct aspeed_scu_ic *scu_ic = irq_desc_get_handler_data(desc);
+ struct irq_chip *chip = irq_desc_get_chip(desc);
+ unsigned int mask = scu_ic->irq_enable << ASPEED_SCU_IC_STATUS_SHIFT;
+
+ chained_irq_enter(chip, desc);
+
+ /*
+ * The SCU IC has just one register to control its operation and read
+ * status. The interrupt enable bits occupy the lower 16 bits of the
+ * register, while the interrupt status bits occupy the upper 16 bits.
+ * The status bit for a given interrupt is always 16 bits shifted from
+ * the enable bit for the same interrupt.
+ * Therefore, perform the IRQ operations in the enable bit space by
+ * shifting the status down to get the mapping and then back up to
+ * clear the bit.
+ */
+ regmap_read(scu_ic->scu, scu_ic->reg, &sts);
+ enabled = sts & scu_ic->irq_enable;
+ status = (sts >> ASPEED_SCU_IC_STATUS_SHIFT) & enabled;
+
+ bit = scu_ic->irq_shift;
+ max = scu_ic->num_irqs + bit;
+
+ for_each_set_bit_from(bit, &status, max) {
+ irq = irq_find_mapping(scu_ic->irq_domain,
+ bit - scu_ic->irq_shift);
+ generic_handle_irq(irq);
+
+ regmap_update_bits(scu_ic->scu, scu_ic->reg, mask,
+ BIT(bit + ASPEED_SCU_IC_STATUS_SHIFT));
+ }
+
+ chained_irq_exit(chip, desc);
+}
+
+static void aspeed_scu_ic_irq_mask(struct irq_data *data)
+{
+ struct aspeed_scu_ic *scu_ic = irq_data_get_irq_chip_data(data);
+ unsigned int mask = BIT(data->hwirq + scu_ic->irq_shift) |
+ (scu_ic->irq_enable << ASPEED_SCU_IC_STATUS_SHIFT);
+
+ /*
+ * Status bits are cleared by writing 1. In order to prevent the mask
+ * operation from clearing the status bits, they should be under the
+ * mask and written with 0.
+ */
+ regmap_update_bits(scu_ic->scu, scu_ic->reg, mask, 0);
+}
+
+static void aspeed_scu_ic_irq_unmask(struct irq_data *data)
+{
+ struct aspeed_scu_ic *scu_ic = irq_data_get_irq_chip_data(data);
+ unsigned int bit = BIT(data->hwirq + scu_ic->irq_shift);
+ unsigned int mask = bit |
+ (scu_ic->irq_enable << ASPEED_SCU_IC_STATUS_SHIFT);
+
+ /*
+ * Status bits are cleared by writing 1. In order to prevent the unmask
+ * operation from clearing the status bits, they should be under the
+ * mask and written with 0.
+ */
+ regmap_update_bits(scu_ic->scu, scu_ic->reg, mask, bit);
+}
+
+static int aspeed_scu_ic_irq_set_affinity(struct irq_data *data,
+ const struct cpumask *dest,
+ bool force)
+{
+ return -EINVAL;
+}
+
+static struct irq_chip aspeed_scu_ic_chip = {
+ .name = "aspeed-scu-ic",
+ .irq_mask = aspeed_scu_ic_irq_mask,
+ .irq_unmask = aspeed_scu_ic_irq_unmask,
+ .irq_set_affinity = aspeed_scu_ic_irq_set_affinity,
+};
+
+static int aspeed_scu_ic_map(struct irq_domain *domain, unsigned int irq,
+ irq_hw_number_t hwirq)
+{
+ irq_set_chip_and_handler(irq, &aspeed_scu_ic_chip, handle_level_irq);
+ irq_set_chip_data(irq, domain->host_data);
+
+ return 0;
+}
+
+static const struct irq_domain_ops aspeed_scu_ic_domain_ops = {
+ .map = aspeed_scu_ic_map,
+};
+
+static int aspeed_scu_ic_of_init_common(struct aspeed_scu_ic *scu_ic,
+ struct device_node *node)
+{
+ int irq;
+ int rc = 0;
+
+ if (!node->parent) {
+ rc = -ENODEV;
+ goto err;
+ }
+
+ scu_ic->scu = syscon_node_to_regmap(node->parent);
+ if (IS_ERR(scu_ic->scu)) {
+ rc = PTR_ERR(scu_ic->scu);
+ goto err;
+ }
+
+ irq = irq_of_parse_and_map(node, 0);
+ if (irq < 0) {
+ rc = irq;
+ goto err;
+ }
+
+ scu_ic->irq_domain = irq_domain_add_linear(node, scu_ic->num_irqs,
+ &aspeed_scu_ic_domain_ops,
+ scu_ic);
+ if (!scu_ic->irq_domain) {
+ rc = -ENOMEM;
+ goto err;
+ }
+
+ irq_set_chained_handler_and_data(irq, aspeed_scu_ic_irq_handler,
+ scu_ic);
+
+ return 0;
+
+err:
+ kfree(scu_ic);
+
+ return rc;
+}
+
+static int __init aspeed_scu_ic_of_init(struct device_node *node,
+ struct device_node *parent)
+{
+ struct aspeed_scu_ic *scu_ic = kzalloc(sizeof(*scu_ic), GFP_KERNEL);
+
+ if (!scu_ic)
+ return -ENOMEM;
+
+ scu_ic->irq_enable = ASPEED_SCU_IC_ENABLE;
+ scu_ic->irq_shift = ASPEED_SCU_IC_SHIFT;
+ scu_ic->num_irqs = ASPEED_SCU_IC_NUM_IRQS;
+ scu_ic->reg = ASPEED_SCU_IC_REG;
+
+ return aspeed_scu_ic_of_init_common(scu_ic, node);
+}
+
+static int __init aspeed_ast2600_scu_ic0_of_init(struct device_node *node,
+ struct device_node *parent)
+{
+ struct aspeed_scu_ic *scu_ic = kzalloc(sizeof(*scu_ic), GFP_KERNEL);
+
+ if (!scu_ic)
+ return -ENOMEM;
+
+ scu_ic->irq_enable = ASPEED_AST2600_SCU_IC0_ENABLE;
+ scu_ic->irq_shift = ASPEED_AST2600_SCU_IC0_SHIFT;
+ scu_ic->num_irqs = ASPEED_AST2600_SCU_IC0_NUM_IRQS;
+ scu_ic->reg = ASPEED_AST2600_SCU_IC0_REG;
+
+ return aspeed_scu_ic_of_init_common(scu_ic, node);
+}
+
+static int __init aspeed_ast2600_scu_ic1_of_init(struct device_node *node,
+ struct device_node *parent)
+{
+ struct aspeed_scu_ic *scu_ic = kzalloc(sizeof(*scu_ic), GFP_KERNEL);
+
+ if (!scu_ic)
+ return -ENOMEM;
+
+ scu_ic->irq_enable = ASPEED_AST2600_SCU_IC1_ENABLE;
+ scu_ic->irq_shift = ASPEED_AST2600_SCU_IC1_SHIFT;
+ scu_ic->num_irqs = ASPEED_AST2600_SCU_IC1_NUM_IRQS;
+ scu_ic->reg = ASPEED_AST2600_SCU_IC1_REG;
+
+ return aspeed_scu_ic_of_init_common(scu_ic, node);
+}
+
+IRQCHIP_DECLARE(ast2400_scu_ic, "aspeed,ast2400-scu-ic", aspeed_scu_ic_of_init);
+IRQCHIP_DECLARE(ast2500_scu_ic, "aspeed,ast2500-scu-ic", aspeed_scu_ic_of_init);
+IRQCHIP_DECLARE(ast2600_scu_ic0, "aspeed,ast2600-scu-ic0",
+ aspeed_ast2600_scu_ic0_of_init);
+IRQCHIP_DECLARE(ast2600_scu_ic1, "aspeed,ast2600-scu-ic1",
+ aspeed_ast2600_scu_ic1_of_init);
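The handler and mask/unmask paths in this new driver all hinge on the SCU register layout: enable bits in the low half, write-1-to-clear status bits exactly ASPEED_SCU_IC_STATUS_SHIFT (16) bits above them. Every regmap update must therefore cover all status bits in the mask while writing 1 only to the one being acked, otherwise the update would spuriously clear pending sources. A runnable check of that mask arithmetic:

#include <stdint.h>
#include <stdio.h>

#define STATUS_SHIFT 16

int main(void)
{
	uint32_t irq_enable = 0x7f;           /* 7 sources, bits 0..6 */
	uint32_t bit = 3;                     /* source being acked */

	/*
	 * The update mask must cover every status bit (write-1-to-clear)
	 * while the value written sets only the bit being acked; all the
	 * other status bits receive 0 and stay pending.
	 */
	uint32_t mask = irq_enable << STATUS_SHIFT;
	uint32_t ack  = 1u << (bit + STATUS_SHIFT);

	/* Expect mask=0x7f0000 ack=0x80000, i.e. only source 3 cleared. */
	printf("mask=%#x ack=%#x\n", mask, ack);
	return 0;
}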
diff --git a/drivers/irqchip/irq-gic-v3-its.c b/drivers/irqchip/irq-gic-v3-its.c
index e05673bcd52b..f71758632f8d 100644
--- a/drivers/irqchip/irq-gic-v3-its.c
+++ b/drivers/irqchip/irq-gic-v3-its.c
@@ -106,6 +106,7 @@ struct its_node {
u64 typer;
u64 cbaser_save;
u32 ctlr_save;
+ u32 mpidr;
struct list_head its_device_list;
u64 flags;
unsigned long list_nr;
@@ -116,12 +117,22 @@ struct its_node {
};
#define is_v4(its) (!!((its)->typer & GITS_TYPER_VLPIS))
+#define is_v4_1(its) (!!((its)->typer & GITS_TYPER_VMAPP))
#define device_ids(its) (FIELD_GET(GITS_TYPER_DEVBITS, (its)->typer) + 1)
#define ITS_ITT_ALIGN SZ_256
/* The maximum number of VPEID bits supported by VLPI commands */
-#define ITS_MAX_VPEID_BITS (16)
+#define ITS_MAX_VPEID_BITS \
+ ({ \
+ int nvpeid = 16; \
+ if (gic_rdists->has_rvpeid && \
+ gic_rdists->gicd_typer2 & GICD_TYPER2_VIL) \
+ nvpeid = 1 + (gic_rdists->gicd_typer2 & \
+ GICD_TYPER2_VID); \
+ \
+ nvpeid; \
+ })
#define ITS_MAX_VPEID (1 << (ITS_MAX_VPEID_BITS))
/* Convert page order to size in bytes */
@@ -216,11 +227,27 @@ static struct its_vlpi_map *dev_event_to_vlpi_map(struct its_device *its_dev,
return &its_dev->event_map.vlpi_maps[event];
}
-static struct its_collection *irq_to_col(struct irq_data *d)
+static struct its_vlpi_map *get_vlpi_map(struct irq_data *d)
+{
+ if (irqd_is_forwarded_to_vcpu(d)) {
+ struct its_device *its_dev = irq_data_get_irq_chip_data(d);
+ u32 event = its_get_event_id(d);
+
+ return dev_event_to_vlpi_map(its_dev, event);
+ }
+
+ return NULL;
+}
+
+static int irq_to_cpuid(struct irq_data *d)
{
struct its_device *its_dev = irq_data_get_irq_chip_data(d);
+ struct its_vlpi_map *map = get_vlpi_map(d);
- return dev_event_to_col(its_dev, its_get_event_id(d));
+ if (map)
+ return map->vpe->col_idx;
+
+ return its_dev->event_map.col_map[its_get_event_id(d)];
}
static struct its_collection *valid_col(struct its_collection *col)
@@ -322,6 +349,10 @@ struct its_cmd_desc {
u16 seq_num;
u16 its_list;
} its_vmovp_cmd;
+
+ struct {
+ struct its_vpe *vpe;
+ } its_invdb_cmd;
};
};
@@ -438,6 +469,38 @@ static void its_encode_vpt_size(struct its_cmd_block *cmd, u8 vpt_size)
its_mask_encode(&cmd->raw_cmd[3], vpt_size, 4, 0);
}
+static void its_encode_vconf_addr(struct its_cmd_block *cmd, u64 vconf_pa)
+{
+ its_mask_encode(&cmd->raw_cmd[0], vconf_pa >> 16, 51, 16);
+}
+
+static void its_encode_alloc(struct its_cmd_block *cmd, bool alloc)
+{
+ its_mask_encode(&cmd->raw_cmd[0], alloc, 8, 8);
+}
+
+static void its_encode_ptz(struct its_cmd_block *cmd, bool ptz)
+{
+ its_mask_encode(&cmd->raw_cmd[0], ptz, 9, 9);
+}
+
+static void its_encode_vmapp_default_db(struct its_cmd_block *cmd,
+ u32 vpe_db_lpi)
+{
+ its_mask_encode(&cmd->raw_cmd[1], vpe_db_lpi, 31, 0);
+}
+
+static void its_encode_vmovp_default_db(struct its_cmd_block *cmd,
+ u32 vpe_db_lpi)
+{
+ its_mask_encode(&cmd->raw_cmd[3], vpe_db_lpi, 31, 0);
+}
+
+static void its_encode_db(struct its_cmd_block *cmd, bool db)
+{
+ its_mask_encode(&cmd->raw_cmd[2], db, 63, 63);
+}
+
static inline void its_fixup_cmd(struct its_cmd_block *cmd)
{
/* Let's fixup BE commands */
@@ -621,19 +684,45 @@ static struct its_vpe *its_build_vmapp_cmd(struct its_node *its,
struct its_cmd_block *cmd,
struct its_cmd_desc *desc)
{
- unsigned long vpt_addr;
+ unsigned long vpt_addr, vconf_addr;
u64 target;
-
- vpt_addr = virt_to_phys(page_address(desc->its_vmapp_cmd.vpe->vpt_page));
- target = desc->its_vmapp_cmd.col->target_address + its->vlpi_redist_offset;
+ bool alloc;
its_encode_cmd(cmd, GITS_CMD_VMAPP);
its_encode_vpeid(cmd, desc->its_vmapp_cmd.vpe->vpe_id);
its_encode_valid(cmd, desc->its_vmapp_cmd.valid);
+
+ if (!desc->its_vmapp_cmd.valid) {
+ if (is_v4_1(its)) {
+ alloc = !atomic_dec_return(&desc->its_vmapp_cmd.vpe->vmapp_count);
+ its_encode_alloc(cmd, alloc);
+ }
+
+ goto out;
+ }
+
+ vpt_addr = virt_to_phys(page_address(desc->its_vmapp_cmd.vpe->vpt_page));
+ target = desc->its_vmapp_cmd.col->target_address + its->vlpi_redist_offset;
+
its_encode_target(cmd, target);
its_encode_vpt_addr(cmd, vpt_addr);
its_encode_vpt_size(cmd, LPI_NRBITS - 1);
+ if (!is_v4_1(its))
+ goto out;
+
+ vconf_addr = virt_to_phys(page_address(desc->its_vmapp_cmd.vpe->its_vm->vprop_page));
+
+ alloc = !atomic_fetch_inc(&desc->its_vmapp_cmd.vpe->vmapp_count);
+
+ its_encode_alloc(cmd, alloc);
+
+ /* We can only signal PTZ when alloc==1. Why do we have two bits? */
+ its_encode_ptz(cmd, alloc);
+ its_encode_vconf_addr(cmd, vconf_addr);
+ its_encode_vmapp_default_db(cmd, desc->its_vmapp_cmd.vpe->vpe_db_lpi);
+
+out:
its_fixup_cmd(cmd);
return valid_vpe(its, desc->its_vmapp_cmd.vpe);
@@ -645,7 +734,7 @@ static struct its_vpe *its_build_vmapti_cmd(struct its_node *its,
{
u32 db;
- if (desc->its_vmapti_cmd.db_enabled)
+ if (!is_v4_1(its) && desc->its_vmapti_cmd.db_enabled)
db = desc->its_vmapti_cmd.vpe->vpe_db_lpi;
else
db = 1023;
@@ -668,7 +757,7 @@ static struct its_vpe *its_build_vmovi_cmd(struct its_node *its,
{
u32 db;
- if (desc->its_vmovi_cmd.db_enabled)
+ if (!is_v4_1(its) && desc->its_vmovi_cmd.db_enabled)
db = desc->its_vmovi_cmd.vpe->vpe_db_lpi;
else
db = 1023;
@@ -698,6 +787,11 @@ static struct its_vpe *its_build_vmovp_cmd(struct its_node *its,
its_encode_vpeid(cmd, desc->its_vmovp_cmd.vpe->vpe_id);
its_encode_target(cmd, target);
+ if (is_v4_1(its)) {
+ its_encode_db(cmd, true);
+ its_encode_vmovp_default_db(cmd, desc->its_vmovp_cmd.vpe->vpe_db_lpi);
+ }
+
its_fixup_cmd(cmd);
return valid_vpe(its, desc->its_vmovp_cmd.vpe);
@@ -757,6 +851,21 @@ static struct its_vpe *its_build_vclear_cmd(struct its_node *its,
return valid_vpe(its, map->vpe);
}
+static struct its_vpe *its_build_invdb_cmd(struct its_node *its,
+ struct its_cmd_block *cmd,
+ struct its_cmd_desc *desc)
+{
+ if (WARN_ON(!is_v4_1(its)))
+ return NULL;
+
+ its_encode_cmd(cmd, GITS_CMD_INVDB);
+ its_encode_vpeid(cmd, desc->its_invdb_cmd.vpe->vpe_id);
+
+ its_fixup_cmd(cmd);
+
+ return valid_vpe(its, desc->its_invdb_cmd.vpe);
+}
+
static u64 its_cmd_ptr_to_offset(struct its_node *its,
struct its_cmd_block *ptr)
{
@@ -1165,20 +1274,17 @@ static void its_send_vclear(struct its_device *dev, u32 event_id)
its_send_single_vcommand(dev->its, its_build_vclear_cmd, &desc);
}
-/*
- * irqchip functions - assumes MSI, mostly.
- */
-static struct its_vlpi_map *get_vlpi_map(struct irq_data *d)
+static void its_send_invdb(struct its_node *its, struct its_vpe *vpe)
{
- struct its_device *its_dev = irq_data_get_irq_chip_data(d);
- u32 event = its_get_event_id(d);
-
- if (!irqd_is_forwarded_to_vcpu(d))
- return NULL;
+ struct its_cmd_desc desc;
- return dev_event_to_vlpi_map(its_dev, event);
+ desc.its_invdb_cmd.vpe = vpe;
+ its_send_single_vcommand(its, its_build_invdb_cmd, &desc);
}
+/*
+ * irqchip functions - assumes MSI, mostly.
+ */
static void lpi_write_config(struct irq_data *d, u8 clr, u8 set)
{
struct its_vlpi_map *map = get_vlpi_map(d);
@@ -1221,13 +1327,25 @@ static void wait_for_syncr(void __iomem *rdbase)
static void direct_lpi_inv(struct irq_data *d)
{
- struct its_collection *col;
+ struct its_vlpi_map *map = get_vlpi_map(d);
void __iomem *rdbase;
+ u64 val;
+
+ if (map) {
+ struct its_device *its_dev = irq_data_get_irq_chip_data(d);
+
+ WARN_ON(!is_v4_1(its_dev->its));
+
+ val = GICR_INVLPIR_V;
+ val |= FIELD_PREP(GICR_INVLPIR_VPEID, map->vpe->vpe_id);
+ val |= FIELD_PREP(GICR_INVLPIR_INTID, map->vintid);
+ } else {
+ val = d->hwirq;
+ }
/* Target the redistributor this LPI is currently routed to */
- col = irq_to_col(d);
- rdbase = per_cpu_ptr(gic_rdists->rdist, col->col_id)->rd_base;
- gic_write_lpir(d->hwirq, rdbase + GICR_INVLPIR);
+ rdbase = per_cpu_ptr(gic_rdists->rdist, irq_to_cpuid(d))->rd_base;
+ gic_write_lpir(val, rdbase + GICR_INVLPIR);
wait_for_syncr(rdbase);
}
@@ -1237,7 +1355,8 @@ static void lpi_update_config(struct irq_data *d, u8 clr, u8 set)
struct its_device *its_dev = irq_data_get_irq_chip_data(d);
lpi_write_config(d, clr, set);
- if (gic_rdists->has_direct_lpi && !irqd_is_forwarded_to_vcpu(d))
+ if (gic_rdists->has_direct_lpi &&
+ (is_v4_1(its_dev->its) || !irqd_is_forwarded_to_vcpu(d)))
direct_lpi_inv(d);
else if (!irqd_is_forwarded_to_vcpu(d))
its_send_inv(its_dev, its_get_event_id(d));
@@ -1251,6 +1370,13 @@ static void its_vlpi_set_doorbell(struct irq_data *d, bool enable)
u32 event = its_get_event_id(d);
struct its_vlpi_map *map;
+ /*
+ * GICv4.1 does away with the per-LPI nonsense, nothing to do
+ * here.
+ */
+ if (is_v4_1(its_dev->its))
+ return;
+
map = dev_event_to_vlpi_map(its_dev, event);
if (map->db_enabled == enable)
@@ -2090,6 +2216,65 @@ static bool its_parse_indirect_baser(struct its_node *its,
return indirect;
}
+static u32 compute_common_aff(u64 val)
+{
+ u32 aff, clpiaff;
+
+ aff = FIELD_GET(GICR_TYPER_AFFINITY, val);
+ clpiaff = FIELD_GET(GICR_TYPER_COMMON_LPI_AFF, val);
+
+ return aff & ~(GENMASK(31, 0) >> (clpiaff * 8));
+}
+
+static u32 compute_its_aff(struct its_node *its)
+{
+ u64 val;
+ u32 svpet;
+
+ /*
+ * Reencode the ITS SVPET and MPIDR as a GICR_TYPER, and compute
+ * the resulting affinity. We then use that to see if this match
+	 * the resulting affinity. We then use that to see if this matches
+ */
+ svpet = FIELD_GET(GITS_TYPER_SVPET, its->typer);
+ val = FIELD_PREP(GICR_TYPER_COMMON_LPI_AFF, svpet);
+ val |= FIELD_PREP(GICR_TYPER_AFFINITY, its->mpidr);
+ return compute_common_aff(val);
+}
+
+static struct its_node *find_sibling_its(struct its_node *cur_its)
+{
+ struct its_node *its;
+ u32 aff;
+
+ if (!FIELD_GET(GITS_TYPER_SVPET, cur_its->typer))
+ return NULL;
+
+ aff = compute_its_aff(cur_its);
+
+ list_for_each_entry(its, &its_nodes, entry) {
+ u64 baser;
+
+ if (!is_v4_1(its) || its == cur_its)
+ continue;
+
+ if (!FIELD_GET(GITS_TYPER_SVPET, its->typer))
+ continue;
+
+ if (aff != compute_its_aff(its))
+ continue;
+
+ /* GICv4.1 guarantees that the vPE table is GITS_BASER2 */
+ baser = its->tables[2].val;
+ if (!(baser & GITS_BASER_VALID))
+ continue;
+
+ return its;
+ }
+
+ return NULL;
+}
+
static void its_free_tables(struct its_node *its)
{
int i;
@@ -2132,6 +2317,17 @@ static int its_alloc_tables(struct its_node *its)
break;
case GITS_BASER_TYPE_VCPU:
+ if (is_v4_1(its)) {
+ struct its_node *sibling;
+
+ WARN_ON(i != 2);
+ if ((sibling = find_sibling_its(its))) {
+ *baser = sibling->tables[2];
+ its_write_baser(its, baser, baser->val);
+ continue;
+ }
+ }
+
indirect = its_parse_indirect_baser(its, baser,
psz, &order,
ITS_MAX_VPEID_BITS);
@@ -2153,6 +2349,220 @@ static int its_alloc_tables(struct its_node *its)
return 0;
}
+static u64 inherit_vpe_l1_table_from_its(void)
+{
+ struct its_node *its;
+ u64 val;
+ u32 aff;
+
+ val = gic_read_typer(gic_data_rdist_rd_base() + GICR_TYPER);
+ aff = compute_common_aff(val);
+
+ list_for_each_entry(its, &its_nodes, entry) {
+ u64 baser, addr;
+
+ if (!is_v4_1(its))
+ continue;
+
+ if (!FIELD_GET(GITS_TYPER_SVPET, its->typer))
+ continue;
+
+ if (aff != compute_its_aff(its))
+ continue;
+
+ /* GICv4.1 guarantees that the vPE table is GITS_BASER2 */
+ baser = its->tables[2].val;
+ if (!(baser & GITS_BASER_VALID))
+ continue;
+
+ /* We have a winner! */
+ val = GICR_VPROPBASER_4_1_VALID;
+ if (baser & GITS_BASER_INDIRECT)
+ val |= GICR_VPROPBASER_4_1_INDIRECT;
+ val |= FIELD_PREP(GICR_VPROPBASER_4_1_PAGE_SIZE,
+ FIELD_GET(GITS_BASER_PAGE_SIZE_MASK, baser));
+ switch (FIELD_GET(GITS_BASER_PAGE_SIZE_MASK, baser)) {
+ case GIC_PAGE_SIZE_64K:
+ addr = GITS_BASER_ADDR_48_to_52(baser);
+ break;
+ default:
+ addr = baser & GENMASK_ULL(47, 12);
+ break;
+ }
+ val |= FIELD_PREP(GICR_VPROPBASER_4_1_ADDR, addr >> 12);
+ val |= FIELD_PREP(GICR_VPROPBASER_SHAREABILITY_MASK,
+ FIELD_GET(GITS_BASER_SHAREABILITY_MASK, baser));
+ val |= FIELD_PREP(GICR_VPROPBASER_INNER_CACHEABILITY_MASK,
+ FIELD_GET(GITS_BASER_INNER_CACHEABILITY_MASK, baser));
+ val |= FIELD_PREP(GICR_VPROPBASER_4_1_SIZE, GITS_BASER_NR_PAGES(baser) - 1);
+
+ return val;
+ }
+
+ return 0;
+}
+
+static u64 inherit_vpe_l1_table_from_rd(cpumask_t **mask)
+{
+ u32 aff;
+ u64 val;
+ int cpu;
+
+ val = gic_read_typer(gic_data_rdist_rd_base() + GICR_TYPER);
+ aff = compute_common_aff(val);
+
+ for_each_possible_cpu(cpu) {
+ void __iomem *base = gic_data_rdist_cpu(cpu)->rd_base;
+ u32 tmp;
+
+ if (!base || cpu == smp_processor_id())
+ continue;
+
+ val = gic_read_typer(base + GICR_TYPER);
+ tmp = compute_common_aff(val);
+ if (tmp != aff)
+ continue;
+
+ /*
+ * At this point, we have a victim. This particular CPU
+ * has already booted, and has an affinity that matches
+ * ours wrt CommonLPIAff. Let's use its own VPROPBASER.
+ * Make sure we don't write the Z bit in that case.
+ */
+ val = gits_read_vpropbaser(base + SZ_128K + GICR_VPROPBASER);
+ val &= ~GICR_VPROPBASER_4_1_Z;
+
+ *mask = gic_data_rdist_cpu(cpu)->vpe_table_mask;
+
+ return val;
+ }
+
+ return 0;
+}
+
+static int allocate_vpe_l1_table(void)
+{
+ void __iomem *vlpi_base = gic_data_rdist_vlpi_base();
+ u64 val, gpsz, npg, pa;
+ unsigned int psz = SZ_64K;
+ unsigned int np, epp, esz;
+ struct page *page;
+
+ if (!gic_rdists->has_rvpeid)
+ return 0;
+
+ /*
+	 * If VPENDBASER.Valid is set, disable any previously programmed
+ * VPE by setting PendingLast while clearing Valid. This has the
+ * effect of making sure no doorbell will be generated and we can
+ * then safely clear VPROPBASER.Valid.
+ */
+ if (gits_read_vpendbaser(vlpi_base + GICR_VPENDBASER) & GICR_VPENDBASER_Valid)
+ gits_write_vpendbaser(GICR_VPENDBASER_PendingLast,
+ vlpi_base + GICR_VPENDBASER);
+
+ /*
+ * If we can inherit the configuration from another RD, let's do
+ * so. Otherwise, we have to go through the allocation process. We
+ * assume that all RDs have the exact same requirements, as
+ * nothing will work otherwise.
+ */
+ val = inherit_vpe_l1_table_from_rd(&gic_data_rdist()->vpe_table_mask);
+ if (val & GICR_VPROPBASER_4_1_VALID)
+ goto out;
+
+ gic_data_rdist()->vpe_table_mask = kzalloc(sizeof(cpumask_t), GFP_KERNEL);
+ if (!gic_data_rdist()->vpe_table_mask)
+ return -ENOMEM;
+
+ val = inherit_vpe_l1_table_from_its();
+ if (val & GICR_VPROPBASER_4_1_VALID)
+ goto out;
+
+ /* First probe the page size */
+ val = FIELD_PREP(GICR_VPROPBASER_4_1_PAGE_SIZE, GIC_PAGE_SIZE_64K);
+ gits_write_vpropbaser(val, vlpi_base + GICR_VPROPBASER);
+ val = gits_read_vpropbaser(vlpi_base + GICR_VPROPBASER);
+ gpsz = FIELD_GET(GICR_VPROPBASER_4_1_PAGE_SIZE, val);
+ esz = FIELD_GET(GICR_VPROPBASER_4_1_ENTRY_SIZE, val);
+
+ switch (gpsz) {
+ default:
+ gpsz = GIC_PAGE_SIZE_4K;
+ /* fall through */
+ case GIC_PAGE_SIZE_4K:
+ psz = SZ_4K;
+ break;
+ case GIC_PAGE_SIZE_16K:
+ psz = SZ_16K;
+ break;
+ case GIC_PAGE_SIZE_64K:
+ psz = SZ_64K;
+ break;
+ }
+
+ /*
+ * Start populating the register from scratch, including RO fields
+ * (which we want to print in debug cases...)
+ */
+ val = 0;
+ val |= FIELD_PREP(GICR_VPROPBASER_4_1_PAGE_SIZE, gpsz);
+ val |= FIELD_PREP(GICR_VPROPBASER_4_1_ENTRY_SIZE, esz);
+
+ /* How many entries per GIC page? */
+ esz++;
+ epp = psz / (esz * SZ_8);
+
+ /*
+ * If we need more than just a single L1 page, flag the table
+ * as indirect and compute the number of required L1 pages.
+ */
+ if (epp < ITS_MAX_VPEID) {
+ int nl2;
+
+ val |= GICR_VPROPBASER_4_1_INDIRECT;
+
+ /* Number of L2 pages required to cover the VPEID space */
+ nl2 = DIV_ROUND_UP(ITS_MAX_VPEID, epp);
+
+ /* Number of L1 pages to point to the L2 pages */
+ npg = DIV_ROUND_UP(nl2 * SZ_8, psz);
+ } else {
+ npg = 1;
+ }
+
+ val |= FIELD_PREP(GICR_VPROPBASER_4_1_SIZE, npg);
+
+ /* Right, that's the number of CPU pages we need for L1 */
+ np = DIV_ROUND_UP(npg * psz, PAGE_SIZE);
+
+ pr_debug("np = %d, npg = %lld, psz = %d, epp = %d, esz = %d\n",
+ np, npg, psz, epp, esz);
+ page = alloc_pages(GFP_KERNEL | __GFP_ZERO, get_order(np * PAGE_SIZE));
+ if (!page)
+ return -ENOMEM;
+
+ gic_data_rdist()->vpe_l1_page = page;
+ pa = virt_to_phys(page_address(page));
+ WARN_ON(!IS_ALIGNED(pa, psz));
+
+ val |= FIELD_PREP(GICR_VPROPBASER_4_1_ADDR, pa >> 12);
+ val |= GICR_VPROPBASER_RaWb;
+ val |= GICR_VPROPBASER_InnerShareable;
+ val |= GICR_VPROPBASER_4_1_Z;
+ val |= GICR_VPROPBASER_4_1_VALID;
+
+out:
+ gits_write_vpropbaser(val, vlpi_base + GICR_VPROPBASER);
+ cpumask_set_cpu(smp_processor_id(), gic_data_rdist()->vpe_table_mask);
+
+ pr_debug("CPU%d: VPROPBASER = %llx %*pbl\n",
+ smp_processor_id(), val,
+ cpumask_pr_args(gic_data_rdist()->vpe_table_mask));
+
+ return 0;
+}
+
static int its_alloc_collections(struct its_node *its)
{
int i;
@@ -2244,7 +2654,7 @@ static int __init allocate_lpi_tables(void)
return 0;
}
-static u64 its_clear_vpend_valid(void __iomem *vlpi_base)
+static u64 its_clear_vpend_valid(void __iomem *vlpi_base, u64 clr, u64 set)
{
u32 count = 1000000; /* 1s! */
bool clean;
@@ -2252,6 +2662,8 @@ static u64 its_clear_vpend_valid(void __iomem *vlpi_base)
val = gits_read_vpendbaser(vlpi_base + GICR_VPENDBASER);
val &= ~GICR_VPENDBASER_Valid;
+ val &= ~clr;
+ val |= set;
gits_write_vpendbaser(val, vlpi_base + GICR_VPENDBASER);
do {
@@ -2264,6 +2676,11 @@ static u64 its_clear_vpend_valid(void __iomem *vlpi_base)
}
} while (!clean && count);
+ if (unlikely(val & GICR_VPENDBASER_Dirty)) {
+ pr_err_ratelimited("ITS virtual pending table not cleaning\n");
+ val |= GICR_VPENDBASER_PendingLast;
+ }
+
return val;
}
@@ -2352,7 +2769,7 @@ static void its_cpu_init_lpis(void)
val |= GICR_CTLR_ENABLE_LPIS;
writel_relaxed(val, rbase + GICR_CTLR);
- if (gic_rdists->has_vlpis) {
+ if (gic_rdists->has_vlpis && !gic_rdists->has_rvpeid) {
void __iomem *vlpi_base = gic_data_rdist_vlpi_base();
/*
@@ -2372,10 +2789,20 @@ static void its_cpu_init_lpis(void)
* ancient programming gets left in and has possibility of
* corrupting memory.
*/
- val = its_clear_vpend_valid(vlpi_base);
+ val = its_clear_vpend_valid(vlpi_base, 0, 0);
WARN_ON(val & GICR_VPENDBASER_Dirty);
}
+ if (allocate_vpe_l1_table()) {
+ /*
+ * If the allocation has failed, we're in massive trouble.
+ * Disable direct injection, and pray that no VM was
+ * already running...
+ */
+ gic_rdists->has_rvpeid = false;
+ gic_rdists->has_vlpis = false;
+ }
+
/* Make sure the GIC has seen the above */
dsb(sy);
out:
@@ -2859,7 +3286,7 @@ static const struct irq_domain_ops its_domain_ops = {
/*
* This is insane.
*
- * If a GICv4 doesn't implement Direct LPIs (which is extremely
+ * If a GICv4.0 doesn't implement Direct LPIs (which is extremely
* likely), the only way to perform an invalidate is to use a fake
* device to issue an INV command, implying that the LPI has first
* been mapped to some event on that device. Since this is not exactly
@@ -2867,9 +3294,20 @@ static const struct irq_domain_ops its_domain_ops = {
* only issue an UNMAP if we're short on available slots.
*
* Broken by design(tm).
+ *
+ * GICv4.1, on the other hand, mandates that we're able to invalidate
+ * by writing to a MMIO register. It doesn't implement the whole of
+ * DirectLPI, but that's good enough. And most of the time, we don't
+ * even have to invalidate anything, as the redistributor can be told
+ * whether to generate a doorbell or not (we thus leave it enabled,
+ * always).
*/
static void its_vpe_db_proxy_unmap_locked(struct its_vpe *vpe)
{
+ /* GICv4.1 doesn't use a proxy, so nothing to do here */
+ if (gic_rdists->has_rvpeid)
+ return;
+
/* Already unmapped? */
if (vpe->vpe_proxy_event == -1)
return;
@@ -2892,6 +3330,10 @@ static void its_vpe_db_proxy_unmap_locked(struct its_vpe *vpe)
static void its_vpe_db_proxy_unmap(struct its_vpe *vpe)
{
+ /* GICv4.1 doesn't use a proxy, so nothing to do here */
+ if (gic_rdists->has_rvpeid)
+ return;
+
if (!gic_rdists->has_direct_lpi) {
unsigned long flags;
@@ -2903,6 +3345,10 @@ static void its_vpe_db_proxy_unmap(struct its_vpe *vpe)
static void its_vpe_db_proxy_map_locked(struct its_vpe *vpe)
{
+ /* GICv4.1 doesn't use a proxy, so nothing to do here */
+ if (gic_rdists->has_rvpeid)
+ return;
+
/* Already mapped? */
if (vpe->vpe_proxy_event != -1)
return;
@@ -2925,6 +3371,10 @@ static void its_vpe_db_proxy_move(struct its_vpe *vpe, int from, int to)
unsigned long flags;
struct its_collection *target_col;
+ /* GICv4.1 doesn't use a proxy, so nothing to do here */
+ if (gic_rdists->has_rvpeid)
+ return;
+
if (gic_rdists->has_direct_lpi) {
void __iomem *rdbase;
@@ -2951,7 +3401,7 @@ static int its_vpe_set_affinity(struct irq_data *d,
bool force)
{
struct its_vpe *vpe = irq_data_get_irq_chip_data(d);
- int cpu = cpumask_first(mask_val);
+ int from, cpu = cpumask_first(mask_val);
/*
* Changing affinity is mega expensive, so let's be as lazy as
@@ -2959,14 +3409,24 @@ static int its_vpe_set_affinity(struct irq_data *d,
* into the proxy device, we need to move the doorbell
* interrupt to its new location.
*/
- if (vpe->col_idx != cpu) {
- int from = vpe->col_idx;
+ if (vpe->col_idx == cpu)
+ goto out;
- vpe->col_idx = cpu;
- its_send_vmovp(vpe);
- its_vpe_db_proxy_move(vpe, from, cpu);
- }
+ from = vpe->col_idx;
+ vpe->col_idx = cpu;
+ /*
+ * GICv4.1 allows us to skip VMOVP if moving to a cpu whose RD
+ * is sharing its VPE table with the current one.
+ */
+ if (gic_data_rdist_cpu(cpu)->vpe_table_mask &&
+ cpumask_test_cpu(from, gic_data_rdist_cpu(cpu)->vpe_table_mask))
+ goto out;
+
+ its_send_vmovp(vpe);
+ its_vpe_db_proxy_move(vpe, from, cpu);
+
+out:
irq_data_update_effective_affinity(d, cpumask_of(cpu));
return IRQ_SET_MASK_OK_DONE;
@@ -3009,16 +3469,10 @@ static void its_vpe_deschedule(struct its_vpe *vpe)
void __iomem *vlpi_base = gic_data_rdist_vlpi_base();
u64 val;
- val = its_clear_vpend_valid(vlpi_base);
+ val = its_clear_vpend_valid(vlpi_base, 0, 0);
- if (unlikely(val & GICR_VPENDBASER_Dirty)) {
- pr_err_ratelimited("ITS virtual pending table not cleaning\n");
- vpe->idai = false;
- vpe->pending_last = true;
- } else {
- vpe->idai = !!(val & GICR_VPENDBASER_IDAI);
- vpe->pending_last = !!(val & GICR_VPENDBASER_PendingLast);
- }
+ vpe->idai = !!(val & GICR_VPENDBASER_IDAI);
+ vpe->pending_last = !!(val & GICR_VPENDBASER_PendingLast);
}
static void its_vpe_invall(struct its_vpe *vpe)
@@ -3151,6 +3605,139 @@ static struct irq_chip its_vpe_irq_chip = {
.irq_set_vcpu_affinity = its_vpe_set_vcpu_affinity,
};
+static struct its_node *find_4_1_its(void)
+{
+ static struct its_node *its = NULL;
+
+ if (!its) {
+ list_for_each_entry(its, &its_nodes, entry) {
+ if (is_v4_1(its))
+ return its;
+ }
+
+ /* Oops? */
+ its = NULL;
+ }
+
+ return its;
+}
+
+static void its_vpe_4_1_send_inv(struct irq_data *d)
+{
+ struct its_vpe *vpe = irq_data_get_irq_chip_data(d);
+ struct its_node *its;
+
+ /*
+ * GICv4.1 wants doorbells to be invalidated using the
+ * INVDB command in order to be broadcast to all RDs. Send
+ * it to the first valid ITS, and let the HW do its magic.
+ */
+ its = find_4_1_its();
+ if (its)
+ its_send_invdb(its, vpe);
+}
+
+static void its_vpe_4_1_mask_irq(struct irq_data *d)
+{
+ lpi_write_config(d->parent_data, LPI_PROP_ENABLED, 0);
+ its_vpe_4_1_send_inv(d);
+}
+
+static void its_vpe_4_1_unmask_irq(struct irq_data *d)
+{
+ lpi_write_config(d->parent_data, 0, LPI_PROP_ENABLED);
+ its_vpe_4_1_send_inv(d);
+}
+
+static void its_vpe_4_1_schedule(struct its_vpe *vpe,
+ struct its_cmd_info *info)
+{
+ void __iomem *vlpi_base = gic_data_rdist_vlpi_base();
+ u64 val = 0;
+
+ /* Schedule the VPE */
+ val |= GICR_VPENDBASER_Valid;
+ val |= info->g0en ? GICR_VPENDBASER_4_1_VGRP0EN : 0;
+ val |= info->g1en ? GICR_VPENDBASER_4_1_VGRP1EN : 0;
+ val |= FIELD_PREP(GICR_VPENDBASER_4_1_VPEID, vpe->vpe_id);
+
+ gits_write_vpendbaser(val, vlpi_base + GICR_VPENDBASER);
+}
+
+static void its_vpe_4_1_deschedule(struct its_vpe *vpe,
+ struct its_cmd_info *info)
+{
+ void __iomem *vlpi_base = gic_data_rdist_vlpi_base();
+ u64 val;
+
+ if (info->req_db) {
+ /*
+ * vPE is going to block: make the vPE non-resident with
+ * PendingLast clear and DB set. The GIC guarantees that if
+ * we read-back PendingLast clear, then a doorbell will be
+ * delivered when an interrupt comes.
+ */
+ val = its_clear_vpend_valid(vlpi_base,
+ GICR_VPENDBASER_PendingLast,
+ GICR_VPENDBASER_4_1_DB);
+ vpe->pending_last = !!(val & GICR_VPENDBASER_PendingLast);
+ } else {
+ /*
+ * We're not blocking, so just make the vPE non-resident
+ * with PendingLast set, indicating that we'll be back.
+ */
+ val = its_clear_vpend_valid(vlpi_base,
+ 0,
+ GICR_VPENDBASER_PendingLast);
+ vpe->pending_last = true;
+ }
+}
+
+static void its_vpe_4_1_invall(struct its_vpe *vpe)
+{
+ void __iomem *rdbase;
+ u64 val;
+
+ val = GICR_INVALLR_V;
+ val |= FIELD_PREP(GICR_INVALLR_VPEID, vpe->vpe_id);
+
+ /* Target the redistributor this vPE is currently known on */
+ rdbase = per_cpu_ptr(gic_rdists->rdist, vpe->col_idx)->rd_base;
+ gic_write_lpir(val, rdbase + GICR_INVALLR);
+}
+
+static int its_vpe_4_1_set_vcpu_affinity(struct irq_data *d, void *vcpu_info)
+{
+ struct its_vpe *vpe = irq_data_get_irq_chip_data(d);
+ struct its_cmd_info *info = vcpu_info;
+
+ switch (info->cmd_type) {
+ case SCHEDULE_VPE:
+ its_vpe_4_1_schedule(vpe, info);
+ return 0;
+
+ case DESCHEDULE_VPE:
+ its_vpe_4_1_deschedule(vpe, info);
+ return 0;
+
+ case INVALL_VPE:
+ its_vpe_4_1_invall(vpe);
+ return 0;
+
+ default:
+ return -EINVAL;
+ }
+}
+
+static struct irq_chip its_vpe_4_1_irq_chip = {
+ .name = "GICv4.1-vpe",
+ .irq_mask = its_vpe_4_1_mask_irq,
+ .irq_unmask = its_vpe_4_1_unmask_irq,
+ .irq_eoi = irq_chip_eoi_parent,
+ .irq_set_affinity = its_vpe_set_affinity,
+ .irq_set_vcpu_affinity = its_vpe_4_1_set_vcpu_affinity,
+};
+
static int its_vpe_id_alloc(void)
{
return ida_simple_get(&its_vpeid_ida, 0, ITS_MAX_VPEID, GFP_KERNEL);
@@ -3186,7 +3773,10 @@ static int its_vpe_init(struct its_vpe *vpe)
vpe->vpe_id = vpe_id;
vpe->vpt_page = vpt_page;
- vpe->vpe_proxy_event = -1;
+ if (gic_rdists->has_rvpeid)
+ atomic_set(&vpe->vmapp_count, 0);
+ else
+ vpe->vpe_proxy_event = -1;
return 0;
}
@@ -3228,6 +3818,7 @@ static void its_vpe_irq_domain_free(struct irq_domain *domain,
static int its_vpe_irq_domain_alloc(struct irq_domain *domain, unsigned int virq,
unsigned int nr_irqs, void *args)
{
+ struct irq_chip *irqchip = &its_vpe_irq_chip;
struct its_vm *vm = args;
unsigned long *bitmap;
struct page *vprop_page;
@@ -3255,6 +3846,9 @@ static int its_vpe_irq_domain_alloc(struct irq_domain *domain, unsigned int virq
vm->nr_db_lpis = nr_ids;
vm->vprop_page = vprop_page;
+ if (gic_rdists->has_rvpeid)
+ irqchip = &its_vpe_4_1_irq_chip;
+
for (i = 0; i < nr_irqs; i++) {
vm->vpes[i]->vpe_db_lpi = base + i;
err = its_vpe_init(vm->vpes[i]);
@@ -3265,7 +3859,7 @@ static int its_vpe_irq_domain_alloc(struct irq_domain *domain, unsigned int virq
if (err)
break;
irq_domain_set_hwirq_and_chip(domain, virq + i, i,
- &its_vpe_irq_chip, vm->vpes[i]);
+ irqchip, vm->vpes[i]);
set_bit(i, bitmap);
}
@@ -3778,6 +4372,14 @@ static int __init its_probe_one(struct resource *res,
} else {
pr_info("ITS@%pa: Single VMOVP capable\n", &res->start);
}
+
+ if (is_v4_1(its)) {
+ u32 svpet = FIELD_GET(GITS_TYPER_SVPET, typer);
+ its->mpidr = readl_relaxed(its_base + GITS_MPIDR);
+
+ pr_info("ITS@%pa: Using GICv4.1 mode %08x %08x\n",
+ &res->start, its->mpidr, svpet);
+ }
}
its->numa_node = numa_node;
@@ -4138,6 +4740,8 @@ int __init its_init(struct fwnode_handle *handle, struct rdists *rdists,
bool has_v4 = false;
int err;
+ gic_rdists = rdists;
+
its_parent = parent_domain;
of_node = to_of_node(handle);
if (of_node)
@@ -4150,8 +4754,6 @@ int __init its_init(struct fwnode_handle *handle, struct rdists *rdists,
return -ENXIO;
}
- gic_rdists = rdists;
-
err = allocate_lpi_tables();
if (err)
return err;
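allocate_vpe_l1_table() in the hunk above sizes the GICv4.1 vPE table from two probed quantities: entries per GIC page, epp = psz / (esz * 8), and, when a single page cannot cover ITS_MAX_VPEID, an indirect layout with nl2 = DIV_ROUND_UP(vpeids, epp) L2 pages and npg = DIV_ROUND_UP(nl2 * 8, psz) L1 pages. A runnable check of that arithmetic with illustrative values (16-bit VPEID space, 4K GIC pages, 16-byte entries):

#include <stdio.h>

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

int main(void)
{
	unsigned int max_vpeid = 1u << 16; /* illustrative: 16-bit VPEIDs */
	unsigned int psz = 4096;           /* probed GIC page size: 4K */
	unsigned int esz = 2;              /* entry size, in 8-byte units */

	/* How many vPE table entries fit in one GIC page? */
	unsigned int epp = psz / (esz * 8);

	unsigned int npg;
	if (epp < max_vpeid) {
		/* Indirect: L1 entries (8 bytes each) point at L2 pages. */
		unsigned int nl2 = DIV_ROUND_UP(max_vpeid, epp);
		npg = DIV_ROUND_UP(nl2 * 8, psz);
	} else {
		npg = 1;
	}

	/* 4096/16 = 256 entries/page -> 256 L2 pages -> 1 L1 page */
	printf("epp=%u npg=%u\n", epp, npg);
	return 0;
}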
diff --git a/drivers/irqchip/irq-gic-v3.c b/drivers/irqchip/irq-gic-v3.c
index d6218012097b..286f98222878 100644
--- a/drivers/irqchip/irq-gic-v3.c
+++ b/drivers/irqchip/irq-gic-v3.c
@@ -858,8 +858,21 @@ static int __gic_update_rdist_properties(struct redist_region *region,
void __iomem *ptr)
{
u64 typer = gic_read_typer(ptr + GICR_TYPER);
+
gic_data.rdists.has_vlpis &= !!(typer & GICR_TYPER_VLPIS);
- gic_data.rdists.has_direct_lpi &= !!(typer & GICR_TYPER_DirectLPIS);
+
+ /* RVPEID implies some form of DirectLPI, no matter what the doc says... :-/ */
+ gic_data.rdists.has_rvpeid &= !!(typer & GICR_TYPER_RVPEID);
+ gic_data.rdists.has_direct_lpi &= (!!(typer & GICR_TYPER_DirectLPIS) |
+ gic_data.rdists.has_rvpeid);
+
+ /* Detect non-sensical configurations */
+ if (WARN_ON_ONCE(gic_data.rdists.has_rvpeid && !gic_data.rdists.has_vlpis)) {
+ gic_data.rdists.has_direct_lpi = false;
+ gic_data.rdists.has_vlpis = false;
+ gic_data.rdists.has_rvpeid = false;
+ }
+
gic_data.ppi_nr = min(GICR_TYPER_NR_PPIS(typer), gic_data.ppi_nr);
return 1;
@@ -872,9 +885,10 @@ static void gic_update_rdist_properties(void)
if (WARN_ON(gic_data.ppi_nr == UINT_MAX))
gic_data.ppi_nr = 0;
pr_info("%d PPIs implemented\n", gic_data.ppi_nr);
- pr_info("%sVLPI support, %sdirect LPI support\n",
+ pr_info("%sVLPI support, %sdirect LPI support, %sRVPEID support\n",
!gic_data.rdists.has_vlpis ? "no " : "",
- !gic_data.rdists.has_direct_lpi ? "no " : "");
+ !gic_data.rdists.has_direct_lpi ? "no " : "",
+ !gic_data.rdists.has_rvpeid ? "no " : "");
}
/* Check whether it's single security state view */
@@ -1562,10 +1576,14 @@ static int __init gic_init_bases(void __iomem *dist_base,
pr_info("%d SPIs implemented\n", GIC_LINE_NR - 32);
pr_info("%d Extended SPIs implemented\n", GIC_ESPI_NR);
+
+ gic_data.rdists.gicd_typer2 = readl_relaxed(gic_data.dist_base + GICD_TYPER2);
+
gic_data.domain = irq_domain_create_tree(handle, &gic_irq_domain_ops,
&gic_data);
irq_domain_update_bus_token(gic_data.domain, DOMAIN_BUS_WIRED);
gic_data.rdists.rdist = alloc_percpu(typeof(*gic_data.rdists.rdist));
+ gic_data.rdists.has_rvpeid = true;
gic_data.rdists.has_vlpis = true;
gic_data.rdists.has_direct_lpi = true;
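__gic_update_rdist_properties() runs once per redistributor, which is why each has_* flag starts out true at init time and is AND-ed down by every RD visited; the WARN_ON_ONCE() then clamps the one combination that cannot happen (RVPEID without VLPIS) to everything-off. A small runnable sketch of that accumulate-then-sanity-check idiom, with illustrative bit positions:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define TYPER_VLPIS  (1u << 1)   /* illustrative bit layout */
#define TYPER_RVPEID (1u << 2)

struct caps { bool vlpis, rvpeid; };

static void accumulate(struct caps *c, uint32_t typer)
{
	/* A feature survives only if every redistributor has it. */
	c->vlpis  &= !!(typer & TYPER_VLPIS);
	c->rvpeid &= !!(typer & TYPER_RVPEID);
}

int main(void)
{
	struct caps c = { .vlpis = true, .rvpeid = true };
	uint32_t rds[] = { TYPER_VLPIS | TYPER_RVPEID, TYPER_RVPEID };

	for (unsigned int i = 0; i < 2; i++)
		accumulate(&c, rds[i]);

	/* RVPEID without VLPIS makes no sense: clamp both off. */
	if (c.rvpeid && !c.vlpis)
		c.rvpeid = c.vlpis = false;

	printf("vlpis=%d rvpeid=%d\n", c.vlpis, c.rvpeid);
	return 0;
}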
diff --git a/drivers/irqchip/irq-imx-intmux.c b/drivers/irqchip/irq-imx-intmux.c
new file mode 100644
index 000000000000..c27577c81126
--- /dev/null
+++ b/drivers/irqchip/irq-imx-intmux.c
@@ -0,0 +1,309 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright 2017 NXP
+
+/*                     INTMUX Block Diagram
+ *
+ *                               ________________
+ * interrupt source #  0  +---->|                |
+ *                        |     |                |
+ * interrupt source #  1  +++-->|                |
+ *            ...         | |   |   channel # 0  |--------->interrupt out # 0
+ *            ...         | |   |                |
+ *            ...         | |   |                |
+ * interrupt source # X-1 +++-->|________________|
+ *                        | | |
+ *                        | | |
+ *                        | | |  ________________
+ *                        +---->|                |
+ *                        | | | |                |
+ *                        | +-->|                |
+ *                        | | | |   channel # 1  |--------->interrupt out # 1
+ *                        | | +>|                |
+ *                        | | | |                |
+ *                        | | | |________________|
+ *                        | | |
+ *                        | | |
+ *                        | | |       ...
+ *                        | | |       ...
+ *                        | | |
+ *                        | | |  ________________
+ *                        +---->|                |
+ *                          | | |                |
+ *                          +-->|                |
+ *                            | |   channel # N  |--------->interrupt out # N
+ *                            +>|                |
+ *                              |                |
+ *                              |________________|
+ *
+ *
+ * N: Interrupt Channel Instance Number (N=7)
+ * X: Interrupt Source Number for each channel (X=32)
+ *
+ * The INTMUX interrupt multiplexer has 8 channels, each channel receives 32
+ * interrupt sources and generates 1 interrupt output.
+ *
+ */
+
+#include <linux/clk.h>
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/irqchip/chained_irq.h>
+#include <linux/irqdomain.h>
+#include <linux/kernel.h>
+#include <linux/of_irq.h>
+#include <linux/of_platform.h>
+#include <linux/spinlock.h>
+
+#define CHANIER(n) (0x10 + (0x40 * n))
+#define CHANIPR(n) (0x20 + (0x40 * n))
+
+#define CHAN_MAX_NUM 0x8
+
+struct intmux_irqchip_data {
+ int chanidx;
+ int irq;
+ struct irq_domain *domain;
+};
+
+struct intmux_data {
+ raw_spinlock_t lock;
+ void __iomem *regs;
+ struct clk *ipg_clk;
+ int channum;
+ struct intmux_irqchip_data irqchip_data[];
+};
+
+static void imx_intmux_irq_mask(struct irq_data *d)
+{
+ struct intmux_irqchip_data *irqchip_data = d->chip_data;
+ int idx = irqchip_data->chanidx;
+ struct intmux_data *data = container_of(irqchip_data, struct intmux_data,
+ irqchip_data[idx]);
+ unsigned long flags;
+ void __iomem *reg;
+ u32 val;
+
+ raw_spin_lock_irqsave(&data->lock, flags);
+ reg = data->regs + CHANIER(idx);
+ val = readl_relaxed(reg);
+ /* disable the interrupt source of this channel */
+ val &= ~BIT(d->hwirq);
+ writel_relaxed(val, reg);
+ raw_spin_unlock_irqrestore(&data->lock, flags);
+}
+
+static void imx_intmux_irq_unmask(struct irq_data *d)
+{
+ struct intmux_irqchip_data *irqchip_data = d->chip_data;
+ int idx = irqchip_data->chanidx;
+ struct intmux_data *data = container_of(irqchip_data, struct intmux_data,
+ irqchip_data[idx]);
+ unsigned long flags;
+ void __iomem *reg;
+ u32 val;
+
+ raw_spin_lock_irqsave(&data->lock, flags);
+ reg = data->regs + CHANIER(idx);
+ val = readl_relaxed(reg);
+ /* enable the interrupt source of this channel */
+ val |= BIT(d->hwirq);
+ writel_relaxed(val, reg);
+ raw_spin_unlock_irqrestore(&data->lock, flags);
+}
+
+static struct irq_chip imx_intmux_irq_chip = {
+ .name = "intmux",
+ .irq_mask = imx_intmux_irq_mask,
+ .irq_unmask = imx_intmux_irq_unmask,
+};
+
+static int imx_intmux_irq_map(struct irq_domain *h, unsigned int irq,
+ irq_hw_number_t hwirq)
+{
+ irq_set_chip_data(irq, h->host_data);
+ irq_set_chip_and_handler(irq, &imx_intmux_irq_chip, handle_level_irq);
+
+ return 0;
+}
+
+static int imx_intmux_irq_xlate(struct irq_domain *d, struct device_node *node,
+ const u32 *intspec, unsigned int intsize,
+ unsigned long *out_hwirq, unsigned int *out_type)
+{
+ struct intmux_irqchip_data *irqchip_data = d->host_data;
+ int idx = irqchip_data->chanidx;
+ struct intmux_data *data = container_of(irqchip_data, struct intmux_data,
+ irqchip_data[idx]);
+
+ /*
+ * two cells needed in interrupt specifier:
+ * the 1st cell: hw interrupt number
+ * the 2nd cell: channel index
+ */
+ if (WARN_ON(intsize != 2))
+ return -EINVAL;
+
+ if (WARN_ON(intspec[1] >= data->channum))
+ return -EINVAL;
+
+ *out_hwirq = intspec[0];
+ *out_type = IRQ_TYPE_LEVEL_HIGH;
+
+ return 0;
+}
+
+static int imx_intmux_irq_select(struct irq_domain *d, struct irq_fwspec *fwspec,
+ enum irq_domain_bus_token bus_token)
+{
+ struct intmux_irqchip_data *irqchip_data = d->host_data;
+
+ /* Not for us */
+ if (fwspec->fwnode != d->fwnode)
+ return false;
+
+ return irqchip_data->chanidx == fwspec->param[1];
+}
+
+static const struct irq_domain_ops imx_intmux_domain_ops = {
+ .map = imx_intmux_irq_map,
+ .xlate = imx_intmux_irq_xlate,
+ .select = imx_intmux_irq_select,
+};
+
+static void imx_intmux_irq_handler(struct irq_desc *desc)
+{
+ struct intmux_irqchip_data *irqchip_data = irq_desc_get_handler_data(desc);
+ int idx = irqchip_data->chanidx;
+ struct intmux_data *data = container_of(irqchip_data, struct intmux_data,
+ irqchip_data[idx]);
+ unsigned long irqstat;
+ int pos, virq;
+
+ chained_irq_enter(irq_desc_get_chip(desc), desc);
+
+ /* read the interrupt source pending status of this channel */
+ irqstat = readl_relaxed(data->regs + CHANIPR(idx));
+
+ for_each_set_bit(pos, &irqstat, 32) {
+ virq = irq_find_mapping(irqchip_data->domain, pos);
+ if (virq)
+ generic_handle_irq(virq);
+ }
+
+ chained_irq_exit(irq_desc_get_chip(desc), desc);
+}
+
+static int imx_intmux_probe(struct platform_device *pdev)
+{
+ struct device_node *np = pdev->dev.of_node;
+ struct irq_domain *domain;
+ struct intmux_data *data;
+ int channum;
+ int i, ret;
+
+ channum = platform_irq_count(pdev);
+ if (channum == -EPROBE_DEFER) {
+ return -EPROBE_DEFER;
+ } else if (channum > CHAN_MAX_NUM) {
+ dev_err(&pdev->dev, "supports up to %d multiplex channels\n",
+ CHAN_MAX_NUM);
+ return -EINVAL;
+ }
+
+ data = devm_kzalloc(&pdev->dev, sizeof(*data) +
+ channum * sizeof(data->irqchip_data[0]), GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
+ data->regs = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(data->regs)) {
+ dev_err(&pdev->dev, "failed to initialize reg\n");
+ return PTR_ERR(data->regs);
+ }
+
+ data->ipg_clk = devm_clk_get(&pdev->dev, "ipg");
+ if (IS_ERR(data->ipg_clk)) {
+ ret = PTR_ERR(data->ipg_clk);
+ if (ret != -EPROBE_DEFER)
+ dev_err(&pdev->dev, "failed to get ipg clk: %d\n", ret);
+ return ret;
+ }
+
+ data->channum = channum;
+ raw_spin_lock_init(&data->lock);
+
+ ret = clk_prepare_enable(data->ipg_clk);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to enable ipg clk: %d\n", ret);
+ return ret;
+ }
+
+ for (i = 0; i < channum; i++) {
+ data->irqchip_data[i].chanidx = i;
+
+ data->irqchip_data[i].irq = irq_of_parse_and_map(np, i);
+ if (data->irqchip_data[i].irq <= 0) {
+ ret = -EINVAL;
+ dev_err(&pdev->dev, "failed to get irq\n");
+ goto out;
+ }
+
+ domain = irq_domain_add_linear(np, 32, &imx_intmux_domain_ops,
+ &data->irqchip_data[i]);
+ if (!domain) {
+ ret = -ENOMEM;
+ dev_err(&pdev->dev, "failed to create IRQ domain\n");
+ goto out;
+ }
+ data->irqchip_data[i].domain = domain;
+
+ /* disable all interrupt sources of this channel firstly */
+ writel_relaxed(0, data->regs + CHANIER(i));
+
+ irq_set_chained_handler_and_data(data->irqchip_data[i].irq,
+ imx_intmux_irq_handler,
+ &data->irqchip_data[i]);
+ }
+
+ platform_set_drvdata(pdev, data);
+
+ return 0;
+out:
+ clk_disable_unprepare(data->ipg_clk);
+ return ret;
+}
+
+static int imx_intmux_remove(struct platform_device *pdev)
+{
+ struct intmux_data *data = platform_get_drvdata(pdev);
+ int i;
+
+ for (i = 0; i < data->channum; i++) {
+ /* disable all interrupt sources of this channel */
+ writel_relaxed(0, data->regs + CHANIER(i));
+
+ irq_set_chained_handler_and_data(data->irqchip_data[i].irq,
+ NULL, NULL);
+
+ irq_domain_remove(data->irqchip_data[i].domain);
+ }
+
+ clk_disable_unprepare(data->ipg_clk);
+
+ return 0;
+}
+
+static const struct of_device_id imx_intmux_id[] = {
+ { .compatible = "fsl,imx-intmux", },
+ { /* sentinel */ },
+};
+
+static struct platform_driver imx_intmux_driver = {
+ .driver = {
+ .name = "imx-intmux",
+ .of_match_table = imx_intmux_id,
+ },
+ .probe = imx_intmux_probe,
+ .remove = imx_intmux_remove,
+};
+builtin_platform_driver(imx_intmux_driver);
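Because the INTMUX driver registers one linear domain per channel on the same device node, a consumer names an interrupt with two cells, the source bit and the channel index, and the .select callback routes the fwspec to the right per-channel domain by matching param[1]. A runnable sketch of that channel match; the device-tree fragment in the comment is illustrative:

/*
 * A consumer in the device tree would reference a muxed line as:
 *
 *     interrupt-parent = <&intmux>;
 *     interrupts = <13 2>;    // source #13 on channel #2
 *
 * (illustrative binding; see the driver's xlate comment above)
 */
#include <stdbool.h>
#include <stdio.h>

struct fwspec { unsigned int param[2]; };

/* One "domain" per channel: select() matches on the channel cell. */
static bool select_domain(unsigned int domain_chanidx,
			  const struct fwspec *fwspec)
{
	return domain_chanidx == fwspec->param[1];
}

int main(void)
{
	struct fwspec spec = { .param = { 13, 2 } };

	for (unsigned int ch = 0; ch < 4; ch++)
		printf("channel %u %s\n", ch,
		       select_domain(ch, &spec) ? "selected" : "skipped");
	return 0;
}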
diff --git a/drivers/irqchip/irq-ingenic.c b/drivers/irqchip/irq-ingenic.c
index 01d18b39069e..c5589ee0dfb3 100644
--- a/drivers/irqchip/irq-ingenic.c
+++ b/drivers/irqchip/irq-ingenic.c
@@ -17,7 +17,6 @@
#include <linux/delay.h>
#include <asm/io.h>
-#include <asm/mach-jz4740/irq.h>
struct ingenic_intc_data {
void __iomem *base;
@@ -50,7 +49,7 @@ static irqreturn_t intc_cascade(int irq, void *data)
while (pending) {
int bit = __fls(pending);
- irq = irq_find_mapping(domain, bit + (i * 32));
+ irq = irq_linear_revmap(domain, bit + (i * 32));
generic_handle_irq(irq);
pending &= ~BIT(bit);
}
@@ -97,8 +96,7 @@ static int __init ingenic_intc_of_init(struct device_node *node,
goto out_unmap_irq;
}
- domain = irq_domain_add_legacy(node, num_chips * 32,
- JZ4740_IRQ_BASE, 0,
+ domain = irq_domain_add_linear(node, num_chips * 32,
&irq_generic_chip_ops, NULL);
if (!domain) {
err = -ENOMEM;
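Moving the Ingenic domain from legacy to linear is what makes irq_linear_revmap() legal in the cascade handler: on a linear domain the reverse map is a flat array indexed by hwirq, which is cheaper in a hot interrupt path than the general lookup behind irq_find_mapping(). Conceptually (a sketch, not the kernel's actual data structure):

/* A linear domain's reverse map is, in effect, just an array lookup. */
static unsigned int linear_revmap(const unsigned int *revmap,
				  unsigned int size, unsigned int hwirq)
{
	return hwirq < size ? revmap[hwirq] : 0;  /* 0 = no mapping */
}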
diff --git a/drivers/irqchip/irq-mbigen.c b/drivers/irqchip/irq-mbigen.c
index 3f09f658e8e2..6b566bba263b 100644
--- a/drivers/irqchip/irq-mbigen.c
+++ b/drivers/irqchip/irq-mbigen.c
@@ -374,6 +374,7 @@ static struct platform_driver mbigen_platform_driver = {
.name = "Hisilicon MBIGEN-V2",
.of_match_table = mbigen_of_match,
.acpi_match_table = ACPI_PTR(mbigen_acpi_match),
+ .suppress_bind_attrs = true,
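+ /* unbinding an irqchip at runtime is unsafe; hide bind/unbind */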
},
.probe = mbigen_device_probe,
};
diff --git a/drivers/irqchip/irq-meson-gpio.c b/drivers/irqchip/irq-meson-gpio.c
index 829084b568fa..ccc7f823911b 100644
--- a/drivers/irqchip/irq-meson-gpio.c
+++ b/drivers/irqchip/irq-meson-gpio.c
@@ -24,50 +24,101 @@
#define REG_PIN_47_SEL 0x08
#define REG_FILTER_SEL 0x0c
+/* used by A1-like chips */
+#define REG_PIN_A1_SEL 0x04
+
/*
* Note: The S905X3 datasheet reports that BOTH_EDGE is controlled by
* bits 24 to 31. Tests on the actual HW show that these bits are
* stuck at 0. Bits 8 to 15 are responsive and have the expected
* effect.
*/
-#define REG_EDGE_POL_EDGE(x) BIT(x)
-#define REG_EDGE_POL_LOW(x) BIT(16 + (x))
-#define REG_BOTH_EDGE(x) BIT(8 + (x))
-#define REG_EDGE_POL_MASK(x) ( \
- REG_EDGE_POL_EDGE(x) | \
- REG_EDGE_POL_LOW(x) | \
- REG_BOTH_EDGE(x))
+#define REG_EDGE_POL_EDGE(params, x) BIT((params)->edge_single_offset + (x))
+#define REG_EDGE_POL_LOW(params, x) BIT((params)->pol_low_offset + (x))
+#define REG_BOTH_EDGE(params, x) BIT((params)->edge_both_offset + (x))
+#define REG_EDGE_POL_MASK(params, x) ( \
+ REG_EDGE_POL_EDGE(params, x) | \
+ REG_EDGE_POL_LOW(params, x) | \
+ REG_BOTH_EDGE(params, x))
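+
+/*
+ * Example with the A1 layout (pol_low_offset = 0, edge_single_offset = 8,
+ * edge_both_offset = 16): REG_EDGE_POL_MASK(params, 3) covers bits 3, 11
+ * and 19.
+ */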
#define REG_PIN_SEL_SHIFT(x) (((x) % 4) * 8)
#define REG_FILTER_SEL_SHIFT(x) ((x) * 4)
+struct meson_gpio_irq_controller;
+static void meson8_gpio_irq_sel_pin(struct meson_gpio_irq_controller *ctl,
+ unsigned int channel, unsigned long hwirq);
+static void meson_gpio_irq_init_dummy(struct meson_gpio_irq_controller *ctl);
+static void meson_a1_gpio_irq_sel_pin(struct meson_gpio_irq_controller *ctl,
+ unsigned int channel,
+ unsigned long hwirq);
+static void meson_a1_gpio_irq_init(struct meson_gpio_irq_controller *ctl);
+
+struct irq_ctl_ops {
+ void (*gpio_irq_sel_pin)(struct meson_gpio_irq_controller *ctl,
+ unsigned int channel, unsigned long hwirq);
+ void (*gpio_irq_init)(struct meson_gpio_irq_controller *ctl);
+};
+
struct meson_gpio_irq_params {
unsigned int nr_hwirq;
bool support_edge_both;
+ unsigned int edge_both_offset;
+ unsigned int edge_single_offset;
+ unsigned int pol_low_offset;
+ unsigned int pin_sel_mask;
+ struct irq_ctl_ops ops;
};
+#define INIT_MESON_COMMON(irqs, init, sel) \
+ .nr_hwirq = irqs, \
+ .ops = { \
+ .gpio_irq_init = init, \
+ .gpio_irq_sel_pin = sel, \
+ },
+
+#define INIT_MESON8_COMMON_DATA(irqs) \
+ INIT_MESON_COMMON(irqs, meson_gpio_irq_init_dummy, \
+ meson8_gpio_irq_sel_pin) \
+ .edge_single_offset = 0, \
+ .pol_low_offset = 16, \
+ .pin_sel_mask = 0xff, \
+
+#define INIT_MESON_A1_COMMON_DATA(irqs) \
+ INIT_MESON_COMMON(irqs, meson_a1_gpio_irq_init, \
+ meson_a1_gpio_irq_sel_pin) \
+ .support_edge_both = true, \
+ .edge_both_offset = 16, \
+ .edge_single_offset = 8, \
+ .pol_low_offset = 0, \
+ .pin_sel_mask = 0x7f, \
+
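+/*
+ * The INIT_*_COMMON_DATA variants capture the register-layout split:
+ * Meson8-class muxes use an 8-bit pin selector with polarity bits at
+ * offset 16, while A1 shifts the edge/polarity fields down and adds a
+ * global enable (BIT(31) of REG_EDGE_POL, set by meson_a1_gpio_irq_init()).
+ */
+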
static const struct meson_gpio_irq_params meson8_params = {
- .nr_hwirq = 134,
+ INIT_MESON8_COMMON_DATA(134)
};
static const struct meson_gpio_irq_params meson8b_params = {
- .nr_hwirq = 119,
+ INIT_MESON8_COMMON_DATA(119)
};
static const struct meson_gpio_irq_params gxbb_params = {
- .nr_hwirq = 133,
+ INIT_MESON8_COMMON_DATA(133)
};
static const struct meson_gpio_irq_params gxl_params = {
- .nr_hwirq = 110,
+ INIT_MESON8_COMMON_DATA(110)
};
static const struct meson_gpio_irq_params axg_params = {
- .nr_hwirq = 100,
+ INIT_MESON8_COMMON_DATA(100)
};
static const struct meson_gpio_irq_params sm1_params = {
- .nr_hwirq = 100,
+ INIT_MESON8_COMMON_DATA(100)
.support_edge_both = true,
+ .edge_both_offset = 8,
+};
+
+static const struct meson_gpio_irq_params a1_params = {
+ INIT_MESON_A1_COMMON_DATA(62)
};
static const struct of_device_id meson_irq_gpio_matches[] = {
@@ -78,6 +129,7 @@ static const struct of_device_id meson_irq_gpio_matches[] = {
{ .compatible = "amlogic,meson-axg-gpio-intc", .data = &axg_params },
{ .compatible = "amlogic,meson-g12a-gpio-intc", .data = &axg_params },
{ .compatible = "amlogic,meson-sm1-gpio-intc", .data = &sm1_params },
+ { .compatible = "amlogic,meson-a1-gpio-intc", .data = &a1_params },
{ }
};
@@ -100,9 +152,43 @@ static void meson_gpio_irq_update_bits(struct meson_gpio_irq_controller *ctl,
writel_relaxed(tmp, ctl->base + reg);
}
-static unsigned int meson_gpio_irq_channel_to_reg(unsigned int channel)
+static void meson_gpio_irq_init_dummy(struct meson_gpio_irq_controller *ctl)
+{
+}
+
+static void meson8_gpio_irq_sel_pin(struct meson_gpio_irq_controller *ctl,
+ unsigned int channel, unsigned long hwirq)
+{
+ unsigned int reg_offset;
+ unsigned int bit_offset;
+
+ reg_offset = (channel < 4) ? REG_PIN_03_SEL : REG_PIN_47_SEL;
+ bit_offset = REG_PIN_SEL_SHIFT(channel);
+
+ meson_gpio_irq_update_bits(ctl, reg_offset,
+ ctl->params->pin_sel_mask << bit_offset,
+ hwirq << bit_offset);
+}
+
+static void meson_a1_gpio_irq_sel_pin(struct meson_gpio_irq_controller *ctl,
+ unsigned int channel,
+ unsigned long hwirq)
{
- return (channel < 4) ? REG_PIN_03_SEL : REG_PIN_47_SEL;
+ unsigned int reg_offset;
+ unsigned int bit_offset;
+
+ bit_offset = ((channel % 2) == 0) ? 0 : 16;
+ reg_offset = REG_PIN_A1_SEL + ((channel / 2) << 2);
+
+ meson_gpio_irq_update_bits(ctl, reg_offset,
+ ctl->params->pin_sel_mask << bit_offset,
+ hwirq << bit_offset);
+}
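+
+/*
+ * Example: channel 5 selects the upper half-word (bit_offset 16) of
+ * REG_PIN_A1_SEL + ((5 / 2) << 2) = 0x04 + 0x08 = 0x0c, using the
+ * 0x7f A1 pin mask.
+ */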
+
+/* On A1 and later chips there is a switch to enable/disable the irq */
+static void meson_a1_gpio_irq_init(struct meson_gpio_irq_controller *ctl)
+{
+ meson_gpio_irq_update_bits(ctl, REG_EDGE_POL, BIT(31), BIT(31));
}
static int
@@ -110,7 +196,7 @@ meson_gpio_irq_request_channel(struct meson_gpio_irq_controller *ctl,
unsigned long hwirq,
u32 **channel_hwirq)
{
- unsigned int reg, idx;
+ unsigned int idx;
spin_lock(&ctl->lock);
@@ -129,10 +215,7 @@ meson_gpio_irq_request_channel(struct meson_gpio_irq_controller *ctl,
* Setup the mux of the channel to route the signal of the pad
* to the appropriate input of the GIC
*/
- reg = meson_gpio_irq_channel_to_reg(idx);
- meson_gpio_irq_update_bits(ctl, reg,
- 0xff << REG_PIN_SEL_SHIFT(idx),
- hwirq << REG_PIN_SEL_SHIFT(idx));
+ ctl->params->ops.gpio_irq_sel_pin(ctl, idx, hwirq);
/*
* Get the hwirq number assigned to this channel through
@@ -173,7 +256,9 @@ static int meson_gpio_irq_type_setup(struct meson_gpio_irq_controller *ctl,
{
u32 val = 0;
unsigned int idx;
+ const struct meson_gpio_irq_params *params;
+ params = ctl->params;
idx = meson_gpio_irq_get_channel_idx(ctl, channel_hwirq);
/*
@@ -190,22 +275,22 @@ static int meson_gpio_irq_type_setup(struct meson_gpio_irq_controller *ctl,
* precedence over the other edge/polarity settings
*/
if (type == IRQ_TYPE_EDGE_BOTH) {
- if (!ctl->params->support_edge_both)
+ if (!params->support_edge_both)
return -EINVAL;
- val |= REG_BOTH_EDGE(idx);
+ val |= REG_BOTH_EDGE(params, idx);
} else {
if (type & (IRQ_TYPE_EDGE_RISING | IRQ_TYPE_EDGE_FALLING))
- val |= REG_EDGE_POL_EDGE(idx);
+ val |= REG_EDGE_POL_EDGE(params, idx);
if (type & (IRQ_TYPE_LEVEL_LOW | IRQ_TYPE_EDGE_FALLING))
- val |= REG_EDGE_POL_LOW(idx);
+ val |= REG_EDGE_POL_LOW(params, idx);
}
spin_lock(&ctl->lock);
meson_gpio_irq_update_bits(ctl, REG_EDGE_POL,
- REG_EDGE_POL_MASK(idx), val);
+ REG_EDGE_POL_MASK(params, idx), val);
spin_unlock(&ctl->lock);
@@ -371,6 +456,8 @@ static int __init meson_gpio_irq_parse_dt(struct device_node *node,
return ret;
}
+ ctl->params->ops.gpio_irq_init(ctl);
+
return 0;
}
diff --git a/drivers/irqchip/irq-mips-gic.c b/drivers/irqchip/irq-mips-gic.c
index f3985469c221..d70507133c1d 100644
--- a/drivers/irqchip/irq-mips-gic.c
+++ b/drivers/irqchip/irq-mips-gic.c
@@ -716,7 +716,7 @@ static int __init gic_of_init(struct device_node *node,
__sync();
}
- mips_gic_base = ioremap_nocache(gic_base, gic_len);
+ mips_gic_base = ioremap(gic_base, gic_len);
gicconfig = read_gic_config();
gic_shared_intrs = gicconfig & GIC_CONFIG_NUMINTERRUPTS;
diff --git a/drivers/irqchip/irq-nvic.c b/drivers/irqchip/irq-nvic.c
index a166d30deea2..f747e2209ea9 100644
--- a/drivers/irqchip/irq-nvic.c
+++ b/drivers/irqchip/irq-nvic.c
@@ -45,17 +45,6 @@ nvic_handle_irq(irq_hw_number_t hwirq, struct pt_regs *regs)
handle_IRQ(irq, regs);
}
-static int nvic_irq_domain_translate(struct irq_domain *d,
- struct irq_fwspec *fwspec,
- unsigned long *hwirq, unsigned int *type)
-{
- if (WARN_ON(fwspec->param_count < 1))
- return -EINVAL;
- *hwirq = fwspec->param[0];
- *type = IRQ_TYPE_NONE;
- return 0;
-}
-
static int nvic_irq_domain_alloc(struct irq_domain *domain, unsigned int virq,
unsigned int nr_irqs, void *arg)
{
@@ -64,7 +53,7 @@ static int nvic_irq_domain_alloc(struct irq_domain *domain, unsigned int virq,
unsigned int type = IRQ_TYPE_NONE;
struct irq_fwspec *fwspec = arg;
- ret = nvic_irq_domain_translate(domain, fwspec, &hwirq, &type);
+ ret = irq_domain_translate_onecell(domain, fwspec, &hwirq, &type);
if (ret)
return ret;
@@ -75,7 +64,7 @@ static int nvic_irq_domain_alloc(struct irq_domain *domain, unsigned int virq,
}
static const struct irq_domain_ops nvic_irq_domain_ops = {
- .translate = nvic_irq_domain_translate,
+ .translate = irq_domain_translate_onecell,
.alloc = nvic_irq_domain_alloc,
.free = irq_domain_free_irqs_top,
};
diff --git a/drivers/irqchip/irq-renesas-intc-irqpin.c b/drivers/irqchip/irq-renesas-intc-irqpin.c
index f82bc60a6793..6e5e3172796b 100644
--- a/drivers/irqchip/irq-renesas-intc-irqpin.c
+++ b/drivers/irqchip/irq-renesas-intc-irqpin.c
@@ -460,7 +460,7 @@ static int intc_irqpin_probe(struct platform_device *pdev)
goto err0;
}
- i->iomem = devm_ioremap_nocache(dev, io[k]->start,
+ i->iomem = devm_ioremap(dev, io[k]->start,
resource_size(io[k]));
if (!i->iomem) {
dev_err(dev, "failed to remap IOMEM\n");
diff --git a/drivers/irqchip/irq-sifive-plic.c b/drivers/irqchip/irq-sifive-plic.c
index 0aca5807a119..aa4af886e43a 100644
--- a/drivers/irqchip/irq-sifive-plic.c
+++ b/drivers/irqchip/irq-sifive-plic.c
@@ -154,15 +154,37 @@ static struct irq_chip plic_chip = {
static int plic_irqdomain_map(struct irq_domain *d, unsigned int irq,
irq_hw_number_t hwirq)
{
- irq_set_chip_and_handler(irq, &plic_chip, handle_fasteoi_irq);
- irq_set_chip_data(irq, NULL);
+ irq_domain_set_info(d, irq, hwirq, &plic_chip, d->host_data,
+ handle_fasteoi_irq, NULL, NULL);
irq_set_noprobe(irq);
return 0;
}
+static int plic_irq_domain_alloc(struct irq_domain *domain, unsigned int virq,
+ unsigned int nr_irqs, void *arg)
+{
+ int i, ret;
+ irq_hw_number_t hwirq;
+ unsigned int type;
+ struct irq_fwspec *fwspec = arg;
+
+ ret = irq_domain_translate_onecell(domain, fwspec, &hwirq, &type);
+ if (ret)
+ return ret;
+
+ for (i = 0; i < nr_irqs; i++) {
+ ret = plic_irqdomain_map(domain, virq + i, hwirq + i);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
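+
+/*
+ * Switching from .map/.xlate to .translate/.alloc lets the PLIC domain
+ * be used with the hierarchical irqdomain API; each virq in the range
+ * is still wired up through plic_irqdomain_map() above.
+ */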
+
static const struct irq_domain_ops plic_irqdomain_ops = {
- .map = plic_irqdomain_map,
- .xlate = irq_domain_xlate_onecell,
+ .translate = irq_domain_translate_onecell,
+ .alloc = plic_irq_domain_alloc,
+ .free = irq_domain_free_irqs_top,
};
static struct irq_domain *plic_irqdomain;
diff --git a/drivers/isdn/Makefile b/drivers/isdn/Makefile
index 63baf27a2c79..d14334f4007e 100644
--- a/drivers/isdn/Makefile
+++ b/drivers/isdn/Makefile
@@ -3,6 +3,6 @@
# Object files in subdirectories
-obj-$(CONFIG_ISDN_CAPI) += capi/
+obj-$(CONFIG_BT_CMTP) += capi/
obj-$(CONFIG_MISDN) += mISDN/
obj-$(CONFIG_ISDN) += hardware/
diff --git a/drivers/isdn/capi/Kconfig b/drivers/isdn/capi/Kconfig
index 573fea5500ce..cc408ad9aafb 100644
--- a/drivers/isdn/capi/Kconfig
+++ b/drivers/isdn/capi/Kconfig
@@ -1,6 +1,6 @@
# SPDX-License-Identifier: GPL-2.0-only
-menuconfig ISDN_CAPI
- tristate "CAPI 2.0 subsystem"
+config ISDN_CAPI
+ def_bool ISDN && BT
help
This provides CAPI (the Common ISDN Application Programming
Interface) Version 2.0, a standard making it easy for programs to
@@ -15,42 +15,18 @@ menuconfig ISDN_CAPI
See CONFIG_BT_CMTP for the last remaining regular driver
in the kernel that uses the CAPI subsystem.
-if ISDN_CAPI
-
config CAPI_TRACE
- bool "CAPI trace support"
- default y
+ def_bool BT_CMTP
help
If you say Y here, the kernelcapi driver can make verbose traces
of CAPI messages. This feature can be enabled/disabled via IOCTL for
every controller (default disabled).
- This will increase the size of the kernelcapi module by 20 KB.
- If unsure, say Y.
-
-config ISDN_CAPI_CAPI20
- tristate "CAPI2.0 /dev/capi20 support"
- help
- This option will provide the CAPI 2.0 interface to userspace
- applications via /dev/capi20. Applications should use the
- standardized libcapi20 to access this functionality. You should say
- Y/M here.
config ISDN_CAPI_MIDDLEWARE
- bool "CAPI2.0 Middleware support"
- depends on ISDN_CAPI_CAPI20 && TTY
+ def_bool BT_CMTP && TTY
help
This option will enhance the capabilities of the /dev/capi20
interface. It will provide a means of moving a data connection,
established via the usual /dev/capi20 interface to a special tty
device. If you want to use pppd with pppdcapiplugin to dial up to
your ISP, say Y here.
-
-config ISDN_CAPI_CAPIDRV_VERBOSE
- bool "Verbose reason code reporting"
- depends on ISDN_CAPI_CAPIDRV
- help
- If you say Y here, the capidrv interface will give verbose reasons
- for disconnecting. This will increase the size of the kernel by 7 KB.
- If unsure, say N.
-
-endif
diff --git a/drivers/isdn/capi/Makefile b/drivers/isdn/capi/Makefile
index d299f3e75f89..352217ebabd8 100644
--- a/drivers/isdn/capi/Makefile
+++ b/drivers/isdn/capi/Makefile
@@ -1,17 +1,5 @@
# SPDX-License-Identifier: GPL-2.0
-# Makefile for the CAPI subsystem.
+# Makefile for the CAPI subsystem used by BT_CMTP
-# Ordering constraints: kernelcapi.o first
-
-# Each configuration option enables a list of files.
-
-obj-$(CONFIG_ISDN_CAPI) += kernelcapi.o
-obj-$(CONFIG_ISDN_CAPI_CAPI20) += capi.o
-obj-$(CONFIG_ISDN_CAPI_CAPIDRV) += capidrv.o
-
-# Multipart objects.
-
-kernelcapi-y := kcapi.o capiutil.o capilib.o
-kernelcapi-$(CONFIG_PROC_FS) += kcapi_proc.o
-
-ccflags-y += -I$(srctree)/$(src)/../include -I$(srctree)/$(src)/../include/uapi
+obj-$(CONFIG_BT_CMTP) += kernelcapi.o
+kernelcapi-y := kcapi.o capiutil.o capi.o kcapi_proc.o
diff --git a/drivers/isdn/capi/capi.c b/drivers/isdn/capi/capi.c
index 1675da34239b..85767f52fe3c 100644
--- a/drivers/isdn/capi/capi.c
+++ b/drivers/isdn/capi/capi.c
@@ -39,7 +39,9 @@
#include <linux/isdn/capiutil.h>
#include <linux/isdn/capicmd.h>
-MODULE_DESCRIPTION("CAPI4Linux: Userspace /dev/capi20 interface");
+#include "kcapi.h"
+
+MODULE_DESCRIPTION("CAPI4Linux: kernel CAPI layer and /dev/capi20 interface");
MODULE_AUTHOR("Carsten Paeth");
MODULE_LICENSE("GPL");
@@ -1412,15 +1414,22 @@ static int __init capi_init(void)
{
const char *compileinfo;
int major_ret;
+ int ret;
+
+ ret = kcapi_init();
+ if (ret)
+ return ret;
major_ret = register_chrdev(capi_major, "capi20", &capi_fops);
if (major_ret < 0) {
printk(KERN_ERR "capi20: unable to get major %d\n", capi_major);
+ kcapi_exit();
return major_ret;
}
capi_class = class_create(THIS_MODULE, "capi");
if (IS_ERR(capi_class)) {
unregister_chrdev(capi_major, "capi20");
+ kcapi_exit();
return PTR_ERR(capi_class);
}
@@ -1430,6 +1439,7 @@ static int __init capi_init(void)
device_destroy(capi_class, MKDEV(capi_major, 0));
class_destroy(capi_class);
unregister_chrdev(capi_major, "capi20");
+ kcapi_exit();
return -ENOMEM;
}
@@ -1455,6 +1465,8 @@ static void __exit capi_exit(void)
unregister_chrdev(capi_major, "capi20");
capinc_tty_exit();
+
+ kcapi_exit();
}
module_init(capi_init);
diff --git a/drivers/isdn/capi/capilib.c b/drivers/isdn/capi/capilib.c
deleted file mode 100644
index a39ad3796bba..000000000000
--- a/drivers/isdn/capi/capilib.c
+++ /dev/null
@@ -1,202 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-
-#include <linux/slab.h>
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/isdn/capilli.h>
-
-#define DBG(format, arg...) do { \
- printk(KERN_DEBUG "%s: " format "\n" , __func__ , ## arg); \
- } while (0)
-
-struct capilib_msgidqueue {
- struct capilib_msgidqueue *next;
- u16 msgid;
-};
-
-struct capilib_ncci {
- struct list_head list;
- u16 applid;
- u32 ncci;
- u32 winsize;
- int nmsg;
- struct capilib_msgidqueue *msgidqueue;
- struct capilib_msgidqueue *msgidlast;
- struct capilib_msgidqueue *msgidfree;
- struct capilib_msgidqueue msgidpool[CAPI_MAXDATAWINDOW];
-};
-
-// ---------------------------------------------------------------------------
-// NCCI Handling
-
-static inline void mq_init(struct capilib_ncci *np)
-{
- u_int i;
- np->msgidqueue = NULL;
- np->msgidlast = NULL;
- np->nmsg = 0;
- memset(np->msgidpool, 0, sizeof(np->msgidpool));
- np->msgidfree = &np->msgidpool[0];
- for (i = 1; i < np->winsize; i++) {
- np->msgidpool[i].next = np->msgidfree;
- np->msgidfree = &np->msgidpool[i];
- }
-}
-
-static inline int mq_enqueue(struct capilib_ncci *np, u16 msgid)
-{
- struct capilib_msgidqueue *mq;
- if ((mq = np->msgidfree) == NULL)
- return 0;
- np->msgidfree = mq->next;
- mq->msgid = msgid;
- mq->next = NULL;
- if (np->msgidlast)
- np->msgidlast->next = mq;
- np->msgidlast = mq;
- if (!np->msgidqueue)
- np->msgidqueue = mq;
- np->nmsg++;
- return 1;
-}
-
-static inline int mq_dequeue(struct capilib_ncci *np, u16 msgid)
-{
- struct capilib_msgidqueue **pp;
- for (pp = &np->msgidqueue; *pp; pp = &(*pp)->next) {
- if ((*pp)->msgid == msgid) {
- struct capilib_msgidqueue *mq = *pp;
- *pp = mq->next;
- if (mq == np->msgidlast)
- np->msgidlast = NULL;
- mq->next = np->msgidfree;
- np->msgidfree = mq;
- np->nmsg--;
- return 1;
- }
- }
- return 0;
-}
-
-void capilib_new_ncci(struct list_head *head, u16 applid, u32 ncci, u32 winsize)
-{
- struct capilib_ncci *np;
-
- np = kmalloc(sizeof(*np), GFP_ATOMIC);
- if (!np) {
- printk(KERN_WARNING "capilib_new_ncci: no memory.\n");
- return;
- }
- if (winsize > CAPI_MAXDATAWINDOW) {
- printk(KERN_ERR "capi_new_ncci: winsize %d too big\n",
- winsize);
- winsize = CAPI_MAXDATAWINDOW;
- }
- np->applid = applid;
- np->ncci = ncci;
- np->winsize = winsize;
- mq_init(np);
- list_add_tail(&np->list, head);
- DBG("kcapi: appl %d ncci 0x%x up", applid, ncci);
-}
-
-EXPORT_SYMBOL(capilib_new_ncci);
-
-void capilib_free_ncci(struct list_head *head, u16 applid, u32 ncci)
-{
- struct list_head *l;
- struct capilib_ncci *np;
-
- list_for_each(l, head) {
- np = list_entry(l, struct capilib_ncci, list);
- if (np->applid != applid)
- continue;
- if (np->ncci != ncci)
- continue;
- printk(KERN_INFO "kcapi: appl %d ncci 0x%x down\n", applid, ncci);
- list_del(&np->list);
- kfree(np);
- return;
- }
- printk(KERN_ERR "capilib_free_ncci: ncci 0x%x not found\n", ncci);
-}
-
-EXPORT_SYMBOL(capilib_free_ncci);
-
-void capilib_release_appl(struct list_head *head, u16 applid)
-{
- struct list_head *l, *n;
- struct capilib_ncci *np;
-
- list_for_each_safe(l, n, head) {
- np = list_entry(l, struct capilib_ncci, list);
- if (np->applid != applid)
- continue;
- printk(KERN_INFO "kcapi: appl %d ncci 0x%x forced down\n", applid, np->ncci);
- list_del(&np->list);
- kfree(np);
- }
-}
-
-EXPORT_SYMBOL(capilib_release_appl);
-
-void capilib_release(struct list_head *head)
-{
- struct list_head *l, *n;
- struct capilib_ncci *np;
-
- list_for_each_safe(l, n, head) {
- np = list_entry(l, struct capilib_ncci, list);
- printk(KERN_INFO "kcapi: appl %d ncci 0x%x forced down\n", np->applid, np->ncci);
- list_del(&np->list);
- kfree(np);
- }
-}
-
-EXPORT_SYMBOL(capilib_release);
-
-u16 capilib_data_b3_req(struct list_head *head, u16 applid, u32 ncci, u16 msgid)
-{
- struct list_head *l;
- struct capilib_ncci *np;
-
- list_for_each(l, head) {
- np = list_entry(l, struct capilib_ncci, list);
- if (np->applid != applid)
- continue;
- if (np->ncci != ncci)
- continue;
-
- if (mq_enqueue(np, msgid) == 0)
- return CAPI_SENDQUEUEFULL;
-
- return CAPI_NOERROR;
- }
- printk(KERN_ERR "capilib_data_b3_req: ncci 0x%x not found\n", ncci);
- return CAPI_NOERROR;
-}
-
-EXPORT_SYMBOL(capilib_data_b3_req);
-
-void capilib_data_b3_conf(struct list_head *head, u16 applid, u32 ncci, u16 msgid)
-{
- struct list_head *l;
- struct capilib_ncci *np;
-
- list_for_each(l, head) {
- np = list_entry(l, struct capilib_ncci, list);
- if (np->applid != applid)
- continue;
- if (np->ncci != ncci)
- continue;
-
- if (mq_dequeue(np, msgid) == 0) {
- printk(KERN_ERR "kcapi: msgid %hu ncci 0x%x not on queue\n",
- msgid, ncci);
- }
- return;
- }
- printk(KERN_ERR "capilib_data_b3_conf: ncci 0x%x not found\n", ncci);
-}
-
-EXPORT_SYMBOL(capilib_data_b3_conf);
diff --git a/drivers/isdn/capi/capiutil.c b/drivers/isdn/capi/capiutil.c
index 9846d82eb097..f26bf3c66d7e 100644
--- a/drivers/isdn/capi/capiutil.c
+++ b/drivers/isdn/capi/capiutil.c
@@ -20,6 +20,8 @@
#include <linux/isdn/capiutil.h>
#include <linux/slab.h>
+#include "kcapi.h"
+
/* from CAPI2.0 DDK AVM Berlin GmbH */
typedef struct {
@@ -245,190 +247,6 @@ static void jumpcstruct(_cmsg *cmsg)
}
}
}
-/*-------------------------------------------------------*/
-static void pars_2_message(_cmsg *cmsg)
-{
-
- for (; TYP != _CEND; cmsg->p++) {
- switch (TYP) {
- case _CBYTE:
- byteTLcpy(cmsg->m + cmsg->l, OFF);
- cmsg->l++;
- break;
- case _CWORD:
- wordTLcpy(cmsg->m + cmsg->l, OFF);
- cmsg->l += 2;
- break;
- case _CDWORD:
- dwordTLcpy(cmsg->m + cmsg->l, OFF);
- cmsg->l += 4;
- break;
- case _CSTRUCT:
- if (*(u8 **) OFF == NULL) {
- *(cmsg->m + cmsg->l) = '\0';
- cmsg->l++;
- } else if (**(_cstruct *) OFF != 0xff) {
- structTLcpy(cmsg->m + cmsg->l, *(_cstruct *) OFF, 1 + **(_cstruct *) OFF);
- cmsg->l += 1 + **(_cstruct *) OFF;
- } else {
- _cstruct s = *(_cstruct *) OFF;
- structTLcpy(cmsg->m + cmsg->l, s, 3 + *(u16 *) (s + 1));
- cmsg->l += 3 + *(u16 *) (s + 1);
- }
- break;
- case _CMSTRUCT:
-/*----- Metastruktur 0 -----*/
- if (*(_cmstruct *) OFF == CAPI_DEFAULT) {
- *(cmsg->m + cmsg->l) = '\0';
- cmsg->l++;
- jumpcstruct(cmsg);
- }
-/*----- Metastruktur wird composed -----*/
- else {
- unsigned _l = cmsg->l;
- unsigned _ls;
- cmsg->l++;
- cmsg->p++;
- pars_2_message(cmsg);
- _ls = cmsg->l - _l - 1;
- if (_ls < 255)
- (cmsg->m + _l)[0] = (u8) _ls;
- else {
- structTLcpyovl(cmsg->m + _l + 3, cmsg->m + _l + 1, _ls);
- (cmsg->m + _l)[0] = 0xff;
- wordTLcpy(cmsg->m + _l + 1, &_ls);
- }
- }
- break;
- }
- }
-}
-
-/**
- * capi_cmsg2message() - assemble CAPI 2.0 message from _cmsg structure
- * @cmsg: _cmsg structure
- * @msg: buffer for assembled message
- *
- * Return value: 0 for success
- */
-
-unsigned capi_cmsg2message(_cmsg *cmsg, u8 *msg)
-{
- cmsg->m = msg;
- cmsg->l = 8;
- cmsg->p = 0;
- cmsg->par = capi_cmd2par(cmsg->Command, cmsg->Subcommand);
- if (!cmsg->par)
- return 1; /* invalid command/subcommand */
-
- pars_2_message(cmsg);
-
- wordTLcpy(msg + 0, &cmsg->l);
- byteTLcpy(cmsg->m + 4, &cmsg->Command);
- byteTLcpy(cmsg->m + 5, &cmsg->Subcommand);
- wordTLcpy(cmsg->m + 2, &cmsg->ApplId);
- wordTLcpy(cmsg->m + 6, &cmsg->Messagenumber);
-
- return 0;
-}
-
-/*-------------------------------------------------------*/
-static void message_2_pars(_cmsg *cmsg)
-{
- for (; TYP != _CEND; cmsg->p++) {
-
- switch (TYP) {
- case _CBYTE:
- byteTRcpy(cmsg->m + cmsg->l, OFF);
- cmsg->l++;
- break;
- case _CWORD:
- wordTRcpy(cmsg->m + cmsg->l, OFF);
- cmsg->l += 2;
- break;
- case _CDWORD:
- dwordTRcpy(cmsg->m + cmsg->l, OFF);
- cmsg->l += 4;
- break;
- case _CSTRUCT:
- *(u8 **) OFF = cmsg->m + cmsg->l;
-
- if (cmsg->m[cmsg->l] != 0xff)
- cmsg->l += 1 + cmsg->m[cmsg->l];
- else
- cmsg->l += 3 + *(u16 *) (cmsg->m + cmsg->l + 1);
- break;
- case _CMSTRUCT:
-/*----- Metastruktur 0 -----*/
- if (cmsg->m[cmsg->l] == '\0') {
- *(_cmstruct *) OFF = CAPI_DEFAULT;
- cmsg->l++;
- jumpcstruct(cmsg);
- } else {
- unsigned _l = cmsg->l;
- *(_cmstruct *) OFF = CAPI_COMPOSE;
- cmsg->l = (cmsg->m + _l)[0] == 255 ? cmsg->l + 3 : cmsg->l + 1;
- cmsg->p++;
- message_2_pars(cmsg);
- }
- break;
- }
- }
-}
-
-/**
- * capi_message2cmsg() - disassemble CAPI 2.0 message into _cmsg structure
- * @cmsg: _cmsg structure
- * @msg: buffer for assembled message
- *
- * Return value: 0 for success
- */
-
-unsigned capi_message2cmsg(_cmsg *cmsg, u8 *msg)
-{
- memset(cmsg, 0, sizeof(_cmsg));
- cmsg->m = msg;
- cmsg->l = 8;
- cmsg->p = 0;
- byteTRcpy(cmsg->m + 4, &cmsg->Command);
- byteTRcpy(cmsg->m + 5, &cmsg->Subcommand);
- cmsg->par = capi_cmd2par(cmsg->Command, cmsg->Subcommand);
- if (!cmsg->par)
- return 1; /* invalid command/subcommand */
-
- message_2_pars(cmsg);
-
- wordTRcpy(msg + 0, &cmsg->l);
- wordTRcpy(cmsg->m + 2, &cmsg->ApplId);
- wordTRcpy(cmsg->m + 6, &cmsg->Messagenumber);
-
- return 0;
-}
-
-/**
- * capi_cmsg_header() - initialize header part of _cmsg structure
- * @cmsg: _cmsg structure
- * @_ApplId: ApplID field value
- * @_Command: Command field value
- * @_Subcommand: Subcommand field value
- * @_Messagenumber: Message Number field value
- * @_Controller: Controller/PLCI/NCCI field value
- *
- * Return value: 0 for success
- */
-
-unsigned capi_cmsg_header(_cmsg *cmsg, u16 _ApplId,
- u8 _Command, u8 _Subcommand,
- u16 _Messagenumber, u32 _Controller)
-{
- memset(cmsg, 0, sizeof(_cmsg));
- cmsg->ApplId = _ApplId;
- cmsg->Command = _Command;
- cmsg->Subcommand = _Subcommand;
- cmsg->Messagenumber = _Messagenumber;
- cmsg->adr.adrController = _Controller;
- return 0;
-}
/*-------------------------------------------------------*/
@@ -561,8 +379,6 @@ static char *pnames[] =
/*2f */ "Useruserdata"
};
-
-
#include <stdarg.h>
/*-------------------------------------------------------*/
@@ -800,37 +616,6 @@ _cdebbuf *capi_message2str(u8 *msg)
return cdb;
}
-/**
- * capi_cmsg2str() - format _cmsg structure for printing
- * @cmsg: _cmsg structure
- *
- * Allocates a CAPI debug buffer and fills it with a printable representation
- * of the CAPI 2.0 message stored in @cmsg by a previous call to
- * capi_cmsg2message() or capi_message2cmsg().
- * Return value: allocated debug buffer, NULL on error
- * The returned buffer should be freed by a call to cdebbuf_free() after use.
- */
-
-_cdebbuf *capi_cmsg2str(_cmsg *cmsg)
-{
- _cdebbuf *cdb;
-
- if (!cmsg->m)
- return NULL; /* no message */
- cdb = cdebbuf_alloc();
- if (!cdb)
- return NULL;
- cmsg->l = 8;
- cmsg->p = 0;
- cdb = bufprint(cdb, "%s ID=%03d #0x%04x LEN=%04d\n",
- capi_cmd2str(cmsg->Command, cmsg->Subcommand),
- ((u16 *) cmsg->m)[1],
- ((u16 *) cmsg->m)[3],
- ((u16 *) cmsg->m)[0]);
- cdb = protocol_message_2_pars(cdb, cmsg, 1);
- return cdb;
-}
-
int __init cdebug_init(void)
{
g_cmsg = kmalloc(sizeof(_cmsg), GFP_KERNEL);
@@ -854,7 +639,7 @@ int __init cdebug_init(void)
return 0;
}
-void __exit cdebug_exit(void)
+void cdebug_exit(void)
{
if (g_debbuf)
kfree(g_debbuf->buf);
@@ -885,16 +670,8 @@ int __init cdebug_init(void)
return 0;
}
-void __exit cdebug_exit(void)
+void cdebug_exit(void)
{
}
#endif
-
-EXPORT_SYMBOL(cdebbuf_free);
-EXPORT_SYMBOL(capi_cmsg2message);
-EXPORT_SYMBOL(capi_message2cmsg);
-EXPORT_SYMBOL(capi_cmsg_header);
-EXPORT_SYMBOL(capi_cmd2str);
-EXPORT_SYMBOL(capi_cmsg2str);
-EXPORT_SYMBOL(capi_message2str);
diff --git a/drivers/isdn/capi/kcapi.c b/drivers/isdn/capi/kcapi.c
index a4ceb61c5b60..7168778fbbe1 100644
--- a/drivers/isdn/capi/kcapi.c
+++ b/drivers/isdn/capi/kcapi.c
@@ -10,8 +10,6 @@
*
*/
-#define AVMB1_COMPAT
-
#include "kcapi.h"
#include <linux/module.h>
#include <linux/mm.h>
@@ -31,18 +29,12 @@
#include <linux/uaccess.h>
#include <linux/isdn/capicmd.h>
#include <linux/isdn/capiutil.h>
-#ifdef AVMB1_COMPAT
-#include <linux/b1lli.h>
-#endif
#include <linux/mutex.h>
#include <linux/rcupdate.h>
static int showcapimsgs = 0;
static struct workqueue_struct *kcapi_wq;
-MODULE_DESCRIPTION("CAPI4Linux: kernel CAPI layer");
-MODULE_AUTHOR("Carsten Paeth");
-MODULE_LICENSE("GPL");
module_param(showcapimsgs, uint, 0);
/* ------------------------------------------------------------- */
@@ -61,9 +53,6 @@ static char capi_manufakturer[64] = "AVM Berlin";
#define NCCI2CTRL(ncci) (((ncci) >> 24) & 0x7f)
-LIST_HEAD(capi_drivers);
-DEFINE_MUTEX(capi_drivers_lock);
-
struct capi_ctr *capi_controller[CAPI_MAXCONTR];
DEFINE_MUTEX(capi_controller_lock);
@@ -71,8 +60,6 @@ struct capi20_appl *capi_applications[CAPI_MAXAPPL];
static int ncontrollers;
-static BLOCKING_NOTIFIER_HEAD(ctr_notifier_list);
-
/* -------- controller ref counting -------------------------------------- */
static inline struct capi_ctr *
@@ -200,8 +187,6 @@ static void notify_up(u32 contr)
if (ap)
register_appl(ctr, applid, &ap->rparam);
}
-
- wake_up_interruptible_all(&ctr->state_wait_queue);
} else
printk(KERN_WARNING "%s: invalid contr %d\n", __func__, contr);
@@ -229,8 +214,6 @@ static void ctr_down(struct capi_ctr *ctr, int new_state)
if (ap)
capi_ctr_put(ctr);
}
-
- wake_up_interruptible_all(&ctr->state_wait_queue);
}
static void notify_down(u32 contr)
@@ -251,36 +234,23 @@ static void notify_down(u32 contr)
mutex_unlock(&capi_controller_lock);
}
-static int
-notify_handler(struct notifier_block *nb, unsigned long val, void *v)
+static void do_notify_work(struct work_struct *work)
{
- u32 contr = (long)v;
+ struct capictr_event *event =
+ container_of(work, struct capictr_event, work);
- switch (val) {
+ switch (event->type) {
case CAPICTR_UP:
- notify_up(contr);
+ notify_up(event->controller);
break;
case CAPICTR_DOWN:
- notify_down(contr);
+ notify_down(event->controller);
break;
}
- return NOTIFY_OK;
-}
-static void do_notify_work(struct work_struct *work)
-{
- struct capictr_event *event =
- container_of(work, struct capictr_event, work);
-
- blocking_notifier_call_chain(&ctr_notifier_list, event->type,
- (void *)(long)event->controller);
kfree(event);
}
-/*
- * The notifier will result in adding/deleteing of devices. Devices can
- * only removed in user process, not in bh.
- */
static int notify_push(unsigned int event_type, u32 controller)
{
struct capictr_event *event = kmalloc(sizeof(*event), GFP_ATOMIC);
@@ -296,18 +266,6 @@ static int notify_push(unsigned int event_type, u32 controller)
return 0;
}
-int register_capictr_notifier(struct notifier_block *nb)
-{
- return blocking_notifier_chain_register(&ctr_notifier_list, nb);
-}
-EXPORT_SYMBOL_GPL(register_capictr_notifier);
-
-int unregister_capictr_notifier(struct notifier_block *nb)
-{
- return blocking_notifier_chain_unregister(&ctr_notifier_list, nb);
-}
-EXPORT_SYMBOL_GPL(unregister_capictr_notifier);
-
/* -------- Receiver ------------------------------------------ */
static void recv_handler(struct work_struct *work)
@@ -454,48 +412,6 @@ void capi_ctr_down(struct capi_ctr *ctr)
EXPORT_SYMBOL(capi_ctr_down);
-/**
- * capi_ctr_suspend_output() - suspend controller
- * @ctr: controller descriptor structure.
- *
- * Called by hardware driver to stop data flow.
- *
- * Note: The caller is responsible for synchronizing concurrent state changes
- * as well as invocations of capi_ctr_handle_message.
- */
-
-void capi_ctr_suspend_output(struct capi_ctr *ctr)
-{
- if (!ctr->blocked) {
- printk(KERN_DEBUG "kcapi: controller [%03d] suspend\n",
- ctr->cnr);
- ctr->blocked = 1;
- }
-}
-
-EXPORT_SYMBOL(capi_ctr_suspend_output);
-
-/**
- * capi_ctr_resume_output() - resume controller
- * @ctr: controller descriptor structure.
- *
- * Called by hardware driver to resume data flow.
- *
- * Note: The caller is responsible for synchronizing concurrent state changes
- * as well as invocations of capi_ctr_handle_message.
- */
-
-void capi_ctr_resume_output(struct capi_ctr *ctr)
-{
- if (ctr->blocked) {
- printk(KERN_DEBUG "kcapi: controller [%03d] resumed\n",
- ctr->cnr);
- ctr->blocked = 0;
- }
-}
-
-EXPORT_SYMBOL(capi_ctr_resume_output);
-
/* ------------------------------------------------------------- */
/**
@@ -531,7 +447,6 @@ int attach_capi_ctr(struct capi_ctr *ctr)
ctr->state = CAPI_CTR_DETECTED;
ctr->blocked = 0;
ctr->traceflag = showcapimsgs;
- init_waitqueue_head(&ctr->state_wait_queue);
sprintf(ctr->procfn, "capi/controllers/%d", ctr->cnr);
ctr->procent = proc_create_single_data(ctr->procfn, 0, NULL,
@@ -586,38 +501,6 @@ unlock_out:
EXPORT_SYMBOL(detach_capi_ctr);
-/**
- * register_capi_driver() - register CAPI driver
- * @driver: driver descriptor structure.
- *
- * Called by hardware driver to register itself with the CAPI subsystem.
- */
-
-void register_capi_driver(struct capi_driver *driver)
-{
- mutex_lock(&capi_drivers_lock);
- list_add_tail(&driver->list, &capi_drivers);
- mutex_unlock(&capi_drivers_lock);
-}
-
-EXPORT_SYMBOL(register_capi_driver);
-
-/**
- * unregister_capi_driver() - unregister CAPI driver
- * @driver: driver descriptor structure.
- *
- * Called by hardware driver to unregister itself from the CAPI subsystem.
- */
-
-void unregister_capi_driver(struct capi_driver *driver)
-{
- mutex_lock(&capi_drivers_lock);
- list_del(&driver->list);
- mutex_unlock(&capi_drivers_lock);
-}
-
-EXPORT_SYMBOL(unregister_capi_driver);
-
/* ------------------------------------------------------------- */
/* -------- CAPI2.0 Interface ---------------------------------- */
/* ------------------------------------------------------------- */
@@ -648,8 +531,6 @@ u16 capi20_isinstalled(void)
return ret;
}
-EXPORT_SYMBOL(capi20_isinstalled);
-
/**
* capi20_register() - CAPI 2.0 operation CAPI_REGISTER
* @ap: CAPI application descriptor structure.
@@ -711,8 +592,6 @@ u16 capi20_register(struct capi20_appl *ap)
return CAPI_NOERROR;
}
-EXPORT_SYMBOL(capi20_register);
-
/**
* capi20_release() - CAPI 2.0 operation CAPI_RELEASE
* @ap: CAPI application descriptor structure.
@@ -755,8 +634,6 @@ u16 capi20_release(struct capi20_appl *ap)
return CAPI_NOERROR;
}
-EXPORT_SYMBOL(capi20_release);
-
/**
* capi20_put_message() - CAPI 2.0 operation CAPI_PUT_MESSAGE
* @ap: CAPI application descriptor structure.
@@ -834,8 +711,6 @@ u16 capi20_put_message(struct capi20_appl *ap, struct sk_buff *skb)
return ctr->send_message(ctr, skb);
}
-EXPORT_SYMBOL(capi20_put_message);
-
/**
* capi20_get_manufacturer() - CAPI 2.0 operation CAPI_GET_MANUFACTURER
* @contr: controller number.
@@ -869,8 +744,6 @@ u16 capi20_get_manufacturer(u32 contr, u8 *buf)
return ret;
}
-EXPORT_SYMBOL(capi20_get_manufacturer);
-
/**
* capi20_get_version() - CAPI 2.0 operation CAPI_GET_VERSION
* @contr: controller number.
@@ -904,8 +777,6 @@ u16 capi20_get_version(u32 contr, struct capi_version *verp)
return ret;
}
-EXPORT_SYMBOL(capi20_get_version);
-
/**
* capi20_get_serial() - CAPI 2.0 operation CAPI_GET_SERIAL_NUMBER
* @contr: controller number.
@@ -939,8 +810,6 @@ u16 capi20_get_serial(u32 contr, u8 *serial)
return ret;
}
-EXPORT_SYMBOL(capi20_get_serial);
-
/**
* capi20_get_profile() - CAPI 2.0 operation CAPI_GET_PROFILE
* @contr: controller number.
@@ -974,209 +843,6 @@ u16 capi20_get_profile(u32 contr, struct capi_profile *profp)
return ret;
}
-EXPORT_SYMBOL(capi20_get_profile);
-
-/* Must be called with capi_controller_lock held. */
-static int wait_on_ctr_state(struct capi_ctr *ctr, unsigned int state)
-{
- DEFINE_WAIT(wait);
- int retval = 0;
-
- ctr = capi_ctr_get(ctr);
- if (!ctr)
- return -ESRCH;
-
- for (;;) {
- prepare_to_wait(&ctr->state_wait_queue, &wait,
- TASK_INTERRUPTIBLE);
-
- if (ctr->state == state)
- break;
- if (ctr->state == CAPI_CTR_DETACHED) {
- retval = -ESRCH;
- break;
- }
- if (signal_pending(current)) {
- retval = -EINTR;
- break;
- }
-
- mutex_unlock(&capi_controller_lock);
- schedule();
- mutex_lock(&capi_controller_lock);
- }
- finish_wait(&ctr->state_wait_queue, &wait);
-
- capi_ctr_put(ctr);
-
- return retval;
-}
-
-#ifdef AVMB1_COMPAT
-static int old_capi_manufacturer(unsigned int cmd, void __user *data)
-{
- avmb1_loadandconfigdef ldef;
- avmb1_extcarddef cdef;
- avmb1_resetdef rdef;
- capicardparams cparams;
- struct capi_ctr *ctr;
- struct capi_driver *driver = NULL;
- capiloaddata ldata;
- struct list_head *l;
- int retval;
-
- switch (cmd) {
- case AVMB1_ADDCARD:
- case AVMB1_ADDCARD_WITH_TYPE:
- if (cmd == AVMB1_ADDCARD) {
- if ((retval = copy_from_user(&cdef, data,
- sizeof(avmb1_carddef))))
- return -EFAULT;
- cdef.cardtype = AVM_CARDTYPE_B1;
- cdef.cardnr = 0;
- } else {
- if ((retval = copy_from_user(&cdef, data,
- sizeof(avmb1_extcarddef))))
- return -EFAULT;
- }
- cparams.port = cdef.port;
- cparams.irq = cdef.irq;
- cparams.cardnr = cdef.cardnr;
-
- mutex_lock(&capi_drivers_lock);
-
- switch (cdef.cardtype) {
- case AVM_CARDTYPE_B1:
- list_for_each(l, &capi_drivers) {
- driver = list_entry(l, struct capi_driver, list);
- if (strcmp(driver->name, "b1isa") == 0)
- break;
- }
- break;
- case AVM_CARDTYPE_T1:
- list_for_each(l, &capi_drivers) {
- driver = list_entry(l, struct capi_driver, list);
- if (strcmp(driver->name, "t1isa") == 0)
- break;
- }
- break;
- default:
- driver = NULL;
- break;
- }
- if (!driver) {
- printk(KERN_ERR "kcapi: driver not loaded.\n");
- retval = -EIO;
- } else if (!driver->add_card) {
- printk(KERN_ERR "kcapi: driver has no add card function.\n");
- retval = -EIO;
- } else
- retval = driver->add_card(driver, &cparams);
-
- mutex_unlock(&capi_drivers_lock);
- return retval;
-
- case AVMB1_LOAD:
- case AVMB1_LOAD_AND_CONFIG:
-
- if (cmd == AVMB1_LOAD) {
- if (copy_from_user(&ldef, data,
- sizeof(avmb1_loaddef)))
- return -EFAULT;
- ldef.t4config.len = 0;
- ldef.t4config.data = NULL;
- } else {
- if (copy_from_user(&ldef, data,
- sizeof(avmb1_loadandconfigdef)))
- return -EFAULT;
- }
-
- mutex_lock(&capi_controller_lock);
-
- ctr = get_capi_ctr_by_nr(ldef.contr);
- if (!ctr) {
- retval = -EINVAL;
- goto load_unlock_out;
- }
-
- if (ctr->load_firmware == NULL) {
- printk(KERN_DEBUG "kcapi: load: no load function\n");
- retval = -ESRCH;
- goto load_unlock_out;
- }
-
- if (ldef.t4file.len <= 0) {
- printk(KERN_DEBUG "kcapi: load: invalid parameter: length of t4file is %d ?\n", ldef.t4file.len);
- retval = -EINVAL;
- goto load_unlock_out;
- }
- if (ldef.t4file.data == NULL) {
- printk(KERN_DEBUG "kcapi: load: invalid parameter: dataptr is 0\n");
- retval = -EINVAL;
- goto load_unlock_out;
- }
-
- ldata.firmware.user = 1;
- ldata.firmware.data = ldef.t4file.data;
- ldata.firmware.len = ldef.t4file.len;
- ldata.configuration.user = 1;
- ldata.configuration.data = ldef.t4config.data;
- ldata.configuration.len = ldef.t4config.len;
-
- if (ctr->state != CAPI_CTR_DETECTED) {
- printk(KERN_INFO "kcapi: load: contr=%d not in detect state\n", ldef.contr);
- retval = -EBUSY;
- goto load_unlock_out;
- }
- ctr->state = CAPI_CTR_LOADING;
-
- retval = ctr->load_firmware(ctr, &ldata);
- if (retval) {
- ctr->state = CAPI_CTR_DETECTED;
- goto load_unlock_out;
- }
-
- retval = wait_on_ctr_state(ctr, CAPI_CTR_RUNNING);
-
- load_unlock_out:
- mutex_unlock(&capi_controller_lock);
- return retval;
-
- case AVMB1_RESETCARD:
- if (copy_from_user(&rdef, data, sizeof(avmb1_resetdef)))
- return -EFAULT;
-
- retval = 0;
-
- mutex_lock(&capi_controller_lock);
-
- ctr = get_capi_ctr_by_nr(rdef.contr);
- if (!ctr) {
- retval = -ESRCH;
- goto reset_unlock_out;
- }
-
- if (ctr->state == CAPI_CTR_DETECTED)
- goto reset_unlock_out;
-
- if (ctr->reset_ctr == NULL) {
- printk(KERN_DEBUG "kcapi: reset: no reset function\n");
- retval = -ESRCH;
- goto reset_unlock_out;
- }
-
- ctr->reset_ctr(ctr);
-
- retval = wait_on_ctr_state(ctr, CAPI_CTR_DETECTED);
-
- reset_unlock_out:
- mutex_unlock(&capi_controller_lock);
- return retval;
- }
- return -EINVAL;
-}
-#endif
-
/**
* capi20_manufacturer() - CAPI 2.0 operation CAPI_MANUFACTURER
* @cmd: command.
@@ -1192,14 +858,6 @@ int capi20_manufacturer(unsigned long cmd, void __user *data)
int retval;
switch (cmd) {
-#ifdef AVMB1_COMPAT
- case AVMB1_LOAD:
- case AVMB1_LOAD_AND_CONFIG:
- case AVMB1_RESETCARD:
- case AVMB1_GET_CARDINFO:
- case AVMB1_REMOVECARD:
- return old_capi_manufacturer(cmd, data);
-#endif
case KCAPI_CMD_TRACE:
{
kcapi_flagdef fdef;
@@ -1222,43 +880,6 @@ int capi20_manufacturer(unsigned long cmd, void __user *data)
return retval;
}
- case KCAPI_CMD_ADDCARD:
- {
- struct list_head *l;
- struct capi_driver *driver = NULL;
- capicardparams cparams;
- kcapi_carddef cdef;
-
- if ((retval = copy_from_user(&cdef, data, sizeof(cdef))))
- return -EFAULT;
-
- cparams.port = cdef.port;
- cparams.irq = cdef.irq;
- cparams.membase = cdef.membase;
- cparams.cardnr = cdef.cardnr;
- cparams.cardtype = 0;
- cdef.driver[sizeof(cdef.driver) - 1] = 0;
-
- mutex_lock(&capi_drivers_lock);
-
- list_for_each(l, &capi_drivers) {
- driver = list_entry(l, struct capi_driver, list);
- if (strcmp(driver->name, cdef.driver) == 0)
- break;
- }
- if (driver == NULL) {
- printk(KERN_ERR "kcapi: driver \"%s\" not loaded.\n",
- cdef.driver);
- retval = -ESRCH;
- } else if (!driver->add_card) {
- printk(KERN_ERR "kcapi: driver \"%s\" has no add card function.\n", cdef.driver);
- retval = -EIO;
- } else
- retval = driver->add_card(driver, &cparams);
-
- mutex_unlock(&capi_drivers_lock);
- return retval;
- }
default:
printk(KERN_ERR "kcapi: manufacturer command %lu unknown.\n",
@@ -1269,8 +890,6 @@ int capi20_manufacturer(unsigned long cmd, void __user *data)
return -EINVAL;
}
-EXPORT_SYMBOL(capi20_manufacturer);
-
/* ------------------------------------------------------------- */
/* -------- Init & Cleanup ------------------------------------- */
/* ------------------------------------------------------------- */
@@ -1279,12 +898,7 @@ EXPORT_SYMBOL(capi20_manufacturer);
* init / exit functions
*/
-static struct notifier_block capictr_nb = {
- .notifier_call = notify_handler,
- .priority = INT_MAX,
-};
-
-static int __init kcapi_init(void)
+int __init kcapi_init(void)
{
int err;
@@ -1292,11 +906,8 @@ static int __init kcapi_init(void)
if (!kcapi_wq)
return -ENOMEM;
- register_capictr_notifier(&capictr_nb);
-
err = cdebug_init();
if (err) {
- unregister_capictr_notifier(&capictr_nb);
destroy_workqueue(kcapi_wq);
return err;
}
@@ -1305,14 +916,10 @@ static int __init kcapi_init(void)
return 0;
}
-static void __exit kcapi_exit(void)
+void kcapi_exit(void)
{
kcapi_proc_exit();
- unregister_capictr_notifier(&capictr_nb);
cdebug_exit();
destroy_workqueue(kcapi_wq);
}
-
-module_init(kcapi_init);
-module_exit(kcapi_exit);
diff --git a/drivers/isdn/capi/kcapi.h b/drivers/isdn/capi/kcapi.h
index 6d439f9a76b2..479623e1db2a 100644
--- a/drivers/isdn/capi/kcapi.h
+++ b/drivers/isdn/capi/kcapi.h
@@ -30,22 +30,153 @@ enum {
CAPI_CTR_RUNNING = 3,
};
-extern struct list_head capi_drivers;
-extern struct mutex capi_drivers_lock;
-
extern struct capi_ctr *capi_controller[CAPI_MAXCONTR];
extern struct mutex capi_controller_lock;
extern struct capi20_appl *capi_applications[CAPI_MAXAPPL];
-#ifdef CONFIG_PROC_FS
-
void kcapi_proc_init(void);
void kcapi_proc_exit(void);
-#else
+struct capi20_appl {
+ u16 applid;
+ capi_register_params rparam;
+ void (*recv_message)(struct capi20_appl *ap, struct sk_buff *skb);
+ void *private;
-static inline void kcapi_proc_init(void) { };
-static inline void kcapi_proc_exit(void) { };
+ /* internal to kernelcapi.o */
+ unsigned long nrecvctlpkt;
+ unsigned long nrecvdatapkt;
+ unsigned long nsentctlpkt;
+ unsigned long nsentdatapkt;
+ struct mutex recv_mtx;
+ struct sk_buff_head recv_queue;
+ struct work_struct recv_work;
+ int release_in_progress;
+};
-#endif
+u16 capi20_isinstalled(void);
+u16 capi20_register(struct capi20_appl *ap);
+u16 capi20_release(struct capi20_appl *ap);
+u16 capi20_put_message(struct capi20_appl *ap, struct sk_buff *skb);
+u16 capi20_get_manufacturer(u32 contr, u8 buf[CAPI_MANUFACTURER_LEN]);
+u16 capi20_get_version(u32 contr, struct capi_version *verp);
+u16 capi20_get_serial(u32 contr, u8 serial[CAPI_SERIAL_LEN]);
+u16 capi20_get_profile(u32 contr, struct capi_profile *profp);
+int capi20_manufacturer(unsigned long cmd, void __user *data);
+
+#define CAPICTR_UP 0
+#define CAPICTR_DOWN 1
+
+int kcapi_init(void);
+void kcapi_exit(void);
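+
+/*
+ * kcapi is no longer built as its own module; capi_init()/capi_exit()
+ * in capi.c call kcapi_init()/kcapi_exit() directly.
+ */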
+
+/*----- basic-type definitions -----*/
+
+typedef __u8 *_cstruct;
+
+typedef enum {
+ CAPI_COMPOSE,
+ CAPI_DEFAULT
+} _cmstruct;
+
+/*
+ The _cmsg structure contains all possible CAPI 2.0 parameters.
+ All parameters are stored here first. The function CAPI_CMSG_2_MESSAGE
+ assembles the parameters and builds CAPI 2.0 conformant messages.
+ CAPI_MESSAGE_2_CMSG disassembles CAPI 2.0 messages and stores the
+ parameters in the _cmsg structure.
+ */
+
+typedef struct {
+ /* Header */
+ __u16 ApplId;
+ __u8 Command;
+ __u8 Subcommand;
+ __u16 Messagenumber;
+
+ /* Parameter */
+ union {
+ __u32 adrController;
+ __u32 adrPLCI;
+ __u32 adrNCCI;
+ } adr;
+
+ _cmstruct AdditionalInfo;
+ _cstruct B1configuration;
+ __u16 B1protocol;
+ _cstruct B2configuration;
+ __u16 B2protocol;
+ _cstruct B3configuration;
+ __u16 B3protocol;
+ _cstruct BC;
+ _cstruct BChannelinformation;
+ _cmstruct BProtocol;
+ _cstruct CalledPartyNumber;
+ _cstruct CalledPartySubaddress;
+ _cstruct CallingPartyNumber;
+ _cstruct CallingPartySubaddress;
+ __u32 CIPmask;
+ __u32 CIPmask2;
+ __u16 CIPValue;
+ __u32 Class;
+ _cstruct ConnectedNumber;
+ _cstruct ConnectedSubaddress;
+ __u32 Data;
+ __u16 DataHandle;
+ __u16 DataLength;
+ _cstruct FacilityConfirmationParameter;
+ _cstruct Facilitydataarray;
+ _cstruct FacilityIndicationParameter;
+ _cstruct FacilityRequestParameter;
+ __u16 FacilitySelector;
+ __u16 Flags;
+ __u32 Function;
+ _cstruct HLC;
+ __u16 Info;
+ _cstruct InfoElement;
+ __u32 InfoMask;
+ __u16 InfoNumber;
+ _cstruct Keypadfacility;
+ _cstruct LLC;
+ _cstruct ManuData;
+ __u32 ManuID;
+ _cstruct NCPI;
+ __u16 Reason;
+ __u16 Reason_B3;
+ __u16 Reject;
+ _cstruct Useruserdata;
+
+ /* intern */
+ unsigned l, p;
+ unsigned char *par;
+ __u8 *m;
+
+ /* buffer to construct message */
+ __u8 buf[180];
+
+} _cmsg;
+
+/*-----------------------------------------------------------------------*/
+
+/*
+ * Debugging / Tracing functions
+ */
+
+char *capi_cmd2str(__u8 cmd, __u8 subcmd);
+
+typedef struct {
+ u_char *buf;
+ u_char *p;
+ size_t size;
+ size_t pos;
+} _cdebbuf;
+
+#define CDEBUG_SIZE 1024
+#define CDEBUG_GSIZE 4096
+
+void cdebbuf_free(_cdebbuf *cdb);
+int cdebug_init(void);
+void cdebug_exit(void);
+
+_cdebbuf *capi_message2str(__u8 *msg);
diff --git a/drivers/isdn/capi/kcapi_proc.c b/drivers/isdn/capi/kcapi_proc.c
index c94bd12c0f7c..eadbe59b3753 100644
--- a/drivers/isdn/capi/kcapi_proc.c
+++ b/drivers/isdn/capi/kcapi_proc.c
@@ -192,37 +192,15 @@ static const struct seq_operations seq_applstats_ops = {
// ---------------------------------------------------------------------------
-static void *capi_driver_start(struct seq_file *seq, loff_t *pos)
- __acquires(&capi_drivers_lock)
+/* /proc/capi/driver is always empty */
+static ssize_t empty_read(struct file *file, char __user *buf,
+ size_t size, loff_t *off)
{
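+ /* a zero-byte read reports EOF, so the file always appears empty */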
- mutex_lock(&capi_drivers_lock);
- return seq_list_start(&capi_drivers, *pos);
-}
-
-static void *capi_driver_next(struct seq_file *seq, void *v, loff_t *pos)
-{
- return seq_list_next(v, &capi_drivers, pos);
-}
-
-static void capi_driver_stop(struct seq_file *seq, void *v)
- __releases(&capi_drivers_lock)
-{
- mutex_unlock(&capi_drivers_lock);
-}
-
-static int capi_driver_show(struct seq_file *seq, void *v)
-{
- struct capi_driver *drv = list_entry(v, struct capi_driver, list);
-
- seq_printf(seq, "%-32s %s\n", drv->name, drv->revision);
return 0;
}
-static const struct seq_operations seq_capi_driver_ops = {
- .start = capi_driver_start,
- .next = capi_driver_next,
- .stop = capi_driver_stop,
- .show = capi_driver_show,
+static const struct file_operations empty_fops = {
+ .read = empty_read,
};
// ---------------------------------------------------------------------------
@@ -236,10 +214,10 @@ kcapi_proc_init(void)
proc_create_seq("capi/contrstats", 0, NULL, &seq_contrstats_ops);
proc_create_seq("capi/applications", 0, NULL, &seq_applications_ops);
proc_create_seq("capi/applstats", 0, NULL, &seq_applstats_ops);
- proc_create_seq("capi/driver", 0, NULL, &seq_capi_driver_ops);
+ proc_create("capi/driver", 0, NULL, &empty_fops);
}
-void __exit
+void
kcapi_proc_exit(void)
{
remove_proc_entry("capi/driver", NULL);
diff --git a/drivers/leds/leds-as3645a.c b/drivers/leds/leds-as3645a.c
index b7e0ae1af8fa..e8922fa03379 100644
--- a/drivers/leds/leds-as3645a.c
+++ b/drivers/leds/leds-as3645a.c
@@ -493,16 +493,17 @@ static int as3645a_parse_node(struct as3645a *flash,
switch (id) {
case AS_LED_FLASH:
flash->flash_node = child;
+ fwnode_handle_get(child);
break;
case AS_LED_INDICATOR:
flash->indicator_node = child;
+ fwnode_handle_get(child);
break;
default:
dev_warn(&flash->client->dev,
"unknown LED %u encountered, ignoring\n", id);
break;
}
- fwnode_handle_get(child);
}
if (!flash->flash_node) {
diff --git a/drivers/leds/leds-gpio.c b/drivers/leds/leds-gpio.c
index a5c73f3d5f79..2bf74595610f 100644
--- a/drivers/leds/leds-gpio.c
+++ b/drivers/leds/leds-gpio.c
@@ -151,9 +151,14 @@ static struct gpio_leds_priv *gpio_leds_create(struct platform_device *pdev)
struct gpio_led led = {};
const char *state = NULL;
+ /*
+ * Acquire the gpiod from DT with an uninitialized label; it is
+ * updated after the LED class device is registered, since only
+ * then is the final LED name known.
+ */
led.gpiod = devm_fwnode_get_gpiod_from_child(dev, NULL, child,
GPIOD_ASIS,
- led.name);
+ NULL);
if (IS_ERR(led.gpiod)) {
fwnode_handle_put(child);
return ERR_CAST(led.gpiod);
@@ -186,6 +191,9 @@ static struct gpio_leds_priv *gpio_leds_create(struct platform_device *pdev)
fwnode_handle_put(child);
return ERR_PTR(ret);
}
+ /* Set gpiod label to match the corresponding LED name. */
+ gpiod_set_consumer_name(led_dat->gpiod,
+ led_dat->cdev.dev->kobj.name);
priv->num_leds++;
}
diff --git a/drivers/leds/leds-lm3532.c b/drivers/leds/leds-lm3532.c
index 0507c6575c08..491268bb34a7 100644
--- a/drivers/leds/leds-lm3532.c
+++ b/drivers/leds/leds-lm3532.c
@@ -1,6 +1,7 @@
// SPDX-License-Identifier: GPL-2.0
// TI LM3532 LED driver
// Copyright (C) 2019 Texas Instruments Incorporated - http://www.ti.com/
+// http://www.ti.com/lit/ds/symlink/lm3532.pdf
#include <linux/i2c.h>
#include <linux/leds.h>
@@ -623,7 +624,7 @@ static int lm3532_parse_node(struct lm3532_data *priv)
led->num_leds = fwnode_property_count_u32(child, "led-sources");
if (led->num_leds > LM3532_MAX_LED_STRINGS) {
- dev_err(&priv->client->dev, "To many LED string defined\n");
+ dev_err(&priv->client->dev, "Too many LED string defined\n");
continue;
}
diff --git a/drivers/leds/leds-max77650.c b/drivers/leds/leds-max77650.c
index 4c2d0b3c6dad..a0d4b725c917 100644
--- a/drivers/leds/leds-max77650.c
+++ b/drivers/leds/leds-max77650.c
@@ -135,9 +135,16 @@ err_node_put:
return rv;
}
+static const struct of_device_id max77650_led_of_match[] = {
+ { .compatible = "maxim,max77650-led" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, max77650_led_of_match);
+
static struct platform_driver max77650_led_driver = {
.driver = {
.name = "max77650-led",
+ .of_match_table = max77650_led_of_match,
},
.probe = max77650_led_probe,
};
diff --git a/drivers/leds/leds-rb532.c b/drivers/leds/leds-rb532.c
index db5af83f0cec..b6447c1721b4 100644
--- a/drivers/leds/leds-rb532.c
+++ b/drivers/leds/leds-rb532.c
@@ -21,7 +21,6 @@ static void rb532_led_set(struct led_classdev *cdev,
{
if (brightness)
set_latch_u5(LO_ULED, 0);
-
else
set_latch_u5(0, LO_ULED);
}
diff --git a/drivers/leds/trigger/ledtrig-pattern.c b/drivers/leds/trigger/ledtrig-pattern.c
index 718729c89440..3abcafe46278 100644
--- a/drivers/leds/trigger/ledtrig-pattern.c
+++ b/drivers/leds/trigger/ledtrig-pattern.c
@@ -455,7 +455,7 @@ static void __exit pattern_trig_exit(void)
module_init(pattern_trig_init);
module_exit(pattern_trig_exit);
-MODULE_AUTHOR("Raphael Teysseyre <rteysseyre@gmail.com");
-MODULE_AUTHOR("Baolin Wang <baolin.wang@linaro.org");
+MODULE_AUTHOR("Raphael Teysseyre <rteysseyre@gmail.com>");
+MODULE_AUTHOR("Baolin Wang <baolin.wang@linaro.org>");
MODULE_DESCRIPTION("LED Pattern trigger");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/lightnvm/pblk-trace.h b/drivers/lightnvm/pblk-trace.h
index 9534503b69d9..47b67c6bff7a 100644
--- a/drivers/lightnvm/pblk-trace.h
+++ b/drivers/lightnvm/pblk-trace.h
@@ -46,7 +46,7 @@ TRACE_EVENT(pblk_chunk_reset,
TP_STRUCT__entry(
__string(name, name)
__field(u64, ppa)
- __field(int, state);
+ __field(int, state)
),
TP_fast_assign(
@@ -72,7 +72,7 @@ TRACE_EVENT(pblk_chunk_state,
TP_STRUCT__entry(
__string(name, name)
__field(u64, ppa)
- __field(int, state);
+ __field(int, state)
),
TP_fast_assign(
@@ -98,7 +98,7 @@ TRACE_EVENT(pblk_line_state,
TP_STRUCT__entry(
__string(name, name)
__field(int, line)
- __field(int, state);
+ __field(int, state)
),
TP_fast_assign(
@@ -121,7 +121,7 @@ TRACE_EVENT(pblk_state,
TP_STRUCT__entry(
__string(name, name)
- __field(int, state);
+ __field(int, state)
),
TP_fast_assign(
diff --git a/drivers/md/bcache/bcache.h b/drivers/md/bcache/bcache.h
index 9198c1b480d9..adf26a21fcd1 100644
--- a/drivers/md/bcache/bcache.h
+++ b/drivers/md/bcache/bcache.h
@@ -301,6 +301,7 @@ struct cached_dev {
struct block_device *bdev;
struct cache_sb sb;
+ struct cache_sb_disk *sb_disk;
struct bio sb_bio;
struct bio_vec sb_bv[1];
struct closure sb_write;
@@ -403,6 +404,7 @@ enum alloc_reserve {
struct cache {
struct cache_set *set;
struct cache_sb sb;
+ struct cache_sb_disk *sb_disk;
struct bio sb_bio;
struct bio_vec sb_bv[1];
diff --git a/drivers/md/bcache/bset.c b/drivers/md/bcache/bset.c
index cffcdc9feefb..4385303836d8 100644
--- a/drivers/md/bcache/bset.c
+++ b/drivers/md/bcache/bset.c
@@ -1257,6 +1257,11 @@ static void __btree_sort(struct btree_keys *b, struct btree_iter *iter,
* Our temporary buffer is the same size as the btree node's
* buffer, we can just swap buffers instead of doing a big
* memcpy()
+ *
+ * Don't worry even if 'out' is allocated from a mempool; it can
+ * still be swapped here, because state->pool is a page mempool
+ * created by mempool_init_page_pool(), which indeed allocates
+ * pages with alloc_pages().
*/
out->magic = b->set->data->magic;
diff --git a/drivers/md/bcache/btree.c b/drivers/md/bcache/btree.c
index 14d6c33b0957..fa872df4e770 100644
--- a/drivers/md/bcache/btree.c
+++ b/drivers/md/bcache/btree.c
@@ -734,34 +734,32 @@ static unsigned long bch_mca_scan(struct shrinker *shrink,
i = 0;
btree_cache_used = c->btree_cache_used;
- list_for_each_entry_safe(b, t, &c->btree_cache_freeable, list) {
+ list_for_each_entry_safe_reverse(b, t, &c->btree_cache_freeable, list) {
if (nr <= 0)
goto out;
- if (++i > 3 &&
- !mca_reap(b, 0, false)) {
+ if (!mca_reap(b, 0, false)) {
mca_data_free(b);
rw_unlock(true, b);
freed++;
}
nr--;
+ i++;
}
- for (; (nr--) && i < btree_cache_used; i++) {
- if (list_empty(&c->btree_cache))
+ list_for_each_entry_safe_reverse(b, t, &c->btree_cache, list) {
+ if (nr <= 0 || i >= btree_cache_used)
goto out;
- b = list_first_entry(&c->btree_cache, struct btree, list);
- list_rotate_left(&c->btree_cache);
-
- if (!b->accessed &&
- !mca_reap(b, 0, false)) {
+ if (!mca_reap(b, 0, false)) {
mca_bucket_free(b);
mca_data_free(b);
rw_unlock(true, b);
freed++;
- } else
- b->accessed = 0;
+ }
+
+ nr--;
+ i++;
}
out:
mutex_unlock(&c->bucket_lock);
@@ -1069,7 +1067,6 @@ retry:
BUG_ON(!b->written);
b->parent = parent;
- b->accessed = 1;
for (; i <= b->keys.nsets && b->keys.set[i].size; i++) {
prefetch(b->keys.set[i].tree);
@@ -1160,7 +1157,6 @@ retry:
goto retry;
}
- b->accessed = 1;
b->parent = parent;
bch_bset_init_next(&b->keys, b->keys.set->data, bset_magic(&b->c->sb));
diff --git a/drivers/md/bcache/btree.h b/drivers/md/bcache/btree.h
index 76cfd121a486..f4dcca449391 100644
--- a/drivers/md/bcache/btree.h
+++ b/drivers/md/bcache/btree.h
@@ -121,8 +121,6 @@ struct btree {
/* Key/pointer for this btree node */
BKEY_PADDED(key);
- /* Single bit - set when accessed, cleared by shrinker */
- unsigned long accessed;
unsigned long seq;
struct rw_semaphore lock;
struct cache_set *c;
diff --git a/drivers/md/bcache/journal.c b/drivers/md/bcache/journal.c
index be2a2a201603..33ddc5269e8d 100644
--- a/drivers/md/bcache/journal.c
+++ b/drivers/md/bcache/journal.c
@@ -417,10 +417,14 @@ err:
/* Journalling */
+#define nr_to_fifo_front(p, front_p, mask) (((p) - (front_p)) & (mask))
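+/*
+ * This computes the circular distance of a journal pin entry from the
+ * fifo front; a result of 0 means the entry is the oldest one.
+ */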
+
static void btree_flush_write(struct cache_set *c)
{
struct btree *b, *t, *btree_nodes[BTREE_FLUSH_NR];
- unsigned int i, n;
+ unsigned int i, nr, ref_nr;
+ atomic_t *fifo_front_p, *now_fifo_front_p;
+ size_t mask;
if (c->journal.btree_flushing)
return;
@@ -433,12 +437,50 @@ static void btree_flush_write(struct cache_set *c)
c->journal.btree_flushing = true;
spin_unlock(&c->journal.flush_write_lock);
+ /* get the oldest journal entry and check its refcount */
+ spin_lock(&c->journal.lock);
+ fifo_front_p = &fifo_front(&c->journal.pin);
+ ref_nr = atomic_read(fifo_front_p);
+ if (ref_nr <= 0) {
+ /*
+ * do nothing if no btree node references
+ * the oldest journal entry
+ */
+ spin_unlock(&c->journal.lock);
+ goto out;
+ }
+ spin_unlock(&c->journal.lock);
+
+ mask = c->journal.pin.mask;
+ nr = 0;
atomic_long_inc(&c->flush_write);
memset(btree_nodes, 0, sizeof(btree_nodes));
- n = 0;
mutex_lock(&c->bucket_lock);
list_for_each_entry_safe_reverse(b, t, &c->btree_cache, list) {
+ /*
+ * It is safe to get now_fifo_front_p without holding
+ * c->journal.lock here, because we don't need the exact
+ * value, only to check whether the front pointer of
+ * c->journal.pin has changed.
+ */
+ now_fifo_front_p = &fifo_front(&c->journal.pin);
+ /*
+ * If the oldest journal entry is reclaimed and the front
+ * pointer of c->journal.pin changes, there is no need to
+ * scan c->btree_cache any further; quit the loop and
+ * flush out what we already have.
+ */
+ if (now_fifo_front_p != fifo_front_p)
+ break;
+ /*
+ * quit this loop if all matching btree nodes have been
+ * scanned and recorded in btree_nodes[] already.
+ */
+ ref_nr = atomic_read(fifo_front_p);
+ if (nr >= ref_nr)
+ break;
+
if (btree_node_journal_flush(b))
pr_err("BUG: flush_write bit should not be set here!");
@@ -454,17 +496,44 @@ static void btree_flush_write(struct cache_set *c)
continue;
}
+ /*
+ * Only select btree nodes which exactly reference
+ * the oldest journal entry.
+ *
+ * If the journal entry pointed to by fifo_front_p is
+ * reclaimed in parallel, don't worry:
+ * - the list_for_each_xxx loop will quit when checking
+ * the next now_fifo_front_p.
+ * - if there are matched nodes recorded in btree_nodes[],
+ * they are clean now (this is why and how the oldest
+ * journal entry can be reclaimed). These selected nodes
+ * will be ignored and skipped in the following for-loop.
+ */
+ if (nr_to_fifo_front(btree_current_write(b)->journal,
+ fifo_front_p,
+ mask) != 0) {
+ mutex_unlock(&b->write_lock);
+ continue;
+ }
+
set_btree_node_journal_flush(b);
mutex_unlock(&b->write_lock);
- btree_nodes[n++] = b;
- if (n == BTREE_FLUSH_NR)
+ btree_nodes[nr++] = b;
+ /*
+ * To avoid holding c->bucket_lock for too long, scan
+ * for at most BTREE_FLUSH_NR matched btree nodes. If
+ * more btree nodes still reference the oldest journal
+ * entry, try to flush them the next time
+ * btree_flush_write() is called.
+ */
+ if (nr == BTREE_FLUSH_NR)
break;
}
mutex_unlock(&c->bucket_lock);
- for (i = 0; i < n; i++) {
+ for (i = 0; i < nr; i++) {
b = btree_nodes[i];
if (!b) {
pr_err("BUG: btree_nodes[%d] is NULL", i);
@@ -497,6 +566,7 @@ static void btree_flush_write(struct cache_set *c)
mutex_unlock(&b->write_lock);
}
+out:
spin_lock(&c->journal.flush_write_lock);
c->journal.btree_flushing = false;
spin_unlock(&c->journal.flush_write_lock);
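nr_to_fifo_front() leans on power-of-two ring arithmetic: subtracting the front index and masking with size-1 gives the forward distance even after the indices wrap. A standalone check of that arithmetic, with made-up values:

#include <stdio.h>

#define FIFO_SIZE 8u                    /* must be a power of two */
#define MASK      (FIFO_SIZE - 1)
#define NR_TO_FRONT(p, front) (((p) - (front)) & MASK)

int main(void)
{
        unsigned front = 6;             /* oldest journal entry */
        unsigned entry = 1;             /* index wrapped past the end */

        /* (1 - 6) & 7 == 3: the entry sits three slots after the front. */
        printf("distance = %u\n", NR_TO_FRONT(entry, front));
        return 0;
}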
diff --git a/drivers/md/bcache/super.c b/drivers/md/bcache/super.c
index 77e9869345e7..3dea1d5acd5c 100644
--- a/drivers/md/bcache/super.c
+++ b/drivers/md/bcache/super.c
@@ -15,7 +15,6 @@
#include "writeback.h"
#include <linux/blkdev.h>
-#include <linux/buffer_head.h>
#include <linux/debugfs.h>
#include <linux/genhd.h>
#include <linux/idr.h>
@@ -60,17 +59,18 @@ struct workqueue_struct *bch_journal_wq;
/* Superblock */
static const char *read_super(struct cache_sb *sb, struct block_device *bdev,
- struct page **res)
+ struct cache_sb_disk **res)
{
const char *err;
- struct cache_sb *s;
- struct buffer_head *bh = __bread(bdev, 1, SB_SIZE);
+ struct cache_sb_disk *s;
+ struct page *page;
unsigned int i;
- if (!bh)
+ page = read_cache_page_gfp(bdev->bd_inode->i_mapping,
+ SB_OFFSET >> PAGE_SHIFT, GFP_KERNEL);
+ if (IS_ERR(page))
return "IO error";
-
- s = (struct cache_sb *) bh->b_data;
+ s = page_address(page) + offset_in_page(SB_OFFSET);
sb->offset = le64_to_cpu(s->offset);
sb->version = le64_to_cpu(s->version);
@@ -188,12 +188,10 @@ static const char *read_super(struct cache_sb *sb, struct block_device *bdev,
}
sb->last_mount = (u32)ktime_get_real_seconds();
- err = NULL;
-
- get_page(bh->b_page);
- *res = bh->b_page;
+ *res = s;
+ return NULL;
err:
- put_bh(bh);
+ put_page(page);
return err;
}
@@ -207,15 +205,15 @@ static void write_bdev_super_endio(struct bio *bio)
closure_put(&dc->sb_write);
}
-static void __write_super(struct cache_sb *sb, struct bio *bio)
+static void __write_super(struct cache_sb *sb, struct cache_sb_disk *out,
+ struct bio *bio)
{
- struct cache_sb *out = page_address(bio_first_page_all(bio));
unsigned int i;
+ bio->bi_opf = REQ_OP_WRITE | REQ_SYNC | REQ_META;
bio->bi_iter.bi_sector = SB_SECTOR;
- bio->bi_iter.bi_size = SB_SIZE;
- bio_set_op_attrs(bio, REQ_OP_WRITE, REQ_SYNC|REQ_META);
- bch_bio_map(bio, NULL);
+ __bio_add_page(bio, virt_to_page(out), SB_SIZE,
+ offset_in_page(out));
out->offset = cpu_to_le64(sb->offset);
out->version = cpu_to_le64(sb->version);
@@ -257,14 +255,14 @@ void bch_write_bdev_super(struct cached_dev *dc, struct closure *parent)
down(&dc->sb_write_mutex);
closure_init(cl, parent);
- bio_reset(bio);
+ bio_init(bio, dc->sb_bv, 1);
bio_set_dev(bio, dc->bdev);
bio->bi_end_io = write_bdev_super_endio;
bio->bi_private = dc;
closure_get(cl);
/* I/O request sent to backing device */
- __write_super(&dc->sb, bio);
+ __write_super(&dc->sb, dc->sb_disk, bio);
closure_return_with_destructor(cl, bch_write_bdev_super_unlock);
}
@@ -306,13 +304,13 @@ void bcache_write_super(struct cache_set *c)
SET_CACHE_SYNC(&ca->sb, CACHE_SYNC(&c->sb));
- bio_reset(bio);
+ bio_init(bio, ca->sb_bv, 1);
bio_set_dev(bio, ca->bdev);
bio->bi_end_io = write_super_endio;
bio->bi_private = ca;
closure_get(cl);
- __write_super(&ca->sb, bio);
+ __write_super(&ca->sb, ca->sb_disk, bio);
}
closure_return_with_destructor(cl, bcache_write_super_unlock);
@@ -1275,6 +1273,9 @@ static void cached_dev_free(struct closure *cl)
mutex_unlock(&bch_register_lock);
+ if (dc->sb_disk)
+ put_page(virt_to_page(dc->sb_disk));
+
if (!IS_ERR_OR_NULL(dc->bdev))
blkdev_put(dc->bdev, FMODE_READ|FMODE_WRITE|FMODE_EXCL);
@@ -1350,7 +1351,7 @@ static int cached_dev_init(struct cached_dev *dc, unsigned int block_size)
/* Cached device - bcache superblock */
-static int register_bdev(struct cache_sb *sb, struct page *sb_page,
+static int register_bdev(struct cache_sb *sb, struct cache_sb_disk *sb_disk,
struct block_device *bdev,
struct cached_dev *dc)
{
@@ -1362,11 +1363,7 @@ static int register_bdev(struct cache_sb *sb, struct page *sb_page,
memcpy(&dc->sb, sb, sizeof(struct cache_sb));
dc->bdev = bdev;
dc->bdev->bd_holder = dc;
-
- bio_init(&dc->sb_bio, dc->sb_bio.bi_inline_vecs, 1);
- bio_first_bvec_all(&dc->sb_bio)->bv_page = sb_page;
- get_page(sb_page);
-
+ dc->sb_disk = sb_disk;
if (cached_dev_init(dc, sb->block_size << 9))
goto err;
@@ -2136,8 +2133,8 @@ void bch_cache_release(struct kobject *kobj)
for (i = 0; i < RESERVE_NR; i++)
free_fifo(&ca->free[i]);
- if (ca->sb_bio.bi_inline_vecs[0].bv_page)
- put_page(bio_first_page_all(&ca->sb_bio));
+ if (ca->sb_disk)
+ put_page(virt_to_page(ca->sb_disk));
if (!IS_ERR_OR_NULL(ca->bdev))
blkdev_put(ca->bdev, FMODE_READ|FMODE_WRITE|FMODE_EXCL);
@@ -2259,7 +2256,7 @@ err_free:
return ret;
}
-static int register_cache(struct cache_sb *sb, struct page *sb_page,
+static int register_cache(struct cache_sb *sb, struct cache_sb_disk *sb_disk,
struct block_device *bdev, struct cache *ca)
{
const char *err = NULL; /* must be set for any error case */
@@ -2269,10 +2266,7 @@ static int register_cache(struct cache_sb *sb, struct page *sb_page,
memcpy(&ca->sb, sb, sizeof(struct cache_sb));
ca->bdev = bdev;
ca->bdev->bd_holder = ca;
-
- bio_init(&ca->sb_bio, ca->sb_bio.bi_inline_vecs, 1);
- bio_first_bvec_all(&ca->sb_bio)->bv_page = sb_page;
- get_page(sb_page);
+ ca->sb_disk = sb_disk;
if (blk_queue_discard(bdev_get_queue(bdev)))
ca->discard = CACHE_DISCARD(&ca->sb);
@@ -2372,29 +2366,35 @@ static bool bch_is_open(struct block_device *bdev)
static ssize_t register_bcache(struct kobject *k, struct kobj_attribute *attr,
const char *buffer, size_t size)
{
- ssize_t ret = -EINVAL;
- const char *err = "cannot allocate memory";
+ const char *err;
char *path = NULL;
- struct cache_sb *sb = NULL;
- struct block_device *bdev = NULL;
- struct page *sb_page = NULL;
+ struct cache_sb *sb;
+ struct cache_sb_disk *sb_disk;
+ struct block_device *bdev;
+ ssize_t ret;
+ ret = -EBUSY;
+ err = "failed to reference bcache module";
if (!try_module_get(THIS_MODULE))
- return -EBUSY;
+ goto out;
/* For latest state of bcache_is_reboot */
smp_mb();
+ err = "bcache is in reboot";
if (bcache_is_reboot)
- return -EBUSY;
+ goto out_module_put;
+ ret = -ENOMEM;
+ err = "cannot allocate memory";
path = kstrndup(buffer, size, GFP_KERNEL);
if (!path)
- goto err;
+ goto out_module_put;
sb = kmalloc(sizeof(struct cache_sb), GFP_KERNEL);
if (!sb)
- goto err;
+ goto out_free_path;
+ ret = -EINVAL;
err = "failed to open device";
bdev = blkdev_get_by_path(strim(path),
FMODE_READ|FMODE_WRITE|FMODE_EXCL,
@@ -2411,57 +2411,63 @@ static ssize_t register_bcache(struct kobject *k, struct kobj_attribute *attr,
if (!IS_ERR(bdev))
bdput(bdev);
if (attr == &ksysfs_register_quiet)
- goto quiet_out;
+ goto done;
}
- goto err;
+ goto out_free_sb;
}
err = "failed to set blocksize";
if (set_blocksize(bdev, 4096))
- goto err_close;
+ goto out_blkdev_put;
- err = read_super(sb, bdev, &sb_page);
+ err = read_super(sb, bdev, &sb_disk);
if (err)
- goto err_close;
+ goto out_blkdev_put;
err = "failed to register device";
if (SB_IS_BDEV(sb)) {
struct cached_dev *dc = kzalloc(sizeof(*dc), GFP_KERNEL);
if (!dc)
- goto err_close;
+ goto out_put_sb_page;
mutex_lock(&bch_register_lock);
- ret = register_bdev(sb, sb_page, bdev, dc);
+ ret = register_bdev(sb, sb_disk, bdev, dc);
mutex_unlock(&bch_register_lock);
/* blkdev_put() will be called in cached_dev_free() */
if (ret < 0)
- goto err;
+ goto out_free_sb;
} else {
struct cache *ca = kzalloc(sizeof(*ca), GFP_KERNEL);
if (!ca)
- goto err_close;
+ goto out_put_sb_page;
/* blkdev_put() will be called in bch_cache_release() */
- if (register_cache(sb, sb_page, bdev, ca) != 0)
- goto err;
+ if (register_cache(sb, sb_disk, bdev, ca) != 0)
+ goto out_free_sb;
}
-quiet_out:
- ret = size;
-out:
- if (sb_page)
- put_page(sb_page);
+
+done:
kfree(sb);
kfree(path);
module_put(THIS_MODULE);
- return ret;
+ return size;
-err_close:
- blkdev_put(bdev, FMODE_READ|FMODE_WRITE|FMODE_EXCL);
-err:
- pr_info("error %s: %s", path, err);
- goto out;
+out_put_sb_page:
+ put_page(virt_to_page(sb_disk));
+out_blkdev_put:
+ blkdev_put(bdev, FMODE_READ | FMODE_WRITE | FMODE_EXCL);
+out_free_sb:
+ kfree(sb);
+out_free_path:
+ kfree(path);
+ path = NULL;
+out_module_put:
+ module_put(THIS_MODULE);
+out:
+ pr_info("error %s: %s", path ? path : "", err);
+ return ret;
}
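The rewritten error path gives each acquired resource its own unwind label, entered in reverse order of acquisition, instead of funnelling every failure through one shared err label. The shape of that idiom in a standalone toy (names are made up):

#include <stdio.h>
#include <stdlib.h>

static int do_register(void)
{
        char *path = NULL, *sb = NULL;

        path = malloc(16);
        if (!path)
                goto out;
        sb = malloc(64);
        if (!sb)
                goto out_free_path;

        /* ... use path and sb ... */
        free(sb);
        free(path);
        return 0;

out_free_path:
        free(path);
out:
        fprintf(stderr, "registration failed\n");
        return -1;
}

int main(void) { return do_register() ? 1 : 0; }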
diff --git a/drivers/md/dm-bio-prison-v2.c b/drivers/md/dm-bio-prison-v2.c
index 8ee019eda32d..9dec3b61cf70 100644
--- a/drivers/md/dm-bio-prison-v2.c
+++ b/drivers/md/dm-bio-prison-v2.c
@@ -324,7 +324,7 @@ static bool __unlock(struct dm_bio_prison_v2 *prison,
bio_list_init(&cell->bios);
if (cell->shared_count) {
- cell->exclusive_lock = 0;
+ cell->exclusive_lock = false;
return false;
}
diff --git a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c
index eb9782fc93fe..c6a529873d0f 100644
--- a/drivers/md/dm-crypt.c
+++ b/drivers/md/dm-crypt.c
@@ -1,8 +1,8 @@
/*
* Copyright (C) 2003 Jana Saout <jana@saout.de>
* Copyright (C) 2004 Clemens Fruhwirth <clemens@endorphin.org>
- * Copyright (C) 2006-2017 Red Hat, Inc. All rights reserved.
- * Copyright (C) 2013-2017 Milan Broz <gmazyland@gmail.com>
+ * Copyright (C) 2006-2020 Red Hat, Inc. All rights reserved.
+ * Copyright (C) 2013-2020 Milan Broz <gmazyland@gmail.com>
*
* This file is released under the GPL.
*/
@@ -115,6 +115,11 @@ struct iv_tcw_private {
u8 *whitening;
};
+#define ELEPHANT_MAX_KEY_SIZE 32
+struct iv_elephant_private {
+ struct crypto_skcipher *tfm;
+};
+
/*
* Crypt: maps a linear range of a block device
* and encrypts / decrypts at the same time.
@@ -125,6 +130,7 @@ enum flags { DM_CRYPT_SUSPENDED, DM_CRYPT_KEY_VALID,
enum cipher_flags {
CRYPT_MODE_INTEGRITY_AEAD, /* Use authenticated mode for cipher */
CRYPT_IV_LARGE_SECTORS, /* Calculate IV from sector_size, not 512B sectors */
+ CRYPT_ENCRYPT_PREPROCESS, /* Must preprocess data for encryption (elephant) */
};
/*
@@ -152,6 +158,7 @@ struct crypt_config {
struct iv_benbi_private benbi;
struct iv_lmk_private lmk;
struct iv_tcw_private tcw;
+ struct iv_elephant_private elephant;
} iv_gen_private;
u64 iv_offset;
unsigned int iv_size;
@@ -285,6 +292,11 @@ static struct crypto_aead *any_tfm_aead(struct crypt_config *cc)
* eboiv: Encrypted byte-offset IV (used in Bitlocker in CBC mode)
* The IV is encrypted little-endian byte-offset (with the same key
* and cipher as the volume).
+ *
+ * elephant: The extended version of eboiv with an additional Elephant diffuser,
+ * used with Bitlocker CBC mode.
+ * This mode was used in older Windows systems:
+ * http://download.microsoft.com/download/0/2/3/0238acaf-d3bf-4a6d-b3d6-0a0be4bbb36e/bitlockercipher200608.pdf
*/
static int crypt_iv_plain_gen(struct crypt_config *cc, u8 *iv,
@@ -331,8 +343,14 @@ static int crypt_iv_essiv_gen(struct crypt_config *cc, u8 *iv,
static int crypt_iv_benbi_ctr(struct crypt_config *cc, struct dm_target *ti,
const char *opts)
{
- unsigned bs = crypto_skcipher_blocksize(any_tfm(cc));
- int log = ilog2(bs);
+ unsigned bs;
+ int log;
+
+ if (test_bit(CRYPT_MODE_INTEGRITY_AEAD, &cc->cipher_flags))
+ bs = crypto_aead_blocksize(any_tfm_aead(cc));
+ else
+ bs = crypto_skcipher_blocksize(any_tfm(cc));
+ log = ilog2(bs);
/* we need to calculate how far we must shift the sector count
* to get the cipher block count, we use this shift in _gen */
@@ -717,7 +735,7 @@ static int crypt_iv_eboiv_gen(struct crypt_config *cc, u8 *iv,
struct crypto_wait wait;
int err;
- req = skcipher_request_alloc(any_tfm(cc), GFP_KERNEL | GFP_NOFS);
+ req = skcipher_request_alloc(any_tfm(cc), GFP_NOIO);
if (!req)
return -ENOMEM;
@@ -734,6 +752,290 @@ static int crypt_iv_eboiv_gen(struct crypt_config *cc, u8 *iv,
return err;
}
+static void crypt_iv_elephant_dtr(struct crypt_config *cc)
+{
+ struct iv_elephant_private *elephant = &cc->iv_gen_private.elephant;
+
+ crypto_free_skcipher(elephant->tfm);
+ elephant->tfm = NULL;
+}
+
+static int crypt_iv_elephant_ctr(struct crypt_config *cc, struct dm_target *ti,
+ const char *opts)
+{
+ struct iv_elephant_private *elephant = &cc->iv_gen_private.elephant;
+ int r;
+
+ elephant->tfm = crypto_alloc_skcipher("ecb(aes)", 0, 0);
+ if (IS_ERR(elephant->tfm)) {
+ r = PTR_ERR(elephant->tfm);
+ elephant->tfm = NULL;
+ return r;
+ }
+
+ r = crypt_iv_eboiv_ctr(cc, ti, NULL);
+ if (r)
+ crypt_iv_elephant_dtr(cc);
+ return r;
+}
+
+static void diffuser_disk_to_cpu(u32 *d, size_t n)
+{
+#ifndef __LITTLE_ENDIAN
+ int i;
+
+ for (i = 0; i < n; i++)
+ d[i] = le32_to_cpu((__le32)d[i]);
+#endif
+}
+
+static void diffuser_cpu_to_disk(__le32 *d, size_t n)
+{
+#ifndef __LITTLE_ENDIAN
+ int i;
+
+ for (i = 0; i < n; i++)
+ d[i] = cpu_to_le32((u32)d[i]);
+#endif
+}
+
+static void diffuser_a_decrypt(u32 *d, size_t n)
+{
+ int i, i1, i2, i3;
+
+ for (i = 0; i < 5; i++) {
+ i1 = 0;
+ i2 = n - 2;
+ i3 = n - 5;
+
+ while (i1 < (n - 1)) {
+ d[i1] += d[i2] ^ (d[i3] << 9 | d[i3] >> 23);
+ i1++; i2++; i3++;
+
+ if (i3 >= n)
+ i3 -= n;
+
+ d[i1] += d[i2] ^ d[i3];
+ i1++; i2++; i3++;
+
+ if (i2 >= n)
+ i2 -= n;
+
+ d[i1] += d[i2] ^ (d[i3] << 13 | d[i3] >> 19);
+ i1++; i2++; i3++;
+
+ d[i1] += d[i2] ^ d[i3];
+ i1++; i2++; i3++;
+ }
+ }
+}
+
+static void diffuser_a_encrypt(u32 *d, size_t n)
+{
+ int i, i1, i2, i3;
+
+ for (i = 0; i < 5; i++) {
+ i1 = n - 1;
+ i2 = n - 2 - 1;
+ i3 = n - 5 - 1;
+
+ while (i1 > 0) {
+ d[i1] -= d[i2] ^ d[i3];
+ i1--; i2--; i3--;
+
+ d[i1] -= d[i2] ^ (d[i3] << 13 | d[i3] >> 19);
+ i1--; i2--; i3--;
+
+ if (i2 < 0)
+ i2 += n;
+
+ d[i1] -= d[i2] ^ d[i3];
+ i1--; i2--; i3--;
+
+ if (i3 < 0)
+ i3 += n;
+
+ d[i1] -= d[i2] ^ (d[i3] << 9 | d[i3] >> 23);
+ i1--; i2--; i3--;
+ }
+ }
+}
+
+static void diffuser_b_decrypt(u32 *d, size_t n)
+{
+ int i, i1, i2, i3;
+
+ for (i = 0; i < 3; i++) {
+ i1 = 0;
+ i2 = 2;
+ i3 = 5;
+
+ while (i1 < (n - 1)) {
+ d[i1] += d[i2] ^ d[i3];
+ i1++; i2++; i3++;
+
+ d[i1] += d[i2] ^ (d[i3] << 10 | d[i3] >> 22);
+ i1++; i2++; i3++;
+
+ if (i2 >= n)
+ i2 -= n;
+
+ d[i1] += d[i2] ^ d[i3];
+ i1++; i2++; i3++;
+
+ if (i3 >= n)
+ i3 -= n;
+
+ d[i1] += d[i2] ^ (d[i3] << 25 | d[i3] >> 7);
+ i1++; i2++; i3++;
+ }
+ }
+}
+
+static void diffuser_b_encrypt(u32 *d, size_t n)
+{
+ int i, i1, i2, i3;
+
+ for (i = 0; i < 3; i++) {
+ i1 = n - 1;
+ i2 = 2 - 1;
+ i3 = 5 - 1;
+
+ while (i1 > 0) {
+ d[i1] -= d[i2] ^ (d[i3] << 25 | d[i3] >> 7);
+ i1--; i2--; i3--;
+
+ if (i3 < 0)
+ i3 += n;
+
+ d[i1] -= d[i2] ^ d[i3];
+ i1--; i2--; i3--;
+
+ if (i2 < 0)
+ i2 += n;
+
+ d[i1] -= d[i2] ^ (d[i3] << 10 | d[i3] >> 22);
+ i1--; i2--; i3--;
+
+ d[i1] -= d[i2] ^ d[i3];
+ i1--; i2--; i3--;
+ }
+ }
+}
+
+static int crypt_iv_elephant(struct crypt_config *cc, struct dm_crypt_request *dmreq)
+{
+ struct iv_elephant_private *elephant = &cc->iv_gen_private.elephant;
+ u8 *es, *ks, *data, *data2, *data_offset;
+ struct skcipher_request *req;
+ struct scatterlist *sg, *sg2, src, dst;
+ struct crypto_wait wait;
+ int i, r;
+
+ req = skcipher_request_alloc(elephant->tfm, GFP_NOIO);
+ es = kzalloc(16, GFP_NOIO); /* Key for AES */
+ ks = kzalloc(32, GFP_NOIO); /* Elephant sector key */
+
+ if (!req || !es || !ks) {
+ r = -ENOMEM;
+ goto out;
+ }
+
+ *(__le64 *)es = cpu_to_le64(dmreq->iv_sector * cc->sector_size);
+
+ /* E(Ks, e(s)) */
+ sg_init_one(&src, es, 16);
+ sg_init_one(&dst, ks, 16);
+ skcipher_request_set_crypt(req, &src, &dst, 16, NULL);
+ skcipher_request_set_callback(req, 0, crypto_req_done, &wait);
+ r = crypto_wait_req(crypto_skcipher_encrypt(req), &wait);
+ if (r)
+ goto out;
+
+ /* E(Ks, e'(s)) */
+ es[15] = 0x80;
+ sg_init_one(&dst, &ks[16], 16);
+ r = crypto_wait_req(crypto_skcipher_encrypt(req), &wait);
+ if (r)
+ goto out;
+
+ sg = crypt_get_sg_data(cc, dmreq->sg_out);
+ data = kmap_atomic(sg_page(sg));
+ data_offset = data + sg->offset;
+
+ /* Cannot modify original bio, copy to sg_out and apply Elephant to it */
+ if (bio_data_dir(dmreq->ctx->bio_in) == WRITE) {
+ sg2 = crypt_get_sg_data(cc, dmreq->sg_in);
+ data2 = kmap_atomic(sg_page(sg2));
+ memcpy(data_offset, data2 + sg2->offset, cc->sector_size);
+ kunmap_atomic(data2);
+ }
+
+ if (bio_data_dir(dmreq->ctx->bio_in) != WRITE) {
+ diffuser_disk_to_cpu((u32*)data_offset, cc->sector_size / sizeof(u32));
+ diffuser_b_decrypt((u32*)data_offset, cc->sector_size / sizeof(u32));
+ diffuser_a_decrypt((u32*)data_offset, cc->sector_size / sizeof(u32));
+ diffuser_cpu_to_disk((__le32*)data_offset, cc->sector_size / sizeof(u32));
+ }
+
+ for (i = 0; i < (cc->sector_size / 32); i++)
+ crypto_xor(data_offset + i * 32, ks, 32);
+
+ if (bio_data_dir(dmreq->ctx->bio_in) == WRITE) {
+ diffuser_disk_to_cpu((u32*)data_offset, cc->sector_size / sizeof(u32));
+ diffuser_a_encrypt((u32*)data_offset, cc->sector_size / sizeof(u32));
+ diffuser_b_encrypt((u32*)data_offset, cc->sector_size / sizeof(u32));
+ diffuser_cpu_to_disk((__le32*)data_offset, cc->sector_size / sizeof(u32));
+ }
+
+ kunmap_atomic(data);
+out:
+ kzfree(ks);
+ kzfree(es);
+ skcipher_request_free(req);
+ return r;
+}
+
+static int crypt_iv_elephant_gen(struct crypt_config *cc, u8 *iv,
+ struct dm_crypt_request *dmreq)
+{
+ int r;
+
+ if (bio_data_dir(dmreq->ctx->bio_in) == WRITE) {
+ r = crypt_iv_elephant(cc, dmreq);
+ if (r)
+ return r;
+ }
+
+ return crypt_iv_eboiv_gen(cc, iv, dmreq);
+}
+
+static int crypt_iv_elephant_post(struct crypt_config *cc, u8 *iv,
+ struct dm_crypt_request *dmreq)
+{
+ if (bio_data_dir(dmreq->ctx->bio_in) != WRITE)
+ return crypt_iv_elephant(cc, dmreq);
+
+ return 0;
+}
+
+static int crypt_iv_elephant_init(struct crypt_config *cc)
+{
+ struct iv_elephant_private *elephant = &cc->iv_gen_private.elephant;
+ int key_offset = cc->key_size - cc->key_extra_size;
+
+ return crypto_skcipher_setkey(elephant->tfm, &cc->key[key_offset], cc->key_extra_size);
+}
+
+static int crypt_iv_elephant_wipe(struct crypt_config *cc)
+{
+ struct iv_elephant_private *elephant = &cc->iv_gen_private.elephant;
+ u8 key[ELEPHANT_MAX_KEY_SIZE];
+
+ memset(key, 0, cc->key_extra_size);
+ return crypto_skcipher_setkey(elephant->tfm, key, cc->key_extra_size);
+}
+
static const struct crypt_iv_operations crypt_iv_plain_ops = {
.generator = crypt_iv_plain_gen
};
@@ -787,6 +1089,15 @@ static struct crypt_iv_operations crypt_iv_eboiv_ops = {
.generator = crypt_iv_eboiv_gen
};
+static struct crypt_iv_operations crypt_iv_elephant_ops = {
+ .ctr = crypt_iv_elephant_ctr,
+ .dtr = crypt_iv_elephant_dtr,
+ .init = crypt_iv_elephant_init,
+ .wipe = crypt_iv_elephant_wipe,
+ .generator = crypt_iv_elephant_gen,
+ .post = crypt_iv_elephant_post
+};
+
/*
* Integrity extensions
*/
@@ -1103,6 +1414,9 @@ static int crypt_convert_block_skcipher(struct crypt_config *cc,
r = cc->iv_gen_ops->generator(cc, org_iv, dmreq);
if (r < 0)
return r;
+ /* Data may already be preprocessed in the generator */
+ if (test_bit(CRYPT_ENCRYPT_PREPROCESS, &cc->cipher_flags))
+ sg_in = sg_out;
/* Store generated IV in integrity metadata */
if (cc->integrity_iv_size)
memcpy(tag_iv, org_iv, cc->integrity_iv_size);
@@ -2191,7 +2505,14 @@ static int crypt_ctr_ivmode(struct dm_target *ti, const char *ivmode)
cc->iv_gen_ops = &crypt_iv_null_ops;
else if (strcmp(ivmode, "eboiv") == 0)
cc->iv_gen_ops = &crypt_iv_eboiv_ops;
- else if (strcmp(ivmode, "lmk") == 0) {
+ else if (strcmp(ivmode, "elephant") == 0) {
+ cc->iv_gen_ops = &crypt_iv_elephant_ops;
+ cc->key_parts = 2;
+ cc->key_extra_size = cc->key_size / 2;
+ if (cc->key_extra_size > ELEPHANT_MAX_KEY_SIZE)
+ return -EINVAL;
+ set_bit(CRYPT_ENCRYPT_PREPROCESS, &cc->cipher_flags);
+ } else if (strcmp(ivmode, "lmk") == 0) {
cc->iv_gen_ops = &crypt_iv_lmk_ops;
/*
* Version 2 and 3 is recognised according
@@ -2959,7 +3280,7 @@ static void crypt_io_hints(struct dm_target *ti, struct queue_limits *limits)
static struct target_type crypt_target = {
.name = "crypt",
- .version = {1, 19, 0},
+ .version = {1, 20, 0},
.module = THIS_MODULE,
.ctr = crypt_ctr,
.dtr = crypt_dtr,
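The shift pairs in the diffuser rounds above are 32-bit rotations written out by hand: 9+23, 13+19, 10+22 and 25+7 all sum to 32. A standalone check of that identity:

#include <stdint.h>
#include <stdio.h>

static uint32_t rotl32(uint32_t v, unsigned s)
{
        return (v << s) | (v >> (32 - s));
}

int main(void)
{
        uint32_t d = 0xdeadbeef;

        /* Both expressions print the same value. */
        printf("%08x %08x\n", (d << 9) | (d >> 23), rotl32(d, 9));
        return 0;
}

The encrypt variants walk the words backwards subtracting what the decrypt variants add walking forwards, which is what makes each diffuser pass its own inverse.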
diff --git a/drivers/md/dm-dust.c b/drivers/md/dm-dust.c
index eb37584427a4..ff03b90072c5 100644
--- a/drivers/md/dm-dust.c
+++ b/drivers/md/dm-dust.c
@@ -207,16 +207,16 @@ static int dust_map_write(struct dust_device *dd, sector_t thisblock,
bool fail_read_on_bb)
{
unsigned long flags;
- int ret = DM_MAPIO_REMAPPED;
+ int r = DM_MAPIO_REMAPPED;
if (fail_read_on_bb) {
thisblock >>= dd->sect_per_block_shift;
spin_lock_irqsave(&dd->dust_lock, flags);
- ret = __dust_map_write(dd, thisblock);
+ r = __dust_map_write(dd, thisblock);
spin_unlock_irqrestore(&dd->dust_lock, flags);
}
- return ret;
+ return r;
}
static int dust_map(struct dm_target *ti, struct bio *bio)
diff --git a/drivers/md/dm-mpath.c b/drivers/md/dm-mpath.c
index e0c32793c248..2bc18c9c3abc 100644
--- a/drivers/md/dm-mpath.c
+++ b/drivers/md/dm-mpath.c
@@ -20,6 +20,7 @@
#include <linux/pagemap.h>
#include <linux/slab.h>
#include <linux/time.h>
+#include <linux/timer.h>
#include <linux/workqueue.h>
#include <linux/delay.h>
#include <scsi/scsi_dh.h>
@@ -29,6 +30,9 @@
#define DM_MSG_PREFIX "multipath"
#define DM_PG_INIT_DELAY_MSECS 2000
#define DM_PG_INIT_DELAY_DEFAULT ((unsigned) -1)
+#define QUEUE_IF_NO_PATH_TIMEOUT_DEFAULT 0
+
+static unsigned long queue_if_no_path_timeout_secs = QUEUE_IF_NO_PATH_TIMEOUT_DEFAULT;
/* Path properties */
struct pgpath {
@@ -91,6 +95,8 @@ struct multipath {
struct work_struct process_queued_bios;
struct bio_list queued_bios;
+
+ struct timer_list nopath_timer; /* Timeout for queue_if_no_path */
};
/*
@@ -108,6 +114,7 @@ static void trigger_event(struct work_struct *work);
static void activate_or_offline_path(struct pgpath *pgpath);
static void activate_path_work(struct work_struct *work);
static void process_queued_bios(struct work_struct *work);
+static void queue_if_no_path_timeout_work(struct timer_list *t);
/*-----------------------------------------------
* Multipath state flags.
@@ -195,6 +202,8 @@ static struct multipath *alloc_multipath(struct dm_target *ti)
m->ti = ti;
ti->private = m;
+
+ timer_setup(&m->nopath_timer, queue_if_no_path_timeout_work, 0);
}
return m;
@@ -718,6 +727,43 @@ static int queue_if_no_path(struct multipath *m, bool queue_if_no_path,
}
/*
+ * If the queue_if_no_path timeout fires, turn off queue_if_no_path and
+ * process any queued I/O.
+ */
+static void queue_if_no_path_timeout_work(struct timer_list *t)
+{
+ struct multipath *m = from_timer(m, t, nopath_timer);
+ struct mapped_device *md = dm_table_get_md(m->ti->table);
+
+ DMWARN("queue_if_no_path timeout on %s, failing queued IO", dm_device_name(md));
+ queue_if_no_path(m, false, false);
+}
+
+/*
+ * Enable the queue_if_no_path timeout if necessary.
+ * Called with m->lock held.
+ */
+static void enable_nopath_timeout(struct multipath *m)
+{
+ unsigned long queue_if_no_path_timeout =
+ READ_ONCE(queue_if_no_path_timeout_secs) * HZ;
+
+ lockdep_assert_held(&m->lock);
+
+ if (queue_if_no_path_timeout > 0 &&
+ atomic_read(&m->nr_valid_paths) == 0 &&
+ test_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags)) {
+ mod_timer(&m->nopath_timer,
+ jiffies + queue_if_no_path_timeout);
+ }
+}
+
+static void disable_nopath_timeout(struct multipath *m)
+{
+ del_timer_sync(&m->nopath_timer);
+}
+
+/*
* An event is triggered whenever a path is taken out of use.
* Includes path failure and PG bypass.
*/
@@ -1090,6 +1136,7 @@ static int multipath_ctr(struct dm_target *ti, unsigned argc, char **argv)
struct dm_arg_set as;
unsigned pg_count = 0;
unsigned next_pg_num;
+ unsigned long flags;
as.argc = argc;
as.argv = argv;
@@ -1154,6 +1201,10 @@ static int multipath_ctr(struct dm_target *ti, unsigned argc, char **argv)
goto bad;
}
+ spin_lock_irqsave(&m->lock, flags);
+ enable_nopath_timeout(m);
+ spin_unlock_irqrestore(&m->lock, flags);
+
ti->num_flush_bios = 1;
ti->num_discard_bios = 1;
ti->num_write_same_bios = 1;
@@ -1208,6 +1259,7 @@ static void multipath_dtr(struct dm_target *ti)
{
struct multipath *m = ti->private;
+ disable_nopath_timeout(m);
flush_multipath_work(m);
free_multipath(m);
}
@@ -1241,6 +1293,8 @@ static int fail_path(struct pgpath *pgpath)
schedule_work(&m->trigger_event);
+ enable_nopath_timeout(m);
+
out:
spin_unlock_irqrestore(&m->lock, flags);
@@ -1291,6 +1345,9 @@ out:
process_queued_io_list(m);
}
+ if (pgpath->is_active)
+ disable_nopath_timeout(m);
+
return r;
}
@@ -1444,7 +1501,7 @@ static void pg_init_done(void *data, int errors)
break;
case SCSI_DH_RETRY:
/* Wait before retrying. */
- delay_retry = 1;
+ delay_retry = true;
/* fall through */
case SCSI_DH_IMM_RETRY:
case SCSI_DH_RES_TEMP_UNAVAIL:
@@ -1789,6 +1846,7 @@ static int multipath_message(struct dm_target *ti, unsigned argc, char **argv,
struct dm_dev *dev;
struct multipath *m = ti->private;
action_fn action;
+ unsigned long flags;
mutex_lock(&m->work_mutex);
@@ -1800,9 +1858,13 @@ static int multipath_message(struct dm_target *ti, unsigned argc, char **argv,
if (argc == 1) {
if (!strcasecmp(argv[0], "queue_if_no_path")) {
r = queue_if_no_path(m, true, false);
+ spin_lock_irqsave(&m->lock, flags);
+ enable_nopath_timeout(m);
+ spin_unlock_irqrestore(&m->lock, flags);
goto out;
} else if (!strcasecmp(argv[0], "fail_if_no_path")) {
r = queue_if_no_path(m, false, false);
+ disable_nopath_timeout(m);
goto out;
}
}
@@ -2065,6 +2127,10 @@ static void __exit dm_multipath_exit(void)
module_init(dm_multipath_init);
module_exit(dm_multipath_exit);
+module_param_named(queue_if_no_path_timeout_secs,
+ queue_if_no_path_timeout_secs, ulong, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(queue_if_no_path_timeout_secs, "No available paths queue IO timeout in seconds");
+
MODULE_DESCRIPTION(DM_NAME " multipath target");
MODULE_AUTHOR("Sistina Software <dm-devel@redhat.com>");
MODULE_LICENSE("GPL");
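The timeout machinery added above follows the usual timer_list embedding pattern: the timer lives inside the owning object, timer_setup() binds the callback, and from_timer() recovers the container inside it. A compact sketch of the idiom (kernel API, not standalone; my_obj and my_timeout are placeholder names):

struct my_obj {
        struct timer_list nopath_timer;
};

static void my_timeout(struct timer_list *t)
{
        struct my_obj *m = from_timer(m, t, nopath_timer);

        /* ... act on m, e.g. fail queued I/O ... */
}

static void my_init(struct my_obj *m)
{
        timer_setup(&m->nopath_timer, my_timeout, 0);
        mod_timer(&m->nopath_timer, jiffies + 30 * HZ);  /* fire in ~30s */
}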
diff --git a/drivers/md/dm-raid.c b/drivers/md/dm-raid.c
index c412eaa975fc..9a18bef0a5ff 100644
--- a/drivers/md/dm-raid.c
+++ b/drivers/md/dm-raid.c
@@ -129,7 +129,9 @@ struct raid_dev {
CTR_FLAG_RAID10_COPIES | \
CTR_FLAG_RAID10_FORMAT | \
CTR_FLAG_DELTA_DISKS | \
- CTR_FLAG_DATA_OFFSET)
+ CTR_FLAG_DATA_OFFSET | \
+ CTR_FLAG_JOURNAL_DEV | \
+ CTR_FLAG_JOURNAL_MODE)
/* Valid options definitions per raid level... */
@@ -3001,7 +3003,6 @@ static int raid_ctr(struct dm_target *ti, unsigned int argc, char **argv)
{ 1, 254, "Cannot understand number of raid devices parameters" }
};
- /* Must have <raid_type> */
arg = dm_shift_arg(&as);
if (!arg) {
ti->error = "No arguments";
@@ -3508,8 +3509,7 @@ static void raid_status(struct dm_target *ti, status_type_t type,
unsigned long recovery;
unsigned int raid_param_cnt = 1; /* at least 1 for chunksize */
unsigned int sz = 0;
- unsigned int rebuild_disks;
- unsigned int write_mostly_params = 0;
+ unsigned int rebuild_writemostly_count = 0;
sector_t progress, resync_max_sectors, resync_mismatches;
enum sync_state state;
struct raid_type *rt;
@@ -3593,18 +3593,20 @@ static void raid_status(struct dm_target *ti, status_type_t type,
case STATUSTYPE_TABLE:
/* Report the table line string you would use to construct this raid set */
- /* Calculate raid parameter count */
- for (i = 0; i < rs->raid_disks; i++)
- if (test_bit(WriteMostly, &rs->dev[i].rdev.flags))
- write_mostly_params += 2;
- rebuild_disks = memweight(rs->rebuild_disks, DISKS_ARRAY_ELEMS * sizeof(*rs->rebuild_disks));
- raid_param_cnt += rebuild_disks * 2 +
- write_mostly_params +
+ /*
+ * Count any rebuild or writemostly argument pairs and subtract the
+ * hweight counts, added below, of the rebuild and writemostly ctr flags.
+ */
+ for (i = 0; i < rs->raid_disks; i++) {
+ rebuild_writemostly_count += (test_bit(i, (void *) rs->rebuild_disks) ? 2 : 0) +
+ (test_bit(WriteMostly, &rs->dev[i].rdev.flags) ? 2 : 0);
+ }
+ rebuild_writemostly_count -= (test_bit(__CTR_FLAG_REBUILD, &rs->ctr_flags) ? 2 : 0) +
+ (test_bit(__CTR_FLAG_WRITE_MOSTLY, &rs->ctr_flags) ? 2 : 0);
+ /* Calculate the raid parameter count from the rebuild/writemostly argument counts above and the ctr flags set. */
+ raid_param_cnt += rebuild_writemostly_count +
hweight32(rs->ctr_flags & CTR_FLAG_OPTIONS_NO_ARGS) +
- hweight32(rs->ctr_flags & CTR_FLAG_OPTIONS_ONE_ARG) * 2 +
- (test_bit(__CTR_FLAG_JOURNAL_DEV, &rs->ctr_flags) ? 2 : 0) +
- (test_bit(__CTR_FLAG_JOURNAL_MODE, &rs->ctr_flags) ? 2 : 0);
-
+ hweight32(rs->ctr_flags & CTR_FLAG_OPTIONS_ONE_ARG) * 2;
/* Emit table line */
/* This has to be in the documented order for userspace! */
DMEMIT("%s %u %u", rs->raid_type->name, raid_param_cnt, mddev->new_chunk_sectors);
@@ -3612,11 +3614,10 @@ static void raid_status(struct dm_target *ti, status_type_t type,
DMEMIT(" %s", dm_raid_arg_name_by_flag(CTR_FLAG_SYNC));
if (test_bit(__CTR_FLAG_NOSYNC, &rs->ctr_flags))
DMEMIT(" %s", dm_raid_arg_name_by_flag(CTR_FLAG_NOSYNC));
- if (rebuild_disks)
+ if (test_bit(__CTR_FLAG_REBUILD, &rs->ctr_flags))
for (i = 0; i < rs->raid_disks; i++)
- if (test_bit(rs->dev[i].rdev.raid_disk, (void *) rs->rebuild_disks))
- DMEMIT(" %s %u", dm_raid_arg_name_by_flag(CTR_FLAG_REBUILD),
- rs->dev[i].rdev.raid_disk);
+ if (test_bit(i, (void *) rs->rebuild_disks))
+ DMEMIT(" %s %u", dm_raid_arg_name_by_flag(CTR_FLAG_REBUILD), i);
if (test_bit(__CTR_FLAG_DAEMON_SLEEP, &rs->ctr_flags))
DMEMIT(" %s %lu", dm_raid_arg_name_by_flag(CTR_FLAG_DAEMON_SLEEP),
mddev->bitmap_info.daemon_sleep);
@@ -3626,7 +3627,7 @@ static void raid_status(struct dm_target *ti, status_type_t type,
if (test_bit(__CTR_FLAG_MAX_RECOVERY_RATE, &rs->ctr_flags))
DMEMIT(" %s %d", dm_raid_arg_name_by_flag(CTR_FLAG_MAX_RECOVERY_RATE),
mddev->sync_speed_max);
- if (write_mostly_params)
+ if (test_bit(__CTR_FLAG_WRITE_MOSTLY, &rs->ctr_flags))
for (i = 0; i < rs->raid_disks; i++)
if (test_bit(WriteMostly, &rs->dev[i].rdev.flags))
DMEMIT(" %s %d", dm_raid_arg_name_by_flag(CTR_FLAG_WRITE_MOSTLY),
@@ -4029,7 +4030,7 @@ static void raid_resume(struct dm_target *ti)
static struct target_type raid_target = {
.name = "raid",
- .version = {1, 15, 0},
+ .version = {1, 15, 1},
.module = THIS_MODULE,
.ctr = raid_ctr,
.dtr = raid_dtr,
diff --git a/drivers/md/dm-snap-persistent.c b/drivers/md/dm-snap-persistent.c
index 3c50c4e4da8f..963d3774c93e 100644
--- a/drivers/md/dm-snap-persistent.c
+++ b/drivers/md/dm-snap-persistent.c
@@ -17,7 +17,7 @@
#include <linux/dm-bufio.h>
#define DM_MSG_PREFIX "persistent snapshot"
-#define DM_CHUNK_SIZE_DEFAULT_SECTORS 32 /* 16KB */
+#define DM_CHUNK_SIZE_DEFAULT_SECTORS 32U /* 16KB */
#define DM_PREFETCH_CHUNKS 12
diff --git a/drivers/md/dm-snap.c b/drivers/md/dm-snap.c
index 4fb1a40e68a0..6b11a266299f 100644
--- a/drivers/md/dm-snap.c
+++ b/drivers/md/dm-snap.c
@@ -1061,7 +1061,7 @@ static void snapshot_merge_next_chunks(struct dm_snapshot *s)
DMERR("Read error in exception store: "
"shutting down merge");
down_write(&s->lock);
- s->merge_failed = 1;
+ s->merge_failed = true;
up_write(&s->lock);
}
goto shut;
@@ -1149,7 +1149,7 @@ static void merge_callback(int read_err, unsigned long write_err, void *context)
shut:
down_write(&s->lock);
- s->merge_failed = 1;
+ s->merge_failed = true;
b = __release_queued_bios_after_merge(s);
up_write(&s->lock);
error_bios(b);
@@ -1314,7 +1314,7 @@ static int snapshot_ctr(struct dm_target *ti, unsigned int argc, char **argv)
INIT_LIST_HEAD(&s->list);
spin_lock_init(&s->pe_lock);
s->state_bits = 0;
- s->merge_failed = 0;
+ s->merge_failed = false;
s->first_merging_chunk = 0;
s->num_merging_chunks = 0;
bio_list_init(&s->bios_queued_during_merge);
diff --git a/drivers/md/dm-thin-metadata.c b/drivers/md/dm-thin-metadata.c
index b88d6d701f5b..fc9947d6210c 100644
--- a/drivers/md/dm-thin-metadata.c
+++ b/drivers/md/dm-thin-metadata.c
@@ -28,7 +28,7 @@
*
* - A hierarchical btree, with 2 levels which effectively maps (thin
* dev id, virtual block) -> block_time. Block time is a 64-bit
- * field holding the time in the low 24 bits, and block in the top 48
+ * field holding the time in the low 24 bits, and block in the top 40
* bits.
*
* BTrees consist solely of btree_nodes, that fill a block. Some are
@@ -387,16 +387,15 @@ static int subtree_equal(void *context, const void *value1_le, const void *value
* Variant that is used for in-core only changes or code that
* shouldn't put the pool in service on its own (e.g. commit).
*/
-static inline void __pmd_write_lock(struct dm_pool_metadata *pmd)
+static inline void pmd_write_lock_in_core(struct dm_pool_metadata *pmd)
__acquires(pmd->root_lock)
{
down_write(&pmd->root_lock);
}
-#define pmd_write_lock_in_core(pmd) __pmd_write_lock((pmd))
static inline void pmd_write_lock(struct dm_pool_metadata *pmd)
{
- __pmd_write_lock(pmd);
+ pmd_write_lock_in_core(pmd);
if (unlikely(!pmd->in_service))
pmd->in_service = true;
}
@@ -811,7 +810,7 @@ static int __write_changed_details(struct dm_pool_metadata *pmd)
return r;
if (td->open_count)
- td->changed = 0;
+ td->changed = false;
else {
list_del(&td->list);
kfree(td);
@@ -831,6 +830,7 @@ static int __commit_transaction(struct dm_pool_metadata *pmd)
* We need to know if the thin_disk_superblock exceeds a 512-byte sector.
*/
BUILD_BUG_ON(sizeof(struct thin_disk_superblock) > 512);
+ BUG_ON(!rwsem_is_locked(&pmd->root_lock));
if (unlikely(!pmd->in_service))
return 0;
@@ -953,6 +953,7 @@ int dm_pool_metadata_close(struct dm_pool_metadata *pmd)
return -EBUSY;
}
+ pmd_write_lock_in_core(pmd);
if (!dm_bm_is_read_only(pmd->bm) && !pmd->fail_io) {
r = __commit_transaction(pmd);
if (r < 0)
@@ -961,6 +962,7 @@ int dm_pool_metadata_close(struct dm_pool_metadata *pmd)
}
if (!pmd->fail_io)
__destroy_persistent_data_objects(pmd);
+ pmd_write_unlock(pmd);
kfree(pmd);
return 0;
@@ -1106,7 +1108,7 @@ static int __set_snapshot_details(struct dm_pool_metadata *pmd,
if (r)
return r;
- td->changed = 1;
+ td->changed = true;
td->snapshotted_time = time;
snap->mapped_blocks = td->mapped_blocks;
@@ -1618,7 +1620,7 @@ static int __insert(struct dm_thin_device *td, dm_block_t block,
if (r)
return r;
- td->changed = 1;
+ td->changed = true;
if (inserted)
td->mapped_blocks++;
@@ -1649,7 +1651,7 @@ static int __remove(struct dm_thin_device *td, dm_block_t block)
return r;
td->mapped_blocks--;
- td->changed = 1;
+ td->changed = true;
return 0;
}
@@ -1703,7 +1705,7 @@ static int __remove_range(struct dm_thin_device *td, dm_block_t begin, dm_block_
}
td->mapped_blocks -= total_count;
- td->changed = 1;
+ td->changed = true;
/*
* Reinsert the mapping tree.
@@ -1841,7 +1843,7 @@ int dm_pool_commit_metadata(struct dm_pool_metadata *pmd)
* Care is taken to not have commit be what
* triggers putting the thin-pool in-service.
*/
- __pmd_write_lock(pmd);
+ pmd_write_lock_in_core(pmd);
if (pmd->fail_io)
goto out;
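The comment fixed in this hunk describes block_time as 40 bits of block stacked over 24 bits of time. A standalone sketch of that packing (helper names are made up to mirror dm-thin-metadata's internal ones):

#include <stdint.h>
#include <stdio.h>

static uint64_t pack_block_time(uint64_t block, uint32_t time)
{
        return (block << 24) | (time & ((1u << 24) - 1));
}

static void unpack_block_time(uint64_t v, uint64_t *block, uint32_t *time)
{
        *block = v >> 24;
        *time = v & ((1u << 24) - 1);
}

int main(void)
{
        uint64_t b;
        uint32_t t;

        unpack_block_time(pack_block_time(123456789, 42), &b, &t);
        printf("block=%llu time=%u\n", (unsigned long long)b, t);
        return 0;
}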
diff --git a/drivers/md/dm-thin.c b/drivers/md/dm-thin.c
index 57626c27a54b..fa8d5464c1fb 100644
--- a/drivers/md/dm-thin.c
+++ b/drivers/md/dm-thin.c
@@ -231,6 +231,7 @@ struct pool {
struct dm_target *ti; /* Only set if a pool target is bound */
struct mapped_device *pool_md;
+ struct block_device *data_dev;
struct block_device *md_dev;
struct dm_pool_metadata *pmd;
@@ -281,6 +282,8 @@ struct pool {
struct dm_bio_prison_cell **cell_sort_array;
mempool_t mapping_pool;
+
+ struct bio flush_bio;
};
static void metadata_operation_failed(struct pool *pool, const char *op, int r);
@@ -328,7 +331,6 @@ struct pool_c {
dm_block_t low_water_blocks;
struct pool_features requested_pf; /* Features requested during table load */
struct pool_features adjusted_pf; /* Features used after adjusting for constituent devices */
- struct bio flush_bio;
};
/*
@@ -2924,6 +2926,7 @@ static void __pool_destroy(struct pool *pool)
if (pool->next_mapping)
mempool_free(pool->next_mapping, &pool->mapping_pool);
mempool_exit(&pool->mapping_pool);
+ bio_uninit(&pool->flush_bio);
dm_deferred_set_destroy(pool->shared_read_ds);
dm_deferred_set_destroy(pool->all_io_ds);
kfree(pool);
@@ -2933,6 +2936,7 @@ static struct kmem_cache *_new_mapping_cache;
static struct pool *pool_create(struct mapped_device *pool_md,
struct block_device *metadata_dev,
+ struct block_device *data_dev,
unsigned long block_size,
int read_only, char **error)
{
@@ -3003,6 +3007,7 @@ static struct pool *pool_create(struct mapped_device *pool_md,
pool->low_water_triggered = false;
pool->suspended = true;
pool->out_of_data_space = false;
+ bio_init(&pool->flush_bio, NULL, 0);
pool->shared_read_ds = dm_deferred_set_create();
if (!pool->shared_read_ds) {
@@ -3040,6 +3045,7 @@ static struct pool *pool_create(struct mapped_device *pool_md,
pool->last_commit_jiffies = jiffies;
pool->pool_md = pool_md;
pool->md_dev = metadata_dev;
+ pool->data_dev = data_dev;
__pool_table_insert(pool);
return pool;
@@ -3081,6 +3087,7 @@ static void __pool_dec(struct pool *pool)
static struct pool *__pool_find(struct mapped_device *pool_md,
struct block_device *metadata_dev,
+ struct block_device *data_dev,
unsigned long block_size, int read_only,
char **error, int *created)
{
@@ -3091,19 +3098,23 @@ static struct pool *__pool_find(struct mapped_device *pool_md,
*error = "metadata device already in use by a pool";
return ERR_PTR(-EBUSY);
}
+ if (pool->data_dev != data_dev) {
+ *error = "data device already in use by a pool";
+ return ERR_PTR(-EBUSY);
+ }
__pool_inc(pool);
} else {
pool = __pool_table_lookup(pool_md);
if (pool) {
- if (pool->md_dev != metadata_dev) {
+ if (pool->md_dev != metadata_dev || pool->data_dev != data_dev) {
*error = "different pool cannot replace a pool";
return ERR_PTR(-EINVAL);
}
__pool_inc(pool);
} else {
- pool = pool_create(pool_md, metadata_dev, block_size, read_only, error);
+ pool = pool_create(pool_md, metadata_dev, data_dev, block_size, read_only, error);
*created = 1;
}
}
@@ -3124,7 +3135,6 @@ static void pool_dtr(struct dm_target *ti)
__pool_dec(pt->pool);
dm_put_device(ti, pt->metadata_dev);
dm_put_device(ti, pt->data_dev);
- bio_uninit(&pt->flush_bio);
kfree(pt);
mutex_unlock(&dm_thin_pool_table.mutex);
@@ -3203,11 +3213,11 @@ static void metadata_low_callback(void *context)
*/
static int metadata_pre_commit_callback(void *context)
{
- struct pool_c *pt = context;
- struct bio *flush_bio = &pt->flush_bio;
+ struct pool *pool = context;
+ struct bio *flush_bio = &pool->flush_bio;
bio_reset(flush_bio);
- bio_set_dev(flush_bio, pt->data_dev->bdev);
+ bio_set_dev(flush_bio, pool->data_dev);
flush_bio->bi_opf = REQ_OP_WRITE | REQ_PREFLUSH;
return submit_bio_wait(flush_bio);
@@ -3356,7 +3366,7 @@ static int pool_ctr(struct dm_target *ti, unsigned argc, char **argv)
goto out;
}
- pool = __pool_find(dm_table_get_md(ti->table), metadata_dev->bdev,
+ pool = __pool_find(dm_table_get_md(ti->table), metadata_dev->bdev, data_dev->bdev,
block_size, pf.mode == PM_READ_ONLY, &ti->error, &pool_created);
if (IS_ERR(pool)) {
r = PTR_ERR(pool);
@@ -3381,7 +3391,6 @@ static int pool_ctr(struct dm_target *ti, unsigned argc, char **argv)
pt->data_dev = data_dev;
pt->low_water_blocks = low_water_blocks;
pt->adjusted_pf = pt->requested_pf = pf;
- bio_init(&pt->flush_bio, NULL, 0);
ti->num_flush_bios = 1;
/*
@@ -3408,9 +3417,8 @@ static int pool_ctr(struct dm_target *ti, unsigned argc, char **argv)
if (r)
goto out_flags_changed;
- dm_pool_register_pre_commit_callback(pt->pool->pmd,
- metadata_pre_commit_callback,
- pt);
+ dm_pool_register_pre_commit_callback(pool->pmd,
+ metadata_pre_commit_callback, pool);
pt->callbacks.congested_fn = pool_is_congested;
dm_table_add_target_callbacks(ti->table, &pt->callbacks);
@@ -4099,7 +4107,7 @@ static struct target_type pool_target = {
.name = "thin-pool",
.features = DM_TARGET_SINGLETON | DM_TARGET_ALWAYS_WRITEABLE |
DM_TARGET_IMMUTABLE,
- .version = {1, 21, 0},
+ .version = {1, 22, 0},
.module = THIS_MODULE,
.ctr = pool_ctr,
.dtr = pool_dtr,
@@ -4476,7 +4484,7 @@ static void thin_io_hints(struct dm_target *ti, struct queue_limits *limits)
static struct target_type thin_target = {
.name = "thin",
- .version = {1, 21, 0},
+ .version = {1, 22, 0},
.module = THIS_MODULE,
.ctr = thin_ctr,
.dtr = thin_dtr,
diff --git a/drivers/md/dm-verity-target.c b/drivers/md/dm-verity-target.c
index 4fb33e7562c5..0d61e9c67986 100644
--- a/drivers/md/dm-verity-target.c
+++ b/drivers/md/dm-verity-target.c
@@ -611,8 +611,22 @@ no_prefetch_cluster:
static void verity_submit_prefetch(struct dm_verity *v, struct dm_verity_io *io)
{
+ sector_t block = io->block;
+ unsigned int n_blocks = io->n_blocks;
struct dm_verity_prefetch_work *pw;
+ if (v->validated_blocks) {
+ while (n_blocks && test_bit(block, v->validated_blocks)) {
+ block++;
+ n_blocks--;
+ }
+ while (n_blocks && test_bit(block + n_blocks - 1,
+ v->validated_blocks))
+ n_blocks--;
+ if (!n_blocks)
+ return;
+ }
+
pw = kmalloc(sizeof(struct dm_verity_prefetch_work),
GFP_NOIO | __GFP_NORETRY | __GFP_NOMEMALLOC | __GFP_NOWARN);
@@ -621,8 +635,8 @@ static void verity_submit_prefetch(struct dm_verity *v, struct dm_verity_io *io)
INIT_WORK(&pw->work, verity_prefetch_io);
pw->v = v;
- pw->block = io->block;
- pw->n_blocks = io->n_blocks;
+ pw->block = block;
+ pw->n_blocks = n_blocks;
queue_work(v->verify_wq, &pw->work);
}
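verity_submit_prefetch() now trims blocks that are already recorded in validated_blocks off both ends of the range before queueing the prefetch. The same two-pass trim on a toy bitmap:

#include <stdio.h>

int main(void)
{
        int validated[10] = { 1, 1, 0, 1, 0, 0, 1, 1, 1, 1 };
        unsigned block = 0, n_blocks = 10;

        while (n_blocks && validated[block]) {  /* trim the front */
                block++;
                n_blocks--;
        }
        while (n_blocks && validated[block + n_blocks - 1])
                n_blocks--;                     /* trim the tail */

        /* Prints [2, 6): only the still-unvalidated middle is fetched. */
        printf("prefetch [%u, %u)\n", block, block + n_blocks);
        return 0;
}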
diff --git a/drivers/md/dm-writecache.c b/drivers/md/dm-writecache.c
index 7d727a72aa13..b9e27e37a943 100644
--- a/drivers/md/dm-writecache.c
+++ b/drivers/md/dm-writecache.c
@@ -442,7 +442,13 @@ static void writecache_notify_io(unsigned long error, void *context)
complete(&endio->c);
}
-static void ssd_commit_flushed(struct dm_writecache *wc)
+static void writecache_wait_for_ios(struct dm_writecache *wc, int direction)
+{
+ wait_event(wc->bio_in_progress_wait[direction],
+ !atomic_read(&wc->bio_in_progress[direction]));
+}
+
+static void ssd_commit_flushed(struct dm_writecache *wc, bool wait_for_ios)
{
struct dm_io_region region;
struct dm_io_request req;
@@ -488,17 +494,20 @@ static void ssd_commit_flushed(struct dm_writecache *wc)
writecache_notify_io(0, &endio);
wait_for_completion_io(&endio.c);
+ if (wait_for_ios)
+ writecache_wait_for_ios(wc, WRITE);
+
writecache_disk_flush(wc, wc->ssd_dev);
memset(wc->dirty_bitmap, 0, wc->dirty_bitmap_size);
}
-static void writecache_commit_flushed(struct dm_writecache *wc)
+static void writecache_commit_flushed(struct dm_writecache *wc, bool wait_for_ios)
{
if (WC_MODE_PMEM(wc))
wmb();
else
- ssd_commit_flushed(wc);
+ ssd_commit_flushed(wc, wait_for_ios);
}
static void writecache_disk_flush(struct dm_writecache *wc, struct dm_dev *dev)
@@ -522,12 +531,6 @@ static void writecache_disk_flush(struct dm_writecache *wc, struct dm_dev *dev)
writecache_error(wc, r, "error flushing metadata: %d", r);
}
-static void writecache_wait_for_ios(struct dm_writecache *wc, int direction)
-{
- wait_event(wc->bio_in_progress_wait[direction],
- !atomic_read(&wc->bio_in_progress[direction]));
-}
-
#define WFE_RETURN_FOLLOWING 1
#define WFE_LOWEST_SEQ 2
@@ -622,7 +625,7 @@ static void writecache_add_to_freelist(struct dm_writecache *wc, struct wc_entry
wc->freelist_size++;
}
-static struct wc_entry *writecache_pop_from_freelist(struct dm_writecache *wc)
+static struct wc_entry *writecache_pop_from_freelist(struct dm_writecache *wc, sector_t expected_sector)
{
struct wc_entry *e;
@@ -631,6 +634,8 @@ static struct wc_entry *writecache_pop_from_freelist(struct dm_writecache *wc)
if (unlikely(!wc->current_free))
return NULL;
e = wc->current_free;
+ if (expected_sector != (sector_t)-1 && unlikely(cache_sector(wc, e) != expected_sector))
+ return NULL;
next = rb_next(&e->rb_node);
rb_erase(&e->rb_node, &wc->freetree);
if (unlikely(!next))
@@ -640,6 +645,8 @@ static struct wc_entry *writecache_pop_from_freelist(struct dm_writecache *wc)
if (unlikely(list_empty(&wc->freelist)))
return NULL;
e = container_of(wc->freelist.next, struct wc_entry, lru);
+ if (expected_sector != (sector_t)-1 && unlikely(cache_sector(wc, e) != expected_sector))
+ return NULL;
list_del(&e->lru);
}
wc->freelist_size--;
@@ -724,15 +731,12 @@ static void writecache_flush(struct dm_writecache *wc)
e = e2;
cond_resched();
}
- writecache_commit_flushed(wc);
-
- if (!WC_MODE_PMEM(wc))
- writecache_wait_for_ios(wc, WRITE);
+ writecache_commit_flushed(wc, true);
wc->seq_count++;
pmem_assign(sb(wc)->seq_count, cpu_to_le64(wc->seq_count));
writecache_flush_region(wc, &sb(wc)->seq_count, sizeof sb(wc)->seq_count);
- writecache_commit_flushed(wc);
+ writecache_commit_flushed(wc, false);
wc->overwrote_committed = false;
@@ -756,7 +760,7 @@ static void writecache_flush(struct dm_writecache *wc)
}
if (need_flush_after_free)
- writecache_commit_flushed(wc);
+ writecache_commit_flushed(wc, false);
}
static void writecache_flush_work(struct work_struct *work)
@@ -809,7 +813,7 @@ static void writecache_discard(struct dm_writecache *wc, sector_t start, sector_
}
if (discarded_something)
- writecache_commit_flushed(wc);
+ writecache_commit_flushed(wc, false);
}
static bool writecache_wait_for_writeback(struct dm_writecache *wc)
@@ -958,7 +962,7 @@ erase_this:
if (need_flush) {
writecache_flush_all_metadata(wc);
- writecache_commit_flushed(wc);
+ writecache_commit_flushed(wc, false);
}
wc_unlock(wc);
@@ -1193,7 +1197,7 @@ read_next_block:
goto bio_copy;
}
}
- e = writecache_pop_from_freelist(wc);
+ e = writecache_pop_from_freelist(wc, (sector_t)-1);
if (unlikely(!e)) {
writecache_wait_on_freelist(wc);
continue;
@@ -1205,9 +1209,26 @@ bio_copy:
if (WC_MODE_PMEM(wc)) {
bio_copy_block(wc, bio, memory_data(wc, e));
} else {
- dm_accept_partial_bio(bio, wc->block_size >> SECTOR_SHIFT);
+ unsigned bio_size = wc->block_size;
+ sector_t start_cache_sec = cache_sector(wc, e);
+ sector_t current_cache_sec = start_cache_sec + (bio_size >> SECTOR_SHIFT);
+
+ while (bio_size < bio->bi_iter.bi_size) {
+ struct wc_entry *f = writecache_pop_from_freelist(wc, current_cache_sec);
+ if (!f)
+ break;
+ write_original_sector_seq_count(wc, f, bio->bi_iter.bi_sector +
+ (bio_size >> SECTOR_SHIFT), wc->seq_count);
+ writecache_insert_entry(wc, f);
+ wc->uncommitted_blocks++;
+ bio_size += wc->block_size;
+ current_cache_sec += wc->block_size >> SECTOR_SHIFT;
+ }
+
bio_set_dev(bio, wc->ssd_dev->bdev);
- bio->bi_iter.bi_sector = cache_sector(wc, e);
+ bio->bi_iter.bi_sector = start_cache_sec;
+ dm_accept_partial_bio(bio, bio_size >> SECTOR_SHIFT);
+
if (unlikely(wc->uncommitted_blocks >= wc->autocommit_blocks)) {
wc->uncommitted_blocks = 0;
queue_work(wc->writeback_wq, &wc->flush_work);
@@ -1342,7 +1363,7 @@ static void __writecache_endio_pmem(struct dm_writecache *wc, struct list_head *
wc->writeback_size--;
n_walked++;
if (unlikely(n_walked >= ENDIO_LATENCY)) {
- writecache_commit_flushed(wc);
+ writecache_commit_flushed(wc, false);
wc_unlock(wc);
wc_lock(wc);
n_walked = 0;
@@ -1423,7 +1444,7 @@ pop_from_list:
writecache_wait_for_ios(wc, READ);
}
- writecache_commit_flushed(wc);
+ writecache_commit_flushed(wc, false);
wc_unlock(wc);
}
@@ -1766,10 +1787,10 @@ static int init_memory(struct dm_writecache *wc)
write_original_sector_seq_count(wc, &wc->entries[b], -1, -1);
writecache_flush_all_metadata(wc);
- writecache_commit_flushed(wc);
+ writecache_commit_flushed(wc, false);
pmem_assign(sb(wc)->magic, cpu_to_le32(MEMORY_SUPERBLOCK_MAGIC));
writecache_flush_region(wc, &sb(wc)->magic, sizeof sb(wc)->magic);
- writecache_commit_flushed(wc);
+ writecache_commit_flushed(wc, false);
return 0;
}
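The expected_sector argument lets the SSD path keep growing one bio while the freelist hands back the physically next cache block, instead of splitting every block into its own write. A toy model of that stop-at-first-gap loop (freelist contents are made up; 8 sectors per 4 KiB block):

#include <stdio.h>

int main(void)
{
        int freelist[4] = { 8, 16, 24, 40 };    /* cache sectors */
        unsigned i = 1, bio_sectors = 8;
        int next = freelist[0] + 8;

        while (i < 4 && freelist[i] == next) {  /* stop at the first gap */
                bio_sectors += 8;
                next += 8;
                i++;
        }
        printf("bio covers %u sectors\n", bio_sectors);  /* 24 */
        return 0;
}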
diff --git a/drivers/md/dm-zoned-metadata.c b/drivers/md/dm-zoned-metadata.c
index 22b3cb0050a7..516c7b671d25 100644
--- a/drivers/md/dm-zoned-metadata.c
+++ b/drivers/md/dm-zoned-metadata.c
@@ -134,6 +134,7 @@ struct dmz_metadata {
sector_t zone_bitmap_size;
unsigned int zone_nr_bitmap_blocks;
+ unsigned int zone_bits_per_mblk;
unsigned int nr_bitmap_blocks;
unsigned int nr_map_blocks;
@@ -1161,7 +1162,10 @@ static int dmz_init_zones(struct dmz_metadata *zmd)
/* Init */
zmd->zone_bitmap_size = dev->zone_nr_blocks >> 3;
- zmd->zone_nr_bitmap_blocks = zmd->zone_bitmap_size >> DMZ_BLOCK_SHIFT;
+ zmd->zone_nr_bitmap_blocks =
+ max_t(sector_t, 1, zmd->zone_bitmap_size >> DMZ_BLOCK_SHIFT);
+ zmd->zone_bits_per_mblk = min_t(sector_t, dev->zone_nr_blocks,
+ DMZ_BLOCK_SIZE_BITS);
/* Allocate zone array */
zmd->zones = kcalloc(dev->nr_zones, sizeof(struct dm_zone), GFP_KERNEL);
@@ -1956,7 +1960,7 @@ int dmz_copy_valid_blocks(struct dmz_metadata *zmd, struct dm_zone *from_zone,
dmz_release_mblock(zmd, to_mblk);
dmz_release_mblock(zmd, from_mblk);
- chunk_block += DMZ_BLOCK_SIZE_BITS;
+ chunk_block += zmd->zone_bits_per_mblk;
}
to_zone->weight = from_zone->weight;
@@ -2017,7 +2021,7 @@ int dmz_validate_blocks(struct dmz_metadata *zmd, struct dm_zone *zone,
/* Set bits */
bit = chunk_block & DMZ_BLOCK_MASK_BITS;
- nr_bits = min(nr_blocks, DMZ_BLOCK_SIZE_BITS - bit);
+ nr_bits = min(nr_blocks, zmd->zone_bits_per_mblk - bit);
count = dmz_set_bits((unsigned long *)mblk->data, bit, nr_bits);
if (count) {
@@ -2096,7 +2100,7 @@ int dmz_invalidate_blocks(struct dmz_metadata *zmd, struct dm_zone *zone,
/* Clear bits */
bit = chunk_block & DMZ_BLOCK_MASK_BITS;
- nr_bits = min(nr_blocks, DMZ_BLOCK_SIZE_BITS - bit);
+ nr_bits = min(nr_blocks, zmd->zone_bits_per_mblk - bit);
count = dmz_clear_bits((unsigned long *)mblk->data,
bit, nr_bits);
@@ -2156,6 +2160,7 @@ static int dmz_to_next_set_block(struct dmz_metadata *zmd, struct dm_zone *zone,
{
struct dmz_mblock *mblk;
unsigned int bit, set_bit, nr_bits;
+ unsigned int zone_bits = zmd->zone_bits_per_mblk;
unsigned long *bitmap;
int n = 0;
@@ -2170,15 +2175,15 @@ static int dmz_to_next_set_block(struct dmz_metadata *zmd, struct dm_zone *zone,
/* Get offset */
bitmap = (unsigned long *) mblk->data;
bit = chunk_block & DMZ_BLOCK_MASK_BITS;
- nr_bits = min(nr_blocks, DMZ_BLOCK_SIZE_BITS - bit);
+ nr_bits = min(nr_blocks, zone_bits - bit);
if (set)
- set_bit = find_next_bit(bitmap, DMZ_BLOCK_SIZE_BITS, bit);
+ set_bit = find_next_bit(bitmap, zone_bits, bit);
else
- set_bit = find_next_zero_bit(bitmap, DMZ_BLOCK_SIZE_BITS, bit);
+ set_bit = find_next_zero_bit(bitmap, zone_bits, bit);
dmz_release_mblock(zmd, mblk);
n += set_bit - bit;
- if (set_bit < DMZ_BLOCK_SIZE_BITS)
+ if (set_bit < zone_bits)
break;
nr_blocks -= nr_bits;
@@ -2281,7 +2286,7 @@ static void dmz_get_zone_weight(struct dmz_metadata *zmd, struct dm_zone *zone)
/* Count bits in this block */
bitmap = mblk->data;
bit = chunk_block & DMZ_BLOCK_MASK_BITS;
- nr_bits = min(nr_blocks, DMZ_BLOCK_SIZE_BITS - bit);
+ nr_bits = min(nr_blocks, zmd->zone_bits_per_mblk - bit);
n += dmz_count_bits(bitmap, bit, nr_bits);
dmz_release_mblock(zmd, mblk);
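The dm-zoned fix sizes the bitmap correctly for zones smaller than one metadata block: a 4 KiB block holds 32768 bits, so such zones must clamp the per-block bit count and still reserve at least one bitmap block. The arithmetic with illustrative numbers:

#include <stdio.h>

#define DMZ_BLOCK_SIZE_BITS (4096 * 8)  /* bits in one 4 KiB mblk */

int main(void)
{
        unsigned long zone_nr_blocks = 16384;   /* a small zone */
        unsigned long bitmap_bytes = zone_nr_blocks >> 3;
        unsigned long nr_bitmap_blocks = bitmap_bytes >> 12;
        unsigned long bits_per_mblk = zone_nr_blocks < DMZ_BLOCK_SIZE_BITS ?
                                      zone_nr_blocks : DMZ_BLOCK_SIZE_BITS;

        if (!nr_bitmap_blocks)          /* 2048 bytes >> 12 == 0 */
                nr_bitmap_blocks = 1;

        /* Prints: 1 bitmap block(s), 16384 bits each */
        printf("%lu bitmap block(s), %lu bits each\n",
               nr_bitmap_blocks, bits_per_mblk);
        return 0;
}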
diff --git a/drivers/md/dm.c b/drivers/md/dm.c
index e8f9661a10a1..b89f07ee2eff 100644
--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
@@ -1859,6 +1859,7 @@ static void dm_init_normal_md_queue(struct mapped_device *md)
/*
* Initialize aspects of queue that aren't relevant for blk-mq
*/
+ md->queue->backing_dev_info->congested_data = md;
md->queue->backing_dev_info->congested_fn = dm_any_congested;
}
@@ -1949,7 +1950,12 @@ static struct mapped_device *alloc_dev(int minor)
if (!md->queue)
goto bad;
md->queue->queuedata = md;
- md->queue->backing_dev_info->congested_data = md;
+ /*
+ * Default to the bio-based ->make_request_fn until the DM
+ * table is loaded and md->type is established. If a
+ * request-based table is loaded, blk-mq will override accordingly.
+ */
+ blk_queue_make_request(md->queue, dm_make_request);
md->disk = alloc_disk_node(1, md->numa_node_id);
if (!md->disk)
@@ -2264,7 +2270,6 @@ int dm_setup_md_queue(struct mapped_device *md, struct dm_table *t)
case DM_TYPE_DAX_BIO_BASED:
case DM_TYPE_NVME_BIO_BASED:
dm_init_normal_md_queue(md);
- blk_queue_make_request(md->queue, dm_make_request);
break;
case DM_TYPE_NONE:
WARN_ON_ONCE(true);
diff --git a/drivers/md/md-bitmap.c b/drivers/md/md-bitmap.c
index 3ad18246fcb3..e230052c2107 100644
--- a/drivers/md/md-bitmap.c
+++ b/drivers/md/md-bitmap.c
@@ -1019,8 +1019,6 @@ void md_bitmap_unplug(struct bitmap *bitmap)
/* look at each page to see if there are any set bits that need to be
* flushed out to disk */
for (i = 0; i < bitmap->storage.file_pages; i++) {
- if (!bitmap->storage.filemap)
- return;
dirty = test_and_clear_page_attr(bitmap, i, BITMAP_PAGE_DIRTY);
need_write = test_and_clear_page_attr(bitmap, i,
BITMAP_PAGE_NEEDWRITE);
@@ -1338,7 +1336,8 @@ void md_bitmap_daemon_work(struct mddev *mddev)
BITMAP_PAGE_DIRTY))
/* bitmap_unplug will handle the rest */
break;
- if (test_and_clear_page_attr(bitmap, j,
+ if (bitmap->storage.filemap &&
+ test_and_clear_page_attr(bitmap, j,
BITMAP_PAGE_NEEDWRITE)) {
write_page(bitmap, bitmap->storage.filemap[j], 0);
}
@@ -1790,8 +1789,8 @@ void md_bitmap_destroy(struct mddev *mddev)
return;
md_bitmap_wait_behind_writes(mddev);
- mempool_destroy(mddev->wb_info_pool);
- mddev->wb_info_pool = NULL;
+ if (!mddev->serialize_policy)
+ mddev_destroy_serial_pool(mddev, NULL, true);
mutex_lock(&mddev->bitmap_info.mutex);
spin_lock(&mddev->lock);
@@ -1908,7 +1907,7 @@ int md_bitmap_load(struct mddev *mddev)
goto out;
rdev_for_each(rdev, mddev)
- mddev_create_wb_pool(mddev, rdev, true);
+ mddev_create_serial_pool(mddev, rdev, true);
if (mddev_is_clustered(mddev))
md_cluster_ops->load_bitmaps(mddev, mddev->bitmap_info.nodes);
@@ -2475,16 +2474,16 @@ backlog_store(struct mddev *mddev, const char *buf, size_t len)
if (backlog > COUNTER_MAX)
return -EINVAL;
mddev->bitmap_info.max_write_behind = backlog;
- if (!backlog && mddev->wb_info_pool) {
- /* wb_info_pool is not needed if backlog is zero */
- mempool_destroy(mddev->wb_info_pool);
- mddev->wb_info_pool = NULL;
- } else if (backlog && !mddev->wb_info_pool) {
- /* wb_info_pool is needed since backlog is not zero */
+ if (!backlog && mddev->serial_info_pool) {
+ /* serial_info_pool is not needed if backlog is zero */
+ if (!mddev->serialize_policy)
+ mddev_destroy_serial_pool(mddev, NULL, false);
+ } else if (backlog && !mddev->serial_info_pool) {
+ /* serial_info_pool is needed since backlog is not zero */
struct md_rdev *rdev;
rdev_for_each(rdev, mddev)
- mddev_create_wb_pool(mddev, rdev, false);
+ mddev_create_serial_pool(mddev, rdev, false);
}
if (old_mwb != backlog)
md_bitmap_update_sb(mddev->bitmap);
diff --git a/drivers/md/md.c b/drivers/md/md.c
index 4e7c9f398bc6..4824d50526fa 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -125,74 +125,165 @@ static inline int speed_max(struct mddev *mddev)
mddev->sync_speed_max : sysctl_speed_limit_max;
}
-static int rdev_init_wb(struct md_rdev *rdev)
+static void rdev_uninit_serial(struct md_rdev *rdev)
{
- if (rdev->bdev->bd_queue->nr_hw_queues == 1)
+ if (!test_and_clear_bit(CollisionCheck, &rdev->flags))
+ return;
+
+ kvfree(rdev->serial);
+ rdev->serial = NULL;
+}
+
+static void rdevs_uninit_serial(struct mddev *mddev)
+{
+ struct md_rdev *rdev;
+
+ rdev_for_each(rdev, mddev)
+ rdev_uninit_serial(rdev);
+}
+
+static int rdev_init_serial(struct md_rdev *rdev)
+{
+ /* serial_nums equals BARRIER_BUCKETS_NR */
+ int i, serial_nums = 1 << ((PAGE_SHIFT - ilog2(sizeof(atomic_t))));
+ struct serial_in_rdev *serial = NULL;
+
+ if (test_bit(CollisionCheck, &rdev->flags))
return 0;
- spin_lock_init(&rdev->wb_list_lock);
- INIT_LIST_HEAD(&rdev->wb_list);
- init_waitqueue_head(&rdev->wb_io_wait);
- set_bit(WBCollisionCheck, &rdev->flags);
+ serial = kvmalloc(sizeof(struct serial_in_rdev) * serial_nums,
+ GFP_KERNEL);
+ if (!serial)
+ return -ENOMEM;
- return 1;
+ for (i = 0; i < serial_nums; i++) {
+ struct serial_in_rdev *serial_tmp = &serial[i];
+
+ spin_lock_init(&serial_tmp->serial_lock);
+ serial_tmp->serial_rb = RB_ROOT_CACHED;
+ init_waitqueue_head(&serial_tmp->serial_io_wait);
+ }
+
+ rdev->serial = serial;
+ set_bit(CollisionCheck, &rdev->flags);
+
+ return 0;
+}
+
+static int rdevs_init_serial(struct mddev *mddev)
+{
+ struct md_rdev *rdev;
+ int ret = 0;
+
+ rdev_for_each(rdev, mddev) {
+ ret = rdev_init_serial(rdev);
+ if (ret)
+ break;
+ }
+
+ /* Free all resources if the pool does not exist */
+ if (ret && !mddev->serial_info_pool)
+ rdevs_uninit_serial(mddev);
+
+ return ret;
}
/*
- * Create wb_info_pool if rdev is the first multi-queue device flaged
- * with writemostly, also write-behind mode is enabled.
+ * rdev needs serialization enabled if it meets both conditions:
+ * 1. it is a multi-queue device flagged with WriteMostly.
+ * 2. write-behind mode is enabled.
*/
-void mddev_create_wb_pool(struct mddev *mddev, struct md_rdev *rdev,
- bool is_suspend)
+static int rdev_need_serial(struct md_rdev *rdev)
{
- if (mddev->bitmap_info.max_write_behind == 0)
- return;
+ return (rdev && rdev->mddev->bitmap_info.max_write_behind > 0 &&
+ rdev->bdev->bd_queue->nr_hw_queues != 1 &&
+ test_bit(WriteMostly, &rdev->flags));
+}
+
+/*
+ * Init resources for rdev(s), then create serial_info_pool if:
+ * 1. rdev is the first device that returns true from rdev_need_serial.
+ * 2. rdev is NULL, meaning serialization should be enabled for all rdevs.
+ */
+void mddev_create_serial_pool(struct mddev *mddev, struct md_rdev *rdev,
+ bool is_suspend)
+{
+ int ret = 0;
- if (!test_bit(WriteMostly, &rdev->flags) || !rdev_init_wb(rdev))
+ if (rdev && !rdev_need_serial(rdev) &&
+ !test_bit(CollisionCheck, &rdev->flags))
return;
- if (mddev->wb_info_pool == NULL) {
+ if (!is_suspend)
+ mddev_suspend(mddev);
+
+ if (!rdev)
+ ret = rdevs_init_serial(mddev);
+ else
+ ret = rdev_init_serial(rdev);
+ if (ret)
+ goto abort;
+
+ if (mddev->serial_info_pool == NULL) {
unsigned int noio_flag;
- if (!is_suspend)
- mddev_suspend(mddev);
noio_flag = memalloc_noio_save();
- mddev->wb_info_pool = mempool_create_kmalloc_pool(NR_WB_INFOS,
- sizeof(struct wb_info));
+ mddev->serial_info_pool =
+ mempool_create_kmalloc_pool(NR_SERIAL_INFOS,
+ sizeof(struct serial_info));
memalloc_noio_restore(noio_flag);
- if (!mddev->wb_info_pool)
- pr_err("can't alloc memory pool for writemostly\n");
- if (!is_suspend)
- mddev_resume(mddev);
+ if (!mddev->serial_info_pool) {
+ rdevs_uninit_serial(mddev);
+ pr_err("can't alloc memory pool for serialization\n");
+ }
}
+
+abort:
+ if (!is_suspend)
+ mddev_resume(mddev);
}
-EXPORT_SYMBOL_GPL(mddev_create_wb_pool);
/*
- * destroy wb_info_pool if rdev is the last device flaged with WBCollisionCheck.
+ * Free resources from the rdev(s), and destroy serial_info_pool when:
+ * 1. rdev is the last device flagged with CollisionCheck.
+ * 2. the bitmap is destroyed while the policy is not enabled.
+ * 3. the policy is being disabled and no rdev still needs the pool.
*/
-static void mddev_destroy_wb_pool(struct mddev *mddev, struct md_rdev *rdev)
+void mddev_destroy_serial_pool(struct mddev *mddev, struct md_rdev *rdev,
+ bool is_suspend)
{
- if (!test_and_clear_bit(WBCollisionCheck, &rdev->flags))
+ if (rdev && !test_bit(CollisionCheck, &rdev->flags))
return;
- if (mddev->wb_info_pool) {
+ if (mddev->serial_info_pool) {
struct md_rdev *temp;
- int num = 0;
+ int num = 0; /* used to track if other rdevs need the pool */
- /*
- * Check if other rdevs need wb_info_pool.
- */
- rdev_for_each(temp, mddev)
- if (temp != rdev &&
- test_bit(WBCollisionCheck, &temp->flags))
+ if (!is_suspend)
+ mddev_suspend(mddev);
+ rdev_for_each(temp, mddev) {
+ if (!rdev) {
+ if (!mddev->serialize_policy ||
+ !rdev_need_serial(temp))
+ rdev_uninit_serial(temp);
+ else
+ num++;
+ } else if (temp != rdev &&
+ test_bit(CollisionCheck, &temp->flags))
num++;
- if (!num) {
- mddev_suspend(rdev->mddev);
- mempool_destroy(mddev->wb_info_pool);
- mddev->wb_info_pool = NULL;
- mddev_resume(rdev->mddev);
}
+
+ if (rdev)
+ rdev_uninit_serial(rdev);
+
+ if (num)
+ pr_info("The mempool could be used by other devices\n");
+ else {
+ mempool_destroy(mddev->serial_info_pool);
+ mddev->serial_info_pool = NULL;
+ }
+ if (!is_suspend)
+ mddev_resume(mddev);
}
}
@@ -2337,7 +2428,7 @@ static int bind_rdev_to_array(struct md_rdev *rdev, struct mddev *mddev)
pr_debug("md: bind<%s>\n", b);
if (mddev->raid_disks)
- mddev_create_wb_pool(mddev, rdev, false);
+ mddev_create_serial_pool(mddev, rdev, false);
if ((err = kobject_add(&rdev->kobj, &mddev->kobj, "dev-%s", b)))
goto fail;
@@ -2375,7 +2466,7 @@ static void unbind_rdev_from_array(struct md_rdev *rdev)
bd_unlink_disk_holder(rdev->bdev, rdev->mddev->gendisk);
list_del_rcu(&rdev->same_set);
pr_debug("md: unbind<%s>\n", bdevname(rdev->bdev,b));
- mddev_destroy_wb_pool(rdev->mddev, rdev);
+ mddev_destroy_serial_pool(rdev->mddev, rdev, false);
rdev->mddev = NULL;
sysfs_remove_link(&rdev->kobj, "block");
sysfs_put(rdev->sysfs_state);
@@ -2888,10 +2979,10 @@ state_store(struct md_rdev *rdev, const char *buf, size_t len)
}
} else if (cmd_match(buf, "writemostly")) {
set_bit(WriteMostly, &rdev->flags);
- mddev_create_wb_pool(rdev->mddev, rdev, false);
+ mddev_create_serial_pool(rdev->mddev, rdev, false);
err = 0;
} else if (cmd_match(buf, "-writemostly")) {
- mddev_destroy_wb_pool(rdev->mddev, rdev);
+ mddev_destroy_serial_pool(rdev->mddev, rdev, false);
clear_bit(WriteMostly, &rdev->flags);
err = 0;
} else if (cmd_match(buf, "blocked")) {
@@ -5277,6 +5368,57 @@ static struct md_sysfs_entry md_fail_last_dev =
__ATTR(fail_last_dev, S_IRUGO | S_IWUSR, fail_last_dev_show,
fail_last_dev_store);
+static ssize_t serialize_policy_show(struct mddev *mddev, char *page)
+{
+ if (mddev->pers == NULL || (mddev->pers->level != 1))
+ return sprintf(page, "n/a\n");
+ else
+ return sprintf(page, "%d\n", mddev->serialize_policy);
+}
+
+/*
+ * Setting serialize_policy to true enforces that write IO is not
+ * reordered for raid1.
+ */
+static ssize_t
+serialize_policy_store(struct mddev *mddev, const char *buf, size_t len)
+{
+ int err;
+ bool value;
+
+ err = kstrtobool(buf, &value);
+ if (err)
+ return err;
+
+ if (value == mddev->serialize_policy)
+ return len;
+
+ err = mddev_lock(mddev);
+ if (err)
+ return err;
+ if (mddev->pers == NULL || (mddev->pers->level != 1)) {
+ pr_err("md: serialize_policy is only effective for raid1\n");
+ err = -EINVAL;
+ goto unlock;
+ }
+
+ mddev_suspend(mddev);
+ if (value)
+ mddev_create_serial_pool(mddev, NULL, true);
+ else
+ mddev_destroy_serial_pool(mddev, NULL, true);
+ mddev->serialize_policy = value;
+ mddev_resume(mddev);
+unlock:
+ mddev_unlock(mddev);
+ return err ?: len;
+}
+
+static struct md_sysfs_entry md_serialize_policy =
+__ATTR(serialize_policy, S_IRUGO | S_IWUSR, serialize_policy_show,
+ serialize_policy_store);
+
+
static struct attribute *md_default_attrs[] = {
&md_level.attr,
&md_layout.attr,
@@ -5294,6 +5436,7 @@ static struct attribute *md_default_attrs[] = {
&max_corr_read_errors.attr,
&md_consistency_policy.attr,
&md_fail_last_dev.attr,
+ &md_serialize_policy.attr,
NULL,
};
@@ -5769,18 +5912,18 @@ int md_run(struct mddev *mddev)
goto bitmap_abort;
if (mddev->bitmap_info.max_write_behind > 0) {
- bool creat_pool = false;
+ bool create_pool = false;
rdev_for_each(rdev, mddev) {
if (test_bit(WriteMostly, &rdev->flags) &&
- rdev_init_wb(rdev))
- creat_pool = true;
- }
- if (creat_pool && mddev->wb_info_pool == NULL) {
- mddev->wb_info_pool =
- mempool_create_kmalloc_pool(NR_WB_INFOS,
- sizeof(struct wb_info));
- if (!mddev->wb_info_pool) {
+ rdev_init_serial(rdev))
+ create_pool = true;
+ }
+ if (create_pool && mddev->serial_info_pool == NULL) {
+ mddev->serial_info_pool =
+ mempool_create_kmalloc_pool(NR_SERIAL_INFOS,
+ sizeof(struct serial_info));
+ if (!mddev->serial_info_pool) {
err = -ENOMEM;
goto bitmap_abort;
}
@@ -6025,8 +6168,9 @@ static void __md_stop_writes(struct mddev *mddev)
mddev->in_sync = 1;
md_update_sb(mddev, 1);
}
- mempool_destroy(mddev->wb_info_pool);
- mddev->wb_info_pool = NULL;
+ /* disable the policy to guarantee rdevs free their serialization resources */
+ mddev->serialize_policy = 0;
+ mddev_destroy_serial_pool(mddev, NULL, true);
}
void md_stop_writes(struct mddev *mddev)
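
serialize_policy_store() above is reached through md's per-array sysfs directory. A hedged user-space sketch of toggling it (the array name md0, and therefore the path, is an assumption):

	#include <fcntl.h>
	#include <stdio.h>
	#include <unistd.h>

	int main(void)
	{
		/* hypothetical array name; the attribute parses kstrtobool input */
		int fd = open("/sys/block/md0/md/serialize_policy", O_WRONLY);

		if (fd < 0) {
			perror("open");
			return 1;
		}
		if (write(fd, "1", 1) != 1)	/* write "0" to disable again */
			perror("write");
		close(fd);
		return 0;
	}

Writes are rejected with -EINVAL unless the array is raid1, matching the level check in the store handler above.
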
diff --git a/drivers/md/md.h b/drivers/md/md.h
index 5f86f8adb0a4..acd681939112 100644
--- a/drivers/md/md.h
+++ b/drivers/md/md.h
@@ -32,6 +32,16 @@
* be retried.
*/
#define MD_FAILFAST (REQ_FAILFAST_DEV | REQ_FAILFAST_TRANSPORT)
+
+/*
+ * The struct embedded in rdev is used to serialize IO.
+ */
+struct serial_in_rdev {
+ struct rb_root_cached serial_rb;
+ spinlock_t serial_lock;
+ wait_queue_head_t serial_io_wait;
+};
+
/*
* MD's 'extended' device
*/
@@ -110,12 +120,7 @@ struct md_rdev {
* in superblock.
*/
- /*
- * The members for check collision of write behind IOs.
- */
- struct list_head wb_list;
- spinlock_t wb_list_lock;
- wait_queue_head_t wb_io_wait;
+ struct serial_in_rdev *serial; /* used for raid1 io serialization */
struct work_struct del_work; /* used for delayed sysfs removal */
@@ -201,9 +206,9 @@ enum flag_bits {
* it didn't fail, so don't use FailFast
* any more for metadata
*/
- WBCollisionCheck, /*
- * multiqueue device should check if there
- * is collision between write behind bios.
+ CollisionCheck, /*
+ * check if there is collision between raid1
+ * serial bios.
*/
};
@@ -263,12 +268,13 @@ enum mddev_sb_flags {
MD_SB_NEED_REWRITE, /* metadata write needs to be repeated */
};
-#define NR_WB_INFOS 8
-/* record current range of write behind IOs */
-struct wb_info {
- sector_t lo;
- sector_t hi;
- struct list_head list;
+#define NR_SERIAL_INFOS 8
+/* record current range of serialized IOs */
+struct serial_info {
+ struct rb_node node;
+ sector_t start; /* start sector of rb node */
+ sector_t last; /* end sector of rb node */
+ sector_t _subtree_last; /* highest sector in subtree of rb node */
};
struct mddev {
@@ -487,13 +493,14 @@ struct mddev {
*/
struct work_struct flush_work;
struct work_struct event_work; /* used by dm to report failure event */
- mempool_t *wb_info_pool;
+ mempool_t *serial_info_pool;
void (*sync_super)(struct mddev *mddev, struct md_rdev *rdev);
struct md_cluster_info *cluster_info;
unsigned int good_device_nr; /* good device num within cluster raid */
bool has_superblocks:1;
bool fail_last_dev:1;
+ bool serialize_policy:1;
};
enum recovery_flags {
@@ -737,8 +744,10 @@ extern struct bio *bio_alloc_mddev(gfp_t gfp_mask, int nr_iovecs,
extern void md_reload_sb(struct mddev *mddev, int raid_disk);
extern void md_update_sb(struct mddev *mddev, int force);
extern void md_kick_rdev_from_array(struct md_rdev * rdev);
-extern void mddev_create_wb_pool(struct mddev *mddev, struct md_rdev *rdev,
- bool is_suspend);
+extern void mddev_create_serial_pool(struct mddev *mddev, struct md_rdev *rdev,
+ bool is_suspend);
+extern void mddev_destroy_serial_pool(struct mddev *mddev, struct md_rdev *rdev,
+ bool is_suspend);
struct md_rdev *md_find_rdev_nr_rcu(struct mddev *mddev, int nr);
struct md_rdev *md_find_rdev_rcu(struct mddev *mddev, dev_t dev);
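
The struct serial_in_rdev introduced above is allocated per rdev as an array whose length rdev_init_serial() computes as 1 << (PAGE_SHIFT - ilog2(sizeof(atomic_t))), mirroring the raid1 barrier bucket count. A worked sketch of that computation, assuming 4 KiB pages and a 4-byte atomic_t:

	#include <linux/atomic.h>
	#include <linux/log2.h>
	#include <linux/mm.h>

	/* 1 << (12 - ilog2(4)) == 1 << 10 == 1024 buckets per rdev */
	static unsigned int demo_serial_bucket_count(void)
	{
		return 1U << (PAGE_SHIFT - ilog2(sizeof(atomic_t)));
	}
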
diff --git a/drivers/md/persistent-data/dm-space-map-common.c b/drivers/md/persistent-data/dm-space-map-common.c
index bd68f6fef694..d8b4125e338c 100644
--- a/drivers/md/persistent-data/dm-space-map-common.c
+++ b/drivers/md/persistent-data/dm-space-map-common.c
@@ -380,6 +380,33 @@ int sm_ll_find_free_block(struct ll_disk *ll, dm_block_t begin,
return -ENOSPC;
}
+int sm_ll_find_common_free_block(struct ll_disk *old_ll, struct ll_disk *new_ll,
+ dm_block_t begin, dm_block_t end, dm_block_t *b)
+{
+ int r;
+ uint32_t count;
+
+ do {
+ r = sm_ll_find_free_block(new_ll, begin, new_ll->nr_blocks, b);
+ if (r)
+ break;
+
+ /* double check this block wasn't used in the old transaction */
+ if (*b >= old_ll->nr_blocks)
+ count = 0;
+ else {
+ r = sm_ll_lookup(old_ll, *b, &count);
+ if (r)
+ break;
+
+ if (count)
+ begin = *b + 1;
+ }
+ } while (count);
+
+ return r;
+}
+
static int sm_ll_mutate(struct ll_disk *ll, dm_block_t b,
int (*mutator)(void *context, uint32_t old, uint32_t *new),
void *context, enum allocation_event *ev)
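
sm_ll_find_common_free_block() above keeps searching until it finds a block that is free in the current transaction and was also free in the committed (old) one; blocks beyond the old nr_blocks count as free in the old view. A simplified user-space sketch of the same search over two hypothetical ref-count arrays:

	#include <stddef.h>
	#include <stdint.h>

	/* returns the first block free in both views, or -1 (simplified model) */
	static ptrdiff_t find_common_free(const uint32_t *old_counts, size_t old_nr,
					  const uint32_t *new_counts, size_t new_nr,
					  size_t begin)
	{
		for (size_t b = begin; b < new_nr; b++) {
			if (new_counts[b])
				continue;	/* in use in the current transaction */
			if (b >= old_nr || old_counts[b] == 0)
				return b;	/* free in both: safe to allocate */
		}
		return -1;
	}
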
diff --git a/drivers/md/persistent-data/dm-space-map-common.h b/drivers/md/persistent-data/dm-space-map-common.h
index b3078d5eda0c..8de63ce39bdd 100644
--- a/drivers/md/persistent-data/dm-space-map-common.h
+++ b/drivers/md/persistent-data/dm-space-map-common.h
@@ -109,6 +109,8 @@ int sm_ll_lookup_bitmap(struct ll_disk *ll, dm_block_t b, uint32_t *result);
int sm_ll_lookup(struct ll_disk *ll, dm_block_t b, uint32_t *result);
int sm_ll_find_free_block(struct ll_disk *ll, dm_block_t begin,
dm_block_t end, dm_block_t *result);
+int sm_ll_find_common_free_block(struct ll_disk *old_ll, struct ll_disk *new_ll,
+ dm_block_t begin, dm_block_t end, dm_block_t *result);
int sm_ll_insert(struct ll_disk *ll, dm_block_t b, uint32_t ref_count, enum allocation_event *ev);
int sm_ll_inc(struct ll_disk *ll, dm_block_t b, enum allocation_event *ev);
int sm_ll_dec(struct ll_disk *ll, dm_block_t b, enum allocation_event *ev);
diff --git a/drivers/md/persistent-data/dm-space-map-disk.c b/drivers/md/persistent-data/dm-space-map-disk.c
index 32adf6b4a9c7..bf4c5e2ccb6f 100644
--- a/drivers/md/persistent-data/dm-space-map-disk.c
+++ b/drivers/md/persistent-data/dm-space-map-disk.c
@@ -167,8 +167,10 @@ static int sm_disk_new_block(struct dm_space_map *sm, dm_block_t *b)
enum allocation_event ev;
struct sm_disk *smd = container_of(sm, struct sm_disk, sm);
- /* FIXME: we should loop round a couple of times */
- r = sm_ll_find_free_block(&smd->old_ll, smd->begin, smd->old_ll.nr_blocks, b);
+ /*
+ * Any block we allocate has to be free in both the old and current ll.
+ */
+ r = sm_ll_find_common_free_block(&smd->old_ll, &smd->ll, smd->begin, smd->ll.nr_blocks, b);
if (r)
return r;
diff --git a/drivers/md/persistent-data/dm-space-map-metadata.c b/drivers/md/persistent-data/dm-space-map-metadata.c
index 25328582cc48..9e3c64ec2026 100644
--- a/drivers/md/persistent-data/dm-space-map-metadata.c
+++ b/drivers/md/persistent-data/dm-space-map-metadata.c
@@ -448,7 +448,10 @@ static int sm_metadata_new_block_(struct dm_space_map *sm, dm_block_t *b)
enum allocation_event ev;
struct sm_metadata *smm = container_of(sm, struct sm_metadata, sm);
- r = sm_ll_find_free_block(&smm->old_ll, smm->begin, smm->old_ll.nr_blocks, b);
+ /*
+ * Any block we allocate has to be free in both the old and current ll.
+ */
+ r = sm_ll_find_common_free_block(&smm->old_ll, &smm->ll, smm->begin, smm->ll.nr_blocks, b);
if (r)
return r;
diff --git a/drivers/md/raid0.c b/drivers/md/raid0.c
index b7c20979bd19..322386ff5d22 100644
--- a/drivers/md/raid0.c
+++ b/drivers/md/raid0.c
@@ -87,7 +87,7 @@ static int create_strip_zones(struct mddev *mddev, struct r0conf **private_conf)
char b[BDEVNAME_SIZE];
char b2[BDEVNAME_SIZE];
struct r0conf *conf = kzalloc(sizeof(*conf), GFP_KERNEL);
- unsigned short blksize = 512;
+ unsigned blksize = 512;
*private_conf = ERR_PTR(-ENOMEM);
if (!conf)
diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
index 201fd8aec59a..cd810e195086 100644
--- a/drivers/md/raid1.c
+++ b/drivers/md/raid1.c
@@ -29,6 +29,7 @@
#include <linux/module.h>
#include <linux/seq_file.h>
#include <linux/ratelimit.h>
+#include <linux/interval_tree_generic.h>
#include <trace/events/block.h>
@@ -50,55 +51,71 @@ static void lower_barrier(struct r1conf *conf, sector_t sector_nr);
#include "raid1-10.c"
-static int check_and_add_wb(struct md_rdev *rdev, sector_t lo, sector_t hi)
+#define START(node) ((node)->start)
+#define LAST(node) ((node)->last)
+INTERVAL_TREE_DEFINE(struct serial_info, node, sector_t, _subtree_last,
+ START, LAST, static inline, raid1_rb);
+
+static int check_and_add_serial(struct md_rdev *rdev, struct r1bio *r1_bio,
+ struct serial_info *si, int idx)
{
- struct wb_info *wi, *temp_wi;
unsigned long flags;
int ret = 0;
- struct mddev *mddev = rdev->mddev;
-
- wi = mempool_alloc(mddev->wb_info_pool, GFP_NOIO);
-
- spin_lock_irqsave(&rdev->wb_list_lock, flags);
- list_for_each_entry(temp_wi, &rdev->wb_list, list) {
- /* collision happened */
- if (hi > temp_wi->lo && lo < temp_wi->hi) {
- ret = -EBUSY;
- break;
- }
+ sector_t lo = r1_bio->sector;
+ sector_t hi = lo + r1_bio->sectors;
+ struct serial_in_rdev *serial = &rdev->serial[idx];
+
+ spin_lock_irqsave(&serial->serial_lock, flags);
+ /* collision happened */
+ if (raid1_rb_iter_first(&serial->serial_rb, lo, hi))
+ ret = -EBUSY;
+ else {
+ si->start = lo;
+ si->last = hi;
+ raid1_rb_insert(si, &serial->serial_rb);
}
-
- if (!ret) {
- wi->lo = lo;
- wi->hi = hi;
- list_add(&wi->list, &rdev->wb_list);
- } else
- mempool_free(wi, mddev->wb_info_pool);
- spin_unlock_irqrestore(&rdev->wb_list_lock, flags);
+ spin_unlock_irqrestore(&serial->serial_lock, flags);
return ret;
}
-static void remove_wb(struct md_rdev *rdev, sector_t lo, sector_t hi)
+static void wait_for_serialization(struct md_rdev *rdev, struct r1bio *r1_bio)
+{
+ struct mddev *mddev = rdev->mddev;
+ struct serial_info *si;
+ int idx = sector_to_idx(r1_bio->sector);
+ struct serial_in_rdev *serial = &rdev->serial[idx];
+
+ if (WARN_ON(!mddev->serial_info_pool))
+ return;
+ si = mempool_alloc(mddev->serial_info_pool, GFP_NOIO);
+ wait_event(serial->serial_io_wait,
+ check_and_add_serial(rdev, r1_bio, si, idx) == 0);
+}
+
+static void remove_serial(struct md_rdev *rdev, sector_t lo, sector_t hi)
{
- struct wb_info *wi;
+ struct serial_info *si;
unsigned long flags;
int found = 0;
struct mddev *mddev = rdev->mddev;
-
- spin_lock_irqsave(&rdev->wb_list_lock, flags);
- list_for_each_entry(wi, &rdev->wb_list, list)
- if (hi == wi->hi && lo == wi->lo) {
- list_del(&wi->list);
- mempool_free(wi, mddev->wb_info_pool);
+ int idx = sector_to_idx(lo);
+ struct serial_in_rdev *serial = &rdev->serial[idx];
+
+ spin_lock_irqsave(&serial->serial_lock, flags);
+ for (si = raid1_rb_iter_first(&serial->serial_rb, lo, hi);
+ si; si = raid1_rb_iter_next(si, lo, hi)) {
+ if (si->start == lo && si->last == hi) {
+ raid1_rb_remove(si, &serial->serial_rb);
+ mempool_free(si, mddev->serial_info_pool);
found = 1;
break;
}
-
+ }
if (!found)
- WARN(1, "The write behind IO is not recorded\n");
- spin_unlock_irqrestore(&rdev->wb_list_lock, flags);
- wake_up(&rdev->wb_io_wait);
+ WARN(1, "The write IO is not recorded for serialization\n");
+ spin_unlock_irqrestore(&serial->serial_lock, flags);
+ wake_up(&serial->serial_io_wait);
}
/*
@@ -430,6 +447,8 @@ static void raid1_end_write_request(struct bio *bio)
int mirror = find_bio_disk(r1_bio, bio);
struct md_rdev *rdev = conf->mirrors[mirror].rdev;
bool discard_error;
+ sector_t lo = r1_bio->sector;
+ sector_t hi = r1_bio->sector + r1_bio->sectors;
discard_error = bio->bi_status && bio_op(bio) == REQ_OP_DISCARD;
@@ -499,12 +518,8 @@ static void raid1_end_write_request(struct bio *bio)
}
if (behind) {
- if (test_bit(WBCollisionCheck, &rdev->flags)) {
- sector_t lo = r1_bio->sector;
- sector_t hi = r1_bio->sector + r1_bio->sectors;
-
- remove_wb(rdev, lo, hi);
- }
+ if (test_bit(CollisionCheck, &rdev->flags))
+ remove_serial(rdev, lo, hi);
if (test_bit(WriteMostly, &rdev->flags))
atomic_dec(&r1_bio->behind_remaining);
@@ -527,7 +542,8 @@ static void raid1_end_write_request(struct bio *bio)
call_bio_endio(r1_bio);
}
}
- }
+ } else if (rdev->mddev->serialize_policy)
+ remove_serial(rdev, lo, hi);
if (r1_bio->bios[mirror] == NULL)
rdev_dec_pending(rdev, conf->mddev);
@@ -1479,6 +1495,7 @@ static void raid1_write_request(struct mddev *mddev, struct bio *bio,
for (i = 0; i < disks; i++) {
struct bio *mbio = NULL;
+ struct md_rdev *rdev = conf->mirrors[i].rdev;
if (!r1_bio->bios[i])
continue;
@@ -1506,18 +1523,12 @@ static void raid1_write_request(struct mddev *mddev, struct bio *bio,
mbio = bio_clone_fast(bio, GFP_NOIO, &mddev->bio_set);
if (r1_bio->behind_master_bio) {
- struct md_rdev *rdev = conf->mirrors[i].rdev;
-
- if (test_bit(WBCollisionCheck, &rdev->flags)) {
- sector_t lo = r1_bio->sector;
- sector_t hi = r1_bio->sector + r1_bio->sectors;
-
- wait_event(rdev->wb_io_wait,
- check_and_add_wb(rdev, lo, hi) == 0);
- }
+ if (test_bit(CollisionCheck, &rdev->flags))
+ wait_for_serialization(rdev, r1_bio);
if (test_bit(WriteMostly, &rdev->flags))
atomic_inc(&r1_bio->behind_remaining);
- }
+ } else if (mddev->serialize_policy)
+ wait_for_serialization(rdev, r1_bio);
r1_bio->bios[i] = mbio;
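
The INTERVAL_TREE_DEFINE() invocation above generates the raid1_rb_{insert,remove,iter_first,iter_next} helpers from the start/last fields of struct serial_info. A minimal kernel-style sketch of the same macro, with illustrative names that are not part of the patch:

	#include <linux/interval_tree_generic.h>
	#include <linux/rbtree.h>
	#include <linux/types.h>

	struct demo_range {
		struct rb_node node;
		sector_t start, last;	/* interval endpoints */
		sector_t _subtree_last;	/* maintained by the generated helpers */
	};

	#define DEMO_START(n) ((n)->start)
	#define DEMO_LAST(n)  ((n)->last)
	INTERVAL_TREE_DEFINE(struct demo_range, node, sector_t, _subtree_last,
			     DEMO_START, DEMO_LAST, static inline, demo_rb);

	/* true if [lo, hi] overlaps any inserted range */
	static bool demo_overlaps(struct rb_root_cached *root, sector_t lo, sector_t hi)
	{
		return demo_rb_iter_first(root, lo, hi) != NULL;
	}

This is exactly the shape check_and_add_serial() relies on: an O(log n) overlap query replaces the former linear walk of wb_list.
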
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index d4d3b67ffbba..ba00e9877f02 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -6598,7 +6598,6 @@ raid5_show_group_thread_cnt(struct mddev *mddev, char *page)
static int alloc_thread_groups(struct r5conf *conf, int cnt,
int *group_cnt,
- int *worker_cnt_per_group,
struct r5worker_group **worker_groups);
static ssize_t
raid5_store_group_thread_cnt(struct mddev *mddev, const char *page, size_t len)
@@ -6607,7 +6606,7 @@ raid5_store_group_thread_cnt(struct mddev *mddev, const char *page, size_t len)
unsigned int new;
int err;
struct r5worker_group *new_groups, *old_groups;
- int group_cnt, worker_cnt_per_group;
+ int group_cnt;
if (len >= PAGE_SIZE)
return -EINVAL;
@@ -6630,13 +6629,11 @@ raid5_store_group_thread_cnt(struct mddev *mddev, const char *page, size_t len)
if (old_groups)
flush_workqueue(raid5_wq);
- err = alloc_thread_groups(conf, new,
- &group_cnt, &worker_cnt_per_group,
- &new_groups);
+ err = alloc_thread_groups(conf, new, &group_cnt, &new_groups);
if (!err) {
spin_lock_irq(&conf->device_lock);
conf->group_cnt = group_cnt;
- conf->worker_cnt_per_group = worker_cnt_per_group;
+ conf->worker_cnt_per_group = new;
conf->worker_groups = new_groups;
spin_unlock_irq(&conf->device_lock);
@@ -6672,16 +6669,13 @@ static struct attribute_group raid5_attrs_group = {
.attrs = raid5_attrs,
};
-static int alloc_thread_groups(struct r5conf *conf, int cnt,
- int *group_cnt,
- int *worker_cnt_per_group,
+static int alloc_thread_groups(struct r5conf *conf, int cnt, int *group_cnt,
struct r5worker_group **worker_groups)
{
int i, j, k;
ssize_t size;
struct r5worker *workers;
- *worker_cnt_per_group = cnt;
if (cnt == 0) {
*group_cnt = 0;
*worker_groups = NULL;
@@ -6882,7 +6876,7 @@ static struct r5conf *setup_conf(struct mddev *mddev)
struct disk_info *disk;
char pers_name[6];
int i;
- int group_cnt, worker_cnt_per_group;
+ int group_cnt;
struct r5worker_group *new_group;
int ret;
@@ -6928,10 +6922,9 @@ static struct r5conf *setup_conf(struct mddev *mddev)
for (i = 0; i < PENDING_IO_MAX; i++)
list_add(&conf->pending_data[i].sibling, &conf->free_list);
/* Don't enable multi-threading by default*/
- if (!alloc_thread_groups(conf, 0, &group_cnt, &worker_cnt_per_group,
- &new_group)) {
+ if (!alloc_thread_groups(conf, 0, &group_cnt, &new_group)) {
conf->group_cnt = group_cnt;
- conf->worker_cnt_per_group = worker_cnt_per_group;
+ conf->worker_cnt_per_group = 0;
conf->worker_groups = new_group;
} else
goto abort;
diff --git a/drivers/media/common/videobuf2/videobuf2-dma-contig.c b/drivers/media/common/videobuf2/videobuf2-dma-contig.c
index 44cd0e530bbd..d0c9dffe49e5 100644
--- a/drivers/media/common/videobuf2/videobuf2-dma-contig.c
+++ b/drivers/media/common/videobuf2/videobuf2-dma-contig.c
@@ -335,13 +335,6 @@ static void vb2_dc_dmabuf_ops_release(struct dma_buf *dbuf)
vb2_dc_put(dbuf->priv);
}
-static void *vb2_dc_dmabuf_ops_kmap(struct dma_buf *dbuf, unsigned long pgnum)
-{
- struct vb2_dc_buf *buf = dbuf->priv;
-
- return buf->vaddr ? buf->vaddr + pgnum * PAGE_SIZE : NULL;
-}
-
static void *vb2_dc_dmabuf_ops_vmap(struct dma_buf *dbuf)
{
struct vb2_dc_buf *buf = dbuf->priv;
@@ -360,7 +353,6 @@ static const struct dma_buf_ops vb2_dc_dmabuf_ops = {
.detach = vb2_dc_dmabuf_ops_detach,
.map_dma_buf = vb2_dc_dmabuf_ops_map,
.unmap_dma_buf = vb2_dc_dmabuf_ops_unmap,
- .map = vb2_dc_dmabuf_ops_kmap,
.vmap = vb2_dc_dmabuf_ops_vmap,
.mmap = vb2_dc_dmabuf_ops_mmap,
.release = vb2_dc_dmabuf_ops_release,
diff --git a/drivers/media/common/videobuf2/videobuf2-dma-sg.c b/drivers/media/common/videobuf2/videobuf2-dma-sg.c
index ed706b2a263c..6db60e9d5183 100644
--- a/drivers/media/common/videobuf2/videobuf2-dma-sg.c
+++ b/drivers/media/common/videobuf2/videobuf2-dma-sg.c
@@ -470,13 +470,6 @@ static void vb2_dma_sg_dmabuf_ops_release(struct dma_buf *dbuf)
vb2_dma_sg_put(dbuf->priv);
}
-static void *vb2_dma_sg_dmabuf_ops_kmap(struct dma_buf *dbuf, unsigned long pgnum)
-{
- struct vb2_dma_sg_buf *buf = dbuf->priv;
-
- return buf->vaddr ? buf->vaddr + pgnum * PAGE_SIZE : NULL;
-}
-
static void *vb2_dma_sg_dmabuf_ops_vmap(struct dma_buf *dbuf)
{
struct vb2_dma_sg_buf *buf = dbuf->priv;
@@ -495,7 +488,6 @@ static const struct dma_buf_ops vb2_dma_sg_dmabuf_ops = {
.detach = vb2_dma_sg_dmabuf_ops_detach,
.map_dma_buf = vb2_dma_sg_dmabuf_ops_map,
.unmap_dma_buf = vb2_dma_sg_dmabuf_ops_unmap,
- .map = vb2_dma_sg_dmabuf_ops_kmap,
.vmap = vb2_dma_sg_dmabuf_ops_vmap,
.mmap = vb2_dma_sg_dmabuf_ops_mmap,
.release = vb2_dma_sg_dmabuf_ops_release,
diff --git a/drivers/media/common/videobuf2/videobuf2-vmalloc.c b/drivers/media/common/videobuf2/videobuf2-vmalloc.c
index 04d51ca63223..1a4f0ca87c7c 100644
--- a/drivers/media/common/videobuf2/videobuf2-vmalloc.c
+++ b/drivers/media/common/videobuf2/videobuf2-vmalloc.c
@@ -105,7 +105,7 @@ static void *vb2_vmalloc_get_userptr(struct device *dev, unsigned long vaddr,
if (nums[i-1] + 1 != nums[i])
goto fail_map;
buf->vaddr = (__force void *)
- ioremap_nocache(__pfn_to_phys(nums[0]), size + offset);
+ ioremap(__pfn_to_phys(nums[0]), size + offset);
} else {
buf->vaddr = vm_map_ram(frame_vector_pages(vec), n_pages, -1,
PAGE_KERNEL);
@@ -319,13 +319,6 @@ static void vb2_vmalloc_dmabuf_ops_release(struct dma_buf *dbuf)
vb2_vmalloc_put(dbuf->priv);
}
-static void *vb2_vmalloc_dmabuf_ops_kmap(struct dma_buf *dbuf, unsigned long pgnum)
-{
- struct vb2_vmalloc_buf *buf = dbuf->priv;
-
- return buf->vaddr + pgnum * PAGE_SIZE;
-}
-
static void *vb2_vmalloc_dmabuf_ops_vmap(struct dma_buf *dbuf)
{
struct vb2_vmalloc_buf *buf = dbuf->priv;
@@ -344,7 +337,6 @@ static const struct dma_buf_ops vb2_vmalloc_dmabuf_ops = {
.detach = vb2_vmalloc_dmabuf_ops_detach,
.map_dma_buf = vb2_vmalloc_dmabuf_ops_map,
.unmap_dma_buf = vb2_vmalloc_dmabuf_ops_unmap,
- .map = vb2_vmalloc_dmabuf_ops_kmap,
.vmap = vb2_vmalloc_dmabuf_ops_vmap,
.mmap = vb2_vmalloc_dmabuf_ops_mmap,
.release = vb2_vmalloc_dmabuf_ops_release,
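
All three videobuf2 hunks above drop the per-page .map (kmap) callback from dma_buf_ops; kernel access to the whole buffer goes through the remaining .vmap op. A hedged sketch of an importer using the vmap interface as it existed in this era (error handling trimmed):

	#include <linux/dma-buf.h>

	static int demo_read_first_byte(struct dma_buf *dbuf, u8 *out)
	{
		void *vaddr = dma_buf_vmap(dbuf);	/* maps the whole buffer */

		if (!vaddr)
			return -ENOMEM;
		*out = *(u8 *)vaddr;
		dma_buf_vunmap(dbuf, vaddr);
		return 0;
	}
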
diff --git a/drivers/media/pci/cx18/cx18-driver.c b/drivers/media/pci/cx18/cx18-driver.c
index fd47bd07ffd8..2f1eeeb6e7c7 100644
--- a/drivers/media/pci/cx18/cx18-driver.c
+++ b/drivers/media/pci/cx18/cx18-driver.c
@@ -938,7 +938,7 @@ static int cx18_probe(struct pci_dev *pci_dev,
/* map io memory */
CX18_DEBUG_INFO("attempting ioremap at 0x%llx len 0x%08x\n",
(u64)cx->base_addr + CX18_MEM_OFFSET, CX18_MEM_SIZE);
- cx->enc_mem = ioremap_nocache(cx->base_addr + CX18_MEM_OFFSET,
+ cx->enc_mem = ioremap(cx->base_addr + CX18_MEM_OFFSET,
CX18_MEM_SIZE);
if (!cx->enc_mem) {
CX18_ERR("ioremap failed. Can't get a window into CX23418 memory and register space\n");
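
ioremap_nocache() was an alias for ioremap() on essentially every architecture, so this hunk and the media conversions that follow are mechanical renames with identical uncached-MMIO semantics. A minimal sketch of the resulting pattern (the address and size are placeholders):

	#include <linux/io.h>

	#define DEMO_PHYS_BASE	0x10000000UL	/* placeholder physical address */
	#define DEMO_WIN_SIZE	0x1000		/* placeholder window size */

	static int demo_map_regs(void)
	{
		void __iomem *regs = ioremap(DEMO_PHYS_BASE, DEMO_WIN_SIZE);

		if (!regs)
			return -ENOMEM;
		writel(0, regs);	/* uncached MMIO access */
		(void)readl(regs);
		iounmap(regs);
		return 0;
	}
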
diff --git a/drivers/media/pci/ivtv/ivtv-driver.c b/drivers/media/pci/ivtv/ivtv-driver.c
index 3f3f40ea890b..1f79700a6307 100644
--- a/drivers/media/pci/ivtv/ivtv-driver.c
+++ b/drivers/media/pci/ivtv/ivtv-driver.c
@@ -1042,7 +1042,7 @@ static int ivtv_probe(struct pci_dev *pdev, const struct pci_device_id *pci_id)
/* map io memory */
IVTV_DEBUG_INFO("attempting ioremap at 0x%llx len 0x%08x\n",
(u64)itv->base_addr + IVTV_ENCODER_OFFSET, IVTV_ENCODER_SIZE);
- itv->enc_mem = ioremap_nocache(itv->base_addr + IVTV_ENCODER_OFFSET,
+ itv->enc_mem = ioremap(itv->base_addr + IVTV_ENCODER_OFFSET,
IVTV_ENCODER_SIZE);
if (!itv->enc_mem) {
IVTV_ERR("ioremap failed. Can't get a window into CX23415/6 encoder memory\n");
@@ -1056,7 +1056,7 @@ static int ivtv_probe(struct pci_dev *pdev, const struct pci_device_id *pci_id)
if (itv->has_cx23415) {
IVTV_DEBUG_INFO("attempting ioremap at 0x%llx len 0x%08x\n",
(u64)itv->base_addr + IVTV_DECODER_OFFSET, IVTV_DECODER_SIZE);
- itv->dec_mem = ioremap_nocache(itv->base_addr + IVTV_DECODER_OFFSET,
+ itv->dec_mem = ioremap(itv->base_addr + IVTV_DECODER_OFFSET,
IVTV_DECODER_SIZE);
if (!itv->dec_mem) {
IVTV_ERR("ioremap failed. Can't get a window into CX23415 decoder memory\n");
@@ -1075,7 +1075,7 @@ static int ivtv_probe(struct pci_dev *pdev, const struct pci_device_id *pci_id)
IVTV_DEBUG_INFO("attempting ioremap at 0x%llx len 0x%08x\n",
(u64)itv->base_addr + IVTV_REG_OFFSET, IVTV_REG_SIZE);
itv->reg_mem =
- ioremap_nocache(itv->base_addr + IVTV_REG_OFFSET, IVTV_REG_SIZE);
+ ioremap(itv->base_addr + IVTV_REG_OFFSET, IVTV_REG_SIZE);
if (!itv->reg_mem) {
IVTV_ERR("ioremap failed. Can't get a window into CX23415/6 register space\n");
IVTV_ERR("Each capture card with a CX23415/6 needs 64 kB of vmalloc address space for this window\n");
diff --git a/drivers/media/pci/ivtv/ivtvfb.c b/drivers/media/pci/ivtv/ivtvfb.c
index 95a56cce9b65..0c2859844081 100644
--- a/drivers/media/pci/ivtv/ivtvfb.c
+++ b/drivers/media/pci/ivtv/ivtvfb.c
@@ -37,7 +37,7 @@
#include <linux/ivtvfb.h>
#ifdef CONFIG_X86_64
-#include <asm/pat.h>
+#include <asm/memtype.h>
#endif
/* card parameters */
@@ -925,7 +925,7 @@ static int ivtvfb_blank(int blank_mode, struct fb_info *info)
return 0;
}
-static struct fb_ops ivtvfb_ops = {
+static const struct fb_ops ivtvfb_ops = {
.owner = THIS_MODULE,
.fb_write = ivtvfb_write,
.fb_check_var = ivtvfb_check_var,
@@ -1049,7 +1049,6 @@ static int ivtvfb_init_vidmode(struct ivtv *itv)
oi->ivtvfb_info.node = -1;
oi->ivtvfb_info.flags = FBINFO_FLAG_DEFAULT;
- oi->ivtvfb_info.fbops = &ivtvfb_ops;
oi->ivtvfb_info.par = itv;
oi->ivtvfb_info.var = oi->ivtvfb_defined;
oi->ivtvfb_info.fix = oi->ivtvfb_fix;
diff --git a/drivers/media/platform/davinci/dm355_ccdc.c b/drivers/media/platform/davinci/dm355_ccdc.c
index f299baf7cbe0..e06d113dfe96 100644
--- a/drivers/media/platform/davinci/dm355_ccdc.c
+++ b/drivers/media/platform/davinci/dm355_ccdc.c
@@ -883,7 +883,7 @@ static int dm355_ccdc_probe(struct platform_device *pdev)
goto fail_nores;
}
- ccdc_cfg.base_addr = ioremap_nocache(res->start, resource_size(res));
+ ccdc_cfg.base_addr = ioremap(res->start, resource_size(res));
if (!ccdc_cfg.base_addr) {
status = -ENOMEM;
goto fail_nomem;
diff --git a/drivers/media/platform/davinci/dm644x_ccdc.c b/drivers/media/platform/davinci/dm644x_ccdc.c
index 2fc6c9c38f9c..c6378c4e0074 100644
--- a/drivers/media/platform/davinci/dm644x_ccdc.c
+++ b/drivers/media/platform/davinci/dm644x_ccdc.c
@@ -817,7 +817,7 @@ static int dm644x_ccdc_probe(struct platform_device *pdev)
goto fail_nores;
}
- ccdc_cfg.base_addr = ioremap_nocache(res->start, resource_size(res));
+ ccdc_cfg.base_addr = ioremap(res->start, resource_size(res));
if (!ccdc_cfg.base_addr) {
status = -ENOMEM;
goto fail_nomem;
diff --git a/drivers/media/platform/davinci/isif.c b/drivers/media/platform/davinci/isif.c
index e2e7ab7b7f45..b49378b18e5d 100644
--- a/drivers/media/platform/davinci/isif.c
+++ b/drivers/media/platform/davinci/isif.c
@@ -1045,7 +1045,7 @@ static int isif_probe(struct platform_device *pdev)
status = -EBUSY;
goto fail_nobase_res;
}
- addr = ioremap_nocache(res->start, resource_size(res));
+ addr = ioremap(res->start, resource_size(res));
if (!addr) {
status = -ENOMEM;
goto fail_base_iomap;
diff --git a/drivers/media/platform/tegra-cec/tegra_cec.c b/drivers/media/platform/tegra-cec/tegra_cec.c
index a99caac59f44..1ac0c70a5981 100644
--- a/drivers/media/platform/tegra-cec/tegra_cec.c
+++ b/drivers/media/platform/tegra-cec/tegra_cec.c
@@ -351,7 +351,7 @@ static int tegra_cec_probe(struct platform_device *pdev)
if (cec->tegra_cec_irq <= 0)
return -EBUSY;
- cec->cec_base = devm_ioremap_nocache(&pdev->dev, res->start,
+ cec->cec_base = devm_ioremap(&pdev->dev, res->start,
resource_size(res));
if (!cec->cec_base) {
diff --git a/drivers/media/platform/vivid/vivid-osd.c b/drivers/media/platform/vivid/vivid-osd.c
index f2e789bdf4a6..fbaec8acc161 100644
--- a/drivers/media/platform/vivid/vivid-osd.c
+++ b/drivers/media/platform/vivid/vivid-osd.c
@@ -244,7 +244,7 @@ static int vivid_fb_blank(int blank_mode, struct fb_info *info)
return 0;
}
-static struct fb_ops vivid_fb_ops = {
+static const struct fb_ops vivid_fb_ops = {
.owner = THIS_MODULE,
.fb_check_var = vivid_fb_check_var,
.fb_set_par = vivid_fb_set_par,
@@ -311,7 +311,6 @@ static int vivid_fb_init_vidmode(struct vivid_dev *dev)
dev->fb_info.node = -1;
dev->fb_info.flags = FBINFO_FLAG_DEFAULT;
- dev->fb_info.fbops = &vivid_fb_ops;
dev->fb_info.par = dev;
dev->fb_info.var = dev->fb_defined;
dev->fb_info.fix = dev->fb_fix;
diff --git a/drivers/media/v4l2-core/videobuf-dma-sg.c b/drivers/media/v4l2-core/videobuf-dma-sg.c
index 66a6c6c236a7..13b65ed9e74c 100644
--- a/drivers/media/v4l2-core/videobuf-dma-sg.c
+++ b/drivers/media/v4l2-core/videobuf-dma-sg.c
@@ -183,12 +183,12 @@ static int videobuf_dma_init_user_locked(struct videobuf_dmabuf *dma,
dprintk(1, "init user [0x%lx+0x%lx => %d pages]\n",
data, size, dma->nr_pages);
- err = get_user_pages(data & PAGE_MASK, dma->nr_pages,
+ err = pin_user_pages(data & PAGE_MASK, dma->nr_pages,
flags | FOLL_LONGTERM, dma->pages, NULL);
if (err != dma->nr_pages) {
dma->nr_pages = (err >= 0) ? err : 0;
- dprintk(1, "get_user_pages: err=%d [%d]\n", err,
+ dprintk(1, "pin_user_pages: err=%d [%d]\n", err,
dma->nr_pages);
return err < 0 ? err : -EINVAL;
}
@@ -349,8 +349,8 @@ int videobuf_dma_free(struct videobuf_dmabuf *dma)
BUG_ON(dma->sglen);
if (dma->pages) {
- for (i = 0; i < dma->nr_pages; i++)
- put_page(dma->pages[i]);
+ unpin_user_pages_dirty_lock(dma->pages, dma->nr_pages,
+ dma->direction == DMA_FROM_DEVICE);
kfree(dma->pages);
dma->pages = NULL;
}
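
The videobuf conversion above pairs pin_user_pages(FOLL_LONGTERM) with unpin_user_pages_dirty_lock(), replacing the per-page put_page() loop and dirtying pages only for device-to-memory transfers. A hedged sketch of that pairing with hypothetical wrappers:

	#include <linux/mm.h>

	static int demo_pin(unsigned long uaddr, unsigned long nr_pages,
			    struct page **pages, bool write)
	{
		unsigned int flags = FOLL_LONGTERM | (write ? FOLL_WRITE : 0);
		long pinned = pin_user_pages(uaddr & PAGE_MASK, nr_pages, flags,
					     pages, NULL);

		if (pinned != nr_pages) {
			if (pinned > 0)
				unpin_user_pages(pages, pinned);
			return pinned < 0 ? (int)pinned : -EINVAL;
		}
		return 0;
	}

	static void demo_unpin(struct page **pages, unsigned long nr_pages,
			       bool from_device)
	{
		/* dirty the pages only if the device wrote into them */
		unpin_user_pages_dirty_lock(pages, nr_pages, from_device);
	}
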
diff --git a/drivers/message/fusion/mptctl.c b/drivers/message/fusion/mptctl.c
index f9ac22413000..1074b882c57c 100644
--- a/drivers/message/fusion/mptctl.c
+++ b/drivers/message/fusion/mptctl.c
@@ -100,19 +100,19 @@ struct buflist {
* Function prototypes. Called from OS entry point mptctl_ioctl.
* arg contents specific to function.
*/
-static int mptctl_fw_download(unsigned long arg);
-static int mptctl_getiocinfo(unsigned long arg, unsigned int cmd);
-static int mptctl_gettargetinfo(unsigned long arg);
-static int mptctl_readtest(unsigned long arg);
-static int mptctl_mpt_command(unsigned long arg);
-static int mptctl_eventquery(unsigned long arg);
-static int mptctl_eventenable(unsigned long arg);
-static int mptctl_eventreport(unsigned long arg);
-static int mptctl_replace_fw(unsigned long arg);
-
-static int mptctl_do_reset(unsigned long arg);
-static int mptctl_hp_hostinfo(unsigned long arg, unsigned int cmd);
-static int mptctl_hp_targetinfo(unsigned long arg);
+static int mptctl_fw_download(MPT_ADAPTER *iocp, unsigned long arg);
+static int mptctl_getiocinfo(MPT_ADAPTER *iocp, unsigned long arg, unsigned int cmd);
+static int mptctl_gettargetinfo(MPT_ADAPTER *iocp, unsigned long arg);
+static int mptctl_readtest(MPT_ADAPTER *iocp, unsigned long arg);
+static int mptctl_mpt_command(MPT_ADAPTER *iocp, unsigned long arg);
+static int mptctl_eventquery(MPT_ADAPTER *iocp, unsigned long arg);
+static int mptctl_eventenable(MPT_ADAPTER *iocp, unsigned long arg);
+static int mptctl_eventreport(MPT_ADAPTER *iocp, unsigned long arg);
+static int mptctl_replace_fw(MPT_ADAPTER *iocp, unsigned long arg);
+
+static int mptctl_do_reset(MPT_ADAPTER *iocp, unsigned long arg);
+static int mptctl_hp_hostinfo(MPT_ADAPTER *iocp, unsigned long arg, unsigned int cmd);
+static int mptctl_hp_targetinfo(MPT_ADAPTER *iocp, unsigned long arg);
static int mptctl_probe(struct pci_dev *, const struct pci_device_id *);
static void mptctl_remove(struct pci_dev *);
@@ -123,8 +123,8 @@ static long compat_mpctl_ioctl(struct file *f, unsigned cmd, unsigned long arg);
/*
* Private function calls.
*/
-static int mptctl_do_mpt_command(struct mpt_ioctl_command karg, void __user *mfPtr);
-static int mptctl_do_fw_download(int ioc, char __user *ufwbuf, size_t fwlen);
+static int mptctl_do_mpt_command(MPT_ADAPTER *iocp, struct mpt_ioctl_command karg, void __user *mfPtr);
+static int mptctl_do_fw_download(MPT_ADAPTER *iocp, char __user *ufwbuf, size_t fwlen);
static MptSge_t *kbuf_alloc_2_sgl(int bytes, u32 dir, int sge_offset, int *frags,
struct buflist **blp, dma_addr_t *sglbuf_dma, MPT_ADAPTER *ioc);
static void kfree_sgl(MptSge_t *sgl, dma_addr_t sgl_dma,
@@ -656,19 +656,19 @@ __mptctl_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
* by TM and FW reloads.
*/
if ((cmd & ~IOCSIZE_MASK) == (MPTIOCINFO & ~IOCSIZE_MASK)) {
- return mptctl_getiocinfo(arg, _IOC_SIZE(cmd));
+ return mptctl_getiocinfo(iocp, arg, _IOC_SIZE(cmd));
} else if (cmd == MPTTARGETINFO) {
- return mptctl_gettargetinfo(arg);
+ return mptctl_gettargetinfo(iocp, arg);
} else if (cmd == MPTTEST) {
- return mptctl_readtest(arg);
+ return mptctl_readtest(iocp, arg);
} else if (cmd == MPTEVENTQUERY) {
- return mptctl_eventquery(arg);
+ return mptctl_eventquery(iocp, arg);
} else if (cmd == MPTEVENTENABLE) {
- return mptctl_eventenable(arg);
+ return mptctl_eventenable(iocp, arg);
} else if (cmd == MPTEVENTREPORT) {
- return mptctl_eventreport(arg);
+ return mptctl_eventreport(iocp, arg);
} else if (cmd == MPTFWREPLACE) {
- return mptctl_replace_fw(arg);
+ return mptctl_replace_fw(iocp, arg);
}
/* All of these commands require an interrupt or
@@ -678,15 +678,15 @@ __mptctl_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
return ret;
if (cmd == MPTFWDOWNLOAD)
- ret = mptctl_fw_download(arg);
+ ret = mptctl_fw_download(iocp, arg);
else if (cmd == MPTCOMMAND)
- ret = mptctl_mpt_command(arg);
+ ret = mptctl_mpt_command(iocp, arg);
else if (cmd == MPTHARDRESET)
- ret = mptctl_do_reset(arg);
+ ret = mptctl_do_reset(iocp, arg);
else if ((cmd & ~IOCSIZE_MASK) == (HP_GETHOSTINFO & ~IOCSIZE_MASK))
- ret = mptctl_hp_hostinfo(arg, _IOC_SIZE(cmd));
+ ret = mptctl_hp_hostinfo(iocp, arg, _IOC_SIZE(cmd));
else if (cmd == HP_GETTARGETINFO)
- ret = mptctl_hp_targetinfo(arg);
+ ret = mptctl_hp_targetinfo(iocp, arg);
else
ret = -EINVAL;
@@ -705,11 +705,10 @@ mptctl_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
return ret;
}
-static int mptctl_do_reset(unsigned long arg)
+static int mptctl_do_reset(MPT_ADAPTER *iocp, unsigned long arg)
{
struct mpt_ioctl_diag_reset __user *urinfo = (void __user *) arg;
struct mpt_ioctl_diag_reset krinfo;
- MPT_ADAPTER *iocp;
if (copy_from_user(&krinfo, urinfo, sizeof(struct mpt_ioctl_diag_reset))) {
printk(KERN_ERR MYNAM "%s@%d::mptctl_do_reset - "
@@ -718,12 +717,6 @@ static int mptctl_do_reset(unsigned long arg)
return -EFAULT;
}
- if (mpt_verify_adapter(krinfo.hdr.iocnum, &iocp) < 0) {
- printk(KERN_DEBUG MYNAM "%s@%d::mptctl_do_reset - ioc%d not found!\n",
- __FILE__, __LINE__, krinfo.hdr.iocnum);
- return -ENODEV; /* (-6) No such device or address */
- }
-
dctlprintk(iocp, printk(MYIOC_s_DEBUG_FMT "mptctl_do_reset called.\n",
iocp->name));
@@ -754,7 +747,7 @@ static int mptctl_do_reset(unsigned long arg)
* -ENOMSG if FW upload returned bad status
*/
static int
-mptctl_fw_download(unsigned long arg)
+mptctl_fw_download(MPT_ADAPTER *iocp, unsigned long arg)
{
struct mpt_fw_xfer __user *ufwdl = (void __user *) arg;
struct mpt_fw_xfer kfwdl;
@@ -766,7 +759,7 @@ mptctl_fw_download(unsigned long arg)
return -EFAULT;
}
- return mptctl_do_fw_download(kfwdl.iocnum, kfwdl.bufp, kfwdl.fwlen);
+ return mptctl_do_fw_download(iocp, kfwdl.bufp, kfwdl.fwlen);
}
/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
@@ -784,11 +777,10 @@ mptctl_fw_download(unsigned long arg)
* -ENOMSG if FW upload returned bad status
*/
static int
-mptctl_do_fw_download(int ioc, char __user *ufwbuf, size_t fwlen)
+mptctl_do_fw_download(MPT_ADAPTER *iocp, char __user *ufwbuf, size_t fwlen)
{
FWDownload_t *dlmsg;
MPT_FRAME_HDR *mf;
- MPT_ADAPTER *iocp;
FWDownloadTCSGE_t *ptsge;
MptSge_t *sgl, *sgIn;
char *sgOut;
@@ -808,17 +800,10 @@ mptctl_do_fw_download(int ioc, char __user *ufwbuf, size_t fwlen)
pFWDownloadReply_t ReplyMsg = NULL;
unsigned long timeleft;
- if (mpt_verify_adapter(ioc, &iocp) < 0) {
- printk(KERN_DEBUG MYNAM "ioctl_fwdl - ioc%d not found!\n",
- ioc);
- return -ENODEV; /* (-6) No such device or address */
- } else {
-
- /* Valid device. Get a message frame and construct the FW download message.
- */
- if ((mf = mpt_get_msg_frame(mptctl_id, iocp)) == NULL)
- return -EAGAIN;
- }
+ /* Valid device. Get a message frame and construct the FW download message.
+ */
+ if ((mf = mpt_get_msg_frame(mptctl_id, iocp)) == NULL)
+ return -EAGAIN;
dctlprintk(iocp, printk(MYIOC_s_DEBUG_FMT
"mptctl_do_fwdl called. mptctl_id = %xh.\n", iocp->name, mptctl_id));
@@ -826,8 +811,6 @@ mptctl_do_fw_download(int ioc, char __user *ufwbuf, size_t fwlen)
iocp->name, ufwbuf));
dctlprintk(iocp, printk(MYIOC_s_DEBUG_FMT "DbG: kfwdl.fwlen = %d\n",
iocp->name, (int)fwlen));
- dctlprintk(iocp, printk(MYIOC_s_DEBUG_FMT "DbG: kfwdl.ioc = %04xh\n",
- iocp->name, ioc));
dlmsg = (FWDownload_t*) mf;
ptsge = (FWDownloadTCSGE_t *) &dlmsg->SGL;
@@ -1238,13 +1221,11 @@ kfree_sgl(MptSge_t *sgl, dma_addr_t sgl_dma, struct buflist *buflist, MPT_ADAPTE
* -ENODEV if no such device/adapter
*/
static int
-mptctl_getiocinfo (unsigned long arg, unsigned int data_size)
+mptctl_getiocinfo (MPT_ADAPTER *ioc, unsigned long arg, unsigned int data_size)
{
struct mpt_ioctl_iocinfo __user *uarg = (void __user *) arg;
struct mpt_ioctl_iocinfo *karg;
- MPT_ADAPTER *ioc;
struct pci_dev *pdev;
- int iocnum;
unsigned int port;
int cim_rev;
struct scsi_device *sdev;
@@ -1272,14 +1253,6 @@ mptctl_getiocinfo (unsigned long arg, unsigned int data_size)
return PTR_ERR(karg);
}
- if (((iocnum = mpt_verify_adapter(karg->hdr.iocnum, &ioc)) < 0) ||
- (ioc == NULL)) {
- printk(KERN_DEBUG MYNAM "%s::mptctl_getiocinfo() @%d - ioc%d not found!\n",
- __FILE__, __LINE__, iocnum);
- kfree(karg);
- return -ENODEV;
- }
-
/* Verify the data transfer size is correct. */
if (karg->hdr.maxDataSize != data_size) {
printk(MYIOC_s_ERR_FMT "%s@%d::mptctl_getiocinfo - "
@@ -1385,15 +1358,13 @@ mptctl_getiocinfo (unsigned long arg, unsigned int data_size)
* -ENODEV if no such device/adapter
*/
static int
-mptctl_gettargetinfo (unsigned long arg)
+mptctl_gettargetinfo (MPT_ADAPTER *ioc, unsigned long arg)
{
struct mpt_ioctl_targetinfo __user *uarg = (void __user *) arg;
struct mpt_ioctl_targetinfo karg;
- MPT_ADAPTER *ioc;
VirtDevice *vdevice;
char *pmem;
int *pdata;
- int iocnum;
int numDevices = 0;
int lun;
int maxWordsLeft;
@@ -1408,13 +1379,6 @@ mptctl_gettargetinfo (unsigned long arg)
return -EFAULT;
}
- if (((iocnum = mpt_verify_adapter(karg.hdr.iocnum, &ioc)) < 0) ||
- (ioc == NULL)) {
- printk(KERN_DEBUG MYNAM "%s::mptctl_gettargetinfo() @%d - ioc%d not found!\n",
- __FILE__, __LINE__, iocnum);
- return -ENODEV;
- }
-
dctlprintk(ioc, printk(MYIOC_s_DEBUG_FMT "mptctl_gettargetinfo called.\n",
ioc->name));
/* Get the port number and set the maximum number of bytes
@@ -1510,12 +1474,10 @@ mptctl_gettargetinfo (unsigned long arg)
* -ENODEV if no such device/adapter
*/
static int
-mptctl_readtest (unsigned long arg)
+mptctl_readtest (MPT_ADAPTER *ioc, unsigned long arg)
{
struct mpt_ioctl_test __user *uarg = (void __user *) arg;
struct mpt_ioctl_test karg;
- MPT_ADAPTER *ioc;
- int iocnum;
if (copy_from_user(&karg, uarg, sizeof(struct mpt_ioctl_test))) {
printk(KERN_ERR MYNAM "%s@%d::mptctl_readtest - "
@@ -1524,13 +1486,6 @@ mptctl_readtest (unsigned long arg)
return -EFAULT;
}
- if (((iocnum = mpt_verify_adapter(karg.hdr.iocnum, &ioc)) < 0) ||
- (ioc == NULL)) {
- printk(KERN_DEBUG MYNAM "%s::mptctl_readtest() @%d - ioc%d not found!\n",
- __FILE__, __LINE__, iocnum);
- return -ENODEV;
- }
-
dctlprintk(ioc, printk(MYIOC_s_DEBUG_FMT "mptctl_readtest called.\n",
ioc->name));
/* Fill in the data and return the structure to the calling
@@ -1571,12 +1526,10 @@ mptctl_readtest (unsigned long arg)
* -ENODEV if no such device/adapter
*/
static int
-mptctl_eventquery (unsigned long arg)
+mptctl_eventquery (MPT_ADAPTER *ioc, unsigned long arg)
{
struct mpt_ioctl_eventquery __user *uarg = (void __user *) arg;
struct mpt_ioctl_eventquery karg;
- MPT_ADAPTER *ioc;
- int iocnum;
if (copy_from_user(&karg, uarg, sizeof(struct mpt_ioctl_eventquery))) {
printk(KERN_ERR MYNAM "%s@%d::mptctl_eventquery - "
@@ -1585,13 +1538,6 @@ mptctl_eventquery (unsigned long arg)
return -EFAULT;
}
- if (((iocnum = mpt_verify_adapter(karg.hdr.iocnum, &ioc)) < 0) ||
- (ioc == NULL)) {
- printk(KERN_DEBUG MYNAM "%s::mptctl_eventquery() @%d - ioc%d not found!\n",
- __FILE__, __LINE__, iocnum);
- return -ENODEV;
- }
-
dctlprintk(ioc, printk(MYIOC_s_DEBUG_FMT "mptctl_eventquery called.\n",
ioc->name));
karg.eventEntries = MPTCTL_EVENT_LOG_SIZE;
@@ -1610,12 +1556,10 @@ mptctl_eventquery (unsigned long arg)
/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
static int
-mptctl_eventenable (unsigned long arg)
+mptctl_eventenable (MPT_ADAPTER *ioc, unsigned long arg)
{
struct mpt_ioctl_eventenable __user *uarg = (void __user *) arg;
struct mpt_ioctl_eventenable karg;
- MPT_ADAPTER *ioc;
- int iocnum;
if (copy_from_user(&karg, uarg, sizeof(struct mpt_ioctl_eventenable))) {
printk(KERN_ERR MYNAM "%s@%d::mptctl_eventenable - "
@@ -1624,13 +1568,6 @@ mptctl_eventenable (unsigned long arg)
return -EFAULT;
}
- if (((iocnum = mpt_verify_adapter(karg.hdr.iocnum, &ioc)) < 0) ||
- (ioc == NULL)) {
- printk(KERN_DEBUG MYNAM "%s::mptctl_eventenable() @%d - ioc%d not found!\n",
- __FILE__, __LINE__, iocnum);
- return -ENODEV;
- }
-
dctlprintk(ioc, printk(MYIOC_s_DEBUG_FMT "mptctl_eventenable called.\n",
ioc->name));
if (ioc->events == NULL) {
@@ -1658,12 +1595,10 @@ mptctl_eventenable (unsigned long arg)
/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
static int
-mptctl_eventreport (unsigned long arg)
+mptctl_eventreport (MPT_ADAPTER *ioc, unsigned long arg)
{
struct mpt_ioctl_eventreport __user *uarg = (void __user *) arg;
struct mpt_ioctl_eventreport karg;
- MPT_ADAPTER *ioc;
- int iocnum;
int numBytes, maxEvents, max;
if (copy_from_user(&karg, uarg, sizeof(struct mpt_ioctl_eventreport))) {
@@ -1673,12 +1608,6 @@ mptctl_eventreport (unsigned long arg)
return -EFAULT;
}
- if (((iocnum = mpt_verify_adapter(karg.hdr.iocnum, &ioc)) < 0) ||
- (ioc == NULL)) {
- printk(KERN_DEBUG MYNAM "%s::mptctl_eventreport() @%d - ioc%d not found!\n",
- __FILE__, __LINE__, iocnum);
- return -ENODEV;
- }
dctlprintk(ioc, printk(MYIOC_s_DEBUG_FMT "mptctl_eventreport called.\n",
ioc->name));
@@ -1712,12 +1641,10 @@ mptctl_eventreport (unsigned long arg)
/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
static int
-mptctl_replace_fw (unsigned long arg)
+mptctl_replace_fw (MPT_ADAPTER *ioc, unsigned long arg)
{
struct mpt_ioctl_replace_fw __user *uarg = (void __user *) arg;
struct mpt_ioctl_replace_fw karg;
- MPT_ADAPTER *ioc;
- int iocnum;
int newFwSize;
if (copy_from_user(&karg, uarg, sizeof(struct mpt_ioctl_replace_fw))) {
@@ -1727,13 +1654,6 @@ mptctl_replace_fw (unsigned long arg)
return -EFAULT;
}
- if (((iocnum = mpt_verify_adapter(karg.hdr.iocnum, &ioc)) < 0) ||
- (ioc == NULL)) {
- printk(KERN_DEBUG MYNAM "%s::mptctl_replace_fw() @%d - ioc%d not found!\n",
- __FILE__, __LINE__, iocnum);
- return -ENODEV;
- }
-
dctlprintk(ioc, printk(MYIOC_s_DEBUG_FMT "mptctl_replace_fw called.\n",
ioc->name));
/* If caching FW, Free the old FW image
@@ -1780,12 +1700,10 @@ mptctl_replace_fw (unsigned long arg)
* -ENOMEM if memory allocation error
*/
static int
-mptctl_mpt_command (unsigned long arg)
+mptctl_mpt_command (MPT_ADAPTER *ioc, unsigned long arg)
{
struct mpt_ioctl_command __user *uarg = (void __user *) arg;
struct mpt_ioctl_command karg;
- MPT_ADAPTER *ioc;
- int iocnum;
int rc;
@@ -1796,14 +1714,7 @@ mptctl_mpt_command (unsigned long arg)
return -EFAULT;
}
- if (((iocnum = mpt_verify_adapter(karg.hdr.iocnum, &ioc)) < 0) ||
- (ioc == NULL)) {
- printk(KERN_DEBUG MYNAM "%s::mptctl_mpt_command() @%d - ioc%d not found!\n",
- __FILE__, __LINE__, iocnum);
- return -ENODEV;
- }
-
- rc = mptctl_do_mpt_command (karg, &uarg->MF);
+ rc = mptctl_do_mpt_command (ioc, karg, &uarg->MF);
return rc;
}
@@ -1821,9 +1732,8 @@ mptctl_mpt_command (unsigned long arg)
* -EPERM if SCSI I/O and target is untagged
*/
static int
-mptctl_do_mpt_command (struct mpt_ioctl_command karg, void __user *mfPtr)
+mptctl_do_mpt_command (MPT_ADAPTER *ioc, struct mpt_ioctl_command karg, void __user *mfPtr)
{
- MPT_ADAPTER *ioc;
MPT_FRAME_HDR *mf = NULL;
MPIHeader_t *hdr;
char *psge;
@@ -1832,7 +1742,7 @@ mptctl_do_mpt_command (struct mpt_ioctl_command karg, void __user *mfPtr)
dma_addr_t dma_addr_in;
dma_addr_t dma_addr_out;
int sgSize = 0; /* Num SG elements */
- int iocnum, flagsLength;
+ int flagsLength;
int sz, rc = 0;
int msgContext;
u16 req_idx;
@@ -1847,13 +1757,6 @@ mptctl_do_mpt_command (struct mpt_ioctl_command karg, void __user *mfPtr)
bufIn.kptr = bufOut.kptr = NULL;
bufIn.len = bufOut.len = 0;
- if (((iocnum = mpt_verify_adapter(karg.hdr.iocnum, &ioc)) < 0) ||
- (ioc == NULL)) {
- printk(KERN_DEBUG MYNAM "%s::mptctl_do_mpt_command() @%d - ioc%d not found!\n",
- __FILE__, __LINE__, iocnum);
- return -ENODEV;
- }
-
spin_lock_irqsave(&ioc->taskmgmt_lock, flags);
if (ioc->ioc_reset_in_progress) {
spin_unlock_irqrestore(&ioc->taskmgmt_lock, flags);
@@ -2418,17 +2321,15 @@ done_free_mem:
* -ENOMEM if memory allocation error
*/
static int
-mptctl_hp_hostinfo(unsigned long arg, unsigned int data_size)
+mptctl_hp_hostinfo(MPT_ADAPTER *ioc, unsigned long arg, unsigned int data_size)
{
hp_host_info_t __user *uarg = (void __user *) arg;
- MPT_ADAPTER *ioc;
struct pci_dev *pdev;
char *pbuf=NULL;
dma_addr_t buf_dma;
hp_host_info_t karg;
CONFIGPARMS cfg;
ConfigPageHeader_t hdr;
- int iocnum;
int rc, cim_rev;
ToolboxIstwiReadWriteRequest_t *IstwiRWRequest;
MPT_FRAME_HDR *mf = NULL;
@@ -2452,12 +2353,6 @@ mptctl_hp_hostinfo(unsigned long arg, unsigned int data_size)
return -EFAULT;
}
- if (((iocnum = mpt_verify_adapter(karg.hdr.iocnum, &ioc)) < 0) ||
- (ioc == NULL)) {
- printk(KERN_DEBUG MYNAM "%s::mptctl_hp_hostinfo() @%d - ioc%d not found!\n",
- __FILE__, __LINE__, iocnum);
- return -ENODEV;
- }
dctlprintk(ioc, printk(MYIOC_s_DEBUG_FMT ": mptctl_hp_hostinfo called.\n",
ioc->name));
@@ -2659,15 +2554,13 @@ retry_wait:
* -ENOMEM if memory allocation error
*/
static int
-mptctl_hp_targetinfo(unsigned long arg)
+mptctl_hp_targetinfo(MPT_ADAPTER *ioc, unsigned long arg)
{
hp_target_info_t __user *uarg = (void __user *) arg;
SCSIDevicePage0_t *pg0_alloc;
SCSIDevicePage3_t *pg3_alloc;
- MPT_ADAPTER *ioc;
MPT_SCSI_HOST *hd = NULL;
hp_target_info_t karg;
- int iocnum;
int data_sz;
dma_addr_t page_dma;
CONFIGPARMS cfg;
@@ -2681,12 +2574,6 @@ mptctl_hp_targetinfo(unsigned long arg)
return -EFAULT;
}
- if (((iocnum = mpt_verify_adapter(karg.hdr.iocnum, &ioc)) < 0) ||
- (ioc == NULL)) {
- printk(KERN_DEBUG MYNAM "%s::mptctl_hp_targetinfo() @%d - ioc%d not found!\n",
- __FILE__, __LINE__, iocnum);
- return -ENODEV;
- }
if (karg.hdr.id >= MPT_MAX_FC_DEVICES)
return -EINVAL;
dctlprintk(ioc, printk(MYIOC_s_DEBUG_FMT "mptctl_hp_targetinfo called.\n",
@@ -2854,7 +2741,7 @@ compat_mptfwxfer_ioctl(struct file *filp, unsigned int cmd,
kfw.fwlen = kfw32.fwlen;
kfw.bufp = compat_ptr(kfw32.bufp);
- ret = mptctl_do_fw_download(kfw.iocnum, kfw.bufp, kfw.fwlen);
+ ret = mptctl_do_fw_download(iocp, kfw.bufp, kfw.fwlen);
mutex_unlock(&iocp->ioctl_cmds.mutex);
@@ -2908,7 +2795,7 @@ compat_mpt_command(struct file *filp, unsigned int cmd,
/* Pass new structure to do_mpt_command
*/
- ret = mptctl_do_mpt_command (karg, &uarg->MF);
+ ret = mptctl_do_mpt_command (iocp, karg, &uarg->MF);
mutex_unlock(&iocp->ioctl_cmds.mutex);
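
The mptctl rework above resolves the adapter once in __mptctl_ioctl() and threads the MPT_ADAPTER pointer through every handler, deleting the duplicated mpt_verify_adapter() lookup from each of them. The shape of the pattern, reduced to a sketch with hypothetical names:

	#include <linux/errno.h>

	struct demo_dev { int id; };

	static int demo_do_reset(struct demo_dev *dev, unsigned long arg)
	{
		return 0;	/* acts on the already-validated device */
	}

	/* the dispatcher validates the device once, then passes the pointer down */
	static long demo_ioctl(struct demo_dev *dev, unsigned int cmd,
			       unsigned long arg)
	{
		switch (cmd) {
		case 0:
			return demo_do_reset(dev, arg);
		default:
			return -EINVAL;
		}
	}

Besides removing boilerplate, this closes the window in which a handler could look up a different (or vanished) adapter than the one the dispatcher locked.
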
diff --git a/drivers/message/fusion/mptlan.c b/drivers/message/fusion/mptlan.c
index ebc00d47abf5..7d3784aa20e5 100644
--- a/drivers/message/fusion/mptlan.c
+++ b/drivers/message/fusion/mptlan.c
@@ -552,7 +552,7 @@ mpt_lan_close(struct net_device *dev)
/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
/* Tx timeout handler. */
static void
-mpt_lan_tx_timeout(struct net_device *dev)
+mpt_lan_tx_timeout(struct net_device *dev, unsigned int txqueue)
{
struct mpt_lan_priv *priv = netdev_priv(dev);
MPT_ADAPTER *mpt_dev = priv->mpt_dev;
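
The mptlan hunk adapts to the tree-wide change that added a txqueue argument to the .ndo_tx_timeout callback so drivers learn which queue stalled. A hedged sketch of the updated wiring:

	#include <linux/netdevice.h>

	static void demo_tx_timeout(struct net_device *dev, unsigned int txqueue)
	{
		netdev_warn(dev, "TX queue %u timed out\n", txqueue);
		/* a real driver would reset or restart the stalled queue here */
	}

	static const struct net_device_ops demo_netdev_ops = {
		.ndo_tx_timeout	= demo_tx_timeout,
	};
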
diff --git a/drivers/mfd/Kconfig b/drivers/mfd/Kconfig
index 420900852166..7a4a41dddc97 100644
--- a/drivers/mfd/Kconfig
+++ b/drivers/mfd/Kconfig
@@ -2004,5 +2004,18 @@ config RAVE_SP_CORE
Select this to get support for the Supervisory Processor
device found on several devices in RAVE line of hardware.
+config SGI_MFD_IOC3
+ tristate "SGI IOC3 core driver"
+ depends on PCI && MIPS && 64BIT
+ select MFD_CORE
+ help
+ This option enables basic support for SGI IOC3-based
+ controller cards. It does not enable any specific
+ functions on such a card, but provides the necessary
+ infrastructure for other drivers to utilize.
+
+ If you have an SGI Origin, Octane, or a PCI IOC3 card,
+ then say Y. Otherwise say N.
+
endmenu
endif
diff --git a/drivers/mfd/Makefile b/drivers/mfd/Makefile
index aed99f08739f..5fe930c76ade 100644
--- a/drivers/mfd/Makefile
+++ b/drivers/mfd/Makefile
@@ -255,3 +255,4 @@ obj-$(CONFIG_MFD_ROHM_BD70528) += rohm-bd70528.o
obj-$(CONFIG_MFD_ROHM_BD718XX) += rohm-bd718x7.o
obj-$(CONFIG_MFD_STMFX) += stmfx.o
+obj-$(CONFIG_SGI_MFD_IOC3) += ioc3.o
diff --git a/drivers/mfd/intel_soc_pmic_core.c b/drivers/mfd/intel_soc_pmic_core.c
index c9f35378d391..ddd64f9e3341 100644
--- a/drivers/mfd/intel_soc_pmic_core.c
+++ b/drivers/mfd/intel_soc_pmic_core.c
@@ -9,8 +9,6 @@
*/
#include <linux/acpi.h>
-#include <linux/gpio/consumer.h>
-#include <linux/gpio/machine.h>
#include <linux/i2c.h>
#include <linux/interrupt.h>
#include <linux/module.h>
@@ -25,20 +23,9 @@
#define BYT_CRC_HRV 2
#define CHT_CRC_HRV 3
-/* Lookup table for the Panel Enable/Disable line as GPIO signals */
-static struct gpiod_lookup_table panel_gpio_table = {
- /* Intel GFX is consumer */
- .dev_id = "0000:00:02.0",
- .table = {
- /* Panel EN/DISABLE */
- GPIO_LOOKUP("gpio_crystalcove", 94, "panel", GPIO_ACTIVE_HIGH),
- { },
- },
-};
-
/* PWM consumed by the Intel GFX */
static struct pwm_lookup crc_pwm_lookup[] = {
- PWM_LOOKUP("crystal_cove_pwm", 0, "0000:00:02.0", "pwm_backlight", 0, PWM_POLARITY_NORMAL),
+ PWM_LOOKUP("crystal_cove_pwm", 0, "0000:00:02.0", "pwm_pmic_backlight", 0, PWM_POLARITY_NORMAL),
};
static int intel_soc_pmic_i2c_probe(struct i2c_client *i2c,
@@ -96,9 +83,6 @@ static int intel_soc_pmic_i2c_probe(struct i2c_client *i2c,
if (ret)
dev_warn(dev, "Can't enable IRQ as wake source: %d\n", ret);
- /* Add lookup table binding for Panel Control to the GPIO Chip */
- gpiod_add_lookup_table(&panel_gpio_table);
-
/* Add lookup table for crc-pwm */
pwm_add_table(crc_pwm_lookup, ARRAY_SIZE(crc_pwm_lookup));
@@ -121,9 +105,6 @@ static int intel_soc_pmic_i2c_remove(struct i2c_client *i2c)
regmap_del_irq_chip(pmic->irq, pmic->irq_chip_data);
- /* Remove lookup table for Panel Control from the GPIO Chip */
- gpiod_remove_lookup_table(&panel_gpio_table);
-
/* remove crc-pwm lookup table */
pwm_remove_table(crc_pwm_lookup, ARRAY_SIZE(crc_pwm_lookup));
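
The PWM_LOOKUP change above renames the consumer string so the lookup matches what the graphics side actually requests. A hedged sketch of how a lookup entry pairs with a pwm_get() call (the table mirrors the hunk; the consumer function is illustrative):

	#include <linux/pwm.h>

	/* provider side: registered with pwm_add_table() by the PMIC driver */
	static struct pwm_lookup demo_pwm_lookup[] = {
		PWM_LOOKUP("crystal_cove_pwm", 0, "0000:00:02.0",
			   "pwm_pmic_backlight", 0, PWM_POLARITY_NORMAL),
	};

	/* consumer side: matched by (dev_id, con_id) */
	static struct pwm_device *demo_get_backlight_pwm(struct device *gfx_dev)
	{
		return pwm_get(gfx_dev, "pwm_pmic_backlight");
	}
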
diff --git a/drivers/mfd/ioc3.c b/drivers/mfd/ioc3.c
new file mode 100644
index 000000000000..02998d4eb74b
--- /dev/null
+++ b/drivers/mfd/ioc3.c
@@ -0,0 +1,669 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * SGI IOC3 multifunction device driver
+ *
+ * Copyright (C) 2018, 2019 Thomas Bogendoerfer <tbogendoerfer@suse.de>
+ *
+ * Based on work by:
+ * Stanislaw Skowronek <skylark@unaligned.org>
+ * Joshua Kinard <kumba@gentoo.org>
+ * Brent Casavant <bcasavan@sgi.com> - IOC4 master driver
+ * Pat Gefre <pfg@sgi.com> - IOC3 serial port IRQ demuxer
+ */
+
+#include <linux/delay.h>
+#include <linux/errno.h>
+#include <linux/interrupt.h>
+#include <linux/mfd/core.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/platform_device.h>
+#include <linux/platform_data/sgi-w1.h>
+#include <linux/rtc/ds1685.h>
+
+#include <asm/pci/bridge.h>
+#include <asm/sn/ioc3.h>
+
+#define IOC3_IRQ_SERIAL_A 6
+#define IOC3_IRQ_SERIAL_B 15
+#define IOC3_IRQ_KBD 22
+
+/* Bitmask for selecting which IRQs are level triggered */
+#define IOC3_LVL_MASK (BIT(IOC3_IRQ_SERIAL_A) | BIT(IOC3_IRQ_SERIAL_B))
+
+#define M48T35_REG_SIZE 32768 /* size of m48t35 registers */
+
+/* 1.2 us latency timer (40 cycles at 33 MHz) */
+#define IOC3_LATENCY 40
+
+struct ioc3_priv_data {
+ struct irq_domain *domain;
+ struct ioc3 __iomem *regs;
+ struct pci_dev *pdev;
+ int domain_irq;
+};
+
+static void ioc3_irq_ack(struct irq_data *d)
+{
+ struct ioc3_priv_data *ipd = irq_data_get_irq_chip_data(d);
+ unsigned int hwirq = irqd_to_hwirq(d);
+
+ writel(BIT(hwirq), &ipd->regs->sio_ir);
+}
+
+static void ioc3_irq_mask(struct irq_data *d)
+{
+ struct ioc3_priv_data *ipd = irq_data_get_irq_chip_data(d);
+ unsigned int hwirq = irqd_to_hwirq(d);
+
+ writel(BIT(hwirq), &ipd->regs->sio_iec);
+}
+
+static void ioc3_irq_unmask(struct irq_data *d)
+{
+ struct ioc3_priv_data *ipd = irq_data_get_irq_chip_data(d);
+ unsigned int hwirq = irqd_to_hwirq(d);
+
+ writel(BIT(hwirq), &ipd->regs->sio_ies);
+}
+
+static struct irq_chip ioc3_irq_chip = {
+ .name = "IOC3",
+ .irq_ack = ioc3_irq_ack,
+ .irq_mask = ioc3_irq_mask,
+ .irq_unmask = ioc3_irq_unmask,
+};
+
+static int ioc3_irq_domain_map(struct irq_domain *d, unsigned int irq,
+ irq_hw_number_t hwirq)
+{
+ /* Use level-triggered handling for every interrupt in IOC3_LVL_MASK */
+ if (BIT(hwirq) & IOC3_LVL_MASK)
+ irq_set_chip_and_handler(irq, &ioc3_irq_chip, handle_level_irq);
+ else
+ irq_set_chip_and_handler(irq, &ioc3_irq_chip, handle_edge_irq);
+
+ irq_set_chip_data(irq, d->host_data);
+ return 0;
+}
+
+static void ioc3_irq_domain_unmap(struct irq_domain *d, unsigned int irq)
+{
+ irq_set_chip_and_handler(irq, NULL, NULL);
+ irq_set_chip_data(irq, NULL);
+}
+
+static const struct irq_domain_ops ioc3_irq_domain_ops = {
+ .map = ioc3_irq_domain_map,
+ .unmap = ioc3_irq_domain_unmap,
+};
+
+static void ioc3_irq_handler(struct irq_desc *desc)
+{
+ struct irq_domain *domain = irq_desc_get_handler_data(desc);
+ struct ioc3_priv_data *ipd = domain->host_data;
+ struct ioc3 __iomem *regs = ipd->regs;
+ u32 pending, mask;
+ unsigned int irq;
+
+ pending = readl(&regs->sio_ir);
+ mask = readl(&regs->sio_ies);
+ pending &= mask; /* Mask off interrupts that are not enabled */
+
+ if (pending) {
+ irq = irq_find_mapping(domain, __ffs(pending));
+ if (irq)
+ generic_handle_irq(irq);
+ } else {
+ spurious_interrupt();
+ }
+}
+
+/*
+ * System boards/BaseIOs use more interrupt pins of the bridge ASIC
+ * to which the IOC3 is connected. Since the IOC3 MFD driver knows
+ * the wiring of these extra pins, it uses the host bridge's map_irq
+ * callback to look up the Linux IRQs for them.
+ */
+static int ioc3_map_irq(struct pci_dev *pdev, int slot, int pin)
+{
+ struct pci_host_bridge *hbrg = pci_find_host_bridge(pdev->bus);
+
+ return hbrg->map_irq(pdev, slot, pin);
+}
+
+static int ioc3_irq_domain_setup(struct ioc3_priv_data *ipd, int irq)
+{
+ struct irq_domain *domain;
+ struct fwnode_handle *fn;
+
+ fn = irq_domain_alloc_named_fwnode("IOC3");
+ if (!fn)
+ goto err;
+
+ domain = irq_domain_create_linear(fn, 24, &ioc3_irq_domain_ops, ipd);
+ if (!domain)
+ goto err;
+
+ irq_domain_free_fwnode(fn);
+ ipd->domain = domain;
+
+ irq_set_chained_handler_and_data(irq, ioc3_irq_handler, domain);
+ ipd->domain_irq = irq;
+ return 0;
+
+err:
+ dev_err(&ipd->pdev->dev, "irq domain setup failed\n");
+ return -ENOMEM;
+}
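
The block above wires up the classic chained-demux pattern: an irq_chip driving the sio set/clear registers, a linear domain for the 24 IOC3 lines, and a chained handler on the parent PCI interrupt. A minimal sketch of the same pattern follows; the 24-line width matches the driver, but the struct and register names here are made up, and this version loops over all pending bits where the handler above dispatches one bit per invocation and relies on level retrigger for the rest.

	/* Hedged sketch of the chained-IRQ demux pattern used above. */
	#include <linux/bitops.h>
	#include <linux/io.h>
	#include <linux/irq.h>
	#include <linux/irqdomain.h>

	struct demo_intc {
		void __iomem *pending_reg;	/* hypothetical "pending" register */
		struct irq_domain *domain;
	};

	static void demo_demux(struct irq_desc *desc)
	{
		struct demo_intc *intc = irq_desc_get_handler_data(desc);
		unsigned long pending = readl(intc->pending_reg);
		unsigned int hwirq;

		/* Hand each pending hardware line to its mapped Linux IRQ */
		for_each_set_bit(hwirq, &pending, 24) {
			unsigned int irq = irq_find_mapping(intc->domain, hwirq);

			if (irq)
				generic_handle_irq(irq);
		}
	}

	static int demo_attach(struct demo_intc *intc, int parent_irq,
			       const struct irq_domain_ops *ops)
	{
		intc->domain = irq_domain_add_linear(NULL, 24, ops, intc);
		if (!intc->domain)
			return -ENOMEM;

		/* parent_irq now fans out to the 24 domain interrupts */
		irq_set_chained_handler_and_data(parent_irq, demo_demux, intc);
		return 0;
	}
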
+
+static struct resource ioc3_uarta_resources[] = {
+ DEFINE_RES_MEM(offsetof(struct ioc3, sregs.uarta),
+ sizeof_field(struct ioc3, sregs.uarta)),
+ DEFINE_RES_IRQ(IOC3_IRQ_SERIAL_A)
+};
+
+static struct resource ioc3_uartb_resources[] = {
+ DEFINE_RES_MEM(offsetof(struct ioc3, sregs.uartb),
+ sizeof_field(struct ioc3, sregs.uartb)),
+ DEFINE_RES_IRQ(IOC3_IRQ_SERIAL_B)
+};
+
+static struct mfd_cell ioc3_serial_cells[] = {
+ {
+ .name = "ioc3-serial8250",
+ .resources = ioc3_uarta_resources,
+ .num_resources = ARRAY_SIZE(ioc3_uarta_resources),
+ },
+ {
+ .name = "ioc3-serial8250",
+ .resources = ioc3_uartb_resources,
+ .num_resources = ARRAY_SIZE(ioc3_uartb_resources),
+ }
+};
+
+static int ioc3_serial_setup(struct ioc3_priv_data *ipd)
+{
+ int ret;
+
+ /* Set up GPIO pins for RS232/RS422 mode selection */
+ writel(GPCR_UARTA_MODESEL | GPCR_UARTB_MODESEL,
+ &ipd->regs->gpcr_s);
+ /* Select RS232 mode for UART A */
+ writel(0, &ipd->regs->gppr[6]);
+ /* Select RS232 mode for UART B */
+ writel(0, &ipd->regs->gppr[7]);
+
+ /* Switch both ports to 16550 mode */
+ writel(readl(&ipd->regs->port_a.sscr) & ~SSCR_DMA_EN,
+ &ipd->regs->port_a.sscr);
+ writel(readl(&ipd->regs->port_b.sscr) & ~SSCR_DMA_EN,
+ &ipd->regs->port_b.sscr);
+ udelay(1000); /* Wait until mode switch is done */
+
+ ret = mfd_add_devices(&ipd->pdev->dev, PLATFORM_DEVID_AUTO,
+ ioc3_serial_cells, ARRAY_SIZE(ioc3_serial_cells),
+ &ipd->pdev->resource[0], 0, ipd->domain);
+ if (ret) {
+ dev_err(&ipd->pdev->dev, "Failed to add 16550 subdevs\n");
+ return ret;
+ }
+
+ return 0;
+}
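
Worth noting how the cells above get at their hardware: the MFD core rebases each DEFINE_RES_MEM() offset against the parent resource handed to mfd_add_devices() (&ipd->pdev->resource[0], i.e. BAR 0) and translates each DEFINE_RES_IRQ() number through the supplied IRQ domain. A hedged sketch of the consuming side; the probe body is illustrative and only the cell name comes from the driver:

	#include <linux/module.h>
	#include <linux/platform_device.h>

	static int demo_cell_probe(struct platform_device *pdev)
	{
		struct resource *mem;
		int irq;

		/* already rebased: BAR0 start + offsetof(struct ioc3, sregs.uarta) */
		mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
		if (!mem)
			return -ENODEV;

		/* already a Linux IRQ, mapped through the parent's domain */
		irq = platform_get_irq(pdev, 0);
		if (irq < 0)
			return irq;

		dev_info(&pdev->dev, "regs %pR, irq %d\n", mem, irq);
		return 0;
	}

	static struct platform_driver demo_cell_driver = {
		.probe	= demo_cell_probe,
		.driver	= { .name = "ioc3-serial8250" },
	};
	module_platform_driver(demo_cell_driver);
	MODULE_LICENSE("GPL");
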
+
+static struct resource ioc3_kbd_resources[] = {
+ DEFINE_RES_MEM(offsetof(struct ioc3, serio),
+ sizeof_field(struct ioc3, serio)),
+ DEFINE_RES_IRQ(IOC3_IRQ_KBD)
+};
+
+static struct mfd_cell ioc3_kbd_cells[] = {
+ {
+ .name = "ioc3-kbd",
+ .resources = ioc3_kbd_resources,
+ .num_resources = ARRAY_SIZE(ioc3_kbd_resources),
+ }
+};
+
+static int ioc3_kbd_setup(struct ioc3_priv_data *ipd)
+{
+ int ret;
+
+ ret = mfd_add_devices(&ipd->pdev->dev, PLATFORM_DEVID_AUTO,
+ ioc3_kbd_cells, ARRAY_SIZE(ioc3_kbd_cells),
+ &ipd->pdev->resource[0], 0, ipd->domain);
+ if (ret) {
+ dev_err(&ipd->pdev->dev, "Failed to add KBD subdev\n");
+ return ret;
+ }
+
+ return 0;
+}
+
+static struct resource ioc3_eth_resources[] = {
+ DEFINE_RES_MEM(offsetof(struct ioc3, eth),
+ sizeof_field(struct ioc3, eth)),
+ DEFINE_RES_MEM(offsetof(struct ioc3, ssram),
+ sizeof_field(struct ioc3, ssram)),
+ DEFINE_RES_IRQ(0)
+};
+
+static struct resource ioc3_w1_resources[] = {
+ DEFINE_RES_MEM(offsetof(struct ioc3, mcr),
+ sizeof_field(struct ioc3, mcr)),
+};
+
+static struct sgi_w1_platform_data ioc3_w1_platform_data;
+
+static struct mfd_cell ioc3_eth_cells[] = {
+ {
+ .name = "ioc3-eth",
+ .resources = ioc3_eth_resources,
+ .num_resources = ARRAY_SIZE(ioc3_eth_resources),
+ },
+ {
+ .name = "sgi_w1",
+ .resources = ioc3_w1_resources,
+ .num_resources = ARRAY_SIZE(ioc3_w1_resources),
+ .platform_data = &ioc3_w1_platform_data,
+ .pdata_size = sizeof(ioc3_w1_platform_data),
+ }
+};
+
+static int ioc3_eth_setup(struct ioc3_priv_data *ipd)
+{
+ int ret;
+
+ /* Enable One-Wire bus */
+ writel(GPCR_MLAN_EN, &ipd->regs->gpcr_s);
+
+ /* Generate unique identifier */
+ snprintf(ioc3_w1_platform_data.dev_id,
+ sizeof(ioc3_w1_platform_data.dev_id), "ioc3-%012llx",
+ ipd->pdev->resource->start);
+
+ ret = mfd_add_devices(&ipd->pdev->dev, PLATFORM_DEVID_AUTO,
+ ioc3_eth_cells, ARRAY_SIZE(ioc3_eth_cells),
+ &ipd->pdev->resource[0], ipd->pdev->irq, NULL);
+ if (ret) {
+ dev_err(&ipd->pdev->dev, "Failed to add ETH/W1 subdev\n");
+ return ret;
+ }
+
+ return 0;
+}
+
+static struct resource ioc3_m48t35_resources[] = {
+ DEFINE_RES_MEM(IOC3_BYTEBUS_DEV0, M48T35_REG_SIZE)
+};
+
+static struct mfd_cell ioc3_m48t35_cells[] = {
+ {
+ .name = "rtc-m48t35",
+ .resources = ioc3_m48t35_resources,
+ .num_resources = ARRAY_SIZE(ioc3_m48t35_resources),
+ }
+};
+
+static int ioc3_m48t35_setup(struct ioc3_priv_data *ipd)
+{
+ int ret;
+
+ ret = mfd_add_devices(&ipd->pdev->dev, PLATFORM_DEVID_AUTO,
+ ioc3_m48t35_cells, ARRAY_SIZE(ioc3_m48t35_cells),
+ &ipd->pdev->resource[0], 0, ipd->domain);
+ if (ret)
+ dev_err(&ipd->pdev->dev, "Failed to add M48T35 subdev\n");
+
+ return ret;
+}
+
+static struct ds1685_rtc_platform_data ip30_rtc_platform_data = {
+ .bcd_mode = false,
+ .no_irq = false,
+ .uie_unsupported = true,
+ .access_type = ds1685_reg_indirect,
+};
+
+static struct resource ioc3_rtc_ds1685_resources[] = {
+ DEFINE_RES_MEM(IOC3_BYTEBUS_DEV1, 1),
+ DEFINE_RES_MEM(IOC3_BYTEBUS_DEV2, 1),
+ DEFINE_RES_IRQ(0)
+};
+
+static struct mfd_cell ioc3_ds1685_cells[] = {
+ {
+ .name = "rtc-ds1685",
+ .resources = ioc3_rtc_ds1685_resources,
+ .num_resources = ARRAY_SIZE(ioc3_rtc_ds1685_resources),
+ .platform_data = &ip30_rtc_platform_data,
+ .pdata_size = sizeof(ip30_rtc_platform_data),
+ .id = PLATFORM_DEVID_NONE,
+ }
+};
+
+static int ioc3_ds1685_setup(struct ioc3_priv_data *ipd)
+{
+ int ret, irq;
+
+ irq = ioc3_map_irq(ipd->pdev, 6, 0);
+
+ ret = mfd_add_devices(&ipd->pdev->dev, 0, ioc3_ds1685_cells,
+ ARRAY_SIZE(ioc3_ds1685_cells),
+ &ipd->pdev->resource[0], irq, NULL);
+ if (ret)
+ dev_err(&ipd->pdev->dev, "Failed to add DS1685 subdev\n");
+
+ return ret;
+}
+
+static struct resource ioc3_leds_resources[] = {
+ DEFINE_RES_MEM(offsetof(struct ioc3, gppr[0]),
+ sizeof_field(struct ioc3, gppr[0])),
+ DEFINE_RES_MEM(offsetof(struct ioc3, gppr[1]),
+ sizeof_field(struct ioc3, gppr[1])),
+};
+
+static struct mfd_cell ioc3_led_cells[] = {
+ {
+ .name = "ip30-leds",
+ .resources = ioc3_leds_resources,
+ .num_resources = ARRAY_SIZE(ioc3_leds_resources),
+ .id = PLATFORM_DEVID_NONE,
+ }
+};
+
+static int ioc3_led_setup(struct ioc3_priv_data *ipd)
+{
+ int ret;
+
+ ret = mfd_add_devices(&ipd->pdev->dev, 0, ioc3_led_cells,
+ ARRAY_SIZE(ioc3_led_cells),
+ &ipd->pdev->resource[0], 0, ipd->domain);
+ if (ret)
+ dev_err(&ipd->pdev->dev, "Failed to add LED subdev\n");
+
+ return ret;
+}
+
+static int ip27_baseio_setup(struct ioc3_priv_data *ipd)
+{
+ int ret, io_irq;
+
+ io_irq = ioc3_map_irq(ipd->pdev, PCI_SLOT(ipd->pdev->devfn),
+ PCI_INTERRUPT_INTB);
+ ret = ioc3_irq_domain_setup(ipd, io_irq);
+ if (ret)
+ return ret;
+
+ ret = ioc3_eth_setup(ipd);
+ if (ret)
+ return ret;
+
+ ret = ioc3_serial_setup(ipd);
+ if (ret)
+ return ret;
+
+ return ioc3_m48t35_setup(ipd);
+}
+
+static int ip27_baseio6g_setup(struct ioc3_priv_data *ipd)
+{
+ int ret, io_irq;
+
+ io_irq = ioc3_map_irq(ipd->pdev, PCI_SLOT(ipd->pdev->devfn),
+ PCI_INTERRUPT_INTB);
+ ret = ioc3_irq_domain_setup(ipd, io_irq);
+ if (ret)
+ return ret;
+
+ ret = ioc3_eth_setup(ipd);
+ if (ret)
+ return ret;
+
+ ret = ioc3_serial_setup(ipd);
+ if (ret)
+ return ret;
+
+ ret = ioc3_m48t35_setup(ipd);
+ if (ret)
+ return ret;
+
+ return ioc3_kbd_setup(ipd);
+}
+
+static int ip27_mio_setup(struct ioc3_priv_data *ipd)
+{
+ int ret;
+
+ ret = ioc3_irq_domain_setup(ipd, ipd->pdev->irq);
+ if (ret)
+ return ret;
+
+ ret = ioc3_serial_setup(ipd);
+ if (ret)
+ return ret;
+
+ return ioc3_kbd_setup(ipd);
+}
+
+static int ip30_sysboard_setup(struct ioc3_priv_data *ipd)
+{
+ int ret, io_irq;
+
+ io_irq = ioc3_map_irq(ipd->pdev, PCI_SLOT(ipd->pdev->devfn),
+ PCI_INTERRUPT_INTB);
+ ret = ioc3_irq_domain_setup(ipd, io_irq);
+ if (ret)
+ return ret;
+
+ ret = ioc3_eth_setup(ipd);
+ if (ret)
+ return ret;
+
+ ret = ioc3_serial_setup(ipd);
+ if (ret)
+ return ret;
+
+ ret = ioc3_kbd_setup(ipd);
+ if (ret)
+ return ret;
+
+ ret = ioc3_ds1685_setup(ipd);
+ if (ret)
+ return ret;
+
+ return ioc3_led_setup(ipd);
+}
+
+static int ioc3_menet_setup(struct ioc3_priv_data *ipd)
+{
+ int ret, io_irq;
+
+ io_irq = ioc3_map_irq(ipd->pdev, PCI_SLOT(ipd->pdev->devfn),
+ PCI_INTERRUPT_INTB);
+ ret = ioc3_irq_domain_setup(ipd, io_irq);
+ if (ret)
+ return ret;
+
+ ret = ioc3_eth_setup(ipd);
+ if (ret)
+ return ret;
+
+ return ioc3_serial_setup(ipd);
+}
+
+static int ioc3_menet4_setup(struct ioc3_priv_data *ipd)
+{
+ return ioc3_eth_setup(ipd);
+}
+
+static int ioc3_cad_duo_setup(struct ioc3_priv_data *ipd)
+{
+ int ret, io_irq;
+
+ io_irq = ioc3_map_irq(ipd->pdev, PCI_SLOT(ipd->pdev->devfn),
+ PCI_INTERRUPT_INTB);
+ ret = ioc3_irq_domain_setup(ipd, io_irq);
+ if (ret)
+ return ret;
+
+ ret = ioc3_eth_setup(ipd);
+ if (ret)
+ return ret;
+
+ return ioc3_kbd_setup(ipd);
+}
+
+/* Helper macro for filling the ioc3_infos array */
+#define IOC3_SID(_name, _sid, _setup) \
+ { \
+ .name = _name, \
+ .sid = PCI_VENDOR_ID_SGI | (IOC3_SUBSYS_ ## _sid << 16), \
+ .setup = _setup, \
+ }
+
+static struct {
+ const char *name;
+ u32 sid;
+ int (*setup)(struct ioc3_priv_data *ipd);
+} ioc3_infos[] = {
+ IOC3_SID("IP27 BaseIO6G", IP27_BASEIO6G, &ip27_baseio6g_setup),
+ IOC3_SID("IP27 MIO", IP27_MIO, &ip27_mio_setup),
+ IOC3_SID("IP27 BaseIO", IP27_BASEIO, &ip27_baseio_setup),
+ IOC3_SID("IP29 System Board", IP29_SYSBOARD, &ip27_baseio6g_setup),
+ IOC3_SID("IP30 System Board", IP30_SYSBOARD, &ip30_sysboard_setup),
+ IOC3_SID("MENET", MENET, &ioc3_menet_setup),
+ IOC3_SID("MENET4", MENET4, &ioc3_menet4_setup)
+};
+#undef IOC3_SID
+
+static int ioc3_setup(struct ioc3_priv_data *ipd)
+{
+ u32 sid;
+ int i;
+
+ /* Clear IRQs */
+ writel(~0, &ipd->regs->sio_iec);
+ writel(~0, &ipd->regs->sio_ir);
+ writel(0, &ipd->regs->eth.eier);
+ writel(~0, &ipd->regs->eth.eisr);
+
+ /* Read subsystem vendor id and subsystem id */
+ pci_read_config_dword(ipd->pdev, PCI_SUBSYSTEM_VENDOR_ID, &sid);
+
+ for (i = 0; i < ARRAY_SIZE(ioc3_infos); i++)
+ if (sid == ioc3_infos[i].sid) {
+ pr_info("ioc3: %s\n", ioc3_infos[i].name);
+ return ioc3_infos[i].setup(ipd);
+ }
+
+ /* Treat everything not identified by PCI subid as CAD DUO */
+ pr_info("ioc3: CAD DUO\n");
+ return ioc3_cad_duo_setup(ipd);
+}
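
The dispatch above hinges on one detail: a single 32-bit config read at PCI_SUBSYSTEM_VENDOR_ID returns the subsystem vendor ID in bits 15:0 and the subsystem device ID in bits 31:16, which is exactly the encoding IOC3_SID() builds. A hedged one-function restatement (the helper name is made up):

	#include <linux/pci.h>

	/* True if pdev carries the given SGI subsystem ID; mirrors ioc3_setup() */
	static bool demo_sid_matches(struct pci_dev *pdev, u16 subsys_id)
	{
		u32 sid;

		pci_read_config_dword(pdev, PCI_SUBSYSTEM_VENDOR_ID, &sid);
		return sid == (PCI_VENDOR_ID_SGI | ((u32)subsys_id << 16));
	}
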
+
+static int ioc3_mfd_probe(struct pci_dev *pdev,
+ const struct pci_device_id *pci_id)
+{
+ struct ioc3_priv_data *ipd;
+ struct ioc3 __iomem *regs;
+ int ret;
+
+ ret = pci_enable_device(pdev);
+ if (ret)
+ return ret;
+
+ pci_write_config_byte(pdev, PCI_LATENCY_TIMER, IOC3_LATENCY);
+ pci_set_master(pdev);
+
+ ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
+ if (ret) {
+ pr_err("%s: No usable DMA configuration, aborting.\n",
+ pci_name(pdev));
+ goto out_disable_device;
+ }
+
+ /* Set up per-IOC3 data */
+ ipd = devm_kzalloc(&pdev->dev, sizeof(struct ioc3_priv_data),
+ GFP_KERNEL);
+ if (!ipd) {
+ ret = -ENOMEM;
+ goto out_disable_device;
+ }
+ ipd->pdev = pdev;
+
+ /*
+ * Map all IOC3 registers. These are shared between subdevices
+ * so the main IOC3 module manages them.
+ */
+ regs = pci_ioremap_bar(pdev, 0);
+ if (!regs) {
+ dev_warn(&pdev->dev, "Unable to remap PCI BAR\n");
+ ret = -ENOMEM;
+ goto out_disable_device;
+ }
+ ipd->regs = regs;
+
+ /* Track PCI-device specific data */
+ pci_set_drvdata(pdev, ipd);
+
+ ret = ioc3_setup(ipd);
+ if (ret) {
+ /* Remove all already added MFD devices */
+ mfd_remove_devices(&ipd->pdev->dev);
+ if (ipd->domain) {
+ irq_domain_remove(ipd->domain);
+ free_irq(ipd->domain_irq, (void *)ipd);
+ }
+ pci_iounmap(pdev, regs);
+ goto out_disable_device;
+ }
+
+ return 0;
+
+out_disable_device:
+ pci_disable_device(pdev);
+ return ret;
+}
+
+static void ioc3_mfd_remove(struct pci_dev *pdev)
+{
+ struct ioc3_priv_data *ipd;
+
+ ipd = pci_get_drvdata(pdev);
+
+ /* Clear and disable all IRQs */
+ writel(~0, &ipd->regs->sio_iec);
+ writel(~0, &ipd->regs->sio_ir);
+
+ /* Release resources */
+ mfd_remove_devices(&ipd->pdev->dev);
+ if (ipd->domain) {
+ irq_domain_remove(ipd->domain);
+ free_irq(ipd->domain_irq, (void *)ipd);
+ }
+ pci_iounmap(pdev, ipd->regs);
+ pci_disable_device(pdev);
+}
+
+static struct pci_device_id ioc3_mfd_id_table[] = {
+ { PCI_VENDOR_ID_SGI, PCI_DEVICE_ID_SGI_IOC3, PCI_ANY_ID, PCI_ANY_ID },
+ { 0, },
+};
+MODULE_DEVICE_TABLE(pci, ioc3_mfd_id_table);
+
+static struct pci_driver ioc3_mfd_driver = {
+ .name = "IOC3",
+ .id_table = ioc3_mfd_id_table,
+ .probe = ioc3_mfd_probe,
+ .remove = ioc3_mfd_remove,
+};
+
+module_pci_driver(ioc3_mfd_driver);
+
+MODULE_AUTHOR("Thomas Bogendoerfer <tbogendoerfer@suse.de>");
+MODULE_DESCRIPTION("SGI IOC3 MFD driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/misc/cardreader/alcor_pci.c b/drivers/misc/cardreader/alcor_pci.c
index 259fe1dfec03..cd402c89189e 100644
--- a/drivers/misc/cardreader/alcor_pci.c
+++ b/drivers/misc/cardreader/alcor_pci.c
@@ -38,12 +38,18 @@ static const struct alcor_dev_cfg au6621_cfg = {
.dma = 1,
};
+static const struct alcor_dev_cfg au6625_cfg = {
+ .dma = 0,
+};
+
static const struct pci_device_id pci_ids[] = {
{ PCI_DEVICE(PCI_ID_ALCOR_MICRO, PCI_ID_AU6601),
.driver_data = (kernel_ulong_t)&alcor_cfg },
{ PCI_DEVICE(PCI_ID_ALCOR_MICRO, PCI_ID_AU6621),
.driver_data = (kernel_ulong_t)&au6621_cfg },
- { },
+ { PCI_DEVICE(PCI_ID_ALCOR_MICRO, PCI_ID_AU6625),
+ .driver_data = (kernel_ulong_t)&au6625_cfg },
+ {},
};
MODULE_DEVICE_TABLE(pci, pci_ids);
diff --git a/drivers/misc/cardreader/rts5261.c b/drivers/misc/cardreader/rts5261.c
index 32dcec2e9dfd..bc4967a6efa1 100644
--- a/drivers/misc/cardreader/rts5261.c
+++ b/drivers/misc/cardreader/rts5261.c
@@ -628,7 +628,8 @@ int rts5261_pci_switch_clock(struct rtsx_pcr *pcr, unsigned int card_clock,
u8 ssc_depth, bool initial_mode, bool double_clk, bool vpclk)
{
int err, clk;
- u8 n, clk_divider, mcu_cnt, div;
+ u16 n;
+ u8 clk_divider, mcu_cnt, div;
static const u8 depth[] = {
[RTSX_SSC_DEPTH_4M] = RTS5261_SSC_DEPTH_4M,
[RTSX_SSC_DEPTH_2M] = RTS5261_SSC_DEPTH_2M,
@@ -661,13 +662,13 @@ int rts5261_pci_switch_clock(struct rtsx_pcr *pcr, unsigned int card_clock,
return 0;
if (pcr->ops->conv_clk_and_div_n)
- n = (u8)pcr->ops->conv_clk_and_div_n(clk, CLK_TO_DIV_N);
+ n = pcr->ops->conv_clk_and_div_n(clk, CLK_TO_DIV_N);
else
- n = (u8)(clk - 4);
+ n = clk - 4;
if ((clk <= 4) || (n > 396))
return -EINVAL;
- mcu_cnt = (u8)(125/clk + 3);
+ mcu_cnt = 125/clk + 3;
if (mcu_cnt > 15)
mcu_cnt = 15;
@@ -676,7 +677,7 @@ int rts5261_pci_switch_clock(struct rtsx_pcr *pcr, unsigned int card_clock,
if (pcr->ops->conv_clk_and_div_n) {
int dbl_clk = pcr->ops->conv_clk_and_div_n(n,
DIV_N_TO_CLK) * 2;
- n = (u8)pcr->ops->conv_clk_and_div_n(dbl_clk,
+ n = pcr->ops->conv_clk_and_div_n(dbl_clk,
CLK_TO_DIV_N);
} else {
n = (n + 4) * 2 - 4;
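
The widening of n from u8 to u16 above is not cosmetic: with a u8, the (n > 396) guard can never fire and the double-clock arithmetic wraps silently. A hedged stand-alone illustration in plain C, with made-up input values:

	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		unsigned int clk = 400;		/* hypothetical divider input */
		uint8_t n8 = clk - 4;		/* 396 wraps to 140 in a u8   */
		uint16_t n16 = clk - 4;		/* 396 kept intact in a u16   */

		printf("u8:  n=%u guard(n>396)=%d\n", n8, n8 > 396);	/* 140 0 */
		printf("u16: n=%u guard(n>396)=%d\n", n16, n16 > 396);	/* 396 1 */
		return 0;
	}
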
diff --git a/drivers/misc/cardreader/rtsx_pcr.c b/drivers/misc/cardreader/rtsx_pcr.c
index fd7b2167103d..06038b325b02 100644
--- a/drivers/misc/cardreader/rtsx_pcr.c
+++ b/drivers/misc/cardreader/rtsx_pcr.c
@@ -1512,7 +1512,7 @@ static int rtsx_pci_probe(struct pci_dev *pcidev,
bar = 1;
len = pci_resource_len(pcidev, bar);
base = pci_resource_start(pcidev, bar);
- pcr->remap_addr = ioremap_nocache(base, len);
+ pcr->remap_addr = ioremap(base, len);
if (!pcr->remap_addr) {
ret = -ENOMEM;
goto free_handle;
diff --git a/drivers/misc/cxl/context.c b/drivers/misc/cxl/context.c
index aed9c445d1e2..fb2eff69e449 100644
--- a/drivers/misc/cxl/context.c
+++ b/drivers/misc/cxl/context.c
@@ -352,7 +352,7 @@ void cxl_context_free(struct cxl_context *ctx)
void cxl_context_mm_count_get(struct cxl_context *ctx)
{
if (ctx->mm)
- atomic_inc(&ctx->mm->mm_count);
+ mmgrab(ctx->mm);
}
void cxl_context_mm_count_put(struct cxl_context *ctx)
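
The mmgrab() above is the idiomatic spelling of the removed atomic_inc(): it pins the mm_struct itself (mm_count) rather than the address space (mm_users, taken via mmget()), and pairs with mmdrop(). A hedged sketch of the pairing; the helper names are made up:

	#include <linux/sched/mm.h>

	static struct mm_struct *demo_pin_mm(struct mm_struct *mm)
	{
		if (mm)
			mmgrab(mm);	/* pin the mm_struct; paired with mmdrop() */
		return mm;
	}

	static void demo_unpin_mm(struct mm_struct *mm)
	{
		if (mm)
			mmdrop(mm);
	}
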
diff --git a/drivers/misc/enclosure.c b/drivers/misc/enclosure.c
index 6d27ccfe0680..3c2d405bc79b 100644
--- a/drivers/misc/enclosure.c
+++ b/drivers/misc/enclosure.c
@@ -406,10 +406,9 @@ int enclosure_remove_device(struct enclosure_device *edev, struct device *dev)
cdev = &edev->component[i];
if (cdev->dev == dev) {
enclosure_remove_links(cdev);
- device_del(&cdev->cdev);
put_device(dev);
cdev->dev = NULL;
- return device_add(&cdev->cdev);
+ return 0;
}
}
return -ENODEV;
diff --git a/drivers/misc/fastrpc.c b/drivers/misc/fastrpc.c
index ae4ee27a63c4..e3e085e33d46 100644
--- a/drivers/misc/fastrpc.c
+++ b/drivers/misc/fastrpc.c
@@ -581,13 +581,6 @@ static void fastrpc_dma_buf_detatch(struct dma_buf *dmabuf,
kfree(a);
}
-static void *fastrpc_kmap(struct dma_buf *dmabuf, unsigned long pgnum)
-{
- struct fastrpc_buf *buf = dmabuf->priv;
-
- return buf->virt ? buf->virt + pgnum * PAGE_SIZE : NULL;
-}
-
static void *fastrpc_vmap(struct dma_buf *dmabuf)
{
struct fastrpc_buf *buf = dmabuf->priv;
@@ -611,7 +604,6 @@ static const struct dma_buf_ops fastrpc_dma_buf_ops = {
.map_dma_buf = fastrpc_map_dma_buf,
.unmap_dma_buf = fastrpc_unmap_dma_buf,
.mmap = fastrpc_mmap,
- .map = fastrpc_kmap,
.vmap = fastrpc_vmap,
.release = fastrpc_release,
};
diff --git a/drivers/misc/genwqe/card_ddcb.c b/drivers/misc/genwqe/card_ddcb.c
index 026c6ca24540..905106579935 100644
--- a/drivers/misc/genwqe/card_ddcb.c
+++ b/drivers/misc/genwqe/card_ddcb.c
@@ -1084,7 +1084,7 @@ static int setup_ddcb_queue(struct genwqe_dev *cd, struct ddcb_queue *queue)
queue->ddcb_daddr);
queue->ddcb_vaddr = NULL;
queue->ddcb_daddr = 0ull;
- return -ENODEV;
+ return rc;
}
@@ -1179,7 +1179,7 @@ static irqreturn_t genwqe_vf_isr(int irq, void *dev_id)
*/
static int genwqe_card_thread(void *data)
{
- int should_stop = 0, rc = 0;
+ int should_stop = 0;
struct genwqe_dev *cd = (struct genwqe_dev *)data;
while (!kthread_should_stop()) {
@@ -1187,12 +1187,12 @@ static int genwqe_card_thread(void *data)
genwqe_check_ddcb_queue(cd, &cd->queue);
if (GENWQE_POLLING_ENABLED) {
- rc = wait_event_interruptible_timeout(
+ wait_event_interruptible_timeout(
cd->queue_waitq,
genwqe_ddcbs_in_flight(cd) ||
(should_stop = kthread_should_stop()), 1);
} else {
- rc = wait_event_interruptible_timeout(
+ wait_event_interruptible_timeout(
cd->queue_waitq,
genwqe_next_ddcb_ready(cd) ||
(should_stop = kthread_should_stop()), HZ);
diff --git a/drivers/misc/isl29020.c b/drivers/misc/isl29020.c
index b6125620eb8f..fc5ff2805b94 100644
--- a/drivers/misc/isl29020.c
+++ b/drivers/misc/isl29020.c
@@ -173,6 +173,7 @@ static int isl29020_probe(struct i2c_client *client,
static int isl29020_remove(struct i2c_client *client)
{
+ pm_runtime_disable(&client->dev);
sysfs_remove_group(&client->dev.kobj, &m_als_gr);
return 0;
}
diff --git a/drivers/misc/lkdtm/bugs.c b/drivers/misc/lkdtm/bugs.c
index a4fdad04809a..de87693cf557 100644
--- a/drivers/misc/lkdtm/bugs.c
+++ b/drivers/misc/lkdtm/bugs.c
@@ -278,7 +278,7 @@ void lkdtm_STACK_GUARD_PAGE_TRAILING(void)
void lkdtm_UNSET_SMEP(void)
{
-#ifdef CONFIG_X86_64
+#if IS_ENABLED(CONFIG_X86_64) && !IS_ENABLED(CONFIG_UML)
#define MOV_CR4_DEPTH 64
void (*direct_write_cr4)(unsigned long val);
unsigned char *insn;
@@ -338,13 +338,13 @@ void lkdtm_UNSET_SMEP(void)
native_write_cr4(cr4);
}
#else
- pr_err("FAIL: this test is x86_64-only\n");
+ pr_err("XFAIL: this test is x86_64-only\n");
#endif
}
-#ifdef CONFIG_X86_32
void lkdtm_DOUBLE_FAULT(void)
{
+#ifdef CONFIG_X86_32
/*
* Trigger #DF by setting the stack limit to zero. This clobbers
* a GDT TLS slot, which is okay because the current task will die
@@ -373,6 +373,8 @@ void lkdtm_DOUBLE_FAULT(void)
asm volatile ("movw %0, %%ss; addl $0, (%%esp)" ::
"r" ((unsigned short)(GDT_ENTRY_TLS_MIN << 3)));
- panic("tried to double fault but didn't die\n");
-}
+ pr_err("FAIL: tried to double fault but didn't die\n");
+#else
+ pr_err("XFAIL: this test is ia32-only\n");
#endif
+}
diff --git a/drivers/misc/mei/bus.c b/drivers/misc/mei/bus.c
index a0a495c95e3c..8d468e0a950a 100644
--- a/drivers/misc/mei/bus.c
+++ b/drivers/misc/mei/bus.c
@@ -765,7 +765,7 @@ static ssize_t uuid_show(struct device *dev, struct device_attribute *a,
struct mei_cl_device *cldev = to_mei_cl_device(dev);
const uuid_le *uuid = mei_me_cl_uuid(cldev->me_cl);
- return scnprintf(buf, PAGE_SIZE, "%pUl", uuid);
+ return sprintf(buf, "%pUl", uuid);
}
static DEVICE_ATTR_RO(uuid);
@@ -775,7 +775,7 @@ static ssize_t version_show(struct device *dev, struct device_attribute *a,
struct mei_cl_device *cldev = to_mei_cl_device(dev);
u8 version = mei_me_cl_ver(cldev->me_cl);
- return scnprintf(buf, PAGE_SIZE, "%02X", version);
+ return sprintf(buf, "%02X", version);
}
static DEVICE_ATTR_RO(version);
@@ -797,7 +797,7 @@ static ssize_t max_conn_show(struct device *dev, struct device_attribute *a,
struct mei_cl_device *cldev = to_mei_cl_device(dev);
u8 maxconn = mei_me_cl_max_conn(cldev->me_cl);
- return scnprintf(buf, PAGE_SIZE, "%d", maxconn);
+ return sprintf(buf, "%d", maxconn);
}
static DEVICE_ATTR_RO(max_conn);
@@ -807,7 +807,7 @@ static ssize_t fixed_show(struct device *dev, struct device_attribute *a,
struct mei_cl_device *cldev = to_mei_cl_device(dev);
u8 fixed = mei_me_cl_fixed(cldev->me_cl);
- return scnprintf(buf, PAGE_SIZE, "%d", fixed);
+ return sprintf(buf, "%d", fixed);
}
static DEVICE_ATTR_RO(fixed);
@@ -817,7 +817,7 @@ static ssize_t max_len_show(struct device *dev, struct device_attribute *a,
struct mei_cl_device *cldev = to_mei_cl_device(dev);
u32 maxlen = mei_me_cl_max_len(cldev->me_cl);
- return scnprintf(buf, PAGE_SIZE, "%u", maxlen);
+ return sprintf(buf, "%u", maxlen);
}
static DEVICE_ATTR_RO(max_len);
diff --git a/drivers/misc/mei/hdcp/mei_hdcp.c b/drivers/misc/mei/hdcp/mei_hdcp.c
index 93027fd96c71..4c596c646ac0 100644
--- a/drivers/misc/mei/hdcp/mei_hdcp.c
+++ b/drivers/misc/mei/hdcp/mei_hdcp.c
@@ -757,11 +757,38 @@ static const struct component_master_ops mei_component_master_ops = {
.unbind = mei_component_master_unbind,
};
+/**
+ * mei_hdcp_component_match - compare function for matching mei hdcp.
+ *
+ * The function checks if the driver is i915, the subcomponent is HDCP,
+ * and the grandparent of the mei hdcp device and the parent of the
+ * i915 device are the same PCH device.
+ *
+ * @dev: master device
+ * @subcomponent: subcomponent to match (I915_COMPONENT_HDCP)
+ * @data: compare data (mei hdcp device)
+ *
+ * Return:
+ * * 1 - if components match
+ * * 0 - otherwise
+ */
static int mei_hdcp_component_match(struct device *dev, int subcomponent,
void *data)
{
- return !strcmp(dev->driver->name, "i915") &&
- subcomponent == I915_COMPONENT_HDCP;
+ struct device *base = data;
+
+ if (strcmp(dev->driver->name, "i915") ||
+ subcomponent != I915_COMPONENT_HDCP)
+ return 0;
+
+ base = base->parent;
+ if (!base)
+ return 0;
+
+ base = base->parent;
+ dev = dev->parent;
+
+ return (base && dev && dev == base);
}
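
Restated, the walk above compares the mei client's grandparent with the i915 device's parent, i.e. it checks that the MEI PCI function and the GFX PCI function sit under the same parent device (the PCH bus). A hedged generic restatement, with a made-up helper name:

	#include <linux/device.h>

	/* True if a's grandparent and b's parent are the same device */
	static bool demo_same_pch(struct device *a, struct device *b)
	{
		a = a->parent;			/* mei client -> MEI PCI function */
		a = a ? a->parent : NULL;	/* MEI PCI function -> PCH bus    */
		b = b->parent;			/* GFX PCI function -> PCH bus    */

		return a && b && a == b;
	}
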
static int mei_hdcp_probe(struct mei_cl_device *cldev,
@@ -785,7 +812,7 @@ static int mei_hdcp_probe(struct mei_cl_device *cldev,
master_match = NULL;
component_match_add_typed(&cldev->dev, &master_match,
- mei_hdcp_component_match, comp_master);
+ mei_hdcp_component_match, &cldev->dev);
if (IS_ERR_OR_NULL(master_match)) {
ret = -ENOMEM;
goto err_exit;
diff --git a/drivers/misc/mei/hw-me-regs.h b/drivers/misc/mei/hw-me-regs.h
index 7cd67fb2365d..87a0201ba6b3 100644
--- a/drivers/misc/mei/hw-me-regs.h
+++ b/drivers/misc/mei/hw-me-regs.h
@@ -81,10 +81,16 @@
#define MEI_DEV_ID_CMP_LP 0x02e0 /* Comet Point LP */
#define MEI_DEV_ID_CMP_LP_3 0x02e4 /* Comet Point LP 3 (iTouch) */
+
#define MEI_DEV_ID_CMP_V 0xA3BA /* Comet Point Lake V */
+#define MEI_DEV_ID_CMP_H 0x06e0 /* Comet Lake H */
+#define MEI_DEV_ID_CMP_H_3 0x06e4 /* Comet Lake H 3 (iTouch) */
+
#define MEI_DEV_ID_ICP_LP 0x34E0 /* Ice Lake Point LP */
+#define MEI_DEV_ID_JSP_N 0x4DE0 /* Jasper Lake Point N */
+
#define MEI_DEV_ID_TGP_LP 0xA0E0 /* Tiger Lake Point LP */
#define MEI_DEV_ID_MCC 0x4B70 /* Mule Creek Canyon (EHL) */
diff --git a/drivers/misc/mei/pci-me.c b/drivers/misc/mei/pci-me.c
index c845b7e40f26..2711451b3d87 100644
--- a/drivers/misc/mei/pci-me.c
+++ b/drivers/misc/mei/pci-me.c
@@ -99,11 +99,15 @@ static const struct pci_device_id mei_me_pci_tbl[] = {
{MEI_PCI_DEVICE(MEI_DEV_ID_CMP_LP, MEI_ME_PCH12_CFG)},
{MEI_PCI_DEVICE(MEI_DEV_ID_CMP_LP_3, MEI_ME_PCH8_CFG)},
{MEI_PCI_DEVICE(MEI_DEV_ID_CMP_V, MEI_ME_PCH12_CFG)},
+ {MEI_PCI_DEVICE(MEI_DEV_ID_CMP_H, MEI_ME_PCH12_CFG)},
+ {MEI_PCI_DEVICE(MEI_DEV_ID_CMP_H_3, MEI_ME_PCH8_CFG)},
{MEI_PCI_DEVICE(MEI_DEV_ID_ICP_LP, MEI_ME_PCH12_CFG)},
{MEI_PCI_DEVICE(MEI_DEV_ID_TGP_LP, MEI_ME_PCH15_CFG)},
+ {MEI_PCI_DEVICE(MEI_DEV_ID_JSP_N, MEI_ME_PCH15_CFG)},
+
{MEI_PCI_DEVICE(MEI_DEV_ID_MCC, MEI_ME_PCH15_CFG)},
{MEI_PCI_DEVICE(MEI_DEV_ID_MCC_4, MEI_ME_PCH8_CFG)},
diff --git a/drivers/misc/mic/card/mic_debugfs.c b/drivers/misc/mic/card/mic_debugfs.c
index 3ee3d2402634..b58608829b18 100644
--- a/drivers/misc/mic/card/mic_debugfs.c
+++ b/drivers/misc/mic/card/mic_debugfs.c
@@ -65,9 +65,6 @@ void __init mic_create_card_debug_dir(struct mic_driver *mdrv)
*/
void mic_delete_card_debug_dir(struct mic_driver *mdrv)
{
- if (!mdrv->dbg_dir)
- return;
-
debugfs_remove_recursive(mdrv->dbg_dir);
}
diff --git a/drivers/misc/mic/cosm/cosm_debugfs.c b/drivers/misc/mic/cosm/cosm_debugfs.c
index 2fc9f4bf7001..68a731fd86de 100644
--- a/drivers/misc/mic/cosm/cosm_debugfs.c
+++ b/drivers/misc/mic/cosm/cosm_debugfs.c
@@ -102,9 +102,6 @@ void cosm_create_debug_dir(struct cosm_device *cdev)
void cosm_delete_debug_dir(struct cosm_device *cdev)
{
- if (!cdev->dbg_dir)
- return;
-
debugfs_remove_recursive(cdev->dbg_dir);
}
diff --git a/drivers/misc/mic/host/mic_debugfs.c b/drivers/misc/mic/host/mic_debugfs.c
index 8a8e41677501..ab0db7a2ac8c 100644
--- a/drivers/misc/mic/host/mic_debugfs.c
+++ b/drivers/misc/mic/host/mic_debugfs.c
@@ -129,9 +129,6 @@ void mic_create_debug_dir(struct mic_device *mdev)
*/
void mic_delete_debug_dir(struct mic_device *mdev)
{
- if (!mdev->dbg_dir)
- return;
-
debugfs_remove_recursive(mdev->dbg_dir);
}
diff --git a/drivers/misc/mic/scif/scif_nodeqp.c b/drivers/misc/mic/scif/scif_nodeqp.c
index c25fd40f3bd0..fcd999f50d14 100644
--- a/drivers/misc/mic/scif/scif_nodeqp.c
+++ b/drivers/misc/mic/scif/scif_nodeqp.c
@@ -788,7 +788,7 @@ scif_node_add(struct scif_dev *scifdev, struct scifmsg *msg)
"failed to setup interrupts for %d\n", msg->src.node);
goto interrupt_setup_error;
}
- newdev->mmio.va = ioremap_nocache(msg->payload[1], sdev->mmio->len);
+ newdev->mmio.va = ioremap(msg->payload[1], sdev->mmio->len);
if (!newdev->mmio.va) {
dev_err(&scifdev->sdev->dev,
"failed to map mmio for %d\n", msg->src.node);
diff --git a/drivers/misc/pti.c b/drivers/misc/pti.c
index 359c5bab45ac..b7f510676cd6 100644
--- a/drivers/misc/pti.c
+++ b/drivers/misc/pti.c
@@ -792,7 +792,7 @@ static int pti_pci_probe(struct pci_dev *pdev,
const struct pci_device_id *ent)
{
unsigned int a;
- int retval = -EINVAL;
+ int retval;
int pci_bar = 1;
dev_dbg(&pdev->dev, "%s %s(%d): PTI PCI ID %04x:%04x\n", __FILE__,
@@ -834,7 +834,7 @@ static int pti_pci_probe(struct pci_dev *pdev,
}
drv_data->aperture_base = drv_data->pti_addr+APERTURE_14;
drv_data->pti_ioaddr =
- ioremap_nocache((u32)drv_data->aperture_base,
+ ioremap((u32)drv_data->aperture_base,
APERTURE_LEN);
if (!drv_data->pti_ioaddr) {
retval = -ENOMEM;
@@ -910,7 +910,7 @@ static struct pci_driver pti_pci_driver = {
*/
static int __init pti_init(void)
{
- int retval = -EINVAL;
+ int retval;
/* First register module as tty device */
diff --git a/drivers/misc/pvpanic.c b/drivers/misc/pvpanic.c
index 95ff7c5a1dfb..a6e1a8983e1f 100644
--- a/drivers/misc/pvpanic.c
+++ b/drivers/misc/pvpanic.c
@@ -10,16 +10,16 @@
#include <linux/acpi.h>
#include <linux/kernel.h>
+#include <linux/kexec.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/platform_device.h>
#include <linux/types.h>
+#include <uapi/misc/pvpanic.h>
static void __iomem *base;
-#define PVPANIC_PANICKED (1 << 0)
-
MODULE_AUTHOR("Hu Tao <hutao@cn.fujitsu.com>");
MODULE_DESCRIPTION("pvpanic device driver");
MODULE_LICENSE("GPL");
@@ -34,7 +34,13 @@ static int
pvpanic_panic_notify(struct notifier_block *nb, unsigned long code,
void *unused)
{
- pvpanic_send_event(PVPANIC_PANICKED);
+ unsigned int event = PVPANIC_PANICKED;
+
+ if (kexec_crash_loaded())
+ event = PVPANIC_CRASH_LOADED;
+
+ pvpanic_send_event(event);
+
return NOTIFY_DONE;
}
diff --git a/drivers/misc/sgi-xp/xpnet.c b/drivers/misc/sgi-xp/xpnet.c
index f7d610a22347..ada94e6a3c91 100644
--- a/drivers/misc/sgi-xp/xpnet.c
+++ b/drivers/misc/sgi-xp/xpnet.c
@@ -496,7 +496,7 @@ xpnet_dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
* Deal with transmit timeouts coming from the network layer.
*/
static void
-xpnet_dev_tx_timeout(struct net_device *dev)
+xpnet_dev_tx_timeout(struct net_device *dev, unsigned int txqueue)
{
dev->stats.tx_errors++;
}
diff --git a/drivers/misc/sram-exec.c b/drivers/misc/sram-exec.c
index d054e2842a5f..cb57ac6ab4c3 100644
--- a/drivers/misc/sram-exec.c
+++ b/drivers/misc/sram-exec.c
@@ -85,6 +85,7 @@ void *sram_exec_copy(struct gen_pool *pool, void *dst, void *src,
unsigned long base;
int pages;
void *dst_cpy;
+ int ret;
mutex_lock(&exec_pool_list_mutex);
list_for_each_entry(p, &exec_pool_list, list) {
@@ -104,16 +105,28 @@ void *sram_exec_copy(struct gen_pool *pool, void *dst, void *src,
mutex_lock(&part->lock);
- set_memory_nx((unsigned long)base, pages);
- set_memory_rw((unsigned long)base, pages);
+ ret = set_memory_nx((unsigned long)base, pages);
+ if (ret)
+ goto error_out;
+ ret = set_memory_rw((unsigned long)base, pages);
+ if (ret)
+ goto error_out;
dst_cpy = fncpy(dst, src, size);
- set_memory_ro((unsigned long)base, pages);
- set_memory_x((unsigned long)base, pages);
+ ret = set_memory_ro((unsigned long)base, pages);
+ if (ret)
+ goto error_out;
+ ret = set_memory_x((unsigned long)base, pages);
+ if (ret)
+ goto error_out;
mutex_unlock(&part->lock);
return dst_cpy;
+
+error_out:
+ mutex_unlock(&part->lock);
+ return NULL;
}
EXPORT_SYMBOL_GPL(sram_exec_copy);
diff --git a/drivers/misc/ti-st/st_core.c b/drivers/misc/ti-st/st_core.c
index 2ae9948a91e1..14136d2cc8f9 100644
--- a/drivers/misc/ti-st/st_core.c
+++ b/drivers/misc/ti-st/st_core.c
@@ -736,8 +736,8 @@ static int st_tty_open(struct tty_struct *tty)
static void st_tty_close(struct tty_struct *tty)
{
- unsigned char i = ST_MAX_CHANNELS;
- unsigned long flags = 0;
+ unsigned char i;
+ unsigned long flags;
struct st_data_s *st_gdata = tty->disc_data;
pr_info("%s ", __func__);
diff --git a/drivers/misc/tsl2550.c b/drivers/misc/tsl2550.c
index 09db397df287..6d71865c8042 100644
--- a/drivers/misc/tsl2550.c
+++ b/drivers/misc/tsl2550.c
@@ -148,16 +148,14 @@ static int tsl2550_calculate_lux(u8 ch0, u8 ch1)
u16 c0 = count_lut[ch0];
u16 c1 = count_lut[ch1];
- /*
- * Calculate ratio.
- * Note: the "128" is a scaling factor
- */
- u8 r = 128;
-
/* Avoid division by 0 and count 1 cannot be greater than count 0 */
if (c1 <= c0)
if (c0) {
- r = c1 * 128 / c0;
+ /*
+ * Calculate ratio.
+ * Note: the "128" is a scaling factor
+ */
+ u8 r = c1 * 128 / c0;
/* Calculate LUX */
lux = ((c0 - c1) * ratio_lut[r]) / 256;
diff --git a/drivers/misc/vmw_balloon.c b/drivers/misc/vmw_balloon.c
index 5e6be1527571..b837e7eba5f7 100644
--- a/drivers/misc/vmw_balloon.c
+++ b/drivers/misc/vmw_balloon.c
@@ -17,6 +17,7 @@
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/types.h>
+#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/vmalloc.h>
diff --git a/drivers/misc/xilinx_sdfec.c b/drivers/misc/xilinx_sdfec.c
index 11835969e982..71bbaa56bdb5 100644
--- a/drivers/misc/xilinx_sdfec.c
+++ b/drivers/misc/xilinx_sdfec.c
@@ -733,7 +733,7 @@ static int xsdfec_set_order(struct xsdfec_dev *xsdfec, void __user *arg)
enum xsdfec_order order;
int err;
- err = get_user(order, (enum xsdfec_order *)arg);
+ err = get_user(order, (enum xsdfec_order __user *)arg);
if (err)
return -EFAULT;
@@ -1025,25 +1025,25 @@ static long xsdfec_dev_compat_ioctl(struct file *file, unsigned int cmd,
}
#endif
-static unsigned int xsdfec_poll(struct file *file, poll_table *wait)
+static __poll_t xsdfec_poll(struct file *file, poll_table *wait)
{
- unsigned int mask = 0;
+ __poll_t mask = 0;
struct xsdfec_dev *xsdfec;
xsdfec = container_of(file->private_data, struct xsdfec_dev, miscdev);
if (!xsdfec)
- return POLLNVAL | POLLHUP;
+ return EPOLLNVAL | EPOLLHUP;
poll_wait(file, &xsdfec->waitq, wait);
/* XSDFEC ISR detected an error */
spin_lock_irqsave(&xsdfec->error_data_lock, xsdfec->flags);
if (xsdfec->state_updated)
- mask |= POLLIN | POLLPRI;
+ mask |= EPOLLIN | EPOLLPRI;
if (xsdfec->stats_updated)
- mask |= POLLIN | POLLRDNORM;
+ mask |= EPOLLIN | EPOLLRDNORM;
spin_unlock_irqrestore(&xsdfec->error_data_lock, xsdfec->flags);
return mask;
diff --git a/drivers/mmc/core/block.c b/drivers/mmc/core/block.c
index 95b41c0891d0..663d87924e5e 100644
--- a/drivers/mmc/core/block.c
+++ b/drivers/mmc/core/block.c
@@ -1107,7 +1107,7 @@ static void mmc_blk_issue_discard_rq(struct mmc_queue *mq, struct request *req)
card->erase_arg == MMC_TRIM_ARG ?
INAND_CMD38_ARG_TRIM :
INAND_CMD38_ARG_ERASE,
- 0);
+ card->ext_csd.generic_cmd6_time);
}
if (!err)
err = mmc_erase(card, from, nr, card->erase_arg);
@@ -1149,7 +1149,7 @@ retry:
arg == MMC_SECURE_TRIM1_ARG ?
INAND_CMD38_ARG_SECTRIM1 :
INAND_CMD38_ARG_SECERASE,
- 0);
+ card->ext_csd.generic_cmd6_time);
if (err)
goto out_retry;
}
@@ -1167,7 +1167,7 @@ retry:
err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
INAND_CMD38_ARG_EXT_CSD,
INAND_CMD38_ARG_SECTRIM2,
- 0);
+ card->ext_csd.generic_cmd6_time);
if (err)
goto out_retry;
}
diff --git a/drivers/mmc/core/core.c b/drivers/mmc/core/core.c
index abf8f5eb0a1c..aa54d359dab7 100644
--- a/drivers/mmc/core/core.c
+++ b/drivers/mmc/core/core.c
@@ -2330,7 +2330,13 @@ void mmc_rescan(struct work_struct *work)
}
for (i = 0; i < ARRAY_SIZE(freqs); i++) {
- if (!mmc_rescan_try_freq(host, max(freqs[i], host->f_min)))
+ unsigned int freq = freqs[i];
+
+ if (freq > host->f_max) {
+ if (i + 1 < ARRAY_SIZE(freqs))
+ continue;
+ freq = host->f_max;
+ }
+ if (!mmc_rescan_try_freq(host, max(freq, host->f_min)))
break;
if (freqs[i] <= host->f_min)
break;
@@ -2344,7 +2350,7 @@ void mmc_rescan(struct work_struct *work)
void mmc_start_host(struct mmc_host *host)
{
- host->f_init = max(freqs[0], host->f_min);
+ host->f_init = max(min(freqs[0], host->f_max), host->f_min);
host->rescan_disable = 0;
host->ios.power_mode = MMC_POWER_UNDEFINED;
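
The new loop body clamps the retry table to the host's window: entries above f_max are skipped while a slower table entry remains, the last entry is clamped down to f_max, and every candidate is raised to at least f_min. A hedged userspace model of just that filtering; the table mirrors the core's freqs[], while the f_min/f_max values are made up:

	#include <stdio.h>

	static const unsigned int freqs[] = { 400000, 300000, 200000, 100000 };

	int main(void)
	{
		unsigned int f_min = 100000, f_max = 250000;
		unsigned int i, n = sizeof(freqs) / sizeof(freqs[0]);

		for (i = 0; i < n; i++) {
			unsigned int freq = freqs[i];

			if (freq > f_max) {
				if (i + 1 < n)
					continue;	/* a slower entry remains  */
				freq = f_max;		/* last entry: clamp down  */
			}
			if (freq < f_min)
				freq = f_min;		/* never probe below f_min */
			printf("try %u Hz\n", freq);	/* prints 200000, 100000   */
			if (freqs[i] <= f_min)
				break;
		}
		return 0;
	}
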
diff --git a/drivers/mmc/core/host.c b/drivers/mmc/core/host.c
index 105b7a7c0251..c8768726d925 100644
--- a/drivers/mmc/core/host.c
+++ b/drivers/mmc/core/host.c
@@ -175,8 +175,6 @@ int mmc_of_parse(struct mmc_host *host)
struct device *dev = host->parent;
u32 bus_width, drv_type, cd_debounce_delay_ms;
int ret;
- bool cd_cap_invert, cd_gpio_invert = false;
- bool ro_cap_invert, ro_gpio_invert = false;
if (!dev || !dev_fwnode(dev))
return 0;
@@ -219,10 +217,12 @@ int mmc_of_parse(struct mmc_host *host)
*/
/* Parse Card Detection */
+
if (device_property_read_bool(dev, "non-removable")) {
host->caps |= MMC_CAP_NONREMOVABLE;
} else {
- cd_cap_invert = device_property_read_bool(dev, "cd-inverted");
+ if (device_property_read_bool(dev, "cd-inverted"))
+ host->caps2 |= MMC_CAP2_CD_ACTIVE_HIGH;
if (device_property_read_u32(dev, "cd-debounce-delay-ms",
&cd_debounce_delay_ms))
@@ -232,32 +232,19 @@ int mmc_of_parse(struct mmc_host *host)
host->caps |= MMC_CAP_NEEDS_POLL;
ret = mmc_gpiod_request_cd(host, "cd", 0, false,
- cd_debounce_delay_ms * 1000,
- &cd_gpio_invert);
+ cd_debounce_delay_ms * 1000);
if (!ret)
dev_info(host->parent, "Got CD GPIO\n");
else if (ret != -ENOENT && ret != -ENOSYS)
return ret;
-
- /*
- * There are two ways to flag that the CD line is inverted:
- * through the cd-inverted flag and by the GPIO line itself
- * being inverted from the GPIO subsystem. This is a leftover
- * from the times when the GPIO subsystem did not make it
- * possible to flag a line as inverted.
- *
- * If the capability on the host AND the GPIO line are
- * both inverted, the end result is that the CD line is
- * not inverted.
- */
- if (cd_cap_invert ^ cd_gpio_invert)
- host->caps2 |= MMC_CAP2_CD_ACTIVE_HIGH;
}
/* Parse Write Protection */
- ro_cap_invert = device_property_read_bool(dev, "wp-inverted");
- ret = mmc_gpiod_request_ro(host, "wp", 0, 0, &ro_gpio_invert);
+ if (device_property_read_bool(dev, "wp-inverted"))
+ host->caps2 |= MMC_CAP2_RO_ACTIVE_HIGH;
+
+ ret = mmc_gpiod_request_ro(host, "wp", 0, 0);
if (!ret)
dev_info(host->parent, "Got WP GPIO\n");
else if (ret != -ENOENT && ret != -ENOSYS)
@@ -266,10 +253,6 @@ int mmc_of_parse(struct mmc_host *host)
if (device_property_read_bool(dev, "disable-wp"))
host->caps2 |= MMC_CAP2_NO_WRITE_PROTECT;
- /* See the comment on CD inversion above */
- if (ro_cap_invert ^ ro_gpio_invert)
- host->caps2 |= MMC_CAP2_RO_ACTIVE_HIGH;
-
if (device_property_read_bool(dev, "cap-sd-highspeed"))
host->caps |= MMC_CAP_SD_HIGHSPEED;
if (device_property_read_bool(dev, "cap-mmc-highspeed"))
diff --git a/drivers/mmc/core/mmc_ops.c b/drivers/mmc/core/mmc_ops.c
index 09113b9ad679..da425ee2d9bf 100644
--- a/drivers/mmc/core/mmc_ops.c
+++ b/drivers/mmc/core/mmc_ops.c
@@ -19,7 +19,9 @@
#include "host.h"
#include "mmc_ops.h"
-#define MMC_OPS_TIMEOUT_MS (10 * 60 * 1000) /* 10 minute timeout */
+#define MMC_OPS_TIMEOUT_MS (10 * 60 * 1000) /* 10 min */
+#define MMC_BKOPS_TIMEOUT_MS (120 * 1000) /* 120s */
+#define MMC_CACHE_FLUSH_TIMEOUT_MS (30 * 1000) /* 30s */
static const u8 tuning_blk_pattern_4bit[] = {
0xff, 0x0f, 0xff, 0x00, 0xff, 0xcc, 0xc3, 0xcc,
@@ -458,10 +460,6 @@ static int mmc_poll_for_busy(struct mmc_card *card, unsigned int timeout_ms,
bool expired = false;
bool busy = false;
- /* We have an unspecified cmd timeout, use the fallback value. */
- if (!timeout_ms)
- timeout_ms = MMC_OPS_TIMEOUT_MS;
-
/*
* In cases when not allowed to poll by using CMD13 or because we aren't
* capable of polling by using ->card_busy(), then rely on waiting the
@@ -534,14 +532,19 @@ int __mmc_switch(struct mmc_card *card, u8 set, u8 index, u8 value,
mmc_retune_hold(host);
+ if (!timeout_ms) {
+ pr_warn("%s: unspecified timeout for CMD6 - use generic\n",
+ mmc_hostname(host));
+ timeout_ms = card->ext_csd.generic_cmd6_time;
+ }
+
/*
- * If the cmd timeout and the max_busy_timeout of the host are both
- * specified, let's validate them. A failure means we need to prevent
- * the host from doing hw busy detection, which is done by converting
- * to a R1 response instead of a R1B.
+ * If the max_busy_timeout of the host is specified, make sure it's
+ * enough to fit the used timeout_ms. In case it's not, let's instruct
+ * the host to avoid HW busy detection, by converting to a R1 response
+ * instead of a R1B.
*/
- if (timeout_ms && host->max_busy_timeout &&
- (timeout_ms > host->max_busy_timeout))
+ if (host->max_busy_timeout && (timeout_ms > host->max_busy_timeout))
use_r1b_resp = false;
cmd.opcode = MMC_SWITCH;
@@ -552,10 +555,6 @@ int __mmc_switch(struct mmc_card *card, u8 set, u8 index, u8 value,
cmd.flags = MMC_CMD_AC;
if (use_r1b_resp) {
cmd.flags |= MMC_RSP_SPI_R1B | MMC_RSP_R1B;
- /*
- * A busy_timeout of zero means the host can decide to use
- * whatever value it finds suitable.
- */
cmd.busy_timeout = timeout_ms;
} else {
cmd.flags |= MMC_RSP_SPI_R1 | MMC_RSP_R1;
@@ -941,7 +940,7 @@ void mmc_run_bkops(struct mmc_card *card)
* urgent levels by using an asynchronous background task, when idle.
*/
err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
- EXT_CSD_BKOPS_START, 1, MMC_OPS_TIMEOUT_MS);
+ EXT_CSD_BKOPS_START, 1, MMC_BKOPS_TIMEOUT_MS);
if (err)
pr_warn("%s: Error %d starting bkops\n",
mmc_hostname(card->host), err);
@@ -961,7 +960,8 @@ int mmc_flush_cache(struct mmc_card *card)
(card->ext_csd.cache_size > 0) &&
(card->ext_csd.cache_ctrl & 1)) {
err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
- EXT_CSD_FLUSH_CACHE, 1, 0);
+ EXT_CSD_FLUSH_CACHE, 1,
+ MMC_CACHE_FLUSH_TIMEOUT_MS);
if (err)
pr_err("%s: cache flush error %d\n",
mmc_hostname(card->host), err);
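
The R1B-versus-R1 choice above boils down to one comparison: if the host's hardware busy timer cannot cover the required CMD6 timeout, drop to an R1 response and let the software busy-polling do the waiting. A hedged restatement of just that decision, with a made-up helper name:

	#include <linux/mmc/core.h>
	#include <linux/mmc/host.h>

	/* Flags for a CMD6-style command: R1B if the host can time it, else R1 */
	static u32 demo_switch_flags(struct mmc_host *host, unsigned int timeout_ms)
	{
		bool use_r1b = !host->max_busy_timeout ||
			       timeout_ms <= host->max_busy_timeout;

		if (use_r1b)
			return MMC_RSP_SPI_R1B | MMC_RSP_R1B | MMC_CMD_AC;
		return MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_AC;
	}
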
diff --git a/drivers/mmc/core/slot-gpio.c b/drivers/mmc/core/slot-gpio.c
index da2596c5fa28..05e907451df9 100644
--- a/drivers/mmc/core/slot-gpio.c
+++ b/drivers/mmc/core/slot-gpio.c
@@ -19,7 +19,6 @@
struct mmc_gpio {
struct gpio_desc *ro_gpio;
struct gpio_desc *cd_gpio;
- bool override_cd_active_level;
irqreturn_t (*cd_gpio_isr)(int irq, void *dev_id);
char *ro_label;
char *cd_label;
@@ -80,13 +79,6 @@ int mmc_gpio_get_cd(struct mmc_host *host)
return -ENOSYS;
cansleep = gpiod_cansleep(ctx->cd_gpio);
- if (ctx->override_cd_active_level) {
- int value = cansleep ?
- gpiod_get_raw_value_cansleep(ctx->cd_gpio) :
- gpiod_get_raw_value(ctx->cd_gpio);
- return !value ^ !!(host->caps2 & MMC_CAP2_CD_ACTIVE_HIGH);
- }
-
return cansleep ?
gpiod_get_value_cansleep(ctx->cd_gpio) :
gpiod_get_value(ctx->cd_gpio);
@@ -168,8 +160,6 @@ EXPORT_SYMBOL(mmc_gpio_set_cd_isr);
* @idx: index of the GPIO to obtain in the consumer
* @override_active_level: ignore %GPIO_ACTIVE_LOW flag
* @debounce: debounce time in microseconds
- * @gpio_invert: will return whether the GPIO line is inverted or not, set
- * to NULL to ignore
*
* Note that this must be called prior to mmc_add_host()
* otherwise the caller must also call mmc_gpiod_request_cd_irq().
@@ -178,7 +168,7 @@ EXPORT_SYMBOL(mmc_gpio_set_cd_isr);
*/
int mmc_gpiod_request_cd(struct mmc_host *host, const char *con_id,
unsigned int idx, bool override_active_level,
- unsigned int debounce, bool *gpio_invert)
+ unsigned int debounce)
{
struct mmc_gpio *ctx = host->slot.handler_priv;
struct gpio_desc *desc;
@@ -194,10 +184,14 @@ int mmc_gpiod_request_cd(struct mmc_host *host, const char *con_id,
ctx->cd_debounce_delay_ms = debounce / 1000;
}
- if (gpio_invert)
- *gpio_invert = !gpiod_is_active_low(desc);
+ /* override forces default (active-low) polarity ... */
+ if (override_active_level && !gpiod_is_active_low(desc))
+ gpiod_toggle_active_low(desc);
+
+ /* ... or active-high */
+ if (host->caps2 & MMC_CAP2_CD_ACTIVE_HIGH)
+ gpiod_toggle_active_low(desc);
- ctx->override_cd_active_level = override_active_level;
ctx->cd_gpio = desc;
return 0;
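
With override_cd_active_level gone from the read path, polarity is folded into the GPIO descriptor once at request time and gpiolib applies it on every read. A hedged sketch of the two halves; the function names are made up, while gpiod_toggle_active_low() is the real helper used above:

	#include <linux/gpio/consumer.h>

	/* One-time polarity fixup at request time, as done above */
	static void demo_apply_cd_polarity(struct gpio_desc *cd, bool active_high)
	{
		if (active_high)
			gpiod_toggle_active_low(cd);
	}

	/* Every later read is a plain logical read; no raw values, no XOR */
	static int demo_card_present(struct gpio_desc *cd)
	{
		return gpiod_get_value_cansleep(cd);
	}
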
@@ -218,14 +212,11 @@ EXPORT_SYMBOL(mmc_can_gpio_cd);
* @con_id: function within the GPIO consumer
* @idx: index of the GPIO to obtain in the consumer
* @debounce: debounce time in microseconds
- * @gpio_invert: will return whether the GPIO line is inverted or not,
- * set to NULL to ignore
*
* Returns zero on success, else an error.
*/
int mmc_gpiod_request_ro(struct mmc_host *host, const char *con_id,
- unsigned int idx,
- unsigned int debounce, bool *gpio_invert)
+ unsigned int idx, unsigned int debounce)
{
struct mmc_gpio *ctx = host->slot.handler_priv;
struct gpio_desc *desc;
@@ -241,8 +232,8 @@ int mmc_gpiod_request_ro(struct mmc_host *host, const char *con_id,
return ret;
}
- if (gpio_invert)
- *gpio_invert = !gpiod_is_active_low(desc);
+ if (host->caps2 & MMC_CAP2_RO_ACTIVE_HIGH)
+ gpiod_toggle_active_low(desc);
ctx->ro_gpio = desc;
diff --git a/drivers/mmc/host/Kconfig b/drivers/mmc/host/Kconfig
index d06b2dfe3c95..3a5089f0332c 100644
--- a/drivers/mmc/host/Kconfig
+++ b/drivers/mmc/host/Kconfig
@@ -501,6 +501,7 @@ config MMC_SDHCI_MSM
depends on ARCH_QCOM || (ARM && COMPILE_TEST)
depends on MMC_SDHCI_PLTFM
select MMC_SDHCI_IO_ACCESSORS
+ select MMC_CQHCI
help
This selects the Secure Digital Host Controller Interface (SDHCI)
support present in Qualcomm SOCs. The controller supports
@@ -990,6 +991,7 @@ config MMC_SDHCI_BRCMSTB
tristate "Broadcom SDIO/SD/MMC support"
depends on ARCH_BRCMSTB || BMIPS_GENERIC
depends on MMC_SDHCI_PLTFM
+ select MMC_CQHCI
default y
help
This selects support for the SDIO/SD/MMC Host Controller on
@@ -1010,6 +1012,7 @@ config MMC_SDHCI_OMAP
depends on MMC_SDHCI_PLTFM && OF
select THERMAL
imply TI_SOC_THERMAL
+ select MMC_SDHCI_EXTERNAL_DMA if DMA_ENGINE
help
This selects the Secure Digital Host Controller Interface (SDHCI)
support present in TI's DRA7 SOCs. The controller supports
@@ -1040,3 +1043,6 @@ config MMC_OWL
help
This selects support for the SD/MMC Host Controller on
Actions Semi Owl SoCs.
+
+config MMC_SDHCI_EXTERNAL_DMA
+ bool
diff --git a/drivers/mmc/host/atmel-mci.c b/drivers/mmc/host/atmel-mci.c
index 6f065bb5c55a..aeaaa5314924 100644
--- a/drivers/mmc/host/atmel-mci.c
+++ b/drivers/mmc/host/atmel-mci.c
@@ -2645,7 +2645,7 @@ static int atmci_runtime_resume(struct device *dev)
{
struct atmel_mci *host = dev_get_drvdata(dev);
- pinctrl_pm_select_default_state(dev);
+ pinctrl_select_default_state(dev);
return clk_prepare_enable(host->mck);
}
diff --git a/drivers/mmc/host/au1xmmc.c b/drivers/mmc/host/au1xmmc.c
index bc8aeb47a7b4..8823680ca42c 100644
--- a/drivers/mmc/host/au1xmmc.c
+++ b/drivers/mmc/host/au1xmmc.c
@@ -984,12 +984,9 @@ static int au1xmmc_probe(struct platform_device *pdev)
goto out2;
}
- r = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
- if (!r) {
- dev_err(&pdev->dev, "no IRQ defined\n");
+ host->irq = platform_get_irq(pdev, 0);
+ if (host->irq < 0)
goto out3;
- }
- host->irq = r->start;
mmc->ops = &au1xmmc_ops;
diff --git a/drivers/mmc/host/bcm2835.c b/drivers/mmc/host/bcm2835.c
index 99f61fd2a658..c3d949847cbd 100644
--- a/drivers/mmc/host/bcm2835.c
+++ b/drivers/mmc/host/bcm2835.c
@@ -1393,7 +1393,17 @@ static int bcm2835_probe(struct platform_device *pdev)
host->dma_chan = NULL;
host->dma_desc = NULL;
- host->dma_chan_rxtx = dma_request_slave_channel(dev, "rx-tx");
+ host->dma_chan_rxtx = dma_request_chan(dev, "rx-tx");
+ if (IS_ERR(host->dma_chan_rxtx)) {
+ ret = PTR_ERR(host->dma_chan_rxtx);
+ host->dma_chan_rxtx = NULL;
+
+ if (ret == -EPROBE_DEFER)
+ goto err;
+
+ /* Ignore errors to fall back to PIO mode */
+ }
+
clk = devm_clk_get(dev, NULL);
if (IS_ERR(clk)) {
diff --git a/drivers/mmc/host/cavium-thunderx.c b/drivers/mmc/host/cavium-thunderx.c
index eee08d81b242..76013bbbcff3 100644
--- a/drivers/mmc/host/cavium-thunderx.c
+++ b/drivers/mmc/host/cavium-thunderx.c
@@ -76,8 +76,10 @@ static int thunder_mmc_probe(struct pci_dev *pdev,
return ret;
host->base = pcim_iomap(pdev, 0, pci_resource_len(pdev, 0));
- if (!host->base)
- return -EINVAL;
+ if (!host->base) {
+ ret = -EINVAL;
+ goto error;
+ }
/* On ThunderX these are identical */
host->dma_base = host->base;
@@ -86,12 +88,14 @@ static int thunder_mmc_probe(struct pci_dev *pdev,
host->reg_off_dma = 0x160;
host->clk = devm_clk_get(dev, NULL);
- if (IS_ERR(host->clk))
- return PTR_ERR(host->clk);
+ if (IS_ERR(host->clk)) {
+ ret = PTR_ERR(host->clk);
+ goto error;
+ }
ret = clk_prepare_enable(host->clk);
if (ret)
- return ret;
+ goto error;
host->sys_freq = clk_get_rate(host->clk);
spin_lock_init(&host->irq_handler_lock);
@@ -157,6 +161,7 @@ error:
}
}
clk_disable_unprepare(host->clk);
+ pci_release_regions(pdev);
return ret;
}
@@ -175,6 +180,7 @@ static void thunder_mmc_remove(struct pci_dev *pdev)
writeq(dma_cfg, host->dma_base + MIO_EMM_DMA_CFG(host));
clk_disable_unprepare(host->clk);
+ pci_release_regions(pdev);
}
static const struct pci_device_id thunder_mmc_id_table[] = {
diff --git a/drivers/mmc/host/davinci_mmc.c b/drivers/mmc/host/davinci_mmc.c
index ebfaeb33bc8c..f01fecd75833 100644
--- a/drivers/mmc/host/davinci_mmc.c
+++ b/drivers/mmc/host/davinci_mmc.c
@@ -1174,13 +1174,13 @@ static int mmc_davinci_parse_pdata(struct mmc_host *mmc)
mmc->caps |= pdata->caps;
/* Register a cd gpio, if there is not one, enable polling */
- ret = mmc_gpiod_request_cd(mmc, "cd", 0, false, 0, NULL);
+ ret = mmc_gpiod_request_cd(mmc, "cd", 0, false, 0);
if (ret == -EPROBE_DEFER)
return ret;
else if (ret)
mmc->caps |= MMC_CAP_NEEDS_POLL;
- ret = mmc_gpiod_request_ro(mmc, "wp", 0, 0, NULL);
+ ret = mmc_gpiod_request_ro(mmc, "wp", 0, 0);
if (ret == -EPROBE_DEFER)
return ret;
diff --git a/drivers/mmc/host/dw_mmc.c b/drivers/mmc/host/dw_mmc.c
index fc9d4d000f97..bc5278ab5707 100644
--- a/drivers/mmc/host/dw_mmc.c
+++ b/drivers/mmc/host/dw_mmc.c
@@ -833,12 +833,14 @@ static int dw_mci_edmac_init(struct dw_mci *host)
if (!host->dms)
return -ENOMEM;
- host->dms->ch = dma_request_slave_channel(host->dev, "rx-tx");
- if (!host->dms->ch) {
+ host->dms->ch = dma_request_chan(host->dev, "rx-tx");
+ if (IS_ERR(host->dms->ch)) {
+ int ret = PTR_ERR(host->dms->ch);
+
dev_err(host->dev, "Failed to get external DMA channel.\n");
kfree(host->dms);
host->dms = NULL;
- return -ENXIO;
+ return ret;
}
return 0;
diff --git a/drivers/mmc/host/jz4740_mmc.c b/drivers/mmc/host/jz4740_mmc.c
index 78383f60a3dc..fbae87d1f017 100644
--- a/drivers/mmc/host/jz4740_mmc.c
+++ b/drivers/mmc/host/jz4740_mmc.c
@@ -1108,7 +1108,7 @@ static int jz4740_mmc_suspend(struct device *dev)
static int jz4740_mmc_resume(struct device *dev)
{
- return pinctrl_pm_select_default_state(dev);
+ return pinctrl_select_default_state(dev);
}
static SIMPLE_DEV_PM_OPS(jz4740_mmc_pm_ops, jz4740_mmc_suspend,
diff --git a/drivers/mmc/host/meson-gx-mmc.c b/drivers/mmc/host/meson-gx-mmc.c
index e712315c7e8d..35400cf2a2e4 100644
--- a/drivers/mmc/host/meson-gx-mmc.c
+++ b/drivers/mmc/host/meson-gx-mmc.c
@@ -161,7 +161,6 @@ struct meson_host {
bool dram_access_quirk;
struct pinctrl *pinctrl;
- struct pinctrl_state *pins_default;
struct pinctrl_state *pins_clk_gate;
unsigned int bounce_buf_size;
@@ -327,7 +326,7 @@ static void meson_mmc_clk_ungate(struct meson_host *host)
u32 cfg;
if (host->pins_clk_gate)
- pinctrl_select_state(host->pinctrl, host->pins_default);
+ pinctrl_select_default_state(host->dev);
/* Make sure the clock is not stopped in the controller */
cfg = readl(host->regs + SD_EMMC_CFG);
@@ -1101,13 +1100,6 @@ static int meson_mmc_probe(struct platform_device *pdev)
goto free_host;
}
- host->pins_default = pinctrl_lookup_state(host->pinctrl,
- PINCTRL_STATE_DEFAULT);
- if (IS_ERR(host->pins_default)) {
- ret = PTR_ERR(host->pins_default);
- goto free_host;
- }
-
host->pins_clk_gate = pinctrl_lookup_state(host->pinctrl,
"clk-gate");
if (IS_ERR(host->pins_clk_gate)) {
diff --git a/drivers/mmc/host/meson-mx-sdio.c b/drivers/mmc/host/meson-mx-sdio.c
index ba9a63db73da..8b038e7b2cd3 100644
--- a/drivers/mmc/host/meson-mx-sdio.c
+++ b/drivers/mmc/host/meson-mx-sdio.c
@@ -638,7 +638,6 @@ static int meson_mx_mmc_probe(struct platform_device *pdev)
struct platform_device *slot_pdev;
struct mmc_host *mmc;
struct meson_mx_mmc_host *host;
- struct resource *res;
int ret, irq;
u32 conf;
@@ -663,8 +662,7 @@ static int meson_mx_mmc_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, host);
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- host->base = devm_ioremap_resource(host->controller_dev, res);
+ host->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(host->base)) {
ret = PTR_ERR(host->base);
goto error_free_mmc;
diff --git a/drivers/mmc/host/mmc_spi.c b/drivers/mmc/host/mmc_spi.c
index 74c6cfbf9172..951f76dc1ddd 100644
--- a/drivers/mmc/host/mmc_spi.c
+++ b/drivers/mmc/host/mmc_spi.c
@@ -1134,17 +1134,22 @@ static void mmc_spi_initsequence(struct mmc_spi_host *host)
* SPI protocol. Another is that when chipselect is released while
* the card returns BUSY status, the clock must issue several cycles
* with chipselect high before the card will stop driving its output.
+ *
+ * SPI_CS_HIGH means "asserted" here. In some cases, such as when using
+ * GPIOs for chip select, SPI_CS_HIGH is set but the level will be
+ * logically inverted by gpiolib, so to make sure the line is actually
+ * driven high we toggle the default with an XOR as we do here.
*/
- host->spi->mode |= SPI_CS_HIGH;
+ host->spi->mode ^= SPI_CS_HIGH;
if (spi_setup(host->spi) != 0) {
/* Just warn; most cards work without it. */
dev_warn(&host->spi->dev,
"can't change chip-select polarity\n");
- host->spi->mode &= ~SPI_CS_HIGH;
+ host->spi->mode ^= SPI_CS_HIGH;
} else {
mmc_spi_readbytes(host, 18);
- host->spi->mode &= ~SPI_CS_HIGH;
+ host->spi->mode ^= SPI_CS_HIGH;
if (spi_setup(host->spi) != 0) {
/* Wot, we can't get the same setup we had before? */
dev_err(&host->spi->dev,
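
The XOR trick above works because toggling twice restores the original mode word whatever its initial SPI_CS_HIGH state was, which is what the paired writes rely on. A hedged stand-alone illustration in plain C; the 0x04 value mirrors the uapi definition:

	#include <stdint.h>
	#include <stdio.h>

	#define SPI_CS_HIGH 0x04	/* mirrors include/uapi/linux/spi/spi.h */

	int main(void)
	{
		uint16_t mode = 0;	/* works the same if SPI_CS_HIGH starts set */

		mode ^= SPI_CS_HIGH;	/* temporarily flip chip-select polarity */
		mode ^= SPI_CS_HIGH;	/* flip back: original value, either way */
		printf("mode restored to %#x\n", mode);
		return 0;
	}
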
@@ -1421,7 +1426,7 @@ static int mmc_spi_probe(struct spi_device *spi)
* Index 0 is card detect
* Old boardfiles were specifying 1 ms as debounce
*/
- status = mmc_gpiod_request_cd(mmc, NULL, 0, false, 1000, NULL);
+ status = mmc_gpiod_request_cd(mmc, NULL, 0, false, 1000);
if (status == -EPROBE_DEFER)
goto fail_add_host;
if (!status) {
@@ -1436,7 +1441,7 @@ static int mmc_spi_probe(struct spi_device *spi)
mmc_detect_change(mmc, 0);
/* Index 1 is write protect/read only */
- status = mmc_gpiod_request_ro(mmc, NULL, 1, 0, NULL);
+ status = mmc_gpiod_request_ro(mmc, NULL, 1, 0);
if (status == -EPROBE_DEFER)
goto fail_add_host;
if (!status)
diff --git a/drivers/mmc/host/mmci.c b/drivers/mmc/host/mmci.c
index 40e72c30ea84..e9ffce8d41ea 100644
--- a/drivers/mmc/host/mmci.c
+++ b/drivers/mmc/host/mmci.c
@@ -169,6 +169,8 @@ static struct variant_data variant_ux500 = {
.cmdreg_srsp = MCI_CPSM_RESPONSE,
.datalength_bits = 24,
.datactrl_blocksz = 11,
+ .datactrl_any_blocksz = true,
+ .dma_power_of_2 = true,
.datactrl_mask_sdio = MCI_DPSM_ST_SDIOEN,
.st_sdio = true,
.st_clkdiv = true,
@@ -202,6 +204,8 @@ static struct variant_data variant_ux500v2 = {
.datactrl_mask_ddrmode = MCI_DPSM_ST_DDRMODE,
.datalength_bits = 24,
.datactrl_blocksz = 11,
+ .datactrl_any_blocksz = true,
+ .dma_power_of_2 = true,
.datactrl_mask_sdio = MCI_DPSM_ST_SDIOEN,
.st_sdio = true,
.st_clkdiv = true,
@@ -261,6 +265,7 @@ static struct variant_data variant_stm32_sdmmc = {
.datacnt_useless = true,
.datalength_bits = 25,
.datactrl_blocksz = 14,
+ .datactrl_any_blocksz = true,
.stm32_idmabsize_mask = GENMASK(12, 5),
.busy_timeout = true,
.busy_detect = true,
@@ -284,6 +289,7 @@ static struct variant_data variant_qcom = {
.data_cmd_enable = MCI_CPSM_QCOM_DATCMD,
.datalength_bits = 24,
.datactrl_blocksz = 11,
+ .datactrl_any_blocksz = true,
.pwrreg_powerup = MCI_PWR_UP,
.f_max = 208000000,
.explicit_mclk_control = true,
@@ -452,10 +458,11 @@ static void mmci_dma_setup(struct mmci_host *host)
static int mmci_validate_data(struct mmci_host *host,
struct mmc_data *data)
{
+ struct variant_data *variant = host->variant;
+
if (!data)
return 0;
-
- if (!is_power_of_2(data->blksz)) {
+ if (!is_power_of_2(data->blksz) && !variant->datactrl_any_blocksz) {
dev_err(mmc_dev(host->mmc),
"unsupported block size (%d bytes)\n", data->blksz);
return -EINVAL;
@@ -520,7 +527,9 @@ static int mmci_dma_start(struct mmci_host *host, unsigned int datactrl)
"Submit MMCI DMA job, sglen %d blksz %04x blks %04x flags %08x\n",
data->sg_len, data->blksz, data->blocks, data->flags);
- host->ops->dma_start(host, &datactrl);
+ ret = host->ops->dma_start(host, &datactrl);
+ if (ret)
+ return ret;
/* Trigger the DMA transfer */
mmci_write_datactrlreg(host, datactrl);
@@ -706,10 +715,20 @@ int mmci_dmae_setup(struct mmci_host *host)
host->dma_priv = dmae;
- dmae->rx_channel = dma_request_slave_channel(mmc_dev(host->mmc),
- "rx");
- dmae->tx_channel = dma_request_slave_channel(mmc_dev(host->mmc),
- "tx");
+ dmae->rx_channel = dma_request_chan(mmc_dev(host->mmc), "rx");
+ if (IS_ERR(dmae->rx_channel)) {
+ int ret = PTR_ERR(dmae->rx_channel);
+ dmae->rx_channel = NULL;
+ return ret;
+ }
+
+ dmae->tx_channel = dma_request_chan(mmc_dev(host->mmc), "tx");
+ if (IS_ERR(dmae->tx_channel)) {
+ if (PTR_ERR(dmae->tx_channel) == -EPROBE_DEFER)
+ dev_warn(mmc_dev(host->mmc),
+ "Deferred probe for TX channel ignored\n");
+ dmae->tx_channel = NULL;
+ }
/*
* If only an RX channel is specified, the driver will
@@ -888,6 +907,18 @@ static int _mmci_dmae_prep_data(struct mmci_host *host, struct mmc_data *data,
if (data->blksz * data->blocks <= variant->fifosize)
return -EINVAL;
+ /*
+ * This is necessary to get SDIO working on the Ux500. We do not yet
+ * know if this is a bug in:
+ * - The Ux500 DMA controller (DMA40)
+ * - The MMCI DMA interface on the Ux500
+ * Some power-of-two block sizes (such as 64 bytes) are sent regularly
+ * during SDIO traffic and those work fine, so for these we enable DMA
+ * transfers.
+ */
+ if (host->variant->dma_power_of_2 && !is_power_of_2(data->blksz))
+ return -EINVAL;
+
device = chan->device;
nr_sg = dma_map_sg(device->dev, data->sg, data->sg_len,
mmc_get_dma_dir(data));
@@ -938,9 +969,14 @@ int mmci_dmae_prep_data(struct mmci_host *host,
int mmci_dmae_start(struct mmci_host *host, unsigned int *datactrl)
{
struct mmci_dmae_priv *dmae = host->dma_priv;
+ int ret;
host->dma_in_progress = true;
- dmaengine_submit(dmae->desc_current);
+ ret = dma_submit_error(dmaengine_submit(dmae->desc_current));
+ if (ret < 0) {
+ host->dma_in_progress = false;
+ return ret;
+ }
dma_async_issue_pending(dmae->cur);
*datactrl |= MCI_DPSM_DMAENABLE;
@@ -1321,6 +1357,7 @@ mmci_cmd_irq(struct mmci_host *host, struct mmc_command *cmd,
} else if (host->variant->busy_timeout && busy_resp &&
status & MCI_DATATIMEOUT) {
cmd->error = -ETIMEDOUT;
+ host->irq_action = IRQ_WAKE_THREAD;
} else {
cmd->resp[0] = readl(base + MMCIRESPONSE0);
cmd->resp[1] = readl(base + MMCIRESPONSE1);
@@ -1339,7 +1376,10 @@ mmci_cmd_irq(struct mmci_host *host, struct mmc_command *cmd,
return;
}
}
- mmci_request_end(host, host->mrq);
+
+ if (host->irq_action != IRQ_WAKE_THREAD)
+ mmci_request_end(host, host->mrq);
+
} else if (sbc) {
mmci_start_command(host, host->mrq->cmd, 0);
} else if (!host->variant->datactrl_first &&
@@ -1532,9 +1572,9 @@ static irqreturn_t mmci_irq(int irq, void *dev_id)
{
struct mmci_host *host = dev_id;
u32 status;
- int ret = 0;
spin_lock(&host->lock);
+ host->irq_action = IRQ_HANDLED;
do {
status = readl(host->base + MMCISTATUS);
@@ -1574,12 +1614,41 @@ static irqreturn_t mmci_irq(int irq, void *dev_id)
if (host->variant->busy_detect_flag)
status &= ~host->variant->busy_detect_flag;
- ret = 1;
} while (status);
spin_unlock(&host->lock);
- return IRQ_RETVAL(ret);
+ return host->irq_action;
+}
+
+/*
+ * mmci_irq_thread() - A threaded IRQ handler that manages a reset of the HW.
+ *
+ * A reset is needed for some variants, where a datatimeout for an R1B request
+ * causes the DPSM to stay busy (non-functional).
+ */
+static irqreturn_t mmci_irq_thread(int irq, void *dev_id)
+{
+ struct mmci_host *host = dev_id;
+ unsigned long flags;
+
+ if (host->rst) {
+ reset_control_assert(host->rst);
+ udelay(2);
+ reset_control_deassert(host->rst);
+ }
+
+ spin_lock_irqsave(&host->lock, flags);
+ writel(host->clk_reg, host->base + MMCICLOCK);
+ writel(host->pwr_reg, host->base + MMCIPOWER);
+ writel(MCI_IRQENABLE | host->variant->start_err,
+ host->base + MMCIMASK0);
+
+ host->irq_action = IRQ_HANDLED;
+ mmci_request_end(host, host->mrq);
+ spin_unlock_irqrestore(&host->lock, flags);
+
+ return host->irq_action;
}
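
For readers unfamiliar with the handoff above: the hard handler stores IRQ_WAKE_THREAD in host->irq_action and returns it, so the core schedules mmci_irq_thread() in process context, where the reset-controller calls and delays are safe. The same pattern in miniature, with hypothetical foo_* names:

	static irqreturn_t foo_hardirq(int irq, void *dev_id)
	{
		struct foo_host *host = dev_id;

		if (foo_error_needs_recovery(host))	/* hypothetical check */
			return IRQ_WAKE_THREAD;		/* run the thread_fn next */
		return IRQ_HANDLED;
	}

	/* registration: hard and threaded handlers share dev_id */
	ret = devm_request_threaded_irq(dev, irq, foo_hardirq, foo_thread_fn,
					IRQF_SHARED, "foo", host);
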
static void mmci_request(struct mmc_host *mmc, struct mmc_request *mrq)
@@ -1704,7 +1773,7 @@ static void mmci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
if (ios->bus_mode == MMC_BUSMODE_OPENDRAIN)
pinctrl_select_state(host->pinctrl, host->pins_opendrain);
else
- pinctrl_select_state(host->pinctrl, host->pins_default);
+ pinctrl_select_default_state(mmc_dev(mmc));
}
/*
@@ -1877,14 +1946,6 @@ static int mmci_probe(struct amba_device *dev,
goto host_free;
}
- host->pins_default = pinctrl_lookup_state(host->pinctrl,
- PINCTRL_STATE_DEFAULT);
- if (IS_ERR(host->pins_default)) {
- dev_err(mmc_dev(mmc), "Can't select default pins\n");
- ret = PTR_ERR(host->pins_default);
- goto host_free;
- }
-
host->pins_opendrain = pinctrl_lookup_state(host->pinctrl,
MMCI_PINCTRL_STATE_OPENDRAIN);
if (IS_ERR(host->pins_opendrain)) {
@@ -2062,17 +2123,18 @@ static int mmci_probe(struct amba_device *dev,
 * silently if these do not exist
*/
if (!np) {
- ret = mmc_gpiod_request_cd(mmc, "cd", 0, false, 0, NULL);
+ ret = mmc_gpiod_request_cd(mmc, "cd", 0, false, 0);
if (ret == -EPROBE_DEFER)
goto clk_disable;
- ret = mmc_gpiod_request_ro(mmc, "wp", 0, 0, NULL);
+ ret = mmc_gpiod_request_ro(mmc, "wp", 0, 0);
if (ret == -EPROBE_DEFER)
goto clk_disable;
}
- ret = devm_request_irq(&dev->dev, dev->irq[0], mmci_irq, IRQF_SHARED,
- DRIVER_NAME " (cmd)", host);
+ ret = devm_request_threaded_irq(&dev->dev, dev->irq[0], mmci_irq,
+ mmci_irq_thread, IRQF_SHARED,
+ DRIVER_NAME " (cmd)", host);
if (ret)
goto clk_disable;
@@ -2203,7 +2265,7 @@ static int mmci_runtime_resume(struct device *dev)
struct mmci_host *host = mmc_priv(mmc);
clk_prepare_enable(host->clk);
mmci_restore(host);
- pinctrl_pm_select_default_state(dev);
+ pinctrl_select_default_state(dev);
}
return 0;
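
The conversions in this file (and in several drivers below) replace dma_request_slave_channel(), which collapses every failure to NULL, with dma_request_chan(), which returns an ERR_PTR and so lets -EPROBE_DEFER propagate. The consumer pattern, sketched:

	chan = dma_request_chan(dev, "rx");
	if (IS_ERR(chan)) {
		ret = PTR_ERR(chan);
		chan = NULL;
		if (ret == -EPROBE_DEFER)
			return ret;	/* DMA provider not probed yet */
		/* other errors: fall back to PIO where the driver can */
	}
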
diff --git a/drivers/mmc/host/mmci.h b/drivers/mmc/host/mmci.h
index 158e1231aa23..ea6a0b5779d4 100644
--- a/drivers/mmc/host/mmci.h
+++ b/drivers/mmc/host/mmci.h
@@ -279,7 +279,11 @@ struct mmci_host;
* @stm32_clkdiv: true if using a STM32-specific clock divider algorithm
* @datactrl_mask_ddrmode: ddr mode mask in datactrl register.
* @datactrl_mask_sdio: SDIO enable mask in datactrl register
- * @datactrl_blksz: block size in power of two
+ * @datactrl_blocksz: block size in power of two
+ * @datactrl_any_blocksz: true if any block sizes are accepted by the
+ *		hardware, such as with some SDIO traffic that sends
+ *		odd-sized packets.
+ * @dma_power_of_2: DMA only works with blocks that are a power of 2.
* @datactrl_first: true if data must be setup before send command
* @datacnt_useless: true if you could not use datacnt register to read
* remaining data
@@ -326,6 +330,8 @@ struct variant_data {
unsigned int datactrl_mask_ddrmode;
unsigned int datactrl_mask_sdio;
unsigned int datactrl_blocksz;
+ u8 datactrl_any_blocksz:1;
+ u8 dma_power_of_2:1;
u8 datactrl_first:1;
u8 datacnt_useless:1;
u8 st_sdio:1;
@@ -404,7 +410,6 @@ struct mmci_host {
struct mmci_host_ops *ops;
struct variant_data *variant;
struct pinctrl *pinctrl;
- struct pinctrl_state *pins_default;
struct pinctrl_state *pins_opendrain;
u8 hw_designer;
@@ -412,6 +417,7 @@ struct mmci_host {
struct timer_list timer;
unsigned int oldstat;
+ u32 irq_action;
/* pio stuff */
struct sg_mapping_iter sg_miter;
diff --git a/drivers/mmc/host/mtk-sd.c b/drivers/mmc/host/mtk-sd.c
index 010fe29a4888..7726dcf48f2c 100644
--- a/drivers/mmc/host/mtk-sd.c
+++ b/drivers/mmc/host/mtk-sd.c
@@ -2194,8 +2194,7 @@ static int msdc_drv_probe(struct platform_device *pdev)
if (ret)
goto host_free;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- host->base = devm_ioremap_resource(&pdev->dev, res);
+ host->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(host->base)) {
ret = PTR_ERR(host->base);
goto host_free;
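
This helper (used again in mvsdio, sdhci-milbeaut and sdhci-msm below) merely folds the two-step lookup-and-map into one call:

	/* open-coded form */
	res  = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	base = devm_ioremap_resource(&pdev->dev, res);

	/* equivalent helper, same ERR_PTR semantics on failure */
	base = devm_platform_ioremap_resource(pdev, 0);
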
diff --git a/drivers/mmc/host/mvsdio.c b/drivers/mmc/host/mvsdio.c
index 74a0a7fbbf7f..203b61712601 100644
--- a/drivers/mmc/host/mvsdio.c
+++ b/drivers/mmc/host/mvsdio.c
@@ -696,16 +696,14 @@ static int mvsd_probe(struct platform_device *pdev)
struct mmc_host *mmc = NULL;
struct mvsd_host *host = NULL;
const struct mbus_dram_target_info *dram;
- struct resource *r;
int ret, irq;
if (!np) {
dev_err(&pdev->dev, "no DT node\n");
return -ENODEV;
}
- r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
irq = platform_get_irq(pdev, 0);
- if (!r || irq < 0)
+ if (irq < 0)
return -ENXIO;
mmc = mmc_alloc_host(sizeof(struct mvsd_host), &pdev->dev);
@@ -758,7 +756,7 @@ static int mvsd_probe(struct platform_device *pdev)
spin_lock_init(&host->lock);
- host->base = devm_ioremap_resource(&pdev->dev, r);
+ host->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(host->base)) {
ret = PTR_ERR(host->base);
goto out;
diff --git a/drivers/mmc/host/mxcmmc.c b/drivers/mmc/host/mxcmmc.c
index 011b59a3602e..b3d654c688e5 100644
--- a/drivers/mmc/host/mxcmmc.c
+++ b/drivers/mmc/host/mxcmmc.c
@@ -1121,7 +1121,16 @@ static int mxcmci_probe(struct platform_device *pdev)
mxcmci_writel(host, host->default_irq_mask, MMC_REG_INT_CNTR);
if (!host->pdata) {
- host->dma = dma_request_slave_channel(&pdev->dev, "rx-tx");
+ host->dma = dma_request_chan(&pdev->dev, "rx-tx");
+ if (IS_ERR(host->dma)) {
+ if (PTR_ERR(host->dma) == -EPROBE_DEFER) {
+ ret = -EPROBE_DEFER;
+ goto out_clk_put;
+ }
+
+ /* Ignore errors to fall back to PIO mode */
+ host->dma = NULL;
+ }
} else {
res = platform_get_resource(pdev, IORESOURCE_DMA, 0);
if (res) {
diff --git a/drivers/mmc/host/mxs-mmc.c b/drivers/mmc/host/mxs-mmc.c
index 4031217d21c3..d82674aed447 100644
--- a/drivers/mmc/host/mxs-mmc.c
+++ b/drivers/mmc/host/mxs-mmc.c
@@ -623,11 +623,11 @@ static int mxs_mmc_probe(struct platform_device *pdev)
goto out_clk_disable;
}
- ssp->dmach = dma_request_slave_channel(&pdev->dev, "rx-tx");
- if (!ssp->dmach) {
+ ssp->dmach = dma_request_chan(&pdev->dev, "rx-tx");
+ if (IS_ERR(ssp->dmach)) {
dev_err(mmc_dev(host->mmc),
"%s: failed to request dma\n", __func__);
- ret = -ENODEV;
+ ret = PTR_ERR(ssp->dmach);
goto out_clk_disable;
}
diff --git a/drivers/mmc/host/omap_hsmmc.c b/drivers/mmc/host/omap_hsmmc.c
index 767e964ca5a2..a379c45b985c 100644
--- a/drivers/mmc/host/omap_hsmmc.c
+++ b/drivers/mmc/host/omap_hsmmc.c
@@ -1605,12 +1605,6 @@ static int omap_hsmmc_configure_wake_irq(struct omap_hsmmc_host *host)
ret = PTR_ERR(p);
goto err_free_irq;
}
- if (IS_ERR(pinctrl_lookup_state(p, PINCTRL_STATE_DEFAULT))) {
- dev_info(host->dev, "missing default pinctrl state\n");
- devm_pinctrl_put(p);
- ret = -EINVAL;
- goto err_free_irq;
- }
if (IS_ERR(pinctrl_lookup_state(p, PINCTRL_STATE_IDLE))) {
dev_info(host->dev, "missing idle pinctrl state\n");
@@ -2153,14 +2147,14 @@ static int omap_hsmmc_runtime_resume(struct device *dev)
if ((host->mmc->caps & MMC_CAP_SDIO_IRQ) &&
(host->flags & HSMMC_SDIO_IRQ_ENABLED)) {
- pinctrl_pm_select_default_state(host->dev);
+ pinctrl_select_default_state(host->dev);
/* irq lost, if pinmux incorrect */
OMAP_HSMMC_WRITE(host->base, STAT, STAT_CLEAR);
OMAP_HSMMC_WRITE(host->base, ISE, CIRQ_EN);
OMAP_HSMMC_WRITE(host->base, IE, CIRQ_EN);
} else {
- pinctrl_pm_select_default_state(host->dev);
+ pinctrl_select_default_state(host->dev);
}
spin_unlock_irqrestore(&host->irq_lock, flags);
return 0;
diff --git a/drivers/mmc/host/owl-mmc.c b/drivers/mmc/host/owl-mmc.c
index 771e3d00f1bb..01ffe51f413d 100644
--- a/drivers/mmc/host/owl-mmc.c
+++ b/drivers/mmc/host/owl-mmc.c
@@ -616,10 +616,10 @@ static int owl_mmc_probe(struct platform_device *pdev)
pdev->dev.coherent_dma_mask = DMA_BIT_MASK(32);
pdev->dev.dma_mask = &pdev->dev.coherent_dma_mask;
- owl_host->dma = dma_request_slave_channel(&pdev->dev, "mmc");
- if (!owl_host->dma) {
+ owl_host->dma = dma_request_chan(&pdev->dev, "mmc");
+ if (IS_ERR(owl_host->dma)) {
dev_err(owl_host->dev, "Failed to get external DMA channel.\n");
- ret = -ENXIO;
+ ret = PTR_ERR(owl_host->dma);
goto err_free_host;
}
diff --git a/drivers/mmc/host/pxamci.c b/drivers/mmc/host/pxamci.c
index 024acc1b0a2e..3a9333475a2b 100644
--- a/drivers/mmc/host/pxamci.c
+++ b/drivers/mmc/host/pxamci.c
@@ -710,17 +710,19 @@ static int pxamci_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, mmc);
- host->dma_chan_rx = dma_request_slave_channel(dev, "rx");
- if (host->dma_chan_rx == NULL) {
+ host->dma_chan_rx = dma_request_chan(dev, "rx");
+ if (IS_ERR(host->dma_chan_rx)) {
dev_err(dev, "unable to request rx dma channel\n");
- ret = -ENODEV;
+ ret = PTR_ERR(host->dma_chan_rx);
+ host->dma_chan_rx = NULL;
goto out;
}
- host->dma_chan_tx = dma_request_slave_channel(dev, "tx");
- if (host->dma_chan_tx == NULL) {
+ host->dma_chan_tx = dma_request_chan(dev, "tx");
+ if (IS_ERR(host->dma_chan_tx)) {
dev_err(dev, "unable to request tx dma channel\n");
- ret = -ENODEV;
+ ret = PTR_ERR(host->dma_chan_tx);
+ host->dma_chan_tx = NULL;
goto out;
}
@@ -734,22 +736,22 @@ static int pxamci_probe(struct platform_device *pdev)
}
/* FIXME: should we pass detection delay to debounce? */
- ret = mmc_gpiod_request_cd(mmc, "cd", 0, false, 0, NULL);
+ ret = mmc_gpiod_request_cd(mmc, "cd", 0, false, 0);
if (ret && ret != -ENOENT) {
dev_err(dev, "Failed requesting gpio_cd\n");
goto out;
}
- ret = mmc_gpiod_request_ro(mmc, "wp", 0, 0, NULL);
+ if (!host->pdata->gpio_card_ro_invert)
+ mmc->caps2 |= MMC_CAP2_RO_ACTIVE_HIGH;
+
+ ret = mmc_gpiod_request_ro(mmc, "wp", 0, 0);
if (ret && ret != -ENOENT) {
dev_err(dev, "Failed requesting gpio_ro\n");
goto out;
}
- if (!ret) {
+ if (!ret)
host->use_ro_gpio = true;
- mmc->caps2 |= host->pdata->gpio_card_ro_invert ?
- 0 : MMC_CAP2_RO_ACTIVE_HIGH;
- }
if (host->pdata->init)
host->pdata->init(dev, pxamci_detect_irq, mmc);
diff --git a/drivers/mmc/host/renesas_sdhi.h b/drivers/mmc/host/renesas_sdhi.h
index c0504aa90857..f524251d5113 100644
--- a/drivers/mmc/host/renesas_sdhi.h
+++ b/drivers/mmc/host/renesas_sdhi.h
@@ -14,8 +14,8 @@
struct renesas_sdhi_scc {
unsigned long clk_rate; /* clock rate for SDR104 */
- u32 tap; /* sampling clock position for SDR104 */
- u32 tap_hs400; /* sampling clock position for HS400 */
+ u32 tap; /* sampling clock position for SDR104/HS400 (8 TAP) */
+ u32 tap_hs400_4tap; /* sampling clock position for HS400 (4 TAP) */
};
struct renesas_sdhi_of_data {
@@ -33,6 +33,11 @@ struct renesas_sdhi_of_data {
unsigned short max_segs;
};
+struct renesas_sdhi_quirks {
+ bool hs400_disabled;
+ bool hs400_4taps;
+};
+
struct tmio_mmc_dma {
enum dma_slave_buswidth dma_buswidth;
bool (*filter)(struct dma_chan *chan, void *arg);
@@ -46,6 +51,7 @@ struct renesas_sdhi {
struct clk *clk_cd;
struct tmio_mmc_data mmc_data;
struct tmio_mmc_dma dma_priv;
+ const struct renesas_sdhi_quirks *quirks;
struct pinctrl *pinctrl;
struct pinctrl_state *pins_default, *pins_uhs;
void __iomem *scc_ctl;
diff --git a/drivers/mmc/host/renesas_sdhi_core.c b/drivers/mmc/host/renesas_sdhi_core.c
index 234551a68739..35cb24cd45b4 100644
--- a/drivers/mmc/host/renesas_sdhi_core.c
+++ b/drivers/mmc/host/renesas_sdhi_core.c
@@ -46,11 +46,6 @@
#define SDHI_VER_GEN3_SD 0xcc10
#define SDHI_VER_GEN3_SDMMC 0xcd10
-struct renesas_sdhi_quirks {
- bool hs400_disabled;
- bool hs400_4taps;
-};
-
static void renesas_sdhi_sdbuf_width(struct tmio_mmc_host *host, int width)
{
u32 val;
@@ -355,7 +350,7 @@ static void renesas_sdhi_hs400_complete(struct tmio_mmc_host *host)
0x4 << SH_MOBILE_SDHI_SCC_DTCNTL_TAPNUM_SHIFT);
- if (host->pdata->flags & TMIO_MMC_HAVE_4TAP_HS400)
+ if (priv->quirks && priv->quirks->hs400_4taps)
sd_scc_write32(host, priv, SH_MOBILE_SDHI_SCC_TAPSET,
host->tap_set / 2);
@@ -493,7 +488,7 @@ static int renesas_sdhi_select_tuning(struct tmio_mmc_host *host)
static bool renesas_sdhi_check_scc_error(struct tmio_mmc_host *host)
{
struct renesas_sdhi *priv = host_to_priv(host);
- bool use_4tap = host->pdata->flags & TMIO_MMC_HAVE_4TAP_HS400;
+ bool use_4tap = priv->quirks && priv->quirks->hs400_4taps;
/*
* Skip checking SCC errors when running on 4 taps in HS400 mode as
@@ -627,10 +622,10 @@ static const struct renesas_sdhi_quirks sdhi_quirks_nohs400 = {
};
static const struct soc_device_attribute sdhi_quirks_match[] = {
+ { .soc_id = "r8a774a1", .revision = "ES1.[012]", .data = &sdhi_quirks_4tap_nohs400 },
{ .soc_id = "r8a7795", .revision = "ES1.*", .data = &sdhi_quirks_4tap_nohs400 },
{ .soc_id = "r8a7795", .revision = "ES2.0", .data = &sdhi_quirks_4tap },
{ .soc_id = "r8a7796", .revision = "ES1.[012]", .data = &sdhi_quirks_4tap_nohs400 },
- { .soc_id = "r8a774a1", .revision = "ES1.[012]", .data = &sdhi_quirks_4tap_nohs400 },
{ .soc_id = "r8a77980", .data = &sdhi_quirks_nohs400 },
{ /* Sentinel. */ },
};
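
The table above is consumed via soc_device_match(), which returns the first entry whose soc_id and (glob-matched) revision fit the running SoC, so the r8a774a1 line is only re-sorted here. An illustrative lookup helper, not part of the patch:

	static const struct renesas_sdhi_quirks *sdhi_lookup_quirks(void)
	{
		const struct soc_device_attribute *attr;

		attr = soc_device_match(sdhi_quirks_match);
		return attr ? attr->data : NULL;	/* NULL: no quirks */
	}
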
@@ -665,6 +660,7 @@ int renesas_sdhi_probe(struct platform_device *pdev,
if (!priv)
return -ENOMEM;
+ priv->quirks = quirks;
mmc_data = &priv->mmc_data;
dma_priv = &priv->dma_priv;
@@ -724,9 +720,6 @@ int renesas_sdhi_probe(struct platform_device *pdev,
if (quirks && quirks->hs400_disabled)
host->mmc->caps2 &= ~(MMC_CAP2_HS400 | MMC_CAP2_HS400_ES);
- if (quirks && quirks->hs400_4taps)
- mmc_data->flags |= TMIO_MMC_HAVE_4TAP_HS400;
-
/* For some SoC, we disable internal WP. GPIO may override this */
if (mmc_can_gpio_ro(host->mmc))
mmc_data->capabilities2 &= ~MMC_CAP2_NO_WRITE_PROTECT;
@@ -800,20 +793,23 @@ int renesas_sdhi_probe(struct platform_device *pdev,
host->mmc->caps2 & (MMC_CAP2_HS200_1_8V_SDR |
MMC_CAP2_HS400_1_8V))) {
const struct renesas_sdhi_scc *taps = of_data->taps;
+ bool use_4tap = priv->quirks && priv->quirks->hs400_4taps;
bool hit = false;
for (i = 0; i < of_data->taps_num; i++) {
if (taps[i].clk_rate == 0 ||
taps[i].clk_rate == host->mmc->f_max) {
priv->scc_tappos = taps->tap;
- priv->scc_tappos_hs400 = taps->tap_hs400;
+ priv->scc_tappos_hs400 = use_4tap ?
+ taps->tap_hs400_4tap :
+ taps->tap;
hit = true;
break;
}
}
if (!hit)
- dev_warn(&host->pdev->dev, "Unknown clock rate for SDR104\n");
+ dev_warn(&host->pdev->dev, "Unknown clock rate for tuning\n");
host->init_tuning = renesas_sdhi_init_tuning;
host->prepare_tuning = renesas_sdhi_prepare_tuning;
diff --git a/drivers/mmc/host/renesas_sdhi_internal_dmac.c b/drivers/mmc/host/renesas_sdhi_internal_dmac.c
index 18839a10594c..47ac53e91241 100644
--- a/drivers/mmc/host/renesas_sdhi_internal_dmac.c
+++ b/drivers/mmc/host/renesas_sdhi_internal_dmac.c
@@ -82,7 +82,7 @@ static struct renesas_sdhi_scc rcar_gen3_scc_taps[] = {
{
.clk_rate = 0,
.tap = 0x00000300,
- .tap_hs400 = 0x00000704,
+ .tap_hs400_4tap = 0x00000100,
},
};
@@ -298,38 +298,23 @@ static const struct tmio_mmc_dma_ops renesas_sdhi_internal_dmac_dma_ops = {
 * Quirks for specific R-Car Gen3 SoC ES versions; SoCs without an entry
 * use this DMAC implementation with no quirks.
*/
-static const struct soc_device_attribute soc_whitelist[] = {
- /* specific ones */
+static const struct soc_device_attribute soc_dma_quirks[] = {
{ .soc_id = "r7s9210",
.data = (void *)BIT(SDHI_INTERNAL_DMAC_ADDR_MODE_FIXED_ONLY) },
{ .soc_id = "r8a7795", .revision = "ES1.*",
.data = (void *)BIT(SDHI_INTERNAL_DMAC_ONE_RX_ONLY) },
{ .soc_id = "r8a7796", .revision = "ES1.0",
.data = (void *)BIT(SDHI_INTERNAL_DMAC_ONE_RX_ONLY) },
- /* generic ones */
- { .soc_id = "r8a774a1" },
- { .soc_id = "r8a774b1" },
- { .soc_id = "r8a774c0" },
- { .soc_id = "r8a77470" },
- { .soc_id = "r8a7795" },
- { .soc_id = "r8a7796" },
- { .soc_id = "r8a77965" },
- { .soc_id = "r8a77970" },
- { .soc_id = "r8a77980" },
- { .soc_id = "r8a77990" },
- { .soc_id = "r8a77995" },
{ /* sentinel */ }
};
static int renesas_sdhi_internal_dmac_probe(struct platform_device *pdev)
{
- const struct soc_device_attribute *soc = soc_device_match(soc_whitelist);
+ const struct soc_device_attribute *soc = soc_device_match(soc_dma_quirks);
struct device *dev = &pdev->dev;
- if (!soc)
- return -ENODEV;
-
- global_flags |= (unsigned long)soc->data;
+ if (soc)
+ global_flags |= (unsigned long)soc->data;
dev->dma_parms = devm_kzalloc(dev, sizeof(*dev->dma_parms), GFP_KERNEL);
if (!dev->dma_parms)
diff --git a/drivers/mmc/host/s3cmci.c b/drivers/mmc/host/s3cmci.c
index bce9c33bc4b5..1e616ae56b13 100644
--- a/drivers/mmc/host/s3cmci.c
+++ b/drivers/mmc/host/s3cmci.c
@@ -1505,14 +1505,14 @@ static int s3cmci_probe_pdata(struct s3cmci_host *host)
mmc->caps2 |= MMC_CAP2_RO_ACTIVE_HIGH;
/* If we get -ENOENT we have no card detect GPIO line */
- ret = mmc_gpiod_request_cd(mmc, "cd", 0, false, 0, NULL);
+ ret = mmc_gpiod_request_cd(mmc, "cd", 0, false, 0);
if (ret != -ENOENT) {
dev_err(&pdev->dev, "error requesting GPIO for CD %d\n",
ret);
return ret;
}
- ret = mmc_gpiod_request_ro(host->mmc, "wp", 0, 0, NULL);
+ ret = mmc_gpiod_request_ro(host->mmc, "wp", 0, 0);
if (ret != -ENOENT) {
dev_err(&pdev->dev, "error requesting GPIO for WP %d\n",
ret);
diff --git a/drivers/mmc/host/sdhci-acpi.c b/drivers/mmc/host/sdhci-acpi.c
index 105e73d4a3b9..9651dca6863e 100644
--- a/drivers/mmc/host/sdhci-acpi.c
+++ b/drivers/mmc/host/sdhci-acpi.c
@@ -719,7 +719,7 @@ static int sdhci_acpi_probe(struct platform_device *pdev)
goto err_free;
}
- host->ioaddr = devm_ioremap_nocache(dev, iomem->start,
+ host->ioaddr = devm_ioremap(dev, iomem->start,
resource_size(iomem));
if (host->ioaddr == NULL) {
err = -ENOMEM;
@@ -752,7 +752,7 @@ static int sdhci_acpi_probe(struct platform_device *pdev)
if (sdhci_acpi_flag(c, SDHCI_ACPI_SD_CD)) {
bool v = sdhci_acpi_flag(c, SDHCI_ACPI_SD_CD_OVERRIDE_LEVEL);
- err = mmc_gpiod_request_cd(host->mmc, NULL, 0, v, 0, NULL);
+ err = mmc_gpiod_request_cd(host->mmc, NULL, 0, v, 0);
if (err) {
if (err == -EPROBE_DEFER)
goto err_free;
diff --git a/drivers/mmc/host/sdhci-brcmstb.c b/drivers/mmc/host/sdhci-brcmstb.c
index 73bb440aaf93..ad01f6451a95 100644
--- a/drivers/mmc/host/sdhci-brcmstb.c
+++ b/drivers/mmc/host/sdhci-brcmstb.c
@@ -9,29 +9,236 @@
#include <linux/mmc/host.h>
#include <linux/module.h>
#include <linux/of.h>
+#include <linux/bitops.h>
+#include <linux/delay.h>
#include "sdhci-pltfm.h"
+#include "cqhci.h"
-static const struct sdhci_ops sdhci_brcmstb_ops = {
+#define SDHCI_VENDOR 0x78
+#define SDHCI_VENDOR_ENHANCED_STRB 0x1
+
+#define BRCMSTB_PRIV_FLAGS_NO_64BIT BIT(0)
+#define BRCMSTB_PRIV_FLAGS_BROKEN_TIMEOUT BIT(1)
+
+#define SDHCI_ARASAN_CQE_BASE_ADDR 0x200
+
+struct sdhci_brcmstb_priv {
+ void __iomem *cfg_regs;
+ bool has_cqe;
+};
+
+struct brcmstb_match_priv {
+ void (*hs400es)(struct mmc_host *mmc, struct mmc_ios *ios);
+ struct sdhci_ops *ops;
+ unsigned int flags;
+};
+
+static void sdhci_brcmstb_hs400es(struct mmc_host *mmc, struct mmc_ios *ios)
+{
+ struct sdhci_host *host = mmc_priv(mmc);
+ u32 reg;
+
+ dev_dbg(mmc_dev(mmc), "%s(): Setting HS400-Enhanced-Strobe mode\n",
+ __func__);
+ reg = readl(host->ioaddr + SDHCI_VENDOR);
+ if (ios->enhanced_strobe)
+ reg |= SDHCI_VENDOR_ENHANCED_STRB;
+ else
+ reg &= ~SDHCI_VENDOR_ENHANCED_STRB;
+ writel(reg, host->ioaddr + SDHCI_VENDOR);
+}
+
+static void sdhci_brcmstb_set_clock(struct sdhci_host *host, unsigned int clock)
+{
+ u16 clk;
+
+ host->mmc->actual_clock = 0;
+
+ clk = sdhci_calc_clk(host, clock, &host->mmc->actual_clock);
+ sdhci_writew(host, clk, SDHCI_CLOCK_CONTROL);
+
+ if (clock == 0)
+ return;
+
+ sdhci_enable_clk(host, clk);
+}
+
+static void sdhci_brcmstb_set_uhs_signaling(struct sdhci_host *host,
+ unsigned int timing)
+{
+ u16 ctrl_2;
+
+ dev_dbg(mmc_dev(host->mmc), "%s: Setting UHS signaling for %d timing\n",
+ __func__, timing);
+ ctrl_2 = sdhci_readw(host, SDHCI_HOST_CONTROL2);
+ /* Select Bus Speed Mode for host */
+ ctrl_2 &= ~SDHCI_CTRL_UHS_MASK;
+ if ((timing == MMC_TIMING_MMC_HS200) ||
+ (timing == MMC_TIMING_UHS_SDR104))
+ ctrl_2 |= SDHCI_CTRL_UHS_SDR104;
+ else if (timing == MMC_TIMING_UHS_SDR12)
+ ctrl_2 |= SDHCI_CTRL_UHS_SDR12;
+ else if (timing == MMC_TIMING_SD_HS ||
+ timing == MMC_TIMING_MMC_HS ||
+ timing == MMC_TIMING_UHS_SDR25)
+ ctrl_2 |= SDHCI_CTRL_UHS_SDR25;
+ else if (timing == MMC_TIMING_UHS_SDR50)
+ ctrl_2 |= SDHCI_CTRL_UHS_SDR50;
+ else if ((timing == MMC_TIMING_UHS_DDR50) ||
+ (timing == MMC_TIMING_MMC_DDR52))
+ ctrl_2 |= SDHCI_CTRL_UHS_DDR50;
+ else if (timing == MMC_TIMING_MMC_HS400)
+ ctrl_2 |= SDHCI_CTRL_HS400; /* Non-standard */
+ sdhci_writew(host, ctrl_2, SDHCI_HOST_CONTROL2);
+}
+
+static void sdhci_brcmstb_dumpregs(struct mmc_host *mmc)
+{
+ sdhci_dumpregs(mmc_priv(mmc));
+}
+
+static void sdhci_brcmstb_cqe_enable(struct mmc_host *mmc)
+{
+ struct sdhci_host *host = mmc_priv(mmc);
+ u32 reg;
+
+ reg = sdhci_readl(host, SDHCI_PRESENT_STATE);
+ while (reg & SDHCI_DATA_AVAILABLE) {
+ sdhci_readl(host, SDHCI_BUFFER);
+ reg = sdhci_readl(host, SDHCI_PRESENT_STATE);
+ }
+
+ sdhci_cqe_enable(mmc);
+}
+
+static const struct cqhci_host_ops sdhci_brcmstb_cqhci_ops = {
+ .enable = sdhci_brcmstb_cqe_enable,
+ .disable = sdhci_cqe_disable,
+ .dumpregs = sdhci_brcmstb_dumpregs,
+};
+
+static struct sdhci_ops sdhci_brcmstb_ops = {
.set_clock = sdhci_set_clock,
.set_bus_width = sdhci_set_bus_width,
.reset = sdhci_reset,
.set_uhs_signaling = sdhci_set_uhs_signaling,
};
-static const struct sdhci_pltfm_data sdhci_brcmstb_pdata = {
+static struct sdhci_ops sdhci_brcmstb_ops_7216 = {
+ .set_clock = sdhci_brcmstb_set_clock,
+ .set_bus_width = sdhci_set_bus_width,
+ .reset = sdhci_reset,
+ .set_uhs_signaling = sdhci_brcmstb_set_uhs_signaling,
+};
+
+static struct brcmstb_match_priv match_priv_7425 = {
+ .flags = BRCMSTB_PRIV_FLAGS_NO_64BIT |
+ BRCMSTB_PRIV_FLAGS_BROKEN_TIMEOUT,
.ops = &sdhci_brcmstb_ops,
};
+static struct brcmstb_match_priv match_priv_7445 = {
+ .flags = BRCMSTB_PRIV_FLAGS_BROKEN_TIMEOUT,
+ .ops = &sdhci_brcmstb_ops,
+};
+
+static const struct brcmstb_match_priv match_priv_7216 = {
+ .hs400es = sdhci_brcmstb_hs400es,
+ .ops = &sdhci_brcmstb_ops_7216,
+};
+
+static const struct of_device_id sdhci_brcm_of_match[] = {
+ { .compatible = "brcm,bcm7425-sdhci", .data = &match_priv_7425 },
+ { .compatible = "brcm,bcm7445-sdhci", .data = &match_priv_7445 },
+ { .compatible = "brcm,bcm7216-sdhci", .data = &match_priv_7216 },
+ {},
+};
+
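
The probe routine below pairs this table with of_match_node(); the shorter of_device_get_match_data() helper should be equivalent where the device is guaranteed to have matched:

	/* one-liner alternative to of_match_node() + match->data */
	match_priv = of_device_get_match_data(&pdev->dev);
	if (!match_priv)
		return -ENODEV;
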
+static u32 sdhci_brcmstb_cqhci_irq(struct sdhci_host *host, u32 intmask)
+{
+ int cmd_error = 0;
+ int data_error = 0;
+
+ if (!sdhci_cqe_irq(host, intmask, &cmd_error, &data_error))
+ return intmask;
+
+ cqhci_irq(host->mmc, intmask, cmd_error, data_error);
+
+ return 0;
+}
+
+static int sdhci_brcmstb_add_host(struct sdhci_host *host,
+ struct sdhci_brcmstb_priv *priv)
+{
+ struct cqhci_host *cq_host;
+ bool dma64;
+ int ret;
+
+ if (!priv->has_cqe)
+ return sdhci_add_host(host);
+
+ dev_dbg(mmc_dev(host->mmc), "CQE is enabled\n");
+ host->mmc->caps2 |= MMC_CAP2_CQE | MMC_CAP2_CQE_DCMD;
+ ret = sdhci_setup_host(host);
+ if (ret)
+ return ret;
+
+ cq_host = devm_kzalloc(mmc_dev(host->mmc),
+ sizeof(*cq_host), GFP_KERNEL);
+ if (!cq_host) {
+ ret = -ENOMEM;
+ goto cleanup;
+ }
+
+ cq_host->mmio = host->ioaddr + SDHCI_ARASAN_CQE_BASE_ADDR;
+ cq_host->ops = &sdhci_brcmstb_cqhci_ops;
+
+ dma64 = host->flags & SDHCI_USE_64_BIT_DMA;
+ if (dma64) {
+ dev_dbg(mmc_dev(host->mmc), "Using 64 bit DMA\n");
+ cq_host->caps |= CQHCI_TASK_DESC_SZ_128;
+ cq_host->quirks |= CQHCI_QUIRK_SHORT_TXFR_DESC_SZ;
+ }
+
+ ret = cqhci_init(cq_host, host->mmc, dma64);
+ if (ret)
+ goto cleanup;
+
+ ret = __sdhci_add_host(host);
+ if (ret)
+ goto cleanup;
+
+ return 0;
+
+cleanup:
+ sdhci_cleanup_host(host);
+ return ret;
+}
+
static int sdhci_brcmstb_probe(struct platform_device *pdev)
{
- struct sdhci_host *host;
+ const struct brcmstb_match_priv *match_priv;
+ struct sdhci_pltfm_data brcmstb_pdata;
struct sdhci_pltfm_host *pltfm_host;
+ const struct of_device_id *match;
+ struct sdhci_brcmstb_priv *priv;
+ struct sdhci_host *host;
+ struct resource *iomem;
+ bool has_cqe = false;
struct clk *clk;
int res;
+ match = of_match_node(sdhci_brcm_of_match, pdev->dev.of_node);
+ match_priv = match->data;
+
+ dev_dbg(&pdev->dev, "Probe found match for %s\n", match->compatible);
+
clk = devm_clk_get(&pdev->dev, NULL);
if (IS_ERR(clk)) {
+ if (PTR_ERR(clk) == -EPROBE_DEFER)
+ return -EPROBE_DEFER;
dev_err(&pdev->dev, "Clock not found in Device Tree\n");
clk = NULL;
}
@@ -39,36 +246,64 @@ static int sdhci_brcmstb_probe(struct platform_device *pdev)
if (res)
return res;
- host = sdhci_pltfm_init(pdev, &sdhci_brcmstb_pdata, 0);
+ memset(&brcmstb_pdata, 0, sizeof(brcmstb_pdata));
+ if (device_property_read_bool(&pdev->dev, "supports-cqe")) {
+ has_cqe = true;
+ match_priv->ops->irq = sdhci_brcmstb_cqhci_irq;
+ }
+ brcmstb_pdata.ops = match_priv->ops;
+ host = sdhci_pltfm_init(pdev, &brcmstb_pdata,
+ sizeof(struct sdhci_brcmstb_priv));
if (IS_ERR(host)) {
res = PTR_ERR(host);
goto err_clk;
}
+ pltfm_host = sdhci_priv(host);
+ priv = sdhci_pltfm_priv(pltfm_host);
+ priv->has_cqe = has_cqe;
+
+ /* Map in the non-standard CFG registers */
+ iomem = platform_get_resource(pdev, IORESOURCE_MEM, 1);
+ priv->cfg_regs = devm_ioremap_resource(&pdev->dev, iomem);
+ if (IS_ERR(priv->cfg_regs)) {
+ res = PTR_ERR(priv->cfg_regs);
+ goto err;
+ }
+
sdhci_get_of_property(pdev);
res = mmc_of_parse(host->mmc);
if (res)
goto err;
/*
+ * If the chip supports enhanced strobe and it is enabled, add the
+ * callback.
+ */
+ if (match_priv->hs400es &&
+ (host->mmc->caps2 & MMC_CAP2_HS400_ES))
+ host->mmc_host_ops.hs400_enhanced_strobe = match_priv->hs400es;
+
+ /*
* Supply the existing CAPS, but clear the UHS modes. This
* will allow these modes to be specified by device tree
* properties through mmc_of_parse().
*/
host->caps = sdhci_readl(host, SDHCI_CAPABILITIES);
- if (of_device_is_compatible(pdev->dev.of_node, "brcm,bcm7425-sdhci"))
+ if (match_priv->flags & BRCMSTB_PRIV_FLAGS_NO_64BIT)
host->caps &= ~SDHCI_CAN_64BIT;
host->caps1 = sdhci_readl(host, SDHCI_CAPABILITIES_1);
host->caps1 &= ~(SDHCI_SUPPORT_SDR50 | SDHCI_SUPPORT_SDR104 |
- SDHCI_SUPPORT_DDR50);
- host->quirks |= SDHCI_QUIRK_MISSING_CAPS |
- SDHCI_QUIRK_BROKEN_TIMEOUT_VAL;
+ SDHCI_SUPPORT_DDR50);
+ host->quirks |= SDHCI_QUIRK_MISSING_CAPS;
+
+ if (match_priv->flags & BRCMSTB_PRIV_FLAGS_BROKEN_TIMEOUT)
+ host->quirks |= SDHCI_QUIRK_BROKEN_TIMEOUT_VAL;
- res = sdhci_add_host(host);
+ res = sdhci_brcmstb_add_host(host, priv);
if (res)
goto err;
- pltfm_host = sdhci_priv(host);
pltfm_host->clk = clk;
return res;
@@ -79,11 +314,15 @@ err_clk:
return res;
}
-static const struct of_device_id sdhci_brcm_of_match[] = {
- { .compatible = "brcm,bcm7425-sdhci" },
- { .compatible = "brcm,bcm7445-sdhci" },
- {},
-};
+static void sdhci_brcmstb_shutdown(struct platform_device *pdev)
+{
+ int ret;
+
+ ret = sdhci_pltfm_unregister(pdev);
+ if (ret)
+ dev_err(&pdev->dev, "failed to shutdown\n");
+}
+
MODULE_DEVICE_TABLE(of, sdhci_brcm_of_match);
static struct platform_driver sdhci_brcmstb_driver = {
@@ -94,6 +333,7 @@ static struct platform_driver sdhci_brcmstb_driver = {
},
.probe = sdhci_brcmstb_probe,
.remove = sdhci_pltfm_unregister,
+ .shutdown = sdhci_brcmstb_shutdown,
};
module_platform_driver(sdhci_brcmstb_driver);
diff --git a/drivers/mmc/host/sdhci-cadence.c b/drivers/mmc/host/sdhci-cadence.c
index ae0ec27dd7cc..5827d3751b81 100644
--- a/drivers/mmc/host/sdhci-cadence.c
+++ b/drivers/mmc/host/sdhci-cadence.c
@@ -158,7 +158,7 @@ static int sdhci_cdns_phy_init(struct sdhci_cdns_priv *priv)
return 0;
}
-static inline void *sdhci_cdns_priv(struct sdhci_host *host)
+static void *sdhci_cdns_priv(struct sdhci_host *host)
{
struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
diff --git a/drivers/mmc/host/sdhci-esdhc-imx.c b/drivers/mmc/host/sdhci-esdhc-imx.c
index 1c988d6a2433..382f25b2fa45 100644
--- a/drivers/mmc/host/sdhci-esdhc-imx.c
+++ b/drivers/mmc/host/sdhci-esdhc-imx.c
@@ -224,7 +224,6 @@ static struct esdhc_soc_data usdhc_imx8qxp_data = {
struct pltfm_imx_data {
u32 scratchpad;
struct pinctrl *pinctrl;
- struct pinctrl_state *pins_default;
struct pinctrl_state *pins_100mhz;
struct pinctrl_state *pins_200mhz;
const struct esdhc_soc_data *socdata;
@@ -951,7 +950,6 @@ static int esdhc_change_pinstate(struct sdhci_host *host,
dev_dbg(mmc_dev(host->mmc), "change pinctrl state for uhs %d\n", uhs);
if (IS_ERR(imx_data->pinctrl) ||
- IS_ERR(imx_data->pins_default) ||
IS_ERR(imx_data->pins_100mhz) ||
IS_ERR(imx_data->pins_200mhz))
return -EINVAL;
@@ -968,7 +966,7 @@ static int esdhc_change_pinstate(struct sdhci_host *host,
break;
default:
/* back to default state for other legacy timing */
- pinctrl = imx_data->pins_default;
+ return pinctrl_select_default_state(mmc_dev(host->mmc));
}
return pinctrl_select_state(imx_data->pinctrl, pinctrl);
@@ -1338,7 +1336,7 @@ sdhci_esdhc_imx_probe_dt(struct platform_device *pdev,
mmc_of_parse_voltage(np, &host->ocr_mask);
- if (esdhc_is_usdhc(imx_data) && !IS_ERR(imx_data->pins_default)) {
+ if (esdhc_is_usdhc(imx_data)) {
imx_data->pins_100mhz = pinctrl_lookup_state(imx_data->pinctrl,
ESDHC_PINCTRL_STATE_100MHZ);
imx_data->pins_200mhz = pinctrl_lookup_state(imx_data->pinctrl,
@@ -1381,19 +1379,20 @@ static int sdhci_esdhc_imx_probe_nondt(struct platform_device *pdev,
host->mmc->parent->platform_data);
/* write_protect */
if (boarddata->wp_type == ESDHC_WP_GPIO) {
- err = mmc_gpiod_request_ro(host->mmc, "wp", 0, 0, NULL);
+ host->mmc->caps2 |= MMC_CAP2_RO_ACTIVE_HIGH;
+
+ err = mmc_gpiod_request_ro(host->mmc, "wp", 0, 0);
if (err) {
dev_err(mmc_dev(host->mmc),
"failed to request write-protect gpio!\n");
return err;
}
- host->mmc->caps2 |= MMC_CAP2_RO_ACTIVE_HIGH;
}
/* card_detect */
switch (boarddata->cd_type) {
case ESDHC_CD_GPIO:
- err = mmc_gpiod_request_cd(host->mmc, "cd", 0, false, 0, NULL);
+ err = mmc_gpiod_request_cd(host->mmc, "cd", 0, false, 0);
if (err) {
dev_err(mmc_dev(host->mmc),
"failed to request card-detect gpio!\n");
@@ -1492,11 +1491,6 @@ static int sdhci_esdhc_imx_probe(struct platform_device *pdev)
goto disable_ahb_clk;
}
- imx_data->pins_default = pinctrl_lookup_state(imx_data->pinctrl,
- PINCTRL_STATE_DEFAULT);
- if (IS_ERR(imx_data->pins_default))
- dev_warn(mmc_dev(host->mmc), "could not get default state\n");
-
if (esdhc_is_usdhc(imx_data)) {
host->quirks2 |= SDHCI_QUIRK2_PRESET_VALUE_BROKEN;
host->mmc->caps |= MMC_CAP_1_8V_DDR | MMC_CAP_3_3V_DDR;
diff --git a/drivers/mmc/host/sdhci-milbeaut.c b/drivers/mmc/host/sdhci-milbeaut.c
index a1aa21b9ae1c..92f30a1db435 100644
--- a/drivers/mmc/host/sdhci-milbeaut.c
+++ b/drivers/mmc/host/sdhci-milbeaut.c
@@ -242,15 +242,12 @@ static int sdhci_milbeaut_probe(struct platform_device *pdev)
{
struct sdhci_host *host;
struct device *dev = &pdev->dev;
- struct resource *res;
int irq, ret = 0;
struct f_sdhost_priv *priv;
irq = platform_get_irq(pdev, 0);
- if (irq < 0) {
- dev_err(dev, "%s: no irq specified\n", __func__);
+ if (irq < 0)
return irq;
- }
host = sdhci_alloc_host(dev, sizeof(struct f_sdhost_priv));
if (IS_ERR(host))
@@ -280,8 +277,7 @@ static int sdhci_milbeaut_probe(struct platform_device *pdev)
host->ops = &sdhci_milbeaut_ops;
host->irq = irq;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- host->ioaddr = devm_ioremap_resource(&pdev->dev, res);
+ host->ioaddr = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(host->ioaddr)) {
ret = PTR_ERR(host->ioaddr);
goto err;
diff --git a/drivers/mmc/host/sdhci-msm.c b/drivers/mmc/host/sdhci-msm.c
index 3d0bb5e2e09b..c3a160c18047 100644
--- a/drivers/mmc/host/sdhci-msm.c
+++ b/drivers/mmc/host/sdhci-msm.c
@@ -15,6 +15,7 @@
#include <linux/regulator/consumer.h>
#include "sdhci-pltfm.h"
+#include "cqhci.h"
#define CORE_MCI_VERSION 0x50
#define CORE_VERSION_MAJOR_SHIFT 28
@@ -122,6 +123,10 @@
#define msm_host_writel(msm_host, val, host, offset) \
msm_host->var_ops->msm_writel_relaxed(val, host, offset)
+/* CQHCI vendor specific registers */
+#define CQHCI_VENDOR_CFG1 0xA00
+#define CQHCI_VENDOR_DIS_RST_ON_CQ_EN (0x3 << 13)
+
struct sdhci_msm_offset {
u32 core_hc_mode;
u32 core_mci_data_cnt;
@@ -1567,6 +1572,127 @@ out:
__sdhci_msm_set_clock(host, clock);
}
+/*****************************************************************************\
+ * *
+ * MSM Command Queue Engine (CQE) *
+ * *
+\*****************************************************************************/
+
+static u32 sdhci_msm_cqe_irq(struct sdhci_host *host, u32 intmask)
+{
+ int cmd_error = 0;
+ int data_error = 0;
+
+ if (!sdhci_cqe_irq(host, intmask, &cmd_error, &data_error))
+ return intmask;
+
+ cqhci_irq(host->mmc, intmask, cmd_error, data_error);
+ return 0;
+}
+
+void sdhci_msm_cqe_disable(struct mmc_host *mmc, bool recovery)
+{
+ struct sdhci_host *host = mmc_priv(mmc);
+ unsigned long flags;
+ u32 ctrl;
+
+ /*
+ * When CQE is halted, the legacy SDHCI path operates only
+ * on 16-byte descriptors in 64-bit mode.
+ */
+ if (host->flags & SDHCI_USE_64_BIT_DMA)
+ host->desc_sz = 16;
+
+ spin_lock_irqsave(&host->lock, flags);
+
+ /*
+ * During CQE command transfers, the command complete bit gets latched.
+ * So s/w should clear the command complete interrupt status when CQE is
+ * either halted or disabled. Otherwise an unexpected SDHCI legacy
+ * interrupt gets triggered when CQE is halted/disabled.
+ */
+ ctrl = sdhci_readl(host, SDHCI_INT_ENABLE);
+ ctrl |= SDHCI_INT_RESPONSE;
+ sdhci_writel(host, ctrl, SDHCI_INT_ENABLE);
+ sdhci_writel(host, SDHCI_INT_RESPONSE, SDHCI_INT_STATUS);
+
+ spin_unlock_irqrestore(&host->lock, flags);
+
+ sdhci_cqe_disable(mmc, recovery);
+}
+
+static const struct cqhci_host_ops sdhci_msm_cqhci_ops = {
+ .enable = sdhci_cqe_enable,
+ .disable = sdhci_msm_cqe_disable,
+};
+
+static int sdhci_msm_cqe_add_host(struct sdhci_host *host,
+ struct platform_device *pdev)
+{
+ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+ struct sdhci_msm_host *msm_host = sdhci_pltfm_priv(pltfm_host);
+ struct cqhci_host *cq_host;
+ bool dma64;
+ u32 cqcfg;
+ int ret;
+
+ /*
+ * When CQE is halted, SDHC operates only on 16-byte ADMA descriptors.
+ * So ensure the ADMA table is allocated for 16-byte descriptors.
+ */
+ if (host->caps & SDHCI_CAN_64BIT)
+ host->alloc_desc_sz = 16;
+
+ ret = sdhci_setup_host(host);
+ if (ret)
+ return ret;
+
+ cq_host = cqhci_pltfm_init(pdev);
+ if (IS_ERR(cq_host)) {
+ ret = PTR_ERR(cq_host);
+ dev_err(&pdev->dev, "cqhci-pltfm init: failed: %d\n", ret);
+ goto cleanup;
+ }
+
+ msm_host->mmc->caps2 |= MMC_CAP2_CQE | MMC_CAP2_CQE_DCMD;
+ cq_host->ops = &sdhci_msm_cqhci_ops;
+
+ dma64 = host->flags & SDHCI_USE_64_BIT_DMA;
+
+ ret = cqhci_init(cq_host, host->mmc, dma64);
+ if (ret) {
+ dev_err(&pdev->dev, "%s: CQE init: failed (%d)\n",
+ mmc_hostname(host->mmc), ret);
+ goto cleanup;
+ }
+
+ /* Disable CQE reset triggered by the CQE enable signal */
+ cqcfg = cqhci_readl(cq_host, CQHCI_VENDOR_CFG1);
+ cqcfg |= CQHCI_VENDOR_DIS_RST_ON_CQ_EN;
+ cqhci_writel(cq_host, cqcfg, CQHCI_VENDOR_CFG1);
+
+ /*
+ * SDHC expects 12-byte ADMA descriptors until CQE is enabled.
+ * So limit desc_sz to 12 so that the data commands sent during
+ * card initialization (before CQE gets enabled) are executed
+ * without any issues.
+ */
+ if (host->flags & SDHCI_USE_64_BIT_DMA)
+ host->desc_sz = 12;
+
+ ret = __sdhci_add_host(host);
+ if (ret)
+ goto cleanup;
+
+ dev_info(&pdev->dev, "%s: CQE init: success\n",
+ mmc_hostname(host->mmc));
+ return ret;
+
+cleanup:
+ sdhci_cleanup_host(host);
+ return ret;
+}
+
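
Condensing the descriptor-size bookkeeping above: the ADMA table is sized for the 16-byte worst case, legacy transfers before CQE is enabled run with 12-byte descriptors, and whenever CQE is halted the legacy path switches to 16-byte ones (sketch mirroring the code, 64-bit DMA assumed):

	host->alloc_desc_sz = 16;	/* probe: allocate worst case */
	host->desc_sz = 12;		/* pre-CQE legacy transfers */
	/* ... on CQE halt/disable ... */
	host->desc_sz = 16;		/* legacy path under halted CQE */
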
/*
* Platform specific register write functions. This is so that, if any
* register write needs to be followed up by platform specific actions,
@@ -1731,6 +1857,7 @@ static const struct sdhci_ops sdhci_msm_ops = {
.set_uhs_signaling = sdhci_msm_set_uhs_signaling,
.write_w = sdhci_msm_writew,
.write_b = sdhci_msm_writeb,
+ .irq = sdhci_msm_cqe_irq,
};
static const struct sdhci_pltfm_data sdhci_msm_pdata = {
@@ -1746,7 +1873,6 @@ static int sdhci_msm_probe(struct platform_device *pdev)
struct sdhci_host *host;
struct sdhci_pltfm_host *pltfm_host;
struct sdhci_msm_host *msm_host;
- struct resource *core_memres;
struct clk *clk;
int ret;
u16 host_version, core_minor;
@@ -1754,6 +1880,7 @@ static int sdhci_msm_probe(struct platform_device *pdev)
u8 core_major;
const struct sdhci_msm_offset *msm_offset;
const struct sdhci_msm_variant_info *var_info;
+ struct device_node *node = pdev->dev.of_node;
host = sdhci_pltfm_init(pdev, &sdhci_msm_pdata, sizeof(*msm_host));
if (IS_ERR(host))
@@ -1847,10 +1974,7 @@ static int sdhci_msm_probe(struct platform_device *pdev)
}
if (!msm_host->mci_removed) {
- core_memres = platform_get_resource(pdev, IORESOURCE_MEM, 1);
- msm_host->core_mem = devm_ioremap_resource(&pdev->dev,
- core_memres);
-
+ msm_host->core_mem = devm_platform_ioremap_resource(pdev, 1);
if (IS_ERR(msm_host->core_mem)) {
ret = PTR_ERR(msm_host->core_mem);
goto clk_disable;
@@ -1952,7 +2076,10 @@ static int sdhci_msm_probe(struct platform_device *pdev)
pm_runtime_use_autosuspend(&pdev->dev);
host->mmc_host_ops.execute_tuning = sdhci_msm_execute_tuning;
- ret = sdhci_add_host(host);
+ if (of_property_read_bool(node, "supports-cqe"))
+ ret = sdhci_msm_cqe_add_host(host, pdev);
+ else
+ ret = sdhci_add_host(host);
if (ret)
goto pm_runtime_disable;
sdhci_msm_set_regulator_caps(msm_host);
diff --git a/drivers/mmc/host/sdhci-of-at91.c b/drivers/mmc/host/sdhci-of-at91.c
index 5959e394b416..ab2bd314a390 100644
--- a/drivers/mmc/host/sdhci-of-at91.c
+++ b/drivers/mmc/host/sdhci-of-at91.c
@@ -33,7 +33,14 @@
#define SDHCI_AT91_PRESET_COMMON_CONF 0x400 /* drv type B, programmable clock mode */
+struct sdhci_at91_soc_data {
+ const struct sdhci_pltfm_data *pdata;
+ bool baseclk_is_generated_internally;
+ unsigned int divider_for_baseclk;
+};
+
struct sdhci_at91_priv {
+ const struct sdhci_at91_soc_data *soc_data;
struct clk *hclock;
struct clk *gck;
struct clk *mainck;
@@ -141,12 +148,24 @@ static const struct sdhci_ops sdhci_at91_sama5d2_ops = {
.set_power = sdhci_at91_set_power,
};
-static const struct sdhci_pltfm_data soc_data_sama5d2 = {
+static const struct sdhci_pltfm_data sdhci_sama5d2_pdata = {
.ops = &sdhci_at91_sama5d2_ops,
};
+static const struct sdhci_at91_soc_data soc_data_sama5d2 = {
+ .pdata = &sdhci_sama5d2_pdata,
+ .baseclk_is_generated_internally = false,
+};
+
+static const struct sdhci_at91_soc_data soc_data_sam9x60 = {
+ .pdata = &sdhci_sama5d2_pdata,
+ .baseclk_is_generated_internally = true,
+ .divider_for_baseclk = 2,
+};
+
static const struct of_device_id sdhci_at91_dt_match[] = {
{ .compatible = "atmel,sama5d2-sdhci", .data = &soc_data_sama5d2 },
+ { .compatible = "microchip,sam9x60-sdhci", .data = &soc_data_sam9x60 },
{}
};
MODULE_DEVICE_TABLE(of, sdhci_at91_dt_match);
@@ -156,50 +175,37 @@ static int sdhci_at91_set_clks_presets(struct device *dev)
struct sdhci_host *host = dev_get_drvdata(dev);
struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
struct sdhci_at91_priv *priv = sdhci_pltfm_priv(pltfm_host);
- int ret;
unsigned int caps0, caps1;
unsigned int clk_base, clk_mul;
- unsigned int gck_rate, real_gck_rate;
+ unsigned int gck_rate, clk_base_rate;
unsigned int preset_div;
- /*
- * The mult clock is provided by as a generated clock by the PMC
- * controller. In order to set the rate of gck, we have to get the
- * base clock rate and the clock mult from capabilities.
- */
clk_prepare_enable(priv->hclock);
caps0 = readl(host->ioaddr + SDHCI_CAPABILITIES);
caps1 = readl(host->ioaddr + SDHCI_CAPABILITIES_1);
- clk_base = (caps0 & SDHCI_CLOCK_V3_BASE_MASK) >> SDHCI_CLOCK_BASE_SHIFT;
- clk_mul = (caps1 & SDHCI_CLOCK_MUL_MASK) >> SDHCI_CLOCK_MUL_SHIFT;
- gck_rate = clk_base * 1000000 * (clk_mul + 1);
- ret = clk_set_rate(priv->gck, gck_rate);
- if (ret < 0) {
- dev_err(dev, "failed to set gck");
- clk_disable_unprepare(priv->hclock);
- return ret;
- }
- /*
- * We need to check if we have the requested rate for gck because in
- * some cases this rate could be not supported. If it happens, the rate
- * is the closest one gck can provide. We have to update the value
- * of clk mul.
- */
- real_gck_rate = clk_get_rate(priv->gck);
- if (real_gck_rate != gck_rate) {
- clk_mul = real_gck_rate / (clk_base * 1000000) - 1;
- caps1 &= (~SDHCI_CLOCK_MUL_MASK);
- caps1 |= ((clk_mul << SDHCI_CLOCK_MUL_SHIFT) &
- SDHCI_CLOCK_MUL_MASK);
- /* Set capabilities in r/w mode. */
- writel(SDMMC_CACR_KEY | SDMMC_CACR_CAPWREN,
- host->ioaddr + SDMMC_CACR);
- writel(caps1, host->ioaddr + SDHCI_CAPABILITIES_1);
- /* Set capabilities in ro mode. */
- writel(0, host->ioaddr + SDMMC_CACR);
- dev_info(dev, "update clk mul to %u as gck rate is %u Hz\n",
- clk_mul, real_gck_rate);
- }
+
+ gck_rate = clk_get_rate(priv->gck);
+ if (priv->soc_data->baseclk_is_generated_internally)
+ clk_base_rate = gck_rate / priv->soc_data->divider_for_baseclk;
+ else
+ clk_base_rate = clk_get_rate(priv->mainck);
+
+ clk_base = clk_base_rate / 1000000;
+ clk_mul = gck_rate / clk_base_rate - 1;
+
+ caps0 &= ~SDHCI_CLOCK_V3_BASE_MASK;
+ caps0 |= (clk_base << SDHCI_CLOCK_BASE_SHIFT) & SDHCI_CLOCK_V3_BASE_MASK;
+ caps1 &= ~SDHCI_CLOCK_MUL_MASK;
+ caps1 |= (clk_mul << SDHCI_CLOCK_MUL_SHIFT) & SDHCI_CLOCK_MUL_MASK;
+ /* Set capabilities in r/w mode. */
+ writel(SDMMC_CACR_KEY | SDMMC_CACR_CAPWREN, host->ioaddr + SDMMC_CACR);
+ writel(caps0, host->ioaddr + SDHCI_CAPABILITIES);
+ writel(caps1, host->ioaddr + SDHCI_CAPABILITIES_1);
+ /* Set capabilities in ro mode. */
+ writel(0, host->ioaddr + SDMMC_CACR);
+
+ dev_info(dev, "update clk mul to %u as gck rate is %u Hz and clk base is %u Hz\n",
+ clk_mul, gck_rate, clk_base_rate);
/*
* We have to set preset values because it depends on the clk_mul
@@ -207,19 +213,19 @@ static int sdhci_at91_set_clks_presets(struct device *dev)
* maximum sd clock value is 120 MHz instead of 208 MHz. For that
* reason, we need to use presets to support SDR104.
*/
- preset_div = DIV_ROUND_UP(real_gck_rate, 24000000) - 1;
+ preset_div = DIV_ROUND_UP(gck_rate, 24000000) - 1;
writew(SDHCI_AT91_PRESET_COMMON_CONF | preset_div,
host->ioaddr + SDHCI_PRESET_FOR_SDR12);
- preset_div = DIV_ROUND_UP(real_gck_rate, 50000000) - 1;
+ preset_div = DIV_ROUND_UP(gck_rate, 50000000) - 1;
writew(SDHCI_AT91_PRESET_COMMON_CONF | preset_div,
host->ioaddr + SDHCI_PRESET_FOR_SDR25);
- preset_div = DIV_ROUND_UP(real_gck_rate, 100000000) - 1;
+ preset_div = DIV_ROUND_UP(gck_rate, 100000000) - 1;
writew(SDHCI_AT91_PRESET_COMMON_CONF | preset_div,
host->ioaddr + SDHCI_PRESET_FOR_SDR50);
- preset_div = DIV_ROUND_UP(real_gck_rate, 120000000) - 1;
+ preset_div = DIV_ROUND_UP(gck_rate, 120000000) - 1;
writew(SDHCI_AT91_PRESET_COMMON_CONF | preset_div,
host->ioaddr + SDHCI_PRESET_FOR_SDR104);
- preset_div = DIV_ROUND_UP(real_gck_rate, 50000000) - 1;
+ preset_div = DIV_ROUND_UP(gck_rate, 50000000) - 1;
writew(SDHCI_AT91_PRESET_COMMON_CONF | preset_div,
host->ioaddr + SDHCI_PRESET_FOR_DDR50);
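
Each preset stores (divisor - 1), so the divided gck never exceeds the mode's ceiling. A worked example with an assumed 240 MHz gck:

	/* SDR50 cap 100 MHz: DIV_ROUND_UP(240000000, 100000000) - 1 = 2,
	 * i.e. divide-by-3, giving 80 MHz <= 100 MHz */
	preset_div = DIV_ROUND_UP(240000000, 100000000) - 1;
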
@@ -314,7 +320,7 @@ static const struct dev_pm_ops sdhci_at91_dev_pm_ops = {
static int sdhci_at91_probe(struct platform_device *pdev)
{
const struct of_device_id *match;
- const struct sdhci_pltfm_data *soc_data;
+ const struct sdhci_at91_soc_data *soc_data;
struct sdhci_host *host;
struct sdhci_pltfm_host *pltfm_host;
struct sdhci_at91_priv *priv;
@@ -325,29 +331,37 @@ static int sdhci_at91_probe(struct platform_device *pdev)
return -EINVAL;
soc_data = match->data;
- host = sdhci_pltfm_init(pdev, soc_data, sizeof(*priv));
+ host = sdhci_pltfm_init(pdev, soc_data->pdata, sizeof(*priv));
if (IS_ERR(host))
return PTR_ERR(host);
pltfm_host = sdhci_priv(host);
priv = sdhci_pltfm_priv(pltfm_host);
+ priv->soc_data = soc_data;
priv->mainck = devm_clk_get(&pdev->dev, "baseclk");
if (IS_ERR(priv->mainck)) {
- dev_err(&pdev->dev, "failed to get baseclk\n");
- return PTR_ERR(priv->mainck);
+ if (soc_data->baseclk_is_generated_internally) {
+ priv->mainck = NULL;
+ } else {
+ dev_err(&pdev->dev, "failed to get baseclk\n");
+ ret = PTR_ERR(priv->mainck);
+ goto sdhci_pltfm_free;
+ }
}
priv->hclock = devm_clk_get(&pdev->dev, "hclock");
if (IS_ERR(priv->hclock)) {
dev_err(&pdev->dev, "failed to get hclock\n");
- return PTR_ERR(priv->hclock);
+ ret = PTR_ERR(priv->hclock);
+ goto sdhci_pltfm_free;
}
priv->gck = devm_clk_get(&pdev->dev, "multclk");
if (IS_ERR(priv->gck)) {
dev_err(&pdev->dev, "failed to get multclk\n");
- return PTR_ERR(priv->gck);
+ ret = PTR_ERR(priv->gck);
+ goto sdhci_pltfm_free;
}
ret = sdhci_at91_set_clks_presets(&pdev->dev);
diff --git a/drivers/mmc/host/sdhci-of-esdhc.c b/drivers/mmc/host/sdhci-of-esdhc.c
index 500f70a6ee42..5d8dd870bd44 100644
--- a/drivers/mmc/host/sdhci-of-esdhc.c
+++ b/drivers/mmc/host/sdhci-of-esdhc.c
@@ -173,6 +173,9 @@ static u16 esdhc_readw_fixup(struct sdhci_host *host,
u16 ret;
int shift = (spec_reg & 0x2) * 8;
+ if (spec_reg == SDHCI_TRANSFER_MODE)
+ return pltfm_host->xfer_mode_shadow;
+
if (spec_reg == SDHCI_HOST_VERSION)
ret = value & 0xffff;
else
@@ -562,32 +565,46 @@ static unsigned int esdhc_of_get_min_clock(struct sdhci_host *host)
static void esdhc_clock_enable(struct sdhci_host *host, bool enable)
{
- u32 val;
+ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+ struct sdhci_esdhc *esdhc = sdhci_pltfm_priv(pltfm_host);
ktime_t timeout;
+ u32 val, clk_en;
+
+ clk_en = ESDHC_CLOCK_SDCLKEN;
+
+ /*
+ * IPGEN/HCKEN/PEREN bits exist on eSDHC whose vendor version
+ * is 2.2 or lower.
+ */
+ if (esdhc->vendor_ver <= VENDOR_V_22)
+ clk_en |= (ESDHC_CLOCK_IPGEN | ESDHC_CLOCK_HCKEN |
+ ESDHC_CLOCK_PEREN);
val = sdhci_readl(host, ESDHC_SYSTEM_CONTROL);
if (enable)
- val |= ESDHC_CLOCK_SDCLKEN;
+ val |= clk_en;
else
- val &= ~ESDHC_CLOCK_SDCLKEN;
+ val &= ~clk_en;
sdhci_writel(host, val, ESDHC_SYSTEM_CONTROL);
- /* Wait max 20 ms */
+ /*
+ * Wait max 20 ms. If the vendor version is 2.2 or lower, do not
+ * wait for the clock stable bit, which does not exist there.
+ */
timeout = ktime_add_ms(ktime_get(), 20);
- val = ESDHC_CLOCK_STABLE;
- while (1) {
+ while (esdhc->vendor_ver > VENDOR_V_22) {
bool timedout = ktime_after(ktime_get(), timeout);
- if (sdhci_readl(host, ESDHC_PRSSTAT) & val)
+ if (sdhci_readl(host, ESDHC_PRSSTAT) & ESDHC_CLOCK_STABLE)
break;
if (timedout) {
pr_err("%s: Internal clock never stabilised.\n",
mmc_hostname(host->mmc));
break;
}
- udelay(10);
+ usleep_range(10, 20);
}
}
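
The open-coded ktime loop (now sleeping via usleep_range(), as this path may sleep) could equally be written with the iopoll helpers available in newer kernels; a sketch, assuming <linux/iopoll.h>:

	u32 val;

	/* poll PRSSTAT every 10-20 us, time out after 20 ms */
	if (read_poll_timeout(sdhci_readl, val, val & ESDHC_CLOCK_STABLE,
			      10, 20000, false, host, ESDHC_PRSSTAT))
		pr_err("%s: Internal clock never stabilised.\n",
		       mmc_hostname(host->mmc));
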
@@ -621,77 +638,97 @@ static void esdhc_of_set_clock(struct sdhci_host *host, unsigned int clock)
{
struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
struct sdhci_esdhc *esdhc = sdhci_pltfm_priv(pltfm_host);
- int pre_div = 1;
- int div = 1;
- int division;
+ unsigned int pre_div = 1, div = 1;
+ unsigned int clock_fixup = 0;
ktime_t timeout;
- long fixup = 0;
u32 temp;
- host->mmc->actual_clock = 0;
-
if (clock == 0) {
+ host->mmc->actual_clock = 0;
esdhc_clock_enable(host, false);
return;
}
- /* Workaround to start pre_div at 2 for VNN < VENDOR_V_23 */
+ /* Start pre_div at 2 for vendor version < 2.3. */
if (esdhc->vendor_ver < VENDOR_V_23)
pre_div = 2;
+ /* Fix clock value. */
if (host->mmc->card && mmc_card_sd(host->mmc->card) &&
- esdhc->clk_fixup && host->mmc->ios.timing == MMC_TIMING_LEGACY)
- fixup = esdhc->clk_fixup->sd_dflt_max_clk;
+ esdhc->clk_fixup && host->mmc->ios.timing == MMC_TIMING_LEGACY)
+ clock_fixup = esdhc->clk_fixup->sd_dflt_max_clk;
else if (esdhc->clk_fixup)
- fixup = esdhc->clk_fixup->max_clk[host->mmc->ios.timing];
-
- if (fixup && clock > fixup)
- clock = fixup;
+ clock_fixup = esdhc->clk_fixup->max_clk[host->mmc->ios.timing];
- temp = sdhci_readl(host, ESDHC_SYSTEM_CONTROL);
- temp &= ~(ESDHC_CLOCK_SDCLKEN | ESDHC_CLOCK_IPGEN | ESDHC_CLOCK_HCKEN |
- ESDHC_CLOCK_PEREN | ESDHC_CLOCK_MASK);
- sdhci_writel(host, temp, ESDHC_SYSTEM_CONTROL);
+ if (clock_fixup == 0 || clock < clock_fixup)
+ clock_fixup = clock;
- while (host->max_clk / pre_div / 16 > clock && pre_div < 256)
+ /* Calculate pre_div and div. */
+ while (host->max_clk / pre_div / 16 > clock_fixup && pre_div < 256)
pre_div *= 2;
- while (host->max_clk / pre_div / div > clock && div < 16)
+ while (host->max_clk / pre_div / div > clock_fixup && div < 16)
div++;
+ esdhc->div_ratio = pre_div * div;
+
+ /* Limit the clock division for the HS400 200 MHz clock when the quirk is set. */
if (esdhc->quirk_limited_clk_division &&
clock == MMC_HS200_MAX_DTR &&
(host->mmc->ios.timing == MMC_TIMING_MMC_HS400 ||
host->flags & SDHCI_HS400_TUNING)) {
- division = pre_div * div;
- if (division <= 4) {
+ if (esdhc->div_ratio <= 4) {
pre_div = 4;
div = 1;
- } else if (division <= 8) {
+ } else if (esdhc->div_ratio <= 8) {
pre_div = 4;
div = 2;
- } else if (division <= 12) {
+ } else if (esdhc->div_ratio <= 12) {
pre_div = 4;
div = 3;
} else {
pr_warn("%s: using unsupported clock division.\n",
mmc_hostname(host->mmc));
}
+ esdhc->div_ratio = pre_div * div;
}
+ host->mmc->actual_clock = host->max_clk / esdhc->div_ratio;
+
dev_dbg(mmc_dev(host->mmc), "desired SD clock: %d, actual: %d\n",
- clock, host->max_clk / pre_div / div);
- host->mmc->actual_clock = host->max_clk / pre_div / div;
- esdhc->div_ratio = pre_div * div;
+ clock, host->mmc->actual_clock);
+
+ /* Set clock division into register. */
pre_div >>= 1;
div--;
+ esdhc_clock_enable(host, false);
+
temp = sdhci_readl(host, ESDHC_SYSTEM_CONTROL);
- temp |= (ESDHC_CLOCK_IPGEN | ESDHC_CLOCK_HCKEN | ESDHC_CLOCK_PEREN
- | (div << ESDHC_DIVIDER_SHIFT)
- | (pre_div << ESDHC_PREDIV_SHIFT));
+ temp &= ~ESDHC_CLOCK_MASK;
+ temp |= ((div << ESDHC_DIVIDER_SHIFT) |
+ (pre_div << ESDHC_PREDIV_SHIFT));
sdhci_writel(host, temp, ESDHC_SYSTEM_CONTROL);
+ /*
+ * Wait max 20 ms. If the vendor version is 2.2 or lower, do not
+ * wait for the clock stable bit, which does not exist there.
+ */
+ timeout = ktime_add_ms(ktime_get(), 20);
+ while (esdhc->vendor_ver > VENDOR_V_22) {
+ bool timedout = ktime_after(ktime_get(), timeout);
+
+ if (sdhci_readl(host, ESDHC_PRSSTAT) & ESDHC_CLOCK_STABLE)
+ break;
+ if (timedout) {
+ pr_err("%s: Internal clock never stabilised.\n",
+ mmc_hostname(host->mmc));
+ break;
+ }
+ usleep_range(10, 20);
+ }
+
+ /* Additional setting for HS400. */
if (host->mmc->ios.timing == MMC_TIMING_MMC_HS400 &&
clock == MMC_HS200_MAX_DTR) {
temp = sdhci_readl(host, ESDHC_TBCTL);
@@ -711,25 +748,7 @@ static void esdhc_of_set_clock(struct sdhci_host *host, unsigned int clock)
esdhc_clock_enable(host, false);
esdhc_flush_async_fifo(host);
}
-
- /* Wait max 20 ms */
- timeout = ktime_add_ms(ktime_get(), 20);
- while (1) {
- bool timedout = ktime_after(ktime_get(), timeout);
-
- if (sdhci_readl(host, ESDHC_PRSSTAT) & ESDHC_CLOCK_STABLE)
- break;
- if (timedout) {
- pr_err("%s: Internal clock never stabilised.\n",
- mmc_hostname(host->mmc));
- return;
- }
- udelay(10);
- }
-
- temp = sdhci_readl(host, ESDHC_SYSTEM_CONTROL);
- temp |= ESDHC_CLOCK_SDCLKEN;
- sdhci_writel(host, temp, ESDHC_SYSTEM_CONTROL);
+ esdhc_clock_enable(host, true);
}
static void esdhc_pltfm_set_bus_width(struct sdhci_host *host, int width)
@@ -758,23 +777,58 @@ static void esdhc_reset(struct sdhci_host *host, u8 mask)
{
struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
struct sdhci_esdhc *esdhc = sdhci_pltfm_priv(pltfm_host);
- u32 val;
+ u32 val, bus_width = 0;
+ /*
+ * Add a delay to make sure all DMA transfers are finished,
+ * as required by the quirk.
+ */
if (esdhc->quirk_delay_before_data_reset &&
(mask & SDHCI_RESET_DATA) &&
(host->flags & SDHCI_REQ_USE_DMA))
mdelay(5);
+ /*
+ * Save the bus-width setting across a data reset on eSDHC
+ * whose vendor version is 2.2 or lower.
+ */
+ if ((mask & SDHCI_RESET_DATA) &&
+ (esdhc->vendor_ver <= VENDOR_V_22)) {
+ val = sdhci_readl(host, ESDHC_PROCTL);
+ bus_width = val & ESDHC_CTRL_BUSWIDTH_MASK;
+ }
+
sdhci_reset(host, mask);
- sdhci_writel(host, host->ier, SDHCI_INT_ENABLE);
- sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE);
+ /*
+ * Restore the bus-width setting and interrupt registers after a
+ * data reset on eSDHC whose vendor version is 2.2 or lower.
+ */
+ if ((mask & SDHCI_RESET_DATA) &&
+ (esdhc->vendor_ver <= VENDOR_V_22)) {
+ val = sdhci_readl(host, ESDHC_PROCTL);
+ val &= ~ESDHC_CTRL_BUSWIDTH_MASK;
+ val |= bus_width;
+ sdhci_writel(host, val, ESDHC_PROCTL);
- if (mask & SDHCI_RESET_ALL) {
+ sdhci_writel(host, host->ier, SDHCI_INT_ENABLE);
+ sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE);
+ }
+
+ /*
+ * Some bits have to be cleared manually on a full reset for eSDHC
+ * whose spec version is 3.0 or higher.
+ */
+ if ((mask & SDHCI_RESET_ALL) &&
+ (esdhc->spec_ver >= SDHCI_SPEC_300)) {
val = sdhci_readl(host, ESDHC_TBCTL);
val &= ~ESDHC_TB_EN;
sdhci_writel(host, val, ESDHC_TBCTL);
+ /*
+ * Initialize eSDHC_DLLCFG1[DLL_PD_PULSE_STRETCH_SEL] to 0
+ * when the unreliable pulse detection quirk is set.
+ */
if (esdhc->quirk_unreliable_pulse_detection) {
val = sdhci_readl(host, ESDHC_DLLCFG1);
val &= ~ESDHC_DLL_PD_PULSE_STRETCH_SEL;
@@ -854,20 +908,20 @@ static int esdhc_signal_voltage_switch(struct mmc_host *mmc,
}
static struct soc_device_attribute soc_tuning_erratum_type1[] = {
- { .family = "QorIQ T1023", .revision = "1.0", },
- { .family = "QorIQ T1040", .revision = "1.0", },
- { .family = "QorIQ T2080", .revision = "1.0", },
- { .family = "QorIQ LS1021A", .revision = "1.0", },
+ { .family = "QorIQ T1023", },
+ { .family = "QorIQ T1040", },
+ { .family = "QorIQ T2080", },
+ { .family = "QorIQ LS1021A", },
{ },
};
static struct soc_device_attribute soc_tuning_erratum_type2[] = {
- { .family = "QorIQ LS1012A", .revision = "1.0", },
- { .family = "QorIQ LS1043A", .revision = "1.*", },
- { .family = "QorIQ LS1046A", .revision = "1.0", },
- { .family = "QorIQ LS1080A", .revision = "1.0", },
- { .family = "QorIQ LS2080A", .revision = "1.0", },
- { .family = "QorIQ LA1575A", .revision = "1.0", },
+ { .family = "QorIQ LS1012A", },
+ { .family = "QorIQ LS1043A", },
+ { .family = "QorIQ LS1046A", },
+ { .family = "QorIQ LS1080A", },
+ { .family = "QorIQ LS2080A", },
+ { .family = "QorIQ LA1575A", },
{ },
};
@@ -888,20 +942,11 @@ static void esdhc_tuning_block_enable(struct sdhci_host *host, bool enable)
esdhc_clock_enable(host, true);
}
-static void esdhc_prepare_sw_tuning(struct sdhci_host *host, u8 *window_start,
+static void esdhc_tuning_window_ptr(struct sdhci_host *host, u8 *window_start,
u8 *window_end)
{
- struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
- struct sdhci_esdhc *esdhc = sdhci_pltfm_priv(pltfm_host);
- u8 tbstat_15_8, tbstat_7_0;
u32 val;
- if (esdhc->quirk_tuning_erratum_type1) {
- *window_start = 5 * esdhc->div_ratio;
- *window_end = 3 * esdhc->div_ratio;
- return;
- }
-
/* Write TBCTL[11:8]=4'h8 */
val = sdhci_readl(host, ESDHC_TBCTL);
val &= ~(0xf << 8);
@@ -920,20 +965,37 @@ static void esdhc_prepare_sw_tuning(struct sdhci_host *host, u8 *window_start,
val = sdhci_readl(host, ESDHC_TBSTAT);
val = sdhci_readl(host, ESDHC_TBSTAT);
+ *window_end = val & 0xff;
+ *window_start = (val >> 8) & 0xff;
+}
+
+static void esdhc_prepare_sw_tuning(struct sdhci_host *host, u8 *window_start,
+ u8 *window_end)
+{
+ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+ struct sdhci_esdhc *esdhc = sdhci_pltfm_priv(pltfm_host);
+ u8 start_ptr, end_ptr;
+
+ if (esdhc->quirk_tuning_erratum_type1) {
+ *window_start = 5 * esdhc->div_ratio;
+ *window_end = 3 * esdhc->div_ratio;
+ return;
+ }
+
+ esdhc_tuning_window_ptr(host, &start_ptr, &end_ptr);
+
/* Reset data lines by setting ESDHCCTL[RSTD] */
sdhci_reset(host, SDHCI_RESET_DATA);
/* Write 32'hFFFF_FFFF to IRQSTAT register */
sdhci_writel(host, 0xFFFFFFFF, SDHCI_INT_STATUS);
- /* If TBSTAT[15:8]-TBSTAT[7:0] > 4 * div_ratio
- * or TBSTAT[7:0]-TBSTAT[15:8] > 4 * div_ratio,
+ /* If TBSTAT[15:8]-TBSTAT[7:0] > (4 * div_ratio) + 2
+ * or TBSTAT[7:0]-TBSTAT[15:8] > (4 * div_ratio) + 2,
* then program TBPTR[TB_WNDW_END_PTR] = 4 * div_ratio
* and program TBPTR[TB_WNDW_START_PTR] = 8 * div_ratio.
*/
- tbstat_7_0 = val & 0xff;
- tbstat_15_8 = (val >> 8) & 0xff;
- if (abs(tbstat_15_8 - tbstat_7_0) > (4 * esdhc->div_ratio)) {
+ if (abs(start_ptr - end_ptr) > (4 * esdhc->div_ratio + 2)) {
*window_start = 8 * esdhc->div_ratio;
*window_end = 4 * esdhc->div_ratio;
} else {
@@ -1006,6 +1068,19 @@ static int esdhc_execute_tuning(struct mmc_host *mmc, u32 opcode)
if (ret)
break;
+ /* On platforms affected by the type-2 tuning erratum, tuning
+ * may report success even though eSDHC has not tuned properly,
+ * so verify the tuning window.
+ */
+ if (esdhc->quirk_tuning_erratum_type2 &&
+ !host->tuning_err) {
+ esdhc_tuning_window_ptr(host, &window_start,
+ &window_end);
+ if (abs(window_start - window_end) >
+ (4 * esdhc->div_ratio + 2))
+ host->tuning_err = -EAGAIN;
+ }
+
/* If HW tuning fails and triggers erratum,
* try workaround.
*/
@@ -1238,7 +1313,8 @@ static void esdhc_init(struct platform_device *pdev, struct sdhci_host *host)
* 1/2 peripheral clock.
*/
if (of_device_is_compatible(np, "fsl,ls1046a-esdhc") ||
- of_device_is_compatible(np, "fsl,ls1028a-esdhc"))
+ of_device_is_compatible(np, "fsl,ls1028a-esdhc") ||
+ of_device_is_compatible(np, "fsl,ls1088a-esdhc"))
esdhc->peripheral_clock = clk_get_rate(clk) / 2;
else
esdhc->peripheral_clock = clk_get_rate(clk);
diff --git a/drivers/mmc/host/sdhci-omap.c b/drivers/mmc/host/sdhci-omap.c
index 083e7e053c95..882053151a47 100644
--- a/drivers/mmc/host/sdhci-omap.c
+++ b/drivers/mmc/host/sdhci-omap.c
@@ -7,6 +7,7 @@
*/
#include <linux/delay.h>
+#include <linux/mmc/mmc.h>
#include <linux/mmc/slot-gpio.h>
#include <linux/module.h>
#include <linux/of.h>
@@ -85,6 +86,7 @@
/* sdhci-omap controller flags */
#define SDHCI_OMAP_REQUIRE_IODELAY BIT(0)
+#define SDHCI_OMAP_SPECIAL_RESET BIT(1)
struct sdhci_omap_data {
u32 offset;
@@ -685,7 +687,11 @@ static int sdhci_omap_enable_dma(struct sdhci_host *host)
struct sdhci_omap_host *omap_host = sdhci_pltfm_priv(pltfm_host);
reg = sdhci_omap_readl(omap_host, SDHCI_OMAP_CON);
- reg |= CON_DMA_MASTER;
+ reg &= ~CON_DMA_MASTER;
+ /* Switch to DMA slave mode when using external DMA */
+ if (!host->use_external_dma)
+ reg |= CON_DMA_MASTER;
+
sdhci_omap_writel(omap_host, SDHCI_OMAP_CON, reg);
return 0;
@@ -774,15 +780,35 @@ static void sdhci_omap_set_uhs_signaling(struct sdhci_host *host,
sdhci_omap_start_clock(omap_host);
}
+#define MMC_TIMEOUT_US 20000 /* 20000 microseconds */
static void sdhci_omap_reset(struct sdhci_host *host, u8 mask)
{
struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
struct sdhci_omap_host *omap_host = sdhci_pltfm_priv(pltfm_host);
+ unsigned long limit = MMC_TIMEOUT_US;
+ unsigned long i = 0;
/* Don't reset data lines during tuning operation */
if (omap_host->is_tuning)
mask &= ~SDHCI_RESET_DATA;
+ if (omap_host->flags & SDHCI_OMAP_SPECIAL_RESET) {
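+ /*
+ * Poll until the written reset bit is observed as set, then
+ * until the controller clears it when the reset completes.
+ */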
+ sdhci_writeb(host, mask, SDHCI_SOFTWARE_RESET);
+ while ((!(sdhci_readb(host, SDHCI_SOFTWARE_RESET) & mask)) &&
+ (i++ < limit))
+ udelay(1);
+ i = 0;
+ while ((sdhci_readb(host, SDHCI_SOFTWARE_RESET) & mask) &&
+ (i++ < limit))
+ udelay(1);
+
+ if (sdhci_readb(host, SDHCI_SOFTWARE_RESET) & mask)
+ dev_err(mmc_dev(host->mmc),
+ "Timeout waiting on controller reset in %s\n",
+ __func__);
+ return;
+ }
+
sdhci_reset(host, mask);
}
@@ -823,6 +849,15 @@ static u32 sdhci_omap_irq(struct sdhci_host *host, u32 intmask)
return intmask;
}
+static void sdhci_omap_set_timeout(struct sdhci_host *host,
+ struct mmc_command *cmd)
+{
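+ /*
+ * Erase commands can legitimately run longer than the maximum
+ * hardware data timeout, so mask the data timeout interrupt for
+ * MMC_ERASE and let the software request timer bound the wait.
+ */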
+ if (cmd->opcode == MMC_ERASE)
+ sdhci_set_data_timeout_irq(host, false);
+
+ __sdhci_set_timeout(host, cmd);
+}
+
static struct sdhci_ops sdhci_omap_ops = {
.set_clock = sdhci_omap_set_clock,
.set_power = sdhci_omap_set_power,
@@ -834,6 +869,7 @@ static struct sdhci_ops sdhci_omap_ops = {
.reset = sdhci_omap_reset,
.set_uhs_signaling = sdhci_omap_set_uhs_signaling,
.irq = sdhci_omap_irq,
+ .set_timeout = sdhci_omap_set_timeout,
};
static int sdhci_omap_set_capabilities(struct sdhci_omap_host *omap_host)
@@ -883,6 +919,16 @@ static const struct sdhci_omap_data k2g_data = {
.offset = 0x200,
};
+static const struct sdhci_omap_data am335_data = {
+ .offset = 0x200,
+ .flags = SDHCI_OMAP_SPECIAL_RESET,
+};
+
+static const struct sdhci_omap_data am437_data = {
+ .offset = 0x200,
+ .flags = SDHCI_OMAP_SPECIAL_RESET,
+};
+
static const struct sdhci_omap_data dra7_data = {
.offset = 0x200,
.flags = SDHCI_OMAP_REQUIRE_IODELAY,
@@ -891,6 +937,8 @@ static const struct sdhci_omap_data dra7_data = {
static const struct of_device_id omap_sdhci_match[] = {
{ .compatible = "ti,dra7-sdhci", .data = &dra7_data },
{ .compatible = "ti,k2g-sdhci", .data = &k2g_data },
+ { .compatible = "ti,am335-sdhci", .data = &am335_data },
+ { .compatible = "ti,am437-sdhci", .data = &am437_data },
{},
};
MODULE_DEVICE_TABLE(of, omap_sdhci_match);
@@ -1037,6 +1085,7 @@ static int sdhci_omap_probe(struct platform_device *pdev)
const struct of_device_id *match;
struct sdhci_omap_data *data;
const struct soc_device_attribute *soc;
+ struct resource *regs;
match = of_match_device(omap_sdhci_match, dev);
if (!match)
@@ -1049,6 +1098,10 @@ static int sdhci_omap_probe(struct platform_device *pdev)
}
offset = data->offset;
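+ /* Keep the physical base address; host->mapbase is needed to set up external DMA. */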
+ regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!regs)
+ return -ENXIO;
+
host = sdhci_pltfm_init(pdev, &sdhci_omap_pdata,
sizeof(*omap_host));
if (IS_ERR(host)) {
@@ -1065,6 +1118,7 @@ static int sdhci_omap_probe(struct platform_device *pdev)
omap_host->timing = MMC_TIMING_LEGACY;
omap_host->flags = data->flags;
host->ioaddr += offset;
+ host->mapbase = regs->start + offset;
mmc = host->mmc;
sdhci_get_of_property(pdev);
@@ -1134,6 +1188,10 @@ static int sdhci_omap_probe(struct platform_device *pdev)
host->mmc_host_ops.execute_tuning = sdhci_omap_execute_tuning;
host->mmc_host_ops.enable_sdio_irq = sdhci_omap_enable_sdio_irq;
+ /* Switch to external DMA only if there is the "dmas" property */
+ if (of_find_property(dev->of_node, "dmas", NULL))
+ sdhci_switch_external_dma(host, true);
+
ret = sdhci_setup_host(host);
if (ret)
goto err_put_sync;
diff --git a/drivers/mmc/host/sdhci-pci-core.c b/drivers/mmc/host/sdhci-pci-core.c
index 5091e2c1c0e5..525de2454a4d 100644
--- a/drivers/mmc/host/sdhci-pci-core.c
+++ b/drivers/mmc/host/sdhci-pci-core.c
@@ -1991,12 +1991,12 @@ static struct sdhci_pci_slot *sdhci_pci_probe_slot(
if (slot->cd_idx >= 0) {
ret = mmc_gpiod_request_cd(host->mmc, "cd", slot->cd_idx,
- slot->cd_override_level, 0, NULL);
+ slot->cd_override_level, 0);
if (ret && ret != -EPROBE_DEFER)
ret = mmc_gpiod_request_cd(host->mmc, NULL,
slot->cd_idx,
slot->cd_override_level,
- 0, NULL);
+ 0);
if (ret == -EPROBE_DEFER)
goto remove;
diff --git a/drivers/mmc/host/sdhci-s3c.c b/drivers/mmc/host/sdhci-s3c.c
index 51e096f27388..64200c78e90d 100644
--- a/drivers/mmc/host/sdhci-s3c.c
+++ b/drivers/mmc/host/sdhci-s3c.c
@@ -117,7 +117,6 @@ struct sdhci_s3c {
struct s3c_sdhci_platdata *pdata;
int cur_clk;
int ext_cd_irq;
- int ext_cd_gpio;
struct clk *clk_io;
struct clk *clk_bus[MAX_BUS_CLK];
@@ -481,7 +480,6 @@ static int sdhci_s3c_probe(struct platform_device *pdev)
struct device *dev = &pdev->dev;
struct sdhci_host *host;
struct sdhci_s3c *sc;
- struct resource *res;
int ret, irq, ptr, clks;
if (!pdev->dev.platform_data && !pdev->dev.of_node) {
@@ -512,7 +510,6 @@ static int sdhci_s3c_probe(struct platform_device *pdev)
goto err_pdata_io_clk;
} else {
memcpy(pdata, pdev->dev.platform_data, sizeof(*pdata));
- sc->ext_cd_gpio = -1; /* invalid gpio number */
}
drv_data = sdhci_s3c_get_driver_data(pdev);
@@ -555,8 +552,7 @@ static int sdhci_s3c_probe(struct platform_device *pdev)
goto err_no_busclks;
}
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- host->ioaddr = devm_ioremap_resource(&pdev->dev, res);
+ host->ioaddr = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(host->ioaddr)) {
ret = PTR_ERR(host->ioaddr);
goto err_req_regs;
diff --git a/drivers/mmc/host/sdhci-sirf.c b/drivers/mmc/host/sdhci-sirf.c
index e43143223320..f4b05dd6c20a 100644
--- a/drivers/mmc/host/sdhci-sirf.c
+++ b/drivers/mmc/host/sdhci-sirf.c
@@ -194,7 +194,7 @@ static int sdhci_sirf_probe(struct platform_device *pdev)
* We must request the IRQ after sdhci_add_host(), as the tasklet only
* gets set up in sdhci_add_host() and we oops.
*/
- ret = mmc_gpiod_request_cd(host->mmc, "cd", 0, false, 0, NULL);
+ ret = mmc_gpiod_request_cd(host->mmc, "cd", 0, false, 0);
if (ret == -EPROBE_DEFER)
goto err_request_cd;
if (!ret)
diff --git a/drivers/mmc/host/sdhci-spear.c b/drivers/mmc/host/sdhci-spear.c
index 916b5b09c3d1..b4b63089a4e2 100644
--- a/drivers/mmc/host/sdhci-spear.c
+++ b/drivers/mmc/host/sdhci-spear.c
@@ -43,7 +43,6 @@ static const struct sdhci_ops sdhci_pltfm_ops = {
static int sdhci_probe(struct platform_device *pdev)
{
struct sdhci_host *host;
- struct resource *iomem;
struct spear_sdhci *sdhci;
struct device *dev;
int ret;
@@ -56,8 +55,7 @@ static int sdhci_probe(struct platform_device *pdev)
goto err;
}
- iomem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- host->ioaddr = devm_ioremap_resource(&pdev->dev, iomem);
+ host->ioaddr = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(host->ioaddr)) {
ret = PTR_ERR(host->ioaddr);
dev_dbg(&pdev->dev, "unable to map iomem: %d\n", ret);
@@ -98,7 +96,7 @@ static int sdhci_probe(struct platform_device *pdev)
* It is optional to use GPIOs for sdhci card detection. If we
* find a descriptor using slot GPIO, we use it.
*/
- ret = mmc_gpiod_request_cd(host->mmc, "cd", 0, false, 0, NULL);
+ ret = mmc_gpiod_request_cd(host->mmc, "cd", 0, false, 0);
if (ret == -EPROBE_DEFER)
goto disable_clk;
diff --git a/drivers/mmc/host/sdhci-tegra.c b/drivers/mmc/host/sdhci-tegra.c
index 7bc950520fd9..403ac44a7378 100644
--- a/drivers/mmc/host/sdhci-tegra.c
+++ b/drivers/mmc/host/sdhci-tegra.c
@@ -386,7 +386,7 @@ static void tegra_sdhci_reset(struct sdhci_host *host, u8 mask)
misc_ctrl |= SDHCI_MISC_CTRL_ENABLE_DDR50;
if (soc_data->nvquirks & NVQUIRK_ENABLE_SDR104)
misc_ctrl |= SDHCI_MISC_CTRL_ENABLE_SDR104;
- if (soc_data->nvquirks & SDHCI_MISC_CTRL_ENABLE_SDR50)
+ if (soc_data->nvquirks & NVQUIRK_ENABLE_SDR50)
clk_ctrl |= SDHCI_CLOCK_CTRL_SDR50_TUNING_OVERRIDE;
}
diff --git a/drivers/mmc/host/sdhci.c b/drivers/mmc/host/sdhci.c
index 1b1c26da3fe0..63db84481dff 100644
--- a/drivers/mmc/host/sdhci.c
+++ b/drivers/mmc/host/sdhci.c
@@ -10,6 +10,7 @@
*/
#include <linux/delay.h>
+#include <linux/dmaengine.h>
#include <linux/ktime.h>
#include <linux/highmem.h>
#include <linux/io.h>
@@ -992,7 +993,7 @@ static void sdhci_set_transfer_irqs(struct sdhci_host *host)
sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE);
}
-static void sdhci_set_data_timeout_irq(struct sdhci_host *host, bool enable)
+void sdhci_set_data_timeout_irq(struct sdhci_host *host, bool enable)
{
if (enable)
host->ier |= SDHCI_INT_DATA_TIMEOUT;
@@ -1001,42 +1002,36 @@ static void sdhci_set_data_timeout_irq(struct sdhci_host *host, bool enable)
sdhci_writel(host, host->ier, SDHCI_INT_ENABLE);
sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE);
}
+EXPORT_SYMBOL_GPL(sdhci_set_data_timeout_irq);
-static void sdhci_set_timeout(struct sdhci_host *host, struct mmc_command *cmd)
+void __sdhci_set_timeout(struct sdhci_host *host, struct mmc_command *cmd)
{
- u8 count;
-
- if (host->ops->set_timeout) {
- host->ops->set_timeout(host, cmd);
- } else {
- bool too_big = false;
-
- count = sdhci_calc_timeout(host, cmd, &too_big);
-
- if (too_big &&
- host->quirks2 & SDHCI_QUIRK2_DISABLE_HW_TIMEOUT) {
- sdhci_calc_sw_timeout(host, cmd);
- sdhci_set_data_timeout_irq(host, false);
- } else if (!(host->ier & SDHCI_INT_DATA_TIMEOUT)) {
- sdhci_set_data_timeout_irq(host, true);
- }
-
- sdhci_writeb(host, count, SDHCI_TIMEOUT_CONTROL);
+ bool too_big = false;
+ u8 count = sdhci_calc_timeout(host, cmd, &too_big);
+
+ if (too_big &&
+ host->quirks2 & SDHCI_QUIRK2_DISABLE_HW_TIMEOUT) {
+ sdhci_calc_sw_timeout(host, cmd);
+ sdhci_set_data_timeout_irq(host, false);
+ } else if (!(host->ier & SDHCI_INT_DATA_TIMEOUT)) {
+ sdhci_set_data_timeout_irq(host, true);
}
+
+ sdhci_writeb(host, count, SDHCI_TIMEOUT_CONTROL);
}
+EXPORT_SYMBOL_GPL(__sdhci_set_timeout);
-static void sdhci_prepare_data(struct sdhci_host *host, struct mmc_command *cmd)
+static void sdhci_set_timeout(struct sdhci_host *host, struct mmc_command *cmd)
{
- struct mmc_data *data = cmd->data;
-
- host->data_timeout = 0;
-
- if (sdhci_data_line_cmd(cmd))
- sdhci_set_timeout(host, cmd);
-
- if (!data)
- return;
+ if (host->ops->set_timeout)
+ host->ops->set_timeout(host, cmd);
+ else
+ __sdhci_set_timeout(host, cmd);
+}
+static void sdhci_initialize_data(struct sdhci_host *host,
+ struct mmc_data *data)
+{
WARN_ON(host->data);
/* Sanity checks */
@@ -1047,6 +1042,34 @@ static void sdhci_prepare_data(struct sdhci_host *host, struct mmc_command *cmd)
host->data = data;
host->data_early = 0;
host->data->bytes_xfered = 0;
+}
+
+static inline void sdhci_set_block_info(struct sdhci_host *host,
+ struct mmc_data *data)
+{
+ /* Set the DMA boundary value and block size */
+ sdhci_writew(host,
+ SDHCI_MAKE_BLKSZ(host->sdma_boundary, data->blksz),
+ SDHCI_BLOCK_SIZE);
+ /*
+ * For Version 4.10 onwards, if v4 mode is enabled, 32-bit Block Count
+ * can be supported, in that case 16-bit block count register must be 0.
+ */
+ if (host->version >= SDHCI_SPEC_410 && host->v4_mode &&
+ (host->quirks2 & SDHCI_QUIRK2_USE_32BIT_BLK_CNT)) {
+ if (sdhci_readw(host, SDHCI_BLOCK_COUNT))
+ sdhci_writew(host, 0, SDHCI_BLOCK_COUNT);
+ sdhci_writew(host, data->blocks, SDHCI_32BIT_BLK_CNT);
+ } else {
+ sdhci_writew(host, data->blocks, SDHCI_BLOCK_COUNT);
+ }
+}
+
+static void sdhci_prepare_data(struct sdhci_host *host, struct mmc_command *cmd)
+{
+ struct mmc_data *data = cmd->data;
+
+ sdhci_initialize_data(host, data);
if (host->flags & (SDHCI_USE_SDMA | SDHCI_USE_ADMA)) {
struct scatterlist *sg;
@@ -1133,24 +1156,192 @@ static void sdhci_prepare_data(struct sdhci_host *host, struct mmc_command *cmd)
sdhci_set_transfer_irqs(host);
- /* Set the DMA boundary value and block size */
- sdhci_writew(host, SDHCI_MAKE_BLKSZ(host->sdma_boundary, data->blksz),
- SDHCI_BLOCK_SIZE);
+ sdhci_set_block_info(host, data);
+}
- /*
- * For Version 4.10 onwards, if v4 mode is enabled, 32-bit Block Count
- * can be supported, in that case 16-bit block count register must be 0.
- */
- if (host->version >= SDHCI_SPEC_410 && host->v4_mode &&
- (host->quirks2 & SDHCI_QUIRK2_USE_32BIT_BLK_CNT)) {
- if (sdhci_readw(host, SDHCI_BLOCK_COUNT))
- sdhci_writew(host, 0, SDHCI_BLOCK_COUNT);
- sdhci_writew(host, data->blocks, SDHCI_32BIT_BLK_CNT);
+#if IS_ENABLED(CONFIG_MMC_SDHCI_EXTERNAL_DMA)
+
+static int sdhci_external_dma_init(struct sdhci_host *host)
+{
+ int ret = 0;
+ struct mmc_host *mmc = host->mmc;
+
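+ /* The "tx"/"rx" channels are typically described by the device's dmas/dma-names properties. */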
+ host->tx_chan = dma_request_chan(mmc->parent, "tx");
+ if (IS_ERR(host->tx_chan)) {
+ ret = PTR_ERR(host->tx_chan);
+ if (ret != -EPROBE_DEFER)
+ pr_warn("Failed to request TX DMA channel.\n");
+ host->tx_chan = NULL;
+ return ret;
+ }
+
+ host->rx_chan = dma_request_chan(mmc->parent, "rx");
+ if (IS_ERR(host->rx_chan)) {
+ if (host->tx_chan) {
+ dma_release_channel(host->tx_chan);
+ host->tx_chan = NULL;
+ }
+
+ ret = PTR_ERR(host->rx_chan);
+ if (ret != -EPROBE_DEFER)
+ pr_warn("Failed to request RX DMA channel.\n");
+ host->rx_chan = NULL;
+ }
+
+ return ret;
+}
+
+static struct dma_chan *sdhci_external_dma_channel(struct sdhci_host *host,
+ struct mmc_data *data)
+{
+ return data->flags & MMC_DATA_WRITE ? host->tx_chan : host->rx_chan;
+}
+
+static int sdhci_external_dma_setup(struct sdhci_host *host,
+ struct mmc_command *cmd)
+{
+ int ret, i;
+ enum dma_transfer_direction dir;
+ struct dma_async_tx_descriptor *desc;
+ struct mmc_data *data = cmd->data;
+ struct dma_chan *chan;
+ struct dma_slave_config cfg = { };
+ dma_cookie_t cookie;
+ int sg_cnt;
+
+ if (!host->mapbase)
+ return -EINVAL;
+
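+ /*
+ * Both directions target the SDHCI data port; the actual slave
+ * direction is selected when the descriptor is prepared below.
+ */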
+ cfg.src_addr = host->mapbase + SDHCI_BUFFER;
+ cfg.dst_addr = host->mapbase + SDHCI_BUFFER;
+ cfg.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
+ cfg.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
+ cfg.src_maxburst = data->blksz / 4;
+ cfg.dst_maxburst = data->blksz / 4;
+
+ /* Sanity check: every SG entry length must be a multiple of the block size. */
+ for (i = 0; i < data->sg_len; i++) {
+ if ((data->sg + i)->length % data->blksz)
+ return -EINVAL;
+ }
+
+ chan = sdhci_external_dma_channel(host, data);
+
+ ret = dmaengine_slave_config(chan, &cfg);
+ if (ret)
+ return ret;
+
+ sg_cnt = sdhci_pre_dma_transfer(host, data, COOKIE_MAPPED);
+ if (sg_cnt <= 0)
+ return -EINVAL;
+
+ dir = data->flags & MMC_DATA_WRITE ? DMA_MEM_TO_DEV : DMA_DEV_TO_MEM;
+ desc = dmaengine_prep_slave_sg(chan, data->sg, data->sg_len, dir,
+ DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
+ if (!desc)
+ return -EINVAL;
+
+ desc->callback = NULL;
+ desc->callback_param = NULL;
+
+ cookie = dmaengine_submit(desc);
+ if (dma_submit_error(cookie))
+ ret = cookie;
+
+ return ret;
+}
+
+static void sdhci_external_dma_release(struct sdhci_host *host)
+{
+ if (host->tx_chan) {
+ dma_release_channel(host->tx_chan);
+ host->tx_chan = NULL;
+ }
+
+ if (host->rx_chan) {
+ dma_release_channel(host->rx_chan);
+ host->rx_chan = NULL;
+ }
+
+ sdhci_switch_external_dma(host, false);
+}
+
+static void __sdhci_external_dma_prepare_data(struct sdhci_host *host,
+ struct mmc_command *cmd)
+{
+ struct mmc_data *data = cmd->data;
+
+ sdhci_initialize_data(host, data);
+
+ host->flags |= SDHCI_REQ_USE_DMA;
+ sdhci_set_transfer_irqs(host);
+
+ sdhci_set_block_info(host, data);
+}
+
+static void sdhci_external_dma_prepare_data(struct sdhci_host *host,
+ struct mmc_command *cmd)
+{
+ if (!sdhci_external_dma_setup(host, cmd)) {
+ __sdhci_external_dma_prepare_data(host, cmd);
} else {
- sdhci_writew(host, data->blocks, SDHCI_BLOCK_COUNT);
+ sdhci_external_dma_release(host);
+ pr_err("%s: Cannot use external DMA, falling back to the standard SDHCI DMA/PIO.\n",
+ mmc_hostname(host->mmc));
+ sdhci_prepare_data(host, cmd);
}
}
+static void sdhci_external_dma_pre_transfer(struct sdhci_host *host,
+ struct mmc_command *cmd)
+{
+ struct dma_chan *chan;
+
+ if (!cmd->data)
+ return;
+
+ chan = sdhci_external_dma_channel(host, cmd->data);
+ if (chan)
+ dma_async_issue_pending(chan);
+}
+
+#else
+
+static inline int sdhci_external_dma_init(struct sdhci_host *host)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline void sdhci_external_dma_release(struct sdhci_host *host)
+{
+}
+
+static inline void sdhci_external_dma_prepare_data(struct sdhci_host *host,
+ struct mmc_command *cmd)
+{
+ /* This should never happen */
+ WARN_ON_ONCE(1);
+}
+
+static inline void sdhci_external_dma_pre_transfer(struct sdhci_host *host,
+ struct mmc_command *cmd)
+{
+}
+
+static inline struct dma_chan *sdhci_external_dma_channel(struct sdhci_host *host,
+ struct mmc_data *data)
+{
+ return NULL;
+}
+
+#endif
+
+void sdhci_switch_external_dma(struct sdhci_host *host, bool en)
+{
+ host->use_external_dma = en;
+}
+EXPORT_SYMBOL_GPL(sdhci_switch_external_dma);
+
static inline bool sdhci_auto_cmd12(struct sdhci_host *host,
struct mmc_request *mrq)
{
@@ -1245,22 +1436,10 @@ static bool sdhci_needs_reset(struct sdhci_host *host, struct mmc_request *mrq)
(host->quirks & SDHCI_QUIRK_RESET_AFTER_REQUEST)));
}
-static void __sdhci_finish_mrq(struct sdhci_host *host, struct mmc_request *mrq)
+static void sdhci_set_mrq_done(struct sdhci_host *host, struct mmc_request *mrq)
{
int i;
- if (host->cmd && host->cmd->mrq == mrq)
- host->cmd = NULL;
-
- if (host->data_cmd && host->data_cmd->mrq == mrq)
- host->data_cmd = NULL;
-
- if (host->data && host->data->mrq == mrq)
- host->data = NULL;
-
- if (sdhci_needs_reset(host, mrq))
- host->pending_reset = true;
-
for (i = 0; i < SDHCI_MAX_MRQS; i++) {
if (host->mrqs_done[i] == mrq) {
WARN_ON(1);
@@ -1276,6 +1455,23 @@ static void __sdhci_finish_mrq(struct sdhci_host *host, struct mmc_request *mrq)
}
WARN_ON(i >= SDHCI_MAX_MRQS);
+}
+
+static void __sdhci_finish_mrq(struct sdhci_host *host, struct mmc_request *mrq)
+{
+ if (host->cmd && host->cmd->mrq == mrq)
+ host->cmd = NULL;
+
+ if (host->data_cmd && host->data_cmd->mrq == mrq)
+ host->data_cmd = NULL;
+
+ if (host->data && host->data->mrq == mrq)
+ host->data = NULL;
+
+ if (sdhci_needs_reset(host, mrq))
+ host->pending_reset = true;
+
+ sdhci_set_mrq_done(host, mrq);
sdhci_del_timer(host, mrq);
@@ -1326,12 +1522,12 @@ static void sdhci_finish_data(struct sdhci_host *host)
/*
* Need to send CMD12 if -
- * a) open-ended multiblock transfer (no CMD23)
+ * a) open-ended multiblock transfer not using auto CMD12 (no CMD23)
* b) error in multiblock transfer
*/
if (data->stop &&
- (data->error ||
- !data->mrq->sbc)) {
+ ((!data->mrq->sbc && !sdhci_auto_cmd12(host, data->mrq)) ||
+ data->error)) {
/*
* 'cap_cmd_during_tfr' request must not use the command line
* after mmc_command_done() has been called. It is upper layer's
@@ -1390,12 +1586,19 @@ void sdhci_send_command(struct sdhci_host *host, struct mmc_command *cmd)
}
host->cmd = cmd;
+ host->data_timeout = 0;
if (sdhci_data_line_cmd(cmd)) {
WARN_ON(host->data_cmd);
host->data_cmd = cmd;
+ sdhci_set_timeout(host, cmd);
}
- sdhci_prepare_data(host, cmd);
+ if (cmd->data) {
+ if (host->use_external_dma)
+ sdhci_external_dma_prepare_data(host, cmd);
+ else
+ sdhci_prepare_data(host, cmd);
+ }
sdhci_writel(host, cmd->arg, SDHCI_ARGUMENT);
@@ -1437,6 +1640,9 @@ void sdhci_send_command(struct sdhci_host *host, struct mmc_command *cmd)
timeout += 10 * HZ;
sdhci_mod_timer(host, cmd->mrq, timeout);
+ if (host->use_external_dma)
+ sdhci_external_dma_pre_transfer(host, cmd);
+
sdhci_writew(host, SDHCI_MAKE_CMD(cmd->opcode, flags), SDHCI_COMMAND);
}
EXPORT_SYMBOL_GPL(sdhci_send_command);
@@ -1825,17 +2031,6 @@ void sdhci_request(struct mmc_host *mmc, struct mmc_request *mrq)
sdhci_led_activate(host);
- /*
- * Ensure we don't send the STOP for non-SET_BLOCK_COUNTED
- * requests if Auto-CMD12 is enabled.
- */
- if (sdhci_auto_cmd12(host, mrq)) {
- if (mrq->stop) {
- mrq->data->stop = NULL;
- mrq->stop = NULL;
- }
- }
-
if (!present || host->flags & SDHCI_DEVICE_DEAD) {
mrq->cmd->error = -ENOMEDIUM;
sdhci_finish_mrq(host, mrq);
@@ -2661,6 +2856,17 @@ static bool sdhci_request_done(struct sdhci_host *host)
if (host->flags & SDHCI_REQ_USE_DMA) {
struct mmc_data *data = mrq->data;
+ if (host->use_external_dma && data &&
+ (mrq->cmd->error || data->error)) {
+ struct dma_chan *chan = sdhci_external_dma_channel(host, data);
+
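+ /*
+ * dmaengine_terminate_sync() may sleep, so drop the host
+ * lock around the channel teardown.
+ */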
+ host->mrqs_done[i] = NULL;
+ spin_unlock_irqrestore(&host->lock, flags);
+ dmaengine_terminate_sync(chan);
+ spin_lock_irqsave(&host->lock, flags);
+ sdhci_set_mrq_done(host, mrq);
+ }
+
if (data && data->host_cookie == COOKIE_MAPPED) {
if (host->bounce_buffer) {
/*
@@ -3796,6 +4002,21 @@ int sdhci_setup_host(struct sdhci_host *host)
if (sdhci_can_64bit_dma(host))
host->flags |= SDHCI_USE_64_BIT_DMA;
+ if (host->use_external_dma) {
+ ret = sdhci_external_dma_init(host);
+ if (ret == -EPROBE_DEFER)
+ goto unreg;
+ /*
+ * Fall back to the DMA/PIO integrated in the standard SDHCI
+ * instead of an external DMA device.
+ */
+ else if (ret)
+ sdhci_switch_external_dma(host, false);
+ /* Disable internal DMA sources */
+ else
+ host->flags &= ~(SDHCI_USE_SDMA | SDHCI_USE_ADMA);
+ }
+
if (host->flags & (SDHCI_USE_SDMA | SDHCI_USE_ADMA)) {
if (host->ops->set_dma_mask)
ret = host->ops->set_dma_mask(host);
@@ -3822,15 +4043,13 @@ int sdhci_setup_host(struct sdhci_host *host)
dma_addr_t dma;
void *buf;
- if (host->flags & SDHCI_USE_64_BIT_DMA) {
- host->adma_table_sz = host->adma_table_cnt *
- SDHCI_ADMA2_64_DESC_SZ(host);
- host->desc_sz = SDHCI_ADMA2_64_DESC_SZ(host);
- } else {
- host->adma_table_sz = host->adma_table_cnt *
- SDHCI_ADMA2_32_DESC_SZ;
- host->desc_sz = SDHCI_ADMA2_32_DESC_SZ;
- }
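+ /*
+ * For 64-bit DMA, honour a descriptor size the driver may
+ * have preset in alloc_desc_sz; otherwise use the spec
+ * default.
+ */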
+ if (!(host->flags & SDHCI_USE_64_BIT_DMA))
+ host->alloc_desc_sz = SDHCI_ADMA2_32_DESC_SZ;
+ else if (!host->alloc_desc_sz)
+ host->alloc_desc_sz = SDHCI_ADMA2_64_DESC_SZ(host);
+
+ host->desc_sz = host->alloc_desc_sz;
+ host->adma_table_sz = host->adma_table_cnt * host->desc_sz;
host->align_buffer_sz = SDHCI_MAX_SEGS * SDHCI_ADMA2_ALIGN;
/*
@@ -3913,11 +4132,13 @@ int sdhci_setup_host(struct sdhci_host *host)
if (host->ops->get_min_clock)
mmc->f_min = host->ops->get_min_clock(host);
else if (host->version >= SDHCI_SPEC_300) {
- if (host->clk_mul) {
- mmc->f_min = (host->max_clk * host->clk_mul) / 1024;
+ if (host->clk_mul)
max_clk = host->max_clk * host->clk_mul;
- } else
- mmc->f_min = host->max_clk / SDHCI_MAX_DIV_SPEC_300;
+ /*
+ * Divided Clock Mode minimum clock rate is always less than
+ * Programmable Clock Mode minimum clock rate.
+ */
+ mmc->f_min = host->max_clk / SDHCI_MAX_DIV_SPEC_300;
} else
mmc->f_min = host->max_clk / SDHCI_MAX_DIV_SPEC_200;
@@ -4276,6 +4497,10 @@ void sdhci_cleanup_host(struct sdhci_host *host)
dma_free_coherent(mmc_dev(mmc), host->align_buffer_sz +
host->adma_table_sz, host->align_buffer,
host->align_addr);
+
+ if (host->use_external_dma)
+ sdhci_external_dma_release(host);
+
host->adma_table = NULL;
host->align_buffer = NULL;
}
@@ -4321,6 +4546,7 @@ int __sdhci_add_host(struct sdhci_host *host)
pr_info("%s: SDHCI controller on %s [%s] using %s\n",
mmc_hostname(mmc), host->hw_name, dev_name(mmc_dev(mmc)),
+ host->use_external_dma ? "External DMA" :
(host->flags & SDHCI_USE_ADMA) ?
(host->flags & SDHCI_USE_64_BIT_DMA) ? "ADMA 64-bit" : "ADMA" :
(host->flags & SDHCI_USE_SDMA) ? "DMA" : "PIO");
@@ -4409,6 +4635,9 @@ void sdhci_remove_host(struct sdhci_host *host, int dead)
host->adma_table_sz, host->align_buffer,
host->align_addr);
+ if (host->use_external_dma)
+ sdhci_external_dma_release(host);
+
host->adma_table = NULL;
host->align_buffer = NULL;
}
diff --git a/drivers/mmc/host/sdhci.h b/drivers/mmc/host/sdhci.h
index fe83ece6965b..a6a3ddcf97e7 100644
--- a/drivers/mmc/host/sdhci.h
+++ b/drivers/mmc/host/sdhci.h
@@ -487,6 +487,7 @@ struct sdhci_host {
int irq; /* Device IRQ */
void __iomem *ioaddr; /* Mapped address */
+ phys_addr_t mapbase; /* physical address base */
char *bounce_buffer; /* For packing SDMA reads/writes */
dma_addr_t bounce_addr;
unsigned int bounce_buffer_size;
@@ -535,6 +536,7 @@ struct sdhci_host {
bool pending_reset; /* Cmd/data reset is pending */
bool irq_wake_enabled; /* IRQ wakeup is enabled */
bool v4_mode; /* Host Version 4 Enable */
+ bool use_external_dma; /* Host selects to use external DMA */
struct mmc_request *mrqs_done[SDHCI_MAX_MRQS]; /* Requests done */
struct mmc_command *cmd; /* Current command */
@@ -556,7 +558,8 @@ struct sdhci_host {
dma_addr_t adma_addr; /* Mapped ADMA descr. table */
dma_addr_t align_addr; /* Mapped bounce buffer */
- unsigned int desc_sz; /* ADMA descriptor size */
+ unsigned int desc_sz; /* ADMA current descriptor size */
+ unsigned int alloc_desc_sz; /* ADMA descr. max size host supports */
struct workqueue_struct *complete_wq; /* Request completion wq */
struct work_struct complete_work; /* Request completion work */
@@ -564,6 +567,11 @@ struct sdhci_host {
struct timer_list timer; /* Timer for timeouts */
struct timer_list data_timer; /* Timer for data timeouts */
+#if IS_ENABLED(CONFIG_MMC_SDHCI_EXTERNAL_DMA)
+ struct dma_chan *rx_chan;
+ struct dma_chan *tx_chan;
+#endif
+
u32 caps; /* CAPABILITY_0 */
u32 caps1; /* CAPABILITY_1 */
bool read_caps; /* Capability flags have been read */
@@ -795,5 +803,8 @@ void sdhci_end_tuning(struct sdhci_host *host);
void sdhci_reset_tuning(struct sdhci_host *host);
void sdhci_send_tuning(struct sdhci_host *host, u32 opcode);
void sdhci_abort_tuning(struct sdhci_host *host, u32 opcode);
+void sdhci_switch_external_dma(struct sdhci_host *host, bool en);
+void sdhci_set_data_timeout_irq(struct sdhci_host *host, bool enable);
+void __sdhci_set_timeout(struct sdhci_host *host, struct mmc_command *cmd);
#endif /* __SDHCI_HW_H */
diff --git a/drivers/mmc/host/sdhci_am654.c b/drivers/mmc/host/sdhci_am654.c
index b8e897e31e2e..3afea580fbea 100644
--- a/drivers/mmc/host/sdhci_am654.c
+++ b/drivers/mmc/host/sdhci_am654.c
@@ -240,6 +240,35 @@ static void sdhci_am654_write_b(struct sdhci_host *host, u8 val, int reg)
writeb(val, host->ioaddr + reg);
}
+static int sdhci_am654_execute_tuning(struct mmc_host *mmc, u32 opcode)
+{
+ struct sdhci_host *host = mmc_priv(mmc);
+ int err = sdhci_execute_tuning(mmc, opcode);
+
+ if (err)
+ return err;
+ /*
+ * Tuning data remains in the buffer after tuning.
+ * Do a command and data reset to get rid of it.
+ */
+ sdhci_reset(host, SDHCI_RESET_CMD | SDHCI_RESET_DATA);
+
+ return 0;
+}
+
+static u32 sdhci_am654_cqhci_irq(struct sdhci_host *host, u32 intmask)
+{
+ int cmd_error = 0;
+ int data_error = 0;
+
+ if (!sdhci_cqe_irq(host, intmask, &cmd_error, &data_error))
+ return intmask;
+
+ cqhci_irq(host->mmc, intmask, cmd_error, data_error);
+
+ return 0;
+}
+
static struct sdhci_ops sdhci_am654_ops = {
.get_max_clock = sdhci_pltfm_clk_get_max_clock,
.get_timeout_clock = sdhci_pltfm_clk_get_max_clock,
@@ -248,13 +277,13 @@ static struct sdhci_ops sdhci_am654_ops = {
.set_power = sdhci_am654_set_power,
.set_clock = sdhci_am654_set_clock,
.write_b = sdhci_am654_write_b,
+ .irq = sdhci_am654_cqhci_irq,
.reset = sdhci_reset,
};
static const struct sdhci_pltfm_data sdhci_am654_pdata = {
.ops = &sdhci_am654_ops,
- .quirks = SDHCI_QUIRK_INVERTED_WRITE_PROTECT |
- SDHCI_QUIRK_MULTIBLOCK_READ_ACMD12,
+ .quirks = SDHCI_QUIRK_MULTIBLOCK_READ_ACMD12,
.quirks2 = SDHCI_QUIRK2_PRESET_VALUE_BROKEN,
};
@@ -263,19 +292,6 @@ static const struct sdhci_am654_driver_data sdhci_am654_drvdata = {
.flags = IOMUX_PRESENT | FREQSEL_2_BIT | STRBSEL_4_BIT | DLL_PRESENT,
};
-static u32 sdhci_am654_cqhci_irq(struct sdhci_host *host, u32 intmask)
-{
- int cmd_error = 0;
- int data_error = 0;
-
- if (!sdhci_cqe_irq(host, intmask, &cmd_error, &data_error))
- return intmask;
-
- cqhci_irq(host->mmc, intmask, cmd_error, data_error);
-
- return 0;
-}
-
static struct sdhci_ops sdhci_j721e_8bit_ops = {
.get_max_clock = sdhci_pltfm_clk_get_max_clock,
.get_timeout_clock = sdhci_pltfm_clk_get_max_clock,
@@ -290,8 +306,7 @@ static struct sdhci_ops sdhci_j721e_8bit_ops = {
static const struct sdhci_pltfm_data sdhci_j721e_8bit_pdata = {
.ops = &sdhci_j721e_8bit_ops,
- .quirks = SDHCI_QUIRK_INVERTED_WRITE_PROTECT |
- SDHCI_QUIRK_MULTIBLOCK_READ_ACMD12,
+ .quirks = SDHCI_QUIRK_MULTIBLOCK_READ_ACMD12,
.quirks2 = SDHCI_QUIRK2_PRESET_VALUE_BROKEN,
};
@@ -314,8 +329,7 @@ static struct sdhci_ops sdhci_j721e_4bit_ops = {
static const struct sdhci_pltfm_data sdhci_j721e_4bit_pdata = {
.ops = &sdhci_j721e_4bit_ops,
- .quirks = SDHCI_QUIRK_INVERTED_WRITE_PROTECT |
- SDHCI_QUIRK_MULTIBLOCK_READ_ACMD12,
+ .quirks = SDHCI_QUIRK_MULTIBLOCK_READ_ACMD12,
.quirks2 = SDHCI_QUIRK2_PRESET_VALUE_BROKEN,
};
@@ -491,7 +505,6 @@ static int sdhci_am654_probe(struct platform_device *pdev)
struct sdhci_am654_data *sdhci_am654;
const struct of_device_id *match;
struct sdhci_host *host;
- struct resource *res;
struct clk *clk_xin;
struct device *dev = &pdev->dev;
void __iomem *base;
@@ -524,8 +537,7 @@ static int sdhci_am654_probe(struct platform_device *pdev)
goto pm_runtime_disable;
}
- res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
- base = devm_ioremap_resource(dev, res);
+ base = devm_platform_ioremap_resource(pdev, 1);
if (IS_ERR(base)) {
ret = PTR_ERR(base);
goto pm_runtime_put;
@@ -549,6 +561,8 @@ static int sdhci_am654_probe(struct platform_device *pdev)
goto pm_runtime_put;
}
+ host->mmc_host_ops.execute_tuning = sdhci_am654_execute_tuning;
+
ret = sdhci_am654_init(host);
if (ret)
goto pm_runtime_put;
diff --git a/drivers/mmc/host/sdhci_f_sdh30.c b/drivers/mmc/host/sdhci_f_sdh30.c
index fa0dfc657c22..4625cc071b61 100644
--- a/drivers/mmc/host/sdhci_f_sdh30.c
+++ b/drivers/mmc/host/sdhci_f_sdh30.c
@@ -89,7 +89,6 @@ static int sdhci_f_sdh30_probe(struct platform_device *pdev)
{
struct sdhci_host *host;
struct device *dev = &pdev->dev;
- struct resource *res;
int irq, ctrl = 0, ret = 0;
struct f_sdhost_priv *priv;
u32 reg = 0;
@@ -123,8 +122,7 @@ static int sdhci_f_sdh30_probe(struct platform_device *pdev)
host->ops = &sdhci_f_sdh30_ops;
host->irq = irq;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- host->ioaddr = devm_ioremap_resource(&pdev->dev, res);
+ host->ioaddr = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(host->ioaddr)) {
ret = PTR_ERR(host->ioaddr);
goto err;
diff --git a/drivers/mmc/host/sh_mmcif.c b/drivers/mmc/host/sh_mmcif.c
index 98c575de43c7..7e1fd557109c 100644
--- a/drivers/mmc/host/sh_mmcif.c
+++ b/drivers/mmc/host/sh_mmcif.c
@@ -432,8 +432,12 @@ static void sh_mmcif_request_dma(struct sh_mmcif_host *host)
host->chan_rx = sh_mmcif_request_dma_pdata(host,
pdata->slave_id_rx);
} else {
- host->chan_tx = dma_request_slave_channel(dev, "tx");
- host->chan_rx = dma_request_slave_channel(dev, "rx");
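+ /*
+ * dma_request_chan() returns an ERR_PTR on failure; treat that
+ * as "no channel" so the driver falls back to PIO.
+ */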
+ host->chan_tx = dma_request_chan(dev, "tx");
+ if (IS_ERR(host->chan_tx))
+ host->chan_tx = NULL;
+ host->chan_rx = dma_request_chan(dev, "rx");
+ if (IS_ERR(host->chan_rx))
+ host->chan_rx = NULL;
}
dev_dbg(dev, "%s: got channel TX %p RX %p\n", __func__, host->chan_tx,
host->chan_rx);
@@ -1388,7 +1392,6 @@ static int sh_mmcif_probe(struct platform_device *pdev)
struct sh_mmcif_host *host;
struct device *dev = &pdev->dev;
struct sh_mmcif_plat_data *pd = dev->platform_data;
- struct resource *res;
void __iomem *reg;
const char *name;
@@ -1397,8 +1400,7 @@ static int sh_mmcif_probe(struct platform_device *pdev)
if (irq[0] < 0)
return -ENXIO;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- reg = devm_ioremap_resource(dev, res);
+ reg = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(reg))
return PTR_ERR(reg);
diff --git a/drivers/mmc/host/sunxi-mmc.c b/drivers/mmc/host/sunxi-mmc.c
index d577a6b0ceae..f87d7967457f 100644
--- a/drivers/mmc/host/sunxi-mmc.c
+++ b/drivers/mmc/host/sunxi-mmc.c
@@ -1273,8 +1273,7 @@ static int sunxi_mmc_resource_request(struct sunxi_mmc_host *host,
if (ret)
return ret;
- host->reg_base = devm_ioremap_resource(&pdev->dev,
- platform_get_resource(pdev, IORESOURCE_MEM, 0));
+ host->reg_base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(host->reg_base))
return PTR_ERR(host->reg_base);
diff --git a/drivers/mmc/host/tmio_mmc_core.c b/drivers/mmc/host/tmio_mmc_core.c
index c4a1d49fbea4..1e424bcdbd5f 100644
--- a/drivers/mmc/host/tmio_mmc_core.c
+++ b/drivers/mmc/host/tmio_mmc_core.c
@@ -1109,12 +1109,10 @@ struct tmio_mmc_host *tmio_mmc_host_alloc(struct platform_device *pdev,
{
struct tmio_mmc_host *host;
struct mmc_host *mmc;
- struct resource *res;
void __iomem *ctl;
int ret;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- ctl = devm_ioremap_resource(&pdev->dev, res);
+ ctl = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(ctl))
return ERR_CAST(ctl);
@@ -1181,7 +1179,7 @@ int tmio_mmc_host_probe(struct tmio_mmc_host *_host)
* Look for a card detect GPIO, if it fails with anything
* else than a probe deferral, just live without it.
*/
- ret = mmc_gpiod_request_cd(mmc, "cd", 0, false, 0, NULL);
+ ret = mmc_gpiod_request_cd(mmc, "cd", 0, false, 0);
if (ret == -EPROBE_DEFER)
return ret;
diff --git a/drivers/mmc/host/uniphier-sd.c b/drivers/mmc/host/uniphier-sd.c
index 0c72ec5546c3..a1683c49cb90 100644
--- a/drivers/mmc/host/uniphier-sd.c
+++ b/drivers/mmc/host/uniphier-sd.c
@@ -59,7 +59,6 @@
struct uniphier_sd_priv {
struct tmio_mmc_data tmio_data;
struct pinctrl *pinctrl;
- struct pinctrl_state *pinstate_default;
struct pinctrl_state *pinstate_uhs;
struct clk *clk;
struct reset_control *rst;
@@ -500,13 +499,12 @@ static int uniphier_sd_start_signal_voltage_switch(struct mmc_host *mmc,
{
struct tmio_mmc_host *host = mmc_priv(mmc);
struct uniphier_sd_priv *priv = uniphier_sd_priv(host);
- struct pinctrl_state *pinstate;
+ struct pinctrl_state *pinstate = NULL;
u32 val, tmp;
switch (ios->signal_voltage) {
case MMC_SIGNAL_VOLTAGE_330:
val = UNIPHIER_SD_VOLT_330;
- pinstate = priv->pinstate_default;
break;
case MMC_SIGNAL_VOLTAGE_180:
val = UNIPHIER_SD_VOLT_180;
@@ -521,7 +519,10 @@ static int uniphier_sd_start_signal_voltage_switch(struct mmc_host *mmc,
tmp |= FIELD_PREP(UNIPHIER_SD_VOLT_MASK, val);
writel(tmp, host->ctl + UNIPHIER_SD_VOLT);
- pinctrl_select_state(priv->pinctrl, pinstate);
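+ /* 3.3V has no dedicated pinctrl state; fall back to the default state. */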
+ if (pinstate)
+ pinctrl_select_state(priv->pinctrl, pinstate);
+ else
+ pinctrl_select_default_state(mmc_dev(mmc));
return 0;
}
@@ -533,11 +534,6 @@ static int uniphier_sd_uhs_init(struct tmio_mmc_host *host,
if (IS_ERR(priv->pinctrl))
return PTR_ERR(priv->pinctrl);
- priv->pinstate_default = pinctrl_lookup_state(priv->pinctrl,
- PINCTRL_STATE_DEFAULT);
- if (IS_ERR(priv->pinstate_default))
- return PTR_ERR(priv->pinstate_default);
-
priv->pinstate_uhs = pinctrl_lookup_state(priv->pinctrl, "uhs");
if (IS_ERR(priv->pinstate_uhs))
return PTR_ERR(priv->pinstate_uhs);
diff --git a/drivers/mmc/host/usdhi6rol0.c b/drivers/mmc/host/usdhi6rol0.c
index b11ac2314328..9a0b1e4e405d 100644
--- a/drivers/mmc/host/usdhi6rol0.c
+++ b/drivers/mmc/host/usdhi6rol0.c
@@ -199,7 +199,6 @@ struct usdhi6_host {
/* Pin control */
struct pinctrl *pinctrl;
- struct pinctrl_state *pins_default;
struct pinctrl_state *pins_uhs;
};
@@ -677,12 +676,14 @@ static void usdhi6_dma_request(struct usdhi6_host *host, phys_addr_t start)
};
int ret;
- host->chan_tx = dma_request_slave_channel(mmc_dev(host->mmc), "tx");
+ host->chan_tx = dma_request_chan(mmc_dev(host->mmc), "tx");
dev_dbg(mmc_dev(host->mmc), "%s: TX: got channel %p\n", __func__,
host->chan_tx);
- if (!host->chan_tx)
+ if (IS_ERR(host->chan_tx)) {
+ host->chan_tx = NULL;
return;
+ }
cfg.direction = DMA_MEM_TO_DEV;
cfg.dst_addr = start + USDHI6_SD_BUF0;
@@ -692,12 +693,14 @@ static void usdhi6_dma_request(struct usdhi6_host *host, phys_addr_t start)
if (ret < 0)
goto e_release_tx;
- host->chan_rx = dma_request_slave_channel(mmc_dev(host->mmc), "rx");
+ host->chan_rx = dma_request_chan(mmc_dev(host->mmc), "rx");
dev_dbg(mmc_dev(host->mmc), "%s: RX: got channel %p\n", __func__,
host->chan_rx);
- if (!host->chan_rx)
+ if (IS_ERR(host->chan_rx)) {
+ host->chan_rx = NULL;
goto e_release_tx;
+ }
cfg.direction = DMA_DEV_TO_MEM;
cfg.src_addr = cfg.dst_addr;
@@ -1162,8 +1165,7 @@ static int usdhi6_set_pinstates(struct usdhi6_host *host, int voltage)
host->pins_uhs);
default:
- return pinctrl_select_state(host->pinctrl,
- host->pins_default);
+ return pinctrl_select_default_state(mmc_dev(host->mmc));
}
}
@@ -1770,17 +1772,6 @@ static int usdhi6_probe(struct platform_device *pdev)
}
host->pins_uhs = pinctrl_lookup_state(host->pinctrl, "state_uhs");
- if (!IS_ERR(host->pins_uhs)) {
- host->pins_default = pinctrl_lookup_state(host->pinctrl,
- PINCTRL_STATE_DEFAULT);
-
- if (IS_ERR(host->pins_default)) {
- dev_err(dev,
- "UHS pinctrl requires a default pin state.\n");
- ret = PTR_ERR(host->pins_default);
- goto e_free_mmc;
- }
- }
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
host->base = devm_ioremap_resource(dev, res);
diff --git a/drivers/mmc/host/via-sdmmc.c b/drivers/mmc/host/via-sdmmc.c
index f4ac064ff471..e48bddd95ce6 100644
--- a/drivers/mmc/host/via-sdmmc.c
+++ b/drivers/mmc/host/via-sdmmc.c
@@ -1106,7 +1106,7 @@ static int via_sd_probe(struct pci_dev *pcidev,
len = pci_resource_len(pcidev, 0);
base = pci_resource_start(pcidev, 0);
- sdhost->mmiobase = ioremap_nocache(base, len);
+ sdhost->mmiobase = ioremap(base, len);
if (!sdhost->mmiobase) {
ret = -ENOMEM;
goto free_mmc_host;
diff --git a/drivers/mtd/devices/bcm47xxsflash.c b/drivers/mtd/devices/bcm47xxsflash.c
index eccf2e5d905e..3af50db8b21b 100644
--- a/drivers/mtd/devices/bcm47xxsflash.c
+++ b/drivers/mtd/devices/bcm47xxsflash.c
@@ -320,7 +320,7 @@ static int bcm47xxsflash_bcma_probe(struct platform_device *pdev)
* ChipCommon revision.
*/
if (b47s->bcma_cc->core->id.rev == 54)
- b47s->window = ioremap_nocache(res->start, resource_size(res));
+ b47s->window = ioremap(res->start, resource_size(res));
else
b47s->window = ioremap_cache(res->start, resource_size(res));
if (!b47s->window) {
diff --git a/drivers/mtd/devices/block2mtd.c b/drivers/mtd/devices/block2mtd.c
index 410a321682e6..36aa082f6db0 100644
--- a/drivers/mtd/devices/block2mtd.c
+++ b/drivers/mtd/devices/block2mtd.c
@@ -44,7 +44,7 @@ struct block2mtd_dev {
static LIST_HEAD(blkmtd_device_list);
-static struct page *page_read(struct address_space *mapping, int index)
+static struct page *page_read(struct address_space *mapping, pgoff_t index)
{
return read_mapping_page(mapping, index, NULL);
}
@@ -54,7 +54,7 @@ static int _block2mtd_erase(struct block2mtd_dev *dev, loff_t to, size_t len)
{
struct address_space *mapping = dev->blkdev->bd_inode->i_mapping;
struct page *page;
- int index = to >> PAGE_SHIFT; // page index
+ pgoff_t index = to >> PAGE_SHIFT; // page index
int pages = len >> PAGE_SHIFT;
u_long *p;
u_long *max;
@@ -103,7 +103,7 @@ static int block2mtd_read(struct mtd_info *mtd, loff_t from, size_t len,
{
struct block2mtd_dev *dev = mtd->priv;
struct page *page;
- int index = from >> PAGE_SHIFT;
+ pgoff_t index = from >> PAGE_SHIFT;
int offset = from & (PAGE_SIZE-1);
int cpylen;
@@ -137,7 +137,7 @@ static int _block2mtd_write(struct block2mtd_dev *dev, const u_char *buf,
{
struct page *page;
struct address_space *mapping = dev->blkdev->bd_inode->i_mapping;
- int index = to >> PAGE_SHIFT; // page index
+ pgoff_t index = to >> PAGE_SHIFT; // page index
int offset = to & ~PAGE_MASK; // page offset
int cpylen;
diff --git a/drivers/mtd/maps/amd76xrom.c b/drivers/mtd/maps/amd76xrom.c
index 462fadb56bdb..42a95ba40f2c 100644
--- a/drivers/mtd/maps/amd76xrom.c
+++ b/drivers/mtd/maps/amd76xrom.c
@@ -163,7 +163,7 @@ static int amd76xrom_init_one(struct pci_dev *pdev,
/* FIXME handle registers 0x80 - 0x8C the bios region locks */
/* For write accesses caches are useless */
- window->virt = ioremap_nocache(window->phys, window->size);
+ window->virt = ioremap(window->phys, window->size);
if (!window->virt) {
printk(KERN_ERR MOD_NAME ": ioremap(%08lx, %08lx) failed\n",
window->phys, window->size);
diff --git a/drivers/mtd/maps/ck804xrom.c b/drivers/mtd/maps/ck804xrom.c
index c9b7b4d5a923..460494212f6a 100644
--- a/drivers/mtd/maps/ck804xrom.c
+++ b/drivers/mtd/maps/ck804xrom.c
@@ -191,7 +191,7 @@ static int __init ck804xrom_init_one(struct pci_dev *pdev,
/* FIXME handle registers 0x80 - 0x8C the bios region locks */
/* For write accesses caches are useless */
- window->virt = ioremap_nocache(window->phys, window->size);
+ window->virt = ioremap(window->phys, window->size);
if (!window->virt) {
printk(KERN_ERR MOD_NAME ": ioremap(%08lx, %08lx) failed\n",
window->phys, window->size);
diff --git a/drivers/mtd/maps/esb2rom.c b/drivers/mtd/maps/esb2rom.c
index 5c27c6994896..85e14150a073 100644
--- a/drivers/mtd/maps/esb2rom.c
+++ b/drivers/mtd/maps/esb2rom.c
@@ -249,7 +249,7 @@ static int __init esb2rom_init_one(struct pci_dev *pdev,
}
/* Map the firmware hub into my address space. */
- window->virt = ioremap_nocache(window->phys, window->size);
+ window->virt = ioremap(window->phys, window->size);
if (!window->virt) {
printk(KERN_ERR MOD_NAME ": ioremap(%08lx, %08lx) failed\n",
window->phys, window->size);
diff --git a/drivers/mtd/maps/ichxrom.c b/drivers/mtd/maps/ichxrom.c
index 6b989f391baa..fda72c5fd8f9 100644
--- a/drivers/mtd/maps/ichxrom.c
+++ b/drivers/mtd/maps/ichxrom.c
@@ -184,7 +184,7 @@ static int __init ichxrom_init_one(struct pci_dev *pdev,
}
/* Map the firmware hub into my address space. */
- window->virt = ioremap_nocache(window->phys, window->size);
+ window->virt = ioremap(window->phys, window->size);
if (!window->virt) {
printk(KERN_ERR MOD_NAME ": ioremap(%08lx, %08lx) failed\n",
window->phys, window->size);
diff --git a/drivers/mtd/maps/intel_vr_nor.c b/drivers/mtd/maps/intel_vr_nor.c
index 69503aef981e..d67b845b0e89 100644
--- a/drivers/mtd/maps/intel_vr_nor.c
+++ b/drivers/mtd/maps/intel_vr_nor.c
@@ -133,7 +133,7 @@ static int vr_nor_init_maps(struct vr_nor_mtd *p)
if (win_len < (CS0_START + CS0_SIZE))
return -ENXIO;
- p->csr_base = ioremap_nocache(csr_phys, csr_len);
+ p->csr_base = ioremap(csr_phys, csr_len);
if (!p->csr_base)
return -ENOMEM;
@@ -152,7 +152,7 @@ static int vr_nor_init_maps(struct vr_nor_mtd *p)
p->map.bankwidth = (exp_timing_cs0 & TIMING_BYTE_EN) ? 1 : 2;
p->map.phys = win_phys + CS0_START;
p->map.size = CS0_SIZE;
- p->map.virt = ioremap_nocache(p->map.phys, p->map.size);
+ p->map.virt = ioremap(p->map.phys, p->map.size);
if (!p->map.virt) {
err = -ENOMEM;
goto release;
diff --git a/drivers/mtd/maps/l440gx.c b/drivers/mtd/maps/l440gx.c
index 0eeadfeb620d..832b880d1aaf 100644
--- a/drivers/mtd/maps/l440gx.c
+++ b/drivers/mtd/maps/l440gx.c
@@ -78,7 +78,7 @@ static int __init init_l440gx(void)
return -ENODEV;
}
- l440gx_map.virt = ioremap_nocache(WINDOW_ADDR, WINDOW_SIZE);
+ l440gx_map.virt = ioremap(WINDOW_ADDR, WINDOW_SIZE);
if (!l440gx_map.virt) {
printk(KERN_WARNING "Failed to ioremap L440GX flash region\n");
diff --git a/drivers/mtd/maps/netsc520.c b/drivers/mtd/maps/netsc520.c
index abc52b70bb00..0bb651624f05 100644
--- a/drivers/mtd/maps/netsc520.c
+++ b/drivers/mtd/maps/netsc520.c
@@ -82,10 +82,10 @@ static int __init init_netsc520(void)
printk(KERN_NOTICE "NetSc520 flash device: 0x%Lx at 0x%Lx\n",
(unsigned long long)netsc520_map.size,
(unsigned long long)netsc520_map.phys);
- netsc520_map.virt = ioremap_nocache(netsc520_map.phys, netsc520_map.size);
+ netsc520_map.virt = ioremap(netsc520_map.phys, netsc520_map.size);
if (!netsc520_map.virt) {
- printk("Failed to ioremap_nocache\n");
+ printk("Failed to ioremap\n");
return -EIO;
}
diff --git a/drivers/mtd/maps/nettel.c b/drivers/mtd/maps/nettel.c
index 50046d497398..7d349874ffeb 100644
--- a/drivers/mtd/maps/nettel.c
+++ b/drivers/mtd/maps/nettel.c
@@ -176,7 +176,7 @@ static int __init nettel_init(void)
#endif
int rc = 0;
- nettel_mmcrp = (void *) ioremap_nocache(0xfffef000, 4096);
+ nettel_mmcrp = (void *) ioremap(0xfffef000, 4096);
if (nettel_mmcrp == NULL) {
printk("SNAPGEAR: failed to disable MMCR cache??\n");
return(-EIO);
@@ -217,7 +217,7 @@ static int __init nettel_init(void)
__asm__ ("wbinvd");
nettel_amd_map.phys = amdaddr;
- nettel_amd_map.virt = ioremap_nocache(amdaddr, maxsize);
+ nettel_amd_map.virt = ioremap(amdaddr, maxsize);
if (!nettel_amd_map.virt) {
printk("SNAPGEAR: failed to ioremap() BOOTCS\n");
iounmap(nettel_mmcrp);
@@ -303,7 +303,7 @@ static int __init nettel_init(void)
/* Probe for the size of the first Intel flash */
nettel_intel_map.size = maxsize;
nettel_intel_map.phys = intel0addr;
- nettel_intel_map.virt = ioremap_nocache(intel0addr, maxsize);
+ nettel_intel_map.virt = ioremap(intel0addr, maxsize);
if (!nettel_intel_map.virt) {
printk("SNAPGEAR: failed to ioremap() ROMCS1\n");
rc = -EIO;
@@ -337,7 +337,7 @@ static int __init nettel_init(void)
iounmap(nettel_intel_map.virt);
nettel_intel_map.size = maxsize;
- nettel_intel_map.virt = ioremap_nocache(intel0addr, maxsize);
+ nettel_intel_map.virt = ioremap(intel0addr, maxsize);
if (!nettel_intel_map.virt) {
printk("SNAPGEAR: failed to ioremap() ROMCS1/2\n");
rc = -EIO;
diff --git a/drivers/mtd/maps/pci.c b/drivers/mtd/maps/pci.c
index 9a49f8a06fb8..377ef0fc4e3e 100644
--- a/drivers/mtd/maps/pci.c
+++ b/drivers/mtd/maps/pci.c
@@ -94,7 +94,7 @@ intel_iq80310_init(struct pci_dev *dev, struct map_pci_info *map)
map->map.write = mtd_pci_write8,
map->map.size = 0x00800000;
- map->base = ioremap_nocache(pci_resource_start(dev, 0),
+ map->base = ioremap(pci_resource_start(dev, 0),
pci_resource_len(dev, 0));
if (!map->base)
@@ -188,7 +188,7 @@ intel_dc21285_init(struct pci_dev *dev, struct map_pci_info *map)
map->map.read = mtd_pci_read32,
map->map.write = mtd_pci_write32,
map->map.size = len;
- map->base = ioremap_nocache(base, len);
+ map->base = ioremap(base, len);
if (!map->base)
return -ENOMEM;
diff --git a/drivers/mtd/maps/pcmciamtd.c b/drivers/mtd/maps/pcmciamtd.c
index 70bb403f69f7..2ac79e1cedd9 100644
--- a/drivers/mtd/maps/pcmciamtd.c
+++ b/drivers/mtd/maps/pcmciamtd.c
@@ -294,16 +294,15 @@ static void pcmcia_copy_to(struct map_info *map, unsigned long to, const void *f
}
-static DEFINE_SPINLOCK(pcmcia_vpp_lock);
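+/* pcmcia_fixup_vpp() may sleep, so Vpp refcounting is serialized with a mutex. */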
+static DEFINE_MUTEX(pcmcia_vpp_lock);
static int pcmcia_vpp_refcnt;
static void pcmciamtd_set_vpp(struct map_info *map, int on)
{
struct pcmciamtd_dev *dev = (struct pcmciamtd_dev *)map->map_priv_1;
struct pcmcia_device *link = dev->p_dev;
- unsigned long flags;
pr_debug("dev = %p on = %d vpp = %d\n\n", dev, on, dev->vpp);
- spin_lock_irqsave(&pcmcia_vpp_lock, flags);
+ mutex_lock(&pcmcia_vpp_lock);
if (on) {
if (++pcmcia_vpp_refcnt == 1) /* first nested 'on' */
pcmcia_fixup_vpp(link, dev->vpp);
@@ -311,7 +310,7 @@ static void pcmciamtd_set_vpp(struct map_info *map, int on)
if (--pcmcia_vpp_refcnt == 0) /* last nested 'off' */
pcmcia_fixup_vpp(link, 0);
}
- spin_unlock_irqrestore(&pcmcia_vpp_lock, flags);
+ mutex_unlock(&pcmcia_vpp_lock);
}
diff --git a/drivers/mtd/maps/physmap-core.c b/drivers/mtd/maps/physmap-core.c
index a9f7964e2edb..8f7f966fa9a7 100644
--- a/drivers/mtd/maps/physmap-core.c
+++ b/drivers/mtd/maps/physmap-core.c
@@ -38,6 +38,7 @@
#include <linux/mtd/cfi_endian.h>
#include <linux/io.h>
#include <linux/of_device.h>
+#include <linux/pm_runtime.h>
#include <linux/gpio/consumer.h>
#include "physmap-gemini.h"
@@ -64,16 +65,16 @@ static int physmap_flash_remove(struct platform_device *dev)
{
struct physmap_flash_info *info;
struct physmap_flash_data *physmap_data;
- int i, err;
+ int i, err = 0;
info = platform_get_drvdata(dev);
if (!info)
- return 0;
+ goto out;
if (info->cmtd) {
err = mtd_device_unregister(info->cmtd);
if (err)
- return err;
+ goto out;
if (info->cmtd != info->mtds[0])
mtd_concat_destroy(info->cmtd);
@@ -88,7 +89,10 @@ static int physmap_flash_remove(struct platform_device *dev)
if (physmap_data && physmap_data->exit)
physmap_data->exit(dev);
- return 0;
+out:
+ pm_runtime_put(&dev->dev);
+ pm_runtime_disable(&dev->dev);
+ return err;
}
static void physmap_set_vpp(struct map_info *map, int state)
@@ -484,13 +488,19 @@ static int physmap_flash_probe(struct platform_device *dev)
return -EINVAL;
}
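+ /* Hold a runtime PM reference while the flash device is bound. */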
+ pm_runtime_enable(&dev->dev);
+ pm_runtime_get_sync(&dev->dev);
+
if (dev->dev.of_node)
err = physmap_flash_of_init(dev);
else
err = physmap_flash_pdata_init(dev);
- if (err)
+ if (err) {
+ pm_runtime_put(&dev->dev);
+ pm_runtime_disable(&dev->dev);
return err;
+ }
for (i = 0; i < info->nmaps; i++) {
struct resource *res;
diff --git a/drivers/mtd/maps/sc520cdp.c b/drivers/mtd/maps/sc520cdp.c
index 03af2df90d47..9902b37e18b4 100644
--- a/drivers/mtd/maps/sc520cdp.c
+++ b/drivers/mtd/maps/sc520cdp.c
@@ -174,8 +174,8 @@ static void sc520cdp_setup_par(void)
int i, j;
/* map in SC520's MMCR area */
- mmcr = ioremap_nocache(SC520_MMCR_BASE, SC520_MMCR_EXTENT);
- if(!mmcr) { /* ioremap_nocache failed: skip the PAR reprogramming */
+ mmcr = ioremap(SC520_MMCR_BASE, SC520_MMCR_EXTENT);
+ if(!mmcr) { /* ioremap failed: skip the PAR reprogramming */
/* force physical address fields to BIOS defaults: */
for(i = 0; i < NUM_FLASH_BANKS; i++)
sc520cdp_map[i].phys = par_table[i].default_address;
@@ -225,10 +225,10 @@ static int __init init_sc520cdp(void)
(unsigned long long)sc520cdp_map[i].size,
(unsigned long long)sc520cdp_map[i].phys);
- sc520cdp_map[i].virt = ioremap_nocache(sc520cdp_map[i].phys, sc520cdp_map[i].size);
+ sc520cdp_map[i].virt = ioremap(sc520cdp_map[i].phys, sc520cdp_map[i].size);
if (!sc520cdp_map[i].virt) {
- printk("Failed to ioremap_nocache\n");
+ printk("Failed to ioremap\n");
for (j = 0; j < i; j++) {
if (mymtd[j]) {
map_destroy(mymtd[j]);
diff --git a/drivers/mtd/maps/scb2_flash.c b/drivers/mtd/maps/scb2_flash.c
index 2afb253bf456..57303f904bc1 100644
--- a/drivers/mtd/maps/scb2_flash.c
+++ b/drivers/mtd/maps/scb2_flash.c
@@ -152,7 +152,7 @@ static int scb2_flash_probe(struct pci_dev *dev,
}
/* remap the IO window (w/o caching) */
- scb2_ioaddr = ioremap_nocache(SCB2_ADDR, SCB2_WINDOW);
+ scb2_ioaddr = ioremap(SCB2_ADDR, SCB2_WINDOW);
if (!scb2_ioaddr) {
printk(KERN_ERR MODNAME ": Failed to ioremap window!\n");
if (!region_fail)
diff --git a/drivers/mtd/maps/ts5500_flash.c b/drivers/mtd/maps/ts5500_flash.c
index 6cfc8783c0e5..70d6e865f555 100644
--- a/drivers/mtd/maps/ts5500_flash.c
+++ b/drivers/mtd/maps/ts5500_flash.c
@@ -56,10 +56,10 @@ static int __init init_ts5500_map(void)
{
int rc = 0;
- ts5500_map.virt = ioremap_nocache(ts5500_map.phys, ts5500_map.size);
+ ts5500_map.virt = ioremap(ts5500_map.phys, ts5500_map.size);
if (!ts5500_map.virt) {
- printk(KERN_ERR "Failed to ioremap_nocache\n");
+ printk(KERN_ERR "Failed to ioremap\n");
rc = -EIO;
goto err2;
}
diff --git a/drivers/mtd/mtdconcat.c b/drivers/mtd/mtdconcat.c
index 170a7221b35f..1d6c9e7e7b7d 100644
--- a/drivers/mtd/mtdconcat.c
+++ b/drivers/mtd/mtdconcat.c
@@ -841,10 +841,7 @@ struct mtd_info *mtd_concat_create(struct mtd_info *subdev[], /* subdevices to c
return &concat->mtd;
}
-/*
- * This function destroys an MTD object obtained from concat_mtd_devs()
- */
-
+/* Clean up the context obtained from mtd_concat_create() */
void mtd_concat_destroy(struct mtd_info *mtd)
{
struct mtd_concat *concat = CONCAT(mtd);
diff --git a/drivers/mtd/nand/onenand/Kconfig b/drivers/mtd/nand/onenand/Kconfig
index ae0b8fe5b990..572b8fe69abb 100644
--- a/drivers/mtd/nand/onenand/Kconfig
+++ b/drivers/mtd/nand/onenand/Kconfig
@@ -25,7 +25,7 @@ config MTD_ONENAND_GENERIC
config MTD_ONENAND_OMAP2
tristate "OneNAND on OMAP2/OMAP3 support"
- depends on ARCH_OMAP2 || ARCH_OMAP3
+ depends on ARCH_OMAP2 || ARCH_OMAP3 || (COMPILE_TEST && ARM)
depends on OF || COMPILE_TEST
help
Support for a OneNAND flash device connected to an OMAP2/OMAP3 SoC
@@ -33,12 +33,12 @@ config MTD_ONENAND_OMAP2
Enable dmaengine and gpiolib for better performance.
config MTD_ONENAND_SAMSUNG
- tristate "OneNAND on Samsung SOC controller support"
- depends on ARCH_S3C64XX || ARCH_S5PV210 || ARCH_EXYNOS4
- help
- Support for a OneNAND flash device connected to an Samsung SOC.
- S3C64XX uses command mapping method.
- S5PC110/S5PC210 use generic OneNAND method.
+ tristate "OneNAND on Samsung SOC controller support"
+ depends on ARCH_S3C64XX || ARCH_S5PV210 || ARCH_EXYNOS4 || COMPILE_TEST
+ help
+ Support for a OneNAND flash device connected to a Samsung SoC.
+ S3C64XX uses the command mapping method.
+ S5PC110/S5PC210 use the generic OneNAND method.
config MTD_ONENAND_OTP
bool "OneNAND OTP Support"
diff --git a/drivers/mtd/nand/onenand/Makefile b/drivers/mtd/nand/onenand/Makefile
index a27b635eb23a..a0761c7e0288 100644
--- a/drivers/mtd/nand/onenand/Makefile
+++ b/drivers/mtd/nand/onenand/Makefile
@@ -8,7 +8,7 @@ obj-$(CONFIG_MTD_ONENAND) += onenand.o
# Board specific.
obj-$(CONFIG_MTD_ONENAND_GENERIC) += generic.o
-obj-$(CONFIG_MTD_ONENAND_OMAP2) += omap2.o
-obj-$(CONFIG_MTD_ONENAND_SAMSUNG) += samsung_mtd.o
+obj-$(CONFIG_MTD_ONENAND_OMAP2) += onenand_omap2.o
+obj-$(CONFIG_MTD_ONENAND_SAMSUNG) += onenand_samsung.o
onenand-objs = onenand_base.o onenand_bbt.o
diff --git a/drivers/mtd/nand/onenand/onenand_base.c b/drivers/mtd/nand/onenand/onenand_base.c
index 85640ee11c86..d5326d19b136 100644
--- a/drivers/mtd/nand/onenand/onenand_base.c
+++ b/drivers/mtd/nand/onenand/onenand_base.c
@@ -1248,44 +1248,44 @@ static int onenand_read_ops_nolock(struct mtd_info *mtd, loff_t from,
stats = mtd->ecc_stats;
- /* Read-while-load method */
+ /* Read-while-load method */
- /* Do first load to bufferRAM */
- if (read < len) {
- if (!onenand_check_bufferram(mtd, from)) {
+ /* Do first load to bufferRAM */
+ if (read < len) {
+ if (!onenand_check_bufferram(mtd, from)) {
this->command(mtd, ONENAND_CMD_READ, from, writesize);
- ret = this->wait(mtd, FL_READING);
- onenand_update_bufferram(mtd, from, !ret);
+ ret = this->wait(mtd, FL_READING);
+ onenand_update_bufferram(mtd, from, !ret);
if (mtd_is_eccerr(ret))
ret = 0;
- }
- }
+ }
+ }
thislen = min_t(int, writesize, len - read);
column = from & (writesize - 1);
if (column + thislen > writesize)
thislen = writesize - column;
- while (!ret) {
- /* If there is more to load then start next load */
- from += thislen;
- if (read + thislen < len) {
+ while (!ret) {
+ /* If there is more to load then start next load */
+ from += thislen;
+ if (read + thislen < len) {
this->command(mtd, ONENAND_CMD_READ, from, writesize);
- /*
- * Chip boundary handling in DDP
- * Now we issued chip 1 read and pointed chip 1
+ /*
+ * Chip boundary handling in DDP
+ * Now we issued chip 1 read and pointed chip 1
* bufferram so we have to point chip 0 bufferram.
- */
- if (ONENAND_IS_DDP(this) &&
- unlikely(from == (this->chipsize >> 1))) {
- this->write_word(ONENAND_DDP_CHIP0, this->base + ONENAND_REG_START_ADDRESS2);
- boundary = 1;
- } else
- boundary = 0;
- ONENAND_SET_PREV_BUFFERRAM(this);
- }
- /* While load is going, read from last bufferRAM */
- this->read_bufferram(mtd, ONENAND_DATARAM, buf, column, thislen);
+ */
+ if (ONENAND_IS_DDP(this) &&
+ unlikely(from == (this->chipsize >> 1))) {
+ this->write_word(ONENAND_DDP_CHIP0, this->base + ONENAND_REG_START_ADDRESS2);
+ boundary = 1;
+ } else
+ boundary = 0;
+ ONENAND_SET_PREV_BUFFERRAM(this);
+ }
+ /* While load is going, read from last bufferRAM */
+ this->read_bufferram(mtd, ONENAND_DATARAM, buf, column, thislen);
/* Read oob area if needed */
if (oobbuf) {
@@ -1301,24 +1301,24 @@ static int onenand_read_ops_nolock(struct mtd_info *mtd, loff_t from,
oobcolumn = 0;
}
- /* See if we are done */
- read += thislen;
- if (read == len)
- break;
- /* Set up for next read from bufferRAM */
- if (unlikely(boundary))
- this->write_word(ONENAND_DDP_CHIP1, this->base + ONENAND_REG_START_ADDRESS2);
- ONENAND_SET_NEXT_BUFFERRAM(this);
- buf += thislen;
+ /* See if we are done */
+ read += thislen;
+ if (read == len)
+ break;
+ /* Set up for next read from bufferRAM */
+ if (unlikely(boundary))
+ this->write_word(ONENAND_DDP_CHIP1, this->base + ONENAND_REG_START_ADDRESS2);
+ ONENAND_SET_NEXT_BUFFERRAM(this);
+ buf += thislen;
thislen = min_t(int, writesize, len - read);
- column = 0;
- cond_resched();
- /* Now wait for load */
- ret = this->wait(mtd, FL_READING);
- onenand_update_bufferram(mtd, from, !ret);
+ column = 0;
+ cond_resched();
+ /* Now wait for load */
+ ret = this->wait(mtd, FL_READING);
+ onenand_update_bufferram(mtd, from, !ret);
if (mtd_is_eccerr(ret))
ret = 0;
- }
+ }
/*
* Return success, if no ECC failures, else -EBADMSG
diff --git a/drivers/mtd/nand/onenand/omap2.c b/drivers/mtd/nand/onenand/onenand_omap2.c
index aa9368bf7a0c..aa9368bf7a0c 100644
--- a/drivers/mtd/nand/onenand/omap2.c
+++ b/drivers/mtd/nand/onenand/onenand_omap2.c
diff --git a/drivers/mtd/nand/onenand/samsung_mtd.c b/drivers/mtd/nand/onenand/onenand_samsung.c
index beb7987e4c2b..87b28e397d67 100644
--- a/drivers/mtd/nand/onenand/samsung_mtd.c
+++ b/drivers/mtd/nand/onenand/onenand_samsung.c
@@ -248,7 +248,7 @@ static unsigned short s3c_onenand_readw(void __iomem *addr)
}
/* BootRAM access control */
- if ((unsigned int) addr < ONENAND_DATARAM && onenand->bootram_command) {
+ if ((unsigned long)addr < ONENAND_DATARAM && onenand->bootram_command) {
if (word_addr == 0)
return s3c_read_reg(MANUFACT_ID_OFFSET);
if (word_addr == 1)
@@ -289,7 +289,7 @@ static void s3c_onenand_writew(unsigned short value, void __iomem *addr)
}
/* BootRAM access control */
- if ((unsigned int)addr < ONENAND_DATARAM) {
+ if ((unsigned long)addr < ONENAND_DATARAM) {
if (value == ONENAND_CMD_READID) {
onenand->bootram_command = 1;
return;
@@ -658,7 +658,7 @@ static int s5pc110_read_bufferram(struct mtd_info *mtd, int area,
dma_dst = dma_map_single(dev, buf, count, DMA_FROM_DEVICE);
}
if (dma_mapping_error(dev, dma_dst)) {
- dev_err(dev, "Couldn't map a %d byte buffer for DMA\n", count);
+ dev_err(dev, "Couldn't map a %zu byte buffer for DMA\n", count);
goto normal;
}
err = s5pc110_dma_ops(dma_dst, dma_src,
@@ -728,13 +728,12 @@ static void s3c_onenand_check_lock_status(struct mtd_info *mtd)
struct onenand_chip *this = mtd->priv;
struct device *dev = &onenand->pdev->dev;
unsigned int block, end;
- int tmp;
end = this->chipsize >> this->erase_shift;
for (block = 0; block < end; block++) {
unsigned int mem_addr = onenand->mem_addr(block, 0, 0);
- tmp = s3c_read_cmd(CMD_MAP_01(onenand, mem_addr));
+ s3c_read_cmd(CMD_MAP_01(onenand, mem_addr));
if (s3c_read_reg(INT_ERR_STAT_OFFSET) & LOCKED_BLK) {
dev_err(dev, "block %d is write-protected!\n", block);
diff --git a/drivers/mtd/nand/raw/Kconfig b/drivers/mtd/nand/raw/Kconfig
index 74fb91adeb46..a80a46bb5b8b 100644
--- a/drivers/mtd/nand/raw/Kconfig
+++ b/drivers/mtd/nand/raw/Kconfig
@@ -452,7 +452,7 @@ config MTD_NAND_PLATFORM
config MTD_NAND_CADENCE
tristate "Support Cadence NAND (HPNFC) controller"
- depends on OF || COMPILE_TEST
+ depends on (OF || COMPILE_TEST) && HAS_IOMEM
help
Enable the driver for NAND flash on platforms using a Cadence NAND
controller.
diff --git a/drivers/mtd/nand/raw/atmel/nand-controller.c b/drivers/mtd/nand/raw/atmel/nand-controller.c
index 8d6be90a6fe8..3ba17a98df4d 100644
--- a/drivers/mtd/nand/raw/atmel/nand-controller.c
+++ b/drivers/mtd/nand/raw/atmel/nand-controller.c
@@ -1578,9 +1578,8 @@ static struct atmel_nand *atmel_nand_create(struct atmel_nand_controller *nc,
nand->numcs = numcs;
- gpio = devm_fwnode_get_index_gpiod_from_child(nc->dev, "det", 0,
- &np->fwnode, GPIOD_IN,
- "nand-det");
+ gpio = devm_fwnode_gpiod_get(nc->dev, of_fwnode_handle(np),
+ "det", GPIOD_IN, "nand-det");
if (IS_ERR(gpio) && PTR_ERR(gpio) != -ENOENT) {
dev_err(nc->dev,
"Failed to get detect gpio (err = %ld)\n",
@@ -1624,9 +1623,10 @@ static struct atmel_nand *atmel_nand_create(struct atmel_nand_controller *nc,
nand->cs[i].rb.type = ATMEL_NAND_NATIVE_RB;
nand->cs[i].rb.id = val;
} else {
- gpio = devm_fwnode_get_index_gpiod_from_child(nc->dev,
- "rb", i, &np->fwnode,
- GPIOD_IN, "nand-rb");
+ gpio = devm_fwnode_gpiod_get_index(nc->dev,
+ of_fwnode_handle(np),
+ "rb", i, GPIOD_IN,
+ "nand-rb");
if (IS_ERR(gpio) && PTR_ERR(gpio) != -ENOENT) {
dev_err(nc->dev,
"Failed to get R/B gpio (err = %ld)\n",
@@ -1640,10 +1640,10 @@ static struct atmel_nand *atmel_nand_create(struct atmel_nand_controller *nc,
}
}
- gpio = devm_fwnode_get_index_gpiod_from_child(nc->dev, "cs",
- i, &np->fwnode,
- GPIOD_OUT_HIGH,
- "nand-cs");
+ gpio = devm_fwnode_gpiod_get_index(nc->dev,
+ of_fwnode_handle(np),
+ "cs", i, GPIOD_OUT_HIGH,
+ "nand-cs");
if (IS_ERR(gpio) && PTR_ERR(gpio) != -ENOENT) {
dev_err(nc->dev,
"Failed to get CS gpio (err = %ld)\n",
diff --git a/drivers/mtd/nand/raw/au1550nd.c b/drivers/mtd/nand/raw/au1550nd.c
index e10b76089048..75eb3e97fae3 100644
--- a/drivers/mtd/nand/raw/au1550nd.c
+++ b/drivers/mtd/nand/raw/au1550nd.c
@@ -404,7 +404,7 @@ static int au1550nd_probe(struct platform_device *pdev)
goto out1;
}
- ctx->base = ioremap_nocache(r->start, 0x1000);
+ ctx->base = ioremap(r->start, 0x1000);
if (!ctx->base) {
dev_err(&pdev->dev, "cannot remap NAND memory area\n");
ret = -ENODEV;
diff --git a/drivers/mtd/nand/raw/brcmnand/brcmnand.c b/drivers/mtd/nand/raw/brcmnand/brcmnand.c
index 1a66b1cd51c0..44518dada75b 100644
--- a/drivers/mtd/nand/raw/brcmnand/brcmnand.c
+++ b/drivers/mtd/nand/raw/brcmnand/brcmnand.c
@@ -2635,6 +2635,16 @@ int brcmnand_probe(struct platform_device *pdev, struct brcmnand_soc *soc)
/* initialize the dma version */
brcmnand_flash_dma_revision_init(ctrl);
+ ret = -EIO;
+ if (ctrl->nand_version >= 0x0700)
+ ret = dma_set_mask_and_coherent(&pdev->dev,
+ DMA_BIT_MASK(40));
+ if (ret)
+ ret = dma_set_mask_and_coherent(&pdev->dev,
+ DMA_BIT_MASK(32));
+ if (ret)
+ goto err;
+
/* linked-list and stop on error */
flash_dma_writel(ctrl, FLASH_DMA_MODE, FLASH_DMA_MODE_MASK);
flash_dma_writel(ctrl, FLASH_DMA_ERROR_STATUS, 0);
diff --git a/drivers/mtd/nand/raw/denali.c b/drivers/mtd/nand/raw/denali.c
index 3102ddbd8abd..fafd0a0aa8e2 100644
--- a/drivers/mtd/nand/raw/denali.c
+++ b/drivers/mtd/nand/raw/denali.c
@@ -21,7 +21,6 @@
#include "denali.h"
#define DENALI_NAND_NAME "denali-nand"
-#define DENALI_DEFAULT_OOB_SKIP_BYTES 8
/* for Indexed Addressing */
#define DENALI_INDEXED_CTRL 0x00
@@ -1302,15 +1301,16 @@ int denali_init(struct denali_controller *denali)
/*
* Set how many bytes should be skipped before writing data in OOB.
- * If a non-zero value has already been set (by firmware or something),
- * just use it. Otherwise, set the driver's default.
+	 * If a platform requests a non-zero value, write it to the register.
+ * Otherwise, read the value out, expecting it has already been set up
+ * by firmware.
*/
- denali->oob_skip_bytes = ioread32(denali->reg + SPARE_AREA_SKIP_BYTES);
- if (!denali->oob_skip_bytes) {
- denali->oob_skip_bytes = DENALI_DEFAULT_OOB_SKIP_BYTES;
+ if (denali->oob_skip_bytes)
iowrite32(denali->oob_skip_bytes,
denali->reg + SPARE_AREA_SKIP_BYTES);
- }
+ else
+ denali->oob_skip_bytes = ioread32(denali->reg +
+ SPARE_AREA_SKIP_BYTES);
iowrite32(0, denali->reg + TRANSFER_SPARE_REG);
iowrite32(GENMASK(denali->nbanks - 1, 0), denali->reg + RB_PIN_ENABLED);
diff --git a/drivers/mtd/nand/raw/denali_dt.c b/drivers/mtd/nand/raw/denali_dt.c
index 8b779a899dcf..f08740ae282b 100644
--- a/drivers/mtd/nand/raw/denali_dt.c
+++ b/drivers/mtd/nand/raw/denali_dt.c
@@ -6,6 +6,7 @@
*/
#include <linux/clk.h>
+#include <linux/delay.h>
#include <linux/err.h>
#include <linux/io.h>
#include <linux/ioport.h>
@@ -14,6 +15,7 @@
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>
+#include <linux/reset.h>
#include "denali.h"
@@ -22,11 +24,14 @@ struct denali_dt {
struct clk *clk; /* core clock */
struct clk *clk_x; /* bus interface clock */
struct clk *clk_ecc; /* ECC circuit clock */
+ struct reset_control *rst; /* core reset */
+ struct reset_control *rst_reg; /* register reset */
};
struct denali_dt_data {
unsigned int revision;
unsigned int caps;
+ unsigned int oob_skip_bytes;
const struct nand_ecc_caps *ecc_caps;
};
@@ -34,6 +39,7 @@ NAND_ECC_CAPS_SINGLE(denali_socfpga_ecc_caps, denali_calc_ecc_bytes,
512, 8, 15);
static const struct denali_dt_data denali_socfpga_data = {
.caps = DENALI_CAP_HW_ECC_FIXUP,
+ .oob_skip_bytes = 2,
.ecc_caps = &denali_socfpga_ecc_caps,
};
@@ -42,6 +48,7 @@ NAND_ECC_CAPS_SINGLE(denali_uniphier_v5a_ecc_caps, denali_calc_ecc_bytes,
static const struct denali_dt_data denali_uniphier_v5a_data = {
.caps = DENALI_CAP_HW_ECC_FIXUP |
DENALI_CAP_DMA_64BIT,
+ .oob_skip_bytes = 8,
.ecc_caps = &denali_uniphier_v5a_ecc_caps,
};
@@ -51,6 +58,7 @@ static const struct denali_dt_data denali_uniphier_v5b_data = {
.revision = 0x0501,
.caps = DENALI_CAP_HW_ECC_FIXUP |
DENALI_CAP_DMA_64BIT,
+ .oob_skip_bytes = 8,
.ecc_caps = &denali_uniphier_v5b_ecc_caps,
};
@@ -118,11 +126,13 @@ static int denali_dt_probe(struct platform_device *pdev)
denali = &dt->controller;
data = of_device_get_match_data(dev);
- if (data) {
- denali->revision = data->revision;
- denali->caps = data->caps;
- denali->ecc_caps = data->ecc_caps;
- }
+ if (WARN_ON(!data))
+ return -EINVAL;
+
+ denali->revision = data->revision;
+ denali->caps = data->caps;
+ denali->oob_skip_bytes = data->oob_skip_bytes;
+ denali->ecc_caps = data->ecc_caps;
denali->dev = dev;
denali->irq = platform_get_irq(pdev, 0);
@@ -151,6 +161,14 @@ static int denali_dt_probe(struct platform_device *pdev)
if (IS_ERR(dt->clk_ecc))
return PTR_ERR(dt->clk_ecc);
+ dt->rst = devm_reset_control_get_optional_shared(dev, "nand");
+ if (IS_ERR(dt->rst))
+ return PTR_ERR(dt->rst);
+
+ dt->rst_reg = devm_reset_control_get_optional_shared(dev, "reg");
+ if (IS_ERR(dt->rst_reg))
+ return PTR_ERR(dt->rst_reg);
+
ret = clk_prepare_enable(dt->clk);
if (ret)
return ret;
@@ -166,10 +184,30 @@ static int denali_dt_probe(struct platform_device *pdev)
denali->clk_rate = clk_get_rate(dt->clk);
denali->clk_x_rate = clk_get_rate(dt->clk_x);
- ret = denali_init(denali);
+ /*
+	 * Deassert the register reset and then the core reset, in this order.
+ * Deasserting the core reset while the register reset is asserted
+ * will cause unpredictable behavior in the controller.
+ */
+ ret = reset_control_deassert(dt->rst_reg);
if (ret)
goto out_disable_clk_ecc;
+ ret = reset_control_deassert(dt->rst);
+ if (ret)
+ goto out_assert_rst_reg;
+
+ /*
+ * When the reset is deasserted, the initialization sequence is kicked
+	 * (bootstrap process). The driver must wait until it has finished.
+ * Otherwise, it will result in unpredictable behavior.
+ */
+ usleep_range(200, 1000);
+
+ ret = denali_init(denali);
+ if (ret)
+ goto out_assert_rst;
+
for_each_child_of_node(dev->of_node, np) {
ret = denali_dt_chip_init(denali, np);
if (ret) {
@@ -184,6 +222,10 @@ static int denali_dt_probe(struct platform_device *pdev)
out_remove_denali:
denali_remove(denali);
+out_assert_rst:
+ reset_control_assert(dt->rst);
+out_assert_rst_reg:
+ reset_control_assert(dt->rst_reg);
out_disable_clk_ecc:
clk_disable_unprepare(dt->clk_ecc);
out_disable_clk_x:
@@ -199,6 +241,8 @@ static int denali_dt_remove(struct platform_device *pdev)
struct denali_dt *dt = platform_get_drvdata(pdev);
denali_remove(&dt->controller);
+ reset_control_assert(dt->rst);
+ reset_control_assert(dt->rst_reg);
clk_disable_unprepare(dt->clk_ecc);
clk_disable_unprepare(dt->clk_x);
clk_disable_unprepare(dt->clk);
diff --git a/drivers/mtd/nand/raw/denali_pci.c b/drivers/mtd/nand/raw/denali_pci.c
index d62aa5271753..2f77ee55e1bf 100644
--- a/drivers/mtd/nand/raw/denali_pci.c
+++ b/drivers/mtd/nand/raw/denali_pci.c
@@ -74,15 +74,15 @@ static int denali_pci_probe(struct pci_dev *dev, const struct pci_device_id *id)
return ret;
}
- denali->reg = ioremap_nocache(csr_base, csr_len);
+ denali->reg = ioremap(csr_base, csr_len);
if (!denali->reg) {
dev_err(&dev->dev, "Spectra: Unable to remap memory region\n");
return -ENOMEM;
}
- denali->host = ioremap_nocache(mem_base, mem_len);
+ denali->host = ioremap(mem_base, mem_len);
if (!denali->host) {
- dev_err(&dev->dev, "Spectra: ioremap_nocache failed!");
+ dev_err(&dev->dev, "Spectra: ioremap failed!");
ret = -ENOMEM;
goto out_unmap_reg;
}
diff --git a/drivers/mtd/nand/raw/fsl_upm.c b/drivers/mtd/nand/raw/fsl_upm.c
index 1054cc070747..f31fae3a4c68 100644
--- a/drivers/mtd/nand/raw/fsl_upm.c
+++ b/drivers/mtd/nand/raw/fsl_upm.c
@@ -285,7 +285,7 @@ static int fun_probe(struct platform_device *ofdev)
fun->wait_flags = FSL_UPM_WAIT_RUN_PATTERN |
FSL_UPM_WAIT_WRITE_BYTE;
- fun->io_base = devm_ioremap_nocache(&ofdev->dev, io_res.start,
+ fun->io_base = devm_ioremap(&ofdev->dev, io_res.start,
resource_size(&io_res));
if (!fun->io_base) {
ret = -ENOMEM;
diff --git a/drivers/mtd/nand/raw/gpmi-nand/gpmi-nand.c b/drivers/mtd/nand/raw/gpmi-nand/gpmi-nand.c
index 334fe3130285..b9d5d55a5edb 100644
--- a/drivers/mtd/nand/raw/gpmi-nand/gpmi-nand.c
+++ b/drivers/mtd/nand/raw/gpmi-nand/gpmi-nand.c
@@ -148,6 +148,10 @@ static int gpmi_init(struct gpmi_nand_data *this)
struct resources *r = &this->resources;
int ret;
+ ret = pm_runtime_get_sync(this->dev);
+ if (ret < 0)
+ return ret;
+
ret = gpmi_reset_block(r->gpmi_regs, false);
if (ret)
goto err_out;
@@ -179,8 +183,9 @@ static int gpmi_init(struct gpmi_nand_data *this)
*/
writel(BM_GPMI_CTRL1_DECOUPLE_CS, r->gpmi_regs + HW_GPMI_CTRL1_SET);
- return 0;
err_out:
+ pm_runtime_mark_last_busy(this->dev);
+ pm_runtime_put_autosuspend(this->dev);
return ret;
}
@@ -2722,6 +2727,10 @@ static int gpmi_pm_resume(struct device *dev)
return ret;
}
+ /* Set flag to get timing setup restored for next exec_op */
+ if (this->hw.clk_rate)
+ this->hw.must_apply_timings = true;
+
/* re-init the BCH registers */
ret = bch_set_geometry(this);
if (ret) {
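The gpmi_init() hunk above brackets the hardware access with a runtime-PM get/put pair. A minimal sketch of that idiom follows, with a hypothetical do_hw_work() standing in for the register accesses; unlike the hunk, the failure path here also drops the reference that pm_runtime_get_sync() takes even when it fails:

#include <linux/pm_runtime.h>

/* Hypothetical stand-in for the register work gpmi_init() performs. */
static int do_hw_work(struct device *dev) { return 0; }

static int pm_bracketed_op(struct device *dev)
{
	int ret;

	ret = pm_runtime_get_sync(dev);
	if (ret < 0) {
		/* get_sync bumps the usage count even on failure, so the
		 * strictly balanced form drops that reference here. */
		pm_runtime_put_noidle(dev);
		return ret;
	}

	ret = do_hw_work(dev);

	pm_runtime_mark_last_busy(dev);
	pm_runtime_put_autosuspend(dev);
	return ret;
}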
diff --git a/drivers/mtd/nand/raw/mpc5121_nfc.c b/drivers/mtd/nand/raw/mpc5121_nfc.c
index 8b90def6686f..a2fcb739e5f8 100644
--- a/drivers/mtd/nand/raw/mpc5121_nfc.c
+++ b/drivers/mtd/nand/raw/mpc5121_nfc.c
@@ -438,7 +438,7 @@ static void mpc5121_nfc_copy_spare(struct mtd_info *mtd, uint offset,
buffer += blksize;
offset += blksize;
size -= blksize;
- };
+ }
}
/* Copy data from/to NFC main and spare buffers */
diff --git a/drivers/mtd/nand/raw/nand_macronix.c b/drivers/mtd/nand/raw/nand_macronix.c
index 58511aeb0c9a..3ff7ce00cbdb 100644
--- a/drivers/mtd/nand/raw/nand_macronix.c
+++ b/drivers/mtd/nand/raw/nand_macronix.c
@@ -59,7 +59,7 @@ static void macronix_nand_onfi_init(struct nand_chip *chip)
*/
static void macronix_nand_fix_broken_get_timings(struct nand_chip *chip)
{
- unsigned int i;
+ int i;
static const char * const broken_get_timings[] = {
"MX30LF1G18AC",
"MX30LF1G28AC",
@@ -80,12 +80,9 @@ static void macronix_nand_fix_broken_get_timings(struct nand_chip *chip)
if (!chip->parameters.supports_set_get_features)
return;
- for (i = 0; i < ARRAY_SIZE(broken_get_timings); i++) {
- if (!strcmp(broken_get_timings[i], chip->parameters.model))
- break;
- }
-
- if (i == ARRAY_SIZE(broken_get_timings))
+ i = match_string(broken_get_timings, ARRAY_SIZE(broken_get_timings),
+ chip->parameters.model);
+ if (i < 0)
return;
bitmap_clear(chip->parameters.get_feature_list,
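For reference, match_string() (from <linux/string.h>) returns the index of the matching array entry or -EINVAL, which is what makes the "i < 0" test above sufficient. A small sketch with an illustrative model list:

#include <linux/kernel.h>
#include <linux/string.h>

static const char * const broken_models[] = {
	"MX30LF1G18AC",	/* illustrative subset of the table above */
	"MX30LF1G28AC",
};

/* match_string() returns the array index on a hit, -EINVAL otherwise,
 * replacing the manual loop-and-strcmp idiom removed above. */
static bool model_is_broken(const char *model)
{
	return match_string(broken_models, ARRAY_SIZE(broken_models),
			    model) >= 0;
}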
diff --git a/drivers/mtd/nand/spi/toshiba.c b/drivers/mtd/nand/spi/toshiba.c
index 1cb3760ff779..0db5ee4e82af 100644
--- a/drivers/mtd/nand/spi/toshiba.c
+++ b/drivers/mtd/nand/spi/toshiba.c
@@ -124,6 +124,16 @@ static const struct spinand_info toshiba_spinand_table[] = {
0,
SPINAND_ECCINFO(&tc58cxgxsx_ooblayout,
tc58cxgxsx_ecc_get_status)),
+ /* 3.3V 4Gb */
+ SPINAND_INFO("TC58CVG2S0", 0xED,
+ NAND_MEMORG(1, 4096, 256, 64, 2048, 40, 1, 1, 1),
+ NAND_ECCREQ(8, 512),
+ SPINAND_INFO_OP_VARIANTS(&read_cache_variants,
+ &write_cache_variants,
+ &update_cache_variants),
+ 0,
+ SPINAND_ECCINFO(&tc58cxgxsx_ooblayout,
+ tc58cxgxsx_ecc_get_status)),
/* 1.8V 1Gb */
SPINAND_INFO("TC58CYG0S3", 0xB2,
NAND_MEMORG(1, 2048, 128, 64, 1024, 20, 1, 1, 1),
diff --git a/drivers/mtd/parsers/sharpslpart.c b/drivers/mtd/parsers/sharpslpart.c
index e5ea6127ab5a..671a61845bd5 100644
--- a/drivers/mtd/parsers/sharpslpart.c
+++ b/drivers/mtd/parsers/sharpslpart.c
@@ -165,10 +165,10 @@ static int sharpsl_nand_get_logical_num(u8 *oob)
static int sharpsl_nand_init_ftl(struct mtd_info *mtd, struct sharpsl_ftl *ftl)
{
- unsigned int block_num, log_num, phymax;
+ unsigned int block_num, phymax;
+ int i, ret, log_num;
loff_t block_adr;
u8 *oob;
- int i, ret;
oob = kzalloc(mtd->oobsize, GFP_KERNEL);
if (!oob)
diff --git a/drivers/mtd/spi-nor/Kconfig b/drivers/mtd/spi-nor/Kconfig
index f237fcdf7f86..c1eda67d1ad2 100644
--- a/drivers/mtd/spi-nor/Kconfig
+++ b/drivers/mtd/spi-nor/Kconfig
@@ -46,11 +46,11 @@ config SPI_CADENCE_QUADSPI
Flash as an MTD device.
config SPI_HISI_SFC
- tristate "Hisilicon SPI-NOR Flash Controller(SFC)"
+	tristate "Hisilicon FMC SPI-NOR Flash Controller (SFC)"
depends on ARCH_HISI || COMPILE_TEST
depends on HAS_IOMEM
help
- This enables support for hisilicon SPI-NOR flash controller.
+	  This enables support for the HiSilicon FMC SPI-NOR flash controller.
config SPI_MTK_QUADSPI
tristate "MediaTek Quad SPI controller"
diff --git a/drivers/mtd/spi-nor/aspeed-smc.c b/drivers/mtd/spi-nor/aspeed-smc.c
index 2b7cabbb680c..395127349aa8 100644
--- a/drivers/mtd/spi-nor/aspeed-smc.c
+++ b/drivers/mtd/spi-nor/aspeed-smc.c
@@ -305,7 +305,7 @@ static void aspeed_smc_stop_user(struct spi_nor *nor)
writel(ctl, chip->ctl); /* default to fread or read mode */
}
-static int aspeed_smc_prep(struct spi_nor *nor, enum spi_nor_ops ops)
+static int aspeed_smc_prep(struct spi_nor *nor)
{
struct aspeed_smc_chip *chip = nor->priv;
@@ -313,7 +313,7 @@ static int aspeed_smc_prep(struct spi_nor *nor, enum spi_nor_ops ops)
return 0;
}
-static void aspeed_smc_unprep(struct spi_nor *nor, enum spi_nor_ops ops)
+static void aspeed_smc_unprep(struct spi_nor *nor)
{
struct aspeed_smc_chip *chip = nor->priv;
diff --git a/drivers/mtd/spi-nor/cadence-quadspi.c b/drivers/mtd/spi-nor/cadence-quadspi.c
index 06f997247d0f..494dcab4aaaa 100644
--- a/drivers/mtd/spi-nor/cadence-quadspi.c
+++ b/drivers/mtd/spi-nor/cadence-quadspi.c
@@ -1062,7 +1062,7 @@ static int cqspi_erase(struct spi_nor *nor, loff_t offs)
return 0;
}
-static int cqspi_prep(struct spi_nor *nor, enum spi_nor_ops ops)
+static int cqspi_prep(struct spi_nor *nor)
{
struct cqspi_flash_pdata *f_pdata = nor->priv;
struct cqspi_st *cqspi = f_pdata->cqspi;
@@ -1072,7 +1072,7 @@ static int cqspi_prep(struct spi_nor *nor, enum spi_nor_ops ops)
return 0;
}
-static void cqspi_unprep(struct spi_nor *nor, enum spi_nor_ops ops)
+static void cqspi_unprep(struct spi_nor *nor)
{
struct cqspi_flash_pdata *f_pdata = nor->priv;
struct cqspi_st *cqspi = f_pdata->cqspi;
diff --git a/drivers/mtd/spi-nor/hisi-sfc.c b/drivers/mtd/spi-nor/hisi-sfc.c
index a1258216f89d..6c7a4118752e 100644
--- a/drivers/mtd/spi-nor/hisi-sfc.c
+++ b/drivers/mtd/spi-nor/hisi-sfc.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0-or-later
/*
- * HiSilicon SPI Nor Flash Controller Driver
+ * HiSilicon FMC SPI-NOR flash controller driver
*
* Copyright (c) 2015-2016 HiSilicon Technologies Co., Ltd.
*/
@@ -144,7 +144,7 @@ static void hisi_spi_nor_init(struct hifmc_host *host)
writel(reg, host->regbase + FMC_SPI_TIMING_CFG);
}
-static int hisi_spi_nor_prep(struct spi_nor *nor, enum spi_nor_ops ops)
+static int hisi_spi_nor_prep(struct spi_nor *nor)
{
struct hifmc_priv *priv = nor->priv;
struct hifmc_host *host = priv->host;
@@ -167,7 +167,7 @@ out:
return ret;
}
-static void hisi_spi_nor_unprep(struct spi_nor *nor, enum spi_nor_ops ops)
+static void hisi_spi_nor_unprep(struct spi_nor *nor)
{
struct hifmc_priv *priv = nor->priv;
struct hifmc_host *host = priv->host;
diff --git a/drivers/mtd/spi-nor/intel-spi-pci.c b/drivers/mtd/spi-nor/intel-spi-pci.c
index 3d8987baea2a..81329f680bec 100644
--- a/drivers/mtd/spi-nor/intel-spi-pci.c
+++ b/drivers/mtd/spi-nor/intel-spi-pci.c
@@ -70,10 +70,12 @@ static const struct pci_device_id intel_spi_pci_ids[] = {
{ PCI_VDEVICE(INTEL, 0x19e0), (unsigned long)&bxt_info },
{ PCI_VDEVICE(INTEL, 0x34a4), (unsigned long)&bxt_info },
{ PCI_VDEVICE(INTEL, 0x4b24), (unsigned long)&bxt_info },
+ { PCI_VDEVICE(INTEL, 0x4da4), (unsigned long)&bxt_info },
{ PCI_VDEVICE(INTEL, 0xa0a4), (unsigned long)&bxt_info },
{ PCI_VDEVICE(INTEL, 0xa1a4), (unsigned long)&bxt_info },
{ PCI_VDEVICE(INTEL, 0xa224), (unsigned long)&bxt_info },
{ PCI_VDEVICE(INTEL, 0xa324), (unsigned long)&cnl_info },
+ { PCI_VDEVICE(INTEL, 0xa3a4), (unsigned long)&bxt_info },
{ },
};
MODULE_DEVICE_TABLE(pci, intel_spi_pci_ids);
diff --git a/drivers/mtd/spi-nor/spi-nor.c b/drivers/mtd/spi-nor/spi-nor.c
index aeb3ad2dbfb8..4fc632ec18fe 100644
--- a/drivers/mtd/spi-nor/spi-nor.c
+++ b/drivers/mtd/spi-nor/spi-nor.c
@@ -85,7 +85,7 @@ struct sfdp_header {
#define BFPT_DWORD(i) ((i) - 1)
#define BFPT_DWORD_MAX 16
-/* The first version of JESB216 defined only 9 DWORDs. */
+/* The first version of JESD216 defined only 9 DWORDs. */
#define BFPT_DWORD_MAX_JESD216 9
/* 1st DWORD. */
@@ -196,7 +196,7 @@ struct flash_info {
u16 page_size;
u16 addr_width;
- u16 flags;
+ u32 flags;
#define SECT_4K BIT(0) /* SPINOR_OP_BE_4K works uniformly */
#define SPI_NOR_NO_ERASE BIT(1) /* No erase command needed */
#define SST_WRITE BIT(2) /* use SST byte programming */
@@ -233,6 +233,11 @@ struct flash_info {
#define SPI_NOR_SKIP_SFDP BIT(13) /* Skip parsing of SFDP tables */
#define USE_CLSR BIT(14) /* use CLSR command */
#define SPI_NOR_OCTAL_READ BIT(15) /* Flash supports Octal Read */
+#define SPI_NOR_TB_SR_BIT6 BIT(16) /*
+ * Top/Bottom (TB) is bit 6 of
+ * status register. Must be used with
+ * SPI_NOR_HAS_TB.
+ */
/* Part specific fixup hooks. */
const struct spi_nor_fixups *fixups;
@@ -1307,14 +1312,14 @@ static void spi_nor_set_4byte_opcodes(struct spi_nor *nor)
}
}
-static int spi_nor_lock_and_prep(struct spi_nor *nor, enum spi_nor_ops ops)
+static int spi_nor_lock_and_prep(struct spi_nor *nor)
{
int ret = 0;
mutex_lock(&nor->lock);
if (nor->controller_ops && nor->controller_ops->prepare) {
- ret = nor->controller_ops->prepare(nor, ops);
+ ret = nor->controller_ops->prepare(nor);
if (ret) {
mutex_unlock(&nor->lock);
return ret;
@@ -1323,10 +1328,10 @@ static int spi_nor_lock_and_prep(struct spi_nor *nor, enum spi_nor_ops ops)
return ret;
}
-static void spi_nor_unlock_and_unprep(struct spi_nor *nor, enum spi_nor_ops ops)
+static void spi_nor_unlock_and_unprep(struct spi_nor *nor)
{
if (nor->controller_ops && nor->controller_ops->unprepare)
- nor->controller_ops->unprepare(nor, ops);
+ nor->controller_ops->unprepare(nor);
mutex_unlock(&nor->lock);
}
@@ -1688,7 +1693,7 @@ static int spi_nor_erase(struct mtd_info *mtd, struct erase_info *instr)
addr = instr->addr;
len = instr->len;
- ret = spi_nor_lock_and_prep(nor, SPI_NOR_OPS_ERASE);
+ ret = spi_nor_lock_and_prep(nor);
if (ret)
return ret;
@@ -1751,7 +1756,7 @@ static int spi_nor_erase(struct mtd_info *mtd, struct erase_info *instr)
ret = spi_nor_write_disable(nor);
erase_err:
- spi_nor_unlock_and_unprep(nor, SPI_NOR_OPS_ERASE);
+ spi_nor_unlock_and_unprep(nor);
return ret;
}
@@ -1761,9 +1766,13 @@ static void stm_get_locked_range(struct spi_nor *nor, u8 sr, loff_t *ofs,
{
struct mtd_info *mtd = &nor->mtd;
u8 mask = SR_BP2 | SR_BP1 | SR_BP0;
+ u8 tb_mask = SR_TB_BIT5;
int shift = ffs(mask) - 1;
int pow;
+ if (nor->flags & SNOR_F_HAS_SR_TB_BIT6)
+ tb_mask = SR_TB_BIT6;
+
if (!(sr & mask)) {
/* No protection */
*ofs = 0;
@@ -1771,7 +1780,7 @@ static void stm_get_locked_range(struct spi_nor *nor, u8 sr, loff_t *ofs,
} else {
pow = ((sr & mask) ^ mask) >> shift;
*len = mtd->size >> pow;
- if (nor->flags & SNOR_F_HAS_SR_TB && sr & SR_TB)
+ if (nor->flags & SNOR_F_HAS_SR_TB && sr & tb_mask)
*ofs = 0;
else
*ofs = mtd->size - *len;
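A worked example may help here, since the BP arithmetic is terse. Assuming a 16 MiB part for illustration:

/*
 * Worked example (illustrative): a 16 MiB part whose BP2..BP0 field
 * reads back as 0b101. With mask = SR_BP2 | SR_BP1 | SR_BP0 and
 * shift = ffs(mask) - 1 = 2, working in the shifted-down BP field:
 *
 *	pow = ((sr & mask) ^ mask) >> shift  ->  0b101 ^ 0b111 = 0b010 = 2
 *	len = mtd->size >> pow               ->  16 MiB >> 2   = 4 MiB
 *
 * If the selected Top/Bottom bit (SR bit 5, or bit 6 when
 * SNOR_F_HAS_SR_TB_BIT6 is set) is set, the 4 MiB window starts at
 * offset 0; otherwise it is the top 4 MiB of the part.
 */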
@@ -1850,6 +1859,7 @@ static int stm_lock(struct spi_nor *nor, loff_t ofs, uint64_t len)
struct mtd_info *mtd = &nor->mtd;
int ret, status_old, status_new;
u8 mask = SR_BP2 | SR_BP1 | SR_BP0;
+ u8 tb_mask = SR_TB_BIT5;
u8 shift = ffs(mask) - 1, pow, val;
loff_t lock_len;
bool can_be_top = true, can_be_bottom = nor->flags & SNOR_F_HAS_SR_TB;
@@ -1886,6 +1896,9 @@ static int stm_lock(struct spi_nor *nor, loff_t ofs, uint64_t len)
else
lock_len = ofs + len;
+ if (nor->flags & SNOR_F_HAS_SR_TB_BIT6)
+ tb_mask = SR_TB_BIT6;
+
/*
* Need smallest pow such that:
*
@@ -1903,13 +1916,13 @@ static int stm_lock(struct spi_nor *nor, loff_t ofs, uint64_t len)
if (!(val & mask))
return -EINVAL;
- status_new = (status_old & ~mask & ~SR_TB) | val;
+ status_new = (status_old & ~mask & ~tb_mask) | val;
/* Disallow further writes if WP pin is asserted */
status_new |= SR_SRWD;
if (!use_top)
- status_new |= SR_TB;
+ status_new |= tb_mask;
/* Don't bother if they're the same */
if (status_new == status_old)
@@ -1932,6 +1945,7 @@ static int stm_unlock(struct spi_nor *nor, loff_t ofs, uint64_t len)
struct mtd_info *mtd = &nor->mtd;
int ret, status_old, status_new;
u8 mask = SR_BP2 | SR_BP1 | SR_BP0;
+ u8 tb_mask = SR_TB_BIT5;
u8 shift = ffs(mask) - 1, pow, val;
loff_t lock_len;
bool can_be_top = true, can_be_bottom = nor->flags & SNOR_F_HAS_SR_TB;
@@ -1968,6 +1982,8 @@ static int stm_unlock(struct spi_nor *nor, loff_t ofs, uint64_t len)
else
lock_len = ofs;
+ if (nor->flags & SNOR_F_HAS_SR_TB_BIT6)
+ tb_mask = SR_TB_BIT6;
/*
* Need largest pow such that:
*
@@ -1987,14 +2003,14 @@ static int stm_unlock(struct spi_nor *nor, loff_t ofs, uint64_t len)
return -EINVAL;
}
- status_new = (status_old & ~mask & ~SR_TB) | val;
+ status_new = (status_old & ~mask & ~tb_mask) | val;
/* Don't protect status register if we're fully unlocked */
if (lock_len == 0)
status_new &= ~SR_SRWD;
if (!use_top)
- status_new |= SR_TB;
+ status_new |= tb_mask;
/* Don't bother if they're the same */
if (status_new == status_old)
@@ -2036,13 +2052,13 @@ static int spi_nor_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
struct spi_nor *nor = mtd_to_spi_nor(mtd);
int ret;
- ret = spi_nor_lock_and_prep(nor, SPI_NOR_OPS_LOCK);
+ ret = spi_nor_lock_and_prep(nor);
if (ret)
return ret;
ret = nor->params.locking_ops->lock(nor, ofs, len);
- spi_nor_unlock_and_unprep(nor, SPI_NOR_OPS_UNLOCK);
+ spi_nor_unlock_and_unprep(nor);
return ret;
}
@@ -2051,13 +2067,13 @@ static int spi_nor_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
struct spi_nor *nor = mtd_to_spi_nor(mtd);
int ret;
- ret = spi_nor_lock_and_prep(nor, SPI_NOR_OPS_UNLOCK);
+ ret = spi_nor_lock_and_prep(nor);
if (ret)
return ret;
ret = nor->params.locking_ops->unlock(nor, ofs, len);
- spi_nor_unlock_and_unprep(nor, SPI_NOR_OPS_LOCK);
+ spi_nor_unlock_and_unprep(nor);
return ret;
}
@@ -2066,13 +2082,13 @@ static int spi_nor_is_locked(struct mtd_info *mtd, loff_t ofs, uint64_t len)
struct spi_nor *nor = mtd_to_spi_nor(mtd);
int ret;
- ret = spi_nor_lock_and_prep(nor, SPI_NOR_OPS_UNLOCK);
+ ret = spi_nor_lock_and_prep(nor);
if (ret)
return ret;
ret = nor->params.locking_ops->is_locked(nor, ofs, len);
- spi_nor_unlock_and_unprep(nor, SPI_NOR_OPS_LOCK);
+ spi_nor_unlock_and_unprep(nor);
return ret;
}
@@ -2124,6 +2140,8 @@ static int spi_nor_sr2_bit1_quad_enable(struct spi_nor *nor)
if (nor->bouncebuf[0] & SR2_QUAD_EN_BIT1)
return 0;
+ nor->bouncebuf[0] |= SR2_QUAD_EN_BIT1;
+
return spi_nor_write_16bit_cr_and_check(nor, nor->bouncebuf[0]);
}
@@ -2307,6 +2325,9 @@ static const struct flash_info spi_nor_ids[] = {
{ "at25df321a", INFO(0x1f4701, 0, 64 * 1024, 64, SECT_4K) },
{ "at25df641", INFO(0x1f4800, 0, 64 * 1024, 128, SECT_4K) },
+ { "at25sl321", INFO(0x1f4216, 0, 64 * 1024, 64,
+ SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
+
{ "at26f004", INFO(0x1f0400, 0, 64 * 1024, 8, SECT_4K) },
{ "at26df081a", INFO(0x1f4501, 0, 64 * 1024, 16, SECT_4K) },
{ "at26df161a", INFO(0x1f4601, 0, 64 * 1024, 32, SECT_4K) },
@@ -2372,6 +2393,11 @@ static const struct flash_info spi_nor_ids[] = {
SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB)
},
{
+ "gd25lq128d", INFO(0xc86018, 0, 64 * 1024, 256,
+ SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ |
+ SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB)
+ },
+ {
"gd25q128", INFO(0xc84018, 0, 64 * 1024, 256,
SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ |
SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB)
@@ -2379,7 +2405,8 @@ static const struct flash_info spi_nor_ids[] = {
{
"gd25q256", INFO(0xc84019, 0, 64 * 1024, 512,
SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ |
- SPI_NOR_4B_OPCODES | SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB)
+ SPI_NOR_4B_OPCODES | SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB |
+ SPI_NOR_TB_SR_BIT6)
.fixups = &gd25q256_fixups,
},
@@ -2434,6 +2461,8 @@ static const struct flash_info spi_nor_ids[] = {
{ "mx25u6435f", INFO(0xc22537, 0, 64 * 1024, 128, SECT_4K) },
{ "mx25l12805d", INFO(0xc22018, 0, 64 * 1024, 256, 0) },
{ "mx25l12855e", INFO(0xc22618, 0, 64 * 1024, 256, 0) },
+ { "mx25r3235f", INFO(0xc22816, 0, 64 * 1024, 64,
+ SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
{ "mx25u12835f", INFO(0xc22538, 0, 64 * 1024, 256,
SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
{ "mx25l25635e", INFO(0xc22019, 0, 64 * 1024, 512,
@@ -2454,20 +2483,35 @@ static const struct flash_info spi_nor_ids[] = {
{ "n25q032a", INFO(0x20bb16, 0, 64 * 1024, 64, SPI_NOR_QUAD_READ) },
{ "n25q064", INFO(0x20ba17, 0, 64 * 1024, 128, SECT_4K | SPI_NOR_QUAD_READ) },
{ "n25q064a", INFO(0x20bb17, 0, 64 * 1024, 128, SECT_4K | SPI_NOR_QUAD_READ) },
- { "n25q128a11", INFO(0x20bb18, 0, 64 * 1024, 256, SECT_4K | SPI_NOR_QUAD_READ) },
- { "n25q128a13", INFO(0x20ba18, 0, 64 * 1024, 256, SECT_4K | SPI_NOR_QUAD_READ) },
- { "n25q256a", INFO(0x20ba19, 0, 64 * 1024, 512, SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
- { "n25q256ax1", INFO(0x20bb19, 0, 64 * 1024, 512, SECT_4K | SPI_NOR_QUAD_READ) },
+ { "n25q128a11", INFO(0x20bb18, 0, 64 * 1024, 256, SECT_4K |
+ USE_FSR | SPI_NOR_QUAD_READ) },
+ { "n25q128a13", INFO(0x20ba18, 0, 64 * 1024, 256, SECT_4K |
+ USE_FSR | SPI_NOR_QUAD_READ) },
+ { "mt25ql256a", INFO6(0x20ba19, 0x104400, 64 * 1024, 512,
+ SECT_4K | USE_FSR | SPI_NOR_DUAL_READ |
+ SPI_NOR_QUAD_READ | SPI_NOR_4B_OPCODES) },
+ { "n25q256a", INFO(0x20ba19, 0, 64 * 1024, 512, SECT_4K |
+ USE_FSR | SPI_NOR_DUAL_READ |
+ SPI_NOR_QUAD_READ) },
+ { "mt25qu256a", INFO6(0x20bb19, 0x104400, 64 * 1024, 512,
+ SECT_4K | USE_FSR | SPI_NOR_DUAL_READ |
+ SPI_NOR_QUAD_READ | SPI_NOR_4B_OPCODES) },
+ { "n25q256ax1", INFO(0x20bb19, 0, 64 * 1024, 512, SECT_4K |
+ USE_FSR | SPI_NOR_QUAD_READ) },
+ { "mt25ql512a", INFO6(0x20ba20, 0x104400, 64 * 1024, 1024,
+ SECT_4K | USE_FSR | SPI_NOR_DUAL_READ |
+ SPI_NOR_QUAD_READ | SPI_NOR_4B_OPCODES) },
{ "n25q512ax3", INFO(0x20ba20, 0, 64 * 1024, 1024, SECT_4K | USE_FSR | SPI_NOR_QUAD_READ) },
+ { "mt25qu512a", INFO6(0x20bb20, 0x104400, 64 * 1024, 1024,
+ SECT_4K | USE_FSR | SPI_NOR_DUAL_READ |
+ SPI_NOR_QUAD_READ | SPI_NOR_4B_OPCODES) },
+ { "n25q512a", INFO(0x20bb20, 0, 64 * 1024, 1024, SECT_4K |
+ USE_FSR | SPI_NOR_QUAD_READ) },
{ "n25q00", INFO(0x20ba21, 0, 64 * 1024, 2048, SECT_4K | USE_FSR | SPI_NOR_QUAD_READ | NO_CHIP_ERASE) },
{ "n25q00a", INFO(0x20bb21, 0, 64 * 1024, 2048, SECT_4K | USE_FSR | SPI_NOR_QUAD_READ | NO_CHIP_ERASE) },
{ "mt25ql02g", INFO(0x20ba22, 0, 64 * 1024, 4096,
SECT_4K | USE_FSR | SPI_NOR_QUAD_READ |
NO_CHIP_ERASE) },
- { "mt25qu512a (n25q512a)", INFO(0x20bb20, 0, 64 * 1024, 1024,
- SECT_4K | USE_FSR | SPI_NOR_DUAL_READ |
- SPI_NOR_QUAD_READ |
- SPI_NOR_4B_OPCODES) },
{ "mt25qu02g", INFO(0x20bb22, 0, 64 * 1024, 4096, SECT_4K | USE_FSR | SPI_NOR_QUAD_READ | NO_CHIP_ERASE) },
/* Micron */
@@ -2538,6 +2582,8 @@ static const struct flash_info spi_nor_ids[] = {
{ "sst25wf080", INFO(0xbf2505, 0, 64 * 1024, 16, SECT_4K | SST_WRITE) },
{ "sst26wf016b", INFO(0xbf2651, 0, 64 * 1024, 32, SECT_4K |
SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
+ { "sst26vf016b", INFO(0xbf2641, 0, 64 * 1024, 32, SECT_4K |
+ SPI_NOR_DUAL_READ) },
{ "sst26vf064b", INFO(0xbf2643, 0, 64 * 1024, 128, SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
/* ST Microelectronics -- newer production may have feature updates */
@@ -2608,6 +2654,11 @@ static const struct flash_info spi_nor_ids[] = {
SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ |
SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB)
},
+ {
+ "w25q32jwm", INFO(0xef8016, 0, 64 * 1024, 64,
+ SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ |
+ SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB)
+ },
{ "w25x64", INFO(0xef3017, 0, 64 * 1024, 128, SECT_4K) },
{ "w25q64", INFO(0xef4017, 0, 64 * 1024, 128, SECT_4K) },
{
@@ -2628,7 +2679,9 @@ static const struct flash_info spi_nor_ids[] = {
{ "w25q80", INFO(0xef5014, 0, 64 * 1024, 16, SECT_4K) },
{ "w25q80bl", INFO(0xef4014, 0, 64 * 1024, 16, SECT_4K) },
{ "w25q128", INFO(0xef4018, 0, 64 * 1024, 256, SECT_4K) },
- { "w25q256", INFO(0xef4019, 0, 64 * 1024, 512, SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
+ { "w25q256", INFO(0xef4019, 0, 64 * 1024, 512,
+ SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ |
+ SPI_NOR_4B_OPCODES) },
{ "w25q256jvm", INFO(0xef7019, 0, 64 * 1024, 512,
SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
{ "w25q256jw", INFO(0xef6019, 0, 64 * 1024, 512,
@@ -2699,7 +2752,7 @@ static int spi_nor_read(struct mtd_info *mtd, loff_t from, size_t len,
dev_dbg(nor->dev, "from 0x%08x, len %zd\n", (u32)from, len);
- ret = spi_nor_lock_and_prep(nor, SPI_NOR_OPS_READ);
+ ret = spi_nor_lock_and_prep(nor);
if (ret)
return ret;
@@ -2726,7 +2779,7 @@ static int spi_nor_read(struct mtd_info *mtd, loff_t from, size_t len,
ret = 0;
read_err:
- spi_nor_unlock_and_unprep(nor, SPI_NOR_OPS_READ);
+ spi_nor_unlock_and_unprep(nor);
return ret;
}
@@ -2739,7 +2792,7 @@ static int sst_write(struct mtd_info *mtd, loff_t to, size_t len,
dev_dbg(nor->dev, "to 0x%08x, len %zd\n", (u32)to, len);
- ret = spi_nor_lock_and_prep(nor, SPI_NOR_OPS_WRITE);
+ ret = spi_nor_lock_and_prep(nor);
if (ret)
return ret;
@@ -2812,7 +2865,7 @@ static int sst_write(struct mtd_info *mtd, loff_t to, size_t len,
}
out:
*retlen += actual;
- spi_nor_unlock_and_unprep(nor, SPI_NOR_OPS_WRITE);
+ spi_nor_unlock_and_unprep(nor);
return ret;
}
@@ -2830,7 +2883,7 @@ static int spi_nor_write(struct mtd_info *mtd, loff_t to, size_t len,
dev_dbg(nor->dev, "to 0x%08x, len %zd\n", (u32)to, len);
- ret = spi_nor_lock_and_prep(nor, SPI_NOR_OPS_WRITE);
+ ret = spi_nor_lock_and_prep(nor);
if (ret)
return ret;
@@ -2876,7 +2929,7 @@ static int spi_nor_write(struct mtd_info *mtd, loff_t to, size_t len,
}
write_err:
- spi_nor_unlock_and_unprep(nor, SPI_NOR_OPS_WRITE);
+ spi_nor_unlock_and_unprep(nor);
return ret;
}
@@ -4769,9 +4822,7 @@ static void spi_nor_info_init_params(struct spi_nor *nor)
static void spansion_post_sfdp_fixups(struct spi_nor *nor)
{
- struct mtd_info *mtd = &nor->mtd;
-
- if (mtd->size <= SZ_16M)
+ if (nor->params.size <= SZ_16M)
return;
nor->flags |= SNOR_F_4B_OPCODES;
@@ -5143,8 +5194,12 @@ int spi_nor_scan(struct spi_nor *nor, const char *name,
if (info->flags & USE_FSR)
nor->flags |= SNOR_F_USE_FSR;
- if (info->flags & SPI_NOR_HAS_TB)
+ if (info->flags & SPI_NOR_HAS_TB) {
nor->flags |= SNOR_F_HAS_SR_TB;
+ if (info->flags & SPI_NOR_TB_SR_BIT6)
+ nor->flags |= SNOR_F_HAS_SR_TB_BIT6;
+ }
+
if (info->flags & NO_CHIP_ERASE)
nor->flags |= SNOR_F_NO_OP_CHIP_ERASE;
if (info->flags & USE_CLSR)
diff --git a/drivers/mtd/ubi/attach.c b/drivers/mtd/ubi/attach.c
index 10b2459f8951..ea7440ac913b 100644
--- a/drivers/mtd/ubi/attach.c
+++ b/drivers/mtd/ubi/attach.c
@@ -1640,7 +1640,7 @@ int ubi_attach(struct ubi_device *ubi, int force_scan)
out_wl:
ubi_wl_close(ubi);
out_vtbl:
- ubi_free_internal_volumes(ubi);
+ ubi_free_all_volumes(ubi);
vfree(ubi->vtbl);
out_ai:
destroy_ai(ai);
diff --git a/drivers/mtd/ubi/build.c b/drivers/mtd/ubi/build.c
index d636bbe214cb..25fb72b2efa0 100644
--- a/drivers/mtd/ubi/build.c
+++ b/drivers/mtd/ubi/build.c
@@ -503,21 +503,42 @@ static void uif_close(struct ubi_device *ubi)
}
/**
- * ubi_free_internal_volumes - free internal volumes.
+ * ubi_free_volumes_from - free volumes starting from a specific index.
* @ubi: UBI device description object
+ * @from: index of the first volume to free.
*/
-void ubi_free_internal_volumes(struct ubi_device *ubi)
+static void ubi_free_volumes_from(struct ubi_device *ubi, int from)
{
int i;
- for (i = ubi->vtbl_slots;
- i < ubi->vtbl_slots + UBI_INT_VOL_COUNT; i++) {
+ for (i = from; i < ubi->vtbl_slots + UBI_INT_VOL_COUNT; i++) {
+ if (!ubi->volumes[i])
+ continue;
ubi_eba_replace_table(ubi->volumes[i], NULL);
ubi_fastmap_destroy_checkmap(ubi->volumes[i]);
kfree(ubi->volumes[i]);
+ ubi->volumes[i] = NULL;
}
}
+/**
+ * ubi_free_all_volumes - free all volumes.
+ * @ubi: UBI device description object
+ */
+void ubi_free_all_volumes(struct ubi_device *ubi)
+{
+ ubi_free_volumes_from(ubi, 0);
+}
+
+/**
+ * ubi_free_internal_volumes - free internal volumes.
+ * @ubi: UBI device description object
+ */
+void ubi_free_internal_volumes(struct ubi_device *ubi)
+{
+ ubi_free_volumes_from(ubi, ubi->vtbl_slots);
+}
+
static int get_bad_peb_limit(const struct ubi_device *ubi, int max_beb_per1024)
{
int limit, device_pebs;
@@ -1013,7 +1034,7 @@ out_uif:
out_detach:
ubi_devices[ubi_num] = NULL;
ubi_wl_close(ubi);
- ubi_free_internal_volumes(ubi);
+ ubi_free_all_volumes(ubi);
vfree(ubi->vtbl);
out_free:
vfree(ubi->peb_buf);
diff --git a/drivers/mtd/ubi/fastmap.c b/drivers/mtd/ubi/fastmap.c
index 1c7be4eb3ba6..53f448e7433a 100644
--- a/drivers/mtd/ubi/fastmap.c
+++ b/drivers/mtd/ubi/fastmap.c
@@ -64,7 +64,7 @@ static int self_check_seen(struct ubi_device *ubi, unsigned long *seen)
return 0;
for (pnum = 0; pnum < ubi->peb_count; pnum++) {
- if (test_bit(pnum, seen) && ubi->lookuptbl[pnum]) {
+ if (!test_bit(pnum, seen) && ubi->lookuptbl[pnum]) {
ubi_err(ubi, "self-check failed for PEB %d, fastmap didn't see it", pnum);
ret = -EINVAL;
}
@@ -1137,7 +1137,7 @@ static int ubi_write_fastmap(struct ubi_device *ubi,
struct rb_node *tmp_rb;
int ret, i, j, free_peb_count, used_peb_count, vol_count;
int scrub_peb_count, erase_peb_count;
- unsigned long *seen_pebs = NULL;
+ unsigned long *seen_pebs;
fm_raw = ubi->fm_buf;
memset(ubi->fm_buf, 0, ubi->fm_size);
@@ -1151,7 +1151,7 @@ static int ubi_write_fastmap(struct ubi_device *ubi,
dvbuf = new_fm_vbuf(ubi, UBI_FM_DATA_VOLUME_ID);
if (!dvbuf) {
ret = -ENOMEM;
- goto out_kfree;
+ goto out_free_avbuf;
}
avhdr = ubi_get_vid_hdr(avbuf);
@@ -1160,7 +1160,7 @@ static int ubi_write_fastmap(struct ubi_device *ubi,
seen_pebs = init_seen(ubi);
if (IS_ERR(seen_pebs)) {
ret = PTR_ERR(seen_pebs);
- goto out_kfree;
+ goto out_free_dvbuf;
}
spin_lock(&ubi->volumes_lock);
@@ -1328,7 +1328,7 @@ static int ubi_write_fastmap(struct ubi_device *ubi,
ret = ubi_io_write_vid_hdr(ubi, new_fm->e[0]->pnum, avbuf);
if (ret) {
ubi_err(ubi, "unable to write vid_hdr to fastmap SB!");
- goto out_kfree;
+ goto out_free_seen;
}
for (i = 0; i < new_fm->used_blocks; i++) {
@@ -1350,7 +1350,7 @@ static int ubi_write_fastmap(struct ubi_device *ubi,
if (ret) {
ubi_err(ubi, "unable to write vid_hdr to PEB %i!",
new_fm->e[i]->pnum);
- goto out_kfree;
+ goto out_free_seen;
}
}
@@ -1360,7 +1360,7 @@ static int ubi_write_fastmap(struct ubi_device *ubi,
if (ret) {
ubi_err(ubi, "unable to write fastmap to PEB %i!",
new_fm->e[i]->pnum);
- goto out_kfree;
+ goto out_free_seen;
}
}
@@ -1370,10 +1370,13 @@ static int ubi_write_fastmap(struct ubi_device *ubi,
ret = self_check_seen(ubi, seen_pebs);
dbg_bld("fastmap written!");
-out_kfree:
- ubi_free_vid_buf(avbuf);
- ubi_free_vid_buf(dvbuf);
+out_free_seen:
free_seen(seen_pebs);
+out_free_dvbuf:
+ ubi_free_vid_buf(dvbuf);
+out_free_avbuf:
+ ubi_free_vid_buf(avbuf);
+
out:
return ret;
}
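The relabelled error paths above follow the usual kernel unwind idiom: each label releases exactly what was acquired before the failing step, in reverse order of acquisition. A compact sketch, with hypothetical acquire/release helpers standing in for new_fm_vbuf() and init_seen():

#include <linux/errno.h>

/* Hypothetical helpers standing in for new_fm_vbuf()/init_seen()
 * and their release counterparts. */
void *acquire_a(void);
void *acquire_b(void);
void release_a(void *a);
void release_b(void *b);
int do_work(void *a, void *b);

static int unwind_example(void)
{
	void *a, *b;
	int ret;

	a = acquire_a();
	if (!a)
		return -ENOMEM;

	b = acquire_b();
	if (!b) {
		ret = -ENOMEM;
		goto out_free_a;
	}

	ret = do_work(a, b);

	release_b(b);	/* success path releases in reverse order too */
out_free_a:
	release_a(a);
	return ret;
}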
diff --git a/drivers/mtd/ubi/ubi.h b/drivers/mtd/ubi/ubi.h
index 9688b411c930..73c67e5c08f8 100644
--- a/drivers/mtd/ubi/ubi.h
+++ b/drivers/mtd/ubi/ubi.h
@@ -950,6 +950,7 @@ int ubi_volume_notify(struct ubi_device *ubi, struct ubi_volume *vol,
int ubi_notify_all(struct ubi_device *ubi, int ntype,
struct notifier_block *nb);
int ubi_enumerate_volumes(struct notifier_block *nb);
+void ubi_free_all_volumes(struct ubi_device *ubi);
void ubi_free_internal_volumes(struct ubi_device *ubi);
/* kapi.c */
diff --git a/drivers/mtd/ubi/vtbl.c b/drivers/mtd/ubi/vtbl.c
index 53d8ab54e181..f700f0e4f2ec 100644
--- a/drivers/mtd/ubi/vtbl.c
+++ b/drivers/mtd/ubi/vtbl.c
@@ -782,7 +782,7 @@ static int check_attaching_info(const struct ubi_device *ubi,
*/
int ubi_read_volume_table(struct ubi_device *ubi, struct ubi_attach_info *ai)
{
- int i, err;
+ int err;
struct ubi_ainf_volume *av;
empty_vtbl_record.crc = cpu_to_be32(0xf116c36b);
@@ -851,11 +851,7 @@ int ubi_read_volume_table(struct ubi_device *ubi, struct ubi_attach_info *ai)
out_free:
vfree(ubi->vtbl);
- for (i = 0; i < ubi->vtbl_slots + UBI_INT_VOL_COUNT; i++) {
- ubi_fastmap_destroy_checkmap(ubi->volumes[i]);
- kfree(ubi->volumes[i]);
- ubi->volumes[i] = NULL;
- }
+ ubi_free_all_volumes(ubi);
return err;
}
diff --git a/drivers/mtd/ubi/wl.c b/drivers/mtd/ubi/wl.c
index 5d77a38dba54..837d690a8c60 100644
--- a/drivers/mtd/ubi/wl.c
+++ b/drivers/mtd/ubi/wl.c
@@ -319,7 +319,7 @@ static struct ubi_wl_entry *find_wl_entry(struct ubi_device *ubi,
struct rb_root *root, int diff)
{
struct rb_node *p;
- struct ubi_wl_entry *e, *prev_e = NULL;
+ struct ubi_wl_entry *e;
int max;
e = rb_entry(rb_first(root), struct ubi_wl_entry, u.rb);
@@ -334,7 +334,6 @@ static struct ubi_wl_entry *find_wl_entry(struct ubi_device *ubi,
p = p->rb_left;
else {
p = p->rb_right;
- prev_e = e;
e = e1;
}
}
diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig
index d02f12a5254e..25a8f9387d5a 100644
--- a/drivers/net/Kconfig
+++ b/drivers/net/Kconfig
@@ -71,6 +71,49 @@ config DUMMY
To compile this driver as a module, choose M here: the module
will be called dummy.
+config WIREGUARD
+ tristate "WireGuard secure network tunnel"
+ depends on NET && INET
+ depends on IPV6 || !IPV6
+ select NET_UDP_TUNNEL
+ select DST_CACHE
+ select CRYPTO
+ select CRYPTO_LIB_CURVE25519
+ select CRYPTO_LIB_CHACHA20POLY1305
+ select CRYPTO_LIB_BLAKE2S
+ select CRYPTO_CHACHA20_X86_64 if X86 && 64BIT
+ select CRYPTO_POLY1305_X86_64 if X86 && 64BIT
+ select CRYPTO_BLAKE2S_X86 if X86 && 64BIT
+ select CRYPTO_CURVE25519_X86 if X86 && 64BIT
+ select ARM_CRYPTO if ARM
+ select ARM64_CRYPTO if ARM64
+ select CRYPTO_CHACHA20_NEON if (ARM || ARM64) && KERNEL_MODE_NEON
+ select CRYPTO_POLY1305_NEON if ARM64 && KERNEL_MODE_NEON
+ select CRYPTO_POLY1305_ARM if ARM
+ select CRYPTO_CURVE25519_NEON if ARM && KERNEL_MODE_NEON
+ select CRYPTO_CHACHA_MIPS if CPU_MIPS32_R2
+ select CRYPTO_POLY1305_MIPS if CPU_MIPS32 || (CPU_MIPS64 && 64BIT)
+ help
+	  WireGuard is a secure, fast, and easy-to-use replacement for IPsec
+ that uses modern cryptography and clever networking tricks. It's
+ designed to be fairly general purpose and abstract enough to fit most
+ use cases, while at the same time remaining extremely simple to
+ configure. See www.wireguard.com for more info.
+
+ It's safe to say Y or M here, as the driver is very lightweight and
+ is only in use when an administrator chooses to add an interface.
+
+config WIREGUARD_DEBUG
+ bool "Debugging checks and verbose messages"
+ depends on WIREGUARD
+ help
+ This will write log messages for handshake and other events
+ that occur for a WireGuard interface. It will also perform some
+ extra validation checks and unit tests at various points. This is
+ only useful for debugging.
+
+ Say N here unless you know what you're doing.
+
config EQUALIZER
tristate "EQL (serial line load balancing) support"
---help---
@@ -489,12 +532,12 @@ config FUJITSU_ES
This driver provides support for Extended Socket network device
on Extended Partitioning of FUJITSU PRIMEQUEST 2000 E2 series.
-config THUNDERBOLT_NET
- tristate "Networking over Thunderbolt cable"
- depends on THUNDERBOLT && INET
+config USB4_NET
+ tristate "Networking over USB4 and Thunderbolt cables"
+ depends on USB4 && INET
help
- Select this if you want to create network between two
- computers over a Thunderbolt cable. The driver supports Apple
+	  Select this if you want to create a network between two computers
+	  over USB4 and Thunderbolt cables. The driver supports Apple
ThunderboltIP protocol and allows communication with any host
supporting the same protocol including Windows and macOS.
@@ -506,6 +549,8 @@ source "drivers/net/hyperv/Kconfig"
config NETDEVSIM
tristate "Simulated networking device"
depends on DEBUG_FS
+ depends on INET
+ depends on IPV6 || IPV6=n
select NET_DEVLINK
help
This driver is a developer testing tool and software model that can
diff --git a/drivers/net/Makefile b/drivers/net/Makefile
index 0d3ba056cda3..71b88ffc5587 100644
--- a/drivers/net/Makefile
+++ b/drivers/net/Makefile
@@ -10,6 +10,7 @@ obj-$(CONFIG_BONDING) += bonding/
obj-$(CONFIG_IPVLAN) += ipvlan/
obj-$(CONFIG_IPVTAP) += ipvlan/
obj-$(CONFIG_DUMMY) += dummy.o
+obj-$(CONFIG_WIREGUARD) += wireguard/
obj-$(CONFIG_EQUALIZER) += eql.o
obj-$(CONFIG_IFB) += ifb.o
obj-$(CONFIG_MACSEC) += macsec.o
@@ -76,6 +77,6 @@ obj-$(CONFIG_NTB_NETDEV) += ntb_netdev.o
obj-$(CONFIG_FUJITSU_ES) += fjes/
thunderbolt-net-y += thunderbolt.o
-obj-$(CONFIG_THUNDERBOLT_NET) += thunderbolt-net.o
+obj-$(CONFIG_USB4_NET) += thunderbolt-net.o
obj-$(CONFIG_NETDEVSIM) += netdevsim/
obj-$(CONFIG_NET_FAILOVER) += net_failover.o
diff --git a/drivers/net/appletalk/cops.c b/drivers/net/appletalk/cops.c
index b3c63d2f16aa..18428e104445 100644
--- a/drivers/net/appletalk/cops.c
+++ b/drivers/net/appletalk/cops.c
@@ -189,7 +189,7 @@ static int cops_nodeid (struct net_device *dev, int nodeid);
static irqreturn_t cops_interrupt (int irq, void *dev_id);
static void cops_poll(struct timer_list *t);
-static void cops_timeout(struct net_device *dev);
+static void cops_timeout(struct net_device *dev, unsigned int txqueue);
static void cops_rx (struct net_device *dev);
static netdev_tx_t cops_send_packet (struct sk_buff *skb,
struct net_device *dev);
@@ -844,7 +844,7 @@ static void cops_rx(struct net_device *dev)
netif_rx(skb);
}
-static void cops_timeout(struct net_device *dev)
+static void cops_timeout(struct net_device *dev, unsigned int txqueue)
{
struct cops_local *lp = netdev_priv(dev);
int ioaddr = dev->base_addr;
diff --git a/drivers/net/arcnet/arcdevice.h b/drivers/net/arcnet/arcdevice.h
index b0f5bc07aef5..22a49c6d7ae6 100644
--- a/drivers/net/arcnet/arcdevice.h
+++ b/drivers/net/arcnet/arcdevice.h
@@ -356,7 +356,7 @@ int arcnet_open(struct net_device *dev);
int arcnet_close(struct net_device *dev);
netdev_tx_t arcnet_send_packet(struct sk_buff *skb,
struct net_device *dev);
-void arcnet_timeout(struct net_device *dev);
+void arcnet_timeout(struct net_device *dev, unsigned int txqueue);
/* I/O equivalents */
diff --git a/drivers/net/arcnet/arcnet.c b/drivers/net/arcnet/arcnet.c
index 553776cc1d29..e04efc0a5c97 100644
--- a/drivers/net/arcnet/arcnet.c
+++ b/drivers/net/arcnet/arcnet.c
@@ -763,7 +763,7 @@ static int go_tx(struct net_device *dev)
}
/* Called by the kernel when transmit times out */
-void arcnet_timeout(struct net_device *dev)
+void arcnet_timeout(struct net_device *dev, unsigned int txqueue)
{
unsigned long flags;
struct arcnet_local *lp = netdev_priv(dev);
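These prototype updates track the tree-wide change that gives ndo_tx_timeout a second argument naming the stalled TX queue. A sketch of the new hook shape, with hypothetical driver names:

#include <linux/netdevice.h>

/* Sketch of the new-style hook: @txqueue identifies the stalled TX
 * queue, so multi-queue drivers can reset just that ring. */
static void example_tx_timeout(struct net_device *dev, unsigned int txqueue)
{
	netdev_warn(dev, "TX timeout on queue %u\n", txqueue);
	/* typically: schedule reset work for the affected ring */
}

static const struct net_device_ops example_netdev_ops = {
	.ndo_tx_timeout	= example_tx_timeout,
};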
diff --git a/drivers/net/bonding/bond_3ad.c b/drivers/net/bonding/bond_3ad.c
index e3b25f310936..31e43a2197a3 100644
--- a/drivers/net/bonding/bond_3ad.c
+++ b/drivers/net/bonding/bond_3ad.c
@@ -31,16 +31,6 @@
#define AD_CHURN_DETECTION_TIME 60
#define AD_AGGREGATE_WAIT_TIME 2
-/* Port state definitions (43.4.2.2 in the 802.3ad standard) */
-#define AD_STATE_LACP_ACTIVITY 0x1
-#define AD_STATE_LACP_TIMEOUT 0x2
-#define AD_STATE_AGGREGATION 0x4
-#define AD_STATE_SYNCHRONIZATION 0x8
-#define AD_STATE_COLLECTING 0x10
-#define AD_STATE_DISTRIBUTING 0x20
-#define AD_STATE_DEFAULTED 0x40
-#define AD_STATE_EXPIRED 0x80
-
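The AD_STATE_* flags removed above reappear under an LACP_STATE_ prefix throughout the rest of this file. Their replacement definitions live in a shared header not shown in this hunk; judging from the usage below, they presumably keep the same 802.3ad bit values:

/* Port state definitions (43.4.2.2 in the 802.3ad standard),
 * renamed with an LACP_STATE_ prefix; bit values unchanged. */
#define LACP_STATE_LACP_ACTIVITY	0x1
#define LACP_STATE_LACP_TIMEOUT		0x2
#define LACP_STATE_AGGREGATION		0x4
#define LACP_STATE_SYNCHRONIZATION	0x8
#define LACP_STATE_COLLECTING		0x10
#define LACP_STATE_DISTRIBUTING		0x20
#define LACP_STATE_DEFAULTED		0x40
#define LACP_STATE_EXPIRED		0x80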
/* Port Variables definitions used by the State Machines (43.4.7 in the
* 802.3ad standard)
*/
@@ -457,8 +447,8 @@ static void __choose_matched(struct lacpdu *lacpdu, struct port *port)
MAC_ADDRESS_EQUAL(&(lacpdu->partner_system), &(port->actor_system)) &&
(ntohs(lacpdu->partner_system_priority) == port->actor_system_priority) &&
(ntohs(lacpdu->partner_key) == port->actor_oper_port_key) &&
- ((lacpdu->partner_state & AD_STATE_AGGREGATION) == (port->actor_oper_port_state & AD_STATE_AGGREGATION))) ||
- ((lacpdu->actor_state & AD_STATE_AGGREGATION) == 0)
+ ((lacpdu->partner_state & LACP_STATE_AGGREGATION) == (port->actor_oper_port_state & LACP_STATE_AGGREGATION))) ||
+ ((lacpdu->actor_state & LACP_STATE_AGGREGATION) == 0)
) {
port->sm_vars |= AD_PORT_MATCHED;
} else {
@@ -492,18 +482,18 @@ static void __record_pdu(struct lacpdu *lacpdu, struct port *port)
partner->port_state = lacpdu->actor_state;
/* set actor_oper_port_state.defaulted to FALSE */
- port->actor_oper_port_state &= ~AD_STATE_DEFAULTED;
+ port->actor_oper_port_state &= ~LACP_STATE_DEFAULTED;
/* set the partner sync. to on if the partner is sync,
* and the port is matched
*/
if ((port->sm_vars & AD_PORT_MATCHED) &&
- (lacpdu->actor_state & AD_STATE_SYNCHRONIZATION)) {
- partner->port_state |= AD_STATE_SYNCHRONIZATION;
+ (lacpdu->actor_state & LACP_STATE_SYNCHRONIZATION)) {
+ partner->port_state |= LACP_STATE_SYNCHRONIZATION;
slave_dbg(port->slave->bond->dev, port->slave->dev,
"partner sync=1\n");
} else {
- partner->port_state &= ~AD_STATE_SYNCHRONIZATION;
+ partner->port_state &= ~LACP_STATE_SYNCHRONIZATION;
slave_dbg(port->slave->bond->dev, port->slave->dev,
"partner sync=0\n");
}
@@ -526,7 +516,7 @@ static void __record_default(struct port *port)
sizeof(struct port_params));
/* set actor_oper_port_state.defaulted to true */
- port->actor_oper_port_state |= AD_STATE_DEFAULTED;
+ port->actor_oper_port_state |= LACP_STATE_DEFAULTED;
}
}
@@ -556,7 +546,7 @@ static void __update_selected(struct lacpdu *lacpdu, struct port *port)
!MAC_ADDRESS_EQUAL(&lacpdu->actor_system, &partner->system) ||
ntohs(lacpdu->actor_system_priority) != partner->system_priority ||
ntohs(lacpdu->actor_key) != partner->key ||
- (lacpdu->actor_state & AD_STATE_AGGREGATION) != (partner->port_state & AD_STATE_AGGREGATION)) {
+ (lacpdu->actor_state & LACP_STATE_AGGREGATION) != (partner->port_state & LACP_STATE_AGGREGATION)) {
port->sm_vars &= ~AD_PORT_SELECTED;
}
}
@@ -588,8 +578,8 @@ static void __update_default_selected(struct port *port)
!MAC_ADDRESS_EQUAL(&admin->system, &oper->system) ||
admin->system_priority != oper->system_priority ||
admin->key != oper->key ||
- (admin->port_state & AD_STATE_AGGREGATION)
- != (oper->port_state & AD_STATE_AGGREGATION)) {
+ (admin->port_state & LACP_STATE_AGGREGATION)
+ != (oper->port_state & LACP_STATE_AGGREGATION)) {
port->sm_vars &= ~AD_PORT_SELECTED;
}
}
@@ -619,10 +609,10 @@ static void __update_ntt(struct lacpdu *lacpdu, struct port *port)
!MAC_ADDRESS_EQUAL(&(lacpdu->partner_system), &(port->actor_system)) ||
(ntohs(lacpdu->partner_system_priority) != port->actor_system_priority) ||
(ntohs(lacpdu->partner_key) != port->actor_oper_port_key) ||
- ((lacpdu->partner_state & AD_STATE_LACP_ACTIVITY) != (port->actor_oper_port_state & AD_STATE_LACP_ACTIVITY)) ||
- ((lacpdu->partner_state & AD_STATE_LACP_TIMEOUT) != (port->actor_oper_port_state & AD_STATE_LACP_TIMEOUT)) ||
- ((lacpdu->partner_state & AD_STATE_SYNCHRONIZATION) != (port->actor_oper_port_state & AD_STATE_SYNCHRONIZATION)) ||
- ((lacpdu->partner_state & AD_STATE_AGGREGATION) != (port->actor_oper_port_state & AD_STATE_AGGREGATION))
+ ((lacpdu->partner_state & LACP_STATE_LACP_ACTIVITY) != (port->actor_oper_port_state & LACP_STATE_LACP_ACTIVITY)) ||
+ ((lacpdu->partner_state & LACP_STATE_LACP_TIMEOUT) != (port->actor_oper_port_state & LACP_STATE_LACP_TIMEOUT)) ||
+ ((lacpdu->partner_state & LACP_STATE_SYNCHRONIZATION) != (port->actor_oper_port_state & LACP_STATE_SYNCHRONIZATION)) ||
+ ((lacpdu->partner_state & LACP_STATE_AGGREGATION) != (port->actor_oper_port_state & LACP_STATE_AGGREGATION))
) {
port->ntt = true;
}
@@ -978,7 +968,7 @@ static void ad_mux_machine(struct port *port, bool *update_slave_arr)
	 * enabling of the port will take place only after this timer)
*/
if ((port->sm_vars & AD_PORT_SELECTED) &&
- (port->partner_oper.port_state & AD_STATE_SYNCHRONIZATION) &&
+ (port->partner_oper.port_state & LACP_STATE_SYNCHRONIZATION) &&
!__check_agg_selection_timer(port)) {
if (port->aggregator->is_active)
port->sm_mux_state =
@@ -996,14 +986,14 @@ static void ad_mux_machine(struct port *port, bool *update_slave_arr)
port->sm_mux_state = AD_MUX_DETACHED;
} else if (port->aggregator->is_active) {
port->actor_oper_port_state |=
- AD_STATE_SYNCHRONIZATION;
+ LACP_STATE_SYNCHRONIZATION;
}
break;
case AD_MUX_COLLECTING_DISTRIBUTING:
if (!(port->sm_vars & AD_PORT_SELECTED) ||
(port->sm_vars & AD_PORT_STANDBY) ||
- !(port->partner_oper.port_state & AD_STATE_SYNCHRONIZATION) ||
- !(port->actor_oper_port_state & AD_STATE_SYNCHRONIZATION)) {
+ !(port->partner_oper.port_state & LACP_STATE_SYNCHRONIZATION) ||
+ !(port->actor_oper_port_state & LACP_STATE_SYNCHRONIZATION)) {
port->sm_mux_state = AD_MUX_ATTACHED;
} else {
/* if port state hasn't changed make
@@ -1032,11 +1022,11 @@ static void ad_mux_machine(struct port *port, bool *update_slave_arr)
port->sm_mux_state);
switch (port->sm_mux_state) {
case AD_MUX_DETACHED:
- port->actor_oper_port_state &= ~AD_STATE_SYNCHRONIZATION;
+ port->actor_oper_port_state &= ~LACP_STATE_SYNCHRONIZATION;
ad_disable_collecting_distributing(port,
update_slave_arr);
- port->actor_oper_port_state &= ~AD_STATE_COLLECTING;
- port->actor_oper_port_state &= ~AD_STATE_DISTRIBUTING;
+ port->actor_oper_port_state &= ~LACP_STATE_COLLECTING;
+ port->actor_oper_port_state &= ~LACP_STATE_DISTRIBUTING;
port->ntt = true;
break;
case AD_MUX_WAITING:
@@ -1045,20 +1035,20 @@ static void ad_mux_machine(struct port *port, bool *update_slave_arr)
case AD_MUX_ATTACHED:
if (port->aggregator->is_active)
port->actor_oper_port_state |=
- AD_STATE_SYNCHRONIZATION;
+ LACP_STATE_SYNCHRONIZATION;
else
port->actor_oper_port_state &=
- ~AD_STATE_SYNCHRONIZATION;
- port->actor_oper_port_state &= ~AD_STATE_COLLECTING;
- port->actor_oper_port_state &= ~AD_STATE_DISTRIBUTING;
+ ~LACP_STATE_SYNCHRONIZATION;
+ port->actor_oper_port_state &= ~LACP_STATE_COLLECTING;
+ port->actor_oper_port_state &= ~LACP_STATE_DISTRIBUTING;
ad_disable_collecting_distributing(port,
update_slave_arr);
port->ntt = true;
break;
case AD_MUX_COLLECTING_DISTRIBUTING:
- port->actor_oper_port_state |= AD_STATE_COLLECTING;
- port->actor_oper_port_state |= AD_STATE_DISTRIBUTING;
- port->actor_oper_port_state |= AD_STATE_SYNCHRONIZATION;
+ port->actor_oper_port_state |= LACP_STATE_COLLECTING;
+ port->actor_oper_port_state |= LACP_STATE_DISTRIBUTING;
+ port->actor_oper_port_state |= LACP_STATE_SYNCHRONIZATION;
ad_enable_collecting_distributing(port,
update_slave_arr);
port->ntt = true;
@@ -1156,7 +1146,7 @@ static void ad_rx_machine(struct lacpdu *lacpdu, struct port *port)
port->sm_vars |= AD_PORT_LACP_ENABLED;
port->sm_vars &= ~AD_PORT_SELECTED;
__record_default(port);
- port->actor_oper_port_state &= ~AD_STATE_EXPIRED;
+ port->actor_oper_port_state &= ~LACP_STATE_EXPIRED;
port->sm_rx_state = AD_RX_PORT_DISABLED;
/* Fall Through */
@@ -1166,9 +1156,9 @@ static void ad_rx_machine(struct lacpdu *lacpdu, struct port *port)
case AD_RX_LACP_DISABLED:
port->sm_vars &= ~AD_PORT_SELECTED;
__record_default(port);
- port->partner_oper.port_state &= ~AD_STATE_AGGREGATION;
+ port->partner_oper.port_state &= ~LACP_STATE_AGGREGATION;
port->sm_vars |= AD_PORT_MATCHED;
- port->actor_oper_port_state &= ~AD_STATE_EXPIRED;
+ port->actor_oper_port_state &= ~LACP_STATE_EXPIRED;
break;
case AD_RX_EXPIRED:
/* Reset of the Synchronization flag (Standard 43.4.12)
@@ -1177,19 +1167,19 @@ static void ad_rx_machine(struct lacpdu *lacpdu, struct port *port)
* case of EXPIRED even if LINK_DOWN didn't arrive for
* the port.
*/
- port->partner_oper.port_state &= ~AD_STATE_SYNCHRONIZATION;
+ port->partner_oper.port_state &= ~LACP_STATE_SYNCHRONIZATION;
port->sm_vars &= ~AD_PORT_MATCHED;
- port->partner_oper.port_state |= AD_STATE_LACP_TIMEOUT;
- port->partner_oper.port_state |= AD_STATE_LACP_ACTIVITY;
+ port->partner_oper.port_state |= LACP_STATE_LACP_TIMEOUT;
+ port->partner_oper.port_state |= LACP_STATE_LACP_ACTIVITY;
port->sm_rx_timer_counter = __ad_timer_to_ticks(AD_CURRENT_WHILE_TIMER, (u16)(AD_SHORT_TIMEOUT));
- port->actor_oper_port_state |= AD_STATE_EXPIRED;
+ port->actor_oper_port_state |= LACP_STATE_EXPIRED;
port->sm_vars |= AD_PORT_CHURNED;
break;
case AD_RX_DEFAULTED:
__update_default_selected(port);
__record_default(port);
port->sm_vars |= AD_PORT_MATCHED;
- port->actor_oper_port_state &= ~AD_STATE_EXPIRED;
+ port->actor_oper_port_state &= ~LACP_STATE_EXPIRED;
break;
case AD_RX_CURRENT:
/* detect loopback situation */
@@ -1202,8 +1192,8 @@ static void ad_rx_machine(struct lacpdu *lacpdu, struct port *port)
__update_selected(lacpdu, port);
__update_ntt(lacpdu, port);
__record_pdu(lacpdu, port);
- port->sm_rx_timer_counter = __ad_timer_to_ticks(AD_CURRENT_WHILE_TIMER, (u16)(port->actor_oper_port_state & AD_STATE_LACP_TIMEOUT));
- port->actor_oper_port_state &= ~AD_STATE_EXPIRED;
+ port->sm_rx_timer_counter = __ad_timer_to_ticks(AD_CURRENT_WHILE_TIMER, (u16)(port->actor_oper_port_state & LACP_STATE_LACP_TIMEOUT));
+ port->actor_oper_port_state &= ~LACP_STATE_EXPIRED;
break;
default:
break;
@@ -1231,7 +1221,7 @@ static void ad_churn_machine(struct port *port)
if (port->sm_churn_actor_timer_counter &&
!(--port->sm_churn_actor_timer_counter) &&
port->sm_churn_actor_state == AD_CHURN_MONITOR) {
- if (port->actor_oper_port_state & AD_STATE_SYNCHRONIZATION) {
+ if (port->actor_oper_port_state & LACP_STATE_SYNCHRONIZATION) {
port->sm_churn_actor_state = AD_NO_CHURN;
} else {
port->churn_actor_count++;
@@ -1241,7 +1231,7 @@ static void ad_churn_machine(struct port *port)
if (port->sm_churn_partner_timer_counter &&
!(--port->sm_churn_partner_timer_counter) &&
port->sm_churn_partner_state == AD_CHURN_MONITOR) {
- if (port->partner_oper.port_state & AD_STATE_SYNCHRONIZATION) {
+ if (port->partner_oper.port_state & LACP_STATE_SYNCHRONIZATION) {
port->sm_churn_partner_state = AD_NO_CHURN;
} else {
port->churn_partner_count++;
@@ -1298,7 +1288,7 @@ static void ad_periodic_machine(struct port *port)
/* check if port was reinitialized */
if (((port->sm_vars & AD_PORT_BEGIN) || !(port->sm_vars & AD_PORT_LACP_ENABLED) || !port->is_enabled) ||
- (!(port->actor_oper_port_state & AD_STATE_LACP_ACTIVITY) && !(port->partner_oper.port_state & AD_STATE_LACP_ACTIVITY))
+ (!(port->actor_oper_port_state & LACP_STATE_LACP_ACTIVITY) && !(port->partner_oper.port_state & LACP_STATE_LACP_ACTIVITY))
) {
port->sm_periodic_state = AD_NO_PERIODIC;
}
@@ -1315,11 +1305,11 @@ static void ad_periodic_machine(struct port *port)
switch (port->sm_periodic_state) {
case AD_FAST_PERIODIC:
if (!(port->partner_oper.port_state
- & AD_STATE_LACP_TIMEOUT))
+ & LACP_STATE_LACP_TIMEOUT))
port->sm_periodic_state = AD_SLOW_PERIODIC;
break;
case AD_SLOW_PERIODIC:
- if ((port->partner_oper.port_state & AD_STATE_LACP_TIMEOUT)) {
+ if ((port->partner_oper.port_state & LACP_STATE_LACP_TIMEOUT)) {
port->sm_periodic_timer_counter = 0;
port->sm_periodic_state = AD_PERIODIC_TX;
}
@@ -1335,7 +1325,7 @@ static void ad_periodic_machine(struct port *port)
break;
case AD_PERIODIC_TX:
if (!(port->partner_oper.port_state &
- AD_STATE_LACP_TIMEOUT))
+ LACP_STATE_LACP_TIMEOUT))
port->sm_periodic_state = AD_SLOW_PERIODIC;
else
port->sm_periodic_state = AD_FAST_PERIODIC;
@@ -1542,7 +1532,7 @@ static void ad_port_selection_logic(struct port *port, bool *update_slave_arr)
ad_agg_selection_logic(aggregator, update_slave_arr);
if (!port->aggregator->is_active)
- port->actor_oper_port_state &= ~AD_STATE_SYNCHRONIZATION;
+ port->actor_oper_port_state &= ~LACP_STATE_SYNCHRONIZATION;
}
/* Decide if "agg" is a better choice for the new active aggregator than
@@ -1848,13 +1838,13 @@ static void ad_initialize_port(struct port *port, int lacp_fast)
port->actor_port_priority = 0xff;
port->actor_port_aggregator_identifier = 0;
port->ntt = false;
- port->actor_admin_port_state = AD_STATE_AGGREGATION |
- AD_STATE_LACP_ACTIVITY;
- port->actor_oper_port_state = AD_STATE_AGGREGATION |
- AD_STATE_LACP_ACTIVITY;
+ port->actor_admin_port_state = LACP_STATE_AGGREGATION |
+ LACP_STATE_LACP_ACTIVITY;
+ port->actor_oper_port_state = LACP_STATE_AGGREGATION |
+ LACP_STATE_LACP_ACTIVITY;
if (lacp_fast)
- port->actor_oper_port_state |= AD_STATE_LACP_TIMEOUT;
+ port->actor_oper_port_state |= LACP_STATE_LACP_TIMEOUT;
memcpy(&port->partner_admin, &tmpl, sizeof(tmpl));
memcpy(&port->partner_oper, &tmpl, sizeof(tmpl));
@@ -2105,10 +2095,10 @@ void bond_3ad_unbind_slave(struct slave *slave)
aggregator->aggregator_identifier);
/* Tell the partner that this port is not suitable for aggregation */
- port->actor_oper_port_state &= ~AD_STATE_SYNCHRONIZATION;
- port->actor_oper_port_state &= ~AD_STATE_COLLECTING;
- port->actor_oper_port_state &= ~AD_STATE_DISTRIBUTING;
- port->actor_oper_port_state &= ~AD_STATE_AGGREGATION;
+ port->actor_oper_port_state &= ~LACP_STATE_SYNCHRONIZATION;
+ port->actor_oper_port_state &= ~LACP_STATE_COLLECTING;
+ port->actor_oper_port_state &= ~LACP_STATE_DISTRIBUTING;
+ port->actor_oper_port_state &= ~LACP_STATE_AGGREGATION;
__update_lacpdu_from_port(port);
ad_lacpdu_send(port);
@@ -2695,9 +2685,9 @@ void bond_3ad_update_lacp_rate(struct bonding *bond)
bond_for_each_slave(bond, slave, iter) {
port = &(SLAVE_AD_INFO(slave)->port);
if (lacp_fast)
- port->actor_oper_port_state |= AD_STATE_LACP_TIMEOUT;
+ port->actor_oper_port_state |= LACP_STATE_LACP_TIMEOUT;
else
- port->actor_oper_port_state &= ~AD_STATE_LACP_TIMEOUT;
+ port->actor_oper_port_state &= ~LACP_STATE_LACP_TIMEOUT;
}
spin_unlock_bh(&bond->mode_lock);
}
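
The bond_3ad hunks above are a mechanical rename of the AD_STATE_* port-state
bits to LACP_STATE_*, so the flags can be shared outside the bonding driver.
A minimal userspace sketch of how that actor-state byte is composed and
tested, assuming the conventional IEEE 802.1AX actor_state bit layout (names
and values here are illustrative, not taken from the patch):

#include <stdio.h>

#define LACP_STATE_LACP_ACTIVITY   0x01
#define LACP_STATE_LACP_TIMEOUT    0x02
#define LACP_STATE_AGGREGATION     0x04
#define LACP_STATE_SYNCHRONIZATION 0x08
#define LACP_STATE_COLLECTING      0x10
#define LACP_STATE_DISTRIBUTING    0x20
#define LACP_STATE_DEFAULTED       0x40
#define LACP_STATE_EXPIRED         0x80

int main(void)
{
	/* roughly what ad_initialize_port() sets up */
	unsigned char state = LACP_STATE_AGGREGATION | LACP_STATE_LACP_ACTIVITY;

	/* fast LACP rate requested: set the short-timeout bit */
	state |= LACP_STATE_LACP_TIMEOUT;

	/* a port may collect/distribute only once it is in sync */
	if (!(state & LACP_STATE_SYNCHRONIZATION))
		state &= ~(LACP_STATE_COLLECTING | LACP_STATE_DISTRIBUTING);

	printf("actor state: 0x%02x\n", state);
	return 0;
}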
diff --git a/drivers/net/caif/caif_serial.c b/drivers/net/caif/caif_serial.c
index bd40b114d6cd..d737ceb61203 100644
--- a/drivers/net/caif/caif_serial.c
+++ b/drivers/net/caif/caif_serial.c
@@ -270,7 +270,9 @@ static int caif_xmit(struct sk_buff *skb, struct net_device *dev)
{
struct ser_device *ser;
- BUG_ON(dev == NULL);
+ if (WARN_ON(!dev))
+ return -EINVAL;
+
ser = netdev_priv(dev);
/* Send flow off once, on high water mark */
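
The caif_serial hunk swaps a fatal BUG_ON() for a recoverable WARN_ON()
check. Note that ->ndo_start_xmit() formally returns a netdev_tx_t, so a
variant of the same guard that keeps that contract might look like the
sketch below; the demo_* names are hypothetical stand-ins, not the driver's
own code:

#include <linux/netdevice.h>
#include <linux/skbuff.h>

/* Hypothetical private state, standing in for struct ser_device. */
struct demo_priv {
	int flow_off;
};

static netdev_tx_t demo_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct demo_priv *priv;

	/* WARN_ON() logs a backtrace and lets us recover; the BUG_ON()
	 * it replaces would have halted the whole machine.
	 */
	if (WARN_ON(!dev)) {
		kfree_skb(skb);
		return NETDEV_TX_OK;
	}

	priv = netdev_priv(dev);
	/* ... hand the skb to the underlying tty here ... */
	consume_skb(skb);
	return NETDEV_TX_OK;
}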
diff --git a/drivers/net/can/at91_can.c b/drivers/net/can/at91_can.c
index c8e1a04ba384..9df2007b5e56 100644
--- a/drivers/net/can/at91_can.c
+++ b/drivers/net/can/at91_can.c
@@ -1302,7 +1302,7 @@ static int at91_can_probe(struct platform_device *pdev)
goto exit_put;
}
- addr = ioremap_nocache(res->start, resource_size(res));
+ addr = ioremap(res->start, resource_size(res));
if (!addr) {
err = -ENOMEM;
goto exit_release;
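
This and the following ioremap_nocache() hunks are mechanical: plain
ioremap() already returns an uncached mapping on the architectures these
drivers target, so the _nocache variant was redundant and dropped tree-wide.
A hedged sketch of the resulting probe-time idiom (demo_* is illustrative):

#include <linux/io.h>
#include <linux/ioport.h>
#include <linux/platform_device.h>

static void __iomem *demo_map(struct platform_device *pdev)
{
	struct resource *res;
	void __iomem *addr;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!res)
		return NULL;

	/* uncached by default; no _nocache spelling needed */
	addr = ioremap(res->start, resource_size(res));
	if (!addr)
		return NULL;	/* caller treats this as -ENOMEM */

	return addr;
}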
diff --git a/drivers/net/can/cc770/cc770_isa.c b/drivers/net/can/cc770/cc770_isa.c
index b9047d8110d5..194c86e0f340 100644
--- a/drivers/net/can/cc770/cc770_isa.c
+++ b/drivers/net/can/cc770/cc770_isa.c
@@ -175,7 +175,7 @@ static int cc770_isa_probe(struct platform_device *pdev)
err = -EBUSY;
goto exit;
}
- base = ioremap_nocache(mem[idx], iosize);
+ base = ioremap(mem[idx], iosize);
if (!base) {
err = -ENOMEM;
goto exit_release;
diff --git a/drivers/net/can/sja1000/sja1000_isa.c b/drivers/net/can/sja1000/sja1000_isa.c
index 1c4d32d1a542..d513fac50718 100644
--- a/drivers/net/can/sja1000/sja1000_isa.c
+++ b/drivers/net/can/sja1000/sja1000_isa.c
@@ -130,7 +130,7 @@ static int sja1000_isa_probe(struct platform_device *pdev)
err = -EBUSY;
goto exit;
}
- base = ioremap_nocache(mem[idx], iosize);
+ base = ioremap(mem[idx], iosize);
if (!base) {
err = -ENOMEM;
goto exit_release;
diff --git a/drivers/net/can/sja1000/sja1000_platform.c b/drivers/net/can/sja1000/sja1000_platform.c
index ff5a96f34085..d7222ba46622 100644
--- a/drivers/net/can/sja1000/sja1000_platform.c
+++ b/drivers/net/can/sja1000/sja1000_platform.c
@@ -229,7 +229,7 @@ static int sp_probe(struct platform_device *pdev)
resource_size(res_mem), DRV_NAME))
return -EBUSY;
- addr = devm_ioremap_nocache(&pdev->dev, res_mem->start,
+ addr = devm_ioremap(&pdev->dev, res_mem->start,
resource_size(res_mem));
if (!addr)
return -ENOMEM;
diff --git a/drivers/net/can/slcan.c b/drivers/net/can/slcan.c
index 2e57122f02fb..2f5c287eac95 100644
--- a/drivers/net/can/slcan.c
+++ b/drivers/net/can/slcan.c
@@ -344,9 +344,16 @@ static void slcan_transmit(struct work_struct *work)
*/
static void slcan_write_wakeup(struct tty_struct *tty)
{
- struct slcan *sl = tty->disc_data;
+ struct slcan *sl;
+
+ rcu_read_lock();
+ sl = rcu_dereference(tty->disc_data);
+ if (!sl)
+ goto out;
schedule_work(&sl->tx_work);
+out:
+ rcu_read_unlock();
}
/* Send a can_frame to a TTY queue. */
@@ -644,10 +651,11 @@ static void slcan_close(struct tty_struct *tty)
return;
spin_lock_bh(&sl->lock);
- tty->disc_data = NULL;
+ rcu_assign_pointer(tty->disc_data, NULL);
sl->tty = NULL;
spin_unlock_bh(&sl->lock);
+ synchronize_rcu();
flush_work(&sl->tx_work);
/* Flush network side */
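
The slcan change closes a use-after-free race: the tty write_wakeup callback
can run concurrently with slcan_close(), so disc_data is now published and
cleared as an RCU pointer, and close waits for readers via synchronize_rcu()
before flushing the work. A self-contained sketch of that pattern, with
demo_* names standing in for the tty/slcan objects:

#include <linux/rcupdate.h>
#include <linux/spinlock.h>
#include <linux/workqueue.h>

struct demo_state {
	struct work_struct tx_work;
};

/* Illustrative shared slot, standing in for tty->disc_data. */
static struct demo_state __rcu *demo_slot;
static DEFINE_SPINLOCK(demo_lock);

static void demo_tx_worker(struct work_struct *work)
{
	/* drain the transmit queue here */
}

static void demo_open(struct demo_state *st)
{
	INIT_WORK(&st->tx_work, demo_tx_worker);
	rcu_assign_pointer(demo_slot, st);	/* publish */
}

/* Reader side: may run concurrently with teardown. */
static void demo_wakeup(void)
{
	struct demo_state *st;

	rcu_read_lock();
	st = rcu_dereference(demo_slot);
	if (st)
		schedule_work(&st->tx_work);	/* st stays valid here */
	rcu_read_unlock();
}

/* Teardown side: unpublish, wait for readers, then flush. */
static void demo_close(struct demo_state *st)
{
	spin_lock_bh(&demo_lock);
	rcu_assign_pointer(demo_slot, NULL);
	spin_unlock_bh(&demo_lock);

	synchronize_rcu();	/* no reader can still see 'st' */
	flush_work(&st->tx_work);
}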
diff --git a/drivers/net/can/softing/softing_main.c b/drivers/net/can/softing/softing_main.c
index 8242fb287cbb..d1ddf763b188 100644
--- a/drivers/net/can/softing/softing_main.c
+++ b/drivers/net/can/softing/softing_main.c
@@ -777,7 +777,7 @@ static int softing_pdev_probe(struct platform_device *pdev)
goto platform_resource_failed;
card->dpram_phys = pres->start;
card->dpram_size = resource_size(pres);
- card->dpram = ioremap_nocache(card->dpram_phys, card->dpram_size);
+ card->dpram = ioremap(card->dpram_phys, card->dpram_size);
if (!card->dpram) {
dev_alert(&card->pdev->dev, "dpram ioremap failed\n");
goto ioremap_failed;
diff --git a/drivers/net/dsa/Kconfig b/drivers/net/dsa/Kconfig
index c7667645f04a..2d38dbc9dd8c 100644
--- a/drivers/net/dsa/Kconfig
+++ b/drivers/net/dsa/Kconfig
@@ -54,6 +54,8 @@ source "drivers/net/dsa/mv88e6xxx/Kconfig"
source "drivers/net/dsa/ocelot/Kconfig"
+source "drivers/net/dsa/qca/Kconfig"
+
source "drivers/net/dsa/sja1105/Kconfig"
config NET_DSA_QCA8K
@@ -103,7 +105,6 @@ config NET_DSA_SMSC_LAN9303_MDIO
config NET_DSA_VITESSE_VSC73XX
tristate
- depends on OF
depends on NET_DSA
select FIXED_PHY
select VITESSE_PHY
@@ -114,7 +115,6 @@ config NET_DSA_VITESSE_VSC73XX
config NET_DSA_VITESSE_VSC73XX_SPI
tristate "Vitesse VSC7385/7388/7395/7398 SPI mode support"
- depends on OF
depends on NET_DSA
depends on SPI
select NET_DSA_VITESSE_VSC73XX
@@ -124,7 +124,6 @@ config NET_DSA_VITESSE_VSC73XX_SPI
config NET_DSA_VITESSE_VSC73XX_PLATFORM
tristate "Vitesse VSC7385/7388/7395/7398 Platform mode support"
- depends on OF
depends on NET_DSA
depends on HAS_IOMEM
select NET_DSA_VITESSE_VSC73XX
diff --git a/drivers/net/dsa/Makefile b/drivers/net/dsa/Makefile
index 9d384a32b3a2..4a943ccc2ca4 100644
--- a/drivers/net/dsa/Makefile
+++ b/drivers/net/dsa/Makefile
@@ -21,4 +21,5 @@ obj-y += b53/
obj-y += microchip/
obj-y += mv88e6xxx/
obj-y += ocelot/
+obj-y += qca/
obj-y += sja1105/
diff --git a/drivers/net/dsa/b53/b53_common.c b/drivers/net/dsa/b53/b53_common.c
index edacacfc9365..060497512159 100644
--- a/drivers/net/dsa/b53/b53_common.c
+++ b/drivers/net/dsa/b53/b53_common.c
@@ -371,8 +371,6 @@ static void b53_enable_vlan(struct b53_device *dev, bool enable,
b53_read8(dev, B53_VLAN_PAGE, B53_VLAN_CTRL5, &vc5);
}
- mgmt &= ~SM_SW_FWD_MODE;
-
if (enable) {
vc0 |= VC0_VLAN_EN | VC0_VID_CHK_EN | VC0_VID_HASH_VID;
vc1 |= VC1_RX_MCST_UNTAG_EN | VC1_RX_MCST_FWD_EN;
@@ -573,9 +571,8 @@ EXPORT_SYMBOL(b53_disable_port);
void b53_brcm_hdr_setup(struct dsa_switch *ds, int port)
{
- bool tag_en = !(ds->ops->get_tag_protocol(ds, port) ==
- DSA_TAG_PROTO_NONE);
struct b53_device *dev = ds->priv;
+ bool tag_en = !(dev->tag_protocol == DSA_TAG_PROTO_NONE);
u8 hdr_ctl, val;
u16 reg;
@@ -595,6 +592,22 @@ void b53_brcm_hdr_setup(struct dsa_switch *ds, int port)
break;
}
+ /* Enable management mode if tagging is requested */
+ b53_read8(dev, B53_CTRL_PAGE, B53_SWITCH_MODE, &hdr_ctl);
+ if (tag_en)
+ hdr_ctl |= SM_SW_FWD_MODE;
+ else
+ hdr_ctl &= ~SM_SW_FWD_MODE;
+ b53_write8(dev, B53_CTRL_PAGE, B53_SWITCH_MODE, hdr_ctl);
+
+ /* Configure the appropriate IMP port */
+ b53_read8(dev, B53_MGMT_PAGE, B53_GLOBAL_CONFIG, &hdr_ctl);
+ if (port == 8)
+ hdr_ctl |= GC_FRM_MGMT_PORT_MII;
+ else if (port == 5)
+ hdr_ctl |= GC_FRM_MGMT_PORT_M;
+ b53_write8(dev, B53_MGMT_PAGE, B53_GLOBAL_CONFIG, hdr_ctl);
+
/* Enable Broadcom tags for IMP port */
b53_read8(dev, B53_MGMT_PAGE, B53_BRCM_HDR, &hdr_ctl);
if (tag_en)
@@ -1866,36 +1879,57 @@ static bool b53_possible_cpu_port(struct dsa_switch *ds, int port)
return false;
}
-static bool b53_can_enable_brcm_tags(struct dsa_switch *ds, int port)
+static bool b53_can_enable_brcm_tags(struct dsa_switch *ds, int port,
+ enum dsa_tag_protocol tag_protocol)
{
bool ret = b53_possible_cpu_port(ds, port);
- if (!ret)
+ if (!ret) {
dev_warn(ds->dev, "Port %d is not Broadcom tag capable\n",
port);
+ return ret;
+ }
+
+ switch (tag_protocol) {
+ case DSA_TAG_PROTO_BRCM:
+ case DSA_TAG_PROTO_BRCM_PREPEND:
+ dev_warn(ds->dev,
+ "Port %d is stacked to Broadcom tag switch\n", port);
+ ret = false;
+ break;
+ default:
+ ret = true;
+ break;
+ }
+
return ret;
}
-enum dsa_tag_protocol b53_get_tag_protocol(struct dsa_switch *ds, int port)
+enum dsa_tag_protocol b53_get_tag_protocol(struct dsa_switch *ds, int port,
+ enum dsa_tag_protocol mprot)
{
struct b53_device *dev = ds->priv;
/* Older models (5325, 5365) support a different tag format that we do
- * not support in net/dsa/tag_brcm.c yet. 539x and 531x5 require managed
- * mode to be turned on which means we need to specifically manage ARL
- * misses on multicast addresses (TBD).
+ * not support in net/dsa/tag_brcm.c yet.
*/
- if (is5325(dev) || is5365(dev) || is539x(dev) || is531x5(dev) ||
- !b53_can_enable_brcm_tags(ds, port))
- return DSA_TAG_PROTO_NONE;
+ if (is5325(dev) || is5365(dev) ||
+ !b53_can_enable_brcm_tags(ds, port, mprot)) {
+ dev->tag_protocol = DSA_TAG_PROTO_NONE;
+ goto out;
+ }
/* Broadcom BCM58xx chips have a flow accelerator on Port 8
* which requires us to use the prepended Broadcom tag type
*/
- if (dev->chip_id == BCM58XX_DEVICE_ID && port == B53_CPU_PORT)
- return DSA_TAG_PROTO_BRCM_PREPEND;
+ if (dev->chip_id == BCM58XX_DEVICE_ID && port == B53_CPU_PORT) {
+ dev->tag_protocol = DSA_TAG_PROTO_BRCM_PREPEND;
+ goto out;
+ }
- return DSA_TAG_PROTO_BRCM;
+ dev->tag_protocol = DSA_TAG_PROTO_BRCM;
+out:
+ return dev->tag_protocol;
}
EXPORT_SYMBOL(b53_get_tag_protocol);
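
b53_get_tag_protocol() now records its verdict in dev->tag_protocol, so
later paths such as b53_brcm_hdr_setup() consult the cached value instead
of calling back into the DSA op. Roughly, the caching pattern looks like
this (simplified types, illustrative names):

#include <stdbool.h>

enum demo_tag_proto { DEMO_TAG_NONE, DEMO_TAG_BRCM, DEMO_TAG_BRCM_PREPEND };

struct demo_dev {
	enum demo_tag_proto tag_protocol;	/* cached verdict */
};

static enum demo_tag_proto demo_get_tag_protocol(struct demo_dev *dev,
						 int port, bool port_capable)
{
	if (!port_capable)
		dev->tag_protocol = DEMO_TAG_NONE;
	else if (port == 8)			/* flow-accelerator port */
		dev->tag_protocol = DEMO_TAG_BRCM_PREPEND;
	else
		dev->tag_protocol = DEMO_TAG_BRCM;

	return dev->tag_protocol;
}

static bool demo_hdr_setup_wants_tag(const struct demo_dev *dev)
{
	/* no re-query needed: use the cached value */
	return dev->tag_protocol != DEMO_TAG_NONE;
}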
diff --git a/drivers/net/dsa/b53/b53_priv.h b/drivers/net/dsa/b53/b53_priv.h
index 1877acf05081..3c30f3a7eb29 100644
--- a/drivers/net/dsa/b53/b53_priv.h
+++ b/drivers/net/dsa/b53/b53_priv.h
@@ -118,6 +118,7 @@ struct b53_device {
u8 jumbo_size_reg;
int reset_gpio;
u8 num_arl_entries;
+ enum dsa_tag_protocol tag_protocol;
/* used ports mask */
u16 enabled_ports;
@@ -359,7 +360,8 @@ int b53_mdb_del(struct dsa_switch *ds, int port,
const struct switchdev_obj_port_mdb *mdb);
int b53_mirror_add(struct dsa_switch *ds, int port,
struct dsa_mall_mirror_tc_entry *mirror, bool ingress);
-enum dsa_tag_protocol b53_get_tag_protocol(struct dsa_switch *ds, int port);
+enum dsa_tag_protocol b53_get_tag_protocol(struct dsa_switch *ds, int port,
+ enum dsa_tag_protocol mprot);
void b53_mirror_del(struct dsa_switch *ds, int port,
struct dsa_mall_mirror_tc_entry *mirror);
int b53_enable_port(struct dsa_switch *ds, int port, struct phy_device *phy);
diff --git a/drivers/net/dsa/bcm_sf2.c b/drivers/net/dsa/bcm_sf2.c
index e43040c9f9ee..3e8635311d0d 100644
--- a/drivers/net/dsa/bcm_sf2.c
+++ b/drivers/net/dsa/bcm_sf2.c
@@ -68,7 +68,7 @@ static void bcm_sf2_imp_setup(struct dsa_switch *ds, int port)
/* Force link status for IMP port */
reg = core_readl(priv, offset);
- reg |= (MII_SW_OR | LINK_STS);
+ reg |= (MII_SW_OR | LINK_STS | GMII_SPEED_UP_2G);
core_writel(priv, reg, offset);
/* Enable Broadcast, Multicast, Unicast forwarding to IMP port */
diff --git a/drivers/net/dsa/dsa_loop.c b/drivers/net/dsa/dsa_loop.c
index c8d7ef27fd72..fdcb70b9f0e4 100644
--- a/drivers/net/dsa/dsa_loop.c
+++ b/drivers/net/dsa/dsa_loop.c
@@ -61,7 +61,8 @@ struct dsa_loop_priv {
static struct phy_device *phydevs[PHY_MAX_ADDR];
static enum dsa_tag_protocol dsa_loop_get_protocol(struct dsa_switch *ds,
- int port)
+ int port,
+ enum dsa_tag_protocol mp)
{
dev_dbg(ds->dev, "%s: port: %d\n", __func__, port);
diff --git a/drivers/net/dsa/lan9303-core.c b/drivers/net/dsa/lan9303-core.c
index e3c333a8f45d..cc17a44dd3a8 100644
--- a/drivers/net/dsa/lan9303-core.c
+++ b/drivers/net/dsa/lan9303-core.c
@@ -883,7 +883,8 @@ static int lan9303_check_device(struct lan9303 *chip)
/* ---------------------------- DSA -----------------------------------*/
static enum dsa_tag_protocol lan9303_get_tag_protocol(struct dsa_switch *ds,
- int port)
+ int port,
+ enum dsa_tag_protocol mp)
{
return DSA_TAG_PROTO_LAN9303;
}
diff --git a/drivers/net/dsa/lantiq_gswip.c b/drivers/net/dsa/lantiq_gswip.c
index 955324968b74..0369c22fe3e1 100644
--- a/drivers/net/dsa/lantiq_gswip.c
+++ b/drivers/net/dsa/lantiq_gswip.c
@@ -841,7 +841,8 @@ static int gswip_setup(struct dsa_switch *ds)
}
static enum dsa_tag_protocol gswip_get_tag_protocol(struct dsa_switch *ds,
- int port)
+ int port,
+ enum dsa_tag_protocol mp)
{
return DSA_TAG_PROTO_GSWIP;
}
diff --git a/drivers/net/dsa/microchip/ksz8795.c b/drivers/net/dsa/microchip/ksz8795.c
index 24a5e99f7fd5..47d65b77caf7 100644
--- a/drivers/net/dsa/microchip/ksz8795.c
+++ b/drivers/net/dsa/microchip/ksz8795.c
@@ -645,7 +645,8 @@ static void ksz8795_w_phy(struct ksz_device *dev, u16 phy, u16 reg, u16 val)
}
static enum dsa_tag_protocol ksz8795_get_tag_protocol(struct dsa_switch *ds,
- int port)
+ int port,
+ enum dsa_tag_protocol mp)
{
return DSA_TAG_PROTO_KSZ8795;
}
diff --git a/drivers/net/dsa/microchip/ksz9477.c b/drivers/net/dsa/microchip/ksz9477.c
index 50ffc63d6231..9a51b8a4de5d 100644
--- a/drivers/net/dsa/microchip/ksz9477.c
+++ b/drivers/net/dsa/microchip/ksz9477.c
@@ -295,7 +295,8 @@ static void ksz9477_port_init_cnt(struct ksz_device *dev, int port)
}
static enum dsa_tag_protocol ksz9477_get_tag_protocol(struct dsa_switch *ds,
- int port)
+ int port,
+ enum dsa_tag_protocol mp)
{
enum dsa_tag_protocol proto = DSA_TAG_PROTO_KSZ9477;
struct ksz_device *dev = ds->priv;
diff --git a/drivers/net/dsa/mt7530.c b/drivers/net/dsa/mt7530.c
index ed1ec10ec62b..022466ca1c19 100644
--- a/drivers/net/dsa/mt7530.c
+++ b/drivers/net/dsa/mt7530.c
@@ -1223,7 +1223,8 @@ mt7530_port_vlan_del(struct dsa_switch *ds, int port,
}
static enum dsa_tag_protocol
-mtk_get_tag_protocol(struct dsa_switch *ds, int port)
+mtk_get_tag_protocol(struct dsa_switch *ds, int port,
+ enum dsa_tag_protocol mp)
{
struct mt7530_priv *priv = ds->priv;
diff --git a/drivers/net/dsa/mv88e6060.c b/drivers/net/dsa/mv88e6060.c
index a5a37f47b320..24b8219fd607 100644
--- a/drivers/net/dsa/mv88e6060.c
+++ b/drivers/net/dsa/mv88e6060.c
@@ -43,7 +43,8 @@ static const char *mv88e6060_get_name(struct mii_bus *bus, int sw_addr)
}
static enum dsa_tag_protocol mv88e6060_get_tag_protocol(struct dsa_switch *ds,
- int port)
+ int port,
+ enum dsa_tag_protocol m)
{
return DSA_TAG_PROTO_TRAILER;
}
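
The signature change repeated across these drivers threads a third argument
into ->get_tag_protocol(): the tagging protocol already in use by the DSA
master, which lets a stacked switch avoid double-tagging, as the b53 hunk
above does. A hedged sketch of an implementation that uses it (demo_* is
hypothetical):

#include <net/dsa.h>

static enum dsa_tag_protocol demo_get_tag_protocol(struct dsa_switch *ds,
						   int port,
						   enum dsa_tag_protocol mprot)
{
	/* Refuse to stack our tag on top of an identical upstream tag. */
	if (mprot == DSA_TAG_PROTO_BRCM ||
	    mprot == DSA_TAG_PROTO_BRCM_PREPEND)
		return DSA_TAG_PROTO_NONE;

	return DSA_TAG_PROTO_BRCM;
}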
diff --git a/drivers/net/dsa/mv88e6xxx/chip.c b/drivers/net/dsa/mv88e6xxx/chip.c
index 3bd988529178..8c9289549688 100644
--- a/drivers/net/dsa/mv88e6xxx/chip.c
+++ b/drivers/net/dsa/mv88e6xxx/chip.c
@@ -340,11 +340,14 @@ static int mv88e6xxx_g1_irq_setup(struct mv88e6xxx_chip *chip)
*/
irq_set_lockdep_class(chip->irq, &lock_key, &request_key);
+ snprintf(chip->irq_name, sizeof(chip->irq_name),
+ "mv88e6xxx-%s", dev_name(chip->dev));
+
mv88e6xxx_reg_unlock(chip);
err = request_threaded_irq(chip->irq, NULL,
mv88e6xxx_g1_irq_thread_fn,
IRQF_ONESHOT | IRQF_SHARED,
- dev_name(chip->dev), chip);
+ chip->irq_name, chip);
mv88e6xxx_reg_lock(chip);
if (err)
mv88e6xxx_g1_irq_free_common(chip);
@@ -2303,10 +2306,14 @@ static int mv88e6xxx_serdes_irq_request(struct mv88e6xxx_chip *chip, int port,
if (!irq)
return 0;
+ snprintf(dev_id->serdes_irq_name, sizeof(dev_id->serdes_irq_name),
+ "mv88e6xxx-%s-serdes-%d", dev_name(chip->dev), port);
+
/* Requesting the IRQ will trigger IRQ callbacks, so release the lock */
mv88e6xxx_reg_unlock(chip);
err = request_threaded_irq(irq, NULL, mv88e6xxx_serdes_irq_thread_fn,
- IRQF_ONESHOT, "mv88e6xxx-serdes", dev_id);
+ IRQF_ONESHOT, dev_id->serdes_irq_name,
+ dev_id);
mv88e6xxx_reg_lock(chip);
if (err)
return err;
@@ -3838,6 +3845,9 @@ static const struct mv88e6xxx_ops mv88e6190_ops = {
.serdes_irq_mapping = mv88e6390_serdes_irq_mapping,
.serdes_irq_enable = mv88e6390_serdes_irq_enable,
.serdes_irq_status = mv88e6390_serdes_irq_status,
+	.serdes_get_sset_count = mv88e6390_serdes_get_sset_count,
+	.serdes_get_strings = mv88e6390_serdes_get_strings,
+	.serdes_get_stats = mv88e6390_serdes_get_stats,
.gpio_ops = &mv88e6352_gpio_ops,
.phylink_validate = mv88e6390_phylink_validate,
};
@@ -3889,6 +3899,9 @@ static const struct mv88e6xxx_ops mv88e6190x_ops = {
.serdes_irq_mapping = mv88e6390_serdes_irq_mapping,
.serdes_irq_enable = mv88e6390_serdes_irq_enable,
.serdes_irq_status = mv88e6390_serdes_irq_status,
+	.serdes_get_sset_count = mv88e6390_serdes_get_sset_count,
+	.serdes_get_strings = mv88e6390_serdes_get_strings,
+	.serdes_get_stats = mv88e6390_serdes_get_stats,
.gpio_ops = &mv88e6352_gpio_ops,
.phylink_validate = mv88e6390x_phylink_validate,
};
@@ -3939,6 +3952,9 @@ static const struct mv88e6xxx_ops mv88e6191_ops = {
.serdes_irq_mapping = mv88e6390_serdes_irq_mapping,
.serdes_irq_enable = mv88e6390_serdes_irq_enable,
.serdes_irq_status = mv88e6390_serdes_irq_status,
+	.serdes_get_sset_count = mv88e6390_serdes_get_sset_count,
+	.serdes_get_strings = mv88e6390_serdes_get_strings,
+	.serdes_get_stats = mv88e6390_serdes_get_stats,
.avb_ops = &mv88e6390_avb_ops,
.ptp_ops = &mv88e6352_ptp_ops,
.phylink_validate = mv88e6390_phylink_validate,
@@ -4085,6 +4101,9 @@ static const struct mv88e6xxx_ops mv88e6290_ops = {
.serdes_irq_mapping = mv88e6390_serdes_irq_mapping,
.serdes_irq_enable = mv88e6390_serdes_irq_enable,
.serdes_irq_status = mv88e6390_serdes_irq_status,
+	.serdes_get_sset_count = mv88e6390_serdes_get_sset_count,
+	.serdes_get_strings = mv88e6390_serdes_get_strings,
+	.serdes_get_stats = mv88e6390_serdes_get_stats,
.gpio_ops = &mv88e6352_gpio_ops,
.avb_ops = &mv88e6390_avb_ops,
.ptp_ops = &mv88e6352_ptp_ops,
@@ -4424,6 +4443,9 @@ static const struct mv88e6xxx_ops mv88e6390_ops = {
.gpio_ops = &mv88e6352_gpio_ops,
.avb_ops = &mv88e6390_avb_ops,
.ptp_ops = &mv88e6352_ptp_ops,
+ .serdes_get_sset_count = mv88e6390_serdes_get_sset_count,
+ .serdes_get_strings = mv88e6390_serdes_get_strings,
+ .serdes_get_stats = mv88e6390_serdes_get_stats,
.phylink_validate = mv88e6390_phylink_validate,
};
@@ -4476,6 +4498,9 @@ static const struct mv88e6xxx_ops mv88e6390x_ops = {
.serdes_irq_mapping = mv88e6390_serdes_irq_mapping,
.serdes_irq_enable = mv88e6390_serdes_irq_enable,
.serdes_irq_status = mv88e6390_serdes_irq_status,
+ .serdes_get_sset_count = mv88e6390_serdes_get_sset_count,
+ .serdes_get_strings = mv88e6390_serdes_get_strings,
+ .serdes_get_stats = mv88e6390_serdes_get_stats,
.gpio_ops = &mv88e6352_gpio_ops,
.avb_ops = &mv88e6390_avb_ops,
.ptp_ops = &mv88e6352_ptp_ops,
@@ -5207,7 +5232,8 @@ static struct mv88e6xxx_chip *mv88e6xxx_alloc_chip(struct device *dev)
}
static enum dsa_tag_protocol mv88e6xxx_get_tag_protocol(struct dsa_switch *ds,
- int port)
+ int port,
+ enum dsa_tag_protocol m)
{
struct mv88e6xxx_chip *chip = ds->priv;
diff --git a/drivers/net/dsa/mv88e6xxx/chip.h b/drivers/net/dsa/mv88e6xxx/chip.h
index 8a8e38bfb161..f332cb4b2fbf 100644
--- a/drivers/net/dsa/mv88e6xxx/chip.h
+++ b/drivers/net/dsa/mv88e6xxx/chip.h
@@ -236,6 +236,7 @@ struct mv88e6xxx_port {
bool mirror_ingress;
bool mirror_egress;
unsigned int serdes_irq;
+ char serdes_irq_name[32];
};
struct mv88e6xxx_chip {
@@ -292,11 +293,16 @@ struct mv88e6xxx_chip {
struct mv88e6xxx_irq g1_irq;
struct mv88e6xxx_irq g2_irq;
int irq;
+ char irq_name[32];
int device_irq;
+ char device_irq_name[32];
int watchdog_irq;
+ char watchdog_irq_name[32];
int atu_prob_irq;
+ char atu_prob_irq_name[32];
int vtu_prob_irq;
+ char vtu_prob_irq_name[32];
struct kthread_worker *kworker;
struct kthread_delayed_work irq_poll_work;
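
These mv88e6xxx hunks give every interrupt a per-chip name.
request_threaded_irq() keeps only a pointer to the name string, so the
buffer must outlive the IRQ; embedding it in the chip structure and
formatting it with dev_name() makes /proc/interrupts unambiguous on boards
with several chips. A minimal sketch (demo_* names are illustrative):

#include <linux/device.h>
#include <linux/interrupt.h>

struct demo_chip {
	struct device *dev;
	int irq;
	char irq_name[32];	/* must live as long as the IRQ is requested */
};

static irqreturn_t demo_irq_thread(int irq, void *data)
{
	return IRQ_HANDLED;
}

static int demo_irq_setup(struct demo_chip *chip)
{
	/* dev_name() provides a unique, stable per-instance suffix */
	snprintf(chip->irq_name, sizeof(chip->irq_name),
		 "demo-%s", dev_name(chip->dev));

	return request_threaded_irq(chip->irq, NULL, demo_irq_thread,
				    IRQF_ONESHOT, chip->irq_name, chip);
}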
diff --git a/drivers/net/dsa/mv88e6xxx/global1_atu.c b/drivers/net/dsa/mv88e6xxx/global1_atu.c
index bdcd25560dd2..bac9a8a68e50 100644
--- a/drivers/net/dsa/mv88e6xxx/global1_atu.c
+++ b/drivers/net/dsa/mv88e6xxx/global1_atu.c
@@ -425,9 +425,12 @@ int mv88e6xxx_g1_atu_prob_irq_setup(struct mv88e6xxx_chip *chip)
if (chip->atu_prob_irq < 0)
return chip->atu_prob_irq;
+ snprintf(chip->atu_prob_irq_name, sizeof(chip->atu_prob_irq_name),
+ "mv88e6xxx-%s-g1-atu-prob", dev_name(chip->dev));
+
err = request_threaded_irq(chip->atu_prob_irq, NULL,
mv88e6xxx_g1_atu_prob_irq_thread_fn,
- IRQF_ONESHOT, "mv88e6xxx-g1-atu-prob",
+ IRQF_ONESHOT, chip->atu_prob_irq_name,
chip);
if (err)
irq_dispose_mapping(chip->atu_prob_irq);
diff --git a/drivers/net/dsa/mv88e6xxx/global1_vtu.c b/drivers/net/dsa/mv88e6xxx/global1_vtu.c
index 33056a609e96..48390b7b18ad 100644
--- a/drivers/net/dsa/mv88e6xxx/global1_vtu.c
+++ b/drivers/net/dsa/mv88e6xxx/global1_vtu.c
@@ -631,9 +631,12 @@ int mv88e6xxx_g1_vtu_prob_irq_setup(struct mv88e6xxx_chip *chip)
if (chip->vtu_prob_irq < 0)
return chip->vtu_prob_irq;
+ snprintf(chip->vtu_prob_irq_name, sizeof(chip->vtu_prob_irq_name),
+ "mv88e6xxx-%s-g1-vtu-prob", dev_name(chip->dev));
+
err = request_threaded_irq(chip->vtu_prob_irq, NULL,
mv88e6xxx_g1_vtu_prob_irq_thread_fn,
- IRQF_ONESHOT, "mv88e6xxx-g1-vtu-prob",
+ IRQF_ONESHOT, chip->vtu_prob_irq_name,
chip);
if (err)
irq_dispose_mapping(chip->vtu_prob_irq);
diff --git a/drivers/net/dsa/mv88e6xxx/global2.c b/drivers/net/dsa/mv88e6xxx/global2.c
index 87bfe7c8c9cd..01503014b1ee 100644
--- a/drivers/net/dsa/mv88e6xxx/global2.c
+++ b/drivers/net/dsa/mv88e6xxx/global2.c
@@ -948,10 +948,13 @@ static int mv88e6xxx_g2_watchdog_setup(struct mv88e6xxx_chip *chip)
if (chip->watchdog_irq < 0)
return chip->watchdog_irq;
+ snprintf(chip->watchdog_irq_name, sizeof(chip->watchdog_irq_name),
+ "mv88e6xxx-%s-watchdog", dev_name(chip->dev));
+
err = request_threaded_irq(chip->watchdog_irq, NULL,
mv88e6xxx_g2_watchdog_thread_fn,
IRQF_ONESHOT | IRQF_TRIGGER_FALLING,
- "mv88e6xxx-watchdog", chip);
+ chip->watchdog_irq_name, chip);
if (err)
return err;
@@ -1114,9 +1117,12 @@ int mv88e6xxx_g2_irq_setup(struct mv88e6xxx_chip *chip)
goto out;
}
+ snprintf(chip->device_irq_name, sizeof(chip->device_irq_name),
+ "mv88e6xxx-%s-g2", dev_name(chip->dev));
+
err = request_threaded_irq(chip->device_irq, NULL,
mv88e6xxx_g2_irq_thread_fn,
- IRQF_ONESHOT, "mv88e6xxx-g2", chip);
+ IRQF_ONESHOT, chip->device_irq_name, chip);
if (err)
goto out;
diff --git a/drivers/net/dsa/mv88e6xxx/serdes.c b/drivers/net/dsa/mv88e6xxx/serdes.c
index 902feb398746..8d8b3b74aee1 100644
--- a/drivers/net/dsa/mv88e6xxx/serdes.c
+++ b/drivers/net/dsa/mv88e6xxx/serdes.c
@@ -405,22 +405,116 @@ static int mv88e6390_serdes_power_sgmii(struct mv88e6xxx_chip *chip, u8 lane,
return err;
}
+struct mv88e6390_serdes_hw_stat {
+ char string[ETH_GSTRING_LEN];
+ int reg;
+};
+
+static struct mv88e6390_serdes_hw_stat mv88e6390_serdes_hw_stats[] = {
+ { "serdes_rx_pkts", 0xf021 },
+ { "serdes_rx_bytes", 0xf024 },
+ { "serdes_rx_pkts_error", 0xf027 },
+};
+
+int mv88e6390_serdes_get_sset_count(struct mv88e6xxx_chip *chip, int port)
+{
+ if (mv88e6390_serdes_get_lane(chip, port) == 0)
+ return 0;
+
+ return ARRAY_SIZE(mv88e6390_serdes_hw_stats);
+}
+
+int mv88e6390_serdes_get_strings(struct mv88e6xxx_chip *chip,
+ int port, uint8_t *data)
+{
+ struct mv88e6390_serdes_hw_stat *stat;
+ int i;
+
+ if (mv88e6390_serdes_get_lane(chip, port) == 0)
+ return 0;
+
+ for (i = 0; i < ARRAY_SIZE(mv88e6390_serdes_hw_stats); i++) {
+ stat = &mv88e6390_serdes_hw_stats[i];
+ memcpy(data + i * ETH_GSTRING_LEN, stat->string,
+ ETH_GSTRING_LEN);
+ }
+ return ARRAY_SIZE(mv88e6390_serdes_hw_stats);
+}
+
+static uint64_t mv88e6390_serdes_get_stat(struct mv88e6xxx_chip *chip, int lane,
+ struct mv88e6390_serdes_hw_stat *stat)
+{
+ u16 reg[3];
+ int err, i;
+
+ for (i = 0; i < 3; i++) {
+ err = mv88e6390_serdes_read(chip, lane, MDIO_MMD_PHYXS,
+ stat->reg + i, &reg[i]);
+ if (err) {
+ dev_err(chip->dev, "failed to read statistic\n");
+ return 0;
+ }
+ }
+
+ return reg[0] | ((u64)reg[1] << 16) | ((u64)reg[2] << 32);
+}
+
+int mv88e6390_serdes_get_stats(struct mv88e6xxx_chip *chip, int port,
+ uint64_t *data)
+{
+ struct mv88e6390_serdes_hw_stat *stat;
+ int lane;
+ int i;
+
+ lane = mv88e6390_serdes_get_lane(chip, port);
+ if (lane == 0)
+ return 0;
+
+ for (i = 0; i < ARRAY_SIZE(mv88e6390_serdes_hw_stats); i++) {
+ stat = &mv88e6390_serdes_hw_stats[i];
+ data[i] = mv88e6390_serdes_get_stat(chip, lane, stat);
+ }
+
+ return ARRAY_SIZE(mv88e6390_serdes_hw_stats);
+}
+
+static int mv88e6390_serdes_enable_checker(struct mv88e6xxx_chip *chip, u8 lane)
+{
+ u16 reg;
+ int err;
+
+ err = mv88e6390_serdes_read(chip, lane, MDIO_MMD_PHYXS,
+ MV88E6390_PG_CONTROL, &reg);
+ if (err)
+ return err;
+
+ reg |= MV88E6390_PG_CONTROL_ENABLE_PC;
+ return mv88e6390_serdes_write(chip, lane, MDIO_MMD_PHYXS,
+ MV88E6390_PG_CONTROL, reg);
+}
+
int mv88e6390_serdes_power(struct mv88e6xxx_chip *chip, int port, u8 lane,
bool up)
{
u8 cmode = chip->ports[port].cmode;
+ int err = 0;
switch (cmode) {
case MV88E6XXX_PORT_STS_CMODE_SGMII:
case MV88E6XXX_PORT_STS_CMODE_1000BASEX:
case MV88E6XXX_PORT_STS_CMODE_2500BASEX:
- return mv88e6390_serdes_power_sgmii(chip, lane, up);
+ err = mv88e6390_serdes_power_sgmii(chip, lane, up);
+ break;
case MV88E6XXX_PORT_STS_CMODE_XAUI:
case MV88E6XXX_PORT_STS_CMODE_RXAUI:
- return mv88e6390_serdes_power_10g(chip, lane, up);
+ err = mv88e6390_serdes_power_10g(chip, lane, up);
+ break;
}
- return 0;
+ if (!err && up)
+ err = mv88e6390_serdes_enable_checker(chip, lane);
+
+ return err;
}
static void mv88e6390_serdes_irq_link_sgmii(struct mv88e6xxx_chip *chip,
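
Each SERDES statistic above spans three consecutive 16-bit registers, low
word first, which mv88e6390_serdes_get_stat() concatenates into a 48-bit
value. A runnable sketch of just that concatenation (register addresses and
sample values are illustrative):

#include <stdint.h>
#include <stdio.h>

static uint64_t demo_join48(uint16_t lo, uint16_t mid, uint16_t hi)
{
	/* e.g. regs 0xf021..0xf023 for serdes_rx_pkts */
	return (uint64_t)lo | ((uint64_t)mid << 16) | ((uint64_t)hi << 32);
}

int main(void)
{
	printf("0x%012llx\n",
	       (unsigned long long)demo_join48(0x1234, 0x5678, 0x009a));
	return 0;
}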
diff --git a/drivers/net/dsa/mv88e6xxx/serdes.h b/drivers/net/dsa/mv88e6xxx/serdes.h
index bd8df36ab537..d16ef4da20b0 100644
--- a/drivers/net/dsa/mv88e6xxx/serdes.h
+++ b/drivers/net/dsa/mv88e6xxx/serdes.h
@@ -74,6 +74,10 @@
#define MV88E6390_SGMII_PHY_STATUS_SPD_DPL_VALID BIT(11)
#define MV88E6390_SGMII_PHY_STATUS_LINK BIT(10)
+/* Packet generator pad packet checker */
+#define MV88E6390_PG_CONTROL 0xf010
+#define MV88E6390_PG_CONTROL_ENABLE_PC BIT(0)
+
u8 mv88e6341_serdes_get_lane(struct mv88e6xxx_chip *chip, int port);
u8 mv88e6352_serdes_get_lane(struct mv88e6xxx_chip *chip, int port);
u8 mv88e6390_serdes_get_lane(struct mv88e6xxx_chip *chip, int port);
@@ -99,6 +103,11 @@ int mv88e6352_serdes_get_strings(struct mv88e6xxx_chip *chip,
int port, uint8_t *data);
int mv88e6352_serdes_get_stats(struct mv88e6xxx_chip *chip, int port,
uint64_t *data);
+int mv88e6390_serdes_get_sset_count(struct mv88e6xxx_chip *chip, int port);
+int mv88e6390_serdes_get_strings(struct mv88e6xxx_chip *chip,
+ int port, uint8_t *data);
+int mv88e6390_serdes_get_stats(struct mv88e6xxx_chip *chip, int port,
+ uint64_t *data);
/* Return the (first) SERDES lane address a port is using, 0 otherwise. */
static inline u8 mv88e6xxx_serdes_get_lane(struct mv88e6xxx_chip *chip,
diff --git a/drivers/net/dsa/ocelot/Kconfig b/drivers/net/dsa/ocelot/Kconfig
index 6f9804093150..a5b7cca03d09 100644
--- a/drivers/net/dsa/ocelot/Kconfig
+++ b/drivers/net/dsa/ocelot/Kconfig
@@ -3,8 +3,10 @@ config NET_DSA_MSCC_FELIX
tristate "Ocelot / Felix Ethernet switch support"
depends on NET_DSA && PCI
depends on NET_VENDOR_MICROSEMI
+ depends on NET_VENDOR_FREESCALE
select MSCC_OCELOT_SWITCH
select NET_DSA_TAG_OCELOT
+ select FSL_ENETC_MDIO
help
This driver supports the VSC9959 network switch, which is a member of
the Vitesse / Microsemi / Microchip Ocelot family of switching cores.
diff --git a/drivers/net/dsa/ocelot/felix.c b/drivers/net/dsa/ocelot/felix.c
index b7f92464815d..3257962c147e 100644
--- a/drivers/net/dsa/ocelot/felix.c
+++ b/drivers/net/dsa/ocelot/felix.c
@@ -2,16 +2,22 @@
/* Copyright 2019 NXP Semiconductors
*/
#include <uapi/linux/if_bridge.h>
+#include <soc/mscc/ocelot_qsys.h>
+#include <soc/mscc/ocelot_sys.h>
+#include <soc/mscc/ocelot_dev.h>
+#include <soc/mscc/ocelot_ana.h>
#include <soc/mscc/ocelot.h>
#include <linux/packing.h>
#include <linux/module.h>
+#include <linux/of_net.h>
#include <linux/pci.h>
#include <linux/of.h>
#include <net/dsa.h>
#include "felix.h"
static enum dsa_tag_protocol felix_get_tag_protocol(struct dsa_switch *ds,
- int port)
+ int port,
+ enum dsa_tag_protocol mp)
{
return DSA_TAG_PROTO_OCELOT;
}
@@ -26,14 +32,6 @@ static int felix_set_ageing_time(struct dsa_switch *ds,
return 0;
}
-static void felix_adjust_link(struct dsa_switch *ds, int port,
- struct phy_device *phydev)
-{
- struct ocelot *ocelot = ds->priv;
-
- ocelot_adjust_link(ocelot, port, phydev);
-}
-
static int felix_fdb_dump(struct dsa_switch *ds, int port,
dsa_fdb_dump_cb_t *cb, void *data)
{
@@ -155,6 +153,141 @@ static void felix_port_disable(struct dsa_switch *ds, int port)
return ocelot_port_disable(ocelot, port);
}
+static void felix_phylink_validate(struct dsa_switch *ds, int port,
+ unsigned long *supported,
+ struct phylink_link_state *state)
+{
+ struct ocelot *ocelot = ds->priv;
+ struct ocelot_port *ocelot_port = ocelot->ports[port];
+ __ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = { 0, };
+
+ if (state->interface != PHY_INTERFACE_MODE_NA &&
+ state->interface != ocelot_port->phy_mode) {
+ bitmap_zero(supported, __ETHTOOL_LINK_MODE_MASK_NBITS);
+ return;
+ }
+
+ /* No half-duplex. */
+ phylink_set_port_modes(mask);
+ phylink_set(mask, Autoneg);
+ phylink_set(mask, Pause);
+ phylink_set(mask, Asym_Pause);
+ phylink_set(mask, 10baseT_Full);
+ phylink_set(mask, 100baseT_Full);
+ phylink_set(mask, 1000baseT_Full);
+
+ /* The internal ports that run at 2.5G are overclocked GMII */
+ if (state->interface == PHY_INTERFACE_MODE_GMII ||
+ state->interface == PHY_INTERFACE_MODE_2500BASEX ||
+ state->interface == PHY_INTERFACE_MODE_USXGMII) {
+ phylink_set(mask, 2500baseT_Full);
+ phylink_set(mask, 2500baseX_Full);
+ }
+
+ bitmap_and(supported, supported, mask,
+ __ETHTOOL_LINK_MODE_MASK_NBITS);
+ bitmap_and(state->advertising, state->advertising, mask,
+ __ETHTOOL_LINK_MODE_MASK_NBITS);
+}
+
+static int felix_phylink_mac_pcs_get_state(struct dsa_switch *ds, int port,
+ struct phylink_link_state *state)
+{
+ struct ocelot *ocelot = ds->priv;
+ struct felix *felix = ocelot_to_felix(ocelot);
+
+ if (felix->info->pcs_link_state)
+ felix->info->pcs_link_state(ocelot, port, state);
+
+ return 0;
+}
+
+static void felix_phylink_mac_config(struct dsa_switch *ds, int port,
+ unsigned int link_an_mode,
+ const struct phylink_link_state *state)
+{
+ struct ocelot *ocelot = ds->priv;
+ struct ocelot_port *ocelot_port = ocelot->ports[port];
+ struct felix *felix = ocelot_to_felix(ocelot);
+ u32 mac_fc_cfg;
+
+ /* Take port out of reset by clearing the MAC_TX_RST, MAC_RX_RST and
+ * PORT_RST bits in CLOCK_CFG
+ */
+ ocelot_port_writel(ocelot_port, DEV_CLOCK_CFG_LINK_SPEED(state->speed),
+ DEV_CLOCK_CFG);
+
+ /* Flow control. Link speed is only used here to evaluate the time
+ * specification in incoming pause frames.
+ */
+ mac_fc_cfg = SYS_MAC_FC_CFG_FC_LINK_SPEED(state->speed);
+
+ /* handle Rx pause in all cases, with 2500base-X this is used for rate
+ * adaptation.
+ */
+ mac_fc_cfg |= SYS_MAC_FC_CFG_RX_FC_ENA;
+
+ if (state->pause & MLO_PAUSE_TX)
+ mac_fc_cfg |= SYS_MAC_FC_CFG_TX_FC_ENA |
+ SYS_MAC_FC_CFG_PAUSE_VAL_CFG(0xffff) |
+ SYS_MAC_FC_CFG_FC_LATENCY_CFG(0x7) |
+ SYS_MAC_FC_CFG_ZERO_PAUSE_ENA;
+ ocelot_write_rix(ocelot, mac_fc_cfg, SYS_MAC_FC_CFG, port);
+
+ ocelot_write_rix(ocelot, 0, ANA_POL_FLOWC, port);
+
+ if (felix->info->pcs_init)
+ felix->info->pcs_init(ocelot, port, link_an_mode, state);
+}
+
+static void felix_phylink_mac_an_restart(struct dsa_switch *ds, int port)
+{
+ struct ocelot *ocelot = ds->priv;
+ struct felix *felix = ocelot_to_felix(ocelot);
+
+ if (felix->info->pcs_an_restart)
+ felix->info->pcs_an_restart(ocelot, port);
+}
+
+static void felix_phylink_mac_link_down(struct dsa_switch *ds, int port,
+ unsigned int link_an_mode,
+ phy_interface_t interface)
+{
+ struct ocelot *ocelot = ds->priv;
+ struct ocelot_port *ocelot_port = ocelot->ports[port];
+
+ ocelot_port_writel(ocelot_port, 0, DEV_MAC_ENA_CFG);
+ ocelot_rmw_rix(ocelot, 0, QSYS_SWITCH_PORT_MODE_PORT_ENA,
+ QSYS_SWITCH_PORT_MODE, port);
+}
+
+static void felix_phylink_mac_link_up(struct dsa_switch *ds, int port,
+ unsigned int link_an_mode,
+ phy_interface_t interface,
+ struct phy_device *phydev)
+{
+ struct ocelot *ocelot = ds->priv;
+ struct ocelot_port *ocelot_port = ocelot->ports[port];
+
+ /* Enable MAC module */
+ ocelot_port_writel(ocelot_port, DEV_MAC_ENA_CFG_RX_ENA |
+ DEV_MAC_ENA_CFG_TX_ENA, DEV_MAC_ENA_CFG);
+
+ /* Enable receiving frames on the port, and activate auto-learning of
+ * MAC addresses.
+ */
+ ocelot_write_gix(ocelot, ANA_PORT_PORT_CFG_LEARNAUTO |
+ ANA_PORT_PORT_CFG_RECV_ENA |
+ ANA_PORT_PORT_CFG_PORTID_VAL(port),
+ ANA_PORT_PORT_CFG, port);
+
+ /* Core: Enable port for frame transfer */
+ ocelot_write_rix(ocelot, QSYS_SWITCH_PORT_MODE_INGRESS_DROP_MODE |
+ QSYS_SWITCH_PORT_MODE_SCH_NEXT_CFG(1) |
+ QSYS_SWITCH_PORT_MODE_PORT_ENA,
+ QSYS_SWITCH_PORT_MODE, port);
+}
+
static void felix_get_strings(struct dsa_switch *ds, int port,
u32 stringset, u8 *data)
{
@@ -185,10 +318,76 @@ static int felix_get_ts_info(struct dsa_switch *ds, int port,
return ocelot_get_ts_info(ocelot, port, info);
}
+static int felix_parse_ports_node(struct felix *felix,
+ struct device_node *ports_node,
+ phy_interface_t *port_phy_modes)
+{
+ struct ocelot *ocelot = &felix->ocelot;
+ struct device *dev = felix->ocelot.dev;
+ struct device_node *child;
+
+ for_each_available_child_of_node(ports_node, child) {
+ phy_interface_t phy_mode;
+ u32 port;
+ int err;
+
+ /* Get switch port number from DT */
+ if (of_property_read_u32(child, "reg", &port) < 0) {
+ dev_err(dev, "Port number not defined in device tree "
+ "(property \"reg\")\n");
+ of_node_put(child);
+ return -ENODEV;
+ }
+
+ /* Get PHY mode from DT */
+ err = of_get_phy_mode(child, &phy_mode);
+ if (err) {
+ dev_err(dev, "Failed to read phy-mode or "
+ "phy-interface-type property for port %d\n",
+ port);
+ of_node_put(child);
+ return -ENODEV;
+ }
+
+ err = felix->info->prevalidate_phy_mode(ocelot, port, phy_mode);
+ if (err < 0) {
+ dev_err(dev, "Unsupported PHY mode %s on port %d\n",
+ phy_modes(phy_mode), port);
+ return err;
+ }
+
+ port_phy_modes[port] = phy_mode;
+ }
+
+ return 0;
+}
+
+static int felix_parse_dt(struct felix *felix, phy_interface_t *port_phy_modes)
+{
+ struct device *dev = felix->ocelot.dev;
+ struct device_node *switch_node;
+ struct device_node *ports_node;
+ int err;
+
+ switch_node = dev->of_node;
+
+ ports_node = of_get_child_by_name(switch_node, "ports");
+ if (!ports_node) {
+ dev_err(dev, "Incorrect bindings: absent \"ports\" node\n");
+ return -ENODEV;
+ }
+
+ err = felix_parse_ports_node(felix, ports_node, port_phy_modes);
+ of_node_put(ports_node);
+
+ return err;
+}
+
static int felix_init_structs(struct felix *felix, int num_phys_ports)
{
struct ocelot *ocelot = &felix->ocelot;
- resource_size_t base;
+ phy_interface_t *port_phy_modes;
+ resource_size_t switch_base;
int port, i, err;
ocelot->num_phys_ports = num_phys_ports;
@@ -203,7 +402,19 @@ static int felix_init_structs(struct felix *felix, int num_phys_ports)
ocelot->shared_queue_sz = felix->info->shared_queue_sz;
ocelot->ops = felix->info->ops;
- base = pci_resource_start(felix->pdev, felix->info->pci_bar);
+ port_phy_modes = kcalloc(num_phys_ports, sizeof(phy_interface_t),
+ GFP_KERNEL);
+ if (!port_phy_modes)
+ return -ENOMEM;
+
+ err = felix_parse_dt(felix, port_phy_modes);
+ if (err) {
+ kfree(port_phy_modes);
+ return err;
+ }
+
+ switch_base = pci_resource_start(felix->pdev,
+ felix->info->switch_pci_bar);
for (i = 0; i < TARGET_MAX; i++) {
struct regmap *target;
@@ -214,13 +425,14 @@ static int felix_init_structs(struct felix *felix, int num_phys_ports)
res = &felix->info->target_io_res[i];
res->flags = IORESOURCE_MEM;
- res->start += base;
- res->end += base;
+ res->start += switch_base;
+ res->end += switch_base;
target = ocelot_regmap_init(ocelot, res);
if (IS_ERR(target)) {
dev_err(ocelot->dev,
"Failed to map device memory space\n");
+ kfree(port_phy_modes);
return PTR_ERR(target);
}
@@ -230,6 +442,7 @@ static int felix_init_structs(struct felix *felix, int num_phys_ports)
err = ocelot_regfields_init(ocelot, felix->info->regfields);
if (err) {
dev_err(ocelot->dev, "failed to init reg fields map\n");
+ kfree(port_phy_modes);
return err;
}
@@ -244,26 +457,37 @@ static int felix_init_structs(struct felix *felix, int num_phys_ports)
if (!ocelot_port) {
dev_err(ocelot->dev,
"failed to allocate port memory\n");
+ kfree(port_phy_modes);
return -ENOMEM;
}
res = &felix->info->port_io_res[port];
res->flags = IORESOURCE_MEM;
- res->start += base;
- res->end += base;
+ res->start += switch_base;
+ res->end += switch_base;
port_regs = devm_ioremap_resource(ocelot->dev, res);
if (IS_ERR(port_regs)) {
dev_err(ocelot->dev,
"failed to map registers for port %d\n", port);
+ kfree(port_phy_modes);
return PTR_ERR(port_regs);
}
+ ocelot_port->phy_mode = port_phy_modes[port];
ocelot_port->ocelot = ocelot;
ocelot_port->regs = port_regs;
ocelot->ports[port] = ocelot_port;
}
+ kfree(port_phy_modes);
+
+ if (felix->info->mdio_bus_alloc) {
+ err = felix->info->mdio_bus_alloc(ocelot);
+ if (err < 0)
+ return err;
+ }
+
return 0;
}
@@ -293,12 +517,22 @@ static int felix_setup(struct dsa_switch *ds)
OCELOT_TAG_PREFIX_LONG);
}
+ /* It looks like the MAC/PCS interrupt register - PM0_IEVENT (0x8040)
+ * isn't instantiated for the Felix PF.
+ * In-band AN may take a few ms to complete, so we need to poll.
+ */
+ ds->pcs_poll = true;
+
return 0;
}
static void felix_teardown(struct dsa_switch *ds)
{
struct ocelot *ocelot = ds->priv;
+ struct felix *felix = ocelot_to_felix(ocelot);
+
+ if (felix->info->mdio_bus_free)
+ felix->info->mdio_bus_free(ocelot);
/* stop workqueue thread */
ocelot_deinit(ocelot);
@@ -369,7 +603,12 @@ static const struct dsa_switch_ops felix_switch_ops = {
.get_ethtool_stats = felix_get_ethtool_stats,
.get_sset_count = felix_get_sset_count,
.get_ts_info = felix_get_ts_info,
- .adjust_link = felix_adjust_link,
+ .phylink_validate = felix_phylink_validate,
+ .phylink_mac_link_state = felix_phylink_mac_pcs_get_state,
+ .phylink_mac_config = felix_phylink_mac_config,
+ .phylink_mac_an_restart = felix_phylink_mac_an_restart,
+ .phylink_mac_link_down = felix_phylink_mac_link_down,
+ .phylink_mac_link_up = felix_phylink_mac_link_up,
.port_enable = felix_port_enable,
.port_disable = felix_port_disable,
.port_fdb_dump = felix_fdb_dump,
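
The felix.c rework drops adjust_link in favour of the full set of phylink
MAC operations, and routes everything PCS-specific through optional
callbacks in the SoC's felix_info. The shape of that delegation, reduced to
a self-contained sketch with demo_* stand-ins:

struct demo_info {
	void (*pcs_init)(void *priv, int port);
	void (*pcs_an_restart)(void *priv, int port);
};

struct demo_switch {
	const struct demo_info *info;
	void *priv;
};

static void demo_mac_config(struct demo_switch *sw, int port)
{
	/* ... common MAC speed/flow-control programming here ... */

	if (sw->info->pcs_init)			/* optional SoC hook */
		sw->info->pcs_init(sw->priv, port);
}

static void demo_an_restart(struct demo_switch *sw, int port)
{
	if (sw->info->pcs_an_restart)
		sw->info->pcs_an_restart(sw->priv, port);
}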
diff --git a/drivers/net/dsa/ocelot/felix.h b/drivers/net/dsa/ocelot/felix.h
index 204296e51d0c..3a7580015b62 100644
--- a/drivers/net/dsa/ocelot/felix.h
+++ b/drivers/net/dsa/ocelot/felix.h
@@ -10,6 +10,7 @@
struct felix_info {
struct resource *target_io_res;
struct resource *port_io_res;
+ struct resource *imdio_res;
const struct reg_field *regfields;
const u32 *const *map;
const struct ocelot_ops *ops;
@@ -17,7 +18,18 @@ struct felix_info {
const struct ocelot_stat_layout *stats_layout;
unsigned int num_stats;
int num_ports;
- int pci_bar;
+ int switch_pci_bar;
+ int imdio_pci_bar;
+ int (*mdio_bus_alloc)(struct ocelot *ocelot);
+ void (*mdio_bus_free)(struct ocelot *ocelot);
+ void (*pcs_init)(struct ocelot *ocelot, int port,
+ unsigned int link_an_mode,
+ const struct phylink_link_state *state);
+ void (*pcs_an_restart)(struct ocelot *ocelot, int port);
+ void (*pcs_link_state)(struct ocelot *ocelot, int port,
+ struct phylink_link_state *state);
+ int (*prevalidate_phy_mode)(struct ocelot *ocelot, int port,
+ phy_interface_t phy_mode);
};
extern struct felix_info felix_info_vsc9959;
@@ -32,6 +44,8 @@ struct felix {
struct pci_dev *pdev;
struct felix_info *info;
struct ocelot ocelot;
+ struct mii_bus *imdio;
+ struct phy_device **pcs;
};
#endif
diff --git a/drivers/net/dsa/ocelot/felix_vsc9959.c b/drivers/net/dsa/ocelot/felix_vsc9959.c
index b9758b0d18c7..2c812b481778 100644
--- a/drivers/net/dsa/ocelot/felix_vsc9959.c
+++ b/drivers/net/dsa/ocelot/felix_vsc9959.c
@@ -2,12 +2,33 @@
/* Copyright 2017 Microsemi Corporation
* Copyright 2018-2019 NXP Semiconductors
*/
+#include <linux/fsl/enetc_mdio.h>
#include <soc/mscc/ocelot_sys.h>
#include <soc/mscc/ocelot.h>
#include <linux/iopoll.h>
#include <linux/pci.h>
#include "felix.h"
+/* TODO: should find a better place for these */
+#define USXGMII_BMCR_RESET BIT(15)
+#define USXGMII_BMCR_AN_EN BIT(12)
+#define USXGMII_BMCR_RST_AN BIT(9)
+#define USXGMII_BMSR_LNKS(status) (((status) & GENMASK(2, 2)) >> 2)
+#define USXGMII_BMSR_AN_CMPL(status) (((status) & GENMASK(5, 5)) >> 5)
+#define USXGMII_ADVERTISE_LNKS(x) (((x) << 15) & BIT(15))
+#define USXGMII_ADVERTISE_FDX BIT(12)
+#define USXGMII_ADVERTISE_SPEED(x) (((x) << 9) & GENMASK(11, 9))
+#define USXGMII_LPA_LNKS(lpa) ((lpa) >> 15)
+#define USXGMII_LPA_DUPLEX(lpa) (((lpa) & GENMASK(12, 12)) >> 12)
+#define USXGMII_LPA_SPEED(lpa) (((lpa) & GENMASK(11, 9)) >> 9)
+
+enum usxgmii_speed {
+ USXGMII_SPEED_10 = 0,
+ USXGMII_SPEED_100 = 1,
+ USXGMII_SPEED_1000 = 2,
+ USXGMII_SPEED_2500 = 4,
+};
+
static const u32 vsc9959_ana_regmap[] = {
REG(ANA_ADVLEARN, 0x0089a0),
REG(ANA_VLANMASK, 0x0089a4),
@@ -386,6 +407,15 @@ static struct resource vsc9959_port_io_res[] = {
},
};
+/* Port MAC 0 Internal MDIO bus through which the SerDes acting as an
+ * SGMII/QSGMII MAC PCS can be found.
+ */
+static struct resource vsc9959_imdio_res = {
+ .start = 0x8030,
+ .end = 0x8040,
+ .name = "imdio",
+};
+
static const struct reg_field vsc9959_regfields[] = {
[ANA_ADVLEARN_VLAN_CHK] = REG_FIELD(ANA_ADVLEARN, 6, 6),
[ANA_ADVLEARN_LEARN_MIRROR] = REG_FIELD(ANA_ADVLEARN, 0, 5),
@@ -565,13 +595,495 @@ static int vsc9959_reset(struct ocelot *ocelot)
return 0;
}
+static void vsc9959_pcs_an_restart_sgmii(struct phy_device *pcs)
+{
+ phy_set_bits(pcs, MII_BMCR, BMCR_ANRESTART);
+}
+
+static void vsc9959_pcs_an_restart_usxgmii(struct phy_device *pcs)
+{
+ phy_write_mmd(pcs, MDIO_MMD_VEND2, MII_BMCR,
+ USXGMII_BMCR_RESET |
+ USXGMII_BMCR_AN_EN |
+ USXGMII_BMCR_RST_AN);
+}
+
+static void vsc9959_pcs_an_restart(struct ocelot *ocelot, int port)
+{
+ struct felix *felix = ocelot_to_felix(ocelot);
+ struct phy_device *pcs = felix->pcs[port];
+
+ if (!pcs)
+ return;
+
+ switch (pcs->interface) {
+ case PHY_INTERFACE_MODE_SGMII:
+ case PHY_INTERFACE_MODE_QSGMII:
+ vsc9959_pcs_an_restart_sgmii(pcs);
+ break;
+ case PHY_INTERFACE_MODE_USXGMII:
+ vsc9959_pcs_an_restart_usxgmii(pcs);
+ break;
+ default:
+ dev_err(ocelot->dev, "Invalid PCS interface type %s\n",
+ phy_modes(pcs->interface));
+ break;
+ }
+}
+
+/* We enable SGMII AN only when the PHY has managed = "in-band-status" in the
+ * device tree. If we are in MLO_AN_PHY mode, we program directly state->speed
+ * into the PCS, which is retrieved out-of-band over MDIO. This also has the
+ * benefit of working with SGMII fixed-links, like downstream switches, where
+ * both link partners attempt to operate as AN slaves and therefore AN never
+ * completes. But it also has the disadvantage that some PHY chips don't pass
+ * traffic if SGMII AN is enabled but not completed (acknowledged by us), so
+ * setting MLO_AN_INBAND is actually required for those.
+ */
+static void vsc9959_pcs_init_sgmii(struct phy_device *pcs,
+ unsigned int link_an_mode,
+ const struct phylink_link_state *state)
+{
+ if (link_an_mode == MLO_AN_INBAND) {
+ int bmsr, bmcr;
+
+ /* Some PHYs like VSC8234 don't like it when AN restarts on
+ * their system side and they restart line side AN too, going
+ * into an endless link up/down loop. Don't restart PCS AN if
+ * link is up already.
+ * We do check that AN is enabled just in case this is the 1st
+ * call, PCS detects a carrier but AN is disabled from power on
+ * or by boot loader.
+ */
+ bmcr = phy_read(pcs, MII_BMCR);
+ if (bmcr < 0)
+ return;
+
+ bmsr = phy_read(pcs, MII_BMSR);
+ if (bmsr < 0)
+ return;
+
+ if ((bmcr & BMCR_ANENABLE) && (bmsr & BMSR_LSTATUS))
+ return;
+
+ /* SGMII spec requires tx_config_Reg[15:0] to be exactly 0x4001
+ * for the MAC PCS in order to acknowledge the AN.
+ */
+ phy_write(pcs, MII_ADVERTISE, ADVERTISE_SGMII |
+ ADVERTISE_LPACK);
+
+ phy_write(pcs, ENETC_PCS_IF_MODE,
+ ENETC_PCS_IF_MODE_SGMII_EN |
+ ENETC_PCS_IF_MODE_USE_SGMII_AN);
+
+ /* Adjust link timer for SGMII */
+ phy_write(pcs, ENETC_PCS_LINK_TIMER1,
+ ENETC_PCS_LINK_TIMER1_VAL);
+ phy_write(pcs, ENETC_PCS_LINK_TIMER2,
+ ENETC_PCS_LINK_TIMER2_VAL);
+
+ phy_write(pcs, MII_BMCR, BMCR_ANRESTART | BMCR_ANENABLE);
+ } else {
+ int speed;
+
+ if (state->duplex == DUPLEX_HALF) {
+ phydev_err(pcs, "Half duplex not supported\n");
+ return;
+ }
+ switch (state->speed) {
+ case SPEED_1000:
+ speed = ENETC_PCS_SPEED_1000;
+ break;
+ case SPEED_100:
+ speed = ENETC_PCS_SPEED_100;
+ break;
+ case SPEED_10:
+ speed = ENETC_PCS_SPEED_10;
+ break;
+ case SPEED_UNKNOWN:
+ /* Silently don't do anything */
+ return;
+ default:
+ phydev_err(pcs, "Invalid PCS speed %d\n", state->speed);
+ return;
+ }
+
+ phy_write(pcs, ENETC_PCS_IF_MODE,
+ ENETC_PCS_IF_MODE_SGMII_EN |
+ ENETC_PCS_IF_MODE_SGMII_SPEED(speed));
+
+ /* Yes, not a mistake: speed is given by IF_MODE. */
+ phy_write(pcs, MII_BMCR, BMCR_RESET |
+ BMCR_SPEED1000 |
+ BMCR_FULLDPLX);
+ }
+}
+
+/* 2500Base-X is SerDes protocol 7 on Felix and 6 on ENETC. It is a SerDes lane
+ * clocked at 3.125 GHz which encodes symbols with 8b/10b and does not have
+ * auto-negotiation of any link parameters. Electrically it is compatible with
+ * a single lane of XAUI.
+ * The hardware reference manual wants to call this mode SGMII, but it isn't
+ * really, since the fundamental features of SGMII:
+ * - Downgrading the link speed by duplicating symbols
+ * - Auto-negotiation
+ * are not there.
+ * The speed is configured at 1000 in the IF_MODE and BMCR MDIO registers
+ * because the clock frequency is actually given by a PLL configured in the
+ * Reset Configuration Word (RCW).
+ * Since there is no difference between fixed speed SGMII w/o AN and 802.3z w/o
+ * AN, we call this PHY interface type 2500Base-X. In case a PHY negotiates a
+ * lower link speed on line side, the system-side interface remains fixed at
+ * 2500 Mbps and we do rate adaptation through pause frames.
+ */
+static void vsc9959_pcs_init_2500basex(struct phy_device *pcs,
+ unsigned int link_an_mode,
+ const struct phylink_link_state *state)
+{
+ if (link_an_mode == MLO_AN_INBAND) {
+ phydev_err(pcs, "AN not supported on 3.125GHz SerDes lane\n");
+ return;
+ }
+
+ phy_write(pcs, ENETC_PCS_IF_MODE,
+ ENETC_PCS_IF_MODE_SGMII_EN |
+ ENETC_PCS_IF_MODE_SGMII_SPEED(ENETC_PCS_SPEED_2500));
+
+ phy_write(pcs, MII_BMCR, BMCR_SPEED1000 |
+ BMCR_FULLDPLX |
+ BMCR_RESET);
+}
+
+static void vsc9959_pcs_init_usxgmii(struct phy_device *pcs,
+ unsigned int link_an_mode,
+ const struct phylink_link_state *state)
+{
+ if (link_an_mode != MLO_AN_INBAND) {
+ phydev_err(pcs, "USXGMII only supports in-band AN for now\n");
+ return;
+ }
+
+ /* Configure device ability for the USXGMII Replicator */
+ phy_write_mmd(pcs, MDIO_MMD_VEND2, MII_ADVERTISE,
+ USXGMII_ADVERTISE_SPEED(USXGMII_SPEED_2500) |
+ USXGMII_ADVERTISE_LNKS(1) |
+ ADVERTISE_SGMII |
+ ADVERTISE_LPACK |
+ USXGMII_ADVERTISE_FDX);
+}
+
+static void vsc9959_pcs_init(struct ocelot *ocelot, int port,
+ unsigned int link_an_mode,
+ const struct phylink_link_state *state)
+{
+ struct felix *felix = ocelot_to_felix(ocelot);
+ struct phy_device *pcs = felix->pcs[port];
+
+ if (!pcs)
+ return;
+
+ /* The PCS does not implement the BMSR register fully, so capability
+ * detection via genphy_read_abilities does not work. Since we can get
+ * the PHY config word from the LPA register though, there is still
+ * value in using the generic phy_resolve_aneg_linkmode function. So
+ * populate the supported and advertising link modes manually here.
+ */
+ linkmode_set_bit_array(phy_basic_ports_array,
+ ARRAY_SIZE(phy_basic_ports_array),
+ pcs->supported);
+ linkmode_set_bit(ETHTOOL_LINK_MODE_10baseT_Full_BIT, pcs->supported);
+ linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Full_BIT, pcs->supported);
+ linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT, pcs->supported);
+ if (pcs->interface == PHY_INTERFACE_MODE_2500BASEX ||
+ pcs->interface == PHY_INTERFACE_MODE_USXGMII)
+ linkmode_set_bit(ETHTOOL_LINK_MODE_2500baseX_Full_BIT,
+ pcs->supported);
+ if (pcs->interface != PHY_INTERFACE_MODE_2500BASEX)
+ linkmode_set_bit(ETHTOOL_LINK_MODE_Autoneg_BIT,
+ pcs->supported);
+ phy_advertise_supported(pcs);
+
+ switch (pcs->interface) {
+ case PHY_INTERFACE_MODE_SGMII:
+ case PHY_INTERFACE_MODE_QSGMII:
+ vsc9959_pcs_init_sgmii(pcs, link_an_mode, state);
+ break;
+ case PHY_INTERFACE_MODE_2500BASEX:
+ vsc9959_pcs_init_2500basex(pcs, link_an_mode, state);
+ break;
+ case PHY_INTERFACE_MODE_USXGMII:
+ vsc9959_pcs_init_usxgmii(pcs, link_an_mode, state);
+ break;
+ default:
+ dev_err(ocelot->dev, "Unsupported link mode %s\n",
+ phy_modes(pcs->interface));
+ }
+}
+
+static void vsc9959_pcs_link_state_resolve(struct phy_device *pcs,
+ struct phylink_link_state *state)
+{
+ state->an_complete = pcs->autoneg_complete;
+ state->an_enabled = pcs->autoneg;
+ state->link = pcs->link;
+ state->duplex = pcs->duplex;
+ state->speed = pcs->speed;
+ /* SGMII AN does not negotiate flow control, but that's ok,
+ * since phylink already knows that, and does:
+ * link_state.pause |= pl->phy_state.pause;
+ */
+ state->pause = MLO_PAUSE_NONE;
+
+ phydev_dbg(pcs,
+ "mode=%s/%s/%s adv=%*pb lpa=%*pb link=%u an_enabled=%u an_complete=%u\n",
+ phy_modes(pcs->interface),
+ phy_speed_to_str(pcs->speed),
+ phy_duplex_to_str(pcs->duplex),
+ __ETHTOOL_LINK_MODE_MASK_NBITS, pcs->advertising,
+ __ETHTOOL_LINK_MODE_MASK_NBITS, pcs->lp_advertising,
+ pcs->link, pcs->autoneg, pcs->autoneg_complete);
+}
+
+static void vsc9959_pcs_link_state_sgmii(struct phy_device *pcs,
+ struct phylink_link_state *state)
+{
+ int err;
+
+ err = genphy_update_link(pcs);
+ if (err < 0)
+ return;
+
+ if (pcs->autoneg_complete) {
+ u16 lpa = phy_read(pcs, MII_LPA);
+
+ mii_lpa_to_linkmode_lpa_sgmii(pcs->lp_advertising, lpa);
+
+ phy_resolve_aneg_linkmode(pcs);
+ }
+}
+
+static void vsc9959_pcs_link_state_2500basex(struct phy_device *pcs,
+ struct phylink_link_state *state)
+{
+ int err;
+
+ err = genphy_update_link(pcs);
+ if (err < 0)
+ return;
+
+ pcs->speed = SPEED_2500;
+ pcs->asym_pause = true;
+ pcs->pause = true;
+}
+
+static void vsc9959_pcs_link_state_usxgmii(struct phy_device *pcs,
+ struct phylink_link_state *state)
+{
+ int status, lpa;
+
+ status = phy_read_mmd(pcs, MDIO_MMD_VEND2, MII_BMSR);
+ if (status < 0)
+ return;
+
+ pcs->autoneg = true;
+ pcs->autoneg_complete = USXGMII_BMSR_AN_CMPL(status);
+ pcs->link = USXGMII_BMSR_LNKS(status);
+
+ if (!pcs->link || !pcs->autoneg_complete)
+ return;
+
+ lpa = phy_read_mmd(pcs, MDIO_MMD_VEND2, MII_LPA);
+ if (lpa < 0)
+ return;
+
+ switch (USXGMII_LPA_SPEED(lpa)) {
+ case USXGMII_SPEED_10:
+ pcs->speed = SPEED_10;
+ break;
+ case USXGMII_SPEED_100:
+ pcs->speed = SPEED_100;
+ break;
+ case USXGMII_SPEED_1000:
+ pcs->speed = SPEED_1000;
+ break;
+ case USXGMII_SPEED_2500:
+ pcs->speed = SPEED_2500;
+ break;
+ default:
+ break;
+ }
+
+ if (USXGMII_LPA_DUPLEX(lpa))
+ pcs->duplex = DUPLEX_FULL;
+ else
+ pcs->duplex = DUPLEX_HALF;
+}
+
+static void vsc9959_pcs_link_state(struct ocelot *ocelot, int port,
+ struct phylink_link_state *state)
+{
+ struct felix *felix = ocelot_to_felix(ocelot);
+ struct phy_device *pcs = felix->pcs[port];
+
+ if (!pcs)
+ return;
+
+ pcs->speed = SPEED_UNKNOWN;
+ pcs->duplex = DUPLEX_UNKNOWN;
+ pcs->pause = 0;
+ pcs->asym_pause = 0;
+
+ switch (pcs->interface) {
+ case PHY_INTERFACE_MODE_SGMII:
+ case PHY_INTERFACE_MODE_QSGMII:
+ vsc9959_pcs_link_state_sgmii(pcs, state);
+ break;
+ case PHY_INTERFACE_MODE_2500BASEX:
+ vsc9959_pcs_link_state_2500basex(pcs, state);
+ break;
+ case PHY_INTERFACE_MODE_USXGMII:
+ vsc9959_pcs_link_state_usxgmii(pcs, state);
+ break;
+ default:
+ return;
+ }
+
+ vsc9959_pcs_link_state_resolve(pcs, state);
+}
+
+static int vsc9959_prevalidate_phy_mode(struct ocelot *ocelot, int port,
+ phy_interface_t phy_mode)
+{
+ switch (phy_mode) {
+ case PHY_INTERFACE_MODE_GMII:
+ /* Only supported on internal to-CPU ports */
+ if (port != 4 && port != 5)
+ return -ENOTSUPP;
+ return 0;
+ case PHY_INTERFACE_MODE_SGMII:
+ case PHY_INTERFACE_MODE_QSGMII:
+ case PHY_INTERFACE_MODE_USXGMII:
+ case PHY_INTERFACE_MODE_2500BASEX:
+ /* Not supported on internal to-CPU ports */
+ if (port == 4 || port == 5)
+ return -ENOTSUPP;
+ return 0;
+ default:
+ return -ENOTSUPP;
+ }
+}
+
static const struct ocelot_ops vsc9959_ops = {
.reset = vsc9959_reset,
};
+static int vsc9959_mdio_bus_alloc(struct ocelot *ocelot)
+{
+ struct felix *felix = ocelot_to_felix(ocelot);
+ struct enetc_mdio_priv *mdio_priv;
+ struct device *dev = ocelot->dev;
+ resource_size_t imdio_base;
+ void __iomem *imdio_regs;
+ struct resource *res;
+ struct enetc_hw *hw;
+ struct mii_bus *bus;
+ int port;
+ int rc;
+
+ felix->pcs = devm_kcalloc(dev, felix->info->num_ports,
+ sizeof(struct phy_device *),
+ GFP_KERNEL);
+ if (!felix->pcs) {
+ dev_err(dev, "failed to allocate array for PCS PHYs\n");
+ return -ENOMEM;
+ }
+
+ imdio_base = pci_resource_start(felix->pdev,
+ felix->info->imdio_pci_bar);
+
+ res = felix->info->imdio_res;
+ res->flags = IORESOURCE_MEM;
+ res->start += imdio_base;
+ res->end += imdio_base;
+
+ imdio_regs = devm_ioremap_resource(dev, res);
+ if (IS_ERR(imdio_regs)) {
+ dev_err(dev, "failed to map internal MDIO registers\n");
+ return PTR_ERR(imdio_regs);
+ }
+
+ hw = enetc_hw_alloc(dev, imdio_regs);
+ if (IS_ERR(hw)) {
+ dev_err(dev, "failed to allocate ENETC HW structure\n");
+ return PTR_ERR(hw);
+ }
+
+ bus = devm_mdiobus_alloc_size(dev, sizeof(*mdio_priv));
+ if (!bus)
+ return -ENOMEM;
+
+ bus->name = "VSC9959 internal MDIO bus";
+ bus->read = enetc_mdio_read;
+ bus->write = enetc_mdio_write;
+ bus->parent = dev;
+ mdio_priv = bus->priv;
+ mdio_priv->hw = hw;
+ /* This gets added to imdio_regs, which already maps addresses
+ * starting with the proper offset.
+ */
+ mdio_priv->mdio_base = 0;
+ snprintf(bus->id, MII_BUS_ID_SIZE, "%s-imdio", dev_name(dev));
+
+ /* Needed in order to initialize the bus mutex lock */
+ rc = mdiobus_register(bus);
+ if (rc < 0) {
+ dev_err(dev, "failed to register MDIO bus\n");
+ return rc;
+ }
+
+ felix->imdio = bus;
+
+ for (port = 0; port < felix->info->num_ports; port++) {
+ struct ocelot_port *ocelot_port = ocelot->ports[port];
+ struct phy_device *pcs;
+ bool is_c45 = false;
+
+ if (ocelot_port->phy_mode == PHY_INTERFACE_MODE_USXGMII)
+ is_c45 = true;
+
+ pcs = get_phy_device(felix->imdio, port, is_c45);
+ if (IS_ERR(pcs))
+ continue;
+
+ pcs->interface = ocelot_port->phy_mode;
+ felix->pcs[port] = pcs;
+
+ dev_info(dev, "Found PCS at internal MDIO address %d\n", port);
+ }
+
+ return 0;
+}
+
+static void vsc9959_mdio_bus_free(struct ocelot *ocelot)
+{
+ struct felix *felix = ocelot_to_felix(ocelot);
+ int port;
+
+ for (port = 0; port < ocelot->num_phys_ports; port++) {
+ struct phy_device *pcs = felix->pcs[port];
+
+ if (!pcs)
+ continue;
+
+ put_device(&pcs->mdio.dev);
+ }
+ mdiobus_unregister(felix->imdio);
+}
+
struct felix_info felix_info_vsc9959 = {
.target_io_res = vsc9959_target_io_res,
.port_io_res = vsc9959_port_io_res,
+ .imdio_res = &vsc9959_imdio_res,
.regfields = vsc9959_regfields,
.map = vsc9959_regmap,
.ops = &vsc9959_ops,
@@ -579,5 +1091,12 @@ struct felix_info felix_info_vsc9959 = {
.num_stats = ARRAY_SIZE(vsc9959_stats_layout),
.shared_queue_sz = 128 * 1024,
.num_ports = 6,
- .pci_bar = 4,
+ .switch_pci_bar = 4,
+ .imdio_pci_bar = 0,
+ .mdio_bus_alloc = vsc9959_mdio_bus_alloc,
+ .mdio_bus_free = vsc9959_mdio_bus_free,
+ .pcs_init = vsc9959_pcs_init,
+ .pcs_an_restart = vsc9959_pcs_an_restart,
+ .pcs_link_state = vsc9959_pcs_link_state,
+ .prevalidate_phy_mode = vsc9959_prevalidate_phy_mode,
};
diff --git a/drivers/net/dsa/qca/Kconfig b/drivers/net/dsa/qca/Kconfig
new file mode 100644
index 000000000000..e3c8d715a18f
--- /dev/null
+++ b/drivers/net/dsa/qca/Kconfig
@@ -0,0 +1,9 @@
+# SPDX-License-Identifier: GPL-2.0-only
+config NET_DSA_AR9331
+ tristate "Qualcomm Atheros AR9331 Ethernet switch support"
+ depends on NET_DSA
+ select NET_DSA_TAG_AR9331
+ select REGMAP
+ ---help---
+ This enables support for the Qualcomm Atheros AR9331 built-in Ethernet
+ switch.
diff --git a/drivers/net/dsa/qca/Makefile b/drivers/net/dsa/qca/Makefile
new file mode 100644
index 000000000000..274022319066
--- /dev/null
+++ b/drivers/net/dsa/qca/Makefile
@@ -0,0 +1,2 @@
+# SPDX-License-Identifier: GPL-2.0-only
+obj-$(CONFIG_NET_DSA_AR9331) += ar9331.o
diff --git a/drivers/net/dsa/qca/ar9331.c b/drivers/net/dsa/qca/ar9331.c
new file mode 100644
index 000000000000..de25f99e995a
--- /dev/null
+++ b/drivers/net/dsa/qca/ar9331.c
@@ -0,0 +1,856 @@
+// SPDX-License-Identifier: GPL-2.0-only
+// Copyright (c) 2019 Pengutronix, Oleksij Rempel <kernel@pengutronix.de>
+/*
+ * +----------------------+
+ * GMAC1----RGMII----|--MAC0 |
+ * \---MDIO1----|--REGs |----MDIO3----\
+ * | | | +------+
+ * | | +--| |
+ * | MAC1-|----RMII--M-----| PHY0 |-o P0
+ * | | | | +------+
+ * | | | +--| |
+ * | MAC2-|----RMII--------| PHY1 |-o P1
+ * | | | | +------+
+ * | | | +--| |
+ * | MAC3-|----RMII--------| PHY2 |-o P2
+ * | | | | +------+
+ * | | | +--| |
+ * | MAC4-|----RMII--------| PHY3 |-o P3
+ * | | | | +------+
+ * | | | +--| |
+ * | MAC5-|--+-RMII--M-----|-PHY4-|-o P4
+ * | | | | +------+
+ * +----------------------+ | \--CFG_SW_PHY_SWAP
+ * GMAC0---------------RMII--------------------/ \-CFG_SW_PHY_ADDR_SWAP
+ * \---MDIO0--NC
+ *
+ * GMAC0 and MAC5 are connected together and use the same PHY. Depending on
+ * the configuration it can be PHY4 (default) or PHY0. Only GMAC0 or MAC5 can
+ * be used at the same time. If GMAC0 is used (default), then MAC5 should be
+ * disabled.
+ *
+ * CFG_SW_PHY_SWAP - swap the connections of PHY0 and PHY4. If this bit is not
+ * set, PHY4 is connected to the GMAC0/MAC5 bundle and PHY0 is connected to
+ * MAC1. If this bit is set, PHY4 is connected to MAC1 and PHY0 is connected
+ * to the GMAC0/MAC5 bundle.
+ *
+ * CFG_SW_PHY_ADDR_SWAP - swap the addresses of PHY0 and PHY4
+ *
+ * CFG_SW_PHY_SWAP and CFG_SW_PHY_ADDR_SWAP are part of the SoC-specific
+ * register set and are not related to the switch-internal registers.
+ */
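+
+/* Illustrative only: a hypothetical device tree fragment for this driver.
+ * The node name, addresses and specifiers below are made up and the
+ * authoritative binding is documented elsewhere; the driver itself only
+ * expects a "switch" reset line, an interrupt and an "mdio" child node
+ * (see ar9331_sw_probe(), ar9331_sw_irq_init() and ar9331_sw_mbus_init()):
+ *
+ *	switch@1f {
+ *		compatible = "qca,ar9331-switch";
+ *		reg = <0x1f>;
+ *		resets = <&rst 8>;
+ *		reset-names = "switch";
+ *		interrupt-parent = <&miscintc>;
+ *		interrupts = <12>;
+ *
+ *		mdio {
+ *			#address-cells = <1>;
+ *			#size-cells = <0>;
+ *		};
+ *	};
+ */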
+
+#include <linux/bitfield.h>
+#include <linux/module.h>
+#include <linux/of_irq.h>
+#include <linux/of_mdio.h>
+#include <linux/regmap.h>
+#include <linux/reset.h>
+#include <net/dsa.h>
+
+#define AR9331_SW_NAME "ar9331_switch"
+#define AR9331_SW_PORTS 6
+
+/* dummy reg to change page */
+#define AR9331_SW_REG_PAGE 0x40000
+
+/* Global Interrupt */
+#define AR9331_SW_REG_GINT 0x10
+#define AR9331_SW_REG_GINT_MASK 0x14
+#define AR9331_SW_GINT_PHY_INT BIT(2)
+
+#define AR9331_SW_REG_FLOOD_MASK 0x2c
+#define AR9331_SW_FLOOD_MASK_BROAD_TO_CPU BIT(26)
+
+#define AR9331_SW_REG_GLOBAL_CTRL 0x30
+#define AR9331_SW_GLOBAL_CTRL_MFS_M GENMASK(13, 0)
+
+#define AR9331_SW_REG_MDIO_CTRL 0x98
+#define AR9331_SW_MDIO_CTRL_BUSY BIT(31)
+#define AR9331_SW_MDIO_CTRL_MASTER_EN BIT(30)
+#define AR9331_SW_MDIO_CTRL_CMD_READ BIT(27)
+#define AR9331_SW_MDIO_CTRL_PHY_ADDR_M GENMASK(25, 21)
+#define AR9331_SW_MDIO_CTRL_REG_ADDR_M GENMASK(20, 16)
+#define AR9331_SW_MDIO_CTRL_DATA_M GENMASK(16, 0)
+
+#define AR9331_SW_REG_PORT_STATUS(_port) (0x100 + (_port) * 0x100)
+
+/* FLOW_LINK_EN - enable MAC flow control configuration via auto-negotiation
+ * with the PHY. If not set, the MAC can be configured by software.
+ */
+#define AR9331_SW_PORT_STATUS_FLOW_LINK_EN BIT(12)
+
+/* LINK_EN - If set, MAC is configured from PHY link status.
+ * If not set, MAC should be configured by software.
+ */
+#define AR9331_SW_PORT_STATUS_LINK_EN BIT(9)
+#define AR9331_SW_PORT_STATUS_DUPLEX_MODE BIT(6)
+#define AR9331_SW_PORT_STATUS_RX_FLOW_EN BIT(5)
+#define AR9331_SW_PORT_STATUS_TX_FLOW_EN BIT(4)
+#define AR9331_SW_PORT_STATUS_RXMAC BIT(3)
+#define AR9331_SW_PORT_STATUS_TXMAC BIT(2)
+#define AR9331_SW_PORT_STATUS_SPEED_M GENMASK(1, 0)
+#define AR9331_SW_PORT_STATUS_SPEED_1000 2
+#define AR9331_SW_PORT_STATUS_SPEED_100 1
+#define AR9331_SW_PORT_STATUS_SPEED_10 0
+
+#define AR9331_SW_PORT_STATUS_MAC_MASK \
+ (AR9331_SW_PORT_STATUS_TXMAC | AR9331_SW_PORT_STATUS_RXMAC)
+
+#define AR9331_SW_PORT_STATUS_LINK_MASK \
+ (AR9331_SW_PORT_STATUS_LINK_EN | AR9331_SW_PORT_STATUS_FLOW_LINK_EN | \
+ AR9331_SW_PORT_STATUS_DUPLEX_MODE | \
+ AR9331_SW_PORT_STATUS_RX_FLOW_EN | AR9331_SW_PORT_STATUS_TX_FLOW_EN | \
+ AR9331_SW_PORT_STATUS_SPEED_M)
+
+/* Phy bypass mode
+ * ------------------------------------------------------------------------
+ * Bit: | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 |10 |11 |12 |13 |14 |15 |
+ *
+ * real | start | OP | PhyAddr | Reg Addr | TA |
+ * atheros| start | OP | 2'b00 |PhyAdd[2:0]| Reg Addr[4:0] | TA |
+ *
+ *
+ * Bit: |16 |17 |18 |19 |20 |21 |22 |23 |24 |25 |26 |27 |28 |29 |30 |31 |
+ * real | Data |
+ * atheros| Data |
+ *
+ * ------------------------------------------------------------------------
+ * Page address mode
+ * ------------------------------------------------------------------------
+ * Bit: | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 |10 |11 |12 |13 |14 |15 |
+ * real | start | OP | PhyAddr | Reg Addr | TA |
+ * atheros| start | OP | 2'b11 | 8'b0 | TA |
+ *
+ * Bit: |16 |17 |18 |19 |20 |21 |22 |23 |24 |25 |26 |27 |28 |29 |30 |31 |
+ * real | Data |
+ * atheros| | Page [9:0] |
+ */
+/* In Page Address mode, bits [18:9] of the 32-bit register address should be
+ * written to bits [9:0] of the MDIO data register.
+ */
+#define AR9331_SW_ADDR_PAGE GENMASK(18, 9)
+
+/* ------------------------------------------------------------------------
+ * Normal register access mode
+ * ------------------------------------------------------------------------
+ * Bit: | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 |10 |11 |12 |13 |14 |15 |
+ * real | start | OP | PhyAddr | Reg Addr | TA |
+ * atheros| start | OP | 2'b10 | low_addr[7:0] | TA |
+ *
+ * Bit: |16 |17 |18 |19 |20 |21 |22 |23 |24 |25 |26 |27 |28 |29 |30 |31 |
+ * real | Data |
+ * atheros| Data |
+ * ------------------------------------------------------------------------
+ */
+#define AR9331_SW_LOW_ADDR_PHY GENMASK(8, 6)
+#define AR9331_SW_LOW_ADDR_REG GENMASK(5, 1)
+
+#define AR9331_SW_MDIO_PHY_MODE_M GENMASK(4, 3)
+#define AR9331_SW_MDIO_PHY_MODE_PAGE 3
+#define AR9331_SW_MDIO_PHY_MODE_REG 2
+#define AR9331_SW_MDIO_PHY_MODE_BYPASS 0
+#define AR9331_SW_MDIO_PHY_ADDR_M GENMASK(2, 0)
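+
+/* Worked example (illustrative; the numbers follow from the field layout
+ * above): the 32-bit register at 0x300 (port 2 status) sits on page
+ * 0x300 >> 9 = 1. Accessing it takes one page-mode write carrying the page
+ * index in the MDIO data field, then two reg-mode 16-bit accesses at PHY
+ * address FIELD_PREP(AR9331_SW_MDIO_PHY_MODE_M, AR9331_SW_MDIO_PHY_MODE_REG)
+ * | FIELD_GET(AR9331_SW_LOW_ADDR_PHY, 0x300) = 0x14, with register address
+ * FIELD_GET(AR9331_SW_LOW_ADDR_REG, 0x300) = 0 for the low half and 1 for
+ * the high half (see __ar9331_mdio_read() and ar9331_mdio_read()).
+ */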
+
+/* Empirically determined values */
+#define AR9331_SW_MDIO_POLL_SLEEP_US 1
+#define AR9331_SW_MDIO_POLL_TIMEOUT_US 20
+
+struct ar9331_sw_priv {
+ struct device *dev;
+ struct dsa_switch ds;
+ struct dsa_switch_ops ops;
+ struct irq_domain *irqdomain;
+ struct mii_bus *mbus; /* mdio master */
+ struct mii_bus *sbus; /* mdio slave */
+ struct regmap *regmap;
+ struct reset_control *sw_reset;
+};
+
+/* Warning: a switch reset will invalidate the last
+ * AR9331_SW_MDIO_PHY_MODE_PAGE request. If some kind of optimization is
+ * used, the request should be repeated.
+ */
+static int ar9331_sw_reset(struct ar9331_sw_priv *priv)
+{
+ int ret;
+
+ ret = reset_control_assert(priv->sw_reset);
+ if (ret)
+ goto error;
+
+ /* The AR9331 doc does not provide any information about the proper
+ * reset sequence. The AR8136 (the closest switch to the AR9331) doc
+ * says the reset duration should be greater than 10ms, so let's use
+ * that value for now.
+ */
+ usleep_range(10000, 15000);
+ ret = reset_control_deassert(priv->sw_reset);
+ if (ret)
+ goto error;
+ /* There is no information on how long we should wait after reset.
+ * The AR8136 has an EEPROM and an interrupt signalling EEPROM load
+ * status; the AR9331 has no EEPROM support.
+ * For now, do not wait. In case it turns out to be needed, an
+ * after-reset delay can be added as well.
+ */
+
+ return 0;
+error:
+ dev_err_ratelimited(priv->dev, "%s: %i\n", __func__, ret);
+ return ret;
+}
+
+static int ar9331_sw_mbus_write(struct mii_bus *mbus, int port, int regnum,
+ u16 data)
+{
+ struct ar9331_sw_priv *priv = mbus->priv;
+ struct regmap *regmap = priv->regmap;
+ u32 val;
+ int ret;
+
+ ret = regmap_write(regmap, AR9331_SW_REG_MDIO_CTRL,
+ AR9331_SW_MDIO_CTRL_BUSY |
+ AR9331_SW_MDIO_CTRL_MASTER_EN |
+ FIELD_PREP(AR9331_SW_MDIO_CTRL_PHY_ADDR_M, port) |
+ FIELD_PREP(AR9331_SW_MDIO_CTRL_REG_ADDR_M, regnum) |
+ FIELD_PREP(AR9331_SW_MDIO_CTRL_DATA_M, data));
+ if (ret)
+ goto error;
+
+ ret = regmap_read_poll_timeout(regmap, AR9331_SW_REG_MDIO_CTRL, val,
+ !(val & AR9331_SW_MDIO_CTRL_BUSY),
+ AR9331_SW_MDIO_POLL_SLEEP_US,
+ AR9331_SW_MDIO_POLL_TIMEOUT_US);
+ if (ret)
+ goto error;
+
+ return 0;
+error:
+ dev_err_ratelimited(priv->dev, "PHY write error: %i\n", ret);
+ return ret;
+}
+
+static int ar9331_sw_mbus_read(struct mii_bus *mbus, int port, int regnum)
+{
+ struct ar9331_sw_priv *priv = mbus->priv;
+ struct regmap *regmap = priv->regmap;
+ u32 val;
+ int ret;
+
+ ret = regmap_write(regmap, AR9331_SW_REG_MDIO_CTRL,
+ AR9331_SW_MDIO_CTRL_BUSY |
+ AR9331_SW_MDIO_CTRL_MASTER_EN |
+ AR9331_SW_MDIO_CTRL_CMD_READ |
+ FIELD_PREP(AR9331_SW_MDIO_CTRL_PHY_ADDR_M, port) |
+ FIELD_PREP(AR9331_SW_MDIO_CTRL_REG_ADDR_M, regnum));
+ if (ret)
+ goto error;
+
+ ret = regmap_read_poll_timeout(regmap, AR9331_SW_REG_MDIO_CTRL, val,
+ !(val & AR9331_SW_MDIO_CTRL_BUSY),
+ AR9331_SW_MDIO_POLL_SLEEP_US,
+ AR9331_SW_MDIO_POLL_TIMEOUT_US);
+ if (ret)
+ goto error;
+
+ ret = regmap_read(regmap, AR9331_SW_REG_MDIO_CTRL, &val);
+ if (ret)
+ goto error;
+
+ return FIELD_GET(AR9331_SW_MDIO_CTRL_DATA_M, val);
+
+error:
+ dev_err_ratelimited(priv->dev, "PHY read error: %i\n", ret);
+ return ret;
+}
+
+static int ar9331_sw_mbus_init(struct ar9331_sw_priv *priv)
+{
+ struct device *dev = priv->dev;
+ struct mii_bus *mbus;
+ struct device_node *np, *mnp;
+ int ret;
+
+ np = dev->of_node;
+
+ mbus = devm_mdiobus_alloc(dev);
+ if (!mbus)
+ return -ENOMEM;
+
+ mbus->name = np->full_name;
+ snprintf(mbus->id, MII_BUS_ID_SIZE, "%pOF", np);
+
+ mbus->read = ar9331_sw_mbus_read;
+ mbus->write = ar9331_sw_mbus_write;
+ mbus->priv = priv;
+ mbus->parent = dev;
+
+ mnp = of_get_child_by_name(np, "mdio");
+ if (!mnp)
+ return -ENODEV;
+
+ ret = of_mdiobus_register(mbus, mnp);
+ of_node_put(mnp);
+ if (ret)
+ return ret;
+
+ priv->mbus = mbus;
+
+ return 0;
+}
+
+static int ar9331_sw_setup(struct dsa_switch *ds)
+{
+ struct ar9331_sw_priv *priv = (struct ar9331_sw_priv *)ds->priv;
+ struct regmap *regmap = priv->regmap;
+ int ret;
+
+ ret = ar9331_sw_reset(priv);
+ if (ret)
+ return ret;
+
+ /* Reset will set proper defaults. The CPU port (port 0) will be
+ * enabled and configured. All other ports (ports 1-5) are disabled.
+ */
+ ret = ar9331_sw_mbus_init(priv);
+ if (ret)
+ return ret;
+
+ /* Do not drop broadcast frames */
+ ret = regmap_write_bits(regmap, AR9331_SW_REG_FLOOD_MASK,
+ AR9331_SW_FLOOD_MASK_BROAD_TO_CPU,
+ AR9331_SW_FLOOD_MASK_BROAD_TO_CPU);
+ if (ret)
+ goto error;
+
+ /* Set max frame size to the maximum supported value */
+ ret = regmap_write_bits(regmap, AR9331_SW_REG_GLOBAL_CTRL,
+ AR9331_SW_GLOBAL_CTRL_MFS_M,
+ AR9331_SW_GLOBAL_CTRL_MFS_M);
+ if (ret)
+ goto error;
+
+ return 0;
+error:
+ dev_err_ratelimited(priv->dev, "%s: %i\n", __func__, ret);
+ return ret;
+}
+
+static void ar9331_sw_port_disable(struct dsa_switch *ds, int port)
+{
+ struct ar9331_sw_priv *priv = (struct ar9331_sw_priv *)ds->priv;
+ struct regmap *regmap = priv->regmap;
+ int ret;
+
+ ret = regmap_write(regmap, AR9331_SW_REG_PORT_STATUS(port), 0);
+ if (ret)
+ dev_err_ratelimited(priv->dev, "%s: %i\n", __func__, ret);
+}
+
+static enum dsa_tag_protocol ar9331_sw_get_tag_protocol(struct dsa_switch *ds,
+ int port,
+ enum dsa_tag_protocol m)
+{
+ return DSA_TAG_PROTO_AR9331;
+}
+
+static void ar9331_sw_phylink_validate(struct dsa_switch *ds, int port,
+ unsigned long *supported,
+ struct phylink_link_state *state)
+{
+ __ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = { 0, };
+
+ switch (port) {
+ case 0:
+ if (state->interface != PHY_INTERFACE_MODE_GMII)
+ goto unsupported;
+
+ phylink_set(mask, 1000baseT_Full);
+ phylink_set(mask, 1000baseT_Half);
+ break;
+ case 1:
+ case 2:
+ case 3:
+ case 4:
+ case 5:
+ if (state->interface != PHY_INTERFACE_MODE_INTERNAL)
+ goto unsupported;
+ break;
+ default:
+ bitmap_zero(supported, __ETHTOOL_LINK_MODE_MASK_NBITS);
+ dev_err(ds->dev, "Unsupported port: %i\n", port);
+ return;
+ }
+
+ phylink_set_port_modes(mask);
+ phylink_set(mask, Pause);
+ phylink_set(mask, Asym_Pause);
+
+ phylink_set(mask, 10baseT_Half);
+ phylink_set(mask, 10baseT_Full);
+ phylink_set(mask, 100baseT_Half);
+ phylink_set(mask, 100baseT_Full);
+
+ bitmap_and(supported, supported, mask,
+ __ETHTOOL_LINK_MODE_MASK_NBITS);
+ bitmap_and(state->advertising, state->advertising, mask,
+ __ETHTOOL_LINK_MODE_MASK_NBITS);
+
+ return;
+
+unsupported:
+ bitmap_zero(supported, __ETHTOOL_LINK_MODE_MASK_NBITS);
+ dev_err(ds->dev, "Unsupported interface: %d, port: %d\n",
+ state->interface, port);
+}
+
+static void ar9331_sw_phylink_mac_config(struct dsa_switch *ds, int port,
+ unsigned int mode,
+ const struct phylink_link_state *state)
+{
+ struct ar9331_sw_priv *priv = (struct ar9331_sw_priv *)ds->priv;
+ struct regmap *regmap = priv->regmap;
+ int ret;
+ u32 val;
+
+ switch (state->speed) {
+ case SPEED_1000:
+ val = AR9331_SW_PORT_STATUS_SPEED_1000;
+ break;
+ case SPEED_100:
+ val = AR9331_SW_PORT_STATUS_SPEED_100;
+ break;
+ case SPEED_10:
+ val = AR9331_SW_PORT_STATUS_SPEED_10;
+ break;
+ default:
+ return;
+ }
+
+ if (state->duplex)
+ val |= AR9331_SW_PORT_STATUS_DUPLEX_MODE;
+
+ if (state->pause & MLO_PAUSE_TX)
+ val |= AR9331_SW_PORT_STATUS_TX_FLOW_EN;
+
+ if (state->pause & MLO_PAUSE_RX)
+ val |= AR9331_SW_PORT_STATUS_RX_FLOW_EN;
+
+ ret = regmap_update_bits(regmap, AR9331_SW_REG_PORT_STATUS(port),
+ AR9331_SW_PORT_STATUS_LINK_MASK, val);
+ if (ret)
+ dev_err_ratelimited(priv->dev, "%s: %i\n", __func__, ret);
+}
+
+static void ar9331_sw_phylink_mac_link_down(struct dsa_switch *ds, int port,
+ unsigned int mode,
+ phy_interface_t interface)
+{
+ struct ar9331_sw_priv *priv = (struct ar9331_sw_priv *)ds->priv;
+ struct regmap *regmap = priv->regmap;
+ int ret;
+
+ ret = regmap_update_bits(regmap, AR9331_SW_REG_PORT_STATUS(port),
+ AR9331_SW_PORT_STATUS_MAC_MASK, 0);
+ if (ret)
+ dev_err_ratelimited(priv->dev, "%s: %i\n", __func__, ret);
+}
+
+static void ar9331_sw_phylink_mac_link_up(struct dsa_switch *ds, int port,
+ unsigned int mode,
+ phy_interface_t interface,
+ struct phy_device *phydev)
+{
+ struct ar9331_sw_priv *priv = (struct ar9331_sw_priv *)ds->priv;
+ struct regmap *regmap = priv->regmap;
+ int ret;
+
+ ret = regmap_update_bits(regmap, AR9331_SW_REG_PORT_STATUS(port),
+ AR9331_SW_PORT_STATUS_MAC_MASK,
+ AR9331_SW_PORT_STATUS_MAC_MASK);
+ if (ret)
+ dev_err_ratelimited(priv->dev, "%s: %i\n", __func__, ret);
+}
+
+static const struct dsa_switch_ops ar9331_sw_ops = {
+ .get_tag_protocol = ar9331_sw_get_tag_protocol,
+ .setup = ar9331_sw_setup,
+ .port_disable = ar9331_sw_port_disable,
+ .phylink_validate = ar9331_sw_phylink_validate,
+ .phylink_mac_config = ar9331_sw_phylink_mac_config,
+ .phylink_mac_link_down = ar9331_sw_phylink_mac_link_down,
+ .phylink_mac_link_up = ar9331_sw_phylink_mac_link_up,
+};
+
+static irqreturn_t ar9331_sw_irq(int irq, void *data)
+{
+ struct ar9331_sw_priv *priv = data;
+ struct regmap *regmap = priv->regmap;
+ u32 stat;
+ int ret;
+
+ ret = regmap_read(regmap, AR9331_SW_REG_GINT, &stat);
+ if (ret) {
+ dev_err(priv->dev, "can't read interrupt status\n");
+ return IRQ_NONE;
+ }
+
+ if (!stat)
+ return IRQ_NONE;
+
+ if (stat & AR9331_SW_GINT_PHY_INT) {
+ int child_irq;
+
+ child_irq = irq_find_mapping(priv->irqdomain, 0);
+ handle_nested_irq(child_irq);
+ }
+
+ ret = regmap_write(regmap, AR9331_SW_REG_GINT, stat);
+ if (ret) {
+ dev_err(priv->dev, "can't write interrupt status\n");
+ return IRQ_NONE;
+ }
+
+ return IRQ_HANDLED;
+}
+
+static void ar9331_sw_mask_irq(struct irq_data *d)
+{
+ struct ar9331_sw_priv *priv = irq_data_get_irq_chip_data(d);
+ struct regmap *regmap = priv->regmap;
+ int ret;
+
+ ret = regmap_update_bits(regmap, AR9331_SW_REG_GINT_MASK,
+ AR9331_SW_GINT_PHY_INT, 0);
+ if (ret)
+ dev_err(priv->dev, "could not mask IRQ\n");
+}
+
+static void ar9331_sw_unmask_irq(struct irq_data *d)
+{
+ struct ar9331_sw_priv *priv = irq_data_get_irq_chip_data(d);
+ struct regmap *regmap = priv->regmap;
+ int ret;
+
+ ret = regmap_update_bits(regmap, AR9331_SW_REG_GINT_MASK,
+ AR9331_SW_GINT_PHY_INT,
+ AR9331_SW_GINT_PHY_INT);
+ if (ret)
+ dev_err(priv->dev, "could not unmask IRQ\n");
+}
+
+static struct irq_chip ar9331_sw_irq_chip = {
+ .name = AR9331_SW_NAME,
+ .irq_mask = ar9331_sw_mask_irq,
+ .irq_unmask = ar9331_sw_unmask_irq,
+};
+
+static int ar9331_sw_irq_map(struct irq_domain *domain, unsigned int irq,
+ irq_hw_number_t hwirq)
+{
+ irq_set_chip_data(irq, domain->host_data);
+ irq_set_chip_and_handler(irq, &ar9331_sw_irq_chip, handle_simple_irq);
+ irq_set_nested_thread(irq, 1);
+ irq_set_noprobe(irq);
+
+ return 0;
+}
+
+static void ar9331_sw_irq_unmap(struct irq_domain *d, unsigned int irq)
+{
+ irq_set_nested_thread(irq, 0);
+ irq_set_chip_and_handler(irq, NULL, NULL);
+ irq_set_chip_data(irq, NULL);
+}
+
+static const struct irq_domain_ops ar9331_sw_irqdomain_ops = {
+ .map = ar9331_sw_irq_map,
+ .unmap = ar9331_sw_irq_unmap,
+ .xlate = irq_domain_xlate_onecell,
+};
+
+static int ar9331_sw_irq_init(struct ar9331_sw_priv *priv)
+{
+ struct device_node *np = priv->dev->of_node;
+ struct device *dev = priv->dev;
+ int ret, irq;
+
+ irq = of_irq_get(np, 0);
+ if (irq <= 0) {
+ dev_err(dev, "failed to get parent IRQ\n");
+ return irq ? irq : -EINVAL;
+ }
+
+ ret = devm_request_threaded_irq(dev, irq, NULL, ar9331_sw_irq,
+ IRQF_ONESHOT, AR9331_SW_NAME, priv);
+ if (ret) {
+ dev_err(dev, "unable to request irq: %d\n", ret);
+ return ret;
+ }
+
+ priv->irqdomain = irq_domain_add_linear(np, 1, &ar9331_sw_irqdomain_ops,
+ priv);
+ if (!priv->irqdomain) {
+ dev_err(dev, "failed to create IRQ domain\n");
+ return -EINVAL;
+ }
+
+ irq_set_parent(irq_create_mapping(priv->irqdomain, 0), irq);
+
+ return 0;
+}
+
+static int __ar9331_mdio_write(struct mii_bus *sbus, u8 mode, u16 reg, u16 val)
+{
+ u8 r, p;
+
+ p = FIELD_PREP(AR9331_SW_MDIO_PHY_MODE_M, mode) |
+ FIELD_GET(AR9331_SW_LOW_ADDR_PHY, reg);
+ r = FIELD_GET(AR9331_SW_LOW_ADDR_REG, reg);
+
+ return mdiobus_write(sbus, p, r, val);
+}
+
+static int __ar9331_mdio_read(struct mii_bus *sbus, u16 reg)
+{
+ u8 r, p;
+
+ p = FIELD_PREP(AR9331_SW_MDIO_PHY_MODE_M, AR9331_SW_MDIO_PHY_MODE_REG) |
+ FIELD_GET(AR9331_SW_LOW_ADDR_PHY, reg);
+ r = FIELD_GET(AR9331_SW_LOW_ADDR_REG, reg);
+
+ return mdiobus_read(sbus, p, r);
+}
+
+static int ar9331_mdio_read(void *ctx, const void *reg_buf, size_t reg_len,
+ void *val_buf, size_t val_len)
+{
+ struct ar9331_sw_priv *priv = ctx;
+ struct mii_bus *sbus = priv->sbus;
+ u32 reg = *(u32 *)reg_buf;
+ int ret;
+
+ if (reg == AR9331_SW_REG_PAGE) {
+ /* We cannot read the page selector register from hardware, so
+ * we cache its value in regmap. Return all bits set here, so that
+ * regmap will always write the page on first use.
+ */
+ *(u32 *)val_buf = GENMASK(9, 0);
+ return 0;
+ }
+
+ ret = __ar9331_mdio_read(sbus, reg);
+ if (ret < 0)
+ goto error;
+
+ *(u32 *)val_buf = ret;
+ ret = __ar9331_mdio_read(sbus, reg + 2);
+ if (ret < 0)
+ goto error;
+
+ *(u32 *)val_buf |= ret << 16;
+
+ return 0;
+error:
+ dev_err_ratelimited(&sbus->dev, "Bus error. Failed to read register.\n");
+ return ret;
+}
+
+static int ar9331_mdio_write(void *ctx, u32 reg, u32 val)
+{
+ struct ar9331_sw_priv *priv = (struct ar9331_sw_priv *)ctx;
+ struct mii_bus *sbus = priv->sbus;
+ int ret;
+
+ if (reg == AR9331_SW_REG_PAGE) {
+ ret = __ar9331_mdio_write(sbus, AR9331_SW_MDIO_PHY_MODE_PAGE,
+ 0, val);
+ if (ret < 0)
+ goto error;
+
+ return 0;
+ }
+
+ ret = __ar9331_mdio_write(sbus, AR9331_SW_MDIO_PHY_MODE_REG, reg, val);
+ if (ret < 0)
+ goto error;
+
+ ret = __ar9331_mdio_write(sbus, AR9331_SW_MDIO_PHY_MODE_REG, reg + 2,
+ val >> 16);
+ if (ret < 0)
+ goto error;
+
+ return 0;
+error:
+ dev_err_ratelimited(&sbus->dev, "Bus error. Failed to write register.\n");
+ return ret;
+}
+
+static int ar9331_sw_bus_write(void *context, const void *data, size_t count)
+{
+ u32 reg = *(u32 *)data;
+ u32 val = *((u32 *)data + 1);
+
+ return ar9331_mdio_write(context, reg, val);
+}
+
+static const struct regmap_range ar9331_valid_regs[] = {
+ regmap_reg_range(0x0, 0x0),
+ regmap_reg_range(0x10, 0x14),
+ regmap_reg_range(0x20, 0x24),
+ regmap_reg_range(0x2c, 0x30),
+ regmap_reg_range(0x40, 0x44),
+ regmap_reg_range(0x50, 0x78),
+ regmap_reg_range(0x80, 0x98),
+
+ regmap_reg_range(0x100, 0x120),
+ regmap_reg_range(0x200, 0x220),
+ regmap_reg_range(0x300, 0x320),
+ regmap_reg_range(0x400, 0x420),
+ regmap_reg_range(0x500, 0x520),
+ regmap_reg_range(0x600, 0x620),
+
+ regmap_reg_range(0x20000, 0x200a4),
+ regmap_reg_range(0x20100, 0x201a4),
+ regmap_reg_range(0x20200, 0x202a4),
+ regmap_reg_range(0x20300, 0x203a4),
+ regmap_reg_range(0x20400, 0x204a4),
+ regmap_reg_range(0x20500, 0x205a4),
+
+ /* dummy page selector reg */
+ regmap_reg_range(AR9331_SW_REG_PAGE, AR9331_SW_REG_PAGE),
+};
+
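+/* The dummy page selector register is deliberately the only non-volatile
+ * (i.e. cached) register: with REGCACHE_RBTREE below, regmap can compare
+ * the cached page against the requested one and skip redundant page-switch
+ * writes when consecutive accesses land on the same page.
+ */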
+static const struct regmap_range ar9331_nonvolatile_regs[] = {
+ regmap_reg_range(AR9331_SW_REG_PAGE, AR9331_SW_REG_PAGE),
+};
+
+static const struct regmap_range_cfg ar9331_regmap_range[] = {
+ {
+ .selector_reg = AR9331_SW_REG_PAGE,
+ .selector_mask = GENMASK(9, 0),
+ .selector_shift = 0,
+
+ .window_start = 0,
+ .window_len = 512,
+
+ .range_min = 0,
+ .range_max = AR9331_SW_REG_PAGE - 4,
+ },
+};
+
+static const struct regmap_access_table ar9331_register_set = {
+ .yes_ranges = ar9331_valid_regs,
+ .n_yes_ranges = ARRAY_SIZE(ar9331_valid_regs),
+};
+
+static const struct regmap_access_table ar9331_volatile_set = {
+ .no_ranges = ar9331_nonvolatile_regs,
+ .n_no_ranges = ARRAY_SIZE(ar9331_nonvolatile_regs),
+};
+
+static const struct regmap_config ar9331_mdio_regmap_config = {
+ .reg_bits = 32,
+ .val_bits = 32,
+ .reg_stride = 4,
+ .max_register = AR9331_SW_REG_PAGE,
+
+ .ranges = ar9331_regmap_range,
+ .num_ranges = ARRAY_SIZE(ar9331_regmap_range),
+
+ .volatile_table = &ar9331_volatile_set,
+ .wr_table = &ar9331_register_set,
+ .rd_table = &ar9331_register_set,
+
+ .cache_type = REGCACHE_RBTREE,
+};
+
+static struct regmap_bus ar9331_sw_bus = {
+ .reg_format_endian_default = REGMAP_ENDIAN_NATIVE,
+ .val_format_endian_default = REGMAP_ENDIAN_NATIVE,
+ .read = ar9331_mdio_read,
+ .write = ar9331_sw_bus_write,
+ .max_raw_read = 4,
+ .max_raw_write = 4,
+};
+
+static int ar9331_sw_probe(struct mdio_device *mdiodev)
+{
+ struct ar9331_sw_priv *priv;
+ struct dsa_switch *ds;
+ int ret;
+
+ priv = devm_kzalloc(&mdiodev->dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ priv->regmap = devm_regmap_init(&mdiodev->dev, &ar9331_sw_bus, priv,
+ &ar9331_mdio_regmap_config);
+ if (IS_ERR(priv->regmap)) {
+ ret = PTR_ERR(priv->regmap);
+ dev_err(&mdiodev->dev, "regmap init failed: %d\n", ret);
+ return ret;
+ }
+
+ priv->sw_reset = devm_reset_control_get(&mdiodev->dev, "switch");
+ if (IS_ERR(priv->sw_reset)) {
+ dev_err(&mdiodev->dev, "missing switch reset\n");
+ return PTR_ERR(priv->sw_reset);
+ }
+
+ priv->sbus = mdiodev->bus;
+ priv->dev = &mdiodev->dev;
+
+ ret = ar9331_sw_irq_init(priv);
+ if (ret)
+ return ret;
+
+ ds = &priv->ds;
+ ds->dev = &mdiodev->dev;
+ ds->num_ports = AR9331_SW_PORTS;
+ ds->priv = priv;
+ priv->ops = ar9331_sw_ops;
+ ds->ops = &priv->ops;
+ dev_set_drvdata(&mdiodev->dev, priv);
+
+ ret = dsa_register_switch(ds);
+ if (ret)
+ goto err_remove_irq;
+
+ return 0;
+
+err_remove_irq:
+ irq_domain_remove(priv->irqdomain);
+
+ return ret;
+}
+
+static void ar9331_sw_remove(struct mdio_device *mdiodev)
+{
+ struct ar9331_sw_priv *priv = dev_get_drvdata(&mdiodev->dev);
+
+ irq_domain_remove(priv->irqdomain);
+ mdiobus_unregister(priv->mbus);
+ dsa_unregister_switch(&priv->ds);
+
+ reset_control_assert(priv->sw_reset);
+}
+
+static const struct of_device_id ar9331_sw_of_match[] = {
+ { .compatible = "qca,ar9331-switch" },
+ { },
+};
+
+static struct mdio_driver ar9331_sw_mdio_driver = {
+ .probe = ar9331_sw_probe,
+ .remove = ar9331_sw_remove,
+ .mdiodrv.driver = {
+ .name = AR9331_SW_NAME,
+ .of_match_table = ar9331_sw_of_match,
+ },
+};
+
+mdio_module_driver(ar9331_sw_mdio_driver);
+
+MODULE_AUTHOR("Oleksij Rempel <kernel@pengutronix.de>");
+MODULE_DESCRIPTION("Driver for Atheros AR9331 switch");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/net/dsa/qca8k.c b/drivers/net/dsa/qca8k.c
index e548289df31e..9f4205b4439b 100644
--- a/drivers/net/dsa/qca8k.c
+++ b/drivers/net/dsa/qca8k.c
@@ -1017,7 +1017,8 @@ qca8k_port_fdb_dump(struct dsa_switch *ds, int port,
}
static enum dsa_tag_protocol
-qca8k_get_tag_protocol(struct dsa_switch *ds, int port)
+qca8k_get_tag_protocol(struct dsa_switch *ds, int port,
+ enum dsa_tag_protocol mp)
{
return DSA_TAG_PROTO_QCA;
}
diff --git a/drivers/net/dsa/rtl8366rb.c b/drivers/net/dsa/rtl8366rb.c
index f5cc8b0a7c74..fd1977590cb4 100644
--- a/drivers/net/dsa/rtl8366rb.c
+++ b/drivers/net/dsa/rtl8366rb.c
@@ -964,7 +964,8 @@ static int rtl8366rb_setup(struct dsa_switch *ds)
}
static enum dsa_tag_protocol rtl8366_get_tag_protocol(struct dsa_switch *ds,
- int port)
+ int port,
+ enum dsa_tag_protocol mp)
{
/* For now, the RTL switches are handled without any custom tags.
*
diff --git a/drivers/net/dsa/sja1105/sja1105_main.c b/drivers/net/dsa/sja1105/sja1105_main.c
index 1da5ac111499..03ba6d25f7fe 100644
--- a/drivers/net/dsa/sja1105/sja1105_main.c
+++ b/drivers/net/dsa/sja1105/sja1105_main.c
@@ -426,14 +426,6 @@ static int sja1105_init_general_params(struct sja1105_private *priv)
.tpid2 = ETH_P_SJA1105,
};
struct sja1105_table *table;
- int i, k = 0;
-
- for (i = 0; i < SJA1105_NUM_PORTS; i++) {
- if (dsa_is_dsa_port(priv->ds, i))
- default_general_params.casc_port = i;
- else if (dsa_is_user_port(priv->ds, i))
- priv->ports[i].mgmt_slot = k++;
- }
table = &priv->static_config.tables[BLK_IDX_GENERAL_PARAMS];
@@ -582,7 +574,7 @@ static int sja1105_parse_ports_node(struct sja1105_private *priv,
struct device *dev = &priv->spidev->dev;
struct device_node *child;
- for_each_child_of_node(ports_node, child) {
+ for_each_available_child_of_node(ports_node, child) {
struct device_node *phy_node;
phy_interface_t phy_mode;
u32 index;
@@ -1542,7 +1534,8 @@ static int sja1105_setup_8021q_tagging(struct dsa_switch *ds, bool enabled)
}
static enum dsa_tag_protocol
-sja1105_get_tag_protocol(struct dsa_switch *ds, int port)
+sja1105_get_tag_protocol(struct dsa_switch *ds, int port,
+ enum dsa_tag_protocol mp)
{
return DSA_TAG_PROTO_SJA1105;
}
@@ -1740,6 +1733,16 @@ static int sja1105_setup(struct dsa_switch *ds)
static void sja1105_teardown(struct dsa_switch *ds)
{
struct sja1105_private *priv = ds->priv;
+ int port;
+
+ for (port = 0; port < SJA1105_NUM_PORTS; port++) {
+ struct sja1105_port *sp = &priv->ports[port];
+
+ if (!dsa_is_user_port(ds, port))
+ continue;
+
+ kthread_destroy_worker(sp->xmit_worker);
+ }
sja1105_tas_teardown(ds);
sja1105_ptp_clock_unregister(ds);
@@ -1761,6 +1764,18 @@ static int sja1105_port_enable(struct dsa_switch *ds, int port,
return 0;
}
+static void sja1105_port_disable(struct dsa_switch *ds, int port)
+{
+ struct sja1105_private *priv = ds->priv;
+ struct sja1105_port *sp = &priv->ports[port];
+
+ if (!dsa_is_user_port(ds, port))
+ return;
+
+ kthread_cancel_work_sync(&sp->xmit_work);
+ skb_queue_purge(&sp->xmit_queue);
+}
+
static int sja1105_mgmt_xmit(struct dsa_switch *ds, int port, int slot,
struct sk_buff *skb, bool takets)
{
@@ -1819,47 +1834,36 @@ static int sja1105_mgmt_xmit(struct dsa_switch *ds, int port, int slot,
return NETDEV_TX_OK;
}
+#define work_to_port(work) \
+ container_of((work), struct sja1105_port, xmit_work)
+#define tagger_to_sja1105(t) \
+ container_of((t), struct sja1105_private, tagger_data)
+
/* Deferred work is unfortunately necessary because setting up the management
 * route cannot be done from atomic context (SPI transfer takes a sleepable
* lock on the bus)
*/
-static netdev_tx_t sja1105_port_deferred_xmit(struct dsa_switch *ds, int port,
- struct sk_buff *skb)
+static void sja1105_port_deferred_xmit(struct kthread_work *work)
{
- struct sja1105_private *priv = ds->priv;
- struct sja1105_port *sp = &priv->ports[port];
- int slot = sp->mgmt_slot;
- struct sk_buff *clone;
-
- /* The tragic fact about the switch having 4x2 slots for installing
- * management routes is that all of them except one are actually
- * useless.
- * If 2 slots are simultaneously configured for two BPDUs sent to the
- * same (multicast) DMAC but on different egress ports, the switch
- * would confuse them and redirect first frame it receives on the CPU
- * port towards the port configured on the numerically first slot
- * (therefore wrong port), then second received frame on second slot
- * (also wrong port).
- * So for all practical purposes, there needs to be a lock that
- * prevents that from happening. The slot used here is utterly useless
- * (could have simply been 0 just as fine), but we are doing it
- * nonetheless, in case a smarter idea ever comes up in the future.
- */
- mutex_lock(&priv->mgmt_lock);
+ struct sja1105_port *sp = work_to_port(work);
+ struct sja1105_tagger_data *tagger_data = sp->data;
+ struct sja1105_private *priv = tagger_to_sja1105(tagger_data);
+ int port = sp - priv->ports;
+ struct sk_buff *skb;
- /* The clone, if there, was made by dsa_skb_tx_timestamp */
- clone = DSA_SKB_CB(skb)->clone;
+ while ((skb = skb_dequeue(&sp->xmit_queue)) != NULL) {
+ struct sk_buff *clone = DSA_SKB_CB(skb)->clone;
- sja1105_mgmt_xmit(ds, port, slot, skb, !!clone);
+ mutex_lock(&priv->mgmt_lock);
- if (!clone)
- goto out;
+ sja1105_mgmt_xmit(priv->ds, port, 0, skb, !!clone);
- sja1105_ptp_txtstamp_skb(ds, port, clone);
+ /* The clone, if there, was made by dsa_skb_tx_timestamp */
+ if (clone)
+ sja1105_ptp_txtstamp_skb(priv->ds, port, clone);
-out:
- mutex_unlock(&priv->mgmt_lock);
- return NETDEV_TX_OK;
+ mutex_unlock(&priv->mgmt_lock);
+ }
}
/* The MAXAGE setting belongs to the L2 Forwarding Parameters table,
@@ -1990,6 +1994,7 @@ static const struct dsa_switch_ops sja1105_switch_ops = {
.get_sset_count = sja1105_get_sset_count,
.get_ts_info = sja1105_get_ts_info,
.port_enable = sja1105_port_enable,
+ .port_disable = sja1105_port_disable,
.port_fdb_dump = sja1105_fdb_dump,
.port_fdb_add = sja1105_fdb_add,
.port_fdb_del = sja1105_fdb_del,
@@ -2003,7 +2008,6 @@ static const struct dsa_switch_ops sja1105_switch_ops = {
.port_mdb_prepare = sja1105_mdb_prepare,
.port_mdb_add = sja1105_mdb_add,
.port_mdb_del = sja1105_mdb_del,
- .port_deferred_xmit = sja1105_port_deferred_xmit,
.port_hwtstamp_get = sja1105_hwtstamp_get,
.port_hwtstamp_set = sja1105_hwtstamp_set,
.port_rxtstamp = sja1105_port_rxtstamp,
@@ -2055,7 +2059,7 @@ static int sja1105_probe(struct spi_device *spi)
struct device *dev = &spi->dev;
struct sja1105_private *priv;
struct dsa_switch *ds;
- int rc, i;
+ int rc, port;
if (!dev->of_node) {
dev_err(dev, "No DTS bindings for SJA1105 driver\n");
@@ -2120,15 +2124,42 @@ static int sja1105_probe(struct spi_device *spi)
return rc;
/* Connections between dsa_port and sja1105_port */
- for (i = 0; i < SJA1105_NUM_PORTS; i++) {
- struct sja1105_port *sp = &priv->ports[i];
+ for (port = 0; port < SJA1105_NUM_PORTS; port++) {
+ struct sja1105_port *sp = &priv->ports[port];
+ struct dsa_port *dp = dsa_to_port(ds, port);
+ struct net_device *slave;
- dsa_to_port(ds, i)->priv = sp;
- sp->dp = dsa_to_port(ds, i);
+ if (!dsa_is_user_port(ds, port))
+ continue;
+
+ dp->priv = sp;
+ sp->dp = dp;
sp->data = tagger_data;
+ slave = dp->slave;
+ kthread_init_work(&sp->xmit_work, sja1105_port_deferred_xmit);
+ sp->xmit_worker = kthread_create_worker(0, "%s_xmit",
+ slave->name);
+ if (IS_ERR(sp->xmit_worker)) {
+ rc = PTR_ERR(sp->xmit_worker);
+ dev_err(ds->dev,
+ "failed to create deferred xmit thread: %d\n",
+ rc);
+ goto out;
+ }
+ skb_queue_head_init(&sp->xmit_queue);
}
return 0;
+out:
+ while (port-- > 0) {
+ struct sja1105_port *sp = &priv->ports[port];
+
+ if (!dsa_is_user_port(ds, port))
+ continue;
+
+ kthread_destroy_worker(sp->xmit_worker);
+ }
+ return rc;
}
static int sja1105_remove(struct spi_device *spi)
diff --git a/drivers/net/dsa/sja1105/sja1105_ptp.c b/drivers/net/dsa/sja1105/sja1105_ptp.c
index 43ab7589d0d0..a836fc38c4a4 100644
--- a/drivers/net/dsa/sja1105/sja1105_ptp.c
+++ b/drivers/net/dsa/sja1105/sja1105_ptp.c
@@ -83,6 +83,7 @@ static int sja1105_init_avb_params(struct sja1105_private *priv,
static int sja1105_change_rxtstamping(struct sja1105_private *priv,
bool on)
{
+ struct sja1105_ptp_data *ptp_data = &priv->ptp_data;
struct sja1105_general_params_entry *general_params;
struct sja1105_table *table;
int rc;
@@ -101,6 +102,8 @@ static int sja1105_change_rxtstamping(struct sja1105_private *priv,
kfree_skb(priv->tagger_data.stampable_skb);
priv->tagger_data.stampable_skb = NULL;
}
+ ptp_cancel_worker_sync(ptp_data->clock);
+ skb_queue_purge(&ptp_data->skb_rxtstamp_queue);
return sja1105_static_config_reload(priv, SJA1105_RX_HWTSTAMPING);
}
@@ -367,22 +370,16 @@ static int sja1105_ptpclkval_write(struct sja1105_private *priv, u64 ticks,
ptp_sts);
}
-#define rxtstamp_to_tagger(d) \
- container_of((d), struct sja1105_tagger_data, rxtstamp_work)
-#define tagger_to_sja1105(d) \
- container_of((d), struct sja1105_private, tagger_data)
-
-static void sja1105_rxtstamp_work(struct work_struct *work)
+static long sja1105_rxtstamp_work(struct ptp_clock_info *ptp)
{
- struct sja1105_tagger_data *tagger_data = rxtstamp_to_tagger(work);
- struct sja1105_private *priv = tagger_to_sja1105(tagger_data);
- struct sja1105_ptp_data *ptp_data = &priv->ptp_data;
+ struct sja1105_ptp_data *ptp_data = ptp_caps_to_data(ptp);
+ struct sja1105_private *priv = ptp_data_to_sja1105(ptp_data);
struct dsa_switch *ds = priv->ds;
struct sk_buff *skb;
mutex_lock(&ptp_data->lock);
- while ((skb = skb_dequeue(&tagger_data->skb_rxtstamp_queue)) != NULL) {
+ while ((skb = skb_dequeue(&ptp_data->skb_rxtstamp_queue)) != NULL) {
struct skb_shared_hwtstamps *shwt = skb_hwtstamps(skb);
u64 ticks, ts;
int rc;
@@ -404,6 +401,9 @@ static void sja1105_rxtstamp_work(struct work_struct *work)
}
mutex_unlock(&ptp_data->lock);
+
+ /* Don't restart */
+ return -1;
}
/* Called from dsa_skb_defer_rx_timestamp */
@@ -411,16 +411,16 @@ bool sja1105_port_rxtstamp(struct dsa_switch *ds, int port,
struct sk_buff *skb, unsigned int type)
{
struct sja1105_private *priv = ds->priv;
- struct sja1105_tagger_data *tagger_data = &priv->tagger_data;
+ struct sja1105_ptp_data *ptp_data = &priv->ptp_data;
- if (!test_bit(SJA1105_HWTS_RX_EN, &tagger_data->state))
+ if (!test_bit(SJA1105_HWTS_RX_EN, &priv->tagger_data.state))
return false;
/* We need to read the full PTP clock to reconstruct the Rx
* timestamp. For that we need a sleepable context.
*/
- skb_queue_tail(&tagger_data->skb_rxtstamp_queue, skb);
- schedule_work(&tagger_data->rxtstamp_work);
+ skb_queue_tail(&ptp_data->skb_rxtstamp_queue, skb);
+ ptp_schedule_worker(ptp_data->clock, 0);
return true;
}
@@ -628,11 +628,11 @@ int sja1105_ptp_clock_register(struct dsa_switch *ds)
.adjtime = sja1105_ptp_adjtime,
.gettimex64 = sja1105_ptp_gettimex,
.settime64 = sja1105_ptp_settime,
+ .do_aux_work = sja1105_rxtstamp_work,
.max_adj = SJA1105_MAX_ADJ_PPB,
};
- skb_queue_head_init(&tagger_data->skb_rxtstamp_queue);
- INIT_WORK(&tagger_data->rxtstamp_work, sja1105_rxtstamp_work);
+ skb_queue_head_init(&ptp_data->skb_rxtstamp_queue);
spin_lock_init(&tagger_data->meta_lock);
ptp_data->clock = ptp_clock_register(&ptp_data->caps, ds->dev);
@@ -653,8 +653,8 @@ void sja1105_ptp_clock_unregister(struct dsa_switch *ds)
if (IS_ERR_OR_NULL(ptp_data->clock))
return;
- cancel_work_sync(&priv->tagger_data.rxtstamp_work);
- skb_queue_purge(&priv->tagger_data.skb_rxtstamp_queue);
+ ptp_cancel_worker_sync(ptp_data->clock);
+ skb_queue_purge(&ptp_data->skb_rxtstamp_queue);
ptp_clock_unregister(ptp_data->clock);
ptp_data->clock = NULL;
}
diff --git a/drivers/net/dsa/sja1105/sja1105_ptp.h b/drivers/net/dsa/sja1105/sja1105_ptp.h
index 470f44b76318..6f4a19eec709 100644
--- a/drivers/net/dsa/sja1105/sja1105_ptp.h
+++ b/drivers/net/dsa/sja1105/sja1105_ptp.h
@@ -30,6 +30,7 @@ struct sja1105_ptp_cmd {
};
struct sja1105_ptp_data {
+ struct sk_buff_head skb_rxtstamp_queue;
struct ptp_clock_info caps;
struct ptp_clock *clock;
struct sja1105_ptp_cmd cmd;
diff --git a/drivers/net/dsa/vitesse-vsc73xx-core.c b/drivers/net/dsa/vitesse-vsc73xx-core.c
index 42c1574d45f2..6e21a2a5cf01 100644
--- a/drivers/net/dsa/vitesse-vsc73xx-core.c
+++ b/drivers/net/dsa/vitesse-vsc73xx-core.c
@@ -542,7 +542,8 @@ static int vsc73xx_phy_write(struct dsa_switch *ds, int phy, int regnum,
}
static enum dsa_tag_protocol vsc73xx_get_tag_protocol(struct dsa_switch *ds,
- int port)
+ int port,
+ enum dsa_tag_protocol mp)
{
/* The switch internally uses a 8 byte header with length,
* source port, tag, LPA and priority. This is supposedly
@@ -1111,7 +1112,9 @@ static int vsc73xx_gpio_probe(struct vsc73xx *vsc)
vsc->gc.ngpio = 4;
vsc->gc.owner = THIS_MODULE;
vsc->gc.parent = vsc->dev;
+#if IS_ENABLED(CONFIG_OF_GPIO)
vsc->gc.of_node = vsc->dev->of_node;
+#endif
vsc->gc.base = -1;
vsc->gc.get = vsc73xx_gpio_get;
vsc->gc.set = vsc73xx_gpio_set;
diff --git a/drivers/net/ethernet/3com/3c509.c b/drivers/net/ethernet/3com/3c509.c
index 3da97996bdf3..8cafd06ff0c4 100644
--- a/drivers/net/ethernet/3com/3c509.c
+++ b/drivers/net/ethernet/3com/3c509.c
@@ -196,7 +196,7 @@ static struct net_device_stats *el3_get_stats(struct net_device *dev);
static int el3_rx(struct net_device *dev);
static int el3_close(struct net_device *dev);
static void set_multicast_list(struct net_device *dev);
-static void el3_tx_timeout (struct net_device *dev);
+static void el3_tx_timeout (struct net_device *dev, unsigned int txqueue);
static void el3_down(struct net_device *dev);
static void el3_up(struct net_device *dev);
static const struct ethtool_ops ethtool_ops;
@@ -689,7 +689,7 @@ el3_open(struct net_device *dev)
}
static void
-el3_tx_timeout (struct net_device *dev)
+el3_tx_timeout (struct net_device *dev, unsigned int txqueue)
{
int ioaddr = dev->base_addr;
diff --git a/drivers/net/ethernet/3com/3c515.c b/drivers/net/ethernet/3com/3c515.c
index b15752267c8d..1e233e2f0a5a 100644
--- a/drivers/net/ethernet/3com/3c515.c
+++ b/drivers/net/ethernet/3com/3c515.c
@@ -371,7 +371,7 @@ static void corkscrew_timer(struct timer_list *t);
static netdev_tx_t corkscrew_start_xmit(struct sk_buff *skb,
struct net_device *dev);
static int corkscrew_rx(struct net_device *dev);
-static void corkscrew_timeout(struct net_device *dev);
+static void corkscrew_timeout(struct net_device *dev, unsigned int txqueue);
static int boomerang_rx(struct net_device *dev);
static irqreturn_t corkscrew_interrupt(int irq, void *dev_id);
static int corkscrew_close(struct net_device *dev);
@@ -961,7 +961,7 @@ static void corkscrew_timer(struct timer_list *t)
#endif /* AUTOMEDIA */
}
-static void corkscrew_timeout(struct net_device *dev)
+static void corkscrew_timeout(struct net_device *dev, unsigned int txqueue)
{
int i;
struct corkscrew_private *vp = netdev_priv(dev);
diff --git a/drivers/net/ethernet/3com/3c574_cs.c b/drivers/net/ethernet/3com/3c574_cs.c
index 3044a6f35f04..ef1c3151fbb2 100644
--- a/drivers/net/ethernet/3com/3c574_cs.c
+++ b/drivers/net/ethernet/3com/3c574_cs.c
@@ -234,7 +234,7 @@ static void update_stats(struct net_device *dev);
static struct net_device_stats *el3_get_stats(struct net_device *dev);
static int el3_rx(struct net_device *dev, int worklimit);
static int el3_close(struct net_device *dev);
-static void el3_tx_timeout(struct net_device *dev);
+static void el3_tx_timeout(struct net_device *dev, unsigned int txqueue);
static int el3_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
static void set_rx_mode(struct net_device *dev);
static void set_multicast_list(struct net_device *dev);
@@ -690,7 +690,7 @@ static int el3_open(struct net_device *dev)
return 0;
}
-static void el3_tx_timeout(struct net_device *dev)
+static void el3_tx_timeout(struct net_device *dev, unsigned int txqueue)
{
unsigned int ioaddr = dev->base_addr;
diff --git a/drivers/net/ethernet/3com/3c589_cs.c b/drivers/net/ethernet/3com/3c589_cs.c
index 2b2695311bda..d47cde6c5f08 100644
--- a/drivers/net/ethernet/3com/3c589_cs.c
+++ b/drivers/net/ethernet/3com/3c589_cs.c
@@ -173,7 +173,7 @@ static void update_stats(struct net_device *dev);
static struct net_device_stats *el3_get_stats(struct net_device *dev);
static int el3_rx(struct net_device *dev);
static int el3_close(struct net_device *dev);
-static void el3_tx_timeout(struct net_device *dev);
+static void el3_tx_timeout(struct net_device *dev, unsigned int txqueue);
static void set_rx_mode(struct net_device *dev);
static void set_multicast_list(struct net_device *dev);
static const struct ethtool_ops netdev_ethtool_ops;
@@ -526,7 +526,7 @@ static int el3_open(struct net_device *dev)
return 0;
}
-static void el3_tx_timeout(struct net_device *dev)
+static void el3_tx_timeout(struct net_device *dev, unsigned int txqueue)
{
unsigned int ioaddr = dev->base_addr;
diff --git a/drivers/net/ethernet/3com/3c59x.c b/drivers/net/ethernet/3com/3c59x.c
index 8785c2ff3825..a2b7f7ab8170 100644
--- a/drivers/net/ethernet/3com/3c59x.c
+++ b/drivers/net/ethernet/3com/3c59x.c
@@ -776,7 +776,7 @@ static void set_rx_mode(struct net_device *dev);
#ifdef CONFIG_PCI
static int vortex_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
#endif
-static void vortex_tx_timeout(struct net_device *dev);
+static void vortex_tx_timeout(struct net_device *dev, unsigned int txqueue);
static void acpi_set_WOL(struct net_device *dev);
static const struct ethtool_ops vortex_ethtool_ops;
static void set_8021q_mode(struct net_device *dev, int enable);
@@ -1548,7 +1548,7 @@ vortex_up(struct net_device *dev)
struct vortex_private *vp = netdev_priv(dev);
void __iomem *ioaddr = vp->ioaddr;
unsigned int config;
- int i, mii_reg1, mii_reg5, err = 0;
+ int i, mii_reg5, err = 0;
if (VORTEX_PCI(vp)) {
pci_set_power_state(VORTEX_PCI(vp), PCI_D0); /* Go active */
@@ -1605,7 +1605,7 @@ vortex_up(struct net_device *dev)
window_write32(vp, config, 3, Wn3_Config);
if (dev->if_port == XCVR_MII || dev->if_port == XCVR_NWAY) {
- mii_reg1 = mdio_read(dev, vp->phys[0], MII_BMSR);
+ mdio_read(dev, vp->phys[0], MII_BMSR);
mii_reg5 = mdio_read(dev, vp->phys[0], MII_LPA);
vp->partner_flow_ctrl = ((mii_reg5 & 0x0400) != 0);
vp->mii.full_duplex = vp->full_duplex;
@@ -1877,7 +1877,7 @@ leave_media_alone:
iowrite16(FakeIntr, ioaddr + EL3_CMD);
}
-static void vortex_tx_timeout(struct net_device *dev)
+static void vortex_tx_timeout(struct net_device *dev, unsigned int txqueue)
{
struct vortex_private *vp = netdev_priv(dev);
void __iomem *ioaddr = vp->ioaddr;
diff --git a/drivers/net/ethernet/3com/typhoon.c b/drivers/net/ethernet/3com/typhoon.c
index be823c186517..14fce6658106 100644
--- a/drivers/net/ethernet/3com/typhoon.c
+++ b/drivers/net/ethernet/3com/typhoon.c
@@ -2013,7 +2013,7 @@ typhoon_stop_runtime(struct typhoon *tp, int wait_type)
}
static void
-typhoon_tx_timeout(struct net_device *dev)
+typhoon_tx_timeout(struct net_device *dev, unsigned int txqueue)
{
struct typhoon *tp = netdev_priv(dev);
diff --git a/drivers/net/ethernet/8390/8390.c b/drivers/net/ethernet/8390/8390.c
index 78f3e532c600..0e0aa4016858 100644
--- a/drivers/net/ethernet/8390/8390.c
+++ b/drivers/net/ethernet/8390/8390.c
@@ -36,9 +36,9 @@ void ei_set_multicast_list(struct net_device *dev)
}
EXPORT_SYMBOL(ei_set_multicast_list);
-void ei_tx_timeout(struct net_device *dev)
+void ei_tx_timeout(struct net_device *dev, unsigned int txqueue)
{
- __ei_tx_timeout(dev);
+ __ei_tx_timeout(dev, txqueue);
}
EXPORT_SYMBOL(ei_tx_timeout);
diff --git a/drivers/net/ethernet/8390/8390.h b/drivers/net/ethernet/8390/8390.h
index 3e2f2c2e7b58..529c728f334a 100644
--- a/drivers/net/ethernet/8390/8390.h
+++ b/drivers/net/ethernet/8390/8390.h
@@ -32,7 +32,7 @@ void NS8390_init(struct net_device *dev, int startp);
int ei_open(struct net_device *dev);
int ei_close(struct net_device *dev);
irqreturn_t ei_interrupt(int irq, void *dev_id);
-void ei_tx_timeout(struct net_device *dev);
+void ei_tx_timeout(struct net_device *dev, unsigned int txqueue);
netdev_tx_t ei_start_xmit(struct sk_buff *skb, struct net_device *dev);
void ei_set_multicast_list(struct net_device *dev);
struct net_device_stats *ei_get_stats(struct net_device *dev);
@@ -50,7 +50,7 @@ void NS8390p_init(struct net_device *dev, int startp);
int eip_open(struct net_device *dev);
int eip_close(struct net_device *dev);
irqreturn_t eip_interrupt(int irq, void *dev_id);
-void eip_tx_timeout(struct net_device *dev);
+void eip_tx_timeout(struct net_device *dev, unsigned int txqueue);
netdev_tx_t eip_start_xmit(struct sk_buff *skb, struct net_device *dev);
void eip_set_multicast_list(struct net_device *dev);
struct net_device_stats *eip_get_stats(struct net_device *dev);
diff --git a/drivers/net/ethernet/8390/8390p.c b/drivers/net/ethernet/8390/8390p.c
index 6cf36992a2c6..6834742057b3 100644
--- a/drivers/net/ethernet/8390/8390p.c
+++ b/drivers/net/ethernet/8390/8390p.c
@@ -41,9 +41,9 @@ void eip_set_multicast_list(struct net_device *dev)
}
EXPORT_SYMBOL(eip_set_multicast_list);
-void eip_tx_timeout(struct net_device *dev)
+void eip_tx_timeout(struct net_device *dev, unsigned int txqueue)
{
- __ei_tx_timeout(dev);
+ __ei_tx_timeout(dev, txqueue);
}
EXPORT_SYMBOL(eip_tx_timeout);
diff --git a/drivers/net/ethernet/8390/axnet_cs.c b/drivers/net/ethernet/8390/axnet_cs.c
index 0b6bbf63f7ca..aeae7966a082 100644
--- a/drivers/net/ethernet/8390/axnet_cs.c
+++ b/drivers/net/ethernet/8390/axnet_cs.c
@@ -83,7 +83,7 @@ static netdev_tx_t axnet_start_xmit(struct sk_buff *skb,
struct net_device *dev);
static struct net_device_stats *get_stats(struct net_device *dev);
static void set_multicast_list(struct net_device *dev);
-static void axnet_tx_timeout(struct net_device *dev);
+static void axnet_tx_timeout(struct net_device *dev, unsigned int txqueue);
static irqreturn_t ei_irq_wrapper(int irq, void *dev_id);
static void ei_watchdog(struct timer_list *t);
static void axnet_reset_8390(struct net_device *dev);
@@ -903,7 +903,7 @@ static int ax_close(struct net_device *dev)
* completed (or failed) - i.e. never posted a Tx related interrupt.
*/
-static void axnet_tx_timeout(struct net_device *dev)
+static void axnet_tx_timeout(struct net_device *dev, unsigned int txqueue)
{
long e8390_base = dev->base_addr;
struct ei_device *ei_local = netdev_priv(dev);
diff --git a/drivers/net/ethernet/8390/lib8390.c b/drivers/net/ethernet/8390/lib8390.c
index c9c55c9eab9f..babc92e2692e 100644
--- a/drivers/net/ethernet/8390/lib8390.c
+++ b/drivers/net/ethernet/8390/lib8390.c
@@ -251,7 +251,7 @@ static int __ei_close(struct net_device *dev)
* completed (or failed) - i.e. never posted a Tx related interrupt.
*/
-static void __ei_tx_timeout(struct net_device *dev)
+static void __ei_tx_timeout(struct net_device *dev, unsigned int txqueue)
{
unsigned long e8390_base = dev->base_addr;
struct ei_device *ei_local = netdev_priv(dev);
diff --git a/drivers/net/ethernet/adaptec/starfire.c b/drivers/net/ethernet/adaptec/starfire.c
index 816540e6beac..165d18405b0c 100644
--- a/drivers/net/ethernet/adaptec/starfire.c
+++ b/drivers/net/ethernet/adaptec/starfire.c
@@ -576,7 +576,7 @@ static int mdio_read(struct net_device *dev, int phy_id, int location);
static void mdio_write(struct net_device *dev, int phy_id, int location, int value);
static int netdev_open(struct net_device *dev);
static void check_duplex(struct net_device *dev);
-static void tx_timeout(struct net_device *dev);
+static void tx_timeout(struct net_device *dev, unsigned int txqueue);
static void init_ring(struct net_device *dev);
static netdev_tx_t start_tx(struct sk_buff *skb, struct net_device *dev);
static irqreturn_t intr_handler(int irq, void *dev_instance);
@@ -1105,7 +1105,7 @@ static void check_duplex(struct net_device *dev)
}
-static void tx_timeout(struct net_device *dev)
+static void tx_timeout(struct net_device *dev, unsigned int txqueue)
{
struct netdev_private *np = netdev_priv(dev);
void __iomem *ioaddr = np->base;
diff --git a/drivers/net/ethernet/agere/et131x.c b/drivers/net/ethernet/agere/et131x.c
index 174344c450af..cb6a761d5c11 100644
--- a/drivers/net/ethernet/agere/et131x.c
+++ b/drivers/net/ethernet/agere/et131x.c
@@ -3651,15 +3651,6 @@ static int et131x_close(struct net_device *netdev)
return del_timer_sync(&adapter->error_timer);
}
-static int et131x_ioctl(struct net_device *netdev, struct ifreq *reqbuf,
- int cmd)
-{
- if (!netdev->phydev)
- return -EINVAL;
-
- return phy_mii_ioctl(netdev->phydev, reqbuf, cmd);
-}
-
/* et131x_set_packet_filter - Configures the Rx Packet filtering */
static int et131x_set_packet_filter(struct et131x_adapter *adapter)
{
@@ -3811,7 +3802,7 @@ drop_err:
* specified by the 'tx_timeo" element in the net_device structure (see
* et131x_alloc_device() to see how this value is set).
*/
-static void et131x_tx_timeout(struct net_device *netdev)
+static void et131x_tx_timeout(struct net_device *netdev, unsigned int txqueue)
{
struct et131x_adapter *adapter = netdev_priv(netdev);
struct tx_ring *tx_ring = &adapter->tx_ring;
@@ -3899,7 +3890,7 @@ static const struct net_device_ops et131x_netdev_ops = {
.ndo_set_mac_address = eth_mac_addr,
.ndo_validate_addr = eth_validate_addr,
.ndo_get_stats = et131x_stats,
- .ndo_do_ioctl = et131x_ioctl,
+ .ndo_do_ioctl = phy_do_ioctl,
};
static int et131x_pci_setup(struct pci_dev *pdev,
diff --git a/drivers/net/ethernet/alacritech/slicoss.c b/drivers/net/ethernet/alacritech/slicoss.c
index 80ef3e15bd22..9daef4c8feef 100644
--- a/drivers/net/ethernet/alacritech/slicoss.c
+++ b/drivers/net/ethernet/alacritech/slicoss.c
@@ -1791,7 +1791,7 @@ static int slic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
sdev->is_fiber = slic_is_fiber(pdev->subsystem_device);
sdev->pdev = pdev;
sdev->netdev = dev;
- sdev->regs = ioremap_nocache(pci_resource_start(pdev, 0),
+ sdev->regs = ioremap(pci_resource_start(pdev, 0),
pci_resource_len(pdev, 0));
if (!sdev->regs) {
dev_err(&pdev->dev, "failed to map registers\n");
diff --git a/drivers/net/ethernet/allwinner/sun4i-emac.c b/drivers/net/ethernet/allwinner/sun4i-emac.c
index 0537df06a9b5..22cadfbeedfb 100644
--- a/drivers/net/ethernet/allwinner/sun4i-emac.c
+++ b/drivers/net/ethernet/allwinner/sun4i-emac.c
@@ -207,19 +207,6 @@ static void emac_inblk_32bit(void __iomem *reg, void *data, int count)
readsl(reg, data, round_up(count, 4) / 4);
}
-static int emac_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
-{
- struct phy_device *phydev = dev->phydev;
-
- if (!netif_running(dev))
- return -EINVAL;
-
- if (!phydev)
- return -ENODEV;
-
- return phy_mii_ioctl(phydev, rq, cmd);
-}
-
/* ethtool ops */
static void emac_get_drvinfo(struct net_device *dev,
struct ethtool_drvinfo *info)
@@ -407,7 +394,7 @@ static void emac_init_device(struct net_device *dev)
}
/* Our watchdog timed out. Called by the networking layer */
-static void emac_timeout(struct net_device *dev)
+static void emac_timeout(struct net_device *dev, unsigned int txqueue)
{
struct emac_board_info *db = netdev_priv(dev);
unsigned long flags;
@@ -791,7 +778,7 @@ static const struct net_device_ops emac_netdev_ops = {
.ndo_start_xmit = emac_start_xmit,
.ndo_tx_timeout = emac_timeout,
.ndo_set_rx_mode = emac_set_rx_mode,
- .ndo_do_ioctl = emac_ioctl,
+ .ndo_do_ioctl = phy_do_ioctl_running,
.ndo_validate_addr = eth_validate_addr,
.ndo_set_mac_address = emac_set_mac_address,
#ifdef CONFIG_NET_POLL_CONTROLLER
diff --git a/drivers/net/ethernet/alteon/acenic.c b/drivers/net/ethernet/alteon/acenic.c
index 46b4207d3266..f366faf88eee 100644
--- a/drivers/net/ethernet/alteon/acenic.c
+++ b/drivers/net/ethernet/alteon/acenic.c
@@ -437,7 +437,7 @@ static const struct ethtool_ops ace_ethtool_ops = {
.set_link_ksettings = ace_set_link_ksettings,
};
-static void ace_watchdog(struct net_device *dev);
+static void ace_watchdog(struct net_device *dev, unsigned int txqueue);
static const struct net_device_ops ace_netdev_ops = {
.ndo_open = ace_open,
@@ -1542,7 +1542,7 @@ static void ace_set_rxtx_parms(struct net_device *dev, int jumbo)
}
-static void ace_watchdog(struct net_device *data)
+static void ace_watchdog(struct net_device *data, unsigned int txqueue)
{
struct net_device *dev = data;
struct ace_private *ap = netdev_priv(dev);
diff --git a/drivers/net/ethernet/altera/altera_tse_main.c b/drivers/net/ethernet/altera/altera_tse_main.c
index 4cd53fc338b5..1671c1f36691 100644
--- a/drivers/net/ethernet/altera/altera_tse_main.c
+++ b/drivers/net/ethernet/altera/altera_tse_main.c
@@ -1332,10 +1332,10 @@ static int request_and_map(struct platform_device *pdev, const char *name,
return -EBUSY;
}
- *ptr = devm_ioremap_nocache(device, region->start,
+ *ptr = devm_ioremap(device, region->start,
resource_size(region));
if (*ptr == NULL) {
- dev_err(device, "ioremap_nocache of %s failed!", name);
+ dev_err(device, "ioremap of %s failed!", name);
return -ENOMEM;
}
diff --git a/drivers/net/ethernet/amazon/ena/ena_ethtool.c b/drivers/net/ethernet/amazon/ena/ena_ethtool.c
index fc96c66b44cb..b4e891d49a94 100644
--- a/drivers/net/ethernet/amazon/ena/ena_ethtool.c
+++ b/drivers/net/ethernet/amazon/ena/ena_ethtool.c
@@ -740,7 +740,9 @@ static int ena_set_channels(struct net_device *netdev,
struct ena_adapter *adapter = netdev_priv(netdev);
u32 count = channels->combined_count;
/* The check for max value is already done in ethtool */
- if (count < ENA_MIN_NUM_IO_QUEUES)
+ if (count < ENA_MIN_NUM_IO_QUEUES ||
+ (ena_xdp_present(adapter) &&
+ !ena_xdp_legal_queue_count(adapter, channels->combined_count)))
return -EINVAL;
return ena_update_queue_count(adapter, count);
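The extra condition guards the ENA XDP design introduced below: every RX queue gets a dedicated XDP TX ring, so a combined channel count is only legal while twice that count still fits within the device maximum. A sketch of the check, mirroring ena_xdp_legal_queue_count() from ena_netdev.h further down:

    #include <linux/types.h>

    /* e.g. with max_io_queues == 32, at most 16 combined channels are
     * legal while XDP is loaded, since rings 16..31 double as XDP TX rings
     */
    static bool foo_xdp_legal_queue_count(u32 requested, u32 max_io_queues)
    {
            return 2 * requested <= max_io_queues;
    }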
diff --git a/drivers/net/ethernet/amazon/ena/ena_netdev.c b/drivers/net/ethernet/amazon/ena/ena_netdev.c
index 948583fdcc28..894e8c1a8cf1 100644
--- a/drivers/net/ethernet/amazon/ena/ena_netdev.c
+++ b/drivers/net/ethernet/amazon/ena/ena_netdev.c
@@ -36,7 +36,6 @@
#include <linux/cpu_rmap.h>
#endif /* CONFIG_RFS_ACCEL */
#include <linux/ethtool.h>
-#include <linux/if_vlan.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/numa.h>
@@ -47,6 +46,7 @@
#include <net/ip.h>
#include "ena_netdev.h"
+#include <linux/bpf_trace.h>
#include "ena_pci_id_tbl.h"
static char version[] = DEVICE_NAME " v" DRV_MODULE_VERSION "\n";
@@ -78,7 +78,37 @@ static void check_for_admin_com_state(struct ena_adapter *adapter);
static void ena_destroy_device(struct ena_adapter *adapter, bool graceful);
static int ena_restore_device(struct ena_adapter *adapter);
-static void ena_tx_timeout(struct net_device *dev)
+static void ena_init_io_rings(struct ena_adapter *adapter,
+ int first_index, int count);
+static void ena_init_napi_in_range(struct ena_adapter *adapter, int first_index,
+ int count);
+static void ena_del_napi_in_range(struct ena_adapter *adapter, int first_index,
+ int count);
+static int ena_setup_tx_resources(struct ena_adapter *adapter, int qid);
+static int ena_setup_tx_resources_in_range(struct ena_adapter *adapter,
+ int first_index,
+ int count);
+static int ena_create_io_tx_queue(struct ena_adapter *adapter, int qid);
+static void ena_free_tx_resources(struct ena_adapter *adapter, int qid);
+static int ena_clean_xdp_irq(struct ena_ring *xdp_ring, u32 budget);
+static void ena_destroy_all_tx_queues(struct ena_adapter *adapter);
+static void ena_free_all_io_tx_resources(struct ena_adapter *adapter);
+static void ena_napi_disable_in_range(struct ena_adapter *adapter,
+ int first_index, int count);
+static void ena_napi_enable_in_range(struct ena_adapter *adapter,
+ int first_index, int count);
+static int ena_up(struct ena_adapter *adapter);
+static void ena_down(struct ena_adapter *adapter);
+static void ena_unmask_interrupt(struct ena_ring *tx_ring,
+ struct ena_ring *rx_ring);
+static void ena_update_ring_numa_node(struct ena_ring *tx_ring,
+ struct ena_ring *rx_ring);
+static void ena_unmap_tx_buff(struct ena_ring *tx_ring,
+ struct ena_tx_buffer *tx_info);
+static int ena_create_io_tx_queues_in_range(struct ena_adapter *adapter,
+ int first_index, int count);
+
+static void ena_tx_timeout(struct net_device *dev, unsigned int txqueue)
{
struct ena_adapter *adapter = netdev_priv(dev);
@@ -123,6 +153,448 @@ static int ena_change_mtu(struct net_device *dev, int new_mtu)
return ret;
}
+static int ena_xmit_common(struct net_device *dev,
+ struct ena_ring *ring,
+ struct ena_tx_buffer *tx_info,
+ struct ena_com_tx_ctx *ena_tx_ctx,
+ u16 next_to_use,
+ u32 bytes)
+{
+ struct ena_adapter *adapter = netdev_priv(dev);
+ int rc, nb_hw_desc;
+
+ if (unlikely(ena_com_is_doorbell_needed(ring->ena_com_io_sq,
+ ena_tx_ctx))) {
+ netif_dbg(adapter, tx_queued, dev,
+ "llq tx max burst size of queue %d achieved, writing doorbell to send burst\n",
+ ring->qid);
+ ena_com_write_sq_doorbell(ring->ena_com_io_sq);
+ }
+
+ /* prepare the packet's descriptors for the dma engine */
+ rc = ena_com_prepare_tx(ring->ena_com_io_sq, ena_tx_ctx,
+ &nb_hw_desc);
+
+ /* In case there isn't enough space in the queue for the packet,
+ * we simply drop it. All other failure reasons of
+ * ena_com_prepare_tx() are fatal and therefore require a device reset.
+ */
+ if (unlikely(rc)) {
+ netif_err(adapter, tx_queued, dev,
+ "failed to prepare tx bufs\n");
+ u64_stats_update_begin(&ring->syncp);
+ ring->tx_stats.prepare_ctx_err++;
+ u64_stats_update_end(&ring->syncp);
+ if (rc != -ENOMEM) {
+ adapter->reset_reason =
+ ENA_REGS_RESET_DRIVER_INVALID_STATE;
+ set_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags);
+ }
+ return rc;
+ }
+
+ u64_stats_update_begin(&ring->syncp);
+ ring->tx_stats.cnt++;
+ ring->tx_stats.bytes += bytes;
+ u64_stats_update_end(&ring->syncp);
+
+ tx_info->tx_descs = nb_hw_desc;
+ tx_info->last_jiffies = jiffies;
+ tx_info->print_once = 0;
+
+ ring->next_to_use = ENA_TX_RING_IDX_NEXT(next_to_use,
+ ring->ring_size);
+ return 0;
+}
+
+/* This is the XDP napi callback. XDP queues use a napi callback
+ * separate from the one used by Rx/Tx queues.
+ */
+static int ena_xdp_io_poll(struct napi_struct *napi, int budget)
+{
+ struct ena_napi *ena_napi = container_of(napi, struct ena_napi, napi);
+ u32 xdp_work_done, xdp_budget;
+ struct ena_ring *xdp_ring;
+ int napi_comp_call = 0;
+ int ret;
+
+ xdp_ring = ena_napi->xdp_ring;
+ xdp_ring->first_interrupt = ena_napi->first_interrupt;
+
+ xdp_budget = budget;
+
+ if (!test_bit(ENA_FLAG_DEV_UP, &xdp_ring->adapter->flags) ||
+ test_bit(ENA_FLAG_TRIGGER_RESET, &xdp_ring->adapter->flags)) {
+ napi_complete_done(napi, 0);
+ return 0;
+ }
+
+ xdp_work_done = ena_clean_xdp_irq(xdp_ring, xdp_budget);
+
+ /* If the device is about to reset or go down, avoid unmasking
+ * the interrupt and return 0 so NAPI won't reschedule
+ */
+ if (unlikely(!test_bit(ENA_FLAG_DEV_UP, &xdp_ring->adapter->flags))) {
+ napi_complete_done(napi, 0);
+ ret = 0;
+ } else if (xdp_budget > xdp_work_done) {
+ napi_comp_call = 1;
+ if (napi_complete_done(napi, xdp_work_done))
+ ena_unmask_interrupt(xdp_ring, NULL);
+ ena_update_ring_numa_node(xdp_ring, NULL);
+ ret = xdp_work_done;
+ } else {
+ ret = xdp_budget;
+ }
+
+ u64_stats_update_begin(&xdp_ring->syncp);
+ xdp_ring->tx_stats.napi_comp += napi_comp_call;
+ xdp_ring->tx_stats.tx_poll++;
+ u64_stats_update_end(&xdp_ring->syncp);
+
+ return ret;
+}
+
+static int ena_xdp_tx_map_buff(struct ena_ring *xdp_ring,
+ struct ena_tx_buffer *tx_info,
+ struct xdp_buff *xdp,
+ void **push_hdr,
+ u32 *push_len)
+{
+ struct ena_adapter *adapter = xdp_ring->adapter;
+ struct ena_com_buf *ena_buf;
+ dma_addr_t dma = 0;
+ u32 size;
+
+ tx_info->xdpf = convert_to_xdp_frame(xdp);
+ size = tx_info->xdpf->len;
+ ena_buf = tx_info->bufs;
+
+ /* llq push buffer */
+ *push_len = min_t(u32, size, xdp_ring->tx_max_header_size);
+ *push_hdr = tx_info->xdpf->data;
+
+ if (size - *push_len > 0) {
+ dma = dma_map_single(xdp_ring->dev,
+ *push_hdr + *push_len,
+ size - *push_len,
+ DMA_TO_DEVICE);
+ if (unlikely(dma_mapping_error(xdp_ring->dev, dma)))
+ goto error_report_dma_error;
+
+ tx_info->map_linear_data = 1;
+ tx_info->num_of_bufs = 1;
+ }
+
+ ena_buf->paddr = dma;
+ ena_buf->len = size;
+
+ return 0;
+
+error_report_dma_error:
+ u64_stats_update_begin(&xdp_ring->syncp);
+ xdp_ring->tx_stats.dma_mapping_err++;
+ u64_stats_update_end(&xdp_ring->syncp);
+ netdev_warn(adapter->netdev, "failed to map xdp buff\n");
+
+ xdp_return_frame_rx_napi(tx_info->xdpf);
+ tx_info->xdpf = NULL;
+ tx_info->num_of_bufs = 0;
+
+ return -EINVAL;
+}
+
+static int ena_xdp_xmit_buff(struct net_device *dev,
+ struct xdp_buff *xdp,
+ int qid,
+ struct ena_rx_buffer *rx_info)
+{
+ struct ena_adapter *adapter = netdev_priv(dev);
+ struct ena_com_tx_ctx ena_tx_ctx = {0};
+ struct ena_tx_buffer *tx_info;
+ struct ena_ring *xdp_ring;
+ u16 next_to_use, req_id;
+ int rc;
+ void *push_hdr;
+ u32 push_len;
+
+ xdp_ring = &adapter->tx_ring[qid];
+ next_to_use = xdp_ring->next_to_use;
+ req_id = xdp_ring->free_ids[next_to_use];
+ tx_info = &xdp_ring->tx_buffer_info[req_id];
+ tx_info->num_of_bufs = 0;
+ page_ref_inc(rx_info->page);
+ tx_info->xdp_rx_page = rx_info->page;
+
+ rc = ena_xdp_tx_map_buff(xdp_ring, tx_info, xdp, &push_hdr, &push_len);
+ if (unlikely(rc))
+ goto error_drop_packet;
+
+ ena_tx_ctx.ena_bufs = tx_info->bufs;
+ ena_tx_ctx.push_header = push_hdr;
+ ena_tx_ctx.num_bufs = tx_info->num_of_bufs;
+ ena_tx_ctx.req_id = req_id;
+ ena_tx_ctx.header_len = push_len;
+
+ rc = ena_xmit_common(dev,
+ xdp_ring,
+ tx_info,
+ &ena_tx_ctx,
+ next_to_use,
+ xdp->data_end - xdp->data);
+ if (rc)
+ goto error_unmap_dma;
+ /* trigger the dma engine. ena_com_write_sq_doorbell()
+ * has a memory barrier
+ */
+ ena_com_write_sq_doorbell(xdp_ring->ena_com_io_sq);
+ u64_stats_update_begin(&xdp_ring->syncp);
+ xdp_ring->tx_stats.doorbells++;
+ u64_stats_update_end(&xdp_ring->syncp);
+
+ return NETDEV_TX_OK;
+
+error_unmap_dma:
+ ena_unmap_tx_buff(xdp_ring, tx_info);
+ tx_info->xdpf = NULL;
+error_drop_packet:
+
+ return NETDEV_TX_OK;
+}
+
+static int ena_xdp_execute(struct ena_ring *rx_ring,
+ struct xdp_buff *xdp,
+ struct ena_rx_buffer *rx_info)
+{
+ struct bpf_prog *xdp_prog;
+ u32 verdict = XDP_PASS;
+
+ rcu_read_lock();
+ xdp_prog = READ_ONCE(rx_ring->xdp_bpf_prog);
+
+ if (!xdp_prog)
+ goto out;
+
+ verdict = bpf_prog_run_xdp(xdp_prog, xdp);
+
+ if (verdict == XDP_TX)
+ ena_xdp_xmit_buff(rx_ring->netdev,
+ xdp,
+ rx_ring->qid + rx_ring->adapter->num_io_queues,
+ rx_info);
+ else if (unlikely(verdict == XDP_ABORTED))
+ trace_xdp_exception(rx_ring->netdev, xdp_prog, verdict);
+ else if (unlikely(verdict > XDP_TX))
+ bpf_warn_invalid_xdp_action(verdict);
+out:
+ rcu_read_unlock();
+ return verdict;
+}
+
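ena_xdp_execute() above is the conventional verdict dispatch: the program must run under rcu_read_lock() because the prog pointer is swapped with xchg() while traffic flows, and anything past the highest verdict the driver implements is reported as invalid, which is how XDP_REDIRECT (unsupported here) ends up in the warning path. A generic sketch of that dispatch with hypothetical foo_ naming, using the one-argument bpf_warn_invalid_xdp_action() as this driver does:

    #include <linux/bpf_trace.h>
    #include <linux/filter.h>
    #include <linux/netdevice.h>

    /* run @prog on @xdp and normalize the verdict; the caller must hold
     * rcu_read_lock() across the program run
     */
    static u32 foo_run_xdp(struct net_device *netdev, struct bpf_prog *prog,
                           struct xdp_buff *xdp)
    {
            u32 verdict = bpf_prog_run_xdp(prog, xdp);

            switch (verdict) {
            case XDP_PASS:          /* hand the frame to the regular stack */
            case XDP_TX:            /* retransmit out of the receiving port */
            case XDP_DROP:          /* free the buffer, nothing more to do */
                    break;
            case XDP_ABORTED:       /* the program signalled an error */
                    trace_xdp_exception(netdev, prog, verdict);
                    verdict = XDP_DROP;
                    break;
            default:                /* verdict this driver doesn't implement */
                    bpf_warn_invalid_xdp_action(verdict);
                    verdict = XDP_DROP;
                    break;
            }
            return verdict;
    }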
+static void ena_init_all_xdp_queues(struct ena_adapter *adapter)
+{
+ adapter->xdp_first_ring = adapter->num_io_queues;
+ adapter->xdp_num_queues = adapter->num_io_queues;
+
+ ena_init_io_rings(adapter,
+ adapter->xdp_first_ring,
+ adapter->xdp_num_queues);
+}
+
+static int ena_setup_and_create_all_xdp_queues(struct ena_adapter *adapter)
+{
+ int rc = 0;
+
+ rc = ena_setup_tx_resources_in_range(adapter, adapter->xdp_first_ring,
+ adapter->xdp_num_queues);
+ if (rc)
+ goto setup_err;
+
+ rc = ena_create_io_tx_queues_in_range(adapter,
+ adapter->xdp_first_ring,
+ adapter->xdp_num_queues);
+ if (rc)
+ goto create_err;
+
+ return 0;
+
+create_err:
+ ena_free_all_io_tx_resources(adapter);
+setup_err:
+ return rc;
+}
+
+/* Provides a way for both the kernel and the bpf program to know
+ * more about the RX-queue a given XDP frame arrived on.
+ */
+static int ena_xdp_register_rxq_info(struct ena_ring *rx_ring)
+{
+ int rc;
+
+ rc = xdp_rxq_info_reg(&rx_ring->xdp_rxq, rx_ring->netdev, rx_ring->qid);
+
+ if (rc) {
+ netif_err(rx_ring->adapter, ifup, rx_ring->netdev,
+ "Failed to register xdp rx queue info. RX queue num %d rc: %d\n",
+ rx_ring->qid, rc);
+ goto err;
+ }
+
+ rc = xdp_rxq_info_reg_mem_model(&rx_ring->xdp_rxq, MEM_TYPE_PAGE_SHARED,
+ NULL);
+
+ if (rc) {
+ netif_err(rx_ring->adapter, ifup, rx_ring->netdev,
+ "Failed to register xdp rx queue info memory model. RX queue num %d rc: %d\n",
+ rx_ring->qid, rc);
+ xdp_rxq_info_unreg(&rx_ring->xdp_rxq);
+ }
+
+err:
+ return rc;
+}
+
+static void ena_xdp_unregister_rxq_info(struct ena_ring *rx_ring)
+{
+ xdp_rxq_info_unreg_mem_model(&rx_ring->xdp_rxq);
+ xdp_rxq_info_unreg(&rx_ring->xdp_rxq);
+}
+
+void ena_xdp_exchange_program_rx_in_range(struct ena_adapter *adapter,
+ struct bpf_prog *prog,
+ int first,
+ int count)
+{
+ struct ena_ring *rx_ring;
+ int i = 0;
+
+ for (i = first; i < count; i++) {
+ rx_ring = &adapter->rx_ring[i];
+ xchg(&rx_ring->xdp_bpf_prog, prog);
+ if (prog) {
+ ena_xdp_register_rxq_info(rx_ring);
+ rx_ring->rx_headroom = XDP_PACKET_HEADROOM;
+ } else {
+ ena_xdp_unregister_rxq_info(rx_ring);
+ rx_ring->rx_headroom = 0;
+ }
+ }
+}
+
+void ena_xdp_exchange_program(struct ena_adapter *adapter,
+ struct bpf_prog *prog)
+{
+ struct bpf_prog *old_bpf_prog = xchg(&adapter->xdp_bpf_prog, prog);
+
+ ena_xdp_exchange_program_rx_in_range(adapter,
+ prog,
+ 0,
+ adapter->num_io_queues);
+
+ if (old_bpf_prog)
+ bpf_prog_put(old_bpf_prog);
+}
+
+static int ena_destroy_and_free_all_xdp_queues(struct ena_adapter *adapter)
+{
+ bool was_up;
+ int rc;
+
+ was_up = test_bit(ENA_FLAG_DEV_UP, &adapter->flags);
+
+ if (was_up)
+ ena_down(adapter);
+
+ adapter->xdp_first_ring = 0;
+ adapter->xdp_num_queues = 0;
+ ena_xdp_exchange_program(adapter, NULL);
+ if (was_up) {
+ rc = ena_up(adapter);
+ if (rc)
+ return rc;
+ }
+ return 0;
+}
+
+static int ena_xdp_set(struct net_device *netdev, struct netdev_bpf *bpf)
+{
+ struct ena_adapter *adapter = netdev_priv(netdev);
+ struct bpf_prog *prog = bpf->prog;
+ struct bpf_prog *old_bpf_prog;
+ int rc, prev_mtu;
+ bool is_up;
+
+ is_up = test_bit(ENA_FLAG_DEV_UP, &adapter->flags);
+ rc = ena_xdp_allowed(adapter);
+ if (rc == ENA_XDP_ALLOWED) {
+ old_bpf_prog = adapter->xdp_bpf_prog;
+ if (prog) {
+ if (!is_up) {
+ ena_init_all_xdp_queues(adapter);
+ } else if (!old_bpf_prog) {
+ ena_down(adapter);
+ ena_init_all_xdp_queues(adapter);
+ }
+ ena_xdp_exchange_program(adapter, prog);
+
+ if (is_up && !old_bpf_prog) {
+ rc = ena_up(adapter);
+ if (rc)
+ return rc;
+ }
+ } else if (old_bpf_prog) {
+ rc = ena_destroy_and_free_all_xdp_queues(adapter);
+ if (rc)
+ return rc;
+ }
+
+ prev_mtu = netdev->max_mtu;
+ netdev->max_mtu = prog ? ENA_XDP_MAX_MTU : adapter->max_mtu;
+
+ if (!old_bpf_prog)
+ netif_info(adapter, drv, adapter->netdev,
+ "xdp program set, changing the max_mtu from %d to %d",
+ prev_mtu, netdev->max_mtu);
+
+ } else if (rc == ENA_XDP_CURRENT_MTU_TOO_LARGE) {
+ netif_err(adapter, drv, adapter->netdev,
+ "Failed to set xdp program, the current MTU (%d) is larger than the maximum allowed MTU (%lu) while xdp is on",
+ netdev->mtu, ENA_XDP_MAX_MTU);
+ NL_SET_ERR_MSG_MOD(bpf->extack,
+ "Failed to set xdp program, the current MTU is larger than the maximum allowed MTU. Check the dmesg for more info");
+ return -EINVAL;
+ } else if (rc == ENA_XDP_NO_ENOUGH_QUEUES) {
+ netif_err(adapter, drv, adapter->netdev,
+ "Failed to set xdp program, the Rx/Tx channel count should be at most half of the maximum allowed channel count. The current queue count (%d), the maximal queue count (%d)\n",
+ adapter->num_io_queues, adapter->max_num_io_queues);
+ NL_SET_ERR_MSG_MOD(bpf->extack,
+ "Failed to set xdp program, there is no enough space for allocating XDP queues, Check the dmesg for more info");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+/* This is the main xdp callback, it's used by the kernel to set/unset the xdp
+ * program as well as to query the current xdp program id.
+ */
+static int ena_xdp(struct net_device *netdev, struct netdev_bpf *bpf)
+{
+ struct ena_adapter *adapter = netdev_priv(netdev);
+
+ switch (bpf->command) {
+ case XDP_SETUP_PROG:
+ return ena_xdp_set(netdev, bpf);
+ case XDP_QUERY_PROG:
+ bpf->prog_id = adapter->xdp_bpf_prog ?
+ adapter->xdp_bpf_prog->aux->id : 0;
+ break;
+ default:
+ return -EINVAL;
+ }
+ return 0;
+}
+
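ena_xdp() is the driver's single ndo_bpf entry point. For context, the usual userspace path that reaches it (standard iproute2 usage, shown as a comment):

    /* Attaching a program ends up in ena_xdp() with XDP_SETUP_PROG:
     *
     *   # ip link set dev eth0 xdp obj xdp_prog.o sec xdp
     *
     * and "ip link show dev eth0" reads back the program id that
     * XDP_QUERY_PROG reports.
     */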
static int ena_init_rx_cpu_rmap(struct ena_adapter *adapter)
{
#ifdef CONFIG_RFS_ACCEL
@@ -164,7 +636,8 @@ static void ena_init_io_rings_common(struct ena_adapter *adapter,
u64_stats_init(&ring->syncp);
}
-static void ena_init_io_rings(struct ena_adapter *adapter)
+static void ena_init_io_rings(struct ena_adapter *adapter,
+ int first_index, int count)
{
struct ena_com_dev *ena_dev;
struct ena_ring *txr, *rxr;
@@ -172,13 +645,12 @@ static void ena_init_io_rings(struct ena_adapter *adapter)
ena_dev = adapter->ena_dev;
- for (i = 0; i < adapter->num_io_queues; i++) {
+ for (i = first_index; i < first_index + count; i++) {
txr = &adapter->tx_ring[i];
rxr = &adapter->rx_ring[i];
- /* TX/RX common ring state */
+ /* TX common ring state */
ena_init_io_rings_common(adapter, txr, i);
- ena_init_io_rings_common(adapter, rxr, i);
/* TX specific ring state */
txr->ring_size = adapter->requested_tx_ring_size;
@@ -188,14 +660,20 @@ static void ena_init_io_rings(struct ena_adapter *adapter)
txr->smoothed_interval =
ena_com_get_nonadaptive_moderation_interval_tx(ena_dev);
- /* RX specific ring state */
- rxr->ring_size = adapter->requested_rx_ring_size;
- rxr->rx_copybreak = adapter->rx_copybreak;
- rxr->sgl_size = adapter->max_rx_sgl_size;
- rxr->smoothed_interval =
- ena_com_get_nonadaptive_moderation_interval_rx(ena_dev);
- rxr->empty_rx_queue = 0;
- adapter->ena_napi[i].dim.mode = DIM_CQ_PERIOD_MODE_START_FROM_EQE;
+ /* Don't init RX queues for xdp queues */
+ if (!ENA_IS_XDP_INDEX(adapter, i)) {
+ /* RX common ring state */
+ ena_init_io_rings_common(adapter, rxr, i);
+
+ /* RX specific ring state */
+ rxr->ring_size = adapter->requested_rx_ring_size;
+ rxr->rx_copybreak = adapter->rx_copybreak;
+ rxr->sgl_size = adapter->max_rx_sgl_size;
+ rxr->smoothed_interval =
+ ena_com_get_nonadaptive_moderation_interval_rx(ena_dev);
+ rxr->empty_rx_queue = 0;
+ adapter->ena_napi[i].dim.mode = DIM_CQ_PERIOD_MODE_START_FROM_EQE;
+ }
}
}
@@ -285,16 +763,13 @@ static void ena_free_tx_resources(struct ena_adapter *adapter, int qid)
tx_ring->push_buf_intermediate_buf = NULL;
}
-/* ena_setup_all_tx_resources - allocate I/O Tx queues resources for All queues
- * @adapter: private structure
- *
- * Return 0 on success, negative on failure
- */
-static int ena_setup_all_tx_resources(struct ena_adapter *adapter)
+static int ena_setup_tx_resources_in_range(struct ena_adapter *adapter,
+ int first_index,
+ int count)
{
int i, rc = 0;
- for (i = 0; i < adapter->num_io_queues; i++) {
+ for (i = first_index; i < first_index + count; i++) {
rc = ena_setup_tx_resources(adapter, i);
if (rc)
goto err_setup_tx;
@@ -308,11 +783,20 @@ err_setup_tx:
"Tx queue %d: allocation failed\n", i);
/* rewind the index freeing the rings as we go */
- while (i--)
+ while (first_index < i--)
ena_free_tx_resources(adapter, i);
return rc;
}
+static void ena_free_all_io_tx_resources_in_range(struct ena_adapter *adapter,
+ int first_index, int count)
+{
+ int i;
+
+ for (i = first_index; i < first_index + count; i++)
+ ena_free_tx_resources(adapter, i);
+}
+
/* ena_free_all_io_tx_resources - Free I/O Tx Resources for All Queues
* @adapter: board private structure
*
@@ -320,10 +804,10 @@ err_setup_tx:
*/
static void ena_free_all_io_tx_resources(struct ena_adapter *adapter)
{
- int i;
-
- for (i = 0; i < adapter->num_io_queues; i++)
- ena_free_tx_resources(adapter, i);
+ ena_free_all_io_tx_resources_in_range(adapter,
+ 0,
+ adapter->xdp_num_queues +
+ adapter->num_io_queues);
}
static int validate_rx_req_id(struct ena_ring *rx_ring, u16 req_id)
@@ -495,8 +979,8 @@ static int ena_alloc_rx_page(struct ena_ring *rx_ring,
rx_info->page = page;
rx_info->page_offset = 0;
ena_buf = &rx_info->ena_buf;
- ena_buf->paddr = dma;
- ena_buf->len = ENA_PAGE_SIZE;
+ ena_buf->paddr = dma + rx_ring->rx_headroom;
+ ena_buf->len = ENA_PAGE_SIZE - rx_ring->rx_headroom;
return 0;
}
@@ -513,7 +997,9 @@ static void ena_free_rx_page(struct ena_ring *rx_ring,
return;
}
- dma_unmap_page(rx_ring->dev, ena_buf->paddr, ENA_PAGE_SIZE,
+ dma_unmap_page(rx_ring->dev,
+ ena_buf->paddr - rx_ring->rx_headroom,
+ ENA_PAGE_SIZE,
DMA_FROM_DEVICE);
__free_page(page);
@@ -620,8 +1106,8 @@ static void ena_free_all_rx_bufs(struct ena_adapter *adapter)
ena_free_rx_bufs(adapter, i);
}
-static void ena_unmap_tx_skb(struct ena_ring *tx_ring,
- struct ena_tx_buffer *tx_info)
+static void ena_unmap_tx_buff(struct ena_ring *tx_ring,
+ struct ena_tx_buffer *tx_info)
{
struct ena_com_buf *ena_buf;
u32 cnt;
@@ -675,7 +1161,7 @@ static void ena_free_tx_bufs(struct ena_ring *tx_ring)
tx_ring->qid, i);
}
- ena_unmap_tx_skb(tx_ring, tx_info);
+ ena_unmap_tx_buff(tx_ring, tx_info);
dev_kfree_skb_any(tx_info->skb);
}
@@ -688,7 +1174,7 @@ static void ena_free_all_tx_bufs(struct ena_adapter *adapter)
struct ena_ring *tx_ring;
int i;
- for (i = 0; i < adapter->num_io_queues; i++) {
+ for (i = 0; i < adapter->num_io_queues + adapter->xdp_num_queues; i++) {
tx_ring = &adapter->tx_ring[i];
ena_free_tx_bufs(tx_ring);
}
@@ -699,7 +1185,7 @@ static void ena_destroy_all_tx_queues(struct ena_adapter *adapter)
u16 ena_qid;
int i;
- for (i = 0; i < adapter->num_io_queues; i++) {
+ for (i = 0; i < adapter->num_io_queues + adapter->xdp_num_queues; i++) {
ena_qid = ENA_IO_TXQ_IDX(i);
ena_com_destroy_io_queue(adapter->ena_dev, ena_qid);
}
@@ -723,6 +1209,32 @@ static void ena_destroy_all_io_queues(struct ena_adapter *adapter)
ena_destroy_all_rx_queues(adapter);
}
+static int handle_invalid_req_id(struct ena_ring *ring, u16 req_id,
+ struct ena_tx_buffer *tx_info, bool is_xdp)
+{
+ if (tx_info)
+ netif_err(ring->adapter,
+ tx_done,
+ ring->netdev,
+ "tx_info doesn't have valid %s",
+ is_xdp ? "xdp frame" : "skb");
+ else
+ netif_err(ring->adapter,
+ tx_done,
+ ring->netdev,
+ "Invalid req_id: %hu\n",
+ req_id);
+
+ u64_stats_update_begin(&ring->syncp);
+ ring->tx_stats.bad_req_id++;
+ u64_stats_update_end(&ring->syncp);
+
+ /* Trigger device reset */
+ ring->adapter->reset_reason = ENA_REGS_RESET_INV_TX_REQ_ID;
+ set_bit(ENA_FLAG_TRIGGER_RESET, &ring->adapter->flags);
+ return -EFAULT;
+}
+
static int validate_tx_req_id(struct ena_ring *tx_ring, u16 req_id)
{
struct ena_tx_buffer *tx_info = NULL;
@@ -733,21 +1245,20 @@ static int validate_tx_req_id(struct ena_ring *tx_ring, u16 req_id)
return 0;
}
- if (tx_info)
- netif_err(tx_ring->adapter, tx_done, tx_ring->netdev,
- "tx_info doesn't have valid skb\n");
- else
- netif_err(tx_ring->adapter, tx_done, tx_ring->netdev,
- "Invalid req_id: %hu\n", req_id);
+ return handle_invalid_req_id(tx_ring, req_id, tx_info, false);
+}
- u64_stats_update_begin(&tx_ring->syncp);
- tx_ring->tx_stats.bad_req_id++;
- u64_stats_update_end(&tx_ring->syncp);
+static int validate_xdp_req_id(struct ena_ring *xdp_ring, u16 req_id)
+{
+ struct ena_tx_buffer *tx_info = NULL;
- /* Trigger device reset */
- tx_ring->adapter->reset_reason = ENA_REGS_RESET_INV_TX_REQ_ID;
- set_bit(ENA_FLAG_TRIGGER_RESET, &tx_ring->adapter->flags);
- return -EFAULT;
+ if (likely(req_id < xdp_ring->ring_size)) {
+ tx_info = &xdp_ring->tx_buffer_info[req_id];
+ if (likely(tx_info->xdpf))
+ return 0;
+ }
+
+ return handle_invalid_req_id(xdp_ring, req_id, tx_info, true);
}
static int ena_clean_tx_irq(struct ena_ring *tx_ring, u32 budget)
@@ -786,7 +1297,7 @@ static int ena_clean_tx_irq(struct ena_ring *tx_ring, u32 budget)
tx_info->skb = NULL;
tx_info->last_jiffies = 0;
- ena_unmap_tx_skb(tx_ring, tx_info);
+ ena_unmap_tx_buff(tx_ring, tx_info);
netif_dbg(tx_ring->adapter, tx_done, tx_ring->netdev,
"tx_poll: q %d skb %p completed\n", tx_ring->qid,
@@ -1037,6 +1548,33 @@ static void ena_set_rx_hash(struct ena_ring *rx_ring,
}
}
+int ena_xdp_handle_buff(struct ena_ring *rx_ring, struct xdp_buff *xdp)
+{
+ struct ena_rx_buffer *rx_info;
+ int ret;
+
+ rx_info = &rx_ring->rx_buffer_info[rx_ring->ena_bufs[0].req_id];
+ xdp->data = page_address(rx_info->page) +
+ rx_info->page_offset + rx_ring->rx_headroom;
+ xdp_set_data_meta_invalid(xdp);
+ xdp->data_hard_start = page_address(rx_info->page);
+ xdp->data_end = xdp->data + rx_ring->ena_bufs[0].len;
+ /* If for some reason we received a bigger packet than
+ * we expect, then we simply drop it
+ */
+ if (unlikely(rx_ring->ena_bufs[0].len > ENA_XDP_MAX_MTU))
+ return XDP_DROP;
+
+ ret = ena_xdp_execute(rx_ring, xdp, rx_info);
+
+ /* The xdp program might expand the headers */
+ if (ret == XDP_PASS) {
+ rx_info->page_offset = xdp->data - xdp->data_hard_start;
+ rx_ring->ena_bufs[0].len = xdp->data_end - xdp->data;
+ }
+
+ return ret;
+}
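ena_xdp_handle_buff() builds the xdp_buff the way most page-based drivers do: the headroom reserved at refill time (rx_headroom, set to XDP_PACKET_HEADROOM once a program is attached) sits between data_hard_start and data so the program can grow headers downward, and after XDP_PASS the offsets are read back because the program may have moved both ends. A sketch of the layout, assuming a single-descriptor frame:

    /*
     * page_address(rx_info->page)
     * |<- page_offset + rx_headroom ->|<-- frame len -->| ...tailroom...
     * ^                               ^                 ^
     * data_hard_start                 data              data_end
     *
     * data            = page_address(page) + page_offset + rx_headroom
     * data_end        = data + ena_bufs[0].len
     * data_hard_start = page_address(page)
     */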
/* ena_clean_rx_irq - Cleanup RX irq
* @rx_ring: RX ring to clean
* @napi: napi handler
@@ -1048,23 +1586,27 @@ static int ena_clean_rx_irq(struct ena_ring *rx_ring, struct napi_struct *napi,
u32 budget)
{
u16 next_to_clean = rx_ring->next_to_clean;
- u32 res_budget, work_done;
-
struct ena_com_rx_ctx ena_rx_ctx;
struct ena_adapter *adapter;
+ u32 res_budget, work_done;
+ int rx_copybreak_pkt = 0;
+ int refill_threshold;
struct sk_buff *skb;
int refill_required;
- int refill_threshold;
- int rc = 0;
+ struct xdp_buff xdp;
int total_len = 0;
- int rx_copybreak_pkt = 0;
+ int xdp_verdict;
+ int rc = 0;
int i;
netif_dbg(rx_ring->adapter, rx_status, rx_ring->netdev,
"%s qid %d\n", __func__, rx_ring->qid);
res_budget = budget;
+ xdp.rxq = &rx_ring->xdp_rxq;
do {
+ xdp_verdict = XDP_PASS;
+ skb = NULL;
ena_rx_ctx.ena_bufs = rx_ring->ena_bufs;
ena_rx_ctx.max_bufs = rx_ring->sgl_size;
ena_rx_ctx.descs = 0;
@@ -1082,12 +1624,22 @@ static int ena_clean_rx_irq(struct ena_ring *rx_ring, struct napi_struct *napi,
rx_ring->qid, ena_rx_ctx.descs, ena_rx_ctx.l3_proto,
ena_rx_ctx.l4_proto, ena_rx_ctx.hash);
+ if (ena_xdp_present_ring(rx_ring))
+ xdp_verdict = ena_xdp_handle_buff(rx_ring, &xdp);
+
/* allocate skb and fill it */
- skb = ena_rx_skb(rx_ring, rx_ring->ena_bufs, ena_rx_ctx.descs,
- &next_to_clean);
+ if (xdp_verdict == XDP_PASS)
+ skb = ena_rx_skb(rx_ring,
+ rx_ring->ena_bufs,
+ ena_rx_ctx.descs,
+ &next_to_clean);
- /* exit if we failed to retrieve a buffer */
if (unlikely(!skb)) {
+ if (xdp_verdict == XDP_TX) {
+ ena_free_rx_page(rx_ring,
+ &rx_ring->rx_buffer_info[rx_ring->ena_bufs[0].req_id]);
+ res_budget--;
+ }
for (i = 0; i < ena_rx_ctx.descs; i++) {
rx_ring->free_ids[next_to_clean] =
rx_ring->ena_bufs[i].req_id;
@@ -1095,6 +1647,8 @@ static int ena_clean_rx_irq(struct ena_ring *rx_ring, struct napi_struct *napi,
ENA_RX_RING_IDX_NEXT(next_to_clean,
rx_ring->ring_size);
}
+ if (xdp_verdict == XDP_TX || xdp_verdict == XDP_DROP)
+ continue;
break;
}
@@ -1188,9 +1742,14 @@ static void ena_unmask_interrupt(struct ena_ring *tx_ring,
struct ena_ring *rx_ring)
{
struct ena_eth_io_intr_reg intr_reg;
- u32 rx_interval = ena_com_get_adaptive_moderation_enabled(rx_ring->ena_dev) ?
- rx_ring->smoothed_interval :
- ena_com_get_nonadaptive_moderation_interval_rx(rx_ring->ena_dev);
+ u32 rx_interval = 0;
+ /* Rx ring can be NULL for XDP tx queues, which don't have an
+ * accompanying rx_ring pair.
+ */
+ if (rx_ring)
+ rx_interval = ena_com_get_adaptive_moderation_enabled(rx_ring->ena_dev) ?
+ rx_ring->smoothed_interval :
+ ena_com_get_nonadaptive_moderation_interval_rx(rx_ring->ena_dev);
/* Update intr register: rx intr delay,
* tx intr delay and interrupt unmask
@@ -1203,8 +1762,9 @@ static void ena_unmask_interrupt(struct ena_ring *tx_ring,
/* It is a shared MSI-X.
* Tx and Rx CQ have pointer to it.
* So we use one of them to reach the intr reg
+ * The Tx ring is used because the rx_ring is NULL for XDP queues
*/
- ena_com_unmask_intr(rx_ring->ena_com_io_cq, &intr_reg);
+ ena_com_unmask_intr(tx_ring->ena_com_io_cq, &intr_reg);
}
static void ena_update_ring_numa_node(struct ena_ring *tx_ring,
@@ -1222,22 +1782,82 @@ static void ena_update_ring_numa_node(struct ena_ring *tx_ring,
if (numa_node != NUMA_NO_NODE) {
ena_com_update_numa_node(tx_ring->ena_com_io_cq, numa_node);
- ena_com_update_numa_node(rx_ring->ena_com_io_cq, numa_node);
+ if (rx_ring)
+ ena_com_update_numa_node(rx_ring->ena_com_io_cq,
+ numa_node);
}
tx_ring->cpu = cpu;
- rx_ring->cpu = cpu;
+ if (rx_ring)
+ rx_ring->cpu = cpu;
return;
out:
put_cpu();
}
+static int ena_clean_xdp_irq(struct ena_ring *xdp_ring, u32 budget)
+{
+ u32 total_done = 0;
+ u16 next_to_clean;
+ u32 tx_bytes = 0;
+ int tx_pkts = 0;
+ u16 req_id;
+ int rc;
+
+ if (unlikely(!xdp_ring))
+ return 0;
+ next_to_clean = xdp_ring->next_to_clean;
+
+ while (tx_pkts < budget) {
+ struct ena_tx_buffer *tx_info;
+ struct xdp_frame *xdpf;
+
+ rc = ena_com_tx_comp_req_id_get(xdp_ring->ena_com_io_cq,
+ &req_id);
+ if (rc)
+ break;
+
+ rc = validate_xdp_req_id(xdp_ring, req_id);
+ if (rc)
+ break;
+
+ tx_info = &xdp_ring->tx_buffer_info[req_id];
+ xdpf = tx_info->xdpf;
+
+ tx_info->xdpf = NULL;
+ tx_info->last_jiffies = 0;
+ ena_unmap_tx_buff(xdp_ring, tx_info);
+
+ netif_dbg(xdp_ring->adapter, tx_done, xdp_ring->netdev,
+ "tx_poll: q %d skb %p completed\n", xdp_ring->qid,
+ xdpf);
+
+ tx_bytes += xdpf->len;
+ tx_pkts++;
+ total_done += tx_info->tx_descs;
+
+ __free_page(tx_info->xdp_rx_page);
+ xdp_ring->free_ids[next_to_clean] = req_id;
+ next_to_clean = ENA_TX_RING_IDX_NEXT(next_to_clean,
+ xdp_ring->ring_size);
+ }
+
+ xdp_ring->next_to_clean = next_to_clean;
+ ena_com_comp_ack(xdp_ring->ena_com_io_sq, total_done);
+ ena_com_update_dev_comp_head(xdp_ring->ena_com_io_cq);
+
+ netif_dbg(xdp_ring->adapter, tx_done, xdp_ring->netdev,
+ "tx_poll: q %d done. total pkts: %d\n",
+ xdp_ring->qid, tx_pkts);
+
+ return tx_pkts;
+}
+
static int ena_io_poll(struct napi_struct *napi, int budget)
{
struct ena_napi *ena_napi = container_of(napi, struct ena_napi, napi);
struct ena_ring *tx_ring, *rx_ring;
-
int tx_work_done;
int rx_work_done = 0;
int tx_budget;
@@ -1247,6 +1867,9 @@ static int ena_io_poll(struct napi_struct *napi, int budget)
tx_ring = ena_napi->tx_ring;
rx_ring = ena_napi->rx_ring;
+ tx_ring->first_interrupt = ena_napi->first_interrupt;
+ rx_ring->first_interrupt = ena_napi->first_interrupt;
+
tx_budget = tx_ring->ring_size / ENA_TX_POLL_BUDGET_DIVIDER;
if (!test_bit(ENA_FLAG_DEV_UP, &tx_ring->adapter->flags) ||
@@ -1322,8 +1945,7 @@ static irqreturn_t ena_intr_msix_io(int irq, void *data)
{
struct ena_napi *ena_napi = data;
- ena_napi->tx_ring->first_interrupt = true;
- ena_napi->rx_ring->first_interrupt = true;
+ ena_napi->first_interrupt = true;
napi_schedule_irqoff(&ena_napi->napi);
@@ -1398,10 +2020,12 @@ static void ena_setup_io_intr(struct ena_adapter *adapter)
{
struct net_device *netdev;
int irq_idx, i, cpu;
+ int io_queue_count;
netdev = adapter->netdev;
+ io_queue_count = adapter->num_io_queues + adapter->xdp_num_queues;
- for (i = 0; i < adapter->num_io_queues; i++) {
+ for (i = 0; i < io_queue_count; i++) {
irq_idx = ENA_IO_IRQ_IDX(i);
cpu = i % num_online_cpus();
@@ -1529,45 +2153,64 @@ static void ena_disable_io_intr_sync(struct ena_adapter *adapter)
synchronize_irq(adapter->irq_tbl[i].vector);
}
-static void ena_del_napi(struct ena_adapter *adapter)
+static void ena_del_napi_in_range(struct ena_adapter *adapter,
+ int first_index,
+ int count)
{
int i;
- for (i = 0; i < adapter->num_io_queues; i++)
- netif_napi_del(&adapter->ena_napi[i].napi);
+ for (i = first_index; i < first_index + count; i++) {
+ /* Check if napi was initialized before */
+ if (!ENA_IS_XDP_INDEX(adapter, i) ||
+ adapter->ena_napi[i].xdp_ring)
+ netif_napi_del(&adapter->ena_napi[i].napi);
+ else
+ WARN_ON(ENA_IS_XDP_INDEX(adapter, i) &&
+ adapter->ena_napi[i].xdp_ring);
+ }
}
-static void ena_init_napi(struct ena_adapter *adapter)
+static void ena_init_napi_in_range(struct ena_adapter *adapter,
+ int first_index, int count)
{
- struct ena_napi *napi;
+ struct ena_napi *napi = NULL;
int i;
- for (i = 0; i < adapter->num_io_queues; i++) {
+ for (i = first_index; i < first_index + count; i++) {
napi = &adapter->ena_napi[i];
netif_napi_add(adapter->netdev,
&adapter->ena_napi[i].napi,
- ena_io_poll,
+ ENA_IS_XDP_INDEX(adapter, i) ? ena_xdp_io_poll : ena_io_poll,
ENA_NAPI_BUDGET);
- napi->rx_ring = &adapter->rx_ring[i];
- napi->tx_ring = &adapter->tx_ring[i];
+
+ if (!ENA_IS_XDP_INDEX(adapter, i)) {
+ napi->rx_ring = &adapter->rx_ring[i];
+ napi->tx_ring = &adapter->tx_ring[i];
+ } else {
+ napi->xdp_ring = &adapter->tx_ring[i];
+ }
napi->qid = i;
}
}
-static void ena_napi_disable_all(struct ena_adapter *adapter)
+static void ena_napi_disable_in_range(struct ena_adapter *adapter,
+ int first_index,
+ int count)
{
int i;
- for (i = 0; i < adapter->num_io_queues; i++)
+ for (i = first_index; i < first_index + count; i++)
napi_disable(&adapter->ena_napi[i].napi);
}
-static void ena_napi_enable_all(struct ena_adapter *adapter)
+static void ena_napi_enable_in_range(struct ena_adapter *adapter,
+ int first_index,
+ int count)
{
int i;
- for (i = 0; i < adapter->num_io_queues; i++)
+ for (i = first_index; i < first_index + count; i++)
napi_enable(&adapter->ena_napi[i].napi);
}
@@ -1582,7 +2225,7 @@ static int ena_rss_configure(struct ena_adapter *adapter)
rc = ena_rss_init_default(adapter);
if (rc && (rc != -EOPNOTSUPP)) {
netif_err(adapter, ifup, adapter->netdev,
- "Failed to init RSS rc: %d\n", rc);
+ "Failed to init RSS rc: %d\n", rc);
return rc;
}
}
@@ -1620,7 +2263,9 @@ static int ena_up_complete(struct ena_adapter *adapter)
/* enable transmits */
netif_tx_start_all_queues(adapter->netdev);
- ena_napi_enable_all(adapter);
+ ena_napi_enable_in_range(adapter,
+ 0,
+ adapter->xdp_num_queues + adapter->num_io_queues);
return 0;
}
@@ -1653,7 +2298,7 @@ static int ena_create_io_tx_queue(struct ena_adapter *adapter, int qid)
if (rc) {
netif_err(adapter, ifup, adapter->netdev,
"Failed to create I/O TX queue num %d rc: %d\n",
- qid, rc);
+ qid, rc);
return rc;
}
@@ -1672,12 +2317,13 @@ static int ena_create_io_tx_queue(struct ena_adapter *adapter, int qid)
return rc;
}
-static int ena_create_all_io_tx_queues(struct ena_adapter *adapter)
+static int ena_create_io_tx_queues_in_range(struct ena_adapter *adapter,
+ int first_index, int count)
{
struct ena_com_dev *ena_dev = adapter->ena_dev;
int rc, i;
- for (i = 0; i < adapter->num_io_queues; i++) {
+ for (i = first_index; i < first_index + count; i++) {
rc = ena_create_io_tx_queue(adapter, i);
if (rc)
goto create_err;
@@ -1686,7 +2332,7 @@ static int ena_create_all_io_tx_queues(struct ena_adapter *adapter)
return 0;
create_err:
- while (i--)
+ while (i-- > first_index)
ena_com_destroy_io_queue(ena_dev, ENA_IO_TXQ_IDX(i));
return rc;
@@ -1731,13 +2377,15 @@ static int ena_create_io_rx_queue(struct ena_adapter *adapter, int qid)
netif_err(adapter, ifup, adapter->netdev,
"Failed to get RX queue handlers. RX queue num %d rc: %d\n",
qid, rc);
- ena_com_destroy_io_queue(ena_dev, ena_qid);
- return rc;
+ goto err;
}
ena_com_update_numa_node(rx_ring->ena_com_io_cq, ctx.numa_node);
return rc;
+err:
+ ena_com_destroy_io_queue(ena_dev, ena_qid);
+ return rc;
}
static int ena_create_all_io_rx_queues(struct ena_adapter *adapter)
@@ -1764,7 +2412,8 @@ create_err:
}
static void set_io_rings_size(struct ena_adapter *adapter,
- int new_tx_size, int new_rx_size)
+ int new_tx_size,
+ int new_rx_size)
{
int i;
@@ -1798,14 +2447,24 @@ static int create_queues_with_size_backoff(struct ena_adapter *adapter)
* ones due to past queue allocation failures.
*/
set_io_rings_size(adapter, adapter->requested_tx_ring_size,
- adapter->requested_rx_ring_size);
+ adapter->requested_rx_ring_size);
while (1) {
- rc = ena_setup_all_tx_resources(adapter);
+ if (ena_xdp_present(adapter)) {
+ rc = ena_setup_and_create_all_xdp_queues(adapter);
+
+ if (rc)
+ goto err_setup_tx;
+ }
+ rc = ena_setup_tx_resources_in_range(adapter,
+ 0,
+ adapter->num_io_queues);
if (rc)
goto err_setup_tx;
- rc = ena_create_all_io_tx_queues(adapter);
+ rc = ena_create_io_tx_queues_in_range(adapter,
+ 0,
+ adapter->num_io_queues);
if (rc)
goto err_create_tx_queues;
@@ -1829,7 +2488,7 @@ err_setup_tx:
if (rc != -ENOMEM) {
netif_err(adapter, ifup, adapter->netdev,
"Queue creation failed with error code %d\n",
- rc);
+ rc);
return rc;
}
@@ -1852,7 +2511,7 @@ err_setup_tx:
new_rx_ring_size = cur_rx_ring_size / 2;
if (new_tx_ring_size < ENA_MIN_RING_SIZE ||
- new_rx_ring_size < ENA_MIN_RING_SIZE) {
+ new_rx_ring_size < ENA_MIN_RING_SIZE) {
netif_err(adapter, ifup, adapter->netdev,
"Queue creation failed with the smallest possible queue size of %d for both queues. Not retrying with smaller queues\n",
ENA_MIN_RING_SIZE);
@@ -1871,10 +2530,11 @@ err_setup_tx:
static int ena_up(struct ena_adapter *adapter)
{
- int rc, i;
+ int io_queue_count, rc, i;
netdev_dbg(adapter->netdev, "%s\n", __func__);
+ io_queue_count = adapter->num_io_queues + adapter->xdp_num_queues;
ena_setup_io_intr(adapter);
/* napi poll functions should be initialized before running
@@ -1882,7 +2542,7 @@ static int ena_up(struct ena_adapter *adapter)
* interrupt, causing the ISR to fire immediately while the poll
* function wasn't set yet, causing a null dereference
*/
- ena_init_napi(adapter);
+ ena_init_napi_in_range(adapter, 0, io_queue_count);
rc = ena_request_io_irq(adapter);
if (rc)
@@ -1913,7 +2573,7 @@ static int ena_up(struct ena_adapter *adapter)
/* schedule napi in case we had pending packets
* from the last time we disable napi
*/
- for (i = 0; i < adapter->num_io_queues; i++)
+ for (i = 0; i < io_queue_count; i++)
napi_schedule(&adapter->ena_napi[i].napi);
return rc;
@@ -1926,13 +2586,15 @@ err_up:
err_create_queues_with_backoff:
ena_free_io_irq(adapter);
err_req_irq:
- ena_del_napi(adapter);
+ ena_del_napi_in_range(adapter, 0, io_queue_count);
return rc;
}
static void ena_down(struct ena_adapter *adapter)
{
+ int io_queue_count = adapter->num_io_queues + adapter->xdp_num_queues;
+
netif_info(adapter, ifdown, adapter->netdev, "%s\n", __func__);
clear_bit(ENA_FLAG_DEV_UP, &adapter->flags);
@@ -1945,7 +2607,7 @@ static void ena_down(struct ena_adapter *adapter)
netif_tx_disable(adapter->netdev);
/* After this point the napi handler won't enable the tx queue */
- ena_napi_disable_all(adapter);
+ ena_napi_disable_in_range(adapter, 0, io_queue_count);
/* After destroy the queue there won't be any new interrupts */
@@ -1963,7 +2625,7 @@ static void ena_down(struct ena_adapter *adapter)
ena_disable_io_intr_sync(adapter);
ena_free_io_irq(adapter);
- ena_del_napi(adapter);
+ ena_del_napi_in_range(adapter, 0, io_queue_count);
ena_free_all_tx_bufs(adapter);
ena_free_all_rx_bufs(adapter);
@@ -2053,23 +2715,47 @@ int ena_update_queue_sizes(struct ena_adapter *adapter,
ena_close(adapter->netdev);
adapter->requested_tx_ring_size = new_tx_size;
adapter->requested_rx_ring_size = new_rx_size;
- ena_init_io_rings(adapter);
+ ena_init_io_rings(adapter,
+ 0,
+ adapter->xdp_num_queues +
+ adapter->num_io_queues);
return dev_was_up ? ena_up(adapter) : 0;
}
int ena_update_queue_count(struct ena_adapter *adapter, u32 new_channel_count)
{
struct ena_com_dev *ena_dev = adapter->ena_dev;
+ int prev_channel_count;
bool dev_was_up;
dev_was_up = test_bit(ENA_FLAG_DEV_UP, &adapter->flags);
ena_close(adapter->netdev);
+ prev_channel_count = adapter->num_io_queues;
adapter->num_io_queues = new_channel_count;
+ if (ena_xdp_present(adapter) &&
+ ena_xdp_allowed(adapter) == ENA_XDP_ALLOWED) {
+ adapter->xdp_first_ring = new_channel_count;
+ adapter->xdp_num_queues = new_channel_count;
+ if (prev_channel_count > new_channel_count)
+ ena_xdp_exchange_program_rx_in_range(adapter,
+ NULL,
+ new_channel_count,
+ prev_channel_count);
+ else
+ ena_xdp_exchange_program_rx_in_range(adapter,
+ adapter->xdp_bpf_prog,
+ prev_channel_count,
+ new_channel_count);
+ }
+
/* We need to destroy the rss table so that the indirection
* table will be reinitialized by ena_up()
*/
ena_com_rss_destroy(ena_dev);
- ena_init_io_rings(adapter);
+ ena_init_io_rings(adapter,
+ 0,
+ adapter->xdp_num_queues +
+ adapter->num_io_queues);
return dev_was_up ? ena_open(adapter->netdev) : 0;
}
@@ -2253,7 +2939,7 @@ error_report_dma_error:
tx_info->skb = NULL;
tx_info->num_of_bufs += i;
- ena_unmap_tx_skb(tx_ring, tx_info);
+ ena_unmap_tx_buff(tx_ring, tx_info);
return -EINVAL;
}
@@ -2268,7 +2954,7 @@ static netdev_tx_t ena_start_xmit(struct sk_buff *skb, struct net_device *dev)
struct netdev_queue *txq;
void *push_hdr;
u16 next_to_use, req_id, header_len;
- int qid, rc, nb_hw_desc;
+ int qid, rc;
netif_dbg(adapter, tx_queued, dev, "%s skb %p\n", __func__, skb);
/* Determine which tx ring we will be placed on */
@@ -2303,50 +2989,17 @@ static netdev_tx_t ena_start_xmit(struct sk_buff *skb, struct net_device *dev)
/* set flags and meta data */
ena_tx_csum(&ena_tx_ctx, skb);
- if (unlikely(ena_com_is_doorbell_needed(tx_ring->ena_com_io_sq, &ena_tx_ctx))) {
- netif_dbg(adapter, tx_queued, dev,
- "llq tx max burst size of queue %d achieved, writing doorbell to send burst\n",
- qid);
- ena_com_write_sq_doorbell(tx_ring->ena_com_io_sq);
- }
-
- /* prepare the packet's descriptors to dma engine */
- rc = ena_com_prepare_tx(tx_ring->ena_com_io_sq, &ena_tx_ctx,
- &nb_hw_desc);
-
- /* ena_com_prepare_tx() can't fail due to overflow of tx queue,
- * since the number of free descriptors in the queue is checked
- * after sending the previous packet. In case there isn't enough
- * space in the queue for the next packet, it is stopped
- * until there is again enough available space in the queue.
- * All other failure reasons of ena_com_prepare_tx() are fatal
- * and therefore require a device reset.
- */
- if (unlikely(rc)) {
- netif_err(adapter, tx_queued, dev,
- "failed to prepare tx bufs\n");
- u64_stats_update_begin(&tx_ring->syncp);
- tx_ring->tx_stats.prepare_ctx_err++;
- u64_stats_update_end(&tx_ring->syncp);
- adapter->reset_reason = ENA_REGS_RESET_DRIVER_INVALID_STATE;
- set_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags);
+ rc = ena_xmit_common(dev,
+ tx_ring,
+ tx_info,
+ &ena_tx_ctx,
+ next_to_use,
+ skb->len);
+ if (rc)
goto error_unmap_dma;
- }
netdev_tx_sent_queue(txq, skb->len);
- u64_stats_update_begin(&tx_ring->syncp);
- tx_ring->tx_stats.cnt++;
- tx_ring->tx_stats.bytes += skb->len;
- u64_stats_update_end(&tx_ring->syncp);
-
- tx_info->tx_descs = nb_hw_desc;
- tx_info->last_jiffies = jiffies;
- tx_info->print_once = 0;
-
- tx_ring->next_to_use = ENA_TX_RING_IDX_NEXT(next_to_use,
- tx_ring->ring_size);
-
/* stop the queue when no more space available, the packet can have up
* to sgl_size + 2. one for the meta descriptor and one for header
* (if the header is larger than tx_max_header_size).
@@ -2393,7 +3046,7 @@ static netdev_tx_t ena_start_xmit(struct sk_buff *skb, struct net_device *dev)
return NETDEV_TX_OK;
error_unmap_dma:
- ena_unmap_tx_skb(tx_ring, tx_info);
+ ena_unmap_tx_buff(tx_ring, tx_info);
tx_info->skb = NULL;
error_drop_packet:
@@ -2572,6 +3225,7 @@ static const struct net_device_ops ena_netdev_ops = {
.ndo_change_mtu = ena_change_mtu,
.ndo_set_mac_address = NULL,
.ndo_validate_addr = eth_validate_addr,
+ .ndo_bpf = ena_xdp,
};
static int ena_device_validate_params(struct ena_adapter *adapter,
@@ -2951,7 +3605,9 @@ static void check_for_missing_completions(struct ena_adapter *adapter)
struct ena_ring *tx_ring;
struct ena_ring *rx_ring;
int i, budget, rc;
+ int io_queue_count;
+ io_queue_count = adapter->xdp_num_queues + adapter->num_io_queues;
/* Make sure the driver doesn't turn the device in other process */
smp_rmb();
@@ -2966,7 +3622,7 @@ static void check_for_missing_completions(struct ena_adapter *adapter)
budget = ENA_MONITORED_TX_QUEUES;
- for (i = adapter->last_monitored_tx_qid; i < adapter->num_io_queues; i++) {
+ for (i = adapter->last_monitored_tx_qid; i < io_queue_count; i++) {
tx_ring = &adapter->tx_ring[i];
rx_ring = &adapter->rx_ring[i];
@@ -2974,7 +3630,8 @@ static void check_for_missing_completions(struct ena_adapter *adapter)
if (unlikely(rc))
return;
- rc = check_for_rx_interrupt_queue(adapter, rx_ring);
+ rc = !ENA_IS_XDP_INDEX(adapter, i) ?
+ check_for_rx_interrupt_queue(adapter, rx_ring) : 0;
if (unlikely(rc))
return;
@@ -2983,7 +3640,7 @@ static void check_for_missing_completions(struct ena_adapter *adapter)
break;
}
- adapter->last_monitored_tx_qid = i % adapter->num_io_queues;
+ adapter->last_monitored_tx_qid = i % io_queue_count;
}
/* trigger napi schedule after 2 consecutive detections */
@@ -3560,6 +4217,9 @@ static int ena_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
adapter->num_io_queues = max_num_io_queues;
adapter->max_num_io_queues = max_num_io_queues;
+ adapter->xdp_first_ring = 0;
+ adapter->xdp_num_queues = 0;
+
adapter->last_monitored_tx_qid = 0;
adapter->rx_copybreak = ENA_DEFAULT_RX_COPYBREAK;
@@ -3573,7 +4233,10 @@ static int ena_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
"Failed to query interrupt moderation feature\n");
goto err_netdev_destroy;
}
- ena_init_io_rings(adapter);
+ ena_init_io_rings(adapter,
+ 0,
+ adapter->xdp_num_queues +
+ adapter->num_io_queues);
netdev->netdev_ops = &ena_netdev_ops;
netdev->watchdog_timeo = TX_TIMEOUT;
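Taken together, the ena_netdev.c changes append the XDP TX rings to the existing tx_ring array: indices [0, num_io_queues) stay paired with RX queues, while [num_io_queues, num_io_queues + xdp_num_queues) carry XDP_TX traffic only and have no rx_ring (hence the NULL checks added to ena_unmask_interrupt() and ena_update_ring_numa_node()). A sketch restating that index mapping with hypothetical helpers:

    #include "ena_netdev.h"

    static inline u32 foo_xdp_qid(struct ena_adapter *adapter, u32 rx_qid)
    {
            /* RX queue i bounces XDP_TX frames via TX ring
             * num_io_queues + i, as in ena_xdp_execute() above
             */
            return rx_qid + adapter->num_io_queues;
    }

    static inline bool foo_is_xdp_ring(struct ena_adapter *adapter, u32 qid)
    {
            return qid >= adapter->xdp_first_ring &&
                   qid <  adapter->xdp_first_ring + adapter->xdp_num_queues;
    }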
diff --git a/drivers/net/ethernet/amazon/ena/ena_netdev.h b/drivers/net/ethernet/amazon/ena/ena_netdev.h
index bffd778f2ce3..094324fd0edc 100644
--- a/drivers/net/ethernet/amazon/ena/ena_netdev.h
+++ b/drivers/net/ethernet/amazon/ena/ena_netdev.h
@@ -36,6 +36,7 @@
#include <linux/bitops.h>
#include <linux/dim.h>
#include <linux/etherdevice.h>
+#include <linux/if_vlan.h>
#include <linux/inetdevice.h>
#include <linux/interrupt.h>
#include <linux/netdevice.h>
@@ -142,6 +143,18 @@
#define ENA_MMIO_DISABLE_REG_READ BIT(0)
+/* The max XDP MTU is the buffer size we share with the device
+ * (ENA_PAGE_SIZE) minus the overhead of the ethernet header, which can
+ * include a VLAN tag, the frame check sequence (FCS), and the XDP headroom.
+ */
+
+#define ENA_XDP_MAX_MTU (ENA_PAGE_SIZE - ETH_HLEN - ETH_FCS_LEN - \
+ VLAN_HLEN - XDP_PACKET_HEADROOM)
+
+#define ENA_IS_XDP_INDEX(adapter, index) (((index) >= (adapter)->xdp_first_ring) && \
+ ((index) < (adapter)->xdp_first_ring + (adapter)->xdp_num_queues))
+
struct ena_irq {
irq_handler_t handler;
void *data;
@@ -155,6 +168,8 @@ struct ena_napi {
struct napi_struct napi ____cacheline_aligned;
struct ena_ring *tx_ring;
struct ena_ring *rx_ring;
+ struct ena_ring *xdp_ring;
+ bool first_interrupt;
u32 qid;
struct dim dim;
};
@@ -180,6 +195,17 @@ struct ena_tx_buffer {
/* num of buffers used by this skb */
u32 num_of_bufs;
+ /* XDP buffer structure which is used for sending packets in
+ * the xdp queues
+ */
+ struct xdp_frame *xdpf;
+ /* The rx page for the rx buffer that was received in rx and
+ * retransmitted on xdp tx queues as a result of an XDP_TX action.
+ * We need to free the page once we have finished cleaning the buffer
+ * in clean_xdp_irq()
+ */
+ struct page *xdp_rx_page;
+
/* Indicate if bufs[0] map the linear data of the skb. */
u8 map_linear_data;
@@ -258,10 +284,13 @@ struct ena_ring {
struct ena_adapter *adapter;
struct ena_com_io_cq *ena_com_io_cq;
struct ena_com_io_sq *ena_com_io_sq;
+ struct bpf_prog *xdp_bpf_prog;
+ struct xdp_rxq_info xdp_rxq;
u16 next_to_use;
u16 next_to_clean;
u16 rx_copybreak;
+ u16 rx_headroom;
u16 qid;
u16 mtu;
u16 sgl_size;
@@ -379,6 +408,10 @@ struct ena_adapter {
u32 last_monitored_tx_qid;
enum ena_regs_reset_reason_types reset_reason;
+
+ struct bpf_prog *xdp_bpf_prog;
+ u32 xdp_first_ring;
+ u32 xdp_num_queues;
};
void ena_set_ethtool_ops(struct net_device *netdev);
@@ -390,8 +423,48 @@ void ena_dump_stats_to_buf(struct ena_adapter *adapter, u8 *buf);
int ena_update_queue_sizes(struct ena_adapter *adapter,
u32 new_tx_size,
u32 new_rx_size);
+
int ena_update_queue_count(struct ena_adapter *adapter, u32 new_channel_count);
int ena_get_sset_count(struct net_device *netdev, int sset);
+enum ena_xdp_errors_t {
+ ENA_XDP_ALLOWED = 0,
+ ENA_XDP_CURRENT_MTU_TOO_LARGE,
+ ENA_XDP_NO_ENOUGH_QUEUES,
+};
+
+static inline bool ena_xdp_queues_present(struct ena_adapter *adapter)
+{
+ return adapter->xdp_first_ring != 0;
+}
+
+static inline bool ena_xdp_present(struct ena_adapter *adapter)
+{
+ return !!adapter->xdp_bpf_prog;
+}
+
+static inline bool ena_xdp_present_ring(struct ena_ring *ring)
+{
+ return !!ring->xdp_bpf_prog;
+}
+
+static inline int ena_xdp_legal_queue_count(struct ena_adapter *adapter,
+ u32 queues)
+{
+ return 2 * queues <= adapter->max_num_io_queues;
+}
+
+static inline enum ena_xdp_errors_t ena_xdp_allowed(struct ena_adapter *adapter)
+{
+ enum ena_xdp_errors_t rc = ENA_XDP_ALLOWED;
+
+ if (adapter->netdev->mtu > ENA_XDP_MAX_MTU)
+ rc = ENA_XDP_CURRENT_MTU_TOO_LARGE;
+ else if (!ena_xdp_legal_queue_count(adapter, adapter->num_io_queues))
+ rc = ENA_XDP_NO_ENOUGH_QUEUES;
+
+ return rc;
+}
+
#endif /* !(ENA_H) */
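Plugging in typical values makes the MTU ceiling concrete; a worked example assuming 4 KiB pages (ENA_PAGE_SIZE = 4096) and the default XDP_PACKET_HEADROOM of 256 bytes:

    /* ENA_XDP_MAX_MTU = ENA_PAGE_SIZE - ETH_HLEN - ETH_FCS_LEN
     *                   - VLAN_HLEN - XDP_PACKET_HEADROOM
     *                 = 4096 - 14 - 4 - 4 - 256
     *                 = 3818 bytes
     *
     * ena_xdp_allowed() therefore refuses to attach a program while
     * netdev->mtu > 3818, and ena_xdp_set() caps max_mtu at 3818 for as
     * long as one is attached.
     */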
diff --git a/drivers/net/ethernet/amd/7990.c b/drivers/net/ethernet/amd/7990.c
index ab30761003da..cf3562e82ca9 100644
--- a/drivers/net/ethernet/amd/7990.c
+++ b/drivers/net/ethernet/amd/7990.c
@@ -527,7 +527,7 @@ int lance_close(struct net_device *dev)
}
EXPORT_SYMBOL_GPL(lance_close);
-void lance_tx_timeout(struct net_device *dev)
+void lance_tx_timeout(struct net_device *dev, unsigned int txqueue)
{
printk("lance_tx_timeout\n");
lance_reset(dev);
diff --git a/drivers/net/ethernet/amd/7990.h b/drivers/net/ethernet/amd/7990.h
index 741cdc392c6b..8266b3c1fefc 100644
--- a/drivers/net/ethernet/amd/7990.h
+++ b/drivers/net/ethernet/amd/7990.h
@@ -243,7 +243,7 @@ int lance_open(struct net_device *dev);
int lance_close(struct net_device *dev);
int lance_start_xmit(struct sk_buff *skb, struct net_device *dev);
void lance_set_multicast(struct net_device *dev);
-void lance_tx_timeout(struct net_device *dev);
+void lance_tx_timeout(struct net_device *dev, unsigned int txqueue);
#ifdef CONFIG_NET_POLL_CONTROLLER
void lance_poll(struct net_device *dev);
#endif
diff --git a/drivers/net/ethernet/amd/a2065.c b/drivers/net/ethernet/amd/a2065.c
index 212fe72a190b..2f808dbc8b0e 100644
--- a/drivers/net/ethernet/amd/a2065.c
+++ b/drivers/net/ethernet/amd/a2065.c
@@ -118,10 +118,6 @@ struct lance_private {
int auto_select; /* cable-selection by carrier */
unsigned short busmaster_regval;
-#ifdef CONFIG_SUNLANCE
- struct Linux_SBus_DMA *ledma; /* if set this points to ledma and arch=4m */
- int burst_sizes; /* ledma SBus burst sizes */
-#endif
struct timer_list multicast_timer;
struct net_device *dev;
};
@@ -522,7 +518,7 @@ static inline int lance_reset(struct net_device *dev)
return status;
}
-static void lance_tx_timeout(struct net_device *dev)
+static void lance_tx_timeout(struct net_device *dev, unsigned int txqueue)
{
struct lance_private *lp = netdev_priv(dev);
volatile struct lance_regs *ll = lp->ll;
@@ -551,11 +547,10 @@ static netdev_tx_t lance_start_xmit(struct sk_buff *skb,
if (!lance_tx_buffs_avail(lp))
goto out_free;
-#ifdef DEBUG
/* dump the packet */
- print_hex_dump(KERN_DEBUG, "skb->data: ", DUMP_PREFIX_NONE,
- 16, 1, skb->data, 64, true);
-#endif
+ print_hex_dump_debug("skb->data: ", DUMP_PREFIX_NONE, 16, 1, skb->data,
+ 64, true);
+
entry = lp->tx_new & lp->tx_ring_mod_mask;
ib->btx_ring[entry].length = (-skblen) | 0xf000;
ib->btx_ring[entry].misc = 0;
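The a2065 hunk above also swaps an #ifdef DEBUG block for print_hex_dump_debug(), which compiles away when neither DEBUG nor CONFIG_DYNAMIC_DEBUG is set and can otherwise be toggled at runtime instead of by rebuilding. A usage sketch with a hypothetical wrapper:

    #include <linux/printk.h>
    #include <linux/skbuff.h>

    static void foo_dump_tx(struct sk_buff *skb)
    {
            /* 64 bytes, 16 per row, one byte per group, ASCII column */
            print_hex_dump_debug("skb->data: ", DUMP_PREFIX_NONE, 16, 1,
                                 skb->data, 64, true);
    }

    /* with CONFIG_DYNAMIC_DEBUG, enable the dump at runtime via:
     *   echo 'file a2065.c +p' > /sys/kernel/debug/dynamic_debug/control
     */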
diff --git a/drivers/net/ethernet/amd/am79c961a.c b/drivers/net/ethernet/amd/am79c961a.c
index 0842da492a64..1c53408f5d47 100644
--- a/drivers/net/ethernet/amd/am79c961a.c
+++ b/drivers/net/ethernet/amd/am79c961a.c
@@ -422,7 +422,7 @@ static void am79c961_setmulticastlist (struct net_device *dev)
spin_unlock_irqrestore(&priv->chip_lock, flags);
}
-static void am79c961_timeout(struct net_device *dev)
+static void am79c961_timeout(struct net_device *dev, unsigned int txqueue)
{
printk(KERN_WARNING "%s: transmit timed out, network cable problem?\n",
dev->name);
diff --git a/drivers/net/ethernet/amd/amd8111e.c b/drivers/net/ethernet/amd/amd8111e.c
index 573e88fc8ede..0f3b743425e8 100644
--- a/drivers/net/ethernet/amd/amd8111e.c
+++ b/drivers/net/ethernet/amd/amd8111e.c
@@ -1569,7 +1569,7 @@ static int amd8111e_enable_link_change(struct amd8111e_priv *lp)
* failed or the interface is locked up. This function will reinitialize
* the hardware.
*/
-static void amd8111e_tx_timeout(struct net_device *dev)
+static void amd8111e_tx_timeout(struct net_device *dev, unsigned int txqueue)
{
struct amd8111e_priv *lp = netdev_priv(dev);
int err;
diff --git a/drivers/net/ethernet/amd/ariadne.c b/drivers/net/ethernet/amd/ariadne.c
index 4b6a5cb85dd2..5e0f645f5bde 100644
--- a/drivers/net/ethernet/amd/ariadne.c
+++ b/drivers/net/ethernet/amd/ariadne.c
@@ -530,7 +530,7 @@ static inline void ariadne_reset(struct net_device *dev)
netif_start_queue(dev);
}
-static void ariadne_tx_timeout(struct net_device *dev)
+static void ariadne_tx_timeout(struct net_device *dev, unsigned int txqueue)
{
volatile struct Am79C960 *lance = (struct Am79C960 *)dev->base_addr;
diff --git a/drivers/net/ethernet/amd/atarilance.c b/drivers/net/ethernet/amd/atarilance.c
index d3d44e07afbc..4e36122609a3 100644
--- a/drivers/net/ethernet/amd/atarilance.c
+++ b/drivers/net/ethernet/amd/atarilance.c
@@ -346,7 +346,7 @@ static int lance_rx( struct net_device *dev );
static int lance_close( struct net_device *dev );
static void set_multicast_list( struct net_device *dev );
static int lance_set_mac_address( struct net_device *dev, void *addr );
-static void lance_tx_timeout (struct net_device *dev);
+static void lance_tx_timeout (struct net_device *dev, unsigned int txqueue);
/************************* End of Prototypes **************************/
@@ -727,7 +727,7 @@ static void lance_init_ring( struct net_device *dev )
/* XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX */
-static void lance_tx_timeout (struct net_device *dev)
+static void lance_tx_timeout (struct net_device *dev, unsigned int txqueue)
{
struct lance_private *lp = netdev_priv(dev);
struct lance_ioreg *IO = lp->iobase;
diff --git a/drivers/net/ethernet/amd/au1000_eth.c b/drivers/net/ethernet/amd/au1000_eth.c
index 1793950f0582..089a4fbc61a0 100644
--- a/drivers/net/ethernet/amd/au1000_eth.c
+++ b/drivers/net/ethernet/amd/au1000_eth.c
@@ -1014,7 +1014,7 @@ static netdev_tx_t au1000_tx(struct sk_buff *skb, struct net_device *dev)
* The Tx ring has been full longer than the watchdog timeout
* value. The transmitter must be hung?
*/
-static void au1000_tx_timeout(struct net_device *dev)
+static void au1000_tx_timeout(struct net_device *dev, unsigned int txqueue)
{
netdev_err(dev, "au1000_tx_timeout: dev=%p\n", dev);
au1000_reset_mac(dev);
@@ -1053,23 +1053,12 @@ static void au1000_multicast_list(struct net_device *dev)
writel(reg, &aup->mac->control);
}
-static int au1000_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
-{
- if (!netif_running(dev))
- return -EINVAL;
-
- if (!dev->phydev)
- return -EINVAL; /* PHY not controllable */
-
- return phy_mii_ioctl(dev->phydev, rq, cmd);
-}
-
static const struct net_device_ops au1000_netdev_ops = {
.ndo_open = au1000_open,
.ndo_stop = au1000_close,
.ndo_start_xmit = au1000_tx,
.ndo_set_rx_mode = au1000_multicast_list,
- .ndo_do_ioctl = au1000_ioctl,
+ .ndo_do_ioctl = phy_do_ioctl_running,
.ndo_tx_timeout = au1000_tx_timeout,
.ndo_set_mac_address = eth_mac_addr,
.ndo_validate_addr = eth_validate_addr,
@@ -1161,7 +1150,7 @@ static int au1000_probe(struct platform_device *pdev)
/* aup->mac is the base address of the MAC's registers */
aup->mac = (struct mac_reg *)
- ioremap_nocache(base->start, resource_size(base));
+ ioremap(base->start, resource_size(base));
if (!aup->mac) {
dev_err(&pdev->dev, "failed to ioremap MAC registers\n");
err = -ENXIO;
@@ -1169,7 +1158,7 @@ static int au1000_probe(struct platform_device *pdev)
}
/* Setup some variables for quick register address access */
- aup->enable = (u32 *)ioremap_nocache(macen->start,
+ aup->enable = (u32 *)ioremap(macen->start,
resource_size(macen));
if (!aup->enable) {
dev_err(&pdev->dev, "failed to ioremap MAC enable register\n");
@@ -1178,7 +1167,7 @@ static int au1000_probe(struct platform_device *pdev)
}
aup->mac_id = pdev->id;
- aup->macdma = ioremap_nocache(macdma->start, resource_size(macdma));
+ aup->macdma = ioremap(macdma->start, resource_size(macdma));
if (!aup->macdma) {
dev_err(&pdev->dev, "failed to ioremap MACDMA registers\n");
err = -ENXIO;
diff --git a/drivers/net/ethernet/amd/declance.c b/drivers/net/ethernet/amd/declance.c
index dac4a2fcad6a..7282ce55ffb8 100644
--- a/drivers/net/ethernet/amd/declance.c
+++ b/drivers/net/ethernet/amd/declance.c
@@ -608,7 +608,7 @@ static int lance_rx(struct net_device *dev)
len = (*rds_ptr(rd, mblength, lp->type) & 0xfff) - 4;
skb = netdev_alloc_skb(dev, len + 2);
- if (skb == 0) {
+ if (!skb) {
dev->stats.rx_dropped++;
*rds_ptr(rd, mblength, lp->type) = 0;
*rds_ptr(rd, rmd1, lp->type) =
@@ -884,7 +884,7 @@ static inline int lance_reset(struct net_device *dev)
return status;
}
-static void lance_tx_timeout(struct net_device *dev)
+static void lance_tx_timeout(struct net_device *dev, unsigned int txqueue)
{
struct lance_private *lp = netdev_priv(dev);
volatile struct lance_regs *ll = lp->ll;
diff --git a/drivers/net/ethernet/amd/lance.c b/drivers/net/ethernet/amd/lance.c
index f90b454b1642..aff44241988c 100644
--- a/drivers/net/ethernet/amd/lance.c
+++ b/drivers/net/ethernet/amd/lance.c
@@ -306,7 +306,7 @@ static irqreturn_t lance_interrupt(int irq, void *dev_id);
static int lance_close(struct net_device *dev);
static struct net_device_stats *lance_get_stats(struct net_device *dev);
static void set_multicast_list(struct net_device *dev);
-static void lance_tx_timeout (struct net_device *dev);
+static void lance_tx_timeout (struct net_device *dev, unsigned int txqueue);
@@ -913,7 +913,7 @@ lance_restart(struct net_device *dev, unsigned int csr0_bits, int must_reinit)
}
-static void lance_tx_timeout (struct net_device *dev)
+static void lance_tx_timeout (struct net_device *dev, unsigned int txqueue)
{
struct lance_private *lp = (struct lance_private *) dev->ml_priv;
int ioaddr = dev->base_addr;
diff --git a/drivers/net/ethernet/amd/ni65.c b/drivers/net/ethernet/amd/ni65.c
index c6c2a54c1121..c38edf6f03a3 100644
--- a/drivers/net/ethernet/amd/ni65.c
+++ b/drivers/net/ethernet/amd/ni65.c
@@ -254,7 +254,7 @@ static int ni65_lance_reinit(struct net_device *dev);
static void ni65_init_lance(struct priv *p,unsigned char*,int,int);
static netdev_tx_t ni65_send_packet(struct sk_buff *skb,
struct net_device *dev);
-static void ni65_timeout(struct net_device *dev);
+static void ni65_timeout(struct net_device *dev, unsigned int txqueue);
static int ni65_close(struct net_device *dev);
static int ni65_alloc_buffer(struct net_device *dev);
static void ni65_free_buffer(struct priv *p);
@@ -1133,7 +1133,7 @@ static void ni65_recv_intr(struct net_device *dev,int csr0)
* kick xmitter ..
*/
-static void ni65_timeout(struct net_device *dev)
+static void ni65_timeout(struct net_device *dev, unsigned int txqueue)
{
int i;
struct priv *p = dev->ml_priv;
diff --git a/drivers/net/ethernet/amd/nmclan_cs.c b/drivers/net/ethernet/amd/nmclan_cs.c
index 9c152d85840d..023aecf6ab30 100644
--- a/drivers/net/ethernet/amd/nmclan_cs.c
+++ b/drivers/net/ethernet/amd/nmclan_cs.c
@@ -407,7 +407,7 @@ static int mace_open(struct net_device *dev);
static int mace_close(struct net_device *dev);
static netdev_tx_t mace_start_xmit(struct sk_buff *skb,
struct net_device *dev);
-static void mace_tx_timeout(struct net_device *dev);
+static void mace_tx_timeout(struct net_device *dev, unsigned int txqueue);
static irqreturn_t mace_interrupt(int irq, void *dev_id);
static struct net_device_stats *mace_get_stats(struct net_device *dev);
static int mace_rx(struct net_device *dev, unsigned char RxCnt);
@@ -837,7 +837,7 @@ mace_start_xmit
failed, put skb back into a list."
---------------------------------------------------------------------------- */
-static void mace_tx_timeout(struct net_device *dev)
+static void mace_tx_timeout(struct net_device *dev, unsigned int txqueue)
{
mace_private *lp = netdev_priv(dev);
struct pcmcia_device *link = lp->p_dev;
diff --git a/drivers/net/ethernet/amd/pcnet32.c b/drivers/net/ethernet/amd/pcnet32.c
index f5ad12c10934..dc7d88227e76 100644
--- a/drivers/net/ethernet/amd/pcnet32.c
+++ b/drivers/net/ethernet/amd/pcnet32.c
@@ -314,7 +314,7 @@ static int pcnet32_open(struct net_device *);
static int pcnet32_init_ring(struct net_device *);
static netdev_tx_t pcnet32_start_xmit(struct sk_buff *,
struct net_device *);
-static void pcnet32_tx_timeout(struct net_device *dev);
+static void pcnet32_tx_timeout(struct net_device *dev, unsigned int txqueue);
static irqreturn_t pcnet32_interrupt(int, void *);
static int pcnet32_close(struct net_device *);
static struct net_device_stats *pcnet32_get_stats(struct net_device *);
@@ -2455,7 +2455,7 @@ static void pcnet32_restart(struct net_device *dev, unsigned int csr0_bits)
lp->a->write_csr(ioaddr, CSR0, csr0_bits);
}
-static void pcnet32_tx_timeout(struct net_device *dev)
+static void pcnet32_tx_timeout(struct net_device *dev, unsigned int txqueue)
{
struct pcnet32_private *lp = netdev_priv(dev);
unsigned long ioaddr = dev->base_addr, flags;
diff --git a/drivers/net/ethernet/amd/sunlance.c b/drivers/net/ethernet/amd/sunlance.c
index ebcbf8ca4829..b00e00881253 100644
--- a/drivers/net/ethernet/amd/sunlance.c
+++ b/drivers/net/ethernet/amd/sunlance.c
@@ -1097,7 +1097,7 @@ static void lance_piozero(void __iomem *dest, int len)
sbus_writeb(0, piobuf);
}
-static void lance_tx_timeout(struct net_device *dev)
+static void lance_tx_timeout(struct net_device *dev, unsigned int txqueue)
{
struct lance_private *lp = netdev_priv(dev);
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
index 98f8f2033154..b71f9b04a51e 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
@@ -2152,7 +2152,7 @@ static int xgbe_change_mtu(struct net_device *netdev, int mtu)
return 0;
}
-static void xgbe_tx_timeout(struct net_device *netdev)
+static void xgbe_tx_timeout(struct net_device *netdev, unsigned int txqueue)
{
struct xgbe_prv_data *pdata = netdev_priv(netdev);
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-phy-v2.c b/drivers/net/ethernet/amd/xgbe/xgbe-phy-v2.c
index 128cd648ba99..46c3c1ca38d6 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-phy-v2.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-phy-v2.c
@@ -1227,7 +1227,7 @@ static bool xgbe_phy_sfp_verify_eeprom(u8 cc_in, u8 *buf, unsigned int len)
for (cc = 0; len; buf++, len--)
cc += *buf;
- return (cc == cc_in) ? true : false;
+ return cc == cc_in;
}
static int xgbe_phy_sfp_read_eeprom(struct xgbe_prv_data *pdata)
diff --git a/drivers/net/ethernet/apm/xgene-v2/main.c b/drivers/net/ethernet/apm/xgene-v2/main.c
index 02b4f3af02b5..c48f60996761 100644
--- a/drivers/net/ethernet/apm/xgene-v2/main.c
+++ b/drivers/net/ethernet/apm/xgene-v2/main.c
@@ -575,7 +575,7 @@ static void xge_free_pending_skb(struct net_device *ndev)
}
}
-static void xge_timeout(struct net_device *ndev)
+static void xge_timeout(struct net_device *ndev, unsigned int txqueue)
{
struct xge_pdata *pdata = netdev_priv(ndev);
diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_main.c b/drivers/net/ethernet/apm/xgene/xgene_enet_main.c
index d8612131c55e..6aee2f0fc0db 100644
--- a/drivers/net/ethernet/apm/xgene/xgene_enet_main.c
+++ b/drivers/net/ethernet/apm/xgene/xgene_enet_main.c
@@ -859,7 +859,7 @@ static int xgene_enet_napi(struct napi_struct *napi, const int budget)
return processed;
}
-static void xgene_enet_timeout(struct net_device *ndev)
+static void xgene_enet_timeout(struct net_device *ndev, unsigned int txqueue)
{
struct xgene_enet_pdata *pdata = netdev_priv(ndev);
struct netdev_queue *txq;
@@ -2020,7 +2020,7 @@ static int xgene_enet_probe(struct platform_device *pdev)
int ret;
ndev = alloc_etherdev_mqs(sizeof(struct xgene_enet_pdata),
- XGENE_NUM_RX_RING, XGENE_NUM_TX_RING);
+ XGENE_NUM_TX_RING, XGENE_NUM_RX_RING);
if (!ndev)
return -ENOMEM;
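
The xgene hunk above is a real bug fix, not churn: alloc_etherdev_mqs() takes the TX queue count before the RX queue count, so the old call allocated the two counts swapped. For reference:

/* include/linux/etherdevice.h:
 *   struct net_device *alloc_etherdev_mqs(int sizeof_priv,
 *                                         unsigned int txqs,
 *                                         unsigned int rxqs);
 * TX count first, RX count second.
 */
ndev = alloc_etherdev_mqs(sizeof(struct xgene_enet_pdata),
                          XGENE_NUM_TX_RING, XGENE_NUM_RX_RING);
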
diff --git a/drivers/net/ethernet/apple/macmace.c b/drivers/net/ethernet/apple/macmace.c
index 8d03578d5e8c..95d3061c61be 100644
--- a/drivers/net/ethernet/apple/macmace.c
+++ b/drivers/net/ethernet/apple/macmace.c
@@ -91,7 +91,7 @@ static int mace_set_address(struct net_device *dev, void *addr);
static void mace_reset(struct net_device *dev);
static irqreturn_t mace_interrupt(int irq, void *dev_id);
static irqreturn_t mace_dma_intr(int irq, void *dev_id);
-static void mace_tx_timeout(struct net_device *dev);
+static void mace_tx_timeout(struct net_device *dev, unsigned int txqueue);
static void __mace_set_address(struct net_device *dev, void *addr);
/*
@@ -600,7 +600,7 @@ static irqreturn_t mace_interrupt(int irq, void *dev_id)
return IRQ_HANDLED;
}
-static void mace_tx_timeout(struct net_device *dev)
+static void mace_tx_timeout(struct net_device *dev, unsigned int txqueue)
{
struct mace_data *mp = netdev_priv(dev);
volatile struct mace *mb = mp->mace;
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_pci_func.c b/drivers/net/ethernet/aquantia/atlantic/aq_pci_func.c
index 2bb329606794..6b27af0db499 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_pci_func.c
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_pci_func.c
@@ -253,7 +253,7 @@ static int aq_pci_probe(struct pci_dev *pdev,
goto err_free_aq_hw;
}
- self->aq_hw->mmio = ioremap_nocache(mmio_pa, reg_sz);
+ self->aq_hw->mmio = ioremap(mmio_pa, reg_sz);
if (!self->aq_hw->mmio) {
err = -EIO;
goto err_free_aq_hw;
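
The ioremap_nocache() conversions here and elsewhere in the patch are mechanical: ioremap() already returns an uncached MMIO mapping on every architecture, so the _nocache variant was redundant. Typical usage is unchanged (SOME_REG_OFFSET is a hypothetical register offset):

/* Same semantics as the removed ioremap_nocache(): an uncached
 * mapping of a physical MMIO range, released with iounmap().
 */
void __iomem *regs = ioremap(res->start, resource_size(res));

if (!regs)
        return -ENOMEM;
readl(regs + SOME_REG_OFFSET);
iounmap(regs);
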
diff --git a/drivers/net/ethernet/arc/emac_main.c b/drivers/net/ethernet/arc/emac_main.c
index 6f2c867785fe..17bda4e8cc45 100644
--- a/drivers/net/ethernet/arc/emac_main.c
+++ b/drivers/net/ethernet/arc/emac_main.c
@@ -781,18 +781,6 @@ static int arc_emac_set_address(struct net_device *ndev, void *p)
return 0;
}
-static int arc_emac_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
-{
- if (!netif_running(dev))
- return -EINVAL;
-
- if (!dev->phydev)
- return -ENODEV;
-
- return phy_mii_ioctl(dev->phydev, rq, cmd);
-}
-
-
/**
* arc_emac_restart - Restart EMAC
* @ndev: Pointer to net_device structure.
@@ -857,7 +845,7 @@ static const struct net_device_ops arc_emac_netdev_ops = {
.ndo_set_mac_address = arc_emac_set_address,
.ndo_get_stats = arc_emac_stats,
.ndo_set_rx_mode = arc_emac_set_rx_mode,
- .ndo_do_ioctl = arc_emac_ioctl,
+ .ndo_do_ioctl = phy_do_ioctl_running,
#ifdef CONFIG_NET_POLL_CONTROLLER
.ndo_poll_controller = arc_emac_poll_controller,
#endif
diff --git a/drivers/net/ethernet/atheros/ag71xx.c b/drivers/net/ethernet/atheros/ag71xx.c
index 61a334d1b5e6..e95687a780fb 100644
--- a/drivers/net/ethernet/atheros/ag71xx.c
+++ b/drivers/net/ethernet/atheros/ag71xx.c
@@ -1394,14 +1394,6 @@ err_drop:
return NETDEV_TX_OK;
}
-static int ag71xx_do_ioctl(struct net_device *ndev, struct ifreq *ifr, int cmd)
-{
- if (!ndev->phydev)
- return -EINVAL;
-
- return phy_mii_ioctl(ndev->phydev, ifr, cmd);
-}
-
static void ag71xx_oom_timer_handler(struct timer_list *t)
{
struct ag71xx *ag = from_timer(ag, t, oom_timer);
@@ -1409,7 +1401,7 @@ static void ag71xx_oom_timer_handler(struct timer_list *t)
napi_schedule(&ag->napi);
}
-static void ag71xx_tx_timeout(struct net_device *ndev)
+static void ag71xx_tx_timeout(struct net_device *ndev, unsigned int txqueue)
{
struct ag71xx *ag = netdev_priv(ndev);
@@ -1618,7 +1610,7 @@ static const struct net_device_ops ag71xx_netdev_ops = {
.ndo_open = ag71xx_open,
.ndo_stop = ag71xx_stop,
.ndo_start_xmit = ag71xx_hard_start_xmit,
- .ndo_do_ioctl = ag71xx_do_ioctl,
+ .ndo_do_ioctl = phy_do_ioctl,
.ndo_tx_timeout = ag71xx_tx_timeout,
.ndo_change_mtu = ag71xx_change_mtu,
.ndo_set_mac_address = eth_mac_addr,
@@ -1687,8 +1679,7 @@ static int ag71xx_probe(struct platform_device *pdev)
goto err_free;
}
- ag->mac_base = devm_ioremap_nocache(&pdev->dev, res->start,
- resource_size(res));
+ ag->mac_base = devm_ioremap(&pdev->dev, res->start, resource_size(res));
if (!ag->mac_base) {
err = -ENOMEM;
goto err_free;
diff --git a/drivers/net/ethernet/atheros/alx/main.c b/drivers/net/ethernet/atheros/alx/main.c
index d4bbcdfd691a..1dcbc486eca9 100644
--- a/drivers/net/ethernet/atheros/alx/main.c
+++ b/drivers/net/ethernet/atheros/alx/main.c
@@ -1553,7 +1553,7 @@ static netdev_tx_t alx_start_xmit(struct sk_buff *skb,
return alx_start_xmit_ring(skb, alx_tx_queue_mapping(alx, skb));
}
-static void alx_tx_timeout(struct net_device *dev)
+static void alx_tx_timeout(struct net_device *dev, unsigned int txqueue)
{
struct alx_priv *alx = netdev_priv(dev);
diff --git a/drivers/net/ethernet/atheros/atl1c/atl1c_main.c b/drivers/net/ethernet/atheros/atl1c/atl1c_main.c
index 2b239ecea05f..4c0b1f8551dd 100644
--- a/drivers/net/ethernet/atheros/atl1c/atl1c_main.c
+++ b/drivers/net/ethernet/atheros/atl1c/atl1c_main.c
@@ -350,7 +350,7 @@ static void atl1c_del_timer(struct atl1c_adapter *adapter)
* atl1c_tx_timeout - Respond to a Tx Hang
* @netdev: network interface device structure
*/
-static void atl1c_tx_timeout(struct net_device *netdev)
+static void atl1c_tx_timeout(struct net_device *netdev, unsigned int txqueue)
{
struct atl1c_adapter *adapter = netdev_priv(netdev);
diff --git a/drivers/net/ethernet/atheros/atl1e/atl1e_main.c b/drivers/net/ethernet/atheros/atl1e/atl1e_main.c
index 4f7b65825c15..e0d89942d537 100644
--- a/drivers/net/ethernet/atheros/atl1e/atl1e_main.c
+++ b/drivers/net/ethernet/atheros/atl1e/atl1e_main.c
@@ -251,7 +251,7 @@ static void atl1e_cancel_work(struct atl1e_adapter *adapter)
* atl1e_tx_timeout - Respond to a Tx Hang
* @netdev: network interface device structure
*/
-static void atl1e_tx_timeout(struct net_device *netdev)
+static void atl1e_tx_timeout(struct net_device *netdev, unsigned int txqueue)
{
struct atl1e_adapter *adapter = netdev_priv(netdev);
diff --git a/drivers/net/ethernet/atheros/atlx/atl2.c b/drivers/net/ethernet/atheros/atlx/atl2.c
index 3aba38322717..b81a4e0c5b57 100644
--- a/drivers/net/ethernet/atheros/atlx/atl2.c
+++ b/drivers/net/ethernet/atheros/atlx/atl2.c
@@ -1001,7 +1001,7 @@ static int atl2_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
* atl2_tx_timeout - Respond to a Tx Hang
* @netdev: network interface device structure
*/
-static void atl2_tx_timeout(struct net_device *netdev)
+static void atl2_tx_timeout(struct net_device *netdev, unsigned int txqueue)
{
struct atl2_adapter *adapter = netdev_priv(netdev);
diff --git a/drivers/net/ethernet/atheros/atlx/atlx.c b/drivers/net/ethernet/atheros/atlx/atlx.c
index 505a22c703f7..0941d07d0833 100644
--- a/drivers/net/ethernet/atheros/atlx/atlx.c
+++ b/drivers/net/ethernet/atheros/atlx/atlx.c
@@ -183,7 +183,7 @@ static void atlx_clear_phy_int(struct atlx_adapter *adapter)
* atlx_tx_timeout - Respond to a Tx Hang
* @netdev: network interface device structure
*/
-static void atlx_tx_timeout(struct net_device *netdev)
+static void atlx_tx_timeout(struct net_device *netdev, unsigned int txqueue)
{
struct atlx_adapter *adapter = netdev_priv(netdev);
/* Do the reset outside of interrupt context */
diff --git a/drivers/net/ethernet/aurora/nb8800.c b/drivers/net/ethernet/aurora/nb8800.c
index 30b455013bf3..bc273e0db7ff 100644
--- a/drivers/net/ethernet/aurora/nb8800.c
+++ b/drivers/net/ethernet/aurora/nb8800.c
@@ -1005,18 +1005,13 @@ static int nb8800_stop(struct net_device *dev)
return 0;
}
-static int nb8800_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
-{
- return phy_mii_ioctl(dev->phydev, rq, cmd);
-}
-
static const struct net_device_ops nb8800_netdev_ops = {
.ndo_open = nb8800_open,
.ndo_stop = nb8800_stop,
.ndo_start_xmit = nb8800_xmit,
.ndo_set_mac_address = nb8800_set_mac_address,
.ndo_set_rx_mode = nb8800_set_rx_mode,
- .ndo_do_ioctl = nb8800_ioctl,
+ .ndo_do_ioctl = phy_do_ioctl,
.ndo_validate_addr = eth_validate_addr,
};
diff --git a/drivers/net/ethernet/broadcom/b44.c b/drivers/net/ethernet/broadcom/b44.c
index ec25fd81985d..a780b7215021 100644
--- a/drivers/net/ethernet/broadcom/b44.c
+++ b/drivers/net/ethernet/broadcom/b44.c
@@ -948,7 +948,7 @@ irq_ack:
return IRQ_RETVAL(handled);
}
-static void b44_tx_timeout(struct net_device *dev)
+static void b44_tx_timeout(struct net_device *dev, unsigned int txqueue)
{
struct b44 *bp = netdev_priv(dev);
diff --git a/drivers/net/ethernet/broadcom/bcmsysport.c b/drivers/net/ethernet/broadcom/bcmsysport.c
index 825af709708e..f07ac0e0af59 100644
--- a/drivers/net/ethernet/broadcom/bcmsysport.c
+++ b/drivers/net/ethernet/broadcom/bcmsysport.c
@@ -1354,7 +1354,7 @@ out:
return ret;
}
-static void bcm_sysport_tx_timeout(struct net_device *dev)
+static void bcm_sysport_tx_timeout(struct net_device *dev, unsigned int txqueue)
{
netdev_warn(dev, "transmit timeout!\n");
@@ -2323,7 +2323,7 @@ static int bcm_sysport_map_queues(struct notifier_block *nb,
ring->switch_queue = qp;
ring->switch_port = port;
ring->inspect = true;
- priv->ring_map[q + port * num_tx_queues] = ring;
+ priv->ring_map[qp + port * num_tx_queues] = ring;
qp++;
}
@@ -2338,7 +2338,7 @@ static int bcm_sysport_unmap_queues(struct notifier_block *nb,
struct net_device *slave_dev;
unsigned int num_tx_queues;
struct net_device *dev;
- unsigned int q, port;
+ unsigned int q, qp, port;
priv = container_of(nb, struct bcm_sysport_priv, dsa_notifier);
if (priv->netdev != info->master)
@@ -2364,7 +2364,8 @@ static int bcm_sysport_unmap_queues(struct notifier_block *nb,
continue;
ring->inspect = false;
- priv->ring_map[q + port * num_tx_queues] = NULL;
+ qp = ring->switch_queue;
+ priv->ring_map[qp + port * num_tx_queues] = NULL;
}
return 0;
@@ -2427,6 +2428,14 @@ static int bcm_sysport_probe(struct platform_device *pdev)
if (!of_id || !of_id->data)
return -EINVAL;
+ ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(40));
+ if (ret)
+ ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
+ if (ret) {
+ dev_err(&pdev->dev, "unable to set DMA mask: %d\n", ret);
+ return ret;
+ }
+
/* Fairly quickly we need to know the type of adapter we have */
params = of_id->data;
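
The new dma_set_mask_and_coherent() block follows the standard probe-time pattern: request the device's widest addressing (40-bit here) and fall back to 32-bit before any descriptor memory is allocated. The bcmgenet probe hunk later in this patch does the same. A generic sketch:

/* Negotiate DMA addressing once, early in probe; all later
 * dma_alloc_coherent()/dma_map_*() calls honor the mask.
 */
err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(40));
if (err)
        err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
if (err) {
        dev_err(&pdev->dev, "no usable DMA configuration\n");
        return err;
}
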
diff --git a/drivers/net/ethernet/broadcom/bgmac.c b/drivers/net/ethernet/broadcom/bgmac.c
index 148734b166f0..1bb07a5d82c9 100644
--- a/drivers/net/ethernet/broadcom/bgmac.c
+++ b/drivers/net/ethernet/broadcom/bgmac.c
@@ -1248,14 +1248,6 @@ static int bgmac_set_mac_address(struct net_device *net_dev, void *addr)
return 0;
}
-static int bgmac_ioctl(struct net_device *net_dev, struct ifreq *ifr, int cmd)
-{
- if (!netif_running(net_dev))
- return -EINVAL;
-
- return phy_mii_ioctl(net_dev->phydev, ifr, cmd);
-}
-
static const struct net_device_ops bgmac_netdev_ops = {
.ndo_open = bgmac_open,
.ndo_stop = bgmac_stop,
@@ -1263,7 +1255,7 @@ static const struct net_device_ops bgmac_netdev_ops = {
.ndo_set_rx_mode = bgmac_set_rx_mode,
.ndo_set_mac_address = bgmac_set_mac_address,
.ndo_validate_addr = eth_validate_addr,
- .ndo_do_ioctl = bgmac_ioctl,
+ .ndo_do_ioctl = phy_do_ioctl_running,
};
/**************************************************
diff --git a/drivers/net/ethernet/broadcom/bnx2.c b/drivers/net/ethernet/broadcom/bnx2.c
index fbc196b480b6..dbb7874607ca 100644
--- a/drivers/net/ethernet/broadcom/bnx2.c
+++ b/drivers/net/ethernet/broadcom/bnx2.c
@@ -6575,7 +6575,7 @@ bnx2_dump_state(struct bnx2 *bp)
}
static void
-bnx2_tx_timeout(struct net_device *dev)
+bnx2_tx_timeout(struct net_device *dev, unsigned int txqueue)
{
struct bnx2 *bp = netdev_priv(dev);
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
index 5e037a305b83..ee9e9290f112 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
@@ -4970,7 +4970,7 @@ int bnx2x_set_features(struct net_device *dev, netdev_features_t features)
return 0;
}
-void bnx2x_tx_timeout(struct net_device *dev)
+void bnx2x_tx_timeout(struct net_device *dev, unsigned int txqueue)
{
struct bnx2x *bp = netdev_priv(dev);
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h
index 3f63ffd7561b..6f1352d51cb2 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h
@@ -617,7 +617,7 @@ int bnx2x_set_features(struct net_device *dev, netdev_features_t features);
*
* @dev: net device
*/
-void bnx2x_tx_timeout(struct net_device *dev);
+void bnx2x_tx_timeout(struct net_device *dev, unsigned int txqueue);
/** bnx2x_get_c2s_mapping - read inner-to-outer vlan configuration
* c2s_map should have BNX2X_MAX_PRIORITY entries.
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_init.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_init.h
index 066765fbef06..0a59a09ef82f 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_init.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_init.h
@@ -296,7 +296,6 @@ static inline void bnx2x_dcb_config_qm(struct bnx2x *bp, enum cos_mode mode,
* possible, the driver should only write the valid vnics into the internal
* ram according to the appropriate port mode.
*/
-#define BITS_TO_BYTES(x) ((x)/8)
/* CMNG constants, as derived from system spec calculations */
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
index cff64e43bdd8..1c26fa962233 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
@@ -14053,7 +14053,7 @@ static int bnx2x_init_one(struct pci_dev *pdev,
rc = -ENOMEM;
goto init_one_freemem;
}
- bp->doorbells = ioremap_nocache(pci_resource_start(pdev, 2),
+ bp->doorbells = ioremap(pci_resource_start(pdev, 2),
doorbell_size);
}
if (!bp->doorbells) {
@@ -15410,6 +15410,7 @@ int bnx2x_configure_ptp_filters(struct bnx2x *bp)
REG_WR(bp, rule, BNX2X_PTP_TX_ON_RULE_MASK);
break;
case HWTSTAMP_TX_ONESTEP_SYNC:
+ case HWTSTAMP_TX_ONESTEP_P2P:
BNX2X_ERR("One-step timestamping is not supported\n");
return -ERANGE;
}
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
index c779f9cf8822..483935b001c8 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
@@ -944,6 +944,7 @@ static struct sk_buff *bnxt_rx_page_skb(struct bnxt *bp,
dma_addr -= bp->rx_dma_offset;
dma_unmap_page_attrs(&bp->pdev->dev, dma_addr, PAGE_SIZE, bp->rx_dir,
DMA_ATTR_WEAK_ORDERING);
+ page_pool_release_page(rxr->page_pool, page);
if (unlikely(!payload))
payload = eth_get_headlen(bp->dev, data_ptr, len);
@@ -6997,7 +6998,6 @@ static int __bnxt_hwrm_func_qcaps(struct bnxt *bp)
pf->fw_fid = le16_to_cpu(resp->fid);
pf->port_id = le16_to_cpu(resp->port_id);
- bp->dev->dev_port = pf->port_id;
memcpy(pf->mac_addr, resp->mac_address, ETH_ALEN);
pf->first_vf_id = le16_to_cpu(resp->first_vf_id);
pf->max_vfs = le16_to_cpu(resp->max_vfs);
@@ -7288,6 +7288,7 @@ static int bnxt_hwrm_ver_get(struct bnxt *bp)
bp->hwrm_max_ext_req_len = HWRM_MAX_REQ_LEN;
bp->chip_num = le16_to_cpu(resp->chip_num);
+ bp->chip_rev = resp->chip_rev;
if (bp->chip_num == CHIP_NUM_58700 && !resp->chip_rev &&
!resp->chip_metal)
bp->flags |= BNXT_FLAG_CHIP_NITRO_A0;
@@ -9063,7 +9064,7 @@ static int bnxt_update_phy_setting(struct bnxt *bp)
/* The last close may have shutdown the link, so need to call
* PHY_CFG to bring it back up.
*/
- if (!netif_carrier_ok(bp->dev))
+ if (!bp->link_info.link_up)
update_link = true;
if (!bnxt_eee_config_ok(bp))
@@ -9975,7 +9976,7 @@ static void bnxt_reset_task(struct bnxt *bp, bool silent)
}
}
-static void bnxt_tx_timeout(struct net_device *dev)
+static void bnxt_tx_timeout(struct net_device *dev, unsigned int txqueue)
{
struct bnxt *bp = netdev_priv(dev);
@@ -10040,6 +10041,13 @@ static void bnxt_timer(struct timer_list *t)
bnxt_queue_sp_work(bp);
}
+#ifdef CONFIG_RFS_ACCEL
+ if ((bp->flags & BNXT_FLAG_RFS) && bp->ntp_fltr_count) {
+ set_bit(BNXT_RX_NTP_FLTR_SP_EVENT, &bp->sp_event);
+ bnxt_queue_sp_work(bp);
+ }
+#endif /*CONFIG_RFS_ACCEL*/
+
if (bp->link_info.phy_retry) {
if (time_after(jiffies, bp->link_info.phy_retry_expires)) {
bp->link_info.phy_retry = false;
@@ -10050,7 +10058,8 @@ static void bnxt_timer(struct timer_list *t)
}
}
- if ((bp->flags & BNXT_FLAG_CHIP_P5) && netif_carrier_ok(dev)) {
+ if ((bp->flags & BNXT_FLAG_CHIP_P5) && !bp->chip_rev &&
+ netif_carrier_ok(dev)) {
set_bit(BNXT_RING_COAL_NOW_SP_EVENT, &bp->sp_event);
bnxt_queue_sp_work(bp);
}
@@ -10568,7 +10577,7 @@ static void bnxt_set_dflt_rss_hash_type(struct bnxt *bp)
VNIC_RSS_CFG_REQ_HASH_TYPE_TCP_IPV4 |
VNIC_RSS_CFG_REQ_HASH_TYPE_IPV6 |
VNIC_RSS_CFG_REQ_HASH_TYPE_TCP_IPV6;
- if (BNXT_CHIP_P4(bp) && bp->hwrm_spec_code >= 0x10501) {
+ if (BNXT_CHIP_P4_PLUS(bp) && bp->hwrm_spec_code >= 0x10501) {
bp->flags |= BNXT_FLAG_UDP_RSS_CAP;
bp->rss_hash_cfg |= VNIC_RSS_CFG_REQ_HASH_TYPE_UDP_IPV4 |
VNIC_RSS_CFG_REQ_HASH_TYPE_UDP_IPV6;
@@ -10822,6 +10831,7 @@ static void bnxt_fw_reset_task(struct work_struct *work)
smp_mb__before_atomic();
clear_bit(BNXT_STATE_IN_FW_RESET, &bp->state);
bnxt_ulp_start(bp, rc);
+ bnxt_dl_health_recovery_done(bp);
bnxt_dl_health_status_update(bp, true);
rtnl_unlock();
break;
@@ -11065,11 +11075,23 @@ static bool bnxt_fltr_match(struct bnxt_ntuple_filter *f1,
struct flow_keys *keys1 = &f1->fkeys;
struct flow_keys *keys2 = &f2->fkeys;
- if (keys1->addrs.v4addrs.src == keys2->addrs.v4addrs.src &&
- keys1->addrs.v4addrs.dst == keys2->addrs.v4addrs.dst &&
- keys1->ports.ports == keys2->ports.ports &&
- keys1->basic.ip_proto == keys2->basic.ip_proto &&
- keys1->basic.n_proto == keys2->basic.n_proto &&
+ if (keys1->basic.n_proto != keys2->basic.n_proto ||
+ keys1->basic.ip_proto != keys2->basic.ip_proto)
+ return false;
+
+ if (keys1->basic.n_proto == htons(ETH_P_IP)) {
+ if (keys1->addrs.v4addrs.src != keys2->addrs.v4addrs.src ||
+ keys1->addrs.v4addrs.dst != keys2->addrs.v4addrs.dst)
+ return false;
+ } else {
+ if (memcmp(&keys1->addrs.v6addrs.src, &keys2->addrs.v6addrs.src,
+ sizeof(keys1->addrs.v6addrs.src)) ||
+ memcmp(&keys1->addrs.v6addrs.dst, &keys2->addrs.v6addrs.dst,
+ sizeof(keys1->addrs.v6addrs.dst)))
+ return false;
+ }
+
+ if (keys1->ports.ports == keys2->ports.ports &&
keys1->control.flags == keys2->control.flags &&
ether_addr_equal(f1->src_mac_addr, f2->src_mac_addr) &&
ether_addr_equal(f1->dst_mac_addr, f2->dst_mac_addr))
@@ -11087,6 +11109,7 @@ static int bnxt_rx_flow_steer(struct net_device *dev, const struct sk_buff *skb,
struct ethhdr *eth = (struct ethhdr *)skb_mac_header(skb);
int rc = 0, idx, bit_id, l2_idx = 0;
struct hlist_head *head;
+ u32 flags;
if (!ether_addr_equal(dev->dev_addr, eth->h_dest)) {
struct bnxt_vnic_info *vnic = &bp->vnic_info[0];
@@ -11126,8 +11149,9 @@ static int bnxt_rx_flow_steer(struct net_device *dev, const struct sk_buff *skb,
rc = -EPROTONOSUPPORT;
goto err_free;
}
- if ((fkeys->control.flags & FLOW_DIS_ENCAPSULATION) &&
- bp->hwrm_spec_code < 0x10601) {
+ flags = fkeys->control.flags;
+ if (((flags & FLOW_DIS_ENCAPSULATION) &&
+ bp->hwrm_spec_code < 0x10601) || (flags & FLOW_DIS_IS_FRAGMENT)) {
rc = -EPROTONOSUPPORT;
goto err_free;
}
@@ -11361,11 +11385,11 @@ int bnxt_get_port_parent_id(struct net_device *dev,
return -EOPNOTSUPP;
/* The PF and it's VF-reps only support the switchdev framework */
- if (!BNXT_PF(bp))
+ if (!BNXT_PF(bp) || !(bp->flags & BNXT_FLAG_DSN_VALID))
return -EOPNOTSUPP;
- ppid->id_len = sizeof(bp->switch_id);
- memcpy(ppid->id, bp->switch_id, ppid->id_len);
+ ppid->id_len = sizeof(bp->dsn);
+ memcpy(ppid->id, bp->dsn, ppid->id_len);
return 0;
}
@@ -11421,9 +11445,9 @@ static void bnxt_remove_one(struct pci_dev *pdev)
bnxt_sriov_disable(bp);
bnxt_dl_fw_reporters_destroy(bp, true);
- bnxt_dl_unregister(bp);
pci_disable_pcie_error_reporting(pdev);
unregister_netdev(dev);
+ bnxt_dl_unregister(bp);
bnxt_shutdown_tc(bp);
bnxt_cancel_sp_work(bp);
bp->sp_event = 0;
@@ -11457,6 +11481,9 @@ static int bnxt_probe_phy(struct bnxt *bp, bool fw_dflt)
rc);
return rc;
}
+ if (!fw_dflt)
+ return 0;
+
rc = bnxt_update_link(bp, false);
if (rc) {
netdev_err(bp->dev, "Probe phy can't update link (rc: %x)\n",
@@ -11470,9 +11497,6 @@ static int bnxt_probe_phy(struct bnxt *bp, bool fw_dflt)
if (link_info->auto_link_speeds && !link_info->support_auto_speeds)
link_info->support_auto_speeds = link_info->support_speeds;
- if (!fw_dflt)
- return 0;
-
bnxt_init_ethtool_link_settings(bp);
return 0;
}
@@ -11734,6 +11758,7 @@ static int bnxt_pcie_dsn_get(struct bnxt *bp, u8 dsn[])
put_unaligned_le32(dw, &dsn[0]);
pci_read_config_dword(pdev, pos + 4, &dw);
put_unaligned_le32(dw, &dsn[4]);
+ bp->flags |= BNXT_FLAG_DSN_VALID;
return 0;
}
@@ -11845,9 +11870,7 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
if (BNXT_PF(bp)) {
/* Read the adapter's DSN to use as the eswitch switch_id */
- rc = bnxt_pcie_dsn_get(bp, bp->switch_id);
- if (rc)
- goto init_err_pci_clean;
+ rc = bnxt_pcie_dsn_get(bp, bp->dsn);
}
/* MTU range: 60 - FW defined max */
@@ -11894,11 +11917,14 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
bnxt_init_tc(bp);
}
+ bnxt_dl_register(bp);
+
rc = register_netdev(dev);
if (rc)
- goto init_err_cleanup_tc;
+ goto init_err_cleanup;
- bnxt_dl_register(bp);
+ if (BNXT_PF(bp))
+ devlink_port_type_eth_set(&bp->dl_port, bp->dev);
bnxt_dl_fw_reporters_create(bp);
netdev_info(dev, "%s found at mem %lx, node addr %pM\n",
@@ -11908,7 +11934,8 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
return 0;
-init_err_cleanup_tc:
+init_err_cleanup:
+ bnxt_dl_unregister(bp);
bnxt_shutdown_tc(bp);
bnxt_clear_int_mode(bp);
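
bnxt now keys both the devlink port attributes and the VF representors off the PCIe Device Serial Number, guarded by the new BNXT_FLAG_DSN_VALID. A sketch of the read that bnxt_pcie_dsn_get() performs, with the structure inferred from the hunk context above:

/* The DSN extended capability holds an 8-byte serial number
 * starting 4 bytes past the capability header.
 */
int pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_DSN);
u32 dw;

if (!pos)
        return -EOPNOTSUPP;

pos += 4;
pci_read_config_dword(pdev, pos, &dw);
put_unaligned_le32(dw, &dsn[0]);
pci_read_config_dword(pdev, pos + 4, &dw);
put_unaligned_le32(dw, &dsn[4]);
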
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.h b/drivers/net/ethernet/broadcom/bnxt/bnxt.h
index 505af5cfb1bd..cabef0b4f5fb 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.h
@@ -1457,6 +1457,8 @@ struct bnxt {
#define CHIP_NUM_58804 0xd804
#define CHIP_NUM_58808 0xd808
+ u8 chip_rev;
+
#define BNXT_CHIP_NUM_5730X(chip_num) \
((chip_num) >= CHIP_NUM_57301 && \
(chip_num) <= CHIP_NUM_57304)
@@ -1532,6 +1534,7 @@ struct bnxt {
#define BNXT_FLAG_NO_AGG_RINGS 0x20000
#define BNXT_FLAG_RX_PAGE_MODE 0x40000
#define BNXT_FLAG_MULTI_HOST 0x100000
+ #define BNXT_FLAG_DSN_VALID 0x200000
#define BNXT_FLAG_DOUBLE_DB 0x400000
#define BNXT_FLAG_CHIP_NITRO_A0 0x1000000
#define BNXT_FLAG_DIM 0x2000000
@@ -1845,7 +1848,7 @@ struct bnxt {
enum devlink_eswitch_mode eswitch_mode;
struct bnxt_vf_rep **vf_reps; /* array of vf-rep ptrs */
u16 *cfa_code_map; /* cfa_code -> vf_idx map */
- u8 switch_id[8];
+ u8 dsn[8];
struct bnxt_tc_info *tc_info;
struct list_head tc_indr_block_list;
struct notifier_block tc_netdev_nb;
@@ -1936,9 +1939,6 @@ static inline bool bnxt_cfa_hwrm_message(u16 req_type)
case HWRM_CFA_ENCAP_RECORD_FREE:
case HWRM_CFA_DECAP_FILTER_ALLOC:
case HWRM_CFA_DECAP_FILTER_FREE:
- case HWRM_CFA_NTUPLE_FILTER_ALLOC:
- case HWRM_CFA_NTUPLE_FILTER_FREE:
- case HWRM_CFA_NTUPLE_FILTER_CFG:
case HWRM_CFA_EM_FLOW_ALLOC:
case HWRM_CFA_EM_FLOW_FREE:
case HWRM_CFA_EM_FLOW_CFG:
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c
index 3eedd4477218..eec0168330b7 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c
@@ -21,6 +21,7 @@ bnxt_dl_flash_update(struct devlink *dl, const char *filename,
const char *region, struct netlink_ext_ack *extack)
{
struct bnxt *bp = bnxt_get_bp_from_dl(dl);
+ int rc;
if (region)
return -EOPNOTSUPP;
@@ -31,7 +32,18 @@ bnxt_dl_flash_update(struct devlink *dl, const char *filename,
return -EPERM;
}
- return bnxt_flash_package_from_file(bp->dev, filename, 0);
+ devlink_flash_update_begin_notify(dl);
+ devlink_flash_update_status_notify(dl, "Preparing to flash", region, 0,
+ 0);
+ rc = bnxt_flash_package_from_file(bp->dev, filename, 0);
+ if (!rc)
+ devlink_flash_update_status_notify(dl, "Flashing done", region,
+ 0, 0);
+ else
+ devlink_flash_update_status_notify(dl, "Flashing failed",
+ region, 0, 0);
+ devlink_flash_update_end_notify(dl);
+ return rc;
}
static int bnxt_fw_reporter_diagnose(struct devlink_health_reporter *reporter,
@@ -89,7 +101,7 @@ static int bnxt_fw_reset_recover(struct devlink_health_reporter *reporter,
return -EOPNOTSUPP;
bnxt_fw_reset(bp);
- return 0;
+ return -EINPROGRESS;
}
static const
@@ -116,7 +128,7 @@ static int bnxt_fw_fatal_recover(struct devlink_health_reporter *reporter,
else if (event == BNXT_FW_EXCEPTION_SP_EVENT)
bnxt_fw_exception(bp);
- return 0;
+ return -EINPROGRESS;
}
static const
@@ -262,11 +274,25 @@ void bnxt_dl_health_status_update(struct bnxt *bp, bool healthy)
health->fatal = false;
}
+void bnxt_dl_health_recovery_done(struct bnxt *bp)
+{
+ struct bnxt_fw_health *hlth = bp->fw_health;
+
+ if (hlth->fatal)
+ devlink_health_reporter_recovery_done(hlth->fw_fatal_reporter);
+ else
+ devlink_health_reporter_recovery_done(hlth->fw_reset_reporter);
+}
+
+static int bnxt_dl_info_get(struct devlink *dl, struct devlink_info_req *req,
+ struct netlink_ext_ack *extack);
+
static const struct devlink_ops bnxt_dl_ops = {
#ifdef CONFIG_BNXT_SRIOV
.eswitch_mode_set = bnxt_dl_eswitch_mode_set,
.eswitch_mode_get = bnxt_dl_eswitch_mode_get,
#endif /* CONFIG_BNXT_SRIOV */
+ .info_get = bnxt_dl_info_get,
.flash_update = bnxt_dl_flash_update,
};
@@ -333,6 +359,136 @@ static void bnxt_copy_from_nvm_data(union devlink_param_value *dst,
dst->vu8 = (u8)val32;
}
+static int bnxt_hwrm_get_nvm_cfg_ver(struct bnxt *bp,
+ union devlink_param_value *nvm_cfg_ver)
+{
+ struct hwrm_nvm_get_variable_input req = {0};
+ union bnxt_nvm_data *data;
+ dma_addr_t data_dma_addr;
+ int rc;
+
+ bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_NVM_GET_VARIABLE, -1, -1);
+ data = dma_alloc_coherent(&bp->pdev->dev, sizeof(*data),
+ &data_dma_addr, GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
+ req.dest_data_addr = cpu_to_le64(data_dma_addr);
+ req.data_len = cpu_to_le16(BNXT_NVM_CFG_VER_BITS);
+ req.option_num = cpu_to_le16(NVM_OFF_NVM_CFG_VER);
+
+ rc = hwrm_send_message_silent(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+ if (!rc)
+ bnxt_copy_from_nvm_data(nvm_cfg_ver, data,
+ BNXT_NVM_CFG_VER_BITS,
+ BNXT_NVM_CFG_VER_BYTES);
+
+ dma_free_coherent(&bp->pdev->dev, sizeof(*data), data, data_dma_addr);
+ return rc;
+}
+
+static int bnxt_dl_info_get(struct devlink *dl, struct devlink_info_req *req,
+ struct netlink_ext_ack *extack)
+{
+ struct bnxt *bp = bnxt_get_bp_from_dl(dl);
+ union devlink_param_value nvm_cfg_ver;
+ struct hwrm_ver_get_output *ver_resp;
+ char mgmt_ver[FW_VER_STR_LEN];
+ char roce_ver[FW_VER_STR_LEN];
+ char fw_ver[FW_VER_STR_LEN];
+ char buf[32];
+ int rc;
+
+ rc = devlink_info_driver_name_put(req, DRV_MODULE_NAME);
+ if (rc)
+ return rc;
+
+ sprintf(buf, "%X", bp->chip_num);
+ rc = devlink_info_version_fixed_put(req,
+ DEVLINK_INFO_VERSION_GENERIC_ASIC_ID, buf);
+ if (rc)
+ return rc;
+
+ ver_resp = &bp->ver_resp;
+ sprintf(buf, "%X", ver_resp->chip_rev);
+ rc = devlink_info_version_fixed_put(req,
+ DEVLINK_INFO_VERSION_GENERIC_ASIC_REV, buf);
+ if (rc)
+ return rc;
+
+ if (BNXT_PF(bp)) {
+ sprintf(buf, "%02X-%02X-%02X-%02X-%02X-%02X-%02X-%02X",
+ bp->dsn[7], bp->dsn[6], bp->dsn[5], bp->dsn[4],
+ bp->dsn[3], bp->dsn[2], bp->dsn[1], bp->dsn[0]);
+ rc = devlink_info_serial_number_put(req, buf);
+ if (rc)
+ return rc;
+ }
+
+ if (strlen(ver_resp->active_pkg_name)) {
+ rc =
+ devlink_info_version_running_put(req,
+ DEVLINK_INFO_VERSION_GENERIC_FW,
+ ver_resp->active_pkg_name);
+ if (rc)
+ return rc;
+ }
+
+ if (BNXT_PF(bp) && !bnxt_hwrm_get_nvm_cfg_ver(bp, &nvm_cfg_ver)) {
+ u32 ver = nvm_cfg_ver.vu32;
+
+ sprintf(buf, "%X.%X.%X", (ver >> 16) & 0xF, (ver >> 8) & 0xF,
+ ver & 0xF);
+ rc = devlink_info_version_running_put(req,
+ DEVLINK_INFO_VERSION_GENERIC_FW_PSID, buf);
+ if (rc)
+ return rc;
+ }
+
+ if (ver_resp->flags & VER_GET_RESP_FLAGS_EXT_VER_AVAIL) {
+ snprintf(fw_ver, FW_VER_STR_LEN, "%d.%d.%d.%d",
+ ver_resp->hwrm_fw_major, ver_resp->hwrm_fw_minor,
+ ver_resp->hwrm_fw_build, ver_resp->hwrm_fw_patch);
+
+ snprintf(mgmt_ver, FW_VER_STR_LEN, "%d.%d.%d.%d",
+ ver_resp->mgmt_fw_major, ver_resp->mgmt_fw_minor,
+ ver_resp->mgmt_fw_build, ver_resp->mgmt_fw_patch);
+
+ snprintf(roce_ver, FW_VER_STR_LEN, "%d.%d.%d.%d",
+ ver_resp->roce_fw_major, ver_resp->roce_fw_minor,
+ ver_resp->roce_fw_build, ver_resp->roce_fw_patch);
+ } else {
+ snprintf(fw_ver, FW_VER_STR_LEN, "%d.%d.%d.%d",
+ ver_resp->hwrm_fw_maj_8b, ver_resp->hwrm_fw_min_8b,
+ ver_resp->hwrm_fw_bld_8b, ver_resp->hwrm_fw_rsvd_8b);
+
+ snprintf(mgmt_ver, FW_VER_STR_LEN, "%d.%d.%d.%d",
+ ver_resp->mgmt_fw_maj_8b, ver_resp->mgmt_fw_min_8b,
+ ver_resp->mgmt_fw_bld_8b, ver_resp->mgmt_fw_rsvd_8b);
+
+ snprintf(roce_ver, FW_VER_STR_LEN, "%d.%d.%d.%d",
+ ver_resp->roce_fw_maj_8b, ver_resp->roce_fw_min_8b,
+ ver_resp->roce_fw_bld_8b, ver_resp->roce_fw_rsvd_8b);
+ }
+ rc = devlink_info_version_running_put(req,
+ DEVLINK_INFO_VERSION_GENERIC_FW_APP, fw_ver);
+ if (rc)
+ return rc;
+
+ if (!(bp->flags & BNXT_FLAG_CHIP_P5)) {
+ rc = devlink_info_version_running_put(req,
+ DEVLINK_INFO_VERSION_GENERIC_FW_MGMT, mgmt_ver);
+ if (rc)
+ return rc;
+
+ rc = devlink_info_version_running_put(req,
+ DEVLINK_INFO_VERSION_GENERIC_FW_ROCE, roce_ver);
+ if (rc)
+ return rc;
+ }
+ return 0;
+}
+
static int bnxt_hwrm_nvm_req(struct bnxt *bp, u32 param_id, void *msg,
int msg_len, union devlink_param_value *val)
{
@@ -475,15 +631,48 @@ static const struct devlink_param bnxt_dl_params[] = {
static const struct devlink_param bnxt_dl_port_params[] = {
};
-int bnxt_dl_register(struct bnxt *bp)
+static int bnxt_dl_params_register(struct bnxt *bp)
{
- struct devlink *dl;
int rc;
- if (bp->hwrm_spec_code < 0x10600) {
- netdev_warn(bp->dev, "Firmware does not support NVM params");
- return -ENOTSUPP;
+ if (bp->hwrm_spec_code < 0x10600)
+ return 0;
+
+ rc = devlink_params_register(bp->dl, bnxt_dl_params,
+ ARRAY_SIZE(bnxt_dl_params));
+ if (rc) {
+ netdev_warn(bp->dev, "devlink_params_register failed. rc=%d",
+ rc);
+ return rc;
}
+ rc = devlink_port_params_register(&bp->dl_port, bnxt_dl_port_params,
+ ARRAY_SIZE(bnxt_dl_port_params));
+ if (rc) {
+ netdev_err(bp->dev, "devlink_port_params_register failed");
+ devlink_params_unregister(bp->dl, bnxt_dl_params,
+ ARRAY_SIZE(bnxt_dl_params));
+ return rc;
+ }
+ devlink_params_publish(bp->dl);
+
+ return 0;
+}
+
+static void bnxt_dl_params_unregister(struct bnxt *bp)
+{
+ if (bp->hwrm_spec_code < 0x10600)
+ return;
+
+ devlink_params_unregister(bp->dl, bnxt_dl_params,
+ ARRAY_SIZE(bnxt_dl_params));
+ devlink_port_params_unregister(&bp->dl_port, bnxt_dl_port_params,
+ ARRAY_SIZE(bnxt_dl_port_params));
+}
+
+int bnxt_dl_register(struct bnxt *bp)
+{
+ struct devlink *dl;
+ int rc;
if (BNXT_PF(bp))
dl = devlink_alloc(&bnxt_dl_ops, sizeof(struct bnxt_dl));
@@ -510,40 +699,23 @@ int bnxt_dl_register(struct bnxt *bp)
if (!BNXT_PF(bp))
return 0;
- rc = devlink_params_register(dl, bnxt_dl_params,
- ARRAY_SIZE(bnxt_dl_params));
- if (rc) {
- netdev_warn(bp->dev, "devlink_params_register failed. rc=%d",
- rc);
- goto err_dl_unreg;
- }
-
devlink_port_attrs_set(&bp->dl_port, DEVLINK_PORT_FLAVOUR_PHYSICAL,
- bp->pf.port_id, false, 0,
- bp->switch_id, sizeof(bp->switch_id));
+ bp->pf.port_id, false, 0, bp->dsn,
+ sizeof(bp->dsn));
rc = devlink_port_register(dl, &bp->dl_port, bp->pf.port_id);
if (rc) {
netdev_err(bp->dev, "devlink_port_register failed");
- goto err_dl_param_unreg;
+ goto err_dl_unreg;
}
- devlink_port_type_eth_set(&bp->dl_port, bp->dev);
- rc = devlink_port_params_register(&bp->dl_port, bnxt_dl_port_params,
- ARRAY_SIZE(bnxt_dl_port_params));
- if (rc) {
- netdev_err(bp->dev, "devlink_port_params_register failed");
+ rc = bnxt_dl_params_register(bp);
+ if (rc)
goto err_dl_port_unreg;
- }
-
- devlink_params_publish(dl);
return 0;
err_dl_port_unreg:
devlink_port_unregister(&bp->dl_port);
-err_dl_param_unreg:
- devlink_params_unregister(dl, bnxt_dl_params,
- ARRAY_SIZE(bnxt_dl_params));
err_dl_unreg:
devlink_unregister(dl);
err_dl_free:
@@ -560,12 +732,8 @@ void bnxt_dl_unregister(struct bnxt *bp)
return;
if (BNXT_PF(bp)) {
- devlink_port_params_unregister(&bp->dl_port,
- bnxt_dl_port_params,
- ARRAY_SIZE(bnxt_dl_port_params));
+ bnxt_dl_params_unregister(bp);
devlink_port_unregister(&bp->dl_port);
- devlink_params_unregister(dl, bnxt_dl_params,
- ARRAY_SIZE(bnxt_dl_params));
}
devlink_unregister(dl);
devlink_free(dl);
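
Returning -EINPROGRESS from a devlink health reporter's .recover op, as the two hunks above now do, tells devlink that recovery was started but has not finished; the driver must later call devlink_health_reporter_recovery_done(), which bnxt does from bnxt_fw_reset_task() via the new bnxt_dl_health_recovery_done(). A minimal sketch of the asynchronous flow, with example_recover and start_async_fw_reset() as hypothetical stand-ins:

static int example_recover(struct devlink_health_reporter *reporter,
                           void *priv_ctx, struct netlink_ext_ack *extack)
{
        start_async_fw_reset();         /* hypothetical: queue reset work */
        return -EINPROGRESS;            /* recovery started, not done */
}

/* ...and from the reset worker once the device is back up: */
devlink_health_reporter_recovery_done(reporter);
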
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.h
index 6db6c3dac472..95f893f2a74d 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.h
@@ -38,6 +38,10 @@ static inline void bnxt_link_bp_to_dl(struct bnxt *bp, struct devlink *dl)
#define NVM_OFF_IGNORE_ARI 164
#define NVM_OFF_DIS_GRE_VER_CHECK 171
#define NVM_OFF_ENABLE_SRIOV 401
+#define NVM_OFF_NVM_CFG_VER 602
+
+#define BNXT_NVM_CFG_VER_BITS 24
+#define BNXT_NVM_CFG_VER_BYTES 4
#define BNXT_MSIX_VEC_MAX 1280
#define BNXT_MSIX_VEC_MIN_MAX 128
@@ -58,6 +62,7 @@ struct bnxt_dl_nvm_param {
void bnxt_devlink_health_report(struct bnxt *bp, unsigned long event);
void bnxt_dl_health_status_update(struct bnxt *bp, bool healthy);
+void bnxt_dl_health_recovery_done(struct bnxt *bp);
void bnxt_dl_fw_reporters_create(struct bnxt *bp);
void bnxt_dl_fw_reporters_destroy(struct bnxt *bp, bool all);
int bnxt_dl_register(struct bnxt *bp);
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
index 08d56ec7b68a..6171fa8b3677 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
@@ -1462,15 +1462,15 @@ static int bnxt_get_link_ksettings(struct net_device *dev,
ethtool_link_ksettings_add_link_mode(lk_ksettings,
advertising, Autoneg);
base->autoneg = AUTONEG_ENABLE;
- if (link_info->phy_link_status == BNXT_LINK_LINK)
+ base->duplex = DUPLEX_UNKNOWN;
+ if (link_info->phy_link_status == BNXT_LINK_LINK) {
bnxt_fw_to_ethtool_lp_adv(link_info, lk_ksettings);
+ if (link_info->duplex & BNXT_LINK_DUPLEX_FULL)
+ base->duplex = DUPLEX_FULL;
+ else
+ base->duplex = DUPLEX_HALF;
+ }
ethtool_speed = bnxt_fw_to_ethtool_speed(link_info->link_speed);
- if (!netif_carrier_ok(dev))
- base->duplex = DUPLEX_UNKNOWN;
- else if (link_info->duplex & BNXT_LINK_DUPLEX_FULL)
- base->duplex = DUPLEX_FULL;
- else
- base->duplex = DUPLEX_HALF;
} else {
base->autoneg = AUTONEG_DISABLE;
ethtool_speed =
@@ -2707,7 +2707,7 @@ static int bnxt_disable_an_for_lpbk(struct bnxt *bp,
return rc;
fw_speed = PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_1GB;
- if (netif_carrier_ok(bp->dev))
+ if (bp->link_info.link_up)
fw_speed = bp->link_info.link_speed;
else if (fw_advertising & BNXT_LINK_SPEED_MSK_10GB)
fw_speed = PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_10GB;
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_vfr.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_vfr.c
index f9bf7d7250ab..b010b34cdaf8 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_vfr.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_vfr.c
@@ -398,6 +398,9 @@ static int bnxt_vf_reps_create(struct bnxt *bp)
struct net_device *dev;
int rc, i;
+ if (!(bp->flags & BNXT_FLAG_DSN_VALID))
+ return -ENODEV;
+
bp->vf_reps = kcalloc(num_vfs, sizeof(vf_rep), GFP_KERNEL);
if (!bp->vf_reps)
return -ENOMEM;
diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.c b/drivers/net/ethernet/broadcom/genet/bcmgenet.c
index 120fa05a39ff..e50a15397e11 100644
--- a/drivers/net/ethernet/broadcom/genet/bcmgenet.c
+++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.c
@@ -2,7 +2,7 @@
/*
* Broadcom GENET (Gigabit Ethernet) controller driver
*
- * Copyright (c) 2014-2017 Broadcom
+ * Copyright (c) 2014-2019 Broadcom
*/
#define pr_fmt(fmt) "bcmgenet: " fmt
@@ -508,8 +508,8 @@ static int bcmgenet_set_link_ksettings(struct net_device *dev,
return phy_ethtool_ksettings_set(dev->phydev, cmd);
}
-static int bcmgenet_set_rx_csum(struct net_device *dev,
- netdev_features_t wanted)
+static void bcmgenet_set_rx_csum(struct net_device *dev,
+ netdev_features_t wanted)
{
struct bcmgenet_priv *priv = netdev_priv(dev);
u32 rbuf_chk_ctrl;
@@ -521,7 +521,7 @@ static int bcmgenet_set_rx_csum(struct net_device *dev,
/* enable rx checksumming */
if (rx_csum_en)
- rbuf_chk_ctrl |= RBUF_RXCHK_EN;
+ rbuf_chk_ctrl |= RBUF_RXCHK_EN | RBUF_L3_PARSE_DIS;
else
rbuf_chk_ctrl &= ~RBUF_RXCHK_EN;
priv->desc_rxchk_en = rx_csum_en;
@@ -535,12 +535,10 @@ static int bcmgenet_set_rx_csum(struct net_device *dev,
rbuf_chk_ctrl &= ~RBUF_SKIP_FCS;
bcmgenet_rbuf_writel(priv, rbuf_chk_ctrl, RBUF_CHK_CTRL);
-
- return 0;
}
-static int bcmgenet_set_tx_csum(struct net_device *dev,
- netdev_features_t wanted)
+static void bcmgenet_set_tx_csum(struct net_device *dev,
+ netdev_features_t wanted)
{
struct bcmgenet_priv *priv = netdev_priv(dev);
bool desc_64b_en;
@@ -549,7 +547,7 @@ static int bcmgenet_set_tx_csum(struct net_device *dev,
tbuf_ctrl = bcmgenet_tbuf_ctrl_get(priv);
rbuf_ctrl = bcmgenet_rbuf_readl(priv, RBUF_CTRL);
- desc_64b_en = !!(wanted & (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM));
+ desc_64b_en = !!(wanted & NETIF_F_HW_CSUM);
/* enable 64 bytes descriptor in both directions (RBUF and TBUF) */
if (desc_64b_en) {
@@ -563,21 +561,27 @@ static int bcmgenet_set_tx_csum(struct net_device *dev,
bcmgenet_tbuf_ctrl_set(priv, tbuf_ctrl);
bcmgenet_rbuf_writel(priv, rbuf_ctrl, RBUF_CTRL);
-
- return 0;
}
static int bcmgenet_set_features(struct net_device *dev,
netdev_features_t features)
{
- netdev_features_t changed = features ^ dev->features;
- netdev_features_t wanted = dev->wanted_features;
- int ret = 0;
+ struct bcmgenet_priv *priv = netdev_priv(dev);
+ u32 reg;
+ int ret;
+
+ ret = clk_prepare_enable(priv->clk);
+ if (ret)
+ return ret;
- if (changed & (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM))
- ret = bcmgenet_set_tx_csum(dev, wanted);
- if (changed & (NETIF_F_RXCSUM))
- ret = bcmgenet_set_rx_csum(dev, wanted);
+ /* Make sure we reflect the value of CRC_CMD_FWD */
+ reg = bcmgenet_umac_readl(priv, UMAC_CMD);
+ priv->crc_fwd_en = !!(reg & CMD_CRC_FWD);
+
+ bcmgenet_set_tx_csum(dev, features);
+ bcmgenet_set_rx_csum(dev, features);
+
+ clk_disable_unprepare(priv->clk);
return ret;
}
@@ -857,6 +861,9 @@ static const struct bcmgenet_stats bcmgenet_gstrings_stats[] = {
STAT_GENET_SOFT_MIB("alloc_rx_buff_failed", mib.alloc_rx_buff_failed),
STAT_GENET_SOFT_MIB("rx_dma_failed", mib.rx_dma_failed),
STAT_GENET_SOFT_MIB("tx_dma_failed", mib.tx_dma_failed),
+ STAT_GENET_SOFT_MIB("tx_realloc_tsb", mib.tx_realloc_tsb),
+ STAT_GENET_SOFT_MIB("tx_realloc_tsb_failed",
+ mib.tx_realloc_tsb_failed),
/* Per TX queues */
STAT_GENET_Q(0),
STAT_GENET_Q(1),
@@ -1218,18 +1225,6 @@ static void bcmgenet_power_up(struct bcmgenet_priv *priv,
}
}
-/* ioctl handle special commands that are not present in ethtool. */
-static int bcmgenet_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
-{
- if (!netif_running(dev))
- return -EINVAL;
-
- if (!dev->phydev)
- return -ENODEV;
-
- return phy_mii_ioctl(dev->phydev, rq, cmd);
-}
-
static struct enet_cb *bcmgenet_get_txcb(struct bcmgenet_priv *priv,
struct bcmgenet_tx_ring *ring)
{
@@ -1483,6 +1478,7 @@ static void bcmgenet_tx_reclaim_all(struct net_device *dev)
static struct sk_buff *bcmgenet_put_tx_csum(struct net_device *dev,
struct sk_buff *skb)
{
+ struct bcmgenet_priv *priv = netdev_priv(dev);
struct status_64 *status = NULL;
struct sk_buff *new_skb;
u16 offset;
@@ -1495,12 +1491,15 @@ static struct sk_buff *bcmgenet_put_tx_csum(struct net_device *dev,
* enough headroom for us to insert 64B status block.
*/
new_skb = skb_realloc_headroom(skb, sizeof(*status));
- dev_kfree_skb(skb);
if (!new_skb) {
+ dev_kfree_skb_any(skb);
+ priv->mib.tx_realloc_tsb_failed++;
dev->stats.tx_dropped++;
return NULL;
}
+ dev_consume_skb_any(skb);
skb = new_skb;
+ priv->mib.tx_realloc_tsb++;
}
skb_push(skb, sizeof(*status));
@@ -1516,24 +1515,19 @@ static struct sk_buff *bcmgenet_put_tx_csum(struct net_device *dev,
ip_proto = ipv6_hdr(skb)->nexthdr;
break;
default:
- return skb;
+ /* don't use UDP flag */
+ ip_proto = 0;
+ break;
}
offset = skb_checksum_start_offset(skb) - sizeof(*status);
tx_csum_info = (offset << STATUS_TX_CSUM_START_SHIFT) |
- (offset + skb->csum_offset);
+ (offset + skb->csum_offset) |
+ STATUS_TX_CSUM_LV;
- /* Set the length valid bit for TCP and UDP and just set
- * the special UDP flag for IPv4, else just set to 0.
- */
- if (ip_proto == IPPROTO_TCP || ip_proto == IPPROTO_UDP) {
- tx_csum_info |= STATUS_TX_CSUM_LV;
- if (ip_proto == IPPROTO_UDP &&
- ip_ver == htons(ETH_P_IP))
- tx_csum_info |= STATUS_TX_CSUM_PROTO_UDP;
- } else {
- tx_csum_info = 0;
- }
+ /* Set the special UDP flag for UDP */
+ if (ip_proto == IPPROTO_UDP)
+ tx_csum_info |= STATUS_TX_CSUM_PROTO_UDP;
status->tx_csum_info = tx_csum_info;
}
@@ -1744,7 +1738,6 @@ static unsigned int bcmgenet_desc_rx(struct bcmgenet_rx_ring *ring,
unsigned int bytes_processed = 0;
unsigned int p_index, mask;
unsigned int discards;
- unsigned int chksum_ok = 0;
/* Clear status before servicing to reduce spurious interrupts */
if (ring->index == DESC_INDEX) {
@@ -1795,9 +1788,15 @@ static unsigned int bcmgenet_desc_rx(struct bcmgenet_rx_ring *ring,
dmadesc_get_length_status(priv, cb->bd_addr);
} else {
struct status_64 *status;
+ __be16 rx_csum;
status = (struct status_64 *)skb->data;
dma_length_status = status->length_status;
+ rx_csum = (__force __be16)(status->rx_csum & 0xffff);
+ if (priv->desc_rxchk_en) {
+ skb->csum = (__force __wsum)ntohs(rx_csum);
+ skb->ip_summed = CHECKSUM_COMPLETE;
+ }
}
/* DMA flags and length are still valid no matter how
@@ -1840,18 +1839,12 @@ static unsigned int bcmgenet_desc_rx(struct bcmgenet_rx_ring *ring,
goto next;
} /* error packet */
- chksum_ok = (dma_flag & priv->dma_rx_chk_bit) &&
- priv->desc_rxchk_en;
-
skb_put(skb, len);
if (priv->desc_64b_en) {
skb_pull(skb, 64);
len -= 64;
}
- if (likely(chksum_ok))
- skb->ip_summed = CHECKSUM_UNNECESSARY;
-
/* remove hardware 2bytes added for IP alignment */
skb_pull(skb, 2);
len -= 2;
@@ -2164,8 +2157,8 @@ static void bcmgenet_init_tx_ring(struct bcmgenet_priv *priv,
DMA_END_ADDR);
/* Initialize Tx NAPI */
- netif_napi_add(priv->dev, &ring->napi, bcmgenet_tx_poll,
- NAPI_POLL_WEIGHT);
+ netif_tx_napi_add(priv->dev, &ring->napi, bcmgenet_tx_poll,
+ NAPI_POLL_WEIGHT);
}
/* Initialize a RDMA ring */
@@ -2886,9 +2879,10 @@ static int bcmgenet_open(struct net_device *dev)
init_umac(priv);
- /* Make sure we reflect the value of CRC_CMD_FWD */
- reg = bcmgenet_umac_readl(priv, UMAC_CMD);
- priv->crc_fwd_en = !!(reg & CMD_CRC_FWD);
+ /* Apply features again in case we changed them while interface was
+ * down
+ */
+ bcmgenet_set_features(dev, dev->features);
bcmgenet_set_hw_addr(priv, dev->dev_addr);
@@ -3055,7 +3049,7 @@ static void bcmgenet_dump_tx_queue(struct bcmgenet_tx_ring *ring)
ring->cb_ptr, ring->end_ptr);
}
-static void bcmgenet_timeout(struct net_device *dev)
+static void bcmgenet_timeout(struct net_device *dev, unsigned int txqueue)
{
struct bcmgenet_priv *priv = netdev_priv(dev);
u32 int0_enable = 0;
@@ -3216,7 +3210,7 @@ static const struct net_device_ops bcmgenet_netdev_ops = {
.ndo_tx_timeout = bcmgenet_timeout,
.ndo_set_rx_mode = bcmgenet_set_rx_mode,
.ndo_set_mac_address = bcmgenet_set_mac_addr,
- .ndo_do_ioctl = bcmgenet_ioctl,
+ .ndo_do_ioctl = phy_do_ioctl_running,
.ndo_set_features = bcmgenet_set_features,
#ifdef CONFIG_NET_POLL_CONTROLLER
.ndo_poll_controller = bcmgenet_poll_controller,
@@ -3327,19 +3321,15 @@ static void bcmgenet_set_hw_params(struct bcmgenet_priv *priv)
if (GENET_IS_V5(priv) || GENET_IS_V4(priv)) {
bcmgenet_dma_regs = bcmgenet_dma_regs_v3plus;
genet_dma_ring_regs = genet_dma_ring_regs_v4;
- priv->dma_rx_chk_bit = DMA_RX_CHK_V3PLUS;
} else if (GENET_IS_V3(priv)) {
bcmgenet_dma_regs = bcmgenet_dma_regs_v3plus;
genet_dma_ring_regs = genet_dma_ring_regs_v123;
- priv->dma_rx_chk_bit = DMA_RX_CHK_V3PLUS;
} else if (GENET_IS_V2(priv)) {
bcmgenet_dma_regs = bcmgenet_dma_regs_v2;
genet_dma_ring_regs = genet_dma_ring_regs_v123;
- priv->dma_rx_chk_bit = DMA_RX_CHK_V12;
} else if (GENET_IS_V1(priv)) {
bcmgenet_dma_regs = bcmgenet_dma_regs_v1;
genet_dma_ring_regs = genet_dma_ring_regs_v123;
- priv->dma_rx_chk_bit = DMA_RX_CHK_V12;
}
/* enum genet_version starts at 1 */
@@ -3535,9 +3525,11 @@ static int bcmgenet_probe(struct platform_device *pdev)
priv->msg_enable = netif_msg_init(-1, GENET_MSG_DEFAULT);
- /* Set hardware features */
- dev->hw_features |= NETIF_F_SG | NETIF_F_IP_CSUM |
- NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM;
+ /* Set default features */
+ dev->features |= NETIF_F_SG | NETIF_F_HIGHDMA | NETIF_F_HW_CSUM |
+ NETIF_F_RXCSUM;
+ dev->hw_features |= dev->features;
+ dev->vlan_features |= dev->features;
/* Request the WOL interrupt and advertise suspend if available */
priv->wol_irq_disabled = true;
@@ -3574,6 +3566,14 @@ static int bcmgenet_probe(struct platform_device *pdev)
bcmgenet_set_hw_params(priv);
+ err = -EIO;
+ if (priv->hw_params->flags & GENET_HAS_40BITS)
+ err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(40));
+ if (err)
+ err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
+ if (err)
+ goto err;
+
/* Mii wait queue */
init_waitqueue_head(&priv->wq);
/* Always use RX_BUF_LENGTH (2KB) buffer for all chips */
@@ -3689,6 +3689,9 @@ static int bcmgenet_resume(struct device *d)
genphy_config_aneg(dev->phydev);
bcmgenet_mii_config(priv->dev, false);
+ /* Restore enabled features */
+ bcmgenet_set_features(dev, dev->features);
+
bcmgenet_set_hw_addr(priv, dev->dev_addr);
if (priv->internal_phy) {
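
One subtle improvement in the bcmgenet TSB rework above: the old code freed the original skb the same way on both the success and failure paths of skb_realloc_headroom(). The new code distinguishes the two, which matters for drop monitoring:

new_skb = skb_realloc_headroom(skb, sizeof(*status));
if (!new_skb) {
        dev_kfree_skb_any(skb);         /* genuine drop: visible to drop monitors */
        return NULL;
}
dev_consume_skb_any(skb);               /* replaced, not dropped */
skb = new_skb;
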
diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.h b/drivers/net/ethernet/broadcom/genet/bcmgenet.h
index a5659197598f..61a6fe9f4cec 100644
--- a/drivers/net/ethernet/broadcom/genet/bcmgenet.h
+++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.h
@@ -144,6 +144,8 @@ struct bcmgenet_mib_counters {
u32 alloc_rx_buff_failed;
u32 rx_dma_failed;
u32 tx_dma_failed;
+ u32 tx_realloc_tsb;
+ u32 tx_realloc_tsb_failed;
};
#define UMAC_HD_BKP_CTRL 0x004
@@ -251,6 +253,7 @@ struct bcmgenet_mib_counters {
#define RBUF_CHK_CTRL 0x14
#define RBUF_RXCHK_EN (1 << 0)
#define RBUF_SKIP_FCS (1 << 4)
+#define RBUF_L3_PARSE_DIS (1 << 5)
#define RBUF_ENERGY_CTRL 0x9c
#define RBUF_EEE_EN (1 << 0)
@@ -663,7 +666,6 @@ struct bcmgenet_priv {
bool desc_rxchk_en;
bool crc_fwd_en;
- unsigned int dma_rx_chk_bit;
u32 dma_max_burst_length;
u32 msg_enable;
diff --git a/drivers/net/ethernet/broadcom/sb1250-mac.c b/drivers/net/ethernet/broadcom/sb1250-mac.c
index 1604ad32e920..5b4568c2ad1c 100644
--- a/drivers/net/ethernet/broadcom/sb1250-mac.c
+++ b/drivers/net/ethernet/broadcom/sb1250-mac.c
@@ -294,7 +294,7 @@ static int sbmac_set_duplex(struct sbmac_softc *s, enum sbmac_duplex duplex,
enum sbmac_fc fc);
static int sbmac_open(struct net_device *dev);
-static void sbmac_tx_timeout (struct net_device *dev);
+static void sbmac_tx_timeout (struct net_device *dev, unsigned int txqueue);
static void sbmac_set_rx_mode(struct net_device *dev);
static int sbmac_mii_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
static int sbmac_close(struct net_device *dev);
@@ -2419,7 +2419,7 @@ static void sbmac_mii_poll(struct net_device *dev)
}
-static void sbmac_tx_timeout (struct net_device *dev)
+static void sbmac_tx_timeout (struct net_device *dev, unsigned int txqueue)
{
struct sbmac_softc *sc = netdev_priv(dev);
unsigned long flags;
@@ -2537,7 +2537,7 @@ static int sbmac_probe(struct platform_device *pldev)
res = platform_get_resource(pldev, IORESOURCE_MEM, 0);
BUG_ON(!res);
- sbm_base = ioremap_nocache(res->start, resource_size(res));
+ sbm_base = ioremap(res->start, resource_size(res));
if (!sbm_base) {
printk(KERN_ERR "%s: unable to map device registers\n",
dev_name(&pldev->dev));
diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c
index ca3aa1250dd1..88466255bf66 100644
--- a/drivers/net/ethernet/broadcom/tg3.c
+++ b/drivers/net/ethernet/broadcom/tg3.c
@@ -7645,7 +7645,7 @@ static void tg3_poll_controller(struct net_device *dev)
}
#endif
-static void tg3_tx_timeout(struct net_device *dev)
+static void tg3_tx_timeout(struct net_device *dev, unsigned int txqueue)
{
struct tg3 *tp = netdev_priv(dev);
@@ -7874,8 +7874,8 @@ static netdev_tx_t tg3_start_xmit(struct sk_buff *, struct net_device *);
static int tg3_tso_bug(struct tg3 *tp, struct tg3_napi *tnapi,
struct netdev_queue *txq, struct sk_buff *skb)
{
- struct sk_buff *segs, *nskb;
u32 frag_cnt_est = skb_shinfo(skb)->gso_segs * 3;
+ struct sk_buff *segs, *seg, *next;
/* Estimate the number of fragments in the worst case */
if (unlikely(tg3_tx_avail(tnapi) <= frag_cnt_est)) {
@@ -7898,12 +7898,10 @@ static int tg3_tso_bug(struct tg3 *tp, struct tg3_napi *tnapi,
if (IS_ERR(segs) || !segs)
goto tg3_tso_bug_end;
- do {
- nskb = segs;
- segs = segs->next;
- nskb->next = NULL;
- tg3_start_xmit(nskb, tp->dev);
- } while (segs);
+ skb_list_walk_safe(segs, seg, next) {
+ skb_mark_not_on_list(seg);
+ tg3_start_xmit(seg, tp->dev);
+ }
tg3_tso_bug_end:
dev_consume_skb_any(skb);
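
skb_list_walk_safe() is the list-walking helper adopted in the tg3 hunk above; it caches the next pointer so each segment can be unlinked (skb_mark_not_on_list()) and handed to the transmit path. Roughly, per include/linux/skbuff.h:

/* Safe iteration over an skb segment list; 'next_skb' is read
 * before the loop body modifies or frees 'skb'.
 */
#define skb_list_walk_safe(first, skb, next_skb)                          \
        for ((skb) = (first), (next_skb) = (skb) ? (skb)->next : NULL;    \
             (skb);                                                       \
             (skb) = (next_skb), (next_skb) = (skb) ? (skb)->next : NULL)
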
diff --git a/drivers/net/ethernet/brocade/bna/bfa_ioc.c b/drivers/net/ethernet/brocade/bna/bfa_ioc.c
index 4042c2185e98..e17bfc87da90 100644
--- a/drivers/net/ethernet/brocade/bna/bfa_ioc.c
+++ b/drivers/net/ethernet/brocade/bna/bfa_ioc.c
@@ -1124,11 +1124,10 @@ bfa_nw_ioc_sem_release(void __iomem *sem_reg)
static void
bfa_ioc_fwver_clear(struct bfa_ioc *ioc)
{
- u32 pgnum, pgoff, loff = 0;
+ u32 pgnum, loff = 0;
int i;
pgnum = PSS_SMEM_PGNUM(ioc->ioc_regs.smem_pg0, loff);
- pgoff = PSS_SMEM_PGOFF(loff);
writel(pgnum, ioc->ioc_regs.host_page_num_fn);
for (i = 0; i < (sizeof(struct bfi_ioc_image_hdr) / sizeof(u32)); i++) {
diff --git a/drivers/net/ethernet/brocade/bna/bnad.c b/drivers/net/ethernet/brocade/bna/bnad.c
index e338272931d1..01a50a4b2113 100644
--- a/drivers/net/ethernet/brocade/bna/bnad.c
+++ b/drivers/net/ethernet/brocade/bna/bnad.c
@@ -3477,7 +3477,7 @@ bnad_init(struct bnad *bnad,
bnad->pcidev = pdev;
bnad->mmio_start = pci_resource_start(pdev, 0);
bnad->mmio_len = pci_resource_len(pdev, 0);
- bnad->bar0 = ioremap_nocache(bnad->mmio_start, bnad->mmio_len);
+ bnad->bar0 = ioremap(bnad->mmio_start, bnad->mmio_len);
if (!bnad->bar0) {
dev_err(&pdev->dev, "ioremap for bar0 failed\n");
return -ENOMEM;
diff --git a/drivers/net/ethernet/cadence/macb.h b/drivers/net/ethernet/cadence/macb.h
index 19fe4f4867c7..dbf7070fcdba 100644
--- a/drivers/net/ethernet/cadence/macb.h
+++ b/drivers/net/ethernet/cadence/macb.h
@@ -630,10 +630,17 @@
#define GEM_CLK_DIV96 5
/* Constants for MAN register */
-#define MACB_MAN_SOF 1
-#define MACB_MAN_WRITE 1
-#define MACB_MAN_READ 2
-#define MACB_MAN_CODE 2
+#define MACB_MAN_C22_SOF 1
+#define MACB_MAN_C22_WRITE 1
+#define MACB_MAN_C22_READ 2
+#define MACB_MAN_C22_CODE 2
+
+#define MACB_MAN_C45_SOF 0
+#define MACB_MAN_C45_ADDR 0
+#define MACB_MAN_C45_WRITE 1
+#define MACB_MAN_C45_POST_READ_INCR 2
+#define MACB_MAN_C45_READ 3
+#define MACB_MAN_C45_CODE 2
/* Capability mask bits */
#define MACB_CAPS_ISR_CLEAR_ON_WRITE 0x00000001
diff --git a/drivers/net/ethernet/cadence/macb_main.c b/drivers/net/ethernet/cadence/macb_main.c
index a0503b99dc79..7a2fe63d1136 100644
--- a/drivers/net/ethernet/cadence/macb_main.c
+++ b/drivers/net/ethernet/cadence/macb_main.c
@@ -337,11 +337,30 @@ static int macb_mdio_read(struct mii_bus *bus, int mii_id, int regnum)
if (status < 0)
goto mdio_read_exit;
- macb_writel(bp, MAN, (MACB_BF(SOF, MACB_MAN_SOF)
- | MACB_BF(RW, MACB_MAN_READ)
- | MACB_BF(PHYA, mii_id)
- | MACB_BF(REGA, regnum)
- | MACB_BF(CODE, MACB_MAN_CODE)));
+ if (regnum & MII_ADDR_C45) {
+ macb_writel(bp, MAN, (MACB_BF(SOF, MACB_MAN_C45_SOF)
+ | MACB_BF(RW, MACB_MAN_C45_ADDR)
+ | MACB_BF(PHYA, mii_id)
+ | MACB_BF(REGA, (regnum >> 16) & 0x1F)
+ | MACB_BF(DATA, regnum & 0xFFFF)
+ | MACB_BF(CODE, MACB_MAN_C45_CODE)));
+
+ status = macb_mdio_wait_for_idle(bp);
+ if (status < 0)
+ goto mdio_read_exit;
+
+ macb_writel(bp, MAN, (MACB_BF(SOF, MACB_MAN_C45_SOF)
+ | MACB_BF(RW, MACB_MAN_C45_READ)
+ | MACB_BF(PHYA, mii_id)
+ | MACB_BF(REGA, (regnum >> 16) & 0x1F)
+ | MACB_BF(CODE, MACB_MAN_C45_CODE)));
+ } else {
+ macb_writel(bp, MAN, (MACB_BF(SOF, MACB_MAN_C22_SOF)
+ | MACB_BF(RW, MACB_MAN_C22_READ)
+ | MACB_BF(PHYA, mii_id)
+ | MACB_BF(REGA, regnum)
+ | MACB_BF(CODE, MACB_MAN_C22_CODE)));
+ }
status = macb_mdio_wait_for_idle(bp);
if (status < 0)
@@ -370,12 +389,32 @@ static int macb_mdio_write(struct mii_bus *bus, int mii_id, int regnum,
if (status < 0)
goto mdio_write_exit;
- macb_writel(bp, MAN, (MACB_BF(SOF, MACB_MAN_SOF)
- | MACB_BF(RW, MACB_MAN_WRITE)
- | MACB_BF(PHYA, mii_id)
- | MACB_BF(REGA, regnum)
- | MACB_BF(CODE, MACB_MAN_CODE)
- | MACB_BF(DATA, value)));
+ if (regnum & MII_ADDR_C45) {
+ macb_writel(bp, MAN, (MACB_BF(SOF, MACB_MAN_C45_SOF)
+ | MACB_BF(RW, MACB_MAN_C45_ADDR)
+ | MACB_BF(PHYA, mii_id)
+ | MACB_BF(REGA, (regnum >> 16) & 0x1F)
+ | MACB_BF(DATA, regnum & 0xFFFF)
+ | MACB_BF(CODE, MACB_MAN_C45_CODE)));
+
+ status = macb_mdio_wait_for_idle(bp);
+ if (status < 0)
+ goto mdio_write_exit;
+
+ macb_writel(bp, MAN, (MACB_BF(SOF, MACB_MAN_C45_SOF)
+ | MACB_BF(RW, MACB_MAN_C45_WRITE)
+ | MACB_BF(PHYA, mii_id)
+ | MACB_BF(REGA, (regnum >> 16) & 0x1F)
+ | MACB_BF(CODE, MACB_MAN_C45_CODE)
+ | MACB_BF(DATA, value)));
+ } else {
+ macb_writel(bp, MAN, (MACB_BF(SOF, MACB_MAN_C22_SOF)
+ | MACB_BF(RW, MACB_MAN_C22_WRITE)
+ | MACB_BF(PHYA, mii_id)
+ | MACB_BF(REGA, regnum)
+ | MACB_BF(CODE, MACB_MAN_C22_CODE)
+ | MACB_BF(DATA, value)));
+ }
status = macb_mdio_wait_for_idle(bp);
if (status < 0)
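(How the Clause 45 path works, reconstructed from the constants added in macb.h above: a C45 access takes two MAN-register frames where Clause 22 needs a single one.

/* address frame: ST=00  OP=00 (ADDR)   PHYAD DEVAD TA  DATA = register address
 * read frame:    ST=00  OP=11 (READ)   PHYAD DEVAD TA  DATA = value returned
 * write frame:   ST=00  OP=01 (WRITE)  PHYAD DEVAD TA  DATA = value written
 */

MII_ADDR_C45 set in regnum marks the request as Clause 45; the device address (DEVAD) travels in regnum bits 20:16, which is why REGA is loaded from (regnum >> 16) & 0x1F, and the 16-bit register address rides in the DATA field of the address frame. The wait_for_idle between the two writes is needed because each frame is a separate bus transaction.)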
@@ -611,21 +650,24 @@ static const struct phylink_mac_ops macb_phylink_ops = {
.mac_link_up = macb_mac_link_up,
};
+static bool macb_phy_handle_exists(struct device_node *dn)
+{
+ dn = of_parse_phandle(dn, "phy-handle", 0);
+ of_node_put(dn);
+ return dn != NULL;
+}
+
static int macb_phylink_connect(struct macb *bp)
{
+ struct device_node *dn = bp->pdev->dev.of_node;
struct net_device *dev = bp->dev;
struct phy_device *phydev;
	int ret = 0;
- if (bp->pdev->dev.of_node &&
- of_parse_phandle(bp->pdev->dev.of_node, "phy-handle", 0)) {
- ret = phylink_of_phy_connect(bp->phylink, bp->pdev->dev.of_node,
- 0);
- if (ret) {
- netdev_err(dev, "Could not attach PHY (%d)\n", ret);
- return ret;
- }
- } else {
+ if (dn)
+ ret = phylink_of_phy_connect(bp->phylink, dn, 0);
+
+ if (!dn || (ret && !macb_phy_handle_exists(dn))) {
phydev = phy_find_first(bp->mii_bus);
if (!phydev) {
netdev_err(dev, "no PHY found\n");
@@ -634,10 +676,11 @@ static int macb_phylink_connect(struct macb *bp)
/* attach the mac to the phy */
ret = phylink_connect_phy(bp->phylink, phydev);
- if (ret) {
- netdev_err(dev, "Could not attach to PHY (%d)\n", ret);
- return ret;
- }
+ }
+
+ if (ret) {
+ netdev_err(dev, "Could not attach PHY (%d)\n", ret);
+ return ret;
}
phylink_start(bp->phylink);
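(Besides the restructuring, the new macb_phy_handle_exists() fixes a quiet reference leak: of_parse_phandle() returns the target node with an elevated refcount, and the old code only NULL-tested the result without ever dropping it. The pattern is generally useful; a generic sketch, with phandle_exists() being a hypothetical name:

static bool phandle_exists(const struct device_node *np, const char *name)
{
	struct device_node *target = of_parse_phandle(np, name, 0);

	of_node_put(target);	/* of_node_put(NULL) is a harmless no-op */
	return target != NULL;
}

Only the pointer value is inspected after the put, so no use-after-free is possible.)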
diff --git a/drivers/net/ethernet/calxeda/xgmac.c b/drivers/net/ethernet/calxeda/xgmac.c
index af04a2c81adb..05a3d067c3fc 100644
--- a/drivers/net/ethernet/calxeda/xgmac.c
+++ b/drivers/net/ethernet/calxeda/xgmac.c
@@ -1251,7 +1251,7 @@ static int xgmac_poll(struct napi_struct *napi, int budget)
* netdev structure and arrange for the device to be reset to a sane state
* in order to transmit a new packet.
*/
-static void xgmac_tx_timeout(struct net_device *dev)
+static void xgmac_tx_timeout(struct net_device *dev, unsigned int txqueue)
{
struct xgmac_priv *priv = netdev_priv(dev);
schedule_work(&priv->tx_timeout_work);
diff --git a/drivers/net/ethernet/cavium/liquidio/lio_main.c b/drivers/net/ethernet/cavium/liquidio/lio_main.c
index 7f3b2e3b0868..eab05b5534ea 100644
--- a/drivers/net/ethernet/cavium/liquidio/lio_main.c
+++ b/drivers/net/ethernet/cavium/liquidio/lio_main.c
@@ -2562,7 +2562,7 @@ lio_xmit_failed:
/** \brief Network device Tx timeout
* @param netdev pointer to network device
*/
-static void liquidio_tx_timeout(struct net_device *netdev)
+static void liquidio_tx_timeout(struct net_device *netdev, unsigned int txqueue)
{
struct lio *lio;
diff --git a/drivers/net/ethernet/cavium/liquidio/lio_vf_main.c b/drivers/net/ethernet/cavium/liquidio/lio_vf_main.c
index 370d76822ee0..7a77544a54f5 100644
--- a/drivers/net/ethernet/cavium/liquidio/lio_vf_main.c
+++ b/drivers/net/ethernet/cavium/liquidio/lio_vf_main.c
@@ -1628,7 +1628,7 @@ lio_xmit_failed:
/** \brief Network device Tx timeout
* @param netdev pointer to network device
*/
-static void liquidio_tx_timeout(struct net_device *netdev)
+static void liquidio_tx_timeout(struct net_device *netdev, unsigned int txqueue)
{
struct lio *lio;
diff --git a/drivers/net/ethernet/cavium/liquidio/lio_vf_rep.c b/drivers/net/ethernet/cavium/liquidio/lio_vf_rep.c
index f3f2e71431ac..600de587d7a9 100644
--- a/drivers/net/ethernet/cavium/liquidio/lio_vf_rep.c
+++ b/drivers/net/ethernet/cavium/liquidio/lio_vf_rep.c
@@ -31,7 +31,7 @@ static int lio_vf_rep_open(struct net_device *ndev);
static int lio_vf_rep_stop(struct net_device *ndev);
static netdev_tx_t lio_vf_rep_pkt_xmit(struct sk_buff *skb,
struct net_device *ndev);
-static void lio_vf_rep_tx_timeout(struct net_device *netdev);
+static void lio_vf_rep_tx_timeout(struct net_device *netdev, unsigned int txqueue);
static int lio_vf_rep_phys_port_name(struct net_device *dev,
char *buf, size_t len);
static void lio_vf_rep_get_stats64(struct net_device *dev,
@@ -172,7 +172,7 @@ lio_vf_rep_stop(struct net_device *ndev)
}
static void
-lio_vf_rep_tx_timeout(struct net_device *ndev)
+lio_vf_rep_tx_timeout(struct net_device *ndev, unsigned int txqueue)
{
netif_trans_update(ndev);
diff --git a/drivers/net/ethernet/cavium/octeon/octeon_mgmt.c b/drivers/net/ethernet/cavium/octeon/octeon_mgmt.c
index cdd7e5da4a74..e9575887a4f8 100644
--- a/drivers/net/ethernet/cavium/octeon/octeon_mgmt.c
+++ b/drivers/net/ethernet/cavium/octeon/octeon_mgmt.c
@@ -790,9 +790,7 @@ static int octeon_mgmt_ioctl(struct net_device *netdev,
case SIOCSHWTSTAMP:
return octeon_mgmt_ioctl_hwtstamp(netdev, rq, cmd);
default:
- if (netdev->phydev)
- return phy_mii_ioctl(netdev->phydev, rq, cmd);
- return -EINVAL;
+ return phy_do_ioctl(netdev, rq, cmd);
}
}
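(Several drivers in this series replace hand-rolled ioctl forwarders with the then-new generic phylib helpers. From memory, hedged, they amount to:

int phy_do_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	if (!dev->phydev)
		return -ENODEV;

	return phy_mii_ioctl(dev->phydev, ifr, cmd);
}

int phy_do_ioctl_running(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	if (!netif_running(dev))
		return -ENODEV;

	return phy_do_ioctl(dev, ifr, cmd);
}

One small semantic shift to note: the old octeon_mgmt default case returned -EINVAL when no PHY was attached, while the helper returns -ENODEV.)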
diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_main.c b/drivers/net/ethernet/cavium/thunder/nicvf_main.c
index f28409279ea4..016957285f99 100644
--- a/drivers/net/ethernet/cavium/thunder/nicvf_main.c
+++ b/drivers/net/ethernet/cavium/thunder/nicvf_main.c
@@ -1741,7 +1741,7 @@ static void nicvf_get_stats64(struct net_device *netdev,
}
-static void nicvf_tx_timeout(struct net_device *dev)
+static void nicvf_tx_timeout(struct net_device *dev, unsigned int txqueue)
{
struct nicvf *nic = netdev_priv(dev);
diff --git a/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c b/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c
index 58f89f6a040f..883cfa9c4b6d 100644
--- a/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c
+++ b/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c
@@ -2448,6 +2448,8 @@ static int cxgb_extension_ioctl(struct net_device *dev, void __user *useraddr)
if (!is_offload(adapter))
return -EOPNOTSUPP;
+ if (!capable(CAP_NET_ADMIN))
+ return -EPERM;
if (!(adapter->flags & FULL_INIT_DONE))
return -EIO; /* need the memory controllers */
if (copy_from_user(&t, useraddr, sizeof(t)))
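(This hunk adds a missing privilege check: cxgb3's private extension ioctl pokes the adapter's memory controllers and was previously reachable without elevated rights; it now demands CAP_NET_ADMIN before doing anything further.)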
@@ -3265,7 +3267,7 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
goto out_free_adapter;
}
- adapter->regs = ioremap_nocache(mmio_start, mmio_len);
+ adapter->regs = ioremap(mmio_start, mmio_len);
if (!adapter->regs) {
dev_err(&pdev->dev, "cannot map device registers\n");
err = -ENOMEM;
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
index becee29f5df7..8b7d156f79d3 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
@@ -56,6 +56,7 @@
#include <asm/io.h>
#include "t4_chip_type.h"
#include "cxgb4_uld.h"
+#include "t4fw_api.h"
#define CH_WARN(adap, fmt, ...) dev_warn(adap->pdev_dev, fmt, ## __VA_ARGS__)
extern struct list_head adapter_list;
@@ -68,6 +69,16 @@ extern struct mutex uld_mutex;
#define ETHTXQ_STOP_THRES \
(1 + DIV_ROUND_UP((3 * MAX_SKB_FRAGS) / 2 + (MAX_SKB_FRAGS & 1), 8))
+#define FW_PARAM_DEV(param) \
+ (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DEV) | \
+ FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DEV_##param))
+
+#define FW_PARAM_PFVF(param) \
+ (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_PFVF) | \
+ FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_PFVF_##param) | \
+ FW_PARAMS_PARAM_Y_V(0) | \
+ FW_PARAMS_PARAM_Z_V(0))
+
enum {
MAX_NPORTS = 4, /* max # of ports */
SERNUM_LEN = 24, /* Serial # length */
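(The FW_PARAM_DEV/FW_PARAM_PFVF helpers move from cxgb4_main.c into the shared header, see their removal in adap_init0() further down, so the new HP-filter code can build parameter handles too. Each handle packs a mnemonic and parameter index into one 32-bit word that t4_query_params()/t4_set_params() hand to firmware; the usage mirrors the adap_init0() hunk below:

u32 params[2], val[2];
int ret;

params[0] = FW_PARAM_PFVF(HPFILTER_START);
params[1] = FW_PARAM_PFVF(HPFILTER_END);
ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 2, params, val);

On success, val[0] and val[1] hold the region bounds, from which nhpftids = val[1] - val[0] + 1.)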
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c
index aca9f7a20a2a..9d1f2f88b945 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c
@@ -70,8 +70,7 @@ static void *seq_tab_start(struct seq_file *seq, loff_t *pos)
static void *seq_tab_next(struct seq_file *seq, void *v, loff_t *pos)
{
v = seq_tab_get_idx(seq->private, *pos + 1);
- if (v)
- ++*pos;
+ ++(*pos);
return v;
}
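(The old `if (v) ++*pos;` broke the seq_file contract: the ->next() hook must advance *pos unconditionally, even when it returns NULL to end iteration, otherwise the core can re-invoke it at the same position and repeat or spin on the output. The canonical shape, with example_get_entry() as a placeholder name:

static void *example_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	++(*pos);	/* advance even when stepping past the end */
	return example_get_entry(seq->private, *pos);	/* NULL terminates */
}

The same one-line fix is applied to l2t_seq_next() further down in this series.)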
@@ -3175,14 +3174,12 @@ static const struct file_operations mem_debugfs_fops = {
static int tid_info_show(struct seq_file *seq, void *v)
{
- unsigned int tid_start = 0;
struct adapter *adap = seq->private;
- const struct tid_info *t = &adap->tids;
- enum chip_type chip = CHELSIO_CHIP_VERSION(adap->params.chip);
-
- if (chip > CHELSIO_T5)
- tid_start = t4_read_reg(adap, LE_DB_ACTIVE_TABLE_START_INDEX_A);
+ const struct tid_info *t;
+ enum chip_type chip;
+ t = &adap->tids;
+ chip = CHELSIO_CHIP_VERSION(adap->params.chip);
if (t4_read_reg(adap, LE_DB_CONFIG_A) & HASHEN_F) {
unsigned int sb;
seq_printf(seq, "Connections in use: %u\n",
@@ -3194,9 +3191,9 @@ static int tid_info_show(struct seq_file *seq, void *v)
sb = t4_read_reg(adap, LE_DB_SRVR_START_INDEX_A);
if (sb) {
- seq_printf(seq, "TID range: %u..%u/%u..%u", tid_start,
+ seq_printf(seq, "TID range: %u..%u/%u..%u", t->tid_base,
sb - 1, adap->tids.hash_base,
- t->ntids - 1);
+ t->tid_base + t->ntids - 1);
seq_printf(seq, ", in use: %u/%u\n",
atomic_read(&t->tids_in_use),
atomic_read(&t->hash_tids_in_use));
@@ -3205,14 +3202,14 @@ static int tid_info_show(struct seq_file *seq, void *v)
t->aftid_base,
t->aftid_end,
adap->tids.hash_base,
- t->ntids - 1);
+ t->tid_base + t->ntids - 1);
seq_printf(seq, ", in use: %u/%u\n",
atomic_read(&t->tids_in_use),
atomic_read(&t->hash_tids_in_use));
} else {
seq_printf(seq, "TID range: %u..%u",
adap->tids.hash_base,
- t->ntids - 1);
+ t->tid_base + t->ntids - 1);
seq_printf(seq, ", in use: %u\n",
atomic_read(&t->hash_tids_in_use));
}
@@ -3220,8 +3217,8 @@ static int tid_info_show(struct seq_file *seq, void *v)
seq_printf(seq, "Connections in use: %u\n",
atomic_read(&t->conns_in_use));
- seq_printf(seq, "TID range: %u..%u", tid_start,
- tid_start + t->ntids - 1);
+ seq_printf(seq, "TID range: %u..%u", t->tid_base,
+ t->tid_base + t->ntids - 1);
seq_printf(seq, ", in use: %u\n",
atomic_read(&t->tids_in_use));
}
@@ -3244,6 +3241,9 @@ static int tid_info_show(struct seq_file *seq, void *v)
seq_printf(seq, "SFTID range: %u..%u in use: %u\n",
t->sftid_base, t->sftid_base + t->nsftids - 2,
t->sftids_in_use);
+ if (t->nhpftids)
+ seq_printf(seq, "HPFTID range: %u..%u\n", t->hpftid_base,
+ t->hpftid_base + t->nhpftids - 1);
if (t->ntids)
seq_printf(seq, "HW TID usage: %u IP users, %u IPv6 users\n",
t4_read_reg(adap, LE_DB_ACT_CNT_IPV4_A),
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c
index 1d39fca11810..2a2938bbb93a 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c
@@ -361,20 +361,22 @@ static int get_filter_count(struct adapter *adapter, unsigned int fidx,
tcb_base = t4_read_reg(adapter, TP_CMM_TCB_BASE_A);
if (is_hashfilter(adapter) && hash) {
- if (fidx < adapter->tids.ntids) {
- f = adapter->tids.tid_tab[fidx];
- if (!f)
- return -EINVAL;
- } else {
+ if (tid_out_of_range(&adapter->tids, fidx))
return -E2BIG;
- }
+ f = adapter->tids.tid_tab[fidx - adapter->tids.tid_base];
+ if (!f)
+ return -EINVAL;
} else {
- if ((fidx != (adapter->tids.nftids +
- adapter->tids.nsftids - 1)) &&
- fidx >= adapter->tids.nftids)
+ if ((fidx != (adapter->tids.nftids + adapter->tids.nsftids +
+ adapter->tids.nhpftids - 1)) &&
+ fidx >= (adapter->tids.nftids + adapter->tids.nhpftids))
return -E2BIG;
- f = &adapter->tids.ftid_tab[fidx];
+ if (fidx < adapter->tids.nhpftids)
+ f = &adapter->tids.hpftid_tab[fidx];
+ else
+ f = &adapter->tids.ftid_tab[fidx -
+ adapter->tids.nhpftids];
if (!f->valid)
return -EINVAL;
}
@@ -480,6 +482,7 @@ int cxgb4_get_free_ftid(struct net_device *dev, int family)
ftid -= n;
}
spin_unlock_bh(&t->ftid_lock);
+ ftid += t->nhpftids;
return found ? ftid : -ENOMEM;
}
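(The shape of the whole HP-filter change in one picture, reconstructed from these hunks: the driver exposes a single contiguous filter index space with the high-priority region stacked in front of the normal one.

  user-visible fidx:  0 .. nhpftids-1        nhpftids .. nhpftids+nftids-1
  backing table:      hpftid_tab[fidx]       ftid_tab[fidx - nhpftids]
  hardware tid:       hpftid_base + fidx     ftid_base + (fidx - nhpftids)

Hence cxgb4_get_free_ftid() biases its result by nhpftids here, and every consumer below, del_filter_wr(), set_filter_wr(), delete_filter() and __cxgb4_set_filter(), translates back before indexing ftid_tab.)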
@@ -507,6 +510,24 @@ static int cxgb4_set_ftid(struct tid_info *t, int fidx, int family,
return 0;
}
+static int cxgb4_set_hpftid(struct tid_info *t, int fidx, int family)
+{
+ spin_lock_bh(&t->ftid_lock);
+
+ if (test_bit(fidx, t->hpftid_bmap)) {
+ spin_unlock_bh(&t->ftid_lock);
+ return -EBUSY;
+ }
+
+ if (family == PF_INET)
+ __set_bit(fidx, t->hpftid_bmap);
+ else
+ bitmap_allocate_region(t->hpftid_bmap, fidx, 1);
+
+ spin_unlock_bh(&t->ftid_lock);
+ return 0;
+}
+
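(Why IPv4 and IPv6 take different branches above: a T6 IPv6 filter occupies two consecutive LE-TCAM slots, see the "IPv6 filter requires 2 indices" check later in this file, while IPv4 takes one. bitmap_allocate_region(bitmap, pos, order) claims 2^order contiguous, order-aligned bits and fails with -EBUSY if any is already set, so order 1 reserves exactly the IPv6 pair, and a plain __set_bit() covers the single IPv4 slot. The pre-T6 four-slot case handled by cxgb4_set_ftid()'s chip_ver argument does not arise here, since this series only enables the HP region on T6.)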
static void cxgb4_clear_ftid(struct tid_info *t, int fidx, int family,
unsigned int chip_ver)
{
@@ -522,33 +543,58 @@ static void cxgb4_clear_ftid(struct tid_info *t, int fidx, int family,
spin_unlock_bh(&t->ftid_lock);
}
+static void cxgb4_clear_hpftid(struct tid_info *t, int fidx, int family)
+{
+ spin_lock_bh(&t->ftid_lock);
+
+ if (family == PF_INET)
+ __clear_bit(fidx, t->hpftid_bmap);
+ else
+ bitmap_release_region(t->hpftid_bmap, fidx, 1);
+
+ spin_unlock_bh(&t->ftid_lock);
+}
+
bool cxgb4_filter_prio_in_range(struct net_device *dev, u32 idx, u32 prio)
{
+ struct filter_entry *prev_fe, *next_fe, *tab;
struct adapter *adap = netdev2adap(dev);
- struct filter_entry *prev_fe, *next_fe;
+ u32 prev_ftid, next_ftid, max_tid;
struct tid_info *t = &adap->tids;
- u32 prev_ftid, next_ftid;
+ unsigned long *bmap;
bool valid = true;
+ if (idx < t->nhpftids) {
+ bmap = t->hpftid_bmap;
+ tab = t->hpftid_tab;
+ max_tid = t->nhpftids;
+ } else {
+ idx -= t->nhpftids;
+ bmap = t->ftid_bmap;
+ tab = t->ftid_tab;
+ max_tid = t->nftids;
+ }
+
/* Only insert the rule if both of the following conditions
* are met:
* 1. The immediate previous rule has priority <= @prio.
* 2. The immediate next rule has priority >= @prio.
*/
spin_lock_bh(&t->ftid_lock);
+
/* Don't insert if there's a rule already present at @idx. */
- if (test_bit(idx, t->ftid_bmap)) {
+ if (test_bit(idx, bmap)) {
valid = false;
goto out_unlock;
}
- next_ftid = find_next_bit(t->ftid_bmap, t->nftids, idx);
- if (next_ftid >= t->nftids)
+ next_ftid = find_next_bit(bmap, max_tid, idx);
+ if (next_ftid >= max_tid)
next_ftid = idx;
- next_fe = &adap->tids.ftid_tab[next_ftid];
+ next_fe = &tab[next_ftid];
- prev_ftid = find_last_bit(t->ftid_bmap, idx);
+ prev_ftid = find_last_bit(bmap, idx);
if (prev_ftid >= idx)
prev_ftid = idx;
@@ -558,13 +604,13 @@ bool cxgb4_filter_prio_in_range(struct net_device *dev, u32 idx, u32 prio)
* accordingly.
*/
if (CHELSIO_CHIP_VERSION(adap->params.chip) < CHELSIO_T6) {
- prev_fe = &adap->tids.ftid_tab[prev_ftid & ~0x3];
+ prev_fe = &tab[prev_ftid & ~0x3];
if (!prev_fe->fs.type)
- prev_fe = &adap->tids.ftid_tab[prev_ftid];
+ prev_fe = &tab[prev_ftid];
} else {
- prev_fe = &adap->tids.ftid_tab[prev_ftid & ~0x1];
+ prev_fe = &tab[prev_ftid & ~0x1];
if (!prev_fe->fs.type)
- prev_fe = &adap->tids.ftid_tab[prev_ftid];
+ prev_fe = &tab[prev_ftid];
}
if ((prev_fe->valid && prio < prev_fe->fs.tc_prio) ||
@@ -579,11 +625,16 @@ out_unlock:
/* Delete the filter at a specified index. */
static int del_filter_wr(struct adapter *adapter, int fidx)
{
- struct filter_entry *f = &adapter->tids.ftid_tab[fidx];
struct fw_filter_wr *fwr;
+ struct filter_entry *f;
struct sk_buff *skb;
unsigned int len;
+ if (fidx < adapter->tids.nhpftids)
+ f = &adapter->tids.hpftid_tab[fidx];
+ else
+ f = &adapter->tids.ftid_tab[fidx - adapter->tids.nhpftids];
+
len = sizeof(*fwr);
skb = alloc_skb(len, GFP_KERNEL);
@@ -609,10 +660,15 @@ static int del_filter_wr(struct adapter *adapter, int fidx)
*/
int set_filter_wr(struct adapter *adapter, int fidx)
{
- struct filter_entry *f = &adapter->tids.ftid_tab[fidx];
struct fw_filter2_wr *fwr;
+ struct filter_entry *f;
struct sk_buff *skb;
+ if (fidx < adapter->tids.nhpftids)
+ f = &adapter->tids.hpftid_tab[fidx];
+ else
+ f = &adapter->tids.ftid_tab[fidx - adapter->tids.nhpftids];
+
skb = alloc_skb(sizeof(*fwr), GFP_KERNEL);
if (!skb)
return -ENOMEM;
@@ -762,10 +818,14 @@ int delete_filter(struct adapter *adapter, unsigned int fidx)
struct filter_entry *f;
int ret;
- if (fidx >= adapter->tids.nftids + adapter->tids.nsftids)
+ if (fidx >= adapter->tids.nftids + adapter->tids.nsftids +
+ adapter->tids.nhpftids)
return -EINVAL;
- f = &adapter->tids.ftid_tab[fidx];
+ if (fidx < adapter->tids.nhpftids)
+ f = &adapter->tids.hpftid_tab[fidx];
+ else
+ f = &adapter->tids.ftid_tab[fidx - adapter->tids.nhpftids];
ret = writable_filter(f);
if (ret)
return ret;
@@ -811,12 +871,22 @@ void clear_all_filters(struct adapter *adapter)
struct net_device *dev = adapter->port[0];
unsigned int i;
+ if (adapter->tids.hpftid_tab) {
+ struct filter_entry *f = &adapter->tids.hpftid_tab[0];
+
+ for (i = 0; i < adapter->tids.nhpftids; i++, f++)
+ if (f->valid || f->pending)
+ cxgb4_del_filter(dev, i, &f->fs);
+ }
+
if (adapter->tids.ftid_tab) {
struct filter_entry *f = &adapter->tids.ftid_tab[0];
unsigned int max_ftid = adapter->tids.nftids +
- adapter->tids.nsftids;
+ adapter->tids.nsftids +
+ adapter->tids.nhpftids;
+
/* Clear all TCAM filters */
- for (i = 0; i < max_ftid; i++, f++)
+ for (i = adapter->tids.nhpftids; i < max_ftid; i++, f++)
if (f->valid || f->pending)
cxgb4_del_filter(dev, i, &f->fs);
}
@@ -1319,17 +1389,17 @@ out_err:
* filter specification in order to facilitate signaling completion of the
* operation.
*/
-int __cxgb4_set_filter(struct net_device *dev, int filter_id,
+int __cxgb4_set_filter(struct net_device *dev, int ftid,
struct ch_filter_specification *fs,
struct filter_ctx *ctx)
{
struct adapter *adapter = netdev2adap(dev);
- unsigned int chip_ver = CHELSIO_CHIP_VERSION(adapter->params.chip);
- unsigned int max_fidx, fidx;
- struct filter_entry *f;
+ unsigned int max_fidx, fidx, chip_ver;
+ int iq, ret, filter_id = ftid;
+ struct filter_entry *f, *tab;
u32 iconf;
- int iq, ret;
+ chip_ver = CHELSIO_CHIP_VERSION(adapter->params.chip);
if (fs->hash) {
if (is_hashfilter(adapter))
return cxgb4_set_hash_filter(dev, fs, ctx);
@@ -1338,7 +1408,7 @@ int __cxgb4_set_filter(struct net_device *dev, int filter_id,
return -EINVAL;
}
- max_fidx = adapter->tids.nftids;
+ max_fidx = adapter->tids.nftids + adapter->tids.nhpftids;
if (filter_id != (max_fidx + adapter->tids.nsftids - 1) &&
filter_id >= max_fidx)
return -E2BIG;
@@ -1353,6 +1423,13 @@ int __cxgb4_set_filter(struct net_device *dev, int filter_id,
if (iq < 0)
return iq;
+ if (fs->prio) {
+ tab = &adapter->tids.hpftid_tab[0];
+ } else {
+ tab = &adapter->tids.ftid_tab[0];
+ filter_id = ftid - adapter->tids.nhpftids;
+ }
+
/* IPv6 filters occupy four slots and must be aligned on
* four-slot boundaries. IPv4 filters only occupy a single
* slot and have no alignment requirements but writing a new
@@ -1373,9 +1450,8 @@ int __cxgb4_set_filter(struct net_device *dev, int filter_id,
else
fidx = filter_id & ~0x1;
- if (fidx != filter_id &&
- adapter->tids.ftid_tab[fidx].fs.type) {
- f = &adapter->tids.ftid_tab[fidx];
+ if (fidx != filter_id && tab[fidx].fs.type) {
+ f = &tab[fidx];
if (f->valid) {
dev_err(adapter->pdev_dev,
"Invalid location. IPv6 requires 4 slots and is occupying slots %u to %u\n",
@@ -1399,7 +1475,7 @@ int __cxgb4_set_filter(struct net_device *dev, int filter_id,
*/
for (fidx = filter_id + 1; fidx < filter_id + 4;
fidx++) {
- f = &adapter->tids.ftid_tab[fidx];
+ f = &tab[fidx];
if (f->valid) {
dev_err(adapter->pdev_dev,
"Invalid location. IPv6 requires 4 slots and an IPv4 filter exists at %u\n",
@@ -1415,7 +1491,7 @@ int __cxgb4_set_filter(struct net_device *dev, int filter_id,
return -EINVAL;
/* Check overlapping IPv4 filter slot */
fidx = filter_id + 1;
- f = &adapter->tids.ftid_tab[fidx];
+ f = &tab[fidx];
if (f->valid) {
pr_err("%s: IPv6 filter requires 2 indices. IPv4 filter already present at %d. Please remove IPv4 filter first.\n",
__func__, fidx);
@@ -1427,36 +1503,35 @@ int __cxgb4_set_filter(struct net_device *dev, int filter_id,
	/* Check to make sure that the provided filter index is not
* already in use by someone else
*/
- f = &adapter->tids.ftid_tab[filter_id];
+ f = &tab[filter_id];
if (f->valid)
return -EBUSY;
- fidx = filter_id + adapter->tids.ftid_base;
- ret = cxgb4_set_ftid(&adapter->tids, filter_id,
- fs->type ? PF_INET6 : PF_INET,
- chip_ver);
+ if (fs->prio) {
+ fidx = filter_id + adapter->tids.hpftid_base;
+ ret = cxgb4_set_hpftid(&adapter->tids, filter_id,
+ fs->type ? PF_INET6 : PF_INET);
+ } else {
+ fidx = filter_id + adapter->tids.ftid_base;
+ ret = cxgb4_set_ftid(&adapter->tids, filter_id,
+ fs->type ? PF_INET6 : PF_INET,
+ chip_ver);
+ }
+
if (ret)
return ret;
	/* Check to make sure the filter requested is writable ... */
ret = writable_filter(f);
- if (ret) {
- /* Clear the bits we have set above */
- cxgb4_clear_ftid(&adapter->tids, filter_id,
- fs->type ? PF_INET6 : PF_INET,
- chip_ver);
- return ret;
- }
+ if (ret)
+ goto free_tid;
if (is_t6(adapter->params.chip) && fs->type &&
ipv6_addr_type((const struct in6_addr *)fs->val.lip) !=
IPV6_ADDR_ANY) {
ret = cxgb4_clip_get(dev, (const u32 *)&fs->val.lip, 1);
- if (ret) {
- cxgb4_clear_ftid(&adapter->tids, filter_id, PF_INET6,
- chip_ver);
- return ret;
- }
+ if (ret)
+ goto free_tid;
}
/* Convert the filter specification into our internal format.
@@ -1487,7 +1562,7 @@ int __cxgb4_set_filter(struct net_device *dev, int filter_id,
f->fs.mask.vni,
0, 1, 1);
if (ret < 0)
- goto free_clip;
+ goto free_tid;
f->fs.val.ovlan = ret;
f->fs.mask.ovlan = 0x1ff;
@@ -1501,21 +1576,22 @@ int __cxgb4_set_filter(struct net_device *dev, int filter_id,
*/
f->ctx = ctx;
f->tid = fidx; /* Save the actual tid */
- ret = set_filter_wr(adapter, filter_id);
- if (ret) {
+ ret = set_filter_wr(adapter, ftid);
+ if (ret)
+ goto free_tid;
+
+ return ret;
+
+free_tid:
+ if (f->fs.prio)
+ cxgb4_clear_hpftid(&adapter->tids, filter_id,
+ fs->type ? PF_INET6 : PF_INET);
+ else
cxgb4_clear_ftid(&adapter->tids, filter_id,
fs->type ? PF_INET6 : PF_INET,
chip_ver);
- clear_filter(adapter, f);
- }
-
- return ret;
-free_clip:
- if (is_t6(adapter->params.chip) && f->fs.type)
- cxgb4_clip_release(f->dev, (const u32 *)&f->fs.val.lip, 1);
- cxgb4_clear_ftid(&adapter->tids, filter_id,
- fs->type ? PF_INET6 : PF_INET, chip_ver);
+ clear_filter(adapter, f);
return ret;
}
@@ -1537,7 +1613,7 @@ static int cxgb4_del_hash_filter(struct net_device *dev, int filter_id,
netdev_dbg(dev, "%s: filter_id = %d ; nftids = %d\n",
__func__, filter_id, adapter->tids.nftids);
- if (filter_id > adapter->tids.ntids)
+ if (tid_out_of_range(t, filter_id))
return -E2BIG;
f = lookup_tid(t, filter_id);
@@ -1590,11 +1666,11 @@ int __cxgb4_del_filter(struct net_device *dev, int filter_id,
struct filter_ctx *ctx)
{
struct adapter *adapter = netdev2adap(dev);
- unsigned int chip_ver = CHELSIO_CHIP_VERSION(adapter->params.chip);
+ unsigned int max_fidx, chip_ver;
struct filter_entry *f;
- unsigned int max_fidx;
int ret;
+ chip_ver = CHELSIO_CHIP_VERSION(adapter->params.chip);
if (fs && fs->hash) {
if (is_hashfilter(adapter))
return cxgb4_del_hash_filter(dev, filter_id, ctx);
@@ -1603,21 +1679,31 @@ int __cxgb4_del_filter(struct net_device *dev, int filter_id,
return -EINVAL;
}
- max_fidx = adapter->tids.nftids;
+ max_fidx = adapter->tids.nftids + adapter->tids.nhpftids;
if (filter_id != (max_fidx + adapter->tids.nsftids - 1) &&
filter_id >= max_fidx)
return -E2BIG;
- f = &adapter->tids.ftid_tab[filter_id];
+ if (filter_id < adapter->tids.nhpftids)
+ f = &adapter->tids.hpftid_tab[filter_id];
+ else
+ f = &adapter->tids.ftid_tab[filter_id - adapter->tids.nhpftids];
+
ret = writable_filter(f);
if (ret)
return ret;
if (f->valid) {
f->ctx = ctx;
- cxgb4_clear_ftid(&adapter->tids, filter_id,
- f->fs.type ? PF_INET6 : PF_INET,
- chip_ver);
+ if (f->fs.prio)
+ cxgb4_clear_hpftid(&adapter->tids,
+ f->tid - adapter->tids.hpftid_base,
+ f->fs.type ? PF_INET6 : PF_INET);
+ else
+ cxgb4_clear_ftid(&adapter->tids,
+ f->tid - adapter->tids.ftid_base,
+ f->fs.type ? PF_INET6 : PF_INET,
+ chip_ver);
return del_filter_wr(adapter, filter_id);
}
@@ -1842,11 +1928,18 @@ void filter_rpl(struct adapter *adap, const struct cpl_set_tcb_rpl *rpl)
max_fidx = adap->tids.nftids + adap->tids.nsftids;
/* Get the corresponding filter entry for this tid */
if (adap->tids.ftid_tab) {
- /* Check this in normal filter region */
- idx = tid - adap->tids.ftid_base;
- if (idx >= max_fidx)
- return;
- f = &adap->tids.ftid_tab[idx];
+ idx = tid - adap->tids.hpftid_base;
+ if (idx < adap->tids.nhpftids) {
+ f = &adap->tids.hpftid_tab[idx];
+ } else {
+ /* Check this in normal filter region */
+ idx = tid - adap->tids.ftid_base;
+ if (idx >= max_fidx)
+ return;
+ f = &adap->tids.ftid_tab[idx];
+ idx += adap->tids.nhpftids;
+ }
+
if (f->tid != tid)
return;
}
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
index 12ff69b3ba91..649842a8aa28 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
@@ -804,6 +804,26 @@ static int setup_ppod_edram(struct adapter *adap)
return 0;
}
+static void adap_config_hpfilter(struct adapter *adapter)
+{
+ u32 param, val = 0;
+ int ret;
+
+	/* Enable the HP filter region. Older FW will fail this request,
+	 * which is fine.
+	 */
+ param = FW_PARAM_DEV(HPFILTER_REGION_SUPPORT);
+ ret = t4_set_params(adapter, adapter->mbox, adapter->pf, 0,
+ 1, &param, &val);
+
+	/* An error here just means the FW predates HP filter support;
+	 * that is not a problem, so don't return an error.
+	 */
+ if (ret < 0)
+ dev_err(adapter->pdev_dev,
+ "HP filter region isn't supported by FW\n");
+}
+
/**
* cxgb4_write_rss - write the RSS table for a given port
* @pi: the port
@@ -1427,8 +1447,8 @@ static void mk_tid_release(struct sk_buff *skb, unsigned int chan,
static void cxgb4_queue_tid_release(struct tid_info *t, unsigned int chan,
unsigned int tid)
{
- void **p = &t->tid_tab[tid];
struct adapter *adap = container_of(t, struct adapter, tids);
+ void **p = &t->tid_tab[tid - t->tid_base];
spin_lock_bh(&adap->tid_release_lock);
*p = adap->tid_release_head;
@@ -1480,13 +1500,13 @@ static void process_tid_release_list(struct work_struct *work)
void cxgb4_remove_tid(struct tid_info *t, unsigned int chan, unsigned int tid,
unsigned short family)
{
- struct sk_buff *skb;
struct adapter *adap = container_of(t, struct adapter, tids);
+ struct sk_buff *skb;
- WARN_ON(tid >= t->ntids);
+ WARN_ON(tid_out_of_range(&adap->tids, tid));
- if (t->tid_tab[tid]) {
- t->tid_tab[tid] = NULL;
+ if (t->tid_tab[tid - adap->tids.tid_base]) {
+ t->tid_tab[tid - adap->tids.tid_base] = NULL;
atomic_dec(&t->conns_in_use);
if (t->hash_base && (tid >= t->hash_base)) {
if (family == AF_INET6)
@@ -1518,6 +1538,7 @@ static int tid_init(struct tid_info *t)
struct adapter *adap = container_of(t, struct adapter, tids);
unsigned int max_ftids = t->nftids + t->nsftids;
unsigned int natids = t->natids;
+ unsigned int hpftid_bmap_size;
unsigned int eotid_bmap_size;
unsigned int stid_bmap_size;
unsigned int ftid_bmap_size;
@@ -1525,12 +1546,15 @@ static int tid_init(struct tid_info *t)
stid_bmap_size = BITS_TO_LONGS(t->nstids + t->nsftids);
ftid_bmap_size = BITS_TO_LONGS(t->nftids);
+ hpftid_bmap_size = BITS_TO_LONGS(t->nhpftids);
eotid_bmap_size = BITS_TO_LONGS(t->neotids);
size = t->ntids * sizeof(*t->tid_tab) +
natids * sizeof(*t->atid_tab) +
t->nstids * sizeof(*t->stid_tab) +
t->nsftids * sizeof(*t->stid_tab) +
stid_bmap_size * sizeof(long) +
+ t->nhpftids * sizeof(*t->hpftid_tab) +
+ hpftid_bmap_size * sizeof(long) +
max_ftids * sizeof(*t->ftid_tab) +
ftid_bmap_size * sizeof(long) +
t->neotids * sizeof(*t->eotid_tab) +
@@ -1543,7 +1567,9 @@ static int tid_init(struct tid_info *t)
t->atid_tab = (union aopen_entry *)&t->tid_tab[t->ntids];
t->stid_tab = (struct serv_entry *)&t->atid_tab[natids];
t->stid_bmap = (unsigned long *)&t->stid_tab[t->nstids + t->nsftids];
- t->ftid_tab = (struct filter_entry *)&t->stid_bmap[stid_bmap_size];
+ t->hpftid_tab = (struct filter_entry *)&t->stid_bmap[stid_bmap_size];
+ t->hpftid_bmap = (unsigned long *)&t->hpftid_tab[t->nhpftids];
+ t->ftid_tab = (struct filter_entry *)&t->hpftid_bmap[hpftid_bmap_size];
t->ftid_bmap = (unsigned long *)&t->ftid_tab[max_ftids];
t->eotid_tab = (struct eotid_entry *)&t->ftid_bmap[ftid_bmap_size];
t->eotid_bmap = (unsigned long *)&t->eotid_tab[t->neotids];
@@ -1578,6 +1604,8 @@ static int tid_init(struct tid_info *t)
bitmap_zero(t->eotid_bmap, t->neotids);
}
+ if (t->nhpftids)
+ bitmap_zero(t->hpftid_bmap, t->nhpftids);
bitmap_zero(t->ftid_bmap, t->nftids);
return 0;
}
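(All of these tables live in one allocation that tid_init() carves up with pointer arithmetic, so adding the HP tables means both growing the size computation and splicing two new pointers into the chain. The resulting layout, read directly off the assignments above:

  tid_tab | atid_tab | stid_tab | stid_bmap | hpftid_tab | hpftid_bmap |
  ftid_tab | ftid_bmap | eotid_tab | eotid_bmap

Keeping the carve order and the size computation in sync is the whole trick; a mismatch would silently corrupt the neighboring table.)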
@@ -3135,9 +3163,9 @@ static int cxgb_set_tx_maxrate(struct net_device *dev, int index, u32 rate)
{
struct port_info *pi = netdev_priv(dev);
struct adapter *adap = pi->adapter;
+ struct ch_sched_queue qe = { 0 };
+ struct ch_sched_params p = { 0 };
struct sched_class *e;
- struct ch_sched_params p;
- struct ch_sched_queue qe;
u32 req_rate;
int err = 0;
@@ -3154,6 +3182,15 @@ static int cxgb_set_tx_maxrate(struct net_device *dev, int index, u32 rate)
return -EINVAL;
}
+ qe.queue = index;
+ e = cxgb4_sched_queue_lookup(dev, &qe);
+ if (e && e->info.u.params.level != SCHED_CLASS_LEVEL_CL_RL) {
+ dev_err(adap->pdev_dev,
+ "Queue %u already bound to class %u of type: %u\n",
+ index, e->idx, e->info.u.params.level);
+ return -EBUSY;
+ }
+
/* Convert from Mbps to Kbps */
req_rate = rate * 1000;
@@ -3183,7 +3220,6 @@ static int cxgb_set_tx_maxrate(struct net_device *dev, int index, u32 rate)
return 0;
/* Fetch any available unused or matching scheduling class */
- memset(&p, 0, sizeof(p));
p.type = SCHED_CLASS_TYPE_PACKET;
p.u.params.level = SCHED_CLASS_LEVEL_CL_RL;
p.u.params.mode = SCHED_CLASS_MODE_CLASS;
@@ -4351,6 +4387,7 @@ static int adap_init0_config(struct adapter *adapter, int reset)
"HMA configuration failed with error %d\n", ret);
if (is_t6(adapter->params.chip)) {
+ adap_config_hpfilter(adapter);
ret = setup_ppod_edram(adapter);
if (!ret)
dev_info(adapter->pdev_dev, "Successfully enabled "
@@ -4660,16 +4697,6 @@ static int adap_init0(struct adapter *adap, int vpd_skip)
/*
* Grab some of our basic fundamental operating parameters.
*/
-#define FW_PARAM_DEV(param) \
- (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DEV) | \
- FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DEV_##param))
-
-#define FW_PARAM_PFVF(param) \
- FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_PFVF) | \
- FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_PFVF_##param)| \
- FW_PARAMS_PARAM_Y_V(0) | \
- FW_PARAMS_PARAM_Z_V(0)
-
params[0] = FW_PARAM_PFVF(EQ_START);
params[1] = FW_PARAM_PFVF(L2T_START);
params[2] = FW_PARAM_PFVF(L2T_END);
@@ -4687,6 +4714,16 @@ static int adap_init0(struct adapter *adap, int vpd_skip)
adap->sge.ingr_start = val[5];
if (CHELSIO_CHIP_VERSION(adap->params.chip) > CHELSIO_T5) {
+ params[0] = FW_PARAM_PFVF(HPFILTER_START);
+ params[1] = FW_PARAM_PFVF(HPFILTER_END);
+ ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 2,
+ params, val);
+ if (ret < 0)
+ goto bye;
+
+ adap->tids.hpftid_base = val[0];
+ adap->tids.nhpftids = val[1] - val[0] + 1;
+
/* Read the raw mps entries. In T6, the last 2 tcam entries
* are reserved for raw mac addresses (rawf = 2, one per port).
*/
@@ -4698,6 +4735,9 @@ static int adap_init0(struct adapter *adap, int vpd_skip)
adap->rawf_start = val[0];
adap->rawf_cnt = val[1] - val[0] + 1;
}
+
+ adap->tids.tid_base =
+ t4_read_reg(adap, LE_DB_ACTIVE_TABLE_START_INDEX_A);
}
/* qids (ingress/egress) returned from firmware can be anywhere
@@ -5050,8 +5090,6 @@ static int adap_init0(struct adapter *adap, int vpd_skip)
}
adap->params.crypto = ntohs(caps_cmd.cryptocaps);
}
-#undef FW_PARAM_PFVF
-#undef FW_PARAM_DEV
/* The MTU/MSS Table is initialized by now, so load their values. If
* we're initializing the adapter, then we'll make any modifications
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c
index 0fa80bef575d..bb5513bdd293 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c
@@ -672,10 +672,14 @@ int cxgb4_tc_flower_replace(struct net_device *dev,
* 0 to driver. However, the hardware TCAM index
* starts from 0. Hence, the -1 here.
*/
- if (cls->common.prio <= adap->tids.nftids)
+ if (cls->common.prio <= (adap->tids.nftids +
+ adap->tids.nhpftids)) {
fidx = cls->common.prio - 1;
- else
+ if (fidx < adap->tids.nhpftids)
+ fs->prio = 1;
+ } else {
fidx = cxgb4_get_free_ftid(dev, inet_family);
+ }
/* Only insert FLOWER rule if its priority doesn't
* conflict with existing rules in the LETCAM.
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_matchall.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_matchall.c
index 102b370fbd3e..1b7681a4eb32 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_matchall.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_matchall.c
@@ -15,6 +15,8 @@ static int cxgb4_matchall_egress_validate(struct net_device *dev,
struct flow_action *actions = &cls->rule->action;
struct port_info *pi = netdev2pinfo(dev);
struct flow_action_entry *entry;
+ struct ch_sched_queue qe;
+ struct sched_class *e;
u64 max_link_rate;
u32 i, speed;
int ret;
@@ -60,9 +62,61 @@ static int cxgb4_matchall_egress_validate(struct net_device *dev,
}
}
+ for (i = 0; i < pi->nqsets; i++) {
+ memset(&qe, 0, sizeof(qe));
+ qe.queue = i;
+
+ e = cxgb4_sched_queue_lookup(dev, &qe);
+ if (e && e->info.u.params.level != SCHED_CLASS_LEVEL_CH_RL) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Some queues are already bound to different class");
+ return -EBUSY;
+ }
+ }
+
return 0;
}
+static int cxgb4_matchall_tc_bind_queues(struct net_device *dev, u32 tc)
+{
+ struct port_info *pi = netdev2pinfo(dev);
+ struct ch_sched_queue qe;
+ int ret;
+ u32 i;
+
+ for (i = 0; i < pi->nqsets; i++) {
+ qe.queue = i;
+ qe.class = tc;
+ ret = cxgb4_sched_class_bind(dev, &qe, SCHED_QUEUE);
+ if (ret)
+ goto out_free;
+ }
+
+ return 0;
+
+out_free:
+ while (i--) {
+ qe.queue = i;
+ qe.class = SCHED_CLS_NONE;
+ cxgb4_sched_class_unbind(dev, &qe, SCHED_QUEUE);
+ }
+
+ return ret;
+}
+
+static void cxgb4_matchall_tc_unbind_queues(struct net_device *dev)
+{
+ struct port_info *pi = netdev2pinfo(dev);
+ struct ch_sched_queue qe;
+ u32 i;
+
+ for (i = 0; i < pi->nqsets; i++) {
+ qe.queue = i;
+ qe.class = SCHED_CLS_NONE;
+ cxgb4_sched_class_unbind(dev, &qe, SCHED_QUEUE);
+ }
+}
+
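(cxgb4_matchall_tc_bind_queues() uses the standard partial-failure unwind: on error at queue i, `while (i--)` revisits exactly the queues already bound, i-1 down to 0, never the one that failed. The generic shape, names hypothetical:

	for (i = 0; i < n; i++) {
		err = bind(i);
		if (err)
			goto out_unwind;
	}
	return 0;

out_unwind:
	while (i--)
		unbind(i);
	return err;

The same idiom appears throughout the kernel wherever a loop acquires resources one at a time.)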
static int cxgb4_matchall_alloc_tc(struct net_device *dev,
struct tc_cls_matchall_offload *cls)
{
@@ -83,6 +137,7 @@ static int cxgb4_matchall_alloc_tc(struct net_device *dev,
struct adapter *adap = netdev2adap(dev);
struct flow_action_entry *entry;
struct sched_class *e;
+ int ret;
u32 i;
tc_port_matchall = &adap->tc_matchall->port_matchall[pi->port_id];
@@ -101,10 +156,21 @@ static int cxgb4_matchall_alloc_tc(struct net_device *dev,
return -ENOMEM;
}
+ ret = cxgb4_matchall_tc_bind_queues(dev, e->idx);
+ if (ret) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Could not bind queues to traffic class");
+ goto out_free;
+ }
+
tc_port_matchall->egress.hwtc = e->idx;
tc_port_matchall->egress.cookie = cls->cookie;
tc_port_matchall->egress.state = CXGB4_MATCHALL_STATE_ENABLED;
return 0;
+
+out_free:
+ cxgb4_sched_class_free(dev, e->idx);
+ return ret;
}
static void cxgb4_matchall_free_tc(struct net_device *dev)
@@ -114,6 +180,7 @@ static void cxgb4_matchall_free_tc(struct net_device *dev)
struct adapter *adap = netdev2adap(dev);
tc_port_matchall = &adap->tc_matchall->port_matchall[pi->port_id];
+ cxgb4_matchall_tc_unbind_queues(dev);
cxgb4_sched_class_free(dev, tc_port_matchall->egress.hwtc);
tc_port_matchall->egress.hwtc = SCHED_CLS_NONE;
@@ -137,7 +204,7 @@ static int cxgb4_matchall_alloc_filter(struct net_device *dev,
* -1 here. 1 slot is enough to create a wildcard matchall
* VIID rule.
*/
- if (cls->common.prio <= adap->tids.nftids)
+ if (cls->common.prio <= (adap->tids.nftids + adap->tids.nhpftids))
fidx = cls->common.prio - 1;
else
fidx = cxgb4_get_free_ftid(dev, PF_INET);
@@ -156,6 +223,8 @@ static int cxgb4_matchall_alloc_filter(struct net_device *dev,
fs = &tc_port_matchall->ingress.fs;
memset(fs, 0, sizeof(*fs));
+ if (fidx < adap->tids.nhpftids)
+ fs->prio = 1;
fs->tc_prio = cls->common.prio;
fs->tc_cookie = cls->cookie;
fs->hitcnts = 1;
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_mqprio.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_mqprio.c
index 8971dddcdb7a..ec3eb45ee3b4 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_mqprio.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_mqprio.c
@@ -12,8 +12,9 @@ static int cxgb4_mqprio_validate(struct net_device *dev,
struct port_info *pi = netdev2pinfo(dev);
struct adapter *adap = netdev2adap(dev);
u32 speed, qcount = 0, qoffset = 0;
+ u32 start_a, start_b, end_a, end_b;
int ret;
- u8 i;
+ u8 i, j;
if (!mqprio->qopt.num_tc)
return 0;
@@ -47,6 +48,31 @@ static int cxgb4_mqprio_validate(struct net_device *dev,
qoffset = max_t(u16, mqprio->qopt.offset[i], qoffset);
qcount += mqprio->qopt.count[i];
+ start_a = mqprio->qopt.offset[i];
+ end_a = start_a + mqprio->qopt.count[i] - 1;
+ for (j = i + 1; j < mqprio->qopt.num_tc; j++) {
+ start_b = mqprio->qopt.offset[j];
+ end_b = start_b + mqprio->qopt.count[j] - 1;
+
+ /* If queue count is 0, then the traffic
+ * belonging to this class will not use
+ * ETHOFLD queues. So, no need to validate
+ * further.
+ */
+ if (!mqprio->qopt.count[i])
+ break;
+
+ if (!mqprio->qopt.count[j])
+ continue;
+
+ if (max_t(u32, start_a, start_b) <=
+ min_t(u32, end_a, end_b)) {
+ netdev_err(dev,
+ "Queues can't overlap across tc\n");
+ return -EINVAL;
+ }
+ }
+
/* Convert byte per second to bits per second */
min_rate += (mqprio->min_rate[i] * 8);
max_rate += (mqprio->max_rate[i] * 8);
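(The new nested loop rejects overlapping queue ranges with the standard closed-interval test: [start_a, end_a] and [start_b, end_b] intersect iff max(start_a, start_b) <= min(end_a, end_b). A worked example, mine rather than the patch's: tc0 with offset 0 and count 4 covers queues 0..3, tc1 with offset 2 and count 4 covers 2..5; max(0, 2) = 2 <= min(3, 5) = 3, so the overlap is caught and the configuration fails with -EINVAL.)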
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_u32.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_u32.c
index 133f8623ba86..269b8d9e25e0 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_u32.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_u32.c
@@ -176,7 +176,7 @@ int cxgb4_config_knode(struct net_device *dev, struct tc_cls_u32_offload *cls)
/* Only insert U32 rule if its priority doesn't conflict with
* existing rules in the LETCAM.
*/
- if (filter_id >= adapter->tids.nftids ||
+ if (filter_id >= adapter->tids.nftids + adapter->tids.nhpftids ||
!cxgb4_filter_prio_in_range(dev, filter_id, cls->common.prio)) {
NL_SET_ERR_MSG_MOD(extack,
"No free LETCAM index available");
@@ -199,6 +199,8 @@ int cxgb4_config_knode(struct net_device *dev, struct tc_cls_u32_offload *cls)
memset(&fs, 0, sizeof(fs));
+ if (filter_id < adapter->tids.nhpftids)
+ fs.prio = 1;
fs.tc_prio = cls->common.prio;
fs.tc_cookie = cls->knode.handle;
@@ -355,6 +357,7 @@ int cxgb4_delete_knode(struct net_device *dev, struct tc_cls_u32_offload *cls)
unsigned int filter_id, max_tids, i, j;
struct cxgb4_link *link = NULL;
struct cxgb4_tc_u32_table *t;
+ struct filter_entry *f;
u32 handle, uhtid;
int ret;
@@ -363,8 +366,15 @@ int cxgb4_delete_knode(struct net_device *dev, struct tc_cls_u32_offload *cls)
/* Fetch the location to delete the filter. */
filter_id = TC_U32_NODE(cls->knode.handle) - 1;
- if (filter_id >= adapter->tids.nftids ||
- cls->knode.handle != adapter->tids.ftid_tab[filter_id].fs.tc_cookie)
+ if (filter_id >= adapter->tids.nftids + adapter->tids.nhpftids)
+ return -ERANGE;
+
+ if (filter_id < adapter->tids.nhpftids)
+ f = &adapter->tids.hpftid_tab[filter_id];
+ else
+ f = &adapter->tids.ftid_tab[filter_id - adapter->tids.nhpftids];
+
+ if (cls->knode.handle != f->fs.tc_cookie)
return -ERANGE;
t = adapter->tc_u32;
@@ -445,7 +455,7 @@ void cxgb4_cleanup_tc_u32(struct adapter *adap)
struct cxgb4_tc_u32_table *cxgb4_init_tc_u32(struct adapter *adap)
{
- unsigned int max_tids = adap->tids.nftids;
+ unsigned int max_tids = adap->tids.nftids + adap->tids.nhpftids;
struct cxgb4_tc_u32_table *t;
unsigned int i;
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h
index 861b25d28ed6..d9d27bc1ae67 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h
@@ -99,6 +99,7 @@ struct eotid_entry {
*/
struct tid_info {
void **tid_tab;
+ unsigned int tid_base;
unsigned int ntids;
struct serv_entry *stid_tab;
@@ -111,6 +112,11 @@ struct tid_info {
unsigned int natids;
unsigned int atid_base;
+ struct filter_entry *hpftid_tab;
+ unsigned long *hpftid_bmap;
+ unsigned int nhpftids;
+ unsigned int hpftid_base;
+
struct filter_entry *ftid_tab;
unsigned long *ftid_bmap;
unsigned int nftids;
@@ -147,9 +153,15 @@ struct tid_info {
static inline void *lookup_tid(const struct tid_info *t, unsigned int tid)
{
+ tid -= t->tid_base;
return tid < t->ntids ? t->tid_tab[tid] : NULL;
}
+static inline bool tid_out_of_range(const struct tid_info *t, unsigned int tid)
+{
+ return ((tid - t->tid_base) >= t->ntids);
+}
+
static inline void *lookup_atid(const struct tid_info *t, unsigned int atid)
{
return atid < t->natids ? t->atid_tab[atid].data : NULL;
@@ -171,7 +183,7 @@ static inline void *lookup_stid(const struct tid_info *t, unsigned int stid)
static inline void cxgb4_insert_tid(struct tid_info *t, void *data,
unsigned int tid, unsigned short family)
{
- t->tid_tab[tid] = data;
+ t->tid_tab[tid - t->tid_base] = data;
if (t->hash_base && (tid >= t->hash_base)) {
if (family == AF_INET6)
atomic_add(2, &t->hash_tids_in_use);
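(On T6 the active TID region no longer starts at hardware index 0; adap_init0() above now reads the base from LE_DB_ACTIVE_TABLE_START_INDEX_A, and every tid_tab access is rebased by tid_base. One subtlety worth noting: because the subtraction in tid_out_of_range() is unsigned, a tid below tid_base wraps to a huge value and is rejected by the same single comparison; e.g. with tid_base = 128 and ntids = 1024, tid = 64 gives 64 - 128 = 0xffffffc0, which is >= ntids.)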
diff --git a/drivers/net/ethernet/chelsio/cxgb4/l2t.c b/drivers/net/ethernet/chelsio/cxgb4/l2t.c
index e9e45006632d..1a16449e9deb 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/l2t.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/l2t.c
@@ -678,8 +678,7 @@ static void *l2t_seq_start(struct seq_file *seq, loff_t *pos)
static void *l2t_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
v = l2t_get_idx(seq, *pos);
- if (v)
- ++*pos;
+ ++(*pos);
return v;
}
diff --git a/drivers/net/ethernet/chelsio/cxgb4/sched.c b/drivers/net/ethernet/chelsio/cxgb4/sched.c
index 3e61bd5d0c29..cebe1412d960 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/sched.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/sched.c
@@ -165,6 +165,22 @@ static void *t4_sched_entry_lookup(struct port_info *pi,
return found;
}
+struct sched_class *cxgb4_sched_queue_lookup(struct net_device *dev,
+ struct ch_sched_queue *p)
+{
+ struct port_info *pi = netdev2pinfo(dev);
+ struct sched_queue_entry *qe = NULL;
+ struct adapter *adap = pi->adapter;
+ struct sge_eth_txq *txq;
+
+ if (p->queue < 0 || p->queue >= pi->nqsets)
+ return NULL;
+
+ txq = &adap->sge.ethtxq[pi->first_qset + p->queue];
+ qe = t4_sched_entry_lookup(pi, SCHED_QUEUE, txq->q.cntxt_id);
+ return qe ? &pi->sched_tbl->tab[qe->param.class] : NULL;
+}
+
static int t4_sched_queue_unbind(struct port_info *pi, struct ch_sched_queue *p)
{
struct sched_queue_entry *qe = NULL;
diff --git a/drivers/net/ethernet/chelsio/cxgb4/sched.h b/drivers/net/ethernet/chelsio/cxgb4/sched.h
index e92ff68bdd0a..5cc74a5a1774 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/sched.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/sched.h
@@ -103,6 +103,8 @@ static inline bool valid_class_id(struct net_device *dev, u8 class_id)
return true;
}
+struct sched_class *cxgb4_sched_queue_lookup(struct net_device *dev,
+ struct ch_sched_queue *p);
int cxgb4_sched_class_bind(struct net_device *dev, void *arg,
enum sched_bind_type type);
int cxgb4_sched_class_unbind(struct net_device *dev, void *arg,
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h b/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h
index ac4fb43bdec6..accad1101ad1 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h
@@ -1321,6 +1321,7 @@ enum fw_params_param_dev {
FW_PARAMS_PARAM_DEV_RDMA_WRITE_WITH_IMM = 0x21,
FW_PARAMS_PARAM_DEV_PPOD_EDRAM = 0x23,
FW_PARAMS_PARAM_DEV_RI_WRITE_CMPL_WR = 0x24,
+ FW_PARAMS_PARAM_DEV_HPFILTER_REGION_SUPPORT = 0x26,
FW_PARAMS_PARAM_DEV_OPAQUE_VIID_SMT_EXTN = 0x27,
FW_PARAMS_PARAM_DEV_HASHFILTER_WITH_OFLD = 0x28,
FW_PARAMS_PARAM_DEV_DBQ_TIMER = 0x29,
diff --git a/drivers/net/ethernet/cirrus/cs89x0.c b/drivers/net/ethernet/cirrus/cs89x0.c
index c9aebcde403a..33ace3307059 100644
--- a/drivers/net/ethernet/cirrus/cs89x0.c
+++ b/drivers/net/ethernet/cirrus/cs89x0.c
@@ -1128,7 +1128,7 @@ net_get_stats(struct net_device *dev)
return &dev->stats;
}
-static void net_timeout(struct net_device *dev)
+static void net_timeout(struct net_device *dev, unsigned int txqueue)
{
/* If we get here, some higher level has decided we are broken.
There should really be a "kick me" function call instead. */
diff --git a/drivers/net/ethernet/cisco/enic/enic_main.c b/drivers/net/ethernet/cisco/enic/enic_main.c
index acb2856936d2..bbd7b3175f09 100644
--- a/drivers/net/ethernet/cisco/enic/enic_main.c
+++ b/drivers/net/ethernet/cisco/enic/enic_main.c
@@ -1095,7 +1095,7 @@ static void enic_set_rx_mode(struct net_device *netdev)
}
/* netif_tx_lock held, BHs disabled */
-static void enic_tx_timeout(struct net_device *netdev)
+static void enic_tx_timeout(struct net_device *netdev, unsigned int txqueue)
{
struct enic *enic = netdev_priv(netdev);
schedule_work(&enic->tx_hang_reset);
diff --git a/drivers/net/ethernet/cortina/gemini.c b/drivers/net/ethernet/cortina/gemini.c
index 2814b96751b4..f30fa8e6ef80 100644
--- a/drivers/net/ethernet/cortina/gemini.c
+++ b/drivers/net/ethernet/cortina/gemini.c
@@ -1298,7 +1298,7 @@ out_drop:
return NETDEV_TX_OK;
}
-static void gmac_tx_timeout(struct net_device *netdev)
+static void gmac_tx_timeout(struct net_device *netdev, unsigned int txqueue)
{
netdev_err(netdev, "Tx timeout\n");
gmac_dump_dma_state(netdev);
diff --git a/drivers/net/ethernet/davicom/dm9000.c b/drivers/net/ethernet/davicom/dm9000.c
index cce90b5925d9..1ea3372775e6 100644
--- a/drivers/net/ethernet/davicom/dm9000.c
+++ b/drivers/net/ethernet/davicom/dm9000.c
@@ -964,7 +964,7 @@ dm9000_init_dm9000(struct net_device *dev)
}
/* Our watchdog timed out. Called by the networking layer */
-static void dm9000_timeout(struct net_device *dev)
+static void dm9000_timeout(struct net_device *dev, unsigned int txqueue)
{
struct board_info *db = netdev_priv(dev);
u8 reg_save;
diff --git a/drivers/net/ethernet/dec/tulip/de2104x.c b/drivers/net/ethernet/dec/tulip/de2104x.c
index f1a2da15dd0a..d305d1b24b0a 100644
--- a/drivers/net/ethernet/dec/tulip/de2104x.c
+++ b/drivers/net/ethernet/dec/tulip/de2104x.c
@@ -1436,7 +1436,7 @@ static int de_close (struct net_device *dev)
return 0;
}
-static void de_tx_timeout (struct net_device *dev)
+static void de_tx_timeout (struct net_device *dev, unsigned int txqueue)
{
struct de_private *de = netdev_priv(dev);
const int irq = de->pdev->irq;
@@ -2039,7 +2039,7 @@ static int de_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
}
/* remap CSR registers */
- regs = ioremap_nocache(pciaddr, DE_REGS_SIZE);
+ regs = ioremap(pciaddr, DE_REGS_SIZE);
if (!regs) {
rc = -EIO;
pr_err("Cannot map PCI MMIO (%llx@%lx) on pci dev %s\n",
diff --git a/drivers/net/ethernet/dec/tulip/dmfe.c b/drivers/net/ethernet/dec/tulip/dmfe.c
index 0efdbd1a4a6f..32d470d4122a 100644
--- a/drivers/net/ethernet/dec/tulip/dmfe.c
+++ b/drivers/net/ethernet/dec/tulip/dmfe.c
@@ -2214,15 +2214,16 @@ static int __init dmfe_init_module(void)
if (cr6set)
dmfe_cr6_user_set = cr6set;
- switch(mode) {
- case DMFE_10MHF:
+ switch (mode) {
+ case DMFE_10MHF:
case DMFE_100MHF:
case DMFE_10MFD:
case DMFE_100MFD:
case DMFE_1M_HPNA:
dmfe_media_mode = mode;
break;
- default:dmfe_media_mode = DMFE_AUTO;
+ default:
+ dmfe_media_mode = DMFE_AUTO;
break;
}
diff --git a/drivers/net/ethernet/dec/tulip/tulip_core.c b/drivers/net/ethernet/dec/tulip/tulip_core.c
index 3e3e08698876..9e9d9eee29d9 100644
--- a/drivers/net/ethernet/dec/tulip/tulip_core.c
+++ b/drivers/net/ethernet/dec/tulip/tulip_core.c
@@ -255,7 +255,7 @@ MODULE_DEVICE_TABLE(pci, tulip_pci_tbl);
const char tulip_media_cap[32] =
{0,0,0,16, 3,19,16,24, 27,4,7,5, 0,20,23,20, 28,31,0,0, };
-static void tulip_tx_timeout(struct net_device *dev);
+static void tulip_tx_timeout(struct net_device *dev, unsigned int txqueue);
static void tulip_init_ring(struct net_device *dev);
static void tulip_free_ring(struct net_device *dev);
static netdev_tx_t tulip_start_xmit(struct sk_buff *skb,
@@ -534,7 +534,7 @@ free_ring:
}
-static void tulip_tx_timeout(struct net_device *dev)
+static void tulip_tx_timeout(struct net_device *dev, unsigned int txqueue)
{
struct tulip_private *tp = netdev_priv(dev);
void __iomem *ioaddr = tp->base_addr;
diff --git a/drivers/net/ethernet/dec/tulip/uli526x.c b/drivers/net/ethernet/dec/tulip/uli526x.c
index b1f30b194300..117ffe08800d 100644
--- a/drivers/net/ethernet/dec/tulip/uli526x.c
+++ b/drivers/net/ethernet/dec/tulip/uli526x.c
@@ -1809,8 +1809,8 @@ static int __init uli526x_init_module(void)
if (cr6set)
uli526x_cr6_user_set = cr6set;
- switch (mode) {
- case ULI526X_10MHF:
+ switch (mode) {
+ case ULI526X_10MHF:
case ULI526X_100MHF:
case ULI526X_10MFD:
case ULI526X_100MFD:
diff --git a/drivers/net/ethernet/dec/tulip/winbond-840.c b/drivers/net/ethernet/dec/tulip/winbond-840.c
index 70cb2d689c2c..7f136488e67c 100644
--- a/drivers/net/ethernet/dec/tulip/winbond-840.c
+++ b/drivers/net/ethernet/dec/tulip/winbond-840.c
@@ -331,7 +331,7 @@ static void netdev_timer(struct timer_list *t);
static void init_rxtx_rings(struct net_device *dev);
static void free_rxtx_rings(struct netdev_private *np);
static void init_registers(struct net_device *dev);
-static void tx_timeout(struct net_device *dev);
+static void tx_timeout(struct net_device *dev, unsigned int txqueue);
static int alloc_ringdesc(struct net_device *dev);
static void free_ringdesc(struct netdev_private *np);
static netdev_tx_t start_tx(struct sk_buff *skb, struct net_device *dev);
@@ -921,7 +921,7 @@ static void init_registers(struct net_device *dev)
iowrite32(0, ioaddr + RxStartDemand);
}
-static void tx_timeout(struct net_device *dev)
+static void tx_timeout(struct net_device *dev, unsigned int txqueue)
{
struct netdev_private *np = netdev_priv(dev);
void __iomem *ioaddr = np->base_addr;
diff --git a/drivers/net/ethernet/dlink/dl2k.c b/drivers/net/ethernet/dlink/dl2k.c
index 55e720d2ea0c..26c5da032b1e 100644
--- a/drivers/net/ethernet/dlink/dl2k.c
+++ b/drivers/net/ethernet/dlink/dl2k.c
@@ -66,7 +66,7 @@ static const int multicast_filter_limit = 0x40;
static int rio_open (struct net_device *dev);
static void rio_timer (struct timer_list *t);
-static void rio_tx_timeout (struct net_device *dev);
+static void rio_tx_timeout (struct net_device *dev, unsigned int txqueue);
static netdev_tx_t start_xmit (struct sk_buff *skb, struct net_device *dev);
static irqreturn_t rio_interrupt (int irq, void *dev_instance);
static void rio_free_tx (struct net_device *dev, int irq);
@@ -696,7 +696,7 @@ rio_timer (struct timer_list *t)
}
static void
-rio_tx_timeout (struct net_device *dev)
+rio_tx_timeout (struct net_device *dev, unsigned int txqueue)
{
struct netdev_private *np = netdev_priv(dev);
void __iomem *ioaddr = np->ioaddr;
diff --git a/drivers/net/ethernet/dlink/sundance.c b/drivers/net/ethernet/dlink/sundance.c
index 4a37a69764ce..b91387c456ba 100644
--- a/drivers/net/ethernet/dlink/sundance.c
+++ b/drivers/net/ethernet/dlink/sundance.c
@@ -432,7 +432,7 @@ static int mdio_wait_link(struct net_device *dev, int wait);
static int netdev_open(struct net_device *dev);
static void check_duplex(struct net_device *dev);
static void netdev_timer(struct timer_list *t);
-static void tx_timeout(struct net_device *dev);
+static void tx_timeout(struct net_device *dev, unsigned int txqueue);
static void init_ring(struct net_device *dev);
static netdev_tx_t start_tx(struct sk_buff *skb, struct net_device *dev);
static int reset_tx (struct net_device *dev);
@@ -969,7 +969,7 @@ static void netdev_timer(struct timer_list *t)
add_timer(&np->timer);
}
-static void tx_timeout(struct net_device *dev)
+static void tx_timeout(struct net_device *dev, unsigned int txqueue)
{
struct netdev_private *np = netdev_priv(dev);
void __iomem *ioaddr = np->base;
diff --git a/drivers/net/ethernet/dnet.c b/drivers/net/ethernet/dnet.c
index e24979010969..5f8fa1145db6 100644
--- a/drivers/net/ethernet/dnet.c
+++ b/drivers/net/ethernet/dnet.c
@@ -725,19 +725,6 @@ static struct net_device_stats *dnet_get_stats(struct net_device *dev)
return nstat;
}
-static int dnet_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
-{
- struct phy_device *phydev = dev->phydev;
-
- if (!netif_running(dev))
- return -EINVAL;
-
- if (!phydev)
- return -ENODEV;
-
- return phy_mii_ioctl(phydev, rq, cmd);
-}
-
static void dnet_get_drvinfo(struct net_device *dev,
struct ethtool_drvinfo *info)
{
@@ -759,7 +746,7 @@ static const struct net_device_ops dnet_netdev_ops = {
.ndo_stop = dnet_close,
.ndo_get_stats = dnet_get_stats,
.ndo_start_xmit = dnet_start_xmit,
- .ndo_do_ioctl = dnet_ioctl,
+ .ndo_do_ioctl = phy_do_ioctl_running,
.ndo_set_mac_address = eth_mac_addr,
.ndo_validate_addr = eth_validate_addr,
};
diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c
index 39eb7d525043..56f59db6ebf2 100644
--- a/drivers/net/ethernet/emulex/benet/be_main.c
+++ b/drivers/net/ethernet/emulex/benet/be_main.c
@@ -1417,7 +1417,7 @@ drop:
return NETDEV_TX_OK;
}
-static void be_tx_timeout(struct net_device *netdev)
+static void be_tx_timeout(struct net_device *netdev, unsigned int txqueue)
{
struct be_adapter *adapter = netdev_priv(netdev);
struct device *dev = &adapter->pdev->dev;
diff --git a/drivers/net/ethernet/ethoc.c b/drivers/net/ethernet/ethoc.c
index ea4f17f5cce7..a817ca661c1f 100644
--- a/drivers/net/ethernet/ethoc.c
+++ b/drivers/net/ethernet/ethoc.c
@@ -869,7 +869,7 @@ static int ethoc_change_mtu(struct net_device *dev, int new_mtu)
return -ENOSYS;
}
-static void ethoc_tx_timeout(struct net_device *dev)
+static void ethoc_tx_timeout(struct net_device *dev, unsigned int txqueue)
{
struct ethoc *priv = netdev_priv(dev);
u32 pending = ethoc_read(priv, INT_SOURCE);
@@ -1087,7 +1087,7 @@ static int ethoc_probe(struct platform_device *pdev)
priv = netdev_priv(netdev);
priv->netdev = netdev;
- priv->iobase = devm_ioremap_nocache(&pdev->dev, netdev->base_addr,
+ priv->iobase = devm_ioremap(&pdev->dev, netdev->base_addr,
resource_size(mmio));
if (!priv->iobase) {
dev_err(&pdev->dev, "cannot remap I/O memory space\n");
@@ -1096,7 +1096,7 @@ static int ethoc_probe(struct platform_device *pdev)
}
if (netdev->mem_end) {
- priv->membase = devm_ioremap_nocache(&pdev->dev,
+ priv->membase = devm_ioremap(&pdev->dev,
netdev->mem_start, resource_size(mem));
if (!priv->membase) {
dev_err(&pdev->dev, "cannot remap memory space\n");
diff --git a/drivers/net/ethernet/faraday/ftgmac100.c b/drivers/net/ethernet/faraday/ftgmac100.c
index 8ed85037f021..4572797f00d7 100644
--- a/drivers/net/ethernet/faraday/ftgmac100.c
+++ b/drivers/net/ethernet/faraday/ftgmac100.c
@@ -1536,16 +1536,7 @@ static int ftgmac100_stop(struct net_device *netdev)
return 0;
}
-/* optional */
-static int ftgmac100_do_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
-{
- if (!netdev->phydev)
- return -ENXIO;
-
- return phy_mii_ioctl(netdev->phydev, ifr, cmd);
-}
-
-static void ftgmac100_tx_timeout(struct net_device *netdev)
+static void ftgmac100_tx_timeout(struct net_device *netdev, unsigned int txqueue)
{
struct ftgmac100 *priv = netdev_priv(netdev);
@@ -1597,7 +1588,7 @@ static const struct net_device_ops ftgmac100_netdev_ops = {
.ndo_start_xmit = ftgmac100_hard_start_xmit,
.ndo_set_mac_address = ftgmac100_set_mac_addr,
.ndo_validate_addr = eth_validate_addr,
- .ndo_do_ioctl = ftgmac100_do_ioctl,
+ .ndo_do_ioctl = phy_do_ioctl,
.ndo_tx_timeout = ftgmac100_tx_timeout,
.ndo_set_rx_mode = ftgmac100_set_rx_mode,
.ndo_set_features = ftgmac100_set_features,
diff --git a/drivers/net/ethernet/fealnx.c b/drivers/net/ethernet/fealnx.c
index c24fd56a2c71..84f10970299a 100644
--- a/drivers/net/ethernet/fealnx.c
+++ b/drivers/net/ethernet/fealnx.c
@@ -428,7 +428,7 @@ static void getlinktype(struct net_device *dev);
static void getlinkstatus(struct net_device *dev);
static void netdev_timer(struct timer_list *t);
static void reset_timer(struct timer_list *t);
-static void fealnx_tx_timeout(struct net_device *dev);
+static void fealnx_tx_timeout(struct net_device *dev, unsigned int txqueue);
static void init_ring(struct net_device *dev);
static netdev_tx_t start_tx(struct sk_buff *skb, struct net_device *dev);
static irqreturn_t intr_handler(int irq, void *dev_instance);
@@ -1191,7 +1191,7 @@ static void reset_timer(struct timer_list *t)
}
-static void fealnx_tx_timeout(struct net_device *dev)
+static void fealnx_tx_timeout(struct net_device *dev, unsigned int txqueue)
{
struct netdev_private *np = netdev_priv(dev);
void __iomem *ioaddr = np->mem;
diff --git a/drivers/net/ethernet/freescale/Makefile b/drivers/net/ethernet/freescale/Makefile
index 6a93293d31e0..67c436400352 100644
--- a/drivers/net/ethernet/freescale/Makefile
+++ b/drivers/net/ethernet/freescale/Makefile
@@ -25,4 +25,5 @@ obj-$(CONFIG_FSL_DPAA_ETH) += dpaa/
obj-$(CONFIG_FSL_DPAA2_ETH) += dpaa2/
obj-$(CONFIG_FSL_ENETC) += enetc/
+obj-$(CONFIG_FSL_ENETC_MDIO) += enetc/
obj-$(CONFIG_FSL_ENETC_VF) += enetc/
diff --git a/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c b/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c
index a301f0095223..09dbcd819d84 100644
--- a/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c
+++ b/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c
@@ -288,7 +288,7 @@ static int dpaa_stop(struct net_device *net_dev)
return err;
}
-static void dpaa_tx_timeout(struct net_device *net_dev)
+static void dpaa_tx_timeout(struct net_device *net_dev, unsigned int txqueue)
{
struct dpaa_percpu_priv *percpu_priv;
const struct dpaa_priv *priv;
diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-ptp.c b/drivers/net/ethernet/freescale/dpaa2/dpaa2-ptp.c
index 6437fe6b9abf..cc1b7f85e433 100644
--- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-ptp.c
+++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-ptp.c
@@ -27,6 +27,20 @@ static int dpaa2_ptp_enable(struct ptp_clock_info *ptp,
mc_dev = to_fsl_mc_device(dev);
switch (rq->type) {
+ case PTP_CLK_REQ_EXTTS:
+ switch (rq->extts.index) {
+ case 0:
+ bit = DPRTC_EVENT_ETS1;
+ break;
+ case 1:
+ bit = DPRTC_EVENT_ETS2;
+ break;
+ default:
+ return -EINVAL;
+ }
+ if (on)
+ extts_clean_up(ptp_qoriq, rq->extts.index, false);
+ break;
case PTP_CLK_REQ_PPS:
bit = DPRTC_EVENT_PPS;
break;
@@ -96,6 +110,12 @@ static irqreturn_t dpaa2_ptp_irq_handler_thread(int irq, void *priv)
ptp_clock_event(ptp_qoriq->clock, &event);
}
+ if (status & DPRTC_EVENT_ETS1)
+ extts_clean_up(ptp_qoriq, 0, true);
+
+ if (status & DPRTC_EVENT_ETS2)
+ extts_clean_up(ptp_qoriq, 1, true);
+
err = dprtc_clear_irq_status(mc_dev->mc_io, 0, mc_dev->mc_handle,
DPRTC_IRQ_INDEX, status);
if (unlikely(err)) {
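
With DPRTC_EVENT_ETS1/ETS2 wired into dpaa2_ptp_enable() and the IRQ thread, the clock can now report external trigger timestamps through the standard PTP character device. A hedged user-space sketch (the /dev/ptp0 path and channel index are assumptions for this board):

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/ptp_clock.h>

int main(void)
{
	struct ptp_extts_request req;
	struct ptp_extts_event ev;
	int fd = open("/dev/ptp0", O_RDWR);

	if (fd < 0)
		return 1;

	memset(&req, 0, sizeof(req));
	req.index = 0;			/* maps to DPRTC_EVENT_ETS1 above */
	req.flags = PTP_ENABLE_FEATURE;
	if (ioctl(fd, PTP_EXTTS_REQUEST, &req))
		return 1;

	/* each external trigger yields one event per read() */
	if (read(fd, &ev, sizeof(ev)) == sizeof(ev))
		printf("extts %u at %lld.%09u\n", ev.index,
		       (long long)ev.t.sec, ev.t.nsec);

	close(fd);
	return 0;
}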
diff --git a/drivers/net/ethernet/freescale/dpaa2/dprtc-cmd.h b/drivers/net/ethernet/freescale/dpaa2/dprtc-cmd.h
index 4ac05bfef338..96ffeb948f08 100644
--- a/drivers/net/ethernet/freescale/dpaa2/dprtc-cmd.h
+++ b/drivers/net/ethernet/freescale/dpaa2/dprtc-cmd.h
@@ -9,9 +9,11 @@
/* Command versioning */
#define DPRTC_CMD_BASE_VERSION 1
+#define DPRTC_CMD_VERSION_2 2
#define DPRTC_CMD_ID_OFFSET 4
#define DPRTC_CMD(id) (((id) << DPRTC_CMD_ID_OFFSET) | DPRTC_CMD_BASE_VERSION)
+#define DPRTC_CMD_V2(id) (((id) << DPRTC_CMD_ID_OFFSET) | DPRTC_CMD_VERSION_2)
/* Command IDs */
#define DPRTC_CMDID_CLOSE DPRTC_CMD(0x800)
@@ -19,7 +21,7 @@
#define DPRTC_CMDID_SET_IRQ_ENABLE DPRTC_CMD(0x012)
#define DPRTC_CMDID_GET_IRQ_ENABLE DPRTC_CMD(0x013)
-#define DPRTC_CMDID_SET_IRQ_MASK DPRTC_CMD(0x014)
+#define DPRTC_CMDID_SET_IRQ_MASK DPRTC_CMD_V2(0x014)
#define DPRTC_CMDID_GET_IRQ_MASK DPRTC_CMD(0x015)
#define DPRTC_CMDID_GET_IRQ_STATUS DPRTC_CMD(0x016)
#define DPRTC_CMDID_CLEAR_IRQ_STATUS DPRTC_CMD(0x017)
diff --git a/drivers/net/ethernet/freescale/dpaa2/dprtc.h b/drivers/net/ethernet/freescale/dpaa2/dprtc.h
index 311c184e1aef..05c413719e55 100644
--- a/drivers/net/ethernet/freescale/dpaa2/dprtc.h
+++ b/drivers/net/ethernet/freescale/dpaa2/dprtc.h
@@ -20,6 +20,8 @@ struct fsl_mc_io;
#define DPRTC_IRQ_INDEX 0
#define DPRTC_EVENT_PPS 0x08000000
+#define DPRTC_EVENT_ETS1 0x00800000
+#define DPRTC_EVENT_ETS2 0x00400000
int dprtc_open(struct fsl_mc_io *mc_io,
u32 cmd_flags,
diff --git a/drivers/net/ethernet/freescale/enetc/Kconfig b/drivers/net/ethernet/freescale/enetc/Kconfig
index edad4ca46327..fe942de19597 100644
--- a/drivers/net/ethernet/freescale/enetc/Kconfig
+++ b/drivers/net/ethernet/freescale/enetc/Kconfig
@@ -2,6 +2,7 @@
config FSL_ENETC
tristate "ENETC PF driver"
depends on PCI && PCI_MSI && (ARCH_LAYERSCAPE || COMPILE_TEST)
+ select FSL_ENETC_MDIO
select PHYLIB
help
This driver supports NXP ENETC gigabit ethernet controller PCIe
diff --git a/drivers/net/ethernet/freescale/enetc/Makefile b/drivers/net/ethernet/freescale/enetc/Makefile
index d0db33e5b6b7..74f7ac253b8b 100644
--- a/drivers/net/ethernet/freescale/enetc/Makefile
+++ b/drivers/net/ethernet/freescale/enetc/Makefile
@@ -3,7 +3,7 @@
common-objs := enetc.o enetc_cbdr.o enetc_ethtool.o
obj-$(CONFIG_FSL_ENETC) += fsl-enetc.o
-fsl-enetc-y := enetc_pf.o enetc_mdio.o $(common-objs)
+fsl-enetc-y := enetc_pf.o $(common-objs)
fsl-enetc-$(CONFIG_PCI_IOV) += enetc_msg.o
fsl-enetc-$(CONFIG_FSL_ENETC_QOS) += enetc_qos.o
diff --git a/drivers/net/ethernet/freescale/enetc/enetc.c b/drivers/net/ethernet/freescale/enetc/enetc.c
index 17739906c966..1f79e36116a3 100644
--- a/drivers/net/ethernet/freescale/enetc/enetc.c
+++ b/drivers/net/ethernet/freescale/enetc/enetc.c
@@ -149,11 +149,21 @@ static int enetc_map_tx_buffs(struct enetc_bdr *tx_ring, struct sk_buff *skb,
if (enetc_tx_csum(skb, &temp_bd))
flags |= ENETC_TXBD_FLAGS_CSUM | ENETC_TXBD_FLAGS_L4CS;
+ else if (tx_ring->tsd_enable)
+ flags |= ENETC_TXBD_FLAGS_TSE | ENETC_TXBD_FLAGS_TXSTART;
/* first BD needs frm_len and offload flags set */
temp_bd.frm_len = cpu_to_le16(skb->len);
temp_bd.flags = flags;
+ if (flags & ENETC_TXBD_FLAGS_TSE) {
+ u32 temp;
+
+ temp = (skb->skb_mstamp_ns >> 5 & ENETC_TXBD_TXSTART_MASK)
+ | (flags << ENETC_TXBD_FLAGS_OFFSET);
+ temp_bd.txstart = cpu_to_le32(temp);
+ }
+
if (flags & ENETC_TXBD_FLAGS_EX) {
u8 e_flags = 0;
*txbd = temp_bd;
@@ -227,6 +237,8 @@ static int enetc_map_tx_buffs(struct enetc_bdr *tx_ring, struct sk_buff *skb,
enetc_bdr_idx_inc(tx_ring, &i);
tx_ring->next_to_use = i;
+ skb_tx_timestamp(skb);
+
/* let H/W know BD ring has been updated */
enetc_wr_reg(tx_ring->tpir, i); /* includes wmb() */
@@ -1503,6 +1515,8 @@ int enetc_setup_tc(struct net_device *ndev, enum tc_setup_type type,
return enetc_setup_tc_taprio(ndev, type_data);
case TC_SETUP_QDISC_CBS:
return enetc_setup_tc_cbs(ndev, type_data);
+ case TC_SETUP_QDISC_ETF:
+ return enetc_setup_tc_txtime(ndev, type_data);
default:
return -EOPNOTSUPP;
}
diff --git a/drivers/net/ethernet/freescale/enetc/enetc.h b/drivers/net/ethernet/freescale/enetc/enetc.h
index 7ee0da6d0015..dd4a227ffc7a 100644
--- a/drivers/net/ethernet/freescale/enetc/enetc.h
+++ b/drivers/net/ethernet/freescale/enetc/enetc.h
@@ -72,6 +72,7 @@ struct enetc_bdr {
struct enetc_ring_stats stats;
dma_addr_t bd_dma_base;
+ u8 tsd_enable; /* Time specific departure */
} ____cacheline_aligned_in_smp;
static inline void enetc_bdr_idx_inc(struct enetc_bdr *bdr, int *i)
@@ -256,8 +257,10 @@ int enetc_send_cmd(struct enetc_si *si, struct enetc_cbd *cbd);
int enetc_setup_tc_taprio(struct net_device *ndev, void *type_data);
void enetc_sched_speed_set(struct net_device *ndev);
int enetc_setup_tc_cbs(struct net_device *ndev, void *type_data);
+int enetc_setup_tc_txtime(struct net_device *ndev, void *type_data);
#else
#define enetc_setup_tc_taprio(ndev, type_data) -EOPNOTSUPP
#define enetc_sched_speed_set(ndev) (void)0
#define enetc_setup_tc_cbs(ndev, type_data) -EOPNOTSUPP
+#define enetc_setup_tc_txtime(ndev, type_data) -EOPNOTSUPP
#endif
diff --git a/drivers/net/ethernet/freescale/enetc/enetc_ethtool.c b/drivers/net/ethernet/freescale/enetc/enetc_ethtool.c
index 880a8ed8bb47..301ee0dde02d 100644
--- a/drivers/net/ethernet/freescale/enetc/enetc_ethtool.c
+++ b/drivers/net/ethernet/freescale/enetc/enetc_ethtool.c
@@ -579,6 +579,7 @@ static int enetc_get_ts_info(struct net_device *ndev,
(1 << HWTSTAMP_FILTER_ALL);
#else
info->so_timestamping = SOF_TIMESTAMPING_RX_SOFTWARE |
+ SOF_TIMESTAMPING_TX_SOFTWARE |
SOF_TIMESTAMPING_SOFTWARE;
#endif
return 0;
diff --git a/drivers/net/ethernet/freescale/enetc/enetc_hw.h b/drivers/net/ethernet/freescale/enetc/enetc_hw.h
index 51f543ef37a8..62554f28ce07 100644
--- a/drivers/net/ethernet/freescale/enetc/enetc_hw.h
+++ b/drivers/net/ethernet/freescale/enetc/enetc_hw.h
@@ -200,6 +200,7 @@ enum enetc_bdr_type {TX, RX};
#define ENETC_PFPMR 0x1900
#define ENETC_PFPMR_PMACE BIT(1)
#define ENETC_PFPMR_MWLM BIT(0)
+#define ENETC_EMDIO_BASE 0x1c00
#define ENETC_PSIUMHFR0(n, err) (((err) ? 0x1d08 : 0x1d00) + (n) * 0x10)
#define ENETC_PSIUMHFR1(n) (0x1d04 + (n) * 0x10)
#define ENETC_PSIMMHFR0(n, err) (((err) ? 0x1d00 : 0x1d08) + (n) * 0x10)
@@ -358,6 +359,7 @@ union enetc_tx_bd {
u8 l4_csoff;
u8 flags;
}; /* default layout */
+ __le32 txstart;
__le32 lstatus;
};
};
@@ -378,11 +380,14 @@ union enetc_tx_bd {
};
#define ENETC_TXBD_FLAGS_L4CS BIT(0)
+#define ENETC_TXBD_FLAGS_TSE BIT(1)
#define ENETC_TXBD_FLAGS_W BIT(2)
#define ENETC_TXBD_FLAGS_CSUM BIT(3)
+#define ENETC_TXBD_FLAGS_TXSTART BIT(4)
#define ENETC_TXBD_FLAGS_EX BIT(6)
#define ENETC_TXBD_FLAGS_F BIT(7)
-
+#define ENETC_TXBD_TXSTART_MASK GENMASK(24, 0)
+#define ENETC_TXBD_FLAGS_OFFSET 24
static inline void enetc_clear_tx_bd(union enetc_tx_bd *txbd)
{
memset(txbd, 0, sizeof(*txbd));
@@ -615,3 +620,7 @@ struct enetc_cbd {
/* Port time gating capability register */
#define ENETC_QBV_PTGCAPR_OFFSET 0x11a08
#define ENETC_QBV_MAX_GCL_LEN_MASK GENMASK(15, 0)
+
+/* Port time specific departure */
+#define ENETC_PTCTSDR(n) (0x1210 + 4 * (n))
+#define ENETC_TSDE BIT(31)
diff --git a/drivers/net/ethernet/freescale/enetc/enetc_mdio.c b/drivers/net/ethernet/freescale/enetc/enetc_mdio.c
index 149883c8f0b8..48c32a171afa 100644
--- a/drivers/net/ethernet/freescale/enetc/enetc_mdio.c
+++ b/drivers/net/ethernet/freescale/enetc/enetc_mdio.c
@@ -1,41 +1,56 @@
// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
/* Copyright 2019 NXP */
+#include <linux/fsl/enetc_mdio.h>
#include <linux/mdio.h>
#include <linux/of_mdio.h>
#include <linux/iopoll.h>
#include <linux/of.h>
-#include "enetc_mdio.h"
+#include "enetc_pf.h"
-#define ENETC_MDIO_REG_OFFSET 0x1c00
#define ENETC_MDIO_CFG 0x0 /* MDIO configuration and status */
#define ENETC_MDIO_CTL 0x4 /* MDIO control */
#define ENETC_MDIO_DATA 0x8 /* MDIO data */
#define ENETC_MDIO_ADDR 0xc /* MDIO address */
-#define enetc_mdio_rd(hw, off) \
- enetc_port_rd(hw, ENETC_##off + ENETC_MDIO_REG_OFFSET)
-#define enetc_mdio_wr(hw, off, val) \
- enetc_port_wr(hw, ENETC_##off + ENETC_MDIO_REG_OFFSET, val)
-#define enetc_mdio_rd_reg(off) enetc_mdio_rd(hw, off)
+static inline u32 _enetc_mdio_rd(struct enetc_mdio_priv *mdio_priv, int off)
+{
+ return enetc_port_rd(mdio_priv->hw, mdio_priv->mdio_base + off);
+}
+
+static inline void _enetc_mdio_wr(struct enetc_mdio_priv *mdio_priv, int off,
+ u32 val)
+{
+ enetc_port_wr(mdio_priv->hw, mdio_priv->mdio_base + off, val);
+}
-#define ENETC_MDC_DIV 258
+#define enetc_mdio_rd(mdio_priv, off) \
+ _enetc_mdio_rd(mdio_priv, ENETC_##off)
+#define enetc_mdio_wr(mdio_priv, off, val) \
+ _enetc_mdio_wr(mdio_priv, ENETC_##off, val)
+#define enetc_mdio_rd_reg(off) enetc_mdio_rd(mdio_priv, off)
#define MDIO_CFG_CLKDIV(x) ((((x) >> 1) & 0xff) << 8)
#define MDIO_CFG_BSY BIT(0)
#define MDIO_CFG_RD_ER BIT(1)
+#define MDIO_CFG_HOLD(x) (((x) << 2) & GENMASK(4, 2))
#define MDIO_CFG_ENC45 BIT(6)
/* external MDIO only - driven on neg MDC edge */
#define MDIO_CFG_NEG BIT(23)
+#define ENETC_EMDIO_CFG \
+ (MDIO_CFG_HOLD(2) | \
+ MDIO_CFG_CLKDIV(258) | \
+ MDIO_CFG_NEG)
+
#define MDIO_CTL_DEV_ADDR(x) ((x) & 0x1f)
#define MDIO_CTL_PORT_ADDR(x) (((x) & 0x1f) << 5)
#define MDIO_CTL_READ BIT(15)
#define MDIO_DATA(x) ((x) & 0xffff)
#define TIMEOUT 1000
-static int enetc_mdio_wait_complete(struct enetc_hw *hw)
+static int enetc_mdio_wait_complete(struct enetc_mdio_priv *mdio_priv)
{
u32 val;
@@ -46,12 +61,11 @@ static int enetc_mdio_wait_complete(struct enetc_hw *hw)
int enetc_mdio_write(struct mii_bus *bus, int phy_id, int regnum, u16 value)
{
struct enetc_mdio_priv *mdio_priv = bus->priv;
- struct enetc_hw *hw = mdio_priv->hw;
u32 mdio_ctl, mdio_cfg;
u16 dev_addr;
int ret;
- mdio_cfg = MDIO_CFG_CLKDIV(ENETC_MDC_DIV) | MDIO_CFG_NEG;
+ mdio_cfg = ENETC_EMDIO_CFG;
if (regnum & MII_ADDR_C45) {
dev_addr = (regnum >> 16) & 0x1f;
mdio_cfg |= MDIO_CFG_ENC45;
@@ -61,44 +75,44 @@ int enetc_mdio_write(struct mii_bus *bus, int phy_id, int regnum, u16 value)
mdio_cfg &= ~MDIO_CFG_ENC45;
}
- enetc_mdio_wr(hw, MDIO_CFG, mdio_cfg);
+ enetc_mdio_wr(mdio_priv, MDIO_CFG, mdio_cfg);
- ret = enetc_mdio_wait_complete(hw);
+ ret = enetc_mdio_wait_complete(mdio_priv);
if (ret)
return ret;
/* set port and dev addr */
mdio_ctl = MDIO_CTL_PORT_ADDR(phy_id) | MDIO_CTL_DEV_ADDR(dev_addr);
- enetc_mdio_wr(hw, MDIO_CTL, mdio_ctl);
+ enetc_mdio_wr(mdio_priv, MDIO_CTL, mdio_ctl);
/* set the register address */
if (regnum & MII_ADDR_C45) {
- enetc_mdio_wr(hw, MDIO_ADDR, regnum & 0xffff);
+ enetc_mdio_wr(mdio_priv, MDIO_ADDR, regnum & 0xffff);
- ret = enetc_mdio_wait_complete(hw);
+ ret = enetc_mdio_wait_complete(mdio_priv);
if (ret)
return ret;
}
/* write the value */
- enetc_mdio_wr(hw, MDIO_DATA, MDIO_DATA(value));
+ enetc_mdio_wr(mdio_priv, MDIO_DATA, MDIO_DATA(value));
- ret = enetc_mdio_wait_complete(hw);
+ ret = enetc_mdio_wait_complete(mdio_priv);
if (ret)
return ret;
return 0;
}
+EXPORT_SYMBOL_GPL(enetc_mdio_write);
int enetc_mdio_read(struct mii_bus *bus, int phy_id, int regnum)
{
struct enetc_mdio_priv *mdio_priv = bus->priv;
- struct enetc_hw *hw = mdio_priv->hw;
u32 mdio_ctl, mdio_cfg;
u16 dev_addr, value;
int ret;
- mdio_cfg = MDIO_CFG_CLKDIV(ENETC_MDC_DIV) | MDIO_CFG_NEG;
+ mdio_cfg = ENETC_EMDIO_CFG;
if (regnum & MII_ADDR_C45) {
dev_addr = (regnum >> 16) & 0x1f;
mdio_cfg |= MDIO_CFG_ENC45;
@@ -107,86 +121,56 @@ int enetc_mdio_read(struct mii_bus *bus, int phy_id, int regnum)
mdio_cfg &= ~MDIO_CFG_ENC45;
}
- enetc_mdio_wr(hw, MDIO_CFG, mdio_cfg);
+ enetc_mdio_wr(mdio_priv, MDIO_CFG, mdio_cfg);
- ret = enetc_mdio_wait_complete(hw);
+ ret = enetc_mdio_wait_complete(mdio_priv);
if (ret)
return ret;
/* set port and device addr */
mdio_ctl = MDIO_CTL_PORT_ADDR(phy_id) | MDIO_CTL_DEV_ADDR(dev_addr);
- enetc_mdio_wr(hw, MDIO_CTL, mdio_ctl);
+ enetc_mdio_wr(mdio_priv, MDIO_CTL, mdio_ctl);
/* set the register address */
if (regnum & MII_ADDR_C45) {
- enetc_mdio_wr(hw, MDIO_ADDR, regnum & 0xffff);
+ enetc_mdio_wr(mdio_priv, MDIO_ADDR, regnum & 0xffff);
- ret = enetc_mdio_wait_complete(hw);
+ ret = enetc_mdio_wait_complete(mdio_priv);
if (ret)
return ret;
}
/* initiate the read */
- enetc_mdio_wr(hw, MDIO_CTL, mdio_ctl | MDIO_CTL_READ);
+ enetc_mdio_wr(mdio_priv, MDIO_CTL, mdio_ctl | MDIO_CTL_READ);
- ret = enetc_mdio_wait_complete(hw);
+ ret = enetc_mdio_wait_complete(mdio_priv);
if (ret)
return ret;
/* return all Fs if nothing was there */
- if (enetc_mdio_rd(hw, MDIO_CFG) & MDIO_CFG_RD_ER) {
+ if (enetc_mdio_rd(mdio_priv, MDIO_CFG) & MDIO_CFG_RD_ER) {
dev_dbg(&bus->dev,
"Error while reading PHY%d reg at %d.%hhu\n",
phy_id, dev_addr, regnum);
return 0xffff;
}
- value = enetc_mdio_rd(hw, MDIO_DATA) & 0xffff;
+ value = enetc_mdio_rd(mdio_priv, MDIO_DATA) & 0xffff;
return value;
}
+EXPORT_SYMBOL_GPL(enetc_mdio_read);
-int enetc_mdio_probe(struct enetc_pf *pf)
+struct enetc_hw *enetc_hw_alloc(struct device *dev, void __iomem *port_regs)
{
- struct device *dev = &pf->si->pdev->dev;
- struct enetc_mdio_priv *mdio_priv;
- struct device_node *np;
- struct mii_bus *bus;
- int err;
-
- bus = devm_mdiobus_alloc_size(dev, sizeof(*mdio_priv));
- if (!bus)
- return -ENOMEM;
-
- bus->name = "Freescale ENETC MDIO Bus";
- bus->read = enetc_mdio_read;
- bus->write = enetc_mdio_write;
- bus->parent = dev;
- mdio_priv = bus->priv;
- mdio_priv->hw = &pf->si->hw;
- snprintf(bus->id, MII_BUS_ID_SIZE, "%s", dev_name(dev));
-
- np = of_get_child_by_name(dev->of_node, "mdio");
- if (!np) {
- dev_err(dev, "MDIO node missing\n");
- return -EINVAL;
- }
-
- err = of_mdiobus_register(bus, np);
- if (err) {
- of_node_put(np);
- dev_err(dev, "cannot register MDIO bus\n");
- return err;
- }
+ struct enetc_hw *hw;
- of_node_put(np);
- pf->mdio = bus;
+ hw = devm_kzalloc(dev, sizeof(*hw), GFP_KERNEL);
+ if (!hw)
+ return ERR_PTR(-ENOMEM);
- return 0;
-}
+ hw->port = port_regs;
-void enetc_mdio_remove(struct enetc_pf *pf)
-{
- if (pf->mdio)
- mdiobus_unregister(pf->mdio);
+ return hw;
}
+EXPORT_SYMBOL_GPL(enetc_hw_alloc);
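
The shape of this refactor: the accessors stop hard-coding the PF's 0x1c00 MDIO window and instead read the offset from the bus private data, so one copy of enetc_mdio_read()/enetc_mdio_write() serves both the per-port MDIO (registered from enetc_pf.c below) and the central PCIe MDIO endpoint (enetc_pci_mdio.c). The shared private struct, presumably now in include/linux/fsl/enetc_mdio.h, would look roughly like:

/* sketch, as implied by the users in this patch */
struct enetc_mdio_priv {
	struct enetc_hw *hw;
	int mdio_base;		/* register window offset, e.g. ENETC_EMDIO_BASE */
};

Together with the Kconfig/Makefile changes above, this moves the MDIO code into its own FSL_ENETC_MDIO module that both the PF driver and the standalone PCIe MDIO driver can use.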
diff --git a/drivers/net/ethernet/freescale/enetc/enetc_mdio.h b/drivers/net/ethernet/freescale/enetc/enetc_mdio.h
deleted file mode 100644
index 60c9a3889824..000000000000
--- a/drivers/net/ethernet/freescale/enetc/enetc_mdio.h
+++ /dev/null
@@ -1,12 +0,0 @@
-/* SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause) */
-/* Copyright 2019 NXP */
-
-#include <linux/phy.h>
-#include "enetc_pf.h"
-
-struct enetc_mdio_priv {
- struct enetc_hw *hw;
-};
-
-int enetc_mdio_write(struct mii_bus *bus, int phy_id, int regnum, u16 value);
-int enetc_mdio_read(struct mii_bus *bus, int phy_id, int regnum);
diff --git a/drivers/net/ethernet/freescale/enetc/enetc_pci_mdio.c b/drivers/net/ethernet/freescale/enetc/enetc_pci_mdio.c
index fbd41ce01f06..ebc635f8a4cc 100644
--- a/drivers/net/ethernet/freescale/enetc/enetc_pci_mdio.c
+++ b/drivers/net/ethernet/freescale/enetc/enetc_pci_mdio.c
@@ -1,7 +1,8 @@
// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
/* Copyright 2019 NXP */
+#include <linux/fsl/enetc_mdio.h>
#include <linux/of_mdio.h>
-#include "enetc_mdio.h"
+#include "enetc_pf.h"
#define ENETC_MDIO_DEV_ID 0xee01
#define ENETC_MDIO_DEV_NAME "FSL PCIe IE Central MDIO"
@@ -13,17 +14,29 @@ static int enetc_pci_mdio_probe(struct pci_dev *pdev,
{
struct enetc_mdio_priv *mdio_priv;
struct device *dev = &pdev->dev;
+ void __iomem *port_regs;
struct enetc_hw *hw;
struct mii_bus *bus;
int err;
- hw = devm_kzalloc(dev, sizeof(*hw), GFP_KERNEL);
- if (!hw)
- return -ENOMEM;
+ port_regs = pci_iomap(pdev, 0, 0);
+ if (!port_regs) {
+ dev_err(dev, "iomap failed\n");
+ err = -ENXIO;
+ goto err_ioremap;
+ }
+
+ hw = enetc_hw_alloc(dev, port_regs);
+ if (IS_ERR(hw)) {
+ err = PTR_ERR(hw);
+ goto err_hw_alloc;
+ }
bus = devm_mdiobus_alloc_size(dev, sizeof(*mdio_priv));
- if (!bus)
- return -ENOMEM;
+ if (!bus) {
+ err = -ENOMEM;
+ goto err_mdiobus_alloc;
+ }
bus->name = ENETC_MDIO_BUS_NAME;
bus->read = enetc_mdio_read;
@@ -31,13 +44,14 @@ static int enetc_pci_mdio_probe(struct pci_dev *pdev,
bus->parent = dev;
mdio_priv = bus->priv;
mdio_priv->hw = hw;
+ mdio_priv->mdio_base = ENETC_EMDIO_BASE;
snprintf(bus->id, MII_BUS_ID_SIZE, "%s", dev_name(dev));
pcie_flr(pdev);
err = pci_enable_device_mem(pdev);
if (err) {
dev_err(dev, "device enable failed\n");
- return err;
+ goto err_pci_enable;
}
err = pci_request_region(pdev, 0, KBUILD_MODNAME);
@@ -46,13 +60,6 @@ static int enetc_pci_mdio_probe(struct pci_dev *pdev,
goto err_pci_mem_reg;
}
- hw->port = pci_iomap(pdev, 0, 0);
- if (!hw->port) {
- err = -ENXIO;
- dev_err(dev, "iomap failed\n");
- goto err_ioremap;
- }
-
err = of_mdiobus_register(bus, dev->of_node);
if (err)
goto err_mdiobus_reg;
@@ -62,12 +69,14 @@ static int enetc_pci_mdio_probe(struct pci_dev *pdev,
return 0;
err_mdiobus_reg:
- iounmap(mdio_priv->hw->port);
-err_ioremap:
pci_release_mem_regions(pdev);
err_pci_mem_reg:
pci_disable_device(pdev);
-
+err_pci_enable:
+err_mdiobus_alloc:
+ iounmap(port_regs);
+err_hw_alloc:
+err_ioremap:
return err;
}
diff --git a/drivers/net/ethernet/freescale/enetc/enetc_pf.c b/drivers/net/ethernet/freescale/enetc/enetc_pf.c
index e7482d483b28..fc0d7d99e9a1 100644
--- a/drivers/net/ethernet/freescale/enetc/enetc_pf.c
+++ b/drivers/net/ethernet/freescale/enetc/enetc_pf.c
@@ -2,6 +2,7 @@
/* Copyright 2017-2019 NXP */
#include <linux/module.h>
+#include <linux/fsl/enetc_mdio.h>
#include <linux/of_mdio.h>
#include <linux/of_net.h>
#include "enetc_pf.h"
@@ -749,6 +750,52 @@ static void enetc_pf_netdev_setup(struct enetc_si *si, struct net_device *ndev,
enetc_get_primary_mac_addr(&si->hw, ndev->dev_addr);
}
+static int enetc_mdio_probe(struct enetc_pf *pf)
+{
+ struct device *dev = &pf->si->pdev->dev;
+ struct enetc_mdio_priv *mdio_priv;
+ struct device_node *np;
+ struct mii_bus *bus;
+ int err;
+
+ bus = devm_mdiobus_alloc_size(dev, sizeof(*mdio_priv));
+ if (!bus)
+ return -ENOMEM;
+
+ bus->name = "Freescale ENETC MDIO Bus";
+ bus->read = enetc_mdio_read;
+ bus->write = enetc_mdio_write;
+ bus->parent = dev;
+ mdio_priv = bus->priv;
+ mdio_priv->hw = &pf->si->hw;
+ mdio_priv->mdio_base = ENETC_EMDIO_BASE;
+ snprintf(bus->id, MII_BUS_ID_SIZE, "%s", dev_name(dev));
+
+ np = of_get_child_by_name(dev->of_node, "mdio");
+ if (!np) {
+ dev_err(dev, "MDIO node missing\n");
+ return -EINVAL;
+ }
+
+ err = of_mdiobus_register(bus, np);
+ if (err) {
+ of_node_put(np);
+ dev_err(dev, "cannot register MDIO bus\n");
+ return err;
+ }
+
+ of_node_put(np);
+ pf->mdio = bus;
+
+ return 0;
+}
+
+static void enetc_mdio_remove(struct enetc_pf *pf)
+{
+ if (pf->mdio)
+ mdiobus_unregister(pf->mdio);
+}
+
static int enetc_of_get_phy(struct enetc_ndev_priv *priv)
{
struct enetc_pf *pf = enetc_si_priv(priv->si);
diff --git a/drivers/net/ethernet/freescale/enetc/enetc_pf.h b/drivers/net/ethernet/freescale/enetc/enetc_pf.h
index 10dd1b53bb08..59e65a6f6c3e 100644
--- a/drivers/net/ethernet/freescale/enetc/enetc_pf.h
+++ b/drivers/net/ethernet/freescale/enetc/enetc_pf.h
@@ -49,7 +49,3 @@ struct enetc_pf {
int enetc_msg_psi_init(struct enetc_pf *pf);
void enetc_msg_psi_free(struct enetc_pf *pf);
void enetc_msg_handle_rxmsg(struct enetc_pf *pf, int mbox_id, u16 *status);
-
-/* MDIO */
-int enetc_mdio_probe(struct enetc_pf *pf);
-void enetc_mdio_remove(struct enetc_pf *pf);
diff --git a/drivers/net/ethernet/freescale/enetc/enetc_qos.c b/drivers/net/ethernet/freescale/enetc/enetc_qos.c
index 2e99438cb1bf..0c6bf3a55a9a 100644
--- a/drivers/net/ethernet/freescale/enetc/enetc_qos.c
+++ b/drivers/net/ethernet/freescale/enetc/enetc_qos.c
@@ -36,7 +36,6 @@ void enetc_sched_speed_set(struct net_device *ndev)
case SPEED_10:
default:
pspeed = ENETC_PMR_PSPEED_10M;
- netdev_err(ndev, "Qbv PSPEED set speed link down.\n");
}
priv->speed = speed;
@@ -156,6 +155,11 @@ int enetc_setup_tc_taprio(struct net_device *ndev, void *type_data)
int err;
int i;
+ /* TSD and Qbv are mutually exclusive in hardware */
+ for (i = 0; i < priv->num_tx_rings; i++)
+ if (priv->tx_ring[i]->tsd_enable)
+ return -EBUSY;
+
for (i = 0; i < priv->num_tx_rings; i++)
enetc_set_bdr_prio(&priv->si->hw,
priv->tx_ring[i]->index,
@@ -192,7 +196,6 @@ int enetc_setup_tc_cbs(struct net_device *ndev, void *type_data)
u32 hi_credit_bit, hi_credit_reg;
u32 max_interference_size;
u32 port_frame_max_size;
- u32 tc_max_sized_frame;
u8 tc = cbs->queue;
u8 prio_top, prio_next;
int bw_sum = 0;
@@ -250,7 +253,7 @@ int enetc_setup_tc_cbs(struct net_device *ndev, void *type_data)
return -EINVAL;
}
- tc_max_sized_frame = enetc_port_rd(&si->hw, ENETC_PTCMSDUR(tc));
+ enetc_port_rd(&si->hw, ENETC_PTCMSDUR(tc));
/* For top prio TC, the max_interfrence_size is maxSizedFrame.
*
@@ -298,3 +301,33 @@ int enetc_setup_tc_cbs(struct net_device *ndev, void *type_data)
return 0;
}
+
+int enetc_setup_tc_txtime(struct net_device *ndev, void *type_data)
+{
+ struct enetc_ndev_priv *priv = netdev_priv(ndev);
+ struct tc_etf_qopt_offload *qopt = type_data;
+ u8 tc_nums = netdev_get_num_tc(ndev);
+ int tc;
+
+ if (!tc_nums)
+ return -EOPNOTSUPP;
+
+ tc = qopt->queue;
+
+ if (tc < 0 || tc >= priv->num_tx_rings)
+ return -EINVAL;
+
+ /* Do not support TXSTART and TX CSUM offload simultaneously */
+ if (ndev->features & NETIF_F_CSUM_MASK)
+ return -EBUSY;
+
+ /* TSD and Qbv are mutually exclusive in hardware */
+ if (enetc_rd(&priv->si->hw, ENETC_QBV_PTGCR_OFFSET) & ENETC_QBV_TGE)
+ return -EBUSY;
+
+ priv->tx_ring[tc]->tsd_enable = qopt->enable;
+ enetc_port_wr(&priv->si->hw, ENETC_PTCTSDR(tc),
+ qopt->enable ? ENETC_TSDE : 0);
+
+ return 0;
+}
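
enetc_setup_tc_txtime() backs the ETF qdisc's offload mode: it arms the per-ring TSD gate, and the actual departure time then arrives per skb in skb->skb_mstamp_ns (consumed in enetc_map_tx_buffs() above). User space supplies that time via SO_TXTIME; a hedged sketch of the socket setup (the clock choice must match the qdisc configuration and is an assumption here):

#include <time.h>
#include <sys/socket.h>
#include <linux/net_tstamp.h>

/* opt a socket into per-packet launch times; the time itself is
 * then passed with each sendmsg() in an SCM_TXTIME control message
 */
static int enable_txtime(int fd)
{
	struct sock_txtime cfg = {
		.clockid = CLOCK_TAI,
		.flags = 0,
	};

	return setsockopt(fd, SOL_SOCKET, SO_TXTIME, &cfg, sizeof(cfg));
}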
diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c
index 9294027e9d90..4432a59904c7 100644
--- a/drivers/net/ethernet/freescale/fec_main.c
+++ b/drivers/net/ethernet/freescale/fec_main.c
@@ -1141,7 +1141,7 @@ fec_stop(struct net_device *ndev)
static void
-fec_timeout(struct net_device *ndev)
+fec_timeout(struct net_device *ndev, unsigned int txqueue)
{
struct fec_enet_private *fep = netdev_priv(ndev);
diff --git a/drivers/net/ethernet/freescale/fec_mpc52xx.c b/drivers/net/ethernet/freescale/fec_mpc52xx.c
index 30cdb246d020..7a3f066e611d 100644
--- a/drivers/net/ethernet/freescale/fec_mpc52xx.c
+++ b/drivers/net/ethernet/freescale/fec_mpc52xx.c
@@ -84,7 +84,7 @@ static int debug = -1; /* the above default */
module_param(debug, int, 0);
MODULE_PARM_DESC(debug, "debugging messages level");
-static void mpc52xx_fec_tx_timeout(struct net_device *dev)
+static void mpc52xx_fec_tx_timeout(struct net_device *dev, unsigned int txqueue)
{
struct mpc52xx_fec_priv *priv = netdev_priv(dev);
unsigned long flags;
@@ -785,16 +785,6 @@ static const struct ethtool_ops mpc52xx_fec_ethtool_ops = {
};
-static int mpc52xx_fec_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
-{
- struct phy_device *phydev = dev->phydev;
-
- if (!phydev)
- return -ENOTSUPP;
-
- return phy_mii_ioctl(phydev, rq, cmd);
-}
-
static const struct net_device_ops mpc52xx_fec_netdev_ops = {
.ndo_open = mpc52xx_fec_open,
.ndo_stop = mpc52xx_fec_close,
@@ -802,7 +792,7 @@ static const struct net_device_ops mpc52xx_fec_netdev_ops = {
.ndo_set_rx_mode = mpc52xx_fec_set_multicast_list,
.ndo_set_mac_address = mpc52xx_fec_set_mac_address,
.ndo_validate_addr = eth_validate_addr,
- .ndo_do_ioctl = mpc52xx_fec_ioctl,
+ .ndo_do_ioctl = phy_do_ioctl,
.ndo_tx_timeout = mpc52xx_fec_tx_timeout,
.ndo_get_stats = mpc52xx_fec_get_stats,
#ifdef CONFIG_NET_POLL_CONTROLLER
diff --git a/drivers/net/ethernet/freescale/fman/fman_memac.c b/drivers/net/ethernet/freescale/fman/fman_memac.c
index 41c6fa200e74..e1901874c19f 100644
--- a/drivers/net/ethernet/freescale/fman/fman_memac.c
+++ b/drivers/net/ethernet/freescale/fman/fman_memac.c
@@ -110,7 +110,7 @@ do { \
/* Interface Mode Register (IF_MODE) */
#define IF_MODE_MASK 0x00000003 /* 30-31 Mask on i/f mode bits */
-#define IF_MODE_XGMII 0x00000000 /* 30-31 XGMII (10G) interface */
+#define IF_MODE_10G 0x00000000 /* 30-31 10G interface */
#define IF_MODE_GMII 0x00000002 /* 30-31 GMII (1G) interface */
#define IF_MODE_RGMII 0x00000004
#define IF_MODE_RGMII_AUTO 0x00008000
@@ -440,7 +440,7 @@ static int init(struct memac_regs __iomem *regs, struct memac_cfg *cfg,
tmp = 0;
switch (phy_if) {
case PHY_INTERFACE_MODE_XGMII:
- tmp |= IF_MODE_XGMII;
+ tmp |= IF_MODE_10G;
break;
default:
tmp |= IF_MODE_GMII;
diff --git a/drivers/net/ethernet/freescale/fman/mac.c b/drivers/net/ethernet/freescale/fman/mac.c
index f0806ace1ae2..55f2122c3217 100644
--- a/drivers/net/ethernet/freescale/fman/mac.c
+++ b/drivers/net/ethernet/freescale/fman/mac.c
@@ -692,7 +692,7 @@ static int mac_probe(struct platform_device *_of_dev)
mac_dev->res = __devm_request_region(dev,
fman_get_mem_region(priv->fman),
- res.start, res.end + 1 - res.start,
+ res.start, resource_size(&res),
"mac");
if (!mac_dev->res) {
dev_err(dev, "__devm_request_mem_region(mac) failed\n");
@@ -701,7 +701,7 @@ static int mac_probe(struct platform_device *_of_dev)
}
priv->vaddr = devm_ioremap(dev, mac_dev->res->start,
- mac_dev->res->end + 1 - mac_dev->res->start);
+ resource_size(mac_dev->res));
if (!priv->vaddr) {
dev_err(dev, "devm_ioremap() failed\n");
err = -EIO;
diff --git a/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c b/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c
index 3981c06f082f..add61fed33ee 100644
--- a/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c
+++ b/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c
@@ -641,7 +641,7 @@ static void fs_timeout_work(struct work_struct *work)
netif_wake_queue(dev);
}
-static void fs_timeout(struct net_device *dev)
+static void fs_timeout(struct net_device *dev, unsigned int txqueue)
{
struct fs_enet_private *fep = netdev_priv(dev);
@@ -882,14 +882,6 @@ static const struct ethtool_ops fs_ethtool_ops = {
.set_tunable = fs_set_tunable,
};
-static int fs_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
-{
- if (!netif_running(dev))
- return -EINVAL;
-
- return phy_mii_ioctl(dev->phydev, rq, cmd);
-}
-
extern int fs_mii_connect(struct net_device *dev);
extern void fs_mii_disconnect(struct net_device *dev);
@@ -907,7 +899,7 @@ static const struct net_device_ops fs_enet_netdev_ops = {
.ndo_start_xmit = fs_enet_start_xmit,
.ndo_tx_timeout = fs_timeout,
.ndo_set_rx_mode = fs_set_multicast_list,
- .ndo_do_ioctl = fs_ioctl,
+ .ndo_do_ioctl = phy_do_ioctl_running,
.ndo_validate_addr = eth_validate_addr,
.ndo_set_mac_address = eth_mac_addr,
#ifdef CONFIG_NET_POLL_CONTROLLER
diff --git a/drivers/net/ethernet/freescale/gianfar.c b/drivers/net/ethernet/freescale/gianfar.c
index 72868a28b621..f7e5cafe89a9 100644
--- a/drivers/net/ethernet/freescale/gianfar.c
+++ b/drivers/net/ethernet/freescale/gianfar.c
@@ -2093,7 +2093,7 @@ static void gfar_reset_task(struct work_struct *work)
reset_gfar(priv->ndev);
}
-static void gfar_timeout(struct net_device *dev)
+static void gfar_timeout(struct net_device *dev, unsigned int txqueue)
{
struct gfar_private *priv = netdev_priv(dev);
@@ -2205,13 +2205,17 @@ static void gfar_clean_tx_ring(struct gfar_priv_tx_q *tx_queue)
skb_dirtytx = tx_queue->skb_dirtytx;
while ((skb = tx_queue->tx_skbuff[skb_dirtytx])) {
+ bool do_tstamp;
+
+ do_tstamp = (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
+ priv->hwts_tx_en;
frags = skb_shinfo(skb)->nr_frags;
/* When time stamping, one additional TxBD must be freed.
* Also, we need to dma_unmap_single() the TxPAL.
*/
- if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS))
+ if (unlikely(do_tstamp))
nr_txbds = frags + 2;
else
nr_txbds = frags + 1;
@@ -2225,7 +2229,7 @@ static void gfar_clean_tx_ring(struct gfar_priv_tx_q *tx_queue)
(lstatus & BD_LENGTH_MASK))
break;
- if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS)) {
+ if (unlikely(do_tstamp)) {
next = next_txbd(bdp, base, tx_ring_size);
buflen = be16_to_cpu(next->length) +
GMAC_FCB_LEN + GMAC_TXPAL_LEN;
@@ -2235,7 +2239,7 @@ static void gfar_clean_tx_ring(struct gfar_priv_tx_q *tx_queue)
dma_unmap_single(priv->dev, be32_to_cpu(bdp->bufPtr),
buflen, DMA_TO_DEVICE);
- if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS)) {
+ if (unlikely(do_tstamp)) {
struct skb_shared_hwtstamps shhwtstamps;
u64 *ns = (u64 *)(((uintptr_t)skb->data + 0x10) &
~0x7UL);
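
The gianfar change replaces the SKBTX_IN_PROGRESS test, which any timestamping layer can set, with SKBTX_HW_TSTAMP plus the device's own hwts_tx_en, so the extra TxBD is only accounted for frames this MAC was actually asked to timestamp. hwts_tx_en is the flag toggled by the standard SIOCSHWTSTAMP ioctl; a hedged user-space sketch:

#include <string.h>
#include <sys/ioctl.h>
#include <net/if.h>
#include <linux/sockios.h>
#include <linux/net_tstamp.h>

/* request hardware TX timestamping on an interface; fd is any
 * open socket, ifname is an assumption
 */
static int enable_tx_hwtstamp(int fd, const char *ifname)
{
	struct hwtstamp_config cfg;
	struct ifreq ifr;

	memset(&cfg, 0, sizeof(cfg));
	cfg.tx_type = HWTSTAMP_TX_ON;
	cfg.rx_filter = HWTSTAMP_FILTER_NONE;

	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, ifname, IFNAMSIZ - 1);
	ifr.ifr_data = (void *)&cfg;

	return ioctl(fd, SIOCSHWTSTAMP, &ifr);
}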
diff --git a/drivers/net/ethernet/freescale/ucc_geth.c b/drivers/net/ethernet/freescale/ucc_geth.c
index f839fa94ebdd..0d101c00286f 100644
--- a/drivers/net/ethernet/freescale/ucc_geth.c
+++ b/drivers/net/ethernet/freescale/ucc_geth.c
@@ -3545,7 +3545,7 @@ static void ucc_geth_timeout_work(struct work_struct *work)
* ucc_geth_timeout gets called when a packet has not been
* transmitted after a set amount of time.
*/
-static void ucc_geth_timeout(struct net_device *dev)
+static void ucc_geth_timeout(struct net_device *dev, unsigned int txqueue)
{
struct ucc_geth_private *ugeth = netdev_priv(dev);
diff --git a/drivers/net/ethernet/freescale/xgmac_mdio.c b/drivers/net/ethernet/freescale/xgmac_mdio.c
index e03b30c60dcf..c82c85ef5fb3 100644
--- a/drivers/net/ethernet/freescale/xgmac_mdio.c
+++ b/drivers/net/ethernet/freescale/xgmac_mdio.c
@@ -49,6 +49,7 @@ struct tgec_mdio_controller {
struct mdio_fsl_priv {
struct tgec_mdio_controller __iomem *mdio_base;
bool is_little_endian;
+ bool has_a011043;
};
static u32 xgmac_read32(void __iomem *regs,
@@ -226,7 +227,8 @@ static int xgmac_mdio_read(struct mii_bus *bus, int phy_id, int regnum)
return ret;
/* Return all Fs if nothing was there */
- if (xgmac_read32(&regs->mdio_stat, endian) & MDIO_STAT_RD_ER) {
+ if ((xgmac_read32(&regs->mdio_stat, endian) & MDIO_STAT_RD_ER) &&
+ !priv->has_a011043) {
dev_err(&bus->dev,
"Error while reading PHY%d reg at %d.%hhu\n",
phy_id, dev_addr, regnum);
@@ -274,6 +276,9 @@ static int xgmac_mdio_probe(struct platform_device *pdev)
priv->is_little_endian = of_property_read_bool(pdev->dev.of_node,
"little-endian");
+ priv->has_a011043 = of_property_read_bool(pdev->dev.of_node,
+ "fsl,erratum-a011043");
+
ret = of_mdiobus_register(bus, np);
if (ret) {
dev_err(&pdev->dev, "cannot register MDIO bus\n");
diff --git a/drivers/net/ethernet/fujitsu/fmvj18x_cs.c b/drivers/net/ethernet/fujitsu/fmvj18x_cs.c
index 1eca0fdb9933..a7b7a4aace79 100644
--- a/drivers/net/ethernet/fujitsu/fmvj18x_cs.c
+++ b/drivers/net/ethernet/fujitsu/fmvj18x_cs.c
@@ -93,7 +93,7 @@ static irqreturn_t fjn_interrupt(int irq, void *dev_id);
static void fjn_rx(struct net_device *dev);
static void fjn_reset(struct net_device *dev);
static void set_rx_mode(struct net_device *dev);
-static void fjn_tx_timeout(struct net_device *dev);
+static void fjn_tx_timeout(struct net_device *dev, unsigned int txqueue);
static const struct ethtool_ops netdev_ethtool_ops;
/*
@@ -774,7 +774,7 @@ static irqreturn_t fjn_interrupt(int dummy, void *dev_id)
/*====================================================================*/
-static void fjn_tx_timeout(struct net_device *dev)
+static void fjn_tx_timeout(struct net_device *dev, unsigned int txqueue)
{
struct local_info *lp = netdev_priv(dev);
unsigned int ioaddr = dev->base_addr;
diff --git a/drivers/net/ethernet/google/gve/gve_main.c b/drivers/net/ethernet/google/gve/gve_main.c
index 9b7a8db9860f..e032563ceefd 100644
--- a/drivers/net/ethernet/google/gve/gve_main.c
+++ b/drivers/net/ethernet/google/gve/gve_main.c
@@ -845,7 +845,7 @@ static void gve_turnup(struct gve_priv *priv)
gve_set_napi_enabled(priv);
}
-static void gve_tx_timeout(struct net_device *dev)
+static void gve_tx_timeout(struct net_device *dev, unsigned int txqueue)
{
struct gve_priv *priv = netdev_priv(dev);
diff --git a/drivers/net/ethernet/hisilicon/hip04_eth.c b/drivers/net/ethernet/hisilicon/hip04_eth.c
index 150a8ccfb8b1..d9718b87279d 100644
--- a/drivers/net/ethernet/hisilicon/hip04_eth.c
+++ b/drivers/net/ethernet/hisilicon/hip04_eth.c
@@ -779,7 +779,7 @@ static int hip04_mac_stop(struct net_device *ndev)
return 0;
}
-static void hip04_timeout(struct net_device *ndev)
+static void hip04_timeout(struct net_device *ndev, unsigned int txqueue)
{
struct hip04_priv *priv = netdev_priv(ndev);
diff --git a/drivers/net/ethernet/hisilicon/hisi_femac.c b/drivers/net/ethernet/hisilicon/hisi_femac.c
index 90ab7ade44c4..57c3bc4f7089 100644
--- a/drivers/net/ethernet/hisilicon/hisi_femac.c
+++ b/drivers/net/ethernet/hisilicon/hisi_femac.c
@@ -675,18 +675,6 @@ static void hisi_femac_net_set_rx_mode(struct net_device *dev)
}
}
-static int hisi_femac_net_ioctl(struct net_device *dev,
- struct ifreq *ifreq, int cmd)
-{
- if (!netif_running(dev))
- return -EINVAL;
-
- if (!dev->phydev)
- return -EINVAL;
-
- return phy_mii_ioctl(dev->phydev, ifreq, cmd);
-}
-
static const struct ethtool_ops hisi_femac_ethtools_ops = {
.get_link = ethtool_op_get_link,
.get_link_ksettings = phy_ethtool_get_link_ksettings,
@@ -697,7 +685,7 @@ static const struct net_device_ops hisi_femac_netdev_ops = {
.ndo_open = hisi_femac_net_open,
.ndo_stop = hisi_femac_net_close,
.ndo_start_xmit = hisi_femac_net_xmit,
- .ndo_do_ioctl = hisi_femac_net_ioctl,
+ .ndo_do_ioctl = phy_do_ioctl_running,
.ndo_set_mac_address = hisi_femac_set_mac_address,
.ndo_set_rx_mode = hisi_femac_net_set_rx_mode,
};
diff --git a/drivers/net/ethernet/hisilicon/hix5hd2_gmac.c b/drivers/net/ethernet/hisilicon/hix5hd2_gmac.c
index 247de9105d10..4fb776920a93 100644
--- a/drivers/net/ethernet/hisilicon/hix5hd2_gmac.c
+++ b/drivers/net/ethernet/hisilicon/hix5hd2_gmac.c
@@ -893,7 +893,7 @@ static void hix5hd2_tx_timeout_task(struct work_struct *work)
hix5hd2_net_open(priv->netdev);
}
-static void hix5hd2_net_timeout(struct net_device *dev)
+static void hix5hd2_net_timeout(struct net_device *dev, unsigned int txqueue)
{
struct hix5hd2_priv *priv = netdev_priv(dev);
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_enet.c b/drivers/net/ethernet/hisilicon/hns/hns_enet.c
index 14ab20491fd0..c117074c16e3 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_enet.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_enet.c
@@ -565,7 +565,6 @@ static int hns_nic_poll_rx_skb(struct hns_nic_ring_data *ring_data,
skb = *out_skb = napi_alloc_skb(&ring_data->napi,
HNS_RX_HEAD_SIZE);
if (unlikely(!skb)) {
- netdev_err(ndev, "alloc rx skb fail\n");
ring->stats.sw_err_cnt++;
return -ENOMEM;
}
@@ -1056,7 +1055,6 @@ static int hns_nic_common_poll(struct napi_struct *napi, int budget)
container_of(napi, struct hns_nic_ring_data, napi);
struct hnae_ring *ring = ring_data->ring;
-try_again:
clean_complete += ring_data->poll_one(
ring_data, budget - clean_complete,
ring_data->ex_process);
@@ -1066,7 +1064,7 @@ try_again:
napi_complete(napi);
ring->q->handle->dev->ops->toggle_ring_irq(ring, 0);
} else {
- goto try_again;
+ return budget;
}
}
@@ -1485,7 +1483,7 @@ static int hns_nic_net_stop(struct net_device *ndev)
static void hns_tx_timeout_reset(struct hns_nic_priv *priv);
#define HNS_TX_TIMEO_LIMIT (40 * HZ)
-static void hns_nic_net_timeout(struct net_device *ndev)
+static void hns_nic_net_timeout(struct net_device *ndev, unsigned int txqueue)
{
struct hns_nic_priv *priv = netdev_priv(ndev);
@@ -1499,20 +1497,6 @@ static void hns_nic_net_timeout(struct net_device *ndev)
}
}
-static int hns_nic_do_ioctl(struct net_device *netdev, struct ifreq *ifr,
- int cmd)
-{
- struct phy_device *phy_dev = netdev->phydev;
-
- if (!netif_running(netdev))
- return -EINVAL;
-
- if (!phy_dev)
- return -ENOTSUPP;
-
- return phy_mii_ioctl(phy_dev, ifr, cmd);
-}
-
static netdev_tx_t hns_nic_net_xmit(struct sk_buff *skb,
struct net_device *ndev)
{
@@ -1960,7 +1944,7 @@ static const struct net_device_ops hns_nic_netdev_ops = {
.ndo_tx_timeout = hns_nic_net_timeout,
.ndo_set_mac_address = hns_nic_net_set_mac_address,
.ndo_change_mtu = hns_nic_change_mtu,
- .ndo_do_ioctl = hns_nic_do_ioctl,
+ .ndo_do_ioctl = phy_do_ioctl_running,
.ndo_set_features = hns_nic_set_features,
.ndo_fix_features = hns_nic_fix_features,
.ndo_get_stats64 = hns_nic_get_stats64,
diff --git a/drivers/net/ethernet/hisilicon/hns3/Makefile b/drivers/net/ethernet/hisilicon/hns3/Makefile
index d01bf536eb86..7aa2fac76c5e 100644
--- a/drivers/net/ethernet/hisilicon/hns3/Makefile
+++ b/drivers/net/ethernet/hisilicon/hns3/Makefile
@@ -3,6 +3,8 @@
# Makefile for the HISILICON network device drivers.
#
+ccflags-y += -I$(srctree)/$(src)
+
obj-$(CONFIG_HNS3) += hns3pf/
obj-$(CONFIG_HNS3) += hns3vf/
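
The new -I$(srctree)/$(src) flag exists for the tracepoints introduced in hns3_enet.c below: define_trace.h re-includes the trace header by file name relative to the compiler include path, so the driver directory has to be searchable. The header presumably follows the standard layout (events elided, a sketch of the pattern rather than the actual hns3_trace.h):

#undef TRACE_SYSTEM
#define TRACE_SYSTEM hns3

#if !defined(_HNS3_TRACE_H_) || defined(TRACE_HEADER_MULTI_READ)
#define _HNS3_TRACE_H_

#include <linux/tracepoint.h>

/* TRACE_EVENT(hns3_tso, ...), trace_hns3_tx_desc(), etc. defined here */

#endif /* _HNS3_TRACE_H_ */

/* must stay outside the include guard */
#undef TRACE_INCLUDE_PATH
#define TRACE_INCLUDE_PATH .
#undef TRACE_INCLUDE_FILE
#define TRACE_INCLUDE_FILE hns3_trace
#include <trace/define_trace.h>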
diff --git a/drivers/net/ethernet/hisilicon/hns3/hnae3.h b/drivers/net/ethernet/hisilicon/hns3/hnae3.h
index 3b5e2d7251e7..a3e4081b84ba 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hnae3.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hnae3.h
@@ -164,11 +164,7 @@ enum hnae3_reset_type {
HNAE3_IMP_RESET,
HNAE3_UNKNOWN_RESET,
HNAE3_NONE_RESET,
-};
-
-enum hnae3_flr_state {
- HNAE3_FLR_DOWN,
- HNAE3_FLR_DONE,
+ HNAE3_MAX_RESET,
};
enum hnae3_port_base_vlan_state {
@@ -575,8 +571,7 @@ struct hnae3_ae_algo {
const struct pci_device_id *pdev_id_table;
};
-#define HNAE3_INT_NAME_EXT_LEN 32 /* Max extra information length */
-#define HNAE3_INT_NAME_LEN (IFNAMSIZ + HNAE3_INT_NAME_EXT_LEN)
+#define HNAE3_INT_NAME_LEN 32
#define HNAE3_ITR_COUNTDOWN_START 100
struct hnae3_tc_info {
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c b/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c
index 6b328a259efc..1d4ffc5f408a 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c
@@ -176,7 +176,7 @@ static int hns3_dbg_bd_info(struct hnae3_handle *h, const char *cmd_buf)
return -EINVAL;
}
- ring = &priv->ring[q_num];
+ ring = &priv->ring[q_num];
value = readl_relaxed(ring->tqp->io_base + HNS3_RING_TX_RING_TAIL_REG);
tx_index = (cnt == 1) ? value : tx_index;
@@ -209,10 +209,10 @@ static int hns3_dbg_bd_info(struct hnae3_handle *h, const char *cmd_buf)
le16_to_cpu(tx_desc->tx.bdtp_fe_sc_vld_ra_ri));
dev_info(dev, "(TX)mss: %u\n", le16_to_cpu(tx_desc->tx.mss));
- ring = &priv->ring[q_num + h->kinfo.num_tqps];
+ ring = &priv->ring[q_num + h->kinfo.num_tqps];
value = readl_relaxed(ring->tqp->io_base + HNS3_RING_RX_RING_TAIL_REG);
rx_index = (cnt == 1) ? value : tx_index;
- rx_desc = &ring->desc[rx_index];
+ rx_desc = &ring->desc[rx_index];
addr = le64_to_cpu(rx_desc->addr);
dev_info(dev, "RX Queue Num: %u, BD Index: %u\n", q_num, rx_index);
@@ -297,8 +297,8 @@ static ssize_t hns3_dbg_cmd_read(struct file *filp, char __user *buffer,
if (!buf)
return -ENOMEM;
- len = snprintf(buf, HNS3_DBG_READ_LEN, "%s\n",
- "Please echo help to cmd to get help information");
+ len = scnprintf(buf, HNS3_DBG_READ_LEN, "%s\n",
+ "Please echo help to cmd to get help information");
uncopy_bytes = copy_to_user(buffer, buf, len);
kfree(buf);
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
index 69545dd6c938..acb796cc10d0 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
@@ -24,6 +24,12 @@
#include "hnae3.h"
#include "hns3_enet.h"
+/* All hns3 tracepoints are defined by the include below, which
+ * must be included exactly once across the whole kernel with
+ * CREATE_TRACE_POINTS defined
+ */
+#define CREATE_TRACE_POINTS
+#include "hns3_trace.h"
#define hns3_set_field(origin, shift, val) ((origin) |= ((val) << (shift)))
#define hns3_tx_bd_count(S) DIV_ROUND_UP(S, HNS3_MAX_BD_SIZE)
@@ -54,6 +60,8 @@ MODULE_PARM_DESC(debug, " Network interface message level setting");
#define HNS3_INNER_VLAN_TAG 1
#define HNS3_OUTER_VLAN_TAG 2
+#define HNS3_MIN_TX_LEN 33U
+
/* hns3_pci_tbl - PCI Device ID Table
*
* Last entry must be all 0s
@@ -127,18 +135,21 @@ static int hns3_nic_init_irq(struct hns3_nic_priv *priv)
continue;
if (tqp_vectors->tx_group.ring && tqp_vectors->rx_group.ring) {
- snprintf(tqp_vectors->name, HNAE3_INT_NAME_LEN - 1,
- "%s-%s-%d", priv->netdev->name, "TxRx",
- txrx_int_idx++);
+ snprintf(tqp_vectors->name, HNAE3_INT_NAME_LEN,
+ "%s-%s-%s-%d", hns3_driver_name,
+ pci_name(priv->ae_handle->pdev),
+ "TxRx", txrx_int_idx++);
txrx_int_idx++;
} else if (tqp_vectors->rx_group.ring) {
- snprintf(tqp_vectors->name, HNAE3_INT_NAME_LEN - 1,
- "%s-%s-%d", priv->netdev->name, "Rx",
- rx_int_idx++);
+ snprintf(tqp_vectors->name, HNAE3_INT_NAME_LEN,
+ "%s-%s-%s-%d", hns3_driver_name,
+ pci_name(priv->ae_handle->pdev),
+ "Rx", rx_int_idx++);
} else if (tqp_vectors->tx_group.ring) {
- snprintf(tqp_vectors->name, HNAE3_INT_NAME_LEN - 1,
- "%s-%s-%d", priv->netdev->name, "Tx",
- tx_int_idx++);
+ snprintf(tqp_vectors->name, HNAE3_INT_NAME_LEN,
+ "%s-%s-%s-%d", hns3_driver_name,
+ pci_name(priv->ae_handle->pdev),
+ "Tx", tx_int_idx++);
} else {
/* Skip this unused q_vector */
continue;
@@ -155,6 +166,8 @@ static int hns3_nic_init_irq(struct hns3_nic_priv *priv)
return ret;
}
+ disable_irq(tqp_vectors->vector_irq);
+
irq_set_affinity_hint(tqp_vectors->vector_irq,
&tqp_vectors->affinity_mask);
@@ -173,6 +186,7 @@ static void hns3_mask_vector_irq(struct hns3_enet_tqp_vector *tqp_vector,
static void hns3_vector_enable(struct hns3_enet_tqp_vector *tqp_vector)
{
napi_enable(&tqp_vector->napi);
+ enable_irq(tqp_vector->vector_irq);
/* enable vector */
hns3_mask_vector_irq(tqp_vector, 1);
@@ -372,18 +386,6 @@ static int hns3_nic_net_up(struct net_device *netdev)
if (ret)
return ret;
- /* the device can work without cpu rmap, only aRFS needs it */
- ret = hns3_set_rx_cpu_rmap(netdev);
- if (ret)
- netdev_warn(netdev, "set rx cpu rmap fail, ret=%d!\n", ret);
-
- /* get irq resource for all vectors */
- ret = hns3_nic_init_irq(priv);
- if (ret) {
- netdev_err(netdev, "init irq failed! ret=%d\n", ret);
- goto free_rmap;
- }
-
clear_bit(HNS3_NIC_STATE_DOWN, &priv->state);
/* enable the vectors */
@@ -396,22 +398,15 @@ static int hns3_nic_net_up(struct net_device *netdev)
/* start the ae_dev */
ret = h->ae_algo->ops->start ? h->ae_algo->ops->start(h) : 0;
- if (ret)
- goto out_start_err;
-
- return 0;
-
-out_start_err:
- set_bit(HNS3_NIC_STATE_DOWN, &priv->state);
- while (j--)
- hns3_tqp_disable(h->kinfo.tqp[j]);
+ if (ret) {
+ set_bit(HNS3_NIC_STATE_DOWN, &priv->state);
+ while (j--)
+ hns3_tqp_disable(h->kinfo.tqp[j]);
- for (j = i - 1; j >= 0; j--)
- hns3_vector_disable(&priv->tqp_vector[j]);
+ for (j = i - 1; j >= 0; j--)
+ hns3_vector_disable(&priv->tqp_vector[j]);
+ }
- hns3_nic_uninit_irq(priv);
-free_rmap:
- hns3_free_rx_cpu_rmap(netdev);
return ret;
}
@@ -508,11 +503,6 @@ static void hns3_nic_net_down(struct net_device *netdev)
if (ops->stop)
ops->stop(priv->ae_handle);
- hns3_free_rx_cpu_rmap(netdev);
-
- /* free irq resources */
- hns3_nic_uninit_irq(priv);
-
/* delay ring buffer clearing to hns3_reset_notify_uninit_enet
* during reset process, because driver may not be able
* to disable the ring through firmware when downing the netdev.
@@ -734,6 +724,8 @@ static int hns3_set_tso(struct sk_buff *skb, u32 *paylen,
/* get MSS for TSO */
*mss = skb_shinfo(skb)->gso_size;
+ trace_hns3_tso(skb);
+
return 0;
}
@@ -1138,6 +1130,7 @@ static int hns3_fill_desc(struct hns3_enet_ring *ring, void *priv,
desc->tx.bdtp_fe_sc_vld_ra_ri =
cpu_to_le16(BIT(HNS3_TXD_VLD_B));
+ trace_hns3_tx_desc(ring, ring->next_to_use);
ring_ptr_move_fw(ring, next_to_use);
return HNS3_LIKELY_BD_NUM;
}
@@ -1161,6 +1154,7 @@ static int hns3_fill_desc(struct hns3_enet_ring *ring, void *priv,
desc->tx.bdtp_fe_sc_vld_ra_ri =
cpu_to_le16(BIT(HNS3_TXD_VLD_B));
+ trace_hns3_tx_desc(ring, ring->next_to_use);
/* move ring pointer to next */
ring_ptr_move_fw(ring, next_to_use);
@@ -1286,6 +1280,14 @@ static bool hns3_skb_need_linearized(struct sk_buff *skb, unsigned int *bd_size,
return false;
}
+void hns3_shinfo_pack(struct skb_shared_info *shinfo, __u32 *size)
+{
+ int i = 0;
+
+ for (i = 0; i < MAX_SKB_FRAGS; i++)
+ size[i] = skb_frag_size(&shinfo->frags[i]);
+}
+
static int hns3_nic_maybe_stop_tx(struct hns3_enet_ring *ring,
struct net_device *netdev,
struct sk_buff *skb)
@@ -1297,8 +1299,10 @@ static int hns3_nic_maybe_stop_tx(struct hns3_enet_ring *ring,
bd_num = hns3_tx_bd_num(skb, bd_size);
if (unlikely(bd_num > HNS3_MAX_NON_TSO_BD_NUM)) {
if (bd_num <= HNS3_MAX_TSO_BD_NUM && skb_is_gso(skb) &&
- !hns3_skb_need_linearized(skb, bd_size, bd_num))
+ !hns3_skb_need_linearized(skb, bd_size, bd_num)) {
+ trace_hns3_over_8bd(skb);
goto out;
+ }
if (__skb_linearize(skb))
return -ENOMEM;
@@ -1306,8 +1310,10 @@ static int hns3_nic_maybe_stop_tx(struct hns3_enet_ring *ring,
bd_num = hns3_tx_bd_count(skb->len);
if ((skb_is_gso(skb) && bd_num > HNS3_MAX_TSO_BD_NUM) ||
(!skb_is_gso(skb) &&
- bd_num > HNS3_MAX_NON_TSO_BD_NUM))
+ bd_num > HNS3_MAX_NON_TSO_BD_NUM)) {
+ trace_hns3_over_8bd(skb);
return -ENOMEM;
+ }
u64_stats_update_begin(&ring->syncp);
ring->stats.tx_copy++;
@@ -1405,6 +1411,10 @@ netdev_tx_t hns3_nic_net_xmit(struct sk_buff *skb, struct net_device *netdev)
int bd_num = 0;
int ret;
+ /* Hardware cannot handle frames shorter than 33 bytes; pad short frames */
+ if (skb_put_padto(skb, HNS3_MIN_TX_LEN))
+ return NETDEV_TX_OK;
+
/* Prefetch the data used later */
prefetch(skb->data);
@@ -1448,6 +1458,7 @@ out:
(ring->desc_num - 1);
ring->desc[pre_ntu].tx.bdtp_fe_sc_vld_ra_ri |=
cpu_to_le16(BIT(HNS3_TXD_FE_B));
+ trace_hns3_tx_desc(ring, pre_ntu);
/* Complete translate all packets */
dev_queue = netdev_get_tx_queue(netdev, ring->queue_index);
@@ -1556,6 +1567,37 @@ static int hns3_nic_set_features(struct net_device *netdev,
return 0;
}
+static netdev_features_t hns3_features_check(struct sk_buff *skb,
+ struct net_device *dev,
+ netdev_features_t features)
+{
+#define HNS3_MAX_HDR_LEN 480U
+#define HNS3_MAX_L4_HDR_LEN 60U
+
+ size_t len;
+
+ if (skb->ip_summed != CHECKSUM_PARTIAL)
+ return features;
+
+ if (skb->encapsulation)
+ len = skb_inner_transport_header(skb) - skb->data;
+ else
+ len = skb_transport_header(skb) - skb->data;
+
+ /* Assume the L4 header is 60 bytes, as TCP is the only protocol
+ * with a flexible header length, and its maximum is 60 bytes.
+ */
+ len += HNS3_MAX_L4_HDR_LEN;
+
+ /* Hardware only supports checksum offload for skbs with a header
+ * length of at most 480 bytes.
+ */
+ if (len > HNS3_MAX_HDR_LEN)
+ features &= ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK);
+
+ return features;
+}
+
static void hns3_nic_get_stats64(struct net_device *netdev,
struct rtnl_link_stats64 *stats)
{
@@ -1869,7 +1911,7 @@ static bool hns3_get_tx_timeo_queue_info(struct net_device *ndev)
return true;
}
-static void hns3_nic_net_timeout(struct net_device *ndev)
+static void hns3_nic_net_timeout(struct net_device *ndev, unsigned int txqueue)
{
struct hns3_nic_priv *priv = netdev_priv(ndev);
struct hnae3_handle *h = priv->ae_handle;
@@ -1970,6 +2012,7 @@ static const struct net_device_ops hns3_nic_netdev_ops = {
.ndo_do_ioctl = hns3_nic_do_ioctl,
.ndo_change_mtu = hns3_nic_change_mtu,
.ndo_set_features = hns3_nic_set_features,
+ .ndo_features_check = hns3_features_check,
.ndo_get_stats64 = hns3_nic_get_stats64,
.ndo_setup_tc = hns3_nic_setup_tc,
.ndo_set_rx_mode = hns3_nic_set_rx_mode,
@@ -2051,10 +2094,8 @@ static int hns3_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
int ret;
ae_dev = devm_kzalloc(&pdev->dev, sizeof(*ae_dev), GFP_KERNEL);
- if (!ae_dev) {
- ret = -ENOMEM;
- return ret;
- }
+ if (!ae_dev)
+ return -ENOMEM;
ae_dev->pdev = pdev;
ae_dev->flag = ent->driver_data;
@@ -2497,8 +2538,8 @@ void hns3_clean_tx_ring(struct hns3_enet_ring *ring)
rmb(); /* Make sure head is ready before touch any data */
if (unlikely(!is_valid_clean_head(ring, head))) {
- netdev_err(netdev, "wrong head (%d, %d-%d)\n", head,
- ring->next_to_use, ring->next_to_clean);
+ hns3_rl_err(netdev, "wrong head (%d, %d-%d)\n", head,
+ ring->next_to_use, ring->next_to_clean);
u64_stats_update_begin(&ring->syncp);
ring->stats.io_err_cnt++;
@@ -2584,6 +2625,12 @@ static void hns3_nic_alloc_rx_buffers(struct hns3_enet_ring *ring,
writel_relaxed(i, ring->tqp->io_base + HNS3_RING_RX_RING_HEAD_REG);
}
+static bool hns3_page_is_reusable(struct page *page)
+{
+ return page_to_nid(page) == numa_mem_id() &&
+ !page_is_pfmemalloc(page);
+}
+
static void hns3_nic_reuse_page(struct sk_buff *skb, int i,
struct hns3_enet_ring *ring, int pull_len,
struct hns3_desc_cb *desc_cb)
@@ -2598,7 +2645,7 @@ static void hns3_nic_reuse_page(struct sk_buff *skb, int i,
/* Avoid re-using remote pages, or the stack is still using the page
* when page_offset rollback to zero, flag default unreuse
*/
- if (unlikely(page_to_nid(desc_cb->priv) != numa_mem_id()) ||
+ if (unlikely(!hns3_page_is_reusable(desc_cb->priv)) ||
(!desc_cb->page_offset && page_count(desc_cb->priv) > 1))
return;
@@ -2668,6 +2715,9 @@ static int hns3_gro_complete(struct sk_buff *skb, u32 l234info)
skb->csum_start = (unsigned char *)th - skb->head;
skb->csum_offset = offsetof(struct tcphdr, check);
skb->ip_summed = CHECKSUM_PARTIAL;
+
+ trace_hns3_gro(skb);
+
return 0;
}
@@ -2788,7 +2838,6 @@ static bool hns3_parse_vlan_tag(struct hns3_enet_ring *ring,
static int hns3_alloc_skb(struct hns3_enet_ring *ring, unsigned int length,
unsigned char *va)
{
-#define HNS3_NEED_ADD_FRAG 1
struct hns3_desc_cb *desc_cb = &ring->desc_cb[ring->next_to_clean];
struct net_device *netdev = ring_to_netdev(ring);
struct sk_buff *skb;
@@ -2805,6 +2854,7 @@ static int hns3_alloc_skb(struct hns3_enet_ring *ring, unsigned int length,
return -ENOMEM;
}
+ trace_hns3_rx_desc(ring);
prefetchw(skb->data);
ring->pending_buf = 1;
@@ -2814,7 +2864,7 @@ static int hns3_alloc_skb(struct hns3_enet_ring *ring, unsigned int length,
memcpy(__skb_put(skb, length), va, ALIGN(length, sizeof(long)));
/* We can reuse buffer as-is, just make sure it is local */
- if (likely(page_to_nid(desc_cb->priv) == numa_mem_id()))
+ if (likely(hns3_page_is_reusable(desc_cb->priv)))
desc_cb->reuse_flag = 1;
else /* This page cannot be reused so discard it */
put_page(desc_cb->priv);
@@ -2832,33 +2882,19 @@ static int hns3_alloc_skb(struct hns3_enet_ring *ring, unsigned int length,
desc_cb);
ring_ptr_move_fw(ring, next_to_clean);
- return HNS3_NEED_ADD_FRAG;
+ return 0;
}
-static int hns3_add_frag(struct hns3_enet_ring *ring, struct hns3_desc *desc,
- bool pending)
+static int hns3_add_frag(struct hns3_enet_ring *ring)
{
struct sk_buff *skb = ring->skb;
struct sk_buff *head_skb = skb;
struct sk_buff *new_skb;
struct hns3_desc_cb *desc_cb;
- struct hns3_desc *pre_desc;
+ struct hns3_desc *desc;
u32 bd_base_info;
- int pre_bd;
- /* if there is pending bd, the SW param next_to_clean has moved
- * to next and the next is NULL
- */
- if (pending) {
- pre_bd = (ring->next_to_clean - 1 + ring->desc_num) %
- ring->desc_num;
- pre_desc = &ring->desc[pre_bd];
- bd_base_info = le32_to_cpu(pre_desc->rx.bd_base_info);
- } else {
- bd_base_info = le32_to_cpu(desc->rx.bd_base_info);
- }
-
- while (!(bd_base_info & BIT(HNS3_RXD_FE_B))) {
+ do {
desc = &ring->desc[ring->next_to_clean];
desc_cb = &ring->desc_cb[ring->next_to_clean];
bd_base_info = le32_to_cpu(desc->rx.bd_base_info);
@@ -2893,9 +2929,10 @@ static int hns3_add_frag(struct hns3_enet_ring *ring, struct hns3_desc *desc,
}
hns3_nic_reuse_page(skb, ring->frag_num++, ring, 0, desc_cb);
+ trace_hns3_rx_desc(ring);
ring_ptr_move_fw(ring, next_to_clean);
ring->pending_buf++;
- }
+ } while (!(bd_base_info & BIT(HNS3_RXD_FE_B)));
return 0;
}
@@ -3063,28 +3100,23 @@ static int hns3_handle_rx_bd(struct hns3_enet_ring *ring)
if (ret < 0) /* alloc buffer fail */
return ret;
- if (ret > 0) { /* need add frag */
- ret = hns3_add_frag(ring, desc, false);
+ if (!(bd_base_info & BIT(HNS3_RXD_FE_B))) { /* need add frag */
+ ret = hns3_add_frag(ring);
if (ret)
return ret;
-
- /* As the head data may be changed when GRO enable, copy
- * the head data in after other data rx completed
- */
- memcpy(skb->data, ring->va,
- ALIGN(ring->pull_len, sizeof(long)));
}
} else {
- ret = hns3_add_frag(ring, desc, true);
+ ret = hns3_add_frag(ring);
if (ret)
return ret;
+ }
- /* As the head data may be changed when GRO enable, copy
- * the head data in after other data rx completed
- */
+ /* As the head data may be changed when GRO is enabled, copy
+ * the head data in after the rest of the data has been received
+ */
+ if (skb->len > HNS3_RX_HEAD_SIZE)
memcpy(skb->data, ring->va,
ALIGN(ring->pull_len, sizeof(long)));
- }
ret = hns3_handle_bdinfo(ring, skb);
if (unlikely(ret)) {
@@ -3590,26 +3622,25 @@ static void hns3_nic_uninit_vector_data(struct hns3_nic_priv *priv)
if (!tqp_vector->rx_group.ring && !tqp_vector->tx_group.ring)
continue;
- hns3_get_vector_ring_chain(tqp_vector, &vector_ring_chain);
+ /* Since the mapping can be overwritten, if we fail to get the
+ * chain between vector and ring, we should go on to deal with
+ * the remaining options.
+ */
+ if (hns3_get_vector_ring_chain(tqp_vector, &vector_ring_chain))
+ dev_warn(priv->dev, "failed to get ring chain\n");
h->ae_algo->ops->unmap_ring_from_vector(h,
tqp_vector->vector_irq, &vector_ring_chain);
hns3_free_vector_ring_chain(tqp_vector, &vector_ring_chain);
- if (tqp_vector->irq_init_flag == HNS3_VECTOR_INITED) {
- irq_set_affinity_hint(tqp_vector->vector_irq, NULL);
- free_irq(tqp_vector->vector_irq, tqp_vector);
- tqp_vector->irq_init_flag = HNS3_VECTOR_NOT_INITED;
- }
-
hns3_clear_ring_group(&tqp_vector->rx_group);
hns3_clear_ring_group(&tqp_vector->tx_group);
netif_napi_del(&priv->tqp_vector[i].napi);
}
}
-static int hns3_nic_dealloc_vector_data(struct hns3_nic_priv *priv)
+static void hns3_nic_dealloc_vector_data(struct hns3_nic_priv *priv)
{
struct hnae3_handle *h = priv->ae_handle;
struct pci_dev *pdev = h->pdev;
@@ -3621,11 +3652,10 @@ static int hns3_nic_dealloc_vector_data(struct hns3_nic_priv *priv)
tqp_vector = &priv->tqp_vector[i];
ret = h->ae_algo->ops->put_vector(h, tqp_vector->vector_irq);
if (ret)
- return ret;
+ return;
}
devm_kfree(&pdev->dev, priv->tqp_vector);
- return 0;
}
static void hns3_ring_get_cfg(struct hnae3_queue *q, struct hns3_nic_priv *priv,
@@ -4024,6 +4054,18 @@ static int hns3_client_init(struct hnae3_handle *handle)
goto out_reg_netdev_fail;
}
+ /* the device can work without cpu rmap, only aRFS needs it */
+ ret = hns3_set_rx_cpu_rmap(netdev);
+ if (ret)
+ dev_warn(priv->dev, "set rx cpu rmap fail, ret=%d\n", ret);
+
+ ret = hns3_nic_init_irq(priv);
+ if (ret) {
+ dev_err(priv->dev, "init irq failed! ret=%d\n", ret);
+ hns3_free_rx_cpu_rmap(netdev);
+ goto out_init_irq_fail;
+ }
+
ret = hns3_client_start(handle);
if (ret) {
dev_err(priv->dev, "hns3_client_start fail! ret=%d\n", ret);
@@ -4045,6 +4087,9 @@ static int hns3_client_init(struct hnae3_handle *handle)
return ret;
out_client_start:
+ hns3_free_rx_cpu_rmap(netdev);
+ hns3_nic_uninit_irq(priv);
+out_init_irq_fail:
unregister_netdev(netdev);
out_reg_netdev_fail:
hns3_uninit_phy(netdev);
@@ -4082,15 +4127,17 @@ static void hns3_client_uninit(struct hnae3_handle *handle, bool reset)
goto out_netdev_free;
}
+ hns3_free_rx_cpu_rmap(netdev);
+
+ hns3_nic_uninit_irq(priv);
+
hns3_del_all_fd_rules(netdev, true);
hns3_clear_all_ring(handle, true);
hns3_nic_uninit_vector_data(priv);
- ret = hns3_nic_dealloc_vector_data(priv);
- if (ret)
- netdev_err(netdev, "dealloc vector error\n");
+ hns3_nic_dealloc_vector_data(priv);
ret = hns3_uninit_all_ring(priv);
if (ret)
@@ -4417,17 +4464,32 @@ static int hns3_reset_notify_init_enet(struct hnae3_handle *handle)
if (ret)
goto err_uninit_vector;
+ /* the device can work without cpu rmap, only aRFS needs it */
+ ret = hns3_set_rx_cpu_rmap(netdev);
+ if (ret)
+ dev_warn(priv->dev, "set rx cpu rmap fail, ret=%d\n", ret);
+
+ ret = hns3_nic_init_irq(priv);
+ if (ret) {
+ dev_err(priv->dev, "init irq failed! ret=%d\n", ret);
+ hns3_free_rx_cpu_rmap(netdev);
+ goto err_init_irq_fail;
+ }
+
ret = hns3_client_start(handle);
if (ret) {
dev_err(priv->dev, "hns3_client_start fail! ret=%d\n", ret);
- goto err_uninit_ring;
+ goto err_client_start_fail;
}
set_bit(HNS3_NIC_STATE_INITED, &priv->state);
return ret;
-err_uninit_ring:
+err_client_start_fail:
+ hns3_free_rx_cpu_rmap(netdev);
+ hns3_nic_uninit_irq(priv);
+err_init_irq_fail:
hns3_uninit_all_ring(priv);
err_uninit_vector:
hns3_nic_uninit_vector_data(priv);
@@ -4477,6 +4539,8 @@ static int hns3_reset_notify_uninit_enet(struct hnae3_handle *handle)
return 0;
}
+ hns3_free_rx_cpu_rmap(netdev);
+ hns3_nic_uninit_irq(priv);
hns3_clear_all_ring(handle, true);
hns3_reset_tx_queue(priv->ae_handle);
@@ -4484,9 +4548,7 @@ static int hns3_reset_notify_uninit_enet(struct hnae3_handle *handle)
hns3_store_coal(priv);
- ret = hns3_nic_dealloc_vector_data(priv);
- if (ret)
- netdev_err(netdev, "dealloc vector error\n");
+ hns3_nic_dealloc_vector_data(priv);
ret = hns3_uninit_all_ring(priv);
if (ret)
@@ -4652,7 +4714,7 @@ static int __init hns3_init_module(void)
pr_info("%s: %s\n", hns3_driver_name, hns3_copyright);
client.type = HNAE3_CLIENT_KNIC;
- snprintf(client.name, HNAE3_CLIENT_NAME_LENGTH - 1, "%s",
+ snprintf(client.name, HNAE3_CLIENT_NAME_LENGTH, "%s",
hns3_driver_name);
client.ops = &client_ops;
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h
index 9d47abd5c37c..abefd7a179f7 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h
@@ -673,4 +673,5 @@ void hns3_dbg_init(struct hnae3_handle *handle);
void hns3_dbg_uninit(struct hnae3_handle *handle);
void hns3_dbg_register_debugfs(const char *debugfs_dir_name);
void hns3_dbg_unregister_debugfs(void);
+void hns3_shinfo_pack(struct skb_shared_info *shinfo, __u32 *size);
#endif
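
hns3_shinfo_pack() is only declared in this hunk; its body is not shown here. Judging from the trace event class below, which hands it a MAX_SKB_FRAGS-sized __u32 array, a plausible implementation (an assumption, not quoted from the patch) copies each fragment length and zero-fills the tail:

void hns3_shinfo_pack(struct skb_shared_info *shinfo, __u32 *size)
{
	int i;

	for (i = 0; i < MAX_SKB_FRAGS; i++) {
		if (i < shinfo->nr_frags)
			size[i] = skb_frag_size(&shinfo->frags[i]);
		else
			size[i] = 0;	/* unused slots read as zero */
	}
}
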
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c b/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c
index 6e0212b79438..c03856e63320 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c
@@ -423,9 +423,8 @@ static void *hns3_update_strings(u8 *data, const struct hns3_stats *stats,
data[ETH_GSTRING_LEN - 1] = '\0';
/* first, prepend the prefix string */
- n1 = snprintf(data, MAX_PREFIX_SIZE, "%s%d_",
- prefix, i);
- n1 = min_t(uint, n1, MAX_PREFIX_SIZE - 1);
+ n1 = scnprintf(data, MAX_PREFIX_SIZE, "%s%d_",
+ prefix, i);
size_left = (ETH_GSTRING_LEN - 1) - n1;
/* now, concatenate the stats string to it */
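
The switch from snprintf() to scnprintf() removes the min_t() clamp because the two return different things on truncation: snprintf() returns the length the string would have had, while the kernel's scnprintf() returns the number of characters actually written (excluding the trailing NUL), so the result is always a safe offset. A userspace demonstration of the difference, with scnprintf() re-created on top of vsnprintf():

#include <stdarg.h>
#include <stdio.h>

/* Userspace re-creation of the kernel's scnprintf() semantics:
 * return what was actually written, not the untruncated length.
 */
static int my_scnprintf(char *buf, size_t size, const char *fmt, ...)
{
	va_list args;
	int i;

	va_start(args, fmt);
	i = vsnprintf(buf, size, fmt, args);
	va_end(args);

	if (i >= (int)size)
		i = size ? (int)size - 1 : 0;
	return i;
}

int main(void)
{
	char buf[8];

	/* snprintf: returns 12 although only 7 chars + NUL fit */
	printf("snprintf:  %d\n", snprintf(buf, sizeof(buf), "prefix_%d_", 1234));
	/* scnprintf-style: returns 7, safe to use as an offset */
	printf("scnprintf: %d\n", my_scnprintf(buf, sizeof(buf), "prefix_%d_", 1234));
	return 0;
}
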
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_trace.h b/drivers/net/ethernet/hisilicon/hns3/hns3_trace.h
new file mode 100644
index 000000000000..7bddcca148a5
--- /dev/null
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3_trace.h
@@ -0,0 +1,139 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/* Copyright (c) 2018-2019 Hisilicon Limited. */
+
+/* This must be outside ifdef _HNS3_TRACE_H */
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM hns3
+
+#if !defined(_HNS3_TRACE_H_) || defined(TRACE_HEADER_MULTI_READ)
+#define _HNS3_TRACE_H_
+
+#include <linux/tracepoint.h>
+
+#define DESC_NR (sizeof(struct hns3_desc) / sizeof(u32))
+
+DECLARE_EVENT_CLASS(hns3_skb_template,
+ TP_PROTO(struct sk_buff *skb),
+ TP_ARGS(skb),
+
+ TP_STRUCT__entry(
+ __field(unsigned int, headlen)
+ __field(unsigned int, len)
+ __field(__u8, nr_frags)
+ __field(__u8, ip_summed)
+ __field(unsigned int, hdr_len)
+ __field(unsigned short, gso_size)
+ __field(unsigned short, gso_segs)
+ __field(unsigned int, gso_type)
+ __field(bool, fraglist)
+ __array(__u32, size, MAX_SKB_FRAGS)
+ ),
+
+ TP_fast_assign(
+ __entry->headlen = skb_headlen(skb);
+ __entry->len = skb->len;
+ __entry->nr_frags = skb_shinfo(skb)->nr_frags;
+ __entry->gso_size = skb_shinfo(skb)->gso_size;
+ __entry->gso_segs = skb_shinfo(skb)->gso_segs;
+ __entry->gso_type = skb_shinfo(skb)->gso_type;
+ __entry->hdr_len = skb->encapsulation ?
+ skb_inner_transport_offset(skb) + inner_tcp_hdrlen(skb) :
+ skb_transport_offset(skb) + tcp_hdrlen(skb);
+ __entry->ip_summed = skb->ip_summed;
+ __entry->fraglist = skb_has_frag_list(skb);
+ hns3_shinfo_pack(skb_shinfo(skb), __entry->size);
+ ),
+
+ TP_printk(
+ "len: %u, %u, %u, cs: %u, gso: %u, %u, %x, frag(%d %u): %s",
+ __entry->headlen, __entry->len, __entry->hdr_len,
+ __entry->ip_summed, __entry->gso_size, __entry->gso_segs,
+ __entry->gso_type, __entry->fraglist, __entry->nr_frags,
+ __print_array(__entry->size, MAX_SKB_FRAGS, sizeof(__u32))
+ )
+);
+
+DEFINE_EVENT(hns3_skb_template, hns3_over_8bd,
+ TP_PROTO(struct sk_buff *skb),
+ TP_ARGS(skb));
+
+DEFINE_EVENT(hns3_skb_template, hns3_gro,
+ TP_PROTO(struct sk_buff *skb),
+ TP_ARGS(skb));
+
+DEFINE_EVENT(hns3_skb_template, hns3_tso,
+ TP_PROTO(struct sk_buff *skb),
+ TP_ARGS(skb));
+
+TRACE_EVENT(hns3_tx_desc,
+ TP_PROTO(struct hns3_enet_ring *ring, int cur_ntu),
+ TP_ARGS(ring, cur_ntu),
+
+ TP_STRUCT__entry(
+ __field(int, index)
+ __field(int, ntu)
+ __field(int, ntc)
+ __field(dma_addr_t, desc_dma)
+ __array(u32, desc, DESC_NR)
+ __string(devname, ring->tqp->handle->kinfo.netdev->name)
+ ),
+
+ TP_fast_assign(
+ __entry->index = ring->tqp->tqp_index;
+ __entry->ntu = ring->next_to_use;
+ __entry->ntc = ring->next_to_clean;
+ __entry->desc_dma = ring->desc_dma_addr;
+ memcpy(__entry->desc, &ring->desc[cur_ntu],
+ sizeof(struct hns3_desc));
+ __assign_str(devname, ring->tqp->handle->kinfo.netdev->name);
+ ),
+
+ TP_printk(
+ "%s-%d-%d/%d desc(%pad): %s",
+ __get_str(devname), __entry->index, __entry->ntu,
+ __entry->ntc, &__entry->desc_dma,
+ __print_array(__entry->desc, DESC_NR, sizeof(u32))
+ )
+);
+
+TRACE_EVENT(hns3_rx_desc,
+ TP_PROTO(struct hns3_enet_ring *ring),
+ TP_ARGS(ring),
+
+ TP_STRUCT__entry(
+ __field(int, index)
+ __field(int, ntu)
+ __field(int, ntc)
+ __field(dma_addr_t, desc_dma)
+ __field(dma_addr_t, buf_dma)
+ __array(u32, desc, DESC_NR)
+ __string(devname, ring->tqp->handle->kinfo.netdev->name)
+ ),
+
+ TP_fast_assign(
+ __entry->index = ring->tqp->tqp_index;
+ __entry->ntu = ring->next_to_use;
+ __entry->ntc = ring->next_to_clean;
+ __entry->desc_dma = ring->desc_dma_addr;
+ __entry->buf_dma = ring->desc_cb[ring->next_to_clean].dma;
+ memcpy(__entry->desc, &ring->desc[ring->next_to_clean],
+ sizeof(struct hns3_desc));
+ __assign_str(devname, ring->tqp->handle->kinfo.netdev->name);
+ ),
+
+ TP_printk(
+ "%s-%d-%d/%d desc(%pad) buf(%pad): %s",
+ __get_str(devname), __entry->index, __entry->ntu,
+ __entry->ntc, &__entry->desc_dma, &__entry->buf_dma,
+ __print_array(__entry->desc, DESC_NR, sizeof(u32))
+ )
+);
+
+#endif /* _HNS3_TRACE_H_ */
+
+/* This must be outside ifdef _HNS3_TRACE_H */
+#undef TRACE_INCLUDE_PATH
+#define TRACE_INCLUDE_PATH .
+#undef TRACE_INCLUDE_FILE
+#define TRACE_INCLUDE_FILE hns3_trace
+#include <trace/define_trace.h>
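
For the tracepoints above to emit code, exactly one translation unit must define CREATE_TRACE_POINTS before including this header; every other caller just includes it and invokes trace_<event>(). A sketch of the expected wiring in hns3_enet.c (assumed from the trace_hns3_rx_desc() call added earlier; the actual include site is not part of this hunk):

/* hns3_enet.c -- one compilation unit only */
#define CREATE_TRACE_POINTS
#include "hns3_trace.h"

/* A call site compiles to a static-key no-op until the event is
 * enabled, e.g. through
 * /sys/kernel/debug/tracing/events/hns3/hns3_rx_desc/enable
 */
trace_hns3_rx_desc(ring);
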
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.c
index 940ead3970d1..7f509eff562e 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.c
@@ -479,19 +479,6 @@ static void hclge_cmd_uninit_regs(struct hclge_hw *hw)
hclge_write_dev(hw, HCLGE_NIC_CRQ_TAIL_REG, 0);
}
-static void hclge_destroy_queue(struct hclge_cmq_ring *ring)
-{
- spin_lock(&ring->lock);
- hclge_free_cmd_desc(ring);
- spin_unlock(&ring->lock);
-}
-
-static void hclge_destroy_cmd_queue(struct hclge_hw *hw)
-{
- hclge_destroy_queue(&hw->cmq.csq);
- hclge_destroy_queue(&hw->cmq.crq);
-}
-
void hclge_cmd_uninit(struct hclge_dev *hdev)
{
spin_lock_bh(&hdev->hw.cmq.csq.lock);
@@ -501,5 +488,6 @@ void hclge_cmd_uninit(struct hclge_dev *hdev)
spin_unlock(&hdev->hw.cmq.crq.lock);
spin_unlock_bh(&hdev->hw.cmq.csq.lock);
- hclge_destroy_cmd_queue(&hdev->hw);
+ hclge_free_cmd_desc(&hdev->hw.cmq.csq);
+ hclge_free_cmd_desc(&hdev->hw.cmq.crq);
}
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h
index d97da67f07a1..96498d9b4754 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h
@@ -6,6 +6,7 @@
#include <linux/types.h>
#include <linux/io.h>
#include <linux/etherdevice.h>
+#include "hnae3.h"
#define HCLGE_CMDQ_TX_TIMEOUT 30000
#define HCLGE_DESC_DATA_LEN 6
@@ -63,6 +64,7 @@ enum hclge_cmd_status {
struct hclge_misc_vector {
u8 __iomem *addr;
int vector_irq;
+ char name[HNAE3_INT_NAME_LEN];
};
struct hclge_cmq {
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.c
index 112df34b3869..67fad80035d3 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.c
@@ -73,8 +73,6 @@ static struct hclge_dbg_reg_type_info hclge_dbg_reg_info[] = {
static int hclge_dbg_get_dfx_bd_num(struct hclge_dev *hdev, int offset)
{
-#define HCLGE_GET_DFX_REG_TYPE_CNT 4
-
struct hclge_desc desc[HCLGE_GET_DFX_REG_TYPE_CNT];
int entries_per_desc;
int index;
@@ -886,8 +884,8 @@ static void hclge_dbg_dump_mng_table(struct hclge_dev *hdev)
}
}
-static void hclge_dbg_fd_tcam_read(struct hclge_dev *hdev, u8 stage,
- bool sel_x, u32 loc)
+static int hclge_dbg_fd_tcam_read(struct hclge_dev *hdev, u8 stage,
+ bool sel_x, u32 loc)
{
struct hclge_fd_tcam_config_1_cmd *req1;
struct hclge_fd_tcam_config_2_cmd *req2;
@@ -912,7 +910,7 @@ static void hclge_dbg_fd_tcam_read(struct hclge_dev *hdev, u8 stage,
ret = hclge_cmd_send(&hdev->hw, desc, 3);
if (ret)
- return;
+ return ret;
dev_info(&hdev->pdev->dev, " read result tcam key %s(%u):\n",
sel_x ? "x" : "y", loc);
@@ -931,16 +929,76 @@ static void hclge_dbg_fd_tcam_read(struct hclge_dev *hdev, u8 stage,
req = (u32 *)req3->tcam_data;
for (i = 0; i < 5; i++)
dev_info(&hdev->pdev->dev, "%08x\n", *req++);
+
+ return ret;
+}
+
+static int hclge_dbg_get_rules_location(struct hclge_dev *hdev, u16 *rule_locs)
+{
+ struct hclge_fd_rule *rule;
+ struct hlist_node *node;
+ int cnt = 0;
+
+ spin_lock_bh(&hdev->fd_rule_lock);
+ hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, rule_node) {
+ rule_locs[cnt] = rule->location;
+ cnt++;
+ }
+ spin_unlock_bh(&hdev->fd_rule_lock);
+
+ if (cnt != hdev->hclge_fd_rule_num)
+ return -EINVAL;
+
+ return cnt;
}
static void hclge_dbg_fd_tcam(struct hclge_dev *hdev)
{
- u32 i;
+ int i, ret, rule_cnt;
+ u16 *rule_locs;
+
+ if (!hnae3_dev_fd_supported(hdev)) {
+ dev_err(&hdev->pdev->dev,
+ "Only FD-supported dev supports dump fd tcam\n");
+ return;
+ }
+
+ if (!hdev->hclge_fd_rule_num ||
+ !hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1])
+ return;
- for (i = 0; i < hdev->fd_cfg.rule_num[0]; i++) {
- hclge_dbg_fd_tcam_read(hdev, 0, true, i);
- hclge_dbg_fd_tcam_read(hdev, 0, false, i);
+ rule_locs = kcalloc(hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1],
+ sizeof(u16), GFP_KERNEL);
+ if (!rule_locs)
+ return;
+
+ rule_cnt = hclge_dbg_get_rules_location(hdev, rule_locs);
+ if (rule_cnt <= 0) {
+ dev_err(&hdev->pdev->dev,
+ "failed to get rule number, ret = %d\n", rule_cnt);
+ kfree(rule_locs);
+ return;
}
+
+ for (i = 0; i < rule_cnt; i++) {
+ ret = hclge_dbg_fd_tcam_read(hdev, 0, true, rule_locs[i]);
+ if (ret) {
+ dev_err(&hdev->pdev->dev,
+ "failed to get fd tcam key x, ret = %d\n", ret);
+ kfree(rule_locs);
+ return;
+ }
+
+ ret = hclge_dbg_fd_tcam_read(hdev, 0, false, rule_locs[i]);
+ if (ret) {
+ dev_err(&hdev->pdev->dev,
+ "failed to get fd tcam key y, ret = %d\n", ret);
+ kfree(rule_locs);
+ return;
+ }
+ }
+
+ kfree(rule_locs);
}
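
The three kfree(rule_locs)-then-return sites in the new hclge_dbg_fd_tcam() repeat the cleanup on every error path. A single-exit variant of the same loop (a refactoring sketch, not what the patch applies) would centralize it:

	int ret = 0;

	for (i = 0; i < rule_cnt; i++) {
		/* dump key x, then key y, for each rule location */
		ret = hclge_dbg_fd_tcam_read(hdev, 0, true, rule_locs[i]);
		if (!ret)
			ret = hclge_dbg_fd_tcam_read(hdev, 0, false,
						     rule_locs[i]);
		if (ret) {
			dev_err(&hdev->pdev->dev,
				"failed to dump fd tcam, ret = %d\n", ret);
			break;
		}
	}

	kfree(rule_locs);	/* the one exit path frees in every case */
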
void hclge_dbg_dump_rst_info(struct hclge_dev *hdev)
@@ -976,6 +1034,14 @@ void hclge_dbg_dump_rst_info(struct hclge_dev *hdev)
dev_info(&hdev->pdev->dev, "hdev state: 0x%lx\n", hdev->state);
}
+static void hclge_dbg_dump_serv_info(struct hclge_dev *hdev)
+{
+ dev_info(&hdev->pdev->dev, "last_serv_processed: %lu\n",
+ hdev->last_serv_processed);
+ dev_info(&hdev->pdev->dev, "last_serv_cnt: %lu\n",
+ hdev->serv_processed_cnt);
+}
+
static void hclge_dbg_get_m7_stats_info(struct hclge_dev *hdev)
{
struct hclge_desc *desc_src, *desc_tmp;
@@ -1227,6 +1293,8 @@ int hclge_dbg_run_cmd(struct hnae3_handle *handle, const char *cmd_buf)
hclge_dbg_dump_reg_cmd(hdev, &cmd_buf[sizeof(DUMP_REG)]);
} else if (strncmp(cmd_buf, "dump reset info", 15) == 0) {
hclge_dbg_dump_rst_info(hdev);
+ } else if (strncmp(cmd_buf, "dump serv info", 14) == 0) {
+ hclge_dbg_dump_serv_info(hdev);
} else if (strncmp(cmd_buf, "dump m7 info", 12) == 0) {
hclge_dbg_get_m7_stats_info(hdev);
} else if (strncmp(cmd_buf, "dump ncl_config", 15) == 0) {
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.c
index dc66b4e13377..c85b72dc44d2 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.c
@@ -505,7 +505,7 @@ static const struct hclge_hw_error hclge_ssu_mem_ecc_err_int[] = {
static const struct hclge_hw_error hclge_ssu_port_based_err_int[] = {
{ .int_msk = BIT(0), .msg = "roc_pkt_without_key_port",
- .reset_level = HNAE3_GLOBAL_RESET },
+ .reset_level = HNAE3_FUNC_RESET },
{ .int_msk = BIT(1), .msg = "tpu_pkt_without_key_port",
.reset_level = HNAE3_GLOBAL_RESET },
{ .int_msk = BIT(2), .msg = "igu_pkt_without_key_port",
@@ -599,7 +599,7 @@ static const struct hclge_hw_error hclge_ssu_ets_tcg_int[] = {
static const struct hclge_hw_error hclge_ssu_port_based_pf_int[] = {
{ .int_msk = BIT(0), .msg = "roc_pkt_without_key_port",
- .reset_level = HNAE3_GLOBAL_RESET },
+ .reset_level = HNAE3_FUNC_RESET },
{ .int_msk = BIT(9), .msg = "low_water_line_err_port",
.reset_level = HNAE3_NONE_RESET },
{ .int_msk = BIT(10), .msg = "hi_water_line_err_port",
@@ -1898,10 +1898,8 @@ static int hclge_handle_all_hw_msix_error(struct hclge_dev *hdev,
bd_num = max_t(u32, mpf_bd_num, pf_bd_num);
desc = kcalloc(bd_num, sizeof(struct hclge_desc), GFP_KERNEL);
- if (!desc) {
- ret = -ENOMEM;
- goto out;
- }
+ if (!desc)
+ return -ENOMEM;
ret = hclge_handle_mpf_msix_error(hdev, desc, mpf_bd_num,
reset_requests);
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
index 13dbd249f35f..ec5f6eeb639b 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
@@ -72,6 +72,8 @@ static int hclge_set_default_loopback(struct hclge_dev *hdev);
static struct hnae3_ae_algo ae_algo;
+static struct workqueue_struct *hclge_wq;
+
static const struct pci_device_id ae_algo_pci_tbl[] = {
{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_GE), 0},
{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE), 0},
@@ -416,7 +418,7 @@ static int hclge_mac_update_stats_defective(struct hclge_dev *hdev)
{
#define HCLGE_MAC_CMD_NUM 21
- u64 *data = (u64 *)(&hdev->hw_stats.mac_stats);
+ u64 *data = (u64 *)(&hdev->mac_stats);
struct hclge_desc desc[HCLGE_MAC_CMD_NUM];
__le64 *desc_data;
int i, k, n;
@@ -453,7 +455,7 @@ static int hclge_mac_update_stats_defective(struct hclge_dev *hdev)
static int hclge_mac_update_stats_complete(struct hclge_dev *hdev, u32 desc_num)
{
- u64 *data = (u64 *)(&hdev->hw_stats.mac_stats);
+ u64 *data = (u64 *)(&hdev->mac_stats);
struct hclge_desc *desc;
__le64 *desc_data;
u16 i, k, n;
@@ -802,7 +804,7 @@ static void hclge_get_stats(struct hnae3_handle *handle, u64 *data)
struct hclge_dev *hdev = vport->back;
u64 *p;
- p = hclge_comm_get_stats(&hdev->hw_stats.mac_stats, g_mac_stats_string,
+ p = hclge_comm_get_stats(&hdev->mac_stats, g_mac_stats_string,
ARRAY_SIZE(g_mac_stats_string), data);
p = hclge_tqps_get_stats(handle, p);
}
@@ -815,8 +817,8 @@ static void hclge_get_mac_stat(struct hnae3_handle *handle,
hclge_update_stats(handle, NULL);
- mac_stats->tx_pause_cnt = hdev->hw_stats.mac_stats.mac_tx_mac_pause_num;
- mac_stats->rx_pause_cnt = hdev->hw_stats.mac_stats.mac_rx_mac_pause_num;
+ mac_stats->tx_pause_cnt = hdev->mac_stats.mac_tx_mac_pause_num;
+ mac_stats->rx_pause_cnt = hdev->mac_stats.mac_rx_mac_pause_num;
}
static int hclge_parse_func_status(struct hclge_dev *hdev,
@@ -860,9 +862,7 @@ static int hclge_query_function_status(struct hclge_dev *hdev)
usleep_range(1000, 2000);
} while (timeout++ < HCLGE_QUERY_MAX_CNT);
- ret = hclge_parse_func_status(hdev, req);
-
- return ret;
+ return hclge_parse_func_status(hdev, req);
}
static int hclge_query_pf_resource(struct hclge_dev *hdev)
@@ -880,12 +880,12 @@ static int hclge_query_pf_resource(struct hclge_dev *hdev)
}
req = (struct hclge_pf_res_cmd *)desc.data;
- hdev->num_tqps = __le16_to_cpu(req->tqp_num);
- hdev->pkt_buf_size = __le16_to_cpu(req->buf_size) << HCLGE_BUF_UNIT_S;
+ hdev->num_tqps = le16_to_cpu(req->tqp_num);
+ hdev->pkt_buf_size = le16_to_cpu(req->buf_size) << HCLGE_BUF_UNIT_S;
if (req->tx_buf_size)
hdev->tx_buf_size =
- __le16_to_cpu(req->tx_buf_size) << HCLGE_BUF_UNIT_S;
+ le16_to_cpu(req->tx_buf_size) << HCLGE_BUF_UNIT_S;
else
hdev->tx_buf_size = HCLGE_DEFAULT_TX_BUF;
@@ -893,7 +893,7 @@ static int hclge_query_pf_resource(struct hclge_dev *hdev)
if (req->dv_buf_size)
hdev->dv_buf_size =
- __le16_to_cpu(req->dv_buf_size) << HCLGE_BUF_UNIT_S;
+ le16_to_cpu(req->dv_buf_size) << HCLGE_BUF_UNIT_S;
else
hdev->dv_buf_size = HCLGE_DEFAULT_DV;
@@ -901,10 +901,10 @@ static int hclge_query_pf_resource(struct hclge_dev *hdev)
if (hnae3_dev_roce_supported(hdev)) {
hdev->roce_base_msix_offset =
- hnae3_get_field(__le16_to_cpu(req->msixcap_localid_ba_rocee),
+ hnae3_get_field(le16_to_cpu(req->msixcap_localid_ba_rocee),
HCLGE_MSIX_OFT_ROCEE_M, HCLGE_MSIX_OFT_ROCEE_S);
hdev->num_roce_msi =
- hnae3_get_field(__le16_to_cpu(req->pf_intr_vector_number),
+ hnae3_get_field(le16_to_cpu(req->pf_intr_vector_number),
HCLGE_PF_VEC_NUM_M, HCLGE_PF_VEC_NUM_S);
/* the NIC's msix number always equals the RoCE's. */
@@ -917,7 +917,7 @@ static int hclge_query_pf_resource(struct hclge_dev *hdev)
hdev->roce_base_msix_offset;
} else {
hdev->num_msi =
- hnae3_get_field(__le16_to_cpu(req->pf_intr_vector_number),
+ hnae3_get_field(le16_to_cpu(req->pf_intr_vector_number),
HCLGE_PF_VEC_NUM_M, HCLGE_PF_VEC_NUM_S);
hdev->num_nic_msi = hdev->num_msi;
@@ -1331,11 +1331,7 @@ static int hclge_get_cap(struct hclge_dev *hdev)
}
/* get pf resource */
- ret = hclge_query_pf_resource(hdev);
- if (ret)
- dev_err(&hdev->pdev->dev, "query pf resource error %d.\n", ret);
-
- return ret;
+ return hclge_query_pf_resource(hdev);
}
static void hclge_init_kdump_kernel_config(struct hclge_dev *hdev)
@@ -2619,30 +2615,21 @@ static int hclge_mac_init(struct hclge_dev *hdev)
hdev->hw.mac.duplex = HCLGE_MAC_FULL;
ret = hclge_cfg_mac_speed_dup_hw(hdev, hdev->hw.mac.speed,
hdev->hw.mac.duplex);
- if (ret) {
- dev_err(&hdev->pdev->dev,
- "Config mac speed dup fail ret=%d\n", ret);
+ if (ret)
return ret;
- }
if (hdev->hw.mac.support_autoneg) {
ret = hclge_set_autoneg_en(hdev, hdev->hw.mac.autoneg);
- if (ret) {
- dev_err(&hdev->pdev->dev,
- "Config mac autoneg fail ret=%d\n", ret);
+ if (ret)
return ret;
- }
}
mac->link = 0;
if (mac->user_fec_mode & BIT(HNAE3_FEC_USER_DEF)) {
ret = hclge_set_fec_hw(hdev, mac->user_fec_mode);
- if (ret) {
- dev_err(&hdev->pdev->dev,
- "Fec mode init fail, ret = %d\n", ret);
+ if (ret)
return ret;
- }
}
ret = hclge_set_mac_mtu(hdev, hdev->mps);
@@ -2665,31 +2652,27 @@ static int hclge_mac_init(struct hclge_dev *hdev)
static void hclge_mbx_task_schedule(struct hclge_dev *hdev)
{
- if (!test_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state) &&
+ if (!test_bit(HCLGE_STATE_REMOVING, &hdev->state) &&
!test_and_set_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state))
- queue_work_on(cpumask_first(&hdev->affinity_mask), system_wq,
- &hdev->mbx_service_task);
+ mod_delayed_work_on(cpumask_first(&hdev->affinity_mask),
+ hclge_wq, &hdev->service_task, 0);
}
static void hclge_reset_task_schedule(struct hclge_dev *hdev)
{
if (!test_bit(HCLGE_STATE_REMOVING, &hdev->state) &&
!test_and_set_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state))
- queue_work_on(cpumask_first(&hdev->affinity_mask), system_wq,
- &hdev->rst_service_task);
+ mod_delayed_work_on(cpumask_first(&hdev->affinity_mask),
+ hclge_wq, &hdev->service_task, 0);
}
void hclge_task_schedule(struct hclge_dev *hdev, unsigned long delay_time)
{
- if (!test_bit(HCLGE_STATE_DOWN, &hdev->state) &&
- !test_bit(HCLGE_STATE_REMOVING, &hdev->state) &&
- !test_and_set_bit(HCLGE_STATE_SERVICE_SCHED, &hdev->state)) {
- hdev->hw_stats.stats_timer++;
- hdev->fd_arfs_expire_timer++;
+ if (!test_bit(HCLGE_STATE_REMOVING, &hdev->state) &&
+ !test_bit(HCLGE_STATE_RST_FAIL, &hdev->state))
mod_delayed_work_on(cpumask_first(&hdev->affinity_mask),
- system_wq, &hdev->service_task,
+ hclge_wq, &hdev->service_task,
delay_time);
- }
}
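
With this change the mailbox, reset and periodic paths all converge on one delayed_work running on the driver's private workqueue. mod_delayed_work_on() with a delay of 0 pulls an already-queued tick forward, so an urgent event preempts the roughly 1 Hz cadence without needing a second work item. The pattern in isolation (a sketch reusing the driver's names):

/* Urgent producer: run the service task as soon as possible. */
static void schedule_urgent(struct hclge_dev *hdev)
{
	set_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state);
	mod_delayed_work_on(cpumask_first(&hdev->affinity_mask),
			    hclge_wq, &hdev->service_task, 0);
}

/* Periodic producer: re-arm for the next tick. */
static void schedule_tick(struct hclge_dev *hdev)
{
	mod_delayed_work_on(cpumask_first(&hdev->affinity_mask),
			    hclge_wq, &hdev->service_task,
			    round_jiffies_relative(HZ));
}
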
static int hclge_get_mac_link_status(struct hclge_dev *hdev)
@@ -2748,6 +2731,10 @@ static void hclge_update_link_status(struct hclge_dev *hdev)
if (!client)
return;
+
+ if (test_and_set_bit(HCLGE_STATE_LINK_UPDATING, &hdev->state))
+ return;
+
state = hclge_get_mac_phy_link(hdev);
if (state != hdev->hw.mac.link) {
for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
@@ -2761,6 +2748,8 @@ static void hclge_update_link_status(struct hclge_dev *hdev)
}
hdev->hw.mac.link = state;
}
+
+ clear_bit(HCLGE_STATE_LINK_UPDATING, &hdev->state);
}
static void hclge_update_port_capability(struct hclge_mac *mac)
@@ -2831,6 +2820,12 @@ static int hclge_get_sfp_info(struct hclge_dev *hdev, struct hclge_mac *mac)
return ret;
}
+ /* In some cases, the MAC speed got from IMP may be 0; it shouldn't
+ * be set to mac->speed.
+ */
+ if (!le32_to_cpu(resp->speed))
+ return 0;
+
mac->speed = le32_to_cpu(resp->speed);
/* if resp->speed_ability is 0, it means it's an old version
* firmware, do not update these params
@@ -2906,7 +2901,7 @@ static int hclge_get_status(struct hnae3_handle *handle)
static struct hclge_vport *hclge_get_vf_vport(struct hclge_dev *hdev, int vf)
{
- if (pci_num_vf(hdev->pdev) == 0) {
+ if (!pci_num_vf(hdev->pdev)) {
dev_err(&hdev->pdev->dev,
"SRIOV is disabled, can not get vport(%d) info.\n", vf);
return NULL;
@@ -2940,6 +2935,9 @@ static int hclge_get_vf_config(struct hnae3_handle *handle, int vf,
ivf->trusted = vport->vf_info.trusted;
ivf->min_tx_rate = 0;
ivf->max_tx_rate = vport->vf_info.max_tx_rate;
+ ivf->vlan = vport->port_base_vlan_cfg.vlan_info.vlan_tag;
+ ivf->vlan_proto = htons(vport->port_base_vlan_cfg.vlan_info.vlan_proto);
+ ivf->qos = vport->port_base_vlan_cfg.vlan_info.qos;
ether_addr_copy(ivf->mac, vport->vf_info.mac);
return 0;
@@ -2998,8 +2996,6 @@ static u32 hclge_check_event_cause(struct hclge_dev *hdev, u32 *clearval)
/* check for vector0 msix event source */
if (msix_src_reg & HCLGE_VECTOR0_REG_MSIX_MASK) {
- dev_info(&hdev->pdev->dev, "received event 0x%x\n",
- msix_src_reg);
*clearval = msix_src_reg;
return HCLGE_VECTOR0_EVENT_ERR;
}
@@ -3172,8 +3168,10 @@ static int hclge_misc_irq_init(struct hclge_dev *hdev)
hclge_get_misc_vector(hdev);
/* this would be explicitly freed in the end */
+ snprintf(hdev->misc_vector.name, HNAE3_INT_NAME_LEN, "%s-misc-%s",
+ HCLGE_NAME, pci_name(hdev->pdev));
ret = request_irq(hdev->misc_vector.vector_irq, hclge_misc_irq_handle,
- 0, "hclge_misc", hdev);
+ 0, hdev->misc_vector.name, hdev);
if (ret) {
hclge_free_vector(hdev, 0);
dev_err(&hdev->pdev->dev, "request misc irq(%d) fail\n",
@@ -3247,7 +3245,8 @@ static int hclge_notify_roce_client(struct hclge_dev *hdev,
static int hclge_reset_wait(struct hclge_dev *hdev)
{
#define HCLGE_RESET_WATI_MS 100
-#define HCLGE_RESET_WAIT_CNT 200
+#define HCLGE_RESET_WAIT_CNT 350
+
u32 val, reg, reg_bit;
u32 cnt = 0;
@@ -3264,8 +3263,6 @@ static int hclge_reset_wait(struct hclge_dev *hdev)
reg = HCLGE_FUN_RST_ING;
reg_bit = HCLGE_FUN_RST_ING_B;
break;
- case HNAE3_FLR_RESET:
- break;
default:
dev_err(&hdev->pdev->dev,
"Wait for unsupported reset type: %d\n",
@@ -3273,20 +3270,6 @@ static int hclge_reset_wait(struct hclge_dev *hdev)
return -EINVAL;
}
- if (hdev->reset_type == HNAE3_FLR_RESET) {
- while (!test_bit(HNAE3_FLR_DONE, &hdev->flr_state) &&
- cnt++ < HCLGE_RESET_WAIT_CNT)
- msleep(HCLGE_RESET_WATI_MS);
-
- if (!test_bit(HNAE3_FLR_DONE, &hdev->flr_state)) {
- dev_err(&hdev->pdev->dev,
- "flr wait timeout: %u\n", cnt);
- return -EBUSY;
- }
-
- return 0;
- }
-
val = hclge_read_dev(&hdev->hw, reg);
while (hnae3_get_bit(val, reg_bit) && cnt < HCLGE_RESET_WAIT_CNT) {
msleep(HCLGE_RESET_WATI_MS);
@@ -3352,7 +3335,19 @@ static int hclge_set_all_vf_rst(struct hclge_dev *hdev, bool reset)
return 0;
}
-static int hclge_func_reset_sync_vf(struct hclge_dev *hdev)
+static void hclge_mailbox_service_task(struct hclge_dev *hdev)
+{
+ if (!test_and_clear_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state) ||
+ test_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state) ||
+ test_and_set_bit(HCLGE_STATE_MBX_HANDLING, &hdev->state))
+ return;
+
+ hclge_mbx_handler(hdev);
+
+ clear_bit(HCLGE_STATE_MBX_HANDLING, &hdev->state);
+}
+
+static void hclge_func_reset_sync_vf(struct hclge_dev *hdev)
{
struct hclge_pf_rst_sync_cmd *req;
struct hclge_desc desc;
@@ -3363,26 +3358,28 @@ static int hclge_func_reset_sync_vf(struct hclge_dev *hdev)
hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_VF_RST_RDY, true);
do {
+ /* the VF needs to bring the netdev down via mbx during PF or FLR reset */
+ hclge_mailbox_service_task(hdev);
+
ret = hclge_cmd_send(&hdev->hw, &desc, 1);
/* for compatibility with old firmware, wait
* 100 ms for the VF to stop IO
*/
if (ret == -EOPNOTSUPP) {
msleep(HCLGE_RESET_SYNC_TIME);
- return 0;
+ return;
} else if (ret) {
- dev_err(&hdev->pdev->dev, "sync with VF fail %d!\n",
- ret);
- return ret;
+ dev_warn(&hdev->pdev->dev, "sync with VF fail %d!\n",
+ ret);
+ return;
} else if (req->all_vf_ready) {
- return 0;
+ return;
}
msleep(HCLGE_PF_RESET_SYNC_TIME);
hclge_cmd_reuse_desc(&desc, true);
} while (cnt++ < HCLGE_PF_RESET_SYNC_CNT);
- dev_err(&hdev->pdev->dev, "sync with VF timeout!\n");
- return -ETIME;
+ dev_warn(&hdev->pdev->dev, "sync with VF timeout!\n");
}
void hclge_report_hw_error(struct hclge_dev *hdev,
@@ -3462,12 +3459,6 @@ static void hclge_do_reset(struct hclge_dev *hdev)
set_bit(HNAE3_FUNC_RESET, &hdev->reset_pending);
hclge_reset_task_schedule(hdev);
break;
- case HNAE3_FLR_RESET:
- dev_info(&pdev->dev, "FLR requested\n");
- /* schedule again to check later */
- set_bit(HNAE3_FLR_RESET, &hdev->reset_pending);
- hclge_reset_task_schedule(hdev);
- break;
default:
dev_warn(&pdev->dev,
"Unsupported reset type: %d\n", hdev->reset_type);
@@ -3483,10 +3474,15 @@ static enum hnae3_reset_type hclge_get_reset_level(struct hnae3_ae_dev *ae_dev,
/* first, resolve any unknown reset type to the known type(s) */
if (test_bit(HNAE3_UNKNOWN_RESET, addr)) {
+ u32 msix_sts_reg = hclge_read_dev(&hdev->hw,
+ HCLGE_VECTOR0_PF_OTHER_INT_STS_REG);
/* we will intentionally ignore any errors from this function
* as we will end up in *some* reset request in any case
*/
- hclge_handle_hw_msix_error(hdev, addr);
+ if (hclge_handle_hw_msix_error(hdev, addr))
+ dev_info(&hdev->pdev->dev, "received msix interrupt 0x%x\n",
+ msix_sts_reg);
+
clear_bit(HNAE3_UNKNOWN_RESET, addr);
/* We deferred the clearing of the error event which caused the
* interrupt since it was not possible to do that in
@@ -3551,23 +3547,6 @@ static void hclge_clear_reset_cause(struct hclge_dev *hdev)
hclge_enable_vector(&hdev->misc_vector, true);
}
-static int hclge_reset_prepare_down(struct hclge_dev *hdev)
-{
- int ret = 0;
-
- switch (hdev->reset_type) {
- case HNAE3_FUNC_RESET:
- /* fall through */
- case HNAE3_FLR_RESET:
- ret = hclge_set_all_vf_rst(hdev, true);
- break;
- default:
- break;
- }
-
- return ret;
-}
-
static void hclge_reset_handshake(struct hclge_dev *hdev, bool enable)
{
u32 reg_val;
@@ -3581,6 +3560,19 @@ static void hclge_reset_handshake(struct hclge_dev *hdev, bool enable)
hclge_write_dev(&hdev->hw, HCLGE_NIC_CSQ_DEPTH_REG, reg_val);
}
+static int hclge_func_reset_notify_vf(struct hclge_dev *hdev)
+{
+ int ret;
+
+ ret = hclge_set_all_vf_rst(hdev, true);
+ if (ret)
+ return ret;
+
+ hclge_func_reset_sync_vf(hdev);
+
+ return 0;
+}
+
static int hclge_reset_prepare_wait(struct hclge_dev *hdev)
{
u32 reg_val;
@@ -3588,10 +3580,7 @@ static int hclge_reset_prepare_wait(struct hclge_dev *hdev)
switch (hdev->reset_type) {
case HNAE3_FUNC_RESET:
- /* to confirm whether all running VF is ready
- * before request PF reset
- */
- ret = hclge_func_reset_sync_vf(hdev);
+ ret = hclge_func_reset_notify_vf(hdev);
if (ret)
return ret;
@@ -3611,16 +3600,9 @@ static int hclge_reset_prepare_wait(struct hclge_dev *hdev)
hdev->rst_stats.pf_rst_cnt++;
break;
case HNAE3_FLR_RESET:
- /* to confirm whether all running VF is ready
- * before request PF reset
- */
- ret = hclge_func_reset_sync_vf(hdev);
+ ret = hclge_func_reset_notify_vf(hdev);
if (ret)
return ret;
-
- set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
- set_bit(HNAE3_FLR_DOWN, &hdev->flr_state);
- hdev->rst_stats.flr_rst_cnt++;
break;
case HNAE3_IMP_RESET:
hclge_handle_imp_error(hdev);
@@ -3672,6 +3654,8 @@ static bool hclge_reset_err_handle(struct hclge_dev *hdev)
hclge_dbg_dump_rst_info(hdev);
+ set_bit(HCLGE_STATE_RST_FAIL, &hdev->state);
+
return false;
}
@@ -3747,10 +3731,9 @@ static int hclge_reset_stack(struct hclge_dev *hdev)
return hclge_notify_client(hdev, HNAE3_RESTORE_CLIENT);
}
-static void hclge_reset(struct hclge_dev *hdev)
+static int hclge_reset_prepare(struct hclge_dev *hdev)
{
struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
- enum hnae3_reset_type reset_level;
int ret;
/* Initialize ae_dev reset status as well, in case enet layer wants to
@@ -3761,45 +3744,41 @@ static void hclge_reset(struct hclge_dev *hdev)
/* perform reset of the stack & ae device for a client */
ret = hclge_notify_roce_client(hdev, HNAE3_DOWN_CLIENT);
if (ret)
- goto err_reset;
-
- ret = hclge_reset_prepare_down(hdev);
- if (ret)
- goto err_reset;
+ return ret;
rtnl_lock();
ret = hclge_notify_client(hdev, HNAE3_DOWN_CLIENT);
- if (ret)
- goto err_reset_lock;
-
rtnl_unlock();
-
- ret = hclge_reset_prepare_wait(hdev);
if (ret)
- goto err_reset;
+ return ret;
- if (hclge_reset_wait(hdev))
- goto err_reset;
+ return hclge_reset_prepare_wait(hdev);
+}
+
+static int hclge_reset_rebuild(struct hclge_dev *hdev)
+{
+ struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
+ enum hnae3_reset_type reset_level;
+ int ret;
hdev->rst_stats.hw_reset_done_cnt++;
ret = hclge_notify_roce_client(hdev, HNAE3_UNINIT_CLIENT);
if (ret)
- goto err_reset;
+ return ret;
rtnl_lock();
-
ret = hclge_reset_stack(hdev);
+ rtnl_unlock();
if (ret)
- goto err_reset_lock;
+ return ret;
hclge_clear_reset_cause(hdev);
ret = hclge_reset_prepare_up(hdev);
if (ret)
- goto err_reset_lock;
+ return ret;
- rtnl_unlock();
ret = hclge_notify_roce_client(hdev, HNAE3_INIT_CLIENT);
/* ignore RoCE notify error if it fails HCLGE_RESET_MAX_FAIL_CNT - 1
@@ -3807,24 +3786,23 @@ static void hclge_reset(struct hclge_dev *hdev)
*/
if (ret &&
hdev->rst_stats.reset_fail_cnt < HCLGE_RESET_MAX_FAIL_CNT - 1)
- goto err_reset;
+ return ret;
rtnl_lock();
-
ret = hclge_notify_client(hdev, HNAE3_UP_CLIENT);
- if (ret)
- goto err_reset_lock;
-
rtnl_unlock();
+ if (ret)
+ return ret;
ret = hclge_notify_roce_client(hdev, HNAE3_UP_CLIENT);
if (ret)
- goto err_reset;
+ return ret;
hdev->last_reset_time = jiffies;
hdev->rst_stats.reset_fail_cnt = 0;
hdev->rst_stats.reset_done_cnt++;
ae_dev->reset_type = HNAE3_NONE_RESET;
+ clear_bit(HCLGE_STATE_RST_FAIL, &hdev->state);
/* if default_reset_request has a higher level reset request,
* it should be handled as soon as possible. since some errors
@@ -3835,10 +3813,22 @@ static void hclge_reset(struct hclge_dev *hdev)
if (reset_level != HNAE3_NONE_RESET)
set_bit(reset_level, &hdev->reset_request);
+ return 0;
+}
+
+static void hclge_reset(struct hclge_dev *hdev)
+{
+ if (hclge_reset_prepare(hdev))
+ goto err_reset;
+
+ if (hclge_reset_wait(hdev))
+ goto err_reset;
+
+ if (hclge_reset_rebuild(hdev))
+ goto err_reset;
+
return;
-err_reset_lock:
- rtnl_unlock();
err_reset:
if (hclge_reset_err_handle(hdev))
hclge_reset_task_schedule(hdev);
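
Splitting hclge_reset() into hclge_reset_prepare() / hclge_reset_wait() / hclge_reset_rebuild() is what lets FLR reuse the first and last stages: during an FLR the "wait" in the middle is performed by the PCI core itself, so the driver-side pairing (assumed from the hclge_flr_prepare()/hclge_flr_done() hunks later in this patch) reduces to:

/* Function-level reset, driven by the PCI core: */
hclge_flr_prepare(ae_dev);	/* -> hclge_reset_prepare() + retries */
/* ... PCI core performs the actual FLR here ... */
hclge_flr_done(ae_dev);		/* -> hclge_reset_rebuild() */
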
@@ -3939,34 +3929,18 @@ static void hclge_reset_subtask(struct hclge_dev *hdev)
hdev->reset_type = HNAE3_NONE_RESET;
}
-static void hclge_reset_service_task(struct work_struct *work)
+static void hclge_reset_service_task(struct hclge_dev *hdev)
{
- struct hclge_dev *hdev =
- container_of(work, struct hclge_dev, rst_service_task);
-
- if (test_and_set_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
+ if (!test_and_clear_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state))
return;
- clear_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state);
+ down(&hdev->reset_sem);
+ set_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
hclge_reset_subtask(hdev);
clear_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
-}
-
-static void hclge_mailbox_service_task(struct work_struct *work)
-{
- struct hclge_dev *hdev =
- container_of(work, struct hclge_dev, mbx_service_task);
-
- if (test_and_set_bit(HCLGE_STATE_MBX_HANDLING, &hdev->state))
- return;
-
- clear_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state);
-
- hclge_mbx_handler(hdev);
-
- clear_bit(HCLGE_STATE_MBX_HANDLING, &hdev->state);
+ up(&hdev->reset_sem);
}
static void hclge_update_vport_alive(struct hclge_dev *hdev)
@@ -3986,29 +3960,62 @@ static void hclge_update_vport_alive(struct hclge_dev *hdev)
}
}
-static void hclge_service_task(struct work_struct *work)
+static void hclge_periodic_service_task(struct hclge_dev *hdev)
{
- struct hclge_dev *hdev =
- container_of(work, struct hclge_dev, service_task.work);
+ unsigned long delta = round_jiffies_relative(HZ);
- clear_bit(HCLGE_STATE_SERVICE_SCHED, &hdev->state);
+ /* Always handle link updating to make sure the link state is
+ * updated when the update is triggered by mbx.
+ */
+ hclge_update_link_status(hdev);
- if (hdev->hw_stats.stats_timer >= HCLGE_STATS_TIMER_INTERVAL) {
- hclge_update_stats_for_all(hdev);
- hdev->hw_stats.stats_timer = 0;
+ if (time_is_after_jiffies(hdev->last_serv_processed + HZ)) {
+ delta = jiffies - hdev->last_serv_processed;
+
+ if (delta < round_jiffies_relative(HZ)) {
+ delta = round_jiffies_relative(HZ) - delta;
+ goto out;
+ }
}
- hclge_update_port_info(hdev);
- hclge_update_link_status(hdev);
+ hdev->serv_processed_cnt++;
hclge_update_vport_alive(hdev);
+
+ if (test_bit(HCLGE_STATE_DOWN, &hdev->state)) {
+ hdev->last_serv_processed = jiffies;
+ goto out;
+ }
+
+ if (!(hdev->serv_processed_cnt % HCLGE_STATS_TIMER_INTERVAL))
+ hclge_update_stats_for_all(hdev);
+
+ hclge_update_port_info(hdev);
hclge_sync_vlan_filter(hdev);
- if (hdev->fd_arfs_expire_timer >= HCLGE_FD_ARFS_EXPIRE_TIMER_INTERVAL) {
+ if (!(hdev->serv_processed_cnt % HCLGE_ARFS_EXPIRE_INTERVAL))
hclge_rfs_filter_expire(hdev);
- hdev->fd_arfs_expire_timer = 0;
- }
- hclge_task_schedule(hdev, round_jiffies_relative(HZ));
+ hdev->last_serv_processed = jiffies;
+
+out:
+ hclge_task_schedule(hdev, delta);
+}
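
The delta computation above rate-limits the periodic work to roughly once per second without blocking the urgent callers that share the work item: if the last full pass ran less than HZ jiffies ago, the task re-arms itself for only the remainder. A worked example (assuming HZ == 250, i.e. 4 ms jiffies):

/* last full pass was 100 jiffies (400 ms) ago */
delta = jiffies - hdev->last_serv_processed;	/* 100 */
if (delta < round_jiffies_relative(HZ))		/* 100 < 250 */
	delta = round_jiffies_relative(HZ) - delta;	/* re-arm in 150 */
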
+
+static void hclge_service_task(struct work_struct *work)
+{
+ struct hclge_dev *hdev =
+ container_of(work, struct hclge_dev, service_task.work);
+
+ hclge_reset_service_task(hdev);
+ hclge_mailbox_service_task(hdev);
+ hclge_periodic_service_task(hdev);
+
+ /* Handle reset and mbx again in case the periodic task delays the
+ * handling by calling hclge_task_schedule() in
+ * hclge_periodic_service_task().
+ */
+ hclge_reset_service_task(hdev);
+ hclge_mailbox_service_task(hdev);
}
struct hclge_vport *hclge_get_vport(struct hnae3_handle *handle)
@@ -4079,7 +4086,7 @@ static int hclge_put_vector(struct hnae3_handle *handle, int vector)
vector_id = hclge_get_vector_index(hdev, vector);
if (vector_id < 0) {
dev_err(&hdev->pdev->dev,
- "Get vector index fail. vector_id =%d\n", vector_id);
+ "Get vector index fail. vector = %d\n", vector);
return vector_id;
}
@@ -4654,7 +4661,7 @@ static int hclge_map_ring_to_vector(struct hnae3_handle *handle, int vector,
vector_id = hclge_get_vector_index(hdev, vector);
if (vector_id < 0) {
dev_err(&hdev->pdev->dev,
- "Get vector index fail. vector_id =%d\n", vector_id);
+ "failed to get vector index. vector=%d\n", vector);
return vector_id;
}
@@ -6562,7 +6569,7 @@ static int hclge_set_serdes_loopback(struct hclge_dev *hdev, bool en,
hclge_cfg_mac_mode(hdev, en);
- ret = hclge_mac_phy_link_status_wait(hdev, en, FALSE);
+ ret = hclge_mac_phy_link_status_wait(hdev, en, false);
if (ret)
dev_err(&hdev->pdev->dev,
"serdes loopback config mac mode timeout\n");
@@ -6620,7 +6627,7 @@ static int hclge_set_phy_loopback(struct hclge_dev *hdev, bool en)
hclge_cfg_mac_mode(hdev, en);
- ret = hclge_mac_phy_link_status_wait(hdev, en, TRUE);
+ ret = hclge_mac_phy_link_status_wait(hdev, en, true);
if (ret)
dev_err(&hdev->pdev->dev,
"phy loopback config mac mode timeout\n");
@@ -6734,6 +6741,19 @@ static void hclge_reset_tqp_stats(struct hnae3_handle *handle)
}
}
+static void hclge_flush_link_update(struct hclge_dev *hdev)
+{
+#define HCLGE_FLUSH_LINK_TIMEOUT 100000
+
+ unsigned long last = hdev->serv_processed_cnt;
+ int i = 0;
+
+ while (test_bit(HCLGE_STATE_LINK_UPDATING, &hdev->state) &&
+ i++ < HCLGE_FLUSH_LINK_TIMEOUT &&
+ last == hdev->serv_processed_cnt)
+ usleep_range(1, 1);
+}
+
static void hclge_set_timer_task(struct hnae3_handle *handle, bool enable)
{
struct hclge_vport *vport = hclge_get_vport(handle);
@@ -6742,12 +6762,12 @@ static void hclge_set_timer_task(struct hnae3_handle *handle, bool enable)
if (enable) {
hclge_task_schedule(hdev, round_jiffies_relative(HZ));
} else {
- /* Set the DOWN flag here to disable the service to be
- * scheduled again
- */
+ /* Set the DOWN flag here to disable link updating */
set_bit(HCLGE_STATE_DOWN, &hdev->state);
- cancel_delayed_work_sync(&hdev->service_task);
- clear_bit(HCLGE_STATE_SERVICE_SCHED, &hdev->state);
+
+ /* flush memory to make sure DOWN is seen by service task */
+ smp_mb__before_atomic();
+ hclge_flush_link_update(hdev);
}
}
@@ -7483,7 +7503,6 @@ void hclge_uninit_vport_mac_table(struct hclge_dev *hdev)
struct hclge_vport *vport;
int i;
- mutex_lock(&hdev->vport_cfg_mutex);
for (i = 0; i < hdev->num_alloc_vport; i++) {
vport = &hdev->vport[i];
list_for_each_entry_safe(mac, tmp, &vport->uc_mac_list, node) {
@@ -7496,7 +7515,6 @@ void hclge_uninit_vport_mac_table(struct hclge_dev *hdev)
kfree(mac);
}
}
- mutex_unlock(&hdev->vport_cfg_mutex);
}
static int hclge_get_mac_ethertype_cmd_status(struct hclge_dev *hdev,
@@ -8257,7 +8275,6 @@ void hclge_uninit_vport_vlan_table(struct hclge_dev *hdev)
struct hclge_vport *vport;
int i;
- mutex_lock(&hdev->vport_cfg_mutex);
for (i = 0; i < hdev->num_alloc_vport; i++) {
vport = &hdev->vport[i];
list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
@@ -8265,7 +8282,6 @@ void hclge_uninit_vport_vlan_table(struct hclge_dev *hdev)
kfree(vlan);
}
}
- mutex_unlock(&hdev->vport_cfg_mutex);
}
static void hclge_restore_vlan_table(struct hnae3_handle *handle)
@@ -8277,7 +8293,6 @@ static void hclge_restore_vlan_table(struct hnae3_handle *handle)
u16 state, vlan_id;
int i;
- mutex_lock(&hdev->vport_cfg_mutex);
for (i = 0; i < hdev->num_alloc_vport; i++) {
vport = &hdev->vport[i];
vlan_proto = vport->port_base_vlan_cfg.vlan_info.vlan_proto;
@@ -8303,8 +8318,6 @@ static void hclge_restore_vlan_table(struct hnae3_handle *handle)
break;
}
}
-
- mutex_unlock(&hdev->vport_cfg_mutex);
}
int hclge_en_hw_strip_rxvtag(struct hnae3_handle *handle, bool enable)
@@ -9256,6 +9269,7 @@ static void hclge_state_init(struct hclge_dev *hdev)
set_bit(HCLGE_STATE_DOWN, &hdev->state);
clear_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state);
clear_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
+ clear_bit(HCLGE_STATE_RST_FAIL, &hdev->state);
clear_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state);
clear_bit(HCLGE_STATE_MBX_HANDLING, &hdev->state);
}
@@ -9269,38 +9283,57 @@ static void hclge_state_uninit(struct hclge_dev *hdev)
del_timer_sync(&hdev->reset_timer);
if (hdev->service_task.work.func)
cancel_delayed_work_sync(&hdev->service_task);
- if (hdev->rst_service_task.func)
- cancel_work_sync(&hdev->rst_service_task);
- if (hdev->mbx_service_task.func)
- cancel_work_sync(&hdev->mbx_service_task);
}
static void hclge_flr_prepare(struct hnae3_ae_dev *ae_dev)
{
-#define HCLGE_FLR_WAIT_MS 100
-#define HCLGE_FLR_WAIT_CNT 50
- struct hclge_dev *hdev = ae_dev->priv;
- int cnt = 0;
+#define HCLGE_FLR_RETRY_WAIT_MS 500
+#define HCLGE_FLR_RETRY_CNT 5
- clear_bit(HNAE3_FLR_DOWN, &hdev->flr_state);
- clear_bit(HNAE3_FLR_DONE, &hdev->flr_state);
- set_bit(HNAE3_FLR_RESET, &hdev->default_reset_request);
- hclge_reset_event(hdev->pdev, NULL);
+ struct hclge_dev *hdev = ae_dev->priv;
+ int retry_cnt = 0;
+ int ret;
- while (!test_bit(HNAE3_FLR_DOWN, &hdev->flr_state) &&
- cnt++ < HCLGE_FLR_WAIT_CNT)
- msleep(HCLGE_FLR_WAIT_MS);
+retry:
+ down(&hdev->reset_sem);
+ set_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
+ hdev->reset_type = HNAE3_FLR_RESET;
+ ret = hclge_reset_prepare(hdev);
+ if (ret) {
+ dev_err(&hdev->pdev->dev, "fail to prepare FLR, ret=%d\n",
+ ret);
+ if (hdev->reset_pending ||
+ retry_cnt++ < HCLGE_FLR_RETRY_CNT) {
+ dev_err(&hdev->pdev->dev,
+ "reset_pending:0x%lx, retry_cnt:%d\n",
+ hdev->reset_pending, retry_cnt);
+ clear_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
+ up(&hdev->reset_sem);
+ msleep(HCLGE_FLR_RETRY_WAIT_MS);
+ goto retry;
+ }
+ }
- if (!test_bit(HNAE3_FLR_DOWN, &hdev->flr_state))
- dev_err(&hdev->pdev->dev,
- "flr wait down timeout: %d\n", cnt);
+ /* disable misc vector before FLR done */
+ hclge_enable_vector(&hdev->misc_vector, false);
+ set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
+ hdev->rst_stats.flr_rst_cnt++;
}
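
hclge_flr_prepare() now serializes against the reset service task through reset_sem (a semaphore initialized to 1 later in this patch), retrying the preparation up to five times with a 500 ms back-off; releasing the semaphore between attempts lets a pending reset run and clear whatever blocked the preparation. The retry skeleton, condensed:

retry:
	down(&hdev->reset_sem);		/* exclude the reset task */
	ret = hclge_reset_prepare(hdev);
	if (ret && retry_cnt++ < HCLGE_FLR_RETRY_CNT) {
		up(&hdev->reset_sem);	/* let a pending reset proceed */
		msleep(HCLGE_FLR_RETRY_WAIT_MS);
		goto retry;
	}
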
static void hclge_flr_done(struct hnae3_ae_dev *ae_dev)
{
struct hclge_dev *hdev = ae_dev->priv;
+ int ret;
+
+ hclge_enable_vector(&hdev->misc_vector, true);
+
+ ret = hclge_reset_rebuild(hdev);
+ if (ret)
+ dev_err(&hdev->pdev->dev, "fail to rebuild, ret=%d\n", ret);
- set_bit(HNAE3_FLR_DONE, &hdev->flr_state);
+ hdev->reset_type = HNAE3_NONE_RESET;
+ clear_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
+ up(&hdev->reset_sem);
}
static void hclge_clear_resetting_state(struct hclge_dev *hdev)
@@ -9342,21 +9375,17 @@ static int hclge_init_ae_dev(struct hnae3_ae_dev *ae_dev)
hdev->mps = ETH_FRAME_LEN + ETH_FCS_LEN + 2 * VLAN_HLEN;
mutex_init(&hdev->vport_lock);
- mutex_init(&hdev->vport_cfg_mutex);
spin_lock_init(&hdev->fd_rule_lock);
+ sema_init(&hdev->reset_sem, 1);
ret = hclge_pci_init(hdev);
- if (ret) {
- dev_err(&pdev->dev, "PCI init failed\n");
+ if (ret)
goto out;
- }
/* Firmware command queue initialize */
ret = hclge_cmd_queue_init(hdev);
- if (ret) {
- dev_err(&pdev->dev, "Cmd queue init failed, ret = %d.\n", ret);
+ if (ret)
goto err_pci_uninit;
- }
/* Firmware command initialize */
ret = hclge_cmd_init(hdev);
@@ -9364,11 +9393,8 @@ static int hclge_init_ae_dev(struct hnae3_ae_dev *ae_dev)
goto err_cmd_uninit;
ret = hclge_get_cap(hdev);
- if (ret) {
- dev_err(&pdev->dev, "get hw capability error, ret = %d.\n",
- ret);
+ if (ret)
goto err_cmd_uninit;
- }
ret = hclge_configure(hdev);
if (ret) {
@@ -9383,12 +9409,8 @@ static int hclge_init_ae_dev(struct hnae3_ae_dev *ae_dev)
}
ret = hclge_misc_irq_init(hdev);
- if (ret) {
- dev_err(&pdev->dev,
- "Misc IRQ(vector0) init error, ret = %d.\n",
- ret);
+ if (ret)
goto err_msi_uninit;
- }
ret = hclge_alloc_tqps(hdev);
if (ret) {
@@ -9397,31 +9419,22 @@ static int hclge_init_ae_dev(struct hnae3_ae_dev *ae_dev)
}
ret = hclge_alloc_vport(hdev);
- if (ret) {
- dev_err(&pdev->dev, "Allocate vport error, ret = %d.\n", ret);
+ if (ret)
goto err_msi_irq_uninit;
- }
ret = hclge_map_tqp(hdev);
- if (ret) {
- dev_err(&pdev->dev, "Map tqp error, ret = %d.\n", ret);
+ if (ret)
goto err_msi_irq_uninit;
- }
if (hdev->hw.mac.media_type == HNAE3_MEDIA_TYPE_COPPER) {
ret = hclge_mac_mdio_config(hdev);
- if (ret) {
- dev_err(&hdev->pdev->dev,
- "mdio config fail ret=%d\n", ret);
+ if (ret)
goto err_msi_irq_uninit;
- }
}
ret = hclge_init_umv_space(hdev);
- if (ret) {
- dev_err(&pdev->dev, "umv space init error, ret=%d.\n", ret);
+ if (ret)
goto err_mdiobus_unreg;
- }
ret = hclge_mac_init(hdev);
if (ret) {
@@ -9477,8 +9490,6 @@ static int hclge_init_ae_dev(struct hnae3_ae_dev *ae_dev)
timer_setup(&hdev->reset_timer, hclge_reset_timer, 0);
INIT_DELAYED_WORK(&hdev->service_task, hclge_service_task);
- INIT_WORK(&hdev->rst_service_task, hclge_reset_service_task);
- INIT_WORK(&hdev->mbx_service_task, hclge_mailbox_service_task);
/* Setup affinity after service timer setup because add_timer_on
* is called in affinity notify.
@@ -9512,6 +9523,8 @@ static int hclge_init_ae_dev(struct hnae3_ae_dev *ae_dev)
dev_info(&hdev->pdev->dev, "%s driver initialization finished.\n",
HCLGE_DRIVER_NAME);
+ hclge_task_schedule(hdev, round_jiffies_relative(HZ));
+
return 0;
err_mdiobus_unreg:
@@ -9534,7 +9547,7 @@ out:
static void hclge_stats_clear(struct hclge_dev *hdev)
{
- memset(&hdev->hw_stats, 0, sizeof(hdev->hw_stats));
+ memset(&hdev->mac_stats, 0, sizeof(hdev->mac_stats));
}
static int hclge_set_mac_spoofchk(struct hclge_dev *hdev, int vf, bool enable)
@@ -9895,7 +9908,6 @@ static void hclge_uninit_ae_dev(struct hnae3_ae_dev *ae_dev)
mutex_destroy(&hdev->vport_lock);
hclge_uninit_vport_mac_table(hdev);
hclge_uninit_vport_vlan_table(hdev);
- mutex_destroy(&hdev->vport_cfg_mutex);
ae_dev->priv = NULL;
}
@@ -10157,10 +10169,8 @@ static int hclge_get_dfx_reg_bd_num(struct hclge_dev *hdev,
int *bd_num_list,
u32 type_num)
{
-#define HCLGE_DFX_REG_BD_NUM 4
-
u32 entries_per_desc, desc_index, index, offset, i;
- struct hclge_desc desc[HCLGE_DFX_REG_BD_NUM];
+ struct hclge_desc desc[HCLGE_GET_DFX_REG_TYPE_CNT];
int ret;
ret = hclge_query_bd_num_cmd_send(hdev, desc);
@@ -10273,10 +10283,8 @@ static int hclge_get_dfx_reg(struct hclge_dev *hdev, void *data)
buf_len = sizeof(*desc_src) * bd_num_max;
desc_src = kzalloc(buf_len, GFP_KERNEL);
- if (!desc_src) {
- dev_err(&hdev->pdev->dev, "%s kzalloc failed\n", __func__);
+ if (!desc_src)
return -ENOMEM;
- }
for (i = 0; i < dfx_reg_type_num; i++) {
bd_num = bd_num_list[i];
@@ -10611,6 +10619,12 @@ static int hclge_init(void)
{
pr_info("%s is initializing\n", HCLGE_NAME);
+ hclge_wq = alloc_workqueue("%s", WQ_MEM_RECLAIM, 0, HCLGE_NAME);
+ if (!hclge_wq) {
+ pr_err("%s: failed to create workqueue\n", HCLGE_NAME);
+ return -ENOMEM;
+ }
+
hnae3_register_ae_algo(&ae_algo);
return 0;
@@ -10619,6 +10633,7 @@ static int hclge_init(void)
static void hclge_exit(void)
{
hnae3_unregister_ae_algo(&ae_algo);
+ destroy_workqueue(hclge_wq);
}
module_init(hclge_init);
module_exit(hclge_exit);
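
Both the PF and VF drivers (see the matching hclgevf_wq below) now queue their service work on a dedicated workqueue created with WQ_MEM_RECLAIM, which guarantees a rescuer thread so the NIC's work can make progress even under memory pressure; a network device can sit on a reclaim path (e.g. swap over network storage), where the shared system_wq offers no such guarantee. The lifecycle in miniature (a sketch with placeholder names):

static struct workqueue_struct *my_wq;

static int __init my_init(void)
{
	/* WQ_MEM_RECLAIM: a rescuer thread keeps this queue moving
	 * even when no new kworkers can be created.
	 */
	my_wq = alloc_workqueue("%s", WQ_MEM_RECLAIM, 0, "my_drv");
	if (!my_wq)
		return -ENOMEM;
	return 0;
}

static void __exit my_exit(void)
{
	destroy_workqueue(my_wq);	/* flushes remaining work first */
}
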
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h
index ebb4c6e9aed3..f78cbb4cc85e 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h
@@ -139,6 +139,8 @@
#define HCLGE_PHY_MDIX_STATUS_B 6
#define HCLGE_PHY_SPEED_DUP_RESOLVE_B 11
+#define HCLGE_GET_DFX_REG_TYPE_CNT 4
+
/* Factor used to calculate offset and bitmap of VF num */
#define HCLGE_VF_NUM_PER_CMD 64
@@ -208,13 +210,14 @@ enum HCLGE_DEV_STATE {
HCLGE_STATE_NIC_REGISTERED,
HCLGE_STATE_ROCE_REGISTERED,
HCLGE_STATE_SERVICE_INITED,
- HCLGE_STATE_SERVICE_SCHED,
HCLGE_STATE_RST_SERVICE_SCHED,
HCLGE_STATE_RST_HANDLING,
HCLGE_STATE_MBX_SERVICE_SCHED,
HCLGE_STATE_MBX_HANDLING,
HCLGE_STATE_STATISTICS_UPDATING,
HCLGE_STATE_CMD_DISABLE,
+ HCLGE_STATE_LINK_UPDATING,
+ HCLGE_STATE_RST_FAIL,
HCLGE_STATE_MAX
};
@@ -454,11 +457,7 @@ struct hclge_mac_stats {
u64 mac_rx_ctrl_pkt_num;
};
-#define HCLGE_STATS_TIMER_INTERVAL (60 * 5)
-struct hclge_hw_stats {
- struct hclge_mac_stats mac_stats;
- u32 stats_timer;
-};
+#define HCLGE_STATS_TIMER_INTERVAL 300UL
struct hclge_vlan_type_cfg {
u16 rx_ot_fst_vlan_type;
@@ -549,7 +548,7 @@ struct key_info {
/* assigned by firmware, the real filter number for each pf may be less */
#define MAX_FD_FILTER_NUM 4096
-#define HCLGE_FD_ARFS_EXPIRE_TIMER_INTERVAL 5
+#define HCLGE_ARFS_EXPIRE_INTERVAL 5UL
enum HCLGE_FD_ACTIVE_RULE_TYPE {
HCLGE_FD_RULE_NONE,
@@ -712,7 +711,7 @@ struct hclge_dev {
struct hnae3_ae_dev *ae_dev;
struct hclge_hw hw;
struct hclge_misc_vector misc_vector;
- struct hclge_hw_stats hw_stats;
+ struct hclge_mac_stats mac_stats;
unsigned long state;
unsigned long flr_state;
unsigned long last_reset_time;
@@ -723,6 +722,7 @@ struct hclge_dev {
unsigned long reset_request; /* reset has been requested */
unsigned long reset_pending; /* client rst is pending to be served */
struct hclge_rst_stats rst_stats;
+ struct semaphore reset_sem; /* protect reset process */
u32 fw_version;
u16 num_vmdq_vport; /* Num vmdq vport this PF has set up */
u16 num_tqps; /* Num task queue pairs of this PF */
@@ -774,8 +774,6 @@ struct hclge_dev {
unsigned long service_timer_previous;
struct timer_list reset_timer;
struct delayed_work service_task;
- struct work_struct rst_service_task;
- struct work_struct mbx_service_task;
bool cur_promisc;
int num_alloc_vfs; /* Actual number of VFs allocated */
@@ -811,7 +809,8 @@ struct hclge_dev {
struct hlist_head fd_rule_list;
spinlock_t fd_rule_lock; /* protect fd_rule_list and fd_bmap */
u16 hclge_fd_rule_num;
- u16 fd_arfs_expire_timer;
+ unsigned long serv_processed_cnt;
+ unsigned long last_serv_processed;
unsigned long fd_bmap[BITS_TO_LONGS(MAX_FD_FILTER_NUM)];
enum HCLGE_FD_ACTIVE_RULE_TYPE fd_active_type;
u8 fd_en;
@@ -825,8 +824,6 @@ struct hclge_dev {
u16 share_umv_size;
struct mutex umv_mutex; /* protect share_umv_size */
- struct mutex vport_cfg_mutex; /* Protect stored vf table */
-
DECLARE_KFIFO(mac_tnl_log, struct hclge_mac_tnl_stats,
HCLGE_MAC_TNL_LOG_SIZE);
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c
index 0b433ebe6a2d..a3c0822191a9 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c
@@ -86,10 +86,12 @@ static int hclge_send_mbx_msg(struct hclge_vport *vport, u8 *msg, u16 msg_len,
int hclge_inform_reset_assert_to_vf(struct hclge_vport *vport)
{
struct hclge_dev *hdev = vport->back;
- enum hnae3_reset_type reset_type;
+ u16 reset_type;
u8 msg_data[2];
u8 dest_vfid;
+ BUILD_BUG_ON(HNAE3_MAX_RESET > U16_MAX);
+
dest_vfid = (u8)vport->vport_id;
if (hdev->reset_type == HNAE3_FUNC_RESET)
@@ -635,7 +637,6 @@ static void hclge_handle_link_change_event(struct hclge_dev *hdev,
#define LINK_STATUS_OFFSET 1
#define LINK_FAIL_CODE_OFFSET 2
- clear_bit(HCLGE_STATE_SERVICE_SCHED, &hdev->state);
hclge_task_schedule(hdev, 0);
if (!req->msg[LINK_STATUS_OFFSET])
@@ -798,13 +799,11 @@ void hclge_mbx_handler(struct hclge_dev *hdev)
hclge_get_link_mode(vport, req);
break;
case HCLGE_MBX_GET_VF_FLR_STATUS:
- mutex_lock(&hdev->vport_cfg_mutex);
hclge_rm_vport_all_mac_table(vport, true,
HCLGE_MAC_ADDR_UC);
hclge_rm_vport_all_mac_table(vport, true,
HCLGE_MAC_ADDR_MC);
hclge_rm_vport_all_vlan_table(vport, true);
- mutex_unlock(&hdev->vport_cfg_mutex);
break;
case HCLGE_MBX_GET_MEDIA_TYPE:
ret = hclge_get_vf_media_type(vport, req);
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.c b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.c
index af2245e3bb95..f38d236ebf4f 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.c
@@ -443,7 +443,7 @@ void hclgevf_cmd_uninit(struct hclgevf_dev *hdev)
{
spin_lock_bh(&hdev->hw.cmq.csq.lock);
spin_lock(&hdev->hw.cmq.crq.lock);
- clear_bit(HCLGEVF_STATE_CMD_DISABLE, &hdev->state);
+ set_bit(HCLGEVF_STATE_CMD_DISABLE, &hdev->state);
hclgevf_cmd_uninit_regs(&hdev->hw);
spin_unlock(&hdev->hw.cmq.crq.lock);
spin_unlock_bh(&hdev->hw.cmq.csq.lock);
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
index 25d78a5aaa34..d6597206e692 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
@@ -16,6 +16,8 @@
static int hclgevf_reset_hdev(struct hclgevf_dev *hdev);
static struct hnae3_ae_algo ae_algovf;
+static struct workqueue_struct *hclgevf_wq;
+
static const struct pci_device_id ae_algovf_pci_tbl[] = {
{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_100G_VF), 0},
{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_100G_RDMA_DCB_PFC_VF), 0},
@@ -440,6 +442,9 @@ void hclgevf_update_link_status(struct hclgevf_dev *hdev, int link_state)
struct hnae3_client *rclient;
struct hnae3_client *client;
+ if (test_and_set_bit(HCLGEVF_STATE_LINK_UPDATING, &hdev->state))
+ return;
+
client = handle->client;
rclient = hdev->roce_client;
@@ -452,6 +457,8 @@ void hclgevf_update_link_status(struct hclgevf_dev *hdev, int link_state)
rclient->ops->link_status_change(rhandle, !!link_state);
hdev->hw.mac.link = link_state;
}
+
+ clear_bit(HCLGEVF_STATE_LINK_UPDATING, &hdev->state);
}
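
Both PF and VF now wrap link updating in a LINK_UPDATING state bit: test_and_set_bit() forms a lock-free single-flight section, so a second caller arriving mid-update simply skips its turn instead of blocking, and the PF-side hclge_flush_link_update() can poll the same bit to wait out an in-flight update. The guard in isolation:

/* At most one updater runs; latecomers return immediately. */
if (test_and_set_bit(HCLGEVF_STATE_LINK_UPDATING, &hdev->state))
	return;

/* ... read link state and notify NIC/RoCE clients ... */

clear_bit(HCLGEVF_STATE_LINK_UPDATING, &hdev->state);
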
static void hclgevf_update_link_mode(struct hclgevf_dev *hdev)
@@ -1309,14 +1316,13 @@ static int hclgevf_set_vlan_filter(struct hnae3_handle *handle,
msg_data[0] = is_kill;
memcpy(&msg_data[1], &vlan_id, sizeof(vlan_id));
memcpy(&msg_data[3], &proto, sizeof(proto));
- ret = hclgevf_send_mbx_msg(hdev, HCLGE_MBX_SET_VLAN,
- HCLGE_MBX_VLAN_FILTER, msg_data,
- HCLGEVF_VLAN_MBX_MSG_LEN, true, NULL, 0);
-
/* when removing the hw vlan filter fails, record the vlan id,
* and try to remove it from hw later, to be consistent
* with the stack.
*/
+ ret = hclgevf_send_mbx_msg(hdev, HCLGE_MBX_SET_VLAN,
+ HCLGE_MBX_VLAN_FILTER, msg_data,
+ HCLGEVF_VLAN_MBX_MSG_LEN, true, NULL, 0);
if (is_kill && ret)
set_bit(vlan_id, hdev->vlan_del_fail_bmap);
@@ -1404,32 +1410,6 @@ static int hclgevf_notify_client(struct hclgevf_dev *hdev,
return ret;
}
-static void hclgevf_flr_done(struct hnae3_ae_dev *ae_dev)
-{
- struct hclgevf_dev *hdev = ae_dev->priv;
-
- set_bit(HNAE3_FLR_DONE, &hdev->flr_state);
-}
-
-static int hclgevf_flr_poll_timeout(struct hclgevf_dev *hdev,
- unsigned long delay_us,
- unsigned long wait_cnt)
-{
- unsigned long cnt = 0;
-
- while (!test_bit(HNAE3_FLR_DONE, &hdev->flr_state) &&
- cnt++ < wait_cnt)
- usleep_range(delay_us, delay_us * 2);
-
- if (!test_bit(HNAE3_FLR_DONE, &hdev->flr_state)) {
- dev_err(&hdev->pdev->dev,
- "flr wait timeout\n");
- return -ETIMEDOUT;
- }
-
- return 0;
-}
-
static int hclgevf_reset_wait(struct hclgevf_dev *hdev)
{
#define HCLGEVF_RESET_WAIT_US 20000
@@ -1440,11 +1420,7 @@ static int hclgevf_reset_wait(struct hclgevf_dev *hdev)
u32 val;
int ret;
- if (hdev->reset_type == HNAE3_FLR_RESET)
- return hclgevf_flr_poll_timeout(hdev,
- HCLGEVF_RESET_WAIT_US,
- HCLGEVF_RESET_WAIT_CNT);
- else if (hdev->reset_type == HNAE3_VF_RESET)
+ if (hdev->reset_type == HNAE3_VF_RESET)
ret = readl_poll_timeout(hdev->hw.io_base +
HCLGEVF_VF_RST_ING, val,
!(val & HCLGEVF_VF_RST_ING_BIT),
@@ -1516,7 +1492,8 @@ static int hclgevf_reset_stack(struct hclgevf_dev *hdev)
/* clear handshake status with IMP */
hclgevf_reset_handshake(hdev, false);
- return 0;
+ /* bring up the nic to enable TX/RX again */
+ return hclgevf_notify_client(hdev, HNAE3_UP_CLIENT);
}
static int hclgevf_reset_prepare_wait(struct hclgevf_dev *hdev)
@@ -1525,18 +1502,10 @@ static int hclgevf_reset_prepare_wait(struct hclgevf_dev *hdev)
int ret = 0;
- switch (hdev->reset_type) {
- case HNAE3_VF_FUNC_RESET:
+ if (hdev->reset_type == HNAE3_VF_FUNC_RESET) {
ret = hclgevf_send_mbx_msg(hdev, HCLGE_MBX_RESET, 0, NULL,
0, true, NULL, sizeof(u8));
hdev->rst_stats.vf_func_rst_cnt++;
- break;
- case HNAE3_FLR_RESET:
- set_bit(HNAE3_FLR_DOWN, &hdev->flr_state);
- hdev->rst_stats.flr_rst_cnt++;
- break;
- default:
- break;
}
set_bit(HCLGEVF_STATE_CMD_DISABLE, &hdev->state);
@@ -1591,11 +1560,12 @@ static void hclgevf_reset_err_handle(struct hclgevf_dev *hdev)
set_bit(HCLGEVF_RESET_PENDING, &hdev->reset_state);
hclgevf_reset_task_schedule(hdev);
} else {
+ set_bit(HCLGEVF_STATE_RST_FAIL, &hdev->state);
hclgevf_dump_rst_info(hdev);
}
}
-static int hclgevf_reset(struct hclgevf_dev *hdev)
+static int hclgevf_reset_prepare(struct hclgevf_dev *hdev)
{
struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
int ret;
@@ -1605,61 +1575,64 @@ static int hclgevf_reset(struct hclgevf_dev *hdev)
*/
ae_dev->reset_type = hdev->reset_type;
hdev->rst_stats.rst_cnt++;
- rtnl_lock();
+ rtnl_lock();
/* bring down the nic to stop any ongoing TX/RX */
ret = hclgevf_notify_client(hdev, HNAE3_DOWN_CLIENT);
- if (ret)
- goto err_reset_lock;
-
rtnl_unlock();
-
- ret = hclgevf_reset_prepare_wait(hdev);
if (ret)
- goto err_reset;
+ return ret;
- /* check if VF could successfully fetch the hardware reset completion
- * status from the hardware
- */
- ret = hclgevf_reset_wait(hdev);
- if (ret) {
- /* can't do much in this situation, will disable VF */
- dev_err(&hdev->pdev->dev,
- "VF failed(=%d) to fetch H/W reset completion status\n",
- ret);
- goto err_reset;
- }
+ return hclgevf_reset_prepare_wait(hdev);
+}
+
+static int hclgevf_reset_rebuild(struct hclgevf_dev *hdev)
+{
+ struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
+ int ret;
hdev->rst_stats.hw_rst_done_cnt++;
rtnl_lock();
-
/* now, re-initialize the nic client and ae device */
ret = hclgevf_reset_stack(hdev);
+ rtnl_unlock();
if (ret) {
dev_err(&hdev->pdev->dev, "failed to reset VF stack\n");
- goto err_reset_lock;
+ return ret;
}
- /* bring up the nic to enable TX/RX again */
- ret = hclgevf_notify_client(hdev, HNAE3_UP_CLIENT);
- if (ret)
- goto err_reset_lock;
-
- rtnl_unlock();
-
hdev->last_reset_time = jiffies;
ae_dev->reset_type = HNAE3_NONE_RESET;
hdev->rst_stats.rst_done_cnt++;
hdev->rst_stats.rst_fail_cnt = 0;
+ clear_bit(HCLGEVF_STATE_RST_FAIL, &hdev->state);
+
+ return 0;
+}
+
+static void hclgevf_reset(struct hclgevf_dev *hdev)
+{
+ if (hclgevf_reset_prepare(hdev))
+ goto err_reset;
+
+ /* check if VF could successfully fetch the hardware reset completion
+ * status from the hardware
+ */
+ if (hclgevf_reset_wait(hdev)) {
+ /* can't do much in this situation, will disable VF */
+ dev_err(&hdev->pdev->dev,
+ "failed to fetch H/W reset completion status\n");
+ goto err_reset;
+ }
+
+ if (hclgevf_reset_rebuild(hdev))
+ goto err_reset;
+
+ return;
- return ret;
-err_reset_lock:
- rtnl_unlock();
err_reset:
hclgevf_reset_err_handle(hdev);
-
- return ret;
}
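
hclgevf_reset() is now a thin wrapper: the prepare, wait, and rebuild phases are separate functions so the FLR path below can drive the same halves around the PCI core's reset. A rough outline of the resulting control flow (a sketch with hypothetical names, not the driver's code):

struct my_dev;
static int my_reset_prepare(struct my_dev *hdev);
static int my_reset_wait(struct my_dev *hdev);
static int my_reset_rebuild(struct my_dev *hdev);
static void my_reset_err_handle(struct my_dev *hdev);

static void my_reset(struct my_dev *hdev)
{
	if (my_reset_prepare(hdev))	/* quiesce TX/RX, notify firmware */
		goto err_reset;
	if (my_reset_wait(hdev))	/* poll HW for reset completion */
		goto err_reset;
	if (my_reset_rebuild(hdev))	/* re-init stack, bring NIC back up */
		goto err_reset;
	return;

err_reset:
	my_reset_err_handle(hdev);	/* retry or mark the reset failed */
}
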
static enum hnae3_reset_type hclgevf_get_reset_level(struct hclgevf_dev *hdev,
@@ -1722,25 +1695,60 @@ static void hclgevf_set_def_reset_request(struct hnae3_ae_dev *ae_dev,
set_bit(rst_type, &hdev->default_reset_request);
}
+static void hclgevf_enable_vector(struct hclgevf_misc_vector *vector, bool en)
+{
+ writel(en ? 1 : 0, vector->addr);
+}
+
static void hclgevf_flr_prepare(struct hnae3_ae_dev *ae_dev)
{
-#define HCLGEVF_FLR_WAIT_MS 100
-#define HCLGEVF_FLR_WAIT_CNT 50
+#define HCLGEVF_FLR_RETRY_WAIT_MS 500
+#define HCLGEVF_FLR_RETRY_CNT 5
+
struct hclgevf_dev *hdev = ae_dev->priv;
- int cnt = 0;
+ int retry_cnt = 0;
+ int ret;
- clear_bit(HNAE3_FLR_DOWN, &hdev->flr_state);
- clear_bit(HNAE3_FLR_DONE, &hdev->flr_state);
- set_bit(HNAE3_FLR_RESET, &hdev->default_reset_request);
- hclgevf_reset_event(hdev->pdev, NULL);
+retry:
+ down(&hdev->reset_sem);
+ set_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state);
+ hdev->reset_type = HNAE3_FLR_RESET;
+ ret = hclgevf_reset_prepare(hdev);
+ if (ret) {
+ dev_err(&hdev->pdev->dev, "fail to prepare FLR, ret=%d\n",
+ ret);
+ if (hdev->reset_pending ||
+ retry_cnt++ < HCLGEVF_FLR_RETRY_CNT) {
+ dev_err(&hdev->pdev->dev,
+ "reset_pending:0x%lx, retry_cnt:%d\n",
+ hdev->reset_pending, retry_cnt);
+ clear_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state);
+ up(&hdev->reset_sem);
+ msleep(HCLGEVF_FLR_RETRY_WAIT_MS);
+ goto retry;
+ }
+ }
- while (!test_bit(HNAE3_FLR_DOWN, &hdev->flr_state) &&
- cnt++ < HCLGEVF_FLR_WAIT_CNT)
- msleep(HCLGEVF_FLR_WAIT_MS);
+ /* disable misc vector before FLR done */
+ hclgevf_enable_vector(&hdev->misc_vector, false);
+ hdev->rst_stats.flr_rst_cnt++;
+}
- if (!test_bit(HNAE3_FLR_DOWN, &hdev->flr_state))
- dev_err(&hdev->pdev->dev,
- "flr wait down timeout: %d\n", cnt);
+static void hclgevf_flr_done(struct hnae3_ae_dev *ae_dev)
+{
+ struct hclgevf_dev *hdev = ae_dev->priv;
+ int ret;
+
+ hclgevf_enable_vector(&hdev->misc_vector, true);
+
+ ret = hclgevf_reset_rebuild(hdev);
+ if (ret)
+ dev_warn(&hdev->pdev->dev, "fail to rebuild, ret=%d\n",
+ ret);
+
+ hdev->reset_type = HNAE3_NONE_RESET;
+ clear_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state);
+ up(&hdev->reset_sem);
}
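
Note how the semaphore introduced by this patch is held across the two FLR callbacks: flr_prepare() takes reset_sem and flr_done() releases it, so the reset service task (which also takes reset_sem) cannot start a conflicting reset while the PCI core performs the FLR in between. A minimal sketch, assuming a hypothetical pair of callbacks:

#include <linux/semaphore.h>

static struct semaphore reset_sem;	/* sema_init(&reset_sem, 1) at probe */

static void my_flr_prepare(void)
{
	down(&reset_sem);		/* exclude the reset service task */
	/* ... quiesce the device ... */
}

static void my_flr_done(void)
{
	/* ... rebuild the device ... */
	up(&reset_sem);			/* resets may run again */
}
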
static u32 hclgevf_get_fw_version(struct hnae3_handle *handle)
@@ -1767,62 +1775,37 @@ static void hclgevf_get_misc_vector(struct hclgevf_dev *hdev)
void hclgevf_reset_task_schedule(struct hclgevf_dev *hdev)
{
- if (!test_bit(HCLGEVF_STATE_RST_SERVICE_SCHED, &hdev->state) &&
- !test_bit(HCLGEVF_STATE_REMOVING, &hdev->state)) {
- set_bit(HCLGEVF_STATE_RST_SERVICE_SCHED, &hdev->state);
- schedule_work(&hdev->rst_service_task);
- }
+ if (!test_bit(HCLGEVF_STATE_REMOVING, &hdev->state) &&
+ !test_and_set_bit(HCLGEVF_STATE_RST_SERVICE_SCHED,
+ &hdev->state))
+ mod_delayed_work(hclgevf_wq, &hdev->service_task, 0);
}
void hclgevf_mbx_task_schedule(struct hclgevf_dev *hdev)
{
- if (!test_bit(HCLGEVF_STATE_MBX_SERVICE_SCHED, &hdev->state) &&
- !test_bit(HCLGEVF_STATE_MBX_HANDLING, &hdev->state)) {
- set_bit(HCLGEVF_STATE_MBX_SERVICE_SCHED, &hdev->state);
- schedule_work(&hdev->mbx_service_task);
- }
+ if (!test_bit(HCLGEVF_STATE_REMOVING, &hdev->state) &&
+ !test_and_set_bit(HCLGEVF_STATE_MBX_SERVICE_SCHED,
+ &hdev->state))
+ mod_delayed_work(hclgevf_wq, &hdev->service_task, 0);
}
-static void hclgevf_task_schedule(struct hclgevf_dev *hdev)
+static void hclgevf_task_schedule(struct hclgevf_dev *hdev,
+ unsigned long delay)
{
- if (!test_bit(HCLGEVF_STATE_DOWN, &hdev->state) &&
- !test_and_set_bit(HCLGEVF_STATE_SERVICE_SCHED, &hdev->state))
- schedule_work(&hdev->service_task);
+ if (!test_bit(HCLGEVF_STATE_REMOVING, &hdev->state) &&
+ !test_bit(HCLGEVF_STATE_RST_FAIL, &hdev->state))
+ mod_delayed_work(hclgevf_wq, &hdev->service_task, delay);
}
-static void hclgevf_deferred_task_schedule(struct hclgevf_dev *hdev)
-{
- /* if we have any pending mailbox event then schedule the mbx task */
- if (hdev->mbx_event_pending)
- hclgevf_mbx_task_schedule(hdev);
-
- if (test_bit(HCLGEVF_RESET_PENDING, &hdev->reset_state))
- hclgevf_reset_task_schedule(hdev);
-}
-
-static void hclgevf_service_timer(struct timer_list *t)
-{
- struct hclgevf_dev *hdev = from_timer(hdev, t, service_timer);
-
- mod_timer(&hdev->service_timer, jiffies +
- HCLGEVF_GENERAL_TASK_INTERVAL * HZ);
-
- hdev->stats_timer++;
- hclgevf_task_schedule(hdev);
-}
-
-static void hclgevf_reset_service_task(struct work_struct *work)
+static void hclgevf_reset_service_task(struct hclgevf_dev *hdev)
{
#define HCLGEVF_MAX_RESET_ATTEMPTS_CNT 3
- struct hclgevf_dev *hdev =
- container_of(work, struct hclgevf_dev, rst_service_task);
- int ret;
-
- if (test_and_set_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state))
+ if (!test_and_clear_bit(HCLGEVF_STATE_RST_SERVICE_SCHED, &hdev->state))
return;
- clear_bit(HCLGEVF_STATE_RST_SERVICE_SCHED, &hdev->state);
+ down(&hdev->reset_sem);
+ set_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state);
if (test_and_clear_bit(HCLGEVF_RESET_PENDING,
&hdev->reset_state)) {
@@ -1836,12 +1819,8 @@ static void hclgevf_reset_service_task(struct work_struct *work)
hdev->last_reset_time = jiffies;
while ((hdev->reset_type =
hclgevf_get_reset_level(hdev, &hdev->reset_pending))
- != HNAE3_NONE_RESET) {
- ret = hclgevf_reset(hdev);
- if (ret)
- dev_err(&hdev->pdev->dev,
- "VF stack reset failed %d.\n", ret);
- }
+ != HNAE3_NONE_RESET)
+ hclgevf_reset(hdev);
} else if (test_and_clear_bit(HCLGEVF_RESET_REQUESTED,
&hdev->reset_state)) {
/* we could be here when either of below happens:
@@ -1882,42 +1861,29 @@ static void hclgevf_reset_service_task(struct work_struct *work)
hclgevf_reset_task_schedule(hdev);
}
+ hdev->reset_type = HNAE3_NONE_RESET;
clear_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state);
+ up(&hdev->reset_sem);
}
-static void hclgevf_mailbox_service_task(struct work_struct *work)
+static void hclgevf_mailbox_service_task(struct hclgevf_dev *hdev)
{
- struct hclgevf_dev *hdev;
-
- hdev = container_of(work, struct hclgevf_dev, mbx_service_task);
+ if (!test_and_clear_bit(HCLGEVF_STATE_MBX_SERVICE_SCHED, &hdev->state))
+ return;
if (test_and_set_bit(HCLGEVF_STATE_MBX_HANDLING, &hdev->state))
return;
- clear_bit(HCLGEVF_STATE_MBX_SERVICE_SCHED, &hdev->state);
-
hclgevf_mbx_async_handler(hdev);
clear_bit(HCLGEVF_STATE_MBX_HANDLING, &hdev->state);
}
-static void hclgevf_keep_alive_timer(struct timer_list *t)
-{
- struct hclgevf_dev *hdev = from_timer(hdev, t, keep_alive_timer);
-
- schedule_work(&hdev->keep_alive_task);
- mod_timer(&hdev->keep_alive_timer, jiffies +
- HCLGEVF_KEEP_ALIVE_TASK_INTERVAL * HZ);
-}
-
-static void hclgevf_keep_alive_task(struct work_struct *work)
+static void hclgevf_keep_alive(struct hclgevf_dev *hdev)
{
- struct hclgevf_dev *hdev;
u8 respmsg;
int ret;
- hdev = container_of(work, struct hclgevf_dev, keep_alive_task);
-
if (test_bit(HCLGEVF_STATE_CMD_DISABLE, &hdev->state))
return;
@@ -1928,19 +1894,32 @@ static void hclgevf_keep_alive_task(struct work_struct *work)
"VF sends keep alive cmd failed(=%d)\n", ret);
}
-static void hclgevf_service_task(struct work_struct *work)
+static void hclgevf_periodic_service_task(struct hclgevf_dev *hdev)
{
- struct hnae3_handle *handle;
- struct hclgevf_dev *hdev;
+ unsigned long delta = round_jiffies_relative(HZ);
+ struct hnae3_handle *handle = &hdev->nic;
- hdev = container_of(work, struct hclgevf_dev, service_task);
- handle = &hdev->nic;
+ if (time_is_after_jiffies(hdev->last_serv_processed + HZ)) {
+ delta = jiffies - hdev->last_serv_processed;
- if (hdev->stats_timer >= HCLGEVF_STATS_TIMER_INTERVAL) {
- hclgevf_tqps_update_stats(handle);
- hdev->stats_timer = 0;
+ if (delta < round_jiffies_relative(HZ)) {
+ delta = round_jiffies_relative(HZ) - delta;
+ goto out;
+ }
}
+ hdev->serv_processed_cnt++;
+ if (!(hdev->serv_processed_cnt % HCLGEVF_KEEP_ALIVE_TASK_INTERVAL))
+ hclgevf_keep_alive(hdev);
+
+ if (test_bit(HCLGEVF_STATE_DOWN, &hdev->state)) {
+ hdev->last_serv_processed = jiffies;
+ goto out;
+ }
+
+ if (!(hdev->serv_processed_cnt % HCLGEVF_STATS_TIMER_INTERVAL))
+ hclgevf_tqps_update_stats(handle);
+
/* request the link status from the PF. PF would be able to tell VF
* about such updates in future so we might remove this later
*/
@@ -1950,9 +1929,27 @@ static void hclgevf_service_task(struct work_struct *work)
hclgevf_sync_vlan_filter(hdev);
- hclgevf_deferred_task_schedule(hdev);
+ hdev->last_serv_processed = jiffies;
- clear_bit(HCLGEVF_STATE_SERVICE_SCHED, &hdev->state);
+out:
+ hclgevf_task_schedule(hdev, delta);
+}
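
The delta computation above rate-limits the periodic work to roughly once per second: if the last full pass finished less than HZ jiffies ago, the task goes back to sleep for the remainder instead of processing again. A small sketch of the same arithmetic, with illustrative names:

#include <linux/jiffies.h>

static unsigned long last_processed;	/* jiffies at end of last pass */

static unsigned long next_delay(void)
{
	unsigned long delta = round_jiffies_relative(HZ);
	unsigned long since = jiffies - last_processed;

	/* less than a second since the last pass: sleep the remainder */
	if (time_is_after_jiffies(last_processed + HZ) && since < delta)
		return delta - since;

	return delta;	/* full interval elapsed: use the one-second period */
}
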
+
+static void hclgevf_service_task(struct work_struct *work)
+{
+ struct hclgevf_dev *hdev = container_of(work, struct hclgevf_dev,
+ service_task.work);
+
+ hclgevf_reset_service_task(hdev);
+ hclgevf_mailbox_service_task(hdev);
+ hclgevf_periodic_service_task(hdev);
+
+ /* Handle reset and mbx again in case the periodic task delays the
+ * handling by calling hclgevf_task_schedule() in
+ * hclgevf_periodic_service_task()
+ */
+ hclgevf_reset_service_task(hdev);
+ hclgevf_mailbox_service_task(hdev);
}
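
With this rework, one delayed work item replaces the old service timer, keep-alive timer, and the separate reset/mailbox work structs; each sub-task guards itself with its own *_SERVICE_SCHED bit, and mod_delayed_work() re-arms the single item instead of queueing duplicates. A condensed sketch of the dispatcher shape (hypothetical names; INIT_DELAYED_WORK wiring omitted):

#include <linux/workqueue.h>

static struct workqueue_struct *my_wq;
static struct delayed_work my_service_task;

static void my_reset_subtask(void)    { /* runs iff RST_SCHED bit was set */ }
static void my_mbx_subtask(void)      { /* runs iff MBX_SCHED bit was set */ }
static void my_periodic_subtask(void) { /* reschedules with computed delay */ }

static void my_service_fn(struct work_struct *work)
{
	my_reset_subtask();
	my_mbx_subtask();
	my_periodic_subtask();
	/* re-run reset/mbx in case the periodic part re-armed with a delay */
	my_reset_subtask();
	my_mbx_subtask();
}

static void my_schedule(unsigned long delay)
{
	mod_delayed_work(my_wq, &my_service_task, delay);
}
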
static void hclgevf_clear_event_cause(struct hclgevf_dev *hdev, u32 regclr)
@@ -2010,11 +2007,6 @@ static enum hclgevf_evt_cause hclgevf_check_evt_cause(struct hclgevf_dev *hdev,
return HCLGEVF_VECTOR0_EVENT_OTHER;
}
-static void hclgevf_enable_vector(struct hclgevf_misc_vector *vector, bool en)
-{
- writel(en ? 1 : 0, vector->addr);
-}
-
static irqreturn_t hclgevf_misc_irq_handle(int irq, void *data)
{
enum hclgevf_evt_cause event_cause;
@@ -2189,16 +2181,31 @@ static int hclgevf_init_vlan_config(struct hclgevf_dev *hdev)
false);
}
+static void hclgevf_flush_link_update(struct hclgevf_dev *hdev)
+{
+#define HCLGEVF_FLUSH_LINK_TIMEOUT 100000
+
+ unsigned long last = hdev->serv_processed_cnt;
+ int i = 0;
+
+ while (test_bit(HCLGEVF_STATE_LINK_UPDATING, &hdev->state) &&
+ i++ < HCLGEVF_FLUSH_LINK_TIMEOUT &&
+ last == hdev->serv_processed_cnt)
+ usleep_range(1, 1);
+}
+
static void hclgevf_set_timer_task(struct hnae3_handle *handle, bool enable)
{
struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
if (enable) {
- mod_timer(&hdev->service_timer, jiffies + HZ);
+ hclgevf_task_schedule(hdev, 0);
} else {
- del_timer_sync(&hdev->service_timer);
- cancel_work_sync(&hdev->service_task);
- clear_bit(HCLGEVF_STATE_SERVICE_SCHED, &hdev->state);
+ set_bit(HCLGEVF_STATE_DOWN, &hdev->state);
+
+ /* flush memory to make sure DOWN is seen by service task */
+ smp_mb__before_atomic();
+ hclgevf_flush_link_update(hdev);
}
}
@@ -2245,16 +2252,12 @@ static int hclgevf_set_alive(struct hnae3_handle *handle, bool alive)
static int hclgevf_client_start(struct hnae3_handle *handle)
{
- struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
int ret;
ret = hclgevf_set_alive(handle, true);
if (ret)
return ret;
- mod_timer(&hdev->keep_alive_timer, jiffies +
- HCLGEVF_KEEP_ALIVE_TASK_INTERVAL * HZ);
-
return 0;
}
@@ -2267,27 +2270,18 @@ static void hclgevf_client_stop(struct hnae3_handle *handle)
if (ret)
dev_warn(&hdev->pdev->dev,
"%s failed %d\n", __func__, ret);
-
- del_timer_sync(&hdev->keep_alive_timer);
- cancel_work_sync(&hdev->keep_alive_task);
}
static void hclgevf_state_init(struct hclgevf_dev *hdev)
{
- /* setup tasks for the MBX */
- INIT_WORK(&hdev->mbx_service_task, hclgevf_mailbox_service_task);
clear_bit(HCLGEVF_STATE_MBX_SERVICE_SCHED, &hdev->state);
clear_bit(HCLGEVF_STATE_MBX_HANDLING, &hdev->state);
+ clear_bit(HCLGEVF_STATE_RST_FAIL, &hdev->state);
- /* setup tasks for service timer */
- timer_setup(&hdev->service_timer, hclgevf_service_timer, 0);
-
- INIT_WORK(&hdev->service_task, hclgevf_service_task);
- clear_bit(HCLGEVF_STATE_SERVICE_SCHED, &hdev->state);
-
- INIT_WORK(&hdev->rst_service_task, hclgevf_reset_service_task);
+ INIT_DELAYED_WORK(&hdev->service_task, hclgevf_service_task);
mutex_init(&hdev->mbx_resp.mbx_mutex);
+ sema_init(&hdev->reset_sem, 1);
/* bring the device down */
set_bit(HCLGEVF_STATE_DOWN, &hdev->state);
@@ -2298,18 +2292,8 @@ static void hclgevf_state_uninit(struct hclgevf_dev *hdev)
set_bit(HCLGEVF_STATE_DOWN, &hdev->state);
set_bit(HCLGEVF_STATE_REMOVING, &hdev->state);
- if (hdev->keep_alive_timer.function)
- del_timer_sync(&hdev->keep_alive_timer);
- if (hdev->keep_alive_task.func)
- cancel_work_sync(&hdev->keep_alive_task);
- if (hdev->service_timer.function)
- del_timer_sync(&hdev->service_timer);
- if (hdev->service_task.func)
- cancel_work_sync(&hdev->service_task);
- if (hdev->mbx_service_task.func)
- cancel_work_sync(&hdev->mbx_service_task);
- if (hdev->rst_service_task.func)
- cancel_work_sync(&hdev->rst_service_task);
+ if (hdev->service_task.work.func)
+ cancel_delayed_work_sync(&hdev->service_task);
mutex_destroy(&hdev->mbx_resp.mbx_mutex);
}
@@ -2383,8 +2367,10 @@ static int hclgevf_misc_irq_init(struct hclgevf_dev *hdev)
hclgevf_get_misc_vector(hdev);
+ snprintf(hdev->misc_vector.name, HNAE3_INT_NAME_LEN, "%s-misc-%s",
+ HCLGEVF_NAME, pci_name(hdev->pdev));
ret = request_irq(hdev->misc_vector.vector_irq, hclgevf_misc_irq_handle,
- 0, "hclgevf_cmd", hdev);
+ 0, hdev->misc_vector.name, hdev);
if (ret) {
dev_err(&hdev->pdev->dev, "VF failed to request misc irq(%d)\n",
hdev->misc_vector.vector_irq);
@@ -2611,11 +2597,11 @@ static int hclgevf_query_vf_resource(struct hclgevf_dev *hdev)
if (hnae3_dev_roce_supported(hdev)) {
hdev->roce_base_msix_offset =
- hnae3_get_field(__le16_to_cpu(req->msixcap_localid_ba_rocee),
+ hnae3_get_field(le16_to_cpu(req->msixcap_localid_ba_rocee),
HCLGEVF_MSIX_OFT_ROCEE_M,
HCLGEVF_MSIX_OFT_ROCEE_S);
hdev->num_roce_msix =
- hnae3_get_field(__le16_to_cpu(req->vf_intr_vector_number),
+ hnae3_get_field(le16_to_cpu(req->vf_intr_vector_number),
HCLGEVF_VEC_NUM_M, HCLGEVF_VEC_NUM_S);
/* nic's msix number always equals the roce's. */
@@ -2628,7 +2614,7 @@ static int hclgevf_query_vf_resource(struct hclgevf_dev *hdev)
hdev->roce_base_msix_offset;
} else {
hdev->num_msi =
- hnae3_get_field(__le16_to_cpu(req->vf_intr_vector_number),
+ hnae3_get_field(le16_to_cpu(req->vf_intr_vector_number),
HCLGEVF_VEC_NUM_M, HCLGEVF_VEC_NUM_S);
hdev->num_nic_msix = hdev->num_msi;
@@ -2725,16 +2711,12 @@ static int hclgevf_init_hdev(struct hclgevf_dev *hdev)
int ret;
ret = hclgevf_pci_init(hdev);
- if (ret) {
- dev_err(&pdev->dev, "PCI initialization failed\n");
+ if (ret)
return ret;
- }
ret = hclgevf_cmd_queue_init(hdev);
- if (ret) {
- dev_err(&pdev->dev, "Cmd queue init failed: %d\n", ret);
+ if (ret)
goto err_cmd_queue_init;
- }
ret = hclgevf_cmd_init(hdev);
if (ret)
@@ -2742,11 +2724,8 @@ static int hclgevf_init_hdev(struct hclgevf_dev *hdev)
/* Get vf resource */
ret = hclgevf_query_vf_resource(hdev);
- if (ret) {
- dev_err(&hdev->pdev->dev,
- "Query vf status error, ret = %d.\n", ret);
+ if (ret)
goto err_cmd_init;
- }
ret = hclgevf_init_msi(hdev);
if (ret) {
@@ -2756,13 +2735,11 @@ static int hclgevf_init_hdev(struct hclgevf_dev *hdev)
hclgevf_state_init(hdev);
hdev->reset_level = HNAE3_VF_FUNC_RESET;
+ hdev->reset_type = HNAE3_NONE_RESET;
ret = hclgevf_misc_irq_init(hdev);
- if (ret) {
- dev_err(&pdev->dev, "failed(%d) to init Misc IRQ(vector0)\n",
- ret);
+ if (ret)
goto err_misc_irq_init;
- }
set_bit(HCLGEVF_STATE_IRQ_INITED, &hdev->state);
@@ -2779,10 +2756,8 @@ static int hclgevf_init_hdev(struct hclgevf_dev *hdev)
}
ret = hclgevf_set_handle_info(hdev);
- if (ret) {
- dev_err(&pdev->dev, "failed(%d) to set handle info\n", ret);
+ if (ret)
goto err_config;
- }
ret = hclgevf_config_gro(hdev, true);
if (ret)
@@ -2807,6 +2782,8 @@ static int hclgevf_init_hdev(struct hclgevf_dev *hdev)
dev_info(&hdev->pdev->dev, "finished initializing %s driver\n",
HCLGEVF_DRIVER_NAME);
+ hclgevf_task_schedule(hdev, round_jiffies_relative(HZ));
+
return 0;
err_config:
@@ -2838,7 +2815,6 @@ static void hclgevf_uninit_hdev(struct hclgevf_dev *hdev)
static int hclgevf_init_ae_dev(struct hnae3_ae_dev *ae_dev)
{
struct pci_dev *pdev = ae_dev->pdev;
- struct hclgevf_dev *hdev;
int ret;
ret = hclgevf_alloc_hdev(ae_dev);
@@ -2853,10 +2829,6 @@ static int hclgevf_init_ae_dev(struct hnae3_ae_dev *ae_dev)
return ret;
}
- hdev = ae_dev->priv;
- timer_setup(&hdev->keep_alive_timer, hclgevf_keep_alive_timer, 0);
- INIT_WORK(&hdev->keep_alive_task, hclgevf_keep_alive_task);
-
return 0;
}
@@ -3213,6 +3185,12 @@ static int hclgevf_init(void)
{
pr_info("%s is initializing\n", HCLGEVF_NAME);
+ hclgevf_wq = alloc_workqueue("%s", WQ_MEM_RECLAIM, 0, HCLGEVF_NAME);
+ if (!hclgevf_wq) {
+ pr_err("%s: failed to create workqueue\n", HCLGEVF_NAME);
+ return -ENOMEM;
+ }
+
hnae3_register_ae_algo(&ae_algovf);
return 0;
@@ -3221,6 +3199,7 @@ static int hclgevf_init(void)
static void hclgevf_exit(void)
{
hnae3_unregister_ae_algo(&ae_algovf);
+ destroy_workqueue(hclgevf_wq);
}
module_init(hclgevf_init);
module_exit(hclgevf_exit);
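
The driver now owns a module-lifetime workqueue: it is allocated with WQ_MEM_RECLAIM (the service task takes part in device recovery, so it must be able to make progress under memory pressure) before the algo is registered, and destroyed after unregistering so nothing can queue new work. A stripped-down sketch of the same init/exit pairing:

#include <linux/module.h>
#include <linux/workqueue.h>

static struct workqueue_struct *my_wq;

static int __init my_init(void)
{
	my_wq = alloc_workqueue("%s", WQ_MEM_RECLAIM, 0, "my_drv");
	if (!my_wq)
		return -ENOMEM;
	/* ... register the driver ... */
	return 0;
}

static void __exit my_exit(void)
{
	/* ... unregister the driver first ... */
	destroy_workqueue(my_wq);
}

module_init(my_init);
module_exit(my_exit);
MODULE_LICENSE("GPL");
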
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h
index 2f4c81bf4169..fee8d97f323c 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h
@@ -142,12 +142,13 @@ enum hclgevf_states {
HCLGEVF_STATE_REMOVING,
HCLGEVF_STATE_NIC_REGISTERED,
/* task states */
- HCLGEVF_STATE_SERVICE_SCHED,
HCLGEVF_STATE_RST_SERVICE_SCHED,
HCLGEVF_STATE_RST_HANDLING,
HCLGEVF_STATE_MBX_SERVICE_SCHED,
HCLGEVF_STATE_MBX_HANDLING,
HCLGEVF_STATE_CMD_DISABLE,
+ HCLGEVF_STATE_LINK_UPDATING,
+ HCLGEVF_STATE_RST_FAIL,
};
struct hclgevf_mac {
@@ -220,6 +221,7 @@ struct hclgevf_rss_cfg {
struct hclgevf_misc_vector {
u8 __iomem *addr;
int vector_irq;
+ char name[HNAE3_INT_NAME_LEN];
};
struct hclgevf_rst_stats {
@@ -251,6 +253,7 @@ struct hclgevf_dev {
unsigned long reset_state; /* requested, pending */
struct hclgevf_rst_stats rst_stats;
u32 reset_attempts;
+ struct semaphore reset_sem; /* protect reset process */
u32 fw_version;
u16 num_tqps; /* num task queue pairs of this PF */
@@ -283,12 +286,7 @@ struct hclgevf_dev {
struct hclgevf_mbx_resp_status mbx_resp; /* mailbox response */
struct hclgevf_mbx_arq_ring arq; /* mailbox async rx queue */
- struct timer_list service_timer;
- struct timer_list keep_alive_timer;
- struct work_struct service_task;
- struct work_struct keep_alive_task;
- struct work_struct rst_service_task;
- struct work_struct mbx_service_task;
+ struct delayed_work service_task;
struct hclgevf_tqp *htqp;
@@ -298,7 +296,8 @@ struct hclgevf_dev {
struct hnae3_client *nic_client;
struct hnae3_client *roce_client;
u32 flag;
- u32 stats_timer;
+ unsigned long serv_processed_cnt;
+ unsigned long last_serv_processed;
};
static inline bool hclgevf_is_reset_pending(struct hclgevf_dev *hdev)
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_main.c b/drivers/net/ethernet/huawei/hinic/hinic_main.c
index 2411ad270c98..02a14f5e7fe3 100644
--- a/drivers/net/ethernet/huawei/hinic/hinic_main.c
+++ b/drivers/net/ethernet/huawei/hinic/hinic_main.c
@@ -766,7 +766,7 @@ static void hinic_set_rx_mode(struct net_device *netdev)
queue_work(nic_dev->workq, &rx_mode_work->work);
}
-static void hinic_tx_timeout(struct net_device *netdev)
+static void hinic_tx_timeout(struct net_device *netdev, unsigned int txqueue)
{
struct hinic_dev *nic_dev = netdev_priv(netdev);
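
From here on, most of the remaining hunks are the same mechanical conversion: the netdev core's .ndo_tx_timeout hook gained a txqueue argument identifying the queue that hung, so every driver's handler grows the extra parameter even where it goes unused. A minimal sketch of a handler that actually uses it (illustrative names):

#include <linux/netdevice.h>

static void my_tx_timeout(struct net_device *netdev, unsigned int txqueue)
{
	struct netdev_queue *txq = netdev_get_tx_queue(netdev, txqueue);

	netdev_warn(netdev, "TX queue %u timed out (state 0x%lx)\n",
		    txqueue, txq->state);
	/* ... schedule a device reset ... */
}

static const struct net_device_ops my_netdev_ops = {
	.ndo_tx_timeout	= my_tx_timeout,
};
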
diff --git a/drivers/net/ethernet/i825xx/82596.c b/drivers/net/ethernet/i825xx/82596.c
index 92929750f832..bef676d93339 100644
--- a/drivers/net/ethernet/i825xx/82596.c
+++ b/drivers/net/ethernet/i825xx/82596.c
@@ -363,7 +363,7 @@ static netdev_tx_t i596_start_xmit(struct sk_buff *skb, struct net_device *dev);
static irqreturn_t i596_interrupt(int irq, void *dev_id);
static int i596_close(struct net_device *dev);
static void i596_add_cmd(struct net_device *dev, struct i596_cmd *cmd);
-static void i596_tx_timeout (struct net_device *dev);
+static void i596_tx_timeout (struct net_device *dev, unsigned int txqueue);
static void print_eth(unsigned char *buf, char *str);
static void set_multicast_list(struct net_device *dev);
@@ -1019,7 +1019,7 @@ err_irq_dev:
return res;
}
-static void i596_tx_timeout (struct net_device *dev)
+static void i596_tx_timeout (struct net_device *dev, unsigned int txqueue)
{
struct i596_private *lp = dev->ml_priv;
int ioaddr = dev->base_addr;
diff --git a/drivers/net/ethernet/i825xx/ether1.c b/drivers/net/ethernet/i825xx/ether1.c
index bb3b8adbe4f0..a0bfb509e002 100644
--- a/drivers/net/ethernet/i825xx/ether1.c
+++ b/drivers/net/ethernet/i825xx/ether1.c
@@ -66,7 +66,7 @@ static netdev_tx_t ether1_sendpacket(struct sk_buff *skb,
static irqreturn_t ether1_interrupt(int irq, void *dev_id);
static int ether1_close(struct net_device *dev);
static void ether1_setmulticastlist(struct net_device *dev);
-static void ether1_timeout(struct net_device *dev);
+static void ether1_timeout(struct net_device *dev, unsigned int txqueue);
/* ------------------------------------------------------------------------- */
@@ -650,7 +650,7 @@ ether1_open (struct net_device *dev)
}
static void
-ether1_timeout(struct net_device *dev)
+ether1_timeout(struct net_device *dev, unsigned int txqueue)
{
printk(KERN_WARNING "%s: transmit timeout, network cable problem?\n",
dev->name);
diff --git a/drivers/net/ethernet/i825xx/lib82596.c b/drivers/net/ethernet/i825xx/lib82596.c
index f9742af7f142..b03757e169e4 100644
--- a/drivers/net/ethernet/i825xx/lib82596.c
+++ b/drivers/net/ethernet/i825xx/lib82596.c
@@ -351,7 +351,7 @@ static netdev_tx_t i596_start_xmit(struct sk_buff *skb, struct net_device *dev);
static irqreturn_t i596_interrupt(int irq, void *dev_id);
static int i596_close(struct net_device *dev);
static void i596_add_cmd(struct net_device *dev, struct i596_cmd *cmd);
-static void i596_tx_timeout (struct net_device *dev);
+static void i596_tx_timeout (struct net_device *dev, unsigned int txqueue);
static void print_eth(unsigned char *buf, char *str);
static void set_multicast_list(struct net_device *dev);
static inline void ca(struct net_device *dev);
@@ -936,7 +936,7 @@ out_remove_rx_bufs:
return -EAGAIN;
}
-static void i596_tx_timeout (struct net_device *dev)
+static void i596_tx_timeout (struct net_device *dev, unsigned int txqueue)
{
struct i596_private *lp = netdev_priv(dev);
diff --git a/drivers/net/ethernet/i825xx/sni_82596.c b/drivers/net/ethernet/i825xx/sni_82596.c
index 6436a98c5953..22f5887578b2 100644
--- a/drivers/net/ethernet/i825xx/sni_82596.c
+++ b/drivers/net/ethernet/i825xx/sni_82596.c
@@ -91,10 +91,10 @@ static int sni_82596_probe(struct platform_device *dev)
idprom = platform_get_resource(dev, IORESOURCE_MEM, 2);
if (!res || !ca || !options || !idprom)
return -ENODEV;
- mpu_addr = ioremap_nocache(res->start, 4);
+ mpu_addr = ioremap(res->start, 4);
if (!mpu_addr)
return -ENOMEM;
- ca_addr = ioremap_nocache(ca->start, 4);
+ ca_addr = ioremap(ca->start, 4);
if (!ca_addr)
goto probe_failed_free_mpu;
@@ -110,7 +110,7 @@ static int sni_82596_probe(struct platform_device *dev)
netdevice->base_addr = res->start;
netdevice->irq = platform_get_irq(dev, 0);
- eth_addr = ioremap_nocache(idprom->start, 0x10);
+ eth_addr = ioremap(idprom->start, 0x10);
if (!eth_addr)
goto probe_failed;
diff --git a/drivers/net/ethernet/i825xx/sun3_82586.c b/drivers/net/ethernet/i825xx/sun3_82586.c
index 1a86184d44c0..4564ee02c95f 100644
--- a/drivers/net/ethernet/i825xx/sun3_82586.c
+++ b/drivers/net/ethernet/i825xx/sun3_82586.c
@@ -125,7 +125,7 @@ static netdev_tx_t sun3_82586_send_packet(struct sk_buff *,
struct net_device *);
static struct net_device_stats *sun3_82586_get_stats(struct net_device *dev);
static void set_multicast_list(struct net_device *dev);
-static void sun3_82586_timeout(struct net_device *dev);
+static void sun3_82586_timeout(struct net_device *dev, unsigned int txqueue);
#if 0
static void sun3_82586_dump(struct net_device *,void *);
#endif
@@ -965,7 +965,7 @@ static void startrecv586(struct net_device *dev)
WAIT_4_SCB_CMD_RUC(); /* wait for accept cmd. (no timeout!!) */
}
-static void sun3_82586_timeout(struct net_device *dev)
+static void sun3_82586_timeout(struct net_device *dev, unsigned int txqueue)
{
struct priv *p = netdev_priv(dev);
#ifndef NO_NOPCOMMANDS
diff --git a/drivers/net/ethernet/ibm/ehea/ehea_main.c b/drivers/net/ethernet/ibm/ehea/ehea_main.c
index 13e30eba5349..0273fb7a9d01 100644
--- a/drivers/net/ethernet/ibm/ehea/ehea_main.c
+++ b/drivers/net/ethernet/ibm/ehea/ehea_main.c
@@ -2786,7 +2786,7 @@ out:
return;
}
-static void ehea_tx_watchdog(struct net_device *dev)
+static void ehea_tx_watchdog(struct net_device *dev, unsigned int txqueue)
{
struct ehea_port *port = netdev_priv(dev);
diff --git a/drivers/net/ethernet/ibm/emac/core.c b/drivers/net/ethernet/ibm/emac/core.c
index 2e40425d8a34..b7fc17756c51 100644
--- a/drivers/net/ethernet/ibm/emac/core.c
+++ b/drivers/net/ethernet/ibm/emac/core.c
@@ -776,7 +776,7 @@ static void emac_reset_work(struct work_struct *work)
mutex_unlock(&dev->link_lock);
}
-static void emac_tx_timeout(struct net_device *ndev)
+static void emac_tx_timeout(struct net_device *ndev, unsigned int txqueue)
{
struct emac_instance *dev = netdev_priv(ndev);
diff --git a/drivers/net/ethernet/ibm/ibmvnic.c b/drivers/net/ethernet/ibm/ibmvnic.c
index 830791ab4619..c75239d8820f 100644
--- a/drivers/net/ethernet/ibm/ibmvnic.c
+++ b/drivers/net/ethernet/ibm/ibmvnic.c
@@ -2282,7 +2282,7 @@ err:
return -ret;
}
-static void ibmvnic_tx_timeout(struct net_device *dev)
+static void ibmvnic_tx_timeout(struct net_device *dev, unsigned int txqueue)
{
struct ibmvnic_adapter *adapter = netdev_priv(dev);
diff --git a/drivers/net/ethernet/intel/e100.c b/drivers/net/ethernet/intel/e100.c
index a65d5a9ba7db..1b8d015ebfb0 100644
--- a/drivers/net/ethernet/intel/e100.c
+++ b/drivers/net/ethernet/intel/e100.c
@@ -2316,7 +2316,7 @@ static void e100_down(struct nic *nic)
e100_rx_clean_list(nic);
}
-static void e100_tx_timeout(struct net_device *netdev)
+static void e100_tx_timeout(struct net_device *netdev, unsigned int txqueue)
{
struct nic *nic = netdev_priv(netdev);
diff --git a/drivers/net/ethernet/intel/e1000/e1000_main.c b/drivers/net/ethernet/intel/e1000/e1000_main.c
index aca97b084003..2bced34c19ba 100644
--- a/drivers/net/ethernet/intel/e1000/e1000_main.c
+++ b/drivers/net/ethernet/intel/e1000/e1000_main.c
@@ -134,7 +134,7 @@ static int e1000_mii_ioctl(struct net_device *netdev, struct ifreq *ifr,
int cmd);
static void e1000_enter_82542_rst(struct e1000_adapter *adapter);
static void e1000_leave_82542_rst(struct e1000_adapter *adapter);
-static void e1000_tx_timeout(struct net_device *dev);
+static void e1000_tx_timeout(struct net_device *dev, unsigned int txqueue);
static void e1000_reset_task(struct work_struct *work);
static void e1000_smartspeed(struct e1000_adapter *adapter);
static int e1000_82547_fifo_workaround(struct e1000_adapter *adapter,
@@ -3488,7 +3488,7 @@ exit:
* e1000_tx_timeout - Respond to a Tx Hang
* @netdev: network interface device structure
**/
-static void e1000_tx_timeout(struct net_device *netdev)
+static void e1000_tx_timeout(struct net_device *netdev, unsigned int txqueue)
{
struct e1000_adapter *adapter = netdev_priv(netdev);
diff --git a/drivers/net/ethernet/intel/e1000e/e1000.h b/drivers/net/ethernet/intel/e1000e/e1000.h
index 6c51b1bad8c4..37a2314d3e6b 100644
--- a/drivers/net/ethernet/intel/e1000e/e1000.h
+++ b/drivers/net/ethernet/intel/e1000e/e1000.h
@@ -185,13 +185,12 @@ struct e1000_phy_regs {
/* board specific private data structure */
struct e1000_adapter {
+ struct timer_list watchdog_timer;
struct timer_list phy_info_timer;
struct timer_list blink_timer;
struct work_struct reset_task;
- struct delayed_work watchdog_task;
-
- struct workqueue_struct *e1000_workqueue;
+ struct work_struct watchdog_task;
const struct e1000_info *ei;
diff --git a/drivers/net/ethernet/intel/e1000e/netdev.c b/drivers/net/ethernet/intel/e1000e/netdev.c
index fe7997c18a10..db4ea58bac82 100644
--- a/drivers/net/ethernet/intel/e1000e/netdev.c
+++ b/drivers/net/ethernet/intel/e1000e/netdev.c
@@ -1780,8 +1780,7 @@ static irqreturn_t e1000_intr_msi(int __always_unused irq, void *data)
}
/* guard against interrupt when we're going down */
if (!test_bit(__E1000_DOWN, &adapter->state))
- mod_delayed_work(adapter->e1000_workqueue,
- &adapter->watchdog_task, HZ);
+ mod_timer(&adapter->watchdog_timer, jiffies + 1);
}
/* Reset on uncorrectable ECC error */
@@ -1861,8 +1860,7 @@ static irqreturn_t e1000_intr(int __always_unused irq, void *data)
}
/* guard against interrupt when we're going down */
if (!test_bit(__E1000_DOWN, &adapter->state))
- mod_delayed_work(adapter->e1000_workqueue,
- &adapter->watchdog_task, HZ);
+ mod_timer(&adapter->watchdog_timer, jiffies + 1);
}
/* Reset on uncorrectable ECC error */
@@ -1907,8 +1905,7 @@ static irqreturn_t e1000_msix_other(int __always_unused irq, void *data)
hw->mac.get_link_status = true;
/* guard against interrupt when we're going down */
if (!test_bit(__E1000_DOWN, &adapter->state))
- mod_delayed_work(adapter->e1000_workqueue,
- &adapter->watchdog_task, HZ);
+ mod_timer(&adapter->watchdog_timer, jiffies + 1);
}
if (!test_bit(__E1000_DOWN, &adapter->state))
@@ -4284,6 +4281,7 @@ void e1000e_down(struct e1000_adapter *adapter, bool reset)
napi_synchronize(&adapter->napi);
+ del_timer_sync(&adapter->watchdog_timer);
del_timer_sync(&adapter->phy_info_timer);
spin_lock(&adapter->stats64_lock);
@@ -4723,7 +4721,7 @@ int e1000e_close(struct net_device *netdev)
e1000_free_irq(adapter);
/* Link status message must follow this format */
- pr_info("%s NIC Link is Down\n", netdev->name);
+ netdev_info(netdev, "NIC Link is Down\n");
}
napi_disable(&adapter->napi);
@@ -5073,12 +5071,13 @@ static void e1000_print_link_info(struct e1000_adapter *adapter)
u32 ctrl = er32(CTRL);
/* Link status message must follow this format for user tools */
- pr_info("%s NIC Link is Up %d Mbps %s Duplex, Flow Control: %s\n",
- adapter->netdev->name, adapter->link_speed,
- adapter->link_duplex == FULL_DUPLEX ? "Full" : "Half",
- (ctrl & E1000_CTRL_TFCE) && (ctrl & E1000_CTRL_RFCE) ? "Rx/Tx" :
- (ctrl & E1000_CTRL_RFCE) ? "Rx" :
- (ctrl & E1000_CTRL_TFCE) ? "Tx" : "None");
+ netdev_info(adapter->netdev,
+ "NIC Link is Up %d Mbps %s Duplex, Flow Control: %s\n",
+ adapter->link_speed,
+ adapter->link_duplex == FULL_DUPLEX ? "Full" : "Half",
+ (ctrl & E1000_CTRL_TFCE) && (ctrl & E1000_CTRL_RFCE) ? "Rx/Tx" :
+ (ctrl & E1000_CTRL_RFCE) ? "Rx" :
+ (ctrl & E1000_CTRL_TFCE) ? "Tx" : "None");
}
static bool e1000e_has_link(struct e1000_adapter *adapter)
@@ -5155,11 +5154,25 @@ static void e1000e_check_82574_phy_workaround(struct e1000_adapter *adapter)
}
}
+/**
+ * e1000_watchdog - Timer Call-back
+ * @t: pointer to the timer_list from which our adapter is recovered
+ **/
+static void e1000_watchdog(struct timer_list *t)
+{
+ struct e1000_adapter *adapter = from_timer(adapter, t, watchdog_timer);
+
+ /* Do the rest outside of interrupt context */
+ schedule_work(&adapter->watchdog_task);
+
+ /* TODO: make this use queue_delayed_work() */
+}
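
This restores the classic timer-kicks-work pattern: the timer callback runs in softirq context, so it only recovers its container via from_timer() and punts the real processing to a work item. A minimal sketch, assuming a hypothetical adapter struct:

#include <linux/timer.h>
#include <linux/workqueue.h>

struct my_adapter {
	struct timer_list watchdog_timer;
	struct work_struct watchdog_task;
};

static void my_watchdog(struct timer_list *t)
{
	struct my_adapter *a = from_timer(a, t, watchdog_timer);

	schedule_work(&a->watchdog_task);	/* heavy lifting in process context */
}

/* in probe():
 *	timer_setup(&a->watchdog_timer, my_watchdog, 0);
 *	INIT_WORK(&a->watchdog_task, my_watchdog_task);
 */
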
+
static void e1000_watchdog_task(struct work_struct *work)
{
struct e1000_adapter *adapter = container_of(work,
struct e1000_adapter,
- watchdog_task.work);
+ watchdog_task);
struct net_device *netdev = adapter->netdev;
struct e1000_mac_info *mac = &adapter->hw.mac;
struct e1000_phy_info *phy = &adapter->hw.phy;
@@ -5307,7 +5320,7 @@ static void e1000_watchdog_task(struct work_struct *work)
adapter->link_speed = 0;
adapter->link_duplex = 0;
/* Link status message must follow this format */
- pr_info("%s NIC Link is Down\n", adapter->netdev->name);
+ netdev_info(netdev, "NIC Link is Down\n");
netif_carrier_off(netdev);
netif_stop_queue(netdev);
if (!test_bit(__E1000_DOWN, &adapter->state))
@@ -5407,9 +5420,8 @@ link_up:
/* Reset the timer */
if (!test_bit(__E1000_DOWN, &adapter->state))
- queue_delayed_work(adapter->e1000_workqueue,
- &adapter->watchdog_task,
- round_jiffies(2 * HZ));
+ mod_timer(&adapter->watchdog_timer,
+ round_jiffies(jiffies + 2 * HZ));
}
#define E1000_TX_FLAGS_CSUM 0x00000001
@@ -5929,7 +5941,7 @@ static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb,
* e1000_tx_timeout - Respond to a Tx Hang
* @netdev: network interface device structure
**/
-static void e1000_tx_timeout(struct net_device *netdev)
+static void e1000_tx_timeout(struct net_device *netdev, unsigned int txqueue)
{
struct e1000_adapter *adapter = netdev_priv(netdev);
@@ -7449,21 +7461,11 @@ static int e1000_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
goto err_eeprom;
}
- adapter->e1000_workqueue = alloc_workqueue("%s", WQ_MEM_RECLAIM, 0,
- e1000e_driver_name);
-
- if (!adapter->e1000_workqueue) {
- err = -ENOMEM;
- goto err_workqueue;
- }
-
- INIT_DELAYED_WORK(&adapter->watchdog_task, e1000_watchdog_task);
- queue_delayed_work(adapter->e1000_workqueue, &adapter->watchdog_task,
- 0);
-
+ timer_setup(&adapter->watchdog_timer, e1000_watchdog, 0);
timer_setup(&adapter->phy_info_timer, e1000_update_phy_info, 0);
INIT_WORK(&adapter->reset_task, e1000_reset_task);
+ INIT_WORK(&adapter->watchdog_task, e1000_watchdog_task);
INIT_WORK(&adapter->downshift_task, e1000e_downshift_workaround);
INIT_WORK(&adapter->update_phy_task, e1000e_update_phy_task);
INIT_WORK(&adapter->print_hang_task, e1000_print_hw_hang);
@@ -7557,9 +7559,6 @@ static int e1000_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
return 0;
err_register:
- flush_workqueue(adapter->e1000_workqueue);
- destroy_workqueue(adapter->e1000_workqueue);
-err_workqueue:
if (!(adapter->flags & FLAG_HAS_AMT))
e1000e_release_hw_control(adapter);
err_eeprom:
@@ -7604,17 +7603,15 @@ static void e1000_remove(struct pci_dev *pdev)
* from being rescheduled.
*/
set_bit(__E1000_DOWN, &adapter->state);
+ del_timer_sync(&adapter->watchdog_timer);
del_timer_sync(&adapter->phy_info_timer);
cancel_work_sync(&adapter->reset_task);
+ cancel_work_sync(&adapter->watchdog_task);
cancel_work_sync(&adapter->downshift_task);
cancel_work_sync(&adapter->update_phy_task);
cancel_work_sync(&adapter->print_hang_task);
- cancel_delayed_work(&adapter->watchdog_task);
- flush_workqueue(adapter->e1000_workqueue);
- destroy_workqueue(adapter->e1000_workqueue);
-
if (adapter->flags & FLAG_HAS_HW_TIMESTAMP) {
cancel_work_sync(&adapter->tx_hwtstamp_work);
if (adapter->tx_hwtstamp_skb) {
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_netdev.c b/drivers/net/ethernet/intel/fm10k/fm10k_netdev.c
index 68baee04dc58..0637ccadee79 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k_netdev.c
+++ b/drivers/net/ethernet/intel/fm10k/fm10k_netdev.c
@@ -696,21 +696,24 @@ static netdev_tx_t fm10k_xmit_frame(struct sk_buff *skb, struct net_device *dev)
/**
* fm10k_tx_timeout - Respond to a Tx Hang
* @netdev: network interface device structure
+ * @txqueue: the index of the Tx queue that timed out
**/
-static void fm10k_tx_timeout(struct net_device *netdev)
+static void fm10k_tx_timeout(struct net_device *netdev, unsigned int txqueue)
{
struct fm10k_intfc *interface = netdev_priv(netdev);
+ struct fm10k_ring *tx_ring;
bool real_tx_hang = false;
- int i;
-
-#define TX_TIMEO_LIMIT 16000
- for (i = 0; i < interface->num_tx_queues; i++) {
- struct fm10k_ring *tx_ring = interface->tx_ring[i];
- if (check_for_tx_hang(tx_ring) && fm10k_check_tx_hang(tx_ring))
- real_tx_hang = true;
+ if (txqueue >= interface->num_tx_queues) {
+ WARN(1, "invalid Tx queue index %u", txqueue);
+ return;
}
+ tx_ring = interface->tx_ring[txqueue];
+ if (check_for_tx_hang(tx_ring) && fm10k_check_tx_hang(tx_ring))
+ real_tx_hang = true;
+
+#define TX_TIMEO_LIMIT 16000
if (real_tx_hang) {
fm10k_tx_timeout_reset(interface);
} else {
diff --git a/drivers/net/ethernet/intel/i40e/i40e_adminq.c b/drivers/net/ethernet/intel/i40e/i40e_adminq.c
index 9f0a4e92a231..37514a75f928 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_adminq.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_adminq.c
@@ -536,6 +536,11 @@ static void i40e_set_hw_flags(struct i40e_hw *hw)
(aq->api_maj_ver == 1 &&
aq->api_min_ver >= I40E_MINOR_VER_FW_LLDP_STOPPABLE_X722))
hw->flags |= I40E_HW_FLAG_FW_LLDP_STOPPABLE;
+
+ if (aq->api_maj_ver > 1 ||
+ (aq->api_maj_ver == 1 &&
+ aq->api_min_ver >= I40E_MINOR_VER_GET_LINK_INFO_X722))
+ hw->flags |= I40E_HW_FLAG_AQ_PHY_ACCESS_CAPABLE;
/* fall through */
default:
break;
diff --git a/drivers/net/ethernet/intel/i40e/i40e_common.c b/drivers/net/ethernet/intel/i40e/i40e_common.c
index d4055037af89..45b90eb11adb 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_common.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_common.c
@@ -1113,7 +1113,7 @@ i40e_status i40e_read_pba_string(struct i40e_hw *hw, u8 *pba_num,
*/
pba_size--;
if (pba_num_size < (((u32)pba_size * 2) + 1)) {
- hw_dbg(hw, "Buffer to small for PBA data.\n");
+ hw_dbg(hw, "Buffer too small for PBA data.\n");
return I40E_ERR_PARAM;
}
diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c
index 2c5af6d4a6b1..8c3e753bfb9d 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_main.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_main.c
@@ -301,43 +301,24 @@ void i40e_service_event_schedule(struct i40e_pf *pf)
* device is munged, not just the one netdev port, so go for the full
* reset.
**/
-static void i40e_tx_timeout(struct net_device *netdev)
+static void i40e_tx_timeout(struct net_device *netdev, unsigned int txqueue)
{
struct i40e_netdev_priv *np = netdev_priv(netdev);
struct i40e_vsi *vsi = np->vsi;
struct i40e_pf *pf = vsi->back;
struct i40e_ring *tx_ring = NULL;
- unsigned int i, hung_queue = 0;
+ unsigned int i;
u32 head, val;
pf->tx_timeout_count++;
- /* find the stopped queue the same way the stack does */
- for (i = 0; i < netdev->num_tx_queues; i++) {
- struct netdev_queue *q;
- unsigned long trans_start;
-
- q = netdev_get_tx_queue(netdev, i);
- trans_start = q->trans_start;
- if (netif_xmit_stopped(q) &&
- time_after(jiffies,
- (trans_start + netdev->watchdog_timeo))) {
- hung_queue = i;
- break;
- }
- }
-
- if (i == netdev->num_tx_queues) {
- netdev_info(netdev, "tx_timeout: no netdev hung queue found\n");
- } else {
- /* now that we have an index, find the tx_ring struct */
- for (i = 0; i < vsi->num_queue_pairs; i++) {
- if (vsi->tx_rings[i] && vsi->tx_rings[i]->desc) {
- if (hung_queue ==
- vsi->tx_rings[i]->queue_index) {
- tx_ring = vsi->tx_rings[i];
- break;
- }
+ /* with txqueue index, find the tx_ring struct */
+ for (i = 0; i < vsi->num_queue_pairs; i++) {
+ if (vsi->tx_rings[i] && vsi->tx_rings[i]->desc) {
+ if (txqueue ==
+ vsi->tx_rings[i]->queue_index) {
+ tx_ring = vsi->tx_rings[i];
+ break;
}
}
}
@@ -363,14 +344,14 @@ static void i40e_tx_timeout(struct net_device *netdev)
val = rd32(&pf->hw, I40E_PFINT_DYN_CTL0);
netdev_info(netdev, "tx_timeout: VSI_seid: %d, Q %d, NTC: 0x%x, HWB: 0x%x, NTU: 0x%x, TAIL: 0x%x, INT: 0x%x\n",
- vsi->seid, hung_queue, tx_ring->next_to_clean,
+ vsi->seid, txqueue, tx_ring->next_to_clean,
head, tx_ring->next_to_use,
readl(tx_ring->tail), val);
}
pf->tx_timeout_last_recovery = jiffies;
- netdev_info(netdev, "tx_timeout recovery level %d, hung_queue %d\n",
- pf->tx_timeout_recovery_level, hung_queue);
+ netdev_info(netdev, "tx_timeout recovery level %d, txqueue %d\n",
+ pf->tx_timeout_recovery_level, txqueue);
switch (pf->tx_timeout_recovery_level) {
case 1:
diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
index 6a3f0fc56c3b..69523ac85639 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
@@ -2322,6 +2322,22 @@ static int i40e_ctrl_vf_rx_rings(struct i40e_vsi *vsi, unsigned long q_map,
}
/**
+ * i40e_vc_validate_vqs_bitmaps - validate Rx/Tx queue bitmaps from VIRTCHNL
+ * @vqs: virtchnl_queue_select structure containing bitmaps to validate
+ *
+ * Returns true if validation was successful, else false.
+ */
+static bool i40e_vc_validate_vqs_bitmaps(struct virtchnl_queue_select *vqs)
+{
+ if ((!vqs->rx_queues && !vqs->tx_queues) ||
+ vqs->rx_queues >= BIT(I40E_MAX_VF_QUEUES) ||
+ vqs->tx_queues >= BIT(I40E_MAX_VF_QUEUES))
+ return false;
+
+ return true;
+}
+
+/**
* i40e_vc_enable_queues_msg
* @vf: pointer to the VF info
* @msg: pointer to the msg buffer
@@ -2346,7 +2362,7 @@ static int i40e_vc_enable_queues_msg(struct i40e_vf *vf, u8 *msg)
goto error_param;
}
- if ((0 == vqs->rx_queues) && (0 == vqs->tx_queues)) {
+ if (!i40e_vc_validate_vqs_bitmaps(vqs)) {
aq_ret = I40E_ERR_PARAM;
goto error_param;
}
@@ -2408,9 +2424,7 @@ static int i40e_vc_disable_queues_msg(struct i40e_vf *vf, u8 *msg)
goto error_param;
}
- if ((vqs->rx_queues == 0 && vqs->tx_queues == 0) ||
- vqs->rx_queues > I40E_MAX_VF_QUEUES ||
- vqs->tx_queues > I40E_MAX_VF_QUEUES) {
+ if (!i40e_vc_validate_vqs_bitmaps(vqs)) {
aq_ret = I40E_ERR_PARAM;
goto error_param;
}
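
The helper treats rx_queues/tx_queues as bitmaps rather than counts: with I40E_MAX_VF_QUEUES queues (16 in this driver), bit 16 and above can never name a real queue, so a selector is valid only when it is non-zero and strictly below BIT(16). The old disable-path check compared the bitmap against the queue count (> I40E_MAX_VF_QUEUES), which let invalid high bits through; for example 0x1ffff has bit 16 set and now fails the check. A one-line restatement of the predicate (sketch):

#include <linux/bits.h>

/* valid iff at least one queue bit is set and no bit is >= max_queues */
static bool valid_queue_bitmap(u32 qmap, unsigned int max_queues)
{
	return qmap && qmap < BIT(max_queues);
}
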
diff --git a/drivers/net/ethernet/intel/i40e/i40e_xsk.c b/drivers/net/ethernet/intel/i40e/i40e_xsk.c
index f73cd917c44f..42058fad6a3c 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_xsk.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_xsk.c
@@ -269,7 +269,7 @@ static bool i40e_alloc_buffer_zc(struct i40e_ring *rx_ring,
bi->handle = xsk_umem_adjust_offset(umem, handle, umem->headroom);
- xsk_umem_discard_addr(umem);
+ xsk_umem_release_addr(umem);
return true;
}
@@ -306,7 +306,7 @@ static bool i40e_alloc_buffer_slow_zc(struct i40e_ring *rx_ring,
bi->handle = xsk_umem_adjust_offset(umem, handle, umem->headroom);
- xsk_umem_discard_addr_rq(umem);
+ xsk_umem_release_addr_rq(umem);
return true;
}
diff --git a/drivers/net/ethernet/intel/iavf/iavf.h b/drivers/net/ethernet/intel/iavf/iavf.h
index 29de3ae96ef2..bd1b1ed323f4 100644
--- a/drivers/net/ethernet/intel/iavf/iavf.h
+++ b/drivers/net/ethernet/intel/iavf/iavf.h
@@ -415,4 +415,6 @@ void iavf_enable_channels(struct iavf_adapter *adapter);
void iavf_disable_channels(struct iavf_adapter *adapter);
void iavf_add_cloud_filter(struct iavf_adapter *adapter);
void iavf_del_cloud_filter(struct iavf_adapter *adapter);
+struct iavf_mac_filter *iavf_add_filter(struct iavf_adapter *adapter,
+ const u8 *macaddr);
#endif /* _IAVF_H_ */
diff --git a/drivers/net/ethernet/intel/iavf/iavf_main.c b/drivers/net/ethernet/intel/iavf/iavf_main.c
index 821987da5698..62fe56ddcb6e 100644
--- a/drivers/net/ethernet/intel/iavf/iavf_main.c
+++ b/drivers/net/ethernet/intel/iavf/iavf_main.c
@@ -159,7 +159,7 @@ void iavf_schedule_reset(struct iavf_adapter *adapter)
* iavf_tx_timeout - Respond to a Tx Hang
* @netdev: network interface device structure
**/
-static void iavf_tx_timeout(struct net_device *netdev)
+static void iavf_tx_timeout(struct net_device *netdev, unsigned int txqueue)
{
struct iavf_adapter *adapter = netdev_priv(netdev);
@@ -743,9 +743,8 @@ iavf_mac_filter *iavf_find_filter(struct iavf_adapter *adapter,
*
* Returns ptr to the filter object or NULL when no memory available.
**/
-static struct
-iavf_mac_filter *iavf_add_filter(struct iavf_adapter *adapter,
- const u8 *macaddr)
+struct iavf_mac_filter *iavf_add_filter(struct iavf_adapter *adapter,
+ const u8 *macaddr)
{
struct iavf_mac_filter *f;
@@ -2065,9 +2064,9 @@ static void iavf_reset_task(struct work_struct *work)
struct virtchnl_vf_resource *vfres = adapter->vf_res;
struct net_device *netdev = adapter->netdev;
struct iavf_hw *hw = &adapter->hw;
+ struct iavf_mac_filter *f, *ftmp;
struct iavf_vlan_filter *vlf;
struct iavf_cloud_filter *cf;
- struct iavf_mac_filter *f;
u32 reg_val;
int i = 0, err;
bool running;
@@ -2181,6 +2180,16 @@ continue_reset:
spin_lock_bh(&adapter->mac_vlan_list_lock);
+ /* Delete filter for the current MAC address, it could have
+ * been changed by the PF via administratively set MAC.
+ * Will be re-added via VIRTCHNL_OP_GET_VF_RESOURCES.
+ */
+ list_for_each_entry_safe(f, ftmp, &adapter->mac_filter_list, list) {
+ if (ether_addr_equal(f->macaddr, adapter->hw.mac.addr)) {
+ list_del(&f->list);
+ kfree(f);
+ }
+ }
/* re-add all MAC filters */
list_for_each_entry(f, &adapter->mac_filter_list, list) {
f->add = true;
diff --git a/drivers/net/ethernet/intel/iavf/iavf_virtchnl.c b/drivers/net/ethernet/intel/iavf/iavf_virtchnl.c
index c46770eba320..1ab9cb339acb 100644
--- a/drivers/net/ethernet/intel/iavf/iavf_virtchnl.c
+++ b/drivers/net/ethernet/intel/iavf/iavf_virtchnl.c
@@ -1359,6 +1359,9 @@ void iavf_virtchnl_completion(struct iavf_adapter *adapter,
ether_addr_copy(netdev->perm_addr,
adapter->hw.mac.addr);
}
+ spin_lock_bh(&adapter->mac_vlan_list_lock);
+ iavf_add_filter(adapter, adapter->hw.mac.addr);
+ spin_unlock_bh(&adapter->mac_vlan_list_lock);
iavf_process_config(adapter);
}
break;
diff --git a/drivers/net/ethernet/intel/ice/Makefile b/drivers/net/ethernet/intel/ice/Makefile
index 7cb829132d28..59544b0fc086 100644
--- a/drivers/net/ethernet/intel/ice/Makefile
+++ b/drivers/net/ethernet/intel/ice/Makefile
@@ -17,7 +17,8 @@ ice-y := ice_main.o \
ice_lib.o \
ice_txrx_lib.o \
ice_txrx.o \
- ice_flex_pipe.o \
+ ice_flex_pipe.o \
+ ice_flow.o \
ice_ethtool.o
ice-$(CONFIG_PCI_IOV) += ice_virtchnl_pf.o ice_sriov.o
ice-$(CONFIG_DCB) += ice_dcb.o ice_dcb_nl.o ice_dcb_lib.o
diff --git a/drivers/net/ethernet/intel/ice/ice.h b/drivers/net/ethernet/intel/ice/ice.h
index f972dce8aebb..cb10abb14e11 100644
--- a/drivers/net/ethernet/intel/ice/ice.h
+++ b/drivers/net/ethernet/intel/ice/ice.h
@@ -174,6 +174,8 @@ struct ice_sw {
struct ice_pf *pf;
u16 sw_id; /* switch ID for this switch */
u16 bridge_mode; /* VEB/VEPA/Port Virtualizer */
+ struct ice_vsi *dflt_vsi; /* default VSI for this switch */
+ u8 dflt_vsi_ena:1; /* true if above dflt_vsi is enabled */
};
enum ice_state {
@@ -275,6 +277,7 @@ struct ice_vsi {
u8 current_isup:1; /* Sync 'link up' logging */
u8 stat_offsets_loaded:1;
u8 vlan_ena:1;
+ u16 num_vlan;
/* queue information */
u8 tx_mapping_mode; /* ICE_MAP_MODE_[CONTIG|SCATTER] */
@@ -462,12 +465,13 @@ static inline void ice_set_ring_xdp(struct ice_ring *ring)
static inline struct xdp_umem *ice_xsk_umem(struct ice_ring *ring)
{
struct xdp_umem **umems = ring->vsi->xsk_umems;
- int qid = ring->q_index;
+ u16 qid = ring->q_index;
if (ice_ring_is_xdp(ring))
qid -= ring->vsi->num_xdp_txq;
- if (!umems || !umems[qid] || !ice_is_xdp_ena_vsi(ring->vsi))
+ if (qid >= ring->vsi->num_xsk_umems || !umems || !umems[qid] ||
+ !ice_is_xdp_ena_vsi(ring->vsi))
return NULL;
return umems[qid];
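
The extra qid bound check matters because q_index is unsigned: for an XDP ring, subtracting num_xdp_txq from a small q_index would wrap around to a huge value, and the old code would then index past the umem array. Comparing qid against num_xsk_umems catches both the wrap and ordinary out-of-range indices. A distilled sketch:

struct xdp_umem;

static struct xdp_umem *my_get_umem(struct xdp_umem **umems, u16 num_umems,
				    u16 qid)
{
	/* a wrapped-around qid is simply a very large u16 and fails here */
	if (!umems || qid >= num_umems)
		return NULL;

	return umems[qid];
}
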
diff --git a/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h b/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h
index 5421fc413f94..4459bc564b11 100644
--- a/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h
+++ b/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h
@@ -232,6 +232,13 @@ struct ice_aqc_get_sw_cfg_resp {
*/
#define ICE_AQC_RES_TYPE_VSI_LIST_REP 0x03
#define ICE_AQC_RES_TYPE_VSI_LIST_PRUNE 0x04
+#define ICE_AQC_RES_TYPE_HASH_PROF_BLDR_PROFID 0x60
+#define ICE_AQC_RES_TYPE_HASH_PROF_BLDR_TCAM 0x61
+
+#define ICE_AQC_RES_TYPE_FLAG_SCAN_BOTTOM BIT(12)
+#define ICE_AQC_RES_TYPE_FLAG_IGNORE_INDEX BIT(13)
+
+#define ICE_AQC_RES_TYPE_FLAG_DEDICATED 0x00
/* Allocate Resources command (indirect 0x0208)
* Free Resources command (indirect 0x0209)
@@ -1849,6 +1856,7 @@ enum ice_adminq_opc {
/* package commands */
ice_aqc_opc_download_pkg = 0x0C40,
+ ice_aqc_opc_update_pkg = 0x0C42,
ice_aqc_opc_get_pkg_info_list = 0x0C43,
/* debug commands */
diff --git a/drivers/net/ethernet/intel/ice/ice_base.c b/drivers/net/ethernet/intel/ice/ice_base.c
index 77d6a0291e97..d8e975cceb21 100644
--- a/drivers/net/ethernet/intel/ice/ice_base.c
+++ b/drivers/net/ethernet/intel/ice/ice_base.c
@@ -93,7 +93,8 @@ static int ice_pf_rxq_wait(struct ice_pf *pf, int pf_q, bool ena)
* @vsi: the VSI being configured
* @v_idx: index of the vector in the VSI struct
*
- * We allocate one q_vector. If allocation fails we return -ENOMEM.
+ * We allocate one q_vector and set the default ITR value associated
+ * with this q_vector. If allocation fails we return -ENOMEM.
*/
static int ice_vsi_alloc_q_vector(struct ice_vsi *vsi, int v_idx)
{
@@ -108,6 +109,8 @@ static int ice_vsi_alloc_q_vector(struct ice_vsi *vsi, int v_idx)
q_vector->vsi = vsi;
q_vector->v_idx = v_idx;
+ q_vector->tx.itr_setting = ICE_DFLT_TX_ITR;
+ q_vector->rx.itr_setting = ICE_DFLT_RX_ITR;
if (vsi->type == ICE_VSI_VF)
goto out;
/* only set affinity_mask if the CPU is online */
@@ -299,6 +302,7 @@ int ice_setup_rx_ctx(struct ice_ring *ring)
if (ring->vsi->type == ICE_VSI_PF) {
if (!xdp_rxq_info_is_reg(&ring->xdp_rxq))
+ /* coverity[check_return] */
xdp_rxq_info_reg(&ring->xdp_rxq, ring->netdev,
ring->q_index);
@@ -323,7 +327,9 @@ int ice_setup_rx_ctx(struct ice_ring *ring)
dev_info(&vsi->back->pdev->dev, "Registered XDP mem model MEM_TYPE_ZERO_COPY on Rx ring %d\n",
ring->q_index);
} else {
+ ring->zca.free = NULL;
if (!xdp_rxq_info_is_reg(&ring->xdp_rxq))
+ /* coverity[check_return] */
xdp_rxq_info_reg(&ring->xdp_rxq,
ring->netdev,
ring->q_index);
@@ -674,10 +680,6 @@ void ice_cfg_itr(struct ice_hw *hw, struct ice_q_vector *q_vector)
if (q_vector->num_ring_rx) {
struct ice_ring_container *rc = &q_vector->rx;
- /* if this value is set then don't overwrite with default */
- if (!rc->itr_setting)
- rc->itr_setting = ICE_DFLT_RX_ITR;
-
rc->target_itr = ITR_TO_REG(rc->itr_setting);
rc->next_update = jiffies + 1;
rc->current_itr = rc->target_itr;
@@ -688,10 +690,6 @@ void ice_cfg_itr(struct ice_hw *hw, struct ice_q_vector *q_vector)
if (q_vector->num_ring_tx) {
struct ice_ring_container *rc = &q_vector->tx;
- /* if this value is set then don't overwrite with default */
- if (!rc->itr_setting)
- rc->itr_setting = ICE_DFLT_TX_ITR;
-
rc->target_itr = ITR_TO_REG(rc->itr_setting);
rc->next_update = jiffies + 1;
rc->current_itr = rc->target_itr;
diff --git a/drivers/net/ethernet/intel/ice/ice_common.c b/drivers/net/ethernet/intel/ice/ice_common.c
index fb1d930470c7..0207e28c2682 100644
--- a/drivers/net/ethernet/intel/ice/ice_common.c
+++ b/drivers/net/ethernet/intel/ice/ice_common.c
@@ -4,28 +4,10 @@
#include "ice_common.h"
#include "ice_sched.h"
#include "ice_adminq_cmd.h"
+#include "ice_flow.h"
#define ICE_PF_RESET_WAIT_COUNT 200
-#define ICE_PROG_FLEX_ENTRY(hw, rxdid, mdid, idx) \
- wr32((hw), GLFLXP_RXDID_FLX_WRD_##idx(rxdid), \
- ((ICE_RX_OPC_MDID << \
- GLFLXP_RXDID_FLX_WRD_##idx##_RXDID_OPCODE_S) & \
- GLFLXP_RXDID_FLX_WRD_##idx##_RXDID_OPCODE_M) | \
- (((mdid) << GLFLXP_RXDID_FLX_WRD_##idx##_PROT_MDID_S) & \
- GLFLXP_RXDID_FLX_WRD_##idx##_PROT_MDID_M))
-
-#define ICE_PROG_FLG_ENTRY(hw, rxdid, flg_0, flg_1, flg_2, flg_3, idx) \
- wr32((hw), GLFLXP_RXDID_FLAGS(rxdid, idx), \
- (((flg_0) << GLFLXP_RXDID_FLAGS_FLEXIFLAG_4N_S) & \
- GLFLXP_RXDID_FLAGS_FLEXIFLAG_4N_M) | \
- (((flg_1) << GLFLXP_RXDID_FLAGS_FLEXIFLAG_4N_1_S) & \
- GLFLXP_RXDID_FLAGS_FLEXIFLAG_4N_1_M) | \
- (((flg_2) << GLFLXP_RXDID_FLAGS_FLEXIFLAG_4N_2_S) & \
- GLFLXP_RXDID_FLAGS_FLEXIFLAG_4N_2_M) | \
- (((flg_3) << GLFLXP_RXDID_FLAGS_FLEXIFLAG_4N_3_S) & \
- GLFLXP_RXDID_FLAGS_FLEXIFLAG_4N_3_M))
-
/**
* ice_set_mac_type - Sets MAC type
* @hw: pointer to the HW structure
@@ -348,88 +330,6 @@ ice_aq_get_link_info(struct ice_port_info *pi, bool ena_lse,
}
/**
- * ice_init_flex_flags
- * @hw: pointer to the hardware structure
- * @prof_id: Rx Descriptor Builder profile ID
- *
- * Function to initialize Rx flex flags
- */
-static void ice_init_flex_flags(struct ice_hw *hw, enum ice_rxdid prof_id)
-{
- u8 idx = 0;
-
- /* Flex-flag fields (0-2) are programmed with FLG64 bits with layout:
- * flexiflags0[5:0] - TCP flags, is_packet_fragmented, is_packet_UDP_GRE
- * flexiflags1[3:0] - Not used for flag programming
- * flexiflags2[7:0] - Tunnel and VLAN types
- * 2 invalid fields in last index
- */
- switch (prof_id) {
- /* Rx flex flags are currently programmed for the NIC profiles only.
- * Different flag bit programming configurations can be added per
- * profile as needed.
- */
- case ICE_RXDID_FLEX_NIC:
- case ICE_RXDID_FLEX_NIC_2:
- ICE_PROG_FLG_ENTRY(hw, prof_id, ICE_FLG_PKT_FRG,
- ICE_FLG_UDP_GRE, ICE_FLG_PKT_DSI,
- ICE_FLG_FIN, idx++);
- /* flex flag 1 is not used for flexi-flag programming, skipping
- * these four FLG64 bits.
- */
- ICE_PROG_FLG_ENTRY(hw, prof_id, ICE_FLG_SYN, ICE_FLG_RST,
- ICE_FLG_PKT_DSI, ICE_FLG_PKT_DSI, idx++);
- ICE_PROG_FLG_ENTRY(hw, prof_id, ICE_FLG_PKT_DSI,
- ICE_FLG_PKT_DSI, ICE_FLG_EVLAN_x8100,
- ICE_FLG_EVLAN_x9100, idx++);
- ICE_PROG_FLG_ENTRY(hw, prof_id, ICE_FLG_VLAN_x8100,
- ICE_FLG_TNL_VLAN, ICE_FLG_TNL_MAC,
- ICE_FLG_TNL0, idx++);
- ICE_PROG_FLG_ENTRY(hw, prof_id, ICE_FLG_TNL1, ICE_FLG_TNL2,
- ICE_FLG_PKT_DSI, ICE_FLG_PKT_DSI, idx);
- break;
-
- default:
- ice_debug(hw, ICE_DBG_INIT,
- "Flag programming for profile ID %d not supported\n",
- prof_id);
- }
-}
-
-/**
- * ice_init_flex_flds
- * @hw: pointer to the hardware structure
- * @prof_id: Rx Descriptor Builder profile ID
- *
- * Function to initialize flex descriptors
- */
-static void ice_init_flex_flds(struct ice_hw *hw, enum ice_rxdid prof_id)
-{
- enum ice_flex_rx_mdid mdid;
-
- switch (prof_id) {
- case ICE_RXDID_FLEX_NIC:
- case ICE_RXDID_FLEX_NIC_2:
- ICE_PROG_FLEX_ENTRY(hw, prof_id, ICE_RX_MDID_HASH_LOW, 0);
- ICE_PROG_FLEX_ENTRY(hw, prof_id, ICE_RX_MDID_HASH_HIGH, 1);
- ICE_PROG_FLEX_ENTRY(hw, prof_id, ICE_RX_MDID_FLOW_ID_LOWER, 2);
-
- mdid = (prof_id == ICE_RXDID_FLEX_NIC_2) ?
- ICE_RX_MDID_SRC_VSI : ICE_RX_MDID_FLOW_ID_HIGH;
-
- ICE_PROG_FLEX_ENTRY(hw, prof_id, mdid, 3);
-
- ice_init_flex_flags(hw, prof_id);
- break;
-
- default:
- ice_debug(hw, ICE_DBG_INIT,
- "Field init for profile ID %d not supported\n",
- prof_id);
- }
-}
-
-/**
* ice_init_fltr_mgmt_struct - initializes filter management list and locks
* @hw: pointer to the HW struct
*/
@@ -882,9 +782,6 @@ enum ice_status ice_init_hw(struct ice_hw *hw)
if (status)
goto err_unroll_fltr_mgmt_struct;
-
- ice_init_flex_flds(hw, ICE_RXDID_FLEX_NIC);
- ice_init_flex_flds(hw, ICE_RXDID_FLEX_NIC_2);
status = ice_init_hw_tbls(hw);
if (status)
goto err_unroll_fltr_mgmt_struct;
@@ -1601,6 +1498,114 @@ void ice_release_res(struct ice_hw *hw, enum ice_aq_res_ids res)
}
/**
+ * ice_aq_alloc_free_res - command to allocate/free resources
+ * @hw: pointer to the HW struct
+ * @num_entries: number of resource entries in buffer
+ * @buf: Indirect buffer to hold data parameters and response
+ * @buf_size: size of buffer for indirect commands
+ * @opc: pass in the command opcode
+ * @cd: pointer to command details structure or NULL
+ *
+ * Helper function to allocate/free resources using the admin queue commands
+ */
+enum ice_status
+ice_aq_alloc_free_res(struct ice_hw *hw, u16 num_entries,
+ struct ice_aqc_alloc_free_res_elem *buf, u16 buf_size,
+ enum ice_adminq_opc opc, struct ice_sq_cd *cd)
+{
+ struct ice_aqc_alloc_free_res_cmd *cmd;
+ struct ice_aq_desc desc;
+
+ cmd = &desc.params.sw_res_ctrl;
+
+ if (!buf)
+ return ICE_ERR_PARAM;
+
+ if (buf_size < (num_entries * sizeof(buf->elem[0])))
+ return ICE_ERR_PARAM;
+
+ ice_fill_dflt_direct_cmd_desc(&desc, opc);
+
+ desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
+
+ cmd->num_entries = cpu_to_le16(num_entries);
+
+ return ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
+}
+
+/**
+ * ice_alloc_hw_res - allocate resource
+ * @hw: pointer to the HW struct
+ * @type: type of resource
+ * @num: number of resources to allocate
+ * @btm: allocate from bottom
+ * @res: pointer to array that will receive the resources
+ */
+enum ice_status
+ice_alloc_hw_res(struct ice_hw *hw, u16 type, u16 num, bool btm, u16 *res)
+{
+ struct ice_aqc_alloc_free_res_elem *buf;
+ enum ice_status status;
+ u16 buf_len;
+
+ buf_len = struct_size(buf, elem, num - 1);
+ buf = kzalloc(buf_len, GFP_KERNEL);
+ if (!buf)
+ return ICE_ERR_NO_MEMORY;
+
+ /* Prepare buffer to allocate resource. */
+ buf->num_elems = cpu_to_le16(num);
+ buf->res_type = cpu_to_le16(type | ICE_AQC_RES_TYPE_FLAG_DEDICATED |
+ ICE_AQC_RES_TYPE_FLAG_IGNORE_INDEX);
+ if (btm)
+ buf->res_type |= cpu_to_le16(ICE_AQC_RES_TYPE_FLAG_SCAN_BOTTOM);
+
+ status = ice_aq_alloc_free_res(hw, 1, buf, buf_len,
+ ice_aqc_opc_alloc_res, NULL);
+ if (status)
+ goto ice_alloc_res_exit;
+
+ memcpy(res, buf->elem, sizeof(buf->elem) * num);
+
+ice_alloc_res_exit:
+ kfree(buf);
+ return status;
+}
+
+/**
+ * ice_free_hw_res - free allocated HW resource
+ * @hw: pointer to the HW struct
+ * @type: type of resource to free
+ * @num: number of resources
+ * @res: pointer to array that contains the resources to free
+ */
+enum ice_status
+ice_free_hw_res(struct ice_hw *hw, u16 type, u16 num, u16 *res)
+{
+ struct ice_aqc_alloc_free_res_elem *buf;
+ enum ice_status status;
+ u16 buf_len;
+
+ buf_len = struct_size(buf, elem, num - 1);
+ buf = kzalloc(buf_len, GFP_KERNEL);
+ if (!buf)
+ return ICE_ERR_NO_MEMORY;
+
+ /* Prepare buffer to free resource. */
+ buf->num_elems = cpu_to_le16(num);
+ buf->res_type = cpu_to_le16(type);
+ memcpy(buf->elem, res, sizeof(buf->elem) * num);
+
+ status = ice_aq_alloc_free_res(hw, num, buf, buf_len,
+ ice_aqc_opc_free_res, NULL);
+ if (status)
+ ice_debug(hw, ICE_DBG_SW, "CQ CMD Buffer:\n");
+
+ kfree(buf);
+ return status;
+}
+
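Taken together, ice_alloc_hw_res() and ice_free_hw_res() give callers a simple allocate/use/free pattern over the admin queue. A minimal sketch, where 'res_type' is an assumed ICE_AQC_RES_TYPE_* value and error handling is illustrative only:

    /* hypothetical caller: grab one dedicated resource, then return it */
    enum ice_status status;
    u16 res_handle;

    status = ice_alloc_hw_res(hw, res_type, 1, true, &res_handle);
    if (status)
            return status;

    /* ... program hardware using res_handle ... */

    status = ice_free_hw_res(hw, res_type, 1, &res_handle);
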
+/**
* ice_get_num_per_func - determine number of resources per PF
* @hw: pointer to the HW structure
* @max: value to be evenly split between each PF
@@ -3510,7 +3515,10 @@ enum ice_status ice_replay_vsi(struct ice_hw *hw, u16 vsi_handle)
if (status)
return status;
}
-
+ /* Replay per VSI all RSS configurations */
+ status = ice_replay_rss_cfg(hw, vsi_handle);
+ if (status)
+ return status;
/* Replay per VSI all filters */
status = ice_replay_vsi_all_fltr(hw, vsi_handle);
return status;
diff --git a/drivers/net/ethernet/intel/ice/ice_common.h b/drivers/net/ethernet/intel/ice/ice_common.h
index b22aa561e253..b5c013fdaaf9 100644
--- a/drivers/net/ethernet/intel/ice/ice_common.h
+++ b/drivers/net/ethernet/intel/ice/ice_common.h
@@ -34,10 +34,18 @@ enum ice_status
ice_acquire_res(struct ice_hw *hw, enum ice_aq_res_ids res,
enum ice_aq_res_access_type access, u32 timeout);
void ice_release_res(struct ice_hw *hw, enum ice_aq_res_ids res);
+enum ice_status
+ice_alloc_hw_res(struct ice_hw *hw, u16 type, u16 num, bool btm, u16 *res);
+enum ice_status
+ice_free_hw_res(struct ice_hw *hw, u16 type, u16 num, u16 *res);
enum ice_status ice_init_nvm(struct ice_hw *hw);
enum ice_status
ice_read_sr_buf(struct ice_hw *hw, u16 offset, u16 *words, u16 *data);
enum ice_status
+ice_aq_alloc_free_res(struct ice_hw *hw, u16 num_entries,
+ struct ice_aqc_alloc_free_res_elem *buf, u16 buf_size,
+ enum ice_adminq_opc opc, struct ice_sq_cd *cd);
+enum ice_status
ice_sq_send_cmd(struct ice_hw *hw, struct ice_ctl_q_info *cq,
struct ice_aq_desc *desc, void *buf, u16 buf_size,
struct ice_sq_cd *cd);
diff --git a/drivers/net/ethernet/intel/ice/ice_dcb_lib.c b/drivers/net/ethernet/intel/ice/ice_dcb_lib.c
index d3d3ec29def9..0664e5b8d130 100644
--- a/drivers/net/ethernet/intel/ice/ice_dcb_lib.c
+++ b/drivers/net/ethernet/intel/ice/ice_dcb_lib.c
@@ -396,6 +396,12 @@ dcb_error:
prev_cfg->etscfg.tcbwtable[0] = ICE_TC_MAX_BW;
prev_cfg->etscfg.tsatable[0] = ICE_IEEE_TSA_ETS;
memcpy(&prev_cfg->etsrec, &prev_cfg->etscfg, sizeof(prev_cfg->etsrec));
+ /* Coverity warns the return code of ice_pf_dcb_cfg() is not checked
+ * here as is done for other calls to that function. That check is
+ * not necessary since this is in this function's error cleanup path.
+ * Suppress the Coverity warning with the following comment...
+ */
+ /* coverity[check_return] */
ice_pf_dcb_cfg(pf, prev_cfg, false);
kfree(prev_cfg);
}
diff --git a/drivers/net/ethernet/intel/ice/ice_devids.h b/drivers/net/ethernet/intel/ice/ice_devids.h
index f8d5c661d0ba..ce63017c56c7 100644
--- a/drivers/net/ethernet/intel/ice/ice_devids.h
+++ b/drivers/net/ethernet/intel/ice/ice_devids.h
@@ -11,5 +11,23 @@
#define ICE_DEV_ID_E810C_QSFP 0x1592
/* Intel(R) Ethernet Controller E810-C for SFP */
#define ICE_DEV_ID_E810C_SFP 0x1593
+/* Intel(R) Ethernet Connection E822-C for backplane */
+#define ICE_DEV_ID_E822C_BACKPLANE 0x1890
+/* Intel(R) Ethernet Connection E822-C for QSFP */
+#define ICE_DEV_ID_E822C_QSFP 0x1891
+/* Intel(R) Ethernet Connection E822-C for SFP */
+#define ICE_DEV_ID_E822C_SFP 0x1892
+/* Intel(R) Ethernet Connection E822-C/X557-AT 10GBASE-T */
+#define ICE_DEV_ID_E822C_10G_BASE_T 0x1893
+/* Intel(R) Ethernet Connection E822-C 1GbE */
+#define ICE_DEV_ID_E822C_SGMII 0x1894
+/* Intel(R) Ethernet Connection E822-X for backplane */
+#define ICE_DEV_ID_E822X_BACKPLANE 0x1897
+/* Intel(R) Ethernet Connection E822-L for SFP */
+#define ICE_DEV_ID_E822L_SFP 0x1898
+/* Intel(R) Ethernet Connection E822-L/X557-AT 10GBASE-T */
+#define ICE_DEV_ID_E822L_10G_BASE_T 0x1899
+/* Intel(R) Ethernet Connection E822-L 1GbE */
+#define ICE_DEV_ID_E822L_SGMII 0x189A
#endif /* _ICE_DEVIDS_H_ */
diff --git a/drivers/net/ethernet/intel/ice/ice_ethtool.c b/drivers/net/ethernet/intel/ice/ice_ethtool.c
index 9ebd93e79aeb..90c6a3ca20c9 100644
--- a/drivers/net/ethernet/intel/ice/ice_ethtool.c
+++ b/drivers/net/ethernet/intel/ice/ice_ethtool.c
@@ -4,6 +4,7 @@
/* ethtool support for ice */
#include "ice.h"
+#include "ice_flow.h"
#include "ice_lib.h"
#include "ice_dcb_lib.h"
@@ -283,12 +284,15 @@ out:
*/
static bool ice_active_vfs(struct ice_pf *pf)
{
- struct ice_vf *vf = pf->vf;
int i;
- for (i = 0; i < pf->num_alloc_vfs; i++, vf++)
+ ice_for_each_vf(pf, i) {
+ struct ice_vf *vf = &pf->vf[i];
+
if (test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states))
return true;
+ }
+
return false;
}
@@ -2531,6 +2535,243 @@ done:
}
/**
+ * ice_parse_hdrs - parses headers from RSS hash input
+ * @nfc: ethtool rxnfc command
+ *
+ * This function parses the rxnfc command and returns the intended
+ * header types for RSS configuration
+ */
+static u32 ice_parse_hdrs(struct ethtool_rxnfc *nfc)
+{
+ u32 hdrs = ICE_FLOW_SEG_HDR_NONE;
+
+ switch (nfc->flow_type) {
+ case TCP_V4_FLOW:
+ hdrs |= ICE_FLOW_SEG_HDR_TCP | ICE_FLOW_SEG_HDR_IPV4;
+ break;
+ case UDP_V4_FLOW:
+ hdrs |= ICE_FLOW_SEG_HDR_UDP | ICE_FLOW_SEG_HDR_IPV4;
+ break;
+ case SCTP_V4_FLOW:
+ hdrs |= ICE_FLOW_SEG_HDR_SCTP | ICE_FLOW_SEG_HDR_IPV4;
+ break;
+ case TCP_V6_FLOW:
+ hdrs |= ICE_FLOW_SEG_HDR_TCP | ICE_FLOW_SEG_HDR_IPV6;
+ break;
+ case UDP_V6_FLOW:
+ hdrs |= ICE_FLOW_SEG_HDR_UDP | ICE_FLOW_SEG_HDR_IPV6;
+ break;
+ case SCTP_V6_FLOW:
+ hdrs |= ICE_FLOW_SEG_HDR_SCTP | ICE_FLOW_SEG_HDR_IPV6;
+ break;
+ default:
+ break;
+ }
+ return hdrs;
+}
+
+#define ICE_FLOW_HASH_FLD_IPV4_SA BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_SA)
+#define ICE_FLOW_HASH_FLD_IPV6_SA BIT_ULL(ICE_FLOW_FIELD_IDX_IPV6_SA)
+#define ICE_FLOW_HASH_FLD_IPV4_DA BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_DA)
+#define ICE_FLOW_HASH_FLD_IPV6_DA BIT_ULL(ICE_FLOW_FIELD_IDX_IPV6_DA)
+#define ICE_FLOW_HASH_FLD_TCP_SRC_PORT BIT_ULL(ICE_FLOW_FIELD_IDX_TCP_SRC_PORT)
+#define ICE_FLOW_HASH_FLD_TCP_DST_PORT BIT_ULL(ICE_FLOW_FIELD_IDX_TCP_DST_PORT)
+#define ICE_FLOW_HASH_FLD_UDP_SRC_PORT BIT_ULL(ICE_FLOW_FIELD_IDX_UDP_SRC_PORT)
+#define ICE_FLOW_HASH_FLD_UDP_DST_PORT BIT_ULL(ICE_FLOW_FIELD_IDX_UDP_DST_PORT)
+#define ICE_FLOW_HASH_FLD_SCTP_SRC_PORT \
+ BIT_ULL(ICE_FLOW_FIELD_IDX_SCTP_SRC_PORT)
+#define ICE_FLOW_HASH_FLD_SCTP_DST_PORT \
+ BIT_ULL(ICE_FLOW_FIELD_IDX_SCTP_DST_PORT)
+
+/**
+ * ice_parse_hash_flds - parses hash fields from RSS hash input
+ * @nfc: ethtool rxnfc command
+ *
+ * This function parses the rxnfc command and returns the intended
+ * hash fields for RSS configuration
+ */
+static u64 ice_parse_hash_flds(struct ethtool_rxnfc *nfc)
+{
+ u64 hfld = ICE_HASH_INVALID;
+
+ if (nfc->data & RXH_IP_SRC || nfc->data & RXH_IP_DST) {
+ switch (nfc->flow_type) {
+ case TCP_V4_FLOW:
+ case UDP_V4_FLOW:
+ case SCTP_V4_FLOW:
+ if (nfc->data & RXH_IP_SRC)
+ hfld |= ICE_FLOW_HASH_FLD_IPV4_SA;
+ if (nfc->data & RXH_IP_DST)
+ hfld |= ICE_FLOW_HASH_FLD_IPV4_DA;
+ break;
+ case TCP_V6_FLOW:
+ case UDP_V6_FLOW:
+ case SCTP_V6_FLOW:
+ if (nfc->data & RXH_IP_SRC)
+ hfld |= ICE_FLOW_HASH_FLD_IPV6_SA;
+ if (nfc->data & RXH_IP_DST)
+ hfld |= ICE_FLOW_HASH_FLD_IPV6_DA;
+ break;
+ default:
+ break;
+ }
+ }
+
+ if (nfc->data & RXH_L4_B_0_1 || nfc->data & RXH_L4_B_2_3) {
+ switch (nfc->flow_type) {
+ case TCP_V4_FLOW:
+ case TCP_V6_FLOW:
+ if (nfc->data & RXH_L4_B_0_1)
+ hfld |= ICE_FLOW_HASH_FLD_TCP_SRC_PORT;
+ if (nfc->data & RXH_L4_B_2_3)
+ hfld |= ICE_FLOW_HASH_FLD_TCP_DST_PORT;
+ break;
+ case UDP_V4_FLOW:
+ case UDP_V6_FLOW:
+ if (nfc->data & RXH_L4_B_0_1)
+ hfld |= ICE_FLOW_HASH_FLD_UDP_SRC_PORT;
+ if (nfc->data & RXH_L4_B_2_3)
+ hfld |= ICE_FLOW_HASH_FLD_UDP_DST_PORT;
+ break;
+ case SCTP_V4_FLOW:
+ case SCTP_V6_FLOW:
+ if (nfc->data & RXH_L4_B_0_1)
+ hfld |= ICE_FLOW_HASH_FLD_SCTP_SRC_PORT;
+ if (nfc->data & RXH_L4_B_2_3)
+ hfld |= ICE_FLOW_HASH_FLD_SCTP_DST_PORT;
+ break;
+ default:
+ break;
+ }
+ }
+
+ return hfld;
+}
+
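To make the two parsers above concrete, here is a hedged sketch (not driver code) of what an ETHTOOL_SRXFH request for TCP-over-IPv4 hashing on both addresses and both port halves produces; the values follow directly from the switch statements above:

    struct ethtool_rxnfc nfc = {
            .flow_type = TCP_V4_FLOW,
            .data = RXH_IP_SRC | RXH_IP_DST | RXH_L4_B_0_1 | RXH_L4_B_2_3,
    };

    /* hdrs == ICE_FLOW_SEG_HDR_TCP | ICE_FLOW_SEG_HDR_IPV4 */
    u32 hdrs = ice_parse_hdrs(&nfc);

    /* hfld == IPV4_SA | IPV4_DA | TCP_SRC_PORT | TCP_DST_PORT hash bits */
    u64 hfld = ice_parse_hash_flds(&nfc);
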
+/**
+ * ice_set_rss_hash_opt - Enable/Disable flow types for RSS hash
+ * @vsi: the VSI being configured
+ * @nfc: ethtool rxnfc command
+ *
+ * Returns 0 if the flow input set is supported, -EINVAL otherwise.
+ */
+static int
+ice_set_rss_hash_opt(struct ice_vsi *vsi, struct ethtool_rxnfc *nfc)
+{
+ struct ice_pf *pf = vsi->back;
+ enum ice_status status;
+ struct device *dev;
+ u64 hashed_flds;
+ u32 hdrs;
+
+ dev = ice_pf_to_dev(pf);
+ if (ice_is_safe_mode(pf)) {
+ dev_dbg(dev, "Advanced RSS disabled. Package download failed, vsi num = %d\n",
+ vsi->vsi_num);
+ return -EINVAL;
+ }
+
+ hashed_flds = ice_parse_hash_flds(nfc);
+ if (hashed_flds == ICE_HASH_INVALID) {
+ dev_dbg(dev, "Invalid hash fields, vsi num = %d\n",
+ vsi->vsi_num);
+ return -EINVAL;
+ }
+
+ hdrs = ice_parse_hdrs(nfc);
+ if (hdrs == ICE_FLOW_SEG_HDR_NONE) {
+ dev_dbg(dev, "Header type is not valid, vsi num = %d\n",
+ vsi->vsi_num);
+ return -EINVAL;
+ }
+
+ status = ice_add_rss_cfg(&pf->hw, vsi->idx, hashed_flds, hdrs);
+ if (status) {
+ dev_dbg(dev, "ice_add_rss_cfg failed, vsi num = %d, error = %d\n",
+ vsi->vsi_num, status);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+/**
+ * ice_get_rss_hash_opt - Retrieve hash fields for a given flow-type
+ * @vsi: the VSI being configured
+ * @nfc: ethtool rxnfc command
+ */
+static void
+ice_get_rss_hash_opt(struct ice_vsi *vsi, struct ethtool_rxnfc *nfc)
+{
+ struct ice_pf *pf = vsi->back;
+ struct device *dev;
+ u64 hash_flds;
+ u32 hdrs;
+
+ dev = ice_pf_to_dev(pf);
+
+ nfc->data = 0;
+ if (ice_is_safe_mode(pf)) {
+ dev_dbg(dev, "Advanced RSS disabled. Package download failed, vsi num = %d\n",
+ vsi->vsi_num);
+ return;
+ }
+
+ hdrs = ice_parse_hdrs(nfc);
+ if (hdrs == ICE_FLOW_SEG_HDR_NONE) {
+ dev_dbg(dev, "Header type is not valid, vsi num = %d\n",
+ vsi->vsi_num);
+ return;
+ }
+
+ hash_flds = ice_get_rss_cfg(&pf->hw, vsi->idx, hdrs);
+ if (hash_flds == ICE_HASH_INVALID) {
+ dev_dbg(dev, "No hash fields found for the given header type, vsi num = %d\n",
+ vsi->vsi_num);
+ return;
+ }
+
+ if (hash_flds & ICE_FLOW_HASH_FLD_IPV4_SA ||
+ hash_flds & ICE_FLOW_HASH_FLD_IPV6_SA)
+ nfc->data |= (u64)RXH_IP_SRC;
+
+ if (hash_flds & ICE_FLOW_HASH_FLD_IPV4_DA ||
+ hash_flds & ICE_FLOW_HASH_FLD_IPV6_DA)
+ nfc->data |= (u64)RXH_IP_DST;
+
+ if (hash_flds & ICE_FLOW_HASH_FLD_TCP_SRC_PORT ||
+ hash_flds & ICE_FLOW_HASH_FLD_UDP_SRC_PORT ||
+ hash_flds & ICE_FLOW_HASH_FLD_SCTP_SRC_PORT)
+ nfc->data |= (u64)RXH_L4_B_0_1;
+
+ if (hash_flds & ICE_FLOW_HASH_FLD_TCP_DST_PORT ||
+ hash_flds & ICE_FLOW_HASH_FLD_UDP_DST_PORT ||
+ hash_flds & ICE_FLOW_HASH_FLD_SCTP_DST_PORT)
+ nfc->data |= (u64)RXH_L4_B_2_3;
+}
+
+/**
+ * ice_set_rxnfc - command to set Rx flow rules
+ * @netdev: network interface device structure
+ * @cmd: ethtool rxnfc command
+ *
+ * Returns 0 for success and negative values for errors
+ */
+static int ice_set_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *cmd)
+{
+ struct ice_netdev_priv *np = netdev_priv(netdev);
+ struct ice_vsi *vsi = np->vsi;
+
+ switch (cmd->cmd) {
+ case ETHTOOL_SRXFH:
+ return ice_set_rss_hash_opt(vsi, cmd);
+ default:
+ break;
+ }
+ return -EOPNOTSUPP;
+}
+
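From user space these handlers are reached through the standard ethtool rx-flow-hash interface; for example (interface name illustrative), 'ethtool -N eth0 rx-flow-hash tcp4 sdfn' issues ETHTOOL_SRXFH and lands in ice_set_rss_hash_opt(), while 'ethtool -n eth0 rx-flow-hash tcp4' issues ETHTOOL_GRXFH and is answered by ice_get_rss_hash_opt().
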
+/**
* ice_get_rxnfc - command to get Rx flow classification rules
* @netdev: network interface device structure
* @cmd: ethtool rxnfc command
@@ -2551,6 +2792,10 @@ ice_get_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *cmd,
cmd->data = vsi->rss_size;
ret = 0;
break;
+ case ETHTOOL_GRXFH:
+ ice_get_rss_hash_opt(vsi, cmd);
+ ret = 0;
+ break;
default:
break;
}
@@ -3585,6 +3830,53 @@ ice_set_q_coalesce(struct ice_vsi *vsi, struct ethtool_coalesce *ec, int q_num)
}
/**
+ * ice_is_coalesce_param_invalid - check for unsupported coalesce parameters
+ * @netdev: pointer to the netdev associated with this query
+ * @ec: ethtool structure containing the requested coalesce settings
+ *
+ * Print netdev info and return an error if the driver doesn't support one
+ * of the given parameters. When one of these parameters is implemented,
+ * remove only its entry from the param array.
+ */
+static int
+ice_is_coalesce_param_invalid(struct net_device *netdev,
+ struct ethtool_coalesce *ec)
+{
+ struct ice_ethtool_not_used {
+ u32 value;
+ const char *name;
+ } param[] = {
+ {ec->stats_block_coalesce_usecs, "stats-block-usecs"},
+ {ec->rate_sample_interval, "sample-interval"},
+ {ec->pkt_rate_low, "pkt-rate-low"},
+ {ec->pkt_rate_high, "pkt-rate-high"},
+ {ec->rx_max_coalesced_frames, "rx-frames"},
+ {ec->rx_coalesce_usecs_irq, "rx-usecs-irq"},
+ {ec->rx_max_coalesced_frames_irq, "rx-frames-irq"},
+ {ec->tx_max_coalesced_frames, "tx-frames"},
+ {ec->tx_coalesce_usecs_irq, "tx-usecs-irq"},
+ {ec->tx_max_coalesced_frames_irq, "tx-frames-irq"},
+ {ec->rx_coalesce_usecs_low, "rx-usecs-low"},
+ {ec->rx_max_coalesced_frames_low, "rx-frames-low"},
+ {ec->tx_coalesce_usecs_low, "tx-usecs-low"},
+ {ec->tx_max_coalesced_frames_low, "tx-frames-low"},
+ {ec->rx_max_coalesced_frames_high, "rx-frames-high"},
+ {ec->tx_max_coalesced_frames_high, "tx-frames-high"}
+ };
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(param); i++) {
+ if (param[i].value) {
+ netdev_info(netdev, "Setting %s not supported\n",
+ param[i].name);
+ return -EINVAL;
+ }
+ }
+
+ return 0;
+}
+
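For example (interface name illustrative), 'ethtool -C eth0 rx-usecs 50' maps to ec->rx_coalesce_usecs, which is not in the array above, so it passes this check and reaches the ITR code below; 'ethtool -C eth0 rx-frames 32' maps to ec->rx_max_coalesced_frames and is rejected here with -EINVAL.
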
+/**
* __ice_set_coalesce - set ITR/INTRL values for the device
* @netdev: pointer to the netdev associated with this query
* @ec: ethtool structure to fill with driver's coalesce settings
@@ -3600,6 +3892,9 @@ __ice_set_coalesce(struct net_device *netdev, struct ethtool_coalesce *ec,
struct ice_netdev_priv *np = netdev_priv(netdev);
struct ice_vsi *vsi = np->vsi;
+ if (ice_is_coalesce_param_invalid(netdev, ec))
+ return -EINVAL;
+
if (q_num < 0) {
int v_idx;
@@ -3804,6 +4099,7 @@ static const struct ethtool_ops ice_ethtool_ops = {
.set_priv_flags = ice_set_priv_flags,
.get_sset_count = ice_get_sset_count,
.get_rxnfc = ice_get_rxnfc,
+ .set_rxnfc = ice_set_rxnfc,
.get_ringparam = ice_get_ringparam,
.set_ringparam = ice_set_ringparam,
.nway_reset = ice_nway_reset,
diff --git a/drivers/net/ethernet/intel/ice/ice_flex_pipe.c b/drivers/net/ethernet/intel/ice/ice_flex_pipe.c
index cbd53b586c36..99208946224c 100644
--- a/drivers/net/ethernet/intel/ice/ice_flex_pipe.c
+++ b/drivers/net/ethernet/intel/ice/ice_flex_pipe.c
@@ -3,6 +3,87 @@
#include "ice_common.h"
#include "ice_flex_pipe.h"
+#include "ice_flow.h"
+
+static const u32 ice_sect_lkup[ICE_BLK_COUNT][ICE_SECT_COUNT] = {
+ /* SWITCH */
+ {
+ ICE_SID_XLT0_SW,
+ ICE_SID_XLT_KEY_BUILDER_SW,
+ ICE_SID_XLT1_SW,
+ ICE_SID_XLT2_SW,
+ ICE_SID_PROFID_TCAM_SW,
+ ICE_SID_PROFID_REDIR_SW,
+ ICE_SID_FLD_VEC_SW,
+ ICE_SID_CDID_KEY_BUILDER_SW,
+ ICE_SID_CDID_REDIR_SW
+ },
+
+ /* ACL */
+ {
+ ICE_SID_XLT0_ACL,
+ ICE_SID_XLT_KEY_BUILDER_ACL,
+ ICE_SID_XLT1_ACL,
+ ICE_SID_XLT2_ACL,
+ ICE_SID_PROFID_TCAM_ACL,
+ ICE_SID_PROFID_REDIR_ACL,
+ ICE_SID_FLD_VEC_ACL,
+ ICE_SID_CDID_KEY_BUILDER_ACL,
+ ICE_SID_CDID_REDIR_ACL
+ },
+
+ /* FD */
+ {
+ ICE_SID_XLT0_FD,
+ ICE_SID_XLT_KEY_BUILDER_FD,
+ ICE_SID_XLT1_FD,
+ ICE_SID_XLT2_FD,
+ ICE_SID_PROFID_TCAM_FD,
+ ICE_SID_PROFID_REDIR_FD,
+ ICE_SID_FLD_VEC_FD,
+ ICE_SID_CDID_KEY_BUILDER_FD,
+ ICE_SID_CDID_REDIR_FD
+ },
+
+ /* RSS */
+ {
+ ICE_SID_XLT0_RSS,
+ ICE_SID_XLT_KEY_BUILDER_RSS,
+ ICE_SID_XLT1_RSS,
+ ICE_SID_XLT2_RSS,
+ ICE_SID_PROFID_TCAM_RSS,
+ ICE_SID_PROFID_REDIR_RSS,
+ ICE_SID_FLD_VEC_RSS,
+ ICE_SID_CDID_KEY_BUILDER_RSS,
+ ICE_SID_CDID_REDIR_RSS
+ },
+
+ /* PE */
+ {
+ ICE_SID_XLT0_PE,
+ ICE_SID_XLT_KEY_BUILDER_PE,
+ ICE_SID_XLT1_PE,
+ ICE_SID_XLT2_PE,
+ ICE_SID_PROFID_TCAM_PE,
+ ICE_SID_PROFID_REDIR_PE,
+ ICE_SID_FLD_VEC_PE,
+ ICE_SID_CDID_KEY_BUILDER_PE,
+ ICE_SID_CDID_REDIR_PE
+ }
+};
+
+/**
+ * ice_sect_id - returns section ID
+ * @blk: block type
+ * @sect: section type
+ *
+ * This helper function returns the proper section ID given a block type and a
+ * section type.
+ */
+static u32 ice_sect_id(enum ice_block blk, enum ice_sect sect)
+{
+ return ice_sect_lkup[blk][sect];
+}
/**
* ice_pkg_val_buf
@@ -158,6 +239,176 @@ ice_pkg_enum_section(struct ice_seg *ice_seg, struct ice_pkg_enum *state,
return state->sect;
}
+/* Key creation */
+
+#define ICE_DC_KEY 0x1 /* don't care */
+#define ICE_DC_KEYINV 0x1
+#define ICE_NM_KEY 0x0 /* never match */
+#define ICE_NM_KEYINV 0x0
+#define ICE_0_KEY 0x1 /* match 0 */
+#define ICE_0_KEYINV 0x0
+#define ICE_1_KEY 0x0 /* match 1 */
+#define ICE_1_KEYINV 0x1
+
+/**
+ * ice_gen_key_word - generate 16 bits of a key/mask word
+ * @val: the value
+ * @valid: valid bits mask (change only the valid bits)
+ * @dont_care: don't care mask
+ * @nvr_mtch: never match mask
+ * @key: pointer to where the resulting key portion will be stored
+ * @key_inv: pointer to where the resulting key invert portion will be stored
+ *
+ * This function generates 16 bits from an 8-bit value, an 8-bit don't care
+ * mask and an 8-bit never match mask. The 16 bits of output are divided into
+ * of key and 8 bits of key invert.
+ *
+ * '0' = b01, always match a 0 bit
+ * '1' = b10, always match a 1 bit
+ * '?' = b11, don't care bit (always matches)
+ * '~' = b00, never match bit
+ *
+ * Input:
+ *   val:        b0 1 0 1 0 1
+ *   dont_care:  b0 0 1 1 0 0
+ *   nvr_mtch:   b0 0 0 0 1 1
+ *   ---------------------------
+ * Result: key:  b01 10 11 11 00 00
+ */
+static enum ice_status
+ice_gen_key_word(u8 val, u8 valid, u8 dont_care, u8 nvr_mtch, u8 *key,
+ u8 *key_inv)
+{
+ u8 in_key = *key, in_key_inv = *key_inv;
+ u8 i;
+
+ /* 'dont_care' and 'nvr_mtch' masks cannot overlap */
+ if ((dont_care ^ nvr_mtch) != (dont_care | nvr_mtch))
+ return ICE_ERR_CFG;
+
+ *key = 0;
+ *key_inv = 0;
+
+ /* encode the 8 bits into 8-bit key and 8-bit key invert */
+ for (i = 0; i < 8; i++) {
+ *key >>= 1;
+ *key_inv >>= 1;
+
+ if (!(valid & 0x1)) { /* change only valid bits */
+ *key |= (in_key & 0x1) << 7;
+ *key_inv |= (in_key_inv & 0x1) << 7;
+ } else if (dont_care & 0x1) { /* don't care bit */
+ *key |= ICE_DC_KEY << 7;
+ *key_inv |= ICE_DC_KEYINV << 7;
+ } else if (nvr_mtch & 0x1) { /* never match bit */
+ *key |= ICE_NM_KEY << 7;
+ *key_inv |= ICE_NM_KEYINV << 7;
+ } else if (val & 0x01) { /* exact 1 match */
+ *key |= ICE_1_KEY << 7;
+ *key_inv |= ICE_1_KEYINV << 7;
+ } else { /* exact 0 match */
+ *key |= ICE_0_KEY << 7;
+ *key_inv |= ICE_0_KEYINV << 7;
+ }
+
+ dont_care >>= 1;
+ nvr_mtch >>= 1;
+ valid >>= 1;
+ val >>= 1;
+ in_key >>= 1;
+ in_key_inv >>= 1;
+ }
+
+ return 0;
+}
+
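Extending the kernel-doc example above to a full byte, a hand-worked sketch (not driver code) of what the encoding produces:

    /* valid = 0xFF (update all bits), val = 0x15, dont_care = 0x0C,
     * nvr_mtch = 0x03:
     *   bits 1:0 -> never match (key 0, invert 0)
     *   bits 3:2 -> don't care  (key 1, invert 1)
     *   bit  4   -> match 1     (key 0, invert 1)
     *   bits 7:5 -> match 0     (key 1, invert 0)
     */
    u8 key = 0, key_inv = 0;

    ice_gen_key_word(0x15, 0xFF, 0x0C, 0x03, &key, &key_inv);
    /* key == 0xEC, key_inv == 0x1C */
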
+/**
+ * ice_bits_max_set - determine if the number of bits set is within a maximum
+ * @mask: pointer to the byte array which is the mask
+ * @size: the number of bytes in the mask
+ * @max: the max number of set bits
+ *
+ * This function determines if there are at most 'max' bits set in an
+ * array. Returns true if the number of bits set is <= max, false otherwise.
+ */
+static bool ice_bits_max_set(const u8 *mask, u16 size, u16 max)
+{
+ u16 count = 0;
+ u16 i;
+
+ /* check each byte */
+ for (i = 0; i < size; i++) {
+ /* if 0, go to next byte */
+ if (!mask[i])
+ continue;
+
+ /* We know there is at least one set bit in this byte because of
+ * the above check; if we have already found 'max' bits set,
+ * we can return failure now.
+ */
+ if (count == max)
+ return false;
+
+ /* count the bits in this byte, checking threshold */
+ count += hweight8(mask[i]);
+ if (count > max)
+ return false;
+ }
+
+ return true;
+}
+
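As a quick illustration of the check: with max = 1, a two-byte mask { 0x01, 0x00 } passes (a single bit set), while { 0x03, 0x00 } fails, since hweight8(0x03) == 2 pushes the count past the threshold.
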
+/**
+ * ice_set_key - generate a variable-sized key in multiples of 16 bits
+ * @key: pointer to where the key will be stored
+ * @size: the size of the complete key in bytes (must be even)
+ * @val: array of 8-bit values that makes up the value portion of the key
+ * @upd: array of 8-bit masks that determine what key portion to update
+ * @dc: array of 8-bit masks that make up the don't care mask
+ * @nm: array of 8-bit masks that make up the never match mask
+ * @off: the offset of the first byte in the key to update
+ * @len: the number of bytes in the key update
+ *
+ * This function generates a key from a value, a don't care mask and a never
+ * match mask.
+ * upd, dc, and nm are optional parameters, and can be NULL:
+ * upd == NULL --> upd mask is all 1's (update all bits)
+ * dc == NULL --> dc mask is all 0's (no don't care bits)
+ * nm == NULL --> nm mask is all 0's (no never match bits)
+ */
+static enum ice_status
+ice_set_key(u8 *key, u16 size, u8 *val, u8 *upd, u8 *dc, u8 *nm, u16 off,
+ u16 len)
+{
+ u16 half_size;
+ u16 i;
+
+ /* size must be a multiple of 2 bytes. */
+ if (size % 2)
+ return ICE_ERR_CFG;
+
+ half_size = size / 2;
+ if (off + len > half_size)
+ return ICE_ERR_CFG;
+
+ /* Make sure at most one bit is set in the never match mask. Having
+ * more than one never match mask bit set will cause HW to consume
+ * excessive power; this is a power management efficiency check.
+ */
+#define ICE_NVR_MTCH_BITS_MAX 1
+ if (nm && !ice_bits_max_set(nm, len, ICE_NVR_MTCH_BITS_MAX))
+ return ICE_ERR_CFG;
+
+ for (i = 0; i < len; i++)
+ if (ice_gen_key_word(val[i], upd ? upd[i] : 0xff,
+ dc ? dc[i] : 0, nm ? nm[i] : 0,
+ key + off + i, key + half_size + off + i))
+ return ICE_ERR_CFG;
+
+ return 0;
+}
+
/**
* ice_acquire_global_cfg_lock
* @hw: pointer to the HW structure
@@ -205,6 +456,31 @@ static void ice_release_global_cfg_lock(struct ice_hw *hw)
}
/**
+ * ice_acquire_change_lock
+ * @hw: pointer to the HW structure
+ * @access: access type (read or write)
+ *
+ * This function will request ownership of the change lock.
+ */
+static enum ice_status
+ice_acquire_change_lock(struct ice_hw *hw, enum ice_aq_res_access_type access)
+{
+ return ice_acquire_res(hw, ICE_CHANGE_LOCK_RES_ID, access,
+ ICE_CHANGE_LOCK_TIMEOUT);
+}
+
+/**
+ * ice_release_change_lock
+ * @hw: pointer to the HW structure
+ *
+ * This function will release the change lock using the proper Admin Command.
+ */
+static void ice_release_change_lock(struct ice_hw *hw)
+{
+ ice_release_res(hw, ICE_CHANGE_LOCK_RES_ID);
+}
+
+/**
* ice_aq_download_pkg
* @hw: pointer to the hardware structure
* @pkg_buf: the package buffer to transfer
@@ -253,6 +529,54 @@ ice_aq_download_pkg(struct ice_hw *hw, struct ice_buf_hdr *pkg_buf,
}
/**
+ * ice_aq_update_pkg
+ * @hw: pointer to the hardware structure
+ * @pkg_buf: the package cmd buffer
+ * @buf_size: the size of the package cmd buffer
+ * @last_buf: last buffer indicator
+ * @error_offset: returns error offset
+ * @error_info: returns error information
+ * @cd: pointer to command details structure or NULL
+ *
+ * Update Package (0x0C42)
+ */
+static enum ice_status
+ice_aq_update_pkg(struct ice_hw *hw, struct ice_buf_hdr *pkg_buf, u16 buf_size,
+ bool last_buf, u32 *error_offset, u32 *error_info,
+ struct ice_sq_cd *cd)
+{
+ struct ice_aqc_download_pkg *cmd;
+ struct ice_aq_desc desc;
+ enum ice_status status;
+
+ if (error_offset)
+ *error_offset = 0;
+ if (error_info)
+ *error_info = 0;
+
+ cmd = &desc.params.download_pkg;
+ ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_update_pkg);
+ desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
+
+ if (last_buf)
+ cmd->flags |= ICE_AQC_DOWNLOAD_PKG_LAST_BUF;
+
+ status = ice_aq_send_cmd(hw, &desc, pkg_buf, buf_size, cd);
+ if (status == ICE_ERR_AQ_ERROR) {
+ /* Read error from buffer only when the FW returned an error */
+ struct ice_aqc_download_pkg_resp *resp;
+
+ resp = (struct ice_aqc_download_pkg_resp *)pkg_buf;
+ if (error_offset)
+ *error_offset = le32_to_cpu(resp->error_offset);
+ if (error_info)
+ *error_info = le32_to_cpu(resp->error_info);
+ }
+
+ return status;
+}
+
+/**
* ice_find_seg_in_pkg
* @hw: pointer to the hardware structure
* @seg_type: the segment type to search for (i.e., SEGMENT_TYPE_CPK)
@@ -287,6 +611,44 @@ ice_find_seg_in_pkg(struct ice_hw *hw, u32 seg_type,
}
/**
+ * ice_update_pkg
+ * @hw: pointer to the hardware structure
+ * @bufs: pointer to an array of buffers
+ * @count: the number of buffers in the array
+ *
+ * Obtains change lock and updates package.
+ */
+static enum ice_status
+ice_update_pkg(struct ice_hw *hw, struct ice_buf *bufs, u32 count)
+{
+ enum ice_status status;
+ u32 offset, info, i;
+
+ status = ice_acquire_change_lock(hw, ICE_RES_WRITE);
+ if (status)
+ return status;
+
+ for (i = 0; i < count; i++) {
+ struct ice_buf_hdr *bh = (struct ice_buf_hdr *)(bufs + i);
+ bool last = ((i + 1) == count);
+
+ status = ice_aq_update_pkg(hw, bh, le16_to_cpu(bh->data_end),
+ last, &offset, &info, NULL);
+
+ if (status) {
+ ice_debug(hw, ICE_DBG_PKG,
+ "Update pkg failed: err %d off %d inf %d\n",
+ status, offset, info);
+ break;
+ }
+ }
+
+ ice_release_change_lock(hw);
+
+ return status;
+}
+
+/**
* ice_dwnld_cfg_bufs
* @hw: pointer to the hardware structure
* @bufs: pointer to an array of buffers
@@ -767,6 +1129,169 @@ enum ice_status ice_copy_and_init_pkg(struct ice_hw *hw, const u8 *buf, u32 len)
return status;
}
+/**
+ * ice_pkg_buf_alloc
+ * @hw: pointer to the HW structure
+ *
+ * Allocates a package buffer and returns a pointer to the buffer header.
+ * Note: all package contents must be in Little Endian form.
+ */
+static struct ice_buf_build *ice_pkg_buf_alloc(struct ice_hw *hw)
+{
+ struct ice_buf_build *bld;
+ struct ice_buf_hdr *buf;
+
+ bld = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*bld), GFP_KERNEL);
+ if (!bld)
+ return NULL;
+
+ buf = (struct ice_buf_hdr *)bld;
+ buf->data_end = cpu_to_le16(offsetof(struct ice_buf_hdr,
+ section_entry));
+ return bld;
+}
+
+/**
+ * ice_pkg_buf_free
+ * @hw: pointer to the HW structure
+ * @bld: pointer to pkg build (allocated by ice_pkg_buf_alloc())
+ *
+ * Frees a package buffer
+ */
+static void ice_pkg_buf_free(struct ice_hw *hw, struct ice_buf_build *bld)
+{
+ devm_kfree(ice_hw_to_dev(hw), bld);
+}
+
+/**
+ * ice_pkg_buf_reserve_section
+ * @bld: pointer to pkg build (allocated by ice_pkg_buf_alloc())
+ * @count: the number of sections to reserve
+ *
+ * Reserves one or more section table entries in a package buffer. This routine
+ * can be called multiple times as long as all calls are made before calling
+ * ice_pkg_buf_alloc_section(). Once ice_pkg_buf_alloc_section() has been
+ * called, the number of reserved sections can no longer be increased; leaving
+ * some reserved sections unused is fine, but it wastes space in the buffer.
+ * Note: all package contents must be in Little Endian form.
+ */
+static enum ice_status
+ice_pkg_buf_reserve_section(struct ice_buf_build *bld, u16 count)
+{
+ struct ice_buf_hdr *buf;
+ u16 section_count;
+ u16 data_end;
+
+ if (!bld)
+ return ICE_ERR_PARAM;
+
+ buf = (struct ice_buf_hdr *)&bld->buf;
+
+ /* already an active section, can't increase table size */
+ section_count = le16_to_cpu(buf->section_count);
+ if (section_count > 0)
+ return ICE_ERR_CFG;
+
+ if (bld->reserved_section_table_entries + count > ICE_MAX_S_COUNT)
+ return ICE_ERR_CFG;
+ bld->reserved_section_table_entries += count;
+
+ data_end = le16_to_cpu(buf->data_end) +
+ (count * sizeof(buf->section_entry[0]));
+ buf->data_end = cpu_to_le16(data_end);
+
+ return 0;
+}
+
+/**
+ * ice_pkg_buf_alloc_section
+ * @bld: pointer to pkg build (allocated by ice_pkg_buf_alloc())
+ * @type: the section type value
+ * @size: the size of the section to reserve (in bytes)
+ *
+ * Reserves memory in the buffer for a section's content and updates the
+ * buffer's status accordingly. This routine returns a pointer to the first
+ * byte of the section start within the buffer, which is used to fill in the
+ * section contents.
+ * Note: all package contents must be in Little Endian form.
+ */
+static void *
+ice_pkg_buf_alloc_section(struct ice_buf_build *bld, u32 type, u16 size)
+{
+ struct ice_buf_hdr *buf;
+ u16 sect_count;
+ u16 data_end;
+
+ if (!bld || !type || !size)
+ return NULL;
+
+ buf = (struct ice_buf_hdr *)&bld->buf;
+
+ /* check for enough space left in buffer */
+ data_end = le16_to_cpu(buf->data_end);
+
+ /* section start must align on 4 byte boundary */
+ data_end = ALIGN(data_end, 4);
+
+ if ((data_end + size) > ICE_MAX_S_DATA_END)
+ return NULL;
+
+ /* check for more available section table entries */
+ sect_count = le16_to_cpu(buf->section_count);
+ if (sect_count < bld->reserved_section_table_entries) {
+ void *section_ptr = ((u8 *)buf) + data_end;
+
+ buf->section_entry[sect_count].offset = cpu_to_le16(data_end);
+ buf->section_entry[sect_count].size = cpu_to_le16(size);
+ buf->section_entry[sect_count].type = cpu_to_le32(type);
+
+ data_end += size;
+ buf->data_end = cpu_to_le16(data_end);
+
+ buf->section_count = cpu_to_le16(sect_count + 1);
+ return section_ptr;
+ }
+
+ /* no free section table entries */
+ return NULL;
+}
+
+/**
+ * ice_pkg_buf_get_active_sections
+ * @bld: pointer to pkg build (allocated by ice_pkg_buf_alloc())
+ *
+ * Returns the number of active sections. Before using the package buffer
+ * in an update package command, the caller should make sure that there is at
+ * least one active section - otherwise, the buffer is not legal and should
+ * not be used.
+ * Note: all package contents must be in Little Endian form.
+ */
+static u16 ice_pkg_buf_get_active_sections(struct ice_buf_build *bld)
+{
+ struct ice_buf_hdr *buf;
+
+ if (!bld)
+ return 0;
+
+ buf = (struct ice_buf_hdr *)&bld->buf;
+ return le16_to_cpu(buf->section_count);
+}
+
+/**
+ * ice_pkg_buf
+ * @bld: pointer to pkg build (allocated by ice_pkg_buf_alloc())
+ *
+ * Return a pointer to the buffer's header
+ */
+static struct ice_buf *ice_pkg_buf(struct ice_buf_build *bld)
+{
+ if (!bld)
+ return NULL;
+
+ return &bld->buf;
+}
+
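The buffer-build helpers above are designed to be used as a unit; a condensed sketch of the intended lifecycle (the section ID and size are placeholders, and error handling is trimmed):

    struct ice_buf_build *bld;
    void *sect;

    bld = ice_pkg_buf_alloc(hw);
    if (!bld)
            return ICE_ERR_NO_MEMORY;

    /* reserve table entries first, then carve out section contents */
    if (ice_pkg_buf_reserve_section(bld, 1))
            goto out;

    sect = ice_pkg_buf_alloc_section(bld, sect_id, sect_size);
    if (!sect)
            goto out;

    /* ... fill the section in Little Endian form ... */

    if (ice_pkg_buf_get_active_sections(bld))
            ice_update_pkg(hw, ice_pkg_buf(bld), 1);
out:
    ice_pkg_buf_free(hw, bld);
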
/* PTG Management */
/**
@@ -951,6 +1476,48 @@ enum ice_sid_all {
ICE_SID_OFF_COUNT,
};
+/* Characteristic handling */
+
+/**
+ * ice_match_prop_lst - determine if properties of two lists match
+ * @list1: first properties list
+ * @list2: second properties list
+ *
+ * Count, cookies and the order must match in order to be considered equivalent.
+ */
+static bool
+ice_match_prop_lst(struct list_head *list1, struct list_head *list2)
+{
+ struct ice_vsig_prof *tmp1;
+ struct ice_vsig_prof *tmp2;
+ u16 chk_count = 0;
+ u16 count = 0;
+
+ /* compare counts */
+ list_for_each_entry(tmp1, list1, list)
+ count++;
+ list_for_each_entry(tmp2, list2, list)
+ chk_count++;
+ if (!count || count != chk_count)
+ return false;
+
+ tmp1 = list_first_entry(list1, struct ice_vsig_prof, list);
+ tmp2 = list_first_entry(list2, struct ice_vsig_prof, list);
+
+ /* profile cookies must match, and in exactly the same order, to
+ * account for priority
+ */
+ while (count--) {
+ if (tmp2->profile_cookie != tmp1->profile_cookie)
+ return false;
+
+ tmp1 = list_next_entry(tmp1, list);
+ tmp2 = list_next_entry(tmp2, list);
+ }
+
+ return true;
+}
+
/* VSIG Management */
/**
@@ -999,6 +1566,117 @@ static u16 ice_vsig_alloc_val(struct ice_hw *hw, enum ice_block blk, u16 vsig)
}
/**
+ * ice_vsig_alloc - Finds a free entry and allocates a new VSIG
+ * @hw: pointer to the hardware structure
+ * @blk: HW block
+ *
+ * This function will iterate through the VSIG list and mark the first
+ * unused entry for the new VSIG entry as used and return that value.
+ */
+static u16 ice_vsig_alloc(struct ice_hw *hw, enum ice_block blk)
+{
+ u16 i;
+
+ for (i = 1; i < ICE_MAX_VSIGS; i++)
+ if (!hw->blk[blk].xlt2.vsig_tbl[i].in_use)
+ return ice_vsig_alloc_val(hw, blk, i);
+
+ return ICE_DEFAULT_VSIG;
+}
+
+/**
+ * ice_find_dup_props_vsig - find VSI group with a specified set of properties
+ * @hw: pointer to the hardware structure
+ * @blk: HW block
+ * @chs: characteristic list
+ * @vsig: returns the VSIG with the matching profiles, if found
+ *
+ * Each VSIG is associated with a characteristic set; i.e., all VSIs under
+ * a group have the same characteristic set. To check if there exists a VSIG
+ * which has the same characteristics as the input characteristics, this
+ * function will iterate through the XLT2 list and return the VSIG that has a
+ * matching configuration. To make sure that priorities are accounted
+ * for, the list must match exactly, including the order in which the
+ * characteristics are listed.
+ */
+static enum ice_status
+ice_find_dup_props_vsig(struct ice_hw *hw, enum ice_block blk,
+ struct list_head *chs, u16 *vsig)
+{
+ struct ice_xlt2 *xlt2 = &hw->blk[blk].xlt2;
+ u16 i;
+
+ for (i = 0; i < xlt2->count; i++)
+ if (xlt2->vsig_tbl[i].in_use &&
+ ice_match_prop_lst(chs, &xlt2->vsig_tbl[i].prop_lst)) {
+ *vsig = ICE_VSIG_VALUE(i, hw->pf_id);
+ return 0;
+ }
+
+ return ICE_ERR_DOES_NOT_EXIST;
+}
+
+/**
+ * ice_vsig_free - free VSI group
+ * @hw: pointer to the hardware structure
+ * @blk: HW block
+ * @vsig: VSIG to remove
+ *
+ * The function will remove all VSIs associated with the input VSIG, move
+ * them to the default VSIG, and mark the VSIG as available.
+ */
+static enum ice_status
+ice_vsig_free(struct ice_hw *hw, enum ice_block blk, u16 vsig)
+{
+ struct ice_vsig_prof *dtmp, *del;
+ struct ice_vsig_vsi *vsi_cur;
+ u16 idx;
+
+ idx = vsig & ICE_VSIG_IDX_M;
+ if (idx >= ICE_MAX_VSIGS)
+ return ICE_ERR_PARAM;
+
+ if (!hw->blk[blk].xlt2.vsig_tbl[idx].in_use)
+ return ICE_ERR_DOES_NOT_EXIST;
+
+ hw->blk[blk].xlt2.vsig_tbl[idx].in_use = false;
+
+ vsi_cur = hw->blk[blk].xlt2.vsig_tbl[idx].first_vsi;
+ /* If the VSIG has at least 1 VSI then iterate through the
+ * list and remove the VSIs before deleting the group.
+ */
+ if (vsi_cur) {
+ /* remove all vsis associated with this VSIG XLT2 entry */
+ do {
+ struct ice_vsig_vsi *tmp = vsi_cur->next_vsi;
+
+ vsi_cur->vsig = ICE_DEFAULT_VSIG;
+ vsi_cur->changed = 1;
+ vsi_cur->next_vsi = NULL;
+ vsi_cur = tmp;
+ } while (vsi_cur);
+
+ /* NULL terminate head of VSI list */
+ hw->blk[blk].xlt2.vsig_tbl[idx].first_vsi = NULL;
+ }
+
+ /* free characteristic list */
+ list_for_each_entry_safe(del, dtmp,
+ &hw->blk[blk].xlt2.vsig_tbl[idx].prop_lst,
+ list) {
+ list_del(&del->list);
+ devm_kfree(ice_hw_to_dev(hw), del);
+ }
+
+ /* if VSIG characteristic list was cleared for reset
+ * re-initialize the list head
+ */
+ INIT_LIST_HEAD(&hw->blk[blk].xlt2.vsig_tbl[idx].prop_lst);
+
+ return 0;
+}
+
+/**
* ice_vsig_remove_vsi - remove VSI from VSIG
* @hw: pointer to the hardware structure
* @blk: HW block
@@ -1117,6 +1795,215 @@ ice_vsig_add_mv_vsi(struct ice_hw *hw, enum ice_block blk, u16 vsi, u16 vsig)
return 0;
}
+/**
+ * ice_find_prof_id - find profile ID for a given field vector
+ * @hw: pointer to the hardware structure
+ * @blk: HW block
+ * @fv: field vector to search for
+ * @prof_id: receives the profile ID
+ */
+static enum ice_status
+ice_find_prof_id(struct ice_hw *hw, enum ice_block blk,
+ struct ice_fv_word *fv, u8 *prof_id)
+{
+ struct ice_es *es = &hw->blk[blk].es;
+ u16 off, i;
+
+ for (i = 0; i < es->count; i++) {
+ off = i * es->fvw;
+
+ if (memcmp(&es->t[off], fv, es->fvw * sizeof(*fv)))
+ continue;
+
+ *prof_id = i;
+ return 0;
+ }
+
+ return ICE_ERR_DOES_NOT_EXIST;
+}
+
+/**
+ * ice_prof_id_rsrc_type - get profile ID resource type for a block type
+ * @blk: the block type
+ * @rsrc_type: pointer to variable to receive the resource type
+ */
+static bool ice_prof_id_rsrc_type(enum ice_block blk, u16 *rsrc_type)
+{
+ switch (blk) {
+ case ICE_BLK_RSS:
+ *rsrc_type = ICE_AQC_RES_TYPE_HASH_PROF_BLDR_PROFID;
+ break;
+ default:
+ return false;
+ }
+ return true;
+}
+
+/**
+ * ice_tcam_ent_rsrc_type - get TCAM entry resource type for a block type
+ * @blk: the block type
+ * @rsrc_type: pointer to variable to receive the resource type
+ */
+static bool ice_tcam_ent_rsrc_type(enum ice_block blk, u16 *rsrc_type)
+{
+ switch (blk) {
+ case ICE_BLK_RSS:
+ *rsrc_type = ICE_AQC_RES_TYPE_HASH_PROF_BLDR_TCAM;
+ break;
+ default:
+ return false;
+ }
+ return true;
+}
+
+/**
+ * ice_alloc_tcam_ent - allocate hardware TCAM entry
+ * @hw: pointer to the HW struct
+ * @blk: the block to allocate the TCAM for
+ * @tcam_idx: pointer to variable to receive the TCAM entry
+ *
+ * This function allocates a new entry in a Profile ID TCAM for a specific
+ * block.
+ */
+static enum ice_status
+ice_alloc_tcam_ent(struct ice_hw *hw, enum ice_block blk, u16 *tcam_idx)
+{
+ u16 res_type;
+
+ if (!ice_tcam_ent_rsrc_type(blk, &res_type))
+ return ICE_ERR_PARAM;
+
+ return ice_alloc_hw_res(hw, res_type, 1, true, tcam_idx);
+}
+
+/**
+ * ice_free_tcam_ent - free hardware TCAM entry
+ * @hw: pointer to the HW struct
+ * @blk: the block from which to free the TCAM entry
+ * @tcam_idx: the TCAM entry to free
+ *
+ * This function frees an entry in a Profile ID TCAM for a specific block.
+ */
+static enum ice_status
+ice_free_tcam_ent(struct ice_hw *hw, enum ice_block blk, u16 tcam_idx)
+{
+ u16 res_type;
+
+ if (!ice_tcam_ent_rsrc_type(blk, &res_type))
+ return ICE_ERR_PARAM;
+
+ return ice_free_hw_res(hw, res_type, 1, &tcam_idx);
+}
+
+/**
+ * ice_alloc_prof_id - allocate profile ID
+ * @hw: pointer to the HW struct
+ * @blk: the block to allocate the profile ID for
+ * @prof_id: pointer to variable to receive the profile ID
+ *
+ * This function allocates a new profile ID, which also corresponds to a Field
+ * Vector (Extraction Sequence) entry.
+ */
+static enum ice_status
+ice_alloc_prof_id(struct ice_hw *hw, enum ice_block blk, u8 *prof_id)
+{
+ enum ice_status status;
+ u16 res_type;
+ u16 get_prof;
+
+ if (!ice_prof_id_rsrc_type(blk, &res_type))
+ return ICE_ERR_PARAM;
+
+ status = ice_alloc_hw_res(hw, res_type, 1, false, &get_prof);
+ if (!status)
+ *prof_id = (u8)get_prof;
+
+ return status;
+}
+
+/**
+ * ice_free_prof_id - free profile ID
+ * @hw: pointer to the HW struct
+ * @blk: the block from which to free the profile ID
+ * @prof_id: the profile ID to free
+ *
+ * This function frees a profile ID, which also corresponds to a Field Vector.
+ */
+static enum ice_status
+ice_free_prof_id(struct ice_hw *hw, enum ice_block blk, u8 prof_id)
+{
+ u16 tmp_prof_id = (u16)prof_id;
+ u16 res_type;
+
+ if (!ice_prof_id_rsrc_type(blk, &res_type))
+ return ICE_ERR_PARAM;
+
+ return ice_free_hw_res(hw, res_type, 1, &tmp_prof_id);
+}
+
+/**
+ * ice_prof_inc_ref - increment reference count for profile
+ * @hw: pointer to the HW struct
+ * @blk: the block from which to free the profile ID
+ * @prof_id: the profile ID for which to increment the reference count
+ */
+static enum ice_status
+ice_prof_inc_ref(struct ice_hw *hw, enum ice_block blk, u8 prof_id)
+{
+ if (prof_id >= hw->blk[blk].es.count)
+ return ICE_ERR_PARAM;
+
+ hw->blk[blk].es.ref_count[prof_id]++;
+
+ return 0;
+}
+
+/**
+ * ice_write_es - write an extraction sequence to hardware
+ * @hw: pointer to the HW struct
+ * @blk: the block in which to write the extraction sequence
+ * @prof_id: the profile ID to write
+ * @fv: pointer to the extraction sequence to write - NULL to clear extraction
+ */
+static void
+ice_write_es(struct ice_hw *hw, enum ice_block blk, u8 prof_id,
+ struct ice_fv_word *fv)
+{
+ u16 off;
+
+ off = prof_id * hw->blk[blk].es.fvw;
+ if (!fv) {
+ memset(&hw->blk[blk].es.t[off], 0,
+ hw->blk[blk].es.fvw * sizeof(*fv));
+ hw->blk[blk].es.written[prof_id] = false;
+ } else {
+ memcpy(&hw->blk[blk].es.t[off], fv,
+ hw->blk[blk].es.fvw * sizeof(*fv));
+ }
+}
+
+/**
+ * ice_prof_dec_ref - decrement reference count for profile
+ * @hw: pointer to the HW struct
+ * @blk: the block from which to free the profile ID
+ * @prof_id: the profile ID for which to decrement the reference count
+ */
+static enum ice_status
+ice_prof_dec_ref(struct ice_hw *hw, enum ice_block blk, u8 prof_id)
+{
+ if (prof_id >= hw->blk[blk].es.count)
+ return ICE_ERR_PARAM;
+
+ if (hw->blk[blk].es.ref_count[prof_id] > 0) {
+ if (!--hw->blk[blk].es.ref_count[prof_id]) {
+ ice_write_es(hw, blk, prof_id, NULL);
+ return ice_free_prof_id(hw, blk, prof_id);
+ }
+ }
+
+ return 0;
+}
+
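These two helpers bracket a profile's life; a hedged sketch of the intended pairing (prof_id assumed valid):

    ice_prof_inc_ref(hw, blk, prof_id);     /* ref_count becomes 1 */
    /* ... profile in use by a VSIG ... */
    ice_prof_dec_ref(hw, blk, prof_id);     /* ref_count hits 0: the
                                             * extraction sequence is cleared
                                             * and the profile ID freed
                                             */
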
/* Block / table section IDs */
static const u32 ice_blk_sids[ICE_BLK_COUNT][ICE_SID_OFF_COUNT] = {
/* SWITCH */
@@ -1374,16 +2261,85 @@ void ice_fill_blk_tbls(struct ice_hw *hw)
}
/**
+ * ice_free_prof_map - free profile map
+ * @hw: pointer to the hardware structure
+ * @blk_idx: HW block index
+ */
+static void ice_free_prof_map(struct ice_hw *hw, u8 blk_idx)
+{
+ struct ice_es *es = &hw->blk[blk_idx].es;
+ struct ice_prof_map *del, *tmp;
+
+ mutex_lock(&es->prof_map_lock);
+ list_for_each_entry_safe(del, tmp, &es->prof_map, list) {
+ list_del(&del->list);
+ devm_kfree(ice_hw_to_dev(hw), del);
+ }
+ INIT_LIST_HEAD(&es->prof_map);
+ mutex_unlock(&es->prof_map_lock);
+}
+
+/**
+ * ice_free_flow_profs - free flow profile entries
+ * @hw: pointer to the hardware structure
+ * @blk_idx: HW block index
+ */
+static void ice_free_flow_profs(struct ice_hw *hw, u8 blk_idx)
+{
+ struct ice_flow_prof *p, *tmp;
+
+ mutex_lock(&hw->fl_profs_locks[blk_idx]);
+ list_for_each_entry_safe(p, tmp, &hw->fl_profs[blk_idx], l_entry) {
+ list_del(&p->l_entry);
+ devm_kfree(ice_hw_to_dev(hw), p);
+ }
+ mutex_unlock(&hw->fl_profs_locks[blk_idx]);
+
+ /* if driver is in reset and tables are being cleared
+ * re-initialize the flow profile list heads
+ */
+ INIT_LIST_HEAD(&hw->fl_profs[blk_idx]);
+}
+
+/**
+ * ice_free_vsig_tbl - free complete VSIG table entries
+ * @hw: pointer to the hardware structure
+ * @blk: the HW block on which to free the VSIG table entries
+ */
+static void ice_free_vsig_tbl(struct ice_hw *hw, enum ice_block blk)
+{
+ u16 i;
+
+ if (!hw->blk[blk].xlt2.vsig_tbl)
+ return;
+
+ for (i = 1; i < ICE_MAX_VSIGS; i++)
+ if (hw->blk[blk].xlt2.vsig_tbl[i].in_use)
+ ice_vsig_free(hw, blk, i);
+}
+
+/**
* ice_free_hw_tbls - free hardware table memory
* @hw: pointer to the hardware structure
*/
void ice_free_hw_tbls(struct ice_hw *hw)
{
+ struct ice_rss_cfg *r, *rt;
u8 i;
for (i = 0; i < ICE_BLK_COUNT; i++) {
- hw->blk[i].is_list_init = false;
+ if (hw->blk[i].is_list_init) {
+ struct ice_es *es = &hw->blk[i].es;
+
+ ice_free_prof_map(hw, i);
+ mutex_destroy(&es->prof_map_lock);
+ ice_free_flow_profs(hw, i);
+ mutex_destroy(&hw->fl_profs_locks[i]);
+
+ hw->blk[i].is_list_init = false;
+ }
+ ice_free_vsig_tbl(hw, (enum ice_block)i);
devm_kfree(ice_hw_to_dev(hw), hw->blk[i].xlt1.ptypes);
devm_kfree(ice_hw_to_dev(hw), hw->blk[i].xlt1.ptg_tbl);
devm_kfree(ice_hw_to_dev(hw), hw->blk[i].xlt1.t);
@@ -1397,10 +2353,26 @@ void ice_free_hw_tbls(struct ice_hw *hw)
devm_kfree(ice_hw_to_dev(hw), hw->blk[i].es.written);
}
+ list_for_each_entry_safe(r, rt, &hw->rss_list_head, l_entry) {
+ list_del(&r->l_entry);
+ devm_kfree(ice_hw_to_dev(hw), r);
+ }
+ mutex_destroy(&hw->rss_locks);
memset(hw->blk, 0, sizeof(hw->blk));
}
/**
+ * ice_init_flow_profs - init flow profile locks and list heads
+ * @hw: pointer to the hardware structure
+ * @blk_idx: HW block index
+ */
+static void ice_init_flow_profs(struct ice_hw *hw, u8 blk_idx)
+{
+ mutex_init(&hw->fl_profs_locks[blk_idx]);
+ INIT_LIST_HEAD(&hw->fl_profs[blk_idx]);
+}
+
+/**
* ice_clear_hw_tbls - clear HW tables and flow profiles
* @hw: pointer to the hardware structure
*/
@@ -1415,6 +2387,13 @@ void ice_clear_hw_tbls(struct ice_hw *hw)
struct ice_xlt2 *xlt2 = &hw->blk[i].xlt2;
struct ice_es *es = &hw->blk[i].es;
+ if (hw->blk[i].is_list_init) {
+ ice_free_prof_map(hw, i);
+ ice_free_flow_profs(hw, i);
+ }
+
+ ice_free_vsig_tbl(hw, (enum ice_block)i);
+
memset(xlt1->ptypes, 0, xlt1->count * sizeof(*xlt1->ptypes));
memset(xlt1->ptg_tbl, 0,
ICE_MAX_PTGS * sizeof(*xlt1->ptg_tbl));
@@ -1443,6 +2422,8 @@ enum ice_status ice_init_hw_tbls(struct ice_hw *hw)
{
u8 i;
+ mutex_init(&hw->rss_locks);
+ INIT_LIST_HEAD(&hw->rss_list_head);
for (i = 0; i < ICE_BLK_COUNT; i++) {
struct ice_prof_redir *prof_redir = &hw->blk[i].prof_redir;
struct ice_prof_tcam *prof = &hw->blk[i].prof;
@@ -1454,6 +2435,9 @@ enum ice_status ice_init_hw_tbls(struct ice_hw *hw)
if (hw->blk[i].is_list_init)
continue;
+ ice_init_flow_profs(hw, i);
+ mutex_init(&es->prof_map_lock);
+ INIT_LIST_HEAD(&es->prof_map);
hw->blk[i].is_list_init = true;
hw->blk[i].overwrite = blk_sizes[i].overwrite;
@@ -1547,3 +2531,1580 @@ err:
ice_free_hw_tbls(hw);
return ICE_ERR_NO_MEMORY;
}
+
+/**
+ * ice_prof_gen_key - generate profile ID key
+ * @hw: pointer to the HW struct
+ * @blk: the block in which to write profile ID to
+ * @ptg: packet type group (PTG) portion of key
+ * @vsig: VSIG portion of key
+ * @cdid: CDID portion of key
+ * @flags: flag portion of key
+ * @vl_msk: valid mask
+ * @dc_msk: don't care mask
+ * @nm_msk: never match mask
+ * @key: output of profile ID key
+ */
+static enum ice_status
+ice_prof_gen_key(struct ice_hw *hw, enum ice_block blk, u8 ptg, u16 vsig,
+ u8 cdid, u16 flags, u8 vl_msk[ICE_TCAM_KEY_VAL_SZ],
+ u8 dc_msk[ICE_TCAM_KEY_VAL_SZ], u8 nm_msk[ICE_TCAM_KEY_VAL_SZ],
+ u8 key[ICE_TCAM_KEY_SZ])
+{
+ struct ice_prof_id_key inkey;
+
+ inkey.xlt1 = ptg;
+ inkey.xlt2_cdid = cpu_to_le16(vsig);
+ inkey.flags = cpu_to_le16(flags);
+
+ switch (hw->blk[blk].prof.cdid_bits) {
+ case 0:
+ break;
+ case 2:
+#define ICE_CD_2_M 0xC000U
+#define ICE_CD_2_S 14
+ inkey.xlt2_cdid &= ~cpu_to_le16(ICE_CD_2_M);
+ inkey.xlt2_cdid |= cpu_to_le16(BIT(cdid) << ICE_CD_2_S);
+ break;
+ case 4:
+#define ICE_CD_4_M 0xF000U
+#define ICE_CD_4_S 12
+ inkey.xlt2_cdid &= ~cpu_to_le16(ICE_CD_4_M);
+ inkey.xlt2_cdid |= cpu_to_le16(BIT(cdid) << ICE_CD_4_S);
+ break;
+ case 8:
+#define ICE_CD_8_M 0xFF00U
+#define ICE_CD_8_S 8
+ inkey.xlt2_cdid &= ~cpu_to_le16(ICE_CD_8_M);
+ inkey.xlt2_cdid |= cpu_to_le16(BIT(cdid) << ICE_CD_8_S);
+ break;
+ default:
+ ice_debug(hw, ICE_DBG_PKG, "Error in profile config\n");
+ break;
+ }
+
+ return ice_set_key(key, ICE_TCAM_KEY_SZ, (u8 *)&inkey, vl_msk, dc_msk,
+ nm_msk, 0, ICE_TCAM_KEY_SZ / 2);
+}
+
+/**
+ * ice_tcam_write_entry - write TCAM entry
+ * @hw: pointer to the HW struct
+ * @blk: the block in which to write profile ID to
+ * @idx: the entry index to write to
+ * @prof_id: profile ID
+ * @ptg: packet type group (PTG) portion of key
+ * @vsig: VSIG portion of key
+ * @cdid: CDID portion of key
+ * @flags: flag portion of key
+ * @vl_msk: valid mask
+ * @dc_msk: don't care mask
+ * @nm_msk: never match mask
+ */
+static enum ice_status
+ice_tcam_write_entry(struct ice_hw *hw, enum ice_block blk, u16 idx,
+ u8 prof_id, u8 ptg, u16 vsig, u8 cdid, u16 flags,
+ u8 vl_msk[ICE_TCAM_KEY_VAL_SZ],
+ u8 dc_msk[ICE_TCAM_KEY_VAL_SZ],
+ u8 nm_msk[ICE_TCAM_KEY_VAL_SZ])
+{
+ enum ice_status status;
+
+ status = ice_prof_gen_key(hw, blk, ptg, vsig, cdid, flags, vl_msk,
+ dc_msk, nm_msk, hw->blk[blk].prof.t[idx].key);
+ if (!status) {
+ hw->blk[blk].prof.t[idx].addr = cpu_to_le16(idx);
+ hw->blk[blk].prof.t[idx].prof_id = prof_id;
+ }
+
+ return status;
+}
+
+/**
+ * ice_vsig_get_ref - returns the number of VSIs that belong to a VSIG
+ * @hw: pointer to the hardware structure
+ * @blk: HW block
+ * @vsig: VSIG to query
+ * @refs: pointer to variable to receive the reference count
+ */
+static enum ice_status
+ice_vsig_get_ref(struct ice_hw *hw, enum ice_block blk, u16 vsig, u16 *refs)
+{
+ u16 idx = vsig & ICE_VSIG_IDX_M;
+ struct ice_vsig_vsi *ptr;
+
+ *refs = 0;
+
+ if (!hw->blk[blk].xlt2.vsig_tbl[idx].in_use)
+ return ICE_ERR_DOES_NOT_EXIST;
+
+ ptr = hw->blk[blk].xlt2.vsig_tbl[idx].first_vsi;
+ while (ptr) {
+ (*refs)++;
+ ptr = ptr->next_vsi;
+ }
+
+ return 0;
+}
+
+/**
+ * ice_has_prof_vsig - check to see if VSIG has a specific profile
+ * @hw: pointer to the hardware structure
+ * @blk: HW block
+ * @vsig: VSIG to check against
+ * @hdl: profile handle
+ */
+static bool
+ice_has_prof_vsig(struct ice_hw *hw, enum ice_block blk, u16 vsig, u64 hdl)
+{
+ u16 idx = vsig & ICE_VSIG_IDX_M;
+ struct ice_vsig_prof *ent;
+
+ list_for_each_entry(ent, &hw->blk[blk].xlt2.vsig_tbl[idx].prop_lst,
+ list)
+ if (ent->profile_cookie == hdl)
+ return true;
+
+ ice_debug(hw, ICE_DBG_INIT,
+ "Characteristic list for VSI group %d not found.\n",
+ vsig);
+ return false;
+}
+
+/**
+ * ice_prof_bld_es - build profile ID extraction sequence changes
+ * @hw: pointer to the HW struct
+ * @blk: hardware block
+ * @bld: the update package buffer build to add to
+ * @chgs: the list of changes to make in hardware
+ */
+static enum ice_status
+ice_prof_bld_es(struct ice_hw *hw, enum ice_block blk,
+ struct ice_buf_build *bld, struct list_head *chgs)
+{
+ u16 vec_size = hw->blk[blk].es.fvw * sizeof(struct ice_fv_word);
+ struct ice_chs_chg *tmp;
+
+ list_for_each_entry(tmp, chgs, list_entry)
+ if (tmp->type == ICE_PTG_ES_ADD && tmp->add_prof) {
+ u16 off = tmp->prof_id * hw->blk[blk].es.fvw;
+ struct ice_pkg_es *p;
+ u32 id;
+
+ id = ice_sect_id(blk, ICE_VEC_TBL);
+ p = (struct ice_pkg_es *)
+ ice_pkg_buf_alloc_section(bld, id, sizeof(*p) +
+ vec_size -
+ sizeof(p->es[0]));
+
+ if (!p)
+ return ICE_ERR_MAX_LIMIT;
+
+ p->count = cpu_to_le16(1);
+ p->offset = cpu_to_le16(tmp->prof_id);
+
+ memcpy(p->es, &hw->blk[blk].es.t[off], vec_size);
+ }
+
+ return 0;
+}
+
+/**
+ * ice_prof_bld_tcam - build profile ID TCAM changes
+ * @hw: pointer to the HW struct
+ * @blk: hardware block
+ * @bld: the update package buffer build to add to
+ * @chgs: the list of changes to make in hardware
+ */
+static enum ice_status
+ice_prof_bld_tcam(struct ice_hw *hw, enum ice_block blk,
+ struct ice_buf_build *bld, struct list_head *chgs)
+{
+ struct ice_chs_chg *tmp;
+
+ list_for_each_entry(tmp, chgs, list_entry)
+ if (tmp->type == ICE_TCAM_ADD && tmp->add_tcam_idx) {
+ struct ice_prof_id_section *p;
+ u32 id;
+
+ id = ice_sect_id(blk, ICE_PROF_TCAM);
+ p = (struct ice_prof_id_section *)
+ ice_pkg_buf_alloc_section(bld, id, sizeof(*p));
+
+ if (!p)
+ return ICE_ERR_MAX_LIMIT;
+
+ p->count = cpu_to_le16(1);
+ p->entry[0].addr = cpu_to_le16(tmp->tcam_idx);
+ p->entry[0].prof_id = tmp->prof_id;
+
+ memcpy(p->entry[0].key,
+ &hw->blk[blk].prof.t[tmp->tcam_idx].key,
+ sizeof(hw->blk[blk].prof.t->key));
+ }
+
+ return 0;
+}
+
+/**
+ * ice_prof_bld_xlt1 - build XLT1 changes
+ * @blk: hardware block
+ * @bld: the update package buffer build to add to
+ * @chgs: the list of changes to make in hardware
+ */
+static enum ice_status
+ice_prof_bld_xlt1(enum ice_block blk, struct ice_buf_build *bld,
+ struct list_head *chgs)
+{
+ struct ice_chs_chg *tmp;
+
+ list_for_each_entry(tmp, chgs, list_entry)
+ if (tmp->type == ICE_PTG_ES_ADD && tmp->add_ptg) {
+ struct ice_xlt1_section *p;
+ u32 id;
+
+ id = ice_sect_id(blk, ICE_XLT1);
+ p = (struct ice_xlt1_section *)
+ ice_pkg_buf_alloc_section(bld, id, sizeof(*p));
+
+ if (!p)
+ return ICE_ERR_MAX_LIMIT;
+
+ p->count = cpu_to_le16(1);
+ p->offset = cpu_to_le16(tmp->ptype);
+ p->value[0] = tmp->ptg;
+ }
+
+ return 0;
+}
+
+/**
+ * ice_prof_bld_xlt2 - build XLT2 changes
+ * @blk: hardware block
+ * @bld: the update package buffer build to add to
+ * @chgs: the list of changes to make in hardware
+ */
+static enum ice_status
+ice_prof_bld_xlt2(enum ice_block blk, struct ice_buf_build *bld,
+ struct list_head *chgs)
+{
+ struct ice_chs_chg *tmp;
+
+ list_for_each_entry(tmp, chgs, list_entry) {
+ struct ice_xlt2_section *p;
+ u32 id;
+
+ switch (tmp->type) {
+ case ICE_VSIG_ADD:
+ case ICE_VSI_MOVE:
+ case ICE_VSIG_REM:
+ id = ice_sect_id(blk, ICE_XLT2);
+ p = (struct ice_xlt2_section *)
+ ice_pkg_buf_alloc_section(bld, id, sizeof(*p));
+
+ if (!p)
+ return ICE_ERR_MAX_LIMIT;
+
+ p->count = cpu_to_le16(1);
+ p->offset = cpu_to_le16(tmp->vsi);
+ p->value[0] = cpu_to_le16(tmp->vsig);
+ break;
+ default:
+ break;
+ }
+ }
+
+ return 0;
+}
+
+/**
+ * ice_upd_prof_hw - update hardware using the change list
+ * @hw: pointer to the HW struct
+ * @blk: hardware block
+ * @chgs: the list of changes to make in hardware
+ */
+static enum ice_status
+ice_upd_prof_hw(struct ice_hw *hw, enum ice_block blk,
+ struct list_head *chgs)
+{
+ struct ice_buf_build *b;
+ struct ice_chs_chg *tmp;
+ enum ice_status status;
+ u16 pkg_sects;
+ u16 xlt1 = 0;
+ u16 xlt2 = 0;
+ u16 tcam = 0;
+ u16 es = 0;
+ u16 sects;
+
+ /* count number of sections we need */
+ list_for_each_entry(tmp, chgs, list_entry) {
+ switch (tmp->type) {
+ case ICE_PTG_ES_ADD:
+ if (tmp->add_ptg)
+ xlt1++;
+ if (tmp->add_prof)
+ es++;
+ break;
+ case ICE_TCAM_ADD:
+ tcam++;
+ break;
+ case ICE_VSIG_ADD:
+ case ICE_VSI_MOVE:
+ case ICE_VSIG_REM:
+ xlt2++;
+ break;
+ default:
+ break;
+ }
+ }
+ sects = xlt1 + xlt2 + tcam + es;
+
+ if (!sects)
+ return 0;
+
+ /* Build update package buffer */
+ b = ice_pkg_buf_alloc(hw);
+ if (!b)
+ return ICE_ERR_NO_MEMORY;
+
+ status = ice_pkg_buf_reserve_section(b, sects);
+ if (status)
+ goto error_tmp;
+
+ /* Preserve order of table update: ES, TCAM, PTG, VSIG */
+ if (es) {
+ status = ice_prof_bld_es(hw, blk, b, chgs);
+ if (status)
+ goto error_tmp;
+ }
+
+ if (tcam) {
+ status = ice_prof_bld_tcam(hw, blk, b, chgs);
+ if (status)
+ goto error_tmp;
+ }
+
+ if (xlt1) {
+ status = ice_prof_bld_xlt1(blk, b, chgs);
+ if (status)
+ goto error_tmp;
+ }
+
+ if (xlt2) {
+ status = ice_prof_bld_xlt2(blk, b, chgs);
+ if (status)
+ goto error_tmp;
+ }
+
+ /* After package buffer build check if the section count in buffer is
+ * non-zero and matches the number of sections detected for package
+ * update.
+ */
+ pkg_sects = ice_pkg_buf_get_active_sections(b);
+ if (!pkg_sects || pkg_sects != sects) {
+ status = ICE_ERR_INVAL_SIZE;
+ goto error_tmp;
+ }
+
+ /* update package */
+ status = ice_update_pkg(hw, ice_pkg_buf(b), 1);
+ if (status == ICE_ERR_AQ_ERROR)
+ ice_debug(hw, ICE_DBG_INIT, "Unable to update HW profile\n");
+
+error_tmp:
+ ice_pkg_buf_free(hw, b);
+ return status;
+}
+
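For reference, a hedged sketch of how a caller might hand this function a single extraction-sequence change; real callers accumulate entries on the list while editing the software tables, and the allocation and field values here are illustrative:

    struct ice_chs_chg *chg;
    struct list_head chgs;
    enum ice_status status;

    INIT_LIST_HEAD(&chgs);

    chg = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*chg), GFP_KERNEL);
    if (!chg)
            return ICE_ERR_NO_MEMORY;

    chg->type = ICE_PTG_ES_ADD;
    chg->add_prof = 1;              /* emits an ICE_VEC_TBL section */
    chg->prof_id = prof_id;         /* assumed previously allocated */

    list_add(&chg->list_entry, &chgs);
    status = ice_upd_prof_hw(hw, blk, &chgs);
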
+/**
+ * ice_add_prof - add profile
+ * @hw: pointer to the HW struct
+ * @blk: hardware block
+ * @id: profile tracking ID
+ * @ptypes: array of bitmaps indicating ptypes (ICE_FLOW_PTYPE_MAX bits)
+ * @es: extraction sequence (length of array is determined by the block)
+ *
+ * This function registers a profile, which matches a set of PTGs with a
+ * particular extraction sequence. While the hardware profile is allocated
+ * here, it will not be written until the first call to ice_add_flow that
+ * specifies the ID value used here.
+ */
+enum ice_status
+ice_add_prof(struct ice_hw *hw, enum ice_block blk, u64 id, u8 ptypes[],
+ struct ice_fv_word *es)
+{
+ u32 bytes = DIV_ROUND_UP(ICE_FLOW_PTYPE_MAX, BITS_PER_BYTE);
+ DECLARE_BITMAP(ptgs_used, ICE_XLT1_CNT);
+ struct ice_prof_map *prof;
+ enum ice_status status;
+ u32 byte = 0;
+ u8 prof_id;
+
+ bitmap_zero(ptgs_used, ICE_XLT1_CNT);
+
+ mutex_lock(&hw->blk[blk].es.prof_map_lock);
+
+ /* search for existing profile */
+ status = ice_find_prof_id(hw, blk, es, &prof_id);
+ if (status) {
+ /* allocate profile ID */
+ status = ice_alloc_prof_id(hw, blk, &prof_id);
+ if (status)
+ goto err_ice_add_prof;
+
+ /* and write new es */
+ ice_write_es(hw, blk, prof_id, es);
+ }
+
+ ice_prof_inc_ref(hw, blk, prof_id);
+
+ /* add profile info */
+ prof = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*prof), GFP_KERNEL);
+ if (!prof) {
+ status = ICE_ERR_NO_MEMORY;
+ goto err_ice_add_prof;
+ }
+
+ prof->profile_cookie = id;
+ prof->prof_id = prof_id;
+ prof->ptg_cnt = 0;
+ prof->context = 0;
+
+ /* build list of ptgs */
+ while (bytes && prof->ptg_cnt < ICE_MAX_PTG_PER_PROFILE) {
+ u32 bit;
+
+ if (!ptypes[byte]) {
+ bytes--;
+ byte++;
+ continue;
+ }
+
+ /* Examine 8 bits per byte */
+ for_each_set_bit(bit, (unsigned long *)&ptypes[byte],
+ BITS_PER_BYTE) {
+ u16 ptype;
+ u8 ptg;
+ u8 m;
+
+ ptype = byte * BITS_PER_BYTE + bit;
+
+ /* The package should place all ptypes in a non-zero
+ * PTG, so the following call should never fail.
+ */
+ if (ice_ptg_find_ptype(hw, blk, ptype, &ptg))
+ continue;
+
+ /* If PTG is already added, skip and continue */
+ if (test_bit(ptg, ptgs_used))
+ continue;
+
+ set_bit(ptg, ptgs_used);
+ prof->ptg[prof->ptg_cnt] = ptg;
+
+ if (++prof->ptg_cnt >= ICE_MAX_PTG_PER_PROFILE)
+ break;
+
+ /* m masks off 'bit' and everything below it; if no
+ * higher bits remain set in this byte, exit the scan
+ */
+ m = ~((1 << (bit + 1)) - 1);
+ if (!(ptypes[byte] & m))
+ break;
+ }
+
+ bytes--;
+ byte++;
+ }
+
+ list_add(&prof->list, &hw->blk[blk].es.prof_map);
+ status = 0;
+
+err_ice_add_prof:
+ mutex_unlock(&hw->blk[blk].es.prof_map_lock);
+ return status;
+}
+
+/**
+ * ice_search_prof_id_low - Search for a profile tracking ID low level
+ * @hw: pointer to the HW struct
+ * @blk: hardware block
+ * @id: profile tracking ID
+ *
+ * This will search for a profile tracking ID which was previously added. This
+ * version assumes that the caller has already acquired the prof map lock.
+ */
+static struct ice_prof_map *
+ice_search_prof_id_low(struct ice_hw *hw, enum ice_block blk, u64 id)
+{
+ struct ice_prof_map *entry = NULL;
+ struct ice_prof_map *map;
+
+ list_for_each_entry(map, &hw->blk[blk].es.prof_map, list)
+ if (map->profile_cookie == id) {
+ entry = map;
+ break;
+ }
+
+ return entry;
+}
+
+/**
+ * ice_search_prof_id - Search for a profile tracking ID
+ * @hw: pointer to the HW struct
+ * @blk: hardware block
+ * @id: profile tracking ID
+ *
+ * This will search for a profile tracking ID which was previously added.
+ */
+static struct ice_prof_map *
+ice_search_prof_id(struct ice_hw *hw, enum ice_block blk, u64 id)
+{
+ struct ice_prof_map *entry;
+
+ mutex_lock(&hw->blk[blk].es.prof_map_lock);
+ entry = ice_search_prof_id_low(hw, blk, id);
+ mutex_unlock(&hw->blk[blk].es.prof_map_lock);
+
+ return entry;
+}
+
+/**
+ * ice_vsig_prof_id_count - count profiles in a VSIG
+ * @hw: pointer to the HW struct
+ * @blk: hardware block
+ * @vsig: VSIG whose profiles are to be counted
+ */
+static u16
+ice_vsig_prof_id_count(struct ice_hw *hw, enum ice_block blk, u16 vsig)
+{
+ u16 idx = vsig & ICE_VSIG_IDX_M, count = 0;
+ struct ice_vsig_prof *p;
+
+ list_for_each_entry(p, &hw->blk[blk].xlt2.vsig_tbl[idx].prop_lst,
+ list)
+ count++;
+
+ return count;
+}
+
+/**
+ * ice_rel_tcam_idx - release a TCAM index
+ * @hw: pointer to the HW struct
+ * @blk: hardware block
+ * @idx: the index to release
+ */
+static enum ice_status
+ice_rel_tcam_idx(struct ice_hw *hw, enum ice_block blk, u16 idx)
+{
+ /* Masks to invoke a never match entry */
+ u8 vl_msk[ICE_TCAM_KEY_VAL_SZ] = { 0xFF, 0xFF, 0xFF, 0xFF, 0xFF };
+ u8 dc_msk[ICE_TCAM_KEY_VAL_SZ] = { 0xFE, 0xFF, 0xFF, 0xFF, 0xFF };
+ u8 nm_msk[ICE_TCAM_KEY_VAL_SZ] = { 0x01, 0x00, 0x00, 0x00, 0x00 };
+ enum ice_status status;
+
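+ /* A sketch of the intent here, inferred from the masks above: bit 0
+ * of the key is excluded from the don't-care mask (0xFE) and flagged
+ * in the never-match mask (0x01), so the rewritten entry can never
+ * match a packet while it waits to be freed.
+ */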
+ /* write the TCAM entry */
+ status = ice_tcam_write_entry(hw, blk, idx, 0, 0, 0, 0, 0, vl_msk,
+ dc_msk, nm_msk);
+ if (status)
+ return status;
+
+ /* release the TCAM entry */
+ status = ice_free_tcam_ent(hw, blk, idx);
+
+ return status;
+}
+
+/**
+ * ice_rem_prof_id - remove one profile from a VSIG
+ * @hw: pointer to the HW struct
+ * @blk: hardware block
+ * @prof: pointer to profile structure to remove
+ */
+static enum ice_status
+ice_rem_prof_id(struct ice_hw *hw, enum ice_block blk,
+ struct ice_vsig_prof *prof)
+{
+ enum ice_status status;
+ u16 i;
+
+ for (i = 0; i < prof->tcam_count; i++)
+ if (prof->tcam[i].in_use) {
+ prof->tcam[i].in_use = false;
+ status = ice_rel_tcam_idx(hw, blk,
+ prof->tcam[i].tcam_idx);
+ if (status)
+ return ICE_ERR_HW_TABLE;
+ }
+
+ return 0;
+}
+
+/**
+ * ice_rem_vsig - remove VSIG
+ * @hw: pointer to the HW struct
+ * @blk: hardware block
+ * @vsig: the VSIG to remove
+ * @chg: the change list
+ */
+static enum ice_status
+ice_rem_vsig(struct ice_hw *hw, enum ice_block blk, u16 vsig,
+ struct list_head *chg)
+{
+ u16 idx = vsig & ICE_VSIG_IDX_M;
+ struct ice_vsig_vsi *vsi_cur;
+ struct ice_vsig_prof *d, *t;
+ enum ice_status status;
+
+ /* remove TCAM entries */
+ list_for_each_entry_safe(d, t,
+ &hw->blk[blk].xlt2.vsig_tbl[idx].prop_lst,
+ list) {
+ status = ice_rem_prof_id(hw, blk, d);
+ if (status)
+ return status;
+
+ list_del(&d->list);
+ devm_kfree(ice_hw_to_dev(hw), d);
+ }
+
+ /* Move all VSIs associated with this VSIG to the default VSIG */
+ vsi_cur = hw->blk[blk].xlt2.vsig_tbl[idx].first_vsi;
+ /* If the VSIG has at least 1 VSI then iterate through the list
+ * and remove the VSIs before deleting the group.
+ */
+ if (vsi_cur)
+ do {
+ struct ice_vsig_vsi *tmp = vsi_cur->next_vsi;
+ struct ice_chs_chg *p;
+
+ p = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*p),
+ GFP_KERNEL);
+ if (!p)
+ return ICE_ERR_NO_MEMORY;
+
+ p->type = ICE_VSIG_REM;
+ p->orig_vsig = vsig;
+ p->vsig = ICE_DEFAULT_VSIG;
+ p->vsi = vsi_cur - hw->blk[blk].xlt2.vsis;
+
+ list_add(&p->list_entry, chg);
+
+ vsi_cur = tmp;
+ } while (vsi_cur);
+
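+ /* The ICE_VSIG_REM records queued above are consumed later by
+ * ice_upd_prof_hw(), which counts each one as an XLT2 section and
+ * builds the corresponding table update.
+ */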
+ return ice_vsig_free(hw, blk, vsig);
+}
+
+/**
+ * ice_rem_prof_id_vsig - remove a specific profile from a VSIG
+ * @hw: pointer to the HW struct
+ * @blk: hardware block
+ * @vsig: VSIG to remove the profile from
+ * @hdl: profile handle indicating which profile to remove
+ * @chg: list to receive a record of changes
+ */
+static enum ice_status
+ice_rem_prof_id_vsig(struct ice_hw *hw, enum ice_block blk, u16 vsig, u64 hdl,
+ struct list_head *chg)
+{
+ u16 idx = vsig & ICE_VSIG_IDX_M;
+ struct ice_vsig_prof *p, *t;
+ enum ice_status status;
+
+ list_for_each_entry_safe(p, t,
+ &hw->blk[blk].xlt2.vsig_tbl[idx].prop_lst,
+ list)
+ if (p->profile_cookie == hdl) {
+ if (ice_vsig_prof_id_count(hw, blk, vsig) == 1)
+ /* this is the last profile, remove the VSIG */
+ return ice_rem_vsig(hw, blk, vsig, chg);
+
+ status = ice_rem_prof_id(hw, blk, p);
+ if (!status) {
+ list_del(&p->list);
+ devm_kfree(ice_hw_to_dev(hw), p);
+ }
+ return status;
+ }
+
+ return ICE_ERR_DOES_NOT_EXIST;
+}
+
+/**
+ * ice_rem_flow_all - remove all flows with a particular profile
+ * @hw: pointer to the HW struct
+ * @blk: hardware block
+ * @id: profile tracking ID
+ */
+static enum ice_status
+ice_rem_flow_all(struct ice_hw *hw, enum ice_block blk, u64 id)
+{
+ struct ice_chs_chg *del, *tmp;
+ enum ice_status status;
+ struct list_head chg;
+ u16 i;
+
+ INIT_LIST_HEAD(&chg);
+
+ for (i = 1; i < ICE_MAX_VSIGS; i++)
+ if (hw->blk[blk].xlt2.vsig_tbl[i].in_use) {
+ if (ice_has_prof_vsig(hw, blk, i, id)) {
+ status = ice_rem_prof_id_vsig(hw, blk, i, id,
+ &chg);
+ if (status)
+ goto err_ice_rem_flow_all;
+ }
+ }
+
+ status = ice_upd_prof_hw(hw, blk, &chg);
+
+err_ice_rem_flow_all:
+ list_for_each_entry_safe(del, tmp, &chg, list_entry) {
+ list_del(&del->list_entry);
+ devm_kfree(ice_hw_to_dev(hw), del);
+ }
+
+ return status;
+}
+
+/**
+ * ice_rem_prof - remove profile
+ * @hw: pointer to the HW struct
+ * @blk: hardware block
+ * @id: profile tracking ID
+ *
+ * This will remove the profile specified by the ID parameter, which was
+ * previously created through ice_add_prof. If any existing entries
+ * are associated with this profile, they will be removed as well.
+ */
+enum ice_status ice_rem_prof(struct ice_hw *hw, enum ice_block blk, u64 id)
+{
+ struct ice_prof_map *pmap;
+ enum ice_status status;
+
+ mutex_lock(&hw->blk[blk].es.prof_map_lock);
+
+ pmap = ice_search_prof_id_low(hw, blk, id);
+ if (!pmap) {
+ status = ICE_ERR_DOES_NOT_EXIST;
+ goto err_ice_rem_prof;
+ }
+
+ /* remove all flows with this profile */
+ status = ice_rem_flow_all(hw, blk, pmap->profile_cookie);
+ if (status)
+ goto err_ice_rem_prof;
+
+ /* dereference profile, and possibly remove */
+ ice_prof_dec_ref(hw, blk, pmap->prof_id);
+
+ list_del(&pmap->list);
+ devm_kfree(ice_hw_to_dev(hw), pmap);
+
+err_ice_rem_prof:
+ mutex_unlock(&hw->blk[blk].es.prof_map_lock);
+ return status;
+}
+
+/**
+ * ice_get_prof - get profile
+ * @hw: pointer to the HW struct
+ * @blk: hardware block
+ * @hdl: profile handle
+ * @chg: change list
+ */
+static enum ice_status
+ice_get_prof(struct ice_hw *hw, enum ice_block blk, u64 hdl,
+ struct list_head *chg)
+{
+ struct ice_prof_map *map;
+ struct ice_chs_chg *p;
+ u16 i;
+
+ /* Get the details on the profile specified by the handle ID */
+ map = ice_search_prof_id(hw, blk, hdl);
+ if (!map)
+ return ICE_ERR_DOES_NOT_EXIST;
+
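+ /* Note: although the loop below walks every PTG in the map, the
+ * es.written flag ensures at most one ICE_PTG_ES_ADD record is
+ * queued per profile ID.
+ */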
+ for (i = 0; i < map->ptg_cnt; i++)
+ if (!hw->blk[blk].es.written[map->prof_id]) {
+ /* add ES to change list */
+ p = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*p),
+ GFP_KERNEL);
+ if (!p)
+ goto err_ice_get_prof;
+
+ p->type = ICE_PTG_ES_ADD;
+ p->ptype = 0;
+ p->ptg = map->ptg[i];
+ p->add_ptg = 0;
+
+ p->add_prof = 1;
+ p->prof_id = map->prof_id;
+
+ hw->blk[blk].es.written[map->prof_id] = true;
+
+ list_add(&p->list_entry, chg);
+ }
+
+ return 0;
+
+err_ice_get_prof:
+ /* let caller clean up the change list */
+ return ICE_ERR_NO_MEMORY;
+}
+
+/**
+ * ice_get_profs_vsig - get a copy of the list of profiles from a VSIG
+ * @hw: pointer to the HW struct
+ * @blk: hardware block
+ * @vsig: VSIG from which to copy the list
+ * @lst: output list
+ *
+ * This routine makes a copy of the list of profiles in the specified VSIG.
+ */
+static enum ice_status
+ice_get_profs_vsig(struct ice_hw *hw, enum ice_block blk, u16 vsig,
+ struct list_head *lst)
+{
+ struct ice_vsig_prof *ent1, *ent2;
+ u16 idx = vsig & ICE_VSIG_IDX_M;
+
+ list_for_each_entry(ent1, &hw->blk[blk].xlt2.vsig_tbl[idx].prop_lst,
+ list) {
+ struct ice_vsig_prof *p;
+
+ /* copy to the input list */
+ p = devm_kmemdup(ice_hw_to_dev(hw), ent1, sizeof(*p),
+ GFP_KERNEL);
+ if (!p)
+ goto err_ice_get_profs_vsig;
+
+ list_add_tail(&p->list, lst);
+ }
+
+ return 0;
+
+err_ice_get_profs_vsig:
+ list_for_each_entry_safe(ent1, ent2, lst, list) {
+ list_del(&ent1->list);
+ devm_kfree(ice_hw_to_dev(hw), ent1);
+ }
+
+ return ICE_ERR_NO_MEMORY;
+}
+
+/**
+ * ice_add_prof_to_lst - add profile entry to a list
+ * @hw: pointer to the HW struct
+ * @blk: hardware block
+ * @lst: the list to be added to
+ * @hdl: profile handle of entry to add
+ */
+static enum ice_status
+ice_add_prof_to_lst(struct ice_hw *hw, enum ice_block blk,
+ struct list_head *lst, u64 hdl)
+{
+ struct ice_prof_map *map;
+ struct ice_vsig_prof *p;
+ u16 i;
+
+ map = ice_search_prof_id(hw, blk, hdl);
+ if (!map)
+ return ICE_ERR_DOES_NOT_EXIST;
+
+ p = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*p), GFP_KERNEL);
+ if (!p)
+ return ICE_ERR_NO_MEMORY;
+
+ p->profile_cookie = map->profile_cookie;
+ p->prof_id = map->prof_id;
+ p->tcam_count = map->ptg_cnt;
+
+ for (i = 0; i < map->ptg_cnt; i++) {
+ p->tcam[i].prof_id = map->prof_id;
+ p->tcam[i].tcam_idx = ICE_INVALID_TCAM;
+ p->tcam[i].ptg = map->ptg[i];
+ }
+
+ list_add(&p->list, lst);
+
+ return 0;
+}
+
+/**
+ * ice_move_vsi - move VSI to another VSIG
+ * @hw: pointer to the HW struct
+ * @blk: hardware block
+ * @vsi: the VSI to move
+ * @vsig: the VSIG to move the VSI to
+ * @chg: the change list
+ */
+static enum ice_status
+ice_move_vsi(struct ice_hw *hw, enum ice_block blk, u16 vsi, u16 vsig,
+ struct list_head *chg)
+{
+ enum ice_status status;
+ struct ice_chs_chg *p;
+ u16 orig_vsig;
+
+ p = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*p), GFP_KERNEL);
+ if (!p)
+ return ICE_ERR_NO_MEMORY;
+
+ status = ice_vsig_find_vsi(hw, blk, vsi, &orig_vsig);
+ if (!status)
+ status = ice_vsig_add_mv_vsi(hw, blk, vsi, vsig);
+
+ if (status) {
+ devm_kfree(ice_hw_to_dev(hw), p);
+ return status;
+ }
+
+ p->type = ICE_VSI_MOVE;
+ p->vsi = vsi;
+ p->orig_vsig = orig_vsig;
+ p->vsig = vsig;
+
+ list_add(&p->list_entry, chg);
+
+ return 0;
+}
+
+/**
+ * ice_prof_tcam_ena_dis - add enable or disable TCAM change
+ * @hw: pointer to the HW struct
+ * @blk: hardware block
+ * @enable: true to enable, false to disable
+ * @vsig: the VSIG of the TCAM entry
+ * @tcam: pointer to the TCAM info structure of the TCAM to enable or disable
+ * @chg: the change list
+ *
+ * This function appends an enable or disable TCAM entry in the change log
+ */
+static enum ice_status
+ice_prof_tcam_ena_dis(struct ice_hw *hw, enum ice_block blk, bool enable,
+ u16 vsig, struct ice_tcam_inf *tcam,
+ struct list_head *chg)
+{
+ enum ice_status status;
+ struct ice_chs_chg *p;
+
+ /* Default: enable means change the low flag bit to don't care */
+ u8 dc_msk[ICE_TCAM_KEY_VAL_SZ] = { 0x01, 0x00, 0x00, 0x00, 0x00 };
+ u8 nm_msk[ICE_TCAM_KEY_VAL_SZ] = { 0x00, 0x00, 0x00, 0x00, 0x00 };
+ u8 vl_msk[ICE_TCAM_KEY_VAL_SZ] = { 0x01, 0x00, 0x00, 0x00, 0x00 };
+
+ /* if disabling, free the TCAM */
+ if (!enable) {
+ status = ice_free_tcam_ent(hw, blk, tcam->tcam_idx);
+ tcam->tcam_idx = 0;
+ tcam->in_use = 0;
+ return status;
+ }
+
+ /* for re-enabling, reallocate a TCAM */
+ status = ice_alloc_tcam_ent(hw, blk, &tcam->tcam_idx);
+ if (status)
+ return status;
+
+ /* add TCAM to change list */
+ p = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*p), GFP_KERNEL);
+ if (!p)
+ return ICE_ERR_NO_MEMORY;
+
+ status = ice_tcam_write_entry(hw, blk, tcam->tcam_idx, tcam->prof_id,
+ tcam->ptg, vsig, 0, 0, vl_msk, dc_msk,
+ nm_msk);
+ if (status)
+ goto err_ice_prof_tcam_ena_dis;
+
+ tcam->in_use = 1;
+
+ p->type = ICE_TCAM_ADD;
+ p->add_tcam_idx = true;
+ p->prof_id = tcam->prof_id;
+ p->ptg = tcam->ptg;
+ p->vsig = 0;
+ p->tcam_idx = tcam->tcam_idx;
+
+ /* log change */
+ list_add(&p->list_entry, chg);
+
+ return 0;
+
+err_ice_prof_tcam_ena_dis:
+ devm_kfree(ice_hw_to_dev(hw), p);
+ return status;
+}
+
+/**
+ * ice_adj_prof_priorities - adjust profile based on priorities
+ * @hw: pointer to the HW struct
+ * @blk: hardware block
+ * @vsig: the VSIG for which to adjust profile priorities
+ * @chg: the change list
+ */
+static enum ice_status
+ice_adj_prof_priorities(struct ice_hw *hw, enum ice_block blk, u16 vsig,
+ struct list_head *chg)
+{
+ DECLARE_BITMAP(ptgs_used, ICE_XLT1_CNT);
+ struct ice_vsig_prof *t;
+ enum ice_status status;
+ u16 idx;
+
+ bitmap_zero(ptgs_used, ICE_XLT1_CNT);
+ idx = vsig & ICE_VSIG_IDX_M;
+
+ /* Priority is based on the order in which the profiles are added. The
+ * newest added profile has highest priority and the oldest added
+ * profile has the lowest priority. Since the profile property list for
+ * a VSIG is sorted from newest to oldest, this code traverses the list
+ * in order and enables the first of each PTG that it finds (that is not
+ * already enabled); it also disables any duplicate PTGs that it finds
+ * in the older profiles (that are currently enabled).
+ */
+
+ list_for_each_entry(t, &hw->blk[blk].xlt2.vsig_tbl[idx].prop_lst,
+ list) {
+ u16 i;
+
+ for (i = 0; i < t->tcam_count; i++) {
+ /* Scan the priorities from newest to oldest.
+ * Make sure that the newest profiles take priority.
+ */
+ if (test_bit(t->tcam[i].ptg, ptgs_used) &&
+ t->tcam[i].in_use) {
+ /* need to mark this PTG as never match, as it
+ * was already in use and therefore duplicate
+ * (and lower priority)
+ */
+ status = ice_prof_tcam_ena_dis(hw, blk, false,
+ vsig,
+ &t->tcam[i],
+ chg);
+ if (status)
+ return status;
+ } else if (!test_bit(t->tcam[i].ptg, ptgs_used) &&
+ !t->tcam[i].in_use) {
+ /* need to enable this PTG, as it is not in use
+ * and not enabled (highest priority)
+ */
+ status = ice_prof_tcam_ena_dis(hw, blk, true,
+ vsig,
+ &t->tcam[i],
+ chg);
+ if (status)
+ return status;
+ }
+
+ /* keep track of used ptgs */
+ set_bit(t->tcam[i].ptg, ptgs_used);
+ }
+ }
+
+ return 0;
+}
+
+/**
+ * ice_add_prof_id_vsig - add profile to VSIG
+ * @hw: pointer to the HW struct
+ * @blk: hardware block
+ * @vsig: the VSIG to which this profile is to be added
+ * @hdl: the profile handle indicating the profile to add
+ * @chg: the change list
+ */
+static enum ice_status
+ice_add_prof_id_vsig(struct ice_hw *hw, enum ice_block blk, u16 vsig, u64 hdl,
+ struct list_head *chg)
+{
+ /* Masks that ignore flags */
+ u8 vl_msk[ICE_TCAM_KEY_VAL_SZ] = { 0xFF, 0xFF, 0xFF, 0xFF, 0xFF };
+ u8 dc_msk[ICE_TCAM_KEY_VAL_SZ] = { 0xFF, 0xFF, 0x00, 0x00, 0x00 };
+ u8 nm_msk[ICE_TCAM_KEY_VAL_SZ] = { 0x00, 0x00, 0x00, 0x00, 0x00 };
+ struct ice_prof_map *map;
+ struct ice_vsig_prof *t;
+ struct ice_chs_chg *p;
+ u16 i;
+
+ /* Get the details on the profile specified by the handle ID */
+ map = ice_search_prof_id(hw, blk, hdl);
+ if (!map)
+ return ICE_ERR_DOES_NOT_EXIST;
+
+ /* Error, if this VSIG already has this profile */
+ if (ice_has_prof_vsig(hw, blk, vsig, hdl))
+ return ICE_ERR_ALREADY_EXISTS;
+
+ /* new VSIG profile structure */
+ t = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*t), GFP_KERNEL);
+ if (!t)
+ return ICE_ERR_NO_MEMORY;
+
+ t->profile_cookie = map->profile_cookie;
+ t->prof_id = map->prof_id;
+ t->tcam_count = map->ptg_cnt;
+
+ /* create TCAM entries */
+ for (i = 0; i < map->ptg_cnt; i++) {
+ enum ice_status status;
+ u16 tcam_idx;
+
+ /* add TCAM to change list */
+ p = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*p), GFP_KERNEL);
+ if (!p)
+ goto err_ice_add_prof_id_vsig;
+
+ /* allocate the TCAM entry index */
+ status = ice_alloc_tcam_ent(hw, blk, &tcam_idx);
+ if (status) {
+ devm_kfree(ice_hw_to_dev(hw), p);
+ goto err_ice_add_prof_id_vsig;
+ }
+
+ t->tcam[i].ptg = map->ptg[i];
+ t->tcam[i].prof_id = map->prof_id;
+ t->tcam[i].tcam_idx = tcam_idx;
+ t->tcam[i].in_use = true;
+
+ p->type = ICE_TCAM_ADD;
+ p->add_tcam_idx = true;
+ p->prof_id = t->tcam[i].prof_id;
+ p->ptg = t->tcam[i].ptg;
+ p->vsig = vsig;
+ p->tcam_idx = t->tcam[i].tcam_idx;
+
+ /* write the TCAM entry */
+ status = ice_tcam_write_entry(hw, blk, t->tcam[i].tcam_idx,
+ t->tcam[i].prof_id,
+ t->tcam[i].ptg, vsig, 0, 0,
+ vl_msk, dc_msk, nm_msk);
+ if (status)
+ goto err_ice_add_prof_id_vsig;
+
+ /* log change */
+ list_add(&p->list_entry, chg);
+ }
+
+ /* add profile to VSIG */
+ list_add(&t->list,
+ &hw->blk[blk].xlt2.vsig_tbl[(vsig & ICE_VSIG_IDX_M)].prop_lst);
+
+ return 0;
+
+err_ice_add_prof_id_vsig:
+ /* let caller clean up the change list */
+ devm_kfree(ice_hw_to_dev(hw), t);
+ return ICE_ERR_NO_MEMORY;
+}
+
+/**
+ * ice_create_prof_id_vsig - add a new VSIG with a single profile
+ * @hw: pointer to the HW struct
+ * @blk: hardware block
+ * @vsi: the initial VSI that will be in VSIG
+ * @hdl: the profile handle of the profile that will be added to the VSIG
+ * @chg: the change list
+ */
+static enum ice_status
+ice_create_prof_id_vsig(struct ice_hw *hw, enum ice_block blk, u16 vsi, u64 hdl,
+ struct list_head *chg)
+{
+ enum ice_status status;
+ struct ice_chs_chg *p;
+ u16 new_vsig;
+
+ p = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*p), GFP_KERNEL);
+ if (!p)
+ return ICE_ERR_NO_MEMORY;
+
+ new_vsig = ice_vsig_alloc(hw, blk);
+ if (!new_vsig) {
+ status = ICE_ERR_HW_TABLE;
+ goto err_ice_create_prof_id_vsig;
+ }
+
+ status = ice_move_vsi(hw, blk, vsi, new_vsig, chg);
+ if (status)
+ goto err_ice_create_prof_id_vsig;
+
+ status = ice_add_prof_id_vsig(hw, blk, new_vsig, hdl, chg);
+ if (status)
+ goto err_ice_create_prof_id_vsig;
+
+ p->type = ICE_VSIG_ADD;
+ p->vsi = vsi;
+ p->orig_vsig = ICE_DEFAULT_VSIG;
+ p->vsig = new_vsig;
+
+ list_add(&p->list_entry, chg);
+
+ return 0;
+
+err_ice_create_prof_id_vsig:
+ /* let caller clean up the change list */
+ devm_kfree(ice_hw_to_dev(hw), p);
+ return status;
+}
+
+/**
+ * ice_create_vsig_from_lst - create a new VSIG with a list of profiles
+ * @hw: pointer to the HW struct
+ * @blk: hardware block
+ * @vsi: the initial VSI that will be in VSIG
+ * @lst: the list of profile that will be added to the VSIG
+ * @chg: the change list
+ */
+static enum ice_status
+ice_create_vsig_from_lst(struct ice_hw *hw, enum ice_block blk, u16 vsi,
+ struct list_head *lst, struct list_head *chg)
+{
+ struct ice_vsig_prof *t;
+ enum ice_status status;
+ u16 vsig;
+
+ vsig = ice_vsig_alloc(hw, blk);
+ if (!vsig)
+ return ICE_ERR_HW_TABLE;
+
+ status = ice_move_vsi(hw, blk, vsi, vsig, chg);
+ if (status)
+ return status;
+
+ list_for_each_entry(t, lst, list) {
+ status = ice_add_prof_id_vsig(hw, blk, vsig, t->profile_cookie,
+ chg);
+ if (status)
+ return status;
+ }
+
+ return 0;
+}
+
+/**
+ * ice_find_prof_vsig - find a VSIG with a specific profile handle
+ * @hw: pointer to the HW struct
+ * @blk: hardware block
+ * @hdl: the profile handle of the profile to search for
+ * @vsig: returns the VSIG with the matching profile
+ */
+static bool
+ice_find_prof_vsig(struct ice_hw *hw, enum ice_block blk, u64 hdl, u16 *vsig)
+{
+ struct ice_vsig_prof *t;
+ enum ice_status status;
+ struct list_head lst;
+
+ INIT_LIST_HEAD(&lst);
+
+ t = kzalloc(sizeof(*t), GFP_KERNEL);
+ if (!t)
+ return false;
+
+ t->profile_cookie = hdl;
+ list_add(&t->list, &lst);
+
+ status = ice_find_dup_props_vsig(hw, blk, &lst, vsig);
+
+ list_del(&t->list);
+ kfree(t);
+
+ return !status;
+}
+
+/**
+ * ice_add_prof_id_flow - add profile flow
+ * @hw: pointer to the HW struct
+ * @blk: hardware block
+ * @vsi: the VSI to enable with the profile specified by ID
+ * @hdl: profile handle
+ *
+ * Calling this function will update the hardware tables to enable the
+ * profile indicated by the ID parameter for the VSI specified by the
+ * vsi parameter. Once successfully called, the flow will be enabled.
+ */
+enum ice_status
+ice_add_prof_id_flow(struct ice_hw *hw, enum ice_block blk, u16 vsi, u64 hdl)
+{
+ struct ice_vsig_prof *tmp1, *del1;
+ struct ice_chs_chg *tmp, *del;
+ struct list_head union_lst;
+ enum ice_status status;
+ struct list_head chg;
+ u16 vsig;
+
+ INIT_LIST_HEAD(&union_lst);
+ INIT_LIST_HEAD(&chg);
+
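+ /* Outline of what follows: if the VSI already belongs to a VSIG, form
+ * the union of that VSIG's profiles plus the new one, then either move
+ * the VSI to an existing VSIG with that exact set, extend the current
+ * VSIG (when this VSI is its only member), or create a new VSIG from
+ * the union. Otherwise, find or create a VSIG holding just the new
+ * profile.
+ */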
+ /* Get profile */
+ status = ice_get_prof(hw, blk, hdl, &chg);
+ if (status)
+ return status;
+
+ /* determine if VSI is already part of a VSIG */
+ status = ice_vsig_find_vsi(hw, blk, vsi, &vsig);
+ if (!status && vsig) {
+ bool only_vsi;
+ u16 or_vsig;
+ u16 ref;
+
+ /* found in VSIG */
+ or_vsig = vsig;
+
+ /* make sure that there is no overlap/conflict between the new
+ * characteristics and the existing ones; we don't support that
+ * scenario
+ */
+ if (ice_has_prof_vsig(hw, blk, vsig, hdl)) {
+ status = ICE_ERR_ALREADY_EXISTS;
+ goto err_ice_add_prof_id_flow;
+ }
+
+ /* last VSI in the VSIG? */
+ status = ice_vsig_get_ref(hw, blk, vsig, &ref);
+ if (status)
+ goto err_ice_add_prof_id_flow;
+ only_vsi = (ref == 1);
+
+ /* create a union of the current profiles and the one being
+ * added
+ */
+ status = ice_get_profs_vsig(hw, blk, vsig, &union_lst);
+ if (status)
+ goto err_ice_add_prof_id_flow;
+
+ status = ice_add_prof_to_lst(hw, blk, &union_lst, hdl);
+ if (status)
+ goto err_ice_add_prof_id_flow;
+
+ /* search for an existing VSIG with an exact characteristic match */
+ status = ice_find_dup_props_vsig(hw, blk, &union_lst, &vsig);
+ if (!status) {
+ /* move VSI to the VSIG that matches */
+ status = ice_move_vsi(hw, blk, vsi, vsig, &chg);
+ if (status)
+ goto err_ice_add_prof_id_flow;
+
+ /* VSI has been moved out of or_vsig. If the or_vsig had
+ * only that VSI it is now empty and can be removed.
+ */
+ if (only_vsi) {
+ status = ice_rem_vsig(hw, blk, or_vsig, &chg);
+ if (status)
+ goto err_ice_add_prof_id_flow;
+ }
+ } else if (only_vsi) {
+ /* If the original VSIG only contains one VSI, then it
+ * will be the requesting VSI. In this case the VSI is
+ * not sharing entries and we can simply add the new
+ * profile to the VSIG.
+ */
+ status = ice_add_prof_id_vsig(hw, blk, vsig, hdl, &chg);
+ if (status)
+ goto err_ice_add_prof_id_flow;
+
+ /* Adjust priorities */
+ status = ice_adj_prof_priorities(hw, blk, vsig, &chg);
+ if (status)
+ goto err_ice_add_prof_id_flow;
+ } else {
+ /* No match, so we need a new VSIG */
+ status = ice_create_vsig_from_lst(hw, blk, vsi,
+ &union_lst, &chg);
+ if (status)
+ goto err_ice_add_prof_id_flow;
+
+ /* Adjust priorities */
+ status = ice_adj_prof_priorities(hw, blk, vsig, &chg);
+ if (status)
+ goto err_ice_add_prof_id_flow;
+ }
+ } else {
+ /* need to find or add a VSIG */
+ /* search for an existing VSIG with an exact characteristic match */
+ if (ice_find_prof_vsig(hw, blk, hdl, &vsig)) {
+ /* found an exact match */
+ /* add or move VSI to the VSIG that matches */
+ status = ice_move_vsi(hw, blk, vsi, vsig, &chg);
+ if (status)
+ goto err_ice_add_prof_id_flow;
+ } else {
+ /* we did not find an exact match */
+ /* we need to add a VSIG */
+ status = ice_create_prof_id_vsig(hw, blk, vsi, hdl,
+ &chg);
+ if (status)
+ goto err_ice_add_prof_id_flow;
+ }
+ }
+
+ /* update hardware */
+ if (!status)
+ status = ice_upd_prof_hw(hw, blk, &chg);
+
+err_ice_add_prof_id_flow:
+ list_for_each_entry_safe(del, tmp, &chg, list_entry) {
+ list_del(&del->list_entry);
+ devm_kfree(ice_hw_to_dev(hw), del);
+ }
+
+ list_for_each_entry_safe(del1, tmp1, &union_lst, list) {
+ list_del(&del1->list);
+ devm_kfree(ice_hw_to_dev(hw), del1);
+ }
+
+ return status;
+}
+
+/**
+ * ice_rem_prof_from_list - remove a profile from list
+ * @hw: pointer to the HW struct
+ * @lst: list to remove the profile from
+ * @hdl: the profile handle indicating the profile to remove
+ */
+static enum ice_status
+ice_rem_prof_from_list(struct ice_hw *hw, struct list_head *lst, u64 hdl)
+{
+ struct ice_vsig_prof *ent, *tmp;
+
+ list_for_each_entry_safe(ent, tmp, lst, list)
+ if (ent->profile_cookie == hdl) {
+ list_del(&ent->list);
+ devm_kfree(ice_hw_to_dev(hw), ent);
+ return 0;
+ }
+
+ return ICE_ERR_DOES_NOT_EXIST;
+}
+
+/**
+ * ice_rem_prof_id_flow - remove flow
+ * @hw: pointer to the HW struct
+ * @blk: hardware block
+ * @vsi: the VSI from which to remove the profile specified by ID
+ * @hdl: profile tracking handle
+ *
+ * Calling this function will update the hardware tables to remove the
+ * profile indicated by the ID parameter for the VSI specified by the
+ * vsi parameter. Once successfully called, the flow will be disabled.
+ */
+enum ice_status
+ice_rem_prof_id_flow(struct ice_hw *hw, enum ice_block blk, u16 vsi, u64 hdl)
+{
+ struct ice_vsig_prof *tmp1, *del1;
+ struct ice_chs_chg *tmp, *del;
+ struct list_head chg, copy;
+ enum ice_status status;
+ u16 vsig;
+
+ INIT_LIST_HEAD(&copy);
+ INIT_LIST_HEAD(&chg);
+
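+ /* Outline of what follows: if this VSI is the VSIG's only member,
+ * remove the profile (or the whole VSIG, if it was the last profile)
+ * in place. Otherwise, compute the remaining profile list and move
+ * the VSI to a matching VSIG, the default VSIG, or a newly created
+ * one.
+ */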
+ /* determine if VSI is already part of a VSIG */
+ status = ice_vsig_find_vsi(hw, blk, vsi, &vsig);
+ if (!status && vsig) {
+ bool last_profile;
+ bool only_vsi;
+ u16 ref;
+
+ /* found in VSIG */
+ last_profile = ice_vsig_prof_id_count(hw, blk, vsig) == 1;
+ status = ice_vsig_get_ref(hw, blk, vsig, &ref);
+ if (status)
+ goto err_ice_rem_prof_id_flow;
+ only_vsi = (ref == 1);
+
+ if (only_vsi) {
+ /* If the original VSIG only contains one reference,
+ * which will be the requesting VSI, then the VSI is not
+ * sharing entries and we can simply remove the specific
+ * characteristics from the VSIG.
+ */
+
+ if (last_profile) {
+ /* If there are no profiles left for this VSIG,
+ * then simply remove the VSIG.
+ */
+ status = ice_rem_vsig(hw, blk, vsig, &chg);
+ if (status)
+ goto err_ice_rem_prof_id_flow;
+ } else {
+ status = ice_rem_prof_id_vsig(hw, blk, vsig,
+ hdl, &chg);
+ if (status)
+ goto err_ice_rem_prof_id_flow;
+
+ /* Adjust priorities */
+ status = ice_adj_prof_priorities(hw, blk, vsig,
+ &chg);
+ if (status)
+ goto err_ice_rem_prof_id_flow;
+ }
+
+ } else {
+ /* Make a copy of the VSIG's list of Profiles */
+ status = ice_get_profs_vsig(hw, blk, vsig, &copy);
+ if (status)
+ goto err_ice_rem_prof_id_flow;
+
+ /* Remove specified profile entry from the list */
+ status = ice_rem_prof_from_list(hw, &copy, hdl);
+ if (status)
+ goto err_ice_rem_prof_id_flow;
+
+ if (list_empty(&copy)) {
+ status = ice_move_vsi(hw, blk, vsi,
+ ICE_DEFAULT_VSIG, &chg);
+ if (status)
+ goto err_ice_rem_prof_id_flow;
+
+ } else if (!ice_find_dup_props_vsig(hw, blk, &copy,
+ &vsig)) {
+ /* an existing VSIG has the exact same profile
+ * list; move the VSI to that matching VSIG
+ */
+ status = ice_move_vsi(hw, blk, vsi, vsig, &chg);
+ if (status)
+ goto err_ice_rem_prof_id_flow;
+ } else {
+ /* since no existing VSIG supports this
+ * characteristic pattern, we need to create a
+ * new VSIG and TCAM entries
+ */
+ status = ice_create_vsig_from_lst(hw, blk, vsi,
+ &copy, &chg);
+ if (status)
+ goto err_ice_rem_prof_id_flow;
+
+ /* Adjust priorities */
+ status = ice_adj_prof_priorities(hw, blk, vsig,
+ &chg);
+ if (status)
+ goto err_ice_rem_prof_id_flow;
+ }
+ }
+ } else {
+ status = ICE_ERR_DOES_NOT_EXIST;
+ }
+
+ /* update hardware tables */
+ if (!status)
+ status = ice_upd_prof_hw(hw, blk, &chg);
+
+err_ice_rem_prof_id_flow:
+ list_for_each_entry_safe(del, tmp, &chg, list_entry) {
+ list_del(&del->list_entry);
+ devm_kfree(ice_hw_to_dev(hw), del);
+ }
+
+ list_for_each_entry_safe(del1, tmp1, &copy, list) {
+ list_del(&del1->list);
+ devm_kfree(ice_hw_to_dev(hw), del1);
+ }
+
+ return status;
+}
diff --git a/drivers/net/ethernet/intel/ice/ice_flex_pipe.h b/drivers/net/ethernet/intel/ice/ice_flex_pipe.h
index 37eb282742d1..c7b5e1a6ea2b 100644
--- a/drivers/net/ethernet/intel/ice/ice_flex_pipe.h
+++ b/drivers/net/ethernet/intel/ice/ice_flex_pipe.h
@@ -18,6 +18,13 @@
#define ICE_PKG_CNT 4
+enum ice_status
+ice_add_prof(struct ice_hw *hw, enum ice_block blk, u64 id, u8 ptypes[],
+ struct ice_fv_word *es);
+enum ice_status
+ice_add_prof_id_flow(struct ice_hw *hw, enum ice_block blk, u16 vsi, u64 hdl);
+enum ice_status
+ice_rem_prof_id_flow(struct ice_hw *hw, enum ice_block blk, u16 vsi, u64 hdl);
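+/* Hypothetical usage sketch (not part of this patch): a profile is
+ * registered once and then enabled or disabled per VSI, e.g.:
+ *
+ *   status = ice_add_prof(hw, ICE_BLK_RSS, id, ptypes, es);
+ *   status = ice_add_prof_id_flow(hw, ICE_BLK_RSS, vsi, id);
+ *   ...
+ *   status = ice_rem_prof_id_flow(hw, ICE_BLK_RSS, vsi, id);
+ *   status = ice_rem_prof(hw, ICE_BLK_RSS, id);
+ */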
enum ice_status ice_init_pkg(struct ice_hw *hw, u8 *buff, u32 len);
enum ice_status
ice_copy_and_init_pkg(struct ice_hw *hw, const u8 *buf, u32 len);
@@ -26,4 +33,6 @@ void ice_free_seg(struct ice_hw *hw);
void ice_fill_blk_tbls(struct ice_hw *hw);
void ice_clear_hw_tbls(struct ice_hw *hw);
void ice_free_hw_tbls(struct ice_hw *hw);
+enum ice_status
+ice_rem_prof(struct ice_hw *hw, enum ice_block blk, u64 id);
#endif /* _ICE_FLEX_PIPE_H_ */
diff --git a/drivers/net/ethernet/intel/ice/ice_flex_type.h b/drivers/net/ethernet/intel/ice/ice_flex_type.h
index 5d5a7eaffa30..0fb3fe3ff3ea 100644
--- a/drivers/net/ethernet/intel/ice/ice_flex_type.h
+++ b/drivers/net/ethernet/intel/ice/ice_flex_type.h
@@ -3,6 +3,9 @@
#ifndef _ICE_FLEX_TYPE_H_
#define _ICE_FLEX_TYPE_H_
+
+#define ICE_FV_OFFSET_INVAL 0x1FF
+
/* Extraction Sequence (Field Vector) Table */
struct ice_fv_word {
u8 prot_id;
@@ -105,37 +108,57 @@ struct ice_buf_hdr {
sizeof(struct ice_buf_hdr) - (hd_sz)) / (ent_sz))
/* ice package section IDs */
+#define ICE_SID_XLT0_SW 10
+#define ICE_SID_XLT_KEY_BUILDER_SW 11
#define ICE_SID_XLT1_SW 12
#define ICE_SID_XLT2_SW 13
#define ICE_SID_PROFID_TCAM_SW 14
#define ICE_SID_PROFID_REDIR_SW 15
#define ICE_SID_FLD_VEC_SW 16
+#define ICE_SID_CDID_KEY_BUILDER_SW 17
+#define ICE_SID_CDID_REDIR_SW 18
+#define ICE_SID_XLT0_ACL 20
+#define ICE_SID_XLT_KEY_BUILDER_ACL 21
#define ICE_SID_XLT1_ACL 22
#define ICE_SID_XLT2_ACL 23
#define ICE_SID_PROFID_TCAM_ACL 24
#define ICE_SID_PROFID_REDIR_ACL 25
#define ICE_SID_FLD_VEC_ACL 26
+#define ICE_SID_CDID_KEY_BUILDER_ACL 27
+#define ICE_SID_CDID_REDIR_ACL 28
+#define ICE_SID_XLT0_FD 30
+#define ICE_SID_XLT_KEY_BUILDER_FD 31
#define ICE_SID_XLT1_FD 32
#define ICE_SID_XLT2_FD 33
#define ICE_SID_PROFID_TCAM_FD 34
#define ICE_SID_PROFID_REDIR_FD 35
#define ICE_SID_FLD_VEC_FD 36
+#define ICE_SID_CDID_KEY_BUILDER_FD 37
+#define ICE_SID_CDID_REDIR_FD 38
+#define ICE_SID_XLT0_RSS 40
+#define ICE_SID_XLT_KEY_BUILDER_RSS 41
#define ICE_SID_XLT1_RSS 42
#define ICE_SID_XLT2_RSS 43
#define ICE_SID_PROFID_TCAM_RSS 44
#define ICE_SID_PROFID_REDIR_RSS 45
#define ICE_SID_FLD_VEC_RSS 46
+#define ICE_SID_CDID_KEY_BUILDER_RSS 47
+#define ICE_SID_CDID_REDIR_RSS 48
#define ICE_SID_RXPARSER_BOOST_TCAM 56
+#define ICE_SID_XLT0_PE 80
+#define ICE_SID_XLT_KEY_BUILDER_PE 81
#define ICE_SID_XLT1_PE 82
#define ICE_SID_XLT2_PE 83
#define ICE_SID_PROFID_TCAM_PE 84
#define ICE_SID_PROFID_REDIR_PE 85
#define ICE_SID_FLD_VEC_PE 86
+#define ICE_SID_CDID_KEY_BUILDER_PE 87
+#define ICE_SID_CDID_REDIR_PE 88
/* Label Metadata section IDs */
#define ICE_SID_LBL_FIRST 0x80000010
@@ -152,6 +175,19 @@ enum ice_block {
ICE_BLK_COUNT
};
+enum ice_sect {
+ ICE_XLT0 = 0,
+ ICE_XLT_KB,
+ ICE_XLT1,
+ ICE_XLT2,
+ ICE_PROF_TCAM,
+ ICE_PROF_REDIR,
+ ICE_VEC_TBL,
+ ICE_CDID_KB,
+ ICE_CDID_REDIR,
+ ICE_SECT_COUNT
+};
+
/* package labels */
struct ice_label {
__le16 value;
@@ -234,6 +270,13 @@ struct ice_prof_redir_section {
u8 redir_value[1];
};
+/* package buffer building */
+
+struct ice_buf_build {
+ struct ice_buf buf;
+ u16 reserved_section_table_entries;
+};
+
struct ice_pkg_enum {
struct ice_buf_table *buf_table;
u32 buf_idx;
@@ -248,6 +291,12 @@ struct ice_pkg_enum {
void *(*handler)(u32 sect_type, void *section, u32 index, u32 *offset);
};
+struct ice_pkg_es {
+ __le16 count;
+ __le16 offset;
+ struct ice_fv_word es[1];
+};
+
struct ice_es {
u32 sid;
u16 count;
@@ -280,6 +329,35 @@ struct ice_ptg_ptype {
u8 ptg;
};
+#define ICE_MAX_TCAM_PER_PROFILE 32
+#define ICE_MAX_PTG_PER_PROFILE 32
+
+struct ice_prof_map {
+ struct list_head list;
+ u64 profile_cookie;
+ u64 context;
+ u8 prof_id;
+ u8 ptg_cnt;
+ u8 ptg[ICE_MAX_PTG_PER_PROFILE];
+};
+
+#define ICE_INVALID_TCAM 0xFFFF
+
+struct ice_tcam_inf {
+ u16 tcam_idx;
+ u8 ptg;
+ u8 prof_id;
+ u8 in_use;
+};
+
+struct ice_vsig_prof {
+ struct list_head list;
+ u64 profile_cookie;
+ u8 prof_id;
+ u8 tcam_count;
+ struct ice_tcam_inf tcam[ICE_MAX_TCAM_PER_PROFILE];
+};
+
struct ice_vsig_entry {
struct list_head prop_lst;
struct ice_vsig_vsi *first_vsi;
@@ -329,6 +407,13 @@ struct ice_xlt2 {
u16 count;
};
+/* Profile ID Management */
+struct ice_prof_id_key {
+ __le16 flags;
+ u8 xlt1;
+ __le16 xlt2_cdid;
+} __packed;
+
/* Keys are made up of two values, each one-half the size of the key.
* For TCAM, the entire key is 80 bits wide (or 2, 40-bit wide values)
*/
@@ -371,4 +456,31 @@ struct ice_blk_info {
u8 is_list_init;
};
+enum ice_chg_type {
+ ICE_TCAM_NONE = 0,
+ ICE_PTG_ES_ADD,
+ ICE_TCAM_ADD,
+ ICE_VSIG_ADD,
+ ICE_VSIG_REM,
+ ICE_VSI_MOVE,
+};
+
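+/* One pending table change; which fields below are meaningful depends on
+ * 'type' (e.g. ICE_TCAM_ADD uses prof_id/ptg/vsig/tcam_idx, while the
+ * VSIG/VSI operations use vsi/vsig/orig_vsig).
+ */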
+struct ice_chs_chg {
+ struct list_head list_entry;
+ enum ice_chg_type type;
+
+ u8 add_ptg;
+ u8 add_vsig;
+ u8 add_tcam_idx;
+ u8 add_prof;
+ u16 ptype;
+ u8 ptg;
+ u8 prof_id;
+ u16 vsi;
+ u16 vsig;
+ u16 orig_vsig;
+ u16 tcam_idx;
+};
+
+#define ICE_FLOW_PTYPE_MAX ICE_XLT1_CNT
#endif /* _ICE_FLEX_TYPE_H_ */
diff --git a/drivers/net/ethernet/intel/ice/ice_flow.c b/drivers/net/ethernet/intel/ice/ice_flow.c
new file mode 100644
index 000000000000..a05ceb59863b
--- /dev/null
+++ b/drivers/net/ethernet/intel/ice/ice_flow.c
@@ -0,0 +1,1275 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2019, Intel Corporation. */
+
+#include "ice_common.h"
+#include "ice_flow.h"
+
+/* Describe properties of a protocol header field */
+struct ice_flow_field_info {
+ enum ice_flow_seg_hdr hdr;
+ s16 off; /* Offset from start of a protocol header, in bits */
+ u16 size; /* Size of the field in bits */
+};
+
+#define ICE_FLOW_FLD_INFO(_hdr, _offset_bytes, _size_bytes) { \
+ .hdr = _hdr, \
+ .off = (_offset_bytes) * BITS_PER_BYTE, \
+ .size = (_size_bytes) * BITS_PER_BYTE, \
+}
+
+/* Table containing properties of supported protocol header fields */
+static const
+struct ice_flow_field_info ice_flds_info[ICE_FLOW_FIELD_IDX_MAX] = {
+ /* IPv4 / IPv6 */
+ /* ICE_FLOW_FIELD_IDX_IPV4_SA */
+ ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV4, 12, sizeof(struct in_addr)),
+ /* ICE_FLOW_FIELD_IDX_IPV4_DA */
+ ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV4, 16, sizeof(struct in_addr)),
+ /* ICE_FLOW_FIELD_IDX_IPV6_SA */
+ ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV6, 8, sizeof(struct in6_addr)),
+ /* ICE_FLOW_FIELD_IDX_IPV6_DA */
+ ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV6, 24, sizeof(struct in6_addr)),
+ /* Transport */
+ /* ICE_FLOW_FIELD_IDX_TCP_SRC_PORT */
+ ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_TCP, 0, sizeof(__be16)),
+ /* ICE_FLOW_FIELD_IDX_TCP_DST_PORT */
+ ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_TCP, 2, sizeof(__be16)),
+ /* ICE_FLOW_FIELD_IDX_UDP_SRC_PORT */
+ ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_UDP, 0, sizeof(__be16)),
+ /* ICE_FLOW_FIELD_IDX_UDP_DST_PORT */
+ ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_UDP, 2, sizeof(__be16)),
+ /* ICE_FLOW_FIELD_IDX_SCTP_SRC_PORT */
+ ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_SCTP, 0, sizeof(__be16)),
+ /* ICE_FLOW_FIELD_IDX_SCTP_DST_PORT */
+ ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_SCTP, 2, sizeof(__be16)),
+
+};
+
+/* Bitmaps indicating relevant packet types for a particular protocol header
+ *
+ * Packet types for packets with an Outer/First/Single IPv4 header
+ */
+static const u32 ice_ptypes_ipv4_ofos[] = {
+ 0x1DC00000, 0x04000800, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+};
+
+/* Packet types for packets with an Innermost/Last IPv4 header */
+static const u32 ice_ptypes_ipv4_il[] = {
+ 0xE0000000, 0xB807700E, 0x80000003, 0xE01DC03B,
+ 0x0000000E, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+};
+
+/* Packet types for packets with an Outer/First/Single IPv6 header */
+static const u32 ice_ptypes_ipv6_ofos[] = {
+ 0x00000000, 0x00000000, 0x77000000, 0x10002000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+};
+
+/* Packet types for packets with an Innermost/Last IPv6 header */
+static const u32 ice_ptypes_ipv6_il[] = {
+ 0x00000000, 0x03B80770, 0x000001DC, 0x0EE00000,
+ 0x00000770, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+};
+
+/* UDP Packet types for non-tunneled packets or tunneled
+ * packets with inner UDP.
+ */
+static const u32 ice_ptypes_udp_il[] = {
+ 0x81000000, 0x20204040, 0x04000010, 0x80810102,
+ 0x00000040, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+};
+
+/* Packet types for packets with an Innermost/Last TCP header */
+static const u32 ice_ptypes_tcp_il[] = {
+ 0x04000000, 0x80810102, 0x10000040, 0x02040408,
+ 0x00000102, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+};
+
+/* Packet types for packets with an Innermost/Last SCTP header */
+static const u32 ice_ptypes_sctp_il[] = {
+ 0x08000000, 0x01020204, 0x20000081, 0x04080810,
+ 0x00000204, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+};
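+
+/* Each table above is one ICE_FLOW_PTYPE_MAX-bit bitmap stored as u32
+ * words, where bit N stands for hardware packet type N. Segment
+ * processing ANDs these bitmaps together so that only the ptypes that
+ * carry every requested protocol header survive.
+ */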
+
+/* Parameters and info used during the creation of a flow profile */
+struct ice_flow_prof_params {
+ enum ice_block blk;
+ u16 entry_length; /* # of bytes formatted entry will require */
+ u8 es_cnt;
+ struct ice_flow_prof *prof;
+
+ /* For ACL, the es[0] will have the data of ICE_RX_MDID_PKT_FLAGS_15_0.
+ * This will give us the direction flags.
+ */
+ struct ice_fv_word es[ICE_MAX_FV_WORDS];
+ DECLARE_BITMAP(ptypes, ICE_FLOW_PTYPE_MAX);
+};
+
+#define ICE_FLOW_SEG_HDRS_L3_MASK \
+ (ICE_FLOW_SEG_HDR_IPV4 | ICE_FLOW_SEG_HDR_IPV6)
+#define ICE_FLOW_SEG_HDRS_L4_MASK \
+ (ICE_FLOW_SEG_HDR_TCP | ICE_FLOW_SEG_HDR_UDP | ICE_FLOW_SEG_HDR_SCTP)
+
+/**
+ * ice_flow_val_hdrs - validates packet segments for valid protocol headers
+ * @segs: array of one or more packet segments that describe the flow
+ * @segs_cnt: number of packet segments provided
+ */
+static enum ice_status
+ice_flow_val_hdrs(struct ice_flow_seg_info *segs, u8 segs_cnt)
+{
+ u8 i;
+
+ for (i = 0; i < segs_cnt; i++) {
+ /* Multiple L3 headers */
+ if (segs[i].hdrs & ICE_FLOW_SEG_HDRS_L3_MASK &&
+ !is_power_of_2(segs[i].hdrs & ICE_FLOW_SEG_HDRS_L3_MASK))
+ return ICE_ERR_PARAM;
+
+ /* Multiple L4 headers */
+ if (segs[i].hdrs & ICE_FLOW_SEG_HDRS_L4_MASK &&
+ !is_power_of_2(segs[i].hdrs & ICE_FLOW_SEG_HDRS_L4_MASK))
+ return ICE_ERR_PARAM;
+ }
+
+ return 0;
+}
+
+/**
+ * ice_flow_proc_seg_hdrs - process protocol headers present in pkt segments
+ * @params: information about the flow to be processed
+ *
+ * This function identifies the packet types associated with the protocol
+ * headers being present in packet segments of the specified flow profile.
+ */
+static enum ice_status
+ice_flow_proc_seg_hdrs(struct ice_flow_prof_params *params)
+{
+ struct ice_flow_prof *prof;
+ u8 i;
+
+ memset(params->ptypes, 0xff, sizeof(params->ptypes));
+
+ prof = params->prof;
+
+ for (i = 0; i < params->prof->segs_cnt; i++) {
+ const unsigned long *src;
+ u32 hdrs;
+
+ hdrs = prof->segs[i].hdrs;
+
+ if (hdrs & ICE_FLOW_SEG_HDR_IPV4) {
+ src = !i ? (const unsigned long *)ice_ptypes_ipv4_ofos :
+ (const unsigned long *)ice_ptypes_ipv4_il;
+ bitmap_and(params->ptypes, params->ptypes, src,
+ ICE_FLOW_PTYPE_MAX);
+ } else if (hdrs & ICE_FLOW_SEG_HDR_IPV6) {
+ src = !i ? (const unsigned long *)ice_ptypes_ipv6_ofos :
+ (const unsigned long *)ice_ptypes_ipv6_il;
+ bitmap_and(params->ptypes, params->ptypes, src,
+ ICE_FLOW_PTYPE_MAX);
+ }
+
+ if (hdrs & ICE_FLOW_SEG_HDR_UDP) {
+ src = (const unsigned long *)ice_ptypes_udp_il;
+ bitmap_and(params->ptypes, params->ptypes, src,
+ ICE_FLOW_PTYPE_MAX);
+ } else if (hdrs & ICE_FLOW_SEG_HDR_TCP) {
+ bitmap_and(params->ptypes, params->ptypes,
+ (const unsigned long *)ice_ptypes_tcp_il,
+ ICE_FLOW_PTYPE_MAX);
+ } else if (hdrs & ICE_FLOW_SEG_HDR_SCTP) {
+ src = (const unsigned long *)ice_ptypes_sctp_il;
+ bitmap_and(params->ptypes, params->ptypes, src,
+ ICE_FLOW_PTYPE_MAX);
+ }
+ }
+
+ return 0;
+}
+
+/**
+ * ice_flow_xtract_fld - Create an extraction sequence entry for the given field
+ * @hw: pointer to the HW struct
+ * @params: information about the flow to be processed
+ * @seg: packet segment index of the field to be extracted
+ * @fld: ID of field to be extracted
+ *
+ * This function determines the protocol ID, offset, and size of the given
+ * field. It then allocates one or more extraction sequence entries for the
+ * given field, and fills the entries with protocol ID and offset information.
+ */
+static enum ice_status
+ice_flow_xtract_fld(struct ice_hw *hw, struct ice_flow_prof_params *params,
+ u8 seg, enum ice_flow_field fld)
+{
+ enum ice_prot_id prot_id = ICE_PROT_ID_INVAL;
+ u8 fv_words = hw->blk[params->blk].es.fvw;
+ struct ice_flow_fld_info *flds;
+ u16 cnt, ese_bits, i;
+ u16 off;
+
+ flds = params->prof->segs[seg].fields;
+
+ switch (fld) {
+ case ICE_FLOW_FIELD_IDX_IPV4_SA:
+ case ICE_FLOW_FIELD_IDX_IPV4_DA:
+ prot_id = seg == 0 ? ICE_PROT_IPV4_OF_OR_S : ICE_PROT_IPV4_IL;
+ break;
+ case ICE_FLOW_FIELD_IDX_IPV6_SA:
+ case ICE_FLOW_FIELD_IDX_IPV6_DA:
+ prot_id = seg == 0 ? ICE_PROT_IPV6_OF_OR_S : ICE_PROT_IPV6_IL;
+ break;
+ case ICE_FLOW_FIELD_IDX_TCP_SRC_PORT:
+ case ICE_FLOW_FIELD_IDX_TCP_DST_PORT:
+ prot_id = ICE_PROT_TCP_IL;
+ break;
+ case ICE_FLOW_FIELD_IDX_UDP_SRC_PORT:
+ case ICE_FLOW_FIELD_IDX_UDP_DST_PORT:
+ prot_id = ICE_PROT_UDP_IL_OR_S;
+ break;
+ case ICE_FLOW_FIELD_IDX_SCTP_SRC_PORT:
+ case ICE_FLOW_FIELD_IDX_SCTP_DST_PORT:
+ prot_id = ICE_PROT_SCTP_IL;
+ break;
+ default:
+ return ICE_ERR_NOT_IMPL;
+ }
+
+ /* Each extraction sequence entry is a word in size, and extracts a
+ * word-aligned offset from a protocol header.
+ */
+ ese_bits = ICE_FLOW_FV_EXTRACT_SZ * BITS_PER_BYTE;
+
+ flds[fld].xtrct.prot_id = prot_id;
+ flds[fld].xtrct.off = (ice_flds_info[fld].off / ese_bits) *
+ ICE_FLOW_FV_EXTRACT_SZ;
+ flds[fld].xtrct.disp = (u8)(ice_flds_info[fld].off % ese_bits);
+ flds[fld].xtrct.idx = params->es_cnt;
+
+ /* Adjust the next field-entry index after accommodating the number of
+ * entries this field consumes
+ */
+ cnt = DIV_ROUND_UP(flds[fld].xtrct.disp + ice_flds_info[fld].size,
+ ese_bits);
+
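+ /* Worked example, assuming ICE_FLOW_FV_EXTRACT_SZ is 2 bytes: for
+ * IPv4 SA, off = 96 bits and size = 32 bits, so ese_bits = 16,
+ * disp = 96 % 16 = 0, xtrct.off = (96 / 16) * 2 = 12 bytes, and
+ * cnt = DIV_ROUND_UP(0 + 32, 16) = 2 extraction words.
+ */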
+ /* Fill in the extraction sequence entries needed for this field */
+ off = flds[fld].xtrct.off;
+ for (i = 0; i < cnt; i++) {
+ u8 idx;
+
+ /* Make sure the number of extraction sequence entries
+ * does not exceed the block's capability
+ */
+ if (params->es_cnt >= fv_words)
+ return ICE_ERR_MAX_LIMIT;
+
+ /* some blocks require a reversed field vector layout */
+ if (hw->blk[params->blk].es.reverse)
+ idx = fv_words - params->es_cnt - 1;
+ else
+ idx = params->es_cnt;
+
+ params->es[idx].prot_id = prot_id;
+ params->es[idx].off = off;
+ params->es_cnt++;
+
+ off += ICE_FLOW_FV_EXTRACT_SZ;
+ }
+
+ return 0;
+}
+
+/**
+ * ice_flow_create_xtrct_seq - Create an extraction sequence for given segments
+ * @hw: pointer to the HW struct
+ * @params: information about the flow to be processed
+ *
+ * This function iterates through all matched fields in the given segments, and
+ * creates an extraction sequence for the fields.
+ */
+static enum ice_status
+ice_flow_create_xtrct_seq(struct ice_hw *hw,
+ struct ice_flow_prof_params *params)
+{
+ struct ice_flow_prof *prof = params->prof;
+ enum ice_status status = 0;
+ u8 i;
+
+ for (i = 0; i < prof->segs_cnt; i++) {
+ u8 j;
+
+ for_each_set_bit(j, (unsigned long *)&prof->segs[i].match,
+ ICE_FLOW_FIELD_IDX_MAX) {
+ status = ice_flow_xtract_fld(hw, params, i,
+ (enum ice_flow_field)j);
+ if (status)
+ return status;
+ }
+ }
+
+ return status;
+}
+
+/**
+ * ice_flow_proc_segs - process all packet segments associated with a profile
+ * @hw: pointer to the HW struct
+ * @params: information about the flow to be processed
+ */
+static enum ice_status
+ice_flow_proc_segs(struct ice_hw *hw, struct ice_flow_prof_params *params)
+{
+ enum ice_status status;
+
+ status = ice_flow_proc_seg_hdrs(params);
+ if (status)
+ return status;
+
+ status = ice_flow_create_xtrct_seq(hw, params);
+ if (status)
+ return status;
+
+ switch (params->blk) {
+ case ICE_BLK_RSS:
+ /* Only header information is provided for RSS configuration.
+ * No further processing is needed.
+ */
+ status = 0;
+ break;
+ default:
+ return ICE_ERR_NOT_IMPL;
+ }
+
+ return status;
+}
+
+#define ICE_FLOW_FIND_PROF_CHK_FLDS 0x00000001
+#define ICE_FLOW_FIND_PROF_CHK_VSI 0x00000002
+#define ICE_FLOW_FIND_PROF_NOT_CHK_DIR 0x00000004
+
+/**
+ * ice_flow_find_prof_conds - Find a profile matching headers and conditions
+ * @hw: pointer to the HW struct
+ * @blk: classification stage
+ * @dir: flow direction
+ * @segs: array of one or more packet segments that describe the flow
+ * @segs_cnt: number of packet segments provided
+ * @vsi_handle: software VSI handle to check VSI (ICE_FLOW_FIND_PROF_CHK_VSI)
+ * @conds: additional conditions to be checked (ICE_FLOW_FIND_PROF_CHK_*)
+ */
+static struct ice_flow_prof *
+ice_flow_find_prof_conds(struct ice_hw *hw, enum ice_block blk,
+ enum ice_flow_dir dir, struct ice_flow_seg_info *segs,
+ u8 segs_cnt, u16 vsi_handle, u32 conds)
+{
+ struct ice_flow_prof *p, *prof = NULL;
+
+ mutex_lock(&hw->fl_profs_locks[blk]);
+ list_for_each_entry(p, &hw->fl_profs[blk], l_entry)
+ if ((p->dir == dir || conds & ICE_FLOW_FIND_PROF_NOT_CHK_DIR) &&
+ segs_cnt && segs_cnt == p->segs_cnt) {
+ u8 i;
+
+ /* Check for profile-VSI association if specified */
+ if ((conds & ICE_FLOW_FIND_PROF_CHK_VSI) &&
+ ice_is_vsi_valid(hw, vsi_handle) &&
+ !test_bit(vsi_handle, p->vsis))
+ continue;
+
+ /* Protocol headers must be checked. Matched fields are
+ * checked if specified.
+ */
+ for (i = 0; i < segs_cnt; i++)
+ if (segs[i].hdrs != p->segs[i].hdrs ||
+ ((conds & ICE_FLOW_FIND_PROF_CHK_FLDS) &&
+ segs[i].match != p->segs[i].match))
+ break;
+
+ /* A match is found if all segments are matched */
+ if (i == segs_cnt) {
+ prof = p;
+ break;
+ }
+ }
+ mutex_unlock(&hw->fl_profs_locks[blk]);
+
+ return prof;
+}
+
+/**
+ * ice_flow_find_prof_id - Look up a profile with given profile ID
+ * @hw: pointer to the HW struct
+ * @blk: classification stage
+ * @prof_id: unique ID to identify this flow profile
+ */
+static struct ice_flow_prof *
+ice_flow_find_prof_id(struct ice_hw *hw, enum ice_block blk, u64 prof_id)
+{
+ struct ice_flow_prof *p;
+
+ list_for_each_entry(p, &hw->fl_profs[blk], l_entry)
+ if (p->id == prof_id)
+ return p;
+
+ return NULL;
+}
+
+/**
+ * ice_flow_add_prof_sync - Add a flow profile for packet segments and fields
+ * @hw: pointer to the HW struct
+ * @blk: classification stage
+ * @dir: flow direction
+ * @prof_id: unique ID to identify this flow profile
+ * @segs: array of one or more packet segments that describe the flow
+ * @segs_cnt: number of packet segments provided
+ * @prof: stores the returned flow profile added
+ *
+ * Assumption: the caller has acquired the lock to the profile list
+ */
+static enum ice_status
+ice_flow_add_prof_sync(struct ice_hw *hw, enum ice_block blk,
+ enum ice_flow_dir dir, u64 prof_id,
+ struct ice_flow_seg_info *segs, u8 segs_cnt,
+ struct ice_flow_prof **prof)
+{
+ struct ice_flow_prof_params params;
+ enum ice_status status;
+ u8 i;
+
+ if (!prof)
+ return ICE_ERR_BAD_PTR;
+
+ memset(&params, 0, sizeof(params));
+ params.prof = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*params.prof),
+ GFP_KERNEL);
+ if (!params.prof)
+ return ICE_ERR_NO_MEMORY;
+
+ /* initialize extraction sequence to all invalid (0xff) */
+ for (i = 0; i < ICE_MAX_FV_WORDS; i++) {
+ params.es[i].prot_id = ICE_PROT_INVALID;
+ params.es[i].off = ICE_FV_OFFSET_INVAL;
+ }
+
+ params.blk = blk;
+ params.prof->id = prof_id;
+ params.prof->dir = dir;
+ params.prof->segs_cnt = segs_cnt;
+
+ /* Make a copy of the segments that need to be persistent in the flow
+ * profile instance
+ */
+ for (i = 0; i < segs_cnt; i++)
+ memcpy(&params.prof->segs[i], &segs[i], sizeof(*segs));
+
+ status = ice_flow_proc_segs(hw, &params);
+ if (status) {
+ ice_debug(hw, ICE_DBG_FLOW,
+ "Error processing a flow's packet segments\n");
+ goto out;
+ }
+
+ /* Add a HW profile for this flow profile */
+ status = ice_add_prof(hw, blk, prof_id, (u8 *)params.ptypes, params.es);
+ if (status) {
+ ice_debug(hw, ICE_DBG_FLOW, "Error adding a HW flow profile\n");
+ goto out;
+ }
+
+ INIT_LIST_HEAD(&params.prof->entries);
+ mutex_init(&params.prof->entries_lock);
+ *prof = params.prof;
+
+out:
+ if (status)
+ devm_kfree(ice_hw_to_dev(hw), params.prof);
+
+ return status;
+}
+
+/**
+ * ice_flow_rem_prof_sync - remove a flow profile
+ * @hw: pointer to the hardware structure
+ * @blk: classification stage
+ * @prof: pointer to flow profile to remove
+ *
+ * Assumption: the caller has acquired the lock to the profile list
+ */
+static enum ice_status
+ice_flow_rem_prof_sync(struct ice_hw *hw, enum ice_block blk,
+ struct ice_flow_prof *prof)
+{
+ enum ice_status status;
+
+ /* Remove all hardware profiles associated with this flow profile */
+ status = ice_rem_prof(hw, blk, prof->id);
+ if (!status) {
+ list_del(&prof->l_entry);
+ mutex_destroy(&prof->entries_lock);
+ devm_kfree(ice_hw_to_dev(hw), prof);
+ }
+
+ return status;
+}
+
+/**
+ * ice_flow_assoc_prof - associate a VSI with a flow profile
+ * @hw: pointer to the hardware structure
+ * @blk: classification stage
+ * @prof: pointer to flow profile
+ * @vsi_handle: software VSI handle
+ *
+ * Assumption: the caller has acquired the lock to the profile list
+ * and the software VSI handle has been validated
+ */
+static enum ice_status
+ice_flow_assoc_prof(struct ice_hw *hw, enum ice_block blk,
+ struct ice_flow_prof *prof, u16 vsi_handle)
+{
+ enum ice_status status = 0;
+
+ if (!test_bit(vsi_handle, prof->vsis)) {
+ status = ice_add_prof_id_flow(hw, blk,
+ ice_get_hw_vsi_num(hw,
+ vsi_handle),
+ prof->id);
+ if (!status)
+ set_bit(vsi_handle, prof->vsis);
+ else
+ ice_debug(hw, ICE_DBG_FLOW,
+ "HW profile add failed, %d\n",
+ status);
+ }
+
+ return status;
+}
+
+/**
+ * ice_flow_disassoc_prof - disassociate a VSI from a flow profile
+ * @hw: pointer to the hardware structure
+ * @blk: classification stage
+ * @prof: pointer to flow profile
+ * @vsi_handle: software VSI handle
+ *
+ * Assumption: the caller has acquired the lock to the profile list
+ * and the software VSI handle has been validated
+ */
+static enum ice_status
+ice_flow_disassoc_prof(struct ice_hw *hw, enum ice_block blk,
+ struct ice_flow_prof *prof, u16 vsi_handle)
+{
+ enum ice_status status = 0;
+
+ if (test_bit(vsi_handle, prof->vsis)) {
+ status = ice_rem_prof_id_flow(hw, blk,
+ ice_get_hw_vsi_num(hw,
+ vsi_handle),
+ prof->id);
+ if (!status)
+ clear_bit(vsi_handle, prof->vsis);
+ else
+ ice_debug(hw, ICE_DBG_FLOW,
+ "HW profile remove failed, %d\n",
+ status);
+ }
+
+ return status;
+}
+
+/**
+ * ice_flow_add_prof - Add a flow profile for packet segments and matched fields
+ * @hw: pointer to the HW struct
+ * @blk: classification stage
+ * @dir: flow direction
+ * @prof_id: unique ID to identify this flow profile
+ * @segs: array of one or more packet segments that describe the flow
+ * @segs_cnt: number of packet segments provided
+ * @prof: stores the returned flow profile added
+ */
+static enum ice_status
+ice_flow_add_prof(struct ice_hw *hw, enum ice_block blk, enum ice_flow_dir dir,
+ u64 prof_id, struct ice_flow_seg_info *segs, u8 segs_cnt,
+ struct ice_flow_prof **prof)
+{
+ enum ice_status status;
+
+ if (segs_cnt > ICE_FLOW_SEG_MAX)
+ return ICE_ERR_MAX_LIMIT;
+
+ if (!segs_cnt)
+ return ICE_ERR_PARAM;
+
+ if (!segs)
+ return ICE_ERR_BAD_PTR;
+
+ status = ice_flow_val_hdrs(segs, segs_cnt);
+ if (status)
+ return status;
+
+ mutex_lock(&hw->fl_profs_locks[blk]);
+
+ status = ice_flow_add_prof_sync(hw, blk, dir, prof_id, segs, segs_cnt,
+ prof);
+ if (!status)
+ list_add(&(*prof)->l_entry, &hw->fl_profs[blk]);
+
+ mutex_unlock(&hw->fl_profs_locks[blk]);
+
+ return status;
+}
+
+/**
+ * ice_flow_rem_prof - Remove a flow profile and all entries associated with it
+ * @hw: pointer to the HW struct
+ * @blk: the block for which the flow profile is to be removed
+ * @prof_id: unique ID of the flow profile to be removed
+ */
+static enum ice_status
+ice_flow_rem_prof(struct ice_hw *hw, enum ice_block blk, u64 prof_id)
+{
+ struct ice_flow_prof *prof;
+ enum ice_status status;
+
+ mutex_lock(&hw->fl_profs_locks[blk]);
+
+ prof = ice_flow_find_prof_id(hw, blk, prof_id);
+ if (!prof) {
+ status = ICE_ERR_DOES_NOT_EXIST;
+ goto out;
+ }
+
+ /* prof becomes invalid after the call */
+ status = ice_flow_rem_prof_sync(hw, blk, prof);
+
+out:
+ mutex_unlock(&hw->fl_profs_locks[blk]);
+
+ return status;
+}
+
+/**
+ * ice_flow_set_fld_ext - specifies locations of field from entry's input buffer
+ * @seg: packet segment the field being set belongs to
+ * @fld: field to be set
+ * @type: type of the field
+ * @val_loc: if not ICE_FLOW_FLD_OFF_INVAL, location of the value to match from
+ * entry's input buffer
+ * @mask_loc: if not ICE_FLOW_FLD_OFF_INVAL, location of mask value from entry's
+ * input buffer
+ * @last_loc: if not ICE_FLOW_FLD_OFF_INVAL, location of last/upper value from
+ * entry's input buffer
+ *
+ * This helper function stores information of a field being matched, including
+ * the type of the field and the locations of the value to match, the mask,
+ * and the upper-bound value in the start of the input buffer for a flow entry.
+ * This function should only be used for fixed-size data structures.
+ *
+ * This function also opportunistically determines the protocol headers to be
+ * present based on the fields being set. Some fields cannot be used alone to
+ * determine the protocol headers present. Sometimes, fields for particular
+ * protocol headers are not matched. In those cases, the protocol headers
+ * must be explicitly set.
+ */
+static void
+ice_flow_set_fld_ext(struct ice_flow_seg_info *seg, enum ice_flow_field fld,
+ enum ice_flow_fld_match_type type, u16 val_loc,
+ u16 mask_loc, u16 last_loc)
+{
+ u64 bit = BIT_ULL(fld);
+
+ seg->match |= bit;
+ if (type == ICE_FLOW_FLD_TYPE_RANGE)
+ seg->range |= bit;
+
+ seg->fields[fld].type = type;
+ seg->fields[fld].src.val = val_loc;
+ seg->fields[fld].src.mask = mask_loc;
+ seg->fields[fld].src.last = last_loc;
+
+ ICE_FLOW_SET_HDRS(seg, ice_flds_info[fld].hdr);
+}
+
+/**
+ * ice_flow_set_fld - specifies locations of field from entry's input buffer
+ * @seg: packet segment the field being set belongs to
+ * @fld: field to be set
+ * @val_loc: if not ICE_FLOW_FLD_OFF_INVAL, location of the value to match from
+ * entry's input buffer
+ * @mask_loc: if not ICE_FLOW_FLD_OFF_INVAL, location of mask value from entry's
+ * input buffer
+ * @last_loc: if not ICE_FLOW_FLD_OFF_INVAL, location of last/upper value from
+ * entry's input buffer
+ * @range: indicate if field being matched is to be in a range
+ *
+ * This function specifies the locations, in the form of byte offsets from the
+ * start of the input buffer for a flow entry, from where the value to match,
+ * the mask value, and upper value can be extracted. These locations are then
+ * stored in the flow profile. When adding a flow entry associated with the
+ * flow profile, these locations will be used to quickly extract the values and
+ * create the content of a match entry. This function should only be used for
+ * fixed-size data structures.
+ */
+static void
+ice_flow_set_fld(struct ice_flow_seg_info *seg, enum ice_flow_field fld,
+ u16 val_loc, u16 mask_loc, u16 last_loc, bool range)
+{
+ enum ice_flow_fld_match_type t = range ?
+ ICE_FLOW_FLD_TYPE_RANGE : ICE_FLOW_FLD_TYPE_REG;
+
+ ice_flow_set_fld_ext(seg, fld, t, val_loc, mask_loc, last_loc);
+}
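A sketch of how a caller might describe these byte offsets (illustrative only; the entry layout and struct name below are hypothetical, not part of this patch):

	/* Hypothetical fixed-size input buffer for a flow entry that
	 * matches an IPv4 source address against a value and a mask.
	 */
	struct example_ipv4_entry {
		__be32 src_ip;		/* value to match */
		__be32 src_ip_mask;	/* mask applied to the value */
	};

	/* record where in the buffer each piece of the match lives */
	ice_flow_set_fld(seg, ICE_FLOW_FIELD_IDX_IPV4_SA,
			 offsetof(struct example_ipv4_entry, src_ip),
			 offsetof(struct example_ipv4_entry, src_ip_mask),
			 ICE_FLOW_FLD_OFF_INVAL, false);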
+
+#define ICE_FLOW_RSS_SEG_HDR_L3_MASKS \
+ (ICE_FLOW_SEG_HDR_IPV4 | ICE_FLOW_SEG_HDR_IPV6)
+
+#define ICE_FLOW_RSS_SEG_HDR_L4_MASKS \
+ (ICE_FLOW_SEG_HDR_TCP | ICE_FLOW_SEG_HDR_UDP | ICE_FLOW_SEG_HDR_SCTP)
+
+#define ICE_FLOW_RSS_SEG_HDR_VAL_MASKS \
+ (ICE_FLOW_RSS_SEG_HDR_L3_MASKS | \
+ ICE_FLOW_RSS_SEG_HDR_L4_MASKS)
+
+/**
+ * ice_flow_set_rss_seg_info - setup packet segments for RSS
+ * @segs: pointer to the flow field segment(s)
+ * @hash_fields: fields to be hashed on for the segment(s)
+ * @flow_hdr: protocol header fields within a packet segment
+ *
+ * Helper function to extract fields from the hash bitmap and use the flow
+ * header value to set the flow field segment, for later use when adding or
+ * removing a flow profile entry.
+ */
+static enum ice_status
+ice_flow_set_rss_seg_info(struct ice_flow_seg_info *segs, u64 hash_fields,
+ u32 flow_hdr)
+{
+ u64 val;
+ u8 i;
+
+ for_each_set_bit(i, (unsigned long *)&hash_fields,
+ ICE_FLOW_FIELD_IDX_MAX)
+ ice_flow_set_fld(segs, (enum ice_flow_field)i,
+ ICE_FLOW_FLD_OFF_INVAL, ICE_FLOW_FLD_OFF_INVAL,
+ ICE_FLOW_FLD_OFF_INVAL, false);
+
+ ICE_FLOW_SET_HDRS(segs, flow_hdr);
+
+ if (segs->hdrs & ~ICE_FLOW_RSS_SEG_HDR_VAL_MASKS)
+ return ICE_ERR_PARAM;
+
+ val = (u64)(segs->hdrs & ICE_FLOW_RSS_SEG_HDR_L3_MASKS);
+ if (val && !is_power_of_2(val))
+ return ICE_ERR_CFG;
+
+ val = (u64)(segs->hdrs & ICE_FLOW_RSS_SEG_HDR_L4_MASKS);
+ if (val && !is_power_of_2(val))
+ return ICE_ERR_CFG;
+
+ return 0;
+}
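The two is_power_of_2() checks above restrict each RSS segment to at most one L3 and one L4 header. A sketch using the ICE_FLOW_SEG_HDR_* values introduced by this patch (illustrative, not part of the patch):

	u32 ok       = ICE_FLOW_SEG_HDR_IPV4 | ICE_FLOW_SEG_HDR_TCP;  /* one L3 + one L4: accepted */
	u32 bad_l3   = ICE_FLOW_SEG_HDR_IPV4 | ICE_FLOW_SEG_HDR_IPV6; /* two L3 bits: ICE_ERR_CFG */
	u32 bad_l4   = ICE_FLOW_SEG_HDR_TCP | ICE_FLOW_SEG_HDR_UDP;   /* two L4 bits: ICE_ERR_CFG */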
+
+/**
+ * ice_rem_vsi_rss_list - remove VSI from RSS list
+ * @hw: pointer to the hardware structure
+ * @vsi_handle: software VSI handle
+ *
+ * Remove the VSI from all RSS configurations in the list.
+ */
+void ice_rem_vsi_rss_list(struct ice_hw *hw, u16 vsi_handle)
+{
+ struct ice_rss_cfg *r, *tmp;
+
+ if (list_empty(&hw->rss_list_head))
+ return;
+
+ mutex_lock(&hw->rss_locks);
+ list_for_each_entry_safe(r, tmp, &hw->rss_list_head, l_entry)
+ if (test_and_clear_bit(vsi_handle, r->vsis))
+ if (bitmap_empty(r->vsis, ICE_MAX_VSI)) {
+ list_del(&r->l_entry);
+ devm_kfree(ice_hw_to_dev(hw), r);
+ }
+ mutex_unlock(&hw->rss_locks);
+}
+
+/**
+ * ice_rem_vsi_rss_cfg - remove RSS configurations associated with VSI
+ * @hw: pointer to the hardware structure
+ * @vsi_handle: software VSI handle
+ *
+ * This function will iterate through all flow profiles and disassociate
+ * the VSI from that profile. If the flow profile has no VSIs it will
+ * be removed.
+ */
+enum ice_status ice_rem_vsi_rss_cfg(struct ice_hw *hw, u16 vsi_handle)
+{
+ const enum ice_block blk = ICE_BLK_RSS;
+ struct ice_flow_prof *p, *t;
+ enum ice_status status = 0;
+
+ if (!ice_is_vsi_valid(hw, vsi_handle))
+ return ICE_ERR_PARAM;
+
+ if (list_empty(&hw->fl_profs[blk]))
+ return 0;
+
+ mutex_lock(&hw->fl_profs_locks[blk]);
+ list_for_each_entry_safe(p, t, &hw->fl_profs[blk], l_entry)
+ if (test_bit(vsi_handle, p->vsis)) {
+ status = ice_flow_disassoc_prof(hw, blk, p, vsi_handle);
+ if (status)
+ break;
+
+ if (bitmap_empty(p->vsis, ICE_MAX_VSI)) {
+ status = ice_flow_rem_prof_sync(hw, blk, p);
+ if (status)
+ break;
+ }
+ }
+ mutex_unlock(&hw->fl_profs_locks[blk]);
+
+ return status;
+}
+
+/**
+ * ice_rem_rss_list - remove RSS configuration from list
+ * @hw: pointer to the hardware structure
+ * @vsi_handle: software VSI handle
+ * @prof: pointer to flow profile
+ *
+ * Assumption: lock has already been acquired for RSS list
+ */
+static void
+ice_rem_rss_list(struct ice_hw *hw, u16 vsi_handle, struct ice_flow_prof *prof)
+{
+ struct ice_rss_cfg *r, *tmp;
+
+ /* Search for RSS hash fields associated to the VSI that match the
+ * hash configurations associated to the flow profile. If found
+ * remove from the RSS entry list of the VSI context and delete entry.
+ */
+ list_for_each_entry_safe(r, tmp, &hw->rss_list_head, l_entry)
+ if (r->hashed_flds == prof->segs[prof->segs_cnt - 1].match &&
+ r->packet_hdr == prof->segs[prof->segs_cnt - 1].hdrs) {
+ clear_bit(vsi_handle, r->vsis);
+ if (bitmap_empty(r->vsis, ICE_MAX_VSI)) {
+ list_del(&r->l_entry);
+ devm_kfree(ice_hw_to_dev(hw), r);
+ }
+ return;
+ }
+}
+
+/**
+ * ice_add_rss_list - add RSS configuration to list
+ * @hw: pointer to the hardware structure
+ * @vsi_handle: software VSI handle
+ * @prof: pointer to flow profile
+ *
+ * Assumption: lock has already been acquired for RSS list
+ */
+static enum ice_status
+ice_add_rss_list(struct ice_hw *hw, u16 vsi_handle, struct ice_flow_prof *prof)
+{
+ struct ice_rss_cfg *r, *rss_cfg;
+
+ list_for_each_entry(r, &hw->rss_list_head, l_entry)
+ if (r->hashed_flds == prof->segs[prof->segs_cnt - 1].match &&
+ r->packet_hdr == prof->segs[prof->segs_cnt - 1].hdrs) {
+ set_bit(vsi_handle, r->vsis);
+ return 0;
+ }
+
+ rss_cfg = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*rss_cfg),
+ GFP_KERNEL);
+ if (!rss_cfg)
+ return ICE_ERR_NO_MEMORY;
+
+ rss_cfg->hashed_flds = prof->segs[prof->segs_cnt - 1].match;
+ rss_cfg->packet_hdr = prof->segs[prof->segs_cnt - 1].hdrs;
+ set_bit(vsi_handle, rss_cfg->vsis);
+
+ list_add_tail(&rss_cfg->l_entry, &hw->rss_list_head);
+
+ return 0;
+}
+
+#define ICE_FLOW_PROF_HASH_S 0
+#define ICE_FLOW_PROF_HASH_M (0xFFFFFFFFULL << ICE_FLOW_PROF_HASH_S)
+#define ICE_FLOW_PROF_HDR_S 32
+#define ICE_FLOW_PROF_HDR_M (0x3FFFFFFFULL << ICE_FLOW_PROF_HDR_S)
+#define ICE_FLOW_PROF_ENCAP_S 63
+#define ICE_FLOW_PROF_ENCAP_M (BIT_ULL(ICE_FLOW_PROF_ENCAP_S))
+
+#define ICE_RSS_OUTER_HEADERS 1
+
+/* Flow profile ID format:
+ * [0:31] - Packet match fields
+ * [32:61] - Protocol header (bit 62 reserved)
+ * [63] - Encapsulation flag, 0 if non-tunneled, 1 if tunneled
+ */
+#define ICE_FLOW_GEN_PROFID(hash, hdr, segs_cnt) \
+ (u64)(((u64)(hash) & ICE_FLOW_PROF_HASH_M) | \
+ (((u64)(hdr) << ICE_FLOW_PROF_HDR_S) & ICE_FLOW_PROF_HDR_M) | \
+ ((u8)((segs_cnt) - 1) ? ICE_FLOW_PROF_ENCAP_M : 0))
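A sketch of how the macro composes an ID (illustrative values, not part of this patch): for a single-segment TCP/IPv4 RSS configuration the hash fields land in bits 0-31, the protocol headers in bits 32-61, and the encapsulation bit stays clear because segs_cnt - 1 == 0:

	u64 id = ICE_FLOW_GEN_PROFID(ICE_FLOW_HASH_IPV4 | ICE_FLOW_HASH_TCP_PORT,
				     ICE_FLOW_SEG_HDR_IPV4 | ICE_FLOW_SEG_HDR_TCP,
				     1);
	/* hash = 0x33 (field bits 0, 1, 4, 5); hdr = 0x44 << 32; bit 63 = 0 */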
+
+/**
+ * ice_add_rss_cfg_sync - add an RSS configuration
+ * @hw: pointer to the hardware structure
+ * @vsi_handle: software VSI handle
+ * @hashed_flds: hash bit fields (ICE_FLOW_HASH_*) to configure
+ * @addl_hdrs: protocol header fields
+ * @segs_cnt: packet segment count
+ *
+ * Assumption: lock has already been acquired for RSS list
+ */
+static enum ice_status
+ice_add_rss_cfg_sync(struct ice_hw *hw, u16 vsi_handle, u64 hashed_flds,
+ u32 addl_hdrs, u8 segs_cnt)
+{
+ const enum ice_block blk = ICE_BLK_RSS;
+ struct ice_flow_prof *prof = NULL;
+ struct ice_flow_seg_info *segs;
+ enum ice_status status;
+
+ if (!segs_cnt || segs_cnt > ICE_FLOW_SEG_MAX)
+ return ICE_ERR_PARAM;
+
+ segs = kcalloc(segs_cnt, sizeof(*segs), GFP_KERNEL);
+ if (!segs)
+ return ICE_ERR_NO_MEMORY;
+
+ /* Construct the packet segment info from the hashed fields */
+ status = ice_flow_set_rss_seg_info(&segs[segs_cnt - 1], hashed_flds,
+ addl_hdrs);
+ if (status)
+ goto exit;
+
+ /* Search for a flow profile that has matching headers, hash fields
+ * and has the input VSI associated to it. If found, no further
+ * operations required and exit.
+ */
+ prof = ice_flow_find_prof_conds(hw, blk, ICE_FLOW_RX, segs, segs_cnt,
+ vsi_handle,
+ ICE_FLOW_FIND_PROF_CHK_FLDS |
+ ICE_FLOW_FIND_PROF_CHK_VSI);
+ if (prof)
+ goto exit;
+
+ /* Check if a flow profile exists with the same protocol headers and
+ * associated with the input VSI. If so disassociate the VSI from
+ * this profile. The VSI will be added to a new profile created with
+ * the protocol header and new hash field configuration.
+ */
+ prof = ice_flow_find_prof_conds(hw, blk, ICE_FLOW_RX, segs, segs_cnt,
+ vsi_handle, ICE_FLOW_FIND_PROF_CHK_VSI);
+ if (prof) {
+ status = ice_flow_disassoc_prof(hw, blk, prof, vsi_handle);
+ if (!status)
+ ice_rem_rss_list(hw, vsi_handle, prof);
+ else
+ goto exit;
+
+ /* Remove profile if it has no VSIs associated */
+ if (bitmap_empty(prof->vsis, ICE_MAX_VSI)) {
+ status = ice_flow_rem_prof(hw, blk, prof->id);
+ if (status)
+ goto exit;
+ }
+ }
+
+ /* Search for a profile that has same match fields only. If this
+ * exists then associate the VSI to this profile.
+ */
+ prof = ice_flow_find_prof_conds(hw, blk, ICE_FLOW_RX, segs, segs_cnt,
+ vsi_handle,
+ ICE_FLOW_FIND_PROF_CHK_FLDS);
+ if (prof) {
+ status = ice_flow_assoc_prof(hw, blk, prof, vsi_handle);
+ if (!status)
+ status = ice_add_rss_list(hw, vsi_handle, prof);
+ goto exit;
+ }
+
+ /* Create a new flow profile with generated profile and packet
+ * segment information.
+ */
+ status = ice_flow_add_prof(hw, blk, ICE_FLOW_RX,
+ ICE_FLOW_GEN_PROFID(hashed_flds,
+ segs[segs_cnt - 1].hdrs,
+ segs_cnt),
+ segs, segs_cnt, &prof);
+ if (status)
+ goto exit;
+
+ status = ice_flow_assoc_prof(hw, blk, prof, vsi_handle);
+ /* If association to a new flow profile failed then this profile can
+ * be removed.
+ */
+ if (status) {
+ ice_flow_rem_prof(hw, blk, prof->id);
+ goto exit;
+ }
+
+ status = ice_add_rss_list(hw, vsi_handle, prof);
+
+exit:
+ kfree(segs);
+ return status;
+}
+
+/**
+ * ice_add_rss_cfg - add an RSS configuration with specified hashed fields
+ * @hw: pointer to the hardware structure
+ * @vsi_handle: software VSI handle
+ * @hashed_flds: hash bit fields (ICE_FLOW_HASH_*) to configure
+ * @addl_hdrs: protocol header fields
+ *
+ * This function will generate a flow profile based on the input fields to
+ * hash on and the flow type, and will use the VSI number to add a flow entry
+ * to the profile.
+ */
+enum ice_status
+ice_add_rss_cfg(struct ice_hw *hw, u16 vsi_handle, u64 hashed_flds,
+ u32 addl_hdrs)
+{
+ enum ice_status status;
+
+ if (hashed_flds == ICE_HASH_INVALID ||
+ !ice_is_vsi_valid(hw, vsi_handle))
+ return ICE_ERR_PARAM;
+
+ mutex_lock(&hw->rss_locks);
+ status = ice_add_rss_cfg_sync(hw, vsi_handle, hashed_flds, addl_hdrs,
+ ICE_RSS_OUTER_HEADERS);
+ mutex_unlock(&hw->rss_locks);
+
+ return status;
+}
+
+/* Mapping of AVF hash bit fields to an L3-L4 hash combination.
+ * As the ice_flow_avf_hdr_field values represent individual bit shifts in a
+ * hash, convert them to their appropriate flow L3/L4 values.
+ */
+#define ICE_FLOW_AVF_RSS_IPV4_MASKS \
+ (BIT_ULL(ICE_AVF_FLOW_FIELD_IPV4_OTHER) | \
+ BIT_ULL(ICE_AVF_FLOW_FIELD_FRAG_IPV4))
+#define ICE_FLOW_AVF_RSS_TCP_IPV4_MASKS \
+ (BIT_ULL(ICE_AVF_FLOW_FIELD_IPV4_TCP_SYN_NO_ACK) | \
+ BIT_ULL(ICE_AVF_FLOW_FIELD_IPV4_TCP))
+#define ICE_FLOW_AVF_RSS_UDP_IPV4_MASKS \
+ (BIT_ULL(ICE_AVF_FLOW_FIELD_UNICAST_IPV4_UDP) | \
+ BIT_ULL(ICE_AVF_FLOW_FIELD_MULTICAST_IPV4_UDP) | \
+ BIT_ULL(ICE_AVF_FLOW_FIELD_IPV4_UDP))
+#define ICE_FLOW_AVF_RSS_ALL_IPV4_MASKS \
+ (ICE_FLOW_AVF_RSS_TCP_IPV4_MASKS | ICE_FLOW_AVF_RSS_UDP_IPV4_MASKS | \
+ ICE_FLOW_AVF_RSS_IPV4_MASKS | BIT_ULL(ICE_AVF_FLOW_FIELD_IPV4_SCTP))
+
+#define ICE_FLOW_AVF_RSS_IPV6_MASKS \
+ (BIT_ULL(ICE_AVF_FLOW_FIELD_IPV6_OTHER) | \
+ BIT_ULL(ICE_AVF_FLOW_FIELD_FRAG_IPV6))
+#define ICE_FLOW_AVF_RSS_UDP_IPV6_MASKS \
+ (BIT_ULL(ICE_AVF_FLOW_FIELD_UNICAST_IPV6_UDP) | \
+ BIT_ULL(ICE_AVF_FLOW_FIELD_MULTICAST_IPV6_UDP) | \
+ BIT_ULL(ICE_AVF_FLOW_FIELD_IPV6_UDP))
+#define ICE_FLOW_AVF_RSS_TCP_IPV6_MASKS \
+ (BIT_ULL(ICE_AVF_FLOW_FIELD_IPV6_TCP_SYN_NO_ACK) | \
+ BIT_ULL(ICE_AVF_FLOW_FIELD_IPV6_TCP))
+#define ICE_FLOW_AVF_RSS_ALL_IPV6_MASKS \
+ (ICE_FLOW_AVF_RSS_TCP_IPV6_MASKS | ICE_FLOW_AVF_RSS_UDP_IPV6_MASKS | \
+ ICE_FLOW_AVF_RSS_IPV6_MASKS | BIT_ULL(ICE_AVF_FLOW_FIELD_IPV6_SCTP))
+
+/**
+ * ice_add_avf_rss_cfg - add an RSS configuration for AVF driver
+ * @hw: pointer to the hardware structure
+ * @vsi_handle: software VSI handle
+ * @avf_hash: hash bit fields (ICE_AVF_FLOW_FIELD_*) to configure
+ *
+ * This function will take the hash bitmap provided by the AVF driver via a
+ * message, convert it to ICE-compatible values, and configure RSS flow
+ * profiles.
+ */
+enum ice_status
+ice_add_avf_rss_cfg(struct ice_hw *hw, u16 vsi_handle, u64 avf_hash)
+{
+ enum ice_status status = 0;
+ u64 hash_flds;
+
+ if (avf_hash == ICE_AVF_FLOW_FIELD_INVALID ||
+ !ice_is_vsi_valid(hw, vsi_handle))
+ return ICE_ERR_PARAM;
+
+ /* Make sure no unsupported bits are specified */
+ if (avf_hash & ~(ICE_FLOW_AVF_RSS_ALL_IPV4_MASKS |
+ ICE_FLOW_AVF_RSS_ALL_IPV6_MASKS))
+ return ICE_ERR_CFG;
+
+ hash_flds = avf_hash;
+
+ /* Always create an L3 RSS configuration for any L4 RSS configuration */
+ if (hash_flds & ICE_FLOW_AVF_RSS_ALL_IPV4_MASKS)
+ hash_flds |= ICE_FLOW_AVF_RSS_IPV4_MASKS;
+
+ if (hash_flds & ICE_FLOW_AVF_RSS_ALL_IPV6_MASKS)
+ hash_flds |= ICE_FLOW_AVF_RSS_IPV6_MASKS;
+
+ /* Create the corresponding RSS configuration for each valid hash bit */
+ while (hash_flds) {
+ u64 rss_hash = ICE_HASH_INVALID;
+
+ if (hash_flds & ICE_FLOW_AVF_RSS_ALL_IPV4_MASKS) {
+ if (hash_flds & ICE_FLOW_AVF_RSS_IPV4_MASKS) {
+ rss_hash = ICE_FLOW_HASH_IPV4;
+ hash_flds &= ~ICE_FLOW_AVF_RSS_IPV4_MASKS;
+ } else if (hash_flds &
+ ICE_FLOW_AVF_RSS_TCP_IPV4_MASKS) {
+ rss_hash = ICE_FLOW_HASH_IPV4 |
+ ICE_FLOW_HASH_TCP_PORT;
+ hash_flds &= ~ICE_FLOW_AVF_RSS_TCP_IPV4_MASKS;
+ } else if (hash_flds &
+ ICE_FLOW_AVF_RSS_UDP_IPV4_MASKS) {
+ rss_hash = ICE_FLOW_HASH_IPV4 |
+ ICE_FLOW_HASH_UDP_PORT;
+ hash_flds &= ~ICE_FLOW_AVF_RSS_UDP_IPV4_MASKS;
+ } else if (hash_flds &
+ BIT_ULL(ICE_AVF_FLOW_FIELD_IPV4_SCTP)) {
+ rss_hash = ICE_FLOW_HASH_IPV4 |
+ ICE_FLOW_HASH_SCTP_PORT;
+ hash_flds &=
+ ~BIT_ULL(ICE_AVF_FLOW_FIELD_IPV4_SCTP);
+ }
+ } else if (hash_flds & ICE_FLOW_AVF_RSS_ALL_IPV6_MASKS) {
+ if (hash_flds & ICE_FLOW_AVF_RSS_IPV6_MASKS) {
+ rss_hash = ICE_FLOW_HASH_IPV6;
+ hash_flds &= ~ICE_FLOW_AVF_RSS_IPV6_MASKS;
+ } else if (hash_flds &
+ ICE_FLOW_AVF_RSS_TCP_IPV6_MASKS) {
+ rss_hash = ICE_FLOW_HASH_IPV6 |
+ ICE_FLOW_HASH_TCP_PORT;
+ hash_flds &= ~ICE_FLOW_AVF_RSS_TCP_IPV6_MASKS;
+ } else if (hash_flds &
+ ICE_FLOW_AVF_RSS_UDP_IPV6_MASKS) {
+ rss_hash = ICE_FLOW_HASH_IPV6 |
+ ICE_FLOW_HASH_UDP_PORT;
+ hash_flds &= ~ICE_FLOW_AVF_RSS_UDP_IPV6_MASKS;
+ } else if (hash_flds &
+ BIT_ULL(ICE_AVF_FLOW_FIELD_IPV6_SCTP)) {
+ rss_hash = ICE_FLOW_HASH_IPV6 |
+ ICE_FLOW_HASH_SCTP_PORT;
+ hash_flds &=
+ ~BIT_ULL(ICE_AVF_FLOW_FIELD_IPV6_SCTP);
+ }
+ }
+
+ if (rss_hash == ICE_HASH_INVALID)
+ return ICE_ERR_OUT_OF_RANGE;
+
+ status = ice_add_rss_cfg(hw, vsi_handle, rss_hash,
+ ICE_FLOW_SEG_HDR_NONE);
+ if (status)
+ break;
+ }
+
+ return status;
+}
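A worked sketch of the conversion (illustrative; hw and vsi_handle are assumed to come from the caller's context, and this snippet is not part of the patch): requesting only the TCP/IPv4 bit produces two RSS configurations, because the L3-only masks are OR'd in before the loop consumes the bitmap:

	/* iteration 1 consumes the implied IPv4 masks and adds
	 * ICE_FLOW_HASH_IPV4; iteration 2 consumes the TCP/IPv4 masks and
	 * adds ICE_FLOW_HASH_IPV4 | ICE_FLOW_HASH_TCP_PORT
	 */
	enum ice_status status;

	status = ice_add_avf_rss_cfg(hw, vsi_handle,
				     BIT_ULL(ICE_AVF_FLOW_FIELD_IPV4_TCP));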
+
+/**
+ * ice_replay_rss_cfg - replay RSS configurations associated with VSI
+ * @hw: pointer to the hardware structure
+ * @vsi_handle: software VSI handle
+ */
+enum ice_status ice_replay_rss_cfg(struct ice_hw *hw, u16 vsi_handle)
+{
+ enum ice_status status = 0;
+ struct ice_rss_cfg *r;
+
+ if (!ice_is_vsi_valid(hw, vsi_handle))
+ return ICE_ERR_PARAM;
+
+ mutex_lock(&hw->rss_locks);
+ list_for_each_entry(r, &hw->rss_list_head, l_entry) {
+ if (test_bit(vsi_handle, r->vsis)) {
+ status = ice_add_rss_cfg_sync(hw, vsi_handle,
+ r->hashed_flds,
+ r->packet_hdr,
+ ICE_RSS_OUTER_HEADERS);
+ if (status)
+ break;
+ }
+ }
+ mutex_unlock(&hw->rss_locks);
+
+ return status;
+}
+
+/**
+ * ice_get_rss_cfg - returns hashed fields for the given header types
+ * @hw: pointer to the hardware structure
+ * @vsi_handle: software VSI handle
+ * @hdrs: protocol header type
+ *
+ * This function will return the match fields of the first instance of a flow
+ * profile that has the given header types and contains the input VSI
+ */
+u64 ice_get_rss_cfg(struct ice_hw *hw, u16 vsi_handle, u32 hdrs)
+{
+ struct ice_rss_cfg *r, *rss_cfg = NULL;
+
+ /* verify that the protocol header is nonzero and the VSI is valid */
+ if (hdrs == ICE_FLOW_SEG_HDR_NONE || !ice_is_vsi_valid(hw, vsi_handle))
+ return ICE_HASH_INVALID;
+
+ mutex_lock(&hw->rss_locks);
+ list_for_each_entry(r, &hw->rss_list_head, l_entry)
+ if (test_bit(vsi_handle, r->vsis) &&
+ r->packet_hdr == hdrs) {
+ rss_cfg = r;
+ break;
+ }
+ mutex_unlock(&hw->rss_locks);
+
+ return rss_cfg ? rss_cfg->hashed_flds : ICE_HASH_INVALID;
+}
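Taken together, a hypothetical consumer of the new API might look like the following (a minimal sketch, assuming hw and vsi_handle come from an initialized PF; not part of this patch):

	static enum ice_status example_enable_tcp4_rss(struct ice_hw *hw,
						       u16 vsi_handle)
	{
		enum ice_status status;

		/* add a 4-tuple TCP/IPv4 hash configuration */
		status = ice_add_rss_cfg(hw, vsi_handle, ICE_HASH_TCP_IPV4,
					 ICE_FLOW_SEG_HDR_TCP |
					 ICE_FLOW_SEG_HDR_IPV4);
		if (status)
			return status;

		/* read back the hashed fields for that header combination */
		if (ice_get_rss_cfg(hw, vsi_handle,
				    ICE_FLOW_SEG_HDR_TCP |
				    ICE_FLOW_SEG_HDR_IPV4) != ICE_HASH_TCP_IPV4)
			return ICE_ERR_CFG;

		return 0;
	}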
diff --git a/drivers/net/ethernet/intel/ice/ice_flow.h b/drivers/net/ethernet/intel/ice/ice_flow.h
new file mode 100644
index 000000000000..5558627bd5eb
--- /dev/null
+++ b/drivers/net/ethernet/intel/ice/ice_flow.h
@@ -0,0 +1,207 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (c) 2019, Intel Corporation. */
+
+#ifndef _ICE_FLOW_H_
+#define _ICE_FLOW_H_
+
+#define ICE_FLOW_ENTRY_HANDLE_INVAL 0
+#define ICE_FLOW_FLD_OFF_INVAL 0xffff
+
+/* Generate flow hash field from flow field type(s) */
+#define ICE_FLOW_HASH_IPV4 \
+ (BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_SA) | \
+ BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_DA))
+#define ICE_FLOW_HASH_IPV6 \
+ (BIT_ULL(ICE_FLOW_FIELD_IDX_IPV6_SA) | \
+ BIT_ULL(ICE_FLOW_FIELD_IDX_IPV6_DA))
+#define ICE_FLOW_HASH_TCP_PORT \
+ (BIT_ULL(ICE_FLOW_FIELD_IDX_TCP_SRC_PORT) | \
+ BIT_ULL(ICE_FLOW_FIELD_IDX_TCP_DST_PORT))
+#define ICE_FLOW_HASH_UDP_PORT \
+ (BIT_ULL(ICE_FLOW_FIELD_IDX_UDP_SRC_PORT) | \
+ BIT_ULL(ICE_FLOW_FIELD_IDX_UDP_DST_PORT))
+#define ICE_FLOW_HASH_SCTP_PORT \
+ (BIT_ULL(ICE_FLOW_FIELD_IDX_SCTP_SRC_PORT) | \
+ BIT_ULL(ICE_FLOW_FIELD_IDX_SCTP_DST_PORT))
+
+#define ICE_HASH_INVALID 0
+#define ICE_HASH_TCP_IPV4 (ICE_FLOW_HASH_IPV4 | ICE_FLOW_HASH_TCP_PORT)
+#define ICE_HASH_TCP_IPV6 (ICE_FLOW_HASH_IPV6 | ICE_FLOW_HASH_TCP_PORT)
+#define ICE_HASH_UDP_IPV4 (ICE_FLOW_HASH_IPV4 | ICE_FLOW_HASH_UDP_PORT)
+#define ICE_HASH_UDP_IPV6 (ICE_FLOW_HASH_IPV6 | ICE_FLOW_HASH_UDP_PORT)
+
+/* Protocol header fields within a packet segment. A segment consists of one or
+ * more protocol headers that make up a logical group. Each logical group of
+ * protocol headers encapsulates, or is encapsulated by, tunneling or
+ * encapsulation protocols for network virtualization such as GRE, VxLAN, etc.
+ */
+enum ice_flow_seg_hdr {
+ ICE_FLOW_SEG_HDR_NONE = 0x00000000,
+ ICE_FLOW_SEG_HDR_IPV4 = 0x00000004,
+ ICE_FLOW_SEG_HDR_IPV6 = 0x00000008,
+ ICE_FLOW_SEG_HDR_TCP = 0x00000040,
+ ICE_FLOW_SEG_HDR_UDP = 0x00000080,
+ ICE_FLOW_SEG_HDR_SCTP = 0x00000100,
+};
+
+enum ice_flow_field {
+ /* L3 */
+ ICE_FLOW_FIELD_IDX_IPV4_SA,
+ ICE_FLOW_FIELD_IDX_IPV4_DA,
+ ICE_FLOW_FIELD_IDX_IPV6_SA,
+ ICE_FLOW_FIELD_IDX_IPV6_DA,
+ /* L4 */
+ ICE_FLOW_FIELD_IDX_TCP_SRC_PORT,
+ ICE_FLOW_FIELD_IDX_TCP_DST_PORT,
+ ICE_FLOW_FIELD_IDX_UDP_SRC_PORT,
+ ICE_FLOW_FIELD_IDX_UDP_DST_PORT,
+ ICE_FLOW_FIELD_IDX_SCTP_SRC_PORT,
+ ICE_FLOW_FIELD_IDX_SCTP_DST_PORT,
+ /* The total number of enums must not exceed 64 */
+ ICE_FLOW_FIELD_IDX_MAX
+};
+
+/* Flow headers and fields for AVF support */
+enum ice_flow_avf_hdr_field {
+ /* Values 0 - 28 are reserved for future use */
+ ICE_AVF_FLOW_FIELD_INVALID = 0,
+ ICE_AVF_FLOW_FIELD_UNICAST_IPV4_UDP = 29,
+ ICE_AVF_FLOW_FIELD_MULTICAST_IPV4_UDP,
+ ICE_AVF_FLOW_FIELD_IPV4_UDP,
+ ICE_AVF_FLOW_FIELD_IPV4_TCP_SYN_NO_ACK,
+ ICE_AVF_FLOW_FIELD_IPV4_TCP,
+ ICE_AVF_FLOW_FIELD_IPV4_SCTP,
+ ICE_AVF_FLOW_FIELD_IPV4_OTHER,
+ ICE_AVF_FLOW_FIELD_FRAG_IPV4,
+ /* Values 37-38 are reserved */
+ ICE_AVF_FLOW_FIELD_UNICAST_IPV6_UDP = 39,
+ ICE_AVF_FLOW_FIELD_MULTICAST_IPV6_UDP,
+ ICE_AVF_FLOW_FIELD_IPV6_UDP,
+ ICE_AVF_FLOW_FIELD_IPV6_TCP_SYN_NO_ACK,
+ ICE_AVF_FLOW_FIELD_IPV6_TCP,
+ ICE_AVF_FLOW_FIELD_IPV6_SCTP,
+ ICE_AVF_FLOW_FIELD_IPV6_OTHER,
+ ICE_AVF_FLOW_FIELD_FRAG_IPV6,
+ ICE_AVF_FLOW_FIELD_RSVD47,
+ ICE_AVF_FLOW_FIELD_FCOE_OX,
+ ICE_AVF_FLOW_FIELD_FCOE_RX,
+ ICE_AVF_FLOW_FIELD_FCOE_OTHER,
+ /* Values 51-62 are reserved */
+ ICE_AVF_FLOW_FIELD_L2_PAYLOAD = 63,
+ ICE_AVF_FLOW_FIELD_MAX
+};
+
+/* Supported RSS offloads. This macro is defined to support the
+ * VIRTCHNL_OP_GET_RSS_HENA_CAPS op. The PF driver sends the RSS hardware
+ * capabilities to the caller of this op.
+ */
+#define ICE_DEFAULT_RSS_HENA ( \
+ BIT_ULL(ICE_AVF_FLOW_FIELD_IPV4_UDP) | \
+ BIT_ULL(ICE_AVF_FLOW_FIELD_IPV4_SCTP) | \
+ BIT_ULL(ICE_AVF_FLOW_FIELD_IPV4_TCP) | \
+ BIT_ULL(ICE_AVF_FLOW_FIELD_IPV4_OTHER) | \
+ BIT_ULL(ICE_AVF_FLOW_FIELD_FRAG_IPV4) | \
+ BIT_ULL(ICE_AVF_FLOW_FIELD_IPV6_UDP) | \
+ BIT_ULL(ICE_AVF_FLOW_FIELD_IPV6_TCP) | \
+ BIT_ULL(ICE_AVF_FLOW_FIELD_IPV6_SCTP) | \
+ BIT_ULL(ICE_AVF_FLOW_FIELD_IPV6_OTHER) | \
+ BIT_ULL(ICE_AVF_FLOW_FIELD_FRAG_IPV6) | \
+ BIT_ULL(ICE_AVF_FLOW_FIELD_IPV4_TCP_SYN_NO_ACK) | \
+ BIT_ULL(ICE_AVF_FLOW_FIELD_UNICAST_IPV4_UDP) | \
+ BIT_ULL(ICE_AVF_FLOW_FIELD_MULTICAST_IPV4_UDP) | \
+ BIT_ULL(ICE_AVF_FLOW_FIELD_IPV6_TCP_SYN_NO_ACK) | \
+ BIT_ULL(ICE_AVF_FLOW_FIELD_UNICAST_IPV6_UDP) | \
+ BIT_ULL(ICE_AVF_FLOW_FIELD_MULTICAST_IPV6_UDP))
+
+enum ice_flow_dir {
+ ICE_FLOW_RX = 0x02,
+};
+
+enum ice_flow_priority {
+ ICE_FLOW_PRIO_LOW,
+ ICE_FLOW_PRIO_NORMAL,
+ ICE_FLOW_PRIO_HIGH
+};
+
+#define ICE_FLOW_SEG_MAX 2
+#define ICE_FLOW_FV_EXTRACT_SZ 2
+
+#define ICE_FLOW_SET_HDRS(seg, val) ((seg)->hdrs |= (u32)(val))
+
+struct ice_flow_seg_xtrct {
+ u8 prot_id; /* Protocol ID of extracted header field */
+ u16 off; /* Starting offset of the field in header in bytes */
+ u8 idx; /* Index of FV entry used */
+ u8 disp; /* Displacement of field in bits from FV entry's start */
+};
+
+enum ice_flow_fld_match_type {
+ ICE_FLOW_FLD_TYPE_REG, /* Value, mask */
+ ICE_FLOW_FLD_TYPE_RANGE, /* Value, mask, last (upper bound) */
+ ICE_FLOW_FLD_TYPE_PREFIX, /* IP address, prefix, size of prefix */
+ ICE_FLOW_FLD_TYPE_SIZE, /* Value, mask, size of match */
+};
+
+struct ice_flow_fld_loc {
+ /* Describe offsets of field information relative to the beginning of
+ * input buffer provided when adding flow entries.
+ */
+ u16 val; /* Offset where the value is located */
+ u16 mask; /* Offset where the mask/prefix value is located */
+ u16 last; /* Length or offset where the upper value is located */
+};
+
+struct ice_flow_fld_info {
+ enum ice_flow_fld_match_type type;
+ /* Location where to retrieve data from an input buffer */
+ struct ice_flow_fld_loc src;
+ /* Location where to put the data into the final entry buffer */
+ struct ice_flow_fld_loc entry;
+ struct ice_flow_seg_xtrct xtrct;
+};
+
+struct ice_flow_seg_info {
+ u32 hdrs; /* Bitmask indicating protocol headers present */
+ u64 match; /* Bitmask indicating header fields to be matched */
+ u64 range; /* Bitmask indicating header fields matched as ranges */
+
+ struct ice_flow_fld_info fields[ICE_FLOW_FIELD_IDX_MAX];
+};
+
+struct ice_flow_prof {
+ struct list_head l_entry;
+
+ u64 id;
+ enum ice_flow_dir dir;
+ u8 segs_cnt;
+
+ /* Keep track of flow entries associated with this flow profile */
+ struct mutex entries_lock;
+ struct list_head entries;
+
+ struct ice_flow_seg_info segs[ICE_FLOW_SEG_MAX];
+
+ /* software VSI handles referenced by this flow profile */
+ DECLARE_BITMAP(vsis, ICE_MAX_VSI);
+};
+
+struct ice_rss_cfg {
+ struct list_head l_entry;
+ /* bitmap of VSIs added to the RSS entry */
+ DECLARE_BITMAP(vsis, ICE_MAX_VSI);
+ u64 hashed_flds;
+ u32 packet_hdr;
+};
+
+enum ice_status ice_flow_rem_entry(struct ice_hw *hw, u64 entry_h);
+void ice_rem_vsi_rss_list(struct ice_hw *hw, u16 vsi_handle);
+enum ice_status ice_replay_rss_cfg(struct ice_hw *hw, u16 vsi_handle);
+enum ice_status
+ice_add_avf_rss_cfg(struct ice_hw *hw, u16 vsi_handle, u64 hashed_flds);
+enum ice_status ice_rem_vsi_rss_cfg(struct ice_hw *hw, u16 vsi_handle);
+enum ice_status
+ice_add_rss_cfg(struct ice_hw *hw, u16 vsi_handle, u64 hashed_flds,
+ u32 addl_hdrs);
+u64 ice_get_rss_cfg(struct ice_hw *hw, u16 vsi_handle, u32 hdrs);
+#endif /* _ICE_FLOW_H_ */
diff --git a/drivers/net/ethernet/intel/ice/ice_hw_autogen.h b/drivers/net/ethernet/intel/ice/ice_hw_autogen.h
index e8f32350fed2..f2cababf2561 100644
--- a/drivers/net/ethernet/intel/ice/ice_hw_autogen.h
+++ b/drivers/net/ethernet/intel/ice/ice_hw_autogen.h
@@ -60,15 +60,6 @@
#define PRTDCB_GENS_DCBX_STATUS_M ICE_M(0x7, 0)
#define GL_PREEXT_L2_PMASK0(_i) (0x0020F0FC + ((_i) * 4))
#define GL_PREEXT_L2_PMASK1(_i) (0x0020F108 + ((_i) * 4))
-#define GLFLXP_RXDID_FLAGS(_i, _j) (0x0045D000 + ((_i) * 4 + (_j) * 256))
-#define GLFLXP_RXDID_FLAGS_FLEXIFLAG_4N_S 0
-#define GLFLXP_RXDID_FLAGS_FLEXIFLAG_4N_M ICE_M(0x3F, 0)
-#define GLFLXP_RXDID_FLAGS_FLEXIFLAG_4N_1_S 8
-#define GLFLXP_RXDID_FLAGS_FLEXIFLAG_4N_1_M ICE_M(0x3F, 8)
-#define GLFLXP_RXDID_FLAGS_FLEXIFLAG_4N_2_S 16
-#define GLFLXP_RXDID_FLAGS_FLEXIFLAG_4N_2_M ICE_M(0x3F, 16)
-#define GLFLXP_RXDID_FLAGS_FLEXIFLAG_4N_3_S 24
-#define GLFLXP_RXDID_FLAGS_FLEXIFLAG_4N_3_M ICE_M(0x3F, 24)
#define GLFLXP_RXDID_FLX_WRD_0(_i) (0x0045c800 + ((_i) * 4))
#define GLFLXP_RXDID_FLX_WRD_0_PROT_MDID_S 0
#define GLFLXP_RXDID_FLX_WRD_0_PROT_MDID_M ICE_M(0xFF, 0)
diff --git a/drivers/net/ethernet/intel/ice/ice_lan_tx_rx.h b/drivers/net/ethernet/intel/ice/ice_lan_tx_rx.h
index 0997d352709b..878e125d8b42 100644
--- a/drivers/net/ethernet/intel/ice/ice_lan_tx_rx.h
+++ b/drivers/net/ethernet/intel/ice/ice_lan_tx_rx.h
@@ -199,6 +199,14 @@ enum ice_rxdid {
/* Receive Flex Descriptor Rx opcode values */
#define ICE_RX_OPC_MDID 0x01
+/* Receive Descriptor MDID values that access packet flags */
+enum ice_flex_mdid_pkt_flags {
+ ICE_RX_MDID_PKT_FLAGS_15_0 = 20,
+ ICE_RX_MDID_PKT_FLAGS_31_16,
+ ICE_RX_MDID_PKT_FLAGS_47_32,
+ ICE_RX_MDID_PKT_FLAGS_63_48,
+};
+
/* Receive Descriptor MDID values */
enum ice_flex_rx_mdid {
ICE_RX_MDID_FLOW_ID_LOWER = 5,
diff --git a/drivers/net/ethernet/intel/ice/ice_lib.c b/drivers/net/ethernet/intel/ice/ice_lib.c
index e7449248fab4..1874c9f51a32 100644
--- a/drivers/net/ethernet/intel/ice/ice_lib.c
+++ b/drivers/net/ethernet/intel/ice/ice_lib.c
@@ -3,6 +3,7 @@
#include "ice.h"
#include "ice_base.h"
+#include "ice_flow.h"
#include "ice_lib.h"
#include "ice_dcb_lib.h"
@@ -493,7 +494,28 @@ bool ice_is_safe_mode(struct ice_pf *pf)
}
/**
- * ice_rss_clean - Delete RSS related VSI structures that hold user inputs
+ * ice_vsi_clean_rss_flow_fld - Delete RSS configuration
+ * @vsi: the VSI being cleaned up
+ *
+ * This function deletes the RSS input set for all flows that were configured
+ * for this VSI
+ */
+static void ice_vsi_clean_rss_flow_fld(struct ice_vsi *vsi)
+{
+ struct ice_pf *pf = vsi->back;
+ enum ice_status status;
+
+ if (ice_is_safe_mode(pf))
+ return;
+
+ status = ice_rem_vsi_rss_cfg(&pf->hw, vsi->idx);
+ if (status)
+ dev_dbg(ice_pf_to_dev(pf), "ice_rem_vsi_rss_cfg failed for vsi = %d, error = %d\n",
+ vsi->vsi_num, status);
+}
+
+/**
+ * ice_rss_clean - Delete RSS related VSI structures and configuration
* @vsi: the VSI being removed
*/
static void ice_rss_clean(struct ice_vsi *vsi)
@@ -507,6 +529,11 @@ static void ice_rss_clean(struct ice_vsi *vsi)
devm_kfree(dev, vsi->rss_hkey_user);
if (vsi->rss_lut_user)
devm_kfree(dev, vsi->rss_lut_user);
+
+ ice_vsi_clean_rss_flow_fld(vsi);
+ /* remove RSS replay list */
+ if (!ice_is_safe_mode(pf))
+ ice_rem_vsi_rss_list(&pf->hw, vsi->idx);
}
/**
@@ -817,12 +844,23 @@ static int ice_vsi_init(struct ice_vsi *vsi, bool init_vsi)
ctxt->info.valid_sections |=
cpu_to_le16(ICE_AQ_VSI_PROP_RXQ_MAP_VALID);
- /* Enable MAC Antispoof with new VSI being initialized or updated */
- if (vsi->type == ICE_VSI_VF && pf->vf[vsi->vf_id].spoofchk) {
+ /* enable/disable MAC and VLAN anti-spoof when spoofchk is on/off
+ * respectively
+ */
+ if (vsi->type == ICE_VSI_VF) {
ctxt->info.valid_sections |=
cpu_to_le16(ICE_AQ_VSI_PROP_SECURITY_VALID);
- ctxt->info.sec_flags |=
- ICE_AQ_VSI_SEC_FLAG_ENA_MAC_ANTI_SPOOF;
+ if (pf->vf[vsi->vf_id].spoofchk) {
+ ctxt->info.sec_flags |=
+ ICE_AQ_VSI_SEC_FLAG_ENA_MAC_ANTI_SPOOF |
+ (ICE_AQ_VSI_SEC_TX_VLAN_PRUNE_ENA <<
+ ICE_AQ_VSI_SEC_TX_PRUNE_ENA_S);
+ } else {
+ ctxt->info.sec_flags &=
+ ~(ICE_AQ_VSI_SEC_FLAG_ENA_MAC_ANTI_SPOOF |
+ (ICE_AQ_VSI_SEC_TX_VLAN_PRUNE_ENA <<
+ ICE_AQ_VSI_SEC_TX_PRUNE_ENA_S));
+ }
}
/* Allow control frames out of main VSI */
@@ -1076,6 +1114,115 @@ ice_vsi_cfg_rss_exit:
}
/**
+ * ice_vsi_set_vf_rss_flow_fld - Sets VF VSI RSS input set for different flows
+ * @vsi: VSI to be configured
+ *
+ * This function will only be called during VF VSI setup. Upon successful
+ * completion of package download, this function will configure default RSS
+ * input sets for the VF VSI.
+ */
+static void ice_vsi_set_vf_rss_flow_fld(struct ice_vsi *vsi)
+{
+ struct ice_pf *pf = vsi->back;
+ enum ice_status status;
+ struct device *dev;
+
+ dev = ice_pf_to_dev(pf);
+ if (ice_is_safe_mode(pf)) {
+ dev_dbg(dev, "Advanced RSS disabled. Package download failed, vsi num = %d\n",
+ vsi->vsi_num);
+ return;
+ }
+
+ status = ice_add_avf_rss_cfg(&pf->hw, vsi->idx, ICE_DEFAULT_RSS_HENA);
+ if (status)
+ dev_dbg(dev, "ice_add_avf_rss_cfg failed for vsi = %d, error = %d\n",
+ vsi->vsi_num, status);
+}
+
+/**
+ * ice_vsi_set_rss_flow_fld - Sets RSS input set for different flows
+ * @vsi: VSI to be configured
+ *
+ * This function will only be called after a successful package download call
+ * during PF initialization. Since the downloaded package will erase the
+ * RSS section, this function will configure RSS input sets for different
+ * flow types. The last profile added has the highest priority, therefore
+ * 2-tuple profiles (e.g. IPv4 src/dst) are added before 4-tuple profiles
+ * (e.g. IPv4 src/dst + TCP src/dst port).
+ */
+static void ice_vsi_set_rss_flow_fld(struct ice_vsi *vsi)
+{
+ u16 vsi_handle = vsi->idx, vsi_num = vsi->vsi_num;
+ struct ice_pf *pf = vsi->back;
+ struct ice_hw *hw = &pf->hw;
+ enum ice_status status;
+ struct device *dev;
+
+ dev = ice_pf_to_dev(pf);
+ if (ice_is_safe_mode(pf)) {
+ dev_dbg(dev, "Advanced RSS disabled. Package download failed, vsi num = %d\n",
+ vsi_num);
+ return;
+ }
+ /* configure RSS for IPv4 with input set IP src/dst */
+ status = ice_add_rss_cfg(hw, vsi_handle, ICE_FLOW_HASH_IPV4,
+ ICE_FLOW_SEG_HDR_IPV4);
+ if (status)
+ dev_dbg(dev, "ice_add_rss_cfg failed for ipv4 flow, vsi = %d, error = %d\n",
+ vsi_num, status);
+
+ /* configure RSS for IPv6 with input set IPv6 src/dst */
+ status = ice_add_rss_cfg(hw, vsi_handle, ICE_FLOW_HASH_IPV6,
+ ICE_FLOW_SEG_HDR_IPV6);
+ if (status)
+ dev_dbg(dev, "ice_add_rss_cfg failed for ipv6 flow, vsi = %d, error = %d\n",
+ vsi_num, status);
+
+ /* configure RSS for tcp4 with input set IP src/dst, TCP src/dst */
+ status = ice_add_rss_cfg(hw, vsi_handle, ICE_HASH_TCP_IPV4,
+ ICE_FLOW_SEG_HDR_TCP | ICE_FLOW_SEG_HDR_IPV4);
+ if (status)
+ dev_dbg(dev, "ice_add_rss_cfg failed for tcp4 flow, vsi = %d, error = %d\n",
+ vsi_num, status);
+
+ /* configure RSS for udp4 with input set IP src/dst, UDP src/dst */
+ status = ice_add_rss_cfg(hw, vsi_handle, ICE_HASH_UDP_IPV4,
+ ICE_FLOW_SEG_HDR_UDP | ICE_FLOW_SEG_HDR_IPV4);
+ if (status)
+ dev_dbg(dev, "ice_add_rss_cfg failed for udp4 flow, vsi = %d, error = %d\n",
+ vsi_num, status);
+
+ /* configure RSS for sctp4 with input set IP src/dst */
+ status = ice_add_rss_cfg(hw, vsi_handle, ICE_FLOW_HASH_IPV4,
+ ICE_FLOW_SEG_HDR_SCTP | ICE_FLOW_SEG_HDR_IPV4);
+ if (status)
+ dev_dbg(dev, "ice_add_rss_cfg failed for sctp4 flow, vsi = %d, error = %d\n",
+ vsi_num, status);
+
+ /* configure RSS for tcp6 with input set IPv6 src/dst, TCP src/dst */
+ status = ice_add_rss_cfg(hw, vsi_handle, ICE_HASH_TCP_IPV6,
+ ICE_FLOW_SEG_HDR_TCP | ICE_FLOW_SEG_HDR_IPV6);
+ if (status)
+ dev_dbg(dev, "ice_add_rss_cfg failed for tcp6 flow, vsi = %d, error = %d\n",
+ vsi_num, status);
+
+ /* configure RSS for udp6 with input set IPv6 src/dst, UDP src/dst */
+ status = ice_add_rss_cfg(hw, vsi_handle, ICE_HASH_UDP_IPV6,
+ ICE_FLOW_SEG_HDR_UDP | ICE_FLOW_SEG_HDR_IPV6);
+ if (status)
+ dev_dbg(dev, "ice_add_rss_cfg failed for udp6 flow, vsi = %d, error = %d\n",
+ vsi_num, status);
+
+ /* configure RSS for sctp6 with input set IPv6 src/dst */
+ status = ice_add_rss_cfg(hw, vsi_handle, ICE_FLOW_HASH_IPV6,
+ ICE_FLOW_SEG_HDR_SCTP | ICE_FLOW_SEG_HDR_IPV6);
+ if (status)
+ dev_dbg(dev, "ice_add_rss_cfg failed for sctp6 flow, vsi = %d, error = %d\n",
+ vsi_num, status);
+}
+
+/**
* ice_add_mac_to_list - Add a MAC address filter entry to the list
* @vsi: the VSI to be forwarded to
* @add_list: pointer to the list which contains MAC filter entries
@@ -1636,22 +1783,14 @@ int ice_cfg_vlan_pruning(struct ice_vsi *vsi, bool ena, bool vlan_promisc)
ctxt->info = vsi->info;
- if (ena) {
- ctxt->info.sec_flags |=
- ICE_AQ_VSI_SEC_TX_VLAN_PRUNE_ENA <<
- ICE_AQ_VSI_SEC_TX_PRUNE_ENA_S;
+ if (ena)
ctxt->info.sw_flags2 |= ICE_AQ_VSI_SW_FLAG_RX_VLAN_PRUNE_ENA;
- } else {
- ctxt->info.sec_flags &=
- ~(ICE_AQ_VSI_SEC_TX_VLAN_PRUNE_ENA <<
- ICE_AQ_VSI_SEC_TX_PRUNE_ENA_S);
+ else
ctxt->info.sw_flags2 &= ~ICE_AQ_VSI_SW_FLAG_RX_VLAN_PRUNE_ENA;
- }
if (!vlan_promisc)
ctxt->info.valid_sections =
- cpu_to_le16(ICE_AQ_VSI_PROP_SECURITY_VALID |
- ICE_AQ_VSI_PROP_SW_VALID);
+ cpu_to_le16(ICE_AQ_VSI_PROP_SW_VALID);
status = ice_update_vsi(&pf->hw, vsi->idx, ctxt, NULL);
if (status) {
@@ -1661,7 +1800,6 @@ int ice_cfg_vlan_pruning(struct ice_vsi *vsi, bool ena, bool vlan_promisc)
goto err_out;
}
- vsi->info.sec_flags = ctxt->info.sec_flags;
vsi->info.sw_flags2 = ctxt->info.sw_flags2;
kfree(ctxt);
@@ -1899,8 +2037,10 @@ ice_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi,
* receive traffic on first queue. Hence no need to capture
* return value
*/
- if (test_bit(ICE_FLAG_RSS_ENA, pf->flags))
+ if (test_bit(ICE_FLAG_RSS_ENA, pf->flags)) {
ice_vsi_cfg_rss_lut_key(vsi);
+ ice_vsi_set_rss_flow_fld(vsi);
+ }
break;
case ICE_VSI_VF:
/* VF driver will take care of creating netdev for this type and
@@ -1924,8 +2064,10 @@ ice_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi,
* receive traffic on first queue. Hence no need to capture
* return value
*/
- if (test_bit(ICE_FLAG_RSS_ENA, pf->flags))
+ if (test_bit(ICE_FLAG_RSS_ENA, pf->flags)) {
ice_vsi_cfg_rss_lut_key(vsi);
+ ice_vsi_set_vf_rss_flow_fld(vsi);
+ }
break;
case ICE_VSI_LB:
ret = ice_vsi_alloc_rings(vsi);
@@ -2402,6 +2544,97 @@ int ice_vsi_release(struct ice_vsi *vsi)
}
/**
+ * ice_vsi_rebuild_update_coalesce - set coalesce for a q_vector
+ * @q_vector: pointer to q_vector which is being updated
+ * @coalesce: pointer to array of struct with stored coalesce
+ *
+ * Set coalesce param in q_vector and update these parameters in HW.
+ */
+static void
+ice_vsi_rebuild_update_coalesce(struct ice_q_vector *q_vector,
+ struct ice_coalesce_stored *coalesce)
+{
+ struct ice_ring_container *rx_rc = &q_vector->rx;
+ struct ice_ring_container *tx_rc = &q_vector->tx;
+ struct ice_hw *hw = &q_vector->vsi->back->hw;
+
+ tx_rc->itr_setting = coalesce->itr_tx;
+ rx_rc->itr_setting = coalesce->itr_rx;
+
+ /* dynamic ITR values will be updated during Tx/Rx */
+ if (!ITR_IS_DYNAMIC(tx_rc->itr_setting))
+ wr32(hw, GLINT_ITR(tx_rc->itr_idx, q_vector->reg_idx),
+ ITR_REG_ALIGN(tx_rc->itr_setting) >>
+ ICE_ITR_GRAN_S);
+ if (!ITR_IS_DYNAMIC(rx_rc->itr_setting))
+ wr32(hw, GLINT_ITR(rx_rc->itr_idx, q_vector->reg_idx),
+ ITR_REG_ALIGN(rx_rc->itr_setting) >>
+ ICE_ITR_GRAN_S);
+
+ q_vector->intrl = coalesce->intrl;
+ wr32(hw, GLINT_RATE(q_vector->reg_idx),
+ ice_intrl_usec_to_reg(q_vector->intrl, hw->intrl_gran));
+}
+
+/**
+ * ice_vsi_rebuild_get_coalesce - get coalesce from all q_vectors
+ * @vsi: VSI connected with q_vectors
+ * @coalesce: array of struct with stored coalesce
+ *
+ * Returns array size.
+ */
+static int
+ice_vsi_rebuild_get_coalesce(struct ice_vsi *vsi,
+ struct ice_coalesce_stored *coalesce)
+{
+ int i;
+
+ ice_for_each_q_vector(vsi, i) {
+ struct ice_q_vector *q_vector = vsi->q_vectors[i];
+
+ coalesce[i].itr_tx = q_vector->tx.itr_setting;
+ coalesce[i].itr_rx = q_vector->rx.itr_setting;
+ coalesce[i].intrl = q_vector->intrl;
+ }
+
+ return vsi->num_q_vectors;
+}
+
+/**
+ * ice_vsi_rebuild_set_coalesce - set coalesce from earlier saved arrays
+ * @vsi: VSI connected with q_vectors
+ * @coalesce: pointer to array of struct with stored coalesce
+ * @size: size of coalesce array
+ *
+ * Before this function, ice_vsi_rebuild_get_coalesce should be called to save
+ * the ITR params in the array. If size is 0 or the coalesce settings weren't
+ * stored, the coalesce parameters are set to default values.
+ */
+static void
+ice_vsi_rebuild_set_coalesce(struct ice_vsi *vsi,
+ struct ice_coalesce_stored *coalesce, int size)
+{
+ int i;
+
+ if ((size && !coalesce) || !vsi)
+ return;
+
+ for (i = 0; i < size && i < vsi->num_q_vectors; i++)
+ ice_vsi_rebuild_update_coalesce(vsi->q_vectors[i],
+ &coalesce[i]);
+
+ for (; i < vsi->num_q_vectors; i++) {
+ struct ice_coalesce_stored coalesce_dflt = {
+ .itr_tx = ICE_DFLT_TX_ITR,
+ .itr_rx = ICE_DFLT_RX_ITR,
+ .intrl = 0
+ };
+ ice_vsi_rebuild_update_coalesce(vsi->q_vectors[i],
+ &coalesce_dflt);
+ }
+}
+
+/**
* ice_vsi_rebuild - Rebuild VSI after reset
* @vsi: VSI to be rebuild
* @init_vsi: is this an initialization or a reconfigure of the VSI
@@ -2411,6 +2644,8 @@ int ice_vsi_release(struct ice_vsi *vsi)
int ice_vsi_rebuild(struct ice_vsi *vsi, bool init_vsi)
{
u16 max_txqs[ICE_MAX_TRAFFIC_CLASS] = { 0 };
+ struct ice_coalesce_stored *coalesce;
+ int prev_num_q_vectors = 0;
struct ice_vf *vf = NULL;
enum ice_status status;
struct ice_pf *pf;
@@ -2423,6 +2658,11 @@ int ice_vsi_rebuild(struct ice_vsi *vsi, bool init_vsi)
if (vsi->type == ICE_VSI_VF)
vf = &pf->vf[vsi->vf_id];
+ coalesce = kcalloc(vsi->num_q_vectors,
+ sizeof(struct ice_coalesce_stored), GFP_KERNEL);
+ if (coalesce)
+ prev_num_q_vectors = ice_vsi_rebuild_get_coalesce(vsi,
+ coalesce);
ice_rm_vsi_lan_cfg(vsi->port_info, vsi->idx);
ice_vsi_free_q_vectors(vsi);
@@ -2535,6 +2775,9 @@ int ice_vsi_rebuild(struct ice_vsi *vsi, bool init_vsi)
return ice_schedule_reset(pf, ICE_RESET_PFR);
}
}
+ ice_vsi_rebuild_set_coalesce(vsi, coalesce, prev_num_q_vectors);
+ kfree(coalesce);
+
return 0;
err_vectors:
@@ -2549,6 +2792,7 @@ err_rings:
err_vsi:
ice_vsi_clear(vsi);
set_bit(__ICE_RESET_FAILED, pf->state);
+ kfree(coalesce);
return ret;
}
@@ -2740,3 +2984,121 @@ cfg_mac_fltr_exit:
ice_free_fltr_list(&vsi->back->pdev->dev, &tmp_add_list);
return status;
}
+
+/**
+ * ice_is_dflt_vsi_in_use - check if the default forwarding VSI is being used
+ * @sw: switch to check if its default forwarding VSI is free
+ *
+ * Returns true if the default forwarding VSI is already in use, else returns
+ * false, signaling that it's available to use.
+ */
+bool ice_is_dflt_vsi_in_use(struct ice_sw *sw)
+{
+ return (sw->dflt_vsi && sw->dflt_vsi_ena);
+}
+
+/**
+ * ice_is_vsi_dflt_vsi - check if the VSI passed in is the default VSI
+ * @sw: switch for the default forwarding VSI to compare against
+ * @vsi: VSI to compare against default forwarding VSI
+ *
+ * Returns true if the VSI passed in is the default forwarding VSI, else
+ * returns false
+ */
+bool ice_is_vsi_dflt_vsi(struct ice_sw *sw, struct ice_vsi *vsi)
+{
+ return (sw->dflt_vsi == vsi && sw->dflt_vsi_ena);
+}
+
+/**
+ * ice_set_dflt_vsi - set the default forwarding VSI
+ * @sw: switch used to assign the default forwarding VSI
+ * @vsi: VSI getting set as the default forwarding VSI on the switch
+ *
+ * If the VSI passed in is already the default VSI and it's enabled just return
+ * success.
+ *
+ * If there is already a default VSI on the switch and it's enabled then return
+ * -EEXIST since there can only be one default VSI per switch.
+ *
+ * Otherwise try to set the VSI passed in as the switch's default VSI and
+ * return the result.
+ */
+int ice_set_dflt_vsi(struct ice_sw *sw, struct ice_vsi *vsi)
+{
+ enum ice_status status;
+ struct device *dev;
+
+ if (!sw || !vsi)
+ return -EINVAL;
+
+ dev = ice_pf_to_dev(vsi->back);
+
+ /* the VSI passed in is already the default VSI */
+ if (ice_is_vsi_dflt_vsi(sw, vsi)) {
+ dev_dbg(dev, "VSI %d passed in is already the default forwarding VSI, nothing to do\n",
+ vsi->vsi_num);
+ return 0;
+ }
+
+ /* another VSI is already the default VSI for this switch */
+ if (ice_is_dflt_vsi_in_use(sw)) {
+ dev_err(dev,
+ "Default forwarding VSI %d already in use, disable it and try again\n",
+ sw->dflt_vsi->vsi_num);
+ return -EEXIST;
+ }
+
+ status = ice_cfg_dflt_vsi(&vsi->back->hw, vsi->idx, true, ICE_FLTR_RX);
+ if (status) {
+ dev_err(dev,
+ "Failed to set VSI %d as the default forwarding VSI, error %d\n",
+ vsi->vsi_num, status);
+ return -EIO;
+ }
+
+ sw->dflt_vsi = vsi;
+ sw->dflt_vsi_ena = true;
+
+ return 0;
+}
+
+/**
+ * ice_clear_dflt_vsi - clear the default forwarding VSI
+ * @sw: switch used to clear the default VSI
+ *
+ * If the switch has no default VSI or it's not enabled then return error.
+ *
+ * Otherwise try to clear the default VSI and return the result.
+ */
+int ice_clear_dflt_vsi(struct ice_sw *sw)
+{
+ struct ice_vsi *dflt_vsi;
+ enum ice_status status;
+ struct device *dev;
+
+ if (!sw)
+ return -EINVAL;
+
+ dev = ice_pf_to_dev(sw->pf);
+
+ dflt_vsi = sw->dflt_vsi;
+
+ /* there is no default VSI configured */
+ if (!ice_is_dflt_vsi_in_use(sw))
+ return -ENODEV;
+
+ status = ice_cfg_dflt_vsi(&dflt_vsi->back->hw, dflt_vsi->idx, false,
+ ICE_FLTR_RX);
+ if (status) {
+ dev_err(dev,
+ "Failed to clear the default forwarding VSI %d, error %d\n",
+ dflt_vsi->vsi_num, status);
+ return -EIO;
+ }
+
+ sw->dflt_vsi = NULL;
+ sw->dflt_vsi_ena = false;
+
+ return 0;
+}
diff --git a/drivers/net/ethernet/intel/ice/ice_lib.h b/drivers/net/ethernet/intel/ice/ice_lib.h
index 6e31e30aba39..68fd0d4505c2 100644
--- a/drivers/net/ethernet/intel/ice/ice_lib.h
+++ b/drivers/net/ethernet/intel/ice/ice_lib.h
@@ -103,4 +103,12 @@ enum ice_status
ice_vsi_cfg_mac_fltr(struct ice_vsi *vsi, const u8 *macaddr, bool set);
bool ice_is_safe_mode(struct ice_pf *pf);
+
+bool ice_is_dflt_vsi_in_use(struct ice_sw *sw);
+
+bool ice_is_vsi_dflt_vsi(struct ice_sw *sw, struct ice_vsi *vsi);
+
+int ice_set_dflt_vsi(struct ice_sw *sw, struct ice_vsi *vsi);
+
+int ice_clear_dflt_vsi(struct ice_sw *sw);
#endif /* !_ICE_LIB_H_ */
diff --git a/drivers/net/ethernet/intel/ice/ice_main.c b/drivers/net/ethernet/intel/ice/ice_main.c
index 69bff085acf7..5ae671609f98 100644
--- a/drivers/net/ethernet/intel/ice/ice_main.c
+++ b/drivers/net/ethernet/intel/ice/ice_main.c
@@ -13,7 +13,7 @@
#define DRV_VERSION_MAJOR 0
#define DRV_VERSION_MINOR 8
-#define DRV_VERSION_BUILD 1
+#define DRV_VERSION_BUILD 2
#define DRV_VERSION __stringify(DRV_VERSION_MAJOR) "." \
__stringify(DRV_VERSION_MINOR) "." \
@@ -379,25 +379,29 @@ static int ice_vsi_sync_fltr(struct ice_vsi *vsi)
clear_bit(ICE_VSI_FLAG_PROMISC_CHANGED, vsi->flags);
if (vsi->current_netdev_flags & IFF_PROMISC) {
/* Apply Rx filter rule to get traffic from wire */
- status = ice_cfg_dflt_vsi(hw, vsi->idx, true,
- ICE_FLTR_RX);
- if (status) {
- netdev_err(netdev, "Error setting default VSI %i Rx rule\n",
- vsi->vsi_num);
- vsi->current_netdev_flags &= ~IFF_PROMISC;
- err = -EIO;
- goto out_promisc;
+ if (!ice_is_dflt_vsi_in_use(pf->first_sw)) {
+ err = ice_set_dflt_vsi(pf->first_sw, vsi);
+ if (err && err != -EEXIST) {
+ netdev_err(netdev,
+ "Error %d setting default VSI %i Rx rule\n",
+ err, vsi->vsi_num);
+ vsi->current_netdev_flags &=
+ ~IFF_PROMISC;
+ goto out_promisc;
+ }
}
} else {
/* Clear Rx filter to remove traffic from wire */
- status = ice_cfg_dflt_vsi(hw, vsi->idx, false,
- ICE_FLTR_RX);
- if (status) {
- netdev_err(netdev, "Error clearing default VSI %i Rx rule\n",
- vsi->vsi_num);
- vsi->current_netdev_flags |= IFF_PROMISC;
- err = -EIO;
- goto out_promisc;
+ if (ice_is_vsi_dflt_vsi(pf->first_sw, vsi)) {
+ err = ice_clear_dflt_vsi(pf->first_sw);
+ if (err) {
+ netdev_err(netdev,
+ "Error %d clearing default VSI %i Rx rule\n",
+ err, vsi->vsi_num);
+ vsi->current_netdev_flags |=
+ IFF_PROMISC;
+ goto out_promisc;
+ }
}
}
}
@@ -472,7 +476,7 @@ ice_prepare_for_reset(struct ice_pf *pf)
ice_vc_notify_reset(pf);
/* Disable VFs until reset is completed */
- for (i = 0; i < pf->num_alloc_vfs; i++)
+ ice_for_each_vf(pf, i)
ice_set_vf_state_qs_dis(&pf->vf[i]);
/* clear SW filtering DB */
@@ -840,8 +844,7 @@ ice_link_event(struct ice_pf *pf, struct ice_port_info *pi, bool link_up,
ice_vsi_link_event(vsi, link_up);
ice_print_link_msg(vsi, link_up);
- if (pf->num_alloc_vfs)
- ice_vc_notify_link_state(pf);
+ ice_vc_notify_link_state(pf);
return result;
}
@@ -1291,7 +1294,7 @@ static void ice_handle_mdd_event(struct ice_pf *pf)
}
/* check to see if one of the VFs caused the MDD */
- for (i = 0; i < pf->num_alloc_vfs; i++) {
+ ice_for_each_vf(pf, i) {
struct ice_vf *vf = &pf->vf[i];
bool vf_mdd_detected = false;
@@ -2330,7 +2333,8 @@ static void ice_set_netdev_features(struct net_device *netdev)
NETIF_F_HW_VLAN_CTAG_TX |
NETIF_F_HW_VLAN_CTAG_RX;
- tso_features = NETIF_F_TSO;
+ tso_features = NETIF_F_TSO |
+ NETIF_F_GSO_UDP_L4;
/* set features that user can change */
netdev->hw_features = dflt_features | csumo_features |
@@ -3568,6 +3572,15 @@ static const struct pci_device_id ice_pci_tbl[] = {
{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E810C_BACKPLANE), 0 },
{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E810C_QSFP), 0 },
{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E810C_SFP), 0 },
+ { PCI_VDEVICE(INTEL, ICE_DEV_ID_E822C_BACKPLANE), 0 },
+ { PCI_VDEVICE(INTEL, ICE_DEV_ID_E822C_QSFP), 0 },
+ { PCI_VDEVICE(INTEL, ICE_DEV_ID_E822C_SFP), 0 },
+ { PCI_VDEVICE(INTEL, ICE_DEV_ID_E822C_10G_BASE_T), 0 },
+ { PCI_VDEVICE(INTEL, ICE_DEV_ID_E822C_SGMII), 0 },
+ { PCI_VDEVICE(INTEL, ICE_DEV_ID_E822X_BACKPLANE), 0 },
+ { PCI_VDEVICE(INTEL, ICE_DEV_ID_E822L_SFP), 0 },
+ { PCI_VDEVICE(INTEL, ICE_DEV_ID_E822L_10G_BASE_T), 0 },
+ { PCI_VDEVICE(INTEL, ICE_DEV_ID_E822L_SGMII), 0 },
/* required last entry */
{ 0, }
};
@@ -4670,6 +4683,13 @@ static void ice_rebuild(struct ice_pf *pf, enum ice_reset_req reset_type)
goto err_init_ctrlq;
}
+ if (pf->first_sw->dflt_vsi_ena)
+ dev_info(dev,
+ "Clearing default VSI, re-enable after reset completes\n");
+ /* clear the default VSI configuration if it exists */
+ pf->first_sw->dflt_vsi = NULL;
+ pf->first_sw->dflt_vsi_ena = false;
+
ice_clear_pxe_mode(hw);
ret = ice_get_caps(hw);
@@ -4825,7 +4845,7 @@ static int ice_change_mtu(struct net_device *netdev, int new_mtu)
}
}
- netdev_info(netdev, "changed MTU to %d\n", new_mtu);
+ netdev_dbg(netdev, "changed MTU to %d\n", new_mtu);
return 0;
}
@@ -5060,42 +5080,23 @@ ice_bridge_setlink(struct net_device *dev, struct nlmsghdr *nlh,
* ice_tx_timeout - Respond to a Tx Hang
* @netdev: network interface device structure
*/
-static void ice_tx_timeout(struct net_device *netdev)
+static void ice_tx_timeout(struct net_device *netdev, unsigned int txqueue)
{
struct ice_netdev_priv *np = netdev_priv(netdev);
struct ice_ring *tx_ring = NULL;
struct ice_vsi *vsi = np->vsi;
struct ice_pf *pf = vsi->back;
- int hung_queue = -1;
u32 i;
pf->tx_timeout_count++;
- /* find the stopped queue the same way dev_watchdog() does */
- for (i = 0; i < netdev->num_tx_queues; i++) {
- unsigned long trans_start;
- struct netdev_queue *q;
-
- q = netdev_get_tx_queue(netdev, i);
- trans_start = q->trans_start;
- if (netif_xmit_stopped(q) &&
- time_after(jiffies,
- trans_start + netdev->watchdog_timeo)) {
- hung_queue = i;
- break;
- }
- }
-
- if (i == netdev->num_tx_queues)
- netdev_info(netdev, "tx_timeout: no netdev hung queue found\n");
- else
- /* now that we have an index, find the tx_ring struct */
- for (i = 0; i < vsi->num_txq; i++)
- if (vsi->tx_rings[i] && vsi->tx_rings[i]->desc)
- if (hung_queue == vsi->tx_rings[i]->q_index) {
- tx_ring = vsi->tx_rings[i];
- break;
- }
+ /* find the tx_ring struct whose q_index matches the hung txqueue */
+ for (i = 0; i < vsi->num_txq; i++)
+ if (vsi->tx_rings[i] && vsi->tx_rings[i]->desc)
+ if (txqueue == vsi->tx_rings[i]->q_index) {
+ tx_ring = vsi->tx_rings[i];
+ break;
+ }
/* Reset recovery level if enough time has elapsed after last timeout.
* Also ensure no new reset action happens before next timeout period.
@@ -5110,19 +5111,19 @@ static void ice_tx_timeout(struct net_device *netdev)
struct ice_hw *hw = &pf->hw;
u32 head, val = 0;
- head = (rd32(hw, QTX_COMM_HEAD(vsi->txq_map[hung_queue])) &
+ head = (rd32(hw, QTX_COMM_HEAD(vsi->txq_map[txqueue])) &
QTX_COMM_HEAD_HEAD_M) >> QTX_COMM_HEAD_HEAD_S;
/* Read interrupt register */
val = rd32(hw, GLINT_DYN_CTL(tx_ring->q_vector->reg_idx));
netdev_info(netdev, "tx_timeout: VSI_num: %d, Q %d, NTC: 0x%x, HW_HEAD: 0x%x, NTU: 0x%x, INT: 0x%x\n",
- vsi->vsi_num, hung_queue, tx_ring->next_to_clean,
+ vsi->vsi_num, txqueue, tx_ring->next_to_clean,
head, tx_ring->next_to_use, val);
}
pf->tx_timeout_last_recovery = jiffies;
- netdev_info(netdev, "tx_timeout recovery level %d, hung_queue %d\n",
- pf->tx_timeout_recovery_level, hung_queue);
+ netdev_info(netdev, "tx_timeout recovery level %d, txqueue %d\n",
+ pf->tx_timeout_recovery_level, txqueue);
switch (pf->tx_timeout_recovery_level) {
case 1:
diff --git a/drivers/net/ethernet/intel/ice/ice_nvm.c b/drivers/net/ethernet/intel/ice/ice_nvm.c
index 57c73f613f32..7525ac50742e 100644
--- a/drivers/net/ethernet/intel/ice/ice_nvm.c
+++ b/drivers/net/ethernet/intel/ice/ice_nvm.c
@@ -289,6 +289,18 @@ enum ice_status ice_init_nvm(struct ice_hw *hw)
nvm->eetrack = (eetrack_hi << 16) | eetrack_lo;
+ /* the following devices do not have boot_cfg_tlv yet */
+ if (hw->device_id == ICE_DEV_ID_E822C_BACKPLANE ||
+ hw->device_id == ICE_DEV_ID_E822C_QSFP ||
+ hw->device_id == ICE_DEV_ID_E822C_10G_BASE_T ||
+ hw->device_id == ICE_DEV_ID_E822C_SGMII ||
+ hw->device_id == ICE_DEV_ID_E822C_SFP ||
+ hw->device_id == ICE_DEV_ID_E822X_BACKPLANE ||
+ hw->device_id == ICE_DEV_ID_E822L_SFP ||
+ hw->device_id == ICE_DEV_ID_E822L_10G_BASE_T ||
+ hw->device_id == ICE_DEV_ID_E822L_SGMII)
+ return status;
+
status = ice_get_pfa_module_tlv(hw, &boot_cfg_tlv, &boot_cfg_tlv_len,
ICE_SR_BOOT_CFG_PTR);
if (status) {
diff --git a/drivers/net/ethernet/intel/ice/ice_protocol_type.h b/drivers/net/ethernet/intel/ice/ice_protocol_type.h
new file mode 100644
index 000000000000..71647566964e
--- /dev/null
+++ b/drivers/net/ethernet/intel/ice/ice_protocol_type.h
@@ -0,0 +1,25 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (c) 2019, Intel Corporation. */
+
+#ifndef _ICE_PROTOCOL_TYPE_H_
+#define _ICE_PROTOCOL_TYPE_H_
+/* Decoders for ice_prot_id:
+ * - F: First
+ * - I: Inner
+ * - L: Last
+ * - O: Outer
+ * - S: Single
+ */
+enum ice_prot_id {
+ ICE_PROT_ID_INVAL = 0,
+ ICE_PROT_IPV4_OF_OR_S = 32,
+ ICE_PROT_IPV4_IL = 33,
+ ICE_PROT_IPV6_OF_OR_S = 40,
+ ICE_PROT_IPV6_IL = 41,
+ ICE_PROT_TCP_IL = 49,
+ ICE_PROT_UDP_IL_OR_S = 53,
+ ICE_PROT_SCTP_IL = 96,
+ ICE_PROT_META_ID = 255, /* when offset == metadata */
+ ICE_PROT_INVALID = 255 /* when offset == ICE_FV_OFFSET_INVAL */
+};
+#endif /* _ICE_PROTOCOL_TYPE_H_ */
diff --git a/drivers/net/ethernet/intel/ice/ice_status.h b/drivers/net/ethernet/intel/ice/ice_status.h
index c01597885629..a9a8bc3aca42 100644
--- a/drivers/net/ethernet/intel/ice/ice_status.h
+++ b/drivers/net/ethernet/intel/ice/ice_status.h
@@ -26,6 +26,7 @@ enum ice_status {
ICE_ERR_IN_USE = -16,
ICE_ERR_MAX_LIMIT = -17,
ICE_ERR_RESET_ONGOING = -18,
+ ICE_ERR_HW_TABLE = -19,
ICE_ERR_NVM_CHECKSUM = -51,
ICE_ERR_BUF_TOO_SHORT = -52,
ICE_ERR_NVM_BLANK_MODE = -53,
diff --git a/drivers/net/ethernet/intel/ice/ice_switch.c b/drivers/net/ethernet/intel/ice/ice_switch.c
index b5a53f862a83..431266081a80 100644
--- a/drivers/net/ethernet/intel/ice/ice_switch.c
+++ b/drivers/net/ethernet/intel/ice/ice_switch.c
@@ -50,42 +50,6 @@ static const u8 dummy_eth_header[DUMMY_ETH_HDR_LEN] = { 0x2, 0, 0, 0, 0, 0,
((n) * sizeof(((struct ice_sw_rule_vsi_list *)0)->vsi)))
/**
- * ice_aq_alloc_free_res - command to allocate/free resources
- * @hw: pointer to the HW struct
- * @num_entries: number of resource entries in buffer
- * @buf: Indirect buffer to hold data parameters and response
- * @buf_size: size of buffer for indirect commands
- * @opc: pass in the command opcode
- * @cd: pointer to command details structure or NULL
- *
- * Helper function to allocate/free resources using the admin queue commands
- */
-static enum ice_status
-ice_aq_alloc_free_res(struct ice_hw *hw, u16 num_entries,
- struct ice_aqc_alloc_free_res_elem *buf, u16 buf_size,
- enum ice_adminq_opc opc, struct ice_sq_cd *cd)
-{
- struct ice_aqc_alloc_free_res_cmd *cmd;
- struct ice_aq_desc desc;
-
- cmd = &desc.params.sw_res_ctrl;
-
- if (!buf)
- return ICE_ERR_PARAM;
-
- if (buf_size < (num_entries * sizeof(buf->elem[0])))
- return ICE_ERR_PARAM;
-
- ice_fill_dflt_direct_cmd_desc(&desc, opc);
-
- desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
-
- cmd->num_entries = cpu_to_le16(num_entries);
-
- return ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
-}
-
-/**
* ice_init_def_sw_recp - initialize the recipe book keeping tables
* @hw: pointer to the HW struct
*
diff --git a/drivers/net/ethernet/intel/ice/ice_txrx.c b/drivers/net/ethernet/intel/ice/ice_txrx.c
index 2c212f64d99f..fd17ace6b226 100644
--- a/drivers/net/ethernet/intel/ice/ice_txrx.c
+++ b/drivers/net/ethernet/intel/ice/ice_txrx.c
@@ -1071,13 +1071,16 @@ static int ice_clean_rx_irq(struct ice_ring *rx_ring, int budget)
ice_put_rx_buf(rx_ring, rx_buf);
continue;
construct_skb:
- if (skb)
+ if (skb) {
ice_add_rx_frag(rx_ring, rx_buf, skb, size);
- else if (ice_ring_uses_build_skb(rx_ring))
- skb = ice_build_skb(rx_ring, rx_buf, &xdp);
- else
+ } else if (likely(xdp.data)) {
+ if (ice_ring_uses_build_skb(rx_ring))
+ skb = ice_build_skb(rx_ring, rx_buf, &xdp);
+ else
+ skb = ice_construct_skb(rx_ring, rx_buf, &xdp);
+ } else {
skb = ice_construct_skb(rx_ring, rx_buf, &xdp);
-
+ }
/* exit if we failed to retrieve a buffer */
if (!skb) {
rx_ring->rx_stats.alloc_buf_failed++;
@@ -1925,6 +1928,7 @@ int ice_tso(struct ice_tx_buf *first, struct ice_tx_offload_params *off)
} ip;
union {
struct tcphdr *tcp;
+ struct udphdr *udp;
unsigned char *hdr;
} l4;
u64 cd_mss, cd_tso_len;
@@ -1958,10 +1962,18 @@ int ice_tso(struct ice_tx_buf *first, struct ice_tx_offload_params *off)
/* remove payload length from checksum */
paylen = skb->len - l4_start;
- csum_replace_by_diff(&l4.tcp->check, (__force __wsum)htonl(paylen));
- /* compute length of segmentation header */
- off->header_len = (l4.tcp->doff * 4) + l4_start;
+ if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4) {
+ csum_replace_by_diff(&l4.udp->check,
+ (__force __wsum)htonl(paylen));
+ /* compute length of UDP segmentation header */
+ off->header_len = sizeof(struct udphdr) + l4_start;
+ } else {
+ csum_replace_by_diff(&l4.tcp->check,
+ (__force __wsum)htonl(paylen));
+ /* compute length of TCP segmentation header */
+ off->header_len = (l4.tcp->doff * 4) + l4_start;
+ }
/* update gso_segs and bytecount */
first->gso_segs = skb_shinfo(skb)->gso_segs;
diff --git a/drivers/net/ethernet/intel/ice/ice_txrx.h b/drivers/net/ethernet/intel/ice/ice_txrx.h
index a84cc0e6dd27..a86270696df1 100644
--- a/drivers/net/ethernet/intel/ice/ice_txrx.h
+++ b/drivers/net/ethernet/intel/ice/ice_txrx.h
@@ -341,6 +341,12 @@ struct ice_ring_container {
u16 itr_setting;
};
+struct ice_coalesce_stored {
+ u16 itr_tx;
+ u16 itr_rx;
+ u8 intrl;
+};
+
/* iterator for handling rings in ring container */
#define ice_for_each_ring(pos, head) \
for (pos = (head).ring; pos; pos = pos->next)
diff --git a/drivers/net/ethernet/intel/ice/ice_type.h b/drivers/net/ethernet/intel/ice/ice_type.h
index c4854a987130..b361ffabb0ca 100644
--- a/drivers/net/ethernet/intel/ice/ice_type.h
+++ b/drivers/net/ethernet/intel/ice/ice_type.h
@@ -13,6 +13,7 @@
#include "ice_controlq.h"
#include "ice_lan_tx_rx.h"
#include "ice_flex_type.h"
+#include "ice_protocol_type.h"
static inline bool ice_is_tc_ena(unsigned long bitmap, u8 tc)
{
@@ -41,6 +42,7 @@ static inline u32 ice_round_to_num(u32 N, u32 R)
#define ICE_DBG_QCTX BIT_ULL(6)
#define ICE_DBG_NVM BIT_ULL(7)
#define ICE_DBG_LAN BIT_ULL(8)
+#define ICE_DBG_FLOW BIT_ULL(9)
#define ICE_DBG_SW BIT_ULL(13)
#define ICE_DBG_SCHED BIT_ULL(14)
#define ICE_DBG_PKG BIT_ULL(16)
@@ -559,6 +561,10 @@ struct ice_hw {
/* HW block tables */
struct ice_blk_info blk[ICE_BLK_COUNT];
+ struct mutex fl_profs_locks[ICE_BLK_COUNT]; /* lock fltr profiles */
+ struct list_head fl_profs[ICE_BLK_COUNT];
+ struct mutex rss_locks; /* protect RSS configuration */
+ struct list_head rss_list_head;
};
/* Statistics collected by each port, VSI, VEB, and S-channel */
diff --git a/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c b/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c
index edb374296d1f..82b1e7a4cb92 100644
--- a/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c
+++ b/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c
@@ -35,37 +35,6 @@ static int ice_check_vf_init(struct ice_pf *pf, struct ice_vf *vf)
}
/**
- * ice_err_to_virt err - translate errors for VF return code
- * @ice_err: error return code
- */
-static enum virtchnl_status_code ice_err_to_virt_err(enum ice_status ice_err)
-{
- switch (ice_err) {
- case ICE_SUCCESS:
- return VIRTCHNL_STATUS_SUCCESS;
- case ICE_ERR_BAD_PTR:
- case ICE_ERR_INVAL_SIZE:
- case ICE_ERR_DEVICE_NOT_SUPPORTED:
- case ICE_ERR_PARAM:
- case ICE_ERR_CFG:
- return VIRTCHNL_STATUS_ERR_PARAM;
- case ICE_ERR_NO_MEMORY:
- return VIRTCHNL_STATUS_ERR_NO_MEMORY;
- case ICE_ERR_NOT_READY:
- case ICE_ERR_RESET_FAILED:
- case ICE_ERR_FW_API_VER:
- case ICE_ERR_AQ_ERROR:
- case ICE_ERR_AQ_TIMEOUT:
- case ICE_ERR_AQ_FULL:
- case ICE_ERR_AQ_NO_WORK:
- case ICE_ERR_AQ_EMPTY:
- return VIRTCHNL_STATUS_ERR_ADMIN_QUEUE_ERROR;
- default:
- return VIRTCHNL_STATUS_ERR_NOT_SUPPORTED;
- }
-}
-
-/**
* ice_vc_vf_broadcast - Broadcast a message to all VFs on PF
* @pf: pointer to the PF structure
* @v_opcode: operation code
@@ -78,10 +47,11 @@ ice_vc_vf_broadcast(struct ice_pf *pf, enum virtchnl_ops v_opcode,
enum virtchnl_status_code v_retval, u8 *msg, u16 msglen)
{
struct ice_hw *hw = &pf->hw;
- struct ice_vf *vf = pf->vf;
int i;
- for (i = 0; i < pf->num_alloc_vfs; i++, vf++) {
+ ice_for_each_vf(pf, i) {
+ struct ice_vf *vf = &pf->vf[i];
+
/* Not all vfs are enabled so skip the ones that are not */
if (!test_bit(ICE_VF_STATE_INIT, vf->vf_states) &&
!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states))
@@ -121,26 +91,6 @@ ice_set_pfe_link(struct ice_vf *vf, struct virtchnl_pf_event *pfe,
}
/**
- * ice_set_pfe_link_forced - Force the virtchnl_pf_event link speed/status
- * @vf: pointer to the VF structure
- * @pfe: pointer to the virtchnl_pf_event to set link speed/status for
- * @link_up: whether or not to set the link up/down
- */
-static void
-ice_set_pfe_link_forced(struct ice_vf *vf, struct virtchnl_pf_event *pfe,
- bool link_up)
-{
- u16 link_speed;
-
- if (link_up)
- link_speed = ICE_AQ_LINK_SPEED_100GB;
- else
- link_speed = ICE_AQ_LINK_SPEED_UNKNOWN;
-
- ice_set_pfe_link(vf, pfe, link_speed, link_up);
-}
-
-/**
* ice_vc_notify_vf_link_state - Inform a VF of link status
* @vf: pointer to the VF structure
*
@@ -160,13 +110,17 @@ static void ice_vc_notify_vf_link_state(struct ice_vf *vf)
pfe.severity = PF_EVENT_SEVERITY_INFO;
/* Always report link is down if the VF queues aren't enabled */
- if (!vf->num_qs_ena)
+ if (!vf->num_qs_ena) {
ice_set_pfe_link(vf, &pfe, ICE_AQ_LINK_SPEED_UNKNOWN, false);
- else if (vf->link_forced)
- ice_set_pfe_link_forced(vf, &pfe, vf->link_up);
- else
- ice_set_pfe_link(vf, &pfe, ls->link_speed, ls->link_info &
- ICE_AQ_LINK_UP);
+ } else if (vf->link_forced) {
+ u16 link_speed = vf->link_up ?
+ ls->link_speed : ICE_AQ_LINK_SPEED_UNKNOWN;
+
+ ice_set_pfe_link(vf, &pfe, link_speed, vf->link_up);
+ } else {
+ ice_set_pfe_link(vf, &pfe, ls->link_speed,
+ ls->link_info & ICE_AQ_LINK_UP);
+ }
ice_aq_send_msg_to_vf(hw, vf->vf_id, VIRTCHNL_OP_EVENT,
VIRTCHNL_STATUS_SUCCESS, (u8 *)&pfe,
@@ -331,7 +285,7 @@ void ice_free_vfs(struct ice_pf *pf)
usleep_range(1000, 2000);
/* Avoid wait time by stopping all VFs at the same time */
- for (i = 0; i < pf->num_alloc_vfs; i++)
+ ice_for_each_vf(pf, i)
if (test_bit(ICE_VF_STATE_QS_ENA, pf->vf[i].vf_states))
ice_dis_vf_qs(&pf->vf[i]);
@@ -991,10 +945,17 @@ static void ice_cleanup_and_realloc_vf(struct ice_vf *vf)
/* reallocate VF resources to finish resetting the VSI state */
if (!ice_alloc_vf_res(vf)) {
+ struct ice_vsi *vsi;
+
ice_ena_vf_mappings(vf);
set_bit(ICE_VF_STATE_ACTIVE, vf->vf_states);
clear_bit(ICE_VF_STATE_DIS, vf->vf_states);
- vf->num_vlan = 0;
+
+ vsi = pf->vsi[vf->lan_vsi_idx];
+ if (ice_vsi_add_vlan(vsi, 0))
+ dev_warn(ice_pf_to_dev(pf),
+ "Failed to add VLAN 0 filter for VF %d, MDD events will trigger. Reset the VF, disable spoofchk, or enable 8021q module on the guest",
+ vf->vf_id);
}
/* Tell the VF driver the reset is done. This needs to be done only
@@ -1023,7 +984,7 @@ ice_vf_set_vsi_promisc(struct ice_vf *vf, struct ice_vsi *vsi, u8 promisc_m,
struct ice_hw *hw;
hw = &pf->hw;
- if (vf->num_vlan) {
+ if (vsi->num_vlan) {
status = ice_set_vlan_vsi_promisc(hw, vsi->idx, promisc_m,
rm_promisc);
} else if (vf->port_vlan_id) {
@@ -1070,7 +1031,7 @@ static bool ice_config_res_vfs(struct ice_pf *pf)
ice_irq_dynamic_ena(hw, NULL, NULL);
/* Finish resetting each VF and allocate resources */
- for (v = 0; v < pf->num_alloc_vfs; v++) {
+ ice_for_each_vf(pf, v) {
struct ice_vf *vf = &pf->vf[v];
vf->num_vf_qs = pf->num_vf_qps;
@@ -1113,10 +1074,10 @@ bool ice_reset_all_vfs(struct ice_pf *pf, bool is_vflr)
return false;
/* Begin reset on all VFs at once */
- for (v = 0; v < pf->num_alloc_vfs; v++)
+ ice_for_each_vf(pf, v)
ice_trigger_vf_reset(&pf->vf[v], is_vflr, true);
- for (v = 0; v < pf->num_alloc_vfs; v++) {
+ ice_for_each_vf(pf, v) {
struct ice_vsi *vsi;
vf = &pf->vf[v];
@@ -1161,7 +1122,7 @@ bool ice_reset_all_vfs(struct ice_pf *pf, bool is_vflr)
dev_warn(dev, "VF reset check timeout\n");
/* free VF resources to begin resetting the VSI state */
- for (v = 0; v < pf->num_alloc_vfs; v++) {
+ ice_for_each_vf(pf, v) {
vf = &pf->vf[v];
ice_free_vf_res(vf);
@@ -1273,7 +1234,7 @@ static bool ice_reset_vf(struct ice_vf *vf, bool is_vflr)
*/
if (test_bit(ICE_VF_STATE_UC_PROMISC, vf->vf_states) ||
test_bit(ICE_VF_STATE_MC_PROMISC, vf->vf_states)) {
- if (vf->port_vlan_id || vf->num_vlan)
+ if (vf->port_vlan_id || vsi->num_vlan)
promisc_m = ICE_UCAST_VLAN_PROMISC_BITS;
else
promisc_m = ICE_UCAST_PROMISC_BITS;
@@ -1301,7 +1262,7 @@ void ice_vc_notify_link_state(struct ice_pf *pf)
{
int i;
- for (i = 0; i < pf->num_alloc_vfs; i++)
+ ice_for_each_vf(pf, i)
ice_vc_notify_vf_link_state(&pf->vf[i]);
}
@@ -1385,9 +1346,10 @@ static int ice_alloc_vfs(struct ice_pf *pf, u16 num_alloc_vfs)
goto err_pci_disable_sriov;
}
pf->vf = vfs;
+ pf->num_alloc_vfs = num_alloc_vfs;
/* apply default profile */
- for (i = 0; i < num_alloc_vfs; i++) {
+ ice_for_each_vf(pf, i) {
vfs[i].pf = pf;
vfs[i].vf_sw_id = pf->first_sw;
vfs[i].vf_id = i;
@@ -1396,7 +1358,6 @@ static int ice_alloc_vfs(struct ice_pf *pf, u16 num_alloc_vfs)
set_bit(ICE_VIRTCHNL_VF_CAP_L2, &vfs[i].vf_caps);
vfs[i].spoofchk = true;
}
- pf->num_alloc_vfs = num_alloc_vfs;
/* VF resources get allocated with initialization */
if (!ice_config_res_vfs(pf)) {
@@ -1535,7 +1496,7 @@ void ice_process_vflr_event(struct ice_pf *pf)
!pf->num_alloc_vfs)
return;
- for (vf_id = 0; vf_id < pf->num_alloc_vfs; vf_id++) {
+ ice_for_each_vf(pf, vf_id) {
struct ice_vf *vf = &pf->vf[vf_id];
u32 reg_idx, bit_idx;
@@ -1918,6 +1879,89 @@ error_param:
}
/**
+ * ice_set_vf_spoofchk
+ * @netdev: network interface device structure
+ * @vf_id: VF identifier
+ * @ena: flag to enable or disable feature
+ *
+ * Enable or disable VF spoof checking
+ */
+int ice_set_vf_spoofchk(struct net_device *netdev, int vf_id, bool ena)
+{
+ struct ice_netdev_priv *np = netdev_priv(netdev);
+ struct ice_pf *pf = np->vsi->back;
+ struct ice_vsi_ctx *ctx;
+ struct ice_vsi *vf_vsi;
+ enum ice_status status;
+ struct device *dev;
+ struct ice_vf *vf;
+ int ret = 0;
+
+ dev = ice_pf_to_dev(pf);
+ if (ice_validate_vf_id(pf, vf_id))
+ return -EINVAL;
+
+ vf = &pf->vf[vf_id];
+
+ if (ice_check_vf_init(pf, vf))
+ return -EBUSY;
+
+ vf_vsi = pf->vsi[vf->lan_vsi_idx];
+ if (!vf_vsi) {
+ netdev_err(netdev, "VSI %d for VF %d is null\n",
+ vf->lan_vsi_idx, vf->vf_id);
+ return -EINVAL;
+ }
+
+ if (vf_vsi->type != ICE_VSI_VF) {
+ netdev_err(netdev,
+ "Type %d of VSI %d for VF %d is no ICE_VSI_VF\n",
+ vf_vsi->type, vf_vsi->vsi_num, vf->vf_id);
+ return -ENODEV;
+ }
+
+ if (ena == vf->spoofchk) {
+ dev_dbg(dev, "VF spoofchk already %s\n", ena ? "ON" : "OFF");
+ return 0;
+ }
+
+ ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
+ if (!ctx)
+ return -ENOMEM;
+
+ ctx->info.sec_flags = vf_vsi->info.sec_flags;
+ ctx->info.valid_sections = cpu_to_le16(ICE_AQ_VSI_PROP_SECURITY_VALID);
+ if (ena) {
+ ctx->info.sec_flags |=
+ ICE_AQ_VSI_SEC_FLAG_ENA_MAC_ANTI_SPOOF |
+ (ICE_AQ_VSI_SEC_TX_VLAN_PRUNE_ENA <<
+ ICE_AQ_VSI_SEC_TX_PRUNE_ENA_S);
+ } else {
+ ctx->info.sec_flags &=
+ ~(ICE_AQ_VSI_SEC_FLAG_ENA_MAC_ANTI_SPOOF |
+ (ICE_AQ_VSI_SEC_TX_VLAN_PRUNE_ENA <<
+ ICE_AQ_VSI_SEC_TX_PRUNE_ENA_S));
+ }
+
+ status = ice_update_vsi(&pf->hw, vf_vsi->idx, ctx, NULL);
+ if (status) {
+ dev_err(dev,
+ "Failed to %sable spoofchk on VF %d VSI %d\n error %d",
+ ena ? "en" : "dis", vf->vf_id, vf_vsi->vsi_num, status);
+ ret = -EIO;
+ goto out;
+ }
+
+ /* only update spoofchk state and VSI context on success */
+ vf_vsi->info.sec_flags = ctx->info.sec_flags;
+ vf->spoofchk = ena;
+
+out:
+ kfree(ctx);
+ return ret;
+}
+
+/**
* ice_vc_get_stats_msg
* @vf: pointer to the VF info
* @msg: pointer to the msg buffer
@@ -2409,6 +2453,83 @@ static bool ice_can_vf_change_mac(struct ice_vf *vf)
}
/**
+ * ice_vc_add_mac_addr - attempt to add the MAC address passed in
+ * @vf: pointer to the VF info
+ * @vsi: pointer to the VF's VSI
+ * @mac_addr: MAC address to add
+ */
+static int
+ice_vc_add_mac_addr(struct ice_vf *vf, struct ice_vsi *vsi, u8 *mac_addr)
+{
+ struct device *dev = ice_pf_to_dev(vf->pf);
+ enum ice_status status;
+
+ /* default unicast MAC already added */
+ if (ether_addr_equal(mac_addr, vf->dflt_lan_addr.addr))
+ return 0;
+
+ if (is_unicast_ether_addr(mac_addr) && !ice_can_vf_change_mac(vf)) {
+ dev_err(dev, "VF attempting to override administratively set MAC address, bring down and up the VF interface to resume normal operation\n");
+ return -EPERM;
+ }
+
+ status = ice_vsi_cfg_mac_fltr(vsi, mac_addr, true);
+ if (status == ICE_ERR_ALREADY_EXISTS) {
+ dev_err(dev, "MAC %pM already exists for VF %d\n", mac_addr,
+ vf->vf_id);
+ return -EEXIST;
+ } else if (status) {
+ dev_err(dev, "Failed to add MAC %pM for VF %d\n, error %d\n",
+ mac_addr, vf->vf_id, status);
+ return -EIO;
+ }
+
+ /* only set dflt_lan_addr once */
+ if (is_zero_ether_addr(vf->dflt_lan_addr.addr) &&
+ is_unicast_ether_addr(mac_addr))
+ ether_addr_copy(vf->dflt_lan_addr.addr, mac_addr);
+
+ vf->num_mac++;
+
+ return 0;
+}
+
+/**
+ * ice_vc_del_mac_addr - attempt to delete the MAC address passed in
+ * @vf: pointer to the VF info
+ * @vsi: pointer to the VF's VSI
+ * @mac_addr: MAC address to delete
+ */
+static int
+ice_vc_del_mac_addr(struct ice_vf *vf, struct ice_vsi *vsi, u8 *mac_addr)
+{
+ struct device *dev = ice_pf_to_dev(vf->pf);
+ enum ice_status status;
+
+ if (!ice_can_vf_change_mac(vf) &&
+ ether_addr_equal(mac_addr, vf->dflt_lan_addr.addr))
+ return 0;
+
+ status = ice_vsi_cfg_mac_fltr(vsi, mac_addr, false);
+ if (status == ICE_ERR_DOES_NOT_EXIST) {
+ dev_err(dev, "MAC %pM does not exist for VF %d\n", mac_addr,
+ vf->vf_id);
+ return -ENOENT;
+ } else if (status) {
+ dev_err(dev, "Failed to delete MAC %pM for VF %d, error %d\n",
+ mac_addr, vf->vf_id, status);
+ return -EIO;
+ }
+
+ if (ether_addr_equal(mac_addr, vf->dflt_lan_addr.addr))
+ eth_zero_addr(vf->dflt_lan_addr.addr);
+
+ vf->num_mac--;
+
+ return 0;
+}
+
+/**
* ice_vc_handle_mac_addr_msg
* @vf: pointer to the VF info
* @msg: pointer to the msg buffer
@@ -2419,23 +2540,23 @@ static bool ice_can_vf_change_mac(struct ice_vf *vf)
static int
ice_vc_handle_mac_addr_msg(struct ice_vf *vf, u8 *msg, bool set)
{
+ int (*ice_vc_cfg_mac)
+ (struct ice_vf *vf, struct ice_vsi *vsi, u8 *mac_addr);
enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
struct virtchnl_ether_addr_list *al =
(struct virtchnl_ether_addr_list *)msg;
struct ice_pf *pf = vf->pf;
enum virtchnl_ops vc_op;
- enum ice_status status;
struct ice_vsi *vsi;
- struct device *dev;
- int mac_count = 0;
int i;
- dev = ice_pf_to_dev(pf);
-
- if (set)
+ if (set) {
vc_op = VIRTCHNL_OP_ADD_ETH_ADDR;
- else
+ ice_vc_cfg_mac = ice_vc_add_mac_addr;
+ } else {
vc_op = VIRTCHNL_OP_DEL_ETH_ADDR;
+ ice_vc_cfg_mac = ice_vc_del_mac_addr;
+ }
if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states) ||
!ice_vc_isvalid_vsi_id(vf, al->vsi_id)) {
@@ -2443,14 +2564,15 @@ ice_vc_handle_mac_addr_msg(struct ice_vf *vf, u8 *msg, bool set)
goto handle_mac_exit;
}
+ /* If this VF is not privileged, then we can't add more than a
+ * limited number of addresses. Check to make sure that the
+ * additions do not push us over the limit.
+ */
if (set && !ice_is_vf_trusted(vf) &&
(vf->num_mac + al->num_elements) > ICE_MAX_MACADDR_PER_VF) {
- dev_err(dev,
+ dev_err(ice_pf_to_dev(pf),
"Can't add more MAC addresses, because VF-%d is not trusted, switch the VF to trusted mode in order to add more functionalities\n",
vf->vf_id);
- /* There is no need to let VF know about not being trusted
- * to add more MAC addr, so we can just return success message.
- */
v_ret = VIRTCHNL_STATUS_ERR_PARAM;
goto handle_mac_exit;
}
@@ -2462,70 +2584,22 @@ ice_vc_handle_mac_addr_msg(struct ice_vf *vf, u8 *msg, bool set)
}
for (i = 0; i < al->num_elements; i++) {
- u8 *maddr = al->list[i].addr;
+ u8 *mac_addr = al->list[i].addr;
+ int result;
- if (ether_addr_equal(maddr, vf->dflt_lan_addr.addr) ||
- is_broadcast_ether_addr(maddr)) {
- if (set) {
- /* VF is trying to add filters that the PF
- * already added. Just continue.
- */
- dev_info(dev,
- "MAC %pM already set for VF %d\n",
- maddr, vf->vf_id);
- continue;
- } else {
- /* VF can't remove dflt_lan_addr/bcast MAC */
- dev_err(dev,
- "VF can't remove default MAC address or MAC %pM programmed by PF for VF %d\n",
- maddr, vf->vf_id);
- continue;
- }
- }
-
- /* check for the invalid cases and bail if necessary */
- if (is_zero_ether_addr(maddr)) {
- dev_err(dev,
- "invalid MAC %pM provided for VF %d\n",
- maddr, vf->vf_id);
- v_ret = VIRTCHNL_STATUS_ERR_PARAM;
- goto handle_mac_exit;
- }
-
- if (is_unicast_ether_addr(maddr) &&
- !ice_can_vf_change_mac(vf)) {
- dev_err(dev,
- "can't change unicast MAC for untrusted VF %d\n",
- vf->vf_id);
- v_ret = VIRTCHNL_STATUS_ERR_PARAM;
- goto handle_mac_exit;
- }
+ if (is_broadcast_ether_addr(mac_addr) ||
+ is_zero_ether_addr(mac_addr))
+ continue;
- /* program the updated filter list */
- status = ice_vsi_cfg_mac_fltr(vsi, maddr, set);
- if (status == ICE_ERR_DOES_NOT_EXIST ||
- status == ICE_ERR_ALREADY_EXISTS) {
- dev_info(dev,
- "can't %s MAC filters %pM for VF %d, error %d\n",
- set ? "add" : "remove", maddr, vf->vf_id,
- status);
- } else if (status) {
- dev_err(dev,
- "can't %s MAC filters for VF %d, error %d\n",
- set ? "add" : "remove", vf->vf_id, status);
- v_ret = ice_err_to_virt_err(status);
+ result = ice_vc_cfg_mac(vf, vsi, mac_addr);
+ if (result == -EEXIST || result == -ENOENT) {
+ continue;
+ } else if (result) {
+ v_ret = VIRTCHNL_STATUS_ERR_ADMIN_QUEUE_ERROR;
goto handle_mac_exit;
}
-
- mac_count++;
}
- /* Track number of MAC filters programmed for the VF VSI */
- if (set)
- vf->num_mac += mac_count;
- else
- vf->num_mac -= mac_count;
-
handle_mac_exit:
/* send the response to the VF */
return ice_vc_send_msg_to_vf(vf, vc_op, v_ret, NULL, 0);
@@ -2744,17 +2818,6 @@ static int ice_vc_process_vlan_msg(struct ice_vf *vf, u8 *msg, bool add_v)
goto error_param;
}
- if (add_v && !ice_is_vf_trusted(vf) &&
- vf->num_vlan >= ICE_MAX_VLAN_PER_VF) {
- dev_info(dev,
- "VF-%d is not trusted, switch the VF to trusted mode, in order to add more VLAN addresses\n",
- vf->vf_id);
- /* There is no need to let VF know about being not trusted,
- * so we can just return success message here
- */
- goto error_param;
- }
-
for (i = 0; i < vfl->num_elements; i++) {
if (vfl->vlan_id[i] > ICE_MAX_VLANID) {
v_ret = VIRTCHNL_STATUS_ERR_PARAM;
@@ -2771,6 +2834,17 @@ static int ice_vc_process_vlan_msg(struct ice_vf *vf, u8 *msg, bool add_v)
goto error_param;
}
+ if (add_v && !ice_is_vf_trusted(vf) &&
+ vsi->num_vlan >= ICE_MAX_VLAN_PER_VF) {
+ dev_info(dev,
+ "VF-%d is not trusted, switch the VF to trusted mode, in order to add more VLAN addresses\n",
+ vf->vf_id);
+ /* There is no need to let the VF know about not being trusted,
+ * so we can just return a success message here
+ */
+ goto error_param;
+ }
+
if (vsi->info.pvid) {
v_ret = VIRTCHNL_STATUS_ERR_PARAM;
goto error_param;
@@ -2785,7 +2859,7 @@ static int ice_vc_process_vlan_msg(struct ice_vf *vf, u8 *msg, bool add_v)
u16 vid = vfl->vlan_id[i];
if (!ice_is_vf_trusted(vf) &&
- vf->num_vlan >= ICE_MAX_VLAN_PER_VF) {
+ vsi->num_vlan >= ICE_MAX_VLAN_PER_VF) {
dev_info(dev,
"VF-%d is not trusted, switch the VF to trusted mode, in order to add more VLAN addresses\n",
vf->vf_id);
@@ -2796,12 +2870,20 @@ static int ice_vc_process_vlan_msg(struct ice_vf *vf, u8 *msg, bool add_v)
goto error_param;
}
- if (ice_vsi_add_vlan(vsi, vid)) {
+ /* we add VLAN 0 by default for each VF so we can enable
+ * Tx VLAN anti-spoof without triggering MDD events, so
+ * we don't need to add it again here
+ */
+ if (!vid)
+ continue;
+
+ status = ice_vsi_add_vlan(vsi, vid);
+ if (status) {
v_ret = VIRTCHNL_STATUS_ERR_PARAM;
goto error_param;
}
- vf->num_vlan++;
+ vsi->num_vlan++;
/* Enable VLAN pruning when VLAN is added */
if (!vlan_promisc) {
status = ice_cfg_vlan_pruning(vsi, true, false);
@@ -2837,21 +2919,29 @@ static int ice_vc_process_vlan_msg(struct ice_vf *vf, u8 *msg, bool add_v)
*/
int num_vf_vlan;
- num_vf_vlan = vf->num_vlan;
+ num_vf_vlan = vsi->num_vlan;
for (i = 0; i < vfl->num_elements && i < num_vf_vlan; i++) {
u16 vid = vfl->vlan_id[i];
+ /* we add VLAN 0 by default for each VF so we can enable
+ * Tx VLAN anti-spoof without triggering MDD events, so
+ * we don't want a VIRTCHNL request to remove it
+ */
+ if (!vid)
+ continue;
+
/* Make sure ice_vsi_kill_vlan is successful before
* updating VLAN information
*/
- if (ice_vsi_kill_vlan(vsi, vid)) {
+ status = ice_vsi_kill_vlan(vsi, vid);
+ if (status) {
v_ret = VIRTCHNL_STATUS_ERR_PARAM;
goto error_param;
}
- vf->num_vlan--;
+ vsi->num_vlan--;
/* Disable VLAN pruning when the last VLAN is removed */
- if (!vf->num_vlan)
+ if (!vsi->num_vlan)
ice_cfg_vlan_pruning(vsi, false, false);
/* Disable Unicast/Multicast VLAN promiscuous mode */
@@ -3165,65 +3255,6 @@ ice_get_vf_cfg(struct net_device *netdev, int vf_id, struct ifla_vf_info *ivi)
}
/**
- * ice_set_vf_spoofchk
- * @netdev: network interface device structure
- * @vf_id: VF identifier
- * @ena: flag to enable or disable feature
- *
- * Enable or disable VF spoof checking
- */
-int ice_set_vf_spoofchk(struct net_device *netdev, int vf_id, bool ena)
-{
- struct ice_pf *pf = ice_netdev_to_pf(netdev);
- struct ice_vsi *vsi = pf->vsi[0];
- struct ice_vsi_ctx *ctx;
- enum ice_status status;
- struct device *dev;
- struct ice_vf *vf;
- int ret = 0;
-
- dev = ice_pf_to_dev(pf);
- if (ice_validate_vf_id(pf, vf_id))
- return -EINVAL;
-
- vf = &pf->vf[vf_id];
- if (ice_check_vf_init(pf, vf))
- return -EBUSY;
-
- if (ena == vf->spoofchk) {
- dev_dbg(dev, "VF spoofchk already %s\n",
- ena ? "ON" : "OFF");
- return 0;
- }
-
- ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
- if (!ctx)
- return -ENOMEM;
-
- ctx->info.valid_sections = cpu_to_le16(ICE_AQ_VSI_PROP_SECURITY_VALID);
-
- if (ena) {
- ctx->info.sec_flags |= ICE_AQ_VSI_SEC_FLAG_ENA_MAC_ANTI_SPOOF;
- ctx->info.sw_flags2 |= ICE_AQ_VSI_SW_FLAG_RX_PRUNE_EN_M;
- }
-
- status = ice_update_vsi(&pf->hw, vsi->idx, ctx, NULL);
- if (status) {
- dev_dbg(dev,
- "Error %d, failed to update VSI* parameters\n", status);
- ret = -EIO;
- goto out;
- }
-
- vf->spoofchk = ena;
- vsi->info.sec_flags = ctx->info.sec_flags;
- vsi->info.sw_flags2 = ctx->info.sw_flags2;
-out:
- kfree(ctx);
- return ret;
-}
-
-/**
* ice_wait_on_vf_reset
* @vf: The VF being reset
*
@@ -3344,28 +3375,18 @@ int ice_set_vf_trust(struct net_device *netdev, int vf_id, bool trusted)
int ice_set_vf_link_state(struct net_device *netdev, int vf_id, int link_state)
{
struct ice_pf *pf = ice_netdev_to_pf(netdev);
- struct virtchnl_pf_event pfe = { 0 };
- struct ice_link_status *ls;
struct ice_vf *vf;
- struct ice_hw *hw;
if (ice_validate_vf_id(pf, vf_id))
return -EINVAL;
vf = &pf->vf[vf_id];
- hw = &pf->hw;
- ls = &pf->hw.port_info->phy.link_info;
-
if (ice_check_vf_init(pf, vf))
return -EBUSY;
- pfe.event = VIRTCHNL_EVENT_LINK_CHANGE;
- pfe.severity = PF_EVENT_SEVERITY_INFO;
-
switch (link_state) {
case IFLA_VF_LINK_STATE_AUTO:
vf->link_forced = false;
- vf->link_up = ls->link_info & ICE_AQ_LINK_UP;
break;
case IFLA_VF_LINK_STATE_ENABLE:
vf->link_forced = true;
@@ -3379,15 +3400,7 @@ int ice_set_vf_link_state(struct net_device *netdev, int vf_id, int link_state)
return -EINVAL;
}
- if (vf->link_forced)
- ice_set_pfe_link_forced(vf, &pfe, vf->link_up);
- else
- ice_set_pfe_link(vf, &pfe, ls->link_speed, vf->link_up);
-
- /* Notify the VF of its new link state */
- ice_aq_send_msg_to_vf(hw, vf->vf_id, VIRTCHNL_OP_EVENT,
- VIRTCHNL_STATUS_SUCCESS, (u8 *)&pfe,
- sizeof(pfe), NULL);
+ ice_vc_notify_vf_link_state(vf);
return 0;
}
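A pattern worth noting in the rewritten ice_vc_handle_mac_addr_msg above: instead of branching on set inside the loop, the handler binds a function pointer once and treats -EEXIST/-ENOENT as tolerable per-address outcomes. Reduced to its shape (a sketch, not the verbatim handler):

        /* Sketch: per-message dispatch for add vs. delete */
        int (*cfg_mac)(struct ice_vf *, struct ice_vsi *, u8 *) =
                set ? ice_vc_add_mac_addr : ice_vc_del_mac_addr;
        int i;

        for (i = 0; i < al->num_elements; i++) {
                int err = cfg_mac(vf, vsi, al->list[i].addr);

                /* duplicates and misses are tolerated; hard errors abort */
                if (err && err != -EEXIST && err != -ENOENT)
                        return err;
        }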
diff --git a/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.h b/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.h
index 88aa65d5cb31..4647d636ed36 100644
--- a/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.h
+++ b/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.h
@@ -40,6 +40,9 @@
#define ICE_DFLT_INTR_PER_VF (ICE_DFLT_QS_PER_VF + 1)
#define ICE_MAX_VF_RESET_WAIT 15
+#define ice_for_each_vf(pf, i) \
+ for ((i) = 0; (i) < (pf)->num_alloc_vfs; (i)++)
+
/* Specific VF states */
enum ice_vf_states {
ICE_VF_STATE_INIT = 0, /* PF is initializing VF */
@@ -91,7 +94,6 @@ struct ice_vf {
unsigned long vf_caps; /* VF's adv. capabilities */
u8 num_req_qs; /* num of queue pairs requested by VF */
u16 num_mac;
- u16 num_vlan;
u16 num_vf_qs; /* num of queue configured per VF */
u16 num_qs_ena; /* total num of Tx/Rx queue enabled */
};
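ice_for_each_vf is why the allocation path now publishes pf->num_alloc_vfs before the per-VF setup loop runs: the macro bounds every walk by that count. Typical use, mirroring the converted call sites (the loop body is illustrative):

        /* Sketch: visit only VFs that have actually been allocated */
        int i;

        ice_for_each_vf(pf, i) {
                struct ice_vf *vf = &pf->vf[i];

                if (test_bit(ICE_VF_STATE_INIT, vf->vf_states))
                        ice_vc_notify_vf_link_state(vf);
        }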
diff --git a/drivers/net/ethernet/intel/ice/ice_xsk.c b/drivers/net/ethernet/intel/ice/ice_xsk.c
index cf9b8b22d24f..149dca0012ba 100644
--- a/drivers/net/ethernet/intel/ice/ice_xsk.c
+++ b/drivers/net/ethernet/intel/ice/ice_xsk.c
@@ -414,7 +414,8 @@ ice_xsk_umem_enable(struct ice_vsi *vsi, struct xdp_umem *umem, u16 qid)
if (vsi->type != ICE_VSI_PF)
return -EINVAL;
- vsi->num_xsk_umems = min_t(u16, vsi->num_rxq, vsi->num_txq);
+ if (!vsi->num_xsk_umems)
+ vsi->num_xsk_umems = min_t(u16, vsi->num_rxq, vsi->num_txq);
if (qid >= vsi->num_xsk_umems)
return -EINVAL;
@@ -555,7 +556,7 @@ ice_alloc_buf_fast_zc(struct ice_ring *rx_ring, struct ice_rx_buf *rx_buf)
rx_buf->handle = handle + umem->headroom;
- xsk_umem_discard_addr(umem);
+ xsk_umem_release_addr(umem);
return true;
}
@@ -591,7 +592,7 @@ ice_alloc_buf_slow_zc(struct ice_ring *rx_ring, struct ice_rx_buf *rx_buf)
rx_buf->handle = handle + umem->headroom;
- xsk_umem_discard_addr_rq(umem);
+ xsk_umem_release_addr_rq(umem);
return true;
}
@@ -1019,8 +1020,8 @@ bool ice_clean_tx_irq_zc(struct ice_ring *xdp_ring, int budget)
s16 ntc = xdp_ring->next_to_clean;
struct ice_tx_desc *tx_desc;
struct ice_tx_buf *tx_buf;
- bool xmit_done = true;
u32 xsk_frames = 0;
+ bool xmit_done;
tx_desc = ICE_TX_DESC(xdp_ring, ntc);
tx_buf = &xdp_ring->tx_buf[ntc];
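The discard_addr to release_addr renames track an AF_XDP core change of the same era: the fill-queue slot is released back for reuse, not thrown away. The allocation pattern the driver follows, sketched with the peek/release pairing (exact signatures for that kernel version are an assumption):

        /* Sketch: take one address from the fill ring, then release the slot */
        u64 handle;

        if (!xsk_umem_peek_addr(umem, &handle))
                return false;                   /* fill ring is empty */

        rx_buf->dma = xdp_umem_get_dma(umem, handle);
        rx_buf->addr = xdp_umem_get_data(umem, handle);
        rx_buf->handle = handle + umem->headroom;

        xsk_umem_release_addr(umem);            /* slot may now be refilled */
        return true;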
diff --git a/drivers/net/ethernet/intel/igb/e1000_82575.c b/drivers/net/ethernet/intel/igb/e1000_82575.c
index 8a6ef3514129..438b42ce2cd9 100644
--- a/drivers/net/ethernet/intel/igb/e1000_82575.c
+++ b/drivers/net/ethernet/intel/igb/e1000_82575.c
@@ -530,7 +530,7 @@ static s32 igb_set_sfp_media_type_82575(struct e1000_hw *hw)
dev_spec->module_plugged = true;
if (eth_flags->e1000_base_lx || eth_flags->e1000_base_sx) {
hw->phy.media_type = e1000_media_type_internal_serdes;
- } else if (eth_flags->e100_base_fx) {
+ } else if (eth_flags->e100_base_fx || eth_flags->e100_base_lx) {
dev_spec->sgmii_active = true;
hw->phy.media_type = e1000_media_type_internal_serdes;
} else if (eth_flags->e1000_base_t) {
@@ -657,14 +657,10 @@ static s32 igb_get_invariants_82575(struct e1000_hw *hw)
break;
}
- /* do not change link mode for 100BaseFX */
- if (dev_spec->eth_flags.e100_base_fx)
- break;
-
/* change current link mode setting */
ctrl_ext &= ~E1000_CTRL_EXT_LINK_MODE_MASK;
- if (hw->phy.media_type == e1000_media_type_copper)
+ if (dev_spec->sgmii_active)
ctrl_ext |= E1000_CTRL_EXT_LINK_MODE_SGMII;
else
ctrl_ext |= E1000_CTRL_EXT_LINK_MODE_PCIE_SERDES;
diff --git a/drivers/net/ethernet/intel/igb/igb.h b/drivers/net/ethernet/intel/igb/igb.h
index ca54e268d157..49b5fa9d4783 100644
--- a/drivers/net/ethernet/intel/igb/igb.h
+++ b/drivers/net/ethernet/intel/igb/igb.h
@@ -661,6 +661,7 @@ void igb_configure_tx_ring(struct igb_adapter *, struct igb_ring *);
void igb_configure_rx_ring(struct igb_adapter *, struct igb_ring *);
void igb_setup_tctl(struct igb_adapter *);
void igb_setup_rctl(struct igb_adapter *);
+void igb_setup_srrctl(struct igb_adapter *, struct igb_ring *);
netdev_tx_t igb_xmit_frame_ring(struct sk_buff *, struct igb_ring *);
void igb_alloc_rx_buffers(struct igb_ring *, u16);
void igb_update_stats(struct igb_adapter *);
diff --git a/drivers/net/ethernet/intel/igb/igb_ethtool.c b/drivers/net/ethernet/intel/igb/igb_ethtool.c
index 4690d6c87f39..f96ffa83efbe 100644
--- a/drivers/net/ethernet/intel/igb/igb_ethtool.c
+++ b/drivers/net/ethernet/intel/igb/igb_ethtool.c
@@ -181,7 +181,7 @@ static int igb_get_link_ksettings(struct net_device *netdev,
advertising &= ~ADVERTISED_1000baseKX_Full;
}
}
- if (eth_flags->e100_base_fx) {
+ if (eth_flags->e100_base_fx || eth_flags->e100_base_lx) {
supported |= SUPPORTED_100baseT_Full;
advertising |= ADVERTISED_100baseT_Full;
}
@@ -396,6 +396,7 @@ static int igb_set_pauseparam(struct net_device *netdev,
struct igb_adapter *adapter = netdev_priv(netdev);
struct e1000_hw *hw = &adapter->hw;
int retval = 0;
+ int i;
/* 100basefx does not support setting link flow control */
if (hw->dev_spec._82575.eth_flags.e100_base_fx)
@@ -428,6 +429,13 @@ static int igb_set_pauseparam(struct net_device *netdev,
retval = ((hw->phy.media_type == e1000_media_type_copper) ?
igb_force_mac_fc(hw) : igb_setup_link(hw));
+
+ /* Make sure SRRCTL considers new fc settings for each ring */
+ for (i = 0; i < adapter->num_rx_queues; i++) {
+ struct igb_ring *ring = adapter->rx_ring[i];
+
+ igb_setup_srrctl(adapter, ring);
+ }
}
clear_bit(__IGB_RESETTING, &adapter->state);
diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c
index 98346eb064d5..b46bff8fe056 100644
--- a/drivers/net/ethernet/intel/igb/igb_main.c
+++ b/drivers/net/ethernet/intel/igb/igb_main.c
@@ -146,7 +146,7 @@ static int igb_poll(struct napi_struct *, int);
static bool igb_clean_tx_irq(struct igb_q_vector *, int);
static int igb_clean_rx_irq(struct igb_q_vector *, int);
static int igb_ioctl(struct net_device *, struct ifreq *, int cmd);
-static void igb_tx_timeout(struct net_device *);
+static void igb_tx_timeout(struct net_device *, unsigned int txqueue);
static void igb_reset_task(struct work_struct *);
static void igb_vlan_mode(struct net_device *netdev,
netdev_features_t features);
@@ -4468,6 +4468,37 @@ static inline void igb_set_vmolr(struct igb_adapter *adapter,
}
/**
+ * igb_setup_srrctl - configure the split and replication receive control
+ * registers
+ * @adapter: board private structure
+ * @ring: receive ring to be configured
+ **/
+void igb_setup_srrctl(struct igb_adapter *adapter, struct igb_ring *ring)
+{
+ struct e1000_hw *hw = &adapter->hw;
+ int reg_idx = ring->reg_idx;
+ u32 srrctl = 0;
+
+ srrctl = IGB_RX_HDR_LEN << E1000_SRRCTL_BSIZEHDRSIZE_SHIFT;
+ if (ring_uses_large_buffer(ring))
+ srrctl |= IGB_RXBUFFER_3072 >> E1000_SRRCTL_BSIZEPKT_SHIFT;
+ else
+ srrctl |= IGB_RXBUFFER_2048 >> E1000_SRRCTL_BSIZEPKT_SHIFT;
+ srrctl |= E1000_SRRCTL_DESCTYPE_ADV_ONEBUF;
+ if (hw->mac.type >= e1000_82580)
+ srrctl |= E1000_SRRCTL_TIMESTAMP;
+ /* Only set Drop Enable if VFs allocated, or we are supporting multiple
+ * queues and rx flow control is disabled
+ */
+ if (adapter->vfs_allocated_count ||
+ (!(hw->fc.current_mode & e1000_fc_rx_pause) &&
+ adapter->num_rx_queues > 1))
+ srrctl |= E1000_SRRCTL_DROP_EN;
+
+ wr32(E1000_SRRCTL(reg_idx), srrctl);
+}
+
+/**
* igb_configure_rx_ring - Configure a receive ring after Reset
* @adapter: board private structure
* @ring: receive ring to be configured
@@ -4481,7 +4512,7 @@ void igb_configure_rx_ring(struct igb_adapter *adapter,
union e1000_adv_rx_desc *rx_desc;
u64 rdba = ring->dma;
int reg_idx = ring->reg_idx;
- u32 srrctl = 0, rxdctl = 0;
+ u32 rxdctl = 0;
/* disable the queue */
wr32(E1000_RXDCTL(reg_idx), 0);
@@ -4499,19 +4530,7 @@ void igb_configure_rx_ring(struct igb_adapter *adapter,
writel(0, ring->tail);
/* set descriptor configuration */
- srrctl = IGB_RX_HDR_LEN << E1000_SRRCTL_BSIZEHDRSIZE_SHIFT;
- if (ring_uses_large_buffer(ring))
- srrctl |= IGB_RXBUFFER_3072 >> E1000_SRRCTL_BSIZEPKT_SHIFT;
- else
- srrctl |= IGB_RXBUFFER_2048 >> E1000_SRRCTL_BSIZEPKT_SHIFT;
- srrctl |= E1000_SRRCTL_DESCTYPE_ADV_ONEBUF;
- if (hw->mac.type >= e1000_82580)
- srrctl |= E1000_SRRCTL_TIMESTAMP;
- /* Only set Drop Enable if we are supporting multiple queues */
- if (adapter->vfs_allocated_count || adapter->num_rx_queues > 1)
- srrctl |= E1000_SRRCTL_DROP_EN;
-
- wr32(E1000_SRRCTL(reg_idx), srrctl);
+ igb_setup_srrctl(adapter, ring);
/* set filtering for VMDQ pools */
igb_set_vmolr(adapter, reg_idx & 0x7, true);
@@ -6184,7 +6203,7 @@ static netdev_tx_t igb_xmit_frame(struct sk_buff *skb,
* igb_tx_timeout - Respond to a Tx Hang
* @netdev: network interface device structure
**/
-static void igb_tx_timeout(struct net_device *netdev)
+static void igb_tx_timeout(struct net_device *netdev, unsigned int txqueue)
{
struct igb_adapter *adapter = netdev_priv(netdev);
struct e1000_hw *hw = &adapter->hw;
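Factoring SRRCTL programming into igb_setup_srrctl() is what lets igb_set_pauseparam() re-derive Drop Enable after a flow-control change: with Rx pause active, the MAC should backpressure the link partner rather than silently drop. The predicate, isolated as a sketch (the helper name is illustrative):

        /* Sketch: when the reworked code sets E1000_SRRCTL_DROP_EN */
        static bool igb_want_drop_en(struct igb_adapter *adapter,
                                     struct e1000_hw *hw)
        {
                /* VFs always need it; otherwise only multiqueue
                 * without Rx flow control
                 */
                return adapter->vfs_allocated_count ||
                       (!(hw->fc.current_mode & e1000_fc_rx_pause) &&
                        adapter->num_rx_queues > 1);
        }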
diff --git a/drivers/net/ethernet/intel/igbvf/netdev.c b/drivers/net/ethernet/intel/igbvf/netdev.c
index 6003dc3ff5fd..5b1800c3ba82 100644
--- a/drivers/net/ethernet/intel/igbvf/netdev.c
+++ b/drivers/net/ethernet/intel/igbvf/netdev.c
@@ -2375,7 +2375,7 @@ static netdev_tx_t igbvf_xmit_frame(struct sk_buff *skb,
* igbvf_tx_timeout - Respond to a Tx Hang
* @netdev: network interface device structure
**/
-static void igbvf_tx_timeout(struct net_device *netdev)
+static void igbvf_tx_timeout(struct net_device *netdev, unsigned int txqueue)
{
struct igbvf_adapter *adapter = netdev_priv(netdev);
diff --git a/drivers/net/ethernet/intel/igc/Makefile b/drivers/net/ethernet/intel/igc/Makefile
index 88c6f88baac5..49fb1e1965cd 100644
--- a/drivers/net/ethernet/intel/igc/Makefile
+++ b/drivers/net/ethernet/intel/igc/Makefile
@@ -8,4 +8,4 @@
obj-$(CONFIG_IGC) += igc.o
igc-objs := igc_main.o igc_mac.o igc_i225.o igc_base.o igc_nvm.o igc_phy.o \
-igc_ethtool.o
+igc_ethtool.o igc_ptp.o
diff --git a/drivers/net/ethernet/intel/igc/igc.h b/drivers/net/ethernet/intel/igc/igc.h
index 0868677d43ed..52066bdbbad0 100644
--- a/drivers/net/ethernet/intel/igc/igc.h
+++ b/drivers/net/ethernet/intel/igc/igc.h
@@ -10,6 +10,9 @@
#include <linux/vmalloc.h>
#include <linux/ethtool.h>
#include <linux/sctp.h>
+#include <linux/ptp_clock_kernel.h>
+#include <linux/timecounter.h>
+#include <linux/net_tstamp.h>
#include "igc_hw.h"
@@ -45,11 +48,15 @@ extern char igc_driver_version[];
#define IGC_REGS_LEN 740
#define IGC_RETA_SIZE 128
+/* flags controlling PTP/1588 function */
+#define IGC_PTP_ENABLED BIT(0)
+
/* Interrupt defines */
#define IGC_START_ITR 648 /* ~6000 ints/sec */
#define IGC_FLAG_HAS_MSI BIT(0)
#define IGC_FLAG_QUEUE_PAIRS BIT(3)
#define IGC_FLAG_DMAC BIT(4)
+#define IGC_FLAG_PTP BIT(8)
#define IGC_FLAG_NEED_LINK_UPDATE BIT(9)
#define IGC_FLAG_MEDIA_RESET BIT(10)
#define IGC_FLAG_MAS_ENABLE BIT(12)
@@ -100,6 +107,20 @@ extern char igc_driver_version[];
#define AUTO_ALL_MODES 0
#define IGC_RX_HDR_LEN IGC_RXBUFFER_256
+/* Transmit and receive latency (for PTP timestamps) */
+/* FIXME: These values were estimated using the i210 values as a
+ * basis; they seem to provide good numbers with ptp4l/phc2sys, but we
+ * need to confirm them.
+ */
+#define IGC_I225_TX_LATENCY_10 9542
+#define IGC_I225_TX_LATENCY_100 1024
+#define IGC_I225_TX_LATENCY_1000 178
+#define IGC_I225_TX_LATENCY_2500 64
+#define IGC_I225_RX_LATENCY_10 20662
+#define IGC_I225_RX_LATENCY_100 2213
+#define IGC_I225_RX_LATENCY_1000 448
+#define IGC_I225_RX_LATENCY_2500 160
+
/* RX and TX descriptor control thresholds.
* PTHRESH - MAC will consider prefetch if it has fewer than this number of
* descriptors available in its onboard memory.
@@ -370,6 +391,8 @@ struct igc_adapter {
struct timer_list dma_err_timer;
struct timer_list phy_info_timer;
+ u32 wol;
+ u32 en_mng_pt;
u16 link_speed;
u16 link_duplex;
@@ -430,6 +453,20 @@ struct igc_adapter {
unsigned long link_check_timeout;
struct igc_info ei;
+
+ struct ptp_clock *ptp_clock;
+ struct ptp_clock_info ptp_caps;
+ struct work_struct ptp_tx_work;
+ struct sk_buff *ptp_tx_skb;
+ struct hwtstamp_config tstamp_config;
+ unsigned long ptp_tx_start;
+ unsigned long last_rx_ptp_check;
+ unsigned long last_rx_timestamp;
+ unsigned int ptp_flags;
+ /* System time value lock */
+ spinlock_t tmreg_lock;
+ struct cyclecounter cc;
+ struct timecounter tc;
};
/* igc_desc_unused - calculate if we have unused descriptors */
@@ -513,6 +550,16 @@ int igc_add_filter(struct igc_adapter *adapter,
int igc_erase_filter(struct igc_adapter *adapter,
struct igc_nfc_filter *input);
+void igc_ptp_init(struct igc_adapter *adapter);
+void igc_ptp_reset(struct igc_adapter *adapter);
+void igc_ptp_stop(struct igc_adapter *adapter);
+void igc_ptp_rx_rgtstamp(struct igc_q_vector *q_vector, struct sk_buff *skb);
+void igc_ptp_rx_pktstamp(struct igc_q_vector *q_vector, void *va,
+ struct sk_buff *skb);
+int igc_ptp_set_ts_config(struct net_device *netdev, struct ifreq *ifr);
+int igc_ptp_get_ts_config(struct net_device *netdev, struct ifreq *ifr);
+void igc_ptp_tx_hang(struct igc_adapter *adapter);
+
#define igc_rx_pg_size(_ring) (PAGE_SIZE << igc_rx_pg_order(_ring))
#define IGC_TXD_DCMD (IGC_ADVTXD_DCMD_EOP | IGC_ADVTXD_DCMD_RS)
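The new cyclecounter/timecounter pair is the kernel's standard plumbing for free-running timestamp hardware: the cyclecounter reads raw SYSTIM cycles and the timecounter layers an epoch offset plus mult/shift conversion on top. A generic initialization sketch (the igc_ptp_read callback name and the mult/shift values are assumptions, not taken from this patch):

        /* Sketch: bind a raw cycle reader to a nanosecond timecounter */
        adapter->cc.read = igc_ptp_read;        /* returns raw SYSTIM cycles */
        adapter->cc.mask = CYCLECOUNTER_MASK(64);
        adapter->cc.mult = 1;                   /* placeholder conversion */
        adapter->cc.shift = 0;

        spin_lock_init(&adapter->tmreg_lock);
        timecounter_init(&adapter->tc, &adapter->cc,
                         ktime_to_ns(ktime_get_real()));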
diff --git a/drivers/net/ethernet/intel/igc/igc_base.c b/drivers/net/ethernet/intel/igc/igc_base.c
index db289bcce21d..5a506440560a 100644
--- a/drivers/net/ethernet/intel/igc/igc_base.c
+++ b/drivers/net/ethernet/intel/igc/igc_base.c
@@ -212,6 +212,7 @@ static s32 igc_get_invariants_base(struct igc_hw *hw)
case IGC_DEV_ID_I225_I:
case IGC_DEV_ID_I220_V:
case IGC_DEV_ID_I225_K:
+ case IGC_DEV_ID_I225_BLANK_NVM:
mac->type = igc_i225;
break;
default:
diff --git a/drivers/net/ethernet/intel/igc/igc_defines.h b/drivers/net/ethernet/intel/igc/igc_defines.h
index f3788f0b95b4..58efa7a02c68 100644
--- a/drivers/net/ethernet/intel/igc/igc_defines.h
+++ b/drivers/net/ethernet/intel/igc/igc_defines.h
@@ -10,6 +10,37 @@
#define IGC_CTRL_EXT_DRV_LOAD 0x10000000 /* Drv loaded bit for FW */
+/* Definitions for power management and wakeup registers */
+/* Wake Up Control */
+#define IGC_WUC_PME_EN 0x00000002 /* PME Enable */
+
+/* Wake Up Filter Control */
+#define IGC_WUFC_LNKC 0x00000001 /* Link Status Change Wakeup Enable */
+#define IGC_WUFC_MC 0x00000008 /* Directed Multicast Wakeup Enable */
+
+#define IGC_CTRL_ADVD3WUC 0x00100000 /* D3 WUC */
+
+/* Wake Up Status */
+#define IGC_WUS_EX 0x00000004 /* Directed Exact */
+#define IGC_WUS_ARPD 0x00000020 /* Directed ARP Request */
+#define IGC_WUS_IPV4 0x00000040 /* Directed IPv4 */
+#define IGC_WUS_IPV6 0x00000080 /* Directed IPv6 */
+#define IGC_WUS_NSD 0x00000400 /* Directed IPv6 Neighbor Solicitation */
+
+/* Packet types that are enabled for wake packet delivery */
+#define WAKE_PKT_WUS ( \
+ IGC_WUS_EX | \
+ IGC_WUS_ARPD | \
+ IGC_WUS_IPV4 | \
+ IGC_WUS_IPV6 | \
+ IGC_WUS_NSD)
+
+/* Wake Up Packet Length */
+#define IGC_WUPL_MASK 0x00000FFF
+
+/* Wake Up Packet Memory stores the first 128 bytes of the wake up packet */
+#define IGC_WUPM_BYTES 128
+
/* Physical Func Reset Done Indication */
#define IGC_CTRL_EXT_LINK_MODE_MASK 0x00C00000
@@ -187,6 +218,7 @@
#define IGC_ICR_RXDMT0 BIT(4) /* Rx desc min. threshold (0) */
#define IGC_ICR_RXO BIT(6) /* Rx overrun */
#define IGC_ICR_RXT0 BIT(7) /* Rx timer intr (ring 0) */
+#define IGC_ICR_TS BIT(19) /* Time Sync Interrupt */
#define IGC_ICR_DRSTA BIT(30) /* Device Reset Asserted */
/* If this bit asserted, the driver should claim the interrupt */
@@ -209,6 +241,7 @@
#define IGC_IMS_DRSTA IGC_ICR_DRSTA /* Device Reset Asserted */
#define IGC_IMS_RXT0 IGC_ICR_RXT0 /* Rx timer intr */
#define IGC_IMS_RXDMT0 IGC_ICR_RXDMT0 /* Rx desc min. threshold */
+#define IGC_IMS_TS IGC_ICR_TS /* Time Sync Interrupt */
#define IGC_QVECTOR_MASK 0x7FFC /* Q-vector mask */
#define IGC_ITR_VAL_MASK 0x04 /* ITR value mask */
@@ -249,6 +282,10 @@
#define IGC_TXD_STAT_TC 0x00000004 /* Tx Underrun */
#define IGC_TXD_EXTCMD_TSTAMP 0x00000010 /* IEEE1588 Timestamp packet */
+/* IPSec Encrypt Enable */
+#define IGC_ADVTXD_L4LEN_SHIFT 8 /* Adv ctxt L4LEN shift */
+#define IGC_ADVTXD_MSS_SHIFT 16 /* Adv ctxt MSS shift */
+
/* Transmit Control */
#define IGC_TCTL_EN 0x00000002 /* enable Tx */
#define IGC_TCTL_PSP 0x00000008 /* pad short packets */
@@ -281,12 +318,21 @@
#define IGC_RCTL_RDMTS_HALF 0x00000000 /* Rx desc min thresh size */
#define IGC_RCTL_BAM 0x00008000 /* broadcast enable */
+/* Split Replication Receive Control */
+#define IGC_SRRCTL_TIMESTAMP 0x40000000
+#define IGC_SRRCTL_TIMER1SEL(timer) (((timer) & 0x3) << 14)
+#define IGC_SRRCTL_TIMER0SEL(timer) (((timer) & 0x3) << 17)
+
/* Receive Descriptor bit definitions */
#define IGC_RXD_STAT_EOP 0x02 /* End of Packet */
#define IGC_RXD_STAT_IXSM 0x04 /* Ignore checksum */
#define IGC_RXD_STAT_UDPCS 0x10 /* UDP xsum calculated */
#define IGC_RXD_STAT_TCPCS 0x20 /* TCP xsum calculated */
+/* Advanced Receive Descriptor bit definitions */
+#define IGC_RXDADV_STAT_TSIP 0x08000 /* timestamp in packet */
+#define IGC_RXDADV_STAT_TS 0x10000 /* Pkt was time stamped */
+
#define IGC_RXDEXT_STATERR_CE 0x01000000
#define IGC_RXDEXT_STATERR_SE 0x02000000
#define IGC_RXDEXT_STATERR_SEQ 0x04000000
@@ -323,6 +369,61 @@
#define I225_RXPBSIZE_DEFAULT 0x000000A2 /* RXPBSIZE default */
#define I225_TXPBSIZE_DEFAULT 0x04000014 /* TXPBSIZE default */
+#define IGC_RXPBS_CFG_TS_EN 0x80000000 /* Timestamp in Rx buffer */
+
+/* Time Sync Interrupt Causes */
+#define IGC_TSICR_SYS_WRAP BIT(0) /* SYSTIM Wrap around. */
+#define IGC_TSICR_TXTS BIT(1) /* Transmit Timestamp. */
+#define IGC_TSICR_TT0 BIT(3) /* Target Time 0 Trigger. */
+#define IGC_TSICR_TT1 BIT(4) /* Target Time 1 Trigger. */
+#define IGC_TSICR_AUTT0 BIT(5) /* Auxiliary Timestamp 0 Taken. */
+#define IGC_TSICR_AUTT1 BIT(6) /* Auxiliary Timestamp 1 Taken. */
+
+#define IGC_TSICR_INTERRUPTS IGC_TSICR_TXTS
+
+/* PTP Queue Filter */
+#define IGC_ETQF_1588 BIT(30)
+
+#define IGC_FTQF_VF_BP 0x00008000
+#define IGC_FTQF_1588_TIME_STAMP 0x08000000
+#define IGC_FTQF_MASK 0xF0000000
+#define IGC_FTQF_MASK_PROTO_BP 0x10000000
+
+/* Time Sync Receive Control bit definitions */
+#define IGC_TSYNCRXCTL_VALID 0x00000001 /* Rx timestamp valid */
+#define IGC_TSYNCRXCTL_TYPE_MASK 0x0000000E /* Rx type mask */
+#define IGC_TSYNCRXCTL_TYPE_L2_V2 0x00
+#define IGC_TSYNCRXCTL_TYPE_L4_V1 0x02
+#define IGC_TSYNCRXCTL_TYPE_L2_L4_V2 0x04
+#define IGC_TSYNCRXCTL_TYPE_ALL 0x08
+#define IGC_TSYNCRXCTL_TYPE_EVENT_V2 0x0A
+#define IGC_TSYNCRXCTL_ENABLED 0x00000010 /* enable Rx timestamping */
+#define IGC_TSYNCRXCTL_SYSCFI 0x00000020 /* Sys clock frequency */
+#define IGC_TSYNCRXCTL_RXSYNSIG 0x00000400 /* Sample RX tstamp in PHY sop */
+
+/* Time Sync Receive Configuration */
+#define IGC_TSYNCRXCFG_PTP_V1_CTRLT_MASK 0x000000FF
+#define IGC_TSYNCRXCFG_PTP_V1_SYNC_MESSAGE 0x00
+#define IGC_TSYNCRXCFG_PTP_V1_DELAY_REQ_MESSAGE 0x01
+
+/* Immediate Interrupt Receive */
+#define IGC_IMIR_CLEAR_MASK 0xF001FFFF /* IMIR Reg Clear Mask */
+#define IGC_IMIR_PORT_BYPASS 0x20000 /* IMIR Port Bypass Bit */
+#define IGC_IMIR_PRIORITY_SHIFT 29 /* IMIR Priority Shift */
+#define IGC_IMIREXT_CLEAR_MASK 0x7FFFF /* IMIREXT Reg Clear Mask */
+
+/* Immediate Interrupt Receive Extended */
+#define IGC_IMIREXT_CTRL_BP 0x00080000 /* Bypass check of ctrl bits */
+#define IGC_IMIREXT_SIZE_BP 0x00001000 /* Packet size bypass */
+
+/* Time Sync Transmit Control bit definitions */
+#define IGC_TSYNCTXCTL_VALID 0x00000001 /* Tx timestamp valid */
+#define IGC_TSYNCTXCTL_ENABLED 0x00000010 /* enable Tx timestamping */
+#define IGC_TSYNCTXCTL_MAX_ALLOWED_DLY_MASK 0x0000F000 /* max delay */
+#define IGC_TSYNCTXCTL_SYNC_COMP_ERR 0x20000000 /* sync err */
+#define IGC_TSYNCTXCTL_SYNC_COMP 0x40000000 /* sync complete */
+#define IGC_TSYNCTXCTL_START_SYNC 0x80000000 /* initiate sync */
+#define IGC_TSYNCTXCTL_TXSYNSIG 0x00000020 /* Sample TX tstamp in PHY sop */
/* Receive Checksum Control */
#define IGC_RXCSUM_CRCOFL 0x00000800 /* CRC32 offload enable */
@@ -363,6 +464,7 @@
/* PHY Status Register */
#define MII_SR_LINK_STATUS 0x0004 /* Link Status 1 = link */
#define MII_SR_AUTONEG_COMPLETE 0x0020 /* Auto Neg Complete */
+#define IGC_PHY_RST_COMP 0x0100 /* Internal PHY reset completion */
/* PHY 1000 MII Register/Bit Definitions */
/* PHY Registers defined by IEEE */
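Most of these additions configure the time-sync receive path: TSYNCRXCTL selects which packet classes latch a timestamp and gates timestamping as a whole. A sketch of enabling all-packet Rx timestamping with the new bits (rd32/wr32/wrfl are the driver's usual accessors; the IGC_TSYNCRXCTL register offset is assumed from the companion igc_regs.h change):

        /* Sketch: timestamp every received packet */
        u32 tsync = rd32(IGC_TSYNCRXCTL);

        tsync &= ~IGC_TSYNCRXCTL_TYPE_MASK;
        tsync |= IGC_TSYNCRXCTL_TYPE_ALL | IGC_TSYNCRXCTL_ENABLED |
                 IGC_TSYNCRXCTL_RXSYNSIG;
        wr32(IGC_TSYNCRXCTL, tsync);
        wrfl();         /* flush the posted write */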
diff --git a/drivers/net/ethernet/intel/igc/igc_ethtool.c b/drivers/net/ethernet/intel/igc/igc_ethtool.c
index 455c1cdceb6e..ee07011e13e9 100644
--- a/drivers/net/ethernet/intel/igc/igc_ethtool.c
+++ b/drivers/net/ethernet/intel/igc/igc_ethtool.c
@@ -1600,6 +1600,39 @@ static int igc_set_channels(struct net_device *netdev,
return 0;
}
+static int igc_get_ts_info(struct net_device *dev,
+ struct ethtool_ts_info *info)
+{
+ struct igc_adapter *adapter = netdev_priv(dev);
+
+ if (adapter->ptp_clock)
+ info->phc_index = ptp_clock_index(adapter->ptp_clock);
+ else
+ info->phc_index = -1;
+
+ switch (adapter->hw.mac.type) {
+ case igc_i225:
+ info->so_timestamping =
+ SOF_TIMESTAMPING_TX_SOFTWARE |
+ SOF_TIMESTAMPING_RX_SOFTWARE |
+ SOF_TIMESTAMPING_SOFTWARE |
+ SOF_TIMESTAMPING_TX_HARDWARE |
+ SOF_TIMESTAMPING_RX_HARDWARE |
+ SOF_TIMESTAMPING_RAW_HARDWARE;
+
+ info->tx_types =
+ BIT(HWTSTAMP_TX_OFF) |
+ BIT(HWTSTAMP_TX_ON);
+
+ info->rx_filters = BIT(HWTSTAMP_FILTER_NONE);
+ info->rx_filters |= BIT(HWTSTAMP_FILTER_ALL);
+
+ return 0;
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
static u32 igc_get_priv_flags(struct net_device *netdev)
{
struct igc_adapter *adapter = netdev_priv(netdev);
@@ -1847,6 +1880,7 @@ static const struct ethtool_ops igc_ethtool_ops = {
.get_rxfh_indir_size = igc_get_rxfh_indir_size,
.get_rxfh = igc_get_rxfh,
.set_rxfh = igc_set_rxfh,
+ .get_ts_info = igc_get_ts_info,
.get_channels = igc_get_channels,
.set_channels = igc_set_channels,
.get_priv_flags = igc_get_priv_flags,
diff --git a/drivers/net/ethernet/intel/igc/igc_hw.h b/drivers/net/ethernet/intel/igc/igc_hw.h
index 20f710645746..90ac0e0144d8 100644
--- a/drivers/net/ethernet/intel/igc/igc_hw.h
+++ b/drivers/net/ethernet/intel/igc/igc_hw.h
@@ -21,8 +21,7 @@
#define IGC_DEV_ID_I225_I 0x15F8
#define IGC_DEV_ID_I220_V 0x15F7
#define IGC_DEV_ID_I225_K 0x3100
-
-#define IGC_FUNC_0 0
+#define IGC_DEV_ID_I225_BLANK_NVM 0x15FD
/* Function pointers for the MAC. */
struct igc_mac_operations {
diff --git a/drivers/net/ethernet/intel/igc/igc_main.c b/drivers/net/ethernet/intel/igc/igc_main.c
index 9700527dd797..d9d5425fe8d9 100644
--- a/drivers/net/ethernet/intel/igc/igc_main.c
+++ b/drivers/net/ethernet/intel/igc/igc_main.c
@@ -8,6 +8,7 @@
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/ip.h>
+#include <linux/pm_runtime.h>
#include <net/ipv6.h>
@@ -44,31 +45,13 @@ static const struct pci_device_id igc_pci_tbl[] = {
{ PCI_VDEVICE(INTEL, IGC_DEV_ID_I225_I), board_base },
{ PCI_VDEVICE(INTEL, IGC_DEV_ID_I220_V), board_base },
{ PCI_VDEVICE(INTEL, IGC_DEV_ID_I225_K), board_base },
+ { PCI_VDEVICE(INTEL, IGC_DEV_ID_I225_BLANK_NVM), board_base },
/* required last entry */
{0, }
};
MODULE_DEVICE_TABLE(pci, igc_pci_tbl);
-/* forward declaration */
-static void igc_clean_tx_ring(struct igc_ring *tx_ring);
-static int igc_sw_init(struct igc_adapter *);
-static void igc_configure(struct igc_adapter *adapter);
-static void igc_power_down_link(struct igc_adapter *adapter);
-static void igc_set_default_mac_filter(struct igc_adapter *adapter);
-static void igc_set_rx_mode(struct net_device *netdev);
-static void igc_write_itr(struct igc_q_vector *q_vector);
-static void igc_assign_vector(struct igc_q_vector *q_vector, int msix_vector);
-static void igc_free_q_vector(struct igc_adapter *adapter, int v_idx);
-static void igc_set_interrupt_capability(struct igc_adapter *adapter,
- bool msix);
-static void igc_free_q_vectors(struct igc_adapter *adapter);
-static void igc_irq_disable(struct igc_adapter *adapter);
-static void igc_irq_enable(struct igc_adapter *adapter);
-static void igc_configure_msix(struct igc_adapter *adapter);
-static bool igc_alloc_mapped_page(struct igc_ring *rx_ring,
- struct igc_rx_buffer *bi);
-
enum latency_range {
lowest_latency = 0,
low_latency = 1,
@@ -76,6 +59,16 @@ enum latency_range {
latency_invalid = 255
};
+/**
+ * igc_power_down_link - Power down the phy link
+ * @adapter: address of board private structure
+ */
+static void igc_power_down_link(struct igc_adapter *adapter)
+{
+ if (adapter->hw.phy.media_type == igc_media_type_copper)
+ igc_power_down_phy_copper_base(&adapter->hw);
+}
+
void igc_reset(struct igc_adapter *adapter)
{
struct pci_dev *pdev = adapter->pdev;
@@ -110,11 +103,14 @@ void igc_reset(struct igc_adapter *adapter)
if (!netif_running(adapter->netdev))
igc_power_down_link(adapter);
+ /* Re-enable PTP, where applicable. */
+ igc_ptp_reset(adapter);
+
igc_get_phy_info(hw);
}
/**
- * igc_power_up_link - Power up the phy/serdes link
+ * igc_power_up_link - Power up the phy link
* @adapter: address of board private structure
*/
static void igc_power_up_link(struct igc_adapter *adapter)
@@ -128,16 +124,6 @@ static void igc_power_up_link(struct igc_adapter *adapter)
}
/**
- * igc_power_down_link - Power down the phy/serdes link
- * @adapter: address of board private structure
- */
-static void igc_power_down_link(struct igc_adapter *adapter)
-{
- if (adapter->hw.phy.media_type == igc_media_type_copper)
- igc_power_down_phy_copper_base(&adapter->hw);
-}
-
-/**
* igc_release_hw_control - release control of the h/w to f/w
* @adapter: address of board private structure
*
@@ -176,43 +162,6 @@ static void igc_get_hw_control(struct igc_adapter *adapter)
}
/**
- * igc_free_tx_resources - Free Tx Resources per Queue
- * @tx_ring: Tx descriptor ring for a specific queue
- *
- * Free all transmit software resources
- */
-void igc_free_tx_resources(struct igc_ring *tx_ring)
-{
- igc_clean_tx_ring(tx_ring);
-
- vfree(tx_ring->tx_buffer_info);
- tx_ring->tx_buffer_info = NULL;
-
- /* if not set, then don't free */
- if (!tx_ring->desc)
- return;
-
- dma_free_coherent(tx_ring->dev, tx_ring->size,
- tx_ring->desc, tx_ring->dma);
-
- tx_ring->desc = NULL;
-}
-
-/**
- * igc_free_all_tx_resources - Free Tx Resources for All Queues
- * @adapter: board private structure
- *
- * Free all transmit software resources
- */
-static void igc_free_all_tx_resources(struct igc_adapter *adapter)
-{
- int i;
-
- for (i = 0; i < adapter->num_tx_queues; i++)
- igc_free_tx_resources(adapter->tx_ring[i]);
-}
-
-/**
* igc_clean_tx_ring - Free Tx Buffers
* @tx_ring: ring to be cleaned
*/
@@ -274,6 +223,43 @@ static void igc_clean_tx_ring(struct igc_ring *tx_ring)
}
/**
+ * igc_free_tx_resources - Free Tx Resources per Queue
+ * @tx_ring: Tx descriptor ring for a specific queue
+ *
+ * Free all transmit software resources
+ */
+void igc_free_tx_resources(struct igc_ring *tx_ring)
+{
+ igc_clean_tx_ring(tx_ring);
+
+ vfree(tx_ring->tx_buffer_info);
+ tx_ring->tx_buffer_info = NULL;
+
+ /* if not set, then don't free */
+ if (!tx_ring->desc)
+ return;
+
+ dma_free_coherent(tx_ring->dev, tx_ring->size,
+ tx_ring->desc, tx_ring->dma);
+
+ tx_ring->desc = NULL;
+}
+
+/**
+ * igc_free_all_tx_resources - Free Tx Resources for All Queues
+ * @adapter: board private structure
+ *
+ * Free all transmit software resources
+ */
+static void igc_free_all_tx_resources(struct igc_adapter *adapter)
+{
+ int i;
+
+ for (i = 0; i < adapter->num_tx_queues; i++)
+ igc_free_tx_resources(adapter->tx_ring[i]);
+}
+
+/**
* igc_clean_all_tx_rings - Free Tx Buffers for all queues
* @adapter: board private structure
*/
@@ -771,6 +757,51 @@ static void igc_setup_tctl(struct igc_adapter *adapter)
}
/**
+ * igc_rar_set_index - Sync RAL[index] and RAH[index] registers with MAC table
+ * @adapter: address of board private structure
+ * @index: Index of the RAR entry which needs to be synced with MAC table
+ */
+static void igc_rar_set_index(struct igc_adapter *adapter, u32 index)
+{
+ u8 *addr = adapter->mac_table[index].addr;
+ struct igc_hw *hw = &adapter->hw;
+ u32 rar_low, rar_high;
+
+ /* HW expects these to be in network order when they are plugged
+ * into the registers which are little endian. In order to guarantee
+ * that ordering we need to do an leXX_to_cpup here so we are
+ * ready for the byteswap that occurs with writel
+ */
+ rar_low = le32_to_cpup((__le32 *)(addr));
+ rar_high = le16_to_cpup((__le16 *)(addr + 4));
+
+ /* Indicate to hardware the Address is Valid. */
+ if (adapter->mac_table[index].state & IGC_MAC_STATE_IN_USE) {
+ if (is_valid_ether_addr(addr))
+ rar_high |= IGC_RAH_AV;
+
+ rar_high |= IGC_RAH_POOL_1 <<
+ adapter->mac_table[index].queue;
+ }
+
+ wr32(IGC_RAL(index), rar_low);
+ wrfl();
+ wr32(IGC_RAH(index), rar_high);
+ wrfl();
+}
+
+/* Set default MAC address for the PF in the first RAR entry */
+static void igc_set_default_mac_filter(struct igc_adapter *adapter)
+{
+ struct igc_mac_addr *mac_table = &adapter->mac_table[0];
+
+ ether_addr_copy(mac_table->addr, adapter->hw.mac.addr);
+ mac_table->state = IGC_MAC_STATE_DEFAULT | IGC_MAC_STATE_IN_USE;
+
+ igc_rar_set_index(adapter, 0);
+}
+
+/**
* igc_set_mac - Change the Ethernet Address of the NIC
* @netdev: network interface device structure
* @p: pointer to an address structure
@@ -850,7 +881,7 @@ static void igc_tx_ctxtdesc(struct igc_ring *tx_ring,
/* set bits to identify this as an advanced context descriptor */
type_tucmd |= IGC_TXD_CMD_DEXT | IGC_ADVTXD_DTYP_CTXT;
- /* For 82575, context index must be unique per ring. */
+ /* For i225, context index must be unique per ring. */
if (test_bit(IGC_RING_FLAG_TX_CTX_IDX, &tx_ring->flags))
mss_l4len_idx |= tx_ring->reg_idx << 4;
@@ -957,6 +988,11 @@ static inline int igc_maybe_stop_tx(struct igc_ring *tx_ring, const u16 size)
return __igc_maybe_stop_tx(tx_ring, size);
}
+#define IGC_SET_FLAG(_input, _flag, _result) \
+ (((_flag) <= (_result)) ? \
+ ((u32)((_input) & (_flag)) * ((_result) / (_flag))) : \
+ ((u32)((_input) & (_flag)) / ((_flag) / (_result))))
+
static u32 igc_tx_cmd_type(struct sk_buff *skb, u32 tx_flags)
{
/* set type for advanced descriptor with frame checksum insertion */
@@ -964,6 +1000,14 @@ static u32 igc_tx_cmd_type(struct sk_buff *skb, u32 tx_flags)
IGC_ADVTXD_DCMD_DEXT |
IGC_ADVTXD_DCMD_IFCS;
+ /* set segmentation bits for TSO */
+ cmd_type |= IGC_SET_FLAG(tx_flags, IGC_TX_FLAGS_TSO,
+ (IGC_ADVTXD_DCMD_TSE));
+
+ /* set timestamp bit if present */
+ cmd_type |= IGC_SET_FLAG(tx_flags, IGC_TX_FLAGS_TSTAMP,
+ (IGC_ADVTXD_MAC_TSTAMP));
+
return cmd_type;
}
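IGC_SET_FLAG translates a bit in _input into a differently positioned _result bit without branching: the masked value is scaled by the ratio of the two constants, which the compiler folds at build time. The TSO case above reduces to the following (a sketch of the expansion, assuming IGC_TX_FLAGS_TSO is below IGC_ADVTXD_DCMD_TSE):

        /* IGC_SET_FLAG(tx_flags, IGC_TX_FLAGS_TSO, IGC_ADVTXD_DCMD_TSE) */
        cmd_type |= (u32)(tx_flags & IGC_TX_FLAGS_TSO) *
                    (IGC_ADVTXD_DCMD_TSE / IGC_TX_FLAGS_TSO);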
@@ -1131,6 +1175,100 @@ dma_error:
return -1;
}
+static int igc_tso(struct igc_ring *tx_ring,
+ struct igc_tx_buffer *first,
+ u8 *hdr_len)
+{
+ u32 vlan_macip_lens, type_tucmd, mss_l4len_idx;
+ struct sk_buff *skb = first->skb;
+ union {
+ struct iphdr *v4;
+ struct ipv6hdr *v6;
+ unsigned char *hdr;
+ } ip;
+ union {
+ struct tcphdr *tcp;
+ struct udphdr *udp;
+ unsigned char *hdr;
+ } l4;
+ u32 paylen, l4_offset;
+ int err;
+
+ if (skb->ip_summed != CHECKSUM_PARTIAL)
+ return 0;
+
+ if (!skb_is_gso(skb))
+ return 0;
+
+ err = skb_cow_head(skb, 0);
+ if (err < 0)
+ return err;
+
+ ip.hdr = skb_network_header(skb);
+ l4.hdr = skb_checksum_start(skb);
+
+ /* ADV DTYP TUCMD MKRLOC/ISCSIHEDLEN */
+ type_tucmd = IGC_ADVTXD_TUCMD_L4T_TCP;
+
+ /* initialize outer IP header fields */
+ if (ip.v4->version == 4) {
+ unsigned char *csum_start = skb_checksum_start(skb);
+ unsigned char *trans_start = ip.hdr + (ip.v4->ihl * 4);
+
+ /* IP header will have to cancel out any data that
+ * is not a part of the outer IP header
+ */
+ ip.v4->check = csum_fold(csum_partial(trans_start,
+ csum_start - trans_start,
+ 0));
+ type_tucmd |= IGC_ADVTXD_TUCMD_IPV4;
+
+ ip.v4->tot_len = 0;
+ first->tx_flags |= IGC_TX_FLAGS_TSO |
+ IGC_TX_FLAGS_CSUM |
+ IGC_TX_FLAGS_IPV4;
+ } else {
+ ip.v6->payload_len = 0;
+ first->tx_flags |= IGC_TX_FLAGS_TSO |
+ IGC_TX_FLAGS_CSUM;
+ }
+
+ /* determine offset of inner transport header */
+ l4_offset = l4.hdr - skb->data;
+
+ /* remove payload length from inner checksum */
+ paylen = skb->len - l4_offset;
+ if (type_tucmd & IGC_ADVTXD_TUCMD_L4T_TCP) {
+ /* compute length of segmentation header */
+ *hdr_len = (l4.tcp->doff * 4) + l4_offset;
+ csum_replace_by_diff(&l4.tcp->check,
+ (__force __wsum)htonl(paylen));
+ } else {
+ /* compute length of segmentation header */
+ *hdr_len = sizeof(*l4.udp) + l4_offset;
+ csum_replace_by_diff(&l4.udp->check,
+ (__force __wsum)htonl(paylen));
+ }
+
+ /* update gso size and bytecount with header size */
+ first->gso_segs = skb_shinfo(skb)->gso_segs;
+ first->bytecount += (first->gso_segs - 1) * *hdr_len;
+
+ /* MSS L4LEN IDX */
+ mss_l4len_idx = (*hdr_len - l4_offset) << IGC_ADVTXD_L4LEN_SHIFT;
+ mss_l4len_idx |= skb_shinfo(skb)->gso_size << IGC_ADVTXD_MSS_SHIFT;
+
+ /* VLAN MACLEN IPLEN */
+ vlan_macip_lens = l4.hdr - ip.hdr;
+ vlan_macip_lens |= (ip.hdr - skb->data) << IGC_ADVTXD_MACLEN_SHIFT;
+ vlan_macip_lens |= first->tx_flags & IGC_TX_FLAGS_VLAN_MASK;
+
+ igc_tx_ctxtdesc(tx_ring, first, vlan_macip_lens,
+ type_tucmd, mss_l4len_idx);
+
+ return 1;
+}
+
static netdev_tx_t igc_xmit_frame_ring(struct sk_buff *skb,
struct igc_ring *tx_ring)
{
@@ -1140,6 +1278,7 @@ static netdev_tx_t igc_xmit_frame_ring(struct sk_buff *skb,
u32 tx_flags = 0;
unsigned short f;
u8 hdr_len = 0;
+ int tso = 0;
/* need: 1 descriptor per page * PAGE_SIZE/IGC_MAX_DATA_PER_TXD,
* + 1 desc for skb_headlen/IGC_MAX_DATA_PER_TXD,
@@ -1162,15 +1301,45 @@ static netdev_tx_t igc_xmit_frame_ring(struct sk_buff *skb,
first->bytecount = skb->len;
first->gso_segs = 1;
+ if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) {
+ struct igc_adapter *adapter = netdev_priv(tx_ring->netdev);
+
+ /* FIXME: add support for retrieving timestamps from
+ * the other timer registers before skipping the
+ * timestamping request.
+ */
+ if (adapter->tstamp_config.tx_type == HWTSTAMP_TX_ON &&
+ !test_and_set_bit_lock(__IGC_PTP_TX_IN_PROGRESS,
+ &adapter->state)) {
+ skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
+ tx_flags |= IGC_TX_FLAGS_TSTAMP;
+
+ adapter->ptp_tx_skb = skb_get(skb);
+ adapter->ptp_tx_start = jiffies;
+ } else {
+ adapter->tx_hwtstamp_skipped++;
+ }
+ }
+
/* record initial flags and protocol */
first->tx_flags = tx_flags;
first->protocol = protocol;
- igc_tx_csum(tx_ring, first);
+ tso = igc_tso(tx_ring, first, &hdr_len);
+ if (tso < 0)
+ goto out_drop;
+ else if (!tso)
+ igc_tx_csum(tx_ring, first);
igc_tx_map(tx_ring, first, hdr_len);
return NETDEV_TX_OK;
+
+out_drop:
+ dev_kfree_skb_any(first->skb);
+ first->skb = NULL;
+
+ return NETDEV_TX_OK;
}
static inline struct igc_ring *igc_tx_queue_mapping(struct igc_adapter *adapter,
@@ -1269,6 +1438,10 @@ static void igc_process_skb_fields(struct igc_ring *rx_ring,
igc_rx_checksum(rx_ring, rx_desc, skb);
+ if (igc_test_staterr(rx_desc, IGC_RXDADV_STAT_TS) &&
+ !igc_test_staterr(rx_desc, IGC_RXDADV_STAT_TSIP))
+ igc_ptp_rx_rgtstamp(rx_ring->q_vector, skb);
+
skb_record_rx_queue(skb, rx_ring->queue_index);
skb->protocol = eth_type_trans(skb, rx_ring->netdev);
@@ -1388,6 +1561,12 @@ static struct sk_buff *igc_construct_skb(struct igc_ring *rx_ring,
if (unlikely(!skb))
return NULL;
+ if (unlikely(igc_test_staterr(rx_desc, IGC_RXDADV_STAT_TSIP))) {
+ igc_ptp_rx_pktstamp(rx_ring->q_vector, va, skb);
+ va += IGC_TS_HDR_LEN;
+ size -= IGC_TS_HDR_LEN;
+ }
+
/* Determine available headroom for copy */
headlen = size;
if (headlen > IGC_RX_HDR_LEN)
@@ -1485,7 +1664,6 @@ static bool igc_can_reuse_rx_page(struct igc_rx_buffer *rx_buffer)
* igc_is_non_eop - process handling of non-EOP buffers
* @rx_ring: Rx ring being processed
* @rx_desc: Rx descriptor for current buffer
- * @skb: current socket buffer containing buffer in progress
*
* This function updates next to clean. If the buffer is an EOP buffer
* this function exits returning false, otherwise it will place the
@@ -1565,9 +1743,56 @@ static void igc_put_rx_buffer(struct igc_ring *rx_ring,
rx_buffer->page = NULL;
}
+static inline unsigned int igc_rx_offset(struct igc_ring *rx_ring)
+{
+ return ring_uses_build_skb(rx_ring) ? IGC_SKB_PAD : 0;
+}
+
+static bool igc_alloc_mapped_page(struct igc_ring *rx_ring,
+ struct igc_rx_buffer *bi)
+{
+ struct page *page = bi->page;
+ dma_addr_t dma;
+
+ /* since we are recycling buffers we should seldom need to alloc */
+ if (likely(page))
+ return true;
+
+ /* alloc new page for storage */
+ page = dev_alloc_pages(igc_rx_pg_order(rx_ring));
+ if (unlikely(!page)) {
+ rx_ring->rx_stats.alloc_failed++;
+ return false;
+ }
+
+ /* map page for use */
+ dma = dma_map_page_attrs(rx_ring->dev, page, 0,
+ igc_rx_pg_size(rx_ring),
+ DMA_FROM_DEVICE,
+ IGC_RX_DMA_ATTR);
+
+ /* if mapping failed, free the memory back to the system since
+ * there isn't much point in holding memory we can't use
+ */
+ if (dma_mapping_error(rx_ring->dev, dma)) {
+ __free_page(page);
+
+ rx_ring->rx_stats.alloc_failed++;
+ return false;
+ }
+
+ bi->dma = dma;
+ bi->page = page;
+ bi->page_offset = igc_rx_offset(rx_ring);
+ bi->pagecnt_bias = 1;
+
+ return true;
+}
+
/**
* igc_alloc_rx_buffers - Replace used receive buffers; packet split
- * @adapter: address of board private structure
+ * @rx_ring: rx descriptor ring
+ * @cleaned_count: number of buffers to replace
*/
static void igc_alloc_rx_buffers(struct igc_ring *rx_ring, u16 cleaned_count)
{
@@ -1725,52 +1950,6 @@ static int igc_clean_rx_irq(struct igc_q_vector *q_vector, const int budget)
return total_packets;
}
-static inline unsigned int igc_rx_offset(struct igc_ring *rx_ring)
-{
- return ring_uses_build_skb(rx_ring) ? IGC_SKB_PAD : 0;
-}
-
-static bool igc_alloc_mapped_page(struct igc_ring *rx_ring,
- struct igc_rx_buffer *bi)
-{
- struct page *page = bi->page;
- dma_addr_t dma;
-
- /* since we are recycling buffers we should seldom need to alloc */
- if (likely(page))
- return true;
-
- /* alloc new page for storage */
- page = dev_alloc_pages(igc_rx_pg_order(rx_ring));
- if (unlikely(!page)) {
- rx_ring->rx_stats.alloc_failed++;
- return false;
- }
-
- /* map page for use */
- dma = dma_map_page_attrs(rx_ring->dev, page, 0,
- igc_rx_pg_size(rx_ring),
- DMA_FROM_DEVICE,
- IGC_RX_DMA_ATTR);
-
- /* if mapping failed free memory back to system since
- * there isn't much point in holding memory we can't use
- */
- if (dma_mapping_error(rx_ring->dev, dma)) {
- __free_page(page);
-
- rx_ring->rx_stats.alloc_failed++;
- return false;
- }
-
- bi->dma = dma;
- bi->page = page;
- bi->page_offset = igc_rx_offset(rx_ring);
- bi->pagecnt_bias = 1;
-
- return true;
-}
-
/**
* igc_clean_tx_irq - Reclaim resources after transmit completes
* @q_vector: pointer to q_vector containing needed info
@@ -1942,6 +2121,1128 @@ static bool igc_clean_tx_irq(struct igc_q_vector *q_vector, int napi_budget)
return !!budget;
}
+static void igc_nfc_filter_restore(struct igc_adapter *adapter)
+{
+ struct igc_nfc_filter *rule;
+
+ spin_lock(&adapter->nfc_lock);
+
+ hlist_for_each_entry(rule, &adapter->nfc_filter_list, nfc_node)
+ igc_add_filter(adapter, rule);
+
+ spin_unlock(&adapter->nfc_lock);
+}
+
+/* If the filter to be added and an already existing filter express
+ * the same address and address type, it should be possible to simply
+ * override the other configuration fields, for example the queue used
+ * to steer traffic.
+ */
+static bool igc_mac_entry_can_be_used(const struct igc_mac_addr *entry,
+ const u8 *addr, const u8 flags)
+{
+ if (!(entry->state & IGC_MAC_STATE_IN_USE))
+ return true;
+
+ if ((entry->state & IGC_MAC_STATE_SRC_ADDR) !=
+ (flags & IGC_MAC_STATE_SRC_ADDR))
+ return false;
+
+ if (!ether_addr_equal(addr, entry->addr))
+ return false;
+
+ return true;
+}
+
+/* Add a MAC filter for 'addr' directing matching traffic to 'queue'.
+ * 'flags' indicates what kind of match is made; the match is for the
+ * destination address by default. If matching by source address is
+ * desired, the IGC_MAC_STATE_SRC_ADDR flag can be used.
+ */
+static int igc_add_mac_filter(struct igc_adapter *adapter,
+ const u8 *addr, const u8 queue)
+{
+ struct igc_hw *hw = &adapter->hw;
+ int rar_entries = hw->mac.rar_entry_count;
+ int i;
+
+ if (is_zero_ether_addr(addr))
+ return -EINVAL;
+
+ /* Search for the first empty entry in the MAC table.
+ * Do not touch entries at the end of the table reserved for the VF MAC
+ * addresses.
+ */
+ for (i = 0; i < rar_entries; i++) {
+ if (!igc_mac_entry_can_be_used(&adapter->mac_table[i],
+ addr, 0))
+ continue;
+
+ ether_addr_copy(adapter->mac_table[i].addr, addr);
+ adapter->mac_table[i].queue = queue;
+ adapter->mac_table[i].state |= IGC_MAC_STATE_IN_USE;
+
+ igc_rar_set_index(adapter, i);
+ return i;
+ }
+
+ return -ENOSPC;
+}
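
To make the reuse rule in igc_mac_entry_can_be_used() concrete, here is a minimal user-space model (the STATE_* values below are stand-ins, not the driver's real IGC_MAC_STATE_* encoding): a slot is usable if it is free, or if it already holds the same address with the same source/destination match type, in which case adding the filter merely overrides the entry's queue.

#include <stdbool.h>
#include <stdint.h>
#include <string.h>

#define STATE_IN_USE   0x01
#define STATE_SRC_ADDR 0x02

struct mac_entry {
        uint8_t addr[6];
        uint8_t state;
};

static bool entry_can_be_used(const struct mac_entry *e,
                              const uint8_t *addr, uint8_t flags)
{
        if (!(e->state & STATE_IN_USE))
                return true;                      /* free slot */
        if ((e->state & STATE_SRC_ADDR) != (flags & STATE_SRC_ADDR))
                return false;                     /* different match type */
        return !memcmp(addr, e->addr, 6);         /* same address: override */
}

int main(void)
{
        struct mac_entry e = { {1, 2, 3, 4, 5, 6}, STATE_IN_USE };
        uint8_t same[6] = {1, 2, 3, 4, 5, 6};

        return entry_can_be_used(&e, same, 0) ? 0 : 1; /* usable: exit 0 */
}
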
+
+/* Remove a MAC filter for 'addr' directing matching traffic to
+ * 'queue'. 'flags' indicates what kind of match needs to be removed;
+ * the match is for the destination address by default. If a filter
+ * matching by source address is to be removed, the
+ * IGC_MAC_STATE_SRC_ADDR flag can be used.
+ */
+static int igc_del_mac_filter(struct igc_adapter *adapter,
+ const u8 *addr, const u8 queue)
+{
+ struct igc_hw *hw = &adapter->hw;
+ int rar_entries = hw->mac.rar_entry_count;
+ int i;
+
+ if (is_zero_ether_addr(addr))
+ return -EINVAL;
+
+ /* Search for matching entry in the MAC table based on given address
+ * and queue. Do not touch entries at the end of the table reserved
+ * for the VF MAC addresses.
+ */
+ for (i = 0; i < rar_entries; i++) {
+ if (!(adapter->mac_table[i].state & IGC_MAC_STATE_IN_USE))
+ continue;
+ if (adapter->mac_table[i].queue != queue)
+ continue;
+ if (!ether_addr_equal(adapter->mac_table[i].addr, addr))
+ continue;
+
+ /* When a filter for the default address is "deleted",
+ * we return it to its initial configuration
+ */
+ if (adapter->mac_table[i].state & IGC_MAC_STATE_DEFAULT) {
+ adapter->mac_table[i].state =
+ IGC_MAC_STATE_DEFAULT | IGC_MAC_STATE_IN_USE;
+ adapter->mac_table[i].queue = 0;
+ } else {
+ adapter->mac_table[i].state = 0;
+ adapter->mac_table[i].queue = 0;
+ memset(adapter->mac_table[i].addr, 0, ETH_ALEN);
+ }
+
+ igc_rar_set_index(adapter, i);
+ return 0;
+ }
+
+ return -ENOENT;
+}
+
+static int igc_uc_sync(struct net_device *netdev, const unsigned char *addr)
+{
+ struct igc_adapter *adapter = netdev_priv(netdev);
+ int ret;
+
+ ret = igc_add_mac_filter(adapter, addr, adapter->num_rx_queues);
+
+ return min_t(int, ret, 0);
+}
+
+static int igc_uc_unsync(struct net_device *netdev, const unsigned char *addr)
+{
+ struct igc_adapter *adapter = netdev_priv(netdev);
+
+ igc_del_mac_filter(adapter, addr, adapter->num_rx_queues);
+
+ return 0;
+}
+
+/**
+ * igc_set_rx_mode - Secondary Unicast, Multicast and Promiscuous mode set
+ * @netdev: network interface device structure
+ *
+ * The set_rx_mode entry point is called whenever the unicast or multicast
+ * address lists or the network interface flags are updated. This routine is
+ * responsible for configuring the hardware for proper unicast, multicast,
+ * promiscuous mode, and all-multi behavior.
+ */
+static void igc_set_rx_mode(struct net_device *netdev)
+{
+ struct igc_adapter *adapter = netdev_priv(netdev);
+ struct igc_hw *hw = &adapter->hw;
+ u32 rctl = 0, rlpml = MAX_JUMBO_FRAME_SIZE;
+ int count;
+
+ /* Check for Promiscuous and All Multicast modes */
+ if (netdev->flags & IFF_PROMISC) {
+ rctl |= IGC_RCTL_UPE | IGC_RCTL_MPE;
+ } else {
+ if (netdev->flags & IFF_ALLMULTI) {
+ rctl |= IGC_RCTL_MPE;
+ } else {
+ /* Write addresses to the MTA, if the attempt fails
+ * then we should just turn on promiscuous mode so
+ * that we can at least receive multicast traffic
+ */
+ count = igc_write_mc_addr_list(netdev);
+ if (count < 0)
+ rctl |= IGC_RCTL_MPE;
+ }
+ }
+
+ /* Write addresses to available RAR registers, if there is not
+ * sufficient space to store all the addresses then enable
+ * unicast promiscuous mode
+ */
+ if (__dev_uc_sync(netdev, igc_uc_sync, igc_uc_unsync))
+ rctl |= IGC_RCTL_UPE;
+
+ /* update state of unicast and multicast */
+ rctl |= rd32(IGC_RCTL) & ~(IGC_RCTL_UPE | IGC_RCTL_MPE);
+ wr32(IGC_RCTL, rctl);
+
+#if (PAGE_SIZE < 8192)
+ if (adapter->max_frame_size <= IGC_MAX_FRAME_BUILD_SKB)
+ rlpml = IGC_MAX_FRAME_BUILD_SKB;
+#endif
+ wr32(IGC_RLPML, rlpml);
+}
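
The fallback ladder above can be summarized: IFF_PROMISC sets both promiscuous bits outright; otherwise multicast promiscuity is the fallback whenever IFF_ALLMULTI is set or the MTA write fails, and unicast promiscuity is the fallback when the RAR table cannot hold all unicast addresses. A stand-alone sketch with made-up bit values (the real IGC_RCTL_UPE/IGC_RCTL_MPE encodings live in the driver headers):

#include <stdio.h>

#define RCTL_UPE 0x01 /* unicast promiscuous */
#define RCTL_MPE 0x02 /* multicast promiscuous */

static unsigned int rx_mode_bits(int promisc, int allmulti,
                                 int mc_list_ok, int uc_list_ok)
{
        unsigned int rctl = 0;

        if (promisc)
                rctl |= RCTL_UPE | RCTL_MPE;
        else if (allmulti || !mc_list_ok)
                rctl |= RCTL_MPE;   /* fall back to all-multicast */
        if (!uc_list_ok)
                rctl |= RCTL_UPE;   /* fall back to unicast promiscuous */
        return rctl;
}

int main(void)
{
        /* too many unicast addresses for the RAR table -> UPE only */
        printf("0x%x\n", rx_mode_bits(0, 0, 1, 0)); /* 0x1 */
        return 0;
}
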
+
+/**
+ * igc_configure - configure the hardware for RX and TX
+ * @adapter: private board structure
+ */
+static void igc_configure(struct igc_adapter *adapter)
+{
+ struct net_device *netdev = adapter->netdev;
+ int i = 0;
+
+ igc_get_hw_control(adapter);
+ igc_set_rx_mode(netdev);
+
+ igc_setup_tctl(adapter);
+ igc_setup_mrqc(adapter);
+ igc_setup_rctl(adapter);
+
+ igc_nfc_filter_restore(adapter);
+ igc_configure_tx(adapter);
+ igc_configure_rx(adapter);
+
+ igc_rx_fifo_flush_base(&adapter->hw);
+
+ /* call igc_desc_unused which always leaves
+ * at least 1 descriptor unused to make sure
+ * next_to_use != next_to_clean
+ */
+ for (i = 0; i < adapter->num_rx_queues; i++) {
+ struct igc_ring *ring = adapter->rx_ring[i];
+
+ igc_alloc_rx_buffers(ring, igc_desc_unused(ring));
+ }
+}
+
+/**
+ * igc_write_ivar - configure ivar for given MSI-X vector
+ * @hw: pointer to the HW structure
+ * @msix_vector: vector number we are allocating to a given ring
+ * @index: row index of IVAR register to write within IVAR table
+ * @offset: column offset in IVAR, should be a multiple of 8
+ *
+ * The IVAR table consists of 2 columns,
+ * each containing a cause allocation for an Rx and Tx ring, and a
+ * variable number of rows depending on the number of queues supported.
+ */
+static void igc_write_ivar(struct igc_hw *hw, int msix_vector,
+ int index, int offset)
+{
+ u32 ivar = array_rd32(IGC_IVAR0, index);
+
+ /* clear any bits that are currently set */
+ ivar &= ~((u32)0xFF << offset);
+
+ /* write vector and valid bit */
+ ivar |= (msix_vector | IGC_IVAR_VALID) << offset;
+
+ array_wr32(IGC_IVAR0, index, ivar);
+}
+
+static void igc_assign_vector(struct igc_q_vector *q_vector, int msix_vector)
+{
+ struct igc_adapter *adapter = q_vector->adapter;
+ struct igc_hw *hw = &adapter->hw;
+ int rx_queue = IGC_N0_QUEUE;
+ int tx_queue = IGC_N0_QUEUE;
+
+ if (q_vector->rx.ring)
+ rx_queue = q_vector->rx.ring->reg_idx;
+ if (q_vector->tx.ring)
+ tx_queue = q_vector->tx.ring->reg_idx;
+
+ switch (hw->mac.type) {
+ case igc_i225:
+ if (rx_queue > IGC_N0_QUEUE)
+ igc_write_ivar(hw, msix_vector,
+ rx_queue >> 1,
+ (rx_queue & 0x1) << 4);
+ if (tx_queue > IGC_N0_QUEUE)
+ igc_write_ivar(hw, msix_vector,
+ tx_queue >> 1,
+ ((tx_queue & 0x1) << 4) + 8);
+ q_vector->eims_value = BIT(msix_vector);
+ break;
+ default:
+ WARN_ONCE(hw->mac.type != igc_i225, "Wrong MAC type\n");
+ break;
+ }
+
+ /* add q_vector eims value to global eims_enable_mask */
+ adapter->eims_enable_mask |= q_vector->eims_value;
+
+ /* configure q_vector to set itr on first interrupt */
+ q_vector->set_itr = 1;
+}
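
For the i225 mapping above, each 32-bit IVAR row holds four 8-bit cells covering one queue pair: Rx of the even queue in bits 0-7, its Tx in bits 8-15, Rx of the odd queue in bits 16-23, and its Tx in bits 24-31. A small stand-alone helper (hypothetical, illustration only) reproduces the index/offset arithmetic used by igc_assign_vector():

#include <stdio.h>

static void ivar_cell(int queue, int is_tx, int *row, int *bit_offset)
{
        *row = queue >> 1;                        /* two queues per row */
        *bit_offset = ((queue & 1) << 4) + (is_tx ? 8 : 0);
}

int main(void)
{
        int row, off;

        ivar_cell(3, 1, &row, &off);  /* Tx queue 3 */
        printf("row %d, bits %d-%d\n", row, off, off + 7); /* row 1, bits 24-31 */
        return 0;
}
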
+
+/**
+ * igc_configure_msix - Configure MSI-X hardware
+ * @adapter: Pointer to adapter structure
+ *
+ * igc_configure_msix sets up the hardware to properly
+ * generate MSI-X interrupts.
+ */
+static void igc_configure_msix(struct igc_adapter *adapter)
+{
+ struct igc_hw *hw = &adapter->hw;
+ int i, vector = 0;
+ u32 tmp;
+
+ adapter->eims_enable_mask = 0;
+
+ /* set vector for other causes, i.e. link changes */
+ switch (hw->mac.type) {
+ case igc_i225:
+ /* Turn on MSI-X capability first, or our settings
+ * won't stick. And it will take days to debug.
+ */
+ wr32(IGC_GPIE, IGC_GPIE_MSIX_MODE |
+ IGC_GPIE_PBA | IGC_GPIE_EIAME |
+ IGC_GPIE_NSICR);
+
+ /* enable msix_other interrupt */
+ adapter->eims_other = BIT(vector);
+ tmp = (vector++ | IGC_IVAR_VALID) << 8;
+
+ wr32(IGC_IVAR_MISC, tmp);
+ break;
+ default:
+ /* do nothing, since nothing else supports MSI-X */
+ break;
+ } /* switch (hw->mac.type) */
+
+ adapter->eims_enable_mask |= adapter->eims_other;
+
+ for (i = 0; i < adapter->num_q_vectors; i++)
+ igc_assign_vector(adapter->q_vector[i], vector++);
+
+ wrfl();
+}
+
+/**
+ * igc_irq_enable - Enable default interrupt generation settings
+ * @adapter: board private structure
+ */
+static void igc_irq_enable(struct igc_adapter *adapter)
+{
+ struct igc_hw *hw = &adapter->hw;
+
+ if (adapter->msix_entries) {
+ u32 ims = IGC_IMS_LSC | IGC_IMS_DOUTSYNC | IGC_IMS_DRSTA;
+ u32 regval = rd32(IGC_EIAC);
+
+ wr32(IGC_EIAC, regval | adapter->eims_enable_mask);
+ regval = rd32(IGC_EIAM);
+ wr32(IGC_EIAM, regval | adapter->eims_enable_mask);
+ wr32(IGC_EIMS, adapter->eims_enable_mask);
+ wr32(IGC_IMS, ims);
+ } else {
+ wr32(IGC_IMS, IMS_ENABLE_MASK | IGC_IMS_DRSTA);
+ wr32(IGC_IAM, IMS_ENABLE_MASK | IGC_IMS_DRSTA);
+ }
+}
+
+/**
+ * igc_irq_disable - Mask off interrupt generation on the NIC
+ * @adapter: board private structure
+ */
+static void igc_irq_disable(struct igc_adapter *adapter)
+{
+ struct igc_hw *hw = &adapter->hw;
+
+ if (adapter->msix_entries) {
+ u32 regval = rd32(IGC_EIAM);
+
+ wr32(IGC_EIAM, regval & ~adapter->eims_enable_mask);
+ wr32(IGC_EIMC, adapter->eims_enable_mask);
+ regval = rd32(IGC_EIAC);
+ wr32(IGC_EIAC, regval & ~adapter->eims_enable_mask);
+ }
+
+ wr32(IGC_IAM, 0);
+ wr32(IGC_IMC, ~0);
+ wrfl();
+
+ if (adapter->msix_entries) {
+ int vector = 0, i;
+
+ synchronize_irq(adapter->msix_entries[vector++].vector);
+
+ for (i = 0; i < adapter->num_q_vectors; i++)
+ synchronize_irq(adapter->msix_entries[vector++].vector);
+ } else {
+ synchronize_irq(adapter->pdev->irq);
+ }
+}
+
+void igc_set_flag_queue_pairs(struct igc_adapter *adapter,
+ const u32 max_rss_queues)
+{
+ /* Determine if we need to pair queues: if rss_queues > half of
+ * max_rss_queues, pair the queues in order to conserve interrupts
+ * due to limited supply.
+ */
+ if (adapter->rss_queues > (max_rss_queues / 2))
+ adapter->flags |= IGC_FLAG_QUEUE_PAIRS;
+ else
+ adapter->flags &= ~IGC_FLAG_QUEUE_PAIRS;
+}
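
As a worked example of the rule above, with max_rss_queues == 4 (the value IGC_MAX_RX_QUEUES resolves to here, assumed for illustration): one or two RSS queues keep separate Tx and Rx vectors, while three or four force Tx/Rx pairing.

#include <stdio.h>

static int needs_queue_pairs(unsigned int rss_queues,
                             unsigned int max_rss_queues)
{
        return rss_queues > max_rss_queues / 2;
}

int main(void)
{
        unsigned int q;

        for (q = 1; q <= 4; q++)
                printf("rss_queues=%u -> pairs=%d\n", q,
                       needs_queue_pairs(q, 4)); /* 0 0 1 1 */
        return 0;
}
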
+
+unsigned int igc_get_max_rss_queues(struct igc_adapter *adapter)
+{
+ unsigned int max_rss_queues;
+
+ /* Determine the maximum number of RSS queues supported. */
+ max_rss_queues = IGC_MAX_RX_QUEUES;
+
+ return max_rss_queues;
+}
+
+static void igc_init_queue_configuration(struct igc_adapter *adapter)
+{
+ u32 max_rss_queues;
+
+ max_rss_queues = igc_get_max_rss_queues(adapter);
+ adapter->rss_queues = min_t(u32, max_rss_queues, num_online_cpus());
+
+ igc_set_flag_queue_pairs(adapter, max_rss_queues);
+}
+
+/**
+ * igc_reset_q_vector - Reset config for interrupt vector
+ * @adapter: board private structure to initialize
+ * @v_idx: Index of vector to be reset
+ *
+ * If NAPI is enabled it will delete any references to the
+ * NAPI struct. This is preparation for igc_free_q_vector.
+ */
+static void igc_reset_q_vector(struct igc_adapter *adapter, int v_idx)
+{
+ struct igc_q_vector *q_vector = adapter->q_vector[v_idx];
+
+ /* if we're coming from igc_set_interrupt_capability, the vectors are
+ * not yet allocated
+ */
+ if (!q_vector)
+ return;
+
+ if (q_vector->tx.ring)
+ adapter->tx_ring[q_vector->tx.ring->queue_index] = NULL;
+
+ if (q_vector->rx.ring)
+ adapter->rx_ring[q_vector->rx.ring->queue_index] = NULL;
+
+ netif_napi_del(&q_vector->napi);
+}
+
+/**
+ * igc_free_q_vector - Free memory allocated for specific interrupt vector
+ * @adapter: board private structure to initialize
+ * @v_idx: Index of vector to be freed
+ *
+ * This function frees the memory allocated to the q_vector.
+ */
+static void igc_free_q_vector(struct igc_adapter *adapter, int v_idx)
+{
+ struct igc_q_vector *q_vector = adapter->q_vector[v_idx];
+
+ adapter->q_vector[v_idx] = NULL;
+
+ /* igc_get_stats64() might access the rings on this vector, so we
+ * must wait a grace period before freeing it.
+ */
+ if (q_vector)
+ kfree_rcu(q_vector, rcu);
+}
+
+/**
+ * igc_free_q_vectors - Free memory allocated for interrupt vectors
+ * @adapter: board private structure to initialize
+ *
+ * This function frees the memory allocated to the q_vectors. In addition if
+ * NAPI is enabled it will delete any references to the NAPI struct prior
+ * to freeing the q_vector.
+ */
+static void igc_free_q_vectors(struct igc_adapter *adapter)
+{
+ int v_idx = adapter->num_q_vectors;
+
+ adapter->num_tx_queues = 0;
+ adapter->num_rx_queues = 0;
+ adapter->num_q_vectors = 0;
+
+ while (v_idx--) {
+ igc_reset_q_vector(adapter, v_idx);
+ igc_free_q_vector(adapter, v_idx);
+ }
+}
+
+/**
+ * igc_update_itr - update the dynamic ITR value based on statistics
+ * @q_vector: pointer to q_vector
+ * @ring_container: ring info to update the itr for
+ *
+ * Stores a new ITR value based on packets and byte
+ * counts during the last interrupt. The advantage of per interrupt
+ * computation is faster updates and more accurate ITR for the current
+ * traffic pattern. Constants in this function were computed
+ * based on theoretical maximum wire speed and thresholds were set based
+ * on testing data as well as attempting to minimize response time
+ * while increasing bulk throughput.
+ * NOTE: These calculations are only valid when operating in a single-
+ * queue environment.
+ */
+static void igc_update_itr(struct igc_q_vector *q_vector,
+ struct igc_ring_container *ring_container)
+{
+ unsigned int packets = ring_container->total_packets;
+ unsigned int bytes = ring_container->total_bytes;
+ u8 itrval = ring_container->itr;
+
+ /* no packets, exit with status unchanged */
+ if (packets == 0)
+ return;
+
+ switch (itrval) {
+ case lowest_latency:
+ /* handle TSO and jumbo frames */
+ if (bytes / packets > 8000)
+ itrval = bulk_latency;
+ else if ((packets < 5) && (bytes > 512))
+ itrval = low_latency;
+ break;
+ case low_latency: /* 50 usec aka 20000 ints/s */
+ if (bytes > 10000) {
+ /* this if handles the TSO accounting */
+ if (bytes / packets > 8000)
+ itrval = bulk_latency;
+ else if ((packets < 10) || ((bytes / packets) > 1200))
+ itrval = bulk_latency;
+ else if (packets > 35)
+ itrval = lowest_latency;
+ } else if (bytes / packets > 2000) {
+ itrval = bulk_latency;
+ } else if (packets <= 2 && bytes < 512) {
+ itrval = lowest_latency;
+ }
+ break;
+ case bulk_latency: /* 250 usec aka 4000 ints/s */
+ if (bytes > 25000) {
+ if (packets > 35)
+ itrval = low_latency;
+ } else if (bytes < 1500) {
+ itrval = low_latency;
+ }
+ break;
+ }
+
+ /* clear work counters since we have the values we need */
+ ring_container->total_bytes = 0;
+ ring_container->total_packets = 0;
+
+ /* write updated itr to ring container */
+ ring_container->itr = itrval;
+}
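
To see the thresholds in action, consider an interrupt window that saw 20 packets totalling 30000 bytes while in the low_latency state: 30000 > 10000, and 1500 bytes per packet exceeds the 1200 cutoff, so the state moves to bulk_latency. A stand-alone restatement of that branch, with the enum order matching the driver's latency levels:

#include <stdio.h>

enum { lowest_latency, low_latency, bulk_latency };

static int next_itr_from_low(unsigned int packets, unsigned int bytes)
{
        if (!packets)
                return low_latency;                /* no work: unchanged */
        if (bytes > 10000) {
                if (bytes / packets > 8000)
                        return bulk_latency;       /* TSO/jumbo traffic */
                if (packets < 10 || bytes / packets > 1200)
                        return bulk_latency;
                if (packets > 35)
                        return lowest_latency;
                return low_latency;
        }
        if (bytes / packets > 2000)
                return bulk_latency;
        if (packets <= 2 && bytes < 512)
                return lowest_latency;
        return low_latency;
}

int main(void)
{
        /* 20 full-size frames: 30000/20 = 1500 B/pkt -> bulk_latency */
        printf("%d\n", next_itr_from_low(20, 30000)); /* 2 */
        return 0;
}
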
+
+static void igc_set_itr(struct igc_q_vector *q_vector)
+{
+ struct igc_adapter *adapter = q_vector->adapter;
+ u32 new_itr = q_vector->itr_val;
+ u8 current_itr = 0;
+
+ /* for non-gigabit speeds, just fix the interrupt rate at 4000 */
+ switch (adapter->link_speed) {
+ case SPEED_10:
+ case SPEED_100:
+ current_itr = 0;
+ new_itr = IGC_4K_ITR;
+ goto set_itr_now;
+ default:
+ break;
+ }
+
+ igc_update_itr(q_vector, &q_vector->tx);
+ igc_update_itr(q_vector, &q_vector->rx);
+
+ current_itr = max(q_vector->rx.itr, q_vector->tx.itr);
+
+ /* conservative mode (itr 3) eliminates the lowest_latency setting */
+ if (current_itr == lowest_latency &&
+ ((q_vector->rx.ring && adapter->rx_itr_setting == 3) ||
+ (!q_vector->rx.ring && adapter->tx_itr_setting == 3)))
+ current_itr = low_latency;
+
+ switch (current_itr) {
+ /* counts and packets in update_itr are dependent on these numbers */
+ case lowest_latency:
+ new_itr = IGC_70K_ITR; /* 70,000 ints/sec */
+ break;
+ case low_latency:
+ new_itr = IGC_20K_ITR; /* 20,000 ints/sec */
+ break;
+ case bulk_latency:
+ new_itr = IGC_4K_ITR; /* 4,000 ints/sec */
+ break;
+ default:
+ break;
+ }
+
+set_itr_now:
+ if (new_itr != q_vector->itr_val) {
+ /* this attempts to bias the interrupt rate towards Bulk
+ * by adding intermediate steps when interrupt rate is
+ * increasing
+ */
+ new_itr = new_itr > q_vector->itr_val ?
+ max((new_itr * q_vector->itr_val) /
+ (new_itr + (q_vector->itr_val >> 2)),
+ new_itr) : new_itr;
+ /* Don't write the value here; it resets the adapter's
+ * internal timer, and causes us to delay far longer than
+ * we should between interrupts. Instead, we write the ITR
+ * value at the beginning of the next interrupt so the timing
+ * ends up being correct.
+ */
+ q_vector->itr_val = new_itr;
+ q_vector->set_itr = 1;
+ }
+}
+
+static void igc_reset_interrupt_capability(struct igc_adapter *adapter)
+{
+ int v_idx = adapter->num_q_vectors;
+
+ if (adapter->msix_entries) {
+ pci_disable_msix(adapter->pdev);
+ kfree(adapter->msix_entries);
+ adapter->msix_entries = NULL;
+ } else if (adapter->flags & IGC_FLAG_HAS_MSI) {
+ pci_disable_msi(adapter->pdev);
+ }
+
+ while (v_idx--)
+ igc_reset_q_vector(adapter, v_idx);
+}
+
+/**
+ * igc_set_interrupt_capability - set MSI or MSI-X if supported
+ * @adapter: Pointer to adapter structure
+ * @msix: boolean value for MSI-X capability
+ *
+ * Attempt to configure interrupts using the best available
+ * capabilities of the hardware and kernel.
+ */
+static void igc_set_interrupt_capability(struct igc_adapter *adapter,
+ bool msix)
+{
+ int numvecs, i;
+ int err;
+
+ if (!msix)
+ goto msi_only;
+ adapter->flags |= IGC_FLAG_HAS_MSIX;
+
+ /* Number of supported queues. */
+ adapter->num_rx_queues = adapter->rss_queues;
+
+ adapter->num_tx_queues = adapter->rss_queues;
+
+ /* start with one vector for every Rx queue */
+ numvecs = adapter->num_rx_queues;
+
+ /* if Tx handler is separate add 1 for every Tx queue */
+ if (!(adapter->flags & IGC_FLAG_QUEUE_PAIRS))
+ numvecs += adapter->num_tx_queues;
+
+ /* store the number of vectors reserved for queues */
+ adapter->num_q_vectors = numvecs;
+
+ /* add 1 vector for link status interrupts */
+ numvecs++;
+
+ adapter->msix_entries = kcalloc(numvecs, sizeof(struct msix_entry),
+ GFP_KERNEL);
+
+ if (!adapter->msix_entries)
+ return;
+
+ /* populate entry values */
+ for (i = 0; i < numvecs; i++)
+ adapter->msix_entries[i].entry = i;
+
+ err = pci_enable_msix_range(adapter->pdev,
+ adapter->msix_entries,
+ numvecs,
+ numvecs);
+ if (err > 0)
+ return;
+
+ kfree(adapter->msix_entries);
+ adapter->msix_entries = NULL;
+
+ igc_reset_interrupt_capability(adapter);
+
+msi_only:
+ adapter->flags &= ~IGC_FLAG_HAS_MSIX;
+
+ adapter->rss_queues = 1;
+ adapter->flags |= IGC_FLAG_QUEUE_PAIRS;
+ adapter->num_rx_queues = 1;
+ adapter->num_tx_queues = 1;
+ adapter->num_q_vectors = 1;
+ if (!pci_enable_msi(adapter->pdev))
+ adapter->flags |= IGC_FLAG_HAS_MSI;
+}
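
The vector budget above works out as: one vector per Rx queue, plus one per Tx queue only when queues are not paired, plus one for link and other causes. A quick stand-alone check:

#include <stdio.h>

static int msix_vectors_needed(int rx_queues, int tx_queues, int paired)
{
        int numvecs = rx_queues;

        if (!paired)
                numvecs += tx_queues;
        return numvecs + 1;          /* +1 for link status */
}

int main(void)
{
        printf("%d\n", msix_vectors_needed(4, 4, 1)); /* 4 paired queues: 5 */
        printf("%d\n", msix_vectors_needed(2, 2, 0)); /* 2+2 unpaired:   5 */
        return 0;
}
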
+
+/**
+ * igc_update_ring_itr - update the dynamic ITR value based on packet size
+ * @q_vector: pointer to q_vector
+ *
+ * Stores a new ITR value based strictly on packet size. This
+ * algorithm is less sophisticated than that used in igc_update_itr,
+ * due to the difficulty of synchronizing statistics across multiple
+ * receive rings. The divisors and thresholds used by this function
+ * were determined based on theoretical maximum wire speed and testing
+ * data, in order to minimize response time while increasing bulk
+ * throughput.
+ * NOTE: This function is called only when operating in a multiqueue
+ * receive environment.
+ */
+static void igc_update_ring_itr(struct igc_q_vector *q_vector)
+{
+ struct igc_adapter *adapter = q_vector->adapter;
+ int new_val = q_vector->itr_val;
+ int avg_wire_size = 0;
+ unsigned int packets;
+
+ /* For non-gigabit speeds, just fix the interrupt rate at 4000
+ * ints/sec - ITR timer value of 120 ticks.
+ */
+ switch (adapter->link_speed) {
+ case SPEED_10:
+ case SPEED_100:
+ new_val = IGC_4K_ITR;
+ goto set_itr_val;
+ default:
+ break;
+ }
+
+ packets = q_vector->rx.total_packets;
+ if (packets)
+ avg_wire_size = q_vector->rx.total_bytes / packets;
+
+ packets = q_vector->tx.total_packets;
+ if (packets)
+ avg_wire_size = max_t(u32, avg_wire_size,
+ q_vector->tx.total_bytes / packets);
+
+ /* if avg_wire_size isn't set no work was done */
+ if (!avg_wire_size)
+ goto clear_counts;
+
+ /* Add 24 bytes to size to account for CRC, preamble, and gap */
+ avg_wire_size += 24;
+
+ /* Don't starve jumbo frames */
+ avg_wire_size = min(avg_wire_size, 3000);
+
+ /* Give a little boost to mid-size frames */
+ if (avg_wire_size > 300 && avg_wire_size < 1200)
+ new_val = avg_wire_size / 3;
+ else
+ new_val = avg_wire_size / 2;
+
+ /* conservative mode (itr 3) eliminates the lowest_latency setting */
+ if (new_val < IGC_20K_ITR &&
+ ((q_vector->rx.ring && adapter->rx_itr_setting == 3) ||
+ (!q_vector->rx.ring && adapter->tx_itr_setting == 3)))
+ new_val = IGC_20K_ITR;
+
+set_itr_val:
+ if (new_val != q_vector->itr_val) {
+ q_vector->itr_val = new_val;
+ q_vector->set_itr = 1;
+ }
+clear_counts:
+ q_vector->rx.total_bytes = 0;
+ q_vector->rx.total_packets = 0;
+ q_vector->tx.total_bytes = 0;
+ q_vector->tx.total_packets = 0;
+}
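
Tracing the heuristic above: 600-byte frames average out to 624 with overhead, which lands in the mid-size boost window and yields an ITR of 208; 100-byte frames give 62, which the conservative floor raises to the 20K-ITR value. Stand-alone restatement (196 matches IGC_20K_ITR at the time of writing, hard-coded here only for the example):

#include <stdio.h>

static int itr_from_avg_wire_size(int avg_wire_size, int conservative)
{
        int new_val;

        avg_wire_size += 24;                 /* CRC + preamble + gap */
        if (avg_wire_size > 3000)
                avg_wire_size = 3000;        /* don't starve jumbo frames */

        if (avg_wire_size > 300 && avg_wire_size < 1200)
                new_val = avg_wire_size / 3; /* boost mid-size frames */
        else
                new_val = avg_wire_size / 2;

        if (conservative && new_val < 196)
                new_val = 196;               /* 20K-ITR floor */
        return new_val;
}

int main(void)
{
        printf("%d\n", itr_from_avg_wire_size(600, 0)); /* 208 */
        printf("%d\n", itr_from_avg_wire_size(100, 1)); /* 196 */
        return 0;
}
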
+
+static void igc_ring_irq_enable(struct igc_q_vector *q_vector)
+{
+ struct igc_adapter *adapter = q_vector->adapter;
+ struct igc_hw *hw = &adapter->hw;
+
+ if ((q_vector->rx.ring && (adapter->rx_itr_setting & 3)) ||
+ (!q_vector->rx.ring && (adapter->tx_itr_setting & 3))) {
+ if (adapter->num_q_vectors == 1)
+ igc_set_itr(q_vector);
+ else
+ igc_update_ring_itr(q_vector);
+ }
+
+ if (!test_bit(__IGC_DOWN, &adapter->state)) {
+ if (adapter->msix_entries)
+ wr32(IGC_EIMS, q_vector->eims_value);
+ else
+ igc_irq_enable(adapter);
+ }
+}
+
+static void igc_add_ring(struct igc_ring *ring,
+ struct igc_ring_container *head)
+{
+ head->ring = ring;
+ head->count++;
+}
+
+/**
+ * igc_cache_ring_register - Descriptor ring to register mapping
+ * @adapter: board private structure to initialize
+ *
+ * Once we know the feature-set enabled for the device, we'll cache
+ * the register offset the descriptor ring is assigned to.
+ */
+static void igc_cache_ring_register(struct igc_adapter *adapter)
+{
+ int i = 0, j = 0;
+
+ switch (adapter->hw.mac.type) {
+ case igc_i225:
+ /* Fall through */
+ default:
+ for (; i < adapter->num_rx_queues; i++)
+ adapter->rx_ring[i]->reg_idx = i;
+ for (; j < adapter->num_tx_queues; j++)
+ adapter->tx_ring[j]->reg_idx = j;
+ break;
+ }
+}
+
+/**
+ * igc_poll - NAPI Rx polling callback
+ * @napi: napi polling structure
+ * @budget: count of how many packets we should handle
+ */
+static int igc_poll(struct napi_struct *napi, int budget)
+{
+ struct igc_q_vector *q_vector = container_of(napi,
+ struct igc_q_vector,
+ napi);
+ bool clean_complete = true;
+ int work_done = 0;
+
+ if (q_vector->tx.ring)
+ clean_complete = igc_clean_tx_irq(q_vector, budget);
+
+ if (q_vector->rx.ring) {
+ int cleaned = igc_clean_rx_irq(q_vector, budget);
+
+ work_done += cleaned;
+ if (cleaned >= budget)
+ clean_complete = false;
+ }
+
+ /* If all work not completed, return budget and keep polling */
+ if (!clean_complete)
+ return budget;
+
+ /* Exit the polling mode, but don't re-enable interrupts if stack might
+ * poll us due to busy-polling
+ */
+ if (likely(napi_complete_done(napi, work_done)))
+ igc_ring_irq_enable(q_vector);
+
+ return min(work_done, budget - 1);
+}
+
+/**
+ * igc_alloc_q_vector - Allocate memory for a single interrupt vector
+ * @adapter: board private structure to initialize
+ * @v_count: q_vectors allocated on adapter, used for ring interleaving
+ * @v_idx: index of vector in adapter struct
+ * @txr_count: total number of Tx rings to allocate
+ * @txr_idx: index of first Tx ring to allocate
+ * @rxr_count: total number of Rx rings to allocate
+ * @rxr_idx: index of first Rx ring to allocate
+ *
+ * We allocate one q_vector. If allocation fails we return -ENOMEM.
+ */
+static int igc_alloc_q_vector(struct igc_adapter *adapter,
+ unsigned int v_count, unsigned int v_idx,
+ unsigned int txr_count, unsigned int txr_idx,
+ unsigned int rxr_count, unsigned int rxr_idx)
+{
+ struct igc_q_vector *q_vector;
+ struct igc_ring *ring;
+ int ring_count;
+
+ /* igc only supports 1 Tx and/or 1 Rx queue per vector */
+ if (txr_count > 1 || rxr_count > 1)
+ return -ENOMEM;
+
+ ring_count = txr_count + rxr_count;
+
+ /* allocate q_vector and rings */
+ q_vector = adapter->q_vector[v_idx];
+ if (!q_vector)
+ q_vector = kzalloc(struct_size(q_vector, ring, ring_count),
+ GFP_KERNEL);
+ else
+ memset(q_vector, 0, struct_size(q_vector, ring, ring_count));
+ if (!q_vector)
+ return -ENOMEM;
+
+ /* initialize NAPI */
+ netif_napi_add(adapter->netdev, &q_vector->napi,
+ igc_poll, 64);
+
+ /* tie q_vector and adapter together */
+ adapter->q_vector[v_idx] = q_vector;
+ q_vector->adapter = adapter;
+
+ /* initialize work limits */
+ q_vector->tx.work_limit = adapter->tx_work_limit;
+
+ /* initialize ITR configuration */
+ q_vector->itr_register = adapter->io_addr + IGC_EITR(0);
+ q_vector->itr_val = IGC_START_ITR;
+
+ /* initialize pointer to rings */
+ ring = q_vector->ring;
+
+ /* initialize ITR */
+ if (rxr_count) {
+ /* rx or rx/tx vector */
+ if (!adapter->rx_itr_setting || adapter->rx_itr_setting > 3)
+ q_vector->itr_val = adapter->rx_itr_setting;
+ } else {
+ /* tx only vector */
+ if (!adapter->tx_itr_setting || adapter->tx_itr_setting > 3)
+ q_vector->itr_val = adapter->tx_itr_setting;
+ }
+
+ if (txr_count) {
+ /* assign generic ring traits */
+ ring->dev = &adapter->pdev->dev;
+ ring->netdev = adapter->netdev;
+
+ /* configure backlink on ring */
+ ring->q_vector = q_vector;
+
+ /* update q_vector Tx values */
+ igc_add_ring(ring, &q_vector->tx);
+
+ /* apply Tx specific ring traits */
+ ring->count = adapter->tx_ring_count;
+ ring->queue_index = txr_idx;
+
+ /* assign ring to adapter */
+ adapter->tx_ring[txr_idx] = ring;
+
+ /* push pointer to next ring */
+ ring++;
+ }
+
+ if (rxr_count) {
+ /* assign generic ring traits */
+ ring->dev = &adapter->pdev->dev;
+ ring->netdev = adapter->netdev;
+
+ /* configure backlink on ring */
+ ring->q_vector = q_vector;
+
+ /* update q_vector Rx values */
+ igc_add_ring(ring, &q_vector->rx);
+
+ /* apply Rx specific ring traits */
+ ring->count = adapter->rx_ring_count;
+ ring->queue_index = rxr_idx;
+
+ /* assign ring to adapter */
+ adapter->rx_ring[rxr_idx] = ring;
+ }
+
+ return 0;
+}
+
+/**
+ * igc_alloc_q_vectors - Allocate memory for interrupt vectors
+ * @adapter: board private structure to initialize
+ *
+ * We allocate one q_vector per queue interrupt. If allocation fails we
+ * return -ENOMEM.
+ */
+static int igc_alloc_q_vectors(struct igc_adapter *adapter)
+{
+ int rxr_remaining = adapter->num_rx_queues;
+ int txr_remaining = adapter->num_tx_queues;
+ int rxr_idx = 0, txr_idx = 0, v_idx = 0;
+ int q_vectors = adapter->num_q_vectors;
+ int err;
+
+ if (q_vectors >= (rxr_remaining + txr_remaining)) {
+ for (; rxr_remaining; v_idx++) {
+ err = igc_alloc_q_vector(adapter, q_vectors, v_idx,
+ 0, 0, 1, rxr_idx);
+
+ if (err)
+ goto err_out;
+
+ /* update counts and index */
+ rxr_remaining--;
+ rxr_idx++;
+ }
+ }
+
+ for (; v_idx < q_vectors; v_idx++) {
+ int rqpv = DIV_ROUND_UP(rxr_remaining, q_vectors - v_idx);
+ int tqpv = DIV_ROUND_UP(txr_remaining, q_vectors - v_idx);
+
+ err = igc_alloc_q_vector(adapter, q_vectors, v_idx,
+ tqpv, txr_idx, rqpv, rxr_idx);
+
+ if (err)
+ goto err_out;
+
+ /* update counts and index */
+ rxr_remaining -= rqpv;
+ txr_remaining -= tqpv;
+ rxr_idx++;
+ txr_idx++;
+ }
+
+ return 0;
+
+err_out:
+ adapter->num_tx_queues = 0;
+ adapter->num_rx_queues = 0;
+ adapter->num_q_vectors = 0;
+
+ while (v_idx--)
+ igc_free_q_vector(adapter, v_idx);
+
+ return -ENOMEM;
+}
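
The DIV_ROUND_UP spreading above keeps the remaining rings evenly distributed over the remaining vectors; with 3 Rx and 3 Tx rings on 4 vectors it assigns one Rx/Tx pair to each of the first three vectors and leaves the last one empty. A stand-alone trace of the loop arithmetic:

#include <stdio.h>

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

int main(void)
{
        int rxr_remaining = 3, txr_remaining = 3, q_vectors = 4;
        int v_idx;

        for (v_idx = 0; v_idx < q_vectors; v_idx++) {
                int rqpv = DIV_ROUND_UP(rxr_remaining, q_vectors - v_idx);
                int tqpv = DIV_ROUND_UP(txr_remaining, q_vectors - v_idx);

                printf("vector %d: %d rx, %d tx\n", v_idx, rqpv, tqpv);
                rxr_remaining -= rqpv;
                txr_remaining -= tqpv;
        }
        return 0;
}
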
+
+/**
+ * igc_init_interrupt_scheme - initialize interrupts, allocate queues/vectors
+ * @adapter: Pointer to adapter structure
+ * @msix: boolean for MSI-X capability
+ *
+ * This function initializes the interrupts and allocates all of the queues.
+ */
+static int igc_init_interrupt_scheme(struct igc_adapter *adapter, bool msix)
+{
+ struct pci_dev *pdev = adapter->pdev;
+ int err = 0;
+
+ igc_set_interrupt_capability(adapter, msix);
+
+ err = igc_alloc_q_vectors(adapter);
+ if (err) {
+ dev_err(&pdev->dev, "Unable to allocate memory for vectors\n");
+ goto err_alloc_q_vectors;
+ }
+
+ igc_cache_ring_register(adapter);
+
+ return 0;
+
+err_alloc_q_vectors:
+ igc_reset_interrupt_capability(adapter);
+ return err;
+}
+
+/**
+ * igc_sw_init - Initialize general software structures (struct igc_adapter)
+ * @adapter: board private structure to initialize
+ *
+ * igc_sw_init initializes the Adapter private data structure.
+ * Fields are initialized based on PCI device information and
+ * OS network device settings (MTU size).
+ */
+static int igc_sw_init(struct igc_adapter *adapter)
+{
+ struct net_device *netdev = adapter->netdev;
+ struct pci_dev *pdev = adapter->pdev;
+ struct igc_hw *hw = &adapter->hw;
+
+ int size = sizeof(struct igc_mac_addr) * hw->mac.rar_entry_count;
+
+ pci_read_config_word(pdev, PCI_COMMAND, &hw->bus.pci_cmd_word);
+
+ /* set default ring sizes */
+ adapter->tx_ring_count = IGC_DEFAULT_TXD;
+ adapter->rx_ring_count = IGC_DEFAULT_RXD;
+
+ /* set default ITR values */
+ adapter->rx_itr_setting = IGC_DEFAULT_ITR;
+ adapter->tx_itr_setting = IGC_DEFAULT_ITR;
+
+ /* set default work limits */
+ adapter->tx_work_limit = IGC_DEFAULT_TX_WORK;
+
+ /* adjust max frame to be at least the size of a standard frame */
+ adapter->max_frame_size = netdev->mtu + ETH_HLEN + ETH_FCS_LEN +
+ VLAN_HLEN;
+ adapter->min_frame_size = ETH_ZLEN + ETH_FCS_LEN;
+
+ spin_lock_init(&adapter->nfc_lock);
+ spin_lock_init(&adapter->stats64_lock);
+ /* Assume MSI-X interrupts, will be checked during IRQ allocation */
+ adapter->flags |= IGC_FLAG_HAS_MSIX;
+
+ adapter->mac_table = kzalloc(size, GFP_ATOMIC);
+ if (!adapter->mac_table)
+ return -ENOMEM;
+
+ igc_init_queue_configuration(adapter);
+
+ /* This call may decrease the number of queues */
+ if (igc_init_interrupt_scheme(adapter, true)) {
+ dev_err(&pdev->dev, "Unable to allocate memory for queues\n");
+ return -ENOMEM;
+ }
+
+ /* Explicitly disable IRQ since the NIC can be in any state. */
+ igc_irq_disable(adapter);
+
+ set_bit(__IGC_DOWN, &adapter->state);
+
+ return 0;
+}
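
The frame-size bounds computed in igc_sw_init() work out, for the standard 1500-byte MTU, to a 1522-byte maximum (MTU plus Ethernet header, FCS and one VLAN tag) and a 64-byte minimum. The header lengths below follow the usual Ethernet definitions:

#include <stdio.h>

#define ETH_HLEN    14  /* dst + src + ethertype */
#define ETH_FCS_LEN  4  /* frame check sequence (CRC) */
#define VLAN_HLEN    4  /* 802.1Q tag */
#define ETH_ZLEN    60  /* minimum frame length without FCS */

int main(void)
{
        int mtu = 1500;

        printf("max_frame_size = %d\n",
               mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN); /* 1522 */
        printf("min_frame_size = %d\n",
               ETH_ZLEN + ETH_FCS_LEN);                   /* 64 */
        return 0;
}
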
+
/**
* igc_up - Open the interface and prepare it to handle traffic
* @adapter: board private structure
@@ -2163,18 +3464,6 @@ static void igc_nfc_filter_exit(struct igc_adapter *adapter)
spin_unlock(&adapter->nfc_lock);
}
-static void igc_nfc_filter_restore(struct igc_adapter *adapter)
-{
- struct igc_nfc_filter *rule;
-
- spin_lock(&adapter->nfc_lock);
-
- hlist_for_each_entry(rule, &adapter->nfc_filter_list, nfc_node)
- igc_add_filter(adapter, rule);
-
- spin_unlock(&adapter->nfc_lock);
-}
-
/**
* igc_down - Close the interface
* @adapter: board private structure
@@ -2398,105 +3687,6 @@ igc_features_check(struct sk_buff *skb, struct net_device *dev,
return features;
}
-/**
- * igc_configure - configure the hardware for RX and TX
- * @adapter: private board structure
- */
-static void igc_configure(struct igc_adapter *adapter)
-{
- struct net_device *netdev = adapter->netdev;
- int i = 0;
-
- igc_get_hw_control(adapter);
- igc_set_rx_mode(netdev);
-
- igc_setup_tctl(adapter);
- igc_setup_mrqc(adapter);
- igc_setup_rctl(adapter);
-
- igc_nfc_filter_restore(adapter);
- igc_configure_tx(adapter);
- igc_configure_rx(adapter);
-
- igc_rx_fifo_flush_base(&adapter->hw);
-
- /* call igc_desc_unused which always leaves
- * at least 1 descriptor unused to make sure
- * next_to_use != next_to_clean
- */
- for (i = 0; i < adapter->num_rx_queues; i++) {
- struct igc_ring *ring = adapter->rx_ring[i];
-
- igc_alloc_rx_buffers(ring, igc_desc_unused(ring));
- }
-}
-
-/**
- * igc_rar_set_index - Sync RAL[index] and RAH[index] registers with MAC table
- * @adapter: address of board private structure
- * @index: Index of the RAR entry which need to be synced with MAC table
- */
-static void igc_rar_set_index(struct igc_adapter *adapter, u32 index)
-{
- u8 *addr = adapter->mac_table[index].addr;
- struct igc_hw *hw = &adapter->hw;
- u32 rar_low, rar_high;
-
- /* HW expects these to be in network order when they are plugged
- * into the registers which are little endian. In order to guarantee
- * that ordering we need to do an leXX_to_cpup here in order to be
- * ready for the byteswap that occurs with writel
- */
- rar_low = le32_to_cpup((__le32 *)(addr));
- rar_high = le16_to_cpup((__le16 *)(addr + 4));
-
- /* Indicate to hardware the Address is Valid. */
- if (adapter->mac_table[index].state & IGC_MAC_STATE_IN_USE) {
- if (is_valid_ether_addr(addr))
- rar_high |= IGC_RAH_AV;
-
- rar_high |= IGC_RAH_POOL_1 <<
- adapter->mac_table[index].queue;
- }
-
- wr32(IGC_RAL(index), rar_low);
- wrfl();
- wr32(IGC_RAH(index), rar_high);
- wrfl();
-}
-
-/* Set default MAC address for the PF in the first RAR entry */
-static void igc_set_default_mac_filter(struct igc_adapter *adapter)
-{
- struct igc_mac_addr *mac_table = &adapter->mac_table[0];
-
- ether_addr_copy(mac_table->addr, adapter->hw.mac.addr);
- mac_table->state = IGC_MAC_STATE_DEFAULT | IGC_MAC_STATE_IN_USE;
-
- igc_rar_set_index(adapter, 0);
-}
-
-/* If the filter to be added and an already existing filter express
- * the same address and address type, it should be possible to only
- * override the other configurations, for example the queue to steer
- * traffic.
- */
-static bool igc_mac_entry_can_be_used(const struct igc_mac_addr *entry,
- const u8 *addr, const u8 flags)
-{
- if (!(entry->state & IGC_MAC_STATE_IN_USE))
- return true;
-
- if ((entry->state & IGC_MAC_STATE_SRC_ADDR) !=
- (flags & IGC_MAC_STATE_SRC_ADDR))
- return false;
-
- if (!ether_addr_equal(addr, entry->addr))
- return false;
-
- return true;
-}
-
/* Add a MAC filter for 'addr' directing matching traffic to 'queue',
* 'flags' is used to indicate what kind of match is made, match is by
* default for the destination address, if matching by source address
@@ -2597,159 +3787,20 @@ int igc_del_mac_steering_filter(struct igc_adapter *adapter,
IGC_MAC_STATE_QUEUE_STEERING | flags);
}
-/* Add a MAC filter for 'addr' directing matching traffic to 'queue',
- * 'flags' is used to indicate what kind of match is made, match is by
- * default for the destination address, if matching by source address
- * is desired the flag IGC_MAC_STATE_SRC_ADDR can be used.
- */
-static int igc_add_mac_filter(struct igc_adapter *adapter,
- const u8 *addr, const u8 queue)
+static void igc_tsync_interrupt(struct igc_adapter *adapter)
{
struct igc_hw *hw = &adapter->hw;
- int rar_entries = hw->mac.rar_entry_count;
- int i;
-
- if (is_zero_ether_addr(addr))
- return -EINVAL;
-
- /* Search for the first empty entry in the MAC table.
- * Do not touch entries at the end of the table reserved for the VF MAC
- * addresses.
- */
- for (i = 0; i < rar_entries; i++) {
- if (!igc_mac_entry_can_be_used(&adapter->mac_table[i],
- addr, 0))
- continue;
-
- ether_addr_copy(adapter->mac_table[i].addr, addr);
- adapter->mac_table[i].queue = queue;
- adapter->mac_table[i].state |= IGC_MAC_STATE_IN_USE;
-
- igc_rar_set_index(adapter, i);
- return i;
- }
-
- return -ENOSPC;
-}
-
-/* Remove a MAC filter for 'addr' directing matching traffic to
- * 'queue', 'flags' is used to indicate what kind of match need to be
- * removed, match is by default for the destination address, if
- * matching by source address is to be removed the flag
- * IGC_MAC_STATE_SRC_ADDR can be used.
- */
-static int igc_del_mac_filter(struct igc_adapter *adapter,
- const u8 *addr, const u8 queue)
-{
- struct igc_hw *hw = &adapter->hw;
- int rar_entries = hw->mac.rar_entry_count;
- int i;
-
- if (is_zero_ether_addr(addr))
- return -EINVAL;
-
- /* Search for matching entry in the MAC table based on given address
- * and queue. Do not touch entries at the end of the table reserved
- * for the VF MAC addresses.
- */
- for (i = 0; i < rar_entries; i++) {
- if (!(adapter->mac_table[i].state & IGC_MAC_STATE_IN_USE))
- continue;
- if (adapter->mac_table[i].state != 0)
- continue;
- if (adapter->mac_table[i].queue != queue)
- continue;
- if (!ether_addr_equal(adapter->mac_table[i].addr, addr))
- continue;
-
- /* When a filter for the default address is "deleted",
- * we return it to its initial configuration
- */
- if (adapter->mac_table[i].state & IGC_MAC_STATE_DEFAULT) {
- adapter->mac_table[i].state =
- IGC_MAC_STATE_DEFAULT | IGC_MAC_STATE_IN_USE;
- adapter->mac_table[i].queue = 0;
- } else {
- adapter->mac_table[i].state = 0;
- adapter->mac_table[i].queue = 0;
- memset(adapter->mac_table[i].addr, 0, ETH_ALEN);
- }
+ u32 tsicr = rd32(IGC_TSICR);
+ u32 ack = 0;
- igc_rar_set_index(adapter, i);
- return 0;
+ if (tsicr & IGC_TSICR_TXTS) {
+ /* retrieve hardware timestamp */
+ schedule_work(&adapter->ptp_tx_work);
+ ack |= IGC_TSICR_TXTS;
}
- return -ENOENT;
-}
-
-static int igc_uc_sync(struct net_device *netdev, const unsigned char *addr)
-{
- struct igc_adapter *adapter = netdev_priv(netdev);
- int ret;
-
- ret = igc_add_mac_filter(adapter, addr, adapter->num_rx_queues);
-
- return min_t(int, ret, 0);
-}
-
-static int igc_uc_unsync(struct net_device *netdev, const unsigned char *addr)
-{
- struct igc_adapter *adapter = netdev_priv(netdev);
-
- igc_del_mac_filter(adapter, addr, adapter->num_rx_queues);
-
- return 0;
-}
-
-/**
- * igc_set_rx_mode - Secondary Unicast, Multicast and Promiscuous mode set
- * @netdev: network interface device structure
- *
- * The set_rx_mode entry point is called whenever the unicast or multicast
- * address lists or the network interface flags are updated. This routine is
- * responsible for configuring the hardware for proper unicast, multicast,
- * promiscuous mode, and all-multi behavior.
- */
-static void igc_set_rx_mode(struct net_device *netdev)
-{
- struct igc_adapter *adapter = netdev_priv(netdev);
- struct igc_hw *hw = &adapter->hw;
- u32 rctl = 0, rlpml = MAX_JUMBO_FRAME_SIZE;
- int count;
-
- /* Check for Promiscuous and All Multicast modes */
- if (netdev->flags & IFF_PROMISC) {
- rctl |= IGC_RCTL_UPE | IGC_RCTL_MPE;
- } else {
- if (netdev->flags & IFF_ALLMULTI) {
- rctl |= IGC_RCTL_MPE;
- } else {
- /* Write addresses to the MTA, if the attempt fails
- * then we should just turn on promiscuous mode so
- * that we can at least receive multicast traffic
- */
- count = igc_write_mc_addr_list(netdev);
- if (count < 0)
- rctl |= IGC_RCTL_MPE;
- }
- }
-
- /* Write addresses to available RAR registers, if there is not
- * sufficient space to store all the addresses then enable
- * unicast promiscuous mode
- */
- if (__dev_uc_sync(netdev, igc_uc_sync, igc_uc_unsync))
- rctl |= IGC_RCTL_UPE;
-
- /* update state of unicast and multicast */
- rctl |= rd32(IGC_RCTL) & ~(IGC_RCTL_UPE | IGC_RCTL_MPE);
- wr32(IGC_RCTL, rctl);
-
-#if (PAGE_SIZE < 8192)
- if (adapter->max_frame_size <= IGC_MAX_FRAME_BUILD_SKB)
- rlpml = IGC_MAX_FRAME_BUILD_SKB;
-#endif
- wr32(IGC_RLPML, rlpml);
+ /* acknowledge the interrupts */
+ wr32(IGC_TSICR, ack);
}
/**
@@ -2779,114 +3830,28 @@ static irqreturn_t igc_msix_other(int irq, void *data)
mod_timer(&adapter->watchdog_timer, jiffies + 1);
}
+ if (icr & IGC_ICR_TS)
+ igc_tsync_interrupt(adapter);
+
wr32(IGC_EIMS, adapter->eims_other);
return IRQ_HANDLED;
}
-/**
- * igc_write_ivar - configure ivar for given MSI-X vector
- * @hw: pointer to the HW structure
- * @msix_vector: vector number we are allocating to a given ring
- * @index: row index of IVAR register to write within IVAR table
- * @offset: column offset of in IVAR, should be multiple of 8
- *
- * The IVAR table consists of 2 columns,
- * each containing an cause allocation for an Rx and Tx ring, and a
- * variable number of rows depending on the number of queues supported.
- */
-static void igc_write_ivar(struct igc_hw *hw, int msix_vector,
- int index, int offset)
-{
- u32 ivar = array_rd32(IGC_IVAR0, index);
-
- /* clear any bits that are currently set */
- ivar &= ~((u32)0xFF << offset);
-
- /* write vector and valid bit */
- ivar |= (msix_vector | IGC_IVAR_VALID) << offset;
-
- array_wr32(IGC_IVAR0, index, ivar);
-}
-
-static void igc_assign_vector(struct igc_q_vector *q_vector, int msix_vector)
-{
- struct igc_adapter *adapter = q_vector->adapter;
- struct igc_hw *hw = &adapter->hw;
- int rx_queue = IGC_N0_QUEUE;
- int tx_queue = IGC_N0_QUEUE;
-
- if (q_vector->rx.ring)
- rx_queue = q_vector->rx.ring->reg_idx;
- if (q_vector->tx.ring)
- tx_queue = q_vector->tx.ring->reg_idx;
-
- switch (hw->mac.type) {
- case igc_i225:
- if (rx_queue > IGC_N0_QUEUE)
- igc_write_ivar(hw, msix_vector,
- rx_queue >> 1,
- (rx_queue & 0x1) << 4);
- if (tx_queue > IGC_N0_QUEUE)
- igc_write_ivar(hw, msix_vector,
- tx_queue >> 1,
- ((tx_queue & 0x1) << 4) + 8);
- q_vector->eims_value = BIT(msix_vector);
- break;
- default:
- WARN_ONCE(hw->mac.type != igc_i225, "Wrong MAC type\n");
- break;
- }
-
- /* add q_vector eims value to global eims_enable_mask */
- adapter->eims_enable_mask |= q_vector->eims_value;
-
- /* configure q_vector to set itr on first interrupt */
- q_vector->set_itr = 1;
-}
-
-/**
- * igc_configure_msix - Configure MSI-X hardware
- * @adapter: Pointer to adapter structure
- *
- * igc_configure_msix sets up the hardware to properly
- * generate MSI-X interrupts.
- */
-static void igc_configure_msix(struct igc_adapter *adapter)
+static void igc_write_itr(struct igc_q_vector *q_vector)
{
- struct igc_hw *hw = &adapter->hw;
- int i, vector = 0;
- u32 tmp;
-
- adapter->eims_enable_mask = 0;
-
- /* set vector for other causes, i.e. link changes */
- switch (hw->mac.type) {
- case igc_i225:
- /* Turn on MSI-X capability first, or our settings
- * won't stick. And it will take days to debug.
- */
- wr32(IGC_GPIE, IGC_GPIE_MSIX_MODE |
- IGC_GPIE_PBA | IGC_GPIE_EIAME |
- IGC_GPIE_NSICR);
-
- /* enable msix_other interrupt */
- adapter->eims_other = BIT(vector);
- tmp = (vector++ | IGC_IVAR_VALID) << 8;
+ u32 itr_val = q_vector->itr_val & IGC_QVECTOR_MASK;
- wr32(IGC_IVAR_MISC, tmp);
- break;
- default:
- /* do nothing, since nothing else supports MSI-X */
- break;
- } /* switch (hw->mac.type) */
+ if (!q_vector->set_itr)
+ return;
- adapter->eims_enable_mask |= adapter->eims_other;
+ if (!itr_val)
+ itr_val = IGC_ITR_VAL_MASK;
- for (i = 0; i < adapter->num_q_vectors; i++)
- igc_assign_vector(adapter->q_vector[i], vector++);
+ itr_val |= IGC_EITR_CNT_IGNR;
- wrfl();
+ writel(itr_val, q_vector->itr_register);
+ q_vector->set_itr = 0;
}
static irqreturn_t igc_msix_ring(int irq, void *data)
@@ -2961,49 +3926,6 @@ err_out:
}
/**
- * igc_reset_q_vector - Reset config for interrupt vector
- * @adapter: board private structure to initialize
- * @v_idx: Index of vector to be reset
- *
- * If NAPI is enabled it will delete any references to the
- * NAPI struct. This is preparation for igc_free_q_vector.
- */
-static void igc_reset_q_vector(struct igc_adapter *adapter, int v_idx)
-{
- struct igc_q_vector *q_vector = adapter->q_vector[v_idx];
-
- /* if we're coming from igc_set_interrupt_capability, the vectors are
- * not yet allocated
- */
- if (!q_vector)
- return;
-
- if (q_vector->tx.ring)
- adapter->tx_ring[q_vector->tx.ring->queue_index] = NULL;
-
- if (q_vector->rx.ring)
- adapter->rx_ring[q_vector->rx.ring->queue_index] = NULL;
-
- netif_napi_del(&q_vector->napi);
-}
-
-static void igc_reset_interrupt_capability(struct igc_adapter *adapter)
-{
- int v_idx = adapter->num_q_vectors;
-
- if (adapter->msix_entries) {
- pci_disable_msix(adapter->pdev);
- kfree(adapter->msix_entries);
- adapter->msix_entries = NULL;
- } else if (adapter->flags & IGC_FLAG_HAS_MSI) {
- pci_disable_msi(adapter->pdev);
- }
-
- while (v_idx--)
- igc_reset_q_vector(adapter, v_idx);
-}
-
-/**
* igc_clear_interrupt_scheme - reset the device to a state of no interrupts
* @adapter: Pointer to adapter structure
*
@@ -3016,48 +3938,6 @@ static void igc_clear_interrupt_scheme(struct igc_adapter *adapter)
igc_reset_interrupt_capability(adapter);
}
-/**
- * igc_free_q_vectors - Free memory allocated for interrupt vectors
- * @adapter: board private structure to initialize
- *
- * This function frees the memory allocated to the q_vectors. In addition if
- * NAPI is enabled it will delete any references to the NAPI struct prior
- * to freeing the q_vector.
- */
-static void igc_free_q_vectors(struct igc_adapter *adapter)
-{
- int v_idx = adapter->num_q_vectors;
-
- adapter->num_tx_queues = 0;
- adapter->num_rx_queues = 0;
- adapter->num_q_vectors = 0;
-
- while (v_idx--) {
- igc_reset_q_vector(adapter, v_idx);
- igc_free_q_vector(adapter, v_idx);
- }
-}
-
-/**
- * igc_free_q_vector - Free memory allocated for specific interrupt vector
- * @adapter: board private structure to initialize
- * @v_idx: Index of vector to be freed
- *
- * This function frees the memory allocated to the q_vector.
- */
-static void igc_free_q_vector(struct igc_adapter *adapter, int v_idx)
-{
- struct igc_q_vector *q_vector = adapter->q_vector[v_idx];
-
- adapter->q_vector[v_idx] = NULL;
-
- /* igc_get_stats64() might access the rings on this vector,
- * we must wait a grace period before freeing it.
- */
- if (q_vector)
- kfree_rcu(q_vector, rcu);
-}
-
/* Need to wait a few seconds after link up to get diagnostic information from
* the phy
*/
@@ -3109,7 +3989,7 @@ bool igc_has_link(struct igc_adapter *adapter)
/**
* igc_watchdog - Timer Call-back
- * @data: pointer to adapter cast into an unsigned long
+ * @t: timer for the watchdog
*/
static void igc_watchdog(struct timer_list *t)
{
@@ -3282,6 +4162,8 @@ no_wait:
wr32(IGC_ICS, IGC_ICS_RXDMT0);
}
+ igc_ptp_tx_hang(adapter);
+
/* Reset the timer */
if (!test_bit(__IGC_DOWN, &adapter->state)) {
if (adapter->flags & IGC_FLAG_NEED_LINK_UPDATE)
@@ -3294,149 +4176,6 @@ no_wait:
}
/**
- * igc_update_ring_itr - update the dynamic ITR value based on packet size
- * @q_vector: pointer to q_vector
- *
- * Stores a new ITR value based on strictly on packet size. This
- * algorithm is less sophisticated than that used in igc_update_itr,
- * due to the difficulty of synchronizing statistics across multiple
- * receive rings. The divisors and thresholds used by this function
- * were determined based on theoretical maximum wire speed and testing
- * data, in order to minimize response time while increasing bulk
- * throughput.
- * NOTE: This function is called only when operating in a multiqueue
- * receive environment.
- */
-static void igc_update_ring_itr(struct igc_q_vector *q_vector)
-{
- struct igc_adapter *adapter = q_vector->adapter;
- int new_val = q_vector->itr_val;
- int avg_wire_size = 0;
- unsigned int packets;
-
- /* For non-gigabit speeds, just fix the interrupt rate at 4000
- * ints/sec - ITR timer value of 120 ticks.
- */
- switch (adapter->link_speed) {
- case SPEED_10:
- case SPEED_100:
- new_val = IGC_4K_ITR;
- goto set_itr_val;
- default:
- break;
- }
-
- packets = q_vector->rx.total_packets;
- if (packets)
- avg_wire_size = q_vector->rx.total_bytes / packets;
-
- packets = q_vector->tx.total_packets;
- if (packets)
- avg_wire_size = max_t(u32, avg_wire_size,
- q_vector->tx.total_bytes / packets);
-
- /* if avg_wire_size isn't set no work was done */
- if (!avg_wire_size)
- goto clear_counts;
-
- /* Add 24 bytes to size to account for CRC, preamble, and gap */
- avg_wire_size += 24;
-
- /* Don't starve jumbo frames */
- avg_wire_size = min(avg_wire_size, 3000);
-
- /* Give a little boost to mid-size frames */
- if (avg_wire_size > 300 && avg_wire_size < 1200)
- new_val = avg_wire_size / 3;
- else
- new_val = avg_wire_size / 2;
-
- /* conservative mode (itr 3) eliminates the lowest_latency setting */
- if (new_val < IGC_20K_ITR &&
- ((q_vector->rx.ring && adapter->rx_itr_setting == 3) ||
- (!q_vector->rx.ring && adapter->tx_itr_setting == 3)))
- new_val = IGC_20K_ITR;
-
-set_itr_val:
- if (new_val != q_vector->itr_val) {
- q_vector->itr_val = new_val;
- q_vector->set_itr = 1;
- }
-clear_counts:
- q_vector->rx.total_bytes = 0;
- q_vector->rx.total_packets = 0;
- q_vector->tx.total_bytes = 0;
- q_vector->tx.total_packets = 0;
-}
-
-/**
- * igc_update_itr - update the dynamic ITR value based on statistics
- * @q_vector: pointer to q_vector
- * @ring_container: ring info to update the itr for
- *
- * Stores a new ITR value based on packets and byte
- * counts during the last interrupt. The advantage of per interrupt
- * computation is faster updates and more accurate ITR for the current
- * traffic pattern. Constants in this function were computed
- * based on theoretical maximum wire speed and thresholds were set based
- * on testing data as well as attempting to minimize response time
- * while increasing bulk throughput.
- * NOTE: These calculations are only valid when operating in a single-
- * queue environment.
- */
-static void igc_update_itr(struct igc_q_vector *q_vector,
- struct igc_ring_container *ring_container)
-{
- unsigned int packets = ring_container->total_packets;
- unsigned int bytes = ring_container->total_bytes;
- u8 itrval = ring_container->itr;
-
- /* no packets, exit with status unchanged */
- if (packets == 0)
- return;
-
- switch (itrval) {
- case lowest_latency:
- /* handle TSO and jumbo frames */
- if (bytes / packets > 8000)
- itrval = bulk_latency;
- else if ((packets < 5) && (bytes > 512))
- itrval = low_latency;
- break;
- case low_latency: /* 50 usec aka 20000 ints/s */
- if (bytes > 10000) {
- /* this if handles the TSO accounting */
- if (bytes / packets > 8000)
- itrval = bulk_latency;
- else if ((packets < 10) || ((bytes / packets) > 1200))
- itrval = bulk_latency;
- else if ((packets > 35))
- itrval = lowest_latency;
- } else if (bytes / packets > 2000) {
- itrval = bulk_latency;
- } else if (packets <= 2 && bytes < 512) {
- itrval = lowest_latency;
- }
- break;
- case bulk_latency: /* 250 usec aka 4000 ints/s */
- if (bytes > 25000) {
- if (packets > 35)
- itrval = low_latency;
- } else if (bytes < 1500) {
- itrval = low_latency;
- }
- break;
- }
-
- /* clear work counters since we have the values we need */
- ring_container->total_bytes = 0;
- ring_container->total_packets = 0;
-
- /* write updated itr to ring container */
- ring_container->itr = itrval;
-}
-
-/**
* igc_intr_msi - Interrupt Handler
* @irq: interrupt number
* @data: pointer to a network interface device structure
@@ -3513,424 +4252,6 @@ static irqreturn_t igc_intr(int irq, void *data)
return IRQ_HANDLED;
}
-static void igc_set_itr(struct igc_q_vector *q_vector)
-{
- struct igc_adapter *adapter = q_vector->adapter;
- u32 new_itr = q_vector->itr_val;
- u8 current_itr = 0;
-
- /* for non-gigabit speeds, just fix the interrupt rate at 4000 */
- switch (adapter->link_speed) {
- case SPEED_10:
- case SPEED_100:
- current_itr = 0;
- new_itr = IGC_4K_ITR;
- goto set_itr_now;
- default:
- break;
- }
-
- igc_update_itr(q_vector, &q_vector->tx);
- igc_update_itr(q_vector, &q_vector->rx);
-
- current_itr = max(q_vector->rx.itr, q_vector->tx.itr);
-
- /* conservative mode (itr 3) eliminates the lowest_latency setting */
- if (current_itr == lowest_latency &&
- ((q_vector->rx.ring && adapter->rx_itr_setting == 3) ||
- (!q_vector->rx.ring && adapter->tx_itr_setting == 3)))
- current_itr = low_latency;
-
- switch (current_itr) {
- /* counts and packets in update_itr are dependent on these numbers */
- case lowest_latency:
- new_itr = IGC_70K_ITR; /* 70,000 ints/sec */
- break;
- case low_latency:
- new_itr = IGC_20K_ITR; /* 20,000 ints/sec */
- break;
- case bulk_latency:
- new_itr = IGC_4K_ITR; /* 4,000 ints/sec */
- break;
- default:
- break;
- }
-
-set_itr_now:
- if (new_itr != q_vector->itr_val) {
- /* this attempts to bias the interrupt rate towards Bulk
- * by adding intermediate steps when interrupt rate is
- * increasing
- */
- new_itr = new_itr > q_vector->itr_val ?
- max((new_itr * q_vector->itr_val) /
- (new_itr + (q_vector->itr_val >> 2)),
- new_itr) : new_itr;
- /* Don't write the value here; it resets the adapter's
- * internal timer, and causes us to delay far longer than
- * we should between interrupts. Instead, we write the ITR
- * value at the beginning of the next interrupt so the timing
- * ends up being correct.
- */
- q_vector->itr_val = new_itr;
- q_vector->set_itr = 1;
- }
-}
-
-static void igc_ring_irq_enable(struct igc_q_vector *q_vector)
-{
- struct igc_adapter *adapter = q_vector->adapter;
- struct igc_hw *hw = &adapter->hw;
-
- if ((q_vector->rx.ring && (adapter->rx_itr_setting & 3)) ||
- (!q_vector->rx.ring && (adapter->tx_itr_setting & 3))) {
- if (adapter->num_q_vectors == 1)
- igc_set_itr(q_vector);
- else
- igc_update_ring_itr(q_vector);
- }
-
- if (!test_bit(__IGC_DOWN, &adapter->state)) {
- if (adapter->msix_entries)
- wr32(IGC_EIMS, q_vector->eims_value);
- else
- igc_irq_enable(adapter);
- }
-}
-
-/**
- * igc_poll - NAPI Rx polling callback
- * @napi: napi polling structure
- * @budget: count of how many packets we should handle
- */
-static int igc_poll(struct napi_struct *napi, int budget)
-{
- struct igc_q_vector *q_vector = container_of(napi,
- struct igc_q_vector,
- napi);
- bool clean_complete = true;
- int work_done = 0;
-
- if (q_vector->tx.ring)
- clean_complete = igc_clean_tx_irq(q_vector, budget);
-
- if (q_vector->rx.ring) {
- int cleaned = igc_clean_rx_irq(q_vector, budget);
-
- work_done += cleaned;
- if (cleaned >= budget)
- clean_complete = false;
- }
-
- /* If all work not completed, return budget and keep polling */
- if (!clean_complete)
- return budget;
-
- /* Exit the polling mode, but don't re-enable interrupts if stack might
- * poll us due to busy-polling
- */
- if (likely(napi_complete_done(napi, work_done)))
- igc_ring_irq_enable(q_vector);
-
- return min(work_done, budget - 1);
-}
-
-/**
- * igc_set_interrupt_capability - set MSI or MSI-X if supported
- * @adapter: Pointer to adapter structure
- *
- * Attempt to configure interrupts using the best available
- * capabilities of the hardware and kernel.
- */
-static void igc_set_interrupt_capability(struct igc_adapter *adapter,
- bool msix)
-{
- int numvecs, i;
- int err;
-
- if (!msix)
- goto msi_only;
- adapter->flags |= IGC_FLAG_HAS_MSIX;
-
- /* Number of supported queues. */
- adapter->num_rx_queues = adapter->rss_queues;
-
- adapter->num_tx_queues = adapter->rss_queues;
-
- /* start with one vector for every Rx queue */
- numvecs = adapter->num_rx_queues;
-
- /* if Tx handler is separate add 1 for every Tx queue */
- if (!(adapter->flags & IGC_FLAG_QUEUE_PAIRS))
- numvecs += adapter->num_tx_queues;
-
- /* store the number of vectors reserved for queues */
- adapter->num_q_vectors = numvecs;
-
- /* add 1 vector for link status interrupts */
- numvecs++;
-
- adapter->msix_entries = kcalloc(numvecs, sizeof(struct msix_entry),
- GFP_KERNEL);
-
- if (!adapter->msix_entries)
- return;
-
- /* populate entry values */
- for (i = 0; i < numvecs; i++)
- adapter->msix_entries[i].entry = i;
-
- err = pci_enable_msix_range(adapter->pdev,
- adapter->msix_entries,
- numvecs,
- numvecs);
- if (err > 0)
- return;
-
- kfree(adapter->msix_entries);
- adapter->msix_entries = NULL;
-
- igc_reset_interrupt_capability(adapter);
-
-msi_only:
- adapter->flags &= ~IGC_FLAG_HAS_MSIX;
-
- adapter->rss_queues = 1;
- adapter->flags |= IGC_FLAG_QUEUE_PAIRS;
- adapter->num_rx_queues = 1;
- adapter->num_tx_queues = 1;
- adapter->num_q_vectors = 1;
- if (!pci_enable_msi(adapter->pdev))
- adapter->flags |= IGC_FLAG_HAS_MSI;
-}
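/* Illustration: the vector accounting above in a stand-alone form, a minimal
 * sketch assuming IGC_FLAG_QUEUE_PAIRS means one vector serves a paired
 * Tx/Rx ring. With 4 RSS queues and pairing enabled this yields 4 queue
 * vectors + 1 link vector = 5 MSI-X entries.
 */
static int example_count_msix_vectors(unsigned int rss_queues, bool paired)
{
	int numvecs = rss_queues;	/* one vector per Rx queue */

	if (!paired)
		numvecs += rss_queues;	/* separate vectors for Tx queues */

	return numvecs + 1;		/* one extra for link status */
}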
-
-static void igc_add_ring(struct igc_ring *ring,
- struct igc_ring_container *head)
-{
- head->ring = ring;
- head->count++;
-}
-
-/**
- * igc_alloc_q_vector - Allocate memory for a single interrupt vector
- * @adapter: board private structure to initialize
- * @v_count: q_vectors allocated on adapter, used for ring interleaving
- * @v_idx: index of vector in adapter struct
- * @txr_count: total number of Tx rings to allocate
- * @txr_idx: index of first Tx ring to allocate
- * @rxr_count: total number of Rx rings to allocate
- * @rxr_idx: index of first Rx ring to allocate
- *
- * We allocate one q_vector. If allocation fails we return -ENOMEM.
- */
-static int igc_alloc_q_vector(struct igc_adapter *adapter,
- unsigned int v_count, unsigned int v_idx,
- unsigned int txr_count, unsigned int txr_idx,
- unsigned int rxr_count, unsigned int rxr_idx)
-{
- struct igc_q_vector *q_vector;
- struct igc_ring *ring;
- int ring_count;
-
- /* igc only supports 1 Tx and/or 1 Rx queue per vector */
- if (txr_count > 1 || rxr_count > 1)
- return -ENOMEM;
-
- ring_count = txr_count + rxr_count;
-
- /* allocate q_vector and rings */
- q_vector = adapter->q_vector[v_idx];
- if (!q_vector)
- q_vector = kzalloc(struct_size(q_vector, ring, ring_count),
- GFP_KERNEL);
- else
- memset(q_vector, 0, struct_size(q_vector, ring, ring_count));
- if (!q_vector)
- return -ENOMEM;
-
- /* initialize NAPI */
- netif_napi_add(adapter->netdev, &q_vector->napi,
- igc_poll, 64);
-
- /* tie q_vector and adapter together */
- adapter->q_vector[v_idx] = q_vector;
- q_vector->adapter = adapter;
-
- /* initialize work limits */
- q_vector->tx.work_limit = adapter->tx_work_limit;
-
- /* initialize ITR configuration */
- q_vector->itr_register = adapter->io_addr + IGC_EITR(0);
- q_vector->itr_val = IGC_START_ITR;
-
- /* initialize pointer to rings */
- ring = q_vector->ring;
-
- /* initialize ITR */
- if (rxr_count) {
- /* rx or rx/tx vector */
- if (!adapter->rx_itr_setting || adapter->rx_itr_setting > 3)
- q_vector->itr_val = adapter->rx_itr_setting;
- } else {
- /* tx only vector */
- if (!adapter->tx_itr_setting || adapter->tx_itr_setting > 3)
- q_vector->itr_val = adapter->tx_itr_setting;
- }
-
- if (txr_count) {
- /* assign generic ring traits */
- ring->dev = &adapter->pdev->dev;
- ring->netdev = adapter->netdev;
-
- /* configure backlink on ring */
- ring->q_vector = q_vector;
-
- /* update q_vector Tx values */
- igc_add_ring(ring, &q_vector->tx);
-
- /* apply Tx specific ring traits */
- ring->count = adapter->tx_ring_count;
- ring->queue_index = txr_idx;
-
- /* assign ring to adapter */
- adapter->tx_ring[txr_idx] = ring;
-
- /* push pointer to next ring */
- ring++;
- }
-
- if (rxr_count) {
- /* assign generic ring traits */
- ring->dev = &adapter->pdev->dev;
- ring->netdev = adapter->netdev;
-
- /* configure backlink on ring */
- ring->q_vector = q_vector;
-
- /* update q_vector Rx values */
- igc_add_ring(ring, &q_vector->rx);
-
- /* apply Rx specific ring traits */
- ring->count = adapter->rx_ring_count;
- ring->queue_index = rxr_idx;
-
- /* assign ring to adapter */
- adapter->rx_ring[rxr_idx] = ring;
- }
-
- return 0;
-}
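/* Illustration: struct_size() above sizes a structure with a trailing
 * flexible array member, with overflow checking from <linux/overflow.h>.
 * A minimal sketch of the same pattern (example_vector is hypothetical):
 */
struct example_vector {
	int itr_val;
	struct igc_ring ring[];	/* flexible array member */
};

static struct example_vector *example_alloc(int ring_count)
{
	struct example_vector *v;

	/* struct_size(v, ring, n) == sizeof(*v) + n * sizeof(v->ring[0]),
	 * saturating to SIZE_MAX on overflow so kzalloc() fails safely.
	 */
	v = kzalloc(struct_size(v, ring, ring_count), GFP_KERNEL);
	return v;
}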
-
-/**
- * igc_alloc_q_vectors - Allocate memory for interrupt vectors
- * @adapter: board private structure to initialize
- *
- * We allocate one q_vector per queue interrupt. If allocation fails we
- * return -ENOMEM.
- */
-static int igc_alloc_q_vectors(struct igc_adapter *adapter)
-{
- int rxr_remaining = adapter->num_rx_queues;
- int txr_remaining = adapter->num_tx_queues;
- int rxr_idx = 0, txr_idx = 0, v_idx = 0;
- int q_vectors = adapter->num_q_vectors;
- int err;
-
- if (q_vectors >= (rxr_remaining + txr_remaining)) {
- for (; rxr_remaining; v_idx++) {
- err = igc_alloc_q_vector(adapter, q_vectors, v_idx,
- 0, 0, 1, rxr_idx);
-
- if (err)
- goto err_out;
-
- /* update counts and index */
- rxr_remaining--;
- rxr_idx++;
- }
- }
-
- for (; v_idx < q_vectors; v_idx++) {
- int rqpv = DIV_ROUND_UP(rxr_remaining, q_vectors - v_idx);
- int tqpv = DIV_ROUND_UP(txr_remaining, q_vectors - v_idx);
-
- err = igc_alloc_q_vector(adapter, q_vectors, v_idx,
- tqpv, txr_idx, rqpv, rxr_idx);
-
- if (err)
- goto err_out;
-
- /* update counts and index */
- rxr_remaining -= rqpv;
- txr_remaining -= tqpv;
- rxr_idx++;
- txr_idx++;
- }
-
- return 0;
-
-err_out:
- adapter->num_tx_queues = 0;
- adapter->num_rx_queues = 0;
- adapter->num_q_vectors = 0;
-
- while (v_idx--)
- igc_free_q_vector(adapter, v_idx);
-
- return -ENOMEM;
-}
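/* Illustration: the DIV_ROUND_UP() loop above spreads leftover rings evenly
 * across the remaining vectors; e.g. 5 Rx rings over 3 vectors come out as
 * 2, 2, 1. A minimal sketch of just that distribution step:
 */
static void example_spread_rings(int rings, int vectors)
{
	int v;

	for (v = 0; v < vectors; v++) {
		int per = DIV_ROUND_UP(rings, vectors - v);

		rings -= per;	/* rings=5, vectors=3 -> per = 2, 2, 1 */
	}
}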
-
-/**
- * igc_cache_ring_register - Descriptor ring to register mapping
- * @adapter: board private structure to initialize
- *
- * Once we know the feature-set enabled for the device, we'll cache
- * the register offset the descriptor ring is assigned to.
- */
-static void igc_cache_ring_register(struct igc_adapter *adapter)
-{
- int i = 0, j = 0;
-
- switch (adapter->hw.mac.type) {
- case igc_i225:
- /* Fall through */
- default:
- for (; i < adapter->num_rx_queues; i++)
- adapter->rx_ring[i]->reg_idx = i;
- for (; j < adapter->num_tx_queues; j++)
- adapter->tx_ring[j]->reg_idx = j;
- break;
- }
-}
-
-/**
- * igc_init_interrupt_scheme - initialize interrupts, allocate queues/vectors
- * @adapter: Pointer to adapter structure
- * @msix: boolean for MSI-X capability
- *
- * This function initializes the interrupts and allocates all of the queues.
- */
-static int igc_init_interrupt_scheme(struct igc_adapter *adapter, bool msix)
-{
- struct pci_dev *pdev = adapter->pdev;
- int err = 0;
-
- igc_set_interrupt_capability(adapter, msix);
-
- err = igc_alloc_q_vectors(adapter);
- if (err) {
- dev_err(&pdev->dev, "Unable to allocate memory for vectors\n");
- goto err_alloc_q_vectors;
- }
-
- igc_cache_ring_register(adapter);
-
- return 0;
-
-err_alloc_q_vectors:
- igc_reset_interrupt_capability(adapter);
- return err;
-}
-
static void igc_free_irq(struct igc_adapter *adapter)
{
if (adapter->msix_entries) {
@@ -3947,62 +4268,6 @@ static void igc_free_irq(struct igc_adapter *adapter)
}
/**
- * igc_irq_disable - Mask off interrupt generation on the NIC
- * @adapter: board private structure
- */
-static void igc_irq_disable(struct igc_adapter *adapter)
-{
- struct igc_hw *hw = &adapter->hw;
-
- if (adapter->msix_entries) {
- u32 regval = rd32(IGC_EIAM);
-
- wr32(IGC_EIAM, regval & ~adapter->eims_enable_mask);
- wr32(IGC_EIMC, adapter->eims_enable_mask);
- regval = rd32(IGC_EIAC);
- wr32(IGC_EIAC, regval & ~adapter->eims_enable_mask);
- }
-
- wr32(IGC_IAM, 0);
- wr32(IGC_IMC, ~0);
- wrfl();
-
- if (adapter->msix_entries) {
- int vector = 0, i;
-
- synchronize_irq(adapter->msix_entries[vector++].vector);
-
- for (i = 0; i < adapter->num_q_vectors; i++)
- synchronize_irq(adapter->msix_entries[vector++].vector);
- } else {
- synchronize_irq(adapter->pdev->irq);
- }
-}
-
-/**
- * igc_irq_enable - Enable default interrupt generation settings
- * @adapter: board private structure
- */
-static void igc_irq_enable(struct igc_adapter *adapter)
-{
- struct igc_hw *hw = &adapter->hw;
-
- if (adapter->msix_entries) {
- u32 ims = IGC_IMS_LSC | IGC_IMS_DOUTSYNC | IGC_IMS_DRSTA;
- u32 regval = rd32(IGC_EIAC);
-
- wr32(IGC_EIAC, regval | adapter->eims_enable_mask);
- regval = rd32(IGC_EIAM);
- wr32(IGC_EIAM, regval | adapter->eims_enable_mask);
- wr32(IGC_EIMS, adapter->eims_enable_mask);
- wr32(IGC_IMS, ims);
- } else {
- wr32(IGC_IMS, IMS_ENABLE_MASK | IGC_IMS_DRSTA);
- wr32(IGC_IAM, IMS_ENABLE_MASK | IGC_IMS_DRSTA);
- }
-}
-
-/**
* igc_request_irq - initialize interrupts
* @adapter: Pointer to adapter structure
*
@@ -4056,25 +4321,10 @@ request_done:
return err;
}
-static void igc_write_itr(struct igc_q_vector *q_vector)
-{
- u32 itr_val = q_vector->itr_val & IGC_QVECTOR_MASK;
-
- if (!q_vector->set_itr)
- return;
-
- if (!itr_val)
- itr_val = IGC_ITR_VAL_MASK;
-
- itr_val |= IGC_EITR_CNT_IGNR;
-
- writel(itr_val, q_vector->itr_register);
- q_vector->set_itr = 0;
-}
-
/**
- * igc_open - Called when a network interface is made active
+ * __igc_open - Called when a network interface is made active
* @netdev: network interface device structure
+ * @resuming: boolean indicating if the device is resuming
*
* Returns 0 on success, negative value on failure
*
@@ -4164,8 +4414,9 @@ static int igc_open(struct net_device *netdev)
}
/**
- * igc_close - Disables a network interface
+ * __igc_close - Disables a network interface
* @netdev: network interface device structure
+ * @suspending: boolean indicating the device is suspending
*
* Returns 0, this is not allowed to fail
*
@@ -4199,6 +4450,24 @@ static int igc_close(struct net_device *netdev)
return 0;
}
+/**
+ * igc_ioctl - Access the hwtstamp interface
+ * @netdev: network interface device structure
+ * @ifr: interface request data
+ * @cmd: ioctl command
+ **/
+static int igc_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
+{
+ switch (cmd) {
+ case SIOCGHWTSTAMP:
+ return igc_ptp_get_ts_config(netdev, ifr);
+ case SIOCSHWTSTAMP:
+ return igc_ptp_set_ts_config(netdev, ifr);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
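/* Illustration: a minimal user-space counterpart to the ioctl handler above,
 * a sketch assuming "eth0" as a placeholder interface name and sockfd as any
 * open socket. See Documentation/networking/timestamping.rst.
 */
#include <string.h>
#include <sys/ioctl.h>
#include <net/if.h>
#include <linux/net_tstamp.h>
#include <linux/sockios.h>

static int example_enable_hwtstamp(int sockfd)
{
	struct hwtstamp_config cfg = {
		.tx_type   = HWTSTAMP_TX_ON,
		.rx_filter = HWTSTAMP_FILTER_ALL,
	};
	struct ifreq ifr;

	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);
	ifr.ifr_data = (void *)&cfg;

	return ioctl(sockfd, SIOCSHWTSTAMP, &ifr);	/* 0 on success */
}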
+
static const struct net_device_ops igc_netdev_ops = {
.ndo_open = igc_open,
.ndo_stop = igc_close,
@@ -4210,6 +4479,7 @@ static const struct net_device_ops igc_netdev_ops = {
.ndo_fix_features = igc_fix_features,
.ndo_set_features = igc_set_features,
.ndo_features_check = igc_features_check,
+ .ndo_do_ioctl = igc_ioctl,
};
/* PCIe configuration access */
@@ -4345,32 +4615,26 @@ static int igc_probe(struct pci_dev *pdev,
struct net_device *netdev;
struct igc_hw *hw;
const struct igc_info *ei = igc_info_tbl[ent->driver_data];
- int err;
+ int err, pci_using_dac;
err = pci_enable_device_mem(pdev);
if (err)
return err;
- err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(64));
+ pci_using_dac = 0;
+ err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
if (!err) {
- err = dma_set_coherent_mask(&pdev->dev,
- DMA_BIT_MASK(64));
+ pci_using_dac = 1;
} else {
- err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
+ err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
if (err) {
- err = dma_set_coherent_mask(&pdev->dev,
- DMA_BIT_MASK(32));
- if (err) {
- dev_err(&pdev->dev, "igc: Wrong DMA config\n");
- goto err_dma;
- }
+ dev_err(&pdev->dev,
+ "No usable DMA configuration, aborting\n");
+ goto err_dma;
}
}
- err = pci_request_selected_regions(pdev,
- pci_select_bars(pdev,
- IORESOURCE_MEM),
- igc_driver_name);
+ err = pci_request_mem_regions(pdev, igc_driver_name);
if (err)
goto err_pci_reg;
@@ -4433,6 +4697,9 @@ static int igc_probe(struct pci_dev *pdev,
goto err_sw_init;
/* Add supported features to the features list*/
+ netdev->features |= NETIF_F_SG;
+ netdev->features |= NETIF_F_TSO;
+ netdev->features |= NETIF_F_TSO6;
netdev->features |= NETIF_F_RXCSUM;
netdev->features |= NETIF_F_HW_CSUM;
netdev->features |= NETIF_F_SCTP_CRC;
@@ -4446,6 +4713,9 @@ static int igc_probe(struct pci_dev *pdev,
netdev->hw_features |= NETIF_F_NTUPLE;
netdev->hw_features |= netdev->features;
+ if (pci_using_dac)
+ netdev->features |= NETIF_F_HIGHDMA;
+
/* MTU range: 68 - 9216 */
netdev->min_mtu = ETH_MIN_MTU;
netdev->max_mtu = MAX_STD_JUMBO_FRAME_SIZE;
@@ -4512,6 +4782,9 @@ static int igc_probe(struct pci_dev *pdev,
/* carrier off reporting is important to ethtool even BEFORE open */
netif_carrier_off(netdev);
+ /* do hw tstamp init after resetting */
+ igc_ptp_init(adapter);
+
/* Check if Media Autosense is enabled */
adapter->ei = *ei;
@@ -4532,8 +4805,7 @@ err_sw_init:
err_ioremap:
free_netdev(netdev);
err_alloc_etherdev:
- pci_release_selected_regions(pdev,
- pci_select_bars(pdev, IORESOURCE_MEM));
+ pci_release_mem_regions(pdev);
err_pci_reg:
err_dma:
pci_disable_device(pdev);
@@ -4554,6 +4826,8 @@ static void igc_remove(struct pci_dev *pdev)
struct net_device *netdev = pci_get_drvdata(pdev);
struct igc_adapter *adapter = netdev_priv(netdev);
+ igc_ptp_stop(adapter);
+
set_bit(__IGC_DOWN, &adapter->state);
del_timer_sync(&adapter->watchdog_timer);
@@ -4580,105 +4854,216 @@ static void igc_remove(struct pci_dev *pdev)
pci_disable_device(pdev);
}
-static struct pci_driver igc_driver = {
- .name = igc_driver_name,
- .id_table = igc_pci_tbl,
- .probe = igc_probe,
- .remove = igc_remove,
-};
-
-void igc_set_flag_queue_pairs(struct igc_adapter *adapter,
- const u32 max_rss_queues)
+static int __igc_shutdown(struct pci_dev *pdev, bool *enable_wake,
+ bool runtime)
{
- /* Determine if we need to pair queues. */
- /* If rss_queues > half of max_rss_queues, pair the queues in
- * order to conserve interrupts due to limited supply.
- */
- if (adapter->rss_queues > (max_rss_queues / 2))
- adapter->flags |= IGC_FLAG_QUEUE_PAIRS;
+ struct net_device *netdev = pci_get_drvdata(pdev);
+ struct igc_adapter *adapter = netdev_priv(netdev);
+ u32 wufc = runtime ? IGC_WUFC_LNKC : adapter->wol;
+ struct igc_hw *hw = &adapter->hw;
+ u32 ctrl, rctl, status;
+ bool wake;
+
+ rtnl_lock();
+ netif_device_detach(netdev);
+
+ if (netif_running(netdev))
+ __igc_close(netdev, true);
+
+ igc_clear_interrupt_scheme(adapter);
+ rtnl_unlock();
+
+ status = rd32(IGC_STATUS);
+ if (status & IGC_STATUS_LU)
+ wufc &= ~IGC_WUFC_LNKC;
+
+ if (wufc) {
+ igc_setup_rctl(adapter);
+ igc_set_rx_mode(netdev);
+
+ /* turn on all-multi mode if wake on multicast is enabled */
+ if (wufc & IGC_WUFC_MC) {
+ rctl = rd32(IGC_RCTL);
+ rctl |= IGC_RCTL_MPE;
+ wr32(IGC_RCTL, rctl);
+ }
+
+ ctrl = rd32(IGC_CTRL);
+ ctrl |= IGC_CTRL_ADVD3WUC;
+ wr32(IGC_CTRL, ctrl);
+
+ /* Allow time for pending master requests to run */
+ igc_disable_pcie_master(hw);
+
+ wr32(IGC_WUC, IGC_WUC_PME_EN);
+ wr32(IGC_WUFC, wufc);
+ } else {
+ wr32(IGC_WUC, 0);
+ wr32(IGC_WUFC, 0);
+ }
+
+ wake = wufc || adapter->en_mng_pt;
+ if (!wake)
+ igc_power_down_link(adapter);
else
- adapter->flags &= ~IGC_FLAG_QUEUE_PAIRS;
-}
+ igc_power_up_link(adapter);
-unsigned int igc_get_max_rss_queues(struct igc_adapter *adapter)
-{
- unsigned int max_rss_queues;
+ if (enable_wake)
+ *enable_wake = wake;
- /* Determine the maximum number of RSS queues supported. */
- max_rss_queues = IGC_MAX_RX_QUEUES;
+ /* Release control of h/w to f/w. If f/w is AMT enabled, this
+ * would have already happened in close and is redundant.
+ */
+ igc_release_hw_control(adapter);
- return max_rss_queues;
+ pci_disable_device(pdev);
+
+ return 0;
}
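/* Illustration: the wake-filter selection at the top of __igc_shutdown() in
 * isolation, a sketch assuming the same IGC_WUFC_LNKC semantics: runtime
 * suspend wakes only on link change, system suspend uses the configured WoL
 * mask, and the link-change bit is dropped while link is already up.
 */
static u32 example_pick_wufc(bool runtime, u32 wol_mask, bool link_up)
{
	u32 wufc = runtime ? IGC_WUFC_LNKC : wol_mask;

	if (link_up)
		wufc &= ~IGC_WUFC_LNKC;

	return wufc;
}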
-static void igc_init_queue_configuration(struct igc_adapter *adapter)
+#ifdef CONFIG_PM
+static int __maybe_unused igc_runtime_suspend(struct device *dev)
{
- u32 max_rss_queues;
-
- max_rss_queues = igc_get_max_rss_queues(adapter);
- adapter->rss_queues = min_t(u32, max_rss_queues, num_online_cpus());
-
- igc_set_flag_queue_pairs(adapter, max_rss_queues);
+ return __igc_shutdown(to_pci_dev(dev), NULL, 1);
}
-/**
- * igc_sw_init - Initialize general software structures (struct igc_adapter)
- * @adapter: board private structure to initialize
- *
- * igc_sw_init initializes the Adapter private data structure.
- * Fields are initialized based on PCI device information and
- * OS network device settings (MTU size).
- */
-static int igc_sw_init(struct igc_adapter *adapter)
+static void igc_deliver_wake_packet(struct net_device *netdev)
{
- struct net_device *netdev = adapter->netdev;
- struct pci_dev *pdev = adapter->pdev;
+ struct igc_adapter *adapter = netdev_priv(netdev);
struct igc_hw *hw = &adapter->hw;
+ struct sk_buff *skb;
+ u32 wupl;
- int size = sizeof(struct igc_mac_addr) * hw->mac.rar_entry_count;
+ wupl = rd32(IGC_WUPL) & IGC_WUPL_MASK;
- pci_read_config_word(pdev, PCI_COMMAND, &hw->bus.pci_cmd_word);
+ /* WUPM stores only the first 128 bytes of the wake packet.
+ * Read the packet only if we have the whole thing.
+ */
+ if (wupl == 0 || wupl > IGC_WUPM_BYTES)
+ return;
- /* set default ring sizes */
- adapter->tx_ring_count = IGC_DEFAULT_TXD;
- adapter->rx_ring_count = IGC_DEFAULT_RXD;
+ skb = netdev_alloc_skb_ip_align(netdev, IGC_WUPM_BYTES);
+ if (!skb)
+ return;
- /* set default ITR values */
- adapter->rx_itr_setting = IGC_DEFAULT_ITR;
- adapter->tx_itr_setting = IGC_DEFAULT_ITR;
+ skb_put(skb, wupl);
- /* set default work limits */
- adapter->tx_work_limit = IGC_DEFAULT_TX_WORK;
+ /* Ensure reads are 32-bit aligned */
+ wupl = roundup(wupl, 4);
- /* adjust max frame to be at least the size of a standard frame */
- adapter->max_frame_size = netdev->mtu + ETH_HLEN + ETH_FCS_LEN +
- VLAN_HLEN;
- adapter->min_frame_size = ETH_ZLEN + ETH_FCS_LEN;
+ memcpy_fromio(skb->data, hw->hw_addr + IGC_WUPM_REG(0), wupl);
- spin_lock_init(&adapter->nfc_lock);
- spin_lock_init(&adapter->stats64_lock);
- /* Assume MSI-X interrupts, will be checked during IRQ allocation */
- adapter->flags |= IGC_FLAG_HAS_MSIX;
+ skb->protocol = eth_type_trans(skb, netdev);
+ netif_rx(skb);
+}
- adapter->mac_table = kzalloc(size, GFP_ATOMIC);
- if (!adapter->mac_table)
- return -ENOMEM;
+static int __maybe_unused igc_resume(struct device *dev)
+{
+ struct pci_dev *pdev = to_pci_dev(dev);
+ struct net_device *netdev = pci_get_drvdata(pdev);
+ struct igc_adapter *adapter = netdev_priv(netdev);
+ struct igc_hw *hw = &adapter->hw;
+ u32 err, val;
- igc_init_queue_configuration(adapter);
+ pci_set_power_state(pdev, PCI_D0);
+ pci_restore_state(pdev);
+ pci_save_state(pdev);
+
+ if (!pci_device_is_present(pdev))
+ return -ENODEV;
+ err = pci_enable_device_mem(pdev);
+ if (err) {
+ dev_err(&pdev->dev,
+ "igc: Cannot enable PCI device from suspend\n");
+ return err;
+ }
+ pci_set_master(pdev);
+
+ pci_enable_wake(pdev, PCI_D3hot, 0);
+ pci_enable_wake(pdev, PCI_D3cold, 0);
- /* This call may decrease the number of queues */
if (igc_init_interrupt_scheme(adapter, true)) {
dev_err(&pdev->dev, "Unable to allocate memory for queues\n");
return -ENOMEM;
}
- /* Explicitly disable IRQ since the NIC can be in any state. */
- igc_irq_disable(adapter);
+ igc_reset(adapter);
- set_bit(__IGC_DOWN, &adapter->state);
+ /* let the f/w know that the h/w is now under the control of the
+ * driver.
+ */
+ igc_get_hw_control(adapter);
- return 0;
+ val = rd32(IGC_WUS);
+ if (val & WAKE_PKT_WUS)
+ igc_deliver_wake_packet(netdev);
+
+ wr32(IGC_WUS, ~0);
+
+ rtnl_lock();
+ if (!err && netif_running(netdev))
+ err = __igc_open(netdev, true);
+
+ if (!err)
+ netif_device_attach(netdev);
+ rtnl_unlock();
+
+ return err;
+}
+
+static int __maybe_unused igc_runtime_resume(struct device *dev)
+{
+ return igc_resume(dev);
+}
+
+static int __maybe_unused igc_suspend(struct device *dev)
+{
+ return __igc_shutdown(to_pci_dev(dev), NULL, 0);
}
+static int __maybe_unused igc_runtime_idle(struct device *dev)
+{
+ struct net_device *netdev = dev_get_drvdata(dev);
+ struct igc_adapter *adapter = netdev_priv(netdev);
+
+ if (!igc_has_link(adapter))
+ pm_schedule_suspend(dev, MSEC_PER_SEC * 5);
+
+ return -EBUSY;
+}
+#endif /* CONFIG_PM */
+
+static void igc_shutdown(struct pci_dev *pdev)
+{
+ bool wake;
+
+ __igc_shutdown(pdev, &wake, 0);
+
+ if (system_state == SYSTEM_POWER_OFF) {
+ pci_wake_from_d3(pdev, wake);
+ pci_set_power_state(pdev, PCI_D3hot);
+ }
+}
+
+#ifdef CONFIG_PM
+static const struct dev_pm_ops igc_pm_ops = {
+ SET_SYSTEM_SLEEP_PM_OPS(igc_suspend, igc_resume)
+ SET_RUNTIME_PM_OPS(igc_runtime_suspend, igc_runtime_resume,
+ igc_runtime_idle)
+};
+#endif
+
+static struct pci_driver igc_driver = {
+ .name = igc_driver_name,
+ .id_table = igc_pci_tbl,
+ .probe = igc_probe,
+ .remove = igc_remove,
+#ifdef CONFIG_PM
+ .driver.pm = &igc_pm_ops,
+#endif
+ .shutdown = igc_shutdown,
+};
+
/**
* igc_reinit_queues - return error
* @adapter: pointer to adapter structure
diff --git a/drivers/net/ethernet/intel/igc/igc_phy.c b/drivers/net/ethernet/intel/igc/igc_phy.c
index f4b05af0dd2f..8e1799508edc 100644
--- a/drivers/net/ethernet/intel/igc/igc_phy.c
+++ b/drivers/net/ethernet/intel/igc/igc_phy.c
@@ -173,6 +173,7 @@ s32 igc_check_downshift(struct igc_hw *hw)
s32 igc_phy_hw_reset(struct igc_hw *hw)
{
struct igc_phy_info *phy = &hw->phy;
+ u32 phpm = 0, timeout = 10000;
s32 ret_val;
u32 ctrl;
@@ -186,6 +187,8 @@ s32 igc_phy_hw_reset(struct igc_hw *hw)
if (ret_val)
goto out;
+ phpm = rd32(IGC_I225_PHPM);
+
ctrl = rd32(IGC_CTRL);
wr32(IGC_CTRL, ctrl | IGC_CTRL_PHY_RST);
wrfl();
@@ -195,7 +198,18 @@ s32 igc_phy_hw_reset(struct igc_hw *hw)
wr32(IGC_CTRL, ctrl);
wrfl();
- usleep_range(1500, 2000);
+ /* SW should guarantee 100us for the completion of the PHY reset */
+ usleep_range(100, 150);
+ do {
+ phpm = rd32(IGC_I225_PHPM);
+ timeout--;
+ udelay(1);
+ } while (!(phpm & IGC_PHY_RST_COMP) && timeout);
+
+ if (!timeout)
+		hw_dbg("Timeout expired after a PHY reset\n");
+
+ usleep_range(100, 150);
phy->ops.release(hw);
diff --git a/drivers/net/ethernet/intel/igc/igc_ptp.c b/drivers/net/ethernet/intel/igc/igc_ptp.c
new file mode 100644
index 000000000000..693506587198
--- /dev/null
+++ b/drivers/net/ethernet/intel/igc/igc_ptp.c
@@ -0,0 +1,716 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2019 Intel Corporation */
+
+#include "igc.h"
+
+#include <linux/module.h>
+#include <linux/device.h>
+#include <linux/pci.h>
+#include <linux/ptp_classify.h>
+#include <linux/clocksource.h>
+
+#define INCVALUE_MASK 0x7fffffff
+#define ISGN 0x80000000
+
+#define IGC_SYSTIM_OVERFLOW_PERIOD (HZ * 60 * 9)
+#define IGC_PTP_TX_TIMEOUT (HZ * 15)
+
+/* SYSTIM read access for I225 */
+static void igc_ptp_read_i225(struct igc_adapter *adapter,
+ struct timespec64 *ts)
+{
+ struct igc_hw *hw = &adapter->hw;
+ u32 sec, nsec;
+
+ /* The timestamp latches on lowest register read. For I210/I211, the
+ * lowest register is SYSTIMR. Since we only need to provide nanosecond
+ * resolution, we can ignore it.
+ */
+ rd32(IGC_SYSTIMR);
+ nsec = rd32(IGC_SYSTIML);
+ sec = rd32(IGC_SYSTIMH);
+
+ ts->tv_sec = sec;
+ ts->tv_nsec = nsec;
+}
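/* Illustration: SYSTIMH holds seconds and SYSTIML nanoseconds, so a caller
 * wanting one 64-bit nanosecond value could wrap the reader above like this
 * (hypothetical helper, not in the driver):
 */
static u64 example_systim_ns(struct igc_adapter *adapter)
{
	struct timespec64 ts;

	igc_ptp_read_i225(adapter, &ts);
	return timespec64_to_ns(&ts);
}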
+
+static void igc_ptp_write_i225(struct igc_adapter *adapter,
+ const struct timespec64 *ts)
+{
+ struct igc_hw *hw = &adapter->hw;
+
+ /* Writing the SYSTIMR register is not necessary as it only
+ * provides sub-nanosecond resolution.
+ */
+ wr32(IGC_SYSTIML, ts->tv_nsec);
+ wr32(IGC_SYSTIMH, ts->tv_sec);
+}
+
+static int igc_ptp_adjfine_i225(struct ptp_clock_info *ptp, long scaled_ppm)
+{
+ struct igc_adapter *igc = container_of(ptp, struct igc_adapter,
+ ptp_caps);
+ struct igc_hw *hw = &igc->hw;
+ int neg_adj = 0;
+ u64 rate;
+ u32 inca;
+
+ if (scaled_ppm < 0) {
+ neg_adj = 1;
+ scaled_ppm = -scaled_ppm;
+ }
+ rate = scaled_ppm;
+ rate <<= 14;
+ rate = div_u64(rate, 78125);
+
+ inca = rate & INCVALUE_MASK;
+ if (neg_adj)
+ inca |= ISGN;
+
+ wr32(IGC_TIMINCA, inca);
+
+ return 0;
+}
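/* Illustration: the arithmetic above for a +1 ppm request. scaled_ppm
 * carries a 16-bit binary fraction, so 1 ppm == 65536, and
 * (65536 << 14) / 78125 == 13743 is the value written to TIMINCA, with the
 * sign carried separately in the ISGN bit. A compact sketch:
 */
static u32 example_scaled_ppm_to_inca(long scaled_ppm)
{
	u64 rate = scaled_ppm < 0 ? -scaled_ppm : scaled_ppm;

	rate <<= 14;
	rate = div_u64(rate, 78125);	/* +1 ppm (65536) -> 13743 */

	return (rate & INCVALUE_MASK) | (scaled_ppm < 0 ? ISGN : 0);
}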
+
+static int igc_ptp_adjtime_i225(struct ptp_clock_info *ptp, s64 delta)
+{
+ struct igc_adapter *igc = container_of(ptp, struct igc_adapter,
+ ptp_caps);
+ struct timespec64 now, then = ns_to_timespec64(delta);
+ unsigned long flags;
+
+ spin_lock_irqsave(&igc->tmreg_lock, flags);
+
+ igc_ptp_read_i225(igc, &now);
+ now = timespec64_add(now, then);
+ igc_ptp_write_i225(igc, (const struct timespec64 *)&now);
+
+ spin_unlock_irqrestore(&igc->tmreg_lock, flags);
+
+ return 0;
+}
+
+static int igc_ptp_gettimex64_i225(struct ptp_clock_info *ptp,
+ struct timespec64 *ts,
+ struct ptp_system_timestamp *sts)
+{
+ struct igc_adapter *igc = container_of(ptp, struct igc_adapter,
+ ptp_caps);
+ struct igc_hw *hw = &igc->hw;
+ unsigned long flags;
+
+ spin_lock_irqsave(&igc->tmreg_lock, flags);
+
+ ptp_read_system_prets(sts);
+ rd32(IGC_SYSTIMR);
+ ptp_read_system_postts(sts);
+ ts->tv_nsec = rd32(IGC_SYSTIML);
+ ts->tv_sec = rd32(IGC_SYSTIMH);
+
+ spin_unlock_irqrestore(&igc->tmreg_lock, flags);
+
+ return 0;
+}
+
+static int igc_ptp_settime_i225(struct ptp_clock_info *ptp,
+ const struct timespec64 *ts)
+{
+ struct igc_adapter *igc = container_of(ptp, struct igc_adapter,
+ ptp_caps);
+ unsigned long flags;
+
+ spin_lock_irqsave(&igc->tmreg_lock, flags);
+
+ igc_ptp_write_i225(igc, ts);
+
+ spin_unlock_irqrestore(&igc->tmreg_lock, flags);
+
+ return 0;
+}
+
+static int igc_ptp_feature_enable_i225(struct ptp_clock_info *ptp,
+ struct ptp_clock_request *rq, int on)
+{
+ return -EOPNOTSUPP;
+}
+
+/**
+ * igc_ptp_systim_to_hwtstamp - convert system time value to HW timestamp
+ * @adapter: board private structure
+ * @hwtstamps: timestamp structure to update
+ * @systim: unsigned 64bit system time value
+ *
+ * We need to convert the system time value stored in the RX/TXSTMP registers
+ * into a hwtstamp which can be used by the upper level timestamping functions.
+ **/
+static void igc_ptp_systim_to_hwtstamp(struct igc_adapter *adapter,
+ struct skb_shared_hwtstamps *hwtstamps,
+ u64 systim)
+{
+ switch (adapter->hw.mac.type) {
+ case igc_i225:
+ memset(hwtstamps, 0, sizeof(*hwtstamps));
+ /* Upper 32 bits contain s, lower 32 bits contain ns. */
+ hwtstamps->hwtstamp = ktime_set(systim >> 32,
+ systim & 0xFFFFFFFF);
+ break;
+ default:
+ break;
+ }
+}
+
+/**
+ * igc_ptp_rx_pktstamp - retrieve Rx per packet timestamp
+ * @q_vector: Pointer to interrupt specific structure
+ * @va: Pointer to address containing Rx buffer
+ * @skb: Buffer containing timestamp and packet
+ *
+ * This function is meant to retrieve the first timestamp from the
+ * first buffer of an incoming frame. The value is stored in little
+ * endian format starting on byte 0. There's a second timestamp
+ * starting on byte 8.
+ **/
+void igc_ptp_rx_pktstamp(struct igc_q_vector *q_vector, void *va,
+ struct sk_buff *skb)
+{
+ struct igc_adapter *adapter = q_vector->adapter;
+ __le64 *regval = (__le64 *)va;
+ int adjust = 0;
+
+ /* The timestamp is recorded in little endian format.
+ * DWORD: | 0 | 1 | 2 | 3
+ * Field: | Timer0 Low | Timer0 High | Timer1 Low | Timer1 High
+ */
+ igc_ptp_systim_to_hwtstamp(adapter, skb_hwtstamps(skb),
+ le64_to_cpu(regval[0]));
+
+ /* adjust timestamp for the RX latency based on link speed */
+ if (adapter->hw.mac.type == igc_i225) {
+ switch (adapter->link_speed) {
+ case SPEED_10:
+ adjust = IGC_I225_RX_LATENCY_10;
+ break;
+ case SPEED_100:
+ adjust = IGC_I225_RX_LATENCY_100;
+ break;
+ case SPEED_1000:
+ adjust = IGC_I225_RX_LATENCY_1000;
+ break;
+ case SPEED_2500:
+ adjust = IGC_I225_RX_LATENCY_2500;
+ break;
+ }
+ }
+ skb_hwtstamps(skb)->hwtstamp =
+ ktime_sub_ns(skb_hwtstamps(skb)->hwtstamp, adjust);
+}
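/* Illustration: the prepended-timestamp layout described above, a sketch
 * assuming va points at byte 0 of the Rx buffer; Timer1 is present at byte 8
 * but unused by this patch.
 */
static void example_read_pktstamps(void *va, u64 *timer0, u64 *timer1)
{
	__le64 *regval = (__le64 *)va;

	*timer0 = le64_to_cpu(regval[0]);	/* bytes 0..7  */
	*timer1 = le64_to_cpu(regval[1]);	/* bytes 8..15 */
}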
+
+/**
+ * igc_ptp_rx_rgtstamp - retrieve Rx timestamp stored in register
+ * @q_vector: Pointer to interrupt specific structure
+ * @skb: Buffer containing timestamp and packet
+ *
+ * This function is meant to retrieve a timestamp from the internal registers
+ * of the adapter and store it in the skb.
+ */
+void igc_ptp_rx_rgtstamp(struct igc_q_vector *q_vector,
+ struct sk_buff *skb)
+{
+ struct igc_adapter *adapter = q_vector->adapter;
+ struct igc_hw *hw = &adapter->hw;
+ u64 regval;
+
+ /* If this bit is set, then the RX registers contain the time
+ * stamp. No other packet will be time stamped until we read
+ * these registers, so read the registers to make them
+ * available again. Because only one packet can be time
+ * stamped at a time, we know that the register values must
+ * belong to this one here and therefore we don't need to
+ * compare any of the additional attributes stored for it.
+ *
+ * If nothing went wrong, then it should have a shared
+ * tx_flags that we can turn into a skb_shared_hwtstamps.
+ */
+ if (!(rd32(IGC_TSYNCRXCTL) & IGC_TSYNCRXCTL_VALID))
+ return;
+
+ regval = rd32(IGC_RXSTMPL);
+ regval |= (u64)rd32(IGC_RXSTMPH) << 32;
+
+ igc_ptp_systim_to_hwtstamp(adapter, skb_hwtstamps(skb), regval);
+
+ /* Update the last_rx_timestamp timer in order to enable watchdog check
+ * for error case of latched timestamp on a dropped packet.
+ */
+ adapter->last_rx_timestamp = jiffies;
+}
+
+/**
+ * igc_ptp_enable_tstamp_rxqueue - Enable RX timestamp for a queue
+ * @adapter: Pointer to adapter structure
+ * @rx_ring: Pointer to RX queue
+ * @timer: Index for timer
+ *
+ * This function enables RX timestamping for a queue, and selects
+ * which 1588 timer will provide the timestamp.
+ */
+static void igc_ptp_enable_tstamp_rxqueue(struct igc_adapter *adapter,
+ struct igc_ring *rx_ring, u8 timer)
+{
+ struct igc_hw *hw = &adapter->hw;
+ int reg_idx = rx_ring->reg_idx;
+ u32 srrctl = rd32(IGC_SRRCTL(reg_idx));
+
+ srrctl |= IGC_SRRCTL_TIMESTAMP;
+ srrctl |= IGC_SRRCTL_TIMER1SEL(timer);
+ srrctl |= IGC_SRRCTL_TIMER0SEL(timer);
+
+ wr32(IGC_SRRCTL(reg_idx), srrctl);
+}
+
+static void igc_ptp_enable_tstamp_all_rxqueues(struct igc_adapter *adapter,
+ u8 timer)
+{
+ int i;
+
+ for (i = 0; i < adapter->num_rx_queues; i++) {
+ struct igc_ring *ring = adapter->rx_ring[i];
+
+ igc_ptp_enable_tstamp_rxqueue(adapter, ring, timer);
+ }
+}
+
+/**
+ * igc_ptp_set_timestamp_mode - setup hardware for timestamping
+ * @adapter: networking device structure
+ * @config: hwtstamp configuration
+ *
+ * Outgoing time stamping can be enabled and disabled. Play nice and
+ * disable it when requested, although it shouldn't cause any overhead
+ * when no packet needs it. At most one packet in the queue may be
+ * marked for time stamping, otherwise it would be impossible to tell
+ * for sure to which packet the hardware time stamp belongs.
+ *
+ * Incoming time stamping has to be configured via the hardware
+ * filters. Not all combinations are supported, in particular event
+ * type has to be specified. Matching the kind of event packet is
+ * not supported, with the exception of "all V2 events regardless of
+ * level 2 or 4".
+ *
+ */
+static int igc_ptp_set_timestamp_mode(struct igc_adapter *adapter,
+ struct hwtstamp_config *config)
+{
+ u32 tsync_tx_ctl = IGC_TSYNCTXCTL_ENABLED;
+ u32 tsync_rx_ctl = IGC_TSYNCRXCTL_ENABLED;
+ struct igc_hw *hw = &adapter->hw;
+ u32 tsync_rx_cfg = 0;
+ bool is_l4 = false;
+ bool is_l2 = false;
+ u32 regval;
+
+ /* reserved for future extensions */
+ if (config->flags)
+ return -EINVAL;
+
+ switch (config->tx_type) {
+ case HWTSTAMP_TX_OFF:
+		tsync_tx_ctl = 0;
+		/* fall through */
+ case HWTSTAMP_TX_ON:
+ break;
+ default:
+ return -ERANGE;
+ }
+
+ switch (config->rx_filter) {
+ case HWTSTAMP_FILTER_NONE:
+ tsync_rx_ctl = 0;
+ break;
+ case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
+ tsync_rx_ctl |= IGC_TSYNCRXCTL_TYPE_L4_V1;
+ tsync_rx_cfg = IGC_TSYNCRXCFG_PTP_V1_SYNC_MESSAGE;
+ is_l4 = true;
+ break;
+ case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
+ tsync_rx_ctl |= IGC_TSYNCRXCTL_TYPE_L4_V1;
+ tsync_rx_cfg = IGC_TSYNCRXCFG_PTP_V1_DELAY_REQ_MESSAGE;
+ is_l4 = true;
+ break;
+ case HWTSTAMP_FILTER_PTP_V2_EVENT:
+ case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
+ case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
+ case HWTSTAMP_FILTER_PTP_V2_SYNC:
+ case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
+ case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
+ case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
+ case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
+ case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
+ tsync_rx_ctl |= IGC_TSYNCRXCTL_TYPE_EVENT_V2;
+ config->rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
+ is_l2 = true;
+ is_l4 = true;
+ break;
+ case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
+ case HWTSTAMP_FILTER_NTP_ALL:
+ case HWTSTAMP_FILTER_ALL:
+ tsync_rx_ctl |= IGC_TSYNCRXCTL_TYPE_ALL;
+ config->rx_filter = HWTSTAMP_FILTER_ALL;
+ break;
+ default:
+ config->rx_filter = HWTSTAMP_FILTER_NONE;
+ return -ERANGE;
+ }
+
+ /* Per-packet timestamping only works if all packets are
+ * timestamped, so enable timestamping in all packets as long
+ * as one Rx filter was configured.
+ */
+ if (tsync_rx_ctl) {
+ tsync_rx_ctl = IGC_TSYNCRXCTL_ENABLED;
+ tsync_rx_ctl |= IGC_TSYNCRXCTL_TYPE_ALL;
+ tsync_rx_ctl |= IGC_TSYNCRXCTL_RXSYNSIG;
+ config->rx_filter = HWTSTAMP_FILTER_ALL;
+ is_l2 = true;
+ is_l4 = true;
+
+ if (hw->mac.type == igc_i225) {
+ regval = rd32(IGC_RXPBS);
+ regval |= IGC_RXPBS_CFG_TS_EN;
+ wr32(IGC_RXPBS, regval);
+
+ /* FIXME: For now, only support retrieving RX
+ * timestamps from timer 0
+ */
+ igc_ptp_enable_tstamp_all_rxqueues(adapter, 0);
+ }
+ }
+
+ if (tsync_tx_ctl) {
+ tsync_tx_ctl = IGC_TSYNCTXCTL_ENABLED;
+ tsync_tx_ctl |= IGC_TSYNCTXCTL_TXSYNSIG;
+ }
+
+ /* enable/disable TX */
+ regval = rd32(IGC_TSYNCTXCTL);
+ regval &= ~IGC_TSYNCTXCTL_ENABLED;
+ regval |= tsync_tx_ctl;
+ wr32(IGC_TSYNCTXCTL, regval);
+
+ /* enable/disable RX */
+ regval = rd32(IGC_TSYNCRXCTL);
+ regval &= ~(IGC_TSYNCRXCTL_ENABLED | IGC_TSYNCRXCTL_TYPE_MASK);
+ regval |= tsync_rx_ctl;
+ wr32(IGC_TSYNCRXCTL, regval);
+
+ /* define which PTP packets are time stamped */
+ wr32(IGC_TSYNCRXCFG, tsync_rx_cfg);
+
+ /* define ethertype filter for timestamped packets */
+ if (is_l2)
+ wr32(IGC_ETQF(3),
+ (IGC_ETQF_FILTER_ENABLE | /* enable filter */
+ IGC_ETQF_1588 | /* enable timestamping */
+ ETH_P_1588)); /* 1588 eth protocol type */
+ else
+ wr32(IGC_ETQF(3), 0);
+
+ /* L4 Queue Filter[3]: filter by destination port and protocol */
+ if (is_l4) {
+ u32 ftqf = (IPPROTO_UDP /* UDP */
+ | IGC_FTQF_VF_BP /* VF not compared */
+ | IGC_FTQF_1588_TIME_STAMP /* Enable Timestamp */
+ | IGC_FTQF_MASK); /* mask all inputs */
+ ftqf &= ~IGC_FTQF_MASK_PROTO_BP; /* enable protocol check */
+
+ wr32(IGC_IMIR(3), htons(PTP_EV_PORT));
+ wr32(IGC_IMIREXT(3),
+ (IGC_IMIREXT_SIZE_BP | IGC_IMIREXT_CTRL_BP));
+ wr32(IGC_FTQF(3), ftqf);
+ } else {
+ wr32(IGC_FTQF(3), IGC_FTQF_MASK);
+ }
+ wrfl();
+
+ /* clear TX/RX time stamp registers, just to be sure */
+ regval = rd32(IGC_TXSTMPL);
+ regval = rd32(IGC_TXSTMPH);
+ regval = rd32(IGC_RXSTMPL);
+ regval = rd32(IGC_RXSTMPH);
+
+ return 0;
+}
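/* Illustration: the L2 filter word written to ETQF(3) above, spelled out.
 * ETH_P_1588 is 0x88F7, the PTP-over-Ethernet ethertype; the two flag bits
 * are register-specific enables, per the inline comments above.
 */
static u32 example_ptp_etqf(void)
{
	return IGC_ETQF_FILTER_ENABLE |	/* activate this filter slot */
	       IGC_ETQF_1588 |		/* enable timestamping on matches */
	       ETH_P_1588;		/* match ethertype 0x88F7 */
}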
+
+void igc_ptp_tx_hang(struct igc_adapter *adapter)
+{
+ bool timeout = time_is_before_jiffies(adapter->ptp_tx_start +
+ IGC_PTP_TX_TIMEOUT);
+ struct igc_hw *hw = &adapter->hw;
+
+ if (!adapter->ptp_tx_skb)
+ return;
+
+ if (!test_bit(__IGC_PTP_TX_IN_PROGRESS, &adapter->state))
+ return;
+
+ /* If we haven't received a timestamp within the timeout, it is
+ * reasonable to assume that it will never occur, so we can unlock the
+ * timestamp bit when this occurs.
+ */
+ if (timeout) {
+ cancel_work_sync(&adapter->ptp_tx_work);
+ dev_kfree_skb_any(adapter->ptp_tx_skb);
+ adapter->ptp_tx_skb = NULL;
+ clear_bit_unlock(__IGC_PTP_TX_IN_PROGRESS, &adapter->state);
+ adapter->tx_hwtstamp_timeouts++;
+ /* Clear the Tx valid bit in TSYNCTXCTL register to enable
+ * interrupt
+ */
+ rd32(IGC_TXSTMPH);
+ dev_warn(&adapter->pdev->dev, "clearing Tx timestamp hang\n");
+ }
+}
+
+/**
+ * igc_ptp_tx_hwtstamp - utility function which checks for TX time stamp
+ * @adapter: Board private structure
+ *
+ * If we were asked to do hardware stamping and such a time stamp is
+ * available, then it must have been for this skb here because we only
+ * allow only one such packet into the queue.
+ */
+static void igc_ptp_tx_hwtstamp(struct igc_adapter *adapter)
+{
+ struct sk_buff *skb = adapter->ptp_tx_skb;
+ struct skb_shared_hwtstamps shhwtstamps;
+ struct igc_hw *hw = &adapter->hw;
+ u64 regval;
+
+ regval = rd32(IGC_TXSTMPL);
+ regval |= (u64)rd32(IGC_TXSTMPH) << 32;
+ igc_ptp_systim_to_hwtstamp(adapter, &shhwtstamps, regval);
+
+ /* Clear the lock early before calling skb_tstamp_tx so that
+ * applications are not woken up before the lock bit is clear. We use
+ * a copy of the skb pointer to ensure other threads can't change it
+ * while we're notifying the stack.
+ */
+ adapter->ptp_tx_skb = NULL;
+ clear_bit_unlock(__IGC_PTP_TX_IN_PROGRESS, &adapter->state);
+
+ /* Notify the stack and free the skb after we've unlocked */
+ skb_tstamp_tx(skb, &shhwtstamps);
+ dev_kfree_skb_any(skb);
+}
+
+/**
+ * igc_ptp_tx_work - check for a valid Tx timestamp
+ * @work: pointer to work struct
+ *
+ * This work function polls the TSYNCTXCTL valid bit to determine when a
+ * timestamp has been taken for the current stored skb.
+ */
+void igc_ptp_tx_work(struct work_struct *work)
+{
+ struct igc_adapter *adapter = container_of(work, struct igc_adapter,
+ ptp_tx_work);
+ struct igc_hw *hw = &adapter->hw;
+ u32 tsynctxctl;
+
+ if (!adapter->ptp_tx_skb)
+ return;
+
+ if (time_is_before_jiffies(adapter->ptp_tx_start +
+ IGC_PTP_TX_TIMEOUT)) {
+ dev_kfree_skb_any(adapter->ptp_tx_skb);
+ adapter->ptp_tx_skb = NULL;
+ clear_bit_unlock(__IGC_PTP_TX_IN_PROGRESS, &adapter->state);
+ adapter->tx_hwtstamp_timeouts++;
+ /* Clear the tx valid bit in TSYNCTXCTL register to enable
+ * interrupt
+ */
+ rd32(IGC_TXSTMPH);
+ dev_warn(&adapter->pdev->dev, "clearing Tx timestamp hang\n");
+ return;
+ }
+
+ tsynctxctl = rd32(IGC_TSYNCTXCTL);
+ if (tsynctxctl & IGC_TSYNCTXCTL_VALID)
+ igc_ptp_tx_hwtstamp(adapter);
+ else
+ /* reschedule to check later */
+ schedule_work(&adapter->ptp_tx_work);
+}
+
+/**
+ * igc_ptp_set_ts_config - set hardware time stamping config
+ * @netdev: network interface device structure
+ * @ifr: interface request data
+ *
+ **/
+int igc_ptp_set_ts_config(struct net_device *netdev, struct ifreq *ifr)
+{
+ struct igc_adapter *adapter = netdev_priv(netdev);
+ struct hwtstamp_config config;
+ int err;
+
+ if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
+ return -EFAULT;
+
+ err = igc_ptp_set_timestamp_mode(adapter, &config);
+ if (err)
+ return err;
+
+ /* save these settings for future reference */
+ memcpy(&adapter->tstamp_config, &config,
+ sizeof(adapter->tstamp_config));
+
+ return copy_to_user(ifr->ifr_data, &config, sizeof(config)) ?
+ -EFAULT : 0;
+}
+
+/**
+ * igc_ptp_get_ts_config - get hardware time stamping config
+ * @netdev: network interface device structure
+ * @ifr: interface request data
+ *
+ * Get the hwtstamp_config settings to return to the user. Rather than attempt
+ * to deconstruct the settings from the registers, just return a shadow copy
+ * of the last known settings.
+ **/
+int igc_ptp_get_ts_config(struct net_device *netdev, struct ifreq *ifr)
+{
+ struct igc_adapter *adapter = netdev_priv(netdev);
+ struct hwtstamp_config *config = &adapter->tstamp_config;
+
+ return copy_to_user(ifr->ifr_data, config, sizeof(*config)) ?
+ -EFAULT : 0;
+}
+
+/**
+ * igc_ptp_init - Initialize PTP functionality
+ * @adapter: Board private structure
+ *
+ * This function is called at device probe to initialize the PTP
+ * functionality.
+ */
+void igc_ptp_init(struct igc_adapter *adapter)
+{
+ struct net_device *netdev = adapter->netdev;
+ struct igc_hw *hw = &adapter->hw;
+
+ switch (hw->mac.type) {
+ case igc_i225:
+ snprintf(adapter->ptp_caps.name, 16, "%pm", netdev->dev_addr);
+ adapter->ptp_caps.owner = THIS_MODULE;
+ adapter->ptp_caps.max_adj = 62499999;
+ adapter->ptp_caps.adjfine = igc_ptp_adjfine_i225;
+ adapter->ptp_caps.adjtime = igc_ptp_adjtime_i225;
+ adapter->ptp_caps.gettimex64 = igc_ptp_gettimex64_i225;
+ adapter->ptp_caps.settime64 = igc_ptp_settime_i225;
+ adapter->ptp_caps.enable = igc_ptp_feature_enable_i225;
+ break;
+ default:
+ adapter->ptp_clock = NULL;
+ return;
+ }
+
+ spin_lock_init(&adapter->tmreg_lock);
+ INIT_WORK(&adapter->ptp_tx_work, igc_ptp_tx_work);
+
+ adapter->tstamp_config.rx_filter = HWTSTAMP_FILTER_NONE;
+ adapter->tstamp_config.tx_type = HWTSTAMP_TX_OFF;
+
+ igc_ptp_reset(adapter);
+
+ adapter->ptp_clock = ptp_clock_register(&adapter->ptp_caps,
+ &adapter->pdev->dev);
+ if (IS_ERR(adapter->ptp_clock)) {
+ adapter->ptp_clock = NULL;
+ dev_err(&adapter->pdev->dev, "ptp_clock_register failed\n");
+ } else if (adapter->ptp_clock) {
+ dev_info(&adapter->pdev->dev, "added PHC on %s\n",
+ adapter->netdev->name);
+ adapter->ptp_flags |= IGC_PTP_ENABLED;
+ }
+}
+
+/**
+ * igc_ptp_suspend - Disable PTP work items and prepare for suspend
+ * @adapter: Board private structure
+ *
+ * This function stops the overflow check work and PTP Tx timestamp work, and
+ * will prepare the device for OS suspend.
+ */
+void igc_ptp_suspend(struct igc_adapter *adapter)
+{
+ if (!(adapter->ptp_flags & IGC_PTP_ENABLED))
+ return;
+
+ cancel_work_sync(&adapter->ptp_tx_work);
+ if (adapter->ptp_tx_skb) {
+ dev_kfree_skb_any(adapter->ptp_tx_skb);
+ adapter->ptp_tx_skb = NULL;
+ clear_bit_unlock(__IGC_PTP_TX_IN_PROGRESS, &adapter->state);
+ }
+}
+
+/**
+ * igc_ptp_stop - Disable PTP device and stop the overflow check.
+ * @adapter: Board private structure.
+ *
+ * This function stops the PTP support and cancels the delayed work.
+ **/
+void igc_ptp_stop(struct igc_adapter *adapter)
+{
+ igc_ptp_suspend(adapter);
+
+ if (adapter->ptp_clock) {
+ ptp_clock_unregister(adapter->ptp_clock);
+ dev_info(&adapter->pdev->dev, "removed PHC on %s\n",
+ adapter->netdev->name);
+ adapter->ptp_flags &= ~IGC_PTP_ENABLED;
+ }
+}
+
+/**
+ * igc_ptp_reset - Re-enable the adapter for PTP following a reset.
+ * @adapter: Board private structure.
+ *
+ * This function handles the reset work required to re-enable the PTP device.
+ **/
+void igc_ptp_reset(struct igc_adapter *adapter)
+{
+ struct igc_hw *hw = &adapter->hw;
+ unsigned long flags;
+
+ /* reset the tstamp_config */
+ igc_ptp_set_timestamp_mode(adapter, &adapter->tstamp_config);
+
+ spin_lock_irqsave(&adapter->tmreg_lock, flags);
+
+ switch (adapter->hw.mac.type) {
+ case igc_i225:
+ wr32(IGC_TSAUXC, 0x0);
+ wr32(IGC_TSSDP, 0x0);
+ wr32(IGC_TSIM, IGC_TSICR_INTERRUPTS);
+ wr32(IGC_IMS, IGC_IMS_TS);
+ break;
+ default:
+ /* No work to do. */
+ goto out;
+ }
+
+ /* Re-initialize the timer. */
+ if (hw->mac.type == igc_i225) {
+ struct timespec64 ts64 = ktime_to_timespec64(ktime_get_real());
+
+ igc_ptp_write_i225(adapter, &ts64);
+ } else {
+ timecounter_init(&adapter->tc, &adapter->cc,
+ ktime_to_ns(ktime_get_real()));
+ }
+out:
+ spin_unlock_irqrestore(&adapter->tmreg_lock, flags);
+
+ wrfl();
+}
diff --git a/drivers/net/ethernet/intel/igc/igc_regs.h b/drivers/net/ethernet/intel/igc/igc_regs.h
index 50d7c04dccf5..c9029b549b90 100644
--- a/drivers/net/ethernet/intel/igc/igc_regs.h
+++ b/drivers/net/ethernet/intel/igc/igc_regs.h
@@ -12,6 +12,7 @@
#define IGC_MDIC 0x00020 /* MDI Control - RW */
#define IGC_MDICNFG 0x00E04 /* MDC/MDIO Configuration - RW */
#define IGC_CONNSW 0x00034 /* Copper/Fiber switch control - RW */
+#define IGC_I225_PHPM 0x00E14 /* I225 PHY Power Management */
/* Internal Packet Buffer Size Registers */
#define IGC_RXPBS 0x02404 /* Rx Packet Buffer Size - RW */
@@ -209,12 +210,48 @@
#define IGC_LENERRS 0x04138 /* Length Errors Count */
#define IGC_HRMPC 0x0A018 /* Header Redirection Missed Packet Count */
+/* Time sync registers */
+#define IGC_TSICR 0x0B66C /* Time Sync Interrupt Cause */
+#define IGC_TSIM 0x0B674 /* Time Sync Interrupt Mask Register */
+#define IGC_TSAUXC 0x0B640 /* Timesync Auxiliary Control register */
+#define IGC_TSYNCRXCTL 0x0B620 /* Rx Time Sync Control register - RW */
+#define IGC_TSYNCTXCTL 0x0B614 /* Tx Time Sync Control register - RW */
+#define IGC_TSYNCRXCFG 0x05F50 /* Time Sync Rx Configuration - RW */
+#define IGC_TSSDP 0x0003C /* Time Sync SDP Configuration Register - RW */
+
+#define IGC_IMIR(_i) (0x05A80 + ((_i) * 4)) /* Immediate Interrupt */
+#define IGC_IMIREXT(_i) (0x05AA0 + ((_i) * 4)) /* Immediate INTR Ext*/
+
+#define IGC_FTQF(_n) (0x059E0 + (4 * (_n))) /* 5-tuple Queue Fltr */
+
+#define IGC_RXPBS 0x02404 /* Rx Packet Buffer Size - RW */
+
+/* System Time Registers */
+#define IGC_SYSTIML 0x0B600 /* System time register Low - RO */
+#define IGC_SYSTIMH 0x0B604 /* System time register High - RO */
+#define IGC_SYSTIMR 0x0B6F8 /* System time register Residue */
+#define IGC_TIMINCA 0x0B608 /* Increment attributes register - RW */
+
+#define IGC_RXSTMPL 0x0B624 /* Rx timestamp Low - RO */
+#define IGC_RXSTMPH 0x0B628 /* Rx timestamp High - RO */
+#define IGC_TXSTMPL 0x0B618 /* Tx timestamp value Low - RO */
+#define IGC_TXSTMPH 0x0B61C /* Tx timestamp value High - RO */
+
/* Management registers */
#define IGC_MANC 0x05820 /* Management Control - RW */
/* Shadow Ram Write Register - RW */
#define IGC_SRWR 0x12018
+/* Wake Up registers */
+#define IGC_WUC 0x05800 /* Wakeup Control - RW */
+#define IGC_WUFC 0x05808 /* Wakeup Filter Control - RW */
+#define IGC_WUS 0x05810 /* Wakeup Status - R/W1C */
+#define IGC_WUPL 0x05900 /* Wakeup Packet Length - RW */
+
+/* Wake Up packet memory */
+#define IGC_WUPM_REG(_i) (0x05A00 + ((_i) * 4))
+
/* forward declaration */
struct igc_hw;
u32 igc_rd32(struct igc_hw *hw, u32 reg);
diff --git a/drivers/net/ethernet/intel/ixgb/ixgb_main.c b/drivers/net/ethernet/intel/ixgb/ixgb_main.c
index 3d8c051dd327..b64e91ea3465 100644
--- a/drivers/net/ethernet/intel/ixgb/ixgb_main.c
+++ b/drivers/net/ethernet/intel/ixgb/ixgb_main.c
@@ -70,7 +70,7 @@ static int ixgb_clean(struct napi_struct *, int);
static bool ixgb_clean_rx_irq(struct ixgb_adapter *, int *, int);
static void ixgb_alloc_rx_buffers(struct ixgb_adapter *, int);
-static void ixgb_tx_timeout(struct net_device *dev);
+static void ixgb_tx_timeout(struct net_device *dev, unsigned int txqueue);
static void ixgb_tx_timeout_task(struct work_struct *work);
static void ixgb_vlan_strip_enable(struct ixgb_adapter *adapter);
@@ -1538,7 +1538,7 @@ ixgb_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
**/
static void
-ixgb_tx_timeout(struct net_device *netdev)
+ixgb_tx_timeout(struct net_device *netdev, unsigned int txqueue)
{
struct ixgb_adapter *adapter = netdev_priv(netdev);
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_debugfs.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_debugfs.c
index 171cdc552961..5b1cf49df3d3 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_debugfs.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_debugfs.c
@@ -166,7 +166,9 @@ static ssize_t ixgbe_dbg_netdev_ops_write(struct file *filp,
ixgbe_dbg_netdev_ops_buf[len] = '\0';
if (strncmp(ixgbe_dbg_netdev_ops_buf, "tx_timeout", 10) == 0) {
- adapter->netdev->netdev_ops->ndo_tx_timeout(adapter->netdev);
+ /* TX Queue number below is wrong, but ixgbe does not use it */
+ adapter->netdev->netdev_ops->ndo_tx_timeout(adapter->netdev,
+ UINT_MAX);
e_dev_info("tx_timeout called\n");
} else {
e_dev_info("Unknown command: %s\n", ixgbe_dbg_netdev_ops_buf);
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
index 82a30b597cf9..718931d951bc 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
@@ -5239,7 +5239,7 @@ static void ixgbe_fdir_filter_restore(struct ixgbe_adapter *adapter)
struct ixgbe_hw *hw = &adapter->hw;
struct hlist_node *node2;
struct ixgbe_fdir_filter *filter;
- u64 action;
+ u8 queue;
spin_lock(&adapter->fdir_perfect_lock);
@@ -5248,17 +5248,34 @@ static void ixgbe_fdir_filter_restore(struct ixgbe_adapter *adapter)
hlist_for_each_entry_safe(filter, node2,
&adapter->fdir_filter_list, fdir_node) {
- action = filter->action;
- if (action != IXGBE_FDIR_DROP_QUEUE && action != 0)
- action =
- (action >> ETHTOOL_RX_FLOW_SPEC_RING_VF_OFF) - 1;
+ if (filter->action == IXGBE_FDIR_DROP_QUEUE) {
+ queue = IXGBE_FDIR_DROP_QUEUE;
+ } else {
+ u32 ring = ethtool_get_flow_spec_ring(filter->action);
+ u8 vf = ethtool_get_flow_spec_ring_vf(filter->action);
+
+ if (!vf && (ring >= adapter->num_rx_queues)) {
+ e_err(drv, "FDIR restore failed without VF, ring: %u\n",
+ ring);
+ continue;
+ } else if (vf &&
+ ((vf > adapter->num_vfs) ||
+ ring >= adapter->num_rx_queues_per_pool)) {
+ e_err(drv, "FDIR restore failed with VF, vf: %hhu, ring: %u\n",
+ vf, ring);
+ continue;
+ }
+
+ /* Map the ring onto the absolute queue index */
+ if (!vf)
+ queue = adapter->rx_ring[ring]->reg_idx;
+ else
+ queue = ((vf - 1) *
+ adapter->num_rx_queues_per_pool) + ring;
+ }
ixgbe_fdir_write_perfect_filter_82599(hw,
- &filter->filter,
- filter->sw_idx,
- (action == IXGBE_FDIR_DROP_QUEUE) ?
- IXGBE_FDIR_DROP_QUEUE :
- adapter->rx_ring[action]->reg_idx);
+ &filter->filter, filter->sw_idx, queue);
}
spin_unlock(&adapter->fdir_perfect_lock);
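/* Illustration: the (vf, ring) -> absolute queue mapping introduced above,
 * e.g. with 4 Rx queues per pool, vf 2 / ring 1 maps to queue
 * (2 - 1) * 4 + 1 = 5. A sketch of just the index math; the non-VF case in
 * the driver additionally translates ring through rx_ring[ring]->reg_idx.
 */
static u8 example_fdir_queue(u8 vf, u32 ring, u16 queues_per_pool)
{
	return vf ? (vf - 1) * queues_per_pool + ring : ring;
}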
@@ -6158,7 +6175,7 @@ static void ixgbe_set_eee_capable(struct ixgbe_adapter *adapter)
* ixgbe_tx_timeout - Respond to a Tx Hang
* @netdev: network interface device structure
**/
-static void ixgbe_tx_timeout(struct net_device *netdev)
+static void ixgbe_tx_timeout(struct net_device *netdev, unsigned int txqueue)
{
struct ixgbe_adapter *adapter = netdev_priv(netdev);
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c
index b43be9f14105..74b540ebb3dc 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c
@@ -277,7 +277,7 @@ static bool ixgbe_alloc_buffer_zc(struct ixgbe_ring *rx_ring,
bi->handle = xsk_umem_adjust_offset(umem, handle, umem->headroom);
- xsk_umem_discard_addr(umem);
+ xsk_umem_release_addr(umem);
return true;
}
@@ -304,7 +304,7 @@ static bool ixgbe_alloc_buffer_slow_zc(struct ixgbe_ring *rx_ring,
bi->handle = xsk_umem_adjust_offset(umem, handle, umem->headroom);
- xsk_umem_discard_addr_rq(umem);
+ xsk_umem_release_addr_rq(umem);
return true;
}
diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
index 076f2da36f27..4622c4ea2e46 100644
--- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
+++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
@@ -250,7 +250,7 @@ static void ixgbevf_tx_timeout_reset(struct ixgbevf_adapter *adapter)
* ixgbevf_tx_timeout - Respond to a Tx Hang
* @netdev: network interface device structure
**/
-static void ixgbevf_tx_timeout(struct net_device *netdev)
+static void ixgbevf_tx_timeout(struct net_device *netdev, unsigned int txqueue)
{
struct ixgbevf_adapter *adapter = netdev_priv(netdev);
@@ -2081,11 +2081,6 @@ static int ixgbevf_write_uc_addr_list(struct net_device *netdev)
struct ixgbe_hw *hw = &adapter->hw;
int count = 0;
- if ((netdev_uc_count(netdev)) > 10) {
- pr_err("Too many unicast filters - No Space\n");
- return -ENOSPC;
- }
-
if (!netdev_uc_empty(netdev)) {
struct netdev_hw_addr *ha;
diff --git a/drivers/net/ethernet/jme.c b/drivers/net/ethernet/jme.c
index 25aa400e2e3c..2e4975572e9f 100644
--- a/drivers/net/ethernet/jme.c
+++ b/drivers/net/ethernet/jme.c
@@ -2337,7 +2337,7 @@ jme_change_mtu(struct net_device *netdev, int new_mtu)
}
static void
-jme_tx_timeout(struct net_device *netdev)
+jme_tx_timeout(struct net_device *netdev, unsigned int txqueue)
{
struct jme_adapter *jme = netdev_priv(netdev);
diff --git a/drivers/net/ethernet/korina.c b/drivers/net/ethernet/korina.c
index ae195f8adff5..f1d84921e42b 100644
--- a/drivers/net/ethernet/korina.c
+++ b/drivers/net/ethernet/korina.c
@@ -917,7 +917,7 @@ static void korina_restart_task(struct work_struct *work)
enable_irq(lp->rx_irq);
}
-static void korina_tx_timeout(struct net_device *dev)
+static void korina_tx_timeout(struct net_device *dev, unsigned int txqueue)
{
struct korina_private *lp = netdev_priv(dev);
@@ -1043,7 +1043,7 @@ static int korina_probe(struct platform_device *pdev)
r = platform_get_resource_byname(pdev, IORESOURCE_MEM, "korina_regs");
dev->base_addr = r->start;
- lp->eth_regs = ioremap_nocache(r->start, resource_size(r));
+ lp->eth_regs = ioremap(r->start, resource_size(r));
if (!lp->eth_regs) {
printk(KERN_ERR DRV_NAME ": cannot remap registers\n");
rc = -ENXIO;
@@ -1051,7 +1051,7 @@ static int korina_probe(struct platform_device *pdev)
}
r = platform_get_resource_byname(pdev, IORESOURCE_MEM, "korina_dma_rx");
- lp->rx_dma_regs = ioremap_nocache(r->start, resource_size(r));
+ lp->rx_dma_regs = ioremap(r->start, resource_size(r));
if (!lp->rx_dma_regs) {
printk(KERN_ERR DRV_NAME ": cannot remap Rx DMA registers\n");
rc = -ENXIO;
@@ -1059,7 +1059,7 @@ static int korina_probe(struct platform_device *pdev)
}
r = platform_get_resource_byname(pdev, IORESOURCE_MEM, "korina_dma_tx");
- lp->tx_dma_regs = ioremap_nocache(r->start, resource_size(r));
+ lp->tx_dma_regs = ioremap(r->start, resource_size(r));
if (!lp->tx_dma_regs) {
printk(KERN_ERR DRV_NAME ": cannot remap Tx DMA registers\n");
rc = -ENXIO;
diff --git a/drivers/net/ethernet/lantiq_etop.c b/drivers/net/ethernet/lantiq_etop.c
index 6e73ffe6f928..2d0c52f7106b 100644
--- a/drivers/net/ethernet/lantiq_etop.c
+++ b/drivers/net/ethernet/lantiq_etop.c
@@ -510,13 +510,6 @@ ltq_etop_change_mtu(struct net_device *dev, int new_mtu)
}
static int
-ltq_etop_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
-{
- /* TODO: mii-toll reports "No MII transceiver present!." ?!*/
- return phy_mii_ioctl(dev->phydev, rq, cmd);
-}
-
-static int
ltq_etop_set_mac_address(struct net_device *dev, void *p)
{
int ret = eth_mac_addr(dev, p);
@@ -594,7 +587,7 @@ err_hw:
}
static void
-ltq_etop_tx_timeout(struct net_device *dev)
+ltq_etop_tx_timeout(struct net_device *dev, unsigned int txqueue)
{
int err;
@@ -616,7 +609,7 @@ static const struct net_device_ops ltq_eth_netdev_ops = {
.ndo_stop = ltq_etop_stop,
.ndo_start_xmit = ltq_etop_tx,
.ndo_change_mtu = ltq_etop_change_mtu,
- .ndo_do_ioctl = ltq_etop_ioctl,
+ .ndo_do_ioctl = phy_do_ioctl,
.ndo_set_mac_address = ltq_etop_set_mac_address,
.ndo_validate_addr = eth_validate_addr,
.ndo_set_rx_mode = ltq_etop_set_multicast_list,
@@ -649,7 +642,7 @@ ltq_etop_probe(struct platform_device *pdev)
goto err_out;
}
- ltq_etop_membase = devm_ioremap_nocache(&pdev->dev,
+ ltq_etop_membase = devm_ioremap(&pdev->dev,
res->start, resource_size(res));
if (!ltq_etop_membase) {
dev_err(&pdev->dev, "failed to remap etop engine %d\n",
diff --git a/drivers/net/ethernet/marvell/mv643xx_eth.c b/drivers/net/ethernet/marvell/mv643xx_eth.c
index 65a093216dac..3c8125cbc84d 100644
--- a/drivers/net/ethernet/marvell/mv643xx_eth.c
+++ b/drivers/net/ethernet/marvell/mv643xx_eth.c
@@ -2590,7 +2590,7 @@ static void tx_timeout_task(struct work_struct *ugly)
}
}
-static void mv643xx_eth_tx_timeout(struct net_device *dev)
+static void mv643xx_eth_tx_timeout(struct net_device *dev, unsigned int txqueue)
{
struct mv643xx_eth_private *mp = netdev_priv(dev);
diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c
index 71a872d46bc4..037e054b01a2 100644
--- a/drivers/net/ethernet/marvell/mvneta.c
+++ b/drivers/net/ethernet/marvell/mvneta.c
@@ -324,8 +324,7 @@
ETH_HLEN + ETH_FCS_LEN, \
cache_line_size())
-#define MVNETA_SKB_HEADROOM (max(XDP_PACKET_HEADROOM, NET_SKB_PAD) + \
- NET_IP_ALIGN)
+#define MVNETA_SKB_HEADROOM max(XDP_PACKET_HEADROOM, NET_SKB_PAD)
#define MVNETA_SKB_PAD (SKB_DATA_ALIGN(sizeof(struct skb_shared_info) + \
MVNETA_SKB_HEADROOM))
#define MVNETA_SKB_SIZE(len) (SKB_DATA_ALIGN(len) + MVNETA_SKB_PAD)
@@ -1167,6 +1166,7 @@ bm_mtu_err:
mvneta_bm_pool_destroy(pp->bm_priv, pp->pool_short, 1 << pp->id);
pp->bm_priv = NULL;
+ pp->rx_offset_correction = MVNETA_SKB_HEADROOM;
mvreg_write(pp, MVNETA_ACC_MODE, MVNETA_ACC_MODE_EXT1);
netdev_info(pp->dev, "fail to update MTU, fall back to software BM\n");
}
@@ -2081,7 +2081,11 @@ static int
mvneta_run_xdp(struct mvneta_port *pp, struct mvneta_rx_queue *rxq,
struct bpf_prog *prog, struct xdp_buff *xdp)
{
- u32 ret, act = bpf_prog_run_xdp(prog, xdp);
+ unsigned int len;
+ u32 ret, act;
+
+ len = xdp->data_end - xdp->data_hard_start - pp->rx_offset_correction;
+ act = bpf_prog_run_xdp(prog, xdp);
switch (act) {
case XDP_PASS:
@@ -2094,9 +2098,8 @@ mvneta_run_xdp(struct mvneta_port *pp, struct mvneta_rx_queue *rxq,
if (err) {
ret = MVNETA_XDP_DROPPED;
__page_pool_put_page(rxq->page_pool,
- virt_to_head_page(xdp->data),
- xdp->data_end - xdp->data_hard_start,
- true);
+ virt_to_head_page(xdp->data),
+ len, true);
} else {
ret = MVNETA_XDP_REDIR;
}
@@ -2106,9 +2109,8 @@ mvneta_run_xdp(struct mvneta_port *pp, struct mvneta_rx_queue *rxq,
ret = mvneta_xdp_xmit_back(pp, xdp);
if (ret != MVNETA_XDP_TX)
__page_pool_put_page(rxq->page_pool,
- virt_to_head_page(xdp->data),
- xdp->data_end - xdp->data_hard_start,
- true);
+ virt_to_head_page(xdp->data),
+ len, true);
break;
default:
bpf_warn_invalid_xdp_action(act);
@@ -2119,8 +2121,7 @@ mvneta_run_xdp(struct mvneta_port *pp, struct mvneta_rx_queue *rxq,
case XDP_DROP:
__page_pool_put_page(rxq->page_pool,
virt_to_head_page(xdp->data),
- xdp->data_end - xdp->data_hard_start,
- true);
+ len, true);
ret = MVNETA_XDP_DROPPED;
break;
}
@@ -3071,7 +3072,7 @@ static int mvneta_create_page_pool(struct mvneta_port *pp,
.order = 0,
.flags = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV,
.pool_size = size,
- .nid = cpu_to_node(0),
+ .nid = NUMA_NO_NODE,
.dev = pp->dev->dev.parent,
.dma_dir = xdp_prog ? DMA_BIDIRECTIONAL : DMA_FROM_DEVICE,
.offset = pp->rx_offset_correction,
@@ -4225,6 +4226,12 @@ static int mvneta_xdp_setup(struct net_device *dev, struct bpf_prog *prog,
return -EOPNOTSUPP;
}
+ if (pp->bm_priv) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Hardware Buffer Management not supported on XDP");
+ return -EOPNOTSUPP;
+ }
+
need_update = !!pp->xdp_prog != !!prog;
if (running && need_update)
mvneta_stop(dev);
@@ -4941,7 +4948,6 @@ static int mvneta_probe(struct platform_device *pdev)
SET_NETDEV_DEV(dev, &pdev->dev);
pp->id = global_port_id++;
- pp->rx_offset_correction = MVNETA_SKB_HEADROOM;
/* Obtain access to BM resources if enabled and already initialized */
bm_node = of_parse_phandle(dn, "buffer-manager", 0);
@@ -4966,6 +4972,10 @@ static int mvneta_probe(struct platform_device *pdev)
}
of_node_put(bm_node);
+ /* sw buffer management */
+ if (!pp->bm_priv)
+ pp->rx_offset_correction = MVNETA_SKB_HEADROOM;
+
err = mvneta_init(&pdev->dev, pp);
if (err < 0)
goto err_netdev;
@@ -5123,6 +5133,7 @@ static int mvneta_resume(struct device *device)
err = mvneta_bm_port_init(pdev, pp);
if (err < 0) {
dev_info(&pdev->dev, "use SW buffer management\n");
+ pp->rx_offset_correction = MVNETA_SKB_HEADROOM;
pp->bm_priv = NULL;
}
}
diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
index 14e372cda7f4..72133cbe55d4 100644
--- a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
+++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
@@ -1114,7 +1114,7 @@ mvpp2_shared_interrupt_mask_unmask(struct mvpp2_port *port, bool mask)
/* Port configuration routines */
static bool mvpp2_is_xlg(phy_interface_t interface)
{
- return interface == PHY_INTERFACE_MODE_10GKR ||
+ return interface == PHY_INTERFACE_MODE_10GBASER ||
interface == PHY_INTERFACE_MODE_XAUI;
}
@@ -1200,7 +1200,7 @@ static int mvpp22_gop_init(struct mvpp2_port *port)
case PHY_INTERFACE_MODE_2500BASEX:
mvpp22_gop_init_sgmii(port);
break;
- case PHY_INTERFACE_MODE_10GKR:
+ case PHY_INTERFACE_MODE_10GBASER:
if (port->gop_id != 0)
goto invalid_conf;
mvpp22_gop_init_10gkr(port);
@@ -1649,7 +1649,7 @@ static void mvpp22_pcs_reset_deassert(struct mvpp2_port *port)
xpcs = priv->iface_base + MVPP22_XPCS_BASE(port->gop_id);
switch (port->phy_interface) {
- case PHY_INTERFACE_MODE_10GKR:
+ case PHY_INTERFACE_MODE_10GBASER:
val = readl(mpcs + MVPP22_MPCS_CLK_RESET);
val |= MAC_CLK_RESET_MAC | MAC_CLK_RESET_SD_RX |
MAC_CLK_RESET_SD_TX;
@@ -4758,7 +4758,7 @@ static void mvpp2_phylink_validate(struct phylink_config *config,
/* Invalid combinations */
switch (state->interface) {
- case PHY_INTERFACE_MODE_10GKR:
+ case PHY_INTERFACE_MODE_10GBASER:
case PHY_INTERFACE_MODE_XAUI:
if (port->gop_id != 0)
goto empty_set;
@@ -4780,7 +4780,7 @@ static void mvpp2_phylink_validate(struct phylink_config *config,
phylink_set(mask, Asym_Pause);
switch (state->interface) {
- case PHY_INTERFACE_MODE_10GKR:
+ case PHY_INTERFACE_MODE_10GBASER:
case PHY_INTERFACE_MODE_XAUI:
case PHY_INTERFACE_MODE_NA:
if (port->gop_id == 0) {
@@ -4792,6 +4792,8 @@ static void mvpp2_phylink_validate(struct phylink_config *config,
phylink_set(mask, 10000baseER_Full);
phylink_set(mask, 10000baseKR_Full);
}
+ if (state->interface != PHY_INTERFACE_MODE_NA)
+ break;
/* Fall-through */
case PHY_INTERFACE_MODE_RGMII:
case PHY_INTERFACE_MODE_RGMII_ID:
@@ -4802,13 +4804,23 @@ static void mvpp2_phylink_validate(struct phylink_config *config,
phylink_set(mask, 10baseT_Full);
phylink_set(mask, 100baseT_Half);
phylink_set(mask, 100baseT_Full);
+ phylink_set(mask, 1000baseT_Full);
+ phylink_set(mask, 1000baseX_Full);
+ if (state->interface != PHY_INTERFACE_MODE_NA)
+ break;
/* Fall-through */
case PHY_INTERFACE_MODE_1000BASEX:
case PHY_INTERFACE_MODE_2500BASEX:
- phylink_set(mask, 1000baseT_Full);
- phylink_set(mask, 1000baseX_Full);
- phylink_set(mask, 2500baseT_Full);
- phylink_set(mask, 2500baseX_Full);
+ if (port->comphy ||
+ state->interface != PHY_INTERFACE_MODE_2500BASEX) {
+ phylink_set(mask, 1000baseT_Full);
+ phylink_set(mask, 1000baseX_Full);
+ }
+ if (port->comphy ||
+ state->interface == PHY_INTERFACE_MODE_2500BASEX) {
+ phylink_set(mask, 2500baseT_Full);
+ phylink_set(mask, 2500baseX_Full);
+ }
break;
default:
goto empty_set;
@@ -4817,6 +4829,8 @@ static void mvpp2_phylink_validate(struct phylink_config *config,
bitmap_and(supported, supported, mask, __ETHTOOL_LINK_MODE_MASK_NBITS);
bitmap_and(state->advertising, state->advertising, mask,
__ETHTOOL_LINK_MODE_MASK_NBITS);
+
+ phylink_helper_basex_speed(state);
return;
empty_set:
@@ -5233,6 +5247,15 @@ static int mvpp2_port_probe(struct platform_device *pdev,
goto err_free_netdev;
}
+ /*
+ * Rewrite 10GBASE-KR to 10GBASE-R for compatibility with existing DT.
+ * Existing usage of 10GBASE-KR is not correct; no backplane
+ * negotiation is done, and this driver does not actually support
+ * 10GBASE-KR.
+ */
+ if (phy_mode == PHY_INTERFACE_MODE_10GKR)
+ phy_mode = PHY_INTERFACE_MODE_10GBASER;
+
if (port_node) {
comphy = devm_of_phy_get(&pdev->dev, port_node, NULL);
if (IS_ERR(comphy)) {
@@ -5411,6 +5434,16 @@ static int mvpp2_port_probe(struct platform_device *pdev,
port->phylink = NULL;
}
+ /* Cycle the comphy to power it down, saving 270mW per port -
+ * don't worry about an error powering it up. When the comphy
+ * driver does this, we can remove this code.
+ */
+ if (port->comphy) {
+ err = mvpp22_comphy_init(port);
+ if (err == 0)
+ phy_power_off(port->comphy);
+ }
+
err = register_netdev(dev);
if (err < 0) {
dev_err(&pdev->dev, "failed to register netdev\n");
diff --git a/drivers/net/ethernet/marvell/octeontx2/Kconfig b/drivers/net/ethernet/marvell/octeontx2/Kconfig
index fb34fbd62088..ced514c05c97 100644
--- a/drivers/net/ethernet/marvell/octeontx2/Kconfig
+++ b/drivers/net/ethernet/marvell/octeontx2/Kconfig
@@ -25,3 +25,11 @@ config NDC_DIS_DYNAMIC_CACHING
This config option disables caching of dynamic entries such as NIX SQEs
, NPA stack pages etc in NDC. Also locks down NIX SQ/CQ/RQ/RSS and
NPA Aura/Pool contexts.
+
+config OCTEONTX2_PF
+ tristate "Marvell OcteonTX2 NIC Physical Function driver"
+ select OCTEONTX2_MBOX
+ depends on (64BIT && COMPILE_TEST) || ARM64
+ depends on PCI
+ help
+ This driver supports Marvell's OcteonTX2 NIC physical function.
diff --git a/drivers/net/ethernet/marvell/octeontx2/Makefile b/drivers/net/ethernet/marvell/octeontx2/Makefile
index e579dcd54c97..0064a69e0f72 100644
--- a/drivers/net/ethernet/marvell/octeontx2/Makefile
+++ b/drivers/net/ethernet/marvell/octeontx2/Makefile
@@ -3,4 +3,6 @@
# Makefile for Marvell OcteonTX2 device drivers.
#
+obj-$(CONFIG_OCTEONTX2_MBOX) += af/
obj-$(CONFIG_OCTEONTX2_AF) += af/
+obj-$(CONFIG_OCTEONTX2_PF) += nic/
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/common.h b/drivers/net/ethernet/marvell/octeontx2/af/common.h
index 784207bae5f8..cd33c2e6ca5f 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/common.h
+++ b/drivers/net/ethernet/marvell/octeontx2/af/common.h
@@ -143,8 +143,13 @@ enum nix_scheduler {
NIX_TXSCH_LVL_CNT = 0x5,
};
-#define TXSCH_TL1_DFLT_RR_QTM ((1 << 24) - 1)
-#define TXSCH_TL1_DFLT_RR_PRIO (0x1ull)
+#define TXSCH_RR_QTM_MAX ((1 << 24) - 1)
+#define TXSCH_TL1_DFLT_RR_QTM TXSCH_RR_QTM_MAX
+#define TXSCH_TL1_DFLT_RR_PRIO (0x1ull)
+#define MAX_SCHED_WEIGHT 0xFF
+#define DFLT_RR_WEIGHT 71
+#define DFLT_RR_QTM ((DFLT_RR_WEIGHT * TXSCH_RR_QTM_MAX) \
+ / MAX_SCHED_WEIGHT)
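
A quick check of the quantum these macros yield, derived purely from the definitions above (a sketch, not part of the patch):

	/* DFLT_RR_QTM = (71 * ((1 << 24) - 1)) / 255 = 4671303, i.e. a
	 * weight of 71/255 of the maximum 24-bit DWRR quantum.
	 */
	_Static_assert((71ULL * ((1 << 24) - 1)) / 0xFF == 4671303,
		       "default DWRR quantum");
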
/* Min/Max packet sizes, excluding FCS */
#define NIC_HW_MIN_FRS 40
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/mbox.h b/drivers/net/ethernet/marvell/octeontx2/af/mbox.h
index a589748f1240..8bbc1f1d81f5 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/mbox.h
+++ b/drivers/net/ethernet/marvell/octeontx2/af/mbox.h
@@ -210,7 +210,8 @@ M(NIX_SET_RX_CFG, 0x8010, nix_set_rx_cfg, nix_rx_cfg, msg_rsp) \
M(NIX_LSO_FORMAT_CFG, 0x8011, nix_lso_format_cfg, \
nix_lso_format_cfg, \
nix_lso_format_cfg_rsp) \
-M(NIX_RXVLAN_ALLOC, 0x8012, nix_rxvlan_alloc, msg_req, msg_rsp)
+M(NIX_RXVLAN_ALLOC, 0x8012, nix_rxvlan_alloc, msg_req, msg_rsp) \
+M(NIX_GET_MAC_ADDR, 0x8018, nix_get_mac_addr, msg_req, nix_get_mac_addr_rsp) \
/* Messages initiated by AF (range 0xC00 - 0xDFF) */
#define MBOX_UP_CGX_MESSAGES \
@@ -618,6 +619,11 @@ struct nix_set_mac_addr {
u8 mac_addr[ETH_ALEN]; /* MAC address to be set for this pcifunc */
};
+struct nix_get_mac_addr_rsp {
+ struct mbox_msghdr hdr;
+ u8 mac_addr[ETH_ALEN];
+};
+
struct nix_mark_format_cfg {
struct mbox_msghdr hdr;
u8 offset;
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c
index 8a59f7d53fbf..eb5e542424e7 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c
@@ -2546,6 +2546,23 @@ int rvu_mbox_handler_nix_set_mac_addr(struct rvu *rvu,
return 0;
}
+int rvu_mbox_handler_nix_get_mac_addr(struct rvu *rvu,
+ struct msg_req *req,
+ struct nix_get_mac_addr_rsp *rsp)
+{
+ u16 pcifunc = req->hdr.pcifunc;
+ struct rvu_pfvf *pfvf;
+
+ if (!is_nixlf_attached(rvu, pcifunc))
+ return NIX_AF_ERR_AF_LF_INVALID;
+
+ pfvf = rvu_get_pfvf(rvu, pcifunc);
+
+ ether_addr_copy(rsp->mac_addr, pfvf->mac_addr);
+
+ return 0;
+}
+
int rvu_mbox_handler_nix_set_rx_mode(struct rvu *rvu, struct nix_rx_mode *req,
struct msg_rsp *rsp)
{
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/Makefile b/drivers/net/ethernet/marvell/octeontx2/nic/Makefile
new file mode 100644
index 000000000000..41bf00cf5b1d
--- /dev/null
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/Makefile
@@ -0,0 +1,10 @@
+# SPDX-License-Identifier: GPL-2.0
+#
+# Makefile for Marvell's OcteonTX2 ethernet device drivers
+#
+
+obj-$(CONFIG_OCTEONTX2_PF) += octeontx2_nicpf.o
+
+octeontx2_nicpf-y := otx2_pf.o otx2_common.o otx2_txrx.o otx2_ethtool.o
+
+ccflags-y += -I$(srctree)/drivers/net/ethernet/marvell/octeontx2/af
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c
new file mode 100644
index 000000000000..8247d21d0432
--- /dev/null
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c
@@ -0,0 +1,1410 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Marvell OcteonTx2 RVU Ethernet driver
+ *
+ * Copyright (C) 2020 Marvell International Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/interrupt.h>
+#include <linux/pci.h>
+#include <net/tso.h>
+
+#include "otx2_reg.h"
+#include "otx2_common.h"
+#include "otx2_struct.h"
+
+static void otx2_nix_rq_op_stats(struct queue_stats *stats,
+ struct otx2_nic *pfvf, int qidx)
+{
+ u64 incr = (u64)qidx << 32;
+ u64 *ptr;
+
+ ptr = (u64 *)otx2_get_regaddr(pfvf, NIX_LF_RQ_OP_OCTS);
+ stats->bytes = otx2_atomic64_add(incr, ptr);
+
+ ptr = (u64 *)otx2_get_regaddr(pfvf, NIX_LF_RQ_OP_PKTS);
+ stats->pkts = otx2_atomic64_add(incr, ptr);
+}
+
+static void otx2_nix_sq_op_stats(struct queue_stats *stats,
+ struct otx2_nic *pfvf, int qidx)
+{
+ u64 incr = (u64)qidx << 32;
+ u64 *ptr;
+
+ ptr = (u64 *)otx2_get_regaddr(pfvf, NIX_LF_SQ_OP_OCTS);
+ stats->bytes = otx2_atomic64_add(incr, ptr);
+
+ ptr = (u64 *)otx2_get_regaddr(pfvf, NIX_LF_SQ_OP_PKTS);
+ stats->pkts = otx2_atomic64_add(incr, ptr);
+}
+
+void otx2_update_lmac_stats(struct otx2_nic *pfvf)
+{
+ struct msg_req *req;
+
+ if (!netif_running(pfvf->netdev))
+ return;
+
+ otx2_mbox_lock(&pfvf->mbox);
+ req = otx2_mbox_alloc_msg_cgx_stats(&pfvf->mbox);
+ if (!req) {
+ otx2_mbox_unlock(&pfvf->mbox);
+ return;
+ }
+
+ otx2_sync_mbox_msg(&pfvf->mbox);
+ otx2_mbox_unlock(&pfvf->mbox);
+}
+
+int otx2_update_rq_stats(struct otx2_nic *pfvf, int qidx)
+{
+ struct otx2_rcv_queue *rq = &pfvf->qset.rq[qidx];
+
+ if (!pfvf->qset.rq)
+ return 0;
+
+ otx2_nix_rq_op_stats(&rq->stats, pfvf, qidx);
+ return 1;
+}
+
+int otx2_update_sq_stats(struct otx2_nic *pfvf, int qidx)
+{
+ struct otx2_snd_queue *sq = &pfvf->qset.sq[qidx];
+
+ if (!pfvf->qset.sq)
+ return 0;
+
+ otx2_nix_sq_op_stats(&sq->stats, pfvf, qidx);
+ return 1;
+}
+
+void otx2_get_dev_stats(struct otx2_nic *pfvf)
+{
+ struct otx2_dev_stats *dev_stats = &pfvf->hw.dev_stats;
+
+#define OTX2_GET_RX_STATS(reg) \
+ otx2_read64(pfvf, NIX_LF_RX_STATX(reg))
+#define OTX2_GET_TX_STATS(reg) \
+ otx2_read64(pfvf, NIX_LF_TX_STATX(reg))
+
+ dev_stats->rx_bytes = OTX2_GET_RX_STATS(RX_OCTS);
+ dev_stats->rx_drops = OTX2_GET_RX_STATS(RX_DROP);
+ dev_stats->rx_bcast_frames = OTX2_GET_RX_STATS(RX_BCAST);
+ dev_stats->rx_mcast_frames = OTX2_GET_RX_STATS(RX_MCAST);
+ dev_stats->rx_ucast_frames = OTX2_GET_RX_STATS(RX_UCAST);
+ dev_stats->rx_frames = dev_stats->rx_bcast_frames +
+ dev_stats->rx_mcast_frames +
+ dev_stats->rx_ucast_frames;
+
+ dev_stats->tx_bytes = OTX2_GET_TX_STATS(TX_OCTS);
+ dev_stats->tx_drops = OTX2_GET_TX_STATS(TX_DROP);
+ dev_stats->tx_bcast_frames = OTX2_GET_TX_STATS(TX_BCAST);
+ dev_stats->tx_mcast_frames = OTX2_GET_TX_STATS(TX_MCAST);
+ dev_stats->tx_ucast_frames = OTX2_GET_TX_STATS(TX_UCAST);
+ dev_stats->tx_frames = dev_stats->tx_bcast_frames +
+ dev_stats->tx_mcast_frames +
+ dev_stats->tx_ucast_frames;
+}
+
+void otx2_get_stats64(struct net_device *netdev,
+ struct rtnl_link_stats64 *stats)
+{
+ struct otx2_nic *pfvf = netdev_priv(netdev);
+ struct otx2_dev_stats *dev_stats;
+
+ otx2_get_dev_stats(pfvf);
+
+ dev_stats = &pfvf->hw.dev_stats;
+ stats->rx_bytes = dev_stats->rx_bytes;
+ stats->rx_packets = dev_stats->rx_frames;
+ stats->rx_dropped = dev_stats->rx_drops;
+ stats->multicast = dev_stats->rx_mcast_frames;
+
+ stats->tx_bytes = dev_stats->tx_bytes;
+ stats->tx_packets = dev_stats->tx_frames;
+ stats->tx_dropped = dev_stats->tx_drops;
+}
+
+/* Sync MAC address with RVU AF */
+static int otx2_hw_set_mac_addr(struct otx2_nic *pfvf, u8 *mac)
+{
+ struct nix_set_mac_addr *req;
+ int err;
+
+ otx2_mbox_lock(&pfvf->mbox);
+ req = otx2_mbox_alloc_msg_nix_set_mac_addr(&pfvf->mbox);
+ if (!req) {
+ otx2_mbox_unlock(&pfvf->mbox);
+ return -ENOMEM;
+ }
+
+ ether_addr_copy(req->mac_addr, mac);
+
+ err = otx2_sync_mbox_msg(&pfvf->mbox);
+ otx2_mbox_unlock(&pfvf->mbox);
+ return err;
+}
+
+static int otx2_hw_get_mac_addr(struct otx2_nic *pfvf,
+ struct net_device *netdev)
+{
+ struct nix_get_mac_addr_rsp *rsp;
+ struct mbox_msghdr *msghdr;
+ struct msg_req *req;
+ int err;
+
+ otx2_mbox_lock(&pfvf->mbox);
+ req = otx2_mbox_alloc_msg_nix_get_mac_addr(&pfvf->mbox);
+ if (!req) {
+ otx2_mbox_unlock(&pfvf->mbox);
+ return -ENOMEM;
+ }
+
+ err = otx2_sync_mbox_msg(&pfvf->mbox);
+ if (err) {
+ otx2_mbox_unlock(&pfvf->mbox);
+ return err;
+ }
+
+ msghdr = otx2_mbox_get_rsp(&pfvf->mbox.mbox, 0, &req->hdr);
+ if (!msghdr) {
+ otx2_mbox_unlock(&pfvf->mbox);
+ return -ENOMEM;
+ }
+ rsp = (struct nix_get_mac_addr_rsp *)msghdr;
+ ether_addr_copy(netdev->dev_addr, rsp->mac_addr);
+ otx2_mbox_unlock(&pfvf->mbox);
+
+ return 0;
+}
+
+int otx2_set_mac_address(struct net_device *netdev, void *p)
+{
+ struct otx2_nic *pfvf = netdev_priv(netdev);
+ struct sockaddr *addr = p;
+
+ if (!is_valid_ether_addr(addr->sa_data))
+ return -EADDRNOTAVAIL;
+
+ if (!otx2_hw_set_mac_addr(pfvf, addr->sa_data))
+ memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
+ else
+ return -EPERM;
+
+ return 0;
+}
+
+int otx2_hw_set_mtu(struct otx2_nic *pfvf, int mtu)
+{
+ struct nix_frs_cfg *req;
+ int err;
+
+ otx2_mbox_lock(&pfvf->mbox);
+ req = otx2_mbox_alloc_msg_nix_set_hw_frs(&pfvf->mbox);
+ if (!req) {
+ otx2_mbox_unlock(&pfvf->mbox);
+ return -ENOMEM;
+ }
+
+ /* SMQ config limits maximum pkt size that can be transmitted */
+ req->update_smq = true;
+ pfvf->max_frs = mtu + OTX2_ETH_HLEN;
+ req->maxlen = pfvf->max_frs;
+
+ err = otx2_sync_mbox_msg(&pfvf->mbox);
+ otx2_mbox_unlock(&pfvf->mbox);
+ return err;
+}
+
+int otx2_set_flowkey_cfg(struct otx2_nic *pfvf)
+{
+ struct otx2_rss_info *rss = &pfvf->hw.rss_info;
+ struct nix_rss_flowkey_cfg *req;
+ int err;
+
+ otx2_mbox_lock(&pfvf->mbox);
+ req = otx2_mbox_alloc_msg_nix_rss_flowkey_cfg(&pfvf->mbox);
+ if (!req) {
+ otx2_mbox_unlock(&pfvf->mbox);
+ return -ENOMEM;
+ }
+ req->mcam_index = -1; /* Default or reserved index */
+ req->flowkey_cfg = rss->flowkey_cfg;
+ req->group = DEFAULT_RSS_CONTEXT_GROUP;
+
+ err = otx2_sync_mbox_msg(&pfvf->mbox);
+ otx2_mbox_unlock(&pfvf->mbox);
+ return err;
+}
+
+int otx2_set_rss_table(struct otx2_nic *pfvf)
+{
+ struct otx2_rss_info *rss = &pfvf->hw.rss_info;
+ struct mbox *mbox = &pfvf->mbox;
+ struct nix_aq_enq_req *aq;
+ int idx, err;
+
+ otx2_mbox_lock(mbox);
+ /* Get memory to put this msg */
+ for (idx = 0; idx < rss->rss_size; idx++) {
+ aq = otx2_mbox_alloc_msg_nix_aq_enq(mbox);
+ if (!aq) {
+ /* The shared memory buffer can be full.
+ * Flush it and retry
+ */
+ err = otx2_sync_mbox_msg(mbox);
+ if (err) {
+ otx2_mbox_unlock(mbox);
+ return err;
+ }
+ aq = otx2_mbox_alloc_msg_nix_aq_enq(mbox);
+ if (!aq) {
+ otx2_mbox_unlock(mbox);
+ return -ENOMEM;
+ }
+ }
+
+ aq->rss.rq = rss->ind_tbl[idx];
+
+ /* Fill AQ info */
+ aq->qidx = idx;
+ aq->ctype = NIX_AQ_CTYPE_RSS;
+ aq->op = NIX_AQ_INSTOP_INIT;
+ }
+ err = otx2_sync_mbox_msg(mbox);
+ otx2_mbox_unlock(mbox);
+ return err;
+}
+
+void otx2_set_rss_key(struct otx2_nic *pfvf)
+{
+ struct otx2_rss_info *rss = &pfvf->hw.rss_info;
+ u64 *key = (u64 *)&rss->key[4];
+ int idx;
+
+	/* The 352-bit (44-byte) key needs to be configured as below:
+ * NIX_LF_RX_SECRETX0 = key<351:288>
+ * NIX_LF_RX_SECRETX1 = key<287:224>
+ * NIX_LF_RX_SECRETX2 = key<223:160>
+ * NIX_LF_RX_SECRETX3 = key<159:96>
+ * NIX_LF_RX_SECRETX4 = key<95:32>
+ * NIX_LF_RX_SECRETX5<63:32> = key<31:0>
+ */
+ otx2_write64(pfvf, NIX_LF_RX_SECRETX(5),
+ (u64)(*((u32 *)&rss->key)) << 32);
+ idx = sizeof(rss->key) / sizeof(u64);
+ while (idx > 0) {
+ idx--;
+ otx2_write64(pfvf, NIX_LF_RX_SECRETX(idx), *key++);
+ }
+}
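
For readers tracing the byte-to-register mapping, a minimal sketch of what the routine above effectively computes for a little-endian 44-byte key k[0..43] (hypothetical helper, illustration only):

	static void rss_key_layout_sketch(const u8 *k, u64 secret[6])
	{
		int i;

		/* key<31:0> lands in SECRETX5 bits <63:32> */
		secret[5] = (u64)le32_to_cpup((const __le32 *)k) << 32;
		/* key<95:32> .. key<351:288> fill SECRETX4 down to SECRETX0 */
		for (i = 4; i >= 0; i--)
			secret[i] = le64_to_cpup((const __le64 *)(k + 4 + (4 - i) * 8));
	}
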
+
+int otx2_rss_init(struct otx2_nic *pfvf)
+{
+ struct otx2_rss_info *rss = &pfvf->hw.rss_info;
+ int idx, ret = 0;
+
+ rss->rss_size = sizeof(rss->ind_tbl);
+
+	/* Init RSS key if it is not set up already */
+ if (!rss->enable)
+ netdev_rss_key_fill(rss->key, sizeof(rss->key));
+ otx2_set_rss_key(pfvf);
+
+ if (!netif_is_rxfh_configured(pfvf->netdev)) {
+ /* Default indirection table */
+ for (idx = 0; idx < rss->rss_size; idx++)
+ rss->ind_tbl[idx] =
+ ethtool_rxfh_indir_default(idx,
+ pfvf->hw.rx_queues);
+ }
+ ret = otx2_set_rss_table(pfvf);
+ if (ret)
+ return ret;
+
+ /* Flowkey or hash config to be used for generating flow tag */
+ rss->flowkey_cfg = rss->enable ? rss->flowkey_cfg :
+ NIX_FLOW_KEY_TYPE_IPV4 | NIX_FLOW_KEY_TYPE_IPV6 |
+ NIX_FLOW_KEY_TYPE_TCP | NIX_FLOW_KEY_TYPE_UDP |
+ NIX_FLOW_KEY_TYPE_SCTP;
+
+ ret = otx2_set_flowkey_cfg(pfvf);
+ if (ret)
+ return ret;
+
+ rss->enable = true;
+ return 0;
+}
+
+void otx2_config_irq_coalescing(struct otx2_nic *pfvf, int qidx)
+{
+ /* Configure CQE interrupt coalescing parameters
+ *
+ * HW triggers an irq when ECOUNT > cq_ecount_wait, hence
+	 * program one less than cq_ecount_wait. cq_time_wait is in
+	 * usecs; convert it to a 100ns count.
+ */
+ otx2_write64(pfvf, NIX_LF_CINTX_WAIT(qidx),
+ ((u64)(pfvf->hw.cq_time_wait * 10) << 48) |
+ ((u64)pfvf->hw.cq_qcount_wait << 32) |
+ (pfvf->hw.cq_ecount_wait - 1));
+}
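
To make the packing concrete (the parameter values below are assumed purely for illustration):

	/* cq_time_wait = 10 usecs, cq_qcount_wait = 32, cq_ecount_wait = 16 */
	u64 regval = ((u64)(10 * 10) << 48) |	/* 100 * 100ns		*/
		     ((u64)32 << 32) |		/* qcount threshold	*/
		     (16 - 1);			/* ecount threshold - 1	*/
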
+
+dma_addr_t otx2_alloc_rbuf(struct otx2_nic *pfvf, struct otx2_pool *pool,
+ gfp_t gfp)
+{
+ dma_addr_t iova;
+
+	/* Check if request can be accommodated in previously allocated page */
+ if (pool->page && ((pool->page_offset + pool->rbsize) <=
+ (PAGE_SIZE << pool->rbpage_order))) {
+ pool->pageref++;
+ goto ret;
+ }
+
+ otx2_get_page(pool);
+
+ /* Allocate a new page */
+ pool->page = alloc_pages(gfp | __GFP_COMP | __GFP_NOWARN,
+ pool->rbpage_order);
+ if (unlikely(!pool->page))
+ return -ENOMEM;
+
+ pool->page_offset = 0;
+ret:
+ iova = (u64)otx2_dma_map_page(pfvf, pool->page, pool->page_offset,
+ pool->rbsize, DMA_FROM_DEVICE);
+ if (!iova) {
+ if (!pool->page_offset)
+ __free_pages(pool->page, pool->rbpage_order);
+ pool->page = NULL;
+ return -ENOMEM;
+ }
+ pool->page_offset += pool->rbsize;
+ return iova;
+}
+
+void otx2_tx_timeout(struct net_device *netdev, unsigned int txq)
+{
+ struct otx2_nic *pfvf = netdev_priv(netdev);
+
+ schedule_work(&pfvf->reset_task);
+}
+
+void otx2_get_mac_from_af(struct net_device *netdev)
+{
+ struct otx2_nic *pfvf = netdev_priv(netdev);
+ int err;
+
+ err = otx2_hw_get_mac_addr(pfvf, netdev);
+ if (err)
+ dev_warn(pfvf->dev, "Failed to read mac from hardware\n");
+
+ /* If AF doesn't provide a valid MAC, generate a random one */
+ if (!is_valid_ether_addr(netdev->dev_addr))
+ eth_hw_addr_random(netdev);
+}
+
+static int otx2_get_link(struct otx2_nic *pfvf)
+{
+ int link = 0;
+ u16 map;
+
+ /* cgx lmac link */
+ if (pfvf->hw.tx_chan_base >= CGX_CHAN_BASE) {
+ map = pfvf->hw.tx_chan_base & 0x7FF;
+ link = 4 * ((map >> 8) & 0xF) + ((map >> 4) & 0xF);
+ }
+ /* LBK channel */
+ if (pfvf->hw.tx_chan_base < SDP_CHAN_BASE)
+ link = 12;
+
+ return link;
+}
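
A worked example of the mapping, assuming the usual RVU channel layout (CGX channel = 0x800 + cgx_id * 0x100 + lmac_id * 0x10 + chan — an assumption from the AF headers, not stated in this patch): tx_chan_base = 0x920 gives map = 0x120, so link = 4 * 1 + 2 = 6, i.e. cgx 1, lmac 2; LBK traffic always maps to the fixed link 12.
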
+
+int otx2_txschq_config(struct otx2_nic *pfvf, int lvl)
+{
+ struct otx2_hw *hw = &pfvf->hw;
+ struct nix_txschq_config *req;
+ u64 schq, parent;
+
+ req = otx2_mbox_alloc_msg_nix_txschq_cfg(&pfvf->mbox);
+ if (!req)
+ return -ENOMEM;
+
+ req->lvl = lvl;
+ req->num_regs = 1;
+
+ schq = hw->txschq_list[lvl][0];
+	/* Set topology etc. configuration */
+ if (lvl == NIX_TXSCH_LVL_SMQ) {
+ req->reg[0] = NIX_AF_SMQX_CFG(schq);
+ req->regval[0] = ((pfvf->netdev->mtu + OTX2_ETH_HLEN) << 8) |
+ OTX2_MIN_MTU;
+
+ req->regval[0] |= (0x20ULL << 51) | (0x80ULL << 39) |
+ (0x2ULL << 36);
+ req->num_regs++;
+ /* MDQ config */
+ parent = hw->txschq_list[NIX_TXSCH_LVL_TL4][0];
+ req->reg[1] = NIX_AF_MDQX_PARENT(schq);
+ req->regval[1] = parent << 16;
+ req->num_regs++;
+ /* Set DWRR quantum */
+ req->reg[2] = NIX_AF_MDQX_SCHEDULE(schq);
+ req->regval[2] = DFLT_RR_QTM;
+ } else if (lvl == NIX_TXSCH_LVL_TL4) {
+ parent = hw->txschq_list[NIX_TXSCH_LVL_TL3][0];
+ req->reg[0] = NIX_AF_TL4X_PARENT(schq);
+ req->regval[0] = parent << 16;
+ req->num_regs++;
+ req->reg[1] = NIX_AF_TL4X_SCHEDULE(schq);
+ req->regval[1] = DFLT_RR_QTM;
+ } else if (lvl == NIX_TXSCH_LVL_TL3) {
+ parent = hw->txschq_list[NIX_TXSCH_LVL_TL2][0];
+ req->reg[0] = NIX_AF_TL3X_PARENT(schq);
+ req->regval[0] = parent << 16;
+ req->num_regs++;
+ req->reg[1] = NIX_AF_TL3X_SCHEDULE(schq);
+ req->regval[1] = DFLT_RR_QTM;
+ } else if (lvl == NIX_TXSCH_LVL_TL2) {
+ parent = hw->txschq_list[NIX_TXSCH_LVL_TL1][0];
+ req->reg[0] = NIX_AF_TL2X_PARENT(schq);
+ req->regval[0] = parent << 16;
+
+ req->num_regs++;
+ req->reg[1] = NIX_AF_TL2X_SCHEDULE(schq);
+ req->regval[1] = TXSCH_TL1_DFLT_RR_PRIO << 24 | DFLT_RR_QTM;
+
+ req->num_regs++;
+ req->reg[2] = NIX_AF_TL3_TL2X_LINKX_CFG(schq,
+ otx2_get_link(pfvf));
+ /* Enable this queue and backpressure */
+ req->regval[2] = BIT_ULL(13) | BIT_ULL(12);
+
+ } else if (lvl == NIX_TXSCH_LVL_TL1) {
+ /* Default config for TL1.
+ * For VF this is always ignored.
+ */
+
+ /* Set DWRR quantum */
+ req->reg[0] = NIX_AF_TL1X_SCHEDULE(schq);
+ req->regval[0] = TXSCH_TL1_DFLT_RR_QTM;
+
+ req->num_regs++;
+ req->reg[1] = NIX_AF_TL1X_TOPOLOGY(schq);
+ req->regval[1] = (TXSCH_TL1_DFLT_RR_PRIO << 1);
+
+ req->num_regs++;
+ req->reg[2] = NIX_AF_TL1X_CIR(schq);
+ req->regval[2] = 0;
+ }
+
+ return otx2_sync_mbox_msg(&pfvf->mbox);
+}
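
With one scheduler queue per level (see otx2_txsch_alloc() below), the net effect of the above is a single default tree,

	TL1 -> TL2 -> TL3 -> TL4 -> MDQ/SMQ -> all SQs

where each child points at its parent through the *_PARENT registers and every node carries the default DWRR quantum.
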
+
+int otx2_txsch_alloc(struct otx2_nic *pfvf)
+{
+ struct nix_txsch_alloc_req *req;
+ int lvl;
+
+ /* Get memory to put this msg */
+ req = otx2_mbox_alloc_msg_nix_txsch_alloc(&pfvf->mbox);
+ if (!req)
+ return -ENOMEM;
+
+ /* Request one schq per level */
+ for (lvl = 0; lvl < NIX_TXSCH_LVL_CNT; lvl++)
+ req->schq[lvl] = 1;
+
+ return otx2_sync_mbox_msg(&pfvf->mbox);
+}
+
+int otx2_txschq_stop(struct otx2_nic *pfvf)
+{
+ struct nix_txsch_free_req *free_req;
+ int lvl, schq, err;
+
+ otx2_mbox_lock(&pfvf->mbox);
+ /* Free the transmit schedulers */
+ free_req = otx2_mbox_alloc_msg_nix_txsch_free(&pfvf->mbox);
+ if (!free_req) {
+ otx2_mbox_unlock(&pfvf->mbox);
+ return -ENOMEM;
+ }
+
+ free_req->flags = TXSCHQ_FREE_ALL;
+ err = otx2_sync_mbox_msg(&pfvf->mbox);
+ otx2_mbox_unlock(&pfvf->mbox);
+
+ /* Clear the txschq list */
+ for (lvl = 0; lvl < NIX_TXSCH_LVL_CNT; lvl++) {
+ for (schq = 0; schq < MAX_TXSCHQ_PER_FUNC; schq++)
+ pfvf->hw.txschq_list[lvl][schq] = 0;
+ }
+ return err;
+}
+
+void otx2_sqb_flush(struct otx2_nic *pfvf)
+{
+ int qidx, sqe_tail, sqe_head;
+ u64 incr, *ptr, val;
+
+ ptr = (u64 *)otx2_get_regaddr(pfvf, NIX_LF_SQ_OP_STATUS);
+ for (qidx = 0; qidx < pfvf->hw.tx_queues; qidx++) {
+ incr = (u64)qidx << 32;
+ while (1) {
+ val = otx2_atomic64_add(incr, ptr);
+ sqe_head = (val >> 20) & 0x3F;
+ sqe_tail = (val >> 28) & 0x3F;
+ if (sqe_head == sqe_tail)
+ break;
+ usleep_range(1, 3);
+ }
+ }
+}
+
+/* RED and drop levels of CQ on packet reception.
+ * For CQ, level is a measure of emptiness (0x0 = full, 255 = empty).
+ */
+#define RQ_PASS_LVL_CQ(skid, qsize) ((((skid) + 16) * 256) / (qsize))
+#define RQ_DROP_LVL_CQ(skid, qsize) (((skid) * 256) / (qsize))
+
+/* RED and drop levels of AURA for packet reception.
+ * For AURA, level is a measure of fullness (0x0 = empty, 255 = full).
+ * E.g. for an RQ of length 1K with pass/drop levels of 204/230,
+ * RED accepts pkts if free pointers > 102 and <= 205, and
+ * drops pkts if free pointers < 102.
+ */
+#define RQ_PASS_LVL_AURA (255 - ((95 * 256) / 100)) /* RED when 95% is full */
+#define RQ_DROP_LVL_AURA (255 - ((99 * 256) / 100)) /* Drop when 99% is full */
+
+/* A send skid of 2000 packets is required for a CQ size of 4K CQEs. */
+#define SEND_CQ_SKID 2000
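
Worked numbers for the CQ level macros above (queue size assumed for illustration; the 600-entry skid matches the 96xx A0 setting used later in this patch): with rq_skid = 600 and a 1K-entry CQ, RQ_PASS_LVL_CQ(600, 1024) = ((600 + 16) * 256) / 1024 = 154 and RQ_DROP_LVL_CQ(600, 1024) = (600 * 256) / 1024 = 150, so RED engages once fewer than 154/256ths of the CQ is empty and dropping starts below 150/256ths.
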
+
+static int otx2_rq_init(struct otx2_nic *pfvf, u16 qidx, u16 lpb_aura)
+{
+ struct otx2_qset *qset = &pfvf->qset;
+ struct nix_aq_enq_req *aq;
+
+ /* Get memory to put this msg */
+ aq = otx2_mbox_alloc_msg_nix_aq_enq(&pfvf->mbox);
+ if (!aq)
+ return -ENOMEM;
+
+ aq->rq.cq = qidx;
+ aq->rq.ena = 1;
+ aq->rq.pb_caching = 1;
+ aq->rq.lpb_aura = lpb_aura; /* Use large packet buffer aura */
+ aq->rq.lpb_sizem1 = (DMA_BUFFER_LEN(pfvf->rbsize) / 8) - 1;
+ aq->rq.xqe_imm_size = 0; /* Copying of packet to CQE not needed */
+ aq->rq.flow_tagw = 32; /* Copy full 32bit flow_tag to CQE header */
+ aq->rq.qint_idx = 0;
+ aq->rq.lpb_drop_ena = 1; /* Enable RED dropping for AURA */
+ aq->rq.xqe_drop_ena = 1; /* Enable RED dropping for CQ/SSO */
+ aq->rq.xqe_pass = RQ_PASS_LVL_CQ(pfvf->hw.rq_skid, qset->rqe_cnt);
+ aq->rq.xqe_drop = RQ_DROP_LVL_CQ(pfvf->hw.rq_skid, qset->rqe_cnt);
+ aq->rq.lpb_aura_pass = RQ_PASS_LVL_AURA;
+ aq->rq.lpb_aura_drop = RQ_DROP_LVL_AURA;
+
+ /* Fill AQ info */
+ aq->qidx = qidx;
+ aq->ctype = NIX_AQ_CTYPE_RQ;
+ aq->op = NIX_AQ_INSTOP_INIT;
+
+ return otx2_sync_mbox_msg(&pfvf->mbox);
+}
+
+static int otx2_sq_init(struct otx2_nic *pfvf, u16 qidx, u16 sqb_aura)
+{
+ struct otx2_qset *qset = &pfvf->qset;
+ struct otx2_snd_queue *sq;
+ struct nix_aq_enq_req *aq;
+ struct otx2_pool *pool;
+ int err;
+
+ pool = &pfvf->qset.pool[sqb_aura];
+ sq = &qset->sq[qidx];
+ sq->sqe_size = NIX_SQESZ_W16 ? 64 : 128;
+ sq->sqe_cnt = qset->sqe_cnt;
+
+ err = qmem_alloc(pfvf->dev, &sq->sqe, 1, sq->sqe_size);
+ if (err)
+ return err;
+
+ err = qmem_alloc(pfvf->dev, &sq->tso_hdrs, qset->sqe_cnt,
+ TSO_HEADER_SIZE);
+ if (err)
+ return err;
+
+ sq->sqe_base = sq->sqe->base;
+ sq->sg = kcalloc(qset->sqe_cnt, sizeof(struct sg_list), GFP_KERNEL);
+ if (!sq->sg)
+ return -ENOMEM;
+
+ sq->head = 0;
+ sq->sqe_per_sqb = (pfvf->hw.sqb_size / sq->sqe_size) - 1;
+ sq->num_sqbs = (qset->sqe_cnt + sq->sqe_per_sqb) / sq->sqe_per_sqb;
+ /* Set SQE threshold to 10% of total SQEs */
+ sq->sqe_thresh = ((sq->num_sqbs * sq->sqe_per_sqb) * 10) / 100;
+ sq->aura_id = sqb_aura;
+ sq->aura_fc_addr = pool->fc_addr->base;
+ sq->lmt_addr = (__force u64 *)(pfvf->reg_base + LMT_LF_LMTLINEX(qidx));
+ sq->io_addr = (__force u64)otx2_get_regaddr(pfvf, NIX_LF_OP_SENDX(0));
+
+ sq->stats.bytes = 0;
+ sq->stats.pkts = 0;
+
+ /* Get memory to put this msg */
+ aq = otx2_mbox_alloc_msg_nix_aq_enq(&pfvf->mbox);
+ if (!aq)
+ return -ENOMEM;
+
+ aq->sq.cq = pfvf->hw.rx_queues + qidx;
+ aq->sq.max_sqe_size = NIX_MAXSQESZ_W16; /* 128 byte */
+ aq->sq.cq_ena = 1;
+ aq->sq.ena = 1;
+	/* Only one SMQ is allocated, map all SQs to that SMQ */
+ aq->sq.smq = pfvf->hw.txschq_list[NIX_TXSCH_LVL_SMQ][0];
+ aq->sq.smq_rr_quantum = DFLT_RR_QTM;
+ aq->sq.default_chan = pfvf->hw.tx_chan_base;
+ aq->sq.sqe_stype = NIX_STYPE_STF; /* Cache SQB */
+ aq->sq.sqb_aura = sqb_aura;
+ aq->sq.sq_int_ena = NIX_SQINT_BITS;
+ aq->sq.qint_idx = 0;
+	/* Due to pipelining, a minimum of 2000 unused SQ CQEs
+	 * needs to be maintained to avoid CQ overflow.
+ */
+ aq->sq.cq_limit = ((SEND_CQ_SKID * 256) / (sq->sqe_cnt));
+
+ /* Fill AQ info */
+ aq->qidx = qidx;
+ aq->ctype = NIX_AQ_CTYPE_SQ;
+ aq->op = NIX_AQ_INSTOP_INIT;
+
+ return otx2_sync_mbox_msg(&pfvf->mbox);
+}
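
For the cq_limit computed above, e.g. an assumed sqe_cnt of 4096 gives (2000 * 256) / 4096 = 125; the limit appears to be expressed in 1/256ths of the CQ, so the 2000-entry skid is preserved regardless of queue size.
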
+
+static int otx2_cq_init(struct otx2_nic *pfvf, u16 qidx)
+{
+ struct otx2_qset *qset = &pfvf->qset;
+ struct nix_aq_enq_req *aq;
+ struct otx2_cq_queue *cq;
+ int err, pool_id;
+
+ cq = &qset->cq[qidx];
+ cq->cq_idx = qidx;
+ if (qidx < pfvf->hw.rx_queues) {
+ cq->cq_type = CQ_RX;
+ cq->cint_idx = qidx;
+ cq->cqe_cnt = qset->rqe_cnt;
+ } else {
+ cq->cq_type = CQ_TX;
+ cq->cint_idx = qidx - pfvf->hw.rx_queues;
+ cq->cqe_cnt = qset->sqe_cnt;
+ }
+ cq->cqe_size = pfvf->qset.xqe_size;
+
+ /* Allocate memory for CQEs */
+ err = qmem_alloc(pfvf->dev, &cq->cqe, cq->cqe_cnt, cq->cqe_size);
+ if (err)
+ return err;
+
+ /* Save CQE CPU base for faster reference */
+ cq->cqe_base = cq->cqe->base;
+	/* In the case where all RQs' auras point to a single pool,
+	 * all CQs' receive buffer pools also point to that same pool.
+ */
+ pool_id = ((cq->cq_type == CQ_RX) &&
+ (pfvf->hw.rqpool_cnt != pfvf->hw.rx_queues)) ? 0 : qidx;
+ cq->rbpool = &qset->pool[pool_id];
+ cq->refill_task_sched = false;
+
+ /* Get memory to put this msg */
+ aq = otx2_mbox_alloc_msg_nix_aq_enq(&pfvf->mbox);
+ if (!aq)
+ return -ENOMEM;
+
+ aq->cq.ena = 1;
+ aq->cq.qsize = Q_SIZE(cq->cqe_cnt, 4);
+ aq->cq.caching = 1;
+ aq->cq.base = cq->cqe->iova;
+ aq->cq.cint_idx = cq->cint_idx;
+ aq->cq.cq_err_int_ena = NIX_CQERRINT_BITS;
+ aq->cq.qint_idx = 0;
+ aq->cq.avg_level = 255;
+
+ if (qidx < pfvf->hw.rx_queues) {
+ aq->cq.drop = RQ_DROP_LVL_CQ(pfvf->hw.rq_skid, cq->cqe_cnt);
+ aq->cq.drop_ena = 1;
+ }
+
+ /* Fill AQ info */
+ aq->qidx = qidx;
+ aq->ctype = NIX_AQ_CTYPE_CQ;
+ aq->op = NIX_AQ_INSTOP_INIT;
+
+ return otx2_sync_mbox_msg(&pfvf->mbox);
+}
+
+static void otx2_pool_refill_task(struct work_struct *work)
+{
+ struct otx2_cq_queue *cq;
+ struct otx2_pool *rbpool;
+ struct refill_work *wrk;
+ int qidx, free_ptrs = 0;
+ struct otx2_nic *pfvf;
+ s64 bufptr;
+
+ wrk = container_of(work, struct refill_work, pool_refill_work.work);
+ pfvf = wrk->pf;
+ qidx = wrk - pfvf->refill_wrk;
+ cq = &pfvf->qset.cq[qidx];
+ rbpool = cq->rbpool;
+ free_ptrs = cq->pool_ptrs;
+
+ while (cq->pool_ptrs) {
+ bufptr = otx2_alloc_rbuf(pfvf, rbpool, GFP_KERNEL);
+ if (bufptr <= 0) {
+			/* Schedule the WQ if we failed to free at least half
+			 * of the pointers, else enable NAPI for this RQ.
+ */
+ if (!((free_ptrs - cq->pool_ptrs) > free_ptrs / 2)) {
+ struct delayed_work *dwork;
+
+ dwork = &wrk->pool_refill_work;
+ schedule_delayed_work(dwork,
+ msecs_to_jiffies(100));
+ } else {
+ cq->refill_task_sched = false;
+ }
+ return;
+ }
+ otx2_aura_freeptr(pfvf, qidx, bufptr + OTX2_HEAD_ROOM);
+ cq->pool_ptrs--;
+ }
+ cq->refill_task_sched = false;
+}
+
+int otx2_config_nix_queues(struct otx2_nic *pfvf)
+{
+ int qidx, err;
+
+ /* Initialize RX queues */
+ for (qidx = 0; qidx < pfvf->hw.rx_queues; qidx++) {
+ u16 lpb_aura = otx2_get_pool_idx(pfvf, AURA_NIX_RQ, qidx);
+
+ err = otx2_rq_init(pfvf, qidx, lpb_aura);
+ if (err)
+ return err;
+ }
+
+ /* Initialize TX queues */
+ for (qidx = 0; qidx < pfvf->hw.tx_queues; qidx++) {
+ u16 sqb_aura = otx2_get_pool_idx(pfvf, AURA_NIX_SQ, qidx);
+
+ err = otx2_sq_init(pfvf, qidx, sqb_aura);
+ if (err)
+ return err;
+ }
+
+ /* Initialize completion queues */
+ for (qidx = 0; qidx < pfvf->qset.cq_cnt; qidx++) {
+ err = otx2_cq_init(pfvf, qidx);
+ if (err)
+ return err;
+ }
+
+ /* Initialize work queue for receive buffer refill */
+ pfvf->refill_wrk = devm_kcalloc(pfvf->dev, pfvf->qset.cq_cnt,
+ sizeof(struct refill_work), GFP_KERNEL);
+ if (!pfvf->refill_wrk)
+ return -ENOMEM;
+
+ for (qidx = 0; qidx < pfvf->qset.cq_cnt; qidx++) {
+ pfvf->refill_wrk[qidx].pf = pfvf;
+ INIT_DELAYED_WORK(&pfvf->refill_wrk[qidx].pool_refill_work,
+ otx2_pool_refill_task);
+ }
+ return 0;
+}
+
+int otx2_config_nix(struct otx2_nic *pfvf)
+{
+ struct nix_lf_alloc_req *nixlf;
+ struct nix_lf_alloc_rsp *rsp;
+ int err;
+
+ pfvf->qset.xqe_size = NIX_XQESZ_W16 ? 128 : 512;
+
+ /* Get memory to put this msg */
+ nixlf = otx2_mbox_alloc_msg_nix_lf_alloc(&pfvf->mbox);
+ if (!nixlf)
+ return -ENOMEM;
+
+ /* Set RQ/SQ/CQ counts */
+ nixlf->rq_cnt = pfvf->hw.rx_queues;
+ nixlf->sq_cnt = pfvf->hw.tx_queues;
+ nixlf->cq_cnt = pfvf->qset.cq_cnt;
+ nixlf->rss_sz = MAX_RSS_INDIR_TBL_SIZE;
+ nixlf->rss_grps = 1; /* Single RSS indir table supported, for now */
+ nixlf->xqe_sz = NIX_XQESZ_W16;
+	/* We don't know the absolute NPA LF idx attached.
+ * AF will replace 'RVU_DEFAULT_PF_FUNC' with
+ * NPA LF attached to this RVU PF/VF.
+ */
+ nixlf->npa_func = RVU_DEFAULT_PF_FUNC;
+ /* Disable alignment pad, enable L2 length check,
+ * enable L4 TCP/UDP checksum verification.
+ */
+ nixlf->rx_cfg = BIT_ULL(33) | BIT_ULL(35) | BIT_ULL(37);
+
+ err = otx2_sync_mbox_msg(&pfvf->mbox);
+ if (err)
+ return err;
+
+ rsp = (struct nix_lf_alloc_rsp *)otx2_mbox_get_rsp(&pfvf->mbox.mbox, 0,
+ &nixlf->hdr);
+ if (IS_ERR(rsp))
+ return PTR_ERR(rsp);
+
+ if (rsp->qints < 1)
+ return -ENXIO;
+
+ return rsp->hdr.rc;
+}
+
+void otx2_sq_free_sqbs(struct otx2_nic *pfvf)
+{
+ struct otx2_qset *qset = &pfvf->qset;
+ struct otx2_hw *hw = &pfvf->hw;
+ struct otx2_snd_queue *sq;
+ int sqb, qidx;
+ u64 iova, pa;
+
+ for (qidx = 0; qidx < hw->tx_queues; qidx++) {
+ sq = &qset->sq[qidx];
+ if (!sq->sqb_ptrs)
+ continue;
+ for (sqb = 0; sqb < sq->sqb_count; sqb++) {
+ if (!sq->sqb_ptrs[sqb])
+ continue;
+ iova = sq->sqb_ptrs[sqb];
+ pa = otx2_iova_to_phys(pfvf->iommu_domain, iova);
+ dma_unmap_page_attrs(pfvf->dev, iova, hw->sqb_size,
+ DMA_FROM_DEVICE,
+ DMA_ATTR_SKIP_CPU_SYNC);
+ put_page(virt_to_page(phys_to_virt(pa)));
+ }
+ sq->sqb_count = 0;
+ }
+}
+
+void otx2_free_aura_ptr(struct otx2_nic *pfvf, int type)
+{
+ int pool_id, pool_start = 0, pool_end = 0, size = 0;
+ u64 iova, pa;
+
+ if (type == AURA_NIX_SQ) {
+ pool_start = otx2_get_pool_idx(pfvf, type, 0);
+ pool_end = pool_start + pfvf->hw.sqpool_cnt;
+ size = pfvf->hw.sqb_size;
+ }
+ if (type == AURA_NIX_RQ) {
+ pool_start = otx2_get_pool_idx(pfvf, type, 0);
+ pool_end = pfvf->hw.rqpool_cnt;
+ size = pfvf->rbsize;
+ }
+
+ /* Free SQB and RQB pointers from the aura pool */
+ for (pool_id = pool_start; pool_id < pool_end; pool_id++) {
+ iova = otx2_aura_allocptr(pfvf, pool_id);
+ while (iova) {
+ if (type == AURA_NIX_RQ)
+ iova -= OTX2_HEAD_ROOM;
+
+ pa = otx2_iova_to_phys(pfvf->iommu_domain, iova);
+ dma_unmap_page_attrs(pfvf->dev, iova, size,
+ DMA_FROM_DEVICE,
+ DMA_ATTR_SKIP_CPU_SYNC);
+ put_page(virt_to_page(phys_to_virt(pa)));
+ iova = otx2_aura_allocptr(pfvf, pool_id);
+ }
+ }
+}
+
+void otx2_aura_pool_free(struct otx2_nic *pfvf)
+{
+ struct otx2_pool *pool;
+ int pool_id;
+
+ if (!pfvf->qset.pool)
+ return;
+
+ for (pool_id = 0; pool_id < pfvf->hw.pool_cnt; pool_id++) {
+ pool = &pfvf->qset.pool[pool_id];
+ qmem_free(pfvf->dev, pool->stack);
+ qmem_free(pfvf->dev, pool->fc_addr);
+ }
+ devm_kfree(pfvf->dev, pfvf->qset.pool);
+}
+
+static int otx2_aura_init(struct otx2_nic *pfvf, int aura_id,
+ int pool_id, int numptrs)
+{
+ struct npa_aq_enq_req *aq;
+ struct otx2_pool *pool;
+ int err;
+
+ pool = &pfvf->qset.pool[pool_id];
+
+ /* Allocate memory for HW to update Aura count.
+ * Alloc one cache line, so that it fits all FC_STYPE modes.
+ */
+ if (!pool->fc_addr) {
+ err = qmem_alloc(pfvf->dev, &pool->fc_addr, 1, OTX2_ALIGN);
+ if (err)
+ return err;
+ }
+
+ /* Initialize this aura's context via AF */
+ aq = otx2_mbox_alloc_msg_npa_aq_enq(&pfvf->mbox);
+ if (!aq) {
+ /* Shared mbox memory buffer is full, flush it and retry */
+ err = otx2_sync_mbox_msg(&pfvf->mbox);
+ if (err)
+ return err;
+ aq = otx2_mbox_alloc_msg_npa_aq_enq(&pfvf->mbox);
+ if (!aq)
+ return -ENOMEM;
+ }
+
+ aq->aura_id = aura_id;
+ /* Will be filled by AF with correct pool context address */
+ aq->aura.pool_addr = pool_id;
+ aq->aura.pool_caching = 1;
+ aq->aura.shift = ilog2(numptrs) - 8;
+ aq->aura.count = numptrs;
+ aq->aura.limit = numptrs;
+ aq->aura.avg_level = 255;
+ aq->aura.ena = 1;
+ aq->aura.fc_ena = 1;
+ aq->aura.fc_addr = pool->fc_addr->iova;
+ aq->aura.fc_hyst_bits = 0; /* Store count on all updates */
+
+ /* Fill AQ info */
+ aq->ctype = NPA_AQ_CTYPE_AURA;
+ aq->op = NPA_AQ_INSTOP_INIT;
+
+ return 0;
+}
+
+static int otx2_pool_init(struct otx2_nic *pfvf, u16 pool_id,
+ int stack_pages, int numptrs, int buf_size)
+{
+ struct npa_aq_enq_req *aq;
+ struct otx2_pool *pool;
+ int err;
+
+ pool = &pfvf->qset.pool[pool_id];
+ /* Alloc memory for stack which is used to store buffer pointers */
+ err = qmem_alloc(pfvf->dev, &pool->stack,
+ stack_pages, pfvf->hw.stack_pg_bytes);
+ if (err)
+ return err;
+
+ pool->rbsize = buf_size;
+ pool->rbpage_order = get_order(buf_size);
+
+ /* Initialize this pool's context via AF */
+ aq = otx2_mbox_alloc_msg_npa_aq_enq(&pfvf->mbox);
+ if (!aq) {
+ /* Shared mbox memory buffer is full, flush it and retry */
+ err = otx2_sync_mbox_msg(&pfvf->mbox);
+ if (err) {
+ qmem_free(pfvf->dev, pool->stack);
+ return err;
+ }
+ aq = otx2_mbox_alloc_msg_npa_aq_enq(&pfvf->mbox);
+ if (!aq) {
+ qmem_free(pfvf->dev, pool->stack);
+ return -ENOMEM;
+ }
+ }
+
+ aq->aura_id = pool_id;
+ aq->pool.stack_base = pool->stack->iova;
+ aq->pool.stack_caching = 1;
+ aq->pool.ena = 1;
+ aq->pool.buf_size = buf_size / 128;
+ aq->pool.stack_max_pages = stack_pages;
+ aq->pool.shift = ilog2(numptrs) - 8;
+ aq->pool.ptr_start = 0;
+ aq->pool.ptr_end = ~0ULL;
+
+ /* Fill AQ info */
+ aq->ctype = NPA_AQ_CTYPE_POOL;
+ aq->op = NPA_AQ_INSTOP_INIT;
+
+ return 0;
+}
+
+int otx2_sq_aura_pool_init(struct otx2_nic *pfvf)
+{
+ int qidx, pool_id, stack_pages, num_sqbs;
+ struct otx2_qset *qset = &pfvf->qset;
+ struct otx2_hw *hw = &pfvf->hw;
+ struct otx2_snd_queue *sq;
+ struct otx2_pool *pool;
+ int err, ptr;
+ s64 bufptr;
+
+ /* Calculate number of SQBs needed.
+ *
+	 * For a 128-byte SQE and a 4K SQB, 31 SQEs will fit in one SQB;
+	 * the last SQE slot is used for pointing to the next SQB.
+ */
+ num_sqbs = (hw->sqb_size / 128) - 1;
+ num_sqbs = (qset->sqe_cnt + num_sqbs) / num_sqbs;
+
+ /* Get no of stack pages needed */
+ stack_pages =
+ (num_sqbs + hw->stack_pg_ptrs - 1) / hw->stack_pg_ptrs;
+
+ for (qidx = 0; qidx < hw->tx_queues; qidx++) {
+ pool_id = otx2_get_pool_idx(pfvf, AURA_NIX_SQ, qidx);
+ /* Initialize aura context */
+ err = otx2_aura_init(pfvf, pool_id, pool_id, num_sqbs);
+ if (err)
+ goto fail;
+
+ /* Initialize pool context */
+ err = otx2_pool_init(pfvf, pool_id, stack_pages,
+ num_sqbs, hw->sqb_size);
+ if (err)
+ goto fail;
+ }
+
+ /* Flush accumulated messages */
+ err = otx2_sync_mbox_msg(&pfvf->mbox);
+ if (err)
+ goto fail;
+
+ /* Allocate pointers and free them to aura/pool */
+ for (qidx = 0; qidx < hw->tx_queues; qidx++) {
+ pool_id = otx2_get_pool_idx(pfvf, AURA_NIX_SQ, qidx);
+ pool = &pfvf->qset.pool[pool_id];
+
+ sq = &qset->sq[qidx];
+ sq->sqb_count = 0;
+ sq->sqb_ptrs = kcalloc(num_sqbs, sizeof(u64 *), GFP_KERNEL);
+ if (!sq->sqb_ptrs)
+ return -ENOMEM;
+
+ for (ptr = 0; ptr < num_sqbs; ptr++) {
+ bufptr = otx2_alloc_rbuf(pfvf, pool, GFP_KERNEL);
+ if (bufptr <= 0)
+ return bufptr;
+ otx2_aura_freeptr(pfvf, pool_id, bufptr);
+ sq->sqb_ptrs[sq->sqb_count++] = (u64)bufptr;
+ }
+ otx2_get_page(pool);
+ }
+
+ return 0;
+fail:
+ otx2_mbox_reset(&pfvf->mbox.mbox, 0);
+ otx2_aura_pool_free(pfvf);
+ return err;
+}
+
+int otx2_rq_aura_pool_init(struct otx2_nic *pfvf)
+{
+ struct otx2_hw *hw = &pfvf->hw;
+ int stack_pages, pool_id, rq;
+ struct otx2_pool *pool;
+ int err, ptr, num_ptrs;
+ s64 bufptr;
+
+ num_ptrs = pfvf->qset.rqe_cnt;
+
+ stack_pages =
+ (num_ptrs + hw->stack_pg_ptrs - 1) / hw->stack_pg_ptrs;
+
+ for (rq = 0; rq < hw->rx_queues; rq++) {
+ pool_id = otx2_get_pool_idx(pfvf, AURA_NIX_RQ, rq);
+ /* Initialize aura context */
+ err = otx2_aura_init(pfvf, pool_id, pool_id, num_ptrs);
+ if (err)
+ goto fail;
+ }
+ for (pool_id = 0; pool_id < hw->rqpool_cnt; pool_id++) {
+ err = otx2_pool_init(pfvf, pool_id, stack_pages,
+ num_ptrs, pfvf->rbsize);
+ if (err)
+ goto fail;
+ }
+
+ /* Flush accumulated messages */
+ err = otx2_sync_mbox_msg(&pfvf->mbox);
+ if (err)
+ goto fail;
+
+ /* Allocate pointers and free them to aura/pool */
+ for (pool_id = 0; pool_id < hw->rqpool_cnt; pool_id++) {
+ pool = &pfvf->qset.pool[pool_id];
+ for (ptr = 0; ptr < num_ptrs; ptr++) {
+ bufptr = otx2_alloc_rbuf(pfvf, pool, GFP_KERNEL);
+ if (bufptr <= 0)
+ return bufptr;
+ otx2_aura_freeptr(pfvf, pool_id,
+ bufptr + OTX2_HEAD_ROOM);
+ }
+ otx2_get_page(pool);
+ }
+
+ return 0;
+fail:
+ otx2_mbox_reset(&pfvf->mbox.mbox, 0);
+ otx2_aura_pool_free(pfvf);
+ return err;
+}
+
+int otx2_config_npa(struct otx2_nic *pfvf)
+{
+ struct otx2_qset *qset = &pfvf->qset;
+ struct npa_lf_alloc_req *npalf;
+ struct otx2_hw *hw = &pfvf->hw;
+ int aura_cnt;
+
+ /* Pool - Stack of free buffer pointers
+	 * Aura - Allocates/frees pointers from/to pool for NIX DMA.
+ */
+
+ if (!hw->pool_cnt)
+ return -EINVAL;
+
+ qset->pool = devm_kzalloc(pfvf->dev, sizeof(struct otx2_pool) *
+ hw->pool_cnt, GFP_KERNEL);
+ if (!qset->pool)
+ return -ENOMEM;
+
+ /* Get memory to put this msg */
+ npalf = otx2_mbox_alloc_msg_npa_lf_alloc(&pfvf->mbox);
+ if (!npalf)
+ return -ENOMEM;
+
+ /* Set aura and pool counts */
+ npalf->nr_pools = hw->pool_cnt;
+ aura_cnt = ilog2(roundup_pow_of_two(hw->pool_cnt));
+ npalf->aura_sz = (aura_cnt >= ilog2(128)) ? (aura_cnt - 6) : 1;
+
+ return otx2_sync_mbox_msg(&pfvf->mbox);
+}
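
A worked example of the aura size encoding above (pool counts assumed for illustration): for hw->pool_cnt = 20, roundup_pow_of_two(20) = 32 and ilog2(32) = 5 < ilog2(128) = 7, so aura_sz = 1; for 256 pools, ilog2(256) = 8 and aura_sz = 8 - 6 = 2.
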
+
+int otx2_detach_resources(struct mbox *mbox)
+{
+ struct rsrc_detach *detach;
+
+ otx2_mbox_lock(mbox);
+ detach = otx2_mbox_alloc_msg_detach_resources(mbox);
+ if (!detach) {
+ otx2_mbox_unlock(mbox);
+ return -ENOMEM;
+ }
+
+ /* detach all */
+ detach->partial = false;
+
+ /* Send detach request to AF */
+ otx2_mbox_msg_send(&mbox->mbox, 0);
+ otx2_mbox_unlock(mbox);
+ return 0;
+}
+
+int otx2_attach_npa_nix(struct otx2_nic *pfvf)
+{
+ struct rsrc_attach *attach;
+ struct msg_req *msix;
+ int err;
+
+ otx2_mbox_lock(&pfvf->mbox);
+ /* Get memory to put this msg */
+ attach = otx2_mbox_alloc_msg_attach_resources(&pfvf->mbox);
+ if (!attach) {
+ otx2_mbox_unlock(&pfvf->mbox);
+ return -ENOMEM;
+ }
+
+ attach->npalf = true;
+ attach->nixlf = true;
+
+ /* Send attach request to AF */
+ err = otx2_sync_mbox_msg(&pfvf->mbox);
+ if (err) {
+ otx2_mbox_unlock(&pfvf->mbox);
+ return err;
+ }
+
+ pfvf->nix_blkaddr = BLKADDR_NIX0;
+
+ /* If the platform has two NIX blocks then LF may be
+ * allocated from NIX1.
+ */
+ if (otx2_read64(pfvf, RVU_PF_BLOCK_ADDRX_DISC(BLKADDR_NIX1)) & 0x1FFULL)
+ pfvf->nix_blkaddr = BLKADDR_NIX1;
+
+ /* Get NPA and NIX MSIX vector offsets */
+ msix = otx2_mbox_alloc_msg_msix_offset(&pfvf->mbox);
+ if (!msix) {
+ otx2_mbox_unlock(&pfvf->mbox);
+ return -ENOMEM;
+ }
+
+ err = otx2_sync_mbox_msg(&pfvf->mbox);
+ if (err) {
+ otx2_mbox_unlock(&pfvf->mbox);
+ return err;
+ }
+ otx2_mbox_unlock(&pfvf->mbox);
+
+ if (pfvf->hw.npa_msixoff == MSIX_VECTOR_INVALID ||
+ pfvf->hw.nix_msixoff == MSIX_VECTOR_INVALID) {
+ dev_err(pfvf->dev,
+ "RVUPF: Invalid MSIX vector offset for NPA/NIX\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+void otx2_ctx_disable(struct mbox *mbox, int type, bool npa)
+{
+ struct hwctx_disable_req *req;
+
+ otx2_mbox_lock(mbox);
+ /* Request AQ to disable this context */
+ if (npa)
+ req = otx2_mbox_alloc_msg_npa_hwctx_disable(mbox);
+ else
+ req = otx2_mbox_alloc_msg_nix_hwctx_disable(mbox);
+
+ if (!req) {
+ otx2_mbox_unlock(mbox);
+ return;
+ }
+
+ req->ctype = type;
+
+ if (otx2_sync_mbox_msg(mbox))
+ dev_err(mbox->pfvf->dev, "%s failed to disable context\n",
+ __func__);
+
+ otx2_mbox_unlock(mbox);
+}
+
+/* Mbox message handlers */
+void mbox_handler_cgx_stats(struct otx2_nic *pfvf,
+ struct cgx_stats_rsp *rsp)
+{
+ int id;
+
+ for (id = 0; id < CGX_RX_STATS_COUNT; id++)
+ pfvf->hw.cgx_rx_stats[id] = rsp->rx_stats[id];
+ for (id = 0; id < CGX_TX_STATS_COUNT; id++)
+ pfvf->hw.cgx_tx_stats[id] = rsp->tx_stats[id];
+}
+
+void mbox_handler_nix_txsch_alloc(struct otx2_nic *pf,
+ struct nix_txsch_alloc_rsp *rsp)
+{
+ int lvl, schq;
+
+ /* Setup transmit scheduler list */
+ for (lvl = 0; lvl < NIX_TXSCH_LVL_CNT; lvl++)
+ for (schq = 0; schq < rsp->schq[lvl]; schq++)
+ pf->hw.txschq_list[lvl][schq] =
+ rsp->schq_list[lvl][schq];
+}
+
+void mbox_handler_npa_lf_alloc(struct otx2_nic *pfvf,
+ struct npa_lf_alloc_rsp *rsp)
+{
+ pfvf->hw.stack_pg_ptrs = rsp->stack_pg_ptrs;
+ pfvf->hw.stack_pg_bytes = rsp->stack_pg_bytes;
+}
+
+void mbox_handler_nix_lf_alloc(struct otx2_nic *pfvf,
+ struct nix_lf_alloc_rsp *rsp)
+{
+ pfvf->hw.sqb_size = rsp->sqb_size;
+ pfvf->hw.rx_chan_base = rsp->rx_chan_base;
+ pfvf->hw.tx_chan_base = rsp->tx_chan_base;
+ pfvf->hw.lso_tsov4_idx = rsp->lso_tsov4_idx;
+ pfvf->hw.lso_tsov6_idx = rsp->lso_tsov6_idx;
+}
+
+void mbox_handler_msix_offset(struct otx2_nic *pfvf,
+ struct msix_offset_rsp *rsp)
+{
+ pfvf->hw.npa_msixoff = rsp->npa_msixoff;
+ pfvf->hw.nix_msixoff = rsp->nix_msixoff;
+}
+
+void otx2_free_cints(struct otx2_nic *pfvf, int n)
+{
+ struct otx2_qset *qset = &pfvf->qset;
+ struct otx2_hw *hw = &pfvf->hw;
+ int irq, qidx;
+
+ for (qidx = 0, irq = hw->nix_msixoff + NIX_LF_CINT_VEC_START;
+ qidx < n;
+ qidx++, irq++) {
+ int vector = pci_irq_vector(pfvf->pdev, irq);
+
+ irq_set_affinity_hint(vector, NULL);
+ free_cpumask_var(hw->affinity_mask[irq]);
+ free_irq(vector, &qset->napi[qidx]);
+ }
+}
+
+void otx2_set_cints_affinity(struct otx2_nic *pfvf)
+{
+ struct otx2_hw *hw = &pfvf->hw;
+ int vec, cpu, irq, cint;
+
+ vec = hw->nix_msixoff + NIX_LF_CINT_VEC_START;
+ cpu = cpumask_first(cpu_online_mask);
+
+ /* CQ interrupts */
+ for (cint = 0; cint < pfvf->hw.cint_cnt; cint++, vec++) {
+ if (!alloc_cpumask_var(&hw->affinity_mask[vec], GFP_KERNEL))
+ return;
+
+ cpumask_set_cpu(cpu, hw->affinity_mask[vec]);
+
+ irq = pci_irq_vector(pfvf->pdev, vec);
+ irq_set_affinity_hint(irq, hw->affinity_mask[vec]);
+
+ cpu = cpumask_next(cpu, cpu_online_mask);
+ if (unlikely(cpu >= nr_cpu_ids))
+ cpu = 0;
+ }
+}
+
+#define M(_name, _id, _fn_name, _req_type, _rsp_type) \
+int __weak \
+otx2_mbox_up_handler_ ## _fn_name(struct otx2_nic *pfvf, \
+ struct _req_type *req, \
+ struct _rsp_type *rsp) \
+{ \
+ /* Nothing to do here */ \
+ return 0; \
+} \
+EXPORT_SYMBOL(otx2_mbox_up_handler_ ## _fn_name);
+MBOX_UP_CGX_MESSAGES
+#undef M
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h
new file mode 100644
index 000000000000..320f3b7bf57f
--- /dev/null
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h
@@ -0,0 +1,615 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Marvell OcteonTx2 RVU Ethernet driver
+ *
+ * Copyright (C) 2020 Marvell International Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef OTX2_COMMON_H
+#define OTX2_COMMON_H
+
+#include <linux/pci.h>
+#include <linux/iommu.h>
+
+#include <mbox.h>
+#include "otx2_reg.h"
+#include "otx2_txrx.h"
+
+/* PCI device IDs */
+#define PCI_DEVID_OCTEONTX2_RVU_PF 0xA063
+
+#define PCI_SUBSYS_DEVID_96XX_RVU_PFVF 0xB200
+
+/* PCI BAR nos */
+#define PCI_CFG_REG_BAR_NUM 2
+#define PCI_MBOX_BAR_NUM 4
+
+#define NAME_SIZE 32
+
+enum aura_mapped_qtypes {
+ AURA_NIX_RQ,
+ AURA_NIX_SQ,
+};
+
+/* NIX LF interrupts range */
+#define NIX_LF_QINT_VEC_START 0x00
+#define NIX_LF_CINT_VEC_START 0x40
+#define NIX_LF_GINT_VEC 0x80
+#define NIX_LF_ERR_VEC 0x81
+#define NIX_LF_POISON_VEC 0x82
+
+/* RSS configuration */
+struct otx2_rss_info {
+ u8 enable;
+ u32 flowkey_cfg;
+ u16 rss_size;
+ u8 ind_tbl[MAX_RSS_INDIR_TBL_SIZE];
+#define RSS_HASH_KEY_SIZE 44 /* 352 bit key */
+ u8 key[RSS_HASH_KEY_SIZE];
+};
+
+/* NIX (or NPC) RX errors */
+enum otx2_errlvl {
+ NPC_ERRLVL_RE,
+ NPC_ERRLVL_LID_LA,
+ NPC_ERRLVL_LID_LB,
+ NPC_ERRLVL_LID_LC,
+ NPC_ERRLVL_LID_LD,
+ NPC_ERRLVL_LID_LE,
+ NPC_ERRLVL_LID_LF,
+ NPC_ERRLVL_LID_LG,
+ NPC_ERRLVL_LID_LH,
+ NPC_ERRLVL_NIX = 0x0F,
+};
+
+enum otx2_errcodes_re {
+ /* NPC_ERRLVL_RE errcodes */
+ ERRCODE_FCS = 0x7,
+ ERRCODE_FCS_RCV = 0x8,
+ ERRCODE_UNDERSIZE = 0x10,
+ ERRCODE_OVERSIZE = 0x11,
+ ERRCODE_OL2_LEN_MISMATCH = 0x12,
+ /* NPC_ERRLVL_NIX errcodes */
+ ERRCODE_OL3_LEN = 0x10,
+ ERRCODE_OL4_LEN = 0x11,
+ ERRCODE_OL4_CSUM = 0x12,
+ ERRCODE_IL3_LEN = 0x20,
+ ERRCODE_IL4_LEN = 0x21,
+ ERRCODE_IL4_CSUM = 0x22,
+};
+
+/* NIX TX stats */
+enum nix_stat_lf_tx {
+ TX_UCAST = 0x0,
+ TX_BCAST = 0x1,
+ TX_MCAST = 0x2,
+ TX_DROP = 0x3,
+ TX_OCTS = 0x4,
+ TX_STATS_ENUM_LAST,
+};
+
+/* NIX RX stats */
+enum nix_stat_lf_rx {
+ RX_OCTS = 0x0,
+ RX_UCAST = 0x1,
+ RX_BCAST = 0x2,
+ RX_MCAST = 0x3,
+ RX_DROP = 0x4,
+ RX_DROP_OCTS = 0x5,
+ RX_FCS = 0x6,
+ RX_ERR = 0x7,
+ RX_DRP_BCAST = 0x8,
+ RX_DRP_MCAST = 0x9,
+ RX_DRP_L3BCAST = 0xa,
+ RX_DRP_L3MCAST = 0xb,
+ RX_STATS_ENUM_LAST,
+};
+
+struct otx2_dev_stats {
+ u64 rx_bytes;
+ u64 rx_frames;
+ u64 rx_ucast_frames;
+ u64 rx_bcast_frames;
+ u64 rx_mcast_frames;
+ u64 rx_drops;
+
+ u64 tx_bytes;
+ u64 tx_frames;
+ u64 tx_ucast_frames;
+ u64 tx_bcast_frames;
+ u64 tx_mcast_frames;
+ u64 tx_drops;
+};
+
+/* Driver counted stats */
+struct otx2_drv_stats {
+ atomic_t rx_fcs_errs;
+ atomic_t rx_oversize_errs;
+ atomic_t rx_undersize_errs;
+ atomic_t rx_csum_errs;
+ atomic_t rx_len_errs;
+ atomic_t rx_other_errs;
+};
+
+struct mbox {
+ struct otx2_mbox mbox;
+ struct work_struct mbox_wrk;
+ struct otx2_mbox mbox_up;
+ struct work_struct mbox_up_wrk;
+ struct otx2_nic *pfvf;
+ void *bbuf_base; /* Bounce buffer for mbox memory */
+ struct mutex lock; /* serialize mailbox access */
+ int num_msgs; /* mbox number of messages */
+ int up_num_msgs; /* mbox_up number of messages */
+};
+
+struct otx2_hw {
+ struct pci_dev *pdev;
+ struct otx2_rss_info rss_info;
+ u16 rx_queues;
+ u16 tx_queues;
+ u16 max_queues;
+ u16 pool_cnt;
+ u16 rqpool_cnt;
+ u16 sqpool_cnt;
+
+ /* NPA */
+ u32 stack_pg_ptrs; /* No of ptrs per stack page */
+ u32 stack_pg_bytes; /* Size of stack page */
+ u16 sqb_size;
+
+ /* NIX */
+ u16 txschq_list[NIX_TXSCH_LVL_CNT][MAX_TXSCHQ_PER_FUNC];
+
+ /* HW settings, coalescing etc */
+ u16 rx_chan_base;
+ u16 tx_chan_base;
+ u16 cq_qcount_wait;
+ u16 cq_ecount_wait;
+ u16 rq_skid;
+ u8 cq_time_wait;
+
+ /* For TSO segmentation */
+ u8 lso_tsov4_idx;
+ u8 lso_tsov6_idx;
+ u8 hw_tso;
+
+ /* MSI-X */
+ u8 cint_cnt; /* CQ interrupt count */
+ u16 npa_msixoff; /* Offset of NPA vectors */
+ u16 nix_msixoff; /* Offset of NIX vectors */
+ char *irq_name;
+ cpumask_var_t *affinity_mask;
+
+ /* Stats */
+ struct otx2_dev_stats dev_stats;
+ struct otx2_drv_stats drv_stats;
+ u64 cgx_rx_stats[CGX_RX_STATS_COUNT];
+ u64 cgx_tx_stats[CGX_TX_STATS_COUNT];
+};
+
+struct refill_work {
+ struct delayed_work pool_refill_work;
+ struct otx2_nic *pf;
+};
+
+struct otx2_nic {
+ void __iomem *reg_base;
+ struct net_device *netdev;
+ void *iommu_domain;
+ u16 max_frs;
+ u16 rbsize; /* Receive buffer size */
+
+#define OTX2_FLAG_INTF_DOWN BIT_ULL(2)
+ u64 flags;
+
+ struct otx2_qset qset;
+ struct otx2_hw hw;
+ struct pci_dev *pdev;
+ struct device *dev;
+
+ /* Mbox */
+ struct mbox mbox;
+ struct workqueue_struct *mbox_wq;
+
+ u16 pcifunc; /* RVU PF_FUNC */
+ struct cgx_link_user_info linfo;
+
+ u64 reset_count;
+ struct work_struct reset_task;
+ struct refill_work *refill_wrk;
+
+ /* Ethtool stuff */
+ u32 msg_enable;
+
+ /* Block address of NIX either BLKADDR_NIX0 or BLKADDR_NIX1 */
+ int nix_blkaddr;
+};
+
+static inline bool is_96xx_A0(struct pci_dev *pdev)
+{
+ return (pdev->revision == 0x00) &&
+ (pdev->subsystem_device == PCI_SUBSYS_DEVID_96XX_RVU_PFVF);
+}
+
+static inline bool is_96xx_B0(struct pci_dev *pdev)
+{
+ return (pdev->revision == 0x01) &&
+ (pdev->subsystem_device == PCI_SUBSYS_DEVID_96XX_RVU_PFVF);
+}
+
+static inline void otx2_setup_dev_hw_settings(struct otx2_nic *pfvf)
+{
+ struct otx2_hw *hw = &pfvf->hw;
+
+ pfvf->hw.cq_time_wait = CQ_TIMER_THRESH_DEFAULT;
+ pfvf->hw.cq_ecount_wait = CQ_CQE_THRESH_DEFAULT;
+ pfvf->hw.cq_qcount_wait = CQ_QCOUNT_DEFAULT;
+
+ hw->hw_tso = true;
+
+ if (is_96xx_A0(pfvf->pdev)) {
+ hw->hw_tso = false;
+
+ /* Time based irq coalescing is not supported */
+ pfvf->hw.cq_qcount_wait = 0x0;
+
+		/* Due to a HW issue, previous silicons required a minimum
+		 * of 600 unused CQEs to avoid CQ overflow.
+ */
+ pfvf->hw.rq_skid = 600;
+ pfvf->qset.rqe_cnt = Q_COUNT(Q_SIZE_1K);
+ }
+}
+
+/* Register read/write APIs */
+static inline void __iomem *otx2_get_regaddr(struct otx2_nic *nic, u64 offset)
+{
+ u64 blkaddr;
+
+ switch ((offset >> RVU_FUNC_BLKADDR_SHIFT) & RVU_FUNC_BLKADDR_MASK) {
+ case BLKTYPE_NIX:
+ blkaddr = nic->nix_blkaddr;
+ break;
+ case BLKTYPE_NPA:
+ blkaddr = BLKADDR_NPA;
+ break;
+ default:
+ blkaddr = BLKADDR_RVUM;
+ break;
+	}
+
+ offset &= ~(RVU_FUNC_BLKADDR_MASK << RVU_FUNC_BLKADDR_SHIFT);
+ offset |= (blkaddr << RVU_FUNC_BLKADDR_SHIFT);
+
+ return nic->reg_base + offset;
+}
+
+static inline void otx2_write64(struct otx2_nic *nic, u64 offset, u64 val)
+{
+ void __iomem *addr = otx2_get_regaddr(nic, offset);
+
+ writeq(val, addr);
+}
+
+static inline u64 otx2_read64(struct otx2_nic *nic, u64 offset)
+{
+ void __iomem *addr = otx2_get_regaddr(nic, offset);
+
+ return readq(addr);
+}
+
+/* Mbox bounce buffer APIs */
+static inline int otx2_mbox_bbuf_init(struct mbox *mbox, struct pci_dev *pdev)
+{
+ struct otx2_mbox *otx2_mbox;
+ struct otx2_mbox_dev *mdev;
+
+ mbox->bbuf_base = devm_kmalloc(&pdev->dev, MBOX_SIZE, GFP_KERNEL);
+ if (!mbox->bbuf_base)
+ return -ENOMEM;
+
+ /* Overwrite mbox mbase to point to bounce buffer, so that PF/VF
+ * prepare all mbox messages in bounce buffer instead of directly
+ * in hw mbox memory.
+ */
+ otx2_mbox = &mbox->mbox;
+ mdev = &otx2_mbox->dev[0];
+ mdev->mbase = mbox->bbuf_base;
+
+ otx2_mbox = &mbox->mbox_up;
+ mdev = &otx2_mbox->dev[0];
+ mdev->mbase = mbox->bbuf_base;
+ return 0;
+}
+
+static inline void otx2_sync_mbox_bbuf(struct otx2_mbox *mbox, int devid)
+{
+ u16 msgs_offset = ALIGN(sizeof(struct mbox_hdr), MBOX_MSG_ALIGN);
+ void *hw_mbase = mbox->hwbase + (devid * MBOX_SIZE);
+ struct otx2_mbox_dev *mdev = &mbox->dev[devid];
+ struct mbox_hdr *hdr;
+ u64 msg_size;
+
+ if (mdev->mbase == hw_mbase)
+ return;
+
+ hdr = hw_mbase + mbox->rx_start;
+ msg_size = hdr->msg_size;
+
+ if (msg_size > mbox->rx_size - msgs_offset)
+ msg_size = mbox->rx_size - msgs_offset;
+
+ /* Copy mbox messages from mbox memory to bounce buffer */
+ memcpy(mdev->mbase + mbox->rx_start,
+ hw_mbase + mbox->rx_start, msg_size + msgs_offset);
+}
+
+static inline void otx2_mbox_lock_init(struct mbox *mbox)
+{
+ mutex_init(&mbox->lock);
+}
+
+static inline void otx2_mbox_lock(struct mbox *mbox)
+{
+ mutex_lock(&mbox->lock);
+}
+
+static inline void otx2_mbox_unlock(struct mbox *mbox)
+{
+ mutex_unlock(&mbox->lock);
+}
+
+/* In the absence of an API for 128-bit IO memory access on arm64,
+ * implement the required operations in place.
+ */
+#if defined(CONFIG_ARM64)
+static inline void otx2_write128(u64 lo, u64 hi, void __iomem *addr)
+{
+ __asm__ volatile("stp %x[x0], %x[x1], [%x[p1],#0]!"
+ ::[x0]"r"(lo), [x1]"r"(hi), [p1]"r"(addr));
+}
+
+static inline u64 otx2_atomic64_add(u64 incr, u64 *ptr)
+{
+ u64 result;
+
+ __asm__ volatile(".cpu generic+lse\n"
+ "ldadd %x[i], %x[r], [%[b]]"
+ : [r]"=r"(result), "+m"(*ptr)
+ : [i]"r"(incr), [b]"r"(ptr)
+ : "memory");
+ return result;
+}
+
+static inline u64 otx2_lmt_flush(uint64_t addr)
+{
+ u64 result = 0;
+
+ __asm__ volatile(".cpu generic+lse\n"
+ "ldeor xzr,%x[rf],[%[rs]]"
+ : [rf]"=r"(result)
+ : [rs]"r"(addr));
+ return result;
+}
+
+#else
+#define otx2_write128(lo, hi, addr)
+#define otx2_atomic64_add(incr, ptr) ({ *ptr += incr; })
+#define otx2_lmt_flush(addr) ({ 0; })
+#endif
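
Note that otx2_atomic64_add() doubles as the driver's "read a per-queue HW register" primitive: on arm64, LDADD returns the location's previous value, and the stats helpers at the top of otx2_common.c rely on exactly that pattern, e.g.:

	u64 incr = (u64)qidx << 32;	/* queue selector in bits <63:32> */
	u64 *ptr = (u64 *)otx2_get_regaddr(pfvf, NIX_LF_RQ_OP_OCTS);

	stats->bytes = otx2_atomic64_add(incr, ptr);
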
+
+/* Alloc pointer from pool/aura */
+static inline u64 otx2_aura_allocptr(struct otx2_nic *pfvf, int aura)
+{
+ u64 *ptr = (u64 *)otx2_get_regaddr(pfvf,
+ NPA_LF_AURA_OP_ALLOCX(0));
+ u64 incr = (u64)aura | BIT_ULL(63);
+
+ return otx2_atomic64_add(incr, ptr);
+}
+
+/* Free pointer to a pool/aura */
+static inline void otx2_aura_freeptr(struct otx2_nic *pfvf,
+ int aura, s64 buf)
+{
+ otx2_write128((u64)buf, (u64)aura | BIT_ULL(63),
+ otx2_get_regaddr(pfvf, NPA_LF_AURA_OP_FREE0));
+}
+
+/* Update page ref count */
+static inline void otx2_get_page(struct otx2_pool *pool)
+{
+ if (!pool->page)
+ return;
+
+ if (pool->pageref)
+ page_ref_add(pool->page, pool->pageref);
+ pool->pageref = 0;
+ pool->page = NULL;
+}
+
+static inline int otx2_get_pool_idx(struct otx2_nic *pfvf, int type, int idx)
+{
+ if (type == AURA_NIX_SQ)
+ return pfvf->hw.rqpool_cnt + idx;
+
+ /* AURA_NIX_RQ */
+ return idx;
+}
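+/* Pool indices place all RQ pools first, followed by SQ pools.
+ * Example (illustrative): with rqpool_cnt == 4, RQ1 uses pool index 1
+ * while SQ1 uses pool index 4 + 1 == 5.
+ */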
+
+/* Mbox APIs */
+static inline int otx2_sync_mbox_msg(struct mbox *mbox)
+{
+ int err;
+
+ if (!otx2_mbox_nonempty(&mbox->mbox, 0))
+ return 0;
+ otx2_mbox_msg_send(&mbox->mbox, 0);
+ err = otx2_mbox_wait_for_rsp(&mbox->mbox, 0);
+ if (err)
+ return err;
+
+ return otx2_mbox_check_rsp_msgs(&mbox->mbox, 0);
+}
+
+static inline int otx2_sync_mbox_up_msg(struct mbox *mbox, int devid)
+{
+ int err;
+
+ if (!otx2_mbox_nonempty(&mbox->mbox_up, devid))
+ return 0;
+ otx2_mbox_msg_send(&mbox->mbox_up, devid);
+ err = otx2_mbox_wait_for_rsp(&mbox->mbox_up, devid);
+ if (err)
+ return err;
+
+ return otx2_mbox_check_rsp_msgs(&mbox->mbox_up, devid);
+}
+
+/* Use this API to send mbox msgs in atomic context
+ * where sleeping is not allowed
+ */
+static inline int otx2_sync_mbox_msg_busy_poll(struct mbox *mbox)
+{
+ int err;
+
+ if (!otx2_mbox_nonempty(&mbox->mbox, 0))
+ return 0;
+ otx2_mbox_msg_send(&mbox->mbox, 0);
+ err = otx2_mbox_busy_poll_for_rsp(&mbox->mbox, 0);
+ if (err)
+ return err;
+
+ return otx2_mbox_check_rsp_msgs(&mbox->mbox, 0);
+}
+
+#define M(_name, _id, _fn_name, _req_type, _rsp_type) \
+static struct _req_type __maybe_unused \
+*otx2_mbox_alloc_msg_ ## _fn_name(struct mbox *mbox) \
+{ \
+ struct _req_type *req; \
+ \
+ req = (struct _req_type *)otx2_mbox_alloc_msg_rsp( \
+ &mbox->mbox, 0, sizeof(struct _req_type), \
+ sizeof(struct _rsp_type)); \
+ if (!req) \
+ return NULL; \
+ req->hdr.sig = OTX2_MBOX_REQ_SIG; \
+ req->hdr.id = _id; \
+ return req; \
+}
+
+MBOX_MESSAGES
+#undef M
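+/* For illustration (assuming MBOX_MESSAGES maps MBOX_MSG_READY to
+ * msg_req/msg_rsp, as the otx2_mbox_alloc_msg_ready() call in the PF
+ * probe path suggests), the M() expansion above yields roughly:
+ *
+ *	static struct msg_req *otx2_mbox_alloc_msg_ready(struct mbox *mbox)
+ *	{
+ *		struct msg_req *req;
+ *
+ *		req = (struct msg_req *)otx2_mbox_alloc_msg_rsp(
+ *				&mbox->mbox, 0, sizeof(struct msg_req),
+ *				sizeof(struct msg_rsp));
+ *		if (!req)
+ *			return NULL;
+ *		req->hdr.sig = OTX2_MBOX_REQ_SIG;
+ *		req->hdr.id = MBOX_MSG_READY;
+ *		return req;
+ *	}
+ */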
+
+#define M(_name, _id, _fn_name, _req_type, _rsp_type) \
+int \
+otx2_mbox_up_handler_ ## _fn_name(struct otx2_nic *pfvf, \
+ struct _req_type *req, \
+ struct _rsp_type *rsp); \
+
+MBOX_UP_CGX_MESSAGES
+#undef M
+
+/* Time to wait before watchdog kicks off */
+#define OTX2_TX_TIMEOUT (100 * HZ)
+
+#define RVU_PFVF_PF_SHIFT 10
+#define RVU_PFVF_PF_MASK 0x3F
+#define RVU_PFVF_FUNC_SHIFT 0
+#define RVU_PFVF_FUNC_MASK 0x3FF
+
+static inline int rvu_get_pf(u16 pcifunc)
+{
+ return (pcifunc >> RVU_PFVF_PF_SHIFT) & RVU_PFVF_PF_MASK;
+}
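+/* Worked example: with the PF in bits 15:10 and the function in
+ * bits 9:0, pcifunc == 0x0801 decodes to PF 2 (0x0801 >> 10) and
+ * function 1 (0x0801 & RVU_PFVF_FUNC_MASK).
+ */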
+
+static inline dma_addr_t otx2_dma_map_page(struct otx2_nic *pfvf,
+ struct page *page,
+ size_t offset, size_t size,
+ enum dma_data_direction dir)
+{
+ dma_addr_t iova;
+
+ iova = dma_map_page_attrs(pfvf->dev, page,
+ offset, size, dir, DMA_ATTR_SKIP_CPU_SYNC);
+ if (unlikely(dma_mapping_error(pfvf->dev, iova)))
+ return (dma_addr_t)NULL;
+ return iova;
+}
+
+static inline void otx2_dma_unmap_page(struct otx2_nic *pfvf,
+ dma_addr_t addr, size_t size,
+ enum dma_data_direction dir)
+{
+ dma_unmap_page_attrs(pfvf->dev, addr, size,
+ dir, DMA_ATTR_SKIP_CPU_SYNC);
+}
+
+/* MSI-X APIs */
+void otx2_free_cints(struct otx2_nic *pfvf, int n);
+void otx2_set_cints_affinity(struct otx2_nic *pfvf);
+int otx2_set_mac_address(struct net_device *netdev, void *p);
+int otx2_hw_set_mtu(struct otx2_nic *pfvf, int mtu);
+void otx2_tx_timeout(struct net_device *netdev, unsigned int txq);
+void otx2_get_mac_from_af(struct net_device *netdev);
+void otx2_config_irq_coalescing(struct otx2_nic *pfvf, int qidx);
+
+/* RVU block related APIs */
+int otx2_attach_npa_nix(struct otx2_nic *pfvf);
+int otx2_detach_resources(struct mbox *mbox);
+int otx2_config_npa(struct otx2_nic *pfvf);
+int otx2_sq_aura_pool_init(struct otx2_nic *pfvf);
+int otx2_rq_aura_pool_init(struct otx2_nic *pfvf);
+void otx2_aura_pool_free(struct otx2_nic *pfvf);
+void otx2_free_aura_ptr(struct otx2_nic *pfvf, int type);
+void otx2_sq_free_sqbs(struct otx2_nic *pfvf);
+int otx2_config_nix(struct otx2_nic *pfvf);
+int otx2_config_nix_queues(struct otx2_nic *pfvf);
+int otx2_txschq_config(struct otx2_nic *pfvf, int lvl);
+int otx2_txsch_alloc(struct otx2_nic *pfvf);
+int otx2_txschq_stop(struct otx2_nic *pfvf);
+void otx2_sqb_flush(struct otx2_nic *pfvf);
+dma_addr_t otx2_alloc_rbuf(struct otx2_nic *pfvf, struct otx2_pool *pool,
+ gfp_t gfp);
+int otx2_rxtx_enable(struct otx2_nic *pfvf, bool enable);
+void otx2_ctx_disable(struct mbox *mbox, int type, bool npa);
+void otx2_cleanup_rx_cqes(struct otx2_nic *pfvf, struct otx2_cq_queue *cq);
+void otx2_cleanup_tx_cqes(struct otx2_nic *pfvf, struct otx2_cq_queue *cq);
+
+/* RSS configuration APIs */
+int otx2_rss_init(struct otx2_nic *pfvf);
+int otx2_set_flowkey_cfg(struct otx2_nic *pfvf);
+void otx2_set_rss_key(struct otx2_nic *pfvf);
+int otx2_set_rss_table(struct otx2_nic *pfvf);
+
+/* Mbox handlers */
+void mbox_handler_msix_offset(struct otx2_nic *pfvf,
+ struct msix_offset_rsp *rsp);
+void mbox_handler_npa_lf_alloc(struct otx2_nic *pfvf,
+ struct npa_lf_alloc_rsp *rsp);
+void mbox_handler_nix_lf_alloc(struct otx2_nic *pfvf,
+ struct nix_lf_alloc_rsp *rsp);
+void mbox_handler_nix_txsch_alloc(struct otx2_nic *pf,
+ struct nix_txsch_alloc_rsp *rsp);
+void mbox_handler_cgx_stats(struct otx2_nic *pfvf,
+ struct cgx_stats_rsp *rsp);
+
+/* Device stats APIs */
+void otx2_get_dev_stats(struct otx2_nic *pfvf);
+void otx2_get_stats64(struct net_device *netdev,
+ struct rtnl_link_stats64 *stats);
+void otx2_update_lmac_stats(struct otx2_nic *pfvf);
+int otx2_update_rq_stats(struct otx2_nic *pfvf, int qidx);
+int otx2_update_sq_stats(struct otx2_nic *pfvf, int qidx);
+void otx2_set_ethtool_ops(struct net_device *netdev);
+
+int otx2_open(struct net_device *netdev);
+int otx2_stop(struct net_device *netdev);
+int otx2_set_real_num_queues(struct net_device *netdev,
+ int tx_queues, int rx_queues);
+#endif /* OTX2_COMMON_H */
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ethtool.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ethtool.c
new file mode 100644
index 000000000000..60fcf82dd8cb
--- /dev/null
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ethtool.c
@@ -0,0 +1,662 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Marvell OcteonTx2 RVU Ethernet driver
+ *
+ * Copyright (C) 2020 Marvell International Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/pci.h>
+#include <linux/ethtool.h>
+#include <linux/stddef.h>
+#include <linux/etherdevice.h>
+#include <linux/log2.h>
+
+#include "otx2_common.h"
+
+#define DRV_NAME "octeontx2-nicpf"
+
+struct otx2_stat {
+ char name[ETH_GSTRING_LEN];
+ unsigned int index;
+};
+
+/* HW device stats */
+#define OTX2_DEV_STAT(stat) { \
+ .name = #stat, \
+ .index = offsetof(struct otx2_dev_stats, stat) / sizeof(u64), \
+}
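+/* E.g. OTX2_DEV_STAT(rx_ucast_frames) expands to
+ * { .name = "rx_ucast_frames", .index = 0 }, assuming rx_ucast_frames
+ * is the first u64 member of struct otx2_dev_stats.
+ */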
+
+static const struct otx2_stat otx2_dev_stats[] = {
+ OTX2_DEV_STAT(rx_ucast_frames),
+ OTX2_DEV_STAT(rx_bcast_frames),
+ OTX2_DEV_STAT(rx_mcast_frames),
+
+ OTX2_DEV_STAT(tx_ucast_frames),
+ OTX2_DEV_STAT(tx_bcast_frames),
+ OTX2_DEV_STAT(tx_mcast_frames),
+};
+
+/* Driver level stats */
+#define OTX2_DRV_STAT(stat) { \
+ .name = #stat, \
+ .index = offsetof(struct otx2_drv_stats, stat) / sizeof(atomic_t), \
+}
+
+static const struct otx2_stat otx2_drv_stats[] = {
+ OTX2_DRV_STAT(rx_fcs_errs),
+ OTX2_DRV_STAT(rx_oversize_errs),
+ OTX2_DRV_STAT(rx_undersize_errs),
+ OTX2_DRV_STAT(rx_csum_errs),
+ OTX2_DRV_STAT(rx_len_errs),
+ OTX2_DRV_STAT(rx_other_errs),
+};
+
+static const struct otx2_stat otx2_queue_stats[] = {
+ { "bytes", 0 },
+ { "frames", 1 },
+};
+
+static const unsigned int otx2_n_dev_stats = ARRAY_SIZE(otx2_dev_stats);
+static const unsigned int otx2_n_drv_stats = ARRAY_SIZE(otx2_drv_stats);
+static const unsigned int otx2_n_queue_stats = ARRAY_SIZE(otx2_queue_stats);
+
+static void otx2_dev_open(struct net_device *netdev)
+{
+ otx2_open(netdev);
+}
+
+static void otx2_dev_stop(struct net_device *netdev)
+{
+ otx2_stop(netdev);
+}
+
+static void otx2_get_drvinfo(struct net_device *netdev,
+ struct ethtool_drvinfo *info)
+{
+ struct otx2_nic *pfvf = netdev_priv(netdev);
+
+ strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
+ strlcpy(info->bus_info, pci_name(pfvf->pdev), sizeof(info->bus_info));
+}
+
+static void otx2_get_qset_strings(struct otx2_nic *pfvf, u8 **data, int qset)
+{
+ int start_qidx = qset * pfvf->hw.rx_queues;
+ int qidx, stats;
+
+ for (qidx = 0; qidx < pfvf->hw.rx_queues; qidx++) {
+ for (stats = 0; stats < otx2_n_queue_stats; stats++) {
+ sprintf(*data, "rxq%d: %s", qidx + start_qidx,
+ otx2_queue_stats[stats].name);
+ *data += ETH_GSTRING_LEN;
+ }
+ }
+ for (qidx = 0; qidx < pfvf->hw.tx_queues; qidx++) {
+ for (stats = 0; stats < otx2_n_queue_stats; stats++) {
+ sprintf(*data, "txq%d: %s", qidx + start_qidx,
+ otx2_queue_stats[stats].name);
+ *data += ETH_GSTRING_LEN;
+ }
+ }
+}
+
+static void otx2_get_strings(struct net_device *netdev, u32 sset, u8 *data)
+{
+ struct otx2_nic *pfvf = netdev_priv(netdev);
+ int stats;
+
+ if (sset != ETH_SS_STATS)
+ return;
+
+ for (stats = 0; stats < otx2_n_dev_stats; stats++) {
+ memcpy(data, otx2_dev_stats[stats].name, ETH_GSTRING_LEN);
+ data += ETH_GSTRING_LEN;
+ }
+
+ for (stats = 0; stats < otx2_n_drv_stats; stats++) {
+ memcpy(data, otx2_drv_stats[stats].name, ETH_GSTRING_LEN);
+ data += ETH_GSTRING_LEN;
+ }
+
+ otx2_get_qset_strings(pfvf, &data, 0);
+
+ for (stats = 0; stats < CGX_RX_STATS_COUNT; stats++) {
+ sprintf(data, "cgx_rxstat%d: ", stats);
+ data += ETH_GSTRING_LEN;
+ }
+
+ for (stats = 0; stats < CGX_TX_STATS_COUNT; stats++) {
+ sprintf(data, "cgx_txstat%d: ", stats);
+ data += ETH_GSTRING_LEN;
+ }
+
+ strcpy(data, "reset_count");
+ data += ETH_GSTRING_LEN;
+}
+
+static void otx2_get_qset_stats(struct otx2_nic *pfvf,
+ struct ethtool_stats *stats, u64 **data)
+{
+ int stat, qidx;
+
+ if (!pfvf)
+ return;
+ for (qidx = 0; qidx < pfvf->hw.rx_queues; qidx++) {
+ if (!otx2_update_rq_stats(pfvf, qidx)) {
+ for (stat = 0; stat < otx2_n_queue_stats; stat++)
+ *((*data)++) = 0;
+ continue;
+ }
+ for (stat = 0; stat < otx2_n_queue_stats; stat++)
+ *((*data)++) = ((u64 *)&pfvf->qset.rq[qidx].stats)
+ [otx2_queue_stats[stat].index];
+ }
+
+ for (qidx = 0; qidx < pfvf->hw.tx_queues; qidx++) {
+ if (!otx2_update_sq_stats(pfvf, qidx)) {
+ for (stat = 0; stat < otx2_n_queue_stats; stat++)
+ *((*data)++) = 0;
+ continue;
+ }
+ for (stat = 0; stat < otx2_n_queue_stats; stat++)
+ *((*data)++) = ((u64 *)&pfvf->qset.sq[qidx].stats)
+ [otx2_queue_stats[stat].index];
+ }
+}
+
+/* Get device and per queue statistics */
+static void otx2_get_ethtool_stats(struct net_device *netdev,
+ struct ethtool_stats *stats, u64 *data)
+{
+ struct otx2_nic *pfvf = netdev_priv(netdev);
+ int stat;
+
+ otx2_get_dev_stats(pfvf);
+ for (stat = 0; stat < otx2_n_dev_stats; stat++)
+ *(data++) = ((u64 *)&pfvf->hw.dev_stats)
+ [otx2_dev_stats[stat].index];
+
+ for (stat = 0; stat < otx2_n_drv_stats; stat++)
+ *(data++) = atomic_read(&((atomic_t *)&pfvf->hw.drv_stats)
+ [otx2_drv_stats[stat].index]);
+
+ otx2_get_qset_stats(pfvf, stats, &data);
+ otx2_update_lmac_stats(pfvf);
+ for (stat = 0; stat < CGX_RX_STATS_COUNT; stat++)
+ *(data++) = pfvf->hw.cgx_rx_stats[stat];
+ for (stat = 0; stat < CGX_TX_STATS_COUNT; stat++)
+ *(data++) = pfvf->hw.cgx_tx_stats[stat];
+ *(data++) = pfvf->reset_count;
+}
+
+static int otx2_get_sset_count(struct net_device *netdev, int sset)
+{
+ struct otx2_nic *pfvf = netdev_priv(netdev);
+ int qstats_count;
+
+ if (sset != ETH_SS_STATS)
+ return -EINVAL;
+
+ qstats_count = otx2_n_queue_stats *
+ (pfvf->hw.rx_queues + pfvf->hw.tx_queues);
+
+ return otx2_n_dev_stats + otx2_n_drv_stats + qstats_count +
+ CGX_RX_STATS_COUNT + CGX_TX_STATS_COUNT + 1;
+}
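+/* Illustrative count: with 2 Rx and 2 Tx queues this returns
+ * 6 (dev stats) + 6 (drv stats) + 2 * (2 + 2) (queue stats) plus the
+ * CGX Rx/Tx counters and the final reset_count entry.
+ */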
+
+/* Get the number of queues the device supports and the current queue count */
+static void otx2_get_channels(struct net_device *dev,
+ struct ethtool_channels *channel)
+{
+ struct otx2_nic *pfvf = netdev_priv(dev);
+
+ channel->max_rx = pfvf->hw.max_queues;
+ channel->max_tx = pfvf->hw.max_queues;
+
+ channel->rx_count = pfvf->hw.rx_queues;
+ channel->tx_count = pfvf->hw.tx_queues;
+}
+
+/* Set the number of Tx and Rx queues to be used */
+static int otx2_set_channels(struct net_device *dev,
+ struct ethtool_channels *channel)
+{
+ struct otx2_nic *pfvf = netdev_priv(dev);
+ bool if_up = netif_running(dev);
+ int err = 0;
+
+ if (!channel->rx_count || !channel->tx_count)
+ return -EINVAL;
+
+ if (if_up)
+ otx2_dev_stop(dev);
+
+ err = otx2_set_real_num_queues(dev, channel->tx_count,
+ channel->rx_count);
+ if (err)
+ goto fail;
+
+ pfvf->hw.rx_queues = channel->rx_count;
+ pfvf->hw.tx_queues = channel->tx_count;
+ pfvf->qset.cq_cnt = pfvf->hw.tx_queues + pfvf->hw.rx_queues;
+
+fail:
+ if (if_up)
+ otx2_dev_open(dev);
+
+ netdev_info(dev, "Setting num Tx rings to %d, Rx rings to %d success\n",
+ pfvf->hw.tx_queues, pfvf->hw.rx_queues);
+
+ return err;
+}
+
+static void otx2_get_ringparam(struct net_device *netdev,
+ struct ethtool_ringparam *ring)
+{
+ struct otx2_nic *pfvf = netdev_priv(netdev);
+ struct otx2_qset *qs = &pfvf->qset;
+
+ ring->rx_max_pending = Q_COUNT(Q_SIZE_MAX);
+ ring->rx_pending = qs->rqe_cnt ? qs->rqe_cnt : Q_COUNT(Q_SIZE_256);
+ ring->tx_max_pending = Q_COUNT(Q_SIZE_MAX);
+ ring->tx_pending = qs->sqe_cnt ? qs->sqe_cnt : Q_COUNT(Q_SIZE_4K);
+}
+
+static int otx2_set_ringparam(struct net_device *netdev,
+ struct ethtool_ringparam *ring)
+{
+ struct otx2_nic *pfvf = netdev_priv(netdev);
+ bool if_up = netif_running(netdev);
+ struct otx2_qset *qs = &pfvf->qset;
+ u32 rx_count, tx_count;
+
+ if (ring->rx_mini_pending || ring->rx_jumbo_pending)
+ return -EINVAL;
+
+	/* Permitted lengths are 16, 64, 256, 1K, 4K, 16K, 64K, 256K, 1M */
+ rx_count = ring->rx_pending;
+	/* On some silicon variants a skid of reserved CQEs is
+	 * needed to avoid CQ overflow.
+	 */
+ if (rx_count < pfvf->hw.rq_skid)
+ rx_count = pfvf->hw.rq_skid;
+ rx_count = Q_COUNT(Q_SIZE(rx_count, 3));
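+	/* E.g. a requested rx_pending of 300 is rounded here to one of
+	 * the permitted lengths above, so the programmed count may differ
+	 * from what the user asked for.
+	 */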
+
+	/* Due to pipelining, a minimum of 2000 unused SQ CQEs
+	 * needs to be maintained to avoid CQ overflow, hence the
+	 * minimum 4K size.
+	 */
+ tx_count = clamp_t(u32, ring->tx_pending,
+ Q_COUNT(Q_SIZE_4K), Q_COUNT(Q_SIZE_MAX));
+ tx_count = Q_COUNT(Q_SIZE(tx_count, 3));
+
+ if (tx_count == qs->sqe_cnt && rx_count == qs->rqe_cnt)
+ return 0;
+
+ if (if_up)
+ otx2_dev_stop(netdev);
+
+	/* Store the counts, already rounded to the nearest supported size */
+ qs->sqe_cnt = tx_count;
+ qs->rqe_cnt = rx_count;
+
+ if (if_up)
+ otx2_dev_open(netdev);
+ return 0;
+}
+
+static int otx2_get_coalesce(struct net_device *netdev,
+ struct ethtool_coalesce *cmd)
+{
+ struct otx2_nic *pfvf = netdev_priv(netdev);
+ struct otx2_hw *hw = &pfvf->hw;
+
+ cmd->rx_coalesce_usecs = hw->cq_time_wait;
+ cmd->rx_max_coalesced_frames = hw->cq_ecount_wait;
+ cmd->tx_coalesce_usecs = hw->cq_time_wait;
+ cmd->tx_max_coalesced_frames = hw->cq_ecount_wait;
+
+ return 0;
+}
+
+static int otx2_set_coalesce(struct net_device *netdev,
+ struct ethtool_coalesce *ec)
+{
+ struct otx2_nic *pfvf = netdev_priv(netdev);
+ struct otx2_hw *hw = &pfvf->hw;
+ int qidx;
+
+ if (ec->use_adaptive_rx_coalesce || ec->use_adaptive_tx_coalesce ||
+ ec->rx_coalesce_usecs_irq || ec->rx_max_coalesced_frames_irq ||
+ ec->tx_coalesce_usecs_irq || ec->tx_max_coalesced_frames_irq ||
+ ec->stats_block_coalesce_usecs || ec->pkt_rate_low ||
+ ec->rx_coalesce_usecs_low || ec->rx_max_coalesced_frames_low ||
+ ec->tx_coalesce_usecs_low || ec->tx_max_coalesced_frames_low ||
+ ec->pkt_rate_high || ec->rx_coalesce_usecs_high ||
+ ec->rx_max_coalesced_frames_high || ec->tx_coalesce_usecs_high ||
+ ec->tx_max_coalesced_frames_high || ec->rate_sample_interval)
+ return -EOPNOTSUPP;
+
+ if (!ec->rx_max_coalesced_frames || !ec->tx_max_coalesced_frames)
+ return 0;
+
+	/* 'cq_time_wait' is an 8-bit value in multiples of 100ns,
+	 * so clamp the user-given value to the range of 1 to 25 usec.
+	 */
+ ec->rx_coalesce_usecs = clamp_t(u32, ec->rx_coalesce_usecs,
+ 1, CQ_TIMER_THRESH_MAX);
+ ec->tx_coalesce_usecs = clamp_t(u32, ec->tx_coalesce_usecs,
+ 1, CQ_TIMER_THRESH_MAX);
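+	/* E.g. a request of 100 usec is clamped down to 25 usec, since
+	 * an 8-bit count of 100ns units tops out at ~25 usec
+	 * (255 * 100ns).
+	 */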
+
+	/* Rx and Tx are mapped to the same CQ; check which one
+	 * changed and, if both, choose the min.
+	 */
+ if (hw->cq_time_wait == ec->rx_coalesce_usecs)
+ hw->cq_time_wait = ec->tx_coalesce_usecs;
+ else if (hw->cq_time_wait == ec->tx_coalesce_usecs)
+ hw->cq_time_wait = ec->rx_coalesce_usecs;
+ else
+ hw->cq_time_wait = min_t(u8, ec->rx_coalesce_usecs,
+ ec->tx_coalesce_usecs);
+
+	/* Max supported 'cq_ecount_wait' is 16-bit,
+	 * so clamp the user-given value to the range of 1 to 64K.
+	 */
+ ec->rx_max_coalesced_frames = clamp_t(u32, ec->rx_max_coalesced_frames,
+ 1, U16_MAX);
+ ec->tx_max_coalesced_frames = clamp_t(u32, ec->tx_max_coalesced_frames,
+ 1, U16_MAX);
+
+	/* Rx and Tx are mapped to the same CQ; check which one
+	 * changed and, if both, choose the min.
+	 */
+ if (hw->cq_ecount_wait == ec->rx_max_coalesced_frames)
+ hw->cq_ecount_wait = ec->tx_max_coalesced_frames;
+ else if (hw->cq_ecount_wait == ec->tx_max_coalesced_frames)
+ hw->cq_ecount_wait = ec->rx_max_coalesced_frames;
+ else
+ hw->cq_ecount_wait = min_t(u16, ec->rx_max_coalesced_frames,
+ ec->tx_max_coalesced_frames);
+
+ if (netif_running(netdev)) {
+ for (qidx = 0; qidx < pfvf->hw.cint_cnt; qidx++)
+ otx2_config_irq_coalescing(pfvf, qidx);
+ }
+
+ return 0;
+}
+
+static int otx2_get_rss_hash_opts(struct otx2_nic *pfvf,
+ struct ethtool_rxnfc *nfc)
+{
+ struct otx2_rss_info *rss = &pfvf->hw.rss_info;
+
+ if (!(rss->flowkey_cfg &
+ (NIX_FLOW_KEY_TYPE_IPV4 | NIX_FLOW_KEY_TYPE_IPV6)))
+ return 0;
+
+	/* Minimum is IPv4 and IPv6, SIP/DIP */
+ nfc->data = RXH_IP_SRC | RXH_IP_DST;
+
+ switch (nfc->flow_type) {
+ case TCP_V4_FLOW:
+ case TCP_V6_FLOW:
+ if (rss->flowkey_cfg & NIX_FLOW_KEY_TYPE_TCP)
+ nfc->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
+ break;
+ case UDP_V4_FLOW:
+ case UDP_V6_FLOW:
+ if (rss->flowkey_cfg & NIX_FLOW_KEY_TYPE_UDP)
+ nfc->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
+ break;
+ case SCTP_V4_FLOW:
+ case SCTP_V6_FLOW:
+ if (rss->flowkey_cfg & NIX_FLOW_KEY_TYPE_SCTP)
+ nfc->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
+ break;
+ case AH_ESP_V4_FLOW:
+ case AH_V4_FLOW:
+ case ESP_V4_FLOW:
+ case IPV4_FLOW:
+ case AH_ESP_V6_FLOW:
+ case AH_V6_FLOW:
+ case ESP_V6_FLOW:
+ case IPV6_FLOW:
+ break;
+ default:
+ return -EINVAL;
+ }
+ return 0;
+}
+
+static int otx2_set_rss_hash_opts(struct otx2_nic *pfvf,
+ struct ethtool_rxnfc *nfc)
+{
+ struct otx2_rss_info *rss = &pfvf->hw.rss_info;
+ u32 rxh_l4 = RXH_L4_B_0_1 | RXH_L4_B_2_3;
+ u32 rss_cfg = rss->flowkey_cfg;
+
+ if (!rss->enable) {
+ netdev_err(pfvf->netdev,
+ "RSS is disabled, cannot change settings\n");
+ return -EIO;
+ }
+
+	/* Minimum is IPv4 and IPv6, SIP/DIP */
+ if (!(nfc->data & RXH_IP_SRC) || !(nfc->data & RXH_IP_DST))
+ return -EINVAL;
+
+ switch (nfc->flow_type) {
+ case TCP_V4_FLOW:
+ case TCP_V6_FLOW:
+ /* Different config for v4 and v6 is not supported.
+ * Both of them have to be either 4-tuple or 2-tuple.
+ */
+ switch (nfc->data & rxh_l4) {
+ case 0:
+ rss_cfg &= ~NIX_FLOW_KEY_TYPE_TCP;
+ break;
+ case (RXH_L4_B_0_1 | RXH_L4_B_2_3):
+ rss_cfg |= NIX_FLOW_KEY_TYPE_TCP;
+ break;
+ default:
+ return -EINVAL;
+ }
+ break;
+ case UDP_V4_FLOW:
+ case UDP_V6_FLOW:
+ switch (nfc->data & rxh_l4) {
+ case 0:
+ rss_cfg &= ~NIX_FLOW_KEY_TYPE_UDP;
+ break;
+ case (RXH_L4_B_0_1 | RXH_L4_B_2_3):
+ rss_cfg |= NIX_FLOW_KEY_TYPE_UDP;
+ break;
+ default:
+ return -EINVAL;
+ }
+ break;
+ case SCTP_V4_FLOW:
+ case SCTP_V6_FLOW:
+ switch (nfc->data & rxh_l4) {
+ case 0:
+ rss_cfg &= ~NIX_FLOW_KEY_TYPE_SCTP;
+ break;
+ case (RXH_L4_B_0_1 | RXH_L4_B_2_3):
+ rss_cfg |= NIX_FLOW_KEY_TYPE_SCTP;
+ break;
+ default:
+ return -EINVAL;
+ }
+ break;
+ case IPV4_FLOW:
+ case IPV6_FLOW:
+ rss_cfg = NIX_FLOW_KEY_TYPE_IPV4 | NIX_FLOW_KEY_TYPE_IPV6;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ rss->flowkey_cfg = rss_cfg;
+ otx2_set_flowkey_cfg(pfvf);
+ return 0;
+}
+
+static int otx2_get_rxnfc(struct net_device *dev,
+ struct ethtool_rxnfc *nfc, u32 *rules)
+{
+ struct otx2_nic *pfvf = netdev_priv(dev);
+ int ret = -EOPNOTSUPP;
+
+ switch (nfc->cmd) {
+ case ETHTOOL_GRXRINGS:
+ nfc->data = pfvf->hw.rx_queues;
+ ret = 0;
+ break;
+ case ETHTOOL_GRXFH:
+ return otx2_get_rss_hash_opts(pfvf, nfc);
+ default:
+ break;
+ }
+ return ret;
+}
+
+static int otx2_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *nfc)
+{
+ struct otx2_nic *pfvf = netdev_priv(dev);
+ int ret = -EOPNOTSUPP;
+
+ switch (nfc->cmd) {
+ case ETHTOOL_SRXFH:
+ ret = otx2_set_rss_hash_opts(pfvf, nfc);
+ break;
+ default:
+ break;
+ }
+
+ return ret;
+}
+
+static u32 otx2_get_rxfh_key_size(struct net_device *netdev)
+{
+ struct otx2_nic *pfvf = netdev_priv(netdev);
+ struct otx2_rss_info *rss;
+
+ rss = &pfvf->hw.rss_info;
+
+ return sizeof(rss->key);
+}
+
+static u32 otx2_get_rxfh_indir_size(struct net_device *dev)
+{
+ struct otx2_nic *pfvf = netdev_priv(dev);
+
+ return pfvf->hw.rss_info.rss_size;
+}
+
+/* Get RSS configuration */
+static int otx2_get_rxfh(struct net_device *dev, u32 *indir,
+ u8 *hkey, u8 *hfunc)
+{
+ struct otx2_nic *pfvf = netdev_priv(dev);
+ struct otx2_rss_info *rss;
+ int idx;
+
+ rss = &pfvf->hw.rss_info;
+
+ if (indir) {
+ for (idx = 0; idx < rss->rss_size; idx++)
+ indir[idx] = rss->ind_tbl[idx];
+ }
+
+ if (hkey)
+ memcpy(hkey, rss->key, sizeof(rss->key));
+
+ if (hfunc)
+ *hfunc = ETH_RSS_HASH_TOP;
+
+ return 0;
+}
+
+/* Configure RSS table and hash key */
+static int otx2_set_rxfh(struct net_device *dev, const u32 *indir,
+ const u8 *hkey, const u8 hfunc)
+{
+ struct otx2_nic *pfvf = netdev_priv(dev);
+ struct otx2_rss_info *rss;
+ int idx;
+
+ if (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP)
+ return -EOPNOTSUPP;
+
+ rss = &pfvf->hw.rss_info;
+
+ if (!rss->enable) {
+ netdev_err(dev, "RSS is disabled, cannot change settings\n");
+ return -EIO;
+ }
+
+ if (indir) {
+ for (idx = 0; idx < rss->rss_size; idx++)
+ rss->ind_tbl[idx] = indir[idx];
+ }
+
+ if (hkey) {
+ memcpy(rss->key, hkey, sizeof(rss->key));
+ otx2_set_rss_key(pfvf);
+ }
+
+ otx2_set_rss_table(pfvf);
+ return 0;
+}
+
+static u32 otx2_get_msglevel(struct net_device *netdev)
+{
+ struct otx2_nic *pfvf = netdev_priv(netdev);
+
+ return pfvf->msg_enable;
+}
+
+static void otx2_set_msglevel(struct net_device *netdev, u32 val)
+{
+ struct otx2_nic *pfvf = netdev_priv(netdev);
+
+ pfvf->msg_enable = val;
+}
+
+static u32 otx2_get_link(struct net_device *netdev)
+{
+ struct otx2_nic *pfvf = netdev_priv(netdev);
+
+ return pfvf->linfo.link_up;
+}
+
+static const struct ethtool_ops otx2_ethtool_ops = {
+ .get_link = otx2_get_link,
+ .get_drvinfo = otx2_get_drvinfo,
+ .get_strings = otx2_get_strings,
+ .get_ethtool_stats = otx2_get_ethtool_stats,
+ .get_sset_count = otx2_get_sset_count,
+ .set_channels = otx2_set_channels,
+ .get_channels = otx2_get_channels,
+ .get_ringparam = otx2_get_ringparam,
+ .set_ringparam = otx2_set_ringparam,
+ .get_coalesce = otx2_get_coalesce,
+ .set_coalesce = otx2_set_coalesce,
+ .get_rxnfc = otx2_get_rxnfc,
+ .set_rxnfc = otx2_set_rxnfc,
+ .get_rxfh_key_size = otx2_get_rxfh_key_size,
+ .get_rxfh_indir_size = otx2_get_rxfh_indir_size,
+ .get_rxfh = otx2_get_rxfh,
+ .set_rxfh = otx2_set_rxfh,
+ .get_msglevel = otx2_get_msglevel,
+ .set_msglevel = otx2_set_msglevel,
+};
+
+void otx2_set_ethtool_ops(struct net_device *netdev)
+{
+ netdev->ethtool_ops = &otx2_ethtool_ops;
+}
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c
new file mode 100644
index 000000000000..85f9b9ba6bd5
--- /dev/null
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c
@@ -0,0 +1,1349 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Marvell OcteonTx2 RVU Physical Function Ethernet driver
+ *
+ * Copyright (C) 2020 Marvell International Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/module.h>
+#include <linux/interrupt.h>
+#include <linux/pci.h>
+#include <linux/etherdevice.h>
+#include <linux/of.h>
+#include <linux/if_vlan.h>
+#include <linux/iommu.h>
+#include <net/ip.h>
+
+#include "otx2_reg.h"
+#include "otx2_common.h"
+#include "otx2_txrx.h"
+#include "otx2_struct.h"
+
+#define DRV_NAME "octeontx2-nicpf"
+#define DRV_STRING "Marvell OcteonTX2 NIC Physical Function Driver"
+#define DRV_VERSION "1.0"
+
+/* Supported devices */
+static const struct pci_device_id otx2_pf_id_table[] = {
+ { PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, PCI_DEVID_OCTEONTX2_RVU_PF) },
+ { 0, } /* end of table */
+};
+
+MODULE_AUTHOR("Marvell International Ltd.");
+MODULE_DESCRIPTION(DRV_STRING);
+MODULE_LICENSE("GPL v2");
+MODULE_VERSION(DRV_VERSION);
+MODULE_DEVICE_TABLE(pci, otx2_pf_id_table);
+
+enum {
+ TYPE_PFAF,
+ TYPE_PFVF,
+};
+
+static int otx2_change_mtu(struct net_device *netdev, int new_mtu)
+{
+ bool if_up = netif_running(netdev);
+ int err = 0;
+
+ if (if_up)
+ otx2_stop(netdev);
+
+ netdev_info(netdev, "Changing MTU from %d to %d\n",
+ netdev->mtu, new_mtu);
+ netdev->mtu = new_mtu;
+
+ if (if_up)
+ err = otx2_open(netdev);
+
+ return err;
+}
+
+static void otx2_queue_work(struct mbox *mw, struct workqueue_struct *mbox_wq,
+ int first, int mdevs, u64 intr, int type)
+{
+ struct otx2_mbox_dev *mdev;
+ struct otx2_mbox *mbox;
+ struct mbox_hdr *hdr;
+ int i;
+
+ for (i = first; i < mdevs; i++) {
+ /* start from 0 */
+ if (!(intr & BIT_ULL(i - first)))
+ continue;
+
+ mbox = &mw->mbox;
+ mdev = &mbox->dev[i];
+ if (type == TYPE_PFAF)
+ otx2_sync_mbox_bbuf(mbox, i);
+ hdr = mdev->mbase + mbox->rx_start;
+		/* The hdr->num_msgs is set to zero immediately in the interrupt
+		 * handler to ensure that it holds a correct value the next time
+		 * the interrupt handler is called.
+		 * pf->mbox.num_msgs holds the data for use in pfaf_mbox_handler and
+		 * pf->mbox.up_num_msgs holds the data for use in
+		 * pfaf_mbox_up_handler.
+		 */
+ if (hdr->num_msgs) {
+ mw[i].num_msgs = hdr->num_msgs;
+ hdr->num_msgs = 0;
+ if (type == TYPE_PFAF)
+ memset(mbox->hwbase + mbox->rx_start, 0,
+ ALIGN(sizeof(struct mbox_hdr),
+ sizeof(u64)));
+
+ queue_work(mbox_wq, &mw[i].mbox_wrk);
+ }
+
+ mbox = &mw->mbox_up;
+ mdev = &mbox->dev[i];
+ if (type == TYPE_PFAF)
+ otx2_sync_mbox_bbuf(mbox, i);
+ hdr = mdev->mbase + mbox->rx_start;
+ if (hdr->num_msgs) {
+ mw[i].up_num_msgs = hdr->num_msgs;
+ hdr->num_msgs = 0;
+ if (type == TYPE_PFAF)
+ memset(mbox->hwbase + mbox->rx_start, 0,
+ ALIGN(sizeof(struct mbox_hdr),
+ sizeof(u64)));
+
+ queue_work(mbox_wq, &mw[i].mbox_up_wrk);
+ }
+ }
+}
+
+static void otx2_process_pfaf_mbox_msg(struct otx2_nic *pf,
+ struct mbox_msghdr *msg)
+{
+ if (msg->id >= MBOX_MSG_MAX) {
+ dev_err(pf->dev,
+ "Mbox msg with unknown ID 0x%x\n", msg->id);
+ return;
+ }
+
+ if (msg->sig != OTX2_MBOX_RSP_SIG) {
+ dev_err(pf->dev,
+ "Mbox msg with wrong signature %x, ID 0x%x\n",
+ msg->sig, msg->id);
+ return;
+ }
+
+ switch (msg->id) {
+ case MBOX_MSG_READY:
+ pf->pcifunc = msg->pcifunc;
+ break;
+ case MBOX_MSG_MSIX_OFFSET:
+ mbox_handler_msix_offset(pf, (struct msix_offset_rsp *)msg);
+ break;
+ case MBOX_MSG_NPA_LF_ALLOC:
+ mbox_handler_npa_lf_alloc(pf, (struct npa_lf_alloc_rsp *)msg);
+ break;
+ case MBOX_MSG_NIX_LF_ALLOC:
+ mbox_handler_nix_lf_alloc(pf, (struct nix_lf_alloc_rsp *)msg);
+ break;
+ case MBOX_MSG_NIX_TXSCH_ALLOC:
+ mbox_handler_nix_txsch_alloc(pf,
+ (struct nix_txsch_alloc_rsp *)msg);
+ break;
+ case MBOX_MSG_CGX_STATS:
+ mbox_handler_cgx_stats(pf, (struct cgx_stats_rsp *)msg);
+ break;
+ default:
+ if (msg->rc)
+ dev_err(pf->dev,
+ "Mbox msg response has err %d, ID 0x%x\n",
+ msg->rc, msg->id);
+ break;
+ }
+}
+
+static void otx2_pfaf_mbox_handler(struct work_struct *work)
+{
+ struct otx2_mbox_dev *mdev;
+ struct mbox_hdr *rsp_hdr;
+ struct mbox_msghdr *msg;
+ struct otx2_mbox *mbox;
+ struct mbox *af_mbox;
+ struct otx2_nic *pf;
+ int offset, id;
+
+ af_mbox = container_of(work, struct mbox, mbox_wrk);
+ mbox = &af_mbox->mbox;
+ mdev = &mbox->dev[0];
+ rsp_hdr = (struct mbox_hdr *)(mdev->mbase + mbox->rx_start);
+
+ offset = mbox->rx_start + ALIGN(sizeof(*rsp_hdr), MBOX_MSG_ALIGN);
+ pf = af_mbox->pfvf;
+
+ for (id = 0; id < af_mbox->num_msgs; id++) {
+ msg = (struct mbox_msghdr *)(mdev->mbase + offset);
+ otx2_process_pfaf_mbox_msg(pf, msg);
+ offset = mbox->rx_start + msg->next_msgoff;
+ mdev->msgs_acked++;
+ }
+
+ otx2_mbox_reset(mbox, 0);
+}
+
+static void otx2_handle_link_event(struct otx2_nic *pf)
+{
+ struct cgx_link_user_info *linfo = &pf->linfo;
+ struct net_device *netdev = pf->netdev;
+
+ pr_info("%s NIC Link is %s %d Mbps %s duplex\n", netdev->name,
+ linfo->link_up ? "UP" : "DOWN", linfo->speed,
+ linfo->full_duplex ? "Full" : "Half");
+ if (linfo->link_up) {
+ netif_carrier_on(netdev);
+ netif_tx_start_all_queues(netdev);
+ } else {
+ netif_tx_stop_all_queues(netdev);
+ netif_carrier_off(netdev);
+ }
+}
+
+int otx2_mbox_up_handler_cgx_link_event(struct otx2_nic *pf,
+ struct cgx_link_info_msg *msg,
+ struct msg_rsp *rsp)
+{
+ /* Copy the link info sent by AF */
+ pf->linfo = msg->link_info;
+
+ /* interface has not been fully configured yet */
+ if (pf->flags & OTX2_FLAG_INTF_DOWN)
+ return 0;
+
+ otx2_handle_link_event(pf);
+ return 0;
+}
+
+static int otx2_process_mbox_msg_up(struct otx2_nic *pf,
+ struct mbox_msghdr *req)
+{
+	/* Check if valid, if not reply with an invalid msg */
+ if (req->sig != OTX2_MBOX_REQ_SIG) {
+ otx2_reply_invalid_msg(&pf->mbox.mbox_up, 0, 0, req->id);
+ return -ENODEV;
+ }
+
+ switch (req->id) {
+#define M(_name, _id, _fn_name, _req_type, _rsp_type) \
+ case _id: { \
+ struct _rsp_type *rsp; \
+ int err; \
+ \
+ rsp = (struct _rsp_type *)otx2_mbox_alloc_msg( \
+ &pf->mbox.mbox_up, 0, \
+ sizeof(struct _rsp_type)); \
+ if (!rsp) \
+ return -ENOMEM; \
+ \
+ rsp->hdr.id = _id; \
+ rsp->hdr.sig = OTX2_MBOX_RSP_SIG; \
+ rsp->hdr.pcifunc = 0; \
+ rsp->hdr.rc = 0; \
+ \
+ err = otx2_mbox_up_handler_ ## _fn_name( \
+ pf, (struct _req_type *)req, rsp); \
+ return err; \
+ }
+MBOX_UP_CGX_MESSAGES
+#undef M
+ break;
+ default:
+ otx2_reply_invalid_msg(&pf->mbox.mbox_up, 0, 0, req->id);
+ return -ENODEV;
+ }
+ return 0;
+}
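+/* For illustration, with MBOX_UP_CGX_MESSAGES defining a cgx_link_event
+ * entry, the M() expansion above produces a case that allocates a
+ * response, fills its header and dispatches to
+ * otx2_mbox_up_handler_cgx_link_event() defined earlier in this file.
+ */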
+
+static void otx2_pfaf_mbox_up_handler(struct work_struct *work)
+{
+ struct mbox *af_mbox = container_of(work, struct mbox, mbox_up_wrk);
+ struct otx2_mbox *mbox = &af_mbox->mbox_up;
+ struct otx2_mbox_dev *mdev = &mbox->dev[0];
+ struct otx2_nic *pf = af_mbox->pfvf;
+ int offset, id, devid = 0;
+ struct mbox_hdr *rsp_hdr;
+ struct mbox_msghdr *msg;
+
+ rsp_hdr = (struct mbox_hdr *)(mdev->mbase + mbox->rx_start);
+
+ offset = mbox->rx_start + ALIGN(sizeof(*rsp_hdr), MBOX_MSG_ALIGN);
+
+ for (id = 0; id < af_mbox->up_num_msgs; id++) {
+ msg = (struct mbox_msghdr *)(mdev->mbase + offset);
+
+ devid = msg->pcifunc & RVU_PFVF_FUNC_MASK;
+ /* Skip processing VF's messages */
+ if (!devid)
+ otx2_process_mbox_msg_up(pf, msg);
+ offset = mbox->rx_start + msg->next_msgoff;
+ }
+
+ otx2_mbox_msg_send(mbox, 0);
+}
+
+static irqreturn_t otx2_pfaf_mbox_intr_handler(int irq, void *pf_irq)
+{
+ struct otx2_nic *pf = (struct otx2_nic *)pf_irq;
+ struct mbox *mbox;
+
+ /* Clear the IRQ */
+ otx2_write64(pf, RVU_PF_INT, BIT_ULL(0));
+
+ mbox = &pf->mbox;
+ otx2_queue_work(mbox, pf->mbox_wq, 0, 1, 1, TYPE_PFAF);
+
+ return IRQ_HANDLED;
+}
+
+static void otx2_disable_mbox_intr(struct otx2_nic *pf)
+{
+ int vector = pci_irq_vector(pf->pdev, RVU_PF_INT_VEC_AFPF_MBOX);
+
+ /* Disable AF => PF mailbox IRQ */
+ otx2_write64(pf, RVU_PF_INT_ENA_W1C, BIT_ULL(0));
+ free_irq(vector, pf);
+}
+
+static int otx2_register_mbox_intr(struct otx2_nic *pf, bool probe_af)
+{
+ struct otx2_hw *hw = &pf->hw;
+ struct msg_req *req;
+ char *irq_name;
+ int err;
+
+ /* Register mailbox interrupt handler */
+ irq_name = &hw->irq_name[RVU_PF_INT_VEC_AFPF_MBOX * NAME_SIZE];
+ snprintf(irq_name, NAME_SIZE, "RVUPFAF Mbox");
+ err = request_irq(pci_irq_vector(pf->pdev, RVU_PF_INT_VEC_AFPF_MBOX),
+ otx2_pfaf_mbox_intr_handler, 0, irq_name, pf);
+ if (err) {
+ dev_err(pf->dev,
+ "RVUPF: IRQ registration failed for PFAF mbox irq\n");
+ return err;
+ }
+
+ /* Enable mailbox interrupt for msgs coming from AF.
+ * First clear to avoid spurious interrupts, if any.
+ */
+ otx2_write64(pf, RVU_PF_INT, BIT_ULL(0));
+ otx2_write64(pf, RVU_PF_INT_ENA_W1S, BIT_ULL(0));
+
+ if (!probe_af)
+ return 0;
+
+ /* Check mailbox communication with AF */
+ req = otx2_mbox_alloc_msg_ready(&pf->mbox);
+ if (!req) {
+ otx2_disable_mbox_intr(pf);
+ return -ENOMEM;
+ }
+ err = otx2_sync_mbox_msg(&pf->mbox);
+ if (err) {
+ dev_warn(pf->dev,
+ "AF not responding to mailbox, deferring probe\n");
+ otx2_disable_mbox_intr(pf);
+ return -EPROBE_DEFER;
+ }
+
+ return 0;
+}
+
+static void otx2_pfaf_mbox_destroy(struct otx2_nic *pf)
+{
+ struct mbox *mbox = &pf->mbox;
+
+ if (pf->mbox_wq) {
+ flush_workqueue(pf->mbox_wq);
+ destroy_workqueue(pf->mbox_wq);
+ pf->mbox_wq = NULL;
+ }
+
+ if (mbox->mbox.hwbase)
+ iounmap((void __iomem *)mbox->mbox.hwbase);
+
+ otx2_mbox_destroy(&mbox->mbox);
+ otx2_mbox_destroy(&mbox->mbox_up);
+}
+
+static int otx2_pfaf_mbox_init(struct otx2_nic *pf)
+{
+ struct mbox *mbox = &pf->mbox;
+ void __iomem *hwbase;
+ int err;
+
+ mbox->pfvf = pf;
+ pf->mbox_wq = alloc_workqueue("otx2_pfaf_mailbox",
+ WQ_UNBOUND | WQ_HIGHPRI |
+ WQ_MEM_RECLAIM, 1);
+ if (!pf->mbox_wq)
+ return -ENOMEM;
+
+	/* Mailbox is a reserved memory (in RAM) region shared between
+	 * the admin function (i.e. AF) and this PF, and shouldn't be
+	 * mapped as device memory, to allow unaligned accesses.
+	 */
+ hwbase = ioremap_wc(pci_resource_start(pf->pdev, PCI_MBOX_BAR_NUM),
+ pci_resource_len(pf->pdev, PCI_MBOX_BAR_NUM));
+ if (!hwbase) {
+ dev_err(pf->dev, "Unable to map PFAF mailbox region\n");
+ err = -ENOMEM;
+ goto exit;
+ }
+
+ err = otx2_mbox_init(&mbox->mbox, hwbase, pf->pdev, pf->reg_base,
+ MBOX_DIR_PFAF, 1);
+ if (err)
+ goto exit;
+
+ err = otx2_mbox_init(&mbox->mbox_up, hwbase, pf->pdev, pf->reg_base,
+ MBOX_DIR_PFAF_UP, 1);
+ if (err)
+ goto exit;
+
+ err = otx2_mbox_bbuf_init(mbox, pf->pdev);
+ if (err)
+ goto exit;
+
+ INIT_WORK(&mbox->mbox_wrk, otx2_pfaf_mbox_handler);
+ INIT_WORK(&mbox->mbox_up_wrk, otx2_pfaf_mbox_up_handler);
+ otx2_mbox_lock_init(&pf->mbox);
+
+ return 0;
+exit:
+ otx2_pfaf_mbox_destroy(pf);
+ return err;
+}
+
+static int otx2_cgx_config_linkevents(struct otx2_nic *pf, bool enable)
+{
+ struct msg_req *msg;
+ int err;
+
+ otx2_mbox_lock(&pf->mbox);
+ if (enable)
+ msg = otx2_mbox_alloc_msg_cgx_start_linkevents(&pf->mbox);
+ else
+ msg = otx2_mbox_alloc_msg_cgx_stop_linkevents(&pf->mbox);
+
+ if (!msg) {
+ otx2_mbox_unlock(&pf->mbox);
+ return -ENOMEM;
+ }
+
+ err = otx2_sync_mbox_msg(&pf->mbox);
+ otx2_mbox_unlock(&pf->mbox);
+ return err;
+}
+
+static int otx2_cgx_config_loopback(struct otx2_nic *pf, bool enable)
+{
+ struct msg_req *msg;
+ int err;
+
+ otx2_mbox_lock(&pf->mbox);
+ if (enable)
+ msg = otx2_mbox_alloc_msg_cgx_intlbk_enable(&pf->mbox);
+ else
+ msg = otx2_mbox_alloc_msg_cgx_intlbk_disable(&pf->mbox);
+
+ if (!msg) {
+ otx2_mbox_unlock(&pf->mbox);
+ return -ENOMEM;
+ }
+
+ err = otx2_sync_mbox_msg(&pf->mbox);
+ otx2_mbox_unlock(&pf->mbox);
+ return err;
+}
+
+int otx2_set_real_num_queues(struct net_device *netdev,
+ int tx_queues, int rx_queues)
+{
+ int err;
+
+ err = netif_set_real_num_tx_queues(netdev, tx_queues);
+ if (err) {
+ netdev_err(netdev,
+ "Failed to set no of Tx queues: %d\n", tx_queues);
+ return err;
+ }
+
+ err = netif_set_real_num_rx_queues(netdev, rx_queues);
+ if (err)
+ netdev_err(netdev,
+ "Failed to set no of Rx queues: %d\n", rx_queues);
+ return err;
+}
+
+static irqreturn_t otx2_q_intr_handler(int irq, void *data)
+{
+ struct otx2_nic *pf = data;
+ u64 val, *ptr;
+ u64 qidx = 0;
+
+ /* CQ */
+ for (qidx = 0; qidx < pf->qset.cq_cnt; qidx++) {
+ ptr = otx2_get_regaddr(pf, NIX_LF_CQ_OP_INT);
+ val = otx2_atomic64_add((qidx << 44), ptr);
+
+ otx2_write64(pf, NIX_LF_CQ_OP_INT, (qidx << 44) |
+ (val & NIX_CQERRINT_BITS));
+ if (!(val & (NIX_CQERRINT_BITS | BIT_ULL(42))))
+ continue;
+
+ if (val & BIT_ULL(42)) {
+ netdev_err(pf->netdev, "CQ%lld: error reading NIX_LF_CQ_OP_INT, NIX_LF_ERR_INT 0x%llx\n",
+ qidx, otx2_read64(pf, NIX_LF_ERR_INT));
+ } else {
+ if (val & BIT_ULL(NIX_CQERRINT_DOOR_ERR))
+ netdev_err(pf->netdev, "CQ%lld: Doorbell error",
+ qidx);
+ if (val & BIT_ULL(NIX_CQERRINT_CQE_FAULT))
+ netdev_err(pf->netdev, "CQ%lld: Memory fault on CQE write to LLC/DRAM",
+ qidx);
+ }
+
+ schedule_work(&pf->reset_task);
+ }
+
+ /* SQ */
+ for (qidx = 0; qidx < pf->hw.tx_queues; qidx++) {
+ ptr = otx2_get_regaddr(pf, NIX_LF_SQ_OP_INT);
+ val = otx2_atomic64_add((qidx << 44), ptr);
+ otx2_write64(pf, NIX_LF_SQ_OP_INT, (qidx << 44) |
+ (val & NIX_SQINT_BITS));
+
+ if (!(val & (NIX_SQINT_BITS | BIT_ULL(42))))
+ continue;
+
+ if (val & BIT_ULL(42)) {
+ netdev_err(pf->netdev, "SQ%lld: error reading NIX_LF_SQ_OP_INT, NIX_LF_ERR_INT 0x%llx\n",
+ qidx, otx2_read64(pf, NIX_LF_ERR_INT));
+ } else {
+ if (val & BIT_ULL(NIX_SQINT_LMT_ERR)) {
+ netdev_err(pf->netdev, "SQ%lld: LMT store error NIX_LF_SQ_OP_ERR_DBG:0x%llx",
+ qidx,
+ otx2_read64(pf,
+ NIX_LF_SQ_OP_ERR_DBG));
+ otx2_write64(pf, NIX_LF_SQ_OP_ERR_DBG,
+ BIT_ULL(44));
+ }
+ if (val & BIT_ULL(NIX_SQINT_MNQ_ERR)) {
+ netdev_err(pf->netdev, "SQ%lld: Meta-descriptor enqueue error NIX_LF_MNQ_ERR_DGB:0x%llx\n",
+ qidx,
+ otx2_read64(pf, NIX_LF_MNQ_ERR_DBG));
+ otx2_write64(pf, NIX_LF_MNQ_ERR_DBG,
+ BIT_ULL(44));
+ }
+ if (val & BIT_ULL(NIX_SQINT_SEND_ERR)) {
+ netdev_err(pf->netdev, "SQ%lld: Send error, NIX_LF_SEND_ERR_DBG 0x%llx",
+ qidx,
+ otx2_read64(pf,
+ NIX_LF_SEND_ERR_DBG));
+ otx2_write64(pf, NIX_LF_SEND_ERR_DBG,
+ BIT_ULL(44));
+ }
+ if (val & BIT_ULL(NIX_SQINT_SQB_ALLOC_FAIL))
+ netdev_err(pf->netdev, "SQ%lld: SQB allocation failed",
+ qidx);
+ }
+
+ schedule_work(&pf->reset_task);
+ }
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t otx2_cq_intr_handler(int irq, void *cq_irq)
+{
+ struct otx2_cq_poll *cq_poll = (struct otx2_cq_poll *)cq_irq;
+ struct otx2_nic *pf = (struct otx2_nic *)cq_poll->dev;
+ int qidx = cq_poll->cint_idx;
+
+	/* Disable interrupts.
+	 *
+	 * Completion interrupts are level-triggered, so they must be
+	 * cleared only after they are serviced.
+	 */
+ otx2_write64(pf, NIX_LF_CINTX_ENA_W1C(qidx), BIT_ULL(0));
+
+ /* Schedule NAPI */
+ napi_schedule_irqoff(&cq_poll->napi);
+
+ return IRQ_HANDLED;
+}
+
+static void otx2_disable_napi(struct otx2_nic *pf)
+{
+ struct otx2_qset *qset = &pf->qset;
+ struct otx2_cq_poll *cq_poll;
+ int qidx;
+
+ for (qidx = 0; qidx < pf->hw.cint_cnt; qidx++) {
+ cq_poll = &qset->napi[qidx];
+ napi_disable(&cq_poll->napi);
+ netif_napi_del(&cq_poll->napi);
+ }
+}
+
+static void otx2_free_cq_res(struct otx2_nic *pf)
+{
+ struct otx2_qset *qset = &pf->qset;
+ struct otx2_cq_queue *cq;
+ int qidx;
+
+ /* Disable CQs */
+ otx2_ctx_disable(&pf->mbox, NIX_AQ_CTYPE_CQ, false);
+ for (qidx = 0; qidx < qset->cq_cnt; qidx++) {
+ cq = &qset->cq[qidx];
+ qmem_free(pf->dev, cq->cqe);
+ }
+}
+
+static void otx2_free_sq_res(struct otx2_nic *pf)
+{
+ struct otx2_qset *qset = &pf->qset;
+ struct otx2_snd_queue *sq;
+ int qidx;
+
+ /* Disable SQs */
+ otx2_ctx_disable(&pf->mbox, NIX_AQ_CTYPE_SQ, false);
+ /* Free SQB pointers */
+ otx2_sq_free_sqbs(pf);
+ for (qidx = 0; qidx < pf->hw.tx_queues; qidx++) {
+ sq = &qset->sq[qidx];
+ qmem_free(pf->dev, sq->sqe);
+ qmem_free(pf->dev, sq->tso_hdrs);
+ kfree(sq->sg);
+ kfree(sq->sqb_ptrs);
+ }
+}
+
+static int otx2_init_hw_resources(struct otx2_nic *pf)
+{
+ struct mbox *mbox = &pf->mbox;
+ struct otx2_hw *hw = &pf->hw;
+ struct msg_req *req;
+ int err = 0, lvl;
+
+ /* Set required NPA LF's pool counts
+ * Auras and Pools are used in a 1:1 mapping,
+ * so, aura count = pool count.
+ */
+ hw->rqpool_cnt = hw->rx_queues;
+ hw->sqpool_cnt = hw->tx_queues;
+ hw->pool_cnt = hw->rqpool_cnt + hw->sqpool_cnt;
+
+ /* Get the size of receive buffers to allocate */
+ pf->rbsize = RCV_FRAG_LEN(pf->netdev->mtu + OTX2_ETH_HLEN);
+
+ otx2_mbox_lock(mbox);
+ /* NPA init */
+ err = otx2_config_npa(pf);
+ if (err)
+ goto exit;
+
+ /* NIX init */
+ err = otx2_config_nix(pf);
+ if (err)
+ goto err_free_npa_lf;
+
+ /* Init Auras and pools used by NIX RQ, for free buffer ptrs */
+ err = otx2_rq_aura_pool_init(pf);
+ if (err) {
+ otx2_mbox_unlock(mbox);
+ goto err_free_nix_lf;
+ }
+ /* Init Auras and pools used by NIX SQ, for queueing SQEs */
+ err = otx2_sq_aura_pool_init(pf);
+ if (err) {
+ otx2_mbox_unlock(mbox);
+ goto err_free_rq_ptrs;
+ }
+
+ err = otx2_txsch_alloc(pf);
+ if (err) {
+ otx2_mbox_unlock(mbox);
+ goto err_free_sq_ptrs;
+ }
+
+ err = otx2_config_nix_queues(pf);
+ if (err) {
+ otx2_mbox_unlock(mbox);
+ goto err_free_txsch;
+ }
+ for (lvl = 0; lvl < NIX_TXSCH_LVL_CNT; lvl++) {
+ err = otx2_txschq_config(pf, lvl);
+ if (err) {
+ otx2_mbox_unlock(mbox);
+ goto err_free_nix_queues;
+ }
+ }
+ otx2_mbox_unlock(mbox);
+ return err;
+
+err_free_nix_queues:
+ otx2_free_sq_res(pf);
+ otx2_free_cq_res(pf);
+ otx2_ctx_disable(mbox, NIX_AQ_CTYPE_RQ, false);
+err_free_txsch:
+ if (otx2_txschq_stop(pf))
+ dev_err(pf->dev, "%s failed to stop TX schedulers\n", __func__);
+err_free_sq_ptrs:
+ otx2_sq_free_sqbs(pf);
+err_free_rq_ptrs:
+ otx2_free_aura_ptr(pf, AURA_NIX_RQ);
+ otx2_ctx_disable(mbox, NPA_AQ_CTYPE_POOL, true);
+ otx2_ctx_disable(mbox, NPA_AQ_CTYPE_AURA, true);
+ otx2_aura_pool_free(pf);
+err_free_nix_lf:
+ otx2_mbox_lock(mbox);
+ req = otx2_mbox_alloc_msg_nix_lf_free(mbox);
+ if (req) {
+ if (otx2_sync_mbox_msg(mbox))
+ dev_err(pf->dev, "%s failed to free nixlf\n", __func__);
+ }
+err_free_npa_lf:
+ /* Reset NPA LF */
+ req = otx2_mbox_alloc_msg_npa_lf_free(mbox);
+ if (req) {
+ if (otx2_sync_mbox_msg(mbox))
+ dev_err(pf->dev, "%s failed to free npalf\n", __func__);
+ }
+exit:
+ otx2_mbox_unlock(mbox);
+ return err;
+}
+
+static void otx2_free_hw_resources(struct otx2_nic *pf)
+{
+ struct otx2_qset *qset = &pf->qset;
+ struct mbox *mbox = &pf->mbox;
+ struct otx2_cq_queue *cq;
+ struct msg_req *req;
+ int qidx, err;
+
+ /* Ensure all SQE are processed */
+ otx2_sqb_flush(pf);
+
+ /* Stop transmission */
+ err = otx2_txschq_stop(pf);
+ if (err)
+ dev_err(pf->dev, "RVUPF: Failed to stop/free TX schedulers\n");
+
+ /* Disable RQs */
+ otx2_ctx_disable(mbox, NIX_AQ_CTYPE_RQ, false);
+
+	/* Dequeue all CQEs */
+ for (qidx = 0; qidx < qset->cq_cnt; qidx++) {
+ cq = &qset->cq[qidx];
+ if (cq->cq_type == CQ_RX)
+ otx2_cleanup_rx_cqes(pf, cq);
+ else
+ otx2_cleanup_tx_cqes(pf, cq);
+ }
+
+ otx2_free_sq_res(pf);
+
+	/* Free RQ buffer pointers */
+ otx2_free_aura_ptr(pf, AURA_NIX_RQ);
+
+ otx2_free_cq_res(pf);
+
+ otx2_mbox_lock(mbox);
+ /* Reset NIX LF */
+ req = otx2_mbox_alloc_msg_nix_lf_free(mbox);
+ if (req) {
+ if (otx2_sync_mbox_msg(mbox))
+ dev_err(pf->dev, "%s failed to free nixlf\n", __func__);
+ }
+ otx2_mbox_unlock(mbox);
+
+ /* Disable NPA Pool and Aura hw context */
+ otx2_ctx_disable(mbox, NPA_AQ_CTYPE_POOL, true);
+ otx2_ctx_disable(mbox, NPA_AQ_CTYPE_AURA, true);
+ otx2_aura_pool_free(pf);
+
+ otx2_mbox_lock(mbox);
+ /* Reset NPA LF */
+ req = otx2_mbox_alloc_msg_npa_lf_free(mbox);
+ if (req) {
+ if (otx2_sync_mbox_msg(mbox))
+ dev_err(pf->dev, "%s failed to free npalf\n", __func__);
+ }
+ otx2_mbox_unlock(mbox);
+}
+
+int otx2_open(struct net_device *netdev)
+{
+ struct otx2_nic *pf = netdev_priv(netdev);
+ struct otx2_cq_poll *cq_poll = NULL;
+ struct otx2_qset *qset = &pf->qset;
+ int err = 0, qidx, vec;
+ char *irq_name;
+
+ netif_carrier_off(netdev);
+
+ pf->qset.cq_cnt = pf->hw.rx_queues + pf->hw.tx_queues;
+	/* RQs and SQs are mapped to different CQs,
+	 * so find out the max CQ IRQs (i.e. CINTs) needed.
+	 */
+ pf->hw.cint_cnt = max(pf->hw.rx_queues, pf->hw.tx_queues);
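+	/* E.g. 8 Rx and 4 Tx queues need only 8 CINTs: CINT0..3 each
+	 * service an RQ/SQ pair while CINT4..7 service the remaining
+	 * RQs alone.
+	 */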
+ qset->napi = kcalloc(pf->hw.cint_cnt, sizeof(*cq_poll), GFP_KERNEL);
+ if (!qset->napi)
+ return -ENOMEM;
+
+ /* CQ size of RQ */
+ qset->rqe_cnt = qset->rqe_cnt ? qset->rqe_cnt : Q_COUNT(Q_SIZE_256);
+ /* CQ size of SQ */
+ qset->sqe_cnt = qset->sqe_cnt ? qset->sqe_cnt : Q_COUNT(Q_SIZE_4K);
+
+ err = -ENOMEM;
+ qset->cq = kcalloc(pf->qset.cq_cnt,
+ sizeof(struct otx2_cq_queue), GFP_KERNEL);
+ if (!qset->cq)
+ goto err_free_mem;
+
+ qset->sq = kcalloc(pf->hw.tx_queues,
+ sizeof(struct otx2_snd_queue), GFP_KERNEL);
+ if (!qset->sq)
+ goto err_free_mem;
+
+ qset->rq = kcalloc(pf->hw.rx_queues,
+ sizeof(struct otx2_rcv_queue), GFP_KERNEL);
+ if (!qset->rq)
+ goto err_free_mem;
+
+ err = otx2_init_hw_resources(pf);
+ if (err)
+ goto err_free_mem;
+
+ /* Register NAPI handler */
+ for (qidx = 0; qidx < pf->hw.cint_cnt; qidx++) {
+ cq_poll = &qset->napi[qidx];
+ cq_poll->cint_idx = qidx;
+		/* RQ0 & SQ0 are mapped to CINT0 and so on.
+		 * 'cq_ids[0]' points to RQ's CQ and
+		 * 'cq_ids[1]' points to SQ's CQ.
+		 */
+ cq_poll->cq_ids[CQ_RX] =
+ (qidx < pf->hw.rx_queues) ? qidx : CINT_INVALID_CQ;
+ cq_poll->cq_ids[CQ_TX] = (qidx < pf->hw.tx_queues) ?
+ qidx + pf->hw.rx_queues : CINT_INVALID_CQ;
+ cq_poll->dev = (void *)pf;
+ netif_napi_add(netdev, &cq_poll->napi,
+ otx2_napi_handler, NAPI_POLL_WEIGHT);
+ napi_enable(&cq_poll->napi);
+ }
+
+ /* Set maximum frame size allowed in HW */
+ err = otx2_hw_set_mtu(pf, netdev->mtu);
+ if (err)
+ goto err_disable_napi;
+
+ /* Initialize RSS */
+ err = otx2_rss_init(pf);
+ if (err)
+ goto err_disable_napi;
+
+ /* Register Queue IRQ handlers */
+ vec = pf->hw.nix_msixoff + NIX_LF_QINT_VEC_START;
+ irq_name = &pf->hw.irq_name[vec * NAME_SIZE];
+
+ snprintf(irq_name, NAME_SIZE, "%s-qerr", pf->netdev->name);
+
+ err = request_irq(pci_irq_vector(pf->pdev, vec),
+ otx2_q_intr_handler, 0, irq_name, pf);
+ if (err) {
+ dev_err(pf->dev,
+ "RVUPF%d: IRQ registration failed for QERR\n",
+ rvu_get_pf(pf->pcifunc));
+ goto err_disable_napi;
+ }
+
+ /* Enable QINT IRQ */
+ otx2_write64(pf, NIX_LF_QINTX_ENA_W1S(0), BIT_ULL(0));
+
+ /* Register CQ IRQ handlers */
+ vec = pf->hw.nix_msixoff + NIX_LF_CINT_VEC_START;
+ for (qidx = 0; qidx < pf->hw.cint_cnt; qidx++) {
+ irq_name = &pf->hw.irq_name[vec * NAME_SIZE];
+
+ snprintf(irq_name, NAME_SIZE, "%s-rxtx-%d", pf->netdev->name,
+ qidx);
+
+ err = request_irq(pci_irq_vector(pf->pdev, vec),
+ otx2_cq_intr_handler, 0, irq_name,
+ &qset->napi[qidx]);
+ if (err) {
+ dev_err(pf->dev,
+ "RVUPF%d: IRQ registration failed for CQ%d\n",
+ rvu_get_pf(pf->pcifunc), qidx);
+ goto err_free_cints;
+ }
+ vec++;
+
+ otx2_config_irq_coalescing(pf, qidx);
+
+ /* Enable CQ IRQ */
+ otx2_write64(pf, NIX_LF_CINTX_INT(qidx), BIT_ULL(0));
+ otx2_write64(pf, NIX_LF_CINTX_ENA_W1S(qidx), BIT_ULL(0));
+ }
+
+ otx2_set_cints_affinity(pf);
+
+ pf->flags &= ~OTX2_FLAG_INTF_DOWN;
+ /* 'intf_down' may be checked on any cpu */
+ smp_wmb();
+
+ /* we have already received link status notification */
+ if (pf->linfo.link_up && !(pf->pcifunc & RVU_PFVF_FUNC_MASK))
+ otx2_handle_link_event(pf);
+
+ err = otx2_rxtx_enable(pf, true);
+ if (err)
+ goto err_free_cints;
+
+ return 0;
+
+err_free_cints:
+ otx2_free_cints(pf, qidx);
+ vec = pci_irq_vector(pf->pdev,
+ pf->hw.nix_msixoff + NIX_LF_QINT_VEC_START);
+ otx2_write64(pf, NIX_LF_QINTX_ENA_W1C(0), BIT_ULL(0));
+ synchronize_irq(vec);
+ free_irq(vec, pf);
+err_disable_napi:
+ otx2_disable_napi(pf);
+ otx2_free_hw_resources(pf);
+err_free_mem:
+ kfree(qset->sq);
+ kfree(qset->cq);
+ kfree(qset->rq);
+ kfree(qset->napi);
+ return err;
+}
+
+int otx2_stop(struct net_device *netdev)
+{
+ struct otx2_nic *pf = netdev_priv(netdev);
+ struct otx2_cq_poll *cq_poll = NULL;
+ struct otx2_qset *qset = &pf->qset;
+ int qidx, vec, wrk;
+
+ netif_carrier_off(netdev);
+ netif_tx_stop_all_queues(netdev);
+
+ pf->flags |= OTX2_FLAG_INTF_DOWN;
+ /* 'intf_down' may be checked on any cpu */
+ smp_wmb();
+
+ /* First stop packet Rx/Tx */
+ otx2_rxtx_enable(pf, false);
+
+ /* Cleanup Queue IRQ */
+ vec = pci_irq_vector(pf->pdev,
+ pf->hw.nix_msixoff + NIX_LF_QINT_VEC_START);
+ otx2_write64(pf, NIX_LF_QINTX_ENA_W1C(0), BIT_ULL(0));
+ synchronize_irq(vec);
+ free_irq(vec, pf);
+
+ /* Cleanup CQ NAPI and IRQ */
+ vec = pf->hw.nix_msixoff + NIX_LF_CINT_VEC_START;
+ for (qidx = 0; qidx < pf->hw.cint_cnt; qidx++) {
+ /* Disable interrupt */
+ otx2_write64(pf, NIX_LF_CINTX_ENA_W1C(qidx), BIT_ULL(0));
+
+ synchronize_irq(pci_irq_vector(pf->pdev, vec));
+
+ cq_poll = &qset->napi[qidx];
+ napi_synchronize(&cq_poll->napi);
+ vec++;
+ }
+
+ netif_tx_disable(netdev);
+
+ otx2_free_hw_resources(pf);
+ otx2_free_cints(pf, pf->hw.cint_cnt);
+ otx2_disable_napi(pf);
+
+ for (qidx = 0; qidx < netdev->num_tx_queues; qidx++)
+ netdev_tx_reset_queue(netdev_get_tx_queue(netdev, qidx));
+
+ for (wrk = 0; wrk < pf->qset.cq_cnt; wrk++)
+ cancel_delayed_work_sync(&pf->refill_wrk[wrk].pool_refill_work);
+ devm_kfree(pf->dev, pf->refill_wrk);
+
+ kfree(qset->sq);
+ kfree(qset->cq);
+ kfree(qset->rq);
+ kfree(qset->napi);
+ /* Do not clear RQ/SQ ringsize settings */
+ memset((void *)qset + offsetof(struct otx2_qset, sqe_cnt), 0,
+ sizeof(*qset) - offsetof(struct otx2_qset, sqe_cnt));
+ return 0;
+}
+
+static netdev_tx_t otx2_xmit(struct sk_buff *skb, struct net_device *netdev)
+{
+ struct otx2_nic *pf = netdev_priv(netdev);
+ int qidx = skb_get_queue_mapping(skb);
+ struct otx2_snd_queue *sq;
+ struct netdev_queue *txq;
+
+ /* Check for minimum and maximum packet length */
+ if (skb->len <= ETH_HLEN ||
+ (!skb_shinfo(skb)->gso_size && skb->len > pf->max_frs)) {
+ dev_kfree_skb(skb);
+ return NETDEV_TX_OK;
+ }
+
+ sq = &pf->qset.sq[qidx];
+ txq = netdev_get_tx_queue(netdev, qidx);
+
+ if (!otx2_sq_append_skb(netdev, sq, skb, qidx)) {
+ netif_tx_stop_queue(txq);
+
+		/* Check again, in case SQBs got freed up */
+ smp_mb();
+ if (((sq->num_sqbs - *sq->aura_fc_addr) * sq->sqe_per_sqb)
+ > sq->sqe_thresh)
+ netif_tx_wake_queue(txq);
+
+ return NETDEV_TX_BUSY;
+ }
+
+ return NETDEV_TX_OK;
+}
+
+static void otx2_set_rx_mode(struct net_device *netdev)
+{
+ struct otx2_nic *pf = netdev_priv(netdev);
+ struct nix_rx_mode *req;
+
+ if (!(netdev->flags & IFF_UP))
+ return;
+
+ otx2_mbox_lock(&pf->mbox);
+ req = otx2_mbox_alloc_msg_nix_set_rx_mode(&pf->mbox);
+ if (!req) {
+ otx2_mbox_unlock(&pf->mbox);
+ return;
+ }
+
+ req->mode = NIX_RX_MODE_UCAST;
+
+ /* We don't support MAC address filtering yet */
+ if (netdev->flags & IFF_PROMISC)
+ req->mode |= NIX_RX_MODE_PROMISC;
+ else if (netdev->flags & (IFF_ALLMULTI | IFF_MULTICAST))
+ req->mode |= NIX_RX_MODE_ALLMULTI;
+
+ otx2_sync_mbox_msg(&pf->mbox);
+ otx2_mbox_unlock(&pf->mbox);
+}
+
+static int otx2_set_features(struct net_device *netdev,
+ netdev_features_t features)
+{
+ netdev_features_t changed = features ^ netdev->features;
+ struct otx2_nic *pf = netdev_priv(netdev);
+
+ if ((changed & NETIF_F_LOOPBACK) && netif_running(netdev))
+ return otx2_cgx_config_loopback(pf,
+ features & NETIF_F_LOOPBACK);
+ return 0;
+}
+
+static void otx2_reset_task(struct work_struct *work)
+{
+ struct otx2_nic *pf = container_of(work, struct otx2_nic, reset_task);
+
+ if (!netif_running(pf->netdev))
+ return;
+
+ otx2_stop(pf->netdev);
+ pf->reset_count++;
+ otx2_open(pf->netdev);
+ netif_trans_update(pf->netdev);
+}
+
+static const struct net_device_ops otx2_netdev_ops = {
+ .ndo_open = otx2_open,
+ .ndo_stop = otx2_stop,
+ .ndo_start_xmit = otx2_xmit,
+ .ndo_set_mac_address = otx2_set_mac_address,
+ .ndo_change_mtu = otx2_change_mtu,
+ .ndo_set_rx_mode = otx2_set_rx_mode,
+ .ndo_set_features = otx2_set_features,
+ .ndo_tx_timeout = otx2_tx_timeout,
+ .ndo_get_stats64 = otx2_get_stats64,
+};
+
+static int otx2_check_pf_usable(struct otx2_nic *nic)
+{
+ u64 rev;
+
+ rev = otx2_read64(nic, RVU_PF_BLOCK_ADDRX_DISC(BLKADDR_RVUM));
+ rev = (rev >> 12) & 0xFF;
+	/* Check if AF has set up the revision for the RVUM block;
+	 * otherwise this driver's probe should be deferred
+	 * until the AF driver comes up.
+	 */
+ if (!rev) {
+ dev_warn(nic->dev,
+ "AF is not initialized, deferring probe\n");
+ return -EPROBE_DEFER;
+ }
+ return 0;
+}
+
+static int otx2_realloc_msix_vectors(struct otx2_nic *pf)
+{
+ struct otx2_hw *hw = &pf->hw;
+ int num_vec, err;
+
+	/* NPA interrupts are not registered, so allocate only
+	 * up to the NIX vector offset.
+	 */
+ num_vec = hw->nix_msixoff;
+ num_vec += NIX_LF_CINT_VEC_START + hw->max_queues;
+
+ otx2_disable_mbox_intr(pf);
+ pci_free_irq_vectors(hw->pdev);
+ err = pci_alloc_irq_vectors(hw->pdev, num_vec, num_vec, PCI_IRQ_MSIX);
+ if (err < 0) {
+ dev_err(pf->dev, "%s: Failed to realloc %d IRQ vectors\n",
+ __func__, num_vec);
+ return err;
+ }
+
+ return otx2_register_mbox_intr(pf, false);
+}
+
+static int otx2_probe(struct pci_dev *pdev, const struct pci_device_id *id)
+{
+ struct device *dev = &pdev->dev;
+ struct net_device *netdev;
+ struct otx2_nic *pf;
+ struct otx2_hw *hw;
+ int err, qcount;
+ int num_vec;
+
+ err = pcim_enable_device(pdev);
+ if (err) {
+ dev_err(dev, "Failed to enable PCI device\n");
+ return err;
+ }
+
+ err = pci_request_regions(pdev, DRV_NAME);
+ if (err) {
+ dev_err(dev, "PCI request regions failed 0x%x\n", err);
+ return err;
+ }
+
+ err = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(48));
+ if (err) {
+ dev_err(dev, "DMA mask config failed, abort\n");
+ goto err_release_regions;
+ }
+
+ pci_set_master(pdev);
+
+ /* Set number of queues */
+ qcount = min_t(int, num_online_cpus(), OTX2_MAX_CQ_CNT);
+
+ netdev = alloc_etherdev_mqs(sizeof(*pf), qcount, qcount);
+ if (!netdev) {
+ err = -ENOMEM;
+ goto err_release_regions;
+ }
+
+ pci_set_drvdata(pdev, netdev);
+ SET_NETDEV_DEV(netdev, &pdev->dev);
+ pf = netdev_priv(netdev);
+ pf->netdev = netdev;
+ pf->pdev = pdev;
+ pf->dev = dev;
+ pf->flags |= OTX2_FLAG_INTF_DOWN;
+
+ hw = &pf->hw;
+ hw->pdev = pdev;
+ hw->rx_queues = qcount;
+ hw->tx_queues = qcount;
+ hw->max_queues = qcount;
+
+ num_vec = pci_msix_vec_count(pdev);
+ hw->irq_name = devm_kmalloc_array(&hw->pdev->dev, num_vec, NAME_SIZE,
+ GFP_KERNEL);
+	if (!hw->irq_name) {
+		err = -ENOMEM;
+		goto err_free_netdev;
+	}
+
+ hw->affinity_mask = devm_kcalloc(&hw->pdev->dev, num_vec,
+ sizeof(cpumask_var_t), GFP_KERNEL);
+	if (!hw->affinity_mask) {
+		err = -ENOMEM;
+		goto err_free_netdev;
+	}
+
+ /* Map CSRs */
+ pf->reg_base = pcim_iomap(pdev, PCI_CFG_REG_BAR_NUM, 0);
+ if (!pf->reg_base) {
+ dev_err(dev, "Unable to map physical function CSRs, aborting\n");
+ err = -ENOMEM;
+ goto err_free_netdev;
+ }
+
+ err = otx2_check_pf_usable(pf);
+ if (err)
+ goto err_free_netdev;
+
+ err = pci_alloc_irq_vectors(hw->pdev, RVU_PF_INT_VEC_CNT,
+ RVU_PF_INT_VEC_CNT, PCI_IRQ_MSIX);
+ if (err < 0) {
+ dev_err(dev, "%s: Failed to alloc %d IRQ vectors\n",
+ __func__, num_vec);
+ goto err_free_netdev;
+ }
+
+ /* Init PF <=> AF mailbox stuff */
+ err = otx2_pfaf_mbox_init(pf);
+ if (err)
+ goto err_free_irq_vectors;
+
+ /* Register mailbox interrupt */
+ err = otx2_register_mbox_intr(pf, true);
+ if (err)
+ goto err_mbox_destroy;
+
+ /* Request AF to attach NPA and NIX LFs to this PF.
+ * NIX and NPA LFs are needed for this PF to function as a NIC.
+ */
+ err = otx2_attach_npa_nix(pf);
+ if (err)
+ goto err_disable_mbox_intr;
+
+ err = otx2_realloc_msix_vectors(pf);
+ if (err)
+ goto err_detach_rsrc;
+
+ err = otx2_set_real_num_queues(netdev, hw->tx_queues, hw->rx_queues);
+ if (err)
+ goto err_detach_rsrc;
+
+ otx2_setup_dev_hw_settings(pf);
+
+ /* Assign default mac address */
+ otx2_get_mac_from_af(netdev);
+
+ /* NPA's pool is a stack to which SW frees buffer pointers via Aura.
+ * HW allocates buffer pointer from stack and uses it for DMA'ing
+ * ingress packet. In some scenarios HW can free back allocated buffer
+ * pointers to pool. This makes it impossible for SW to maintain a
+ * parallel list where physical addresses of buffer pointers (IOVAs)
+ * given to HW can be saved for later reference.
+ *
+ * So the only way to convert Rx packet's buffer address is to use
+ * IOMMU's iova_to_phys() handler which translates the address by
+ * walking through the translation tables.
+ */
+ pf->iommu_domain = iommu_get_domain_for_dev(dev);
+
+ netdev->hw_features = (NETIF_F_RXCSUM | NETIF_F_IP_CSUM |
+ NETIF_F_IPV6_CSUM | NETIF_F_RXHASH |
+ NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6);
+ netdev->features |= netdev->hw_features;
+
+ netdev->hw_features |= NETIF_F_LOOPBACK | NETIF_F_RXALL;
+
+ netdev->gso_max_segs = OTX2_MAX_GSO_SEGS;
+ netdev->watchdog_timeo = OTX2_TX_TIMEOUT;
+
+ netdev->netdev_ops = &otx2_netdev_ops;
+
+ /* MTU range: 64 - 9190 */
+ netdev->min_mtu = OTX2_MIN_MTU;
+ netdev->max_mtu = OTX2_MAX_MTU;
+
+ INIT_WORK(&pf->reset_task, otx2_reset_task);
+
+ err = register_netdev(netdev);
+ if (err) {
+ dev_err(dev, "Failed to register netdevice\n");
+ goto err_detach_rsrc;
+ }
+
+ otx2_set_ethtool_ops(netdev);
+
+ /* Enable link notifications */
+ otx2_cgx_config_linkevents(pf, true);
+
+ return 0;
+
+err_detach_rsrc:
+ otx2_detach_resources(&pf->mbox);
+err_disable_mbox_intr:
+ otx2_disable_mbox_intr(pf);
+err_mbox_destroy:
+ otx2_pfaf_mbox_destroy(pf);
+err_free_irq_vectors:
+ pci_free_irq_vectors(hw->pdev);
+err_free_netdev:
+ pci_set_drvdata(pdev, NULL);
+ free_netdev(netdev);
+err_release_regions:
+ pci_release_regions(pdev);
+ return err;
+}
+
+static void otx2_remove(struct pci_dev *pdev)
+{
+ struct net_device *netdev = pci_get_drvdata(pdev);
+ struct otx2_nic *pf;
+
+ if (!netdev)
+ return;
+
+ pf = netdev_priv(netdev);
+
+ /* Disable link notifications */
+ otx2_cgx_config_linkevents(pf, false);
+
+ unregister_netdev(netdev);
+ otx2_detach_resources(&pf->mbox);
+ otx2_disable_mbox_intr(pf);
+ otx2_pfaf_mbox_destroy(pf);
+ pci_free_irq_vectors(pf->pdev);
+ pci_set_drvdata(pdev, NULL);
+ free_netdev(netdev);
+
+ pci_release_regions(pdev);
+}
+
+static struct pci_driver otx2_pf_driver = {
+ .name = DRV_NAME,
+ .id_table = otx2_pf_id_table,
+ .probe = otx2_probe,
+ .shutdown = otx2_remove,
+ .remove = otx2_remove,
+};
+
+static int __init otx2_rvupf_init_module(void)
+{
+ pr_info("%s: %s\n", DRV_NAME, DRV_STRING);
+
+ return pci_register_driver(&otx2_pf_driver);
+}
+
+static void __exit otx2_rvupf_cleanup_module(void)
+{
+ pci_unregister_driver(&otx2_pf_driver);
+}
+
+module_init(otx2_rvupf_init_module);
+module_exit(otx2_rvupf_cleanup_module);
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_reg.h b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_reg.h
new file mode 100644
index 000000000000..7963d418886a
--- /dev/null
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_reg.h
@@ -0,0 +1,147 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Marvell OcteonTx2 RVU Ethernet driver
+ *
+ * Copyright (C) 2020 Marvell International Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef OTX2_REG_H
+#define OTX2_REG_H
+
+#include <rvu_struct.h>
+
+/* RVU PF registers */
+#define RVU_PF_VFX_PFVF_MBOX0 (0x00000)
+#define RVU_PF_VFX_PFVF_MBOX1 (0x00008)
+#define RVU_PF_VFX_PFVF_MBOXX(a, b) (0x0 | (a) << 12 | (b) << 3)
+#define RVU_PF_VF_BAR4_ADDR (0x10)
+#define RVU_PF_BLOCK_ADDRX_DISC(a) (0x200 | (a) << 3)
+#define RVU_PF_VFME_STATUSX(a) (0x800 | (a) << 3)
+#define RVU_PF_VFTRPENDX(a) (0x820 | (a) << 3)
+#define RVU_PF_VFTRPEND_W1SX(a) (0x840 | (a) << 3)
+#define RVU_PF_VFPF_MBOX_INTX(a) (0x880 | (a) << 3)
+#define RVU_PF_VFPF_MBOX_INT_W1SX(a) (0x8A0 | (a) << 3)
+#define RVU_PF_VFPF_MBOX_INT_ENA_W1SX(a) (0x8C0 | (a) << 3)
+#define RVU_PF_VFPF_MBOX_INT_ENA_W1CX(a) (0x8E0 | (a) << 3)
+#define RVU_PF_VFFLR_INTX(a) (0x900 | (a) << 3)
+#define RVU_PF_VFFLR_INT_W1SX(a) (0x920 | (a) << 3)
+#define RVU_PF_VFFLR_INT_ENA_W1SX(a) (0x940 | (a) << 3)
+#define RVU_PF_VFFLR_INT_ENA_W1CX(a) (0x960 | (a) << 3)
+#define RVU_PF_VFME_INTX(a) (0x980 | (a) << 3)
+#define RVU_PF_VFME_INT_W1SX(a) (0x9A0 | (a) << 3)
+#define RVU_PF_VFME_INT_ENA_W1SX(a) (0x9C0 | (a) << 3)
+#define RVU_PF_VFME_INT_ENA_W1CX(a) (0x9E0 | (a) << 3)
+#define RVU_PF_PFAF_MBOX0 (0xC00)
+#define RVU_PF_PFAF_MBOX1 (0xC08)
+#define RVU_PF_PFAF_MBOXX(a) (0xC00 | (a) << 3)
+#define RVU_PF_INT (0xc20)
+#define RVU_PF_INT_W1S (0xc28)
+#define RVU_PF_INT_ENA_W1S (0xc30)
+#define RVU_PF_INT_ENA_W1C (0xc38)
+#define RVU_PF_MSIX_VECX_ADDR(a) (0x000 | (a) << 4)
+#define RVU_PF_MSIX_VECX_CTL(a) (0x008 | (a) << 4)
+#define RVU_PF_MSIX_PBAX(a) (0xF0000 | (a) << 3)
+
+#define RVU_FUNC_BLKADDR_SHIFT 20
+#define RVU_FUNC_BLKADDR_MASK 0x1FULL
+
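+/* The LF register offsets below encode an RVU block address in
+ * bits [24:20] (see RVU_FUNC_BLKADDR_SHIFT/MASK); each per-block
+ * base is formed by shifting a block identifier into that field.
+ */
+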
+/* NPA LF registers */
+#define NPA_LFBASE (BLKTYPE_NPA << RVU_FUNC_BLKADDR_SHIFT)
+#define NPA_LF_AURA_OP_ALLOCX(a) (NPA_LFBASE | 0x10 | (a) << 3)
+#define NPA_LF_AURA_OP_FREE0 (NPA_LFBASE | 0x20)
+#define NPA_LF_AURA_OP_FREE1 (NPA_LFBASE | 0x28)
+#define NPA_LF_AURA_OP_CNT (NPA_LFBASE | 0x30)
+#define NPA_LF_AURA_OP_LIMIT (NPA_LFBASE | 0x50)
+#define NPA_LF_AURA_OP_INT (NPA_LFBASE | 0x60)
+#define NPA_LF_AURA_OP_THRESH (NPA_LFBASE | 0x70)
+#define NPA_LF_POOL_OP_PC (NPA_LFBASE | 0x100)
+#define NPA_LF_POOL_OP_AVAILABLE (NPA_LFBASE | 0x110)
+#define NPA_LF_POOL_OP_PTR_START0 (NPA_LFBASE | 0x120)
+#define NPA_LF_POOL_OP_PTR_START1 (NPA_LFBASE | 0x128)
+#define NPA_LF_POOL_OP_PTR_END0 (NPA_LFBASE | 0x130)
+#define NPA_LF_POOL_OP_PTR_END1 (NPA_LFBASE | 0x138)
+#define NPA_LF_POOL_OP_INT (NPA_LFBASE | 0x160)
+#define NPA_LF_POOL_OP_THRESH (NPA_LFBASE | 0x170)
+#define NPA_LF_ERR_INT (NPA_LFBASE | 0x200)
+#define NPA_LF_ERR_INT_W1S (NPA_LFBASE | 0x208)
+#define NPA_LF_ERR_INT_ENA_W1C (NPA_LFBASE | 0x210)
+#define NPA_LF_ERR_INT_ENA_W1S (NPA_LFBASE | 0x218)
+#define NPA_LF_RAS (NPA_LFBASE | 0x220)
+#define NPA_LF_RAS_W1S (NPA_LFBASE | 0x228)
+#define NPA_LF_RAS_ENA_W1C (NPA_LFBASE | 0x230)
+#define NPA_LF_RAS_ENA_W1S (NPA_LFBASE | 0x238)
+#define NPA_LF_QINTX_CNT(a) (NPA_LFBASE | 0x300 | (a) << 12)
+#define NPA_LF_QINTX_INT(a) (NPA_LFBASE | 0x310 | (a) << 12)
+#define NPA_LF_QINTX_INT_W1S(a) (NPA_LFBASE | 0x318 | (a) << 12)
+#define NPA_LF_QINTX_ENA_W1S(a) (NPA_LFBASE | 0x320 | (a) << 12)
+#define NPA_LF_QINTX_ENA_W1C(a) (NPA_LFBASE | 0x330 | (a) << 12)
+
+/* NIX LF registers */
+#define NIX_LFBASE (BLKTYPE_NIX << RVU_FUNC_BLKADDR_SHIFT)
+#define NIX_LF_RX_SECRETX(a) (NIX_LFBASE | 0x0 | (a) << 3)
+#define NIX_LF_CFG (NIX_LFBASE | 0x100)
+#define NIX_LF_GINT (NIX_LFBASE | 0x200)
+#define NIX_LF_GINT_W1S (NIX_LFBASE | 0x208)
+#define NIX_LF_GINT_ENA_W1C (NIX_LFBASE | 0x210)
+#define NIX_LF_GINT_ENA_W1S (NIX_LFBASE | 0x218)
+#define NIX_LF_ERR_INT (NIX_LFBASE | 0x220)
+#define NIX_LF_ERR_INT_W1S (NIX_LFBASE | 0x228)
+#define NIX_LF_ERR_INT_ENA_W1C (NIX_LFBASE | 0x230)
+#define NIX_LF_ERR_INT_ENA_W1S (NIX_LFBASE | 0x238)
+#define NIX_LF_RAS (NIX_LFBASE | 0x240)
+#define NIX_LF_RAS_W1S (NIX_LFBASE | 0x248)
+#define NIX_LF_RAS_ENA_W1C (NIX_LFBASE | 0x250)
+#define NIX_LF_RAS_ENA_W1S (NIX_LFBASE | 0x258)
+#define NIX_LF_SQ_OP_ERR_DBG (NIX_LFBASE | 0x260)
+#define NIX_LF_MNQ_ERR_DBG (NIX_LFBASE | 0x270)
+#define NIX_LF_SEND_ERR_DBG (NIX_LFBASE | 0x280)
+#define NIX_LF_TX_STATX(a) (NIX_LFBASE | 0x300 | (a) << 3)
+#define NIX_LF_RX_STATX(a) (NIX_LFBASE | 0x400 | (a) << 3)
+#define NIX_LF_OP_SENDX(a) (NIX_LFBASE | 0x800 | (a) << 3)
+#define NIX_LF_RQ_OP_INT (NIX_LFBASE | 0x900)
+#define NIX_LF_RQ_OP_OCTS (NIX_LFBASE | 0x910)
+#define NIX_LF_RQ_OP_PKTS (NIX_LFBASE | 0x920)
+#define NIX_LF_OP_IPSEC_DYNO_CN (NIX_LFBASE | 0x980)
+#define NIX_LF_SQ_OP_INT (NIX_LFBASE | 0xa00)
+#define NIX_LF_SQ_OP_OCTS (NIX_LFBASE | 0xa10)
+#define NIX_LF_SQ_OP_PKTS (NIX_LFBASE | 0xa20)
+#define NIX_LF_SQ_OP_STATUS (NIX_LFBASE | 0xa30)
+#define NIX_LF_CQ_OP_INT (NIX_LFBASE | 0xb00)
+#define NIX_LF_CQ_OP_DOOR (NIX_LFBASE | 0xb30)
+#define NIX_LF_CQ_OP_STATUS (NIX_LFBASE | 0xb40)
+#define NIX_LF_QINTX_CNT(a) (NIX_LFBASE | 0xC00 | (a) << 12)
+#define NIX_LF_QINTX_INT(a) (NIX_LFBASE | 0xC10 | (a) << 12)
+#define NIX_LF_QINTX_INT_W1S(a) (NIX_LFBASE | 0xC18 | (a) << 12)
+#define NIX_LF_QINTX_ENA_W1S(a) (NIX_LFBASE | 0xC20 | (a) << 12)
+#define NIX_LF_QINTX_ENA_W1C(a) (NIX_LFBASE | 0xC30 | (a) << 12)
+#define NIX_LF_CINTX_CNT(a) (NIX_LFBASE | 0xD00 | (a) << 12)
+#define NIX_LF_CINTX_WAIT(a) (NIX_LFBASE | 0xD10 | (a) << 12)
+#define NIX_LF_CINTX_INT(a) (NIX_LFBASE | 0xD20 | (a) << 12)
+#define NIX_LF_CINTX_INT_W1S(a) (NIX_LFBASE | 0xD30 | (a) << 12)
+#define NIX_LF_CINTX_ENA_W1S(a) (NIX_LFBASE | 0xD40 | (a) << 12)
+#define NIX_LF_CINTX_ENA_W1C(a) (NIX_LFBASE | 0xD50 | (a) << 12)
+
+/* NIX AF transmit scheduler registers */
+#define NIX_AF_SMQX_CFG(a) (0x700 | (a) << 16)
+#define NIX_AF_TL1X_SCHEDULE(a) (0xC00 | (a) << 16)
+#define NIX_AF_TL1X_CIR(a) (0xC20 | (a) << 16)
+#define NIX_AF_TL1X_TOPOLOGY(a) (0xC80 | (a) << 16)
+#define NIX_AF_TL2X_PARENT(a) (0xE88 | (a) << 16)
+#define NIX_AF_TL2X_SCHEDULE(a) (0xE00 | (a) << 16)
+#define NIX_AF_TL3X_PARENT(a) (0x1088 | (a) << 16)
+#define NIX_AF_TL3X_SCHEDULE(a) (0x1000 | (a) << 16)
+#define NIX_AF_TL4X_PARENT(a) (0x1288 | (a) << 16)
+#define NIX_AF_TL4X_SCHEDULE(a) (0x1200 | (a) << 16)
+#define NIX_AF_MDQX_SCHEDULE(a) (0x1400 | (a) << 16)
+#define NIX_AF_MDQX_PARENT(a) (0x1480 | (a) << 16)
+#define NIX_AF_TL3_TL2X_LINKX_CFG(a, b) (0x1700 | (a) << 16 | (b) << 3)
+
+/* LMT LF registers */
+#define LMT_LFBASE BIT_ULL(RVU_FUNC_BLKADDR_SHIFT)
+#define LMT_LF_LMTLINEX(a) (LMT_LFBASE | 0x000 | (a) << 12)
+#define LMT_LF_LMTCANCEL (LMT_LFBASE | 0x400)
+
+#endif /* OTX2_REG_H */
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_struct.h b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_struct.h
new file mode 100644
index 000000000000..cba59ddf71bb
--- /dev/null
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_struct.h
@@ -0,0 +1,276 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Marvell OcteonTx2 RVU Ethernet driver
+ *
+ * Copyright (C) 2020 Marvell International Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef OTX2_STRUCT_H
+#define OTX2_STRUCT_H
+
+/* NIX WQE/CQE size: NIX_XQESZ_W64 is 64 words (512 bytes) and
+ * NIX_XQESZ_W16 is 16 words (128 bytes).
+ */
+enum nix_cqesz_e {
+ NIX_XQESZ_W64 = 0x0,
+ NIX_XQESZ_W16 = 0x1,
+};
+
+enum nix_sqes_e {
+ NIX_SQESZ_W16 = 0x0,
+ NIX_SQESZ_W8 = 0x1,
+};
+
+enum nix_send_ldtype {
+ NIX_SEND_LDTYPE_LDD = 0x0,
+ NIX_SEND_LDTYPE_LDT = 0x1,
+ NIX_SEND_LDTYPE_LDWB = 0x2,
+};
+
+/* CSUM offload */
+enum nix_sendl3type {
+ NIX_SENDL3TYPE_NONE = 0x0,
+ NIX_SENDL3TYPE_IP4 = 0x2,
+ NIX_SENDL3TYPE_IP4_CKSUM = 0x3,
+ NIX_SENDL3TYPE_IP6 = 0x4,
+};
+
+enum nix_sendl4type {
+ NIX_SENDL4TYPE_NONE,
+ NIX_SENDL4TYPE_TCP_CKSUM,
+ NIX_SENDL4TYPE_SCTP_CKSUM,
+ NIX_SENDL4TYPE_UDP_CKSUM,
+};
+
+/* NIX wqe/cqe types */
+enum nix_xqe_type {
+ NIX_XQE_TYPE_INVALID = 0x0,
+ NIX_XQE_TYPE_RX = 0x1,
+ NIX_XQE_TYPE_RX_IPSECS = 0x2,
+ NIX_XQE_TYPE_RX_IPSECH = 0x3,
+ NIX_XQE_TYPE_RX_IPSECD = 0x4,
+ NIX_XQE_TYPE_SEND = 0x8,
+};
+
+/* NIX CQE/SQE subdescriptor types */
+enum nix_subdc {
+ NIX_SUBDC_NOP = 0x0,
+ NIX_SUBDC_EXT = 0x1,
+ NIX_SUBDC_CRC = 0x2,
+ NIX_SUBDC_IMM = 0x3,
+ NIX_SUBDC_SG = 0x4,
+ NIX_SUBDC_MEM = 0x5,
+ NIX_SUBDC_JUMP = 0x6,
+ NIX_SUBDC_WORK = 0x7,
+ NIX_SUBDC_SOD = 0xf,
+};
+
+/* Algorithm for nix_sqe_mem_s header (value of the `alg` field) */
+enum nix_sendmemalg {
+ NIX_SENDMEMALG_E_SET = 0x0,
+ NIX_SENDMEMALG_E_SETTSTMP = 0x1,
+ NIX_SENDMEMALG_E_SETRSLT = 0x2,
+ NIX_SENDMEMALG_E_ADD = 0x8,
+ NIX_SENDMEMALG_E_SUB = 0x9,
+ NIX_SENDMEMALG_E_ADDLEN = 0xa,
+ NIX_SENDMEMALG_E_SUBLEN = 0xb,
+ NIX_SENDMEMALG_E_ADDMBUF = 0xc,
+ NIX_SENDMEMALG_E_SUBMBUF = 0xd,
+ NIX_SENDMEMALG_E_ENUM_LAST = 0xe,
+};
+
+/* NIX CQE header structure */
+struct nix_cqe_hdr_s {
+ u64 flow_tag : 32;
+ u64 q : 20;
+ u64 reserved_52_57 : 6;
+ u64 node : 2;
+ u64 cqe_type : 4;
+};
+
+/* NIX CQE RX parse structure */
+struct nix_rx_parse_s {
+ u64 chan : 12;
+ u64 desc_sizem1 : 5;
+ u64 rsvd_17 : 1;
+ u64 express : 1;
+ u64 wqwd : 1;
+ u64 errlev : 4;
+ u64 errcode : 8;
+ u64 latype : 4;
+ u64 lbtype : 4;
+ u64 lctype : 4;
+ u64 ldtype : 4;
+ u64 letype : 4;
+ u64 lftype : 4;
+ u64 lgtype : 4;
+ u64 lhtype : 4;
+ u64 pkt_lenm1 : 16; /* W1 */
+ u64 l2m : 1;
+ u64 l2b : 1;
+ u64 l3m : 1;
+ u64 l3b : 1;
+ u64 vtag0_valid : 1;
+ u64 vtag0_gone : 1;
+ u64 vtag1_valid : 1;
+ u64 vtag1_gone : 1;
+ u64 pkind : 6;
+ u64 rsvd_95_94 : 2;
+ u64 vtag0_tci : 16;
+ u64 vtag1_tci : 16;
+ u64 laflags : 8; /* W2 */
+ u64 lbflags : 8;
+ u64 lcflags : 8;
+ u64 ldflags : 8;
+ u64 leflags : 8;
+ u64 lfflags : 8;
+ u64 lgflags : 8;
+ u64 lhflags : 8;
+ u64 eoh_ptr : 8; /* W3 */
+ u64 wqe_aura : 20;
+ u64 pb_aura : 20;
+ u64 match_id : 16;
+ u64 laptr : 8; /* W4 */
+ u64 lbptr : 8;
+ u64 lcptr : 8;
+ u64 ldptr : 8;
+ u64 leptr : 8;
+ u64 lfptr : 8;
+ u64 lgptr : 8;
+ u64 lhptr : 8;
+ u64 vtag0_ptr : 8; /* W5 */
+ u64 vtag1_ptr : 8;
+ u64 flow_key_alg : 5;
+ u64 rsvd_383_341 : 43;
+ u64 rsvd_447_384; /* W6 */
+};
+
+/* NIX CQE RX scatter/gather subdescriptor structure */
+struct nix_rx_sg_s {
+ u64 seg_size : 16; /* W0 */
+ u64 seg2_size : 16;
+ u64 seg3_size : 16;
+ u64 segs : 2;
+ u64 rsvd_59_50 : 10;
+ u64 subdc : 4;
+ u64 seg_addr;
+ u64 seg2_addr;
+ u64 seg3_addr;
+};
+
+struct nix_send_comp_s {
+ u64 status : 8;
+ u64 sqe_id : 16;
+ u64 rsvd_24_63 : 40;
+};
+
+struct nix_cqe_rx_s {
+ struct nix_cqe_hdr_s hdr;
+ struct nix_rx_parse_s parse;
+ struct nix_rx_sg_s sg;
+};
+
+struct nix_cqe_tx_s {
+ struct nix_cqe_hdr_s hdr;
+ struct nix_send_comp_s comp;
+};
+
+/* NIX SQE header structure */
+struct nix_sqe_hdr_s {
+ u64 total : 18; /* W0 */
+ u64 reserved_18 : 1;
+ u64 df : 1;
+ u64 aura : 20;
+ u64 sizem1 : 3;
+ u64 pnc : 1;
+ u64 sq : 20;
+ u64 ol3ptr : 8; /* W1 */
+ u64 ol4ptr : 8;
+ u64 il3ptr : 8;
+ u64 il4ptr : 8;
+ u64 ol3type : 4;
+ u64 ol4type : 4;
+ u64 il3type : 4;
+ u64 il4type : 4;
+ u64 sqe_id : 16;
+};
+
+/* NIX send extended header subdescriptor structure */
+struct nix_sqe_ext_s {
+ u64 lso_mps : 14; /* W0 */
+ u64 lso : 1;
+ u64 tstmp : 1;
+ u64 lso_sb : 8;
+ u64 lso_format : 5;
+ u64 rsvd_31_29 : 3;
+ u64 shp_chg : 9;
+ u64 shp_dis : 1;
+ u64 shp_ra : 2;
+ u64 markptr : 8;
+ u64 markform : 7;
+ u64 mark_en : 1;
+ u64 subdc : 4;
+ u64 vlan0_ins_ptr : 8; /* W1 */
+ u64 vlan0_ins_tci : 16;
+ u64 vlan1_ins_ptr : 8;
+ u64 vlan1_ins_tci : 16;
+ u64 vlan0_ins_ena : 1;
+ u64 vlan1_ins_ena : 1;
+ u64 rsvd_127_114 : 14;
+};
+
+struct nix_sqe_sg_s {
+ u64 seg1_size : 16;
+ u64 seg2_size : 16;
+ u64 seg3_size : 16;
+ u64 segs : 2;
+ u64 rsvd_54_50 : 5;
+ u64 i1 : 1;
+ u64 i2 : 1;
+ u64 i3 : 1;
+ u64 ld_type : 2;
+ u64 subdc : 4;
+};
+
+/* NIX send memory subdescriptor structure */
+struct nix_sqe_mem_s {
+ u64 offset : 16; /* W0 */
+ u64 rsvd_52_16 : 37;
+ u64 wmem : 1;
+ u64 dsz : 2;
+ u64 alg : 4;
+ u64 subdc : 4;
+ u64 addr; /* W1 */
+};
+
+enum nix_cqerrint_e {
+ NIX_CQERRINT_DOOR_ERR = 0,
+ NIX_CQERRINT_WR_FULL = 1,
+ NIX_CQERRINT_CQE_FAULT = 2,
+};
+
+#define NIX_CQERRINT_BITS (BIT_ULL(NIX_CQERRINT_DOOR_ERR) | \
+ BIT_ULL(NIX_CQERRINT_CQE_FAULT))
+
+enum nix_rqint_e {
+ NIX_RQINT_DROP = 0,
+ NIX_RQINT_RED = 1,
+};
+
+#define NIX_RQINT_BITS (BIT_ULL(NIX_RQINT_DROP) | BIT_ULL(NIX_RQINT_RED))
+
+enum nix_sqint_e {
+ NIX_SQINT_LMT_ERR = 0,
+ NIX_SQINT_MNQ_ERR = 1,
+ NIX_SQINT_SEND_ERR = 2,
+ NIX_SQINT_SQB_ALLOC_FAIL = 3,
+};
+
+#define NIX_SQINT_BITS (BIT_ULL(NIX_SQINT_LMT_ERR) | \
+ BIT_ULL(NIX_SQINT_MNQ_ERR) | \
+ BIT_ULL(NIX_SQINT_SEND_ERR) | \
+ BIT_ULL(NIX_SQINT_SQB_ALLOC_FAIL))
+
+#endif /* OTX2_STRUCT_H */
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c
new file mode 100644
index 000000000000..bef4c20fe314
--- /dev/null
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c
@@ -0,0 +1,848 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Marvell OcteonTx2 RVU Ethernet driver
+ *
+ * Copyright (C) 2020 Marvell International Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/etherdevice.h>
+#include <net/ip.h>
+#include <net/tso.h>
+
+#include "otx2_reg.h"
+#include "otx2_common.h"
+#include "otx2_struct.h"
+#include "otx2_txrx.h"
+
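+/* A queue's CQEs occupy one contiguous ring, so the idx'th CQE
+ * sits at cqe_base + (cqe_size * idx).
+ */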
+#define CQE_ADDR(CQ, idx) ((CQ)->cqe_base + ((CQ)->cqe_size * (idx)))
+
+static struct nix_cqe_hdr_s *otx2_get_next_cqe(struct otx2_cq_queue *cq)
+{
+ struct nix_cqe_hdr_s *cqe_hdr;
+
+ cqe_hdr = (struct nix_cqe_hdr_s *)CQE_ADDR(cq, cq->cq_head);
+ if (cqe_hdr->cqe_type == NIX_XQE_TYPE_INVALID)
+ return NULL;
+
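+	/* Advance and wrap the head; the ring size (cqe_cnt) is
+	 * assumed to be a power of two here.
+	 */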
+ cq->cq_head++;
+ cq->cq_head &= (cq->cqe_cnt - 1);
+
+ return cqe_hdr;
+}
+
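+/* The three 16bit seg sizes of an SG subdescriptor share one 64bit
+ * word, so on big-endian hosts the u16 slots appear in reverse
+ * order. frag_num() maps a logical segment index to its slot,
+ * e.g. 0 -> 3, 1 -> 2, 2 -> 1, 3 -> 0 within each group of four.
+ */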
+static unsigned int frag_num(unsigned int i)
+{
+#ifdef __BIG_ENDIAN
+ return (i & ~3) + 3 - (i & 3);
+#else
+ return i;
+#endif
+}
+
+static dma_addr_t otx2_dma_map_skb_frag(struct otx2_nic *pfvf,
+ struct sk_buff *skb, int seg, int *len)
+{
+ const skb_frag_t *frag;
+ struct page *page;
+ int offset;
+
+ /* First segment is always skb->data */
+ if (!seg) {
+ page = virt_to_page(skb->data);
+ offset = offset_in_page(skb->data);
+ *len = skb_headlen(skb);
+ } else {
+ frag = &skb_shinfo(skb)->frags[seg - 1];
+ page = skb_frag_page(frag);
+ offset = skb_frag_off(frag);
+ *len = skb_frag_size(frag);
+ }
+ return otx2_dma_map_page(pfvf, page, offset, *len, DMA_TO_DEVICE);
+}
+
+static void otx2_dma_unmap_skb_frags(struct otx2_nic *pfvf, struct sg_list *sg)
+{
+ int seg;
+
+ for (seg = 0; seg < sg->num_segs; seg++) {
+ otx2_dma_unmap_page(pfvf, sg->dma_addr[seg],
+ sg->size[seg], DMA_TO_DEVICE);
+ }
+ sg->num_segs = 0;
+}
+
+static void otx2_snd_pkt_handler(struct otx2_nic *pfvf,
+ struct otx2_cq_queue *cq,
+ struct otx2_snd_queue *sq,
+ struct nix_cqe_tx_s *cqe,
+ int budget, int *tx_pkts, int *tx_bytes)
+{
+ struct nix_send_comp_s *snd_comp = &cqe->comp;
+ struct sk_buff *skb = NULL;
+ struct sg_list *sg;
+
+ if (unlikely(snd_comp->status) && netif_msg_tx_err(pfvf))
+ net_err_ratelimited("%s: TX%d: Error in send CQ status:%x\n",
+ pfvf->netdev->name, cq->cint_idx,
+ snd_comp->status);
+
+ sg = &sq->sg[snd_comp->sqe_id];
+ skb = (struct sk_buff *)sg->skb;
+ if (unlikely(!skb))
+ return;
+
+ *tx_bytes += skb->len;
+ (*tx_pkts)++;
+ otx2_dma_unmap_skb_frags(pfvf, sg);
+ napi_consume_skb(skb, budget);
+ sg->skb = (u64)NULL;
+}
+
+static void otx2_skb_add_frag(struct otx2_nic *pfvf, struct sk_buff *skb,
+ u64 iova, int len)
+{
+ struct page *page;
+ void *va;
+
+ va = phys_to_virt(otx2_iova_to_phys(pfvf->iommu_domain, iova));
+ page = virt_to_page(va);
+ skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page,
+ va - page_address(page), len, pfvf->rbsize);
+
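+	/* The buffer was mapped starting OTX2_HEAD_ROOM bytes before
+	 * the pointer handed to HW, so step back to the original IOVA
+	 * before unmapping.
+	 */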
+ otx2_dma_unmap_page(pfvf, iova - OTX2_HEAD_ROOM,
+ pfvf->rbsize, DMA_FROM_DEVICE);
+}
+
+static void otx2_set_rxhash(struct otx2_nic *pfvf,
+ struct nix_cqe_rx_s *cqe, struct sk_buff *skb)
+{
+ enum pkt_hash_types hash_type = PKT_HASH_TYPE_NONE;
+ struct otx2_rss_info *rss;
+ u32 hash = 0;
+
+ if (!(pfvf->netdev->features & NETIF_F_RXHASH))
+ return;
+
+ rss = &pfvf->hw.rss_info;
+ if (rss->flowkey_cfg) {
+ if (rss->flowkey_cfg &
+ ~(NIX_FLOW_KEY_TYPE_IPV4 | NIX_FLOW_KEY_TYPE_IPV6))
+ hash_type = PKT_HASH_TYPE_L4;
+ else
+ hash_type = PKT_HASH_TYPE_L3;
+ hash = cqe->hdr.flow_tag;
+ }
+ skb_set_hash(skb, hash, hash_type);
+}
+
+static bool otx2_check_rcv_errors(struct otx2_nic *pfvf,
+ struct nix_cqe_rx_s *cqe, int qidx)
+{
+ struct otx2_drv_stats *stats = &pfvf->hw.drv_stats;
+ struct nix_rx_parse_s *parse = &cqe->parse;
+
+ if (netif_msg_rx_err(pfvf))
+ netdev_err(pfvf->netdev,
+ "RQ%d: Error pkt with errlev:0x%x errcode:0x%x\n",
+ qidx, parse->errlev, parse->errcode);
+
+ if (parse->errlev == NPC_ERRLVL_RE) {
+ switch (parse->errcode) {
+ case ERRCODE_FCS:
+ case ERRCODE_FCS_RCV:
+ atomic_inc(&stats->rx_fcs_errs);
+ break;
+ case ERRCODE_UNDERSIZE:
+ atomic_inc(&stats->rx_undersize_errs);
+ break;
+ case ERRCODE_OVERSIZE:
+ atomic_inc(&stats->rx_oversize_errs);
+ break;
+ case ERRCODE_OL2_LEN_MISMATCH:
+ atomic_inc(&stats->rx_len_errs);
+ break;
+ default:
+ atomic_inc(&stats->rx_other_errs);
+ break;
+ }
+ } else if (parse->errlev == NPC_ERRLVL_NIX) {
+ switch (parse->errcode) {
+ case ERRCODE_OL3_LEN:
+ case ERRCODE_OL4_LEN:
+ case ERRCODE_IL3_LEN:
+ case ERRCODE_IL4_LEN:
+ atomic_inc(&stats->rx_len_errs);
+ break;
+ case ERRCODE_OL4_CSUM:
+ case ERRCODE_IL4_CSUM:
+ atomic_inc(&stats->rx_csum_errs);
+ break;
+ default:
+ atomic_inc(&stats->rx_other_errs);
+ break;
+ }
+ } else {
+ atomic_inc(&stats->rx_other_errs);
+		/* For now ignore all NPC parser errors and pass
+		 * the packets on to the stack.
+		 */
+ return false;
+ }
+
+	/* If RXALL is enabled, pass the packets on to the stack. */
+ if (cqe->sg.segs && (pfvf->netdev->features & NETIF_F_RXALL))
+ return false;
+
+ /* Free buffer back to pool */
+ if (cqe->sg.segs)
+ otx2_aura_freeptr(pfvf, qidx, cqe->sg.seg_addr & ~0x07ULL);
+ return true;
+}
+
+static void otx2_rcv_pkt_handler(struct otx2_nic *pfvf,
+ struct napi_struct *napi,
+ struct otx2_cq_queue *cq,
+ struct nix_cqe_rx_s *cqe)
+{
+ struct nix_rx_parse_s *parse = &cqe->parse;
+ struct sk_buff *skb = NULL;
+
+ if (unlikely(parse->errlev || parse->errcode)) {
+ if (otx2_check_rcv_errors(pfvf, cqe, cq->cq_idx))
+ return;
+ }
+
+ skb = napi_get_frags(napi);
+ if (unlikely(!skb))
+ return;
+
+ otx2_skb_add_frag(pfvf, skb, cqe->sg.seg_addr, cqe->sg.seg_size);
+ cq->pool_ptrs++;
+
+ otx2_set_rxhash(pfvf, cqe, skb);
+
+ skb_record_rx_queue(skb, cq->cq_idx);
+ if (pfvf->netdev->features & NETIF_F_RXCSUM)
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
+
+ napi_gro_frags(napi);
+}
+
+static int otx2_rx_napi_handler(struct otx2_nic *pfvf,
+ struct napi_struct *napi,
+ struct otx2_cq_queue *cq, int budget)
+{
+ struct nix_cqe_rx_s *cqe;
+ int processed_cqe = 0;
+ s64 bufptr;
+
+ while (likely(processed_cqe < budget)) {
+ cqe = (struct nix_cqe_rx_s *)CQE_ADDR(cq, cq->cq_head);
+ if (cqe->hdr.cqe_type == NIX_XQE_TYPE_INVALID ||
+ !cqe->sg.seg_addr) {
+ if (!processed_cqe)
+ return 0;
+ break;
+ }
+ cq->cq_head++;
+ cq->cq_head &= (cq->cqe_cnt - 1);
+
+ otx2_rcv_pkt_handler(pfvf, napi, cq, cqe);
+
+ cqe->hdr.cqe_type = NIX_XQE_TYPE_INVALID;
+ cqe->sg.seg_addr = 0x00;
+ processed_cqe++;
+ }
+
+ /* Free CQEs to HW */
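+	/* Doorbell format: bits [63:32] select the CQ, the low bits
+	 * give the number of CQEs being released back to HW.
+	 */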
+ otx2_write64(pfvf, NIX_LF_CQ_OP_DOOR,
+ ((u64)cq->cq_idx << 32) | processed_cqe);
+
+ if (unlikely(!cq->pool_ptrs))
+ return 0;
+
+ /* Refill pool with new buffers */
+ while (cq->pool_ptrs) {
+ bufptr = otx2_alloc_rbuf(pfvf, cq->rbpool, GFP_ATOMIC);
+ if (unlikely(bufptr <= 0)) {
+ struct refill_work *work;
+ struct delayed_work *dwork;
+
+ work = &pfvf->refill_wrk[cq->cq_idx];
+ dwork = &work->pool_refill_work;
+ /* Schedule a task if no other task is running */
+ if (!cq->refill_task_sched) {
+ cq->refill_task_sched = true;
+ schedule_delayed_work(dwork,
+ msecs_to_jiffies(100));
+ }
+ break;
+ }
+ otx2_aura_freeptr(pfvf, cq->cq_idx, bufptr + OTX2_HEAD_ROOM);
+ cq->pool_ptrs--;
+ }
+
+ return processed_cqe;
+}
+
+static int otx2_tx_napi_handler(struct otx2_nic *pfvf,
+ struct otx2_cq_queue *cq, int budget)
+{
+ int tx_pkts = 0, tx_bytes = 0;
+ struct nix_cqe_tx_s *cqe;
+ int processed_cqe = 0;
+
+ while (likely(processed_cqe < budget)) {
+ cqe = (struct nix_cqe_tx_s *)otx2_get_next_cqe(cq);
+ if (unlikely(!cqe)) {
+ if (!processed_cqe)
+ return 0;
+ break;
+ }
+ otx2_snd_pkt_handler(pfvf, cq, &pfvf->qset.sq[cq->cint_idx],
+ cqe, budget, &tx_pkts, &tx_bytes);
+
+ cqe->hdr.cqe_type = NIX_XQE_TYPE_INVALID;
+ processed_cqe++;
+ }
+
+ /* Free CQEs to HW */
+ otx2_write64(pfvf, NIX_LF_CQ_OP_DOOR,
+ ((u64)cq->cq_idx << 32) | processed_cqe);
+
+ if (likely(tx_pkts)) {
+ struct netdev_queue *txq;
+
+ txq = netdev_get_tx_queue(pfvf->netdev, cq->cint_idx);
+ netdev_tx_completed_queue(txq, tx_pkts, tx_bytes);
+ /* Check if queue was stopped earlier due to ring full */
+ smp_mb();
+ if (netif_tx_queue_stopped(txq) &&
+ netif_carrier_ok(pfvf->netdev))
+ netif_tx_wake_queue(txq);
+ }
+ return 0;
+}
+
+int otx2_napi_handler(struct napi_struct *napi, int budget)
+{
+ struct otx2_cq_poll *cq_poll;
+ int workdone = 0, cq_idx, i;
+ struct otx2_cq_queue *cq;
+ struct otx2_qset *qset;
+ struct otx2_nic *pfvf;
+
+ cq_poll = container_of(napi, struct otx2_cq_poll, napi);
+ pfvf = (struct otx2_nic *)cq_poll->dev;
+ qset = &pfvf->qset;
+
+ for (i = CQS_PER_CINT - 1; i >= 0; i--) {
+ cq_idx = cq_poll->cq_ids[i];
+ if (unlikely(cq_idx == CINT_INVALID_CQ))
+ continue;
+ cq = &qset->cq[cq_idx];
+ if (cq->cq_type == CQ_RX) {
+			/* If the RQ refill workqueue task is running,
+			 * skip NAPI processing for this queue.
+			 */
+ if (cq->refill_task_sched)
+ continue;
+ workdone += otx2_rx_napi_handler(pfvf, napi,
+ cq, budget);
+ } else {
+ workdone += otx2_tx_napi_handler(pfvf, cq, budget);
+ }
+ }
+
+ /* Clear the IRQ */
+ otx2_write64(pfvf, NIX_LF_CINTX_INT(cq_poll->cint_idx), BIT_ULL(0));
+
+ if (workdone < budget && napi_complete_done(napi, workdone)) {
+ /* If interface is going down, don't re-enable IRQ */
+ if (pfvf->flags & OTX2_FLAG_INTF_DOWN)
+ return workdone;
+
+ /* Re-enable interrupts */
+ otx2_write64(pfvf, NIX_LF_CINTX_ENA_W1S(cq_poll->cint_idx),
+ BIT_ULL(0));
+ }
+ return workdone;
+}
+
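+/* Push an SQE to HW via LMTST: copy the SQE into the LMT region and
+ * issue an LMT flush to the SQ's doorbell I/O address. A status of
+ * zero means the atomic store was aborted, so retry until it lands.
+ */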
+static void otx2_sqe_flush(struct otx2_snd_queue *sq, int size)
+{
+ u64 status;
+
+ /* Packet data stores should finish before SQE is flushed to HW */
+ dma_wmb();
+
+ do {
+ memcpy(sq->lmt_addr, sq->sqe_base, size);
+ status = otx2_lmt_flush(sq->io_addr);
+ } while (status == 0);
+
+ sq->head++;
+ sq->head &= (sq->sqe_cnt - 1);
+}
+
+#define MAX_SEGS_PER_SG 3
+/* Add SQE scatter/gather subdescriptor structure */
+static bool otx2_sqe_add_sg(struct otx2_nic *pfvf, struct otx2_snd_queue *sq,
+ struct sk_buff *skb, int num_segs, int *offset)
+{
+ struct nix_sqe_sg_s *sg = NULL;
+ u64 dma_addr, *iova = NULL;
+ u16 *sg_lens = NULL;
+ int seg, len;
+
+ sq->sg[sq->head].num_segs = 0;
+
+ for (seg = 0; seg < num_segs; seg++) {
+ if ((seg % MAX_SEGS_PER_SG) == 0) {
+ sg = (struct nix_sqe_sg_s *)(sq->sqe_base + *offset);
+ sg->ld_type = NIX_SEND_LDTYPE_LDD;
+ sg->subdc = NIX_SUBDC_SG;
+ sg->segs = 0;
+ sg_lens = (void *)sg;
+ iova = (void *)sg + sizeof(*sg);
+			/* The next subdc always starts at a 16byte
+			 * boundary, so whether sg->segs ends up as 2 or
+			 * 3, advance the offset past all three IOVA
+			 * slots.
+			 */
+ if ((num_segs - seg) >= (MAX_SEGS_PER_SG - 1))
+ *offset += sizeof(*sg) + (3 * sizeof(u64));
+ else
+ *offset += sizeof(*sg) + sizeof(u64);
+ }
+ dma_addr = otx2_dma_map_skb_frag(pfvf, skb, seg, &len);
+ if (dma_mapping_error(pfvf->dev, dma_addr))
+ return false;
+
+ sg_lens[frag_num(seg % MAX_SEGS_PER_SG)] = len;
+ sg->segs++;
+ *iova++ = dma_addr;
+
+ /* Save DMA mapping info for later unmapping */
+ sq->sg[sq->head].dma_addr[seg] = dma_addr;
+ sq->sg[sq->head].size[seg] = len;
+ sq->sg[sq->head].num_segs++;
+ }
+
+ sq->sg[sq->head].skb = (u64)skb;
+ return true;
+}
+
+/* Add SQE extended header subdescriptor */
+static void otx2_sqe_add_ext(struct otx2_nic *pfvf, struct otx2_snd_queue *sq,
+ struct sk_buff *skb, int *offset)
+{
+ struct nix_sqe_ext_s *ext;
+
+ ext = (struct nix_sqe_ext_s *)(sq->sqe_base + *offset);
+ ext->subdc = NIX_SUBDC_EXT;
+ if (skb_shinfo(skb)->gso_size) {
+ ext->lso = 1;
+ ext->lso_sb = skb_transport_offset(skb) + tcp_hdrlen(skb);
+ ext->lso_mps = skb_shinfo(skb)->gso_size;
+
+ /* Only TSOv4 and TSOv6 GSO offloads are supported */
+ if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4) {
+ ext->lso_format = pfvf->hw.lso_tsov4_idx;
+
+			/* HW adds the payload size to 'ip_hdr->tot_len'
+			 * while sending each TSO segment, so set 'tot_len'
+			 * in the packet's IP header to just the header
+			 * length.
+			 */
+ ip_hdr(skb)->tot_len =
+ htons(ext->lso_sb - skb_network_offset(skb));
+ } else {
+ ext->lso_format = pfvf->hw.lso_tsov6_idx;
+ ipv6_hdr(skb)->payload_len =
+ htons(ext->lso_sb - skb_network_offset(skb));
+ }
+ }
+ *offset += sizeof(*ext);
+}
+
+/* Add SQE header subdescriptor structure */
+static void otx2_sqe_add_hdr(struct otx2_nic *pfvf, struct otx2_snd_queue *sq,
+ struct nix_sqe_hdr_s *sqe_hdr,
+ struct sk_buff *skb, u16 qidx)
+{
+ int proto = 0;
+
+	/* If an SQE was already framed at this slot, these
+	 * constant fields are still valid and need not be set
+	 * again.
+	 */
+ if (!sqe_hdr->total) {
+ /* Don't free Tx buffers to Aura */
+ sqe_hdr->df = 1;
+ sqe_hdr->aura = sq->aura_id;
+ /* Post a CQE Tx after pkt transmission */
+ sqe_hdr->pnc = 1;
+ sqe_hdr->sq = qidx;
+ }
+ sqe_hdr->total = skb->len;
+ /* Set SQE identifier which will be used later for freeing SKB */
+ sqe_hdr->sqe_id = sq->head;
+
+ /* Offload TCP/UDP checksum to HW */
+ if (skb->ip_summed == CHECKSUM_PARTIAL) {
+ sqe_hdr->ol3ptr = skb_network_offset(skb);
+ sqe_hdr->ol4ptr = skb_transport_offset(skb);
+ /* get vlan protocol Ethertype */
+ if (eth_type_vlan(skb->protocol))
+ skb->protocol = vlan_get_protocol(skb);
+
+ if (skb->protocol == htons(ETH_P_IP)) {
+ proto = ip_hdr(skb)->protocol;
+ /* In case of TSO, HW needs this to be explicitly set.
+ * So set this always, instead of adding a check.
+ */
+ sqe_hdr->ol3type = NIX_SENDL3TYPE_IP4_CKSUM;
+ } else if (skb->protocol == htons(ETH_P_IPV6)) {
+ proto = ipv6_hdr(skb)->nexthdr;
+ }
+
+ if (proto == IPPROTO_TCP)
+ sqe_hdr->ol4type = NIX_SENDL4TYPE_TCP_CKSUM;
+ else if (proto == IPPROTO_UDP)
+ sqe_hdr->ol4type = NIX_SENDL4TYPE_UDP_CKSUM;
+ }
+}
+
+static int otx2_dma_map_tso_skb(struct otx2_nic *pfvf,
+ struct otx2_snd_queue *sq,
+ struct sk_buff *skb, int sqe, int hdr_len)
+{
+ int num_segs = skb_shinfo(skb)->nr_frags + 1;
+ struct sg_list *sg = &sq->sg[sqe];
+ u64 dma_addr;
+ int seg, len;
+
+ sg->num_segs = 0;
+
+ /* Get payload length at skb->data */
+ len = skb_headlen(skb) - hdr_len;
+
+ for (seg = 0; seg < num_segs; seg++) {
+ /* Skip skb->data, if there is no payload */
+ if (!seg && !len)
+ continue;
+ dma_addr = otx2_dma_map_skb_frag(pfvf, skb, seg, &len);
+ if (dma_mapping_error(pfvf->dev, dma_addr))
+ goto unmap;
+
+ /* Save DMA mapping info for later unmapping */
+ sg->dma_addr[sg->num_segs] = dma_addr;
+ sg->size[sg->num_segs] = len;
+ sg->num_segs++;
+ }
+ return 0;
+unmap:
+ otx2_dma_unmap_skb_frags(pfvf, sg);
+ return -EINVAL;
+}
+
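+/* Resolve a TSO fragment's DMA address from the mappings saved at
+ * SQE 'sqe'. A negative seg refers to the skb linear area, mapped
+ * at dma_addr[0]; for page frags the slot index is bumped by one
+ * when the linear area carried payload of its own.
+ */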
+static u64 otx2_tso_frag_dma_addr(struct otx2_snd_queue *sq,
+ struct sk_buff *skb, int seg,
+ u64 seg_addr, int hdr_len, int sqe)
+{
+ struct sg_list *sg = &sq->sg[sqe];
+ const skb_frag_t *frag;
+ int offset;
+
+ if (seg < 0)
+ return sg->dma_addr[0] + (seg_addr - (u64)skb->data);
+
+ frag = &skb_shinfo(skb)->frags[seg];
+ offset = seg_addr - (u64)skb_frag_address(frag);
+ if (skb_headlen(skb) - hdr_len)
+ seg++;
+ return sg->dma_addr[seg] + offset;
+}
+
+static void otx2_sqe_tso_add_sg(struct otx2_snd_queue *sq,
+ struct sg_list *list, int *offset)
+{
+ struct nix_sqe_sg_s *sg = NULL;
+ u16 *sg_lens = NULL;
+ u64 *iova = NULL;
+ int seg;
+
+ /* Add SG descriptors with buffer addresses */
+ for (seg = 0; seg < list->num_segs; seg++) {
+ if ((seg % MAX_SEGS_PER_SG) == 0) {
+ sg = (struct nix_sqe_sg_s *)(sq->sqe_base + *offset);
+ sg->ld_type = NIX_SEND_LDTYPE_LDD;
+ sg->subdc = NIX_SUBDC_SG;
+ sg->segs = 0;
+ sg_lens = (void *)sg;
+ iova = (void *)sg + sizeof(*sg);
+			/* The next subdc always starts at a 16byte
+			 * boundary, so whether sg->segs ends up as 2 or
+			 * 3, advance the offset past all three IOVA
+			 * slots.
+			 */
+ if ((list->num_segs - seg) >= (MAX_SEGS_PER_SG - 1))
+ *offset += sizeof(*sg) + (3 * sizeof(u64));
+ else
+ *offset += sizeof(*sg) + sizeof(u64);
+ }
+ sg_lens[frag_num(seg % MAX_SEGS_PER_SG)] = list->size[seg];
+ *iova++ = list->dma_addr[seg];
+ sg->segs++;
+ }
+}
+
+static void otx2_sq_append_tso(struct otx2_nic *pfvf, struct otx2_snd_queue *sq,
+ struct sk_buff *skb, u16 qidx)
+{
+ struct netdev_queue *txq = netdev_get_tx_queue(pfvf->netdev, qidx);
+ int hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
+ int tcp_data, seg_len, pkt_len, offset;
+ struct nix_sqe_hdr_s *sqe_hdr;
+ int first_sqe = sq->head;
+ struct sg_list list;
+ struct tso_t tso;
+
+	/* Map the SKB's fragments to DMA.
+	 * Doing it here avoids remapping them for every TSO
+	 * segment's packet.
+	 */
+ if (otx2_dma_map_tso_skb(pfvf, sq, skb, first_sqe, hdr_len)) {
+ dev_kfree_skb_any(skb);
+ return;
+ }
+
+ netdev_tx_sent_queue(txq, skb->len);
+
+ tso_start(skb, &tso);
+ tcp_data = skb->len - hdr_len;
+ while (tcp_data > 0) {
+ char *hdr;
+
+ seg_len = min_t(int, skb_shinfo(skb)->gso_size, tcp_data);
+ tcp_data -= seg_len;
+
+ /* Set SQE's SEND_HDR */
+ memset(sq->sqe_base, 0, sq->sqe_size);
+ sqe_hdr = (struct nix_sqe_hdr_s *)(sq->sqe_base);
+ otx2_sqe_add_hdr(pfvf, sq, sqe_hdr, skb, qidx);
+ offset = sizeof(*sqe_hdr);
+
+ /* Add TSO segment's pkt header */
+ hdr = sq->tso_hdrs->base + (sq->head * TSO_HEADER_SIZE);
+ tso_build_hdr(skb, hdr, &tso, seg_len, tcp_data == 0);
+ list.dma_addr[0] =
+ sq->tso_hdrs->iova + (sq->head * TSO_HEADER_SIZE);
+ list.size[0] = hdr_len;
+ list.num_segs = 1;
+
+ /* Add TSO segment's payload data fragments */
+ pkt_len = hdr_len;
+ while (seg_len > 0) {
+ int size;
+
+ size = min_t(int, tso.size, seg_len);
+
+ list.size[list.num_segs] = size;
+ list.dma_addr[list.num_segs] =
+ otx2_tso_frag_dma_addr(sq, skb,
+ tso.next_frag_idx - 1,
+ (u64)tso.data, hdr_len,
+ first_sqe);
+ list.num_segs++;
+ pkt_len += size;
+ seg_len -= size;
+ tso_build_data(skb, &tso, size);
+ }
+ sqe_hdr->total = pkt_len;
+ otx2_sqe_tso_add_sg(sq, &list, &offset);
+
+		/* DMA mappings and the skb need to be freed only after
+		 * the last TSO segment is transmitted. So set 'PNC' only
+		 * for the last segment, and point its sqe_id to the first
+		 * segment's SQE index, where the skb address and DMA
+		 * mappings are saved.
+		 */
+ if (!tcp_data) {
+ sqe_hdr->pnc = 1;
+ sqe_hdr->sqe_id = first_sqe;
+ sq->sg[first_sqe].skb = (u64)skb;
+ } else {
+ sqe_hdr->pnc = 0;
+ }
+
+ sqe_hdr->sizem1 = (offset / 16) - 1;
+
+ /* Flush SQE to HW */
+ otx2_sqe_flush(sq, offset);
+ }
+}
+
+static bool is_hw_tso_supported(struct otx2_nic *pfvf,
+ struct sk_buff *skb)
+{
+ int payload_len, last_seg_size;
+
+ if (!pfvf->hw.hw_tso)
+ return false;
+
+	/* HW has an issue where, if the payload of the last LSO
+	 * segment is shorter than 16 bytes, some header fields may
+	 * not be modified correctly, so don't offload such TSO
+	 * segments.
+	 */
+ if (!is_96xx_B0(pfvf->pdev))
+ return true;
+
+ payload_len = skb->len - (skb_transport_offset(skb) + tcp_hdrlen(skb));
+ last_seg_size = payload_len % skb_shinfo(skb)->gso_size;
+ if (last_seg_size && last_seg_size < 16)
+ return false;
+
+ return true;
+}
+
+static int otx2_get_sqe_count(struct otx2_nic *pfvf, struct sk_buff *skb)
+{
+ if (!skb_shinfo(skb)->gso_size)
+ return 1;
+
+ /* HW TSO */
+ if (is_hw_tso_supported(pfvf, skb))
+ return 1;
+
+ /* SW TSO */
+ return skb_shinfo(skb)->gso_segs;
+}
+
+bool otx2_sq_append_skb(struct net_device *netdev, struct otx2_snd_queue *sq,
+ struct sk_buff *skb, u16 qidx)
+{
+ struct netdev_queue *txq = netdev_get_tx_queue(netdev, qidx);
+ struct otx2_nic *pfvf = netdev_priv(netdev);
+ int offset, num_segs, free_sqe;
+ struct nix_sqe_hdr_s *sqe_hdr;
+
+	/* Check if there is room for a new SQE.
+	 * (Total SQBs - SQBs in use, per the Aura count) * SQEs per SQB
+	 * gives the free SQE count.
+	 */
+ free_sqe = (sq->num_sqbs - *sq->aura_fc_addr) * sq->sqe_per_sqb;
+
+ if (free_sqe < sq->sqe_thresh ||
+ free_sqe < otx2_get_sqe_count(pfvf, skb))
+ return false;
+
+ num_segs = skb_shinfo(skb)->nr_frags + 1;
+
+ /* If SKB doesn't fit in a single SQE, linearize it.
+ * TODO: Consider adding JUMP descriptor instead.
+ */
+ if (unlikely(num_segs > OTX2_MAX_FRAGS_IN_SQE)) {
+ if (__skb_linearize(skb)) {
+ dev_kfree_skb_any(skb);
+ return true;
+ }
+ num_segs = skb_shinfo(skb)->nr_frags + 1;
+ }
+
+ if (skb_shinfo(skb)->gso_size && !is_hw_tso_supported(pfvf, skb)) {
+ otx2_sq_append_tso(pfvf, sq, skb, qidx);
+ return true;
+ }
+
+ /* Set SQE's SEND_HDR.
+ * Do not clear the first 64bit as it contains constant info.
+ */
+ memset(sq->sqe_base + 8, 0, sq->sqe_size - 8);
+ sqe_hdr = (struct nix_sqe_hdr_s *)(sq->sqe_base);
+ otx2_sqe_add_hdr(pfvf, sq, sqe_hdr, skb, qidx);
+ offset = sizeof(*sqe_hdr);
+
+ /* Add extended header if needed */
+ otx2_sqe_add_ext(pfvf, sq, skb, &offset);
+
+ /* Add SG subdesc with data frags */
+ if (!otx2_sqe_add_sg(pfvf, sq, skb, num_segs, &offset)) {
+ otx2_dma_unmap_skb_frags(pfvf, &sq->sg[sq->head]);
+ return false;
+ }
+
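+	/* sizem1 is the SQE size in 16byte chunks, minus one */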
+ sqe_hdr->sizem1 = (offset / 16) - 1;
+
+ netdev_tx_sent_queue(txq, skb->len);
+
+ /* Flush SQE to HW */
+ otx2_sqe_flush(sq, offset);
+
+ return true;
+}
+
+void otx2_cleanup_rx_cqes(struct otx2_nic *pfvf, struct otx2_cq_queue *cq)
+{
+ struct nix_cqe_rx_s *cqe;
+ int processed_cqe = 0;
+ u64 iova, pa;
+
+ while ((cqe = (struct nix_cqe_rx_s *)otx2_get_next_cqe(cq))) {
+ if (!cqe->sg.subdc)
+ continue;
+ iova = cqe->sg.seg_addr - OTX2_HEAD_ROOM;
+ pa = otx2_iova_to_phys(pfvf->iommu_domain, iova);
+ otx2_dma_unmap_page(pfvf, iova, pfvf->rbsize, DMA_FROM_DEVICE);
+ put_page(virt_to_page(phys_to_virt(pa)));
+ processed_cqe++;
+ }
+
+ /* Free CQEs to HW */
+ otx2_write64(pfvf, NIX_LF_CQ_OP_DOOR,
+ ((u64)cq->cq_idx << 32) | processed_cqe);
+}
+
+void otx2_cleanup_tx_cqes(struct otx2_nic *pfvf, struct otx2_cq_queue *cq)
+{
+ struct sk_buff *skb = NULL;
+ struct otx2_snd_queue *sq;
+ struct nix_cqe_tx_s *cqe;
+ int processed_cqe = 0;
+ struct sg_list *sg;
+
+ sq = &pfvf->qset.sq[cq->cint_idx];
+
+ while ((cqe = (struct nix_cqe_tx_s *)otx2_get_next_cqe(cq))) {
+ sg = &sq->sg[cqe->comp.sqe_id];
+ skb = (struct sk_buff *)sg->skb;
+ if (skb) {
+ otx2_dma_unmap_skb_frags(pfvf, sg);
+ dev_kfree_skb_any(skb);
+ sg->skb = (u64)NULL;
+ }
+ processed_cqe++;
+ }
+
+ /* Free CQEs to HW */
+ otx2_write64(pfvf, NIX_LF_CQ_OP_DOOR,
+ ((u64)cq->cq_idx << 32) | processed_cqe);
+}
+
+int otx2_rxtx_enable(struct otx2_nic *pfvf, bool enable)
+{
+ struct msg_req *msg;
+ int err;
+
+ otx2_mbox_lock(&pfvf->mbox);
+ if (enable)
+ msg = otx2_mbox_alloc_msg_nix_lf_start_rx(&pfvf->mbox);
+ else
+ msg = otx2_mbox_alloc_msg_nix_lf_stop_rx(&pfvf->mbox);
+
+ if (!msg) {
+ otx2_mbox_unlock(&pfvf->mbox);
+ return -ENOMEM;
+ }
+
+ err = otx2_sync_mbox_msg(&pfvf->mbox);
+ otx2_mbox_unlock(&pfvf->mbox);
+ return err;
+}
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.h b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.h
new file mode 100644
index 000000000000..4ab32d3adb78
--- /dev/null
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.h
@@ -0,0 +1,162 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Marvell OcteonTx2 RVU Ethernet driver
+ *
+ * Copyright (C) 2020 Marvell International Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef OTX2_TXRX_H
+#define OTX2_TXRX_H
+
+#include <linux/etherdevice.h>
+#include <linux/iommu.h>
+#include <linux/if_vlan.h>
+
+#define LBK_CHAN_BASE 0x000
+#define SDP_CHAN_BASE 0x700
+#define CGX_CHAN_BASE 0x800
+
+#define OTX2_DATA_ALIGN(X) ALIGN(X, OTX2_ALIGN)
+#define OTX2_HEAD_ROOM OTX2_ALIGN
+
+#define OTX2_ETH_HLEN (VLAN_ETH_HLEN + VLAN_HLEN)
+#define OTX2_MIN_MTU 64
+#define OTX2_MAX_MTU (9212 - OTX2_ETH_HLEN)
+
+#define OTX2_MAX_GSO_SEGS 255
+#define OTX2_MAX_FRAGS_IN_SQE 9
+
+/* Rx buffer size should be a multiple of 128 bytes */
+#define RCV_FRAG_LEN1(x) \
+ ((OTX2_HEAD_ROOM + OTX2_DATA_ALIGN(x)) + \
+ OTX2_DATA_ALIGN(sizeof(struct skb_shared_info)))
+
+/* Prefer 2048 byte buffers for better last level cache
+ * utilization or data distribution across regions.
+ */
+#define RCV_FRAG_LEN(x) \
+ ((RCV_FRAG_LEN1(x) < 2048) ? 2048 : RCV_FRAG_LEN1(x))
+
+#define DMA_BUFFER_LEN(x) \
+ ((x) - OTX2_HEAD_ROOM - \
+ OTX2_DATA_ALIGN(sizeof(struct skb_shared_info)))
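+
+/* DMA_BUFFER_LEN() is the inverse view of RCV_FRAG_LEN(): given a
+ * total buffer size it returns the bytes usable for packet data,
+ * i.e. the total minus the head room and the tail skb_shared_info
+ * area.
+ */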
+
+/* IRQ triggered when NIX_LF_CINTX_CNT[ECOUNT]
+ * is equal to this value.
+ */
+#define CQ_CQE_THRESH_DEFAULT 10
+
+/* IRQ triggered when NIX_LF_CINTX_CNT[ECOUNT]
+ * is nonzero and this much time elapses after that.
+ */
+#define CQ_TIMER_THRESH_DEFAULT 1 /* 1 usec */
+#define CQ_TIMER_THRESH_MAX 25 /* 25 usec */
+
+/* Min number of CQs (of the ones mapped to this CINT)
+ * with valid CQEs.
+ */
+#define CQ_QCOUNT_DEFAULT 1
+
+struct queue_stats {
+ u64 bytes;
+ u64 pkts;
+};
+
+struct otx2_rcv_queue {
+ struct queue_stats stats;
+};
+
+struct sg_list {
+ u16 num_segs;
+ u64 skb;
+ u64 size[OTX2_MAX_FRAGS_IN_SQE];
+ u64 dma_addr[OTX2_MAX_FRAGS_IN_SQE];
+};
+
+struct otx2_snd_queue {
+ u8 aura_id;
+ u16 head;
+ u16 sqe_size;
+ u32 sqe_cnt;
+ u16 num_sqbs;
+ u16 sqe_thresh;
+ u8 sqe_per_sqb;
+ u64 io_addr;
+ u64 *aura_fc_addr;
+ u64 *lmt_addr;
+ void *sqe_base;
+ struct qmem *sqe;
+ struct qmem *tso_hdrs;
+ struct sg_list *sg;
+ struct queue_stats stats;
+ u16 sqb_count;
+ u64 *sqb_ptrs;
+} ____cacheline_aligned_in_smp;
+
+enum cq_type {
+ CQ_RX,
+ CQ_TX,
+ CQS_PER_CINT = 2, /* RQ + SQ */
+};
+
+struct otx2_cq_poll {
+ void *dev;
+#define CINT_INVALID_CQ 255
+ u8 cint_idx;
+ u8 cq_ids[CQS_PER_CINT];
+ struct napi_struct napi;
+};
+
+struct otx2_pool {
+ struct qmem *stack;
+ struct qmem *fc_addr;
+ u8 rbpage_order;
+ u16 rbsize;
+ u32 page_offset;
+ u16 pageref;
+ struct page *page;
+};
+
+struct otx2_cq_queue {
+ u8 cq_idx;
+ u8 cq_type;
+ u8 cint_idx; /* CQ interrupt id */
+ u8 refill_task_sched;
+ u16 cqe_size;
+ u16 pool_ptrs;
+ u32 cqe_cnt;
+ u32 cq_head;
+ void *cqe_base;
+ struct qmem *cqe;
+ struct otx2_pool *rbpool;
+} ____cacheline_aligned_in_smp;
+
+struct otx2_qset {
+ u32 rqe_cnt;
+ u32 sqe_cnt; /* Keep these two at top */
+#define OTX2_MAX_CQ_CNT 64
+ u16 cq_cnt;
+ u16 xqe_size;
+ struct otx2_pool *pool;
+ struct otx2_cq_poll *napi;
+ struct otx2_cq_queue *cq;
+ struct otx2_snd_queue *sq;
+ struct otx2_rcv_queue *rq;
+};
+
+/* Translate IOVA to physical address */
+static inline u64 otx2_iova_to_phys(void *iommu_domain, dma_addr_t dma_addr)
+{
+ /* Translation is installed only when IOMMU is present */
+ if (likely(iommu_domain))
+ return iommu_iova_to_phys(iommu_domain, dma_addr);
+ return dma_addr;
+}
+
+int otx2_napi_handler(struct napi_struct *napi, int budget);
+bool otx2_sq_append_skb(struct net_device *netdev, struct otx2_snd_queue *sq,
+ struct sk_buff *skb, u16 qidx);
+#endif /* OTX2_TXRX_H */
diff --git a/drivers/net/ethernet/marvell/pxa168_eth.c b/drivers/net/ethernet/marvell/pxa168_eth.c
index 3fb7ee3d4d13..7a0d785b826c 100644
--- a/drivers/net/ethernet/marvell/pxa168_eth.c
+++ b/drivers/net/ethernet/marvell/pxa168_eth.c
@@ -742,7 +742,7 @@ txq_reclaim_end:
return released;
}
-static void pxa168_eth_tx_timeout(struct net_device *dev)
+static void pxa168_eth_tx_timeout(struct net_device *dev, unsigned int txqueue)
{
struct pxa168_eth_private *pep = netdev_priv(dev);
@@ -1344,15 +1344,6 @@ static int pxa168_smi_write(struct mii_bus *bus, int phy_addr, int regnum,
return 0;
}
-static int pxa168_eth_do_ioctl(struct net_device *dev, struct ifreq *ifr,
- int cmd)
-{
- if (dev->phydev)
- return phy_mii_ioctl(dev->phydev, ifr, cmd);
-
- return -EOPNOTSUPP;
-}
-
#ifdef CONFIG_NET_POLL_CONTROLLER
static void pxa168_eth_netpoll(struct net_device *dev)
{
@@ -1387,7 +1378,7 @@ static const struct net_device_ops pxa168_eth_netdev_ops = {
.ndo_set_rx_mode = pxa168_eth_set_rx_mode,
.ndo_set_mac_address = pxa168_eth_set_mac_address,
.ndo_validate_addr = eth_validate_addr,
- .ndo_do_ioctl = pxa168_eth_do_ioctl,
+ .ndo_do_ioctl = phy_do_ioctl,
.ndo_change_mtu = pxa168_eth_change_mtu,
.ndo_tx_timeout = pxa168_eth_tx_timeout,
#ifdef CONFIG_NET_POLL_CONTROLLER
diff --git a/drivers/net/ethernet/marvell/skge.c b/drivers/net/ethernet/marvell/skge.c
index 095f6c71b4fa..97f270d30cce 100644
--- a/drivers/net/ethernet/marvell/skge.c
+++ b/drivers/net/ethernet/marvell/skge.c
@@ -2884,7 +2884,7 @@ static void skge_tx_clean(struct net_device *dev)
skge->tx_ring.to_clean = e;
}
-static void skge_tx_timeout(struct net_device *dev)
+static void skge_tx_timeout(struct net_device *dev, unsigned int txqueue)
{
struct skge_port *skge = netdev_priv(dev);
@@ -3932,7 +3932,7 @@ static int skge_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
spin_lock_init(&hw->phy_lock);
tasklet_init(&hw->phy_task, skge_extirq, (unsigned long) hw);
- hw->regs = ioremap_nocache(pci_resource_start(pdev, 0), 0x4000);
+ hw->regs = ioremap(pci_resource_start(pdev, 0), 0x4000);
if (!hw->regs) {
dev_err(&pdev->dev, "cannot map device registers\n");
goto err_out_free_hw;
diff --git a/drivers/net/ethernet/marvell/sky2.c b/drivers/net/ethernet/marvell/sky2.c
index 5f56ee83e3b1..ebfd0ceac884 100644
--- a/drivers/net/ethernet/marvell/sky2.c
+++ b/drivers/net/ethernet/marvell/sky2.c
@@ -2358,7 +2358,7 @@ static void sky2_qlink_intr(struct sky2_hw *hw)
/* Transmit timeout is only called if we are running, carrier is up
* and tx queue is full (stopped).
*/
-static void sky2_tx_timeout(struct net_device *dev)
+static void sky2_tx_timeout(struct net_device *dev, unsigned int txqueue)
{
struct sky2_port *sky2 = netdev_priv(dev);
struct sky2_hw *hw = sky2->hw;
@@ -5022,7 +5022,7 @@ static int sky2_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
hw->pdev = pdev;
sprintf(hw->irq_name, DRV_NAME "@pci:%s", pci_name(pdev));
- hw->regs = ioremap_nocache(pci_resource_start(pdev, 0), 0x4000);
+ hw->regs = ioremap(pci_resource_start(pdev, 0), 0x4000);
if (!hw->regs) {
dev_err(&pdev->dev, "cannot map device registers\n");
goto err_out_free_hw;
diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.c b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
index 527ad2aadcca..8c6cfd15481c 100644
--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c
+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
@@ -2081,7 +2081,7 @@ static void mtk_dma_free(struct mtk_eth *eth)
kfree(eth->scratch_head);
}
-static void mtk_tx_timeout(struct net_device *dev)
+static void mtk_tx_timeout(struct net_device *dev, unsigned int txqueue)
{
struct mtk_mac *mac = netdev_priv(dev);
struct mtk_eth *eth = mac->hw;
diff --git a/drivers/net/ethernet/mellanox/mlx4/crdump.c b/drivers/net/ethernet/mellanox/mlx4/crdump.c
index eaf08f7ad128..64ed725aec28 100644
--- a/drivers/net/ethernet/mellanox/mlx4/crdump.c
+++ b/drivers/net/ethernet/mellanox/mlx4/crdump.c
@@ -182,7 +182,7 @@ int mlx4_crdump_collect(struct mlx4_dev *dev)
crdump_enable_crspace_access(dev, cr_space);
/* Get the available snapshot ID for the dumps */
- id = devlink_region_shapshot_id_get(devlink);
+ id = devlink_region_snapshot_id_get(devlink);
/* Try to capture dumps */
mlx4_crdump_collect_crspace(dev, cr_space, id);
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
index 7af75b63245f..43dcbd8214c6 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
@@ -1363,24 +1363,18 @@ static void mlx4_en_delete_rss_steer_rules(struct mlx4_en_priv *priv)
}
}
-static void mlx4_en_tx_timeout(struct net_device *dev)
+static void mlx4_en_tx_timeout(struct net_device *dev, unsigned int txqueue)
{
struct mlx4_en_priv *priv = netdev_priv(dev);
struct mlx4_en_dev *mdev = priv->mdev;
- int i;
+ struct mlx4_en_tx_ring *tx_ring = priv->tx_ring[TX][txqueue];
if (netif_msg_timer(priv))
en_warn(priv, "Tx timeout called on port:%d\n", priv->port);
- for (i = 0; i < priv->tx_ring_num[TX]; i++) {
- struct mlx4_en_tx_ring *tx_ring = priv->tx_ring[TX][i];
-
- if (!netif_tx_queue_stopped(netdev_get_tx_queue(dev, i)))
- continue;
- en_warn(priv, "TX timeout on queue: %d, QP: 0x%x, CQ: 0x%x, Cons: 0x%x, Prod: 0x%x\n",
- i, tx_ring->qpn, tx_ring->sp_cqn,
- tx_ring->cons, tx_ring->prod);
- }
+ en_warn(priv, "TX timeout on queue: %d, QP: 0x%x, CQ: 0x%x, Cons: 0x%x, Prod: 0x%x\n",
+ txqueue, tx_ring->qpn, tx_ring->sp_cqn,
+ tx_ring->cons, tx_ring->prod);
priv->port_stats.tx_timeout++;
en_dbg(DRV, priv, "Scheduling watchdog\n");
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/Makefile b/drivers/net/ethernet/mellanox/mlx5/core/Makefile
index a6f390fdb971..d3e06cec8317 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/Makefile
+++ b/drivers/net/ethernet/mellanox/mlx5/core/Makefile
@@ -42,7 +42,7 @@ mlx5_core-$(CONFIG_PCI_HYPERV_INTERFACE) += en/hv_vhca_stats.o
# Core extra
#
mlx5_core-$(CONFIG_MLX5_ESWITCH) += eswitch.o eswitch_offloads.o eswitch_offloads_termtbl.o \
- ecpf.o rdma.o
+ ecpf.o rdma.o eswitch_offloads_chains.o
mlx5_core-$(CONFIG_MLX5_MPFS) += lib/mpfs.o
mlx5_core-$(CONFIG_VXLAN) += lib/vxlan.o
mlx5_core-$(CONFIG_PTP_1588_CLOCK) += lib/clock.o
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/alloc.c b/drivers/net/ethernet/mellanox/mlx5/core/alloc.c
index 549f962cd86e..42198e64a7f4 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/alloc.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/alloc.c
@@ -71,8 +71,8 @@ static void *mlx5_dma_zalloc_coherent_node(struct mlx5_core_dev *dev,
return cpu_handle;
}
-int mlx5_buf_alloc_node(struct mlx5_core_dev *dev, int size,
- struct mlx5_frag_buf *buf, int node)
+static int mlx5_buf_alloc_node(struct mlx5_core_dev *dev, int size,
+ struct mlx5_frag_buf *buf, int node)
{
dma_addr_t t;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en.h b/drivers/net/ethernet/mellanox/mlx5/core/en.h
index 9c8427698238..220ef9f06f84 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h
@@ -135,7 +135,7 @@ struct page_pool;
#define MLX5E_LOG_INDIR_RQT_SIZE 0x7
#define MLX5E_INDIR_RQT_SIZE BIT(MLX5E_LOG_INDIR_RQT_SIZE)
#define MLX5E_MIN_NUM_CHANNELS 0x1
-#define MLX5E_MAX_NUM_CHANNELS (MLX5E_INDIR_RQT_SIZE >> 1)
+#define MLX5E_MAX_NUM_CHANNELS MLX5E_INDIR_RQT_SIZE
#define MLX5E_MAX_NUM_SQS (MLX5E_MAX_NUM_CHANNELS * MLX5E_MAX_NUM_TC)
#define MLX5E_TX_CQ_POLL_BUDGET 128
#define MLX5E_TX_XSK_POLL_BUDGET 64
@@ -892,6 +892,8 @@ struct mlx5e_profile {
int (*update_rx)(struct mlx5e_priv *priv);
void (*update_stats)(struct mlx5e_priv *priv);
void (*update_carrier)(struct mlx5e_priv *priv);
+ unsigned int (*stats_grps_num)(struct mlx5e_priv *priv);
+ mlx5e_stats_grp_t *stats_grps;
struct {
mlx5e_fp_handle_rx_cqe handle_rx_cqe;
mlx5e_fp_handle_rx_cqe handle_rx_cqe_mpwqe;
@@ -964,7 +966,6 @@ struct sk_buff *
mlx5e_skb_from_cqe_nonlinear(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe,
struct mlx5e_wqe_frag_info *wi, u32 cqe_bcnt);
-void mlx5e_update_stats(struct mlx5e_priv *priv);
void mlx5e_get_stats(struct net_device *dev, struct rtnl_link_stats64 *stats);
void mlx5e_fold_sw_stats64(struct mlx5e_priv *priv, struct rtnl_link_stats64 *s);
@@ -1175,11 +1176,11 @@ int mlx5e_attach_netdev(struct mlx5e_priv *priv);
void mlx5e_detach_netdev(struct mlx5e_priv *priv);
void mlx5e_destroy_netdev(struct mlx5e_priv *priv);
void mlx5e_set_netdev_mtu_boundaries(struct mlx5e_priv *priv);
-void mlx5e_build_nic_params(struct mlx5_core_dev *mdev,
+void mlx5e_build_nic_params(struct mlx5e_priv *priv,
struct mlx5e_xsk *xsk,
struct mlx5e_rss_params *rss_params,
struct mlx5e_params *params,
- u16 max_channels, u16 mtu);
+ u16 mtu);
void mlx5e_build_rq_params(struct mlx5_core_dev *mdev,
struct mlx5e_params *params);
void mlx5e_build_rss_params(struct mlx5e_rss_params *rss_params,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/fs.h b/drivers/net/ethernet/mellanox/mlx5/core/en/fs.h
index d48292ccda29..0416f7712109 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/fs.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/fs.h
@@ -21,6 +21,7 @@ struct mlx5e_tc_table {
DECLARE_HASHTABLE(hairpin_tbl, 8);
struct notifier_block netdevice_nb;
+ struct netdev_net_notifier netdevice_nn;
};
struct mlx5e_flow_table {
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/rx.c
index 475b6bd5d29b..62fc8a128a8d 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/rx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/rx.c
@@ -35,7 +35,7 @@ int mlx5e_xsk_page_alloc_umem(struct mlx5e_rq *rq,
*/
dma_info->addr = xdp_umem_get_dma(umem, handle);
- xsk_umem_discard_addr_rq(umem);
+ xsk_umem_release_addr_rq(umem);
dma_sync_single_for_device(rq->pdev, dma_info->addr, PAGE_SIZE,
DMA_BIDIRECTIONAL);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c
index 778dab1af8fc..f260dd96873b 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c
@@ -180,7 +180,7 @@ mlx5e_ktls_tx_post_param_wqes(struct mlx5e_txqsq *sq,
struct tx_sync_info {
u64 rcd_sn;
- s32 sync_len;
+ u32 sync_len;
int nr_frags;
skb_frag_t frags[MAX_SKB_FRAGS];
};
@@ -193,13 +193,14 @@ enum mlx5e_ktls_sync_retval {
static enum mlx5e_ktls_sync_retval
tx_sync_info_get(struct mlx5e_ktls_offload_context_tx *priv_tx,
- u32 tcp_seq, struct tx_sync_info *info)
+ u32 tcp_seq, int datalen, struct tx_sync_info *info)
{
struct tls_offload_context_tx *tx_ctx = priv_tx->tx_ctx;
enum mlx5e_ktls_sync_retval ret = MLX5E_KTLS_SYNC_DONE;
struct tls_record_info *record;
int remaining, i = 0;
unsigned long flags;
+ bool ends_before;
spin_lock_irqsave(&tx_ctx->lock, flags);
record = tls_get_record(tx_ctx, tcp_seq, &info->rcd_sn);
@@ -209,9 +210,21 @@ tx_sync_info_get(struct mlx5e_ktls_offload_context_tx *priv_tx,
goto out;
}
- if (unlikely(tcp_seq < tls_record_start_seq(record))) {
- ret = tls_record_is_start_marker(record) ?
- MLX5E_KTLS_SYNC_SKIP_NO_DATA : MLX5E_KTLS_SYNC_FAIL;
+ /* There are the following cases:
+ * 1. packet ends before start marker: bypass offload.
+ * 2. packet starts before start marker and ends after it: drop,
+ * not supported, breaks contract with kernel.
+ * 3. packet ends before tls record info starts: drop,
+ * this packet was already acknowledged and its record info
+ * was released.
+ */
+ ends_before = before(tcp_seq + datalen, tls_record_start_seq(record));
+
+ if (unlikely(tls_record_is_start_marker(record))) {
+ ret = ends_before ? MLX5E_KTLS_SYNC_SKIP_NO_DATA : MLX5E_KTLS_SYNC_FAIL;
+ goto out;
+ } else if (ends_before) {
+ ret = MLX5E_KTLS_SYNC_FAIL;
goto out;
}
@@ -337,7 +350,7 @@ mlx5e_ktls_tx_handle_ooo(struct mlx5e_ktls_offload_context_tx *priv_tx,
u8 num_wqebbs;
int i = 0;
- ret = tx_sync_info_get(priv_tx, seq, &info);
+ ret = tx_sync_info_get(priv_tx, seq, datalen, &info);
if (unlikely(ret != MLX5E_KTLS_SYNC_DONE)) {
if (ret == MLX5E_KTLS_SYNC_SKIP_NO_DATA) {
stats->tls_skip_no_sync_data++;
@@ -351,14 +364,6 @@ mlx5e_ktls_tx_handle_ooo(struct mlx5e_ktls_offload_context_tx *priv_tx,
goto err_out;
}
- if (unlikely(info.sync_len < 0)) {
- if (likely(datalen <= -info.sync_len))
- return MLX5E_KTLS_SYNC_DONE;
-
- stats->tls_drop_bypass_req++;
- goto err_out;
- }
-
stats->tls_ooo++;
tx_post_resync_params(sq, priv_tx, info.rcd_sn);
@@ -378,8 +383,6 @@ mlx5e_ktls_tx_handle_ooo(struct mlx5e_ktls_offload_context_tx *priv_tx,
if (unlikely(contig_wqebbs_room < num_wqebbs))
mlx5e_fill_sq_frag_edge(sq, wq, pi, contig_wqebbs_room);
- tx_post_resync_params(sq, priv_tx, info.rcd_sn);
-
for (; i < info.nr_frags; i++) {
unsigned int orig_fsz, frag_offset = 0, n = 0;
skb_frag_t *f = &info.frags[i];
@@ -455,12 +458,18 @@ struct sk_buff *mlx5e_ktls_handle_tx_skb(struct net_device *netdev,
enum mlx5e_ktls_sync_retval ret =
mlx5e_ktls_tx_handle_ooo(priv_tx, sq, datalen, seq);
- if (likely(ret == MLX5E_KTLS_SYNC_DONE))
+ switch (ret) {
+ case MLX5E_KTLS_SYNC_DONE:
*wqe = mlx5e_sq_fetch_wqe(sq, sizeof(**wqe), pi);
- else if (ret == MLX5E_KTLS_SYNC_FAIL)
+ break;
+ case MLX5E_KTLS_SYNC_SKIP_NO_DATA:
+ if (likely(!skb->decrypted))
+ goto out;
+ WARN_ON_ONCE(1);
+ /* fall-through */
+ default: /* MLX5E_KTLS_SYNC_FAIL */
goto err_out;
- else /* ret == MLX5E_KTLS_SYNC_SKIP_NO_DATA */
- goto out;
+ }
}
priv_tx->expected_seq = seq + datalen;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
index c6776f308d5e..d674cb679895 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
@@ -218,13 +218,9 @@ static const struct pflag_desc mlx5e_priv_flags[MLX5E_NUM_PFLAGS];
int mlx5e_ethtool_get_sset_count(struct mlx5e_priv *priv, int sset)
{
- int i, num_stats = 0;
-
switch (sset) {
case ETH_SS_STATS:
- for (i = 0; i < mlx5e_num_stats_grps; i++)
- num_stats += mlx5e_stats_grps[i].get_num_stats(priv);
- return num_stats;
+ return mlx5e_stats_total_num(priv);
case ETH_SS_PRIV_FLAGS:
return MLX5E_NUM_PFLAGS;
case ETH_SS_TEST:
@@ -242,14 +238,6 @@ static int mlx5e_get_sset_count(struct net_device *dev, int sset)
return mlx5e_ethtool_get_sset_count(priv, sset);
}
-static void mlx5e_fill_stats_strings(struct mlx5e_priv *priv, u8 *data)
-{
- int i, idx = 0;
-
- for (i = 0; i < mlx5e_num_stats_grps; i++)
- idx = mlx5e_stats_grps[i].fill_strings(priv, data, idx);
-}
-
void mlx5e_ethtool_get_strings(struct mlx5e_priv *priv, u32 stringset, u8 *data)
{
int i;
@@ -268,7 +256,7 @@ void mlx5e_ethtool_get_strings(struct mlx5e_priv *priv, u32 stringset, u8 *data)
break;
case ETH_SS_STATS:
- mlx5e_fill_stats_strings(priv, data);
+ mlx5e_stats_fill_strings(priv, data);
break;
}
}
@@ -283,14 +271,13 @@ static void mlx5e_get_strings(struct net_device *dev, u32 stringset, u8 *data)
void mlx5e_ethtool_get_ethtool_stats(struct mlx5e_priv *priv,
struct ethtool_stats *stats, u64 *data)
{
- int i, idx = 0;
+ int idx = 0;
mutex_lock(&priv->state_lock);
- mlx5e_update_stats(priv);
+ mlx5e_stats_update(priv);
mutex_unlock(&priv->state_lock);
- for (i = 0; i < mlx5e_num_stats_grps; i++)
- idx = mlx5e_stats_grps[i].fill_stats(priv, data, idx);
+ mlx5e_stats_fill(priv, data, idx);
}
static void mlx5e_get_ethtool_stats(struct net_device *dev,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_fs_ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/en_fs_ethtool.c
index acd946f2ddbe..3bc2ac3d53fc 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_fs_ethtool.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_fs_ethtool.c
@@ -58,6 +58,7 @@ static struct mlx5e_ethtool_table *get_flow_table(struct mlx5e_priv *priv,
struct ethtool_rx_flow_spec *fs,
int num_tuples)
{
+ struct mlx5_flow_table_attr ft_attr = {};
struct mlx5e_ethtool_table *eth_ft;
struct mlx5_flow_namespace *ns;
struct mlx5_flow_table *ft;
@@ -102,9 +103,11 @@ static struct mlx5e_ethtool_table *get_flow_table(struct mlx5e_priv *priv,
table_size = min_t(u32, BIT(MLX5_CAP_FLOWTABLE(priv->mdev,
flow_table_properties_nic_receive.log_max_ft_size)),
MLX5E_ETHTOOL_NUM_ENTRIES);
- ft = mlx5_create_auto_grouped_flow_table(ns, prio,
- table_size,
- MLX5E_ETHTOOL_NUM_GROUPS, 0, 0);
+
+ ft_attr.prio = prio;
+ ft_attr.max_fte = table_size;
+ ft_attr.autogroup.max_num_groups = MLX5E_ETHTOOL_NUM_GROUPS;
+ ft = mlx5_create_auto_grouped_flow_table(ns, &ft_attr);
if (IS_ERR(ft))
return (void *)ft;
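
This is the first of several call sites in the series converted from positional arguments to a zero-initialized attribute struct: unset fields keep their defaults, and new fields can be added later without touching every caller. A minimal sketch of the pattern with stand-in names (not the mlx5 API):

#include <stdio.h>

struct table_attr {
	int prio;
	int max_fte;
	int level;
	struct {
		int max_num_groups;
	} autogroup;
};

static void create_table(const struct table_attr *attr)
{
	printf("prio=%d max_fte=%d groups=%d level=%d\n",
	       attr->prio, attr->max_fte,
	       attr->autogroup.max_num_groups, attr->level);
}

int main(void)
{
	/* Fields left unset (here: level) default to zero, exactly like
	 * the `= {}` initializers in the hunks above. */
	struct table_attr attr = {
		.prio = 1,
		.max_fte = 1024,
		.autogroup.max_num_groups = 10,
	};

	create_table(&attr);
	return 0;
}
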
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
index 4997b8a51994..454d3459bd8b 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
@@ -159,23 +159,14 @@ static void mlx5e_update_carrier_work(struct work_struct *work)
mutex_unlock(&priv->state_lock);
}
-void mlx5e_update_stats(struct mlx5e_priv *priv)
-{
- int i;
-
- for (i = mlx5e_num_stats_grps - 1; i >= 0; i--)
- if (mlx5e_stats_grps[i].update_stats)
- mlx5e_stats_grps[i].update_stats(priv);
-}
-
void mlx5e_update_ndo_stats(struct mlx5e_priv *priv)
{
int i;
- for (i = mlx5e_num_stats_grps - 1; i >= 0; i--)
- if (mlx5e_stats_grps[i].update_stats_mask &
+ for (i = mlx5e_nic_stats_grps_num(priv) - 1; i >= 0; i--)
+ if (mlx5e_nic_stats_grps[i]->update_stats_mask &
MLX5E_NDO_UPDATE_STATS)
- mlx5e_stats_grps[i].update_stats(priv);
+ mlx5e_nic_stats_grps[i]->update_stats(priv);
}
static void mlx5e_update_stats_work(struct work_struct *work)
@@ -4325,7 +4316,7 @@ unlock:
rtnl_unlock();
}
-static void mlx5e_tx_timeout(struct net_device *dev)
+static void mlx5e_tx_timeout(struct net_device *dev, unsigned int txqueue)
{
struct mlx5e_priv *priv = netdev_priv(dev);
@@ -4739,17 +4730,19 @@ void mlx5e_build_rss_params(struct mlx5e_rss_params *rss_params,
tirc_default_config[tt].rx_hash_fields;
}
-void mlx5e_build_nic_params(struct mlx5_core_dev *mdev,
+void mlx5e_build_nic_params(struct mlx5e_priv *priv,
struct mlx5e_xsk *xsk,
struct mlx5e_rss_params *rss_params,
struct mlx5e_params *params,
- u16 max_channels, u16 mtu)
+ u16 mtu)
{
+ struct mlx5_core_dev *mdev = priv->mdev;
u8 rx_cq_period_mode;
params->sw_mtu = mtu;
params->hard_mtu = MLX5E_ETH_HARD_MTU;
- params->num_channels = max_channels;
+ params->num_channels = min_t(unsigned int, MLX5E_MAX_NUM_CHANNELS / 2,
+ priv->max_nch);
params->num_tc = 1;
/* SQ */
@@ -4876,6 +4869,8 @@ static void mlx5e_build_nic_netdev(struct net_device *netdev)
netdev->hw_enc_features |= NETIF_F_GSO_UDP_TUNNEL |
NETIF_F_GSO_UDP_TUNNEL_CSUM;
netdev->gso_partial_features = NETIF_F_GSO_UDP_TUNNEL_CSUM;
+ netdev->vlan_features |= NETIF_F_GSO_UDP_TUNNEL |
+ NETIF_F_GSO_UDP_TUNNEL_CSUM;
}
if (mlx5e_tunnel_proto_supported(mdev, IPPROTO_GRE)) {
@@ -4986,8 +4981,8 @@ static int mlx5e_nic_init(struct mlx5_core_dev *mdev,
if (err)
return err;
- mlx5e_build_nic_params(mdev, &priv->xsk, rss, &priv->channels.params,
- priv->max_nch, netdev->mtu);
+ mlx5e_build_nic_params(priv, &priv->xsk, rss, &priv->channels.params,
+ netdev->mtu);
mlx5e_timestamp_init(priv);
@@ -5149,6 +5144,7 @@ static void mlx5e_nic_enable(struct mlx5e_priv *priv)
static void mlx5e_nic_disable(struct mlx5e_priv *priv)
{
+ struct net_device *netdev = priv->netdev;
struct mlx5_core_dev *mdev = priv->mdev;
#ifdef CONFIG_MLX5_CORE_EN_DCB
@@ -5169,7 +5165,7 @@ static void mlx5e_nic_disable(struct mlx5e_priv *priv)
mlx5e_monitor_counter_cleanup(priv);
mlx5e_disable_async_events(priv);
- mlx5_lag_remove(mdev);
+ mlx5_lag_remove(mdev, netdev);
}
int mlx5e_update_nic_rx(struct mlx5e_priv *priv)
@@ -5193,6 +5189,8 @@ static const struct mlx5e_profile mlx5e_nic_profile = {
.rx_handlers.handle_rx_cqe_mpwqe = mlx5e_handle_rx_cqe_mpwrq,
.max_tc = MLX5E_MAX_NUM_TC,
.rq_groups = MLX5E_NUM_RQ_GROUPS(XSK),
+ .stats_grps = mlx5e_nic_stats_grps,
+ .stats_grps_num = mlx5e_nic_stats_grps_num,
};
/* mlx5e generic netdev management API (move to en_common.c) */
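
The default channel count formerly equaled priv->max_nch; it is now capped at half the driver maximum. A quick numeric check, assuming a ceiling of 128 for illustration (the real MLX5E_MAX_NUM_CHANNELS is defined in en.h and may differ):

#include <stdio.h>

#define MAX_NUM_CHANNELS 128	/* assumed value, for this example only */

static unsigned int min_uint(unsigned int a, unsigned int b)
{
	return a < b ? a : b;
}

int main(void)
{
	unsigned int max_nch;

	for (max_nch = 8; max_nch <= 128; max_nch *= 4)
		printf("max_nch=%3u -> num_channels=%u\n", max_nch,
		       min_uint(MAX_NUM_CHANNELS / 2, max_nch));
	/* 8 -> 8, 32 -> 32, 128 -> 64: small systems are unaffected,
	 * large ones no longer open a channel per available vector. */
	return 0;
}
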
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
index f175cb24bb67..7b48ccacebe2 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
@@ -41,6 +41,7 @@
#include <net/ipv6_stubs.h>
#include "eswitch.h"
+#include "eswitch_offloads_chains.h"
#include "en.h"
#include "en_rep.h"
#include "en_tc.h"
@@ -116,24 +117,71 @@ static const struct counter_desc vport_rep_stats_desc[] = {
#define NUM_VPORT_REP_SW_COUNTERS ARRAY_SIZE(sw_rep_stats_desc)
#define NUM_VPORT_REP_HW_COUNTERS ARRAY_SIZE(vport_rep_stats_desc)
-static void mlx5e_rep_get_strings(struct net_device *dev,
- u32 stringset, uint8_t *data)
+static MLX5E_DECLARE_STATS_GRP_OP_NUM_STATS(sw_rep)
{
- int i, j;
+ return NUM_VPORT_REP_SW_COUNTERS;
+}
- switch (stringset) {
- case ETH_SS_STATS:
- for (i = 0; i < NUM_VPORT_REP_SW_COUNTERS; i++)
- strcpy(data + (i * ETH_GSTRING_LEN),
- sw_rep_stats_desc[i].format);
- for (j = 0; j < NUM_VPORT_REP_HW_COUNTERS; j++, i++)
- strcpy(data + (i * ETH_GSTRING_LEN),
- vport_rep_stats_desc[j].format);
- break;
- }
+static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(sw_rep)
+{
+ int i;
+
+ for (i = 0; i < NUM_VPORT_REP_SW_COUNTERS; i++)
+ strcpy(data + (idx++) * ETH_GSTRING_LEN,
+ sw_rep_stats_desc[i].format);
+ return idx;
+}
+
+static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(sw_rep)
+{
+ int i;
+
+ for (i = 0; i < NUM_VPORT_REP_SW_COUNTERS; i++)
+ data[idx++] = MLX5E_READ_CTR64_CPU(&priv->stats.sw,
+ sw_rep_stats_desc, i);
+ return idx;
+}
+
+static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(sw_rep)
+{
+ struct mlx5e_sw_stats *s = &priv->stats.sw;
+ struct rtnl_link_stats64 stats64 = {};
+
+ memset(s, 0, sizeof(*s));
+ mlx5e_fold_sw_stats64(priv, &stats64);
+
+ s->rx_packets = stats64.rx_packets;
+ s->rx_bytes = stats64.rx_bytes;
+ s->tx_packets = stats64.tx_packets;
+ s->tx_bytes = stats64.tx_bytes;
+ s->tx_queue_dropped = stats64.tx_dropped;
+}
+
+static MLX5E_DECLARE_STATS_GRP_OP_NUM_STATS(vport_rep)
+{
+ return NUM_VPORT_REP_HW_COUNTERS;
+}
+
+static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(vport_rep)
+{
+ int i;
+
+ for (i = 0; i < NUM_VPORT_REP_HW_COUNTERS; i++)
+ strcpy(data + (idx++) * ETH_GSTRING_LEN, vport_rep_stats_desc[i].format);
+ return idx;
}
-static void mlx5e_rep_update_hw_counters(struct mlx5e_priv *priv)
+static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(vport_rep)
+{
+ int i;
+
+ for (i = 0; i < NUM_VPORT_REP_HW_COUNTERS; i++)
+ data[idx++] = MLX5E_READ_CTR64_CPU(&priv->stats.vf_vport,
+ vport_rep_stats_desc, i);
+ return idx;
+}
+
+static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(vport_rep)
{
struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
struct mlx5e_rep_priv *rpriv = priv->ppriv;
@@ -156,64 +204,33 @@ static void mlx5e_rep_update_hw_counters(struct mlx5e_priv *priv)
vport_stats->tx_bytes = vf_stats.rx_bytes;
}
-static void mlx5e_uplink_rep_update_hw_counters(struct mlx5e_priv *priv)
-{
- struct mlx5e_pport_stats *pstats = &priv->stats.pport;
- struct rtnl_link_stats64 *vport_stats;
-
- mlx5e_grp_802_3_update_stats(priv);
-
- vport_stats = &priv->stats.vf_vport;
-
- vport_stats->rx_packets = PPORT_802_3_GET(pstats, a_frames_received_ok);
- vport_stats->rx_bytes = PPORT_802_3_GET(pstats, a_octets_received_ok);
- vport_stats->tx_packets = PPORT_802_3_GET(pstats, a_frames_transmitted_ok);
- vport_stats->tx_bytes = PPORT_802_3_GET(pstats, a_octets_transmitted_ok);
-}
-
-static void mlx5e_rep_update_sw_counters(struct mlx5e_priv *priv)
+static void mlx5e_rep_get_strings(struct net_device *dev,
+ u32 stringset, uint8_t *data)
{
- struct mlx5e_sw_stats *s = &priv->stats.sw;
- struct rtnl_link_stats64 stats64 = {};
-
- memset(s, 0, sizeof(*s));
- mlx5e_fold_sw_stats64(priv, &stats64);
+ struct mlx5e_priv *priv = netdev_priv(dev);
- s->rx_packets = stats64.rx_packets;
- s->rx_bytes = stats64.rx_bytes;
- s->tx_packets = stats64.tx_packets;
- s->tx_bytes = stats64.tx_bytes;
- s->tx_queue_dropped = stats64.tx_dropped;
+ switch (stringset) {
+ case ETH_SS_STATS:
+ mlx5e_stats_fill_strings(priv, data);
+ break;
+ }
}
static void mlx5e_rep_get_ethtool_stats(struct net_device *dev,
struct ethtool_stats *stats, u64 *data)
{
struct mlx5e_priv *priv = netdev_priv(dev);
- int i, j;
-
- if (!data)
- return;
-
- mutex_lock(&priv->state_lock);
- mlx5e_rep_update_sw_counters(priv);
- priv->profile->update_stats(priv);
- mutex_unlock(&priv->state_lock);
- for (i = 0; i < NUM_VPORT_REP_SW_COUNTERS; i++)
- data[i] = MLX5E_READ_CTR64_CPU(&priv->stats.sw,
- sw_rep_stats_desc, i);
-
- for (j = 0; j < NUM_VPORT_REP_HW_COUNTERS; j++, i++)
- data[i] = MLX5E_READ_CTR64_CPU(&priv->stats.vf_vport,
- vport_rep_stats_desc, j);
+ mlx5e_ethtool_get_ethtool_stats(priv, stats, data);
}
static int mlx5e_rep_get_sset_count(struct net_device *dev, int sset)
{
+ struct mlx5e_priv *priv = netdev_priv(dev);
+
switch (sset) {
case ETH_SS_STATS:
- return NUM_VPORT_REP_SW_COUNTERS + NUM_VPORT_REP_HW_COUNTERS;
+ return mlx5e_stats_total_num(priv);
default:
return -EOPNOTSUPP;
}
@@ -1247,8 +1264,7 @@ static int mlx5e_rep_setup_tc_cb(enum tc_setup_type type, void *type_data,
static int mlx5e_rep_setup_ft_cb(enum tc_setup_type type, void *type_data,
void *cb_priv)
{
- struct flow_cls_offload *f = type_data;
- struct flow_cls_offload cls_flower;
+ struct flow_cls_offload tmp, *f = type_data;
struct mlx5e_priv *priv = cb_priv;
struct mlx5_eswitch *esw;
unsigned long flags;
@@ -1261,16 +1277,30 @@ static int mlx5e_rep_setup_ft_cb(enum tc_setup_type type, void *type_data,
switch (type) {
case TC_SETUP_CLSFLOWER:
- if (!mlx5_eswitch_prios_supported(esw) || f->common.chain_index)
+ memcpy(&tmp, f, sizeof(*f));
+
+ if (!mlx5_esw_chains_prios_supported(esw) ||
+ tmp.common.chain_index)
return -EOPNOTSUPP;
/* Re-use tc offload path by moving the ft flow to the
* reserved ft chain.
+ *
+ * FT offload can use prio range [0, INT_MAX], so we normalize
+ * it to range [1, mlx5_esw_chains_get_prio_range(esw)]
+ * as with tc, where prio 0 isn't supported.
+ *
+ * We only support chain 0 of FT offload.
*/
- memcpy(&cls_flower, f, sizeof(*f));
- cls_flower.common.chain_index = FDB_FT_CHAIN;
- err = mlx5e_rep_setup_tc_cls_flower(priv, &cls_flower, flags);
- memcpy(&f->stats, &cls_flower.stats, sizeof(f->stats));
+ if (tmp.common.prio >= mlx5_esw_chains_get_prio_range(esw))
+ return -EOPNOTSUPP;
+ if (tmp.common.chain_index != 0)
+ return -EOPNOTSUPP;
+
+ tmp.common.chain_index = mlx5_esw_chains_get_ft_chain(esw);
+ tmp.common.prio++;
+ err = mlx5e_rep_setup_tc_cls_flower(priv, &tmp, flags);
+ memcpy(&f->stats, &tmp.stats, sizeof(f->stats));
return err;
default:
return -EOPNOTSUPP;
@@ -1660,10 +1690,65 @@ static void mlx5e_cleanup_rep_rx(struct mlx5e_priv *priv)
mlx5e_close_drop_rq(&priv->drop_rq);
}
+static int mlx5e_init_ul_rep_rx(struct mlx5e_priv *priv)
+{
+ int err = mlx5e_init_rep_rx(priv);
+
+ if (err)
+ return err;
+
+ mlx5e_create_q_counters(priv);
+ return 0;
+}
+
+static void mlx5e_cleanup_ul_rep_rx(struct mlx5e_priv *priv)
+{
+ mlx5e_destroy_q_counters(priv);
+ mlx5e_cleanup_rep_rx(priv);
+}
+
+static int mlx5e_init_uplink_rep_tx(struct mlx5e_rep_priv *rpriv)
+{
+ struct mlx5_rep_uplink_priv *uplink_priv;
+ struct net_device *netdev;
+ struct mlx5e_priv *priv;
+ int err;
+
+ netdev = rpriv->netdev;
+ priv = netdev_priv(netdev);
+ uplink_priv = &rpriv->uplink_priv;
+
+ mutex_init(&uplink_priv->unready_flows_lock);
+ INIT_LIST_HEAD(&uplink_priv->unready_flows);
+
+ /* init shared tc flow table */
+ err = mlx5e_tc_esw_init(&uplink_priv->tc_ht);
+ if (err)
+ return err;
+
+ mlx5_init_port_tun_entropy(&uplink_priv->tun_entropy, priv->mdev);
+
+ /* init indirect block notifications */
+ INIT_LIST_HEAD(&uplink_priv->tc_indr_block_priv_list);
+ uplink_priv->netdevice_nb.notifier_call = mlx5e_nic_rep_netdevice_event;
+ err = register_netdevice_notifier_dev_net(rpriv->netdev,
+ &uplink_priv->netdevice_nb,
+ &uplink_priv->netdevice_nn);
+ if (err) {
+ mlx5_core_err(priv->mdev, "Failed to register netdev notifier\n");
+ goto tc_esw_cleanup;
+ }
+
+ return 0;
+
+tc_esw_cleanup:
+ mlx5e_tc_esw_cleanup(&uplink_priv->tc_ht);
+ return err;
+}
+
static int mlx5e_init_rep_tx(struct mlx5e_priv *priv)
{
struct mlx5e_rep_priv *rpriv = priv->ppriv;
- struct mlx5_rep_uplink_priv *uplink_priv;
int err;
err = mlx5e_create_tises(priv);
@@ -1673,52 +1758,41 @@ static int mlx5e_init_rep_tx(struct mlx5e_priv *priv)
}
if (rpriv->rep->vport == MLX5_VPORT_UPLINK) {
- uplink_priv = &rpriv->uplink_priv;
-
- mutex_init(&uplink_priv->unready_flows_lock);
- INIT_LIST_HEAD(&uplink_priv->unready_flows);
-
- /* init shared tc flow table */
- err = mlx5e_tc_esw_init(&uplink_priv->tc_ht);
+ err = mlx5e_init_uplink_rep_tx(rpriv);
if (err)
goto destroy_tises;
-
- mlx5_init_port_tun_entropy(&uplink_priv->tun_entropy, priv->mdev);
-
- /* init indirect block notifications */
- INIT_LIST_HEAD(&uplink_priv->tc_indr_block_priv_list);
- uplink_priv->netdevice_nb.notifier_call = mlx5e_nic_rep_netdevice_event;
- err = register_netdevice_notifier(&uplink_priv->netdevice_nb);
- if (err) {
- mlx5_core_err(priv->mdev, "Failed to register netdev notifier\n");
- goto tc_esw_cleanup;
- }
}
return 0;
-tc_esw_cleanup:
- mlx5e_tc_esw_cleanup(&uplink_priv->tc_ht);
destroy_tises:
mlx5e_destroy_tises(priv);
return err;
}
+static void mlx5e_cleanup_uplink_rep_tx(struct mlx5e_rep_priv *rpriv)
+{
+ struct mlx5_rep_uplink_priv *uplink_priv = &rpriv->uplink_priv;
+
+ /* clean indirect TC block notifications */
+ unregister_netdevice_notifier_dev_net(rpriv->netdev,
+ &uplink_priv->netdevice_nb,
+ &uplink_priv->netdevice_nn);
+ mlx5e_rep_indr_clean_block_privs(rpriv);
+
+ /* delete shared tc flow table */
+ mlx5e_tc_esw_cleanup(&rpriv->uplink_priv.tc_ht);
+ mutex_destroy(&rpriv->uplink_priv.unready_flows_lock);
+}
+
static void mlx5e_cleanup_rep_tx(struct mlx5e_priv *priv)
{
struct mlx5e_rep_priv *rpriv = priv->ppriv;
mlx5e_destroy_tises(priv);
- if (rpriv->rep->vport == MLX5_VPORT_UPLINK) {
- /* clean indirect TC block notifications */
- unregister_netdevice_notifier(&rpriv->uplink_priv.netdevice_nb);
- mlx5e_rep_indr_clean_block_privs(rpriv);
-
- /* delete shared tc flow table */
- mlx5e_tc_esw_cleanup(&rpriv->uplink_priv.tc_ht);
- mutex_destroy(&rpriv->uplink_priv.unready_flows_lock);
- }
+ if (rpriv->rep->vport == MLX5_VPORT_UPLINK)
+ mlx5e_cleanup_uplink_rep_tx(rpriv);
}
static void mlx5e_rep_enable(struct mlx5e_priv *priv)
@@ -1787,6 +1861,7 @@ static void mlx5e_uplink_rep_enable(struct mlx5e_priv *priv)
static void mlx5e_uplink_rep_disable(struct mlx5e_priv *priv)
{
+ struct net_device *netdev = priv->netdev;
struct mlx5_core_dev *mdev = priv->mdev;
struct mlx5e_rep_priv *rpriv = priv->ppriv;
@@ -1795,7 +1870,44 @@ static void mlx5e_uplink_rep_disable(struct mlx5e_priv *priv)
#endif
mlx5_notifier_unregister(mdev, &priv->events_nb);
cancel_work_sync(&rpriv->uplink_priv.reoffload_flows_work);
- mlx5_lag_remove(mdev);
+ mlx5_lag_remove(mdev, netdev);
+}
+
+static MLX5E_DEFINE_STATS_GRP(sw_rep, 0);
+static MLX5E_DEFINE_STATS_GRP(vport_rep, MLX5E_NDO_UPDATE_STATS);
+
+/* The stats groups' order is the reverse of the update_stats() call order */
+static mlx5e_stats_grp_t mlx5e_rep_stats_grps[] = {
+ &MLX5E_STATS_GRP(sw_rep),
+ &MLX5E_STATS_GRP(vport_rep),
+};
+
+static unsigned int mlx5e_rep_stats_grps_num(struct mlx5e_priv *priv)
+{
+ return ARRAY_SIZE(mlx5e_rep_stats_grps);
+}
+
+/* The stats groups' order is the reverse of the update_stats() call order */
+static mlx5e_stats_grp_t mlx5e_ul_rep_stats_grps[] = {
+ &MLX5E_STATS_GRP(sw),
+ &MLX5E_STATS_GRP(qcnt),
+ &MLX5E_STATS_GRP(vnic_env),
+ &MLX5E_STATS_GRP(vport),
+ &MLX5E_STATS_GRP(802_3),
+ &MLX5E_STATS_GRP(2863),
+ &MLX5E_STATS_GRP(2819),
+ &MLX5E_STATS_GRP(phy),
+ &MLX5E_STATS_GRP(eth_ext),
+ &MLX5E_STATS_GRP(pcie),
+ &MLX5E_STATS_GRP(per_prio),
+ &MLX5E_STATS_GRP(pme),
+ &MLX5E_STATS_GRP(channels),
+ &MLX5E_STATS_GRP(per_port_buff_congest),
+};
+
+static unsigned int mlx5e_ul_rep_stats_grps_num(struct mlx5e_priv *priv)
+{
+ return ARRAY_SIZE(mlx5e_ul_rep_stats_grps);
}
static const struct mlx5e_profile mlx5e_rep_profile = {
@@ -1807,29 +1919,33 @@ static const struct mlx5e_profile mlx5e_rep_profile = {
.cleanup_tx = mlx5e_cleanup_rep_tx,
.enable = mlx5e_rep_enable,
.update_rx = mlx5e_update_rep_rx,
- .update_stats = mlx5e_rep_update_hw_counters,
+ .update_stats = mlx5e_update_ndo_stats,
.rx_handlers.handle_rx_cqe = mlx5e_handle_rx_cqe_rep,
.rx_handlers.handle_rx_cqe_mpwqe = mlx5e_handle_rx_cqe_mpwrq,
.max_tc = 1,
.rq_groups = MLX5E_NUM_RQ_GROUPS(REGULAR),
+ .stats_grps = mlx5e_rep_stats_grps,
+ .stats_grps_num = mlx5e_rep_stats_grps_num,
};
static const struct mlx5e_profile mlx5e_uplink_rep_profile = {
.init = mlx5e_init_rep,
.cleanup = mlx5e_cleanup_rep,
- .init_rx = mlx5e_init_rep_rx,
- .cleanup_rx = mlx5e_cleanup_rep_rx,
+ .init_rx = mlx5e_init_ul_rep_rx,
+ .cleanup_rx = mlx5e_cleanup_ul_rep_rx,
.init_tx = mlx5e_init_rep_tx,
.cleanup_tx = mlx5e_cleanup_rep_tx,
.enable = mlx5e_uplink_rep_enable,
.disable = mlx5e_uplink_rep_disable,
.update_rx = mlx5e_update_rep_rx,
- .update_stats = mlx5e_uplink_rep_update_hw_counters,
+ .update_stats = mlx5e_update_ndo_stats,
.update_carrier = mlx5e_update_carrier,
.rx_handlers.handle_rx_cqe = mlx5e_handle_rx_cqe_rep,
.rx_handlers.handle_rx_cqe_mpwqe = mlx5e_handle_rx_cqe_mpwrq,
.max_tc = MLX5E_MAX_NUM_TC,
.rq_groups = MLX5E_NUM_RQ_GROUPS(REGULAR),
+ .stats_grps = mlx5e_ul_rep_stats_grps,
+ .stats_grps_num = mlx5e_ul_rep_stats_grps_num,
};
static bool
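
Both rep profiles now point update_stats at mlx5e_update_ndo_stats(), which (per the en_main.c hunk above) refreshes only the groups whose mask carries MLX5E_NDO_UPDATE_STATS: vport_rep opts in, sw_rep does not. A standalone model of that mask filter:

#include <stdio.h>

#define NDO_UPDATE_STATS (1u << 1)

struct grp {
	const char *name;
	unsigned int update_stats_mask;
};

static const struct grp grps[] = {
	{ "sw_rep",    0 },			/* SW fold only, no HW query */
	{ "vport_rep", NDO_UPDATE_STATS },	/* refreshed on ndo path */
};

int main(void)
{
	int i, n = sizeof(grps) / sizeof(grps[0]);

	/* ndo_get_stats64 path: reverse walk, HW-backed groups only */
	for (i = n - 1; i >= 0; i--)
		if (grps[i].update_stats_mask & NDO_UPDATE_STATS)
			printf("ndo update: %s\n", grps[i].name);
	return 0;
}
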
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.h b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.h
index 31f83c8adcc9..3f756d51435f 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.h
@@ -73,6 +73,7 @@ struct mlx5_rep_uplink_priv {
*/
struct list_head tc_indr_block_priv_list;
struct notifier_block netdevice_nb;
+ struct netdev_net_notifier netdevice_nn;
struct mlx5_tun_entropy tun_entropy;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c
index 9f09253f9f46..30b216d9284c 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c
@@ -35,6 +35,58 @@
#include "en_accel/ipsec.h"
#include "en_accel/tls.h"
+static unsigned int stats_grps_num(struct mlx5e_priv *priv)
+{
+ return !priv->profile->stats_grps_num ? 0 :
+ priv->profile->stats_grps_num(priv);
+}
+
+unsigned int mlx5e_stats_total_num(struct mlx5e_priv *priv)
+{
+ mlx5e_stats_grp_t *stats_grps = priv->profile->stats_grps;
+ const unsigned int num_stats_grps = stats_grps_num(priv);
+ unsigned int total = 0;
+ int i;
+
+ for (i = 0; i < num_stats_grps; i++)
+ total += stats_grps[i]->get_num_stats(priv);
+
+ return total;
+}
+
+void mlx5e_stats_update(struct mlx5e_priv *priv)
+{
+ mlx5e_stats_grp_t *stats_grps = priv->profile->stats_grps;
+ const unsigned int num_stats_grps = stats_grps_num(priv);
+ int i;
+
+ for (i = num_stats_grps - 1; i >= 0; i--)
+ if (stats_grps[i]->update_stats)
+ stats_grps[i]->update_stats(priv);
+}
+
+void mlx5e_stats_fill(struct mlx5e_priv *priv, u64 *data, int idx)
+{
+ mlx5e_stats_grp_t *stats_grps = priv->profile->stats_grps;
+ const unsigned int num_stats_grps = stats_grps_num(priv);
+ int i;
+
+ for (i = 0; i < num_stats_grps; i++)
+ idx = stats_grps[i]->fill_stats(priv, data, idx);
+}
+
+void mlx5e_stats_fill_strings(struct mlx5e_priv *priv, u8 *data)
+{
+ mlx5e_stats_grp_t *stats_grps = priv->profile->stats_grps;
+ const unsigned int num_stats_grps = stats_grps_num(priv);
+ int i, idx = 0;
+
+ for (i = 0; i < num_stats_grps; i++)
+ idx = stats_grps[i]->fill_strings(priv, data, idx);
+}
+
+/* Concrete NIC Stats */
+
static const struct counter_desc sw_stats_desc[] = {
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_packets) },
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_bytes) },
@@ -146,12 +198,12 @@ static const struct counter_desc sw_stats_desc[] = {
#define NUM_SW_COUNTERS ARRAY_SIZE(sw_stats_desc)
-static int mlx5e_grp_sw_get_num_stats(struct mlx5e_priv *priv)
+static MLX5E_DECLARE_STATS_GRP_OP_NUM_STATS(sw)
{
return NUM_SW_COUNTERS;
}
-static int mlx5e_grp_sw_fill_strings(struct mlx5e_priv *priv, u8 *data, int idx)
+static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(sw)
{
int i;
@@ -160,7 +212,7 @@ static int mlx5e_grp_sw_fill_strings(struct mlx5e_priv *priv, u8 *data, int idx)
return idx;
}
-static int mlx5e_grp_sw_fill_stats(struct mlx5e_priv *priv, u64 *data, int idx)
+static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(sw)
{
int i;
@@ -169,7 +221,7 @@ static int mlx5e_grp_sw_fill_stats(struct mlx5e_priv *priv, u64 *data, int idx)
return idx;
}
-static void mlx5e_grp_sw_update_stats(struct mlx5e_priv *priv)
+static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(sw)
{
struct mlx5e_sw_stats *s = &priv->stats.sw;
int i;
@@ -297,6 +349,9 @@ static void mlx5e_grp_sw_update_stats(struct mlx5e_priv *priv)
s->tx_tls_drop_bypass_req += sq_stats->tls_drop_bypass_req;
#endif
s->tx_cqes += sq_stats->cqes;
+
+ /* https://gcc.gnu.org/bugzilla/show_bug.cgi?id=92657 */
+ barrier();
}
}
}
@@ -312,7 +367,7 @@ static const struct counter_desc drop_rq_stats_desc[] = {
#define NUM_Q_COUNTERS ARRAY_SIZE(q_stats_desc)
#define NUM_DROP_RQ_COUNTERS ARRAY_SIZE(drop_rq_stats_desc)
-static int mlx5e_grp_q_get_num_stats(struct mlx5e_priv *priv)
+static MLX5E_DECLARE_STATS_GRP_OP_NUM_STATS(qcnt)
{
int num_stats = 0;
@@ -325,7 +380,7 @@ static int mlx5e_grp_q_get_num_stats(struct mlx5e_priv *priv)
return num_stats;
}
-static int mlx5e_grp_q_fill_strings(struct mlx5e_priv *priv, u8 *data, int idx)
+static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(qcnt)
{
int i;
@@ -340,7 +395,7 @@ static int mlx5e_grp_q_fill_strings(struct mlx5e_priv *priv, u8 *data, int idx)
return idx;
}
-static int mlx5e_grp_q_fill_stats(struct mlx5e_priv *priv, u64 *data, int idx)
+static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(qcnt)
{
int i;
@@ -353,7 +408,7 @@ static int mlx5e_grp_q_fill_stats(struct mlx5e_priv *priv, u64 *data, int idx)
return idx;
}
-static void mlx5e_grp_q_update_stats(struct mlx5e_priv *priv)
+static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(qcnt)
{
struct mlx5e_qcounter_stats *qcnt = &priv->stats.qcnt;
u32 out[MLX5_ST_SZ_DW(query_q_counter_out)];
@@ -388,14 +443,13 @@ static const struct counter_desc vnic_env_stats_dev_oob_desc[] = {
(MLX5_CAP_GEN(dev, vnic_env_int_rq_oob) ? \
ARRAY_SIZE(vnic_env_stats_dev_oob_desc) : 0)
-static int mlx5e_grp_vnic_env_get_num_stats(struct mlx5e_priv *priv)
+static MLX5E_DECLARE_STATS_GRP_OP_NUM_STATS(vnic_env)
{
return NUM_VNIC_ENV_STEER_COUNTERS(priv->mdev) +
NUM_VNIC_ENV_DEV_OOB_COUNTERS(priv->mdev);
}
-static int mlx5e_grp_vnic_env_fill_strings(struct mlx5e_priv *priv, u8 *data,
- int idx)
+static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(vnic_env)
{
int i;
@@ -409,8 +463,7 @@ static int mlx5e_grp_vnic_env_fill_strings(struct mlx5e_priv *priv, u8 *data,
return idx;
}
-static int mlx5e_grp_vnic_env_fill_stats(struct mlx5e_priv *priv, u64 *data,
- int idx)
+static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(vnic_env)
{
int i;
@@ -424,7 +477,7 @@ static int mlx5e_grp_vnic_env_fill_stats(struct mlx5e_priv *priv, u64 *data,
return idx;
}
-static void mlx5e_grp_vnic_env_update_stats(struct mlx5e_priv *priv)
+static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(vnic_env)
{
u32 *out = (u32 *)priv->stats.vnic.query_vnic_env_out;
int outlen = MLX5_ST_SZ_BYTES(query_vnic_env_out);
@@ -487,13 +540,12 @@ static const struct counter_desc vport_stats_desc[] = {
#define NUM_VPORT_COUNTERS ARRAY_SIZE(vport_stats_desc)
-static int mlx5e_grp_vport_get_num_stats(struct mlx5e_priv *priv)
+static MLX5E_DECLARE_STATS_GRP_OP_NUM_STATS(vport)
{
return NUM_VPORT_COUNTERS;
}
-static int mlx5e_grp_vport_fill_strings(struct mlx5e_priv *priv, u8 *data,
- int idx)
+static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(vport)
{
int i;
@@ -502,8 +554,7 @@ static int mlx5e_grp_vport_fill_strings(struct mlx5e_priv *priv, u8 *data,
return idx;
}
-static int mlx5e_grp_vport_fill_stats(struct mlx5e_priv *priv, u64 *data,
- int idx)
+static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(vport)
{
int i;
@@ -513,7 +564,7 @@ static int mlx5e_grp_vport_fill_stats(struct mlx5e_priv *priv, u64 *data,
return idx;
}
-static void mlx5e_grp_vport_update_stats(struct mlx5e_priv *priv)
+static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(vport)
{
int outlen = MLX5_ST_SZ_BYTES(query_vport_counter_out);
u32 *out = (u32 *)priv->stats.vport.query_vport_out;
@@ -552,13 +603,12 @@ static const struct counter_desc pport_802_3_stats_desc[] = {
#define NUM_PPORT_802_3_COUNTERS ARRAY_SIZE(pport_802_3_stats_desc)
-static int mlx5e_grp_802_3_get_num_stats(struct mlx5e_priv *priv)
+static MLX5E_DECLARE_STATS_GRP_OP_NUM_STATS(802_3)
{
return NUM_PPORT_802_3_COUNTERS;
}
-static int mlx5e_grp_802_3_fill_strings(struct mlx5e_priv *priv, u8 *data,
- int idx)
+static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(802_3)
{
int i;
@@ -567,8 +617,7 @@ static int mlx5e_grp_802_3_fill_strings(struct mlx5e_priv *priv, u8 *data,
return idx;
}
-static int mlx5e_grp_802_3_fill_stats(struct mlx5e_priv *priv, u64 *data,
- int idx)
+static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(802_3)
{
int i;
@@ -581,7 +630,7 @@ static int mlx5e_grp_802_3_fill_stats(struct mlx5e_priv *priv, u64 *data,
#define MLX5_BASIC_PPCNT_SUPPORTED(mdev) \
(MLX5_CAP_GEN(mdev, pcam_reg) ? MLX5_CAP_PCAM_REG(mdev, ppcnt) : 1)
-void mlx5e_grp_802_3_update_stats(struct mlx5e_priv *priv)
+static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(802_3)
{
struct mlx5e_pport_stats *pstats = &priv->stats.pport;
struct mlx5_core_dev *mdev = priv->mdev;
@@ -609,13 +658,12 @@ static const struct counter_desc pport_2863_stats_desc[] = {
#define NUM_PPORT_2863_COUNTERS ARRAY_SIZE(pport_2863_stats_desc)
-static int mlx5e_grp_2863_get_num_stats(struct mlx5e_priv *priv)
+static MLX5E_DECLARE_STATS_GRP_OP_NUM_STATS(2863)
{
return NUM_PPORT_2863_COUNTERS;
}
-static int mlx5e_grp_2863_fill_strings(struct mlx5e_priv *priv, u8 *data,
- int idx)
+static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(2863)
{
int i;
@@ -624,8 +672,7 @@ static int mlx5e_grp_2863_fill_strings(struct mlx5e_priv *priv, u8 *data,
return idx;
}
-static int mlx5e_grp_2863_fill_stats(struct mlx5e_priv *priv, u64 *data,
- int idx)
+static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(2863)
{
int i;
@@ -635,7 +682,7 @@ static int mlx5e_grp_2863_fill_stats(struct mlx5e_priv *priv, u64 *data,
return idx;
}
-static void mlx5e_grp_2863_update_stats(struct mlx5e_priv *priv)
+static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(2863)
{
struct mlx5e_pport_stats *pstats = &priv->stats.pport;
struct mlx5_core_dev *mdev = priv->mdev;
@@ -670,13 +717,12 @@ static const struct counter_desc pport_2819_stats_desc[] = {
#define NUM_PPORT_2819_COUNTERS ARRAY_SIZE(pport_2819_stats_desc)
-static int mlx5e_grp_2819_get_num_stats(struct mlx5e_priv *priv)
+static MLX5E_DECLARE_STATS_GRP_OP_NUM_STATS(2819)
{
return NUM_PPORT_2819_COUNTERS;
}
-static int mlx5e_grp_2819_fill_strings(struct mlx5e_priv *priv, u8 *data,
- int idx)
+static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(2819)
{
int i;
@@ -685,8 +731,7 @@ static int mlx5e_grp_2819_fill_strings(struct mlx5e_priv *priv, u8 *data,
return idx;
}
-static int mlx5e_grp_2819_fill_stats(struct mlx5e_priv *priv, u64 *data,
- int idx)
+static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(2819)
{
int i;
@@ -696,7 +741,7 @@ static int mlx5e_grp_2819_fill_stats(struct mlx5e_priv *priv, u64 *data,
return idx;
}
-static void mlx5e_grp_2819_update_stats(struct mlx5e_priv *priv)
+static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(2819)
{
struct mlx5e_pport_stats *pstats = &priv->stats.pport;
struct mlx5_core_dev *mdev = priv->mdev;
@@ -734,7 +779,7 @@ pport_phy_statistical_err_lanes_stats_desc[] = {
#define NUM_PPORT_PHY_STATISTICAL_PER_LANE_COUNTERS \
ARRAY_SIZE(pport_phy_statistical_err_lanes_stats_desc)
-static int mlx5e_grp_phy_get_num_stats(struct mlx5e_priv *priv)
+static MLX5E_DECLARE_STATS_GRP_OP_NUM_STATS(phy)
{
struct mlx5_core_dev *mdev = priv->mdev;
int num_stats;
@@ -751,8 +796,7 @@ static int mlx5e_grp_phy_get_num_stats(struct mlx5e_priv *priv)
return num_stats;
}
-static int mlx5e_grp_phy_fill_strings(struct mlx5e_priv *priv, u8 *data,
- int idx)
+static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(phy)
{
struct mlx5_core_dev *mdev = priv->mdev;
int i;
@@ -774,7 +818,7 @@ static int mlx5e_grp_phy_fill_strings(struct mlx5e_priv *priv, u8 *data,
return idx;
}
-static int mlx5e_grp_phy_fill_stats(struct mlx5e_priv *priv, u64 *data, int idx)
+static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(phy)
{
struct mlx5_core_dev *mdev = priv->mdev;
int i;
@@ -800,7 +844,7 @@ static int mlx5e_grp_phy_fill_stats(struct mlx5e_priv *priv, u64 *data, int idx)
return idx;
}
-static void mlx5e_grp_phy_update_stats(struct mlx5e_priv *priv)
+static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(phy)
{
struct mlx5e_pport_stats *pstats = &priv->stats.pport;
struct mlx5_core_dev *mdev = priv->mdev;
@@ -830,7 +874,7 @@ static const struct counter_desc pport_eth_ext_stats_desc[] = {
#define NUM_PPORT_ETH_EXT_COUNTERS ARRAY_SIZE(pport_eth_ext_stats_desc)
-static int mlx5e_grp_eth_ext_get_num_stats(struct mlx5e_priv *priv)
+static MLX5E_DECLARE_STATS_GRP_OP_NUM_STATS(eth_ext)
{
if (MLX5_CAP_PCAM_FEATURE((priv)->mdev, rx_buffer_fullness_counters))
return NUM_PPORT_ETH_EXT_COUNTERS;
@@ -838,8 +882,7 @@ static int mlx5e_grp_eth_ext_get_num_stats(struct mlx5e_priv *priv)
return 0;
}
-static int mlx5e_grp_eth_ext_fill_strings(struct mlx5e_priv *priv, u8 *data,
- int idx)
+static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(eth_ext)
{
int i;
@@ -850,8 +893,7 @@ static int mlx5e_grp_eth_ext_fill_strings(struct mlx5e_priv *priv, u8 *data,
return idx;
}
-static int mlx5e_grp_eth_ext_fill_stats(struct mlx5e_priv *priv, u64 *data,
- int idx)
+static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(eth_ext)
{
int i;
@@ -863,7 +905,7 @@ static int mlx5e_grp_eth_ext_fill_stats(struct mlx5e_priv *priv, u64 *data,
return idx;
}
-static void mlx5e_grp_eth_ext_update_stats(struct mlx5e_priv *priv)
+static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(eth_ext)
{
struct mlx5e_pport_stats *pstats = &priv->stats.pport;
struct mlx5_core_dev *mdev = priv->mdev;
@@ -904,7 +946,7 @@ static const struct counter_desc pcie_perf_stall_stats_desc[] = {
#define NUM_PCIE_PERF_COUNTERS64 ARRAY_SIZE(pcie_perf_stats_desc64)
#define NUM_PCIE_PERF_STALL_COUNTERS ARRAY_SIZE(pcie_perf_stall_stats_desc)
-static int mlx5e_grp_pcie_get_num_stats(struct mlx5e_priv *priv)
+static MLX5E_DECLARE_STATS_GRP_OP_NUM_STATS(pcie)
{
int num_stats = 0;
@@ -920,8 +962,7 @@ static int mlx5e_grp_pcie_get_num_stats(struct mlx5e_priv *priv)
return num_stats;
}
-static int mlx5e_grp_pcie_fill_strings(struct mlx5e_priv *priv, u8 *data,
- int idx)
+static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(pcie)
{
int i;
@@ -942,8 +983,7 @@ static int mlx5e_grp_pcie_fill_strings(struct mlx5e_priv *priv, u8 *data,
return idx;
}
-static int mlx5e_grp_pcie_fill_stats(struct mlx5e_priv *priv, u64 *data,
- int idx)
+static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(pcie)
{
int i;
@@ -967,7 +1007,7 @@ static int mlx5e_grp_pcie_fill_stats(struct mlx5e_priv *priv, u64 *data,
return idx;
}
-static void mlx5e_grp_pcie_update_stats(struct mlx5e_priv *priv)
+static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(pcie)
{
struct mlx5e_pcie_stats *pcie_stats = &priv->stats.pcie;
struct mlx5_core_dev *mdev = priv->mdev;
@@ -1015,8 +1055,7 @@ static int mlx5e_grp_per_tc_prio_get_num_stats(struct mlx5e_priv *priv)
return NUM_PPORT_PER_TC_PRIO_COUNTERS * NUM_PPORT_PRIO;
}
-static int mlx5e_grp_per_port_buffer_congest_fill_strings(struct mlx5e_priv *priv,
- u8 *data, int idx)
+static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(per_port_buff_congest)
{
struct mlx5_core_dev *mdev = priv->mdev;
int i, prio;
@@ -1036,8 +1075,7 @@ static int mlx5e_grp_per_port_buffer_congest_fill_strings(struct mlx5e_priv *pri
return idx;
}
-static int mlx5e_grp_per_port_buffer_congest_fill_stats(struct mlx5e_priv *priv,
- u64 *data, int idx)
+static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(per_port_buff_congest)
{
struct mlx5e_pport_stats *pport = &priv->stats.pport;
struct mlx5_core_dev *mdev = priv->mdev;
@@ -1112,13 +1150,13 @@ static void mlx5e_grp_per_tc_congest_prio_update_stats(struct mlx5e_priv *priv)
}
}
-static int mlx5e_grp_per_port_buffer_congest_get_num_stats(struct mlx5e_priv *priv)
+static MLX5E_DECLARE_STATS_GRP_OP_NUM_STATS(per_port_buff_congest)
{
return mlx5e_grp_per_tc_prio_get_num_stats(priv) +
mlx5e_grp_per_tc_congest_prio_get_num_stats(priv);
}
-static void mlx5e_grp_per_port_buffer_congest_update_stats(struct mlx5e_priv *priv)
+static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(per_port_buff_congest)
{
mlx5e_grp_per_tc_prio_update_stats(priv);
mlx5e_grp_per_tc_congest_prio_update_stats(priv);
@@ -1130,6 +1168,7 @@ static void mlx5e_grp_per_port_buffer_congest_update_stats(struct mlx5e_priv *pr
static const struct counter_desc pport_per_prio_traffic_stats_desc[] = {
{ "rx_prio%d_bytes", PPORT_PER_PRIO_OFF(rx_octets) },
{ "rx_prio%d_packets", PPORT_PER_PRIO_OFF(rx_frames) },
+ { "rx_prio%d_discards", PPORT_PER_PRIO_OFF(rx_discards) },
{ "tx_prio%d_bytes", PPORT_PER_PRIO_OFF(tx_octets) },
{ "tx_prio%d_packets", PPORT_PER_PRIO_OFF(tx_frames) },
};
@@ -1292,29 +1331,27 @@ static int mlx5e_grp_per_prio_pfc_fill_stats(struct mlx5e_priv *priv,
return idx;
}
-static int mlx5e_grp_per_prio_get_num_stats(struct mlx5e_priv *priv)
+static MLX5E_DECLARE_STATS_GRP_OP_NUM_STATS(per_prio)
{
return mlx5e_grp_per_prio_traffic_get_num_stats() +
mlx5e_grp_per_prio_pfc_get_num_stats(priv);
}
-static int mlx5e_grp_per_prio_fill_strings(struct mlx5e_priv *priv, u8 *data,
- int idx)
+static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(per_prio)
{
idx = mlx5e_grp_per_prio_traffic_fill_strings(priv, data, idx);
idx = mlx5e_grp_per_prio_pfc_fill_strings(priv, data, idx);
return idx;
}
-static int mlx5e_grp_per_prio_fill_stats(struct mlx5e_priv *priv, u64 *data,
- int idx)
+static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(per_prio)
{
idx = mlx5e_grp_per_prio_traffic_fill_stats(priv, data, idx);
idx = mlx5e_grp_per_prio_pfc_fill_stats(priv, data, idx);
return idx;
}
-static void mlx5e_grp_per_prio_update_stats(struct mlx5e_priv *priv)
+static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(per_prio)
{
struct mlx5e_pport_stats *pstats = &priv->stats.pport;
struct mlx5_core_dev *mdev = priv->mdev;
@@ -1349,13 +1386,12 @@ static const struct counter_desc mlx5e_pme_error_desc[] = {
#define NUM_PME_STATUS_STATS ARRAY_SIZE(mlx5e_pme_status_desc)
#define NUM_PME_ERR_STATS ARRAY_SIZE(mlx5e_pme_error_desc)
-static int mlx5e_grp_pme_get_num_stats(struct mlx5e_priv *priv)
+static MLX5E_DECLARE_STATS_GRP_OP_NUM_STATS(pme)
{
return NUM_PME_STATUS_STATS + NUM_PME_ERR_STATS;
}
-static int mlx5e_grp_pme_fill_strings(struct mlx5e_priv *priv, u8 *data,
- int idx)
+static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(pme)
{
int i;
@@ -1368,8 +1404,7 @@ static int mlx5e_grp_pme_fill_strings(struct mlx5e_priv *priv, u8 *data,
return idx;
}
-static int mlx5e_grp_pme_fill_stats(struct mlx5e_priv *priv, u64 *data,
- int idx)
+static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(pme)
{
struct mlx5_pme_stats pme_stats;
int i;
@@ -1387,45 +1422,46 @@ static int mlx5e_grp_pme_fill_stats(struct mlx5e_priv *priv, u64 *data,
return idx;
}
-static int mlx5e_grp_ipsec_get_num_stats(struct mlx5e_priv *priv)
+static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(pme) { return; }
+
+static MLX5E_DECLARE_STATS_GRP_OP_NUM_STATS(ipsec)
{
return mlx5e_ipsec_get_count(priv);
}
-static int mlx5e_grp_ipsec_fill_strings(struct mlx5e_priv *priv, u8 *data,
- int idx)
+static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(ipsec)
{
return idx + mlx5e_ipsec_get_strings(priv,
data + idx * ETH_GSTRING_LEN);
}
-static int mlx5e_grp_ipsec_fill_stats(struct mlx5e_priv *priv, u64 *data,
- int idx)
+static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(ipsec)
{
return idx + mlx5e_ipsec_get_stats(priv, data + idx);
}
-static void mlx5e_grp_ipsec_update_stats(struct mlx5e_priv *priv)
+static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(ipsec)
{
mlx5e_ipsec_update_stats(priv);
}
-static int mlx5e_grp_tls_get_num_stats(struct mlx5e_priv *priv)
+static MLX5E_DECLARE_STATS_GRP_OP_NUM_STATS(tls)
{
return mlx5e_tls_get_count(priv);
}
-static int mlx5e_grp_tls_fill_strings(struct mlx5e_priv *priv, u8 *data,
- int idx)
+static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(tls)
{
return idx + mlx5e_tls_get_strings(priv, data + idx * ETH_GSTRING_LEN);
}
-static int mlx5e_grp_tls_fill_stats(struct mlx5e_priv *priv, u64 *data, int idx)
+static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(tls)
{
return idx + mlx5e_tls_get_stats(priv, data + idx);
}
+static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(tls) { return; }
+
static const struct counter_desc rq_stats_desc[] = {
{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, packets) },
{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, bytes) },
@@ -1559,7 +1595,7 @@ static const struct counter_desc ch_stats_desc[] = {
#define NUM_XSKSQ_STATS ARRAY_SIZE(xsksq_stats_desc)
#define NUM_CH_STATS ARRAY_SIZE(ch_stats_desc)
-static int mlx5e_grp_channels_get_num_stats(struct mlx5e_priv *priv)
+static MLX5E_DECLARE_STATS_GRP_OP_NUM_STATS(channels)
{
int max_nch = priv->max_nch;
@@ -1572,8 +1608,7 @@ static int mlx5e_grp_channels_get_num_stats(struct mlx5e_priv *priv)
(NUM_XSKSQ_STATS * max_nch * priv->xsk.ever_used);
}
-static int mlx5e_grp_channels_fill_strings(struct mlx5e_priv *priv, u8 *data,
- int idx)
+static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(channels)
{
bool is_xsk = priv->xsk.ever_used;
int max_nch = priv->max_nch;
@@ -1615,8 +1650,7 @@ static int mlx5e_grp_channels_fill_strings(struct mlx5e_priv *priv, u8 *data,
return idx;
}
-static int mlx5e_grp_channels_fill_stats(struct mlx5e_priv *priv, u64 *data,
- int idx)
+static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(channels)
{
bool is_xsk = priv->xsk.ever_used;
int max_nch = priv->max_nch;
@@ -1664,104 +1698,46 @@ static int mlx5e_grp_channels_fill_stats(struct mlx5e_priv *priv, u64 *data,
return idx;
}
+static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(channels) { return; }
+
+MLX5E_DEFINE_STATS_GRP(sw, 0);
+MLX5E_DEFINE_STATS_GRP(qcnt, MLX5E_NDO_UPDATE_STATS);
+MLX5E_DEFINE_STATS_GRP(vnic_env, 0);
+MLX5E_DEFINE_STATS_GRP(vport, MLX5E_NDO_UPDATE_STATS);
+MLX5E_DEFINE_STATS_GRP(802_3, MLX5E_NDO_UPDATE_STATS);
+MLX5E_DEFINE_STATS_GRP(2863, 0);
+MLX5E_DEFINE_STATS_GRP(2819, 0);
+MLX5E_DEFINE_STATS_GRP(phy, 0);
+MLX5E_DEFINE_STATS_GRP(pcie, 0);
+MLX5E_DEFINE_STATS_GRP(per_prio, 0);
+MLX5E_DEFINE_STATS_GRP(pme, 0);
+MLX5E_DEFINE_STATS_GRP(channels, 0);
+MLX5E_DEFINE_STATS_GRP(per_port_buff_congest, 0);
+MLX5E_DEFINE_STATS_GRP(eth_ext, 0);
+static MLX5E_DEFINE_STATS_GRP(ipsec, 0);
+static MLX5E_DEFINE_STATS_GRP(tls, 0);
+
 /* The stats groups' order is the reverse of the update_stats() call order */
-const struct mlx5e_stats_grp mlx5e_stats_grps[] = {
- {
- .get_num_stats = mlx5e_grp_sw_get_num_stats,
- .fill_strings = mlx5e_grp_sw_fill_strings,
- .fill_stats = mlx5e_grp_sw_fill_stats,
- .update_stats = mlx5e_grp_sw_update_stats,
- },
- {
- .get_num_stats = mlx5e_grp_q_get_num_stats,
- .fill_strings = mlx5e_grp_q_fill_strings,
- .fill_stats = mlx5e_grp_q_fill_stats,
- .update_stats_mask = MLX5E_NDO_UPDATE_STATS,
- .update_stats = mlx5e_grp_q_update_stats,
- },
- {
- .get_num_stats = mlx5e_grp_vnic_env_get_num_stats,
- .fill_strings = mlx5e_grp_vnic_env_fill_strings,
- .fill_stats = mlx5e_grp_vnic_env_fill_stats,
- .update_stats = mlx5e_grp_vnic_env_update_stats,
- },
- {
- .get_num_stats = mlx5e_grp_vport_get_num_stats,
- .fill_strings = mlx5e_grp_vport_fill_strings,
- .fill_stats = mlx5e_grp_vport_fill_stats,
- .update_stats_mask = MLX5E_NDO_UPDATE_STATS,
- .update_stats = mlx5e_grp_vport_update_stats,
- },
- {
- .get_num_stats = mlx5e_grp_802_3_get_num_stats,
- .fill_strings = mlx5e_grp_802_3_fill_strings,
- .fill_stats = mlx5e_grp_802_3_fill_stats,
- .update_stats_mask = MLX5E_NDO_UPDATE_STATS,
- .update_stats = mlx5e_grp_802_3_update_stats,
- },
- {
- .get_num_stats = mlx5e_grp_2863_get_num_stats,
- .fill_strings = mlx5e_grp_2863_fill_strings,
- .fill_stats = mlx5e_grp_2863_fill_stats,
- .update_stats = mlx5e_grp_2863_update_stats,
- },
- {
- .get_num_stats = mlx5e_grp_2819_get_num_stats,
- .fill_strings = mlx5e_grp_2819_fill_strings,
- .fill_stats = mlx5e_grp_2819_fill_stats,
- .update_stats = mlx5e_grp_2819_update_stats,
- },
- {
- .get_num_stats = mlx5e_grp_phy_get_num_stats,
- .fill_strings = mlx5e_grp_phy_fill_strings,
- .fill_stats = mlx5e_grp_phy_fill_stats,
- .update_stats = mlx5e_grp_phy_update_stats,
- },
- {
- .get_num_stats = mlx5e_grp_eth_ext_get_num_stats,
- .fill_strings = mlx5e_grp_eth_ext_fill_strings,
- .fill_stats = mlx5e_grp_eth_ext_fill_stats,
- .update_stats = mlx5e_grp_eth_ext_update_stats,
- },
- {
- .get_num_stats = mlx5e_grp_pcie_get_num_stats,
- .fill_strings = mlx5e_grp_pcie_fill_strings,
- .fill_stats = mlx5e_grp_pcie_fill_stats,
- .update_stats = mlx5e_grp_pcie_update_stats,
- },
- {
- .get_num_stats = mlx5e_grp_per_prio_get_num_stats,
- .fill_strings = mlx5e_grp_per_prio_fill_strings,
- .fill_stats = mlx5e_grp_per_prio_fill_stats,
- .update_stats = mlx5e_grp_per_prio_update_stats,
- },
- {
- .get_num_stats = mlx5e_grp_pme_get_num_stats,
- .fill_strings = mlx5e_grp_pme_fill_strings,
- .fill_stats = mlx5e_grp_pme_fill_stats,
- },
- {
- .get_num_stats = mlx5e_grp_ipsec_get_num_stats,
- .fill_strings = mlx5e_grp_ipsec_fill_strings,
- .fill_stats = mlx5e_grp_ipsec_fill_stats,
- .update_stats = mlx5e_grp_ipsec_update_stats,
- },
- {
- .get_num_stats = mlx5e_grp_tls_get_num_stats,
- .fill_strings = mlx5e_grp_tls_fill_strings,
- .fill_stats = mlx5e_grp_tls_fill_stats,
- },
- {
- .get_num_stats = mlx5e_grp_channels_get_num_stats,
- .fill_strings = mlx5e_grp_channels_fill_strings,
- .fill_stats = mlx5e_grp_channels_fill_stats,
- },
- {
- .get_num_stats = mlx5e_grp_per_port_buffer_congest_get_num_stats,
- .fill_strings = mlx5e_grp_per_port_buffer_congest_fill_strings,
- .fill_stats = mlx5e_grp_per_port_buffer_congest_fill_stats,
- .update_stats = mlx5e_grp_per_port_buffer_congest_update_stats,
- },
+mlx5e_stats_grp_t mlx5e_nic_stats_grps[] = {
+ &MLX5E_STATS_GRP(sw),
+ &MLX5E_STATS_GRP(qcnt),
+ &MLX5E_STATS_GRP(vnic_env),
+ &MLX5E_STATS_GRP(vport),
+ &MLX5E_STATS_GRP(802_3),
+ &MLX5E_STATS_GRP(2863),
+ &MLX5E_STATS_GRP(2819),
+ &MLX5E_STATS_GRP(phy),
+ &MLX5E_STATS_GRP(eth_ext),
+ &MLX5E_STATS_GRP(pcie),
+ &MLX5E_STATS_GRP(per_prio),
+ &MLX5E_STATS_GRP(pme),
+ &MLX5E_STATS_GRP(ipsec),
+ &MLX5E_STATS_GRP(tls),
+ &MLX5E_STATS_GRP(channels),
+ &MLX5E_STATS_GRP(per_port_buff_congest),
};
-const int mlx5e_num_stats_grps = ARRAY_SIZE(mlx5e_stats_grps);
+unsigned int mlx5e_nic_stats_grps_num(struct mlx5e_priv *priv)
+{
+ return ARRAY_SIZE(mlx5e_nic_stats_grps);
+}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h
index 869f3502f631..092b39ffa32a 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h
@@ -29,6 +29,7 @@
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
+
#ifndef __MLX5_EN_STATS_H__
#define __MLX5_EN_STATS_H__
@@ -55,6 +56,56 @@ struct counter_desc {
size_t offset; /* Byte offset */
};
+enum {
+ MLX5E_NDO_UPDATE_STATS = BIT(0x1),
+};
+
+struct mlx5e_priv;
+struct mlx5e_stats_grp {
+ u16 update_stats_mask;
+ int (*get_num_stats)(struct mlx5e_priv *priv);
+ int (*fill_strings)(struct mlx5e_priv *priv, u8 *data, int idx);
+ int (*fill_stats)(struct mlx5e_priv *priv, u64 *data, int idx);
+ void (*update_stats)(struct mlx5e_priv *priv);
+};
+
+typedef const struct mlx5e_stats_grp *const mlx5e_stats_grp_t;
+
+#define MLX5E_STATS_GRP_OP(grp, name) mlx5e_stats_grp_ ## grp ## _ ## name
+
+#define MLX5E_DECLARE_STATS_GRP_OP_NUM_STATS(grp) \
+ int MLX5E_STATS_GRP_OP(grp, num_stats)(struct mlx5e_priv *priv)
+
+#define MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(grp) \
+ void MLX5E_STATS_GRP_OP(grp, update_stats)(struct mlx5e_priv *priv)
+
+#define MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(grp) \
+ int MLX5E_STATS_GRP_OP(grp, fill_strings)(struct mlx5e_priv *priv, u8 *data, int idx)
+
+#define MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(grp) \
+ int MLX5E_STATS_GRP_OP(grp, fill_stats)(struct mlx5e_priv *priv, u64 *data, int idx)
+
+#define MLX5E_STATS_GRP(grp) mlx5e_stats_grp_ ## grp
+
+#define MLX5E_DECLARE_STATS_GRP(grp) \
+ const struct mlx5e_stats_grp MLX5E_STATS_GRP(grp)
+
+#define MLX5E_DEFINE_STATS_GRP(grp, mask) \
+MLX5E_DECLARE_STATS_GRP(grp) = { \
+ .get_num_stats = MLX5E_STATS_GRP_OP(grp, num_stats), \
+ .fill_stats = MLX5E_STATS_GRP_OP(grp, fill_stats), \
+ .fill_strings = MLX5E_STATS_GRP_OP(grp, fill_strings), \
+ .update_stats = MLX5E_STATS_GRP_OP(grp, update_stats), \
+ .update_stats_mask = mask, \
+}
+
+unsigned int mlx5e_stats_total_num(struct mlx5e_priv *priv);
+void mlx5e_stats_update(struct mlx5e_priv *priv);
+void mlx5e_stats_fill(struct mlx5e_priv *priv, u64 *data, int idx);
+void mlx5e_stats_fill_strings(struct mlx5e_priv *priv, u8 *data);
+
+/* Concrete NIC Stats */
+
struct mlx5e_sw_stats {
u64 rx_packets;
u64 rx_bytes;
@@ -322,22 +373,22 @@ struct mlx5e_stats {
struct mlx5e_pcie_stats pcie;
};
-enum {
- MLX5E_NDO_UPDATE_STATS = BIT(0x1),
-};
-
-struct mlx5e_priv;
-struct mlx5e_stats_grp {
- u16 update_stats_mask;
- int (*get_num_stats)(struct mlx5e_priv *priv);
- int (*fill_strings)(struct mlx5e_priv *priv, u8 *data, int idx);
- int (*fill_stats)(struct mlx5e_priv *priv, u64 *data, int idx);
- void (*update_stats)(struct mlx5e_priv *priv);
-};
-
-extern const struct mlx5e_stats_grp mlx5e_stats_grps[];
-extern const int mlx5e_num_stats_grps;
+extern mlx5e_stats_grp_t mlx5e_nic_stats_grps[];
+unsigned int mlx5e_nic_stats_grps_num(struct mlx5e_priv *priv);
-void mlx5e_grp_802_3_update_stats(struct mlx5e_priv *priv);
+extern MLX5E_DECLARE_STATS_GRP(sw);
+extern MLX5E_DECLARE_STATS_GRP(qcnt);
+extern MLX5E_DECLARE_STATS_GRP(vnic_env);
+extern MLX5E_DECLARE_STATS_GRP(vport);
+extern MLX5E_DECLARE_STATS_GRP(802_3);
+extern MLX5E_DECLARE_STATS_GRP(2863);
+extern MLX5E_DECLARE_STATS_GRP(2819);
+extern MLX5E_DECLARE_STATS_GRP(phy);
+extern MLX5E_DECLARE_STATS_GRP(eth_ext);
+extern MLX5E_DECLARE_STATS_GRP(pcie);
+extern MLX5E_DECLARE_STATS_GRP(per_prio);
+extern MLX5E_DECLARE_STATS_GRP(pme);
+extern MLX5E_DECLARE_STATS_GRP(channels);
+extern MLX5E_DECLARE_STATS_GRP(per_port_buff_congest);
#endif /* __MLX5_EN_STATS_H__ */
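
A compilable illustration of how the DECLARE/DEFINE macro family composes, reduced to two ops for brevity and applied to an invented group named "demo" (the macros are copied in spirit from the header above, not verbatim):

#include <stdio.h>

struct stats_grp {
	int (*get_num_stats)(void);
	int (*fill_stats)(long long *data, int idx);
	void (*update_stats)(void);
};

#define STATS_GRP_OP(grp, name) stats_grp_ ## grp ## _ ## name
#define DECLARE_NUM_STATS(grp) int STATS_GRP_OP(grp, num_stats)(void)
#define DECLARE_FILL_STATS(grp) \
	int STATS_GRP_OP(grp, fill_stats)(long long *data, int idx)
#define DECLARE_UPDATE_STATS(grp) void STATS_GRP_OP(grp, update_stats)(void)

#define STATS_GRP(grp) stats_grp_ ## grp
#define DEFINE_STATS_GRP(grp) \
const struct stats_grp STATS_GRP(grp) = { \
	.get_num_stats = STATS_GRP_OP(grp, num_stats), \
	.fill_stats = STATS_GRP_OP(grp, fill_stats), \
	.update_stats = STATS_GRP_OP(grp, update_stats), \
}

/* The op definitions read like the ones in en_stats.c above. */
static DECLARE_NUM_STATS(demo) { return 1; }
static DECLARE_FILL_STATS(demo) { data[idx] = 42; return idx + 1; }
static DECLARE_UPDATE_STATS(demo) { }

static DEFINE_STATS_GRP(demo);

int main(void)
{
	long long data[1];

	STATS_GRP(demo).update_stats();
	STATS_GRP(demo).fill_stats(data, 0);
	printf("n=%d v=%lld\n", STATS_GRP(demo).get_num_stats(), data[0]);
	return 0;
}
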
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
index 024e1cddfd0e..74091f72c9a8 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
@@ -51,6 +51,7 @@
#include "en_rep.h"
#include "en_tc.h"
#include "eswitch.h"
+#include "eswitch_offloads_chains.h"
#include "fs_core.h"
#include "en/port.h"
#include "en/tc_tun.h"
@@ -960,7 +961,8 @@ mlx5e_tc_add_nic_flow(struct mlx5e_priv *priv,
mutex_lock(&priv->fs.tc.t_lock);
if (IS_ERR_OR_NULL(priv->fs.tc.t)) {
- int tc_grp_size, tc_tbl_size;
+ struct mlx5_flow_table_attr ft_attr = {};
+ int tc_grp_size, tc_tbl_size, tc_num_grps;
u32 max_flow_counter;
max_flow_counter = (MLX5_CAP_GEN(dev, max_flow_counter_31_16) << 16) |
@@ -970,13 +972,15 @@ mlx5e_tc_add_nic_flow(struct mlx5e_priv *priv,
tc_tbl_size = min_t(int, tc_grp_size * MLX5E_TC_TABLE_NUM_GROUPS,
BIT(MLX5_CAP_FLOWTABLE_NIC_RX(dev, log_max_ft_size)));
+ tc_num_grps = MLX5E_TC_TABLE_NUM_GROUPS;
+ ft_attr.prio = MLX5E_TC_PRIO;
+ ft_attr.max_fte = tc_tbl_size;
+ ft_attr.level = MLX5E_TC_FT_LEVEL;
+ ft_attr.autogroup.max_num_groups = tc_num_grps;
priv->fs.tc.t =
mlx5_create_auto_grouped_flow_table(priv->fs.ns,
- MLX5E_TC_PRIO,
- tc_tbl_size,
- MLX5E_TC_TABLE_NUM_GROUPS,
- MLX5E_TC_FT_LEVEL, 0);
+ &ft_attr);
if (IS_ERR(priv->fs.tc.t)) {
mutex_unlock(&priv->fs.tc.t_lock);
NL_SET_ERR_MSG_MOD(extack,
@@ -1080,7 +1084,7 @@ mlx5e_tc_offload_to_slow_path(struct mlx5_eswitch *esw,
memcpy(slow_attr, flow->esw_attr, sizeof(*slow_attr));
slow_attr->action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
slow_attr->split_count = 0;
- slow_attr->dest_chain = FDB_TC_SLOW_PATH_CHAIN;
+ slow_attr->flags |= MLX5_ESW_ATTR_FLAG_SLOW_PATH;
rule = mlx5e_tc_offload_fdb_rules(esw, flow, spec, slow_attr);
if (!IS_ERR(rule))
@@ -1097,7 +1101,7 @@ mlx5e_tc_unoffload_from_slow_path(struct mlx5_eswitch *esw,
memcpy(slow_attr, flow->esw_attr, sizeof(*slow_attr));
slow_attr->action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
slow_attr->split_count = 0;
- slow_attr->dest_chain = FDB_TC_SLOW_PATH_CHAIN;
+ slow_attr->flags |= MLX5_ESW_ATTR_FLAG_SLOW_PATH;
mlx5e_tc_unoffload_fdb_rules(esw, flow, slow_attr);
flow_flag_clear(flow, SLOW);
}
@@ -1157,19 +1161,18 @@ mlx5e_tc_add_fdb_flow(struct mlx5e_priv *priv,
struct netlink_ext_ack *extack)
{
struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
- u32 max_chain = mlx5_eswitch_get_chain_range(esw);
struct mlx5_esw_flow_attr *attr = flow->esw_attr;
struct mlx5e_tc_flow_parse_attr *parse_attr = attr->parse_attr;
- u16 max_prio = mlx5_eswitch_get_prio_range(esw);
struct net_device *out_dev, *encap_dev = NULL;
struct mlx5_fc *counter = NULL;
struct mlx5e_rep_priv *rpriv;
struct mlx5e_priv *out_priv;
bool encap_valid = true;
+ u32 max_prio, max_chain;
int err = 0;
int out_index;
- if (!mlx5_eswitch_prios_supported(esw) && attr->prio != 1) {
+ if (!mlx5_esw_chains_prios_supported(esw) && attr->prio != 1) {
NL_SET_ERR_MSG(extack, "E-switch priorities unsupported, upgrade FW");
return -EOPNOTSUPP;
}
@@ -1179,11 +1182,13 @@ mlx5e_tc_add_fdb_flow(struct mlx5e_priv *priv,
* FDB_FT_CHAIN which is outside tc range.
* See mlx5e_rep_setup_ft_cb().
*/
+ max_chain = mlx5_esw_chains_get_chain_range(esw);
if (!mlx5e_is_ft_flow(flow) && attr->chain > max_chain) {
NL_SET_ERR_MSG(extack, "Requested chain is out of supported range");
return -EOPNOTSUPP;
}
+ max_prio = mlx5_esw_chains_get_prio_range(esw);
if (attr->prio > max_prio) {
NL_SET_ERR_MSG(extack, "Requested priority is out of supported range");
return -EOPNOTSUPP;
@@ -1805,6 +1810,40 @@ static void *get_match_headers_value(u32 flags,
outer_headers);
}
+static int mlx5e_flower_parse_meta(struct net_device *filter_dev,
+ struct flow_cls_offload *f)
+{
+ struct flow_rule *rule = flow_cls_offload_flow_rule(f);
+ struct netlink_ext_ack *extack = f->common.extack;
+ struct net_device *ingress_dev;
+ struct flow_match_meta match;
+
+ if (!flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_META))
+ return 0;
+
+ flow_rule_match_meta(rule, &match);
+ if (match.mask->ingress_ifindex != 0xFFFFFFFF) {
+ NL_SET_ERR_MSG_MOD(extack, "Unsupported ingress ifindex mask");
+ return -EINVAL;
+ }
+
+ ingress_dev = __dev_get_by_index(dev_net(filter_dev),
+ match.key->ingress_ifindex);
+ if (!ingress_dev) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Can't find the ingress port to match on");
+ return -EINVAL;
+ }
+
+ if (ingress_dev != filter_dev) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Can't match on the ingress filter port");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
static int __parse_cls_flower(struct mlx5e_priv *priv,
struct mlx5_flow_spec *spec,
struct flow_cls_offload *f,
@@ -1825,6 +1864,7 @@ static int __parse_cls_flower(struct mlx5e_priv *priv,
u16 addr_type = 0;
u8 ip_proto = 0;
u8 *match_level;
+ int err;
match_level = outer_match_level;
@@ -1868,6 +1908,10 @@ static int __parse_cls_flower(struct mlx5e_priv *priv,
spec);
}
+ err = mlx5e_flower_parse_meta(filter_dev, f);
+ if (err)
+ return err;
+
if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_BASIC)) {
struct flow_match_basic match;
@@ -2842,6 +2886,10 @@ static int parse_tc_nic_actions(struct mlx5e_priv *priv,
flow_action_for_each(i, act, flow_action) {
switch (act->id) {
+ case FLOW_ACTION_ACCEPT:
+ action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST |
+ MLX5_FLOW_CONTEXT_ACTION_COUNT;
+ break;
case FLOW_ACTION_DROP:
action |= MLX5_FLOW_CONTEXT_ACTION_DROP;
if (MLX5_CAP_FLOWTABLE(priv->mdev,
@@ -3462,7 +3510,7 @@ static int parse_tc_fdb_actions(struct mlx5e_priv *priv,
break;
case FLOW_ACTION_GOTO: {
u32 dest_chain = act->chain_index;
- u32 max_chain = mlx5_eswitch_get_chain_range(esw);
+ u32 max_chain = mlx5_esw_chains_get_chain_range(esw);
if (ft_flow) {
NL_SET_ERR_MSG_MOD(extack, "Goto action is not supported");
@@ -4036,6 +4084,13 @@ static int apply_police_params(struct mlx5e_priv *priv, u32 rate,
u32 rate_mbps;
int err;
+ vport_num = rpriv->rep->vport;
+ if (vport_num >= MLX5_VPORT_ECPF) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Ingress rate limit is supported only for Eswitch ports connected to VFs");
+ return -EOPNOTSUPP;
+ }
+
esw = priv->mdev->priv.eswitch;
/* rate is given in bytes/sec.
 * First convert to bits/sec and then round to the nearest mbit/sec.
@@ -4044,8 +4099,6 @@ static int apply_police_params(struct mlx5e_priv *priv, u32 rate,
* 1 mbit/sec.
*/
rate_mbps = rate ? max_t(u32, (rate * 8 + 500000) / 1000000, 1) : 0;
- vport_num = rpriv->rep->vport;
-
err = mlx5_esw_modify_vport_rate(esw, vport_num, rate_mbps);
if (err)
NL_SET_ERR_MSG_MOD(extack, "failed applying action to hardware");
@@ -4198,7 +4251,10 @@ int mlx5e_tc_nic_init(struct mlx5e_priv *priv)
return err;
tc->netdevice_nb.notifier_call = mlx5e_tc_netdev_event;
- if (register_netdevice_notifier(&tc->netdevice_nb)) {
+ err = register_netdevice_notifier_dev_net(priv->netdev,
+ &tc->netdevice_nb,
+ &tc->netdevice_nn);
+ if (err) {
tc->netdevice_nb.notifier_call = NULL;
mlx5_core_warn(priv->mdev, "Failed to register netdev notifier\n");
}
@@ -4220,7 +4276,9 @@ void mlx5e_tc_nic_cleanup(struct mlx5e_priv *priv)
struct mlx5e_tc_table *tc = &priv->fs.tc;
if (tc->netdevice_nb.notifier_call)
- unregister_netdevice_notifier(&tc->netdevice_nb);
+ unregister_netdevice_notifier_dev_net(priv->netdev,
+ &tc->netdevice_nb,
+ &tc->netdevice_nn);
mutex_destroy(&tc->mod_hdr.lock);
mutex_destroy(&tc->hairpin_tbl_lock);
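
The bytes/sec-to-Mbit/sec conversion that this hunk moves below the new vport check deserves a numeric sanity check: round to nearest, but clamp any nonzero rate up to 1 Mbit/s (per the comment retained in context above) so it is never rounded down to zero:

#include <stdio.h>

/* Same arithmetic as the rate_mbps line in apply_police_params(). */
static unsigned int rate_mbps(unsigned int rate)
{
	unsigned int m = (rate * 8u + 500000u) / 1000000u;

	return rate ? (m > 1u ? m : 1u) : 0;
}

int main(void)
{
	printf("%u\n", rate_mbps(0));	       /* 0: policer disabled */
	printf("%u\n", rate_mbps(1000));       /* 8 kbit/s, clamps to 1 */
	printf("%u\n", rate_mbps(187500));     /* 1.5 Mbit/s, rounds to 2 */
	printf("%u\n", rate_mbps(125000000));  /* 1 Gbit/s -> 1000 */
	return 0;
}
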
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eq.c b/drivers/net/ethernet/mellanox/mlx5/core/eq.c
index 580c71cb9dfa..cccea3a8eddd 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eq.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eq.c
@@ -156,7 +156,8 @@ static int mlx5_eq_comp_int(struct notifier_block *nb,
cq->comp(cq, eqe);
mlx5_cq_put(cq);
} else {
- mlx5_core_warn(eq->dev, "Completion event for bogus CQ 0x%x\n", cqn);
+ dev_dbg_ratelimited(eq->dev->device,
+ "Completion event for bogus CQ 0x%x\n", cqn);
}
++eq->cons_index;
@@ -563,6 +564,39 @@ static void gather_async_events_mask(struct mlx5_core_dev *dev, u64 mask[4])
gather_user_async_events(dev, mask);
}
+static int
+setup_async_eq(struct mlx5_core_dev *dev, struct mlx5_eq_async *eq,
+ struct mlx5_eq_param *param, const char *name)
+{
+ int err;
+
+ eq->irq_nb.notifier_call = mlx5_eq_async_int;
+
+ err = create_async_eq(dev, &eq->core, param);
+ if (err) {
+ mlx5_core_warn(dev, "failed to create %s EQ %d\n", name, err);
+ return err;
+ }
+ err = mlx5_eq_enable(dev, &eq->core, &eq->irq_nb);
+ if (err) {
+ mlx5_core_warn(dev, "failed to enable %s EQ %d\n", name, err);
+ destroy_async_eq(dev, &eq->core);
+ }
+ return err;
+}
+
+static void cleanup_async_eq(struct mlx5_core_dev *dev,
+ struct mlx5_eq_async *eq, const char *name)
+{
+ int err;
+
+ mlx5_eq_disable(dev, &eq->core, &eq->irq_nb);
+ err = destroy_async_eq(dev, &eq->core);
+ if (err)
+ mlx5_core_err(dev, "failed to destroy %s eq, err(%d)\n",
+ name, err);
+}
+
static int create_async_eqs(struct mlx5_core_dev *dev)
{
struct mlx5_eq_table *table = dev->priv.eq_table;
@@ -572,77 +606,45 @@ static int create_async_eqs(struct mlx5_core_dev *dev)
MLX5_NB_INIT(&table->cq_err_nb, cq_err_event_notifier, CQ_ERROR);
mlx5_eq_notifier_register(dev, &table->cq_err_nb);
- table->cmd_eq.irq_nb.notifier_call = mlx5_eq_async_int;
param = (struct mlx5_eq_param) {
.irq_index = 0,
.nent = MLX5_NUM_CMD_EQE,
+ .mask[0] = 1ull << MLX5_EVENT_TYPE_CMD,
};
-
- param.mask[0] = 1ull << MLX5_EVENT_TYPE_CMD;
- err = create_async_eq(dev, &table->cmd_eq.core, &param);
- if (err) {
- mlx5_core_warn(dev, "failed to create cmd EQ %d\n", err);
- goto err0;
- }
- err = mlx5_eq_enable(dev, &table->cmd_eq.core, &table->cmd_eq.irq_nb);
- if (err) {
- mlx5_core_warn(dev, "failed to enable cmd EQ %d\n", err);
+ err = setup_async_eq(dev, &table->cmd_eq, &param, "cmd");
+ if (err)
goto err1;
- }
+
mlx5_cmd_use_events(dev);
- table->async_eq.irq_nb.notifier_call = mlx5_eq_async_int;
param = (struct mlx5_eq_param) {
.irq_index = 0,
.nent = MLX5_NUM_ASYNC_EQE,
};
gather_async_events_mask(dev, param.mask);
- err = create_async_eq(dev, &table->async_eq.core, &param);
- if (err) {
- mlx5_core_warn(dev, "failed to create async EQ %d\n", err);
+ err = setup_async_eq(dev, &table->async_eq, &param, "async");
+ if (err)
goto err2;
- }
- err = mlx5_eq_enable(dev, &table->async_eq.core,
- &table->async_eq.irq_nb);
- if (err) {
- mlx5_core_warn(dev, "failed to enable async EQ %d\n", err);
- goto err3;
- }
- table->pages_eq.irq_nb.notifier_call = mlx5_eq_async_int;
param = (struct mlx5_eq_param) {
.irq_index = 0,
.nent = /* TODO: sriov max_vf + */ 1,
+ .mask[0] = 1ull << MLX5_EVENT_TYPE_PAGE_REQUEST,
};
- param.mask[0] = 1ull << MLX5_EVENT_TYPE_PAGE_REQUEST;
- err = create_async_eq(dev, &table->pages_eq.core, &param);
- if (err) {
- mlx5_core_warn(dev, "failed to create pages EQ %d\n", err);
- goto err4;
- }
- err = mlx5_eq_enable(dev, &table->pages_eq.core,
- &table->pages_eq.irq_nb);
- if (err) {
- mlx5_core_warn(dev, "failed to enable pages EQ %d\n", err);
- goto err5;
- }
+ err = setup_async_eq(dev, &table->pages_eq, &param, "pages");
+ if (err)
+ goto err3;
- return err;
+ return 0;
-err5:
- destroy_async_eq(dev, &table->pages_eq.core);
-err4:
- mlx5_eq_disable(dev, &table->async_eq.core, &table->async_eq.irq_nb);
err3:
- destroy_async_eq(dev, &table->async_eq.core);
+ cleanup_async_eq(dev, &table->async_eq, "async");
err2:
mlx5_cmd_use_polling(dev);
- mlx5_eq_disable(dev, &table->cmd_eq.core, &table->cmd_eq.irq_nb);
+ cleanup_async_eq(dev, &table->cmd_eq, "cmd");
err1:
- destroy_async_eq(dev, &table->cmd_eq.core);
-err0:
mlx5_eq_notifier_unregister(dev, &table->cq_err_nb);
return err;
}
@@ -650,28 +652,11 @@ err0:
static void destroy_async_eqs(struct mlx5_core_dev *dev)
{
struct mlx5_eq_table *table = dev->priv.eq_table;
- int err;
-
- mlx5_eq_disable(dev, &table->pages_eq.core, &table->pages_eq.irq_nb);
- err = destroy_async_eq(dev, &table->pages_eq.core);
- if (err)
- mlx5_core_err(dev, "failed to destroy pages eq, err(%d)\n",
- err);
-
- mlx5_eq_disable(dev, &table->async_eq.core, &table->async_eq.irq_nb);
- err = destroy_async_eq(dev, &table->async_eq.core);
- if (err)
- mlx5_core_err(dev, "failed to destroy async eq, err(%d)\n",
- err);
+ cleanup_async_eq(dev, &table->pages_eq, "pages");
+ cleanup_async_eq(dev, &table->async_eq, "async");
mlx5_cmd_use_polling(dev);
-
- mlx5_eq_disable(dev, &table->cmd_eq.core, &table->cmd_eq.irq_nb);
- err = destroy_async_eq(dev, &table->cmd_eq.core);
- if (err)
- mlx5_core_err(dev, "failed to destroy command eq, err(%d)\n",
- err);
-
+ cleanup_async_eq(dev, &table->cmd_eq, "cmd");
mlx5_eq_notifier_unregister(dev, &table->cq_err_nb);
}
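
The eq.c refactor above folds create+enable into setup_async_eq() and disable+destroy into cleanup_async_eq(), so create_async_eqs() needs one unwind label per EQ instead of two. A standalone sketch of that paired setup/cleanup plus goto-unwind shape, assuming teardown must run in strict reverse order (all names are illustrative):

#include <stdio.h>

struct eq { int created; int enabled; };

static int setup_eq(struct eq *eq, const char *name)
{
	eq->created = 1;                 /* stands in for create_async_eq() */
	eq->enabled = 1;                 /* stands in for mlx5_eq_enable() */
	printf("%s: created+enabled\n", name);
	return 0;                        /* on enable failure, undo create here */
}

static void cleanup_eq(struct eq *eq, const char *name)
{
	eq->enabled = 0;                 /* stands in for mlx5_eq_disable() */
	eq->created = 0;                 /* stands in for destroy_async_eq() */
	printf("%s: disabled+destroyed\n", name);
}

int main(void)
{
	struct eq cmd = {0}, async = {0}, pages = {0};
	int err;

	if ((err = setup_eq(&cmd, "cmd")))
		goto err_cmd;
	if ((err = setup_eq(&async, "async")))
		goto err_async;
	if ((err = setup_eq(&pages, "pages")))
		goto err_pages;
	/* teardown runs in strict reverse order */
	cleanup_eq(&pages, "pages");
	cleanup_eq(&async, "async");
	cleanup_eq(&cmd, "cmd");
	return 0;

err_pages:
	cleanup_eq(&async, "async");
err_async:
	cleanup_eq(&cmd, "cmd");
err_cmd:
	return err;
}
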
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
index 2c965ad0d744..5acf60b1bbfe 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
@@ -277,6 +277,7 @@ enum {
static int esw_create_legacy_vepa_table(struct mlx5_eswitch *esw)
{
+ struct mlx5_flow_table_attr ft_attr = {};
struct mlx5_core_dev *dev = esw->dev;
struct mlx5_flow_namespace *root_ns;
struct mlx5_flow_table *fdb;
@@ -289,8 +290,10 @@ static int esw_create_legacy_vepa_table(struct mlx5_eswitch *esw)
}
/* num FTE 2, num FG 2 */
- fdb = mlx5_create_auto_grouped_flow_table(root_ns, LEGACY_VEPA_PRIO,
- 2, 2, 0, 0);
+ ft_attr.prio = LEGACY_VEPA_PRIO;
+ ft_attr.max_fte = 2;
+ ft_attr.autogroup.max_num_groups = 2;
+ fdb = mlx5_create_auto_grouped_flow_table(root_ns, &ft_attr);
if (IS_ERR(fdb)) {
err = PTR_ERR(fdb);
esw_warn(dev, "Failed to create VEPA FDB err %d\n", err);
@@ -1928,8 +1931,10 @@ static void mlx5_eswitch_clear_vf_vports_info(struct mlx5_eswitch *esw)
struct mlx5_vport *vport;
int i;
- mlx5_esw_for_each_vf_vport(esw, i, vport, esw->esw_funcs.num_vfs)
+ mlx5_esw_for_each_vf_vport(esw, i, vport, esw->esw_funcs.num_vfs) {
memset(&vport->info, 0, sizeof(vport->info));
+ vport->info.link_state = MLX5_VPORT_ADMIN_STATE_AUTO;
+ }
}
/* Public E-Switch API */
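
esw_create_legacy_vepa_table() now fills a struct mlx5_flow_table_attr instead of passing prio, size, and group count positionally to mlx5_create_auto_grouped_flow_table(). A small sketch of why the attribute-struct style scales better, using illustrative types:

#include <stdio.h>

struct table_attr {
	int prio;
	int max_fte;
	struct { int max_num_groups; int num_reserved_entries; } autogroup;
	unsigned int flags;
};

static void create_table(const struct table_attr *attr)
{
	printf("prio=%d max_fte=%d groups=%d\n",
	       attr->prio, attr->max_fte, attr->autogroup.max_num_groups);
}

int main(void)
{
	/* Unset fields default to zero, so adding a new attribute later
	 * does not touch existing callers - unlike a positional API. */
	struct table_attr attr = {
		.prio = 16, /* stand-in for LEGACY_VEPA_PRIO */
		.max_fte = 2,
		.autogroup.max_num_groups = 2,
	};

	create_table(&attr);
	return 0;
}
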
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h
index ffcff3ba3701..4472710ccc9c 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h
@@ -157,7 +157,7 @@ enum offloads_fdb_flags {
ESW_FDB_CHAINS_AND_PRIOS_SUPPORTED = BIT(0),
};
-extern const unsigned int ESW_POOLS[4];
+struct mlx5_esw_chains_priv;
struct mlx5_eswitch_fdb {
union {
@@ -182,14 +182,7 @@ struct mlx5_eswitch_fdb {
struct mlx5_flow_handle *miss_rule_multi;
int vlan_push_pop_refcount;
- struct {
- struct mlx5_flow_table *fdb;
- u32 num_rules;
- } fdb_prio[FDB_NUM_CHAINS][FDB_TC_MAX_PRIO + 1][FDB_TC_LEVELS_PER_PRIO];
- /* Protects fdb_prio table */
- struct mutex fdb_prio_lock;
-
- int fdb_left[ARRAY_SIZE(ESW_POOLS)];
+ struct mlx5_esw_chains_priv *esw_chains_priv;
} offloads;
};
u32 flags;
@@ -355,15 +348,6 @@ mlx5_eswitch_del_fwd_rule(struct mlx5_eswitch *esw,
struct mlx5_flow_handle *rule,
struct mlx5_esw_flow_attr *attr);
-bool
-mlx5_eswitch_prios_supported(struct mlx5_eswitch *esw);
-
-u16
-mlx5_eswitch_get_prio_range(struct mlx5_eswitch *esw);
-
-u32
-mlx5_eswitch_get_chain_range(struct mlx5_eswitch *esw);
-
struct mlx5_flow_handle *
mlx5_eswitch_create_vport_rx_rule(struct mlx5_eswitch *esw, u16 vport,
struct mlx5_flow_destination *dest);
@@ -388,6 +372,11 @@ enum {
MLX5_ESW_DEST_ENCAP_VALID = BIT(1),
};
+enum {
+ MLX5_ESW_ATTR_FLAG_VLAN_HANDLED = BIT(0),
+ MLX5_ESW_ATTR_FLAG_SLOW_PATH = BIT(1),
+};
+
struct mlx5_esw_flow_attr {
struct mlx5_eswitch_rep *in_rep;
struct mlx5_core_dev *in_mdev;
@@ -401,7 +390,6 @@ struct mlx5_esw_flow_attr {
u16 vlan_vid[MLX5_FS_VLAN_DEPTH];
u8 vlan_prio[MLX5_FS_VLAN_DEPTH];
u8 total_vlan;
- bool vlan_handled;
struct {
u32 flags;
struct mlx5_eswitch_rep *rep;
@@ -416,6 +404,7 @@ struct mlx5_esw_flow_attr {
u32 chain;
u16 prio;
u32 dest_chain;
+ u32 flags;
struct mlx5e_tc_flow_parse_attr *parse_attr;
};
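
The header replaces the single vlan_handled bool with a flags word and MLX5_ESW_ATTR_FLAG_* bits, so later attributes (such as SLOW_PATH) need no new fields. The usual set, clear, and test idioms, as a runnable sketch with stand-in names:

#include <stdio.h>

#define ATTR_FLAG_VLAN_HANDLED (1u << 0)
#define ATTR_FLAG_SLOW_PATH    (1u << 1)

struct flow_attr { unsigned int flags; };

int main(void)
{
	struct flow_attr attr = { 0 };

	attr.flags |= ATTR_FLAG_VLAN_HANDLED;  /* was: attr->vlan_handled = true */
	attr.flags &= ~ATTR_FLAG_VLAN_HANDLED; /* was: attr->vlan_handled = false */
	attr.flags |= ATTR_FLAG_SLOW_PATH;

	if (attr.flags & ATTR_FLAG_SLOW_PATH)  /* test a single flag */
		printf("slow path rule\n");
	if (!(attr.flags & ATTR_FLAG_VLAN_HANDLED))
		printf("vlan not handled\n");
	return 0;
}
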
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
index 243a5440867e..979f13bdc203 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
@@ -37,6 +37,7 @@
#include <linux/mlx5/fs.h>
#include "mlx5_core.h"
#include "eswitch.h"
+#include "eswitch_offloads_chains.h"
#include "rdma.h"
#include "en.h"
#include "fs_core.h"
@@ -47,10 +48,6 @@
* one for multicast.
*/
#define MLX5_ESW_MISS_FLOWS (2)
-
-#define fdb_prio_table(esw, chain, prio, level) \
- (esw)->fdb_table.offloads.fdb_prio[(chain)][(prio)][(level)]
-
#define UPLINK_REP_INDEX 0
static struct mlx5_eswitch_rep *mlx5_eswitch_get_rep(struct mlx5_eswitch *esw,
@@ -62,32 +59,6 @@ static struct mlx5_eswitch_rep *mlx5_eswitch_get_rep(struct mlx5_eswitch *esw,
return &esw->offloads.vport_reps[idx];
}
-static struct mlx5_flow_table *
-esw_get_prio_table(struct mlx5_eswitch *esw, u32 chain, u16 prio, int level);
-static void
-esw_put_prio_table(struct mlx5_eswitch *esw, u32 chain, u16 prio, int level);
-
-bool mlx5_eswitch_prios_supported(struct mlx5_eswitch *esw)
-{
- return (!!(esw->fdb_table.flags & ESW_FDB_CHAINS_AND_PRIOS_SUPPORTED));
-}
-
-u32 mlx5_eswitch_get_chain_range(struct mlx5_eswitch *esw)
-{
- if (esw->fdb_table.flags & ESW_FDB_CHAINS_AND_PRIOS_SUPPORTED)
- return FDB_TC_MAX_CHAIN;
-
- return 0;
-}
-
-u16 mlx5_eswitch_get_prio_range(struct mlx5_eswitch *esw)
-{
- if (esw->fdb_table.flags & ESW_FDB_CHAINS_AND_PRIOS_SUPPORTED)
- return FDB_TC_MAX_PRIO;
-
- return 1;
-}
-
static bool
esw_check_ingress_prio_tag_enabled(const struct mlx5_eswitch *esw,
const struct mlx5_vport *vport)
@@ -175,10 +146,17 @@ mlx5_eswitch_add_offloaded_rule(struct mlx5_eswitch *esw,
}
if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST) {
- if (attr->dest_chain) {
- struct mlx5_flow_table *ft;
+ struct mlx5_flow_table *ft;
- ft = esw_get_prio_table(esw, attr->dest_chain, 1, 0);
+ if (attr->flags & MLX5_ESW_ATTR_FLAG_SLOW_PATH) {
+ flow_act.flags |= FLOW_ACT_IGNORE_FLOW_LEVEL;
+ dest[i].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
+ dest[i].ft = mlx5_esw_chains_get_tc_end_ft(esw);
+ i++;
+ } else if (attr->dest_chain) {
+ flow_act.flags |= FLOW_ACT_IGNORE_FLOW_LEVEL;
+ ft = mlx5_esw_chains_get_table(esw, attr->dest_chain,
+ 1, 0);
if (IS_ERR(ft)) {
rule = ERR_CAST(ft);
goto err_create_goto_table;
@@ -223,7 +201,8 @@ mlx5_eswitch_add_offloaded_rule(struct mlx5_eswitch *esw,
if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR)
flow_act.modify_hdr = attr->modify_hdr;
- fdb = esw_get_prio_table(esw, attr->chain, attr->prio, !!split);
+ fdb = mlx5_esw_chains_get_table(esw, attr->chain, attr->prio,
+ !!split);
if (IS_ERR(fdb)) {
rule = ERR_CAST(fdb);
goto err_esw_get;
@@ -242,10 +221,10 @@ mlx5_eswitch_add_offloaded_rule(struct mlx5_eswitch *esw,
return rule;
err_add_rule:
- esw_put_prio_table(esw, attr->chain, attr->prio, !!split);
+ mlx5_esw_chains_put_table(esw, attr->chain, attr->prio, !!split);
err_esw_get:
- if (attr->dest_chain)
- esw_put_prio_table(esw, attr->dest_chain, 1, 0);
+ if (!(attr->flags & MLX5_ESW_ATTR_FLAG_SLOW_PATH) && attr->dest_chain)
+ mlx5_esw_chains_put_table(esw, attr->dest_chain, 1, 0);
err_create_goto_table:
return rule;
}
@@ -262,13 +241,13 @@ mlx5_eswitch_add_fwd_rule(struct mlx5_eswitch *esw,
struct mlx5_flow_handle *rule;
int i;
- fast_fdb = esw_get_prio_table(esw, attr->chain, attr->prio, 0);
+ fast_fdb = mlx5_esw_chains_get_table(esw, attr->chain, attr->prio, 0);
if (IS_ERR(fast_fdb)) {
rule = ERR_CAST(fast_fdb);
goto err_get_fast;
}
- fwd_fdb = esw_get_prio_table(esw, attr->chain, attr->prio, 1);
+ fwd_fdb = mlx5_esw_chains_get_table(esw, attr->chain, attr->prio, 1);
if (IS_ERR(fwd_fdb)) {
rule = ERR_CAST(fwd_fdb);
goto err_get_fwd;
@@ -296,6 +275,7 @@ mlx5_eswitch_add_fwd_rule(struct mlx5_eswitch *esw,
if (attr->outer_match_level != MLX5_MATCH_NONE)
spec->match_criteria_enable |= MLX5_MATCH_OUTER_HEADERS;
+ flow_act.flags |= FLOW_ACT_IGNORE_FLOW_LEVEL;
rule = mlx5_add_flow_rules(fast_fdb, spec, &flow_act, dest, i);
if (IS_ERR(rule))
@@ -305,9 +285,9 @@ mlx5_eswitch_add_fwd_rule(struct mlx5_eswitch *esw,
return rule;
add_err:
- esw_put_prio_table(esw, attr->chain, attr->prio, 1);
+ mlx5_esw_chains_put_table(esw, attr->chain, attr->prio, 1);
err_get_fwd:
- esw_put_prio_table(esw, attr->chain, attr->prio, 0);
+ mlx5_esw_chains_put_table(esw, attr->chain, attr->prio, 0);
err_get_fast:
return rule;
}
@@ -332,12 +312,13 @@ __mlx5_eswitch_del_rule(struct mlx5_eswitch *esw,
atomic64_dec(&esw->offloads.num_flows);
if (fwd_rule) {
- esw_put_prio_table(esw, attr->chain, attr->prio, 1);
- esw_put_prio_table(esw, attr->chain, attr->prio, 0);
+ mlx5_esw_chains_put_table(esw, attr->chain, attr->prio, 1);
+ mlx5_esw_chains_put_table(esw, attr->chain, attr->prio, 0);
} else {
- esw_put_prio_table(esw, attr->chain, attr->prio, !!split);
+ mlx5_esw_chains_put_table(esw, attr->chain, attr->prio,
+ !!split);
if (attr->dest_chain)
- esw_put_prio_table(esw, attr->dest_chain, 1, 0);
+ mlx5_esw_chains_put_table(esw, attr->dest_chain, 1, 0);
}
}
@@ -451,7 +432,7 @@ int mlx5_eswitch_add_vlan_action(struct mlx5_eswitch *esw,
if (err)
goto unlock;
- attr->vlan_handled = false;
+ attr->flags &= ~MLX5_ESW_ATTR_FLAG_VLAN_HANDLED;
vport = esw_vlan_action_get_vport(attr, push, pop);
@@ -459,7 +440,7 @@ int mlx5_eswitch_add_vlan_action(struct mlx5_eswitch *esw,
/* tracks VF --> wire rules without vlan push action */
if (attr->dests[0].rep->vport == MLX5_VPORT_UPLINK) {
vport->vlan_refcount++;
- attr->vlan_handled = true;
+ attr->flags |= MLX5_ESW_ATTR_FLAG_VLAN_HANDLED;
}
goto unlock;
@@ -490,7 +471,7 @@ skip_set_push:
}
out:
if (!err)
- attr->vlan_handled = true;
+ attr->flags |= MLX5_ESW_ATTR_FLAG_VLAN_HANDLED;
unlock:
mutex_unlock(&esw->state_lock);
return err;
@@ -508,7 +489,7 @@ int mlx5_eswitch_del_vlan_action(struct mlx5_eswitch *esw,
if (mlx5_eswitch_vlan_actions_supported(esw->dev, 1))
return 0;
- if (!attr->vlan_handled)
+ if (!(attr->flags & MLX5_ESW_ATTR_FLAG_VLAN_HANDLED))
return 0;
push = !!(attr->action & MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH);
@@ -582,8 +563,8 @@ mlx5_eswitch_add_send_to_vport_rule(struct mlx5_eswitch *esw, u16 vport,
dest.vport.num = vport;
flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
- flow_rule = mlx5_add_flow_rules(esw->fdb_table.offloads.slow_fdb, spec,
- &flow_act, &dest, 1);
+ flow_rule = mlx5_add_flow_rules(esw->fdb_table.offloads.slow_fdb,
+ spec, &flow_act, &dest, 1);
if (IS_ERR(flow_rule))
esw_warn(esw->dev, "FDB: Failed to add send to vport rule err %ld\n", PTR_ERR(flow_rule));
out:
@@ -824,8 +805,8 @@ static int esw_add_fdb_miss_rule(struct mlx5_eswitch *esw)
dest.vport.num = esw->manager_vport;
flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
- flow_rule = mlx5_add_flow_rules(esw->fdb_table.offloads.slow_fdb, spec,
- &flow_act, &dest, 1);
+ flow_rule = mlx5_add_flow_rules(esw->fdb_table.offloads.slow_fdb,
+ spec, &flow_act, &dest, 1);
if (IS_ERR(flow_rule)) {
err = PTR_ERR(flow_rule);
esw_warn(esw->dev, "FDB: Failed to add unicast miss flow rule err %d\n", err);
@@ -839,8 +820,8 @@ static int esw_add_fdb_miss_rule(struct mlx5_eswitch *esw)
dmac_v = MLX5_ADDR_OF(fte_match_param, headers_v,
outer_headers.dmac_47_16);
dmac_v[0] = 0x01;
- flow_rule = mlx5_add_flow_rules(esw->fdb_table.offloads.slow_fdb, spec,
- &flow_act, &dest, 1);
+ flow_rule = mlx5_add_flow_rules(esw->fdb_table.offloads.slow_fdb,
+ spec, &flow_act, &dest, 1);
if (IS_ERR(flow_rule)) {
err = PTR_ERR(flow_rule);
esw_warn(esw->dev, "FDB: Failed to add multicast miss flow rule err %d\n", err);
@@ -855,174 +836,6 @@ out:
return err;
}
-#define ESW_OFFLOADS_NUM_GROUPS 4
-
-/* Firmware currently has 4 pool of 4 sizes that it supports (ESW_POOLS),
- * and a virtual memory region of 16M (ESW_SIZE), this region is duplicated
- * for each flow table pool. We can allocate up to 16M of each pool,
- * and we keep track of how much we used via put/get_sz_to_pool.
- * Firmware doesn't report any of this for now.
- * ESW_POOL is expected to be sorted from large to small
- */
-#define ESW_SIZE (16 * 1024 * 1024)
-const unsigned int ESW_POOLS[4] = { 4 * 1024 * 1024, 1 * 1024 * 1024,
- 64 * 1024, 4 * 1024 };
-
-static int
-get_sz_from_pool(struct mlx5_eswitch *esw)
-{
- int sz = 0, i;
-
- for (i = 0; i < ARRAY_SIZE(ESW_POOLS); i++) {
- if (esw->fdb_table.offloads.fdb_left[i]) {
- --esw->fdb_table.offloads.fdb_left[i];
- sz = ESW_POOLS[i];
- break;
- }
- }
-
- return sz;
-}
-
-static void
-put_sz_to_pool(struct mlx5_eswitch *esw, int sz)
-{
- int i;
-
- for (i = 0; i < ARRAY_SIZE(ESW_POOLS); i++) {
- if (sz >= ESW_POOLS[i]) {
- ++esw->fdb_table.offloads.fdb_left[i];
- break;
- }
- }
-}
-
-static struct mlx5_flow_table *
-create_next_size_table(struct mlx5_eswitch *esw,
- struct mlx5_flow_namespace *ns,
- u16 table_prio,
- int level,
- u32 flags)
-{
- struct mlx5_flow_table *fdb;
- int sz;
-
- sz = get_sz_from_pool(esw);
- if (!sz)
- return ERR_PTR(-ENOSPC);
-
- fdb = mlx5_create_auto_grouped_flow_table(ns,
- table_prio,
- sz,
- ESW_OFFLOADS_NUM_GROUPS,
- level,
- flags);
- if (IS_ERR(fdb)) {
- esw_warn(esw->dev, "Failed to create FDB Table err %d (table prio: %d, level: %d, size: %d)\n",
- (int)PTR_ERR(fdb), table_prio, level, sz);
- put_sz_to_pool(esw, sz);
- }
-
- return fdb;
-}
-
-static struct mlx5_flow_table *
-esw_get_prio_table(struct mlx5_eswitch *esw, u32 chain, u16 prio, int level)
-{
- struct mlx5_core_dev *dev = esw->dev;
- struct mlx5_flow_table *fdb = NULL;
- struct mlx5_flow_namespace *ns;
- int table_prio, l = 0;
- u32 flags = 0;
-
- if (chain == FDB_TC_SLOW_PATH_CHAIN)
- return esw->fdb_table.offloads.slow_fdb;
-
- mutex_lock(&esw->fdb_table.offloads.fdb_prio_lock);
-
- fdb = fdb_prio_table(esw, chain, prio, level).fdb;
- if (fdb) {
- /* take ref on earlier levels as well */
- while (level >= 0)
- fdb_prio_table(esw, chain, prio, level--).num_rules++;
- mutex_unlock(&esw->fdb_table.offloads.fdb_prio_lock);
- return fdb;
- }
-
- ns = mlx5_get_fdb_sub_ns(dev, chain);
- if (!ns) {
- esw_warn(dev, "Failed to get FDB sub namespace\n");
- mutex_unlock(&esw->fdb_table.offloads.fdb_prio_lock);
- return ERR_PTR(-EOPNOTSUPP);
- }
-
- if (esw->offloads.encap != DEVLINK_ESWITCH_ENCAP_MODE_NONE)
- flags |= (MLX5_FLOW_TABLE_TUNNEL_EN_REFORMAT |
- MLX5_FLOW_TABLE_TUNNEL_EN_DECAP);
-
- table_prio = prio - 1;
-
- /* create earlier levels for correct fs_core lookup when
- * connecting tables
- */
- for (l = 0; l <= level; l++) {
- if (fdb_prio_table(esw, chain, prio, l).fdb) {
- fdb_prio_table(esw, chain, prio, l).num_rules++;
- continue;
- }
-
- fdb = create_next_size_table(esw, ns, table_prio, l, flags);
- if (IS_ERR(fdb)) {
- l--;
- goto err_create_fdb;
- }
-
- fdb_prio_table(esw, chain, prio, l).fdb = fdb;
- fdb_prio_table(esw, chain, prio, l).num_rules = 1;
- }
-
- mutex_unlock(&esw->fdb_table.offloads.fdb_prio_lock);
- return fdb;
-
-err_create_fdb:
- mutex_unlock(&esw->fdb_table.offloads.fdb_prio_lock);
- if (l >= 0)
- esw_put_prio_table(esw, chain, prio, l);
-
- return fdb;
-}
-
-static void
-esw_put_prio_table(struct mlx5_eswitch *esw, u32 chain, u16 prio, int level)
-{
- int l;
-
- if (chain == FDB_TC_SLOW_PATH_CHAIN)
- return;
-
- mutex_lock(&esw->fdb_table.offloads.fdb_prio_lock);
-
- for (l = level; l >= 0; l--) {
- if (--(fdb_prio_table(esw, chain, prio, l).num_rules) > 0)
- continue;
-
- put_sz_to_pool(esw, fdb_prio_table(esw, chain, prio, l).fdb->max_fte);
- mlx5_destroy_flow_table(fdb_prio_table(esw, chain, prio, l).fdb);
- fdb_prio_table(esw, chain, prio, l).fdb = NULL;
- }
-
- mutex_unlock(&esw->fdb_table.offloads.fdb_prio_lock);
-}
-
-static void esw_destroy_offloads_fast_fdb_tables(struct mlx5_eswitch *esw)
-{
- /* If lazy creation isn't supported, deref the fast path tables */
- if (!(esw->fdb_table.flags & ESW_FDB_CHAINS_AND_PRIOS_SUPPORTED)) {
- esw_put_prio_table(esw, 0, 1, 1);
- esw_put_prio_table(esw, 0, 1, 0);
- }
-}
-
#define MAX_PF_SQ 256
#define MAX_SQ_NVPORTS 32
@@ -1055,16 +868,16 @@ static int esw_create_offloads_fdb_tables(struct mlx5_eswitch *esw, int nvports)
int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
struct mlx5_flow_table_attr ft_attr = {};
struct mlx5_core_dev *dev = esw->dev;
- u32 *flow_group_in, max_flow_counter;
struct mlx5_flow_namespace *root_ns;
struct mlx5_flow_table *fdb = NULL;
- int table_size, ix, err = 0, i;
+ u32 flags = 0, *flow_group_in;
+ int table_size, ix, err = 0;
struct mlx5_flow_group *g;
- u32 flags = 0, fdb_max;
void *match_criteria;
u8 *dmac;
esw_debug(esw->dev, "Create offloads FDB Tables\n");
+
flow_group_in = kvzalloc(inlen, GFP_KERNEL);
if (!flow_group_in)
return -ENOMEM;
@@ -1083,19 +896,6 @@ static int esw_create_offloads_fdb_tables(struct mlx5_eswitch *esw, int nvports)
goto ns_err;
}
- max_flow_counter = (MLX5_CAP_GEN(dev, max_flow_counter_31_16) << 16) |
- MLX5_CAP_GEN(dev, max_flow_counter_15_0);
- fdb_max = 1 << MLX5_CAP_ESW_FLOWTABLE_FDB(dev, log_max_ft_size);
-
- esw_debug(dev, "Create offloads FDB table, min (max esw size(2^%d), max counters(%d), groups(%d), max flow table size(%d))\n",
- MLX5_CAP_ESW_FLOWTABLE_FDB(dev, log_max_ft_size),
- max_flow_counter, ESW_OFFLOADS_NUM_GROUPS,
- fdb_max);
-
- for (i = 0; i < ARRAY_SIZE(ESW_POOLS); i++)
- esw->fdb_table.offloads.fdb_left[i] =
- ESW_POOLS[i] <= fdb_max ? ESW_SIZE / ESW_POOLS[i] : 0;
-
table_size = nvports * MAX_SQ_NVPORTS + MAX_PF_SQ +
MLX5_ESW_MISS_FLOWS + esw->total_vports;
@@ -1118,16 +918,10 @@ static int esw_create_offloads_fdb_tables(struct mlx5_eswitch *esw, int nvports)
}
esw->fdb_table.offloads.slow_fdb = fdb;
- /* If lazy creation isn't supported, open the fast path tables now */
- if (!MLX5_CAP_ESW_FLOWTABLE(esw->dev, multi_fdb_encap) &&
- esw->offloads.encap != DEVLINK_ESWITCH_ENCAP_MODE_NONE) {
- esw->fdb_table.flags &= ~ESW_FDB_CHAINS_AND_PRIOS_SUPPORTED;
- esw_warn(dev, "Lazy creation of flow tables isn't supported, ignoring priorities\n");
- esw_get_prio_table(esw, 0, 1, 0);
- esw_get_prio_table(esw, 0, 1, 1);
- } else {
- esw_debug(dev, "Lazy creation of flow tables supported, deferring table opening\n");
- esw->fdb_table.flags |= ESW_FDB_CHAINS_AND_PRIOS_SUPPORTED;
+ err = mlx5_esw_chains_create(esw);
+ if (err) {
+ esw_warn(dev, "Failed to create fdb chains err(%d)\n", err);
+ goto fdb_chains_err;
}
/* create send-to-vport group */
@@ -1218,7 +1012,8 @@ miss_err:
peer_miss_err:
mlx5_destroy_flow_group(esw->fdb_table.offloads.send_to_vport_grp);
send_vport_err:
- esw_destroy_offloads_fast_fdb_tables(esw);
+ mlx5_esw_chains_destroy(esw);
+fdb_chains_err:
mlx5_destroy_flow_table(esw->fdb_table.offloads.slow_fdb);
slow_fdb_err:
/* Holds true only as long as DMFS is the default */
@@ -1240,8 +1035,8 @@ static void esw_destroy_offloads_fdb_tables(struct mlx5_eswitch *esw)
mlx5_destroy_flow_group(esw->fdb_table.offloads.peer_miss_grp);
mlx5_destroy_flow_group(esw->fdb_table.offloads.miss_grp);
+ mlx5_esw_chains_destroy(esw);
mlx5_destroy_flow_table(esw->fdb_table.offloads.slow_fdb);
- esw_destroy_offloads_fast_fdb_tables(esw);
/* Holds true only as long as DMFS is the default */
mlx5_flow_namespace_set_mode(esw->fdb_table.offloads.ns,
MLX5_FLOW_STEERING_MODE_DMFS);
@@ -1377,7 +1172,7 @@ static int esw_offloads_start(struct mlx5_eswitch *esw,
return -EINVAL;
}
- mlx5_eswitch_disable(esw, false);
+ mlx5_eswitch_disable(esw, true);
mlx5_eswitch_update_num_of_vfs(esw, esw->dev->priv.sriov.num_vfs);
err = mlx5_eswitch_enable(esw, MLX5_ESWITCH_OFFLOADS);
if (err) {
@@ -2111,7 +1906,6 @@ static int esw_offloads_steering_init(struct mlx5_eswitch *esw)
total_vports = num_vfs + MLX5_SPECIAL_VPORTS(esw->dev);
memset(&esw->fdb_table.offloads, 0, sizeof(struct offloads_fdb));
- mutex_init(&esw->fdb_table.offloads.fdb_prio_lock);
err = esw_create_uplink_offloads_acl_tables(esw);
if (err)
@@ -2220,7 +2014,8 @@ int mlx5_esw_funcs_changed_handler(struct notifier_block *nb, unsigned long type
int esw_offloads_enable(struct mlx5_eswitch *esw)
{
- int err;
+ struct mlx5_vport *vport;
+ int err, i;
if (MLX5_CAP_ESW_FLOWTABLE_FDB(esw->dev, reformat) &&
MLX5_CAP_ESW_FLOWTABLE_FDB(esw->dev, decap))
@@ -2237,6 +2032,10 @@ int esw_offloads_enable(struct mlx5_eswitch *esw)
if (err)
goto err_vport_metadata;
+ /* Representor will control the vport link state */
+ mlx5_esw_for_each_vf_vport(esw, i, vport, esw->esw_funcs.num_vfs)
+ vport->info.link_state = MLX5_VPORT_ADMIN_STATE_DOWN;
+
err = mlx5_eswitch_enable_pf_vf_vports(esw, MLX5_VPORT_UC_ADDR_CHANGE);
if (err)
goto err_vports;
@@ -2266,7 +2065,7 @@ static int esw_offloads_stop(struct mlx5_eswitch *esw,
{
int err, err1;
- mlx5_eswitch_disable(esw, false);
+ mlx5_eswitch_disable(esw, true);
err = mlx5_eswitch_enable(esw, MLX5_ESWITCH_LEGACY);
if (err) {
NL_SET_ERR_MSG_MOD(extack, "Failed setting eswitch to legacy");
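
The removed esw_get/put_prio_table() pair reappears in the new file below as mlx5_esw_chains_get/put_table(): a get-or-create lookup that refcounts tables and destroys them on the last put. A minimal userspace sketch of that lifecycle, with a single cached object standing in for the rhashtable:

#include <stdio.h>
#include <stdlib.h>

struct table { int chain, prio; int ref; };

static struct table *cached; /* stands in for the rhashtable lookup */

static struct table *get_table(int chain, int prio)
{
	if (!cached) {
		cached = calloc(1, sizeof(*cached));
		if (!cached)
			return NULL;
		cached->chain = chain;
		cached->prio = prio;
		printf("created table %d/%d\n", chain, prio);
	}
	cached->ref++;
	return cached;
}

static void put_table(struct table *t)
{
	if (--t->ref == 0) {
		printf("destroyed table %d/%d\n", t->chain, t->prio);
		free(t);
		cached = NULL;
	}
}

int main(void)
{
	struct table *a = get_table(0, 1); /* creates */
	struct table *b = get_table(0, 1); /* reuses, ref == 2 */

	put_table(b);                      /* still alive */
	put_table(a);                      /* last ref: destroyed */
	return 0;
}
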
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads_chains.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads_chains.c
new file mode 100644
index 000000000000..c5a446e295aa
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads_chains.c
@@ -0,0 +1,758 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+// Copyright (c) 2020 Mellanox Technologies.
+
+#include <linux/mlx5/driver.h>
+#include <linux/mlx5/mlx5_ifc.h>
+#include <linux/mlx5/fs.h>
+
+#include "eswitch_offloads_chains.h"
+#include "mlx5_core.h"
+#include "fs_core.h"
+#include "eswitch.h"
+#include "en.h"
+
+#define esw_chains_priv(esw) ((esw)->fdb_table.offloads.esw_chains_priv)
+#define esw_chains_lock(esw) (esw_chains_priv(esw)->lock)
+#define esw_chains_ht(esw) (esw_chains_priv(esw)->chains_ht)
+#define esw_prios_ht(esw) (esw_chains_priv(esw)->prios_ht)
+#define fdb_pool_left(esw) (esw_chains_priv(esw)->fdb_left)
+#define tc_slow_fdb(esw) ((esw)->fdb_table.offloads.slow_fdb)
+#define tc_end_fdb(esw) (esw_chains_priv(esw)->tc_end_fdb)
+#define fdb_ignore_flow_level_supported(esw) \
+ (MLX5_CAP_ESW_FLOWTABLE_FDB((esw)->dev, ignore_flow_level))
+
+#define ESW_OFFLOADS_NUM_GROUPS 4
+
+/* Firmware currently supports 4 pool sizes (ESW_POOLS) and a virtual
+ * memory region of 16M (ESW_SIZE), which is duplicated for each flow
+ * table pool. We can allocate up to 16M of each pool, and we keep
+ * track of how much we used via get_next_avail_sz_from_pool.
+ * Firmware doesn't report any of this for now.
+ * ESW_POOLS is expected to be sorted from large to small and to match
+ * the firmware pools.
+ */
+#define ESW_SIZE (16 * 1024 * 1024)
+static const unsigned int ESW_POOLS[] = { 4 * 1024 * 1024,
+ 1 * 1024 * 1024,
+ 64 * 1024,
+ 4 * 1024, };
+
+struct mlx5_esw_chains_priv {
+ struct rhashtable chains_ht;
+ struct rhashtable prios_ht;
+ /* Protects above chains_ht and prios_ht */
+ struct mutex lock;
+
+ struct mlx5_flow_table *tc_end_fdb;
+
+ int fdb_left[ARRAY_SIZE(ESW_POOLS)];
+};
+
+struct fdb_chain {
+ struct rhash_head node;
+
+ u32 chain;
+
+ int ref;
+
+ struct mlx5_eswitch *esw;
+ struct list_head prios_list;
+};
+
+struct fdb_prio_key {
+ u32 chain;
+ u32 prio;
+ u32 level;
+};
+
+struct fdb_prio {
+ struct rhash_head node;
+ struct list_head list;
+
+ struct fdb_prio_key key;
+
+ int ref;
+
+ struct fdb_chain *fdb_chain;
+ struct mlx5_flow_table *fdb;
+ struct mlx5_flow_table *next_fdb;
+ struct mlx5_flow_group *miss_group;
+ struct mlx5_flow_handle *miss_rule;
+};
+
+static const struct rhashtable_params chain_params = {
+ .head_offset = offsetof(struct fdb_chain, node),
+ .key_offset = offsetof(struct fdb_chain, chain),
+ .key_len = sizeof_field(struct fdb_chain, chain),
+ .automatic_shrinking = true,
+};
+
+static const struct rhashtable_params prio_params = {
+ .head_offset = offsetof(struct fdb_prio, node),
+ .key_offset = offsetof(struct fdb_prio, key),
+ .key_len = sizeof_field(struct fdb_prio, key),
+ .automatic_shrinking = true,
+};
+
+bool mlx5_esw_chains_prios_supported(struct mlx5_eswitch *esw)
+{
+ return esw->fdb_table.flags & ESW_FDB_CHAINS_AND_PRIOS_SUPPORTED;
+}
+
+u32 mlx5_esw_chains_get_chain_range(struct mlx5_eswitch *esw)
+{
+ if (!mlx5_esw_chains_prios_supported(esw))
+ return 1;
+
+ if (fdb_ignore_flow_level_supported(esw))
+ return UINT_MAX - 1;
+
+ return FDB_TC_MAX_CHAIN;
+}
+
+u32 mlx5_esw_chains_get_ft_chain(struct mlx5_eswitch *esw)
+{
+ return mlx5_esw_chains_get_chain_range(esw) + 1;
+}
+
+u32 mlx5_esw_chains_get_prio_range(struct mlx5_eswitch *esw)
+{
+ if (!mlx5_esw_chains_prios_supported(esw))
+ return 1;
+
+ if (fdb_ignore_flow_level_supported(esw))
+ return UINT_MAX;
+
+ return FDB_TC_MAX_PRIO;
+}
+
+static unsigned int mlx5_esw_chains_get_level_range(struct mlx5_eswitch *esw)
+{
+ if (fdb_ignore_flow_level_supported(esw))
+ return UINT_MAX;
+
+ return FDB_TC_LEVELS_PER_PRIO;
+}
+
+#define POOL_NEXT_SIZE 0
+static int
+mlx5_esw_chains_get_avail_sz_from_pool(struct mlx5_eswitch *esw,
+ int desired_size)
+{
+ int i, found_i = -1;
+
+ for (i = ARRAY_SIZE(ESW_POOLS) - 1; i >= 0; i--) {
+ if (fdb_pool_left(esw)[i] && ESW_POOLS[i] > desired_size) {
+ found_i = i;
+ if (desired_size != POOL_NEXT_SIZE)
+ break;
+ }
+ }
+
+ if (found_i != -1) {
+ --fdb_pool_left(esw)[found_i];
+ return ESW_POOLS[found_i];
+ }
+
+ return 0;
+}
+
+static void
+mlx5_esw_chains_put_sz_to_pool(struct mlx5_eswitch *esw, int sz)
+{
+ int i;
+
+ for (i = ARRAY_SIZE(ESW_POOLS) - 1; i >= 0; i--) {
+ if (sz == ESW_POOLS[i]) {
+ ++fdb_pool_left(esw)[i];
+ return;
+ }
+ }
+
+ WARN_ONCE(1, "Couldn't find size %d in fdb size pool", sz);
+}
+
+static void
+mlx5_esw_chains_init_sz_pool(struct mlx5_eswitch *esw)
+{
+ u32 fdb_max;
+ int i;
+
+ fdb_max = 1 << MLX5_CAP_ESW_FLOWTABLE_FDB(esw->dev, log_max_ft_size);
+
+ for (i = ARRAY_SIZE(ESW_POOLS) - 1; i >= 0; i--)
+ fdb_pool_left(esw)[i] =
+ ESW_POOLS[i] <= fdb_max ? ESW_SIZE / ESW_POOLS[i] : 0;
+}
+
+static struct mlx5_flow_table *
+mlx5_esw_chains_create_fdb_table(struct mlx5_eswitch *esw,
+ u32 chain, u32 prio, u32 level)
+{
+ struct mlx5_flow_table_attr ft_attr = {};
+ struct mlx5_flow_namespace *ns;
+ struct mlx5_flow_table *fdb;
+ int sz;
+
+ if (esw->offloads.encap != DEVLINK_ESWITCH_ENCAP_MODE_NONE)
+ ft_attr.flags |= (MLX5_FLOW_TABLE_TUNNEL_EN_REFORMAT |
+ MLX5_FLOW_TABLE_TUNNEL_EN_DECAP);
+
+ sz = mlx5_esw_chains_get_avail_sz_from_pool(esw, POOL_NEXT_SIZE);
+ if (!sz)
+ return ERR_PTR(-ENOSPC);
+ ft_attr.max_fte = sz;
+
+ /* We use tc_slow_fdb(esw) as the table's next_ft until
+ * ignore_flow_level is allowed on FT creation, and not just for FTEs.
+ * Instead, the caller should add an explicit miss rule if needed.
+ */
+ ft_attr.next_ft = tc_slow_fdb(esw);
+
+ /* The root table (chain 0, prio 1, level 0) is required to be
+ * connected to the previous prio (FDB_BYPASS_PATH if it exists).
+ * We always create it, as a managed table, in order to align with
+ * fs_core logic.
+ */
+ if (!fdb_ignore_flow_level_supported(esw) ||
+ (chain == 0 && prio == 1 && level == 0)) {
+ ft_attr.level = level;
+ ft_attr.prio = prio - 1;
+ ns = mlx5_get_fdb_sub_ns(esw->dev, chain);
+ } else {
+ ft_attr.flags |= MLX5_FLOW_TABLE_UNMANAGED;
+ ft_attr.prio = FDB_TC_OFFLOAD;
+ /* Firmware doesn't allow us to create another level 0 table,
+ * so we create all unmanaged tables as level 1.
+ *
+ * To connect them, we use explicit miss rules with
+ * ignore_flow_level. Caller is responsible to create
+ * these rules (if needed).
+ */
+ ft_attr.level = 1;
+ ns = mlx5_get_flow_namespace(esw->dev, MLX5_FLOW_NAMESPACE_FDB);
+ }
+
+ ft_attr.autogroup.num_reserved_entries = 2;
+ ft_attr.autogroup.max_num_groups = ESW_OFFLOADS_NUM_GROUPS;
+ fdb = mlx5_create_auto_grouped_flow_table(ns, &ft_attr);
+ if (IS_ERR(fdb)) {
+ esw_warn(esw->dev,
+ "Failed to create FDB table err %d (chain: %d, prio: %d, level: %d, size: %d)\n",
+ (int)PTR_ERR(fdb), chain, prio, level, sz);
+ mlx5_esw_chains_put_sz_to_pool(esw, sz);
+ return fdb;
+ }
+
+ return fdb;
+}
+
+static void
+mlx5_esw_chains_destroy_fdb_table(struct mlx5_eswitch *esw,
+ struct mlx5_flow_table *fdb)
+{
+ mlx5_esw_chains_put_sz_to_pool(esw, fdb->max_fte);
+ mlx5_destroy_flow_table(fdb);
+}
+
+static struct fdb_chain *
+mlx5_esw_chains_create_fdb_chain(struct mlx5_eswitch *esw, u32 chain)
+{
+ struct fdb_chain *fdb_chain = NULL;
+ int err;
+
+ fdb_chain = kvzalloc(sizeof(*fdb_chain), GFP_KERNEL);
+ if (!fdb_chain)
+ return ERR_PTR(-ENOMEM);
+
+ fdb_chain->esw = esw;
+ fdb_chain->chain = chain;
+ INIT_LIST_HEAD(&fdb_chain->prios_list);
+
+ err = rhashtable_insert_fast(&esw_chains_ht(esw), &fdb_chain->node,
+ chain_params);
+ if (err)
+ goto err_insert;
+
+ return fdb_chain;
+
+err_insert:
+ kvfree(fdb_chain);
+ return ERR_PTR(err);
+}
+
+static void
+mlx5_esw_chains_destroy_fdb_chain(struct fdb_chain *fdb_chain)
+{
+ struct mlx5_eswitch *esw = fdb_chain->esw;
+
+ rhashtable_remove_fast(&esw_chains_ht(esw), &fdb_chain->node,
+ chain_params);
+ kvfree(fdb_chain);
+}
+
+static struct fdb_chain *
+mlx5_esw_chains_get_fdb_chain(struct mlx5_eswitch *esw, u32 chain)
+{
+ struct fdb_chain *fdb_chain;
+
+ fdb_chain = rhashtable_lookup_fast(&esw_chains_ht(esw), &chain,
+ chain_params);
+ if (!fdb_chain) {
+ fdb_chain = mlx5_esw_chains_create_fdb_chain(esw, chain);
+ if (IS_ERR(fdb_chain))
+ return fdb_chain;
+ }
+
+ fdb_chain->ref++;
+
+ return fdb_chain;
+}
+
+static struct mlx5_flow_handle *
+mlx5_esw_chains_add_miss_rule(struct mlx5_flow_table *fdb,
+ struct mlx5_flow_table *next_fdb)
+{
+ static const struct mlx5_flow_spec spec = {};
+ struct mlx5_flow_destination dest = {};
+ struct mlx5_flow_act act = {};
+
+ act.flags = FLOW_ACT_IGNORE_FLOW_LEVEL | FLOW_ACT_NO_APPEND;
+ act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
+ dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
+ dest.ft = next_fdb;
+
+ return mlx5_add_flow_rules(fdb, &spec, &act, &dest, 1);
+}
+
+static int
+mlx5_esw_chains_update_prio_prevs(struct fdb_prio *fdb_prio,
+ struct mlx5_flow_table *next_fdb)
+{
+ struct mlx5_flow_handle *miss_rules[FDB_TC_LEVELS_PER_PRIO + 1] = {};
+ struct fdb_chain *fdb_chain = fdb_prio->fdb_chain;
+ struct fdb_prio *pos;
+ int n = 0, err;
+
+ if (fdb_prio->key.level)
+ return 0;
+
+ /* Iterate in reverse order until reaching the level 0 rule of
+ * the previous priority, adding all the miss rules first, so we can
+ * revert them if any of them fails.
+ */
+ pos = fdb_prio;
+ list_for_each_entry_continue_reverse(pos,
+ &fdb_chain->prios_list,
+ list) {
+ miss_rules[n] = mlx5_esw_chains_add_miss_rule(pos->fdb,
+ next_fdb);
+ if (IS_ERR(miss_rules[n])) {
+ err = PTR_ERR(miss_rules[n]);
+ goto err_prev_rule;
+ }
+
+ n++;
+ if (!pos->key.level)
+ break;
+ }
+
+ /* Success, delete old miss rules, and update the pointers. */
+ n = 0;
+ pos = fdb_prio;
+ list_for_each_entry_continue_reverse(pos,
+ &fdb_chain->prios_list,
+ list) {
+ mlx5_del_flow_rules(pos->miss_rule);
+
+ pos->miss_rule = miss_rules[n];
+ pos->next_fdb = next_fdb;
+
+ n++;
+ if (!pos->key.level)
+ break;
+ }
+
+ return 0;
+
+err_prev_rule:
+ while (--n >= 0)
+ mlx5_del_flow_rules(miss_rules[n]);
+
+ return err;
+}
+
+static void
+mlx5_esw_chains_put_fdb_chain(struct fdb_chain *fdb_chain)
+{
+ if (--fdb_chain->ref == 0)
+ mlx5_esw_chains_destroy_fdb_chain(fdb_chain);
+}
+
+static struct fdb_prio *
+mlx5_esw_chains_create_fdb_prio(struct mlx5_eswitch *esw,
+ u32 chain, u32 prio, u32 level)
+{
+ int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
+ struct mlx5_flow_handle *miss_rule = NULL;
+ struct mlx5_flow_group *miss_group;
+ struct fdb_prio *fdb_prio = NULL;
+ struct mlx5_flow_table *next_fdb;
+ struct fdb_chain *fdb_chain;
+ struct mlx5_flow_table *fdb;
+ struct list_head *pos;
+ u32 *flow_group_in;
+ int err;
+
+ fdb_chain = mlx5_esw_chains_get_fdb_chain(esw, chain);
+ if (IS_ERR(fdb_chain))
+ return ERR_CAST(fdb_chain);
+
+ fdb_prio = kvzalloc(sizeof(*fdb_prio), GFP_KERNEL);
+ flow_group_in = kvzalloc(inlen, GFP_KERNEL);
+ if (!fdb_prio || !flow_group_in) {
+ err = -ENOMEM;
+ goto err_alloc;
+ }
+
+ /* A chain's prio list is sorted by prio and level.
+ * All levels of a given prio point to the next prio's level 0.
+ * Example list (prio, level):
+ * (3,0)->(3,1)->(5,0)->(5,1)->(6,1)->(7,0)
+ * In hardware, we will have the following pointers:
+ * (3,0) -> (5,0) -> (7,0) -> Slow path
+ * (3,1) -> (5,0)
+ * (5,1) -> (7,0)
+ * (6,1) -> (7,0)
+ */
+
+ /* Default miss for each chain: */
+ next_fdb = (chain == mlx5_esw_chains_get_ft_chain(esw)) ?
+ tc_slow_fdb(esw) :
+ tc_end_fdb(esw);
+ list_for_each(pos, &fdb_chain->prios_list) {
+ struct fdb_prio *p = list_entry(pos, struct fdb_prio, list);
+
+ /* exit on first pos that is larger */
+ if (prio < p->key.prio || (prio == p->key.prio &&
+ level < p->key.level)) {
+ /* Get next level 0 table */
+ next_fdb = p->key.level == 0 ? p->fdb : p->next_fdb;
+ break;
+ }
+ }
+
+ fdb = mlx5_esw_chains_create_fdb_table(esw, chain, prio, level);
+ if (IS_ERR(fdb)) {
+ err = PTR_ERR(fdb);
+ goto err_create;
+ }
+
+ MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index,
+ fdb->max_fte - 2);
+ MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index,
+ fdb->max_fte - 1);
+ miss_group = mlx5_create_flow_group(fdb, flow_group_in);
+ if (IS_ERR(miss_group)) {
+ err = PTR_ERR(miss_group);
+ goto err_group;
+ }
+
+ /* Add miss rule to next_fdb */
+ miss_rule = mlx5_esw_chains_add_miss_rule(fdb, next_fdb);
+ if (IS_ERR(miss_rule)) {
+ err = PTR_ERR(miss_rule);
+ goto err_miss_rule;
+ }
+
+ fdb_prio->miss_group = miss_group;
+ fdb_prio->miss_rule = miss_rule;
+ fdb_prio->next_fdb = next_fdb;
+ fdb_prio->fdb_chain = fdb_chain;
+ fdb_prio->key.chain = chain;
+ fdb_prio->key.prio = prio;
+ fdb_prio->key.level = level;
+ fdb_prio->fdb = fdb;
+
+ err = rhashtable_insert_fast(&esw_prios_ht(esw), &fdb_prio->node,
+ prio_params);
+ if (err)
+ goto err_insert;
+
+ list_add(&fdb_prio->list, pos->prev);
+
+ /* Table is ready, connect it */
+ err = mlx5_esw_chains_update_prio_prevs(fdb_prio, fdb);
+ if (err)
+ goto err_update;
+
+ kvfree(flow_group_in);
+ return fdb_prio;
+
+err_update:
+ list_del(&fdb_prio->list);
+ rhashtable_remove_fast(&esw_prios_ht(esw), &fdb_prio->node,
+ prio_params);
+err_insert:
+ mlx5_del_flow_rules(miss_rule);
+err_miss_rule:
+ mlx5_destroy_flow_group(miss_group);
+err_group:
+ mlx5_esw_chains_destroy_fdb_table(esw, fdb);
+err_create:
+err_alloc:
+ kvfree(fdb_prio);
+ kvfree(flow_group_in);
+ mlx5_esw_chains_put_fdb_chain(fdb_chain);
+ return ERR_PTR(err);
+}
+
+static void
+mlx5_esw_chains_destroy_fdb_prio(struct mlx5_eswitch *esw,
+ struct fdb_prio *fdb_prio)
+{
+ struct fdb_chain *fdb_chain = fdb_prio->fdb_chain;
+
+ WARN_ON(mlx5_esw_chains_update_prio_prevs(fdb_prio,
+ fdb_prio->next_fdb));
+
+ list_del(&fdb_prio->list);
+ rhashtable_remove_fast(&esw_prios_ht(esw), &fdb_prio->node,
+ prio_params);
+ mlx5_del_flow_rules(fdb_prio->miss_rule);
+ mlx5_destroy_flow_group(fdb_prio->miss_group);
+ mlx5_esw_chains_destroy_fdb_table(esw, fdb_prio->fdb);
+ mlx5_esw_chains_put_fdb_chain(fdb_chain);
+ kvfree(fdb_prio);
+}
+
+struct mlx5_flow_table *
+mlx5_esw_chains_get_table(struct mlx5_eswitch *esw, u32 chain, u32 prio,
+ u32 level)
+{
+ struct mlx5_flow_table *prev_fts;
+ struct fdb_prio *fdb_prio;
+ struct fdb_prio_key key;
+ int l = 0;
+
+ if ((chain > mlx5_esw_chains_get_chain_range(esw) &&
+ chain != mlx5_esw_chains_get_ft_chain(esw)) ||
+ prio > mlx5_esw_chains_get_prio_range(esw) ||
+ level > mlx5_esw_chains_get_level_range(esw))
+ return ERR_PTR(-EOPNOTSUPP);
+
+ /* create earlier levels for correct fs_core lookup when
+ * connecting tables.
+ */
+ for (l = 0; l < level; l++) {
+ prev_fts = mlx5_esw_chains_get_table(esw, chain, prio, l);
+ if (IS_ERR(prev_fts)) {
+ fdb_prio = ERR_CAST(prev_fts);
+ goto err_get_prevs;
+ }
+ }
+
+ key.chain = chain;
+ key.prio = prio;
+ key.level = level;
+
+ mutex_lock(&esw_chains_lock(esw));
+ fdb_prio = rhashtable_lookup_fast(&esw_prios_ht(esw), &key,
+ prio_params);
+ if (!fdb_prio) {
+ fdb_prio = mlx5_esw_chains_create_fdb_prio(esw, chain,
+ prio, level);
+ if (IS_ERR(fdb_prio))
+ goto err_create_prio;
+ }
+
+ ++fdb_prio->ref;
+ mutex_unlock(&esw_chains_lock(esw));
+
+ return fdb_prio->fdb;
+
+err_create_prio:
+ mutex_unlock(&esw_chains_lock(esw));
+err_get_prevs:
+ while (--l >= 0)
+ mlx5_esw_chains_put_table(esw, chain, prio, l);
+ return ERR_CAST(fdb_prio);
+}
+
+void
+mlx5_esw_chains_put_table(struct mlx5_eswitch *esw, u32 chain, u32 prio,
+ u32 level)
+{
+ struct fdb_prio *fdb_prio;
+ struct fdb_prio_key key;
+
+ key.chain = chain;
+ key.prio = prio;
+ key.level = level;
+
+ mutex_lock(&esw_chains_lock(esw));
+ fdb_prio = rhashtable_lookup_fast(&esw_prios_ht(esw), &key,
+ prio_params);
+ if (!fdb_prio)
+ goto err_get_prio;
+
+ if (--fdb_prio->ref == 0)
+ mlx5_esw_chains_destroy_fdb_prio(esw, fdb_prio);
+ mutex_unlock(&esw_chains_lock(esw));
+
+ while (level-- > 0)
+ mlx5_esw_chains_put_table(esw, chain, prio, level);
+
+ return;
+
+err_get_prio:
+ mutex_unlock(&esw_chains_lock(esw));
+ WARN_ONCE(1,
+ "Couldn't find table: (chain: %d prio: %d level: %d)",
+ chain, prio, level);
+}
+
+struct mlx5_flow_table *
+mlx5_esw_chains_get_tc_end_ft(struct mlx5_eswitch *esw)
+{
+ return tc_end_fdb(esw);
+}
+
+static int
+mlx5_esw_chains_init(struct mlx5_eswitch *esw)
+{
+ struct mlx5_esw_chains_priv *chains_priv;
+ struct mlx5_core_dev *dev = esw->dev;
+ u32 max_flow_counter, fdb_max;
+ int err;
+
+ chains_priv = kzalloc(sizeof(*chains_priv), GFP_KERNEL);
+ if (!chains_priv)
+ return -ENOMEM;
+ esw_chains_priv(esw) = chains_priv;
+
+ max_flow_counter = (MLX5_CAP_GEN(dev, max_flow_counter_31_16) << 16) |
+ MLX5_CAP_GEN(dev, max_flow_counter_15_0);
+ fdb_max = 1 << MLX5_CAP_ESW_FLOWTABLE_FDB(dev, log_max_ft_size);
+
+ esw_debug(dev,
+ "Init esw offloads chains, max counters(%d), groups(%d), max flow table size(%d)\n",
+ max_flow_counter, ESW_OFFLOADS_NUM_GROUPS, fdb_max);
+
+ mlx5_esw_chains_init_sz_pool(esw);
+
+ if (!MLX5_CAP_ESW_FLOWTABLE(esw->dev, multi_fdb_encap) &&
+ esw->offloads.encap != DEVLINK_ESWITCH_ENCAP_MODE_NONE) {
+ esw->fdb_table.flags &= ~ESW_FDB_CHAINS_AND_PRIOS_SUPPORTED;
+ esw_warn(dev, "Tc chains and priorities offload aren't supported, update firmware if needed\n");
+ } else {
+ esw->fdb_table.flags |= ESW_FDB_CHAINS_AND_PRIOS_SUPPORTED;
+ esw_info(dev, "Supported tc offload range - chains: %u, prios: %u\n",
+ mlx5_esw_chains_get_chain_range(esw),
+ mlx5_esw_chains_get_prio_range(esw));
+ }
+
+ err = rhashtable_init(&esw_chains_ht(esw), &chain_params);
+ if (err)
+ goto init_chains_ht_err;
+
+ err = rhashtable_init(&esw_prios_ht(esw), &prio_params);
+ if (err)
+ goto init_prios_ht_err;
+
+ mutex_init(&esw_chains_lock(esw));
+
+ return 0;
+
+init_prios_ht_err:
+ rhashtable_destroy(&esw_chains_ht(esw));
+init_chains_ht_err:
+ kfree(chains_priv);
+ return err;
+}
+
+static void
+mlx5_esw_chains_cleanup(struct mlx5_eswitch *esw)
+{
+ mutex_destroy(&esw_chains_lock(esw));
+ rhashtable_destroy(&esw_prios_ht(esw));
+ rhashtable_destroy(&esw_chains_ht(esw));
+
+ kfree(esw_chains_priv(esw));
+}
+
+static int
+mlx5_esw_chains_open(struct mlx5_eswitch *esw)
+{
+ struct mlx5_flow_table *ft;
+ int err;
+
+ /* Create tc_end_fdb(esw), the ft chain that is always created */
+ ft = mlx5_esw_chains_get_table(esw, mlx5_esw_chains_get_ft_chain(esw),
+ 1, 0);
+ if (IS_ERR(ft))
+ return PTR_ERR(ft);
+
+ tc_end_fdb(esw) = ft;
+
+ /* Always open the root for fast path */
+ ft = mlx5_esw_chains_get_table(esw, 0, 1, 0);
+ if (IS_ERR(ft)) {
+ err = PTR_ERR(ft);
+ goto level_0_err;
+ }
+
+ /* Open level 1 for split rules now if prios aren't supported */
+ if (!mlx5_esw_chains_prios_supported(esw)) {
+ ft = mlx5_esw_chains_get_table(esw, 0, 1, 1);
+
+ if (IS_ERR(ft)) {
+ err = PTR_ERR(ft);
+ goto level_1_err;
+ }
+ }
+
+ return 0;
+
+level_1_err:
+ mlx5_esw_chains_put_table(esw, 0, 1, 0);
+level_0_err:
+ mlx5_esw_chains_put_table(esw, mlx5_esw_chains_get_ft_chain(esw), 1, 0);
+ return err;
+}
+
+static void
+mlx5_esw_chains_close(struct mlx5_eswitch *esw)
+{
+ if (!mlx5_esw_chains_prios_supported(esw))
+ mlx5_esw_chains_put_table(esw, 0, 1, 1);
+ mlx5_esw_chains_put_table(esw, 0, 1, 0);
+ mlx5_esw_chains_put_table(esw, mlx5_esw_chains_get_ft_chain(esw), 1, 0);
+}
+
+int
+mlx5_esw_chains_create(struct mlx5_eswitch *esw)
+{
+ int err;
+
+ err = mlx5_esw_chains_init(esw);
+ if (err)
+ return err;
+
+ err = mlx5_esw_chains_open(esw);
+ if (err)
+ goto err_open;
+
+ return 0;
+
+err_open:
+ mlx5_esw_chains_cleanup(esw);
+ return err;
+}
+
+void
+mlx5_esw_chains_destroy(struct mlx5_eswitch *esw)
+{
+ mlx5_esw_chains_close(esw);
+ mlx5_esw_chains_cleanup(esw);
+}
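
The prios_list comment in mlx5_esw_chains_create_fdb_prio() above can be checked mechanically: each table's miss rule targets the level-0 table of the next higher prio, or the slow path if none exists. A short program reproducing the comment's example list:

#include <stdio.h>

struct ent { int prio, level; };

int main(void)
{
	/* sorted (prio, level) list from the comment */
	struct ent list[] = { {3,0}, {3,1}, {5,0}, {5,1}, {6,1}, {7,0} };
	int n = sizeof(list) / sizeof(list[0]);

	for (int i = 0; i < n; i++) {
		int j;

		/* next_fdb = first level-0 entry of a strictly higher prio */
		for (j = i + 1; j < n; j++)
			if (list[j].prio > list[i].prio && list[j].level == 0)
				break;
		if (j < n)
			printf("(%d,%d) -> (%d,%d)\n", list[i].prio,
			       list[i].level, list[j].prio, list[j].level);
		else
			printf("(%d,%d) -> slow path\n", list[i].prio,
			       list[i].level);
	}
	return 0;
}
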
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads_chains.h b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads_chains.h
new file mode 100644
index 000000000000..2e13097fe348
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads_chains.h
@@ -0,0 +1,30 @@
+/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
+/* Copyright (c) 2020 Mellanox Technologies. */
+
+#ifndef __MLX5_ESW_CHAINS_H__
+#define __MLX5_ESW_CHAINS_H__
+
+bool
+mlx5_esw_chains_prios_supported(struct mlx5_eswitch *esw);
+u32
+mlx5_esw_chains_get_prio_range(struct mlx5_eswitch *esw);
+u32
+mlx5_esw_chains_get_chain_range(struct mlx5_eswitch *esw);
+u32
+mlx5_esw_chains_get_ft_chain(struct mlx5_eswitch *esw);
+
+struct mlx5_flow_table *
+mlx5_esw_chains_get_table(struct mlx5_eswitch *esw, u32 chain, u32 prio,
+ u32 level);
+void
+mlx5_esw_chains_put_table(struct mlx5_eswitch *esw, u32 chain, u32 prio,
+ u32 level);
+
+struct mlx5_flow_table *
+mlx5_esw_chains_get_tc_end_ft(struct mlx5_eswitch *esw);
+
+int mlx5_esw_chains_create(struct mlx5_eswitch *esw);
+void mlx5_esw_chains_destroy(struct mlx5_eswitch *esw);
+
+#endif /* __MLX5_ESW_CHAINS_H__ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads_termtbl.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads_termtbl.c
index 366bda1bb1c3..dc08ed9339ab 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads_termtbl.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads_termtbl.c
@@ -50,8 +50,8 @@ mlx5_eswitch_termtbl_create(struct mlx5_core_dev *dev,
struct mlx5_flow_act *flow_act)
{
static const struct mlx5_flow_spec spec = {};
+ struct mlx5_flow_table_attr ft_attr = {};
struct mlx5_flow_namespace *root_ns;
- int prio, flags;
int err;
root_ns = mlx5_get_flow_namespace(dev, MLX5_FLOW_NAMESPACE_FDB);
@@ -63,10 +63,11 @@ mlx5_eswitch_termtbl_create(struct mlx5_core_dev *dev,
/* As this is the terminating action, the termination table uses the
 * same prio as the slow path
 */
- prio = FDB_SLOW_PATH;
- flags = MLX5_FLOW_TABLE_TERMINATION;
- tt->termtbl = mlx5_create_auto_grouped_flow_table(root_ns, prio, 1, 1,
- 0, flags);
+ ft_attr.flags = MLX5_FLOW_TABLE_TERMINATION;
+ ft_attr.prio = FDB_SLOW_PATH;
+ ft_attr.max_fte = 1;
+ ft_attr.autogroup.max_num_groups = 1;
+ tt->termtbl = mlx5_create_auto_grouped_flow_table(root_ns, &ft_attr);
if (IS_ERR(tt->termtbl)) {
esw_warn(dev, "Failed to create termination table\n");
return -EOPNOTSUPP;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c
index 3c816e81f8d9..b25465d9e030 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c
@@ -432,6 +432,9 @@ static int mlx5_cmd_set_fte(struct mlx5_core_dev *dev,
MLX5_SET(set_fte_in, in, table_type, ft->type);
MLX5_SET(set_fte_in, in, table_id, ft->id);
MLX5_SET(set_fte_in, in, flow_index, fte->index);
+ MLX5_SET(set_fte_in, in, ignore_flow_level,
+ !!(fte->action.flags & FLOW_ACT_IGNORE_FLOW_LEVEL));
+
if (ft->vport) {
MLX5_SET(set_fte_in, in, vport_number, ft->vport);
MLX5_SET(set_fte_in, in, other_vport, 1);
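
set_fte_in.ignore_flow_level is a single-bit field, while a masked flag test yields the raw bit value, hence the !! normalization above. A sketch of the idiom (the bit position here is illustrative, not the real FLOW_ACT_* value):

#include <stdio.h>

#define FLOW_ACT_IGNORE_FLOW_LEVEL (1u << 3) /* illustrative bit position */

int main(void)
{
	unsigned int flags = FLOW_ACT_IGNORE_FLOW_LEVEL;

	unsigned int raw = flags & FLOW_ACT_IGNORE_FLOW_LEVEL;    /* 8 */
	unsigned int bit = !!(flags & FLOW_ACT_IGNORE_FLOW_LEVEL); /* 1 */

	printf("raw=%u normalized=%u\n", raw, bit);
	return 0;
}
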
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
index 8c5df6c7d7b6..c7a16ae05fa8 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
@@ -579,7 +579,9 @@ static void del_sw_flow_group(struct fs_node *node)
rhashtable_destroy(&fg->ftes_hash);
ida_destroy(&fg->fte_allocator);
- if (ft->autogroup.active && fg->max_ftes == ft->autogroup.group_size)
+ if (ft->autogroup.active &&
+ fg->max_ftes == ft->autogroup.group_size &&
+ fg->start_index < ft->autogroup.max_fte)
ft->autogroup.num_groups--;
err = rhltable_remove(&ft->fgs_hash,
&fg->hash,
@@ -1006,7 +1008,8 @@ static struct mlx5_flow_table *__mlx5_create_flow_table(struct mlx5_flow_namespa
u16 vport)
{
struct mlx5_flow_root_namespace *root = find_root(&ns->node);
- struct mlx5_flow_table *next_ft = NULL;
+ bool unmanaged = ft_attr->flags & MLX5_FLOW_TABLE_UNMANAGED;
+ struct mlx5_flow_table *next_ft;
struct fs_prio *fs_prio = NULL;
struct mlx5_flow_table *ft;
int log_table_sz;
@@ -1023,14 +1026,21 @@ static struct mlx5_flow_table *__mlx5_create_flow_table(struct mlx5_flow_namespa
err = -EINVAL;
goto unlock_root;
}
- if (ft_attr->level >= fs_prio->num_levels) {
- err = -ENOSPC;
- goto unlock_root;
+ if (!unmanaged) {
+ /* The level is related to the
+ * priority level range.
+ */
+ if (ft_attr->level >= fs_prio->num_levels) {
+ err = -ENOSPC;
+ goto unlock_root;
+ }
+
+ ft_attr->level += fs_prio->start_level;
}
+
- /* The level is related to the
- * priority level range.
- */
- ft_attr->level += fs_prio->start_level;
ft = alloc_flow_table(ft_attr->level,
vport,
ft_attr->max_fte ? roundup_pow_of_two(ft_attr->max_fte) : 0,
@@ -1043,19 +1053,27 @@ static struct mlx5_flow_table *__mlx5_create_flow_table(struct mlx5_flow_namespa
tree_init_node(&ft->node, del_hw_flow_table, del_sw_flow_table);
log_table_sz = ft->max_fte ? ilog2(ft->max_fte) : 0;
- next_ft = find_next_chained_ft(fs_prio);
+ next_ft = unmanaged ? ft_attr->next_ft :
+ find_next_chained_ft(fs_prio);
ft->def_miss_action = ns->def_miss_action;
err = root->cmds->create_flow_table(root, ft, log_table_sz, next_ft);
if (err)
goto free_ft;
- err = connect_flow_table(root->dev, ft, fs_prio);
- if (err)
- goto destroy_ft;
+ if (!unmanaged) {
+ err = connect_flow_table(root->dev, ft, fs_prio);
+ if (err)
+ goto destroy_ft;
+ }
+
ft->node.active = true;
down_write_ref_node(&fs_prio->node, false);
- tree_add_node(&ft->node, &fs_prio->node);
- list_add_flow_table(ft, fs_prio);
+ if (!unmanaged) {
+ tree_add_node(&ft->node, &fs_prio->node);
+ list_add_flow_table(ft, fs_prio);
+ } else {
+ ft->node.root = fs_prio->node.root;
+ }
fs_prio->num_ft++;
up_write_ref_node(&fs_prio->node, false);
mutex_unlock(&root->chain_lock);
@@ -1103,31 +1121,27 @@ EXPORT_SYMBOL(mlx5_create_lag_demux_flow_table);
struct mlx5_flow_table*
mlx5_create_auto_grouped_flow_table(struct mlx5_flow_namespace *ns,
- int prio,
- int num_flow_table_entries,
- int max_num_groups,
- u32 level,
- u32 flags)
+ struct mlx5_flow_table_attr *ft_attr)
{
- struct mlx5_flow_table_attr ft_attr = {};
+ int num_reserved_entries = ft_attr->autogroup.num_reserved_entries;
+ int autogroups_max_fte = ft_attr->max_fte - num_reserved_entries;
+ int max_num_groups = ft_attr->autogroup.max_num_groups;
struct mlx5_flow_table *ft;
- if (max_num_groups > num_flow_table_entries)
+ if (max_num_groups > autogroups_max_fte)
+ return ERR_PTR(-EINVAL);
+ if (num_reserved_entries > ft_attr->max_fte)
return ERR_PTR(-EINVAL);
- ft_attr.max_fte = num_flow_table_entries;
- ft_attr.prio = prio;
- ft_attr.level = level;
- ft_attr.flags = flags;
-
- ft = mlx5_create_flow_table(ns, &ft_attr);
+ ft = mlx5_create_flow_table(ns, ft_attr);
if (IS_ERR(ft))
return ft;
ft->autogroup.active = true;
ft->autogroup.required_groups = max_num_groups;
+ ft->autogroup.max_fte = autogroups_max_fte;
/* We save place for flow groups in addition to max types */
- ft->autogroup.group_size = ft->max_fte / (max_num_groups + 1);
+ ft->autogroup.group_size = autogroups_max_fte / (max_num_groups + 1);
return ft;
}
@@ -1149,7 +1163,7 @@ struct mlx5_flow_group *mlx5_create_flow_group(struct mlx5_flow_table *ft,
struct mlx5_flow_group *fg;
int err;
- if (ft->autogroup.active)
+ if (ft->autogroup.active && start_index < ft->autogroup.max_fte)
return ERR_PTR(-EPERM);
down_write_ref_node(&ft->node, false);
@@ -1322,9 +1336,10 @@ static struct mlx5_flow_group *alloc_auto_flow_group(struct mlx5_flow_table *ft
const struct mlx5_flow_spec *spec)
{
struct list_head *prev = &ft->node.children;
- struct mlx5_flow_group *fg;
+ u32 max_fte = ft->autogroup.max_fte;
unsigned int candidate_index = 0;
unsigned int group_size = 0;
+ struct mlx5_flow_group *fg;
if (!ft->autogroup.active)
return ERR_PTR(-ENOENT);
@@ -1332,7 +1347,7 @@ static struct mlx5_flow_group *alloc_auto_flow_group(struct mlx5_flow_table *ft
if (ft->autogroup.num_groups < ft->autogroup.required_groups)
group_size = ft->autogroup.group_size;
- /* ft->max_fte == ft->autogroup.max_types */
+ /* max_fte == ft->autogroup.max_types */
if (group_size == 0)
group_size = 1;
@@ -1345,7 +1360,7 @@ static struct mlx5_flow_group *alloc_auto_flow_group(struct mlx5_flow_table *ft
prev = &fg->node.list;
}
- if (candidate_index + group_size > ft->max_fte)
+ if (candidate_index + group_size > max_fte)
return ERR_PTR(-ENOSPC);
fg = alloc_insert_flow_group(ft,
@@ -1529,18 +1544,30 @@ static bool counter_is_valid(u32 action)
}
static bool dest_is_valid(struct mlx5_flow_destination *dest,
- u32 action,
+ struct mlx5_flow_act *flow_act,
struct mlx5_flow_table *ft)
{
+ bool ignore_level = flow_act->flags & FLOW_ACT_IGNORE_FLOW_LEVEL;
+ u32 action = flow_act->action;
+
if (dest && (dest->type == MLX5_FLOW_DESTINATION_TYPE_COUNTER))
return counter_is_valid(action);
if (!(action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST))
return true;
+ if (ignore_level) {
+ if (ft->type != FS_FT_FDB)
+ return false;
+
+ if (dest->type == MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE &&
+ dest->ft->type != FS_FT_FDB)
+ return false;
+ }
+
if (!dest || ((dest->type ==
MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE) &&
- (dest->ft->level <= ft->level)))
+ (dest->ft->level <= ft->level && !ignore_level)))
return false;
return true;
}
@@ -1770,7 +1797,7 @@ _mlx5_add_flow_rules(struct mlx5_flow_table *ft,
return ERR_PTR(-EINVAL);
for (i = 0; i < dest_num; i++) {
- if (!dest_is_valid(&dest[i], flow_act->action, ft))
+ if (!dest_is_valid(&dest[i], flow_act, ft))
return ERR_PTR(-EINVAL);
}
nested_down_read_ref_node(&ft->node, FS_LOCK_GRANDPARENT);
@@ -2033,7 +2060,8 @@ int mlx5_destroy_flow_table(struct mlx5_flow_table *ft)
int err = 0;
mutex_lock(&root->chain_lock);
- err = disconnect_flow_table(ft);
+ if (!(ft->flags & MLX5_FLOW_TABLE_UNMANAGED))
+ err = disconnect_flow_table(ft);
if (err) {
mutex_unlock(&root->chain_lock);
return err;
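
The autogroup changes split max_fte into an autogrouped range plus num_reserved_entries at the tail, which mlx5_create_flow_group() may now claim explicitly (the chains code uses the last two entries for its miss group). Worked numbers, chosen for illustration:

#include <stdio.h>

int main(void)
{
	int max_fte = 4096;           /* table size, e.g. one ESW_POOLS entry */
	int num_reserved_entries = 2; /* tail kept out of autogrouping */
	int max_num_groups = 4;       /* ESW_OFFLOADS_NUM_GROUPS */

	int autogroups_max_fte = max_fte - num_reserved_entries;    /* 4094 */
	int group_size = autogroups_max_fte / (max_num_groups + 1); /* 818 */

	printf("autogroup range: [0, %d), group_size=%d\n",
	       autogroups_max_fte, group_size);
	printf("reserved miss group: [%d, %d]\n", max_fte - 2, max_fte - 1);
	return 0;
}
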
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h
index c2621b911563..be5f5e32c1e8 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h
@@ -164,6 +164,7 @@ struct mlx5_flow_table {
unsigned int required_groups;
unsigned int group_size;
unsigned int num_groups;
+ unsigned int max_fte;
} autogroup;
/* Protect fwd_rules */
struct mutex lock;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fw.c b/drivers/net/ethernet/mellanox/mlx5/core/fw.c
index c375edfe528c..d89ff1d09119 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fw.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fw.c
@@ -131,11 +131,11 @@ static int mlx5_get_pcam_reg(struct mlx5_core_dev *dev)
MLX5_PCAM_REGS_5000_TO_507F);
}
-static int mlx5_get_mcam_reg(struct mlx5_core_dev *dev)
+static int mlx5_get_mcam_access_reg_group(struct mlx5_core_dev *dev,
+ enum mlx5_mcam_reg_groups group)
{
- return mlx5_query_mcam_reg(dev, dev->caps.mcam,
- MLX5_MCAM_FEATURE_ENHANCED_FEATURES,
- MLX5_MCAM_REGS_FIRST_128);
+ return mlx5_query_mcam_reg(dev, dev->caps.mcam[group],
+ MLX5_MCAM_FEATURE_ENHANCED_FEATURES, group);
}
static int mlx5_get_qcam_reg(struct mlx5_core_dev *dev)
@@ -221,8 +221,11 @@ int mlx5_query_hca_caps(struct mlx5_core_dev *dev)
if (MLX5_CAP_GEN(dev, pcam_reg))
mlx5_get_pcam_reg(dev);
- if (MLX5_CAP_GEN(dev, mcam_reg))
- mlx5_get_mcam_reg(dev);
+ if (MLX5_CAP_GEN(dev, mcam_reg)) {
+ mlx5_get_mcam_access_reg_group(dev, MLX5_MCAM_REGS_FIRST_128);
+ mlx5_get_mcam_access_reg_group(dev, MLX5_MCAM_REGS_0x9080_0x90FF);
+ mlx5_get_mcam_access_reg_group(dev, MLX5_MCAM_REGS_0x9100_0x917F);
+ }
if (MLX5_CAP_GEN(dev, qcam_reg))
mlx5_get_qcam_reg(dev);
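
dev->caps.mcam becomes an array indexed by enum mlx5_mcam_reg_groups, and each access-register group is queried on its own. A sketch of the enum-indexed capability array, with illustrative enum values and sizes:

#include <stdio.h>
#include <string.h>

enum mcam_reg_groups {
	MCAM_REGS_FIRST_128,
	MCAM_REGS_0x9080_0x90FF,
	MCAM_REGS_0x9100_0x917F,
	MCAM_NUM_GROUPS,
};

static unsigned char mcam[MCAM_NUM_GROUPS][16]; /* per-group cap bytes */

static void query_group(enum mcam_reg_groups group)
{
	/* stands in for mlx5_query_mcam_reg(dev, dev->caps.mcam[group], ...) */
	memset(mcam[group], group + 1, sizeof(mcam[group]));
	printf("queried group %d\n", group);
}

int main(void)
{
	for (int g = 0; g < MCAM_NUM_GROUPS; g++)
		query_group(g);
	return 0;
}
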
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c
index 3ed8ab2d703d..56078b23f1a0 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c
@@ -87,8 +87,8 @@ int mlx5i_init(struct mlx5_core_dev *mdev,
mlx5e_set_netdev_mtu_boundaries(priv);
netdev->mtu = netdev->max_mtu;
- mlx5e_build_nic_params(mdev, NULL, &priv->rss_params, &priv->channels.params,
- priv->max_nch, netdev->mtu);
+ mlx5e_build_nic_params(priv, NULL, &priv->rss_params, &priv->channels.params,
+ netdev->mtu);
mlx5i_build_nic_params(mdev, &priv->channels.params);
mlx5e_timestamp_init(priv);
@@ -419,6 +419,28 @@ static void mlx5i_cleanup_rx(struct mlx5e_priv *priv)
mlx5e_destroy_q_counters(priv);
}
+/* The stats groups order is opposite to the update_stats() call order */
+static mlx5e_stats_grp_t mlx5i_stats_grps[] = {
+ &MLX5E_STATS_GRP(sw),
+ &MLX5E_STATS_GRP(qcnt),
+ &MLX5E_STATS_GRP(vnic_env),
+ &MLX5E_STATS_GRP(vport),
+ &MLX5E_STATS_GRP(802_3),
+ &MLX5E_STATS_GRP(2863),
+ &MLX5E_STATS_GRP(2819),
+ &MLX5E_STATS_GRP(phy),
+ &MLX5E_STATS_GRP(pcie),
+ &MLX5E_STATS_GRP(per_prio),
+ &MLX5E_STATS_GRP(pme),
+ &MLX5E_STATS_GRP(channels),
+ &MLX5E_STATS_GRP(per_port_buff_congest),
+};
+
+static unsigned int mlx5i_stats_grps_num(struct mlx5e_priv *priv)
+{
+ return ARRAY_SIZE(mlx5i_stats_grps);
+}
+
static const struct mlx5e_profile mlx5i_nic_profile = {
.init = mlx5i_init,
.cleanup = mlx5i_cleanup,
@@ -435,6 +457,8 @@ static const struct mlx5e_profile mlx5i_nic_profile = {
.rx_handlers.handle_rx_cqe_mpwqe = NULL, /* Not supported */
.max_tc = MLX5I_MAX_NUM_TC,
.rq_groups = MLX5E_NUM_RQ_GROUPS(REGULAR),
+ .stats_grps = mlx5i_stats_grps,
+ .stats_grps_num = mlx5i_stats_grps_num,
};
/* mlx5i netdev NDOs */
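
The IPoIB profile now publishes its ethtool stats groups the same way as the NIC profile: an array of group descriptors plus a count callback. A self-contained sketch of that descriptor-array pattern (names and counts invented):

#include <stdio.h>

struct stats_grp { const char *name; int (*num_stats)(void); };

static int sw_num(void)   { return 10; }
static int qcnt_num(void) { return 2; }

static const struct stats_grp grps[] = {
	{ "sw",   sw_num },
	{ "qcnt", qcnt_num },
};

static unsigned int grps_num(void)
{
	return sizeof(grps) / sizeof(grps[0]); /* like ARRAY_SIZE() */
}

int main(void)
{
	unsigned int i, total = 0;

	for (i = 0; i < grps_num(); i++)
		total += grps[i].num_stats();
	printf("%u groups, %u counters\n", grps_num(), total);
	return 0;
}
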
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lag.c b/drivers/net/ethernet/mellanox/mlx5/core/lag.c
index fc0d9583475d..b91eabc09fbc 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lag.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lag.c
@@ -586,7 +586,8 @@ void mlx5_lag_add(struct mlx5_core_dev *dev, struct net_device *netdev)
if (!ldev->nb.notifier_call) {
ldev->nb.notifier_call = mlx5_lag_netdev_event;
- if (register_netdevice_notifier(&ldev->nb)) {
+ if (register_netdevice_notifier_dev_net(netdev, &ldev->nb,
+ &ldev->nn)) {
ldev->nb.notifier_call = NULL;
mlx5_core_err(dev, "Failed to register LAG netdev notifier\n");
}
@@ -599,7 +600,7 @@ void mlx5_lag_add(struct mlx5_core_dev *dev, struct net_device *netdev)
}
/* Must be called with intf_mutex held */
-void mlx5_lag_remove(struct mlx5_core_dev *dev)
+void mlx5_lag_remove(struct mlx5_core_dev *dev, struct net_device *netdev)
{
struct mlx5_lag *ldev;
int i;
@@ -619,7 +620,8 @@ void mlx5_lag_remove(struct mlx5_core_dev *dev)
if (i == MLX5_MAX_PORTS) {
if (ldev->nb.notifier_call)
- unregister_netdevice_notifier(&ldev->nb);
+ unregister_netdevice_notifier_dev_net(netdev, &ldev->nb,
+ &ldev->nn);
mlx5_lag_mp_cleanup(ldev);
cancel_delayed_work_sync(&ldev->bond_work);
mlx5_lag_dev_free(ldev);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lag.h b/drivers/net/ethernet/mellanox/mlx5/core/lag.h
index f1068aac6406..316ab09e2664 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lag.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lag.h
@@ -44,6 +44,7 @@ struct mlx5_lag {
struct workqueue_struct *wq;
struct delayed_work bond_work;
struct notifier_block nb;
+ struct netdev_net_notifier nn;
struct lag_mp lag_mp;
};
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lag_mp.c b/drivers/net/ethernet/mellanox/mlx5/core/lag_mp.c
index b70afa310ad2..416676c35b1f 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lag_mp.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lag_mp.c
@@ -200,8 +200,6 @@ static void mlx5_lag_fib_update(struct work_struct *work)
rtnl_lock();
switch (fib_work->event) {
case FIB_EVENT_ENTRY_REPLACE: /* fall through */
- case FIB_EVENT_ENTRY_APPEND: /* fall through */
- case FIB_EVENT_ENTRY_ADD: /* fall through */
case FIB_EVENT_ENTRY_DEL:
mlx5_lag_fib_route_event(ldev, fib_work->event,
fib_work->fen_info.fi);
@@ -259,8 +257,6 @@ static int mlx5_lag_fib_event(struct notifier_block *nb,
switch (event) {
case FIB_EVENT_ENTRY_REPLACE: /* fall through */
- case FIB_EVENT_ENTRY_APPEND: /* fall through */
- case FIB_EVENT_ENTRY_ADD: /* fall through */
case FIB_EVENT_ENTRY_DEL:
fen_info = container_of(info, struct fib_entry_notifier_info,
info);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c
index cf7b8da0f010..f554cfddcf4e 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c
@@ -1563,6 +1563,7 @@ static const struct pci_device_id mlx5_core_pci_table[] = {
{ PCI_VDEVICE(MELLANOX, 0x101d) }, /* ConnectX-6 Dx */
{ PCI_VDEVICE(MELLANOX, 0x101e), MLX5_PCI_DEV_IS_VF}, /* ConnectX Family mlx5Gen Virtual Function */
{ PCI_VDEVICE(MELLANOX, 0x101f) }, /* ConnectX-6 LX */
+ { PCI_VDEVICE(MELLANOX, 0x1021) }, /* ConnectX-7 */
{ PCI_VDEVICE(MELLANOX, 0xa2d2) }, /* BlueField integrated ConnectX-5 network controller */
{ PCI_VDEVICE(MELLANOX, 0xa2d3), MLX5_PCI_DEV_IS_VF}, /* BlueField integrated ConnectX-5 network controller VF */
{ PCI_VDEVICE(MELLANOX, 0xa2d6) }, /* BlueField-2 integrated ConnectX-6 Dx network controller */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h b/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h
index da67b28d6e23..fcce9e0fc82c 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h
@@ -157,7 +157,7 @@ int mlx5_query_qcam_reg(struct mlx5_core_dev *mdev, u32 *qcam,
u8 feature_group, u8 access_reg_group);
void mlx5_lag_add(struct mlx5_core_dev *dev, struct net_device *netdev);
-void mlx5_lag_remove(struct mlx5_core_dev *dev);
+void mlx5_lag_remove(struct mlx5_core_dev *dev, struct net_device *netdev);
int mlx5_irq_table_init(struct mlx5_core_dev *dev);
void mlx5_irq_table_cleanup(struct mlx5_core_dev *dev);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_action.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_action.c
index 004c56c2fc0c..6dec2a550a10 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_action.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_action.c
@@ -677,9 +677,12 @@ int mlx5dr_actions_build_ste_arr(struct mlx5dr_matcher *matcher,
goto out_invalid_arg;
}
if (action->dest_tbl.tbl->level <= matcher->tbl->level) {
+ mlx5_core_warn_once(dmn->mdev,
+ "Connecting table to a lower/same level destination table\n");
mlx5dr_dbg(dmn,
- "Destination table level should be higher than source table\n");
- goto out_invalid_arg;
+ "Connecting table at level %d to a destination table at level %d\n",
+ matcher->tbl->level,
+ action->dest_tbl.tbl->level);
}
attr.final_icm_addr = rx_rule ?
action->dest_tbl.tbl->rx.s_anchor->chunk->icm_addr :
@@ -690,9 +693,9 @@ int mlx5dr_actions_build_ste_arr(struct mlx5dr_matcher *matcher,
/* get the relevant addresses */
if (!action->dest_tbl.fw_tbl.rx_icm_addr) {
- ret = mlx5dr_cmd_query_flow_table(action->dest_tbl.fw_tbl.mdev,
- action->dest_tbl.fw_tbl.ft->type,
- action->dest_tbl.fw_tbl.ft->id,
+ ret = mlx5dr_cmd_query_flow_table(dmn->mdev,
+ action->dest_tbl.fw_tbl.type,
+ action->dest_tbl.fw_tbl.id,
&output);
if (!ret) {
action->dest_tbl.fw_tbl.tx_icm_addr =
@@ -982,8 +985,106 @@ dec_ref:
}
struct mlx5dr_action *
-mlx5dr_create_action_dest_flow_fw_table(struct mlx5_flow_table *ft,
- struct mlx5_core_dev *mdev)
+mlx5dr_action_create_mult_dest_tbl(struct mlx5dr_domain *dmn,
+ struct mlx5dr_action_dest *dests,
+ u32 num_of_dests)
+{
+ struct mlx5dr_cmd_flow_destination_hw_info *hw_dests;
+ struct mlx5dr_action **ref_actions;
+ struct mlx5dr_action *action;
+ bool reformat_req = false;
+ u32 num_of_ref = 0;
+ int ret;
+ int i;
+
+ if (dmn->type != MLX5DR_DOMAIN_TYPE_FDB) {
+ mlx5dr_err(dmn, "Multiple destination support is for FDB only\n");
+ return NULL;
+ }
+
+ hw_dests = kzalloc(sizeof(*hw_dests) * num_of_dests, GFP_KERNEL);
+ if (!hw_dests)
+ return NULL;
+
+ ref_actions = kzalloc(sizeof(*ref_actions) * num_of_dests * 2, GFP_KERNEL);
+ if (!ref_actions)
+ goto free_hw_dests;
+
+ for (i = 0; i < num_of_dests; i++) {
+ struct mlx5dr_action *reformat_action = dests[i].reformat;
+ struct mlx5dr_action *dest_action = dests[i].dest;
+
+ ref_actions[num_of_ref++] = dest_action;
+
+ switch (dest_action->action_type) {
+ case DR_ACTION_TYP_VPORT:
+ hw_dests[i].vport.flags = MLX5_FLOW_DEST_VPORT_VHCA_ID;
+ hw_dests[i].type = MLX5_FLOW_DESTINATION_TYPE_VPORT;
+ hw_dests[i].vport.num = dest_action->vport.caps->num;
+ hw_dests[i].vport.vhca_id = dest_action->vport.caps->vhca_gvmi;
+ if (reformat_action) {
+ reformat_req = true;
+ hw_dests[i].vport.reformat_id =
+ reformat_action->reformat.reformat_id;
+ ref_actions[num_of_ref++] = reformat_action;
+ hw_dests[i].vport.flags |= MLX5_FLOW_DEST_VPORT_REFORMAT_ID;
+ }
+ break;
+
+ case DR_ACTION_TYP_FT:
+ hw_dests[i].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
+ if (dest_action->dest_tbl.is_fw_tbl)
+ hw_dests[i].ft_id = dest_action->dest_tbl.fw_tbl.id;
+ else
+ hw_dests[i].ft_id = dest_action->dest_tbl.tbl->table_id;
+ break;
+
+ default:
+ mlx5dr_dbg(dmn, "Invalid multiple destinations action\n");
+ goto free_ref_actions;
+ }
+ }
+
+ action = dr_action_create_generic(DR_ACTION_TYP_FT);
+ if (!action)
+ goto free_ref_actions;
+
+ ret = mlx5dr_fw_create_md_tbl(dmn,
+ hw_dests,
+ num_of_dests,
+ reformat_req,
+ &action->dest_tbl.fw_tbl.id,
+ &action->dest_tbl.fw_tbl.group_id);
+ if (ret)
+ goto free_action;
+
+ refcount_inc(&dmn->refcount);
+
+ for (i = 0; i < num_of_ref; i++)
+ refcount_inc(&ref_actions[i]->refcount);
+
+ action->dest_tbl.is_fw_tbl = true;
+ action->dest_tbl.fw_tbl.dmn = dmn;
+ action->dest_tbl.fw_tbl.type = FS_FT_FDB;
+ action->dest_tbl.fw_tbl.ref_actions = ref_actions;
+ action->dest_tbl.fw_tbl.num_of_ref_actions = num_of_ref;
+
+ kfree(hw_dests);
+
+ return action;
+
+free_action:
+ kfree(action);
+free_ref_actions:
+ kfree(ref_actions);
+free_hw_dests:
+ kfree(hw_dests);
+ return NULL;
+}
+
+struct mlx5dr_action *
+mlx5dr_action_create_dest_flow_fw_table(struct mlx5dr_domain *dmn,
+ struct mlx5_flow_table *ft)
{
struct mlx5dr_action *action;
@@ -992,8 +1093,11 @@ mlx5dr_create_action_dest_flow_fw_table(struct mlx5_flow_table *ft,
return NULL;
action->dest_tbl.is_fw_tbl = 1;
- action->dest_tbl.fw_tbl.ft = ft;
- action->dest_tbl.fw_tbl.mdev = mdev;
+ action->dest_tbl.fw_tbl.type = ft->type;
+ action->dest_tbl.fw_tbl.id = ft->id;
+ action->dest_tbl.fw_tbl.dmn = dmn;
+
+ refcount_inc(&dmn->refcount);
return action;
}
@@ -1213,58 +1317,85 @@ not_found:
}
static int
-dr_action_modify_sw_to_hw(struct mlx5dr_domain *dmn,
- __be64 *sw_action,
- __be64 *hw_action,
- const struct dr_action_modify_field_conv **ret_hw_info)
+dr_action_modify_sw_to_hw_add(struct mlx5dr_domain *dmn,
+ __be64 *sw_action,
+ __be64 *hw_action,
+ const struct dr_action_modify_field_conv **ret_hw_info)
{
const struct dr_action_modify_field_conv *hw_action_info;
- u8 offset, length, max_length, action;
+ u8 max_length;
u16 sw_field;
- u8 hw_opcode;
u32 data;
/* Get SW modify action data */
- action = MLX5_GET(set_action_in, sw_action, action_type);
- length = MLX5_GET(set_action_in, sw_action, length);
- offset = MLX5_GET(set_action_in, sw_action, offset);
sw_field = MLX5_GET(set_action_in, sw_action, field);
data = MLX5_GET(set_action_in, sw_action, data);
/* Convert SW data to HW modify action format */
hw_action_info = dr_action_modify_get_hw_info(sw_field);
if (!hw_action_info) {
- mlx5dr_dbg(dmn, "Modify action invalid field given\n");
+ mlx5dr_dbg(dmn, "Modify add action invalid field given\n");
return -EINVAL;
}
max_length = hw_action_info->end - hw_action_info->start + 1;
- switch (action) {
- case MLX5_ACTION_TYPE_SET:
- hw_opcode = MLX5DR_ACTION_MDFY_HW_OP_SET;
- /* PRM defines that length zero specific length of 32bits */
- if (!length)
- length = 32;
+ MLX5_SET(dr_action_hw_set, hw_action,
+ opcode, MLX5DR_ACTION_MDFY_HW_OP_ADD);
- if (length + offset > max_length) {
- mlx5dr_dbg(dmn, "Modify action length + offset exceeds limit\n");
- return -EINVAL;
- }
- break;
+ MLX5_SET(dr_action_hw_set, hw_action, destination_field_code,
+ hw_action_info->hw_field);
- case MLX5_ACTION_TYPE_ADD:
- hw_opcode = MLX5DR_ACTION_MDFY_HW_OP_ADD;
- offset = 0;
- length = max_length;
- break;
+ MLX5_SET(dr_action_hw_set, hw_action, destination_left_shifter,
+ hw_action_info->start);
- default:
- mlx5dr_info(dmn, "Unsupported action_type for modify action\n");
- return -EOPNOTSUPP;
+ /* The PRM defines that length zero specifies a length of 32 bits */
+ MLX5_SET(dr_action_hw_set, hw_action, destination_length,
+ max_length == 32 ? 0 : max_length);
+
+ MLX5_SET(dr_action_hw_set, hw_action, inline_data, data);
+
+ *ret_hw_info = hw_action_info;
+
+ return 0;
+}
+
+static int
+dr_action_modify_sw_to_hw_set(struct mlx5dr_domain *dmn,
+ __be64 *sw_action,
+ __be64 *hw_action,
+ const struct dr_action_modify_field_conv **ret_hw_info)
+{
+ const struct dr_action_modify_field_conv *hw_action_info;
+ u8 offset, length, max_length;
+ u16 sw_field;
+ u32 data;
+
+ /* Get SW modify action data */
+ length = MLX5_GET(set_action_in, sw_action, length);
+ offset = MLX5_GET(set_action_in, sw_action, offset);
+ sw_field = MLX5_GET(set_action_in, sw_action, field);
+ data = MLX5_GET(set_action_in, sw_action, data);
+
+ /* Convert SW data to HW modify action format */
+ hw_action_info = dr_action_modify_get_hw_info(sw_field);
+ if (!hw_action_info) {
+ mlx5dr_dbg(dmn, "Modify set action invalid field given\n");
+ return -EINVAL;
+ }
+
+ /* The PRM defines that length zero specifies a length of 32 bits */
+ length = length ? length : 32;
+
+ max_length = hw_action_info->end - hw_action_info->start + 1;
+
+ if (length + offset > max_length) {
+ mlx5dr_dbg(dmn, "Modify action length + offset exceeds limit\n");
+ return -EINVAL;
}
- MLX5_SET(dr_action_hw_set, hw_action, opcode, hw_opcode);
+ MLX5_SET(dr_action_hw_set, hw_action,
+ opcode, MLX5DR_ACTION_MDFY_HW_OP_SET);
MLX5_SET(dr_action_hw_set, hw_action, destination_field_code,
hw_action_info->hw_field);
@@ -1283,48 +1414,236 @@ dr_action_modify_sw_to_hw(struct mlx5dr_domain *dmn,
}
static int
-dr_action_modify_check_field_limitation(struct mlx5dr_domain *dmn,
- const __be64 *sw_action)
+dr_action_modify_sw_to_hw_copy(struct mlx5dr_domain *dmn,
+ __be64 *sw_action,
+ __be64 *hw_action,
+ const struct dr_action_modify_field_conv **ret_dst_hw_info,
+ const struct dr_action_modify_field_conv **ret_src_hw_info)
+{
+ u8 src_offset, dst_offset, src_max_length, dst_max_length, length;
+ const struct dr_action_modify_field_conv *hw_dst_action_info;
+ const struct dr_action_modify_field_conv *hw_src_action_info;
+ u16 src_field, dst_field;
+
+ /* Get SW modify action data */
+ src_field = MLX5_GET(copy_action_in, sw_action, src_field);
+ dst_field = MLX5_GET(copy_action_in, sw_action, dst_field);
+ src_offset = MLX5_GET(copy_action_in, sw_action, src_offset);
+ dst_offset = MLX5_GET(copy_action_in, sw_action, dst_offset);
+ length = MLX5_GET(copy_action_in, sw_action, length);
+
+ /* Convert SW data to HW modify action format */
+ hw_src_action_info = dr_action_modify_get_hw_info(src_field);
+ hw_dst_action_info = dr_action_modify_get_hw_info(dst_field);
+ if (!hw_src_action_info || !hw_dst_action_info) {
+ mlx5dr_dbg(dmn, "Modify copy action invalid field given\n");
+ return -EINVAL;
+ }
+
+ /* The PRM defines that length zero specifies a length of 32 bits */
+ length = length ? length : 32;
+
+ src_max_length = hw_src_action_info->end -
+ hw_src_action_info->start + 1;
+ dst_max_length = hw_dst_action_info->end -
+ hw_dst_action_info->start + 1;
+
+ if (length + src_offset > src_max_length ||
+ length + dst_offset > dst_max_length) {
+ mlx5dr_dbg(dmn, "Modify action length + offset exceeds limit\n");
+ return -EINVAL;
+ }
+
+ MLX5_SET(dr_action_hw_copy, hw_action,
+ opcode, MLX5DR_ACTION_MDFY_HW_OP_COPY);
+
+ MLX5_SET(dr_action_hw_copy, hw_action, destination_field_code,
+ hw_dst_action_info->hw_field);
+
+ MLX5_SET(dr_action_hw_copy, hw_action, destination_left_shifter,
+ hw_dst_action_info->start + dst_offset);
+
+ MLX5_SET(dr_action_hw_copy, hw_action, destination_length,
+ length == 32 ? 0 : length);
+
+ MLX5_SET(dr_action_hw_copy, hw_action, source_field_code,
+ hw_src_action_info->hw_field);
+
+ MLX5_SET(dr_action_hw_copy, hw_action, source_left_shifter,
+ hw_src_action_info->start + src_offset);
+
+ *ret_dst_hw_info = hw_dst_action_info;
+ *ret_src_hw_info = hw_src_action_info;
+
+ return 0;
+}
+
+static int
+dr_action_modify_sw_to_hw(struct mlx5dr_domain *dmn,
+ __be64 *sw_action,
+ __be64 *hw_action,
+ const struct dr_action_modify_field_conv **ret_dst_hw_info,
+ const struct dr_action_modify_field_conv **ret_src_hw_info)
{
- u16 sw_field;
u8 action;
+ int ret;
- sw_field = MLX5_GET(set_action_in, sw_action, field);
+ *hw_action = 0;
+ *ret_src_hw_info = NULL;
+
+ /* Get SW modify action type */
action = MLX5_GET(set_action_in, sw_action, action_type);
- /* Check if SW field is supported in current domain (RX/TX) */
- if (action == MLX5_ACTION_TYPE_SET) {
- if (sw_field == MLX5_ACTION_IN_FIELD_METADATA_REG_A) {
+ switch (action) {
+ case MLX5_ACTION_TYPE_SET:
+ ret = dr_action_modify_sw_to_hw_set(dmn, sw_action,
+ hw_action,
+ ret_dst_hw_info);
+ break;
+
+ case MLX5_ACTION_TYPE_ADD:
+ ret = dr_action_modify_sw_to_hw_add(dmn, sw_action,
+ hw_action,
+ ret_dst_hw_info);
+ break;
+
+ case MLX5_ACTION_TYPE_COPY:
+ ret = dr_action_modify_sw_to_hw_copy(dmn, sw_action,
+ hw_action,
+ ret_dst_hw_info,
+ ret_src_hw_info);
+ break;
+
+ default:
+ mlx5dr_info(dmn, "Unsupported action_type for modify action\n");
+ ret = -EOPNOTSUPP;
+ }
+
+ return ret;
+}
+
+static int
+dr_action_modify_check_set_field_limitation(struct mlx5dr_action *action,
+ const __be64 *sw_action)
+{
+ u16 sw_field = MLX5_GET(set_action_in, sw_action, field);
+ struct mlx5dr_domain *dmn = action->rewrite.dmn;
+
+ if (sw_field == MLX5_ACTION_IN_FIELD_METADATA_REG_A) {
+ action->rewrite.allow_rx = 0;
+ if (dmn->type != MLX5DR_DOMAIN_TYPE_NIC_TX) {
+ mlx5dr_dbg(dmn, "Unsupported field %d for RX/FDB set action\n",
+ sw_field);
+ return -EINVAL;
+ }
+ } else if (sw_field == MLX5_ACTION_IN_FIELD_METADATA_REG_B) {
+ action->rewrite.allow_tx = 0;
+ if (dmn->type != MLX5DR_DOMAIN_TYPE_NIC_RX) {
+ mlx5dr_dbg(dmn, "Unsupported field %d for TX/FDB set action\n",
+ sw_field);
+ return -EINVAL;
+ }
+ }
+
+ if (!action->rewrite.allow_rx && !action->rewrite.allow_tx) {
+ mlx5dr_dbg(dmn, "Modify SET actions not supported on both RX and TX\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int
+dr_action_modify_check_add_field_limitation(struct mlx5dr_action *action,
+ const __be64 *sw_action)
+{
+ u16 sw_field = MLX5_GET(set_action_in, sw_action, field);
+ struct mlx5dr_domain *dmn = action->rewrite.dmn;
+
+ if (sw_field != MLX5_ACTION_IN_FIELD_OUT_IP_TTL &&
+ sw_field != MLX5_ACTION_IN_FIELD_OUT_IPV6_HOPLIMIT &&
+ sw_field != MLX5_ACTION_IN_FIELD_OUT_TCP_SEQ_NUM &&
+ sw_field != MLX5_ACTION_IN_FIELD_OUT_TCP_ACK_NUM) {
+ mlx5dr_dbg(dmn, "Unsupported field %d for add action\n",
+ sw_field);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int
+dr_action_modify_check_copy_field_limitation(struct mlx5dr_action *action,
+ const __be64 *sw_action)
+{
+ struct mlx5dr_domain *dmn = action->rewrite.dmn;
+ u16 sw_fields[2];
+ int i;
+
+ sw_fields[0] = MLX5_GET(copy_action_in, sw_action, src_field);
+ sw_fields[1] = MLX5_GET(copy_action_in, sw_action, dst_field);
+
+ for (i = 0; i < 2; i++) {
+ if (sw_fields[i] == MLX5_ACTION_IN_FIELD_METADATA_REG_A) {
+ action->rewrite.allow_rx = 0;
if (dmn->type != MLX5DR_DOMAIN_TYPE_NIC_TX) {
mlx5dr_dbg(dmn, "Unsupported field %d for RX/FDB set action\n",
- sw_field);
+ sw_fields[i]);
return -EINVAL;
}
- }
-
- if (sw_field == MLX5_ACTION_IN_FIELD_METADATA_REG_B) {
+ } else if (sw_fields[i] == MLX5_ACTION_IN_FIELD_METADATA_REG_B) {
+ action->rewrite.allow_tx = 0;
if (dmn->type != MLX5DR_DOMAIN_TYPE_NIC_RX) {
mlx5dr_dbg(dmn, "Unsupported field %d for TX/FDB set action\n",
- sw_field);
+ sw_fields[i]);
return -EINVAL;
}
}
- } else if (action == MLX5_ACTION_TYPE_ADD) {
- if (sw_field != MLX5_ACTION_IN_FIELD_OUT_IP_TTL &&
- sw_field != MLX5_ACTION_IN_FIELD_OUT_IPV6_HOPLIMIT &&
- sw_field != MLX5_ACTION_IN_FIELD_OUT_TCP_SEQ_NUM &&
- sw_field != MLX5_ACTION_IN_FIELD_OUT_TCP_ACK_NUM) {
- mlx5dr_dbg(dmn, "Unsupported field %d for add action\n", sw_field);
- return -EINVAL;
- }
- } else {
- mlx5dr_info(dmn, "Unsupported action %d modify action\n", action);
- return -EOPNOTSUPP;
+ }
+
+ if (!action->rewrite.allow_rx && !action->rewrite.allow_tx) {
+ mlx5dr_dbg(dmn, "Modify copy actions not supported on both RX and TX\n");
+ return -EINVAL;
}
return 0;
}
+static int
+dr_action_modify_check_field_limitation(struct mlx5dr_action *action,
+ const __be64 *sw_action)
+{
+ struct mlx5dr_domain *dmn = action->rewrite.dmn;
+ u8 action_type;
+ int ret;
+
+ action_type = MLX5_GET(set_action_in, sw_action, action_type);
+
+ switch (action_type) {
+ case MLX5_ACTION_TYPE_SET:
+ ret = dr_action_modify_check_set_field_limitation(action,
+ sw_action);
+ break;
+
+ case MLX5_ACTION_TYPE_ADD:
+ ret = dr_action_modify_check_add_field_limitation(action,
+ sw_action);
+ break;
+
+ case MLX5_ACTION_TYPE_COPY:
+ ret = dr_action_modify_check_copy_field_limitation(action,
+ sw_action);
+ break;
+
+ default:
+ mlx5dr_info(dmn, "Unsupported action %d modify action\n",
+ action_type);
+ ret = -EOPNOTSUPP;
+ }
+
+ return ret;
+}
+
static bool
dr_action_modify_check_is_ttl_modify(const u64 *sw_action)
{
@@ -1333,7 +1652,7 @@ dr_action_modify_check_is_ttl_modify(const u64 *sw_action)
return sw_field == MLX5_ACTION_IN_FIELD_OUT_IP_TTL;
}
-static int dr_actions_convert_modify_header(struct mlx5dr_domain *dmn,
+static int dr_actions_convert_modify_header(struct mlx5dr_action *action,
u32 max_hw_actions,
u32 num_sw_actions,
__be64 sw_actions[],
@@ -1341,20 +1660,26 @@ static int dr_actions_convert_modify_header(struct mlx5dr_domain *dmn,
u32 *num_hw_actions,
bool *modify_ttl)
{
- const struct dr_action_modify_field_conv *hw_action_info;
+ const struct dr_action_modify_field_conv *hw_dst_action_info;
+ const struct dr_action_modify_field_conv *hw_src_action_info;
u16 hw_field = MLX5DR_ACTION_MDFY_HW_FLD_RESERVED;
u32 l3_type = MLX5DR_ACTION_MDFY_HW_HDR_L3_NONE;
u32 l4_type = MLX5DR_ACTION_MDFY_HW_HDR_L4_NONE;
+ struct mlx5dr_domain *dmn = action->rewrite.dmn;
int ret, i, hw_idx = 0;
__be64 *sw_action;
__be64 hw_action;
*modify_ttl = false;
+ action->rewrite.allow_rx = 1;
+ action->rewrite.allow_tx = 1;
+
for (i = 0; i < num_sw_actions; i++) {
sw_action = &sw_actions[i];
- ret = dr_action_modify_check_field_limitation(dmn, sw_action);
+ ret = dr_action_modify_check_field_limitation(action,
+ sw_action);
if (ret)
return ret;
@@ -1365,32 +1690,35 @@ static int dr_actions_convert_modify_header(struct mlx5dr_domain *dmn,
ret = dr_action_modify_sw_to_hw(dmn,
sw_action,
&hw_action,
- &hw_action_info);
+ &hw_dst_action_info,
+ &hw_src_action_info);
if (ret)
return ret;
/* Due to a HW limitation we cannot modify 2 different L3 types */
- if (l3_type && hw_action_info->l3_type &&
- hw_action_info->l3_type != l3_type) {
+ if (l3_type && hw_dst_action_info->l3_type &&
+ hw_dst_action_info->l3_type != l3_type) {
mlx5dr_dbg(dmn, "Action list can't support two different L3 types\n");
return -EINVAL;
}
- if (hw_action_info->l3_type)
- l3_type = hw_action_info->l3_type;
+ if (hw_dst_action_info->l3_type)
+ l3_type = hw_dst_action_info->l3_type;
/* Due to a HW limitation we cannot modify two different L4 types */
- if (l4_type && hw_action_info->l4_type &&
- hw_action_info->l4_type != l4_type) {
+ if (l4_type && hw_dst_action_info->l4_type &&
+ hw_dst_action_info->l4_type != l4_type) {
mlx5dr_dbg(dmn, "Action list can't support two different L4 types\n");
return -EINVAL;
}
- if (hw_action_info->l4_type)
- l4_type = hw_action_info->l4_type;
+ if (hw_dst_action_info->l4_type)
+ l4_type = hw_dst_action_info->l4_type;
/* HW reads and executes two actions at once; this means we
* need to create a gap if two actions access the same field
*/
- if ((hw_idx % 2) && hw_field == hw_action_info->hw_field) {
+ if ((hw_idx % 2) && (hw_field == hw_dst_action_info->hw_field ||
+ (hw_src_action_info &&
+ hw_field == hw_src_action_info->hw_field))) {
/* Check if after gap insertion the total number of HW
* modify actions doesn't exceed the limit
*/
@@ -1400,7 +1728,7 @@ static int dr_actions_convert_modify_header(struct mlx5dr_domain *dmn,
return -EINVAL;
}
}
- hw_field = hw_action_info->hw_field;
+ hw_field = hw_dst_action_info->hw_field;
hw_actions[hw_idx] = hw_action;
hw_idx++;
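
Since the hardware consumes modify-header actions in pairs, the converter inserts a one-slot gap whenever an action landing at an odd index touches the same field (destination or, for copy, source) as its predecessor. A standalone model of that pairing rule follows; the field IDs and NOP encoding are illustrative, not the device format.

#include <stdio.h>

#define MAX_HW_ACTIONS 8
#define HW_NOP -1			/* illustrative gap encoding */

/* Returns the number of slots used, or -2 on overflow. */
static int place_actions(const int *fields, int n, int *slots)
{
	int hw_idx = 0, prev_field = HW_NOP;

	for (int i = 0; i < n; i++) {
		/* Odd slot sharing a field with the even slot before it:
		 * pad with a NOP so one pair never touches a field twice. */
		if ((hw_idx % 2) && fields[i] == prev_field) {
			if (hw_idx + 1 >= MAX_HW_ACTIONS)
				return -2;
			slots[hw_idx++] = HW_NOP;
		}
		if (hw_idx >= MAX_HW_ACTIONS)
			return -2;
		prev_field = fields[i];
		slots[hw_idx++] = fields[i];
	}
	return hw_idx;
}

int main(void)
{
	int fields[] = { 3, 3, 5 };	/* two writes to field 3, one to 5 */
	int slots[MAX_HW_ACTIONS];
	int used = place_actions(fields, 3, slots);

	for (int i = 0; i < used; i++)
		printf("slot %d: %d\n", i, slots[i]);
	return 0;
}
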
@@ -1443,7 +1771,7 @@ static int dr_action_create_modify_action(struct mlx5dr_domain *dmn,
goto free_chunk;
}
- ret = dr_actions_convert_modify_header(dmn,
+ ret = dr_actions_convert_modify_header(action,
max_hw_actions,
num_sw_actions,
actions,
@@ -1559,8 +1887,26 @@ int mlx5dr_action_destroy(struct mlx5dr_action *action)
switch (action->action_type) {
case DR_ACTION_TYP_FT:
- if (!action->dest_tbl.is_fw_tbl)
+ if (action->dest_tbl.is_fw_tbl)
+ refcount_dec(&action->dest_tbl.fw_tbl.dmn->refcount);
+ else
refcount_dec(&action->dest_tbl.tbl->refcount);
+
+ if (action->dest_tbl.is_fw_tbl &&
+ action->dest_tbl.fw_tbl.num_of_ref_actions) {
+ struct mlx5dr_action **ref_actions;
+ int i;
+
+ ref_actions = action->dest_tbl.fw_tbl.ref_actions;
+ for (i = 0; i < action->dest_tbl.fw_tbl.num_of_ref_actions; i++)
+ refcount_dec(&ref_actions[i]->refcount);
+
+ kfree(ref_actions);
+
+ mlx5dr_fw_destroy_md_tbl(action->dest_tbl.fw_tbl.dmn,
+ action->dest_tbl.fw_tbl.id,
+ action->dest_tbl.fw_tbl.group_id);
+ }
break;
case DR_ACTION_TYP_TNL_L2_TO_L2:
refcount_dec(&action->reformat.dmn->refcount);
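
Both dr_action_modify_sw_to_hw_set() and dr_action_modify_sw_to_hw_add() above encode lengths under the PRM rule that a destination_length of zero denotes a full 32-bit write. A round-trip sketch of that convention under an assumed, simplified bit layout (the real dr_action_hw_set fields differ):

#include <stdint.h>
#include <stdio.h>

/* Illustrative layout: 8-bit opcode, 8-bit field code, 6-bit shift and
 * 6-bit length packed into one 32-bit word. */
static uint32_t encode(uint8_t op, uint8_t field, uint8_t shift, uint8_t len)
{
	uint8_t enc_len = (len == 32) ? 0 : len;	/* PRM: 0 means 32 */

	return (uint32_t)op << 24 | (uint32_t)field << 16 |
	       (uint32_t)(shift & 0x3f) << 6 | (enc_len & 0x3f);
}

static unsigned int decode_len(uint32_t hw)
{
	unsigned int enc_len = hw & 0x3f;

	return enc_len ? enc_len : 32;
}

int main(void)
{
	uint32_t hw = encode(0x2 /* SET */, 0x11, 0, 32);

	printf("encoded length bits: %u, decoded length: %u\n",
	       (unsigned int)(hw & 0x3f), decode_len(hw));
	return 0;
}
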
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_cmd.c
index 41662c4e2664..461b39376daf 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_cmd.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_cmd.c
@@ -320,12 +320,7 @@ int mlx5dr_cmd_destroy_flow_group(struct mlx5_core_dev *mdev,
}
int mlx5dr_cmd_create_flow_table(struct mlx5_core_dev *mdev,
- u32 table_type,
- u64 icm_addr_rx,
- u64 icm_addr_tx,
- u8 level,
- bool sw_owner,
- bool term_tbl,
+ struct mlx5dr_cmd_create_flow_table_attr *attr,
u64 *fdb_rx_icm_addr,
u32 *table_id)
{
@@ -335,37 +330,43 @@ int mlx5dr_cmd_create_flow_table(struct mlx5_core_dev *mdev,
int err;
MLX5_SET(create_flow_table_in, in, opcode, MLX5_CMD_OP_CREATE_FLOW_TABLE);
- MLX5_SET(create_flow_table_in, in, table_type, table_type);
+ MLX5_SET(create_flow_table_in, in, table_type, attr->table_type);
ft_mdev = MLX5_ADDR_OF(create_flow_table_in, in, flow_table_context);
- MLX5_SET(flow_table_context, ft_mdev, termination_table, term_tbl);
- MLX5_SET(flow_table_context, ft_mdev, sw_owner, sw_owner);
- MLX5_SET(flow_table_context, ft_mdev, level, level);
+ MLX5_SET(flow_table_context, ft_mdev, termination_table, attr->term_tbl);
+ MLX5_SET(flow_table_context, ft_mdev, sw_owner, attr->sw_owner);
+ MLX5_SET(flow_table_context, ft_mdev, level, attr->level);
- if (sw_owner) {
+ if (attr->sw_owner) {
/* icm_addr_0 used for FDB RX / NIC TX / NIC RX
* icm_addr_1 used for FDB TX
*/
- if (table_type == MLX5_FLOW_TABLE_TYPE_NIC_RX) {
+ if (attr->table_type == MLX5_FLOW_TABLE_TYPE_NIC_RX) {
MLX5_SET64(flow_table_context, ft_mdev,
- sw_owner_icm_root_0, icm_addr_rx);
- } else if (table_type == MLX5_FLOW_TABLE_TYPE_NIC_TX) {
+ sw_owner_icm_root_0, attr->icm_addr_rx);
+ } else if (attr->table_type == MLX5_FLOW_TABLE_TYPE_NIC_TX) {
MLX5_SET64(flow_table_context, ft_mdev,
- sw_owner_icm_root_0, icm_addr_tx);
- } else if (table_type == MLX5_FLOW_TABLE_TYPE_FDB) {
+ sw_owner_icm_root_0, attr->icm_addr_tx);
+ } else if (attr->table_type == MLX5_FLOW_TABLE_TYPE_FDB) {
MLX5_SET64(flow_table_context, ft_mdev,
- sw_owner_icm_root_0, icm_addr_rx);
+ sw_owner_icm_root_0, attr->icm_addr_rx);
MLX5_SET64(flow_table_context, ft_mdev,
- sw_owner_icm_root_1, icm_addr_tx);
+ sw_owner_icm_root_1, attr->icm_addr_tx);
}
}
+ MLX5_SET(create_flow_table_in, in, flow_table_context.decap_en,
+ attr->decap_en);
+ MLX5_SET(create_flow_table_in, in, flow_table_context.reformat_en,
+ attr->reformat_en);
+
err = mlx5_cmd_exec(mdev, in, sizeof(in), out, sizeof(out));
if (err)
return err;
*table_id = MLX5_GET(create_flow_table_out, out, table_id);
- if (!sw_owner && table_type == MLX5_FLOW_TABLE_TYPE_FDB)
+ if (!attr->sw_owner && attr->table_type == MLX5_FLOW_TABLE_TYPE_FDB &&
+ fdb_rx_icm_addr)
*fdb_rx_icm_addr =
(u64)MLX5_GET(create_flow_table_out, out, icm_address_31_0) |
(u64)MLX5_GET(create_flow_table_out, out, icm_address_39_32) << 32 |
@@ -478,3 +479,208 @@ int mlx5dr_cmd_query_gid(struct mlx5_core_dev *mdev, u8 vhca_port_num,
return 0;
}
+
+static int mlx5dr_cmd_set_extended_dest(struct mlx5_core_dev *dev,
+ struct mlx5dr_cmd_fte_info *fte,
+ bool *extended_dest)
+{
+ int fw_log_max_fdb_encap_uplink = MLX5_CAP_ESW(dev, log_max_fdb_encap_uplink);
+ int num_fwd_destinations = 0;
+ int num_encap = 0;
+ int i;
+
+ *extended_dest = false;
+ if (!(fte->action.action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST))
+ return 0;
+ for (i = 0; i < fte->dests_size; i++) {
+ if (fte->dest_arr[i].type == MLX5_FLOW_DESTINATION_TYPE_COUNTER)
+ continue;
+ if (fte->dest_arr[i].type == MLX5_FLOW_DESTINATION_TYPE_VPORT &&
+ fte->dest_arr[i].vport.flags & MLX5_FLOW_DEST_VPORT_REFORMAT_ID)
+ num_encap++;
+ num_fwd_destinations++;
+ }
+
+ if (num_fwd_destinations > 1 && num_encap > 0)
+ *extended_dest = true;
+
+ if (*extended_dest && !fw_log_max_fdb_encap_uplink) {
+ mlx5_core_warn(dev, "FW does not support extended destination");
+ return -EOPNOTSUPP;
+ }
+ if (num_encap > (1 << fw_log_max_fdb_encap_uplink)) {
+ mlx5_core_warn(dev, "FW does not support more than %d encaps",
+ 1 << fw_log_max_fdb_encap_uplink);
+ return -EOPNOTSUPP;
+ }
+
+ return 0;
+}
+
+int mlx5dr_cmd_set_fte(struct mlx5_core_dev *dev,
+ int opmod, int modify_mask,
+ struct mlx5dr_cmd_ft_info *ft,
+ u32 group_id,
+ struct mlx5dr_cmd_fte_info *fte)
+{
+ u32 out[MLX5_ST_SZ_DW(set_fte_out)] = {};
+ void *in_flow_context, *vlan;
+ bool extended_dest = false;
+ void *in_match_value;
+ unsigned int inlen;
+ int dst_cnt_size;
+ void *in_dests;
+ u32 *in;
+ int err;
+ int i;
+
+ if (mlx5dr_cmd_set_extended_dest(dev, fte, &extended_dest))
+ return -EOPNOTSUPP;
+
+ if (!extended_dest)
+ dst_cnt_size = MLX5_ST_SZ_BYTES(dest_format_struct);
+ else
+ dst_cnt_size = MLX5_ST_SZ_BYTES(extended_dest_format);
+
+ inlen = MLX5_ST_SZ_BYTES(set_fte_in) + fte->dests_size * dst_cnt_size;
+ in = kvzalloc(inlen, GFP_KERNEL);
+ if (!in)
+ return -ENOMEM;
+
+ MLX5_SET(set_fte_in, in, opcode, MLX5_CMD_OP_SET_FLOW_TABLE_ENTRY);
+ MLX5_SET(set_fte_in, in, op_mod, opmod);
+ MLX5_SET(set_fte_in, in, modify_enable_mask, modify_mask);
+ MLX5_SET(set_fte_in, in, table_type, ft->type);
+ MLX5_SET(set_fte_in, in, table_id, ft->id);
+ MLX5_SET(set_fte_in, in, flow_index, fte->index);
+ if (ft->vport) {
+ MLX5_SET(set_fte_in, in, vport_number, ft->vport);
+ MLX5_SET(set_fte_in, in, other_vport, 1);
+ }
+
+ in_flow_context = MLX5_ADDR_OF(set_fte_in, in, flow_context);
+ MLX5_SET(flow_context, in_flow_context, group_id, group_id);
+
+ MLX5_SET(flow_context, in_flow_context, flow_tag,
+ fte->flow_context.flow_tag);
+ MLX5_SET(flow_context, in_flow_context, flow_source,
+ fte->flow_context.flow_source);
+
+ MLX5_SET(flow_context, in_flow_context, extended_destination,
+ extended_dest);
+ if (extended_dest) {
+ u32 action;
+
+ action = fte->action.action &
+ ~MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT;
+ MLX5_SET(flow_context, in_flow_context, action, action);
+ } else {
+ MLX5_SET(flow_context, in_flow_context, action,
+ fte->action.action);
+ if (fte->action.pkt_reformat)
+ MLX5_SET(flow_context, in_flow_context, packet_reformat_id,
+ fte->action.pkt_reformat->id);
+ }
+ if (fte->action.modify_hdr)
+ MLX5_SET(flow_context, in_flow_context, modify_header_id,
+ fte->action.modify_hdr->id);
+
+ vlan = MLX5_ADDR_OF(flow_context, in_flow_context, push_vlan);
+
+ MLX5_SET(vlan, vlan, ethtype, fte->action.vlan[0].ethtype);
+ MLX5_SET(vlan, vlan, vid, fte->action.vlan[0].vid);
+ MLX5_SET(vlan, vlan, prio, fte->action.vlan[0].prio);
+
+ vlan = MLX5_ADDR_OF(flow_context, in_flow_context, push_vlan_2);
+
+ MLX5_SET(vlan, vlan, ethtype, fte->action.vlan[1].ethtype);
+ MLX5_SET(vlan, vlan, vid, fte->action.vlan[1].vid);
+ MLX5_SET(vlan, vlan, prio, fte->action.vlan[1].prio);
+
+ in_match_value = MLX5_ADDR_OF(flow_context, in_flow_context,
+ match_value);
+ memcpy(in_match_value, fte->val, sizeof(u32) * MLX5_ST_SZ_DW_MATCH_PARAM);
+
+ in_dests = MLX5_ADDR_OF(flow_context, in_flow_context, destination);
+ if (fte->action.action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST) {
+ int list_size = 0;
+
+ for (i = 0; i < fte->dests_size; i++) {
+ unsigned int id, type = fte->dest_arr[i].type;
+
+ if (type == MLX5_FLOW_DESTINATION_TYPE_COUNTER)
+ continue;
+
+ switch (type) {
+ case MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE_NUM:
+ id = fte->dest_arr[i].ft_num;
+ type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
+ break;
+ case MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE:
+ id = fte->dest_arr[i].ft_id;
+ break;
+ case MLX5_FLOW_DESTINATION_TYPE_VPORT:
+ id = fte->dest_arr[i].vport.num;
+ MLX5_SET(dest_format_struct, in_dests,
+ destination_eswitch_owner_vhca_id_valid,
+ !!(fte->dest_arr[i].vport.flags &
+ MLX5_FLOW_DEST_VPORT_VHCA_ID));
+ MLX5_SET(dest_format_struct, in_dests,
+ destination_eswitch_owner_vhca_id,
+ fte->dest_arr[i].vport.vhca_id);
+ if (extended_dest && (fte->dest_arr[i].vport.flags &
+ MLX5_FLOW_DEST_VPORT_REFORMAT_ID)) {
+ MLX5_SET(dest_format_struct, in_dests,
+ packet_reformat,
+ !!(fte->dest_arr[i].vport.flags &
+ MLX5_FLOW_DEST_VPORT_REFORMAT_ID));
+ MLX5_SET(extended_dest_format, in_dests,
+ packet_reformat_id,
+ fte->dest_arr[i].vport.reformat_id);
+ }
+ break;
+ default:
+ id = fte->dest_arr[i].tir_num;
+ }
+
+ MLX5_SET(dest_format_struct, in_dests, destination_type,
+ type);
+ MLX5_SET(dest_format_struct, in_dests, destination_id, id);
+ in_dests += dst_cnt_size;
+ list_size++;
+ }
+
+ MLX5_SET(flow_context, in_flow_context, destination_list_size,
+ list_size);
+ }
+
+ if (fte->action.action & MLX5_FLOW_CONTEXT_ACTION_COUNT) {
+ int max_list_size = BIT(MLX5_CAP_FLOWTABLE_TYPE(dev,
+ log_max_flow_counter,
+ ft->type));
+ int list_size = 0;
+
+ for (i = 0; i < fte->dests_size; i++) {
+ if (fte->dest_arr[i].type !=
+ MLX5_FLOW_DESTINATION_TYPE_COUNTER)
+ continue;
+
+ MLX5_SET(flow_counter_list, in_dests, flow_counter_id,
+ fte->dest_arr[i].counter_id);
+ in_dests += dst_cnt_size;
+ list_size++;
+ }
+ if (list_size > max_list_size) {
+ err = -EINVAL;
+ goto err_out;
+ }
+
+ MLX5_SET(flow_context, in_flow_context, flow_counter_list_size,
+ list_size);
+ }
+
+ err = mlx5_cmd_exec(dev, in, inlen, out, sizeof(out));
+err_out:
+ kvfree(in);
+ return err;
+}
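
mlx5dr_cmd_set_fte() switches to the larger extended destination entry format only when the forwarding list holds more than one destination and at least one per-vport packet reformat, as computed by mlx5dr_cmd_set_extended_dest(). The same decision in compact standalone form (destination types and flags simplified):

#include <stdbool.h>
#include <stdio.h>

enum dest_type { DEST_COUNTER, DEST_VPORT, DEST_TABLE };

struct dest {
	enum dest_type type;
	bool has_reformat;	/* models MLX5_FLOW_DEST_VPORT_REFORMAT_ID */
};

static bool needs_extended_dest(const struct dest *d, int n)
{
	int fwd = 0, encap = 0;

	for (int i = 0; i < n; i++) {
		if (d[i].type == DEST_COUNTER)
			continue;	/* counters are not forwarding dests */
		if (d[i].type == DEST_VPORT && d[i].has_reformat)
			encap++;
		fwd++;
	}
	return fwd > 1 && encap > 0;
}

int main(void)
{
	struct dest dests[] = {
		{ DEST_VPORT, true },
		{ DEST_TABLE, false },
		{ DEST_COUNTER, false },
	};

	printf("extended destination: %s\n",
	       needs_extended_dest(dests, 3) ? "yes" : "no");
	return 0;
}
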
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_fw.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_fw.c
index 60ef6e6171e3..1fbcd012bb85 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_fw.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_fw.c
@@ -7,6 +7,7 @@
struct mlx5dr_fw_recalc_cs_ft *
mlx5dr_fw_create_recalc_cs_ft(struct mlx5dr_domain *dmn, u32 vport_num)
{
+ struct mlx5dr_cmd_create_flow_table_attr ft_attr = {};
struct mlx5dr_fw_recalc_cs_ft *recalc_cs_ft;
u32 table_id, group_id, modify_hdr_id;
u64 rx_icm_addr, modify_ttl_action;
@@ -16,9 +17,14 @@ mlx5dr_fw_create_recalc_cs_ft(struct mlx5dr_domain *dmn, u32 vport_num)
if (!recalc_cs_ft)
return NULL;
- ret = mlx5dr_cmd_create_flow_table(dmn->mdev, MLX5_FLOW_TABLE_TYPE_FDB,
- 0, 0, dmn->info.caps.max_ft_level - 1,
- false, true, &rx_icm_addr, &table_id);
+ ft_attr.table_type = MLX5_FLOW_TABLE_TYPE_FDB;
+ ft_attr.level = dmn->info.caps.max_ft_level - 1;
+ ft_attr.term_tbl = true;
+
+ ret = mlx5dr_cmd_create_flow_table(dmn->mdev,
+ &ft_attr,
+ &rx_icm_addr,
+ &table_id);
if (ret) {
mlx5dr_err(dmn, "Failed creating TTL W/A FW flow table %d\n", ret);
goto free_ttl_tbl;
@@ -91,3 +97,70 @@ void mlx5dr_fw_destroy_recalc_cs_ft(struct mlx5dr_domain *dmn,
kfree(recalc_cs_ft);
}
+
+int mlx5dr_fw_create_md_tbl(struct mlx5dr_domain *dmn,
+ struct mlx5dr_cmd_flow_destination_hw_info *dest,
+ int num_dest,
+ bool reformat_req,
+ u32 *tbl_id,
+ u32 *group_id)
+{
+ struct mlx5dr_cmd_create_flow_table_attr ft_attr = {};
+ struct mlx5dr_cmd_fte_info fte_info = {};
+ u32 val[MLX5_ST_SZ_DW_MATCH_PARAM] = {};
+ struct mlx5dr_cmd_ft_info ft_info = {};
+ int ret;
+
+ ft_attr.table_type = MLX5_FLOW_TABLE_TYPE_FDB;
+ ft_attr.level = dmn->info.caps.max_ft_level - 2;
+ ft_attr.reformat_en = reformat_req;
+ ft_attr.decap_en = reformat_req;
+
+ ret = mlx5dr_cmd_create_flow_table(dmn->mdev, &ft_attr, NULL, tbl_id);
+ if (ret) {
+ mlx5dr_err(dmn, "Failed creating multi dest FW flow table %d\n", ret);
+ return ret;
+ }
+
+ ret = mlx5dr_cmd_create_empty_flow_group(dmn->mdev,
+ MLX5_FLOW_TABLE_TYPE_FDB,
+ *tbl_id, group_id);
+ if (ret) {
+ mlx5dr_err(dmn, "Failed creating multi dest FW flow group %d\n", ret);
+ goto free_flow_table;
+ }
+
+ ft_info.id = *tbl_id;
+ ft_info.type = FS_FT_FDB;
+ fte_info.action.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
+ fte_info.dests_size = num_dest;
+ fte_info.val = val;
+ fte_info.dest_arr = dest;
+
+ ret = mlx5dr_cmd_set_fte(dmn->mdev, 0, 0, &ft_info, *group_id, &fte_info);
+ if (ret) {
+ mlx5dr_err(dmn, "Failed setting fte into table %d\n", ret);
+ goto free_flow_group;
+ }
+
+ return 0;
+
+free_flow_group:
+ mlx5dr_cmd_destroy_flow_group(dmn->mdev, MLX5_FLOW_TABLE_TYPE_FDB,
+ *tbl_id, *group_id);
+free_flow_table:
+ mlx5dr_cmd_destroy_flow_table(dmn->mdev, *tbl_id,
+ MLX5_FLOW_TABLE_TYPE_FDB);
+ return ret;
+}
+
+void mlx5dr_fw_destroy_md_tbl(struct mlx5dr_domain *dmn,
+ u32 tbl_id, u32 group_id)
+{
+ mlx5dr_cmd_del_flow_table_entry(dmn->mdev, FS_FT_FDB, tbl_id);
+ mlx5dr_cmd_destroy_flow_group(dmn->mdev,
+ MLX5_FLOW_TABLE_TYPE_FDB,
+ tbl_id, group_id);
+ mlx5dr_cmd_destroy_flow_table(dmn->mdev, tbl_id,
+ MLX5_FLOW_TABLE_TYPE_FDB);
+}
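
mlx5dr_fw_create_md_tbl() builds three dependent objects — flow table, flow group, then the forwarding entry — and its error path unwinds them in reverse, mirroring the teardown order in mlx5dr_fw_destroy_md_tbl(). A minimal model of that goto-unwind idiom with stand-in constructors:

#include <stdio.h>

static int create_table(void) { puts("create table"); return 0; }
static int create_group(void) { puts("create group"); return 0; }
static int create_entry(void) { puts("create entry"); return -1; /* fails */ }

static void destroy_group(void) { puts("destroy group"); }
static void destroy_table(void) { puts("destroy table"); }

static int create_md_tbl(void)
{
	int ret;

	ret = create_table();
	if (ret)
		return ret;

	ret = create_group();
	if (ret)
		goto free_table;

	ret = create_entry();
	if (ret)
		goto free_group;

	return 0;

free_group:
	destroy_group();
free_table:
	destroy_table();
	return ret;
}

int main(void)
{
	return create_md_tbl() ? 1 : 0;
}
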
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_send.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_send.c
index 51803eef13dd..c7f10d4f8f8d 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_send.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_send.c
@@ -1,6 +1,7 @@
// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/* Copyright (c) 2019 Mellanox Technologies. */
+#include <linux/smp.h>
#include "dr_types.h"
#define QUEUE_SIZE 128
@@ -729,7 +730,7 @@ static struct mlx5dr_cq *dr_create_cq(struct mlx5_core_dev *mdev,
if (!in)
goto err_cqwq;
- vector = smp_processor_id() % mlx5_comp_vectors_count(mdev);
+ vector = raw_smp_processor_id() % mlx5_comp_vectors_count(mdev);
err = mlx5_vector2eqn(mdev, vector, &eqn, &irqn);
if (err) {
kvfree(in);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_table.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_table.c
index e178d8d3dbc9..14ce2d7dbb66 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_table.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_table.c
@@ -211,6 +211,9 @@ static int dr_table_destroy_sw_owned_tbl(struct mlx5dr_table *tbl)
static int dr_table_create_sw_owned_tbl(struct mlx5dr_table *tbl)
{
+ bool en_encap = !!(tbl->flags & MLX5_FLOW_TABLE_TUNNEL_EN_REFORMAT);
+ bool en_decap = !!(tbl->flags & MLX5_FLOW_TABLE_TUNNEL_EN_DECAP);
+ struct mlx5dr_cmd_create_flow_table_attr ft_attr = {};
u64 icm_addr_rx = 0;
u64 icm_addr_tx = 0;
int ret;
@@ -221,18 +224,21 @@ static int dr_table_create_sw_owned_tbl(struct mlx5dr_table *tbl)
if (tbl->tx.s_anchor)
icm_addr_tx = tbl->tx.s_anchor->chunk->icm_addr;
- ret = mlx5dr_cmd_create_flow_table(tbl->dmn->mdev,
- tbl->table_type,
- icm_addr_rx,
- icm_addr_tx,
- tbl->dmn->info.caps.max_ft_level - 1,
- true, false, NULL,
- &tbl->table_id);
+ ft_attr.table_type = tbl->table_type;
+ ft_attr.icm_addr_rx = icm_addr_rx;
+ ft_attr.icm_addr_tx = icm_addr_tx;
+ ft_attr.level = tbl->dmn->info.caps.max_ft_level - 1;
+ ft_attr.sw_owner = true;
+ ft_attr.decap_en = en_decap;
+ ft_attr.reformat_en = en_encap;
+
+ ret = mlx5dr_cmd_create_flow_table(tbl->dmn->mdev, &ft_attr,
+ NULL, &tbl->table_id);
return ret;
}
-struct mlx5dr_table *mlx5dr_table_create(struct mlx5dr_domain *dmn, u32 level)
+struct mlx5dr_table *mlx5dr_table_create(struct mlx5dr_domain *dmn, u32 level, u32 flags)
{
struct mlx5dr_table *tbl;
int ret;
@@ -245,6 +251,7 @@ struct mlx5dr_table *mlx5dr_table_create(struct mlx5dr_domain *dmn, u32 level)
tbl->dmn = dmn;
tbl->level = level;
+ tbl->flags = flags;
refcount_set(&tbl->refcount, 1);
ret = dr_table_init(tbl);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_types.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_types.h
index 3fdf4a5eb031..dffe35145d19 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_types.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_types.h
@@ -681,6 +681,7 @@ struct mlx5dr_table {
u32 level;
u32 table_type;
u32 table_id;
+ u32 flags;
struct list_head matcher_list;
struct mlx5dr_action *miss_action;
refcount_t refcount;
@@ -744,10 +745,14 @@ struct mlx5dr_action {
union {
struct mlx5dr_table *tbl;
struct {
- struct mlx5_flow_table *ft;
+ struct mlx5dr_domain *dmn;
+ u32 id;
+ u32 group_id;
+ enum fs_flow_table_type type;
u64 rx_icm_addr;
u64 tx_icm_addr;
- struct mlx5_core_dev *mdev;
+ struct mlx5dr_action **ref_actions;
+ u32 num_of_ref_actions;
} fw_tbl;
};
} dest_tbl;
@@ -869,6 +874,17 @@ struct mlx5dr_cmd_query_flow_table_details {
u64 sw_owner_icm_root_0;
};
+struct mlx5dr_cmd_create_flow_table_attr {
+ u32 table_type;
+ u64 icm_addr_rx;
+ u64 icm_addr_tx;
+ u8 level;
+ bool sw_owner;
+ bool term_tbl;
+ bool decap_en;
+ bool reformat_en;
+};
+
/* internal API functions */
int mlx5dr_cmd_query_device(struct mlx5_core_dev *mdev,
struct mlx5dr_cmd_caps *caps);
@@ -906,12 +922,7 @@ int mlx5dr_cmd_destroy_flow_group(struct mlx5_core_dev *mdev,
u32 table_id,
u32 group_id);
int mlx5dr_cmd_create_flow_table(struct mlx5_core_dev *mdev,
- u32 table_type,
- u64 icm_addr_rx,
- u64 icm_addr_tx,
- u8 level,
- bool sw_owner,
- bool term_tbl,
+ struct mlx5dr_cmd_create_flow_table_attr *attr,
u64 *fdb_rx_icm_addr,
u32 *table_id);
int mlx5dr_cmd_destroy_flow_table(struct mlx5_core_dev *mdev,
@@ -1053,6 +1064,43 @@ int mlx5dr_send_postsend_formatted_htbl(struct mlx5dr_domain *dmn,
int mlx5dr_send_postsend_action(struct mlx5dr_domain *dmn,
struct mlx5dr_action *action);
+struct mlx5dr_cmd_ft_info {
+ u32 id;
+ u16 vport;
+ enum fs_flow_table_type type;
+};
+
+struct mlx5dr_cmd_flow_destination_hw_info {
+ enum mlx5_flow_destination_type type;
+ union {
+ u32 tir_num;
+ u32 ft_num;
+ u32 ft_id;
+ u32 counter_id;
+ struct {
+ u16 num;
+ u16 vhca_id;
+ u32 reformat_id;
+ u8 flags;
+ } vport;
+ };
+};
+
+struct mlx5dr_cmd_fte_info {
+ u32 dests_size;
+ u32 index;
+ struct mlx5_flow_context flow_context;
+ u32 *val;
+ struct mlx5_flow_act action;
+ struct mlx5dr_cmd_flow_destination_hw_info *dest_arr;
+};
+
+int mlx5dr_cmd_set_fte(struct mlx5_core_dev *dev,
+ int opmod, int modify_mask,
+ struct mlx5dr_cmd_ft_info *ft,
+ u32 group_id,
+ struct mlx5dr_cmd_fte_info *fte);
+
struct mlx5dr_fw_recalc_cs_ft {
u64 rx_icm_addr;
u32 table_id;
@@ -1067,4 +1115,12 @@ void mlx5dr_fw_destroy_recalc_cs_ft(struct mlx5dr_domain *dmn,
int mlx5dr_domain_cache_get_recalc_cs_ft_addr(struct mlx5dr_domain *dmn,
u32 vport_num,
u64 *rx_icm_addr);
+int mlx5dr_fw_create_md_tbl(struct mlx5dr_domain *dmn,
+ struct mlx5dr_cmd_flow_destination_hw_info *dest,
+ int num_dest,
+ bool reformat_req,
+ u32 *tbl_id,
+ u32 *group_id);
+void mlx5dr_fw_destroy_md_tbl(struct mlx5dr_domain *dmn, u32 tbl_id,
+ u32 group_id);
#endif /* _DR_TYPES_H_ */
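
Collapsing the six scalar parameters of mlx5dr_cmd_create_flow_table() into mlx5dr_cmd_create_flow_table_attr lets later capabilities (decap_en and reformat_en here) extend the struct rather than every prototype and call site. A sketch of the same pattern; the struct mirrors the header above, but the function body is a stub:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct flow_table_attr {
	uint32_t table_type;
	uint64_t icm_addr_rx;
	uint64_t icm_addr_tx;
	uint8_t  level;
	bool sw_owner;
	bool term_tbl;
	bool decap_en;
	bool reformat_en;
};

static int create_flow_table(const struct flow_table_attr *attr,
			     uint32_t *table_id)
{
	printf("type=%u level=%u term=%d\n",
	       attr->table_type, attr->level, attr->term_tbl);
	*table_id = 1;
	return 0;
}

int main(void)
{
	/* Designated initializers name only the relevant knobs;
	 * everything else defaults to zero, as with the {} init above. */
	struct flow_table_attr attr = {
		.table_type = 0,	/* FDB, illustrative value */
		.level = 63,
		.term_tbl = true,
	};
	uint32_t id;

	return create_flow_table(&attr, &id);
}
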
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/fs_dr.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/fs_dr.c
index 3d587d0bdbbe..3abfc8125926 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/fs_dr.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/fs_dr.c
@@ -74,7 +74,7 @@ static int mlx5_cmd_dr_create_flow_table(struct mlx5_flow_root_namespace *ns,
next_ft);
tbl = mlx5dr_table_create(ns->fs_dr_domain.dr_domain,
- ft->level);
+ ft->level, ft->flags);
if (!tbl) {
mlx5_core_err(ns->dev, "Failed creating dr flow_table\n");
return -EINVAL;
@@ -184,13 +184,13 @@ static struct mlx5dr_action *create_vport_action(struct mlx5dr_domain *domain,
dest_attr->vport.vhca_id);
}
-static struct mlx5dr_action *create_ft_action(struct mlx5_core_dev *dev,
+static struct mlx5dr_action *create_ft_action(struct mlx5dr_domain *domain,
struct mlx5_flow_rule *dst)
{
struct mlx5_flow_table *dest_ft = dst->dest_attr.ft;
if (mlx5_dr_is_fw_table(dest_ft->flags))
- return mlx5dr_create_action_dest_flow_fw_table(dest_ft, dev);
+ return mlx5dr_action_create_dest_flow_fw_table(domain, dest_ft);
return mlx5dr_action_create_dest_table(dest_ft->fs_dr_table.dr_table);
}
@@ -206,6 +206,12 @@ static struct mlx5dr_action *create_action_push_vlan(struct mlx5dr_domain *domai
return mlx5dr_action_create_push_vlan(domain, htonl(vlan_hdr));
}
+static bool contain_vport_reformat_action(struct mlx5_flow_rule *dst)
+{
+ return dst->dest_attr.type == MLX5_FLOW_DESTINATION_TYPE_VPORT &&
+ dst->dest_attr.vport.flags & MLX5_FLOW_DEST_VPORT_REFORMAT_ID;
+}
+
#define MLX5_FLOW_CONTEXT_ACTION_MAX 20
static int mlx5_cmd_dr_create_fte(struct mlx5_flow_root_namespace *ns,
struct mlx5_flow_table *ft,
@@ -213,7 +219,7 @@ static int mlx5_cmd_dr_create_fte(struct mlx5_flow_root_namespace *ns,
struct fs_fte *fte)
{
struct mlx5dr_domain *domain = ns->fs_dr_domain.dr_domain;
- struct mlx5dr_action *term_action = NULL;
+ struct mlx5dr_action_dest *term_actions;
struct mlx5dr_match_parameters params;
struct mlx5_core_dev *dev = ns->dev;
struct mlx5dr_action **fs_dr_actions;
@@ -223,6 +229,7 @@ static int mlx5_cmd_dr_create_fte(struct mlx5_flow_root_namespace *ns,
struct mlx5dr_rule *rule;
struct mlx5_flow_rule *dst;
int fs_dr_num_actions = 0;
+ int num_term_actions = 0;
int num_actions = 0;
size_t match_sz;
int err = 0;
@@ -233,18 +240,38 @@ static int mlx5_cmd_dr_create_fte(struct mlx5_flow_root_namespace *ns,
actions = kcalloc(MLX5_FLOW_CONTEXT_ACTION_MAX, sizeof(*actions),
GFP_KERNEL);
- if (!actions)
- return -ENOMEM;
+ if (!actions) {
+ err = -ENOMEM;
+ goto out_err;
+ }
fs_dr_actions = kcalloc(MLX5_FLOW_CONTEXT_ACTION_MAX,
sizeof(*fs_dr_actions), GFP_KERNEL);
if (!fs_dr_actions) {
- kfree(actions);
- return -ENOMEM;
+ err = -ENOMEM;
+ goto free_actions_alloc;
+ }
+
+ term_actions = kcalloc(MLX5_FLOW_CONTEXT_ACTION_MAX,
+ sizeof(*term_actions), GFP_KERNEL);
+ if (!term_actions) {
+ err = -ENOMEM;
+ goto free_fs_dr_actions_alloc;
}
match_sz = sizeof(fte->val);
+ /* Drop the reformat action bit if a destination vport is set with reformat */
+ if (fte->action.action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST) {
+ list_for_each_entry(dst, &fte->node.children, node.list) {
+ if (!contain_vport_reformat_action(dst))
+ continue;
+
+ fte->action.action &= ~MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT;
+ break;
+ }
+ }
+
/* The order of the actions must be kept; only the following
* order is supported by SW steering:
* TX: push vlan -> modify header -> encap
@@ -335,7 +362,7 @@ static int mlx5_cmd_dr_create_fte(struct mlx5_flow_root_namespace *ns,
goto free_actions;
}
fs_dr_actions[fs_dr_num_actions++] = tmp_action;
- term_action = tmp_action;
+ term_actions[num_term_actions++].dest = tmp_action;
}
if (fte->flow_context.flow_tag) {
@@ -352,34 +379,25 @@ static int mlx5_cmd_dr_create_fte(struct mlx5_flow_root_namespace *ns,
if (fte->action.action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST) {
list_for_each_entry(dst, &fte->node.children, node.list) {
enum mlx5_flow_destination_type type = dst->dest_attr.type;
- u32 id;
- if (num_actions == MLX5_FLOW_CONTEXT_ACTION_MAX) {
+ if (num_actions == MLX5_FLOW_CONTEXT_ACTION_MAX ||
+ num_term_actions >= MLX5_FLOW_CONTEXT_ACTION_MAX) {
err = -ENOSPC;
goto free_actions;
}
- switch (type) {
- case MLX5_FLOW_DESTINATION_TYPE_COUNTER:
- id = dst->dest_attr.counter_id;
+ if (type == MLX5_FLOW_DESTINATION_TYPE_COUNTER)
+ continue;
- tmp_action =
- mlx5dr_action_create_flow_counter(id);
- if (!tmp_action) {
- err = -ENOMEM;
- goto free_actions;
- }
- fs_dr_actions[fs_dr_num_actions++] = tmp_action;
- actions[num_actions++] = tmp_action;
- break;
+ switch (type) {
case MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE:
- tmp_action = create_ft_action(dev, dst);
+ tmp_action = create_ft_action(domain, dst);
if (!tmp_action) {
err = -ENOMEM;
goto free_actions;
}
fs_dr_actions[fs_dr_num_actions++] = tmp_action;
- term_action = tmp_action;
+ term_actions[num_term_actions++].dest = tmp_action;
break;
case MLX5_FLOW_DESTINATION_TYPE_VPORT:
tmp_action = create_vport_action(domain, dst);
@@ -388,7 +406,14 @@ static int mlx5_cmd_dr_create_fte(struct mlx5_flow_root_namespace *ns,
goto free_actions;
}
fs_dr_actions[fs_dr_num_actions++] = tmp_action;
- term_action = tmp_action;
+ term_actions[num_term_actions].dest = tmp_action;
+
+ if (dst->dest_attr.vport.flags &
+ MLX5_FLOW_DEST_VPORT_REFORMAT_ID)
+ term_actions[num_term_actions].reformat =
+ dst->dest_attr.vport.pkt_reformat->action.dr_action;
+
+ num_term_actions++;
break;
default:
err = -EOPNOTSUPP;
@@ -397,11 +422,50 @@ static int mlx5_cmd_dr_create_fte(struct mlx5_flow_root_namespace *ns,
}
}
+ if (fte->action.action & MLX5_FLOW_CONTEXT_ACTION_COUNT) {
+ list_for_each_entry(dst, &fte->node.children, node.list) {
+ u32 id;
+
+ if (dst->dest_attr.type !=
+ MLX5_FLOW_DESTINATION_TYPE_COUNTER)
+ continue;
+
+ if (num_actions == MLX5_FLOW_CONTEXT_ACTION_MAX) {
+ err = -ENOSPC;
+ goto free_actions;
+ }
+
+ id = dst->dest_attr.counter_id;
+ tmp_action =
+ mlx5dr_action_create_flow_counter(id);
+ if (!tmp_action) {
+ err = -ENOMEM;
+ goto free_actions;
+ }
+
+ fs_dr_actions[fs_dr_num_actions++] = tmp_action;
+ actions[num_actions++] = tmp_action;
+ }
+ }
+
params.match_sz = match_sz;
params.match_buf = (u64 *)fte->val;
-
- if (term_action)
- actions[num_actions++] = term_action;
+ if (num_term_actions == 1) {
+ if (term_actions->reformat)
+ actions[num_actions++] = term_actions->reformat;
+
+ actions[num_actions++] = term_actions->dest;
+ } else if (num_term_actions > 1) {
+ tmp_action = mlx5dr_action_create_mult_dest_tbl(domain,
+ term_actions,
+ num_term_actions);
+ if (!tmp_action) {
+ err = -EOPNOTSUPP;
+ goto free_actions;
+ }
+ fs_dr_actions[fs_dr_num_actions++] = tmp_action;
+ actions[num_actions++] = tmp_action;
+ }
rule = mlx5dr_rule_create(group->fs_dr_matcher.dr_matcher,
&params,
@@ -412,7 +476,9 @@ static int mlx5_cmd_dr_create_fte(struct mlx5_flow_root_namespace *ns,
goto free_actions;
}
+ kfree(term_actions);
kfree(actions);
+
fte->fs_dr_rule.dr_rule = rule;
fte->fs_dr_rule.num_actions = fs_dr_num_actions;
fte->fs_dr_rule.dr_actions = fs_dr_actions;
@@ -420,13 +486,18 @@ static int mlx5_cmd_dr_create_fte(struct mlx5_flow_root_namespace *ns,
return 0;
free_actions:
- for (i = 0; i < fs_dr_num_actions; i++)
+ /* Free in reverse order to handle action dependencies */
+ for (i = fs_dr_num_actions - 1; i >= 0; i--)
if (!IS_ERR_OR_NULL(fs_dr_actions[i]))
mlx5dr_action_destroy(fs_dr_actions[i]);
- mlx5_core_err(dev, "Failed to create dr rule err(%d)\n", err);
- kfree(actions);
+ kfree(term_actions);
+free_fs_dr_actions_alloc:
kfree(fs_dr_actions);
+free_actions_alloc:
+ kfree(actions);
+out_err:
+ mlx5_core_err(dev, "Failed to create dr rule err(%d)\n", err);
return err;
}
@@ -533,7 +604,8 @@ static int mlx5_cmd_dr_delete_fte(struct mlx5_flow_root_namespace *ns,
if (err)
return err;
- for (i = 0; i < rule->num_actions; i++)
+ /* Free in reverse order to handle action dependencies */
+ for (i = rule->num_actions - 1; i >= 0; i--)
if (!IS_ERR_OR_NULL(rule->dr_actions[i]))
mlx5dr_action_destroy(rule->dr_actions[i]);
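
Both free paths above now walk the action array from last to first because later actions can hold references on earlier ones — a multi-destination table action, for instance, takes a refcount on each member destination and reformat action. A toy refcount model of why the reverse order matters (names and counts illustrative):

#include <stdio.h>

struct action {
	const char *name;
	int refcount;
};

/* Models mlx5dr_action_destroy(), which refuses to free a still
 * referenced action. */
static int action_destroy(struct action *a)
{
	if (a->refcount > 1) {
		printf("%s busy (refcount %d)\n", a->name, a->refcount);
		return -1;		/* models -EBUSY */
	}
	printf("freed %s\n", a->name);
	a->refcount = 0;
	return 0;
}

int main(void)
{
	struct action reformat = { "reformat", 2 };	/* extra ref held by dest */
	struct action dest     = { "dest",     1 };

	/* Reverse creation order: the referrer goes first and releases
	 * its reference, so the referenced action can then be freed. */
	reformat.refcount--;		/* dest's teardown drops its ref */
	action_destroy(&dest);
	action_destroy(&reformat);

	return 0;
}

Destroying in creation order instead would hit the busy case for reformat while dest still held a reference on it.
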
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/mlx5_ifc_dr.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/mlx5_ifc_dr.h
index 1722f4668269..e01c3766c7de 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/mlx5_ifc_dr.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/mlx5_ifc_dr.h
@@ -32,6 +32,7 @@ enum {
};
enum {
+ MLX5DR_ACTION_MDFY_HW_OP_COPY = 0x1,
MLX5DR_ACTION_MDFY_HW_OP_SET = 0x2,
MLX5DR_ACTION_MDFY_HW_OP_ADD = 0x3,
};
@@ -625,4 +626,19 @@ struct mlx5_ifc_dr_action_hw_set_bits {
u8 inline_data[0x20];
};
+struct mlx5_ifc_dr_action_hw_copy_bits {
+ u8 opcode[0x8];
+ u8 destination_field_code[0x8];
+ u8 reserved_at_10[0x2];
+ u8 destination_left_shifter[0x6];
+ u8 reserved_at_18[0x2];
+ u8 destination_length[0x6];
+
+ u8 reserved_at_20[0x8];
+ u8 source_field_code[0x8];
+ u8 reserved_at_30[0x2];
+ u8 source_left_shifter[0x6];
+ u8 reserved_at_38[0x8];
+};
+
#endif /* MLX5_IFC_DR_H */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/mlx5dr.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/mlx5dr.h
index adda9cbfba45..e1edc9c247b7 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/mlx5dr.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/mlx5dr.h
@@ -33,6 +33,11 @@ struct mlx5dr_match_parameters {
u64 *match_buf; /* Device spec format */
};
+struct mlx5dr_action_dest {
+ struct mlx5dr_action *dest;
+ struct mlx5dr_action *reformat;
+};
+
#ifdef CONFIG_MLX5_SW_STEERING
struct mlx5dr_domain *
@@ -46,7 +51,7 @@ void mlx5dr_domain_set_peer(struct mlx5dr_domain *dmn,
struct mlx5dr_domain *peer_dmn);
struct mlx5dr_table *
-mlx5dr_table_create(struct mlx5dr_domain *domain, u32 level);
+mlx5dr_table_create(struct mlx5dr_domain *domain, u32 level, u32 flags);
int mlx5dr_table_destroy(struct mlx5dr_table *table);
@@ -75,14 +80,19 @@ struct mlx5dr_action *
mlx5dr_action_create_dest_table(struct mlx5dr_table *table);
struct mlx5dr_action *
-mlx5dr_create_action_dest_flow_fw_table(struct mlx5_flow_table *ft,
- struct mlx5_core_dev *mdev);
+mlx5dr_action_create_dest_flow_fw_table(struct mlx5dr_domain *domain,
+ struct mlx5_flow_table *ft);
struct mlx5dr_action *
mlx5dr_action_create_dest_vport(struct mlx5dr_domain *domain,
u32 vport, u8 vhca_id_valid,
u16 vhca_id);
+struct mlx5dr_action *
+mlx5dr_action_create_mult_dest_tbl(struct mlx5dr_domain *dmn,
+ struct mlx5dr_action_dest *dests,
+ u32 num_of_dests);
+
struct mlx5dr_action *mlx5dr_action_create_drop(void);
struct mlx5dr_action *mlx5dr_action_create_tag(u32 tag_value);
@@ -131,7 +141,7 @@ mlx5dr_domain_set_peer(struct mlx5dr_domain *dmn,
struct mlx5dr_domain *peer_dmn) { }
static inline struct mlx5dr_table *
-mlx5dr_table_create(struct mlx5dr_domain *domain, u32 level) { return NULL; }
+mlx5dr_table_create(struct mlx5dr_domain *domain, u32 level, u32 flags) { return NULL; }
static inline int
mlx5dr_table_destroy(struct mlx5dr_table *table) { return 0; }
@@ -165,8 +175,8 @@ static inline struct mlx5dr_action *
mlx5dr_action_create_dest_table(struct mlx5dr_table *table) { return NULL; }
static inline struct mlx5dr_action *
-mlx5dr_create_action_dest_flow_fw_table(struct mlx5_flow_table *ft,
- struct mlx5_core_dev *mdev) { return NULL; }
+mlx5dr_action_create_dest_flow_fw_table(struct mlx5dr_domain *domain,
+ struct mlx5_flow_table *ft) { return NULL; }
static inline struct mlx5dr_action *
mlx5dr_action_create_dest_vport(struct mlx5dr_domain *domain,
@@ -174,6 +184,11 @@ mlx5dr_action_create_dest_vport(struct mlx5dr_domain *domain,
u16 vhca_id) { return NULL; }
static inline struct mlx5dr_action *
+mlx5dr_action_create_mult_dest_tbl(struct mlx5dr_domain *dmn,
+ struct mlx5dr_action_dest *dests,
+ u32 num_of_dests) { return NULL; }
+
+static inline struct mlx5dr_action *
mlx5dr_action_create_drop(void) { return NULL; }
static inline struct mlx5dr_action *
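
The #else branch of this header keeps callers free of ifdefs: every exported prototype is paired with a static inline stub returning NULL or 0 when CONFIG_MLX5_SW_STEERING is off. The same pattern in miniature; the config symbol and names below are hypothetical:

#include <stddef.h>
#include <stdio.h>

struct widget { int id; };

#ifdef CONFIG_HAVE_WIDGETS
struct widget *widget_create(int id);
#else
/* Stub keeps call sites compiling and failing gracefully. */
static inline struct widget *widget_create(int id) { (void)id; return NULL; }
#endif

int main(void)
{
	struct widget *w = widget_create(7);

	printf("widget: %s\n", w ? "created" : "unavailable");
	return 0;
}
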
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/wq.c b/drivers/net/ethernet/mellanox/mlx5/core/wq.c
index f2a0e72285ba..02f7e4a39578 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/wq.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/wq.c
@@ -89,7 +89,7 @@ void mlx5_wq_cyc_wqe_dump(struct mlx5_wq_cyc *wq, u16 ix, u8 nstrides)
len = nstrides << wq->fbc.log_stride;
wqe = mlx5_wq_cyc_get_wqe(wq, ix);
- pr_info("WQE DUMP: WQ size %d WQ cur size %d, WQE index 0x%x, len: %ld\n",
+ pr_info("WQE DUMP: WQ size %d WQ cur size %d, WQE index 0x%x, len: %zu\n",
mlx5_wq_cyc_get_size(wq), wq->cur_sz, ix, len);
print_hex_dump(KERN_WARNING, "", DUMP_PREFIX_OFFSET, 16, 1, wqe, len, false);
}
diff --git a/drivers/net/ethernet/mellanox/mlxsw/minimal.c b/drivers/net/ethernet/mellanox/mlxsw/minimal.c
index 2b543911ae00..c4caeeadcba9 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/minimal.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/minimal.c
@@ -213,8 +213,8 @@ mlxsw_m_port_create(struct mlxsw_m *mlxsw_m, u8 local_port, u8 module)
err_register_netdev:
mlxsw_m->ports[local_port] = NULL;
- free_netdev(dev);
err_dev_addr_get:
+ free_netdev(dev);
err_alloc_etherdev:
mlxsw_core_port_fini(mlxsw_m->core, local_port);
return err;
diff --git a/drivers/net/ethernet/mellanox/mlxsw/reg.h b/drivers/net/ethernet/mellanox/mlxsw/reg.h
index af30e8a76682..dd6685156396 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/reg.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/reg.h
@@ -3477,10 +3477,10 @@ MLXSW_REG_DEFINE(qeec, MLXSW_REG_QEEC_ID, MLXSW_REG_QEEC_LEN);
MLXSW_ITEM32(reg, qeec, local_port, 0x00, 16, 8);
enum mlxsw_reg_qeec_hr {
- MLXSW_REG_QEEC_HIERARCY_PORT,
- MLXSW_REG_QEEC_HIERARCY_GROUP,
- MLXSW_REG_QEEC_HIERARCY_SUBGROUP,
- MLXSW_REG_QEEC_HIERARCY_TC,
+ MLXSW_REG_QEEC_HR_PORT,
+ MLXSW_REG_QEEC_HR_GROUP,
+ MLXSW_REG_QEEC_HR_SUBGROUP,
+ MLXSW_REG_QEEC_HR_TC,
};
/* reg_qeec_element_hierarchy
@@ -3563,8 +3563,8 @@ MLXSW_ITEM32(reg, qeec, min_shaper_rate, 0x0C, 0, 28);
*/
MLXSW_ITEM32(reg, qeec, mase, 0x10, 31, 1);
-/* A large max rate will disable the max shaper. */
-#define MLXSW_REG_QEEC_MAS_DIS 200000000 /* Kbps */
+/* The largest max shaper value possible, used to disable the max shaper. */
+#define MLXSW_REG_QEEC_MAS_DIS ((1u << 31) - 1) /* Kbps */
/* reg_qeec_max_shaper_rate
* Max shaper information rate.
@@ -3602,6 +3602,21 @@ MLXSW_ITEM32(reg, qeec, dwrr, 0x18, 15, 1);
*/
MLXSW_ITEM32(reg, qeec, dwrr_weight, 0x18, 0, 8);
+/* reg_qeec_max_shaper_bs
+ * Max shaper burst size
+ * Burst size is 2^max_shaper_bs * 512 bits
+ * For Spectrum-1: Range is: 5..25
+ * For Spectrum-2: Range is: 11..25
+ * Reserved when ptps = 1
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, qeec, max_shaper_bs, 0x1C, 0, 6);
+
+#define MLXSW_REG_QEEC_HIGHEST_SHAPER_BS 25
+#define MLXSW_REG_QEEC_LOWEST_SHAPER_BS_SP1 5
+#define MLXSW_REG_QEEC_LOWEST_SHAPER_BS_SP2 11
+#define MLXSW_REG_QEEC_LOWEST_SHAPER_BS_SP3 5
+
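
With burst size defined as 2^max_shaper_bs * 512 bits, the documented bounds span about 2 KiB (bs = 5) up to 2 GiB (bs = 25) of burst allowance. A quick computation of those endpoints:

#include <stdint.h>
#include <stdio.h>

static uint64_t burst_bits(unsigned int bs)
{
	return (uint64_t)512 << bs;	/* 2^bs * 512 bits */
}

int main(void)
{
	printf("bs=5:  %llu bits (%llu bytes)\n",
	       (unsigned long long)burst_bits(5),
	       (unsigned long long)(burst_bits(5) / 8));
	printf("bs=25: %llu bits (%llu MiB)\n",
	       (unsigned long long)burst_bits(25),
	       (unsigned long long)(burst_bits(25) / 8 / (1024 * 1024)));
	return 0;
}
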
static inline void mlxsw_reg_qeec_pack(char *payload, u8 local_port,
enum mlxsw_reg_qeec_hr hr, u8 index,
u8 next_index)
@@ -3618,8 +3633,7 @@ static inline void mlxsw_reg_qeec_ptps_pack(char *payload, u8 local_port,
{
MLXSW_REG_ZERO(qeec, payload);
mlxsw_reg_qeec_local_port_set(payload, local_port);
- mlxsw_reg_qeec_element_hierarchy_set(payload,
- MLXSW_REG_QEEC_HIERARCY_PORT);
+ mlxsw_reg_qeec_element_hierarchy_set(payload, MLXSW_REG_QEEC_HR_PORT);
mlxsw_reg_qeec_ptps_set(payload, ptps);
}
@@ -3749,6 +3763,38 @@ mlxsw_reg_qpdsm_prio_pack(char *payload, unsigned short prio, u8 dscp)
mlxsw_reg_qpdsm_prio_entry_color2_dscp_set(payload, prio, dscp);
}
+/* QPDP - QoS Port DSCP to Priority Mapping Register
+ * -------------------------------------------------
+ * This register controls the port default Switch Priority and Color. The
+ * default Switch Priority and Color are used for frames where the trust state
+ * uses default values. All member ports of a LAG should be configured with the
+ * same default values.
+ */
+#define MLXSW_REG_QPDP_ID 0x4007
+#define MLXSW_REG_QPDP_LEN 0x8
+
+MLXSW_REG_DEFINE(qpdp, MLXSW_REG_QPDP_ID, MLXSW_REG_QPDP_LEN);
+
+/* reg_qpdp_local_port
+ * Local Port. Supported for data packets from CPU port.
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, qpdp, local_port, 0x00, 16, 8);
+
+/* reg_qpdp_switch_prio
+ * Default port Switch Priority (default 0)
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, qpdp, switch_prio, 0x04, 0, 4);
+
+static inline void mlxsw_reg_qpdp_pack(char *payload, u8 local_port,
+ u8 switch_prio)
+{
+ MLXSW_REG_ZERO(qpdp, payload);
+ mlxsw_reg_qpdp_local_port_set(payload, local_port);
+ mlxsw_reg_qpdp_switch_prio_set(payload, switch_prio);
+}
+
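For illustration, a hedged sketch of how a caller is expected to drive the new QPDP register; the pack-then-write pattern below matches what mlxsw_sp_port_dcb_app_update_qpdp() does in the spectrum_dcb.c hunk further down (the wrapper name here is hypothetical):

static int example_qpdp_set(struct mlxsw_core *core, u8 local_port, u8 prio)
{
	char qpdp_pl[MLXSW_REG_QPDP_LEN];

	mlxsw_reg_qpdp_pack(qpdp_pl, local_port, prio);
	return mlxsw_reg_write(core, MLXSW_REG(qpdp), qpdp_pl);
}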
/* QPDPM - QoS Port DSCP to Priority Mapping Register
* --------------------------------------------------
* This register controls the mapping from DSCP field to
@@ -5482,6 +5528,7 @@ enum mlxsw_reg_htgt_discard_trap_group {
MLXSW_REG_HTGT_DISCARD_TRAP_GROUP_BASE = MLXSW_REG_HTGT_TRAP_GROUP_MAX,
MLXSW_REG_HTGT_TRAP_GROUP_SP_L2_DISCARDS,
MLXSW_REG_HTGT_TRAP_GROUP_SP_L3_DISCARDS,
+ MLXSW_REG_HTGT_TRAP_GROUP_SP_TUNNEL_DISCARDS,
};
/* reg_htgt_trap_group
@@ -10109,6 +10156,92 @@ static inline void mlxsw_reg_tigcr_pack(char *payload, bool ttlc, u8 ttl_uc)
mlxsw_reg_tigcr_ttl_uc_set(payload, ttl_uc);
}
+/* TIEEM - Tunneling IPinIP Encapsulation ECN Mapping Register
+ * -----------------------------------------------------------
+ * The TIEEM register maps the ECN of the IP header at the ingress to the
+ * encapsulation onto the ECN of the underlay network.
+ */
+#define MLXSW_REG_TIEEM_ID 0xA812
+#define MLXSW_REG_TIEEM_LEN 0x0C
+
+MLXSW_REG_DEFINE(tieem, MLXSW_REG_TIEEM_ID, MLXSW_REG_TIEEM_LEN);
+
+/* reg_tieem_overlay_ecn
+ * ECN of the IP header in the overlay network.
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, tieem, overlay_ecn, 0x04, 24, 2);
+
+/* reg_tieem_underlay_ecn
+ * ECN of the IP header in the underlay network.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, tieem, underlay_ecn, 0x04, 16, 2);
+
+static inline void mlxsw_reg_tieem_pack(char *payload, u8 overlay_ecn,
+ u8 underlay_ecn)
+{
+ MLXSW_REG_ZERO(tieem, payload);
+ mlxsw_reg_tieem_overlay_ecn_set(payload, overlay_ecn);
+ mlxsw_reg_tieem_underlay_ecn_set(payload, underlay_ecn);
+}
+
+/* TIDEM - Tunneling IPinIP Decapsulation ECN Mapping Register
+ * -----------------------------------------------------------
+ * The TIDEM register configures the actions that are done in the
+ * decapsulation.
+ */
+#define MLXSW_REG_TIDEM_ID 0xA813
+#define MLXSW_REG_TIDEM_LEN 0x0C
+
+MLXSW_REG_DEFINE(tidem, MLXSW_REG_TIDEM_ID, MLXSW_REG_TIDEM_LEN);
+
+/* reg_tidem_underlay_ecn
+ * ECN field of the IP header in the underlay network.
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, tidem, underlay_ecn, 0x04, 24, 2);
+
+/* reg_tidem_overlay_ecn
+ * ECN field of the IP header in the overlay network.
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, tidem, overlay_ecn, 0x04, 16, 2);
+
+/* reg_tidem_eip_ecn
+ * Egress IP ECN. ECN field of the IP header of the packet which goes out
+ * from the decapsulation.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, tidem, eip_ecn, 0x04, 8, 2);
+
+/* reg_tidem_trap_en
+ * Trap enable:
+ * 0 - No trap due to decap ECN
+ * 1 - Trap enable with trap_id
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, tidem, trap_en, 0x08, 28, 4);
+
+/* reg_tidem_trap_id
+ * Trap ID. Either DECAP_ECN0 or DECAP_ECN1.
+ * Reserved when trap_en is '0'.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, tidem, trap_id, 0x08, 0, 9);
+
+static inline void mlxsw_reg_tidem_pack(char *payload, u8 underlay_ecn,
+ u8 overlay_ecn, u8 eip_ecn,
+ bool trap_en, u16 trap_id)
+{
+ MLXSW_REG_ZERO(tidem, payload);
+ mlxsw_reg_tidem_underlay_ecn_set(payload, underlay_ecn);
+ mlxsw_reg_tidem_overlay_ecn_set(payload, overlay_ecn);
+ mlxsw_reg_tidem_eip_ecn_set(payload, eip_ecn);
+ mlxsw_reg_tidem_trap_en_set(payload, trap_en);
+ mlxsw_reg_tidem_trap_id_set(payload, trap_id);
+}
+
/* SBPR - Shared Buffer Pools Register
* -----------------------------------
* The SBPR configures and retrieves the shared buffer pools and configuration.
@@ -10581,6 +10714,7 @@ static const struct mlxsw_reg_info *mlxsw_reg_infos[] = {
MLXSW_REG(qeec),
MLXSW_REG(qrwe),
MLXSW_REG(qpdsm),
+ MLXSW_REG(qpdp),
MLXSW_REG(qpdpm),
MLXSW_REG(qtctm),
MLXSW_REG(qpsc),
@@ -10652,6 +10786,8 @@ static const struct mlxsw_reg_info *mlxsw_reg_infos[] = {
MLXSW_REG(tndem),
MLXSW_REG(tnpc),
MLXSW_REG(tigcr),
+ MLXSW_REG(tieem),
+ MLXSW_REG(tidem),
MLXSW_REG(sbpr),
MLXSW_REG(sbcm),
MLXSW_REG(sbpm),
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
index f7fd5e8fbf96..7358b5bc7eb6 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
@@ -45,11 +45,9 @@
#include "spectrum_ptp.h"
#include "../mlxfw/mlxfw.h"
-#define MLXSW_SP_FWREV_MINOR_TO_BRANCH(minor) ((minor) / 100)
-
#define MLXSW_SP1_FWREV_MAJOR 13
#define MLXSW_SP1_FWREV_MINOR 2000
-#define MLXSW_SP1_FWREV_SUBMINOR 2308
+#define MLXSW_SP1_FWREV_SUBMINOR 2714
#define MLXSW_SP1_FWREV_CAN_RESET_MINOR 1702
static const struct mlxsw_fw_rev mlxsw_sp1_fw_rev = {
@@ -66,7 +64,7 @@ static const struct mlxsw_fw_rev mlxsw_sp1_fw_rev = {
#define MLXSW_SP2_FWREV_MAJOR 29
#define MLXSW_SP2_FWREV_MINOR 2000
-#define MLXSW_SP2_FWREV_SUBMINOR 2308
+#define MLXSW_SP2_FWREV_SUBMINOR 2714
static const struct mlxsw_fw_rev mlxsw_sp2_fw_rev = {
.major = MLXSW_SP2_FWREV_MAJOR,
@@ -197,6 +195,10 @@ struct mlxsw_sp_ptp_ops {
u64 *data, int data_index);
};
+struct mlxsw_sp_span_ops {
+ u32 (*buffsize_get)(int mtu, u32 speed);
+};
+
static int mlxsw_sp_component_query(struct mlxfw_dev *mlxfw_dev,
u16 component_index, u32 *p_max_size,
u8 *p_align_bits, u16 *p_max_write_size)
@@ -423,13 +425,12 @@ static int mlxsw_sp_fw_rev_validate(struct mlxsw_sp *mlxsw_sp)
rev->major, req_rev->major);
return -EINVAL;
}
- if (MLXSW_SP_FWREV_MINOR_TO_BRANCH(rev->minor) ==
- MLXSW_SP_FWREV_MINOR_TO_BRANCH(req_rev->minor) &&
- mlxsw_core_fw_rev_minor_subminor_validate(rev, req_rev))
+ if (mlxsw_core_fw_rev_minor_subminor_validate(rev, req_rev))
return 0;
- dev_info(mlxsw_sp->bus_info->dev, "The firmware version %d.%d.%d is incompatible with the driver\n",
- rev->major, rev->minor, rev->subminor);
+ dev_err(mlxsw_sp->bus_info->dev, "The firmware version %d.%d.%d is incompatible with the driver (required >= %d.%d.%d)\n",
+ rev->major, rev->minor, rev->subminor, req_rev->major,
+ req_rev->minor, req_rev->subminor);
dev_info(mlxsw_sp->bus_info->dev, "Flashing firmware using file %s\n",
fw_filename);
@@ -860,23 +861,17 @@ static netdev_tx_t mlxsw_sp_port_xmit(struct sk_buff *skb,
u64 len;
int err;
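+	/* Make sure there is writable headroom for the Tx header. Unlike the
+	 * skb_realloc_headroom() dance this replaces, skb_cow_head() keeps
+	 * the skb pointer itself stable, so no skb_orig juggling is needed.
+	 */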
+ if (skb_cow_head(skb, MLXSW_TXHDR_LEN)) {
+ this_cpu_inc(mlxsw_sp_port->pcpu_stats->tx_dropped);
+ dev_kfree_skb_any(skb);
+ return NETDEV_TX_OK;
+ }
+
memset(skb->cb, 0, sizeof(struct mlxsw_skb_cb));
if (mlxsw_core_skb_transmit_busy(mlxsw_sp->core, &tx_info))
return NETDEV_TX_BUSY;
- if (unlikely(skb_headroom(skb) < MLXSW_TXHDR_LEN)) {
- struct sk_buff *skb_orig = skb;
-
- skb = skb_realloc_headroom(skb, MLXSW_TXHDR_LEN);
- if (!skb) {
- this_cpu_inc(mlxsw_sp_port->pcpu_stats->tx_dropped);
- dev_kfree_skb_any(skb_orig);
- return NETDEV_TX_OK;
- }
- dev_consume_skb_any(skb_orig);
- }
-
if (eth_skb_pad(skb)) {
this_cpu_inc(mlxsw_sp_port->pcpu_stats->tx_dropped);
return NETDEV_TX_OK;
@@ -1215,6 +1210,9 @@ static void update_stats_cache(struct work_struct *work)
periodic_hw_stats.update_dw.work);
if (!netif_carrier_ok(mlxsw_sp_port->dev))
+ /* Note: mlxsw_sp_port_down_wipe_counters() clears the cache as
+	 * necessary when the port goes down.
+ */
goto out;
mlxsw_sp_port_get_hw_stats(mlxsw_sp_port->dev,
@@ -1796,6 +1794,10 @@ static int mlxsw_sp_setup_tc(struct net_device *dev, enum tc_setup_type type,
return mlxsw_sp_setup_tc_red(mlxsw_sp_port, type_data);
case TC_SETUP_QDISC_PRIO:
return mlxsw_sp_setup_tc_prio(mlxsw_sp_port, type_data);
+ case TC_SETUP_QDISC_ETS:
+ return mlxsw_sp_setup_tc_ets(mlxsw_sp_port, type_data);
+ case TC_SETUP_QDISC_TBF:
+ return mlxsw_sp_setup_tc_tbf(mlxsw_sp_port, type_data);
default:
return -EOPNOTSUPP;
}
@@ -3539,6 +3541,27 @@ mlxsw_sp_port_speed_by_width_set(struct mlxsw_sp_port *mlxsw_sp_port)
return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ptys), ptys_pl);
}
+int mlxsw_sp_port_speed_get(struct mlxsw_sp_port *mlxsw_sp_port, u32 *speed)
+{
+ const struct mlxsw_sp_port_type_speed_ops *port_type_speed_ops;
+ struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+ char ptys_pl[MLXSW_REG_PTYS_LEN];
+ u32 eth_proto_oper;
+ int err;
+
+ port_type_speed_ops = mlxsw_sp->port_type_speed_ops;
+ port_type_speed_ops->reg_ptys_eth_pack(mlxsw_sp, ptys_pl,
+ mlxsw_sp_port->local_port, 0,
+ false);
+ err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ptys), ptys_pl);
+ if (err)
+ return err;
+ port_type_speed_ops->reg_ptys_eth_unpack(mlxsw_sp, ptys_pl, NULL, NULL,
+ &eth_proto_oper);
+ *speed = port_type_speed_ops->from_ptys_speed(mlxsw_sp, eth_proto_oper);
+ return 0;
+}
+
int mlxsw_sp_port_ets_set(struct mlxsw_sp_port *mlxsw_sp_port,
enum mlxsw_reg_qeec_hr hr, u8 index, u8 next_index,
bool dwrr, u8 dwrr_weight)
@@ -3556,7 +3579,7 @@ int mlxsw_sp_port_ets_set(struct mlxsw_sp_port *mlxsw_sp_port,
int mlxsw_sp_port_ets_maxrate_set(struct mlxsw_sp_port *mlxsw_sp_port,
enum mlxsw_reg_qeec_hr hr, u8 index,
- u8 next_index, u32 maxrate)
+ u8 next_index, u32 maxrate, u8 burst_size)
{
struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
char qeec_pl[MLXSW_REG_QEEC_LEN];
@@ -3565,6 +3588,7 @@ int mlxsw_sp_port_ets_maxrate_set(struct mlxsw_sp_port *mlxsw_sp_port,
next_index);
mlxsw_reg_qeec_mase_set(qeec_pl, true);
mlxsw_reg_qeec_max_shaper_rate_set(qeec_pl, maxrate);
+ mlxsw_reg_qeec_max_shaper_bs_set(qeec_pl, burst_size);
return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(qeec), qeec_pl);
}
@@ -3602,26 +3626,25 @@ static int mlxsw_sp_port_ets_init(struct mlxsw_sp_port *mlxsw_sp_port)
* one subgroup, which are all member in the same group.
*/
err = mlxsw_sp_port_ets_set(mlxsw_sp_port,
- MLXSW_REG_QEEC_HIERARCY_GROUP, 0, 0, false,
- 0);
+ MLXSW_REG_QEEC_HR_GROUP, 0, 0, false, 0);
if (err)
return err;
for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
err = mlxsw_sp_port_ets_set(mlxsw_sp_port,
- MLXSW_REG_QEEC_HIERARCY_SUBGROUP, i,
+ MLXSW_REG_QEEC_HR_SUBGROUP, i,
0, false, 0);
if (err)
return err;
}
for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
err = mlxsw_sp_port_ets_set(mlxsw_sp_port,
- MLXSW_REG_QEEC_HIERARCY_TC, i, i,
+ MLXSW_REG_QEEC_HR_TC, i, i,
false, 0);
if (err)
return err;
err = mlxsw_sp_port_ets_set(mlxsw_sp_port,
- MLXSW_REG_QEEC_HIERARCY_TC,
+ MLXSW_REG_QEEC_HR_TC,
i + 8, i,
true, 100);
if (err)
@@ -3633,30 +3656,30 @@ static int mlxsw_sp_port_ets_init(struct mlxsw_sp_port *mlxsw_sp_port)
* for the initial configuration.
*/
err = mlxsw_sp_port_ets_maxrate_set(mlxsw_sp_port,
- MLXSW_REG_QEEC_HIERARCY_PORT, 0, 0,
- MLXSW_REG_QEEC_MAS_DIS);
+ MLXSW_REG_QEEC_HR_PORT, 0, 0,
+ MLXSW_REG_QEEC_MAS_DIS, 0);
if (err)
return err;
for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
err = mlxsw_sp_port_ets_maxrate_set(mlxsw_sp_port,
- MLXSW_REG_QEEC_HIERARCY_SUBGROUP,
+ MLXSW_REG_QEEC_HR_SUBGROUP,
i, 0,
- MLXSW_REG_QEEC_MAS_DIS);
+ MLXSW_REG_QEEC_MAS_DIS, 0);
if (err)
return err;
}
for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
err = mlxsw_sp_port_ets_maxrate_set(mlxsw_sp_port,
- MLXSW_REG_QEEC_HIERARCY_TC,
+ MLXSW_REG_QEEC_HR_TC,
i, i,
- MLXSW_REG_QEEC_MAS_DIS);
+ MLXSW_REG_QEEC_MAS_DIS, 0);
if (err)
return err;
err = mlxsw_sp_port_ets_maxrate_set(mlxsw_sp_port,
- MLXSW_REG_QEEC_HIERARCY_TC,
+ MLXSW_REG_QEEC_HR_TC,
i + 8, i,
- MLXSW_REG_QEEC_MAS_DIS);
+ MLXSW_REG_QEEC_MAS_DIS, 0);
if (err)
return err;
}
@@ -3664,7 +3687,7 @@ static int mlxsw_sp_port_ets_init(struct mlxsw_sp_port *mlxsw_sp_port)
/* Configure the min shaper for multicast TCs. */
for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
err = mlxsw_sp_port_min_bw_set(mlxsw_sp_port,
- MLXSW_REG_QEEC_HIERARCY_TC,
+ MLXSW_REG_QEEC_HR_TC,
i + 8, i,
MLXSW_REG_QEEC_MIS_MIN);
if (err)
@@ -3888,6 +3911,8 @@ static int mlxsw_sp_port_create(struct mlxsw_sp *mlxsw_sp, u8 local_port,
INIT_DELAYED_WORK(&mlxsw_sp_port->ptp.shaper_dw,
mlxsw_sp->ptp_ops->shaper_work);
+ INIT_DELAYED_WORK(&mlxsw_sp_port->span.speed_update_dw,
+ mlxsw_sp_span_speed_update_work);
mlxsw_sp->ports[local_port] = mlxsw_sp_port;
err = register_netdev(dev);
@@ -3944,6 +3969,7 @@ static void mlxsw_sp_port_remove(struct mlxsw_sp *mlxsw_sp, u8 local_port)
struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp->ports[local_port];
cancel_delayed_work_sync(&mlxsw_sp_port->periodic_hw_stats.update_dw);
+ cancel_delayed_work_sync(&mlxsw_sp_port->span.speed_update_dw);
cancel_delayed_work_sync(&mlxsw_sp_port->ptp.shaper_dw);
mlxsw_sp_port_ptp_clear(mlxsw_sp_port);
mlxsw_core_port_clear(mlxsw_sp->core, local_port, mlxsw_sp);
@@ -4324,6 +4350,15 @@ static int mlxsw_sp_port_unsplit(struct mlxsw_core *mlxsw_core, u8 local_port,
return 0;
}
+static void
+mlxsw_sp_port_down_wipe_counters(struct mlxsw_sp_port *mlxsw_sp_port)
+{
+ int i;
+
+ for (i = 0; i < TC_MAX_QUEUE; i++)
+ mlxsw_sp_port->periodic_hw_stats.xstats.backlog[i] = 0;
+}
+
static void mlxsw_sp_pude_event_func(const struct mlxsw_reg_info *reg,
char *pude_pl, void *priv)
{
@@ -4342,9 +4377,11 @@ static void mlxsw_sp_pude_event_func(const struct mlxsw_reg_info *reg,
netdev_info(mlxsw_sp_port->dev, "link up\n");
netif_carrier_on(mlxsw_sp_port->dev);
mlxsw_core_schedule_dw(&mlxsw_sp_port->ptp.shaper_dw, 0);
+ mlxsw_core_schedule_dw(&mlxsw_sp_port->span.speed_update_dw, 0);
} else {
netdev_info(mlxsw_sp_port->dev, "link down\n");
netif_carrier_off(mlxsw_sp_port->dev);
+ mlxsw_sp_port_down_wipe_counters(mlxsw_sp_port);
}
}
@@ -4540,10 +4577,16 @@ static const struct mlxsw_listener mlxsw_sp_listener[] = {
false),
MLXSW_SP_RXL_MARK(ROUTER_ALERT_IPV4, TRAP_TO_CPU, ROUTER_EXP, false),
MLXSW_SP_RXL_MARK(ROUTER_ALERT_IPV6, TRAP_TO_CPU, ROUTER_EXP, false),
- MLXSW_SP_RXL_MARK(IPIP_DECAP_ERROR, TRAP_TO_CPU, ROUTER_EXP, false),
- MLXSW_SP_RXL_MARK(DECAP_ECN0, TRAP_TO_CPU, ROUTER_EXP, false),
MLXSW_SP_RXL_MARK(IPV4_VRRP, TRAP_TO_CPU, VRRP, false),
MLXSW_SP_RXL_MARK(IPV6_VRRP, TRAP_TO_CPU, VRRP, false),
+ MLXSW_SP_RXL_NO_MARK(DISCARD_ING_ROUTER_SIP_CLASS_E, FORWARD,
+ ROUTER_EXP, false),
+ MLXSW_SP_RXL_NO_MARK(DISCARD_ING_ROUTER_MC_DMAC, FORWARD,
+ ROUTER_EXP, false),
+ MLXSW_SP_RXL_NO_MARK(DISCARD_ING_ROUTER_SIP_DIP, FORWARD,
+ ROUTER_EXP, false),
+ MLXSW_SP_RXL_NO_MARK(DISCARD_ING_ROUTER_DIP_LINK_LOCAL, FORWARD,
+ ROUTER_EXP, false),
/* PKT Sample trap */
MLXSW_RXL(mlxsw_sp_rx_listener_sample_func, PKT_SAMPLE, MIRROR_TO_CPU,
false, SP_IP2ME, DISCARD),
@@ -4882,6 +4925,33 @@ static const struct mlxsw_sp_ptp_ops mlxsw_sp2_ptp_ops = {
.get_stats = mlxsw_sp2_get_stats,
};
+static u32 mlxsw_sp1_span_buffsize_get(int mtu, u32 speed)
+{
+ return mtu * 5 / 2;
+}
+
+static const struct mlxsw_sp_span_ops mlxsw_sp1_span_ops = {
+ .buffsize_get = mlxsw_sp1_span_buffsize_get,
+};
+
+#define MLXSW_SP2_SPAN_EG_MIRROR_BUFFER_FACTOR 38
+
+static u32 mlxsw_sp2_span_buffsize_get(int mtu, u32 speed)
+{
+ return 3 * mtu + MLXSW_SP2_SPAN_EG_MIRROR_BUFFER_FACTOR * speed / 1000;
+}
+
+static const struct mlxsw_sp_span_ops mlxsw_sp2_span_ops = {
+ .buffsize_get = mlxsw_sp2_span_buffsize_get,
+};
+
+u32 mlxsw_sp_span_buffsize_get(struct mlxsw_sp *mlxsw_sp, int mtu, u32 speed)
+{
+	u32 buffsize = mlxsw_sp->span_ops->buffsize_get(mtu, speed);
+
+ return mlxsw_sp_bytes_cells(mlxsw_sp, buffsize) + 1;
+}
+
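A worked example of the two callbacks, assuming mtu is in bytes and speed is the ethtool-style Mbps value that mlxsw_sp_port_speed_get() appears to return (an assumption; the units are not spelled out in this patch):

	/* Spectrum-1: 1500 * 5 / 2 = 3750 bytes, independent of speed. */
	/* Spectrum-2, MTU 1500 at 100000 Mbps:
	 *   3 * 1500 + 38 * 100000 / 1000 = 4500 + 3800 = 8300 bytes.
	 */

mlxsw_sp_span_buffsize_get() then converts the byte count to cells and adds one cell of slack.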
static int mlxsw_sp_netdevice_event(struct notifier_block *unused,
unsigned long event, void *ptr);
@@ -5103,8 +5173,10 @@ static int mlxsw_sp1_init(struct mlxsw_core *mlxsw_core,
mlxsw_sp->sb_vals = &mlxsw_sp1_sb_vals;
mlxsw_sp->port_type_speed_ops = &mlxsw_sp1_port_type_speed_ops;
mlxsw_sp->ptp_ops = &mlxsw_sp1_ptp_ops;
+ mlxsw_sp->span_ops = &mlxsw_sp1_span_ops;
mlxsw_sp->listeners = mlxsw_sp1_listener;
mlxsw_sp->listeners_count = ARRAY_SIZE(mlxsw_sp1_listener);
+ mlxsw_sp->lowest_shaper_bs = MLXSW_REG_QEEC_LOWEST_SHAPER_BS_SP1;
return mlxsw_sp_init(mlxsw_core, mlxsw_bus_info, extack);
}
@@ -5128,6 +5200,31 @@ static int mlxsw_sp2_init(struct mlxsw_core *mlxsw_core,
mlxsw_sp->sb_vals = &mlxsw_sp2_sb_vals;
mlxsw_sp->port_type_speed_ops = &mlxsw_sp2_port_type_speed_ops;
mlxsw_sp->ptp_ops = &mlxsw_sp2_ptp_ops;
+ mlxsw_sp->span_ops = &mlxsw_sp2_span_ops;
+ mlxsw_sp->lowest_shaper_bs = MLXSW_REG_QEEC_LOWEST_SHAPER_BS_SP2;
+
+ return mlxsw_sp_init(mlxsw_core, mlxsw_bus_info, extack);
+}
+
+static int mlxsw_sp3_init(struct mlxsw_core *mlxsw_core,
+ const struct mlxsw_bus_info *mlxsw_bus_info,
+ struct netlink_ext_ack *extack)
+{
+ struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
+
+ mlxsw_sp->kvdl_ops = &mlxsw_sp2_kvdl_ops;
+ mlxsw_sp->afa_ops = &mlxsw_sp2_act_afa_ops;
+ mlxsw_sp->afk_ops = &mlxsw_sp2_afk_ops;
+ mlxsw_sp->mr_tcam_ops = &mlxsw_sp2_mr_tcam_ops;
+ mlxsw_sp->acl_tcam_ops = &mlxsw_sp2_acl_tcam_ops;
+ mlxsw_sp->nve_ops_arr = mlxsw_sp2_nve_ops_arr;
+ mlxsw_sp->mac_mask = mlxsw_sp2_mac_mask;
+ mlxsw_sp->rif_ops_arr = mlxsw_sp2_rif_ops_arr;
+ mlxsw_sp->sb_vals = &mlxsw_sp2_sb_vals;
+ mlxsw_sp->port_type_speed_ops = &mlxsw_sp2_port_type_speed_ops;
+ mlxsw_sp->ptp_ops = &mlxsw_sp2_ptp_ops;
+ mlxsw_sp->span_ops = &mlxsw_sp2_span_ops;
+ mlxsw_sp->lowest_shaper_bs = MLXSW_REG_QEEC_LOWEST_SHAPER_BS_SP3;
return mlxsw_sp_init(mlxsw_core, mlxsw_bus_info, extack);
}
@@ -5634,7 +5731,7 @@ static struct mlxsw_driver mlxsw_sp2_driver = {
static struct mlxsw_driver mlxsw_sp3_driver = {
.kind = mlxsw_sp3_driver_name,
.priv_size = sizeof(struct mlxsw_sp),
- .init = mlxsw_sp2_init,
+ .init = mlxsw_sp3_init,
.fini = mlxsw_sp_fini,
.basic_trap_groups_set = mlxsw_sp_basic_trap_groups_set,
.port_split = mlxsw_sp_port_split,
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h
index 347bec9d1ecf..a0f1f9dceec5 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h
@@ -140,6 +140,7 @@ struct mlxsw_sp_sb_vals;
struct mlxsw_sp_port_type_speed_ops;
struct mlxsw_sp_ptp_state;
struct mlxsw_sp_ptp_ops;
+struct mlxsw_sp_span_ops;
struct mlxsw_sp_port_mapping {
u8 module;
@@ -185,8 +186,10 @@ struct mlxsw_sp {
const struct mlxsw_sp_sb_vals *sb_vals;
const struct mlxsw_sp_port_type_speed_ops *port_type_speed_ops;
const struct mlxsw_sp_ptp_ops *ptp_ops;
+ const struct mlxsw_sp_span_ops *span_ops;
const struct mlxsw_listener *listeners;
size_t listeners_count;
+ u32 lowest_shaper_bs;
};
static inline struct mlxsw_sp_upper *
@@ -292,6 +295,9 @@ struct mlxsw_sp_port {
struct mlxsw_sp_ptp_port_stats stats;
} ptp;
u8 split_base_local_port;
+ struct {
+ struct delayed_work speed_update_dw;
+ } span;
};
struct mlxsw_sp_port_type_speed_ops {
@@ -471,6 +477,7 @@ extern struct notifier_block mlxsw_sp_switchdev_notifier;
/* spectrum.c */
void mlxsw_sp_rx_listener_no_mark_func(struct sk_buff *skb,
u8 local_port, void *priv);
+int mlxsw_sp_port_speed_get(struct mlxsw_sp_port *mlxsw_sp_port, u32 *speed);
int mlxsw_sp_port_ets_set(struct mlxsw_sp_port *mlxsw_sp_port,
enum mlxsw_reg_qeec_hr hr, u8 index, u8 next_index,
bool dwrr, u8 dwrr_weight);
@@ -481,7 +488,7 @@ int __mlxsw_sp_port_headroom_set(struct mlxsw_sp_port *mlxsw_sp_port, int mtu,
struct ieee_pfc *my_pfc);
int mlxsw_sp_port_ets_maxrate_set(struct mlxsw_sp_port *mlxsw_sp_port,
enum mlxsw_reg_qeec_hr hr, u8 index,
- u8 next_index, u32 maxrate);
+ u8 next_index, u32 maxrate, u8 burst_size);
enum mlxsw_reg_spms_state mlxsw_sp_stp_spms_state(u8 stp_state);
int mlxsw_sp_port_vid_stp_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid,
u8 state);
@@ -501,6 +508,7 @@ int mlxsw_sp_flow_counter_alloc(struct mlxsw_sp *mlxsw_sp,
unsigned int *p_counter_index);
void mlxsw_sp_flow_counter_free(struct mlxsw_sp *mlxsw_sp,
unsigned int counter_index);
+u32 mlxsw_sp_span_buffsize_get(struct mlxsw_sp *mlxsw_sp, int mtu, u32 speed);
bool mlxsw_sp_port_dev_check(const struct net_device *dev);
struct mlxsw_sp *mlxsw_sp_lower_get(struct net_device *dev);
struct mlxsw_sp_port *mlxsw_sp_port_dev_lower_find(struct net_device *dev);
@@ -852,6 +860,10 @@ int mlxsw_sp_setup_tc_red(struct mlxsw_sp_port *mlxsw_sp_port,
struct tc_red_qopt_offload *p);
int mlxsw_sp_setup_tc_prio(struct mlxsw_sp_port *mlxsw_sp_port,
struct tc_prio_qopt_offload *p);
+int mlxsw_sp_setup_tc_ets(struct mlxsw_sp_port *mlxsw_sp_port,
+ struct tc_ets_qopt_offload *p);
+int mlxsw_sp_setup_tc_tbf(struct mlxsw_sp_port *mlxsw_sp_port,
+ struct tc_tbf_qopt_offload *p);
/* spectrum_fid.c */
bool mlxsw_sp_fid_is_dummy(struct mlxsw_sp *mlxsw_sp, u16 fid_index);
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl.c
index 150b3a144b83..3d3cca596116 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl.c
@@ -8,6 +8,7 @@
#include <linux/string.h>
#include <linux/rhashtable.h>
#include <linux/netdevice.h>
+#include <linux/mutex.h>
#include <net/net_namespace.h>
#include <net/tc_act/tc_vlan.h>
@@ -25,6 +26,7 @@ struct mlxsw_sp_acl {
struct mlxsw_sp_fid *dummy_fid;
struct rhashtable ruleset_ht;
struct list_head rules;
+ struct mutex rules_lock; /* Protects rules list */
struct {
struct delayed_work dw;
unsigned long interval; /* ms */
@@ -701,7 +703,9 @@ int mlxsw_sp_acl_rule_add(struct mlxsw_sp *mlxsw_sp,
goto err_ruleset_block_bind;
}
+ mutex_lock(&mlxsw_sp->acl->rules_lock);
list_add_tail(&rule->list, &mlxsw_sp->acl->rules);
+ mutex_unlock(&mlxsw_sp->acl->rules_lock);
block->rule_count++;
block->egress_blocker_rule_count += rule->rulei->egress_bind_blocker;
return 0;
@@ -723,7 +727,9 @@ void mlxsw_sp_acl_rule_del(struct mlxsw_sp *mlxsw_sp,
block->egress_blocker_rule_count -= rule->rulei->egress_bind_blocker;
ruleset->ht_key.block->rule_count--;
+ mutex_lock(&mlxsw_sp->acl->rules_lock);
list_del(&rule->list);
+ mutex_unlock(&mlxsw_sp->acl->rules_lock);
if (!ruleset->ht_key.chain_index &&
mlxsw_sp_acl_ruleset_is_singular(ruleset))
mlxsw_sp_acl_ruleset_block_unbind(mlxsw_sp, ruleset,
@@ -783,19 +789,18 @@ static int mlxsw_sp_acl_rules_activity_update(struct mlxsw_sp_acl *acl)
struct mlxsw_sp_acl_rule *rule;
int err;
- /* Protect internal structures from changes */
- rtnl_lock();
+ mutex_lock(&acl->rules_lock);
list_for_each_entry(rule, &acl->rules, list) {
err = mlxsw_sp_acl_rule_activity_update(acl->mlxsw_sp,
rule);
if (err)
goto err_rule_update;
}
- rtnl_unlock();
+ mutex_unlock(&acl->rules_lock);
return 0;
err_rule_update:
- rtnl_unlock();
+ mutex_unlock(&acl->rules_lock);
return err;
}
@@ -880,6 +885,7 @@ int mlxsw_sp_acl_init(struct mlxsw_sp *mlxsw_sp)
acl->dummy_fid = fid;
INIT_LIST_HEAD(&acl->rules);
+ mutex_init(&acl->rules_lock);
err = mlxsw_sp_acl_tcam_init(mlxsw_sp, &acl->tcam);
if (err)
goto err_acl_ops_init;
@@ -892,6 +898,7 @@ int mlxsw_sp_acl_init(struct mlxsw_sp *mlxsw_sp)
return 0;
err_acl_ops_init:
+ mutex_destroy(&acl->rules_lock);
mlxsw_sp_fid_put(fid);
err_fid_get:
rhashtable_destroy(&acl->ruleset_ht);
@@ -908,6 +915,7 @@ void mlxsw_sp_acl_fini(struct mlxsw_sp *mlxsw_sp)
cancel_delayed_work_sync(&mlxsw_sp->acl->rule_activity_update.dw);
mlxsw_sp_acl_tcam_fini(mlxsw_sp, &acl->tcam);
+ mutex_destroy(&acl->rules_lock);
WARN_ON(!list_empty(&acl->rules));
mlxsw_sp_fid_put(acl->dummy_fid);
rhashtable_destroy(&acl->ruleset_ht);
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_dcb.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_dcb.c
index 21296fa7f7fb..49a72a8f1f57 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_dcb.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_dcb.c
@@ -160,7 +160,7 @@ static int __mlxsw_sp_dcbnl_ieee_setets(struct mlxsw_sp_port *mlxsw_sp_port,
u8 weight = ets->tc_tx_bw[i];
err = mlxsw_sp_port_ets_set(mlxsw_sp_port,
- MLXSW_REG_QEEC_HIERARCY_SUBGROUP, i,
+ MLXSW_REG_QEEC_HR_SUBGROUP, i,
0, dwrr, weight);
if (err) {
netdev_err(dev, "Failed to link subgroup ETS element %d to group\n",
@@ -198,7 +198,7 @@ err_port_ets_set:
u8 weight = my_ets->tc_tx_bw[i];
err = mlxsw_sp_port_ets_set(mlxsw_sp_port,
- MLXSW_REG_QEEC_HIERARCY_SUBGROUP, i,
+ MLXSW_REG_QEEC_HR_SUBGROUP, i,
0, dwrr, weight);
}
return err;
@@ -369,6 +369,17 @@ err_update_qrwe:
}
static int
+mlxsw_sp_port_dcb_app_update_qpdp(struct mlxsw_sp_port *mlxsw_sp_port,
+ u8 default_prio)
+{
+ struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+ char qpdp_pl[MLXSW_REG_QPDP_LEN];
+
+ mlxsw_reg_qpdp_pack(qpdp_pl, mlxsw_sp_port->local_port, default_prio);
+ return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(qpdp), qpdp_pl);
+}
+
+static int
mlxsw_sp_port_dcb_app_update_qpdpm(struct mlxsw_sp_port *mlxsw_sp_port,
struct dcb_ieee_app_dscp_map *map)
{
@@ -405,6 +416,12 @@ static int mlxsw_sp_port_dcb_app_update(struct mlxsw_sp_port *mlxsw_sp_port)
int err;
default_prio = mlxsw_sp_port_dcb_app_default_prio(mlxsw_sp_port);
+ err = mlxsw_sp_port_dcb_app_update_qpdp(mlxsw_sp_port, default_prio);
+ if (err) {
+ netdev_err(mlxsw_sp_port->dev, "Couldn't configure port default priority\n");
+ return err;
+ }
+
have_dscp = mlxsw_sp_port_dcb_app_prio_dscp_map(mlxsw_sp_port,
&prio_map);
@@ -507,9 +524,9 @@ static int mlxsw_sp_dcbnl_ieee_setmaxrate(struct net_device *dev,
for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
err = mlxsw_sp_port_ets_maxrate_set(mlxsw_sp_port,
- MLXSW_REG_QEEC_HIERARCY_SUBGROUP,
+ MLXSW_REG_QEEC_HR_SUBGROUP,
i, 0,
- maxrate->tc_maxrate[i]);
+ maxrate->tc_maxrate[i], 0);
if (err) {
netdev_err(dev, "Failed to set maxrate for TC %d\n", i);
goto err_port_ets_maxrate_set;
@@ -523,8 +540,9 @@ static int mlxsw_sp_dcbnl_ieee_setmaxrate(struct net_device *dev,
err_port_ets_maxrate_set:
for (i--; i >= 0; i--)
mlxsw_sp_port_ets_maxrate_set(mlxsw_sp_port,
- MLXSW_REG_QEEC_HIERARCY_SUBGROUP,
- i, 0, my_maxrate->tc_maxrate[i]);
+ MLXSW_REG_QEEC_HR_SUBGROUP,
+ i, 0,
+ my_maxrate->tc_maxrate[i], 0);
return err;
}
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_ipip.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_ipip.c
index 6400cd644b7a..a8525992528f 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_ipip.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_ipip.c
@@ -3,8 +3,10 @@
#include <net/ip_tunnels.h>
#include <net/ip6_tunnel.h>
+#include <net/inet_ecn.h>
#include "spectrum_ipip.h"
+#include "reg.h"
struct ip_tunnel_parm
mlxsw_sp_ipip_netdev_parms4(const struct net_device *ol_dev)
@@ -338,3 +340,61 @@ static const struct mlxsw_sp_ipip_ops mlxsw_sp_ipip_gre4_ops = {
const struct mlxsw_sp_ipip_ops *mlxsw_sp_ipip_ops_arr[] = {
[MLXSW_SP_IPIP_TYPE_GRE4] = &mlxsw_sp_ipip_gre4_ops,
};
+
+static int mlxsw_sp_ipip_ecn_encap_init_one(struct mlxsw_sp *mlxsw_sp,
+ u8 inner_ecn, u8 outer_ecn)
+{
+ char tieem_pl[MLXSW_REG_TIEEM_LEN];
+
+ mlxsw_reg_tieem_pack(tieem_pl, inner_ecn, outer_ecn);
+ return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(tieem), tieem_pl);
+}
+
+int mlxsw_sp_ipip_ecn_encap_init(struct mlxsw_sp *mlxsw_sp)
+{
+ int i;
+
+ /* Iterate over inner ECN values */
+ for (i = INET_ECN_NOT_ECT; i <= INET_ECN_CE; i++) {
+ u8 outer_ecn = INET_ECN_encapsulate(0, i);
+ int err;
+
+ err = mlxsw_sp_ipip_ecn_encap_init_one(mlxsw_sp, i, outer_ecn);
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+
+static int mlxsw_sp_ipip_ecn_decap_init_one(struct mlxsw_sp *mlxsw_sp,
+ u8 inner_ecn, u8 outer_ecn)
+{
+ char tidem_pl[MLXSW_REG_TIDEM_LEN];
+ bool trap_en, set_ce = false;
+ u8 new_inner_ecn;
+
+ trap_en = __INET_ECN_decapsulate(outer_ecn, inner_ecn, &set_ce);
+ new_inner_ecn = set_ce ? INET_ECN_CE : inner_ecn;
+
+ mlxsw_reg_tidem_pack(tidem_pl, outer_ecn, inner_ecn, new_inner_ecn,
+ trap_en, trap_en ? MLXSW_TRAP_ID_DECAP_ECN0 : 0);
+ return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(tidem), tidem_pl);
+}
+
+int mlxsw_sp_ipip_ecn_decap_init(struct mlxsw_sp *mlxsw_sp)
+{
+ int i, j, err;
+
+ /* Iterate over inner ECN values */
+ for (i = INET_ECN_NOT_ECT; i <= INET_ECN_CE; i++) {
+ /* Iterate over outer ECN values */
+ for (j = INET_ECN_NOT_ECT; j <= INET_ECN_CE; j++) {
+ err = mlxsw_sp_ipip_ecn_decap_init_one(mlxsw_sp, i, j);
+ if (err)
+ return err;
+ }
+ }
+
+ return 0;
+}
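The encap loop above relies on INET_ECN_encapsulate() for the RFC 6040 mapping. As a sanity check, a standalone sketch of the mapping it is assumed to produce for the four ECN codepoints (0 Not-ECT, 1 ECT(1), 2 ECT(0), 3 CE):

static u8 example_encap_outer_ecn(u8 inner_ecn)
{
	/* The outer header copies the inner ECN, except that inner CE is
	 * encapsulated as outer ECT(0).
	 */
	return inner_ecn == 3 ? 2 : inner_ecn;
}

On the decap side, __INET_ECN_decapsulate() supplies the matching RFC 6040 table: it requests CE on the inner header where the outer is CE and the inner is ECT-capable, and returns nonzero for combinations that cannot be mapped cleanly, which is why its return value doubles as trap_en with the DECAP_ECN0 trap ID.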
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_ptp.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_ptp.c
index ec2ff3d7f41c..34f7c3501b08 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_ptp.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_ptp.c
@@ -920,6 +920,7 @@ static int mlxsw_sp_ptp_get_message_types(const struct hwtstamp_config *config,
egr_types = 0xff;
break;
case HWTSTAMP_TX_ONESTEP_SYNC:
+ case HWTSTAMP_TX_ONESTEP_P2P:
return -ERANGE;
}
@@ -1015,27 +1016,17 @@ mlxsw_sp1_ptp_port_shaper_set(struct mlxsw_sp_port *mlxsw_sp_port, bool enable)
static int mlxsw_sp1_ptp_port_shaper_check(struct mlxsw_sp_port *mlxsw_sp_port)
{
- const struct mlxsw_sp_port_type_speed_ops *port_type_speed_ops;
- struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
- char ptys_pl[MLXSW_REG_PTYS_LEN];
- u32 eth_proto_oper, speed;
bool ptps = false;
int err, i;
+ u32 speed;
if (!mlxsw_sp1_ptp_hwtstamp_enabled(mlxsw_sp_port))
return mlxsw_sp1_ptp_port_shaper_set(mlxsw_sp_port, false);
- port_type_speed_ops = mlxsw_sp->port_type_speed_ops;
- port_type_speed_ops->reg_ptys_eth_pack(mlxsw_sp, ptys_pl,
- mlxsw_sp_port->local_port, 0,
- false);
- err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ptys), ptys_pl);
+ err = mlxsw_sp_port_speed_get(mlxsw_sp_port, &speed);
if (err)
return err;
- port_type_speed_ops->reg_ptys_eth_unpack(mlxsw_sp, ptys_pl, NULL, NULL,
- &eth_proto_oper);
- speed = port_type_speed_ops->from_ptys_speed(mlxsw_sp, eth_proto_oper);
for (i = 0; i < MLXSW_SP1_PTP_SHAPER_PARAMS_LEN; i++) {
if (mlxsw_sp1_ptp_shaper_params[i].ethtool_speed == speed) {
ptps = true;
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_qdisc.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_qdisc.c
index 46d43cfd04e9..79a2801d59f6 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_qdisc.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_qdisc.c
@@ -18,6 +18,8 @@ enum mlxsw_sp_qdisc_type {
MLXSW_SP_QDISC_NO_QDISC,
MLXSW_SP_QDISC_RED,
MLXSW_SP_QDISC_PRIO,
+ MLXSW_SP_QDISC_ETS,
+ MLXSW_SP_QDISC_TBF,
};
struct mlxsw_sp_qdisc_ops {
@@ -195,6 +197,20 @@ mlxsw_sp_qdisc_get_xstats(struct mlxsw_sp_port *mlxsw_sp_port,
return -EOPNOTSUPP;
}
+static u64
+mlxsw_sp_xstats_backlog(struct mlxsw_sp_port_xstats *xstats, int tclass_num)
+{
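+	/* Sum the unicast queue with its multicast counterpart; MC traffic
+	 * classes are offset by 8 (cf. the i + 8 entries in
+	 * mlxsw_sp_port_ets_init()).
+	 */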
+ return xstats->backlog[tclass_num] +
+ xstats->backlog[tclass_num + 8];
+}
+
+static u64
+mlxsw_sp_xstats_tail_drop(struct mlxsw_sp_port_xstats *xstats, int tclass_num)
+{
+ return xstats->tail_drop[tclass_num] +
+ xstats->tail_drop[tclass_num + 8];
+}
+
static void
mlxsw_sp_qdisc_bstats_per_priority_get(struct mlxsw_sp_port_xstats *xstats,
u8 prio_bitmap, u64 *tx_packets,
@@ -212,6 +228,70 @@ mlxsw_sp_qdisc_bstats_per_priority_get(struct mlxsw_sp_port_xstats *xstats,
}
}
+static void
+mlxsw_sp_qdisc_collect_tc_stats(struct mlxsw_sp_port *mlxsw_sp_port,
+ struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
+ u64 *p_tx_bytes, u64 *p_tx_packets,
+ u64 *p_drops, u64 *p_backlog)
+{
+ u8 tclass_num = mlxsw_sp_qdisc->tclass_num;
+ struct mlxsw_sp_port_xstats *xstats;
+ u64 tx_bytes, tx_packets;
+
+ xstats = &mlxsw_sp_port->periodic_hw_stats.xstats;
+ mlxsw_sp_qdisc_bstats_per_priority_get(xstats,
+ mlxsw_sp_qdisc->prio_bitmap,
+ &tx_packets, &tx_bytes);
+
+ *p_tx_packets += tx_packets;
+ *p_tx_bytes += tx_bytes;
+ *p_drops += xstats->wred_drop[tclass_num] +
+ mlxsw_sp_xstats_tail_drop(xstats, tclass_num);
+ *p_backlog += mlxsw_sp_xstats_backlog(xstats, tclass_num);
+}
+
+static void
+mlxsw_sp_qdisc_update_stats(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
+ u64 tx_bytes, u64 tx_packets,
+ u64 drops, u64 backlog,
+ struct tc_qopt_offload_stats *stats_ptr)
+{
+ struct mlxsw_sp_qdisc_stats *stats_base = &mlxsw_sp_qdisc->stats_base;
+
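+	/* Turn the absolute HW counters into deltas since the last readout,
+	 * report the deltas, then fold them back into the stored base.
+	 */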
+ tx_bytes -= stats_base->tx_bytes;
+ tx_packets -= stats_base->tx_packets;
+ drops -= stats_base->drops;
+ backlog -= stats_base->backlog;
+
+ _bstats_update(stats_ptr->bstats, tx_bytes, tx_packets);
+ stats_ptr->qstats->drops += drops;
+ stats_ptr->qstats->backlog += mlxsw_sp_cells_bytes(mlxsw_sp, backlog);
+
+ stats_base->backlog += backlog;
+ stats_base->drops += drops;
+ stats_base->tx_bytes += tx_bytes;
+ stats_base->tx_packets += tx_packets;
+}
+
+static void
+mlxsw_sp_qdisc_get_tc_stats(struct mlxsw_sp_port *mlxsw_sp_port,
+ struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
+ struct tc_qopt_offload_stats *stats_ptr)
+{
+ u64 tx_packets = 0;
+ u64 tx_bytes = 0;
+ u64 backlog = 0;
+ u64 drops = 0;
+
+ mlxsw_sp_qdisc_collect_tc_stats(mlxsw_sp_port, mlxsw_sp_qdisc,
+ &tx_bytes, &tx_packets,
+ &drops, &backlog);
+ mlxsw_sp_qdisc_update_stats(mlxsw_sp_port->mlxsw_sp, mlxsw_sp_qdisc,
+ tx_bytes, tx_packets, drops, backlog,
+ stats_ptr);
+}
+
static int
mlxsw_sp_tclass_congestion_enable(struct mlxsw_sp_port *mlxsw_sp_port,
int tclass_num, u32 min, u32 max,
@@ -269,7 +349,7 @@ mlxsw_sp_setup_tc_qdisc_red_clean_stats(struct mlxsw_sp_port *mlxsw_sp_port,
&stats_base->tx_bytes);
red_base->prob_mark = xstats->ecn;
red_base->prob_drop = xstats->wred_drop[tclass_num];
- red_base->pdrop = xstats->tail_drop[tclass_num];
+ red_base->pdrop = mlxsw_sp_xstats_tail_drop(xstats, tclass_num);
stats_base->overlimits = red_base->prob_drop + red_base->prob_mark;
stats_base->drops = red_base->prob_drop + red_base->pdrop;
@@ -342,19 +422,28 @@ mlxsw_sp_qdisc_red_replace(struct mlxsw_sp_port *mlxsw_sp_port,
}
static void
-mlxsw_sp_qdisc_red_unoffload(struct mlxsw_sp_port *mlxsw_sp_port,
- struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
- void *params)
+mlxsw_sp_qdisc_leaf_unoffload(struct mlxsw_sp_port *mlxsw_sp_port,
+ struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
+ struct gnet_stats_queue *qstats)
{
- struct tc_red_qopt_offload_params *p = params;
u64 backlog;
backlog = mlxsw_sp_cells_bytes(mlxsw_sp_port->mlxsw_sp,
mlxsw_sp_qdisc->stats_base.backlog);
- p->qstats->backlog -= backlog;
+ qstats->backlog -= backlog;
mlxsw_sp_qdisc->stats_base.backlog = 0;
}
+static void
+mlxsw_sp_qdisc_red_unoffload(struct mlxsw_sp_port *mlxsw_sp_port,
+ struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
+ void *params)
+{
+ struct tc_red_qopt_offload_params *p = params;
+
+ mlxsw_sp_qdisc_leaf_unoffload(mlxsw_sp_port, mlxsw_sp_qdisc, p->qstats);
+}
+
static int
mlxsw_sp_qdisc_get_red_xstats(struct mlxsw_sp_port *mlxsw_sp_port,
struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
@@ -370,7 +459,8 @@ mlxsw_sp_qdisc_get_red_xstats(struct mlxsw_sp_port *mlxsw_sp_port,
early_drops = xstats->wred_drop[tclass_num] - xstats_base->prob_drop;
marks = xstats->ecn - xstats_base->prob_mark;
- pdrops = xstats->tail_drop[tclass_num] - xstats_base->pdrop;
+ pdrops = mlxsw_sp_xstats_tail_drop(xstats, tclass_num) -
+ xstats_base->pdrop;
res->pdrop += pdrops;
res->prob_drop += early_drops;
@@ -387,40 +477,21 @@ mlxsw_sp_qdisc_get_red_stats(struct mlxsw_sp_port *mlxsw_sp_port,
struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
struct tc_qopt_offload_stats *stats_ptr)
{
- u64 tx_bytes, tx_packets, overlimits, drops, backlog;
u8 tclass_num = mlxsw_sp_qdisc->tclass_num;
struct mlxsw_sp_qdisc_stats *stats_base;
struct mlxsw_sp_port_xstats *xstats;
+ u64 overlimits;
xstats = &mlxsw_sp_port->periodic_hw_stats.xstats;
stats_base = &mlxsw_sp_qdisc->stats_base;
- mlxsw_sp_qdisc_bstats_per_priority_get(xstats,
- mlxsw_sp_qdisc->prio_bitmap,
- &tx_packets, &tx_bytes);
- tx_bytes = tx_bytes - stats_base->tx_bytes;
- tx_packets = tx_packets - stats_base->tx_packets;
-
+ mlxsw_sp_qdisc_get_tc_stats(mlxsw_sp_port, mlxsw_sp_qdisc, stats_ptr);
overlimits = xstats->wred_drop[tclass_num] + xstats->ecn -
stats_base->overlimits;
- drops = xstats->wred_drop[tclass_num] + xstats->tail_drop[tclass_num] -
- stats_base->drops;
- backlog = xstats->backlog[tclass_num];
- _bstats_update(stats_ptr->bstats, tx_bytes, tx_packets);
stats_ptr->qstats->overlimits += overlimits;
- stats_ptr->qstats->drops += drops;
- stats_ptr->qstats->backlog +=
- mlxsw_sp_cells_bytes(mlxsw_sp_port->mlxsw_sp,
- backlog) -
- mlxsw_sp_cells_bytes(mlxsw_sp_port->mlxsw_sp,
- stats_base->backlog);
-
- stats_base->backlog = backlog;
- stats_base->drops += drops;
stats_base->overlimits += overlimits;
- stats_base->tx_bytes += tx_bytes;
- stats_base->tx_packets += tx_packets;
+
return 0;
}
@@ -470,15 +541,215 @@ int mlxsw_sp_setup_tc_red(struct mlxsw_sp_port *mlxsw_sp_port,
}
}
+static void
+mlxsw_sp_setup_tc_qdisc_leaf_clean_stats(struct mlxsw_sp_port *mlxsw_sp_port,
+ struct mlxsw_sp_qdisc *mlxsw_sp_qdisc)
+{
+ u64 backlog_cells = 0;
+ u64 tx_packets = 0;
+ u64 tx_bytes = 0;
+ u64 drops = 0;
+
+ mlxsw_sp_qdisc_collect_tc_stats(mlxsw_sp_port, mlxsw_sp_qdisc,
+ &tx_bytes, &tx_packets,
+ &drops, &backlog_cells);
+
+ mlxsw_sp_qdisc->stats_base.tx_packets = tx_packets;
+ mlxsw_sp_qdisc->stats_base.tx_bytes = tx_bytes;
+ mlxsw_sp_qdisc->stats_base.drops = drops;
+ mlxsw_sp_qdisc->stats_base.backlog = 0;
+}
+
static int
-mlxsw_sp_qdisc_prio_destroy(struct mlxsw_sp_port *mlxsw_sp_port,
- struct mlxsw_sp_qdisc *mlxsw_sp_qdisc)
+mlxsw_sp_qdisc_tbf_destroy(struct mlxsw_sp_port *mlxsw_sp_port,
+ struct mlxsw_sp_qdisc *mlxsw_sp_qdisc)
+{
+ struct mlxsw_sp_qdisc *root_qdisc = mlxsw_sp_port->root_qdisc;
+
+ if (root_qdisc != mlxsw_sp_qdisc)
+ root_qdisc->stats_base.backlog -=
+ mlxsw_sp_qdisc->stats_base.backlog;
+
+ return mlxsw_sp_port_ets_maxrate_set(mlxsw_sp_port,
+ MLXSW_REG_QEEC_HR_SUBGROUP,
+ mlxsw_sp_qdisc->tclass_num, 0,
+ MLXSW_REG_QEEC_MAS_DIS, 0);
+}
+
+static int
+mlxsw_sp_qdisc_tbf_bs(struct mlxsw_sp_port *mlxsw_sp_port,
+ u32 max_size, u8 *p_burst_size)
+{
+ /* TBF burst size is configured in bytes. The ASIC burst size value is
+	 * (2^bs) * 512 bits. Convert the TBF bytes to 512-bit units.
+ */
+ u32 bs512 = max_size / 64;
+ u8 bs = fls(bs512);
+
+ if (!bs)
+ return -EINVAL;
+ --bs;
+
+ /* Demand a power of two. */
+ if ((1 << bs) != bs512)
+ return -EINVAL;
+
+ if (bs < mlxsw_sp_port->mlxsw_sp->lowest_shaper_bs ||
+ bs > MLXSW_REG_QEEC_HIGHEST_SHAPER_BS)
+ return -EINVAL;
+
+ *p_burst_size = bs;
+ return 0;
+}
+
+static u32
+mlxsw_sp_qdisc_tbf_max_size(u8 bs)
+{
+ return (1U << bs) * 64;
+}
+
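A condensed, hypothetical restatement of the conversion pair above, with example values worked in as comments:

static int example_tbf_bytes_to_bs(u32 max_size)
{
	u32 bs512 = max_size / 64;	/* 512 bits == 64 bytes; 4096 -> 64 */
	int bs = fls(bs512) - 1;	/* fls(64) = 7, so bs = 6 */

	if (bs < 0 || (1u << bs) != bs512)
		return -EINVAL;		/* 4000 -> 62, not a power of two */
	return bs;	/* and mlxsw_sp_qdisc_tbf_max_size(6) == 4096 */
}

The acceptable burst range is therefore 2 KiB (bs = 5) through 2 GiB (bs = 25) on Spectrum-1, and starts at 128 KiB (bs = 11) on Spectrum-2.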
+static u64
+mlxsw_sp_qdisc_tbf_rate_kbps(struct tc_tbf_qopt_offload_replace_params *p)
+{
+	/* The TBF interface is in bytes/s, whereas the Spectrum ASIC is
+	 * configured in Kbits/s.
+ */
+ return p->rate.rate_bytes_ps / 1000 * 8;
+}
+
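Worked through: 12,500,000 bytes/s (100 Mbit/s) / 1000 * 8 = 100,000 Kbps. Dividing before multiplying truncates any sub-Kbps remainder but, presumably by design, keeps the intermediate u64 from overflowing at extreme rates.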
+static int
+mlxsw_sp_qdisc_tbf_check_params(struct mlxsw_sp_port *mlxsw_sp_port,
+ struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
+ void *params)
+{
+ struct tc_tbf_qopt_offload_replace_params *p = params;
+ struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+ u64 rate_kbps = mlxsw_sp_qdisc_tbf_rate_kbps(p);
+ u8 burst_size;
+ int err;
+
+ if (rate_kbps >= MLXSW_REG_QEEC_MAS_DIS) {
+ dev_err(mlxsw_sp_port->mlxsw_sp->bus_info->dev,
+ "spectrum: TBF: rate of %lluKbps must be below %u\n",
+ rate_kbps, MLXSW_REG_QEEC_MAS_DIS);
+ return -EINVAL;
+ }
+
+ err = mlxsw_sp_qdisc_tbf_bs(mlxsw_sp_port, p->max_size, &burst_size);
+ if (err) {
+ u8 highest_shaper_bs = MLXSW_REG_QEEC_HIGHEST_SHAPER_BS;
+
+ dev_err(mlxsw_sp->bus_info->dev,
+			"spectrum: TBF: invalid burst size of %u, must be a power of two between %u and %u\n",
+ p->max_size,
+ mlxsw_sp_qdisc_tbf_max_size(mlxsw_sp->lowest_shaper_bs),
+ mlxsw_sp_qdisc_tbf_max_size(highest_shaper_bs));
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int
+mlxsw_sp_qdisc_tbf_replace(struct mlxsw_sp_port *mlxsw_sp_port,
+ struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
+ void *params)
+{
+ struct tc_tbf_qopt_offload_replace_params *p = params;
+ u64 rate_kbps = mlxsw_sp_qdisc_tbf_rate_kbps(p);
+ u8 burst_size;
+ int err;
+
+ err = mlxsw_sp_qdisc_tbf_bs(mlxsw_sp_port, p->max_size, &burst_size);
+ if (WARN_ON_ONCE(err))
+ /* check_params above was supposed to reject this value. */
+ return -EINVAL;
+
+	/* Configure the subgroup shaper, so that both UC and MC traffic are
+	 * subject to shaping. That is unlike RED: there, UC and MC queue
+	 * lengths differ due to different pool and quota configurations, so a
+	 * shared configuration is not applicable. For a shaper, on the other
+	 * hand, subjecting the overall stream to the configured rate makes
+	 * sense. Note that this is also what we do for ieee_setmaxrate().
+	 */
+ return mlxsw_sp_port_ets_maxrate_set(mlxsw_sp_port,
+ MLXSW_REG_QEEC_HR_SUBGROUP,
+ mlxsw_sp_qdisc->tclass_num, 0,
+ rate_kbps, burst_size);
+}
+
+static void
+mlxsw_sp_qdisc_tbf_unoffload(struct mlxsw_sp_port *mlxsw_sp_port,
+ struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
+ void *params)
+{
+ struct tc_tbf_qopt_offload_replace_params *p = params;
+
+ mlxsw_sp_qdisc_leaf_unoffload(mlxsw_sp_port, mlxsw_sp_qdisc, p->qstats);
+}
+
+static int
+mlxsw_sp_qdisc_get_tbf_stats(struct mlxsw_sp_port *mlxsw_sp_port,
+ struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
+ struct tc_qopt_offload_stats *stats_ptr)
+{
+ mlxsw_sp_qdisc_get_tc_stats(mlxsw_sp_port, mlxsw_sp_qdisc,
+ stats_ptr);
+ return 0;
+}
+
+static struct mlxsw_sp_qdisc_ops mlxsw_sp_qdisc_ops_tbf = {
+ .type = MLXSW_SP_QDISC_TBF,
+ .check_params = mlxsw_sp_qdisc_tbf_check_params,
+ .replace = mlxsw_sp_qdisc_tbf_replace,
+ .unoffload = mlxsw_sp_qdisc_tbf_unoffload,
+ .destroy = mlxsw_sp_qdisc_tbf_destroy,
+ .get_stats = mlxsw_sp_qdisc_get_tbf_stats,
+ .clean_stats = mlxsw_sp_setup_tc_qdisc_leaf_clean_stats,
+};
+
+int mlxsw_sp_setup_tc_tbf(struct mlxsw_sp_port *mlxsw_sp_port,
+ struct tc_tbf_qopt_offload *p)
+{
+ struct mlxsw_sp_qdisc *mlxsw_sp_qdisc;
+
+ mlxsw_sp_qdisc = mlxsw_sp_qdisc_find(mlxsw_sp_port, p->parent, false);
+ if (!mlxsw_sp_qdisc)
+ return -EOPNOTSUPP;
+
+ if (p->command == TC_TBF_REPLACE)
+ return mlxsw_sp_qdisc_replace(mlxsw_sp_port, p->handle,
+ mlxsw_sp_qdisc,
+ &mlxsw_sp_qdisc_ops_tbf,
+ &p->replace_params);
+
+ if (!mlxsw_sp_qdisc_compare(mlxsw_sp_qdisc, p->handle,
+ MLXSW_SP_QDISC_TBF))
+ return -EOPNOTSUPP;
+
+ switch (p->command) {
+ case TC_TBF_DESTROY:
+ return mlxsw_sp_qdisc_destroy(mlxsw_sp_port, mlxsw_sp_qdisc);
+ case TC_TBF_STATS:
+ return mlxsw_sp_qdisc_get_stats(mlxsw_sp_port, mlxsw_sp_qdisc,
+ &p->stats);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static int
+__mlxsw_sp_qdisc_ets_destroy(struct mlxsw_sp_port *mlxsw_sp_port)
{
int i;
for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
mlxsw_sp_port_prio_tc_set(mlxsw_sp_port, i,
MLXSW_SP_PORT_DEFAULT_TCLASS);
+ mlxsw_sp_port_ets_set(mlxsw_sp_port,
+ MLXSW_REG_QEEC_HR_SUBGROUP,
+ i, 0, false, 0);
mlxsw_sp_qdisc_destroy(mlxsw_sp_port,
&mlxsw_sp_port->tclass_qdiscs[i]);
mlxsw_sp_port->tclass_qdiscs[i].prio_bitmap = 0;
@@ -488,36 +759,58 @@ mlxsw_sp_qdisc_prio_destroy(struct mlxsw_sp_port *mlxsw_sp_port,
}
static int
-mlxsw_sp_qdisc_prio_check_params(struct mlxsw_sp_port *mlxsw_sp_port,
- struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
- void *params)
+mlxsw_sp_qdisc_prio_destroy(struct mlxsw_sp_port *mlxsw_sp_port,
+ struct mlxsw_sp_qdisc *mlxsw_sp_qdisc)
{
- struct tc_prio_qopt_offload_params *p = params;
+ return __mlxsw_sp_qdisc_ets_destroy(mlxsw_sp_port);
+}
- if (p->bands > IEEE_8021QAZ_MAX_TCS)
+static int
+__mlxsw_sp_qdisc_ets_check_params(unsigned int nbands)
+{
+ if (nbands > IEEE_8021QAZ_MAX_TCS)
return -EOPNOTSUPP;
return 0;
}
static int
-mlxsw_sp_qdisc_prio_replace(struct mlxsw_sp_port *mlxsw_sp_port,
- struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
- void *params)
+mlxsw_sp_qdisc_prio_check_params(struct mlxsw_sp_port *mlxsw_sp_port,
+ struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
+ void *params)
{
struct tc_prio_qopt_offload_params *p = params;
+
+ return __mlxsw_sp_qdisc_ets_check_params(p->bands);
+}
+
+static int
+__mlxsw_sp_qdisc_ets_replace(struct mlxsw_sp_port *mlxsw_sp_port,
+ unsigned int nbands,
+ const unsigned int *quanta,
+ const unsigned int *weights,
+ const u8 *priomap)
+{
struct mlxsw_sp_qdisc *child_qdisc;
int tclass, i, band, backlog;
u8 old_priomap;
int err;
- for (band = 0; band < p->bands; band++) {
+ for (band = 0; band < nbands; band++) {
tclass = MLXSW_SP_PRIO_BAND_TO_TCLASS(band);
child_qdisc = &mlxsw_sp_port->tclass_qdiscs[tclass];
old_priomap = child_qdisc->prio_bitmap;
child_qdisc->prio_bitmap = 0;
+
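+		/* A zero quantum selects strict priority for the band; a
+		 * non-zero quantum enables DWRR with the given weight. PRIO
+		 * reuses this path with all-zero quanta (see
+		 * mlxsw_sp_qdisc_prio_replace() below).
+		 */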
+ err = mlxsw_sp_port_ets_set(mlxsw_sp_port,
+ MLXSW_REG_QEEC_HR_SUBGROUP,
+ tclass, 0, !!quanta[band],
+ weights[band]);
+ if (err)
+ return err;
+
for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
- if (p->priomap[i] == band) {
+ if (priomap[i] == band) {
child_qdisc->prio_bitmap |= BIT(i);
if (BIT(i) & old_priomap)
continue;
@@ -540,21 +833,46 @@ mlxsw_sp_qdisc_prio_replace(struct mlxsw_sp_port *mlxsw_sp_port,
child_qdisc = &mlxsw_sp_port->tclass_qdiscs[tclass];
child_qdisc->prio_bitmap = 0;
mlxsw_sp_qdisc_destroy(mlxsw_sp_port, child_qdisc);
+ mlxsw_sp_port_ets_set(mlxsw_sp_port,
+ MLXSW_REG_QEEC_HR_SUBGROUP,
+ tclass, 0, false, 0);
}
return 0;
}
+static int
+mlxsw_sp_qdisc_prio_replace(struct mlxsw_sp_port *mlxsw_sp_port,
+ struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
+ void *params)
+{
+ struct tc_prio_qopt_offload_params *p = params;
+ unsigned int zeroes[TCQ_ETS_MAX_BANDS] = {0};
+
+ return __mlxsw_sp_qdisc_ets_replace(mlxsw_sp_port, p->bands,
+ zeroes, zeroes, p->priomap);
+}
+
+static void
+__mlxsw_sp_qdisc_ets_unoffload(struct mlxsw_sp_port *mlxsw_sp_port,
+ struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
+ struct gnet_stats_queue *qstats)
+{
+ u64 backlog;
+
+ backlog = mlxsw_sp_cells_bytes(mlxsw_sp_port->mlxsw_sp,
+ mlxsw_sp_qdisc->stats_base.backlog);
+ qstats->backlog -= backlog;
+}
+
static void
mlxsw_sp_qdisc_prio_unoffload(struct mlxsw_sp_port *mlxsw_sp_port,
struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
void *params)
{
struct tc_prio_qopt_offload_params *p = params;
- u64 backlog;
- backlog = mlxsw_sp_cells_bytes(mlxsw_sp_port->mlxsw_sp,
- mlxsw_sp_qdisc->stats_base.backlog);
- p->qstats->backlog -= backlog;
+ __mlxsw_sp_qdisc_ets_unoffload(mlxsw_sp_port, mlxsw_sp_qdisc,
+ p->qstats);
}
static int
@@ -562,37 +880,23 @@ mlxsw_sp_qdisc_get_prio_stats(struct mlxsw_sp_port *mlxsw_sp_port,
struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
struct tc_qopt_offload_stats *stats_ptr)
{
- u64 tx_bytes, tx_packets, drops = 0, backlog = 0;
- struct mlxsw_sp_qdisc_stats *stats_base;
- struct mlxsw_sp_port_xstats *xstats;
- struct rtnl_link_stats64 *stats;
+ struct mlxsw_sp_qdisc *tc_qdisc;
+ u64 tx_packets = 0;
+ u64 tx_bytes = 0;
+ u64 backlog = 0;
+ u64 drops = 0;
int i;
- xstats = &mlxsw_sp_port->periodic_hw_stats.xstats;
- stats = &mlxsw_sp_port->periodic_hw_stats.stats;
- stats_base = &mlxsw_sp_qdisc->stats_base;
-
- tx_bytes = stats->tx_bytes - stats_base->tx_bytes;
- tx_packets = stats->tx_packets - stats_base->tx_packets;
-
for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
- drops += xstats->tail_drop[i];
- drops += xstats->wred_drop[i];
- backlog += xstats->backlog[i];
+ tc_qdisc = &mlxsw_sp_port->tclass_qdiscs[i];
+ mlxsw_sp_qdisc_collect_tc_stats(mlxsw_sp_port, tc_qdisc,
+ &tx_bytes, &tx_packets,
+ &drops, &backlog);
}
- drops = drops - stats_base->drops;
- _bstats_update(stats_ptr->bstats, tx_bytes, tx_packets);
- stats_ptr->qstats->drops += drops;
- stats_ptr->qstats->backlog +=
- mlxsw_sp_cells_bytes(mlxsw_sp_port->mlxsw_sp,
- backlog) -
- mlxsw_sp_cells_bytes(mlxsw_sp_port->mlxsw_sp,
- stats_base->backlog);
- stats_base->backlog = backlog;
- stats_base->drops += drops;
- stats_base->tx_bytes += tx_bytes;
- stats_base->tx_packets += tx_packets;
+ mlxsw_sp_qdisc_update_stats(mlxsw_sp_port->mlxsw_sp, mlxsw_sp_qdisc,
+ tx_bytes, tx_packets, drops, backlog,
+ stats_ptr);
return 0;
}
@@ -614,7 +918,7 @@ mlxsw_sp_setup_tc_qdisc_prio_clean_stats(struct mlxsw_sp_port *mlxsw_sp_port,
stats_base->drops = 0;
for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
- stats_base->drops += xstats->tail_drop[i];
+ stats_base->drops += mlxsw_sp_xstats_tail_drop(xstats, i);
stats_base->drops += xstats->wred_drop[i];
}
@@ -631,27 +935,93 @@ static struct mlxsw_sp_qdisc_ops mlxsw_sp_qdisc_ops_prio = {
.clean_stats = mlxsw_sp_setup_tc_qdisc_prio_clean_stats,
};
-/* Grafting is not supported in mlxsw. It will result in un-offloading of the
- * grafted qdisc as well as the qdisc in the qdisc new location.
- * (However, if the graft is to the location where the qdisc is already at, it
- * will be ignored completely and won't cause un-offloading).
+static int
+mlxsw_sp_qdisc_ets_check_params(struct mlxsw_sp_port *mlxsw_sp_port,
+ struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
+ void *params)
+{
+ struct tc_ets_qopt_offload_replace_params *p = params;
+
+ return __mlxsw_sp_qdisc_ets_check_params(p->bands);
+}
+
+static int
+mlxsw_sp_qdisc_ets_replace(struct mlxsw_sp_port *mlxsw_sp_port,
+ struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
+ void *params)
+{
+ struct tc_ets_qopt_offload_replace_params *p = params;
+
+ return __mlxsw_sp_qdisc_ets_replace(mlxsw_sp_port, p->bands,
+ p->quanta, p->weights, p->priomap);
+}
+
+static void
+mlxsw_sp_qdisc_ets_unoffload(struct mlxsw_sp_port *mlxsw_sp_port,
+ struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
+ void *params)
+{
+ struct tc_ets_qopt_offload_replace_params *p = params;
+
+ __mlxsw_sp_qdisc_ets_unoffload(mlxsw_sp_port, mlxsw_sp_qdisc,
+ p->qstats);
+}
+
+static int
+mlxsw_sp_qdisc_ets_destroy(struct mlxsw_sp_port *mlxsw_sp_port,
+ struct mlxsw_sp_qdisc *mlxsw_sp_qdisc)
+{
+ return __mlxsw_sp_qdisc_ets_destroy(mlxsw_sp_port);
+}
+
+static struct mlxsw_sp_qdisc_ops mlxsw_sp_qdisc_ops_ets = {
+ .type = MLXSW_SP_QDISC_ETS,
+ .check_params = mlxsw_sp_qdisc_ets_check_params,
+ .replace = mlxsw_sp_qdisc_ets_replace,
+ .unoffload = mlxsw_sp_qdisc_ets_unoffload,
+ .destroy = mlxsw_sp_qdisc_ets_destroy,
+ .get_stats = mlxsw_sp_qdisc_get_prio_stats,
+ .clean_stats = mlxsw_sp_setup_tc_qdisc_prio_clean_stats,
+};
+
+/* Linux allows linking of Qdiscs to arbitrary classes (so long as the resulting
+ * graph is free of cycles). These operations do not change the parent handle
+ * though, which means it can be incomplete (if there is more than one class
+ * where the Qdisc in question is grafted) or outright wrong (if the Qdisc was
+ * linked to a different class and then removed from the original class).
+ *
+ * E.g. consider this sequence of operations:
+ *
+ * # tc qdisc add dev swp1 root handle 1: prio
+ * # tc qdisc add dev swp1 parent 1:3 handle 13: red limit 1000000 avpkt 10000
+ * RED: set bandwidth to 10Mbit
+ * # tc qdisc link dev swp1 handle 13: parent 1:2
+ *
+ * At this point, both 1:2 and 1:3 have the same RED Qdisc instance as their
+ * child. But RED will still only claim that 1:3 is its parent. If it's removed
+ * from that band, its only parent will be 1:2, but it will continue to claim
+ * that it is in fact 1:3.
+ *
+ * The notification for child Qdisc replace (e.g. TC_RED_REPLACE) comes before
+ * the notification for parent graft (e.g. TC_PRIO_GRAFT). We take the replace
+ * notification to offload the child Qdisc, based on its parent handle, and use
+ * the graft operation to validate that the class where the child is actually
+ * grafted corresponds to the parent handle. If the two don't match, we
+ * unoffload the child.
*/
static int
-mlxsw_sp_qdisc_prio_graft(struct mlxsw_sp_port *mlxsw_sp_port,
- struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
- struct tc_prio_qopt_offload_graft_params *p)
+__mlxsw_sp_qdisc_ets_graft(struct mlxsw_sp_port *mlxsw_sp_port,
+ struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
+ u8 band, u32 child_handle)
{
- int tclass_num = MLXSW_SP_PRIO_BAND_TO_TCLASS(p->band);
+ int tclass_num = MLXSW_SP_PRIO_BAND_TO_TCLASS(band);
struct mlxsw_sp_qdisc *old_qdisc;
- /* Check if the grafted qdisc is already in its "new" location. If so -
- * nothing needs to be done.
- */
- if (p->band < IEEE_8021QAZ_MAX_TCS &&
- mlxsw_sp_port->tclass_qdiscs[tclass_num].handle == p->child_handle)
+ if (band < IEEE_8021QAZ_MAX_TCS &&
+ mlxsw_sp_port->tclass_qdiscs[tclass_num].handle == child_handle)
return 0;
- if (!p->child_handle) {
+ if (!child_handle) {
/* This is an invisible FIFO replacing the original Qdisc.
* Ignore it--the original Qdisc's destroy will follow.
*/
@@ -662,7 +1032,7 @@ mlxsw_sp_qdisc_prio_graft(struct mlxsw_sp_port *mlxsw_sp_port,
* unoffload it.
*/
old_qdisc = mlxsw_sp_qdisc_find_by_handle(mlxsw_sp_port,
- p->child_handle);
+ child_handle);
if (old_qdisc)
mlxsw_sp_qdisc_destroy(mlxsw_sp_port, old_qdisc);
@@ -671,6 +1041,15 @@ mlxsw_sp_qdisc_prio_graft(struct mlxsw_sp_port *mlxsw_sp_port,
return -EOPNOTSUPP;
}
+static int
+mlxsw_sp_qdisc_prio_graft(struct mlxsw_sp_port *mlxsw_sp_port,
+ struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
+ struct tc_prio_qopt_offload_graft_params *p)
+{
+ return __mlxsw_sp_qdisc_ets_graft(mlxsw_sp_port, mlxsw_sp_qdisc,
+ p->band, p->child_handle);
+}
+
int mlxsw_sp_setup_tc_prio(struct mlxsw_sp_port *mlxsw_sp_port,
struct tc_prio_qopt_offload *p)
{
@@ -704,6 +1083,40 @@ int mlxsw_sp_setup_tc_prio(struct mlxsw_sp_port *mlxsw_sp_port,
}
}
+int mlxsw_sp_setup_tc_ets(struct mlxsw_sp_port *mlxsw_sp_port,
+ struct tc_ets_qopt_offload *p)
+{
+ struct mlxsw_sp_qdisc *mlxsw_sp_qdisc;
+
+ mlxsw_sp_qdisc = mlxsw_sp_qdisc_find(mlxsw_sp_port, p->parent, true);
+ if (!mlxsw_sp_qdisc)
+ return -EOPNOTSUPP;
+
+ if (p->command == TC_ETS_REPLACE)
+ return mlxsw_sp_qdisc_replace(mlxsw_sp_port, p->handle,
+ mlxsw_sp_qdisc,
+ &mlxsw_sp_qdisc_ops_ets,
+ &p->replace_params);
+
+ if (!mlxsw_sp_qdisc_compare(mlxsw_sp_qdisc, p->handle,
+ MLXSW_SP_QDISC_ETS))
+ return -EOPNOTSUPP;
+
+ switch (p->command) {
+ case TC_ETS_DESTROY:
+ return mlxsw_sp_qdisc_destroy(mlxsw_sp_port, mlxsw_sp_qdisc);
+ case TC_ETS_STATS:
+ return mlxsw_sp_qdisc_get_stats(mlxsw_sp_port, mlxsw_sp_qdisc,
+ &p->stats);
+ case TC_ETS_GRAFT:
+ return __mlxsw_sp_qdisc_ets_graft(mlxsw_sp_port, mlxsw_sp_qdisc,
+ p->graft_params.band,
+ p->graft_params.child_handle);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
int mlxsw_sp_tc_qdisc_init(struct mlxsw_sp_port *mlxsw_sp_port)
{
struct mlxsw_sp_qdisc *mlxsw_sp_qdisc;
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
index 8290e82240fc..ce707723f8cf 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
@@ -382,9 +382,10 @@ enum mlxsw_sp_fib_entry_type {
};
struct mlxsw_sp_nexthop_group;
+struct mlxsw_sp_fib_entry;
struct mlxsw_sp_fib_node {
- struct list_head entry_list;
+ struct mlxsw_sp_fib_entry *fib_entry;
struct list_head list;
struct rhash_head ht_node;
struct mlxsw_sp_fib *fib;
@@ -397,7 +398,6 @@ struct mlxsw_sp_fib_entry_decap {
};
struct mlxsw_sp_fib_entry {
- struct list_head list;
struct mlxsw_sp_fib_node *fib_node;
enum mlxsw_sp_fib_entry_type type;
struct list_head nexthop_group_node;
@@ -1162,7 +1162,6 @@ mlxsw_sp_router_ip2me_fib_entry_find(struct mlxsw_sp *mlxsw_sp, u32 tb_id,
const union mlxsw_sp_l3addr *addr,
enum mlxsw_sp_fib_entry_type type)
{
- struct mlxsw_sp_fib_entry *fib_entry;
struct mlxsw_sp_fib_node *fib_node;
unsigned char addr_prefix_len;
struct mlxsw_sp_fib *fib;
@@ -1191,15 +1190,10 @@ mlxsw_sp_router_ip2me_fib_entry_find(struct mlxsw_sp *mlxsw_sp, u32 tb_id,
fib_node = mlxsw_sp_fib_node_lookup(fib, addrp, addr_len,
addr_prefix_len);
- if (!fib_node || list_empty(&fib_node->entry_list))
+ if (!fib_node || fib_node->fib_entry->type != type)
return NULL;
- fib_entry = list_first_entry(&fib_node->entry_list,
- struct mlxsw_sp_fib_entry, list);
- if (fib_entry->type != type)
- return NULL;
-
- return fib_entry;
+ return fib_node->fib_entry;
}
/* Given an IPIP entry, find the corresponding decap route. */
@@ -1209,7 +1203,6 @@ mlxsw_sp_ipip_entry_find_decap(struct mlxsw_sp *mlxsw_sp,
{
	struct mlxsw_sp_fib_node *fib_node;
const struct mlxsw_sp_ipip_ops *ipip_ops;
- struct mlxsw_sp_fib_entry *fib_entry;
unsigned char saddr_prefix_len;
union mlxsw_sp_l3addr saddr;
struct mlxsw_sp_fib *ul_fib;
@@ -1244,15 +1237,11 @@ mlxsw_sp_ipip_entry_find_decap(struct mlxsw_sp *mlxsw_sp,
fib_node = mlxsw_sp_fib_node_lookup(ul_fib, saddrp, saddr_len,
saddr_prefix_len);
- if (!fib_node || list_empty(&fib_node->entry_list))
+ if (!fib_node ||
+ fib_node->fib_entry->type != MLXSW_SP_FIB_ENTRY_TYPE_TRAP)
return NULL;
- fib_entry = list_first_entry(&fib_node->entry_list,
- struct mlxsw_sp_fib_entry, list);
- if (fib_entry->type != MLXSW_SP_FIB_ENTRY_TYPE_TRAP)
- return NULL;
-
- return fib_entry;
+ return fib_node->fib_entry;
}
static struct mlxsw_sp_ipip_entry *
@@ -3231,10 +3220,6 @@ mlxsw_sp_nexthop_group_update(struct mlxsw_sp *mlxsw_sp,
return 0;
}
-static bool
-mlxsw_sp_fib_node_entry_is_first(const struct mlxsw_sp_fib_node *fib_node,
- const struct mlxsw_sp_fib_entry *fib_entry);
-
static int
mlxsw_sp_nexthop_fib_entries_update(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_nexthop_group *nh_grp)
@@ -3243,9 +3228,6 @@ mlxsw_sp_nexthop_fib_entries_update(struct mlxsw_sp *mlxsw_sp,
int err;
list_for_each_entry(fib_entry, &nh_grp->fib_list, nexthop_group_node) {
- if (!mlxsw_sp_fib_node_entry_is_first(fib_entry->fib_node,
- fib_entry))
- continue;
err = mlxsw_sp_fib_entry_update(mlxsw_sp, fib_entry);
if (err)
return err;
@@ -3253,24 +3235,6 @@ mlxsw_sp_nexthop_fib_entries_update(struct mlxsw_sp *mlxsw_sp,
return 0;
}
-static void
-mlxsw_sp_fib_entry_offload_refresh(struct mlxsw_sp_fib_entry *fib_entry,
- enum mlxsw_reg_ralue_op op, int err);
-
-static void
-mlxsw_sp_nexthop_fib_entries_refresh(struct mlxsw_sp_nexthop_group *nh_grp)
-{
- enum mlxsw_reg_ralue_op op = MLXSW_REG_RALUE_OP_WRITE_WRITE;
- struct mlxsw_sp_fib_entry *fib_entry;
-
- list_for_each_entry(fib_entry, &nh_grp->fib_list, nexthop_group_node) {
- if (!mlxsw_sp_fib_node_entry_is_first(fib_entry->fib_node,
- fib_entry))
- continue;
- mlxsw_sp_fib_entry_offload_refresh(fib_entry, op, 0);
- }
-}
-
static void mlxsw_sp_adj_grp_size_round_up(u16 *p_adj_grp_size)
{
/* Valid sizes for an adjacency group are:
@@ -3374,6 +3338,73 @@ mlxsw_sp_nexthop_group_rebalance(struct mlxsw_sp_nexthop_group *nh_grp)
}
}
+static struct mlxsw_sp_nexthop *
+mlxsw_sp_rt6_nexthop(struct mlxsw_sp_nexthop_group *nh_grp,
+ const struct mlxsw_sp_rt6 *mlxsw_sp_rt6);
+
+static void
+mlxsw_sp_nexthop4_group_offload_refresh(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_nexthop_group *nh_grp)
+{
+ int i;
+
+ for (i = 0; i < nh_grp->count; i++) {
+ struct mlxsw_sp_nexthop *nh = &nh_grp->nexthops[i];
+
+ if (nh->offloaded)
+ nh->key.fib_nh->fib_nh_flags |= RTNH_F_OFFLOAD;
+ else
+ nh->key.fib_nh->fib_nh_flags &= ~RTNH_F_OFFLOAD;
+ }
+}
+
+static void
+__mlxsw_sp_nexthop6_group_offload_refresh(struct mlxsw_sp_nexthop_group *nh_grp,
+ struct mlxsw_sp_fib6_entry *fib6_entry)
+{
+ struct mlxsw_sp_rt6 *mlxsw_sp_rt6;
+
+ list_for_each_entry(mlxsw_sp_rt6, &fib6_entry->rt6_list, list) {
+ struct fib6_nh *fib6_nh = mlxsw_sp_rt6->rt->fib6_nh;
+ struct mlxsw_sp_nexthop *nh;
+
+ nh = mlxsw_sp_rt6_nexthop(nh_grp, mlxsw_sp_rt6);
+ if (nh && nh->offloaded)
+ fib6_nh->fib_nh_flags |= RTNH_F_OFFLOAD;
+ else
+ fib6_nh->fib_nh_flags &= ~RTNH_F_OFFLOAD;
+ }
+}
+
+static void
+mlxsw_sp_nexthop6_group_offload_refresh(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_nexthop_group *nh_grp)
+{
+ struct mlxsw_sp_fib6_entry *fib6_entry;
+
+ /* Unfortunately, in IPv6 the route and the nexthop are described by
+ * the same struct, so we need to iterate over all the routes using the
+ * nexthop group and set / clear the offload indication for them.
+ */
+ list_for_each_entry(fib6_entry, &nh_grp->fib_list,
+ common.nexthop_group_node)
+ __mlxsw_sp_nexthop6_group_offload_refresh(nh_grp, fib6_entry);
+}
+
+static void
+mlxsw_sp_nexthop_group_offload_refresh(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_nexthop_group *nh_grp)
+{
+ switch (mlxsw_sp_nexthop_group_type(nh_grp)) {
+ case AF_INET:
+ mlxsw_sp_nexthop4_group_offload_refresh(mlxsw_sp, nh_grp);
+ break;
+ case AF_INET6:
+ mlxsw_sp_nexthop6_group_offload_refresh(mlxsw_sp, nh_grp);
+ break;
+ }
+}
+
static void
mlxsw_sp_nexthop_group_refresh(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_nexthop_group *nh_grp)
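The per-family helpers above exist because the two address families keep nexthop state in different places: an IPv4 nexthop owns a dedicated struct fib_nh whose flags can be toggled directly, while IPv6 describes route and nexthop with the same struct, so the IPv6 variant must walk every route that uses the group. A stand-alone sketch of that contrast (illustrative types only, not the driver's):

struct nh4 { int offloaded; int flags; };	/* flag lives on the nexthop */
struct rt6 { int flags; struct rt6 *next; };	/* flag lives on each route */

static void refresh4(struct nh4 *nh) { nh->flags = nh->offloaded; }

static void refresh6(struct rt6 *head, int offloaded)
{
	for (struct rt6 *rt = head; rt; rt = rt->next)
		rt->flags = offloaded;	/* one update per sibling route */
}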
@@ -3447,6 +3478,8 @@ mlxsw_sp_nexthop_group_refresh(struct mlxsw_sp *mlxsw_sp,
goto set_trap;
}
+ mlxsw_sp_nexthop_group_offload_refresh(mlxsw_sp, nh_grp);
+
if (!old_adj_index_valid) {
/* The trap was set for fib entries, so we have to call
* fib entry update to unset it and use adjacency index.
@@ -3468,9 +3501,6 @@ mlxsw_sp_nexthop_group_refresh(struct mlxsw_sp *mlxsw_sp,
goto set_trap;
}
- /* Offload state within the group changed, so update the flags. */
- mlxsw_sp_nexthop_fib_entries_refresh(nh_grp);
-
return;
set_trap:
@@ -3483,6 +3513,7 @@ set_trap:
err = mlxsw_sp_nexthop_fib_entries_update(mlxsw_sp, nh_grp);
if (err)
dev_warn(mlxsw_sp->bus_info->dev, "Failed to set traps for fib entries.\n");
+ mlxsw_sp_nexthop_group_offload_refresh(mlxsw_sp, nh_grp);
if (old_adj_index_valid)
mlxsw_sp_kvdl_free(mlxsw_sp, MLXSW_SP_KVDL_ENTRY_TYPE_ADJ,
nh_grp->ecmp_size, nh_grp->adj_index);
@@ -3845,7 +3876,7 @@ static void mlxsw_sp_nexthop4_event(struct mlxsw_sp *mlxsw_sp,
key.fib_nh = fib_nh;
nh = mlxsw_sp_nexthop_lookup(mlxsw_sp, key);
- if (WARN_ON_ONCE(!nh))
+ if (!nh)
return;
switch (event) {
@@ -4065,131 +4096,128 @@ mlxsw_sp_rt6_nexthop(struct mlxsw_sp_nexthop_group *nh_grp,
}
static void
-mlxsw_sp_fib4_entry_offload_set(struct mlxsw_sp_fib_entry *fib_entry)
+mlxsw_sp_fib4_entry_hw_flags_set(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_fib_entry *fib_entry)
{
- struct mlxsw_sp_nexthop_group *nh_grp = fib_entry->nh_group;
- int i;
-
- if (fib_entry->type == MLXSW_SP_FIB_ENTRY_TYPE_LOCAL ||
- fib_entry->type == MLXSW_SP_FIB_ENTRY_TYPE_BLACKHOLE ||
- fib_entry->type == MLXSW_SP_FIB_ENTRY_TYPE_IPIP_DECAP ||
- fib_entry->type == MLXSW_SP_FIB_ENTRY_TYPE_NVE_DECAP) {
- nh_grp->nexthops->key.fib_nh->fib_nh_flags |= RTNH_F_OFFLOAD;
- return;
- }
-
- for (i = 0; i < nh_grp->count; i++) {
- struct mlxsw_sp_nexthop *nh = &nh_grp->nexthops[i];
+ struct fib_info *fi = mlxsw_sp_nexthop4_group_fi(fib_entry->nh_group);
+ u32 *p_dst = (u32 *) fib_entry->fib_node->key.addr;
+ int dst_len = fib_entry->fib_node->key.prefix_len;
+ struct mlxsw_sp_fib4_entry *fib4_entry;
+ struct fib_rt_info fri;
+ bool should_offload;
- if (nh->offloaded)
- nh->key.fib_nh->fib_nh_flags |= RTNH_F_OFFLOAD;
- else
- nh->key.fib_nh->fib_nh_flags &= ~RTNH_F_OFFLOAD;
- }
+ should_offload = mlxsw_sp_fib_entry_should_offload(fib_entry);
+ fib4_entry = container_of(fib_entry, struct mlxsw_sp_fib4_entry,
+ common);
+ fri.fi = fi;
+ fri.tb_id = fib4_entry->tb_id;
+ fri.dst = cpu_to_be32(*p_dst);
+ fri.dst_len = dst_len;
+ fri.tos = fib4_entry->tos;
+ fri.type = fib4_entry->type;
+ fri.offload = should_offload;
+ fri.trap = !should_offload;
+ fib_alias_hw_flags_set(mlxsw_sp_net(mlxsw_sp), &fri);
}
static void
-mlxsw_sp_fib4_entry_offload_unset(struct mlxsw_sp_fib_entry *fib_entry)
+mlxsw_sp_fib4_entry_hw_flags_clear(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_fib_entry *fib_entry)
{
- struct mlxsw_sp_nexthop_group *nh_grp = fib_entry->nh_group;
- int i;
-
- if (!list_is_singular(&nh_grp->fib_list))
- return;
-
- for (i = 0; i < nh_grp->count; i++) {
- struct mlxsw_sp_nexthop *nh = &nh_grp->nexthops[i];
+ struct fib_info *fi = mlxsw_sp_nexthop4_group_fi(fib_entry->nh_group);
+ u32 *p_dst = (u32 *) fib_entry->fib_node->key.addr;
+ int dst_len = fib_entry->fib_node->key.prefix_len;
+ struct mlxsw_sp_fib4_entry *fib4_entry;
+ struct fib_rt_info fri;
- nh->key.fib_nh->fib_nh_flags &= ~RTNH_F_OFFLOAD;
- }
+ fib4_entry = container_of(fib_entry, struct mlxsw_sp_fib4_entry,
+ common);
+ fri.fi = fi;
+ fri.tb_id = fib4_entry->tb_id;
+ fri.dst = cpu_to_be32(*p_dst);
+ fri.dst_len = dst_len;
+ fri.tos = fib4_entry->tos;
+ fri.type = fib4_entry->type;
+ fri.offload = false;
+ fri.trap = false;
+ fib_alias_hw_flags_set(mlxsw_sp_net(mlxsw_sp), &fri);
}
static void
-mlxsw_sp_fib6_entry_offload_set(struct mlxsw_sp_fib_entry *fib_entry)
+mlxsw_sp_fib6_entry_hw_flags_set(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_fib_entry *fib_entry)
{
struct mlxsw_sp_fib6_entry *fib6_entry;
struct mlxsw_sp_rt6 *mlxsw_sp_rt6;
+ bool should_offload;
+
+ should_offload = mlxsw_sp_fib_entry_should_offload(fib_entry);
+ /* In IPv6 a multipath route is represented using multiple routes, so
+ * we need to set the flags on all of them.
+ */
fib6_entry = container_of(fib_entry, struct mlxsw_sp_fib6_entry,
common);
-
- if (fib_entry->type == MLXSW_SP_FIB_ENTRY_TYPE_LOCAL ||
- fib_entry->type == MLXSW_SP_FIB_ENTRY_TYPE_BLACKHOLE) {
- list_first_entry(&fib6_entry->rt6_list, struct mlxsw_sp_rt6,
- list)->rt->fib6_nh->fib_nh_flags |= RTNH_F_OFFLOAD;
- return;
- }
-
- list_for_each_entry(mlxsw_sp_rt6, &fib6_entry->rt6_list, list) {
- struct mlxsw_sp_nexthop_group *nh_grp = fib_entry->nh_group;
- struct fib6_nh *fib6_nh = mlxsw_sp_rt6->rt->fib6_nh;
- struct mlxsw_sp_nexthop *nh;
-
- nh = mlxsw_sp_rt6_nexthop(nh_grp, mlxsw_sp_rt6);
- if (nh && nh->offloaded)
- fib6_nh->fib_nh_flags |= RTNH_F_OFFLOAD;
- else
- fib6_nh->fib_nh_flags &= ~RTNH_F_OFFLOAD;
- }
+ list_for_each_entry(mlxsw_sp_rt6, &fib6_entry->rt6_list, list)
+ fib6_info_hw_flags_set(mlxsw_sp_rt6->rt, should_offload,
+ !should_offload);
}
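Note the invariant encoded here and in the IPv4 variant above: a route that cannot be offloaded is reported as trapping, so while an entry is programmed, userspace sees exactly one of the two hardware flags set. In miniature (illustrative stand-alone C, not the driver's types):

struct hw_flags { int offload, trap; };

/* offload and trap are mutually exclusive while the route is in HW. */
static struct hw_flags flags_for(int should_offload)
{
	struct hw_flags f = { .offload = should_offload,
			      .trap = !should_offload };
	return f;
}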
static void
-mlxsw_sp_fib6_entry_offload_unset(struct mlxsw_sp_fib_entry *fib_entry)
+mlxsw_sp_fib6_entry_hw_flags_clear(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_fib_entry *fib_entry)
{
struct mlxsw_sp_fib6_entry *fib6_entry;
struct mlxsw_sp_rt6 *mlxsw_sp_rt6;
fib6_entry = container_of(fib_entry, struct mlxsw_sp_fib6_entry,
common);
- list_for_each_entry(mlxsw_sp_rt6, &fib6_entry->rt6_list, list) {
- struct fib6_info *rt = mlxsw_sp_rt6->rt;
-
- rt->fib6_nh->fib_nh_flags &= ~RTNH_F_OFFLOAD;
- }
+ list_for_each_entry(mlxsw_sp_rt6, &fib6_entry->rt6_list, list)
+ fib6_info_hw_flags_set(mlxsw_sp_rt6->rt, false, false);
}
-static void mlxsw_sp_fib_entry_offload_set(struct mlxsw_sp_fib_entry *fib_entry)
+static void
+mlxsw_sp_fib_entry_hw_flags_set(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_fib_entry *fib_entry)
{
switch (fib_entry->fib_node->fib->proto) {
case MLXSW_SP_L3_PROTO_IPV4:
- mlxsw_sp_fib4_entry_offload_set(fib_entry);
+ mlxsw_sp_fib4_entry_hw_flags_set(mlxsw_sp, fib_entry);
break;
case MLXSW_SP_L3_PROTO_IPV6:
- mlxsw_sp_fib6_entry_offload_set(fib_entry);
+ mlxsw_sp_fib6_entry_hw_flags_set(mlxsw_sp, fib_entry);
break;
}
}
static void
-mlxsw_sp_fib_entry_offload_unset(struct mlxsw_sp_fib_entry *fib_entry)
+mlxsw_sp_fib_entry_hw_flags_clear(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_fib_entry *fib_entry)
{
switch (fib_entry->fib_node->fib->proto) {
case MLXSW_SP_L3_PROTO_IPV4:
- mlxsw_sp_fib4_entry_offload_unset(fib_entry);
+ mlxsw_sp_fib4_entry_hw_flags_clear(mlxsw_sp, fib_entry);
break;
case MLXSW_SP_L3_PROTO_IPV6:
- mlxsw_sp_fib6_entry_offload_unset(fib_entry);
+ mlxsw_sp_fib6_entry_hw_flags_clear(mlxsw_sp, fib_entry);
break;
}
}
static void
-mlxsw_sp_fib_entry_offload_refresh(struct mlxsw_sp_fib_entry *fib_entry,
- enum mlxsw_reg_ralue_op op, int err)
+mlxsw_sp_fib_entry_hw_flags_refresh(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_fib_entry *fib_entry,
+ enum mlxsw_reg_ralue_op op)
{
switch (op) {
- case MLXSW_REG_RALUE_OP_WRITE_DELETE:
- return mlxsw_sp_fib_entry_offload_unset(fib_entry);
case MLXSW_REG_RALUE_OP_WRITE_WRITE:
- if (err)
- return;
- if (mlxsw_sp_fib_entry_should_offload(fib_entry))
- mlxsw_sp_fib_entry_offload_set(fib_entry);
- else
- mlxsw_sp_fib_entry_offload_unset(fib_entry);
- return;
+ mlxsw_sp_fib_entry_hw_flags_set(mlxsw_sp, fib_entry);
+ break;
+ case MLXSW_REG_RALUE_OP_WRITE_DELETE:
+ mlxsw_sp_fib_entry_hw_flags_clear(mlxsw_sp, fib_entry);
+ break;
default:
- return;
+ break;
}
}
@@ -4416,7 +4444,10 @@ static int mlxsw_sp_fib_entry_op(struct mlxsw_sp *mlxsw_sp,
{
int err = __mlxsw_sp_fib_entry_op(mlxsw_sp, fib_entry, op);
- mlxsw_sp_fib_entry_offload_refresh(fib_entry, op, err);
+ if (err)
+ return err;
+
+ mlxsw_sp_fib_entry_hw_flags_refresh(mlxsw_sp, fib_entry, op);
return err;
}
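With the refresh moved behind the error check, user-visible flags change only after the device actually accepted the operation; a failed write can no longer leave a route advertised as offloaded. The discipline, as a stand-alone stub sketch (hw_program() and flags_refresh() are placeholders, not driver functions):

struct entry;
int hw_program(struct entry *e, int op);
void flags_refresh(struct entry *e, int op);

static int entry_op(struct entry *e, int op)
{
	int err = hw_program(e, op);	/* talk to the device first */

	if (err)
		return err;		/* flags stay untouched on failure */

	flags_refresh(e, op);		/* only reached on success */
	return 0;
}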
@@ -4491,6 +4522,19 @@ mlxsw_sp_fib4_entry_type_set(struct mlxsw_sp *mlxsw_sp,
}
}
+static void
+mlxsw_sp_fib4_entry_type_unset(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_fib_entry *fib_entry)
+{
+ switch (fib_entry->type) {
+ case MLXSW_SP_FIB_ENTRY_TYPE_IPIP_DECAP:
+ mlxsw_sp_fib_entry_decap_fini(mlxsw_sp, fib_entry);
+ break;
+ default:
+ break;
+ }
+}
+
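Factoring the IPIP decap teardown into a type_unset() helper gives the constructor's unwind path (err_fib4_entry_type_set, below) and the destructor a common way to undo whatever type_set() established. The goto-unwind symmetry in miniature (stub names, stand-alone sketch, not the driver's functions):

struct obj;
int type_set(struct obj *o);
void type_unset(struct obj *o);
int group_get(struct obj *o);

static int obj_init(struct obj *o)
{
	int err;

	err = type_set(o);
	if (err)
		return err;

	err = group_get(o);
	if (err)
		goto err_group_get;

	return 0;

err_group_get:
	type_unset(o);	/* mirror type_set() on the unwind path */
	return err;
}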
static struct mlxsw_sp_fib4_entry *
mlxsw_sp_fib4_entry_create(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_fib_node *fib_node,
@@ -4523,6 +4567,7 @@ mlxsw_sp_fib4_entry_create(struct mlxsw_sp *mlxsw_sp,
return fib4_entry;
err_nexthop4_group_get:
+ mlxsw_sp_fib4_entry_type_unset(mlxsw_sp, fib_entry);
err_fib4_entry_type_set:
kfree(fib4_entry);
return ERR_PTR(err);
@@ -4532,6 +4577,7 @@ static void mlxsw_sp_fib4_entry_destroy(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_fib4_entry *fib4_entry)
{
mlxsw_sp_nexthop4_group_put(mlxsw_sp, &fib4_entry->common);
+ mlxsw_sp_fib4_entry_type_unset(mlxsw_sp, &fib4_entry->common);
kfree(fib4_entry);
}
@@ -4555,15 +4601,14 @@ mlxsw_sp_fib4_entry_lookup(struct mlxsw_sp *mlxsw_sp,
if (!fib_node)
return NULL;
- list_for_each_entry(fib4_entry, &fib_node->entry_list, common.list) {
- if (fib4_entry->tb_id == fen_info->tb_id &&
- fib4_entry->tos == fen_info->tos &&
- fib4_entry->type == fen_info->type &&
- mlxsw_sp_nexthop4_group_fi(fib4_entry->common.nh_group) ==
- fen_info->fi) {
- return fib4_entry;
- }
- }
+ fib4_entry = container_of(fib_node->fib_entry,
+ struct mlxsw_sp_fib4_entry, common);
+ if (fib4_entry->tb_id == fen_info->tb_id &&
+ fib4_entry->tos == fen_info->tos &&
+ fib4_entry->type == fen_info->type &&
+ mlxsw_sp_nexthop4_group_fi(fib4_entry->common.nh_group) ==
+ fen_info->fi)
+ return fib4_entry;
return NULL;
}
@@ -4611,7 +4656,6 @@ mlxsw_sp_fib_node_create(struct mlxsw_sp_fib *fib, const void *addr,
if (!fib_node)
return NULL;
- INIT_LIST_HEAD(&fib_node->entry_list);
list_add(&fib_node->list, &fib->node_list);
memcpy(fib_node->key.addr, addr, addr_len);
fib_node->key.prefix_len = prefix_len;
@@ -4622,18 +4666,9 @@ mlxsw_sp_fib_node_create(struct mlxsw_sp_fib *fib, const void *addr,
static void mlxsw_sp_fib_node_destroy(struct mlxsw_sp_fib_node *fib_node)
{
list_del(&fib_node->list);
- WARN_ON(!list_empty(&fib_node->entry_list));
kfree(fib_node);
}
-static bool
-mlxsw_sp_fib_node_entry_is_first(const struct mlxsw_sp_fib_node *fib_node,
- const struct mlxsw_sp_fib_entry *fib_entry)
-{
- return list_first_entry(&fib_node->entry_list,
- struct mlxsw_sp_fib_entry, list) == fib_entry;
-}
-
static int mlxsw_sp_fib_lpm_tree_link(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_fib_node *fib_node)
{
@@ -4773,200 +4808,48 @@ static void mlxsw_sp_fib_node_put(struct mlxsw_sp *mlxsw_sp,
{
struct mlxsw_sp_vr *vr = fib_node->fib->vr;
- if (!list_empty(&fib_node->entry_list))
+ if (fib_node->fib_entry)
return;
mlxsw_sp_fib_node_fini(mlxsw_sp, fib_node);
mlxsw_sp_fib_node_destroy(fib_node);
mlxsw_sp_vr_put(mlxsw_sp, vr);
}
-static struct mlxsw_sp_fib4_entry *
-mlxsw_sp_fib4_node_entry_find(const struct mlxsw_sp_fib_node *fib_node,
- const struct mlxsw_sp_fib4_entry *new4_entry)
-{
- struct mlxsw_sp_fib4_entry *fib4_entry;
-
- list_for_each_entry(fib4_entry, &fib_node->entry_list, common.list) {
- if (fib4_entry->tb_id > new4_entry->tb_id)
- continue;
- if (fib4_entry->tb_id != new4_entry->tb_id)
- break;
- if (fib4_entry->tos > new4_entry->tos)
- continue;
- if (fib4_entry->prio >= new4_entry->prio ||
- fib4_entry->tos < new4_entry->tos)
- return fib4_entry;
- }
-
- return NULL;
-}
-
-static int
-mlxsw_sp_fib4_node_list_append(struct mlxsw_sp_fib4_entry *fib4_entry,
- struct mlxsw_sp_fib4_entry *new4_entry)
-{
- struct mlxsw_sp_fib_node *fib_node;
-
- if (WARN_ON(!fib4_entry))
- return -EINVAL;
-
- fib_node = fib4_entry->common.fib_node;
- list_for_each_entry_from(fib4_entry, &fib_node->entry_list,
- common.list) {
- if (fib4_entry->tb_id != new4_entry->tb_id ||
- fib4_entry->tos != new4_entry->tos ||
- fib4_entry->prio != new4_entry->prio)
- break;
- }
-
- list_add_tail(&new4_entry->common.list, &fib4_entry->common.list);
- return 0;
-}
-
-static int
-mlxsw_sp_fib4_node_list_insert(struct mlxsw_sp_fib4_entry *new4_entry,
- bool replace, bool append)
-{
- struct mlxsw_sp_fib_node *fib_node = new4_entry->common.fib_node;
- struct mlxsw_sp_fib4_entry *fib4_entry;
-
- fib4_entry = mlxsw_sp_fib4_node_entry_find(fib_node, new4_entry);
-
- if (append)
- return mlxsw_sp_fib4_node_list_append(fib4_entry, new4_entry);
- if (replace && WARN_ON(!fib4_entry))
- return -EINVAL;
-
- /* Insert new entry before replaced one, so that we can later
- * remove the second.
- */
- if (fib4_entry) {
- list_add_tail(&new4_entry->common.list,
- &fib4_entry->common.list);
- } else {
- struct mlxsw_sp_fib4_entry *last;
-
- list_for_each_entry(last, &fib_node->entry_list, common.list) {
- if (new4_entry->tb_id > last->tb_id)
- break;
- fib4_entry = last;
- }
-
- if (fib4_entry)
- list_add(&new4_entry->common.list,
- &fib4_entry->common.list);
- else
- list_add(&new4_entry->common.list,
- &fib_node->entry_list);
- }
-
- return 0;
-}
-
-static void
-mlxsw_sp_fib4_node_list_remove(struct mlxsw_sp_fib4_entry *fib4_entry)
-{
- list_del(&fib4_entry->common.list);
-}
-
-static int mlxsw_sp_fib_node_entry_add(struct mlxsw_sp *mlxsw_sp,
- struct mlxsw_sp_fib_entry *fib_entry)
-{
- struct mlxsw_sp_fib_node *fib_node = fib_entry->fib_node;
-
- if (!mlxsw_sp_fib_node_entry_is_first(fib_node, fib_entry))
- return 0;
-
- /* To prevent packet loss, overwrite the previously offloaded
- * entry.
- */
- if (!list_is_singular(&fib_node->entry_list)) {
- enum mlxsw_reg_ralue_op op = MLXSW_REG_RALUE_OP_WRITE_DELETE;
- struct mlxsw_sp_fib_entry *n = list_next_entry(fib_entry, list);
-
- mlxsw_sp_fib_entry_offload_refresh(n, op, 0);
- }
-
- return mlxsw_sp_fib_entry_update(mlxsw_sp, fib_entry);
-}
-
-static void mlxsw_sp_fib_node_entry_del(struct mlxsw_sp *mlxsw_sp,
+static int mlxsw_sp_fib_node_entry_link(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_fib_entry *fib_entry)
{
struct mlxsw_sp_fib_node *fib_node = fib_entry->fib_node;
-
- if (!mlxsw_sp_fib_node_entry_is_first(fib_node, fib_entry))
- return;
-
- /* Promote the next entry by overwriting the deleted entry */
- if (!list_is_singular(&fib_node->entry_list)) {
- struct mlxsw_sp_fib_entry *n = list_next_entry(fib_entry, list);
- enum mlxsw_reg_ralue_op op = MLXSW_REG_RALUE_OP_WRITE_DELETE;
-
- mlxsw_sp_fib_entry_update(mlxsw_sp, n);
- mlxsw_sp_fib_entry_offload_refresh(fib_entry, op, 0);
- return;
- }
-
- mlxsw_sp_fib_entry_del(mlxsw_sp, fib_entry);
-}
-
-static int mlxsw_sp_fib4_node_entry_link(struct mlxsw_sp *mlxsw_sp,
- struct mlxsw_sp_fib4_entry *fib4_entry,
- bool replace, bool append)
-{
int err;
- err = mlxsw_sp_fib4_node_list_insert(fib4_entry, replace, append);
- if (err)
- return err;
+ fib_node->fib_entry = fib_entry;
- err = mlxsw_sp_fib_node_entry_add(mlxsw_sp, &fib4_entry->common);
+ err = mlxsw_sp_fib_entry_update(mlxsw_sp, fib_entry);
if (err)
- goto err_fib_node_entry_add;
+ goto err_fib_entry_update;
return 0;
-err_fib_node_entry_add:
- mlxsw_sp_fib4_node_list_remove(fib4_entry);
+err_fib_entry_update:
+ fib_node->fib_entry = NULL;
return err;
}
static void
-mlxsw_sp_fib4_node_entry_unlink(struct mlxsw_sp *mlxsw_sp,
- struct mlxsw_sp_fib4_entry *fib4_entry)
-{
- mlxsw_sp_fib_node_entry_del(mlxsw_sp, &fib4_entry->common);
- mlxsw_sp_fib4_node_list_remove(fib4_entry);
-
- if (fib4_entry->common.type == MLXSW_SP_FIB_ENTRY_TYPE_IPIP_DECAP)
- mlxsw_sp_fib_entry_decap_fini(mlxsw_sp, &fib4_entry->common);
-}
-
-static void mlxsw_sp_fib4_entry_replace(struct mlxsw_sp *mlxsw_sp,
- struct mlxsw_sp_fib4_entry *fib4_entry,
- bool replace)
+mlxsw_sp_fib_node_entry_unlink(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_fib_entry *fib_entry)
{
- struct mlxsw_sp_fib_node *fib_node = fib4_entry->common.fib_node;
- struct mlxsw_sp_fib4_entry *replaced;
-
- if (!replace)
- return;
-
- /* We inserted the new entry before replaced one */
- replaced = list_next_entry(fib4_entry, common.list);
+ struct mlxsw_sp_fib_node *fib_node = fib_entry->fib_node;
- mlxsw_sp_fib4_node_entry_unlink(mlxsw_sp, replaced);
- mlxsw_sp_fib4_entry_destroy(mlxsw_sp, replaced);
- mlxsw_sp_fib_node_put(mlxsw_sp, fib_node);
+ mlxsw_sp_fib_entry_del(mlxsw_sp, fib_entry);
+ fib_node->fib_entry = NULL;
}
static int
-mlxsw_sp_router_fib4_add(struct mlxsw_sp *mlxsw_sp,
- const struct fib_entry_notifier_info *fen_info,
- bool replace, bool append)
+mlxsw_sp_router_fib4_replace(struct mlxsw_sp *mlxsw_sp,
+ const struct fib_entry_notifier_info *fen_info)
{
- struct mlxsw_sp_fib4_entry *fib4_entry;
+ struct mlxsw_sp_fib4_entry *fib4_entry, *fib4_replaced;
+ struct mlxsw_sp_fib_entry *replaced;
struct mlxsw_sp_fib_node *fib_node;
int err;
@@ -4989,18 +4872,26 @@ mlxsw_sp_router_fib4_add(struct mlxsw_sp *mlxsw_sp,
goto err_fib4_entry_create;
}
- err = mlxsw_sp_fib4_node_entry_link(mlxsw_sp, fib4_entry, replace,
- append);
+ replaced = fib_node->fib_entry;
+ err = mlxsw_sp_fib_node_entry_link(mlxsw_sp, &fib4_entry->common);
if (err) {
dev_warn(mlxsw_sp->bus_info->dev, "Failed to link FIB entry to node\n");
- goto err_fib4_node_entry_link;
+ goto err_fib_node_entry_link;
}
- mlxsw_sp_fib4_entry_replace(mlxsw_sp, fib4_entry, replace);
+ /* Nothing to replace */
+ if (!replaced)
+ return 0;
+
+ mlxsw_sp_fib_entry_hw_flags_clear(mlxsw_sp, replaced);
+ fib4_replaced = container_of(replaced, struct mlxsw_sp_fib4_entry,
+ common);
+ mlxsw_sp_fib4_entry_destroy(mlxsw_sp, fib4_replaced);
return 0;
-err_fib4_node_entry_link:
+err_fib_node_entry_link:
+ fib_node->fib_entry = replaced;
mlxsw_sp_fib4_entry_destroy(mlxsw_sp, fib4_entry);
err_fib4_entry_create:
mlxsw_sp_fib_node_put(mlxsw_sp, fib_node);
@@ -5021,7 +4912,7 @@ static void mlxsw_sp_router_fib4_del(struct mlxsw_sp *mlxsw_sp,
return;
fib_node = fib4_entry->common.fib_node;
- mlxsw_sp_fib4_node_entry_unlink(mlxsw_sp, fib4_entry);
+ mlxsw_sp_fib_node_entry_unlink(mlxsw_sp, &fib4_entry->common);
mlxsw_sp_fib4_entry_destroy(mlxsw_sp, fib4_entry);
mlxsw_sp_fib_node_put(mlxsw_sp, fib_node);
}
@@ -5083,13 +4974,6 @@ static void mlxsw_sp_rt6_destroy(struct mlxsw_sp_rt6 *mlxsw_sp_rt6)
kfree(mlxsw_sp_rt6);
}
-static bool mlxsw_sp_fib6_rt_can_mp(const struct fib6_info *rt)
-{
- /* RTF_CACHE routes are ignored */
- return !(rt->fib6_flags & RTF_ADDRCONF) &&
- rt->fib6_nh->fib_nh_gw_family;
-}
-
static struct fib6_info *
mlxsw_sp_fib6_entry_rt(const struct mlxsw_sp_fib6_entry *fib6_entry)
{
@@ -5097,37 +4981,6 @@ mlxsw_sp_fib6_entry_rt(const struct mlxsw_sp_fib6_entry *fib6_entry)
list)->rt;
}
-static struct mlxsw_sp_fib6_entry *
-mlxsw_sp_fib6_node_mp_entry_find(const struct mlxsw_sp_fib_node *fib_node,
- const struct fib6_info *nrt, bool replace)
-{
- struct mlxsw_sp_fib6_entry *fib6_entry;
-
- if (!mlxsw_sp_fib6_rt_can_mp(nrt) || replace)
- return NULL;
-
- list_for_each_entry(fib6_entry, &fib_node->entry_list, common.list) {
- struct fib6_info *rt = mlxsw_sp_fib6_entry_rt(fib6_entry);
-
- /* RT6_TABLE_LOCAL and RT6_TABLE_MAIN share the same
- * virtual router.
- */
- if (rt->fib6_table->tb6_id > nrt->fib6_table->tb6_id)
- continue;
- if (rt->fib6_table->tb6_id != nrt->fib6_table->tb6_id)
- break;
- if (rt->fib6_metric < nrt->fib6_metric)
- continue;
- if (rt->fib6_metric == nrt->fib6_metric &&
- mlxsw_sp_fib6_rt_can_mp(rt))
- return fib6_entry;
- if (rt->fib6_metric > nrt->fib6_metric)
- break;
- }
-
- return NULL;
-}
-
static struct mlxsw_sp_rt6 *
mlxsw_sp_fib6_entry_rt_find(const struct mlxsw_sp_fib6_entry *fib6_entry,
const struct fib6_info *rt)
@@ -5313,6 +5166,11 @@ static int mlxsw_sp_nexthop6_group_get(struct mlxsw_sp *mlxsw_sp,
&nh_grp->fib_list);
fib6_entry->common.nh_group = nh_grp;
+ /* The route and the nexthop are described by the same struct, so we
+ * need to update the nexthop offload indication for the new route.
+ */
+ __mlxsw_sp_nexthop6_group_offload_refresh(nh_grp, fib6_entry);
+
return 0;
}
@@ -5345,16 +5203,16 @@ mlxsw_sp_nexthop6_group_update(struct mlxsw_sp *mlxsw_sp,
* currently associated with it in the device's table is that
* of the old group. Start using the new one instead.
*/
- err = mlxsw_sp_fib_node_entry_add(mlxsw_sp, &fib6_entry->common);
+ err = mlxsw_sp_fib_entry_update(mlxsw_sp, &fib6_entry->common);
if (err)
- goto err_fib_node_entry_add;
+ goto err_fib_entry_update;
if (list_empty(&old_nh_grp->fib_list))
mlxsw_sp_nexthop6_group_destroy(mlxsw_sp, old_nh_grp);
return 0;
-err_fib_node_entry_add:
+err_fib_entry_update:
mlxsw_sp_nexthop6_group_put(mlxsw_sp, &fib6_entry->common);
err_nexthop6_group_get:
list_add_tail(&fib6_entry->common.nexthop_group_node,
@@ -5519,112 +5377,13 @@ static void mlxsw_sp_fib6_entry_destroy(struct mlxsw_sp *mlxsw_sp,
}
static struct mlxsw_sp_fib6_entry *
-mlxsw_sp_fib6_node_entry_find(const struct mlxsw_sp_fib_node *fib_node,
- const struct fib6_info *nrt, bool replace)
-{
- struct mlxsw_sp_fib6_entry *fib6_entry, *fallback = NULL;
-
- list_for_each_entry(fib6_entry, &fib_node->entry_list, common.list) {
- struct fib6_info *rt = mlxsw_sp_fib6_entry_rt(fib6_entry);
-
- if (rt->fib6_table->tb6_id > nrt->fib6_table->tb6_id)
- continue;
- if (rt->fib6_table->tb6_id != nrt->fib6_table->tb6_id)
- break;
- if (replace && rt->fib6_metric == nrt->fib6_metric) {
- if (mlxsw_sp_fib6_rt_can_mp(rt) ==
- mlxsw_sp_fib6_rt_can_mp(nrt))
- return fib6_entry;
- if (mlxsw_sp_fib6_rt_can_mp(nrt))
- fallback = fallback ?: fib6_entry;
- }
- if (rt->fib6_metric > nrt->fib6_metric)
- return fallback ?: fib6_entry;
- }
-
- return fallback;
-}
-
-static int
-mlxsw_sp_fib6_node_list_insert(struct mlxsw_sp_fib6_entry *new6_entry,
- bool *p_replace)
-{
- struct mlxsw_sp_fib_node *fib_node = new6_entry->common.fib_node;
- struct fib6_info *nrt = mlxsw_sp_fib6_entry_rt(new6_entry);
- struct mlxsw_sp_fib6_entry *fib6_entry;
-
- fib6_entry = mlxsw_sp_fib6_node_entry_find(fib_node, nrt, *p_replace);
-
- if (*p_replace && !fib6_entry)
- *p_replace = false;
-
- if (fib6_entry) {
- list_add_tail(&new6_entry->common.list,
- &fib6_entry->common.list);
- } else {
- struct mlxsw_sp_fib6_entry *last;
-
- list_for_each_entry(last, &fib_node->entry_list, common.list) {
- struct fib6_info *rt = mlxsw_sp_fib6_entry_rt(last);
-
- if (nrt->fib6_table->tb6_id > rt->fib6_table->tb6_id)
- break;
- fib6_entry = last;
- }
-
- if (fib6_entry)
- list_add(&new6_entry->common.list,
- &fib6_entry->common.list);
- else
- list_add(&new6_entry->common.list,
- &fib_node->entry_list);
- }
-
- return 0;
-}
-
-static void
-mlxsw_sp_fib6_node_list_remove(struct mlxsw_sp_fib6_entry *fib6_entry)
-{
- list_del(&fib6_entry->common.list);
-}
-
-static int mlxsw_sp_fib6_node_entry_link(struct mlxsw_sp *mlxsw_sp,
- struct mlxsw_sp_fib6_entry *fib6_entry,
- bool *p_replace)
-{
- int err;
-
- err = mlxsw_sp_fib6_node_list_insert(fib6_entry, p_replace);
- if (err)
- return err;
-
- err = mlxsw_sp_fib_node_entry_add(mlxsw_sp, &fib6_entry->common);
- if (err)
- goto err_fib_node_entry_add;
-
- return 0;
-
-err_fib_node_entry_add:
- mlxsw_sp_fib6_node_list_remove(fib6_entry);
- return err;
-}
-
-static void
-mlxsw_sp_fib6_node_entry_unlink(struct mlxsw_sp *mlxsw_sp,
- struct mlxsw_sp_fib6_entry *fib6_entry)
-{
- mlxsw_sp_fib_node_entry_del(mlxsw_sp, &fib6_entry->common);
- mlxsw_sp_fib6_node_list_remove(fib6_entry);
-}
-
-static struct mlxsw_sp_fib6_entry *
mlxsw_sp_fib6_entry_lookup(struct mlxsw_sp *mlxsw_sp,
const struct fib6_info *rt)
{
struct mlxsw_sp_fib6_entry *fib6_entry;
struct mlxsw_sp_fib_node *fib_node;
struct mlxsw_sp_fib *fib;
+ struct fib6_info *cmp_rt;
struct mlxsw_sp_vr *vr;
vr = mlxsw_sp_vr_find(mlxsw_sp, rt->fib6_table->tb6_id);
@@ -5638,40 +5397,23 @@ mlxsw_sp_fib6_entry_lookup(struct mlxsw_sp *mlxsw_sp,
if (!fib_node)
return NULL;
- list_for_each_entry(fib6_entry, &fib_node->entry_list, common.list) {
- struct fib6_info *iter_rt = mlxsw_sp_fib6_entry_rt(fib6_entry);
-
- if (rt->fib6_table->tb6_id == iter_rt->fib6_table->tb6_id &&
- rt->fib6_metric == iter_rt->fib6_metric &&
- mlxsw_sp_fib6_entry_rt_find(fib6_entry, rt))
- return fib6_entry;
- }
+ fib6_entry = container_of(fib_node->fib_entry,
+ struct mlxsw_sp_fib6_entry, common);
+ cmp_rt = mlxsw_sp_fib6_entry_rt(fib6_entry);
+ if (rt->fib6_table->tb6_id == cmp_rt->fib6_table->tb6_id &&
+ rt->fib6_metric == cmp_rt->fib6_metric &&
+ mlxsw_sp_fib6_entry_rt_find(fib6_entry, rt))
+ return fib6_entry;
return NULL;
}
-static void mlxsw_sp_fib6_entry_replace(struct mlxsw_sp *mlxsw_sp,
- struct mlxsw_sp_fib6_entry *fib6_entry,
- bool replace)
-{
- struct mlxsw_sp_fib_node *fib_node = fib6_entry->common.fib_node;
- struct mlxsw_sp_fib6_entry *replaced;
-
- if (!replace)
- return;
-
- replaced = list_next_entry(fib6_entry, common.list);
-
- mlxsw_sp_fib6_node_entry_unlink(mlxsw_sp, replaced);
- mlxsw_sp_fib6_entry_destroy(mlxsw_sp, replaced);
- mlxsw_sp_fib_node_put(mlxsw_sp, fib_node);
-}
-
-static int mlxsw_sp_router_fib6_add(struct mlxsw_sp *mlxsw_sp,
- struct fib6_info **rt_arr,
- unsigned int nrt6, bool replace)
+static int mlxsw_sp_router_fib6_replace(struct mlxsw_sp *mlxsw_sp,
+ struct fib6_info **rt_arr,
+ unsigned int nrt6)
{
- struct mlxsw_sp_fib6_entry *fib6_entry;
+ struct mlxsw_sp_fib6_entry *fib6_entry, *fib6_replaced;
+ struct mlxsw_sp_fib_entry *replaced;
struct mlxsw_sp_fib_node *fib_node;
struct fib6_info *rt = rt_arr[0];
int err;
@@ -5693,18 +5435,6 @@ static int mlxsw_sp_router_fib6_add(struct mlxsw_sp *mlxsw_sp,
if (IS_ERR(fib_node))
return PTR_ERR(fib_node);
- /* Before creating a new entry, try to append route to an existing
- * multipath entry.
- */
- fib6_entry = mlxsw_sp_fib6_node_mp_entry_find(fib_node, rt, replace);
- if (fib6_entry) {
- err = mlxsw_sp_fib6_entry_nexthop_add(mlxsw_sp, fib6_entry,
- rt_arr, nrt6);
- if (err)
- goto err_fib6_entry_nexthop_add;
- return 0;
- }
-
fib6_entry = mlxsw_sp_fib6_entry_create(mlxsw_sp, fib_node, rt_arr,
nrt6);
if (IS_ERR(fib6_entry)) {
@@ -5712,17 +5442,70 @@ static int mlxsw_sp_router_fib6_add(struct mlxsw_sp *mlxsw_sp,
goto err_fib6_entry_create;
}
- err = mlxsw_sp_fib6_node_entry_link(mlxsw_sp, fib6_entry, &replace);
+ replaced = fib_node->fib_entry;
+ err = mlxsw_sp_fib_node_entry_link(mlxsw_sp, &fib6_entry->common);
if (err)
- goto err_fib6_node_entry_link;
+ goto err_fib_node_entry_link;
+
+ /* Nothing to replace */
+ if (!replaced)
+ return 0;
- mlxsw_sp_fib6_entry_replace(mlxsw_sp, fib6_entry, replace);
+ mlxsw_sp_fib_entry_hw_flags_clear(mlxsw_sp, replaced);
+ fib6_replaced = container_of(replaced, struct mlxsw_sp_fib6_entry,
+ common);
+ mlxsw_sp_fib6_entry_destroy(mlxsw_sp, fib6_replaced);
return 0;
-err_fib6_node_entry_link:
+err_fib_node_entry_link:
+ fib_node->fib_entry = replaced;
mlxsw_sp_fib6_entry_destroy(mlxsw_sp, fib6_entry);
err_fib6_entry_create:
+ mlxsw_sp_fib_node_put(mlxsw_sp, fib_node);
+ return err;
+}
+
+static int mlxsw_sp_router_fib6_append(struct mlxsw_sp *mlxsw_sp,
+ struct fib6_info **rt_arr,
+ unsigned int nrt6)
+{
+ struct mlxsw_sp_fib6_entry *fib6_entry;
+ struct mlxsw_sp_fib_node *fib_node;
+ struct fib6_info *rt = rt_arr[0];
+ int err;
+
+ if (mlxsw_sp->router->aborted)
+ return 0;
+
+ if (rt->fib6_src.plen)
+ return -EINVAL;
+
+ if (mlxsw_sp_fib6_rt_should_ignore(rt))
+ return 0;
+
+ fib_node = mlxsw_sp_fib_node_get(mlxsw_sp, rt->fib6_table->tb6_id,
+ &rt->fib6_dst.addr,
+ sizeof(rt->fib6_dst.addr),
+ rt->fib6_dst.plen,
+ MLXSW_SP_L3_PROTO_IPV6);
+ if (IS_ERR(fib_node))
+ return PTR_ERR(fib_node);
+
+ if (WARN_ON_ONCE(!fib_node->fib_entry)) {
+ mlxsw_sp_fib_node_put(mlxsw_sp, fib_node);
+ return -EINVAL;
+ }
+
+ fib6_entry = container_of(fib_node->fib_entry,
+ struct mlxsw_sp_fib6_entry, common);
+ err = mlxsw_sp_fib6_entry_nexthop_add(mlxsw_sp, fib6_entry, rt_arr,
+ nrt6);
+ if (err)
+ goto err_fib6_entry_nexthop_add;
+
+ return 0;
+
err_fib6_entry_nexthop_add:
mlxsw_sp_fib_node_put(mlxsw_sp, fib_node);
return err;
@@ -5762,7 +5545,7 @@ static void mlxsw_sp_router_fib6_del(struct mlxsw_sp *mlxsw_sp,
fib_node = fib6_entry->common.fib_node;
- mlxsw_sp_fib6_node_entry_unlink(mlxsw_sp, fib6_entry);
+ mlxsw_sp_fib_node_entry_unlink(mlxsw_sp, &fib6_entry->common);
mlxsw_sp_fib6_entry_destroy(mlxsw_sp, fib6_entry);
mlxsw_sp_fib_node_put(mlxsw_sp, fib_node);
}
@@ -5916,39 +5699,25 @@ static int mlxsw_sp_router_set_abort_trap(struct mlxsw_sp *mlxsw_sp)
static void mlxsw_sp_fib4_node_flush(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_fib_node *fib_node)
{
- struct mlxsw_sp_fib4_entry *fib4_entry, *tmp;
-
- list_for_each_entry_safe(fib4_entry, tmp, &fib_node->entry_list,
- common.list) {
- bool do_break = &tmp->common.list == &fib_node->entry_list;
+ struct mlxsw_sp_fib4_entry *fib4_entry;
- mlxsw_sp_fib4_node_entry_unlink(mlxsw_sp, fib4_entry);
- mlxsw_sp_fib4_entry_destroy(mlxsw_sp, fib4_entry);
- mlxsw_sp_fib_node_put(mlxsw_sp, fib_node);
- /* Break when entry list is empty and node was freed.
- * Otherwise, we'll access freed memory in the next
- * iteration.
- */
- if (do_break)
- break;
- }
+ fib4_entry = container_of(fib_node->fib_entry,
+ struct mlxsw_sp_fib4_entry, common);
+ mlxsw_sp_fib_node_entry_unlink(mlxsw_sp, fib_node->fib_entry);
+ mlxsw_sp_fib4_entry_destroy(mlxsw_sp, fib4_entry);
+ mlxsw_sp_fib_node_put(mlxsw_sp, fib_node);
}
static void mlxsw_sp_fib6_node_flush(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_fib_node *fib_node)
{
- struct mlxsw_sp_fib6_entry *fib6_entry, *tmp;
-
- list_for_each_entry_safe(fib6_entry, tmp, &fib_node->entry_list,
- common.list) {
- bool do_break = &tmp->common.list == &fib_node->entry_list;
+ struct mlxsw_sp_fib6_entry *fib6_entry;
- mlxsw_sp_fib6_node_entry_unlink(mlxsw_sp, fib6_entry);
- mlxsw_sp_fib6_entry_destroy(mlxsw_sp, fib6_entry);
- mlxsw_sp_fib_node_put(mlxsw_sp, fib_node);
- if (do_break)
- break;
- }
+ fib6_entry = container_of(fib_node->fib_entry,
+ struct mlxsw_sp_fib6_entry, common);
+ mlxsw_sp_fib_node_entry_unlink(mlxsw_sp, fib_node->fib_entry);
+ mlxsw_sp_fib6_entry_destroy(mlxsw_sp, fib6_entry);
+ mlxsw_sp_fib_node_put(mlxsw_sp, fib_node);
}
static void mlxsw_sp_fib_node_flush(struct mlxsw_sp *mlxsw_sp,
@@ -6099,7 +5868,6 @@ static void mlxsw_sp_router_fib4_event_work(struct work_struct *work)
struct mlxsw_sp_fib_event_work *fib_work =
container_of(work, struct mlxsw_sp_fib_event_work, work);
struct mlxsw_sp *mlxsw_sp = fib_work->mlxsw_sp;
- bool replace, append;
int err;
/* Protect internal structures from changes */
@@ -6107,13 +5875,9 @@ static void mlxsw_sp_router_fib4_event_work(struct work_struct *work)
mlxsw_sp_span_respin(mlxsw_sp);
switch (fib_work->event) {
- case FIB_EVENT_ENTRY_REPLACE: /* fall through */
- case FIB_EVENT_ENTRY_APPEND: /* fall through */
- case FIB_EVENT_ENTRY_ADD:
- replace = fib_work->event == FIB_EVENT_ENTRY_REPLACE;
- append = fib_work->event == FIB_EVENT_ENTRY_APPEND;
- err = mlxsw_sp_router_fib4_add(mlxsw_sp, &fib_work->fen_info,
- replace, append);
+ case FIB_EVENT_ENTRY_REPLACE:
+ err = mlxsw_sp_router_fib4_replace(mlxsw_sp,
+ &fib_work->fen_info);
if (err)
mlxsw_sp_router_fib_abort(mlxsw_sp);
fib_info_put(fib_work->fen_info.fi);
@@ -6138,20 +5902,24 @@ static void mlxsw_sp_router_fib6_event_work(struct work_struct *work)
struct mlxsw_sp_fib_event_work *fib_work =
container_of(work, struct mlxsw_sp_fib_event_work, work);
struct mlxsw_sp *mlxsw_sp = fib_work->mlxsw_sp;
- bool replace;
int err;
rtnl_lock();
mlxsw_sp_span_respin(mlxsw_sp);
switch (fib_work->event) {
- case FIB_EVENT_ENTRY_REPLACE: /* fall through */
- case FIB_EVENT_ENTRY_ADD:
- replace = fib_work->event == FIB_EVENT_ENTRY_REPLACE;
- err = mlxsw_sp_router_fib6_add(mlxsw_sp,
- fib_work->fib6_work.rt_arr,
- fib_work->fib6_work.nrt6,
- replace);
+ case FIB_EVENT_ENTRY_REPLACE:
+ err = mlxsw_sp_router_fib6_replace(mlxsw_sp,
+ fib_work->fib6_work.rt_arr,
+ fib_work->fib6_work.nrt6);
+ if (err)
+ mlxsw_sp_router_fib_abort(mlxsw_sp);
+ mlxsw_sp_router_fib6_work_fini(&fib_work->fib6_work);
+ break;
+ case FIB_EVENT_ENTRY_APPEND:
+ err = mlxsw_sp_router_fib6_append(mlxsw_sp,
+ fib_work->fib6_work.rt_arr,
+ fib_work->fib6_work.nrt6);
if (err)
mlxsw_sp_router_fib_abort(mlxsw_sp);
mlxsw_sp_router_fib6_work_fini(&fib_work->fib6_work);
@@ -6216,8 +5984,6 @@ static void mlxsw_sp_router_fib4_event(struct mlxsw_sp_fib_event_work *fib_work,
switch (fib_work->event) {
case FIB_EVENT_ENTRY_REPLACE: /* fall through */
- case FIB_EVENT_ENTRY_APPEND: /* fall through */
- case FIB_EVENT_ENTRY_ADD: /* fall through */
case FIB_EVENT_ENTRY_DEL:
fen_info = container_of(info, struct fib_entry_notifier_info,
info);
@@ -6245,7 +6011,7 @@ static int mlxsw_sp_router_fib6_event(struct mlxsw_sp_fib_event_work *fib_work,
switch (fib_work->event) {
case FIB_EVENT_ENTRY_REPLACE: /* fall through */
- case FIB_EVENT_ENTRY_ADD: /* fall through */
+ case FIB_EVENT_ENTRY_APPEND: /* fall through */
case FIB_EVENT_ENTRY_DEL:
fen6_info = container_of(info, struct fib6_entry_notifier_info,
info);
@@ -6348,9 +6114,9 @@ static int mlxsw_sp_router_fib_event(struct notifier_block *nb,
err = mlxsw_sp_router_fib_rule_event(event, info,
router->mlxsw_sp);
return notifier_from_errno(err);
- case FIB_EVENT_ENTRY_ADD:
+ case FIB_EVENT_ENTRY_ADD: /* fall through */
case FIB_EVENT_ENTRY_REPLACE: /* fall through */
- case FIB_EVENT_ENTRY_APPEND: /* fall through */
+ case FIB_EVENT_ENTRY_APPEND:
if (router->aborted) {
NL_SET_ERR_MSG_MOD(info->extack, "FIB offload was aborted. Not configuring route");
return notifier_from_errno(-EINVAL);
@@ -8025,8 +7791,18 @@ mlxsw_sp_ipip_config_tigcr(struct mlxsw_sp *mlxsw_sp)
static int mlxsw_sp_ipips_init(struct mlxsw_sp *mlxsw_sp)
{
+ int err;
+
mlxsw_sp->router->ipip_ops_arr = mlxsw_sp_ipip_ops_arr;
INIT_LIST_HEAD(&mlxsw_sp->router->ipip_list);
+
+ err = mlxsw_sp_ipip_ecn_encap_init(mlxsw_sp);
+ if (err)
+ return err;
+ err = mlxsw_sp_ipip_ecn_decap_init(mlxsw_sp);
+ if (err)
+ return err;
+
return mlxsw_sp_ipip_config_tigcr(mlxsw_sp);
}
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.h
index cc1de91e8217..c9b94f435cdd 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.h
@@ -104,4 +104,7 @@ static inline bool mlxsw_sp_l3addr_eq(const union mlxsw_sp_l3addr *addr1,
return !memcmp(addr1, addr2, sizeof(*addr1));
}
+int mlxsw_sp_ipip_ecn_encap_init(struct mlxsw_sp *mlxsw_sp);
+int mlxsw_sp_ipip_ecn_decap_init(struct mlxsw_sp *mlxsw_sp);
+
#endif /* _MLXSW_ROUTER_H_*/
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_span.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_span.c
index 200d324e6d99..0cdd7954a085 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_span.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_span.c
@@ -748,33 +748,50 @@ static bool mlxsw_sp_span_is_egress_mirror(struct mlxsw_sp_port *port)
return false;
}
-static int mlxsw_sp_span_mtu_to_buffsize(const struct mlxsw_sp *mlxsw_sp,
- int mtu)
+static int
+mlxsw_sp_span_port_buffsize_update(struct mlxsw_sp_port *mlxsw_sp_port, u16 mtu)
{
- return mlxsw_sp_bytes_cells(mlxsw_sp, mtu * 5 / 2) + 1;
+ struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+ char sbib_pl[MLXSW_REG_SBIB_LEN];
+ u32 buffsize;
+ u32 speed;
+ int err;
+
+ err = mlxsw_sp_port_speed_get(mlxsw_sp_port, &speed);
+ if (err)
+ return err;
+ if (speed == SPEED_UNKNOWN)
+ speed = 0;
+
+ buffsize = mlxsw_sp_span_buffsize_get(mlxsw_sp, speed, mtu);
+ mlxsw_reg_sbib_pack(sbib_pl, mlxsw_sp_port->local_port, buffsize);
+ return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sbib), sbib_pl);
}
int mlxsw_sp_span_port_mtu_update(struct mlxsw_sp_port *port, u16 mtu)
{
- struct mlxsw_sp *mlxsw_sp = port->mlxsw_sp;
- char sbib_pl[MLXSW_REG_SBIB_LEN];
- int err;
-
/* If port is egress mirrored, the shared buffer size should be
* updated according to the mtu value
*/
- if (mlxsw_sp_span_is_egress_mirror(port)) {
- u32 buffsize = mlxsw_sp_span_mtu_to_buffsize(mlxsw_sp, mtu);
+ if (mlxsw_sp_span_is_egress_mirror(port))
+ return mlxsw_sp_span_port_buffsize_update(port, mtu);
+ return 0;
+}
- mlxsw_reg_sbib_pack(sbib_pl, port->local_port, buffsize);
- err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sbib), sbib_pl);
- if (err) {
- netdev_err(port->dev, "Could not update shared buffer for mirroring\n");
- return err;
- }
- }
+void mlxsw_sp_span_speed_update_work(struct work_struct *work)
+{
+ struct delayed_work *dwork = to_delayed_work(work);
+ struct mlxsw_sp_port *mlxsw_sp_port;
- return 0;
+ mlxsw_sp_port = container_of(dwork, struct mlxsw_sp_port,
+ span.speed_update_dw);
+
+ /* If port is egress mirrored, the shared buffer size should be
+ * updated according to the speed value.
+ */
+ if (mlxsw_sp_span_is_egress_mirror(mlxsw_sp_port))
+ mlxsw_sp_span_port_buffsize_update(mlxsw_sp_port,
+ mlxsw_sp_port->dev->mtu);
}
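The new delayed work re-derives the egress-mirror buffer size from the current port speed. Its initialization and scheduling are not visible in these hunks; presumably the port init path and the link-event path wire it up along these lines, reusing the existing mlxsw_core_schedule_dw() helper (assumed wiring, shown only as a sketch):

INIT_DELAYED_WORK(&mlxsw_sp_port->span.speed_update_dw,
		  mlxsw_sp_span_speed_update_work);
/* ... and on a carrier or speed change event: */
mlxsw_core_schedule_dw(&mlxsw_sp_port->span.speed_update_dw, 0);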
static struct mlxsw_sp_span_inspected_port *
@@ -836,15 +853,9 @@ mlxsw_sp_span_inspected_port_add(struct mlxsw_sp_port *port,
/* if it is an egress SPAN, bind a shared buffer to it */
if (type == MLXSW_SP_SPAN_EGRESS) {
- u32 buffsize = mlxsw_sp_span_mtu_to_buffsize(mlxsw_sp,
- port->dev->mtu);
-
- mlxsw_reg_sbib_pack(sbib_pl, port->local_port, buffsize);
- err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sbib), sbib_pl);
- if (err) {
- netdev_err(port->dev, "Could not create shared buffer for mirroring\n");
+ err = mlxsw_sp_span_port_buffsize_update(port, port->dev->mtu);
+ if (err)
return err;
- }
}
if (bind) {
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_span.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum_span.h
index 5e04252f2a11..59724335525f 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_span.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_span.h
@@ -74,5 +74,6 @@ void mlxsw_sp_span_entry_invalidate(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_span_entry *span_entry);
int mlxsw_sp_span_port_mtu_update(struct mlxsw_sp_port *port, u16 mtu);
+void mlxsw_sp_span_speed_update_work(struct work_struct *work);
#endif
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_trap.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_trap.c
index e0d7c49ffae0..60205aa3f6a5 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_trap.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_trap.c
@@ -9,6 +9,20 @@
#include "reg.h"
#include "spectrum.h"
+/* All driver-specific traps must be documented in
+ * Documentation/networking/devlink/mlxsw.rst
+ */
+enum {
+ DEVLINK_MLXSW_TRAP_ID_BASE = DEVLINK_TRAP_GENERIC_ID_MAX,
+ DEVLINK_MLXSW_TRAP_ID_IRIF_DISABLED,
+ DEVLINK_MLXSW_TRAP_ID_ERIF_DISABLED,
+};
+
+#define DEVLINK_MLXSW_TRAP_NAME_IRIF_DISABLED \
+ "irif_disabled"
+#define DEVLINK_MLXSW_TRAP_NAME_ERIF_DISABLED \
+ "erif_disabled"
+
#define MLXSW_SP_TRAP_METADATA DEVLINK_TRAP_METADATA_TYPE_F_IN_PORT
static void mlxsw_sp_rx_drop_listener(struct sk_buff *skb, u8 local_port,
@@ -21,6 +35,12 @@ static void mlxsw_sp_rx_exception_listener(struct sk_buff *skb, u8 local_port,
DEVLINK_TRAP_GROUP_GENERIC(_group_id), \
MLXSW_SP_TRAP_METADATA)
+#define MLXSW_SP_TRAP_DRIVER_DROP(_id, _group_id) \
+ DEVLINK_TRAP_DRIVER(DROP, DROP, DEVLINK_MLXSW_TRAP_ID_##_id, \
+ DEVLINK_MLXSW_TRAP_NAME_##_id, \
+ DEVLINK_TRAP_GROUP_GENERIC(_group_id), \
+ MLXSW_SP_TRAP_METADATA)
+
#define MLXSW_SP_TRAP_EXCEPTION(_id, _group_id) \
DEVLINK_TRAP_GENERIC(EXCEPTION, TRAP, _id, \
DEVLINK_TRAP_GROUP_GENERIC(_group_id), \
@@ -58,6 +78,11 @@ static struct devlink_trap mlxsw_sp_traps_arr[] = {
MLXSW_SP_TRAP_EXCEPTION(UNRESOLVED_NEIGH, L3_DROPS),
MLXSW_SP_TRAP_EXCEPTION(IPV4_LPM_UNICAST_MISS, L3_DROPS),
MLXSW_SP_TRAP_EXCEPTION(IPV6_LPM_UNICAST_MISS, L3_DROPS),
+ MLXSW_SP_TRAP_DRIVER_DROP(IRIF_DISABLED, L3_DROPS),
+ MLXSW_SP_TRAP_DRIVER_DROP(ERIF_DISABLED, L3_DROPS),
+ MLXSW_SP_TRAP_DROP(NON_ROUTABLE, L3_DROPS),
+ MLXSW_SP_TRAP_EXCEPTION(DECAP_ERROR, TUNNEL_DROPS),
+ MLXSW_SP_TRAP_DROP(OVERLAY_SMAC_MC, TUNNEL_DROPS),
};
static struct mlxsw_listener mlxsw_sp_listeners_arr[] = {
@@ -90,6 +115,15 @@ static struct mlxsw_listener mlxsw_sp_listeners_arr[] = {
TRAP_EXCEPTION_TO_CPU),
MLXSW_SP_RXL_EXCEPTION(DISCARD_ROUTER_LPM6, ROUTER_EXP,
TRAP_EXCEPTION_TO_CPU),
+ MLXSW_SP_RXL_DISCARD(ROUTER_IRIF_EN, L3_DISCARDS),
+ MLXSW_SP_RXL_DISCARD(ROUTER_ERIF_EN, L3_DISCARDS),
+ MLXSW_SP_RXL_DISCARD(NON_ROUTABLE, L3_DISCARDS),
+ MLXSW_SP_RXL_EXCEPTION(DECAP_ECN0, ROUTER_EXP, TRAP_EXCEPTION_TO_CPU),
+ MLXSW_SP_RXL_EXCEPTION(IPIP_DECAP_ERROR, ROUTER_EXP,
+ TRAP_EXCEPTION_TO_CPU),
+ MLXSW_SP_RXL_EXCEPTION(DISCARD_DEC_PKT, TUNNEL_DISCARDS,
+ TRAP_EXCEPTION_TO_CPU),
+ MLXSW_SP_RXL_DISCARD(OVERLAY_SMAC_MC, TUNNEL_DISCARDS),
};
/* Mapping between hardware trap and devlink trap. Multiple hardware traps can
@@ -123,6 +157,13 @@ static u16 mlxsw_sp_listener_devlink_map[] = {
DEVLINK_TRAP_GENERIC_ID_UNRESOLVED_NEIGH,
DEVLINK_TRAP_GENERIC_ID_IPV4_LPM_UNICAST_MISS,
DEVLINK_TRAP_GENERIC_ID_IPV6_LPM_UNICAST_MISS,
+ DEVLINK_MLXSW_TRAP_ID_IRIF_DISABLED,
+ DEVLINK_MLXSW_TRAP_ID_ERIF_DISABLED,
+ DEVLINK_TRAP_GENERIC_ID_NON_ROUTABLE,
+ DEVLINK_TRAP_GENERIC_ID_DECAP_ERROR,
+ DEVLINK_TRAP_GENERIC_ID_DECAP_ERROR,
+ DEVLINK_TRAP_GENERIC_ID_DECAP_ERROR,
+ DEVLINK_TRAP_GENERIC_ID_OVERLAY_SMAC_MC,
};
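The map is positional: entry i names the devlink trap reported for mlxsw_sp_listeners_arr[i]. That is why DEVLINK_TRAP_GENERIC_ID_DECAP_ERROR appears three times in a row: the DECAP_ECN0, IPIP_DECAP_ERROR and DISCARD_DEC_PKT listeners all surface as the one generic decap_error trap. Resolving a trap back to a listener slot is then a simple index search (illustrative sketch, assuming an index variable i and a trap_id in scope):

/* Find the hardware listener that reports as a given devlink trap. */
for (i = 0; i < ARRAY_SIZE(mlxsw_sp_listener_devlink_map); i++)
	if (mlxsw_sp_listener_devlink_map[i] == trap_id)
		break;	/* i indexes the matching mlxsw_sp_listeners_arr[] slot */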
static int mlxsw_sp_rx_listener(struct mlxsw_sp *mlxsw_sp, struct sk_buff *skb,
@@ -304,8 +345,9 @@ mlxsw_sp_trap_group_policer_init(struct mlxsw_sp *mlxsw_sp,
u32 rate;
switch (group->id) {
- case DEVLINK_TRAP_GROUP_GENERIC_ID_L3_DROPS:/* fall through */
- case DEVLINK_TRAP_GROUP_GENERIC_ID_L2_DROPS:
+ case DEVLINK_TRAP_GROUP_GENERIC_ID_L2_DROPS: /* fall through */
+ case DEVLINK_TRAP_GROUP_GENERIC_ID_L3_DROPS: /* fall through */
+ case DEVLINK_TRAP_GROUP_GENERIC_ID_TUNNEL_DROPS:
policer_id = MLXSW_SP_DISCARD_POLICER_ID;
ir_units = MLXSW_REG_QPCR_IR_UNITS_M;
is_bytes = false;
@@ -342,6 +384,12 @@ __mlxsw_sp_trap_group_init(struct mlxsw_sp *mlxsw_sp,
priority = 0;
tc = 1;
break;
+ case DEVLINK_TRAP_GROUP_GENERIC_ID_TUNNEL_DROPS:
+ group_id = MLXSW_REG_HTGT_TRAP_GROUP_SP_TUNNEL_DISCARDS;
+ policer_id = MLXSW_SP_DISCARD_POLICER_ID;
+ priority = 0;
+ tc = 1;
+ break;
default:
return -EINVAL;
}
diff --git a/drivers/net/ethernet/mellanox/mlxsw/switchx2.c b/drivers/net/ethernet/mellanox/mlxsw/switchx2.c
index de6cb22f68b1..f0e98ec8f1ee 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/switchx2.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/switchx2.c
@@ -299,22 +299,17 @@ static netdev_tx_t mlxsw_sx_port_xmit(struct sk_buff *skb,
u64 len;
int err;
+ if (skb_cow_head(skb, MLXSW_TXHDR_LEN)) {
+ this_cpu_inc(mlxsw_sx_port->pcpu_stats->tx_dropped);
+ dev_kfree_skb_any(skb);
+ return NETDEV_TX_OK;
+ }
+
memset(skb->cb, 0, sizeof(struct mlxsw_skb_cb));
if (mlxsw_core_skb_transmit_busy(mlxsw_sx->core, &tx_info))
return NETDEV_TX_BUSY;
- if (unlikely(skb_headroom(skb) < MLXSW_TXHDR_LEN)) {
- struct sk_buff *skb_orig = skb;
-
- skb = skb_realloc_headroom(skb, MLXSW_TXHDR_LEN);
- if (!skb) {
- this_cpu_inc(mlxsw_sx_port->pcpu_stats->tx_dropped);
- dev_kfree_skb_any(skb_orig);
- return NETDEV_TX_OK;
- }
- dev_consume_skb_any(skb_orig);
- }
mlxsw_sx_txhdr_construct(skb, &tx_info);
/* TX header is consumed by HW on the way so we shouldn't count its
* bytes as being sent.
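skb_cow_head() subsumes the removed skb_realloc_headroom() dance: it is a no-op when enough writable headroom already exists, and unlike the old headroom-size check it also copes with cloned skbs whose header area is shared. The general idiom (a standard kernel pattern, not specific to this driver; HDR_LEN stands in for the device's TX header length):

if (skb_cow_head(skb, HDR_LEN)) {	/* guarantee writable headroom */
	dev_kfree_skb_any(skb);		/* allocation failed: drop the frame */
	return NETDEV_TX_OK;
}
memset(skb_push(skb, HDR_LEN), 0, HDR_LEN);	/* now safe to prepend */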
diff --git a/drivers/net/ethernet/mellanox/mlxsw/trap.h b/drivers/net/ethernet/mellanox/mlxsw/trap.h
index 0c1c142bb6b0..12e1fa998d42 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/trap.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/trap.h
@@ -67,6 +67,7 @@ enum {
MLXSW_TRAP_ID_NVE_ENCAP_ARP = 0xBD,
MLXSW_TRAP_ID_ROUTER_ALERT_IPV4 = 0xD6,
MLXSW_TRAP_ID_ROUTER_ALERT_IPV6 = 0xD7,
+ MLXSW_TRAP_ID_DISCARD_NON_ROUTABLE = 0x11A,
MLXSW_TRAP_ID_DISCARD_ROUTER2 = 0x130,
MLXSW_TRAP_ID_DISCARD_ROUTER3 = 0x131,
MLXSW_TRAP_ID_DISCARD_ING_PACKET_SMAC_MC = 0x140,
@@ -80,12 +81,20 @@ enum {
MLXSW_TRAP_ID_DISCARD_ING_ROUTER_UC_DIP_MC_DMAC = 0x161,
MLXSW_TRAP_ID_DISCARD_ING_ROUTER_DIP_LB = 0x162,
MLXSW_TRAP_ID_DISCARD_ING_ROUTER_SIP_MC = 0x163,
+ MLXSW_TRAP_ID_DISCARD_ING_ROUTER_SIP_CLASS_E = 0x164,
MLXSW_TRAP_ID_DISCARD_ING_ROUTER_SIP_LB = 0x165,
MLXSW_TRAP_ID_DISCARD_ING_ROUTER_CORRUPTED_IP_HDR = 0x167,
+ MLXSW_TRAP_ID_DISCARD_ING_ROUTER_MC_DMAC = 0x168,
+ MLXSW_TRAP_ID_DISCARD_ING_ROUTER_SIP_DIP = 0x169,
MLXSW_TRAP_ID_DISCARD_ING_ROUTER_IPV4_SIP_BC = 0x16A,
MLXSW_TRAP_ID_DISCARD_ING_ROUTER_IPV4_DIP_LOCAL_NET = 0x16B,
+ MLXSW_TRAP_ID_DISCARD_ING_ROUTER_DIP_LINK_LOCAL = 0x16C,
+ MLXSW_TRAP_ID_DISCARD_ROUTER_IRIF_EN = 0x178,
+ MLXSW_TRAP_ID_DISCARD_ROUTER_ERIF_EN = 0x179,
MLXSW_TRAP_ID_DISCARD_ROUTER_LPM4 = 0x17B,
MLXSW_TRAP_ID_DISCARD_ROUTER_LPM6 = 0x17C,
+ MLXSW_TRAP_ID_DISCARD_DEC_PKT = 0x188,
+ MLXSW_TRAP_ID_DISCARD_OVERLAY_SMAC_MC = 0x190,
MLXSW_TRAP_ID_DISCARD_IPV6_MC_DIP_RESERVED_SCOPE = 0x1B0,
MLXSW_TRAP_ID_DISCARD_IPV6_MC_DIP_INTERFACE_LOCAL_SCOPE = 0x1B1,
MLXSW_TRAP_ID_ACL0 = 0x1C0,
diff --git a/drivers/net/ethernet/micrel/ks8842.c b/drivers/net/ethernet/micrel/ks8842.c
index da329ca115cc..f3f6dfe3eddc 100644
--- a/drivers/net/ethernet/micrel/ks8842.c
+++ b/drivers/net/ethernet/micrel/ks8842.c
@@ -1103,7 +1103,7 @@ static void ks8842_tx_timeout_work(struct work_struct *work)
__ks8842_start_new_rx_dma(netdev);
}
-static void ks8842_tx_timeout(struct net_device *netdev)
+static void ks8842_tx_timeout(struct net_device *netdev, unsigned int txqueue)
{
struct ks8842_adapter *adapter = netdev_priv(netdev);
diff --git a/drivers/net/ethernet/micrel/ksz884x.c b/drivers/net/ethernet/micrel/ksz884x.c
index e102e1560ac7..d1444ba36e10 100644
--- a/drivers/net/ethernet/micrel/ksz884x.c
+++ b/drivers/net/ethernet/micrel/ksz884x.c
@@ -4896,7 +4896,7 @@ unlock:
* triggered to free up resources so that the transmit routine can continue
* sending out packets. The hardware is reset to correct the problem.
*/
-static void netdev_tx_timeout(struct net_device *dev)
+static void netdev_tx_timeout(struct net_device *dev, unsigned int txqueue)
{
static unsigned long last_reset;
diff --git a/drivers/net/ethernet/microchip/enc28j60.c b/drivers/net/ethernet/microchip/enc28j60.c
index 0567e4f387a5..09cdc2f2e7ff 100644
--- a/drivers/net/ethernet/microchip/enc28j60.c
+++ b/drivers/net/ethernet/microchip/enc28j60.c
@@ -1325,7 +1325,7 @@ static irqreturn_t enc28j60_irq(int irq, void *dev_id)
return IRQ_HANDLED;
}
-static void enc28j60_tx_timeout(struct net_device *ndev)
+static void enc28j60_tx_timeout(struct net_device *ndev, unsigned int txqueue)
{
struct enc28j60_net *priv = netdev_priv(ndev);
diff --git a/drivers/net/ethernet/microchip/encx24j600.c b/drivers/net/ethernet/microchip/encx24j600.c
index 52c41d11f565..39925e4bf2ec 100644
--- a/drivers/net/ethernet/microchip/encx24j600.c
+++ b/drivers/net/ethernet/microchip/encx24j600.c
@@ -892,7 +892,7 @@ static netdev_tx_t encx24j600_tx(struct sk_buff *skb, struct net_device *dev)
}
/* Deal with a transmit timeout */
-static void encx24j600_tx_timeout(struct net_device *dev)
+static void encx24j600_tx_timeout(struct net_device *dev, unsigned int txqueue)
{
struct encx24j600_priv *priv = netdev_priv(dev);
diff --git a/drivers/net/ethernet/microchip/lan743x_ptp.c b/drivers/net/ethernet/microchip/lan743x_ptp.c
index afe52463dc57..9399f6a98748 100644
--- a/drivers/net/ethernet/microchip/lan743x_ptp.c
+++ b/drivers/net/ethernet/microchip/lan743x_ptp.c
@@ -1265,6 +1265,9 @@ int lan743x_ptp_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
lan743x_ptp_set_sync_ts_insert(adapter, true);
break;
+ case HWTSTAMP_TX_ONESTEP_P2P:
+ ret = -ERANGE;
+ break;
default:
netif_warn(adapter, drv, adapter->netdev,
" tx_type = %d, UNKNOWN\n", config.tx_type);
diff --git a/drivers/net/ethernet/mscc/ocelot.c b/drivers/net/ethernet/mscc/ocelot.c
index 985b46d7e3d1..86d543ab1ab9 100644
--- a/drivers/net/ethernet/mscc/ocelot.c
+++ b/drivers/net/ethernet/mscc/ocelot.c
@@ -500,13 +500,14 @@ EXPORT_SYMBOL(ocelot_port_enable);
static int ocelot_port_open(struct net_device *dev)
{
struct ocelot_port_private *priv = netdev_priv(dev);
- struct ocelot *ocelot = priv->port.ocelot;
+ struct ocelot_port *ocelot_port = &priv->port;
+ struct ocelot *ocelot = ocelot_port->ocelot;
int port = priv->chip_port;
int err;
if (priv->serdes) {
err = phy_set_mode_ext(priv->serdes, PHY_MODE_ETHERNET,
- priv->phy_mode);
+ ocelot_port->phy_mode);
if (err) {
netdev_err(dev, "Could not set mode of SerDes\n");
return err;
@@ -514,7 +515,7 @@ static int ocelot_port_open(struct net_device *dev)
}
err = phy_connect_direct(dev, priv->phy, &ocelot_port_adjust_link,
- priv->phy_mode);
+ ocelot_port->phy_mode);
if (err) {
netdev_err(dev, "Could not attach to PHY\n");
return err;
diff --git a/drivers/net/ethernet/mscc/ocelot.h b/drivers/net/ethernet/mscc/ocelot.h
index c259114c48fd..04372ba72fec 100644
--- a/drivers/net/ethernet/mscc/ocelot.h
+++ b/drivers/net/ethernet/mscc/ocelot.h
@@ -18,11 +18,11 @@
#include <linux/ptp_clock_kernel.h>
#include <linux/regmap.h>
+#include <soc/mscc/ocelot_qsys.h>
#include <soc/mscc/ocelot_sys.h>
+#include <soc/mscc/ocelot_dev.h>
+#include <soc/mscc/ocelot_ana.h>
#include <soc/mscc/ocelot.h>
-#include "ocelot_ana.h"
-#include "ocelot_dev.h"
-#include "ocelot_qsys.h"
#include "ocelot_rew.h"
#include "ocelot_qs.h"
#include "ocelot_tc.h"
@@ -68,7 +68,6 @@ struct ocelot_port_private {
u8 vlan_aware;
- phy_interface_t phy_mode;
struct phy *serdes;
struct ocelot_port_tc tc;
diff --git a/drivers/net/ethernet/mscc/ocelot_ana.h b/drivers/net/ethernet/mscc/ocelot_ana.h
deleted file mode 100644
index 841c6ec22b64..000000000000
--- a/drivers/net/ethernet/mscc/ocelot_ana.h
+++ /dev/null
@@ -1,625 +0,0 @@
-/* SPDX-License-Identifier: (GPL-2.0 OR MIT) */
-/*
- * Microsemi Ocelot Switch driver
- *
- * Copyright (c) 2017 Microsemi Corporation
- */
-
-#ifndef _MSCC_OCELOT_ANA_H_
-#define _MSCC_OCELOT_ANA_H_
-
-#define ANA_ANAGEFIL_B_DOM_EN BIT(22)
-#define ANA_ANAGEFIL_B_DOM_VAL BIT(21)
-#define ANA_ANAGEFIL_AGE_LOCKED BIT(20)
-#define ANA_ANAGEFIL_PID_EN BIT(19)
-#define ANA_ANAGEFIL_PID_VAL(x) (((x) << 14) & GENMASK(18, 14))
-#define ANA_ANAGEFIL_PID_VAL_M GENMASK(18, 14)
-#define ANA_ANAGEFIL_PID_VAL_X(x) (((x) & GENMASK(18, 14)) >> 14)
-#define ANA_ANAGEFIL_VID_EN BIT(13)
-#define ANA_ANAGEFIL_VID_VAL(x) ((x) & GENMASK(12, 0))
-#define ANA_ANAGEFIL_VID_VAL_M GENMASK(12, 0)
-
-#define ANA_STORMLIMIT_CFG_RSZ 0x4
-
-#define ANA_STORMLIMIT_CFG_STORM_RATE(x) (((x) << 3) & GENMASK(6, 3))
-#define ANA_STORMLIMIT_CFG_STORM_RATE_M GENMASK(6, 3)
-#define ANA_STORMLIMIT_CFG_STORM_RATE_X(x) (((x) & GENMASK(6, 3)) >> 3)
-#define ANA_STORMLIMIT_CFG_STORM_UNIT BIT(2)
-#define ANA_STORMLIMIT_CFG_STORM_MODE(x) ((x) & GENMASK(1, 0))
-#define ANA_STORMLIMIT_CFG_STORM_MODE_M GENMASK(1, 0)
-
-#define ANA_AUTOAGE_AGE_FAST BIT(21)
-#define ANA_AUTOAGE_AGE_PERIOD(x) (((x) << 1) & GENMASK(20, 1))
-#define ANA_AUTOAGE_AGE_PERIOD_M GENMASK(20, 1)
-#define ANA_AUTOAGE_AGE_PERIOD_X(x) (((x) & GENMASK(20, 1)) >> 1)
-#define ANA_AUTOAGE_AUTOAGE_LOCKED BIT(0)
-
-#define ANA_MACTOPTIONS_REDUCED_TABLE BIT(1)
-#define ANA_MACTOPTIONS_SHADOW BIT(0)
-
-#define ANA_AGENCTRL_FID_MASK(x) (((x) << 12) & GENMASK(23, 12))
-#define ANA_AGENCTRL_FID_MASK_M GENMASK(23, 12)
-#define ANA_AGENCTRL_FID_MASK_X(x) (((x) & GENMASK(23, 12)) >> 12)
-#define ANA_AGENCTRL_IGNORE_DMAC_FLAGS BIT(11)
-#define ANA_AGENCTRL_IGNORE_SMAC_FLAGS BIT(10)
-#define ANA_AGENCTRL_FLOOD_SPECIAL BIT(9)
-#define ANA_AGENCTRL_FLOOD_IGNORE_VLAN BIT(8)
-#define ANA_AGENCTRL_MIRROR_CPU BIT(7)
-#define ANA_AGENCTRL_LEARN_CPU_COPY BIT(6)
-#define ANA_AGENCTRL_LEARN_FWD_KILL BIT(5)
-#define ANA_AGENCTRL_LEARN_IGNORE_VLAN BIT(4)
-#define ANA_AGENCTRL_CPU_CPU_KILL_ENA BIT(3)
-#define ANA_AGENCTRL_GREEN_COUNT_MODE BIT(2)
-#define ANA_AGENCTRL_YELLOW_COUNT_MODE BIT(1)
-#define ANA_AGENCTRL_RED_COUNT_MODE BIT(0)
-
-#define ANA_FLOODING_RSZ 0x4
-
-#define ANA_FLOODING_FLD_UNICAST(x) (((x) << 12) & GENMASK(17, 12))
-#define ANA_FLOODING_FLD_UNICAST_M GENMASK(17, 12)
-#define ANA_FLOODING_FLD_UNICAST_X(x) (((x) & GENMASK(17, 12)) >> 12)
-#define ANA_FLOODING_FLD_BROADCAST(x) (((x) << 6) & GENMASK(11, 6))
-#define ANA_FLOODING_FLD_BROADCAST_M GENMASK(11, 6)
-#define ANA_FLOODING_FLD_BROADCAST_X(x) (((x) & GENMASK(11, 6)) >> 6)
-#define ANA_FLOODING_FLD_MULTICAST(x) ((x) & GENMASK(5, 0))
-#define ANA_FLOODING_FLD_MULTICAST_M GENMASK(5, 0)
-
-#define ANA_FLOODING_IPMC_FLD_MC4_CTRL(x) (((x) << 18) & GENMASK(23, 18))
-#define ANA_FLOODING_IPMC_FLD_MC4_CTRL_M GENMASK(23, 18)
-#define ANA_FLOODING_IPMC_FLD_MC4_CTRL_X(x) (((x) & GENMASK(23, 18)) >> 18)
-#define ANA_FLOODING_IPMC_FLD_MC4_DATA(x) (((x) << 12) & GENMASK(17, 12))
-#define ANA_FLOODING_IPMC_FLD_MC4_DATA_M GENMASK(17, 12)
-#define ANA_FLOODING_IPMC_FLD_MC4_DATA_X(x) (((x) & GENMASK(17, 12)) >> 12)
-#define ANA_FLOODING_IPMC_FLD_MC6_CTRL(x) (((x) << 6) & GENMASK(11, 6))
-#define ANA_FLOODING_IPMC_FLD_MC6_CTRL_M GENMASK(11, 6)
-#define ANA_FLOODING_IPMC_FLD_MC6_CTRL_X(x) (((x) & GENMASK(11, 6)) >> 6)
-#define ANA_FLOODING_IPMC_FLD_MC6_DATA(x) ((x) & GENMASK(5, 0))
-#define ANA_FLOODING_IPMC_FLD_MC6_DATA_M GENMASK(5, 0)
-
-#define ANA_SFLOW_CFG_RSZ 0x4
-
-#define ANA_SFLOW_CFG_SF_RATE(x) (((x) << 2) & GENMASK(13, 2))
-#define ANA_SFLOW_CFG_SF_RATE_M GENMASK(13, 2)
-#define ANA_SFLOW_CFG_SF_RATE_X(x) (((x) & GENMASK(13, 2)) >> 2)
-#define ANA_SFLOW_CFG_SF_SAMPLE_RX BIT(1)
-#define ANA_SFLOW_CFG_SF_SAMPLE_TX BIT(0)
-
-#define ANA_PORT_MODE_RSZ 0x4
-
-#define ANA_PORT_MODE_REDTAG_PARSE_CFG BIT(3)
-#define ANA_PORT_MODE_VLAN_PARSE_CFG(x) (((x) << 1) & GENMASK(2, 1))
-#define ANA_PORT_MODE_VLAN_PARSE_CFG_M GENMASK(2, 1)
-#define ANA_PORT_MODE_VLAN_PARSE_CFG_X(x) (((x) & GENMASK(2, 1)) >> 1)
-#define ANA_PORT_MODE_L3_PARSE_CFG BIT(0)
-
-#define ANA_CUT_THRU_CFG_RSZ 0x4
-
-#define ANA_PGID_PGID_RSZ 0x4
-
-#define ANA_PGID_PGID_PGID(x) ((x) & GENMASK(11, 0))
-#define ANA_PGID_PGID_PGID_M GENMASK(11, 0)
-#define ANA_PGID_PGID_CPUQ_DST_PGID(x) (((x) << 27) & GENMASK(29, 27))
-#define ANA_PGID_PGID_CPUQ_DST_PGID_M GENMASK(29, 27)
-#define ANA_PGID_PGID_CPUQ_DST_PGID_X(x) (((x) & GENMASK(29, 27)) >> 27)
-
-#define ANA_TABLES_MACHDATA_VID(x) (((x) << 16) & GENMASK(28, 16))
-#define ANA_TABLES_MACHDATA_VID_M GENMASK(28, 16)
-#define ANA_TABLES_MACHDATA_VID_X(x) (((x) & GENMASK(28, 16)) >> 16)
-#define ANA_TABLES_MACHDATA_MACHDATA(x) ((x) & GENMASK(15, 0))
-#define ANA_TABLES_MACHDATA_MACHDATA_M GENMASK(15, 0)
-
-#define ANA_TABLES_STREAMDATA_SSID_VALID BIT(16)
-#define ANA_TABLES_STREAMDATA_SSID(x) (((x) << 9) & GENMASK(15, 9))
-#define ANA_TABLES_STREAMDATA_SSID_M GENMASK(15, 9)
-#define ANA_TABLES_STREAMDATA_SSID_X(x) (((x) & GENMASK(15, 9)) >> 9)
-#define ANA_TABLES_STREAMDATA_SFID_VALID BIT(8)
-#define ANA_TABLES_STREAMDATA_SFID(x) ((x) & GENMASK(7, 0))
-#define ANA_TABLES_STREAMDATA_SFID_M GENMASK(7, 0)
-
-#define ANA_TABLES_MACACCESS_MAC_CPU_COPY BIT(15)
-#define ANA_TABLES_MACACCESS_SRC_KILL BIT(14)
-#define ANA_TABLES_MACACCESS_IGNORE_VLAN BIT(13)
-#define ANA_TABLES_MACACCESS_AGED_FLAG BIT(12)
-#define ANA_TABLES_MACACCESS_VALID BIT(11)
-#define ANA_TABLES_MACACCESS_ENTRYTYPE(x) (((x) << 9) & GENMASK(10, 9))
-#define ANA_TABLES_MACACCESS_ENTRYTYPE_M GENMASK(10, 9)
-#define ANA_TABLES_MACACCESS_ENTRYTYPE_X(x) (((x) & GENMASK(10, 9)) >> 9)
-#define ANA_TABLES_MACACCESS_DEST_IDX(x) (((x) << 3) & GENMASK(8, 3))
-#define ANA_TABLES_MACACCESS_DEST_IDX_M GENMASK(8, 3)
-#define ANA_TABLES_MACACCESS_DEST_IDX_X(x) (((x) & GENMASK(8, 3)) >> 3)
-#define ANA_TABLES_MACACCESS_MAC_TABLE_CMD(x) ((x) & GENMASK(2, 0))
-#define ANA_TABLES_MACACCESS_MAC_TABLE_CMD_M GENMASK(2, 0)
-#define MACACCESS_CMD_IDLE 0
-#define MACACCESS_CMD_LEARN 1
-#define MACACCESS_CMD_FORGET 2
-#define MACACCESS_CMD_AGE 3
-#define MACACCESS_CMD_GET_NEXT 4
-#define MACACCESS_CMD_INIT 5
-#define MACACCESS_CMD_READ 6
-#define MACACCESS_CMD_WRITE 7
-
-#define ANA_TABLES_VLANACCESS_VLAN_PORT_MASK(x) (((x) << 2) & GENMASK(13, 2))
-#define ANA_TABLES_VLANACCESS_VLAN_PORT_MASK_M GENMASK(13, 2)
-#define ANA_TABLES_VLANACCESS_VLAN_PORT_MASK_X(x) (((x) & GENMASK(13, 2)) >> 2)
-#define ANA_TABLES_VLANACCESS_VLAN_TBL_CMD(x) ((x) & GENMASK(1, 0))
-#define ANA_TABLES_VLANACCESS_VLAN_TBL_CMD_M GENMASK(1, 0)
-#define ANA_TABLES_VLANACCESS_CMD_IDLE 0x0
-#define ANA_TABLES_VLANACCESS_CMD_WRITE 0x2
-#define ANA_TABLES_VLANACCESS_CMD_INIT 0x3
-
-#define ANA_TABLES_VLANTIDX_VLAN_SEC_FWD_ENA BIT(17)
-#define ANA_TABLES_VLANTIDX_VLAN_FLOOD_DIS BIT(16)
-#define ANA_TABLES_VLANTIDX_VLAN_PRIV_VLAN BIT(15)
-#define ANA_TABLES_VLANTIDX_VLAN_LEARN_DISABLED BIT(14)
-#define ANA_TABLES_VLANTIDX_VLAN_MIRROR BIT(13)
-#define ANA_TABLES_VLANTIDX_VLAN_SRC_CHK BIT(12)
-#define ANA_TABLES_VLANTIDX_V_INDEX(x) ((x) & GENMASK(11, 0))
-#define ANA_TABLES_VLANTIDX_V_INDEX_M GENMASK(11, 0)
-
-#define ANA_TABLES_ISDXACCESS_ISDX_PORT_MASK(x) (((x) << 2) & GENMASK(8, 2))
-#define ANA_TABLES_ISDXACCESS_ISDX_PORT_MASK_M GENMASK(8, 2)
-#define ANA_TABLES_ISDXACCESS_ISDX_PORT_MASK_X(x) (((x) & GENMASK(8, 2)) >> 2)
-#define ANA_TABLES_ISDXACCESS_ISDX_TBL_CMD(x) ((x) & GENMASK(1, 0))
-#define ANA_TABLES_ISDXACCESS_ISDX_TBL_CMD_M GENMASK(1, 0)
-
-#define ANA_TABLES_ISDXTIDX_ISDX_SDLBI(x) (((x) << 21) & GENMASK(28, 21))
-#define ANA_TABLES_ISDXTIDX_ISDX_SDLBI_M GENMASK(28, 21)
-#define ANA_TABLES_ISDXTIDX_ISDX_SDLBI_X(x) (((x) & GENMASK(28, 21)) >> 21)
-#define ANA_TABLES_ISDXTIDX_ISDX_MSTI(x) (((x) << 15) & GENMASK(20, 15))
-#define ANA_TABLES_ISDXTIDX_ISDX_MSTI_M GENMASK(20, 15)
-#define ANA_TABLES_ISDXTIDX_ISDX_MSTI_X(x) (((x) & GENMASK(20, 15)) >> 15)
-#define ANA_TABLES_ISDXTIDX_ISDX_ES0_KEY_ENA BIT(14)
-#define ANA_TABLES_ISDXTIDX_ISDX_FORCE_ENA BIT(10)
-#define ANA_TABLES_ISDXTIDX_ISDX_INDEX(x) ((x) & GENMASK(7, 0))
-#define ANA_TABLES_ISDXTIDX_ISDX_INDEX_M GENMASK(7, 0)
-
-#define ANA_TABLES_ENTRYLIM_RSZ 0x4
-
-#define ANA_TABLES_ENTRYLIM_ENTRYLIM(x) (((x) << 14) & GENMASK(17, 14))
-#define ANA_TABLES_ENTRYLIM_ENTRYLIM_M GENMASK(17, 14)
-#define ANA_TABLES_ENTRYLIM_ENTRYLIM_X(x) (((x) & GENMASK(17, 14)) >> 14)
-#define ANA_TABLES_ENTRYLIM_ENTRYSTAT(x) ((x) & GENMASK(13, 0))
-#define ANA_TABLES_ENTRYLIM_ENTRYSTAT_M GENMASK(13, 0)
-
-#define ANA_TABLES_STREAMACCESS_GEN_REC_SEQ_NUM(x) (((x) << 4) & GENMASK(31, 4))
-#define ANA_TABLES_STREAMACCESS_GEN_REC_SEQ_NUM_M GENMASK(31, 4)
-#define ANA_TABLES_STREAMACCESS_GEN_REC_SEQ_NUM_X(x) (((x) & GENMASK(31, 4)) >> 4)
-#define ANA_TABLES_STREAMACCESS_SEQ_GEN_REC_ENA BIT(3)
-#define ANA_TABLES_STREAMACCESS_GEN_REC_TYPE BIT(2)
-#define ANA_TABLES_STREAMACCESS_STREAM_TBL_CMD(x) ((x) & GENMASK(1, 0))
-#define ANA_TABLES_STREAMACCESS_STREAM_TBL_CMD_M GENMASK(1, 0)
-
-#define ANA_TABLES_STREAMTIDX_SEQ_GEN_ERR_STATUS(x) (((x) << 30) & GENMASK(31, 30))
-#define ANA_TABLES_STREAMTIDX_SEQ_GEN_ERR_STATUS_M GENMASK(31, 30)
-#define ANA_TABLES_STREAMTIDX_SEQ_GEN_ERR_STATUS_X(x) (((x) & GENMASK(31, 30)) >> 30)
-#define ANA_TABLES_STREAMTIDX_S_INDEX(x) (((x) << 16) & GENMASK(22, 16))
-#define ANA_TABLES_STREAMTIDX_S_INDEX_M GENMASK(22, 16)
-#define ANA_TABLES_STREAMTIDX_S_INDEX_X(x) (((x) & GENMASK(22, 16)) >> 16)
-#define ANA_TABLES_STREAMTIDX_FORCE_SF_BEHAVIOUR BIT(14)
-#define ANA_TABLES_STREAMTIDX_SEQ_HISTORY_LEN(x) (((x) << 8) & GENMASK(13, 8))
-#define ANA_TABLES_STREAMTIDX_SEQ_HISTORY_LEN_M GENMASK(13, 8)
-#define ANA_TABLES_STREAMTIDX_SEQ_HISTORY_LEN_X(x) (((x) & GENMASK(13, 8)) >> 8)
-#define ANA_TABLES_STREAMTIDX_RESET_ON_ROGUE BIT(7)
-#define ANA_TABLES_STREAMTIDX_REDTAG_POP BIT(6)
-#define ANA_TABLES_STREAMTIDX_STREAM_SPLIT BIT(5)
-#define ANA_TABLES_STREAMTIDX_SEQ_SPACE_LOG2(x) ((x) & GENMASK(4, 0))
-#define ANA_TABLES_STREAMTIDX_SEQ_SPACE_LOG2_M GENMASK(4, 0)
-
-#define ANA_TABLES_SEQ_MASK_SPLIT_MASK(x) (((x) << 16) & GENMASK(22, 16))
-#define ANA_TABLES_SEQ_MASK_SPLIT_MASK_M GENMASK(22, 16)
-#define ANA_TABLES_SEQ_MASK_SPLIT_MASK_X(x) (((x) & GENMASK(22, 16)) >> 16)
-#define ANA_TABLES_SEQ_MASK_INPUT_PORT_MASK(x) ((x) & GENMASK(6, 0))
-#define ANA_TABLES_SEQ_MASK_INPUT_PORT_MASK_M GENMASK(6, 0)
-
-#define ANA_TABLES_SFID_MASK_IGR_PORT_MASK(x) (((x) << 1) & GENMASK(7, 1))
-#define ANA_TABLES_SFID_MASK_IGR_PORT_MASK_M GENMASK(7, 1)
-#define ANA_TABLES_SFID_MASK_IGR_PORT_MASK_X(x) (((x) & GENMASK(7, 1)) >> 1)
-#define ANA_TABLES_SFID_MASK_IGR_SRCPORT_MATCH_ENA BIT(0)
-
-#define ANA_TABLES_SFIDACCESS_IGR_PRIO_MATCH_ENA BIT(22)
-#define ANA_TABLES_SFIDACCESS_IGR_PRIO(x) (((x) << 19) & GENMASK(21, 19))
-#define ANA_TABLES_SFIDACCESS_IGR_PRIO_M GENMASK(21, 19)
-#define ANA_TABLES_SFIDACCESS_IGR_PRIO_X(x) (((x) & GENMASK(21, 19)) >> 19)
-#define ANA_TABLES_SFIDACCESS_FORCE_BLOCK BIT(18)
-#define ANA_TABLES_SFIDACCESS_MAX_SDU_LEN(x) (((x) << 2) & GENMASK(17, 2))
-#define ANA_TABLES_SFIDACCESS_MAX_SDU_LEN_M GENMASK(17, 2)
-#define ANA_TABLES_SFIDACCESS_MAX_SDU_LEN_X(x) (((x) & GENMASK(17, 2)) >> 2)
-#define ANA_TABLES_SFIDACCESS_SFID_TBL_CMD(x) ((x) & GENMASK(1, 0))
-#define ANA_TABLES_SFIDACCESS_SFID_TBL_CMD_M GENMASK(1, 0)
-
-#define ANA_TABLES_SFIDTIDX_SGID_VALID BIT(26)
-#define ANA_TABLES_SFIDTIDX_SGID(x) (((x) << 18) & GENMASK(25, 18))
-#define ANA_TABLES_SFIDTIDX_SGID_M GENMASK(25, 18)
-#define ANA_TABLES_SFIDTIDX_SGID_X(x) (((x) & GENMASK(25, 18)) >> 18)
-#define ANA_TABLES_SFIDTIDX_POL_ENA BIT(17)
-#define ANA_TABLES_SFIDTIDX_POL_IDX(x) (((x) << 8) & GENMASK(16, 8))
-#define ANA_TABLES_SFIDTIDX_POL_IDX_M GENMASK(16, 8)
-#define ANA_TABLES_SFIDTIDX_POL_IDX_X(x) (((x) & GENMASK(16, 8)) >> 8)
-#define ANA_TABLES_SFIDTIDX_SFID_INDEX(x) ((x) & GENMASK(7, 0))
-#define ANA_TABLES_SFIDTIDX_SFID_INDEX_M GENMASK(7, 0)
-
-#define ANA_MSTI_STATE_RSZ 0x4
-
-#define ANA_OAM_UPM_LM_CNT_RSZ 0x4
-
-#define ANA_SG_ACCESS_CTRL_SGID(x) ((x) & GENMASK(7, 0))
-#define ANA_SG_ACCESS_CTRL_SGID_M GENMASK(7, 0)
-#define ANA_SG_ACCESS_CTRL_CONFIG_CHANGE BIT(28)
-
-#define ANA_SG_CONFIG_REG_3_BASE_TIME_SEC_MSB(x) ((x) & GENMASK(15, 0))
-#define ANA_SG_CONFIG_REG_3_BASE_TIME_SEC_MSB_M GENMASK(15, 0)
-#define ANA_SG_CONFIG_REG_3_LIST_LENGTH(x) (((x) << 16) & GENMASK(18, 16))
-#define ANA_SG_CONFIG_REG_3_LIST_LENGTH_M GENMASK(18, 16)
-#define ANA_SG_CONFIG_REG_3_LIST_LENGTH_X(x) (((x) & GENMASK(18, 16)) >> 16)
-#define ANA_SG_CONFIG_REG_3_GATE_ENABLE BIT(20)
-#define ANA_SG_CONFIG_REG_3_INIT_IPS(x) (((x) << 24) & GENMASK(27, 24))
-#define ANA_SG_CONFIG_REG_3_INIT_IPS_M GENMASK(27, 24)
-#define ANA_SG_CONFIG_REG_3_INIT_IPS_X(x) (((x) & GENMASK(27, 24)) >> 24)
-#define ANA_SG_CONFIG_REG_3_INIT_GATE_STATE BIT(28)
-
-#define ANA_SG_GCL_GS_CONFIG_RSZ 0x4
-
-#define ANA_SG_GCL_GS_CONFIG_IPS(x) ((x) & GENMASK(3, 0))
-#define ANA_SG_GCL_GS_CONFIG_IPS_M GENMASK(3, 0)
-#define ANA_SG_GCL_GS_CONFIG_GATE_STATE BIT(4)
-
-#define ANA_SG_GCL_TI_CONFIG_RSZ 0x4
-
-#define ANA_SG_STATUS_REG_3_CFG_CHG_TIME_SEC_MSB(x) ((x) & GENMASK(15, 0))
-#define ANA_SG_STATUS_REG_3_CFG_CHG_TIME_SEC_MSB_M GENMASK(15, 0)
-#define ANA_SG_STATUS_REG_3_GATE_STATE BIT(16)
-#define ANA_SG_STATUS_REG_3_IPS(x) (((x) << 20) & GENMASK(23, 20))
-#define ANA_SG_STATUS_REG_3_IPS_M GENMASK(23, 20)
-#define ANA_SG_STATUS_REG_3_IPS_X(x) (((x) & GENMASK(23, 20)) >> 20)
-#define ANA_SG_STATUS_REG_3_CONFIG_PENDING BIT(24)
-
-#define ANA_PORT_VLAN_CFG_GSZ 0x100
-
-#define ANA_PORT_VLAN_CFG_VLAN_VID_AS_ISDX BIT(21)
-#define ANA_PORT_VLAN_CFG_VLAN_AWARE_ENA BIT(20)
-#define ANA_PORT_VLAN_CFG_VLAN_POP_CNT(x) (((x) << 18) & GENMASK(19, 18))
-#define ANA_PORT_VLAN_CFG_VLAN_POP_CNT_M GENMASK(19, 18)
-#define ANA_PORT_VLAN_CFG_VLAN_POP_CNT_X(x) (((x) & GENMASK(19, 18)) >> 18)
-#define ANA_PORT_VLAN_CFG_VLAN_INNER_TAG_ENA BIT(17)
-#define ANA_PORT_VLAN_CFG_VLAN_TAG_TYPE BIT(16)
-#define ANA_PORT_VLAN_CFG_VLAN_DEI BIT(15)
-#define ANA_PORT_VLAN_CFG_VLAN_PCP(x) (((x) << 12) & GENMASK(14, 12))
-#define ANA_PORT_VLAN_CFG_VLAN_PCP_M GENMASK(14, 12)
-#define ANA_PORT_VLAN_CFG_VLAN_PCP_X(x) (((x) & GENMASK(14, 12)) >> 12)
-#define ANA_PORT_VLAN_CFG_VLAN_VID(x) ((x) & GENMASK(11, 0))
-#define ANA_PORT_VLAN_CFG_VLAN_VID_M GENMASK(11, 0)
-
-#define ANA_PORT_DROP_CFG_GSZ 0x100
-
-#define ANA_PORT_DROP_CFG_DROP_UNTAGGED_ENA BIT(6)
-#define ANA_PORT_DROP_CFG_DROP_S_TAGGED_ENA BIT(5)
-#define ANA_PORT_DROP_CFG_DROP_C_TAGGED_ENA BIT(4)
-#define ANA_PORT_DROP_CFG_DROP_PRIO_S_TAGGED_ENA BIT(3)
-#define ANA_PORT_DROP_CFG_DROP_PRIO_C_TAGGED_ENA BIT(2)
-#define ANA_PORT_DROP_CFG_DROP_NULL_MAC_ENA BIT(1)
-#define ANA_PORT_DROP_CFG_DROP_MC_SMAC_ENA BIT(0)
-
-#define ANA_PORT_QOS_CFG_GSZ 0x100
-
-#define ANA_PORT_QOS_CFG_DP_DEFAULT_VAL BIT(8)
-#define ANA_PORT_QOS_CFG_QOS_DEFAULT_VAL(x) (((x) << 5) & GENMASK(7, 5))
-#define ANA_PORT_QOS_CFG_QOS_DEFAULT_VAL_M GENMASK(7, 5)
-#define ANA_PORT_QOS_CFG_QOS_DEFAULT_VAL_X(x) (((x) & GENMASK(7, 5)) >> 5)
-#define ANA_PORT_QOS_CFG_QOS_DSCP_ENA BIT(4)
-#define ANA_PORT_QOS_CFG_QOS_PCP_ENA BIT(3)
-#define ANA_PORT_QOS_CFG_DSCP_TRANSLATE_ENA BIT(2)
-#define ANA_PORT_QOS_CFG_DSCP_REWR_CFG(x) ((x) & GENMASK(1, 0))
-#define ANA_PORT_QOS_CFG_DSCP_REWR_CFG_M GENMASK(1, 0)
-
-#define ANA_PORT_VCAP_CFG_GSZ 0x100
-
-#define ANA_PORT_VCAP_CFG_S1_ENA BIT(14)
-#define ANA_PORT_VCAP_CFG_S1_DMAC_DIP_ENA(x) (((x) << 11) & GENMASK(13, 11))
-#define ANA_PORT_VCAP_CFG_S1_DMAC_DIP_ENA_M GENMASK(13, 11)
-#define ANA_PORT_VCAP_CFG_S1_DMAC_DIP_ENA_X(x) (((x) & GENMASK(13, 11)) >> 11)
-#define ANA_PORT_VCAP_CFG_S1_VLAN_INNER_TAG_ENA(x) (((x) << 8) & GENMASK(10, 8))
-#define ANA_PORT_VCAP_CFG_S1_VLAN_INNER_TAG_ENA_M GENMASK(10, 8)
-#define ANA_PORT_VCAP_CFG_S1_VLAN_INNER_TAG_ENA_X(x) (((x) & GENMASK(10, 8)) >> 8)
-#define ANA_PORT_VCAP_CFG_PAG_VAL(x) ((x) & GENMASK(7, 0))
-#define ANA_PORT_VCAP_CFG_PAG_VAL_M GENMASK(7, 0)
-
-#define ANA_PORT_VCAP_S1_KEY_CFG_GSZ 0x100
-#define ANA_PORT_VCAP_S1_KEY_CFG_RSZ 0x4
-
-#define ANA_PORT_VCAP_S1_KEY_CFG_S1_KEY_IP6_CFG(x) (((x) << 4) & GENMASK(6, 4))
-#define ANA_PORT_VCAP_S1_KEY_CFG_S1_KEY_IP6_CFG_M GENMASK(6, 4)
-#define ANA_PORT_VCAP_S1_KEY_CFG_S1_KEY_IP6_CFG_X(x) (((x) & GENMASK(6, 4)) >> 4)
-#define ANA_PORT_VCAP_S1_KEY_CFG_S1_KEY_IP4_CFG(x) (((x) << 2) & GENMASK(3, 2))
-#define ANA_PORT_VCAP_S1_KEY_CFG_S1_KEY_IP4_CFG_M GENMASK(3, 2)
-#define ANA_PORT_VCAP_S1_KEY_CFG_S1_KEY_IP4_CFG_X(x) (((x) & GENMASK(3, 2)) >> 2)
-#define ANA_PORT_VCAP_S1_KEY_CFG_S1_KEY_OTHER_CFG(x) ((x) & GENMASK(1, 0))
-#define ANA_PORT_VCAP_S1_KEY_CFG_S1_KEY_OTHER_CFG_M GENMASK(1, 0)
-
-#define ANA_PORT_VCAP_S2_CFG_GSZ 0x100
-
-#define ANA_PORT_VCAP_S2_CFG_S2_UDP_PAYLOAD_ENA(x) (((x) << 17) & GENMASK(18, 17))
-#define ANA_PORT_VCAP_S2_CFG_S2_UDP_PAYLOAD_ENA_M GENMASK(18, 17)
-#define ANA_PORT_VCAP_S2_CFG_S2_UDP_PAYLOAD_ENA_X(x) (((x) & GENMASK(18, 17)) >> 17)
-#define ANA_PORT_VCAP_S2_CFG_S2_ETYPE_PAYLOAD_ENA(x) (((x) << 15) & GENMASK(16, 15))
-#define ANA_PORT_VCAP_S2_CFG_S2_ETYPE_PAYLOAD_ENA_M GENMASK(16, 15)
-#define ANA_PORT_VCAP_S2_CFG_S2_ETYPE_PAYLOAD_ENA_X(x) (((x) & GENMASK(16, 15)) >> 15)
-#define ANA_PORT_VCAP_S2_CFG_S2_ENA BIT(14)
-#define ANA_PORT_VCAP_S2_CFG_S2_SNAP_DIS(x) (((x) << 12) & GENMASK(13, 12))
-#define ANA_PORT_VCAP_S2_CFG_S2_SNAP_DIS_M GENMASK(13, 12)
-#define ANA_PORT_VCAP_S2_CFG_S2_SNAP_DIS_X(x) (((x) & GENMASK(13, 12)) >> 12)
-#define ANA_PORT_VCAP_S2_CFG_S2_ARP_DIS(x) (((x) << 10) & GENMASK(11, 10))
-#define ANA_PORT_VCAP_S2_CFG_S2_ARP_DIS_M GENMASK(11, 10)
-#define ANA_PORT_VCAP_S2_CFG_S2_ARP_DIS_X(x) (((x) & GENMASK(11, 10)) >> 10)
-#define ANA_PORT_VCAP_S2_CFG_S2_IP_TCPUDP_DIS(x) (((x) << 8) & GENMASK(9, 8))
-#define ANA_PORT_VCAP_S2_CFG_S2_IP_TCPUDP_DIS_M GENMASK(9, 8)
-#define ANA_PORT_VCAP_S2_CFG_S2_IP_TCPUDP_DIS_X(x) (((x) & GENMASK(9, 8)) >> 8)
-#define ANA_PORT_VCAP_S2_CFG_S2_IP_OTHER_DIS(x) (((x) << 6) & GENMASK(7, 6))
-#define ANA_PORT_VCAP_S2_CFG_S2_IP_OTHER_DIS_M GENMASK(7, 6)
-#define ANA_PORT_VCAP_S2_CFG_S2_IP_OTHER_DIS_X(x) (((x) & GENMASK(7, 6)) >> 6)
-#define ANA_PORT_VCAP_S2_CFG_S2_IP6_CFG(x) (((x) << 2) & GENMASK(5, 2))
-#define ANA_PORT_VCAP_S2_CFG_S2_IP6_CFG_M GENMASK(5, 2)
-#define ANA_PORT_VCAP_S2_CFG_S2_IP6_CFG_X(x) (((x) & GENMASK(5, 2)) >> 2)
-#define ANA_PORT_VCAP_S2_CFG_S2_OAM_DIS(x) ((x) & GENMASK(1, 0))
-#define ANA_PORT_VCAP_S2_CFG_S2_OAM_DIS_M GENMASK(1, 0)
-
-#define ANA_PORT_PCP_DEI_MAP_GSZ 0x100
-#define ANA_PORT_PCP_DEI_MAP_RSZ 0x4
-
-#define ANA_PORT_PCP_DEI_MAP_DP_PCP_DEI_VAL BIT(3)
-#define ANA_PORT_PCP_DEI_MAP_QOS_PCP_DEI_VAL(x) ((x) & GENMASK(2, 0))
-#define ANA_PORT_PCP_DEI_MAP_QOS_PCP_DEI_VAL_M GENMASK(2, 0)
-
-#define ANA_PORT_CPU_FWD_CFG_GSZ 0x100
-
-#define ANA_PORT_CPU_FWD_CFG_CPU_VRAP_REDIR_ENA BIT(7)
-#define ANA_PORT_CPU_FWD_CFG_CPU_MLD_REDIR_ENA BIT(6)
-#define ANA_PORT_CPU_FWD_CFG_CPU_IGMP_REDIR_ENA BIT(5)
-#define ANA_PORT_CPU_FWD_CFG_CPU_IPMC_CTRL_COPY_ENA BIT(4)
-#define ANA_PORT_CPU_FWD_CFG_CPU_SRC_COPY_ENA BIT(3)
-#define ANA_PORT_CPU_FWD_CFG_CPU_ALLBRIDGE_DROP_ENA BIT(2)
-#define ANA_PORT_CPU_FWD_CFG_CPU_ALLBRIDGE_REDIR_ENA BIT(1)
-#define ANA_PORT_CPU_FWD_CFG_CPU_OAM_ENA BIT(0)
-
-#define ANA_PORT_CPU_FWD_BPDU_CFG_GSZ 0x100
-
-#define ANA_PORT_CPU_FWD_BPDU_CFG_BPDU_DROP_ENA(x) (((x) << 16) & GENMASK(31, 16))
-#define ANA_PORT_CPU_FWD_BPDU_CFG_BPDU_DROP_ENA_M GENMASK(31, 16)
-#define ANA_PORT_CPU_FWD_BPDU_CFG_BPDU_DROP_ENA_X(x) (((x) & GENMASK(31, 16)) >> 16)
-#define ANA_PORT_CPU_FWD_BPDU_CFG_BPDU_REDIR_ENA(x) ((x) & GENMASK(15, 0))
-#define ANA_PORT_CPU_FWD_BPDU_CFG_BPDU_REDIR_ENA_M GENMASK(15, 0)
-
-#define ANA_PORT_CPU_FWD_GARP_CFG_GSZ 0x100
-
-#define ANA_PORT_CPU_FWD_GARP_CFG_GARP_DROP_ENA(x) (((x) << 16) & GENMASK(31, 16))
-#define ANA_PORT_CPU_FWD_GARP_CFG_GARP_DROP_ENA_M GENMASK(31, 16)
-#define ANA_PORT_CPU_FWD_GARP_CFG_GARP_DROP_ENA_X(x) (((x) & GENMASK(31, 16)) >> 16)
-#define ANA_PORT_CPU_FWD_GARP_CFG_GARP_REDIR_ENA(x) ((x) & GENMASK(15, 0))
-#define ANA_PORT_CPU_FWD_GARP_CFG_GARP_REDIR_ENA_M GENMASK(15, 0)
-
-#define ANA_PORT_CPU_FWD_CCM_CFG_GSZ 0x100
-
-#define ANA_PORT_CPU_FWD_CCM_CFG_CCM_DROP_ENA(x) (((x) << 16) & GENMASK(31, 16))
-#define ANA_PORT_CPU_FWD_CCM_CFG_CCM_DROP_ENA_M GENMASK(31, 16)
-#define ANA_PORT_CPU_FWD_CCM_CFG_CCM_DROP_ENA_X(x) (((x) & GENMASK(31, 16)) >> 16)
-#define ANA_PORT_CPU_FWD_CCM_CFG_CCM_REDIR_ENA(x) ((x) & GENMASK(15, 0))
-#define ANA_PORT_CPU_FWD_CCM_CFG_CCM_REDIR_ENA_M GENMASK(15, 0)
-
-#define ANA_PORT_PORT_CFG_GSZ 0x100
-
-#define ANA_PORT_PORT_CFG_SRC_MIRROR_ENA BIT(15)
-#define ANA_PORT_PORT_CFG_LIMIT_DROP BIT(14)
-#define ANA_PORT_PORT_CFG_LIMIT_CPU BIT(13)
-#define ANA_PORT_PORT_CFG_LOCKED_PORTMOVE_DROP BIT(12)
-#define ANA_PORT_PORT_CFG_LOCKED_PORTMOVE_CPU BIT(11)
-#define ANA_PORT_PORT_CFG_LEARNDROP BIT(10)
-#define ANA_PORT_PORT_CFG_LEARNCPU BIT(9)
-#define ANA_PORT_PORT_CFG_LEARNAUTO BIT(8)
-#define ANA_PORT_PORT_CFG_LEARN_ENA BIT(7)
-#define ANA_PORT_PORT_CFG_RECV_ENA BIT(6)
-#define ANA_PORT_PORT_CFG_PORTID_VAL(x) (((x) << 2) & GENMASK(5, 2))
-#define ANA_PORT_PORT_CFG_PORTID_VAL_M GENMASK(5, 2)
-#define ANA_PORT_PORT_CFG_PORTID_VAL_X(x) (((x) & GENMASK(5, 2)) >> 2)
-#define ANA_PORT_PORT_CFG_USE_B_DOM_TBL BIT(1)
-#define ANA_PORT_PORT_CFG_LSR_MODE BIT(0)
-
-#define ANA_PORT_POL_CFG_GSZ 0x100
-
-#define ANA_PORT_POL_CFG_POL_CPU_REDIR_8021 BIT(19)
-#define ANA_PORT_POL_CFG_POL_CPU_REDIR_IP BIT(18)
-#define ANA_PORT_POL_CFG_PORT_POL_ENA BIT(17)
-#define ANA_PORT_POL_CFG_QUEUE_POL_ENA(x) (((x) << 9) & GENMASK(16, 9))
-#define ANA_PORT_POL_CFG_QUEUE_POL_ENA_M GENMASK(16, 9)
-#define ANA_PORT_POL_CFG_QUEUE_POL_ENA_X(x) (((x) & GENMASK(16, 9)) >> 9)
-#define ANA_PORT_POL_CFG_POL_ORDER(x) ((x) & GENMASK(8, 0))
-#define ANA_PORT_POL_CFG_POL_ORDER_M GENMASK(8, 0)
-
-#define ANA_PORT_PTP_CFG_GSZ 0x100
-
-#define ANA_PORT_PTP_CFG_PTP_BACKPLANE_MODE BIT(0)
-
-#define ANA_PORT_PTP_DLY1_CFG_GSZ 0x100
-
-#define ANA_PORT_PTP_DLY2_CFG_GSZ 0x100
-
-#define ANA_PORT_SFID_CFG_GSZ 0x100
-#define ANA_PORT_SFID_CFG_RSZ 0x4
-
-#define ANA_PORT_SFID_CFG_SFID_VALID BIT(8)
-#define ANA_PORT_SFID_CFG_SFID(x) ((x) & GENMASK(7, 0))
-#define ANA_PORT_SFID_CFG_SFID_M GENMASK(7, 0)
-
-#define ANA_PFC_PFC_CFG_GSZ 0x40
-
-#define ANA_PFC_PFC_CFG_RX_PFC_ENA(x) (((x) << 2) & GENMASK(9, 2))
-#define ANA_PFC_PFC_CFG_RX_PFC_ENA_M GENMASK(9, 2)
-#define ANA_PFC_PFC_CFG_RX_PFC_ENA_X(x) (((x) & GENMASK(9, 2)) >> 2)
-#define ANA_PFC_PFC_CFG_FC_LINK_SPEED(x) ((x) & GENMASK(1, 0))
-#define ANA_PFC_PFC_CFG_FC_LINK_SPEED_M GENMASK(1, 0)
-
-#define ANA_PFC_PFC_TIMER_GSZ 0x40
-#define ANA_PFC_PFC_TIMER_RSZ 0x4
-
-#define ANA_IPT_OAM_MEP_CFG_GSZ 0x8
-
-#define ANA_IPT_OAM_MEP_CFG_MEP_IDX_P(x) (((x) << 6) & GENMASK(10, 6))
-#define ANA_IPT_OAM_MEP_CFG_MEP_IDX_P_M GENMASK(10, 6)
-#define ANA_IPT_OAM_MEP_CFG_MEP_IDX_P_X(x) (((x) & GENMASK(10, 6)) >> 6)
-#define ANA_IPT_OAM_MEP_CFG_MEP_IDX(x) (((x) << 1) & GENMASK(5, 1))
-#define ANA_IPT_OAM_MEP_CFG_MEP_IDX_M GENMASK(5, 1)
-#define ANA_IPT_OAM_MEP_CFG_MEP_IDX_X(x) (((x) & GENMASK(5, 1)) >> 1)
-#define ANA_IPT_OAM_MEP_CFG_MEP_IDX_ENA BIT(0)
-
-#define ANA_IPT_IPT_GSZ 0x8
-
-#define ANA_IPT_IPT_IPT_CFG(x) (((x) << 15) & GENMASK(16, 15))
-#define ANA_IPT_IPT_IPT_CFG_M GENMASK(16, 15)
-#define ANA_IPT_IPT_IPT_CFG_X(x) (((x) & GENMASK(16, 15)) >> 15)
-#define ANA_IPT_IPT_ISDX_P(x) (((x) << 7) & GENMASK(14, 7))
-#define ANA_IPT_IPT_ISDX_P_M GENMASK(14, 7)
-#define ANA_IPT_IPT_ISDX_P_X(x) (((x) & GENMASK(14, 7)) >> 7)
-#define ANA_IPT_IPT_PPT_IDX(x) ((x) & GENMASK(6, 0))
-#define ANA_IPT_IPT_PPT_IDX_M GENMASK(6, 0)
-
-#define ANA_PPT_PPT_RSZ 0x4
-
-#define ANA_FID_MAP_FID_MAP_RSZ 0x4
-
-#define ANA_FID_MAP_FID_MAP_FID_C_VAL(x) (((x) << 6) & GENMASK(11, 6))
-#define ANA_FID_MAP_FID_MAP_FID_C_VAL_M GENMASK(11, 6)
-#define ANA_FID_MAP_FID_MAP_FID_C_VAL_X(x) (((x) & GENMASK(11, 6)) >> 6)
-#define ANA_FID_MAP_FID_MAP_FID_B_VAL(x) ((x) & GENMASK(5, 0))
-#define ANA_FID_MAP_FID_MAP_FID_B_VAL_M GENMASK(5, 0)
-
-#define ANA_AGGR_CFG_AC_RND_ENA BIT(7)
-#define ANA_AGGR_CFG_AC_DMAC_ENA BIT(6)
-#define ANA_AGGR_CFG_AC_SMAC_ENA BIT(5)
-#define ANA_AGGR_CFG_AC_IP6_FLOW_LBL_ENA BIT(4)
-#define ANA_AGGR_CFG_AC_IP6_TCPUDP_ENA BIT(3)
-#define ANA_AGGR_CFG_AC_IP4_SIPDIP_ENA BIT(2)
-#define ANA_AGGR_CFG_AC_IP4_TCPUDP_ENA BIT(1)
-#define ANA_AGGR_CFG_AC_ISDX_ENA BIT(0)
-
-#define ANA_CPUQ_CFG_CPUQ_MLD(x) (((x) << 27) & GENMASK(29, 27))
-#define ANA_CPUQ_CFG_CPUQ_MLD_M GENMASK(29, 27)
-#define ANA_CPUQ_CFG_CPUQ_MLD_X(x) (((x) & GENMASK(29, 27)) >> 27)
-#define ANA_CPUQ_CFG_CPUQ_IGMP(x) (((x) << 24) & GENMASK(26, 24))
-#define ANA_CPUQ_CFG_CPUQ_IGMP_M GENMASK(26, 24)
-#define ANA_CPUQ_CFG_CPUQ_IGMP_X(x) (((x) & GENMASK(26, 24)) >> 24)
-#define ANA_CPUQ_CFG_CPUQ_IPMC_CTRL(x) (((x) << 21) & GENMASK(23, 21))
-#define ANA_CPUQ_CFG_CPUQ_IPMC_CTRL_M GENMASK(23, 21)
-#define ANA_CPUQ_CFG_CPUQ_IPMC_CTRL_X(x) (((x) & GENMASK(23, 21)) >> 21)
-#define ANA_CPUQ_CFG_CPUQ_ALLBRIDGE(x) (((x) << 18) & GENMASK(20, 18))
-#define ANA_CPUQ_CFG_CPUQ_ALLBRIDGE_M GENMASK(20, 18)
-#define ANA_CPUQ_CFG_CPUQ_ALLBRIDGE_X(x) (((x) & GENMASK(20, 18)) >> 18)
-#define ANA_CPUQ_CFG_CPUQ_LOCKED_PORTMOVE(x) (((x) << 15) & GENMASK(17, 15))
-#define ANA_CPUQ_CFG_CPUQ_LOCKED_PORTMOVE_M GENMASK(17, 15)
-#define ANA_CPUQ_CFG_CPUQ_LOCKED_PORTMOVE_X(x) (((x) & GENMASK(17, 15)) >> 15)
-#define ANA_CPUQ_CFG_CPUQ_SRC_COPY(x) (((x) << 12) & GENMASK(14, 12))
-#define ANA_CPUQ_CFG_CPUQ_SRC_COPY_M GENMASK(14, 12)
-#define ANA_CPUQ_CFG_CPUQ_SRC_COPY_X(x) (((x) & GENMASK(14, 12)) >> 12)
-#define ANA_CPUQ_CFG_CPUQ_MAC_COPY(x) (((x) << 9) & GENMASK(11, 9))
-#define ANA_CPUQ_CFG_CPUQ_MAC_COPY_M GENMASK(11, 9)
-#define ANA_CPUQ_CFG_CPUQ_MAC_COPY_X(x) (((x) & GENMASK(11, 9)) >> 9)
-#define ANA_CPUQ_CFG_CPUQ_LRN(x) (((x) << 6) & GENMASK(8, 6))
-#define ANA_CPUQ_CFG_CPUQ_LRN_M GENMASK(8, 6)
-#define ANA_CPUQ_CFG_CPUQ_LRN_X(x) (((x) & GENMASK(8, 6)) >> 6)
-#define ANA_CPUQ_CFG_CPUQ_MIRROR(x) (((x) << 3) & GENMASK(5, 3))
-#define ANA_CPUQ_CFG_CPUQ_MIRROR_M GENMASK(5, 3)
-#define ANA_CPUQ_CFG_CPUQ_MIRROR_X(x) (((x) & GENMASK(5, 3)) >> 3)
-#define ANA_CPUQ_CFG_CPUQ_SFLOW(x) ((x) & GENMASK(2, 0))
-#define ANA_CPUQ_CFG_CPUQ_SFLOW_M GENMASK(2, 0)
-
-#define ANA_CPUQ_8021_CFG_RSZ 0x4
-
-#define ANA_CPUQ_8021_CFG_CPUQ_BPDU_VAL(x) (((x) << 6) & GENMASK(8, 6))
-#define ANA_CPUQ_8021_CFG_CPUQ_BPDU_VAL_M GENMASK(8, 6)
-#define ANA_CPUQ_8021_CFG_CPUQ_BPDU_VAL_X(x) (((x) & GENMASK(8, 6)) >> 6)
-#define ANA_CPUQ_8021_CFG_CPUQ_GARP_VAL(x) (((x) << 3) & GENMASK(5, 3))
-#define ANA_CPUQ_8021_CFG_CPUQ_GARP_VAL_M GENMASK(5, 3)
-#define ANA_CPUQ_8021_CFG_CPUQ_GARP_VAL_X(x) (((x) & GENMASK(5, 3)) >> 3)
-#define ANA_CPUQ_8021_CFG_CPUQ_CCM_VAL(x) ((x) & GENMASK(2, 0))
-#define ANA_CPUQ_8021_CFG_CPUQ_CCM_VAL_M GENMASK(2, 0)
-
-#define ANA_DSCP_CFG_RSZ 0x4
-
-#define ANA_DSCP_CFG_DP_DSCP_VAL BIT(11)
-#define ANA_DSCP_CFG_QOS_DSCP_VAL(x) (((x) << 8) & GENMASK(10, 8))
-#define ANA_DSCP_CFG_QOS_DSCP_VAL_M GENMASK(10, 8)
-#define ANA_DSCP_CFG_QOS_DSCP_VAL_X(x) (((x) & GENMASK(10, 8)) >> 8)
-#define ANA_DSCP_CFG_DSCP_TRANSLATE_VAL(x) (((x) << 2) & GENMASK(7, 2))
-#define ANA_DSCP_CFG_DSCP_TRANSLATE_VAL_M GENMASK(7, 2)
-#define ANA_DSCP_CFG_DSCP_TRANSLATE_VAL_X(x) (((x) & GENMASK(7, 2)) >> 2)
-#define ANA_DSCP_CFG_DSCP_TRUST_ENA BIT(1)
-#define ANA_DSCP_CFG_DSCP_REWR_ENA BIT(0)
-
-#define ANA_DSCP_REWR_CFG_RSZ 0x4
-
-#define ANA_VCAP_RNG_TYPE_CFG_RSZ 0x4
-
-#define ANA_VCAP_RNG_VAL_CFG_RSZ 0x4
-
-#define ANA_VCAP_RNG_VAL_CFG_VCAP_RNG_MIN_VAL(x) (((x) << 16) & GENMASK(31, 16))
-#define ANA_VCAP_RNG_VAL_CFG_VCAP_RNG_MIN_VAL_M GENMASK(31, 16)
-#define ANA_VCAP_RNG_VAL_CFG_VCAP_RNG_MIN_VAL_X(x) (((x) & GENMASK(31, 16)) >> 16)
-#define ANA_VCAP_RNG_VAL_CFG_VCAP_RNG_MAX_VAL(x) ((x) & GENMASK(15, 0))
-#define ANA_VCAP_RNG_VAL_CFG_VCAP_RNG_MAX_VAL_M GENMASK(15, 0)
-
-#define ANA_VRAP_CFG_VRAP_VLAN_AWARE_ENA BIT(12)
-#define ANA_VRAP_CFG_VRAP_VID(x) ((x) & GENMASK(11, 0))
-#define ANA_VRAP_CFG_VRAP_VID_M GENMASK(11, 0)
-
-#define ANA_DISCARD_CFG_DROP_TAGGING_ISDX0 BIT(3)
-#define ANA_DISCARD_CFG_DROP_CTRLPROT_ISDX0 BIT(2)
-#define ANA_DISCARD_CFG_DROP_TAGGING_S2_ENA BIT(1)
-#define ANA_DISCARD_CFG_DROP_CTRLPROT_S2_ENA BIT(0)
-
-#define ANA_FID_CFG_VID_MC_ENA BIT(0)
-
-#define ANA_POL_PIR_CFG_GSZ 0x20
-
-#define ANA_POL_PIR_CFG_PIR_RATE(x) (((x) << 6) & GENMASK(20, 6))
-#define ANA_POL_PIR_CFG_PIR_RATE_M GENMASK(20, 6)
-#define ANA_POL_PIR_CFG_PIR_RATE_X(x) (((x) & GENMASK(20, 6)) >> 6)
-#define ANA_POL_PIR_CFG_PIR_BURST(x) ((x) & GENMASK(5, 0))
-#define ANA_POL_PIR_CFG_PIR_BURST_M GENMASK(5, 0)
-
-#define ANA_POL_CIR_CFG_GSZ 0x20
-
-#define ANA_POL_CIR_CFG_CIR_RATE(x) (((x) << 6) & GENMASK(20, 6))
-#define ANA_POL_CIR_CFG_CIR_RATE_M GENMASK(20, 6)
-#define ANA_POL_CIR_CFG_CIR_RATE_X(x) (((x) & GENMASK(20, 6)) >> 6)
-#define ANA_POL_CIR_CFG_CIR_BURST(x) ((x) & GENMASK(5, 0))
-#define ANA_POL_CIR_CFG_CIR_BURST_M GENMASK(5, 0)
-
-#define ANA_POL_MODE_CFG_GSZ 0x20
-
-#define ANA_POL_MODE_CFG_IPG_SIZE(x) (((x) << 5) & GENMASK(9, 5))
-#define ANA_POL_MODE_CFG_IPG_SIZE_M GENMASK(9, 5)
-#define ANA_POL_MODE_CFG_IPG_SIZE_X(x) (((x) & GENMASK(9, 5)) >> 5)
-#define ANA_POL_MODE_CFG_FRM_MODE(x) (((x) << 3) & GENMASK(4, 3))
-#define ANA_POL_MODE_CFG_FRM_MODE_M GENMASK(4, 3)
-#define ANA_POL_MODE_CFG_FRM_MODE_X(x) (((x) & GENMASK(4, 3)) >> 3)
-#define ANA_POL_MODE_CFG_DLB_COUPLED BIT(2)
-#define ANA_POL_MODE_CFG_CIR_ENA BIT(1)
-#define ANA_POL_MODE_CFG_OVERSHOOT_ENA BIT(0)
-
-#define ANA_POL_PIR_STATE_GSZ 0x20
-
-#define ANA_POL_CIR_STATE_GSZ 0x20
-
-#define ANA_POL_STATE_GSZ 0x20
-
-#define ANA_POL_FLOWC_RSZ 0x4
-
-#define ANA_POL_FLOWC_POL_FLOWC BIT(0)
-
-#define ANA_POL_HYST_POL_FC_HYST(x) (((x) << 4) & GENMASK(9, 4))
-#define ANA_POL_HYST_POL_FC_HYST_M GENMASK(9, 4)
-#define ANA_POL_HYST_POL_FC_HYST_X(x) (((x) & GENMASK(9, 4)) >> 4)
-#define ANA_POL_HYST_POL_STOP_HYST(x) ((x) & GENMASK(3, 0))
-#define ANA_POL_HYST_POL_STOP_HYST_M GENMASK(3, 0)
-
-#define ANA_POL_MISC_CFG_POL_CLOSE_ALL BIT(1)
-#define ANA_POL_MISC_CFG_POL_LEAK_DIS BIT(0)
-
-#endif
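
The register headers deleted above all follow one convention: for each multi-bit field there is a pack macro FIELD(x) that shifts and masks a value into position, a mask FIELD_M, and an extract macro FIELD_X(x) that undoes the shift. As a minimal userspace sketch of that convention (not part of the patch; BIT/GENMASK are redefined locally and the field name is hypothetical):

#include <stdio.h>

/* Userspace stand-ins for the kernel's BIT()/GENMASK() helpers. */
#define BIT(n)        (1u << (n))
#define GENMASK(h, l) (((~0u) >> (31 - (h))) & ~((1u << (l)) - 1u))

/* Hypothetical field occupying bits 17:12, mirroring the header style. */
#define EXAMPLE_FLD(x)   (((x) << 12) & GENMASK(17, 12)) /* pack */
#define EXAMPLE_FLD_M    GENMASK(17, 12)                 /* mask */
#define EXAMPLE_FLD_X(x) (((x) & GENMASK(17, 12)) >> 12) /* extract */

int main(void)
{
	unsigned int reg = 0;

	reg |= EXAMPLE_FLD(0x2a);             /* write 0x2a into bits 17:12 */
	printf("reg      = 0x%08x\n", reg);
	printf("readback = 0x%02x\n", EXAMPLE_FLD_X(reg)); /* 0x2a again */
	return 0;
}

The pack macro masks after shifting, so out-of-range input is silently truncated to the field width rather than corrupting neighbouring fields.
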
diff --git a/drivers/net/ethernet/mscc/ocelot_board.c b/drivers/net/ethernet/mscc/ocelot_board.c
index 2da8eee27e98..b38820849faa 100644
--- a/drivers/net/ethernet/mscc/ocelot_board.c
+++ b/drivers/net/ethernet/mscc/ocelot_board.c
@@ -402,9 +402,9 @@ static int mscc_ocelot_probe(struct platform_device *pdev)
of_get_phy_mode(portnp, &phy_mode);
- priv->phy_mode = phy_mode;
+ ocelot_port->phy_mode = phy_mode;
- switch (priv->phy_mode) {
+ switch (ocelot_port->phy_mode) {
case PHY_INTERFACE_MODE_NA:
continue;
case PHY_INTERFACE_MODE_SGMII:
diff --git a/drivers/net/ethernet/mscc/ocelot_dev.h b/drivers/net/ethernet/mscc/ocelot_dev.h
deleted file mode 100644
index 0a50d53bbd3f..000000000000
--- a/drivers/net/ethernet/mscc/ocelot_dev.h
+++ /dev/null
@@ -1,275 +0,0 @@
-/* SPDX-License-Identifier: (GPL-2.0 OR MIT) */
-/*
- * Microsemi Ocelot Switch driver
- *
- * Copyright (c) 2017 Microsemi Corporation
- */
-
-#ifndef _MSCC_OCELOT_DEV_H_
-#define _MSCC_OCELOT_DEV_H_
-
-#define DEV_CLOCK_CFG 0x0
-
-#define DEV_CLOCK_CFG_MAC_TX_RST BIT(7)
-#define DEV_CLOCK_CFG_MAC_RX_RST BIT(6)
-#define DEV_CLOCK_CFG_PCS_TX_RST BIT(5)
-#define DEV_CLOCK_CFG_PCS_RX_RST BIT(4)
-#define DEV_CLOCK_CFG_PORT_RST BIT(3)
-#define DEV_CLOCK_CFG_PHY_RST BIT(2)
-#define DEV_CLOCK_CFG_LINK_SPEED(x) ((x) & GENMASK(1, 0))
-#define DEV_CLOCK_CFG_LINK_SPEED_M GENMASK(1, 0)
-
-#define DEV_PORT_MISC 0x4
-
-#define DEV_PORT_MISC_FWD_ERROR_ENA BIT(4)
-#define DEV_PORT_MISC_FWD_PAUSE_ENA BIT(3)
-#define DEV_PORT_MISC_FWD_CTRL_ENA BIT(2)
-#define DEV_PORT_MISC_DEV_LOOP_ENA BIT(1)
-#define DEV_PORT_MISC_HDX_FAST_DIS BIT(0)
-
-#define DEV_EVENTS 0x8
-
-#define DEV_EEE_CFG 0xc
-
-#define DEV_EEE_CFG_EEE_ENA BIT(22)
-#define DEV_EEE_CFG_EEE_TIMER_AGE(x) (((x) << 15) & GENMASK(21, 15))
-#define DEV_EEE_CFG_EEE_TIMER_AGE_M GENMASK(21, 15)
-#define DEV_EEE_CFG_EEE_TIMER_AGE_X(x) (((x) & GENMASK(21, 15)) >> 15)
-#define DEV_EEE_CFG_EEE_TIMER_WAKEUP(x) (((x) << 8) & GENMASK(14, 8))
-#define DEV_EEE_CFG_EEE_TIMER_WAKEUP_M GENMASK(14, 8)
-#define DEV_EEE_CFG_EEE_TIMER_WAKEUP_X(x) (((x) & GENMASK(14, 8)) >> 8)
-#define DEV_EEE_CFG_EEE_TIMER_HOLDOFF(x) (((x) << 1) & GENMASK(7, 1))
-#define DEV_EEE_CFG_EEE_TIMER_HOLDOFF_M GENMASK(7, 1)
-#define DEV_EEE_CFG_EEE_TIMER_HOLDOFF_X(x) (((x) & GENMASK(7, 1)) >> 1)
-#define DEV_EEE_CFG_PORT_LPI BIT(0)
-
-#define DEV_RX_PATH_DELAY 0x10
-
-#define DEV_TX_PATH_DELAY 0x14
-
-#define DEV_PTP_PREDICT_CFG 0x18
-
-#define DEV_PTP_PREDICT_CFG_PTP_PHY_PREDICT_CFG(x) (((x) << 4) & GENMASK(11, 4))
-#define DEV_PTP_PREDICT_CFG_PTP_PHY_PREDICT_CFG_M GENMASK(11, 4)
-#define DEV_PTP_PREDICT_CFG_PTP_PHY_PREDICT_CFG_X(x) (((x) & GENMASK(11, 4)) >> 4)
-#define DEV_PTP_PREDICT_CFG_PTP_PHASE_PREDICT_CFG(x) ((x) & GENMASK(3, 0))
-#define DEV_PTP_PREDICT_CFG_PTP_PHASE_PREDICT_CFG_M GENMASK(3, 0)
-
-#define DEV_MAC_ENA_CFG 0x1c
-
-#define DEV_MAC_ENA_CFG_RX_ENA BIT(4)
-#define DEV_MAC_ENA_CFG_TX_ENA BIT(0)
-
-#define DEV_MAC_MODE_CFG 0x20
-
-#define DEV_MAC_MODE_CFG_FC_WORD_SYNC_ENA BIT(8)
-#define DEV_MAC_MODE_CFG_GIGA_MODE_ENA BIT(4)
-#define DEV_MAC_MODE_CFG_FDX_ENA BIT(0)
-
-#define DEV_MAC_MAXLEN_CFG 0x24
-
-#define DEV_MAC_TAGS_CFG 0x28
-
-#define DEV_MAC_TAGS_CFG_TAG_ID(x) (((x) << 16) & GENMASK(31, 16))
-#define DEV_MAC_TAGS_CFG_TAG_ID_M GENMASK(31, 16)
-#define DEV_MAC_TAGS_CFG_TAG_ID_X(x) (((x) & GENMASK(31, 16)) >> 16)
-#define DEV_MAC_TAGS_CFG_VLAN_LEN_AWR_ENA BIT(2)
-#define DEV_MAC_TAGS_CFG_PB_ENA BIT(1)
-#define DEV_MAC_TAGS_CFG_VLAN_AWR_ENA BIT(0)
-
-#define DEV_MAC_ADV_CHK_CFG 0x2c
-
-#define DEV_MAC_ADV_CHK_CFG_LEN_DROP_ENA BIT(0)
-
-#define DEV_MAC_IFG_CFG 0x30
-
-#define DEV_MAC_IFG_CFG_RESTORE_OLD_IPG_CHECK BIT(17)
-#define DEV_MAC_IFG_CFG_REDUCED_TX_IFG BIT(16)
-#define DEV_MAC_IFG_CFG_TX_IFG(x) (((x) << 8) & GENMASK(12, 8))
-#define DEV_MAC_IFG_CFG_TX_IFG_M GENMASK(12, 8)
-#define DEV_MAC_IFG_CFG_TX_IFG_X(x) (((x) & GENMASK(12, 8)) >> 8)
-#define DEV_MAC_IFG_CFG_RX_IFG2(x) (((x) << 4) & GENMASK(7, 4))
-#define DEV_MAC_IFG_CFG_RX_IFG2_M GENMASK(7, 4)
-#define DEV_MAC_IFG_CFG_RX_IFG2_X(x) (((x) & GENMASK(7, 4)) >> 4)
-#define DEV_MAC_IFG_CFG_RX_IFG1(x) ((x) & GENMASK(3, 0))
-#define DEV_MAC_IFG_CFG_RX_IFG1_M GENMASK(3, 0)
-
-#define DEV_MAC_HDX_CFG 0x34
-
-#define DEV_MAC_HDX_CFG_BYPASS_COL_SYNC BIT(26)
-#define DEV_MAC_HDX_CFG_OB_ENA BIT(25)
-#define DEV_MAC_HDX_CFG_WEXC_DIS BIT(24)
-#define DEV_MAC_HDX_CFG_SEED(x) (((x) << 16) & GENMASK(23, 16))
-#define DEV_MAC_HDX_CFG_SEED_M GENMASK(23, 16)
-#define DEV_MAC_HDX_CFG_SEED_X(x) (((x) & GENMASK(23, 16)) >> 16)
-#define DEV_MAC_HDX_CFG_SEED_LOAD BIT(12)
-#define DEV_MAC_HDX_CFG_RETRY_AFTER_EXC_COL_ENA BIT(8)
-#define DEV_MAC_HDX_CFG_LATE_COL_POS(x) ((x) & GENMASK(6, 0))
-#define DEV_MAC_HDX_CFG_LATE_COL_POS_M GENMASK(6, 0)
-
-#define DEV_MAC_DBG_CFG 0x38
-
-#define DEV_MAC_DBG_CFG_TBI_MODE BIT(4)
-#define DEV_MAC_DBG_CFG_IFG_CRS_EXT_CHK_ENA BIT(0)
-
-#define DEV_MAC_FC_MAC_LOW_CFG 0x3c
-
-#define DEV_MAC_FC_MAC_HIGH_CFG 0x40
-
-#define DEV_MAC_STICKY 0x44
-
-#define DEV_MAC_STICKY_RX_IPG_SHRINK_STICKY BIT(9)
-#define DEV_MAC_STICKY_RX_PREAM_SHRINK_STICKY BIT(8)
-#define DEV_MAC_STICKY_RX_CARRIER_EXT_STICKY BIT(7)
-#define DEV_MAC_STICKY_RX_CARRIER_EXT_ERR_STICKY BIT(6)
-#define DEV_MAC_STICKY_RX_JUNK_STICKY BIT(5)
-#define DEV_MAC_STICKY_TX_RETRANSMIT_STICKY BIT(4)
-#define DEV_MAC_STICKY_TX_JAM_STICKY BIT(3)
-#define DEV_MAC_STICKY_TX_FIFO_OFLW_STICKY BIT(2)
-#define DEV_MAC_STICKY_TX_FRM_LEN_OVR_STICKY BIT(1)
-#define DEV_MAC_STICKY_TX_ABORT_STICKY BIT(0)
-
-#define PCS1G_CFG 0x48
-
-#define PCS1G_CFG_LINK_STATUS_TYPE BIT(4)
-#define PCS1G_CFG_AN_LINK_CTRL_ENA BIT(1)
-#define PCS1G_CFG_PCS_ENA BIT(0)
-
-#define PCS1G_MODE_CFG 0x4c
-
-#define PCS1G_MODE_CFG_UNIDIR_MODE_ENA BIT(4)
-#define PCS1G_MODE_CFG_SGMII_MODE_ENA BIT(0)
-
-#define PCS1G_SD_CFG 0x50
-
-#define PCS1G_SD_CFG_SD_SEL BIT(8)
-#define PCS1G_SD_CFG_SD_POL BIT(4)
-#define PCS1G_SD_CFG_SD_ENA BIT(0)
-
-#define PCS1G_ANEG_CFG 0x54
-
-#define PCS1G_ANEG_CFG_ADV_ABILITY(x) (((x) << 16) & GENMASK(31, 16))
-#define PCS1G_ANEG_CFG_ADV_ABILITY_M GENMASK(31, 16)
-#define PCS1G_ANEG_CFG_ADV_ABILITY_X(x) (((x) & GENMASK(31, 16)) >> 16)
-#define PCS1G_ANEG_CFG_SW_RESOLVE_ENA BIT(8)
-#define PCS1G_ANEG_CFG_ANEG_RESTART_ONE_SHOT BIT(1)
-#define PCS1G_ANEG_CFG_ANEG_ENA BIT(0)
-
-#define PCS1G_ANEG_NP_CFG 0x58
-
-#define PCS1G_ANEG_NP_CFG_NP_TX(x) (((x) << 16) & GENMASK(31, 16))
-#define PCS1G_ANEG_NP_CFG_NP_TX_M GENMASK(31, 16)
-#define PCS1G_ANEG_NP_CFG_NP_TX_X(x) (((x) & GENMASK(31, 16)) >> 16)
-#define PCS1G_ANEG_NP_CFG_NP_LOADED_ONE_SHOT BIT(0)
-
-#define PCS1G_LB_CFG 0x5c
-
-#define PCS1G_LB_CFG_RA_ENA BIT(4)
-#define PCS1G_LB_CFG_GMII_PHY_LB_ENA BIT(1)
-#define PCS1G_LB_CFG_TBI_HOST_LB_ENA BIT(0)
-
-#define PCS1G_DBG_CFG 0x60
-
-#define PCS1G_DBG_CFG_UDLT BIT(0)
-
-#define PCS1G_CDET_CFG 0x64
-
-#define PCS1G_CDET_CFG_CDET_ENA BIT(0)
-
-#define PCS1G_ANEG_STATUS 0x68
-
-#define PCS1G_ANEG_STATUS_LP_ADV_ABILITY(x) (((x) << 16) & GENMASK(31, 16))
-#define PCS1G_ANEG_STATUS_LP_ADV_ABILITY_M GENMASK(31, 16)
-#define PCS1G_ANEG_STATUS_LP_ADV_ABILITY_X(x) (((x) & GENMASK(31, 16)) >> 16)
-#define PCS1G_ANEG_STATUS_PR BIT(4)
-#define PCS1G_ANEG_STATUS_PAGE_RX_STICKY BIT(3)
-#define PCS1G_ANEG_STATUS_ANEG_COMPLETE BIT(0)
-
-#define PCS1G_ANEG_NP_STATUS 0x6c
-
-#define PCS1G_LINK_STATUS 0x70
-
-#define PCS1G_LINK_STATUS_DELAY_VAR(x) (((x) << 12) & GENMASK(15, 12))
-#define PCS1G_LINK_STATUS_DELAY_VAR_M GENMASK(15, 12)
-#define PCS1G_LINK_STATUS_DELAY_VAR_X(x) (((x) & GENMASK(15, 12)) >> 12)
-#define PCS1G_LINK_STATUS_SIGNAL_DETECT BIT(8)
-#define PCS1G_LINK_STATUS_LINK_STATUS BIT(4)
-#define PCS1G_LINK_STATUS_SYNC_STATUS BIT(0)
-
-#define PCS1G_LINK_DOWN_CNT 0x74
-
-#define PCS1G_STICKY 0x78
-
-#define PCS1G_STICKY_LINK_DOWN_STICKY BIT(4)
-#define PCS1G_STICKY_OUT_OF_SYNC_STICKY BIT(0)
-
-#define PCS1G_DEBUG_STATUS 0x7c
-
-#define PCS1G_LPI_CFG 0x80
-
-#define PCS1G_LPI_CFG_QSGMII_MS_SEL BIT(20)
-#define PCS1G_LPI_CFG_RX_LPI_OUT_DIS BIT(17)
-#define PCS1G_LPI_CFG_LPI_TESTMODE BIT(16)
-#define PCS1G_LPI_CFG_LPI_RX_WTIM(x) (((x) << 4) & GENMASK(5, 4))
-#define PCS1G_LPI_CFG_LPI_RX_WTIM_M GENMASK(5, 4)
-#define PCS1G_LPI_CFG_LPI_RX_WTIM_X(x) (((x) & GENMASK(5, 4)) >> 4)
-#define PCS1G_LPI_CFG_TX_ASSERT_LPIDLE BIT(0)
-
-#define PCS1G_LPI_WAKE_ERROR_CNT 0x84
-
-#define PCS1G_LPI_STATUS 0x88
-
-#define PCS1G_LPI_STATUS_RX_LPI_FAIL BIT(16)
-#define PCS1G_LPI_STATUS_RX_LPI_EVENT_STICKY BIT(12)
-#define PCS1G_LPI_STATUS_RX_QUIET BIT(9)
-#define PCS1G_LPI_STATUS_RX_LPI_MODE BIT(8)
-#define PCS1G_LPI_STATUS_TX_LPI_EVENT_STICKY BIT(4)
-#define PCS1G_LPI_STATUS_TX_QUIET BIT(1)
-#define PCS1G_LPI_STATUS_TX_LPI_MODE BIT(0)
-
-#define PCS1G_TSTPAT_MODE_CFG 0x8c
-
-#define PCS1G_TSTPAT_STATUS 0x90
-
-#define PCS1G_TSTPAT_STATUS_JTP_ERR_CNT(x) (((x) << 8) & GENMASK(15, 8))
-#define PCS1G_TSTPAT_STATUS_JTP_ERR_CNT_M GENMASK(15, 8)
-#define PCS1G_TSTPAT_STATUS_JTP_ERR_CNT_X(x) (((x) & GENMASK(15, 8)) >> 8)
-#define PCS1G_TSTPAT_STATUS_JTP_ERR BIT(4)
-#define PCS1G_TSTPAT_STATUS_JTP_LOCK BIT(0)
-
-#define DEV_PCS_FX100_CFG 0x94
-
-#define DEV_PCS_FX100_CFG_SD_SEL BIT(26)
-#define DEV_PCS_FX100_CFG_SD_POL BIT(25)
-#define DEV_PCS_FX100_CFG_SD_ENA BIT(24)
-#define DEV_PCS_FX100_CFG_LOOPBACK_ENA BIT(20)
-#define DEV_PCS_FX100_CFG_SWAP_MII_ENA BIT(16)
-#define DEV_PCS_FX100_CFG_RXBITSEL(x) (((x) << 12) & GENMASK(15, 12))
-#define DEV_PCS_FX100_CFG_RXBITSEL_M GENMASK(15, 12)
-#define DEV_PCS_FX100_CFG_RXBITSEL_X(x) (((x) & GENMASK(15, 12)) >> 12)
-#define DEV_PCS_FX100_CFG_SIGDET_CFG(x) (((x) << 9) & GENMASK(10, 9))
-#define DEV_PCS_FX100_CFG_SIGDET_CFG_M GENMASK(10, 9)
-#define DEV_PCS_FX100_CFG_SIGDET_CFG_X(x) (((x) & GENMASK(10, 9)) >> 9)
-#define DEV_PCS_FX100_CFG_LINKHYST_TM_ENA BIT(8)
-#define DEV_PCS_FX100_CFG_LINKHYSTTIMER(x) (((x) << 4) & GENMASK(7, 4))
-#define DEV_PCS_FX100_CFG_LINKHYSTTIMER_M GENMASK(7, 4)
-#define DEV_PCS_FX100_CFG_LINKHYSTTIMER_X(x) (((x) & GENMASK(7, 4)) >> 4)
-#define DEV_PCS_FX100_CFG_UNIDIR_MODE_ENA BIT(3)
-#define DEV_PCS_FX100_CFG_FEFCHK_ENA BIT(2)
-#define DEV_PCS_FX100_CFG_FEFGEN_ENA BIT(1)
-#define DEV_PCS_FX100_CFG_PCS_ENA BIT(0)
-
-#define DEV_PCS_FX100_STATUS 0x98
-
-#define DEV_PCS_FX100_STATUS_EDGE_POS_PTP(x) (((x) << 8) & GENMASK(11, 8))
-#define DEV_PCS_FX100_STATUS_EDGE_POS_PTP_M GENMASK(11, 8)
-#define DEV_PCS_FX100_STATUS_EDGE_POS_PTP_X(x) (((x) & GENMASK(11, 8)) >> 8)
-#define DEV_PCS_FX100_STATUS_PCS_ERROR_STICKY BIT(7)
-#define DEV_PCS_FX100_STATUS_FEF_FOUND_STICKY BIT(6)
-#define DEV_PCS_FX100_STATUS_SSD_ERROR_STICKY BIT(5)
-#define DEV_PCS_FX100_STATUS_SYNC_LOST_STICKY BIT(4)
-#define DEV_PCS_FX100_STATUS_FEF_STATUS BIT(2)
-#define DEV_PCS_FX100_STATUS_SIGNAL_DETECT BIT(1)
-#define DEV_PCS_FX100_STATUS_SYNC_STATUS BIT(0)
-
-#endif
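
Fields such as DEV_MAC_IFG_CFG_TX_IFG above are typically updated read-modify-write: clear the field with its _M mask, then OR in the packed new value. A hedged sketch of that pattern (register access stubbed out with a plain variable; in a driver these would be readl()/writel()):

#include <stdio.h>

#define GENMASK(h, l) (((~0u) >> (31 - (h))) & ~((1u << (l)) - 1u))

/* Mirrors DEV_MAC_IFG_CFG_TX_IFG (bits 12:8) from the header above. */
#define TX_IFG(x)  (((x) << 8) & GENMASK(12, 8))
#define TX_IFG_M   GENMASK(12, 8)

/* Stand-in for a memory-mapped register in this sketch. */
static unsigned int fake_reg = 0xdeadbeef;

static void update_tx_ifg(unsigned int ifg)
{
	unsigned int val = fake_reg;   /* would be a readl() in a driver */

	val &= ~TX_IFG_M;              /* clear the old field */
	val |= TX_IFG(ifg);            /* pack in the new value */
	fake_reg = val;                /* would be a writel() */
}

int main(void)
{
	update_tx_ifg(0x11);
	printf("reg = 0x%08x\n", fake_reg); /* bits 12:8 now hold 0x11 */
	return 0;
}
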
diff --git a/drivers/net/ethernet/mscc/ocelot_qsys.h b/drivers/net/ethernet/mscc/ocelot_qsys.h
deleted file mode 100644
index d8c63aa761be..000000000000
--- a/drivers/net/ethernet/mscc/ocelot_qsys.h
+++ /dev/null
@@ -1,270 +0,0 @@
-/* SPDX-License-Identifier: (GPL-2.0 OR MIT) */
-/*
- * Microsemi Ocelot Switch driver
- *
- * Copyright (c) 2017 Microsemi Corporation
- */
-
-#ifndef _MSCC_OCELOT_QSYS_H_
-#define _MSCC_OCELOT_QSYS_H_
-
-#define QSYS_PORT_MODE_RSZ 0x4
-
-#define QSYS_PORT_MODE_DEQUEUE_DIS BIT(1)
-#define QSYS_PORT_MODE_DEQUEUE_LATE BIT(0)
-
-#define QSYS_SWITCH_PORT_MODE_RSZ 0x4
-
-#define QSYS_SWITCH_PORT_MODE_PORT_ENA BIT(14)
-#define QSYS_SWITCH_PORT_MODE_SCH_NEXT_CFG(x) (((x) << 11) & GENMASK(13, 11))
-#define QSYS_SWITCH_PORT_MODE_SCH_NEXT_CFG_M GENMASK(13, 11)
-#define QSYS_SWITCH_PORT_MODE_SCH_NEXT_CFG_X(x) (((x) & GENMASK(13, 11)) >> 11)
-#define QSYS_SWITCH_PORT_MODE_YEL_RSRVD BIT(10)
-#define QSYS_SWITCH_PORT_MODE_INGRESS_DROP_MODE BIT(9)
-#define QSYS_SWITCH_PORT_MODE_TX_PFC_ENA(x) (((x) << 1) & GENMASK(8, 1))
-#define QSYS_SWITCH_PORT_MODE_TX_PFC_ENA_M GENMASK(8, 1)
-#define QSYS_SWITCH_PORT_MODE_TX_PFC_ENA_X(x) (((x) & GENMASK(8, 1)) >> 1)
-#define QSYS_SWITCH_PORT_MODE_TX_PFC_MODE BIT(0)
-
-#define QSYS_STAT_CNT_CFG_TX_GREEN_CNT_MODE BIT(5)
-#define QSYS_STAT_CNT_CFG_TX_YELLOW_CNT_MODE BIT(4)
-#define QSYS_STAT_CNT_CFG_DROP_GREEN_CNT_MODE BIT(3)
-#define QSYS_STAT_CNT_CFG_DROP_YELLOW_CNT_MODE BIT(2)
-#define QSYS_STAT_CNT_CFG_DROP_COUNT_ONCE BIT(1)
-#define QSYS_STAT_CNT_CFG_DROP_COUNT_EGRESS BIT(0)
-
-#define QSYS_EEE_CFG_RSZ 0x4
-
-#define QSYS_EEE_THRES_EEE_HIGH_BYTES(x) (((x) << 8) & GENMASK(15, 8))
-#define QSYS_EEE_THRES_EEE_HIGH_BYTES_M GENMASK(15, 8)
-#define QSYS_EEE_THRES_EEE_HIGH_BYTES_X(x) (((x) & GENMASK(15, 8)) >> 8)
-#define QSYS_EEE_THRES_EEE_HIGH_FRAMES(x) ((x) & GENMASK(7, 0))
-#define QSYS_EEE_THRES_EEE_HIGH_FRAMES_M GENMASK(7, 0)
-
-#define QSYS_SW_STATUS_RSZ 0x4
-
-#define QSYS_EXT_CPU_CFG_EXT_CPU_PORT(x) (((x) << 8) & GENMASK(12, 8))
-#define QSYS_EXT_CPU_CFG_EXT_CPU_PORT_M GENMASK(12, 8)
-#define QSYS_EXT_CPU_CFG_EXT_CPU_PORT_X(x) (((x) & GENMASK(12, 8)) >> 8)
-#define QSYS_EXT_CPU_CFG_EXT_CPUQ_MSK(x) ((x) & GENMASK(7, 0))
-#define QSYS_EXT_CPU_CFG_EXT_CPUQ_MSK_M GENMASK(7, 0)
-
-#define QSYS_QMAP_GSZ 0x4
-
-#define QSYS_QMAP_SE_BASE(x) (((x) << 5) & GENMASK(12, 5))
-#define QSYS_QMAP_SE_BASE_M GENMASK(12, 5)
-#define QSYS_QMAP_SE_BASE_X(x) (((x) & GENMASK(12, 5)) >> 5)
-#define QSYS_QMAP_SE_IDX_SEL(x) (((x) << 2) & GENMASK(4, 2))
-#define QSYS_QMAP_SE_IDX_SEL_M GENMASK(4, 2)
-#define QSYS_QMAP_SE_IDX_SEL_X(x) (((x) & GENMASK(4, 2)) >> 2)
-#define QSYS_QMAP_SE_INP_SEL(x) ((x) & GENMASK(1, 0))
-#define QSYS_QMAP_SE_INP_SEL_M GENMASK(1, 0)
-
-#define QSYS_ISDX_SGRP_GSZ 0x4
-
-#define QSYS_TIMED_FRAME_ENTRY_GSZ 0x4
-
-#define QSYS_TFRM_MISC_TIMED_CANCEL_SLOT(x) (((x) << 9) & GENMASK(18, 9))
-#define QSYS_TFRM_MISC_TIMED_CANCEL_SLOT_M GENMASK(18, 9)
-#define QSYS_TFRM_MISC_TIMED_CANCEL_SLOT_X(x) (((x) & GENMASK(18, 9)) >> 9)
-#define QSYS_TFRM_MISC_TIMED_CANCEL_1SHOT BIT(8)
-#define QSYS_TFRM_MISC_TIMED_SLOT_MODE_MC BIT(7)
-#define QSYS_TFRM_MISC_TIMED_ENTRY_FAST_CNT(x) ((x) & GENMASK(6, 0))
-#define QSYS_TFRM_MISC_TIMED_ENTRY_FAST_CNT_M GENMASK(6, 0)
-
-#define QSYS_RED_PROFILE_RSZ 0x4
-
-#define QSYS_RED_PROFILE_WM_RED_LOW(x) (((x) << 8) & GENMASK(15, 8))
-#define QSYS_RED_PROFILE_WM_RED_LOW_M GENMASK(15, 8)
-#define QSYS_RED_PROFILE_WM_RED_LOW_X(x) (((x) & GENMASK(15, 8)) >> 8)
-#define QSYS_RED_PROFILE_WM_RED_HIGH(x) ((x) & GENMASK(7, 0))
-#define QSYS_RED_PROFILE_WM_RED_HIGH_M GENMASK(7, 0)
-
-#define QSYS_RES_CFG_GSZ 0x8
-
-#define QSYS_RES_STAT_GSZ 0x8
-
-#define QSYS_RES_STAT_INUSE(x) (((x) << 12) & GENMASK(23, 12))
-#define QSYS_RES_STAT_INUSE_M GENMASK(23, 12)
-#define QSYS_RES_STAT_INUSE_X(x) (((x) & GENMASK(23, 12)) >> 12)
-#define QSYS_RES_STAT_MAXUSE(x) ((x) & GENMASK(11, 0))
-#define QSYS_RES_STAT_MAXUSE_M GENMASK(11, 0)
-
-#define QSYS_EVENTS_CORE_EV_FDC(x) (((x) << 2) & GENMASK(4, 2))
-#define QSYS_EVENTS_CORE_EV_FDC_M GENMASK(4, 2)
-#define QSYS_EVENTS_CORE_EV_FDC_X(x) (((x) & GENMASK(4, 2)) >> 2)
-#define QSYS_EVENTS_CORE_EV_FRD(x) ((x) & GENMASK(1, 0))
-#define QSYS_EVENTS_CORE_EV_FRD_M GENMASK(1, 0)
-
-#define QSYS_QMAXSDU_CFG_0_RSZ 0x4
-
-#define QSYS_QMAXSDU_CFG_1_RSZ 0x4
-
-#define QSYS_QMAXSDU_CFG_2_RSZ 0x4
-
-#define QSYS_QMAXSDU_CFG_3_RSZ 0x4
-
-#define QSYS_QMAXSDU_CFG_4_RSZ 0x4
-
-#define QSYS_QMAXSDU_CFG_5_RSZ 0x4
-
-#define QSYS_QMAXSDU_CFG_6_RSZ 0x4
-
-#define QSYS_QMAXSDU_CFG_7_RSZ 0x4
-
-#define QSYS_PREEMPTION_CFG_RSZ 0x4
-
-#define QSYS_PREEMPTION_CFG_P_QUEUES(x) ((x) & GENMASK(7, 0))
-#define QSYS_PREEMPTION_CFG_P_QUEUES_M GENMASK(7, 0)
-#define QSYS_PREEMPTION_CFG_MM_ADD_FRAG_SIZE(x) (((x) << 8) & GENMASK(9, 8))
-#define QSYS_PREEMPTION_CFG_MM_ADD_FRAG_SIZE_M GENMASK(9, 8)
-#define QSYS_PREEMPTION_CFG_MM_ADD_FRAG_SIZE_X(x) (((x) & GENMASK(9, 8)) >> 8)
-#define QSYS_PREEMPTION_CFG_STRICT_IPG(x) (((x) << 12) & GENMASK(13, 12))
-#define QSYS_PREEMPTION_CFG_STRICT_IPG_M GENMASK(13, 12)
-#define QSYS_PREEMPTION_CFG_STRICT_IPG_X(x) (((x) & GENMASK(13, 12)) >> 12)
-#define QSYS_PREEMPTION_CFG_HOLD_ADVANCE(x) (((x) << 16) & GENMASK(31, 16))
-#define QSYS_PREEMPTION_CFG_HOLD_ADVANCE_M GENMASK(31, 16)
-#define QSYS_PREEMPTION_CFG_HOLD_ADVANCE_X(x) (((x) & GENMASK(31, 16)) >> 16)
-
-#define QSYS_CIR_CFG_GSZ 0x80
-
-#define QSYS_CIR_CFG_CIR_RATE(x) (((x) << 6) & GENMASK(20, 6))
-#define QSYS_CIR_CFG_CIR_RATE_M GENMASK(20, 6)
-#define QSYS_CIR_CFG_CIR_RATE_X(x) (((x) & GENMASK(20, 6)) >> 6)
-#define QSYS_CIR_CFG_CIR_BURST(x) ((x) & GENMASK(5, 0))
-#define QSYS_CIR_CFG_CIR_BURST_M GENMASK(5, 0)
-
-#define QSYS_EIR_CFG_GSZ 0x80
-
-#define QSYS_EIR_CFG_EIR_RATE(x) (((x) << 7) & GENMASK(21, 7))
-#define QSYS_EIR_CFG_EIR_RATE_M GENMASK(21, 7)
-#define QSYS_EIR_CFG_EIR_RATE_X(x) (((x) & GENMASK(21, 7)) >> 7)
-#define QSYS_EIR_CFG_EIR_BURST(x) (((x) << 1) & GENMASK(6, 1))
-#define QSYS_EIR_CFG_EIR_BURST_M GENMASK(6, 1)
-#define QSYS_EIR_CFG_EIR_BURST_X(x) (((x) & GENMASK(6, 1)) >> 1)
-#define QSYS_EIR_CFG_EIR_MARK_ENA BIT(0)
-
-#define QSYS_SE_CFG_GSZ 0x80
-
-#define QSYS_SE_CFG_SE_DWRR_CNT(x) (((x) << 6) & GENMASK(9, 6))
-#define QSYS_SE_CFG_SE_DWRR_CNT_M GENMASK(9, 6)
-#define QSYS_SE_CFG_SE_DWRR_CNT_X(x) (((x) & GENMASK(9, 6)) >> 6)
-#define QSYS_SE_CFG_SE_RR_ENA BIT(5)
-#define QSYS_SE_CFG_SE_AVB_ENA BIT(4)
-#define QSYS_SE_CFG_SE_FRM_MODE(x) (((x) << 2) & GENMASK(3, 2))
-#define QSYS_SE_CFG_SE_FRM_MODE_M GENMASK(3, 2)
-#define QSYS_SE_CFG_SE_FRM_MODE_X(x) (((x) & GENMASK(3, 2)) >> 2)
-#define QSYS_SE_CFG_SE_EXC_ENA BIT(1)
-#define QSYS_SE_CFG_SE_EXC_FWD BIT(0)
-
-#define QSYS_SE_DWRR_CFG_GSZ 0x80
-#define QSYS_SE_DWRR_CFG_RSZ 0x4
-
-#define QSYS_SE_CONNECT_GSZ 0x80
-
-#define QSYS_SE_CONNECT_SE_OUTP_IDX(x) (((x) << 17) & GENMASK(24, 17))
-#define QSYS_SE_CONNECT_SE_OUTP_IDX_M GENMASK(24, 17)
-#define QSYS_SE_CONNECT_SE_OUTP_IDX_X(x) (((x) & GENMASK(24, 17)) >> 17)
-#define QSYS_SE_CONNECT_SE_INP_IDX(x) (((x) << 9) & GENMASK(16, 9))
-#define QSYS_SE_CONNECT_SE_INP_IDX_M GENMASK(16, 9)
-#define QSYS_SE_CONNECT_SE_INP_IDX_X(x) (((x) & GENMASK(16, 9)) >> 9)
-#define QSYS_SE_CONNECT_SE_OUTP_CON(x) (((x) << 5) & GENMASK(8, 5))
-#define QSYS_SE_CONNECT_SE_OUTP_CON_M GENMASK(8, 5)
-#define QSYS_SE_CONNECT_SE_OUTP_CON_X(x) (((x) & GENMASK(8, 5)) >> 5)
-#define QSYS_SE_CONNECT_SE_INP_CNT(x) (((x) << 1) & GENMASK(4, 1))
-#define QSYS_SE_CONNECT_SE_INP_CNT_M GENMASK(4, 1)
-#define QSYS_SE_CONNECT_SE_INP_CNT_X(x) (((x) & GENMASK(4, 1)) >> 1)
-#define QSYS_SE_CONNECT_SE_TERMINAL BIT(0)
-
-#define QSYS_SE_DLB_SENSE_GSZ 0x80
-
-#define QSYS_SE_DLB_SENSE_SE_DLB_PRIO(x) (((x) << 11) & GENMASK(13, 11))
-#define QSYS_SE_DLB_SENSE_SE_DLB_PRIO_M GENMASK(13, 11)
-#define QSYS_SE_DLB_SENSE_SE_DLB_PRIO_X(x) (((x) & GENMASK(13, 11)) >> 11)
-#define QSYS_SE_DLB_SENSE_SE_DLB_SPORT(x) (((x) << 7) & GENMASK(10, 7))
-#define QSYS_SE_DLB_SENSE_SE_DLB_SPORT_M GENMASK(10, 7)
-#define QSYS_SE_DLB_SENSE_SE_DLB_SPORT_X(x) (((x) & GENMASK(10, 7)) >> 7)
-#define QSYS_SE_DLB_SENSE_SE_DLB_DPORT(x) (((x) << 3) & GENMASK(6, 3))
-#define QSYS_SE_DLB_SENSE_SE_DLB_DPORT_M GENMASK(6, 3)
-#define QSYS_SE_DLB_SENSE_SE_DLB_DPORT_X(x) (((x) & GENMASK(6, 3)) >> 3)
-#define QSYS_SE_DLB_SENSE_SE_DLB_PRIO_ENA BIT(2)
-#define QSYS_SE_DLB_SENSE_SE_DLB_SPORT_ENA BIT(1)
-#define QSYS_SE_DLB_SENSE_SE_DLB_DPORT_ENA BIT(0)
-
-#define QSYS_CIR_STATE_GSZ 0x80
-
-#define QSYS_CIR_STATE_CIR_LVL(x) (((x) << 4) & GENMASK(25, 4))
-#define QSYS_CIR_STATE_CIR_LVL_M GENMASK(25, 4)
-#define QSYS_CIR_STATE_CIR_LVL_X(x) (((x) & GENMASK(25, 4)) >> 4)
-#define QSYS_CIR_STATE_SHP_TIME(x) ((x) & GENMASK(3, 0))
-#define QSYS_CIR_STATE_SHP_TIME_M GENMASK(3, 0)
-
-#define QSYS_EIR_STATE_GSZ 0x80
-
-#define QSYS_SE_STATE_GSZ 0x80
-
-#define QSYS_SE_STATE_SE_OUTP_LVL(x) (((x) << 1) & GENMASK(2, 1))
-#define QSYS_SE_STATE_SE_OUTP_LVL_M GENMASK(2, 1)
-#define QSYS_SE_STATE_SE_OUTP_LVL_X(x) (((x) & GENMASK(2, 1)) >> 1)
-#define QSYS_SE_STATE_SE_WAS_YEL BIT(0)
-
-#define QSYS_HSCH_MISC_CFG_SE_CONNECT_VLD BIT(8)
-#define QSYS_HSCH_MISC_CFG_FRM_ADJ(x) (((x) << 3) & GENMASK(7, 3))
-#define QSYS_HSCH_MISC_CFG_FRM_ADJ_M GENMASK(7, 3)
-#define QSYS_HSCH_MISC_CFG_FRM_ADJ_X(x) (((x) & GENMASK(7, 3)) >> 3)
-#define QSYS_HSCH_MISC_CFG_LEAK_DIS BIT(2)
-#define QSYS_HSCH_MISC_CFG_QSHP_EXC_ENA BIT(1)
-#define QSYS_HSCH_MISC_CFG_PFC_BYP_UPD BIT(0)
-
-#define QSYS_TAG_CONFIG_RSZ 0x4
-
-#define QSYS_TAG_CONFIG_ENABLE BIT(0)
-#define QSYS_TAG_CONFIG_LINK_SPEED(x) (((x) << 4) & GENMASK(5, 4))
-#define QSYS_TAG_CONFIG_LINK_SPEED_M GENMASK(5, 4)
-#define QSYS_TAG_CONFIG_LINK_SPEED_X(x) (((x) & GENMASK(5, 4)) >> 4)
-#define QSYS_TAG_CONFIG_INIT_GATE_STATE(x) (((x) << 8) & GENMASK(15, 8))
-#define QSYS_TAG_CONFIG_INIT_GATE_STATE_M GENMASK(15, 8)
-#define QSYS_TAG_CONFIG_INIT_GATE_STATE_X(x) (((x) & GENMASK(15, 8)) >> 8)
-#define QSYS_TAG_CONFIG_SCH_TRAFFIC_QUEUES(x) (((x) << 16) & GENMASK(23, 16))
-#define QSYS_TAG_CONFIG_SCH_TRAFFIC_QUEUES_M GENMASK(23, 16)
-#define QSYS_TAG_CONFIG_SCH_TRAFFIC_QUEUES_X(x) (((x) & GENMASK(23, 16)) >> 16)
-
-#define QSYS_TAS_PARAM_CFG_CTRL_PORT_NUM(x) ((x) & GENMASK(7, 0))
-#define QSYS_TAS_PARAM_CFG_CTRL_PORT_NUM_M GENMASK(7, 0)
-#define QSYS_TAS_PARAM_CFG_CTRL_ALWAYS_GUARD_BAND_SCH_Q BIT(8)
-#define QSYS_TAS_PARAM_CFG_CTRL_CONFIG_CHANGE BIT(16)
-
-#define QSYS_PORT_MAX_SDU_RSZ 0x4
-
-#define QSYS_PARAM_CFG_REG_3_BASE_TIME_SEC_MSB(x) ((x) & GENMASK(15, 0))
-#define QSYS_PARAM_CFG_REG_3_BASE_TIME_SEC_MSB_M GENMASK(15, 0)
-#define QSYS_PARAM_CFG_REG_3_LIST_LENGTH(x) (((x) << 16) & GENMASK(31, 16))
-#define QSYS_PARAM_CFG_REG_3_LIST_LENGTH_M GENMASK(31, 16)
-#define QSYS_PARAM_CFG_REG_3_LIST_LENGTH_X(x) (((x) & GENMASK(31, 16)) >> 16)
-
-#define QSYS_GCL_CFG_REG_1_GCL_ENTRY_NUM(x) ((x) & GENMASK(5, 0))
-#define QSYS_GCL_CFG_REG_1_GCL_ENTRY_NUM_M GENMASK(5, 0)
-#define QSYS_GCL_CFG_REG_1_GATE_STATE(x) (((x) << 8) & GENMASK(15, 8))
-#define QSYS_GCL_CFG_REG_1_GATE_STATE_M GENMASK(15, 8)
-#define QSYS_GCL_CFG_REG_1_GATE_STATE_X(x) (((x) & GENMASK(15, 8)) >> 8)
-
-#define QSYS_PARAM_STATUS_REG_3_BASE_TIME_SEC_MSB(x) ((x) & GENMASK(15, 0))
-#define QSYS_PARAM_STATUS_REG_3_BASE_TIME_SEC_MSB_M GENMASK(15, 0)
-#define QSYS_PARAM_STATUS_REG_3_LIST_LENGTH(x) (((x) << 16) & GENMASK(31, 16))
-#define QSYS_PARAM_STATUS_REG_3_LIST_LENGTH_M GENMASK(31, 16)
-#define QSYS_PARAM_STATUS_REG_3_LIST_LENGTH_X(x) (((x) & GENMASK(31, 16)) >> 16)
-
-#define QSYS_PARAM_STATUS_REG_8_CFG_CHG_TIME_SEC_MSB(x) ((x) & GENMASK(15, 0))
-#define QSYS_PARAM_STATUS_REG_8_CFG_CHG_TIME_SEC_MSB_M GENMASK(15, 0)
-#define QSYS_PARAM_STATUS_REG_8_OPER_GATE_STATE(x) (((x) << 16) & GENMASK(23, 16))
-#define QSYS_PARAM_STATUS_REG_8_OPER_GATE_STATE_M GENMASK(23, 16)
-#define QSYS_PARAM_STATUS_REG_8_OPER_GATE_STATE_X(x) (((x) & GENMASK(23, 16)) >> 16)
-#define QSYS_PARAM_STATUS_REG_8_CONFIG_PENDING BIT(24)
-
-#define QSYS_GCL_STATUS_REG_1_GCL_ENTRY_NUM(x) ((x) & GENMASK(5, 0))
-#define QSYS_GCL_STATUS_REG_1_GCL_ENTRY_NUM_M GENMASK(5, 0)
-#define QSYS_GCL_STATUS_REG_1_GATE_STATE(x) (((x) << 8) & GENMASK(15, 8))
-#define QSYS_GCL_STATUS_REG_1_GATE_STATE_M GENMASK(15, 8)
-#define QSYS_GCL_STATUS_REG_1_GATE_STATE_X(x) (((x) & GENMASK(15, 8)) >> 8)
-
-#endif
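
The _GSZ and _RSZ constants scattered through these headers appear to be group and replication strides: a replicated per-port register would live at base + port * GSZ + index * RSZ. A sketch under that assumption (the addressing formula and base offset are illustrative, not taken from the patch):

#include <stdio.h>

/* Strides copied from the header above; the formula below is an
 * assumption about how replicated ocelot registers are laid out.
 */
#define QSYS_CIR_CFG_GSZ          0x80 /* stride between per-port groups */
#define QSYS_SWITCH_PORT_MODE_RSZ 0x4  /* stride between replications */

static unsigned int reg_addr(unsigned int base, unsigned int port,
			     unsigned int gsz, unsigned int ri,
			     unsigned int rsz)
{
	return base + port * gsz + ri * rsz;
}

int main(void)
{
	unsigned int base = 0x1000; /* hypothetical base offset */

	printf("port 3 CIR_CFG at 0x%x\n",
	       reg_addr(base, 3, QSYS_CIR_CFG_GSZ, 0, 0));
	printf("SWITCH_PORT_MODE[5] at 0x%x\n",
	       reg_addr(base, 0, 0, 5, QSYS_SWITCH_PORT_MODE_RSZ));
	return 0;
}
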
diff --git a/drivers/net/ethernet/myricom/myri10ge/myri10ge.c b/drivers/net/ethernet/myricom/myri10ge/myri10ge.c
index c979f38a2e0c..2ee0d0be113a 100644
--- a/drivers/net/ethernet/myricom/myri10ge/myri10ge.c
+++ b/drivers/net/ethernet/myricom/myri10ge/myri10ge.c
@@ -2892,7 +2892,7 @@ drop:
static netdev_tx_t myri10ge_sw_tso(struct sk_buff *skb,
struct net_device *dev)
{
- struct sk_buff *segs, *curr;
+ struct sk_buff *segs, *curr, *next;
struct myri10ge_priv *mgp = netdev_priv(dev);
struct myri10ge_slice_state *ss;
netdev_tx_t status;
@@ -2901,10 +2901,8 @@ static netdev_tx_t myri10ge_sw_tso(struct sk_buff *skb,
if (IS_ERR(segs))
goto drop;
- while (segs) {
- curr = segs;
- segs = segs->next;
- curr->next = NULL;
+ skb_list_walk_safe(segs, curr, next) {
+ skb_mark_not_on_list(curr);
status = myri10ge_xmit(curr, dev);
if (status != 0) {
dev_kfree_skb_any(curr);
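
The myri10ge hunk above replaces an open-coded walk of the GSO segment list with skb_list_walk_safe(), which caches the next pointer before the loop body runs so the current skb may be unlinked or freed mid-walk. A userspace sketch of the same safe-iteration idiom over a singly linked list (the macro name is a stand-in, not the kernel's):

#include <stdio.h>
#include <stdlib.h>

struct node {
	int val;
	struct node *next;
};

/* Same shape as skb_list_walk_safe(): fetch 'n' before the body,
 * so the body may free or unlink 'pos'.
 */
#define list_walk_safe(head, pos, n) \
	for ((pos) = (head); (pos) && ((n) = (pos)->next, 1); (pos) = (n))

int main(void)
{
	struct node *head = NULL, *pos, *next;

	for (int i = 0; i < 3; i++) {
		struct node *nn = malloc(sizeof(*nn));
		nn->val = i;
		nn->next = head;
		head = nn;
	}

	list_walk_safe(head, pos, next) {
		printf("freeing %d\n", pos->val);
		free(pos);	/* safe: 'next' was saved beforehand */
	}
	return 0;
}
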
diff --git a/drivers/net/ethernet/natsemi/natsemi.c b/drivers/net/ethernet/natsemi/natsemi.c
index 1a2634cbbb69..d21d706b83a7 100644
--- a/drivers/net/ethernet/natsemi/natsemi.c
+++ b/drivers/net/ethernet/natsemi/natsemi.c
@@ -612,7 +612,7 @@ static void undo_cable_magic(struct net_device *dev);
static void check_link(struct net_device *dev);
static void netdev_timer(struct timer_list *t);
static void dump_ring(struct net_device *dev);
-static void ns_tx_timeout(struct net_device *dev);
+static void ns_tx_timeout(struct net_device *dev, unsigned int txqueue);
static int alloc_ring(struct net_device *dev);
static void refill_rx(struct net_device *dev);
static void init_ring(struct net_device *dev);
@@ -1881,7 +1881,7 @@ static void dump_ring(struct net_device *dev)
}
}
-static void ns_tx_timeout(struct net_device *dev)
+static void ns_tx_timeout(struct net_device *dev, unsigned int txqueue)
{
struct netdev_private *np = netdev_priv(dev);
void __iomem * ioaddr = ns_ioaddr(dev);
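
This hunk, like the ns83820 and sonic changes that follow, is part of a tree-wide update that adds a txqueue argument to the ndo_tx_timeout callback; every implementation must adopt the new signature even when, as here, the argument goes unused. A userspace sketch of the callback-table pattern (names hypothetical, struct net_device left opaque):

#include <stdio.h>

struct net_device;	/* opaque in this sketch */

/* Old callback type: void (*tx_timeout)(struct net_device *dev);
 * the new type also carries the index of the stalled queue.
 */
struct device_ops {
	void (*tx_timeout)(struct net_device *dev, unsigned int txqueue);
};

static void my_tx_timeout(struct net_device *dev, unsigned int txqueue)
{
	(void)dev;
	/* Drivers that don't track per-queue state can simply ignore
	 * txqueue, as natsemi does above.
	 */
	printf("tx timeout on queue %u\n", txqueue);
}

static const struct device_ops ops = {
	.tx_timeout = my_tx_timeout,
};

int main(void)
{
	ops.tx_timeout(NULL, 0);
	return 0;
}
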
diff --git a/drivers/net/ethernet/natsemi/ns83820.c b/drivers/net/ethernet/natsemi/ns83820.c
index 6af9a7eee114..8e24c7acf79b 100644
--- a/drivers/net/ethernet/natsemi/ns83820.c
+++ b/drivers/net/ethernet/natsemi/ns83820.c
@@ -1549,7 +1549,7 @@ static int ns83820_stop(struct net_device *ndev)
return 0;
}
-static void ns83820_tx_timeout(struct net_device *ndev)
+static void ns83820_tx_timeout(struct net_device *ndev, unsigned int txqueue)
{
struct ns83820 *dev = PRIV(ndev);
u32 tx_done_idx;
@@ -1603,7 +1603,7 @@ static void ns83820_tx_watch(struct timer_list *t)
ndev->name,
dev->tx_done_idx, dev->tx_free_idx,
atomic_read(&dev->nr_tx_skbs));
- ns83820_tx_timeout(ndev);
+ ns83820_tx_timeout(ndev, UINT_MAX);
}
mod_timer(&dev->tx_watchdog, jiffies + 2*HZ);
@@ -1937,7 +1937,7 @@ static int ns83820_init_one(struct pci_dev *pci_dev,
pci_set_master(pci_dev);
addr = pci_resource_start(pci_dev, 1);
- dev->base = ioremap_nocache(addr, PAGE_SIZE);
+ dev->base = ioremap(addr, PAGE_SIZE);
dev->tx_descs = pci_alloc_consistent(pci_dev,
4 * DESC_SIZE * NR_TX_DESC, &dev->tx_phy_descs);
dev->rx_info.descs = pci_alloc_consistent(pci_dev,
diff --git a/drivers/net/ethernet/natsemi/sonic.c b/drivers/net/ethernet/natsemi/sonic.c
index b339125b2f09..31be3ba66877 100644
--- a/drivers/net/ethernet/natsemi/sonic.c
+++ b/drivers/net/ethernet/natsemi/sonic.c
@@ -64,6 +64,8 @@ static int sonic_open(struct net_device *dev)
netif_dbg(lp, ifup, dev, "%s: initializing sonic driver\n", __func__);
+ spin_lock_init(&lp->lock);
+
for (i = 0; i < SONIC_NUM_RRS; i++) {
struct sk_buff *skb = netdev_alloc_skb(dev, SONIC_RBSIZE + 2);
if (skb == NULL) {
@@ -114,6 +116,24 @@ static int sonic_open(struct net_device *dev)
return 0;
}
+/* Wait for the SONIC to become idle. */
+static void sonic_quiesce(struct net_device *dev, u16 mask)
+{
+ struct sonic_local * __maybe_unused lp = netdev_priv(dev);
+ int i;
+ u16 bits;
+
+ for (i = 0; i < 1000; ++i) {
+ bits = SONIC_READ(SONIC_CMD) & mask;
+ if (!bits)
+ return;
+ if (irqs_disabled() || in_interrupt())
+ udelay(20);
+ else
+ usleep_range(100, 200);
+ }
+ WARN_ONCE(1, "command deadline expired! 0x%04x\n", bits);
+}
/*
* Close the SONIC device
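
sonic_quiesce() above polls the command register until the requested bits clear, picking udelay() when sleeping is not allowed and usleep_range() otherwise, and warns once if the retry budget expires. A userspace sketch of the same bounded-poll shape (the busy predicate is a stand-in for a hardware register read):

#include <stdio.h>
#include <unistd.h>
#include <stdbool.h>

/* Stand-in for reading busy bits out of a hardware register. */
static unsigned int fake_busy = 3;

static unsigned int read_busy(void)
{
	if (fake_busy)
		fake_busy--;
	return fake_busy;
}

/* Poll until the busy bits clear or the retry budget runs out,
 * mirroring the structure of sonic_quiesce() above.
 */
static bool quiesce(unsigned int mask)
{
	for (int i = 0; i < 1000; i++) {
		unsigned int bits = read_busy() & mask;

		if (!bits)
			return true;
		usleep(100); /* the driver picks udelay vs usleep_range */
	}
	fprintf(stderr, "quiesce deadline expired\n");
	return false;
}

int main(void)
{
	printf("quiesced: %d\n", quiesce(~0u));
	return 0;
}
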
@@ -130,6 +150,9 @@ static int sonic_close(struct net_device *dev)
/*
* stop the SONIC, disable interrupts
*/
+ SONIC_WRITE(SONIC_CMD, SONIC_CR_RXDIS);
+ sonic_quiesce(dev, SONIC_CR_ALL);
+
SONIC_WRITE(SONIC_IMR, 0);
SONIC_WRITE(SONIC_ISR, 0x7fff);
SONIC_WRITE(SONIC_CMD, SONIC_CR_RST);
@@ -161,7 +184,7 @@ static int sonic_close(struct net_device *dev)
return 0;
}
-static void sonic_tx_timeout(struct net_device *dev)
+static void sonic_tx_timeout(struct net_device *dev, unsigned int txqueue)
{
struct sonic_local *lp = netdev_priv(dev);
int i;
@@ -169,6 +192,9 @@ static void sonic_tx_timeout(struct net_device *dev)
* put the Sonic into software-reset mode and
* disable all interrupts before releasing DMA buffers
*/
+ SONIC_WRITE(SONIC_CMD, SONIC_CR_RXDIS);
+ sonic_quiesce(dev, SONIC_CR_ALL);
+
SONIC_WRITE(SONIC_IMR, 0);
SONIC_WRITE(SONIC_ISR, 0x7fff);
SONIC_WRITE(SONIC_CMD, SONIC_CR_RST);
@@ -206,8 +232,6 @@ static void sonic_tx_timeout(struct net_device *dev)
* wake the tx queue
* Concurrently with all of this, the SONIC is potentially writing to
* the status flags of the TDs.
- * Until some mutual exclusion is added, this code will not work with SMP. However,
- * MIPS Jazz machines and m68k Macs were all uni-processor machines.
*/
static int sonic_send_packet(struct sk_buff *skb, struct net_device *dev)
@@ -215,7 +239,8 @@ static int sonic_send_packet(struct sk_buff *skb, struct net_device *dev)
struct sonic_local *lp = netdev_priv(dev);
dma_addr_t laddr;
int length;
- int entry = lp->next_tx;
+ int entry;
+ unsigned long flags;
netif_dbg(lp, tx_queued, dev, "%s: skb=%p\n", __func__, skb);
@@ -237,6 +262,10 @@ static int sonic_send_packet(struct sk_buff *skb, struct net_device *dev)
return NETDEV_TX_OK;
}
+ spin_lock_irqsave(&lp->lock, flags);
+
+ entry = lp->next_tx;
+
sonic_tda_put(dev, entry, SONIC_TD_STATUS, 0); /* clear status */
sonic_tda_put(dev, entry, SONIC_TD_FRAG_COUNT, 1); /* single fragment */
sonic_tda_put(dev, entry, SONIC_TD_PKTSIZE, length); /* length of packet */
@@ -246,10 +275,6 @@ static int sonic_send_packet(struct sk_buff *skb, struct net_device *dev)
sonic_tda_put(dev, entry, SONIC_TD_LINK,
sonic_tda_get(dev, entry, SONIC_TD_LINK) | SONIC_EOL);
- /*
- * Must set tx_skb[entry] only after clearing status, and
- * before clearing EOL and before stopping queue
- */
wmb();
lp->tx_len[entry] = length;
lp->tx_laddr[entry] = laddr;
@@ -272,6 +297,8 @@ static int sonic_send_packet(struct sk_buff *skb, struct net_device *dev)
SONIC_WRITE(SONIC_CMD, SONIC_CR_TXP);
+ spin_unlock_irqrestore(&lp->lock, flags);
+
return NETDEV_TX_OK;
}
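
The hunks above move the read of lp->next_tx and the descriptor writes inside spin_lock_irqsave()/spin_unlock_irqrestore(), so the transmit path and the interrupt handler can no longer race on the ring indices. A pthread sketch of the same claim-fill-publish shape (a userspace mutex standing in for the irq-disabling spinlock):

#include <pthread.h>
#include <stdio.h>

#define RING_SIZE 16

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static int next_tx;		/* producer index, lock-protected */
static int ring[RING_SIZE];

/* Mirrors sonic_send_packet(): claim the slot index only after
 * taking the lock, fill it, then advance the index and unlock.
 */
static void send_packet(int payload)
{
	pthread_mutex_lock(&lock);   /* spin_lock_irqsave() in the driver */

	int entry = next_tx;         /* read the index under the lock */
	ring[entry] = payload;       /* fill the descriptor */
	next_tx = (entry + 1) % RING_SIZE;

	pthread_mutex_unlock(&lock); /* spin_unlock_irqrestore() */
}

int main(void)
{
	send_packet(42);
	printf("ring[0] = %d, next_tx = %d\n", ring[0], next_tx);
	return 0;
}
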
@@ -284,15 +311,28 @@ static irqreturn_t sonic_interrupt(int irq, void *dev_id)
struct net_device *dev = dev_id;
struct sonic_local *lp = netdev_priv(dev);
int status;
+ unsigned long flags;
+
+ /* The lock has two purposes. Firstly, it synchronizes sonic_interrupt()
+ * with sonic_send_packet() so that the two functions can share state.
+ * Secondly, it makes sonic_interrupt() re-entrant, as that is required
+ * by macsonic which must use two IRQs with different priority levels.
+ */
+ spin_lock_irqsave(&lp->lock, flags);
+
+ status = SONIC_READ(SONIC_ISR) & SONIC_IMR_DEFAULT;
+ if (!status) {
+ spin_unlock_irqrestore(&lp->lock, flags);
- if (!(status = SONIC_READ(SONIC_ISR) & SONIC_IMR_DEFAULT))
return IRQ_NONE;
+ }
do {
+ SONIC_WRITE(SONIC_ISR, status); /* clear the interrupt(s) */
+
if (status & SONIC_INT_PKTRX) {
netif_dbg(lp, intr, dev, "%s: packet rx\n", __func__);
sonic_rx(dev); /* got packet(s) */
- SONIC_WRITE(SONIC_ISR, SONIC_INT_PKTRX); /* clear the interrupt */
}
if (status & SONIC_INT_TXDN) {
@@ -300,11 +340,12 @@ static irqreturn_t sonic_interrupt(int irq, void *dev_id)
int td_status;
int freed_some = 0;
- /* At this point, cur_tx is the index of a TD that is one of:
- * unallocated/freed (status set & tx_skb[entry] clear)
- * allocated and sent (status set & tx_skb[entry] set )
- * allocated and not yet sent (status clear & tx_skb[entry] set )
- * still being allocated by sonic_send_packet (status clear & tx_skb[entry] clear)
+ /* The state of a Transmit Descriptor may be inferred
+ * from { tx_skb[entry], td_status } as follows.
+ * { clear, clear } => the TD has never been used
+ * { set, clear } => the TD was handed to SONIC
+ * { set, set } => the TD was handed back
+ * { clear, set } => the TD is available for re-use
*/
netif_dbg(lp, intr, dev, "%s: tx done\n", __func__);
@@ -313,18 +354,19 @@ static irqreturn_t sonic_interrupt(int irq, void *dev_id)
if ((td_status = sonic_tda_get(dev, entry, SONIC_TD_STATUS)) == 0)
break;
- if (td_status & 0x0001) {
+ if (td_status & SONIC_TCR_PTX) {
lp->stats.tx_packets++;
lp->stats.tx_bytes += sonic_tda_get(dev, entry, SONIC_TD_PKTSIZE);
} else {
- lp->stats.tx_errors++;
- if (td_status & 0x0642)
+ if (td_status & (SONIC_TCR_EXD |
+ SONIC_TCR_EXC | SONIC_TCR_BCM))
lp->stats.tx_aborted_errors++;
- if (td_status & 0x0180)
+ if (td_status &
+ (SONIC_TCR_NCRS | SONIC_TCR_CRLS))
lp->stats.tx_carrier_errors++;
- if (td_status & 0x0020)
+ if (td_status & SONIC_TCR_OWC)
lp->stats.tx_window_errors++;
- if (td_status & 0x0004)
+ if (td_status & SONIC_TCR_FU)
lp->stats.tx_fifo_errors++;
}
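
The hunk above replaces magic constants such as 0x0001 and 0x0642 with named SONIC_TCR_* bits, so each error class the driver accounts for is spelled out. A small sketch of the decode (the bit values below are hypothetical, purely for illustration; the real SONIC_TCR_* values live in the driver's header):

#include <stdio.h>

#define TCR_PTX  0x0001	/* packet transmitted OK (hypothetical value) */
#define TCR_EXD  0x0400	/* excessive deferral */
#define TCR_EXC  0x0040	/* excessive collisions */
#define TCR_OWC  0x0020	/* out-of-window collision */

static void account(unsigned int td_status)
{
	if (td_status & TCR_PTX) {
		printf("tx ok\n");
		return;
	}
	/* Named masks make the error classes self-documenting,
	 * unlike the old literals 0x0001 / 0x0642 / 0x0020.
	 */
	if (td_status & (TCR_EXD | TCR_EXC))
		printf("aborted\n");
	if (td_status & TCR_OWC)
		printf("late collision\n");
}

int main(void)
{
	account(TCR_EXC);
	return 0;
}
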
@@ -346,7 +388,6 @@ static irqreturn_t sonic_interrupt(int irq, void *dev_id)
if (freed_some || lp->tx_skb[entry] == NULL)
netif_wake_queue(dev); /* The ring is no longer full */
lp->cur_tx = entry;
- SONIC_WRITE(SONIC_ISR, SONIC_INT_TXDN); /* clear the interrupt */
}
/*
@@ -355,42 +396,37 @@ static irqreturn_t sonic_interrupt(int irq, void *dev_id)
if (status & SONIC_INT_RFO) {
netif_dbg(lp, rx_err, dev, "%s: rx fifo overrun\n",
__func__);
- lp->stats.rx_fifo_errors++;
- SONIC_WRITE(SONIC_ISR, SONIC_INT_RFO); /* clear the interrupt */
}
if (status & SONIC_INT_RDE) {
netif_dbg(lp, rx_err, dev, "%s: rx descriptors exhausted\n",
__func__);
- lp->stats.rx_dropped++;
- SONIC_WRITE(SONIC_ISR, SONIC_INT_RDE); /* clear the interrupt */
}
if (status & SONIC_INT_RBAE) {
netif_dbg(lp, rx_err, dev, "%s: rx buffer area exceeded\n",
__func__);
- lp->stats.rx_dropped++;
- SONIC_WRITE(SONIC_ISR, SONIC_INT_RBAE); /* clear the interrupt */
}
/* counter overruns; all counters are 16bit wide */
- if (status & SONIC_INT_FAE) {
+ if (status & SONIC_INT_FAE)
lp->stats.rx_frame_errors += 65536;
- SONIC_WRITE(SONIC_ISR, SONIC_INT_FAE); /* clear the interrupt */
- }
- if (status & SONIC_INT_CRC) {
+ if (status & SONIC_INT_CRC)
lp->stats.rx_crc_errors += 65536;
- SONIC_WRITE(SONIC_ISR, SONIC_INT_CRC); /* clear the interrupt */
- }
- if (status & SONIC_INT_MP) {
+ if (status & SONIC_INT_MP)
lp->stats.rx_missed_errors += 65536;
- SONIC_WRITE(SONIC_ISR, SONIC_INT_MP); /* clear the interrupt */
- }
/* transmit error */
if (status & SONIC_INT_TXER) {
- if (SONIC_READ(SONIC_TCR) & SONIC_TCR_FU)
- netif_dbg(lp, tx_err, dev, "%s: tx fifo underrun\n",
- __func__);
- SONIC_WRITE(SONIC_ISR, SONIC_INT_TXER); /* clear the interrupt */
+ u16 tcr = SONIC_READ(SONIC_TCR);
+
+ netif_dbg(lp, tx_err, dev, "%s: TXER intr, TCR %04x\n",
+ __func__, tcr);
+
+ if (tcr & (SONIC_TCR_EXD | SONIC_TCR_EXC |
+ SONIC_TCR_FU | SONIC_TCR_BCM)) {
+ /* Aborted transmission. Try again. */
+ netif_stop_queue(dev);
+ SONIC_WRITE(SONIC_CMD, SONIC_CR_TXP);
+ }
}
/* bus retry */
@@ -400,107 +436,164 @@ static irqreturn_t sonic_interrupt(int irq, void *dev_id)
/* ... to help debug DMA problems causing endless interrupts. */
/* Bounce the eth interface to turn on the interrupt again. */
SONIC_WRITE(SONIC_IMR, 0);
- SONIC_WRITE(SONIC_ISR, SONIC_INT_BR); /* clear the interrupt */
}
- /* load CAM done */
- if (status & SONIC_INT_LCD)
- SONIC_WRITE(SONIC_ISR, SONIC_INT_LCD); /* clear the interrupt */
- } while((status = SONIC_READ(SONIC_ISR) & SONIC_IMR_DEFAULT));
+ status = SONIC_READ(SONIC_ISR) & SONIC_IMR_DEFAULT;
+ } while (status);
+
+ spin_unlock_irqrestore(&lp->lock, flags);
+
return IRQ_HANDLED;
}
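
The reworked loop acks the whole status word up front and re-reads the ISR before exiting. Because the SONIC ISR is write-one-to-clear, writing back exactly the bits just read cannot discard an event that arrives mid-handler; it simply surfaces on the next pass. A small user-space model of that ack-then-handle shape (registers simulated, names illustrative):

    #include <stdio.h>

    static unsigned short isr;                  /* simulated status register */

    static unsigned short read_isr(void) { return isr; }
    static void ack_isr(unsigned short bits) { isr &= ~bits; }  /* W1C */

    int main(void)
    {
        unsigned short status;

        isr = 0x0003;                           /* two events pending */
        while ((status = read_isr())) {
            ack_isr(status);                    /* clear before handling */
            printf("handling 0x%04x\n", status);
            /* an event arriving here survives into the next pass */
        }
        return 0;
    }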
+/* Return the array index corresponding to a given Receive Buffer pointer. */
+static int index_from_addr(struct sonic_local *lp, dma_addr_t addr,
+ unsigned int last)
+{
+ unsigned int i = last;
+
+ do {
+ i = (i + 1) & SONIC_RRS_MASK;
+ if (addr == lp->rx_laddr[i])
+ return i;
+ } while (i != last);
+
+ return -ENOENT;
+}
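
index_from_addr() starts scanning one past the previous hit, so in-order completions, the common case, match on the first probe. A compilable model, assuming a 16-entry ring (the real size is SONIC_NUM_RRS):

    #include <stdio.h>

    #define RRS_MASK 15

    static int find(const unsigned long *ring, unsigned long addr,
                    unsigned int last)
    {
        unsigned int i = last;

        do {
            i = (i + 1) & RRS_MASK;
            if (ring[i] == addr)
                return i;
        } while (i != last);

        return -1;                      /* the driver returns -ENOENT */
    }

    int main(void)
    {
        unsigned long ring[16] = { [3] = 0xd000, [4] = 0xd400 };

        printf("%d\n", find(ring, 0xd400, 3));  /* prints 4 */
        return 0;
    }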
+
+/* Allocate and map a new skb to be used as a receive buffer. */
+static bool sonic_alloc_rb(struct net_device *dev, struct sonic_local *lp,
+ struct sk_buff **new_skb, dma_addr_t *new_addr)
+{
+ *new_skb = netdev_alloc_skb(dev, SONIC_RBSIZE + 2);
+ if (!*new_skb)
+ return false;
+
+ if (SONIC_BUS_SCALE(lp->dma_bitmode) == 2)
+ skb_reserve(*new_skb, 2);
+
+ *new_addr = dma_map_single(lp->device, skb_put(*new_skb, SONIC_RBSIZE),
+ SONIC_RBSIZE, DMA_FROM_DEVICE);
+ if (!*new_addr) {
+ dev_kfree_skb(*new_skb);
+ *new_skb = NULL;
+ return false;
+ }
+
+ return true;
+}
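
One hedged aside on sonic_alloc_rb(): testing the returned handle with !*new_addr assumes bus address zero is never a valid mapping. The generic idiom is dma_mapping_error(); a sketch of that shape, with an illustrative helper name that is not part of the patch:

    /* Sketch only; helper name and error handling are illustrative. */
    static int map_rx_buffer(struct device *dmadev, void *buf, size_t len,
                             dma_addr_t *out)
    {
        dma_addr_t addr = dma_map_single(dmadev, buf, len, DMA_FROM_DEVICE);

        if (dma_mapping_error(dmadev, addr))
            return -ENOMEM;             /* caller frees the skb and drops */

        *out = addr;
        return 0;
    }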
+
+/* Place a new receive resource in the Receive Resource Area and update RWP. */
+static void sonic_update_rra(struct net_device *dev, struct sonic_local *lp,
+ dma_addr_t old_addr, dma_addr_t new_addr)
+{
+ unsigned int entry = sonic_rr_entry(dev, SONIC_READ(SONIC_RWP));
+ unsigned int end = sonic_rr_entry(dev, SONIC_READ(SONIC_RRP));
+ u32 buf;
+
+ /* The resources in the range [RRP, RWP) belong to the SONIC. This loop
+ * scans the other resources in the RRA, those in the range [RWP, RRP).
+ */
+ do {
+ buf = (sonic_rra_get(dev, entry, SONIC_RR_BUFADR_H) << 16) |
+ sonic_rra_get(dev, entry, SONIC_RR_BUFADR_L);
+
+ if (buf == old_addr)
+ break;
+
+ entry = (entry + 1) & SONIC_RRS_MASK;
+ } while (entry != end);
+
+ WARN_ONCE(buf != old_addr, "failed to find resource!\n");
+
+ sonic_rra_put(dev, entry, SONIC_RR_BUFADR_H, new_addr >> 16);
+ sonic_rra_put(dev, entry, SONIC_RR_BUFADR_L, new_addr & 0xffff);
+
+ entry = (entry + 1) & SONIC_RRS_MASK;
+
+ SONIC_WRITE(SONIC_RWP, sonic_rr_addr(dev, entry));
+}
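
sonic_update_rra() leans on the ownership split stated in its comment: ring entries in [RRP, RWP) belong to the chip, the remainder to the driver, and advancing RWP by one hands a refilled resource back. A user-space model of that half-open interval test on a wrapping 16-entry ring (sizes and names illustrative):

    #include <stdio.h>

    #define RRS_MASK 15

    static int owned_by_chip(unsigned int i, unsigned int rrp, unsigned int rwp)
    {
        return ((i - rrp) & RRS_MASK) < ((rwp - rrp) & RRS_MASK);
    }

    int main(void)
    {
        unsigned int rrp = 14, rwp = 2;         /* interval wraps */

        printf("%d %d %d\n",
               owned_by_chip(15, rrp, rwp),     /* 1: inside [14, 2) */
               owned_by_chip(1, rrp, rwp),      /* 1 */
               owned_by_chip(2, rrp, rwp));     /* 0: RWP is the driver's */
        return 0;
    }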
+
/*
* We have a good packet(s), pass it/them up the network stack.
*/
static void sonic_rx(struct net_device *dev)
{
struct sonic_local *lp = netdev_priv(dev);
- int status;
int entry = lp->cur_rx;
+ int prev_entry = lp->eol_rx;
+ bool rbe = false;
while (sonic_rda_get(dev, entry, SONIC_RD_IN_USE) == 0) {
- struct sk_buff *used_skb;
- struct sk_buff *new_skb;
- dma_addr_t new_laddr;
- u16 bufadr_l;
- u16 bufadr_h;
- int pkt_len;
-
- status = sonic_rda_get(dev, entry, SONIC_RD_STATUS);
- if (status & SONIC_RCR_PRX) {
- /* Malloc up new buffer. */
- new_skb = netdev_alloc_skb(dev, SONIC_RBSIZE + 2);
- if (new_skb == NULL) {
- lp->stats.rx_dropped++;
+ u16 status = sonic_rda_get(dev, entry, SONIC_RD_STATUS);
+
+ /* If the RD has LPKT set, the chip has finished with the RB */
+ if ((status & SONIC_RCR_PRX) && (status & SONIC_RCR_LPKT)) {
+ struct sk_buff *new_skb;
+ dma_addr_t new_laddr;
+ u32 addr = (sonic_rda_get(dev, entry,
+ SONIC_RD_PKTPTR_H) << 16) |
+ sonic_rda_get(dev, entry, SONIC_RD_PKTPTR_L);
+ int i = index_from_addr(lp, addr, entry);
+
+ if (i < 0) {
+ WARN_ONCE(1, "failed to find buffer!\n");
break;
}
- /* provide 16 byte IP header alignment unless DMA requires otherwise */
- if(SONIC_BUS_SCALE(lp->dma_bitmode) == 2)
- skb_reserve(new_skb, 2);
-
- new_laddr = dma_map_single(lp->device, skb_put(new_skb, SONIC_RBSIZE),
- SONIC_RBSIZE, DMA_FROM_DEVICE);
- if (!new_laddr) {
- dev_kfree_skb(new_skb);
- printk(KERN_ERR "%s: Failed to map rx buffer, dropping packet.\n", dev->name);
+
+ if (sonic_alloc_rb(dev, lp, &new_skb, &new_laddr)) {
+ struct sk_buff *used_skb = lp->rx_skb[i];
+ int pkt_len;
+
+ /* Pass the used buffer up the stack */
+ dma_unmap_single(lp->device, addr, SONIC_RBSIZE,
+ DMA_FROM_DEVICE);
+
+ pkt_len = sonic_rda_get(dev, entry,
+ SONIC_RD_PKTLEN);
+ skb_trim(used_skb, pkt_len);
+ used_skb->protocol = eth_type_trans(used_skb,
+ dev);
+ netif_rx(used_skb);
+ lp->stats.rx_packets++;
+ lp->stats.rx_bytes += pkt_len;
+
+ lp->rx_skb[i] = new_skb;
+ lp->rx_laddr[i] = new_laddr;
+ } else {
+ /* Failed to obtain a new buffer so re-use it */
+ new_laddr = addr;
lp->stats.rx_dropped++;
- break;
}
-
- /* now we have a new skb to replace it, pass the used one up the stack */
- dma_unmap_single(lp->device, lp->rx_laddr[entry], SONIC_RBSIZE, DMA_FROM_DEVICE);
- used_skb = lp->rx_skb[entry];
- pkt_len = sonic_rda_get(dev, entry, SONIC_RD_PKTLEN);
- skb_trim(used_skb, pkt_len);
- used_skb->protocol = eth_type_trans(used_skb, dev);
- netif_rx(used_skb);
- lp->stats.rx_packets++;
- lp->stats.rx_bytes += pkt_len;
-
- /* and insert the new skb */
- lp->rx_laddr[entry] = new_laddr;
- lp->rx_skb[entry] = new_skb;
-
- bufadr_l = (unsigned long)new_laddr & 0xffff;
- bufadr_h = (unsigned long)new_laddr >> 16;
- sonic_rra_put(dev, entry, SONIC_RR_BUFADR_L, bufadr_l);
- sonic_rra_put(dev, entry, SONIC_RR_BUFADR_H, bufadr_h);
- } else {
- /* This should only happen, if we enable accepting broken packets. */
- lp->stats.rx_errors++;
- if (status & SONIC_RCR_FAER)
- lp->stats.rx_frame_errors++;
- if (status & SONIC_RCR_CRCR)
- lp->stats.rx_crc_errors++;
- }
- if (status & SONIC_RCR_LPKT) {
- /*
- * this was the last packet out of the current receive buffer
- * give the buffer back to the SONIC
+ /* If RBE is already asserted when RWP advances then
+ * it's safe to clear RBE after processing this packet.
*/
- lp->cur_rwp += SIZEOF_SONIC_RR * SONIC_BUS_SCALE(lp->dma_bitmode);
- if (lp->cur_rwp >= lp->rra_end) lp->cur_rwp = lp->rra_laddr & 0xffff;
- SONIC_WRITE(SONIC_RWP, lp->cur_rwp);
- if (SONIC_READ(SONIC_ISR) & SONIC_INT_RBE) {
- netif_dbg(lp, rx_err, dev, "%s: rx buffer exhausted\n",
- __func__);
- SONIC_WRITE(SONIC_ISR, SONIC_INT_RBE); /* clear the flag */
- }
- } else
- printk(KERN_ERR "%s: rx desc without RCR_LPKT. Shouldn't happen !?\n",
- dev->name);
+ rbe = rbe || SONIC_READ(SONIC_ISR) & SONIC_INT_RBE;
+ sonic_update_rra(dev, lp, addr, new_laddr);
+ }
/*
* give back the descriptor
*/
- sonic_rda_put(dev, entry, SONIC_RD_LINK,
- sonic_rda_get(dev, entry, SONIC_RD_LINK) | SONIC_EOL);
+ sonic_rda_put(dev, entry, SONIC_RD_STATUS, 0);
sonic_rda_put(dev, entry, SONIC_RD_IN_USE, 1);
- sonic_rda_put(dev, lp->eol_rx, SONIC_RD_LINK,
- sonic_rda_get(dev, lp->eol_rx, SONIC_RD_LINK) & ~SONIC_EOL);
- lp->eol_rx = entry;
- lp->cur_rx = entry = (entry + 1) & SONIC_RDS_MASK;
+
+ prev_entry = entry;
+ entry = (entry + 1) & SONIC_RDS_MASK;
+ }
+
+ lp->cur_rx = entry;
+
+ if (prev_entry != lp->eol_rx) {
+ /* Advance the EOL flag to put descriptors back into service */
+ sonic_rda_put(dev, prev_entry, SONIC_RD_LINK, SONIC_EOL |
+ sonic_rda_get(dev, prev_entry, SONIC_RD_LINK));
+ sonic_rda_put(dev, lp->eol_rx, SONIC_RD_LINK, ~SONIC_EOL &
+ sonic_rda_get(dev, lp->eol_rx, SONIC_RD_LINK));
+ lp->eol_rx = prev_entry;
}
+
+ if (rbe)
+ SONIC_WRITE(SONIC_ISR, SONIC_INT_RBE);
/*
* If any worth-while packets have been received, netif_rx()
* has done a mark_bh(NET_BH) for us and will work on them
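
The tail of sonic_rx() replaces the old per-descriptor EOL shuffle with a single boundary move after the loop: set EOL on the newest processed descriptor, then clear it on the previous boundary, so the ring stays terminated at every instant. A minimal model of that handoff, with EOL living in each descriptor's link field (names illustrative):

    #include <stdio.h>

    #define EOL 0x0001

    static unsigned short link_field[16];

    static void advance_eol(unsigned int old_eol, unsigned int new_eol)
    {
        link_field[new_eol] |= EOL;     /* terminate at the new boundary */
        link_field[old_eol] &= ~EOL;    /* then reopen the old one */
    }

    int main(void)
    {
        link_field[0] = EOL;
        advance_eol(0, 3);
        printf("%x %x\n", link_field[0], link_field[3]);    /* 0 1 */
        return 0;
    }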
@@ -550,6 +643,8 @@ static void sonic_multicast_list(struct net_device *dev)
(netdev_mc_count(dev) > 15)) {
rcr |= SONIC_RCR_AMC;
} else {
+ unsigned long flags;
+
netif_dbg(lp, ifup, dev, "%s: mc_count %d\n", __func__,
netdev_mc_count(dev));
sonic_set_cam_enable(dev, 1); /* always enable our own address */
@@ -563,9 +658,14 @@ static void sonic_multicast_list(struct net_device *dev)
i++;
}
SONIC_WRITE(SONIC_CDC, 16);
- /* issue Load CAM command */
SONIC_WRITE(SONIC_CDP, lp->cda_laddr & 0xffff);
+
+ /* LCAM and TXP commands can't be used simultaneously */
+ spin_lock_irqsave(&lp->lock, flags);
+ sonic_quiesce(dev, SONIC_CR_TXP);
SONIC_WRITE(SONIC_CMD, SONIC_CR_LCAM);
+ sonic_quiesce(dev, SONIC_CR_LCAM);
+ spin_unlock_irqrestore(&lp->lock, flags);
}
}
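
sonic_quiesce() is introduced elsewhere in this patch; only its call sites appear in this excerpt. From those call sites its contract is clear: block until the named command bits drop from SONIC_CMD. A plausible shape, offered as an assumption rather than the actual definition:

    /* Assumed shape only; the real definition is elsewhere in the patch. */
    static void sonic_quiesce(struct net_device *dev, u16 mask)
    {
        struct sonic_local * __maybe_unused lp = netdev_priv(dev);
        int i;

        for (i = 0; i < 1000; i++) {
            if (!(SONIC_READ(SONIC_CMD) & mask))
                return;
            udelay(20);
        }
        WARN_ONCE(1, "command did not complete: 0x%04x\n", mask);
    }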
@@ -580,7 +680,6 @@ static void sonic_multicast_list(struct net_device *dev)
*/
static int sonic_init(struct net_device *dev)
{
- unsigned int cmd;
struct sonic_local *lp = netdev_priv(dev);
int i;
@@ -592,12 +691,16 @@ static int sonic_init(struct net_device *dev)
SONIC_WRITE(SONIC_ISR, 0x7fff);
SONIC_WRITE(SONIC_CMD, SONIC_CR_RST);
+ /* While in reset mode, clear CAM Enable register */
+ SONIC_WRITE(SONIC_CE, 0);
+
/*
* clear software reset flag, disable receiver, clear and
* enable interrupts, then completely initialize the SONIC
*/
SONIC_WRITE(SONIC_CMD, 0);
- SONIC_WRITE(SONIC_CMD, SONIC_CR_RXDIS);
+ SONIC_WRITE(SONIC_CMD, SONIC_CR_RXDIS | SONIC_CR_STP);
+ sonic_quiesce(dev, SONIC_CR_ALL);
/*
* initialize the receive resource area
@@ -615,15 +718,10 @@ static int sonic_init(struct net_device *dev)
}
/* initialize all RRA registers */
- lp->rra_end = (lp->rra_laddr + SONIC_NUM_RRS * SIZEOF_SONIC_RR *
- SONIC_BUS_SCALE(lp->dma_bitmode)) & 0xffff;
- lp->cur_rwp = (lp->rra_laddr + (SONIC_NUM_RRS - 1) * SIZEOF_SONIC_RR *
- SONIC_BUS_SCALE(lp->dma_bitmode)) & 0xffff;
-
- SONIC_WRITE(SONIC_RSA, lp->rra_laddr & 0xffff);
- SONIC_WRITE(SONIC_REA, lp->rra_end);
- SONIC_WRITE(SONIC_RRP, lp->rra_laddr & 0xffff);
- SONIC_WRITE(SONIC_RWP, lp->cur_rwp);
+ SONIC_WRITE(SONIC_RSA, sonic_rr_addr(dev, 0));
+ SONIC_WRITE(SONIC_REA, sonic_rr_addr(dev, SONIC_NUM_RRS));
+ SONIC_WRITE(SONIC_RRP, sonic_rr_addr(dev, 0));
+ SONIC_WRITE(SONIC_RWP, sonic_rr_addr(dev, SONIC_NUM_RRS - 1));
SONIC_WRITE(SONIC_URRA, lp->rra_laddr >> 16);
SONIC_WRITE(SONIC_EOBC, (SONIC_RBSIZE >> 1) - (lp->dma_bitmode ? 2 : 1));
@@ -631,14 +729,7 @@ static int sonic_init(struct net_device *dev)
netif_dbg(lp, ifup, dev, "%s: issuing RRRA command\n", __func__);
SONIC_WRITE(SONIC_CMD, SONIC_CR_RRRA);
- i = 0;
- while (i++ < 100) {
- if (SONIC_READ(SONIC_CMD) & SONIC_CR_RRRA)
- break;
- }
-
- netif_dbg(lp, ifup, dev, "%s: status=%x, i=%d\n", __func__,
- SONIC_READ(SONIC_CMD), i);
+ sonic_quiesce(dev, SONIC_CR_RRRA);
/*
* Initialize the receive descriptors so that they
@@ -713,28 +804,17 @@ static int sonic_init(struct net_device *dev)
* load the CAM
*/
SONIC_WRITE(SONIC_CMD, SONIC_CR_LCAM);
-
- i = 0;
- while (i++ < 100) {
- if (SONIC_READ(SONIC_ISR) & SONIC_INT_LCD)
- break;
- }
- netif_dbg(lp, ifup, dev, "%s: CMD=%x, ISR=%x, i=%d\n", __func__,
- SONIC_READ(SONIC_CMD), SONIC_READ(SONIC_ISR), i);
+ sonic_quiesce(dev, SONIC_CR_LCAM);
/*
* enable receiver, disable loopback
* and enable all interrupts
*/
- SONIC_WRITE(SONIC_CMD, SONIC_CR_RXEN | SONIC_CR_STP);
SONIC_WRITE(SONIC_RCR, SONIC_RCR_DEFAULT);
SONIC_WRITE(SONIC_TCR, SONIC_TCR_DEFAULT);
SONIC_WRITE(SONIC_ISR, 0x7fff);
SONIC_WRITE(SONIC_IMR, SONIC_IMR_DEFAULT);
-
- cmd = SONIC_READ(SONIC_CMD);
- if ((cmd & SONIC_CR_RXEN) == 0 || (cmd & SONIC_CR_STP) == 0)
- printk(KERN_ERR "sonic_init: failed, status=%x\n", cmd);
+ SONIC_WRITE(SONIC_CMD, SONIC_CR_RXEN);
netif_dbg(lp, ifup, dev, "%s: new status=%x\n", __func__,
SONIC_READ(SONIC_CMD));
diff --git a/drivers/net/ethernet/natsemi/sonic.h b/drivers/net/ethernet/natsemi/sonic.h
index 2b27f7049acb..e0e4cba6f6f6 100644
--- a/drivers/net/ethernet/natsemi/sonic.h
+++ b/drivers/net/ethernet/natsemi/sonic.h
@@ -110,6 +110,9 @@
#define SONIC_CR_TXP 0x0002
#define SONIC_CR_HTX 0x0001
+#define SONIC_CR_ALL (SONIC_CR_LCAM | SONIC_CR_RRRA | \
+ SONIC_CR_RXEN | SONIC_CR_TXP)
+
/*
* SONIC data configuration bits
*/
@@ -175,6 +178,7 @@
#define SONIC_TCR_NCRS 0x0100
#define SONIC_TCR_CRLS 0x0080
#define SONIC_TCR_EXC 0x0040
+#define SONIC_TCR_OWC 0x0020
#define SONIC_TCR_PMB 0x0008
#define SONIC_TCR_FU 0x0004
#define SONIC_TCR_BCM 0x0002
@@ -274,8 +278,9 @@
#define SONIC_NUM_RDS SONIC_NUM_RRS /* number of receive descriptors */
#define SONIC_NUM_TDS 16 /* number of transmit descriptors */
-#define SONIC_RDS_MASK (SONIC_NUM_RDS-1)
-#define SONIC_TDS_MASK (SONIC_NUM_TDS-1)
+#define SONIC_RRS_MASK (SONIC_NUM_RRS - 1)
+#define SONIC_RDS_MASK (SONIC_NUM_RDS - 1)
+#define SONIC_TDS_MASK (SONIC_NUM_TDS - 1)
#define SONIC_RBSIZE 1520 /* size of one resource buffer */
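
All of the (i + 1) & MASK stepping above is correct only while the ring sizes stay powers of two. A compile-time guard would make that explicit; this is an illustrative addition assuming the driver's default ring size of 16, not part of the patch:

    #define SONIC_NUM_RRS 16
    #define SONIC_RRS_MASK (SONIC_NUM_RRS - 1)

    _Static_assert((SONIC_NUM_RRS & SONIC_RRS_MASK) == 0,
                   "ring size must be a power of two for mask stepping");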
@@ -312,8 +317,6 @@ struct sonic_local {
u32 rda_laddr; /* logical DMA address of RDA */
dma_addr_t rx_laddr[SONIC_NUM_RRS]; /* logical DMA addresses of rx skbuffs */
dma_addr_t tx_laddr[SONIC_NUM_TDS]; /* logical DMA addresses of tx skbuffs */
- unsigned int rra_end;
- unsigned int cur_rwp;
unsigned int cur_rx;
unsigned int cur_tx; /* first unacked transmit packet */
unsigned int eol_rx;
@@ -322,6 +325,7 @@ struct sonic_local {
int msg_enable;
struct device *device; /* generic device */
struct net_device_stats stats;
+ spinlock_t lock;
};
#define TX_TIMEOUT (3 * HZ)
@@ -336,7 +340,7 @@ static int sonic_close(struct net_device *dev);
static struct net_device_stats *sonic_get_stats(struct net_device *dev);
static void sonic_multicast_list(struct net_device *dev);
static int sonic_init(struct net_device *dev);
-static void sonic_tx_timeout(struct net_device *dev);
+static void sonic_tx_timeout(struct net_device *dev, unsigned int txqueue);
static void sonic_msg_init(struct net_device *dev);
/* Internal inlines for reading/writing DMA buffers. Note that bus
@@ -344,30 +348,30 @@ static void sonic_msg_init(struct net_device *dev);
as far as we can tell. */
/* OpenBSD calls this "SWO". I'd like to think that sonic_buf_put()
is a much better name. */
-static inline void sonic_buf_put(void* base, int bitmode,
+static inline void sonic_buf_put(u16 *base, int bitmode,
int offset, __u16 val)
{
if (bitmode)
#ifdef __BIG_ENDIAN
- ((__u16 *) base + (offset*2))[1] = val;
+ __raw_writew(val, base + (offset * 2) + 1);
#else
- ((__u16 *) base + (offset*2))[0] = val;
+ __raw_writew(val, base + (offset * 2) + 0);
#endif
else
- ((__u16 *) base)[offset] = val;
+ __raw_writew(val, base + (offset * 1) + 0);
}
-static inline __u16 sonic_buf_get(void* base, int bitmode,
+static inline __u16 sonic_buf_get(u16 *base, int bitmode,
int offset)
{
if (bitmode)
#ifdef __BIG_ENDIAN
- return ((volatile __u16 *) base + (offset*2))[1];
+ return __raw_readw(base + (offset * 2) + 1);
#else
- return ((volatile __u16 *) base + (offset*2))[0];
+ return __raw_readw(base + (offset * 2) + 0);
#endif
else
- return ((volatile __u16 *) base)[offset];
+ return __raw_readw(base + (offset * 1) + 0);
}
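
The rewritten accessors preserve the old layout rule: in 32-bit bus mode every 16-bit descriptor field is padded to 32 bits, and on big-endian hosts the live half-word is the second u16, hence the offset * 2 + 1 arithmetic. A user-space model, with the host's endianness simulated by a flag:

    #include <stdio.h>

    static unsigned short buf[8];

    static void buf_put(unsigned short *base, int bitmode, int offset,
                        unsigned short val)
    {
        int big_endian = 1;             /* assumption: big-endian host */

        if (bitmode)                    /* 32-bit bus: fields padded to 32 bits */
            base[offset * 2 + (big_endian ? 1 : 0)] = val;
        else
            base[offset] = val;
    }

    int main(void)
    {
        buf_put(buf, 1, 1, 0xbeef);
        printf("%04x %04x\n", buf[2], buf[3]);  /* 0000 beef */
        return 0;
    }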
/* Inlines that you should actually use for reading/writing DMA buffers */
@@ -447,6 +451,22 @@ static inline __u16 sonic_rra_get(struct net_device* dev, int entry,
(entry * SIZEOF_SONIC_RR) + offset);
}
+static inline u16 sonic_rr_addr(struct net_device *dev, int entry)
+{
+ struct sonic_local *lp = netdev_priv(dev);
+
+ return lp->rra_laddr +
+ entry * SIZEOF_SONIC_RR * SONIC_BUS_SCALE(lp->dma_bitmode);
+}
+
+static inline u16 sonic_rr_entry(struct net_device *dev, u16 addr)
+{
+ struct sonic_local *lp = netdev_priv(dev);
+
+ return (addr - (u16)lp->rra_laddr) / (SIZEOF_SONIC_RR *
+ SONIC_BUS_SCALE(lp->dma_bitmode));
+}
+
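
sonic_rr_addr() and sonic_rr_entry() are inverses over the RRA, converting between a ring index and the 16-bit bus address programmed into RRP/RWP. A compilable round-trip with illustrative constants; the real byte scale comes from SONIC_BUS_SCALE(lp->dma_bitmode):

    #include <stdio.h>

    #define RRA_BASE 0x8000u
    #define RR_SIZE 4                   /* as SIZEOF_SONIC_RR */
    #define BUS_SCALE 2                 /* assumption for this example */

    static unsigned short rr_addr(int entry)
    {
        return RRA_BASE + entry * RR_SIZE * BUS_SCALE;
    }

    static unsigned short rr_entry(unsigned short addr)
    {
        return (addr - RRA_BASE) / (RR_SIZE * BUS_SCALE);
    }

    int main(void)
    {
        printf("%u\n", rr_entry(rr_addr(5)));   /* prints 5 */
        return 0;
    }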
static const char version[] =
"sonic.c:v0.92 20.9.98 tsbogend@alpha.franken.de\n";
diff --git a/drivers/net/ethernet/neterion/s2io.c b/drivers/net/ethernet/neterion/s2io.c
index e0b2bf327905..0ec6b8e8b549 100644
--- a/drivers/net/ethernet/neterion/s2io.c
+++ b/drivers/net/ethernet/neterion/s2io.c
@@ -7238,7 +7238,7 @@ out_unlock:
* void
*/
-static void s2io_tx_watchdog(struct net_device *dev)
+static void s2io_tx_watchdog(struct net_device *dev, unsigned int txqueue)
{
struct s2io_nic *sp = netdev_priv(dev);
struct swStat *swstats = &sp->mac_control.stats_info->sw_stat;
diff --git a/drivers/net/ethernet/neterion/s2io.h b/drivers/net/ethernet/neterion/s2io.h
index 0a921f30f98f..6fa3159a977f 100644
--- a/drivers/net/ethernet/neterion/s2io.h
+++ b/drivers/net/ethernet/neterion/s2io.h
@@ -1065,7 +1065,7 @@ static void s2io_txpic_intr_handle(struct s2io_nic *sp);
static void tx_intr_handler(struct fifo_info *fifo_data);
static void s2io_handle_errors(void * dev_id);
-static void s2io_tx_watchdog(struct net_device *dev);
+static void s2io_tx_watchdog(struct net_device *dev, unsigned int txqueue);
static void s2io_set_multicast(struct net_device *dev);
static int rx_osm_handler(struct ring_info *ring_data, struct RxD_t * rxdp);
static void s2io_link(struct s2io_nic * sp, int link);
diff --git a/drivers/net/ethernet/neterion/vxge/vxge-main.c b/drivers/net/ethernet/neterion/vxge/vxge-main.c
index 1d334f2e0a56..9b63574b6202 100644
--- a/drivers/net/ethernet/neterion/vxge/vxge-main.c
+++ b/drivers/net/ethernet/neterion/vxge/vxge-main.c
@@ -3273,7 +3273,7 @@ static int vxge_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
* This function is triggered if the Tx Queue is stopped
* for a pre-defined amount of time when the Interface is still up.
*/
-static void vxge_tx_watchdog(struct net_device *dev)
+static void vxge_tx_watchdog(struct net_device *dev, unsigned int txqueue)
{
struct vxgedev *vdev;
diff --git a/drivers/net/ethernet/netronome/Kconfig b/drivers/net/ethernet/netronome/Kconfig
index bac5be4d4f43..a3f68a718813 100644
--- a/drivers/net/ethernet/netronome/Kconfig
+++ b/drivers/net/ethernet/netronome/Kconfig
@@ -31,6 +31,7 @@ config NFP_APP_FLOWER
bool "NFP4000/NFP6000 TC Flower offload support"
depends on NFP
depends on NET_SWITCHDEV
+ depends on IPV6!=m || NFP=m
default y
---help---
Enable driver support for TC Flower offload on NFP4000 and NFP6000.
diff --git a/drivers/net/ethernet/netronome/nfp/abm/cls.c b/drivers/net/ethernet/netronome/nfp/abm/cls.c
index 9f8a1f69c0c4..23ebddfb9532 100644
--- a/drivers/net/ethernet/netronome/nfp/abm/cls.c
+++ b/drivers/net/ethernet/netronome/nfp/abm/cls.c
@@ -176,10 +176,8 @@ nfp_abm_u32_knode_replace(struct nfp_abm_link *alink,
u8 mask, val;
int err;
- if (!nfp_abm_u32_check_knode(alink->abm, knode, proto, extack)) {
- err = -EOPNOTSUPP;
+ if (!nfp_abm_u32_check_knode(alink->abm, knode, proto, extack))
goto err_delete;
- }
tos_off = proto == htons(ETH_P_IP) ? 16 : 20;
@@ -200,18 +198,14 @@ nfp_abm_u32_knode_replace(struct nfp_abm_link *alink,
if ((iter->val & cmask) == (val & cmask) &&
iter->band != knode->res->classid) {
NL_SET_ERR_MSG_MOD(extack, "conflict with already offloaded filter");
- err = -EOPNOTSUPP;
goto err_delete;
}
}
if (!match) {
match = kzalloc(sizeof(*match), GFP_KERNEL);
- if (!match) {
- err = -ENOMEM;
- goto err_delete;
- }
-
+ if (!match)
+ return -ENOMEM;
list_add(&match->list, &alink->dscp_map);
}
match->handle = knode->handle;
@@ -227,7 +221,7 @@ nfp_abm_u32_knode_replace(struct nfp_abm_link *alink,
err_delete:
nfp_abm_u32_knode_delete(alink, knode);
- return err;
+ return -EOPNOTSUPP;
}
static int nfp_abm_setup_tc_block_cb(enum tc_setup_type type,
diff --git a/drivers/net/ethernet/netronome/nfp/ccm.h b/drivers/net/ethernet/netronome/nfp/ccm.h
index a460c75522be..d81d450be50e 100644
--- a/drivers/net/ethernet/netronome/nfp/ccm.h
+++ b/drivers/net/ethernet/netronome/nfp/ccm.h
@@ -26,6 +26,7 @@ enum nfp_ccm_type {
NFP_CCM_TYPE_CRYPTO_ADD = 10,
NFP_CCM_TYPE_CRYPTO_DEL = 11,
NFP_CCM_TYPE_CRYPTO_UPDATE = 12,
+ NFP_CCM_TYPE_CRYPTO_RESYNC = 13,
__NFP_CCM_TYPE_MAX,
};
diff --git a/drivers/net/ethernet/netronome/nfp/crypto/crypto.h b/drivers/net/ethernet/netronome/nfp/crypto/crypto.h
index 60372ddf69f0..bffe58bb2f27 100644
--- a/drivers/net/ethernet/netronome/nfp/crypto/crypto.h
+++ b/drivers/net/ethernet/netronome/nfp/crypto/crypto.h
@@ -4,6 +4,10 @@
#ifndef NFP_CRYPTO_H
#define NFP_CRYPTO_H 1
+struct net_device;
+struct nfp_net;
+struct nfp_net_tls_resync_req;
+
struct nfp_net_tls_offload_ctx {
__be32 fw_handle[2];
@@ -17,11 +21,22 @@ struct nfp_net_tls_offload_ctx {
#ifdef CONFIG_TLS_DEVICE
int nfp_net_tls_init(struct nfp_net *nn);
+int nfp_net_tls_rx_resync_req(struct net_device *netdev,
+ struct nfp_net_tls_resync_req *req,
+ void *pkt, unsigned int pkt_len);
#else
static inline int nfp_net_tls_init(struct nfp_net *nn)
{
return 0;
}
+
+static inline int
+nfp_net_tls_rx_resync_req(struct net_device *netdev,
+ struct nfp_net_tls_resync_req *req,
+ void *pkt, unsigned int pkt_len)
+{
+ return -EOPNOTSUPP;
+}
#endif
#endif
diff --git a/drivers/net/ethernet/netronome/nfp/crypto/fw.h b/drivers/net/ethernet/netronome/nfp/crypto/fw.h
index 67413d946c4a..8d1458896bcb 100644
--- a/drivers/net/ethernet/netronome/nfp/crypto/fw.h
+++ b/drivers/net/ethernet/netronome/nfp/crypto/fw.h
@@ -9,6 +9,14 @@
#define NFP_NET_CRYPTO_OP_TLS_1_2_AES_GCM_128_ENC 0
#define NFP_NET_CRYPTO_OP_TLS_1_2_AES_GCM_128_DEC 1
+struct nfp_net_tls_resync_req {
+ __be32 fw_handle[2];
+ __be32 tcp_seq;
+ u8 l3_offset;
+ u8 l4_offset;
+ u8 resv[2];
+};
+
struct nfp_crypto_reply_simple {
struct nfp_ccm_hdr hdr;
__be32 error;
diff --git a/drivers/net/ethernet/netronome/nfp/crypto/tls.c b/drivers/net/ethernet/netronome/nfp/crypto/tls.c
index 96a96b35c0ca..7c50e3dfb9d5 100644
--- a/drivers/net/ethernet/netronome/nfp/crypto/tls.c
+++ b/drivers/net/ethernet/netronome/nfp/crypto/tls.c
@@ -5,6 +5,7 @@
#include <linux/ipv6.h>
#include <linux/skbuff.h>
#include <linux/string.h>
+#include <net/inet6_hashtables.h>
#include <net/tls.h>
#include "../ccm.h"
@@ -391,8 +392,9 @@ nfp_net_tls_add(struct net_device *netdev, struct sock *sk,
if (direction == TLS_OFFLOAD_CTX_DIR_TX)
return 0;
- tls_offload_rx_resync_set_type(sk,
- TLS_OFFLOAD_SYNC_TYPE_CORE_NEXT_HINT);
+ if (!nn->tlv_caps.tls_resync_ss)
+ tls_offload_rx_resync_set_type(sk, TLS_OFFLOAD_SYNC_TYPE_CORE_NEXT_HINT);
+
return 0;
err_fw_remove:
@@ -424,6 +426,7 @@ nfp_net_tls_resync(struct net_device *netdev, struct sock *sk, u32 seq,
struct nfp_net *nn = netdev_priv(netdev);
struct nfp_net_tls_offload_ctx *ntls;
struct nfp_crypto_req_update *req;
+ enum nfp_ccm_type type;
struct sk_buff *skb;
gfp_t flags;
int err;
@@ -442,15 +445,18 @@ nfp_net_tls_resync(struct net_device *netdev, struct sock *sk, u32 seq,
req->tcp_seq = cpu_to_be32(seq);
memcpy(req->rec_no, rcd_sn, sizeof(req->rec_no));
+ type = NFP_CCM_TYPE_CRYPTO_UPDATE;
if (direction == TLS_OFFLOAD_CTX_DIR_TX) {
- err = nfp_net_tls_communicate_simple(nn, skb, "sync",
- NFP_CCM_TYPE_CRYPTO_UPDATE);
+ err = nfp_net_tls_communicate_simple(nn, skb, "sync", type);
if (err)
return err;
ntls->next_seq = seq;
} else {
- nfp_ccm_mbox_post(nn, skb, NFP_CCM_TYPE_CRYPTO_UPDATE,
+ if (nn->tlv_caps.tls_resync_ss)
+ type = NFP_CCM_TYPE_CRYPTO_RESYNC;
+ nfp_ccm_mbox_post(nn, skb, type,
sizeof(struct nfp_crypto_reply_simple));
+ atomic_inc(&nn->ktls_rx_resync_sent);
}
return 0;
@@ -462,6 +468,79 @@ static const struct tlsdev_ops nfp_net_tls_ops = {
.tls_dev_resync = nfp_net_tls_resync,
};
+int nfp_net_tls_rx_resync_req(struct net_device *netdev,
+ struct nfp_net_tls_resync_req *req,
+ void *pkt, unsigned int pkt_len)
+{
+ struct nfp_net *nn = netdev_priv(netdev);
+ struct nfp_net_tls_offload_ctx *ntls;
+ struct ipv6hdr *ipv6h;
+ struct tcphdr *th;
+ struct iphdr *iph;
+ struct sock *sk;
+ __be32 tcp_seq;
+ int err;
+
+ iph = pkt + req->l3_offset;
+ ipv6h = pkt + req->l3_offset;
+ th = pkt + req->l4_offset;
+
+ if ((u8 *)&th[1] > (u8 *)pkt + pkt_len) {
+ netdev_warn_once(netdev, "invalid TLS RX resync request (l3_off: %hhu l4_off: %hhu pkt_len: %u)\n",
+ req->l3_offset, req->l4_offset, pkt_len);
+ err = -EINVAL;
+ goto err_cnt_ign;
+ }
+
+ switch (iph->version) {
+ case 4:
+ sk = inet_lookup_established(dev_net(netdev), &tcp_hashinfo,
+ iph->saddr, th->source, iph->daddr,
+ th->dest, netdev->ifindex);
+ break;
+#if IS_ENABLED(CONFIG_IPV6)
+ case 6:
+ sk = __inet6_lookup_established(dev_net(netdev), &tcp_hashinfo,
+ &ipv6h->saddr, th->source,
+ &ipv6h->daddr, ntohs(th->dest),
+ netdev->ifindex, 0);
+ break;
+#endif
+ default:
+ netdev_warn_once(netdev, "invalid TLS RX resync request (l3_off: %hhu l4_off: %hhu ipver: %u)\n",
+ req->l3_offset, req->l4_offset, iph->version);
+ err = -EINVAL;
+ goto err_cnt_ign;
+ }
+
+ err = 0;
+ if (!sk)
+ goto err_cnt_ign;
+ if (!tls_is_sk_rx_device_offloaded(sk) ||
+ sk->sk_shutdown & RCV_SHUTDOWN)
+ goto err_put_sock;
+
+ ntls = tls_driver_ctx(sk, TLS_OFFLOAD_CTX_DIR_RX);
+ /* some FW versions can't report the handle and report 0s */
+ if (memchr_inv(&req->fw_handle, 0, sizeof(req->fw_handle)) &&
+ memcmp(&req->fw_handle, &ntls->fw_handle, sizeof(ntls->fw_handle)))
+ goto err_put_sock;
+
+ /* copy to ensure alignment */
+ memcpy(&tcp_seq, &req->tcp_seq, sizeof(tcp_seq));
+ tls_offload_rx_resync_request(sk, tcp_seq);
+ atomic_inc(&nn->ktls_rx_resync_req);
+
+ sock_gen_put(sk);
+ return 0;
+
+err_put_sock:
+ sock_gen_put(sk);
+err_cnt_ign:
+ atomic_inc(&nn->ktls_rx_resync_ign);
+ return err;
+}
+
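
Before dereferencing the firmware-supplied offsets, nfp_net_tls_rx_resync_req() proves the whole TCP header fits inside the buffer; &th[1] is one-past-the-end of the header, so comparing it against pkt + pkt_len is the entire check. A user-space model with a 20-byte stand-in for struct tcphdr:

    #include <stdio.h>

    struct tcphdr_model { unsigned char bytes[20]; };

    static int l4_in_bounds(const void *pkt, unsigned int pkt_len,
                            unsigned int l4_off)
    {
        const struct tcphdr_model *th =
            (const void *)((const char *)pkt + l4_off);

        /* &th[1] is one-past-the-end of the header */
        return (const char *)&th[1] <= (const char *)pkt + pkt_len;
    }

    int main(void)
    {
        char pkt[64];

        printf("%d %d\n",
               l4_in_bounds(pkt, sizeof(pkt), 20),      /* 1: fits */
               l4_in_bounds(pkt, sizeof(pkt), 60));     /* 0: overruns */
        return 0;
    }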
static int nfp_net_tls_reset(struct nfp_net *nn)
{
struct nfp_crypto_req_reset *req;
diff --git a/drivers/net/ethernet/netronome/nfp/flower/action.c b/drivers/net/ethernet/netronome/nfp/flower/action.c
index 1b019fdfcd97..c06600fb47ff 100644
--- a/drivers/net/ethernet/netronome/nfp/flower/action.c
+++ b/drivers/net/ethernet/netronome/nfp/flower/action.c
@@ -22,8 +22,9 @@
#define NFP_FL_TUNNEL_CSUM cpu_to_be16(0x01)
#define NFP_FL_TUNNEL_KEY cpu_to_be16(0x04)
#define NFP_FL_TUNNEL_GENEVE_OPT cpu_to_be16(0x0800)
-#define NFP_FL_SUPPORTED_TUNNEL_INFO_FLAGS IP_TUNNEL_INFO_TX
-#define NFP_FL_SUPPORTED_IPV4_UDP_TUN_FLAGS (NFP_FL_TUNNEL_CSUM | \
+#define NFP_FL_SUPPORTED_TUNNEL_INFO_FLAGS (IP_TUNNEL_INFO_TX | \
+ IP_TUNNEL_INFO_IPV6)
+#define NFP_FL_SUPPORTED_UDP_TUN_FLAGS (NFP_FL_TUNNEL_CSUM | \
NFP_FL_TUNNEL_KEY | \
NFP_FL_TUNNEL_GENEVE_OPT)
@@ -394,19 +395,26 @@ nfp_fl_push_geneve_options(struct nfp_fl_payload *nfp_fl, int *list_len,
}
static int
-nfp_fl_set_ipv4_tun(struct nfp_app *app, struct nfp_fl_set_ipv4_tun *set_tun,
- const struct flow_action_entry *act,
- struct nfp_fl_pre_tunnel *pre_tun,
- enum nfp_flower_tun_type tun_type,
- struct net_device *netdev, struct netlink_ext_ack *extack)
+nfp_fl_set_tun(struct nfp_app *app, struct nfp_fl_set_tun *set_tun,
+ const struct flow_action_entry *act,
+ struct nfp_fl_pre_tunnel *pre_tun,
+ enum nfp_flower_tun_type tun_type,
+ struct net_device *netdev, struct netlink_ext_ack *extack)
{
- size_t act_size = sizeof(struct nfp_fl_set_ipv4_tun);
const struct ip_tunnel_info *ip_tun = act->tunnel;
+ bool ipv6 = ip_tunnel_info_af(ip_tun) == AF_INET6;
+ size_t act_size = sizeof(struct nfp_fl_set_tun);
struct nfp_flower_priv *priv = app->priv;
u32 tmp_set_ip_tun_type_index = 0;
/* Currently support one pre-tunnel so index is always 0. */
int pretun_idx = 0;
+ if (!IS_ENABLED(CONFIG_IPV6) && ipv6)
+ return -EOPNOTSUPP;
+
+ if (ipv6 && !(priv->flower_ext_feats & NFP_FL_FEATS_IPV6_TUN))
+ return -EOPNOTSUPP;
+
BUILD_BUG_ON(NFP_FL_TUNNEL_CSUM != TUNNEL_CSUM ||
NFP_FL_TUNNEL_KEY != TUNNEL_KEY ||
NFP_FL_TUNNEL_GENEVE_OPT != TUNNEL_GENEVE_OPT);
@@ -417,19 +425,35 @@ nfp_fl_set_ipv4_tun(struct nfp_app *app, struct nfp_fl_set_ipv4_tun *set_tun,
return -EOPNOTSUPP;
}
- set_tun->head.jump_id = NFP_FL_ACTION_OPCODE_SET_IPV4_TUNNEL;
+ set_tun->head.jump_id = NFP_FL_ACTION_OPCODE_SET_TUNNEL;
set_tun->head.len_lw = act_size >> NFP_FL_LW_SIZ;
/* Set tunnel type and pre-tunnel index. */
tmp_set_ip_tun_type_index |=
- FIELD_PREP(NFP_FL_IPV4_TUNNEL_TYPE, tun_type) |
- FIELD_PREP(NFP_FL_IPV4_PRE_TUN_INDEX, pretun_idx);
+ FIELD_PREP(NFP_FL_TUNNEL_TYPE, tun_type) |
+ FIELD_PREP(NFP_FL_PRE_TUN_INDEX, pretun_idx);
set_tun->tun_type_index = cpu_to_be32(tmp_set_ip_tun_type_index);
set_tun->tun_id = ip_tun->key.tun_id;
if (ip_tun->key.ttl) {
set_tun->ttl = ip_tun->key.ttl;
+#ifdef CONFIG_IPV6
+ } else if (ipv6) {
+ struct net *net = dev_net(netdev);
+ struct flowi6 flow = {};
+ struct dst_entry *dst;
+
+ flow.daddr = ip_tun->key.u.ipv6.dst;
+ flow.flowi6_proto = IPPROTO_UDP;

+ dst = ipv6_stub->ipv6_dst_lookup_flow(net, NULL, &flow, NULL);
+ if (!IS_ERR(dst)) {
+ set_tun->ttl = ip6_dst_hoplimit(dst);
+ dst_release(dst);
+ } else {
+ set_tun->ttl = net->ipv6.devconf_all->hop_limit;
+ }
+#endif
} else {
struct net *net = dev_net(netdev);
struct flowi4 flow = {};
@@ -455,7 +479,7 @@ nfp_fl_set_ipv4_tun(struct nfp_app *app, struct nfp_fl_set_ipv4_tun *set_tun,
set_tun->tos = ip_tun->key.tos;
if (!(ip_tun->key.tun_flags & NFP_FL_TUNNEL_KEY) ||
- ip_tun->key.tun_flags & ~NFP_FL_SUPPORTED_IPV4_UDP_TUN_FLAGS) {
+ ip_tun->key.tun_flags & ~NFP_FL_SUPPORTED_UDP_TUN_FLAGS) {
NL_SET_ERR_MSG_MOD(extack, "unsupported offload: loaded firmware does not support tunnel flag offload");
return -EOPNOTSUPP;
}
@@ -467,7 +491,12 @@ nfp_fl_set_ipv4_tun(struct nfp_app *app, struct nfp_fl_set_ipv4_tun *set_tun,
}
/* Complete pre_tunnel action. */
- pre_tun->ipv4_dst = ip_tun->key.u.ipv4.dst;
+ if (ipv6) {
+ pre_tun->flags |= cpu_to_be16(NFP_FL_PRE_TUN_IPV6);
+ pre_tun->ipv6_dst = ip_tun->key.u.ipv6.dst;
+ } else {
+ pre_tun->ipv4_dst = ip_tun->key.u.ipv4.dst;
+ }
return 0;
}
@@ -956,8 +985,8 @@ nfp_flower_loop_action(struct nfp_app *app, const struct flow_action_entry *act,
struct nfp_flower_pedit_acts *set_act, bool *pkt_host,
struct netlink_ext_ack *extack, int act_idx)
{
- struct nfp_fl_set_ipv4_tun *set_tun;
struct nfp_fl_pre_tunnel *pre_tun;
+ struct nfp_fl_set_tun *set_tun;
struct nfp_fl_push_vlan *psh_v;
struct nfp_fl_push_mpls *psh_m;
struct nfp_fl_pop_vlan *pop_v;
@@ -1032,7 +1061,7 @@ nfp_flower_loop_action(struct nfp_app *app, const struct flow_action_entry *act,
* If none, the packet falls back before applying other actions.
*/
if (*a_len + sizeof(struct nfp_fl_pre_tunnel) +
- sizeof(struct nfp_fl_set_ipv4_tun) > NFP_FL_MAX_A_SIZ) {
+ sizeof(struct nfp_fl_set_tun) > NFP_FL_MAX_A_SIZ) {
NL_SET_ERR_MSG_MOD(extack, "unsupported offload: maximum allowed action list size exceeded at tunnel encap");
return -EOPNOTSUPP;
}
@@ -1046,11 +1075,11 @@ nfp_flower_loop_action(struct nfp_app *app, const struct flow_action_entry *act,
return err;
set_tun = (void *)&nfp_fl->action_data[*a_len];
- err = nfp_fl_set_ipv4_tun(app, set_tun, act, pre_tun,
- *tun_type, netdev, extack);
+ err = nfp_fl_set_tun(app, set_tun, act, pre_tun, *tun_type,
+ netdev, extack);
if (err)
return err;
- *a_len += sizeof(struct nfp_fl_set_ipv4_tun);
+ *a_len += sizeof(struct nfp_fl_set_tun);
}
break;
case FLOW_ACTION_TUNNEL_DECAP:
diff --git a/drivers/net/ethernet/netronome/nfp/flower/cmsg.c b/drivers/net/ethernet/netronome/nfp/flower/cmsg.c
index 05981b54eaab..a595ddb92bff 100644
--- a/drivers/net/ethernet/netronome/nfp/flower/cmsg.c
+++ b/drivers/net/ethernet/netronome/nfp/flower/cmsg.c
@@ -270,11 +270,17 @@ nfp_flower_cmsg_process_one_rx(struct nfp_app *app, struct sk_buff *skb)
}
goto err_default;
case NFP_FLOWER_CMSG_TYPE_NO_NEIGH:
- nfp_tunnel_request_route(app, skb);
+ nfp_tunnel_request_route_v4(app, skb);
+ break;
+ case NFP_FLOWER_CMSG_TYPE_NO_NEIGH_V6:
+ nfp_tunnel_request_route_v6(app, skb);
break;
case NFP_FLOWER_CMSG_TYPE_ACTIVE_TUNS:
nfp_tunnel_keep_alive(app, skb);
break;
+ case NFP_FLOWER_CMSG_TYPE_ACTIVE_TUNS_V6:
+ nfp_tunnel_keep_alive_v6(app, skb);
+ break;
case NFP_FLOWER_CMSG_TYPE_QOS_STATS:
nfp_flower_stats_rlim_reply(app, skb);
break;
@@ -361,7 +367,8 @@ void nfp_flower_cmsg_rx(struct nfp_app *app, struct sk_buff *skb)
nfp_flower_process_mtu_ack(app, skb)) {
/* Handle MTU acks outside wq to prevent RTNL conflict. */
dev_consume_skb_any(skb);
- } else if (cmsg_hdr->type == NFP_FLOWER_CMSG_TYPE_TUN_NEIGH) {
+ } else if (cmsg_hdr->type == NFP_FLOWER_CMSG_TYPE_TUN_NEIGH ||
+ cmsg_hdr->type == NFP_FLOWER_CMSG_TYPE_TUN_NEIGH_V6) {
/* Acks from the NFP that the route is added - ignore. */
dev_consume_skb_any(skb);
} else if (cmsg_hdr->type == NFP_FLOWER_CMSG_TYPE_PORT_REIFY) {
diff --git a/drivers/net/ethernet/netronome/nfp/flower/cmsg.h b/drivers/net/ethernet/netronome/nfp/flower/cmsg.h
index 7eb2ec8969c3..9b50d76bbc09 100644
--- a/drivers/net/ethernet/netronome/nfp/flower/cmsg.h
+++ b/drivers/net/ethernet/netronome/nfp/flower/cmsg.h
@@ -26,6 +26,7 @@
#define NFP_FLOWER_LAYER2_GRE BIT(0)
#define NFP_FLOWER_LAYER2_GENEVE BIT(5)
#define NFP_FLOWER_LAYER2_GENEVE_OP BIT(6)
+#define NFP_FLOWER_LAYER2_TUN_IPV6 BIT(7)
#define NFP_FLOWER_MASK_VLAN_PRIO GENMASK(15, 13)
#define NFP_FLOWER_MASK_VLAN_PRESENT BIT(12)
@@ -63,6 +64,7 @@
#define NFP_FL_MAX_GENEVE_OPT_ACT 32
#define NFP_FL_MAX_GENEVE_OPT_CNT 64
#define NFP_FL_MAX_GENEVE_OPT_KEY 32
+#define NFP_FL_MAX_GENEVE_OPT_KEY_V6 8
/* Action opcodes */
#define NFP_FL_ACTION_OPCODE_OUTPUT 0
@@ -70,7 +72,7 @@
#define NFP_FL_ACTION_OPCODE_POP_VLAN 2
#define NFP_FL_ACTION_OPCODE_PUSH_MPLS 3
#define NFP_FL_ACTION_OPCODE_POP_MPLS 4
-#define NFP_FL_ACTION_OPCODE_SET_IPV4_TUNNEL 6
+#define NFP_FL_ACTION_OPCODE_SET_TUNNEL 6
#define NFP_FL_ACTION_OPCODE_SET_ETHERNET 7
#define NFP_FL_ACTION_OPCODE_SET_MPLS 8
#define NFP_FL_ACTION_OPCODE_SET_IPV4_ADDRS 9
@@ -99,8 +101,8 @@
/* Tunnel ports */
#define NFP_FL_PORT_TYPE_TUN 0x50000000
-#define NFP_FL_IPV4_TUNNEL_TYPE GENMASK(7, 4)
-#define NFP_FL_IPV4_PRE_TUN_INDEX GENMASK(2, 0)
+#define NFP_FL_TUNNEL_TYPE GENMASK(7, 4)
+#define NFP_FL_PRE_TUN_INDEX GENMASK(2, 0)
#define NFP_FLOWER_WORKQ_MAX_SKBS 30000
@@ -206,13 +208,16 @@ struct nfp_fl_pre_lag {
struct nfp_fl_pre_tunnel {
struct nfp_fl_act_head head;
- __be16 reserved;
- __be32 ipv4_dst;
- /* reserved for use with IPv6 addresses */
- __be32 extra[3];
+ __be16 flags;
+ union {
+ __be32 ipv4_dst;
+ struct in6_addr ipv6_dst;
+ };
};
-struct nfp_fl_set_ipv4_tun {
+#define NFP_FL_PRE_TUN_IPV6 BIT(0)
+
+struct nfp_fl_set_tun {
struct nfp_fl_act_head head;
__be16 reserved;
__be64 tun_id __packed;
@@ -387,6 +392,11 @@ struct nfp_flower_tun_ipv4 {
__be32 dst;
};
+struct nfp_flower_tun_ipv6 {
+ struct in6_addr src;
+ struct in6_addr dst;
+};
+
struct nfp_flower_tun_ip_ext {
u8 tos;
u8 ttl;
@@ -416,6 +426,42 @@ struct nfp_flower_ipv4_udp_tun {
__be32 tun_id;
};
+/* Flow Frame IPv6 UDP TUNNEL --> Tunnel details (11W/44B)
+ * -----------------------------------------------------------------
+ * 3 2 1
+ * 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * | ipv6_addr_src, 31 - 0 |
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * | ipv6_addr_src, 63 - 32 |
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * | ipv6_addr_src, 95 - 64 |
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * | ipv6_addr_src, 127 - 96 |
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * | ipv6_addr_dst, 31 - 0 |
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * | ipv6_addr_dst, 63 - 32 |
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * | ipv6_addr_dst, 95 - 64 |
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * | ipv6_addr_dst, 127 - 96 |
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * | Reserved | tos | ttl |
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * | Reserved |
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * | VNI | Reserved |
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ */
+struct nfp_flower_ipv6_udp_tun {
+ struct nfp_flower_tun_ipv6 ipv6;
+ __be16 reserved1;
+ struct nfp_flower_tun_ip_ext ip_ext;
+ __be32 reserved2;
+ __be32 tun_id;
+};
+
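
The frame diagram above fixes this key at 11 words, 44 bytes. A compile-time check of the arithmetic, using stand-in types with the same sizes since the kernel definitions are not in scope here (illustrative, not part of the patch):

    #include <stdint.h>

    struct in6_addr_model { uint8_t s6_addr[16]; };
    struct tun_ip_ext_model { uint8_t tos; uint8_t ttl; };

    struct ipv6_udp_tun_model {
        struct in6_addr_model src, dst;     /* 32 bytes */
        uint16_t reserved1;                 /*  2 */
        struct tun_ip_ext_model ip_ext;     /*  2 */
        uint32_t reserved2;                 /*  4 */
        uint32_t tun_id;                    /*  4 */
    };

    _Static_assert(sizeof(struct ipv6_udp_tun_model) == 44,
                   "11 words / 44 bytes, per the frame diagram");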
/* Flow Frame GRE TUNNEL --> Tunnel details (6W/24B)
* -----------------------------------------------------------------
* 3 2 1
@@ -445,6 +491,46 @@ struct nfp_flower_ipv4_gre_tun {
__be32 reserved2;
};
+/* Flow Frame GRE TUNNEL V6 --> Tunnel details (12W/48B)
+ * -----------------------------------------------------------------
+ * 3 2 1
+ * 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * | ipv6_addr_src, 31 - 0 |
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * | ipv6_addr_src, 63 - 32 |
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * | ipv6_addr_src, 95 - 64 |
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * | ipv6_addr_src, 127 - 96 |
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * | ipv6_addr_dst, 31 - 0 |
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * | ipv6_addr_dst, 63 - 32 |
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * | ipv6_addr_dst, 95 - 64 |
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * | ipv6_addr_dst, 127 - 96 |
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * | tun_flags | tos | ttl |
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * | Reserved | Ethertype |
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * | Key |
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * | Reserved |
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ */
+struct nfp_flower_ipv6_gre_tun {
+ struct nfp_flower_tun_ipv6 ipv6;
+ __be16 tun_flags;
+ struct nfp_flower_tun_ip_ext ip_ext;
+ __be16 reserved1;
+ __be16 ethertype;
+ __be32 tun_key;
+ __be32 reserved2;
+};
+
struct nfp_flower_geneve_options {
u8 data[NFP_FL_MAX_GENEVE_OPT_KEY];
};
@@ -485,6 +571,10 @@ enum nfp_flower_cmsg_type_port {
NFP_FLOWER_CMSG_TYPE_QOS_DEL = 19,
NFP_FLOWER_CMSG_TYPE_QOS_STATS = 20,
NFP_FLOWER_CMSG_TYPE_PRE_TUN_RULE = 21,
+ NFP_FLOWER_CMSG_TYPE_TUN_IPS_V6 = 22,
+ NFP_FLOWER_CMSG_TYPE_NO_NEIGH_V6 = 23,
+ NFP_FLOWER_CMSG_TYPE_TUN_NEIGH_V6 = 24,
+ NFP_FLOWER_CMSG_TYPE_ACTIVE_TUNS_V6 = 25,
NFP_FLOWER_CMSG_TYPE_MAX = 32,
};
diff --git a/drivers/net/ethernet/netronome/nfp/flower/main.h b/drivers/net/ethernet/netronome/nfp/flower/main.h
index e0c985fcaec1..d55d0d33bc45 100644
--- a/drivers/net/ethernet/netronome/nfp/flower/main.h
+++ b/drivers/net/ethernet/netronome/nfp/flower/main.h
@@ -43,6 +43,7 @@ struct nfp_app;
#define NFP_FL_FEATS_VF_RLIM BIT(4)
#define NFP_FL_FEATS_FLOW_MOD BIT(5)
#define NFP_FL_FEATS_PRE_TUN_RULES BIT(6)
+#define NFP_FL_FEATS_IPV6_TUN BIT(7)
#define NFP_FL_FEATS_FLOW_MERGE BIT(30)
#define NFP_FL_FEATS_LAG BIT(31)
@@ -62,18 +63,26 @@ struct nfp_fl_stats_id {
* struct nfp_fl_tunnel_offloads - priv data for tunnel offloads
* @offloaded_macs: Hashtable of the offloaded MAC addresses
* @ipv4_off_list: List of IPv4 addresses to offload
- * @neigh_off_list: List of neighbour offloads
+ * @ipv6_off_list: List of IPv6 addresses to offload
+ * @neigh_off_list_v4: List of IPv4 neighbour offloads
+ * @neigh_off_list_v6: List of IPv6 neighbour offloads
* @ipv4_off_lock: Lock for the IPv4 address list
- * @neigh_off_lock: Lock for the neighbour address list
+ * @ipv6_off_lock: Lock for the IPv6 address list
+ * @neigh_off_lock_v4: Lock for the IPv4 neighbour address list
+ * @neigh_off_lock_v6: Lock for the IPv6 neighbour address list
* @mac_off_ids: IDA to manage id assignment for offloaded MACs
* @neigh_nb: Notifier to monitor neighbour state
*/
struct nfp_fl_tunnel_offloads {
struct rhashtable offloaded_macs;
struct list_head ipv4_off_list;
- struct list_head neigh_off_list;
+ struct list_head ipv6_off_list;
+ struct list_head neigh_off_list_v4;
+ struct list_head neigh_off_list_v6;
struct mutex ipv4_off_lock;
- spinlock_t neigh_off_lock;
+ struct mutex ipv6_off_lock;
+ spinlock_t neigh_off_lock_v4;
+ spinlock_t neigh_off_lock_v6;
struct ida mac_off_ids;
struct notifier_block neigh_nb;
};
@@ -273,12 +282,25 @@ struct nfp_fl_stats {
u64 used;
};
+/**
+ * struct nfp_ipv6_addr_entry - cached IPv6 addresses
+ * @ipv6_addr: IP address
+ * @ref_count: number of rules currently using this IP
+ * @list: list pointer
+ */
+struct nfp_ipv6_addr_entry {
+ struct in6_addr ipv6_addr;
+ int ref_count;
+ struct list_head list;
+};
+
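
Unlike the plain IPv4 list, each cached IPv6 address carries a ref_count, so multiple offloaded rules can share one firmware entry and the address is withdrawn only when the last rule goes. A user-space model of those semantics; names are illustrative, and the real add/put pair is only declared further down in main.h:

    #include <stdlib.h>
    #include <string.h>

    struct entry {
        char addr[16];
        int ref_count;
        struct entry *next;
    };

    static struct entry *head;

    static struct entry *addr_get(const char addr[16])
    {
        struct entry *e;

        for (e = head; e; e = e->next)
            if (!memcmp(e->addr, addr, 16)) {
                e->ref_count++;         /* share the cached entry */
                return e;
            }

        e = calloc(1, sizeof(*e));
        if (!e)
            return NULL;
        memcpy(e->addr, addr, 16);
        e->ref_count = 1;
        e->next = head;
        head = e;                       /* first user: offload here */
        return e;
    }

    static void addr_put(struct entry *e)
    {
        struct entry **p;

        if (--e->ref_count)
            return;

        for (p = &head; *p; p = &(*p)->next)
            if (*p == e) {
                *p = e->next;           /* last user: withdraw and free */
                free(e);
                return;
            }
    }

    int main(void)
    {
        char a[16] = "2001:db8::1";     /* stand-in key bytes */
        struct entry *e = addr_get(a);

        addr_get(a);                    /* second rule shares the entry */
        addr_put(e);
        addr_put(e);                    /* now unlinked and freed */
        return 0;
    }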
struct nfp_fl_payload {
struct nfp_fl_rule_metadata meta;
unsigned long tc_flower_cookie;
struct rhash_head fl_node;
struct rcu_head rcu;
__be32 nfp_tun_ipv4_addr;
+ struct nfp_ipv6_addr_entry *nfp_tun_ipv6;
struct net_device *ingress_dev;
char *unmasked_data;
char *mask_data;
@@ -396,8 +418,14 @@ int nfp_tunnel_mac_event_handler(struct nfp_app *app,
unsigned long event, void *ptr);
void nfp_tunnel_del_ipv4_off(struct nfp_app *app, __be32 ipv4);
void nfp_tunnel_add_ipv4_off(struct nfp_app *app, __be32 ipv4);
-void nfp_tunnel_request_route(struct nfp_app *app, struct sk_buff *skb);
+void
+nfp_tunnel_put_ipv6_off(struct nfp_app *app, struct nfp_ipv6_addr_entry *entry);
+struct nfp_ipv6_addr_entry *
+nfp_tunnel_add_ipv6_off(struct nfp_app *app, struct in6_addr *ipv6);
+void nfp_tunnel_request_route_v4(struct nfp_app *app, struct sk_buff *skb);
+void nfp_tunnel_request_route_v6(struct nfp_app *app, struct sk_buff *skb);
void nfp_tunnel_keep_alive(struct nfp_app *app, struct sk_buff *skb);
+void nfp_tunnel_keep_alive_v6(struct nfp_app *app, struct sk_buff *skb);
void nfp_flower_lag_init(struct nfp_fl_lag *lag);
void nfp_flower_lag_cleanup(struct nfp_fl_lag *lag);
int nfp_flower_lag_reset(struct nfp_fl_lag *lag);
diff --git a/drivers/net/ethernet/netronome/nfp/flower/match.c b/drivers/net/ethernet/netronome/nfp/flower/match.c
index 9cc3ba17ff69..546bc01d507d 100644
--- a/drivers/net/ethernet/netronome/nfp/flower/match.c
+++ b/drivers/net/ethernet/netronome/nfp/flower/match.c
@@ -10,9 +10,8 @@
static void
nfp_flower_compile_meta_tci(struct nfp_flower_meta_tci *ext,
struct nfp_flower_meta_tci *msk,
- struct flow_cls_offload *flow, u8 key_type)
+ struct flow_rule *rule, u8 key_type)
{
- struct flow_rule *rule = flow_cls_offload_flow_rule(flow);
u16 tmp_tci;
memset(ext, 0, sizeof(struct nfp_flower_meta_tci));
@@ -77,11 +76,8 @@ nfp_flower_compile_port(struct nfp_flower_in_port *frame, u32 cmsg_port,
static void
nfp_flower_compile_mac(struct nfp_flower_mac_mpls *ext,
- struct nfp_flower_mac_mpls *msk,
- struct flow_cls_offload *flow)
+ struct nfp_flower_mac_mpls *msk, struct flow_rule *rule)
{
- struct flow_rule *rule = flow_cls_offload_flow_rule(flow);
-
memset(ext, 0, sizeof(struct nfp_flower_mac_mpls));
memset(msk, 0, sizeof(struct nfp_flower_mac_mpls));
@@ -130,10 +126,8 @@ nfp_flower_compile_mac(struct nfp_flower_mac_mpls *ext,
static void
nfp_flower_compile_tport(struct nfp_flower_tp_ports *ext,
struct nfp_flower_tp_ports *msk,
- struct flow_cls_offload *flow)
+ struct flow_rule *rule)
{
- struct flow_rule *rule = flow_cls_offload_flow_rule(flow);
-
memset(ext, 0, sizeof(struct nfp_flower_tp_ports));
memset(msk, 0, sizeof(struct nfp_flower_tp_ports));
@@ -150,11 +144,8 @@ nfp_flower_compile_tport(struct nfp_flower_tp_ports *ext,
static void
nfp_flower_compile_ip_ext(struct nfp_flower_ip_ext *ext,
- struct nfp_flower_ip_ext *msk,
- struct flow_cls_offload *flow)
+ struct nfp_flower_ip_ext *msk, struct flow_rule *rule)
{
- struct flow_rule *rule = flow_cls_offload_flow_rule(flow);
-
if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_BASIC)) {
struct flow_match_basic match;
@@ -224,10 +215,8 @@ nfp_flower_compile_ip_ext(struct nfp_flower_ip_ext *ext,
static void
nfp_flower_compile_ipv4(struct nfp_flower_ipv4 *ext,
- struct nfp_flower_ipv4 *msk,
- struct flow_cls_offload *flow)
+ struct nfp_flower_ipv4 *msk, struct flow_rule *rule)
{
- struct flow_rule *rule = flow_cls_offload_flow_rule(flow);
struct flow_match_ipv4_addrs match;
memset(ext, 0, sizeof(struct nfp_flower_ipv4));
@@ -241,16 +230,13 @@ nfp_flower_compile_ipv4(struct nfp_flower_ipv4 *ext,
msk->ipv4_dst = match.mask->dst;
}
- nfp_flower_compile_ip_ext(&ext->ip_ext, &msk->ip_ext, flow);
+ nfp_flower_compile_ip_ext(&ext->ip_ext, &msk->ip_ext, rule);
}
static void
nfp_flower_compile_ipv6(struct nfp_flower_ipv6 *ext,
- struct nfp_flower_ipv6 *msk,
- struct flow_cls_offload *flow)
+ struct nfp_flower_ipv6 *msk, struct flow_rule *rule)
{
- struct flow_rule *rule = flow_cls_offload_flow_rule(flow);
-
memset(ext, 0, sizeof(struct nfp_flower_ipv6));
memset(msk, 0, sizeof(struct nfp_flower_ipv6));
@@ -264,16 +250,15 @@ nfp_flower_compile_ipv6(struct nfp_flower_ipv6 *ext,
msk->ipv6_dst = match.mask->dst;
}
- nfp_flower_compile_ip_ext(&ext->ip_ext, &msk->ip_ext, flow);
+ nfp_flower_compile_ip_ext(&ext->ip_ext, &msk->ip_ext, rule);
}
static int
-nfp_flower_compile_geneve_opt(void *ext, void *msk,
- struct flow_cls_offload *flow)
+nfp_flower_compile_geneve_opt(void *ext, void *msk, struct flow_rule *rule)
{
struct flow_match_enc_opts match;
- flow_rule_match_enc_opts(flow->rule, &match);
+ flow_rule_match_enc_opts(rule, &match);
memcpy(ext, match.key->data, match.key->len);
memcpy(msk, match.mask->data, match.mask->len);
@@ -283,10 +268,8 @@ nfp_flower_compile_geneve_opt(void *ext, void *msk,
static void
nfp_flower_compile_tun_ipv4_addrs(struct nfp_flower_tun_ipv4 *ext,
struct nfp_flower_tun_ipv4 *msk,
- struct flow_cls_offload *flow)
+ struct flow_rule *rule)
{
- struct flow_rule *rule = flow_cls_offload_flow_rule(flow);
-
if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS)) {
struct flow_match_ipv4_addrs match;
@@ -299,12 +282,26 @@ nfp_flower_compile_tun_ipv4_addrs(struct nfp_flower_tun_ipv4 *ext,
}
static void
+nfp_flower_compile_tun_ipv6_addrs(struct nfp_flower_tun_ipv6 *ext,
+ struct nfp_flower_tun_ipv6 *msk,
+ struct flow_rule *rule)
+{
+ if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_IPV6_ADDRS)) {
+ struct flow_match_ipv6_addrs match;
+
+ flow_rule_match_enc_ipv6_addrs(rule, &match);
+ ext->src = match.key->src;
+ ext->dst = match.key->dst;
+ msk->src = match.mask->src;
+ msk->dst = match.mask->dst;
+ }
+}
+
+static void
nfp_flower_compile_tun_ip_ext(struct nfp_flower_tun_ip_ext *ext,
struct nfp_flower_tun_ip_ext *msk,
- struct flow_cls_offload *flow)
+ struct flow_rule *rule)
{
- struct flow_rule *rule = flow_cls_offload_flow_rule(flow);
-
if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_IP)) {
struct flow_match_ip match;
@@ -317,57 +314,97 @@ nfp_flower_compile_tun_ip_ext(struct nfp_flower_tun_ip_ext *ext,
}
static void
-nfp_flower_compile_ipv4_gre_tun(struct nfp_flower_ipv4_gre_tun *ext,
- struct nfp_flower_ipv4_gre_tun *msk,
- struct flow_cls_offload *flow)
+nfp_flower_compile_tun_udp_key(__be32 *key, __be32 *key_msk,
+ struct flow_rule *rule)
{
- struct flow_rule *rule = flow_cls_offload_flow_rule(flow);
-
- memset(ext, 0, sizeof(struct nfp_flower_ipv4_gre_tun));
- memset(msk, 0, sizeof(struct nfp_flower_ipv4_gre_tun));
+ if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_KEYID)) {
+ struct flow_match_enc_keyid match;
+ u32 vni;
- /* NVGRE is the only supported GRE tunnel type */
- ext->ethertype = cpu_to_be16(ETH_P_TEB);
- msk->ethertype = cpu_to_be16(~0);
+ flow_rule_match_enc_keyid(rule, &match);
+ vni = be32_to_cpu(match.key->keyid) << NFP_FL_TUN_VNI_OFFSET;
+ *key = cpu_to_be32(vni);
+ vni = be32_to_cpu(match.mask->keyid) << NFP_FL_TUN_VNI_OFFSET;
+ *key_msk = cpu_to_be32(vni);
+ }
+}
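
The newly factored nfp_flower_compile_tun_udp_key() keeps the existing VNI packing: the 24-bit VNI rides in the upper bytes of the 32-bit tun_id, hence the shift by NFP_FL_TUN_VNI_OFFSET (8 in this driver's cmsg.h) on both key and mask. A compilable model, with htonl/ntohl standing in for cpu_to_be32/be32_to_cpu:

    #include <stdio.h>
    #include <arpa/inet.h>

    #define TUN_VNI_OFFSET 8

    static unsigned int pack_vni(unsigned int keyid_be)
    {
        return htonl(ntohl(keyid_be) << TUN_VNI_OFFSET);
    }

    int main(void)
    {
        printf("0x%08x\n", ntohl(pack_vni(htonl(0x123456))));
        /* prints 0x12345600: the 24-bit VNI in the upper bytes */
        return 0;
    }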
+
+static void
+nfp_flower_compile_tun_gre_key(__be32 *key, __be32 *key_msk, __be16 *flags,
+ __be16 *flags_msk, struct flow_rule *rule)
+{
if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_KEYID)) {
struct flow_match_enc_keyid match;
flow_rule_match_enc_keyid(rule, &match);
- ext->tun_key = match.key->keyid;
- msk->tun_key = match.mask->keyid;
+ *key = match.key->keyid;
+ *key_msk = match.mask->keyid;
- ext->tun_flags = cpu_to_be16(NFP_FL_GRE_FLAG_KEY);
- msk->tun_flags = cpu_to_be16(NFP_FL_GRE_FLAG_KEY);
+ *flags = cpu_to_be16(NFP_FL_GRE_FLAG_KEY);
+ *flags_msk = cpu_to_be16(NFP_FL_GRE_FLAG_KEY);
}
+}
+
+static void
+nfp_flower_compile_ipv4_gre_tun(struct nfp_flower_ipv4_gre_tun *ext,
+ struct nfp_flower_ipv4_gre_tun *msk,
+ struct flow_rule *rule)
+{
+ memset(ext, 0, sizeof(struct nfp_flower_ipv4_gre_tun));
+ memset(msk, 0, sizeof(struct nfp_flower_ipv4_gre_tun));
+
+ /* NVGRE is the only supported GRE tunnel type */
+ ext->ethertype = cpu_to_be16(ETH_P_TEB);
+ msk->ethertype = cpu_to_be16(~0);
- nfp_flower_compile_tun_ipv4_addrs(&ext->ipv4, &msk->ipv4, flow);
- nfp_flower_compile_tun_ip_ext(&ext->ip_ext, &msk->ip_ext, flow);
+ nfp_flower_compile_tun_ipv4_addrs(&ext->ipv4, &msk->ipv4, rule);
+ nfp_flower_compile_tun_ip_ext(&ext->ip_ext, &msk->ip_ext, rule);
+ nfp_flower_compile_tun_gre_key(&ext->tun_key, &msk->tun_key,
+ &ext->tun_flags, &msk->tun_flags, rule);
}
static void
nfp_flower_compile_ipv4_udp_tun(struct nfp_flower_ipv4_udp_tun *ext,
struct nfp_flower_ipv4_udp_tun *msk,
- struct flow_cls_offload *flow)
+ struct flow_rule *rule)
{
- struct flow_rule *rule = flow_cls_offload_flow_rule(flow);
-
memset(ext, 0, sizeof(struct nfp_flower_ipv4_udp_tun));
memset(msk, 0, sizeof(struct nfp_flower_ipv4_udp_tun));
- if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_KEYID)) {
- struct flow_match_enc_keyid match;
- u32 temp_vni;
+ nfp_flower_compile_tun_ipv4_addrs(&ext->ipv4, &msk->ipv4, rule);
+ nfp_flower_compile_tun_ip_ext(&ext->ip_ext, &msk->ip_ext, rule);
+ nfp_flower_compile_tun_udp_key(&ext->tun_id, &msk->tun_id, rule);
+}
- flow_rule_match_enc_keyid(rule, &match);
- temp_vni = be32_to_cpu(match.key->keyid) << NFP_FL_TUN_VNI_OFFSET;
- ext->tun_id = cpu_to_be32(temp_vni);
- temp_vni = be32_to_cpu(match.mask->keyid) << NFP_FL_TUN_VNI_OFFSET;
- msk->tun_id = cpu_to_be32(temp_vni);
- }
+static void
+nfp_flower_compile_ipv6_udp_tun(struct nfp_flower_ipv6_udp_tun *ext,
+ struct nfp_flower_ipv6_udp_tun *msk,
+ struct flow_rule *rule)
+{
+ memset(ext, 0, sizeof(struct nfp_flower_ipv6_udp_tun));
+ memset(msk, 0, sizeof(struct nfp_flower_ipv6_udp_tun));
+
+ nfp_flower_compile_tun_ipv6_addrs(&ext->ipv6, &msk->ipv6, rule);
+ nfp_flower_compile_tun_ip_ext(&ext->ip_ext, &msk->ip_ext, rule);
+ nfp_flower_compile_tun_udp_key(&ext->tun_id, &msk->tun_id, rule);
+}
+
+static void
+nfp_flower_compile_ipv6_gre_tun(struct nfp_flower_ipv6_gre_tun *ext,
+ struct nfp_flower_ipv6_gre_tun *msk,
+ struct flow_rule *rule)
+{
+ memset(ext, 0, sizeof(struct nfp_flower_ipv6_gre_tun));
+ memset(msk, 0, sizeof(struct nfp_flower_ipv6_gre_tun));
+
+ /* NVGRE is the only supported GRE tunnel type */
+ ext->ethertype = cpu_to_be16(ETH_P_TEB);
+ msk->ethertype = cpu_to_be16(~0);
- nfp_flower_compile_tun_ipv4_addrs(&ext->ipv4, &msk->ipv4, flow);
- nfp_flower_compile_tun_ip_ext(&ext->ip_ext, &msk->ip_ext, flow);
+ nfp_flower_compile_tun_ipv6_addrs(&ext->ipv6, &msk->ipv6, rule);
+ nfp_flower_compile_tun_ip_ext(&ext->ip_ext, &msk->ip_ext, rule);
+ nfp_flower_compile_tun_gre_key(&ext->tun_key, &msk->tun_key,
+ &ext->tun_flags, &msk->tun_flags, rule);
}
int nfp_flower_compile_flow_match(struct nfp_app *app,
@@ -378,6 +415,7 @@ int nfp_flower_compile_flow_match(struct nfp_app *app,
enum nfp_flower_tun_type tun_type,
struct netlink_ext_ack *extack)
{
+ struct flow_rule *rule = flow_cls_offload_flow_rule(flow);
u32 port_id;
int err;
u8 *ext;
@@ -393,7 +431,7 @@ int nfp_flower_compile_flow_match(struct nfp_app *app,
nfp_flower_compile_meta_tci((struct nfp_flower_meta_tci *)ext,
(struct nfp_flower_meta_tci *)msk,
- flow, key_ls->key_layer);
+ rule, key_ls->key_layer);
ext += sizeof(struct nfp_flower_meta_tci);
msk += sizeof(struct nfp_flower_meta_tci);
@@ -425,7 +463,7 @@ int nfp_flower_compile_flow_match(struct nfp_app *app,
if (NFP_FLOWER_LAYER_MAC & key_ls->key_layer) {
nfp_flower_compile_mac((struct nfp_flower_mac_mpls *)ext,
(struct nfp_flower_mac_mpls *)msk,
- flow);
+ rule);
ext += sizeof(struct nfp_flower_mac_mpls);
msk += sizeof(struct nfp_flower_mac_mpls);
}
@@ -433,7 +471,7 @@ int nfp_flower_compile_flow_match(struct nfp_app *app,
if (NFP_FLOWER_LAYER_TP & key_ls->key_layer) {
nfp_flower_compile_tport((struct nfp_flower_tp_ports *)ext,
(struct nfp_flower_tp_ports *)msk,
- flow);
+ rule);
ext += sizeof(struct nfp_flower_tp_ports);
msk += sizeof(struct nfp_flower_tp_ports);
}
@@ -441,7 +479,7 @@ int nfp_flower_compile_flow_match(struct nfp_app *app,
if (NFP_FLOWER_LAYER_IPV4 & key_ls->key_layer) {
nfp_flower_compile_ipv4((struct nfp_flower_ipv4 *)ext,
(struct nfp_flower_ipv4 *)msk,
- flow);
+ rule);
ext += sizeof(struct nfp_flower_ipv4);
msk += sizeof(struct nfp_flower_ipv4);
}
@@ -449,43 +487,83 @@ int nfp_flower_compile_flow_match(struct nfp_app *app,
if (NFP_FLOWER_LAYER_IPV6 & key_ls->key_layer) {
nfp_flower_compile_ipv6((struct nfp_flower_ipv6 *)ext,
(struct nfp_flower_ipv6 *)msk,
- flow);
+ rule);
ext += sizeof(struct nfp_flower_ipv6);
msk += sizeof(struct nfp_flower_ipv6);
}
if (key_ls->key_layer_two & NFP_FLOWER_LAYER2_GRE) {
- __be32 tun_dst;
-
- nfp_flower_compile_ipv4_gre_tun((void *)ext, (void *)msk, flow);
- tun_dst = ((struct nfp_flower_ipv4_gre_tun *)ext)->ipv4.dst;
- ext += sizeof(struct nfp_flower_ipv4_gre_tun);
- msk += sizeof(struct nfp_flower_ipv4_gre_tun);
-
- /* Store the tunnel destination in the rule data.
- * This must be present and be an exact match.
- */
- nfp_flow->nfp_tun_ipv4_addr = tun_dst;
- nfp_tunnel_add_ipv4_off(app, tun_dst);
+ if (key_ls->key_layer_two & NFP_FLOWER_LAYER2_TUN_IPV6) {
+ struct nfp_flower_ipv6_gre_tun *gre_match;
+ struct nfp_ipv6_addr_entry *entry;
+ struct in6_addr *dst;
+
+ nfp_flower_compile_ipv6_gre_tun((void *)ext,
+ (void *)msk, rule);
+ gre_match = (struct nfp_flower_ipv6_gre_tun *)ext;
+ dst = &gre_match->ipv6.dst;
+ ext += sizeof(struct nfp_flower_ipv6_gre_tun);
+ msk += sizeof(struct nfp_flower_ipv6_gre_tun);
+
+ entry = nfp_tunnel_add_ipv6_off(app, dst);
+ if (!entry)
+ return -EOPNOTSUPP;
+
+ nfp_flow->nfp_tun_ipv6 = entry;
+ } else {
+ __be32 dst;
+
+ nfp_flower_compile_ipv4_gre_tun((void *)ext,
+ (void *)msk, rule);
+ dst = ((struct nfp_flower_ipv4_gre_tun *)ext)->ipv4.dst;
+ ext += sizeof(struct nfp_flower_ipv4_gre_tun);
+ msk += sizeof(struct nfp_flower_ipv4_gre_tun);
+
+ /* Store the tunnel destination in the rule data.
+ * This must be present and be an exact match.
+ */
+ nfp_flow->nfp_tun_ipv4_addr = dst;
+ nfp_tunnel_add_ipv4_off(app, dst);
+ }
}
if (key_ls->key_layer & NFP_FLOWER_LAYER_VXLAN ||
key_ls->key_layer_two & NFP_FLOWER_LAYER2_GENEVE) {
- __be32 tun_dst;
-
- nfp_flower_compile_ipv4_udp_tun((void *)ext, (void *)msk, flow);
- tun_dst = ((struct nfp_flower_ipv4_udp_tun *)ext)->ipv4.dst;
- ext += sizeof(struct nfp_flower_ipv4_udp_tun);
- msk += sizeof(struct nfp_flower_ipv4_udp_tun);
-
- /* Store the tunnel destination in the rule data.
- * This must be present and be an exact match.
- */
- nfp_flow->nfp_tun_ipv4_addr = tun_dst;
- nfp_tunnel_add_ipv4_off(app, tun_dst);
+ if (key_ls->key_layer_two & NFP_FLOWER_LAYER2_TUN_IPV6) {
+ struct nfp_flower_ipv6_udp_tun *udp_match;
+ struct nfp_ipv6_addr_entry *entry;
+ struct in6_addr *dst;
+
+ nfp_flower_compile_ipv6_udp_tun((void *)ext,
+ (void *)msk, rule);
+ udp_match = (struct nfp_flower_ipv6_udp_tun *)ext;
+ dst = &udp_match->ipv6.dst;
+ ext += sizeof(struct nfp_flower_ipv6_udp_tun);
+ msk += sizeof(struct nfp_flower_ipv6_udp_tun);
+
+ entry = nfp_tunnel_add_ipv6_off(app, dst);
+ if (!entry)
+ return -EOPNOTSUPP;
+
+ nfp_flow->nfp_tun_ipv6 = entry;
+ } else {
+ __be32 dst;
+
+ nfp_flower_compile_ipv4_udp_tun((void *)ext,
+ (void *)msk, rule);
+ dst = ((struct nfp_flower_ipv4_udp_tun *)ext)->ipv4.dst;
+ ext += sizeof(struct nfp_flower_ipv4_udp_tun);
+ msk += sizeof(struct nfp_flower_ipv4_udp_tun);
+
+ /* Store the tunnel destination in the rule data.
+ * This must be present and be an exact match.
+ */
+ nfp_flow->nfp_tun_ipv4_addr = dst;
+ nfp_tunnel_add_ipv4_off(app, dst);
+ }
if (key_ls->key_layer_two & NFP_FLOWER_LAYER2_GENEVE_OP) {
- err = nfp_flower_compile_geneve_opt(ext, msk, flow);
+ err = nfp_flower_compile_geneve_opt(ext, msk, rule);
if (err)
return err;
}
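
The hunk above keeps the driver's two-cursor key build: every matched layer writes its key image through ext and its mask image through msk, then advances both cursors by the same struct size. A self-contained model of that pattern in plain C99 (the struct layout and masked-value convention are simplified stand-ins, not the driver's real nfp_flower_* layers):

/*
 * Model of the ext/msk walk in nfp_flower_compile_flow_match().
 * Compile with any C99 compiler; layouts are illustrative only.
 */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct layer_ipv4 { uint32_t src, dst; };

static void compile_ipv4(struct layer_ipv4 *ext, struct layer_ipv4 *msk,
			 uint32_t src, uint32_t src_mask,
			 uint32_t dst, uint32_t dst_mask)
{
	ext->src = src & src_mask;	/* key image holds the masked value */
	ext->dst = dst & dst_mask;
	msk->src = src_mask;		/* mask image at the same offset */
	msk->dst = dst_mask;
}

int main(void)
{
	uint8_t key[64] = { 0 }, mask[64] = { 0 };
	uint8_t *ext = key, *msk = mask;

	/* Each present layer advances both cursors by one struct size,
	 * keeping key and mask images in lock-step. */
	compile_ipv4((struct layer_ipv4 *)ext, (struct layer_ipv4 *)msk,
		     0x0a000001, 0xffffffff, 0x0a000002, 0xffffffff);
	ext += sizeof(struct layer_ipv4);
	msk += sizeof(struct layer_ipv4);

	printf("key bytes used: %td\n", ext - key);
	return 0;
}
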
diff --git a/drivers/net/ethernet/netronome/nfp/flower/offload.c b/drivers/net/ethernet/netronome/nfp/flower/offload.c
index 987ae221f6be..7ca5c1becfcf 100644
--- a/drivers/net/ethernet/netronome/nfp/flower/offload.c
+++ b/drivers/net/ethernet/netronome/nfp/flower/offload.c
@@ -54,6 +54,10 @@
(BIT(FLOW_DISSECTOR_KEY_ENC_CONTROL) | \
BIT(FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS))
+#define NFP_FLOWER_WHITELIST_TUN_DISSECTOR_V6_R \
+ (BIT(FLOW_DISSECTOR_KEY_ENC_CONTROL) | \
+ BIT(FLOW_DISSECTOR_KEY_ENC_IPV6_ADDRS))
+
#define NFP_FLOWER_MERGE_FIELDS \
(NFP_FLOWER_LAYER_PORT | \
NFP_FLOWER_LAYER_MAC | \
@@ -64,7 +68,8 @@
#define NFP_FLOWER_PRE_TUN_RULE_FIELDS \
(NFP_FLOWER_LAYER_PORT | \
NFP_FLOWER_LAYER_MAC | \
- NFP_FLOWER_LAYER_IPV4)
+ NFP_FLOWER_LAYER_IPV4 | \
+ NFP_FLOWER_LAYER_IPV6)
struct nfp_flower_merge_check {
union {
@@ -146,10 +151,11 @@ static bool nfp_flower_check_higher_than_l3(struct flow_cls_offload *f)
static int
nfp_flower_calc_opt_layer(struct flow_dissector_key_enc_opts *enc_opts,
- u32 *key_layer_two, int *key_size,
+ u32 *key_layer_two, int *key_size, bool ipv6,
struct netlink_ext_ack *extack)
{
- if (enc_opts->len > NFP_FL_MAX_GENEVE_OPT_KEY) {
+ if (enc_opts->len > NFP_FL_MAX_GENEVE_OPT_KEY ||
+ (ipv6 && enc_opts->len > NFP_FL_MAX_GENEVE_OPT_KEY_V6)) {
NL_SET_ERR_MSG_MOD(extack, "unsupported offload: geneve options exceed maximum length");
return -EOPNOTSUPP;
}
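
The check above applies two caps: the general geneve option budget, plus a tighter one when the larger IPv6 tunnel key is in use. The same rule as a standalone predicate (limit values are illustrative, not the driver's):

#include <stdbool.h>
#include <stdio.h>

#define MAX_GENEVE_OPT_KEY	32	/* illustrative budget */
#define MAX_GENEVE_OPT_KEY_V6	8	/* tighter budget with IPv6 key */

static bool geneve_opts_fit(unsigned int len, bool ipv6)
{
	if (len > MAX_GENEVE_OPT_KEY)
		return false;
	if (ipv6 && len > MAX_GENEVE_OPT_KEY_V6)
		return false;
	return true;
}

int main(void)
{
	printf("v4 fits: %d\n", geneve_opts_fit(16, false));	/* 1 */
	printf("v6 fits: %d\n", geneve_opts_fit(16, true));	/* 0 */
	return 0;
}
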
@@ -167,7 +173,7 @@ nfp_flower_calc_udp_tun_layer(struct flow_dissector_key_ports *enc_ports,
struct flow_dissector_key_enc_opts *enc_op,
u32 *key_layer_two, u8 *key_layer, int *key_size,
struct nfp_flower_priv *priv,
- enum nfp_flower_tun_type *tun_type,
+ enum nfp_flower_tun_type *tun_type, bool ipv6,
struct netlink_ext_ack *extack)
{
int err;
@@ -176,7 +182,15 @@ nfp_flower_calc_udp_tun_layer(struct flow_dissector_key_ports *enc_ports,
case htons(IANA_VXLAN_UDP_PORT):
*tun_type = NFP_FL_TUNNEL_VXLAN;
*key_layer |= NFP_FLOWER_LAYER_VXLAN;
- *key_size += sizeof(struct nfp_flower_ipv4_udp_tun);
+
+ if (ipv6) {
+ *key_layer |= NFP_FLOWER_LAYER_EXT_META;
+ *key_size += sizeof(struct nfp_flower_ext_meta);
+ *key_layer_two |= NFP_FLOWER_LAYER2_TUN_IPV6;
+ *key_size += sizeof(struct nfp_flower_ipv6_udp_tun);
+ } else {
+ *key_size += sizeof(struct nfp_flower_ipv4_udp_tun);
+ }
if (enc_op) {
NL_SET_ERR_MSG_MOD(extack, "unsupported offload: encap options not supported on vxlan tunnels");
@@ -192,7 +206,13 @@ nfp_flower_calc_udp_tun_layer(struct flow_dissector_key_ports *enc_ports,
*key_layer |= NFP_FLOWER_LAYER_EXT_META;
*key_size += sizeof(struct nfp_flower_ext_meta);
*key_layer_two |= NFP_FLOWER_LAYER2_GENEVE;
- *key_size += sizeof(struct nfp_flower_ipv4_udp_tun);
+
+ if (ipv6) {
+ *key_layer_two |= NFP_FLOWER_LAYER2_TUN_IPV6;
+ *key_size += sizeof(struct nfp_flower_ipv6_udp_tun);
+ } else {
+ *key_size += sizeof(struct nfp_flower_ipv4_udp_tun);
+ }
if (!enc_op)
break;
@@ -200,8 +220,8 @@ nfp_flower_calc_udp_tun_layer(struct flow_dissector_key_ports *enc_ports,
NL_SET_ERR_MSG_MOD(extack, "unsupported offload: loaded firmware does not support geneve option offload");
return -EOPNOTSUPP;
}
- err = nfp_flower_calc_opt_layer(enc_op, key_layer_two,
- key_size, extack);
+ err = nfp_flower_calc_opt_layer(enc_op, key_layer_two, key_size,
+ ipv6, extack);
if (err)
return err;
break;
@@ -237,6 +257,8 @@ nfp_flower_calculate_key_layers(struct nfp_app *app,
/* If any tun dissector is used then the required set must be used. */
if (dissector->used_keys & NFP_FLOWER_WHITELIST_TUN_DISSECTOR &&
+ (dissector->used_keys & NFP_FLOWER_WHITELIST_TUN_DISSECTOR_V6_R)
+ != NFP_FLOWER_WHITELIST_TUN_DISSECTOR_V6_R &&
(dissector->used_keys & NFP_FLOWER_WHITELIST_TUN_DISSECTOR_R)
!= NFP_FLOWER_WHITELIST_TUN_DISSECTOR_R) {
NL_SET_ERR_MSG_MOD(extack, "unsupported offload: tunnel match not supported");
@@ -268,8 +290,10 @@ nfp_flower_calculate_key_layers(struct nfp_app *app,
if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_CONTROL)) {
struct flow_match_enc_opts enc_op = { NULL, NULL };
struct flow_match_ipv4_addrs ipv4_addrs;
+ struct flow_match_ipv6_addrs ipv6_addrs;
struct flow_match_control enc_ctl;
struct flow_match_ports enc_ports;
+ bool ipv6_tun = false;
flow_rule_match_enc_control(rule, &enc_ctl);
@@ -277,38 +301,62 @@ nfp_flower_calculate_key_layers(struct nfp_app *app,
NL_SET_ERR_MSG_MOD(extack, "unsupported offload: wildcarded protocols on tunnels are not supported");
return -EOPNOTSUPP;
}
- if (enc_ctl.key->addr_type != FLOW_DISSECTOR_KEY_IPV4_ADDRS) {
- NL_SET_ERR_MSG_MOD(extack, "unsupported offload: only IPv4 tunnels are supported");
+
+ ipv6_tun = enc_ctl.key->addr_type ==
+ FLOW_DISSECTOR_KEY_IPV6_ADDRS;
+ if (ipv6_tun &&
+ !(priv->flower_ext_feats & NFP_FL_FEATS_IPV6_TUN)) {
+ NL_SET_ERR_MSG_MOD(extack, "unsupported offload: firmware does not support IPv6 tunnels");
return -EOPNOTSUPP;
}
- /* These fields are already verified as used. */
- flow_rule_match_enc_ipv4_addrs(rule, &ipv4_addrs);
- if (ipv4_addrs.mask->dst != cpu_to_be32(~0)) {
- NL_SET_ERR_MSG_MOD(extack, "unsupported offload: only an exact match IPv4 destination address is supported");
+ if (!ipv6_tun &&
+ enc_ctl.key->addr_type != FLOW_DISSECTOR_KEY_IPV4_ADDRS) {
+ NL_SET_ERR_MSG_MOD(extack, "unsupported offload: tunnel address type not IPv4 or IPv6");
return -EOPNOTSUPP;
}
+ if (ipv6_tun) {
+ flow_rule_match_enc_ipv6_addrs(rule, &ipv6_addrs);
+ if (memchr_inv(&ipv6_addrs.mask->dst, 0xff,
+ sizeof(ipv6_addrs.mask->dst))) {
+ NL_SET_ERR_MSG_MOD(extack, "unsupported offload: only an exact match IPv6 destination address is supported");
+ return -EOPNOTSUPP;
+ }
+ } else {
+ flow_rule_match_enc_ipv4_addrs(rule, &ipv4_addrs);
+ if (ipv4_addrs.mask->dst != cpu_to_be32(~0)) {
+ NL_SET_ERR_MSG_MOD(extack, "unsupported offload: only an exact match IPv4 destination address is supported");
+ return -EOPNOTSUPP;
+ }
+ }
+
if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_OPTS))
flow_rule_match_enc_opts(rule, &enc_op);
-
if (!flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_PORTS)) {
/* check if GRE, which has no enc_ports */
- if (netif_is_gretap(netdev)) {
- *tun_type = NFP_FL_TUNNEL_GRE;
- key_layer |= NFP_FLOWER_LAYER_EXT_META;
- key_size += sizeof(struct nfp_flower_ext_meta);
- key_layer_two |= NFP_FLOWER_LAYER2_GRE;
- key_size +=
- sizeof(struct nfp_flower_ipv4_gre_tun);
+ if (!netif_is_gretap(netdev)) {
+ NL_SET_ERR_MSG_MOD(extack, "unsupported offload: an exact match on L4 destination port is required for non-GRE tunnels");
+ return -EOPNOTSUPP;
+ }
- if (enc_op.key) {
- NL_SET_ERR_MSG_MOD(extack, "unsupported offload: encap options not supported on GRE tunnels");
- return -EOPNOTSUPP;
- }
+ *tun_type = NFP_FL_TUNNEL_GRE;
+ key_layer |= NFP_FLOWER_LAYER_EXT_META;
+ key_size += sizeof(struct nfp_flower_ext_meta);
+ key_layer_two |= NFP_FLOWER_LAYER2_GRE;
+
+ if (ipv6_tun) {
+ key_layer_two |= NFP_FLOWER_LAYER2_TUN_IPV6;
+ key_size +=
+ sizeof(struct nfp_flower_ipv6_udp_tun);
} else {
- NL_SET_ERR_MSG_MOD(extack, "unsupported offload: an exact match on L4 destination port is required for non-GRE tunnels");
+ key_size +=
+ sizeof(struct nfp_flower_ipv4_udp_tun);
+ }
+
+ if (enc_op.key) {
+ NL_SET_ERR_MSG_MOD(extack, "unsupported offload: encap options not supported on GRE tunnels");
return -EOPNOTSUPP;
}
} else {
@@ -323,7 +371,8 @@ nfp_flower_calculate_key_layers(struct nfp_app *app,
&key_layer_two,
&key_layer,
&key_size, priv,
- tun_type, extack);
+ tun_type, ipv6_tun,
+ extack);
if (err)
return err;
@@ -491,6 +540,7 @@ nfp_flower_allocate_new(struct nfp_fl_key_ls *key_layer)
goto err_free_mask;
flow_pay->nfp_tun_ipv4_addr = 0;
+ flow_pay->nfp_tun_ipv6 = NULL;
flow_pay->meta.flags = 0;
INIT_LIST_HEAD(&flow_pay->linked_flows);
flow_pay->in_hw = false;
@@ -517,10 +567,12 @@ nfp_flower_update_merge_with_actions(struct nfp_fl_payload *flow,
struct nfp_fl_set_ip4_addrs *ipv4_add;
struct nfp_fl_set_ipv6_addr *ipv6_add;
struct nfp_fl_push_vlan *push_vlan;
+ struct nfp_fl_pre_tunnel *pre_tun;
struct nfp_fl_set_tport *tport;
struct nfp_fl_set_eth *eth;
struct nfp_fl_act_head *a;
unsigned int act_off = 0;
+ bool ipv6_tun = false;
u8 act_id = 0;
u8 *ports;
int i;
@@ -542,14 +594,18 @@ nfp_flower_update_merge_with_actions(struct nfp_fl_payload *flow,
case NFP_FL_ACTION_OPCODE_POP_VLAN:
merge->tci = cpu_to_be16(0);
break;
- case NFP_FL_ACTION_OPCODE_SET_IPV4_TUNNEL:
+ case NFP_FL_ACTION_OPCODE_SET_TUNNEL:
/* New tunnel header means l2 to l4 can be matched. */
eth_broadcast_addr(&merge->l2.mac_dst[0]);
eth_broadcast_addr(&merge->l2.mac_src[0]);
memset(&merge->l4, 0xff,
sizeof(struct nfp_flower_tp_ports));
- memset(&merge->ipv4, 0xff,
- sizeof(struct nfp_flower_ipv4));
+ if (ipv6_tun)
+ memset(&merge->ipv6, 0xff,
+ sizeof(struct nfp_flower_ipv6));
+ else
+ memset(&merge->ipv4, 0xff,
+ sizeof(struct nfp_flower_ipv4));
break;
case NFP_FL_ACTION_OPCODE_SET_ETHERNET:
eth = (struct nfp_fl_set_eth *)a;
@@ -597,6 +653,10 @@ nfp_flower_update_merge_with_actions(struct nfp_fl_payload *flow,
ports[i] |= tport->tp_port_mask[i];
break;
case NFP_FL_ACTION_OPCODE_PRE_TUNNEL:
+ pre_tun = (struct nfp_fl_pre_tunnel *)a;
+ ipv6_tun = be16_to_cpu(pre_tun->flags) &
+ NFP_FL_PRE_TUN_IPV6;
+ break;
case NFP_FL_ACTION_OPCODE_PRE_LAG:
case NFP_FL_ACTION_OPCODE_PUSH_GENEVE:
break;
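
The PRE_TUNNEL case above snoops the action list before SET_TUNNEL is reached, so the merge code knows which address family to wipe. A userspace model of that header walk, with illustrative opcodes, a made-up layout, and host-order flags where the driver uses be16_to_cpu():

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define OP_PRE_TUNNEL	6	/* illustrative opcode values */
#define OP_SET_TUNNEL	9
#define PRE_TUN_IPV6	(1u << 0)

struct act_head { uint8_t jump_id; uint8_t len_lw; }; /* len in 4B words */
struct act_pre_tun { struct act_head head; uint16_t flags; uint32_t pad; };

static bool actions_use_ipv6_tun(const uint8_t *acts, unsigned int len)
{
	const struct act_head *a;
	unsigned int off = 0;
	bool ipv6 = false;

	while (off < len) {
		a = (const struct act_head *)&acts[off];
		if (a->jump_id == OP_PRE_TUNNEL)
			ipv6 = ((const struct act_pre_tun *)a)->flags &
			       PRE_TUN_IPV6;
		off += a->len_lw * 4;	/* advance by encoded length */
	}
	return ipv6;
}

int main(void)
{
	struct act_pre_tun pre = {
		.head = { .jump_id = OP_PRE_TUNNEL, .len_lw = 2 },
		.flags = PRE_TUN_IPV6,
	};
	printf("%d\n", actions_use_ipv6_tun((uint8_t *)&pre, sizeof(pre)));
	return 0;
}
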
@@ -765,15 +825,15 @@ nfp_fl_verify_post_tun_acts(char *acts, int len, struct nfp_fl_push_vlan **vlan)
static int
nfp_fl_push_vlan_after_tun(char *acts, int len, struct nfp_fl_push_vlan *vlan)
{
- struct nfp_fl_set_ipv4_tun *tun;
+ struct nfp_fl_set_tun *tun;
struct nfp_fl_act_head *a;
unsigned int act_off = 0;
while (act_off < len) {
a = (struct nfp_fl_act_head *)&acts[act_off];
- if (a->jump_id == NFP_FL_ACTION_OPCODE_SET_IPV4_TUNNEL) {
- tun = (struct nfp_fl_set_ipv4_tun *)a;
+ if (a->jump_id == NFP_FL_ACTION_OPCODE_SET_TUNNEL) {
+ tun = (struct nfp_fl_set_tun *)a;
tun->outer_vlan_tpid = vlan->vlan_tpid;
tun->outer_vlan_tci = vlan->vlan_tci;
@@ -1058,15 +1118,22 @@ nfp_flower_validate_pre_tun_rule(struct nfp_app *app,
return -EOPNOTSUPP;
}
- if (key_layer & NFP_FLOWER_LAYER_IPV4) {
+ if (key_layer & NFP_FLOWER_LAYER_IPV4 ||
+ key_layer & NFP_FLOWER_LAYER_IPV6) {
+ /* Flags and proto fields have the same offset in IPv4 and IPv6. */
int ip_flags = offsetof(struct nfp_flower_ipv4, ip_ext.flags);
int ip_proto = offsetof(struct nfp_flower_ipv4, ip_ext.proto);
+ int size;
int i;
+ size = key_layer & NFP_FLOWER_LAYER_IPV4 ?
+ sizeof(struct nfp_flower_ipv4) :
+ sizeof(struct nfp_flower_ipv6);
+
mask += sizeof(struct nfp_flower_mac_mpls);
/* Ensure proto and flags are the only IP layer fields. */
- for (i = 0; i < sizeof(struct nfp_flower_ipv4); i++)
+ for (i = 0; i < size; i++)
if (mask[i] && i != ip_flags && i != ip_proto) {
NL_SET_ERR_MSG_MOD(extack, "unsupported pre-tunnel rule: only flags and proto can be matched in ip header");
return -EOPNOTSUPP;
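
The loop above rejects a pre-tunnel rule if any masked byte in the IP layer falls outside the flags and proto offsets, which is what lets one scan serve both the IPv4 and the larger IPv6 struct. As a standalone check:

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

static bool ip_mask_ok(const unsigned char *mask, size_t size,
		       size_t flags_off, size_t proto_off)
{
	size_t i;

	/* only the flags and proto bytes may carry a mask */
	for (i = 0; i < size; i++)
		if (mask[i] && i != flags_off && i != proto_off)
			return false;
	return true;
}

int main(void)
{
	unsigned char mask[20] = { 0 };	/* illustrative layer size */

	mask[6] = 0xff;	/* flags */
	mask[7] = 0xff;	/* proto */
	printf("%d\n", ip_mask_ok(mask, sizeof(mask), 6, 7));	/* 1 */
	mask[0] = 0xff;	/* stray field */
	printf("%d\n", ip_mask_ok(mask, sizeof(mask), 6, 7));	/* 0 */
	return 0;
}
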
@@ -1195,6 +1262,8 @@ err_remove_rhash:
err_release_metadata:
nfp_modify_flow_metadata(app, flow_pay);
err_destroy_flow:
+ if (flow_pay->nfp_tun_ipv6)
+ nfp_tunnel_put_ipv6_off(app, flow_pay->nfp_tun_ipv6);
kfree(flow_pay->action_data);
kfree(flow_pay->mask_data);
kfree(flow_pay->unmasked_data);
@@ -1311,6 +1380,9 @@ nfp_flower_del_offload(struct nfp_app *app, struct net_device *netdev,
if (nfp_flow->nfp_tun_ipv4_addr)
nfp_tunnel_del_ipv4_off(app, nfp_flow->nfp_tun_ipv4_addr);
+ if (nfp_flow->nfp_tun_ipv6)
+ nfp_tunnel_put_ipv6_off(app, nfp_flow->nfp_tun_ipv6);
+
if (!nfp_flow->in_hw) {
err = 0;
goto err_free_merge_flow;
diff --git a/drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c b/drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c
index 2600ce476d6b..2df3deedf9fd 100644
--- a/drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c
+++ b/drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c
@@ -55,6 +55,25 @@ struct nfp_tun_active_tuns {
};
/**
+ * struct nfp_tun_active_tuns_v6 - periodic message of active IPv6 tunnels
+ * @seq: sequence number of the message
+ * @count: number of tunnels reported in the message
+ * @flags: options part of the request
+ * @tun_info: tunnels that have sent traffic in the reported period
+ * @tun_info.ipv6: dest IPv6 address of active route
+ * @tun_info.egress_port: port the encapsulated packet egressed
+ */
+struct nfp_tun_active_tuns_v6 {
+ __be32 seq;
+ __be32 count;
+ __be32 flags;
+ struct route_ip_info_v6 {
+ struct in6_addr ipv6;
+ __be32 egress_port;
+ } tun_info[];
+};
+
+/**
* struct nfp_tun_neigh - neighbour/route entry on the NFP
* @dst_ipv4: destination IPv4 address
* @src_ipv4: source IPv4 address
@@ -71,6 +90,22 @@ struct nfp_tun_neigh {
};
/**
+ * struct nfp_tun_neigh_v6 - neighbour/route entry on the NFP
+ * @dst_ipv6: destination IPv6 address
+ * @src_ipv6: source IPv6 address
+ * @dst_addr: destination MAC address
+ * @src_addr: source MAC address
+ * @port_id: NFP port to output packet on - associated with source IPv6
+ */
+struct nfp_tun_neigh_v6 {
+ struct in6_addr dst_ipv6;
+ struct in6_addr src_ipv6;
+ u8 dst_addr[ETH_ALEN];
+ u8 src_addr[ETH_ALEN];
+ __be32 port_id;
+};
+
+/**
* struct nfp_tun_req_route_ipv4 - NFP requests a route/neighbour lookup
* @ingress_port: ingress port of packet that signalled request
* @ipv4_addr: destination ipv4 address for route
@@ -83,13 +118,23 @@ struct nfp_tun_req_route_ipv4 {
};
/**
- * struct nfp_ipv4_route_entry - routes that are offloaded to the NFP
- * @ipv4_addr: destination of route
+ * struct nfp_tun_req_route_ipv6 - NFP requests an IPv6 route/neighbour lookup
+ * @ingress_port: ingress port of packet that signalled request
+ * @ipv6_addr: destination ipv6 address for route
+ */
+struct nfp_tun_req_route_ipv6 {
+ __be32 ingress_port;
+ struct in6_addr ipv6_addr;
+};
+
+/**
+ * struct nfp_offloaded_route - routes that are offloaded to the NFP
* @list: list pointer
+ * @ip_add: destination of route - can be IPv4 or IPv6
*/
-struct nfp_ipv4_route_entry {
- __be32 ipv4_addr;
+struct nfp_offloaded_route {
struct list_head list;
+ u8 ip_add[];
};
#define NFP_FL_IPV4_ADDRS_MAX 32
@@ -116,6 +161,18 @@ struct nfp_ipv4_addr_entry {
struct list_head list;
};
+#define NFP_FL_IPV6_ADDRS_MAX 4
+
+/**
+ * struct nfp_tun_ipv6_addr - set the IP address list on the NFP
+ * @count: number of IPs populated in the array
+ * @ipv6_addr: array of NFP_FL_IPV6_ADDRS_MAX 128-bit IPv6 addresses
+ */
+struct nfp_tun_ipv6_addr {
+ __be32 count;
+ struct in6_addr ipv6_addr[NFP_FL_IPV6_ADDRS_MAX];
+};
+
#define NFP_TUN_MAC_OFFLOAD_DEL_FLAG 0x2
/**
@@ -206,6 +263,49 @@ void nfp_tunnel_keep_alive(struct nfp_app *app, struct sk_buff *skb)
rcu_read_unlock();
}
+void nfp_tunnel_keep_alive_v6(struct nfp_app *app, struct sk_buff *skb)
+{
+#if IS_ENABLED(CONFIG_IPV6)
+ struct nfp_tun_active_tuns_v6 *payload;
+ struct net_device *netdev;
+ int count, i, pay_len;
+ struct neighbour *n;
+ void *ipv6_add;
+ u32 port;
+
+ payload = nfp_flower_cmsg_get_data(skb);
+ count = be32_to_cpu(payload->count);
+ if (count > NFP_FL_IPV6_ADDRS_MAX) {
+ nfp_flower_cmsg_warn(app, "IPv6 tunnel keep-alive request exceeds max routes.\n");
+ return;
+ }
+
+ pay_len = nfp_flower_cmsg_get_data_len(skb);
+ if (pay_len != struct_size(payload, tun_info, count)) {
+ nfp_flower_cmsg_warn(app, "Corruption in tunnel keep-alive message.\n");
+ return;
+ }
+
+ rcu_read_lock();
+ for (i = 0; i < count; i++) {
+ ipv6_add = &payload->tun_info[i].ipv6;
+ port = be32_to_cpu(payload->tun_info[i].egress_port);
+ netdev = nfp_app_dev_get(app, port, NULL);
+ if (!netdev)
+ continue;
+
+ n = neigh_lookup(&nd_tbl, ipv6_add, netdev);
+ if (!n)
+ continue;
+
+ /* Update the used timestamp of neighbour */
+ neigh_event_send(n, NULL);
+ neigh_release(n);
+ }
+ rcu_read_unlock();
+#endif
+}
+
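
The length test above relies on struct_size(payload, tun_info, count), which expands to sizeof(*payload) + count * sizeof(payload->tun_info[0]) with overflow checks saturating to SIZE_MAX, so only a message that is exactly one header plus count records is accepted. The same arithmetic in plain C, with a simplified stand-in layout:

#include <stdint.h>
#include <stdio.h>

struct tun_info { unsigned char ipv6[16]; uint32_t egress_port; };
struct active_tuns {
	uint32_t seq, count, flags;
	struct tun_info tun_info[];	/* flexible array of records */
};

static size_t msg_size(unsigned int count)
{
	/* shape of struct_size(), without the kernel's overflow check */
	return sizeof(struct active_tuns) +
	       count * sizeof(struct tun_info);
}

int main(void)
{
	printf("hdr=%zu per-record=%zu msg(3)=%zu\n",
	       sizeof(struct active_tuns), sizeof(struct tun_info),
	       msg_size(3));
	return 0;
}
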
static int
nfp_flower_xmit_tun_conf(struct nfp_app *app, u8 mtype, u16 plen, void *pdata,
gfp_t flag)
@@ -224,71 +324,126 @@ nfp_flower_xmit_tun_conf(struct nfp_app *app, u8 mtype, u16 plen, void *pdata,
return 0;
}
-static bool nfp_tun_has_route(struct nfp_app *app, __be32 ipv4_addr)
+static bool
+__nfp_tun_has_route(struct list_head *route_list, spinlock_t *list_lock,
+ void *add, int add_len)
{
- struct nfp_flower_priv *priv = app->priv;
- struct nfp_ipv4_route_entry *entry;
- struct list_head *ptr, *storage;
+ struct nfp_offloaded_route *entry;
- spin_lock_bh(&priv->tun.neigh_off_lock);
- list_for_each_safe(ptr, storage, &priv->tun.neigh_off_list) {
- entry = list_entry(ptr, struct nfp_ipv4_route_entry, list);
- if (entry->ipv4_addr == ipv4_addr) {
- spin_unlock_bh(&priv->tun.neigh_off_lock);
+ spin_lock_bh(list_lock);
+ list_for_each_entry(entry, route_list, list)
+ if (!memcmp(entry->ip_add, add, add_len)) {
+ spin_unlock_bh(list_lock);
return true;
}
- }
- spin_unlock_bh(&priv->tun.neigh_off_lock);
+ spin_unlock_bh(list_lock);
return false;
}
-static void nfp_tun_add_route_to_cache(struct nfp_app *app, __be32 ipv4_addr)
+static int
+__nfp_tun_add_route_to_cache(struct list_head *route_list,
+ spinlock_t *list_lock, void *add, int add_len)
{
- struct nfp_flower_priv *priv = app->priv;
- struct nfp_ipv4_route_entry *entry;
- struct list_head *ptr, *storage;
+ struct nfp_offloaded_route *entry;
- spin_lock_bh(&priv->tun.neigh_off_lock);
- list_for_each_safe(ptr, storage, &priv->tun.neigh_off_list) {
- entry = list_entry(ptr, struct nfp_ipv4_route_entry, list);
- if (entry->ipv4_addr == ipv4_addr) {
- spin_unlock_bh(&priv->tun.neigh_off_lock);
- return;
+ spin_lock_bh(list_lock);
+ list_for_each_entry(entry, route_list, list)
+ if (!memcmp(entry->ip_add, add, add_len)) {
+ spin_unlock_bh(list_lock);
+ return 0;
}
- }
- entry = kmalloc(sizeof(*entry), GFP_ATOMIC);
+
+ entry = kmalloc(sizeof(*entry) + add_len, GFP_ATOMIC);
if (!entry) {
- spin_unlock_bh(&priv->tun.neigh_off_lock);
- nfp_flower_cmsg_warn(app, "Mem error when storing new route.\n");
- return;
+ spin_unlock_bh(list_lock);
+ return -ENOMEM;
}
- entry->ipv4_addr = ipv4_addr;
- list_add_tail(&entry->list, &priv->tun.neigh_off_list);
- spin_unlock_bh(&priv->tun.neigh_off_lock);
+ memcpy(entry->ip_add, add, add_len);
+ list_add_tail(&entry->list, route_list);
+ spin_unlock_bh(list_lock);
+
+ return 0;
}
-static void nfp_tun_del_route_from_cache(struct nfp_app *app, __be32 ipv4_addr)
+static void
+__nfp_tun_del_route_from_cache(struct list_head *route_list,
+ spinlock_t *list_lock, void *add, int add_len)
{
- struct nfp_flower_priv *priv = app->priv;
- struct nfp_ipv4_route_entry *entry;
- struct list_head *ptr, *storage;
+ struct nfp_offloaded_route *entry;
- spin_lock_bh(&priv->tun.neigh_off_lock);
- list_for_each_safe(ptr, storage, &priv->tun.neigh_off_list) {
- entry = list_entry(ptr, struct nfp_ipv4_route_entry, list);
- if (entry->ipv4_addr == ipv4_addr) {
+ spin_lock_bh(list_lock);
+ list_for_each_entry(entry, route_list, list)
+ if (!memcmp(entry->ip_add, add, add_len)) {
list_del(&entry->list);
kfree(entry);
break;
}
- }
- spin_unlock_bh(&priv->tun.neigh_off_lock);
+ spin_unlock_bh(list_lock);
+}
+
+static bool nfp_tun_has_route_v4(struct nfp_app *app, __be32 *ipv4_addr)
+{
+ struct nfp_flower_priv *priv = app->priv;
+
+ return __nfp_tun_has_route(&priv->tun.neigh_off_list_v4,
+ &priv->tun.neigh_off_lock_v4, ipv4_addr,
+ sizeof(*ipv4_addr));
+}
+
+static bool
+nfp_tun_has_route_v6(struct nfp_app *app, struct in6_addr *ipv6_addr)
+{
+ struct nfp_flower_priv *priv = app->priv;
+
+ return __nfp_tun_has_route(&priv->tun.neigh_off_list_v6,
+ &priv->tun.neigh_off_lock_v6, ipv6_addr,
+ sizeof(*ipv6_addr));
+}
+
+static void
+nfp_tun_add_route_to_cache_v4(struct nfp_app *app, __be32 *ipv4_addr)
+{
+ struct nfp_flower_priv *priv = app->priv;
+
+ __nfp_tun_add_route_to_cache(&priv->tun.neigh_off_list_v4,
+ &priv->tun.neigh_off_lock_v4, ipv4_addr,
+ sizeof(*ipv4_addr));
+}
+
+static void
+nfp_tun_add_route_to_cache_v6(struct nfp_app *app, struct in6_addr *ipv6_addr)
+{
+ struct nfp_flower_priv *priv = app->priv;
+
+ __nfp_tun_add_route_to_cache(&priv->tun.neigh_off_list_v6,
+ &priv->tun.neigh_off_lock_v6, ipv6_addr,
+ sizeof(*ipv6_addr));
}
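
The helpers above become family-agnostic by keying on raw bytes: each entry ends in a flexible array sized at allocation, and lookup is a memcmp() over that span. The kernel keeps one list per family, so the compare length is implied by which list is used; the model below collapses this into a single list and stores the length explicitly (plain userspace C, made-up names, no locking):

#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct route_entry {
	struct route_entry *next;
	size_t len;
	unsigned char ip_add[];	/* flexible array: 4 or 16 bytes */
};

static struct route_entry *head;

static bool has_route(const void *add, size_t len)
{
	const struct route_entry *e;

	for (e = head; e; e = e->next)
		if (e->len == len && !memcmp(e->ip_add, add, len))
			return true;
	return false;
}

static int add_route(const void *add, size_t len)
{
	struct route_entry *e;

	if (has_route(add, len))
		return 0;
	e = malloc(sizeof(*e) + len);	/* tail sized per family */
	if (!e)
		return -1;
	e->len = len;
	memcpy(e->ip_add, add, len);
	e->next = head;
	head = e;
	return 0;
}

int main(void)
{
	unsigned char v4[4] = { 10, 0, 0, 1 };
	unsigned char v6[16] = { 0x20, 0x01 };

	add_route(v4, sizeof(v4));
	add_route(v6, sizeof(v6));
	printf("v4 cached: %d, v6 cached: %d\n",
	       has_route(v4, 4), has_route(v6, 16));
	return 0;
}
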
static void
-nfp_tun_write_neigh(struct net_device *netdev, struct nfp_app *app,
- struct flowi4 *flow, struct neighbour *neigh, gfp_t flag)
+nfp_tun_del_route_from_cache_v4(struct nfp_app *app, __be32 *ipv4_addr)
+{
+ struct nfp_flower_priv *priv = app->priv;
+
+ __nfp_tun_del_route_from_cache(&priv->tun.neigh_off_list_v4,
+ &priv->tun.neigh_off_lock_v4, ipv4_addr,
+ sizeof(*ipv4_addr));
+}
+
+static void
+nfp_tun_del_route_from_cache_v6(struct nfp_app *app, struct in6_addr *ipv6_addr)
+{
+ struct nfp_flower_priv *priv = app->priv;
+
+ __nfp_tun_del_route_from_cache(&priv->tun.neigh_off_list_v6,
+ &priv->tun.neigh_off_lock_v6, ipv6_addr,
+ sizeof(*ipv6_addr));
+}
+
+static void
+nfp_tun_write_neigh_v4(struct net_device *netdev, struct nfp_app *app,
+ struct flowi4 *flow, struct neighbour *neigh, gfp_t flag)
{
struct nfp_tun_neigh payload;
u32 port_id;
@@ -302,7 +457,7 @@ nfp_tun_write_neigh(struct net_device *netdev, struct nfp_app *app,
/* If entry has expired send dst IP with all other fields 0. */
if (!(neigh->nud_state & NUD_VALID) || neigh->dead) {
- nfp_tun_del_route_from_cache(app, payload.dst_ipv4);
+ nfp_tun_del_route_from_cache_v4(app, &payload.dst_ipv4);
/* Trigger ARP to verify invalid neighbour state. */
neigh_event_send(neigh, NULL);
goto send_msg;
@@ -314,7 +469,7 @@ nfp_tun_write_neigh(struct net_device *netdev, struct nfp_app *app,
neigh_ha_snapshot(payload.dst_addr, neigh, netdev);
payload.port_id = cpu_to_be32(port_id);
/* Add destination of new route to NFP cache. */
- nfp_tun_add_route_to_cache(app, payload.dst_ipv4);
+ nfp_tun_add_route_to_cache_v4(app, &payload.dst_ipv4);
send_msg:
nfp_flower_xmit_tun_conf(app, NFP_FLOWER_CMSG_TYPE_TUN_NEIGH,
@@ -322,16 +477,54 @@ send_msg:
(unsigned char *)&payload, flag);
}
+static void
+nfp_tun_write_neigh_v6(struct net_device *netdev, struct nfp_app *app,
+ struct flowi6 *flow, struct neighbour *neigh, gfp_t flag)
+{
+ struct nfp_tun_neigh_v6 payload;
+ u32 port_id;
+
+ port_id = nfp_flower_get_port_id_from_netdev(app, netdev);
+ if (!port_id)
+ return;
+
+ memset(&payload, 0, sizeof(struct nfp_tun_neigh_v6));
+ payload.dst_ipv6 = flow->daddr;
+
+ /* If entry has expired send dst IP with all other fields 0. */
+ if (!(neigh->nud_state & NUD_VALID) || neigh->dead) {
+ nfp_tun_del_route_from_cache_v6(app, &payload.dst_ipv6);
+ /* Trigger probe to verify invalid neighbour state. */
+ neigh_event_send(neigh, NULL);
+ goto send_msg;
+ }
+
+ /* Have a valid neighbour so populate rest of entry. */
+ payload.src_ipv6 = flow->saddr;
+ ether_addr_copy(payload.src_addr, netdev->dev_addr);
+ neigh_ha_snapshot(payload.dst_addr, neigh, netdev);
+ payload.port_id = cpu_to_be32(port_id);
+ /* Add destination of new route to NFP cache. */
+ nfp_tun_add_route_to_cache_v6(app, &payload.dst_ipv6);
+
+send_msg:
+ nfp_flower_xmit_tun_conf(app, NFP_FLOWER_CMSG_TYPE_TUN_NEIGH_V6,
+ sizeof(struct nfp_tun_neigh_v6),
+ (unsigned char *)&payload, flag);
+}
+
static int
nfp_tun_neigh_event_handler(struct notifier_block *nb, unsigned long event,
void *ptr)
{
struct nfp_flower_priv *app_priv;
struct netevent_redirect *redir;
- struct flowi4 flow = {};
+ struct flowi4 flow4 = {};
+ struct flowi6 flow6 = {};
struct neighbour *n;
struct nfp_app *app;
struct rtable *rt;
+ bool ipv6 = false;
int err;
switch (event) {
@@ -346,7 +539,13 @@ nfp_tun_neigh_event_handler(struct notifier_block *nb, unsigned long event,
return NOTIFY_DONE;
}
- flow.daddr = *(__be32 *)n->primary_key;
+ if (n->tbl->family == AF_INET6)
+ ipv6 = true;
+
+ if (ipv6)
+ flow6.daddr = *(struct in6_addr *)n->primary_key;
+ else
+ flow4.daddr = *(__be32 *)n->primary_key;
app_priv = container_of(nb, struct nfp_flower_priv, tun.neigh_nb);
app = app_priv->app;
@@ -356,28 +555,46 @@ nfp_tun_neigh_event_handler(struct notifier_block *nb, unsigned long event,
return NOTIFY_DONE;
/* Only concerned with changes to routes already added to NFP. */
- if (!nfp_tun_has_route(app, flow.daddr))
+ if ((ipv6 && !nfp_tun_has_route_v6(app, &flow6.daddr)) ||
+ (!ipv6 && !nfp_tun_has_route_v4(app, &flow4.daddr)))
return NOTIFY_DONE;
#if IS_ENABLED(CONFIG_INET)
- /* Do a route lookup to populate flow data. */
- rt = ip_route_output_key(dev_net(n->dev), &flow);
- err = PTR_ERR_OR_ZERO(rt);
- if (err)
+ if (ipv6) {
+#if IS_ENABLED(CONFIG_IPV6)
+ struct dst_entry *dst;
+
+ dst = ipv6_stub->ipv6_dst_lookup_flow(dev_net(n->dev), NULL,
+ &flow6, NULL);
+ if (IS_ERR(dst))
+ return NOTIFY_DONE;
+
+ dst_release(dst);
+ flow6.flowi6_proto = IPPROTO_UDP;
+ nfp_tun_write_neigh_v6(n->dev, app, &flow6, n, GFP_ATOMIC);
+#else
return NOTIFY_DONE;
+#endif /* CONFIG_IPV6 */
+ } else {
+ /* Do a route lookup to populate flow data. */
+ rt = ip_route_output_key(dev_net(n->dev), &flow4);
+ err = PTR_ERR_OR_ZERO(rt);
+ if (err)
+ return NOTIFY_DONE;
- ip_rt_put(rt);
+ ip_rt_put(rt);
+
+ flow4.flowi4_proto = IPPROTO_UDP;
+ nfp_tun_write_neigh_v4(n->dev, app, &flow4, n, GFP_ATOMIC);
+ }
#else
return NOTIFY_DONE;
-#endif
-
- flow.flowi4_proto = IPPROTO_UDP;
- nfp_tun_write_neigh(n->dev, app, &flow, n, GFP_ATOMIC);
+#endif /* CONFIG_INET */
return NOTIFY_OK;
}
-void nfp_tunnel_request_route(struct nfp_app *app, struct sk_buff *skb)
+void nfp_tunnel_request_route_v4(struct nfp_app *app, struct sk_buff *skb)
{
struct nfp_tun_req_route_ipv4 *payload;
struct net_device *netdev;
@@ -411,7 +628,7 @@ void nfp_tunnel_request_route(struct nfp_app *app, struct sk_buff *skb)
ip_rt_put(rt);
if (!n)
goto fail_rcu_unlock;
- nfp_tun_write_neigh(n->dev, app, &flow, n, GFP_ATOMIC);
+ nfp_tun_write_neigh_v4(n->dev, app, &flow, n, GFP_ATOMIC);
neigh_release(n);
rcu_read_unlock();
return;
@@ -421,6 +638,48 @@ fail_rcu_unlock:
nfp_flower_cmsg_warn(app, "Requested route not found.\n");
}
+void nfp_tunnel_request_route_v6(struct nfp_app *app, struct sk_buff *skb)
+{
+ struct nfp_tun_req_route_ipv6 *payload;
+ struct net_device *netdev;
+ struct flowi6 flow = {};
+ struct dst_entry *dst;
+ struct neighbour *n;
+
+ payload = nfp_flower_cmsg_get_data(skb);
+
+ rcu_read_lock();
+ netdev = nfp_app_dev_get(app, be32_to_cpu(payload->ingress_port), NULL);
+ if (!netdev)
+ goto fail_rcu_unlock;
+
+ flow.daddr = payload->ipv6_addr;
+ flow.flowi6_proto = IPPROTO_UDP;
+
+#if IS_ENABLED(CONFIG_INET) && IS_ENABLED(CONFIG_IPV6)
+ dst = ipv6_stub->ipv6_dst_lookup_flow(dev_net(netdev), NULL, &flow,
+ NULL);
+ if (IS_ERR(dst))
+ goto fail_rcu_unlock;
+#else
+ goto fail_rcu_unlock;
+#endif
+
+ n = dst_neigh_lookup(dst, &flow.daddr);
+ dst_release(dst);
+ if (!n)
+ goto fail_rcu_unlock;
+
+ nfp_tun_write_neigh_v6(n->dev, app, &flow, n, GFP_ATOMIC);
+ neigh_release(n);
+ rcu_read_unlock();
+ return;
+
+fail_rcu_unlock:
+ rcu_read_unlock();
+ nfp_flower_cmsg_warn(app, "Requested IPv6 route not found.\n");
+}
+
static void nfp_tun_write_ipv4_list(struct nfp_app *app)
{
struct nfp_flower_priv *priv = app->priv;
@@ -502,6 +761,78 @@ void nfp_tunnel_del_ipv4_off(struct nfp_app *app, __be32 ipv4)
nfp_tun_write_ipv4_list(app);
}
+static void nfp_tun_write_ipv6_list(struct nfp_app *app)
+{
+ struct nfp_flower_priv *priv = app->priv;
+ struct nfp_ipv6_addr_entry *entry;
+ struct nfp_tun_ipv6_addr payload;
+ int count = 0;
+
+ memset(&payload, 0, sizeof(struct nfp_tun_ipv6_addr));
+ mutex_lock(&priv->tun.ipv6_off_lock);
+ list_for_each_entry(entry, &priv->tun.ipv6_off_list, list) {
+ if (count >= NFP_FL_IPV6_ADDRS_MAX) {
+ nfp_flower_cmsg_warn(app, "Too many IPv6 tunnel endpoint addresses, some cannot be offloaded.\n");
+ break;
+ }
+ payload.ipv6_addr[count++] = entry->ipv6_addr;
+ }
+ mutex_unlock(&priv->tun.ipv6_off_lock);
+ payload.count = cpu_to_be32(count);
+
+ nfp_flower_xmit_tun_conf(app, NFP_FLOWER_CMSG_TYPE_TUN_IPS_V6,
+ sizeof(struct nfp_tun_ipv6_addr),
+ &payload, GFP_KERNEL);
+}
+
+struct nfp_ipv6_addr_entry *
+nfp_tunnel_add_ipv6_off(struct nfp_app *app, struct in6_addr *ipv6)
+{
+ struct nfp_flower_priv *priv = app->priv;
+ struct nfp_ipv6_addr_entry *entry;
+
+ mutex_lock(&priv->tun.ipv6_off_lock);
+ list_for_each_entry(entry, &priv->tun.ipv6_off_list, list)
+ if (!memcmp(&entry->ipv6_addr, ipv6, sizeof(*ipv6))) {
+ entry->ref_count++;
+ mutex_unlock(&priv->tun.ipv6_off_lock);
+ return entry;
+ }
+
+ entry = kmalloc(sizeof(*entry), GFP_KERNEL);
+ if (!entry) {
+ mutex_unlock(&priv->tun.ipv6_off_lock);
+ nfp_flower_cmsg_warn(app, "Mem error when offloading IP address.\n");
+ return NULL;
+ }
+ entry->ipv6_addr = *ipv6;
+ entry->ref_count = 1;
+ list_add_tail(&entry->list, &priv->tun.ipv6_off_list);
+ mutex_unlock(&priv->tun.ipv6_off_lock);
+
+ nfp_tun_write_ipv6_list(app);
+
+ return entry;
+}
+
+void
+nfp_tunnel_put_ipv6_off(struct nfp_app *app, struct nfp_ipv6_addr_entry *entry)
+{
+ struct nfp_flower_priv *priv = app->priv;
+ bool freed = false;
+
+ mutex_lock(&priv->tun.ipv6_off_lock);
+ if (!--entry->ref_count) {
+ list_del(&entry->list);
+ kfree(entry);
+ freed = true;
+ }
+ mutex_unlock(&priv->tun.ipv6_off_lock);
+
+ if (freed)
+ nfp_tun_write_ipv6_list(app);
+}
+
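
nfp_tunnel_add_ipv6_off() and nfp_tunnel_put_ipv6_off() form a refcounted pair: the firmware's endpoint list is rewritten only when an address first appears or when its last user drops it. A minimal userspace sketch of that contract, with made-up names and no locking:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct ip6_entry {
	struct ip6_entry *next;
	unsigned char addr[16];
	int ref_count;
};

static struct ip6_entry *off_list;

static void write_fw_list(void)
{
	int n = 0;

	for (struct ip6_entry *e = off_list; e; e = e->next)
		n++;
	printf("fw list rewritten: %d address(es)\n", n);
}

static struct ip6_entry *ip6_off_add(const unsigned char addr[16])
{
	struct ip6_entry *e;

	for (e = off_list; e; e = e->next)
		if (!memcmp(e->addr, addr, 16)) {
			e->ref_count++;	/* already offloaded: just ref */
			return e;
		}
	e = calloc(1, sizeof(*e));
	if (!e)
		return NULL;
	memcpy(e->addr, addr, 16);
	e->ref_count = 1;
	e->next = off_list;
	off_list = e;
	write_fw_list();	/* new address: push updated list */
	return e;
}

static void ip6_off_put(struct ip6_entry *entry)
{
	if (--entry->ref_count)
		return;
	for (struct ip6_entry **p = &off_list; *p; p = &(*p)->next)
		if (*p == entry) {
			*p = entry->next;
			break;
		}
	free(entry);
	write_fw_list();	/* last user gone: push updated list */
}

int main(void)
{
	unsigned char a[16] = { 0x20, 0x01 };
	struct ip6_entry *e1 = ip6_off_add(a);
	struct ip6_entry *e2 = ip6_off_add(a);	/* same addr: refcount 2 */

	ip6_off_put(e2);
	ip6_off_put(e1);
	return 0;
}
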
static int
__nfp_tunnel_offload_mac(struct nfp_app *app, u8 *mac, u16 idx, bool del)
{
@@ -1013,13 +1344,17 @@ int nfp_tunnel_config_start(struct nfp_app *app)
ida_init(&priv->tun.mac_off_ids);
- /* Initialise priv data for IPv4 offloading. */
+ /* Initialise priv data for IPv4/v6 offloading. */
mutex_init(&priv->tun.ipv4_off_lock);
INIT_LIST_HEAD(&priv->tun.ipv4_off_list);
+ mutex_init(&priv->tun.ipv6_off_lock);
+ INIT_LIST_HEAD(&priv->tun.ipv6_off_list);
/* Initialise priv data for neighbour offloading. */
- spin_lock_init(&priv->tun.neigh_off_lock);
- INIT_LIST_HEAD(&priv->tun.neigh_off_list);
+ spin_lock_init(&priv->tun.neigh_off_lock_v4);
+ INIT_LIST_HEAD(&priv->tun.neigh_off_list_v4);
+ spin_lock_init(&priv->tun.neigh_off_lock_v6);
+ INIT_LIST_HEAD(&priv->tun.neigh_off_list_v6);
priv->tun.neigh_nb.notifier_call = nfp_tun_neigh_event_handler;
err = register_netevent_notifier(&priv->tun.neigh_nb);
@@ -1034,9 +1369,11 @@ int nfp_tunnel_config_start(struct nfp_app *app)
void nfp_tunnel_config_stop(struct nfp_app *app)
{
+ struct nfp_offloaded_route *route_entry, *temp;
struct nfp_flower_priv *priv = app->priv;
- struct nfp_ipv4_route_entry *route_entry;
struct nfp_ipv4_addr_entry *ip_entry;
+ struct nfp_tun_neigh_v6 ipv6_route;
+ struct nfp_tun_neigh ipv4_route;
struct list_head *ptr, *storage;
unregister_netevent_notifier(&priv->tun.neigh_nb);
@@ -1050,12 +1387,35 @@ void nfp_tunnel_config_stop(struct nfp_app *app)
kfree(ip_entry);
}
- /* Free any memory that may be occupied by the route list. */
- list_for_each_safe(ptr, storage, &priv->tun.neigh_off_list) {
- route_entry = list_entry(ptr, struct nfp_ipv4_route_entry,
- list);
+ mutex_destroy(&priv->tun.ipv6_off_lock);
+
+ /* Free memory in the route list and remove entries from fw cache. */
+ list_for_each_entry_safe(route_entry, temp,
+ &priv->tun.neigh_off_list_v4, list) {
+ memset(&ipv4_route, 0, sizeof(ipv4_route));
+ memcpy(&ipv4_route.dst_ipv4, &route_entry->ip_add,
+ sizeof(ipv4_route.dst_ipv4));
list_del(&route_entry->list);
kfree(route_entry);
+
+ nfp_flower_xmit_tun_conf(app, NFP_FLOWER_CMSG_TYPE_TUN_NEIGH,
+ sizeof(struct nfp_tun_neigh),
+ (unsigned char *)&ipv4_route,
+ GFP_KERNEL);
+ }
+
+ list_for_each_entry_safe(route_entry, temp,
+ &priv->tun.neigh_off_list_v6, list) {
+ memset(&ipv6_route, 0, sizeof(ipv6_route));
+ memcpy(&ipv6_route.dst_ipv6, &route_entry->ip_add,
+ sizeof(ipv6_route.dst_ipv6));
+ list_del(&route_entry->list);
+ kfree(route_entry);
+
+ nfp_flower_xmit_tun_conf(app, NFP_FLOWER_CMSG_TYPE_TUN_NEIGH_V6,
+ sizeof(struct nfp_tun_neigh_v6),
+ (unsigned char *)&ipv6_route,
+ GFP_KERNEL);
}
/* Destroy rhash. Entries should be cleaned on netdev notifier unreg. */
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net.h b/drivers/net/ethernet/netronome/nfp/nfp_net.h
index 250f510b1d21..ff4438478ea9 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_net.h
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net.h
@@ -586,6 +586,9 @@ struct nfp_net_dp {
* @ktls_conn_id_gen: Trivial generator for kTLS connection ids (for TX)
* @ktls_no_space: Counter of firmware rejecting kTLS connection due to
* lack of space
+ * @ktls_rx_resync_req: Counter of TLS RX resync requested
+ * @ktls_rx_resync_ign: Counter of TLS RX resync requests ignored
+ * @ktls_rx_resync_sent: Counter of TLS RX resync completed
* @mbox_cmsg: Common Control Message via vNIC mailbox state
* @mbox_cmsg.queue: CCM mbox queue of pending messages
* @mbox_cmsg.wq: CCM mbox wait queue of waiting processes
@@ -674,6 +677,9 @@ struct nfp_net {
atomic64_t ktls_conn_id_gen;
atomic_t ktls_no_space;
+ atomic_t ktls_rx_resync_req;
+ atomic_t ktls_rx_resync_ign;
+ atomic_t ktls_rx_resync_sent;
struct {
struct sk_buff_head queue;
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c
index bcdcd6de7dea..9bfb3b077bc1 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c
@@ -47,6 +47,7 @@
#include "nfp_net_sriov.h"
#include "nfp_port.h"
#include "crypto/crypto.h"
+#include "crypto/fw.h"
/**
* nfp_net_get_fw_version() - Read and parse the FW version
@@ -1321,17 +1322,11 @@ nfp_net_tx_ring_reset(struct nfp_net_dp *dp, struct nfp_net_tx_ring *tx_ring)
netdev_tx_reset_queue(nd_q);
}
-static void nfp_net_tx_timeout(struct net_device *netdev)
+static void nfp_net_tx_timeout(struct net_device *netdev, unsigned int txqueue)
{
struct nfp_net *nn = netdev_priv(netdev);
- int i;
- for (i = 0; i < nn->dp.netdev->real_num_tx_queues; i++) {
- if (!netif_tx_queue_stopped(netdev_get_tx_queue(netdev, i)))
- continue;
- nn_warn(nn, "TX timeout on ring: %d\n", i);
- }
- nn_warn(nn, "TX watchdog timeout\n");
+ nn_warn(nn, "TX watchdog timeout on ring: %u\n", txqueue);
}
/* Receive processing
@@ -1667,9 +1662,9 @@ nfp_net_set_hash_desc(struct net_device *netdev, struct nfp_meta_parsed *meta,
&rx_hash->hash);
}
-static void *
+static bool
nfp_net_parse_meta(struct net_device *netdev, struct nfp_meta_parsed *meta,
- void *data, int meta_len)
+ void *data, void *pkt, unsigned int pkt_len, int meta_len)
{
u32 meta_info;
@@ -1699,14 +1694,20 @@ nfp_net_parse_meta(struct net_device *netdev, struct nfp_meta_parsed *meta,
(__force __wsum)__get_unaligned_cpu32(data);
data += 4;
break;
+ case NFP_NET_META_RESYNC_INFO:
+ if (nfp_net_tls_rx_resync_req(netdev, data, pkt,
+ pkt_len))
+ return false;
+ data += sizeof(struct nfp_net_tls_resync_req);
+ break;
default:
- return NULL;
+ return true;
}
meta_info >>= NFP_NET_META_FIELD_SIZE;
}
- return data;
+ return data != pkt;
}
static void
@@ -1891,12 +1892,10 @@ static int nfp_net_rx(struct nfp_net_rx_ring *rx_ring, int budget)
nfp_net_set_hash_desc(dp->netdev, &meta,
rxbuf->frag + meta_off, rxd);
} else if (meta_len) {
- void *end;
-
- end = nfp_net_parse_meta(dp->netdev, &meta,
- rxbuf->frag + meta_off,
- meta_len);
- if (unlikely(end != rxbuf->frag + pkt_off)) {
+ if (unlikely(nfp_net_parse_meta(dp->netdev, &meta,
+ rxbuf->frag + meta_off,
+ rxbuf->frag + pkt_off,
+ pkt_len, meta_len))) {
nn_dp_warn(dp, "invalid RX packet metadata\n");
nfp_net_rx_drop(dp, r_vec, rx_ring, rxbuf,
NULL);
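
The reworked parser reports validity as a bool: an unknown field, or a cursor that does not land exactly on the packet start, marks the frame invalid, while a failed resync request bails out without flagging it. A compact model of the nibble-packed metadata walk (field IDs and sizes are illustrative):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define META_FIELD_SIZE	4
#define META_FIELD_MASK	0xf
#define META_HASH	1
#define META_MARK	2

static bool parse_meta(const uint8_t *data, const uint8_t *pkt,
		       uint32_t meta_info)
{
	/* one 4-bit field type per nibble, consumed low nibble first */
	while (meta_info) {
		switch (meta_info & META_FIELD_MASK) {
		case META_HASH:
			data += 4;	/* 32-bit hash value */
			break;
		case META_MARK:
			data += 4;	/* 32-bit skb mark */
			break;
		default:
			return true;	/* unknown field: invalid */
		}
		meta_info >>= META_FIELD_SIZE;
	}
	return data != pkt;	/* must land exactly on packet start */
}

int main(void)
{
	uint8_t buf[12] = { 0 };

	/* two fields (hash then mark) occupy 8 bytes before the packet */
	printf("%d\n", parse_meta(buf, buf + 8,
				  (META_MARK << 4) | META_HASH)); /* 0 */
	printf("%d\n", parse_meta(buf, buf + 8, META_HASH));	  /* 1 */
	return 0;
}
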
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_ctrl.c b/drivers/net/ethernet/netronome/nfp/nfp_net_ctrl.c
index d835c14b7257..c3a763134e79 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_net_ctrl.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net_ctrl.c
@@ -17,6 +17,30 @@ static void nfp_net_tlv_caps_reset(struct nfp_net_tlv_caps *caps)
caps->mbox_len = NFP_NET_CFG_MBOX_VAL_MAX_SZ;
}
+static bool
+nfp_net_tls_parse_crypto_ops(struct device *dev, struct nfp_net_tlv_caps *caps,
+ u8 __iomem *ctrl_mem, u8 __iomem *data,
+ unsigned int length, unsigned int offset,
+ bool rx_stream_scan)
+{
+ /* Ignore the legacy TLV if new one was already parsed */
+ if (caps->tls_resync_ss && !rx_stream_scan)
+ return true;
+
+ if (length < 32) {
+ dev_err(dev,
+ "CRYPTO OPS TLV should be at least 32B, is %dB offset:%u\n",
+ length, offset);
+ return false;
+ }
+
+ caps->crypto_ops = readl(data);
+ caps->crypto_enable_off = data - ctrl_mem + 16;
+ caps->tls_resync_ss = rx_stream_scan;
+
+ return true;
+}
+
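
nfp_net_tls_parse_crypto_ops() gives the RX_SCAN variant precedence: once it has set tls_resync_ss, a later legacy CRYPTO_OPS TLV is ignored, so the stream scan capability wins regardless of TLV order. A tiny model of that rule:

#include <stdbool.h>
#include <stdio.h>

struct caps { unsigned int crypto_ops; bool tls_resync_ss; };

static void parse_crypto_ops(struct caps *c, unsigned int ops, bool rx_scan)
{
	if (c->tls_resync_ss && !rx_scan)
		return;		/* legacy TLV after new one: ignore */
	c->crypto_ops = ops;
	c->tls_resync_ss = rx_scan;
}

int main(void)
{
	struct caps c = { 0 };

	parse_crypto_ops(&c, 0x1, true);	/* RX_SCAN variant */
	parse_crypto_ops(&c, 0x2, false);	/* legacy: ignored */
	printf("ops=%#x stream_scan=%d\n", c.crypto_ops, c.tls_resync_ss);
	return 0;
}
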
int nfp_net_tlv_caps_parse(struct device *dev, u8 __iomem *ctrl_mem,
struct nfp_net_tlv_caps *caps)
{
@@ -104,15 +128,25 @@ int nfp_net_tlv_caps_parse(struct device *dev, u8 __iomem *ctrl_mem,
caps->mbox_cmsg_types = readl(data);
break;
case NFP_NET_CFG_TLV_TYPE_CRYPTO_OPS:
- if (length < 32) {
- dev_err(dev,
- "CRYPTO OPS TLV should be at least 32B, is %dB offset:%u\n",
- length, offset);
+ if (!nfp_net_tls_parse_crypto_ops(dev, caps, ctrl_mem,
+ data, length, offset,
+ false))
return -EINVAL;
+ break;
+ case NFP_NET_CFG_TLV_TYPE_VNIC_STATS:
+ if ((data - ctrl_mem) % 8) {
+ dev_warn(dev, "VNIC STATS TLV misaligned, ignoring offset:%u len:%u\n",
+ offset, length);
+ break;
}
-
- caps->crypto_ops = readl(data);
- caps->crypto_enable_off = data - ctrl_mem + 16;
+ caps->vnic_stats_off = data - ctrl_mem;
+ caps->vnic_stats_cnt = length / 10;
+ break;
+ case NFP_NET_CFG_TLV_TYPE_CRYPTO_OPS_RX_SCAN:
+ if (!nfp_net_tls_parse_crypto_ops(dev, caps, ctrl_mem,
+ data, length, offset,
+ true))
+ return -EINVAL;
break;
default:
if (!FIELD_GET(NFP_NET_CFG_TLV_HEADER_REQUIRED, hdr))
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_ctrl.h b/drivers/net/ethernet/netronome/nfp/nfp_net_ctrl.h
index ee6b24e4eacd..3d61a8cb60b0 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_net_ctrl.h
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net_ctrl.h
@@ -45,6 +45,7 @@
#define NFP_NET_META_PORTID 5
#define NFP_NET_META_CSUM 6 /* checksum complete type */
#define NFP_NET_META_CONN_HANDLE 7
+#define NFP_NET_META_RESYNC_INFO 8 /* RX resync info request */
#define NFP_META_PORT_ID_CTRL ~0U
@@ -479,6 +480,22 @@
* 8 words, bitmaps of supported and enabled crypto operations.
* First 16B (4 words) contains a bitmap of supported crypto operations,
* and next 16B contain the enabled operations.
+ * This capability is made obsolete by ones with better sync methods.
+ *
+ * %NFP_NET_CFG_TLV_TYPE_VNIC_STATS:
+ * Variable, per-vNIC statistics, data should be 8B aligned (FW should insert
+ * zero-length RESERVED TLV to pad).
+ * TLV data has two sections. First is an array of statistics' IDs (2B each).
+ * Second is an array of the 8B statistics themselves. Statistics are 8B
+ * aligned, meaning there may be padding between the sections.
+ * Number of statistics can be determined as floor(tlv.length / (2 + 8)).
+ * This TLV overwrites %NFP_NET_CFG_STATS_* values (statistics in this TLV
+ * duplicate the old ones, so driver should be careful not to unnecessarily
+ * render both).
+ *
+ * %NFP_NET_CFG_TLV_TYPE_CRYPTO_OPS_RX_SCAN:
+ * Same as %NFP_NET_CFG_TLV_TYPE_CRYPTO_OPS, but crypto TLS does stream scan
+ * RX resync, rather than kernel-assisted resync.
*/
#define NFP_NET_CFG_TLV_TYPE_UNKNOWN 0
#define NFP_NET_CFG_TLV_TYPE_RESERVED 1
@@ -490,6 +507,8 @@
#define NFP_NET_CFG_TLV_TYPE_REPR_CAP 7
#define NFP_NET_CFG_TLV_TYPE_MBOX_CMSG_TYPES 10
#define NFP_NET_CFG_TLV_TYPE_CRYPTO_OPS 11 /* see crypto/fw.h */
+#define NFP_NET_CFG_TLV_TYPE_VNIC_STATS 12
+#define NFP_NET_CFG_TLV_TYPE_CRYPTO_OPS_RX_SCAN 13
struct device;
@@ -502,6 +521,9 @@ struct device;
* @mbox_cmsg_types: cmsgs which can be passed through the mailbox
* @crypto_ops: supported crypto operations
* @crypto_enable_off: offset of crypto ops enable region
+ * @vnic_stats_off: offset of vNIC stats area
+ * @vnic_stats_cnt: number of vNIC stats
+ * @tls_resync_ss: TLS resync will be performed via stream scan
*/
struct nfp_net_tlv_caps {
u32 me_freq_mhz;
@@ -511,6 +533,9 @@ struct nfp_net_tlv_caps {
u32 mbox_cmsg_types;
u32 crypto_ops;
unsigned int crypto_enable_off;
+ unsigned int vnic_stats_off;
+ unsigned int vnic_stats_cnt;
+ unsigned int tls_resync_ss:1;
};
int nfp_net_tlv_caps_parse(struct device *dev, u8 __iomem *ctrl_mem,
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c b/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c
index 1b840ee47339..d648e32c0520 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c
@@ -148,11 +148,33 @@ static const struct nfp_et_stat nfp_mac_et_stats[] = {
{ "tx_pause_frames_class7", NFP_MAC_STATS_TX_PAUSE_FRAMES_CLASS7, },
};
+static const char nfp_tlv_stat_names[][ETH_GSTRING_LEN] = {
+ [1] = "dev_rx_discards",
+ [2] = "dev_rx_errors",
+ [3] = "dev_rx_bytes",
+ [4] = "dev_rx_uc_bytes",
+ [5] = "dev_rx_mc_bytes",
+ [6] = "dev_rx_bc_bytes",
+ [7] = "dev_rx_pkts",
+ [8] = "dev_rx_mc_pkts",
+ [9] = "dev_rx_bc_pkts",
+
+ [10] = "dev_tx_discards",
+ [11] = "dev_tx_errors",
+ [12] = "dev_tx_bytes",
+ [13] = "dev_tx_uc_bytes",
+ [14] = "dev_tx_mc_bytes",
+ [15] = "dev_tx_bc_bytes",
+ [16] = "dev_tx_pkts",
+ [17] = "dev_tx_mc_pkts",
+ [18] = "dev_tx_bc_pkts",
+};
+
#define NN_ET_GLOBAL_STATS_LEN ARRAY_SIZE(nfp_net_et_stats)
#define NN_ET_SWITCH_STATS_LEN 9
#define NN_RVEC_GATHER_STATS 13
#define NN_RVEC_PER_Q_STATS 3
-#define NN_CTRL_PATH_STATS 1
+#define NN_CTRL_PATH_STATS 4
#define SFP_SFF_REV_COMPLIANCE 1
@@ -454,6 +476,9 @@ static u8 *nfp_vnic_get_sw_stats_strings(struct net_device *netdev, u8 *data)
data = nfp_pr_et(data, "tx_tls_drop_no_sync_data");
data = nfp_pr_et(data, "hw_tls_no_space");
+ data = nfp_pr_et(data, "rx_tls_resync_req_ok");
+ data = nfp_pr_et(data, "rx_tls_resync_req_ign");
+ data = nfp_pr_et(data, "rx_tls_resync_sent");
return data;
}
@@ -502,6 +527,9 @@ static u64 *nfp_vnic_get_sw_stats(struct net_device *netdev, u64 *data)
*data++ = gathered_stats[j];
*data++ = atomic_read(&nn->ktls_no_space);
+ *data++ = atomic_read(&nn->ktls_rx_resync_req);
+ *data++ = atomic_read(&nn->ktls_rx_resync_ign);
+ *data++ = atomic_read(&nn->ktls_rx_resync_sent);
return data;
}
@@ -560,6 +588,65 @@ nfp_vnic_get_hw_stats(u64 *data, u8 __iomem *mem, unsigned int num_vecs)
return data;
}
+static unsigned int nfp_vnic_get_tlv_stats_count(struct nfp_net *nn)
+{
+ return nn->tlv_caps.vnic_stats_cnt + nn->max_r_vecs * 4;
+}
+
+static u8 *nfp_vnic_get_tlv_stats_strings(struct nfp_net *nn, u8 *data)
+{
+ unsigned int i, id;
+ u8 __iomem *mem;
+ u64 id_word = 0;
+
+ mem = nn->dp.ctrl_bar + nn->tlv_caps.vnic_stats_off;
+ for (i = 0; i < nn->tlv_caps.vnic_stats_cnt; i++) {
+ if (!(i % 4))
+ id_word = readq(mem + i * 2);
+
+ id = (u16)id_word;
+ id_word >>= 16;
+
+ if (id < ARRAY_SIZE(nfp_tlv_stat_names) &&
+ nfp_tlv_stat_names[id][0]) {
+ memcpy(data, nfp_tlv_stat_names[id], ETH_GSTRING_LEN);
+ data += ETH_GSTRING_LEN;
+ } else {
+ data = nfp_pr_et(data, "dev_unknown_stat%u", id);
+ }
+ }
+
+ for (i = 0; i < nn->max_r_vecs; i++) {
+ data = nfp_pr_et(data, "rxq_%u_pkts", i);
+ data = nfp_pr_et(data, "rxq_%u_bytes", i);
+ data = nfp_pr_et(data, "txq_%u_pkts", i);
+ data = nfp_pr_et(data, "txq_%u_bytes", i);
+ }
+
+ return data;
+}
+
+static u64 *nfp_vnic_get_tlv_stats(struct nfp_net *nn, u64 *data)
+{
+ u8 __iomem *mem;
+ unsigned int i;
+
+ mem = nn->dp.ctrl_bar + nn->tlv_caps.vnic_stats_off;
+ mem += roundup(2 * nn->tlv_caps.vnic_stats_cnt, 8);
+ for (i = 0; i < nn->tlv_caps.vnic_stats_cnt; i++)
+ *data++ = readq(mem + i * 8);
+
+ mem = nn->dp.ctrl_bar;
+ for (i = 0; i < nn->max_r_vecs; i++) {
+ *data++ = readq(mem + NFP_NET_CFG_RXR_STATS(i));
+ *data++ = readq(mem + NFP_NET_CFG_RXR_STATS(i) + 8);
+ *data++ = readq(mem + NFP_NET_CFG_TXR_STATS(i));
+ *data++ = readq(mem + NFP_NET_CFG_TXR_STATS(i) + 8);
+ }
+
+ return data;
+}
+
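
nfp_vnic_get_tlv_stats() above skips the 2-byte ID table, rounded up to an 8-byte boundary, before reading the 64-bit counters. The same layout arithmetic as a standalone model (buffer contents are illustrative):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define ROUNDUP8(x)	(((x) + 7) & ~7u)

static uint64_t read_stat(const uint8_t *tlv, unsigned int cnt,
			  unsigned int idx)
{
	const uint8_t *vals = tlv + ROUNDUP8(2 * cnt); /* skip ID table */
	uint64_t v;

	memcpy(&v, vals + idx * 8, sizeof(v));	/* 8B-aligned counters */
	return v;
}

int main(void)
{
	uint8_t tlv[8 + 3 * 8] = { 0 };	/* 3 IDs (6B) pad to 8, 3 u64s */
	uint64_t x = 42;

	memcpy(tlv + 8 + 2 * 8, &x, sizeof(x));	/* stat index 2 */
	printf("%llu\n", (unsigned long long)read_stat(tlv, 3, 2));
	return 0;
}
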
static unsigned int nfp_mac_get_stats_count(struct net_device *netdev)
{
struct nfp_port *port;
@@ -609,8 +696,12 @@ static void nfp_net_get_strings(struct net_device *netdev,
switch (stringset) {
case ETH_SS_STATS:
data = nfp_vnic_get_sw_stats_strings(netdev, data);
- data = nfp_vnic_get_hw_stats_strings(data, nn->max_r_vecs,
- false);
+ if (!nn->tlv_caps.vnic_stats_off)
+ data = nfp_vnic_get_hw_stats_strings(data,
+ nn->max_r_vecs,
+ false);
+ else
+ data = nfp_vnic_get_tlv_stats_strings(nn, data);
data = nfp_mac_get_stats_strings(netdev, data);
data = nfp_app_port_get_stats_strings(nn->port, data);
break;
@@ -624,7 +715,11 @@ nfp_net_get_stats(struct net_device *netdev, struct ethtool_stats *stats,
struct nfp_net *nn = netdev_priv(netdev);
data = nfp_vnic_get_sw_stats(netdev, data);
- data = nfp_vnic_get_hw_stats(data, nn->dp.ctrl_bar, nn->max_r_vecs);
+ if (!nn->tlv_caps.vnic_stats_off)
+ data = nfp_vnic_get_hw_stats(data, nn->dp.ctrl_bar,
+ nn->max_r_vecs);
+ else
+ data = nfp_vnic_get_tlv_stats(nn, data);
data = nfp_mac_get_stats(netdev, data);
data = nfp_app_port_get_stats(nn->port, data);
}
@@ -632,13 +727,18 @@ nfp_net_get_stats(struct net_device *netdev, struct ethtool_stats *stats,
static int nfp_net_get_sset_count(struct net_device *netdev, int sset)
{
struct nfp_net *nn = netdev_priv(netdev);
+ unsigned int cnt;
switch (sset) {
case ETH_SS_STATS:
- return nfp_vnic_get_sw_stats_count(netdev) +
- nfp_vnic_get_hw_stats_count(nn->max_r_vecs) +
- nfp_mac_get_stats_count(netdev) +
- nfp_app_port_get_stats_count(nn->port);
+ cnt = nfp_vnic_get_sw_stats_count(netdev);
+ if (!nn->tlv_caps.vnic_stats_off)
+ cnt += nfp_vnic_get_hw_stats_count(nn->max_r_vecs);
+ else
+ cnt += nfp_vnic_get_tlv_stats_count(nn);
+ cnt += nfp_mac_get_stats_count(netdev);
+ cnt += nfp_app_port_get_stats_count(nn->port);
+ return cnt;
default:
return -EOPNOTSUPP;
}
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_netvf_main.c b/drivers/net/ethernet/netronome/nfp/nfp_netvf_main.c
index e4977cdf7678..c0e2f4394aef 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_netvf_main.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_netvf_main.c
@@ -106,7 +106,7 @@ static int nfp_netvf_pci_probe(struct pci_dev *pdev,
* first NFP_NET_CFG_BAR_SZ of the BAR. This keeps the code
* identical for PF and VF drivers.
*/
- ctrl_bar = ioremap_nocache(pci_resource_start(pdev, NFP_NET_CTRL_BAR),
+ ctrl_bar = ioremap(pci_resource_start(pdev, NFP_NET_CTRL_BAR),
NFP_NET_CFG_BAR_SZ);
if (!ctrl_bar) {
dev_err(&pdev->dev,
@@ -200,7 +200,7 @@ static int nfp_netvf_pci_probe(struct pci_dev *pdev,
bar_sz = (rx_bar_off + rx_bar_sz) - bar_off;
map_addr = pci_resource_start(pdev, tx_bar_no) + bar_off;
- vf->q_bar = ioremap_nocache(map_addr, bar_sz);
+ vf->q_bar = ioremap(map_addr, bar_sz);
if (!vf->q_bar) {
nn_err(nn, "Failed to map resource %d\n", tx_bar_no);
err = -EIO;
@@ -216,7 +216,7 @@ static int nfp_netvf_pci_probe(struct pci_dev *pdev,
/* TX queues */
map_addr = pci_resource_start(pdev, tx_bar_no) + tx_bar_off;
- nn->tx_bar = ioremap_nocache(map_addr, tx_bar_sz);
+ nn->tx_bar = ioremap(map_addr, tx_bar_sz);
if (!nn->tx_bar) {
nn_err(nn, "Failed to map resource %d\n", tx_bar_no);
err = -EIO;
@@ -225,7 +225,7 @@ static int nfp_netvf_pci_probe(struct pci_dev *pdev,
/* RX queues */
map_addr = pci_resource_start(pdev, rx_bar_no) + rx_bar_off;
- nn->rx_bar = ioremap_nocache(map_addr, rx_bar_sz);
+ nn->rx_bar = ioremap(map_addr, rx_bar_sz);
if (!nn->rx_bar) {
nn_err(nn, "Failed to map resource %d\n", rx_bar_no);
err = -EIO;
diff --git a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp6000_pcie.c b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp6000_pcie.c
index 85d46f206b3c..b454db283aef 100644
--- a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp6000_pcie.c
+++ b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp6000_pcie.c
@@ -611,7 +611,7 @@ static int enable_bars(struct nfp6000_pcie *nfp, u16 interface)
/* Configure, and lock, BAR0.0 for General Target use (MSI-X SRAM) */
bar = &nfp->bar[0];
if (nfp_bar_resource_len(bar) >= NFP_PCI_MIN_MAP_SIZE)
- bar->iomem = ioremap_nocache(nfp_bar_resource_start(bar),
+ bar->iomem = ioremap(nfp_bar_resource_start(bar),
nfp_bar_resource_len(bar));
if (bar->iomem) {
int pf;
@@ -677,7 +677,7 @@ static int enable_bars(struct nfp6000_pcie *nfp, u16 interface)
}
bar = &nfp->bar[4 + i];
- bar->iomem = ioremap_nocache(nfp_bar_resource_start(bar),
+ bar->iomem = ioremap(nfp_bar_resource_start(bar),
nfp_bar_resource_len(bar));
if (bar->iomem) {
msg += snprintf(msg, end - msg,
@@ -858,7 +858,7 @@ static int nfp6000_area_acquire(struct nfp_cpp_area *area)
priv->iomem = priv->bar->iomem + priv->bar_offset;
else
/* Must have been too big. Sub-allocate. */
- priv->iomem = ioremap_nocache(priv->phys, priv->size);
+ priv->iomem = ioremap(priv->phys, priv->size);
if (IS_ERR_OR_NULL(priv->iomem)) {
dev_err(nfp->dev, "Can't ioremap() a %d byte region of BAR %d\n",
diff --git a/drivers/net/ethernet/nvidia/forcedeth.c b/drivers/net/ethernet/nvidia/forcedeth.c
index 6b54cb3b681d..2fc10a36afa4 100644
--- a/drivers/net/ethernet/nvidia/forcedeth.c
+++ b/drivers/net/ethernet/nvidia/forcedeth.c
@@ -2739,7 +2739,7 @@ static int nv_tx_done_optimized(struct net_device *dev, int limit)
* nv_tx_timeout: dev->tx_timeout function
* Called with netif_tx_lock held.
*/
-static void nv_tx_timeout(struct net_device *dev)
+static void nv_tx_timeout(struct net_device *dev, unsigned int txqueue)
{
struct fe_priv *np = netdev_priv(dev);
u8 __iomem *base = get_hwbase(dev);
diff --git a/drivers/net/ethernet/nxp/lpc_eth.c b/drivers/net/ethernet/nxp/lpc_eth.c
index 656169214cdb..d20cf03a3ea0 100644
--- a/drivers/net/ethernet/nxp/lpc_eth.c
+++ b/drivers/net/ethernet/nxp/lpc_eth.c
@@ -1149,19 +1149,6 @@ static void lpc_eth_set_multicast_list(struct net_device *ndev)
spin_unlock_irqrestore(&pldat->lock, flags);
}
-static int lpc_eth_ioctl(struct net_device *ndev, struct ifreq *req, int cmd)
-{
- struct phy_device *phydev = ndev->phydev;
-
- if (!netif_running(ndev))
- return -EINVAL;
-
- if (!phydev)
- return -ENODEV;
-
- return phy_mii_ioctl(phydev, req, cmd);
-}
-
static int lpc_eth_open(struct net_device *ndev)
{
struct netdata_local *pldat = netdev_priv(ndev);
@@ -1229,7 +1216,7 @@ static const struct net_device_ops lpc_netdev_ops = {
.ndo_stop = lpc_eth_close,
.ndo_start_xmit = lpc_eth_hard_start_xmit,
.ndo_set_rx_mode = lpc_eth_set_multicast_list,
- .ndo_do_ioctl = lpc_eth_ioctl,
+ .ndo_do_ioctl = phy_do_ioctl_running,
.ndo_set_mac_address = lpc_set_mac_address,
.ndo_validate_addr = eth_validate_addr,
};
diff --git a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c
index 18e6d87c607b..73ec195fbc30 100644
--- a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c
+++ b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c
@@ -2271,7 +2271,7 @@ static int pch_gbe_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
* pch_gbe_tx_timeout - Respond to a Tx Hang
* @netdev: Network interface device structure
*/
-static void pch_gbe_tx_timeout(struct net_device *netdev)
+static void pch_gbe_tx_timeout(struct net_device *netdev, unsigned int txqueue)
{
struct pch_gbe_adapter *adapter = netdev_priv(netdev);
diff --git a/drivers/net/ethernet/packetengines/hamachi.c b/drivers/net/ethernet/packetengines/hamachi.c
index eee883a2aa8d..70816d2e2990 100644
--- a/drivers/net/ethernet/packetengines/hamachi.c
+++ b/drivers/net/ethernet/packetengines/hamachi.c
@@ -548,7 +548,7 @@ static void mdio_write(struct net_device *dev, int phy_id, int location, int val
static int hamachi_open(struct net_device *dev);
static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
static void hamachi_timer(struct timer_list *t);
-static void hamachi_tx_timeout(struct net_device *dev);
+static void hamachi_tx_timeout(struct net_device *dev, unsigned int txqueue);
static void hamachi_init_ring(struct net_device *dev);
static netdev_tx_t hamachi_start_xmit(struct sk_buff *skb,
struct net_device *dev);
@@ -1042,7 +1042,7 @@ static void hamachi_timer(struct timer_list *t)
add_timer(&hmp->timer);
}
-static void hamachi_tx_timeout(struct net_device *dev)
+static void hamachi_tx_timeout(struct net_device *dev, unsigned int txqueue)
{
int i;
struct hamachi_private *hmp = netdev_priv(dev);
diff --git a/drivers/net/ethernet/packetengines/yellowfin.c b/drivers/net/ethernet/packetengines/yellowfin.c
index 5113ee647090..520779f05e1a 100644
--- a/drivers/net/ethernet/packetengines/yellowfin.c
+++ b/drivers/net/ethernet/packetengines/yellowfin.c
@@ -344,7 +344,7 @@ static void mdio_write(void __iomem *ioaddr, int phy_id, int location, int value
static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
static int yellowfin_open(struct net_device *dev);
static void yellowfin_timer(struct timer_list *t);
-static void yellowfin_tx_timeout(struct net_device *dev);
+static void yellowfin_tx_timeout(struct net_device *dev, unsigned int txqueue);
static int yellowfin_init_ring(struct net_device *dev);
static netdev_tx_t yellowfin_start_xmit(struct sk_buff *skb,
struct net_device *dev);
@@ -677,7 +677,7 @@ static void yellowfin_timer(struct timer_list *t)
add_timer(&yp->timer);
}
-static void yellowfin_tx_timeout(struct net_device *dev)
+static void yellowfin_tx_timeout(struct net_device *dev, unsigned int txqueue)
{
struct yellowfin_private *yp = netdev_priv(dev);
void __iomem *ioaddr = yp->base;
diff --git a/drivers/net/ethernet/pensando/ionic/ionic.h b/drivers/net/ethernet/pensando/ionic/ionic.h
index 98e102af7756..bb106a32f416 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic.h
+++ b/drivers/net/ethernet/pensando/ionic/ionic.h
@@ -12,19 +12,27 @@ struct ionic_lif;
#define IONIC_DRV_NAME "ionic"
#define IONIC_DRV_DESCRIPTION "Pensando Ethernet NIC Driver"
-#define IONIC_DRV_VERSION "0.18.0-k"
+#define IONIC_DRV_VERSION "0.20.0-k"
#define PCI_VENDOR_ID_PENSANDO 0x1dd8
#define PCI_DEVICE_ID_PENSANDO_IONIC_ETH_PF 0x1002
#define PCI_DEVICE_ID_PENSANDO_IONIC_ETH_VF 0x1003
-#define IONIC_SUBDEV_ID_NAPLES_25 0x4000
-#define IONIC_SUBDEV_ID_NAPLES_100_4 0x4001
-#define IONIC_SUBDEV_ID_NAPLES_100_8 0x4002
-
#define DEVCMD_TIMEOUT 10
+struct ionic_vf {
+ u16 index;
+ u8 macaddr[6];
+ __le32 maxrate;
+ __le16 vlanid;
+ u8 spoofchk;
+ u8 trusted;
+ u8 linkstate;
+ dma_addr_t stats_pa;
+ struct ionic_lif_stats stats;
+};
+
struct ionic {
struct pci_dev *pdev;
struct device *dev;
@@ -46,6 +54,9 @@ struct ionic {
DECLARE_BITMAP(intrs, IONIC_INTR_CTRL_REGS_MAX);
struct work_struct nb_work;
struct notifier_block nb;
+ struct rw_semaphore vf_op_lock; /* lock for VF operations */
+ struct ionic_vf *vfs;
+ int num_vfs;
struct timer_list watchdog_timer;
int watchdog_period;
};
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_bus_pci.c b/drivers/net/ethernet/pensando/ionic/ionic_bus_pci.c
index 9a9ab8cb2cb3..448d7b23b2f7 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic_bus_pci.c
+++ b/drivers/net/ethernet/pensando/ionic/ionic_bus_pci.c
@@ -104,10 +104,112 @@ void ionic_bus_unmap_dbpage(struct ionic *ionic, void __iomem *page)
iounmap(page);
}
+static void ionic_vf_dealloc_locked(struct ionic *ionic)
+{
+ struct ionic_vf *v;
+ dma_addr_t dma = 0;
+ int i;
+
+ if (!ionic->vfs)
+ return;
+
+ for (i = ionic->num_vfs - 1; i >= 0; i--) {
+ v = &ionic->vfs[i];
+
+ if (v->stats_pa) {
+ (void)ionic_set_vf_config(ionic, i,
+ IONIC_VF_ATTR_STATSADDR,
+ (u8 *)&dma);
+ dma_unmap_single(ionic->dev, v->stats_pa,
+ sizeof(v->stats), DMA_FROM_DEVICE);
+ v->stats_pa = 0;
+ }
+ }
+
+ kfree(ionic->vfs);
+ ionic->vfs = NULL;
+ ionic->num_vfs = 0;
+}
+
+static void ionic_vf_dealloc(struct ionic *ionic)
+{
+ down_write(&ionic->vf_op_lock);
+ ionic_vf_dealloc_locked(ionic);
+ up_write(&ionic->vf_op_lock);
+}
+
+static int ionic_vf_alloc(struct ionic *ionic, int num_vfs)
+{
+ struct ionic_vf *v;
+ int err = 0;
+ int i;
+
+ down_write(&ionic->vf_op_lock);
+
+ ionic->vfs = kcalloc(num_vfs, sizeof(struct ionic_vf), GFP_KERNEL);
+ if (!ionic->vfs) {
+ err = -ENOMEM;
+ goto out;
+ }
+
+ for (i = 0; i < num_vfs; i++) {
+ v = &ionic->vfs[i];
+ v->stats_pa = dma_map_single(ionic->dev, &v->stats,
+ sizeof(v->stats), DMA_FROM_DEVICE);
+ if (dma_mapping_error(ionic->dev, v->stats_pa)) {
+ v->stats_pa = 0;
+ err = -ENODEV;
+ goto out;
+ }
+
+ /* ignore failures from older FW, we just won't get stats */
+ (void)ionic_set_vf_config(ionic, i, IONIC_VF_ATTR_STATSADDR,
+ (u8 *)&v->stats_pa);
+ ionic->num_vfs++;
+ }
+
+out:
+ if (err)
+ ionic_vf_dealloc_locked(ionic);
+ up_write(&ionic->vf_op_lock);
+ return err;
+}
+
+static int ionic_sriov_configure(struct pci_dev *pdev, int num_vfs)
+{
+ struct ionic *ionic = pci_get_drvdata(pdev);
+ struct device *dev = ionic->dev;
+ int ret = 0;
+
+ if (num_vfs > 0) {
+ ret = pci_enable_sriov(pdev, num_vfs);
+ if (ret) {
+ dev_err(dev, "Cannot enable SRIOV: %d\n", ret);
+ goto out;
+ }
+
+ ret = ionic_vf_alloc(ionic, num_vfs);
+ if (ret) {
+ dev_err(dev, "Cannot alloc VFs: %d\n", ret);
+ pci_disable_sriov(pdev);
+ goto out;
+ }
+
+ ret = num_vfs;
+ } else {
+ pci_disable_sriov(pdev);
+ ionic_vf_dealloc(ionic);
+ }
+
+out:
+ return ret;
+}
+
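
ionic_sriov_configure() follows the standard .sriov_configure contract: a positive return reports how many VFs are now enabled, zero means disabled, negative is an errno, and a failed driver-side allocation unwinds the PCI enable. A sketch of that control flow with stubbed helpers (the fake_* names are stand-ins, not real APIs):

#include <stdio.h>

static int fake_pci_enable_sriov(int num_vfs) { (void)num_vfs; return 0; }
static void fake_pci_disable_sriov(void) { }
static int fake_vf_alloc(int num_vfs) { (void)num_vfs; return 0; }
static void fake_vf_dealloc(void) { }

static int sriov_configure(int num_vfs)
{
	int ret;

	if (num_vfs > 0) {
		ret = fake_pci_enable_sriov(num_vfs);
		if (ret)
			return ret;
		ret = fake_vf_alloc(num_vfs);
		if (ret) {
			fake_pci_disable_sriov();	/* unwind on failure */
			return ret;
		}
		return num_vfs;	/* success: report VFs now active */
	}
	fake_pci_disable_sriov();
	fake_vf_dealloc();
	return 0;
}

int main(void)
{
	printf("enabled: %d\n", sriov_configure(4));
	printf("disabled: %d\n", sriov_configure(0));
	return 0;
}
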
static int ionic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
struct device *dev = &pdev->dev;
struct ionic *ionic;
+ int num_vfs;
int err;
ionic = ionic_devlink_alloc(dev);
@@ -206,6 +308,15 @@ static int ionic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
goto err_out_free_lifs;
}
+ init_rwsem(&ionic->vf_op_lock);
+ num_vfs = pci_num_vf(pdev);
+ if (num_vfs) {
+ dev_info(dev, "%d VFs found already enabled\n", num_vfs);
+ err = ionic_vf_alloc(ionic, num_vfs);
+ if (err)
+ dev_err(dev, "Cannot enable existing VFs: %d\n", err);
+ }
+
err = ionic_lifs_register(ionic);
if (err) {
dev_err(dev, "Cannot register LIFs: %d, aborting\n", err);
@@ -223,6 +334,7 @@ static int ionic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
err_out_deregister_lifs:
ionic_lifs_unregister(ionic);
err_out_deinit_lifs:
+ ionic_vf_dealloc(ionic);
ionic_lifs_deinit(ionic);
err_out_free_lifs:
ionic_lifs_free(ionic);
@@ -279,6 +391,7 @@ static struct pci_driver ionic_driver = {
.id_table = ionic_id_table,
.probe = ionic_probe,
.remove = ionic_remove,
+ .sriov_configure = ionic_sriov_configure,
};
int ionic_bus_register_driver(void)
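
[Reviewer sketch, not part of the patch] For context, .sriov_configure is
invoked by the PCI core when userspace writes to the device's sriov_numvfs
sysfs attribute; a non-negative return is taken as the number of VFs now
enabled, which is why the function returns num_vfs on success. Roughly:

	/* simplified view of the PCI core's sriov_numvfs store path */
	ret = pdev->driver->sriov_configure(pdev, num_vfs);
	if (ret < 0)
		return ret;	/* errno from the driver */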
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_dev.c b/drivers/net/ethernet/pensando/ionic/ionic_dev.c
index 5f9d2ec70446..87f82f36812f 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic_dev.c
+++ b/drivers/net/ethernet/pensando/ionic/ionic_dev.c
@@ -286,6 +286,64 @@ void ionic_dev_cmd_port_pause(struct ionic_dev *idev, u8 pause_type)
ionic_dev_cmd_go(idev, &cmd);
}
+/* VF commands */
+int ionic_set_vf_config(struct ionic *ionic, int vf, u8 attr, u8 *data)
+{
+ union ionic_dev_cmd cmd = {
+ .vf_setattr.opcode = IONIC_CMD_VF_SETATTR,
+ .vf_setattr.attr = attr,
+ .vf_setattr.vf_index = vf,
+ };
+ int err;
+
+ switch (attr) {
+ case IONIC_VF_ATTR_SPOOFCHK:
+ cmd.vf_setattr.spoofchk = *data;
+ dev_dbg(ionic->dev, "%s: vf %d spoof %d\n",
+ __func__, vf, *data);
+ break;
+ case IONIC_VF_ATTR_TRUST:
+ cmd.vf_setattr.trust = *data;
+ dev_dbg(ionic->dev, "%s: vf %d trust %d\n",
+ __func__, vf, *data);
+ break;
+ case IONIC_VF_ATTR_LINKSTATE:
+ cmd.vf_setattr.linkstate = *data;
+ dev_dbg(ionic->dev, "%s: vf %d linkstate %d\n",
+ __func__, vf, *data);
+ break;
+ case IONIC_VF_ATTR_MAC:
+ ether_addr_copy(cmd.vf_setattr.macaddr, data);
+ dev_dbg(ionic->dev, "%s: vf %d macaddr %pM\n",
+ __func__, vf, data);
+ break;
+ case IONIC_VF_ATTR_VLAN:
+ cmd.vf_setattr.vlanid = cpu_to_le16(*(u16 *)data);
+ dev_dbg(ionic->dev, "%s: vf %d vlan %d\n",
+ __func__, vf, *(u16 *)data);
+ break;
+ case IONIC_VF_ATTR_RATE:
+ cmd.vf_setattr.maxrate = cpu_to_le32(*(u32 *)data);
+ dev_dbg(ionic->dev, "%s: vf %d maxrate %d\n",
+ __func__, vf, *(u32 *)data);
+ break;
+ case IONIC_VF_ATTR_STATSADDR:
+ cmd.vf_setattr.stats_pa = cpu_to_le64(*(u64 *)data);
+ dev_dbg(ionic->dev, "%s: vf %d stats_pa 0x%08llx\n",
+ __func__, vf, *(u64 *)data);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ mutex_lock(&ionic->dev_cmd_lock);
+ ionic_dev_cmd_go(&ionic->idev, &cmd);
+ err = ionic_dev_cmd_wait(ionic, DEVCMD_TIMEOUT);
+ mutex_unlock(&ionic->dev_cmd_lock);
+
+ return err;
+}
+
/* LIF commands */
void ionic_dev_cmd_lif_identify(struct ionic_dev *idev, u8 type, u8 ver)
{
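
[Reviewer sketch, not part of the patch] Minimal usage of the new helper,
mirroring what ionic_set_vf_vlan() does later in this series; values are
illustrative:

	u16 vlan = 100;
	int err;

	err = ionic_set_vf_config(ionic, vf, IONIC_VF_ATTR_VLAN, (u8 *)&vlan);
	if (!err)
		ionic->vfs[vf].vlanid = vlan;	/* cache only on success */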
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_dev.h b/drivers/net/ethernet/pensando/ionic/ionic_dev.h
index 4665c5dc5324..7838e342c4fd 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic_dev.h
+++ b/drivers/net/ethernet/pensando/ionic/ionic_dev.h
@@ -113,6 +113,12 @@ static_assert(sizeof(struct ionic_rxq_desc) == 16);
static_assert(sizeof(struct ionic_rxq_sg_desc) == 128);
static_assert(sizeof(struct ionic_rxq_comp) == 16);
+/* SR/IOV */
+static_assert(sizeof(struct ionic_vf_setattr_cmd) == 64);
+static_assert(sizeof(struct ionic_vf_setattr_comp) == 16);
+static_assert(sizeof(struct ionic_vf_getattr_cmd) == 64);
+static_assert(sizeof(struct ionic_vf_getattr_comp) == 16);
+
struct ionic_devinfo {
u8 asic_type;
u8 asic_rev;
@@ -275,6 +281,7 @@ void ionic_dev_cmd_port_autoneg(struct ionic_dev *idev, u8 an_enable);
void ionic_dev_cmd_port_fec(struct ionic_dev *idev, u8 fec_type);
void ionic_dev_cmd_port_pause(struct ionic_dev *idev, u8 pause_type);
+int ionic_set_vf_config(struct ionic *ionic, int vf, u8 attr, u8 *data);
void ionic_dev_cmd_lif_identify(struct ionic_dev *idev, u8 type, u8 ver);
void ionic_dev_cmd_lif_init(struct ionic_dev *idev, u16 lif_index,
dma_addr_t addr);
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_if.h b/drivers/net/ethernet/pensando/ionic/ionic_if.h
index 39317cdfa6cf..f131adad96e3 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic_if.h
+++ b/drivers/net/ethernet/pensando/ionic/ionic_if.h
@@ -51,6 +51,10 @@ enum ionic_cmd_opcode {
IONIC_CMD_RDMA_CREATE_CQ = 52,
IONIC_CMD_RDMA_CREATE_ADMINQ = 53,
+ /* SR/IOV commands */
+ IONIC_CMD_VF_GETATTR = 60,
+ IONIC_CMD_VF_SETATTR = 61,
+
/* QoS commands */
IONIC_CMD_QOS_CLASS_IDENTIFY = 240,
IONIC_CMD_QOS_CLASS_INIT = 241,
@@ -1639,6 +1643,93 @@ enum ionic_qos_sched_type {
IONIC_QOS_SCHED_TYPE_DWRR = 1, /* Deficit weighted round-robin */
};
+enum ionic_vf_attr {
+ IONIC_VF_ATTR_SPOOFCHK = 1,
+ IONIC_VF_ATTR_TRUST = 2,
+ IONIC_VF_ATTR_MAC = 3,
+ IONIC_VF_ATTR_LINKSTATE = 4,
+ IONIC_VF_ATTR_VLAN = 5,
+ IONIC_VF_ATTR_RATE = 6,
+ IONIC_VF_ATTR_STATSADDR = 7,
+};
+
+/**
+ * enum ionic_vf_link_status - VF link status
+ */
+enum ionic_vf_link_status {
+ IONIC_VF_LINK_STATUS_AUTO = 0, /* link state of the uplink */
+ IONIC_VF_LINK_STATUS_UP = 1, /* link is always up */
+ IONIC_VF_LINK_STATUS_DOWN = 2, /* link is always down */
+};
+
+/**
+ * struct ionic_vf_setattr_cmd - Set VF attributes on the NIC
+ * @opcode: Opcode
+ * @attr: Attribute type (enum ionic_vf_attr)
+ * @vf_index: VF index
+ * @macaddr: MAC address
+ * @vlanid: VLAN ID
+ * @maxrate: max Tx rate in Mbps
+ * @spoofchk: enable address spoof checking
+ * @trust: enable VF trust
+ * @linkstate: set link up or down
+ * @stats_pa: DMA address for VF stats
+ */
+struct ionic_vf_setattr_cmd {
+ u8 opcode;
+ u8 attr;
+ __le16 vf_index;
+ union {
+ u8 macaddr[6];
+ __le16 vlanid;
+ __le32 maxrate;
+ u8 spoofchk;
+ u8 trust;
+ u8 linkstate;
+ __le64 stats_pa;
+ u8 pad[60];
+ };
+};
+
+struct ionic_vf_setattr_comp {
+ u8 status;
+ u8 attr;
+ __le16 vf_index;
+ __le16 comp_index;
+ u8 rsvd[9];
+ u8 color;
+};
+
+/**
+ * struct ionic_vf_getattr_cmd - Get VF attributes from the NIC
+ * @opcode: Opcode
+ * @attr: Attribute type (enum ionic_vf_attr)
+ * @vf_index: VF index
+ */
+struct ionic_vf_getattr_cmd {
+ u8 opcode;
+ u8 attr;
+ __le16 vf_index;
+ u8 rsvd[60];
+};
+
+struct ionic_vf_getattr_comp {
+ u8 status;
+ u8 attr;
+ __le16 vf_index;
+ union {
+ u8 macaddr[6];
+ __le16 vlanid;
+ __le32 maxrate;
+ u8 spoofchk;
+ u8 trust;
+ u8 linkstate;
+ __le64 stats_pa;
+ u8 pad[11];
+ };
+ u8 color;
+};
+
/**
* union ionic_qos_config - Qos configuration structure
* @flags: Configuration flags
@@ -2289,6 +2380,9 @@ union ionic_dev_cmd {
struct ionic_port_getattr_cmd port_getattr;
struct ionic_port_setattr_cmd port_setattr;
+ struct ionic_vf_setattr_cmd vf_setattr;
+ struct ionic_vf_getattr_cmd vf_getattr;
+
struct ionic_lif_identify_cmd lif_identify;
struct ionic_lif_init_cmd lif_init;
struct ionic_lif_reset_cmd lif_reset;
@@ -2318,6 +2412,9 @@ union ionic_dev_cmd_comp {
struct ionic_port_getattr_comp port_getattr;
struct ionic_port_setattr_comp port_setattr;
+ struct ionic_vf_setattr_comp vf_setattr;
+ struct ionic_vf_getattr_comp vf_getattr;
+
struct ionic_lif_identify_comp lif_identify;
struct ionic_lif_init_comp lif_init;
struct ionic_lif_reset_comp lif_reset;
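
[Reviewer sketch, not part of the patch] The setattr command is a fixed
64-byte dev command whose payload union is selected by attr (the
static_asserts added in ionic_dev.h pin the 64/16-byte command/completion
sizes); multi-byte fields are little-endian on the wire. Filling it for
the MAC attribute looks like:

	union ionic_dev_cmd cmd = {
		.vf_setattr.opcode = IONIC_CMD_VF_SETATTR,
		.vf_setattr.attr = IONIC_VF_ATTR_MAC,
		.vf_setattr.vf_index = cpu_to_le16(vf),	/* vf_index is __le16 */
	};

	ether_addr_copy(cmd.vf_setattr.macaddr, mac);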
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_lif.c b/drivers/net/ethernet/pensando/ionic/ionic_lif.c
index ef8258713369..191271f6260d 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic_lif.c
+++ b/drivers/net/ethernet/pensando/ionic/ionic_lif.c
@@ -1285,7 +1285,7 @@ static void ionic_tx_timeout_work(struct work_struct *ws)
rtnl_unlock();
}
-static void ionic_tx_timeout(struct net_device *netdev)
+static void ionic_tx_timeout(struct net_device *netdev, unsigned int txqueue)
{
struct ionic_lif *lif = netdev_priv(netdev);
@@ -1619,6 +1619,227 @@ int ionic_stop(struct net_device *netdev)
return err;
}
+static int ionic_get_vf_config(struct net_device *netdev,
+ int vf, struct ifla_vf_info *ivf)
+{
+ struct ionic_lif *lif = netdev_priv(netdev);
+ struct ionic *ionic = lif->ionic;
+ int ret = 0;
+
+ down_read(&ionic->vf_op_lock);
+
+ if (vf >= pci_num_vf(ionic->pdev) || !ionic->vfs) {
+ ret = -EINVAL;
+ } else {
+ ivf->vf = vf;
+ ivf->vlan = ionic->vfs[vf].vlanid;
+ ivf->qos = 0;
+ ivf->spoofchk = ionic->vfs[vf].spoofchk;
+ ivf->linkstate = ionic->vfs[vf].linkstate;
+ ivf->max_tx_rate = ionic->vfs[vf].maxrate;
+ ivf->trusted = ionic->vfs[vf].trusted;
+ ether_addr_copy(ivf->mac, ionic->vfs[vf].macaddr);
+ }
+
+ up_read(&ionic->vf_op_lock);
+ return ret;
+}
+
+static int ionic_get_vf_stats(struct net_device *netdev, int vf,
+ struct ifla_vf_stats *vf_stats)
+{
+ struct ionic_lif *lif = netdev_priv(netdev);
+ struct ionic *ionic = lif->ionic;
+ struct ionic_lif_stats *vs;
+ int ret = 0;
+
+ down_read(&ionic->vf_op_lock);
+
+ if (vf >= pci_num_vf(ionic->pdev) || !ionic->vfs) {
+ ret = -EINVAL;
+ } else {
+ memset(vf_stats, 0, sizeof(*vf_stats));
+ vs = &ionic->vfs[vf].stats;
+
+ vf_stats->rx_packets = le64_to_cpu(vs->rx_ucast_packets);
+ vf_stats->tx_packets = le64_to_cpu(vs->tx_ucast_packets);
+ vf_stats->rx_bytes = le64_to_cpu(vs->rx_ucast_bytes);
+ vf_stats->tx_bytes = le64_to_cpu(vs->tx_ucast_bytes);
+ vf_stats->broadcast = le64_to_cpu(vs->rx_bcast_packets);
+ vf_stats->multicast = le64_to_cpu(vs->rx_mcast_packets);
+ vf_stats->rx_dropped = le64_to_cpu(vs->rx_ucast_drop_packets) +
+ le64_to_cpu(vs->rx_mcast_drop_packets) +
+ le64_to_cpu(vs->rx_bcast_drop_packets);
+ vf_stats->tx_dropped = le64_to_cpu(vs->tx_ucast_drop_packets) +
+ le64_to_cpu(vs->tx_mcast_drop_packets) +
+ le64_to_cpu(vs->tx_bcast_drop_packets);
+ }
+
+ up_read(&ionic->vf_op_lock);
+ return ret;
+}
+
+static int ionic_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
+{
+ struct ionic_lif *lif = netdev_priv(netdev);
+ struct ionic *ionic = lif->ionic;
+ int ret;
+
+ if (!(is_zero_ether_addr(mac) || is_valid_ether_addr(mac)))
+ return -EINVAL;
+
+ down_read(&ionic->vf_op_lock);
+
+ if (vf >= pci_num_vf(ionic->pdev) || !ionic->vfs) {
+ ret = -EINVAL;
+ } else {
+ ret = ionic_set_vf_config(ionic, vf, IONIC_VF_ATTR_MAC, mac);
+ if (!ret)
+ ether_addr_copy(ionic->vfs[vf].macaddr, mac);
+ }
+
+ up_read(&ionic->vf_op_lock);
+ return ret;
+}
+
+static int ionic_set_vf_vlan(struct net_device *netdev, int vf, u16 vlan,
+ u8 qos, __be16 proto)
+{
+ struct ionic_lif *lif = netdev_priv(netdev);
+ struct ionic *ionic = lif->ionic;
+ int ret;
+
+ /* until someday when we support qos */
+ if (qos)
+ return -EINVAL;
+
+ if (vlan > 4095)
+ return -EINVAL;
+
+ if (proto != htons(ETH_P_8021Q))
+ return -EPROTONOSUPPORT;
+
+ down_read(&ionic->vf_op_lock);
+
+ if (vf >= pci_num_vf(ionic->pdev) || !ionic->vfs) {
+ ret = -EINVAL;
+ } else {
+ ret = ionic_set_vf_config(ionic, vf,
+ IONIC_VF_ATTR_VLAN, (u8 *)&vlan);
+ if (!ret)
+ ionic->vfs[vf].vlanid = vlan;
+ }
+
+ up_read(&ionic->vf_op_lock);
+ return ret;
+}
+
+static int ionic_set_vf_rate(struct net_device *netdev, int vf,
+ int tx_min, int tx_max)
+{
+ struct ionic_lif *lif = netdev_priv(netdev);
+ struct ionic *ionic = lif->ionic;
+ int ret;
+
+ /* setting the min just seems silly */
+ if (tx_min)
+ return -EINVAL;
+
+ down_write(&ionic->vf_op_lock);
+
+ if (vf >= pci_num_vf(ionic->pdev) || !ionic->vfs) {
+ ret = -EINVAL;
+ } else {
+ ret = ionic_set_vf_config(ionic, vf,
+ IONIC_VF_ATTR_RATE, (u8 *)&tx_max);
+ if (!ret)
+ lif->ionic->vfs[vf].maxrate = tx_max;
+ }
+
+ up_write(&ionic->vf_op_lock);
+ return ret;
+}
+
+static int ionic_set_vf_spoofchk(struct net_device *netdev, int vf, bool set)
+{
+ struct ionic_lif *lif = netdev_priv(netdev);
+ struct ionic *ionic = lif->ionic;
+ u8 data = set; /* convert to u8 for config */
+ int ret;
+
+ down_write(&ionic->vf_op_lock);
+
+ if (vf >= pci_num_vf(ionic->pdev) || !ionic->vfs) {
+ ret = -EINVAL;
+ } else {
+ ret = ionic_set_vf_config(ionic, vf,
+ IONIC_VF_ATTR_SPOOFCHK, &data);
+ if (!ret)
+ ionic->vfs[vf].spoofchk = data;
+ }
+
+ up_write(&ionic->vf_op_lock);
+ return ret;
+}
+
+static int ionic_set_vf_trust(struct net_device *netdev, int vf, bool set)
+{
+ struct ionic_lif *lif = netdev_priv(netdev);
+ struct ionic *ionic = lif->ionic;
+ u8 data = set; /* convert to u8 for config */
+ int ret;
+
+ down_write(&ionic->vf_op_lock);
+
+ if (vf >= pci_num_vf(ionic->pdev) || !ionic->vfs) {
+ ret = -EINVAL;
+ } else {
+ ret = ionic_set_vf_config(ionic, vf,
+ IONIC_VF_ATTR_TRUST, &data);
+ if (!ret)
+ ionic->vfs[vf].trusted = data;
+ }
+
+ up_write(&ionic->vf_op_lock);
+ return ret;
+}
+
+static int ionic_set_vf_link_state(struct net_device *netdev, int vf, int set)
+{
+ struct ionic_lif *lif = netdev_priv(netdev);
+ struct ionic *ionic = lif->ionic;
+ u8 data;
+ int ret;
+
+ switch (set) {
+ case IFLA_VF_LINK_STATE_ENABLE:
+ data = IONIC_VF_LINK_STATUS_UP;
+ break;
+ case IFLA_VF_LINK_STATE_DISABLE:
+ data = IONIC_VF_LINK_STATUS_DOWN;
+ break;
+ case IFLA_VF_LINK_STATE_AUTO:
+ data = IONIC_VF_LINK_STATUS_AUTO;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ down_write(&ionic->vf_op_lock);
+
+ if (vf >= pci_num_vf(ionic->pdev) || !ionic->vfs) {
+ ret = -EINVAL;
+ } else {
+ ret = ionic_set_vf_config(ionic, vf,
+ IONIC_VF_ATTR_LINKSTATE, &data);
+ if (!ret)
+ ionic->vfs[vf].linkstate = set;
+ }
+
+ up_write(&ionic->vf_op_lock);
+ return ret;
+}
+
static const struct net_device_ops ionic_netdev_ops = {
.ndo_open = ionic_open,
.ndo_stop = ionic_stop,
@@ -1632,6 +1853,14 @@ static const struct net_device_ops ionic_netdev_ops = {
.ndo_change_mtu = ionic_change_mtu,
.ndo_vlan_rx_add_vid = ionic_vlan_rx_add_vid,
.ndo_vlan_rx_kill_vid = ionic_vlan_rx_kill_vid,
+ .ndo_set_vf_vlan = ionic_set_vf_vlan,
+ .ndo_set_vf_trust = ionic_set_vf_trust,
+ .ndo_set_vf_mac = ionic_set_vf_mac,
+ .ndo_set_vf_rate = ionic_set_vf_rate,
+ .ndo_set_vf_spoofchk = ionic_set_vf_spoofchk,
+ .ndo_get_vf_config = ionic_get_vf_config,
+ .ndo_set_vf_link_state = ionic_set_vf_link_state,
+ .ndo_get_vf_stats = ionic_get_vf_stats,
};
int ionic_reset_queues(struct ionic_lif *lif)
@@ -1965,18 +2194,22 @@ static int ionic_station_set(struct ionic_lif *lif)
if (err)
return err;
+ if (is_zero_ether_addr(ctx.comp.lif_getattr.mac))
+ return 0;
+
memcpy(addr.sa_data, ctx.comp.lif_getattr.mac, netdev->addr_len);
addr.sa_family = AF_INET;
err = eth_prepare_mac_addr_change(netdev, &addr);
- if (err)
- return err;
-
- if (!is_zero_ether_addr(netdev->dev_addr)) {
- netdev_dbg(lif->netdev, "deleting station MAC addr %pM\n",
- netdev->dev_addr);
- ionic_lif_addr(lif, netdev->dev_addr, false);
+ if (err) {
+ netdev_warn(lif->netdev, "ignoring bad MAC addr from NIC %pM\n",
+ addr.sa_data);
+ return 0;
}
+ netdev_dbg(lif->netdev, "deleting station MAC addr %pM\n",
+ netdev->dev_addr);
+ ionic_lif_addr(lif, netdev->dev_addr, false);
+
eth_commit_mac_addr_change(netdev, &addr);
netdev_dbg(lif->netdev, "adding station MAC addr %pM\n",
netdev->dev_addr);
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_lif.h b/drivers/net/ethernet/pensando/ionic/ionic_lif.h
index a55fd1f8c31b..9c5a7dd45f9d 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic_lif.h
+++ b/drivers/net/ethernet/pensando/ionic/ionic_lif.h
@@ -37,6 +37,7 @@ struct ionic_rx_stats {
u64 csum_complete;
u64 csum_error;
u64 buffers_posted;
+ u64 dropped;
};
#define IONIC_QCQ_F_INITED BIT(0)
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_main.c b/drivers/net/ethernet/pensando/ionic/ionic_main.c
index 3590ea7fd88a..a8e3fb73b465 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic_main.c
+++ b/drivers/net/ethernet/pensando/ionic/ionic_main.c
@@ -165,6 +165,10 @@ static const char *ionic_opcode_to_str(enum ionic_cmd_opcode opcode)
return "IONIC_CMD_FW_DOWNLOAD";
case IONIC_CMD_FW_CONTROL:
return "IONIC_CMD_FW_CONTROL";
+ case IONIC_CMD_VF_GETATTR:
+ return "IONIC_CMD_VF_GETATTR";
+ case IONIC_CMD_VF_SETATTR:
+ return "IONIC_CMD_VF_SETATTR";
default:
return "DEVCMD_UNKNOWN";
}
@@ -326,9 +330,9 @@ int ionic_dev_cmd_wait(struct ionic *ionic, unsigned long max_seconds)
unsigned long max_wait;
unsigned long duration;
int opcode;
+ int hb = 0;
int done;
int err;
- int hb;
WARN_ON(in_interrupt());
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_stats.c b/drivers/net/ethernet/pensando/ionic/ionic_stats.c
index 03916b6d47f2..a1e9796a660a 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic_stats.c
+++ b/drivers/net/ethernet/pensando/ionic/ionic_stats.c
@@ -39,6 +39,7 @@ static const struct ionic_stat_desc ionic_rx_stats_desc[] = {
IONIC_RX_STAT_DESC(csum_none),
IONIC_RX_STAT_DESC(csum_complete),
IONIC_RX_STAT_DESC(csum_error),
+ IONIC_RX_STAT_DESC(dropped),
};
static const struct ionic_stat_desc ionic_txq_stats_desc[] = {
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_txrx.c b/drivers/net/ethernet/pensando/ionic/ionic_txrx.c
index 97e79949b359..e452f4242ba0 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic_txrx.c
+++ b/drivers/net/ethernet/pensando/ionic/ionic_txrx.c
@@ -152,12 +152,16 @@ static void ionic_rx_clean(struct ionic_queue *q, struct ionic_desc_info *desc_i
stats = q_to_rx_stats(q);
netdev = q->lif->netdev;
- if (comp->status)
+ if (comp->status) {
+ stats->dropped++;
return;
+ }
/* no packet processing while resetting */
- if (unlikely(test_bit(IONIC_LIF_QUEUE_RESET, q->lif->state)))
+ if (unlikely(test_bit(IONIC_LIF_QUEUE_RESET, q->lif->state))) {
+ stats->dropped++;
return;
+ }
stats->pkts++;
stats->bytes += le16_to_cpu(comp->len);
@@ -167,8 +171,10 @@ static void ionic_rx_clean(struct ionic_queue *q, struct ionic_desc_info *desc_i
else
skb = ionic_rx_frags(q, desc_info, cq_info);
- if (unlikely(!skb))
+ if (unlikely(!skb)) {
+ stats->dropped++;
return;
+ }
skb_record_rx_queue(skb, q->index);
@@ -337,6 +343,8 @@ void ionic_rx_fill(struct ionic_queue *q)
struct ionic_rxq_sg_desc *sg_desc;
struct ionic_rxq_sg_elem *sg_elem;
struct ionic_rxq_desc *desc;
+ unsigned int remain_len;
+ unsigned int seg_len;
unsigned int nfrags;
bool ring_doorbell;
unsigned int i, j;
@@ -346,6 +354,7 @@ void ionic_rx_fill(struct ionic_queue *q)
nfrags = round_up(len, PAGE_SIZE) / PAGE_SIZE;
for (i = ionic_q_space_avail(q); i; i--) {
+ remain_len = len;
desc_info = q->head;
desc = desc_info->desc;
sg_desc = desc_info->sg_desc;
@@ -369,7 +378,9 @@ void ionic_rx_fill(struct ionic_queue *q)
return;
}
desc->addr = cpu_to_le64(page_info->dma_addr);
- desc->len = cpu_to_le16(PAGE_SIZE);
+ seg_len = min_t(unsigned int, PAGE_SIZE, len);
+ desc->len = cpu_to_le16(seg_len);
+ remain_len -= seg_len;
page_info++;
/* fill sg descriptors - pages[1..n] */
@@ -385,7 +396,9 @@ void ionic_rx_fill(struct ionic_queue *q)
return;
}
sg_elem->addr = cpu_to_le64(page_info->dma_addr);
- sg_elem->len = cpu_to_le16(PAGE_SIZE);
+ seg_len = min_t(unsigned int, PAGE_SIZE, remain_len);
+ sg_elem->len = cpu_to_le16(seg_len);
+ remain_len -= seg_len;
page_info++;
}
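
[Reviewer note] The ionic_rx_fill() change above clips each posted segment
to the bytes actually remaining instead of always advertising PAGE_SIZE.
A worked example assuming 4K pages and a 9000-byte receive buffer:

	/* len = 9000, nfrags = round_up(9000, 4096) / 4096 = 3 */
	remain_len = 9000;
	seg_len = min_t(unsigned int, PAGE_SIZE, 9000);		/* desc:  4096 */
	remain_len -= seg_len;					/* 4904 left  */
	seg_len = min_t(unsigned int, PAGE_SIZE, remain_len);	/* sg[0]: 4096 */
	remain_len -= seg_len;					/* 808 left   */
	seg_len = min_t(unsigned int, PAGE_SIZE, remain_len);	/* sg[1]:  808 */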
diff --git a/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c b/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c
index c692a41e4548..8067ea04d455 100644
--- a/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c
+++ b/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c
@@ -49,7 +49,7 @@ static int netxen_nic_open(struct net_device *netdev);
static int netxen_nic_close(struct net_device *netdev);
static netdev_tx_t netxen_nic_xmit_frame(struct sk_buff *,
struct net_device *);
-static void netxen_tx_timeout(struct net_device *netdev);
+static void netxen_tx_timeout(struct net_device *netdev, unsigned int txqueue);
static void netxen_tx_timeout_task(struct work_struct *work);
static void netxen_fw_poll_work(struct work_struct *work);
static void netxen_schedule_work(struct netxen_adapter *adapter,
@@ -2222,7 +2222,7 @@ static void netxen_nic_handle_phy_intr(struct netxen_adapter *adapter)
netxen_advert_link_change(adapter, linkup);
}
-static void netxen_tx_timeout(struct net_device *netdev)
+static void netxen_tx_timeout(struct net_device *netdev, unsigned int txqueue)
{
struct netxen_adapter *adapter = netdev_priv(netdev);
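
[Reviewer note] The netxen hunks above (and the ionic one earlier) track a
tree-wide change to the ndo_tx_timeout prototype, which now receives the
index of the queue that timed out. A sketch of the new callback shape:

	static void example_tx_timeout(struct net_device *netdev,
				       unsigned int txqueue)
	{
		/* txqueue says which TX ring stalled; drivers with a single
		 * timeout path, like these, may simply ignore it
		 */
		netdev_err(netdev, "tx timeout on queue %u\n", txqueue);
	}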
diff --git a/drivers/net/ethernet/qlogic/qed/qed.h b/drivers/net/ethernet/qlogic/qed/qed.h
index 89fe091c958d..fa41bf08a589 100644
--- a/drivers/net/ethernet/qlogic/qed/qed.h
+++ b/drivers/net/ethernet/qlogic/qed/qed.h
@@ -253,7 +253,8 @@ enum qed_resources {
QED_VLAN,
QED_RDMA_CNQ_RAM,
QED_ILT,
- QED_LL2_QUEUE,
+ QED_LL2_RAM_QUEUE,
+ QED_LL2_CTX_QUEUE,
QED_CMDQS_CQS,
QED_RDMA_STATS_QUEUE,
QED_BDQ,
@@ -461,6 +462,8 @@ struct qed_fw_data {
const u8 *modes_tree_buf;
union init_op *init_ops;
const u32 *arr_data;
+ const u32 *fw_overlays;
+ u32 fw_overlays_len;
u32 init_ops_size;
};
@@ -531,6 +534,23 @@ struct qed_nvm_image_info {
bool valid;
};
+enum qed_hsi_def_type {
+ QED_HSI_DEF_MAX_NUM_VFS,
+ QED_HSI_DEF_MAX_NUM_L2_QUEUES,
+ QED_HSI_DEF_MAX_NUM_PORTS,
+ QED_HSI_DEF_MAX_SB_PER_PATH,
+ QED_HSI_DEF_MAX_NUM_PFS,
+ QED_HSI_DEF_MAX_NUM_VPORTS,
+ QED_HSI_DEF_NUM_ETH_RSS_ENGINE,
+ QED_HSI_DEF_MAX_QM_TX_QUEUES,
+ QED_HSI_DEF_NUM_PXP_ILT_RECORDS,
+ QED_HSI_DEF_NUM_RDMA_STATISTIC_COUNTERS,
+ QED_HSI_DEF_MAX_QM_GLOBAL_RLS,
+ QED_HSI_DEF_MAX_PBF_CMD_LINES,
+ QED_HSI_DEF_MAX_BTB_BLOCKS,
+ QED_NUM_HSI_DEFS
+};
+
#define DRV_MODULE_VERSION \
__stringify(QED_MAJOR_VERSION) "." \
__stringify(QED_MINOR_VERSION) "." \
@@ -646,6 +666,7 @@ struct qed_hwfn {
struct dbg_tools_data dbg_info;
void *dbg_user_info;
+ struct virt_mem_desc dbg_arrays[MAX_BIN_DBG_BUFFER_TYPE];
/* PWM region specific data */
u16 wid_count;
@@ -668,6 +689,7 @@ struct qed_hwfn {
/* Nvm images number and attributes */
struct qed_nvm_image_info nvm_info;
+ struct phys_mem_desc *fw_overlay_mem;
struct qed_ptt *p_arfs_ptt;
struct qed_simd_fp_handler simd_proto_handler[64];
@@ -796,8 +818,8 @@ struct qed_dev {
u8 cache_shift;
/* Init */
- const struct iro *iro_arr;
-#define IRO (p_hwfn->cdev->iro_arr)
+ const u32 *iro_arr;
+#define IRO ((const struct iro *)p_hwfn->cdev->iro_arr)
/* HW functions */
u8 num_hwfns;
@@ -856,6 +878,8 @@ struct qed_dev {
struct qed_cb_ll2_info *ll2;
u8 ll2_mac_address[ETH_ALEN];
#endif
+ struct qed_dbg_feature dbg_features[DBG_FEATURE_NUM];
+ bool disable_ilt_dump;
DECLARE_HASHTABLE(connections, 10);
const struct firmware *firmware;
@@ -868,16 +892,35 @@ struct qed_dev {
bool iwarp_cmt;
};
-#define NUM_OF_VFS(dev) (QED_IS_BB(dev) ? MAX_NUM_VFS_BB \
- : MAX_NUM_VFS_K2)
-#define NUM_OF_L2_QUEUES(dev) (QED_IS_BB(dev) ? MAX_NUM_L2_QUEUES_BB \
- : MAX_NUM_L2_QUEUES_K2)
-#define NUM_OF_PORTS(dev) (QED_IS_BB(dev) ? MAX_NUM_PORTS_BB \
- : MAX_NUM_PORTS_K2)
-#define NUM_OF_SBS(dev) (QED_IS_BB(dev) ? MAX_SB_PER_PATH_BB \
- : MAX_SB_PER_PATH_K2)
-#define NUM_OF_ENG_PFS(dev) (QED_IS_BB(dev) ? MAX_NUM_PFS_BB \
- : MAX_NUM_PFS_K2)
+u32 qed_get_hsi_def_val(struct qed_dev *cdev, enum qed_hsi_def_type type);
+
+#define NUM_OF_VFS(dev) \
+ qed_get_hsi_def_val(dev, QED_HSI_DEF_MAX_NUM_VFS)
+#define NUM_OF_L2_QUEUES(dev) \
+ qed_get_hsi_def_val(dev, QED_HSI_DEF_MAX_NUM_L2_QUEUES)
+#define NUM_OF_PORTS(dev) \
+ qed_get_hsi_def_val(dev, QED_HSI_DEF_MAX_NUM_PORTS)
+#define NUM_OF_SBS(dev) \
+ qed_get_hsi_def_val(dev, QED_HSI_DEF_MAX_SB_PER_PATH)
+#define NUM_OF_ENG_PFS(dev) \
+ qed_get_hsi_def_val(dev, QED_HSI_DEF_MAX_NUM_PFS)
+#define NUM_OF_VPORTS(dev) \
+ qed_get_hsi_def_val(dev, QED_HSI_DEF_MAX_NUM_VPORTS)
+#define NUM_OF_RSS_ENGINES(dev) \
+ qed_get_hsi_def_val(dev, QED_HSI_DEF_NUM_ETH_RSS_ENGINE)
+#define NUM_OF_QM_TX_QUEUES(dev) \
+ qed_get_hsi_def_val(dev, QED_HSI_DEF_MAX_QM_TX_QUEUES)
+#define NUM_OF_PXP_ILT_RECORDS(dev) \
+ qed_get_hsi_def_val(dev, QED_HSI_DEF_NUM_PXP_ILT_RECORDS)
+#define NUM_OF_RDMA_STATISTIC_COUNTERS(dev) \
+ qed_get_hsi_def_val(dev, QED_HSI_DEF_NUM_RDMA_STATISTIC_COUNTERS)
+#define NUM_OF_QM_GLOBAL_RLS(dev) \
+ qed_get_hsi_def_val(dev, QED_HSI_DEF_MAX_QM_GLOBAL_RLS)
+#define NUM_OF_PBF_CMD_LINES(dev) \
+ qed_get_hsi_def_val(dev, QED_HSI_DEF_MAX_PBF_CMD_LINES)
+#define NUM_OF_BTB_BLOCKS(dev) \
+ qed_get_hsi_def_val(dev, QED_HSI_DEF_MAX_BTB_BLOCKS)
+
/**
* @brief qed_concrete_to_sw_fid - get the sw function id from
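
[Reviewer note] The per-chip ternaries are folded into one table lookup, so
adding a new HSI limit only needs a qed_hsi_def_type entry plus a row in
qed_get_hsi_def_val(); call sites are unchanged, e.g.:

	u32 max_vfs = NUM_OF_VFS(cdev);	/* was: QED_IS_BB(cdev) ? ..._BB : ..._K2 */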
diff --git a/drivers/net/ethernet/qlogic/qed/qed_cxt.c b/drivers/net/ethernet/qlogic/qed/qed_cxt.c
index 8e1bdf58b9e7..fbfff2b1dc93 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_cxt.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_cxt.c
@@ -50,12 +50,6 @@
#include "qed_reg_addr.h"
#include "qed_sriov.h"
-/* Max number of connection types in HW (DQ/CDU etc.) */
-#define MAX_CONN_TYPES PROTOCOLID_COMMON
-#define NUM_TASK_TYPES 2
-#define NUM_TASK_PF_SEGMENTS 4
-#define NUM_TASK_VF_SEGMENTS 1
-
/* QM constants */
#define QM_PQ_ELEMENT_SIZE 4 /* in bytes */
@@ -123,126 +117,6 @@ struct src_ent {
/* Alignment is inherent to the type1_task_context structure */
#define TYPE1_TASK_CXT_SIZE(p_hwfn) sizeof(union type1_task_context)
-/* PF per protocl configuration object */
-#define TASK_SEGMENTS (NUM_TASK_PF_SEGMENTS + NUM_TASK_VF_SEGMENTS)
-#define TASK_SEGMENT_VF (NUM_TASK_PF_SEGMENTS)
-
-struct qed_tid_seg {
- u32 count;
- u8 type;
- bool has_fl_mem;
-};
-
-struct qed_conn_type_cfg {
- u32 cid_count;
- u32 cids_per_vf;
- struct qed_tid_seg tid_seg[TASK_SEGMENTS];
-};
-
-/* ILT Client configuration, Per connection type (protocol) resources. */
-#define ILT_CLI_PF_BLOCKS (1 + NUM_TASK_PF_SEGMENTS * 2)
-#define ILT_CLI_VF_BLOCKS (1 + NUM_TASK_VF_SEGMENTS * 2)
-#define CDUC_BLK (0)
-#define SRQ_BLK (0)
-#define CDUT_SEG_BLK(n) (1 + (u8)(n))
-#define CDUT_FL_SEG_BLK(n, X) (1 + (n) + NUM_TASK_ ## X ## _SEGMENTS)
-
-enum ilt_clients {
- ILT_CLI_CDUC,
- ILT_CLI_CDUT,
- ILT_CLI_QM,
- ILT_CLI_TM,
- ILT_CLI_SRC,
- ILT_CLI_TSDM,
- ILT_CLI_MAX
-};
-
-struct ilt_cfg_pair {
- u32 reg;
- u32 val;
-};
-
-struct qed_ilt_cli_blk {
- u32 total_size; /* 0 means not active */
- u32 real_size_in_page;
- u32 start_line;
- u32 dynamic_line_cnt;
-};
-
-struct qed_ilt_client_cfg {
- bool active;
-
- /* ILT boundaries */
- struct ilt_cfg_pair first;
- struct ilt_cfg_pair last;
- struct ilt_cfg_pair p_size;
-
- /* ILT client blocks for PF */
- struct qed_ilt_cli_blk pf_blks[ILT_CLI_PF_BLOCKS];
- u32 pf_total_lines;
-
- /* ILT client blocks for VFs */
- struct qed_ilt_cli_blk vf_blks[ILT_CLI_VF_BLOCKS];
- u32 vf_total_lines;
-};
-
-/* Per Path -
- * ILT shadow table
- * Protocol acquired CID lists
- * PF start line in ILT
- */
-struct qed_dma_mem {
- dma_addr_t p_phys;
- void *p_virt;
- size_t size;
-};
-
-struct qed_cid_acquired_map {
- u32 start_cid;
- u32 max_count;
- unsigned long *cid_map;
-};
-
-struct qed_cxt_mngr {
- /* Per protocl configuration */
- struct qed_conn_type_cfg conn_cfg[MAX_CONN_TYPES];
-
- /* computed ILT structure */
- struct qed_ilt_client_cfg clients[ILT_CLI_MAX];
-
- /* Task type sizes */
- u32 task_type_size[NUM_TASK_TYPES];
-
- /* total number of VFs for this hwfn -
- * ALL VFs are symmetric in terms of HW resources
- */
- u32 vf_count;
-
- /* Acquired CIDs */
- struct qed_cid_acquired_map acquired[MAX_CONN_TYPES];
-
- struct qed_cid_acquired_map
- acquired_vf[MAX_CONN_TYPES][MAX_NUM_VFS];
-
- /* ILT shadow table */
- struct qed_dma_mem *ilt_shadow;
- u32 pf_start_line;
-
- /* Mutex for a dynamic ILT allocation */
- struct mutex mutex;
-
- /* SRC T2 */
- struct qed_dma_mem *t2;
- u32 t2_num_pages;
- u64 first_free;
- u64 last_free;
-
- /* total number of SRQ's for this hwfn */
- u32 srq_count;
-
- /* Maximal number of L2 steering filters */
- u32 arfs_count;
-};
static bool src_proto(enum protocol_type type)
{
return type == PROTOCOLID_ISCSI ||
@@ -880,30 +754,60 @@ u32 qed_cxt_cfg_ilt_compute_excess(struct qed_hwfn *p_hwfn, u32 used_lines)
static void qed_cxt_src_t2_free(struct qed_hwfn *p_hwfn)
{
- struct qed_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
+ struct qed_src_t2 *p_t2 = &p_hwfn->p_cxt_mngr->src_t2;
u32 i;
- if (!p_mngr->t2)
+ if (!p_t2 || !p_t2->dma_mem)
return;
- for (i = 0; i < p_mngr->t2_num_pages; i++)
- if (p_mngr->t2[i].p_virt)
+ for (i = 0; i < p_t2->num_pages; i++)
+ if (p_t2->dma_mem[i].virt_addr)
dma_free_coherent(&p_hwfn->cdev->pdev->dev,
- p_mngr->t2[i].size,
- p_mngr->t2[i].p_virt,
- p_mngr->t2[i].p_phys);
+ p_t2->dma_mem[i].size,
+ p_t2->dma_mem[i].virt_addr,
+ p_t2->dma_mem[i].phys_addr);
+
+ kfree(p_t2->dma_mem);
+ p_t2->dma_mem = NULL;
+}
+
+static int
+qed_cxt_t2_alloc_pages(struct qed_hwfn *p_hwfn,
+ struct qed_src_t2 *p_t2, u32 total_size, u32 page_size)
+{
+ void **p_virt;
+ u32 size, i;
+
+ if (!p_t2 || !p_t2->dma_mem)
+ return -EINVAL;
- kfree(p_mngr->t2);
- p_mngr->t2 = NULL;
+ for (i = 0; i < p_t2->num_pages; i++) {
+ size = min_t(u32, total_size, page_size);
+ p_virt = &p_t2->dma_mem[i].virt_addr;
+
+ *p_virt = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
+ size,
+ &p_t2->dma_mem[i].phys_addr,
+ GFP_KERNEL);
+ if (!p_t2->dma_mem[i].virt_addr)
+ return -ENOMEM;
+
+ memset(*p_virt, 0, size);
+ p_t2->dma_mem[i].size = size;
+ total_size -= size;
+ }
+
+ return 0;
}
static int qed_cxt_src_t2_alloc(struct qed_hwfn *p_hwfn)
{
struct qed_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
u32 conn_num, total_size, ent_per_page, psz, i;
+ struct phys_mem_desc *p_t2_last_page;
struct qed_ilt_client_cfg *p_src;
struct qed_src_iids src_iids;
- struct qed_dma_mem *p_t2;
+ struct qed_src_t2 *p_t2;
int rc;
memset(&src_iids, 0, sizeof(src_iids));
@@ -921,49 +825,39 @@ static int qed_cxt_src_t2_alloc(struct qed_hwfn *p_hwfn)
/* use the same page size as the SRC ILT client */
psz = ILT_PAGE_IN_BYTES(p_src->p_size.val);
- p_mngr->t2_num_pages = DIV_ROUND_UP(total_size, psz);
+ p_t2 = &p_mngr->src_t2;
+ p_t2->num_pages = DIV_ROUND_UP(total_size, psz);
/* allocate t2 */
- p_mngr->t2 = kcalloc(p_mngr->t2_num_pages, sizeof(struct qed_dma_mem),
- GFP_KERNEL);
- if (!p_mngr->t2) {
+ p_t2->dma_mem = kcalloc(p_t2->num_pages, sizeof(struct phys_mem_desc),
+ GFP_KERNEL);
+ if (!p_t2->dma_mem) {
+ DP_NOTICE(p_hwfn, "Failed to allocate t2 table\n");
rc = -ENOMEM;
goto t2_fail;
}
- /* allocate t2 pages */
- for (i = 0; i < p_mngr->t2_num_pages; i++) {
- u32 size = min_t(u32, total_size, psz);
- void **p_virt = &p_mngr->t2[i].p_virt;
-
- *p_virt = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev, size,
- &p_mngr->t2[i].p_phys,
- GFP_KERNEL);
- if (!p_mngr->t2[i].p_virt) {
- rc = -ENOMEM;
- goto t2_fail;
- }
- p_mngr->t2[i].size = size;
- total_size -= size;
- }
+ rc = qed_cxt_t2_alloc_pages(p_hwfn, p_t2, total_size, psz);
+ if (rc)
+ goto t2_fail;
/* Set the t2 pointers */
/* entries per page - must be a power of two */
ent_per_page = psz / sizeof(struct src_ent);
- p_mngr->first_free = (u64) p_mngr->t2[0].p_phys;
+ p_t2->first_free = (u64)p_t2->dma_mem[0].phys_addr;
- p_t2 = &p_mngr->t2[(conn_num - 1) / ent_per_page];
- p_mngr->last_free = (u64) p_t2->p_phys +
+ p_t2_last_page = &p_t2->dma_mem[(conn_num - 1) / ent_per_page];
+ p_t2->last_free = (u64)p_t2_last_page->phys_addr +
((conn_num - 1) & (ent_per_page - 1)) * sizeof(struct src_ent);
- for (i = 0; i < p_mngr->t2_num_pages; i++) {
+ for (i = 0; i < p_t2->num_pages; i++) {
u32 ent_num = min_t(u32,
ent_per_page,
conn_num);
- struct src_ent *entries = p_mngr->t2[i].p_virt;
- u64 p_ent_phys = (u64) p_mngr->t2[i].p_phys, val;
+ struct src_ent *entries = p_t2->dma_mem[i].virt_addr;
+ u64 p_ent_phys = (u64)p_t2->dma_mem[i].phys_addr, val;
u32 j;
for (j = 0; j < ent_num - 1; j++) {
@@ -971,8 +865,8 @@ static int qed_cxt_src_t2_alloc(struct qed_hwfn *p_hwfn)
entries[j].next = cpu_to_be64(val);
}
- if (i < p_mngr->t2_num_pages - 1)
- val = (u64) p_mngr->t2[i + 1].p_phys;
+ if (i < p_t2->num_pages - 1)
+ val = (u64)p_t2->dma_mem[i + 1].phys_addr;
else
val = 0;
entries[j].next = cpu_to_be64(val);
@@ -988,7 +882,7 @@ t2_fail:
}
#define for_each_ilt_valid_client(pos, clients) \
- for (pos = 0; pos < ILT_CLI_MAX; pos++) \
+ for (pos = 0; pos < MAX_ILT_CLIENTS; pos++) \
if (!clients[pos].active) { \
continue; \
} else \
@@ -1014,13 +908,13 @@ static void qed_ilt_shadow_free(struct qed_hwfn *p_hwfn)
ilt_size = qed_cxt_ilt_shadow_size(p_cli);
for (i = 0; p_mngr->ilt_shadow && i < ilt_size; i++) {
- struct qed_dma_mem *p_dma = &p_mngr->ilt_shadow[i];
+ struct phys_mem_desc *p_dma = &p_mngr->ilt_shadow[i];
- if (p_dma->p_virt)
+ if (p_dma->virt_addr)
dma_free_coherent(&p_hwfn->cdev->pdev->dev,
- p_dma->size, p_dma->p_virt,
- p_dma->p_phys);
- p_dma->p_virt = NULL;
+ p_dma->size, p_dma->virt_addr,
+ p_dma->phys_addr);
+ p_dma->virt_addr = NULL;
}
kfree(p_mngr->ilt_shadow);
}
@@ -1030,7 +924,7 @@ static int qed_ilt_blk_alloc(struct qed_hwfn *p_hwfn,
enum ilt_clients ilt_client,
u32 start_line_offset)
{
- struct qed_dma_mem *ilt_shadow = p_hwfn->p_cxt_mngr->ilt_shadow;
+ struct phys_mem_desc *ilt_shadow = p_hwfn->p_cxt_mngr->ilt_shadow;
u32 lines, line, sz_left, lines_to_skip = 0;
/* Special handling for RoCE that supports dynamic allocation */
@@ -1059,8 +953,8 @@ static int qed_ilt_blk_alloc(struct qed_hwfn *p_hwfn,
if (!p_virt)
return -ENOMEM;
- ilt_shadow[line].p_phys = p_phys;
- ilt_shadow[line].p_virt = p_virt;
+ ilt_shadow[line].phys_addr = p_phys;
+ ilt_shadow[line].virt_addr = p_virt;
ilt_shadow[line].size = size;
DP_VERBOSE(p_hwfn, QED_MSG_ILT,
@@ -1083,7 +977,7 @@ static int qed_ilt_shadow_alloc(struct qed_hwfn *p_hwfn)
int rc;
size = qed_cxt_ilt_shadow_size(clients);
- p_mngr->ilt_shadow = kcalloc(size, sizeof(struct qed_dma_mem),
+ p_mngr->ilt_shadow = kcalloc(size, sizeof(struct phys_mem_desc),
GFP_KERNEL);
if (!p_mngr->ilt_shadow) {
rc = -ENOMEM;
@@ -1092,7 +986,7 @@ static int qed_ilt_shadow_alloc(struct qed_hwfn *p_hwfn)
DP_VERBOSE(p_hwfn, QED_MSG_ILT,
"Allocated 0x%x bytes for ilt shadow\n",
- (u32)(size * sizeof(struct qed_dma_mem)));
+ (u32)(size * sizeof(struct phys_mem_desc)));
for_each_ilt_valid_client(i, clients) {
for (j = 0; j < ILT_CLI_PF_BLOCKS; j++) {
@@ -1238,15 +1132,20 @@ int qed_cxt_mngr_alloc(struct qed_hwfn *p_hwfn)
clients[ILT_CLI_TSDM].last.reg = ILT_CFG_REG(TSDM, LAST_ILT);
clients[ILT_CLI_TSDM].p_size.reg = ILT_CFG_REG(TSDM, P_SIZE);
/* default ILT page size for all clients is 64K */
- for (i = 0; i < ILT_CLI_MAX; i++)
+ for (i = 0; i < MAX_ILT_CLIENTS; i++)
p_mngr->clients[i].p_size.val = ILT_DEFAULT_HW_P_SIZE;
+ p_mngr->conn_ctx_size = CONN_CXT_SIZE(p_hwfn);
+
/* Initialize task sizes */
p_mngr->task_type_size[0] = TYPE0_TASK_CXT_SIZE(p_hwfn);
p_mngr->task_type_size[1] = TYPE1_TASK_CXT_SIZE(p_hwfn);
- if (p_hwfn->cdev->p_iov_info)
+ if (p_hwfn->cdev->p_iov_info) {
p_mngr->vf_count = p_hwfn->cdev->p_iov_info->total_vfs;
+ p_mngr->first_vf_in_pf =
+ p_hwfn->cdev->p_iov_info->first_vf_in_pf;
+ }
/* Initialize the dynamic ILT allocation mutex */
mutex_init(&p_mngr->mutex);
@@ -1522,7 +1421,6 @@ void qed_qm_init_pf(struct qed_hwfn *p_hwfn,
params.num_vports = qm_info->num_vports;
params.pf_wfq = qm_info->pf_wfq;
params.pf_rl = qm_info->pf_rl;
- params.link_speed = p_link->speed;
params.pq_params = qm_info->qm_pq_params;
params.vport_params = qm_info->qm_vport_params;
@@ -1674,7 +1572,7 @@ static void qed_ilt_init_pf(struct qed_hwfn *p_hwfn)
{
struct qed_ilt_client_cfg *clients;
struct qed_cxt_mngr *p_mngr;
- struct qed_dma_mem *p_shdw;
+ struct phys_mem_desc *p_shdw;
u32 line, rt_offst, i;
qed_ilt_bounds_init(p_hwfn);
@@ -1699,15 +1597,15 @@ static void qed_ilt_init_pf(struct qed_hwfn *p_hwfn)
/* p_virt could be NULL in case of dynamic
* allocation
*/
- if (p_shdw[line].p_virt) {
+ if (p_shdw[line].virt_addr) {
SET_FIELD(ilt_hw_entry, ILT_ENTRY_VALID, 1ULL);
SET_FIELD(ilt_hw_entry, ILT_ENTRY_PHY_ADDR,
- (p_shdw[line].p_phys >> 12));
+ (p_shdw[line].phys_addr >> 12));
DP_VERBOSE(p_hwfn, QED_MSG_ILT,
"Setting RT[0x%08x] from ILT[0x%08x] [Client is %d] to Physical addr: 0x%llx\n",
rt_offst, line, i,
- (u64)(p_shdw[line].p_phys >> 12));
+ (u64)(p_shdw[line].phys_addr >> 12));
}
STORE_RT_REG_AGG(p_hwfn, rt_offst, ilt_hw_entry);
@@ -2050,10 +1948,10 @@ int qed_cxt_get_cid_info(struct qed_hwfn *p_hwfn, struct qed_cxt_info *p_info)
line = p_info->iid / cxts_per_p;
/* Make sure context is allocated (dynamic allocation) */
- if (!p_mngr->ilt_shadow[line].p_virt)
+ if (!p_mngr->ilt_shadow[line].virt_addr)
return -EINVAL;
- p_info->p_cxt = p_mngr->ilt_shadow[line].p_virt +
+ p_info->p_cxt = p_mngr->ilt_shadow[line].virt_addr +
p_info->iid % cxts_per_p * conn_cxt_size;
DP_VERBOSE(p_hwfn, (QED_MSG_ILT | QED_MSG_CXT),
@@ -2234,7 +2132,7 @@ int qed_cxt_get_tid_mem_info(struct qed_hwfn *p_hwfn,
for (i = 0; i < total_lines; i++) {
shadow_line = i + p_fl_seg->start_line -
p_hwfn->p_cxt_mngr->pf_start_line;
- p_info->blocks[i] = p_mngr->ilt_shadow[shadow_line].p_virt;
+ p_info->blocks[i] = p_mngr->ilt_shadow[shadow_line].virt_addr;
}
p_info->waste = ILT_PAGE_IN_BYTES(p_cli->p_size.val) -
p_fl_seg->real_size_in_page;
@@ -2296,7 +2194,7 @@ qed_cxt_dynamic_ilt_alloc(struct qed_hwfn *p_hwfn,
mutex_lock(&p_hwfn->p_cxt_mngr->mutex);
- if (p_hwfn->p_cxt_mngr->ilt_shadow[shadow_line].p_virt)
+ if (p_hwfn->p_cxt_mngr->ilt_shadow[shadow_line].virt_addr)
goto out0;
p_ptt = qed_ptt_acquire(p_hwfn);
@@ -2334,8 +2232,8 @@ qed_cxt_dynamic_ilt_alloc(struct qed_hwfn *p_hwfn,
}
}
- p_hwfn->p_cxt_mngr->ilt_shadow[shadow_line].p_virt = p_virt;
- p_hwfn->p_cxt_mngr->ilt_shadow[shadow_line].p_phys = p_phys;
+ p_hwfn->p_cxt_mngr->ilt_shadow[shadow_line].virt_addr = p_virt;
+ p_hwfn->p_cxt_mngr->ilt_shadow[shadow_line].phys_addr = p_phys;
p_hwfn->p_cxt_mngr->ilt_shadow[shadow_line].size =
p_blk->real_size_in_page;
@@ -2345,9 +2243,9 @@ qed_cxt_dynamic_ilt_alloc(struct qed_hwfn *p_hwfn,
ilt_hw_entry = 0;
SET_FIELD(ilt_hw_entry, ILT_ENTRY_VALID, 1ULL);
- SET_FIELD(ilt_hw_entry,
- ILT_ENTRY_PHY_ADDR,
- (p_hwfn->p_cxt_mngr->ilt_shadow[shadow_line].p_phys >> 12));
+ SET_FIELD(ilt_hw_entry, ILT_ENTRY_PHY_ADDR,
+ (p_hwfn->p_cxt_mngr->ilt_shadow[shadow_line].phys_addr
+ >> 12));
/* Write via DMAE since the PSWRQ2_REG_ILT_MEMORY line is a wide-bus */
qed_dmae_host2grc(p_hwfn, p_ptt, (u64) (uintptr_t)&ilt_hw_entry,
@@ -2434,16 +2332,16 @@ qed_cxt_free_ilt_range(struct qed_hwfn *p_hwfn,
}
for (i = shadow_start_line; i < shadow_end_line; i++) {
- if (!p_hwfn->p_cxt_mngr->ilt_shadow[i].p_virt)
+ if (!p_hwfn->p_cxt_mngr->ilt_shadow[i].virt_addr)
continue;
dma_free_coherent(&p_hwfn->cdev->pdev->dev,
p_hwfn->p_cxt_mngr->ilt_shadow[i].size,
- p_hwfn->p_cxt_mngr->ilt_shadow[i].p_virt,
- p_hwfn->p_cxt_mngr->ilt_shadow[i].p_phys);
+ p_hwfn->p_cxt_mngr->ilt_shadow[i].virt_addr,
+ p_hwfn->p_cxt_mngr->ilt_shadow[i].phys_addr);
- p_hwfn->p_cxt_mngr->ilt_shadow[i].p_virt = NULL;
- p_hwfn->p_cxt_mngr->ilt_shadow[i].p_phys = 0;
+ p_hwfn->p_cxt_mngr->ilt_shadow[i].virt_addr = NULL;
+ p_hwfn->p_cxt_mngr->ilt_shadow[i].phys_addr = 0;
p_hwfn->p_cxt_mngr->ilt_shadow[i].size = 0;
/* compute absolute offset */
@@ -2547,8 +2445,76 @@ int qed_cxt_get_task_ctx(struct qed_hwfn *p_hwfn,
ilt_idx = tid / num_tids_per_block + p_seg->start_line -
p_mngr->pf_start_line;
- *pp_task_ctx = (u8 *)p_mngr->ilt_shadow[ilt_idx].p_virt +
+ *pp_task_ctx = (u8 *)p_mngr->ilt_shadow[ilt_idx].virt_addr +
(tid % num_tids_per_block) * tid_size;
return 0;
}
+
+static u16 qed_blk_calculate_pages(struct qed_ilt_cli_blk *p_blk)
+{
+ if (p_blk->real_size_in_page == 0)
+ return 0;
+
+ return DIV_ROUND_UP(p_blk->total_size, p_blk->real_size_in_page);
+}
+
+u16 qed_get_cdut_num_pf_init_pages(struct qed_hwfn *p_hwfn)
+{
+ struct qed_ilt_client_cfg *p_cli;
+ struct qed_ilt_cli_blk *p_blk;
+ u16 i, pages = 0;
+
+ p_cli = &p_hwfn->p_cxt_mngr->clients[ILT_CLI_CDUT];
+ for (i = 0; i < NUM_TASK_PF_SEGMENTS; i++) {
+ p_blk = &p_cli->pf_blks[CDUT_FL_SEG_BLK(i, PF)];
+ pages += qed_blk_calculate_pages(p_blk);
+ }
+
+ return pages;
+}
+
+u16 qed_get_cdut_num_vf_init_pages(struct qed_hwfn *p_hwfn)
+{
+ struct qed_ilt_client_cfg *p_cli;
+ struct qed_ilt_cli_blk *p_blk;
+ u16 i, pages = 0;
+
+ p_cli = &p_hwfn->p_cxt_mngr->clients[ILT_CLI_CDUT];
+ for (i = 0; i < NUM_TASK_VF_SEGMENTS; i++) {
+ p_blk = &p_cli->vf_blks[CDUT_FL_SEG_BLK(i, VF)];
+ pages += qed_blk_calculate_pages(p_blk);
+ }
+
+ return pages;
+}
+
+u16 qed_get_cdut_num_pf_work_pages(struct qed_hwfn *p_hwfn)
+{
+ struct qed_ilt_client_cfg *p_cli;
+ struct qed_ilt_cli_blk *p_blk;
+ u16 i, pages = 0;
+
+ p_cli = &p_hwfn->p_cxt_mngr->clients[ILT_CLI_CDUT];
+ for (i = 0; i < NUM_TASK_PF_SEGMENTS; i++) {
+ p_blk = &p_cli->pf_blks[CDUT_SEG_BLK(i)];
+ pages += qed_blk_calculate_pages(p_blk);
+ }
+
+ return pages;
+}
+
+u16 qed_get_cdut_num_vf_work_pages(struct qed_hwfn *p_hwfn)
+{
+ struct qed_ilt_client_cfg *p_cli;
+ struct qed_ilt_cli_blk *p_blk;
+ u16 pages = 0, i;
+
+ p_cli = &p_hwfn->p_cxt_mngr->clients[ILT_CLI_CDUT];
+ for (i = 0; i < NUM_TASK_VF_SEGMENTS; i++) {
+ p_blk = &p_cli->vf_blks[CDUT_SEG_BLK(i)];
+ pages += qed_blk_calculate_pages(p_blk);
+ }
+
+ return pages;
+}
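
[Reviewer note] All four new CDUT page counters reduce to
qed_blk_calculate_pages(), i.e. a ceiling division of the block's total
size by its per-page payload. A worked example with assumed sizes:

	/* total_size = 0x30000, real_size_in_page = 0x10000 (64K ILT pages) */
	pages = DIV_ROUND_UP(0x30000, 0x10000);	/* 3 pages for this block */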
diff --git a/drivers/net/ethernet/qlogic/qed/qed_cxt.h b/drivers/net/ethernet/qlogic/qed/qed_cxt.h
index 758a8b4c0de8..c4e815f6cabd 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_cxt.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_cxt.h
@@ -242,4 +242,134 @@ int qed_cxt_free_proto_ilt(struct qed_hwfn *p_hwfn, enum protocol_type proto);
#define QED_CTX_FL_MEM 1
int qed_cxt_get_task_ctx(struct qed_hwfn *p_hwfn,
u32 tid, u8 ctx_type, void **task_ctx);
+
+/* Max number of connection types in HW (DQ/CDU etc.) */
+#define MAX_CONN_TYPES PROTOCOLID_COMMON
+#define NUM_TASK_TYPES 2
+#define NUM_TASK_PF_SEGMENTS 4
+#define NUM_TASK_VF_SEGMENTS 1
+
+/* PF per protocol configuration object */
+#define TASK_SEGMENTS (NUM_TASK_PF_SEGMENTS + NUM_TASK_VF_SEGMENTS)
+#define TASK_SEGMENT_VF (NUM_TASK_PF_SEGMENTS)
+
+struct qed_tid_seg {
+ u32 count;
+ u8 type;
+ bool has_fl_mem;
+};
+
+struct qed_conn_type_cfg {
+ u32 cid_count;
+ u32 cids_per_vf;
+ struct qed_tid_seg tid_seg[TASK_SEGMENTS];
+};
+
+/* ILT Client configuration,
+ * Per connection type (protocol) resources (cids, tis, vf cids etc.)
+ * 1 - for connection context (CDUC) and for each task context we need two
+ * values, for regular task context and for force load memory
+ */
+#define ILT_CLI_PF_BLOCKS (1 + NUM_TASK_PF_SEGMENTS * 2)
+#define ILT_CLI_VF_BLOCKS (1 + NUM_TASK_VF_SEGMENTS * 2)
+#define CDUC_BLK (0)
+#define SRQ_BLK (0)
+#define CDUT_SEG_BLK(n) (1 + (u8)(n))
+#define CDUT_FL_SEG_BLK(n, X) (1 + (n) + NUM_TASK_ ## X ## _SEGMENTS)
+
+struct ilt_cfg_pair {
+ u32 reg;
+ u32 val;
+};
+
+struct qed_ilt_cli_blk {
+ u32 total_size; /* 0 means not active */
+ u32 real_size_in_page;
+ u32 start_line;
+ u32 dynamic_line_offset;
+ u32 dynamic_line_cnt;
+};
+
+struct qed_ilt_client_cfg {
+ bool active;
+
+ /* ILT boundaries */
+ struct ilt_cfg_pair first;
+ struct ilt_cfg_pair last;
+ struct ilt_cfg_pair p_size;
+
+ /* ILT client blocks for PF */
+ struct qed_ilt_cli_blk pf_blks[ILT_CLI_PF_BLOCKS];
+ u32 pf_total_lines;
+
+ /* ILT client blocks for VFs */
+ struct qed_ilt_cli_blk vf_blks[ILT_CLI_VF_BLOCKS];
+ u32 vf_total_lines;
+};
+
+struct qed_cid_acquired_map {
+ u32 start_cid;
+ u32 max_count;
+ unsigned long *cid_map;
+};
+
+struct qed_src_t2 {
+ struct phys_mem_desc *dma_mem;
+ u32 num_pages;
+ u64 first_free;
+ u64 last_free;
+};
+
+struct qed_cxt_mngr {
+ /* Per protocol configuration */
+ struct qed_conn_type_cfg conn_cfg[MAX_CONN_TYPES];
+
+ /* computed ILT structure */
+ struct qed_ilt_client_cfg clients[MAX_ILT_CLIENTS];
+
+ /* Task type sizes */
+ u32 task_type_size[NUM_TASK_TYPES];
+
+ /* total number of VFs for this hwfn -
+ * ALL VFs are symmetric in terms of HW resources
+ */
+ u32 vf_count;
+ u32 first_vf_in_pf;
+
+ /* Acquired CIDs */
+ struct qed_cid_acquired_map acquired[MAX_CONN_TYPES];
+
+ struct qed_cid_acquired_map
+ acquired_vf[MAX_CONN_TYPES][MAX_NUM_VFS];
+
+ /* ILT shadow table */
+ struct phys_mem_desc *ilt_shadow;
+ u32 ilt_shadow_size;
+ u32 pf_start_line;
+
+ /* Mutex for a dynamic ILT allocation */
+ struct mutex mutex;
+
+ /* SRC T2 */
+ struct qed_src_t2 src_t2;
+ u32 t2_num_pages;
+ u64 first_free;
+ u64 last_free;
+
+ /* total number of SRQ's for this hwfn */
+ u32 srq_count;
+
+ /* Maximal number of L2 steering filters */
+ u32 arfs_count;
+
+ u8 task_type_id;
+ u16 task_ctx_size;
+ u16 conn_ctx_size;
+};
+
+u16 qed_get_cdut_num_pf_init_pages(struct qed_hwfn *p_hwfn);
+u16 qed_get_cdut_num_vf_init_pages(struct qed_hwfn *p_hwfn);
+u16 qed_get_cdut_num_pf_work_pages(struct qed_hwfn *p_hwfn);
+u16 qed_get_cdut_num_vf_work_pages(struct qed_hwfn *p_hwfn);
+
#endif
diff --git a/drivers/net/ethernet/qlogic/qed/qed_debug.c b/drivers/net/ethernet/qlogic/qed/qed_debug.c
index 859caa6c1a1f..f4eebaabb6d0 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_debug.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_debug.c
@@ -7,6 +7,7 @@
#include <linux/vmalloc.h>
#include <linux/crc32.h>
#include "qed.h"
+#include "qed_cxt.h"
#include "qed_hsi.h"
#include "qed_hw.h"
#include "qed_mcp.h"
@@ -22,27 +23,28 @@ enum mem_groups {
MEM_GROUP_BRB_RAM,
MEM_GROUP_BRB_MEM,
MEM_GROUP_PRS_MEM,
+ MEM_GROUP_SDM_MEM,
+ MEM_GROUP_PBUF,
MEM_GROUP_IOR,
+ MEM_GROUP_RAM,
MEM_GROUP_BTB_RAM,
+ MEM_GROUP_RDIF_CTX,
+ MEM_GROUP_TDIF_CTX,
+ MEM_GROUP_CFC_MEM,
MEM_GROUP_CONN_CFC_MEM,
- MEM_GROUP_TASK_CFC_MEM,
MEM_GROUP_CAU_PI,
MEM_GROUP_CAU_MEM,
+ MEM_GROUP_CAU_MEM_EXT,
MEM_GROUP_PXP_ILT,
- MEM_GROUP_TM_MEM,
- MEM_GROUP_SDM_MEM,
- MEM_GROUP_PBUF,
- MEM_GROUP_RAM,
MEM_GROUP_MULD_MEM,
MEM_GROUP_BTB_MEM,
- MEM_GROUP_RDIF_CTX,
- MEM_GROUP_TDIF_CTX,
- MEM_GROUP_CFC_MEM,
MEM_GROUP_IGU_MEM,
MEM_GROUP_IGU_MSIX,
MEM_GROUP_CAU_SB,
MEM_GROUP_BMB_RAM,
MEM_GROUP_BMB_MEM,
+ MEM_GROUP_TM_MEM,
+ MEM_GROUP_TASK_CFC_MEM,
MEM_GROUPS_NUM
};
@@ -56,27 +58,28 @@ static const char * const s_mem_group_names[] = {
"BRB_RAM",
"BRB_MEM",
"PRS_MEM",
+ "SDM_MEM",
+ "PBUF",
"IOR",
+ "RAM",
"BTB_RAM",
+ "RDIF_CTX",
+ "TDIF_CTX",
+ "CFC_MEM",
"CONN_CFC_MEM",
- "TASK_CFC_MEM",
"CAU_PI",
"CAU_MEM",
+ "CAU_MEM_EXT",
"PXP_ILT",
- "TM_MEM",
- "SDM_MEM",
- "PBUF",
- "RAM",
"MULD_MEM",
"BTB_MEM",
- "RDIF_CTX",
- "TDIF_CTX",
- "CFC_MEM",
"IGU_MEM",
"IGU_MSIX",
"CAU_SB",
"BMB_RAM",
"BMB_MEM",
+ "TM_MEM",
+ "TASK_CFC_MEM",
};
/* Idle check conditions */
@@ -170,35 +173,66 @@ static u32(*cond_arr[]) (const u32 *r, const u32 *imm) = {
cond13,
};
+#define NUM_PHYS_BLOCKS 84
+
+#define NUM_DBG_RESET_REGS 8
+
/******************************* Data Types **********************************/
-enum platform_ids {
- PLATFORM_ASIC,
+enum hw_types {
+ HW_TYPE_ASIC,
PLATFORM_RESERVED,
PLATFORM_RESERVED2,
PLATFORM_RESERVED3,
- MAX_PLATFORM_IDS
+ PLATFORM_RESERVED4,
+ MAX_HW_TYPES
+};
+
+/* CM context types */
+enum cm_ctx_types {
+ CM_CTX_CONN_AG,
+ CM_CTX_CONN_ST,
+ CM_CTX_TASK_AG,
+ CM_CTX_TASK_ST,
+ NUM_CM_CTX_TYPES
+};
+
+/* Debug bus frame modes */
+enum dbg_bus_frame_modes {
+ DBG_BUS_FRAME_MODE_4ST = 0, /* 4 Storm dwords (no HW) */
+ DBG_BUS_FRAME_MODE_2ST_2HW = 1, /* 2 Storm dwords, 2 HW dwords */
+ DBG_BUS_FRAME_MODE_1ST_3HW = 2, /* 1 Storm dword, 3 HW dwords */
+ DBG_BUS_FRAME_MODE_4HW = 3, /* 4 HW dwords (no Storms) */
+ DBG_BUS_FRAME_MODE_8HW = 4, /* 8 HW dwords (no Storms) */
+ DBG_BUS_NUM_FRAME_MODES
};
/* Chip constant definitions */
struct chip_defs {
const char *name;
+ u32 num_ilt_pages;
};
-/* Platform constant definitions */
-struct platform_defs {
+/* HW type constant definitions */
+struct hw_type_defs {
const char *name;
u32 delay_factor;
u32 dmae_thresh;
u32 log_thresh;
};
+/* RBC reset definitions */
+struct rbc_reset_defs {
+ u32 reset_reg_addr;
+ u32 reset_val[MAX_CHIP_IDS];
+};
+
/* Storm constant definitions.
* Addresses are in bytes, sizes are in quad-regs.
*/
struct storm_defs {
char letter;
- enum block_id block_id;
+ enum block_id sem_block_id;
enum dbg_bus_clients dbg_client_id[MAX_CHIP_IDS];
bool has_vfc;
u32 sem_fast_mem_addr;
@@ -207,47 +241,26 @@ struct storm_defs {
u32 sem_slow_mode_addr;
u32 sem_slow_mode1_conf_addr;
u32 sem_sync_dbg_empty_addr;
- u32 sem_slow_dbg_empty_addr;
+ u32 sem_gpre_vect_addr;
u32 cm_ctx_wr_addr;
- u32 cm_conn_ag_ctx_lid_size;
- u32 cm_conn_ag_ctx_rd_addr;
- u32 cm_conn_st_ctx_lid_size;
- u32 cm_conn_st_ctx_rd_addr;
- u32 cm_task_ag_ctx_lid_size;
- u32 cm_task_ag_ctx_rd_addr;
- u32 cm_task_st_ctx_lid_size;
- u32 cm_task_st_ctx_rd_addr;
+ u32 cm_ctx_rd_addr[NUM_CM_CTX_TYPES];
+ u32 cm_ctx_lid_sizes[MAX_CHIP_IDS][NUM_CM_CTX_TYPES];
};
-/* Block constant definitions */
-struct block_defs {
- const char *name;
- bool exists[MAX_CHIP_IDS];
- bool associated_to_storm;
-
- /* Valid only if associated_to_storm is true */
- u32 storm_id;
- enum dbg_bus_clients dbg_client_id[MAX_CHIP_IDS];
- u32 dbg_select_addr;
- u32 dbg_enable_addr;
- u32 dbg_shift_addr;
- u32 dbg_force_valid_addr;
- u32 dbg_force_frame_addr;
- bool has_reset_bit;
-
- /* If true, block is taken out of reset before dump */
- bool unreset;
- enum dbg_reset_regs reset_reg;
-
- /* Bit offset in reset register */
- u8 reset_bit_offset;
+/* Debug Bus Constraint operation constant definitions */
+struct dbg_bus_constraint_op_defs {
+ u8 hw_op_val;
+ bool is_cyclic;
};
-/* Reset register definitions */
-struct reset_reg_defs {
- u32 addr;
+/* Storm Mode definitions */
+struct storm_mode_defs {
+ const char *name;
+ bool is_fast_dbg;
+ u8 id_in_hw;
+ u32 src_disable_reg_addr;
+ u32 src_enable_val;
bool exists[MAX_CHIP_IDS];
- u32 unreset_val[MAX_CHIP_IDS];
};
struct grc_param_defs {
@@ -257,7 +270,7 @@ struct grc_param_defs {
bool is_preset;
bool is_persistent;
u32 exclude_all_preset_val;
- u32 crash_preset_val;
+ u32 crash_preset_val[MAX_CHIP_IDS];
};
/* Address is in 128b units. Width is in bits. */
@@ -314,15 +327,7 @@ struct split_type_defs {
/******************************** Constants **********************************/
-#define MAX_LCIDS 320
-#define MAX_LTIDS 320
-
-#define NUM_IOR_SETS 2
-#define IORS_PER_SET 176
-#define IOR_SET_OFFSET(set_id) ((set_id) * 256)
-
#define BYTES_IN_DWORD sizeof(u32)
-
/* In the macros below, size and offset are specified in bits */
#define CEIL_DWORDS(size) DIV_ROUND_UP(size, 32)
#define FIELD_BIT_OFFSET(type, field) type ## _ ## field ## _ ## OFFSET
@@ -348,20 +353,17 @@ struct split_type_defs {
qed_wr(dev, ptt, addr, (arr)[i]); \
} while (0)
-#define ARR_REG_RD(dev, ptt, addr, arr, arr_size) \
- do { \
- for (i = 0; i < (arr_size); i++) \
- (arr)[i] = qed_rd(dev, ptt, addr); \
- } while (0)
-
#define DWORDS_TO_BYTES(dwords) ((dwords) * BYTES_IN_DWORD)
#define BYTES_TO_DWORDS(bytes) ((bytes) / BYTES_IN_DWORD)
-/* Extra lines include a signature line + optional latency events line */
-#define NUM_EXTRA_DBG_LINES(block_desc) \
- (1 + ((block_desc)->has_latency_events ? 1 : 0))
-#define NUM_DBG_LINES(block_desc) \
- ((block_desc)->num_of_lines + NUM_EXTRA_DBG_LINES(block_desc))
+/* extra lines include a signature line + optional latency events line */
+#define NUM_EXTRA_DBG_LINES(block) \
+ (GET_FIELD((block)->flags, DBG_BLOCK_CHIP_HAS_LATENCY_EVENTS) ? 2 : 1)
+#define NUM_DBG_LINES(block) \
+ ((block)->num_of_dbg_bus_lines + NUM_EXTRA_DBG_LINES(block))
+
+#define USE_DMAE true
+#define PROTECT_WIDE_BUS true
#define RAM_LINES_TO_DWORDS(lines) ((lines) * 2)
#define RAM_LINES_TO_BYTES(lines) \
@@ -380,6 +382,9 @@ struct split_type_defs {
#define IDLE_CHK_RESULT_REG_HDR_DWORDS \
BYTES_TO_DWORDS(sizeof(struct dbg_idle_chk_result_reg_hdr))
+#define PAGE_MEM_DESC_SIZE_DWORDS \
+ BYTES_TO_DWORDS(sizeof(struct phys_mem_desc))
+
#define IDLE_CHK_MAX_ENTRIES_SIZE 32
/* The sizes and offsets below are specified in bits */
@@ -425,7 +430,9 @@ struct split_type_defs {
#define STATIC_DEBUG_LINE_DWORDS 9
-#define NUM_COMMON_GLOBAL_PARAMS 8
+#define NUM_COMMON_GLOBAL_PARAMS 9
+
+#define MAX_RECURSION_DEPTH 10
#define FW_IMG_MAIN 1
@@ -449,1054 +456,121 @@ struct split_type_defs {
(MCP_REG_SCRATCH + \
offsetof(struct static_init, sections[SPAD_SECTION_TRACE]))
+#define MAX_SW_PLTAFORM_STR_SIZE 64
+
#define EMPTY_FW_VERSION_STR "???_???_???_???"
#define EMPTY_FW_IMAGE_STR "???????????????"
/***************************** Constant Arrays *******************************/
-struct dbg_array {
- const u32 *ptr;
- u32 size_in_dwords;
-};
-
-/* Debug arrays */
-static struct dbg_array s_dbg_arrays[MAX_BIN_DBG_BUFFER_TYPE] = { {NULL} };
-
/* Chip constant definitions array */
static struct chip_defs s_chip_defs[MAX_CHIP_IDS] = {
- {"bb"},
- {"ah"},
- {"reserved"},
+ {"bb", PSWRQ2_REG_ILT_MEMORY_SIZE_BB / 2},
+ {"ah", PSWRQ2_REG_ILT_MEMORY_SIZE_K2 / 2}
};
/* Storm constant definitions array */
static struct storm_defs s_storm_defs[] = {
/* Tstorm */
{'T', BLOCK_TSEM,
- {DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCT,
- DBG_BUS_CLIENT_RBCT}, true,
- TSEM_REG_FAST_MEMORY,
- TSEM_REG_DBG_FRAME_MODE_BB_K2, TSEM_REG_SLOW_DBG_ACTIVE_BB_K2,
- TSEM_REG_SLOW_DBG_MODE_BB_K2, TSEM_REG_DBG_MODE1_CFG_BB_K2,
- TSEM_REG_SYNC_DBG_EMPTY, TSEM_REG_SLOW_DBG_EMPTY_BB_K2,
- TCM_REG_CTX_RBC_ACCS,
- 4, TCM_REG_AGG_CON_CTX,
- 16, TCM_REG_SM_CON_CTX,
- 2, TCM_REG_AGG_TASK_CTX,
- 4, TCM_REG_SM_TASK_CTX},
+ {DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCT},
+ true,
+ TSEM_REG_FAST_MEMORY,
+ TSEM_REG_DBG_FRAME_MODE_BB_K2, TSEM_REG_SLOW_DBG_ACTIVE_BB_K2,
+ TSEM_REG_SLOW_DBG_MODE_BB_K2, TSEM_REG_DBG_MODE1_CFG_BB_K2,
+ TSEM_REG_SYNC_DBG_EMPTY, TSEM_REG_DBG_GPRE_VECT,
+ TCM_REG_CTX_RBC_ACCS,
+ {TCM_REG_AGG_CON_CTX, TCM_REG_SM_CON_CTX, TCM_REG_AGG_TASK_CTX,
+ TCM_REG_SM_TASK_CTX},
+ {{4, 16, 2, 4}, {4, 16, 2, 4}} /* {bb} {k2} */
+ },
/* Mstorm */
{'M', BLOCK_MSEM,
- {DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCM,
- DBG_BUS_CLIENT_RBCM}, false,
- MSEM_REG_FAST_MEMORY,
- MSEM_REG_DBG_FRAME_MODE_BB_K2, MSEM_REG_SLOW_DBG_ACTIVE_BB_K2,
- MSEM_REG_SLOW_DBG_MODE_BB_K2, MSEM_REG_DBG_MODE1_CFG_BB_K2,
- MSEM_REG_SYNC_DBG_EMPTY, MSEM_REG_SLOW_DBG_EMPTY_BB_K2,
- MCM_REG_CTX_RBC_ACCS,
- 1, MCM_REG_AGG_CON_CTX,
- 10, MCM_REG_SM_CON_CTX,
- 2, MCM_REG_AGG_TASK_CTX,
- 7, MCM_REG_SM_TASK_CTX},
+ {DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCM},
+ false,
+ MSEM_REG_FAST_MEMORY,
+ MSEM_REG_DBG_FRAME_MODE_BB_K2,
+ MSEM_REG_SLOW_DBG_ACTIVE_BB_K2,
+ MSEM_REG_SLOW_DBG_MODE_BB_K2,
+ MSEM_REG_DBG_MODE1_CFG_BB_K2,
+ MSEM_REG_SYNC_DBG_EMPTY,
+ MSEM_REG_DBG_GPRE_VECT,
+ MCM_REG_CTX_RBC_ACCS,
+ {MCM_REG_AGG_CON_CTX, MCM_REG_SM_CON_CTX, MCM_REG_AGG_TASK_CTX,
+ MCM_REG_SM_TASK_CTX},
+ {{1, 10, 2, 7}, {1, 10, 2, 7}} /* {bb} {k2} */
+ },
/* Ustorm */
{'U', BLOCK_USEM,
- {DBG_BUS_CLIENT_RBCU, DBG_BUS_CLIENT_RBCU,
- DBG_BUS_CLIENT_RBCU}, false,
- USEM_REG_FAST_MEMORY,
- USEM_REG_DBG_FRAME_MODE_BB_K2, USEM_REG_SLOW_DBG_ACTIVE_BB_K2,
- USEM_REG_SLOW_DBG_MODE_BB_K2, USEM_REG_DBG_MODE1_CFG_BB_K2,
- USEM_REG_SYNC_DBG_EMPTY, USEM_REG_SLOW_DBG_EMPTY_BB_K2,
- UCM_REG_CTX_RBC_ACCS,
- 2, UCM_REG_AGG_CON_CTX,
- 13, UCM_REG_SM_CON_CTX,
- 3, UCM_REG_AGG_TASK_CTX,
- 3, UCM_REG_SM_TASK_CTX},
+ {DBG_BUS_CLIENT_RBCU, DBG_BUS_CLIENT_RBCU},
+ false,
+ USEM_REG_FAST_MEMORY,
+ USEM_REG_DBG_FRAME_MODE_BB_K2,
+ USEM_REG_SLOW_DBG_ACTIVE_BB_K2,
+ USEM_REG_SLOW_DBG_MODE_BB_K2,
+ USEM_REG_DBG_MODE1_CFG_BB_K2,
+ USEM_REG_SYNC_DBG_EMPTY,
+ USEM_REG_DBG_GPRE_VECT,
+ UCM_REG_CTX_RBC_ACCS,
+ {UCM_REG_AGG_CON_CTX, UCM_REG_SM_CON_CTX, UCM_REG_AGG_TASK_CTX,
+ UCM_REG_SM_TASK_CTX},
+ {{2, 13, 3, 3}, {2, 13, 3, 3}} /* {bb} {k2} */
+ },
/* Xstorm */
{'X', BLOCK_XSEM,
- {DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCX,
- DBG_BUS_CLIENT_RBCX}, false,
- XSEM_REG_FAST_MEMORY,
- XSEM_REG_DBG_FRAME_MODE_BB_K2, XSEM_REG_SLOW_DBG_ACTIVE_BB_K2,
- XSEM_REG_SLOW_DBG_MODE_BB_K2, XSEM_REG_DBG_MODE1_CFG_BB_K2,
- XSEM_REG_SYNC_DBG_EMPTY, XSEM_REG_SLOW_DBG_EMPTY_BB_K2,
- XCM_REG_CTX_RBC_ACCS,
- 9, XCM_REG_AGG_CON_CTX,
- 15, XCM_REG_SM_CON_CTX,
- 0, 0,
- 0, 0},
+ {DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCX},
+ false,
+ XSEM_REG_FAST_MEMORY,
+ XSEM_REG_DBG_FRAME_MODE_BB_K2,
+ XSEM_REG_SLOW_DBG_ACTIVE_BB_K2,
+ XSEM_REG_SLOW_DBG_MODE_BB_K2,
+ XSEM_REG_DBG_MODE1_CFG_BB_K2,
+ XSEM_REG_SYNC_DBG_EMPTY,
+ XSEM_REG_DBG_GPRE_VECT,
+ XCM_REG_CTX_RBC_ACCS,
+ {XCM_REG_AGG_CON_CTX, XCM_REG_SM_CON_CTX, 0, 0},
+ {{9, 15, 0, 0}, {9, 15, 0, 0}} /* {bb} {k2} */
+ },
/* Ystorm */
{'Y', BLOCK_YSEM,
- {DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCY,
- DBG_BUS_CLIENT_RBCY}, false,
- YSEM_REG_FAST_MEMORY,
- YSEM_REG_DBG_FRAME_MODE_BB_K2, YSEM_REG_SLOW_DBG_ACTIVE_BB_K2,
- YSEM_REG_SLOW_DBG_MODE_BB_K2, YSEM_REG_DBG_MODE1_CFG_BB_K2,
- YSEM_REG_SYNC_DBG_EMPTY, TSEM_REG_SLOW_DBG_EMPTY_BB_K2,
- YCM_REG_CTX_RBC_ACCS,
- 2, YCM_REG_AGG_CON_CTX,
- 3, YCM_REG_SM_CON_CTX,
- 2, YCM_REG_AGG_TASK_CTX,
- 12, YCM_REG_SM_TASK_CTX},
+ {DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCY},
+ false,
+ YSEM_REG_FAST_MEMORY,
+ YSEM_REG_DBG_FRAME_MODE_BB_K2,
+ YSEM_REG_SLOW_DBG_ACTIVE_BB_K2,
+ YSEM_REG_SLOW_DBG_MODE_BB_K2,
+ YSEM_REG_DBG_MODE1_CFG_BB_K2,
+ YSEM_REG_SYNC_DBG_EMPTY,
+ YSEM_REG_DBG_GPRE_VECT,
+ YCM_REG_CTX_RBC_ACCS,
+ {YCM_REG_AGG_CON_CTX, YCM_REG_SM_CON_CTX, YCM_REG_AGG_TASK_CTX,
+ YCM_REG_SM_TASK_CTX},
+ {{2, 3, 2, 12}, {2, 3, 2, 12}} /* {bb} {k2} */
+ },
/* Pstorm */
{'P', BLOCK_PSEM,
- {DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCS,
- DBG_BUS_CLIENT_RBCS}, true,
- PSEM_REG_FAST_MEMORY,
- PSEM_REG_DBG_FRAME_MODE_BB_K2, PSEM_REG_SLOW_DBG_ACTIVE_BB_K2,
- PSEM_REG_SLOW_DBG_MODE_BB_K2, PSEM_REG_DBG_MODE1_CFG_BB_K2,
- PSEM_REG_SYNC_DBG_EMPTY, PSEM_REG_SLOW_DBG_EMPTY_BB_K2,
- PCM_REG_CTX_RBC_ACCS,
- 0, 0,
- 10, PCM_REG_SM_CON_CTX,
- 0, 0,
- 0, 0}
-};
-
-/* Block definitions array */
-
-static struct block_defs block_grc_defs = {
- "grc",
- {true, true, true}, false, 0,
- {DBG_BUS_CLIENT_RBCN, DBG_BUS_CLIENT_RBCN, DBG_BUS_CLIENT_RBCN},
- GRC_REG_DBG_SELECT, GRC_REG_DBG_DWORD_ENABLE,
- GRC_REG_DBG_SHIFT, GRC_REG_DBG_FORCE_VALID,
- GRC_REG_DBG_FORCE_FRAME,
- true, false, DBG_RESET_REG_MISC_PL_UA, 1
-};
-
-static struct block_defs block_miscs_defs = {
- "miscs", {true, true, true}, false, 0,
- {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
- 0, 0, 0, 0, 0,
- false, false, MAX_DBG_RESET_REGS, 0
-};
-
-static struct block_defs block_misc_defs = {
- "misc", {true, true, true}, false, 0,
- {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
- 0, 0, 0, 0, 0,
- false, false, MAX_DBG_RESET_REGS, 0
-};
-
-static struct block_defs block_dbu_defs = {
- "dbu", {true, true, true}, false, 0,
- {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
- 0, 0, 0, 0, 0,
- false, false, MAX_DBG_RESET_REGS, 0
-};
-
-static struct block_defs block_pglue_b_defs = {
- "pglue_b",
- {true, true, true}, false, 0,
- {DBG_BUS_CLIENT_RBCH, DBG_BUS_CLIENT_RBCH, DBG_BUS_CLIENT_RBCH},
- PGLUE_B_REG_DBG_SELECT, PGLUE_B_REG_DBG_DWORD_ENABLE,
- PGLUE_B_REG_DBG_SHIFT, PGLUE_B_REG_DBG_FORCE_VALID,
- PGLUE_B_REG_DBG_FORCE_FRAME,
- true, false, DBG_RESET_REG_MISCS_PL_HV, 1
-};
-
-static struct block_defs block_cnig_defs = {
- "cnig",
- {true, true, true}, false, 0,
- {MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCW,
- DBG_BUS_CLIENT_RBCW},
- CNIG_REG_DBG_SELECT_K2_E5, CNIG_REG_DBG_DWORD_ENABLE_K2_E5,
- CNIG_REG_DBG_SHIFT_K2_E5, CNIG_REG_DBG_FORCE_VALID_K2_E5,
- CNIG_REG_DBG_FORCE_FRAME_K2_E5,
- true, false, DBG_RESET_REG_MISCS_PL_HV, 0
-};
-
-static struct block_defs block_cpmu_defs = {
- "cpmu", {true, true, true}, false, 0,
- {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
- 0, 0, 0, 0, 0,
- true, false, DBG_RESET_REG_MISCS_PL_HV, 8
-};
-
-static struct block_defs block_ncsi_defs = {
- "ncsi",
- {true, true, true}, false, 0,
- {DBG_BUS_CLIENT_RBCZ, DBG_BUS_CLIENT_RBCZ, DBG_BUS_CLIENT_RBCZ},
- NCSI_REG_DBG_SELECT, NCSI_REG_DBG_DWORD_ENABLE,
- NCSI_REG_DBG_SHIFT, NCSI_REG_DBG_FORCE_VALID,
- NCSI_REG_DBG_FORCE_FRAME,
- true, false, DBG_RESET_REG_MISCS_PL_HV, 5
-};
-
-static struct block_defs block_opte_defs = {
- "opte", {true, true, false}, false, 0,
- {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
- 0, 0, 0, 0, 0,
- true, false, DBG_RESET_REG_MISCS_PL_HV, 4
-};
-
-static struct block_defs block_bmb_defs = {
- "bmb",
- {true, true, true}, false, 0,
- {DBG_BUS_CLIENT_RBCZ, DBG_BUS_CLIENT_RBCB, DBG_BUS_CLIENT_RBCB},
- BMB_REG_DBG_SELECT, BMB_REG_DBG_DWORD_ENABLE,
- BMB_REG_DBG_SHIFT, BMB_REG_DBG_FORCE_VALID,
- BMB_REG_DBG_FORCE_FRAME,
- true, false, DBG_RESET_REG_MISCS_PL_UA, 7
-};
-
-static struct block_defs block_pcie_defs = {
- "pcie",
- {true, true, true}, false, 0,
- {MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCH,
- DBG_BUS_CLIENT_RBCH},
- PCIE_REG_DBG_COMMON_SELECT_K2_E5,
- PCIE_REG_DBG_COMMON_DWORD_ENABLE_K2_E5,
- PCIE_REG_DBG_COMMON_SHIFT_K2_E5,
- PCIE_REG_DBG_COMMON_FORCE_VALID_K2_E5,
- PCIE_REG_DBG_COMMON_FORCE_FRAME_K2_E5,
- false, false, MAX_DBG_RESET_REGS, 0
-};
-
-static struct block_defs block_mcp_defs = {
- "mcp", {true, true, true}, false, 0,
- {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
- 0, 0, 0, 0, 0,
- false, false, MAX_DBG_RESET_REGS, 0
-};
-
-static struct block_defs block_mcp2_defs = {
- "mcp2",
- {true, true, true}, false, 0,
- {DBG_BUS_CLIENT_RBCZ, DBG_BUS_CLIENT_RBCZ, DBG_BUS_CLIENT_RBCZ},
- MCP2_REG_DBG_SELECT, MCP2_REG_DBG_DWORD_ENABLE,
- MCP2_REG_DBG_SHIFT, MCP2_REG_DBG_FORCE_VALID,
- MCP2_REG_DBG_FORCE_FRAME,
- false, false, MAX_DBG_RESET_REGS, 0
-};
-
-static struct block_defs block_pswhst_defs = {
- "pswhst",
- {true, true, true}, false, 0,
- {DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP},
- PSWHST_REG_DBG_SELECT, PSWHST_REG_DBG_DWORD_ENABLE,
- PSWHST_REG_DBG_SHIFT, PSWHST_REG_DBG_FORCE_VALID,
- PSWHST_REG_DBG_FORCE_FRAME,
- true, false, DBG_RESET_REG_MISC_PL_HV, 0
-};
-
-static struct block_defs block_pswhst2_defs = {
- "pswhst2",
- {true, true, true}, false, 0,
- {DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP},
- PSWHST2_REG_DBG_SELECT, PSWHST2_REG_DBG_DWORD_ENABLE,
- PSWHST2_REG_DBG_SHIFT, PSWHST2_REG_DBG_FORCE_VALID,
- PSWHST2_REG_DBG_FORCE_FRAME,
- true, false, DBG_RESET_REG_MISC_PL_HV, 0
-};
-
-static struct block_defs block_pswrd_defs = {
- "pswrd",
- {true, true, true}, false, 0,
- {DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP},
- PSWRD_REG_DBG_SELECT, PSWRD_REG_DBG_DWORD_ENABLE,
- PSWRD_REG_DBG_SHIFT, PSWRD_REG_DBG_FORCE_VALID,
- PSWRD_REG_DBG_FORCE_FRAME,
- true, false, DBG_RESET_REG_MISC_PL_HV, 2
-};
-
-static struct block_defs block_pswrd2_defs = {
- "pswrd2",
- {true, true, true}, false, 0,
- {DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP},
- PSWRD2_REG_DBG_SELECT, PSWRD2_REG_DBG_DWORD_ENABLE,
- PSWRD2_REG_DBG_SHIFT, PSWRD2_REG_DBG_FORCE_VALID,
- PSWRD2_REG_DBG_FORCE_FRAME,
- true, false, DBG_RESET_REG_MISC_PL_HV, 2
-};
-
-static struct block_defs block_pswwr_defs = {
- "pswwr",
- {true, true, true}, false, 0,
- {DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP},
- PSWWR_REG_DBG_SELECT, PSWWR_REG_DBG_DWORD_ENABLE,
- PSWWR_REG_DBG_SHIFT, PSWWR_REG_DBG_FORCE_VALID,
- PSWWR_REG_DBG_FORCE_FRAME,
- true, false, DBG_RESET_REG_MISC_PL_HV, 3
-};
-
-static struct block_defs block_pswwr2_defs = {
- "pswwr2", {true, true, true}, false, 0,
- {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
- 0, 0, 0, 0, 0,
- true, false, DBG_RESET_REG_MISC_PL_HV, 3
-};
-
-static struct block_defs block_pswrq_defs = {
- "pswrq",
- {true, true, true}, false, 0,
- {DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP},
- PSWRQ_REG_DBG_SELECT, PSWRQ_REG_DBG_DWORD_ENABLE,
- PSWRQ_REG_DBG_SHIFT, PSWRQ_REG_DBG_FORCE_VALID,
- PSWRQ_REG_DBG_FORCE_FRAME,
- true, false, DBG_RESET_REG_MISC_PL_HV, 1
-};
-
-static struct block_defs block_pswrq2_defs = {
- "pswrq2",
- {true, true, true}, false, 0,
- {DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP},
- PSWRQ2_REG_DBG_SELECT, PSWRQ2_REG_DBG_DWORD_ENABLE,
- PSWRQ2_REG_DBG_SHIFT, PSWRQ2_REG_DBG_FORCE_VALID,
- PSWRQ2_REG_DBG_FORCE_FRAME,
- true, false, DBG_RESET_REG_MISC_PL_HV, 1
-};
-
-static struct block_defs block_pglcs_defs = {
- "pglcs",
- {true, true, true}, false, 0,
- {MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCH,
- DBG_BUS_CLIENT_RBCH},
- PGLCS_REG_DBG_SELECT_K2_E5, PGLCS_REG_DBG_DWORD_ENABLE_K2_E5,
- PGLCS_REG_DBG_SHIFT_K2_E5, PGLCS_REG_DBG_FORCE_VALID_K2_E5,
- PGLCS_REG_DBG_FORCE_FRAME_K2_E5,
- true, false, DBG_RESET_REG_MISCS_PL_HV, 2
-};
-
-static struct block_defs block_ptu_defs = {
- "ptu",
- {true, true, true}, false, 0,
- {DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP},
- PTU_REG_DBG_SELECT, PTU_REG_DBG_DWORD_ENABLE,
- PTU_REG_DBG_SHIFT, PTU_REG_DBG_FORCE_VALID,
- PTU_REG_DBG_FORCE_FRAME,
- true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 20
-};
-
-static struct block_defs block_dmae_defs = {
- "dmae",
- {true, true, true}, false, 0,
- {DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP},
- DMAE_REG_DBG_SELECT, DMAE_REG_DBG_DWORD_ENABLE,
- DMAE_REG_DBG_SHIFT, DMAE_REG_DBG_FORCE_VALID,
- DMAE_REG_DBG_FORCE_FRAME,
- true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 28
-};
-
-static struct block_defs block_tcm_defs = {
- "tcm",
- {true, true, true}, true, DBG_TSTORM_ID,
- {DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCT},
- TCM_REG_DBG_SELECT, TCM_REG_DBG_DWORD_ENABLE,
- TCM_REG_DBG_SHIFT, TCM_REG_DBG_FORCE_VALID,
- TCM_REG_DBG_FORCE_FRAME,
- true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 5
-};
-
-static struct block_defs block_mcm_defs = {
- "mcm",
- {true, true, true}, true, DBG_MSTORM_ID,
- {DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCM, DBG_BUS_CLIENT_RBCM},
- MCM_REG_DBG_SELECT, MCM_REG_DBG_DWORD_ENABLE,
- MCM_REG_DBG_SHIFT, MCM_REG_DBG_FORCE_VALID,
- MCM_REG_DBG_FORCE_FRAME,
- true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 3
-};
-
-static struct block_defs block_ucm_defs = {
- "ucm",
- {true, true, true}, true, DBG_USTORM_ID,
- {DBG_BUS_CLIENT_RBCU, DBG_BUS_CLIENT_RBCU, DBG_BUS_CLIENT_RBCU},
- UCM_REG_DBG_SELECT, UCM_REG_DBG_DWORD_ENABLE,
- UCM_REG_DBG_SHIFT, UCM_REG_DBG_FORCE_VALID,
- UCM_REG_DBG_FORCE_FRAME,
- true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 8
-};
-
-static struct block_defs block_xcm_defs = {
- "xcm",
- {true, true, true}, true, DBG_XSTORM_ID,
- {DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCX},
- XCM_REG_DBG_SELECT, XCM_REG_DBG_DWORD_ENABLE,
- XCM_REG_DBG_SHIFT, XCM_REG_DBG_FORCE_VALID,
- XCM_REG_DBG_FORCE_FRAME,
- true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 19
-};
-
-static struct block_defs block_ycm_defs = {
- "ycm",
- {true, true, true}, true, DBG_YSTORM_ID,
- {DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCY, DBG_BUS_CLIENT_RBCY},
- YCM_REG_DBG_SELECT, YCM_REG_DBG_DWORD_ENABLE,
- YCM_REG_DBG_SHIFT, YCM_REG_DBG_FORCE_VALID,
- YCM_REG_DBG_FORCE_FRAME,
- true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 5
-};
-
-static struct block_defs block_pcm_defs = {
- "pcm",
- {true, true, true}, true, DBG_PSTORM_ID,
- {DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCS},
- PCM_REG_DBG_SELECT, PCM_REG_DBG_DWORD_ENABLE,
- PCM_REG_DBG_SHIFT, PCM_REG_DBG_FORCE_VALID,
- PCM_REG_DBG_FORCE_FRAME,
- true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 4
-};
-
-static struct block_defs block_qm_defs = {
- "qm",
- {true, true, true}, false, 0,
- {DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCQ, DBG_BUS_CLIENT_RBCQ},
- QM_REG_DBG_SELECT, QM_REG_DBG_DWORD_ENABLE,
- QM_REG_DBG_SHIFT, QM_REG_DBG_FORCE_VALID,
- QM_REG_DBG_FORCE_FRAME,
- true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 16
-};
-
-static struct block_defs block_tm_defs = {
- "tm",
- {true, true, true}, false, 0,
- {DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCS},
- TM_REG_DBG_SELECT, TM_REG_DBG_DWORD_ENABLE,
- TM_REG_DBG_SHIFT, TM_REG_DBG_FORCE_VALID,
- TM_REG_DBG_FORCE_FRAME,
- true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 17
-};
-
-static struct block_defs block_dorq_defs = {
- "dorq",
- {true, true, true}, false, 0,
- {DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCY, DBG_BUS_CLIENT_RBCY},
- DORQ_REG_DBG_SELECT, DORQ_REG_DBG_DWORD_ENABLE,
- DORQ_REG_DBG_SHIFT, DORQ_REG_DBG_FORCE_VALID,
- DORQ_REG_DBG_FORCE_FRAME,
- true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 18
-};
-
-static struct block_defs block_brb_defs = {
- "brb",
- {true, true, true}, false, 0,
- {DBG_BUS_CLIENT_RBCR, DBG_BUS_CLIENT_RBCR, DBG_BUS_CLIENT_RBCR},
- BRB_REG_DBG_SELECT, BRB_REG_DBG_DWORD_ENABLE,
- BRB_REG_DBG_SHIFT, BRB_REG_DBG_FORCE_VALID,
- BRB_REG_DBG_FORCE_FRAME,
- true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 0
-};
-
-static struct block_defs block_src_defs = {
- "src",
- {true, true, true}, false, 0,
- {DBG_BUS_CLIENT_RBCF, DBG_BUS_CLIENT_RBCF, DBG_BUS_CLIENT_RBCF},
- SRC_REG_DBG_SELECT, SRC_REG_DBG_DWORD_ENABLE,
- SRC_REG_DBG_SHIFT, SRC_REG_DBG_FORCE_VALID,
- SRC_REG_DBG_FORCE_FRAME,
- true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 2
-};
-
-static struct block_defs block_prs_defs = {
- "prs",
- {true, true, true}, false, 0,
- {DBG_BUS_CLIENT_RBCR, DBG_BUS_CLIENT_RBCR, DBG_BUS_CLIENT_RBCR},
- PRS_REG_DBG_SELECT, PRS_REG_DBG_DWORD_ENABLE,
- PRS_REG_DBG_SHIFT, PRS_REG_DBG_FORCE_VALID,
- PRS_REG_DBG_FORCE_FRAME,
- true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 1
-};
-
-static struct block_defs block_tsdm_defs = {
- "tsdm",
- {true, true, true}, true, DBG_TSTORM_ID,
- {DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCT},
- TSDM_REG_DBG_SELECT, TSDM_REG_DBG_DWORD_ENABLE,
- TSDM_REG_DBG_SHIFT, TSDM_REG_DBG_FORCE_VALID,
- TSDM_REG_DBG_FORCE_FRAME,
- true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 3
-};
-
-static struct block_defs block_msdm_defs = {
- "msdm",
- {true, true, true}, true, DBG_MSTORM_ID,
- {DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCM, DBG_BUS_CLIENT_RBCM},
- MSDM_REG_DBG_SELECT, MSDM_REG_DBG_DWORD_ENABLE,
- MSDM_REG_DBG_SHIFT, MSDM_REG_DBG_FORCE_VALID,
- MSDM_REG_DBG_FORCE_FRAME,
- true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 6
-};
-
-static struct block_defs block_usdm_defs = {
- "usdm",
- {true, true, true}, true, DBG_USTORM_ID,
- {DBG_BUS_CLIENT_RBCU, DBG_BUS_CLIENT_RBCU, DBG_BUS_CLIENT_RBCU},
- USDM_REG_DBG_SELECT, USDM_REG_DBG_DWORD_ENABLE,
- USDM_REG_DBG_SHIFT, USDM_REG_DBG_FORCE_VALID,
- USDM_REG_DBG_FORCE_FRAME,
- true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 7
-};
-
-static struct block_defs block_xsdm_defs = {
- "xsdm",
- {true, true, true}, true, DBG_XSTORM_ID,
- {DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCX},
- XSDM_REG_DBG_SELECT, XSDM_REG_DBG_DWORD_ENABLE,
- XSDM_REG_DBG_SHIFT, XSDM_REG_DBG_FORCE_VALID,
- XSDM_REG_DBG_FORCE_FRAME,
- true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 20
-};
-
-static struct block_defs block_ysdm_defs = {
- "ysdm",
- {true, true, true}, true, DBG_YSTORM_ID,
- {DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCY, DBG_BUS_CLIENT_RBCY},
- YSDM_REG_DBG_SELECT, YSDM_REG_DBG_DWORD_ENABLE,
- YSDM_REG_DBG_SHIFT, YSDM_REG_DBG_FORCE_VALID,
- YSDM_REG_DBG_FORCE_FRAME,
- true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 8
-};
-
-static struct block_defs block_psdm_defs = {
- "psdm",
- {true, true, true}, true, DBG_PSTORM_ID,
- {DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCS},
- PSDM_REG_DBG_SELECT, PSDM_REG_DBG_DWORD_ENABLE,
- PSDM_REG_DBG_SHIFT, PSDM_REG_DBG_FORCE_VALID,
- PSDM_REG_DBG_FORCE_FRAME,
- true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 7
-};
-
-static struct block_defs block_tsem_defs = {
- "tsem",
- {true, true, true}, true, DBG_TSTORM_ID,
- {DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCT},
- TSEM_REG_DBG_SELECT, TSEM_REG_DBG_DWORD_ENABLE,
- TSEM_REG_DBG_SHIFT, TSEM_REG_DBG_FORCE_VALID,
- TSEM_REG_DBG_FORCE_FRAME,
- true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 4
-};
-
-static struct block_defs block_msem_defs = {
- "msem",
- {true, true, true}, true, DBG_MSTORM_ID,
- {DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCM, DBG_BUS_CLIENT_RBCM},
- MSEM_REG_DBG_SELECT, MSEM_REG_DBG_DWORD_ENABLE,
- MSEM_REG_DBG_SHIFT, MSEM_REG_DBG_FORCE_VALID,
- MSEM_REG_DBG_FORCE_FRAME,
- true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 9
-};
-
-static struct block_defs block_usem_defs = {
- "usem",
- {true, true, true}, true, DBG_USTORM_ID,
- {DBG_BUS_CLIENT_RBCU, DBG_BUS_CLIENT_RBCU, DBG_BUS_CLIENT_RBCU},
- USEM_REG_DBG_SELECT, USEM_REG_DBG_DWORD_ENABLE,
- USEM_REG_DBG_SHIFT, USEM_REG_DBG_FORCE_VALID,
- USEM_REG_DBG_FORCE_FRAME,
- true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 9
-};
-
-static struct block_defs block_xsem_defs = {
- "xsem",
- {true, true, true}, true, DBG_XSTORM_ID,
- {DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCX},
- XSEM_REG_DBG_SELECT, XSEM_REG_DBG_DWORD_ENABLE,
- XSEM_REG_DBG_SHIFT, XSEM_REG_DBG_FORCE_VALID,
- XSEM_REG_DBG_FORCE_FRAME,
- true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 21
-};
-
-static struct block_defs block_ysem_defs = {
- "ysem",
- {true, true, true}, true, DBG_YSTORM_ID,
- {DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCY, DBG_BUS_CLIENT_RBCY},
- YSEM_REG_DBG_SELECT, YSEM_REG_DBG_DWORD_ENABLE,
- YSEM_REG_DBG_SHIFT, YSEM_REG_DBG_FORCE_VALID,
- YSEM_REG_DBG_FORCE_FRAME,
- true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 11
-};
-
-static struct block_defs block_psem_defs = {
- "psem",
- {true, true, true}, true, DBG_PSTORM_ID,
- {DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCS},
- PSEM_REG_DBG_SELECT, PSEM_REG_DBG_DWORD_ENABLE,
- PSEM_REG_DBG_SHIFT, PSEM_REG_DBG_FORCE_VALID,
- PSEM_REG_DBG_FORCE_FRAME,
- true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 10
-};
-
-static struct block_defs block_rss_defs = {
- "rss",
- {true, true, true}, false, 0,
- {DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCT},
- RSS_REG_DBG_SELECT, RSS_REG_DBG_DWORD_ENABLE,
- RSS_REG_DBG_SHIFT, RSS_REG_DBG_FORCE_VALID,
- RSS_REG_DBG_FORCE_FRAME,
- true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 18
-};
-
-static struct block_defs block_tmld_defs = {
- "tmld",
- {true, true, true}, false, 0,
- {DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCM, DBG_BUS_CLIENT_RBCM},
- TMLD_REG_DBG_SELECT, TMLD_REG_DBG_DWORD_ENABLE,
- TMLD_REG_DBG_SHIFT, TMLD_REG_DBG_FORCE_VALID,
- TMLD_REG_DBG_FORCE_FRAME,
- true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 13
-};
-
-static struct block_defs block_muld_defs = {
- "muld",
- {true, true, true}, false, 0,
- {DBG_BUS_CLIENT_RBCU, DBG_BUS_CLIENT_RBCU, DBG_BUS_CLIENT_RBCU},
- MULD_REG_DBG_SELECT, MULD_REG_DBG_DWORD_ENABLE,
- MULD_REG_DBG_SHIFT, MULD_REG_DBG_FORCE_VALID,
- MULD_REG_DBG_FORCE_FRAME,
- true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 14
-};
-
-static struct block_defs block_yuld_defs = {
- "yuld",
- {true, true, false}, false, 0,
- {DBG_BUS_CLIENT_RBCU, DBG_BUS_CLIENT_RBCU,
- MAX_DBG_BUS_CLIENTS},
- YULD_REG_DBG_SELECT_BB_K2, YULD_REG_DBG_DWORD_ENABLE_BB_K2,
- YULD_REG_DBG_SHIFT_BB_K2, YULD_REG_DBG_FORCE_VALID_BB_K2,
- YULD_REG_DBG_FORCE_FRAME_BB_K2,
- true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2,
- 15
-};
-
-static struct block_defs block_xyld_defs = {
- "xyld",
- {true, true, true}, false, 0,
- {DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCX},
- XYLD_REG_DBG_SELECT, XYLD_REG_DBG_DWORD_ENABLE,
- XYLD_REG_DBG_SHIFT, XYLD_REG_DBG_FORCE_VALID,
- XYLD_REG_DBG_FORCE_FRAME,
- true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 12
-};
-
-static struct block_defs block_ptld_defs = {
- "ptld",
- {false, false, true}, false, 0,
- {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCT},
- PTLD_REG_DBG_SELECT_E5, PTLD_REG_DBG_DWORD_ENABLE_E5,
- PTLD_REG_DBG_SHIFT_E5, PTLD_REG_DBG_FORCE_VALID_E5,
- PTLD_REG_DBG_FORCE_FRAME_E5,
- true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2,
- 28
-};
-
-static struct block_defs block_ypld_defs = {
- "ypld",
- {false, false, true}, false, 0,
- {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCS},
- YPLD_REG_DBG_SELECT_E5, YPLD_REG_DBG_DWORD_ENABLE_E5,
- YPLD_REG_DBG_SHIFT_E5, YPLD_REG_DBG_FORCE_VALID_E5,
- YPLD_REG_DBG_FORCE_FRAME_E5,
- true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2,
- 27
-};
-
-static struct block_defs block_prm_defs = {
- "prm",
- {true, true, true}, false, 0,
- {DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCM, DBG_BUS_CLIENT_RBCM},
- PRM_REG_DBG_SELECT, PRM_REG_DBG_DWORD_ENABLE,
- PRM_REG_DBG_SHIFT, PRM_REG_DBG_FORCE_VALID,
- PRM_REG_DBG_FORCE_FRAME,
- true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 21
-};
-
-static struct block_defs block_pbf_pb1_defs = {
- "pbf_pb1",
- {true, true, true}, false, 0,
- {DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCV, DBG_BUS_CLIENT_RBCV},
- PBF_PB1_REG_DBG_SELECT, PBF_PB1_REG_DBG_DWORD_ENABLE,
- PBF_PB1_REG_DBG_SHIFT, PBF_PB1_REG_DBG_FORCE_VALID,
- PBF_PB1_REG_DBG_FORCE_FRAME,
- true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1,
- 11
-};
-
-static struct block_defs block_pbf_pb2_defs = {
- "pbf_pb2",
- {true, true, true}, false, 0,
- {DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCV, DBG_BUS_CLIENT_RBCV},
- PBF_PB2_REG_DBG_SELECT, PBF_PB2_REG_DBG_DWORD_ENABLE,
- PBF_PB2_REG_DBG_SHIFT, PBF_PB2_REG_DBG_FORCE_VALID,
- PBF_PB2_REG_DBG_FORCE_FRAME,
- true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1,
- 12
-};
-
-static struct block_defs block_rpb_defs = {
- "rpb",
- {true, true, true}, false, 0,
- {DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCM, DBG_BUS_CLIENT_RBCM},
- RPB_REG_DBG_SELECT, RPB_REG_DBG_DWORD_ENABLE,
- RPB_REG_DBG_SHIFT, RPB_REG_DBG_FORCE_VALID,
- RPB_REG_DBG_FORCE_FRAME,
- true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 13
-};
-
-static struct block_defs block_btb_defs = {
- "btb",
- {true, true, true}, false, 0,
- {DBG_BUS_CLIENT_RBCR, DBG_BUS_CLIENT_RBCV, DBG_BUS_CLIENT_RBCV},
- BTB_REG_DBG_SELECT, BTB_REG_DBG_DWORD_ENABLE,
- BTB_REG_DBG_SHIFT, BTB_REG_DBG_FORCE_VALID,
- BTB_REG_DBG_FORCE_FRAME,
- true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 10
-};
-
-static struct block_defs block_pbf_defs = {
- "pbf",
- {true, true, true}, false, 0,
- {DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCV, DBG_BUS_CLIENT_RBCV},
- PBF_REG_DBG_SELECT, PBF_REG_DBG_DWORD_ENABLE,
- PBF_REG_DBG_SHIFT, PBF_REG_DBG_FORCE_VALID,
- PBF_REG_DBG_FORCE_FRAME,
- true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 15
-};
-
-static struct block_defs block_rdif_defs = {
- "rdif",
- {true, true, true}, false, 0,
- {DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCM, DBG_BUS_CLIENT_RBCM},
- RDIF_REG_DBG_SELECT, RDIF_REG_DBG_DWORD_ENABLE,
- RDIF_REG_DBG_SHIFT, RDIF_REG_DBG_FORCE_VALID,
- RDIF_REG_DBG_FORCE_FRAME,
- true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 16
-};
-
-static struct block_defs block_tdif_defs = {
- "tdif",
- {true, true, true}, false, 0,
- {DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCS},
- TDIF_REG_DBG_SELECT, TDIF_REG_DBG_DWORD_ENABLE,
- TDIF_REG_DBG_SHIFT, TDIF_REG_DBG_FORCE_VALID,
- TDIF_REG_DBG_FORCE_FRAME,
- true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 17
-};
-
-static struct block_defs block_cdu_defs = {
- "cdu",
- {true, true, true}, false, 0,
- {DBG_BUS_CLIENT_RBCF, DBG_BUS_CLIENT_RBCF, DBG_BUS_CLIENT_RBCF},
- CDU_REG_DBG_SELECT, CDU_REG_DBG_DWORD_ENABLE,
- CDU_REG_DBG_SHIFT, CDU_REG_DBG_FORCE_VALID,
- CDU_REG_DBG_FORCE_FRAME,
- true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 23
-};
-
-static struct block_defs block_ccfc_defs = {
- "ccfc",
- {true, true, true}, false, 0,
- {DBG_BUS_CLIENT_RBCF, DBG_BUS_CLIENT_RBCF, DBG_BUS_CLIENT_RBCF},
- CCFC_REG_DBG_SELECT, CCFC_REG_DBG_DWORD_ENABLE,
- CCFC_REG_DBG_SHIFT, CCFC_REG_DBG_FORCE_VALID,
- CCFC_REG_DBG_FORCE_FRAME,
- true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 24
-};
-
-static struct block_defs block_tcfc_defs = {
- "tcfc",
- {true, true, true}, false, 0,
- {DBG_BUS_CLIENT_RBCF, DBG_BUS_CLIENT_RBCF, DBG_BUS_CLIENT_RBCF},
- TCFC_REG_DBG_SELECT, TCFC_REG_DBG_DWORD_ENABLE,
- TCFC_REG_DBG_SHIFT, TCFC_REG_DBG_FORCE_VALID,
- TCFC_REG_DBG_FORCE_FRAME,
- true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 25
-};
-
-static struct block_defs block_igu_defs = {
- "igu",
- {true, true, true}, false, 0,
- {DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP},
- IGU_REG_DBG_SELECT, IGU_REG_DBG_DWORD_ENABLE,
- IGU_REG_DBG_SHIFT, IGU_REG_DBG_FORCE_VALID,
- IGU_REG_DBG_FORCE_FRAME,
- true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 27
-};
-
-static struct block_defs block_cau_defs = {
- "cau",
- {true, true, true}, false, 0,
- {DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP},
- CAU_REG_DBG_SELECT, CAU_REG_DBG_DWORD_ENABLE,
- CAU_REG_DBG_SHIFT, CAU_REG_DBG_FORCE_VALID,
- CAU_REG_DBG_FORCE_FRAME,
- true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 19
-};
-
-static struct block_defs block_rgfs_defs = {
- "rgfs", {false, false, true}, false, 0,
- {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
- 0, 0, 0, 0, 0,
- true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 29
-};
-
-static struct block_defs block_rgsrc_defs = {
- "rgsrc",
- {false, false, true}, false, 0,
- {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCH},
- RGSRC_REG_DBG_SELECT_E5, RGSRC_REG_DBG_DWORD_ENABLE_E5,
- RGSRC_REG_DBG_SHIFT_E5, RGSRC_REG_DBG_FORCE_VALID_E5,
- RGSRC_REG_DBG_FORCE_FRAME_E5,
- true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1,
- 30
-};
-
-static struct block_defs block_tgfs_defs = {
- "tgfs", {false, false, true}, false, 0,
- {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
- 0, 0, 0, 0, 0,
- true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 30
-};
-
-static struct block_defs block_tgsrc_defs = {
- "tgsrc",
- {false, false, true}, false, 0,
- {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCV},
- TGSRC_REG_DBG_SELECT_E5, TGSRC_REG_DBG_DWORD_ENABLE_E5,
- TGSRC_REG_DBG_SHIFT_E5, TGSRC_REG_DBG_FORCE_VALID_E5,
- TGSRC_REG_DBG_FORCE_FRAME_E5,
- true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1,
- 31
-};
-
-static struct block_defs block_umac_defs = {
- "umac",
- {true, true, true}, false, 0,
- {MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCZ,
- DBG_BUS_CLIENT_RBCZ},
- UMAC_REG_DBG_SELECT_K2_E5, UMAC_REG_DBG_DWORD_ENABLE_K2_E5,
- UMAC_REG_DBG_SHIFT_K2_E5, UMAC_REG_DBG_FORCE_VALID_K2_E5,
- UMAC_REG_DBG_FORCE_FRAME_K2_E5,
- true, false, DBG_RESET_REG_MISCS_PL_HV, 6
-};
-
-static struct block_defs block_xmac_defs = {
- "xmac", {true, false, false}, false, 0,
- {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
- 0, 0, 0, 0, 0,
- false, false, MAX_DBG_RESET_REGS, 0
-};
-
-static struct block_defs block_dbg_defs = {
- "dbg", {true, true, true}, false, 0,
- {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
- 0, 0, 0, 0, 0,
- true, true, DBG_RESET_REG_MISC_PL_PDA_VAUX, 3
-};
-
-static struct block_defs block_nig_defs = {
- "nig",
- {true, true, true}, false, 0,
- {DBG_BUS_CLIENT_RBCN, DBG_BUS_CLIENT_RBCN, DBG_BUS_CLIENT_RBCN},
- NIG_REG_DBG_SELECT, NIG_REG_DBG_DWORD_ENABLE,
- NIG_REG_DBG_SHIFT, NIG_REG_DBG_FORCE_VALID,
- NIG_REG_DBG_FORCE_FRAME,
- true, true, DBG_RESET_REG_MISC_PL_PDA_VAUX, 0
-};
-
-static struct block_defs block_wol_defs = {
- "wol",
- {false, true, true}, false, 0,
- {MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCZ, DBG_BUS_CLIENT_RBCZ},
- WOL_REG_DBG_SELECT_K2_E5, WOL_REG_DBG_DWORD_ENABLE_K2_E5,
- WOL_REG_DBG_SHIFT_K2_E5, WOL_REG_DBG_FORCE_VALID_K2_E5,
- WOL_REG_DBG_FORCE_FRAME_K2_E5,
- true, true, DBG_RESET_REG_MISC_PL_PDA_VAUX, 7
-};
-
-static struct block_defs block_bmbn_defs = {
- "bmbn",
- {false, true, true}, false, 0,
- {MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCB,
- DBG_BUS_CLIENT_RBCB},
- BMBN_REG_DBG_SELECT_K2_E5, BMBN_REG_DBG_DWORD_ENABLE_K2_E5,
- BMBN_REG_DBG_SHIFT_K2_E5, BMBN_REG_DBG_FORCE_VALID_K2_E5,
- BMBN_REG_DBG_FORCE_FRAME_K2_E5,
- false, false, MAX_DBG_RESET_REGS, 0
-};
-
-static struct block_defs block_ipc_defs = {
- "ipc", {true, true, true}, false, 0,
- {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
- 0, 0, 0, 0, 0,
- true, false, DBG_RESET_REG_MISCS_PL_UA, 8
-};
-
-static struct block_defs block_nwm_defs = {
- "nwm",
- {false, true, true}, false, 0,
- {MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCW, DBG_BUS_CLIENT_RBCW},
- NWM_REG_DBG_SELECT_K2_E5, NWM_REG_DBG_DWORD_ENABLE_K2_E5,
- NWM_REG_DBG_SHIFT_K2_E5, NWM_REG_DBG_FORCE_VALID_K2_E5,
- NWM_REG_DBG_FORCE_FRAME_K2_E5,
- true, false, DBG_RESET_REG_MISCS_PL_HV_2, 0
-};
-
-static struct block_defs block_nws_defs = {
- "nws",
- {false, true, true}, false, 0,
- {MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCW, DBG_BUS_CLIENT_RBCW},
- NWS_REG_DBG_SELECT_K2_E5, NWS_REG_DBG_DWORD_ENABLE_K2_E5,
- NWS_REG_DBG_SHIFT_K2_E5, NWS_REG_DBG_FORCE_VALID_K2_E5,
- NWS_REG_DBG_FORCE_FRAME_K2_E5,
- true, false, DBG_RESET_REG_MISCS_PL_HV, 12
-};
-
-static struct block_defs block_ms_defs = {
- "ms",
- {false, true, true}, false, 0,
- {MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCZ, DBG_BUS_CLIENT_RBCZ},
- MS_REG_DBG_SELECT_K2_E5, MS_REG_DBG_DWORD_ENABLE_K2_E5,
- MS_REG_DBG_SHIFT_K2_E5, MS_REG_DBG_FORCE_VALID_K2_E5,
- MS_REG_DBG_FORCE_FRAME_K2_E5,
- true, false, DBG_RESET_REG_MISCS_PL_HV, 13
-};
-
-static struct block_defs block_phy_pcie_defs = {
- "phy_pcie",
- {false, true, true}, false, 0,
- {MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCH,
- DBG_BUS_CLIENT_RBCH},
- PCIE_REG_DBG_COMMON_SELECT_K2_E5,
- PCIE_REG_DBG_COMMON_DWORD_ENABLE_K2_E5,
- PCIE_REG_DBG_COMMON_SHIFT_K2_E5,
- PCIE_REG_DBG_COMMON_FORCE_VALID_K2_E5,
- PCIE_REG_DBG_COMMON_FORCE_FRAME_K2_E5,
- false, false, MAX_DBG_RESET_REGS, 0
-};
-
-static struct block_defs block_led_defs = {
- "led", {false, true, true}, false, 0,
- {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
- 0, 0, 0, 0, 0,
- true, false, DBG_RESET_REG_MISCS_PL_HV, 14
-};
-
-static struct block_defs block_avs_wrap_defs = {
- "avs_wrap", {false, true, false}, false, 0,
- {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
- 0, 0, 0, 0, 0,
- true, false, DBG_RESET_REG_MISCS_PL_UA, 11
-};
-
-static struct block_defs block_pxpreqbus_defs = {
- "pxpreqbus", {false, false, false}, false, 0,
- {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
- 0, 0, 0, 0, 0,
- false, false, MAX_DBG_RESET_REGS, 0
-};
-
-static struct block_defs block_misc_aeu_defs = {
- "misc_aeu", {true, true, true}, false, 0,
- {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
- 0, 0, 0, 0, 0,
- false, false, MAX_DBG_RESET_REGS, 0
-};
-
-static struct block_defs block_bar0_map_defs = {
- "bar0_map", {true, true, true}, false, 0,
- {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
- 0, 0, 0, 0, 0,
- false, false, MAX_DBG_RESET_REGS, 0
-};
-
-static struct block_defs *s_block_defs[MAX_BLOCK_ID] = {
- &block_grc_defs,
- &block_miscs_defs,
- &block_misc_defs,
- &block_dbu_defs,
- &block_pglue_b_defs,
- &block_cnig_defs,
- &block_cpmu_defs,
- &block_ncsi_defs,
- &block_opte_defs,
- &block_bmb_defs,
- &block_pcie_defs,
- &block_mcp_defs,
- &block_mcp2_defs,
- &block_pswhst_defs,
- &block_pswhst2_defs,
- &block_pswrd_defs,
- &block_pswrd2_defs,
- &block_pswwr_defs,
- &block_pswwr2_defs,
- &block_pswrq_defs,
- &block_pswrq2_defs,
- &block_pglcs_defs,
- &block_dmae_defs,
- &block_ptu_defs,
- &block_tcm_defs,
- &block_mcm_defs,
- &block_ucm_defs,
- &block_xcm_defs,
- &block_ycm_defs,
- &block_pcm_defs,
- &block_qm_defs,
- &block_tm_defs,
- &block_dorq_defs,
- &block_brb_defs,
- &block_src_defs,
- &block_prs_defs,
- &block_tsdm_defs,
- &block_msdm_defs,
- &block_usdm_defs,
- &block_xsdm_defs,
- &block_ysdm_defs,
- &block_psdm_defs,
- &block_tsem_defs,
- &block_msem_defs,
- &block_usem_defs,
- &block_xsem_defs,
- &block_ysem_defs,
- &block_psem_defs,
- &block_rss_defs,
- &block_tmld_defs,
- &block_muld_defs,
- &block_yuld_defs,
- &block_xyld_defs,
- &block_ptld_defs,
- &block_ypld_defs,
- &block_prm_defs,
- &block_pbf_pb1_defs,
- &block_pbf_pb2_defs,
- &block_rpb_defs,
- &block_btb_defs,
- &block_pbf_defs,
- &block_rdif_defs,
- &block_tdif_defs,
- &block_cdu_defs,
- &block_ccfc_defs,
- &block_tcfc_defs,
- &block_igu_defs,
- &block_cau_defs,
- &block_rgfs_defs,
- &block_rgsrc_defs,
- &block_tgfs_defs,
- &block_tgsrc_defs,
- &block_umac_defs,
- &block_xmac_defs,
- &block_dbg_defs,
- &block_nig_defs,
- &block_wol_defs,
- &block_bmbn_defs,
- &block_ipc_defs,
- &block_nwm_defs,
- &block_nws_defs,
- &block_ms_defs,
- &block_phy_pcie_defs,
- &block_led_defs,
- &block_avs_wrap_defs,
- &block_pxpreqbus_defs,
- &block_misc_aeu_defs,
- &block_bar0_map_defs,
-};
-
-static struct platform_defs s_platform_defs[] = {
+ {DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCS},
+ true,
+ PSEM_REG_FAST_MEMORY,
+ PSEM_REG_DBG_FRAME_MODE_BB_K2,
+ PSEM_REG_SLOW_DBG_ACTIVE_BB_K2,
+ PSEM_REG_SLOW_DBG_MODE_BB_K2,
+ PSEM_REG_DBG_MODE1_CFG_BB_K2,
+ PSEM_REG_SYNC_DBG_EMPTY,
+ PSEM_REG_DBG_GPRE_VECT,
+ PCM_REG_CTX_RBC_ACCS,
+ {0, PCM_REG_SM_CON_CTX, 0, 0},
+ {{0, 10, 0, 0}, {0, 10, 0, 0}} /* {bb} {k2} */
+ },
+};
+
+static struct hw_type_defs s_hw_type_defs[] = {
+ /* HW_TYPE_ASIC */
{"asic", 1, 256, 32768},
{"reserved", 0, 0, 0},
{"reserved2", 0, 0, 0},
@@ -1505,146 +579,159 @@ static struct platform_defs s_platform_defs[] = {
static struct grc_param_defs s_grc_param_defs[] = {
/* DBG_GRC_PARAM_DUMP_TSTORM */
- {{1, 1, 1}, 0, 1, false, false, 1, 1},
+ {{1, 1}, 0, 1, false, false, 1, {1, 1}},
/* DBG_GRC_PARAM_DUMP_MSTORM */
- {{1, 1, 1}, 0, 1, false, false, 1, 1},
+ {{1, 1}, 0, 1, false, false, 1, {1, 1}},
/* DBG_GRC_PARAM_DUMP_USTORM */
- {{1, 1, 1}, 0, 1, false, false, 1, 1},
+ {{1, 1}, 0, 1, false, false, 1, {1, 1}},
/* DBG_GRC_PARAM_DUMP_XSTORM */
- {{1, 1, 1}, 0, 1, false, false, 1, 1},
+ {{1, 1}, 0, 1, false, false, 1, {1, 1}},
/* DBG_GRC_PARAM_DUMP_YSTORM */
- {{1, 1, 1}, 0, 1, false, false, 1, 1},
+ {{1, 1}, 0, 1, false, false, 1, {1, 1}},
/* DBG_GRC_PARAM_DUMP_PSTORM */
- {{1, 1, 1}, 0, 1, false, false, 1, 1},
+ {{1, 1}, 0, 1, false, false, 1, {1, 1}},
/* DBG_GRC_PARAM_DUMP_REGS */
- {{1, 1, 1}, 0, 1, false, false, 0, 1},
+ {{1, 1}, 0, 1, false, false, 0, {1, 1}},
/* DBG_GRC_PARAM_DUMP_RAM */
- {{1, 1, 1}, 0, 1, false, false, 0, 1},
+ {{1, 1}, 0, 1, false, false, 0, {1, 1}},
/* DBG_GRC_PARAM_DUMP_PBUF */
- {{1, 1, 1}, 0, 1, false, false, 0, 1},
+ {{1, 1}, 0, 1, false, false, 0, {1, 1}},
/* DBG_GRC_PARAM_DUMP_IOR */
- {{0, 0, 0}, 0, 1, false, false, 0, 1},
+ {{0, 0}, 0, 1, false, false, 0, {1, 1}},
/* DBG_GRC_PARAM_DUMP_VFC */
- {{0, 0, 0}, 0, 1, false, false, 0, 1},
+ {{0, 0}, 0, 1, false, false, 0, {1, 1}},
/* DBG_GRC_PARAM_DUMP_CM_CTX */
- {{1, 1, 1}, 0, 1, false, false, 0, 1},
+ {{1, 1}, 0, 1, false, false, 0, {1, 1}},
/* DBG_GRC_PARAM_DUMP_ILT */
- {{1, 1, 1}, 0, 1, false, false, 0, 1},
+ {{1, 1}, 0, 1, false, false, 0, {1, 1}},
/* DBG_GRC_PARAM_DUMP_RSS */
- {{1, 1, 1}, 0, 1, false, false, 0, 1},
+ {{1, 1}, 0, 1, false, false, 0, {1, 1}},
/* DBG_GRC_PARAM_DUMP_CAU */
- {{1, 1, 1}, 0, 1, false, false, 0, 1},
+ {{1, 1}, 0, 1, false, false, 0, {1, 1}},
/* DBG_GRC_PARAM_DUMP_QM */
- {{1, 1, 1}, 0, 1, false, false, 0, 1},
+ {{1, 1}, 0, 1, false, false, 0, {1, 1}},
/* DBG_GRC_PARAM_DUMP_MCP */
- {{1, 1, 1}, 0, 1, false, false, 0, 1},
+ {{1, 1}, 0, 1, false, false, 0, {1, 1}},
- /* DBG_GRC_PARAM_MCP_TRACE_META_SIZE */
- {{1, 1, 1}, 1, 0xffffffff, false, true, 0, 1},
+ /* DBG_GRC_PARAM_DUMP_DORQ */
+ {{1, 1}, 0, 1, false, false, 0, {1, 1}},
/* DBG_GRC_PARAM_DUMP_CFC */
- {{1, 1, 1}, 0, 1, false, false, 0, 1},
+ {{1, 1}, 0, 1, false, false, 0, {1, 1}},
/* DBG_GRC_PARAM_DUMP_IGU */
- {{1, 1, 1}, 0, 1, false, false, 0, 1},
+ {{1, 1}, 0, 1, false, false, 0, {1, 1}},
/* DBG_GRC_PARAM_DUMP_BRB */
- {{0, 0, 0}, 0, 1, false, false, 0, 1},
+ {{0, 0}, 0, 1, false, false, 0, {1, 1}},
/* DBG_GRC_PARAM_DUMP_BTB */
- {{0, 0, 0}, 0, 1, false, false, 0, 1},
+ {{0, 0}, 0, 1, false, false, 0, {1, 1}},
/* DBG_GRC_PARAM_DUMP_BMB */
- {{0, 0, 0}, 0, 1, false, false, 0, 0},
+ {{0, 0}, 0, 1, false, false, 0, {0, 0}},
- /* DBG_GRC_PARAM_DUMP_NIG */
- {{1, 1, 1}, 0, 1, false, false, 0, 1},
+ /* DBG_GRC_PARAM_RESERVED1 */
+ {{0, 0}, 0, 1, false, false, 0, {0, 0}},
/* DBG_GRC_PARAM_DUMP_MULD */
- {{1, 1, 1}, 0, 1, false, false, 0, 1},
+ {{1, 1}, 0, 1, false, false, 0, {1, 1}},
/* DBG_GRC_PARAM_DUMP_PRS */
- {{1, 1, 1}, 0, 1, false, false, 0, 1},
+ {{1, 1}, 0, 1, false, false, 0, {1, 1}},
/* DBG_GRC_PARAM_DUMP_DMAE */
- {{1, 1, 1}, 0, 1, false, false, 0, 1},
+ {{1, 1}, 0, 1, false, false, 0, {1, 1}},
/* DBG_GRC_PARAM_DUMP_TM */
- {{1, 1, 1}, 0, 1, false, false, 0, 1},
+ {{1, 1}, 0, 1, false, false, 0, {1, 1}},
/* DBG_GRC_PARAM_DUMP_SDM */
- {{1, 1, 1}, 0, 1, false, false, 0, 1},
+ {{1, 1}, 0, 1, false, false, 0, {1, 1}},
/* DBG_GRC_PARAM_DUMP_DIF */
- {{1, 1, 1}, 0, 1, false, false, 0, 1},
+ {{1, 1}, 0, 1, false, false, 0, {1, 1}},
/* DBG_GRC_PARAM_DUMP_STATIC */
- {{1, 1, 1}, 0, 1, false, false, 0, 1},
+ {{1, 1}, 0, 1, false, false, 0, {1, 1}},
/* DBG_GRC_PARAM_UNSTALL */
- {{0, 0, 0}, 0, 1, false, false, 0, 0},
+ {{0, 0}, 0, 1, false, false, 0, {0, 0}},
- /* DBG_GRC_PARAM_NUM_LCIDS */
- {{MAX_LCIDS, MAX_LCIDS, MAX_LCIDS}, 1, MAX_LCIDS, false, false,
- MAX_LCIDS, MAX_LCIDS},
+ /* DBG_GRC_PARAM_RESERVED2 */
+ {{0, 0}, 0, 1, false, false, 0, {0, 0}},
- /* DBG_GRC_PARAM_NUM_LTIDS */
- {{MAX_LTIDS, MAX_LTIDS, MAX_LTIDS}, 1, MAX_LTIDS, false, false,
- MAX_LTIDS, MAX_LTIDS},
+ /* DBG_GRC_PARAM_MCP_TRACE_META_SIZE */
+ {{0, 0}, 1, 0xffffffff, false, true, 0, {0, 0}},
/* DBG_GRC_PARAM_EXCLUDE_ALL */
- {{0, 0, 0}, 0, 1, true, false, 0, 0},
+ {{0, 0}, 0, 1, true, false, 0, {0, 0}},
/* DBG_GRC_PARAM_CRASH */
- {{0, 0, 0}, 0, 1, true, false, 0, 0},
+ {{0, 0}, 0, 1, true, false, 0, {0, 0}},
/* DBG_GRC_PARAM_PARITY_SAFE */
- {{0, 0, 0}, 0, 1, false, false, 1, 0},
+ {{0, 0}, 0, 1, false, false, 0, {0, 0}},
/* DBG_GRC_PARAM_DUMP_CM */
- {{1, 1, 1}, 0, 1, false, false, 0, 1},
+ {{1, 1}, 0, 1, false, false, 0, {1, 1}},
/* DBG_GRC_PARAM_DUMP_PHY */
- {{1, 1, 1}, 0, 1, false, false, 0, 1},
+ {{0, 0}, 0, 1, false, false, 0, {0, 0}},
/* DBG_GRC_PARAM_NO_MCP */
- {{0, 0, 0}, 0, 1, false, false, 0, 0},
+ {{0, 0}, 0, 1, false, false, 0, {0, 0}},
/* DBG_GRC_PARAM_NO_FW_VER */
- {{0, 0, 0}, 0, 1, false, false, 0, 0}
+ {{0, 0}, 0, 1, false, false, 0, {0, 0}},
+
+ /* DBG_GRC_PARAM_RESERVED3 */
+ {{0, 0}, 0, 1, false, false, 0, {0, 0}},
+
+ /* DBG_GRC_PARAM_DUMP_MCP_HW_DUMP */
+ {{0, 1}, 0, 1, false, false, 0, {0, 1}},
+
+ /* DBG_GRC_PARAM_DUMP_ILT_CDUC */
+ {{1, 1}, 0, 1, false, false, 0, {0, 0}},
+
+ /* DBG_GRC_PARAM_DUMP_ILT_CDUT */
+ {{1, 1}, 0, 1, false, false, 0, {0, 0}},
+
+ /* DBG_GRC_PARAM_DUMP_CAU_EXT */
+ {{0, 0}, 0, 1, false, false, 0, {1, 1}}
};
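
The rows above follow a fixed shape: a per-chip default, a [min, max] range, two preset flags, and per-preset values. A minimal sketch of validating a user-supplied GRC param against such a row follows; the field names are assumptions inferred from the initializers.

#include <assert.h>
#include <stdbool.h>
#include <stdint.h>

#define MAX_CHIP_IDS 2 /* bb, k2 */

struct grc_param_defs_sketch {
	uint32_t default_val[MAX_CHIP_IDS];
	uint32_t min, max;
	bool is_preset;
	bool is_persistent;
	uint32_t exclude_all_preset_val;
	uint32_t crash_preset_val[MAX_CHIP_IDS];
};

/* Clamp a user value into [min, max]; fall back to the per-chip default */
static uint32_t grc_param_value(const struct grc_param_defs_sketch *p,
				uint32_t user_val, bool user_set, int chip_id)
{
	if (!user_set)
		return p->default_val[chip_id];
	if (user_val < p->min)
		return p->min;
	return user_val > p->max ? p->max : user_val;
}

int main(void)
{
	/* shaped like the DBG_GRC_PARAM_MCP_TRACE_META_SIZE row above */
	const struct grc_param_defs_sketch meta_size = {
		{0, 0}, 1, 0xffffffff, false, true, 0, {0, 0}
	};

	assert(grc_param_value(&meta_size, 0, true, 0) == 1);	/* clamped up */
	assert(grc_param_value(&meta_size, 0, false, 1) == 0);	/* default */
	return 0;
}
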
static struct rss_mem_defs s_rss_mem_defs[] = {
- { "rss_mem_cid", "rss_cid", 0, 32,
- {256, 320, 512} },
+ {"rss_mem_cid", "rss_cid", 0, 32,
+ {256, 320}},
- { "rss_mem_key_msb", "rss_key", 1024, 256,
- {128, 208, 257} },
+ {"rss_mem_key_msb", "rss_key", 1024, 256,
+ {128, 208}},
- { "rss_mem_key_lsb", "rss_key", 2048, 64,
- {128, 208, 257} },
+ {"rss_mem_key_lsb", "rss_key", 2048, 64,
+ {128, 208}},
- { "rss_mem_info", "rss_info", 3072, 16,
- {128, 208, 256} },
+ {"rss_mem_info", "rss_info", 3072, 16,
+ {128, 208}},
- { "rss_mem_ind", "rss_ind", 4096, 16,
- {16384, 26624, 32768} }
+ {"rss_mem_ind", "rss_ind", 4096, 16,
+ {16384, 26624}}
};
static struct vfc_ram_defs s_vfc_ram_defs[] = {
@@ -1655,54 +742,31 @@ static struct vfc_ram_defs s_vfc_ram_defs[] = {
};
static struct big_ram_defs s_big_ram_defs[] = {
- { "BRB", MEM_GROUP_BRB_MEM, MEM_GROUP_BRB_RAM, DBG_GRC_PARAM_DUMP_BRB,
- BRB_REG_BIG_RAM_ADDRESS, BRB_REG_BIG_RAM_DATA,
- MISC_REG_BLOCK_256B_EN, {0, 0, 0},
- {153600, 180224, 282624} },
-
- { "BTB", MEM_GROUP_BTB_MEM, MEM_GROUP_BTB_RAM, DBG_GRC_PARAM_DUMP_BTB,
- BTB_REG_BIG_RAM_ADDRESS, BTB_REG_BIG_RAM_DATA,
- MISC_REG_BLOCK_256B_EN, {0, 1, 1},
- {92160, 117760, 168960} },
-
- { "BMB", MEM_GROUP_BMB_MEM, MEM_GROUP_BMB_RAM, DBG_GRC_PARAM_DUMP_BMB,
- BMB_REG_BIG_RAM_ADDRESS, BMB_REG_BIG_RAM_DATA,
- MISCS_REG_BLOCK_256B_EN, {0, 0, 0},
- {36864, 36864, 36864} }
-};
-
-static struct reset_reg_defs s_reset_regs_defs[] = {
- /* DBG_RESET_REG_MISCS_PL_UA */
- { MISCS_REG_RESET_PL_UA,
- {true, true, true}, {0x0, 0x0, 0x0} },
-
- /* DBG_RESET_REG_MISCS_PL_HV */
- { MISCS_REG_RESET_PL_HV,
- {true, true, true}, {0x0, 0x400, 0x600} },
-
- /* DBG_RESET_REG_MISCS_PL_HV_2 */
- { MISCS_REG_RESET_PL_HV_2_K2_E5,
- {false, true, true}, {0x0, 0x0, 0x0} },
-
- /* DBG_RESET_REG_MISC_PL_UA */
- { MISC_REG_RESET_PL_UA,
- {true, true, true}, {0x0, 0x0, 0x0} },
-
- /* DBG_RESET_REG_MISC_PL_HV */
- { MISC_REG_RESET_PL_HV,
- {true, true, true}, {0x0, 0x0, 0x0} },
+ {"BRB", MEM_GROUP_BRB_MEM, MEM_GROUP_BRB_RAM, DBG_GRC_PARAM_DUMP_BRB,
+ BRB_REG_BIG_RAM_ADDRESS, BRB_REG_BIG_RAM_DATA,
+ MISC_REG_BLOCK_256B_EN, {0, 0},
+ {153600, 180224}},
- /* DBG_RESET_REG_MISC_PL_PDA_VMAIN_1 */
- { MISC_REG_RESET_PL_PDA_VMAIN_1,
- {true, true, true}, {0x4404040, 0x4404040, 0x404040} },
+ {"BTB", MEM_GROUP_BTB_MEM, MEM_GROUP_BTB_RAM, DBG_GRC_PARAM_DUMP_BTB,
+ BTB_REG_BIG_RAM_ADDRESS, BTB_REG_BIG_RAM_DATA,
+ MISC_REG_BLOCK_256B_EN, {0, 1},
+ {92160, 117760}},
- /* DBG_RESET_REG_MISC_PL_PDA_VMAIN_2 */
- { MISC_REG_RESET_PL_PDA_VMAIN_2,
- {true, true, true}, {0x7, 0x7c00007, 0x5c08007} },
+ {"BMB", MEM_GROUP_BMB_MEM, MEM_GROUP_BMB_RAM, DBG_GRC_PARAM_DUMP_BMB,
+ BMB_REG_BIG_RAM_ADDRESS, BMB_REG_BIG_RAM_DATA,
+ MISCS_REG_BLOCK_256B_EN, {0, 0},
+ {36864, 36864}}
+};
- /* DBG_RESET_REG_MISC_PL_PDA_VAUX */
- { MISC_REG_RESET_PL_PDA_VAUX,
- {true, true, true}, {0x2, 0x2, 0x2} },
+static struct rbc_reset_defs s_rbc_reset_defs[] = {
+ {MISCS_REG_RESET_PL_HV,
+ {0x0, 0x400}},
+ {MISC_REG_RESET_PL_PDA_VMAIN_1,
+ {0x4404040, 0x4404040}},
+ {MISC_REG_RESET_PL_PDA_VMAIN_2,
+ {0x7, 0x7c00007}},
+ {MISC_REG_RESET_PL_PDA_VAUX,
+ {0x2, 0x2}},
};
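
s_rbc_reset_defs pairs a reset register with a per-chip value to write when taking RBC clients out of reset; qed_grc_unreset_blocks() further down writes it at reset_reg_addr + RESET_REG_UNRESET_OFFSET. Here is a standalone sketch of that loop with the register I/O stubbed out; the offset value is an assumption, not the hardware map.

#include <stdint.h>

#define MAX_CHIP_IDS 2
#define RESET_REG_UNRESET_OFFSET 4 /* assumed: unreset reg follows reset reg */

struct rbc_reset_defs_sketch {
	uint32_t reset_reg_addr;
	uint32_t reset_val[MAX_CHIP_IDS];
};

/* Stub standing in for qed_wr() */
static void reg_write(uint32_t addr, uint32_t val)
{
	(void)addr;
	(void)val;
}

static void unreset_rbcs(const struct rbc_reset_defs_sketch *defs,
			 unsigned int num_defs, int chip_id)
{
	unsigned int i;

	for (i = 0; i < num_defs; i++)
		if (defs[i].reset_val[chip_id]) /* 0: nothing to unreset */
			reg_write(defs[i].reset_reg_addr +
				  RESET_REG_UNRESET_OFFSET,
				  defs[i].reset_val[chip_id]);
}

int main(void)
{
	const struct rbc_reset_defs_sketch defs[] = {
		{0x100, {0x0, 0x400}},	/* written only on chip 1 */
	};

	unreset_rbcs(defs, 1, 1);
	return 0;
}
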
static struct phy_defs s_phy_defs[] = {
@@ -1785,9 +849,19 @@ static void qed_dbg_grc_init_params(struct qed_hwfn *p_hwfn)
}
}
+/* Sets pointer and size for the specified binary buffer type */
+static void qed_set_dbg_bin_buf(struct qed_hwfn *p_hwfn,
+ enum bin_dbg_buffer_type buf_type,
+ const u32 *ptr, u32 size)
+{
+ struct virt_mem_desc *buf = &p_hwfn->dbg_arrays[buf_type];
+
+ buf->ptr = (void *)ptr;
+ buf->size = size;
+}
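
qed_set_dbg_bin_buf() just records a (pointer, size) pair per buffer type; the accessors added below then index into these slices. A hypothetical registration sequence, standalone and with made-up buffer types and offsets:

#include <stdint.h>

enum bin_buf_sketch { BUF_BLOCKS, BUF_RESET_REGS, BUF_MODE_TREE, NUM_BUFS };

struct virt_mem_desc_sketch {
	void *ptr;
	uint32_t size; /* in bytes */
};

static struct virt_mem_desc_sketch dbg_arrays[NUM_BUFS];

/* Same pattern as qed_set_dbg_bin_buf(): record pointer and size per type */
static void set_bin_buf(enum bin_buf_sketch type, const uint32_t *ptr,
			uint32_t size)
{
	dbg_arrays[type].ptr = (void *)ptr;
	dbg_arrays[type].size = size;
}

int main(void)
{
	static const uint32_t image[16]; /* stand-in for fw debug data */

	/* illustrative offsets: one slice of the image per buffer type */
	set_bin_buf(BUF_BLOCKS, image, 8 * sizeof(uint32_t));
	set_bin_buf(BUF_RESET_REGS, image + 8, 4 * sizeof(uint32_t));
	set_bin_buf(BUF_MODE_TREE, image + 12, 4 * sizeof(uint32_t));
	return 0;
}
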
+
/* Initializes debug data for the specified device */
-static enum dbg_status qed_dbg_dev_init(struct qed_hwfn *p_hwfn,
- struct qed_ptt *p_ptt)
+static enum dbg_status qed_dbg_dev_init(struct qed_hwfn *p_hwfn)
{
struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
u8 num_pfs = 0, max_pfs_per_port = 0;
@@ -1812,26 +886,25 @@ static enum dbg_status qed_dbg_dev_init(struct qed_hwfn *p_hwfn,
return DBG_STATUS_UNKNOWN_CHIP;
}
- /* Set platofrm */
- dev_data->platform_id = PLATFORM_ASIC;
+ /* Set HW type */
+ dev_data->hw_type = HW_TYPE_ASIC;
dev_data->mode_enable[MODE_ASIC] = 1;
/* Set port mode */
- switch (qed_rd(p_hwfn, p_ptt, MISC_REG_PORT_MODE)) {
- case 0:
+ switch (p_hwfn->cdev->num_ports_in_engine) {
+ case 1:
dev_data->mode_enable[MODE_PORTS_PER_ENG_1] = 1;
break;
- case 1:
+ case 2:
dev_data->mode_enable[MODE_PORTS_PER_ENG_2] = 1;
break;
- case 2:
+ case 4:
dev_data->mode_enable[MODE_PORTS_PER_ENG_4] = 1;
break;
}
/* Set 100G mode */
- if (dev_data->chip_id == CHIP_BB &&
- qed_rd(p_hwfn, p_ptt, CNIG_REG_NW_PORT_MODE_BB) == 2)
+ if (QED_IS_CMT(p_hwfn->cdev))
dev_data->mode_enable[MODE_100G] = 1;
/* Set number of ports */
@@ -1857,14 +930,36 @@ static enum dbg_status qed_dbg_dev_init(struct qed_hwfn *p_hwfn,
return DBG_STATUS_OK;
}
-static struct dbg_bus_block *get_dbg_bus_block_desc(struct qed_hwfn *p_hwfn,
- enum block_id block_id)
+static const struct dbg_block *get_dbg_block(struct qed_hwfn *p_hwfn,
+ enum block_id block_id)
+{
+ const struct dbg_block *dbg_block;
+
+ dbg_block = p_hwfn->dbg_arrays[BIN_BUF_DBG_BLOCKS].ptr;
+ return dbg_block + block_id;
+}
+
+static const struct dbg_block_chip *qed_get_dbg_block_per_chip(struct qed_hwfn
+ *p_hwfn,
+ enum block_id
+ block_id)
{
struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
- return (struct dbg_bus_block *)&dbg_bus_blocks[block_id *
- MAX_CHIP_IDS +
- dev_data->chip_id];
+ return (const struct dbg_block_chip *)
+ p_hwfn->dbg_arrays[BIN_BUF_DBG_BLOCKS_CHIP_DATA].ptr +
+ block_id * MAX_CHIP_IDS + dev_data->chip_id;
+}
+
+static const struct dbg_reset_reg *qed_get_dbg_reset_reg(struct qed_hwfn
+ *p_hwfn,
+ u8 reset_reg_id)
+{
+ struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
+
+ return (const struct dbg_reset_reg *)
+ p_hwfn->dbg_arrays[BIN_BUF_DBG_RESET_REGS].ptr +
+ reset_reg_id * MAX_CHIP_IDS + dev_data->chip_id;
}
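
Both per-chip accessors above index a flat array laid out chip-major per entry: entry block_id (or reset_reg_id) occupies MAX_CHIP_IDS consecutive slots, one per chip. A standalone restatement of that row-major arithmetic:

#include <assert.h>

#define MAX_CHIP_IDS 2
#define NUM_ENTRIES 3

struct chip_entry_sketch {
	int dummy;
};

static const struct chip_entry_sketch table[NUM_ENTRIES * MAX_CHIP_IDS];

static const struct chip_entry_sketch *get_entry(int entry_id, int chip_id)
{
	/* row-major: all chips of entry 0, then all chips of entry 1, ... */
	return table + entry_id * MAX_CHIP_IDS + chip_id;
}

int main(void)
{
	/* entry 2 on chip 1 lands on the last element */
	assert(get_entry(2, 1) == &table[5]);
	return 0;
}
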
/* Reads the FW info structure for the specified Storm from the chip,
@@ -1885,8 +980,9 @@ static void qed_read_storm_fw_info(struct qed_hwfn *p_hwfn,
* The address is located in the last line of the Storm RAM.
*/
addr = storm->sem_fast_mem_addr + SEM_FAST_REG_INT_RAM +
- DWORDS_TO_BYTES(SEM_FAST_REG_INT_RAM_SIZE_BB_K2) -
- sizeof(fw_info_location);
+ DWORDS_TO_BYTES(SEM_FAST_REG_INT_RAM_SIZE) -
+ sizeof(fw_info_location);
+
dest = (u32 *)&fw_info_location;
for (i = 0; i < BYTES_TO_DWORDS(sizeof(fw_info_location));
@@ -2081,6 +1177,29 @@ static u32 qed_dump_mfw_ver_param(struct qed_hwfn *p_hwfn,
return qed_dump_str_param(dump_buf, dump, "mfw-version", mfw_ver_str);
}
+/* Reads the chip revision from the chip and writes it as a param to the
+ * specified buffer. Returns the dumped size in dwords.
+ */
+static u32 qed_dump_chip_revision_param(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ u32 *dump_buf, bool dump)
+{
+ struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
+ char param_str[3] = "??";
+
+ if (dev_data->hw_type == HW_TYPE_ASIC) {
+ u32 chip_rev, chip_metal;
+
+ chip_rev = qed_rd(p_hwfn, p_ptt, MISCS_REG_CHIP_REV);
+ chip_metal = qed_rd(p_hwfn, p_ptt, MISCS_REG_CHIP_METAL);
+
+ param_str[0] = 'a' + (u8)chip_rev;
+ param_str[1] = '0' + (u8)chip_metal;
+ }
+
+ return qed_dump_str_param(dump_buf, dump, "chip-revision", param_str);
+}
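
The revision string above is built from two register fields as a letter/digit pair: 'a' + rev and '0' + metal, so rev 1 / metal 0 reads back as "b0". A standalone check of that encoding:

#include <assert.h>
#include <string.h>

static void format_chip_rev(char out[3], unsigned int rev, unsigned int metal)
{
	out[0] = (char)('a' + rev);   /* revision letter: 0 -> 'a', 1 -> 'b' */
	out[1] = (char)('0' + metal); /* metal digit */
	out[2] = '\0';
}

int main(void)
{
	char s[3];

	format_chip_rev(s, 0, 0);
	assert(strcmp(s, "a0") == 0);
	format_chip_rev(s, 1, 0);
	assert(strcmp(s, "b0") == 0);
	return 0;
}
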
+
/* Writes a section header to the specified buffer.
* Returns the dumped size in dwords.
*/
@@ -2104,7 +1223,8 @@ static u32 qed_dump_common_global_params(struct qed_hwfn *p_hwfn,
u8 num_params;
/* Dump global params section header */
- num_params = NUM_COMMON_GLOBAL_PARAMS + num_specific_global_params;
+ num_params = NUM_COMMON_GLOBAL_PARAMS + num_specific_global_params +
+ (dev_data->chip_id == CHIP_BB ? 1 : 0);
offset += qed_dump_section_hdr(dump_buf + offset,
dump, "global_params", num_params);
@@ -2112,6 +1232,8 @@ static u32 qed_dump_common_global_params(struct qed_hwfn *p_hwfn,
offset += qed_dump_fw_ver_param(p_hwfn, p_ptt, dump_buf + offset, dump);
offset += qed_dump_mfw_ver_param(p_hwfn,
p_ptt, dump_buf + offset, dump);
+ offset += qed_dump_chip_revision_param(p_hwfn,
+ p_ptt, dump_buf + offset, dump);
offset += qed_dump_num_param(dump_buf + offset,
dump, "tools-version", TOOLS_VERSION);
offset += qed_dump_str_param(dump_buf + offset,
@@ -2121,11 +1243,12 @@ static u32 qed_dump_common_global_params(struct qed_hwfn *p_hwfn,
offset += qed_dump_str_param(dump_buf + offset,
dump,
"platform",
- s_platform_defs[dev_data->platform_id].
- name);
- offset +=
- qed_dump_num_param(dump_buf + offset, dump, "pci-func",
- p_hwfn->abs_pf_id);
+ s_hw_type_defs[dev_data->hw_type].name);
+ offset += qed_dump_num_param(dump_buf + offset,
+ dump, "pci-func", p_hwfn->abs_pf_id);
+ if (dev_data->chip_id == CHIP_BB)
+ offset += qed_dump_num_param(dump_buf + offset,
+ dump, "path", QED_PATH_ID(p_hwfn));
return offset;
}
@@ -2156,24 +1279,87 @@ static void qed_update_blocks_reset_state(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt)
{
struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
- u32 reg_val[MAX_DBG_RESET_REGS] = { 0 };
- u32 i;
+ u32 reg_val[NUM_DBG_RESET_REGS] = { 0 };
+ u8 rst_reg_id;
+ u32 blk_id;
/* Read reset registers */
- for (i = 0; i < MAX_DBG_RESET_REGS; i++)
- if (s_reset_regs_defs[i].exists[dev_data->chip_id])
- reg_val[i] = qed_rd(p_hwfn,
- p_ptt, s_reset_regs_defs[i].addr);
+ for (rst_reg_id = 0; rst_reg_id < NUM_DBG_RESET_REGS; rst_reg_id++) {
+ const struct dbg_reset_reg *rst_reg;
+ bool rst_reg_removed;
+ u32 rst_reg_addr;
+
+ rst_reg = qed_get_dbg_reset_reg(p_hwfn, rst_reg_id);
+ rst_reg_removed = GET_FIELD(rst_reg->data,
+ DBG_RESET_REG_IS_REMOVED);
+ rst_reg_addr = DWORDS_TO_BYTES(GET_FIELD(rst_reg->data,
+ DBG_RESET_REG_ADDR));
+
+ if (!rst_reg_removed)
+ reg_val[rst_reg_id] = qed_rd(p_hwfn, p_ptt,
+ rst_reg_addr);
+ }
/* Check if blocks are in reset */
- for (i = 0; i < MAX_BLOCK_ID; i++) {
- struct block_defs *block = s_block_defs[i];
+ for (blk_id = 0; blk_id < NUM_PHYS_BLOCKS; blk_id++) {
+ const struct dbg_block_chip *blk;
+ bool has_rst_reg;
+ bool is_removed;
+
+ blk = qed_get_dbg_block_per_chip(p_hwfn, (enum block_id)blk_id);
+ is_removed = GET_FIELD(blk->flags, DBG_BLOCK_CHIP_IS_REMOVED);
+ has_rst_reg = GET_FIELD(blk->flags,
+ DBG_BLOCK_CHIP_HAS_RESET_REG);
+
+ if (!is_removed && has_rst_reg)
+ dev_data->block_in_reset[blk_id] =
+ !(reg_val[blk->reset_reg_id] &
+ BIT(blk->reset_reg_bit_offset));
+ }
+}
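
The test above treats a cleared bit as "in reset": the reset registers hold 1 for blocks that are out of reset. A standalone restatement of the bit test:

#include <assert.h>
#include <stdint.h>

#define BIT(n) (1u << (n))

/* A block is in reset when its bit in the reset register reads 0 */
static int block_in_reset(uint32_t reset_reg_val, uint8_t bit_offset)
{
	return !(reset_reg_val & BIT(bit_offset));
}

int main(void)
{
	assert(block_in_reset(0x0, 3));	    /* bit clear -> in reset */
	assert(!block_in_reset(BIT(3), 3)); /* bit set -> out of reset */
	return 0;
}
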
+
+/* is_mode_match recursive function */
+static bool qed_is_mode_match_rec(struct qed_hwfn *p_hwfn,
+ u16 *modes_buf_offset, u8 rec_depth)
+{
+ struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
+ u8 *dbg_array;
+ bool arg1, arg2;
+ u8 tree_val;
+
+ if (rec_depth > MAX_RECURSION_DEPTH) {
+ DP_NOTICE(p_hwfn,
+ "Unexpected error: is_mode_match_rec exceeded the max recursion depth. This is probably due to a corrupt init/debug buffer.\n");
+ return false;
+ }
- dev_data->block_in_reset[i] = block->has_reset_bit &&
- !(reg_val[block->reset_reg] & BIT(block->reset_bit_offset));
+ /* Get next element from modes tree buffer */
+ dbg_array = p_hwfn->dbg_arrays[BIN_BUF_DBG_MODE_TREE].ptr;
+ tree_val = dbg_array[(*modes_buf_offset)++];
+
+ switch (tree_val) {
+ case INIT_MODE_OP_NOT:
+ return !qed_is_mode_match_rec(p_hwfn,
+ modes_buf_offset, rec_depth + 1);
+ case INIT_MODE_OP_OR:
+ case INIT_MODE_OP_AND:
+ arg1 = qed_is_mode_match_rec(p_hwfn,
+ modes_buf_offset, rec_depth + 1);
+ arg2 = qed_is_mode_match_rec(p_hwfn,
+ modes_buf_offset, rec_depth + 1);
+ return (tree_val == INIT_MODE_OP_OR) ? (arg1 ||
+ arg2) : (arg1 && arg2);
+ default:
+ return dev_data->mode_enable[tree_val - MAX_INIT_MODE_OPS] > 0;
}
}
+/* Returns true if the mode (specified using modes_buf_offset) is enabled */
+static bool qed_is_mode_match(struct qed_hwfn *p_hwfn, u16 *modes_buf_offset)
+{
+ return qed_is_mode_match_rec(p_hwfn, modes_buf_offset, 0);
+}
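
The modes tree is a prefix-notation expression buffer: NOT consumes one sub-expression, OR/AND consume two, and any other byte is a mode index offset by the operator count. The driver's version above additionally guards recursion depth against a corrupt buffer. A standalone evaluator with illustrative opcode values (the real INIT_MODE_OP_* encoding may differ):

#include <assert.h>
#include <stdbool.h>
#include <stdint.h>

enum { OP_NOT, OP_OR, OP_AND, NUM_OPS }; /* illustrative opcode values */

static bool mode_enable[8];

static bool eval(const uint8_t *buf, uint16_t *off)
{
	uint8_t v = buf[(*off)++];
	bool a, b;

	switch (v) {
	case OP_NOT:
		return !eval(buf, off);
	case OP_OR:
	case OP_AND:
		a = eval(buf, off);
		b = eval(buf, off);
		return v == OP_OR ? (a || b) : (a && b);
	default:
		return mode_enable[v - NUM_OPS]; /* leaf: mode index */
	}
}

int main(void)
{
	/* AND(mode 0, NOT(mode 1)) serialized in prefix order */
	const uint8_t tree[] = { OP_AND, NUM_OPS + 0, OP_NOT, NUM_OPS + 1 };
	uint16_t off = 0;

	mode_enable[0] = true;
	mode_enable[1] = false;
	assert(eval(tree, &off));
	return 0;
}
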
+
/* Enable / disable the Debug block */
static void qed_bus_enable_dbg_block(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt, bool enable)
@@ -2185,23 +1371,21 @@ static void qed_bus_enable_dbg_block(struct qed_hwfn *p_hwfn,
static void qed_bus_reset_dbg_block(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt)
{
- u32 dbg_reset_reg_addr, old_reset_reg_val, new_reset_reg_val;
- struct block_defs *dbg_block = s_block_defs[BLOCK_DBG];
+ u32 reset_reg_addr, old_reset_reg_val, new_reset_reg_val;
+ const struct dbg_reset_reg *reset_reg;
+ const struct dbg_block_chip *block;
- dbg_reset_reg_addr = s_reset_regs_defs[dbg_block->reset_reg].addr;
- old_reset_reg_val = qed_rd(p_hwfn, p_ptt, dbg_reset_reg_addr);
- new_reset_reg_val =
- old_reset_reg_val & ~BIT(dbg_block->reset_bit_offset);
+ block = qed_get_dbg_block_per_chip(p_hwfn, BLOCK_DBG);
+ reset_reg = qed_get_dbg_reset_reg(p_hwfn, block->reset_reg_id);
+ reset_reg_addr =
+ DWORDS_TO_BYTES(GET_FIELD(reset_reg->data, DBG_RESET_REG_ADDR));
- qed_wr(p_hwfn, p_ptt, dbg_reset_reg_addr, new_reset_reg_val);
- qed_wr(p_hwfn, p_ptt, dbg_reset_reg_addr, old_reset_reg_val);
-}
+ old_reset_reg_val = qed_rd(p_hwfn, p_ptt, reset_reg_addr);
+ new_reset_reg_val =
+ old_reset_reg_val & ~BIT(block->reset_reg_bit_offset);
-static void qed_bus_set_framing_mode(struct qed_hwfn *p_hwfn,
- struct qed_ptt *p_ptt,
- enum dbg_bus_frame_modes mode)
-{
- qed_wr(p_hwfn, p_ptt, DBG_REG_FRAMING_MODE, (u8)mode);
+ qed_wr(p_hwfn, p_ptt, reset_reg_addr, new_reset_reg_val);
+ qed_wr(p_hwfn, p_ptt, reset_reg_addr, old_reset_reg_val);
}
/* Enable / disable Debug Bus clients according to the specified mask
@@ -2213,28 +1397,65 @@ static void qed_bus_enable_clients(struct qed_hwfn *p_hwfn,
qed_wr(p_hwfn, p_ptt, DBG_REG_CLIENT_ENABLE, client_mask);
}
-static bool qed_is_mode_match(struct qed_hwfn *p_hwfn, u16 *modes_buf_offset)
+static void qed_bus_config_dbg_line(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ enum block_id block_id,
+ u8 line_id,
+ u8 enable_mask,
+ u8 right_shift,
+ u8 force_valid_mask, u8 force_frame_mask)
+{
+ const struct dbg_block_chip *block =
+ qed_get_dbg_block_per_chip(p_hwfn, block_id);
+
+ qed_wr(p_hwfn, p_ptt, DWORDS_TO_BYTES(block->dbg_select_reg_addr),
+ line_id);
+ qed_wr(p_hwfn, p_ptt, DWORDS_TO_BYTES(block->dbg_dword_enable_reg_addr),
+ enable_mask);
+ qed_wr(p_hwfn, p_ptt, DWORDS_TO_BYTES(block->dbg_shift_reg_addr),
+ right_shift);
+ qed_wr(p_hwfn, p_ptt, DWORDS_TO_BYTES(block->dbg_force_valid_reg_addr),
+ force_valid_mask);
+ qed_wr(p_hwfn, p_ptt, DWORDS_TO_BYTES(block->dbg_force_frame_reg_addr),
+ force_frame_mask);
+}
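
qed_bus_config_dbg_line() is a fixed five-write sequence: select a debug line, choose which of its dwords to record, optionally rotate the data, and optionally force the valid/frame indications. A standalone sketch of that sequence with stubbed register I/O and made-up register offsets:

#include <stdint.h>

/* Stub standing in for qed_wr() */
static void reg_write(uint32_t addr, uint32_t val)
{
	(void)addr;
	(void)val;
}

struct dbg_line_regs_sketch {
	uint32_t select, dword_enable, shift, force_valid, force_frame;
};

static void config_dbg_line(const struct dbg_line_regs_sketch *r,
			    uint8_t line_id, uint8_t enable_mask,
			    uint8_t right_shift, uint8_t force_valid_mask,
			    uint8_t force_frame_mask)
{
	reg_write(r->select, line_id);		 /* which debug line */
	reg_write(r->dword_enable, enable_mask); /* which dwords of it */
	reg_write(r->shift, right_shift);	 /* rotate recorded data */
	reg_write(r->force_valid, force_valid_mask);
	reg_write(r->force_frame, force_frame_mask);
}

int main(void)
{
	/* illustrative register offsets, not the hardware map */
	const struct dbg_line_regs_sketch regs = {
		0x10, 0x14, 0x18, 0x1c, 0x20
	};

	config_dbg_line(&regs, 0, 0xf, 0, 0, 0); /* line 0, all 4 dwords */
	return 0;
}
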
+
+/* Disable debug bus in all blocks */
+static void qed_bus_disable_blocks(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt)
{
struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
- bool arg1, arg2;
- const u32 *ptr;
- u8 tree_val;
+ u32 block_id;
- /* Get next element from modes tree buffer */
- ptr = s_dbg_arrays[BIN_BUF_DBG_MODE_TREE].ptr;
- tree_val = ((u8 *)ptr)[(*modes_buf_offset)++];
+ /* Disable all blocks */
+ for (block_id = 0; block_id < MAX_BLOCK_ID; block_id++) {
+ const struct dbg_block_chip *block_per_chip =
+ qed_get_dbg_block_per_chip(p_hwfn,
+ (enum block_id)block_id);
- switch (tree_val) {
- case INIT_MODE_OP_NOT:
- return !qed_is_mode_match(p_hwfn, modes_buf_offset);
- case INIT_MODE_OP_OR:
- case INIT_MODE_OP_AND:
- arg1 = qed_is_mode_match(p_hwfn, modes_buf_offset);
- arg2 = qed_is_mode_match(p_hwfn, modes_buf_offset);
- return (tree_val == INIT_MODE_OP_OR) ? (arg1 ||
- arg2) : (arg1 && arg2);
- default:
- return dev_data->mode_enable[tree_val - MAX_INIT_MODE_OPS] > 0;
+ if (GET_FIELD(block_per_chip->flags,
+ DBG_BLOCK_CHIP_IS_REMOVED) ||
+ dev_data->block_in_reset[block_id])
+ continue;
+
+ /* Disable debug bus */
+ if (GET_FIELD(block_per_chip->flags,
+ DBG_BLOCK_CHIP_HAS_DBG_BUS)) {
+ u32 dbg_en_addr =
+ block_per_chip->dbg_dword_enable_reg_addr;
+ u16 modes_buf_offset =
+ GET_FIELD(block_per_chip->dbg_bus_mode.data,
+ DBG_MODE_HDR_MODES_BUF_OFFSET);
+ bool eval_mode =
+ GET_FIELD(block_per_chip->dbg_bus_mode.data,
+ DBG_MODE_HDR_EVAL_MODE) > 0;
+
+ if (!eval_mode ||
+ qed_is_mode_match(p_hwfn, &modes_buf_offset))
+ qed_wr(p_hwfn, p_ptt,
+ DWORDS_TO_BYTES(dbg_en_addr),
+ 0);
+ }
}
}
@@ -2247,6 +1468,20 @@ static bool qed_grc_is_included(struct qed_hwfn *p_hwfn,
return qed_grc_get_param(p_hwfn, grc_param) > 0;
}
+/* Returns the storm_id that matches the specified Storm letter,
+ * or MAX_DBG_STORMS if invalid storm letter.
+ */
+static enum dbg_storms qed_get_id_from_letter(char storm_letter)
+{
+ u8 storm_id;
+
+ for (storm_id = 0; storm_id < MAX_DBG_STORMS; storm_id++)
+ if (s_storm_defs[storm_id].letter == storm_letter)
+ return (enum dbg_storms)storm_id;
+
+ return MAX_DBG_STORMS;
+}
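
A standalone restatement of the lookup above: a linear scan over the storm letters (order assumed to match s_storm_defs earlier in this file), returning a sentinel for an unknown letter.

#include <assert.h>

#define MAX_STORMS 6

static const char storm_letters[MAX_STORMS] = { 'T', 'M', 'U', 'X', 'Y', 'P' };

static int storm_id_from_letter(char letter)
{
	int id;

	for (id = 0; id < MAX_STORMS; id++)
		if (storm_letters[id] == letter)
			return id;
	return MAX_STORMS; /* sentinel: invalid letter */
}

int main(void)
{
	assert(storm_id_from_letter('U') == 2);
	assert(storm_id_from_letter('Q') == MAX_STORMS);
	return 0;
}
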
+
/* Returns true if the specified Storm should be included in the dump, false
* otherwise.
*/
@@ -2262,14 +1497,20 @@ static bool qed_grc_is_storm_included(struct qed_hwfn *p_hwfn,
static bool qed_grc_is_mem_included(struct qed_hwfn *p_hwfn,
enum block_id block_id, u8 mem_group_id)
{
- struct block_defs *block = s_block_defs[block_id];
+ const struct dbg_block *block;
u8 i;
- /* Check Storm match */
- if (block->associated_to_storm &&
- !qed_grc_is_storm_included(p_hwfn,
- (enum dbg_storms)block->storm_id))
- return false;
+ block = get_dbg_block(p_hwfn, block_id);
+
+ /* If the block is associated with a Storm, check Storm match */
+ if (block->associated_storm_letter) {
+ enum dbg_storms associated_storm_id =
+ qed_get_id_from_letter(block->associated_storm_letter);
+
+ if (associated_storm_id == MAX_DBG_STORMS ||
+ !qed_grc_is_storm_included(p_hwfn, associated_storm_id))
+ return false;
+ }
for (i = 0; i < NUM_BIG_RAM_TYPES; i++) {
struct big_ram_defs *big_ram = &s_big_ram_defs[i];
@@ -2291,6 +1532,8 @@ static bool qed_grc_is_mem_included(struct qed_hwfn *p_hwfn,
case MEM_GROUP_CAU_SB:
case MEM_GROUP_CAU_PI:
return qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_CAU);
+ case MEM_GROUP_CAU_MEM_EXT:
+ return qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_CAU_EXT);
case MEM_GROUP_QM_MEM:
return qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_QM);
case MEM_GROUP_CFC_MEM:
@@ -2298,6 +1541,8 @@ static bool qed_grc_is_mem_included(struct qed_hwfn *p_hwfn,
case MEM_GROUP_TASK_CFC_MEM:
return qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_CFC) ||
qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_CM_CTX);
+ case MEM_GROUP_DORQ_MEM:
+ return qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_DORQ);
case MEM_GROUP_IGU_MEM:
case MEM_GROUP_IGU_MSIX:
return qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_IGU);
@@ -2343,64 +1588,104 @@ static void qed_grc_stall_storms(struct qed_hwfn *p_hwfn,
msleep(STALL_DELAY_MS);
}
-/* Takes all blocks out of reset */
+/* Takes all blocks out of reset. If rbc_only is true, only RBC clients are
+ * taken out of reset.
+ */
static void qed_grc_unreset_blocks(struct qed_hwfn *p_hwfn,
- struct qed_ptt *p_ptt)
+ struct qed_ptt *p_ptt, bool rbc_only)
{
struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
- u32 reg_val[MAX_DBG_RESET_REGS] = { 0 };
- u32 block_id, i;
+ u8 chip_id = dev_data->chip_id;
+ u32 i;
- /* Fill reset regs values */
- for (block_id = 0; block_id < MAX_BLOCK_ID; block_id++) {
- struct block_defs *block = s_block_defs[block_id];
+ /* Take RBCs out of reset */
+ for (i = 0; i < ARRAY_SIZE(s_rbc_reset_defs); i++)
+ if (s_rbc_reset_defs[i].reset_val[dev_data->chip_id])
+ qed_wr(p_hwfn,
+ p_ptt,
+ s_rbc_reset_defs[i].reset_reg_addr +
+ RESET_REG_UNRESET_OFFSET,
+ s_rbc_reset_defs[i].reset_val[chip_id]);
- if (block->exists[dev_data->chip_id] && block->has_reset_bit &&
- block->unreset)
- reg_val[block->reset_reg] |=
- BIT(block->reset_bit_offset);
- }
+ if (!rbc_only) {
+ u32 reg_val[NUM_DBG_RESET_REGS] = { 0 };
+ u8 reset_reg_id;
+ u32 block_id;
- /* Write reset registers */
- for (i = 0; i < MAX_DBG_RESET_REGS; i++) {
- if (!s_reset_regs_defs[i].exists[dev_data->chip_id])
- continue;
+ /* Fill reset regs values */
+ for (block_id = 0; block_id < NUM_PHYS_BLOCKS; block_id++) {
+ bool is_removed, has_reset_reg, unreset_before_dump;
+ const struct dbg_block_chip *block;
+
+ block = qed_get_dbg_block_per_chip(p_hwfn,
+ (enum block_id)
+ block_id);
+ is_removed =
+ GET_FIELD(block->flags, DBG_BLOCK_CHIP_IS_REMOVED);
+ has_reset_reg =
+ GET_FIELD(block->flags,
+ DBG_BLOCK_CHIP_HAS_RESET_REG);
+ unreset_before_dump =
+ GET_FIELD(block->flags,
+ DBG_BLOCK_CHIP_UNRESET_BEFORE_DUMP);
+
+ if (!is_removed && has_reset_reg && unreset_before_dump)
+ reg_val[block->reset_reg_id] |=
+ BIT(block->reset_reg_bit_offset);
+ }
- reg_val[i] |=
- s_reset_regs_defs[i].unreset_val[dev_data->chip_id];
+ /* Write reset registers */
+ for (reset_reg_id = 0; reset_reg_id < NUM_DBG_RESET_REGS;
+ reset_reg_id++) {
+ const struct dbg_reset_reg *reset_reg;
+ u32 reset_reg_addr;
- if (reg_val[i])
- qed_wr(p_hwfn,
- p_ptt,
- s_reset_regs_defs[i].addr +
- RESET_REG_UNRESET_OFFSET, reg_val[i]);
+ reset_reg = qed_get_dbg_reset_reg(p_hwfn, reset_reg_id);
+
+ if (GET_FIELD
+ (reset_reg->data, DBG_RESET_REG_IS_REMOVED))
+ continue;
+
+ if (reg_val[reset_reg_id]) {
+ reset_reg_addr =
+ GET_FIELD(reset_reg->data,
+ DBG_RESET_REG_ADDR);
+ qed_wr(p_hwfn,
+ p_ptt,
+ DWORDS_TO_BYTES(reset_reg_addr) +
+ RESET_REG_UNRESET_OFFSET,
+ reg_val[reset_reg_id]);
+ }
+ }
}
}
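/* The flag tests above (and the FIELD_VALUE() calls further down) rely on
 * the driver's generic bitfield macros. A minimal sketch of their shape
 * (assumed here; the exact definitions live in the qed headers):
 *
 *   #define GET_FIELD(value, name) \
 *           (((value) >> name ## _SHIFT) & name ## _MASK)
 *   #define FIELD_VALUE(name, value) \
 *           (((value) & name ## _MASK) << name ## _SHIFT)
 *
 * e.g. GET_FIELD(block->flags, DBG_BLOCK_CHIP_HAS_RESET_REG) extracts the
 * single "has reset register" bit from the per-chip block descriptor.
 */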
/* Returns the attention block data of the specified block */
static const struct dbg_attn_block_type_data *
-qed_get_block_attn_data(enum block_id block_id, enum dbg_attn_type attn_type)
+qed_get_block_attn_data(struct qed_hwfn *p_hwfn,
+ enum block_id block_id, enum dbg_attn_type attn_type)
{
const struct dbg_attn_block *base_attn_block_arr =
- (const struct dbg_attn_block *)
- s_dbg_arrays[BIN_BUF_DBG_ATTN_BLOCKS].ptr;
+ (const struct dbg_attn_block *)
+ p_hwfn->dbg_arrays[BIN_BUF_DBG_ATTN_BLOCKS].ptr;
return &base_attn_block_arr[block_id].per_type_data[attn_type];
}
/* Returns the attention registers of the specified block */
static const struct dbg_attn_reg *
-qed_get_block_attn_regs(enum block_id block_id, enum dbg_attn_type attn_type,
+qed_get_block_attn_regs(struct qed_hwfn *p_hwfn,
+ enum block_id block_id, enum dbg_attn_type attn_type,
u8 *num_attn_regs)
{
const struct dbg_attn_block_type_data *block_type_data =
- qed_get_block_attn_data(block_id, attn_type);
+ qed_get_block_attn_data(p_hwfn, block_id, attn_type);
*num_attn_regs = block_type_data->num_regs;
- return &((const struct dbg_attn_reg *)
- s_dbg_arrays[BIN_BUF_DBG_ATTN_REGS].ptr)[block_type_data->
- regs_offset];
+ return (const struct dbg_attn_reg *)
+ p_hwfn->dbg_arrays[BIN_BUF_DBG_ATTN_REGS].ptr +
+ block_type_data->regs_offset;
}
/* For each block, clear the status of all parities */
@@ -2412,11 +1697,12 @@ static void qed_grc_clear_all_prty(struct qed_hwfn *p_hwfn,
u8 reg_idx, num_attn_regs;
u32 block_id;
- for (block_id = 0; block_id < MAX_BLOCK_ID; block_id++) {
+ for (block_id = 0; block_id < NUM_PHYS_BLOCKS; block_id++) {
if (dev_data->block_in_reset[block_id])
continue;
- attn_reg_arr = qed_get_block_attn_regs((enum block_id)block_id,
+ attn_reg_arr = qed_get_block_attn_regs(p_hwfn,
+ (enum block_id)block_id,
ATTN_TYPE_PARITY,
&num_attn_regs);
@@ -2444,22 +1730,20 @@ static void qed_grc_clear_all_prty(struct qed_hwfn *p_hwfn,
}
/* Dumps GRC registers section header. Returns the dumped size in dwords.
- * The following parameters are dumped:
+ * The following parameters are dumped:
* - count: no. of dumped entries
* - split_type: split type
* - split_id: split ID (dumped only if split_id != SPLIT_TYPE_NONE)
- * - param_name: user parameter value (dumped only if param_name != NULL
- * and param_val != NULL).
+ * - reg_type_name: register type name (dumped only if reg_type_name != NULL)
*/
static u32 qed_grc_dump_regs_hdr(u32 *dump_buf,
bool dump,
u32 num_reg_entries,
enum init_split_types split_type,
- u8 split_id,
- const char *param_name, const char *param_val)
+ u8 split_id, const char *reg_type_name)
{
u8 num_params = 2 +
- (split_type != SPLIT_TYPE_NONE ? 1 : 0) + (param_name ? 1 : 0);
+ (split_type != SPLIT_TYPE_NONE ? 1 : 0) + (reg_type_name ? 1 : 0);
u32 offset = 0;
offset += qed_dump_section_hdr(dump_buf + offset,
@@ -2472,9 +1756,9 @@ static u32 qed_grc_dump_regs_hdr(u32 *dump_buf,
if (split_type != SPLIT_TYPE_NONE)
offset += qed_dump_num_param(dump_buf + offset,
dump, "id", split_id);
- if (param_name && param_val)
+ if (reg_type_name)
offset += qed_dump_str_param(dump_buf + offset,
- dump, param_name, param_val);
+ dump, "type", reg_type_name);
return offset;
}
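/* Illustrative layout of the header written above for a PF-split register
 * section (the section name "grc_regs" and the "count"/"split" params are
 * produced by elided context lines and are assumptions here):
 *
 *   section "grc_regs", 2-4 params:
 *       "count" = <num_reg_entries>
 *       "split" = <split type name>   (only if split_type != NONE)
 *       "id"    = <split_id>          (only if split_type != NONE)
 *       "type"  = <reg_type_name>     (only if reg_type_name != NULL)
 */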
@@ -2504,21 +1788,12 @@ static u32 qed_grc_dump_addr_range(struct qed_hwfn *p_hwfn,
{
struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
u8 port_id = 0, pf_id = 0, vf_id = 0, fid = 0;
+ bool read_using_dmae = false;
+ u32 thresh;
if (!dump)
return len;
- /* Print log if needed */
- dev_data->num_regs_read += len;
- if (dev_data->num_regs_read >=
- s_platform_defs[dev_data->platform_id].log_thresh) {
- DP_VERBOSE(p_hwfn,
- QED_MSG_DEBUG,
- "Dumping %d registers...\n",
- dev_data->num_regs_read);
- dev_data->num_regs_read = 0;
- }
-
switch (split_type) {
case SPLIT_TYPE_PORT:
port_id = split_id;
@@ -2539,38 +1814,77 @@ static u32 qed_grc_dump_addr_range(struct qed_hwfn *p_hwfn,
}
/* Try reading using DMAE */
- if (dev_data->use_dmae && split_type == SPLIT_TYPE_NONE &&
- (len >= s_platform_defs[dev_data->platform_id].dmae_thresh ||
- wide_bus)) {
- if (!qed_dmae_grc2host(p_hwfn, p_ptt, DWORDS_TO_BYTES(addr),
- (u64)(uintptr_t)(dump_buf), len, NULL))
- return len;
- dev_data->use_dmae = 0;
- DP_VERBOSE(p_hwfn,
- QED_MSG_DEBUG,
- "Failed reading from chip using DMAE, using GRC instead\n");
+ if (dev_data->use_dmae && split_type != SPLIT_TYPE_VF &&
+ (len >= s_hw_type_defs[dev_data->hw_type].dmae_thresh ||
+ (PROTECT_WIDE_BUS && wide_bus))) {
+ struct qed_dmae_params dmae_params;
+
+ /* Set DMAE params */
+ memset(&dmae_params, 0, sizeof(dmae_params));
+ SET_FIELD(dmae_params.flags, QED_DMAE_PARAMS_COMPLETION_DST, 1);
+ switch (split_type) {
+ case SPLIT_TYPE_PORT:
+ SET_FIELD(dmae_params.flags, QED_DMAE_PARAMS_PORT_VALID,
+ 1);
+ dmae_params.port_id = port_id;
+ break;
+ case SPLIT_TYPE_PF:
+ SET_FIELD(dmae_params.flags,
+ QED_DMAE_PARAMS_SRC_PF_VALID, 1);
+ dmae_params.src_pfid = pf_id;
+ break;
+ case SPLIT_TYPE_PORT_PF:
+ SET_FIELD(dmae_params.flags, QED_DMAE_PARAMS_PORT_VALID,
+ 1);
+ SET_FIELD(dmae_params.flags,
+ QED_DMAE_PARAMS_SRC_PF_VALID, 1);
+ dmae_params.port_id = port_id;
+ dmae_params.src_pfid = pf_id;
+ break;
+ default:
+ break;
+ }
+
+ /* Execute DMAE command */
+ read_using_dmae = !qed_dmae_grc2host(p_hwfn,
+ p_ptt,
+ DWORDS_TO_BYTES(addr),
+ (u64)(uintptr_t)(dump_buf),
+ len, &dmae_params);
+ if (!read_using_dmae) {
+ dev_data->use_dmae = 0;
+ DP_VERBOSE(p_hwfn,
+ QED_MSG_DEBUG,
+ "Failed reading from chip using DMAE, using GRC instead\n");
+ }
}
+ if (read_using_dmae)
+ goto print_log;
+
/* If not read using DMAE, read using GRC */
/* Set pretend */
- if (split_type != dev_data->pretend.split_type || split_id !=
- dev_data->pretend.split_id) {
+ if (split_type != dev_data->pretend.split_type ||
+ split_id != dev_data->pretend.split_id) {
switch (split_type) {
case SPLIT_TYPE_PORT:
qed_port_pretend(p_hwfn, p_ptt, port_id);
break;
case SPLIT_TYPE_PF:
- fid = pf_id << PXP_PRETEND_CONCRETE_FID_PFID_SHIFT;
+ fid = FIELD_VALUE(PXP_PRETEND_CONCRETE_FID_PFID,
+ pf_id);
qed_fid_pretend(p_hwfn, p_ptt, fid);
break;
case SPLIT_TYPE_PORT_PF:
- fid = pf_id << PXP_PRETEND_CONCRETE_FID_PFID_SHIFT;
+ fid = FIELD_VALUE(PXP_PRETEND_CONCRETE_FID_PFID,
+ pf_id);
qed_port_fid_pretend(p_hwfn, p_ptt, port_id, fid);
break;
case SPLIT_TYPE_VF:
- fid = BIT(PXP_PRETEND_CONCRETE_FID_VFVALID_SHIFT) |
- (vf_id << PXP_PRETEND_CONCRETE_FID_VFID_SHIFT);
+ fid = FIELD_VALUE(PXP_PRETEND_CONCRETE_FID_VFVALID, 1)
+ | FIELD_VALUE(PXP_PRETEND_CONCRETE_FID_VFID,
+ vf_id);
qed_fid_pretend(p_hwfn, p_ptt, fid);
break;
default:
@@ -2584,6 +1898,16 @@ static u32 qed_grc_dump_addr_range(struct qed_hwfn *p_hwfn,
/* Read registers using GRC */
qed_read_regs(p_hwfn, p_ptt, dump_buf, addr, len);
+print_log:
+ /* Print log */
+ dev_data->num_regs_read += len;
+ thresh = s_hw_type_defs[dev_data->hw_type].log_thresh;
+ if ((dev_data->num_regs_read / thresh) >
+ ((dev_data->num_regs_read - len) / thresh))
+ DP_VERBOSE(p_hwfn,
+ QED_MSG_DEBUG,
+ "Dumped %d registers...\n", dev_data->num_regs_read);
+
return len;
}
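/* The print_log condition fires exactly once each time the running register
 * count crosses a multiple of log_thresh. Worked example, assuming
 * log_thresh = 1024:
 *
 *   before: num_regs_read = 1000, len = 100
 *   after:  num_regs_read = 1100 -> 1100/1024 (1) > 1000/1024 (0),
 *           so "Dumped 1100 registers..." is printed;
 *   next:   num_regs_read = 1200 -> 1200/1024 (1) > 1100/1024 (1)
 *           is false, so nothing is printed until 2048 is crossed.
 */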
@@ -2668,7 +1992,7 @@ static u32 qed_grc_dump_reg_entry_skip(struct qed_hwfn *p_hwfn,
/* Dumps GRC registers entries. Returns the dumped size in dwords. */
static u32 qed_grc_dump_regs_entries(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt,
- struct dbg_array input_regs_arr,
+ struct virt_mem_desc input_regs_arr,
u32 *dump_buf,
bool dump,
enum init_split_types split_type,
@@ -2681,10 +2005,10 @@ static u32 qed_grc_dump_regs_entries(struct qed_hwfn *p_hwfn,
*num_dumped_reg_entries = 0;
- while (input_offset < input_regs_arr.size_in_dwords) {
+ while (input_offset < BYTES_TO_DWORDS(input_regs_arr.size)) {
const struct dbg_dump_cond_hdr *cond_hdr =
(const struct dbg_dump_cond_hdr *)
- &input_regs_arr.ptr[input_offset++];
+ input_regs_arr.ptr + input_offset++;
u16 modes_buf_offset;
bool eval_mode;
@@ -2707,7 +2031,7 @@ static u32 qed_grc_dump_regs_entries(struct qed_hwfn *p_hwfn,
for (i = 0; i < cond_hdr->data_size; i++, input_offset++) {
const struct dbg_dump_reg *reg =
(const struct dbg_dump_reg *)
- &input_regs_arr.ptr[input_offset];
+ input_regs_arr.ptr + input_offset;
u32 addr, len;
bool wide_bus;
@@ -2732,14 +2056,12 @@ static u32 qed_grc_dump_regs_entries(struct qed_hwfn *p_hwfn,
/* Dumps GRC registers entries for one split. Returns the dumped size in
 * dwords.
 */
static u32 qed_grc_dump_split_data(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt,
- struct dbg_array input_regs_arr,
+ struct virt_mem_desc input_regs_arr,
u32 *dump_buf,
bool dump,
bool block_enable[MAX_BLOCK_ID],
enum init_split_types split_type,
- u8 split_id,
- const char *param_name,
- const char *param_val)
+ u8 split_id, const char *reg_type_name)
{
struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
enum init_split_types hdr_split_type = split_type;
@@ -2757,7 +2079,7 @@ static u32 qed_grc_dump_split_data(struct qed_hwfn *p_hwfn,
false,
0,
hdr_split_type,
- hdr_split_id, param_name, param_val);
+ hdr_split_id, reg_type_name);
/* Dump registers */
offset += qed_grc_dump_regs_entries(p_hwfn,
@@ -2776,7 +2098,7 @@ static u32 qed_grc_dump_split_data(struct qed_hwfn *p_hwfn,
dump,
num_dumped_reg_entries,
hdr_split_type,
- hdr_split_id, param_name, param_val);
+ hdr_split_id, reg_type_name);
return num_dumped_reg_entries > 0 ? offset : 0;
}
@@ -2789,32 +2111,33 @@ static u32 qed_grc_dump_registers(struct qed_hwfn *p_hwfn,
u32 *dump_buf,
bool dump,
bool block_enable[MAX_BLOCK_ID],
- const char *param_name, const char *param_val)
+ const char *reg_type_name)
{
+ struct virt_mem_desc *dbg_buf =
+ &p_hwfn->dbg_arrays[BIN_BUF_DBG_DUMP_REG];
struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
u32 offset = 0, input_offset = 0;
- u16 fid;
- while (input_offset <
- s_dbg_arrays[BIN_BUF_DBG_DUMP_REG].size_in_dwords) {
+
+ while (input_offset < BYTES_TO_DWORDS(dbg_buf->size)) {
const struct dbg_dump_split_hdr *split_hdr;
- struct dbg_array curr_input_regs_arr;
+ struct virt_mem_desc curr_input_regs_arr;
enum init_split_types split_type;
u16 split_count = 0;
u32 split_data_size;
u8 split_id;
split_hdr =
- (const struct dbg_dump_split_hdr *)
- &s_dbg_arrays[BIN_BUF_DBG_DUMP_REG].ptr[input_offset++];
+ (const struct dbg_dump_split_hdr *)
+ dbg_buf->ptr + input_offset++;
split_type =
- GET_FIELD(split_hdr->hdr,
- DBG_DUMP_SPLIT_HDR_SPLIT_TYPE_ID);
- split_data_size =
- GET_FIELD(split_hdr->hdr,
- DBG_DUMP_SPLIT_HDR_DATA_SIZE);
+ GET_FIELD(split_hdr->hdr,
+ DBG_DUMP_SPLIT_HDR_SPLIT_TYPE_ID);
+ split_data_size = GET_FIELD(split_hdr->hdr,
+ DBG_DUMP_SPLIT_HDR_DATA_SIZE);
curr_input_regs_arr.ptr =
- &s_dbg_arrays[BIN_BUF_DBG_DUMP_REG].ptr[input_offset];
- curr_input_regs_arr.size_in_dwords = split_data_size;
+ (u32 *)p_hwfn->dbg_arrays[BIN_BUF_DBG_DUMP_REG].ptr +
+ input_offset;
+ curr_input_regs_arr.size = DWORDS_TO_BYTES(split_data_size);
switch (split_type) {
case SPLIT_TYPE_NONE:
@@ -2842,16 +2165,16 @@ static u32 qed_grc_dump_registers(struct qed_hwfn *p_hwfn,
dump, block_enable,
split_type,
split_id,
- param_name,
- param_val);
+ reg_type_name);
input_offset += split_data_size;
}
/* Cancel pretends (pretend to original PF) */
if (dump) {
- fid = p_hwfn->rel_pf_id << PXP_PRETEND_CONCRETE_FID_PFID_SHIFT;
- qed_fid_pretend(p_hwfn, p_ptt, fid);
+ qed_fid_pretend(p_hwfn, p_ptt,
+ FIELD_VALUE(PXP_PRETEND_CONCRETE_FID_PFID,
+ p_hwfn->rel_pf_id));
dev_data->pretend.split_type = SPLIT_TYPE_NONE;
dev_data->pretend.split_id = 0;
}
@@ -2864,26 +2187,32 @@ static u32 qed_grc_dump_reset_regs(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt,
u32 *dump_buf, bool dump)
{
- struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
- u32 i, offset = 0, num_regs = 0;
+ u32 offset = 0, num_regs = 0;
+ u8 reset_reg_id;
/* Calculate header size */
offset += qed_grc_dump_regs_hdr(dump_buf,
- false, 0,
- SPLIT_TYPE_NONE, 0, NULL, NULL);
+ false,
+ 0, SPLIT_TYPE_NONE, 0, "RESET_REGS");
/* Write reset registers */
- for (i = 0; i < MAX_DBG_RESET_REGS; i++) {
- if (!s_reset_regs_defs[i].exists[dev_data->chip_id])
+ for (reset_reg_id = 0; reset_reg_id < NUM_DBG_RESET_REGS;
+ reset_reg_id++) {
+ const struct dbg_reset_reg *reset_reg;
+ u32 reset_reg_addr;
+
+ reset_reg = qed_get_dbg_reset_reg(p_hwfn, reset_reg_id);
+
+ if (GET_FIELD(reset_reg->data, DBG_RESET_REG_IS_REMOVED))
continue;
+ reset_reg_addr = GET_FIELD(reset_reg->data, DBG_RESET_REG_ADDR);
offset += qed_grc_dump_reg_entry(p_hwfn,
p_ptt,
dump_buf + offset,
dump,
- BYTES_TO_DWORDS
- (s_reset_regs_defs[i].addr), 1,
- false, SPLIT_TYPE_NONE, 0);
+ reset_reg_addr,
+ 1, false, SPLIT_TYPE_NONE, 0);
num_regs++;
}
@@ -2891,7 +2220,7 @@ static u32 qed_grc_dump_reset_regs(struct qed_hwfn *p_hwfn,
if (dump)
qed_grc_dump_regs_hdr(dump_buf,
true, num_regs, SPLIT_TYPE_NONE,
- 0, NULL, NULL);
+ 0, "RESET_REGS");
return offset;
}
@@ -2904,21 +2233,23 @@ static u32 qed_grc_dump_modified_regs(struct qed_hwfn *p_hwfn,
u32 *dump_buf, bool dump)
{
struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
- u32 block_id, offset = 0, num_reg_entries = 0;
+ u32 block_id, offset = 0, stall_regs_offset;
const struct dbg_attn_reg *attn_reg_arr;
u8 storm_id, reg_idx, num_attn_regs;
+ u32 num_reg_entries = 0;
- /* Calculate header size */
+ /* Write empty header for attention registers */
offset += qed_grc_dump_regs_hdr(dump_buf,
- false, 0, SPLIT_TYPE_NONE,
- 0, NULL, NULL);
+ false,
+ 0, SPLIT_TYPE_NONE, 0, "ATTN_REGS");
/* Write parity registers */
- for (block_id = 0; block_id < MAX_BLOCK_ID; block_id++) {
+ for (block_id = 0; block_id < NUM_PHYS_BLOCKS; block_id++) {
if (dev_data->block_in_reset[block_id] && dump)
continue;
- attn_reg_arr = qed_get_block_attn_regs((enum block_id)block_id,
+ attn_reg_arr = qed_get_block_attn_regs(p_hwfn,
+ (enum block_id)block_id,
ATTN_TYPE_PARITY,
&num_attn_regs);
@@ -2961,16 +2292,29 @@ static u32 qed_grc_dump_modified_regs(struct qed_hwfn *p_hwfn,
}
}
+ /* Overwrite header for attention registers */
+ if (dump)
+ qed_grc_dump_regs_hdr(dump_buf,
+ true,
+ num_reg_entries,
+ SPLIT_TYPE_NONE, 0, "ATTN_REGS");
+
+ /* Write empty header for stall registers */
+ stall_regs_offset = offset;
+ offset += qed_grc_dump_regs_hdr(dump_buf,
+ false, 0, SPLIT_TYPE_NONE, 0, "REGS");
+
/* Write Storm stall status registers */
- for (storm_id = 0; storm_id < MAX_DBG_STORMS; storm_id++) {
+ for (storm_id = 0, num_reg_entries = 0; storm_id < MAX_DBG_STORMS;
+ storm_id++) {
struct storm_defs *storm = &s_storm_defs[storm_id];
u32 addr;
- if (dev_data->block_in_reset[storm->block_id] && dump)
+ if (dev_data->block_in_reset[storm->sem_block_id] && dump)
continue;
addr =
- BYTES_TO_DWORDS(s_storm_defs[storm_id].sem_fast_mem_addr +
+ BYTES_TO_DWORDS(storm->sem_fast_mem_addr +
SEM_FAST_REG_STALLED);
offset += qed_grc_dump_reg_entry(p_hwfn,
p_ptt,
@@ -2982,12 +2326,12 @@ static u32 qed_grc_dump_modified_regs(struct qed_hwfn *p_hwfn,
num_reg_entries++;
}
- /* Write header */
+ /* Overwrite header for stall registers */
if (dump)
- qed_grc_dump_regs_hdr(dump_buf,
+ qed_grc_dump_regs_hdr(dump_buf + stall_regs_offset,
true,
- num_reg_entries, SPLIT_TYPE_NONE,
- 0, NULL, NULL);
+ num_reg_entries,
+ SPLIT_TYPE_NONE, 0, "REGS");
return offset;
}
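/* Both sub-sections above use the same two-pass header pattern: reserve the
 * header with dump=false (which only returns its size), dump the entries
 * while counting them, then rewrite the header in place with the final
 * count. A minimal sketch of the pattern:
 *
 *   hdr_offset = offset;
 *   offset += qed_grc_dump_regs_hdr(dump_buf + offset, false, 0,
 *                                   SPLIT_TYPE_NONE, 0, "REGS");
 *   ...dump entries, incrementing num_reg_entries...
 *   if (dump)
 *           qed_grc_dump_regs_hdr(dump_buf + hdr_offset, true,
 *                                 num_reg_entries,
 *                                 SPLIT_TYPE_NONE, 0, "REGS");
 *
 * This works because the header size is independent of the count value, so
 * the second write occupies exactly the reserved dwords.
 */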
@@ -3000,8 +2344,7 @@ static u32 qed_grc_dump_special_regs(struct qed_hwfn *p_hwfn,
u32 offset = 0, addr;
offset += qed_grc_dump_regs_hdr(dump_buf,
- dump, 2, SPLIT_TYPE_NONE, 0,
- NULL, NULL);
+ dump, 2, SPLIT_TYPE_NONE, 0, "REGS");
/* Dump R/TDIF_REG_DEBUG_ERROR_INFO_SIZE (every 8'th register should be
* skipped).
@@ -3049,8 +2392,7 @@ static u32 qed_grc_dump_mem_hdr(struct qed_hwfn *p_hwfn,
u32 len,
u32 bit_width,
bool packed,
- const char *mem_group,
- bool is_storm, char storm_letter)
+ const char *mem_group, char storm_letter)
{
u8 num_params = 3;
u32 offset = 0;
@@ -3071,7 +2413,7 @@ static u32 qed_grc_dump_mem_hdr(struct qed_hwfn *p_hwfn,
if (name) {
/* Dump name */
- if (is_storm) {
+ if (storm_letter) {
strcpy(buf, "?STORM_");
buf[0] = storm_letter;
strcpy(buf + strlen(buf), name);
@@ -3103,7 +2445,7 @@ static u32 qed_grc_dump_mem_hdr(struct qed_hwfn *p_hwfn,
dump, "packed", 1);
/* Dump reg type */
- if (is_storm) {
+ if (storm_letter) {
strcpy(buf, "?STORM_");
buf[0] = storm_letter;
strcpy(buf + strlen(buf), mem_group);
@@ -3130,8 +2472,7 @@ static u32 qed_grc_dump_mem(struct qed_hwfn *p_hwfn,
bool wide_bus,
u32 bit_width,
bool packed,
- const char *mem_group,
- bool is_storm, char storm_letter)
+ const char *mem_group, char storm_letter)
{
u32 offset = 0;
@@ -3142,8 +2483,7 @@ static u32 qed_grc_dump_mem(struct qed_hwfn *p_hwfn,
addr,
len,
bit_width,
- packed,
- mem_group, is_storm, storm_letter);
+ packed, mem_group, storm_letter);
offset += qed_grc_dump_addr_range(p_hwfn,
p_ptt,
dump_buf + offset,
@@ -3156,20 +2496,21 @@ static u32 qed_grc_dump_mem(struct qed_hwfn *p_hwfn,
/* Dumps GRC memories entries. Returns the dumped size in dwords. */
static u32 qed_grc_dump_mem_entries(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt,
- struct dbg_array input_mems_arr,
+ struct virt_mem_desc input_mems_arr,
u32 *dump_buf, bool dump)
{
u32 i, offset = 0, input_offset = 0;
bool mode_match = true;
- while (input_offset < input_mems_arr.size_in_dwords) {
+ while (input_offset < BYTES_TO_DWORDS(input_mems_arr.size)) {
const struct dbg_dump_cond_hdr *cond_hdr;
u16 modes_buf_offset;
u32 num_entries;
bool eval_mode;
- cond_hdr = (const struct dbg_dump_cond_hdr *)
- &input_mems_arr.ptr[input_offset++];
+ cond_hdr =
+ (const struct dbg_dump_cond_hdr *)input_mems_arr.ptr +
+ input_offset++;
num_entries = cond_hdr->data_size / MEM_DUMP_ENTRY_SIZE_DWORDS;
/* Check required mode */
@@ -3191,24 +2532,25 @@ static u32 qed_grc_dump_mem_entries(struct qed_hwfn *p_hwfn,
for (i = 0; i < num_entries;
i++, input_offset += MEM_DUMP_ENTRY_SIZE_DWORDS) {
const struct dbg_dump_mem *mem =
- (const struct dbg_dump_mem *)
- &input_mems_arr.ptr[input_offset];
- u8 mem_group_id = GET_FIELD(mem->dword0,
- DBG_DUMP_MEM_MEM_GROUP_ID);
- bool is_storm = false, mem_wide_bus;
- enum dbg_grc_params grc_param;
- char storm_letter = 'a';
- enum block_id block_id;
+ (const struct dbg_dump_mem *)((u32 *)
+ input_mems_arr.ptr
+ + input_offset);
+ const struct dbg_block *block;
+ char storm_letter = 0;
u32 mem_addr, mem_len;
+ bool mem_wide_bus;
+ u8 mem_group_id;
+ mem_group_id = GET_FIELD(mem->dword0,
+ DBG_DUMP_MEM_MEM_GROUP_ID);
if (mem_group_id >= MEM_GROUPS_NUM) {
DP_NOTICE(p_hwfn, "Invalid mem_group_id\n");
return 0;
}
- block_id = (enum block_id)cond_hdr->block_id;
if (!qed_grc_is_mem_included(p_hwfn,
- block_id,
+ (enum block_id)
+ cond_hdr->block_id,
mem_group_id))
continue;
@@ -3217,42 +2559,14 @@ static u32 qed_grc_dump_mem_entries(struct qed_hwfn *p_hwfn,
mem_wide_bus = GET_FIELD(mem->dword1,
DBG_DUMP_MEM_WIDE_BUS);
- /* Update memory length for CCFC/TCFC memories
- * according to number of LCIDs/LTIDs.
- */
- if (mem_group_id == MEM_GROUP_CONN_CFC_MEM) {
- if (mem_len % MAX_LCIDS) {
- DP_NOTICE(p_hwfn,
- "Invalid CCFC connection memory size\n");
- return 0;
- }
-
- grc_param = DBG_GRC_PARAM_NUM_LCIDS;
- mem_len = qed_grc_get_param(p_hwfn, grc_param) *
- (mem_len / MAX_LCIDS);
- } else if (mem_group_id == MEM_GROUP_TASK_CFC_MEM) {
- if (mem_len % MAX_LTIDS) {
- DP_NOTICE(p_hwfn,
- "Invalid TCFC task memory size\n");
- return 0;
- }
-
- grc_param = DBG_GRC_PARAM_NUM_LTIDS;
- mem_len = qed_grc_get_param(p_hwfn, grc_param) *
- (mem_len / MAX_LTIDS);
- }
+ block = get_dbg_block(p_hwfn,
+ cond_hdr->block_id);
- /* If memory is associated with Storm, update Storm
- * details.
+ /* If memory is associated with Storm,
+ * update storm details
*/
- if (s_block_defs
- [cond_hdr->block_id]->associated_to_storm) {
- is_storm = true;
- storm_letter =
- s_storm_defs[s_block_defs
- [cond_hdr->block_id]->
- storm_id].letter;
- }
+ if (block->associated_storm_letter)
+ storm_letter = block->associated_storm_letter;
/* Dump memory */
offset += qed_grc_dump_mem(p_hwfn,
@@ -3266,7 +2580,6 @@ static u32 qed_grc_dump_mem_entries(struct qed_hwfn *p_hwfn,
0,
false,
s_mem_group_names[mem_group_id],
- is_storm,
storm_letter);
}
}
@@ -3281,26 +2594,25 @@ static u32 qed_grc_dump_memories(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt,
u32 *dump_buf, bool dump)
{
+ struct virt_mem_desc *dbg_buf =
+ &p_hwfn->dbg_arrays[BIN_BUF_DBG_DUMP_MEM];
u32 offset = 0, input_offset = 0;
- while (input_offset <
- s_dbg_arrays[BIN_BUF_DBG_DUMP_MEM].size_in_dwords) {
+ while (input_offset < BYTES_TO_DWORDS(dbg_buf->size)) {
const struct dbg_dump_split_hdr *split_hdr;
- struct dbg_array curr_input_mems_arr;
+ struct virt_mem_desc curr_input_mems_arr;
enum init_split_types split_type;
u32 split_data_size;
- split_hdr = (const struct dbg_dump_split_hdr *)
- &s_dbg_arrays[BIN_BUF_DBG_DUMP_MEM].ptr[input_offset++];
- split_type =
- GET_FIELD(split_hdr->hdr,
- DBG_DUMP_SPLIT_HDR_SPLIT_TYPE_ID);
- split_data_size =
- GET_FIELD(split_hdr->hdr,
- DBG_DUMP_SPLIT_HDR_DATA_SIZE);
- curr_input_mems_arr.ptr =
- &s_dbg_arrays[BIN_BUF_DBG_DUMP_MEM].ptr[input_offset];
- curr_input_mems_arr.size_in_dwords = split_data_size;
+ split_hdr =
+ (const struct dbg_dump_split_hdr *)dbg_buf->ptr +
+ input_offset++;
+ split_type = GET_FIELD(split_hdr->hdr,
+ DBG_DUMP_SPLIT_HDR_SPLIT_TYPE_ID);
+ split_data_size = GET_FIELD(split_hdr->hdr,
+ DBG_DUMP_SPLIT_HDR_DATA_SIZE);
+ curr_input_mems_arr.ptr = (u32 *)dbg_buf->ptr + input_offset;
+ curr_input_mems_arr.size = DWORDS_TO_BYTES(split_data_size);
if (split_type == SPLIT_TYPE_NONE)
offset += qed_grc_dump_mem_entries(p_hwfn,
@@ -3328,17 +2640,19 @@ static u32 qed_grc_dump_ctx_data(struct qed_hwfn *p_hwfn,
bool dump,
const char *name,
u32 num_lids,
- u32 lid_size,
- u32 rd_reg_addr,
- u8 storm_id)
+ enum cm_ctx_types ctx_type, u8 storm_id)
{
+ struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
struct storm_defs *storm = &s_storm_defs[storm_id];
- u32 i, lid, total_size, offset = 0;
+ u32 i, lid, lid_size, total_size;
+ u32 rd_reg_addr, offset = 0;
+
+ /* Convert quad-regs to dwords */
+ lid_size = storm->cm_ctx_lid_sizes[dev_data->chip_id][ctx_type] * 4;
if (!lid_size)
return 0;
- lid_size *= BYTES_IN_DWORD;
total_size = num_lids * lid_size;
offset += qed_grc_dump_mem_hdr(p_hwfn,
@@ -3348,18 +2662,26 @@ static u32 qed_grc_dump_ctx_data(struct qed_hwfn *p_hwfn,
0,
total_size,
lid_size * 32,
- false, name, true, storm->letter);
+ false, name, storm->letter);
if (!dump)
return offset + total_size;
+ rd_reg_addr = BYTES_TO_DWORDS(storm->cm_ctx_rd_addr[ctx_type]);
+
/* Dump context data */
for (lid = 0; lid < num_lids; lid++) {
- for (i = 0; i < lid_size; i++, offset++) {
+ for (i = 0; i < lid_size; i++) {
qed_wr(p_hwfn,
p_ptt, storm->cm_ctx_wr_addr, (i << 9) | lid);
- *(dump_buf + offset) = qed_rd(p_hwfn,
- p_ptt, rd_reg_addr);
+ offset += qed_grc_dump_addr_range(p_hwfn,
+ p_ptt,
+ dump_buf + offset,
+ dump,
+ rd_reg_addr,
+ 1,
+ false,
+ SPLIT_TYPE_NONE, 0);
}
}
@@ -3370,115 +2692,126 @@ static u32 qed_grc_dump_ctx_data(struct qed_hwfn *p_hwfn,
static u32 qed_grc_dump_ctx(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt, u32 *dump_buf, bool dump)
{
- enum dbg_grc_params grc_param;
u32 offset = 0;
u8 storm_id;
for (storm_id = 0; storm_id < MAX_DBG_STORMS; storm_id++) {
- struct storm_defs *storm = &s_storm_defs[storm_id];
-
if (!qed_grc_is_storm_included(p_hwfn,
(enum dbg_storms)storm_id))
continue;
/* Dump Conn AG context size */
- grc_param = DBG_GRC_PARAM_NUM_LCIDS;
- offset +=
- qed_grc_dump_ctx_data(p_hwfn,
- p_ptt,
- dump_buf + offset,
- dump,
- "CONN_AG_CTX",
- qed_grc_get_param(p_hwfn,
- grc_param),
- storm->cm_conn_ag_ctx_lid_size,
- storm->cm_conn_ag_ctx_rd_addr,
- storm_id);
+ offset += qed_grc_dump_ctx_data(p_hwfn,
+ p_ptt,
+ dump_buf + offset,
+ dump,
+ "CONN_AG_CTX",
+ NUM_OF_LCIDS,
+ CM_CTX_CONN_AG, storm_id);
/* Dump Conn ST context size */
- grc_param = DBG_GRC_PARAM_NUM_LCIDS;
- offset +=
- qed_grc_dump_ctx_data(p_hwfn,
- p_ptt,
- dump_buf + offset,
- dump,
- "CONN_ST_CTX",
- qed_grc_get_param(p_hwfn,
- grc_param),
- storm->cm_conn_st_ctx_lid_size,
- storm->cm_conn_st_ctx_rd_addr,
- storm_id);
+ offset += qed_grc_dump_ctx_data(p_hwfn,
+ p_ptt,
+ dump_buf + offset,
+ dump,
+ "CONN_ST_CTX",
+ NUM_OF_LCIDS,
+ CM_CTX_CONN_ST, storm_id);
/* Dump Task AG context size */
- grc_param = DBG_GRC_PARAM_NUM_LTIDS;
- offset +=
- qed_grc_dump_ctx_data(p_hwfn,
- p_ptt,
- dump_buf + offset,
- dump,
- "TASK_AG_CTX",
- qed_grc_get_param(p_hwfn,
- grc_param),
- storm->cm_task_ag_ctx_lid_size,
- storm->cm_task_ag_ctx_rd_addr,
- storm_id);
+ offset += qed_grc_dump_ctx_data(p_hwfn,
+ p_ptt,
+ dump_buf + offset,
+ dump,
+ "TASK_AG_CTX",
+ NUM_OF_LTIDS,
+ CM_CTX_TASK_AG, storm_id);
/* Dump Task ST context size */
- grc_param = DBG_GRC_PARAM_NUM_LTIDS;
- offset +=
- qed_grc_dump_ctx_data(p_hwfn,
- p_ptt,
- dump_buf + offset,
- dump,
- "TASK_ST_CTX",
- qed_grc_get_param(p_hwfn,
- grc_param),
- storm->cm_task_st_ctx_lid_size,
- storm->cm_task_st_ctx_rd_addr,
- storm_id);
+ offset += qed_grc_dump_ctx_data(p_hwfn,
+ p_ptt,
+ dump_buf + offset,
+ dump,
+ "TASK_ST_CTX",
+ NUM_OF_LTIDS,
+ CM_CTX_TASK_ST, storm_id);
}
return offset;
}
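/* The context data itself is read through an indirect window (see
 * qed_grc_dump_ctx_data() above): writing (i << 9) | lid to the Storm's
 * cm_ctx_wr_addr selects dword i of that LID's context, which is then read
 * back from the context type's read address. Simplified pseudo-sequence for
 * a single LID:
 *
 *   for (i = 0; i < lid_size; i++) {
 *           qed_wr(p_hwfn, p_ptt, storm->cm_ctx_wr_addr,
 *                  (i << 9) | lid);                // select dword i
 *           buf[i] = qed_rd(p_hwfn, p_ptt,
 *                           storm->cm_ctx_rd_addr[ctx_type]);
 *   }
 *
 * The real code routes the read through qed_grc_dump_addr_range() so that
 * the DMAE/GRC selection logic is reused.
 */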
-/* Dumps GRC IORs data. Returns the dumped size in dwords. */
-static u32 qed_grc_dump_iors(struct qed_hwfn *p_hwfn,
- struct qed_ptt *p_ptt, u32 *dump_buf, bool dump)
-{
- char buf[10] = "IOR_SET_?";
- u32 addr, offset = 0;
- u8 storm_id, set_id;
+#define VFC_STATUS_RESP_READY_BIT 0
+#define VFC_STATUS_BUSY_BIT 1
+#define VFC_STATUS_SENDING_CMD_BIT 2
- for (storm_id = 0; storm_id < MAX_DBG_STORMS; storm_id++) {
- struct storm_defs *storm = &s_storm_defs[storm_id];
+#define VFC_POLLING_DELAY_MS 1
+#define VFC_POLLING_COUNT 20
- if (!qed_grc_is_storm_included(p_hwfn,
- (enum dbg_storms)storm_id))
- continue;
+/* Reads data from VFC. Returns the number of dwords read (0 on error).
+ * Sizes are specified in dwords.
+ */
+static u32 qed_grc_dump_read_from_vfc(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ struct storm_defs *storm,
+ u32 *cmd_data,
+ u32 cmd_size,
+ u32 *addr_data,
+ u32 addr_size,
+ u32 resp_size, u32 *dump_buf)
+{
+ struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
+ u32 vfc_status, polling_ms, polling_count = 0, i;
+ u32 reg_addr, sem_base;
+ bool is_ready = false;
+
+ sem_base = storm->sem_fast_mem_addr;
+ polling_ms = VFC_POLLING_DELAY_MS *
+ s_hw_type_defs[dev_data->hw_type].delay_factor;
+
+ /* Write VFC command */
+ ARR_REG_WR(p_hwfn,
+ p_ptt,
+ sem_base + SEM_FAST_REG_VFC_DATA_WR,
+ cmd_data, cmd_size);
+
+ /* Write VFC address */
+ ARR_REG_WR(p_hwfn,
+ p_ptt,
+ sem_base + SEM_FAST_REG_VFC_ADDR,
+ addr_data, addr_size);
+
+ /* Read response */
+ for (i = 0; i < resp_size; i++) {
+ /* Poll until ready */
+ do {
+ reg_addr = sem_base + SEM_FAST_REG_VFC_STATUS;
+ qed_grc_dump_addr_range(p_hwfn,
+ p_ptt,
+ &vfc_status,
+ true,
+ BYTES_TO_DWORDS(reg_addr),
+ 1,
+ false, SPLIT_TYPE_NONE, 0);
+ is_ready = vfc_status & BIT(VFC_STATUS_RESP_READY_BIT);
+
+ if (!is_ready) {
+ if (polling_count++ == VFC_POLLING_COUNT)
+ return 0;
- for (set_id = 0; set_id < NUM_IOR_SETS; set_id++) {
- addr = BYTES_TO_DWORDS(storm->sem_fast_mem_addr +
- SEM_FAST_REG_STORM_REG_FILE) +
- IOR_SET_OFFSET(set_id);
- if (strlen(buf) > 0)
- buf[strlen(buf) - 1] = '0' + set_id;
- offset += qed_grc_dump_mem(p_hwfn,
- p_ptt,
- dump_buf + offset,
- dump,
- buf,
- addr,
- IORS_PER_SET,
- false,
- 32,
- false,
- "ior",
- true,
- storm->letter);
- }
+ msleep(polling_ms);
+ }
+ } while (!is_ready);
+
+ reg_addr = sem_base + SEM_FAST_REG_VFC_DATA_RD;
+ qed_grc_dump_addr_range(p_hwfn,
+ p_ptt,
+ dump_buf + i,
+ true,
+ BYTES_TO_DWORDS(reg_addr),
+ 1, false, SPLIT_TYPE_NONE, 0);
}
- return offset;
+ return resp_size;
}
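/* Note that polling_count is not reset between response dwords, so the
 * retry budget is shared across the whole response: after a total of
 * VFC_POLLING_COUNT (20) not-ready status reads the function gives up and
 * returns 0. Assuming an ASIC delay_factor of 1, that bounds the sleep time
 * at VFC_POLLING_COUNT * VFC_POLLING_DELAY_MS = 20 ms; slower emulation
 * hw_types scale this linearly via delay_factor.
 */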
/* Dump VFC CAM. Returns the dumped size in dwords. */
@@ -3490,7 +2823,7 @@ static u32 qed_grc_dump_vfc_cam(struct qed_hwfn *p_hwfn,
struct storm_defs *storm = &s_storm_defs[storm_id];
u32 cam_addr[VFC_CAM_ADDR_DWORDS] = { 0 };
u32 cam_cmd[VFC_CAM_CMD_DWORDS] = { 0 };
- u32 row, i, offset = 0;
+ u32 row, offset = 0;
offset += qed_grc_dump_mem_hdr(p_hwfn,
dump_buf + offset,
@@ -3499,7 +2832,7 @@ static u32 qed_grc_dump_vfc_cam(struct qed_hwfn *p_hwfn,
0,
total_size,
256,
- false, "vfc_cam", true, storm->letter);
+ false, "vfc_cam", storm->letter);
if (!dump)
return offset + total_size;
@@ -3507,26 +2840,18 @@ static u32 qed_grc_dump_vfc_cam(struct qed_hwfn *p_hwfn,
/* Prepare CAM address */
SET_VAR_FIELD(cam_addr, VFC_CAM_ADDR, OP, VFC_OPCODE_CAM_RD);
- for (row = 0; row < VFC_CAM_NUM_ROWS;
- row++, offset += VFC_CAM_RESP_DWORDS) {
- /* Write VFC CAM command */
+ /* Read VFC CAM data */
+ for (row = 0; row < VFC_CAM_NUM_ROWS; row++) {
SET_VAR_FIELD(cam_cmd, VFC_CAM_CMD, ROW, row);
- ARR_REG_WR(p_hwfn,
- p_ptt,
- storm->sem_fast_mem_addr + SEM_FAST_REG_VFC_DATA_WR,
- cam_cmd, VFC_CAM_CMD_DWORDS);
-
- /* Write VFC CAM address */
- ARR_REG_WR(p_hwfn,
- p_ptt,
- storm->sem_fast_mem_addr + SEM_FAST_REG_VFC_ADDR,
- cam_addr, VFC_CAM_ADDR_DWORDS);
-
- /* Read VFC CAM read response */
- ARR_REG_RD(p_hwfn,
- p_ptt,
- storm->sem_fast_mem_addr + SEM_FAST_REG_VFC_DATA_RD,
- dump_buf + offset, VFC_CAM_RESP_DWORDS);
+ offset += qed_grc_dump_read_from_vfc(p_hwfn,
+ p_ptt,
+ storm,
+ cam_cmd,
+ VFC_CAM_CMD_DWORDS,
+ cam_addr,
+ VFC_CAM_ADDR_DWORDS,
+ VFC_CAM_RESP_DWORDS,
+ dump_buf + offset);
}
return offset;
@@ -3543,7 +2868,7 @@ static u32 qed_grc_dump_vfc_ram(struct qed_hwfn *p_hwfn,
struct storm_defs *storm = &s_storm_defs[storm_id];
u32 ram_addr[VFC_RAM_ADDR_DWORDS] = { 0 };
u32 ram_cmd[VFC_RAM_CMD_DWORDS] = { 0 };
- u32 row, i, offset = 0;
+ u32 row, offset = 0;
offset += qed_grc_dump_mem_hdr(p_hwfn,
dump_buf + offset,
@@ -3554,35 +2879,27 @@ static u32 qed_grc_dump_vfc_ram(struct qed_hwfn *p_hwfn,
256,
false,
ram_defs->type_name,
- true, storm->letter);
-
- /* Prepare RAM address */
- SET_VAR_FIELD(ram_addr, VFC_RAM_ADDR, OP, VFC_OPCODE_RAM_RD);
+ storm->letter);
if (!dump)
return offset + total_size;
+ /* Prepare RAM address */
+ SET_VAR_FIELD(ram_addr, VFC_RAM_ADDR, OP, VFC_OPCODE_RAM_RD);
+
+ /* Read VFC RAM data */
for (row = ram_defs->base_row;
- row < ram_defs->base_row + ram_defs->num_rows;
- row++, offset += VFC_RAM_RESP_DWORDS) {
- /* Write VFC RAM command */
- ARR_REG_WR(p_hwfn,
- p_ptt,
- storm->sem_fast_mem_addr + SEM_FAST_REG_VFC_DATA_WR,
- ram_cmd, VFC_RAM_CMD_DWORDS);
-
- /* Write VFC RAM address */
+ row < ram_defs->base_row + ram_defs->num_rows; row++) {
SET_VAR_FIELD(ram_addr, VFC_RAM_ADDR, ROW, row);
- ARR_REG_WR(p_hwfn,
- p_ptt,
- storm->sem_fast_mem_addr + SEM_FAST_REG_VFC_ADDR,
- ram_addr, VFC_RAM_ADDR_DWORDS);
-
- /* Read VFC RAM read response */
- ARR_REG_RD(p_hwfn,
- p_ptt,
- storm->sem_fast_mem_addr + SEM_FAST_REG_VFC_DATA_RD,
- dump_buf + offset, VFC_RAM_RESP_DWORDS);
+ offset += qed_grc_dump_read_from_vfc(p_hwfn,
+ p_ptt,
+ storm,
+ ram_cmd,
+ VFC_RAM_CMD_DWORDS,
+ ram_addr,
+ VFC_RAM_ADDR_DWORDS,
+ VFC_RAM_RESP_DWORDS,
+ dump_buf + offset);
}
return offset;
@@ -3592,16 +2909,13 @@ static u32 qed_grc_dump_vfc_ram(struct qed_hwfn *p_hwfn,
static u32 qed_grc_dump_vfc(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt, u32 *dump_buf, bool dump)
{
- struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
u8 storm_id, i;
u32 offset = 0;
for (storm_id = 0; storm_id < MAX_DBG_STORMS; storm_id++) {
if (!qed_grc_is_storm_included(p_hwfn,
(enum dbg_storms)storm_id) ||
- !s_storm_defs[storm_id].has_vfc ||
- (storm_id == DBG_PSTORM_ID && dev_data->platform_id !=
- PLATFORM_ASIC))
+ !s_storm_defs[storm_id].has_vfc)
continue;
/* Read CAM */
@@ -3651,7 +2965,7 @@ static u32 qed_grc_dump_rss(struct qed_hwfn *p_hwfn,
total_dwords,
rss_defs->entry_width,
packed,
- rss_defs->type_name, false, 0);
+ rss_defs->type_name, 0);
/* Dump RSS data */
if (!dump) {
@@ -3711,7 +3025,7 @@ static u32 qed_grc_dump_big_ram(struct qed_hwfn *p_hwfn,
0,
ram_size,
block_size * 8,
- false, type_name, false, 0);
+ false, type_name, 0);
/* Read and dump Big RAM data */
if (!dump)
@@ -3737,6 +3051,7 @@ static u32 qed_grc_dump_big_ram(struct qed_hwfn *p_hwfn,
return offset;
}
+/* Dumps MCP scratchpad. Returns the dumped size in dwords. */
static u32 qed_grc_dump_mcp(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt, u32 *dump_buf, bool dump)
{
@@ -3758,8 +3073,8 @@ static u32 qed_grc_dump_mcp(struct qed_hwfn *p_hwfn,
dump,
NULL,
BYTES_TO_DWORDS(MCP_REG_SCRATCH),
- MCP_REG_SCRATCH_SIZE_BB_K2,
- false, 0, false, "MCP", false, 0);
+ MCP_REG_SCRATCH_SIZE,
+ false, 0, false, "MCP", 0);
/* Dump MCP cpu_reg_file */
offset += qed_grc_dump_mem(p_hwfn,
@@ -3769,19 +3084,19 @@ static u32 qed_grc_dump_mcp(struct qed_hwfn *p_hwfn,
NULL,
BYTES_TO_DWORDS(MCP_REG_CPU_REG_FILE),
MCP_REG_CPU_REG_FILE_SIZE,
- false, 0, false, "MCP", false, 0);
+ false, 0, false, "MCP", 0);
/* Dump MCP registers */
block_enable[BLOCK_MCP] = true;
offset += qed_grc_dump_registers(p_hwfn,
p_ptt,
dump_buf + offset,
- dump, block_enable, "block", "MCP");
+ dump, block_enable, "MCP");
/* Dump required non-MCP registers */
offset += qed_grc_dump_regs_hdr(dump_buf + offset,
dump, 1, SPLIT_TYPE_NONE, 0,
- "block", "MCP");
+ "MCP");
addr = BYTES_TO_DWORDS(MISC_REG_SHARED_MEM_ADDR);
offset += qed_grc_dump_reg_entry(p_hwfn,
p_ptt,
@@ -3798,7 +3113,9 @@ static u32 qed_grc_dump_mcp(struct qed_hwfn *p_hwfn,
return offset;
}
-/* Dumps the tbus indirect memory for all PHYs. */
+/* Dumps the tbus indirect memory for all PHYs.
+ * Returns the dumped size in dwords.
+ */
static u32 qed_grc_dump_phy(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt, u32 *dump_buf, bool dump)
{
@@ -3832,7 +3149,7 @@ static u32 qed_grc_dump_phy(struct qed_hwfn *p_hwfn,
mem_name,
0,
PHY_DUMP_SIZE_DWORDS,
- 16, true, mem_name, false, 0);
+ 16, true, mem_name, 0);
if (!dump) {
offset += PHY_DUMP_SIZE_DWORDS;
@@ -3863,21 +3180,58 @@ static u32 qed_grc_dump_phy(struct qed_hwfn *p_hwfn,
return offset;
}
-static void qed_config_dbg_line(struct qed_hwfn *p_hwfn,
- struct qed_ptt *p_ptt,
- enum block_id block_id,
- u8 line_id,
- u8 enable_mask,
- u8 right_shift,
- u8 force_valid_mask, u8 force_frame_mask)
+static enum dbg_status qed_find_nvram_image(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ u32 image_type,
+ u32 *nvram_offset_bytes,
+ u32 *nvram_size_bytes);
+
+static enum dbg_status qed_nvram_read(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ u32 nvram_offset_bytes,
+ u32 nvram_size_bytes, u32 *ret_buf);
+
+/* Dumps the MCP HW dump from NVRAM. Returns the dumped size in dwords. */
+static u32 qed_grc_dump_mcp_hw_dump(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ u32 *dump_buf, bool dump)
{
- struct block_defs *block = s_block_defs[block_id];
+ u32 hw_dump_offset_bytes = 0, hw_dump_size_bytes = 0;
+ u32 hw_dump_size_dwords = 0, offset = 0;
+ enum dbg_status status;
- qed_wr(p_hwfn, p_ptt, block->dbg_select_addr, line_id);
- qed_wr(p_hwfn, p_ptt, block->dbg_enable_addr, enable_mask);
- qed_wr(p_hwfn, p_ptt, block->dbg_shift_addr, right_shift);
- qed_wr(p_hwfn, p_ptt, block->dbg_force_valid_addr, force_valid_mask);
- qed_wr(p_hwfn, p_ptt, block->dbg_force_frame_addr, force_frame_mask);
+ /* Read HW dump image from NVRAM */
+ status = qed_find_nvram_image(p_hwfn,
+ p_ptt,
+ NVM_TYPE_HW_DUMP_OUT,
+ &hw_dump_offset_bytes,
+ &hw_dump_size_bytes);
+ if (status != DBG_STATUS_OK)
+ return 0;
+
+ hw_dump_size_dwords = BYTES_TO_DWORDS(hw_dump_size_bytes);
+
+ /* Dump HW dump image section */
+ offset += qed_dump_section_hdr(dump_buf + offset,
+ dump, "mcp_hw_dump", 1);
+ offset += qed_dump_num_param(dump_buf + offset,
+ dump, "size", hw_dump_size_dwords);
+
+ /* Read MCP HW dump image into dump buffer */
+ if (dump && hw_dump_size_dwords) {
+ status = qed_nvram_read(p_hwfn,
+ p_ptt,
+ hw_dump_offset_bytes,
+ hw_dump_size_bytes, dump_buf + offset);
+ if (status != DBG_STATUS_OK) {
+ DP_NOTICE(p_hwfn,
+ "Failed to read MCP HW Dump image from NVRAM\n");
+ return 0;
+ }
+ }
+ offset += hw_dump_size_dwords;
+
+ return offset;
}
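/* The resulting section is self-describing: a "mcp_hw_dump" section header,
 * a "size" numeric param holding the image size in dwords, and then the raw
 * NVRAM image itself. Illustrative layout (dword offsets relative to the
 * section start):
 *
 *   [hdr]  section "mcp_hw_dump", 1 param
 *   [prm]  "size" = hw_dump_size_dwords
 *   [data] hw_dump_size_dwords dwords read via qed_nvram_read()
 *
 * A missing NVM_TYPE_HW_DUMP_OUT image is not treated as an error: the
 * function simply returns 0 and the GRC dump continues.
 */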
/* Dumps Static Debug data. Returns the dumped size in dwords. */
@@ -3886,26 +3240,19 @@ static u32 qed_grc_dump_static_debug(struct qed_hwfn *p_hwfn,
u32 *dump_buf, bool dump)
{
struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
- u32 block_id, line_id, offset = 0;
+ u32 block_id, line_id, offset = 0, addr, len;
/* Don't dump static debug if a debug bus recording is in progress */
if (dump && qed_rd(p_hwfn, p_ptt, DBG_REG_DBG_BLOCK_ON))
return 0;
if (dump) {
- /* Disable all blocks debug output */
- for (block_id = 0; block_id < MAX_BLOCK_ID; block_id++) {
- struct block_defs *block = s_block_defs[block_id];
-
- if (block->dbg_client_id[dev_data->chip_id] !=
- MAX_DBG_BUS_CLIENTS)
- qed_wr(p_hwfn, p_ptt, block->dbg_enable_addr,
- 0);
- }
+ /* Disable debug bus in all blocks */
+ qed_bus_disable_blocks(p_hwfn, p_ptt);
qed_bus_reset_dbg_block(p_hwfn, p_ptt);
- qed_bus_set_framing_mode(p_hwfn,
- p_ptt, DBG_BUS_FRAME_MODE_8HW_0ST);
+ qed_wr(p_hwfn,
+ p_ptt, DBG_REG_FRAMING_MODE, DBG_BUS_FRAME_MODE_8HW);
qed_wr(p_hwfn,
p_ptt, DBG_REG_DEBUG_TARGET, DBG_BUS_TARGET_ID_INT_BUF);
qed_wr(p_hwfn, p_ptt, DBG_REG_FULL_MODE, 1);
@@ -3914,28 +3261,48 @@ static u32 qed_grc_dump_static_debug(struct qed_hwfn *p_hwfn,
/* Dump all static debug lines for each relevant block */
for (block_id = 0; block_id < MAX_BLOCK_ID; block_id++) {
- struct block_defs *block = s_block_defs[block_id];
- struct dbg_bus_block *block_desc;
- u32 block_dwords, addr, len;
- u8 dbg_client_id;
+ const struct dbg_block_chip *block_per_chip;
+ const struct dbg_block *block;
+ bool is_removed, has_dbg_bus;
+ u16 modes_buf_offset;
+ u32 block_dwords;
+
+ block_per_chip =
+ qed_get_dbg_block_per_chip(p_hwfn, (enum block_id)block_id);
+ is_removed = GET_FIELD(block_per_chip->flags,
+ DBG_BLOCK_CHIP_IS_REMOVED);
+ has_dbg_bus = GET_FIELD(block_per_chip->flags,
+ DBG_BLOCK_CHIP_HAS_DBG_BUS);
- if (block->dbg_client_id[dev_data->chip_id] ==
- MAX_DBG_BUS_CLIENTS)
+ /* Read+clear for NWS parity is not working, so skip the NWS block */
+ if (block_id == BLOCK_NWS)
continue;
- block_desc = get_dbg_bus_block_desc(p_hwfn,
- (enum block_id)block_id);
- block_dwords = NUM_DBG_LINES(block_desc) *
+ if (!is_removed && has_dbg_bus &&
+ GET_FIELD(block_per_chip->dbg_bus_mode.data,
+ DBG_MODE_HDR_EVAL_MODE) > 0) {
+ modes_buf_offset =
+ GET_FIELD(block_per_chip->dbg_bus_mode.data,
+ DBG_MODE_HDR_MODES_BUF_OFFSET);
+ if (!qed_is_mode_match(p_hwfn, &modes_buf_offset))
+ has_dbg_bus = false;
+ }
+
+ if (is_removed || !has_dbg_bus)
+ continue;
+
+ block_dwords = NUM_DBG_LINES(block_per_chip) *
STATIC_DEBUG_LINE_DWORDS;
/* Dump static section params */
+ block = get_dbg_block(p_hwfn, (enum block_id)block_id);
offset += qed_grc_dump_mem_hdr(p_hwfn,
dump_buf + offset,
dump,
block->name,
0,
block_dwords,
- 32, false, "STATIC", false, 0);
+ 32, false, "STATIC", 0);
if (!dump) {
offset += block_dwords;
@@ -3951,20 +3318,19 @@ static u32 qed_grc_dump_static_debug(struct qed_hwfn *p_hwfn,
}
/* Enable block's client */
- dbg_client_id = block->dbg_client_id[dev_data->chip_id];
qed_bus_enable_clients(p_hwfn,
p_ptt,
- BIT(dbg_client_id));
+ BIT(block_per_chip->dbg_client_id));
addr = BYTES_TO_DWORDS(DBG_REG_CALENDAR_OUT_DATA);
len = STATIC_DEBUG_LINE_DWORDS;
- for (line_id = 0; line_id < (u32)NUM_DBG_LINES(block_desc);
+ for (line_id = 0; line_id < (u32)NUM_DBG_LINES(block_per_chip);
line_id++) {
/* Configure debug line ID */
- qed_config_dbg_line(p_hwfn,
- p_ptt,
- (enum block_id)block_id,
- (u8)line_id, 0xf, 0, 0, 0);
+ qed_bus_config_dbg_line(p_hwfn,
+ p_ptt,
+ (enum block_id)block_id,
+ (u8)line_id, 0xf, 0, 0, 0);
/* Read debug line info */
offset += qed_grc_dump_addr_range(p_hwfn,
@@ -3979,7 +3345,8 @@ static u32 qed_grc_dump_static_debug(struct qed_hwfn *p_hwfn,
/* Disable block's client and debug output */
qed_bus_enable_clients(p_hwfn, p_ptt, 0);
- qed_wr(p_hwfn, p_ptt, block->dbg_enable_addr, 0);
+ qed_bus_config_dbg_line(p_hwfn, p_ptt,
+ (enum block_id)block_id, 0, 0, 0, 0, 0);
}
if (dump) {
@@ -3999,8 +3366,8 @@ static enum dbg_status qed_grc_dump(struct qed_hwfn *p_hwfn,
bool dump, u32 *num_dumped_dwords)
{
struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
+ u32 dwords_read, offset = 0;
bool parities_masked = false;
- u32 offset = 0;
u8 i;
*num_dumped_dwords = 0;
@@ -4019,13 +3386,11 @@ static enum dbg_status qed_grc_dump(struct qed_hwfn *p_hwfn,
offset += qed_dump_num_param(dump_buf + offset,
dump,
"num-lcids",
- qed_grc_get_param(p_hwfn,
- DBG_GRC_PARAM_NUM_LCIDS));
+ NUM_OF_LCIDS);
offset += qed_dump_num_param(dump_buf + offset,
dump,
"num-ltids",
- qed_grc_get_param(p_hwfn,
- DBG_GRC_PARAM_NUM_LTIDS));
+ NUM_OF_LTIDS);
offset += qed_dump_num_param(dump_buf + offset,
dump, "num-ports", dev_data->num_ports);
@@ -4037,7 +3402,7 @@ static enum dbg_status qed_grc_dump(struct qed_hwfn *p_hwfn,
/* Take all blocks out of reset (using reset registers) */
if (dump) {
- qed_grc_unreset_blocks(p_hwfn, p_ptt);
+ qed_grc_unreset_blocks(p_hwfn, p_ptt, false);
qed_update_blocks_reset_state(p_hwfn, p_ptt);
}
@@ -4080,7 +3445,7 @@ static enum dbg_status qed_grc_dump(struct qed_hwfn *p_hwfn,
dump_buf +
offset,
dump,
- block_enable, NULL, NULL);
+ block_enable, NULL);
/* Dump special registers */
offset += qed_grc_dump_special_regs(p_hwfn,
@@ -4114,23 +3479,29 @@ static enum dbg_status qed_grc_dump(struct qed_hwfn *p_hwfn,
dump_buf + offset,
dump, i);
- /* Dump IORs */
- if (qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_IOR))
- offset += qed_grc_dump_iors(p_hwfn,
- p_ptt, dump_buf + offset, dump);
-
/* Dump VFC */
- if (qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_VFC))
- offset += qed_grc_dump_vfc(p_hwfn,
- p_ptt, dump_buf + offset, dump);
+ if (qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_VFC)) {
+ dwords_read = qed_grc_dump_vfc(p_hwfn,
+ p_ptt, dump_buf + offset, dump);
+ offset += dwords_read;
+ if (!dwords_read)
+ return DBG_STATUS_VFC_READ_ERROR;
+ }
/* Dump PHY tbus */
if (qed_grc_is_included(p_hwfn,
DBG_GRC_PARAM_DUMP_PHY) && dev_data->chip_id ==
- CHIP_K2 && dev_data->platform_id == PLATFORM_ASIC)
+ CHIP_K2 && dev_data->hw_type == HW_TYPE_ASIC)
offset += qed_grc_dump_phy(p_hwfn,
p_ptt, dump_buf + offset, dump);
+ /* Dump MCP HW Dump */
+ if (qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_MCP_HW_DUMP) &&
+ !qed_grc_get_param(p_hwfn, DBG_GRC_PARAM_NO_MCP))
+ offset += qed_grc_dump_mcp_hw_dump(p_hwfn,
+ p_ptt,
+ dump_buf + offset, dump);
+
/* Dump static debug data (only if not during debug bus recording) */
if (qed_grc_is_included(p_hwfn,
DBG_GRC_PARAM_DUMP_STATIC) &&
@@ -4181,8 +3552,9 @@ static u32 qed_idle_chk_dump_failure(struct qed_hwfn *p_hwfn,
u8 reg_id;
hdr = (struct dbg_idle_chk_result_hdr *)dump_buf;
- regs = &((const union dbg_idle_chk_reg *)
- s_dbg_arrays[BIN_BUF_DBG_IDLE_CHK_REGS].ptr)[rule->reg_offset];
+ regs = (const union dbg_idle_chk_reg *)
+ p_hwfn->dbg_arrays[BIN_BUF_DBG_IDLE_CHK_REGS].ptr +
+ rule->reg_offset;
cond_regs = &regs[0].cond_reg;
info_regs = &regs[rule->num_cond_regs].info_reg;
@@ -4202,8 +3574,8 @@ static u32 qed_idle_chk_dump_failure(struct qed_hwfn *p_hwfn,
const struct dbg_idle_chk_cond_reg *reg = &cond_regs[reg_id];
struct dbg_idle_chk_result_reg_hdr *reg_hdr;
- reg_hdr = (struct dbg_idle_chk_result_reg_hdr *)
- (dump_buf + offset);
+ reg_hdr =
+ (struct dbg_idle_chk_result_reg_hdr *)(dump_buf + offset);
/* Write register header */
if (!dump) {
@@ -4320,12 +3692,13 @@ qed_idle_chk_dump_rule_entries(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
const u32 *imm_values;
rule = &input_rules[i];
- regs = &((const union dbg_idle_chk_reg *)
- s_dbg_arrays[BIN_BUF_DBG_IDLE_CHK_REGS].ptr)
- [rule->reg_offset];
+ regs = (const union dbg_idle_chk_reg *)
+ p_hwfn->dbg_arrays[BIN_BUF_DBG_IDLE_CHK_REGS].ptr +
+ rule->reg_offset;
cond_regs = &regs[0].cond_reg;
- imm_values = &s_dbg_arrays[BIN_BUF_DBG_IDLE_CHK_IMMS].ptr
- [rule->imm_offset];
+ imm_values =
+ (u32 *)p_hwfn->dbg_arrays[BIN_BUF_DBG_IDLE_CHK_IMMS].ptr +
+ rule->imm_offset;
/* Check if all condition register blocks are out of reset, and
* find maximal number of entries (all condition registers that
@@ -4443,10 +3816,12 @@ qed_idle_chk_dump_rule_entries(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
static u32 qed_idle_chk_dump(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt, u32 *dump_buf, bool dump)
{
- u32 num_failing_rules_offset, offset = 0, input_offset = 0;
- u32 num_failing_rules = 0;
+ struct virt_mem_desc *dbg_buf =
+ &p_hwfn->dbg_arrays[BIN_BUF_DBG_IDLE_CHK_RULES];
+ u32 num_failing_rules_offset, offset = 0,
+ input_offset = 0, num_failing_rules = 0;
- /* Dump global params */
+ /* Dump global params - the 1 must match the number of params dumped below */
offset += qed_dump_common_global_params(p_hwfn,
p_ptt,
dump_buf + offset, dump, 1);
@@ -4458,12 +3833,10 @@ static u32 qed_idle_chk_dump(struct qed_hwfn *p_hwfn,
num_failing_rules_offset = offset;
offset += qed_dump_num_param(dump_buf + offset, dump, "num_rules", 0);
- while (input_offset <
- s_dbg_arrays[BIN_BUF_DBG_IDLE_CHK_RULES].size_in_dwords) {
+ while (input_offset < BYTES_TO_DWORDS(dbg_buf->size)) {
const struct dbg_idle_chk_cond_hdr *cond_hdr =
- (const struct dbg_idle_chk_cond_hdr *)
- &s_dbg_arrays[BIN_BUF_DBG_IDLE_CHK_RULES].ptr
- [input_offset++];
+ (const struct dbg_idle_chk_cond_hdr *)dbg_buf->ptr +
+ input_offset++;
bool eval_mode, mode_match = true;
u32 curr_failing_rules;
u16 modes_buf_offset;
@@ -4480,16 +3853,21 @@ static u32 qed_idle_chk_dump(struct qed_hwfn *p_hwfn,
}
if (mode_match) {
+ const struct dbg_idle_chk_rule *rule =
+ (const struct dbg_idle_chk_rule *)((u32 *)
+ dbg_buf->ptr
+ + input_offset);
+ u32 num_input_rules =
+ cond_hdr->data_size / IDLE_CHK_RULE_SIZE_DWORDS;
offset +=
qed_idle_chk_dump_rule_entries(p_hwfn,
- p_ptt,
- dump_buf + offset,
- dump,
- (const struct dbg_idle_chk_rule *)
- &s_dbg_arrays[BIN_BUF_DBG_IDLE_CHK_RULES].
- ptr[input_offset],
- cond_hdr->data_size / IDLE_CHK_RULE_SIZE_DWORDS,
- &curr_failing_rules);
+ p_ptt,
+ dump_buf +
+ offset,
+ dump,
+ rule,
+ num_input_rules,
+ &curr_failing_rules);
num_failing_rules += curr_failing_rules;
}
@@ -4556,7 +3934,7 @@ static enum dbg_status qed_nvram_read(struct qed_hwfn *p_hwfn,
{
u32 ret_mcp_resp, ret_mcp_param, ret_read_size, bytes_to_copy;
s32 bytes_left = nvram_size_bytes;
- u32 read_offset = 0;
+ u32 read_offset = 0, param = 0;
DP_VERBOSE(p_hwfn,
QED_MSG_DEBUG,
@@ -4569,14 +3947,14 @@ static enum dbg_status qed_nvram_read(struct qed_hwfn *p_hwfn,
MCP_DRV_NVM_BUF_LEN) ? MCP_DRV_NVM_BUF_LEN : bytes_left;
/* Call NVRAM read command */
+ SET_MFW_FIELD(param,
+ DRV_MB_PARAM_NVM_OFFSET,
+ nvram_offset_bytes + read_offset);
+ SET_MFW_FIELD(param, DRV_MB_PARAM_NVM_LEN, bytes_to_copy);
if (qed_mcp_nvm_rd_cmd(p_hwfn, p_ptt,
- DRV_MSG_CODE_NVM_READ_NVRAM,
- (nvram_offset_bytes +
- read_offset) |
- (bytes_to_copy <<
- DRV_MB_PARAM_NVM_LEN_OFFSET),
- &ret_mcp_resp, &ret_mcp_param,
- &ret_read_size,
+ DRV_MSG_CODE_NVM_READ_NVRAM, param,
+ &ret_mcp_resp,
+ &ret_mcp_param, &ret_read_size,
(u32 *)((u8 *)ret_buf + read_offset)))
return DBG_STATUS_NVRAM_READ_FAILED;
@@ -4714,12 +4092,12 @@ static enum dbg_status qed_mcp_trace_dump(struct qed_hwfn *p_hwfn,
u32 trace_meta_size_dwords = 0, running_bundle_id, offset = 0;
u32 trace_meta_offset_bytes = 0, trace_meta_size_bytes = 0;
enum dbg_status status;
- bool mcp_access;
int halted = 0;
+ bool use_mfw;
*num_dumped_dwords = 0;
- mcp_access = !qed_grc_get_param(p_hwfn, DBG_GRC_PARAM_NO_MCP);
+ use_mfw = !qed_grc_get_param(p_hwfn, DBG_GRC_PARAM_NO_MCP);
/* Get trace data info */
status = qed_mcp_trace_get_data_info(p_hwfn,
@@ -4740,7 +4118,7 @@ static enum dbg_status qed_mcp_trace_dump(struct qed_hwfn *p_hwfn,
* consistent. If halt fails, the MCP trace is taken anyway, with a small
* risk that it may be corrupt.
*/
- if (dump && mcp_access) {
+ if (dump && use_mfw) {
halted = !qed_mcp_halt(p_hwfn, p_ptt);
if (!halted)
DP_NOTICE(p_hwfn, "MCP halt failed!\n");
@@ -4780,17 +4158,15 @@ static enum dbg_status qed_mcp_trace_dump(struct qed_hwfn *p_hwfn,
*/
trace_meta_size_bytes =
qed_grc_get_param(p_hwfn, DBG_GRC_PARAM_MCP_TRACE_META_SIZE);
- if ((!trace_meta_size_bytes || dump) && mcp_access) {
+ if ((!trace_meta_size_bytes || dump) && use_mfw)
status = qed_mcp_trace_get_meta_info(p_hwfn,
p_ptt,
trace_data_size_bytes,
&running_bundle_id,
&trace_meta_offset_bytes,
&trace_meta_size_bytes);
- if (status == DBG_STATUS_OK)
- trace_meta_size_dwords =
- BYTES_TO_DWORDS(trace_meta_size_bytes);
- }
+ if (status == DBG_STATUS_OK)
+ trace_meta_size_dwords = BYTES_TO_DWORDS(trace_meta_size_bytes);
/* Dump trace meta size param */
offset += qed_dump_num_param(dump_buf + offset,
@@ -4814,7 +4190,7 @@ static enum dbg_status qed_mcp_trace_dump(struct qed_hwfn *p_hwfn,
/* If no mcp access, indicate that the dump doesn't contain the meta
* data from NVRAM.
*/
- return mcp_access ? status : DBG_STATUS_NVRAM_GET_IMAGE_FAILED;
+ return use_mfw ? status : DBG_STATUS_NVRAM_GET_IMAGE_FAILED;
}
/* Dump GRC FIFO */
@@ -4992,16 +4368,18 @@ static enum dbg_status qed_protection_override_dump(struct qed_hwfn *p_hwfn,
override_window_dwords =
qed_rd(p_hwfn, p_ptt, GRC_REG_NUMBER_VALID_OVERRIDE_WINDOW) *
PROTECTION_OVERRIDE_ELEMENT_DWORDS;
- addr = BYTES_TO_DWORDS(GRC_REG_PROTECTION_OVERRIDE_WINDOW);
- offset += qed_grc_dump_addr_range(p_hwfn,
- p_ptt,
- dump_buf + offset,
- true,
- addr,
- override_window_dwords,
- true, SPLIT_TYPE_NONE, 0);
- qed_dump_num_param(dump_buf + size_param_offset, dump, "size",
- override_window_dwords);
+ if (override_window_dwords) {
+ addr = BYTES_TO_DWORDS(GRC_REG_PROTECTION_OVERRIDE_WINDOW);
+ offset += qed_grc_dump_addr_range(p_hwfn,
+ p_ptt,
+ dump_buf + offset,
+ true,
+ addr,
+ override_window_dwords,
+ true, SPLIT_TYPE_NONE, 0);
+ qed_dump_num_param(dump_buf + size_param_offset, dump, "size",
+ override_window_dwords);
+ }
out:
/* Dump last section */
offset += qed_dump_last_section(dump_buf, offset, dump);
@@ -5037,7 +4415,7 @@ static u32 qed_fw_asserts_dump(struct qed_hwfn *p_hwfn,
struct storm_defs *storm = &s_storm_defs[storm_id];
u32 last_list_idx, addr;
- if (dev_data->block_in_reset[storm->block_id])
+ if (dev_data->block_in_reset[storm->sem_block_id])
continue;
/* Read FW info for the current Storm */
@@ -5088,20 +4466,362 @@ static u32 qed_fw_asserts_dump(struct qed_hwfn *p_hwfn,
return offset;
}
+/* Dumps the specified ILT pages to the specified buffer.
+ * Returns the dumped size in dwords.
+ */
+static u32 qed_ilt_dump_pages_range(u32 *dump_buf,
+ bool dump,
+ u32 start_page_id,
+ u32 num_pages,
+ struct phys_mem_desc *ilt_pages,
+ bool dump_page_ids)
+{
+ u32 page_id, end_page_id, offset = 0;
+
+ if (num_pages == 0)
+ return offset;
+
+ end_page_id = start_page_id + num_pages - 1;
+
+ for (page_id = start_page_id; page_id <= end_page_id; page_id++) {
+ struct phys_mem_desc *mem_desc = &ilt_pages[page_id];
+
+ /* Disabled sanity check:
+ * if (page_id >= p_hwfn->p_cxt_mngr->ilt_shadow_size)
+ * break;
+ */
+
+ if (!ilt_pages[page_id].virt_addr)
+ continue;
+
+ if (dump_page_ids) {
+ /* Copy page ID to dump buffer */
+ if (dump)
+ *(dump_buf + offset) = page_id;
+ offset++;
+ } else {
+ /* Copy page memory to dump buffer */
+ if (dump)
+ memcpy(dump_buf + offset,
+ mem_desc->virt_addr, mem_desc->size);
+ offset += BYTES_TO_DWORDS(mem_desc->size);
+ }
+ }
+
+ return offset;
+}
+
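/* Illustrative use of the helper above (hypothetical sizes): dumping page
 * IDs costs at most one dword per mapped page, while dumping page memory
 * costs BYTES_TO_DWORDS(mem_desc->size) per mapped page - e.g. four mapped
 * 4 KB pages cost 4 dwords in ID mode but 4 * (4096 / 4) = 4096 dwords in
 * memory mode:
 *
 *   offset += qed_ilt_dump_pages_range(dump_buf + offset, dump,
 *                                      first_page, 4, ilt_pages, true);
 *   offset += qed_ilt_dump_pages_range(dump_buf + offset, dump,
 *                                      first_page, 4, ilt_pages, false);
 *
 * Pages with a NULL virt_addr are skipped in both modes.
 */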
+/* Dumps a section containing the dumped ILT pages.
+ * Returns the dumped size in dwords.
+ */
+static u32 qed_ilt_dump_pages_section(struct qed_hwfn *p_hwfn,
+ u32 *dump_buf,
+ bool dump,
+ u32 valid_conn_pf_pages,
+ u32 valid_conn_vf_pages,
+ struct phys_mem_desc *ilt_pages,
+ bool dump_page_ids)
+{
+ struct qed_ilt_client_cfg *clients = p_hwfn->p_cxt_mngr->clients;
+ u32 pf_start_line, start_page_id, offset = 0;
+ u32 cdut_pf_init_pages, cdut_vf_init_pages;
+ u32 cdut_pf_work_pages, cdut_vf_work_pages;
+ u32 base_data_offset, size_param_offset;
+ u32 cdut_pf_pages, cdut_vf_pages;
+ const char *section_name;
+ u8 i;
+
+ section_name = dump_page_ids ? "ilt_page_ids" : "ilt_page_mem";
+ cdut_pf_init_pages = qed_get_cdut_num_pf_init_pages(p_hwfn);
+ cdut_vf_init_pages = qed_get_cdut_num_vf_init_pages(p_hwfn);
+ cdut_pf_work_pages = qed_get_cdut_num_pf_work_pages(p_hwfn);
+ cdut_vf_work_pages = qed_get_cdut_num_vf_work_pages(p_hwfn);
+ cdut_pf_pages = cdut_pf_init_pages + cdut_pf_work_pages;
+ cdut_vf_pages = cdut_vf_init_pages + cdut_vf_work_pages;
+ pf_start_line = p_hwfn->p_cxt_mngr->pf_start_line;
+
+ offset +=
+ qed_dump_section_hdr(dump_buf + offset, dump, section_name, 1);
+
+ /* Dump size parameter (0 for now, overwritten with real size later) */
+ size_param_offset = offset;
+ offset += qed_dump_num_param(dump_buf + offset, dump, "size", 0);
+ base_data_offset = offset;
+
+ /* CDUC pages are ordered as follows:
+ * - PF pages - valid section (included in PF connection type mapping)
+ * - PF pages - invalid section (not dumped)
+ * - For each VF in the PF:
+ * - VF pages - valid section (included in VF connection type mapping)
+ * - VF pages - invalid section (not dumped)
+ */
+ if (qed_grc_get_param(p_hwfn, DBG_GRC_PARAM_DUMP_ILT_CDUC)) {
+ /* Dump connection PF pages */
+ start_page_id = clients[ILT_CLI_CDUC].first.val - pf_start_line;
+ offset += qed_ilt_dump_pages_range(dump_buf + offset,
+ dump,
+ start_page_id,
+ valid_conn_pf_pages,
+ ilt_pages, dump_page_ids);
+
+ /* Dump connection VF pages */
+ start_page_id += clients[ILT_CLI_CDUC].pf_total_lines;
+ for (i = 0; i < p_hwfn->p_cxt_mngr->vf_count;
+ i++, start_page_id += clients[ILT_CLI_CDUC].vf_total_lines)
+ offset += qed_ilt_dump_pages_range(dump_buf + offset,
+ dump,
+ start_page_id,
+ valid_conn_vf_pages,
+ ilt_pages,
+ dump_page_ids);
+ }
+
+ /* CDUT pages are ordered as follows:
+ * - PF init pages (not dumped)
+ * - PF work pages
+ * - For each VF in the PF:
+ * - VF init pages (not dumped)
+ * - VF work pages
+ */
+ if (qed_grc_get_param(p_hwfn, DBG_GRC_PARAM_DUMP_ILT_CDUT)) {
+ /* Dump task PF pages */
+ start_page_id = clients[ILT_CLI_CDUT].first.val +
+ cdut_pf_init_pages - pf_start_line;
+ offset += qed_ilt_dump_pages_range(dump_buf + offset,
+ dump,
+ start_page_id,
+ cdut_pf_work_pages,
+ ilt_pages, dump_page_ids);
+
+ /* Dump task VF pages */
+ start_page_id = clients[ILT_CLI_CDUT].first.val +
+ cdut_pf_pages + cdut_vf_init_pages - pf_start_line;
+ for (i = 0; i < p_hwfn->p_cxt_mngr->vf_count;
+ i++, start_page_id += cdut_vf_pages)
+ offset += qed_ilt_dump_pages_range(dump_buf + offset,
+ dump,
+ start_page_id,
+ cdut_vf_work_pages,
+ ilt_pages,
+ dump_page_ids);
+ }
+
+ /* Overwrite size param */
+ if (dump)
+ qed_dump_num_param(dump_buf + size_param_offset,
+ dump, "size", offset - base_data_offset);
+
+ return offset;
+}
+
+/* Performs ILT Dump to the specified buffer.
+ * Returns the dumped size in dwords.
+ */
+static u32 qed_ilt_dump(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt, u32 *dump_buf, bool dump)
+{
+ struct qed_ilt_client_cfg *clients = p_hwfn->p_cxt_mngr->clients;
+ u32 valid_conn_vf_cids, valid_conn_vf_pages, offset = 0;
+ u32 valid_conn_pf_cids, valid_conn_pf_pages, num_pages;
+ u32 num_cids_per_page, conn_ctx_size;
+ u32 cduc_page_size, cdut_page_size;
+ struct phys_mem_desc *ilt_pages;
+ u8 conn_type;
+
+ cduc_page_size = 1 <<
+ (clients[ILT_CLI_CDUC].p_size.val + PXP_ILT_PAGE_SIZE_NUM_BITS_MIN);
+ cdut_page_size = 1 <<
+ (clients[ILT_CLI_CDUT].p_size.val + PXP_ILT_PAGE_SIZE_NUM_BITS_MIN);
+ conn_ctx_size = p_hwfn->p_cxt_mngr->conn_ctx_size;
+ num_cids_per_page = (int)(cduc_page_size / conn_ctx_size);
+ ilt_pages = p_hwfn->p_cxt_mngr->ilt_shadow;
+
+ /* Dump global params - 22 must match number of params below */
+ offset += qed_dump_common_global_params(p_hwfn, p_ptt,
+ dump_buf + offset, dump, 22);
+ offset += qed_dump_str_param(dump_buf + offset,
+ dump, "dump-type", "ilt-dump");
+ offset += qed_dump_num_param(dump_buf + offset,
+ dump,
+ "cduc-page-size", cduc_page_size);
+ offset += qed_dump_num_param(dump_buf + offset,
+ dump,
+ "cduc-first-page-id",
+ clients[ILT_CLI_CDUC].first.val);
+ offset += qed_dump_num_param(dump_buf + offset,
+ dump,
+ "cduc-last-page-id",
+ clients[ILT_CLI_CDUC].last.val);
+ offset += qed_dump_num_param(dump_buf + offset,
+ dump,
+ "cduc-num-pf-pages",
+ clients
+ [ILT_CLI_CDUC].pf_total_lines);
+ offset += qed_dump_num_param(dump_buf + offset,
+ dump,
+ "cduc-num-vf-pages",
+ clients
+ [ILT_CLI_CDUC].vf_total_lines);
+ offset += qed_dump_num_param(dump_buf + offset,
+ dump,
+ "max-conn-ctx-size",
+ conn_ctx_size);
+ offset += qed_dump_num_param(dump_buf + offset,
+ dump,
+ "cdut-page-size", cdut_page_size);
+ offset += qed_dump_num_param(dump_buf + offset,
+ dump,
+ "cdut-first-page-id",
+ clients[ILT_CLI_CDUT].first.val);
+ offset += qed_dump_num_param(dump_buf + offset,
+ dump,
+ "cdut-last-page-id",
+ clients[ILT_CLI_CDUT].last.val);
+ offset += qed_dump_num_param(dump_buf + offset,
+ dump,
+ "cdut-num-pf-init-pages",
+ qed_get_cdut_num_pf_init_pages(p_hwfn));
+ offset += qed_dump_num_param(dump_buf + offset,
+ dump,
+ "cdut-num-vf-init-pages",
+ qed_get_cdut_num_vf_init_pages(p_hwfn));
+ offset += qed_dump_num_param(dump_buf + offset,
+ dump,
+ "cdut-num-pf-work-pages",
+ qed_get_cdut_num_pf_work_pages(p_hwfn));
+ offset += qed_dump_num_param(dump_buf + offset,
+ dump,
+ "cdut-num-vf-work-pages",
+ qed_get_cdut_num_vf_work_pages(p_hwfn));
+ offset += qed_dump_num_param(dump_buf + offset,
+ dump,
+ "max-task-ctx-size",
+ p_hwfn->p_cxt_mngr->task_ctx_size);
+ offset += qed_dump_num_param(dump_buf + offset,
+ dump,
+ "task-type-id",
+ p_hwfn->p_cxt_mngr->task_type_id);
+ offset += qed_dump_num_param(dump_buf + offset,
+ dump,
+ "first-vf-id-in-pf",
+ p_hwfn->p_cxt_mngr->first_vf_in_pf);
+ offset += /* 18 */ qed_dump_num_param(dump_buf + offset,
+ dump,
+ "num-vfs-in-pf",
+ p_hwfn->p_cxt_mngr->vf_count);
+ offset += qed_dump_num_param(dump_buf + offset,
+ dump,
+ "ptr-size-bytes", sizeof(void *));
+ offset += qed_dump_num_param(dump_buf + offset,
+ dump,
+ "pf-start-line",
+ p_hwfn->p_cxt_mngr->pf_start_line);
+ offset += qed_dump_num_param(dump_buf + offset,
+ dump,
+ "page-mem-desc-size-dwords",
+ PAGE_MEM_DESC_SIZE_DWORDS);
+ offset += qed_dump_num_param(dump_buf + offset,
+ dump,
+ "ilt-shadow-size",
+ p_hwfn->p_cxt_mngr->ilt_shadow_size);
+	/* If parameters are added or removed, the count passed above to
+	 * qed_dump_common_global_params() must be updated to match.
+	 */
+
+ /* Dump section containing number of PF CIDs per connection type */
+ offset += qed_dump_section_hdr(dump_buf + offset,
+ dump, "num_pf_cids_per_conn_type", 1);
+ offset += qed_dump_num_param(dump_buf + offset,
+ dump, "size", NUM_OF_CONNECTION_TYPES_E4);
+ for (conn_type = 0, valid_conn_pf_cids = 0;
+ conn_type < NUM_OF_CONNECTION_TYPES_E4; conn_type++, offset++) {
+ u32 num_pf_cids =
+ p_hwfn->p_cxt_mngr->conn_cfg[conn_type].cid_count;
+
+ if (dump)
+ *(dump_buf + offset) = num_pf_cids;
+ valid_conn_pf_cids += num_pf_cids;
+ }
+
+ /* Dump section containing number of VF CIDs per connection type */
+ offset += qed_dump_section_hdr(dump_buf + offset,
+ dump, "num_vf_cids_per_conn_type", 1);
+ offset += qed_dump_num_param(dump_buf + offset,
+ dump, "size", NUM_OF_CONNECTION_TYPES_E4);
+ for (conn_type = 0, valid_conn_vf_cids = 0;
+ conn_type < NUM_OF_CONNECTION_TYPES_E4; conn_type++, offset++) {
+ u32 num_vf_cids =
+ p_hwfn->p_cxt_mngr->conn_cfg[conn_type].cids_per_vf;
+
+ if (dump)
+ *(dump_buf + offset) = num_vf_cids;
+ valid_conn_vf_cids += num_vf_cids;
+ }
+
+ /* Dump section containing physical memory descs for each ILT page */
+ num_pages = p_hwfn->p_cxt_mngr->ilt_shadow_size;
+ offset += qed_dump_section_hdr(dump_buf + offset,
+ dump, "ilt_page_desc", 1);
+ offset += qed_dump_num_param(dump_buf + offset,
+ dump,
+ "size",
+ num_pages * PAGE_MEM_DESC_SIZE_DWORDS);
+
+ /* Copy memory descriptors to dump buffer */
+ if (dump) {
+ u32 page_id;
+
+ for (page_id = 0; page_id < num_pages;
+ page_id++, offset += PAGE_MEM_DESC_SIZE_DWORDS)
+ memcpy(dump_buf + offset,
+ &ilt_pages[page_id],
+ DWORDS_TO_BYTES(PAGE_MEM_DESC_SIZE_DWORDS));
+ } else {
+ offset += num_pages * PAGE_MEM_DESC_SIZE_DWORDS;
+ }
+
+ valid_conn_pf_pages = DIV_ROUND_UP(valid_conn_pf_cids,
+ num_cids_per_page);
+ valid_conn_vf_pages = DIV_ROUND_UP(valid_conn_vf_cids,
+ num_cids_per_page);
+
+ /* Dump ILT pages IDs */
+ offset += qed_ilt_dump_pages_section(p_hwfn,
+ dump_buf + offset,
+ dump,
+ valid_conn_pf_pages,
+ valid_conn_vf_pages,
+ ilt_pages, true);
+
+ /* Dump ILT pages memory */
+ offset += qed_ilt_dump_pages_section(p_hwfn,
+ dump_buf + offset,
+ dump,
+ valid_conn_pf_pages,
+ valid_conn_vf_pages,
+ ilt_pages, false);
+
+ /* Dump last section */
+ offset += qed_dump_last_section(dump_buf, offset, dump);
+
+ return offset;
+}
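qed_ilt_dump() follows the driver's usual two-pass convention: a sizing pass with dump=false that only advances the offset, then a real pass with dump=true into an allocated buffer. A hypothetical caller sketch of that pattern (the patch's actual entry points are qed_dbg_ilt_get_dump_buf_size() and qed_dbg_ilt_dump() further down):

	u32 size_dwords, dumped_dwords;
	u32 *buf;

	/* Pass 1: compute required size in dwords without writing */
	size_dwords = qed_ilt_dump(p_hwfn, p_ptt, NULL, false);
	buf = vmalloc(size_dwords * sizeof(u32));
	if (!buf)
		return DBG_STATUS_VIRT_MEM_ALLOC_FAILED;

	/* Pass 2: fill the buffer for real */
	dumped_dwords = qed_ilt_dump(p_hwfn, p_ptt, buf, true);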
+
/***************************** Public Functions *******************************/
-enum dbg_status qed_dbg_set_bin_ptr(const u8 * const bin_ptr)
+enum dbg_status qed_dbg_set_bin_ptr(struct qed_hwfn *p_hwfn,
+ const u8 * const bin_ptr)
{
- struct bin_buffer_hdr *buf_array = (struct bin_buffer_hdr *)bin_ptr;
+ struct bin_buffer_hdr *buf_hdrs = (struct bin_buffer_hdr *)bin_ptr;
u8 buf_id;
- /* convert binary data to debug arrays */
- for (buf_id = 0; buf_id < MAX_BIN_DBG_BUFFER_TYPE; buf_id++) {
- s_dbg_arrays[buf_id].ptr =
- (u32 *)(bin_ptr + buf_array[buf_id].offset);
- s_dbg_arrays[buf_id].size_in_dwords =
- BYTES_TO_DWORDS(buf_array[buf_id].length);
- }
+ /* Convert binary data to debug arrays */
+ for (buf_id = 0; buf_id < MAX_BIN_DBG_BUFFER_TYPE; buf_id++)
+ qed_set_dbg_bin_buf(p_hwfn,
+ buf_id,
+ (u32 *)(bin_ptr + buf_hdrs[buf_id].offset),
+ buf_hdrs[buf_id].length);
return DBG_STATUS_OK;
}
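The per-hwfn registration above assumes the firmware debug blob is self-describing. A sketch of the assumed layout (offsets are relative to bin_ptr itself):

/* Assumed blob layout behind qed_dbg_set_bin_ptr():
 *
 *   bin_ptr -> [hdr 0][hdr 1] ... [hdr N-1][ buffer data ... ]
 *
 * One bin_buffer_hdr per buffer type sits at the start of the blob, so
 * buffer i starts at bin_ptr + buf_hdrs[i].offset and spans
 * buf_hdrs[i].length bytes; qed_set_dbg_bin_buf() presumably stores the
 * pointer and the length (converted to dwords) per hwfn.
 */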
@@ -5116,7 +4836,7 @@ bool qed_read_fw_info(struct qed_hwfn *p_hwfn,
struct storm_defs *storm = &s_storm_defs[storm_id];
/* Skip Storm if it's in reset */
- if (dev_data->block_in_reset[storm->block_id])
+ if (dev_data->block_in_reset[storm->sem_block_id])
continue;
/* Read FW info for the current Storm */
@@ -5129,16 +4849,17 @@ bool qed_read_fw_info(struct qed_hwfn *p_hwfn,
}
enum dbg_status qed_dbg_grc_config(struct qed_hwfn *p_hwfn,
- struct qed_ptt *p_ptt,
enum dbg_grc_params grc_param, u32 val)
{
+ struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
enum dbg_status status;
int i;
- DP_VERBOSE(p_hwfn, QED_MSG_DEBUG,
+ DP_VERBOSE(p_hwfn,
+ QED_MSG_DEBUG,
"dbg_grc_config: paramId = %d, val = %d\n", grc_param, val);
- status = qed_dbg_dev_init(p_hwfn, p_ptt);
+ status = qed_dbg_dev_init(p_hwfn);
if (status != DBG_STATUS_OK)
return status;
@@ -5164,24 +4885,23 @@ enum dbg_status qed_dbg_grc_config(struct qed_hwfn *p_hwfn,
/* Update all params with the preset values */
for (i = 0; i < MAX_DBG_GRC_PARAMS; i++) {
+ struct grc_param_defs *defs = &s_grc_param_defs[i];
u32 preset_val;
-
/* Skip persistent params */
- if (s_grc_param_defs[i].is_persistent)
+ if (defs->is_persistent)
continue;
/* Find preset value */
if (grc_param == DBG_GRC_PARAM_EXCLUDE_ALL)
preset_val =
- s_grc_param_defs[i].exclude_all_preset_val;
+ defs->exclude_all_preset_val;
else if (grc_param == DBG_GRC_PARAM_CRASH)
preset_val =
- s_grc_param_defs[i].crash_preset_val;
+ defs->crash_preset_val[dev_data->chip_id];
else
return DBG_STATUS_INVALID_ARGS;
- qed_grc_set_param(p_hwfn,
- (enum dbg_grc_params)i, preset_val);
+ qed_grc_set_param(p_hwfn, i, preset_val);
}
} else {
/* Regular param - set its value */
@@ -5207,18 +4927,18 @@ enum dbg_status qed_dbg_grc_get_dump_buf_size(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt,
u32 *buf_size)
{
- enum dbg_status status = qed_dbg_dev_init(p_hwfn, p_ptt);
+ enum dbg_status status = qed_dbg_dev_init(p_hwfn);
*buf_size = 0;
if (status != DBG_STATUS_OK)
return status;
- if (!s_dbg_arrays[BIN_BUF_DBG_MODE_TREE].ptr ||
- !s_dbg_arrays[BIN_BUF_DBG_DUMP_REG].ptr ||
- !s_dbg_arrays[BIN_BUF_DBG_DUMP_MEM].ptr ||
- !s_dbg_arrays[BIN_BUF_DBG_ATTN_BLOCKS].ptr ||
- !s_dbg_arrays[BIN_BUF_DBG_ATTN_REGS].ptr)
+ if (!p_hwfn->dbg_arrays[BIN_BUF_DBG_MODE_TREE].ptr ||
+ !p_hwfn->dbg_arrays[BIN_BUF_DBG_DUMP_REG].ptr ||
+ !p_hwfn->dbg_arrays[BIN_BUF_DBG_DUMP_MEM].ptr ||
+ !p_hwfn->dbg_arrays[BIN_BUF_DBG_ATTN_BLOCKS].ptr ||
+ !p_hwfn->dbg_arrays[BIN_BUF_DBG_ATTN_REGS].ptr)
return DBG_STATUS_DBG_ARRAY_NOT_SET;
return qed_grc_dump(p_hwfn, p_ptt, NULL, false, buf_size);
@@ -5258,20 +4978,19 @@ enum dbg_status qed_dbg_idle_chk_get_dump_buf_size(struct qed_hwfn *p_hwfn,
u32 *buf_size)
{
struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
- struct idle_chk_data *idle_chk;
+ struct idle_chk_data *idle_chk = &dev_data->idle_chk;
enum dbg_status status;
- idle_chk = &dev_data->idle_chk;
*buf_size = 0;
- status = qed_dbg_dev_init(p_hwfn, p_ptt);
+ status = qed_dbg_dev_init(p_hwfn);
if (status != DBG_STATUS_OK)
return status;
- if (!s_dbg_arrays[BIN_BUF_DBG_MODE_TREE].ptr ||
- !s_dbg_arrays[BIN_BUF_DBG_IDLE_CHK_REGS].ptr ||
- !s_dbg_arrays[BIN_BUF_DBG_IDLE_CHK_IMMS].ptr ||
- !s_dbg_arrays[BIN_BUF_DBG_IDLE_CHK_RULES].ptr)
+ if (!p_hwfn->dbg_arrays[BIN_BUF_DBG_MODE_TREE].ptr ||
+ !p_hwfn->dbg_arrays[BIN_BUF_DBG_IDLE_CHK_REGS].ptr ||
+ !p_hwfn->dbg_arrays[BIN_BUF_DBG_IDLE_CHK_IMMS].ptr ||
+ !p_hwfn->dbg_arrays[BIN_BUF_DBG_IDLE_CHK_RULES].ptr)
return DBG_STATUS_DBG_ARRAY_NOT_SET;
if (!idle_chk->buf_size_set) {
@@ -5306,6 +5025,7 @@ enum dbg_status qed_dbg_idle_chk_dump(struct qed_hwfn *p_hwfn,
return DBG_STATUS_DUMP_BUF_TOO_SMALL;
/* Update reset state */
+ qed_grc_unreset_blocks(p_hwfn, p_ptt, true);
qed_update_blocks_reset_state(p_hwfn, p_ptt);
/* Idle Check Dump */
@@ -5321,7 +5041,7 @@ enum dbg_status qed_dbg_mcp_trace_get_dump_buf_size(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt,
u32 *buf_size)
{
- enum dbg_status status = qed_dbg_dev_init(p_hwfn, p_ptt);
+ enum dbg_status status = qed_dbg_dev_init(p_hwfn);
*buf_size = 0;
@@ -5368,7 +5088,7 @@ enum dbg_status qed_dbg_reg_fifo_get_dump_buf_size(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt,
u32 *buf_size)
{
- enum dbg_status status = qed_dbg_dev_init(p_hwfn, p_ptt);
+ enum dbg_status status = qed_dbg_dev_init(p_hwfn);
*buf_size = 0;
@@ -5414,7 +5134,7 @@ enum dbg_status qed_dbg_igu_fifo_get_dump_buf_size(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt,
u32 *buf_size)
{
- enum dbg_status status = qed_dbg_dev_init(p_hwfn, p_ptt);
+ enum dbg_status status = qed_dbg_dev_init(p_hwfn);
*buf_size = 0;
@@ -5460,7 +5180,7 @@ qed_dbg_protection_override_get_dump_buf_size(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt,
u32 *buf_size)
{
- enum dbg_status status = qed_dbg_dev_init(p_hwfn, p_ptt);
+ enum dbg_status status = qed_dbg_dev_init(p_hwfn);
*buf_size = 0;
@@ -5510,7 +5230,7 @@ enum dbg_status qed_dbg_fw_asserts_get_dump_buf_size(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt,
u32 *buf_size)
{
- enum dbg_status status = qed_dbg_dev_init(p_hwfn, p_ptt);
+ enum dbg_status status = qed_dbg_dev_init(p_hwfn);
*buf_size = 0;
@@ -5554,6 +5274,50 @@ enum dbg_status qed_dbg_fw_asserts_dump(struct qed_hwfn *p_hwfn,
return DBG_STATUS_OK;
}
+static enum dbg_status qed_dbg_ilt_get_dump_buf_size(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ u32 *buf_size)
+{
+ enum dbg_status status = qed_dbg_dev_init(p_hwfn);
+
+ *buf_size = 0;
+
+ if (status != DBG_STATUS_OK)
+ return status;
+
+ *buf_size = qed_ilt_dump(p_hwfn, p_ptt, NULL, false);
+
+ return DBG_STATUS_OK;
+}
+
+static enum dbg_status qed_dbg_ilt_dump(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ u32 *dump_buf,
+ u32 buf_size_in_dwords,
+ u32 *num_dumped_dwords)
+{
+ u32 needed_buf_size_in_dwords;
+ enum dbg_status status;
+
+ *num_dumped_dwords = 0;
+
+ status = qed_dbg_ilt_get_dump_buf_size(p_hwfn,
+ p_ptt,
+ &needed_buf_size_in_dwords);
+ if (status != DBG_STATUS_OK)
+ return status;
+
+ if (buf_size_in_dwords < needed_buf_size_in_dwords)
+ return DBG_STATUS_DUMP_BUF_TOO_SMALL;
+
+ *num_dumped_dwords = qed_ilt_dump(p_hwfn, p_ptt, dump_buf, true);
+
+	/* Revert GRC params to their default */
+ qed_dbg_grc_set_params_default(p_hwfn);
+
+ return DBG_STATUS_OK;
+}
+
enum dbg_status qed_dbg_read_attn(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt,
enum block_id block_id,
@@ -5561,19 +5325,20 @@ enum dbg_status qed_dbg_read_attn(struct qed_hwfn *p_hwfn,
bool clear_status,
struct dbg_attn_block_result *results)
{
- enum dbg_status status = qed_dbg_dev_init(p_hwfn, p_ptt);
+ enum dbg_status status = qed_dbg_dev_init(p_hwfn);
u8 reg_idx, num_attn_regs, num_result_regs = 0;
const struct dbg_attn_reg *attn_reg_arr;
if (status != DBG_STATUS_OK)
return status;
- if (!s_dbg_arrays[BIN_BUF_DBG_MODE_TREE].ptr ||
- !s_dbg_arrays[BIN_BUF_DBG_ATTN_BLOCKS].ptr ||
- !s_dbg_arrays[BIN_BUF_DBG_ATTN_REGS].ptr)
+ if (!p_hwfn->dbg_arrays[BIN_BUF_DBG_MODE_TREE].ptr ||
+ !p_hwfn->dbg_arrays[BIN_BUF_DBG_ATTN_BLOCKS].ptr ||
+ !p_hwfn->dbg_arrays[BIN_BUF_DBG_ATTN_REGS].ptr)
return DBG_STATUS_DBG_ARRAY_NOT_SET;
- attn_reg_arr = qed_get_block_attn_regs(block_id,
+ attn_reg_arr = qed_get_block_attn_regs(p_hwfn,
+ block_id,
attn_type, &num_attn_regs);
for (reg_idx = 0; reg_idx < num_attn_regs; reg_idx++) {
@@ -5618,7 +5383,7 @@ enum dbg_status qed_dbg_read_attn(struct qed_hwfn *p_hwfn,
results->block_id = (u8)block_id;
results->names_offset =
- qed_get_block_attn_data(block_id, attn_type)->names_offset;
+ qed_get_block_attn_data(p_hwfn, block_id, attn_type)->names_offset;
SET_FIELD(results->data, DBG_ATTN_BLOCK_RESULT_ATTN_TYPE, attn_type);
SET_FIELD(results->data,
DBG_ATTN_BLOCK_RESULT_NUM_REGS, num_result_regs);
@@ -5628,11 +5393,6 @@ enum dbg_status qed_dbg_read_attn(struct qed_hwfn *p_hwfn,
/******************************* Data Types **********************************/
-struct block_info {
- const char *name;
- enum block_id id;
-};
-
/* REG fifo element */
struct reg_fifo_element {
u64 data;
@@ -5656,6 +5416,12 @@ struct reg_fifo_element {
#define REG_FIFO_ELEMENT_ERROR_MASK 0x1f
};
+/* REG fifo error element */
+struct reg_fifo_err {
+ u32 err_code;
+ const char *err_msg;
+};
+
/* IGU fifo element */
struct igu_fifo_element {
u32 dword0;
@@ -5755,20 +5521,6 @@ struct igu_fifo_addr_data {
enum igu_fifo_addr_types type;
};
-struct mcp_trace_meta {
- u32 modules_num;
- char **modules;
- u32 formats_num;
- struct mcp_trace_format *formats;
- bool is_allocated;
-};
-
-/* Debug Tools user data */
-struct dbg_tools_user_data {
- struct mcp_trace_meta mcp_trace_meta;
- const u32 *mcp_trace_user_meta_buf;
-};
-
/******************************** Constants **********************************/
#define MAX_MSG_LEN 1024
@@ -5776,7 +5528,7 @@ struct dbg_tools_user_data {
#define MCP_TRACE_MAX_MODULE_LEN 8
#define MCP_TRACE_FORMAT_MAX_PARAMS 3
#define MCP_TRACE_FORMAT_PARAM_WIDTH \
- (MCP_TRACE_FORMAT_P2_SIZE_SHIFT - MCP_TRACE_FORMAT_P1_SIZE_SHIFT)
+ (MCP_TRACE_FORMAT_P2_SIZE_OFFSET - MCP_TRACE_FORMAT_P1_SIZE_OFFSET)
#define REG_FIFO_ELEMENT_ADDR_FACTOR 4
#define REG_FIFO_ELEMENT_IS_PF_VF_VAL 127
@@ -5785,107 +5537,6 @@ struct dbg_tools_user_data {
/***************************** Constant Arrays *******************************/
-struct user_dbg_array {
- const u32 *ptr;
- u32 size_in_dwords;
-};
-
-/* Debug arrays */
-static struct user_dbg_array
-s_user_dbg_arrays[MAX_BIN_DBG_BUFFER_TYPE] = { {NULL} };
-
-/* Block names array */
-static struct block_info s_block_info_arr[] = {
- {"grc", BLOCK_GRC},
- {"miscs", BLOCK_MISCS},
- {"misc", BLOCK_MISC},
- {"dbu", BLOCK_DBU},
- {"pglue_b", BLOCK_PGLUE_B},
- {"cnig", BLOCK_CNIG},
- {"cpmu", BLOCK_CPMU},
- {"ncsi", BLOCK_NCSI},
- {"opte", BLOCK_OPTE},
- {"bmb", BLOCK_BMB},
- {"pcie", BLOCK_PCIE},
- {"mcp", BLOCK_MCP},
- {"mcp2", BLOCK_MCP2},
- {"pswhst", BLOCK_PSWHST},
- {"pswhst2", BLOCK_PSWHST2},
- {"pswrd", BLOCK_PSWRD},
- {"pswrd2", BLOCK_PSWRD2},
- {"pswwr", BLOCK_PSWWR},
- {"pswwr2", BLOCK_PSWWR2},
- {"pswrq", BLOCK_PSWRQ},
- {"pswrq2", BLOCK_PSWRQ2},
- {"pglcs", BLOCK_PGLCS},
- {"ptu", BLOCK_PTU},
- {"dmae", BLOCK_DMAE},
- {"tcm", BLOCK_TCM},
- {"mcm", BLOCK_MCM},
- {"ucm", BLOCK_UCM},
- {"xcm", BLOCK_XCM},
- {"ycm", BLOCK_YCM},
- {"pcm", BLOCK_PCM},
- {"qm", BLOCK_QM},
- {"tm", BLOCK_TM},
- {"dorq", BLOCK_DORQ},
- {"brb", BLOCK_BRB},
- {"src", BLOCK_SRC},
- {"prs", BLOCK_PRS},
- {"tsdm", BLOCK_TSDM},
- {"msdm", BLOCK_MSDM},
- {"usdm", BLOCK_USDM},
- {"xsdm", BLOCK_XSDM},
- {"ysdm", BLOCK_YSDM},
- {"psdm", BLOCK_PSDM},
- {"tsem", BLOCK_TSEM},
- {"msem", BLOCK_MSEM},
- {"usem", BLOCK_USEM},
- {"xsem", BLOCK_XSEM},
- {"ysem", BLOCK_YSEM},
- {"psem", BLOCK_PSEM},
- {"rss", BLOCK_RSS},
- {"tmld", BLOCK_TMLD},
- {"muld", BLOCK_MULD},
- {"yuld", BLOCK_YULD},
- {"xyld", BLOCK_XYLD},
- {"ptld", BLOCK_PTLD},
- {"ypld", BLOCK_YPLD},
- {"prm", BLOCK_PRM},
- {"pbf_pb1", BLOCK_PBF_PB1},
- {"pbf_pb2", BLOCK_PBF_PB2},
- {"rpb", BLOCK_RPB},
- {"btb", BLOCK_BTB},
- {"pbf", BLOCK_PBF},
- {"rdif", BLOCK_RDIF},
- {"tdif", BLOCK_TDIF},
- {"cdu", BLOCK_CDU},
- {"ccfc", BLOCK_CCFC},
- {"tcfc", BLOCK_TCFC},
- {"igu", BLOCK_IGU},
- {"cau", BLOCK_CAU},
- {"rgfs", BLOCK_RGFS},
- {"rgsrc", BLOCK_RGSRC},
- {"tgfs", BLOCK_TGFS},
- {"tgsrc", BLOCK_TGSRC},
- {"umac", BLOCK_UMAC},
- {"xmac", BLOCK_XMAC},
- {"dbg", BLOCK_DBG},
- {"nig", BLOCK_NIG},
- {"wol", BLOCK_WOL},
- {"bmbn", BLOCK_BMBN},
- {"ipc", BLOCK_IPC},
- {"nwm", BLOCK_NWM},
- {"nws", BLOCK_NWS},
- {"ms", BLOCK_MS},
- {"phy_pcie", BLOCK_PHY_PCIE},
- {"led", BLOCK_LED},
- {"avs_wrap", BLOCK_AVS_WRAP},
- {"pxpreqbus", BLOCK_PXPREQBUS},
- {"misc_aeu", BLOCK_MISC_AEU},
- {"bar0_map", BLOCK_BAR0_MAP}
-};
-
/* Status string array */
static const char * const s_status_str[] = {
/* DBG_STATUS_OK */
@@ -5915,14 +5566,12 @@ static const char * const s_status_str[] = {
/* DBG_STATUS_PCI_BUF_NOT_ALLOCATED */
"A PCI buffer wasn't allocated",
- /* DBG_STATUS_TOO_MANY_INPUTS */
- "Too many inputs were enabled. Enabled less inputs, or set 'unifyInputs' to true",
+ /* DBG_STATUS_INVALID_FILTER_TRIGGER_DWORDS */
+ "The filter/trigger constraint dword offsets are not enabled for recording",
- /* DBG_STATUS_INPUT_OVERLAP */
- "Overlapping debug bus inputs",
- /* DBG_STATUS_HW_ONLY_RECORDING */
- "Cannot record Storm data since the entire recording cycle is used by HW",
+ /* DBG_STATUS_VFC_READ_ERROR */
+ "Error reading from VFC",
/* DBG_STATUS_STORM_ALREADY_ENABLED */
"The Storm was already enabled",
@@ -5939,8 +5588,8 @@ static const char * const s_status_str[] = {
/* DBG_STATUS_NO_INPUT_ENABLED */
"No input was enabled for recording",
- /* DBG_STATUS_NO_FILTER_TRIGGER_64B */
- "Filters and triggers are not allowed when recording in 64b units",
+ /* DBG_STATUS_NO_FILTER_TRIGGER_256B */
+ "Filters and triggers are not allowed in E4 256-bit mode",
/* DBG_STATUS_FILTER_ALREADY_ENABLED */
"The filter was already enabled",
@@ -6014,8 +5663,8 @@ static const char * const s_status_str[] = {
/* DBG_STATUS_MCP_COULD_NOT_RESUME */
"Failed to resume MCP after halt",
- /* DBG_STATUS_RESERVED2 */
- "Reserved debug status - shouldn't be returned",
+ /* DBG_STATUS_RESERVED0 */
+ "",
/* DBG_STATUS_SEMI_FIFO_NOT_EMPTY */
"Failed to empty SEMI sync FIFO",
@@ -6038,17 +5687,32 @@ static const char * const s_status_str[] = {
/* DBG_STATUS_DBG_ARRAY_NOT_SET */
"Debug arrays were not set (when using binary files, dbg_set_bin_ptr must be called)",
- /* DBG_STATUS_FILTER_BUG */
- "Debug Bus filtering requires the -unifyInputs option (due to a HW bug)",
+ /* DBG_STATUS_RESERVED1 */
+ "",
/* DBG_STATUS_NON_MATCHING_LINES */
- "Non-matching debug lines - all lines must be of the same type (either 128b or 256b)",
+ "Non-matching debug lines - in E4, all lines must be of the same type (either 128b or 256b)",
- /* DBG_STATUS_INVALID_TRIGGER_DWORD_OFFSET */
- "The selected trigger dword offset wasn't enabled in the recorded HW block",
+ /* DBG_STATUS_INSUFFICIENT_HW_IDS */
+	"Insufficient HW IDs. Try to record fewer Storms/blocks",
/* DBG_STATUS_DBG_BUS_IN_USE */
- "The debug bus is in use"
+ "The debug bus is in use",
+
+ /* DBG_STATUS_INVALID_STORM_DBG_MODE */
+	"The storm debug mode is not supported on the current chip",
+
+ /* DBG_STATUS_OTHER_ENGINE_BB_ONLY */
+ "Other engine is supported only in BB",
+
+ /* DBG_STATUS_FILTER_SINGLE_HW_ID */
+ "The configured filter mode requires a single Storm/block input",
+
+ /* DBG_STATUS_TRIGGER_SINGLE_HW_ID */
+	"The configured filter mode requires that all the constraints of a single trigger state be defined on a single Storm/block input",
+
+ /* DBG_STATUS_MISSING_TRIGGER_STATE_STORM */
+ "When triggering on Storm data, the Storm to trigger on must be specified"
};
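s_status_str is indexed directly by enum dbg_status, which is why the reserved codes keep empty-string placeholders: entries must stay in enum order. A plausible accessor sketch (MAX_DBG_STATUS is assumed to be the enum's terminator; the real driver exposes an equivalent status-to-string helper):

static const char *dbg_status_to_str(enum dbg_status status)
{
	/* Guard against out-of-range codes before indexing the table */
	return status < MAX_DBG_STATUS ? s_status_str[status]
				       : "Invalid debug status";
}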
/* Idle check severity names array */
@@ -6104,7 +5768,7 @@ static const char * const s_master_strs[] = {
"xsdm",
"dbu",
"dmae",
- "???",
+ "jdap",
"???",
"???",
"???",
@@ -6112,12 +5776,13 @@ static const char * const s_master_strs[] = {
};
/* REG FIFO error messages array */
-static const char * const s_reg_fifo_error_strs[] = {
- "grc timeout",
- "address doesn't belong to any block",
- "reserved address in block or write to read-only address",
- "privilege/protection mismatch",
- "path isolation error"
+static struct reg_fifo_err s_reg_fifo_errors[] = {
+ {1, "grc timeout"},
+ {2, "address doesn't belong to any block"},
+ {4, "reserved address in block or write to read-only address"},
+ {8, "privilege/protection mismatch"},
+ {16, "path isolation error"},
+ {17, "RSL error"}
};
/* IGU FIFO sources array */
@@ -6357,8 +6022,21 @@ static u32 qed_print_section_params(u32 *dump_buf,
return dump_offset;
}
-static struct dbg_tools_user_data *
-qed_dbg_get_user_data(struct qed_hwfn *p_hwfn)
+/* Returns the block name that matches the specified block ID,
+ * or NULL if not found.
+ */
+static const char *qed_dbg_get_block_name(struct qed_hwfn *p_hwfn,
+ enum block_id block_id)
+{
+ const struct dbg_block_user *block =
+ (const struct dbg_block_user *)
+ p_hwfn->dbg_arrays[BIN_BUF_DBG_BLOCKS_USER_DATA].ptr + block_id;
+
+ return (const char *)block->name;
+}
+
+static struct dbg_tools_user_data *qed_dbg_get_user_data(struct qed_hwfn
+ *p_hwfn)
{
return (struct dbg_tools_user_data *)p_hwfn->dbg_user_info;
}
@@ -6366,7 +6044,8 @@ qed_dbg_get_user_data(struct qed_hwfn *p_hwfn)
/* Parses the idle check rules and returns the number of characters printed.
* In case of parsing error, returns 0.
*/
-static u32 qed_parse_idle_chk_dump_rules(u32 *dump_buf,
+static u32 qed_parse_idle_chk_dump_rules(struct qed_hwfn *p_hwfn,
+ u32 *dump_buf,
u32 *dump_buf_end,
u32 num_rules,
bool print_fw_idle_chk,
@@ -6394,19 +6073,18 @@ static u32 qed_parse_idle_chk_dump_rules(u32 *dump_buf,
hdr = (struct dbg_idle_chk_result_hdr *)dump_buf;
rule_parsing_data =
- (const struct dbg_idle_chk_rule_parsing_data *)
- &s_user_dbg_arrays[BIN_BUF_DBG_IDLE_CHK_PARSING_DATA].
- ptr[hdr->rule_id];
+ (const struct dbg_idle_chk_rule_parsing_data *)
+ p_hwfn->dbg_arrays[BIN_BUF_DBG_IDLE_CHK_PARSING_DATA].ptr +
+ hdr->rule_id;
parsing_str_offset =
- GET_FIELD(rule_parsing_data->data,
- DBG_IDLE_CHK_RULE_PARSING_DATA_STR_OFFSET);
+ GET_FIELD(rule_parsing_data->data,
+ DBG_IDLE_CHK_RULE_PARSING_DATA_STR_OFFSET);
has_fw_msg =
- GET_FIELD(rule_parsing_data->data,
- DBG_IDLE_CHK_RULE_PARSING_DATA_HAS_FW_MSG) > 0;
- parsing_str =
- &((const char *)
- s_user_dbg_arrays[BIN_BUF_DBG_PARSING_STRINGS].ptr)
- [parsing_str_offset];
+ GET_FIELD(rule_parsing_data->data,
+ DBG_IDLE_CHK_RULE_PARSING_DATA_HAS_FW_MSG) > 0;
+ parsing_str = (const char *)
+ p_hwfn->dbg_arrays[BIN_BUF_DBG_PARSING_STRINGS].ptr +
+ parsing_str_offset;
lsi_msg = parsing_str;
curr_reg_id = 0;
@@ -6510,7 +6188,8 @@ static u32 qed_parse_idle_chk_dump_rules(u32 *dump_buf,
* parsed_results_bytes.
* The parsing status is returned.
*/
-static enum dbg_status qed_parse_idle_chk_dump(u32 *dump_buf,
+static enum dbg_status qed_parse_idle_chk_dump(struct qed_hwfn *p_hwfn,
+ u32 *dump_buf,
u32 num_dumped_dwords,
char *results_buf,
u32 *parsed_results_bytes,
@@ -6528,8 +6207,8 @@ static enum dbg_status qed_parse_idle_chk_dump(u32 *dump_buf,
*num_errors = 0;
*num_warnings = 0;
- if (!s_user_dbg_arrays[BIN_BUF_DBG_PARSING_STRINGS].ptr ||
- !s_user_dbg_arrays[BIN_BUF_DBG_IDLE_CHK_PARSING_DATA].ptr)
+ if (!p_hwfn->dbg_arrays[BIN_BUF_DBG_PARSING_STRINGS].ptr ||
+ !p_hwfn->dbg_arrays[BIN_BUF_DBG_IDLE_CHK_PARSING_DATA].ptr)
return DBG_STATUS_DBG_ARRAY_NOT_SET;
/* Read global_params section */
@@ -6562,7 +6241,8 @@ static enum dbg_status qed_parse_idle_chk_dump(u32 *dump_buf,
results_offset),
"FW_IDLE_CHECK:\n");
rules_print_size =
- qed_parse_idle_chk_dump_rules(dump_buf,
+ qed_parse_idle_chk_dump_rules(p_hwfn,
+ dump_buf,
dump_buf_end,
num_rules,
true,
@@ -6582,7 +6262,8 @@ static enum dbg_status qed_parse_idle_chk_dump(u32 *dump_buf,
results_offset),
"\nLSI_IDLE_CHECK:\n");
rules_print_size =
- qed_parse_idle_chk_dump_rules(dump_buf,
+ qed_parse_idle_chk_dump_rules(p_hwfn,
+ dump_buf,
dump_buf_end,
num_rules,
false,
@@ -6694,9 +6375,8 @@ qed_mcp_trace_alloc_meta_data(struct qed_hwfn *p_hwfn,
format_ptr->data = qed_read_dword_from_buf(meta_buf_bytes,
&offset);
- format_len =
- (format_ptr->data &
- MCP_TRACE_FORMAT_LEN_MASK) >> MCP_TRACE_FORMAT_LEN_SHIFT;
+ format_len = GET_MFW_FIELD(format_ptr->data,
+ MCP_TRACE_FORMAT_LEN);
format_ptr->format_str = kzalloc(format_len, GFP_KERNEL);
if (!format_ptr->format_str) {
/* Update number of modules to be released */
@@ -6719,7 +6399,7 @@ qed_mcp_trace_alloc_meta_data(struct qed_hwfn *p_hwfn,
* trace_buf - MCP trace cyclic buffer
* trace_buf_size - MCP trace cyclic buffer size in bytes
* data_offset - offset in bytes of the data to parse in the MCP trace cyclic
- * buffer.
+ * buffer.
* data_size - size in bytes of data to parse.
* parsed_buf - destination buffer for parsed data.
* parsed_results_bytes - size of parsed data in bytes.
@@ -6764,9 +6444,8 @@ static enum dbg_status qed_parse_mcp_trace_buf(struct qed_hwfn *p_hwfn,
/* Skip message if its index doesn't exist in the meta data */
if (format_idx >= meta->formats_num) {
- u8 format_size =
- (u8)((header & MFW_TRACE_PRM_SIZE_MASK) >>
- MFW_TRACE_PRM_SIZE_SHIFT);
+ u8 format_size = (u8)GET_MFW_FIELD(header,
+ MFW_TRACE_PRM_SIZE);
if (data_size < format_size)
return DBG_STATUS_MCP_TRACE_BAD_DATA;
@@ -6781,11 +6460,10 @@ static enum dbg_status qed_parse_mcp_trace_buf(struct qed_hwfn *p_hwfn,
format_ptr = &meta->formats[format_idx];
for (i = 0,
- param_mask = MCP_TRACE_FORMAT_P1_SIZE_MASK,
- param_shift = MCP_TRACE_FORMAT_P1_SIZE_SHIFT;
+ param_mask = MCP_TRACE_FORMAT_P1_SIZE_MASK, param_shift =
+ MCP_TRACE_FORMAT_P1_SIZE_OFFSET;
i < MCP_TRACE_FORMAT_MAX_PARAMS;
- i++,
- param_mask <<= MCP_TRACE_FORMAT_PARAM_WIDTH,
+ i++, param_mask <<= MCP_TRACE_FORMAT_PARAM_WIDTH,
param_shift += MCP_TRACE_FORMAT_PARAM_WIDTH) {
/* Extract param size (0..3) */
u8 param_size = (u8)((format_ptr->data & param_mask) >>
@@ -6813,12 +6491,10 @@ static enum dbg_status qed_parse_mcp_trace_buf(struct qed_hwfn *p_hwfn,
data_size -= param_size;
}
- format_level = (u8)((format_ptr->data &
- MCP_TRACE_FORMAT_LEVEL_MASK) >>
- MCP_TRACE_FORMAT_LEVEL_SHIFT);
- format_module = (u8)((format_ptr->data &
- MCP_TRACE_FORMAT_MODULE_MASK) >>
- MCP_TRACE_FORMAT_MODULE_SHIFT);
+ format_level = (u8)GET_MFW_FIELD(format_ptr->data,
+ MCP_TRACE_FORMAT_LEVEL);
+ format_module = (u8)GET_MFW_FIELD(format_ptr->data,
+ MCP_TRACE_FORMAT_MODULE);
if (format_level >= ARRAY_SIZE(s_mcp_trace_level_str))
return DBG_STATUS_MCP_TRACE_BAD_DATA;
@@ -6960,7 +6636,7 @@ static enum dbg_status qed_parse_reg_fifo_dump(u32 *dump_buf,
const char *section_name, *param_name, *param_str_val;
u32 param_num_val, num_section_params, num_elements;
struct reg_fifo_element *elements;
- u8 i, j, err_val, vf_val;
+ u8 i, j, err_code, vf_val;
u32 results_offset = 0;
char vf_str[4];
@@ -6991,7 +6667,7 @@ static enum dbg_status qed_parse_reg_fifo_dump(u32 *dump_buf,
/* Decode elements */
for (i = 0; i < num_elements; i++) {
- bool err_printed = false;
+ const char *err_msg = NULL;
/* Discover if element belongs to a VF or a PF */
vf_val = GET_FIELD(elements[i].data, REG_FIFO_ELEMENT_VF);
@@ -7000,11 +6676,17 @@ static enum dbg_status qed_parse_reg_fifo_dump(u32 *dump_buf,
else
sprintf(vf_str, "%d", vf_val);
+ /* Find error message */
+ err_code = GET_FIELD(elements[i].data, REG_FIFO_ELEMENT_ERROR);
+ for (j = 0; j < ARRAY_SIZE(s_reg_fifo_errors) && !err_msg; j++)
+ if (err_code == s_reg_fifo_errors[j].err_code)
+ err_msg = s_reg_fifo_errors[j].err_msg;
+
/* Add parsed element to parsed buffer */
results_offset +=
sprintf(qed_get_buf_ptr(results_buf,
results_offset),
- "raw: 0x%016llx, address: 0x%07x, access: %-5s, pf: %2d, vf: %s, port: %d, privilege: %-3s, protection: %-12s, master: %-4s, errors: ",
+ "raw: 0x%016llx, address: 0x%07x, access: %-5s, pf: %2d, vf: %s, port: %d, privilege: %-3s, protection: %-12s, master: %-4s, error: %s\n",
elements[i].data,
(u32)GET_FIELD(elements[i].data,
REG_FIFO_ELEMENT_ADDRESS) *
@@ -7021,30 +6703,8 @@ static enum dbg_status qed_parse_reg_fifo_dump(u32 *dump_buf,
s_protection_strs[GET_FIELD(elements[i].data,
REG_FIFO_ELEMENT_PROTECTION)],
s_master_strs[GET_FIELD(elements[i].data,
- REG_FIFO_ELEMENT_MASTER)]);
-
- /* Print errors */
- for (j = 0,
- err_val = GET_FIELD(elements[i].data,
- REG_FIFO_ELEMENT_ERROR);
- j < ARRAY_SIZE(s_reg_fifo_error_strs);
- j++, err_val >>= 1) {
- if (err_val & 0x1) {
- if (err_printed)
- results_offset +=
- sprintf(qed_get_buf_ptr
- (results_buf,
- results_offset), ", ");
- results_offset +=
- sprintf(qed_get_buf_ptr
- (results_buf, results_offset), "%s",
- s_reg_fifo_error_strs[j]);
- err_printed = true;
- }
- }
-
- results_offset +=
- sprintf(qed_get_buf_ptr(results_buf, results_offset), "\n");
+ REG_FIFO_ELEMENT_MASTER)],
+ err_msg ? err_msg : "unknown error code");
}
results_offset += sprintf(qed_get_buf_ptr(results_buf,
@@ -7398,27 +7058,28 @@ static enum dbg_status qed_parse_fw_asserts_dump(u32 *dump_buf,
/***************************** Public Functions *******************************/
-enum dbg_status qed_dbg_user_set_bin_ptr(const u8 * const bin_ptr)
+enum dbg_status qed_dbg_user_set_bin_ptr(struct qed_hwfn *p_hwfn,
+ const u8 * const bin_ptr)
{
- struct bin_buffer_hdr *buf_array = (struct bin_buffer_hdr *)bin_ptr;
+ struct bin_buffer_hdr *buf_hdrs = (struct bin_buffer_hdr *)bin_ptr;
u8 buf_id;
/* Convert binary data to debug arrays */
- for (buf_id = 0; buf_id < MAX_BIN_DBG_BUFFER_TYPE; buf_id++) {
- s_user_dbg_arrays[buf_id].ptr =
- (u32 *)(bin_ptr + buf_array[buf_id].offset);
- s_user_dbg_arrays[buf_id].size_in_dwords =
- BYTES_TO_DWORDS(buf_array[buf_id].length);
- }
+ for (buf_id = 0; buf_id < MAX_BIN_DBG_BUFFER_TYPE; buf_id++)
+ qed_set_dbg_bin_buf(p_hwfn,
+ (enum bin_dbg_buffer_type)buf_id,
+ (u32 *)(bin_ptr + buf_hdrs[buf_id].offset),
+ buf_hdrs[buf_id].length);
return DBG_STATUS_OK;
}
-enum dbg_status qed_dbg_alloc_user_data(struct qed_hwfn *p_hwfn)
+enum dbg_status qed_dbg_alloc_user_data(struct qed_hwfn *p_hwfn,
+ void **user_data_ptr)
{
- p_hwfn->dbg_user_info = kzalloc(sizeof(struct dbg_tools_user_data),
- GFP_KERNEL);
- if (!p_hwfn->dbg_user_info)
+ *user_data_ptr = kzalloc(sizeof(struct dbg_tools_user_data),
+ GFP_KERNEL);
+ if (!(*user_data_ptr))
return DBG_STATUS_VIRT_MEM_ALLOC_FAILED;
return DBG_STATUS_OK;
@@ -7437,7 +7098,8 @@ enum dbg_status qed_get_idle_chk_results_buf_size(struct qed_hwfn *p_hwfn,
{
u32 num_errors, num_warnings;
- return qed_parse_idle_chk_dump(dump_buf,
+ return qed_parse_idle_chk_dump(p_hwfn,
+ dump_buf,
num_dumped_dwords,
NULL,
results_buf_size,
@@ -7453,7 +7115,8 @@ enum dbg_status qed_print_idle_chk_results(struct qed_hwfn *p_hwfn,
{
u32 parsed_buf_size;
- return qed_parse_idle_chk_dump(dump_buf,
+ return qed_parse_idle_chk_dump(p_hwfn,
+ dump_buf,
num_dumped_dwords,
results_buf,
&parsed_buf_size,
@@ -7624,25 +7287,28 @@ enum dbg_status qed_print_fw_asserts_results(struct qed_hwfn *p_hwfn,
enum dbg_status qed_dbg_parse_attn(struct qed_hwfn *p_hwfn,
struct dbg_attn_block_result *results)
{
- struct user_dbg_array *block_attn, *pstrings;
const u32 *block_attn_name_offsets;
- enum dbg_attn_type attn_type;
+ const char *attn_name_base;
const char *block_name;
+ enum dbg_attn_type attn_type;
u8 num_regs, i, j;
num_regs = GET_FIELD(results->data, DBG_ATTN_BLOCK_RESULT_NUM_REGS);
- attn_type = (enum dbg_attn_type)
- GET_FIELD(results->data,
- DBG_ATTN_BLOCK_RESULT_ATTN_TYPE);
- block_name = s_block_info_arr[results->block_id].name;
-
- if (!s_user_dbg_arrays[BIN_BUF_DBG_ATTN_INDEXES].ptr ||
- !s_user_dbg_arrays[BIN_BUF_DBG_ATTN_NAME_OFFSETS].ptr ||
- !s_user_dbg_arrays[BIN_BUF_DBG_PARSING_STRINGS].ptr)
+ attn_type = GET_FIELD(results->data, DBG_ATTN_BLOCK_RESULT_ATTN_TYPE);
+ block_name = qed_dbg_get_block_name(p_hwfn, results->block_id);
+ if (!block_name)
+ return DBG_STATUS_INVALID_ARGS;
+
+ if (!p_hwfn->dbg_arrays[BIN_BUF_DBG_ATTN_INDEXES].ptr ||
+ !p_hwfn->dbg_arrays[BIN_BUF_DBG_ATTN_NAME_OFFSETS].ptr ||
+ !p_hwfn->dbg_arrays[BIN_BUF_DBG_PARSING_STRINGS].ptr)
return DBG_STATUS_DBG_ARRAY_NOT_SET;
- block_attn = &s_user_dbg_arrays[BIN_BUF_DBG_ATTN_NAME_OFFSETS];
- block_attn_name_offsets = &block_attn->ptr[results->names_offset];
+ block_attn_name_offsets =
+ (u32 *)p_hwfn->dbg_arrays[BIN_BUF_DBG_ATTN_NAME_OFFSETS].ptr +
+ results->names_offset;
+
+ attn_name_base = p_hwfn->dbg_arrays[BIN_BUF_DBG_PARSING_STRINGS].ptr;
/* Go over registers with a non-zero attention status */
for (i = 0; i < num_regs; i++) {
@@ -7653,18 +7319,17 @@ enum dbg_status qed_dbg_parse_attn(struct qed_hwfn *p_hwfn,
reg_result = &results->reg_results[i];
num_reg_attn = GET_FIELD(reg_result->data,
DBG_ATTN_REG_RESULT_NUM_REG_ATTN);
- block_attn = &s_user_dbg_arrays[BIN_BUF_DBG_ATTN_INDEXES];
- bit_mapping = &((struct dbg_attn_bit_mapping *)
- block_attn->ptr)[reg_result->block_attn_offset];
-
- pstrings = &s_user_dbg_arrays[BIN_BUF_DBG_PARSING_STRINGS];
+ bit_mapping = (struct dbg_attn_bit_mapping *)
+ p_hwfn->dbg_arrays[BIN_BUF_DBG_ATTN_INDEXES].ptr +
+ reg_result->block_attn_offset;
/* Go over attention status bits */
- for (j = 0; j < num_reg_attn; j++) {
+ for (j = 0; j < num_reg_attn; j++, bit_idx++) {
u16 attn_idx_val = GET_FIELD(bit_mapping[j].data,
DBG_ATTN_BIT_MAPPING_VAL);
const char *attn_name, *attn_type_str, *masked_str;
- u32 attn_name_offset, sts_addr;
+ u32 attn_name_offset;
+ u32 sts_addr;
/* Check if bit mask should be advanced (due to unused
* bits).
@@ -7676,18 +7341,19 @@ enum dbg_status qed_dbg_parse_attn(struct qed_hwfn *p_hwfn,
}
/* Check current bit index */
- if (!(reg_result->sts_val & BIT(bit_idx))) {
- bit_idx++;
+ if (!(reg_result->sts_val & BIT(bit_idx)))
continue;
- }
- /* Find attention name */
+			/* An attention bit with value 1 was found.
+			 * Find the attention name.
+			 */
attn_name_offset =
block_attn_name_offsets[attn_idx_val];
- attn_name = &((const char *)
- pstrings->ptr)[attn_name_offset];
- attn_type_str = attn_type == ATTN_TYPE_INTERRUPT ?
- "Interrupt" : "Parity";
+ attn_name = attn_name_base + attn_name_offset;
+			attn_type_str =
+				(attn_type == ATTN_TYPE_INTERRUPT ?
+				 "Interrupt" : "Parity");
masked_str = reg_result->mask_val & BIT(bit_idx) ?
" [masked]" : "";
sts_addr = GET_FIELD(reg_result->data,
@@ -7695,15 +7361,15 @@ enum dbg_status qed_dbg_parse_attn(struct qed_hwfn *p_hwfn,
DP_NOTICE(p_hwfn,
"%s (%s) : %s [address 0x%08x, bit %d]%s\n",
block_name, attn_type_str, attn_name,
- sts_addr, bit_idx, masked_str);
-
- bit_idx++;
+ sts_addr * 4, bit_idx, masked_str);
}
}
return DBG_STATUS_OK;
}
+static DEFINE_MUTEX(qed_dbg_lock);
+
/* Wrapper for unifying the idle_chk and mcp_trace api */
static enum dbg_status
qed_print_idle_chk_results_wrapper(struct qed_hwfn *p_hwfn,
@@ -7763,7 +7429,10 @@ static struct {
qed_dbg_fw_asserts_get_dump_buf_size,
qed_dbg_fw_asserts_dump,
qed_print_fw_asserts_results,
- qed_get_fw_asserts_results_buf_size},};
+ qed_get_fw_asserts_results_buf_size}, {
+ "ilt",
+ qed_dbg_ilt_get_dump_buf_size,
+ qed_dbg_ilt_dump, NULL, NULL},};
static void qed_dbg_print_feature(u8 *p_text_buf, u32 text_size)
{
@@ -7846,6 +7515,8 @@ static enum dbg_status format_feature(struct qed_hwfn *p_hwfn,
return rc;
}
+#define MAX_DBG_FEATURE_SIZE_DWORDS 0x3FFFFFFF
+
/* Generic function for performing the dump of a debug feature. */
static enum dbg_status qed_dbg_dump(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt,
@@ -7875,6 +7546,17 @@ static enum dbg_status qed_dbg_dump(struct qed_hwfn *p_hwfn,
&buf_size_dwords);
if (rc != DBG_STATUS_OK && rc != DBG_STATUS_NVRAM_GET_IMAGE_FAILED)
return rc;
+
+ if (buf_size_dwords > MAX_DBG_FEATURE_SIZE_DWORDS) {
+ feature->buf_size = 0;
+ DP_NOTICE(p_hwfn->cdev,
+ "Debug feature [\"%s\"] size (0x%x dwords) exceeds maximum size (0x%x dwords)\n",
+ qed_features_lookup[feature_idx].name,
+ buf_size_dwords, MAX_DBG_FEATURE_SIZE_DWORDS);
+
+ return DBG_STATUS_OK;
+ }
+
feature->buf_size = buf_size_dwords * sizeof(u32);
feature->dump_buf = vmalloc(feature->buf_size);
if (!feature->dump_buf)
@@ -8021,6 +7703,16 @@ int qed_dbg_fw_asserts_size(struct qed_dev *cdev)
return qed_dbg_feature_size(cdev, DBG_FEATURE_FW_ASSERTS);
}
+int qed_dbg_ilt(struct qed_dev *cdev, void *buffer, u32 *num_dumped_bytes)
+{
+ return qed_dbg_feature(cdev, buffer, DBG_FEATURE_ILT, num_dumped_bytes);
+}
+
+int qed_dbg_ilt_size(struct qed_dev *cdev)
+{
+ return qed_dbg_feature_size(cdev, DBG_FEATURE_ILT);
+}
+
int qed_dbg_mcp_trace(struct qed_dev *cdev, void *buffer,
u32 *num_dumped_bytes)
{
@@ -8037,9 +7729,17 @@ int qed_dbg_mcp_trace_size(struct qed_dev *cdev)
* feature buffer.
*/
#define REGDUMP_HEADER_SIZE sizeof(u32)
+#define REGDUMP_HEADER_SIZE_SHIFT 0
+#define REGDUMP_HEADER_SIZE_MASK 0xffffff
#define REGDUMP_HEADER_FEATURE_SHIFT 24
-#define REGDUMP_HEADER_ENGINE_SHIFT 31
+#define REGDUMP_HEADER_FEATURE_MASK 0x3f
#define REGDUMP_HEADER_OMIT_ENGINE_SHIFT 30
+#define REGDUMP_HEADER_OMIT_ENGINE_MASK 0x1
+#define REGDUMP_HEADER_ENGINE_SHIFT 31
+#define REGDUMP_HEADER_ENGINE_MASK 0x1
+#define REGDUMP_MAX_SIZE 0x1000000
+#define ILT_DUMP_MAX_SIZE (1024 * 1024 * 15)
+
enum debug_print_features {
OLD_MODE = 0,
IDLE_CHK = 1,
@@ -8053,17 +7753,27 @@ enum debug_print_features {
NVM_CFG1 = 9,
DEFAULT_CFG = 10,
NVM_META = 11,
+ MDUMP = 12,
+ ILT_DUMP = 13,
};
-static u32 qed_calc_regdump_header(enum debug_print_features feature,
+static u32 qed_calc_regdump_header(struct qed_dev *cdev,
+ enum debug_print_features feature,
int engine, u32 feature_size, u8 omit_engine)
{
- /* Insert the engine, feature and mode inside the header and combine it
- * with feature size.
- */
- return feature_size | (feature << REGDUMP_HEADER_FEATURE_SHIFT) |
- (omit_engine << REGDUMP_HEADER_OMIT_ENGINE_SHIFT) |
- (engine << REGDUMP_HEADER_ENGINE_SHIFT);
+ u32 res = 0;
+
+ SET_FIELD(res, REGDUMP_HEADER_SIZE, feature_size);
+ if (res != feature_size)
+ DP_NOTICE(cdev,
+ "Feature %d is too large (size 0x%x) and will corrupt the dump\n",
+ feature, feature_size);
+
+ SET_FIELD(res, REGDUMP_HEADER_FEATURE, feature);
+ SET_FIELD(res, REGDUMP_HEADER_OMIT_ENGINE, omit_engine);
+ SET_FIELD(res, REGDUMP_HEADER_ENGINE, engine);
+
+ return res;
}
int qed_dbg_all_data(struct qed_dev *cdev, void *buffer)
@@ -8079,9 +7789,11 @@ int qed_dbg_all_data(struct qed_dev *cdev, void *buffer)
for (i = 0; i < MAX_DBG_GRC_PARAMS; i++)
grc_params[i] = dev_data->grc.param_val[i];
- if (cdev->num_hwfns == 1)
+ if (!QED_IS_CMT(cdev))
omit_engine = 1;
+ mutex_lock(&qed_dbg_lock);
+
org_engine = qed_get_debug_engine(cdev);
for (cur_engine = 0; cur_engine < cdev->num_hwfns; cur_engine++) {
/* Collect idle_chks and grcDump for each hw function */
@@ -8094,7 +7806,7 @@ int qed_dbg_all_data(struct qed_dev *cdev, void *buffer)
REGDUMP_HEADER_SIZE, &feature_size);
if (!rc) {
*(u32 *)((u8 *)buffer + offset) =
- qed_calc_regdump_header(IDLE_CHK, cur_engine,
+ qed_calc_regdump_header(cdev, IDLE_CHK, cur_engine,
feature_size, omit_engine);
offset += (feature_size + REGDUMP_HEADER_SIZE);
} else {
@@ -8106,7 +7818,7 @@ int qed_dbg_all_data(struct qed_dev *cdev, void *buffer)
REGDUMP_HEADER_SIZE, &feature_size);
if (!rc) {
*(u32 *)((u8 *)buffer + offset) =
- qed_calc_regdump_header(IDLE_CHK, cur_engine,
+ qed_calc_regdump_header(cdev, IDLE_CHK, cur_engine,
feature_size, omit_engine);
offset += (feature_size + REGDUMP_HEADER_SIZE);
} else {
@@ -8118,7 +7830,7 @@ int qed_dbg_all_data(struct qed_dev *cdev, void *buffer)
REGDUMP_HEADER_SIZE, &feature_size);
if (!rc) {
*(u32 *)((u8 *)buffer + offset) =
- qed_calc_regdump_header(REG_FIFO, cur_engine,
+ qed_calc_regdump_header(cdev, REG_FIFO, cur_engine,
feature_size, omit_engine);
offset += (feature_size + REGDUMP_HEADER_SIZE);
} else {
@@ -8130,7 +7842,7 @@ int qed_dbg_all_data(struct qed_dev *cdev, void *buffer)
REGDUMP_HEADER_SIZE, &feature_size);
if (!rc) {
*(u32 *)((u8 *)buffer + offset) =
- qed_calc_regdump_header(IGU_FIFO, cur_engine,
+ qed_calc_regdump_header(cdev, IGU_FIFO, cur_engine,
feature_size, omit_engine);
offset += (feature_size + REGDUMP_HEADER_SIZE);
} else {
@@ -8143,7 +7855,7 @@ int qed_dbg_all_data(struct qed_dev *cdev, void *buffer)
&feature_size);
if (!rc) {
*(u32 *)((u8 *)buffer + offset) =
- qed_calc_regdump_header(PROTECTION_OVERRIDE,
+ qed_calc_regdump_header(cdev, PROTECTION_OVERRIDE,
cur_engine,
feature_size, omit_engine);
offset += (feature_size + REGDUMP_HEADER_SIZE);
@@ -8158,25 +7870,45 @@ int qed_dbg_all_data(struct qed_dev *cdev, void *buffer)
REGDUMP_HEADER_SIZE, &feature_size);
if (!rc) {
*(u32 *)((u8 *)buffer + offset) =
- qed_calc_regdump_header(FW_ASSERTS, cur_engine,
- feature_size, omit_engine);
+ qed_calc_regdump_header(cdev, FW_ASSERTS,
+ cur_engine, feature_size,
+ omit_engine);
offset += (feature_size + REGDUMP_HEADER_SIZE);
} else {
DP_ERR(cdev, "qed_dbg_fw_asserts failed. rc = %d\n",
rc);
}
- for (i = 0; i < MAX_DBG_GRC_PARAMS; i++)
- dev_data->grc.param_val[i] = grc_params[i];
+ feature_size = qed_dbg_ilt_size(cdev);
+ if (!cdev->disable_ilt_dump &&
+ feature_size < ILT_DUMP_MAX_SIZE) {
+ rc = qed_dbg_ilt(cdev, (u8 *)buffer + offset +
+ REGDUMP_HEADER_SIZE, &feature_size);
+ if (!rc) {
+ *(u32 *)((u8 *)buffer + offset) =
+ qed_calc_regdump_header(cdev, ILT_DUMP,
+ cur_engine,
+ feature_size,
+ omit_engine);
+ offset += feature_size + REGDUMP_HEADER_SIZE;
+ } else {
+ DP_ERR(cdev, "qed_dbg_ilt failed. rc = %d\n",
+ rc);
+ }
+ }
		/* GRC dump - must be last because when the MCP is stuck it
		 * will clutter idle_chk, reg_fifo, ...
*/
+ for (i = 0; i < MAX_DBG_GRC_PARAMS; i++)
+ dev_data->grc.param_val[i] = grc_params[i];
+
rc = qed_dbg_grc(cdev, (u8 *)buffer + offset +
REGDUMP_HEADER_SIZE, &feature_size);
if (!rc) {
*(u32 *)((u8 *)buffer + offset) =
- qed_calc_regdump_header(GRC_DUMP, cur_engine,
+ qed_calc_regdump_header(cdev, GRC_DUMP,
+ cur_engine,
feature_size, omit_engine);
offset += (feature_size + REGDUMP_HEADER_SIZE);
} else {
@@ -8185,12 +7917,13 @@ int qed_dbg_all_data(struct qed_dev *cdev, void *buffer)
}
qed_set_debug_engine(cdev, org_engine);
+
/* mcp_trace */
rc = qed_dbg_mcp_trace(cdev, (u8 *)buffer + offset +
REGDUMP_HEADER_SIZE, &feature_size);
if (!rc) {
*(u32 *)((u8 *)buffer + offset) =
- qed_calc_regdump_header(MCP_TRACE, cur_engine,
+ qed_calc_regdump_header(cdev, MCP_TRACE, cur_engine,
feature_size, omit_engine);
offset += (feature_size + REGDUMP_HEADER_SIZE);
} else {
@@ -8199,11 +7932,12 @@ int qed_dbg_all_data(struct qed_dev *cdev, void *buffer)
/* nvm cfg1 */
rc = qed_dbg_nvm_image(cdev,
- (u8 *)buffer + offset + REGDUMP_HEADER_SIZE,
- &feature_size, QED_NVM_IMAGE_NVM_CFG1);
+ (u8 *)buffer + offset +
+ REGDUMP_HEADER_SIZE, &feature_size,
+ QED_NVM_IMAGE_NVM_CFG1);
if (!rc) {
*(u32 *)((u8 *)buffer + offset) =
- qed_calc_regdump_header(NVM_CFG1, cur_engine,
+ qed_calc_regdump_header(cdev, NVM_CFG1, cur_engine,
feature_size, omit_engine);
offset += (feature_size + REGDUMP_HEADER_SIZE);
} else if (rc != -ENOENT) {
@@ -8218,7 +7952,7 @@ int qed_dbg_all_data(struct qed_dev *cdev, void *buffer)
&feature_size, QED_NVM_IMAGE_DEFAULT_CFG);
if (!rc) {
*(u32 *)((u8 *)buffer + offset) =
- qed_calc_regdump_header(DEFAULT_CFG, cur_engine,
+ qed_calc_regdump_header(cdev, DEFAULT_CFG, cur_engine,
feature_size, omit_engine);
offset += (feature_size + REGDUMP_HEADER_SIZE);
} else if (rc != -ENOENT) {
@@ -8234,8 +7968,8 @@ int qed_dbg_all_data(struct qed_dev *cdev, void *buffer)
&feature_size, QED_NVM_IMAGE_NVM_META);
if (!rc) {
*(u32 *)((u8 *)buffer + offset) =
- qed_calc_regdump_header(NVM_META, cur_engine,
- feature_size, omit_engine);
+ qed_calc_regdump_header(cdev, NVM_META, cur_engine,
+ feature_size, omit_engine);
offset += (feature_size + REGDUMP_HEADER_SIZE);
} else if (rc != -ENOENT) {
DP_ERR(cdev,
@@ -8243,6 +7977,23 @@ int qed_dbg_all_data(struct qed_dev *cdev, void *buffer)
QED_NVM_IMAGE_NVM_META, "QED_NVM_IMAGE_NVM_META", rc);
}
+ /* nvm mdump */
+ rc = qed_dbg_nvm_image(cdev, (u8 *)buffer + offset +
+ REGDUMP_HEADER_SIZE, &feature_size,
+ QED_NVM_IMAGE_MDUMP);
+ if (!rc) {
+ *(u32 *)((u8 *)buffer + offset) =
+ qed_calc_regdump_header(cdev, MDUMP, cur_engine,
+ feature_size, omit_engine);
+ offset += (feature_size + REGDUMP_HEADER_SIZE);
+ } else if (rc != -ENOENT) {
+ DP_ERR(cdev,
+ "qed_dbg_nvm_image failed for image %d (%s), rc = %d\n",
+ QED_NVM_IMAGE_MDUMP, "QED_NVM_IMAGE_MDUMP", rc);
+ }
+
+ mutex_unlock(&qed_dbg_lock);
+
return 0;
}
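A hypothetical consumer of the combined dump pairs the two public entry points: size first, then collect (error handling elided; buf and len are illustrative names):

	int len = qed_dbg_all_data_size(cdev);
	void *buf = vmalloc(len);

	if (buf) {
		qed_dbg_all_data(cdev, buf);
		/* ... hand buf/len to the requester ... */
		vfree(buf);
	}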
@@ -8250,9 +8001,10 @@ int qed_dbg_all_data_size(struct qed_dev *cdev)
{
struct qed_hwfn *p_hwfn =
&cdev->hwfns[cdev->dbg_params.engine_for_debug];
- u32 regs_len = 0, image_len = 0;
+ u32 regs_len = 0, image_len = 0, ilt_len = 0, total_ilt_len = 0;
u8 cur_engine, org_engine;
+ cdev->disable_ilt_dump = false;
org_engine = qed_get_debug_engine(cdev);
for (cur_engine = 0; cur_engine < cdev->num_hwfns; cur_engine++) {
/* Engine specific */
@@ -8267,6 +8019,12 @@ int qed_dbg_all_data_size(struct qed_dev *cdev)
REGDUMP_HEADER_SIZE +
qed_dbg_protection_override_size(cdev) +
REGDUMP_HEADER_SIZE + qed_dbg_fw_asserts_size(cdev);
+
+ ilt_len = REGDUMP_HEADER_SIZE + qed_dbg_ilt_size(cdev);
+ if (ilt_len < ILT_DUMP_MAX_SIZE) {
+ total_ilt_len += ilt_len;
+ regs_len += ilt_len;
+ }
}
qed_set_debug_engine(cdev, org_engine);
@@ -8282,6 +8040,17 @@ int qed_dbg_all_data_size(struct qed_dev *cdev)
qed_dbg_nvm_image_length(p_hwfn, QED_NVM_IMAGE_NVM_META, &image_len);
if (image_len)
regs_len += REGDUMP_HEADER_SIZE + image_len;
+ qed_dbg_nvm_image_length(p_hwfn, QED_NVM_IMAGE_MDUMP, &image_len);
+ if (image_len)
+ regs_len += REGDUMP_HEADER_SIZE + image_len;
+
+ if (regs_len > REGDUMP_MAX_SIZE) {
+ DP_VERBOSE(cdev, QED_MSG_DEBUG,
+ "Dump exceeds max size 0x%x, disable ILT dump\n",
+ REGDUMP_MAX_SIZE);
+ cdev->disable_ilt_dump = true;
+ regs_len -= total_ilt_len;
+ }
return regs_len;
}
@@ -8327,9 +8096,8 @@ int qed_dbg_feature_size(struct qed_dev *cdev, enum qed_dbg_features feature)
{
struct qed_hwfn *p_hwfn =
&cdev->hwfns[cdev->dbg_params.engine_for_debug];
+ struct qed_dbg_feature *qed_feature = &cdev->dbg_features[feature];
struct qed_ptt *p_ptt = qed_ptt_acquire(p_hwfn);
- struct qed_dbg_feature *qed_feature =
- &cdev->dbg_params.features[feature];
u32 buf_size_dwords;
enum dbg_status rc;
@@ -8341,6 +8109,10 @@ int qed_dbg_feature_size(struct qed_dev *cdev, enum qed_dbg_features feature)
if (rc != DBG_STATUS_OK)
buf_size_dwords = 0;
+ /* Feature will not be dumped if it exceeds maximum size */
+ if (buf_size_dwords > MAX_DBG_FEATURE_SIZE_DWORDS)
+ buf_size_dwords = 0;
+
qed_ptt_release(p_hwfn, p_ptt);
qed_feature->buf_size = buf_size_dwords * sizeof(u32);
return qed_feature->buf_size;
@@ -8360,14 +8132,21 @@ void qed_set_debug_engine(struct qed_dev *cdev, int engine_number)
void qed_dbg_pf_init(struct qed_dev *cdev)
{
- const u8 *dbg_values;
+ const u8 *dbg_values = NULL;
+ int i;
/* Debug values are after init values.
* The offset is the first dword of the file.
*/
dbg_values = cdev->firmware->data + *(u32 *)cdev->firmware->data;
- qed_dbg_set_bin_ptr((u8 *)dbg_values);
- qed_dbg_user_set_bin_ptr((u8 *)dbg_values);
+
+ for_each_hwfn(cdev, i) {
+ qed_dbg_set_bin_ptr(&cdev->hwfns[i], dbg_values);
+ qed_dbg_user_set_bin_ptr(&cdev->hwfns[i], dbg_values);
+ }
+
+	/* Use hwfn 0 as the default engine for debug */
+ cdev->dbg_params.engine_for_debug = 0;
}
void qed_dbg_pf_exit(struct qed_dev *cdev)
@@ -8375,11 +8154,11 @@ void qed_dbg_pf_exit(struct qed_dev *cdev)
struct qed_dbg_feature *feature = NULL;
enum qed_dbg_features feature_idx;
- /* Debug features' buffers may be allocated if debug feature was used
- * but dump wasn't called.
+	/* Debug features' buffers may be allocated if a debug feature was used
+	 * but dump wasn't called.
*/
for (feature_idx = 0; feature_idx < DBG_FEATURE_NUM; feature_idx++) {
- feature = &cdev->dbg_params.features[feature_idx];
+ feature = &cdev->dbg_features[feature_idx];
if (feature->dump_buf) {
vfree(feature->dump_buf);
feature->dump_buf = NULL;
diff --git a/drivers/net/ethernet/qlogic/qed/qed_debug.h b/drivers/net/ethernet/qlogic/qed/qed_debug.h
index e47e0e8d75b0..edf99d296bd1 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_debug.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_debug.h
@@ -14,11 +14,13 @@ enum qed_dbg_features {
DBG_FEATURE_IGU_FIFO,
DBG_FEATURE_PROTECTION_OVERRIDE,
DBG_FEATURE_FW_ASSERTS,
+ DBG_FEATURE_ILT,
DBG_FEATURE_NUM
};
/* Forward Declaration */
struct qed_dev;
+struct qed_hwfn;
int qed_dbg_grc(struct qed_dev *cdev, void *buffer, u32 *num_dumped_bytes);
int qed_dbg_grc_size(struct qed_dev *cdev);
@@ -37,6 +39,8 @@ int qed_dbg_protection_override_size(struct qed_dev *cdev);
int qed_dbg_fw_asserts(struct qed_dev *cdev, void *buffer,
u32 *num_dumped_bytes);
int qed_dbg_fw_asserts_size(struct qed_dev *cdev);
+int qed_dbg_ilt(struct qed_dev *cdev, void *buffer, u32 *num_dumped_bytes);
+int qed_dbg_ilt_size(struct qed_dev *cdev);
int qed_dbg_mcp_trace(struct qed_dev *cdev, void *buffer,
u32 *num_dumped_bytes);
int qed_dbg_mcp_trace_size(struct qed_dev *cdev);
diff --git a/drivers/net/ethernet/qlogic/qed/qed_dev.c b/drivers/net/ethernet/qlogic/qed/qed_dev.c
index a1ebc2b1ca0b..7912911337d4 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_dev.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_dev.c
@@ -907,7 +907,7 @@ qed_llh_access_filter(struct qed_hwfn *p_hwfn,
/* Filter value */
addr = NIG_REG_LLH_FUNC_FILTER_VALUE + 2 * filter_idx * 0x4;
- params.flags = QED_DMAE_FLAG_PF_DST;
+ SET_FIELD(params.flags, QED_DMAE_PARAMS_DST_PF_VALID, 0x1);
params.dst_pfid = pfid;
rc = qed_dmae_host2grc(p_hwfn,
p_ptt,
@@ -1412,6 +1412,7 @@ void qed_resc_free(struct qed_dev *cdev)
qed_dmae_info_free(p_hwfn);
qed_dcbx_info_free(p_hwfn);
qed_dbg_user_data_free(p_hwfn);
+ qed_fw_overlay_mem_free(p_hwfn, p_hwfn->fw_overlay_mem);
/* Destroy doorbell recovery mechanism */
qed_db_recovery_teardown(p_hwfn);
@@ -1571,7 +1572,7 @@ static void qed_init_qm_vport_params(struct qed_hwfn *p_hwfn)
/* all vports participate in weighted fair queueing */
for (i = 0; i < qed_init_qm_get_num_vports(p_hwfn); i++)
- qm_info->qm_vport_params[i].vport_wfq = 1;
+ qm_info->qm_vport_params[i].wfq = 1;
}
/* initialize qm port params */
@@ -1579,6 +1580,7 @@ static void qed_init_qm_port_params(struct qed_hwfn *p_hwfn)
{
/* Initialize qm port parameters */
u8 i, active_phys_tcs, num_ports = p_hwfn->cdev->num_ports_in_engine;
+ struct qed_dev *cdev = p_hwfn->cdev;
/* indicate how ooo and high pri traffic is dealt with */
active_phys_tcs = num_ports == MAX_NUM_PORTS_K2 ?
@@ -1588,11 +1590,13 @@ static void qed_init_qm_port_params(struct qed_hwfn *p_hwfn)
for (i = 0; i < num_ports; i++) {
struct init_qm_port_params *p_qm_port =
&p_hwfn->qm_info.qm_port_params[i];
+ u16 pbf_max_cmd_lines;
p_qm_port->active = 1;
p_qm_port->active_phys_tcs = active_phys_tcs;
- p_qm_port->num_pbf_cmd_lines = PBF_MAX_CMD_LINES / num_ports;
- p_qm_port->num_btb_blocks = BTB_MAX_BLOCKS / num_ports;
+ pbf_max_cmd_lines = (u16)NUM_OF_PBF_CMD_LINES(cdev);
+ p_qm_port->num_pbf_cmd_lines = pbf_max_cmd_lines / num_ports;
+ p_qm_port->num_btb_blocks = NUM_OF_BTB_BLOCKS(cdev) / num_ports;
}
}
@@ -2034,9 +2038,8 @@ static void qed_dp_init_qm_params(struct qed_hwfn *p_hwfn)
vport = &(qm_info->qm_vport_params[i]);
DP_VERBOSE(p_hwfn,
NETIF_MSG_HW,
- "vport idx %d, vport_rl %d, wfq %d, first_tx_pq_id [ ",
- qm_info->start_vport + i,
- vport->vport_rl, vport->vport_wfq);
+ "vport idx %d, wfq %d, first_tx_pq_id [ ",
+ qm_info->start_vport + i, vport->wfq);
for (tc = 0; tc < NUM_OF_TCS; tc++)
DP_VERBOSE(p_hwfn,
NETIF_MSG_HW,
@@ -2049,11 +2052,11 @@ static void qed_dp_init_qm_params(struct qed_hwfn *p_hwfn)
pq = &(qm_info->qm_pq_params[i]);
DP_VERBOSE(p_hwfn,
NETIF_MSG_HW,
- "pq idx %d, port %d, vport_id %d, tc %d, wrr_grp %d, rl_valid %d\n",
+ "pq idx %d, port %d, vport_id %d, tc %d, wrr_grp %d, rl_valid %d rl_id %d\n",
qm_info->start_pq + i,
pq->port_id,
pq->vport_id,
- pq->tc_id, pq->wrr_group, pq->rl_valid);
+ pq->tc_id, pq->wrr_group, pq->rl_valid, pq->rl_id);
}
}
@@ -2103,9 +2106,6 @@ int qed_qm_reconf(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
if (!b_rc)
return -EINVAL;
- /* clear the QM_PF runtime phase leftovers from previous init */
- qed_init_clear_rt_data(p_hwfn);
-
/* prepare QM portion of runtime array */
qed_qm_init_pf(p_hwfn, p_ptt, false);
@@ -2346,7 +2346,7 @@ int qed_resc_alloc(struct qed_dev *cdev)
if (rc)
goto alloc_err;
- rc = qed_dbg_alloc_user_data(p_hwfn);
+ rc = qed_dbg_alloc_user_data(p_hwfn, &p_hwfn->dbg_user_info);
if (rc)
goto alloc_err;
}
@@ -2623,7 +2623,7 @@ static int qed_hw_init_common(struct qed_hwfn *p_hwfn,
params.max_phys_tcs_per_port = qm_info->max_phys_tcs_per_port;
params.pf_rl_en = qm_info->pf_rl_en;
params.pf_wfq_en = qm_info->pf_wfq_en;
- params.vport_rl_en = qm_info->vport_rl_en;
+ params.global_rl_en = qm_info->vport_rl_en;
params.vport_wfq_en = qm_info->vport_wfq_en;
params.port_params = qm_info->qm_port_params;
@@ -2891,6 +2891,8 @@ static int qed_hw_init_pf(struct qed_hwfn *p_hwfn,
if (rc)
return rc;
+ qed_fw_overlay_init_ram(p_hwfn, p_ptt, p_hwfn->fw_overlay_mem);
+
/* Pure runtime initializations - directly to the HW */
qed_int_igu_init_pure_rt(p_hwfn, p_ptt, true, true);
@@ -3000,8 +3002,10 @@ int qed_hw_init(struct qed_dev *cdev, struct qed_hw_init_params *p_params)
u32 load_code, resp, param, drv_mb_param;
bool b_default_mtu = true;
struct qed_hwfn *p_hwfn;
- int rc = 0, i;
+ const u32 *fw_overlays;
+ u32 fw_overlays_len;
u16 ether_type;
+ int rc = 0, i;
if ((p_params->int_mode == QED_INT_MODE_MSI) && (cdev->num_hwfns > 1)) {
DP_NOTICE(cdev, "MSI mode is not supported for CMT devices\n");
@@ -3102,6 +3106,17 @@ int qed_hw_init(struct qed_dev *cdev, struct qed_hw_init_params *p_params)
*/
qed_pglueb_clear_err(p_hwfn, p_hwfn->p_main_ptt);
+ fw_overlays = cdev->fw_data->fw_overlays;
+ fw_overlays_len = cdev->fw_data->fw_overlays_len;
+ p_hwfn->fw_overlay_mem =
+ qed_fw_overlay_mem_alloc(p_hwfn, fw_overlays,
+ fw_overlays_len);
+ if (!p_hwfn->fw_overlay_mem) {
+ DP_NOTICE(p_hwfn,
+ "Failed to allocate fw overlay memory\n");
+ goto load_err;
+ }
+
switch (load_code) {
case FW_MSG_CODE_DRV_LOAD_ENGINE:
rc = qed_hw_init_common(p_hwfn, p_hwfn->p_main_ptt,
@@ -3566,8 +3581,10 @@ const char *qed_hw_get_resc_name(enum qed_resources res_id)
return "RDMA_CNQ_RAM";
case QED_ILT:
return "ILT";
- case QED_LL2_QUEUE:
- return "LL2_QUEUE";
+ case QED_LL2_RAM_QUEUE:
+ return "LL2_RAM_QUEUE";
+ case QED_LL2_CTX_QUEUE:
+ return "LL2_CTX_QUEUE";
case QED_CMDQS_CQS:
return "CMDQS_CQS";
case QED_RDMA_STATS_QUEUE:
@@ -3606,18 +3623,46 @@ __qed_hw_set_soft_resc_size(struct qed_hwfn *p_hwfn,
return 0;
}
+static u32 qed_hsi_def_val[][MAX_CHIP_IDS] = {
+ {MAX_NUM_VFS_BB, MAX_NUM_VFS_K2},
+ {MAX_NUM_L2_QUEUES_BB, MAX_NUM_L2_QUEUES_K2},
+ {MAX_NUM_PORTS_BB, MAX_NUM_PORTS_K2},
+	{MAX_SB_PER_PATH_BB, MAX_SB_PER_PATH_K2},
+ {MAX_NUM_PFS_BB, MAX_NUM_PFS_K2},
+ {MAX_NUM_VPORTS_BB, MAX_NUM_VPORTS_K2},
+ {ETH_RSS_ENGINE_NUM_BB, ETH_RSS_ENGINE_NUM_K2},
+ {MAX_QM_TX_QUEUES_BB, MAX_QM_TX_QUEUES_K2},
+ {PXP_NUM_ILT_RECORDS_BB, PXP_NUM_ILT_RECORDS_K2},
+ {RDMA_NUM_STATISTIC_COUNTERS_BB, RDMA_NUM_STATISTIC_COUNTERS_K2},
+ {MAX_QM_GLOBAL_RLS, MAX_QM_GLOBAL_RLS},
+ {PBF_MAX_CMD_LINES, PBF_MAX_CMD_LINES},
+ {BTB_MAX_BLOCKS_BB, BTB_MAX_BLOCKS_K2},
+};
+
+u32 qed_get_hsi_def_val(struct qed_dev *cdev, enum qed_hsi_def_type type)
+{
+ enum chip_ids chip_id = QED_IS_BB(cdev) ? CHIP_BB : CHIP_K2;
+
+ if (type >= QED_NUM_HSI_DEFS) {
+ DP_ERR(cdev, "Unexpected HSI definition type [%d]\n", type);
+ return 0;
+ }
+
+ return qed_hsi_def_val[type][chip_id];
+}
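The NUM_OF_*() helpers used earlier in this series are assumed to be thin wrappers over this table lookup, along the lines of the sketch below (the exact QED_HSI_DEF_* enumerators are assumed to follow the row order of qed_hsi_def_val):

#define NUM_OF_L2_QUEUES(cdev) \
	qed_get_hsi_def_val(cdev, QED_HSI_DEF_MAX_NUM_L2_QUEUES)
#define NUM_OF_VPORTS(cdev) \
	qed_get_hsi_def_val(cdev, QED_HSI_DEF_MAX_NUM_VPORTS)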
static int
qed_hw_set_soft_resc_size(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
- bool b_ah = QED_IS_AH(p_hwfn->cdev);
u32 resc_max_val, mcp_resp;
u8 res_id;
int rc;
-
for (res_id = 0; res_id < QED_MAX_RESC; res_id++) {
switch (res_id) {
- case QED_LL2_QUEUE:
- resc_max_val = MAX_NUM_LL2_RX_QUEUES;
+ case QED_LL2_RAM_QUEUE:
+ resc_max_val = MAX_NUM_LL2_RX_RAM_QUEUES;
+ break;
+ case QED_LL2_CTX_QUEUE:
+ resc_max_val = MAX_NUM_LL2_RX_CTX_QUEUES;
break;
case QED_RDMA_CNQ_RAM:
/* No need for a case for QED_CMDQS_CQS since
@@ -3626,8 +3671,8 @@ qed_hw_set_soft_resc_size(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
resc_max_val = NUM_OF_GLOBAL_QUEUES;
break;
case QED_RDMA_STATS_QUEUE:
- resc_max_val = b_ah ? RDMA_NUM_STATISTIC_COUNTERS_K2
- : RDMA_NUM_STATISTIC_COUNTERS_BB;
+ resc_max_val =
+ NUM_OF_RDMA_STATISTIC_COUNTERS(p_hwfn->cdev);
break;
case QED_BDQ:
resc_max_val = BDQ_NUM_RESOURCES;
@@ -3660,28 +3705,24 @@ int qed_hw_get_dflt_resc(struct qed_hwfn *p_hwfn,
u32 *p_resc_num, u32 *p_resc_start)
{
u8 num_funcs = p_hwfn->num_funcs_on_engine;
- bool b_ah = QED_IS_AH(p_hwfn->cdev);
+ struct qed_dev *cdev = p_hwfn->cdev;
switch (res_id) {
case QED_L2_QUEUE:
- *p_resc_num = (b_ah ? MAX_NUM_L2_QUEUES_K2 :
- MAX_NUM_L2_QUEUES_BB) / num_funcs;
+ *p_resc_num = NUM_OF_L2_QUEUES(cdev) / num_funcs;
break;
case QED_VPORT:
- *p_resc_num = (b_ah ? MAX_NUM_VPORTS_K2 :
- MAX_NUM_VPORTS_BB) / num_funcs;
+ *p_resc_num = NUM_OF_VPORTS(cdev) / num_funcs;
break;
case QED_RSS_ENG:
- *p_resc_num = (b_ah ? ETH_RSS_ENGINE_NUM_K2 :
- ETH_RSS_ENGINE_NUM_BB) / num_funcs;
+ *p_resc_num = NUM_OF_RSS_ENGINES(cdev) / num_funcs;
break;
case QED_PQ:
- *p_resc_num = (b_ah ? MAX_QM_TX_QUEUES_K2 :
- MAX_QM_TX_QUEUES_BB) / num_funcs;
+ *p_resc_num = NUM_OF_QM_TX_QUEUES(cdev) / num_funcs;
*p_resc_num &= ~0x7; /* The granularity of the PQs is 8 */
break;
case QED_RL:
- *p_resc_num = MAX_QM_GLOBAL_RLS / num_funcs;
+ *p_resc_num = NUM_OF_QM_GLOBAL_RLS(cdev) / num_funcs;
break;
case QED_MAC:
case QED_VLAN:
@@ -3689,11 +3730,13 @@ int qed_hw_get_dflt_resc(struct qed_hwfn *p_hwfn,
*p_resc_num = ETH_NUM_MAC_FILTERS / num_funcs;
break;
case QED_ILT:
- *p_resc_num = (b_ah ? PXP_NUM_ILT_RECORDS_K2 :
- PXP_NUM_ILT_RECORDS_BB) / num_funcs;
+ *p_resc_num = NUM_OF_PXP_ILT_RECORDS(cdev) / num_funcs;
+ break;
+ case QED_LL2_RAM_QUEUE:
+ *p_resc_num = MAX_NUM_LL2_RX_RAM_QUEUES / num_funcs;
break;
- case QED_LL2_QUEUE:
- *p_resc_num = MAX_NUM_LL2_RX_QUEUES / num_funcs;
+ case QED_LL2_CTX_QUEUE:
+ *p_resc_num = MAX_NUM_LL2_RX_CTX_QUEUES / num_funcs;
break;
case QED_RDMA_CNQ_RAM:
case QED_CMDQS_CQS:
@@ -3701,8 +3744,7 @@ int qed_hw_get_dflt_resc(struct qed_hwfn *p_hwfn,
*p_resc_num = NUM_OF_GLOBAL_QUEUES / num_funcs;
break;
case QED_RDMA_STATS_QUEUE:
- *p_resc_num = (b_ah ? RDMA_NUM_STATISTIC_COUNTERS_K2 :
- RDMA_NUM_STATISTIC_COUNTERS_BB) / num_funcs;
+ *p_resc_num = NUM_OF_RDMA_STATISTIC_COUNTERS(cdev) / num_funcs;
break;
case QED_BDQ:
if (p_hwfn->hw_info.personality != QED_PCI_ISCSI &&
@@ -5087,11 +5129,11 @@ static void qed_configure_wfq_for_all_vports(struct qed_hwfn *p_hwfn,
for (i = 0; i < p_hwfn->qm_info.num_vports; i++) {
u32 wfq_speed = p_hwfn->qm_info.wfq_data[i].min_speed;
- vport_params[i].vport_wfq = (wfq_speed * QED_WFQ_UNIT) /
+ vport_params[i].wfq = (wfq_speed * QED_WFQ_UNIT) /
min_pf_rate;
qed_init_vport_wfq(p_hwfn, p_ptt,
vport_params[i].first_tx_pq_id,
- vport_params[i].vport_wfq);
+ vport_params[i].wfq);
}
}
@@ -5102,7 +5144,7 @@ static void qed_init_wfq_default_param(struct qed_hwfn *p_hwfn,
int i;
for (i = 0; i < p_hwfn->qm_info.num_vports; i++)
- p_hwfn->qm_info.qm_vport_params[i].vport_wfq = 1;
+ p_hwfn->qm_info.qm_vport_params[i].wfq = 1;
}
static void qed_disable_wfq_for_all_vports(struct qed_hwfn *p_hwfn,
@@ -5118,7 +5160,7 @@ static void qed_disable_wfq_for_all_vports(struct qed_hwfn *p_hwfn,
qed_init_wfq_default_param(p_hwfn, min_pf_rate);
qed_init_vport_wfq(p_hwfn, p_ptt,
vport_params[i].first_tx_pq_id,
- vport_params[i].vport_wfq);
+ vport_params[i].wfq);
}
}
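/* Editor's sketch (not part of the patch): the rename from vport_wfq to wfq
 * does not change the weight computation; it is still the vport's minimum
 * speed scaled to the driver-defined QED_WFQ_UNIT. A standalone restatement
 * of the arithmetic used above:
 */
static u16 qed_vport_wfq_weight(u32 wfq_speed, u32 min_pf_rate)
{
	/* e.g. a 2500 Mbps floor on a 10000 Mbps PF yields a quarter of
	 * QED_WFQ_UNIT as the relative weight.
	 */
	return (wfq_speed * QED_WFQ_UNIT) / min_pf_rate;
}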
diff --git a/drivers/net/ethernet/qlogic/qed/qed_dev_api.h b/drivers/net/ethernet/qlogic/qed/qed_dev_api.h
index 47376d4d071f..eb4808b3bf67 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_dev_api.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_dev_api.h
@@ -230,30 +230,6 @@ enum qed_dmae_address_type_t {
QED_DMAE_ADDRESS_GRC
};
-/* value of flags If QED_DMAE_FLAG_RW_REPL_SRC flag is set and the
- * source is a block of length DMAE_MAX_RW_SIZE and the
- * destination is larger, the source block will be duplicated as
- * many times as required to fill the destination block. This is
- * used mostly to write a zeroed buffer to destination address
- * using DMA
- */
-#define QED_DMAE_FLAG_RW_REPL_SRC 0x00000001
-#define QED_DMAE_FLAG_VF_SRC 0x00000002
-#define QED_DMAE_FLAG_VF_DST 0x00000004
-#define QED_DMAE_FLAG_COMPLETION_DST 0x00000008
-#define QED_DMAE_FLAG_PORT 0x00000010
-#define QED_DMAE_FLAG_PF_SRC 0x00000020
-#define QED_DMAE_FLAG_PF_DST 0x00000040
-
-struct qed_dmae_params {
- u32 flags; /* consists of QED_DMAE_FLAG_* values */
- u8 src_vfid;
- u8 dst_vfid;
- u8 port_id;
- u8 src_pfid;
- u8 dst_pfid;
-};
-
/**
* @brief qed_dmae_host2grc - copy data from source addr to
* dmae registers using the given ptt
diff --git a/drivers/net/ethernet/qlogic/qed/qed_fcoe.c b/drivers/net/ethernet/qlogic/qed/qed_fcoe.c
index de31a382f58e..4c7fa391fd33 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_fcoe.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_fcoe.c
@@ -167,6 +167,8 @@ qed_sp_fcoe_func_start(struct qed_hwfn *p_hwfn,
goto err;
}
p_cxt = cxt_info.p_cxt;
+ memset(p_cxt, 0, sizeof(*p_cxt));
+
SET_FIELD(p_cxt->tstorm_ag_context.flags3,
E4_TSTORM_FCOE_CONN_AG_CTX_DUMMY_TIMER_CF_EN, 1);
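/* Editor's sketch (not part of the patch): SET_FIELD() is a read-modify-write
 * helper, so it only yields well-defined contents if the context starts out
 * zeroed; the memset() added above provides that baseline. The struct and
 * field names below are hypothetical, for illustration only.
 */
struct example_ag_ctx {
	u8 flags3;
#define EXAMPLE_DUMMY_TIMER_CF_EN_MASK	0x1
#define EXAMPLE_DUMMY_TIMER_CF_EN_SHIFT	3
};

static void example_ctx_init(struct example_ag_ctx *p_cxt)
{
	memset(p_cxt, 0, sizeof(*p_cxt));	/* no stale bits left behind */
	SET_FIELD(p_cxt->flags3, EXAMPLE_DUMMY_TIMER_CF_EN, 1);
}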
diff --git a/drivers/net/ethernet/qlogic/qed/qed_hsi.h b/drivers/net/ethernet/qlogic/qed/qed_hsi.h
index cf3ceb62e397..4597015b8bff 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_hsi.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_hsi.h
@@ -98,6 +98,7 @@ enum core_event_opcode {
CORE_EVENT_RX_QUEUE_STOP,
CORE_EVENT_RX_QUEUE_FLUSH,
CORE_EVENT_TX_QUEUE_UPDATE,
+ CORE_EVENT_QUEUE_STATS_QUERY,
MAX_CORE_EVENT_OPCODE
};
@@ -116,7 +117,7 @@ struct core_ll2_port_stats {
struct regpair gsi_crcchksm_error;
};
-/* Ethernet TX Per Queue Stats */
+/* LL2 TX Per Queue Stats */
struct core_ll2_pstorm_per_queue_stat {
struct regpair sent_ucast_bytes;
struct regpair sent_mcast_bytes;
@@ -124,13 +125,13 @@ struct core_ll2_pstorm_per_queue_stat {
struct regpair sent_ucast_pkts;
struct regpair sent_mcast_pkts;
struct regpair sent_bcast_pkts;
+ struct regpair error_drop_pkts;
};
/* Light-L2 RX Producers in Tstorm RAM */
struct core_ll2_rx_prod {
__le16 bd_prod;
__le16 cqe_prod;
- __le32 reserved;
};
struct core_ll2_tstorm_per_queue_stat {
@@ -147,6 +148,18 @@ struct core_ll2_ustorm_per_queue_stat {
struct regpair rcv_bcast_pkts;
};
+/* Structure for doorbell data, in PWM mode, for RX producers update. */
+struct core_pwm_prod_update_data {
+ __le16 icid; /* internal CID */
+ u8 reserved0;
+ u8 params;
+#define CORE_PWM_PROD_UPDATE_DATA_AGG_CMD_MASK 0x3
+#define CORE_PWM_PROD_UPDATE_DATA_AGG_CMD_SHIFT 0
+#define CORE_PWM_PROD_UPDATE_DATA_RESERVED1_MASK 0x3F /* Set 0 */
+#define CORE_PWM_PROD_UPDATE_DATA_RESERVED1_SHIFT 2
+ struct core_ll2_rx_prod prod; /* Producers */
+};
+
/* Core Ramrod Command IDs (light L2) */
enum core_ramrod_cmd_id {
CORE_RAMROD_UNUSED,
@@ -156,6 +169,7 @@ enum core_ramrod_cmd_id {
CORE_RAMROD_TX_QUEUE_STOP,
CORE_RAMROD_RX_QUEUE_FLUSH,
CORE_RAMROD_TX_QUEUE_UPDATE,
+ CORE_RAMROD_QUEUE_STATS_QUERY,
MAX_CORE_RAMROD_CMD_ID
};
@@ -236,7 +250,8 @@ struct core_rx_gsi_offload_cqe {
__le16 src_mac_addrlo;
__le16 qp_id;
__le32 src_qp;
- __le32 reserved[3];
+ struct core_rx_cqe_opaque_data opaque_data;
+ __le32 reserved;
};
/* Core RX CQE for Light L2 */
@@ -274,8 +289,11 @@ struct core_rx_start_ramrod_data {
u8 mf_si_mcast_accept_all;
struct core_rx_action_on_error action_on_error;
u8 gsi_offload_flag;
+ u8 vport_id_valid;
+ u8 vport_id;
+ u8 zero_prod_flg;
u8 wipe_inner_vlan_pri_en;
- u8 reserved[5];
+ u8 reserved[2];
};
/* Ramrod data for rx queue stop ramrod */
@@ -352,8 +370,11 @@ struct core_tx_start_ramrod_data {
__le16 pbl_size;
__le16 qm_pq_id;
u8 gsi_offload_flag;
+ u8 ctx_stats_en;
+ u8 vport_id_valid;
u8 vport_id;
- u8 resrved[2];
+ u8 enforce_security_flag;
+ u8 reserved[7];
};
/* Ramrod data for tx queue stop ramrod */
@@ -385,7 +406,7 @@ struct ystorm_core_conn_st_ctx {
/* The core storm context for the Pstorm */
struct pstorm_core_conn_st_ctx {
- __le32 reserved[4];
+ __le32 reserved[20];
};
/* Core Slowpath Connection storm context of Xstorm */
@@ -761,7 +782,7 @@ struct e4_tstorm_core_conn_ag_ctx {
__le16 word1;
__le16 word2;
__le16 word3;
- __le32 reg9;
+ __le32 ll2_rx_prod;
__le32 reg10;
};
@@ -836,11 +857,16 @@ struct e4_ustorm_core_conn_ag_ctx {
/* The core storm context for the Mstorm */
struct mstorm_core_conn_st_ctx {
- __le32 reserved[24];
+ __le32 reserved[40];
};
/* The core storm context for the Ustorm */
struct ustorm_core_conn_st_ctx {
+ __le32 reserved[20];
+};
+
+/* The core storm context for the Tstorm */
+struct tstorm_core_conn_st_ctx {
__le32 reserved[4];
};
@@ -857,6 +883,8 @@ struct e4_core_conn_context {
struct mstorm_core_conn_st_ctx mstorm_st_context;
struct ustorm_core_conn_st_ctx ustorm_st_context;
struct regpair ustorm_st_padding[2];
+ struct tstorm_core_conn_st_ctx tstorm_st_context;
+ struct regpair tstorm_st_padding[2];
};
struct eth_mstorm_per_pf_stat {
@@ -888,12 +916,21 @@ struct eth_pstorm_per_pf_stat {
struct regpair sent_gre_bytes;
struct regpair sent_vxlan_bytes;
struct regpair sent_geneve_bytes;
+ struct regpair sent_mpls_bytes;
+ struct regpair sent_gre_mpls_bytes;
+ struct regpair sent_udp_mpls_bytes;
struct regpair sent_gre_pkts;
struct regpair sent_vxlan_pkts;
struct regpair sent_geneve_pkts;
+ struct regpair sent_mpls_pkts;
+ struct regpair sent_gre_mpls_pkts;
+ struct regpair sent_udp_mpls_pkts;
struct regpair gre_drop_pkts;
struct regpair vxlan_drop_pkts;
struct regpair geneve_drop_pkts;
+ struct regpair mpls_drop_pkts;
+ struct regpair gre_mpls_drop_pkts;
+ struct regpair udp_mpls_drop_pkts;
};
/* Ethernet TX Per Queue Stats */
@@ -983,7 +1020,8 @@ union event_ring_data {
struct event_ring_entry {
u8 protocol_id;
u8 opcode;
- __le16 reserved0;
+ u8 reserved0;
+ u8 vf_id;
__le16 echo;
u8 fw_return_code;
u8 flags;
@@ -1061,7 +1099,20 @@ enum malicious_vf_error_id {
ETH_CONTROL_PACKET_VIOLATION,
ETH_ANTI_SPOOFING_ERR,
ETH_PACKET_SIZE_TOO_LARGE,
- MAX_MALICIOUS_VF_ERROR_ID
+ CORE_ILLEGAL_VLAN_MODE,
+ CORE_ILLEGAL_NBDS,
+ CORE_FIRST_BD_WO_SOP,
+ CORE_INSUFFICIENT_BDS,
+ CORE_PACKET_TOO_SMALL,
+ CORE_ILLEGAL_INBAND_TAGS,
+ CORE_VLAN_INSERT_AND_INBAND_VLAN,
+ CORE_MTU_VIOLATION,
+ CORE_CONTROL_PACKET_VIOLATION,
+ CORE_ANTI_SPOOFING_ERR,
+ CORE_PACKET_SIZE_TOO_LARGE,
+ CORE_ILLEGAL_BD_FLAGS,
+ CORE_GSI_PACKET_VIOLATION,
+ MAX_MALICIOUS_VF_ERROR_ID,
};
/* Mstorm non-triggering VF zone */
@@ -1367,6 +1418,16 @@ enum vf_zone_size_mode {
MAX_VF_ZONE_SIZE_MODE
};
+/* Xstorm non-triggering VF zone */
+struct xstorm_non_trigger_vf_zone {
+ struct regpair non_edpm_ack_pkts;
+};
+
+/* Xstorm VF zone */
+struct xstorm_vf_zone {
+ struct xstorm_non_trigger_vf_zone non_trigger;
+};
+
/* Attentions status block */
struct atten_status_block {
__le32 atten_bits;
@@ -1435,7 +1496,11 @@ struct dmae_cmd {
__le16 crc16;
__le16 crc16_c;
__le16 crc10;
- __le16 reserved;
+ __le16 error_bit_reserved;
+#define DMAE_CMD_ERROR_BIT_MASK 0x1
+#define DMAE_CMD_ERROR_BIT_SHIFT 0
+#define DMAE_CMD_RESERVED_MASK 0x7FFF
+#define DMAE_CMD_RESERVED_SHIFT 1
__le16 xsum16;
__le16 xsum8;
};
@@ -1566,6 +1631,41 @@ struct e4_ystorm_core_conn_ag_ctx {
__le32 reg3;
};
+/* DMAE parameters */
+struct qed_dmae_params {
+ u32 flags;
+/* If QED_DMAE_PARAMS_RW_REPL_SRC flag is set and the
+ * source is a block of length DMAE_MAX_RW_SIZE and the
+ * destination is larger, the source block will be duplicated as
+ * many times as required to fill the destination block. This is
+ * used mostly to write a zeroed buffer to destination address
+ * using DMA
+ */
+#define QED_DMAE_PARAMS_RW_REPL_SRC_MASK 0x1
+#define QED_DMAE_PARAMS_RW_REPL_SRC_SHIFT 0
+#define QED_DMAE_PARAMS_SRC_VF_VALID_MASK 0x1
+#define QED_DMAE_PARAMS_SRC_VF_VALID_SHIFT 1
+#define QED_DMAE_PARAMS_DST_VF_VALID_MASK 0x1
+#define QED_DMAE_PARAMS_DST_VF_VALID_SHIFT 2
+#define QED_DMAE_PARAMS_COMPLETION_DST_MASK 0x1
+#define QED_DMAE_PARAMS_COMPLETION_DST_SHIFT 3
+#define QED_DMAE_PARAMS_PORT_VALID_MASK 0x1
+#define QED_DMAE_PARAMS_PORT_VALID_SHIFT 4
+#define QED_DMAE_PARAMS_SRC_PF_VALID_MASK 0x1
+#define QED_DMAE_PARAMS_SRC_PF_VALID_SHIFT 5
+#define QED_DMAE_PARAMS_DST_PF_VALID_MASK 0x1
+#define QED_DMAE_PARAMS_DST_PF_VALID_SHIFT 6
+#define QED_DMAE_PARAMS_RESERVED_MASK 0x1FFFFFF
+#define QED_DMAE_PARAMS_RESERVED_SHIFT 7
+ u8 src_vfid;
+ u8 dst_vfid;
+ u8 port_id;
+ u8 src_pfid;
+ u8 dst_pfid;
+ u8 reserved1;
+ __le16 reserved2;
+};
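/* Editor's sketch (not part of the patch): with the old QED_DMAE_FLAG_*
 * bits replaced by MASK/SHIFT pairs, a caller that DMAs from a VF source
 * now packs the flags word along these lines (SET_FIELD being the driver's
 * usual read-modify-write helper):
 */
static void example_dmae_vf_src(struct qed_dmae_params *p_params, u8 vfid)
{
	memset(p_params, 0, sizeof(*p_params));
	SET_FIELD(p_params->flags, QED_DMAE_PARAMS_SRC_VF_VALID, 0x1);
	p_params->src_vfid = vfid;
}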
+
/* IGU cleanup command */
struct igu_cleanup {
__le32 sb_id_and_flags;
@@ -1743,102 +1843,23 @@ struct sdm_op_gen {
#define SDM_OP_GEN_RESERVED_SHIFT 20
};
+/* Physical memory descriptor */
+struct phys_mem_desc {
+ dma_addr_t phys_addr;
+ void *virt_addr;
+ u32 size; /* In bytes */
+};
+
+/* Virtual memory descriptor */
+struct virt_mem_desc {
+ void *ptr;
+ u32 size; /* In bytes */
+};
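/* Editor's sketch (not part of the patch): phys_mem_desc pairs a DMA-able
 * buffer with its bus address, so filling one is a single coherent
 * allocation. A minimal helper, assuming a valid struct device:
 */
static int example_phys_mem_alloc(struct device *dev,
				  struct phys_mem_desc *desc, u32 size)
{
	desc->virt_addr = dma_alloc_coherent(dev, size, &desc->phys_addr,
					     GFP_KERNEL);
	if (!desc->virt_addr)
		return -ENOMEM;

	desc->size = size;
	return 0;
}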
+
/****************************************/
/* Debug Tools HSI constants and macros */
/****************************************/
-enum block_addr {
- GRCBASE_GRC = 0x50000,
- GRCBASE_MISCS = 0x9000,
- GRCBASE_MISC = 0x8000,
- GRCBASE_DBU = 0xa000,
- GRCBASE_PGLUE_B = 0x2a8000,
- GRCBASE_CNIG = 0x218000,
- GRCBASE_CPMU = 0x30000,
- GRCBASE_NCSI = 0x40000,
- GRCBASE_OPTE = 0x53000,
- GRCBASE_BMB = 0x540000,
- GRCBASE_PCIE = 0x54000,
- GRCBASE_MCP = 0xe00000,
- GRCBASE_MCP2 = 0x52000,
- GRCBASE_PSWHST = 0x2a0000,
- GRCBASE_PSWHST2 = 0x29e000,
- GRCBASE_PSWRD = 0x29c000,
- GRCBASE_PSWRD2 = 0x29d000,
- GRCBASE_PSWWR = 0x29a000,
- GRCBASE_PSWWR2 = 0x29b000,
- GRCBASE_PSWRQ = 0x280000,
- GRCBASE_PSWRQ2 = 0x240000,
- GRCBASE_PGLCS = 0x0,
- GRCBASE_DMAE = 0xc000,
- GRCBASE_PTU = 0x560000,
- GRCBASE_TCM = 0x1180000,
- GRCBASE_MCM = 0x1200000,
- GRCBASE_UCM = 0x1280000,
- GRCBASE_XCM = 0x1000000,
- GRCBASE_YCM = 0x1080000,
- GRCBASE_PCM = 0x1100000,
- GRCBASE_QM = 0x2f0000,
- GRCBASE_TM = 0x2c0000,
- GRCBASE_DORQ = 0x100000,
- GRCBASE_BRB = 0x340000,
- GRCBASE_SRC = 0x238000,
- GRCBASE_PRS = 0x1f0000,
- GRCBASE_TSDM = 0xfb0000,
- GRCBASE_MSDM = 0xfc0000,
- GRCBASE_USDM = 0xfd0000,
- GRCBASE_XSDM = 0xf80000,
- GRCBASE_YSDM = 0xf90000,
- GRCBASE_PSDM = 0xfa0000,
- GRCBASE_TSEM = 0x1700000,
- GRCBASE_MSEM = 0x1800000,
- GRCBASE_USEM = 0x1900000,
- GRCBASE_XSEM = 0x1400000,
- GRCBASE_YSEM = 0x1500000,
- GRCBASE_PSEM = 0x1600000,
- GRCBASE_RSS = 0x238800,
- GRCBASE_TMLD = 0x4d0000,
- GRCBASE_MULD = 0x4e0000,
- GRCBASE_YULD = 0x4c8000,
- GRCBASE_XYLD = 0x4c0000,
- GRCBASE_PTLD = 0x5a0000,
- GRCBASE_YPLD = 0x5c0000,
- GRCBASE_PRM = 0x230000,
- GRCBASE_PBF_PB1 = 0xda0000,
- GRCBASE_PBF_PB2 = 0xda4000,
- GRCBASE_RPB = 0x23c000,
- GRCBASE_BTB = 0xdb0000,
- GRCBASE_PBF = 0xd80000,
- GRCBASE_RDIF = 0x300000,
- GRCBASE_TDIF = 0x310000,
- GRCBASE_CDU = 0x580000,
- GRCBASE_CCFC = 0x2e0000,
- GRCBASE_TCFC = 0x2d0000,
- GRCBASE_IGU = 0x180000,
- GRCBASE_CAU = 0x1c0000,
- GRCBASE_RGFS = 0xf00000,
- GRCBASE_RGSRC = 0x320000,
- GRCBASE_TGFS = 0xd00000,
- GRCBASE_TGSRC = 0x322000,
- GRCBASE_UMAC = 0x51000,
- GRCBASE_XMAC = 0x210000,
- GRCBASE_DBG = 0x10000,
- GRCBASE_NIG = 0x500000,
- GRCBASE_WOL = 0x600000,
- GRCBASE_BMBN = 0x610000,
- GRCBASE_IPC = 0x20000,
- GRCBASE_NWM = 0x800000,
- GRCBASE_NWS = 0x700000,
- GRCBASE_MS = 0x6a0000,
- GRCBASE_PHY_PCIE = 0x620000,
- GRCBASE_LED = 0x6b8000,
- GRCBASE_AVS_WRAP = 0x6b0000,
- GRCBASE_PXPREQBUS = 0x56000,
- GRCBASE_MISC_AEU = 0x8000,
- GRCBASE_BAR0_MAP = 0x1c00000,
- MAX_BLOCK_ADDR
-};
-
enum block_id {
BLOCK_GRC,
BLOCK_MISCS,
@@ -1893,8 +1914,6 @@ enum block_id {
BLOCK_MULD,
BLOCK_YULD,
BLOCK_XYLD,
- BLOCK_PTLD,
- BLOCK_YPLD,
BLOCK_PRM,
BLOCK_PBF_PB1,
BLOCK_PBF_PB2,
@@ -1908,12 +1927,9 @@ enum block_id {
BLOCK_TCFC,
BLOCK_IGU,
BLOCK_CAU,
- BLOCK_RGFS,
- BLOCK_RGSRC,
- BLOCK_TGFS,
- BLOCK_TGSRC,
BLOCK_UMAC,
BLOCK_XMAC,
+ BLOCK_MSTAT,
BLOCK_DBG,
BLOCK_NIG,
BLOCK_WOL,
@@ -1926,8 +1942,17 @@ enum block_id {
BLOCK_LED,
BLOCK_AVS_WRAP,
BLOCK_PXPREQBUS,
- BLOCK_MISC_AEU,
BLOCK_BAR0_MAP,
+ BLOCK_MCP_FIO,
+ BLOCK_LAST_INIT,
+ BLOCK_PRS_FC,
+ BLOCK_PBF_FC,
+ BLOCK_NIG_LB_FC,
+ BLOCK_NIG_LB_FC_PLLH,
+ BLOCK_NIG_TX_FC_PLLH,
+ BLOCK_NIG_TX_FC,
+ BLOCK_NIG_RX_FC_PLLH,
+ BLOCK_NIG_RX_FC,
MAX_BLOCK_ID
};
@@ -1944,10 +1969,13 @@ enum bin_dbg_buffer_type {
BIN_BUF_DBG_ATTN_REGS,
BIN_BUF_DBG_ATTN_INDEXES,
BIN_BUF_DBG_ATTN_NAME_OFFSETS,
- BIN_BUF_DBG_BUS_BLOCKS,
+ BIN_BUF_DBG_BLOCKS,
+ BIN_BUF_DBG_BLOCKS_CHIP_DATA,
BIN_BUF_DBG_BUS_LINES,
- BIN_BUF_DBG_BUS_BLOCKS_USER_DATA,
+ BIN_BUF_DBG_BLOCKS_USER_DATA,
+ BIN_BUF_DBG_BLOCKS_CHIP_USER_DATA,
BIN_BUF_DBG_BUS_LINE_NAME_OFFSETS,
+ BIN_BUF_DBG_RESET_REGS,
BIN_BUF_DBG_PARSING_STRINGS,
MAX_BIN_DBG_BUFFER_TYPE
};
@@ -2031,20 +2059,54 @@ enum dbg_attn_type {
MAX_DBG_ATTN_TYPE
};
-/* Debug Bus block data */
-struct dbg_bus_block {
- u8 num_of_lines;
- u8 has_latency_events;
- u16 lines_offset;
+/* Block debug data */
+struct dbg_block {
+ u8 name[15];
+ u8 associated_storm_letter;
};
-/* Debug Bus block user data */
-struct dbg_bus_block_user_data {
- u8 num_of_lines;
+/* Chip-specific block debug data */
+struct dbg_block_chip {
+ u8 flags;
+#define DBG_BLOCK_CHIP_IS_REMOVED_MASK 0x1
+#define DBG_BLOCK_CHIP_IS_REMOVED_SHIFT 0
+#define DBG_BLOCK_CHIP_HAS_RESET_REG_MASK 0x1
+#define DBG_BLOCK_CHIP_HAS_RESET_REG_SHIFT 1
+#define DBG_BLOCK_CHIP_UNRESET_BEFORE_DUMP_MASK 0x1
+#define DBG_BLOCK_CHIP_UNRESET_BEFORE_DUMP_SHIFT 2
+#define DBG_BLOCK_CHIP_HAS_DBG_BUS_MASK 0x1
+#define DBG_BLOCK_CHIP_HAS_DBG_BUS_SHIFT 3
+#define DBG_BLOCK_CHIP_HAS_LATENCY_EVENTS_MASK 0x1
+#define DBG_BLOCK_CHIP_HAS_LATENCY_EVENTS_SHIFT 4
+#define DBG_BLOCK_CHIP_RESERVED0_MASK 0x7
+#define DBG_BLOCK_CHIP_RESERVED0_SHIFT 5
+ u8 dbg_client_id;
+ u8 reset_reg_id;
+ u8 reset_reg_bit_offset;
+ struct dbg_mode_hdr dbg_bus_mode;
+ u16 reserved1;
+ u8 reserved2;
+ u8 num_of_dbg_bus_lines;
+ u16 dbg_bus_lines_offset;
+ u32 dbg_select_reg_addr;
+ u32 dbg_dword_enable_reg_addr;
+ u32 dbg_shift_reg_addr;
+ u32 dbg_force_valid_reg_addr;
+ u32 dbg_force_frame_reg_addr;
+};
+
+/* Chip-specific block user debug data */
+struct dbg_block_chip_user {
+ u8 num_of_dbg_bus_lines;
u8 has_latency_events;
u16 names_offset;
};
+/* Block user debug data */
+struct dbg_block_user {
+ u8 name[16];
+};
+
/* Block Debug line data */
struct dbg_bus_line {
u8 data;
@@ -2197,22 +2259,33 @@ enum dbg_idle_chk_severity_types {
MAX_DBG_IDLE_CHK_SEVERITY_TYPES
};
+/* Reset register */
+struct dbg_reset_reg {
+ u32 data;
+#define DBG_RESET_REG_ADDR_MASK 0xFFFFFF
+#define DBG_RESET_REG_ADDR_SHIFT 0
+#define DBG_RESET_REG_IS_REMOVED_MASK 0x1
+#define DBG_RESET_REG_IS_REMOVED_SHIFT 24
+#define DBG_RESET_REG_RESERVED_MASK 0x7F
+#define DBG_RESET_REG_RESERVED_SHIFT 25
+};
+
/* Debug Bus block data */
struct dbg_bus_block_data {
- u16 data;
-#define DBG_BUS_BLOCK_DATA_ENABLE_MASK_MASK 0xF
-#define DBG_BUS_BLOCK_DATA_ENABLE_MASK_SHIFT 0
-#define DBG_BUS_BLOCK_DATA_RIGHT_SHIFT_MASK 0xF
-#define DBG_BUS_BLOCK_DATA_RIGHT_SHIFT_SHIFT 4
-#define DBG_BUS_BLOCK_DATA_FORCE_VALID_MASK_MASK 0xF
-#define DBG_BUS_BLOCK_DATA_FORCE_VALID_MASK_SHIFT 8
-#define DBG_BUS_BLOCK_DATA_FORCE_FRAME_MASK_MASK 0xF
-#define DBG_BUS_BLOCK_DATA_FORCE_FRAME_MASK_SHIFT 12
+ u8 enable_mask;
+ u8 right_shift;
+ u8 force_valid_mask;
+ u8 force_frame_mask;
+ u8 dword_mask;
u8 line_num;
u8 hw_id;
+ u8 flags;
+#define DBG_BUS_BLOCK_DATA_IS_256B_LINE_MASK 0x1
+#define DBG_BUS_BLOCK_DATA_IS_256B_LINE_SHIFT 0
+#define DBG_BUS_BLOCK_DATA_RESERVED_MASK 0x7F
+#define DBG_BUS_BLOCK_DATA_RESERVED_SHIFT 1
};
-/* Debug Bus Clients */
enum dbg_bus_clients {
DBG_BUS_CLIENT_RBCN,
DBG_BUS_CLIENT_RBCP,
@@ -2253,11 +2326,10 @@ enum dbg_bus_constraint_ops {
/* Debug Bus trigger state data */
struct dbg_bus_trigger_state_data {
- u8 data;
-#define DBG_BUS_TRIGGER_STATE_DATA_BLOCK_SHIFTED_ENABLE_MASK_MASK 0xF
-#define DBG_BUS_TRIGGER_STATE_DATA_BLOCK_SHIFTED_ENABLE_MASK_SHIFT 0
-#define DBG_BUS_TRIGGER_STATE_DATA_CONSTRAINT_DWORD_MASK_MASK 0xF
-#define DBG_BUS_TRIGGER_STATE_DATA_CONSTRAINT_DWORD_MASK_SHIFT 4
+ u8 msg_len;
+ u8 constraint_dword_mask;
+ u8 storm_id;
+ u8 reserved;
};
/* Debug Bus memory address */
@@ -2307,8 +2379,7 @@ struct dbg_bus_storm_data {
struct dbg_bus_data {
u32 app_version;
u8 state;
- u8 hw_dwords;
- u16 hw_id_mask;
+ u8 mode_256b_en;
u8 num_enabled_blocks;
u8 num_enabled_storms;
u8 target;
@@ -2319,67 +2390,21 @@ struct dbg_bus_data {
u8 adding_filter;
u8 filter_pre_trigger;
u8 filter_post_trigger;
- u16 reserved;
u8 trigger_en;
- struct dbg_bus_trigger_state_data trigger_states[3];
+ u8 filter_constraint_dword_mask;
u8 next_trigger_state;
u8 next_constraint_id;
- u8 unify_inputs;
+ struct dbg_bus_trigger_state_data trigger_states[3];
+ u8 filter_msg_len;
u8 rcv_from_other_engine;
+ u8 blocks_dword_mask;
+ u8 blocks_dword_overlap;
+ u32 hw_id_mask;
struct dbg_bus_pci_buf_data pci_buf;
- struct dbg_bus_block_data blocks[88];
+ struct dbg_bus_block_data blocks[132];
struct dbg_bus_storm_data storms[6];
};
-/* Debug bus filter types */
-enum dbg_bus_filter_types {
- DBG_BUS_FILTER_TYPE_OFF,
- DBG_BUS_FILTER_TYPE_PRE,
- DBG_BUS_FILTER_TYPE_POST,
- DBG_BUS_FILTER_TYPE_ON,
- MAX_DBG_BUS_FILTER_TYPES
-};
-
-/* Debug bus frame modes */
-enum dbg_bus_frame_modes {
- DBG_BUS_FRAME_MODE_0HW_4ST = 0, /* 0 HW dwords, 4 Storm dwords */
- DBG_BUS_FRAME_MODE_4HW_0ST = 3, /* 4 HW dwords, 0 Storm dwords */
- DBG_BUS_FRAME_MODE_8HW_0ST = 4, /* 8 HW dwords, 0 Storm dwords */
- MAX_DBG_BUS_FRAME_MODES
-};
-
-/* Debug bus other engine mode */
-enum dbg_bus_other_engine_modes {
- DBG_BUS_OTHER_ENGINE_MODE_NONE,
- DBG_BUS_OTHER_ENGINE_MODE_DOUBLE_BW_TX,
- DBG_BUS_OTHER_ENGINE_MODE_DOUBLE_BW_RX,
- DBG_BUS_OTHER_ENGINE_MODE_CROSS_ENGINE_TX,
- DBG_BUS_OTHER_ENGINE_MODE_CROSS_ENGINE_RX,
- MAX_DBG_BUS_OTHER_ENGINE_MODES
-};
-
-/* Debug bus post-trigger recording types */
-enum dbg_bus_post_trigger_types {
- DBG_BUS_POST_TRIGGER_RECORD,
- DBG_BUS_POST_TRIGGER_DROP,
- MAX_DBG_BUS_POST_TRIGGER_TYPES
-};
-
-/* Debug bus pre-trigger recording types */
-enum dbg_bus_pre_trigger_types {
- DBG_BUS_PRE_TRIGGER_START_FROM_ZERO,
- DBG_BUS_PRE_TRIGGER_NUM_CHUNKS,
- DBG_BUS_PRE_TRIGGER_DROP,
- MAX_DBG_BUS_PRE_TRIGGER_TYPES
-};
-
-/* Debug bus SEMI frame modes */
-enum dbg_bus_semi_frame_modes {
- DBG_BUS_SEMI_FRAME_MODE_0SLOW_4FAST = 0,
- DBG_BUS_SEMI_FRAME_MODE_4SLOW_0FAST = 3,
- MAX_DBG_BUS_SEMI_FRAME_MODES
-};
-
/* Debug bus states */
enum dbg_bus_states {
DBG_BUS_STATE_IDLE,
@@ -2397,7 +2422,9 @@ enum dbg_bus_storm_modes {
DBG_BUS_STORM_MODE_DRA_W,
DBG_BUS_STORM_MODE_LD_ST_ADDR,
DBG_BUS_STORM_MODE_DRA_FSM,
+ DBG_BUS_STORM_MODE_FAST_DBGMUX,
DBG_BUS_STORM_MODE_RH,
+ DBG_BUS_STORM_MODE_RH_WITH_STORE,
DBG_BUS_STORM_MODE_FOC,
DBG_BUS_STORM_MODE_EXT_STORE,
MAX_DBG_BUS_STORM_MODES
@@ -2438,13 +2465,13 @@ enum dbg_grc_params {
DBG_GRC_PARAM_DUMP_CAU,
DBG_GRC_PARAM_DUMP_QM,
DBG_GRC_PARAM_DUMP_MCP,
- DBG_GRC_PARAM_MCP_TRACE_META_SIZE,
+ DBG_GRC_PARAM_DUMP_DORQ,
DBG_GRC_PARAM_DUMP_CFC,
DBG_GRC_PARAM_DUMP_IGU,
DBG_GRC_PARAM_DUMP_BRB,
DBG_GRC_PARAM_DUMP_BTB,
DBG_GRC_PARAM_DUMP_BMB,
- DBG_GRC_PARAM_DUMP_NIG,
+ DBG_GRC_PARAM_RESERVD1,
DBG_GRC_PARAM_DUMP_MULD,
DBG_GRC_PARAM_DUMP_PRS,
DBG_GRC_PARAM_DUMP_DMAE,
@@ -2453,8 +2480,8 @@ enum dbg_grc_params {
DBG_GRC_PARAM_DUMP_DIF,
DBG_GRC_PARAM_DUMP_STATIC,
DBG_GRC_PARAM_UNSTALL,
- DBG_GRC_PARAM_NUM_LCIDS,
- DBG_GRC_PARAM_NUM_LTIDS,
+ DBG_GRC_PARAM_RESERVED2,
+ DBG_GRC_PARAM_MCP_TRACE_META_SIZE,
DBG_GRC_PARAM_EXCLUDE_ALL,
DBG_GRC_PARAM_CRASH,
DBG_GRC_PARAM_PARITY_SAFE,
@@ -2462,22 +2489,14 @@ enum dbg_grc_params {
DBG_GRC_PARAM_DUMP_PHY,
DBG_GRC_PARAM_NO_MCP,
DBG_GRC_PARAM_NO_FW_VER,
+ DBG_GRC_PARAM_RESERVED3,
+ DBG_GRC_PARAM_DUMP_MCP_HW_DUMP,
+ DBG_GRC_PARAM_DUMP_ILT_CDUC,
+ DBG_GRC_PARAM_DUMP_ILT_CDUT,
+ DBG_GRC_PARAM_DUMP_CAU_EXT,
MAX_DBG_GRC_PARAMS
};
-/* Debug reset registers */
-enum dbg_reset_regs {
- DBG_RESET_REG_MISCS_PL_UA,
- DBG_RESET_REG_MISCS_PL_HV,
- DBG_RESET_REG_MISCS_PL_HV_2,
- DBG_RESET_REG_MISC_PL_UA,
- DBG_RESET_REG_MISC_PL_HV,
- DBG_RESET_REG_MISC_PL_PDA_VMAIN_1,
- DBG_RESET_REG_MISC_PL_PDA_VMAIN_2,
- DBG_RESET_REG_MISC_PL_PDA_VAUX,
- MAX_DBG_RESET_REGS
-};
-
/* Debug status codes */
enum dbg_status {
DBG_STATUS_OK,
@@ -2489,15 +2508,15 @@ enum dbg_status {
DBG_STATUS_INVALID_PCI_BUF_SIZE,
DBG_STATUS_PCI_BUF_ALLOC_FAILED,
DBG_STATUS_PCI_BUF_NOT_ALLOCATED,
- DBG_STATUS_TOO_MANY_INPUTS,
- DBG_STATUS_INPUT_OVERLAP,
- DBG_STATUS_HW_ONLY_RECORDING,
+ DBG_STATUS_INVALID_FILTER_TRIGGER_DWORDS,
+ DBG_STATUS_NO_MATCHING_FRAMING_MODE,
+ DBG_STATUS_VFC_READ_ERROR,
DBG_STATUS_STORM_ALREADY_ENABLED,
DBG_STATUS_STORM_NOT_ENABLED,
DBG_STATUS_BLOCK_ALREADY_ENABLED,
DBG_STATUS_BLOCK_NOT_ENABLED,
DBG_STATUS_NO_INPUT_ENABLED,
- DBG_STATUS_NO_FILTER_TRIGGER_64B,
+ DBG_STATUS_NO_FILTER_TRIGGER_256B,
DBG_STATUS_FILTER_ALREADY_ENABLED,
DBG_STATUS_TRIGGER_ALREADY_ENABLED,
DBG_STATUS_TRIGGER_NOT_ENABLED,
@@ -2522,7 +2541,7 @@ enum dbg_status {
DBG_STATUS_MCP_TRACE_NO_META,
DBG_STATUS_MCP_COULD_NOT_HALT,
DBG_STATUS_MCP_COULD_NOT_RESUME,
- DBG_STATUS_RESERVED2,
+ DBG_STATUS_RESERVED0,
DBG_STATUS_SEMI_FIFO_NOT_EMPTY,
DBG_STATUS_IGU_FIFO_BAD_DATA,
DBG_STATUS_MCP_COULD_NOT_MASK_PRTY,
@@ -2530,10 +2549,15 @@ enum dbg_status {
DBG_STATUS_REG_FIFO_BAD_DATA,
DBG_STATUS_PROTECTION_OVERRIDE_BAD_DATA,
DBG_STATUS_DBG_ARRAY_NOT_SET,
- DBG_STATUS_FILTER_BUG,
+ DBG_STATUS_RESERVED1,
DBG_STATUS_NON_MATCHING_LINES,
- DBG_STATUS_INVALID_TRIGGER_DWORD_OFFSET,
+ DBG_STATUS_INSUFFICIENT_HW_IDS,
DBG_STATUS_DBG_BUS_IN_USE,
+ DBG_STATUS_INVALID_STORM_DBG_MODE,
+ DBG_STATUS_OTHER_ENGINE_BB_ONLY,
+ DBG_STATUS_FILTER_SINGLE_HW_ID,
+ DBG_STATUS_TRIGGER_SINGLE_HW_ID,
+ DBG_STATUS_MISSING_TRIGGER_STATE_STORM,
MAX_DBG_STATUS
};
@@ -2569,9 +2593,9 @@ struct dbg_tools_data {
struct dbg_bus_data bus;
struct idle_chk_data idle_chk;
u8 mode_enable[40];
- u8 block_in_reset[88];
+ u8 block_in_reset[132];
u8 chip_id;
- u8 platform_id;
+ u8 hw_type;
u8 num_ports;
u8 num_pfs_per_port;
u8 num_vfs;
@@ -2582,6 +2606,19 @@ struct dbg_tools_data {
u32 num_regs_read;
};
+/* ILT Clients */
+enum ilt_clients {
+ ILT_CLI_CDUC,
+ ILT_CLI_CDUT,
+ ILT_CLI_QM,
+ ILT_CLI_TM,
+ ILT_CLI_SRC,
+ ILT_CLI_TSDM,
+ ILT_CLI_RGFS,
+ ILT_CLI_TGFS,
+ MAX_ILT_CLIENTS
+};
+
/********************************/
/* HSI Init Functions constants */
/********************************/
@@ -2630,13 +2667,18 @@ struct init_nig_pri_tc_map_req {
struct init_nig_pri_tc_map_entry pri[NUM_OF_VLAN_PRIORITIES];
};
+/* QM per global RL init parameters */
+struct init_qm_global_rl_params {
+ u32 rate_limit;
+};
+
/* QM per-port init parameters */
struct init_qm_port_params {
- u8 active;
- u8 active_phys_tcs;
+ u16 active_phys_tcs;
u16 num_pbf_cmd_lines;
u16 num_btb_blocks;
- u16 reserved;
+ u8 active;
+ u8 reserved;
};
/* QM per-PQ init parameters */
@@ -2645,15 +2687,14 @@ struct init_qm_pq_params {
u8 tc_id;
u8 wrr_group;
u8 rl_valid;
+ u16 rl_id;
u8 port_id;
- u8 reserved0;
- u16 reserved1;
+ u8 reserved;
};
/* QM per-vport init parameters */
struct init_qm_vport_params {
- u32 vport_rl;
- u16 vport_wfq;
+ u16 wfq;
u16 first_tx_pq_id[NUM_OF_TCS];
};
@@ -2673,13 +2714,12 @@ struct init_qm_vport_params {
enum chip_ids {
CHIP_BB,
CHIP_K2,
- CHIP_RESERVED,
MAX_CHIP_IDS
};
struct fw_asserts_ram_section {
- u16 section_ram_line_offset;
- u16 section_ram_line_size;
+ __le16 section_ram_line_offset;
+ __le16 section_ram_line_size;
u8 list_dword_offset;
u8 list_element_dword_size;
u8 list_num_elements;
@@ -2729,6 +2769,7 @@ enum init_modes {
MODE_PORTS_PER_ENG_4,
MODE_100G,
MODE_RESERVED6,
+ MODE_RESERVED7,
MAX_INIT_MODES
};
@@ -2763,9 +2804,19 @@ enum bin_init_buffer_type {
BIN_BUF_INIT_VAL,
BIN_BUF_INIT_MODE_TREE,
BIN_BUF_INIT_IRO,
+ BIN_BUF_INIT_OVERLAYS,
MAX_BIN_INIT_BUFFER_TYPE
};
+/* FW overlay buffer header */
+struct fw_overlay_buf_hdr {
+ u32 data;
+#define FW_OVERLAY_BUF_HDR_STORM_ID_MASK 0xFF
+#define FW_OVERLAY_BUF_HDR_STORM_ID_SHIFT 0
+#define FW_OVERLAY_BUF_HDR_BUF_SIZE_MASK 0xFFFFFF
+#define FW_OVERLAY_BUF_HDR_BUF_SIZE_SHIFT 8
+};
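/* Editor's sketch (not part of the patch): the header packs both fields into
 * a single dword, so a reader peels them apart with GET_FIELD, the driver's
 * counterpart to SET_FIELD:
 */
static void example_overlay_hdr_decode(const struct fw_overlay_buf_hdr *hdr,
				       u8 *storm_id, u32 *buf_size)
{
	*storm_id = GET_FIELD(hdr->data, FW_OVERLAY_BUF_HDR_STORM_ID);
	*buf_size = GET_FIELD(hdr->data, FW_OVERLAY_BUF_HDR_BUF_SIZE);
}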
+
/* init array header: raw */
struct init_array_raw_hdr {
u32 data;
@@ -2859,10 +2910,8 @@ struct init_if_phase_op {
u32 op_data;
#define INIT_IF_PHASE_OP_OP_MASK 0xF
#define INIT_IF_PHASE_OP_OP_SHIFT 0
-#define INIT_IF_PHASE_OP_DMAE_ENABLE_MASK 0x1
-#define INIT_IF_PHASE_OP_DMAE_ENABLE_SHIFT 4
-#define INIT_IF_PHASE_OP_RESERVED1_MASK 0x7FF
-#define INIT_IF_PHASE_OP_RESERVED1_SHIFT 5
+#define INIT_IF_PHASE_OP_RESERVED1_MASK 0xFFF
+#define INIT_IF_PHASE_OP_RESERVED1_SHIFT 4
#define INIT_IF_PHASE_OP_CMD_OFFSET_MASK 0xFFFF
#define INIT_IF_PHASE_OP_CMD_OFFSET_SHIFT 16
u32 phase_data;
@@ -2991,9 +3040,11 @@ struct iro {
* @brief qed_dbg_set_bin_ptr - Sets a pointer to the binary data with debug
* arrays.
*
+ * @param p_hwfn - HW device data
* @param bin_ptr - a pointer to the binary data with debug arrays.
*/
-enum dbg_status qed_dbg_set_bin_ptr(const u8 * const bin_ptr);
+enum dbg_status qed_dbg_set_bin_ptr(struct qed_hwfn *p_hwfn,
+ const u8 * const bin_ptr);
/**
* @brief qed_read_regs - Reads registers into a buffer (using GRC).
@@ -3037,7 +3088,6 @@ bool qed_read_fw_info(struct qed_hwfn *p_hwfn,
* - val is outside the allowed boundaries
*/
enum dbg_status qed_dbg_grc_config(struct qed_hwfn *p_hwfn,
- struct qed_ptt *p_ptt,
enum dbg_grc_params grc_param, u32 val);
/**
@@ -3358,20 +3408,36 @@ enum dbg_status qed_dbg_print_attn(struct qed_hwfn *p_hwfn,
struct mcp_trace_format {
u32 data;
#define MCP_TRACE_FORMAT_MODULE_MASK 0x0000ffff
-#define MCP_TRACE_FORMAT_MODULE_SHIFT 0
+#define MCP_TRACE_FORMAT_MODULE_OFFSET 0
#define MCP_TRACE_FORMAT_LEVEL_MASK 0x00030000
-#define MCP_TRACE_FORMAT_LEVEL_SHIFT 16
+#define MCP_TRACE_FORMAT_LEVEL_OFFSET 16
#define MCP_TRACE_FORMAT_P1_SIZE_MASK 0x000c0000
-#define MCP_TRACE_FORMAT_P1_SIZE_SHIFT 18
+#define MCP_TRACE_FORMAT_P1_SIZE_OFFSET 18
#define MCP_TRACE_FORMAT_P2_SIZE_MASK 0x00300000
-#define MCP_TRACE_FORMAT_P2_SIZE_SHIFT 20
+#define MCP_TRACE_FORMAT_P2_SIZE_OFFSET 20
#define MCP_TRACE_FORMAT_P3_SIZE_MASK 0x00c00000
-#define MCP_TRACE_FORMAT_P3_SIZE_SHIFT 22
+#define MCP_TRACE_FORMAT_P3_SIZE_OFFSET 22
#define MCP_TRACE_FORMAT_LEN_MASK 0xff000000
-#define MCP_TRACE_FORMAT_LEN_SHIFT 24
+#define MCP_TRACE_FORMAT_LEN_OFFSET 24
+
char *format_str;
};
+/* MCP Trace Meta data structure */
+struct mcp_trace_meta {
+ u32 modules_num;
+ char **modules;
+ u32 formats_num;
+ struct mcp_trace_format *formats;
+ bool is_allocated;
+};
+
+/* Debug Tools user data */
+struct dbg_tools_user_data {
+ struct mcp_trace_meta mcp_trace_meta;
+ const u32 *mcp_trace_user_meta_buf;
+};
+
/******************************** Constants **********************************/
#define MAX_NAME_LEN 16
@@ -3382,16 +3448,20 @@ struct mcp_trace_format {
* @brief qed_dbg_user_set_bin_ptr - Sets a pointer to the binary data with
* debug arrays.
*
+ * @param p_hwfn - HW device data
* @param bin_ptr - a pointer to the binary data with debug arrays.
*/
-enum dbg_status qed_dbg_user_set_bin_ptr(const u8 * const bin_ptr);
+enum dbg_status qed_dbg_user_set_bin_ptr(struct qed_hwfn *p_hwfn,
+ const u8 * const bin_ptr);
/**
* @brief qed_dbg_alloc_user_data - Allocates user debug data.
*
* @param p_hwfn - HW device data
+ * @param user_data_ptr - OUT: a pointer to the allocated memory.
*/
-enum dbg_status qed_dbg_alloc_user_data(struct qed_hwfn *p_hwfn);
+enum dbg_status qed_dbg_alloc_user_data(struct qed_hwfn *p_hwfn,
+ void **user_data_ptr);
/**
* @brief qed_dbg_get_status_str - Returns a string for the specified status.
@@ -3664,271 +3734,6 @@ enum dbg_status qed_print_fw_asserts_results(struct qed_hwfn *p_hwfn,
enum dbg_status qed_dbg_parse_attn(struct qed_hwfn *p_hwfn,
struct dbg_attn_block_result *results);
-/* Debug Bus blocks */
-static const u32 dbg_bus_blocks[] = {
- 0x0000000f, /* grc, bb, 15 lines */
- 0x0000000f, /* grc, k2, 15 lines */
- 0x00000000,
- 0x00000000, /* miscs, bb, 0 lines */
- 0x00000000, /* miscs, k2, 0 lines */
- 0x00000000,
- 0x00000000, /* misc, bb, 0 lines */
- 0x00000000, /* misc, k2, 0 lines */
- 0x00000000,
- 0x00000000, /* dbu, bb, 0 lines */
- 0x00000000, /* dbu, k2, 0 lines */
- 0x00000000,
- 0x000f0127, /* pglue_b, bb, 39 lines */
- 0x0036012a, /* pglue_b, k2, 42 lines */
- 0x00000000,
- 0x00000000, /* cnig, bb, 0 lines */
- 0x00120102, /* cnig, k2, 2 lines */
- 0x00000000,
- 0x00000000, /* cpmu, bb, 0 lines */
- 0x00000000, /* cpmu, k2, 0 lines */
- 0x00000000,
- 0x00000001, /* ncsi, bb, 1 lines */
- 0x00000001, /* ncsi, k2, 1 lines */
- 0x00000000,
- 0x00000000, /* opte, bb, 0 lines */
- 0x00000000, /* opte, k2, 0 lines */
- 0x00000000,
- 0x00600085, /* bmb, bb, 133 lines */
- 0x00600085, /* bmb, k2, 133 lines */
- 0x00000000,
- 0x00000000, /* pcie, bb, 0 lines */
- 0x00e50033, /* pcie, k2, 51 lines */
- 0x00000000,
- 0x00000000, /* mcp, bb, 0 lines */
- 0x00000000, /* mcp, k2, 0 lines */
- 0x00000000,
- 0x01180009, /* mcp2, bb, 9 lines */
- 0x01180009, /* mcp2, k2, 9 lines */
- 0x00000000,
- 0x01210104, /* pswhst, bb, 4 lines */
- 0x01210104, /* pswhst, k2, 4 lines */
- 0x00000000,
- 0x01250103, /* pswhst2, bb, 3 lines */
- 0x01250103, /* pswhst2, k2, 3 lines */
- 0x00000000,
- 0x00340101, /* pswrd, bb, 1 lines */
- 0x00340101, /* pswrd, k2, 1 lines */
- 0x00000000,
- 0x01280119, /* pswrd2, bb, 25 lines */
- 0x01280119, /* pswrd2, k2, 25 lines */
- 0x00000000,
- 0x01410109, /* pswwr, bb, 9 lines */
- 0x01410109, /* pswwr, k2, 9 lines */
- 0x00000000,
- 0x00000000, /* pswwr2, bb, 0 lines */
- 0x00000000, /* pswwr2, k2, 0 lines */
- 0x00000000,
- 0x001c0001, /* pswrq, bb, 1 lines */
- 0x001c0001, /* pswrq, k2, 1 lines */
- 0x00000000,
- 0x014a0015, /* pswrq2, bb, 21 lines */
- 0x014a0015, /* pswrq2, k2, 21 lines */
- 0x00000000,
- 0x00000000, /* pglcs, bb, 0 lines */
- 0x00120006, /* pglcs, k2, 6 lines */
- 0x00000000,
- 0x00100001, /* dmae, bb, 1 lines */
- 0x00100001, /* dmae, k2, 1 lines */
- 0x00000000,
- 0x015f0105, /* ptu, bb, 5 lines */
- 0x015f0105, /* ptu, k2, 5 lines */
- 0x00000000,
- 0x01640120, /* tcm, bb, 32 lines */
- 0x01640120, /* tcm, k2, 32 lines */
- 0x00000000,
- 0x01640120, /* mcm, bb, 32 lines */
- 0x01640120, /* mcm, k2, 32 lines */
- 0x00000000,
- 0x01640120, /* ucm, bb, 32 lines */
- 0x01640120, /* ucm, k2, 32 lines */
- 0x00000000,
- 0x01640120, /* xcm, bb, 32 lines */
- 0x01640120, /* xcm, k2, 32 lines */
- 0x00000000,
- 0x01640120, /* ycm, bb, 32 lines */
- 0x01640120, /* ycm, k2, 32 lines */
- 0x00000000,
- 0x01640120, /* pcm, bb, 32 lines */
- 0x01640120, /* pcm, k2, 32 lines */
- 0x00000000,
- 0x01840062, /* qm, bb, 98 lines */
- 0x01840062, /* qm, k2, 98 lines */
- 0x00000000,
- 0x01e60021, /* tm, bb, 33 lines */
- 0x01e60021, /* tm, k2, 33 lines */
- 0x00000000,
- 0x02070107, /* dorq, bb, 7 lines */
- 0x02070107, /* dorq, k2, 7 lines */
- 0x00000000,
- 0x00600185, /* brb, bb, 133 lines */
- 0x00600185, /* brb, k2, 133 lines */
- 0x00000000,
- 0x020e0019, /* src, bb, 25 lines */
- 0x020c001a, /* src, k2, 26 lines */
- 0x00000000,
- 0x02270104, /* prs, bb, 4 lines */
- 0x02270104, /* prs, k2, 4 lines */
- 0x00000000,
- 0x022b0133, /* tsdm, bb, 51 lines */
- 0x022b0133, /* tsdm, k2, 51 lines */
- 0x00000000,
- 0x022b0133, /* msdm, bb, 51 lines */
- 0x022b0133, /* msdm, k2, 51 lines */
- 0x00000000,
- 0x022b0133, /* usdm, bb, 51 lines */
- 0x022b0133, /* usdm, k2, 51 lines */
- 0x00000000,
- 0x022b0133, /* xsdm, bb, 51 lines */
- 0x022b0133, /* xsdm, k2, 51 lines */
- 0x00000000,
- 0x022b0133, /* ysdm, bb, 51 lines */
- 0x022b0133, /* ysdm, k2, 51 lines */
- 0x00000000,
- 0x022b0133, /* psdm, bb, 51 lines */
- 0x022b0133, /* psdm, k2, 51 lines */
- 0x00000000,
- 0x025e010c, /* tsem, bb, 12 lines */
- 0x025e010c, /* tsem, k2, 12 lines */
- 0x00000000,
- 0x025e010c, /* msem, bb, 12 lines */
- 0x025e010c, /* msem, k2, 12 lines */
- 0x00000000,
- 0x025e010c, /* usem, bb, 12 lines */
- 0x025e010c, /* usem, k2, 12 lines */
- 0x00000000,
- 0x025e010c, /* xsem, bb, 12 lines */
- 0x025e010c, /* xsem, k2, 12 lines */
- 0x00000000,
- 0x025e010c, /* ysem, bb, 12 lines */
- 0x025e010c, /* ysem, k2, 12 lines */
- 0x00000000,
- 0x025e010c, /* psem, bb, 12 lines */
- 0x025e010c, /* psem, k2, 12 lines */
- 0x00000000,
- 0x026a000d, /* rss, bb, 13 lines */
- 0x026a000d, /* rss, k2, 13 lines */
- 0x00000000,
- 0x02770106, /* tmld, bb, 6 lines */
- 0x02770106, /* tmld, k2, 6 lines */
- 0x00000000,
- 0x027d0106, /* muld, bb, 6 lines */
- 0x027d0106, /* muld, k2, 6 lines */
- 0x00000000,
- 0x02770005, /* yuld, bb, 5 lines */
- 0x02770005, /* yuld, k2, 5 lines */
- 0x00000000,
- 0x02830107, /* xyld, bb, 7 lines */
- 0x027d0107, /* xyld, k2, 7 lines */
- 0x00000000,
- 0x00000000, /* ptld, bb, 0 lines */
- 0x00000000, /* ptld, k2, 0 lines */
- 0x00000000,
- 0x00000000, /* ypld, bb, 0 lines */
- 0x00000000, /* ypld, k2, 0 lines */
- 0x00000000,
- 0x028a010e, /* prm, bb, 14 lines */
- 0x02980110, /* prm, k2, 16 lines */
- 0x00000000,
- 0x02a8000d, /* pbf_pb1, bb, 13 lines */
- 0x02a8000d, /* pbf_pb1, k2, 13 lines */
- 0x00000000,
- 0x02a8000d, /* pbf_pb2, bb, 13 lines */
- 0x02a8000d, /* pbf_pb2, k2, 13 lines */
- 0x00000000,
- 0x02a8000d, /* rpb, bb, 13 lines */
- 0x02a8000d, /* rpb, k2, 13 lines */
- 0x00000000,
- 0x00600185, /* btb, bb, 133 lines */
- 0x00600185, /* btb, k2, 133 lines */
- 0x00000000,
- 0x02b50117, /* pbf, bb, 23 lines */
- 0x02b50117, /* pbf, k2, 23 lines */
- 0x00000000,
- 0x02cc0006, /* rdif, bb, 6 lines */
- 0x02cc0006, /* rdif, k2, 6 lines */
- 0x00000000,
- 0x02d20006, /* tdif, bb, 6 lines */
- 0x02d20006, /* tdif, k2, 6 lines */
- 0x00000000,
- 0x02d80003, /* cdu, bb, 3 lines */
- 0x02db000e, /* cdu, k2, 14 lines */
- 0x00000000,
- 0x02e9010d, /* ccfc, bb, 13 lines */
- 0x02f60117, /* ccfc, k2, 23 lines */
- 0x00000000,
- 0x02e9010d, /* tcfc, bb, 13 lines */
- 0x02f60117, /* tcfc, k2, 23 lines */
- 0x00000000,
- 0x030d0133, /* igu, bb, 51 lines */
- 0x030d0133, /* igu, k2, 51 lines */
- 0x00000000,
- 0x03400106, /* cau, bb, 6 lines */
- 0x03400106, /* cau, k2, 6 lines */
- 0x00000000,
- 0x00000000, /* rgfs, bb, 0 lines */
- 0x00000000, /* rgfs, k2, 0 lines */
- 0x00000000,
- 0x00000000, /* rgsrc, bb, 0 lines */
- 0x00000000, /* rgsrc, k2, 0 lines */
- 0x00000000,
- 0x00000000, /* tgfs, bb, 0 lines */
- 0x00000000, /* tgfs, k2, 0 lines */
- 0x00000000,
- 0x00000000, /* tgsrc, bb, 0 lines */
- 0x00000000, /* tgsrc, k2, 0 lines */
- 0x00000000,
- 0x00000000, /* umac, bb, 0 lines */
- 0x00120006, /* umac, k2, 6 lines */
- 0x00000000,
- 0x00000000, /* xmac, bb, 0 lines */
- 0x00000000, /* xmac, k2, 0 lines */
- 0x00000000,
- 0x00000000, /* dbg, bb, 0 lines */
- 0x00000000, /* dbg, k2, 0 lines */
- 0x00000000,
- 0x0346012b, /* nig, bb, 43 lines */
- 0x0346011d, /* nig, k2, 29 lines */
- 0x00000000,
- 0x00000000, /* wol, bb, 0 lines */
- 0x001c0002, /* wol, k2, 2 lines */
- 0x00000000,
- 0x00000000, /* bmbn, bb, 0 lines */
- 0x00210008, /* bmbn, k2, 8 lines */
- 0x00000000,
- 0x00000000, /* ipc, bb, 0 lines */
- 0x00000000, /* ipc, k2, 0 lines */
- 0x00000000,
- 0x00000000, /* nwm, bb, 0 lines */
- 0x0371000b, /* nwm, k2, 11 lines */
- 0x00000000,
- 0x00000000, /* nws, bb, 0 lines */
- 0x037c0009, /* nws, k2, 9 lines */
- 0x00000000,
- 0x00000000, /* ms, bb, 0 lines */
- 0x00120004, /* ms, k2, 4 lines */
- 0x00000000,
- 0x00000000, /* phy_pcie, bb, 0 lines */
- 0x00e5001a, /* phy_pcie, k2, 26 lines */
- 0x00000000,
- 0x00000000, /* led, bb, 0 lines */
- 0x00000000, /* led, k2, 0 lines */
- 0x00000000,
- 0x00000000, /* avs_wrap, bb, 0 lines */
- 0x00000000, /* avs_wrap, k2, 0 lines */
- 0x00000000,
- 0x00000000, /* bar0_map, bb, 0 lines */
- 0x00000000, /* bar0_map, k2, 0 lines */
- 0x00000000,
- 0x00000000, /* bar0_map, bb, 0 lines */
- 0x00000000, /* bar0_map, k2, 0 lines */
- 0x00000000,
-};
-
/* Win 2 */
#define GTT_BAR0_MAP_REG_IGU_CMD 0x00f000UL
@@ -3942,22 +3747,28 @@ static const u32 dbg_bus_blocks[] = {
#define GTT_BAR0_MAP_REG_MSDM_RAM_1024 0x012000UL
/* Win 6 */
-#define GTT_BAR0_MAP_REG_USDM_RAM 0x013000UL
+#define GTT_BAR0_MAP_REG_MSDM_RAM_2048 0x013000UL
/* Win 7 */
-#define GTT_BAR0_MAP_REG_USDM_RAM_1024 0x014000UL
+#define GTT_BAR0_MAP_REG_USDM_RAM 0x014000UL
/* Win 8 */
-#define GTT_BAR0_MAP_REG_USDM_RAM_2048 0x015000UL
+#define GTT_BAR0_MAP_REG_USDM_RAM_1024 0x015000UL
/* Win 9 */
-#define GTT_BAR0_MAP_REG_XSDM_RAM 0x016000UL
+#define GTT_BAR0_MAP_REG_USDM_RAM_2048 0x016000UL
/* Win 10 */
-#define GTT_BAR0_MAP_REG_YSDM_RAM 0x017000UL
+#define GTT_BAR0_MAP_REG_XSDM_RAM 0x017000UL
/* Win 11 */
-#define GTT_BAR0_MAP_REG_PSDM_RAM 0x018000UL
+#define GTT_BAR0_MAP_REG_XSDM_RAM_1024 0x018000UL
+
+/* Win 12 */
+#define GTT_BAR0_MAP_REG_YSDM_RAM 0x019000UL
+
+/* Win 13 */
+#define GTT_BAR0_MAP_REG_PSDM_RAM 0x01a000UL
/**
* @brief qed_qm_pf_mem_size - prepare QM ILT sizes
@@ -3982,7 +3793,7 @@ struct qed_qm_common_rt_init_params {
u8 max_phys_tcs_per_port;
bool pf_rl_en;
bool pf_wfq_en;
- bool vport_rl_en;
+ bool global_rl_en;
bool vport_wfq_en;
struct init_qm_port_params *port_params;
};
@@ -4001,11 +3812,10 @@ struct qed_qm_pf_rt_init_params {
u16 start_pq;
u16 num_pf_pqs;
u16 num_vf_pqs;
- u8 start_vport;
- u8 num_vports;
+ u16 start_vport;
+ u16 num_vports;
u16 pf_wfq;
u32 pf_rl;
- u32 link_speed;
struct init_qm_pq_params *pq_params;
struct init_qm_vport_params *vport_params;
};
@@ -4054,22 +3864,22 @@ int qed_init_pf_rl(struct qed_hwfn *p_hwfn,
*/
int qed_init_vport_wfq(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt,
- u16 first_tx_pq_id[NUM_OF_TCS], u16 vport_wfq);
+ u16 first_tx_pq_id[NUM_OF_TCS], u16 wfq);
/**
- * @brief qed_init_vport_rl - Initializes the rate limit of the specified VPORT
+ * @brief qed_init_global_rl - Initializes the rate limit of the specified
+ * rate limiter
*
* @param p_hwfn
* @param p_ptt - ptt window used for writing the registers
- * @param vport_id - VPORT ID
- * @param vport_rl - rate limit in Mb/sec units
- * @param link_speed - link speed in Mbps.
+ * @param rl_id - RL ID
+ * @param rate_limit - rate limit in Mb/sec units
*
* @return 0 on success, -1 on error.
*/
-int qed_init_vport_rl(struct qed_hwfn *p_hwfn,
- struct qed_ptt *p_ptt,
- u8 vport_id, u32 vport_rl, u32 link_speed);
+int qed_init_global_rl(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ u16 rl_id, u32 rate_limit);
/**
* @brief qed_send_qm_stop_cmd Sends a stop command to the QM
@@ -4157,7 +3967,7 @@ void qed_gft_disable(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, u16 pf_id);
/**
* @brief qed_gft_config - Enable and configure HW for GFT
*
- * @param p_hwfn
+ * @param p_hwfn - HW device data
* @param p_ptt - ptt window used for writing the registers.
* @param pf_id - pf on which to enable GFT.
* @param tcp - set profile tcp packets.
@@ -4242,6 +4052,42 @@ void qed_memset_task_ctx(void *p_ctx_mem, u32 ctx_size, u8 ctx_type);
void qed_set_rdma_error_level(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt,
u8 assert_level[NUM_STORMS]);
+/**
+ * @brief qed_fw_overlay_mem_alloc - Allocates and fills the FW overlay memory.
+ *
+ * @param p_hwfn - HW device data
+ * @param fw_overlay_in_buf - the input FW overlay buffer.
+ * @param buf_size_in_bytes - the size of the input FW overlay buffer in
+ * bytes; must be aligned to dwords.
+ *
+ * @return a pointer to the allocated overlays memory,
+ * or NULL in case of failures.
+ */
+struct phys_mem_desc *
+qed_fw_overlay_mem_alloc(struct qed_hwfn *p_hwfn,
+ const u32 * const fw_overlay_in_buf,
+ u32 buf_size_in_bytes);
+
+/**
+ * @brief qed_fw_overlay_init_ram - Initializes the FW overlay RAM.
+ *
+ * @param p_hwfn - HW device data.
+ * @param p_ptt - ptt window used for writing the registers.
+ * @param fw_overlay_mem - the allocated FW overlay memory.
+ */
+void qed_fw_overlay_init_ram(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ struct phys_mem_desc *fw_overlay_mem);
+
+/**
+ * @brief qed_fw_overlay_mem_free - Frees the FW overlay memory.
+ *
+ * @param p_hwfn - HW device data.
+ * @param fw_overlay_mem - the allocated FW overlay memory to free.
+ */
+void qed_fw_overlay_mem_free(struct qed_hwfn *p_hwfn,
+ struct phys_mem_desc *fw_overlay_mem);
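/* Editor's sketch (not part of the patch): the expected alloc -> init ->
 * free sequence for the three routines declared above:
 */
static int example_overlay_cycle(struct qed_hwfn *p_hwfn,
				 struct qed_ptt *p_ptt,
				 const u32 * const fw_overlay_buf,
				 u32 buf_size_in_bytes)
{
	struct phys_mem_desc *mem;

	mem = qed_fw_overlay_mem_alloc(p_hwfn, fw_overlay_buf,
				       buf_size_in_bytes);
	if (!mem)
		return -ENOMEM;

	qed_fw_overlay_init_ram(p_hwfn, p_ptt, mem);

	/* ... normal operation; the overlays stay resident ... */

	qed_fw_overlay_mem_free(p_hwfn, mem);
	return 0;
}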
/* Ystorm flow control mode. Use enum fw_flow_ctrl_mode */
#define YSTORM_FLOW_CONTROL_MODE_OFFSET (IRO[0].base)
@@ -4282,851 +4128,807 @@ void qed_set_rdma_error_level(struct qed_hwfn *p_hwfn,
(IRO[7].base + ((queue_zone_id) * IRO[7].m1))
#define USTORM_COMMON_QUEUE_CONS_SIZE (IRO[7].size)
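/* Editor's note (not part of the patch): each IRO entry is a RAM layout
 * descriptor, so an indexed OFFSET macro expands to base + index * stride
 * (plus a second stride for two-dimensional entries). For example:
 *
 *   USTORM_COMMON_QUEUE_CONS_OFFSET(qz) == IRO[7].base + qz * IRO[7].m1
 *
 * which is why the renumbering in the hunks below changes only the IRO
 * indices, never the arithmetic.
 */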
+/* Xstorm common PQ info */
+#define XSTORM_PQ_INFO_OFFSET(pq_id) \
+ (IRO[8].base + ((pq_id) * IRO[8].m1))
+#define XSTORM_PQ_INFO_SIZE (IRO[8].size)
+
/* Xstorm Integration Test Data */
-#define XSTORM_INTEG_TEST_DATA_OFFSET (IRO[8].base)
-#define XSTORM_INTEG_TEST_DATA_SIZE (IRO[8].size)
+#define XSTORM_INTEG_TEST_DATA_OFFSET (IRO[9].base)
+#define XSTORM_INTEG_TEST_DATA_SIZE (IRO[9].size)
/* Ystorm Integration Test Data */
-#define YSTORM_INTEG_TEST_DATA_OFFSET (IRO[9].base)
-#define YSTORM_INTEG_TEST_DATA_SIZE (IRO[9].size)
+#define YSTORM_INTEG_TEST_DATA_OFFSET (IRO[10].base)
+#define YSTORM_INTEG_TEST_DATA_SIZE (IRO[10].size)
/* Pstorm Integration Test Data */
-#define PSTORM_INTEG_TEST_DATA_OFFSET (IRO[10].base)
-#define PSTORM_INTEG_TEST_DATA_SIZE (IRO[10].size)
+#define PSTORM_INTEG_TEST_DATA_OFFSET (IRO[11].base)
+#define PSTORM_INTEG_TEST_DATA_SIZE (IRO[11].size)
/* Tstorm Integration Test Data */
-#define TSTORM_INTEG_TEST_DATA_OFFSET (IRO[11].base)
-#define TSTORM_INTEG_TEST_DATA_SIZE (IRO[11].size)
+#define TSTORM_INTEG_TEST_DATA_OFFSET (IRO[12].base)
+#define TSTORM_INTEG_TEST_DATA_SIZE (IRO[12].size)
/* Mstorm Integration Test Data */
-#define MSTORM_INTEG_TEST_DATA_OFFSET (IRO[12].base)
-#define MSTORM_INTEG_TEST_DATA_SIZE (IRO[12].size)
+#define MSTORM_INTEG_TEST_DATA_OFFSET (IRO[13].base)
+#define MSTORM_INTEG_TEST_DATA_SIZE (IRO[13].size)
/* Ustorm Integration Test Data */
-#define USTORM_INTEG_TEST_DATA_OFFSET (IRO[13].base)
-#define USTORM_INTEG_TEST_DATA_SIZE (IRO[13].size)
+#define USTORM_INTEG_TEST_DATA_OFFSET (IRO[14].base)
+#define USTORM_INTEG_TEST_DATA_SIZE (IRO[14].size)
+
+/* Xstorm overlay buffer host address */
+#define XSTORM_OVERLAY_BUF_ADDR_OFFSET (IRO[15].base)
+#define XSTORM_OVERLAY_BUF_ADDR_SIZE (IRO[15].size)
+
+/* Ystorm overlay buffer host address */
+#define YSTORM_OVERLAY_BUF_ADDR_OFFSET (IRO[16].base)
+#define YSTORM_OVERLAY_BUF_ADDR_SIZE (IRO[16].size)
+
+/* Pstorm overlay buffer host address */
+#define PSTORM_OVERLAY_BUF_ADDR_OFFSET (IRO[17].base)
+#define PSTORM_OVERLAY_BUF_ADDR_SIZE (IRO[17].size)
+
+/* Tstorm overlay buffer host address */
+#define TSTORM_OVERLAY_BUF_ADDR_OFFSET (IRO[18].base)
+#define TSTORM_OVERLAY_BUF_ADDR_SIZE (IRO[18].size)
+
+/* Mstorm overlay buffer host address */
+#define MSTORM_OVERLAY_BUF_ADDR_OFFSET (IRO[19].base)
+#define MSTORM_OVERLAY_BUF_ADDR_SIZE (IRO[19].size)
+
+/* Ustorm overlay buffer host address */
+#define USTORM_OVERLAY_BUF_ADDR_OFFSET (IRO[20].base)
+#define USTORM_OVERLAY_BUF_ADDR_SIZE (IRO[20].size)
/* Tstorm producers */
#define TSTORM_LL2_RX_PRODS_OFFSET(core_rx_queue_id) \
- (IRO[14].base + ((core_rx_queue_id) * IRO[14].m1))
-#define TSTORM_LL2_RX_PRODS_SIZE (IRO[14].size)
+ (IRO[21].base + ((core_rx_queue_id) * IRO[21].m1))
+#define TSTORM_LL2_RX_PRODS_SIZE (IRO[21].size)
/* Tstorm LightL2 queue statistics */
#define CORE_LL2_TSTORM_PER_QUEUE_STAT_OFFSET(core_rx_queue_id) \
- (IRO[15].base + ((core_rx_queue_id) * IRO[15].m1))
-#define CORE_LL2_TSTORM_PER_QUEUE_STAT_SIZE (IRO[15].size)
+ (IRO[22].base + ((core_rx_queue_id) * IRO[22].m1))
+#define CORE_LL2_TSTORM_PER_QUEUE_STAT_SIZE (IRO[22].size)
/* Ustorm LiteL2 queue statistics */
#define CORE_LL2_USTORM_PER_QUEUE_STAT_OFFSET(core_rx_queue_id) \
- (IRO[16].base + ((core_rx_queue_id) * IRO[16].m1))
-#define CORE_LL2_USTORM_PER_QUEUE_STAT_SIZE (IRO[16].size)
+ (IRO[23].base + ((core_rx_queue_id) * IRO[23].m1))
+#define CORE_LL2_USTORM_PER_QUEUE_STAT_SIZE (IRO[23].size)
/* Pstorm LiteL2 queue statistics */
#define CORE_LL2_PSTORM_PER_QUEUE_STAT_OFFSET(core_tx_stats_id) \
- (IRO[17].base + ((core_tx_stats_id) * IRO[17].m1))
-#define CORE_LL2_PSTORM_PER_QUEUE_STAT_SIZE (IRO[17].size)
+ (IRO[24].base + ((core_tx_stats_id) * IRO[24].m1))
+#define CORE_LL2_PSTORM_PER_QUEUE_STAT_SIZE (IRO[24].size)
/* Mstorm queue statistics */
#define MSTORM_QUEUE_STAT_OFFSET(stat_counter_id) \
- (IRO[18].base + ((stat_counter_id) * IRO[18].m1))
-#define MSTORM_QUEUE_STAT_SIZE (IRO[18].size)
+ (IRO[25].base + ((stat_counter_id) * IRO[25].m1))
+#define MSTORM_QUEUE_STAT_SIZE (IRO[25].size)
-/* Mstorm ETH PF queues producers */
-#define MSTORM_ETH_PF_PRODS_OFFSET(queue_id) \
- (IRO[19].base + ((queue_id) * IRO[19].m1))
-#define MSTORM_ETH_PF_PRODS_SIZE (IRO[19].size)
+/* TPA aggregation timeout in us resolution (on ASIC) */
+#define MSTORM_TPA_TIMEOUT_US_OFFSET (IRO[26].base)
+#define MSTORM_TPA_TIMEOUT_US_SIZE (IRO[26].size)
/* Mstorm ETH VF queues producers offset in RAM. Used in default VF zone size
- * mode.
+ * mode
*/
#define MSTORM_ETH_VF_PRODS_OFFSET(vf_id, vf_queue_id) \
- (IRO[20].base + ((vf_id) * IRO[20].m1) + ((vf_queue_id) * IRO[20].m2))
-#define MSTORM_ETH_VF_PRODS_SIZE (IRO[20].size)
+ (IRO[27].base + ((vf_id) * IRO[27].m1) + ((vf_queue_id) * IRO[27].m2))
+#define MSTORM_ETH_VF_PRODS_SIZE (IRO[27].size)
-/* TPA agregation timeout in us resolution (on ASIC) */
-#define MSTORM_TPA_TIMEOUT_US_OFFSET (IRO[21].base)
-#define MSTORM_TPA_TIMEOUT_US_SIZE (IRO[21].size)
+/* Mstorm ETH PF queues producers */
+#define MSTORM_ETH_PF_PRODS_OFFSET(queue_id) \
+ (IRO[28].base + ((queue_id) * IRO[28].m1))
+#define MSTORM_ETH_PF_PRODS_SIZE (IRO[28].size)
/* Mstorm pf statistics */
#define MSTORM_ETH_PF_STAT_OFFSET(pf_id) \
- (IRO[22].base + ((pf_id) * IRO[22].m1))
-#define MSTORM_ETH_PF_STAT_SIZE (IRO[22].size)
+ (IRO[29].base + ((pf_id) * IRO[29].m1))
+#define MSTORM_ETH_PF_STAT_SIZE (IRO[29].size)
/* Ustorm queue statistics */
#define USTORM_QUEUE_STAT_OFFSET(stat_counter_id) \
- (IRO[23].base + ((stat_counter_id) * IRO[23].m1))
-#define USTORM_QUEUE_STAT_SIZE (IRO[23].size)
+ (IRO[30].base + ((stat_counter_id) * IRO[30].m1))
+#define USTORM_QUEUE_STAT_SIZE (IRO[30].size)
/* Ustorm pf statistics */
-#define USTORM_ETH_PF_STAT_OFFSET(pf_id)\
- (IRO[24].base + ((pf_id) * IRO[24].m1))
-#define USTORM_ETH_PF_STAT_SIZE (IRO[24].size)
+#define USTORM_ETH_PF_STAT_OFFSET(pf_id) \
+ (IRO[31].base + ((pf_id) * IRO[31].m1))
+#define USTORM_ETH_PF_STAT_SIZE (IRO[31].size)
/* Pstorm queue statistics */
-#define PSTORM_QUEUE_STAT_OFFSET(stat_counter_id) \
- (IRO[25].base + ((stat_counter_id) * IRO[25].m1))
-#define PSTORM_QUEUE_STAT_SIZE (IRO[25].size)
+#define PSTORM_QUEUE_STAT_OFFSET(stat_counter_id) \
+ (IRO[32].base + ((stat_counter_id) * IRO[32].m1))
+#define PSTORM_QUEUE_STAT_SIZE (IRO[32].size)
/* Pstorm pf statistics */
#define PSTORM_ETH_PF_STAT_OFFSET(pf_id) \
- (IRO[26].base + ((pf_id) * IRO[26].m1))
-#define PSTORM_ETH_PF_STAT_SIZE (IRO[26].size)
+ (IRO[33].base + ((pf_id) * IRO[33].m1))
+#define PSTORM_ETH_PF_STAT_SIZE (IRO[33].size)
/* Control frame's EthType configuration for TX control frame security */
-#define PSTORM_CTL_FRAME_ETHTYPE_OFFSET(eth_type_id) \
- (IRO[27].base + ((eth_type_id) * IRO[27].m1))
-#define PSTORM_CTL_FRAME_ETHTYPE_SIZE (IRO[27].size)
+#define PSTORM_CTL_FRAME_ETHTYPE_OFFSET(eth_type_id) \
+ (IRO[34].base + ((eth_type_id) * IRO[34].m1))
+#define PSTORM_CTL_FRAME_ETHTYPE_SIZE (IRO[34].size)
/* Tstorm last parser message */
-#define TSTORM_ETH_PRS_INPUT_OFFSET (IRO[28].base)
-#define TSTORM_ETH_PRS_INPUT_SIZE (IRO[28].size)
+#define TSTORM_ETH_PRS_INPUT_OFFSET (IRO[35].base)
+#define TSTORM_ETH_PRS_INPUT_SIZE (IRO[35].size)
/* Tstorm Eth limit Rx rate */
-#define ETH_RX_RATE_LIMIT_OFFSET(pf_id) \
- (IRO[29].base + ((pf_id) * IRO[29].m1))
-#define ETH_RX_RATE_LIMIT_SIZE (IRO[29].size)
+#define ETH_RX_RATE_LIMIT_OFFSET(pf_id) \
+ (IRO[36].base + ((pf_id) * IRO[36].m1))
+#define ETH_RX_RATE_LIMIT_SIZE (IRO[36].size)
/* RSS indirection table entry update command per PF offset in TSTORM PF BAR0.
- * Use eth_tstorm_rss_update_data for update.
+ * Use eth_tstorm_rss_update_data for update
*/
#define TSTORM_ETH_RSS_UPDATE_OFFSET(pf_id) \
- (IRO[30].base + ((pf_id) * IRO[30].m1))
-#define TSTORM_ETH_RSS_UPDATE_SIZE (IRO[30].size)
+ (IRO[37].base + ((pf_id) * IRO[37].m1))
+#define TSTORM_ETH_RSS_UPDATE_SIZE (IRO[37].size)
/* Xstorm queue zone */
#define XSTORM_ETH_QUEUE_ZONE_OFFSET(queue_id) \
- (IRO[31].base + ((queue_id) * IRO[31].m1))
-#define XSTORM_ETH_QUEUE_ZONE_SIZE (IRO[31].size)
+ (IRO[38].base + ((queue_id) * IRO[38].m1))
+#define XSTORM_ETH_QUEUE_ZONE_SIZE (IRO[38].size)
/* Ystorm cqe producer */
#define YSTORM_TOE_CQ_PROD_OFFSET(rss_id) \
- (IRO[32].base + ((rss_id) * IRO[32].m1))
-#define YSTORM_TOE_CQ_PROD_SIZE (IRO[32].size)
+ (IRO[39].base + ((rss_id) * IRO[39].m1))
+#define YSTORM_TOE_CQ_PROD_SIZE (IRO[39].size)
/* Ustorm cqe producer */
#define USTORM_TOE_CQ_PROD_OFFSET(rss_id) \
- (IRO[33].base + ((rss_id) * IRO[33].m1))
-#define USTORM_TOE_CQ_PROD_SIZE (IRO[33].size)
+ (IRO[40].base + ((rss_id) * IRO[40].m1))
+#define USTORM_TOE_CQ_PROD_SIZE (IRO[40].size)
/* Ustorm grq producer */
#define USTORM_TOE_GRQ_PROD_OFFSET(pf_id) \
- (IRO[34].base + ((pf_id) * IRO[34].m1))
-#define USTORM_TOE_GRQ_PROD_SIZE (IRO[34].size)
+ (IRO[41].base + ((pf_id) * IRO[41].m1))
+#define USTORM_TOE_GRQ_PROD_SIZE (IRO[41].size)
/* Tstorm cmdq-cons of given command queue-id */
#define TSTORM_SCSI_CMDQ_CONS_OFFSET(cmdq_queue_id) \
- (IRO[35].base + ((cmdq_queue_id) * IRO[35].m1))
-#define TSTORM_SCSI_CMDQ_CONS_SIZE (IRO[35].size)
+ (IRO[42].base + ((cmdq_queue_id) * IRO[42].m1))
+#define TSTORM_SCSI_CMDQ_CONS_SIZE (IRO[42].size)
/* Tstorm (reflects M-Storm) bdq-external-producer of given function ID,
- * BDqueue-id.
+ * BDqueue-id
*/
-#define TSTORM_SCSI_BDQ_EXT_PROD_OFFSET(func_id, bdq_id) \
- (IRO[36].base + ((func_id) * IRO[36].m1) + ((bdq_id) * IRO[36].m2))
-#define TSTORM_SCSI_BDQ_EXT_PROD_SIZE (IRO[36].size)
+#define TSTORM_SCSI_BDQ_EXT_PROD_OFFSET(storage_func_id, bdq_id) \
+ (IRO[43].base + ((storage_func_id) * IRO[43].m1) + \
+ ((bdq_id) * IRO[43].m2))
+#define TSTORM_SCSI_BDQ_EXT_PROD_SIZE (IRO[43].size)
/* Mstorm bdq-external-producer of given BDQ resource ID, BDqueue-id */
-#define MSTORM_SCSI_BDQ_EXT_PROD_OFFSET(func_id, bdq_id) \
- (IRO[37].base + ((func_id) * IRO[37].m1) + ((bdq_id) * IRO[37].m2))
-#define MSTORM_SCSI_BDQ_EXT_PROD_SIZE (IRO[37].size)
+#define MSTORM_SCSI_BDQ_EXT_PROD_OFFSET(storage_func_id, bdq_id) \
+ (IRO[44].base + ((storage_func_id) * IRO[44].m1) + \
+ ((bdq_id) * IRO[44].m2))
+#define MSTORM_SCSI_BDQ_EXT_PROD_SIZE (IRO[44].size)
/* Tstorm iSCSI RX stats */
-#define TSTORM_ISCSI_RX_STATS_OFFSET(pf_id) \
- (IRO[38].base + ((pf_id) * IRO[38].m1))
-#define TSTORM_ISCSI_RX_STATS_SIZE (IRO[38].size)
+#define TSTORM_ISCSI_RX_STATS_OFFSET(storage_func_id) \
+ (IRO[45].base + ((storage_func_id) * IRO[45].m1))
+#define TSTORM_ISCSI_RX_STATS_SIZE (IRO[45].size)
/* Mstorm iSCSI RX stats */
-#define MSTORM_ISCSI_RX_STATS_OFFSET(pf_id) \
- (IRO[39].base + ((pf_id) * IRO[39].m1))
-#define MSTORM_ISCSI_RX_STATS_SIZE (IRO[39].size)
+#define MSTORM_ISCSI_RX_STATS_OFFSET(storage_func_id) \
+ (IRO[46].base + ((storage_func_id) * IRO[46].m1))
+#define MSTORM_ISCSI_RX_STATS_SIZE (IRO[46].size)
/* Ustorm iSCSI RX stats */
-#define USTORM_ISCSI_RX_STATS_OFFSET(pf_id) \
- (IRO[40].base + ((pf_id) * IRO[40].m1))
-#define USTORM_ISCSI_RX_STATS_SIZE (IRO[40].size)
+#define USTORM_ISCSI_RX_STATS_OFFSET(storage_func_id) \
+ (IRO[47].base + ((storage_func_id) * IRO[47].m1))
+#define USTORM_ISCSI_RX_STATS_SIZE (IRO[47].size)
/* Xstorm iSCSI TX stats */
-#define XSTORM_ISCSI_TX_STATS_OFFSET(pf_id) \
- (IRO[41].base + ((pf_id) * IRO[41].m1))
-#define XSTORM_ISCSI_TX_STATS_SIZE (IRO[41].size)
+#define XSTORM_ISCSI_TX_STATS_OFFSET(storage_func_id) \
+ (IRO[48].base + ((storage_func_id) * IRO[48].m1))
+#define XSTORM_ISCSI_TX_STATS_SIZE (IRO[48].size)
/* Ystorm iSCSI TX stats */
-#define YSTORM_ISCSI_TX_STATS_OFFSET(pf_id) \
- (IRO[42].base + ((pf_id) * IRO[42].m1))
-#define YSTORM_ISCSI_TX_STATS_SIZE (IRO[42].size)
+#define YSTORM_ISCSI_TX_STATS_OFFSET(storage_func_id) \
+ (IRO[49].base + ((storage_func_id) * IRO[49].m1))
+#define YSTORM_ISCSI_TX_STATS_SIZE (IRO[49].size)
/* Pstorm iSCSI TX stats */
-#define PSTORM_ISCSI_TX_STATS_OFFSET(pf_id) \
- (IRO[43].base + ((pf_id) * IRO[43].m1))
-#define PSTORM_ISCSI_TX_STATS_SIZE (IRO[43].size)
+#define PSTORM_ISCSI_TX_STATS_OFFSET(storage_func_id) \
+ (IRO[50].base + ((storage_func_id) * IRO[50].m1))
+#define PSTORM_ISCSI_TX_STATS_SIZE (IRO[50].size)
/* Tstorm FCoE RX stats */
#define TSTORM_FCOE_RX_STATS_OFFSET(pf_id) \
- (IRO[44].base + ((pf_id) * IRO[44].m1))
-#define TSTORM_FCOE_RX_STATS_SIZE (IRO[44].size)
+ (IRO[51].base + ((pf_id) * IRO[51].m1))
+#define TSTORM_FCOE_RX_STATS_SIZE (IRO[51].size)
/* Pstorm FCoE TX stats */
#define PSTORM_FCOE_TX_STATS_OFFSET(pf_id) \
- (IRO[45].base + ((pf_id) * IRO[45].m1))
-#define PSTORM_FCOE_TX_STATS_SIZE (IRO[45].size)
+ (IRO[52].base + ((pf_id) * IRO[52].m1))
+#define PSTORM_FCOE_TX_STATS_SIZE (IRO[52].size)
/* Pstorm RDMA queue statistics */
#define PSTORM_RDMA_QUEUE_STAT_OFFSET(rdma_stat_counter_id) \
- (IRO[46].base + ((rdma_stat_counter_id) * IRO[46].m1))
-#define PSTORM_RDMA_QUEUE_STAT_SIZE (IRO[46].size)
+ (IRO[53].base + ((rdma_stat_counter_id) * IRO[53].m1))
+#define PSTORM_RDMA_QUEUE_STAT_SIZE (IRO[53].size)
/* Tstorm RDMA queue statistics */
#define TSTORM_RDMA_QUEUE_STAT_OFFSET(rdma_stat_counter_id) \
- (IRO[47].base + ((rdma_stat_counter_id) * IRO[47].m1))
-#define TSTORM_RDMA_QUEUE_STAT_SIZE (IRO[47].size)
+ (IRO[54].base + ((rdma_stat_counter_id) * IRO[54].m1))
+#define TSTORM_RDMA_QUEUE_STAT_SIZE (IRO[54].size)
/* Xstorm error level for assert */
#define XSTORM_RDMA_ASSERT_LEVEL_OFFSET(pf_id) \
- (IRO[48].base + ((pf_id) * IRO[48].m1))
-#define XSTORM_RDMA_ASSERT_LEVEL_SIZE (IRO[48].size)
+ (IRO[55].base + ((pf_id) * IRO[55].m1))
+#define XSTORM_RDMA_ASSERT_LEVEL_SIZE (IRO[55].size)
/* Ystorm error level for assert */
#define YSTORM_RDMA_ASSERT_LEVEL_OFFSET(pf_id) \
- (IRO[49].base + ((pf_id) * IRO[49].m1))
-#define YSTORM_RDMA_ASSERT_LEVEL_SIZE (IRO[49].size)
+ (IRO[56].base + ((pf_id) * IRO[56].m1))
+#define YSTORM_RDMA_ASSERT_LEVEL_SIZE (IRO[56].size)
/* Pstorm error level for assert */
#define PSTORM_RDMA_ASSERT_LEVEL_OFFSET(pf_id) \
- (IRO[50].base + ((pf_id) * IRO[50].m1))
-#define PSTORM_RDMA_ASSERT_LEVEL_SIZE (IRO[50].size)
+ (IRO[57].base + ((pf_id) * IRO[57].m1))
+#define PSTORM_RDMA_ASSERT_LEVEL_SIZE (IRO[57].size)
/* Tstorm error level for assert */
#define TSTORM_RDMA_ASSERT_LEVEL_OFFSET(pf_id) \
- (IRO[51].base + ((pf_id) * IRO[51].m1))
-#define TSTORM_RDMA_ASSERT_LEVEL_SIZE (IRO[51].size)
+ (IRO[58].base + ((pf_id) * IRO[58].m1))
+#define TSTORM_RDMA_ASSERT_LEVEL_SIZE (IRO[58].size)
/* Mstorm error level for assert */
#define MSTORM_RDMA_ASSERT_LEVEL_OFFSET(pf_id) \
- (IRO[52].base + ((pf_id) * IRO[52].m1))
-#define MSTORM_RDMA_ASSERT_LEVEL_SIZE (IRO[52].size)
+ (IRO[59].base + ((pf_id) * IRO[59].m1))
+#define MSTORM_RDMA_ASSERT_LEVEL_SIZE (IRO[59].size)
/* Ustorm error level for assert */
#define USTORM_RDMA_ASSERT_LEVEL_OFFSET(pf_id) \
- (IRO[53].base + ((pf_id) * IRO[53].m1))
-#define USTORM_RDMA_ASSERT_LEVEL_SIZE (IRO[53].size)
+ (IRO[60].base + ((pf_id) * IRO[60].m1))
+#define USTORM_RDMA_ASSERT_LEVEL_SIZE (IRO[60].size)
/* Xstorm iWARP rxmit stats */
#define XSTORM_IWARP_RXMIT_STATS_OFFSET(pf_id) \
- (IRO[54].base + ((pf_id) * IRO[54].m1))
-#define XSTORM_IWARP_RXMIT_STATS_SIZE (IRO[54].size)
+ (IRO[61].base + ((pf_id) * IRO[61].m1))
+#define XSTORM_IWARP_RXMIT_STATS_SIZE (IRO[61].size)
/* Tstorm RoCE Event Statistics */
-#define TSTORM_ROCE_EVENTS_STAT_OFFSET(roce_pf_id) \
- (IRO[55].base + ((roce_pf_id) * IRO[55].m1))
-#define TSTORM_ROCE_EVENTS_STAT_SIZE (IRO[55].size)
+#define TSTORM_ROCE_EVENTS_STAT_OFFSET(roce_pf_id) \
+ (IRO[62].base + ((roce_pf_id) * IRO[62].m1))
+#define TSTORM_ROCE_EVENTS_STAT_SIZE (IRO[62].size)
/* DCQCN Received Statistics */
-#define YSTORM_ROCE_DCQCN_RECEIVED_STATS_OFFSET(roce_pf_id) \
- (IRO[56].base + ((roce_pf_id) * IRO[56].m1))
-#define YSTORM_ROCE_DCQCN_RECEIVED_STATS_SIZE (IRO[56].size)
+#define YSTORM_ROCE_DCQCN_RECEIVED_STATS_OFFSET(roce_pf_id) \
+ (IRO[63].base + ((roce_pf_id) * IRO[63].m1))
+#define YSTORM_ROCE_DCQCN_RECEIVED_STATS_SIZE (IRO[63].size)
/* RoCE Error Statistics */
-#define YSTORM_ROCE_ERROR_STATS_OFFSET(roce_pf_id) \
- (IRO[57].base + ((roce_pf_id) * IRO[57].m1))
-#define YSTORM_ROCE_ERROR_STATS_SIZE (IRO[57].size)
+#define YSTORM_ROCE_ERROR_STATS_OFFSET(roce_pf_id) \
+ (IRO[64].base + ((roce_pf_id) * IRO[64].m1))
+#define YSTORM_ROCE_ERROR_STATS_SIZE (IRO[64].size)
/* DCQCN Sent Statistics */
-#define PSTORM_ROCE_DCQCN_SENT_STATS_OFFSET(roce_pf_id) \
- (IRO[58].base + ((roce_pf_id) * IRO[58].m1))
-#define PSTORM_ROCE_DCQCN_SENT_STATS_SIZE (IRO[58].size)
+#define PSTORM_ROCE_DCQCN_SENT_STATS_OFFSET(roce_pf_id) \
+ (IRO[65].base + ((roce_pf_id) * IRO[65].m1))
+#define PSTORM_ROCE_DCQCN_SENT_STATS_SIZE (IRO[65].size)
/* RoCE CQEs Statistics */
-#define USTORM_ROCE_CQE_STATS_OFFSET(roce_pf_id) \
- (IRO[59].base + ((roce_pf_id) * IRO[59].m1))
-#define USTORM_ROCE_CQE_STATS_SIZE (IRO[59].size)
-
-static const struct iro iro_arr[60] = {
- {0x0, 0x0, 0x0, 0x0, 0x8},
- {0x4cb8, 0x88, 0x0, 0x0, 0x88},
- {0x6530, 0x20, 0x0, 0x0, 0x20},
- {0xb00, 0x8, 0x0, 0x0, 0x4},
- {0xa80, 0x8, 0x0, 0x0, 0x4},
- {0x0, 0x8, 0x0, 0x0, 0x2},
- {0x80, 0x8, 0x0, 0x0, 0x4},
- {0x84, 0x8, 0x0, 0x0, 0x2},
- {0x4c48, 0x0, 0x0, 0x0, 0x78},
- {0x3e38, 0x0, 0x0, 0x0, 0x78},
- {0x3ef8, 0x0, 0x0, 0x0, 0x78},
- {0x4c40, 0x0, 0x0, 0x0, 0x78},
- {0x4998, 0x0, 0x0, 0x0, 0x78},
- {0x7f50, 0x0, 0x0, 0x0, 0x78},
- {0xa28, 0x8, 0x0, 0x0, 0x8},
- {0x6210, 0x10, 0x0, 0x0, 0x10},
- {0xb820, 0x30, 0x0, 0x0, 0x30},
- {0xa990, 0x30, 0x0, 0x0, 0x30},
- {0x4b68, 0x80, 0x0, 0x0, 0x40},
- {0x1f8, 0x4, 0x0, 0x0, 0x4},
- {0x53a8, 0x80, 0x4, 0x0, 0x4},
- {0xc7d0, 0x0, 0x0, 0x0, 0x4},
- {0x4ba8, 0x80, 0x0, 0x0, 0x20},
- {0x8158, 0x40, 0x0, 0x0, 0x30},
- {0xe770, 0x60, 0x0, 0x0, 0x60},
- {0x4090, 0x80, 0x0, 0x0, 0x38},
- {0xfea8, 0x78, 0x0, 0x0, 0x78},
- {0x1f8, 0x4, 0x0, 0x0, 0x4},
- {0xaf20, 0x0, 0x0, 0x0, 0xf0},
- {0xb010, 0x8, 0x0, 0x0, 0x8},
- {0xc00, 0x8, 0x0, 0x0, 0x8},
- {0x1f8, 0x8, 0x0, 0x0, 0x8},
- {0xac0, 0x8, 0x0, 0x0, 0x8},
- {0x2578, 0x8, 0x0, 0x0, 0x8},
- {0x24f8, 0x8, 0x0, 0x0, 0x8},
- {0x0, 0x8, 0x0, 0x0, 0x8},
- {0x400, 0x18, 0x8, 0x0, 0x8},
- {0xb78, 0x18, 0x8, 0x0, 0x2},
- {0xd898, 0x50, 0x0, 0x0, 0x3c},
- {0x12908, 0x18, 0x0, 0x0, 0x10},
- {0x11aa8, 0x40, 0x0, 0x0, 0x18},
- {0xa588, 0x50, 0x0, 0x0, 0x20},
- {0x8f00, 0x40, 0x0, 0x0, 0x28},
- {0x10e30, 0x18, 0x0, 0x0, 0x10},
- {0xde48, 0x48, 0x0, 0x0, 0x38},
- {0x11298, 0x20, 0x0, 0x0, 0x20},
- {0x40c8, 0x80, 0x0, 0x0, 0x10},
- {0x5048, 0x10, 0x0, 0x0, 0x10},
- {0xc748, 0x8, 0x0, 0x0, 0x1},
- {0xa928, 0x8, 0x0, 0x0, 0x1},
- {0x11a30, 0x8, 0x0, 0x0, 0x1},
- {0xf030, 0x8, 0x0, 0x0, 0x1},
- {0x13028, 0x8, 0x0, 0x0, 0x1},
- {0x12c58, 0x8, 0x0, 0x0, 0x1},
- {0xc9b8, 0x30, 0x0, 0x0, 0x10},
- {0xed90, 0x28, 0x0, 0x0, 0x28},
- {0xad20, 0x18, 0x0, 0x0, 0x18},
- {0xaea0, 0x8, 0x0, 0x0, 0x8},
- {0x13c38, 0x8, 0x0, 0x0, 0x8},
- {0x13c50, 0x18, 0x0, 0x0, 0x18},
+#define USTORM_ROCE_CQE_STATS_OFFSET(roce_pf_id) \
+ (IRO[66].base + ((roce_pf_id) * IRO[66].m1))
+#define USTORM_ROCE_CQE_STATS_SIZE (IRO[66].size)
+
+/* IRO Array */
+static const u32 iro_arr[] = {
+ 0x00000000, 0x00000000, 0x00080000,
+ 0x00003288, 0x00000088, 0x00880000,
+ 0x000058e8, 0x00000020, 0x00200000,
+ 0x00000b00, 0x00000008, 0x00040000,
+ 0x00000a80, 0x00000008, 0x00040000,
+ 0x00000000, 0x00000008, 0x00020000,
+ 0x00000080, 0x00000008, 0x00040000,
+ 0x00000084, 0x00000008, 0x00020000,
+ 0x00005718, 0x00000004, 0x00040000,
+ 0x00004dd0, 0x00000000, 0x00780000,
+ 0x00003e40, 0x00000000, 0x00780000,
+ 0x00004480, 0x00000000, 0x00780000,
+ 0x00003210, 0x00000000, 0x00780000,
+ 0x00003b50, 0x00000000, 0x00780000,
+ 0x00007f58, 0x00000000, 0x00780000,
+ 0x00005f58, 0x00000000, 0x00080000,
+ 0x00007100, 0x00000000, 0x00080000,
+ 0x0000aea0, 0x00000000, 0x00080000,
+ 0x00004398, 0x00000000, 0x00080000,
+ 0x0000a5a0, 0x00000000, 0x00080000,
+ 0x0000bde8, 0x00000000, 0x00080000,
+ 0x00000020, 0x00000004, 0x00040000,
+ 0x000056c8, 0x00000010, 0x00100000,
+ 0x0000c210, 0x00000030, 0x00300000,
+ 0x0000b088, 0x00000038, 0x00380000,
+ 0x00003d20, 0x00000080, 0x00400000,
+ 0x0000bf60, 0x00000000, 0x00040000,
+ 0x00004560, 0x00040080, 0x00040000,
+ 0x000001f8, 0x00000004, 0x00040000,
+ 0x00003d60, 0x00000080, 0x00200000,
+ 0x00008960, 0x00000040, 0x00300000,
+ 0x0000e840, 0x00000060, 0x00600000,
+ 0x00004618, 0x00000080, 0x00380000,
+ 0x00010738, 0x000000c0, 0x00c00000,
+ 0x000001f8, 0x00000002, 0x00020000,
+ 0x0000a2a0, 0x00000000, 0x01080000,
+ 0x0000a3a8, 0x00000008, 0x00080000,
+ 0x000001c0, 0x00000008, 0x00080000,
+ 0x000001f8, 0x00000008, 0x00080000,
+ 0x00000ac0, 0x00000008, 0x00080000,
+ 0x00002578, 0x00000008, 0x00080000,
+ 0x000024f8, 0x00000008, 0x00080000,
+ 0x00000280, 0x00000008, 0x00080000,
+ 0x00000680, 0x00080018, 0x00080000,
+ 0x00000b78, 0x00080018, 0x00020000,
+ 0x0000c640, 0x00000050, 0x003c0000,
+ 0x00012038, 0x00000018, 0x00100000,
+ 0x00011b00, 0x00000040, 0x00180000,
+ 0x000095d0, 0x00000050, 0x00200000,
+ 0x00008b10, 0x00000040, 0x00280000,
+ 0x00011640, 0x00000018, 0x00100000,
+ 0x0000c828, 0x00000048, 0x00380000,
+ 0x00011710, 0x00000020, 0x00200000,
+ 0x00004650, 0x00000080, 0x00100000,
+ 0x00003618, 0x00000010, 0x00100000,
+ 0x0000a968, 0x00000008, 0x00010000,
+ 0x000097a0, 0x00000008, 0x00010000,
+ 0x00011990, 0x00000008, 0x00010000,
+ 0x0000f018, 0x00000008, 0x00010000,
+ 0x00012628, 0x00000008, 0x00010000,
+ 0x00011da8, 0x00000008, 0x00010000,
+ 0x0000aa78, 0x00000030, 0x00100000,
+ 0x0000d768, 0x00000028, 0x00280000,
+ 0x00009a58, 0x00000018, 0x00180000,
+ 0x00009bd8, 0x00000008, 0x00080000,
+ 0x00013a18, 0x00000008, 0x00080000,
+ 0x000126e8, 0x00000018, 0x00180000,
+ 0x0000e608, 0x00500288, 0x00100000,
+ 0x00012970, 0x00000138, 0x00280000,
};
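The IRO table also changes representation here: the old struct iro initializer (five fields per entry) becomes a flat u32 table with three words per entry. Lining up entries whose strides and sizes carry over from the old initializer (for example {0x53a8, 0x80, 0x4, 0x0, 0x4} against 0x00004560, 0x00040080, 0x00040000) suggests the packing {base, (m2 << 16) | m1, (size << 16) | m3}. The sketch below assumes that layout; the unpack helper and field comments are illustrative, not the driver's actual API.

#include <stdint.h>

typedef uint32_t u32;
typedef uint16_t u16;

/* Assumed layout of one packed entry: {base, (m2 << 16) | m1,
 * (size << 16) | m3}, inferred from entries whose strides and sizes
 * carry over from the old struct initializer. Illustrative only. */
struct iro {
	u32 base;	/* region base in storm RAM */
	u16 m1;		/* primary per-instance stride */
	u16 m2;		/* secondary stride */
	u16 m3;		/* tertiary stride */
	u16 size;	/* element size in bytes */
};

static struct iro unpack_iro(const u32 *packed, unsigned int idx)
{
	const u32 *p = packed + 3 * idx;	/* three words per entry */

	return (struct iro) {
		.base = p[0],
		.m1   = (u16)(p[1] & 0xffff),
		.m2   = (u16)(p[1] >> 16),
		.m3   = (u16)(p[2] & 0xffff),
		.size = (u16)(p[2] >> 16),
	};
}

Unpacked this way, entry 47 feeds USTORM_ISCSI_RX_STATS_OFFSET(storage_func_id) exactly as before: base plus storage_func_id times m1.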
/* Runtime array offsets */
-#define DORQ_REG_PF_MAX_ICID_0_RT_OFFSET 0
-#define DORQ_REG_PF_MAX_ICID_1_RT_OFFSET 1
-#define DORQ_REG_PF_MAX_ICID_2_RT_OFFSET 2
-#define DORQ_REG_PF_MAX_ICID_3_RT_OFFSET 3
-#define DORQ_REG_PF_MAX_ICID_4_RT_OFFSET 4
-#define DORQ_REG_PF_MAX_ICID_5_RT_OFFSET 5
-#define DORQ_REG_PF_MAX_ICID_6_RT_OFFSET 6
-#define DORQ_REG_PF_MAX_ICID_7_RT_OFFSET 7
-#define DORQ_REG_VF_MAX_ICID_0_RT_OFFSET 8
-#define DORQ_REG_VF_MAX_ICID_1_RT_OFFSET 9
-#define DORQ_REG_VF_MAX_ICID_2_RT_OFFSET 10
-#define DORQ_REG_VF_MAX_ICID_3_RT_OFFSET 11
-#define DORQ_REG_VF_MAX_ICID_4_RT_OFFSET 12
-#define DORQ_REG_VF_MAX_ICID_5_RT_OFFSET 13
-#define DORQ_REG_VF_MAX_ICID_6_RT_OFFSET 14
-#define DORQ_REG_VF_MAX_ICID_7_RT_OFFSET 15
-#define DORQ_REG_PF_WAKE_ALL_RT_OFFSET 16
-#define DORQ_REG_TAG1_ETHERTYPE_RT_OFFSET 17
-#define DORQ_REG_GLB_MAX_ICID_0_RT_OFFSET 18
-#define DORQ_REG_GLB_MAX_ICID_1_RT_OFFSET 19
-#define DORQ_REG_GLB_RANGE2CONN_TYPE_0_RT_OFFSET 20
-#define DORQ_REG_GLB_RANGE2CONN_TYPE_1_RT_OFFSET 21
-#define DORQ_REG_PRV_PF_MAX_ICID_2_RT_OFFSET 22
-#define DORQ_REG_PRV_PF_MAX_ICID_3_RT_OFFSET 23
-#define DORQ_REG_PRV_PF_MAX_ICID_4_RT_OFFSET 24
-#define DORQ_REG_PRV_PF_MAX_ICID_5_RT_OFFSET 25
-#define DORQ_REG_PRV_VF_MAX_ICID_2_RT_OFFSET 26
-#define DORQ_REG_PRV_VF_MAX_ICID_3_RT_OFFSET 27
-#define DORQ_REG_PRV_VF_MAX_ICID_4_RT_OFFSET 28
-#define DORQ_REG_PRV_VF_MAX_ICID_5_RT_OFFSET 29
-#define DORQ_REG_PRV_PF_RANGE2CONN_TYPE_2_RT_OFFSET 30
-#define DORQ_REG_PRV_PF_RANGE2CONN_TYPE_3_RT_OFFSET 31
-#define DORQ_REG_PRV_PF_RANGE2CONN_TYPE_4_RT_OFFSET 32
-#define DORQ_REG_PRV_PF_RANGE2CONN_TYPE_5_RT_OFFSET 33
-#define DORQ_REG_PRV_VF_RANGE2CONN_TYPE_2_RT_OFFSET 34
-#define DORQ_REG_PRV_VF_RANGE2CONN_TYPE_3_RT_OFFSET 35
-#define DORQ_REG_PRV_VF_RANGE2CONN_TYPE_4_RT_OFFSET 36
-#define DORQ_REG_PRV_VF_RANGE2CONN_TYPE_5_RT_OFFSET 37
-#define IGU_REG_PF_CONFIGURATION_RT_OFFSET 38
-#define IGU_REG_VF_CONFIGURATION_RT_OFFSET 39
-#define IGU_REG_ATTN_MSG_ADDR_L_RT_OFFSET 40
-#define IGU_REG_ATTN_MSG_ADDR_H_RT_OFFSET 41
-#define IGU_REG_LEADING_EDGE_LATCH_RT_OFFSET 42
-#define IGU_REG_TRAILING_EDGE_LATCH_RT_OFFSET 43
-#define CAU_REG_CQE_AGG_UNIT_SIZE_RT_OFFSET 44
-#define CAU_REG_SB_VAR_MEMORY_RT_OFFSET 45
-#define CAU_REG_SB_VAR_MEMORY_RT_SIZE 1024
-#define CAU_REG_SB_ADDR_MEMORY_RT_OFFSET 1069
-#define CAU_REG_SB_ADDR_MEMORY_RT_SIZE 1024
-#define CAU_REG_PI_MEMORY_RT_OFFSET 2093
-#define CAU_REG_PI_MEMORY_RT_SIZE 4416
-#define PRS_REG_SEARCH_RESP_INITIATOR_TYPE_RT_OFFSET 6509
-#define PRS_REG_TASK_ID_MAX_INITIATOR_PF_RT_OFFSET 6510
-#define PRS_REG_TASK_ID_MAX_INITIATOR_VF_RT_OFFSET 6511
-#define PRS_REG_TASK_ID_MAX_TARGET_PF_RT_OFFSET 6512
-#define PRS_REG_TASK_ID_MAX_TARGET_VF_RT_OFFSET 6513
-#define PRS_REG_SEARCH_TCP_RT_OFFSET 6514
-#define PRS_REG_SEARCH_FCOE_RT_OFFSET 6515
-#define PRS_REG_SEARCH_ROCE_RT_OFFSET 6516
-#define PRS_REG_ROCE_DEST_QP_MAX_VF_RT_OFFSET 6517
-#define PRS_REG_ROCE_DEST_QP_MAX_PF_RT_OFFSET 6518
-#define PRS_REG_SEARCH_OPENFLOW_RT_OFFSET 6519
-#define PRS_REG_SEARCH_NON_IP_AS_OPENFLOW_RT_OFFSET 6520
-#define PRS_REG_OPENFLOW_SUPPORT_ONLY_KNOWN_OVER_IP_RT_OFFSET 6521
-#define PRS_REG_OPENFLOW_SEARCH_KEY_MASK_RT_OFFSET 6522
-#define PRS_REG_TAG_ETHERTYPE_0_RT_OFFSET 6523
-#define PRS_REG_LIGHT_L2_ETHERTYPE_EN_RT_OFFSET 6524
-#define SRC_REG_FIRSTFREE_RT_OFFSET 6525
-#define SRC_REG_FIRSTFREE_RT_SIZE 2
-#define SRC_REG_LASTFREE_RT_OFFSET 6527
-#define SRC_REG_LASTFREE_RT_SIZE 2
-#define SRC_REG_COUNTFREE_RT_OFFSET 6529
-#define SRC_REG_NUMBER_HASH_BITS_RT_OFFSET 6530
-#define PSWRQ2_REG_CDUT_P_SIZE_RT_OFFSET 6531
-#define PSWRQ2_REG_CDUC_P_SIZE_RT_OFFSET 6532
-#define PSWRQ2_REG_TM_P_SIZE_RT_OFFSET 6533
-#define PSWRQ2_REG_QM_P_SIZE_RT_OFFSET 6534
-#define PSWRQ2_REG_SRC_P_SIZE_RT_OFFSET 6535
-#define PSWRQ2_REG_TSDM_P_SIZE_RT_OFFSET 6536
-#define PSWRQ2_REG_TM_FIRST_ILT_RT_OFFSET 6537
-#define PSWRQ2_REG_TM_LAST_ILT_RT_OFFSET 6538
-#define PSWRQ2_REG_QM_FIRST_ILT_RT_OFFSET 6539
-#define PSWRQ2_REG_QM_LAST_ILT_RT_OFFSET 6540
-#define PSWRQ2_REG_SRC_FIRST_ILT_RT_OFFSET 6541
-#define PSWRQ2_REG_SRC_LAST_ILT_RT_OFFSET 6542
-#define PSWRQ2_REG_CDUC_FIRST_ILT_RT_OFFSET 6543
-#define PSWRQ2_REG_CDUC_LAST_ILT_RT_OFFSET 6544
-#define PSWRQ2_REG_CDUT_FIRST_ILT_RT_OFFSET 6545
-#define PSWRQ2_REG_CDUT_LAST_ILT_RT_OFFSET 6546
-#define PSWRQ2_REG_TSDM_FIRST_ILT_RT_OFFSET 6547
-#define PSWRQ2_REG_TSDM_LAST_ILT_RT_OFFSET 6548
-#define PSWRQ2_REG_TM_NUMBER_OF_PF_BLOCKS_RT_OFFSET 6549
-#define PSWRQ2_REG_CDUT_NUMBER_OF_PF_BLOCKS_RT_OFFSET 6550
-#define PSWRQ2_REG_CDUC_NUMBER_OF_PF_BLOCKS_RT_OFFSET 6551
-#define PSWRQ2_REG_TM_VF_BLOCKS_RT_OFFSET 6552
-#define PSWRQ2_REG_CDUT_VF_BLOCKS_RT_OFFSET 6553
-#define PSWRQ2_REG_CDUC_VF_BLOCKS_RT_OFFSET 6554
-#define PSWRQ2_REG_TM_BLOCKS_FACTOR_RT_OFFSET 6555
-#define PSWRQ2_REG_CDUT_BLOCKS_FACTOR_RT_OFFSET 6556
-#define PSWRQ2_REG_CDUC_BLOCKS_FACTOR_RT_OFFSET 6557
-#define PSWRQ2_REG_VF_BASE_RT_OFFSET 6558
-#define PSWRQ2_REG_VF_LAST_ILT_RT_OFFSET 6559
-#define PSWRQ2_REG_DRAM_ALIGN_WR_RT_OFFSET 6560
-#define PSWRQ2_REG_DRAM_ALIGN_RD_RT_OFFSET 6561
-#define PSWRQ2_REG_TGSRC_FIRST_ILT_RT_OFFSET 6562
-#define PSWRQ2_REG_RGSRC_FIRST_ILT_RT_OFFSET 6563
-#define PSWRQ2_REG_TGSRC_LAST_ILT_RT_OFFSET 6564
-#define PSWRQ2_REG_RGSRC_LAST_ILT_RT_OFFSET 6565
-#define PSWRQ2_REG_ILT_MEMORY_RT_OFFSET 6566
-#define PSWRQ2_REG_ILT_MEMORY_RT_SIZE 26414
-#define PGLUE_REG_B_VF_BASE_RT_OFFSET 32980
-#define PGLUE_REG_B_MSDM_OFFSET_MASK_B_RT_OFFSET 32981
-#define PGLUE_REG_B_MSDM_VF_SHIFT_B_RT_OFFSET 32982
-#define PGLUE_REG_B_CACHE_LINE_SIZE_RT_OFFSET 32983
-#define PGLUE_REG_B_PF_BAR0_SIZE_RT_OFFSET 32984
-#define PGLUE_REG_B_PF_BAR1_SIZE_RT_OFFSET 32985
-#define PGLUE_REG_B_VF_BAR1_SIZE_RT_OFFSET 32986
-#define TM_REG_VF_ENABLE_CONN_RT_OFFSET 32987
-#define TM_REG_PF_ENABLE_CONN_RT_OFFSET 32988
-#define TM_REG_PF_ENABLE_TASK_RT_OFFSET 32989
-#define TM_REG_GROUP_SIZE_RESOLUTION_CONN_RT_OFFSET 32990
-#define TM_REG_GROUP_SIZE_RESOLUTION_TASK_RT_OFFSET 32991
-#define TM_REG_CONFIG_CONN_MEM_RT_OFFSET 32992
-#define TM_REG_CONFIG_CONN_MEM_RT_SIZE 416
-#define TM_REG_CONFIG_TASK_MEM_RT_OFFSET 33408
-#define TM_REG_CONFIG_TASK_MEM_RT_SIZE 608
-#define QM_REG_MAXPQSIZE_0_RT_OFFSET 34016
-#define QM_REG_MAXPQSIZE_1_RT_OFFSET 34017
-#define QM_REG_MAXPQSIZE_2_RT_OFFSET 34018
-#define QM_REG_MAXPQSIZETXSEL_0_RT_OFFSET 34019
-#define QM_REG_MAXPQSIZETXSEL_1_RT_OFFSET 34020
-#define QM_REG_MAXPQSIZETXSEL_2_RT_OFFSET 34021
-#define QM_REG_MAXPQSIZETXSEL_3_RT_OFFSET 34022
-#define QM_REG_MAXPQSIZETXSEL_4_RT_OFFSET 34023
-#define QM_REG_MAXPQSIZETXSEL_5_RT_OFFSET 34024
-#define QM_REG_MAXPQSIZETXSEL_6_RT_OFFSET 34025
-#define QM_REG_MAXPQSIZETXSEL_7_RT_OFFSET 34026
-#define QM_REG_MAXPQSIZETXSEL_8_RT_OFFSET 34027
-#define QM_REG_MAXPQSIZETXSEL_9_RT_OFFSET 34028
-#define QM_REG_MAXPQSIZETXSEL_10_RT_OFFSET 34029
-#define QM_REG_MAXPQSIZETXSEL_11_RT_OFFSET 34030
-#define QM_REG_MAXPQSIZETXSEL_12_RT_OFFSET 34031
-#define QM_REG_MAXPQSIZETXSEL_13_RT_OFFSET 34032
-#define QM_REG_MAXPQSIZETXSEL_14_RT_OFFSET 34033
-#define QM_REG_MAXPQSIZETXSEL_15_RT_OFFSET 34034
-#define QM_REG_MAXPQSIZETXSEL_16_RT_OFFSET 34035
-#define QM_REG_MAXPQSIZETXSEL_17_RT_OFFSET 34036
-#define QM_REG_MAXPQSIZETXSEL_18_RT_OFFSET 34037
-#define QM_REG_MAXPQSIZETXSEL_19_RT_OFFSET 34038
-#define QM_REG_MAXPQSIZETXSEL_20_RT_OFFSET 34039
-#define QM_REG_MAXPQSIZETXSEL_21_RT_OFFSET 34040
-#define QM_REG_MAXPQSIZETXSEL_22_RT_OFFSET 34041
-#define QM_REG_MAXPQSIZETXSEL_23_RT_OFFSET 34042
-#define QM_REG_MAXPQSIZETXSEL_24_RT_OFFSET 34043
-#define QM_REG_MAXPQSIZETXSEL_25_RT_OFFSET 34044
-#define QM_REG_MAXPQSIZETXSEL_26_RT_OFFSET 34045
-#define QM_REG_MAXPQSIZETXSEL_27_RT_OFFSET 34046
-#define QM_REG_MAXPQSIZETXSEL_28_RT_OFFSET 34047
-#define QM_REG_MAXPQSIZETXSEL_29_RT_OFFSET 34048
-#define QM_REG_MAXPQSIZETXSEL_30_RT_OFFSET 34049
-#define QM_REG_MAXPQSIZETXSEL_31_RT_OFFSET 34050
-#define QM_REG_MAXPQSIZETXSEL_32_RT_OFFSET 34051
-#define QM_REG_MAXPQSIZETXSEL_33_RT_OFFSET 34052
-#define QM_REG_MAXPQSIZETXSEL_34_RT_OFFSET 34053
-#define QM_REG_MAXPQSIZETXSEL_35_RT_OFFSET 34054
-#define QM_REG_MAXPQSIZETXSEL_36_RT_OFFSET 34055
-#define QM_REG_MAXPQSIZETXSEL_37_RT_OFFSET 34056
-#define QM_REG_MAXPQSIZETXSEL_38_RT_OFFSET 34057
-#define QM_REG_MAXPQSIZETXSEL_39_RT_OFFSET 34058
-#define QM_REG_MAXPQSIZETXSEL_40_RT_OFFSET 34059
-#define QM_REG_MAXPQSIZETXSEL_41_RT_OFFSET 34060
-#define QM_REG_MAXPQSIZETXSEL_42_RT_OFFSET 34061
-#define QM_REG_MAXPQSIZETXSEL_43_RT_OFFSET 34062
-#define QM_REG_MAXPQSIZETXSEL_44_RT_OFFSET 34063
-#define QM_REG_MAXPQSIZETXSEL_45_RT_OFFSET 34064
-#define QM_REG_MAXPQSIZETXSEL_46_RT_OFFSET 34065
-#define QM_REG_MAXPQSIZETXSEL_47_RT_OFFSET 34066
-#define QM_REG_MAXPQSIZETXSEL_48_RT_OFFSET 34067
-#define QM_REG_MAXPQSIZETXSEL_49_RT_OFFSET 34068
-#define QM_REG_MAXPQSIZETXSEL_50_RT_OFFSET 34069
-#define QM_REG_MAXPQSIZETXSEL_51_RT_OFFSET 34070
-#define QM_REG_MAXPQSIZETXSEL_52_RT_OFFSET 34071
-#define QM_REG_MAXPQSIZETXSEL_53_RT_OFFSET 34072
-#define QM_REG_MAXPQSIZETXSEL_54_RT_OFFSET 34073
-#define QM_REG_MAXPQSIZETXSEL_55_RT_OFFSET 34074
-#define QM_REG_MAXPQSIZETXSEL_56_RT_OFFSET 34075
-#define QM_REG_MAXPQSIZETXSEL_57_RT_OFFSET 34076
-#define QM_REG_MAXPQSIZETXSEL_58_RT_OFFSET 34077
-#define QM_REG_MAXPQSIZETXSEL_59_RT_OFFSET 34078
-#define QM_REG_MAXPQSIZETXSEL_60_RT_OFFSET 34079
-#define QM_REG_MAXPQSIZETXSEL_61_RT_OFFSET 34080
-#define QM_REG_MAXPQSIZETXSEL_62_RT_OFFSET 34081
-#define QM_REG_MAXPQSIZETXSEL_63_RT_OFFSET 34082
-#define QM_REG_BASEADDROTHERPQ_RT_OFFSET 34083
-#define QM_REG_BASEADDROTHERPQ_RT_SIZE 128
-#define QM_REG_PTRTBLOTHER_RT_OFFSET 34211
-#define QM_REG_PTRTBLOTHER_RT_SIZE 256
-#define QM_REG_AFULLQMBYPTHRPFWFQ_RT_OFFSET 34467
-#define QM_REG_AFULLQMBYPTHRVPWFQ_RT_OFFSET 34468
-#define QM_REG_AFULLQMBYPTHRPFRL_RT_OFFSET 34469
-#define QM_REG_AFULLQMBYPTHRGLBLRL_RT_OFFSET 34470
-#define QM_REG_AFULLOPRTNSTCCRDMASK_RT_OFFSET 34471
-#define QM_REG_WRROTHERPQGRP_0_RT_OFFSET 34472
-#define QM_REG_WRROTHERPQGRP_1_RT_OFFSET 34473
-#define QM_REG_WRROTHERPQGRP_2_RT_OFFSET 34474
-#define QM_REG_WRROTHERPQGRP_3_RT_OFFSET 34475
-#define QM_REG_WRROTHERPQGRP_4_RT_OFFSET 34476
-#define QM_REG_WRROTHERPQGRP_5_RT_OFFSET 34477
-#define QM_REG_WRROTHERPQGRP_6_RT_OFFSET 34478
-#define QM_REG_WRROTHERPQGRP_7_RT_OFFSET 34479
-#define QM_REG_WRROTHERPQGRP_8_RT_OFFSET 34480
-#define QM_REG_WRROTHERPQGRP_9_RT_OFFSET 34481
-#define QM_REG_WRROTHERPQGRP_10_RT_OFFSET 34482
-#define QM_REG_WRROTHERPQGRP_11_RT_OFFSET 34483
-#define QM_REG_WRROTHERPQGRP_12_RT_OFFSET 34484
-#define QM_REG_WRROTHERPQGRP_13_RT_OFFSET 34485
-#define QM_REG_WRROTHERPQGRP_14_RT_OFFSET 34486
-#define QM_REG_WRROTHERPQGRP_15_RT_OFFSET 34487
-#define QM_REG_WRROTHERGRPWEIGHT_0_RT_OFFSET 34488
-#define QM_REG_WRROTHERGRPWEIGHT_1_RT_OFFSET 34489
-#define QM_REG_WRROTHERGRPWEIGHT_2_RT_OFFSET 34490
-#define QM_REG_WRROTHERGRPWEIGHT_3_RT_OFFSET 34491
-#define QM_REG_WRRTXGRPWEIGHT_0_RT_OFFSET 34492
-#define QM_REG_WRRTXGRPWEIGHT_1_RT_OFFSET 34493
-#define QM_REG_PQTX2PF_0_RT_OFFSET 34494
-#define QM_REG_PQTX2PF_1_RT_OFFSET 34495
-#define QM_REG_PQTX2PF_2_RT_OFFSET 34496
-#define QM_REG_PQTX2PF_3_RT_OFFSET 34497
-#define QM_REG_PQTX2PF_4_RT_OFFSET 34498
-#define QM_REG_PQTX2PF_5_RT_OFFSET 34499
-#define QM_REG_PQTX2PF_6_RT_OFFSET 34500
-#define QM_REG_PQTX2PF_7_RT_OFFSET 34501
-#define QM_REG_PQTX2PF_8_RT_OFFSET 34502
-#define QM_REG_PQTX2PF_9_RT_OFFSET 34503
-#define QM_REG_PQTX2PF_10_RT_OFFSET 34504
-#define QM_REG_PQTX2PF_11_RT_OFFSET 34505
-#define QM_REG_PQTX2PF_12_RT_OFFSET 34506
-#define QM_REG_PQTX2PF_13_RT_OFFSET 34507
-#define QM_REG_PQTX2PF_14_RT_OFFSET 34508
-#define QM_REG_PQTX2PF_15_RT_OFFSET 34509
-#define QM_REG_PQTX2PF_16_RT_OFFSET 34510
-#define QM_REG_PQTX2PF_17_RT_OFFSET 34511
-#define QM_REG_PQTX2PF_18_RT_OFFSET 34512
-#define QM_REG_PQTX2PF_19_RT_OFFSET 34513
-#define QM_REG_PQTX2PF_20_RT_OFFSET 34514
-#define QM_REG_PQTX2PF_21_RT_OFFSET 34515
-#define QM_REG_PQTX2PF_22_RT_OFFSET 34516
-#define QM_REG_PQTX2PF_23_RT_OFFSET 34517
-#define QM_REG_PQTX2PF_24_RT_OFFSET 34518
-#define QM_REG_PQTX2PF_25_RT_OFFSET 34519
-#define QM_REG_PQTX2PF_26_RT_OFFSET 34520
-#define QM_REG_PQTX2PF_27_RT_OFFSET 34521
-#define QM_REG_PQTX2PF_28_RT_OFFSET 34522
-#define QM_REG_PQTX2PF_29_RT_OFFSET 34523
-#define QM_REG_PQTX2PF_30_RT_OFFSET 34524
-#define QM_REG_PQTX2PF_31_RT_OFFSET 34525
-#define QM_REG_PQTX2PF_32_RT_OFFSET 34526
-#define QM_REG_PQTX2PF_33_RT_OFFSET 34527
-#define QM_REG_PQTX2PF_34_RT_OFFSET 34528
-#define QM_REG_PQTX2PF_35_RT_OFFSET 34529
-#define QM_REG_PQTX2PF_36_RT_OFFSET 34530
-#define QM_REG_PQTX2PF_37_RT_OFFSET 34531
-#define QM_REG_PQTX2PF_38_RT_OFFSET 34532
-#define QM_REG_PQTX2PF_39_RT_OFFSET 34533
-#define QM_REG_PQTX2PF_40_RT_OFFSET 34534
-#define QM_REG_PQTX2PF_41_RT_OFFSET 34535
-#define QM_REG_PQTX2PF_42_RT_OFFSET 34536
-#define QM_REG_PQTX2PF_43_RT_OFFSET 34537
-#define QM_REG_PQTX2PF_44_RT_OFFSET 34538
-#define QM_REG_PQTX2PF_45_RT_OFFSET 34539
-#define QM_REG_PQTX2PF_46_RT_OFFSET 34540
-#define QM_REG_PQTX2PF_47_RT_OFFSET 34541
-#define QM_REG_PQTX2PF_48_RT_OFFSET 34542
-#define QM_REG_PQTX2PF_49_RT_OFFSET 34543
-#define QM_REG_PQTX2PF_50_RT_OFFSET 34544
-#define QM_REG_PQTX2PF_51_RT_OFFSET 34545
-#define QM_REG_PQTX2PF_52_RT_OFFSET 34546
-#define QM_REG_PQTX2PF_53_RT_OFFSET 34547
-#define QM_REG_PQTX2PF_54_RT_OFFSET 34548
-#define QM_REG_PQTX2PF_55_RT_OFFSET 34549
-#define QM_REG_PQTX2PF_56_RT_OFFSET 34550
-#define QM_REG_PQTX2PF_57_RT_OFFSET 34551
-#define QM_REG_PQTX2PF_58_RT_OFFSET 34552
-#define QM_REG_PQTX2PF_59_RT_OFFSET 34553
-#define QM_REG_PQTX2PF_60_RT_OFFSET 34554
-#define QM_REG_PQTX2PF_61_RT_OFFSET 34555
-#define QM_REG_PQTX2PF_62_RT_OFFSET 34556
-#define QM_REG_PQTX2PF_63_RT_OFFSET 34557
-#define QM_REG_PQOTHER2PF_0_RT_OFFSET 34558
-#define QM_REG_PQOTHER2PF_1_RT_OFFSET 34559
-#define QM_REG_PQOTHER2PF_2_RT_OFFSET 34560
-#define QM_REG_PQOTHER2PF_3_RT_OFFSET 34561
-#define QM_REG_PQOTHER2PF_4_RT_OFFSET 34562
-#define QM_REG_PQOTHER2PF_5_RT_OFFSET 34563
-#define QM_REG_PQOTHER2PF_6_RT_OFFSET 34564
-#define QM_REG_PQOTHER2PF_7_RT_OFFSET 34565
-#define QM_REG_PQOTHER2PF_8_RT_OFFSET 34566
-#define QM_REG_PQOTHER2PF_9_RT_OFFSET 34567
-#define QM_REG_PQOTHER2PF_10_RT_OFFSET 34568
-#define QM_REG_PQOTHER2PF_11_RT_OFFSET 34569
-#define QM_REG_PQOTHER2PF_12_RT_OFFSET 34570
-#define QM_REG_PQOTHER2PF_13_RT_OFFSET 34571
-#define QM_REG_PQOTHER2PF_14_RT_OFFSET 34572
-#define QM_REG_PQOTHER2PF_15_RT_OFFSET 34573
-#define QM_REG_RLGLBLPERIOD_0_RT_OFFSET 34574
-#define QM_REG_RLGLBLPERIOD_1_RT_OFFSET 34575
-#define QM_REG_RLGLBLPERIODTIMER_0_RT_OFFSET 34576
-#define QM_REG_RLGLBLPERIODTIMER_1_RT_OFFSET 34577
-#define QM_REG_RLGLBLPERIODSEL_0_RT_OFFSET 34578
-#define QM_REG_RLGLBLPERIODSEL_1_RT_OFFSET 34579
-#define QM_REG_RLGLBLPERIODSEL_2_RT_OFFSET 34580
-#define QM_REG_RLGLBLPERIODSEL_3_RT_OFFSET 34581
-#define QM_REG_RLGLBLPERIODSEL_4_RT_OFFSET 34582
-#define QM_REG_RLGLBLPERIODSEL_5_RT_OFFSET 34583
-#define QM_REG_RLGLBLPERIODSEL_6_RT_OFFSET 34584
-#define QM_REG_RLGLBLPERIODSEL_7_RT_OFFSET 34585
-#define QM_REG_RLGLBLINCVAL_RT_OFFSET 34586
-#define QM_REG_RLGLBLINCVAL_RT_SIZE 256
-#define QM_REG_RLGLBLUPPERBOUND_RT_OFFSET 34842
-#define QM_REG_RLGLBLUPPERBOUND_RT_SIZE 256
-#define QM_REG_RLGLBLCRD_RT_OFFSET 35098
-#define QM_REG_RLGLBLCRD_RT_SIZE 256
-#define QM_REG_RLGLBLENABLE_RT_OFFSET 35354
-#define QM_REG_RLPFPERIOD_RT_OFFSET 35355
-#define QM_REG_RLPFPERIODTIMER_RT_OFFSET 35356
-#define QM_REG_RLPFINCVAL_RT_OFFSET 35357
-#define QM_REG_RLPFINCVAL_RT_SIZE 16
-#define QM_REG_RLPFUPPERBOUND_RT_OFFSET 35373
-#define QM_REG_RLPFUPPERBOUND_RT_SIZE 16
-#define QM_REG_RLPFCRD_RT_OFFSET 35389
-#define QM_REG_RLPFCRD_RT_SIZE 16
-#define QM_REG_RLPFENABLE_RT_OFFSET 35405
-#define QM_REG_RLPFVOQENABLE_RT_OFFSET 35406
-#define QM_REG_WFQPFWEIGHT_RT_OFFSET 35407
-#define QM_REG_WFQPFWEIGHT_RT_SIZE 16
-#define QM_REG_WFQPFUPPERBOUND_RT_OFFSET 35423
-#define QM_REG_WFQPFUPPERBOUND_RT_SIZE 16
-#define QM_REG_WFQPFCRD_RT_OFFSET 35439
-#define QM_REG_WFQPFCRD_RT_SIZE 256
-#define QM_REG_WFQPFENABLE_RT_OFFSET 35695
-#define QM_REG_WFQVPENABLE_RT_OFFSET 35696
-#define QM_REG_BASEADDRTXPQ_RT_OFFSET 35697
-#define QM_REG_BASEADDRTXPQ_RT_SIZE 512
-#define QM_REG_TXPQMAP_RT_OFFSET 36209
-#define QM_REG_TXPQMAP_RT_SIZE 512
-#define QM_REG_WFQVPWEIGHT_RT_OFFSET 36721
-#define QM_REG_WFQVPWEIGHT_RT_SIZE 512
-#define QM_REG_WFQVPCRD_RT_OFFSET 37233
-#define QM_REG_WFQVPCRD_RT_SIZE 512
-#define QM_REG_WFQVPMAP_RT_OFFSET 37745
-#define QM_REG_WFQVPMAP_RT_SIZE 512
-#define QM_REG_PTRTBLTX_RT_OFFSET 38257
-#define QM_REG_PTRTBLTX_RT_SIZE 1024
-#define QM_REG_WFQPFCRD_MSB_RT_OFFSET 39281
-#define QM_REG_WFQPFCRD_MSB_RT_SIZE 320
-#define QM_REG_VOQCRDLINE_RT_OFFSET 39601
-#define QM_REG_VOQCRDLINE_RT_SIZE 36
-#define QM_REG_VOQINITCRDLINE_RT_OFFSET 39637
-#define QM_REG_VOQINITCRDLINE_RT_SIZE 36
-#define QM_REG_RLPFVOQENABLE_MSB_RT_OFFSET 39673
-#define NIG_REG_TAG_ETHERTYPE_0_RT_OFFSET 39674
-#define NIG_REG_BRB_GATE_DNTFWD_PORT_RT_OFFSET 39675
-#define NIG_REG_OUTER_TAG_VALUE_LIST0_RT_OFFSET 39676
-#define NIG_REG_OUTER_TAG_VALUE_LIST1_RT_OFFSET 39677
-#define NIG_REG_OUTER_TAG_VALUE_LIST2_RT_OFFSET 39678
-#define NIG_REG_OUTER_TAG_VALUE_LIST3_RT_OFFSET 39679
-#define NIG_REG_LLH_FUNC_TAGMAC_CLS_TYPE_RT_OFFSET 39680
-#define NIG_REG_LLH_FUNC_TAG_EN_RT_OFFSET 39681
-#define NIG_REG_LLH_FUNC_TAG_EN_RT_SIZE 4
-#define NIG_REG_LLH_FUNC_TAG_VALUE_RT_OFFSET 39685
-#define NIG_REG_LLH_FUNC_TAG_VALUE_RT_SIZE 4
-#define NIG_REG_LLH_FUNC_FILTER_VALUE_RT_OFFSET 39689
-#define NIG_REG_LLH_FUNC_FILTER_VALUE_RT_SIZE 32
-#define NIG_REG_LLH_FUNC_FILTER_EN_RT_OFFSET 39721
-#define NIG_REG_LLH_FUNC_FILTER_EN_RT_SIZE 16
-#define NIG_REG_LLH_FUNC_FILTER_MODE_RT_OFFSET 39737
-#define NIG_REG_LLH_FUNC_FILTER_MODE_RT_SIZE 16
-#define NIG_REG_LLH_FUNC_FILTER_PROTOCOL_TYPE_RT_OFFSET 39753
-#define NIG_REG_LLH_FUNC_FILTER_PROTOCOL_TYPE_RT_SIZE 16
-#define NIG_REG_LLH_FUNC_FILTER_HDR_SEL_RT_OFFSET 39769
-#define NIG_REG_LLH_FUNC_FILTER_HDR_SEL_RT_SIZE 16
-#define NIG_REG_TX_EDPM_CTRL_RT_OFFSET 39785
-#define NIG_REG_PPF_TO_ENGINE_SEL_RT_OFFSET 39786
-#define NIG_REG_PPF_TO_ENGINE_SEL_RT_SIZE 8
-#define NIG_REG_LLH_PF_CLS_FUNC_FILTER_VALUE_RT_OFFSET 39794
-#define NIG_REG_LLH_PF_CLS_FUNC_FILTER_VALUE_RT_SIZE 1024
-#define NIG_REG_LLH_PF_CLS_FUNC_FILTER_EN_RT_OFFSET 40818
-#define NIG_REG_LLH_PF_CLS_FUNC_FILTER_EN_RT_SIZE 512
-#define NIG_REG_LLH_PF_CLS_FUNC_FILTER_MODE_RT_OFFSET 41330
-#define NIG_REG_LLH_PF_CLS_FUNC_FILTER_MODE_RT_SIZE 512
-#define NIG_REG_LLH_PF_CLS_FUNC_FILTER_PROTOCOL_TYPE_RT_OFFSET 41842
-#define NIG_REG_LLH_PF_CLS_FUNC_FILTER_PROTOCOL_TYPE_RT_SIZE 512
-#define NIG_REG_LLH_PF_CLS_FUNC_FILTER_HDR_SEL_RT_OFFSET 42354
-#define NIG_REG_LLH_PF_CLS_FUNC_FILTER_HDR_SEL_RT_SIZE 512
-#define NIG_REG_LLH_PF_CLS_FILTERS_MAP_RT_OFFSET 42866
-#define NIG_REG_LLH_PF_CLS_FILTERS_MAP_RT_SIZE 32
-#define CDU_REG_CID_ADDR_PARAMS_RT_OFFSET 42898
-#define CDU_REG_SEGMENT0_PARAMS_RT_OFFSET 42899
-#define CDU_REG_SEGMENT1_PARAMS_RT_OFFSET 42900
-#define CDU_REG_PF_SEG0_TYPE_OFFSET_RT_OFFSET 42901
-#define CDU_REG_PF_SEG1_TYPE_OFFSET_RT_OFFSET 42902
-#define CDU_REG_PF_SEG2_TYPE_OFFSET_RT_OFFSET 42903
-#define CDU_REG_PF_SEG3_TYPE_OFFSET_RT_OFFSET 42904
-#define CDU_REG_PF_FL_SEG0_TYPE_OFFSET_RT_OFFSET 42905
-#define CDU_REG_PF_FL_SEG1_TYPE_OFFSET_RT_OFFSET 42906
-#define CDU_REG_PF_FL_SEG2_TYPE_OFFSET_RT_OFFSET 42907
-#define CDU_REG_PF_FL_SEG3_TYPE_OFFSET_RT_OFFSET 42908
-#define CDU_REG_VF_SEG_TYPE_OFFSET_RT_OFFSET 42909
-#define CDU_REG_VF_FL_SEG_TYPE_OFFSET_RT_OFFSET 42910
-#define PBF_REG_TAG_ETHERTYPE_0_RT_OFFSET 42911
-#define PBF_REG_BTB_SHARED_AREA_SIZE_RT_OFFSET 42912
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ0_RT_OFFSET 42913
-#define PBF_REG_BTB_GUARANTEED_VOQ0_RT_OFFSET 42914
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ0_RT_OFFSET 42915
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ1_RT_OFFSET 42916
-#define PBF_REG_BTB_GUARANTEED_VOQ1_RT_OFFSET 42917
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ1_RT_OFFSET 42918
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ2_RT_OFFSET 42919
-#define PBF_REG_BTB_GUARANTEED_VOQ2_RT_OFFSET 42920
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ2_RT_OFFSET 42921
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ3_RT_OFFSET 42922
-#define PBF_REG_BTB_GUARANTEED_VOQ3_RT_OFFSET 42923
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ3_RT_OFFSET 42924
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ4_RT_OFFSET 42925
-#define PBF_REG_BTB_GUARANTEED_VOQ4_RT_OFFSET 42926
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ4_RT_OFFSET 42927
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ5_RT_OFFSET 42928
-#define PBF_REG_BTB_GUARANTEED_VOQ5_RT_OFFSET 42929
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ5_RT_OFFSET 42930
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ6_RT_OFFSET 42931
-#define PBF_REG_BTB_GUARANTEED_VOQ6_RT_OFFSET 42932
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ6_RT_OFFSET 42933
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ7_RT_OFFSET 42934
-#define PBF_REG_BTB_GUARANTEED_VOQ7_RT_OFFSET 42935
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ7_RT_OFFSET 42936
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ8_RT_OFFSET 42937
-#define PBF_REG_BTB_GUARANTEED_VOQ8_RT_OFFSET 42938
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ8_RT_OFFSET 42939
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ9_RT_OFFSET 42940
-#define PBF_REG_BTB_GUARANTEED_VOQ9_RT_OFFSET 42941
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ9_RT_OFFSET 42942
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ10_RT_OFFSET 42943
-#define PBF_REG_BTB_GUARANTEED_VOQ10_RT_OFFSET 42944
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ10_RT_OFFSET 42945
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ11_RT_OFFSET 42946
-#define PBF_REG_BTB_GUARANTEED_VOQ11_RT_OFFSET 42947
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ11_RT_OFFSET 42948
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ12_RT_OFFSET 42949
-#define PBF_REG_BTB_GUARANTEED_VOQ12_RT_OFFSET 42950
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ12_RT_OFFSET 42951
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ13_RT_OFFSET 42952
-#define PBF_REG_BTB_GUARANTEED_VOQ13_RT_OFFSET 42953
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ13_RT_OFFSET 42954
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ14_RT_OFFSET 42955
-#define PBF_REG_BTB_GUARANTEED_VOQ14_RT_OFFSET 42956
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ14_RT_OFFSET 42957
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ15_RT_OFFSET 42958
-#define PBF_REG_BTB_GUARANTEED_VOQ15_RT_OFFSET 42959
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ15_RT_OFFSET 42960
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ16_RT_OFFSET 42961
-#define PBF_REG_BTB_GUARANTEED_VOQ16_RT_OFFSET 42962
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ16_RT_OFFSET 42963
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ17_RT_OFFSET 42964
-#define PBF_REG_BTB_GUARANTEED_VOQ17_RT_OFFSET 42965
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ17_RT_OFFSET 42966
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ18_RT_OFFSET 42967
-#define PBF_REG_BTB_GUARANTEED_VOQ18_RT_OFFSET 42968
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ18_RT_OFFSET 42969
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ19_RT_OFFSET 42970
-#define PBF_REG_BTB_GUARANTEED_VOQ19_RT_OFFSET 42971
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ19_RT_OFFSET 42972
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ20_RT_OFFSET 42973
-#define PBF_REG_BTB_GUARANTEED_VOQ20_RT_OFFSET 42974
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ20_RT_OFFSET 42975
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ21_RT_OFFSET 42976
-#define PBF_REG_BTB_GUARANTEED_VOQ21_RT_OFFSET 42977
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ21_RT_OFFSET 42978
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ22_RT_OFFSET 42979
-#define PBF_REG_BTB_GUARANTEED_VOQ22_RT_OFFSET 42980
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ22_RT_OFFSET 42981
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ23_RT_OFFSET 42982
-#define PBF_REG_BTB_GUARANTEED_VOQ23_RT_OFFSET 42983
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ23_RT_OFFSET 42984
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ24_RT_OFFSET 42985
-#define PBF_REG_BTB_GUARANTEED_VOQ24_RT_OFFSET 42986
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ24_RT_OFFSET 42987
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ25_RT_OFFSET 42988
-#define PBF_REG_BTB_GUARANTEED_VOQ25_RT_OFFSET 42989
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ25_RT_OFFSET 42990
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ26_RT_OFFSET 42991
-#define PBF_REG_BTB_GUARANTEED_VOQ26_RT_OFFSET 42992
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ26_RT_OFFSET 42993
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ27_RT_OFFSET 42994
-#define PBF_REG_BTB_GUARANTEED_VOQ27_RT_OFFSET 42995
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ27_RT_OFFSET 42996
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ28_RT_OFFSET 42997
-#define PBF_REG_BTB_GUARANTEED_VOQ28_RT_OFFSET 42998
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ28_RT_OFFSET 42999
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ29_RT_OFFSET 43000
-#define PBF_REG_BTB_GUARANTEED_VOQ29_RT_OFFSET 43001
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ29_RT_OFFSET 43002
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ30_RT_OFFSET 43003
-#define PBF_REG_BTB_GUARANTEED_VOQ30_RT_OFFSET 43004
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ30_RT_OFFSET 43005
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ31_RT_OFFSET 43006
-#define PBF_REG_BTB_GUARANTEED_VOQ31_RT_OFFSET 43007
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ31_RT_OFFSET 43008
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ32_RT_OFFSET 43009
-#define PBF_REG_BTB_GUARANTEED_VOQ32_RT_OFFSET 43010
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ32_RT_OFFSET 43011
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ33_RT_OFFSET 43012
-#define PBF_REG_BTB_GUARANTEED_VOQ33_RT_OFFSET 43013
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ33_RT_OFFSET 43014
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ34_RT_OFFSET 43015
-#define PBF_REG_BTB_GUARANTEED_VOQ34_RT_OFFSET 43016
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ34_RT_OFFSET 43017
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ35_RT_OFFSET 43018
-#define PBF_REG_BTB_GUARANTEED_VOQ35_RT_OFFSET 43019
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ35_RT_OFFSET 43020
-#define XCM_REG_CON_PHY_Q3_RT_OFFSET 43021
-
-#define RUNTIME_ARRAY_SIZE 43022
-
+#define DORQ_REG_PF_MAX_ICID_0_RT_OFFSET 0
+#define DORQ_REG_PF_MAX_ICID_1_RT_OFFSET 1
+#define DORQ_REG_PF_MAX_ICID_2_RT_OFFSET 2
+#define DORQ_REG_PF_MAX_ICID_3_RT_OFFSET 3
+#define DORQ_REG_PF_MAX_ICID_4_RT_OFFSET 4
+#define DORQ_REG_PF_MAX_ICID_5_RT_OFFSET 5
+#define DORQ_REG_PF_MAX_ICID_6_RT_OFFSET 6
+#define DORQ_REG_PF_MAX_ICID_7_RT_OFFSET 7
+#define DORQ_REG_VF_MAX_ICID_0_RT_OFFSET 8
+#define DORQ_REG_VF_MAX_ICID_1_RT_OFFSET 9
+#define DORQ_REG_VF_MAX_ICID_2_RT_OFFSET 10
+#define DORQ_REG_VF_MAX_ICID_3_RT_OFFSET 11
+#define DORQ_REG_VF_MAX_ICID_4_RT_OFFSET 12
+#define DORQ_REG_VF_MAX_ICID_5_RT_OFFSET 13
+#define DORQ_REG_VF_MAX_ICID_6_RT_OFFSET 14
+#define DORQ_REG_VF_MAX_ICID_7_RT_OFFSET 15
+#define DORQ_REG_VF_ICID_BIT_SHIFT_NORM_RT_OFFSET 16
+#define DORQ_REG_PF_WAKE_ALL_RT_OFFSET 17
+#define DORQ_REG_TAG1_ETHERTYPE_RT_OFFSET 18
+#define IGU_REG_PF_CONFIGURATION_RT_OFFSET 19
+#define IGU_REG_VF_CONFIGURATION_RT_OFFSET 20
+#define IGU_REG_ATTN_MSG_ADDR_L_RT_OFFSET 21
+#define IGU_REG_ATTN_MSG_ADDR_H_RT_OFFSET 22
+#define IGU_REG_LEADING_EDGE_LATCH_RT_OFFSET 23
+#define IGU_REG_TRAILING_EDGE_LATCH_RT_OFFSET 24
+#define CAU_REG_CQE_AGG_UNIT_SIZE_RT_OFFSET 25
+#define CAU_REG_SB_VAR_MEMORY_RT_OFFSET 26
+#define CAU_REG_SB_VAR_MEMORY_RT_SIZE 736
+#define CAU_REG_SB_ADDR_MEMORY_RT_OFFSET 762
+#define CAU_REG_SB_ADDR_MEMORY_RT_SIZE 736
+#define CAU_REG_PI_MEMORY_RT_OFFSET 1498
+#define CAU_REG_PI_MEMORY_RT_SIZE 4416
+#define PRS_REG_SEARCH_RESP_INITIATOR_TYPE_RT_OFFSET 5914
+#define PRS_REG_TASK_ID_MAX_INITIATOR_PF_RT_OFFSET 5915
+#define PRS_REG_TASK_ID_MAX_INITIATOR_VF_RT_OFFSET 5916
+#define PRS_REG_TASK_ID_MAX_TARGET_PF_RT_OFFSET 5917
+#define PRS_REG_TASK_ID_MAX_TARGET_VF_RT_OFFSET 5918
+#define PRS_REG_SEARCH_TCP_RT_OFFSET 5919
+#define PRS_REG_SEARCH_FCOE_RT_OFFSET 5920
+#define PRS_REG_SEARCH_ROCE_RT_OFFSET 5921
+#define PRS_REG_ROCE_DEST_QP_MAX_VF_RT_OFFSET 5922
+#define PRS_REG_ROCE_DEST_QP_MAX_PF_RT_OFFSET 5923
+#define PRS_REG_SEARCH_OPENFLOW_RT_OFFSET 5924
+#define PRS_REG_SEARCH_NON_IP_AS_OPENFLOW_RT_OFFSET 5925
+#define PRS_REG_OPENFLOW_SUPPORT_ONLY_KNOWN_OVER_IP_RT_OFFSET 5926
+#define PRS_REG_OPENFLOW_SEARCH_KEY_MASK_RT_OFFSET 5927
+#define PRS_REG_TAG_ETHERTYPE_0_RT_OFFSET 5928
+#define PRS_REG_LIGHT_L2_ETHERTYPE_EN_RT_OFFSET 5929
+#define SRC_REG_FIRSTFREE_RT_OFFSET 5930
+#define SRC_REG_FIRSTFREE_RT_SIZE 2
+#define SRC_REG_LASTFREE_RT_OFFSET 5932
+#define SRC_REG_LASTFREE_RT_SIZE 2
+#define SRC_REG_COUNTFREE_RT_OFFSET 5934
+#define SRC_REG_NUMBER_HASH_BITS_RT_OFFSET 5935
+#define PSWRQ2_REG_CDUT_P_SIZE_RT_OFFSET 5936
+#define PSWRQ2_REG_CDUC_P_SIZE_RT_OFFSET 5937
+#define PSWRQ2_REG_TM_P_SIZE_RT_OFFSET 5938
+#define PSWRQ2_REG_QM_P_SIZE_RT_OFFSET 5939
+#define PSWRQ2_REG_SRC_P_SIZE_RT_OFFSET 5940
+#define PSWRQ2_REG_TSDM_P_SIZE_RT_OFFSET 5941
+#define PSWRQ2_REG_TM_FIRST_ILT_RT_OFFSET 5942
+#define PSWRQ2_REG_TM_LAST_ILT_RT_OFFSET 5943
+#define PSWRQ2_REG_QM_FIRST_ILT_RT_OFFSET 5944
+#define PSWRQ2_REG_QM_LAST_ILT_RT_OFFSET 5945
+#define PSWRQ2_REG_SRC_FIRST_ILT_RT_OFFSET 5946
+#define PSWRQ2_REG_SRC_LAST_ILT_RT_OFFSET 5947
+#define PSWRQ2_REG_CDUC_FIRST_ILT_RT_OFFSET 5948
+#define PSWRQ2_REG_CDUC_LAST_ILT_RT_OFFSET 5949
+#define PSWRQ2_REG_CDUT_FIRST_ILT_RT_OFFSET 5950
+#define PSWRQ2_REG_CDUT_LAST_ILT_RT_OFFSET 5951
+#define PSWRQ2_REG_TSDM_FIRST_ILT_RT_OFFSET 5952
+#define PSWRQ2_REG_TSDM_LAST_ILT_RT_OFFSET 5953
+#define PSWRQ2_REG_TM_NUMBER_OF_PF_BLOCKS_RT_OFFSET 5954
+#define PSWRQ2_REG_CDUT_NUMBER_OF_PF_BLOCKS_RT_OFFSET 5955
+#define PSWRQ2_REG_CDUC_NUMBER_OF_PF_BLOCKS_RT_OFFSET 5956
+#define PSWRQ2_REG_TM_VF_BLOCKS_RT_OFFSET 5957
+#define PSWRQ2_REG_CDUT_VF_BLOCKS_RT_OFFSET 5958
+#define PSWRQ2_REG_CDUC_VF_BLOCKS_RT_OFFSET 5959
+#define PSWRQ2_REG_TM_BLOCKS_FACTOR_RT_OFFSET 5960
+#define PSWRQ2_REG_CDUT_BLOCKS_FACTOR_RT_OFFSET 5961
+#define PSWRQ2_REG_CDUC_BLOCKS_FACTOR_RT_OFFSET 5962
+#define PSWRQ2_REG_VF_BASE_RT_OFFSET 5963
+#define PSWRQ2_REG_VF_LAST_ILT_RT_OFFSET 5964
+#define PSWRQ2_REG_DRAM_ALIGN_WR_RT_OFFSET 5965
+#define PSWRQ2_REG_DRAM_ALIGN_RD_RT_OFFSET 5966
+#define PSWRQ2_REG_ILT_MEMORY_RT_OFFSET 5967
+#define PSWRQ2_REG_ILT_MEMORY_RT_SIZE 22000
+#define PGLUE_REG_B_VF_BASE_RT_OFFSET 27967
+#define PGLUE_REG_B_MSDM_OFFSET_MASK_B_RT_OFFSET 27968
+#define PGLUE_REG_B_MSDM_VF_SHIFT_B_RT_OFFSET 27969
+#define PGLUE_REG_B_CACHE_LINE_SIZE_RT_OFFSET 27970
+#define PGLUE_REG_B_PF_BAR0_SIZE_RT_OFFSET 27971
+#define PGLUE_REG_B_PF_BAR1_SIZE_RT_OFFSET 27972
+#define PGLUE_REG_B_VF_BAR1_SIZE_RT_OFFSET 27973
+#define TM_REG_VF_ENABLE_CONN_RT_OFFSET 27974
+#define TM_REG_PF_ENABLE_CONN_RT_OFFSET 27975
+#define TM_REG_PF_ENABLE_TASK_RT_OFFSET 27976
+#define TM_REG_GROUP_SIZE_RESOLUTION_CONN_RT_OFFSET 27977
+#define TM_REG_GROUP_SIZE_RESOLUTION_TASK_RT_OFFSET 27978
+#define TM_REG_CONFIG_CONN_MEM_RT_OFFSET 27979
+#define TM_REG_CONFIG_CONN_MEM_RT_SIZE 416
+#define TM_REG_CONFIG_TASK_MEM_RT_OFFSET 28395
+#define TM_REG_CONFIG_TASK_MEM_RT_SIZE 512
+#define QM_REG_MAXPQSIZE_0_RT_OFFSET 28907
+#define QM_REG_MAXPQSIZE_1_RT_OFFSET 28908
+#define QM_REG_MAXPQSIZE_2_RT_OFFSET 28909
+#define QM_REG_MAXPQSIZETXSEL_0_RT_OFFSET 28910
+#define QM_REG_MAXPQSIZETXSEL_1_RT_OFFSET 28911
+#define QM_REG_MAXPQSIZETXSEL_2_RT_OFFSET 28912
+#define QM_REG_MAXPQSIZETXSEL_3_RT_OFFSET 28913
+#define QM_REG_MAXPQSIZETXSEL_4_RT_OFFSET 28914
+#define QM_REG_MAXPQSIZETXSEL_5_RT_OFFSET 28915
+#define QM_REG_MAXPQSIZETXSEL_6_RT_OFFSET 28916
+#define QM_REG_MAXPQSIZETXSEL_7_RT_OFFSET 28917
+#define QM_REG_MAXPQSIZETXSEL_8_RT_OFFSET 28918
+#define QM_REG_MAXPQSIZETXSEL_9_RT_OFFSET 28919
+#define QM_REG_MAXPQSIZETXSEL_10_RT_OFFSET 28920
+#define QM_REG_MAXPQSIZETXSEL_11_RT_OFFSET 28921
+#define QM_REG_MAXPQSIZETXSEL_12_RT_OFFSET 28922
+#define QM_REG_MAXPQSIZETXSEL_13_RT_OFFSET 28923
+#define QM_REG_MAXPQSIZETXSEL_14_RT_OFFSET 28924
+#define QM_REG_MAXPQSIZETXSEL_15_RT_OFFSET 28925
+#define QM_REG_MAXPQSIZETXSEL_16_RT_OFFSET 28926
+#define QM_REG_MAXPQSIZETXSEL_17_RT_OFFSET 28927
+#define QM_REG_MAXPQSIZETXSEL_18_RT_OFFSET 28928
+#define QM_REG_MAXPQSIZETXSEL_19_RT_OFFSET 28929
+#define QM_REG_MAXPQSIZETXSEL_20_RT_OFFSET 28930
+#define QM_REG_MAXPQSIZETXSEL_21_RT_OFFSET 28931
+#define QM_REG_MAXPQSIZETXSEL_22_RT_OFFSET 28932
+#define QM_REG_MAXPQSIZETXSEL_23_RT_OFFSET 28933
+#define QM_REG_MAXPQSIZETXSEL_24_RT_OFFSET 28934
+#define QM_REG_MAXPQSIZETXSEL_25_RT_OFFSET 28935
+#define QM_REG_MAXPQSIZETXSEL_26_RT_OFFSET 28936
+#define QM_REG_MAXPQSIZETXSEL_27_RT_OFFSET 28937
+#define QM_REG_MAXPQSIZETXSEL_28_RT_OFFSET 28938
+#define QM_REG_MAXPQSIZETXSEL_29_RT_OFFSET 28939
+#define QM_REG_MAXPQSIZETXSEL_30_RT_OFFSET 28940
+#define QM_REG_MAXPQSIZETXSEL_31_RT_OFFSET 28941
+#define QM_REG_MAXPQSIZETXSEL_32_RT_OFFSET 28942
+#define QM_REG_MAXPQSIZETXSEL_33_RT_OFFSET 28943
+#define QM_REG_MAXPQSIZETXSEL_34_RT_OFFSET 28944
+#define QM_REG_MAXPQSIZETXSEL_35_RT_OFFSET 28945
+#define QM_REG_MAXPQSIZETXSEL_36_RT_OFFSET 28946
+#define QM_REG_MAXPQSIZETXSEL_37_RT_OFFSET 28947
+#define QM_REG_MAXPQSIZETXSEL_38_RT_OFFSET 28948
+#define QM_REG_MAXPQSIZETXSEL_39_RT_OFFSET 28949
+#define QM_REG_MAXPQSIZETXSEL_40_RT_OFFSET 28950
+#define QM_REG_MAXPQSIZETXSEL_41_RT_OFFSET 28951
+#define QM_REG_MAXPQSIZETXSEL_42_RT_OFFSET 28952
+#define QM_REG_MAXPQSIZETXSEL_43_RT_OFFSET 28953
+#define QM_REG_MAXPQSIZETXSEL_44_RT_OFFSET 28954
+#define QM_REG_MAXPQSIZETXSEL_45_RT_OFFSET 28955
+#define QM_REG_MAXPQSIZETXSEL_46_RT_OFFSET 28956
+#define QM_REG_MAXPQSIZETXSEL_47_RT_OFFSET 28957
+#define QM_REG_MAXPQSIZETXSEL_48_RT_OFFSET 28958
+#define QM_REG_MAXPQSIZETXSEL_49_RT_OFFSET 28959
+#define QM_REG_MAXPQSIZETXSEL_50_RT_OFFSET 28960
+#define QM_REG_MAXPQSIZETXSEL_51_RT_OFFSET 28961
+#define QM_REG_MAXPQSIZETXSEL_52_RT_OFFSET 28962
+#define QM_REG_MAXPQSIZETXSEL_53_RT_OFFSET 28963
+#define QM_REG_MAXPQSIZETXSEL_54_RT_OFFSET 28964
+#define QM_REG_MAXPQSIZETXSEL_55_RT_OFFSET 28965
+#define QM_REG_MAXPQSIZETXSEL_56_RT_OFFSET 28966
+#define QM_REG_MAXPQSIZETXSEL_57_RT_OFFSET 28967
+#define QM_REG_MAXPQSIZETXSEL_58_RT_OFFSET 28968
+#define QM_REG_MAXPQSIZETXSEL_59_RT_OFFSET 28969
+#define QM_REG_MAXPQSIZETXSEL_60_RT_OFFSET 28970
+#define QM_REG_MAXPQSIZETXSEL_61_RT_OFFSET 28971
+#define QM_REG_MAXPQSIZETXSEL_62_RT_OFFSET 28972
+#define QM_REG_MAXPQSIZETXSEL_63_RT_OFFSET 28973
+#define QM_REG_BASEADDROTHERPQ_RT_OFFSET 28974
+#define QM_REG_BASEADDROTHERPQ_RT_SIZE 128
+#define QM_REG_PTRTBLOTHER_RT_OFFSET 29102
+#define QM_REG_PTRTBLOTHER_RT_SIZE 256
+#define QM_REG_VOQCRDLINE_RT_OFFSET 29358
+#define QM_REG_VOQCRDLINE_RT_SIZE 20
+#define QM_REG_VOQINITCRDLINE_RT_OFFSET 29378
+#define QM_REG_VOQINITCRDLINE_RT_SIZE 20
+#define QM_REG_AFULLQMBYPTHRPFWFQ_RT_OFFSET 29398
+#define QM_REG_AFULLQMBYPTHRVPWFQ_RT_OFFSET 29399
+#define QM_REG_AFULLQMBYPTHRPFRL_RT_OFFSET 29400
+#define QM_REG_AFULLQMBYPTHRGLBLRL_RT_OFFSET 29401
+#define QM_REG_AFULLOPRTNSTCCRDMASK_RT_OFFSET 29402
+#define QM_REG_WRROTHERPQGRP_0_RT_OFFSET 29403
+#define QM_REG_WRROTHERPQGRP_1_RT_OFFSET 29404
+#define QM_REG_WRROTHERPQGRP_2_RT_OFFSET 29405
+#define QM_REG_WRROTHERPQGRP_3_RT_OFFSET 29406
+#define QM_REG_WRROTHERPQGRP_4_RT_OFFSET 29407
+#define QM_REG_WRROTHERPQGRP_5_RT_OFFSET 29408
+#define QM_REG_WRROTHERPQGRP_6_RT_OFFSET 29409
+#define QM_REG_WRROTHERPQGRP_7_RT_OFFSET 29410
+#define QM_REG_WRROTHERPQGRP_8_RT_OFFSET 29411
+#define QM_REG_WRROTHERPQGRP_9_RT_OFFSET 29412
+#define QM_REG_WRROTHERPQGRP_10_RT_OFFSET 29413
+#define QM_REG_WRROTHERPQGRP_11_RT_OFFSET 29414
+#define QM_REG_WRROTHERPQGRP_12_RT_OFFSET 29415
+#define QM_REG_WRROTHERPQGRP_13_RT_OFFSET 29416
+#define QM_REG_WRROTHERPQGRP_14_RT_OFFSET 29417
+#define QM_REG_WRROTHERPQGRP_15_RT_OFFSET 29418
+#define QM_REG_WRROTHERGRPWEIGHT_0_RT_OFFSET 29419
+#define QM_REG_WRROTHERGRPWEIGHT_1_RT_OFFSET 29420
+#define QM_REG_WRROTHERGRPWEIGHT_2_RT_OFFSET 29421
+#define QM_REG_WRROTHERGRPWEIGHT_3_RT_OFFSET 29422
+#define QM_REG_WRRTXGRPWEIGHT_0_RT_OFFSET 29423
+#define QM_REG_WRRTXGRPWEIGHT_1_RT_OFFSET 29424
+#define QM_REG_PQTX2PF_0_RT_OFFSET 29425
+#define QM_REG_PQTX2PF_1_RT_OFFSET 29426
+#define QM_REG_PQTX2PF_2_RT_OFFSET 29427
+#define QM_REG_PQTX2PF_3_RT_OFFSET 29428
+#define QM_REG_PQTX2PF_4_RT_OFFSET 29429
+#define QM_REG_PQTX2PF_5_RT_OFFSET 29430
+#define QM_REG_PQTX2PF_6_RT_OFFSET 29431
+#define QM_REG_PQTX2PF_7_RT_OFFSET 29432
+#define QM_REG_PQTX2PF_8_RT_OFFSET 29433
+#define QM_REG_PQTX2PF_9_RT_OFFSET 29434
+#define QM_REG_PQTX2PF_10_RT_OFFSET 29435
+#define QM_REG_PQTX2PF_11_RT_OFFSET 29436
+#define QM_REG_PQTX2PF_12_RT_OFFSET 29437
+#define QM_REG_PQTX2PF_13_RT_OFFSET 29438
+#define QM_REG_PQTX2PF_14_RT_OFFSET 29439
+#define QM_REG_PQTX2PF_15_RT_OFFSET 29440
+#define QM_REG_PQTX2PF_16_RT_OFFSET 29441
+#define QM_REG_PQTX2PF_17_RT_OFFSET 29442
+#define QM_REG_PQTX2PF_18_RT_OFFSET 29443
+#define QM_REG_PQTX2PF_19_RT_OFFSET 29444
+#define QM_REG_PQTX2PF_20_RT_OFFSET 29445
+#define QM_REG_PQTX2PF_21_RT_OFFSET 29446
+#define QM_REG_PQTX2PF_22_RT_OFFSET 29447
+#define QM_REG_PQTX2PF_23_RT_OFFSET 29448
+#define QM_REG_PQTX2PF_24_RT_OFFSET 29449
+#define QM_REG_PQTX2PF_25_RT_OFFSET 29450
+#define QM_REG_PQTX2PF_26_RT_OFFSET 29451
+#define QM_REG_PQTX2PF_27_RT_OFFSET 29452
+#define QM_REG_PQTX2PF_28_RT_OFFSET 29453
+#define QM_REG_PQTX2PF_29_RT_OFFSET 29454
+#define QM_REG_PQTX2PF_30_RT_OFFSET 29455
+#define QM_REG_PQTX2PF_31_RT_OFFSET 29456
+#define QM_REG_PQTX2PF_32_RT_OFFSET 29457
+#define QM_REG_PQTX2PF_33_RT_OFFSET 29458
+#define QM_REG_PQTX2PF_34_RT_OFFSET 29459
+#define QM_REG_PQTX2PF_35_RT_OFFSET 29460
+#define QM_REG_PQTX2PF_36_RT_OFFSET 29461
+#define QM_REG_PQTX2PF_37_RT_OFFSET 29462
+#define QM_REG_PQTX2PF_38_RT_OFFSET 29463
+#define QM_REG_PQTX2PF_39_RT_OFFSET 29464
+#define QM_REG_PQTX2PF_40_RT_OFFSET 29465
+#define QM_REG_PQTX2PF_41_RT_OFFSET 29466
+#define QM_REG_PQTX2PF_42_RT_OFFSET 29467
+#define QM_REG_PQTX2PF_43_RT_OFFSET 29468
+#define QM_REG_PQTX2PF_44_RT_OFFSET 29469
+#define QM_REG_PQTX2PF_45_RT_OFFSET 29470
+#define QM_REG_PQTX2PF_46_RT_OFFSET 29471
+#define QM_REG_PQTX2PF_47_RT_OFFSET 29472
+#define QM_REG_PQTX2PF_48_RT_OFFSET 29473
+#define QM_REG_PQTX2PF_49_RT_OFFSET 29474
+#define QM_REG_PQTX2PF_50_RT_OFFSET 29475
+#define QM_REG_PQTX2PF_51_RT_OFFSET 29476
+#define QM_REG_PQTX2PF_52_RT_OFFSET 29477
+#define QM_REG_PQTX2PF_53_RT_OFFSET 29478
+#define QM_REG_PQTX2PF_54_RT_OFFSET 29479
+#define QM_REG_PQTX2PF_55_RT_OFFSET 29480
+#define QM_REG_PQTX2PF_56_RT_OFFSET 29481
+#define QM_REG_PQTX2PF_57_RT_OFFSET 29482
+#define QM_REG_PQTX2PF_58_RT_OFFSET 29483
+#define QM_REG_PQTX2PF_59_RT_OFFSET 29484
+#define QM_REG_PQTX2PF_60_RT_OFFSET 29485
+#define QM_REG_PQTX2PF_61_RT_OFFSET 29486
+#define QM_REG_PQTX2PF_62_RT_OFFSET 29487
+#define QM_REG_PQTX2PF_63_RT_OFFSET 29488
+#define QM_REG_PQOTHER2PF_0_RT_OFFSET 29489
+#define QM_REG_PQOTHER2PF_1_RT_OFFSET 29490
+#define QM_REG_PQOTHER2PF_2_RT_OFFSET 29491
+#define QM_REG_PQOTHER2PF_3_RT_OFFSET 29492
+#define QM_REG_PQOTHER2PF_4_RT_OFFSET 29493
+#define QM_REG_PQOTHER2PF_5_RT_OFFSET 29494
+#define QM_REG_PQOTHER2PF_6_RT_OFFSET 29495
+#define QM_REG_PQOTHER2PF_7_RT_OFFSET 29496
+#define QM_REG_PQOTHER2PF_8_RT_OFFSET 29497
+#define QM_REG_PQOTHER2PF_9_RT_OFFSET 29498
+#define QM_REG_PQOTHER2PF_10_RT_OFFSET 29499
+#define QM_REG_PQOTHER2PF_11_RT_OFFSET 29500
+#define QM_REG_PQOTHER2PF_12_RT_OFFSET 29501
+#define QM_REG_PQOTHER2PF_13_RT_OFFSET 29502
+#define QM_REG_PQOTHER2PF_14_RT_OFFSET 29503
+#define QM_REG_PQOTHER2PF_15_RT_OFFSET 29504
+#define QM_REG_RLGLBLPERIOD_0_RT_OFFSET 29505
+#define QM_REG_RLGLBLPERIOD_1_RT_OFFSET 29506
+#define QM_REG_RLGLBLPERIODTIMER_0_RT_OFFSET 29507
+#define QM_REG_RLGLBLPERIODTIMER_1_RT_OFFSET 29508
+#define QM_REG_RLGLBLPERIODSEL_0_RT_OFFSET 29509
+#define QM_REG_RLGLBLPERIODSEL_1_RT_OFFSET 29510
+#define QM_REG_RLGLBLPERIODSEL_2_RT_OFFSET 29511
+#define QM_REG_RLGLBLPERIODSEL_3_RT_OFFSET 29512
+#define QM_REG_RLGLBLPERIODSEL_4_RT_OFFSET 29513
+#define QM_REG_RLGLBLPERIODSEL_5_RT_OFFSET 29514
+#define QM_REG_RLGLBLPERIODSEL_6_RT_OFFSET 29515
+#define QM_REG_RLGLBLPERIODSEL_7_RT_OFFSET 29516
+#define QM_REG_RLGLBLINCVAL_RT_OFFSET 29517
+#define QM_REG_RLGLBLINCVAL_RT_SIZE 256
+#define QM_REG_RLGLBLUPPERBOUND_RT_OFFSET 29773
+#define QM_REG_RLGLBLUPPERBOUND_RT_SIZE 256
+#define QM_REG_RLGLBLCRD_RT_OFFSET 30029
+#define QM_REG_RLGLBLCRD_RT_SIZE 256
+#define QM_REG_RLGLBLENABLE_RT_OFFSET 30285
+#define QM_REG_RLPFPERIOD_RT_OFFSET 30286
+#define QM_REG_RLPFPERIODTIMER_RT_OFFSET 30287
+#define QM_REG_RLPFINCVAL_RT_OFFSET 30288
+#define QM_REG_RLPFINCVAL_RT_SIZE 16
+#define QM_REG_RLPFUPPERBOUND_RT_OFFSET 30304
+#define QM_REG_RLPFUPPERBOUND_RT_SIZE 16
+#define QM_REG_RLPFCRD_RT_OFFSET 30320
+#define QM_REG_RLPFCRD_RT_SIZE 16
+#define QM_REG_RLPFENABLE_RT_OFFSET 30336
+#define QM_REG_RLPFVOQENABLE_RT_OFFSET 30337
+#define QM_REG_WFQPFWEIGHT_RT_OFFSET 30338
+#define QM_REG_WFQPFWEIGHT_RT_SIZE 16
+#define QM_REG_WFQPFUPPERBOUND_RT_OFFSET 30354
+#define QM_REG_WFQPFUPPERBOUND_RT_SIZE 16
+#define QM_REG_WFQPFCRD_RT_OFFSET 30370
+#define QM_REG_WFQPFCRD_RT_SIZE 160
+#define QM_REG_WFQPFENABLE_RT_OFFSET 30530
+#define QM_REG_WFQVPENABLE_RT_OFFSET 30531
+#define QM_REG_BASEADDRTXPQ_RT_OFFSET 30532
+#define QM_REG_BASEADDRTXPQ_RT_SIZE 512
+#define QM_REG_TXPQMAP_RT_OFFSET 31044
+#define QM_REG_TXPQMAP_RT_SIZE 512
+#define QM_REG_WFQVPWEIGHT_RT_OFFSET 31556
+#define QM_REG_WFQVPWEIGHT_RT_SIZE 512
+#define QM_REG_WFQVPCRD_RT_OFFSET 32068
+#define QM_REG_WFQVPCRD_RT_SIZE 512
+#define QM_REG_WFQVPMAP_RT_OFFSET 32580
+#define QM_REG_WFQVPMAP_RT_SIZE 512
+#define QM_REG_PTRTBLTX_RT_OFFSET 33092
+#define QM_REG_PTRTBLTX_RT_SIZE 1024
+#define QM_REG_WFQPFCRD_MSB_RT_OFFSET 34116
+#define QM_REG_WFQPFCRD_MSB_RT_SIZE 160
+#define NIG_REG_TAG_ETHERTYPE_0_RT_OFFSET 34276
+#define NIG_REG_BRB_GATE_DNTFWD_PORT_RT_OFFSET 34277
+#define NIG_REG_OUTER_TAG_VALUE_LIST0_RT_OFFSET 34278
+#define NIG_REG_OUTER_TAG_VALUE_LIST1_RT_OFFSET 34279
+#define NIG_REG_OUTER_TAG_VALUE_LIST2_RT_OFFSET 34280
+#define NIG_REG_OUTER_TAG_VALUE_LIST3_RT_OFFSET 34281
+#define NIG_REG_LLH_FUNC_TAGMAC_CLS_TYPE_RT_OFFSET 34282
+#define NIG_REG_LLH_FUNC_TAG_EN_RT_OFFSET 34283
+#define NIG_REG_LLH_FUNC_TAG_EN_RT_SIZE 4
+#define NIG_REG_LLH_FUNC_TAG_VALUE_RT_OFFSET 34287
+#define NIG_REG_LLH_FUNC_TAG_VALUE_RT_SIZE 4
+#define NIG_REG_LLH_FUNC_FILTER_VALUE_RT_OFFSET 34291
+#define NIG_REG_LLH_FUNC_FILTER_VALUE_RT_SIZE 32
+#define NIG_REG_LLH_FUNC_FILTER_EN_RT_OFFSET 34323
+#define NIG_REG_LLH_FUNC_FILTER_EN_RT_SIZE 16
+#define NIG_REG_LLH_FUNC_FILTER_MODE_RT_OFFSET 34339
+#define NIG_REG_LLH_FUNC_FILTER_MODE_RT_SIZE 16
+#define NIG_REG_LLH_FUNC_FILTER_PROTOCOL_TYPE_RT_OFFSET 34355
+#define NIG_REG_LLH_FUNC_FILTER_PROTOCOL_TYPE_RT_SIZE 16
+#define NIG_REG_LLH_FUNC_FILTER_HDR_SEL_RT_OFFSET 34371
+#define NIG_REG_LLH_FUNC_FILTER_HDR_SEL_RT_SIZE 16
+#define NIG_REG_TX_EDPM_CTRL_RT_OFFSET 34387
+#define NIG_REG_PPF_TO_ENGINE_SEL_RT_OFFSET 34388
+#define NIG_REG_PPF_TO_ENGINE_SEL_RT_SIZE 8
+#define CDU_REG_CID_ADDR_PARAMS_RT_OFFSET 34396
+#define CDU_REG_SEGMENT0_PARAMS_RT_OFFSET 34397
+#define CDU_REG_SEGMENT1_PARAMS_RT_OFFSET 34398
+#define CDU_REG_PF_SEG0_TYPE_OFFSET_RT_OFFSET 34399
+#define CDU_REG_PF_SEG1_TYPE_OFFSET_RT_OFFSET 34400
+#define CDU_REG_PF_SEG2_TYPE_OFFSET_RT_OFFSET 34401
+#define CDU_REG_PF_SEG3_TYPE_OFFSET_RT_OFFSET 34402
+#define CDU_REG_PF_FL_SEG0_TYPE_OFFSET_RT_OFFSET 34403
+#define CDU_REG_PF_FL_SEG1_TYPE_OFFSET_RT_OFFSET 34404
+#define CDU_REG_PF_FL_SEG2_TYPE_OFFSET_RT_OFFSET 34405
+#define CDU_REG_PF_FL_SEG3_TYPE_OFFSET_RT_OFFSET 34406
+#define CDU_REG_VF_SEG_TYPE_OFFSET_RT_OFFSET 34407
+#define CDU_REG_VF_FL_SEG_TYPE_OFFSET_RT_OFFSET 34408
+#define PBF_REG_TAG_ETHERTYPE_0_RT_OFFSET 34409
+#define PBF_REG_BTB_SHARED_AREA_SIZE_RT_OFFSET 34410
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ0_RT_OFFSET 34411
+#define PBF_REG_BTB_GUARANTEED_VOQ0_RT_OFFSET 34412
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ0_RT_OFFSET 34413
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ1_RT_OFFSET 34414
+#define PBF_REG_BTB_GUARANTEED_VOQ1_RT_OFFSET 34415
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ1_RT_OFFSET 34416
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ2_RT_OFFSET 34417
+#define PBF_REG_BTB_GUARANTEED_VOQ2_RT_OFFSET 34418
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ2_RT_OFFSET 34419
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ3_RT_OFFSET 34420
+#define PBF_REG_BTB_GUARANTEED_VOQ3_RT_OFFSET 34421
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ3_RT_OFFSET 34422
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ4_RT_OFFSET 34423
+#define PBF_REG_BTB_GUARANTEED_VOQ4_RT_OFFSET 34424
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ4_RT_OFFSET 34425
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ5_RT_OFFSET 34426
+#define PBF_REG_BTB_GUARANTEED_VOQ5_RT_OFFSET 34427
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ5_RT_OFFSET 34428
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ6_RT_OFFSET 34429
+#define PBF_REG_BTB_GUARANTEED_VOQ6_RT_OFFSET 34430
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ6_RT_OFFSET 34431
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ7_RT_OFFSET 34432
+#define PBF_REG_BTB_GUARANTEED_VOQ7_RT_OFFSET 34433
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ7_RT_OFFSET 34434
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ8_RT_OFFSET 34435
+#define PBF_REG_BTB_GUARANTEED_VOQ8_RT_OFFSET 34436
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ8_RT_OFFSET 34437
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ9_RT_OFFSET 34438
+#define PBF_REG_BTB_GUARANTEED_VOQ9_RT_OFFSET 34439
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ9_RT_OFFSET 34440
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ10_RT_OFFSET 34441
+#define PBF_REG_BTB_GUARANTEED_VOQ10_RT_OFFSET 34442
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ10_RT_OFFSET 34443
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ11_RT_OFFSET 34444
+#define PBF_REG_BTB_GUARANTEED_VOQ11_RT_OFFSET 34445
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ11_RT_OFFSET 34446
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ12_RT_OFFSET 34447
+#define PBF_REG_BTB_GUARANTEED_VOQ12_RT_OFFSET 34448
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ12_RT_OFFSET 34449
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ13_RT_OFFSET 34450
+#define PBF_REG_BTB_GUARANTEED_VOQ13_RT_OFFSET 34451
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ13_RT_OFFSET 34452
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ14_RT_OFFSET 34453
+#define PBF_REG_BTB_GUARANTEED_VOQ14_RT_OFFSET 34454
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ14_RT_OFFSET 34455
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ15_RT_OFFSET 34456
+#define PBF_REG_BTB_GUARANTEED_VOQ15_RT_OFFSET 34457
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ15_RT_OFFSET 34458
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ16_RT_OFFSET 34459
+#define PBF_REG_BTB_GUARANTEED_VOQ16_RT_OFFSET 34460
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ16_RT_OFFSET 34461
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ17_RT_OFFSET 34462
+#define PBF_REG_BTB_GUARANTEED_VOQ17_RT_OFFSET 34463
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ17_RT_OFFSET 34464
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ18_RT_OFFSET 34465
+#define PBF_REG_BTB_GUARANTEED_VOQ18_RT_OFFSET 34466
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ18_RT_OFFSET 34467
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ19_RT_OFFSET 34468
+#define PBF_REG_BTB_GUARANTEED_VOQ19_RT_OFFSET 34469
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ19_RT_OFFSET 34470
+#define XCM_REG_CON_PHY_Q3_RT_OFFSET 34471
+
+#define RUNTIME_ARRAY_SIZE 34472
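The renumbering above is mechanical: every *_RT_OFFSET indexes a host-side shadow of the runtime init array, and RUNTIME_ARRAY_SIZE shrinking from 43022 to 34472 simply reflects the dropped and resized regions. A minimal stand-alone sketch of that shadow-array pattern, modeled on the driver's STORE_RT_REG idea (the struct and helper here are illustrative, not the in-tree definitions):

#include <stdbool.h>
#include <stdint.h>

#define RUNTIME_ARRAY_SIZE 34472
#define DORQ_REG_PF_WAKE_ALL_RT_OFFSET 17	/* value from the header above */

struct rt_data {
	uint32_t init_val[RUNTIME_ARRAY_SIZE];	/* value to program later */
	bool b_valid[RUNTIME_ARRAY_SIZE];	/* set only for touched entries */
};

/* Record one runtime register value; only b_valid entries are written
 * to the device when the init sequence is flushed. */
static void store_rt_reg(struct rt_data *rt, uint32_t rt_offset, uint32_t val)
{
	rt->init_val[rt_offset] = val;
	rt->b_valid[rt_offset] = true;
}

A caller would do store_rt_reg(rt, DORQ_REG_PF_WAKE_ALL_RT_OFFSET, 1) during PF init; the offsets only have to stay in sync between this header and the firmware's view of the array, which is what this hunk maintains.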
/* Init Callbacks */
#define DMAE_READY_CB 0
@@ -5648,9 +5450,9 @@ struct e4_eth_conn_context {
struct pstorm_eth_conn_st_ctx pstorm_st_context;
struct xstorm_eth_conn_st_ctx xstorm_st_context;
struct e4_xstorm_eth_conn_ag_ctx xstorm_ag_context;
+ struct e4_tstorm_eth_conn_ag_ctx tstorm_ag_context;
struct ystorm_eth_conn_st_ctx ystorm_st_context;
struct e4_ystorm_eth_conn_ag_ctx ystorm_ag_context;
- struct e4_tstorm_eth_conn_ag_ctx tstorm_ag_context;
struct e4_ustorm_eth_conn_ag_ctx ustorm_ag_context;
struct ustorm_eth_conn_st_ctx ustorm_st_context;
struct mstorm_eth_conn_st_ctx mstorm_st_context;
@@ -5680,6 +5482,16 @@ enum eth_error_code {
ETH_FILTERS_VNI_ADD_FAIL_FULL,
ETH_FILTERS_VNI_ADD_FAIL_DUP,
ETH_FILTERS_GFT_UPDATE_FAIL,
+ ETH_RX_QUEUE_FAIL_LOAD_VF_DATA,
+ ETH_FILTERS_GFS_ADD_FILTER_FAIL_MAX_HOPS,
+ ETH_FILTERS_GFS_ADD_FILTER_FAIL_NO_FREE_ENRTY,
+ ETH_FILTERS_GFS_ADD_FILTER_FAIL_ALREADY_EXISTS,
+ ETH_FILTERS_GFS_ADD_FILTER_FAIL_PCI_ERROR,
+ ETH_FILTERS_GFS_ADD_FINLER_FAIL_MAGIC_NUM_ERROR,
+ ETH_FILTERS_GFS_DEL_FILTER_FAIL_MAX_HOPS,
+ ETH_FILTERS_GFS_DEL_FILTER_FAIL_NO_MATCH_ENRTY,
+ ETH_FILTERS_GFS_DEL_FILTER_FAIL_PCI_ERROR,
+ ETH_FILTERS_GFS_DEL_FILTER_FAIL_MAGIC_NUM_ERROR,
MAX_ETH_ERROR_CODE
};
@@ -5703,6 +5515,11 @@ enum eth_event_opcode {
ETH_EVENT_RX_CREATE_GFT_ACTION,
ETH_EVENT_RX_GFT_UPDATE_FILTER,
ETH_EVENT_TX_QUEUE_UPDATE,
+ ETH_EVENT_RGFS_ADD_FILTER,
+ ETH_EVENT_RGFS_DEL_FILTER,
+ ETH_EVENT_TGFS_ADD_FILTER,
+ ETH_EVENT_TGFS_DEL_FILTER,
+ ETH_EVENT_GFS_COUNTERS_REPORT_REQUEST,
MAX_ETH_EVENT_OPCODE
};
@@ -5795,18 +5612,31 @@ enum eth_ramrod_cmd_id {
ETH_RAMROD_RX_CREATE_GFT_ACTION,
ETH_RAMROD_GFT_UPDATE_FILTER,
ETH_RAMROD_TX_QUEUE_UPDATE,
+ ETH_RAMROD_RGFS_FILTER_ADD,
+ ETH_RAMROD_RGFS_FILTER_DEL,
+ ETH_RAMROD_TGFS_FILTER_ADD,
+ ETH_RAMROD_TGFS_FILTER_DEL,
+ ETH_RAMROD_GFS_COUNTERS_REPORT_REQUEST,
MAX_ETH_RAMROD_CMD_ID
};
/* Return code from eth sp ramrods */
struct eth_return_code {
u8 value;
-#define ETH_RETURN_CODE_ERR_CODE_MASK 0x1F
-#define ETH_RETURN_CODE_ERR_CODE_SHIFT 0
-#define ETH_RETURN_CODE_RESERVED_MASK 0x3
-#define ETH_RETURN_CODE_RESERVED_SHIFT 5
-#define ETH_RETURN_CODE_RX_TX_MASK 0x1
-#define ETH_RETURN_CODE_RX_TX_SHIFT 7
+#define ETH_RETURN_CODE_ERR_CODE_MASK 0x3F
+#define ETH_RETURN_CODE_ERR_CODE_SHIFT 0
+#define ETH_RETURN_CODE_RESERVED_MASK 0x1
+#define ETH_RETURN_CODE_RESERVED_SHIFT 6
+#define ETH_RETURN_CODE_RX_TX_MASK 0x1
+#define ETH_RETURN_CODE_RX_TX_SHIFT 7
+};
+
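Note the err_code field widens from 5 to 6 bits and the reserved field narrows to a single bit at position 6; consumers that extract fields through these mask/shift pairs need no code change. A hedged restatement of that extraction pattern (GET_FIELD here mirrors the driver's common helper but is written out for illustration):

#include <stdint.h>

#define ETH_RETURN_CODE_ERR_CODE_MASK	0x3F
#define ETH_RETURN_CODE_ERR_CODE_SHIFT	0
#define ETH_RETURN_CODE_RX_TX_MASK	0x1
#define ETH_RETURN_CODE_RX_TX_SHIFT	7

/* Generic mask/shift field extraction, as used throughout the HSI. */
#define GET_FIELD(value, name) \
	(((value) >> name##_SHIFT) & name##_MASK)

static inline uint8_t eth_err_code(uint8_t value)
{
	return GET_FIELD(value, ETH_RETURN_CODE_ERR_CODE);	/* 6-bit code */
}

static inline uint8_t eth_err_is_tx(uint8_t value)
{
	return GET_FIELD(value, ETH_RETURN_CODE_RX_TX);	/* 0 = RX, 1 = TX */
}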
+/* tx destination enum */
+enum eth_tx_dst_mode_config_enum {
+ ETH_TX_DST_MODE_CONFIG_DISABLE,
+ ETH_TX_DST_MODE_CONFIG_FORWARD_DATA_IN_BD,
+ ETH_TX_DST_MODE_CONFIG_FORWARD_DATA_IN_VPORT,
+ MAX_ETH_TX_DST_MODE_CONFIG_ENUM
};
/* What to do in case an error occurs */
@@ -5833,8 +5663,10 @@ struct eth_tx_err_vals {
#define ETH_TX_ERR_VALS_MTU_VIOLATION_SHIFT 5
#define ETH_TX_ERR_VALS_ILLEGAL_CONTROL_FRAME_MASK 0x1
#define ETH_TX_ERR_VALS_ILLEGAL_CONTROL_FRAME_SHIFT 6
-#define ETH_TX_ERR_VALS_RESERVED_MASK 0x1FF
-#define ETH_TX_ERR_VALS_RESERVED_SHIFT 7
+#define ETH_TX_ERR_VALS_ILLEGAL_BD_FLAGS_MASK 0x1
+#define ETH_TX_ERR_VALS_ILLEGAL_BD_FLAGS_SHIFT 7
+#define ETH_TX_ERR_VALS_RESERVED_MASK 0xFF
+#define ETH_TX_ERR_VALS_RESERVED_SHIFT 8
};
/* vport rss configuration data */
@@ -5864,7 +5696,6 @@ struct eth_vport_rss_config {
u8 tbl_size;
__le32 reserved2[2];
__le16 indirection_table[ETH_RSS_IND_TABLE_ENTRIES_NUM];
-
__le32 rss_key[ETH_RSS_KEY_SIZE_REGS];
__le32 reserved3[2];
};
@@ -6066,7 +5897,7 @@ struct rx_update_gft_filter_data {
u8 inner_vlan_removal_en;
};
-/* Ramrod data for rx queue start ramrod */
+/* Ramrod data for tx queue start ramrod */
struct tx_queue_start_ramrod_data {
__le16 sb_id;
u8 sb_index;
@@ -6079,16 +5910,14 @@ struct tx_queue_start_ramrod_data {
#define TX_QUEUE_START_RAMROD_DATA_DISABLE_OPPORTUNISTIC_SHIFT 0
#define TX_QUEUE_START_RAMROD_DATA_TEST_MODE_PKT_DUP_MASK 0x1
#define TX_QUEUE_START_RAMROD_DATA_TEST_MODE_PKT_DUP_SHIFT 1
-#define TX_QUEUE_START_RAMROD_DATA_TEST_MODE_TX_DEST_MASK 0x1
-#define TX_QUEUE_START_RAMROD_DATA_TEST_MODE_TX_DEST_SHIFT 2
#define TX_QUEUE_START_RAMROD_DATA_PMD_MODE_MASK 0x1
-#define TX_QUEUE_START_RAMROD_DATA_PMD_MODE_SHIFT 3
+#define TX_QUEUE_START_RAMROD_DATA_PMD_MODE_SHIFT 2
#define TX_QUEUE_START_RAMROD_DATA_NOTIFY_EN_MASK 0x1
-#define TX_QUEUE_START_RAMROD_DATA_NOTIFY_EN_SHIFT 4
+#define TX_QUEUE_START_RAMROD_DATA_NOTIFY_EN_SHIFT 3
#define TX_QUEUE_START_RAMROD_DATA_PIN_CONTEXT_MASK 0x1
-#define TX_QUEUE_START_RAMROD_DATA_PIN_CONTEXT_SHIFT 5
-#define TX_QUEUE_START_RAMROD_DATA_RESERVED1_MASK 0x3
-#define TX_QUEUE_START_RAMROD_DATA_RESERVED1_SHIFT 6
+#define TX_QUEUE_START_RAMROD_DATA_PIN_CONTEXT_SHIFT 4
+#define TX_QUEUE_START_RAMROD_DATA_RESERVED1_MASK 0x7
+#define TX_QUEUE_START_RAMROD_DATA_RESERVED1_SHIFT 5
u8 pxp_st_hint;
u8 pxp_tph_valid_bd;
u8 pxp_tph_valid_pkt;
@@ -6144,18 +5973,22 @@ struct vport_start_ramrod_data {
__le16 default_vlan;
u8 tx_switching_en;
u8 anti_spoofing_en;
-
u8 default_vlan_en;
-
u8 handle_ptp_pkts;
u8 silent_vlan_removal_en;
u8 untagged;
struct eth_tx_err_vals tx_err_behav;
-
u8 zero_placement_offset;
u8 ctl_frame_mac_check_en;
u8 ctl_frame_ethtype_check_en;
+ u8 reserved0;
+ u8 reserved1;
+ u8 tx_dst_port_mode_config;
+ u8 dst_vport_id;
+ u8 tx_dst_port_mode;
+ u8 dst_vport_id_valid;
u8 wipe_inner_vlan_pri_en;
+ u8 reserved2[2];
struct eth_in_to_in_pri_map_cfg in_to_in_vlan_pri_map_cfg;
};
@@ -6715,19 +6548,6 @@ struct e4_xstorm_eth_hw_conn_ag_ctx {
__le16 conn_dpi;
};
-/* GFT CAM line struct */
-struct gft_cam_line {
- __le32 camline;
-#define GFT_CAM_LINE_VALID_MASK 0x1
-#define GFT_CAM_LINE_VALID_SHIFT 0
-#define GFT_CAM_LINE_DATA_MASK 0x3FFF
-#define GFT_CAM_LINE_DATA_SHIFT 1
-#define GFT_CAM_LINE_MASK_BITS_MASK 0x3FFF
-#define GFT_CAM_LINE_MASK_BITS_SHIFT 15
-#define GFT_CAM_LINE_RESERVED1_MASK 0x7
-#define GFT_CAM_LINE_RESERVED1_SHIFT 29
-};
-
/* GFT CAM line struct with fields breakout */
struct gft_cam_line_mapped {
__le32 camline;
@@ -6757,10 +6577,6 @@ struct gft_cam_line_mapped {
#define GFT_CAM_LINE_MAPPED_RESERVED1_SHIFT 29
};
-union gft_cam_line_union {
- struct gft_cam_line cam_line;
- struct gft_cam_line_mapped cam_line_mapped;
-};
/* Used in gft_profile_key: Indication for ip version */
enum gft_profile_ip_version {
@@ -7039,6 +6855,11 @@ struct mstorm_rdma_task_st_ctx {
struct regpair temp[4];
};
+/* The rdma task context of Ustorm */
+struct ustorm_rdma_task_st_ctx {
+ struct regpair temp[6];
+};
+
struct e4_ustorm_rdma_task_ag_ctx {
u8 reserved;
u8 state;
@@ -7048,8 +6869,8 @@ struct e4_ustorm_rdma_task_ag_ctx {
#define E4_USTORM_RDMA_TASK_AG_CTX_CONNECTION_TYPE_SHIFT 0
#define E4_USTORM_RDMA_TASK_AG_CTX_EXIST_IN_QM0_MASK 0x1
#define E4_USTORM_RDMA_TASK_AG_CTX_EXIST_IN_QM0_SHIFT 4
-#define E4_USTORM_RDMA_TASK_AG_CTX_DIF_RUNT_VALID_MASK 0x1
-#define E4_USTORM_RDMA_TASK_AG_CTX_DIF_RUNT_VALID_SHIFT 5
+#define E4_USTORM_RDMA_TASK_AG_CTX_BIT1_MASK 0x1
+#define E4_USTORM_RDMA_TASK_AG_CTX_BIT1_SHIFT 5
#define E4_USTORM_RDMA_TASK_AG_CTX_DIF_WRITE_RESULT_CF_MASK 0x3
#define E4_USTORM_RDMA_TASK_AG_CTX_DIF_WRITE_RESULT_CF_SHIFT 6
u8 flags1;
@@ -7079,29 +6900,29 @@ struct e4_ustorm_rdma_task_ag_ctx {
#define E4_USTORM_RDMA_TASK_AG_CTX_RULE2EN_MASK 0x1
#define E4_USTORM_RDMA_TASK_AG_CTX_RULE2EN_SHIFT 7
u8 flags3;
-#define E4_USTORM_RDMA_TASK_AG_CTX_RULE3EN_MASK 0x1
-#define E4_USTORM_RDMA_TASK_AG_CTX_RULE3EN_SHIFT 0
-#define E4_USTORM_RDMA_TASK_AG_CTX_RULE4EN_MASK 0x1
-#define E4_USTORM_RDMA_TASK_AG_CTX_RULE4EN_SHIFT 1
-#define E4_USTORM_RDMA_TASK_AG_CTX_RULE5EN_MASK 0x1
-#define E4_USTORM_RDMA_TASK_AG_CTX_RULE5EN_SHIFT 2
-#define E4_USTORM_RDMA_TASK_AG_CTX_RULE6EN_MASK 0x1
-#define E4_USTORM_RDMA_TASK_AG_CTX_RULE6EN_SHIFT 3
-#define E4_USTORM_RDMA_TASK_AG_CTX_DIF_ERROR_TYPE_MASK 0xF
-#define E4_USTORM_RDMA_TASK_AG_CTX_DIF_ERROR_TYPE_SHIFT 4
+#define E4_USTORM_RDMA_TASK_AG_CTX_DIF_RXMIT_PROD_CONS_EN_MASK 0x1
+#define E4_USTORM_RDMA_TASK_AG_CTX_DIF_RXMIT_PROD_CONS_EN_SHIFT 0
+#define E4_USTORM_RDMA_TASK_AG_CTX_RULE4EN_MASK 0x1
+#define E4_USTORM_RDMA_TASK_AG_CTX_RULE4EN_SHIFT 1
+#define E4_USTORM_RDMA_TASK_AG_CTX_DIF_WRITE_PROD_CONS_EN_MASK 0x1
+#define E4_USTORM_RDMA_TASK_AG_CTX_DIF_WRITE_PROD_CONS_EN_SHIFT 2
+#define E4_USTORM_RDMA_TASK_AG_CTX_RULE6EN_MASK 0x1
+#define E4_USTORM_RDMA_TASK_AG_CTX_RULE6EN_SHIFT 3
+#define E4_USTORM_RDMA_TASK_AG_CTX_DIF_ERROR_TYPE_MASK 0xF
+#define E4_USTORM_RDMA_TASK_AG_CTX_DIF_ERROR_TYPE_SHIFT 4
__le32 dif_err_intervals;
__le32 dif_error_1st_interval;
- __le32 sq_cons;
- __le32 dif_runt_value;
+ __le32 dif_rxmit_cons;
+ __le32 dif_rxmit_prod;
__le32 sge_index;
- __le32 reg5;
+ __le32 sq_cons;
u8 byte2;
u8 byte3;
- __le16 word1;
- __le16 word2;
+ __le16 dif_write_cons;
+ __le16 dif_write_prod;
__le16 word3;
- __le32 reg6;
- __le32 reg7;
+ __le32 dif_error_buffer_address_lo;
+ __le32 dif_error_buffer_address_hi;
};
/* RDMA task context */
@@ -7112,6 +6933,8 @@ struct e4_rdma_task_context {
struct e4_mstorm_rdma_task_ag_ctx mstorm_ag_context;
struct mstorm_rdma_task_st_ctx mstorm_st_context;
struct rdif_task_context rdif_context;
+ struct ustorm_rdma_task_st_ctx ustorm_st_context;
+ struct regpair ustorm_st_padding[2];
struct e4_ustorm_rdma_task_ag_ctx ustorm_ag_context;
};
@@ -7147,7 +6970,12 @@ struct rdma_create_cq_ramrod_data {
u8 pbl_log_page_size;
u8 toggle_bit;
__le16 int_timeout;
- __le16 reserved1;
+ u8 vf_id;
+ u8 flags;
+#define RDMA_CREATE_CQ_RAMROD_DATA_VF_ID_VALID_MASK 0x1
+#define RDMA_CREATE_CQ_RAMROD_DATA_VF_ID_VALID_SHIFT 0
+#define RDMA_CREATE_CQ_RAMROD_DATA_RESERVED1_MASK 0x7F
+#define RDMA_CREATE_CQ_RAMROD_DATA_RESERVED1_SHIFT 1
};
/* rdma deregister tid ramrod data */
@@ -7191,6 +7019,7 @@ enum rdma_fw_return_code {
RDMA_RETURN_DEREGISTER_MR_BAD_STATE_ERR,
RDMA_RETURN_RESIZE_CQ_ERR,
RDMA_RETURN_NIG_DRAIN_REQ,
+ RDMA_RETURN_GENERAL_ERR,
MAX_RDMA_FW_RETURN_CODE
};
@@ -7204,7 +7033,10 @@ struct rdma_init_func_hdr {
u8 relaxed_ordering;
__le16 first_reg_srq_id;
__le32 reg_srq_base_addr;
- __le32 reserved;
+ u8 searcher_mode;
+ u8 pvrdma_mode;
+ u8 max_num_ns_log;
+ u8 reserved;
};
/* rdma function init ramrod data */
@@ -7294,16 +7126,20 @@ struct rdma_resize_cq_ramrod_data {
#define RDMA_RESIZE_CQ_RAMROD_DATA_TOGGLE_BIT_SHIFT 0
#define RDMA_RESIZE_CQ_RAMROD_DATA_IS_TWO_LEVEL_PBL_MASK 0x1
#define RDMA_RESIZE_CQ_RAMROD_DATA_IS_TWO_LEVEL_PBL_SHIFT 1
-#define RDMA_RESIZE_CQ_RAMROD_DATA_RESERVED_MASK 0x3F
-#define RDMA_RESIZE_CQ_RAMROD_DATA_RESERVED_SHIFT 2
+#define RDMA_RESIZE_CQ_RAMROD_DATA_VF_ID_VALID_MASK 0x1
+#define RDMA_RESIZE_CQ_RAMROD_DATA_VF_ID_VALID_SHIFT 2
+#define RDMA_RESIZE_CQ_RAMROD_DATA_RESERVED_MASK 0x1F
+#define RDMA_RESIZE_CQ_RAMROD_DATA_RESERVED_SHIFT 3
u8 pbl_log_page_size;
__le16 pbl_num_pages;
__le32 max_cqes;
struct regpair pbl_addr;
struct regpair output_params_addr;
+ u8 vf_id;
+ u8 reserved1[7];
};
-/* The rdma storm context of Mstorm */
+/* The rdma SRQ context */
struct rdma_srq_context {
struct regpair temp[8];
};
@@ -7350,6 +7186,7 @@ enum rdma_tid_type {
MAX_RDMA_TID_TYPE
};
+/* The rdma XRC SRQ context */
struct rdma_xrc_srq_context {
struct regpair temp[9];
};
@@ -7531,12 +7368,12 @@ struct e4_xstorm_roce_conn_ag_ctx {
#define E4_XSTORM_ROCE_CONN_AG_CTX_BIT10_SHIFT 2
#define E4_XSTORM_ROCE_CONN_AG_CTX_BIT11_MASK 0x1
#define E4_XSTORM_ROCE_CONN_AG_CTX_BIT11_SHIFT 3
-#define E4_XSTORM_ROCE_CONN_AG_CTX_BIT12_MASK 0x1
-#define E4_XSTORM_ROCE_CONN_AG_CTX_BIT12_SHIFT 4
+#define E4_XSTORM_ROCE_CONN_AG_CTX_MSDM_FLUSH_MASK 0x1
+#define E4_XSTORM_ROCE_CONN_AG_CTX_MSDM_FLUSH_SHIFT 4
#define E4_XSTORM_ROCE_CONN_AG_CTX_MSEM_FLUSH_MASK 0x1
#define E4_XSTORM_ROCE_CONN_AG_CTX_MSEM_FLUSH_SHIFT 5
-#define E4_XSTORM_ROCE_CONN_AG_CTX_MSDM_FLUSH_MASK 0x1
-#define E4_XSTORM_ROCE_CONN_AG_CTX_MSDM_FLUSH_SHIFT 6
+#define E4_XSTORM_ROCE_CONN_AG_CTX_BIT14_MASK 0x1
+#define E4_XSTORM_ROCE_CONN_AG_CTX_BIT14_SHIFT 6
#define E4_XSTORM_ROCE_CONN_AG_CTX_YSTORM_FLUSH_MASK 0x1
#define E4_XSTORM_ROCE_CONN_AG_CTX_YSTORM_FLUSH_SHIFT 7
u8 flags2;
@@ -7860,9 +7697,9 @@ struct mstorm_roce_conn_st_ctx {
struct regpair temp[6];
};
-/* The roce storm context of Ystorm */
+/* The roce storm context of Ustorm */
struct ustorm_roce_conn_st_ctx {
- struct regpair temp[12];
+ struct regpair temp[14];
};
/* roce connection context */
@@ -7880,6 +7717,7 @@ struct e4_roce_conn_context {
struct mstorm_roce_conn_st_ctx mstorm_st_context;
struct regpair mstorm_st_padding[2];
struct ustorm_roce_conn_st_ctx ustorm_st_context;
+ struct regpair ustorm_st_padding[2];
};
/* roce cqes statistics */
@@ -7934,12 +7772,17 @@ struct roce_create_qp_req_ramrod_data {
struct regpair qp_handle_for_cqe;
struct regpair qp_handle_for_async;
u8 stats_counter_id;
- u8 reserved3[6];
+ u8 vf_id;
+ u8 vport_id;
u8 flags2;
#define ROCE_CREATE_QP_REQ_RAMROD_DATA_EDPM_MODE_MASK 0x1
#define ROCE_CREATE_QP_REQ_RAMROD_DATA_EDPM_MODE_SHIFT 0
-#define ROCE_CREATE_QP_REQ_RAMROD_DATA_RESERVED_MASK 0x7F
-#define ROCE_CREATE_QP_REQ_RAMROD_DATA_RESERVED_SHIFT 1
+#define ROCE_CREATE_QP_REQ_RAMROD_DATA_VF_ID_VALID_MASK 0x1
+#define ROCE_CREATE_QP_REQ_RAMROD_DATA_VF_ID_VALID_SHIFT 1
+#define ROCE_CREATE_QP_REQ_RAMROD_DATA_RESERVED_MASK 0x3F
+#define ROCE_CREATE_QP_REQ_RAMROD_DATA_RESERVED_SHIFT 2
+ u8 name_space;
+ u8 reserved3[3];
__le16 regular_latency_phy_queue;
__le16 dpi;
};
@@ -7967,8 +7810,10 @@ struct roce_create_qp_resp_ramrod_data {
#define ROCE_CREATE_QP_RESP_RAMROD_DATA_MIN_RNR_NAK_TIMER_SHIFT 11
#define ROCE_CREATE_QP_RESP_RAMROD_DATA_XRC_FLAG_MASK 0x1
#define ROCE_CREATE_QP_RESP_RAMROD_DATA_XRC_FLAG_SHIFT 16
-#define ROCE_CREATE_QP_RESP_RAMROD_DATA_RESERVED_MASK 0x7FFF
-#define ROCE_CREATE_QP_RESP_RAMROD_DATA_RESERVED_SHIFT 17
+#define ROCE_CREATE_QP_RESP_RAMROD_DATA_VF_ID_VALID_MASK 0x1
+#define ROCE_CREATE_QP_RESP_RAMROD_DATA_VF_ID_VALID_SHIFT 17
+#define ROCE_CREATE_QP_RESP_RAMROD_DATA_RESERVED_MASK 0x3FFF
+#define ROCE_CREATE_QP_RESP_RAMROD_DATA_RESERVED_SHIFT 18
__le16 xrc_domain;
u8 max_ird;
u8 traffic_class;
@@ -7995,10 +7840,14 @@ struct roce_create_qp_resp_ramrod_data {
struct regpair qp_handle_for_cqe;
struct regpair qp_handle_for_async;
__le16 low_latency_phy_queue;
- u8 reserved2[2];
+ u8 vf_id;
+ u8 vport_id;
__le32 cq_cid;
__le16 regular_latency_phy_queue;
__le16 dpi;
+ __le32 src_qp_id;
+ u8 name_space;
+ u8 reserved3[3];
};
/* roce DCQCN received statistics */
@@ -8032,6 +7881,8 @@ struct roce_destroy_qp_resp_output_params {
/* RoCE destroy qp responder ramrod data */
struct roce_destroy_qp_resp_ramrod_data {
struct regpair output_params_addr;
+ __le32 src_qp_id;
+ __le32 reserved;
};
/* roce error statistics */
@@ -8115,8 +7966,8 @@ struct roce_modify_qp_req_ramrod_data {
#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_PRI_FLG_SHIFT 9
#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_PRI_MASK 0x7
#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_PRI_SHIFT 10
-#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_PHYSICAL_QUEUES_FLG_MASK 0x1
-#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_PHYSICAL_QUEUES_FLG_SHIFT 13
+#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_PHYSICAL_QUEUE_FLG_MASK 0x1
+#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_PHYSICAL_QUEUE_FLG_SHIFT 13
#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_RESERVED1_MASK 0x3
#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_RESERVED1_SHIFT 14
u8 fields;
@@ -8162,8 +8013,8 @@ struct roce_modify_qp_resp_ramrod_data {
#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_MIN_RNR_NAK_TIMER_FLG_SHIFT 8
#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_RDMA_OPS_EN_FLG_MASK 0x1
#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_RDMA_OPS_EN_FLG_SHIFT 9
-#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_PHYSICAL_QUEUES_FLG_MASK 0x1
-#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_PHYSICAL_QUEUES_FLG_SHIFT 10
+#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_PHYSICAL_QUEUE_FLG_MASK 0x1
+#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_PHYSICAL_QUEUE_FLG_SHIFT 10
#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_RESERVED1_MASK 0x1F
#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_RESERVED1_SHIFT 11
u8 fields;
@@ -8204,7 +8055,7 @@ struct roce_query_qp_req_ramrod_data {
/* RoCE query qp responder output params */
struct roce_query_qp_resp_output_params {
__le32 psn;
- __le32 err_flag;
+ __le32 flags;
#define ROCE_QUERY_QP_RESP_OUTPUT_PARAMS_ERROR_FLG_MASK 0x1
#define ROCE_QUERY_QP_RESP_OUTPUT_PARAMS_ERROR_FLG_SHIFT 0
#define ROCE_QUERY_QP_RESP_OUTPUT_PARAMS_RESERVED0_MASK 0x7FFFFFFF
@@ -8271,12 +8122,12 @@ struct e4_xstorm_roce_conn_ag_ctx_dq_ext_ld_part {
#define E4XSTORMROCECONNAGCTXDQEXTLDPART_BIT10_SHIFT 2
#define E4XSTORMROCECONNAGCTXDQEXTLDPART_BIT11_MASK 0x1
#define E4XSTORMROCECONNAGCTXDQEXTLDPART_BIT11_SHIFT 3
-#define E4XSTORMROCECONNAGCTXDQEXTLDPART_BIT12_MASK 0x1
-#define E4XSTORMROCECONNAGCTXDQEXTLDPART_BIT12_SHIFT 4
-#define E4XSTORMROCECONNAGCTXDQEXTLDPART_MSEM_FLUSH_MASK 0x1
-#define E4XSTORMROCECONNAGCTXDQEXTLDPART_MSEM_FLUSH_SHIFT 5
-#define E4XSTORMROCECONNAGCTXDQEXTLDPART_MSDM_FLUSH_MASK 0x1
-#define E4XSTORMROCECONNAGCTXDQEXTLDPART_MSDM_FLUSH_SHIFT 6
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_MSDM_FLUSH_MASK 0x1
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_MSDM_FLUSH_SHIFT 4
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_MSEM_FLUSH_MASK 0x1
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_MSEM_FLUSH_SHIFT 5
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_BIT14_MASK 0x1
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_BIT14_SHIFT 6
#define E4XSTORMROCECONNAGCTXDQEXTLDPART_YSTORM_FLUSH_MASK 0x1
#define E4XSTORMROCECONNAGCTXDQEXTLDPART_YSTORM_FLUSH_SHIFT 7
u8 flags2;
@@ -8649,8 +8500,8 @@ struct e4_tstorm_roce_req_conn_ag_ctx {
u8 flags5;
#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_RULE1EN_MASK 0x1
#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_RULE1EN_SHIFT 0
-#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_RULE2EN_MASK 0x1
-#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_RULE2EN_SHIFT 1
+#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_DIF_CNT_EN_MASK 0x1
+#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_DIF_CNT_EN_SHIFT 1
#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_RULE3EN_MASK 0x1
#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_RULE3EN_SHIFT 2
#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_RULE4EN_MASK 0x1
@@ -8663,13 +8514,13 @@ struct e4_tstorm_roce_req_conn_ag_ctx {
#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_RULE7EN_SHIFT 6
#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_RULE8EN_MASK 0x1
#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_RULE8EN_SHIFT 7
- __le32 reg0;
+ __le32 dif_rxmit_cnt;
__le32 snd_nxt_psn;
__le32 snd_max_psn;
__le32 orq_prod;
__le32 reg4;
- __le32 reg5;
- __le32 reg6;
+ __le32 dif_acked_cnt;
+ __le32 dif_cnt;
__le32 reg7;
__le32 reg8;
u8 tx_cqe_error_type;
@@ -8680,7 +8531,7 @@ struct e4_tstorm_roce_req_conn_ag_ctx {
__le16 snd_sq_cons;
__le16 conn_dpi;
__le16 force_comp_cons;
- __le32 reg9;
+ __le32 dif_rxmit_acked_cnt;
__le32 reg10;
};
@@ -8955,10 +8806,10 @@ struct e4_xstorm_roce_req_conn_ag_ctx {
#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_BIT10_SHIFT 2
#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_BIT11_MASK 0x1
#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_BIT11_SHIFT 3
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_BIT12_MASK 0x1
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_BIT12_SHIFT 4
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_BIT13_MASK 0x1
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_BIT13_SHIFT 5
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_MSDM_FLUSH_MASK 0x1
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_MSDM_FLUSH_SHIFT 4
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_MSEM_FLUSH_MASK 0x1
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_MSEM_FLUSH_SHIFT 5
#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_ERROR_STATE_MASK 0x1
#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_ERROR_STATE_SHIFT 6
#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_YSTORM_FLUSH_MASK 0x1
@@ -9184,10 +9035,10 @@ struct e4_xstorm_roce_resp_conn_ag_ctx {
#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_BIT10_SHIFT 2
#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_BIT11_MASK 0x1
#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_BIT11_SHIFT 3
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_BIT12_MASK 0x1
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_BIT12_SHIFT 4
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_BIT13_MASK 0x1
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_BIT13_SHIFT 5
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_MSDM_FLUSH_MASK 0x1
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_MSDM_FLUSH_SHIFT 4
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_MSEM_FLUSH_MASK 0x1
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_MSEM_FLUSH_SHIFT 5
#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_ERROR_STATE_MASK 0x1
#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_ERROR_STATE_SHIFT 6
#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_YSTORM_FLUSH_MASK 0x1
@@ -9914,7 +9765,7 @@ struct mstorm_iwarp_conn_st_ctx {
/* The iwarp storm context of Ustorm */
struct ustorm_iwarp_conn_st_ctx {
- __le32 reserved[24];
+ struct regpair reserved[14];
};
/* iwarp connection context */
@@ -9932,6 +9783,7 @@ struct e4_iwarp_conn_context {
struct regpair tstorm_st_padding[2];
struct mstorm_iwarp_conn_st_ctx mstorm_st_context;
struct ustorm_iwarp_conn_st_ctx ustorm_st_context;
+ struct regpair ustorm_st_padding[2];
};
/* iWARP create QP params passed by driver to FW in CreateQP Request Ramrod */
@@ -9984,7 +9836,8 @@ enum iwarp_eqe_async_opcode {
struct iwarp_eqe_data_mpa_async_completion {
__le16 ulp_data_len;
- u8 reserved[6];
+ u8 rtr_type_sent;
+ u8 reserved[5];
};
struct iwarp_eqe_data_tcp_async_completion {
@@ -10009,7 +9862,7 @@ enum iwarp_eqe_sync_opcode {
/* iWARP EQE completion status */
enum iwarp_fw_return_code {
- IWARP_CONN_ERROR_TCP_CONNECT_INVALID_PACKET = 5,
+ IWARP_CONN_ERROR_TCP_CONNECT_INVALID_PACKET = 6,
IWARP_CONN_ERROR_TCP_CONNECTION_RST,
IWARP_CONN_ERROR_TCP_CONNECT_TIMEOUT,
IWARP_CONN_ERROR_MPA_ERROR_REJECT,
@@ -10178,8 +10031,8 @@ struct iwarp_rxmit_stats_drv {
* offload ramrod.
*/
struct iwarp_tcp_offload_ramrod_data {
- struct iwarp_offload_params iwarp;
struct tcp_offload_params_opt2 tcp;
+ struct iwarp_offload_params iwarp;
};
/* iWARP MPA negotiation types */
@@ -11471,8 +11324,8 @@ struct e4_tstorm_iscsi_conn_ag_ctx {
u8 flags3;
#define E4_TSTORM_ISCSI_CONN_AG_CTX_FLUSH_Q0_MASK 0x3
#define E4_TSTORM_ISCSI_CONN_AG_CTX_FLUSH_Q0_SHIFT 0
-#define E4_TSTORM_ISCSI_CONN_AG_CTX_CF10_MASK 0x3
-#define E4_TSTORM_ISCSI_CONN_AG_CTX_CF10_SHIFT 2
+#define E4_TSTORM_ISCSI_CONN_AG_CTX_FLUSH_OOO_ISLES_CF_MASK 0x3
+#define E4_TSTORM_ISCSI_CONN_AG_CTX_FLUSH_OOO_ISLES_CF_SHIFT 2
#define E4_TSTORM_ISCSI_CONN_AG_CTX_CF0EN_MASK 0x1
#define E4_TSTORM_ISCSI_CONN_AG_CTX_CF0EN_SHIFT 4
#define E4_TSTORM_ISCSI_CONN_AG_CTX_P2T_FLUSH_CF_EN_MASK 0x1
@@ -11494,8 +11347,8 @@ struct e4_tstorm_iscsi_conn_ag_ctx {
#define E4_TSTORM_ISCSI_CONN_AG_CTX_CF8EN_SHIFT 4
#define E4_TSTORM_ISCSI_CONN_AG_CTX_FLUSH_Q0_EN_MASK 0x1
#define E4_TSTORM_ISCSI_CONN_AG_CTX_FLUSH_Q0_EN_SHIFT 5
-#define E4_TSTORM_ISCSI_CONN_AG_CTX_CF10EN_MASK 0x1
-#define E4_TSTORM_ISCSI_CONN_AG_CTX_CF10EN_SHIFT 6
+#define E4_TSTORM_ISCSI_CONN_AG_CTX_FLUSH_OOO_ISLES_CF_EN_MASK 0x1
+#define E4_TSTORM_ISCSI_CONN_AG_CTX_FLUSH_OOO_ISLES_CF_EN_SHIFT 6
#define E4_TSTORM_ISCSI_CONN_AG_CTX_RULE0EN_MASK 0x1
#define E4_TSTORM_ISCSI_CONN_AG_CTX_RULE0EN_SHIFT 7
u8 flags5;
@@ -11727,7 +11580,7 @@ struct e4_ystorm_iscsi_conn_ag_ctx {
/* The trace in the buffer */
#define MFW_TRACE_EVENTID_MASK 0x00ffff
#define MFW_TRACE_PRM_SIZE_MASK 0x0f0000
-#define MFW_TRACE_PRM_SIZE_SHIFT 16
+#define MFW_TRACE_PRM_SIZE_OFFSET 16
#define MFW_TRACE_ENTRY_SIZE 3
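
The PRM_SIZE accessor is renamed from SHIFT to OFFSET; decoding a trace word still follows the usual mask-then-shift pattern. A minimal sketch using the MFW_TRACE_* defines above, assuming the trace word arrives as a plain unsigned int:

	static inline unsigned int mfw_trace_event_id(unsigned int word)
	{
		return word & MFW_TRACE_EVENTID_MASK;
	}

	static inline unsigned int mfw_trace_prm_size(unsigned int word)
	{
		return (word & MFW_TRACE_PRM_SIZE_MASK) >>
		       MFW_TRACE_PRM_SIZE_OFFSET;
	}
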
struct mcp_trace {
@@ -12485,6 +12338,11 @@ enum resource_id_enum {
RESOURCE_LL2_QUEUE_E = 15,
RESOURCE_RDMA_STATS_QUEUE_E = 16,
RESOURCE_BDQ_E = 17,
+ RESOURCE_QCN_E = 18,
+ RESOURCE_LLH_FILTER_E = 19,
+ RESOURCE_VF_MAC_ADDR = 20,
+ RESOURCE_LL2_CQS_E = 21,
+ RESOURCE_VF_CNQS = 22,
RESOURCE_MAX_NUM,
RESOURCE_NUM_INVALID = 0xFFFFFFFF
};
@@ -12675,7 +12533,10 @@ struct public_drv_mb {
#define DRV_MB_PARAM_DCBX_NOTIFY_SHIFT 3
#define DRV_MB_PARAM_NVM_PUT_FILE_BEGIN_MBI 0x3
+#define DRV_MB_PARAM_NVM_OFFSET_OFFSET 0
+#define DRV_MB_PARAM_NVM_OFFSET_MASK 0x00FFFFFF
#define DRV_MB_PARAM_NVM_LEN_OFFSET 24
+#define DRV_MB_PARAM_NVM_LEN_MASK 0xFF000000
#define DRV_MB_PARAM_CFG_VF_MSIX_VF_ID_SHIFT 0
#define DRV_MB_PARAM_CFG_VF_MSIX_VF_ID_MASK 0x000000FF
@@ -13436,6 +13297,21 @@ enum nvm_image_type {
NVM_TYPE_FCOE_CFG = 0x1f,
NVM_TYPE_ETH_PHY_FW1 = 0x20,
NVM_TYPE_ETH_PHY_FW2 = 0x21,
+ NVM_TYPE_BDN = 0x22,
+ NVM_TYPE_8485X_PHY_FW = 0x23,
+ NVM_TYPE_PUB_KEY = 0x24,
+ NVM_TYPE_RECOVERY = 0x25,
+ NVM_TYPE_PLDM = 0x26,
+ NVM_TYPE_UPK1 = 0x27,
+ NVM_TYPE_UPK2 = 0x28,
+ NVM_TYPE_MASTER_KC = 0x29,
+ NVM_TYPE_BACKUP_KC = 0x2a,
+ NVM_TYPE_HW_DUMP = 0x2b,
+ NVM_TYPE_HW_DUMP_OUT = 0x2c,
+ NVM_TYPE_BIN_NVM_META = 0x30,
+ NVM_TYPE_ROM_TEST = 0xf0,
+ NVM_TYPE_88X33X0_PHY_FW = 0x31,
+ NVM_TYPE_88X33X0_PHY_SLAVE_FW = 0x32,
NVM_TYPE_MAX,
};
diff --git a/drivers/net/ethernet/qlogic/qed/qed_hw.c b/drivers/net/ethernet/qlogic/qed/qed_hw.c
index a4de9e3ef72c..4ab8cfaf63d1 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_hw.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_hw.c
@@ -393,7 +393,7 @@ u32 qed_vfid_to_concrete(struct qed_hwfn *p_hwfn, u8 vfid)
/* DMAE */
#define QED_DMAE_FLAGS_IS_SET(params, flag) \
- ((params) != NULL && ((params)->flags & QED_DMAE_FLAG_##flag))
+ ((params) != NULL && GET_FIELD((params)->flags, QED_DMAE_PARAMS_##flag))
static void qed_dmae_opcode(struct qed_hwfn *p_hwfn,
const u8 is_src_type_grc,
@@ -408,62 +408,55 @@ static void qed_dmae_opcode(struct qed_hwfn *p_hwfn,
* 0- The source is the PCIe
* 1- The source is the GRC.
*/
- opcode |= (is_src_type_grc ? DMAE_CMD_SRC_MASK_GRC
- : DMAE_CMD_SRC_MASK_PCIE) <<
- DMAE_CMD_SRC_SHIFT;
- src_pfid = QED_DMAE_FLAGS_IS_SET(p_params, PF_SRC) ?
- p_params->src_pfid : p_hwfn->rel_pf_id;
- opcode |= ((src_pfid & DMAE_CMD_SRC_PF_ID_MASK) <<
- DMAE_CMD_SRC_PF_ID_SHIFT);
+ SET_FIELD(opcode, DMAE_CMD_SRC,
+ (is_src_type_grc ? dmae_cmd_src_grc : dmae_cmd_src_pcie));
+ src_pfid = QED_DMAE_FLAGS_IS_SET(p_params, SRC_PF_VALID) ?
+ p_params->src_pfid : p_hwfn->rel_pf_id;
+ SET_FIELD(opcode, DMAE_CMD_SRC_PF_ID, src_pfid);
/* The destination of the DMA can be: 0-None 1-PCIe 2-GRC 3-None */
- opcode |= (is_dst_type_grc ? DMAE_CMD_DST_MASK_GRC
- : DMAE_CMD_DST_MASK_PCIE) <<
- DMAE_CMD_DST_SHIFT;
- dst_pfid = QED_DMAE_FLAGS_IS_SET(p_params, PF_DST) ?
- p_params->dst_pfid : p_hwfn->rel_pf_id;
- opcode |= ((dst_pfid & DMAE_CMD_DST_PF_ID_MASK) <<
- DMAE_CMD_DST_PF_ID_SHIFT);
+ SET_FIELD(opcode, DMAE_CMD_DST,
+ (is_dst_type_grc ? dmae_cmd_dst_grc : dmae_cmd_dst_pcie));
+ dst_pfid = QED_DMAE_FLAGS_IS_SET(p_params, DST_PF_VALID) ?
+ p_params->dst_pfid : p_hwfn->rel_pf_id;
+ SET_FIELD(opcode, DMAE_CMD_DST_PF_ID, dst_pfid);
+
/* Whether to write a completion word to the completion destination:
* 0-Do not write a completion word
* 1-Write the completion word
*/
- opcode |= (DMAE_CMD_COMP_WORD_EN_MASK << DMAE_CMD_COMP_WORD_EN_SHIFT);
- opcode |= (DMAE_CMD_SRC_ADDR_RESET_MASK <<
- DMAE_CMD_SRC_ADDR_RESET_SHIFT);
+ SET_FIELD(opcode, DMAE_CMD_COMP_WORD_EN, 1);
+ SET_FIELD(opcode, DMAE_CMD_SRC_ADDR_RESET, 1);
if (QED_DMAE_FLAGS_IS_SET(p_params, COMPLETION_DST))
- opcode |= (1 << DMAE_CMD_COMP_FUNC_SHIFT);
+ SET_FIELD(opcode, DMAE_CMD_COMP_FUNC, 1);
- opcode |= (DMAE_CMD_ENDIANITY << DMAE_CMD_ENDIANITY_MODE_SHIFT);
+ /* swapping mode 3 - big endian */
+ SET_FIELD(opcode, DMAE_CMD_ENDIANITY_MODE, DMAE_CMD_ENDIANITY);
- port_id = (QED_DMAE_FLAGS_IS_SET(p_params, PORT)) ?
- p_params->port_id : p_hwfn->port_id;
- opcode |= (port_id << DMAE_CMD_PORT_ID_SHIFT);
+ port_id = (QED_DMAE_FLAGS_IS_SET(p_params, PORT_VALID)) ?
+ p_params->port_id : p_hwfn->port_id;
+ SET_FIELD(opcode, DMAE_CMD_PORT_ID, port_id);
/* reset source address in next go */
- opcode |= (DMAE_CMD_SRC_ADDR_RESET_MASK <<
- DMAE_CMD_SRC_ADDR_RESET_SHIFT);
+ SET_FIELD(opcode, DMAE_CMD_SRC_ADDR_RESET, 1);
/* reset dest address in next go */
- opcode |= (DMAE_CMD_DST_ADDR_RESET_MASK <<
- DMAE_CMD_DST_ADDR_RESET_SHIFT);
+ SET_FIELD(opcode, DMAE_CMD_DST_ADDR_RESET, 1);
/* SRC/DST VFID: all 1's - pf, otherwise VF id */
- if (QED_DMAE_FLAGS_IS_SET(p_params, VF_SRC)) {
- opcode |= 1 << DMAE_CMD_SRC_VF_ID_VALID_SHIFT;
- opcode_b |= p_params->src_vfid << DMAE_CMD_SRC_VF_ID_SHIFT;
+ if (QED_DMAE_FLAGS_IS_SET(p_params, SRC_VF_VALID)) {
+ SET_FIELD(opcode, DMAE_CMD_SRC_VF_ID_VALID, 1);
+ SET_FIELD(opcode_b, DMAE_CMD_SRC_VF_ID, p_params->src_vfid);
} else {
- opcode_b |= DMAE_CMD_SRC_VF_ID_MASK <<
- DMAE_CMD_SRC_VF_ID_SHIFT;
+ SET_FIELD(opcode_b, DMAE_CMD_SRC_VF_ID, 0xFF);
}
-
- if (QED_DMAE_FLAGS_IS_SET(p_params, VF_DST)) {
- opcode |= 1 << DMAE_CMD_DST_VF_ID_VALID_SHIFT;
- opcode_b |= p_params->dst_vfid << DMAE_CMD_DST_VF_ID_SHIFT;
+ if (QED_DMAE_FLAGS_IS_SET(p_params, DST_VF_VALID)) {
+ SET_FIELD(opcode, DMAE_CMD_DST_VF_ID_VALID, 1);
+ SET_FIELD(opcode_b, DMAE_CMD_DST_VF_ID, p_params->dst_vfid);
} else {
- opcode_b |= DMAE_CMD_DST_VF_ID_MASK << DMAE_CMD_DST_VF_ID_SHIFT;
+ SET_FIELD(opcode_b, DMAE_CMD_DST_VF_ID, 0xFF);
}
p_hwfn->dmae_info.p_dmae_cmd->opcode = cpu_to_le32(opcode);
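
This hunk replaces open-coded shift/mask arithmetic with the SET_FIELD()/GET_FIELD() helpers, which derive the shift and mask from NAME##_SHIFT / NAME##_MASK define pairs. A self-contained model of that pattern follows, using a hypothetical DEMO_PF_ID field; the macro bodies below are illustrative stand-ins, not the driver's definitions:

	#include <stdio.h>

	typedef unsigned int u32;

	#define SET_FIELD(value, name, field)				\
		do {							\
			(value) &= ~((name##_MASK) << (name##_SHIFT));	\
			(value) |= ((u32)(field) & (name##_MASK)) <<	\
				   (name##_SHIFT);			\
		} while (0)
	#define GET_FIELD(value, name) \
		(((value) >> (name##_SHIFT)) & (name##_MASK))

	/* Hypothetical field occupying bits [11:4] */
	#define DEMO_PF_ID_MASK		0xFF
	#define DEMO_PF_ID_SHIFT	4

	int main(void)
	{
		u32 opcode = 0;

		SET_FIELD(opcode, DEMO_PF_ID, 0x2a);
		/* prints opcode=0x000002a0 pf_id=0x2a */
		printf("opcode=0x%08x pf_id=0x%02x\n", opcode,
		       GET_FIELD(opcode, DEMO_PF_ID));
		return 0;
	}
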
diff --git a/drivers/net/ethernet/qlogic/qed/qed_init_fw_funcs.c b/drivers/net/ethernet/qlogic/qed/qed_init_fw_funcs.c
index d6430dfebd83..2f1049b0b93a 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_init_fw_funcs.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_init_fw_funcs.c
@@ -44,9 +44,9 @@
#define CDU_VALIDATION_DEFAULT_CFG 61
static u16 con_region_offsets[3][NUM_OF_CONNECTION_TYPES_E4] = {
- {400, 336, 352, 304, 304, 384, 416, 352}, /* region 3 offsets */
- {528, 496, 416, 448, 448, 512, 544, 480}, /* region 4 offsets */
- {608, 544, 496, 512, 576, 592, 624, 560} /* region 5 offsets */
+ {400, 336, 352, 368, 304, 384, 416, 352}, /* region 3 offsets */
+ {528, 496, 416, 512, 448, 512, 544, 480}, /* region 4 offsets */
+ {608, 544, 496, 576, 576, 592, 624, 560} /* region 5 offsets */
};
static u16 task_region_offsets[1][NUM_OF_CONNECTION_TYPES_E4] = {
@@ -61,6 +61,9 @@ static u16 task_region_offsets[1][NUM_OF_CONNECTION_TYPES_E4] = {
0x100) - 1 : 0)
#define QM_INVALID_PQ_ID 0xffff
+/* Max link speed (in Mbps) */
+#define QM_MAX_LINK_SPEED 100000
+
/* Feature enable */
#define QM_BYPASS_EN 1
#define QM_BYTE_CRD_EN 1
@@ -128,8 +131,6 @@ static u16 task_region_offsets[1][NUM_OF_CONNECTION_TYPES_E4] = {
/* Pure LB CmdQ lines (+spare) */
#define PBF_CMDQ_PURE_LB_LINES 150
-#define PBF_CMDQ_LINES_E5_RSVD_RATIO 8
-
#define PBF_CMDQ_LINES_RT_OFFSET(ext_voq) \
(PBF_REG_YCMD_QS_NUM_LINES_VOQ0_RT_OFFSET + \
(ext_voq) * (PBF_REG_YCMD_QS_NUM_LINES_VOQ1_RT_OFFSET - \
@@ -140,6 +141,9 @@ static u16 task_region_offsets[1][NUM_OF_CONNECTION_TYPES_E4] = {
(ext_voq) * (PBF_REG_BTB_GUARANTEED_VOQ1_RT_OFFSET - \
PBF_REG_BTB_GUARANTEED_VOQ0_RT_OFFSET))
+/* Returns the VOQ line credit for the specified number of PBF command lines.
+ * PBF lines are specified in 256b units.
+ */
#define QM_VOQ_LINE_CRD(pbf_cmd_lines) \
((((pbf_cmd_lines) - 4) * 2) | QM_LINE_CRD_REG_SIGN_BIT)
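
With the E5 reservation gone, the credit formula is the only transformation left: subtract the 4-line spare and double (presumably because the credit register counts half-lines), then OR in the sign bit. A worked example, assuming for illustration that the sign bit is the MSB of a 32-bit credit value:

	#define QM_LINE_CRD_REG_SIGN_BIT	(1u << 31)	/* assumed */
	#define QM_VOQ_LINE_CRD(pbf_cmd_lines) \
		((((pbf_cmd_lines) - 4) * 2) | QM_LINE_CRD_REG_SIGN_BIT)

	/* For the 150 pure-LB lines defined elsewhere in this patch:
	 * (150 - 4) * 2 = 292 = 0x124, so the stored value is 0x80000124.
	 */
	static const unsigned int pure_lb_crd = QM_VOQ_LINE_CRD(150);
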
@@ -178,14 +182,14 @@ static u16 task_region_offsets[1][NUM_OF_CONNECTION_TYPES_E4] = {
cmd ## _ ## field, \
value)
-#define QM_INIT_TX_PQ_MAP(p_hwfn, map, chip, pq_id, rl_valid, vp_pq_id, rl_id, \
+#define QM_INIT_TX_PQ_MAP(p_hwfn, map, chip, pq_id, vp_pq_id, rl_valid, rl_id, \
ext_voq, wrr) \
do { \
typeof(map) __map; \
memset(&__map, 0, sizeof(__map)); \
SET_FIELD(__map.reg, QM_RF_PQ_MAP_ ## chip ## _PQ_VALID, 1); \
SET_FIELD(__map.reg, QM_RF_PQ_MAP_ ## chip ## _RL_VALID, \
- rl_valid); \
rl_valid ? 1 : 0); \
SET_FIELD(__map.reg, QM_RF_PQ_MAP_ ## chip ## _VP_PQ_ID, \
vp_pq_id); \
SET_FIELD(__map.reg, QM_RF_PQ_MAP_ ## chip ## _RL_ID, rl_id); \
@@ -200,9 +204,12 @@ static u16 task_region_offsets[1][NUM_OF_CONNECTION_TYPES_E4] = {
#define WRITE_PQ_INFO_TO_RAM 1
#define PQ_INFO_ELEMENT(vp, pf, tc, port, rl_valid, rl) \
(((vp) << 0) | ((pf) << 12) | ((tc) << 16) | ((port) << 20) | \
- ((rl_valid) << 22) | ((rl) << 24))
+ ((rl_valid ? 1 : 0) << 22) | (((rl) & 255) << 24) | \
+ (((rl) >> 8) << 9))
+
#define PQ_INFO_RAM_GRC_ADDRESS(pq_id) \
- (XSEM_REG_FAST_MEMORY + SEM_FAST_REG_INT_RAM + 21776 + (pq_id) * 4)
+ (XSEM_REG_FAST_MEMORY + SEM_FAST_REG_INT_RAM + \
+ XSTORM_PQ_INFO_OFFSET(pq_id))
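
The widened rate limiter ID no longer fits in the 8 bits at [31:24], so PQ_INFO_ELEMENT now stashes its high bits at bit 9. A worked packing example; the field widths are inferred from the shifts and are an assumption of this note:

	#define PQ_INFO_ELEMENT(vp, pf, tc, port, rl_valid, rl) \
		(((vp) << 0) | ((pf) << 12) | ((tc) << 16) | ((port) << 20) | \
		 ((rl_valid ? 1 : 0) << 22) | (((rl) & 255) << 24) | \
		 (((rl) >> 8) << 9))

	/* vp=5, pf=1, tc=2, port=0, rl_valid=1, rl=0x1ab:
	 * 0xab << 24 | 1 << 22 | 2 << 16 | 1 << 12 | 1 << 9 | 5 = 0xab421205
	 */
	static const unsigned int demo_pq_info =
		PQ_INFO_ELEMENT(5, 1, 2, 0, 1, 0x1ab);
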
/******************** INTERNAL IMPLEMENTATION *********************/
@@ -228,9 +235,6 @@ static void qed_enable_pf_rl(struct qed_hwfn *p_hwfn, bool pf_rl_en)
STORE_RT_REG(p_hwfn,
QM_REG_RLPFVOQENABLE_RT_OFFSET,
(u32)voq_bit_mask);
- if (num_ext_voqs >= 32)
- STORE_RT_REG(p_hwfn, QM_REG_RLPFVOQENABLE_MSB_RT_OFFSET,
- (u32)(voq_bit_mask >> 32));
/* Write RL period */
STORE_RT_REG(p_hwfn,
@@ -259,12 +263,12 @@ static void qed_enable_pf_wfq(struct qed_hwfn *p_hwfn, bool pf_wfq_en)
QM_WFQ_UPPER_BOUND);
}
-/* Prepare VPORT RL enable/disable runtime init values */
-static void qed_enable_vport_rl(struct qed_hwfn *p_hwfn, bool vport_rl_en)
+/* Prepare global RL enable/disable runtime init values */
+static void qed_enable_global_rl(struct qed_hwfn *p_hwfn, bool global_rl_en)
{
STORE_RT_REG(p_hwfn, QM_REG_RLGLBLENABLE_RT_OFFSET,
- vport_rl_en ? 1 : 0);
- if (vport_rl_en) {
+ global_rl_en ? 1 : 0);
+ if (global_rl_en) {
/* Write RL period (use timer 0 only) */
STORE_RT_REG(p_hwfn,
QM_REG_RLGLBLPERIOD_0_RT_OFFSET,
@@ -331,8 +335,7 @@ static void qed_cmdq_lines_rt_init(
continue;
/* Find number of command queue lines to divide between the
- * active physical TCs. In E5, 1/8 of the lines are reserved.
- * the lines for pure LB TC are subtracted.
+ * active physical TCs.
*/
phys_lines = port_params[port_id].num_pbf_cmd_lines;
phys_lines -= PBF_CMDQ_PURE_LB_LINES;
@@ -361,11 +364,30 @@ static void qed_cmdq_lines_rt_init(
ext_voq = qed_get_ext_voq(p_hwfn,
port_id,
PURE_LB_TC, max_phys_tcs_per_port);
- qed_cmdq_lines_voq_rt_init(p_hwfn,
- ext_voq, PBF_CMDQ_PURE_LB_LINES);
+ qed_cmdq_lines_voq_rt_init(p_hwfn, ext_voq,
+ PBF_CMDQ_PURE_LB_LINES);
}
}
+/* Prepare runtime init values to allocate guaranteed BTB blocks for the
+ * specified port. The guaranteed BTB space is divided between the TCs as
+ * follows (shared space is currently not used):
+ * 1. Parameters:
+ * B - BTB blocks for this port
+ * C - Number of physical TCs for this port
+ * 2. Calculation:
+ * a. 38 blocks (9700B jumbo frame) are allocated for global per port
+ * headroom.
+ * b. B = B - 38 (remainder after global headroom allocation).
+ * c. MAX(38, B/(C+0.7)) blocks are allocated for the pure LB VOQ.
+ * d. B = B - MAX(38, B/(C+0.7)) (remainder after pure LB allocation).
+ * e. B/C blocks are allocated for each physical TC.
+ * Assumptions:
+ * - MTU is up to 9700 bytes (38 blocks)
+ * - All TCs are considered symmetrical (same rate and packet size)
+ * - No optimization for lossy TC (all are considered lossless). Shared space
+ * is not enabled or allocated for any TC.
+ */
static void qed_btb_blocks_rt_init(
struct qed_hwfn *p_hwfn,
u8 max_ports_per_engine,
@@ -424,6 +446,34 @@ static void qed_btb_blocks_rt_init(
}
}
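
Rendered as plain arithmetic, the split described in the comment above looks like the sketch below; the in-tree function additionally programs the runtime registers and handles rounding, which is omitted here:

	#define BTB_HEADROOM_BLOCKS	38	/* 9700B jumbo frame */

	static void btb_split_sketch(unsigned int blocks,
				     unsigned int num_phys_tcs,
				     unsigned int *pure_lb_blocks,
				     unsigned int *phys_blocks_per_tc)
	{
		/* step b: remainder after global headroom allocation */
		unsigned int usable = blocks - BTB_HEADROOM_BLOCKS;
		unsigned int pure_lb;

		/* step c: MAX(38, B / (C + 0.7)), in x10 fixed point */
		pure_lb = (10 * usable) / (10 * num_phys_tcs + 7);
		if (pure_lb < BTB_HEADROOM_BLOCKS)
			pure_lb = BTB_HEADROOM_BLOCKS;
		*pure_lb_blocks = pure_lb;

		/* steps d and e: remainder divided evenly between TCs */
		*phys_blocks_per_tc = (usable - pure_lb) / num_phys_tcs;
	}
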
+/* Prepare runtime init values for the specified RL.
+ * Set max link speed (100Gbps) per rate limiter.
+ * Return -1 on error.
+ */
+static int qed_global_rl_rt_init(struct qed_hwfn *p_hwfn)
+{
+ u32 upper_bound = QM_VP_RL_UPPER_BOUND(QM_MAX_LINK_SPEED) |
+ (u32)QM_RL_CRD_REG_SIGN_BIT;
+ u32 inc_val;
+ u16 rl_id;
+
+ /* Go over all global RLs */
+ for (rl_id = 0; rl_id < MAX_QM_GLOBAL_RLS; rl_id++) {
+ inc_val = QM_RL_INC_VAL(QM_MAX_LINK_SPEED);
+
+ STORE_RT_REG(p_hwfn,
+ QM_REG_RLGLBLCRD_RT_OFFSET + rl_id,
+ (u32)QM_RL_CRD_REG_SIGN_BIT);
+ STORE_RT_REG(p_hwfn,
+ QM_REG_RLGLBLUPPERBOUND_RT_OFFSET + rl_id,
+ upper_bound);
+ STORE_RT_REG(p_hwfn,
+ QM_REG_RLGLBLINCVAL_RT_OFFSET + rl_id, inc_val);
+ }
+
+ return 0;
+}
+
/* Prepare Tx PQ mapping runtime init values for the specified PF */
static void qed_tx_pq_map_rt_init(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt,
@@ -460,18 +510,17 @@ static void qed_tx_pq_map_rt_init(struct qed_hwfn *p_hwfn,
/* Go over all Tx PQs */
for (i = 0, pq_id = p_params->start_pq; i < num_pqs; i++, pq_id++) {
- u8 ext_voq, vport_id_in_pf, tc_id = pq_params[i].tc_id;
- u32 max_qm_global_rls = MAX_QM_GLOBAL_RLS;
+ u16 *p_first_tx_pq_id, vport_id_in_pf;
struct qm_rf_pq_map_e4 tx_pq_map;
- bool is_vf_pq, rl_valid;
- u16 *p_first_tx_pq_id;
+ u8 tc_id = pq_params[i].tc_id;
+ bool is_vf_pq;
+ u8 ext_voq;
ext_voq = qed_get_ext_voq(p_hwfn,
pq_params[i].port_id,
tc_id,
p_params->max_phys_tcs_per_port);
is_vf_pq = (i >= p_params->num_pf_pqs);
- rl_valid = pq_params[i].rl_valid > 0;
/* Update first Tx PQ of VPORT/TC */
vport_id_in_pf = pq_params[i].vport_id - p_params->start_vport;
@@ -492,21 +541,14 @@ static void qed_tx_pq_map_rt_init(struct qed_hwfn *p_hwfn,
map_val);
}
- /* Check RL ID */
- if (rl_valid && pq_params[i].vport_id >= max_qm_global_rls) {
- DP_NOTICE(p_hwfn,
- "Invalid VPORT ID for rate limiter configuration\n");
- rl_valid = false;
- }
-
/* Prepare PQ map entry */
QM_INIT_TX_PQ_MAP(p_hwfn,
tx_pq_map,
E4,
pq_id,
- rl_valid ? 1 : 0,
*p_first_tx_pq_id,
- rl_valid ? pq_params[i].vport_id : 0,
+ pq_params[i].rl_valid,
+ pq_params[i].rl_id,
ext_voq, pq_params[i].wrr_group);
/* Set PQ base address */
@@ -529,9 +571,8 @@ static void qed_tx_pq_map_rt_init(struct qed_hwfn *p_hwfn,
p_params->pf_id,
tc_id,
pq_params[i].port_id,
- rl_valid ? 1 : 0,
- rl_valid ?
- pq_params[i].vport_id : 0);
+ pq_params[i].rl_valid,
+ pq_params[i].rl_id);
qed_wr(p_hwfn, p_ptt, PQ_INFO_RAM_GRC_ADDRESS(pq_id),
pq_info);
}
@@ -669,19 +710,19 @@ static int qed_pf_rl_rt_init(struct qed_hwfn *p_hwfn, u8 pf_id, u32 pf_rl)
* Return -1 on error.
*/
static int qed_vp_wfq_rt_init(struct qed_hwfn *p_hwfn,
- u8 num_vports,
+ u16 num_vports,
struct init_qm_vport_params *vport_params)
{
- u16 vport_pq_id;
+ u16 vport_pq_id, i;
u32 inc_val;
- u8 tc, i;
+ u8 tc;
/* Go over all PF VPORTs */
for (i = 0; i < num_vports; i++) {
- if (!vport_params[i].vport_wfq)
+ if (!vport_params[i].wfq)
continue;
- inc_val = QM_WFQ_INC_VAL(vport_params[i].vport_wfq);
+ inc_val = QM_WFQ_INC_VAL(vport_params[i].wfq);
if (inc_val > QM_WFQ_MAX_INC_VAL) {
DP_NOTICE(p_hwfn,
"Invalid VPORT WFQ weight configuration\n");
@@ -706,48 +747,6 @@ static int qed_vp_wfq_rt_init(struct qed_hwfn *p_hwfn,
return 0;
}
-/* Prepare VPORT RL runtime init values for the specified VPORTs.
- * Return -1 on error.
- */
-static int qed_vport_rl_rt_init(struct qed_hwfn *p_hwfn,
- u8 start_vport,
- u8 num_vports,
- u32 link_speed,
- struct init_qm_vport_params *vport_params)
-{
- u8 i, vport_id;
- u32 inc_val;
-
- if (start_vport + num_vports >= MAX_QM_GLOBAL_RLS) {
- DP_NOTICE(p_hwfn,
- "Invalid VPORT ID for rate limiter configuration\n");
- return -1;
- }
-
- /* Go over all PF VPORTs */
- for (i = 0, vport_id = start_vport; i < num_vports; i++, vport_id++) {
- inc_val = QM_RL_INC_VAL(vport_params[i].vport_rl ?
- vport_params[i].vport_rl :
- link_speed);
- if (inc_val > QM_VP_RL_MAX_INC_VAL(link_speed)) {
- DP_NOTICE(p_hwfn,
- "Invalid VPORT rate-limit configuration\n");
- return -1;
- }
-
- STORE_RT_REG(p_hwfn, QM_REG_RLGLBLCRD_RT_OFFSET + vport_id,
- (u32)QM_RL_CRD_REG_SIGN_BIT);
- STORE_RT_REG(p_hwfn,
- QM_REG_RLGLBLUPPERBOUND_RT_OFFSET + vport_id,
- QM_VP_RL_UPPER_BOUND(link_speed) |
- (u32)QM_RL_CRD_REG_SIGN_BIT);
- STORE_RT_REG(p_hwfn, QM_REG_RLGLBLINCVAL_RT_OFFSET + vport_id,
- inc_val);
- }
-
- return 0;
-}
-
static bool qed_poll_on_qm_cmd_ready(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt)
{
@@ -799,23 +798,20 @@ u32 qed_qm_pf_mem_size(u32 num_pf_cids,
int qed_qm_common_rt_init(struct qed_hwfn *p_hwfn,
struct qed_qm_common_rt_init_params *p_params)
{
- /* Init AFullOprtnstcCrdMask */
- u32 mask = (QM_OPPOR_LINE_VOQ_DEF <<
- QM_RF_OPPORTUNISTIC_MASK_LINEVOQ_SHIFT) |
- (QM_BYTE_CRD_EN << QM_RF_OPPORTUNISTIC_MASK_BYTEVOQ_SHIFT) |
- (p_params->pf_wfq_en <<
- QM_RF_OPPORTUNISTIC_MASK_PFWFQ_SHIFT) |
- (p_params->vport_wfq_en <<
- QM_RF_OPPORTUNISTIC_MASK_VPWFQ_SHIFT) |
- (p_params->pf_rl_en <<
- QM_RF_OPPORTUNISTIC_MASK_PFRL_SHIFT) |
- (p_params->vport_rl_en <<
- QM_RF_OPPORTUNISTIC_MASK_VPQCNRL_SHIFT) |
- (QM_OPPOR_FW_STOP_DEF <<
- QM_RF_OPPORTUNISTIC_MASK_FWPAUSE_SHIFT) |
- (QM_OPPOR_PQ_EMPTY_DEF <<
- QM_RF_OPPORTUNISTIC_MASK_QUEUEEMPTY_SHIFT);
+ u32 mask = 0;
+ /* Init AFullOprtnstcCrdMask */
+ SET_FIELD(mask, QM_RF_OPPORTUNISTIC_MASK_LINEVOQ,
+ QM_OPPOR_LINE_VOQ_DEF);
+ SET_FIELD(mask, QM_RF_OPPORTUNISTIC_MASK_BYTEVOQ, QM_BYTE_CRD_EN);
+ SET_FIELD(mask, QM_RF_OPPORTUNISTIC_MASK_PFWFQ, p_params->pf_wfq_en);
+ SET_FIELD(mask, QM_RF_OPPORTUNISTIC_MASK_VPWFQ, p_params->vport_wfq_en);
+ SET_FIELD(mask, QM_RF_OPPORTUNISTIC_MASK_PFRL, p_params->pf_rl_en);
+ SET_FIELD(mask, QM_RF_OPPORTUNISTIC_MASK_VPQCNRL,
+ p_params->global_rl_en);
+ SET_FIELD(mask, QM_RF_OPPORTUNISTIC_MASK_FWPAUSE, QM_OPPOR_FW_STOP_DEF);
+ SET_FIELD(mask,
+ QM_RF_OPPORTUNISTIC_MASK_QUEUEEMPTY, QM_OPPOR_PQ_EMPTY_DEF);
STORE_RT_REG(p_hwfn, QM_REG_AFULLOPRTNSTCCRDMASK_RT_OFFSET, mask);
/* Enable/disable PF RL */
@@ -824,8 +820,8 @@ int qed_qm_common_rt_init(struct qed_hwfn *p_hwfn,
/* Enable/disable PF WFQ */
qed_enable_pf_wfq(p_hwfn, p_params->pf_wfq_en);
- /* Enable/disable VPORT RL */
- qed_enable_vport_rl(p_hwfn, p_params->vport_rl_en);
+ /* Enable/disable global RL */
+ qed_enable_global_rl(p_hwfn, p_params->global_rl_en);
/* Enable/disable VPORT WFQ */
qed_enable_vport_wfq(p_hwfn, p_params->vport_wfq_en);
@@ -842,6 +838,8 @@ int qed_qm_common_rt_init(struct qed_hwfn *p_hwfn,
p_params->max_phys_tcs_per_port,
p_params->port_params);
+ qed_global_rl_rt_init(p_hwfn);
+
return 0;
}
@@ -853,7 +851,9 @@ int qed_qm_pf_rt_init(struct qed_hwfn *p_hwfn,
u32 other_mem_size_4kb = QM_PQ_MEM_4KB(p_params->num_pf_cids +
p_params->num_tids) *
QM_OTHER_PQS_PER_PF;
- u8 tc, i;
+ u16 i;
+ u8 tc;
+
/* Clear first Tx PQ ID array for each VPORT */
for (i = 0; i < p_params->num_vports; i++)
@@ -878,16 +878,10 @@ int qed_qm_pf_rt_init(struct qed_hwfn *p_hwfn,
if (qed_pf_rl_rt_init(p_hwfn, p_params->pf_id, p_params->pf_rl))
return -1;
- /* Set VPORT WFQ */
+ /* Init VPORT WFQ */
if (qed_vp_wfq_rt_init(p_hwfn, p_params->num_vports, vport_params))
return -1;
- /* Set VPORT RL */
- if (qed_vport_rl_rt_init(p_hwfn, p_params->start_vport,
- p_params->num_vports, p_params->link_speed,
- vport_params))
- return -1;
-
return 0;
}
@@ -925,18 +919,19 @@ int qed_init_pf_rl(struct qed_hwfn *p_hwfn,
int qed_init_vport_wfq(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt,
- u16 first_tx_pq_id[NUM_OF_TCS], u16 vport_wfq)
+ u16 first_tx_pq_id[NUM_OF_TCS], u16 wfq)
{
u16 vport_pq_id;
u32 inc_val;
u8 tc;
- inc_val = QM_WFQ_INC_VAL(vport_wfq);
+ inc_val = QM_WFQ_INC_VAL(wfq);
if (!inc_val || inc_val > QM_WFQ_MAX_INC_VAL) {
- DP_NOTICE(p_hwfn, "Invalid VPORT WFQ weight configuration\n");
+ DP_NOTICE(p_hwfn, "Invalid VPORT WFQ configuration.\n");
return -1;
}
+ /* A VPORT can have several VPORT PQ IDs for various TCs */
for (tc = 0; tc < NUM_OF_TCS; tc++) {
vport_pq_id = first_tx_pq_id[tc];
if (vport_pq_id != QM_INVALID_PQ_ID)
@@ -948,28 +943,20 @@ int qed_init_vport_wfq(struct qed_hwfn *p_hwfn,
return 0;
}
-int qed_init_vport_rl(struct qed_hwfn *p_hwfn,
- struct qed_ptt *p_ptt,
- u8 vport_id, u32 vport_rl, u32 link_speed)
+int qed_init_global_rl(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt, u16 rl_id, u32 rate_limit)
{
- u32 inc_val, max_qm_global_rls = MAX_QM_GLOBAL_RLS;
+ u32 inc_val;
- if (vport_id >= max_qm_global_rls) {
- DP_NOTICE(p_hwfn,
- "Invalid VPORT ID for rate limiter configuration\n");
+ inc_val = QM_RL_INC_VAL(rate_limit);
+ if (inc_val > QM_VP_RL_MAX_INC_VAL(rate_limit)) {
+ DP_NOTICE(p_hwfn, "Invalid rate limit configuration.\n");
return -1;
}
- inc_val = QM_RL_INC_VAL(vport_rl ? vport_rl : link_speed);
- if (inc_val > QM_VP_RL_MAX_INC_VAL(link_speed)) {
- DP_NOTICE(p_hwfn, "Invalid VPORT rate-limit configuration\n");
- return -1;
- }
-
- qed_wr(p_hwfn,
- p_ptt,
- QM_REG_RLGLBLCRD + vport_id * 4, (u32)QM_RL_CRD_REG_SIGN_BIT);
- qed_wr(p_hwfn, p_ptt, QM_REG_RLGLBLINCVAL + vport_id * 4, inc_val);
+ qed_wr(p_hwfn, p_ptt,
+ QM_REG_RLGLBLCRD + rl_id * 4, (u32)QM_RL_CRD_REG_SIGN_BIT);
+ qed_wr(p_hwfn, p_ptt, QM_REG_RLGLBLINCVAL + rl_id * 4, inc_val);
return 0;
}
@@ -1013,7 +1000,6 @@ bool qed_send_qm_stop_cmd(struct qed_hwfn *p_hwfn,
return true;
}
-
#define SET_TUNNEL_TYPE_ENABLE_BIT(var, offset, enable) \
do { \
typeof(var) *__p_var = &(var); \
@@ -1021,8 +1007,59 @@ bool qed_send_qm_stop_cmd(struct qed_hwfn *p_hwfn,
*__p_var = (*__p_var & ~BIT(__offset)) | \
((enable) ? BIT(__offset) : 0); \
} while (0)
-#define PRS_ETH_TUNN_OUTPUT_FORMAT -188897008
-#define PRS_ETH_OUTPUT_FORMAT -46832
+
+#define PRS_ETH_TUNN_OUTPUT_FORMAT 0xF4DAB910
+#define PRS_ETH_OUTPUT_FORMAT 0xFFFF4910
+
+#define ARR_REG_WR(dev, ptt, addr, arr, arr_size) \
+ do { \
+ u32 i; \
+ \
+ for (i = 0; i < (arr_size); i++) \
+ qed_wr(dev, ptt, \
+ ((addr) + (4 * i)), \
+ ((u32 *)&(arr))[i]); \
+ } while (0)
+
+/**
+ * @brief qed_dmae_to_grc - internal helper that writes from the host to
+ * wide-bus registers (split registers are not supported yet)
+ *
+ * @param p_hwfn - HW device data
+ * @param p_ptt - ptt window used for writing the registers.
+ * @param p_data - pointer to source data.
+ * @param addr - Destination register address.
+ * @param len_in_dwords - data length in DWORDS (u32)
+ */
+static int qed_dmae_to_grc(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ u32 *p_data, u32 addr, u32 len_in_dwords)
+{
+ struct qed_dmae_params params = {};
+ int rc;
+
+ if (!p_data)
+ return -1;
+
+ /* Set DMAE params */
+ SET_FIELD(params.flags, QED_DMAE_PARAMS_COMPLETION_DST, 1);
+
+ /* Execute DMAE command */
+ rc = qed_dmae_host2grc(p_hwfn, p_ptt,
+ (u64)(uintptr_t)(p_data),
+ addr, len_in_dwords, &params);
+
+ /* If the DMAE write failed, fall back to GRC */
+ if (rc) {
+ DP_VERBOSE(p_hwfn,
+ QED_MSG_DEBUG,
+ "Failed writing to chip using DMAE, using GRC instead\n");
+ /* write to registers using GRC */
+ ARR_REG_WR(p_hwfn, p_ptt, addr, p_data, len_in_dwords);
+ }
+
+ return len_in_dwords;
+}
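
A hypothetical caller (the register name and table contents below are illustrative, not taken from the driver) that pushes a small table into a wide-bus register block through this helper, relying on its built-in GRC fallback:

	static void demo_write_table(struct qed_hwfn *p_hwfn,
				     struct qed_ptt *p_ptt)
	{
		u32 table[8] = { 0 };	/* illustrative payload */

		/* 8 dwords to a hypothetical wide-bus destination */
		qed_dmae_to_grc(p_hwfn, p_ptt, table, SOME_WIDE_REG_ADDR,
				ARRAY_SIZE(table));
	}
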
void qed_set_vxlan_dest_port(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt, u16 dest_port)
@@ -1166,8 +1203,8 @@ void qed_set_geneve_enable(struct qed_hwfn *p_hwfn,
ip_geneve_enable ? 1 : 0);
}
-#define PRS_ETH_VXLAN_NO_L2_ENABLE_OFFSET 4
-#define PRS_ETH_VXLAN_NO_L2_OUTPUT_FORMAT -927094512
+#define PRS_ETH_VXLAN_NO_L2_ENABLE_OFFSET 3
+#define PRS_ETH_VXLAN_NO_L2_OUTPUT_FORMAT -925189872
void qed_set_vxlan_no_l2_enable(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt, bool enable)
@@ -1208,6 +1245,8 @@ void qed_set_vxlan_no_l2_enable(struct qed_hwfn *p_hwfn,
void qed_gft_disable(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, u16 pf_id)
{
+ struct regpair ram_line = { };
+
/* Disable gft search for PF */
qed_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_GFT, 0);
@@ -1217,12 +1256,9 @@ void qed_gft_disable(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, u16 pf_id)
qed_wr(p_hwfn, p_ptt, PRS_REG_GFT_CAM + CAM_LINE_SIZE * pf_id, 0);
/* Zero ramline */
- qed_wr(p_hwfn,
- p_ptt, PRS_REG_GFT_PROFILE_MASK_RAM + RAM_LINE_SIZE * pf_id, 0);
- qed_wr(p_hwfn,
- p_ptt,
- PRS_REG_GFT_PROFILE_MASK_RAM + RAM_LINE_SIZE * pf_id + REG_SIZE,
- 0);
+ qed_dmae_to_grc(p_hwfn, p_ptt, (u32 *)&ram_line,
+ PRS_REG_GFT_PROFILE_MASK_RAM + RAM_LINE_SIZE * pf_id,
+ sizeof(ram_line) / REG_SIZE);
}
void qed_gft_config(struct qed_hwfn *p_hwfn,
@@ -1232,7 +1268,8 @@ void qed_gft_config(struct qed_hwfn *p_hwfn,
bool udp,
bool ipv4, bool ipv6, enum gft_profile_type profile_type)
{
- u32 reg_val, cam_line, ram_line_lo, ram_line_hi, search_non_ip_as_gft;
+ u32 reg_val, cam_line, search_non_ip_as_gft;
+ struct regpair ram_line = { };
if (!ipv6 && !ipv4)
DP_NOTICE(p_hwfn,
@@ -1298,35 +1335,33 @@ void qed_gft_config(struct qed_hwfn *p_hwfn,
qed_rd(p_hwfn, p_ptt, PRS_REG_GFT_CAM + CAM_LINE_SIZE * pf_id);
/* Write line to RAM - compare to filter 4 tuple */
- ram_line_lo = 0;
- ram_line_hi = 0;
/* Search no IP as GFT */
search_non_ip_as_gft = 0;
/* Tunnel type */
- SET_FIELD(ram_line_lo, GFT_RAM_LINE_TUNNEL_DST_PORT, 1);
- SET_FIELD(ram_line_lo, GFT_RAM_LINE_TUNNEL_OVER_IP_PROTOCOL, 1);
+ SET_FIELD(ram_line.lo, GFT_RAM_LINE_TUNNEL_DST_PORT, 1);
+ SET_FIELD(ram_line.lo, GFT_RAM_LINE_TUNNEL_OVER_IP_PROTOCOL, 1);
if (profile_type == GFT_PROFILE_TYPE_4_TUPLE) {
- SET_FIELD(ram_line_hi, GFT_RAM_LINE_DST_IP, 1);
- SET_FIELD(ram_line_hi, GFT_RAM_LINE_SRC_IP, 1);
- SET_FIELD(ram_line_hi, GFT_RAM_LINE_OVER_IP_PROTOCOL, 1);
- SET_FIELD(ram_line_lo, GFT_RAM_LINE_ETHERTYPE, 1);
- SET_FIELD(ram_line_lo, GFT_RAM_LINE_SRC_PORT, 1);
- SET_FIELD(ram_line_lo, GFT_RAM_LINE_DST_PORT, 1);
+ SET_FIELD(ram_line.hi, GFT_RAM_LINE_DST_IP, 1);
+ SET_FIELD(ram_line.hi, GFT_RAM_LINE_SRC_IP, 1);
+ SET_FIELD(ram_line.hi, GFT_RAM_LINE_OVER_IP_PROTOCOL, 1);
+ SET_FIELD(ram_line.lo, GFT_RAM_LINE_ETHERTYPE, 1);
+ SET_FIELD(ram_line.lo, GFT_RAM_LINE_SRC_PORT, 1);
+ SET_FIELD(ram_line.lo, GFT_RAM_LINE_DST_PORT, 1);
} else if (profile_type == GFT_PROFILE_TYPE_L4_DST_PORT) {
- SET_FIELD(ram_line_hi, GFT_RAM_LINE_OVER_IP_PROTOCOL, 1);
- SET_FIELD(ram_line_lo, GFT_RAM_LINE_ETHERTYPE, 1);
- SET_FIELD(ram_line_lo, GFT_RAM_LINE_DST_PORT, 1);
+ SET_FIELD(ram_line.hi, GFT_RAM_LINE_OVER_IP_PROTOCOL, 1);
+ SET_FIELD(ram_line.lo, GFT_RAM_LINE_ETHERTYPE, 1);
+ SET_FIELD(ram_line.lo, GFT_RAM_LINE_DST_PORT, 1);
} else if (profile_type == GFT_PROFILE_TYPE_IP_DST_ADDR) {
- SET_FIELD(ram_line_hi, GFT_RAM_LINE_DST_IP, 1);
- SET_FIELD(ram_line_lo, GFT_RAM_LINE_ETHERTYPE, 1);
+ SET_FIELD(ram_line.hi, GFT_RAM_LINE_DST_IP, 1);
+ SET_FIELD(ram_line.lo, GFT_RAM_LINE_ETHERTYPE, 1);
} else if (profile_type == GFT_PROFILE_TYPE_IP_SRC_ADDR) {
- SET_FIELD(ram_line_hi, GFT_RAM_LINE_SRC_IP, 1);
- SET_FIELD(ram_line_lo, GFT_RAM_LINE_ETHERTYPE, 1);
+ SET_FIELD(ram_line.hi, GFT_RAM_LINE_SRC_IP, 1);
+ SET_FIELD(ram_line.lo, GFT_RAM_LINE_ETHERTYPE, 1);
} else if (profile_type == GFT_PROFILE_TYPE_TUNNEL_TYPE) {
- SET_FIELD(ram_line_lo, GFT_RAM_LINE_TUNNEL_ETHERTYPE, 1);
+ SET_FIELD(ram_line.lo, GFT_RAM_LINE_TUNNEL_ETHERTYPE, 1);
/* Allow tunneled traffic without inner IP */
search_non_ip_as_gft = 1;
@@ -1334,24 +1369,17 @@ void qed_gft_config(struct qed_hwfn *p_hwfn,
qed_wr(p_hwfn,
p_ptt, PRS_REG_SEARCH_NON_IP_AS_GFT, search_non_ip_as_gft);
- qed_wr(p_hwfn,
- p_ptt,
- PRS_REG_GFT_PROFILE_MASK_RAM + RAM_LINE_SIZE * pf_id,
- ram_line_lo);
- qed_wr(p_hwfn,
- p_ptt,
- PRS_REG_GFT_PROFILE_MASK_RAM + RAM_LINE_SIZE * pf_id + REG_SIZE,
- ram_line_hi);
+ qed_dmae_to_grc(p_hwfn, p_ptt, (u32 *)&ram_line,
+ PRS_REG_GFT_PROFILE_MASK_RAM + RAM_LINE_SIZE * pf_id,
+ sizeof(ram_line) / REG_SIZE);
/* Set default profile so that no filter match will happen */
- qed_wr(p_hwfn,
- p_ptt,
- PRS_REG_GFT_PROFILE_MASK_RAM + RAM_LINE_SIZE *
- PRS_GFT_CAM_LINES_NO_MATCH, 0xffffffff);
- qed_wr(p_hwfn,
- p_ptt,
- PRS_REG_GFT_PROFILE_MASK_RAM + RAM_LINE_SIZE *
- PRS_GFT_CAM_LINES_NO_MATCH + REG_SIZE, 0x3ff);
+ ram_line.lo = 0xffffffff;
+ ram_line.hi = 0x3ff;
+ qed_dmae_to_grc(p_hwfn, p_ptt, (u32 *)&ram_line,
+ PRS_REG_GFT_PROFILE_MASK_RAM + RAM_LINE_SIZE *
+ PRS_GFT_CAM_LINES_NO_MATCH,
+ sizeof(ram_line) / REG_SIZE);
/* Enable gft search */
qed_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_GFT, 1);
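
Carrying the RAM line in a struct regpair makes the dword count explicit: one line spans two 32-bit registers, matching the pair of qed_wr() calls each hunk replaces. A standalone check, with __le32 and REG_SIZE approximated locally:

	#include <stdint.h>

	typedef uint32_t __le32;	/* stand-in for the kernel type */

	struct regpair {		/* layout used for ram_line above */
		__le32 lo;
		__le32 hi;
	};

	#define REG_SIZE sizeof(uint32_t)	/* assumed definition */

	_Static_assert(sizeof(struct regpair) / REG_SIZE == 2,
		       "GFT RAM line is two dwords");
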
@@ -1544,3 +1572,144 @@ void qed_set_rdma_error_level(struct qed_hwfn *p_hwfn,
qed_wr(p_hwfn, p_ptt, ram_addr, assert_level[storm_id]);
}
}
+
+#define PHYS_ADDR_DWORDS DIV_ROUND_UP(sizeof(dma_addr_t), 4)
+#define OVERLAY_HDR_SIZE_DWORDS (sizeof(struct fw_overlay_buf_hdr) / 4)
+
+static u32 qed_get_overlay_addr_ram_addr(struct qed_hwfn *p_hwfn, u8 storm_id)
+{
+ switch (storm_id) {
+ case 0:
+ return TSEM_REG_FAST_MEMORY + SEM_FAST_REG_INT_RAM +
+ TSTORM_OVERLAY_BUF_ADDR_OFFSET;
+ case 1:
+ return MSEM_REG_FAST_MEMORY + SEM_FAST_REG_INT_RAM +
+ MSTORM_OVERLAY_BUF_ADDR_OFFSET;
+ case 2:
+ return USEM_REG_FAST_MEMORY + SEM_FAST_REG_INT_RAM +
+ USTORM_OVERLAY_BUF_ADDR_OFFSET;
+ case 3:
+ return XSEM_REG_FAST_MEMORY + SEM_FAST_REG_INT_RAM +
+ XSTORM_OVERLAY_BUF_ADDR_OFFSET;
+ case 4:
+ return YSEM_REG_FAST_MEMORY + SEM_FAST_REG_INT_RAM +
+ YSTORM_OVERLAY_BUF_ADDR_OFFSET;
+ case 5:
+ return PSEM_REG_FAST_MEMORY + SEM_FAST_REG_INT_RAM +
+ PSTORM_OVERLAY_BUF_ADDR_OFFSET;
+
+ default:
+ return 0;
+ }
+}
+
+struct phys_mem_desc *qed_fw_overlay_mem_alloc(struct qed_hwfn *p_hwfn,
+ const u32 * const
+ fw_overlay_in_buf,
+ u32 buf_size_in_bytes)
+{
+ u32 buf_size = buf_size_in_bytes / sizeof(u32), buf_offset = 0;
+ struct phys_mem_desc *allocated_mem;
+
+ if (!buf_size)
+ return NULL;
+
+ allocated_mem = kcalloc(NUM_STORMS, sizeof(struct phys_mem_desc),
+ GFP_KERNEL);
+ if (!allocated_mem)
+ return NULL;
+
+ memset(allocated_mem, 0, NUM_STORMS * sizeof(struct phys_mem_desc));
+
+ /* For each Storm, set physical address in RAM */
+ while (buf_offset < buf_size) {
+ struct phys_mem_desc *storm_mem_desc;
+ struct fw_overlay_buf_hdr *hdr;
+ u32 storm_buf_size;
+ u8 storm_id;
+
+ hdr =
+ (struct fw_overlay_buf_hdr *)&fw_overlay_in_buf[buf_offset];
+ storm_buf_size = GET_FIELD(hdr->data,
+ FW_OVERLAY_BUF_HDR_BUF_SIZE);
+ storm_id = GET_FIELD(hdr->data, FW_OVERLAY_BUF_HDR_STORM_ID);
+ storm_mem_desc = allocated_mem + storm_id;
+ storm_mem_desc->size = storm_buf_size * sizeof(u32);
+
+ /* Allocate physical memory for Storm's overlays buffer */
+ storm_mem_desc->virt_addr =
+ dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
+ storm_mem_desc->size,
+ &storm_mem_desc->phys_addr, GFP_KERNEL);
+ if (!storm_mem_desc->virt_addr)
+ break;
+
+ /* Skip overlays buffer header */
+ buf_offset += OVERLAY_HDR_SIZE_DWORDS;
+
+ /* Copy Storm's overlays buffer to allocated memory */
+ memcpy(storm_mem_desc->virt_addr,
+ &fw_overlay_in_buf[buf_offset], storm_mem_desc->size);
+
+ /* Advance to next Storm */
+ buf_offset += storm_buf_size;
+ }
+
+ /* If memory allocation has failed, free all allocated memory */
+ if (buf_offset < buf_size) {
+ qed_fw_overlay_mem_free(p_hwfn, allocated_mem);
+ return NULL;
+ }
+
+ return allocated_mem;
+}
+
+void qed_fw_overlay_init_ram(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ struct phys_mem_desc *fw_overlay_mem)
+{
+ u8 storm_id;
+
+ for (storm_id = 0; storm_id < NUM_STORMS; storm_id++) {
+ struct phys_mem_desc *storm_mem_desc =
+ (struct phys_mem_desc *)fw_overlay_mem + storm_id;
+ u32 ram_addr, i;
+
+ /* Skip Storms with no FW overlays */
+ if (!storm_mem_desc->virt_addr)
+ continue;
+
+ /* Calculate overlay RAM GRC address of current PF */
+ ram_addr = qed_get_overlay_addr_ram_addr(p_hwfn, storm_id) +
+ sizeof(dma_addr_t) * p_hwfn->rel_pf_id;
+
+ /* Write Storm's overlay physical address to RAM */
+ for (i = 0; i < PHYS_ADDR_DWORDS; i++, ram_addr += sizeof(u32))
+ qed_wr(p_hwfn, p_ptt, ram_addr,
+ ((u32 *)&storm_mem_desc->phys_addr)[i]);
+ }
+}
+
+void qed_fw_overlay_mem_free(struct qed_hwfn *p_hwfn,
+ struct phys_mem_desc *fw_overlay_mem)
+{
+ u8 storm_id;
+
+ if (!fw_overlay_mem)
+ return;
+
+ for (storm_id = 0; storm_id < NUM_STORMS; storm_id++) {
+ struct phys_mem_desc *storm_mem_desc =
+ (struct phys_mem_desc *)fw_overlay_mem + storm_id;
+
+ /* Free Storm's physical memory */
+ if (storm_mem_desc->virt_addr)
+ dma_free_coherent(&p_hwfn->cdev->pdev->dev,
+ storm_mem_desc->size,
+ storm_mem_desc->virt_addr,
+ storm_mem_desc->phys_addr);
+ }
+
+ /* Free allocated virtual memory */
+ kfree(fw_overlay_mem);
+}
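
Taken together, the three helpers form an alloc/publish/free lifecycle. A hypothetical PF-init caller, with error handling elided; the fw_overlays field names follow the qed_init_fw_data() additions later in this patch:

	static int demo_overlay_setup(struct qed_hwfn *p_hwfn,
				      struct qed_ptt *p_ptt)
	{
		struct qed_fw_data *fw = p_hwfn->cdev->fw_data;
		struct phys_mem_desc *mem;

		mem = qed_fw_overlay_mem_alloc(p_hwfn, fw->fw_overlays,
					       fw->fw_overlays_len);
		if (!mem)
			return -ENOMEM;

		/* publish each Storm's overlay buffer address to RAM */
		qed_fw_overlay_init_ram(p_hwfn, p_ptt, mem);

		/* ... on teardown ... */
		qed_fw_overlay_mem_free(p_hwfn, mem);
		return 0;
	}
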
diff --git a/drivers/net/ethernet/qlogic/qed/qed_init_ops.c b/drivers/net/ethernet/qlogic/qed/qed_init_ops.c
index a868d7f88601..5a6e4ac4fef4 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_init_ops.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_init_ops.c
@@ -54,15 +54,15 @@ static u32 pxp_global_win[] = {
0x1c80, /* win 3: addr=0x1c80000, size=4096 bytes */
0x1d00, /* win 4: addr=0x1d00000, size=4096 bytes */
0x1d01, /* win 5: addr=0x1d01000, size=4096 bytes */
- 0x1d80, /* win 6: addr=0x1d80000, size=4096 bytes */
- 0x1d81, /* win 7: addr=0x1d81000, size=4096 bytes */
- 0x1d82, /* win 8: addr=0x1d82000, size=4096 bytes */
- 0x1e00, /* win 9: addr=0x1e00000, size=4096 bytes */
- 0x1e80, /* win 10: addr=0x1e80000, size=4096 bytes */
- 0x1f00, /* win 11: addr=0x1f00000, size=4096 bytes */
- 0,
- 0,
- 0,
+ 0x1d02, /* win 6: addr=0x1d02000, size=4096 bytes */
+ 0x1d80, /* win 7: addr=0x1d80000, size=4096 bytes */
+ 0x1d81, /* win 8: addr=0x1d81000, size=4096 bytes */
+ 0x1d82, /* win 9: addr=0x1d82000, size=4096 bytes */
+ 0x1e00, /* win 10: addr=0x1e00000, size=4096 bytes */
+ 0x1e01, /* win 11: addr=0x1e01000, size=4096 bytes */
+ 0x1e80, /* win 12: addr=0x1e80000, size=4096 bytes */
+ 0x1f00, /* win 13: addr=0x1f00000, size=4096 bytes */
+ 0x1c08, /* win 14: addr=0x1c08000, size=4096 bytes */
0,
0,
0,
@@ -74,15 +74,6 @@ void qed_init_iro_array(struct qed_dev *cdev)
cdev->iro_arr = iro_arr;
}
-/* Runtime configuration helpers */
-void qed_init_clear_rt_data(struct qed_hwfn *p_hwfn)
-{
- int i;
-
- for (i = 0; i < RUNTIME_ARRAY_SIZE; i++)
- p_hwfn->rt_data.b_valid[i] = false;
-}
-
void qed_init_store_rt_reg(struct qed_hwfn *p_hwfn, u32 rt_offset, u32 val)
{
p_hwfn->rt_data.init_val[rt_offset] = val;
@@ -106,7 +97,7 @@ static int qed_init_rt(struct qed_hwfn *p_hwfn,
{
u32 *p_init_val = &p_hwfn->rt_data.init_val[rt_offset];
bool *p_valid = &p_hwfn->rt_data.b_valid[rt_offset];
- u16 i, segment;
+ u16 i, j, segment;
int rc = 0;
/* Since not all RT entries are initialized, go over the RT and
@@ -121,6 +112,7 @@ static int qed_init_rt(struct qed_hwfn *p_hwfn,
*/
if (!b_must_dmae) {
qed_wr(p_hwfn, p_ptt, addr + (i << 2), p_init_val[i]);
+ p_valid[i] = false;
continue;
}
@@ -135,6 +127,10 @@ static int qed_init_rt(struct qed_hwfn *p_hwfn,
if (rc)
return rc;
+ /* invalidate after writing */
+ for (j = i; j < i + segment; j++)
+ p_valid[j] = false;
+
/* Jump over the entire segment, including invalid entry */
i += segment;
}
@@ -215,7 +211,7 @@ static int qed_init_fill_dmae(struct qed_hwfn *p_hwfn,
* 3. p_hwfn->temp_data,
* 4. fill_count
*/
- params.flags = QED_DMAE_FLAG_RW_REPL_SRC;
+ SET_FIELD(params.flags, QED_DMAE_PARAMS_RW_REPL_SRC, 0x1);
return qed_dmae_host2grc(p_hwfn, p_ptt,
(uintptr_t)(&zero_buffer[0]),
addr, fill_count, &params);
@@ -490,10 +486,10 @@ static u32 qed_init_cmd_phase(struct qed_hwfn *p_hwfn,
int qed_init_run(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt, int phase, int phase_id, int modes)
{
+ bool b_dmae = (phase != PHASE_ENGINE);
struct qed_dev *cdev = p_hwfn->cdev;
u32 cmd_num, num_init_ops;
union init_op *init_ops;
- bool b_dmae = false;
int rc = 0;
num_init_ops = cdev->fw_data->init_ops_size;
@@ -522,7 +518,6 @@ int qed_init_run(struct qed_hwfn *p_hwfn,
case INIT_OP_IF_PHASE:
cmd_num += qed_init_cmd_phase(p_hwfn, &cmd->if_phase,
phase, phase_id);
- b_dmae = GET_FIELD(data, INIT_IF_PHASE_OP_DMAE_ENABLE);
break;
case INIT_OP_DELAY:
/* qed_init_run is always invoked from
@@ -533,6 +528,9 @@ int qed_init_run(struct qed_hwfn *p_hwfn,
case INIT_OP_CALLBACK:
rc = qed_init_cmd_cb(p_hwfn, p_ptt, &cmd->callback);
+ if (phase == PHASE_ENGINE &&
+ cmd->callback.callback_id == DMAE_READY_CB)
+ b_dmae = true;
break;
}
@@ -587,5 +585,10 @@ int qed_init_fw_data(struct qed_dev *cdev, const u8 *data)
len = buf_hdr[BIN_BUF_INIT_CMD].length;
fw->init_ops_size = len / sizeof(struct init_raw_op);
+ offset = buf_hdr[BIN_BUF_INIT_OVERLAYS].offset;
+ fw->fw_overlays = (u32 *)(data + offset);
+ len = buf_hdr[BIN_BUF_INIT_OVERLAYS].length;
+ fw->fw_overlays_len = len;
+
return 0;
}
diff --git a/drivers/net/ethernet/qlogic/qed/qed_init_ops.h b/drivers/net/ethernet/qlogic/qed/qed_init_ops.h
index 555dd086796d..e9e8ade50ed3 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_init_ops.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_init_ops.h
@@ -81,14 +81,6 @@ int qed_init_alloc(struct qed_hwfn *p_hwfn);
void qed_init_free(struct qed_hwfn *p_hwfn);
/**
- * @brief qed_init_clear_rt_data - Clears the runtime init array.
- *
- *
- * @param p_hwfn
- */
-void qed_init_clear_rt_data(struct qed_hwfn *p_hwfn);
-
-/**
* @brief qed_init_store_rt_reg - Store a configuration value in the RT array.
*
*
diff --git a/drivers/net/ethernet/qlogic/qed/qed_iscsi.c b/drivers/net/ethernet/qlogic/qed/qed_iscsi.c
index 5585c18053ec..7245a615517a 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_iscsi.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_iscsi.c
@@ -204,17 +204,14 @@ qed_sp_iscsi_func_start(struct qed_hwfn *p_hwfn,
return -EINVAL;
}
- SET_FIELD(p_init->hdr.flags,
- ISCSI_SLOW_PATH_HDR_LAYER_CODE, ISCSI_SLOW_PATH_LAYER_CODE);
- p_init->hdr.op_code = ISCSI_RAMROD_CMD_ID_INIT_FUNC;
-
val = p_params->half_way_close_timeout;
p_init->half_way_close_timeout = cpu_to_le16(val);
p_init->num_sq_pages_in_ring = p_params->num_sq_pages_in_ring;
p_init->num_r2tq_pages_in_ring = p_params->num_r2tq_pages_in_ring;
p_init->num_uhq_pages_in_ring = p_params->num_uhq_pages_in_ring;
- p_init->ll2_rx_queue_id = p_hwfn->hw_info.resc_start[QED_LL2_QUEUE] +
- p_params->ll2_ooo_queue_id;
+ p_init->ll2_rx_queue_id =
+ p_hwfn->hw_info.resc_start[QED_LL2_RAM_QUEUE] +
+ p_params->ll2_ooo_queue_id;
p_init->func_params.log_page_size = p_params->log_page_size;
val = p_params->num_tasks;
@@ -331,12 +328,7 @@ static int qed_sp_iscsi_conn_offload(struct qed_hwfn *p_hwfn,
p_conn->physical_q1 = cpu_to_le16(physical_q);
p_ramrod->iscsi.physical_q1 = cpu_to_le16(physical_q);
- p_ramrod->hdr.op_code = ISCSI_RAMROD_CMD_ID_OFFLOAD_CONN;
- SET_FIELD(p_ramrod->hdr.flags, ISCSI_SLOW_PATH_HDR_LAYER_CODE,
- p_conn->layer_code);
-
p_ramrod->conn_id = cpu_to_le16(p_conn->conn_id);
- p_ramrod->fw_cid = cpu_to_le32(p_conn->icid);
DMA_REGPAIR_LE(p_ramrod->iscsi.sq_pbl_addr, p_conn->sq_pbl_addr);
@@ -492,12 +484,8 @@ static int qed_sp_iscsi_conn_update(struct qed_hwfn *p_hwfn,
return rc;
p_ramrod = &p_ent->ramrod.iscsi_conn_update;
- p_ramrod->hdr.op_code = ISCSI_RAMROD_CMD_ID_UPDATE_CONN;
- SET_FIELD(p_ramrod->hdr.flags,
- ISCSI_SLOW_PATH_HDR_LAYER_CODE, p_conn->layer_code);
p_ramrod->conn_id = cpu_to_le16(p_conn->conn_id);
- p_ramrod->fw_cid = cpu_to_le32(p_conn->icid);
p_ramrod->flags = p_conn->update_flag;
p_ramrod->max_seq_size = cpu_to_le32(p_conn->max_seq_size);
dval = p_conn->max_recv_pdu_length;
@@ -537,12 +525,8 @@ qed_sp_iscsi_mac_update(struct qed_hwfn *p_hwfn,
return rc;
p_ramrod = &p_ent->ramrod.iscsi_conn_mac_update;
- p_ramrod->hdr.op_code = ISCSI_RAMROD_CMD_ID_MAC_UPDATE;
- SET_FIELD(p_ramrod->hdr.flags,
- ISCSI_SLOW_PATH_HDR_LAYER_CODE, p_conn->layer_code);
p_ramrod->conn_id = cpu_to_le16(p_conn->conn_id);
- p_ramrod->fw_cid = cpu_to_le32(p_conn->icid);
ucval = p_conn->remote_mac[1];
((u8 *)(&p_ramrod->remote_mac_addr_hi))[0] = ucval;
ucval = p_conn->remote_mac[0];
@@ -583,12 +567,8 @@ static int qed_sp_iscsi_conn_terminate(struct qed_hwfn *p_hwfn,
return rc;
p_ramrod = &p_ent->ramrod.iscsi_conn_terminate;
- p_ramrod->hdr.op_code = ISCSI_RAMROD_CMD_ID_TERMINATION_CONN;
- SET_FIELD(p_ramrod->hdr.flags,
- ISCSI_SLOW_PATH_HDR_LAYER_CODE, p_conn->layer_code);
p_ramrod->conn_id = cpu_to_le16(p_conn->conn_id);
- p_ramrod->fw_cid = cpu_to_le32(p_conn->icid);
p_ramrod->abortive = p_conn->abortive_dsconnect;
DMA_REGPAIR_LE(p_ramrod->query_params_addr,
@@ -603,7 +583,6 @@ static int qed_sp_iscsi_conn_clear_sq(struct qed_hwfn *p_hwfn,
enum spq_mode comp_mode,
struct qed_spq_comp_cb *p_comp_addr)
{
- struct iscsi_slow_path_hdr *p_ramrod = NULL;
struct qed_spq_entry *p_ent = NULL;
struct qed_sp_init_data init_data;
int rc = -EINVAL;
@@ -621,11 +600,6 @@ static int qed_sp_iscsi_conn_clear_sq(struct qed_hwfn *p_hwfn,
if (rc)
return rc;
- p_ramrod = &p_ent->ramrod.iscsi_empty;
- p_ramrod->op_code = ISCSI_RAMROD_CMD_ID_CLEAR_SQ;
- SET_FIELD(p_ramrod->flags,
- ISCSI_SLOW_PATH_HDR_LAYER_CODE, p_conn->layer_code);
-
return qed_spq_post(p_hwfn, p_ent, NULL);
}
@@ -633,7 +607,6 @@ static int qed_sp_iscsi_func_stop(struct qed_hwfn *p_hwfn,
enum spq_mode comp_mode,
struct qed_spq_comp_cb *p_comp_addr)
{
- struct iscsi_spe_func_dstry *p_ramrod = NULL;
struct qed_spq_entry *p_ent = NULL;
struct qed_sp_init_data init_data;
int rc = 0;
@@ -651,9 +624,6 @@ static int qed_sp_iscsi_func_stop(struct qed_hwfn *p_hwfn,
if (rc)
return rc;
- p_ramrod = &p_ent->ramrod.iscsi_destroy;
- p_ramrod->hdr.op_code = ISCSI_RAMROD_CMD_ID_DESTROY_FUNC;
-
rc = qed_spq_post(p_hwfn, p_ent, NULL);
qed_spq_unregister_async_cb(p_hwfn, PROTOCOLID_ISCSI);
diff --git a/drivers/net/ethernet/qlogic/qed/qed_iwarp.c b/drivers/net/ethernet/qlogic/qed/qed_iwarp.c
index 65ec16a31658..d2fe61a5cf56 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_iwarp.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_iwarp.c
@@ -137,8 +137,8 @@ qed_iwarp_init_fw_ramrod(struct qed_hwfn *p_hwfn,
struct iwarp_init_func_ramrod_data *p_ramrod)
{
p_ramrod->iwarp.ll2_ooo_q_index =
- RESC_START(p_hwfn, QED_LL2_QUEUE) +
- p_hwfn->p_rdma_info->iwarp.ll2_ooo_handle;
+ RESC_START(p_hwfn, QED_LL2_RAM_QUEUE) +
+ p_hwfn->p_rdma_info->iwarp.ll2_ooo_handle;
p_ramrod->tcp.max_fin_rt = QED_IWARP_MAX_FIN_RT_DEFAULT;
@@ -2651,6 +2651,8 @@ qed_iwarp_ll2_start(struct qed_hwfn *p_hwfn,
memset(&data, 0, sizeof(data));
data.input.conn_type = QED_LL2_TYPE_IWARP;
+ /* SYN will use ctx based queues */
+ data.input.rx_conn_type = QED_LL2_RX_TYPE_CTX;
data.input.mtu = params->max_mtu;
data.input.rx_num_desc = QED_IWARP_LL2_SYN_RX_SIZE;
data.input.tx_num_desc = QED_IWARP_LL2_SYN_TX_SIZE;
@@ -2683,6 +2685,8 @@ qed_iwarp_ll2_start(struct qed_hwfn *p_hwfn,
/* Start OOO connection */
data.input.conn_type = QED_LL2_TYPE_OOO;
+ /* OOO/unaligned will use legacy ll2 queues (ram based) */
+ data.input.rx_conn_type = QED_LL2_RX_TYPE_LEGACY;
data.input.mtu = params->max_mtu;
n_ooo_bufs = (QED_IWARP_MAX_OOO * rcv_wnd_size) /
diff --git a/drivers/net/ethernet/qlogic/qed/qed_ll2.c b/drivers/net/ethernet/qlogic/qed/qed_ll2.c
index 19a1a58d60f8..037e5978787e 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_ll2.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_ll2.c
@@ -962,7 +962,7 @@ static int qed_sp_ll2_rx_queue_start(struct qed_hwfn *p_hwfn,
return rc;
p_ramrod = &p_ent->ramrod.core_rx_queue_start;
-
+ memset(p_ramrod, 0, sizeof(*p_ramrod));
p_ramrod->sb_id = cpu_to_le16(qed_int_get_sp_sb_id(p_hwfn));
p_ramrod->sb_index = p_rx->rx_sb_index;
p_ramrod->complete_event_flg = 1;
@@ -996,6 +996,8 @@ static int qed_sp_ll2_rx_queue_start(struct qed_hwfn *p_hwfn,
p_ramrod->action_on_error.error_type = action_on_error;
p_ramrod->gsi_offload_flag = p_ll2_conn->input.gsi_enable;
+ p_ramrod->zero_prod_flg = 1;
+
return qed_spq_post(p_hwfn, p_ent, NULL);
}
@@ -1317,6 +1319,25 @@ qed_ll2_set_cbs(struct qed_ll2_info *p_ll2_info, const struct qed_ll2_cbs *cbs)
return 0;
}
+static void _qed_ll2_calc_allowed_conns(struct qed_hwfn *p_hwfn,
+ struct qed_ll2_acquire_data *data,
+ u8 *start_idx, u8 *last_idx)
+{
+ /* LL2 queue handles are split as follows:
+ * first the legacy queues, then the ctx based ones.
+ */
+ if (data->input.rx_conn_type == QED_LL2_RX_TYPE_LEGACY) {
+ *start_idx = QED_LL2_LEGACY_CONN_BASE_PF;
+ *last_idx = *start_idx +
+ QED_MAX_NUM_OF_LEGACY_LL2_CONNS_PF;
+ } else {
+ /* QED_LL2_RX_TYPE_CTX */
+ *start_idx = QED_LL2_CTX_CONN_BASE_PF;
+ *last_idx = *start_idx +
+ QED_MAX_NUM_OF_CTX_LL2_CONNS_PF;
+ }
+}
+
static enum core_error_handle
qed_ll2_get_error_choice(enum qed_ll2_error_handle err)
{
@@ -1337,14 +1358,16 @@ int qed_ll2_acquire_connection(void *cxt, struct qed_ll2_acquire_data *data)
struct qed_hwfn *p_hwfn = cxt;
qed_int_comp_cb_t comp_rx_cb, comp_tx_cb;
struct qed_ll2_info *p_ll2_info = NULL;
- u8 i, *p_tx_max;
+ u8 i, first_idx, last_idx, *p_tx_max;
int rc;
if (!data->p_connection_handle || !p_hwfn->p_ll2_info)
return -EINVAL;
+ _qed_ll2_calc_allowed_conns(p_hwfn, data, &first_idx, &last_idx);
+
/* Find a free connection to be used */
- for (i = 0; (i < QED_MAX_NUM_OF_LL2_CONNECTIONS); i++) {
+ for (i = first_idx; i < last_idx; i++) {
mutex_lock(&p_hwfn->p_ll2_info[i].mutex);
if (p_hwfn->p_ll2_info[i].b_active) {
mutex_unlock(&p_hwfn->p_ll2_info[i].mutex);
@@ -1448,6 +1471,7 @@ static int qed_ll2_establish_connection_rx(struct qed_hwfn *p_hwfn,
enum qed_ll2_error_handle error_input;
enum core_error_handle error_mode;
u8 action_on_error = 0;
+ int rc;
if (!QED_LL2_RX_REGISTERED(p_ll2_conn))
return 0;
@@ -1461,7 +1485,18 @@ static int qed_ll2_establish_connection_rx(struct qed_hwfn *p_hwfn,
error_mode = qed_ll2_get_error_choice(error_input);
SET_FIELD(action_on_error, CORE_RX_ACTION_ON_ERROR_NO_BUFF, error_mode);
- return qed_sp_ll2_rx_queue_start(p_hwfn, p_ll2_conn, action_on_error);
+ rc = qed_sp_ll2_rx_queue_start(p_hwfn, p_ll2_conn, action_on_error);
+ if (rc)
+ return rc;
+
+ if (p_ll2_conn->rx_queue.ctx_based) {
+ rc = qed_db_recovery_add(p_hwfn->cdev,
+ p_ll2_conn->rx_queue.set_prod_addr,
+ &p_ll2_conn->rx_queue.db_data,
+ DB_REC_WIDTH_64B, DB_REC_KERNEL);
+ }
+
+ return rc;
}
static void
@@ -1475,13 +1510,41 @@ qed_ll2_establish_connection_ooo(struct qed_hwfn *p_hwfn,
qed_ooo_submit_rx_buffers(p_hwfn, p_ll2_conn);
}
+static inline u8 qed_ll2_handle_to_queue_id(struct qed_hwfn *p_hwfn,
+ u8 handle,
+ u8 ll2_queue_type)
+{
+ u8 qid;
+
+ if (ll2_queue_type == QED_LL2_RX_TYPE_LEGACY)
+ return p_hwfn->hw_info.resc_start[QED_LL2_RAM_QUEUE] + handle;
+
+ /* QED_LL2_RX_TYPE_CTX
+ * FW distinguishes between the legacy queues (ram based) and the
+ * ctx based queues by the queue_id:
+ * the first MAX_NUM_LL2_RX_RAM_QUEUES queue ids are legacy,
+ * and the queue ids above that are ctx based.
+ */
+ qid = p_hwfn->hw_info.resc_start[QED_LL2_CTX_QUEUE] +
+ MAX_NUM_LL2_RX_RAM_QUEUES;
+
+ /* See the comment in the acquire-connection path for how the ll2
+ * queue handles are divided.
+ */
+ qid += (handle - QED_MAX_NUM_OF_LEGACY_LL2_CONNS_PF);
+
+ return qid;
+}
+
int qed_ll2_establish_connection(void *cxt, u8 connection_handle)
{
- struct qed_hwfn *p_hwfn = cxt;
- struct qed_ll2_info *p_ll2_conn;
+ struct e4_core_conn_context *p_cxt;
struct qed_ll2_tx_packet *p_pkt;
+ struct qed_ll2_info *p_ll2_conn;
+ struct qed_hwfn *p_hwfn = cxt;
struct qed_ll2_rx_queue *p_rx;
struct qed_ll2_tx_queue *p_tx;
+ struct qed_cxt_info cxt_info;
struct qed_ptt *p_ptt;
int rc = -EINVAL;
u32 i, capacity;
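A worked example of the handle-to-queue-id mapping added above, using invented resource bases: say resc_start[RAM_QUEUE] = 8, resc_start[CTX_QUEUE] = 0, MAX_NUM_LL2_RX_RAM_QUEUES = 32 and 3 legacy handles per PF. Then legacy handles 0..2 map to qids 8..10 (the RAM-based range), and ctx handle 3 maps to qid 0 + 32 + (3 - 3) = 32, i.e. just above the RAM-based window.

#include <stdint.h>

static uint8_t handle_to_qid(uint8_t handle, int legacy,
                             uint8_t ram_base, uint8_t ctx_base,
                             uint8_t num_ram_queues,
                             uint8_t num_legacy_handles)
{
        if (legacy)
                return ram_base + handle;

        /* ctx-based ids start above the whole RAM-based window */
        return ctx_base + num_ram_queues + (handle - num_legacy_handles);
}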
@@ -1539,13 +1602,46 @@ int qed_ll2_establish_connection(void *cxt, u8 connection_handle)
rc = qed_cxt_acquire_cid(p_hwfn, PROTOCOLID_CORE, &p_ll2_conn->cid);
if (rc)
goto out;
+ cxt_info.iid = p_ll2_conn->cid;
+ rc = qed_cxt_get_cid_info(p_hwfn, &cxt_info);
+ if (rc) {
+ DP_NOTICE(p_hwfn, "Cannot find context info for cid=%d\n",
+ p_ll2_conn->cid);
+ goto out;
+ }
+
+ p_cxt = cxt_info.p_cxt;
+
+ memset(p_cxt, 0, sizeof(*p_cxt));
- qid = p_hwfn->hw_info.resc_start[QED_LL2_QUEUE] + connection_handle;
+ qid = qed_ll2_handle_to_queue_id(p_hwfn, connection_handle,
+ p_ll2_conn->input.rx_conn_type);
p_ll2_conn->queue_id = qid;
p_ll2_conn->tx_stats_id = qid;
- p_rx->set_prod_addr = (u8 __iomem *)p_hwfn->regview +
- GTT_BAR0_MAP_REG_TSDM_RAM +
- TSTORM_LL2_RX_PRODS_OFFSET(qid);
+
+ DP_VERBOSE(p_hwfn, QED_MSG_LL2,
+ "Establishing ll2 queue. PF %d ctx_based=%d abs qid=%d\n",
+ p_hwfn->rel_pf_id, p_ll2_conn->input.rx_conn_type, qid);
+
+ if (p_ll2_conn->input.rx_conn_type == QED_LL2_RX_TYPE_LEGACY) {
+ p_rx->set_prod_addr = p_hwfn->regview +
+ GTT_BAR0_MAP_REG_TSDM_RAM + TSTORM_LL2_RX_PRODS_OFFSET(qid);
+ } else {
+ /* QED_LL2_RX_TYPE_CTX - using doorbell */
+ p_rx->ctx_based = 1;
+
+ p_rx->set_prod_addr = p_hwfn->doorbells +
+ p_hwfn->dpi_start_offset +
+ DB_ADDR_SHIFT(DQ_PWM_OFFSET_TCM_LL2_PROD_UPDATE);
+
+ /* prepare db data */
+ p_rx->db_data.icid = cpu_to_le16((u16)p_ll2_conn->cid);
+ SET_FIELD(p_rx->db_data.params,
+ CORE_PWM_PROD_UPDATE_DATA_AGG_CMD, DB_AGG_CMD_SET);
+ SET_FIELD(p_rx->db_data.params,
+ CORE_PWM_PROD_UPDATE_DATA_RESERVED1, 0);
+ }
+
p_tx->doorbell_addr = (u8 __iomem *)p_hwfn->doorbells +
qed_db_addr(p_ll2_conn->cid,
DQ_DEMS_LEGACY);
@@ -1556,7 +1652,6 @@ int qed_ll2_establish_connection(void *cxt, u8 connection_handle)
DQ_XCM_CORE_TX_BD_PROD_CMD);
p_tx->db_msg.agg_flags = DQ_XCM_CORE_DQ_CF_CMD;
-
rc = qed_ll2_establish_connection_rx(p_hwfn, p_ll2_conn);
if (rc)
goto out;
@@ -1590,7 +1685,7 @@ static void qed_ll2_post_rx_buffer_notify_fw(struct qed_hwfn *p_hwfn,
struct qed_ll2_rx_packet *p_curp)
{
struct qed_ll2_rx_packet *p_posting_packet = NULL;
- struct core_ll2_rx_prod rx_prod = { 0, 0, 0 };
+ struct core_ll2_rx_prod rx_prod = { 0, 0 };
bool b_notify_fw = false;
u16 bd_prod, cq_prod;
@@ -1615,13 +1710,27 @@ static void qed_ll2_post_rx_buffer_notify_fw(struct qed_hwfn *p_hwfn,
bd_prod = qed_chain_get_prod_idx(&p_rx->rxq_chain);
cq_prod = qed_chain_get_prod_idx(&p_rx->rcq_chain);
- rx_prod.bd_prod = cpu_to_le16(bd_prod);
- rx_prod.cqe_prod = cpu_to_le16(cq_prod);
+ if (p_rx->ctx_based) {
+ /* update producer by giving a doorbell */
+ p_rx->db_data.prod.bd_prod = cpu_to_le16(bd_prod);
+ p_rx->db_data.prod.cqe_prod = cpu_to_le16(cq_prod);
+ /* Make sure chain element is updated before ringing the
+ * doorbell
+ */
+ dma_wmb();
+ DIRECT_REG_WR64(p_rx->set_prod_addr,
+ *((u64 *)&p_rx->db_data));
+ } else {
+ rx_prod.bd_prod = cpu_to_le16(bd_prod);
+ rx_prod.cqe_prod = cpu_to_le16(cq_prod);
- /* Make sure chain element is updated before ringing the doorbell */
- dma_wmb();
+ /* Make sure chain element is updated before ringing the
+ * doorbell
+ */
+ dma_wmb();
- DIRECT_REG_WR(p_rx->set_prod_addr, *((u32 *)&rx_prod));
+ DIRECT_REG_WR(p_rx->set_prod_addr, *((u32 *)&rx_prod));
+ }
}
int qed_ll2_post_rx_buffer(void *cxt,
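The shape of the ctx-based producer update above, reduced to portable C: publish the new producer values in memory, order that store before the doorbell, then issue one 64-bit write so the device sees a consistent {bd_prod, cqe_prod, icid, params} tuple. dma_wmb() and the MMIO write are kernel primitives; below they are only comments around a plain store, and the struct layout is illustrative.

#include <stdint.h>
#include <string.h>

struct prod_db {
        uint16_t icid;          /* connection id carried in the doorbell */
        uint16_t params;        /* aggregation command flags */
        uint16_t bd_prod;       /* buffer descriptor producer */
        uint16_t cqe_prod;      /* completion queue producer */
};

static void ring_rx_doorbell(volatile uint64_t *db_addr, struct prod_db *db)
{
        uint64_t raw;

        /* 1. bd_prod/cqe_prod were just updated in coherent memory */
        /* 2. dma_wmb() would go here: memory update before MMIO */
        memcpy(&raw, db, sizeof(raw));
        *db_addr = raw;         /* 3. single 64-bit doorbell write */
}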
@@ -1965,6 +2074,12 @@ int qed_ll2_terminate_connection(void *cxt, u8 connection_handle)
if (QED_LL2_RX_REGISTERED(p_ll2_conn)) {
p_ll2_conn->rx_queue.b_cb_registered = false;
smp_wmb(); /* Make sure this is seen by ll2_lb_rxq_completion */
+
+ if (p_ll2_conn->rx_queue.ctx_based)
+ qed_db_recovery_del(p_hwfn->cdev,
+ p_ll2_conn->rx_queue.set_prod_addr,
+ &p_ll2_conn->rx_queue.db_data);
+
rc = qed_sp_ll2_rx_queue_stop(p_hwfn, p_ll2_conn);
if (rc)
goto out;
diff --git a/drivers/net/ethernet/qlogic/qed/qed_ll2.h b/drivers/net/ethernet/qlogic/qed/qed_ll2.h
index 5f01fbd3c073..288642d526b7 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_ll2.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_ll2.h
@@ -46,6 +46,18 @@
#include "qed_sp.h"
#define QED_MAX_NUM_OF_LL2_CONNECTIONS (4)
+/* LL2 queue handles are split as follows:
+ * first the legacy queues, then the ctx based queues.
+ */
+#define QED_MAX_NUM_OF_LL2_CONNS_PF (4)
+#define QED_MAX_NUM_OF_LEGACY_LL2_CONNS_PF (3)
+
+#define QED_MAX_NUM_OF_CTX_LL2_CONNS_PF \
+ (QED_MAX_NUM_OF_LL2_CONNS_PF - QED_MAX_NUM_OF_LEGACY_LL2_CONNS_PF)
+
+#define QED_LL2_LEGACY_CONN_BASE_PF 0
+#define QED_LL2_CTX_CONN_BASE_PF QED_MAX_NUM_OF_LEGACY_LL2_CONNS_PF
+
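A compile-time restatement of the split these macros encode, using local names so it builds outside the kernel tree: 4 handles per PF, the first 3 legacy (handles 0..2) and the remainder ctx based (handle 3).

#include <assert.h>

#define MAX_CONNS_PF    4
#define LEGACY_CONNS_PF 3
#define CTX_CONNS_PF    (MAX_CONNS_PF - LEGACY_CONNS_PF)
#define LEGACY_BASE_PF  0
#define CTX_BASE_PF     LEGACY_CONNS_PF

static_assert(LEGACY_BASE_PF + LEGACY_CONNS_PF == CTX_BASE_PF,
              "ctx handles start right after the legacy range");
static_assert(CTX_BASE_PF + CTX_CONNS_PF == MAX_CONNS_PF,
              "the two ranges exactly cover the per-PF pool");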
struct qed_ll2_rx_packet {
struct list_head list_entry;
@@ -79,6 +91,7 @@ struct qed_ll2_rx_queue {
struct qed_chain rxq_chain;
struct qed_chain rcq_chain;
u8 rx_sb_index;
+ u8 ctx_based;
bool b_cb_registered;
__le16 *p_fw_cons;
struct list_head active_descq;
@@ -86,6 +99,7 @@ struct qed_ll2_rx_queue {
struct list_head posting_descq;
struct qed_ll2_rx_packet *descq_array;
void __iomem *set_prod_addr;
+ struct core_pwm_prod_update_data db_data;
};
struct qed_ll2_tx_queue {
diff --git a/drivers/net/ethernet/qlogic/qed/qed_main.c b/drivers/net/ethernet/qlogic/qed/qed_main.c
index 38f7f40b3a4d..2c189c637cca 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_main.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_main.c
@@ -2637,7 +2637,7 @@ static int qed_set_grc_config(struct qed_dev *cdev, u32 cfg_id, u32 val)
if (!ptt)
return -EAGAIN;
- rc = qed_dbg_grc_config(hwfn, ptt, cfg_id, val);
+ rc = qed_dbg_grc_config(hwfn, cfg_id, val);
qed_ptt_release(hwfn, ptt);
diff --git a/drivers/net/ethernet/qlogic/qed/qed_mcp.c b/drivers/net/ethernet/qlogic/qed/qed_mcp.c
index 36ddb89856a8..280527cc0578 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_mcp.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_mcp.c
@@ -48,6 +48,8 @@
#include "qed_reg_addr.h"
#include "qed_sriov.h"
+#define GRCBASE_MCP 0xe00000
+
#define QED_MCP_RESP_ITER_US 10
#define QED_DRV_MB_MAX_RETRIES (500 * 1000) /* Account for 5 sec */
@@ -3165,6 +3167,9 @@ qed_mcp_get_nvm_image_att(struct qed_hwfn *p_hwfn,
case QED_NVM_IMAGE_FCOE_CFG:
type = NVM_TYPE_FCOE_CFG;
break;
+ case QED_NVM_IMAGE_MDUMP:
+ type = NVM_TYPE_MDUMP;
+ break;
case QED_NVM_IMAGE_NVM_CFG1:
type = NVM_TYPE_NVM_CFG1;
break;
@@ -3261,9 +3266,12 @@ static enum resource_id_enum qed_mcp_get_mfw_res_id(enum qed_resources res_id)
case QED_ILT:
mfw_res_id = RESOURCE_ILT_E;
break;
- case QED_LL2_QUEUE:
+ case QED_LL2_RAM_QUEUE:
mfw_res_id = RESOURCE_LL2_QUEUE_E;
break;
+ case QED_LL2_CTX_QUEUE:
+ mfw_res_id = RESOURCE_LL2_CQS_E;
+ break;
case QED_RDMA_CNQ_RAM:
case QED_CMDQS_CQS:
/* CNQ/CMDQS are the same resource */
diff --git a/drivers/net/ethernet/qlogic/qed/qed_reg_addr.h b/drivers/net/ethernet/qlogic/qed/qed_reg_addr.h
index 60f850c3bdd6..3dcb6ff58e73 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_reg_addr.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_reg_addr.h
@@ -178,6 +178,8 @@
0x008c80UL
#define MCP_REG_SCRATCH \
0xe20000UL
+#define MCP_REG_SCRATCH_SIZE \
+ 57344
#define CNIG_REG_NW_PORT_MODE_BB \
0x218200UL
#define MISCS_REG_CHIP_NUM \
@@ -212,6 +214,8 @@
0x580900UL
#define DBG_REG_CLIENT_ENABLE \
0x010004UL
+#define DBG_REG_TIMESTAMP_VALID_EN \
+ 0x010b58UL
#define DMAE_REG_INIT \
0x00c000UL
#define DORQ_REG_IFEN \
@@ -350,6 +354,10 @@
0x24000cUL
#define PSWRQ2_REG_ILT_MEMORY \
0x260000UL
+#define PSWRQ2_REG_ILT_MEMORY_SIZE_BB \
+ 15200
+#define PSWRQ2_REG_ILT_MEMORY_SIZE_K2 \
+ 22000
#define PSWHST_REG_DISCARD_INTERNAL_WRITES \
0x2a0040UL
#define PSWHST2_REG_DBGSYN_ALMOST_FULL_THR \
@@ -1453,6 +1461,8 @@
0x1401404UL
#define XSEM_REG_DBG_FRAME_MODE_BB_K2 \
0x1401408UL
+#define XSEM_REG_DBG_GPRE_VECT \
+ 0x1401410UL
#define XSEM_REG_DBG_MODE1_CFG_BB_K2 \
0x1401420UL
#define XSEM_REG_FAST_MEMORY \
@@ -1465,6 +1475,8 @@
0x1501404UL
#define YSEM_REG_DBG_FRAME_MODE_BB_K2 \
0x1501408UL
+#define YSEM_REG_DBG_GPRE_VECT \
+ 0x1501410UL
#define YSEM_REG_DBG_MODE1_CFG_BB_K2 \
0x1501420UL
#define YSEM_REG_FAST_MEMORY \
@@ -1479,6 +1491,8 @@
0x1601404UL
#define PSEM_REG_DBG_FRAME_MODE_BB_K2 \
0x1601408UL
+#define PSEM_REG_DBG_GPRE_VECT \
+ 0x1601410UL
#define PSEM_REG_DBG_MODE1_CFG_BB_K2 \
0x1601420UL
#define PSEM_REG_FAST_MEMORY \
@@ -1493,6 +1507,8 @@
0x1701404UL
#define TSEM_REG_DBG_FRAME_MODE_BB_K2 \
0x1701408UL
+#define TSEM_REG_DBG_GPRE_VECT \
+ 0x1701410UL
#define TSEM_REG_DBG_MODE1_CFG_BB_K2 \
0x1701420UL
#define TSEM_REG_FAST_MEMORY \
@@ -1507,12 +1523,16 @@
0x1801404UL
#define MSEM_REG_DBG_FRAME_MODE_BB_K2 \
0x1801408UL
+#define MSEM_REG_DBG_GPRE_VECT \
+ 0x1801410UL
#define MSEM_REG_DBG_MODE1_CFG_BB_K2 \
0x1801420UL
#define MSEM_REG_FAST_MEMORY \
0x1840000UL
#define USEM_REG_SLOW_DBG_EMPTY_BB_K2 \
0x1901140UL
+#define SEM_FAST_REG_INT_RAM_SIZE \
+ 20480
#define USEM_REG_SYNC_DBG_EMPTY \
0x1901160UL
#define USEM_REG_SLOW_DBG_ACTIVE_BB_K2 \
@@ -1521,14 +1541,26 @@
0x1901404UL
#define USEM_REG_DBG_FRAME_MODE_BB_K2 \
0x1901408UL
+#define USEM_REG_DBG_GPRE_VECT \
+ 0x1901410UL
#define USEM_REG_DBG_MODE1_CFG_BB_K2 \
0x1901420UL
#define USEM_REG_FAST_MEMORY \
0x1940000UL
+#define SEM_FAST_REG_DBG_MODE23_SRC_DISABLE \
+ 0x000748UL
+#define SEM_FAST_REG_DBG_MODE4_SRC_DISABLE \
+ 0x00074cUL
+#define SEM_FAST_REG_DBG_MODE6_SRC_DISABLE \
+ 0x000750UL
+#define SEM_FAST_REG_DEBUG_ACTIVE \
+ 0x000740UL
#define SEM_FAST_REG_INT_RAM \
0x020000UL
#define SEM_FAST_REG_INT_RAM_SIZE_BB_K2 \
20480
+#define SEM_FAST_REG_RECORD_FILTER_ENABLE \
+ 0x000768UL
#define GRC_REG_TRACE_FIFO_VALID_DATA \
0x050064UL
#define GRC_REG_NUMBER_VALID_OVERRIDE_WINDOW \
@@ -1583,14 +1615,20 @@
0x181530UL
#define DBG_REG_DBG_BLOCK_ON \
0x010454UL
+#define DBG_REG_FILTER_ENABLE \
+ 0x0109d0UL
#define DBG_REG_FRAMING_MODE \
0x010058UL
+#define DBG_REG_TRIGGER_ENABLE \
+ 0x01054cUL
#define SEM_FAST_REG_VFC_DATA_WR \
0x000b40UL
#define SEM_FAST_REG_VFC_ADDR \
0x000b44UL
#define SEM_FAST_REG_VFC_DATA_RD \
0x000b48UL
+#define SEM_FAST_REG_VFC_STATUS \
+ 0x000b4cUL
#define RSS_REG_RSS_RAM_DATA \
0x238c20UL
#define RSS_REG_RSS_RAM_DATA_SIZE \
diff --git a/drivers/net/ethernet/qlogic/qed/qed_roce.c b/drivers/net/ethernet/qlogic/qed/qed_roce.c
index e49fada85410..37e70562a964 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_roce.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_roce.c
@@ -900,7 +900,7 @@ int qed_roce_query_qp(struct qed_hwfn *p_hwfn,
goto err_resp;
out_params->rq_psn = le32_to_cpu(p_resp_ramrod_res->psn);
- rq_err_state = GET_FIELD(le32_to_cpu(p_resp_ramrod_res->err_flag),
+ rq_err_state = GET_FIELD(le32_to_cpu(p_resp_ramrod_res->flags),
ROCE_QUERY_QP_RESP_OUTPUT_PARAMS_ERROR_FLG);
dma_free_coherent(&p_hwfn->cdev->pdev->dev, sizeof(*p_resp_ramrod_res),
diff --git a/drivers/net/ethernet/qlogic/qed/qed_sp.h b/drivers/net/ethernet/qlogic/qed/qed_sp.h
index 96ab77ae6af5..b7b4fbbbccfe 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_sp.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_sp.h
@@ -120,9 +120,7 @@ union ramrod_data {
struct fcoe_conn_terminate_ramrod_params fcoe_conn_terminate;
struct fcoe_stat_ramrod_params fcoe_stat;
- struct iscsi_slow_path_hdr iscsi_empty;
struct iscsi_init_ramrod_params iscsi_init;
- struct iscsi_spe_func_dstry iscsi_destroy;
struct iscsi_spe_conn_offload iscsi_conn_offload;
struct iscsi_conn_update_ramrod_params iscsi_conn_update;
struct iscsi_spe_conn_mac_update iscsi_conn_mac_update;
diff --git a/drivers/net/ethernet/qlogic/qed/qed_sp_commands.c b/drivers/net/ethernet/qlogic/qed/qed_sp_commands.c
index 7e0b795230b2..900bc603e30a 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_sp_commands.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_sp_commands.c
@@ -331,8 +331,8 @@ int qed_sp_pf_start(struct qed_hwfn *p_hwfn,
u8 sb_index = p_hwfn->p_eq->eq_sb_index;
struct qed_spq_entry *p_ent = NULL;
struct qed_sp_init_data init_data;
- int rc = -EINVAL;
u8 page_cnt, i;
+ int rc;
/* update initial eq producer */
qed_eq_prod_update(p_hwfn,
@@ -447,7 +447,7 @@ int qed_sp_pf_update(struct qed_hwfn *p_hwfn)
{
struct qed_spq_entry *p_ent = NULL;
struct qed_sp_init_data init_data;
- int rc = -EINVAL;
+ int rc;
/* Get SPQ entry */
memset(&init_data, 0, sizeof(init_data));
@@ -471,7 +471,7 @@ int qed_sp_pf_update_ufp(struct qed_hwfn *p_hwfn)
{
struct qed_spq_entry *p_ent = NULL;
struct qed_sp_init_data init_data;
- int rc = -EOPNOTSUPP;
+ int rc;
if (p_hwfn->ufp_info.pri_type == QED_UFP_PRI_UNKNOWN) {
DP_INFO(p_hwfn, "Invalid priority type %d\n",
@@ -509,7 +509,7 @@ int qed_sp_pf_update_tunn_cfg(struct qed_hwfn *p_hwfn,
{
struct qed_spq_entry *p_ent = NULL;
struct qed_sp_init_data init_data;
- int rc = -EINVAL;
+ int rc;
if (IS_VF(p_hwfn->cdev))
return qed_vf_pf_tunnel_param_update(p_hwfn, p_tunn);
@@ -546,7 +546,7 @@ int qed_sp_pf_stop(struct qed_hwfn *p_hwfn)
{
struct qed_spq_entry *p_ent = NULL;
struct qed_sp_init_data init_data;
- int rc = -EINVAL;
+ int rc;
/* Get SPQ entry */
memset(&init_data, 0, sizeof(init_data));
diff --git a/drivers/net/ethernet/qlogic/qed/qed_sriov.c b/drivers/net/ethernet/qlogic/qed/qed_sriov.c
index dcb5c917f373..66876af814c4 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_sriov.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_sriov.c
@@ -352,7 +352,7 @@ static int qed_iov_post_vf_bulletin(struct qed_hwfn *p_hwfn,
/* propagate bulletin board via dmae to vm memory */
memset(&params, 0, sizeof(params));
- params.flags = QED_DMAE_FLAG_VF_DST;
+ SET_FIELD(params.flags, QED_DMAE_PARAMS_DST_VF_VALID, 0x1);
params.dst_vfid = p_vf->abs_vf_id;
return qed_dmae_host2host(p_hwfn, p_ptt, p_vf->bulletin.phys,
p_vf->vf_bulletin, p_vf->bulletin.size / 4,
@@ -1225,8 +1225,8 @@ static void qed_iov_send_response(struct qed_hwfn *p_hwfn,
eng_vf_id = p_vf->abs_vf_id;
- memset(&params, 0, sizeof(struct qed_dmae_params));
- params.flags = QED_DMAE_FLAG_VF_DST;
+ memset(&params, 0, sizeof(params));
+ SET_FIELD(params.flags, QED_DMAE_PARAMS_DST_VF_VALID, 0x1);
params.dst_vfid = eng_vf_id;
qed_dmae_host2host(p_hwfn, p_ptt, mbx->reply_phys + sizeof(u64),
@@ -4103,8 +4103,9 @@ static int qed_iov_copy_vf_msg(struct qed_hwfn *p_hwfn, struct qed_ptt *ptt,
if (!vf_info)
return -EINVAL;
- memset(&params, 0, sizeof(struct qed_dmae_params));
- params.flags = QED_DMAE_FLAG_VF_SRC | QED_DMAE_FLAG_COMPLETION_DST;
+ memset(&params, 0, sizeof(params));
+ SET_FIELD(params.flags, QED_DMAE_PARAMS_SRC_VF_VALID, 0x1);
+ SET_FIELD(params.flags, QED_DMAE_PARAMS_COMPLETION_DST, 0x1);
params.src_vfid = vf_info->abs_vf_id;
if (qed_dmae_host2host(p_hwfn, ptt,
@@ -4354,9 +4355,9 @@ qed_iov_bulletin_get_forced_vlan(struct qed_hwfn *p_hwfn, u16 rel_vf_id)
static int qed_iov_configure_tx_rate(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt, int vfid, int val)
{
- struct qed_mcp_link_state *p_link;
struct qed_vf_info *vf;
u8 abs_vp_id = 0;
+ u16 rl_id;
int rc;
vf = qed_iov_get_vf_info(p_hwfn, (u16)vfid, true);
@@ -4367,10 +4368,8 @@ static int qed_iov_configure_tx_rate(struct qed_hwfn *p_hwfn,
if (rc)
return rc;
- p_link = &QED_LEADING_HWFN(p_hwfn->cdev)->mcp_info->link_output;
-
- return qed_init_vport_rl(p_hwfn, p_ptt, abs_vp_id, (u32)val,
- p_link->speed);
+ rl_id = abs_vp_id; /* the vport_id is used as the rl_id */
+ return qed_init_global_rl(p_hwfn, p_ptt, rl_id, (u32)val);
}
static int
diff --git a/drivers/net/ethernet/qlogic/qede/qede_fp.c b/drivers/net/ethernet/qlogic/qede/qede_fp.c
index 004c0bfec41d..c6c20776b474 100644
--- a/drivers/net/ethernet/qlogic/qede/qede_fp.c
+++ b/drivers/net/ethernet/qlogic/qede/qede_fp.c
@@ -848,13 +848,13 @@ static void qede_tpa_start(struct qede_dev *edev,
qede_set_gro_params(edev, tpa_info->skb, cqe);
cons_buf: /* We still need to handle bd_len_list to consume buffers */
- if (likely(cqe->ext_bd_len_list[0]))
+ if (likely(cqe->bw_ext_bd_len_list[0]))
qede_fill_frag_skb(edev, rxq, cqe->tpa_agg_index,
- le16_to_cpu(cqe->ext_bd_len_list[0]));
+ le16_to_cpu(cqe->bw_ext_bd_len_list[0]));
- if (unlikely(cqe->ext_bd_len_list[1])) {
+ if (unlikely(cqe->bw_ext_bd_len_list[1])) {
DP_ERR(edev,
- "Unlikely - got a TPA aggregation with more than one ext_bd_len_list entry in the TPA start\n");
+ "Unlikely - got a TPA aggregation with more than one bw_ext_bd_len_list entry in the TPA start\n");
tpa_info->state = QEDE_AGG_STATE_ERROR;
}
}
diff --git a/drivers/net/ethernet/qlogic/qede/qede_ptp.c b/drivers/net/ethernet/qlogic/qede/qede_ptp.c
index f815435cf106..4c7f7a7fc151 100644
--- a/drivers/net/ethernet/qlogic/qede/qede_ptp.c
+++ b/drivers/net/ethernet/qlogic/qede/qede_ptp.c
@@ -247,6 +247,7 @@ static int qede_ptp_cfg_filters(struct qede_dev *edev)
break;
case HWTSTAMP_TX_ONESTEP_SYNC:
+ case HWTSTAMP_TX_ONESTEP_P2P:
DP_ERR(edev, "One-step timestamping is not supported\n");
return -ERANGE;
}
diff --git a/drivers/net/ethernet/qlogic/qla3xxx.c b/drivers/net/ethernet/qlogic/qla3xxx.c
index 986f26578d34..0fade19e00d4 100644
--- a/drivers/net/ethernet/qlogic/qla3xxx.c
+++ b/drivers/net/ethernet/qlogic/qla3xxx.c
@@ -3602,7 +3602,7 @@ static int ql3xxx_set_mac_address(struct net_device *ndev, void *p)
return 0;
}
-static void ql3xxx_tx_timeout(struct net_device *ndev)
+static void ql3xxx_tx_timeout(struct net_device *ndev, unsigned int txqueue)
{
struct ql3_adapter *qdev = netdev_priv(ndev);
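The signature change here is repeated across the qlogic, qualcomm and realtek drivers in this patch: ndo_tx_timeout now receives the index of the queue that stalled, so multi-queue drivers can report or reset just that queue. A hedged sketch of such a handler; my_priv and my_tx_timeout are illustrative names, not part of any driver below.

#include <linux/netdevice.h>
#include <linux/workqueue.h>

struct my_priv {
        struct work_struct reset_work;
};

static void my_tx_timeout(struct net_device *ndev, unsigned int txqueue)
{
        struct my_priv *priv = netdev_priv(ndev);

        netdev_err(ndev, "TX timeout on queue %u\n", txqueue);
        schedule_work(&priv->reset_work); /* recover outside IRQ context */
}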
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c
index a496390b8632..07f9067affc6 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c
@@ -2043,6 +2043,7 @@ static void qlcnic_83xx_exec_template_cmd(struct qlcnic_adapter *p_dev,
break;
}
entry += p_hdr->size;
+ cond_resched();
}
p_dev->ahw->reset.seq_index = index;
}
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
index c07438db30ba..9dd6cb36f366 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
@@ -56,7 +56,7 @@ static int qlcnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent);
static void qlcnic_remove(struct pci_dev *pdev);
static int qlcnic_open(struct net_device *netdev);
static int qlcnic_close(struct net_device *netdev);
-static void qlcnic_tx_timeout(struct net_device *netdev);
+static void qlcnic_tx_timeout(struct net_device *netdev, unsigned int txqueue);
static void qlcnic_attach_work(struct work_struct *work);
static void qlcnic_fwinit_work(struct work_struct *work);
@@ -3068,7 +3068,7 @@ static void qlcnic_dump_rings(struct qlcnic_adapter *adapter)
}
-static void qlcnic_tx_timeout(struct net_device *netdev)
+static void qlcnic_tx_timeout(struct net_device *netdev, unsigned int txqueue)
{
struct qlcnic_adapter *adapter = netdev_priv(netdev);
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_minidump.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_minidump.c
index afa10a163da1..f34ae8c75bc5 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_minidump.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_minidump.c
@@ -703,6 +703,7 @@ static u32 qlcnic_read_memory_test_agent(struct qlcnic_adapter *adapter,
addr += 16;
reg_read -= 16;
ret += 16;
+ cond_resched();
}
out:
mutex_unlock(&adapter->ahw->mem_lock);
@@ -1383,6 +1384,7 @@ int qlcnic_dump_fw(struct qlcnic_adapter *adapter)
buf_offset += entry->hdr.cap_size;
entry_offset += entry->hdr.offset;
buffer = fw_dump->data + buf_offset;
+ cond_resched();
}
fw_dump->clr = 1;
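The pattern behind the cond_resched() additions above: long minidump read/copy loops voluntarily yield so they cannot monopolize a CPU and trip the soft-lockup detector. An illustrative helper, not a qlcnic function.

#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/string.h>
#include <linux/types.h>

static void copy_dump_chunked(u8 *dst, const u8 *src, size_t len)
{
        size_t off;

        for (off = 0; off < len; off += 16) {
                memcpy(dst + off, src + off, min_t(size_t, 16, len - off));
                cond_resched();         /* give the scheduler a chance */
        }
}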
diff --git a/drivers/net/ethernet/qualcomm/emac/emac.c b/drivers/net/ethernet/qualcomm/emac/emac.c
index 98f92268cbaa..18b0c7a2d6dc 100644
--- a/drivers/net/ethernet/qualcomm/emac/emac.c
+++ b/drivers/net/ethernet/qualcomm/emac/emac.c
@@ -282,25 +282,13 @@ static int emac_close(struct net_device *netdev)
}
/* Respond to a TX hang */
-static void emac_tx_timeout(struct net_device *netdev)
+static void emac_tx_timeout(struct net_device *netdev, unsigned int txqueue)
{
struct emac_adapter *adpt = netdev_priv(netdev);
schedule_work(&adpt->work_thread);
}
-/* IOCTL support for the interface */
-static int emac_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
-{
- if (!netif_running(netdev))
- return -EINVAL;
-
- if (!netdev->phydev)
- return -ENODEV;
-
- return phy_mii_ioctl(netdev->phydev, ifr, cmd);
-}
-
/**
* emac_update_hw_stats - read the EMAC stat registers
*
@@ -387,7 +375,7 @@ static const struct net_device_ops emac_netdev_ops = {
.ndo_start_xmit = emac_start_xmit,
.ndo_set_mac_address = eth_mac_addr,
.ndo_change_mtu = emac_change_mtu,
- .ndo_do_ioctl = emac_ioctl,
+ .ndo_do_ioctl = phy_do_ioctl_running,
.ndo_tx_timeout = emac_tx_timeout,
.ndo_get_stats64 = emac_get_stats64,
.ndo_set_features = emac_set_features,
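The removed emac_ioctl is the exact pattern phylib now provides as a generic helper. An approximation of what phy_do_ioctl_running does (the real implementation lives in drivers/net/phy/phy_device.c, and its error codes may differ slightly from the removed driver copy):

#include <linux/netdevice.h>
#include <linux/phy.h>

static int do_ioctl_running_sketch(struct net_device *dev,
                                   struct ifreq *ifr, int cmd)
{
        if (!netif_running(dev))
                return -ENODEV;

        if (!dev->phydev)
                return -ENODEV;

        return phy_mii_ioctl(dev->phydev, ifr, cmd);
}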
diff --git a/drivers/net/ethernet/qualcomm/qca_spi.c b/drivers/net/ethernet/qualcomm/qca_spi.c
index baac016f3ec0..5a3b65a6eb4f 100644
--- a/drivers/net/ethernet/qualcomm/qca_spi.c
+++ b/drivers/net/ethernet/qualcomm/qca_spi.c
@@ -785,7 +785,7 @@ qcaspi_netdev_xmit(struct sk_buff *skb, struct net_device *dev)
}
static void
-qcaspi_netdev_tx_timeout(struct net_device *dev)
+qcaspi_netdev_tx_timeout(struct net_device *dev, unsigned int txqueue)
{
struct qcaspi *qca = netdev_priv(dev);
diff --git a/drivers/net/ethernet/qualcomm/qca_uart.c b/drivers/net/ethernet/qualcomm/qca_uart.c
index 0981068504fa..375a844cd27c 100644
--- a/drivers/net/ethernet/qualcomm/qca_uart.c
+++ b/drivers/net/ethernet/qualcomm/qca_uart.c
@@ -248,7 +248,7 @@ out:
return NETDEV_TX_OK;
}
-static void qcauart_netdev_tx_timeout(struct net_device *dev)
+static void qcauart_netdev_tx_timeout(struct net_device *dev, unsigned int txqueue)
{
struct qcauart *qca = netdev_priv(dev);
diff --git a/drivers/net/ethernet/rdc/r6040.c b/drivers/net/ethernet/rdc/r6040.c
index 274e5b4bc4ac..f5ecc410ff85 100644
--- a/drivers/net/ethernet/rdc/r6040.c
+++ b/drivers/net/ethernet/rdc/r6040.c
@@ -410,7 +410,7 @@ static void r6040_init_mac_regs(struct net_device *dev)
iowrite16(TM2TX, ioaddr + MTPR);
}
-static void r6040_tx_timeout(struct net_device *dev)
+static void r6040_tx_timeout(struct net_device *dev, unsigned int txqueue)
{
struct r6040_private *priv = netdev_priv(dev);
void __iomem *ioaddr = priv->base;
@@ -498,14 +498,6 @@ static int r6040_close(struct net_device *dev)
return 0;
}
-static int r6040_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
-{
- if (!dev->phydev)
- return -EINVAL;
-
- return phy_mii_ioctl(dev->phydev, rq, cmd);
-}
-
static int r6040_rx(struct net_device *dev, int limit)
{
struct r6040_private *priv = netdev_priv(dev);
@@ -957,7 +949,7 @@ static const struct net_device_ops r6040_netdev_ops = {
.ndo_set_rx_mode = r6040_multicast_list,
.ndo_validate_addr = eth_validate_addr,
.ndo_set_mac_address = eth_mac_addr,
- .ndo_do_ioctl = r6040_ioctl,
+ .ndo_do_ioctl = phy_do_ioctl,
.ndo_tx_timeout = r6040_tx_timeout,
#ifdef CONFIG_NET_POLL_CONTROLLER
.ndo_poll_controller = r6040_poll_controller,
diff --git a/drivers/net/ethernet/realtek/8139cp.c b/drivers/net/ethernet/realtek/8139cp.c
index 4f910c4f67b0..60d342f82fb3 100644
--- a/drivers/net/ethernet/realtek/8139cp.c
+++ b/drivers/net/ethernet/realtek/8139cp.c
@@ -1235,7 +1235,7 @@ static int cp_close (struct net_device *dev)
return 0;
}
-static void cp_tx_timeout(struct net_device *dev)
+static void cp_tx_timeout(struct net_device *dev, unsigned int txqueue)
{
struct cp_private *cp = netdev_priv(dev);
unsigned long flags;
diff --git a/drivers/net/ethernet/realtek/8139too.c b/drivers/net/ethernet/realtek/8139too.c
index 55d01266e615..5caeb8368eab 100644
--- a/drivers/net/ethernet/realtek/8139too.c
+++ b/drivers/net/ethernet/realtek/8139too.c
@@ -642,7 +642,7 @@ static int mdio_read (struct net_device *dev, int phy_id, int location);
static void mdio_write (struct net_device *dev, int phy_id, int location,
int val);
static void rtl8139_start_thread(struct rtl8139_private *tp);
-static void rtl8139_tx_timeout (struct net_device *dev);
+static void rtl8139_tx_timeout (struct net_device *dev, unsigned int txqueue);
static void rtl8139_init_ring (struct net_device *dev);
static netdev_tx_t rtl8139_start_xmit (struct sk_buff *skb,
struct net_device *dev);
@@ -1700,7 +1700,7 @@ static void rtl8139_tx_timeout_task (struct work_struct *work)
spin_unlock_bh(&tp->rx_lock);
}
-static void rtl8139_tx_timeout (struct net_device *dev)
+static void rtl8139_tx_timeout(struct net_device *dev, unsigned int txqueue)
{
struct rtl8139_private *tp = netdev_priv(dev);
diff --git a/drivers/net/ethernet/realtek/Makefile b/drivers/net/ethernet/realtek/Makefile
index d5304bad2372..2e1d78b106b0 100644
--- a/drivers/net/ethernet/realtek/Makefile
+++ b/drivers/net/ethernet/realtek/Makefile
@@ -6,5 +6,5 @@
obj-$(CONFIG_8139CP) += 8139cp.o
obj-$(CONFIG_8139TOO) += 8139too.o
obj-$(CONFIG_ATP) += atp.o
-r8169-objs += r8169_main.o r8169_firmware.o
+r8169-objs += r8169_main.o r8169_firmware.o r8169_phy_config.o
obj-$(CONFIG_R8169) += r8169.o
diff --git a/drivers/net/ethernet/realtek/atp.c b/drivers/net/ethernet/realtek/atp.c
index 58e0ca9093d3..9e3b35c97e63 100644
--- a/drivers/net/ethernet/realtek/atp.c
+++ b/drivers/net/ethernet/realtek/atp.c
@@ -204,7 +204,7 @@ static void net_rx(struct net_device *dev);
static void read_block(long ioaddr, int length, unsigned char *buffer, int data_mode);
static int net_close(struct net_device *dev);
static void set_rx_mode(struct net_device *dev);
-static void tx_timeout(struct net_device *dev);
+static void tx_timeout(struct net_device *dev, unsigned int txqueue);
/* A list of all installed ATP devices, for removing the driver module. */
@@ -533,7 +533,7 @@ static void write_packet(long ioaddr, int length, unsigned char *packet, int pad
outb(Ctrl_HNibWrite | Ctrl_SelData | Ctrl_IRQEN, ioaddr + PAR_CONTROL);
}
-static void tx_timeout(struct net_device *dev)
+static void tx_timeout(struct net_device *dev, unsigned int txqueue)
{
long ioaddr = dev->base_addr;
diff --git a/drivers/net/ethernet/realtek/r8169.h b/drivers/net/ethernet/realtek/r8169.h
new file mode 100644
index 000000000000..22a6a057b11e
--- /dev/null
+++ b/drivers/net/ethernet/realtek/r8169.h
@@ -0,0 +1,78 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* r8169.h: RealTek 8169/8168/8101 ethernet driver.
+ *
+ * Copyright (c) 2002 ShuChen <shuchen@realtek.com.tw>
+ * Copyright (c) 2003 - 2007 Francois Romieu <romieu@fr.zoreil.com>
+ * Copyright (c) a lot of people too. Please respect their work.
+ *
+ * See MAINTAINERS file for support contact information.
+ */
+
+#include <linux/types.h>
+#include <linux/phy.h>
+
+enum mac_version {
+ /* support for ancient RTL_GIGA_MAC_VER_01 has been removed */
+ RTL_GIGA_MAC_VER_02,
+ RTL_GIGA_MAC_VER_03,
+ RTL_GIGA_MAC_VER_04,
+ RTL_GIGA_MAC_VER_05,
+ RTL_GIGA_MAC_VER_06,
+ RTL_GIGA_MAC_VER_07,
+ RTL_GIGA_MAC_VER_08,
+ RTL_GIGA_MAC_VER_09,
+ RTL_GIGA_MAC_VER_10,
+ RTL_GIGA_MAC_VER_11,
+ RTL_GIGA_MAC_VER_12,
+ RTL_GIGA_MAC_VER_13,
+ RTL_GIGA_MAC_VER_14,
+ RTL_GIGA_MAC_VER_15,
+ RTL_GIGA_MAC_VER_16,
+ RTL_GIGA_MAC_VER_17,
+ RTL_GIGA_MAC_VER_18,
+ RTL_GIGA_MAC_VER_19,
+ RTL_GIGA_MAC_VER_20,
+ RTL_GIGA_MAC_VER_21,
+ RTL_GIGA_MAC_VER_22,
+ RTL_GIGA_MAC_VER_23,
+ RTL_GIGA_MAC_VER_24,
+ RTL_GIGA_MAC_VER_25,
+ RTL_GIGA_MAC_VER_26,
+ RTL_GIGA_MAC_VER_27,
+ RTL_GIGA_MAC_VER_28,
+ RTL_GIGA_MAC_VER_29,
+ RTL_GIGA_MAC_VER_30,
+ RTL_GIGA_MAC_VER_31,
+ RTL_GIGA_MAC_VER_32,
+ RTL_GIGA_MAC_VER_33,
+ RTL_GIGA_MAC_VER_34,
+ RTL_GIGA_MAC_VER_35,
+ RTL_GIGA_MAC_VER_36,
+ RTL_GIGA_MAC_VER_37,
+ RTL_GIGA_MAC_VER_38,
+ RTL_GIGA_MAC_VER_39,
+ RTL_GIGA_MAC_VER_40,
+ RTL_GIGA_MAC_VER_41,
+ RTL_GIGA_MAC_VER_42,
+ RTL_GIGA_MAC_VER_43,
+ RTL_GIGA_MAC_VER_44,
+ RTL_GIGA_MAC_VER_45,
+ RTL_GIGA_MAC_VER_46,
+ RTL_GIGA_MAC_VER_47,
+ RTL_GIGA_MAC_VER_48,
+ RTL_GIGA_MAC_VER_49,
+ RTL_GIGA_MAC_VER_50,
+ RTL_GIGA_MAC_VER_51,
+ RTL_GIGA_MAC_VER_52,
+ RTL_GIGA_MAC_VER_60,
+ RTL_GIGA_MAC_VER_61,
+ RTL_GIGA_MAC_NONE
+};
+
+struct rtl8169_private;
+
+void r8169_apply_firmware(struct rtl8169_private *tp);
+u16 rtl8168h_2_get_adc_bias_ioffset(struct rtl8169_private *tp);
+u8 rtl8168d_efuse_read(struct rtl8169_private *tp, int reg_addr);
+void r8169_hw_phy_config(struct rtl8169_private *tp, struct phy_device *phydev,
+ enum mac_version ver);
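A sketch of how the split-out r8169_phy_config.c can consume this new header: a table of per-chip configuration hooks indexed by enum mac_version. The table contents and cfg_ver_02 are illustrative only, under the assumption that per-version dispatch is the intended use of the exported entry point.

#include <linux/phy.h>
#include "r8169.h"

typedef void (*rtl_phy_cfg_fn)(struct rtl8169_private *tp,
                               struct phy_device *phydev);

static void cfg_ver_02(struct rtl8169_private *tp, struct phy_device *phydev)
{
        /* per-chip PHY register writes would go here */
}

void r8169_hw_phy_config(struct rtl8169_private *tp,
                         struct phy_device *phydev, enum mac_version ver)
{
        static const rtl_phy_cfg_fn cfgs[RTL_GIGA_MAC_NONE] = {
                [RTL_GIGA_MAC_VER_02] = cfg_ver_02,
                /* ...one entry per supported mac_version... */
        };

        if (ver < RTL_GIGA_MAC_NONE && cfgs[ver])
                cfgs[ver](tp, phydev);
}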
diff --git a/drivers/net/ethernet/realtek/r8169_main.c b/drivers/net/ethernet/realtek/r8169_main.c
index 67a4d5d45e3a..aaa316be6183 100644
--- a/drivers/net/ethernet/realtek/r8169_main.c
+++ b/drivers/net/ethernet/realtek/r8169_main.c
@@ -31,6 +31,7 @@
#include <linux/ipv6.h>
#include <net/ip6_checksum.h>
+#include "r8169.h"
#include "r8169_firmware.h"
#define MODULENAME "r8169"
@@ -84,65 +85,6 @@
#define RTL_R16(tp, reg) readw(tp->mmio_addr + (reg))
#define RTL_R32(tp, reg) readl(tp->mmio_addr + (reg))
-enum mac_version {
- /* support for ancient RTL_GIGA_MAC_VER_01 has been removed */
- RTL_GIGA_MAC_VER_02,
- RTL_GIGA_MAC_VER_03,
- RTL_GIGA_MAC_VER_04,
- RTL_GIGA_MAC_VER_05,
- RTL_GIGA_MAC_VER_06,
- RTL_GIGA_MAC_VER_07,
- RTL_GIGA_MAC_VER_08,
- RTL_GIGA_MAC_VER_09,
- RTL_GIGA_MAC_VER_10,
- RTL_GIGA_MAC_VER_11,
- RTL_GIGA_MAC_VER_12,
- RTL_GIGA_MAC_VER_13,
- RTL_GIGA_MAC_VER_14,
- RTL_GIGA_MAC_VER_15,
- RTL_GIGA_MAC_VER_16,
- RTL_GIGA_MAC_VER_17,
- RTL_GIGA_MAC_VER_18,
- RTL_GIGA_MAC_VER_19,
- RTL_GIGA_MAC_VER_20,
- RTL_GIGA_MAC_VER_21,
- RTL_GIGA_MAC_VER_22,
- RTL_GIGA_MAC_VER_23,
- RTL_GIGA_MAC_VER_24,
- RTL_GIGA_MAC_VER_25,
- RTL_GIGA_MAC_VER_26,
- RTL_GIGA_MAC_VER_27,
- RTL_GIGA_MAC_VER_28,
- RTL_GIGA_MAC_VER_29,
- RTL_GIGA_MAC_VER_30,
- RTL_GIGA_MAC_VER_31,
- RTL_GIGA_MAC_VER_32,
- RTL_GIGA_MAC_VER_33,
- RTL_GIGA_MAC_VER_34,
- RTL_GIGA_MAC_VER_35,
- RTL_GIGA_MAC_VER_36,
- RTL_GIGA_MAC_VER_37,
- RTL_GIGA_MAC_VER_38,
- RTL_GIGA_MAC_VER_39,
- RTL_GIGA_MAC_VER_40,
- RTL_GIGA_MAC_VER_41,
- RTL_GIGA_MAC_VER_42,
- RTL_GIGA_MAC_VER_43,
- RTL_GIGA_MAC_VER_44,
- RTL_GIGA_MAC_VER_45,
- RTL_GIGA_MAC_VER_46,
- RTL_GIGA_MAC_VER_47,
- RTL_GIGA_MAC_VER_48,
- RTL_GIGA_MAC_VER_49,
- RTL_GIGA_MAC_VER_50,
- RTL_GIGA_MAC_VER_51,
- RTL_GIGA_MAC_VER_52,
- RTL_GIGA_MAC_VER_60,
- RTL_GIGA_MAC_VER_61,
- RTL_GIGA_MAC_NONE
-};
-
-#define JUMBO_1K ETH_DATA_LEN
#define JUMBO_4K (4*1024 - ETH_HLEN - 2)
#define JUMBO_6K (6*1024 - ETH_HLEN - 2)
#define JUMBO_7K (7*1024 - ETH_HLEN - 2)
@@ -492,6 +434,7 @@ enum rtl_register_content {
/* CPlusCmd p.31 */
EnableBist = (1 << 15), // 8168 8101
Mac_dbgo_oe = (1 << 14), // 8168 8101
+ EnAnaPLL = (1 << 14), // 8169
Normal_mode = (1 << 13), // unused
Force_half_dup = (1 << 12), // 8168 8101
Force_rxflow_en = (1 << 11), // 8168 8101
@@ -1078,52 +1021,6 @@ static int rtl_readphy(struct rtl8169_private *tp, int location)
}
}
-static void rtl_patchphy(struct rtl8169_private *tp, int reg_addr, int value)
-{
- rtl_writephy(tp, reg_addr, rtl_readphy(tp, reg_addr) | value);
-}
-
-static void rtl_w0w1_phy(struct rtl8169_private *tp, int reg_addr, int p, int m)
-{
- int val;
-
- val = rtl_readphy(tp, reg_addr);
- rtl_writephy(tp, reg_addr, (val & ~m) | p);
-}
-
-static void r8168d_modify_extpage(struct phy_device *phydev, int extpage,
- int reg, u16 mask, u16 val)
-{
- int oldpage = phy_select_page(phydev, 0x0007);
-
- __phy_write(phydev, 0x1e, extpage);
- __phy_modify(phydev, reg, mask, val);
-
- phy_restore_page(phydev, oldpage, 0);
-}
-
-static void r8168d_phy_param(struct phy_device *phydev, u16 parm,
- u16 mask, u16 val)
-{
- int oldpage = phy_select_page(phydev, 0x0005);
-
- __phy_write(phydev, 0x05, parm);
- __phy_modify(phydev, 0x06, mask, val);
-
- phy_restore_page(phydev, oldpage, 0);
-}
-
-static void r8168g_phy_param(struct phy_device *phydev, u16 parm,
- u16 mask, u16 val)
-{
- int oldpage = phy_select_page(phydev, 0x0a43);
-
- __phy_write(phydev, 0x13, parm);
- __phy_modify(phydev, 0x14, mask, val);
-
- phy_restore_page(phydev, oldpage, 0);
-}
-
DECLARE_RTL_COND(rtl_ephyar_cond)
{
return RTL_R32(tp, EPHYAR) & EPHYAR_FLAG;
@@ -1372,7 +1269,7 @@ DECLARE_RTL_COND(rtl_efusear_cond)
return RTL_R32(tp, EFUSEAR) & EFUSEAR_FLAG;
}
-static u8 rtl8168d_efuse_read(struct rtl8169_private *tp, int reg_addr)
+u8 rtl8168d_efuse_read(struct rtl8169_private *tp, int reg_addr)
{
RTL_W32(tp, EFUSEAR, (reg_addr & EFUSEAR_REG_MASK) << EFUSEAR_REG_SHIFT);
@@ -1596,7 +1493,7 @@ static netdev_features_t rtl8169_fix_features(struct net_device *dev,
if (dev->mtu > TD_MSS_MAX)
features &= ~NETIF_F_ALL_TSO;
- if (dev->mtu > JUMBO_1K &&
+ if (dev->mtu > ETH_DATA_LEN &&
tp->mac_version > RTL_GIGA_MAC_VER_06)
features &= ~(NETIF_F_CSUM_MASK | NETIF_F_ALL_TSO);
@@ -2268,22 +2165,6 @@ static void rtl8169_get_mac_version(struct rtl8169_private *tp)
}
}
-struct phy_reg {
- u16 reg;
- u16 val;
-};
-
-static void __rtl_writephy_batch(struct rtl8169_private *tp,
- const struct phy_reg *regs, int len)
-{
- while (len-- > 0) {
- rtl_writephy(tp, regs->reg, regs->val);
- regs++;
- }
-}
-
-#define rtl_writephy_batch(tp, a) __rtl_writephy_batch(tp, a, ARRAY_SIZE(a))
-
static void rtl_release_firmware(struct rtl8169_private *tp)
{
if (tp->rtl_fw) {
@@ -2293,7 +2174,7 @@ static void rtl_release_firmware(struct rtl8169_private *tp)
}
}
-static void rtl_apply_firmware(struct rtl8169_private *tp)
+void r8169_apply_firmware(struct rtl8169_private *tp)
{
/* TODO: release firmware if rtl_fw_write_firmware signals failure. */
if (tp->rtl_fw)
@@ -2315,594 +2196,6 @@ static void rtl8125_config_eee_mac(struct rtl8169_private *tp)
r8168_mac_ocp_modify(tp, 0xeb62, 0, BIT(2) | BIT(1));
}
-static void rtl8168f_config_eee_phy(struct rtl8169_private *tp)
-{
- struct phy_device *phydev = tp->phydev;
-
- r8168d_modify_extpage(phydev, 0x0020, 0x15, 0, BIT(8));
- r8168d_phy_param(phydev, 0x8b85, 0, BIT(13));
-}
-
-static void rtl8168g_config_eee_phy(struct rtl8169_private *tp)
-{
- phy_modify_paged(tp->phydev, 0x0a43, 0x11, 0, BIT(4));
-}
-
-static void rtl8168h_config_eee_phy(struct rtl8169_private *tp)
-{
- struct phy_device *phydev = tp->phydev;
-
- rtl8168g_config_eee_phy(tp);
-
- phy_modify_paged(phydev, 0xa4a, 0x11, 0x0000, 0x0200);
- phy_modify_paged(phydev, 0xa42, 0x14, 0x0000, 0x0080);
-}
-
-static void rtl8125_config_eee_phy(struct rtl8169_private *tp)
-{
- struct phy_device *phydev = tp->phydev;
-
- rtl8168h_config_eee_phy(tp);
-
- phy_modify_paged(phydev, 0xa6d, 0x12, 0x0001, 0x0000);
- phy_modify_paged(phydev, 0xa6d, 0x14, 0x0010, 0x0000);
-}
-
-static void rtl8169s_hw_phy_config(struct rtl8169_private *tp)
-{
- static const struct phy_reg phy_reg_init[] = {
- { 0x1f, 0x0001 },
- { 0x06, 0x006e },
- { 0x08, 0x0708 },
- { 0x15, 0x4000 },
- { 0x18, 0x65c7 },
-
- { 0x1f, 0x0001 },
- { 0x03, 0x00a1 },
- { 0x02, 0x0008 },
- { 0x01, 0x0120 },
- { 0x00, 0x1000 },
- { 0x04, 0x0800 },
- { 0x04, 0x0000 },
-
- { 0x03, 0xff41 },
- { 0x02, 0xdf60 },
- { 0x01, 0x0140 },
- { 0x00, 0x0077 },
- { 0x04, 0x7800 },
- { 0x04, 0x7000 },
-
- { 0x03, 0x802f },
- { 0x02, 0x4f02 },
- { 0x01, 0x0409 },
- { 0x00, 0xf0f9 },
- { 0x04, 0x9800 },
- { 0x04, 0x9000 },
-
- { 0x03, 0xdf01 },
- { 0x02, 0xdf20 },
- { 0x01, 0xff95 },
- { 0x00, 0xba00 },
- { 0x04, 0xa800 },
- { 0x04, 0xa000 },
-
- { 0x03, 0xff41 },
- { 0x02, 0xdf20 },
- { 0x01, 0x0140 },
- { 0x00, 0x00bb },
- { 0x04, 0xb800 },
- { 0x04, 0xb000 },
-
- { 0x03, 0xdf41 },
- { 0x02, 0xdc60 },
- { 0x01, 0x6340 },
- { 0x00, 0x007d },
- { 0x04, 0xd800 },
- { 0x04, 0xd000 },
-
- { 0x03, 0xdf01 },
- { 0x02, 0xdf20 },
- { 0x01, 0x100a },
- { 0x00, 0xa0ff },
- { 0x04, 0xf800 },
- { 0x04, 0xf000 },
-
- { 0x1f, 0x0000 },
- { 0x0b, 0x0000 },
- { 0x00, 0x9200 }
- };
-
- rtl_writephy_batch(tp, phy_reg_init);
-}
-
-static void rtl8169sb_hw_phy_config(struct rtl8169_private *tp)
-{
- phy_write_paged(tp->phydev, 0x0002, 0x01, 0x90d0);
-}
-
-static void rtl8169scd_hw_phy_config_quirk(struct rtl8169_private *tp)
-{
- struct pci_dev *pdev = tp->pci_dev;
-
- if ((pdev->subsystem_vendor != PCI_VENDOR_ID_GIGABYTE) ||
- (pdev->subsystem_device != 0xe000))
- return;
-
- phy_write_paged(tp->phydev, 0x0001, 0x10, 0xf01b);
-}
-
-static void rtl8169scd_hw_phy_config(struct rtl8169_private *tp)
-{
- static const struct phy_reg phy_reg_init[] = {
- { 0x1f, 0x0001 },
- { 0x04, 0x0000 },
- { 0x03, 0x00a1 },
- { 0x02, 0x0008 },
- { 0x01, 0x0120 },
- { 0x00, 0x1000 },
- { 0x04, 0x0800 },
- { 0x04, 0x9000 },
- { 0x03, 0x802f },
- { 0x02, 0x4f02 },
- { 0x01, 0x0409 },
- { 0x00, 0xf099 },
- { 0x04, 0x9800 },
- { 0x04, 0xa000 },
- { 0x03, 0xdf01 },
- { 0x02, 0xdf20 },
- { 0x01, 0xff95 },
- { 0x00, 0xba00 },
- { 0x04, 0xa800 },
- { 0x04, 0xf000 },
- { 0x03, 0xdf01 },
- { 0x02, 0xdf20 },
- { 0x01, 0x101a },
- { 0x00, 0xa0ff },
- { 0x04, 0xf800 },
- { 0x04, 0x0000 },
- { 0x1f, 0x0000 },
-
- { 0x1f, 0x0001 },
- { 0x10, 0xf41b },
- { 0x14, 0xfb54 },
- { 0x18, 0xf5c7 },
- { 0x1f, 0x0000 },
-
- { 0x1f, 0x0001 },
- { 0x17, 0x0cc0 },
- { 0x1f, 0x0000 }
- };
-
- rtl_writephy_batch(tp, phy_reg_init);
-
- rtl8169scd_hw_phy_config_quirk(tp);
-}
-
-static void rtl8169sce_hw_phy_config(struct rtl8169_private *tp)
-{
- static const struct phy_reg phy_reg_init[] = {
- { 0x1f, 0x0001 },
- { 0x04, 0x0000 },
- { 0x03, 0x00a1 },
- { 0x02, 0x0008 },
- { 0x01, 0x0120 },
- { 0x00, 0x1000 },
- { 0x04, 0x0800 },
- { 0x04, 0x9000 },
- { 0x03, 0x802f },
- { 0x02, 0x4f02 },
- { 0x01, 0x0409 },
- { 0x00, 0xf099 },
- { 0x04, 0x9800 },
- { 0x04, 0xa000 },
- { 0x03, 0xdf01 },
- { 0x02, 0xdf20 },
- { 0x01, 0xff95 },
- { 0x00, 0xba00 },
- { 0x04, 0xa800 },
- { 0x04, 0xf000 },
- { 0x03, 0xdf01 },
- { 0x02, 0xdf20 },
- { 0x01, 0x101a },
- { 0x00, 0xa0ff },
- { 0x04, 0xf800 },
- { 0x04, 0x0000 },
- { 0x1f, 0x0000 },
-
- { 0x1f, 0x0001 },
- { 0x0b, 0x8480 },
- { 0x1f, 0x0000 },
-
- { 0x1f, 0x0001 },
- { 0x18, 0x67c7 },
- { 0x04, 0x2000 },
- { 0x03, 0x002f },
- { 0x02, 0x4360 },
- { 0x01, 0x0109 },
- { 0x00, 0x3022 },
- { 0x04, 0x2800 },
- { 0x1f, 0x0000 },
-
- { 0x1f, 0x0001 },
- { 0x17, 0x0cc0 },
- { 0x1f, 0x0000 }
- };
-
- rtl_writephy_batch(tp, phy_reg_init);
-}
-
-static void rtl8168bb_hw_phy_config(struct rtl8169_private *tp)
-{
- rtl_writephy(tp, 0x1f, 0x0001);
- rtl_patchphy(tp, 0x16, 1 << 0);
- rtl_writephy(tp, 0x10, 0xf41b);
- rtl_writephy(tp, 0x1f, 0x0000);
-}
-
-static void rtl8168bef_hw_phy_config(struct rtl8169_private *tp)
-{
- phy_write_paged(tp->phydev, 0x0001, 0x10, 0xf41b);
-}
-
-static void rtl8168cp_1_hw_phy_config(struct rtl8169_private *tp)
-{
- phy_write(tp->phydev, 0x1d, 0x0f00);
- phy_write_paged(tp->phydev, 0x0002, 0x0c, 0x1ec8);
-}
-
-static void rtl8168cp_2_hw_phy_config(struct rtl8169_private *tp)
-{
- phy_set_bits(tp->phydev, 0x14, BIT(5));
- phy_set_bits(tp->phydev, 0x0d, BIT(5));
- phy_write_paged(tp->phydev, 0x0001, 0x1d, 0x3d98);
-}
-
-static void rtl8168c_1_hw_phy_config(struct rtl8169_private *tp)
-{
- static const struct phy_reg phy_reg_init[] = {
- { 0x1f, 0x0001 },
- { 0x12, 0x2300 },
- { 0x1f, 0x0002 },
- { 0x00, 0x88d4 },
- { 0x01, 0x82b1 },
- { 0x03, 0x7002 },
- { 0x08, 0x9e30 },
- { 0x09, 0x01f0 },
- { 0x0a, 0x5500 },
- { 0x0c, 0x00c8 },
- { 0x1f, 0x0003 },
- { 0x12, 0xc096 },
- { 0x16, 0x000a },
- { 0x1f, 0x0000 },
- { 0x1f, 0x0000 },
- { 0x09, 0x2000 },
- { 0x09, 0x0000 }
- };
-
- rtl_writephy_batch(tp, phy_reg_init);
-
- rtl_patchphy(tp, 0x14, 1 << 5);
- rtl_patchphy(tp, 0x0d, 1 << 5);
- rtl_writephy(tp, 0x1f, 0x0000);
-}
-
-static void rtl8168c_2_hw_phy_config(struct rtl8169_private *tp)
-{
- static const struct phy_reg phy_reg_init[] = {
- { 0x1f, 0x0001 },
- { 0x12, 0x2300 },
- { 0x03, 0x802f },
- { 0x02, 0x4f02 },
- { 0x01, 0x0409 },
- { 0x00, 0xf099 },
- { 0x04, 0x9800 },
- { 0x04, 0x9000 },
- { 0x1d, 0x3d98 },
- { 0x1f, 0x0002 },
- { 0x0c, 0x7eb8 },
- { 0x06, 0x0761 },
- { 0x1f, 0x0003 },
- { 0x16, 0x0f0a },
- { 0x1f, 0x0000 }
- };
-
- rtl_writephy_batch(tp, phy_reg_init);
-
- rtl_patchphy(tp, 0x16, 1 << 0);
- rtl_patchphy(tp, 0x14, 1 << 5);
- rtl_patchphy(tp, 0x0d, 1 << 5);
- rtl_writephy(tp, 0x1f, 0x0000);
-}
-
-static void rtl8168c_3_hw_phy_config(struct rtl8169_private *tp)
-{
- static const struct phy_reg phy_reg_init[] = {
- { 0x1f, 0x0001 },
- { 0x12, 0x2300 },
- { 0x1d, 0x3d98 },
- { 0x1f, 0x0002 },
- { 0x0c, 0x7eb8 },
- { 0x06, 0x5461 },
- { 0x1f, 0x0003 },
- { 0x16, 0x0f0a },
- { 0x1f, 0x0000 }
- };
-
- rtl_writephy_batch(tp, phy_reg_init);
-
- rtl_patchphy(tp, 0x16, 1 << 0);
- rtl_patchphy(tp, 0x14, 1 << 5);
- rtl_patchphy(tp, 0x0d, 1 << 5);
- rtl_writephy(tp, 0x1f, 0x0000);
-}
-
-static const struct phy_reg rtl8168d_1_phy_reg_init_0[] = {
- /* Channel Estimation */
- { 0x1f, 0x0001 },
- { 0x06, 0x4064 },
- { 0x07, 0x2863 },
- { 0x08, 0x059c },
- { 0x09, 0x26b4 },
- { 0x0a, 0x6a19 },
- { 0x0b, 0xdcc8 },
- { 0x10, 0xf06d },
- { 0x14, 0x7f68 },
- { 0x18, 0x7fd9 },
- { 0x1c, 0xf0ff },
- { 0x1d, 0x3d9c },
- { 0x1f, 0x0003 },
- { 0x12, 0xf49f },
- { 0x13, 0x070b },
- { 0x1a, 0x05ad },
- { 0x14, 0x94c0 },
-
- /*
- * Tx Error Issue
- * Enhance line driver power
- */
- { 0x1f, 0x0002 },
- { 0x06, 0x5561 },
- { 0x1f, 0x0005 },
- { 0x05, 0x8332 },
- { 0x06, 0x5561 },
-
- /*
- * Can not link to 1Gbps with bad cable
- * Decrease SNR threshold form 21.07dB to 19.04dB
- */
- { 0x1f, 0x0001 },
- { 0x17, 0x0cc0 },
-
- { 0x1f, 0x0000 },
- { 0x0d, 0xf880 }
-};
-
-static const struct phy_reg rtl8168d_1_phy_reg_init_1[] = {
- { 0x1f, 0x0002 },
- { 0x05, 0x669a },
- { 0x1f, 0x0005 },
- { 0x05, 0x8330 },
- { 0x06, 0x669a },
- { 0x1f, 0x0002 }
-};
-
-static void rtl8168d_apply_firmware_cond(struct rtl8169_private *tp, u16 val)
-{
- u16 reg_val;
-
- rtl_writephy(tp, 0x1f, 0x0005);
- rtl_writephy(tp, 0x05, 0x001b);
- reg_val = rtl_readphy(tp, 0x06);
- rtl_writephy(tp, 0x1f, 0x0000);
-
- if (reg_val != val)
- netif_warn(tp, hw, tp->dev, "chipset not ready for firmware\n");
- else
- rtl_apply_firmware(tp);
-}
-
-static void rtl8168d_1_hw_phy_config(struct rtl8169_private *tp)
-{
- rtl_writephy_batch(tp, rtl8168d_1_phy_reg_init_0);
-
- /*
- * Rx Error Issue
- * Fine Tune Switching regulator parameter
- */
- rtl_writephy(tp, 0x1f, 0x0002);
- rtl_w0w1_phy(tp, 0x0b, 0x0010, 0x00ef);
- rtl_w0w1_phy(tp, 0x0c, 0xa200, 0x5d00);
-
- if (rtl8168d_efuse_read(tp, 0x01) == 0xb1) {
- int val;
-
- rtl_writephy_batch(tp, rtl8168d_1_phy_reg_init_1);
-
- val = rtl_readphy(tp, 0x0d);
-
- if ((val & 0x00ff) != 0x006c) {
- static const u32 set[] = {
- 0x0065, 0x0066, 0x0067, 0x0068,
- 0x0069, 0x006a, 0x006b, 0x006c
- };
- int i;
-
- rtl_writephy(tp, 0x1f, 0x0002);
-
- val &= 0xff00;
- for (i = 0; i < ARRAY_SIZE(set); i++)
- rtl_writephy(tp, 0x0d, val | set[i]);
- }
- } else {
- phy_write_paged(tp->phydev, 0x0002, 0x05, 0x6662);
- r8168d_phy_param(tp->phydev, 0x8330, 0xffff, 0x6662);
- }
-
- /* RSET couple improve */
- rtl_writephy(tp, 0x1f, 0x0002);
- rtl_patchphy(tp, 0x0d, 0x0300);
- rtl_patchphy(tp, 0x0f, 0x0010);
-
- /* Fine tune PLL performance */
- rtl_writephy(tp, 0x1f, 0x0002);
- rtl_w0w1_phy(tp, 0x02, 0x0100, 0x0600);
- rtl_w0w1_phy(tp, 0x03, 0x0000, 0xe000);
- rtl_writephy(tp, 0x1f, 0x0000);
-
- rtl8168d_apply_firmware_cond(tp, 0xbf00);
-}
-
-static void rtl8168d_2_hw_phy_config(struct rtl8169_private *tp)
-{
- rtl_writephy_batch(tp, rtl8168d_1_phy_reg_init_0);
-
- if (rtl8168d_efuse_read(tp, 0x01) == 0xb1) {
- int val;
-
- rtl_writephy_batch(tp, rtl8168d_1_phy_reg_init_1);
-
- val = rtl_readphy(tp, 0x0d);
- if ((val & 0x00ff) != 0x006c) {
- static const u32 set[] = {
- 0x0065, 0x0066, 0x0067, 0x0068,
- 0x0069, 0x006a, 0x006b, 0x006c
- };
- int i;
-
- rtl_writephy(tp, 0x1f, 0x0002);
-
- val &= 0xff00;
- for (i = 0; i < ARRAY_SIZE(set); i++)
- rtl_writephy(tp, 0x0d, val | set[i]);
- }
- } else {
- phy_write_paged(tp->phydev, 0x0002, 0x05, 0x2642);
- r8168d_phy_param(tp->phydev, 0x8330, 0xffff, 0x2642);
- }
-
- /* Fine tune PLL performance */
- rtl_writephy(tp, 0x1f, 0x0002);
- rtl_w0w1_phy(tp, 0x02, 0x0100, 0x0600);
- rtl_w0w1_phy(tp, 0x03, 0x0000, 0xe000);
-
- /* Switching regulator Slew rate */
- rtl_writephy(tp, 0x1f, 0x0002);
- rtl_patchphy(tp, 0x0f, 0x0017);
- rtl_writephy(tp, 0x1f, 0x0000);
-
- rtl8168d_apply_firmware_cond(tp, 0xb300);
-}
-
-static void rtl8168d_3_hw_phy_config(struct rtl8169_private *tp)
-{
- static const struct phy_reg phy_reg_init[] = {
- { 0x1f, 0x0002 },
- { 0x10, 0x0008 },
- { 0x0d, 0x006c },
-
- { 0x1f, 0x0000 },
- { 0x0d, 0xf880 },
-
- { 0x1f, 0x0001 },
- { 0x17, 0x0cc0 },
-
- { 0x1f, 0x0001 },
- { 0x0b, 0xa4d8 },
- { 0x09, 0x281c },
- { 0x07, 0x2883 },
- { 0x0a, 0x6b35 },
- { 0x1d, 0x3da4 },
- { 0x1c, 0xeffd },
- { 0x14, 0x7f52 },
- { 0x18, 0x7fc6 },
- { 0x08, 0x0601 },
- { 0x06, 0x4063 },
- { 0x10, 0xf074 },
- { 0x1f, 0x0003 },
- { 0x13, 0x0789 },
- { 0x12, 0xf4bd },
- { 0x1a, 0x04fd },
- { 0x14, 0x84b0 },
- { 0x1f, 0x0000 },
- { 0x00, 0x9200 },
-
- { 0x1f, 0x0005 },
- { 0x01, 0x0340 },
- { 0x1f, 0x0001 },
- { 0x04, 0x4000 },
- { 0x03, 0x1d21 },
- { 0x02, 0x0c32 },
- { 0x01, 0x0200 },
- { 0x00, 0x5554 },
- { 0x04, 0x4800 },
- { 0x04, 0x4000 },
- { 0x04, 0xf000 },
- { 0x03, 0xdf01 },
- { 0x02, 0xdf20 },
- { 0x01, 0x101a },
- { 0x00, 0xa0ff },
- { 0x04, 0xf800 },
- { 0x04, 0xf000 },
- { 0x1f, 0x0000 },
- };
-
- rtl_writephy_batch(tp, phy_reg_init);
-
- r8168d_modify_extpage(tp->phydev, 0x0023, 0x16, 0xffff, 0x0000);
-}
-
-static void rtl8168d_4_hw_phy_config(struct rtl8169_private *tp)
-{
- phy_write_paged(tp->phydev, 0x0001, 0x17, 0x0cc0);
- r8168d_modify_extpage(tp->phydev, 0x002d, 0x18, 0xffff, 0x0040);
- phy_set_bits(tp->phydev, 0x0d, BIT(5));
-}
-
-static void rtl8168e_1_hw_phy_config(struct rtl8169_private *tp)
-{
- static const struct phy_reg phy_reg_init[] = {
- /* Channel estimation fine tune */
- { 0x1f, 0x0001 },
- { 0x0b, 0x6c20 },
- { 0x07, 0x2872 },
- { 0x1c, 0xefff },
- { 0x1f, 0x0003 },
- { 0x14, 0x6420 },
- { 0x1f, 0x0000 },
- };
- struct phy_device *phydev = tp->phydev;
-
- rtl_apply_firmware(tp);
-
- /* Enable Delay cap */
- r8168d_phy_param(phydev, 0x8b80, 0xffff, 0xc896);
-
- rtl_writephy_batch(tp, phy_reg_init);
-
- /* Update PFM & 10M TX idle timer */
- r8168d_modify_extpage(phydev, 0x002f, 0x15, 0xffff, 0x1919);
-
- r8168d_modify_extpage(phydev, 0x00ac, 0x18, 0xffff, 0x0006);
-
- /* DCO enable for 10M IDLE Power */
- r8168d_modify_extpage(phydev, 0x0023, 0x17, 0x0000, 0x0006);
-
- /* For impedance matching */
- phy_modify_paged(phydev, 0x0002, 0x08, 0x7f00, 0x8000);
-
- /* PHY auto speed down */
- r8168d_modify_extpage(phydev, 0x002d, 0x18, 0x0000, 0x0050);
- phy_set_bits(phydev, 0x14, BIT(15));
-
- r8168d_phy_param(phydev, 0x8b86, 0x0000, 0x0001);
- r8168d_phy_param(phydev, 0x8b85, 0x2000, 0x0000);
-
- r8168d_modify_extpage(phydev, 0x0020, 0x15, 0x1100, 0x0000);
- phy_write_paged(phydev, 0x0006, 0x00, 0x5a00);
-
- phy_write_mmd(phydev, MDIO_MMD_AN, MDIO_AN_EEE_ADV, 0x0000);
-}
-
static void rtl_rar_exgmac_set(struct rtl8169_private *tp, u8 *addr)
{
const u16 w[] = {
@@ -2917,698 +2210,20 @@ static void rtl_rar_exgmac_set(struct rtl8169_private *tp, u8 *addr)
rtl_eri_write(tp, 0xf4, ERIAR_MASK_1111, w[1] | (w[2] << 16));
}
-static void rtl8168e_2_hw_phy_config(struct rtl8169_private *tp)
+u16 rtl8168h_2_get_adc_bias_ioffset(struct rtl8169_private *tp)
{
- struct phy_device *phydev = tp->phydev;
-
- rtl_apply_firmware(tp);
-
- /* Enable Delay cap */
- r8168d_modify_extpage(phydev, 0x00ac, 0x18, 0xffff, 0x0006);
-
- /* Channel estimation fine tune */
- phy_write_paged(phydev, 0x0003, 0x09, 0xa20f);
-
- /* Green Setting */
- r8168d_phy_param(phydev, 0x8b5b, 0xffff, 0x9222);
- r8168d_phy_param(phydev, 0x8b6d, 0xffff, 0x8000);
- r8168d_phy_param(phydev, 0x8b76, 0xffff, 0x8000);
-
- /* For 4-corner performance improve */
- rtl_writephy(tp, 0x1f, 0x0005);
- rtl_writephy(tp, 0x05, 0x8b80);
- rtl_w0w1_phy(tp, 0x17, 0x0006, 0x0000);
- rtl_writephy(tp, 0x1f, 0x0000);
-
- /* PHY auto speed down */
- r8168d_modify_extpage(phydev, 0x002d, 0x18, 0x0000, 0x0010);
- phy_set_bits(phydev, 0x14, BIT(15));
-
- /* improve 10M EEE waveform */
- r8168d_phy_param(phydev, 0x8b86, 0x0000, 0x0001);
-
- /* Improve 2-pair detection performance */
- r8168d_phy_param(phydev, 0x8b85, 0x0000, 0x4000);
-
- rtl8168f_config_eee_phy(tp);
- rtl_enable_eee(tp);
-
- /* Green feature */
- rtl_writephy(tp, 0x1f, 0x0003);
- rtl_w0w1_phy(tp, 0x19, 0x0001, 0x0000);
- rtl_w0w1_phy(tp, 0x10, 0x0400, 0x0000);
- rtl_writephy(tp, 0x1f, 0x0000);
- rtl_writephy(tp, 0x1f, 0x0005);
- rtl_w0w1_phy(tp, 0x01, 0x0100, 0x0000);
- rtl_writephy(tp, 0x1f, 0x0000);
-
- /* Broken BIOS workaround: feed GigaMAC registers with MAC address. */
- rtl_rar_exgmac_set(tp, tp->dev->dev_addr);
-}
-
-static void rtl8168f_hw_phy_config(struct rtl8169_private *tp)
-{
- struct phy_device *phydev = tp->phydev;
-
- /* For 4-corner performance improve */
- r8168d_phy_param(phydev, 0x8b80, 0x0000, 0x0006);
-
- /* PHY auto speed down */
- r8168d_modify_extpage(phydev, 0x002d, 0x18, 0x0000, 0x0010);
- phy_set_bits(phydev, 0x14, BIT(15));
-
- /* Improve 10M EEE waveform */
- r8168d_phy_param(phydev, 0x8b86, 0x0000, 0x0001);
-
- rtl8168f_config_eee_phy(tp);
- rtl_enable_eee(tp);
-}
-
-static void rtl8168f_1_hw_phy_config(struct rtl8169_private *tp)
-{
- struct phy_device *phydev = tp->phydev;
-
- rtl_apply_firmware(tp);
-
- /* Channel estimation fine tune */
- phy_write_paged(phydev, 0x0003, 0x09, 0xa20f);
-
- /* Modify green table for giga & fnet */
- r8168d_phy_param(phydev, 0x8b55, 0xffff, 0x0000);
- r8168d_phy_param(phydev, 0x8b5e, 0xffff, 0x0000);
- r8168d_phy_param(phydev, 0x8b67, 0xffff, 0x0000);
- r8168d_phy_param(phydev, 0x8b70, 0xffff, 0x0000);
- r8168d_modify_extpage(phydev, 0x0078, 0x17, 0xffff, 0x0000);
- r8168d_modify_extpage(phydev, 0x0078, 0x19, 0xffff, 0x00fb);
-
- /* Modify green table for 10M */
- r8168d_phy_param(phydev, 0x8b79, 0xffff, 0xaa00);
-
- /* Disable hiimpedance detection (RTCT) */
- phy_write_paged(phydev, 0x0003, 0x01, 0x328a);
-
- rtl8168f_hw_phy_config(tp);
-
- /* Improve 2-pair detection performance */
- r8168d_phy_param(phydev, 0x8b85, 0x0000, 0x4000);
-}
-
-static void rtl8168f_2_hw_phy_config(struct rtl8169_private *tp)
-{
- rtl_apply_firmware(tp);
-
- rtl8168f_hw_phy_config(tp);
-}
-
-static void rtl8411_hw_phy_config(struct rtl8169_private *tp)
-{
- struct phy_device *phydev = tp->phydev;
-
- rtl_apply_firmware(tp);
-
- rtl8168f_hw_phy_config(tp);
-
- /* Improve 2-pair detection performance */
- r8168d_phy_param(phydev, 0x8b85, 0x0000, 0x4000);
-
- /* Channel estimation fine tune */
- phy_write_paged(phydev, 0x0003, 0x09, 0xa20f);
-
- /* Modify green table for giga & fnet */
- r8168d_phy_param(phydev, 0x8b55, 0xffff, 0x0000);
- r8168d_phy_param(phydev, 0x8b5e, 0xffff, 0x0000);
- r8168d_phy_param(phydev, 0x8b67, 0xffff, 0x0000);
- r8168d_phy_param(phydev, 0x8b70, 0xffff, 0x0000);
- r8168d_modify_extpage(phydev, 0x0078, 0x17, 0xffff, 0x0000);
- r8168d_modify_extpage(phydev, 0x0078, 0x19, 0xffff, 0x00aa);
-
- /* Modify green table for 10M */
- r8168d_phy_param(phydev, 0x8b79, 0xffff, 0xaa00);
-
- /* Disable hiimpedance detection (RTCT) */
- phy_write_paged(phydev, 0x0003, 0x01, 0x328a);
-
- /* Modify green table for giga */
- r8168d_phy_param(phydev, 0x8b54, 0x0800, 0x0000);
- r8168d_phy_param(phydev, 0x8b5d, 0x0800, 0x0000);
- r8168d_phy_param(phydev, 0x8a7c, 0x0100, 0x0000);
- r8168d_phy_param(phydev, 0x8a7f, 0x0000, 0x0100);
- r8168d_phy_param(phydev, 0x8a82, 0x0100, 0x0000);
- r8168d_phy_param(phydev, 0x8a85, 0x0100, 0x0000);
- r8168d_phy_param(phydev, 0x8a88, 0x0100, 0x0000);
-
- /* uc same-seed solution */
- r8168d_phy_param(phydev, 0x8b85, 0x0000, 0x8000);
-
- /* Green feature */
- rtl_writephy(tp, 0x1f, 0x0003);
- rtl_w0w1_phy(tp, 0x19, 0x0000, 0x0001);
- rtl_w0w1_phy(tp, 0x10, 0x0000, 0x0400);
- rtl_writephy(tp, 0x1f, 0x0000);
-}
-
-static void rtl8168g_disable_aldps(struct rtl8169_private *tp)
-{
- phy_modify_paged(tp->phydev, 0x0a43, 0x10, BIT(2), 0);
-}
-
-static void rtl8168g_phy_adjust_10m_aldps(struct rtl8169_private *tp)
-{
- struct phy_device *phydev = tp->phydev;
-
- phy_modify_paged(phydev, 0x0bcc, 0x14, BIT(8), 0);
- phy_modify_paged(phydev, 0x0a44, 0x11, 0, BIT(7) | BIT(6));
- r8168g_phy_param(phydev, 0x8084, 0x6000, 0x0000);
- phy_modify_paged(phydev, 0x0a43, 0x10, 0x0000, 0x1003);
-}
-
-static void rtl8168g_1_hw_phy_config(struct rtl8169_private *tp)
-{
- int ret;
-
- rtl_apply_firmware(tp);
-
- ret = phy_read_paged(tp->phydev, 0x0a46, 0x10);
- if (ret & BIT(8))
- phy_modify_paged(tp->phydev, 0x0bcc, 0x12, BIT(15), 0);
- else
- phy_modify_paged(tp->phydev, 0x0bcc, 0x12, 0, BIT(15));
-
- ret = phy_read_paged(tp->phydev, 0x0a46, 0x13);
- if (ret & BIT(8))
- phy_modify_paged(tp->phydev, 0x0c41, 0x15, 0, BIT(1));
- else
- phy_modify_paged(tp->phydev, 0x0c41, 0x15, BIT(1), 0);
-
- /* Enable PHY auto speed down */
- phy_modify_paged(tp->phydev, 0x0a44, 0x11, 0, BIT(3) | BIT(2));
-
- rtl8168g_phy_adjust_10m_aldps(tp);
-
- /* EEE auto-fallback function */
- phy_modify_paged(tp->phydev, 0x0a4b, 0x11, 0, BIT(2));
-
- /* Enable UC LPF tune function */
- r8168g_phy_param(tp->phydev, 0x8012, 0x0000, 0x8000);
-
- phy_modify_paged(tp->phydev, 0x0c42, 0x11, BIT(13), BIT(14));
-
- /* Improve SWR Efficiency */
- rtl_writephy(tp, 0x1f, 0x0bcd);
- rtl_writephy(tp, 0x14, 0x5065);
- rtl_writephy(tp, 0x14, 0xd065);
- rtl_writephy(tp, 0x1f, 0x0bc8);
- rtl_writephy(tp, 0x11, 0x5655);
- rtl_writephy(tp, 0x1f, 0x0bcd);
- rtl_writephy(tp, 0x14, 0x1065);
- rtl_writephy(tp, 0x14, 0x9065);
- rtl_writephy(tp, 0x14, 0x1065);
- rtl_writephy(tp, 0x1f, 0x0000);
-
- rtl8168g_disable_aldps(tp);
- rtl8168g_config_eee_phy(tp);
- rtl_enable_eee(tp);
-}
-
-static void rtl8168g_2_hw_phy_config(struct rtl8169_private *tp)
-{
- rtl_apply_firmware(tp);
- rtl8168g_config_eee_phy(tp);
- rtl_enable_eee(tp);
-}
-
-static void rtl8168h_1_hw_phy_config(struct rtl8169_private *tp)
-{
- struct phy_device *phydev = tp->phydev;
- u16 dout_tapbin;
- u32 data;
-
- rtl_apply_firmware(tp);
-
- /* CHN EST parameters adjust - giga master */
- r8168g_phy_param(phydev, 0x809b, 0xf800, 0x8000);
- r8168g_phy_param(phydev, 0x80a2, 0xff00, 0x8000);
- r8168g_phy_param(phydev, 0x80a4, 0xff00, 0x8500);
- r8168g_phy_param(phydev, 0x809c, 0xff00, 0xbd00);
-
- /* CHN EST parameters adjust - giga slave */
- r8168g_phy_param(phydev, 0x80ad, 0xf800, 0x7000);
- r8168g_phy_param(phydev, 0x80b4, 0xff00, 0x5000);
- r8168g_phy_param(phydev, 0x80ac, 0xff00, 0x4000);
-
- /* CHN EST parameters adjust - fnet */
- r8168g_phy_param(phydev, 0x808e, 0xff00, 0x1200);
- r8168g_phy_param(phydev, 0x8090, 0xff00, 0xe500);
- r8168g_phy_param(phydev, 0x8092, 0xff00, 0x9f00);
-
- /* enable R-tune & PGA-retune function */
- dout_tapbin = 0;
- data = phy_read_paged(phydev, 0x0a46, 0x13);
- data &= 3;
- data <<= 2;
- dout_tapbin |= data;
- data = phy_read_paged(phydev, 0x0a46, 0x12);
- data &= 0xc000;
- data >>= 14;
- dout_tapbin |= data;
- dout_tapbin = ~(dout_tapbin^0x08);
- dout_tapbin <<= 12;
- dout_tapbin &= 0xf000;
-
- r8168g_phy_param(phydev, 0x827a, 0xf000, dout_tapbin);
- r8168g_phy_param(phydev, 0x827b, 0xf000, dout_tapbin);
- r8168g_phy_param(phydev, 0x827c, 0xf000, dout_tapbin);
- r8168g_phy_param(phydev, 0x827d, 0xf000, dout_tapbin);
- r8168g_phy_param(phydev, 0x0811, 0x0000, 0x0800);
- phy_modify_paged(phydev, 0x0a42, 0x16, 0x0000, 0x0002);
-
- /* enable GPHY 10M */
- phy_modify_paged(tp->phydev, 0x0a44, 0x11, 0, BIT(11));
-
- /* SAR ADC performance */
- phy_modify_paged(tp->phydev, 0x0bca, 0x17, BIT(12) | BIT(13), BIT(14));
-
- r8168g_phy_param(phydev, 0x803f, 0x3000, 0x0000);
- r8168g_phy_param(phydev, 0x8047, 0x3000, 0x0000);
- r8168g_phy_param(phydev, 0x804f, 0x3000, 0x0000);
- r8168g_phy_param(phydev, 0x8057, 0x3000, 0x0000);
- r8168g_phy_param(phydev, 0x805f, 0x3000, 0x0000);
- r8168g_phy_param(phydev, 0x8067, 0x3000, 0x0000);
- r8168g_phy_param(phydev, 0x806f, 0x3000, 0x0000);
-
- /* disable phy pfm mode */
- phy_modify_paged(tp->phydev, 0x0a44, 0x11, BIT(7), 0);
-
- rtl8168g_disable_aldps(tp);
- rtl8168h_config_eee_phy(tp);
- rtl_enable_eee(tp);
-}
-
-static void rtl8168h_2_hw_phy_config(struct rtl8169_private *tp)
-{
- u16 ioffset_p3, ioffset_p2, ioffset_p1, ioffset_p0;
- struct phy_device *phydev = tp->phydev;
- u16 rlen;
- u32 data;
-
- rtl_apply_firmware(tp);
-
- /* CHIN EST parameter update */
- r8168g_phy_param(phydev, 0x808a, 0x003f, 0x000a);
-
- /* enable R-tune & PGA-retune function */
- r8168g_phy_param(phydev, 0x0811, 0x0000, 0x0800);
- phy_modify_paged(phydev, 0x0a42, 0x16, 0x0000, 0x0002);
-
- /* enable GPHY 10M */
- phy_modify_paged(tp->phydev, 0x0a44, 0x11, 0, BIT(11));
+ u16 data1, data2, ioffset;
r8168_mac_ocp_write(tp, 0xdd02, 0x807d);
- data = r8168_mac_ocp_read(tp, 0xdd02);
- ioffset_p3 = ((data & 0x80)>>7);
- ioffset_p3 <<= 3;
-
- data = r8168_mac_ocp_read(tp, 0xdd00);
- ioffset_p3 |= ((data & (0xe000))>>13);
- ioffset_p2 = ((data & (0x1e00))>>9);
- ioffset_p1 = ((data & (0x01e0))>>5);
- ioffset_p0 = ((data & 0x0010)>>4);
- ioffset_p0 <<= 3;
- ioffset_p0 |= (data & (0x07));
- data = (ioffset_p3<<12)|(ioffset_p2<<8)|(ioffset_p1<<4)|(ioffset_p0);
-
- if ((ioffset_p3 != 0x0f) || (ioffset_p2 != 0x0f) ||
- (ioffset_p1 != 0x0f) || (ioffset_p0 != 0x0f))
- phy_write_paged(phydev, 0x0bcf, 0x16, data);
-
- /* Modify rlen (TX LPF corner frequency) level */
- data = phy_read_paged(phydev, 0x0bcd, 0x16);
- data &= 0x000f;
- rlen = 0;
- if (data > 3)
- rlen = data - 3;
- data = rlen | (rlen<<4) | (rlen<<8) | (rlen<<12);
- phy_write_paged(phydev, 0x0bcd, 0x17, data);
-
- /* disable phy pfm mode */
- phy_modify_paged(phydev, 0x0a44, 0x11, BIT(7), 0);
-
- rtl8168g_disable_aldps(tp);
- rtl8168g_config_eee_phy(tp);
- rtl_enable_eee(tp);
-}
-
-static void rtl8168ep_1_hw_phy_config(struct rtl8169_private *tp)
-{
- struct phy_device *phydev = tp->phydev;
-
- /* Enable PHY auto speed down */
- phy_modify_paged(phydev, 0x0a44, 0x11, 0, BIT(3) | BIT(2));
-
- rtl8168g_phy_adjust_10m_aldps(tp);
-
- /* Enable EEE auto-fallback function */
- phy_modify_paged(phydev, 0x0a4b, 0x11, 0, BIT(2));
-
- /* Enable UC LPF tune function */
- r8168g_phy_param(phydev, 0x8012, 0x0000, 0x8000);
-
- /* set rg_sel_sdm_rate */
- phy_modify_paged(phydev, 0x0c42, 0x11, BIT(13), BIT(14));
-
- rtl8168g_disable_aldps(tp);
- rtl8168g_config_eee_phy(tp);
- rtl_enable_eee(tp);
-}
-
-static void rtl8168ep_2_hw_phy_config(struct rtl8169_private *tp)
-{
- struct phy_device *phydev = tp->phydev;
-
- rtl8168g_phy_adjust_10m_aldps(tp);
-
- /* Enable UC LPF tune function */
- r8168g_phy_param(phydev, 0x8012, 0x0000, 0x8000);
-
- /* Set rg_sel_sdm_rate */
- phy_modify_paged(tp->phydev, 0x0c42, 0x11, BIT(13), BIT(14));
-
- /* Channel estimation parameters */
- r8168g_phy_param(phydev, 0x80f3, 0xff00, 0x8b00);
- r8168g_phy_param(phydev, 0x80f0, 0xff00, 0x3a00);
- r8168g_phy_param(phydev, 0x80ef, 0xff00, 0x0500);
- r8168g_phy_param(phydev, 0x80f6, 0xff00, 0x6e00);
- r8168g_phy_param(phydev, 0x80ec, 0xff00, 0x6800);
- r8168g_phy_param(phydev, 0x80ed, 0xff00, 0x7c00);
- r8168g_phy_param(phydev, 0x80f2, 0xff00, 0xf400);
- r8168g_phy_param(phydev, 0x80f4, 0xff00, 0x8500);
- r8168g_phy_param(phydev, 0x8110, 0xff00, 0xa800);
- r8168g_phy_param(phydev, 0x810f, 0xff00, 0x1d00);
- r8168g_phy_param(phydev, 0x8111, 0xff00, 0xf500);
- r8168g_phy_param(phydev, 0x8113, 0xff00, 0x6100);
- r8168g_phy_param(phydev, 0x8115, 0xff00, 0x9200);
- r8168g_phy_param(phydev, 0x810e, 0xff00, 0x0400);
- r8168g_phy_param(phydev, 0x810c, 0xff00, 0x7c00);
- r8168g_phy_param(phydev, 0x810b, 0xff00, 0x5a00);
- r8168g_phy_param(phydev, 0x80d1, 0xff00, 0xff00);
- r8168g_phy_param(phydev, 0x80cd, 0xff00, 0x9e00);
- r8168g_phy_param(phydev, 0x80d3, 0xff00, 0x0e00);
- r8168g_phy_param(phydev, 0x80d5, 0xff00, 0xca00);
- r8168g_phy_param(phydev, 0x80d7, 0xff00, 0x8400);
-
- /* Force PWM-mode */
- rtl_writephy(tp, 0x1f, 0x0bcd);
- rtl_writephy(tp, 0x14, 0x5065);
- rtl_writephy(tp, 0x14, 0xd065);
- rtl_writephy(tp, 0x1f, 0x0bc8);
- rtl_writephy(tp, 0x12, 0x00ed);
- rtl_writephy(tp, 0x1f, 0x0bcd);
- rtl_writephy(tp, 0x14, 0x1065);
- rtl_writephy(tp, 0x14, 0x9065);
- rtl_writephy(tp, 0x14, 0x1065);
- rtl_writephy(tp, 0x1f, 0x0000);
-
- rtl8168g_disable_aldps(tp);
- rtl8168g_config_eee_phy(tp);
- rtl_enable_eee(tp);
-}
-
-static void rtl8117_hw_phy_config(struct rtl8169_private *tp)
-{
- struct phy_device *phydev = tp->phydev;
-
- /* CHN EST parameters adjust - fnet */
- r8168g_phy_param(phydev, 0x808e, 0xff00, 0x4800);
- r8168g_phy_param(phydev, 0x8090, 0xff00, 0xcc00);
- r8168g_phy_param(phydev, 0x8092, 0xff00, 0xb000);
-
- r8168g_phy_param(phydev, 0x8088, 0xff00, 0x6000);
- r8168g_phy_param(phydev, 0x808b, 0x3f00, 0x0b00);
- r8168g_phy_param(phydev, 0x808d, 0x1f00, 0x0600);
- r8168g_phy_param(phydev, 0x808c, 0xff00, 0xb000);
- r8168g_phy_param(phydev, 0x80a0, 0xff00, 0x2800);
- r8168g_phy_param(phydev, 0x80a2, 0xff00, 0x5000);
- r8168g_phy_param(phydev, 0x809b, 0xf800, 0xb000);
- r8168g_phy_param(phydev, 0x809a, 0xff00, 0x4b00);
- r8168g_phy_param(phydev, 0x809d, 0x3f00, 0x0800);
- r8168g_phy_param(phydev, 0x80a1, 0xff00, 0x7000);
- r8168g_phy_param(phydev, 0x809f, 0x1f00, 0x0300);
- r8168g_phy_param(phydev, 0x809e, 0xff00, 0x8800);
- r8168g_phy_param(phydev, 0x80b2, 0xff00, 0x2200);
- r8168g_phy_param(phydev, 0x80ad, 0xf800, 0x9800);
- r8168g_phy_param(phydev, 0x80af, 0x3f00, 0x0800);
- r8168g_phy_param(phydev, 0x80b3, 0xff00, 0x6f00);
- r8168g_phy_param(phydev, 0x80b1, 0x1f00, 0x0300);
- r8168g_phy_param(phydev, 0x80b0, 0xff00, 0x9300);
-
- r8168g_phy_param(phydev, 0x8011, 0x0000, 0x0800);
-
- /* enable GPHY 10M */
- phy_modify_paged(tp->phydev, 0x0a44, 0x11, 0, BIT(11));
-
- r8168g_phy_param(phydev, 0x8016, 0x0000, 0x0400);
-
- rtl8168g_disable_aldps(tp);
- rtl8168h_config_eee_phy(tp);
- rtl_enable_eee(tp);
-}
-
-static void rtl8102e_hw_phy_config(struct rtl8169_private *tp)
-{
- static const struct phy_reg phy_reg_init[] = {
- { 0x1f, 0x0003 },
- { 0x08, 0x441d },
- { 0x01, 0x9100 },
- { 0x1f, 0x0000 }
- };
-
- rtl_writephy(tp, 0x1f, 0x0000);
- rtl_patchphy(tp, 0x11, 1 << 12);
- rtl_patchphy(tp, 0x19, 1 << 13);
- rtl_patchphy(tp, 0x10, 1 << 15);
-
- rtl_writephy_batch(tp, phy_reg_init);
-}
-
-static void rtl8105e_hw_phy_config(struct rtl8169_private *tp)
-{
- /* Disable ALDPS before ram code */
- phy_write(tp->phydev, 0x18, 0x0310);
- msleep(100);
-
- rtl_apply_firmware(tp);
-
- phy_write_paged(tp->phydev, 0x0005, 0x1a, 0x0000);
- phy_write_paged(tp->phydev, 0x0004, 0x1c, 0x0000);
- phy_write_paged(tp->phydev, 0x0001, 0x15, 0x7701);
-}
-
-static void rtl8402_hw_phy_config(struct rtl8169_private *tp)
-{
- /* Disable ALDPS before setting firmware */
- phy_write(tp->phydev, 0x18, 0x0310);
- msleep(20);
-
- rtl_apply_firmware(tp);
-
- /* EEE setting */
- rtl_eri_write(tp, 0x1b0, ERIAR_MASK_0011, 0x0000);
- rtl_writephy(tp, 0x1f, 0x0004);
- rtl_writephy(tp, 0x10, 0x401f);
- rtl_writephy(tp, 0x19, 0x7030);
- rtl_writephy(tp, 0x1f, 0x0000);
-}
-
-static void rtl8106e_hw_phy_config(struct rtl8169_private *tp)
-{
- static const struct phy_reg phy_reg_init[] = {
- { 0x1f, 0x0004 },
- { 0x10, 0xc07f },
- { 0x19, 0x7030 },
- { 0x1f, 0x0000 }
- };
-
- /* Disable ALDPS before ram code */
- phy_write(tp->phydev, 0x18, 0x0310);
- msleep(100);
-
- rtl_apply_firmware(tp);
-
- rtl_eri_write(tp, 0x1b0, ERIAR_MASK_0011, 0x0000);
- rtl_writephy_batch(tp, phy_reg_init);
-
- rtl_eri_write(tp, 0x1d0, ERIAR_MASK_0011, 0x0000);
-}
-
-static void rtl8125_1_hw_phy_config(struct rtl8169_private *tp)
-{
- struct phy_device *phydev = tp->phydev;
-
- phy_modify_paged(phydev, 0xad4, 0x10, 0x03ff, 0x0084);
- phy_modify_paged(phydev, 0xad4, 0x17, 0x0000, 0x0010);
- phy_modify_paged(phydev, 0xad1, 0x13, 0x03ff, 0x0006);
- phy_modify_paged(phydev, 0xad3, 0x11, 0x003f, 0x0006);
- phy_modify_paged(phydev, 0xac0, 0x14, 0x0000, 0x1100);
- phy_modify_paged(phydev, 0xac8, 0x15, 0xf000, 0x7000);
- phy_modify_paged(phydev, 0xad1, 0x14, 0x0000, 0x0400);
- phy_modify_paged(phydev, 0xad1, 0x15, 0x0000, 0x03ff);
- phy_modify_paged(phydev, 0xad1, 0x16, 0x0000, 0x03ff);
-
- r8168g_phy_param(phydev, 0x80ea, 0xff00, 0xc400);
- r8168g_phy_param(phydev, 0x80eb, 0x0700, 0x0300);
- r8168g_phy_param(phydev, 0x80f8, 0xff00, 0x1c00);
- r8168g_phy_param(phydev, 0x80f1, 0xff00, 0x3000);
- r8168g_phy_param(phydev, 0x80fe, 0xff00, 0xa500);
- r8168g_phy_param(phydev, 0x8102, 0xff00, 0x5000);
- r8168g_phy_param(phydev, 0x8105, 0xff00, 0x3300);
- r8168g_phy_param(phydev, 0x8100, 0xff00, 0x7000);
- r8168g_phy_param(phydev, 0x8104, 0xff00, 0xf000);
- r8168g_phy_param(phydev, 0x8106, 0xff00, 0x6500);
- r8168g_phy_param(phydev, 0x80dc, 0xff00, 0xed00);
- r8168g_phy_param(phydev, 0x80df, 0x0000, 0x0100);
- r8168g_phy_param(phydev, 0x80e1, 0x0100, 0x0000);
-
- phy_modify_paged(phydev, 0xbf0, 0x13, 0x003f, 0x0038);
- r8168g_phy_param(phydev, 0x819f, 0xffff, 0xd0b6);
-
- phy_write_paged(phydev, 0xbc3, 0x12, 0x5555);
- phy_modify_paged(phydev, 0xbf0, 0x15, 0x0e00, 0x0a00);
- phy_modify_paged(phydev, 0xa5c, 0x10, 0x0400, 0x0000);
- phy_modify_paged(phydev, 0xa44, 0x11, 0x0000, 0x0800);
-
- rtl8125_config_eee_phy(tp);
- rtl_enable_eee(tp);
-}
-
-static void rtl8125_2_hw_phy_config(struct rtl8169_private *tp)
-{
- struct phy_device *phydev = tp->phydev;
- int i;
+ data1 = r8168_mac_ocp_read(tp, 0xdd02);
+ data2 = r8168_mac_ocp_read(tp, 0xdd00);
- phy_modify_paged(phydev, 0xad4, 0x17, 0x0000, 0x0010);
- phy_modify_paged(phydev, 0xad1, 0x13, 0x03ff, 0x03ff);
- phy_modify_paged(phydev, 0xad3, 0x11, 0x003f, 0x0006);
- phy_modify_paged(phydev, 0xac0, 0x14, 0x1100, 0x0000);
- phy_modify_paged(phydev, 0xacc, 0x10, 0x0003, 0x0002);
- phy_modify_paged(phydev, 0xad4, 0x10, 0x00e7, 0x0044);
- phy_modify_paged(phydev, 0xac1, 0x12, 0x0080, 0x0000);
- phy_modify_paged(phydev, 0xac8, 0x10, 0x0300, 0x0000);
- phy_modify_paged(phydev, 0xac5, 0x17, 0x0007, 0x0002);
- phy_write_paged(phydev, 0xad4, 0x16, 0x00a8);
- phy_write_paged(phydev, 0xac5, 0x16, 0x01ff);
- phy_modify_paged(phydev, 0xac8, 0x15, 0x00f0, 0x0030);
-
- phy_write(phydev, 0x1f, 0x0b87);
- phy_write(phydev, 0x16, 0x80a2);
- phy_write(phydev, 0x17, 0x0153);
- phy_write(phydev, 0x16, 0x809c);
- phy_write(phydev, 0x17, 0x0153);
- phy_write(phydev, 0x1f, 0x0000);
-
- phy_write(phydev, 0x1f, 0x0a43);
- phy_write(phydev, 0x13, 0x81B3);
- phy_write(phydev, 0x14, 0x0043);
- phy_write(phydev, 0x14, 0x00A7);
- phy_write(phydev, 0x14, 0x00D6);
- phy_write(phydev, 0x14, 0x00EC);
- phy_write(phydev, 0x14, 0x00F6);
- phy_write(phydev, 0x14, 0x00FB);
- phy_write(phydev, 0x14, 0x00FD);
- phy_write(phydev, 0x14, 0x00FF);
- phy_write(phydev, 0x14, 0x00BB);
- phy_write(phydev, 0x14, 0x0058);
- phy_write(phydev, 0x14, 0x0029);
- phy_write(phydev, 0x14, 0x0013);
- phy_write(phydev, 0x14, 0x0009);
- phy_write(phydev, 0x14, 0x0004);
- phy_write(phydev, 0x14, 0x0002);
- for (i = 0; i < 25; i++)
- phy_write(phydev, 0x14, 0x0000);
- phy_write(phydev, 0x1f, 0x0000);
-
- r8168g_phy_param(phydev, 0x8257, 0xffff, 0x020F);
- r8168g_phy_param(phydev, 0x80ea, 0xffff, 0x7843);
-
- rtl_apply_firmware(tp);
-
- phy_modify_paged(phydev, 0xd06, 0x14, 0x0000, 0x2000);
-
- r8168g_phy_param(phydev, 0x81a2, 0x0000, 0x0100);
-
- phy_modify_paged(phydev, 0xb54, 0x16, 0xff00, 0xdb00);
- phy_modify_paged(phydev, 0xa45, 0x12, 0x0001, 0x0000);
- phy_modify_paged(phydev, 0xa5d, 0x12, 0x0000, 0x0020);
- phy_modify_paged(phydev, 0xad4, 0x17, 0x0010, 0x0000);
- phy_modify_paged(phydev, 0xa86, 0x15, 0x0001, 0x0000);
- phy_modify_paged(phydev, 0xa44, 0x11, 0x0000, 0x0800);
-
- rtl8125_config_eee_phy(tp);
- rtl_enable_eee(tp);
-}
-
-static void rtl_hw_phy_config(struct net_device *dev)
-{
- static const rtl_generic_fct phy_configs[] = {
- /* PCI devices. */
- [RTL_GIGA_MAC_VER_02] = rtl8169s_hw_phy_config,
- [RTL_GIGA_MAC_VER_03] = rtl8169s_hw_phy_config,
- [RTL_GIGA_MAC_VER_04] = rtl8169sb_hw_phy_config,
- [RTL_GIGA_MAC_VER_05] = rtl8169scd_hw_phy_config,
- [RTL_GIGA_MAC_VER_06] = rtl8169sce_hw_phy_config,
- /* PCI-E devices. */
- [RTL_GIGA_MAC_VER_07] = rtl8102e_hw_phy_config,
- [RTL_GIGA_MAC_VER_08] = rtl8102e_hw_phy_config,
- [RTL_GIGA_MAC_VER_09] = rtl8102e_hw_phy_config,
- [RTL_GIGA_MAC_VER_10] = NULL,
- [RTL_GIGA_MAC_VER_11] = rtl8168bb_hw_phy_config,
- [RTL_GIGA_MAC_VER_12] = rtl8168bef_hw_phy_config,
- [RTL_GIGA_MAC_VER_13] = NULL,
- [RTL_GIGA_MAC_VER_14] = NULL,
- [RTL_GIGA_MAC_VER_15] = NULL,
- [RTL_GIGA_MAC_VER_16] = NULL,
- [RTL_GIGA_MAC_VER_17] = rtl8168bef_hw_phy_config,
- [RTL_GIGA_MAC_VER_18] = rtl8168cp_1_hw_phy_config,
- [RTL_GIGA_MAC_VER_19] = rtl8168c_1_hw_phy_config,
- [RTL_GIGA_MAC_VER_20] = rtl8168c_2_hw_phy_config,
- [RTL_GIGA_MAC_VER_21] = rtl8168c_3_hw_phy_config,
- [RTL_GIGA_MAC_VER_22] = rtl8168c_3_hw_phy_config,
- [RTL_GIGA_MAC_VER_23] = rtl8168cp_2_hw_phy_config,
- [RTL_GIGA_MAC_VER_24] = rtl8168cp_2_hw_phy_config,
- [RTL_GIGA_MAC_VER_25] = rtl8168d_1_hw_phy_config,
- [RTL_GIGA_MAC_VER_26] = rtl8168d_2_hw_phy_config,
- [RTL_GIGA_MAC_VER_27] = rtl8168d_3_hw_phy_config,
- [RTL_GIGA_MAC_VER_28] = rtl8168d_4_hw_phy_config,
- [RTL_GIGA_MAC_VER_29] = rtl8105e_hw_phy_config,
- [RTL_GIGA_MAC_VER_30] = rtl8105e_hw_phy_config,
- [RTL_GIGA_MAC_VER_31] = NULL,
- [RTL_GIGA_MAC_VER_32] = rtl8168e_1_hw_phy_config,
- [RTL_GIGA_MAC_VER_33] = rtl8168e_1_hw_phy_config,
- [RTL_GIGA_MAC_VER_34] = rtl8168e_2_hw_phy_config,
- [RTL_GIGA_MAC_VER_35] = rtl8168f_1_hw_phy_config,
- [RTL_GIGA_MAC_VER_36] = rtl8168f_2_hw_phy_config,
- [RTL_GIGA_MAC_VER_37] = rtl8402_hw_phy_config,
- [RTL_GIGA_MAC_VER_38] = rtl8411_hw_phy_config,
- [RTL_GIGA_MAC_VER_39] = rtl8106e_hw_phy_config,
- [RTL_GIGA_MAC_VER_40] = rtl8168g_1_hw_phy_config,
- [RTL_GIGA_MAC_VER_41] = NULL,
- [RTL_GIGA_MAC_VER_42] = rtl8168g_2_hw_phy_config,
- [RTL_GIGA_MAC_VER_43] = rtl8168g_2_hw_phy_config,
- [RTL_GIGA_MAC_VER_44] = rtl8168g_2_hw_phy_config,
- [RTL_GIGA_MAC_VER_45] = rtl8168h_1_hw_phy_config,
- [RTL_GIGA_MAC_VER_46] = rtl8168h_2_hw_phy_config,
- [RTL_GIGA_MAC_VER_47] = rtl8168h_1_hw_phy_config,
- [RTL_GIGA_MAC_VER_48] = rtl8168h_2_hw_phy_config,
- [RTL_GIGA_MAC_VER_49] = rtl8168ep_1_hw_phy_config,
- [RTL_GIGA_MAC_VER_50] = rtl8168ep_2_hw_phy_config,
- [RTL_GIGA_MAC_VER_51] = rtl8168ep_2_hw_phy_config,
- [RTL_GIGA_MAC_VER_52] = rtl8117_hw_phy_config,
- [RTL_GIGA_MAC_VER_60] = rtl8125_1_hw_phy_config,
- [RTL_GIGA_MAC_VER_61] = rtl8125_2_hw_phy_config,
- };
- struct rtl8169_private *tp = netdev_priv(dev);
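+ /* Repack the four 4-bit ADC bias offsets read via OCP regs 0xdd00/
+ * 0xdd02 into one 16-bit value; equivalent to the per-nibble
+ * extraction in the removed code above.
+ */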
+ ioffset = (data2 >> 1) & 0x7ff8;
+ ioffset |= data2 & 0x0007;
+ if (data1 & BIT(7))
+ ioffset |= BIT(15);
- if (phy_configs[tp->mac_version])
- phy_configs[tp->mac_version](tp);
+ return ioffset;
}
static void rtl_schedule_task(struct rtl8169_private *tp, enum rtl_flag flag)
@@ -3617,21 +2232,28 @@ static void rtl_schedule_task(struct rtl8169_private *tp, enum rtl_flag flag)
schedule_work(&tp->wk.work);
}
-static void rtl8169_init_phy(struct net_device *dev, struct rtl8169_private *tp)
+static void rtl8169_init_phy(struct rtl8169_private *tp)
{
- rtl_hw_phy_config(dev);
+ r8169_hw_phy_config(tp, tp->phydev, tp->mac_version);
if (tp->mac_version <= RTL_GIGA_MAC_VER_06) {
pci_write_config_byte(tp->pci_dev, PCI_LATENCY_TIMER, 0x40);
pci_write_config_byte(tp->pci_dev, PCI_CACHE_LINE_SIZE, 0x08);
- netif_dbg(tp, drv, dev,
- "Set MAC Reg C+CR Offset 0x82h = 0x01h\n");
+ /* set undocumented MAC Reg C+CR Offset 0x82h */
RTL_W8(tp, 0x82, 0x01);
}
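+ /* Board quirk: a Gigabyte board (subsystem 0xe000) with an RTL8169SC
+ * needs this extra page-1 PHY write.
+ */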
+ if (tp->mac_version == RTL_GIGA_MAC_VER_05 &&
+ tp->pci_dev->subsystem_vendor == PCI_VENDOR_ID_GIGABYTE &&
+ tp->pci_dev->subsystem_device == 0xe000)
+ phy_write_paged(tp->phydev, 0x0001, 0x10, 0xf01b);
+
/* We may have called phy_speed_down before */
phy_speed_up(tp->phydev);
+ if (rtl_supports_eee(tp))
+ rtl_enable_eee(tp);
+
genphy_soft_reset(tp->phydev);
}
@@ -3675,16 +2297,6 @@ static int rtl_set_mac_address(struct net_device *dev, void *p)
return 0;
}
-static int rtl8169_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
-{
- struct rtl8169_private *tp = netdev_priv(dev);
-
- if (!netif_running(dev))
- return -ENODEV;
-
- return phy_mii_ioctl(tp->phydev, ifr, cmd);
-}
-
static void rtl_wol_suspend_quirk(struct rtl8169_private *tp)
{
switch (tp->mac_version) {
@@ -4710,9 +3322,7 @@ static void rtl_hw_start_8168h_1(struct rtl8169_private *tp)
rtl_pcie_state_l2l3_disable(tp);
- rtl_writephy(tp, 0x1f, 0x0c42);
- rg_saw_cnt = (rtl_readphy(tp, 0x13) & 0x3fff);
- rtl_writephy(tp, 0x1f, 0x0000);
+ rg_saw_cnt = phy_read_paged(tp->phydev, 0x0c42, 0x13) & 0x3fff;
if (rg_saw_cnt > 0) {
u16 sw_cnt_1ms_ini;
@@ -4887,7 +3497,7 @@ static void rtl_hw_start_8117(struct rtl8169_private *tp)
r8168_mac_ocp_write(tp, 0xc09e, 0x0000);
/* firmware is for MAC only */
- rtl_apply_firmware(tp);
+ r8169_apply_firmware(tp);
rtl_hw_aspm_clkreq_enable(tp, true);
}
@@ -4991,6 +3601,9 @@ static void rtl_hw_start_8402(struct rtl8169_private *tp)
rtl_eri_write(tp, 0xb8, ERIAR_MASK_0011, 0x0000);
rtl_w0w1_eri(tp, 0x0d4, ERIAR_MASK_0011, 0x0e00, 0xff00);
+ /* disable EEE */
+ rtl_eri_write(tp, 0x1b0, ERIAR_MASK_0011, 0x0000);
+
rtl_pcie_state_l2l3_disable(tp);
}
@@ -5005,6 +3618,11 @@ static void rtl_hw_start_8106(struct rtl8169_private *tp)
RTL_W8(tp, MCU, RTL_R8(tp, MCU) | EN_NDP | EN_OOB_RESET);
RTL_W8(tp, DLLPR, RTL_R8(tp, DLLPR) & ~PFM_EN);
+ rtl_eri_write(tp, 0x1d0, ERIAR_MASK_0011, 0x0000);
+
+ /* disable EEE */
+ rtl_eri_write(tp, 0x1b0, ERIAR_MASK_0011, 0x0000);
+
rtl_pcie_state_l2l3_disable(tp);
rtl_hw_aspm_clkreq_enable(tp, true);
}
@@ -5222,11 +3840,8 @@ static void rtl_hw_start_8169(struct rtl8169_private *tp)
tp->cp_cmd |= PCIMulRW;
if (tp->mac_version == RTL_GIGA_MAC_VER_02 ||
- tp->mac_version == RTL_GIGA_MAC_VER_03) {
- netif_dbg(tp, drv, tp->dev,
- "Set MAC Reg C+CR Offset 0xe0. Bit 3 and Bit 14 MUST be 1\n");
- tp->cp_cmd |= (1 << 14);
- }
+ tp->mac_version == RTL_GIGA_MAC_VER_03)
+ tp->cp_cmd |= EnAnaPLL;
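+ /* EnAnaPLL presumably names the former (1 << 14); together with
+ * PCIMulRW set above, this matches the removed "Bit 3 and Bit 14
+ * MUST be 1" note.
+ */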
RTL_W16(tp, CPlusCmd, tp->cp_cmd);
@@ -5435,7 +4050,7 @@ static void rtl_reset_work(struct rtl8169_private *tp)
netif_wake_queue(dev);
}
-static void rtl8169_tx_timeout(struct net_device *dev)
+static void rtl8169_tx_timeout(struct net_device *dev, unsigned int txqueue)
{
struct rtl8169_private *tp = netdev_priv(dev);
@@ -6240,7 +4855,7 @@ static int rtl_open(struct net_device *dev)
napi_enable(&tp->napi);
- rtl8169_init_phy(dev, tp);
+ rtl8169_init_phy(tp);
rtl_pll_power_up(tp);
@@ -6371,7 +4986,7 @@ static void __rtl8169_resume(struct net_device *dev)
netif_device_attach(dev);
rtl_pll_power_up(tp);
- rtl8169_init_phy(dev, tp);
+ rtl8169_init_phy(tp);
phy_start(tp->phydev);
@@ -6542,7 +5157,7 @@ static const struct net_device_ops rtl_netdev_ops = {
.ndo_fix_features = rtl8169_fix_features,
.ndo_set_features = rtl8169_set_features,
.ndo_set_mac_address = rtl_set_mac_address,
- .ndo_do_ioctl = rtl8169_ioctl,
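+ /* phylib's generic phy_do_ioctl_running() performs the same
+ * netif_running() check plus phy_mii_ioctl() as the removed
+ * rtl8169_ioctl().
+ */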
+ .ndo_do_ioctl = phy_do_ioctl_running,
.ndo_set_rx_mode = rtl_set_rx_mode,
#ifdef CONFIG_NET_POLL_CONTROLLER
.ndo_poll_controller = rtl8169_netpoll,
@@ -6744,7 +5359,7 @@ static int rtl_jumbo_max(struct rtl8169_private *tp)
{
/* Non-GBit versions don't support jumbo frames */
if (!tp->supports_gmii)
- return JUMBO_1K;
+ return 0;
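+ /* Callers treat 0 as "no jumbo support" and keep the default MTU. */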
switch (tp->mac_version) {
/* RTL8169 */
@@ -6825,6 +5440,15 @@ static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
int chipset, region;
int jumbo_max, rc;
+ /* Some tools for creating an initramfs don't consider softdeps, so
+ * r8169.ko may end up in the initramfs while realtek.ko does not.
+ * The generic PHY driver is then bound, and it doesn't work with
+ * most chip versions.
+ */
+ if (!driver_find("RTL8201CP Ethernet", &mdio_bus_type)) {
+ dev_err(&pdev->dev, "realtek.ko not loaded, maybe it needs to be added to initramfs?\n");
+ return -ENOENT;
+ }
+
dev = devm_alloc_etherdev(&pdev->dev, sizeof (*tp));
if (!dev)
return -ENOMEM;
@@ -6966,10 +5590,9 @@ static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
dev->hw_features |= NETIF_F_RXALL;
dev->hw_features |= NETIF_F_RXFCS;
- /* MTU range: 60 - hw-specific max */
- dev->min_mtu = ETH_ZLEN;
jumbo_max = rtl_jumbo_max(tp);
- dev->max_mtu = jumbo_max;
+ if (jumbo_max)
+ dev->max_mtu = jumbo_max;
rtl_set_irq_mask(tp);
@@ -6999,7 +5622,7 @@ static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
(RTL_R32(tp, TxConfig) >> 20) & 0xfcf,
pci_irq_vector(pdev, 0));
- if (jumbo_max > JUMBO_1K)
+ if (jumbo_max)
netif_info(tp, probe, dev,
"jumbo features [frames: %d bytes, tx checksumming: %s]\n",
jumbo_max, tp->mac_version <= RTL_GIGA_MAC_VER_06 ?
diff --git a/drivers/net/ethernet/realtek/r8169_phy_config.c b/drivers/net/ethernet/realtek/r8169_phy_config.c
new file mode 100644
index 000000000000..e367e77c773b
--- /dev/null
+++ b/drivers/net/ethernet/realtek/r8169_phy_config.c
@@ -0,0 +1,1307 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * r8169_phy_config.c: RealTek 8169/8168/8101 ethernet driver.
+ *
+ * Copyright (c) 2002 ShuChen <shuchen@realtek.com.tw>
+ * Copyright (c) 2003 - 2007 Francois Romieu <romieu@fr.zoreil.com>
+ * Copyright (c) a lot of people too. Please respect their work.
+ *
+ * See MAINTAINERS file for support contact information.
+ */
+
+#include <linux/delay.h>
+#include <linux/phy.h>
+
+#include "r8169.h"
+
+typedef void (*rtl_phy_cfg_fct)(struct rtl8169_private *tp,
+ struct phy_device *phydev);
+
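+/* RTL8168d-style PHYs hide vendor registers behind "extension pages":
+ * select page 0x0007, write the extension page number to reg 0x1e, then
+ * access the target register. phy_select_page()/phy_restore_page() keep
+ * the MDIO bus locked so no other user can flip the page mid-sequence.
+ */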
+static void r8168d_modify_extpage(struct phy_device *phydev, int extpage,
+ int reg, u16 mask, u16 val)
+{
+ int oldpage = phy_select_page(phydev, 0x0007);
+
+ __phy_write(phydev, 0x1e, extpage);
+ __phy_modify(phydev, reg, mask, val);
+
+ phy_restore_page(phydev, oldpage, 0);
+}
+
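+/* PHY parameters use a second indirection on page 0x0005: write the
+ * parameter address to reg 0x05, then read-modify-write its value via
+ * reg 0x06. The 8168g variant below does the same through page 0x0a43
+ * with regs 0x13/0x14.
+ */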
+static void r8168d_phy_param(struct phy_device *phydev, u16 parm,
+ u16 mask, u16 val)
+{
+ int oldpage = phy_select_page(phydev, 0x0005);
+
+ __phy_write(phydev, 0x05, parm);
+ __phy_modify(phydev, 0x06, mask, val);
+
+ phy_restore_page(phydev, oldpage, 0);
+}
+
+static void r8168g_phy_param(struct phy_device *phydev, u16 parm,
+ u16 mask, u16 val)
+{
+ int oldpage = phy_select_page(phydev, 0x0a43);
+
+ __phy_write(phydev, 0x13, parm);
+ __phy_modify(phydev, 0x14, mask, val);
+
+ phy_restore_page(phydev, oldpage, 0);
+}
+
+struct phy_reg {
+ u16 reg;
+ u16 val;
+};
+
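+/* Replay a table of raw (reg, value) writes with the MDIO bus locked,
+ * so that embedded page selects (writes to reg 0x1f) and the registers
+ * they gate stay atomic with respect to other bus users.
+ */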
+static void __rtl_writephy_batch(struct phy_device *phydev,
+ const struct phy_reg *regs, int len)
+{
+ phy_lock_mdio_bus(phydev);
+
+ while (len-- > 0) {
+ __phy_write(phydev, regs->reg, regs->val);
+ regs++;
+ }
+
+ phy_unlock_mdio_bus(phydev);
+}
+
+#define rtl_writephy_batch(p, a) __rtl_writephy_batch(p, a, ARRAY_SIZE(a))
+
+static void rtl8168f_config_eee_phy(struct phy_device *phydev)
+{
+ r8168d_modify_extpage(phydev, 0x0020, 0x15, 0, BIT(8));
+ r8168d_phy_param(phydev, 0x8b85, 0, BIT(13));
+}
+
+static void rtl8168g_config_eee_phy(struct phy_device *phydev)
+{
+ phy_modify_paged(phydev, 0x0a43, 0x11, 0, BIT(4));
+}
+
+static void rtl8168h_config_eee_phy(struct phy_device *phydev)
+{
+ rtl8168g_config_eee_phy(phydev);
+
+ phy_modify_paged(phydev, 0xa4a, 0x11, 0x0000, 0x0200);
+ phy_modify_paged(phydev, 0xa42, 0x14, 0x0000, 0x0080);
+}
+
+static void rtl8125_config_eee_phy(struct phy_device *phydev)
+{
+ rtl8168h_config_eee_phy(phydev);
+
+ phy_modify_paged(phydev, 0xa6d, 0x12, 0x0001, 0x0000);
+ phy_modify_paged(phydev, 0xa6d, 0x14, 0x0010, 0x0000);
+}
+
+static void rtl8169s_hw_phy_config(struct rtl8169_private *tp,
+ struct phy_device *phydev)
+{
+ static const struct phy_reg phy_reg_init[] = {
+ { 0x1f, 0x0001 },
+ { 0x06, 0x006e },
+ { 0x08, 0x0708 },
+ { 0x15, 0x4000 },
+ { 0x18, 0x65c7 },
+
+ { 0x1f, 0x0001 },
+ { 0x03, 0x00a1 },
+ { 0x02, 0x0008 },
+ { 0x01, 0x0120 },
+ { 0x00, 0x1000 },
+ { 0x04, 0x0800 },
+ { 0x04, 0x0000 },
+
+ { 0x03, 0xff41 },
+ { 0x02, 0xdf60 },
+ { 0x01, 0x0140 },
+ { 0x00, 0x0077 },
+ { 0x04, 0x7800 },
+ { 0x04, 0x7000 },
+
+ { 0x03, 0x802f },
+ { 0x02, 0x4f02 },
+ { 0x01, 0x0409 },
+ { 0x00, 0xf0f9 },
+ { 0x04, 0x9800 },
+ { 0x04, 0x9000 },
+
+ { 0x03, 0xdf01 },
+ { 0x02, 0xdf20 },
+ { 0x01, 0xff95 },
+ { 0x00, 0xba00 },
+ { 0x04, 0xa800 },
+ { 0x04, 0xa000 },
+
+ { 0x03, 0xff41 },
+ { 0x02, 0xdf20 },
+ { 0x01, 0x0140 },
+ { 0x00, 0x00bb },
+ { 0x04, 0xb800 },
+ { 0x04, 0xb000 },
+
+ { 0x03, 0xdf41 },
+ { 0x02, 0xdc60 },
+ { 0x01, 0x6340 },
+ { 0x00, 0x007d },
+ { 0x04, 0xd800 },
+ { 0x04, 0xd000 },
+
+ { 0x03, 0xdf01 },
+ { 0x02, 0xdf20 },
+ { 0x01, 0x100a },
+ { 0x00, 0xa0ff },
+ { 0x04, 0xf800 },
+ { 0x04, 0xf000 },
+
+ { 0x1f, 0x0000 },
+ { 0x0b, 0x0000 },
+ { 0x00, 0x9200 }
+ };
+
+ rtl_writephy_batch(phydev, phy_reg_init);
+}
+
+static void rtl8169sb_hw_phy_config(struct rtl8169_private *tp,
+ struct phy_device *phydev)
+{
+ phy_write_paged(phydev, 0x0002, 0x01, 0x90d0);
+}
+
+static void rtl8169scd_hw_phy_config(struct rtl8169_private *tp,
+ struct phy_device *phydev)
+{
+ static const struct phy_reg phy_reg_init[] = {
+ { 0x1f, 0x0001 },
+ { 0x04, 0x0000 },
+ { 0x03, 0x00a1 },
+ { 0x02, 0x0008 },
+ { 0x01, 0x0120 },
+ { 0x00, 0x1000 },
+ { 0x04, 0x0800 },
+ { 0x04, 0x9000 },
+ { 0x03, 0x802f },
+ { 0x02, 0x4f02 },
+ { 0x01, 0x0409 },
+ { 0x00, 0xf099 },
+ { 0x04, 0x9800 },
+ { 0x04, 0xa000 },
+ { 0x03, 0xdf01 },
+ { 0x02, 0xdf20 },
+ { 0x01, 0xff95 },
+ { 0x00, 0xba00 },
+ { 0x04, 0xa800 },
+ { 0x04, 0xf000 },
+ { 0x03, 0xdf01 },
+ { 0x02, 0xdf20 },
+ { 0x01, 0x101a },
+ { 0x00, 0xa0ff },
+ { 0x04, 0xf800 },
+ { 0x04, 0x0000 },
+ { 0x1f, 0x0000 },
+
+ { 0x1f, 0x0001 },
+ { 0x10, 0xf41b },
+ { 0x14, 0xfb54 },
+ { 0x18, 0xf5c7 },
+ { 0x1f, 0x0000 },
+
+ { 0x1f, 0x0001 },
+ { 0x17, 0x0cc0 },
+ { 0x1f, 0x0000 }
+ };
+
+ rtl_writephy_batch(phydev, phy_reg_init);
+}
+
+static void rtl8169sce_hw_phy_config(struct rtl8169_private *tp,
+ struct phy_device *phydev)
+{
+ static const struct phy_reg phy_reg_init[] = {
+ { 0x1f, 0x0001 },
+ { 0x04, 0x0000 },
+ { 0x03, 0x00a1 },
+ { 0x02, 0x0008 },
+ { 0x01, 0x0120 },
+ { 0x00, 0x1000 },
+ { 0x04, 0x0800 },
+ { 0x04, 0x9000 },
+ { 0x03, 0x802f },
+ { 0x02, 0x4f02 },
+ { 0x01, 0x0409 },
+ { 0x00, 0xf099 },
+ { 0x04, 0x9800 },
+ { 0x04, 0xa000 },
+ { 0x03, 0xdf01 },
+ { 0x02, 0xdf20 },
+ { 0x01, 0xff95 },
+ { 0x00, 0xba00 },
+ { 0x04, 0xa800 },
+ { 0x04, 0xf000 },
+ { 0x03, 0xdf01 },
+ { 0x02, 0xdf20 },
+ { 0x01, 0x101a },
+ { 0x00, 0xa0ff },
+ { 0x04, 0xf800 },
+ { 0x04, 0x0000 },
+ { 0x1f, 0x0000 },
+
+ { 0x1f, 0x0001 },
+ { 0x0b, 0x8480 },
+ { 0x1f, 0x0000 },
+
+ { 0x1f, 0x0001 },
+ { 0x18, 0x67c7 },
+ { 0x04, 0x2000 },
+ { 0x03, 0x002f },
+ { 0x02, 0x4360 },
+ { 0x01, 0x0109 },
+ { 0x00, 0x3022 },
+ { 0x04, 0x2800 },
+ { 0x1f, 0x0000 },
+
+ { 0x1f, 0x0001 },
+ { 0x17, 0x0cc0 },
+ { 0x1f, 0x0000 }
+ };
+
+ rtl_writephy_batch(phydev, phy_reg_init);
+}
+
+static void rtl8168bb_hw_phy_config(struct rtl8169_private *tp,
+ struct phy_device *phydev)
+{
+ phy_write(phydev, 0x1f, 0x0001);
+ phy_set_bits(phydev, 0x16, BIT(0));
+ phy_write(phydev, 0x10, 0xf41b);
+ phy_write(phydev, 0x1f, 0x0000);
+}
+
+static void rtl8168bef_hw_phy_config(struct rtl8169_private *tp,
+ struct phy_device *phydev)
+{
+ phy_write_paged(phydev, 0x0001, 0x10, 0xf41b);
+}
+
+static void rtl8168cp_1_hw_phy_config(struct rtl8169_private *tp,
+ struct phy_device *phydev)
+{
+ phy_write(phydev, 0x1d, 0x0f00);
+ phy_write_paged(phydev, 0x0002, 0x0c, 0x1ec8);
+}
+
+static void rtl8168cp_2_hw_phy_config(struct rtl8169_private *tp,
+ struct phy_device *phydev)
+{
+ phy_set_bits(phydev, 0x14, BIT(5));
+ phy_set_bits(phydev, 0x0d, BIT(5));
+ phy_write_paged(phydev, 0x0001, 0x1d, 0x3d98);
+}
+
+static void rtl8168c_1_hw_phy_config(struct rtl8169_private *tp,
+ struct phy_device *phydev)
+{
+ static const struct phy_reg phy_reg_init[] = {
+ { 0x1f, 0x0001 },
+ { 0x12, 0x2300 },
+ { 0x1f, 0x0002 },
+ { 0x00, 0x88d4 },
+ { 0x01, 0x82b1 },
+ { 0x03, 0x7002 },
+ { 0x08, 0x9e30 },
+ { 0x09, 0x01f0 },
+ { 0x0a, 0x5500 },
+ { 0x0c, 0x00c8 },
+ { 0x1f, 0x0003 },
+ { 0x12, 0xc096 },
+ { 0x16, 0x000a },
+ { 0x1f, 0x0000 },
+ { 0x1f, 0x0000 },
+ { 0x09, 0x2000 },
+ { 0x09, 0x0000 }
+ };
+
+ rtl_writephy_batch(phydev, phy_reg_init);
+
+ phy_set_bits(phydev, 0x14, BIT(5));
+ phy_set_bits(phydev, 0x0d, BIT(5));
+}
+
+static void rtl8168c_2_hw_phy_config(struct rtl8169_private *tp,
+ struct phy_device *phydev)
+{
+ static const struct phy_reg phy_reg_init[] = {
+ { 0x1f, 0x0001 },
+ { 0x12, 0x2300 },
+ { 0x03, 0x802f },
+ { 0x02, 0x4f02 },
+ { 0x01, 0x0409 },
+ { 0x00, 0xf099 },
+ { 0x04, 0x9800 },
+ { 0x04, 0x9000 },
+ { 0x1d, 0x3d98 },
+ { 0x1f, 0x0002 },
+ { 0x0c, 0x7eb8 },
+ { 0x06, 0x0761 },
+ { 0x1f, 0x0003 },
+ { 0x16, 0x0f0a },
+ { 0x1f, 0x0000 }
+ };
+
+ rtl_writephy_batch(phydev, phy_reg_init);
+
+ phy_set_bits(phydev, 0x16, BIT(0));
+ phy_set_bits(phydev, 0x14, BIT(5));
+ phy_set_bits(phydev, 0x0d, BIT(5));
+}
+
+static void rtl8168c_3_hw_phy_config(struct rtl8169_private *tp,
+ struct phy_device *phydev)
+{
+ static const struct phy_reg phy_reg_init[] = {
+ { 0x1f, 0x0001 },
+ { 0x12, 0x2300 },
+ { 0x1d, 0x3d98 },
+ { 0x1f, 0x0002 },
+ { 0x0c, 0x7eb8 },
+ { 0x06, 0x5461 },
+ { 0x1f, 0x0003 },
+ { 0x16, 0x0f0a },
+ { 0x1f, 0x0000 }
+ };
+
+ rtl_writephy_batch(phydev, phy_reg_init);
+
+ phy_set_bits(phydev, 0x16, BIT(0));
+ phy_set_bits(phydev, 0x14, BIT(5));
+ phy_set_bits(phydev, 0x0d, BIT(5));
+}
+
+static const struct phy_reg rtl8168d_1_phy_reg_init_0[] = {
+ /* Channel Estimation */
+ { 0x1f, 0x0001 },
+ { 0x06, 0x4064 },
+ { 0x07, 0x2863 },
+ { 0x08, 0x059c },
+ { 0x09, 0x26b4 },
+ { 0x0a, 0x6a19 },
+ { 0x0b, 0xdcc8 },
+ { 0x10, 0xf06d },
+ { 0x14, 0x7f68 },
+ { 0x18, 0x7fd9 },
+ { 0x1c, 0xf0ff },
+ { 0x1d, 0x3d9c },
+ { 0x1f, 0x0003 },
+ { 0x12, 0xf49f },
+ { 0x13, 0x070b },
+ { 0x1a, 0x05ad },
+ { 0x14, 0x94c0 },
+
+ /*
+ * Tx Error Issue
+ * Enhance line driver power
+ */
+ { 0x1f, 0x0002 },
+ { 0x06, 0x5561 },
+ { 0x1f, 0x0005 },
+ { 0x05, 0x8332 },
+ { 0x06, 0x5561 },
+
+ /*
+ * Cannot link to 1Gbps with bad cable
+ * Decrease SNR threshold from 21.07dB to 19.04dB
+ */
+ { 0x1f, 0x0001 },
+ { 0x17, 0x0cc0 },
+
+ { 0x1f, 0x0000 },
+ { 0x0d, 0xf880 }
+};
+
+static const struct phy_reg rtl8168d_1_phy_reg_init_1[] = {
+ { 0x1f, 0x0002 },
+ { 0x05, 0x669a },
+ { 0x1f, 0x0005 },
+ { 0x05, 0x8330 },
+ { 0x06, 0x669a },
+ { 0x1f, 0x0002 }
+};
+
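+/* Parameter 0x001b is read back and compared against an expected value
+ * before loading firmware, apparently as a chip readiness/revision
+ * check; see the 0xbf00 and 0xb300 expectations in the callers below.
+ */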
+static void rtl8168d_apply_firmware_cond(struct rtl8169_private *tp,
+ struct phy_device *phydev,
+ u16 val)
+{
+ u16 reg_val;
+
+ phy_write(phydev, 0x1f, 0x0005);
+ phy_write(phydev, 0x05, 0x001b);
+ reg_val = phy_read(phydev, 0x06);
+ phy_write(phydev, 0x1f, 0x0000);
+
+ if (reg_val != val)
+ phydev_warn(phydev, "chipset not ready for firmware\n");
+ else
+ r8169_apply_firmware(tp);
+}
+
+static void rtl8168d_1_hw_phy_config(struct rtl8169_private *tp,
+ struct phy_device *phydev)
+{
+ rtl_writephy_batch(phydev, rtl8168d_1_phy_reg_init_0);
+
+ /*
+ * Rx Error Issue
+ * Fine Tune Switching regulator parameter
+ */
+ phy_write(phydev, 0x1f, 0x0002);
+ phy_modify(phydev, 0x0b, 0x00ef, 0x0010);
+ phy_modify(phydev, 0x0c, 0x5d00, 0xa200);
+
+ if (rtl8168d_efuse_read(tp, 0x01) == 0xb1) {
+ int val;
+
+ rtl_writephy_batch(phydev, rtl8168d_1_phy_reg_init_1);
+
+ val = phy_read(phydev, 0x0d);
+
+ if ((val & 0x00ff) != 0x006c) {
+ static const u32 set[] = {
+ 0x0065, 0x0066, 0x0067, 0x0068,
+ 0x0069, 0x006a, 0x006b, 0x006c
+ };
+ int i;
+
+ phy_write(phydev, 0x1f, 0x0002);
+
+ val &= 0xff00;
+ for (i = 0; i < ARRAY_SIZE(set); i++)
+ phy_write(phydev, 0x0d, val | set[i]);
+ }
+ } else {
+ phy_write_paged(phydev, 0x0002, 0x05, 0x6662);
+ r8168d_phy_param(phydev, 0x8330, 0xffff, 0x6662);
+ }
+
+ /* RSET couple improve */
+ phy_write(phydev, 0x1f, 0x0002);
+ phy_set_bits(phydev, 0x0d, 0x0300);
+ phy_set_bits(phydev, 0x0f, 0x0010);
+
+ /* Fine tune PLL performance */
+ phy_write(phydev, 0x1f, 0x0002);
+ phy_modify(phydev, 0x02, 0x0600, 0x0100);
+ phy_clear_bits(phydev, 0x03, 0xe000);
+ phy_write(phydev, 0x1f, 0x0000);
+
+ rtl8168d_apply_firmware_cond(tp, phydev, 0xbf00);
+}
+
+static void rtl8168d_2_hw_phy_config(struct rtl8169_private *tp,
+ struct phy_device *phydev)
+{
+ rtl_writephy_batch(phydev, rtl8168d_1_phy_reg_init_0);
+
+ if (rtl8168d_efuse_read(tp, 0x01) == 0xb1) {
+ int val;
+
+ rtl_writephy_batch(phydev, rtl8168d_1_phy_reg_init_1);
+
+ val = phy_read(phydev, 0x0d);
+ if ((val & 0x00ff) != 0x006c) {
+ static const u32 set[] = {
+ 0x0065, 0x0066, 0x0067, 0x0068,
+ 0x0069, 0x006a, 0x006b, 0x006c
+ };
+ int i;
+
+ phy_write(phydev, 0x1f, 0x0002);
+
+ val &= 0xff00;
+ for (i = 0; i < ARRAY_SIZE(set); i++)
+ phy_write(phydev, 0x0d, val | set[i]);
+ }
+ } else {
+ phy_write_paged(phydev, 0x0002, 0x05, 0x2642);
+ r8168d_phy_param(phydev, 0x8330, 0xffff, 0x2642);
+ }
+
+ /* Fine tune PLL performance */
+ phy_write(phydev, 0x1f, 0x0002);
+ phy_modify(phydev, 0x02, 0x0600, 0x0100);
+ phy_clear_bits(phydev, 0x03, 0xe000);
+ phy_write(phydev, 0x1f, 0x0000);
+
+ /* Switching regulator Slew rate */
+ phy_modify_paged(phydev, 0x0002, 0x0f, 0x0000, 0x0017);
+
+ rtl8168d_apply_firmware_cond(tp, phydev, 0xb300);
+}
+
+static void rtl8168d_3_hw_phy_config(struct rtl8169_private *tp,
+ struct phy_device *phydev)
+{
+ static const struct phy_reg phy_reg_init[] = {
+ { 0x1f, 0x0002 },
+ { 0x10, 0x0008 },
+ { 0x0d, 0x006c },
+
+ { 0x1f, 0x0000 },
+ { 0x0d, 0xf880 },
+
+ { 0x1f, 0x0001 },
+ { 0x17, 0x0cc0 },
+
+ { 0x1f, 0x0001 },
+ { 0x0b, 0xa4d8 },
+ { 0x09, 0x281c },
+ { 0x07, 0x2883 },
+ { 0x0a, 0x6b35 },
+ { 0x1d, 0x3da4 },
+ { 0x1c, 0xeffd },
+ { 0x14, 0x7f52 },
+ { 0x18, 0x7fc6 },
+ { 0x08, 0x0601 },
+ { 0x06, 0x4063 },
+ { 0x10, 0xf074 },
+ { 0x1f, 0x0003 },
+ { 0x13, 0x0789 },
+ { 0x12, 0xf4bd },
+ { 0x1a, 0x04fd },
+ { 0x14, 0x84b0 },
+ { 0x1f, 0x0000 },
+ { 0x00, 0x9200 },
+
+ { 0x1f, 0x0005 },
+ { 0x01, 0x0340 },
+ { 0x1f, 0x0001 },
+ { 0x04, 0x4000 },
+ { 0x03, 0x1d21 },
+ { 0x02, 0x0c32 },
+ { 0x01, 0x0200 },
+ { 0x00, 0x5554 },
+ { 0x04, 0x4800 },
+ { 0x04, 0x4000 },
+ { 0x04, 0xf000 },
+ { 0x03, 0xdf01 },
+ { 0x02, 0xdf20 },
+ { 0x01, 0x101a },
+ { 0x00, 0xa0ff },
+ { 0x04, 0xf800 },
+ { 0x04, 0xf000 },
+ { 0x1f, 0x0000 },
+ };
+
+ rtl_writephy_batch(phydev, phy_reg_init);
+ r8168d_modify_extpage(phydev, 0x0023, 0x16, 0xffff, 0x0000);
+}
+
+static void rtl8168d_4_hw_phy_config(struct rtl8169_private *tp,
+ struct phy_device *phydev)
+{
+ phy_write_paged(phydev, 0x0001, 0x17, 0x0cc0);
+ r8168d_modify_extpage(phydev, 0x002d, 0x18, 0xffff, 0x0040);
+ phy_set_bits(phydev, 0x0d, BIT(5));
+}
+
+static void rtl8168e_1_hw_phy_config(struct rtl8169_private *tp,
+ struct phy_device *phydev)
+{
+ static const struct phy_reg phy_reg_init[] = {
+ /* Channel estimation fine tune */
+ { 0x1f, 0x0001 },
+ { 0x0b, 0x6c20 },
+ { 0x07, 0x2872 },
+ { 0x1c, 0xefff },
+ { 0x1f, 0x0003 },
+ { 0x14, 0x6420 },
+ { 0x1f, 0x0000 },
+ };
+
+ r8169_apply_firmware(tp);
+
+ /* Enable Delay cap */
+ r8168d_phy_param(phydev, 0x8b80, 0xffff, 0xc896);
+
+ rtl_writephy_batch(phydev, phy_reg_init);
+
+ /* Update PFM & 10M TX idle timer */
+ r8168d_modify_extpage(phydev, 0x002f, 0x15, 0xffff, 0x1919);
+
+ r8168d_modify_extpage(phydev, 0x00ac, 0x18, 0xffff, 0x0006);
+
+ /* DCO enable for 10M IDLE Power */
+ r8168d_modify_extpage(phydev, 0x0023, 0x17, 0x0000, 0x0006);
+
+ /* For impedance matching */
+ phy_modify_paged(phydev, 0x0002, 0x08, 0x7f00, 0x8000);
+
+ /* PHY auto speed down */
+ r8168d_modify_extpage(phydev, 0x002d, 0x18, 0x0000, 0x0050);
+ phy_set_bits(phydev, 0x14, BIT(15));
+
+ r8168d_phy_param(phydev, 0x8b86, 0x0000, 0x0001);
+ r8168d_phy_param(phydev, 0x8b85, 0x2000, 0x0000);
+
+ r8168d_modify_extpage(phydev, 0x0020, 0x15, 0x1100, 0x0000);
+ phy_write_paged(phydev, 0x0006, 0x00, 0x5a00);
+
+ phy_write_mmd(phydev, MDIO_MMD_AN, MDIO_AN_EEE_ADV, 0x0000);
+}
+
+static void rtl8168e_2_hw_phy_config(struct rtl8169_private *tp,
+ struct phy_device *phydev)
+{
+ r8169_apply_firmware(tp);
+
+ /* Enable Delay cap */
+ r8168d_modify_extpage(phydev, 0x00ac, 0x18, 0xffff, 0x0006);
+
+ /* Channel estimation fine tune */
+ phy_write_paged(phydev, 0x0003, 0x09, 0xa20f);
+
+ /* Green Setting */
+ r8168d_phy_param(phydev, 0x8b5b, 0xffff, 0x9222);
+ r8168d_phy_param(phydev, 0x8b6d, 0xffff, 0x8000);
+ r8168d_phy_param(phydev, 0x8b76, 0xffff, 0x8000);
+
+ /* For 4-corner performance improve */
+ phy_write(phydev, 0x1f, 0x0005);
+ phy_write(phydev, 0x05, 0x8b80);
+ phy_set_bits(phydev, 0x17, 0x0006);
+ phy_write(phydev, 0x1f, 0x0000);
+
+ /* PHY auto speed down */
+ r8168d_modify_extpage(phydev, 0x002d, 0x18, 0x0000, 0x0010);
+ phy_set_bits(phydev, 0x14, BIT(15));
+
+ /* improve 10M EEE waveform */
+ r8168d_phy_param(phydev, 0x8b86, 0x0000, 0x0001);
+
+ /* Improve 2-pair detection performance */
+ r8168d_phy_param(phydev, 0x8b85, 0x0000, 0x4000);
+
+ rtl8168f_config_eee_phy(phydev);
+
+ /* Green feature */
+ phy_write(phydev, 0x1f, 0x0003);
+ phy_set_bits(phydev, 0x19, BIT(0));
+ phy_set_bits(phydev, 0x10, BIT(10));
+ phy_write(phydev, 0x1f, 0x0000);
+ phy_modify_paged(phydev, 0x0005, 0x01, 0, BIT(8));
+}
+
+static void rtl8168f_hw_phy_config(struct rtl8169_private *tp,
+ struct phy_device *phydev)
+{
+ /* For 4-corner performance improve */
+ r8168d_phy_param(phydev, 0x8b80, 0x0000, 0x0006);
+
+ /* PHY auto speed down */
+ r8168d_modify_extpage(phydev, 0x002d, 0x18, 0x0000, 0x0010);
+ phy_set_bits(phydev, 0x14, BIT(15));
+
+ /* Improve 10M EEE waveform */
+ r8168d_phy_param(phydev, 0x8b86, 0x0000, 0x0001);
+
+ rtl8168f_config_eee_phy(phydev);
+}
+
+static void rtl8168f_1_hw_phy_config(struct rtl8169_private *tp,
+ struct phy_device *phydev)
+{
+ r8169_apply_firmware(tp);
+
+ /* Channel estimation fine tune */
+ phy_write_paged(phydev, 0x0003, 0x09, 0xa20f);
+
+ /* Modify green table for giga & fnet */
+ r8168d_phy_param(phydev, 0x8b55, 0xffff, 0x0000);
+ r8168d_phy_param(phydev, 0x8b5e, 0xffff, 0x0000);
+ r8168d_phy_param(phydev, 0x8b67, 0xffff, 0x0000);
+ r8168d_phy_param(phydev, 0x8b70, 0xffff, 0x0000);
+ r8168d_modify_extpage(phydev, 0x0078, 0x17, 0xffff, 0x0000);
+ r8168d_modify_extpage(phydev, 0x0078, 0x19, 0xffff, 0x00fb);
+
+ /* Modify green table for 10M */
+ r8168d_phy_param(phydev, 0x8b79, 0xffff, 0xaa00);
+
+ /* Disable high-impedance detection (RTCT) */
+ phy_write_paged(phydev, 0x0003, 0x01, 0x328a);
+
+ rtl8168f_hw_phy_config(tp, phydev);
+
+ /* Improve 2-pair detection performance */
+ r8168d_phy_param(phydev, 0x8b85, 0x0000, 0x4000);
+}
+
+static void rtl8168f_2_hw_phy_config(struct rtl8169_private *tp,
+ struct phy_device *phydev)
+{
+ r8169_apply_firmware(tp);
+
+ rtl8168f_hw_phy_config(tp, phydev);
+}
+
+static void rtl8411_hw_phy_config(struct rtl8169_private *tp,
+ struct phy_device *phydev)
+{
+ r8169_apply_firmware(tp);
+
+ rtl8168f_hw_phy_config(tp, phydev);
+
+ /* Improve 2-pair detection performance */
+ r8168d_phy_param(phydev, 0x8b85, 0x0000, 0x4000);
+
+ /* Channel estimation fine tune */
+ phy_write_paged(phydev, 0x0003, 0x09, 0xa20f);
+
+ /* Modify green table for giga & fnet */
+ r8168d_phy_param(phydev, 0x8b55, 0xffff, 0x0000);
+ r8168d_phy_param(phydev, 0x8b5e, 0xffff, 0x0000);
+ r8168d_phy_param(phydev, 0x8b67, 0xffff, 0x0000);
+ r8168d_phy_param(phydev, 0x8b70, 0xffff, 0x0000);
+ r8168d_modify_extpage(phydev, 0x0078, 0x17, 0xffff, 0x0000);
+ r8168d_modify_extpage(phydev, 0x0078, 0x19, 0xffff, 0x00aa);
+
+ /* Modify green table for 10M */
+ r8168d_phy_param(phydev, 0x8b79, 0xffff, 0xaa00);
+
+ /* Disable high-impedance detection (RTCT) */
+ phy_write_paged(phydev, 0x0003, 0x01, 0x328a);
+
+ /* Modify green table for giga */
+ r8168d_phy_param(phydev, 0x8b54, 0x0800, 0x0000);
+ r8168d_phy_param(phydev, 0x8b5d, 0x0800, 0x0000);
+ r8168d_phy_param(phydev, 0x8a7c, 0x0100, 0x0000);
+ r8168d_phy_param(phydev, 0x8a7f, 0x0000, 0x0100);
+ r8168d_phy_param(phydev, 0x8a82, 0x0100, 0x0000);
+ r8168d_phy_param(phydev, 0x8a85, 0x0100, 0x0000);
+ r8168d_phy_param(phydev, 0x8a88, 0x0100, 0x0000);
+
+ /* uc same-seed solution */
+ r8168d_phy_param(phydev, 0x8b85, 0x0000, 0x8000);
+
+ /* Green feature */
+ phy_write(phydev, 0x1f, 0x0003);
+ phy_clear_bits(phydev, 0x19, BIT(0));
+ phy_clear_bits(phydev, 0x10, BIT(10));
+ phy_write(phydev, 0x1f, 0x0000);
+}
+
+static void rtl8168g_disable_aldps(struct phy_device *phydev)
+{
+ phy_modify_paged(phydev, 0x0a43, 0x10, BIT(2), 0);
+}
+
+static void rtl8168g_phy_adjust_10m_aldps(struct phy_device *phydev)
+{
+ phy_modify_paged(phydev, 0x0bcc, 0x14, BIT(8), 0);
+ phy_modify_paged(phydev, 0x0a44, 0x11, 0, BIT(7) | BIT(6));
+ r8168g_phy_param(phydev, 0x8084, 0x6000, 0x0000);
+ phy_modify_paged(phydev, 0x0a43, 0x10, 0x0000, 0x1003);
+}
+
+static void rtl8168g_1_hw_phy_config(struct rtl8169_private *tp,
+ struct phy_device *phydev)
+{
+ int ret;
+
+ r8169_apply_firmware(tp);
+
+ ret = phy_read_paged(phydev, 0x0a46, 0x10);
+ if (ret & BIT(8))
+ phy_modify_paged(phydev, 0x0bcc, 0x12, BIT(15), 0);
+ else
+ phy_modify_paged(phydev, 0x0bcc, 0x12, 0, BIT(15));
+
+ ret = phy_read_paged(phydev, 0x0a46, 0x13);
+ if (ret & BIT(8))
+ phy_modify_paged(phydev, 0x0c41, 0x15, 0, BIT(1));
+ else
+ phy_modify_paged(phydev, 0x0c41, 0x15, BIT(1), 0);
+
+ /* Enable PHY auto speed down */
+ phy_modify_paged(phydev, 0x0a44, 0x11, 0, BIT(3) | BIT(2));
+
+ rtl8168g_phy_adjust_10m_aldps(phydev);
+
+ /* EEE auto-fallback function */
+ phy_modify_paged(phydev, 0x0a4b, 0x11, 0, BIT(2));
+
+ /* Enable UC LPF tune function */
+ r8168g_phy_param(phydev, 0x8012, 0x0000, 0x8000);
+
+ phy_modify_paged(phydev, 0x0c42, 0x11, BIT(13), BIT(14));
+
+ /* Improve SWR Efficiency */
+ phy_write(phydev, 0x1f, 0x0bcd);
+ phy_write(phydev, 0x14, 0x5065);
+ phy_write(phydev, 0x14, 0xd065);
+ phy_write(phydev, 0x1f, 0x0bc8);
+ phy_write(phydev, 0x11, 0x5655);
+ phy_write(phydev, 0x1f, 0x0bcd);
+ phy_write(phydev, 0x14, 0x1065);
+ phy_write(phydev, 0x14, 0x9065);
+ phy_write(phydev, 0x14, 0x1065);
+ phy_write(phydev, 0x1f, 0x0000);
+
+ rtl8168g_disable_aldps(phydev);
+ rtl8168g_config_eee_phy(phydev);
+}
+
+static void rtl8168g_2_hw_phy_config(struct rtl8169_private *tp,
+ struct phy_device *phydev)
+{
+ r8169_apply_firmware(tp);
+ rtl8168g_config_eee_phy(phydev);
+}
+
+static void rtl8168h_1_hw_phy_config(struct rtl8169_private *tp,
+ struct phy_device *phydev)
+{
+ u16 dout_tapbin;
+ u32 data;
+
+ r8169_apply_firmware(tp);
+
+ /* CHN EST parameters adjust - giga master */
+ r8168g_phy_param(phydev, 0x809b, 0xf800, 0x8000);
+ r8168g_phy_param(phydev, 0x80a2, 0xff00, 0x8000);
+ r8168g_phy_param(phydev, 0x80a4, 0xff00, 0x8500);
+ r8168g_phy_param(phydev, 0x809c, 0xff00, 0xbd00);
+
+ /* CHN EST parameters adjust - giga slave */
+ r8168g_phy_param(phydev, 0x80ad, 0xf800, 0x7000);
+ r8168g_phy_param(phydev, 0x80b4, 0xff00, 0x5000);
+ r8168g_phy_param(phydev, 0x80ac, 0xff00, 0x4000);
+
+ /* CHN EST parameters adjust - fnet */
+ r8168g_phy_param(phydev, 0x808e, 0xff00, 0x1200);
+ r8168g_phy_param(phydev, 0x8090, 0xff00, 0xe500);
+ r8168g_phy_param(phydev, 0x8092, 0xff00, 0x9f00);
+
+ /* enable R-tune & PGA-retune function */
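+ /* Combine two chip-reported fields into a 4-bit tap-bin code, apply
+ * ~(x ^ 0x08) (complements bits 2:0, keeps bit 3), and park the
+ * result in bits 15:12 for the 0x827a-0x827d params below.
+ */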
+ dout_tapbin = 0;
+ data = phy_read_paged(phydev, 0x0a46, 0x13);
+ data &= 3;
+ data <<= 2;
+ dout_tapbin |= data;
+ data = phy_read_paged(phydev, 0x0a46, 0x12);
+ data &= 0xc000;
+ data >>= 14;
+ dout_tapbin |= data;
+ dout_tapbin = ~(dout_tapbin ^ 0x08);
+ dout_tapbin <<= 12;
+ dout_tapbin &= 0xf000;
+
+ r8168g_phy_param(phydev, 0x827a, 0xf000, dout_tapbin);
+ r8168g_phy_param(phydev, 0x827b, 0xf000, dout_tapbin);
+ r8168g_phy_param(phydev, 0x827c, 0xf000, dout_tapbin);
+ r8168g_phy_param(phydev, 0x827d, 0xf000, dout_tapbin);
+ r8168g_phy_param(phydev, 0x0811, 0x0000, 0x0800);
+ phy_modify_paged(phydev, 0x0a42, 0x16, 0x0000, 0x0002);
+
+ /* enable GPHY 10M */
+ phy_modify_paged(phydev, 0x0a44, 0x11, 0, BIT(11));
+
+ /* SAR ADC performance */
+ phy_modify_paged(phydev, 0x0bca, 0x17, BIT(12) | BIT(13), BIT(14));
+
+ r8168g_phy_param(phydev, 0x803f, 0x3000, 0x0000);
+ r8168g_phy_param(phydev, 0x8047, 0x3000, 0x0000);
+ r8168g_phy_param(phydev, 0x804f, 0x3000, 0x0000);
+ r8168g_phy_param(phydev, 0x8057, 0x3000, 0x0000);
+ r8168g_phy_param(phydev, 0x805f, 0x3000, 0x0000);
+ r8168g_phy_param(phydev, 0x8067, 0x3000, 0x0000);
+ r8168g_phy_param(phydev, 0x806f, 0x3000, 0x0000);
+
+ /* disable phy pfm mode */
+ phy_modify_paged(phydev, 0x0a44, 0x11, BIT(7), 0);
+
+ rtl8168g_disable_aldps(phydev);
+ rtl8168h_config_eee_phy(phydev);
+}
+
+static void rtl8168h_2_hw_phy_config(struct rtl8169_private *tp,
+ struct phy_device *phydev)
+{
+ u16 ioffset, rlen;
+ u32 data;
+
+ r8169_apply_firmware(tp);
+
+ /* CHN EST parameter update */
+ r8168g_phy_param(phydev, 0x808a, 0x003f, 0x000a);
+
+ /* enable R-tune & PGA-retune function */
+ r8168g_phy_param(phydev, 0x0811, 0x0000, 0x0800);
+ phy_modify_paged(phydev, 0x0a42, 0x16, 0x0000, 0x0002);
+
+ /* enable GPHY 10M */
+ phy_modify_paged(phydev, 0x0a44, 0x11, 0, BIT(11));
+
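+ /* 0xffff (all four offset nibbles at 0xf) apparently means no valid
+ * ADC bias calibration, so keep the PHY default in that case.
+ */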
+ ioffset = rtl8168h_2_get_adc_bias_ioffset(tp);
+ if (ioffset != 0xffff)
+ phy_write_paged(phydev, 0x0bcf, 0x16, ioffset);
+
+ /* Modify rlen (TX LPF corner frequency) level */
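+ /* rlen is the low nibble of page 0x0bcd reg 0x16 minus 3 (floored
+ * at 0), replicated into all four nibbles of reg 0x17.
+ */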
+ data = phy_read_paged(phydev, 0x0bcd, 0x16);
+ data &= 0x000f;
+ rlen = 0;
+ if (data > 3)
+ rlen = data - 3;
+ data = rlen | (rlen << 4) | (rlen << 8) | (rlen << 12);
+ phy_write_paged(phydev, 0x0bcd, 0x17, data);
+
+ /* disable phy pfm mode */
+ phy_modify_paged(phydev, 0x0a44, 0x11, BIT(7), 0);
+
+ rtl8168g_disable_aldps(phydev);
+ rtl8168g_config_eee_phy(phydev);
+}
+
+static void rtl8168ep_1_hw_phy_config(struct rtl8169_private *tp,
+ struct phy_device *phydev)
+{
+ /* Enable PHY auto speed down */
+ phy_modify_paged(phydev, 0x0a44, 0x11, 0, BIT(3) | BIT(2));
+
+ rtl8168g_phy_adjust_10m_aldps(phydev);
+
+ /* Enable EEE auto-fallback function */
+ phy_modify_paged(phydev, 0x0a4b, 0x11, 0, BIT(2));
+
+ /* Enable UC LPF tune function */
+ r8168g_phy_param(phydev, 0x8012, 0x0000, 0x8000);
+
+ /* set rg_sel_sdm_rate */
+ phy_modify_paged(phydev, 0x0c42, 0x11, BIT(13), BIT(14));
+
+ rtl8168g_disable_aldps(phydev);
+ rtl8168g_config_eee_phy(phydev);
+}
+
+static void rtl8168ep_2_hw_phy_config(struct rtl8169_private *tp,
+ struct phy_device *phydev)
+{
+ rtl8168g_phy_adjust_10m_aldps(phydev);
+
+ /* Enable UC LPF tune function */
+ r8168g_phy_param(phydev, 0x8012, 0x0000, 0x8000);
+
+ /* Set rg_sel_sdm_rate */
+ phy_modify_paged(phydev, 0x0c42, 0x11, BIT(13), BIT(14));
+
+ /* Channel estimation parameters */
+ r8168g_phy_param(phydev, 0x80f3, 0xff00, 0x8b00);
+ r8168g_phy_param(phydev, 0x80f0, 0xff00, 0x3a00);
+ r8168g_phy_param(phydev, 0x80ef, 0xff00, 0x0500);
+ r8168g_phy_param(phydev, 0x80f6, 0xff00, 0x6e00);
+ r8168g_phy_param(phydev, 0x80ec, 0xff00, 0x6800);
+ r8168g_phy_param(phydev, 0x80ed, 0xff00, 0x7c00);
+ r8168g_phy_param(phydev, 0x80f2, 0xff00, 0xf400);
+ r8168g_phy_param(phydev, 0x80f4, 0xff00, 0x8500);
+ r8168g_phy_param(phydev, 0x8110, 0xff00, 0xa800);
+ r8168g_phy_param(phydev, 0x810f, 0xff00, 0x1d00);
+ r8168g_phy_param(phydev, 0x8111, 0xff00, 0xf500);
+ r8168g_phy_param(phydev, 0x8113, 0xff00, 0x6100);
+ r8168g_phy_param(phydev, 0x8115, 0xff00, 0x9200);
+ r8168g_phy_param(phydev, 0x810e, 0xff00, 0x0400);
+ r8168g_phy_param(phydev, 0x810c, 0xff00, 0x7c00);
+ r8168g_phy_param(phydev, 0x810b, 0xff00, 0x5a00);
+ r8168g_phy_param(phydev, 0x80d1, 0xff00, 0xff00);
+ r8168g_phy_param(phydev, 0x80cd, 0xff00, 0x9e00);
+ r8168g_phy_param(phydev, 0x80d3, 0xff00, 0x0e00);
+ r8168g_phy_param(phydev, 0x80d5, 0xff00, 0xca00);
+ r8168g_phy_param(phydev, 0x80d7, 0xff00, 0x8400);
+
+ /* Force PWM-mode */
+ phy_write(phydev, 0x1f, 0x0bcd);
+ phy_write(phydev, 0x14, 0x5065);
+ phy_write(phydev, 0x14, 0xd065);
+ phy_write(phydev, 0x1f, 0x0bc8);
+ phy_write(phydev, 0x12, 0x00ed);
+ phy_write(phydev, 0x1f, 0x0bcd);
+ phy_write(phydev, 0x14, 0x1065);
+ phy_write(phydev, 0x14, 0x9065);
+ phy_write(phydev, 0x14, 0x1065);
+ phy_write(phydev, 0x1f, 0x0000);
+
+ rtl8168g_disable_aldps(phydev);
+ rtl8168g_config_eee_phy(phydev);
+}
+
+static void rtl8117_hw_phy_config(struct rtl8169_private *tp,
+ struct phy_device *phydev)
+{
+ /* CHN EST parameters adjust - fnet */
+ r8168g_phy_param(phydev, 0x808e, 0xff00, 0x4800);
+ r8168g_phy_param(phydev, 0x8090, 0xff00, 0xcc00);
+ r8168g_phy_param(phydev, 0x8092, 0xff00, 0xb000);
+
+ r8168g_phy_param(phydev, 0x8088, 0xff00, 0x6000);
+ r8168g_phy_param(phydev, 0x808b, 0x3f00, 0x0b00);
+ r8168g_phy_param(phydev, 0x808d, 0x1f00, 0x0600);
+ r8168g_phy_param(phydev, 0x808c, 0xff00, 0xb000);
+ r8168g_phy_param(phydev, 0x80a0, 0xff00, 0x2800);
+ r8168g_phy_param(phydev, 0x80a2, 0xff00, 0x5000);
+ r8168g_phy_param(phydev, 0x809b, 0xf800, 0xb000);
+ r8168g_phy_param(phydev, 0x809a, 0xff00, 0x4b00);
+ r8168g_phy_param(phydev, 0x809d, 0x3f00, 0x0800);
+ r8168g_phy_param(phydev, 0x80a1, 0xff00, 0x7000);
+ r8168g_phy_param(phydev, 0x809f, 0x1f00, 0x0300);
+ r8168g_phy_param(phydev, 0x809e, 0xff00, 0x8800);
+ r8168g_phy_param(phydev, 0x80b2, 0xff00, 0x2200);
+ r8168g_phy_param(phydev, 0x80ad, 0xf800, 0x9800);
+ r8168g_phy_param(phydev, 0x80af, 0x3f00, 0x0800);
+ r8168g_phy_param(phydev, 0x80b3, 0xff00, 0x6f00);
+ r8168g_phy_param(phydev, 0x80b1, 0x1f00, 0x0300);
+ r8168g_phy_param(phydev, 0x80b0, 0xff00, 0x9300);
+
+ r8168g_phy_param(phydev, 0x8011, 0x0000, 0x0800);
+
+ /* enable GPHY 10M */
+ phy_modify_paged(phydev, 0x0a44, 0x11, 0, BIT(11));
+
+ r8168g_phy_param(phydev, 0x8016, 0x0000, 0x0400);
+
+ rtl8168g_disable_aldps(phydev);
+ rtl8168h_config_eee_phy(phydev);
+}
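
The r8168g_phy_param(phydev, parm, mask, val) calls used throughout these handlers follow an indirect-parameter scheme that appears open-coded in rtl8125_2_hw_phy_config() further down: on page 0x0a43, register 0x13 latches a parameter address and register 0x14 carries its data. A plausible shape for the helper, inferred from those sequences rather than quoted from its definition:

static void r8168g_phy_param_sketch(struct phy_device *phydev, u16 parm,
				    u16 mask, u16 val)
{
	phy_write(phydev, 0x1f, 0x0a43);	/* parameter page */
	phy_write(phydev, 0x13, parm);		/* latch parameter address */
	phy_modify(phydev, 0x14, mask, val);	/* read-modify-write the data */
	phy_write(phydev, 0x1f, 0x0000);	/* back to page 0 */
}
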
+
+static void rtl8102e_hw_phy_config(struct rtl8169_private *tp,
+ struct phy_device *phydev)
+{
+ static const struct phy_reg phy_reg_init[] = {
+ { 0x1f, 0x0003 },
+ { 0x08, 0x441d },
+ { 0x01, 0x9100 },
+ { 0x1f, 0x0000 }
+ };
+
+ phy_set_bits(phydev, 0x11, BIT(12));
+ phy_set_bits(phydev, 0x19, BIT(13));
+ phy_set_bits(phydev, 0x10, BIT(15));
+
+ rtl_writephy_batch(phydev, phy_reg_init);
+}
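
The phy_reg_init[] table interleaves page selects (writes to register 0x1f) with ordinary register writes, so a batch writer only has to replay the pairs in order. A minimal sketch of what rtl_writephy_batch() is assumed to do with such a table, taking struct phy_reg as { reg, val } pairs:

static void writephy_batch_sketch(struct phy_device *phydev,
				  const struct phy_reg *regs, int len)
{
	while (len-- > 0) {
		phy_write(phydev, regs->reg, regs->val);
		regs++;
	}
}

A call site would look like writephy_batch_sketch(phydev, phy_reg_init, ARRAY_SIZE(phy_reg_init)).
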
+
+static void rtl8105e_hw_phy_config(struct rtl8169_private *tp,
+ struct phy_device *phydev)
+{
+ /* Disable ALDPS before ram code */
+ phy_write(phydev, 0x18, 0x0310);
+ msleep(100);
+
+ r8169_apply_firmware(tp);
+
+ phy_write_paged(phydev, 0x0005, 0x1a, 0x0000);
+ phy_write_paged(phydev, 0x0004, 0x1c, 0x0000);
+ phy_write_paged(phydev, 0x0001, 0x15, 0x7701);
+}
+
+static void rtl8402_hw_phy_config(struct rtl8169_private *tp,
+ struct phy_device *phydev)
+{
+ /* Disable ALDPS before setting firmware */
+ phy_write(phydev, 0x18, 0x0310);
+ msleep(20);
+
+ r8169_apply_firmware(tp);
+
+ /* EEE setting */
+ phy_write(phydev, 0x1f, 0x0004);
+ phy_write(phydev, 0x10, 0x401f);
+ phy_write(phydev, 0x19, 0x7030);
+ phy_write(phydev, 0x1f, 0x0000);
+}
+
+static void rtl8106e_hw_phy_config(struct rtl8169_private *tp,
+ struct phy_device *phydev)
+{
+ static const struct phy_reg phy_reg_init[] = {
+ { 0x1f, 0x0004 },
+ { 0x10, 0xc07f },
+ { 0x19, 0x7030 },
+ { 0x1f, 0x0000 }
+ };
+
+ /* Disable ALDPS before ram code */
+ phy_write(phydev, 0x18, 0x0310);
+ msleep(100);
+
+ r8169_apply_firmware(tp);
+
+ rtl_writephy_batch(phydev, phy_reg_init);
+}
+
+static void rtl8125_1_hw_phy_config(struct rtl8169_private *tp,
+ struct phy_device *phydev)
+{
+ phy_modify_paged(phydev, 0xad4, 0x10, 0x03ff, 0x0084);
+ phy_modify_paged(phydev, 0xad4, 0x17, 0x0000, 0x0010);
+ phy_modify_paged(phydev, 0xad1, 0x13, 0x03ff, 0x0006);
+ phy_modify_paged(phydev, 0xad3, 0x11, 0x003f, 0x0006);
+ phy_modify_paged(phydev, 0xac0, 0x14, 0x0000, 0x1100);
+ phy_modify_paged(phydev, 0xac8, 0x15, 0xf000, 0x7000);
+ phy_modify_paged(phydev, 0xad1, 0x14, 0x0000, 0x0400);
+ phy_modify_paged(phydev, 0xad1, 0x15, 0x0000, 0x03ff);
+ phy_modify_paged(phydev, 0xad1, 0x16, 0x0000, 0x03ff);
+
+ r8168g_phy_param(phydev, 0x80ea, 0xff00, 0xc400);
+ r8168g_phy_param(phydev, 0x80eb, 0x0700, 0x0300);
+ r8168g_phy_param(phydev, 0x80f8, 0xff00, 0x1c00);
+ r8168g_phy_param(phydev, 0x80f1, 0xff00, 0x3000);
+ r8168g_phy_param(phydev, 0x80fe, 0xff00, 0xa500);
+ r8168g_phy_param(phydev, 0x8102, 0xff00, 0x5000);
+ r8168g_phy_param(phydev, 0x8105, 0xff00, 0x3300);
+ r8168g_phy_param(phydev, 0x8100, 0xff00, 0x7000);
+ r8168g_phy_param(phydev, 0x8104, 0xff00, 0xf000);
+ r8168g_phy_param(phydev, 0x8106, 0xff00, 0x6500);
+ r8168g_phy_param(phydev, 0x80dc, 0xff00, 0xed00);
+ r8168g_phy_param(phydev, 0x80df, 0x0000, 0x0100);
+ r8168g_phy_param(phydev, 0x80e1, 0x0100, 0x0000);
+
+ phy_modify_paged(phydev, 0xbf0, 0x13, 0x003f, 0x0038);
+ r8168g_phy_param(phydev, 0x819f, 0xffff, 0xd0b6);
+
+ phy_write_paged(phydev, 0xbc3, 0x12, 0x5555);
+ phy_modify_paged(phydev, 0xbf0, 0x15, 0x0e00, 0x0a00);
+ phy_modify_paged(phydev, 0xa5c, 0x10, 0x0400, 0x0000);
+ phy_modify_paged(phydev, 0xa44, 0x11, 0x0000, 0x0800);
+
+ rtl8125_config_eee_phy(phydev);
+}
+
+static void rtl8125_2_hw_phy_config(struct rtl8169_private *tp,
+ struct phy_device *phydev)
+{
+ int i;
+
+ phy_modify_paged(phydev, 0xad4, 0x17, 0x0000, 0x0010);
+ phy_modify_paged(phydev, 0xad1, 0x13, 0x03ff, 0x03ff);
+ phy_modify_paged(phydev, 0xad3, 0x11, 0x003f, 0x0006);
+ phy_modify_paged(phydev, 0xac0, 0x14, 0x1100, 0x0000);
+ phy_modify_paged(phydev, 0xacc, 0x10, 0x0003, 0x0002);
+ phy_modify_paged(phydev, 0xad4, 0x10, 0x00e7, 0x0044);
+ phy_modify_paged(phydev, 0xac1, 0x12, 0x0080, 0x0000);
+ phy_modify_paged(phydev, 0xac8, 0x10, 0x0300, 0x0000);
+ phy_modify_paged(phydev, 0xac5, 0x17, 0x0007, 0x0002);
+ phy_write_paged(phydev, 0xad4, 0x16, 0x00a8);
+ phy_write_paged(phydev, 0xac5, 0x16, 0x01ff);
+ phy_modify_paged(phydev, 0xac8, 0x15, 0x00f0, 0x0030);
+
+ phy_write(phydev, 0x1f, 0x0b87);
+ phy_write(phydev, 0x16, 0x80a2);
+ phy_write(phydev, 0x17, 0x0153);
+ phy_write(phydev, 0x16, 0x809c);
+ phy_write(phydev, 0x17, 0x0153);
+ phy_write(phydev, 0x1f, 0x0000);
+
+ phy_write(phydev, 0x1f, 0x0a43);
+ phy_write(phydev, 0x13, 0x81b3);
+ phy_write(phydev, 0x14, 0x0043);
+ phy_write(phydev, 0x14, 0x00a7);
+ phy_write(phydev, 0x14, 0x00d6);
+ phy_write(phydev, 0x14, 0x00ec);
+ phy_write(phydev, 0x14, 0x00f6);
+ phy_write(phydev, 0x14, 0x00fb);
+ phy_write(phydev, 0x14, 0x00fd);
+ phy_write(phydev, 0x14, 0x00ff);
+ phy_write(phydev, 0x14, 0x00bb);
+ phy_write(phydev, 0x14, 0x0058);
+ phy_write(phydev, 0x14, 0x0029);
+ phy_write(phydev, 0x14, 0x0013);
+ phy_write(phydev, 0x14, 0x0009);
+ phy_write(phydev, 0x14, 0x0004);
+ phy_write(phydev, 0x14, 0x0002);
+ for (i = 0; i < 25; i++)
+ phy_write(phydev, 0x14, 0x0000);
+ phy_write(phydev, 0x1f, 0x0000);
+
+ r8168g_phy_param(phydev, 0x8257, 0xffff, 0x020f);
+ r8168g_phy_param(phydev, 0x80ea, 0xffff, 0x7843);
+
+ r8169_apply_firmware(tp);
+
+ phy_modify_paged(phydev, 0xd06, 0x14, 0x0000, 0x2000);
+
+ r8168g_phy_param(phydev, 0x81a2, 0x0000, 0x0100);
+
+ phy_modify_paged(phydev, 0xb54, 0x16, 0xff00, 0xdb00);
+ phy_modify_paged(phydev, 0xa45, 0x12, 0x0001, 0x0000);
+ phy_modify_paged(phydev, 0xa5d, 0x12, 0x0000, 0x0020);
+ phy_modify_paged(phydev, 0xad4, 0x17, 0x0010, 0x0000);
+ phy_modify_paged(phydev, 0xa86, 0x15, 0x0001, 0x0000);
+ phy_modify_paged(phydev, 0xa44, 0x11, 0x0000, 0x0800);
+
+ rtl8125_config_eee_phy(phydev);
+}
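
The long run of phy_write(phydev, 0x14, ...) calls above streams a value table into consecutive parameter slots: register 0x13 latches the start address (0x81b3) and register 0x14 appears to auto-increment through the data. A table-driven equivalent; the auto-increment behaviour is an assumption read off the code shape, not a documented property:

static void stream_params_sketch(struct phy_device *phydev)
{
	static const u16 vals[] = {
		0x0043, 0x00a7, 0x00d6, 0x00ec, 0x00f6, 0x00fb, 0x00fd,
		0x00ff, 0x00bb, 0x0058, 0x0029, 0x0013, 0x0009, 0x0004,
		0x0002,
	};
	unsigned int i;

	phy_write(phydev, 0x1f, 0x0a43);
	phy_write(phydev, 0x13, 0x81b3);	/* start address */
	for (i = 0; i < ARRAY_SIZE(vals); i++)
		phy_write(phydev, 0x14, vals[i]);
	for (i = 0; i < 25; i++)		/* clear the remaining slots */
		phy_write(phydev, 0x14, 0x0000);
	phy_write(phydev, 0x1f, 0x0000);
}
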
+
+void r8169_hw_phy_config(struct rtl8169_private *tp, struct phy_device *phydev,
+ enum mac_version ver)
+{
+ static const rtl_phy_cfg_fct phy_configs[] = {
+ /* PCI devices. */
+ [RTL_GIGA_MAC_VER_02] = rtl8169s_hw_phy_config,
+ [RTL_GIGA_MAC_VER_03] = rtl8169s_hw_phy_config,
+ [RTL_GIGA_MAC_VER_04] = rtl8169sb_hw_phy_config,
+ [RTL_GIGA_MAC_VER_05] = rtl8169scd_hw_phy_config,
+ [RTL_GIGA_MAC_VER_06] = rtl8169sce_hw_phy_config,
+ /* PCI-E devices. */
+ [RTL_GIGA_MAC_VER_07] = rtl8102e_hw_phy_config,
+ [RTL_GIGA_MAC_VER_08] = rtl8102e_hw_phy_config,
+ [RTL_GIGA_MAC_VER_09] = rtl8102e_hw_phy_config,
+ [RTL_GIGA_MAC_VER_10] = NULL,
+ [RTL_GIGA_MAC_VER_11] = rtl8168bb_hw_phy_config,
+ [RTL_GIGA_MAC_VER_12] = rtl8168bef_hw_phy_config,
+ [RTL_GIGA_MAC_VER_13] = NULL,
+ [RTL_GIGA_MAC_VER_14] = NULL,
+ [RTL_GIGA_MAC_VER_15] = NULL,
+ [RTL_GIGA_MAC_VER_16] = NULL,
+ [RTL_GIGA_MAC_VER_17] = rtl8168bef_hw_phy_config,
+ [RTL_GIGA_MAC_VER_18] = rtl8168cp_1_hw_phy_config,
+ [RTL_GIGA_MAC_VER_19] = rtl8168c_1_hw_phy_config,
+ [RTL_GIGA_MAC_VER_20] = rtl8168c_2_hw_phy_config,
+ [RTL_GIGA_MAC_VER_21] = rtl8168c_3_hw_phy_config,
+ [RTL_GIGA_MAC_VER_22] = rtl8168c_3_hw_phy_config,
+ [RTL_GIGA_MAC_VER_23] = rtl8168cp_2_hw_phy_config,
+ [RTL_GIGA_MAC_VER_24] = rtl8168cp_2_hw_phy_config,
+ [RTL_GIGA_MAC_VER_25] = rtl8168d_1_hw_phy_config,
+ [RTL_GIGA_MAC_VER_26] = rtl8168d_2_hw_phy_config,
+ [RTL_GIGA_MAC_VER_27] = rtl8168d_3_hw_phy_config,
+ [RTL_GIGA_MAC_VER_28] = rtl8168d_4_hw_phy_config,
+ [RTL_GIGA_MAC_VER_29] = rtl8105e_hw_phy_config,
+ [RTL_GIGA_MAC_VER_30] = rtl8105e_hw_phy_config,
+ [RTL_GIGA_MAC_VER_31] = NULL,
+ [RTL_GIGA_MAC_VER_32] = rtl8168e_1_hw_phy_config,
+ [RTL_GIGA_MAC_VER_33] = rtl8168e_1_hw_phy_config,
+ [RTL_GIGA_MAC_VER_34] = rtl8168e_2_hw_phy_config,
+ [RTL_GIGA_MAC_VER_35] = rtl8168f_1_hw_phy_config,
+ [RTL_GIGA_MAC_VER_36] = rtl8168f_2_hw_phy_config,
+ [RTL_GIGA_MAC_VER_37] = rtl8402_hw_phy_config,
+ [RTL_GIGA_MAC_VER_38] = rtl8411_hw_phy_config,
+ [RTL_GIGA_MAC_VER_39] = rtl8106e_hw_phy_config,
+ [RTL_GIGA_MAC_VER_40] = rtl8168g_1_hw_phy_config,
+ [RTL_GIGA_MAC_VER_41] = NULL,
+ [RTL_GIGA_MAC_VER_42] = rtl8168g_2_hw_phy_config,
+ [RTL_GIGA_MAC_VER_43] = rtl8168g_2_hw_phy_config,
+ [RTL_GIGA_MAC_VER_44] = rtl8168g_2_hw_phy_config,
+ [RTL_GIGA_MAC_VER_45] = rtl8168h_1_hw_phy_config,
+ [RTL_GIGA_MAC_VER_46] = rtl8168h_2_hw_phy_config,
+ [RTL_GIGA_MAC_VER_47] = rtl8168h_1_hw_phy_config,
+ [RTL_GIGA_MAC_VER_48] = rtl8168h_2_hw_phy_config,
+ [RTL_GIGA_MAC_VER_49] = rtl8168ep_1_hw_phy_config,
+ [RTL_GIGA_MAC_VER_50] = rtl8168ep_2_hw_phy_config,
+ [RTL_GIGA_MAC_VER_51] = rtl8168ep_2_hw_phy_config,
+ [RTL_GIGA_MAC_VER_52] = rtl8117_hw_phy_config,
+ [RTL_GIGA_MAC_VER_60] = rtl8125_1_hw_phy_config,
+ [RTL_GIGA_MAC_VER_61] = rtl8125_2_hw_phy_config,
+ };
+
+ if (phy_configs[ver])
+ phy_configs[ver](tp, phydev);
+}
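
The designated-initializer table keeps the version-to-handler mapping sparse: enum values without an entry default to NULL, and the final check skips chip versions that need no PHY quirks. The lookup trusts ver to be a valid enum mac_version; a defensive variant of the last two lines (a sketch, not the driver's code) would bound-check before indexing:

	if (ver < ARRAY_SIZE(phy_configs) && phy_configs[ver])
		phy_configs[ver](tp, phydev);
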
diff --git a/drivers/net/ethernet/renesas/ravb_main.c b/drivers/net/ethernet/renesas/ravb_main.c
index 4b13a184bfc7..067ad25553b9 100644
--- a/drivers/net/ethernet/renesas/ravb_main.c
+++ b/drivers/net/ethernet/renesas/ravb_main.c
@@ -1425,7 +1425,7 @@ out_napi_off:
}
/* Timeout function for Ethernet AVB */
-static void ravb_tx_timeout(struct net_device *ndev)
+static void ravb_tx_timeout(struct net_device *ndev, unsigned int txqueue)
{
struct ravb_private *priv = netdev_priv(ndev);
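
This hunk, like the sh_eth, sxgbe and seeq changes below, is part of a tree-wide update to the transmit-watchdog callback: ndo_tx_timeout gains a txqueue argument identifying which TX queue stalled. The new member signature in struct net_device_ops is:

	void (*ndo_tx_timeout)(struct net_device *dev, unsigned int txqueue);

Single-queue drivers such as ravb can simply ignore the new argument, which is exactly what these conversions do.
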
diff --git a/drivers/net/ethernet/renesas/sh_eth.c b/drivers/net/ethernet/renesas/sh_eth.c
index e19b49c4013e..58ca126518a2 100644
--- a/drivers/net/ethernet/renesas/sh_eth.c
+++ b/drivers/net/ethernet/renesas/sh_eth.c
@@ -2204,24 +2204,28 @@ static size_t __sh_eth_get_regs(struct net_device *ndev, u32 *buf)
if (cd->tsu) {
add_tsu_reg(ARSTR);
add_tsu_reg(TSU_CTRST);
- add_tsu_reg(TSU_FWEN0);
- add_tsu_reg(TSU_FWEN1);
- add_tsu_reg(TSU_FCM);
- add_tsu_reg(TSU_BSYSL0);
- add_tsu_reg(TSU_BSYSL1);
- add_tsu_reg(TSU_PRISL0);
- add_tsu_reg(TSU_PRISL1);
- add_tsu_reg(TSU_FWSL0);
- add_tsu_reg(TSU_FWSL1);
+ if (cd->dual_port) {
+ add_tsu_reg(TSU_FWEN0);
+ add_tsu_reg(TSU_FWEN1);
+ add_tsu_reg(TSU_FCM);
+ add_tsu_reg(TSU_BSYSL0);
+ add_tsu_reg(TSU_BSYSL1);
+ add_tsu_reg(TSU_PRISL0);
+ add_tsu_reg(TSU_PRISL1);
+ add_tsu_reg(TSU_FWSL0);
+ add_tsu_reg(TSU_FWSL1);
+ }
add_tsu_reg(TSU_FWSLC);
- add_tsu_reg(TSU_QTAGM0);
- add_tsu_reg(TSU_QTAGM1);
- add_tsu_reg(TSU_FWSR);
- add_tsu_reg(TSU_FWINMK);
- add_tsu_reg(TSU_ADQT0);
- add_tsu_reg(TSU_ADQT1);
- add_tsu_reg(TSU_VTAG0);
- add_tsu_reg(TSU_VTAG1);
+ if (cd->dual_port) {
+ add_tsu_reg(TSU_QTAGM0);
+ add_tsu_reg(TSU_QTAGM1);
+ add_tsu_reg(TSU_FWSR);
+ add_tsu_reg(TSU_FWINMK);
+ add_tsu_reg(TSU_ADQT0);
+ add_tsu_reg(TSU_ADQT1);
+ add_tsu_reg(TSU_VTAG0);
+ add_tsu_reg(TSU_VTAG1);
+ }
add_tsu_reg(TSU_ADSBSY);
add_tsu_reg(TSU_TEN);
add_tsu_reg(TSU_POST1);
@@ -2478,7 +2482,7 @@ out_napi_off:
}
/* Timeout function */
-static void sh_eth_tx_timeout(struct net_device *ndev)
+static void sh_eth_tx_timeout(struct net_device *ndev, unsigned int txqueue)
{
struct sh_eth_private *mdp = netdev_priv(ndev);
struct sh_eth_rxdesc *rxdesc;
@@ -2643,20 +2647,6 @@ static int sh_eth_close(struct net_device *ndev)
return 0;
}
-/* ioctl to device function */
-static int sh_eth_do_ioctl(struct net_device *ndev, struct ifreq *rq, int cmd)
-{
- struct phy_device *phydev = ndev->phydev;
-
- if (!netif_running(ndev))
- return -EINVAL;
-
- if (!phydev)
- return -ENODEV;
-
- return phy_mii_ioctl(phydev, rq, cmd);
-}
-
static int sh_eth_change_mtu(struct net_device *ndev, int new_mtu)
{
if (netif_running(ndev))
@@ -3155,7 +3145,7 @@ static const struct net_device_ops sh_eth_netdev_ops = {
.ndo_get_stats = sh_eth_get_stats,
.ndo_set_rx_mode = sh_eth_set_rx_mode,
.ndo_tx_timeout = sh_eth_tx_timeout,
- .ndo_do_ioctl = sh_eth_do_ioctl,
+ .ndo_do_ioctl = phy_do_ioctl_running,
.ndo_change_mtu = sh_eth_change_mtu,
.ndo_validate_addr = eth_validate_addr,
.ndo_set_mac_address = eth_mac_addr,
@@ -3171,7 +3161,7 @@ static const struct net_device_ops sh_eth_netdev_ops_tsu = {
.ndo_vlan_rx_add_vid = sh_eth_vlan_rx_add_vid,
.ndo_vlan_rx_kill_vid = sh_eth_vlan_rx_kill_vid,
.ndo_tx_timeout = sh_eth_tx_timeout,
- .ndo_do_ioctl = sh_eth_do_ioctl,
+ .ndo_do_ioctl = phy_do_ioctl_running,
.ndo_change_mtu = sh_eth_change_mtu,
.ndo_validate_addr = eth_validate_addr,
.ndo_set_mac_address = eth_mac_addr,
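
The removed sh_eth_do_ioctl() was pure boilerplate, and phy_do_ioctl_running() is the generic phylib helper that replaces it. Judging from the deleted code above, its behaviour is equivalent to:

static int phy_do_ioctl_running_sketch(struct net_device *dev,
				       struct ifreq *ifr, int cmd)
{
	if (!netif_running(dev))
		return -EINVAL;	/* interface must be up */
	if (!dev->phydev)
		return -ENODEV;	/* and have an attached PHY */
	return phy_mii_ioctl(dev->phydev, ifr, cmd);
}
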
diff --git a/drivers/net/ethernet/rocker/rocker_main.c b/drivers/net/ethernet/rocker/rocker_main.c
index bc4f951315da..7585cd2270ba 100644
--- a/drivers/net/ethernet/rocker/rocker_main.c
+++ b/drivers/net/ethernet/rocker/rocker_main.c
@@ -2159,7 +2159,7 @@ static void rocker_router_fib_event_work(struct work_struct *work)
/* Protect internal structures from changes */
rtnl_lock();
switch (fib_work->event) {
- case FIB_EVENT_ENTRY_ADD:
+ case FIB_EVENT_ENTRY_REPLACE:
err = rocker_world_fib4_add(rocker, &fib_work->fen_info);
if (err)
rocker_world_fib4_abort(rocker);
@@ -2201,7 +2201,7 @@ static int rocker_router_fib_event(struct notifier_block *nb,
fib_work->event = event;
switch (event) {
- case FIB_EVENT_ENTRY_ADD: /* fall through */
+ case FIB_EVENT_ENTRY_REPLACE: /* fall through */
case FIB_EVENT_ENTRY_DEL:
if (info->family == AF_INET) {
struct fib_entry_notifier_info *fen_info = ptr;
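
rocker's IPv4 offload now keys on FIB_EVENT_ENTRY_REPLACE instead of FIB_EVENT_ENTRY_ADD, following the FIB notifier chain's move to replace semantics for route insertion. A bare-bones listener (a sketch under those semantics) shows where the two events land:

static int fib_event_sketch(struct notifier_block *nb,
			    unsigned long event, void *ptr)
{
	switch (event) {
	case FIB_EVENT_ENTRY_REPLACE:	/* route inserted or overwritten */
	case FIB_EVENT_ENTRY_DEL:
		/* for IPv4, ptr carries a fib_entry_notifier_info */
		break;
	}
	return NOTIFY_DONE;
}
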
diff --git a/drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c b/drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c
index 52ed111d98f4..c705743d69f7 100644
--- a/drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c
+++ b/drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c
@@ -1572,7 +1572,7 @@ static int sxgbe_poll(struct napi_struct *napi, int budget)
* netdev structure and arrange for the device to be reset to a sane state
* in order to transmit a new packet.
*/
-static void sxgbe_tx_timeout(struct net_device *dev)
+static void sxgbe_tx_timeout(struct net_device *dev, unsigned int txqueue)
{
struct sxgbe_priv_data *priv = netdev_priv(dev);
@@ -1939,9 +1939,7 @@ static int sxgbe_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
case SIOCGMIIPHY:
case SIOCGMIIREG:
case SIOCSMIIREG:
- if (!dev->phydev)
- return -EINVAL;
- ret = phy_mii_ioctl(dev->phydev, rq, cmd);
+ ret = phy_do_ioctl(dev, rq, cmd);
break;
default:
break;
diff --git a/drivers/net/ethernet/seeq/ether3.c b/drivers/net/ethernet/seeq/ether3.c
index 632a7c85964d..128ee7cda1ed 100644
--- a/drivers/net/ethernet/seeq/ether3.c
+++ b/drivers/net/ethernet/seeq/ether3.c
@@ -79,7 +79,7 @@ static netdev_tx_t ether3_sendpacket(struct sk_buff *skb,
static irqreturn_t ether3_interrupt (int irq, void *dev_id);
static int ether3_close (struct net_device *dev);
static void ether3_setmulticastlist (struct net_device *dev);
-static void ether3_timeout(struct net_device *dev);
+static void ether3_timeout(struct net_device *dev, unsigned int txqueue);
#define BUS_16 2
#define BUS_8 1
@@ -450,7 +450,7 @@ static void ether3_setmulticastlist(struct net_device *dev)
ether3_outw(priv(dev)->regs.config1 | CFG1_LOCBUFMEM, REG_CONFIG1);
}
-static void ether3_timeout(struct net_device *dev)
+static void ether3_timeout(struct net_device *dev, unsigned int txqueue)
{
unsigned long flags;
diff --git a/drivers/net/ethernet/seeq/sgiseeq.c b/drivers/net/ethernet/seeq/sgiseeq.c
index 276c7cae7cee..8507ff242014 100644
--- a/drivers/net/ethernet/seeq/sgiseeq.c
+++ b/drivers/net/ethernet/seeq/sgiseeq.c
@@ -645,7 +645,7 @@ sgiseeq_start_xmit(struct sk_buff *skb, struct net_device *dev)
return NETDEV_TX_OK;
}
-static void timeout(struct net_device *dev)
+static void timeout(struct net_device *dev, unsigned int txqueue)
{
printk(KERN_NOTICE "%s: transmit timed out, resetting\n", dev->name);
sgiseeq_reset(dev);
diff --git a/drivers/net/ethernet/sfc/Kconfig b/drivers/net/ethernet/sfc/Kconfig
index 5f36774bf4b8..ea5a9220196c 100644
--- a/drivers/net/ethernet/sfc/Kconfig
+++ b/drivers/net/ethernet/sfc/Kconfig
@@ -21,8 +21,6 @@ config SFC
depends on PCI
select MDIO
select CRC32
- select I2C
- select I2C_ALGOBIT
imply PTP_1588_CLOCK
---help---
This driver supports 10/40-gigabit Ethernet cards based on
diff --git a/drivers/net/ethernet/sfc/Makefile b/drivers/net/ethernet/sfc/Makefile
index c5c297e78d06..87d093da22ca 100644
--- a/drivers/net/ethernet/sfc/Makefile
+++ b/drivers/net/ethernet/sfc/Makefile
@@ -1,7 +1,10 @@
# SPDX-License-Identifier: GPL-2.0
-sfc-y += efx.o nic.o farch.o siena.o ef10.o tx.o rx.o \
- selftest.o ethtool.o ptp.o tx_tso.o \
- mcdi.o mcdi_port.o mcdi_mon.o
+sfc-y += efx.o efx_common.o efx_channels.o nic.o \
+ farch.o siena.o ef10.o \
+ tx.o tx_common.o tx_tso.o rx.o rx_common.o \
+ selftest.o ethtool.o ethtool_common.o ptp.o \
+ mcdi.o mcdi_port.o mcdi_port_common.o \
+ mcdi_functions.o mcdi_filters.o mcdi_mon.o
sfc-$(CONFIG_SFC_MTD) += mtd.o
sfc-$(CONFIG_SFC_SRIOV) += sriov.o siena_sriov.o ef10_sriov.o
diff --git a/drivers/net/ethernet/sfc/ef10.c b/drivers/net/ethernet/sfc/ef10.c
index 4d9bbccc6f89..52113b7529d6 100644
--- a/drivers/net/ethernet/sfc/ef10.c
+++ b/drivers/net/ethernet/sfc/ef10.c
@@ -5,11 +5,15 @@
*/
#include "net_driver.h"
+#include "rx_common.h"
#include "ef10_regs.h"
#include "io.h"
#include "mcdi.h"
#include "mcdi_pcol.h"
+#include "mcdi_port_common.h"
+#include "mcdi_functions.h"
#include "nic.h"
+#include "mcdi_filters.h"
#include "workarounds.h"
#include "selftest.h"
#include "ef10_sriov.h"
@@ -25,28 +29,6 @@ enum {
EFX_EF10_TEST = 1,
EFX_EF10_REFILL,
};
-/* The maximum size of a shared RSS context */
-/* TODO: this should really be from the mcdi protocol export */
-#define EFX_EF10_MAX_SHARED_RSS_CONTEXT_SIZE 64UL
-
-/* The filter table(s) are managed by firmware and we have write-only
- * access. When removing filters we must identify them to the
- * firmware by a 64-bit handle, but this is too wide for Linux kernel
- * interfaces (32-bit for RX NFC, 16-bit for RFS). Also, we need to
- * be able to tell in advance whether a requested insertion will
- * replace an existing filter. Therefore we maintain a software hash
- * table, which should be at least as large as the hardware hash
- * table.
- *
- * Huntington has a single 8K filter table shared between all filter
- * types and both ports.
- */
-#define HUNT_FILTER_TBL_ROWS 8192
-
-#define EFX_EF10_FILTER_ID_INVALID 0xffff
-
-#define EFX_EF10_FILTER_DEV_UC_MAX 32
-#define EFX_EF10_FILTER_DEV_MC_MAX 256
/* VLAN list entry */
struct efx_ef10_vlan {
@@ -54,95 +36,8 @@ struct efx_ef10_vlan {
u16 vid;
};
-enum efx_ef10_default_filters {
- EFX_EF10_BCAST,
- EFX_EF10_UCDEF,
- EFX_EF10_MCDEF,
- EFX_EF10_VXLAN4_UCDEF,
- EFX_EF10_VXLAN4_MCDEF,
- EFX_EF10_VXLAN6_UCDEF,
- EFX_EF10_VXLAN6_MCDEF,
- EFX_EF10_NVGRE4_UCDEF,
- EFX_EF10_NVGRE4_MCDEF,
- EFX_EF10_NVGRE6_UCDEF,
- EFX_EF10_NVGRE6_MCDEF,
- EFX_EF10_GENEVE4_UCDEF,
- EFX_EF10_GENEVE4_MCDEF,
- EFX_EF10_GENEVE6_UCDEF,
- EFX_EF10_GENEVE6_MCDEF,
-
- EFX_EF10_NUM_DEFAULT_FILTERS
-};
-
-/* Per-VLAN filters information */
-struct efx_ef10_filter_vlan {
- struct list_head list;
- u16 vid;
- u16 uc[EFX_EF10_FILTER_DEV_UC_MAX];
- u16 mc[EFX_EF10_FILTER_DEV_MC_MAX];
- u16 default_filters[EFX_EF10_NUM_DEFAULT_FILTERS];
-};
-
-struct efx_ef10_dev_addr {
- u8 addr[ETH_ALEN];
-};
-
-struct efx_ef10_filter_table {
-/* The MCDI match masks supported by this fw & hw, in order of priority */
- u32 rx_match_mcdi_flags[
- MC_CMD_GET_PARSER_DISP_INFO_OUT_SUPPORTED_MATCHES_MAXNUM * 2];
- unsigned int rx_match_count;
-
- struct rw_semaphore lock; /* Protects entries */
- struct {
- unsigned long spec; /* pointer to spec plus flag bits */
-/* AUTO_OLD is used to mark and sweep MAC filters for the device address lists. */
-/* unused flag 1UL */
-#define EFX_EF10_FILTER_FLAG_AUTO_OLD 2UL
-#define EFX_EF10_FILTER_FLAGS 3UL
- u64 handle; /* firmware handle */
- } *entry;
-/* Shadow of net_device address lists, guarded by mac_lock */
- struct efx_ef10_dev_addr dev_uc_list[EFX_EF10_FILTER_DEV_UC_MAX];
- struct efx_ef10_dev_addr dev_mc_list[EFX_EF10_FILTER_DEV_MC_MAX];
- int dev_uc_count;
- int dev_mc_count;
- bool uc_promisc;
- bool mc_promisc;
-/* Whether in multicast promiscuous mode when last changed */
- bool mc_promisc_last;
- bool mc_overflow; /* Too many MC addrs; should always imply mc_promisc */
- bool vlan_filter;
- struct list_head vlan_list;
-};
-
-/* An arbitrary search limit for the software hash table */
-#define EFX_EF10_FILTER_SEARCH_LIMIT 200
-
-static void efx_ef10_rx_free_indir_table(struct efx_nic *efx);
-static void efx_ef10_filter_table_remove(struct efx_nic *efx);
-static int efx_ef10_filter_add_vlan(struct efx_nic *efx, u16 vid);
-static void efx_ef10_filter_del_vlan_internal(struct efx_nic *efx,
- struct efx_ef10_filter_vlan *vlan);
-static void efx_ef10_filter_del_vlan(struct efx_nic *efx, u16 vid);
static int efx_ef10_set_udp_tnl_ports(struct efx_nic *efx, bool unloading);
-static u32 efx_ef10_filter_get_unsafe_id(u32 filter_id)
-{
- WARN_ON_ONCE(filter_id == EFX_EF10_FILTER_ID_INVALID);
- return filter_id & (HUNT_FILTER_TBL_ROWS - 1);
-}
-
-static unsigned int efx_ef10_filter_get_unsafe_pri(u32 filter_id)
-{
- return filter_id / (HUNT_FILTER_TBL_ROWS * 2);
-}
-
-static u32 efx_ef10_make_filter_id(unsigned int pri, u16 idx)
-{
- return pri * HUNT_FILTER_TBL_ROWS * 2 + idx;
-}
-
static int efx_ef10_get_warm_boot_count(struct efx_nic *efx)
{
efx_dword_t reg;
@@ -185,24 +80,6 @@ static bool efx_ef10_is_vf(struct efx_nic *efx)
return efx->type->is_vf;
}
-static int efx_ef10_get_pf_index(struct efx_nic *efx)
-{
- MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_FUNCTION_INFO_OUT_LEN);
- struct efx_ef10_nic_data *nic_data = efx->nic_data;
- size_t outlen;
- int rc;
-
- rc = efx_mcdi_rpc(efx, MC_CMD_GET_FUNCTION_INFO, NULL, 0, outbuf,
- sizeof(outbuf), &outlen);
- if (rc)
- return rc;
- if (outlen < sizeof(outbuf))
- return -EIO;
-
- nic_data->pf_index = MCDI_DWORD(outbuf, GET_FUNCTION_INFO_OUT_PF);
- return 0;
-}
-
#ifdef CONFIG_SFC_SRIOV
static int efx_ef10_get_vf_index(struct efx_nic *efx)
{
@@ -273,24 +150,9 @@ static int efx_ef10_init_datapath_caps(struct efx_nic *efx)
u8 vi_window_mode = MCDI_BYTE(outbuf,
GET_CAPABILITIES_V3_OUT_VI_WINDOW_MODE);
- switch (vi_window_mode) {
- case MC_CMD_GET_CAPABILITIES_V3_OUT_VI_WINDOW_MODE_8K:
- efx->vi_stride = 8192;
- break;
- case MC_CMD_GET_CAPABILITIES_V3_OUT_VI_WINDOW_MODE_16K:
- efx->vi_stride = 16384;
- break;
- case MC_CMD_GET_CAPABILITIES_V3_OUT_VI_WINDOW_MODE_64K:
- efx->vi_stride = 65536;
- break;
- default:
- netif_err(efx, probe, efx->net_dev,
- "Unrecognised VI window mode %d\n",
- vi_window_mode);
- return -EIO;
- }
- netif_dbg(efx, probe, efx->net_dev, "vi_stride = %u\n",
- efx->vi_stride);
+ rc = efx_mcdi_window_mode_to_stride(efx, vi_window_mode);
+ if (rc)
+ return rc;
} else {
/* keep default VI stride */
netif_dbg(efx, probe, efx->net_dev,
@@ -576,7 +438,7 @@ static int efx_ef10_add_vlan(struct efx_nic *efx, u16 vid)
if (efx->filter_state) {
mutex_lock(&efx->mac_lock);
down_write(&efx->filter_sem);
- rc = efx_ef10_filter_add_vlan(efx, vlan->vid);
+ rc = efx_mcdi_filter_add_vlan(efx, vlan->vid);
up_write(&efx->filter_sem);
mutex_unlock(&efx->mac_lock);
if (rc)
@@ -605,7 +467,7 @@ static void efx_ef10_del_vlan_internal(struct efx_nic *efx,
if (efx->filter_state) {
down_write(&efx->filter_sem);
- efx_ef10_filter_del_vlan(efx, vlan->vid);
+ efx_mcdi_filter_del_vlan(efx, vlan->vid);
up_write(&efx->filter_sem);
}
@@ -689,7 +551,7 @@ static int efx_ef10_probe(struct efx_nic *efx)
}
nic_data->warm_boot_count = rc;
- efx->rss_context.context_id = EFX_EF10_RSS_CONTEXT_INVALID;
+ efx->rss_context.context_id = EFX_MCDI_RSS_CONTEXT_INVALID;
nic_data->vport_id = EVB_PORT_ID_ASSIGNED;
@@ -725,7 +587,7 @@ static int efx_ef10_probe(struct efx_nic *efx)
if (rc)
goto fail4;
- rc = efx_ef10_get_pf_index(efx);
+ rc = efx_get_pf_index(efx, &nic_data->pf_index);
if (rc)
goto fail5;
@@ -831,22 +693,6 @@ fail1:
return rc;
}
-static int efx_ef10_free_vis(struct efx_nic *efx)
-{
- MCDI_DECLARE_BUF_ERR(outbuf);
- size_t outlen;
- int rc = efx_mcdi_rpc_quiet(efx, MC_CMD_FREE_VIS, NULL, 0,
- outbuf, sizeof(outbuf), &outlen);
-
- /* -EALREADY means nothing to free, so ignore */
- if (rc == -EALREADY)
- rc = 0;
- if (rc)
- efx_mcdi_display_error(efx, MC_CMD_FREE_VIS, 0, outbuf, outlen,
- rc);
- return rc;
-}
-
#ifdef EFX_USE_PIO
static void efx_ef10_free_piobufs(struct efx_nic *efx)
@@ -1084,12 +930,12 @@ static void efx_ef10_remove(struct efx_nic *efx)
efx_mcdi_mon_remove(efx);
- efx_ef10_rx_free_indir_table(efx);
+ efx_mcdi_rx_free_indir_table(efx);
if (nic_data->wc_membase)
iounmap(nic_data->wc_membase);
- rc = efx_ef10_free_vis(efx);
+ rc = efx_mcdi_free_vis(efx);
WARN_ON(rc != 0);
if (!nic_data->must_restore_piobufs)
@@ -1260,28 +1106,10 @@ static int efx_ef10_probe_vf(struct efx_nic *efx __attribute__ ((unused)))
static int efx_ef10_alloc_vis(struct efx_nic *efx,
unsigned int min_vis, unsigned int max_vis)
{
- MCDI_DECLARE_BUF(inbuf, MC_CMD_ALLOC_VIS_IN_LEN);
- MCDI_DECLARE_BUF(outbuf, MC_CMD_ALLOC_VIS_OUT_LEN);
struct efx_ef10_nic_data *nic_data = efx->nic_data;
- size_t outlen;
- int rc;
-
- MCDI_SET_DWORD(inbuf, ALLOC_VIS_IN_MIN_VI_COUNT, min_vis);
- MCDI_SET_DWORD(inbuf, ALLOC_VIS_IN_MAX_VI_COUNT, max_vis);
- rc = efx_mcdi_rpc(efx, MC_CMD_ALLOC_VIS, inbuf, sizeof(inbuf),
- outbuf, sizeof(outbuf), &outlen);
- if (rc != 0)
- return rc;
-
- if (outlen < MC_CMD_ALLOC_VIS_OUT_LEN)
- return -EIO;
- netif_dbg(efx, drv, efx->net_dev, "base VI is A0x%03x\n",
- MCDI_DWORD(outbuf, ALLOC_VIS_OUT_VI_BASE));
-
- nic_data->vi_base = MCDI_DWORD(outbuf, ALLOC_VIS_OUT_VI_BASE);
- nic_data->n_allocated_vis = MCDI_DWORD(outbuf, ALLOC_VIS_OUT_VI_COUNT);
- return 0;
+ return efx_mcdi_alloc_vis(efx, min_vis, max_vis, &nic_data->vi_base,
+ &nic_data->n_allocated_vis);
}
/* Note that the failure path of this function does not free
@@ -1363,7 +1191,7 @@ static int efx_ef10_dimension_resources(struct efx_nic *efx)
}
/* In case the last attached driver failed to free VIs, do it now */
- rc = efx_ef10_free_vis(efx);
+ rc = efx_mcdi_free_vis(efx);
if (rc != 0)
return rc;
@@ -1384,7 +1212,7 @@ static int efx_ef10_dimension_resources(struct efx_nic *efx)
efx->max_tx_channels =
nic_data->n_allocated_vis / EFX_TXQ_TYPES;
- efx_ef10_free_vis(efx);
+ efx_mcdi_free_vis(efx);
return -EAGAIN;
}
@@ -1401,7 +1229,7 @@ static int efx_ef10_dimension_resources(struct efx_nic *efx)
}
/* Shrink the original UC mapping of the memory BAR */
- membase = ioremap_nocache(efx->membase_phys, uc_mem_map_size);
+ membase = ioremap(efx->membase_phys, uc_mem_map_size);
if (!membase) {
netif_err(efx, probe, efx->net_dev,
"could not shrink memory BAR to %x\n",
@@ -1490,7 +1318,7 @@ static int efx_ef10_init_nic(struct efx_nic *efx)
return 0;
}
-static void efx_ef10_reset_mc_allocations(struct efx_nic *efx)
+static void efx_ef10_table_reset_mc_allocations(struct efx_nic *efx)
{
struct efx_ef10_nic_data *nic_data = efx->nic_data;
#ifdef CONFIG_SFC_SRIOV
@@ -1503,7 +1331,7 @@ static void efx_ef10_reset_mc_allocations(struct efx_nic *efx)
nic_data->must_restore_filters = true;
nic_data->must_restore_piobufs = true;
efx_ef10_forget_old_piobufs(efx);
- efx->rss_context.context_id = EFX_EF10_RSS_CONTEXT_INVALID;
+ efx->rss_context.context_id = EFX_MCDI_RSS_CONTEXT_INVALID;
/* Driver-created vswitches and vports must be re-created */
nic_data->must_probe_vswitching = true;
@@ -1571,7 +1399,7 @@ static int efx_ef10_reset(struct efx_nic *efx, enum reset_type reset_type)
*/
if ((reset_type == RESET_TYPE_ALL ||
reset_type == RESET_TYPE_MCDI_TIMEOUT) && !rc)
- efx_ef10_reset_mc_allocations(efx);
+ efx_ef10_table_reset_mc_allocations(efx);
return rc;
}
@@ -2187,7 +2015,7 @@ static void efx_ef10_mcdi_reboot_detected(struct efx_nic *efx)
struct efx_ef10_nic_data *nic_data = efx->nic_data;
/* All our allocations have been reset */
- efx_ef10_reset_mc_allocations(efx);
+ efx_ef10_table_reset_mc_allocations(efx);
/* The datapath firmware might have been changed */
nic_data->must_check_datapath_caps = true;
@@ -2408,20 +2236,15 @@ static u32 efx_ef10_tso_versions(struct efx_nic *efx)
static void efx_ef10_tx_init(struct efx_tx_queue *tx_queue)
{
- MCDI_DECLARE_BUF(inbuf, MC_CMD_INIT_TXQ_IN_LEN(EFX_MAX_DMAQ_SIZE * 8 /
- EFX_BUF_SIZE));
bool csum_offload = tx_queue->queue & EFX_TXQ_TYPE_OFFLOAD;
- size_t entries = tx_queue->txd.buf.len / EFX_BUF_SIZE;
struct efx_channel *channel = tx_queue->channel;
struct efx_nic *efx = tx_queue->efx;
- struct efx_ef10_nic_data *nic_data = efx->nic_data;
+ struct efx_ef10_nic_data *nic_data;
bool tso_v2 = false;
- size_t inlen;
- dma_addr_t dma_addr;
efx_qword_t *txd;
int rc;
- int i;
- BUILD_BUG_ON(MC_CMD_INIT_TXQ_OUT_LEN != 0);
+
+ nic_data = efx->nic_data;
/* Only attempt to enable TX timestamping if we have the license for it,
* otherwise TXQ init will fail
@@ -2448,51 +2271,9 @@ static void efx_ef10_tx_init(struct efx_tx_queue *tx_queue)
channel->channel);
}
- MCDI_SET_DWORD(inbuf, INIT_TXQ_IN_SIZE, tx_queue->ptr_mask + 1);
- MCDI_SET_DWORD(inbuf, INIT_TXQ_IN_TARGET_EVQ, channel->channel);
- MCDI_SET_DWORD(inbuf, INIT_TXQ_IN_LABEL, tx_queue->queue);
- MCDI_SET_DWORD(inbuf, INIT_TXQ_IN_INSTANCE, tx_queue->queue);
- MCDI_SET_DWORD(inbuf, INIT_TXQ_IN_OWNER_ID, 0);
- MCDI_SET_DWORD(inbuf, INIT_TXQ_IN_PORT_ID, nic_data->vport_id);
-
- dma_addr = tx_queue->txd.buf.dma_addr;
-
- netif_dbg(efx, hw, efx->net_dev, "pushing TXQ %d. %zu entries (%llx)\n",
- tx_queue->queue, entries, (u64)dma_addr);
-
- for (i = 0; i < entries; ++i) {
- MCDI_SET_ARRAY_QWORD(inbuf, INIT_TXQ_IN_DMA_ADDR, i, dma_addr);
- dma_addr += EFX_BUF_SIZE;
- }
-
- inlen = MC_CMD_INIT_TXQ_IN_LEN(entries);
-
- do {
- MCDI_POPULATE_DWORD_4(inbuf, INIT_TXQ_IN_FLAGS,
- /* This flag was removed from mcdi_pcol.h for
- * the non-_EXT version of INIT_TXQ. However,
- * firmware still honours it.
- */
- INIT_TXQ_EXT_IN_FLAG_TSOV2_EN, tso_v2,
- INIT_TXQ_IN_FLAG_IP_CSUM_DIS, !csum_offload,
- INIT_TXQ_IN_FLAG_TCP_CSUM_DIS, !csum_offload,
- INIT_TXQ_EXT_IN_FLAG_TIMESTAMP,
- tx_queue->timestamping);
-
- rc = efx_mcdi_rpc_quiet(efx, MC_CMD_INIT_TXQ, inbuf, inlen,
- NULL, 0, NULL);
- if (rc == -ENOSPC && tso_v2) {
- /* Retry without TSOv2 if we're short on contexts. */
- tso_v2 = false;
- netif_warn(efx, probe, efx->net_dev,
- "TSOv2 context not available to segment in hardware. TCP performance may be reduced.\n");
- } else if (rc) {
- efx_mcdi_display_error(efx, MC_CMD_INIT_TXQ,
- MC_CMD_INIT_TXQ_EXT_IN_LEN,
- NULL, 0, rc);
- goto fail;
- }
- } while (rc);
+ rc = efx_mcdi_tx_init(tx_queue, tso_v2);
+ if (rc)
+ goto fail;
/* A previous user of this TX queue might have set us up the
* bomb by writing a descriptor to the TX push collector but
@@ -2530,35 +2311,6 @@ fail:
tx_queue->queue);
}
-static void efx_ef10_tx_fini(struct efx_tx_queue *tx_queue)
-{
- MCDI_DECLARE_BUF(inbuf, MC_CMD_FINI_TXQ_IN_LEN);
- MCDI_DECLARE_BUF_ERR(outbuf);
- struct efx_nic *efx = tx_queue->efx;
- size_t outlen;
- int rc;
-
- MCDI_SET_DWORD(inbuf, FINI_TXQ_IN_INSTANCE,
- tx_queue->queue);
-
- rc = efx_mcdi_rpc_quiet(efx, MC_CMD_FINI_TXQ, inbuf, sizeof(inbuf),
- outbuf, sizeof(outbuf), &outlen);
-
- if (rc && rc != -EALREADY)
- goto fail;
-
- return;
-
-fail:
- efx_mcdi_display_error(efx, MC_CMD_FINI_TXQ, MC_CMD_FINI_TXQ_IN_LEN,
- outbuf, outlen, rc);
-}
-
-static void efx_ef10_tx_remove(struct efx_tx_queue *tx_queue)
-{
- efx_nic_free_buffer(tx_queue->efx, &tx_queue->txd.buf);
-}
-
/* This writes to the TX_DESC_WPTR; write pointer for TX descriptor ring */
static inline void efx_ef10_notify_tx_desc(struct efx_tx_queue *tx_queue)
{
@@ -2637,527 +2389,6 @@ static void efx_ef10_tx_write(struct efx_tx_queue *tx_queue)
}
}
-#define RSS_MODE_HASH_ADDRS (1 << RSS_MODE_HASH_SRC_ADDR_LBN |\
- 1 << RSS_MODE_HASH_DST_ADDR_LBN)
-#define RSS_MODE_HASH_PORTS (1 << RSS_MODE_HASH_SRC_PORT_LBN |\
- 1 << RSS_MODE_HASH_DST_PORT_LBN)
-#define RSS_CONTEXT_FLAGS_DEFAULT (1 << MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_TOEPLITZ_IPV4_EN_LBN |\
- 1 << MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_TOEPLITZ_TCPV4_EN_LBN |\
- 1 << MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_TOEPLITZ_IPV6_EN_LBN |\
- 1 << MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_TOEPLITZ_TCPV6_EN_LBN |\
- (RSS_MODE_HASH_ADDRS | RSS_MODE_HASH_PORTS) << MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_TCP_IPV4_RSS_MODE_LBN |\
- RSS_MODE_HASH_ADDRS << MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_UDP_IPV4_RSS_MODE_LBN |\
- RSS_MODE_HASH_ADDRS << MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_OTHER_IPV4_RSS_MODE_LBN |\
- (RSS_MODE_HASH_ADDRS | RSS_MODE_HASH_PORTS) << MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_TCP_IPV6_RSS_MODE_LBN |\
- RSS_MODE_HASH_ADDRS << MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_UDP_IPV6_RSS_MODE_LBN |\
- RSS_MODE_HASH_ADDRS << MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_OTHER_IPV6_RSS_MODE_LBN)
-
-static int efx_ef10_get_rss_flags(struct efx_nic *efx, u32 context, u32 *flags)
-{
- /* Firmware had a bug (sfc bug 61952) where it would not actually
- * fill in the flags field in the response to MC_CMD_RSS_CONTEXT_GET_FLAGS.
- * This meant that it would always contain whatever was previously
- * in the MCDI buffer. Fortunately, all firmware versions with
- * this bug have the same default flags value for a newly-allocated
- * RSS context, and the only time we want to get the flags is just
- * after allocating. Moreover, the response has a 32-bit hole
- * where the context ID would be in the request, so we can use an
- * overlength buffer in the request and pre-fill the flags field
- * with what we believe the default to be. Thus if the firmware
- * has the bug, it will leave our pre-filled value in the flags
- * field of the response, and we will get the right answer.
- *
- * However, this does mean that this function should NOT be used if
- * the RSS context flags might not be their defaults - it is ONLY
- * reliably correct for a newly-allocated RSS context.
- */
- MCDI_DECLARE_BUF(inbuf, MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_LEN);
- MCDI_DECLARE_BUF(outbuf, MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_LEN);
- size_t outlen;
- int rc;
-
- /* Check we have a hole for the context ID */
- BUILD_BUG_ON(MC_CMD_RSS_CONTEXT_GET_FLAGS_IN_LEN != MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_FLAGS_OFST);
- MCDI_SET_DWORD(inbuf, RSS_CONTEXT_GET_FLAGS_IN_RSS_CONTEXT_ID, context);
- MCDI_SET_DWORD(inbuf, RSS_CONTEXT_GET_FLAGS_OUT_FLAGS,
- RSS_CONTEXT_FLAGS_DEFAULT);
- rc = efx_mcdi_rpc(efx, MC_CMD_RSS_CONTEXT_GET_FLAGS, inbuf,
- sizeof(inbuf), outbuf, sizeof(outbuf), &outlen);
- if (rc == 0) {
- if (outlen < MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_LEN)
- rc = -EIO;
- else
- *flags = MCDI_DWORD(outbuf, RSS_CONTEXT_GET_FLAGS_OUT_FLAGS);
- }
- return rc;
-}
-
-/* Attempt to enable 4-tuple UDP hashing on the specified RSS context.
- * If we fail, we just leave the RSS context at its default hash settings,
- * which is safe but may slightly reduce performance.
- * Defaults are 4-tuple for TCP and 2-tuple for UDP and other-IP, so we
- * just need to set the UDP ports flags (for both IP versions).
- */
-static void efx_ef10_set_rss_flags(struct efx_nic *efx,
- struct efx_rss_context *ctx)
-{
- MCDI_DECLARE_BUF(inbuf, MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_LEN);
- u32 flags;
-
- BUILD_BUG_ON(MC_CMD_RSS_CONTEXT_SET_FLAGS_OUT_LEN != 0);
-
- if (efx_ef10_get_rss_flags(efx, ctx->context_id, &flags) != 0)
- return;
- MCDI_SET_DWORD(inbuf, RSS_CONTEXT_SET_FLAGS_IN_RSS_CONTEXT_ID,
- ctx->context_id);
- flags |= RSS_MODE_HASH_PORTS << MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_UDP_IPV4_RSS_MODE_LBN;
- flags |= RSS_MODE_HASH_PORTS << MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_UDP_IPV6_RSS_MODE_LBN;
- MCDI_SET_DWORD(inbuf, RSS_CONTEXT_SET_FLAGS_IN_FLAGS, flags);
- if (!efx_mcdi_rpc(efx, MC_CMD_RSS_CONTEXT_SET_FLAGS, inbuf, sizeof(inbuf),
- NULL, 0, NULL))
- /* Succeeded, so UDP 4-tuple is now enabled */
- ctx->rx_hash_udp_4tuple = true;
-}
-
-static int efx_ef10_alloc_rss_context(struct efx_nic *efx, bool exclusive,
- struct efx_rss_context *ctx,
- unsigned *context_size)
-{
- MCDI_DECLARE_BUF(inbuf, MC_CMD_RSS_CONTEXT_ALLOC_IN_LEN);
- MCDI_DECLARE_BUF(outbuf, MC_CMD_RSS_CONTEXT_ALLOC_OUT_LEN);
- struct efx_ef10_nic_data *nic_data = efx->nic_data;
- size_t outlen;
- int rc;
- u32 alloc_type = exclusive ?
- MC_CMD_RSS_CONTEXT_ALLOC_IN_TYPE_EXCLUSIVE :
- MC_CMD_RSS_CONTEXT_ALLOC_IN_TYPE_SHARED;
- unsigned rss_spread = exclusive ?
- efx->rss_spread :
- min(rounddown_pow_of_two(efx->rss_spread),
- EFX_EF10_MAX_SHARED_RSS_CONTEXT_SIZE);
-
- if (!exclusive && rss_spread == 1) {
- ctx->context_id = EFX_EF10_RSS_CONTEXT_INVALID;
- if (context_size)
- *context_size = 1;
- return 0;
- }
-
- if (nic_data->datapath_caps &
- 1 << MC_CMD_GET_CAPABILITIES_OUT_RX_RSS_LIMITED_LBN)
- return -EOPNOTSUPP;
-
- MCDI_SET_DWORD(inbuf, RSS_CONTEXT_ALLOC_IN_UPSTREAM_PORT_ID,
- nic_data->vport_id);
- MCDI_SET_DWORD(inbuf, RSS_CONTEXT_ALLOC_IN_TYPE, alloc_type);
- MCDI_SET_DWORD(inbuf, RSS_CONTEXT_ALLOC_IN_NUM_QUEUES, rss_spread);
-
- rc = efx_mcdi_rpc(efx, MC_CMD_RSS_CONTEXT_ALLOC, inbuf, sizeof(inbuf),
- outbuf, sizeof(outbuf), &outlen);
- if (rc != 0)
- return rc;
-
- if (outlen < MC_CMD_RSS_CONTEXT_ALLOC_OUT_LEN)
- return -EIO;
-
- ctx->context_id = MCDI_DWORD(outbuf, RSS_CONTEXT_ALLOC_OUT_RSS_CONTEXT_ID);
-
- if (context_size)
- *context_size = rss_spread;
-
- if (nic_data->datapath_caps &
- 1 << MC_CMD_GET_CAPABILITIES_OUT_ADDITIONAL_RSS_MODES_LBN)
- efx_ef10_set_rss_flags(efx, ctx);
-
- return 0;
-}
-
-static int efx_ef10_free_rss_context(struct efx_nic *efx, u32 context)
-{
- MCDI_DECLARE_BUF(inbuf, MC_CMD_RSS_CONTEXT_FREE_IN_LEN);
-
- MCDI_SET_DWORD(inbuf, RSS_CONTEXT_FREE_IN_RSS_CONTEXT_ID,
- context);
- return efx_mcdi_rpc(efx, MC_CMD_RSS_CONTEXT_FREE, inbuf, sizeof(inbuf),
- NULL, 0, NULL);
-}
-
-static int efx_ef10_populate_rss_table(struct efx_nic *efx, u32 context,
- const u32 *rx_indir_table, const u8 *key)
-{
- MCDI_DECLARE_BUF(tablebuf, MC_CMD_RSS_CONTEXT_SET_TABLE_IN_LEN);
- MCDI_DECLARE_BUF(keybuf, MC_CMD_RSS_CONTEXT_SET_KEY_IN_LEN);
- int i, rc;
-
- MCDI_SET_DWORD(tablebuf, RSS_CONTEXT_SET_TABLE_IN_RSS_CONTEXT_ID,
- context);
- BUILD_BUG_ON(ARRAY_SIZE(efx->rss_context.rx_indir_table) !=
- MC_CMD_RSS_CONTEXT_SET_TABLE_IN_INDIRECTION_TABLE_LEN);
-
- /* This iterates over the length of efx->rss_context.rx_indir_table, but
- * copies bytes from rx_indir_table. That's because the latter is a
- * pointer rather than an array, but should have the same length.
- * The efx->rss_context.rx_hash_key loop below is similar.
- */
- for (i = 0; i < ARRAY_SIZE(efx->rss_context.rx_indir_table); ++i)
- MCDI_PTR(tablebuf,
- RSS_CONTEXT_SET_TABLE_IN_INDIRECTION_TABLE)[i] =
- (u8) rx_indir_table[i];
-
- rc = efx_mcdi_rpc(efx, MC_CMD_RSS_CONTEXT_SET_TABLE, tablebuf,
- sizeof(tablebuf), NULL, 0, NULL);
- if (rc != 0)
- return rc;
-
- MCDI_SET_DWORD(keybuf, RSS_CONTEXT_SET_KEY_IN_RSS_CONTEXT_ID,
- context);
- BUILD_BUG_ON(ARRAY_SIZE(efx->rss_context.rx_hash_key) !=
- MC_CMD_RSS_CONTEXT_SET_KEY_IN_TOEPLITZ_KEY_LEN);
- for (i = 0; i < ARRAY_SIZE(efx->rss_context.rx_hash_key); ++i)
- MCDI_PTR(keybuf, RSS_CONTEXT_SET_KEY_IN_TOEPLITZ_KEY)[i] = key[i];
-
- return efx_mcdi_rpc(efx, MC_CMD_RSS_CONTEXT_SET_KEY, keybuf,
- sizeof(keybuf), NULL, 0, NULL);
-}
-
-static void efx_ef10_rx_free_indir_table(struct efx_nic *efx)
-{
- int rc;
-
- if (efx->rss_context.context_id != EFX_EF10_RSS_CONTEXT_INVALID) {
- rc = efx_ef10_free_rss_context(efx, efx->rss_context.context_id);
- WARN_ON(rc != 0);
- }
- efx->rss_context.context_id = EFX_EF10_RSS_CONTEXT_INVALID;
-}
-
-static int efx_ef10_rx_push_shared_rss_config(struct efx_nic *efx,
- unsigned *context_size)
-{
- struct efx_ef10_nic_data *nic_data = efx->nic_data;
- int rc = efx_ef10_alloc_rss_context(efx, false, &efx->rss_context,
- context_size);
-
- if (rc != 0)
- return rc;
-
- nic_data->rx_rss_context_exclusive = false;
- efx_set_default_rx_indir_table(efx, &efx->rss_context);
- return 0;
-}
-
-static int efx_ef10_rx_push_exclusive_rss_config(struct efx_nic *efx,
- const u32 *rx_indir_table,
- const u8 *key)
-{
- u32 old_rx_rss_context = efx->rss_context.context_id;
- struct efx_ef10_nic_data *nic_data = efx->nic_data;
- int rc;
-
- if (efx->rss_context.context_id == EFX_EF10_RSS_CONTEXT_INVALID ||
- !nic_data->rx_rss_context_exclusive) {
- rc = efx_ef10_alloc_rss_context(efx, true, &efx->rss_context,
- NULL);
- if (rc == -EOPNOTSUPP)
- return rc;
- else if (rc != 0)
- goto fail1;
- }
-
- rc = efx_ef10_populate_rss_table(efx, efx->rss_context.context_id,
- rx_indir_table, key);
- if (rc != 0)
- goto fail2;
-
- if (efx->rss_context.context_id != old_rx_rss_context &&
- old_rx_rss_context != EFX_EF10_RSS_CONTEXT_INVALID)
- WARN_ON(efx_ef10_free_rss_context(efx, old_rx_rss_context) != 0);
- nic_data->rx_rss_context_exclusive = true;
- if (rx_indir_table != efx->rss_context.rx_indir_table)
- memcpy(efx->rss_context.rx_indir_table, rx_indir_table,
- sizeof(efx->rss_context.rx_indir_table));
- if (key != efx->rss_context.rx_hash_key)
- memcpy(efx->rss_context.rx_hash_key, key,
- efx->type->rx_hash_key_size);
-
- return 0;
-
-fail2:
- if (old_rx_rss_context != efx->rss_context.context_id) {
- WARN_ON(efx_ef10_free_rss_context(efx, efx->rss_context.context_id) != 0);
- efx->rss_context.context_id = old_rx_rss_context;
- }
-fail1:
- netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
- return rc;
-}
-
-static int efx_ef10_rx_push_rss_context_config(struct efx_nic *efx,
- struct efx_rss_context *ctx,
- const u32 *rx_indir_table,
- const u8 *key)
-{
- int rc;
-
- WARN_ON(!mutex_is_locked(&efx->rss_lock));
-
- if (ctx->context_id == EFX_EF10_RSS_CONTEXT_INVALID) {
- rc = efx_ef10_alloc_rss_context(efx, true, ctx, NULL);
- if (rc)
- return rc;
- }
-
- if (!rx_indir_table) /* Delete this context */
- return efx_ef10_free_rss_context(efx, ctx->context_id);
-
- rc = efx_ef10_populate_rss_table(efx, ctx->context_id,
- rx_indir_table, key);
- if (rc)
- return rc;
-
- memcpy(ctx->rx_indir_table, rx_indir_table,
- sizeof(efx->rss_context.rx_indir_table));
- memcpy(ctx->rx_hash_key, key, efx->type->rx_hash_key_size);
-
- return 0;
-}
-
-static int efx_ef10_rx_pull_rss_context_config(struct efx_nic *efx,
- struct efx_rss_context *ctx)
-{
- MCDI_DECLARE_BUF(inbuf, MC_CMD_RSS_CONTEXT_GET_TABLE_IN_LEN);
- MCDI_DECLARE_BUF(tablebuf, MC_CMD_RSS_CONTEXT_GET_TABLE_OUT_LEN);
- MCDI_DECLARE_BUF(keybuf, MC_CMD_RSS_CONTEXT_GET_KEY_OUT_LEN);
- size_t outlen;
- int rc, i;
-
- WARN_ON(!mutex_is_locked(&efx->rss_lock));
-
- BUILD_BUG_ON(MC_CMD_RSS_CONTEXT_GET_TABLE_IN_LEN !=
- MC_CMD_RSS_CONTEXT_GET_KEY_IN_LEN);
-
- if (ctx->context_id == EFX_EF10_RSS_CONTEXT_INVALID)
- return -ENOENT;
-
- MCDI_SET_DWORD(inbuf, RSS_CONTEXT_GET_TABLE_IN_RSS_CONTEXT_ID,
- ctx->context_id);
- BUILD_BUG_ON(ARRAY_SIZE(ctx->rx_indir_table) !=
- MC_CMD_RSS_CONTEXT_GET_TABLE_OUT_INDIRECTION_TABLE_LEN);
- rc = efx_mcdi_rpc(efx, MC_CMD_RSS_CONTEXT_GET_TABLE, inbuf, sizeof(inbuf),
- tablebuf, sizeof(tablebuf), &outlen);
- if (rc != 0)
- return rc;
-
- if (WARN_ON(outlen != MC_CMD_RSS_CONTEXT_GET_TABLE_OUT_LEN))
- return -EIO;
-
- for (i = 0; i < ARRAY_SIZE(ctx->rx_indir_table); i++)
- ctx->rx_indir_table[i] = MCDI_PTR(tablebuf,
- RSS_CONTEXT_GET_TABLE_OUT_INDIRECTION_TABLE)[i];
-
- MCDI_SET_DWORD(inbuf, RSS_CONTEXT_GET_KEY_IN_RSS_CONTEXT_ID,
- ctx->context_id);
- BUILD_BUG_ON(ARRAY_SIZE(ctx->rx_hash_key) !=
- MC_CMD_RSS_CONTEXT_SET_KEY_IN_TOEPLITZ_KEY_LEN);
- rc = efx_mcdi_rpc(efx, MC_CMD_RSS_CONTEXT_GET_KEY, inbuf, sizeof(inbuf),
- keybuf, sizeof(keybuf), &outlen);
- if (rc != 0)
- return rc;
-
- if (WARN_ON(outlen != MC_CMD_RSS_CONTEXT_GET_KEY_OUT_LEN))
- return -EIO;
-
- for (i = 0; i < ARRAY_SIZE(ctx->rx_hash_key); ++i)
- ctx->rx_hash_key[i] = MCDI_PTR(
- keybuf, RSS_CONTEXT_GET_KEY_OUT_TOEPLITZ_KEY)[i];
-
- return 0;
-}
-
-static int efx_ef10_rx_pull_rss_config(struct efx_nic *efx)
-{
- int rc;
-
- mutex_lock(&efx->rss_lock);
- rc = efx_ef10_rx_pull_rss_context_config(efx, &efx->rss_context);
- mutex_unlock(&efx->rss_lock);
- return rc;
-}
-
-static void efx_ef10_rx_restore_rss_contexts(struct efx_nic *efx)
-{
- struct efx_ef10_nic_data *nic_data = efx->nic_data;
- struct efx_rss_context *ctx;
- int rc;
-
- WARN_ON(!mutex_is_locked(&efx->rss_lock));
-
- if (!nic_data->must_restore_rss_contexts)
- return;
-
- list_for_each_entry(ctx, &efx->rss_context.list, list) {
- /* previous NIC RSS context is gone */
- ctx->context_id = EFX_EF10_RSS_CONTEXT_INVALID;
- /* so try to allocate a new one */
- rc = efx_ef10_rx_push_rss_context_config(efx, ctx,
- ctx->rx_indir_table,
- ctx->rx_hash_key);
- if (rc)
- netif_warn(efx, probe, efx->net_dev,
- "failed to restore RSS context %u, rc=%d"
- "; RSS filters may fail to be applied\n",
- ctx->user_id, rc);
- }
- nic_data->must_restore_rss_contexts = false;
-}
-
-static int efx_ef10_pf_rx_push_rss_config(struct efx_nic *efx, bool user,
- const u32 *rx_indir_table,
- const u8 *key)
-{
- int rc;
-
- if (efx->rss_spread == 1)
- return 0;
-
- if (!key)
- key = efx->rss_context.rx_hash_key;
-
- rc = efx_ef10_rx_push_exclusive_rss_config(efx, rx_indir_table, key);
-
- if (rc == -ENOBUFS && !user) {
- unsigned context_size;
- bool mismatch = false;
- size_t i;
-
- for (i = 0;
- i < ARRAY_SIZE(efx->rss_context.rx_indir_table) && !mismatch;
- i++)
- mismatch = rx_indir_table[i] !=
- ethtool_rxfh_indir_default(i, efx->rss_spread);
-
- rc = efx_ef10_rx_push_shared_rss_config(efx, &context_size);
- if (rc == 0) {
- if (context_size != efx->rss_spread)
- netif_warn(efx, probe, efx->net_dev,
- "Could not allocate an exclusive RSS"
- " context; allocated a shared one of"
- " different size."
- " Wanted %u, got %u.\n",
- efx->rss_spread, context_size);
- else if (mismatch)
- netif_warn(efx, probe, efx->net_dev,
- "Could not allocate an exclusive RSS"
- " context; allocated a shared one but"
- " could not apply custom"
- " indirection.\n");
- else
- netif_info(efx, probe, efx->net_dev,
- "Could not allocate an exclusive RSS"
- " context; allocated a shared one.\n");
- }
- }
- return rc;
-}
-
-static int efx_ef10_vf_rx_push_rss_config(struct efx_nic *efx, bool user,
- const u32 *rx_indir_table
- __attribute__ ((unused)),
- const u8 *key
- __attribute__ ((unused)))
-{
- if (user)
- return -EOPNOTSUPP;
- if (efx->rss_context.context_id != EFX_EF10_RSS_CONTEXT_INVALID)
- return 0;
- return efx_ef10_rx_push_shared_rss_config(efx, NULL);
-}
-
-static int efx_ef10_rx_probe(struct efx_rx_queue *rx_queue)
-{
- return efx_nic_alloc_buffer(rx_queue->efx, &rx_queue->rxd.buf,
- (rx_queue->ptr_mask + 1) *
- sizeof(efx_qword_t),
- GFP_KERNEL);
-}
-
-static void efx_ef10_rx_init(struct efx_rx_queue *rx_queue)
-{
- MCDI_DECLARE_BUF(inbuf,
- MC_CMD_INIT_RXQ_IN_LEN(EFX_MAX_DMAQ_SIZE * 8 /
- EFX_BUF_SIZE));
- struct efx_channel *channel = efx_rx_queue_channel(rx_queue);
- size_t entries = rx_queue->rxd.buf.len / EFX_BUF_SIZE;
- struct efx_nic *efx = rx_queue->efx;
- struct efx_ef10_nic_data *nic_data = efx->nic_data;
- size_t inlen;
- dma_addr_t dma_addr;
- int rc;
- int i;
- BUILD_BUG_ON(MC_CMD_INIT_RXQ_OUT_LEN != 0);
-
- rx_queue->scatter_n = 0;
- rx_queue->scatter_len = 0;
-
- MCDI_SET_DWORD(inbuf, INIT_RXQ_IN_SIZE, rx_queue->ptr_mask + 1);
- MCDI_SET_DWORD(inbuf, INIT_RXQ_IN_TARGET_EVQ, channel->channel);
- MCDI_SET_DWORD(inbuf, INIT_RXQ_IN_LABEL, efx_rx_queue_index(rx_queue));
- MCDI_SET_DWORD(inbuf, INIT_RXQ_IN_INSTANCE,
- efx_rx_queue_index(rx_queue));
- MCDI_POPULATE_DWORD_2(inbuf, INIT_RXQ_IN_FLAGS,
- INIT_RXQ_IN_FLAG_PREFIX, 1,
- INIT_RXQ_IN_FLAG_TIMESTAMP, 1);
- MCDI_SET_DWORD(inbuf, INIT_RXQ_IN_OWNER_ID, 0);
- MCDI_SET_DWORD(inbuf, INIT_RXQ_IN_PORT_ID, nic_data->vport_id);
-
- dma_addr = rx_queue->rxd.buf.dma_addr;
-
- netif_dbg(efx, hw, efx->net_dev, "pushing RXQ %d. %zu entries (%llx)\n",
- efx_rx_queue_index(rx_queue), entries, (u64)dma_addr);
-
- for (i = 0; i < entries; ++i) {
- MCDI_SET_ARRAY_QWORD(inbuf, INIT_RXQ_IN_DMA_ADDR, i, dma_addr);
- dma_addr += EFX_BUF_SIZE;
- }
-
- inlen = MC_CMD_INIT_RXQ_IN_LEN(entries);
-
- rc = efx_mcdi_rpc(efx, MC_CMD_INIT_RXQ, inbuf, inlen,
- NULL, 0, NULL);
- if (rc)
- netdev_WARN(efx->net_dev, "failed to initialise RXQ %d\n",
- efx_rx_queue_index(rx_queue));
-}
-
-static void efx_ef10_rx_fini(struct efx_rx_queue *rx_queue)
-{
- MCDI_DECLARE_BUF(inbuf, MC_CMD_FINI_RXQ_IN_LEN);
- MCDI_DECLARE_BUF_ERR(outbuf);
- struct efx_nic *efx = rx_queue->efx;
- size_t outlen;
- int rc;
-
- MCDI_SET_DWORD(inbuf, FINI_RXQ_IN_INSTANCE,
- efx_rx_queue_index(rx_queue));
-
- rc = efx_mcdi_rpc_quiet(efx, MC_CMD_FINI_RXQ, inbuf, sizeof(inbuf),
- outbuf, sizeof(outbuf), &outlen);
-
- if (rc && rc != -EALREADY)
- goto fail;
-
- return;
-
-fail:
- efx_mcdi_display_error(efx, MC_CMD_FINI_RXQ, MC_CMD_FINI_RXQ_IN_LEN,
- outbuf, outlen, rc);
-}
-
-static void efx_ef10_rx_remove(struct efx_rx_queue *rx_queue)
-{
- efx_nic_free_buffer(rx_queue->efx, &rx_queue->rxd.buf);
-}
-
/* This creates an entry in the RX descriptor queue */
static inline void
efx_ef10_build_rx_desc(struct efx_rx_queue *rx_queue, unsigned int index)
@@ -3229,106 +2460,20 @@ efx_ef10_rx_defer_refill_complete(struct efx_nic *efx, unsigned long cookie,
/* nothing to do */
}
-static int efx_ef10_ev_probe(struct efx_channel *channel)
-{
- return efx_nic_alloc_buffer(channel->efx, &channel->eventq.buf,
- (channel->eventq_mask + 1) *
- sizeof(efx_qword_t),
- GFP_KERNEL);
-}
-
-static void efx_ef10_ev_fini(struct efx_channel *channel)
-{
- MCDI_DECLARE_BUF(inbuf, MC_CMD_FINI_EVQ_IN_LEN);
- MCDI_DECLARE_BUF_ERR(outbuf);
- struct efx_nic *efx = channel->efx;
- size_t outlen;
- int rc;
-
- MCDI_SET_DWORD(inbuf, FINI_EVQ_IN_INSTANCE, channel->channel);
-
- rc = efx_mcdi_rpc_quiet(efx, MC_CMD_FINI_EVQ, inbuf, sizeof(inbuf),
- outbuf, sizeof(outbuf), &outlen);
-
- if (rc && rc != -EALREADY)
- goto fail;
-
- return;
-
-fail:
- efx_mcdi_display_error(efx, MC_CMD_FINI_EVQ, MC_CMD_FINI_EVQ_IN_LEN,
- outbuf, outlen, rc);
-}
-
static int efx_ef10_ev_init(struct efx_channel *channel)
{
- MCDI_DECLARE_BUF(inbuf,
- MC_CMD_INIT_EVQ_V2_IN_LEN(EFX_MAX_EVQ_SIZE * 8 /
- EFX_BUF_SIZE));
- MCDI_DECLARE_BUF(outbuf, MC_CMD_INIT_EVQ_V2_OUT_LEN);
- size_t entries = channel->eventq.buf.len / EFX_BUF_SIZE;
struct efx_nic *efx = channel->efx;
struct efx_ef10_nic_data *nic_data;
- size_t inlen, outlen;
unsigned int enabled, implemented;
- dma_addr_t dma_addr;
+ bool use_v2, cut_thru;
int rc;
- int i;
nic_data = efx->nic_data;
-
- /* Fill event queue with all ones (i.e. empty events) */
- memset(channel->eventq.buf.addr, 0xff, channel->eventq.buf.len);
-
- MCDI_SET_DWORD(inbuf, INIT_EVQ_IN_SIZE, channel->eventq_mask + 1);
- MCDI_SET_DWORD(inbuf, INIT_EVQ_IN_INSTANCE, channel->channel);
- /* INIT_EVQ expects index in vector table, not absolute */
- MCDI_SET_DWORD(inbuf, INIT_EVQ_IN_IRQ_NUM, channel->channel);
- MCDI_SET_DWORD(inbuf, INIT_EVQ_IN_TMR_MODE,
- MC_CMD_INIT_EVQ_IN_TMR_MODE_DIS);
- MCDI_SET_DWORD(inbuf, INIT_EVQ_IN_TMR_LOAD, 0);
- MCDI_SET_DWORD(inbuf, INIT_EVQ_IN_TMR_RELOAD, 0);
- MCDI_SET_DWORD(inbuf, INIT_EVQ_IN_COUNT_MODE,
- MC_CMD_INIT_EVQ_IN_COUNT_MODE_DIS);
- MCDI_SET_DWORD(inbuf, INIT_EVQ_IN_COUNT_THRSHLD, 0);
-
- if (nic_data->datapath_caps2 &
- 1 << MC_CMD_GET_CAPABILITIES_V2_OUT_INIT_EVQ_V2_LBN) {
- /* Use the new generic approach to specifying event queue
- * configuration, requesting lower latency or higher throughput.
- * The options that actually get used appear in the output.
- */
- MCDI_POPULATE_DWORD_2(inbuf, INIT_EVQ_V2_IN_FLAGS,
- INIT_EVQ_V2_IN_FLAG_INTERRUPTING, 1,
- INIT_EVQ_V2_IN_FLAG_TYPE,
- MC_CMD_INIT_EVQ_V2_IN_FLAG_TYPE_AUTO);
- } else {
- bool cut_thru = !(nic_data->datapath_caps &
- 1 << MC_CMD_GET_CAPABILITIES_OUT_RX_BATCHING_LBN);
-
- MCDI_POPULATE_DWORD_4(inbuf, INIT_EVQ_IN_FLAGS,
- INIT_EVQ_IN_FLAG_INTERRUPTING, 1,
- INIT_EVQ_IN_FLAG_RX_MERGE, 1,
- INIT_EVQ_IN_FLAG_TX_MERGE, 1,
- INIT_EVQ_IN_FLAG_CUT_THRU, cut_thru);
- }
-
- dma_addr = channel->eventq.buf.dma_addr;
- for (i = 0; i < entries; ++i) {
- MCDI_SET_ARRAY_QWORD(inbuf, INIT_EVQ_IN_DMA_ADDR, i, dma_addr);
- dma_addr += EFX_BUF_SIZE;
- }
-
- inlen = MC_CMD_INIT_EVQ_IN_LEN(entries);
-
- rc = efx_mcdi_rpc(efx, MC_CMD_INIT_EVQ, inbuf, inlen,
- outbuf, sizeof(outbuf), &outlen);
-
- if (outlen >= MC_CMD_INIT_EVQ_V2_OUT_LEN)
- netif_dbg(efx, drv, efx->net_dev,
- "Channel %d using event queue flags %08x\n",
- channel->channel,
- MCDI_DWORD(outbuf, INIT_EVQ_V2_OUT_FLAGS));
+ use_v2 = nic_data->datapath_caps2 &
+ 1 << MC_CMD_GET_CAPABILITIES_V2_OUT_INIT_EVQ_V2_LBN;
+ cut_thru = !(nic_data->datapath_caps &
+ 1 << MC_CMD_GET_CAPABILITIES_OUT_RX_BATCHING_LBN);
+ rc = efx_mcdi_ev_init(channel, cut_thru, use_v2);
/* IRQ return is ignored */
if (channel->channel || rc)
@@ -3386,15 +2531,10 @@ static int efx_ef10_ev_init(struct efx_channel *channel)
return 0;
fail:
- efx_ef10_ev_fini(channel);
+ efx_mcdi_ev_fini(channel);
return rc;
}
-static void efx_ef10_ev_remove(struct efx_channel *channel)
-{
- efx_nic_free_buffer(channel->efx, &channel->eventq.buf);
-}
-
static void efx_ef10_handle_rx_wrong_queue(struct efx_rx_queue *rx_queue,
unsigned int rx_queue_label)
{
@@ -3976,9 +3116,9 @@ static int efx_ef10_fini_dmaq(struct efx_nic *efx)
if (efx->state != STATE_RECOVERY) {
efx_for_each_channel(channel, efx) {
efx_for_each_channel_rx_queue(rx_queue, channel)
- efx_ef10_rx_fini(rx_queue);
+ efx_mcdi_rx_fini(rx_queue);
efx_for_each_channel_tx_queue(tx_queue, channel)
- efx_ef10_tx_fini(tx_queue);
+ efx_mcdi_tx_fini(tx_queue);
}
wait_event_timeout(efx->flush_wq,
@@ -4000,1538 +3140,6 @@ static void efx_ef10_prepare_flr(struct efx_nic *efx)
atomic_set(&efx->active_queues, 0);
}
-/* Decide whether a filter should be exclusive or else should allow
- * delivery to additional recipients. Currently we decide that
- * filters for specific local unicast MAC and IP addresses are
- * exclusive.
- */
-static bool efx_ef10_filter_is_exclusive(const struct efx_filter_spec *spec)
-{
- if (spec->match_flags & EFX_FILTER_MATCH_LOC_MAC &&
- !is_multicast_ether_addr(spec->loc_mac))
- return true;
-
- if ((spec->match_flags &
- (EFX_FILTER_MATCH_ETHER_TYPE | EFX_FILTER_MATCH_LOC_HOST)) ==
- (EFX_FILTER_MATCH_ETHER_TYPE | EFX_FILTER_MATCH_LOC_HOST)) {
- if (spec->ether_type == htons(ETH_P_IP) &&
- !ipv4_is_multicast(spec->loc_host[0]))
- return true;
- if (spec->ether_type == htons(ETH_P_IPV6) &&
- ((const u8 *)spec->loc_host)[0] != 0xff)
- return true;
- }
-
- return false;
-}
-
-static struct efx_filter_spec *
-efx_ef10_filter_entry_spec(const struct efx_ef10_filter_table *table,
- unsigned int filter_idx)
-{
- return (struct efx_filter_spec *)(table->entry[filter_idx].spec &
- ~EFX_EF10_FILTER_FLAGS);
-}
-
-static unsigned int
-efx_ef10_filter_entry_flags(const struct efx_ef10_filter_table *table,
- unsigned int filter_idx)
-{
- return table->entry[filter_idx].spec & EFX_EF10_FILTER_FLAGS;
-}
-
-static void
-efx_ef10_filter_set_entry(struct efx_ef10_filter_table *table,
- unsigned int filter_idx,
- const struct efx_filter_spec *spec,
- unsigned int flags)
-{
- table->entry[filter_idx].spec = (unsigned long)spec | flags;
-}
-
-static void
-efx_ef10_filter_push_prep_set_match_fields(struct efx_nic *efx,
- const struct efx_filter_spec *spec,
- efx_dword_t *inbuf)
-{
- enum efx_encap_type encap_type = efx_filter_get_encap_type(spec);
- u32 match_fields = 0, uc_match, mc_match;
-
- MCDI_SET_DWORD(inbuf, FILTER_OP_IN_OP,
- efx_ef10_filter_is_exclusive(spec) ?
- MC_CMD_FILTER_OP_IN_OP_INSERT :
- MC_CMD_FILTER_OP_IN_OP_SUBSCRIBE);
-
- /* Convert match flags and values. Unlike almost
- * everything else in MCDI, these fields are in
- * network byte order.
- */
-#define COPY_VALUE(value, mcdi_field) \
- do { \
- match_fields |= \
- 1 << MC_CMD_FILTER_OP_IN_MATCH_ ## \
- mcdi_field ## _LBN; \
- BUILD_BUG_ON( \
- MC_CMD_FILTER_OP_IN_ ## mcdi_field ## _LEN < \
- sizeof(value)); \
- memcpy(MCDI_PTR(inbuf, FILTER_OP_IN_ ## mcdi_field), \
- &value, sizeof(value)); \
- } while (0)
-#define COPY_FIELD(gen_flag, gen_field, mcdi_field) \
- if (spec->match_flags & EFX_FILTER_MATCH_ ## gen_flag) { \
- COPY_VALUE(spec->gen_field, mcdi_field); \
- }
- /* Handle encap filters first. They will always be mismatch
- * (unknown UC or MC) filters
- */
- if (encap_type) {
- /* ether_type and outer_ip_proto need to be variables
- * because COPY_VALUE wants to memcpy them
- */
- __be16 ether_type =
- htons(encap_type & EFX_ENCAP_FLAG_IPV6 ?
- ETH_P_IPV6 : ETH_P_IP);
- u8 vni_type = MC_CMD_FILTER_OP_EXT_IN_VNI_TYPE_GENEVE;
- u8 outer_ip_proto;
-
- switch (encap_type & EFX_ENCAP_TYPES_MASK) {
- case EFX_ENCAP_TYPE_VXLAN:
- vni_type = MC_CMD_FILTER_OP_EXT_IN_VNI_TYPE_VXLAN;
- /* fallthrough */
- case EFX_ENCAP_TYPE_GENEVE:
- COPY_VALUE(ether_type, ETHER_TYPE);
- outer_ip_proto = IPPROTO_UDP;
- COPY_VALUE(outer_ip_proto, IP_PROTO);
- /* We always need to set the type field, even
- * though we're not matching on the TNI.
- */
- MCDI_POPULATE_DWORD_1(inbuf,
- FILTER_OP_EXT_IN_VNI_OR_VSID,
- FILTER_OP_EXT_IN_VNI_TYPE,
- vni_type);
- break;
- case EFX_ENCAP_TYPE_NVGRE:
- COPY_VALUE(ether_type, ETHER_TYPE);
- outer_ip_proto = IPPROTO_GRE;
- COPY_VALUE(outer_ip_proto, IP_PROTO);
- break;
- default:
- WARN_ON(1);
- }
-
- uc_match = MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_UNKNOWN_UCAST_DST_LBN;
- mc_match = MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_UNKNOWN_MCAST_DST_LBN;
- } else {
- uc_match = MC_CMD_FILTER_OP_EXT_IN_MATCH_UNKNOWN_UCAST_DST_LBN;
- mc_match = MC_CMD_FILTER_OP_EXT_IN_MATCH_UNKNOWN_MCAST_DST_LBN;
- }
-
- if (spec->match_flags & EFX_FILTER_MATCH_LOC_MAC_IG)
- match_fields |=
- is_multicast_ether_addr(spec->loc_mac) ?
- 1 << mc_match :
- 1 << uc_match;
- COPY_FIELD(REM_HOST, rem_host, SRC_IP);
- COPY_FIELD(LOC_HOST, loc_host, DST_IP);
- COPY_FIELD(REM_MAC, rem_mac, SRC_MAC);
- COPY_FIELD(REM_PORT, rem_port, SRC_PORT);
- COPY_FIELD(LOC_MAC, loc_mac, DST_MAC);
- COPY_FIELD(LOC_PORT, loc_port, DST_PORT);
- COPY_FIELD(ETHER_TYPE, ether_type, ETHER_TYPE);
- COPY_FIELD(INNER_VID, inner_vid, INNER_VLAN);
- COPY_FIELD(OUTER_VID, outer_vid, OUTER_VLAN);
- COPY_FIELD(IP_PROTO, ip_proto, IP_PROTO);
-#undef COPY_FIELD
-#undef COPY_VALUE
- MCDI_SET_DWORD(inbuf, FILTER_OP_IN_MATCH_FIELDS,
- match_fields);
-}
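/* For reference, an illustrative expansion of one COPY_FIELD() use
 * above, COPY_FIELD(REM_PORT, rem_port, SRC_PORT): the match bit is set
 * and the already-network-byte-order value is copied verbatim
 * (BUILD_BUG_ON size check omitted; the helper name is hypothetical):
 */
static inline void efx_copy_field_demo(const struct efx_filter_spec *spec,
				       efx_dword_t *inbuf, u32 *match_fields)
{
	if (spec->match_flags & EFX_FILTER_MATCH_REM_PORT) {
		*match_fields |= 1 << MC_CMD_FILTER_OP_IN_MATCH_SRC_PORT_LBN;
		memcpy(MCDI_PTR(inbuf, FILTER_OP_IN_SRC_PORT),
		       &spec->rem_port, sizeof(spec->rem_port));
	}
}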
-
-static void efx_ef10_filter_push_prep(struct efx_nic *efx,
- const struct efx_filter_spec *spec,
- efx_dword_t *inbuf, u64 handle,
- struct efx_rss_context *ctx,
- bool replacing)
-{
- struct efx_ef10_nic_data *nic_data = efx->nic_data;
- u32 flags = spec->flags;
-
- memset(inbuf, 0, MC_CMD_FILTER_OP_EXT_IN_LEN);
-
- /* If RSS filter, caller better have given us an RSS context */
- if (flags & EFX_FILTER_FLAG_RX_RSS) {
- /* We don't have the ability to return an error, so we'll just
- * log a warning and disable RSS for the filter.
- */
- if (WARN_ON_ONCE(!ctx))
- flags &= ~EFX_FILTER_FLAG_RX_RSS;
- else if (WARN_ON_ONCE(ctx->context_id == EFX_EF10_RSS_CONTEXT_INVALID))
- flags &= ~EFX_FILTER_FLAG_RX_RSS;
- }
-
- if (replacing) {
- MCDI_SET_DWORD(inbuf, FILTER_OP_IN_OP,
- MC_CMD_FILTER_OP_IN_OP_REPLACE);
- MCDI_SET_QWORD(inbuf, FILTER_OP_IN_HANDLE, handle);
- } else {
- efx_ef10_filter_push_prep_set_match_fields(efx, spec, inbuf);
- }
-
- MCDI_SET_DWORD(inbuf, FILTER_OP_IN_PORT_ID, nic_data->vport_id);
- MCDI_SET_DWORD(inbuf, FILTER_OP_IN_RX_DEST,
- spec->dmaq_id == EFX_FILTER_RX_DMAQ_ID_DROP ?
- MC_CMD_FILTER_OP_IN_RX_DEST_DROP :
- MC_CMD_FILTER_OP_IN_RX_DEST_HOST);
- MCDI_SET_DWORD(inbuf, FILTER_OP_IN_TX_DOMAIN, 0);
- MCDI_SET_DWORD(inbuf, FILTER_OP_IN_TX_DEST,
- MC_CMD_FILTER_OP_IN_TX_DEST_DEFAULT);
- MCDI_SET_DWORD(inbuf, FILTER_OP_IN_RX_QUEUE,
- spec->dmaq_id == EFX_FILTER_RX_DMAQ_ID_DROP ?
- 0 : spec->dmaq_id);
- MCDI_SET_DWORD(inbuf, FILTER_OP_IN_RX_MODE,
- (flags & EFX_FILTER_FLAG_RX_RSS) ?
- MC_CMD_FILTER_OP_IN_RX_MODE_RSS :
- MC_CMD_FILTER_OP_IN_RX_MODE_SIMPLE);
- if (flags & EFX_FILTER_FLAG_RX_RSS)
- MCDI_SET_DWORD(inbuf, FILTER_OP_IN_RX_CONTEXT, ctx->context_id);
-}
-
-static int efx_ef10_filter_push(struct efx_nic *efx,
- const struct efx_filter_spec *spec, u64 *handle,
- struct efx_rss_context *ctx, bool replacing)
-{
- MCDI_DECLARE_BUF(inbuf, MC_CMD_FILTER_OP_EXT_IN_LEN);
- MCDI_DECLARE_BUF(outbuf, MC_CMD_FILTER_OP_EXT_OUT_LEN);
- size_t outlen;
- int rc;
-
- efx_ef10_filter_push_prep(efx, spec, inbuf, *handle, ctx, replacing);
- rc = efx_mcdi_rpc_quiet(efx, MC_CMD_FILTER_OP, inbuf, sizeof(inbuf),
- outbuf, sizeof(outbuf), &outlen);
- if (rc && spec->priority != EFX_FILTER_PRI_HINT)
- efx_mcdi_display_error(efx, MC_CMD_FILTER_OP, sizeof(inbuf),
- outbuf, outlen, rc);
- if (rc == 0)
- *handle = MCDI_QWORD(outbuf, FILTER_OP_OUT_HANDLE);
- if (rc == -ENOSPC)
- rc = -EBUSY; /* to match efx_farch_filter_insert() */
- return rc;
-}
-
-static u32 efx_ef10_filter_mcdi_flags_from_spec(const struct efx_filter_spec *spec)
-{
- enum efx_encap_type encap_type = efx_filter_get_encap_type(spec);
- unsigned int match_flags = spec->match_flags;
- unsigned int uc_match, mc_match;
- u32 mcdi_flags = 0;
-
-#define MAP_FILTER_TO_MCDI_FLAG(gen_flag, mcdi_field, encap) { \
- unsigned int old_match_flags = match_flags; \
- match_flags &= ~EFX_FILTER_MATCH_ ## gen_flag; \
- if (match_flags != old_match_flags) \
- mcdi_flags |= \
- (1 << ((encap) ? \
- MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_ ## \
- mcdi_field ## _LBN : \
- MC_CMD_FILTER_OP_EXT_IN_MATCH_ ##\
- mcdi_field ## _LBN)); \
- }
- /* inner or outer based on encap type */
- MAP_FILTER_TO_MCDI_FLAG(REM_HOST, SRC_IP, encap_type);
- MAP_FILTER_TO_MCDI_FLAG(LOC_HOST, DST_IP, encap_type);
- MAP_FILTER_TO_MCDI_FLAG(REM_MAC, SRC_MAC, encap_type);
- MAP_FILTER_TO_MCDI_FLAG(REM_PORT, SRC_PORT, encap_type);
- MAP_FILTER_TO_MCDI_FLAG(LOC_MAC, DST_MAC, encap_type);
- MAP_FILTER_TO_MCDI_FLAG(LOC_PORT, DST_PORT, encap_type);
- MAP_FILTER_TO_MCDI_FLAG(ETHER_TYPE, ETHER_TYPE, encap_type);
- MAP_FILTER_TO_MCDI_FLAG(IP_PROTO, IP_PROTO, encap_type);
- /* always outer */
- MAP_FILTER_TO_MCDI_FLAG(INNER_VID, INNER_VLAN, false);
- MAP_FILTER_TO_MCDI_FLAG(OUTER_VID, OUTER_VLAN, false);
-#undef MAP_FILTER_TO_MCDI_FLAG
-
- /* special handling for encap type, and mismatch */
- if (encap_type) {
- match_flags &= ~EFX_FILTER_MATCH_ENCAP_TYPE;
- mcdi_flags |=
- (1 << MC_CMD_FILTER_OP_EXT_IN_MATCH_ETHER_TYPE_LBN);
- mcdi_flags |= (1 << MC_CMD_FILTER_OP_EXT_IN_MATCH_IP_PROTO_LBN);
-
- uc_match = MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_UNKNOWN_UCAST_DST_LBN;
- mc_match = MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_UNKNOWN_MCAST_DST_LBN;
- } else {
- uc_match = MC_CMD_FILTER_OP_EXT_IN_MATCH_UNKNOWN_UCAST_DST_LBN;
- mc_match = MC_CMD_FILTER_OP_EXT_IN_MATCH_UNKNOWN_MCAST_DST_LBN;
- }
-
- if (match_flags & EFX_FILTER_MATCH_LOC_MAC_IG) {
- match_flags &= ~EFX_FILTER_MATCH_LOC_MAC_IG;
- mcdi_flags |=
- is_multicast_ether_addr(spec->loc_mac) ?
- 1 << mc_match :
- 1 << uc_match;
- }
-
- /* Did we map them all? */
- WARN_ON_ONCE(match_flags);
-
- return mcdi_flags;
-}
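/* Worked example (an illustration, not part of the patch): a spec
 * matching LOC_HOST | LOC_PORT with no encapsulation maps to the outer
 * DST_IP and DST_PORT MCDI match bits:
 *
 *	mcdi_flags = (1 << MC_CMD_FILTER_OP_EXT_IN_MATCH_DST_IP_LBN) |
 *		     (1 << MC_CMD_FILTER_OP_EXT_IN_MATCH_DST_PORT_LBN);
 *
 * With an encap type set, the same spec would map to the IFRM_ (inner
 * frame) variants instead, plus the implied outer ETHER_TYPE and
 * IP_PROTO bits added in the encap branch above.
 */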
-
-static int efx_ef10_filter_pri(struct efx_ef10_filter_table *table,
- const struct efx_filter_spec *spec)
-{
- u32 mcdi_flags = efx_ef10_filter_mcdi_flags_from_spec(spec);
- unsigned int match_pri;
-
- for (match_pri = 0;
- match_pri < table->rx_match_count;
- match_pri++)
- if (table->rx_match_mcdi_flags[match_pri] == mcdi_flags)
- return match_pri;
-
- return -EPROTONOSUPPORT;
-}
-
-static s32 efx_ef10_filter_insert_locked(struct efx_nic *efx,
- struct efx_filter_spec *spec,
- bool replace_equal)
-{
- DECLARE_BITMAP(mc_rem_map, EFX_EF10_FILTER_SEARCH_LIMIT);
- struct efx_ef10_nic_data *nic_data = efx->nic_data;
- struct efx_ef10_filter_table *table;
- struct efx_filter_spec *saved_spec;
- struct efx_rss_context *ctx = NULL;
- unsigned int match_pri, hash;
- unsigned int priv_flags;
- bool rss_locked = false;
- bool replacing = false;
- unsigned int depth, i;
- int ins_index = -1;
- DEFINE_WAIT(wait);
- bool is_mc_recip;
- s32 rc;
-
- WARN_ON(!rwsem_is_locked(&efx->filter_sem));
- table = efx->filter_state;
- down_write(&table->lock);
-
- /* For now, only support RX filters */
- if ((spec->flags & (EFX_FILTER_FLAG_RX | EFX_FILTER_FLAG_TX)) !=
- EFX_FILTER_FLAG_RX) {
- rc = -EINVAL;
- goto out_unlock;
- }
-
- rc = efx_ef10_filter_pri(table, spec);
- if (rc < 0)
- goto out_unlock;
- match_pri = rc;
-
- hash = efx_filter_spec_hash(spec);
- is_mc_recip = efx_filter_is_mc_recipient(spec);
- if (is_mc_recip)
- bitmap_zero(mc_rem_map, EFX_EF10_FILTER_SEARCH_LIMIT);
-
- if (spec->flags & EFX_FILTER_FLAG_RX_RSS) {
- mutex_lock(&efx->rss_lock);
- rss_locked = true;
- if (spec->rss_context)
- ctx = efx_find_rss_context_entry(efx, spec->rss_context);
- else
- ctx = &efx->rss_context;
- if (!ctx) {
- rc = -ENOENT;
- goto out_unlock;
- }
- if (ctx->context_id == EFX_EF10_RSS_CONTEXT_INVALID) {
- rc = -EOPNOTSUPP;
- goto out_unlock;
- }
- }
-
- /* Find any existing filters with the same match tuple or
- * else a free slot to insert at.
- */
- for (depth = 1; depth < EFX_EF10_FILTER_SEARCH_LIMIT; depth++) {
- i = (hash + depth) & (HUNT_FILTER_TBL_ROWS - 1);
- saved_spec = efx_ef10_filter_entry_spec(table, i);
-
- if (!saved_spec) {
- if (ins_index < 0)
- ins_index = i;
- } else if (efx_filter_spec_equal(spec, saved_spec)) {
- if (spec->priority < saved_spec->priority &&
- spec->priority != EFX_FILTER_PRI_AUTO) {
- rc = -EPERM;
- goto out_unlock;
- }
- if (!is_mc_recip) {
- /* This is the only one */
- if (spec->priority ==
- saved_spec->priority &&
- !replace_equal) {
- rc = -EEXIST;
- goto out_unlock;
- }
- ins_index = i;
- break;
- } else if (spec->priority >
- saved_spec->priority ||
- (spec->priority ==
- saved_spec->priority &&
- replace_equal)) {
- if (ins_index < 0)
- ins_index = i;
- else
- __set_bit(depth, mc_rem_map);
- }
- }
- }
-
- /* Once we reach the maximum search depth, use the first suitable
- * slot, or return -EBUSY if there was none
- */
- if (ins_index < 0) {
- rc = -EBUSY;
- goto out_unlock;
- }
-
- /* Create a software table entry if necessary. */
- saved_spec = efx_ef10_filter_entry_spec(table, ins_index);
- if (saved_spec) {
- if (spec->priority == EFX_FILTER_PRI_AUTO &&
- saved_spec->priority >= EFX_FILTER_PRI_AUTO) {
- /* Just make sure it won't be removed */
- if (saved_spec->priority > EFX_FILTER_PRI_AUTO)
- saved_spec->flags |= EFX_FILTER_FLAG_RX_OVER_AUTO;
- table->entry[ins_index].spec &=
- ~EFX_EF10_FILTER_FLAG_AUTO_OLD;
- rc = ins_index;
- goto out_unlock;
- }
- replacing = true;
- priv_flags = efx_ef10_filter_entry_flags(table, ins_index);
- } else {
- saved_spec = kmalloc(sizeof(*spec), GFP_ATOMIC);
- if (!saved_spec) {
- rc = -ENOMEM;
- goto out_unlock;
- }
- *saved_spec = *spec;
- priv_flags = 0;
- }
- efx_ef10_filter_set_entry(table, ins_index, saved_spec, priv_flags);
-
- /* Actually insert the filter on the HW */
- rc = efx_ef10_filter_push(efx, spec, &table->entry[ins_index].handle,
- ctx, replacing);
-
- if (rc == -EINVAL && nic_data->must_realloc_vis)
- /* The MC rebooted under us, causing it to reject our filter
- * insertion as pointing to an invalid VI (spec->dmaq_id).
- */
- rc = -EAGAIN;
-
- /* Finalise the software table entry */
- if (rc == 0) {
- if (replacing) {
- /* Update the fields that may differ */
- if (saved_spec->priority == EFX_FILTER_PRI_AUTO)
- saved_spec->flags |=
- EFX_FILTER_FLAG_RX_OVER_AUTO;
- saved_spec->priority = spec->priority;
- saved_spec->flags &= EFX_FILTER_FLAG_RX_OVER_AUTO;
- saved_spec->flags |= spec->flags;
- saved_spec->rss_context = spec->rss_context;
- saved_spec->dmaq_id = spec->dmaq_id;
- }
- } else if (!replacing) {
- kfree(saved_spec);
- saved_spec = NULL;
- } else {
- /* We failed to replace, so the old filter is still present.
- * Roll back the software table to reflect this. In fact the
- * efx_ef10_filter_set_entry() call below will do the right
- * thing, so nothing extra is needed here.
- */
- }
- efx_ef10_filter_set_entry(table, ins_index, saved_spec, priv_flags);
-
- /* Remove and finalise entries for lower-priority multicast
- * recipients
- */
- if (is_mc_recip) {
- MCDI_DECLARE_BUF(inbuf, MC_CMD_FILTER_OP_EXT_IN_LEN);
- unsigned int depth, i;
-
- memset(inbuf, 0, sizeof(inbuf));
-
- for (depth = 0; depth < EFX_EF10_FILTER_SEARCH_LIMIT; depth++) {
- if (!test_bit(depth, mc_rem_map))
- continue;
-
- i = (hash + depth) & (HUNT_FILTER_TBL_ROWS - 1);
- saved_spec = efx_ef10_filter_entry_spec(table, i);
- priv_flags = efx_ef10_filter_entry_flags(table, i);
-
- if (rc == 0) {
- MCDI_SET_DWORD(inbuf, FILTER_OP_IN_OP,
- MC_CMD_FILTER_OP_IN_OP_UNSUBSCRIBE);
- MCDI_SET_QWORD(inbuf, FILTER_OP_IN_HANDLE,
- table->entry[i].handle);
- rc = efx_mcdi_rpc(efx, MC_CMD_FILTER_OP,
- inbuf, sizeof(inbuf),
- NULL, 0, NULL);
- }
-
- if (rc == 0) {
- kfree(saved_spec);
- saved_spec = NULL;
- priv_flags = 0;
- }
- efx_ef10_filter_set_entry(table, i, saved_spec,
- priv_flags);
- }
- }
-
- /* If successful, return the inserted filter ID */
- if (rc == 0)
- rc = efx_ef10_make_filter_id(match_pri, ins_index);
-
-out_unlock:
- if (rss_locked)
- mutex_unlock(&efx->rss_lock);
- up_write(&table->lock);
- return rc;
-}
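/* The search in efx_ef10_filter_insert_locked() is open addressing with
 * linear probing: start at the spec hash and advance one slot at a
 * time, wrapping modulo HUNT_FILTER_TBL_ROWS, for at most
 * EFX_EF10_FILTER_SEARCH_LIMIT steps.  A condensed sketch that only
 * finds a free slot (the real loop above also checks for an existing
 * equal spec; the helper name is hypothetical):
 */
static inline int efx_probe_demo(struct efx_ef10_filter_table *table, u32 hash)
{
	unsigned int depth, i;

	for (depth = 1; depth < EFX_EF10_FILTER_SEARCH_LIMIT; depth++) {
		i = (hash + depth) & (HUNT_FILTER_TBL_ROWS - 1);
		if (!efx_ef10_filter_entry_spec(table, i))
			return i;	/* first free slot */
	}
	return -EBUSY;			/* probe window exhausted */
}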
-
-static s32 efx_ef10_filter_insert(struct efx_nic *efx,
- struct efx_filter_spec *spec,
- bool replace_equal)
-{
- s32 ret;
-
- down_read(&efx->filter_sem);
- ret = efx_ef10_filter_insert_locked(efx, spec, replace_equal);
- up_read(&efx->filter_sem);
-
- return ret;
-}
-
-static void efx_ef10_filter_update_rx_scatter(struct efx_nic *efx)
-{
- /* no need to do anything here on EF10 */
-}
-
-/* Remove a filter.
- * If !by_index, remove by ID
- * If by_index, remove by index
- * Filter ID may come from userland and must be range-checked.
- * Caller must hold efx->filter_sem for read, and efx->filter_state->lock
- * for write.
- */
-static int efx_ef10_filter_remove_internal(struct efx_nic *efx,
- unsigned int priority_mask,
- u32 filter_id, bool by_index)
-{
- unsigned int filter_idx = efx_ef10_filter_get_unsafe_id(filter_id);
- struct efx_ef10_filter_table *table = efx->filter_state;
- MCDI_DECLARE_BUF(inbuf,
- MC_CMD_FILTER_OP_IN_HANDLE_OFST +
- MC_CMD_FILTER_OP_IN_HANDLE_LEN);
- struct efx_filter_spec *spec;
- DEFINE_WAIT(wait);
- int rc;
-
- spec = efx_ef10_filter_entry_spec(table, filter_idx);
- if (!spec ||
- (!by_index &&
- efx_ef10_filter_pri(table, spec) !=
- efx_ef10_filter_get_unsafe_pri(filter_id)))
- return -ENOENT;
-
- if (spec->flags & EFX_FILTER_FLAG_RX_OVER_AUTO &&
- priority_mask == (1U << EFX_FILTER_PRI_AUTO)) {
- /* Just remove flags */
- spec->flags &= ~EFX_FILTER_FLAG_RX_OVER_AUTO;
- table->entry[filter_idx].spec &= ~EFX_EF10_FILTER_FLAG_AUTO_OLD;
- return 0;
- }
-
- if (!(priority_mask & (1U << spec->priority)))
- return -ENOENT;
-
- if (spec->flags & EFX_FILTER_FLAG_RX_OVER_AUTO) {
- /* Reset to an automatic filter */
-
- struct efx_filter_spec new_spec = *spec;
-
- new_spec.priority = EFX_FILTER_PRI_AUTO;
- new_spec.flags = (EFX_FILTER_FLAG_RX |
- (efx_rss_active(&efx->rss_context) ?
- EFX_FILTER_FLAG_RX_RSS : 0));
- new_spec.dmaq_id = 0;
- new_spec.rss_context = 0;
- rc = efx_ef10_filter_push(efx, &new_spec,
- &table->entry[filter_idx].handle,
- &efx->rss_context,
- true);
-
- if (rc == 0)
- *spec = new_spec;
- } else {
- /* Really remove the filter */
-
- MCDI_SET_DWORD(inbuf, FILTER_OP_IN_OP,
- efx_ef10_filter_is_exclusive(spec) ?
- MC_CMD_FILTER_OP_IN_OP_REMOVE :
- MC_CMD_FILTER_OP_IN_OP_UNSUBSCRIBE);
- MCDI_SET_QWORD(inbuf, FILTER_OP_IN_HANDLE,
- table->entry[filter_idx].handle);
- rc = efx_mcdi_rpc_quiet(efx, MC_CMD_FILTER_OP,
- inbuf, sizeof(inbuf), NULL, 0, NULL);
-
- if ((rc == 0) || (rc == -ENOENT)) {
- /* Filter removed OK or didn't actually exist */
- kfree(spec);
- efx_ef10_filter_set_entry(table, filter_idx, NULL, 0);
- } else {
- efx_mcdi_display_error(efx, MC_CMD_FILTER_OP,
- MC_CMD_FILTER_OP_EXT_IN_LEN,
- NULL, 0, rc);
- }
- }
-
- return rc;
-}
-
-static int efx_ef10_filter_remove_safe(struct efx_nic *efx,
- enum efx_filter_priority priority,
- u32 filter_id)
-{
- struct efx_ef10_filter_table *table;
- int rc;
-
- down_read(&efx->filter_sem);
- table = efx->filter_state;
- down_write(&table->lock);
- rc = efx_ef10_filter_remove_internal(efx, 1U << priority, filter_id,
- false);
- up_write(&table->lock);
- up_read(&efx->filter_sem);
- return rc;
-}
-
-/* Caller must hold efx->filter_sem for read */
-static void efx_ef10_filter_remove_unsafe(struct efx_nic *efx,
- enum efx_filter_priority priority,
- u32 filter_id)
-{
- struct efx_ef10_filter_table *table = efx->filter_state;
-
- if (filter_id == EFX_EF10_FILTER_ID_INVALID)
- return;
-
- down_write(&table->lock);
- efx_ef10_filter_remove_internal(efx, 1U << priority, filter_id,
- true);
- up_write(&table->lock);
-}
-
-static int efx_ef10_filter_get_safe(struct efx_nic *efx,
- enum efx_filter_priority priority,
- u32 filter_id, struct efx_filter_spec *spec)
-{
- unsigned int filter_idx = efx_ef10_filter_get_unsafe_id(filter_id);
- const struct efx_filter_spec *saved_spec;
- struct efx_ef10_filter_table *table;
- int rc;
-
- down_read(&efx->filter_sem);
- table = efx->filter_state;
- down_read(&table->lock);
- saved_spec = efx_ef10_filter_entry_spec(table, filter_idx);
- if (saved_spec && saved_spec->priority == priority &&
- efx_ef10_filter_pri(table, saved_spec) ==
- efx_ef10_filter_get_unsafe_pri(filter_id)) {
- *spec = *saved_spec;
- rc = 0;
- } else {
- rc = -ENOENT;
- }
- up_read(&table->lock);
- up_read(&efx->filter_sem);
- return rc;
-}
-
-static int efx_ef10_filter_clear_rx(struct efx_nic *efx,
- enum efx_filter_priority priority)
-{
- struct efx_ef10_filter_table *table;
- unsigned int priority_mask;
- unsigned int i;
- int rc;
-
- priority_mask = (((1U << (priority + 1)) - 1) &
- ~(1U << EFX_FILTER_PRI_AUTO));
-
- down_read(&efx->filter_sem);
- table = efx->filter_state;
- down_write(&table->lock);
- for (i = 0; i < HUNT_FILTER_TBL_ROWS; i++) {
- rc = efx_ef10_filter_remove_internal(efx, priority_mask,
- i, true);
- if (rc && rc != -ENOENT)
- break;
- rc = 0;
- }
-
- up_write(&table->lock);
- up_read(&efx->filter_sem);
- return rc;
-}
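/* Worked example of the priority_mask computation above, assuming
 * EFX_FILTER_PRI_AUTO == 1 and a requested priority of 2:
 *
 *	(1U << (2 + 1)) - 1	= 0b111  (priorities 0, 1 and 2)
 *	& ~(1U << 1)		= 0b101  (AUTO filters are never cleared here)
 */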
-
-static u32 efx_ef10_filter_count_rx_used(struct efx_nic *efx,
- enum efx_filter_priority priority)
-{
- struct efx_ef10_filter_table *table;
- unsigned int filter_idx;
- s32 count = 0;
-
- down_read(&efx->filter_sem);
- table = efx->filter_state;
- down_read(&table->lock);
- for (filter_idx = 0; filter_idx < HUNT_FILTER_TBL_ROWS; filter_idx++) {
- if (table->entry[filter_idx].spec &&
- efx_ef10_filter_entry_spec(table, filter_idx)->priority ==
- priority)
- ++count;
- }
- up_read(&table->lock);
- up_read(&efx->filter_sem);
- return count;
-}
-
-static u32 efx_ef10_filter_get_rx_id_limit(struct efx_nic *efx)
-{
- struct efx_ef10_filter_table *table = efx->filter_state;
-
- return table->rx_match_count * HUNT_FILTER_TBL_ROWS * 2;
-}
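/* Consistent with the limit above, a filter ID packs the match priority
 * and the table index into one u32.  A sketch of the assumed encoding
 * behind efx_ef10_make_filter_id() and its get_unsafe_id/pri inverses
 * (the real helpers are defined earlier in this file):
 *
 *	id  = match_pri * HUNT_FILTER_TBL_ROWS * 2 + filter_idx;
 *	idx = id % (HUNT_FILTER_TBL_ROWS * 2);
 *	pri = id / (HUNT_FILTER_TBL_ROWS * 2);
 */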
-
-static s32 efx_ef10_filter_get_rx_ids(struct efx_nic *efx,
- enum efx_filter_priority priority,
- u32 *buf, u32 size)
-{
- struct efx_ef10_filter_table *table;
- struct efx_filter_spec *spec;
- unsigned int filter_idx;
- s32 count = 0;
-
- down_read(&efx->filter_sem);
- table = efx->filter_state;
- down_read(&table->lock);
-
- for (filter_idx = 0; filter_idx < HUNT_FILTER_TBL_ROWS; filter_idx++) {
- spec = efx_ef10_filter_entry_spec(table, filter_idx);
- if (spec && spec->priority == priority) {
- if (count == size) {
- count = -EMSGSIZE;
- break;
- }
- buf[count++] =
- efx_ef10_make_filter_id(
- efx_ef10_filter_pri(table, spec),
- filter_idx);
- }
- }
- up_read(&table->lock);
- up_read(&efx->filter_sem);
- return count;
-}
-
-#ifdef CONFIG_RFS_ACCEL
-
-static bool efx_ef10_filter_rfs_expire_one(struct efx_nic *efx, u32 flow_id,
- unsigned int filter_idx)
-{
- struct efx_filter_spec *spec, saved_spec;
- struct efx_ef10_filter_table *table;
- struct efx_arfs_rule *rule = NULL;
- bool ret = true, force = false;
- u16 arfs_id;
-
- down_read(&efx->filter_sem);
- table = efx->filter_state;
- down_write(&table->lock);
- spec = efx_ef10_filter_entry_spec(table, filter_idx);
-
- if (!spec || spec->priority != EFX_FILTER_PRI_HINT)
- goto out_unlock;
-
- spin_lock_bh(&efx->rps_hash_lock);
- if (!efx->rps_hash_table) {
- /* In the absence of the table, we always return 0 to ARFS. */
- arfs_id = 0;
- } else {
- rule = efx_rps_hash_find(efx, spec);
- if (!rule)
- /* ARFS table doesn't know of this filter, so remove it */
- goto expire;
- arfs_id = rule->arfs_id;
- ret = efx_rps_check_rule(rule, filter_idx, &force);
- if (force)
- goto expire;
- if (!ret) {
- spin_unlock_bh(&efx->rps_hash_lock);
- goto out_unlock;
- }
- }
- if (!rps_may_expire_flow(efx->net_dev, spec->dmaq_id, flow_id, arfs_id))
- ret = false;
- else if (rule)
- rule->filter_id = EFX_ARFS_FILTER_ID_REMOVING;
-expire:
- saved_spec = *spec; /* remove operation will kfree spec */
- spin_unlock_bh(&efx->rps_hash_lock);
- /* At this point (since we dropped the lock), another thread might queue
- * up a fresh insertion request (but the actual insertion will be held
- * up by our possession of the filter table lock). In that case, it
- * will set rule->filter_id to EFX_ARFS_FILTER_ID_PENDING, meaning that
- * the rule is not removed by efx_rps_hash_del() below.
- */
- if (ret)
- ret = efx_ef10_filter_remove_internal(efx, 1U << spec->priority,
- filter_idx, true) == 0;
- /* While we can't safely dereference rule (we dropped the lock), we can
- * still test it for NULL.
- */
- if (ret && rule) {
- /* Expiring, so remove entry from ARFS table */
- spin_lock_bh(&efx->rps_hash_lock);
- efx_rps_hash_del(efx, &saved_spec);
- spin_unlock_bh(&efx->rps_hash_lock);
- }
-out_unlock:
- up_write(&table->lock);
- up_read(&efx->filter_sem);
- return ret;
-}
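/* Lock ordering as taken by the expiry path above, outermost first:
 * efx->filter_sem (read), then table->lock (write), then
 * efx->rps_hash_lock (BH spinlock).  The hash lock is deliberately
 * dropped before efx_ef10_filter_remove_internal() runs, which is why
 * the spec is copied into saved_spec first.
 */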
-
-#endif /* CONFIG_RFS_ACCEL */
-
-static int efx_ef10_filter_match_flags_from_mcdi(bool encap, u32 mcdi_flags)
-{
- int match_flags = 0;
-
-#define MAP_FLAG(gen_flag, mcdi_field) do { \
- u32 old_mcdi_flags = mcdi_flags; \
- mcdi_flags &= ~(1 << MC_CMD_FILTER_OP_EXT_IN_MATCH_ ## \
- mcdi_field ## _LBN); \
- if (mcdi_flags != old_mcdi_flags) \
- match_flags |= EFX_FILTER_MATCH_ ## gen_flag; \
- } while (0)
-
- if (encap) {
- /* encap filters must specify encap type */
- match_flags |= EFX_FILTER_MATCH_ENCAP_TYPE;
- /* and imply ethertype and ip proto */
- mcdi_flags &=
- ~(1 << MC_CMD_FILTER_OP_EXT_IN_MATCH_IP_PROTO_LBN);
- mcdi_flags &=
- ~(1 << MC_CMD_FILTER_OP_EXT_IN_MATCH_ETHER_TYPE_LBN);
- /* VLAN tags refer to the outer packet */
- MAP_FLAG(INNER_VID, INNER_VLAN);
- MAP_FLAG(OUTER_VID, OUTER_VLAN);
- /* everything else refers to the inner packet */
- MAP_FLAG(LOC_MAC_IG, IFRM_UNKNOWN_UCAST_DST);
- MAP_FLAG(LOC_MAC_IG, IFRM_UNKNOWN_MCAST_DST);
- MAP_FLAG(REM_HOST, IFRM_SRC_IP);
- MAP_FLAG(LOC_HOST, IFRM_DST_IP);
- MAP_FLAG(REM_MAC, IFRM_SRC_MAC);
- MAP_FLAG(REM_PORT, IFRM_SRC_PORT);
- MAP_FLAG(LOC_MAC, IFRM_DST_MAC);
- MAP_FLAG(LOC_PORT, IFRM_DST_PORT);
- MAP_FLAG(ETHER_TYPE, IFRM_ETHER_TYPE);
- MAP_FLAG(IP_PROTO, IFRM_IP_PROTO);
- } else {
- MAP_FLAG(LOC_MAC_IG, UNKNOWN_UCAST_DST);
- MAP_FLAG(LOC_MAC_IG, UNKNOWN_MCAST_DST);
- MAP_FLAG(REM_HOST, SRC_IP);
- MAP_FLAG(LOC_HOST, DST_IP);
- MAP_FLAG(REM_MAC, SRC_MAC);
- MAP_FLAG(REM_PORT, SRC_PORT);
- MAP_FLAG(LOC_MAC, DST_MAC);
- MAP_FLAG(LOC_PORT, DST_PORT);
- MAP_FLAG(ETHER_TYPE, ETHER_TYPE);
- MAP_FLAG(INNER_VID, INNER_VLAN);
- MAP_FLAG(OUTER_VID, OUTER_VLAN);
- MAP_FLAG(IP_PROTO, IP_PROTO);
- }
-#undef MAP_FLAG
-
- /* Did we map them all? */
- if (mcdi_flags)
- return -EINVAL;
-
- return match_flags;
-}
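/* This is the inverse of efx_ef10_filter_mcdi_flags_from_spec(): any
 * MCDI bit left over after the mapping is a match type the driver has
 * no equivalent for, hence -EINVAL, which
 * efx_ef10_filter_table_probe_matches() below treats as "skip this
 * firmware match class".
 */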
-
-static void efx_ef10_filter_cleanup_vlans(struct efx_nic *efx)
-{
- struct efx_ef10_filter_table *table = efx->filter_state;
- struct efx_ef10_filter_vlan *vlan, *next_vlan;
-
- /* See comment in efx_ef10_filter_table_remove() */
- if (!efx_rwsem_assert_write_locked(&efx->filter_sem))
- return;
-
- if (!table)
- return;
-
- list_for_each_entry_safe(vlan, next_vlan, &table->vlan_list, list)
- efx_ef10_filter_del_vlan_internal(efx, vlan);
-}
-
-static bool efx_ef10_filter_match_supported(struct efx_ef10_filter_table *table,
- bool encap,
- enum efx_filter_match_flags match_flags)
-{
- unsigned int match_pri;
- int mf;
-
- for (match_pri = 0;
- match_pri < table->rx_match_count;
- match_pri++) {
- mf = efx_ef10_filter_match_flags_from_mcdi(encap,
- table->rx_match_mcdi_flags[match_pri]);
- if (mf == match_flags)
- return true;
- }
-
- return false;
-}
-
-static int
-efx_ef10_filter_table_probe_matches(struct efx_nic *efx,
- struct efx_ef10_filter_table *table,
- bool encap)
-{
- MCDI_DECLARE_BUF(inbuf, MC_CMD_GET_PARSER_DISP_INFO_IN_LEN);
- MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_PARSER_DISP_INFO_OUT_LENMAX);
- unsigned int pd_match_pri, pd_match_count;
- size_t outlen;
- int rc;
-
- /* Find out which RX filter types are supported, and their priorities */
- MCDI_SET_DWORD(inbuf, GET_PARSER_DISP_INFO_IN_OP,
- encap ?
- MC_CMD_GET_PARSER_DISP_INFO_IN_OP_GET_SUPPORTED_ENCAP_RX_MATCHES :
- MC_CMD_GET_PARSER_DISP_INFO_IN_OP_GET_SUPPORTED_RX_MATCHES);
- rc = efx_mcdi_rpc(efx, MC_CMD_GET_PARSER_DISP_INFO,
- inbuf, sizeof(inbuf), outbuf, sizeof(outbuf),
- &outlen);
- if (rc)
- return rc;
-
- pd_match_count = MCDI_VAR_ARRAY_LEN(
- outlen, GET_PARSER_DISP_INFO_OUT_SUPPORTED_MATCHES);
-
- for (pd_match_pri = 0; pd_match_pri < pd_match_count; pd_match_pri++) {
- u32 mcdi_flags =
- MCDI_ARRAY_DWORD(
- outbuf,
- GET_PARSER_DISP_INFO_OUT_SUPPORTED_MATCHES,
- pd_match_pri);
- rc = efx_ef10_filter_match_flags_from_mcdi(encap, mcdi_flags);
- if (rc < 0) {
- netif_dbg(efx, probe, efx->net_dev,
- "%s: fw flags %#x pri %u not supported in driver\n",
- __func__, mcdi_flags, pd_match_pri);
- } else {
- netif_dbg(efx, probe, efx->net_dev,
- "%s: fw flags %#x pri %u supported as driver flags %#x pri %u\n",
- __func__, mcdi_flags, pd_match_pri,
- rc, table->rx_match_count);
- table->rx_match_mcdi_flags[table->rx_match_count] = mcdi_flags;
- table->rx_match_count++;
- }
- }
-
- return 0;
-}
-
-static int efx_ef10_filter_table_probe(struct efx_nic *efx)
-{
- struct efx_ef10_nic_data *nic_data = efx->nic_data;
- struct net_device *net_dev = efx->net_dev;
- struct efx_ef10_filter_table *table;
- struct efx_ef10_vlan *vlan;
- int rc;
-
- if (!efx_rwsem_assert_write_locked(&efx->filter_sem))
- return -EINVAL;
-
- if (efx->filter_state) /* already probed */
- return 0;
-
- table = kzalloc(sizeof(*table), GFP_KERNEL);
- if (!table)
- return -ENOMEM;
-
- table->rx_match_count = 0;
- rc = efx_ef10_filter_table_probe_matches(efx, table, false);
- if (rc)
- goto fail;
- if (nic_data->datapath_caps &
- (1 << MC_CMD_GET_CAPABILITIES_OUT_VXLAN_NVGRE_LBN))
- rc = efx_ef10_filter_table_probe_matches(efx, table, true);
- if (rc)
- goto fail;
- if ((efx_supported_features(efx) & NETIF_F_HW_VLAN_CTAG_FILTER) &&
- !(efx_ef10_filter_match_supported(table, false,
- (EFX_FILTER_MATCH_OUTER_VID | EFX_FILTER_MATCH_LOC_MAC)) &&
- efx_ef10_filter_match_supported(table, false,
- (EFX_FILTER_MATCH_OUTER_VID | EFX_FILTER_MATCH_LOC_MAC_IG)))) {
- netif_info(efx, probe, net_dev,
- "VLAN filters are not supported in this firmware variant\n");
- net_dev->features &= ~NETIF_F_HW_VLAN_CTAG_FILTER;
- efx->fixed_features &= ~NETIF_F_HW_VLAN_CTAG_FILTER;
- net_dev->hw_features &= ~NETIF_F_HW_VLAN_CTAG_FILTER;
- }
-
- table->entry = vzalloc(array_size(HUNT_FILTER_TBL_ROWS,
- sizeof(*table->entry)));
- if (!table->entry) {
- rc = -ENOMEM;
- goto fail;
- }
-
- table->mc_promisc_last = false;
- table->vlan_filter =
- !!(efx->net_dev->features & NETIF_F_HW_VLAN_CTAG_FILTER);
- INIT_LIST_HEAD(&table->vlan_list);
- init_rwsem(&table->lock);
-
- efx->filter_state = table;
-
- list_for_each_entry(vlan, &nic_data->vlan_list, list) {
- rc = efx_ef10_filter_add_vlan(efx, vlan->vid);
- if (rc)
- goto fail_add_vlan;
- }
-
- return 0;
-
-fail_add_vlan:
- efx_ef10_filter_cleanup_vlans(efx);
- efx->filter_state = NULL;
-fail:
- kfree(table);
- return rc;
-}
-
-/* Caller must hold efx->filter_sem for read if race against
- * efx_ef10_filter_table_remove() is possible
- */
-static void efx_ef10_filter_table_restore(struct efx_nic *efx)
-{
- struct efx_ef10_filter_table *table = efx->filter_state;
- struct efx_ef10_nic_data *nic_data = efx->nic_data;
- unsigned int invalid_filters = 0, failed = 0;
- struct efx_ef10_filter_vlan *vlan;
- struct efx_filter_spec *spec;
- struct efx_rss_context *ctx;
- unsigned int filter_idx;
- u32 mcdi_flags;
- int match_pri;
- int rc, i;
-
- WARN_ON(!rwsem_is_locked(&efx->filter_sem));
-
- if (!nic_data->must_restore_filters)
- return;
-
- if (!table)
- return;
-
- down_write(&table->lock);
- mutex_lock(&efx->rss_lock);
-
- for (filter_idx = 0; filter_idx < HUNT_FILTER_TBL_ROWS; filter_idx++) {
- spec = efx_ef10_filter_entry_spec(table, filter_idx);
- if (!spec)
- continue;
-
- mcdi_flags = efx_ef10_filter_mcdi_flags_from_spec(spec);
- match_pri = 0;
- while (match_pri < table->rx_match_count &&
- table->rx_match_mcdi_flags[match_pri] != mcdi_flags)
- ++match_pri;
- if (match_pri >= table->rx_match_count) {
- invalid_filters++;
- goto not_restored;
- }
- if (spec->rss_context)
- ctx = efx_find_rss_context_entry(efx, spec->rss_context);
- else
- ctx = &efx->rss_context;
- if (spec->flags & EFX_FILTER_FLAG_RX_RSS) {
- if (!ctx) {
- netif_warn(efx, drv, efx->net_dev,
- "Warning: unable to restore a filter with nonexistent RSS context %u.\n",
- spec->rss_context);
- invalid_filters++;
- goto not_restored;
- }
- if (ctx->context_id == EFX_EF10_RSS_CONTEXT_INVALID) {
- netif_warn(efx, drv, efx->net_dev,
- "Warning: unable to restore a filter with RSS context %u as it was not created.\n",
- spec->rss_context);
- invalid_filters++;
- goto not_restored;
- }
- }
-
- rc = efx_ef10_filter_push(efx, spec,
- &table->entry[filter_idx].handle,
- ctx, false);
- if (rc)
- failed++;
-
- if (rc) {
-not_restored:
- list_for_each_entry(vlan, &table->vlan_list, list)
- for (i = 0; i < EFX_EF10_NUM_DEFAULT_FILTERS; ++i)
- if (vlan->default_filters[i] == filter_idx)
- vlan->default_filters[i] =
- EFX_EF10_FILTER_ID_INVALID;
-
- kfree(spec);
- efx_ef10_filter_set_entry(table, filter_idx, NULL, 0);
- }
- }
-
- mutex_unlock(&efx->rss_lock);
- up_write(&table->lock);
-
- /* This can legitimately happen if the MC's capabilities have
- * changed, so it is not an error.
- */
- if (invalid_filters)
- netif_dbg(efx, drv, efx->net_dev,
- "Did not restore %u filters that are now unsupported.\n",
- invalid_filters);
-
- if (failed)
- netif_err(efx, hw, efx->net_dev,
- "unable to restore %u filters\n", failed);
- else
- nic_data->must_restore_filters = false;
-}
-
-static void efx_ef10_filter_table_remove(struct efx_nic *efx)
-{
- struct efx_ef10_filter_table *table = efx->filter_state;
- MCDI_DECLARE_BUF(inbuf, MC_CMD_FILTER_OP_EXT_IN_LEN);
- struct efx_filter_spec *spec;
- unsigned int filter_idx;
- int rc;
-
- efx_ef10_filter_cleanup_vlans(efx);
- efx->filter_state = NULL;
- /* If we were called without locking, then it's not safe to free
- * the table as others might be using it. So we just WARN, leak
- * the memory, and potentially get an inconsistent filter table
- * state.
- * This should never actually happen.
- */
- if (!efx_rwsem_assert_write_locked(&efx->filter_sem))
- return;
-
- if (!table)
- return;
-
- for (filter_idx = 0; filter_idx < HUNT_FILTER_TBL_ROWS; filter_idx++) {
- spec = efx_ef10_filter_entry_spec(table, filter_idx);
- if (!spec)
- continue;
-
- MCDI_SET_DWORD(inbuf, FILTER_OP_IN_OP,
- efx_ef10_filter_is_exclusive(spec) ?
- MC_CMD_FILTER_OP_IN_OP_REMOVE :
- MC_CMD_FILTER_OP_IN_OP_UNSUBSCRIBE);
- MCDI_SET_QWORD(inbuf, FILTER_OP_IN_HANDLE,
- table->entry[filter_idx].handle);
- rc = efx_mcdi_rpc_quiet(efx, MC_CMD_FILTER_OP, inbuf,
- sizeof(inbuf), NULL, 0, NULL);
- if (rc)
- netif_info(efx, drv, efx->net_dev,
- "%s: filter %04x remove failed\n",
- __func__, filter_idx);
- kfree(spec);
- }
-
- vfree(table->entry);
- kfree(table);
-}
-
-static void efx_ef10_filter_mark_one_old(struct efx_nic *efx, uint16_t *id)
-{
- struct efx_ef10_filter_table *table = efx->filter_state;
- unsigned int filter_idx;
-
- efx_rwsem_assert_write_locked(&table->lock);
-
- if (*id != EFX_EF10_FILTER_ID_INVALID) {
- filter_idx = efx_ef10_filter_get_unsafe_id(*id);
- if (!table->entry[filter_idx].spec)
- netif_dbg(efx, drv, efx->net_dev,
- "marked null spec old %04x:%04x\n", *id,
- filter_idx);
- table->entry[filter_idx].spec |= EFX_EF10_FILTER_FLAG_AUTO_OLD;
- *id = EFX_EF10_FILTER_ID_INVALID;
- }
-}
-
-/* Mark old per-VLAN filters that may need to be removed */
-static void _efx_ef10_filter_vlan_mark_old(struct efx_nic *efx,
- struct efx_ef10_filter_vlan *vlan)
-{
- struct efx_ef10_filter_table *table = efx->filter_state;
- unsigned int i;
-
- for (i = 0; i < table->dev_uc_count; i++)
- efx_ef10_filter_mark_one_old(efx, &vlan->uc[i]);
- for (i = 0; i < table->dev_mc_count; i++)
- efx_ef10_filter_mark_one_old(efx, &vlan->mc[i]);
- for (i = 0; i < EFX_EF10_NUM_DEFAULT_FILTERS; i++)
- efx_ef10_filter_mark_one_old(efx, &vlan->default_filters[i]);
-}
-
-/* Mark old filters that may need to be removed.
- * Caller must hold efx->filter_sem for read if race against
- * efx_ef10_filter_table_remove() is possible
- */
-static void efx_ef10_filter_mark_old(struct efx_nic *efx)
-{
- struct efx_ef10_filter_table *table = efx->filter_state;
- struct efx_ef10_filter_vlan *vlan;
-
- down_write(&table->lock);
- list_for_each_entry(vlan, &table->vlan_list, list)
- _efx_ef10_filter_vlan_mark_old(efx, vlan);
- up_write(&table->lock);
-}
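/* Together with efx_ef10_filter_remove_old(), this gives a
 * mark-and-sweep refresh of the automatic filters; one cycle, as driven
 * by efx_ef10_filter_sync_rx_mode() later in the file, looks like:
 *
 *	efx_ef10_filter_mark_old(efx);     mark every auto filter AUTO_OLD
 *	...insert/renew wanted filters...  renewal clears AUTO_OLD
 *	efx_ef10_filter_remove_old(efx);   sweep entries still marked
 */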
-
-static void efx_ef10_filter_uc_addr_list(struct efx_nic *efx)
-{
- struct efx_ef10_filter_table *table = efx->filter_state;
- struct net_device *net_dev = efx->net_dev;
- struct netdev_hw_addr *uc;
- unsigned int i;
-
- table->uc_promisc = !!(net_dev->flags & IFF_PROMISC);
- ether_addr_copy(table->dev_uc_list[0].addr, net_dev->dev_addr);
- i = 1;
- netdev_for_each_uc_addr(uc, net_dev) {
- if (i >= EFX_EF10_FILTER_DEV_UC_MAX) {
- table->uc_promisc = true;
- break;
- }
- ether_addr_copy(table->dev_uc_list[i].addr, uc->addr);
- i++;
- }
-
- table->dev_uc_count = i;
-}
-
-static void efx_ef10_filter_mc_addr_list(struct efx_nic *efx)
-{
- struct efx_ef10_filter_table *table = efx->filter_state;
- struct net_device *net_dev = efx->net_dev;
- struct netdev_hw_addr *mc;
- unsigned int i;
-
- table->mc_overflow = false;
- table->mc_promisc = !!(net_dev->flags & (IFF_PROMISC | IFF_ALLMULTI));
-
- i = 0;
- netdev_for_each_mc_addr(mc, net_dev) {
- if (i >= EFX_EF10_FILTER_DEV_MC_MAX) {
- table->mc_promisc = true;
- table->mc_overflow = true;
- break;
- }
- ether_addr_copy(table->dev_mc_list[i].addr, mc->addr);
- i++;
- }
-
- table->dev_mc_count = i;
-}
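/* Unlike the unicast path, a multicast overflow records both mc_promisc
 * and mc_overflow.  efx_ef10_filter_vlan_sync_rx_mode() later uses the
 * extra bit to avoid installing a truncated address list alongside the
 * promiscuous filter (see the !table->mc_overflow test there).
 */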
-
-static int efx_ef10_filter_insert_addr_list(struct efx_nic *efx,
- struct efx_ef10_filter_vlan *vlan,
- bool multicast, bool rollback)
-{
- struct efx_ef10_filter_table *table = efx->filter_state;
- struct efx_ef10_dev_addr *addr_list;
- enum efx_filter_flags filter_flags;
- struct efx_filter_spec spec;
- u8 baddr[ETH_ALEN];
- unsigned int i, j;
- int addr_count;
- u16 *ids;
- int rc;
-
- if (multicast) {
- addr_list = table->dev_mc_list;
- addr_count = table->dev_mc_count;
- ids = vlan->mc;
- } else {
- addr_list = table->dev_uc_list;
- addr_count = table->dev_uc_count;
- ids = vlan->uc;
- }
-
- filter_flags = efx_rss_active(&efx->rss_context) ? EFX_FILTER_FLAG_RX_RSS : 0;
-
- /* Insert/renew filters */
- for (i = 0; i < addr_count; i++) {
- EFX_WARN_ON_PARANOID(ids[i] != EFX_EF10_FILTER_ID_INVALID);
- efx_filter_init_rx(&spec, EFX_FILTER_PRI_AUTO, filter_flags, 0);
- efx_filter_set_eth_local(&spec, vlan->vid, addr_list[i].addr);
- rc = efx_ef10_filter_insert_locked(efx, &spec, true);
- if (rc < 0) {
- if (rollback) {
- netif_info(efx, drv, efx->net_dev,
- "efx_ef10_filter_insert failed rc=%d\n",
- rc);
- /* Fall back to promiscuous */
- for (j = 0; j < i; j++) {
- efx_ef10_filter_remove_unsafe(
- efx, EFX_FILTER_PRI_AUTO,
- ids[j]);
- ids[j] = EFX_EF10_FILTER_ID_INVALID;
- }
- return rc;
- } else {
- /* keep invalid ID, and carry on */
- }
- } else {
- ids[i] = efx_ef10_filter_get_unsafe_id(rc);
- }
- }
-
- if (multicast && rollback) {
- /* Also need an Ethernet broadcast filter */
- EFX_WARN_ON_PARANOID(vlan->default_filters[EFX_EF10_BCAST] !=
- EFX_EF10_FILTER_ID_INVALID);
- efx_filter_init_rx(&spec, EFX_FILTER_PRI_AUTO, filter_flags, 0);
- eth_broadcast_addr(baddr);
- efx_filter_set_eth_local(&spec, vlan->vid, baddr);
- rc = efx_ef10_filter_insert_locked(efx, &spec, true);
- if (rc < 0) {
- netif_warn(efx, drv, efx->net_dev,
- "Broadcast filter insert failed rc=%d\n", rc);
- /* Fall back to promiscuous */
- for (j = 0; j < i; j++) {
- efx_ef10_filter_remove_unsafe(
- efx, EFX_FILTER_PRI_AUTO,
- ids[j]);
- ids[j] = EFX_EF10_FILTER_ID_INVALID;
- }
- return rc;
- } else {
- vlan->default_filters[EFX_EF10_BCAST] =
- efx_ef10_filter_get_unsafe_id(rc);
- }
- }
-
- return 0;
-}
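/* Contract of the helper above: with rollback == true, one failed
 * insertion unwinds every filter added so far and returns the error so
 * the caller can fall back to promiscuous mode; with rollback == false,
 * failed entries simply keep EFX_EF10_FILTER_ID_INVALID and the
 * remaining addresses are still attempted.
 */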
-
-static int efx_ef10_filter_insert_def(struct efx_nic *efx,
- struct efx_ef10_filter_vlan *vlan,
- enum efx_encap_type encap_type,
- bool multicast, bool rollback)
-{
- struct efx_ef10_nic_data *nic_data = efx->nic_data;
- enum efx_filter_flags filter_flags;
- struct efx_filter_spec spec;
- u8 baddr[ETH_ALEN];
- int rc;
- u16 *id;
-
- filter_flags = efx_rss_active(&efx->rss_context) ? EFX_FILTER_FLAG_RX_RSS : 0;
-
- efx_filter_init_rx(&spec, EFX_FILTER_PRI_AUTO, filter_flags, 0);
-
- if (multicast)
- efx_filter_set_mc_def(&spec);
- else
- efx_filter_set_uc_def(&spec);
-
- if (encap_type) {
- if (nic_data->datapath_caps &
- (1 << MC_CMD_GET_CAPABILITIES_OUT_VXLAN_NVGRE_LBN))
- efx_filter_set_encap_type(&spec, encap_type);
- else
- /* don't insert encap filters on non-supporting
- * platforms. ID will be left as INVALID.
- */
- return 0;
- }
-
- if (vlan->vid != EFX_FILTER_VID_UNSPEC)
- efx_filter_set_eth_local(&spec, vlan->vid, NULL);
-
- rc = efx_ef10_filter_insert_locked(efx, &spec, true);
- if (rc < 0) {
- const char *um = multicast ? "Multicast" : "Unicast";
- const char *encap_name = "";
- const char *encap_ipv = "";
-
- if ((encap_type & EFX_ENCAP_TYPES_MASK) ==
- EFX_ENCAP_TYPE_VXLAN)
- encap_name = "VXLAN ";
- else if ((encap_type & EFX_ENCAP_TYPES_MASK) ==
- EFX_ENCAP_TYPE_NVGRE)
- encap_name = "NVGRE ";
- else if ((encap_type & EFX_ENCAP_TYPES_MASK) ==
- EFX_ENCAP_TYPE_GENEVE)
- encap_name = "GENEVE ";
- if (encap_type & EFX_ENCAP_FLAG_IPV6)
- encap_ipv = "IPv6 ";
- else if (encap_type)
- encap_ipv = "IPv4 ";
-
- /* unprivileged functions can't insert mismatch filters
- * for encapsulated or unicast traffic, so downgrade
- * those warnings to debug.
- */
- netif_cond_dbg(efx, drv, efx->net_dev,
- rc == -EPERM && (encap_type || !multicast), warn,
- "%s%s%s mismatch filter insert failed rc=%d\n",
- encap_name, encap_ipv, um, rc);
- } else if (multicast) {
- /* mapping from encap types to default filter IDs (multicast) */
- static enum efx_ef10_default_filters map[] = {
- [EFX_ENCAP_TYPE_NONE] = EFX_EF10_MCDEF,
- [EFX_ENCAP_TYPE_VXLAN] = EFX_EF10_VXLAN4_MCDEF,
- [EFX_ENCAP_TYPE_NVGRE] = EFX_EF10_NVGRE4_MCDEF,
- [EFX_ENCAP_TYPE_GENEVE] = EFX_EF10_GENEVE4_MCDEF,
- [EFX_ENCAP_TYPE_VXLAN | EFX_ENCAP_FLAG_IPV6] =
- EFX_EF10_VXLAN6_MCDEF,
- [EFX_ENCAP_TYPE_NVGRE | EFX_ENCAP_FLAG_IPV6] =
- EFX_EF10_NVGRE6_MCDEF,
- [EFX_ENCAP_TYPE_GENEVE | EFX_ENCAP_FLAG_IPV6] =
- EFX_EF10_GENEVE6_MCDEF,
- };
-
- /* quick bounds check (BCAST result impossible) */
- BUILD_BUG_ON(EFX_EF10_BCAST != 0);
- if (encap_type >= ARRAY_SIZE(map) || map[encap_type] == 0) {
- WARN_ON(1);
- return -EINVAL;
- }
- /* then follow map */
- id = &vlan->default_filters[map[encap_type]];
-
- EFX_WARN_ON_PARANOID(*id != EFX_EF10_FILTER_ID_INVALID);
- *id = efx_ef10_filter_get_unsafe_id(rc);
- if (!nic_data->workaround_26807 && !encap_type) {
- /* Also need an Ethernet broadcast filter */
- efx_filter_init_rx(&spec, EFX_FILTER_PRI_AUTO,
- filter_flags, 0);
- eth_broadcast_addr(baddr);
- efx_filter_set_eth_local(&spec, vlan->vid, baddr);
- rc = efx_ef10_filter_insert_locked(efx, &spec, true);
- if (rc < 0) {
- netif_warn(efx, drv, efx->net_dev,
- "Broadcast filter insert failed rc=%d\n",
- rc);
- if (rollback) {
- /* Roll back the mc_def filter */
- efx_ef10_filter_remove_unsafe(
- efx, EFX_FILTER_PRI_AUTO,
- *id);
- *id = EFX_EF10_FILTER_ID_INVALID;
- return rc;
- }
- } else {
- EFX_WARN_ON_PARANOID(
- vlan->default_filters[EFX_EF10_BCAST] !=
- EFX_EF10_FILTER_ID_INVALID);
- vlan->default_filters[EFX_EF10_BCAST] =
- efx_ef10_filter_get_unsafe_id(rc);
- }
- }
- rc = 0;
- } else {
- /* mapping from encap types to default filter IDs (unicast) */
- static enum efx_ef10_default_filters map[] = {
- [EFX_ENCAP_TYPE_NONE] = EFX_EF10_UCDEF,
- [EFX_ENCAP_TYPE_VXLAN] = EFX_EF10_VXLAN4_UCDEF,
- [EFX_ENCAP_TYPE_NVGRE] = EFX_EF10_NVGRE4_UCDEF,
- [EFX_ENCAP_TYPE_GENEVE] = EFX_EF10_GENEVE4_UCDEF,
- [EFX_ENCAP_TYPE_VXLAN | EFX_ENCAP_FLAG_IPV6] =
- EFX_EF10_VXLAN6_UCDEF,
- [EFX_ENCAP_TYPE_NVGRE | EFX_ENCAP_FLAG_IPV6] =
- EFX_EF10_NVGRE6_UCDEF,
- [EFX_ENCAP_TYPE_GENEVE | EFX_ENCAP_FLAG_IPV6] =
- EFX_EF10_GENEVE6_UCDEF,
- };
-
- /* quick bounds check (BCAST result impossible) */
- BUILD_BUG_ON(EFX_EF10_BCAST != 0);
- if (encap_type >= ARRAY_SIZE(map) || map[encap_type] == 0) {
- WARN_ON(1);
- return -EINVAL;
- }
- /* then follow map */
- id = &vlan->default_filters[map[encap_type]];
- EFX_WARN_ON_PARANOID(*id != EFX_EF10_FILTER_ID_INVALID);
- *id = rc;
- rc = 0;
- }
- return rc;
-}
-
-/* Remove filters that weren't renewed. */
-static void efx_ef10_filter_remove_old(struct efx_nic *efx)
-{
- struct efx_ef10_filter_table *table = efx->filter_state;
- int remove_failed = 0;
- int remove_noent = 0;
- int rc;
- int i;
-
- down_write(&table->lock);
- for (i = 0; i < HUNT_FILTER_TBL_ROWS; i++) {
- if (READ_ONCE(table->entry[i].spec) &
- EFX_EF10_FILTER_FLAG_AUTO_OLD) {
- rc = efx_ef10_filter_remove_internal(efx,
- 1U << EFX_FILTER_PRI_AUTO, i, true);
- if (rc == -ENOENT)
- remove_noent++;
- else if (rc)
- remove_failed++;
- }
- }
- up_write(&table->lock);
-
- if (remove_failed)
- netif_info(efx, drv, efx->net_dev,
- "%s: failed to remove %d filters\n",
- __func__, remove_failed);
- if (remove_noent)
- netif_info(efx, drv, efx->net_dev,
- "%s: failed to remove %d non-existent filters\n",
- __func__, remove_noent);
-}
-
static int efx_ef10_vport_set_mac_address(struct efx_nic *efx)
{
struct efx_ef10_nic_data *nic_data = efx->nic_data;
@@ -5545,7 +3153,7 @@ static int efx_ef10_vport_set_mac_address(struct efx_nic *efx)
efx_device_detach_sync(efx);
efx_net_stop(efx->net_dev);
down_write(&efx->filter_sem);
- efx_ef10_filter_table_remove(efx);
+ efx_mcdi_filter_table_remove(efx);
up_write(&efx->filter_sem);
rc = efx_ef10_vadaptor_free(efx, nic_data->vport_id);
@@ -5577,7 +3185,7 @@ restore_vadaptor:
goto reset_nic;
restore_filters:
down_write(&efx->filter_sem);
- rc2 = efx_ef10_filter_table_probe(efx);
+ rc2 = efx_mcdi_filter_table_probe(efx);
up_write(&efx->filter_sem);
if (rc2)
goto reset_nic;
@@ -5598,256 +3206,6 @@ reset_nic:
return rc ? rc : rc2;
}
-/* Caller must hold efx->filter_sem for read if race against
- * efx_ef10_filter_table_remove() is possible
- */
-static void efx_ef10_filter_vlan_sync_rx_mode(struct efx_nic *efx,
- struct efx_ef10_filter_vlan *vlan)
-{
- struct efx_ef10_filter_table *table = efx->filter_state;
- struct efx_ef10_nic_data *nic_data = efx->nic_data;
-
- /* Do not install filters for the unspecified VID if VLAN filtering
- * is enabled, and do not install filters for specific VIDs if VLAN
- * filtering is disabled.
- */
- if ((vlan->vid == EFX_FILTER_VID_UNSPEC) == table->vlan_filter)
- return;
-
- /* Insert/renew unicast filters */
- if (table->uc_promisc) {
- efx_ef10_filter_insert_def(efx, vlan, EFX_ENCAP_TYPE_NONE,
- false, false);
- efx_ef10_filter_insert_addr_list(efx, vlan, false, false);
- } else {
- /* If any of the filters failed to insert, fall back to
- * promiscuous mode - add in the uc_def filter. But keep
- * our individual unicast filters.
- */
- if (efx_ef10_filter_insert_addr_list(efx, vlan, false, false))
- efx_ef10_filter_insert_def(efx, vlan,
- EFX_ENCAP_TYPE_NONE,
- false, false);
- }
- efx_ef10_filter_insert_def(efx, vlan, EFX_ENCAP_TYPE_VXLAN,
- false, false);
- efx_ef10_filter_insert_def(efx, vlan, EFX_ENCAP_TYPE_VXLAN |
- EFX_ENCAP_FLAG_IPV6,
- false, false);
- efx_ef10_filter_insert_def(efx, vlan, EFX_ENCAP_TYPE_NVGRE,
- false, false);
- efx_ef10_filter_insert_def(efx, vlan, EFX_ENCAP_TYPE_NVGRE |
- EFX_ENCAP_FLAG_IPV6,
- false, false);
- efx_ef10_filter_insert_def(efx, vlan, EFX_ENCAP_TYPE_GENEVE,
- false, false);
- efx_ef10_filter_insert_def(efx, vlan, EFX_ENCAP_TYPE_GENEVE |
- EFX_ENCAP_FLAG_IPV6,
- false, false);
-
- /* Insert/renew multicast filters */
- /* If changing promiscuous state with cascaded multicast filters, remove
- * old filters first, so that packets are dropped rather than duplicated
- */
- if (nic_data->workaround_26807 &&
- table->mc_promisc_last != table->mc_promisc)
- efx_ef10_filter_remove_old(efx);
- if (table->mc_promisc) {
- if (nic_data->workaround_26807) {
- /* If we failed to insert promiscuous filters, rollback
- * and fall back to individual multicast filters
- */
- if (efx_ef10_filter_insert_def(efx, vlan,
- EFX_ENCAP_TYPE_NONE,
- true, true)) {
- /* Changing promisc state, so remove old filters */
- efx_ef10_filter_remove_old(efx);
- efx_ef10_filter_insert_addr_list(efx, vlan,
- true, false);
- }
- } else {
- /* If we failed to insert promiscuous filters, don't
- * rollback. Regardless, also insert the mc_list,
- * unless it's incomplete due to overflow
- */
- efx_ef10_filter_insert_def(efx, vlan,
- EFX_ENCAP_TYPE_NONE,
- true, false);
- if (!table->mc_overflow)
- efx_ef10_filter_insert_addr_list(efx, vlan,
- true, false);
- }
- } else {
- /* If any filters failed to insert, rollback and fall back to
- * promiscuous mode - mc_def filter and maybe broadcast. If
- * that fails, roll back again and insert as many of our
- * individual multicast filters as we can.
- */
- if (efx_ef10_filter_insert_addr_list(efx, vlan, true, true)) {
- /* Changing promisc state, so remove old filters */
- if (nic_data->workaround_26807)
- efx_ef10_filter_remove_old(efx);
- if (efx_ef10_filter_insert_def(efx, vlan,
- EFX_ENCAP_TYPE_NONE,
- true, true))
- efx_ef10_filter_insert_addr_list(efx, vlan,
- true, false);
- }
- }
- efx_ef10_filter_insert_def(efx, vlan, EFX_ENCAP_TYPE_VXLAN,
- true, false);
- efx_ef10_filter_insert_def(efx, vlan, EFX_ENCAP_TYPE_VXLAN |
- EFX_ENCAP_FLAG_IPV6,
- true, false);
- efx_ef10_filter_insert_def(efx, vlan, EFX_ENCAP_TYPE_NVGRE,
- true, false);
- efx_ef10_filter_insert_def(efx, vlan, EFX_ENCAP_TYPE_NVGRE |
- EFX_ENCAP_FLAG_IPV6,
- true, false);
- efx_ef10_filter_insert_def(efx, vlan, EFX_ENCAP_TYPE_GENEVE,
- true, false);
- efx_ef10_filter_insert_def(efx, vlan, EFX_ENCAP_TYPE_GENEVE |
- EFX_ENCAP_FLAG_IPV6,
- true, false);
-}
-
-/* Caller must hold efx->filter_sem for read if race against
- * efx_ef10_filter_table_remove() is possible
- */
-static void efx_ef10_filter_sync_rx_mode(struct efx_nic *efx)
-{
- struct efx_ef10_filter_table *table = efx->filter_state;
- struct net_device *net_dev = efx->net_dev;
- struct efx_ef10_filter_vlan *vlan;
- bool vlan_filter;
-
- if (!efx_dev_registered(efx))
- return;
-
- if (!table)
- return;
-
- efx_ef10_filter_mark_old(efx);
-
- /* Copy/convert the address lists; add the primary station
- * address and broadcast address
- */
- netif_addr_lock_bh(net_dev);
- efx_ef10_filter_uc_addr_list(efx);
- efx_ef10_filter_mc_addr_list(efx);
- netif_addr_unlock_bh(net_dev);
-
- /* If VLAN filtering changes, all old filters are finally removed.
- * Do it in advance to avoid conflicts for unicast untagged and
- * VLAN 0 tagged filters.
- */
- vlan_filter = !!(net_dev->features & NETIF_F_HW_VLAN_CTAG_FILTER);
- if (table->vlan_filter != vlan_filter) {
- table->vlan_filter = vlan_filter;
- efx_ef10_filter_remove_old(efx);
- }
-
- list_for_each_entry(vlan, &table->vlan_list, list)
- efx_ef10_filter_vlan_sync_rx_mode(efx, vlan);
-
- efx_ef10_filter_remove_old(efx);
- table->mc_promisc_last = table->mc_promisc;
-}
-
-static struct efx_ef10_filter_vlan *efx_ef10_filter_find_vlan(struct efx_nic *efx, u16 vid)
-{
- struct efx_ef10_filter_table *table = efx->filter_state;
- struct efx_ef10_filter_vlan *vlan;
-
- WARN_ON(!rwsem_is_locked(&efx->filter_sem));
-
- list_for_each_entry(vlan, &table->vlan_list, list) {
- if (vlan->vid == vid)
- return vlan;
- }
-
- return NULL;
-}
-
-static int efx_ef10_filter_add_vlan(struct efx_nic *efx, u16 vid)
-{
- struct efx_ef10_filter_table *table = efx->filter_state;
- struct efx_ef10_filter_vlan *vlan;
- unsigned int i;
-
- if (!efx_rwsem_assert_write_locked(&efx->filter_sem))
- return -EINVAL;
-
- vlan = efx_ef10_filter_find_vlan(efx, vid);
- if (WARN_ON(vlan)) {
- netif_err(efx, drv, efx->net_dev,
- "VLAN %u already added\n", vid);
- return -EALREADY;
- }
-
- vlan = kzalloc(sizeof(*vlan), GFP_KERNEL);
- if (!vlan)
- return -ENOMEM;
-
- vlan->vid = vid;
-
- for (i = 0; i < ARRAY_SIZE(vlan->uc); i++)
- vlan->uc[i] = EFX_EF10_FILTER_ID_INVALID;
- for (i = 0; i < ARRAY_SIZE(vlan->mc); i++)
- vlan->mc[i] = EFX_EF10_FILTER_ID_INVALID;
- for (i = 0; i < EFX_EF10_NUM_DEFAULT_FILTERS; i++)
- vlan->default_filters[i] = EFX_EF10_FILTER_ID_INVALID;
-
- list_add_tail(&vlan->list, &table->vlan_list);
-
- if (efx_dev_registered(efx))
- efx_ef10_filter_vlan_sync_rx_mode(efx, vlan);
-
- return 0;
-}
-
-static void efx_ef10_filter_del_vlan_internal(struct efx_nic *efx,
- struct efx_ef10_filter_vlan *vlan)
-{
- unsigned int i;
-
- /* See comment in efx_ef10_filter_table_remove() */
- if (!efx_rwsem_assert_write_locked(&efx->filter_sem))
- return;
-
- list_del(&vlan->list);
-
- for (i = 0; i < ARRAY_SIZE(vlan->uc); i++)
- efx_ef10_filter_remove_unsafe(efx, EFX_FILTER_PRI_AUTO,
- vlan->uc[i]);
- for (i = 0; i < ARRAY_SIZE(vlan->mc); i++)
- efx_ef10_filter_remove_unsafe(efx, EFX_FILTER_PRI_AUTO,
- vlan->mc[i]);
- for (i = 0; i < EFX_EF10_NUM_DEFAULT_FILTERS; i++)
- if (vlan->default_filters[i] != EFX_EF10_FILTER_ID_INVALID)
- efx_ef10_filter_remove_unsafe(efx, EFX_FILTER_PRI_AUTO,
- vlan->default_filters[i]);
-
- kfree(vlan);
-}
-
-static void efx_ef10_filter_del_vlan(struct efx_nic *efx, u16 vid)
-{
- struct efx_ef10_filter_vlan *vlan;
-
- /* See comment in efx_ef10_filter_table_remove() */
- if (!efx_rwsem_assert_write_locked(&efx->filter_sem))
- return;
-
- vlan = efx_ef10_filter_find_vlan(efx, vid);
- if (!vlan) {
- netif_err(efx, drv, efx->net_dev,
- "VLAN %u not found in filter state\n", vid);
- return;
- }
-
- efx_ef10_filter_del_vlan_internal(efx, vlan);
-}
-
static int efx_ef10_set_mac_address(struct efx_nic *efx)
{
MCDI_DECLARE_BUF(inbuf, MC_CMD_VADAPTOR_SET_MAC_IN_LEN);
@@ -5860,7 +3218,7 @@ static int efx_ef10_set_mac_address(struct efx_nic *efx)
mutex_lock(&efx->mac_lock);
down_write(&efx->filter_sem);
- efx_ef10_filter_table_remove(efx);
+ efx_mcdi_filter_table_remove(efx);
ether_addr_copy(MCDI_PTR(inbuf, VADAPTOR_SET_MAC_IN_MACADDR),
efx->net_dev->dev_addr);
@@ -5869,7 +3227,7 @@ static int efx_ef10_set_mac_address(struct efx_nic *efx)
rc = efx_mcdi_rpc_quiet(efx, MC_CMD_VADAPTOR_SET_MAC, inbuf,
sizeof(inbuf), NULL, 0, NULL);
- efx_ef10_filter_table_probe(efx);
+ efx_mcdi_filter_table_probe(efx);
up_write(&efx->filter_sem);
mutex_unlock(&efx->mac_lock);
@@ -5931,14 +3289,14 @@ static int efx_ef10_set_mac_address(struct efx_nic *efx)
static int efx_ef10_mac_reconfigure(struct efx_nic *efx)
{
- efx_ef10_filter_sync_rx_mode(efx);
+ efx_mcdi_filter_sync_rx_mode(efx);
return efx_mcdi_set_mac(efx);
}
static int efx_ef10_mac_reconfigure_vf(struct efx_nic *efx)
{
- efx_ef10_filter_sync_rx_mode(efx);
+ efx_mcdi_filter_sync_rx_mode(efx);
return 0;
}
@@ -6650,36 +4008,36 @@ const struct efx_nic_type efx_hunt_a0_vf_nic_type = {
.irq_handle_legacy = efx_ef10_legacy_interrupt,
.tx_probe = efx_ef10_tx_probe,
.tx_init = efx_ef10_tx_init,
- .tx_remove = efx_ef10_tx_remove,
+ .tx_remove = efx_mcdi_tx_remove,
.tx_write = efx_ef10_tx_write,
.tx_limit_len = efx_ef10_tx_limit_len,
- .rx_push_rss_config = efx_ef10_vf_rx_push_rss_config,
- .rx_pull_rss_config = efx_ef10_rx_pull_rss_config,
- .rx_probe = efx_ef10_rx_probe,
- .rx_init = efx_ef10_rx_init,
- .rx_remove = efx_ef10_rx_remove,
+ .rx_push_rss_config = efx_mcdi_vf_rx_push_rss_config,
+ .rx_pull_rss_config = efx_mcdi_rx_pull_rss_config,
+ .rx_probe = efx_mcdi_rx_probe,
+ .rx_init = efx_mcdi_rx_init,
+ .rx_remove = efx_mcdi_rx_remove,
.rx_write = efx_ef10_rx_write,
.rx_defer_refill = efx_ef10_rx_defer_refill,
- .ev_probe = efx_ef10_ev_probe,
+ .ev_probe = efx_mcdi_ev_probe,
.ev_init = efx_ef10_ev_init,
- .ev_fini = efx_ef10_ev_fini,
- .ev_remove = efx_ef10_ev_remove,
+ .ev_fini = efx_mcdi_ev_fini,
+ .ev_remove = efx_mcdi_ev_remove,
.ev_process = efx_ef10_ev_process,
.ev_read_ack = efx_ef10_ev_read_ack,
.ev_test_generate = efx_ef10_ev_test_generate,
- .filter_table_probe = efx_ef10_filter_table_probe,
- .filter_table_restore = efx_ef10_filter_table_restore,
- .filter_table_remove = efx_ef10_filter_table_remove,
- .filter_update_rx_scatter = efx_ef10_filter_update_rx_scatter,
- .filter_insert = efx_ef10_filter_insert,
- .filter_remove_safe = efx_ef10_filter_remove_safe,
- .filter_get_safe = efx_ef10_filter_get_safe,
- .filter_clear_rx = efx_ef10_filter_clear_rx,
- .filter_count_rx_used = efx_ef10_filter_count_rx_used,
- .filter_get_rx_id_limit = efx_ef10_filter_get_rx_id_limit,
- .filter_get_rx_ids = efx_ef10_filter_get_rx_ids,
+ .filter_table_probe = efx_mcdi_filter_table_probe,
+ .filter_table_restore = efx_mcdi_filter_table_restore,
+ .filter_table_remove = efx_mcdi_filter_table_remove,
+ .filter_update_rx_scatter = efx_mcdi_update_rx_scatter,
+ .filter_insert = efx_mcdi_filter_insert,
+ .filter_remove_safe = efx_mcdi_filter_remove_safe,
+ .filter_get_safe = efx_mcdi_filter_get_safe,
+ .filter_clear_rx = efx_mcdi_filter_clear_rx,
+ .filter_count_rx_used = efx_mcdi_filter_count_rx_used,
+ .filter_get_rx_id_limit = efx_mcdi_filter_get_rx_id_limit,
+ .filter_get_rx_ids = efx_mcdi_filter_get_rx_ids,
#ifdef CONFIG_RFS_ACCEL
- .filter_rfs_expire_one = efx_ef10_filter_rfs_expire_one,
+ .filter_rfs_expire_one = efx_mcdi_filter_rfs_expire_one,
#endif
#ifdef CONFIG_SFC_MTD
.mtd_probe = efx_port_dummy_op_int,
@@ -6709,7 +4067,7 @@ const struct efx_nic_type efx_hunt_a0_vf_nic_type = {
.timer_period_max = 1 << ERF_DD_EVQ_IND_TIMER_VAL_WIDTH,
.offload_features = EF10_OFFLOAD_FEATURES,
.mcdi_max_ver = 2,
- .max_rx_ip_filters = HUNT_FILTER_TBL_ROWS,
+ .max_rx_ip_filters = EFX_MCDI_FILTER_TBL_ROWS,
.hwtstamp_filters = 1 << HWTSTAMP_FILTER_NONE |
1 << HWTSTAMP_FILTER_ALL,
.rx_hash_key_size = 40,
@@ -6759,39 +4117,39 @@ const struct efx_nic_type efx_hunt_a0_nic_type = {
.irq_handle_legacy = efx_ef10_legacy_interrupt,
.tx_probe = efx_ef10_tx_probe,
.tx_init = efx_ef10_tx_init,
- .tx_remove = efx_ef10_tx_remove,
+ .tx_remove = efx_mcdi_tx_remove,
.tx_write = efx_ef10_tx_write,
.tx_limit_len = efx_ef10_tx_limit_len,
- .rx_push_rss_config = efx_ef10_pf_rx_push_rss_config,
- .rx_pull_rss_config = efx_ef10_rx_pull_rss_config,
- .rx_push_rss_context_config = efx_ef10_rx_push_rss_context_config,
- .rx_pull_rss_context_config = efx_ef10_rx_pull_rss_context_config,
- .rx_restore_rss_contexts = efx_ef10_rx_restore_rss_contexts,
- .rx_probe = efx_ef10_rx_probe,
- .rx_init = efx_ef10_rx_init,
- .rx_remove = efx_ef10_rx_remove,
+ .rx_push_rss_config = efx_mcdi_pf_rx_push_rss_config,
+ .rx_pull_rss_config = efx_mcdi_rx_pull_rss_config,
+ .rx_push_rss_context_config = efx_mcdi_rx_push_rss_context_config,
+ .rx_pull_rss_context_config = efx_mcdi_rx_pull_rss_context_config,
+ .rx_restore_rss_contexts = efx_mcdi_rx_restore_rss_contexts,
+ .rx_probe = efx_mcdi_rx_probe,
+ .rx_init = efx_mcdi_rx_init,
+ .rx_remove = efx_mcdi_rx_remove,
.rx_write = efx_ef10_rx_write,
.rx_defer_refill = efx_ef10_rx_defer_refill,
- .ev_probe = efx_ef10_ev_probe,
+ .ev_probe = efx_mcdi_ev_probe,
.ev_init = efx_ef10_ev_init,
- .ev_fini = efx_ef10_ev_fini,
- .ev_remove = efx_ef10_ev_remove,
+ .ev_fini = efx_mcdi_ev_fini,
+ .ev_remove = efx_mcdi_ev_remove,
.ev_process = efx_ef10_ev_process,
.ev_read_ack = efx_ef10_ev_read_ack,
.ev_test_generate = efx_ef10_ev_test_generate,
- .filter_table_probe = efx_ef10_filter_table_probe,
- .filter_table_restore = efx_ef10_filter_table_restore,
- .filter_table_remove = efx_ef10_filter_table_remove,
- .filter_update_rx_scatter = efx_ef10_filter_update_rx_scatter,
- .filter_insert = efx_ef10_filter_insert,
- .filter_remove_safe = efx_ef10_filter_remove_safe,
- .filter_get_safe = efx_ef10_filter_get_safe,
- .filter_clear_rx = efx_ef10_filter_clear_rx,
- .filter_count_rx_used = efx_ef10_filter_count_rx_used,
- .filter_get_rx_id_limit = efx_ef10_filter_get_rx_id_limit,
- .filter_get_rx_ids = efx_ef10_filter_get_rx_ids,
+ .filter_table_probe = efx_mcdi_filter_table_probe,
+ .filter_table_restore = efx_mcdi_filter_table_restore,
+ .filter_table_remove = efx_mcdi_filter_table_remove,
+ .filter_update_rx_scatter = efx_mcdi_update_rx_scatter,
+ .filter_insert = efx_mcdi_filter_insert,
+ .filter_remove_safe = efx_mcdi_filter_remove_safe,
+ .filter_get_safe = efx_mcdi_filter_get_safe,
+ .filter_clear_rx = efx_mcdi_filter_clear_rx,
+ .filter_count_rx_used = efx_mcdi_filter_count_rx_used,
+ .filter_get_rx_id_limit = efx_mcdi_filter_get_rx_id_limit,
+ .filter_get_rx_ids = efx_mcdi_filter_get_rx_ids,
#ifdef CONFIG_RFS_ACCEL
- .filter_rfs_expire_one = efx_ef10_filter_rfs_expire_one,
+ .filter_rfs_expire_one = efx_mcdi_filter_rfs_expire_one,
#endif
#ifdef CONFIG_SFC_MTD
.mtd_probe = efx_ef10_mtd_probe,
@@ -6844,7 +4202,7 @@ const struct efx_nic_type efx_hunt_a0_nic_type = {
.timer_period_max = 1 << ERF_DD_EVQ_IND_TIMER_VAL_WIDTH,
.offload_features = EF10_OFFLOAD_FEATURES,
.mcdi_max_ver = 2,
- .max_rx_ip_filters = HUNT_FILTER_TBL_ROWS,
+ .max_rx_ip_filters = EFX_MCDI_FILTER_TBL_ROWS,
.hwtstamp_filters = 1 << HWTSTAMP_FILTER_NONE |
1 << HWTSTAMP_FILTER_ALL,
.rx_hash_key_size = 40,
diff --git a/drivers/net/ethernet/sfc/ef10_sriov.c b/drivers/net/ethernet/sfc/ef10_sriov.c
index 52bd43f45761..14393767ef9f 100644
--- a/drivers/net/ethernet/sfc/ef10_sriov.c
+++ b/drivers/net/ethernet/sfc/ef10_sriov.c
@@ -522,10 +522,9 @@ int efx_ef10_sriov_set_vf_mac(struct efx_nic *efx, int vf_i, u8 *mac)
if (!is_zero_ether_addr(mac)) {
rc = efx_ef10_vport_add_mac(efx, vf->vport_id, mac);
- if (rc) {
- eth_zero_addr(vf->mac);
+ if (rc)
goto fail;
- }
+
if (vf->efx)
ether_addr_copy(vf->efx->net_dev->dev_addr, mac);
}
diff --git a/drivers/net/ethernet/sfc/efx.c b/drivers/net/ethernet/sfc/efx.c
index 7a38d7f282a1..4481f21a1f43 100644
--- a/drivers/net/ethernet/sfc/efx.c
+++ b/drivers/net/ethernet/sfc/efx.c
@@ -23,6 +23,10 @@
#include <net/gre.h>
#include <net/udp_tunnel.h>
#include "efx.h"
+#include "efx_common.h"
+#include "efx_channels.h"
+#include "rx_common.h"
+#include "tx_common.h"
#include "nic.h"
#include "io.h"
#include "selftest.h"
@@ -39,56 +43,6 @@
**************************************************************************
*/
-/* Loopback mode names (see LOOPBACK_MODE()) */
-const unsigned int efx_loopback_mode_max = LOOPBACK_MAX;
-const char *const efx_loopback_mode_names[] = {
- [LOOPBACK_NONE] = "NONE",
- [LOOPBACK_DATA] = "DATAPATH",
- [LOOPBACK_GMAC] = "GMAC",
- [LOOPBACK_XGMII] = "XGMII",
- [LOOPBACK_XGXS] = "XGXS",
- [LOOPBACK_XAUI] = "XAUI",
- [LOOPBACK_GMII] = "GMII",
- [LOOPBACK_SGMII] = "SGMII",
- [LOOPBACK_XGBR] = "XGBR",
- [LOOPBACK_XFI] = "XFI",
- [LOOPBACK_XAUI_FAR] = "XAUI_FAR",
- [LOOPBACK_GMII_FAR] = "GMII_FAR",
- [LOOPBACK_SGMII_FAR] = "SGMII_FAR",
- [LOOPBACK_XFI_FAR] = "XFI_FAR",
- [LOOPBACK_GPHY] = "GPHY",
- [LOOPBACK_PHYXS] = "PHYXS",
- [LOOPBACK_PCS] = "PCS",
- [LOOPBACK_PMAPMD] = "PMA/PMD",
- [LOOPBACK_XPORT] = "XPORT",
- [LOOPBACK_XGMII_WS] = "XGMII_WS",
- [LOOPBACK_XAUI_WS] = "XAUI_WS",
- [LOOPBACK_XAUI_WS_FAR] = "XAUI_WS_FAR",
- [LOOPBACK_XAUI_WS_NEAR] = "XAUI_WS_NEAR",
- [LOOPBACK_GMII_WS] = "GMII_WS",
- [LOOPBACK_XFI_WS] = "XFI_WS",
- [LOOPBACK_XFI_WS_FAR] = "XFI_WS_FAR",
- [LOOPBACK_PHYXS_WS] = "PHYXS_WS",
-};
-
-const unsigned int efx_reset_type_max = RESET_TYPE_MAX;
-const char *const efx_reset_type_names[] = {
- [RESET_TYPE_INVISIBLE] = "INVISIBLE",
- [RESET_TYPE_ALL] = "ALL",
- [RESET_TYPE_RECOVER_OR_ALL] = "RECOVER_OR_ALL",
- [RESET_TYPE_WORLD] = "WORLD",
- [RESET_TYPE_RECOVER_OR_DISABLE] = "RECOVER_OR_DISABLE",
- [RESET_TYPE_DATAPATH] = "DATAPATH",
- [RESET_TYPE_MC_BIST] = "MC_BIST",
- [RESET_TYPE_DISABLE] = "DISABLE",
- [RESET_TYPE_TX_WATCHDOG] = "TX_WATCHDOG",
- [RESET_TYPE_INT_ERROR] = "INT_ERROR",
- [RESET_TYPE_DMA_ERROR] = "DMA_ERROR",
- [RESET_TYPE_TX_SKIP] = "TX_SKIP",
- [RESET_TYPE_MC_FAILURE] = "MC_FAILURE",
- [RESET_TYPE_MCDI_TIMEOUT] = "MCDI_TIMEOUT (FLR)",
-};
-
/* UDP tunnel type names */
static const char *const efx_udp_tunnel_type_names[] = {
[TUNNEL_ENCAP_UDP_PORT_ENTRY_VXLAN] = "vxlan",
@@ -104,18 +58,6 @@ void efx_get_udp_tunnel_type_name(u16 type, char *buf, size_t buflen)
snprintf(buf, buflen, "type %d", type);
}
-/* Reset workqueue. If any NIC has a hardware failure then a reset will be
- * queued onto this work queue. This is not a per-nic work queue, because
- * efx_reset_work() acquires the rtnl lock, so resets are naturally serialised.
- */
-static struct workqueue_struct *reset_workqueue;
-
-/* How often and how many times to poll for a reset while waiting for a
- * BIST that another function started to complete.
- */
-#define BIST_WAIT_DELAY_MS 100
-#define BIST_WAIT_DELAY_COUNT 100
-
/**************************************************************************
*
* Configurable values
@@ -135,21 +77,6 @@ module_param(efx_separate_tx_channels, bool, 0444);
MODULE_PARM_DESC(efx_separate_tx_channels,
"Use separate channels for TX and RX");
-/* This is the weight assigned to each of the (per-channel) virtual
- * NAPI devices.
- */
-static int napi_weight = 64;
-
-/* This is the time (in jiffies) between invocations of the hardware
- * monitor.
- * On Falcon-based NICs, this will:
- * - Check the on-board hardware monitor;
- * - Poll the link state and reconfigure the hardware as necessary.
- * On Siena-based NICs for power systems with EEH support, this will give EEH a
- * chance to start.
- */
-static unsigned int efx_monitor_interval = 1 * HZ;
-
/* Initial interrupt moderation settings. They can be modified after
* module load with ethtool.
*
@@ -169,38 +96,10 @@ static unsigned int rx_irq_mod_usec = 60;
*/
static unsigned int tx_irq_mod_usec = 150;
-/* This is the first interrupt mode to try out of:
- * 0 => MSI-X
- * 1 => MSI
- * 2 => legacy
- */
-static unsigned int interrupt_mode;
-
-/* This is the requested number of CPUs to use for Receive-Side Scaling (RSS),
- * i.e. the number of CPUs among which we may distribute simultaneous
- * interrupt handling.
- *
- * Cards without MSI-X will only target one CPU via legacy or MSI interrupt.
- * The default (0) means to assign an interrupt to each core.
- */
-static unsigned int rss_cpus;
-module_param(rss_cpus, uint, 0444);
-MODULE_PARM_DESC(rss_cpus, "Number of CPUs to use for Receive-Side Scaling");
-
static bool phy_flash_cfg;
module_param(phy_flash_cfg, bool, 0644);
MODULE_PARM_DESC(phy_flash_cfg, "Set PHYs into reflash mode initially");
-static unsigned irq_adapt_low_thresh = 8000;
-module_param(irq_adapt_low_thresh, uint, 0644);
-MODULE_PARM_DESC(irq_adapt_low_thresh,
- "Threshold score for reducing IRQ moderation");
-
-static unsigned irq_adapt_high_thresh = 16000;
-module_param(irq_adapt_high_thresh, uint, 0644);
-MODULE_PARM_DESC(irq_adapt_high_thresh,
- "Threshold score for increasing IRQ moderation");
-
static unsigned debug = (NETIF_MSG_DRV | NETIF_MSG_PROBE |
NETIF_MSG_LINK | NETIF_MSG_IFDOWN |
NETIF_MSG_IFUP | NETIF_MSG_RX_ERR |
@@ -214,18 +113,8 @@ MODULE_PARM_DESC(debug, "Bitmapped debugging message enable value");
*
*************************************************************************/
-static int efx_soft_enable_interrupts(struct efx_nic *efx);
-static void efx_soft_disable_interrupts(struct efx_nic *efx);
-static void efx_remove_channel(struct efx_channel *channel);
-static void efx_remove_channels(struct efx_nic *efx);
static const struct efx_channel_type efx_default_channel_type;
static void efx_remove_port(struct efx_nic *efx);
-static void efx_init_napi_channel(struct efx_channel *channel);
-static void efx_fini_napi(struct efx_nic *efx);
-static void efx_fini_napi_channel(struct efx_channel *channel);
-static void efx_fini_struct(struct efx_nic *efx);
-static void efx_start_all(struct efx_nic *efx);
-static void efx_stop_all(struct efx_nic *efx);
static int efx_xdp_setup_prog(struct efx_nic *efx, struct bpf_prog *prog);
static int efx_xdp(struct net_device *dev, struct netdev_bpf *xdp);
static int efx_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **xdpfs,
@@ -239,776 +128,12 @@ static int efx_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **xdpfs,
ASSERT_RTNL(); \
} while (0)
-static int efx_check_disabled(struct efx_nic *efx)
-{
- if (efx->state == STATE_DISABLED || efx->state == STATE_RECOVERY) {
- netif_err(efx, drv, efx->net_dev,
- "device is disabled due to earlier errors\n");
- return -EIO;
- }
- return 0;
-}
-
-/**************************************************************************
- *
- * Event queue processing
- *
- *************************************************************************/
-
-/* Process channel's event queue
- *
- * This function is responsible for processing the event queue of a
- * single channel. The caller must guarantee that this function will
- * never be concurrently called more than once on the same channel,
- * though different channels may be being processed concurrently.
- */
-static int efx_process_channel(struct efx_channel *channel, int budget)
-{
- struct efx_tx_queue *tx_queue;
- struct list_head rx_list;
- int spent;
-
- if (unlikely(!channel->enabled))
- return 0;
-
- /* Prepare the batch receive list */
- EFX_WARN_ON_PARANOID(channel->rx_list != NULL);
- INIT_LIST_HEAD(&rx_list);
- channel->rx_list = &rx_list;
-
- efx_for_each_channel_tx_queue(tx_queue, channel) {
- tx_queue->pkts_compl = 0;
- tx_queue->bytes_compl = 0;
- }
-
- spent = efx_nic_process_eventq(channel, budget);
- if (spent && efx_channel_has_rx_queue(channel)) {
- struct efx_rx_queue *rx_queue =
- efx_channel_get_rx_queue(channel);
-
- efx_rx_flush_packet(channel);
- efx_fast_push_rx_descriptors(rx_queue, true);
- }
-
- /* Update BQL */
- efx_for_each_channel_tx_queue(tx_queue, channel) {
- if (tx_queue->bytes_compl) {
- netdev_tx_completed_queue(tx_queue->core_txq,
- tx_queue->pkts_compl, tx_queue->bytes_compl);
- }
- }
-
- /* Receive any packets we queued up */
- netif_receive_skb_list(channel->rx_list);
- channel->rx_list = NULL;
-
- return spent;
-}
-
-/* NAPI poll handler
- *
- * NAPI guarantees serialisation of polls of the same device, which
- * provides the guarantee required by efx_process_channel().
- */
-static void efx_update_irq_mod(struct efx_nic *efx, struct efx_channel *channel)
-{
- int step = efx->irq_mod_step_us;
-
- if (channel->irq_mod_score < irq_adapt_low_thresh) {
- if (channel->irq_moderation_us > step) {
- channel->irq_moderation_us -= step;
- efx->type->push_irq_moderation(channel);
- }
- } else if (channel->irq_mod_score > irq_adapt_high_thresh) {
- if (channel->irq_moderation_us <
- efx->irq_rx_moderation_us) {
- channel->irq_moderation_us += step;
- efx->type->push_irq_moderation(channel);
- }
- }
-
- channel->irq_count = 0;
- channel->irq_mod_score = 0;
-}
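
The adaptive scheme above widens or narrows a channel's moderation interval by a fixed step whenever its event score leaves a low/high band, then resets the counters. A minimal standalone sketch of that hysteresis, with hypothetical constants standing in for the irq_adapt_low_thresh/irq_adapt_high_thresh module parameters:

#include <stdbool.h>

#define ADAPT_LOW_THRESH	 8000u	/* hypothetical irq_adapt_low_thresh */
#define ADAPT_HIGH_THRESH	16000u	/* hypothetical irq_adapt_high_thresh */

/* Returns true if the interval changed and would need pushing to the
 * hardware (the driver does that via type->push_irq_moderation()). */
static bool adapt_moderation(unsigned int *mod_us, unsigned int score,
			     unsigned int step_us, unsigned int max_us)
{
	if (score < ADAPT_LOW_THRESH && *mod_us > step_us) {
		*mod_us -= step_us;	/* quiet channel: lower latency */
		return true;
	}
	if (score > ADAPT_HIGH_THRESH && *mod_us + step_us <= max_us) {
		*mod_us += step_us;	/* busy channel: batch harder */
		return true;
	}
	return false;
}
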
-
-static int efx_poll(struct napi_struct *napi, int budget)
-{
- struct efx_channel *channel =
- container_of(napi, struct efx_channel, napi_str);
- struct efx_nic *efx = channel->efx;
- int spent;
-
- netif_vdbg(efx, intr, efx->net_dev,
- "channel %d NAPI poll executing on CPU %d\n",
- channel->channel, raw_smp_processor_id());
-
- spent = efx_process_channel(channel, budget);
-
- xdp_do_flush_map();
-
- if (spent < budget) {
- if (efx_channel_has_rx_queue(channel) &&
- efx->irq_rx_adaptive &&
- unlikely(++channel->irq_count == 1000)) {
- efx_update_irq_mod(efx, channel);
- }
-
-#ifdef CONFIG_RFS_ACCEL
- /* Perhaps expire some ARFS filters */
- mod_delayed_work(system_wq, &channel->filter_work, 0);
-#endif
-
- /* There is no race here; although napi_disable() will
- * only wait for napi_complete(), this isn't a problem
- * since efx_nic_eventq_read_ack() will have no effect if
- * interrupts have already been disabled.
- */
- if (napi_complete_done(napi, spent))
- efx_nic_eventq_read_ack(channel);
- }
-
- return spent;
-}
-
-/* Create event queue
- * Event queue memory allocations are done only once. If the channel
- * is reset, the memory buffer will be reused; this guards against
- * errors during channel reset and also simplifies interrupt handling.
- */
-static int efx_probe_eventq(struct efx_channel *channel)
-{
- struct efx_nic *efx = channel->efx;
- unsigned long entries;
-
- netif_dbg(efx, probe, efx->net_dev,
- "chan %d create event queue\n", channel->channel);
-
- /* Build an event queue with room for one event per tx and rx buffer,
- * plus some extra for link state events and MCDI completions. */
- entries = roundup_pow_of_two(efx->rxq_entries + efx->txq_entries + 128);
- EFX_WARN_ON_PARANOID(entries > EFX_MAX_EVQ_SIZE);
- channel->eventq_mask = max(entries, EFX_MIN_EVQ_SIZE) - 1;
-
- return efx_nic_probe_eventq(channel);
-}
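
The sizing rule above reserves one event per RX and TX descriptor plus a little slack for link-state events and MCDI completions, rounds up to a power of two, and keeps the result as a mask. A sketch of the same arithmetic, with a made-up floor in place of EFX_MIN_EVQ_SIZE:

#define MIN_EVQ_SIZE	512ul	/* hypothetical EFX_MIN_EVQ_SIZE */

static unsigned long roundup_pow2(unsigned long n)
{
	unsigned long p = 1;

	while (p < n)
		p <<= 1;
	return p;
}

static unsigned long evq_mask(unsigned long rxq_entries,
			      unsigned long txq_entries)
{
	unsigned long entries = roundup_pow2(rxq_entries + txq_entries + 128);

	if (entries < MIN_EVQ_SIZE)
		entries = MIN_EVQ_SIZE;
	return entries - 1;	/* stored as eventq_mask */
}
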
-
-/* Prepare channel's event queue */
-static int efx_init_eventq(struct efx_channel *channel)
-{
- struct efx_nic *efx = channel->efx;
- int rc;
-
- EFX_WARN_ON_PARANOID(channel->eventq_init);
-
- netif_dbg(efx, drv, efx->net_dev,
- "chan %d init event queue\n", channel->channel);
-
- rc = efx_nic_init_eventq(channel);
- if (rc == 0) {
- efx->type->push_irq_moderation(channel);
- channel->eventq_read_ptr = 0;
- channel->eventq_init = true;
- }
- return rc;
-}
-
-/* Enable event queue processing and NAPI */
-void efx_start_eventq(struct efx_channel *channel)
-{
- netif_dbg(channel->efx, ifup, channel->efx->net_dev,
- "chan %d start event queue\n", channel->channel);
-
- /* Make sure the NAPI handler sees the enabled flag set */
- channel->enabled = true;
- smp_wmb();
-
- napi_enable(&channel->napi_str);
- efx_nic_eventq_read_ack(channel);
-}
-
-/* Disable event queue processing and NAPI */
-void efx_stop_eventq(struct efx_channel *channel)
-{
- if (!channel->enabled)
- return;
-
- napi_disable(&channel->napi_str);
- channel->enabled = false;
-}
-
-static void efx_fini_eventq(struct efx_channel *channel)
-{
- if (!channel->eventq_init)
- return;
-
- netif_dbg(channel->efx, drv, channel->efx->net_dev,
- "chan %d fini event queue\n", channel->channel);
-
- efx_nic_fini_eventq(channel);
- channel->eventq_init = false;
-}
-
-static void efx_remove_eventq(struct efx_channel *channel)
-{
- netif_dbg(channel->efx, drv, channel->efx->net_dev,
- "chan %d remove event queue\n", channel->channel);
-
- efx_nic_remove_eventq(channel);
-}
-
-/**************************************************************************
- *
- * Channel handling
- *
- *************************************************************************/
-
-/* Allocate and initialise a channel structure. */
-static struct efx_channel *
-efx_alloc_channel(struct efx_nic *efx, int i, struct efx_channel *old_channel)
-{
- struct efx_channel *channel;
- struct efx_rx_queue *rx_queue;
- struct efx_tx_queue *tx_queue;
- int j;
-
- channel = kzalloc(sizeof(*channel), GFP_KERNEL);
- if (!channel)
- return NULL;
-
- channel->efx = efx;
- channel->channel = i;
- channel->type = &efx_default_channel_type;
-
- for (j = 0; j < EFX_TXQ_TYPES; j++) {
- tx_queue = &channel->tx_queue[j];
- tx_queue->efx = efx;
- tx_queue->queue = i * EFX_TXQ_TYPES + j;
- tx_queue->channel = channel;
- }
-
-#ifdef CONFIG_RFS_ACCEL
- INIT_DELAYED_WORK(&channel->filter_work, efx_filter_rfs_expire);
-#endif
-
- rx_queue = &channel->rx_queue;
- rx_queue->efx = efx;
- timer_setup(&rx_queue->slow_fill, efx_rx_slow_fill, 0);
-
- return channel;
-}
-
-/* Allocate and initialise a channel structure, copying parameters
- * (but not resources) from an old channel structure.
- */
-static struct efx_channel *
-efx_copy_channel(const struct efx_channel *old_channel)
-{
- struct efx_channel *channel;
- struct efx_rx_queue *rx_queue;
- struct efx_tx_queue *tx_queue;
- int j;
-
- channel = kmalloc(sizeof(*channel), GFP_KERNEL);
- if (!channel)
- return NULL;
-
- *channel = *old_channel;
-
- channel->napi_dev = NULL;
- INIT_HLIST_NODE(&channel->napi_str.napi_hash_node);
- channel->napi_str.napi_id = 0;
- channel->napi_str.state = 0;
- memset(&channel->eventq, 0, sizeof(channel->eventq));
-
- for (j = 0; j < EFX_TXQ_TYPES; j++) {
- tx_queue = &channel->tx_queue[j];
- if (tx_queue->channel)
- tx_queue->channel = channel;
- tx_queue->buffer = NULL;
- memset(&tx_queue->txd, 0, sizeof(tx_queue->txd));
- }
-
- rx_queue = &channel->rx_queue;
- rx_queue->buffer = NULL;
- memset(&rx_queue->rxd, 0, sizeof(rx_queue->rxd));
- timer_setup(&rx_queue->slow_fill, efx_rx_slow_fill, 0);
-#ifdef CONFIG_RFS_ACCEL
- INIT_DELAYED_WORK(&channel->filter_work, efx_filter_rfs_expire);
-#endif
-
- return channel;
-}
-
-static int efx_probe_channel(struct efx_channel *channel)
-{
- struct efx_tx_queue *tx_queue;
- struct efx_rx_queue *rx_queue;
- int rc;
-
- netif_dbg(channel->efx, probe, channel->efx->net_dev,
- "creating channel %d\n", channel->channel);
-
- rc = channel->type->pre_probe(channel);
- if (rc)
- goto fail;
-
- rc = efx_probe_eventq(channel);
- if (rc)
- goto fail;
-
- efx_for_each_channel_tx_queue(tx_queue, channel) {
- rc = efx_probe_tx_queue(tx_queue);
- if (rc)
- goto fail;
- }
-
- efx_for_each_channel_rx_queue(rx_queue, channel) {
- rc = efx_probe_rx_queue(rx_queue);
- if (rc)
- goto fail;
- }
-
- channel->rx_list = NULL;
-
- return 0;
-
-fail:
- efx_remove_channel(channel);
- return rc;
-}
-
-static void
-efx_get_channel_name(struct efx_channel *channel, char *buf, size_t len)
-{
- struct efx_nic *efx = channel->efx;
- const char *type;
- int number;
-
- number = channel->channel;
-
- if (number >= efx->xdp_channel_offset &&
- !WARN_ON_ONCE(!efx->n_xdp_channels)) {
- type = "-xdp";
- number -= efx->xdp_channel_offset;
- } else if (efx->tx_channel_offset == 0) {
- type = "";
- } else if (number < efx->tx_channel_offset) {
- type = "-rx";
- } else {
- type = "-tx";
- number -= efx->tx_channel_offset;
- }
- snprintf(buf, len, "%s%s-%d", efx->name, type, number);
-}
-
-static void efx_set_channel_names(struct efx_nic *efx)
-{
- struct efx_channel *channel;
-
- efx_for_each_channel(channel, efx)
- channel->type->get_name(channel,
- efx->msi_context[channel->channel].name,
- sizeof(efx->msi_context[0].name));
-}
-
-static int efx_probe_channels(struct efx_nic *efx)
-{
- struct efx_channel *channel;
- int rc;
-
- /* Restart special buffer allocation */
- efx->next_buffer_table = 0;
-
- /* Probe channels in reverse, so that any 'extra' channels
- * use the start of the buffer table. This allows the traffic
- * channels to be resized without moving them or wasting the
- * entries before them.
- */
- efx_for_each_channel_rev(channel, efx) {
- rc = efx_probe_channel(channel);
- if (rc) {
- netif_err(efx, probe, efx->net_dev,
- "failed to create channel %d\n",
- channel->channel);
- goto fail;
- }
- }
- efx_set_channel_names(efx);
-
- return 0;
-
-fail:
- efx_remove_channels(efx);
- return rc;
-}
-
-/* Channels are shutdown and reinitialised whilst the NIC is running
- * to propagate configuration changes (mtu, checksum offload), or
- * to clear hardware error conditions
- */
-static void efx_start_datapath(struct efx_nic *efx)
-{
- netdev_features_t old_features = efx->net_dev->features;
- bool old_rx_scatter = efx->rx_scatter;
- struct efx_tx_queue *tx_queue;
- struct efx_rx_queue *rx_queue;
- struct efx_channel *channel;
- size_t rx_buf_len;
-
- /* Calculate the rx buffer allocation parameters required to
- * support the current MTU, including padding for header
- * alignment and overruns.
- */
- efx->rx_dma_len = (efx->rx_prefix_size +
- EFX_MAX_FRAME_LEN(efx->net_dev->mtu) +
- efx->type->rx_buffer_padding);
- rx_buf_len = (sizeof(struct efx_rx_page_state) + XDP_PACKET_HEADROOM +
- efx->rx_ip_align + efx->rx_dma_len);
- if (rx_buf_len <= PAGE_SIZE) {
- efx->rx_scatter = efx->type->always_rx_scatter;
- efx->rx_buffer_order = 0;
- } else if (efx->type->can_rx_scatter) {
- BUILD_BUG_ON(EFX_RX_USR_BUF_SIZE % L1_CACHE_BYTES);
- BUILD_BUG_ON(sizeof(struct efx_rx_page_state) +
- 2 * ALIGN(NET_IP_ALIGN + EFX_RX_USR_BUF_SIZE,
- EFX_RX_BUF_ALIGNMENT) >
- PAGE_SIZE);
- efx->rx_scatter = true;
- efx->rx_dma_len = EFX_RX_USR_BUF_SIZE;
- efx->rx_buffer_order = 0;
- } else {
- efx->rx_scatter = false;
- efx->rx_buffer_order = get_order(rx_buf_len);
- }
-
- efx_rx_config_page_split(efx);
- if (efx->rx_buffer_order)
- netif_dbg(efx, drv, efx->net_dev,
- "RX buf len=%u; page order=%u batch=%u\n",
- efx->rx_dma_len, efx->rx_buffer_order,
- efx->rx_pages_per_batch);
- else
- netif_dbg(efx, drv, efx->net_dev,
- "RX buf len=%u step=%u bpp=%u; page batch=%u\n",
- efx->rx_dma_len, efx->rx_page_buf_step,
- efx->rx_bufs_per_page, efx->rx_pages_per_batch);
-
- /* Restore previously fixed features in hw_features and remove
- * features which are fixed now
- */
- efx->net_dev->hw_features |= efx->net_dev->features;
- efx->net_dev->hw_features &= ~efx->fixed_features;
- efx->net_dev->features |= efx->fixed_features;
- if (efx->net_dev->features != old_features)
- netdev_features_change(efx->net_dev);
-
- /* RX filters may also have scatter-enabled flags */
- if (efx->rx_scatter != old_rx_scatter)
- efx->type->filter_update_rx_scatter(efx);
-
- /* We must keep at least one descriptor in a TX ring empty.
- * We could avoid this when the queue size does not exactly
- * match the hardware ring size, but it's not that important.
- * Therefore we stop the queue when one more skb might fill
- * the ring completely. We wake it when half way back to
- * empty.
- */
- efx->txq_stop_thresh = efx->txq_entries - efx_tx_max_skb_descs(efx);
- efx->txq_wake_thresh = efx->txq_stop_thresh / 2;
-
- /* Initialise the channels */
- efx_for_each_channel(channel, efx) {
- efx_for_each_channel_tx_queue(tx_queue, channel) {
- efx_init_tx_queue(tx_queue);
- atomic_inc(&efx->active_queues);
- }
-
- efx_for_each_channel_rx_queue(rx_queue, channel) {
- efx_init_rx_queue(rx_queue);
- atomic_inc(&efx->active_queues);
- efx_stop_eventq(channel);
- efx_fast_push_rx_descriptors(rx_queue, false);
- efx_start_eventq(channel);
- }
-
- WARN_ON(channel->rx_pkt_n_frags);
- }
-
- efx_ptp_start_datapath(efx);
-
- if (netif_device_present(efx->net_dev))
- netif_tx_wake_all_queues(efx->net_dev);
-}
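
The two thresholds computed above give the TX path simple hysteresis: the queue stops while one more worst-case skb might overfill the ring, and wakes only once the ring has drained half way back, so it does not flap on every completion. The policy in isolation, with hypothetical structure and field names:

#include <stdbool.h>

struct txq_state {
	unsigned int insert_count;	/* producer index (free-running) */
	unsigned int read_count;	/* consumer index (free-running) */
	unsigned int stop_thresh;	/* entries - max descs per skb */
	unsigned int wake_thresh;	/* stop_thresh / 2 */
};

static unsigned int txq_fill_level(const struct txq_state *q)
{
	/* Unsigned subtraction handles index wraparound. */
	return q->insert_count - q->read_count;
}

static bool txq_should_stop(const struct txq_state *q)
{
	return txq_fill_level(q) >= q->stop_thresh;
}

static bool txq_should_wake(const struct txq_state *q)
{
	return txq_fill_level(q) <= q->wake_thresh;
}
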
-
-static void efx_stop_datapath(struct efx_nic *efx)
-{
- struct efx_channel *channel;
- struct efx_tx_queue *tx_queue;
- struct efx_rx_queue *rx_queue;
- int rc;
-
- EFX_ASSERT_RESET_SERIALISED(efx);
- BUG_ON(efx->port_enabled);
-
- efx_ptp_stop_datapath(efx);
-
- /* Stop RX refill */
- efx_for_each_channel(channel, efx) {
- efx_for_each_channel_rx_queue(rx_queue, channel)
- rx_queue->refill_enabled = false;
- }
-
- efx_for_each_channel(channel, efx) {
- /* RX packet processing is pipelined, so wait for the
- * NAPI handler to complete. At least event queue 0
- * might be kept active by non-data events, so don't
- * use napi_synchronize() but actually disable NAPI
- * temporarily.
- */
- if (efx_channel_has_rx_queue(channel)) {
- efx_stop_eventq(channel);
- efx_start_eventq(channel);
- }
- }
-
- rc = efx->type->fini_dmaq(efx);
- if (rc) {
- netif_err(efx, drv, efx->net_dev, "failed to flush queues\n");
- } else {
- netif_dbg(efx, drv, efx->net_dev,
- "successfully flushed all queues\n");
- }
-
- efx_for_each_channel(channel, efx) {
- efx_for_each_channel_rx_queue(rx_queue, channel)
- efx_fini_rx_queue(rx_queue);
- efx_for_each_possible_channel_tx_queue(tx_queue, channel)
- efx_fini_tx_queue(tx_queue);
- }
- efx->xdp_rxq_info_failed = false;
-}
-
-static void efx_remove_channel(struct efx_channel *channel)
-{
- struct efx_tx_queue *tx_queue;
- struct efx_rx_queue *rx_queue;
-
- netif_dbg(channel->efx, drv, channel->efx->net_dev,
- "destroy chan %d\n", channel->channel);
-
- efx_for_each_channel_rx_queue(rx_queue, channel)
- efx_remove_rx_queue(rx_queue);
- efx_for_each_possible_channel_tx_queue(tx_queue, channel)
- efx_remove_tx_queue(tx_queue);
- efx_remove_eventq(channel);
- channel->type->post_remove(channel);
-}
-
-static void efx_remove_channels(struct efx_nic *efx)
-{
- struct efx_channel *channel;
-
- efx_for_each_channel(channel, efx)
- efx_remove_channel(channel);
-
- kfree(efx->xdp_tx_queues);
-}
-
-int
-efx_realloc_channels(struct efx_nic *efx, u32 rxq_entries, u32 txq_entries)
-{
- struct efx_channel *other_channel[EFX_MAX_CHANNELS], *channel;
- u32 old_rxq_entries, old_txq_entries;
- unsigned i, next_buffer_table = 0;
- int rc, rc2;
-
- rc = efx_check_disabled(efx);
- if (rc)
- return rc;
-
- /* Not all channels should be reallocated. We must avoid
- * reallocating their buffer table entries.
- */
- efx_for_each_channel(channel, efx) {
- struct efx_rx_queue *rx_queue;
- struct efx_tx_queue *tx_queue;
-
- if (channel->type->copy)
- continue;
- next_buffer_table = max(next_buffer_table,
- channel->eventq.index +
- channel->eventq.entries);
- efx_for_each_channel_rx_queue(rx_queue, channel)
- next_buffer_table = max(next_buffer_table,
- rx_queue->rxd.index +
- rx_queue->rxd.entries);
- efx_for_each_channel_tx_queue(tx_queue, channel)
- next_buffer_table = max(next_buffer_table,
- tx_queue->txd.index +
- tx_queue->txd.entries);
- }
-
- efx_device_detach_sync(efx);
- efx_stop_all(efx);
- efx_soft_disable_interrupts(efx);
-
- /* Clone channels (where possible) */
- memset(other_channel, 0, sizeof(other_channel));
- for (i = 0; i < efx->n_channels; i++) {
- channel = efx->channel[i];
- if (channel->type->copy)
- channel = channel->type->copy(channel);
- if (!channel) {
- rc = -ENOMEM;
- goto out;
- }
- other_channel[i] = channel;
- }
-
- /* Swap entry counts and channel pointers */
- old_rxq_entries = efx->rxq_entries;
- old_txq_entries = efx->txq_entries;
- efx->rxq_entries = rxq_entries;
- efx->txq_entries = txq_entries;
- for (i = 0; i < efx->n_channels; i++) {
- channel = efx->channel[i];
- efx->channel[i] = other_channel[i];
- other_channel[i] = channel;
- }
-
- /* Restart buffer table allocation */
- efx->next_buffer_table = next_buffer_table;
-
- for (i = 0; i < efx->n_channels; i++) {
- channel = efx->channel[i];
- if (!channel->type->copy)
- continue;
- rc = efx_probe_channel(channel);
- if (rc)
- goto rollback;
- efx_init_napi_channel(efx->channel[i]);
- }
-
-out:
- /* Destroy unused channel structures */
- for (i = 0; i < efx->n_channels; i++) {
- channel = other_channel[i];
- if (channel && channel->type->copy) {
- efx_fini_napi_channel(channel);
- efx_remove_channel(channel);
- kfree(channel);
- }
- }
-
- rc2 = efx_soft_enable_interrupts(efx);
- if (rc2) {
- rc = rc ? rc : rc2;
- netif_err(efx, drv, efx->net_dev,
- "unable to restart interrupts on channel reallocation\n");
- efx_schedule_reset(efx, RESET_TYPE_DISABLE);
- } else {
- efx_start_all(efx);
- efx_device_attach_if_not_resetting(efx);
- }
- return rc;
-
-rollback:
- /* Swap back */
- efx->rxq_entries = old_rxq_entries;
- efx->txq_entries = old_txq_entries;
- for (i = 0; i < efx->n_channels; i++) {
- channel = efx->channel[i];
- efx->channel[i] = other_channel[i];
- other_channel[i] = channel;
- }
- goto out;
-}
-
-void efx_schedule_slow_fill(struct efx_rx_queue *rx_queue)
-{
- mod_timer(&rx_queue->slow_fill, jiffies + msecs_to_jiffies(10));
-}
-
-static bool efx_default_channel_want_txqs(struct efx_channel *channel)
-{
- return channel->channel - channel->efx->tx_channel_offset <
- channel->efx->n_tx_channels;
-}
-
-static const struct efx_channel_type efx_default_channel_type = {
- .pre_probe = efx_channel_dummy_op_int,
- .post_remove = efx_channel_dummy_op_void,
- .get_name = efx_get_channel_name,
- .copy = efx_copy_channel,
- .want_txqs = efx_default_channel_want_txqs,
- .keep_eventq = false,
- .want_pio = true,
-};
-
-int efx_channel_dummy_op_int(struct efx_channel *channel)
-{
- return 0;
-}
-
-void efx_channel_dummy_op_void(struct efx_channel *channel)
-{
-}
-
/**************************************************************************
*
* Port handling
*
**************************************************************************/
-/* This ensures that the kernel is kept informed (via
- * netif_carrier_on/off) of the link status, and also keeps the
- * port's TX queue stopped or started to match that status.
- */
-void efx_link_status_changed(struct efx_nic *efx)
-{
- struct efx_link_state *link_state = &efx->link_state;
-
- /* SFC Bug 5356: A net_dev notifier is registered, so we must ensure
- * that no events are triggered between unregister_netdev() and the
- * driver unloading. A more general condition is that NETDEV_CHANGE
- * can only be generated between NETDEV_UP and NETDEV_DOWN */
- if (!netif_running(efx->net_dev))
- return;
-
- if (link_state->up != netif_carrier_ok(efx->net_dev)) {
- efx->n_link_state_changes++;
-
- if (link_state->up)
- netif_carrier_on(efx->net_dev);
- else
- netif_carrier_off(efx->net_dev);
- }
-
- /* Status message for kernel log */
- if (link_state->up)
- netif_info(efx, link, efx->net_dev,
- "link up at %uMbps %s-duplex (MTU %d)\n",
- link_state->speed, link_state->fd ? "full" : "half",
- efx->net_dev->mtu);
- else
- netif_info(efx, link, efx->net_dev, "link down\n");
-}
-
-void efx_link_set_advertising(struct efx_nic *efx,
- const unsigned long *advertising)
-{
- memcpy(efx->link_advertising, advertising,
- sizeof(__ETHTOOL_DECLARE_LINK_MODE_MASK()));
-
- efx->link_advertising[0] |= ADVERTISED_Autoneg;
- if (advertising[0] & ADVERTISED_Pause)
- efx->wanted_fc |= (EFX_FC_TX | EFX_FC_RX);
- else
- efx->wanted_fc &= ~(EFX_FC_TX | EFX_FC_RX);
- if (advertising[0] & ADVERTISED_Asym_Pause)
- efx->wanted_fc ^= EFX_FC_TX;
-}
-
/* Equivalent to efx_link_set_advertising with all-zeroes, except does not
* force the Autoneg bit on.
*/
@@ -1035,73 +160,6 @@ void efx_link_set_wanted_fc(struct efx_nic *efx, u8 wanted_fc)
static void efx_fini_port(struct efx_nic *efx);
-/* We assume that efx->type->reconfigure_mac will always try to sync RX
- * filters and therefore needs to read-lock the filter table against freeing
- */
-void efx_mac_reconfigure(struct efx_nic *efx)
-{
- down_read(&efx->filter_sem);
- efx->type->reconfigure_mac(efx);
- up_read(&efx->filter_sem);
-}
-
-/* Push loopback/power/transmit disable settings to the PHY, and reconfigure
- * the MAC appropriately. All other PHY configuration changes are pushed
- * through phy_op->set_settings(), and pushed asynchronously to the MAC
- * through efx_monitor().
- *
- * Callers must hold the mac_lock
- */
-int __efx_reconfigure_port(struct efx_nic *efx)
-{
- enum efx_phy_mode phy_mode;
- int rc;
-
- WARN_ON(!mutex_is_locked(&efx->mac_lock));
-
- /* Disable PHY transmit in mac level loopbacks */
- phy_mode = efx->phy_mode;
- if (LOOPBACK_INTERNAL(efx))
- efx->phy_mode |= PHY_MODE_TX_DISABLED;
- else
- efx->phy_mode &= ~PHY_MODE_TX_DISABLED;
-
- rc = efx->type->reconfigure_port(efx);
-
- if (rc)
- efx->phy_mode = phy_mode;
-
- return rc;
-}
-
-/* Reinitialise the MAC to pick up new PHY settings, even if the port is
- * disabled. */
-int efx_reconfigure_port(struct efx_nic *efx)
-{
- int rc;
-
- EFX_ASSERT_RESET_SERIALISED(efx);
-
- mutex_lock(&efx->mac_lock);
- rc = __efx_reconfigure_port(efx);
- mutex_unlock(&efx->mac_lock);
-
- return rc;
-}
-
-/* Asynchronous work item for changing MAC promiscuity and multicast
- * hash. Avoid a drain/rx_ingress enable by reconfiguring the current
- * MAC directly. */
-static void efx_mac_work(struct work_struct *data)
-{
- struct efx_nic *efx = container_of(data, struct efx_nic, mac_work);
-
- mutex_lock(&efx->mac_lock);
- if (efx->port_enabled)
- efx_mac_reconfigure(efx);
- mutex_unlock(&efx->mac_lock);
-}
-
static int efx_probe_port(struct efx_nic *efx)
{
int rc;
@@ -1155,44 +213,6 @@ fail1:
return rc;
}
-static void efx_start_port(struct efx_nic *efx)
-{
- netif_dbg(efx, ifup, efx->net_dev, "start port\n");
- BUG_ON(efx->port_enabled);
-
- mutex_lock(&efx->mac_lock);
- efx->port_enabled = true;
-
- /* Ensure MAC ingress/egress is enabled */
- efx_mac_reconfigure(efx);
-
- mutex_unlock(&efx->mac_lock);
-}
-
-/* Cancel work for MAC reconfiguration, periodic hardware monitoring
- * and the async self-test, wait for them to finish and prevent them
- * being scheduled again. This doesn't cover online resets, which
- * should only be cancelled when removing the device.
- */
-static void efx_stop_port(struct efx_nic *efx)
-{
- netif_dbg(efx, ifdown, efx->net_dev, "stop port\n");
-
- EFX_ASSERT_RESET_SERIALISED(efx);
-
- mutex_lock(&efx->mac_lock);
- efx->port_enabled = false;
- mutex_unlock(&efx->mac_lock);
-
- /* Serialise against efx_set_multicast_list() */
- netif_addr_lock_bh(efx->net_dev);
- netif_addr_unlock_bh(efx->net_dev);
-
- cancel_delayed_work_sync(&efx->monitor_work);
- efx_selftest_async_cancel(efx);
- cancel_work_sync(&efx->mac_work);
-}
-
static void efx_fini_port(struct efx_nic *efx)
{
netif_dbg(efx, drv, efx->net_dev, "shut down port\n");
@@ -1291,582 +311,6 @@ static void efx_dissociate(struct efx_nic *efx)
}
}
-/* This configures the PCI device to enable I/O and DMA. */
-static int efx_init_io(struct efx_nic *efx)
-{
- struct pci_dev *pci_dev = efx->pci_dev;
- dma_addr_t dma_mask = efx->type->max_dma_mask;
- unsigned int mem_map_size = efx->type->mem_map_size(efx);
- int rc, bar;
-
- netif_dbg(efx, probe, efx->net_dev, "initialising I/O\n");
-
- bar = efx->type->mem_bar(efx);
-
- rc = pci_enable_device(pci_dev);
- if (rc) {
- netif_err(efx, probe, efx->net_dev,
- "failed to enable PCI device\n");
- goto fail1;
- }
-
- pci_set_master(pci_dev);
-
- /* Set the PCI DMA mask. Try all possibilities from our genuine mask
- * down to 32 bits, because some architectures will allow 40 bit
- * masks even though they reject 46 bit masks.
- */
- while (dma_mask > 0x7fffffffUL) {
- rc = dma_set_mask_and_coherent(&pci_dev->dev, dma_mask);
- if (rc == 0)
- break;
- dma_mask >>= 1;
- }
- if (rc) {
- netif_err(efx, probe, efx->net_dev,
- "could not find a suitable DMA mask\n");
- goto fail2;
- }
- netif_dbg(efx, probe, efx->net_dev,
- "using DMA mask %llx\n", (unsigned long long) dma_mask);
-
- efx->membase_phys = pci_resource_start(efx->pci_dev, bar);
- rc = pci_request_region(pci_dev, bar, "sfc");
- if (rc) {
- netif_err(efx, probe, efx->net_dev,
- "request for memory BAR failed\n");
- rc = -EIO;
- goto fail3;
- }
- efx->membase = ioremap_nocache(efx->membase_phys, mem_map_size);
- if (!efx->membase) {
- netif_err(efx, probe, efx->net_dev,
- "could not map memory BAR at %llx+%x\n",
- (unsigned long long)efx->membase_phys, mem_map_size);
- rc = -ENOMEM;
- goto fail4;
- }
- netif_dbg(efx, probe, efx->net_dev,
- "memory BAR at %llx+%x (virtual %p)\n",
- (unsigned long long)efx->membase_phys, mem_map_size,
- efx->membase);
-
- return 0;
-
- fail4:
- pci_release_region(efx->pci_dev, bar);
- fail3:
- efx->membase_phys = 0;
- fail2:
- pci_disable_device(efx->pci_dev);
- fail1:
- return rc;
-}
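
The DMA-mask loop above is a general fallback pattern: start from the device's widest supported mask and halve the addressable range until the platform accepts one, giving up below 32 bits. A sketch of just that loop, assuming only the standard dma_set_mask_and_coherent() API:

#include <linux/dma-mapping.h>
#include <linux/pci.h>

static int set_widest_dma_mask(struct pci_dev *pdev, u64 mask)
{
	int rc = -EIO;

	while (mask > DMA_BIT_MASK(31)) {
		rc = dma_set_mask_and_coherent(&pdev->dev, mask);
		if (rc == 0)
			break;		/* platform accepted this width */
		mask >>= 1;		/* try one bit narrower */
	}
	return rc;
}
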
-
-static void efx_fini_io(struct efx_nic *efx)
-{
- int bar;
-
- netif_dbg(efx, drv, efx->net_dev, "shutting down I/O\n");
-
- if (efx->membase) {
- iounmap(efx->membase);
- efx->membase = NULL;
- }
-
- if (efx->membase_phys) {
- bar = efx->type->mem_bar(efx);
- pci_release_region(efx->pci_dev, bar);
- efx->membase_phys = 0;
- }
-
- /* Don't disable bus-mastering if VFs are assigned */
- if (!pci_vfs_assigned(efx->pci_dev))
- pci_disable_device(efx->pci_dev);
-}
-
-void efx_set_default_rx_indir_table(struct efx_nic *efx,
- struct efx_rss_context *ctx)
-{
- size_t i;
-
- for (i = 0; i < ARRAY_SIZE(ctx->rx_indir_table); i++)
- ctx->rx_indir_table[i] =
- ethtool_rxfh_indir_default(i, efx->rss_spread);
-}
-
-static unsigned int efx_wanted_parallelism(struct efx_nic *efx)
-{
- cpumask_var_t thread_mask;
- unsigned int count;
- int cpu;
-
- if (rss_cpus) {
- count = rss_cpus;
- } else {
- if (unlikely(!zalloc_cpumask_var(&thread_mask, GFP_KERNEL))) {
- netif_warn(efx, probe, efx->net_dev,
- "RSS disabled due to allocation failure\n");
- return 1;
- }
-
- count = 0;
- for_each_online_cpu(cpu) {
- if (!cpumask_test_cpu(cpu, thread_mask)) {
- ++count;
- cpumask_or(thread_mask, thread_mask,
- topology_sibling_cpumask(cpu));
- }
- }
-
- free_cpumask_var(thread_mask);
- }
-
- if (count > EFX_MAX_RX_QUEUES) {
- netif_cond_dbg(efx, probe, efx->net_dev, !rss_cpus, warn,
- "Reducing number of rx queues from %u to %u.\n",
- count, EFX_MAX_RX_QUEUES);
- count = EFX_MAX_RX_QUEUES;
- }
-
- /* If RSS is requested for the PF *and* VFs then we can't write RSS
- * table entries that are inaccessible to VFs
- */
-#ifdef CONFIG_SFC_SRIOV
- if (efx->type->sriov_wanted) {
- if (efx->type->sriov_wanted(efx) && efx_vf_size(efx) > 1 &&
- count > efx_vf_size(efx)) {
- netif_warn(efx, probe, efx->net_dev,
- "Reducing number of RSS channels from %u to %u for "
- "VF support. Increase vf-msix-limit to use more "
- "channels on the PF.\n",
- count, efx_vf_size(efx));
- count = efx_vf_size(efx);
- }
- }
-#endif
-
- return count;
-}
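
The counting loop above assigns one RX queue per physical core: each visited CPU's SMT sibling mask is OR-ed into a scratch mask, so hyperthread siblings are never counted twice. The idiom in isolation (a sketch; allocation failure reduced to a single-queue fallback, as in the driver):

#include <linux/cpumask.h>
#include <linux/gfp.h>
#include <linux/topology.h>

static unsigned int count_physical_cores(void)
{
	cpumask_var_t seen;
	unsigned int count = 0;
	int cpu;

	if (!zalloc_cpumask_var(&seen, GFP_KERNEL))
		return 1;

	for_each_online_cpu(cpu) {
		if (!cpumask_test_cpu(cpu, seen)) {
			++count;
			/* mark this core's SMT siblings as seen */
			cpumask_or(seen, seen, topology_sibling_cpumask(cpu));
		}
	}
	free_cpumask_var(seen);
	return count;
}
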
-
-static int efx_allocate_msix_channels(struct efx_nic *efx,
- unsigned int max_channels,
- unsigned int extra_channels,
- unsigned int parallelism)
-{
- unsigned int n_channels = parallelism;
- int vec_count;
- int n_xdp_tx;
- int n_xdp_ev;
-
- if (efx_separate_tx_channels)
- n_channels *= 2;
- n_channels += extra_channels;
-
- /* To allow XDP transmit to happen from arbitrary NAPI contexts
- * we allocate a TX queue per CPU. We share event queues across
- * multiple tx queues, assuming tx and ev queues are both
- * maximum size.
- */
-
- n_xdp_tx = num_possible_cpus();
- n_xdp_ev = DIV_ROUND_UP(n_xdp_tx, EFX_TXQ_TYPES);
-
- vec_count = pci_msix_vec_count(efx->pci_dev);
- if (vec_count < 0)
- return vec_count;
-
- max_channels = min_t(unsigned int, vec_count, max_channels);
-
- /* Check resources.
- * We need a channel per event queue, plus a VI per tx queue.
- * This may be more pessimistic than it needs to be.
- */
- if (n_channels + n_xdp_ev > max_channels) {
- netif_err(efx, drv, efx->net_dev,
- "Insufficient resources for %d XDP event queues (%d other channels, max %d)\n",
- n_xdp_ev, n_channels, max_channels);
- efx->n_xdp_channels = 0;
- efx->xdp_tx_per_channel = 0;
- efx->xdp_tx_queue_count = 0;
- } else {
- efx->n_xdp_channels = n_xdp_ev;
- efx->xdp_tx_per_channel = EFX_TXQ_TYPES;
- efx->xdp_tx_queue_count = n_xdp_tx;
- n_channels += n_xdp_ev;
- netif_dbg(efx, drv, efx->net_dev,
- "Allocating %d TX and %d event queues for XDP\n",
- n_xdp_tx, n_xdp_ev);
- }
-
- if (vec_count < n_channels) {
- netif_err(efx, drv, efx->net_dev,
- "WARNING: Insufficient MSI-X vectors available (%d < %u).\n",
- vec_count, n_channels);
- netif_err(efx, drv, efx->net_dev,
- "WARNING: Performance may be reduced.\n");
- n_channels = vec_count;
- }
-
- n_channels = min(n_channels, max_channels);
-
- efx->n_channels = n_channels;
-
- /* Ignore XDP tx channels when creating rx channels. */
- n_channels -= efx->n_xdp_channels;
-
- if (efx_separate_tx_channels) {
- efx->n_tx_channels =
- min(max(n_channels / 2, 1U),
- efx->max_tx_channels);
- efx->tx_channel_offset =
- n_channels - efx->n_tx_channels;
- efx->n_rx_channels =
- max(n_channels -
- efx->n_tx_channels, 1U);
- } else {
- efx->n_tx_channels = min(n_channels, efx->max_tx_channels);
- efx->tx_channel_offset = 0;
- efx->n_rx_channels = n_channels;
- }
-
- efx->n_rx_channels = min(efx->n_rx_channels, parallelism);
- efx->n_tx_channels = min(efx->n_tx_channels, parallelism);
-
- efx->xdp_channel_offset = n_channels;
-
- netif_dbg(efx, drv, efx->net_dev,
- "Allocating %u RX channels\n",
- efx->n_rx_channels);
-
- return efx->n_channels;
-}
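
The XDP sizing above reduces to one TX queue per possible CPU, packed EFX_TXQ_TYPES at a time behind shared event queues. With hypothetical figures:

#include <stdio.h>

#define TXQ_TYPES 4u		/* hypothetical EFX_TXQ_TYPES */

int main(void)
{
	unsigned int n_xdp_tx = 12;	/* e.g. num_possible_cpus() */
	unsigned int n_xdp_ev = (n_xdp_tx + TXQ_TYPES - 1) / TXQ_TYPES;

	/* DIV_ROUND_UP: 12 TX queues fit behind 3 event queues */
	printf("%u XDP TX queues -> %u event queues\n", n_xdp_tx, n_xdp_ev);
	return 0;
}
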
-
-/* Probe the number and type of interrupts we are able to obtain, and
- * the resulting numbers of channels and RX queues.
- */
-static int efx_probe_interrupts(struct efx_nic *efx)
-{
- unsigned int extra_channels = 0;
- unsigned int rss_spread;
- unsigned int i, j;
- int rc;
-
- for (i = 0; i < EFX_MAX_EXTRA_CHANNELS; i++)
- if (efx->extra_channel_type[i])
- ++extra_channels;
-
- if (efx->interrupt_mode == EFX_INT_MODE_MSIX) {
- unsigned int parallelism = efx_wanted_parallelism(efx);
- struct msix_entry xentries[EFX_MAX_CHANNELS];
- unsigned int n_channels;
-
- rc = efx_allocate_msix_channels(efx, efx->max_channels,
- extra_channels, parallelism);
- if (rc >= 0) {
- n_channels = rc;
- for (i = 0; i < n_channels; i++)
- xentries[i].entry = i;
- rc = pci_enable_msix_range(efx->pci_dev, xentries, 1,
- n_channels);
- }
- if (rc < 0) {
- /* Fall back to single channel MSI */
- netif_err(efx, drv, efx->net_dev,
- "could not enable MSI-X\n");
- if (efx->type->min_interrupt_mode >= EFX_INT_MODE_MSI)
- efx->interrupt_mode = EFX_INT_MODE_MSI;
- else
- return rc;
- } else if (rc < n_channels) {
- netif_err(efx, drv, efx->net_dev,
- "WARNING: Insufficient MSI-X vectors"
- " available (%d < %u).\n", rc, n_channels);
- netif_err(efx, drv, efx->net_dev,
- "WARNING: Performance may be reduced.\n");
- n_channels = rc;
- }
-
- if (rc > 0) {
- for (i = 0; i < efx->n_channels; i++)
- efx_get_channel(efx, i)->irq =
- xentries[i].vector;
- }
- }
-
- /* Try single interrupt MSI */
- if (efx->interrupt_mode == EFX_INT_MODE_MSI) {
- efx->n_channels = 1;
- efx->n_rx_channels = 1;
- efx->n_tx_channels = 1;
- efx->n_xdp_channels = 0;
- efx->xdp_channel_offset = efx->n_channels;
- rc = pci_enable_msi(efx->pci_dev);
- if (rc == 0) {
- efx_get_channel(efx, 0)->irq = efx->pci_dev->irq;
- } else {
- netif_err(efx, drv, efx->net_dev,
- "could not enable MSI\n");
- if (efx->type->min_interrupt_mode >= EFX_INT_MODE_LEGACY)
- efx->interrupt_mode = EFX_INT_MODE_LEGACY;
- else
- return rc;
- }
- }
-
- /* Assume legacy interrupts */
- if (efx->interrupt_mode == EFX_INT_MODE_LEGACY) {
- efx->n_channels = 1 + (efx_separate_tx_channels ? 1 : 0);
- efx->n_rx_channels = 1;
- efx->n_tx_channels = 1;
- efx->n_xdp_channels = 0;
- efx->xdp_channel_offset = efx->n_channels;
- efx->legacy_irq = efx->pci_dev->irq;
- }
-
- /* Assign extra channels if possible, before XDP channels */
- efx->n_extra_tx_channels = 0;
- j = efx->xdp_channel_offset;
- for (i = 0; i < EFX_MAX_EXTRA_CHANNELS; i++) {
- if (!efx->extra_channel_type[i])
- continue;
- if (j <= efx->tx_channel_offset + efx->n_tx_channels) {
- efx->extra_channel_type[i]->handle_no_channel(efx);
- } else {
- --j;
- efx_get_channel(efx, j)->type =
- efx->extra_channel_type[i];
- if (efx_channel_has_tx_queues(efx_get_channel(efx, j)))
- efx->n_extra_tx_channels++;
- }
- }
-
- rss_spread = efx->n_rx_channels;
- /* RSS might be usable on VFs even if it is disabled on the PF */
-#ifdef CONFIG_SFC_SRIOV
- if (efx->type->sriov_wanted) {
- efx->rss_spread = ((rss_spread > 1 ||
- !efx->type->sriov_wanted(efx)) ?
- rss_spread : efx_vf_size(efx));
- return 0;
- }
-#endif
- efx->rss_spread = rss_spread;
-
- return 0;
-}
-
-#if defined(CONFIG_SMP)
-static void efx_set_interrupt_affinity(struct efx_nic *efx)
-{
- struct efx_channel *channel;
- unsigned int cpu;
-
- efx_for_each_channel(channel, efx) {
- cpu = cpumask_local_spread(channel->channel,
- pcibus_to_node(efx->pci_dev->bus));
- irq_set_affinity_hint(channel->irq, cpumask_of(cpu));
- }
-}
-
-static void efx_clear_interrupt_affinity(struct efx_nic *efx)
-{
- struct efx_channel *channel;
-
- efx_for_each_channel(channel, efx)
- irq_set_affinity_hint(channel->irq, NULL);
-}
-#else
-static void
-efx_set_interrupt_affinity(struct efx_nic *efx __attribute__ ((unused)))
-{
-}
-
-static void
-efx_clear_interrupt_affinity(struct efx_nic *efx __attribute__ ((unused)))
-{
-}
-#endif /* CONFIG_SMP */
-
-static int efx_soft_enable_interrupts(struct efx_nic *efx)
-{
- struct efx_channel *channel, *end_channel;
- int rc;
-
- BUG_ON(efx->state == STATE_DISABLED);
-
- efx->irq_soft_enabled = true;
- smp_wmb();
-
- efx_for_each_channel(channel, efx) {
- if (!channel->type->keep_eventq) {
- rc = efx_init_eventq(channel);
- if (rc)
- goto fail;
- }
- efx_start_eventq(channel);
- }
-
- efx_mcdi_mode_event(efx);
-
- return 0;
-fail:
- end_channel = channel;
- efx_for_each_channel(channel, efx) {
- if (channel == end_channel)
- break;
- efx_stop_eventq(channel);
- if (!channel->type->keep_eventq)
- efx_fini_eventq(channel);
- }
-
- return rc;
-}
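
The failure path above is the driver's usual partial-unwind idiom: on error, undo only the entries initialised before the failing one. A sketch of the same idea (unwinding in reverse, which is equivalent to the driver's forward walk that stops at the failing channel), with hypothetical callbacks:

#include <stddef.h>

static int init_all(int (*init)(size_t), void (*fini)(size_t), size_t n)
{
	size_t i;

	for (i = 0; i < n; i++) {
		if (init(i))
			goto fail;
	}
	return 0;

fail:
	while (i--)		/* undo items 0..i-1 in reverse */
		fini(i);
	return -1;
}
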
-
-static void efx_soft_disable_interrupts(struct efx_nic *efx)
-{
- struct efx_channel *channel;
-
- if (efx->state == STATE_DISABLED)
- return;
-
- efx_mcdi_mode_poll(efx);
-
- efx->irq_soft_enabled = false;
- smp_wmb();
-
- if (efx->legacy_irq)
- synchronize_irq(efx->legacy_irq);
-
- efx_for_each_channel(channel, efx) {
- if (channel->irq)
- synchronize_irq(channel->irq);
-
- efx_stop_eventq(channel);
- if (!channel->type->keep_eventq)
- efx_fini_eventq(channel);
- }
-
- /* Flush the asynchronous MCDI request queue */
- efx_mcdi_flush_async(efx);
-}
-
-static int efx_enable_interrupts(struct efx_nic *efx)
-{
- struct efx_channel *channel, *end_channel;
- int rc;
-
- BUG_ON(efx->state == STATE_DISABLED);
-
- if (efx->eeh_disabled_legacy_irq) {
- enable_irq(efx->legacy_irq);
- efx->eeh_disabled_legacy_irq = false;
- }
-
- efx->type->irq_enable_master(efx);
-
- efx_for_each_channel(channel, efx) {
- if (channel->type->keep_eventq) {
- rc = efx_init_eventq(channel);
- if (rc)
- goto fail;
- }
- }
-
- rc = efx_soft_enable_interrupts(efx);
- if (rc)
- goto fail;
-
- return 0;
-
-fail:
- end_channel = channel;
- efx_for_each_channel(channel, efx) {
- if (channel == end_channel)
- break;
- if (channel->type->keep_eventq)
- efx_fini_eventq(channel);
- }
-
- efx->type->irq_disable_non_ev(efx);
-
- return rc;
-}
-
-static void efx_disable_interrupts(struct efx_nic *efx)
-{
- struct efx_channel *channel;
-
- efx_soft_disable_interrupts(efx);
-
- efx_for_each_channel(channel, efx) {
- if (channel->type->keep_eventq)
- efx_fini_eventq(channel);
- }
-
- efx->type->irq_disable_non_ev(efx);
-}
-
-static void efx_remove_interrupts(struct efx_nic *efx)
-{
- struct efx_channel *channel;
-
- /* Remove MSI/MSI-X interrupts */
- efx_for_each_channel(channel, efx)
- channel->irq = 0;
- pci_disable_msi(efx->pci_dev);
- pci_disable_msix(efx->pci_dev);
-
- /* Remove legacy interrupt */
- efx->legacy_irq = 0;
-}
-
-static int efx_set_channels(struct efx_nic *efx)
-{
- struct efx_channel *channel;
- struct efx_tx_queue *tx_queue;
- int xdp_queue_number;
-
- efx->tx_channel_offset =
- efx_separate_tx_channels ?
- efx->n_channels - efx->n_tx_channels : 0;
-
- if (efx->xdp_tx_queue_count) {
- EFX_WARN_ON_PARANOID(efx->xdp_tx_queues);
-
- /* Allocate array for XDP TX queue lookup. */
- efx->xdp_tx_queues = kcalloc(efx->xdp_tx_queue_count,
- sizeof(*efx->xdp_tx_queues),
- GFP_KERNEL);
- if (!efx->xdp_tx_queues)
- return -ENOMEM;
- }
-
- /* We need to mark which channels really have RX and TX
- * queues, and adjust the TX queue numbers if we have separate
- * RX-only and TX-only channels.
- */
- xdp_queue_number = 0;
- efx_for_each_channel(channel, efx) {
- if (channel->channel < efx->n_rx_channels)
- channel->rx_queue.core_index = channel->channel;
- else
- channel->rx_queue.core_index = -1;
-
- efx_for_each_channel_tx_queue(tx_queue, channel) {
- tx_queue->queue -= (efx->tx_channel_offset *
- EFX_TXQ_TYPES);
-
- if (efx_channel_is_xdp_tx(channel) &&
- xdp_queue_number < efx->xdp_tx_queue_count) {
- efx->xdp_tx_queues[xdp_queue_number] = tx_queue;
- xdp_queue_number++;
- }
- }
- }
- return 0;
-}
-
static int efx_probe_nic(struct efx_nic *efx)
{
int rc;
@@ -1939,70 +383,6 @@ static void efx_remove_nic(struct efx_nic *efx)
efx->type->remove(efx);
}
-static int efx_probe_filters(struct efx_nic *efx)
-{
- int rc;
-
- init_rwsem(&efx->filter_sem);
- mutex_lock(&efx->mac_lock);
- down_write(&efx->filter_sem);
- rc = efx->type->filter_table_probe(efx);
- if (rc)
- goto out_unlock;
-
-#ifdef CONFIG_RFS_ACCEL
- if (efx->type->offload_features & NETIF_F_NTUPLE) {
- struct efx_channel *channel;
- int i, success = 1;
-
- efx_for_each_channel(channel, efx) {
- channel->rps_flow_id =
- kcalloc(efx->type->max_rx_ip_filters,
- sizeof(*channel->rps_flow_id),
- GFP_KERNEL);
- if (!channel->rps_flow_id)
- success = 0;
- else
- for (i = 0;
- i < efx->type->max_rx_ip_filters;
- ++i)
- channel->rps_flow_id[i] =
- RPS_FLOW_ID_INVALID;
- channel->rfs_expire_index = 0;
- channel->rfs_filter_count = 0;
- }
-
- if (!success) {
- efx_for_each_channel(channel, efx)
- kfree(channel->rps_flow_id);
- efx->type->filter_table_remove(efx);
- rc = -ENOMEM;
- goto out_unlock;
- }
- }
-#endif
-out_unlock:
- up_write(&efx->filter_sem);
- mutex_unlock(&efx->mac_lock);
- return rc;
-}
-
-static void efx_remove_filters(struct efx_nic *efx)
-{
-#ifdef CONFIG_RFS_ACCEL
- struct efx_channel *channel;
-
- efx_for_each_channel(channel, efx) {
- cancel_delayed_work_sync(&channel->filter_work);
- kfree(channel->rps_flow_id);
- }
-#endif
- down_write(&efx->filter_sem);
- efx->type->filter_table_remove(efx);
- up_write(&efx->filter_sem);
-}
-
-
/**************************************************************************
*
* NIC startup/shutdown
@@ -2067,81 +447,6 @@ static int efx_probe_all(struct efx_nic *efx)
return rc;
}
-/* If the interface is supposed to be running but is not, start
- * the hardware and software data path, regular activity for the port
- * (MAC statistics, link polling, etc.) and schedule the port to be
- * reconfigured. Interrupts must already be enabled. This function
- * is safe to call multiple times, so long as the NIC is not disabled.
- * Requires the RTNL lock.
- */
-static void efx_start_all(struct efx_nic *efx)
-{
- EFX_ASSERT_RESET_SERIALISED(efx);
- BUG_ON(efx->state == STATE_DISABLED);
-
- /* Check that it is appropriate to restart the interface. All
- * of these flags are safe to read under just the rtnl lock */
- if (efx->port_enabled || !netif_running(efx->net_dev) ||
- efx->reset_pending)
- return;
-
- efx_start_port(efx);
- efx_start_datapath(efx);
-
- /* Start the hardware monitor if there is one */
- if (efx->type->monitor != NULL)
- queue_delayed_work(efx->workqueue, &efx->monitor_work,
- efx_monitor_interval);
-
- /* Link state detection is normally event-driven; we have
- * to poll now because we could have missed a change
- */
- mutex_lock(&efx->mac_lock);
- if (efx->phy_op->poll(efx))
- efx_link_status_changed(efx);
- mutex_unlock(&efx->mac_lock);
-
- efx->type->start_stats(efx);
- efx->type->pull_stats(efx);
- spin_lock_bh(&efx->stats_lock);
- efx->type->update_stats(efx, NULL, NULL);
- spin_unlock_bh(&efx->stats_lock);
-}
-
-/* Quiesce the hardware and software data path, and regular activity
- * for the port without bringing the link down. Safe to call multiple
- * times with the NIC in almost any state, but interrupts should be
- * enabled. Requires the RTNL lock.
- */
-static void efx_stop_all(struct efx_nic *efx)
-{
- EFX_ASSERT_RESET_SERIALISED(efx);
-
- /* port_enabled can be read safely under the rtnl lock */
- if (!efx->port_enabled)
- return;
-
- /* update stats before we go down so we can accurately count
- * rx_nodesc_drops
- */
- efx->type->pull_stats(efx);
- spin_lock_bh(&efx->stats_lock);
- efx->type->update_stats(efx, NULL, NULL);
- spin_unlock_bh(&efx->stats_lock);
- efx->type->stop_stats(efx);
- efx_stop_port(efx);
-
- /* Stop the kernel transmit interface. This is only valid if
- * the device is stopped or detached; otherwise the watchdog
- * may fire immediately.
- */
- WARN_ON(netif_running(efx->net_dev) &&
- netif_device_present(efx->net_dev));
- netif_tx_disable(efx->net_dev);
-
- efx_stop_datapath(efx);
-}
-
static void efx_remove_all(struct efx_nic *efx)
{
rtnl_lock();
@@ -2237,36 +542,6 @@ void efx_get_irq_moderation(struct efx_nic *efx, unsigned int *tx_usecs,
/**************************************************************************
*
- * Hardware monitor
- *
- **************************************************************************/
-
-/* Run periodically off the general workqueue */
-static void efx_monitor(struct work_struct *data)
-{
- struct efx_nic *efx = container_of(data, struct efx_nic,
- monitor_work.work);
-
- netif_vdbg(efx, timer, efx->net_dev,
- "hardware monitor executing on CPU %d\n",
- raw_smp_processor_id());
- BUG_ON(efx->type->monitor == NULL);
-
- /* If the mac_lock is already held then it is likely a port
- * reconfiguration is already in place, which will likely do
- * most of the work of monitor() anyway. */
- if (mutex_trylock(&efx->mac_lock)) {
- if (efx->port_enabled)
- efx->type->monitor(efx);
- mutex_unlock(&efx->mac_lock);
- }
-
- queue_delayed_work(efx->workqueue, &efx->monitor_work,
- efx_monitor_interval);
-}
-
-/**************************************************************************
- *
* ioctls
*
*************************************************************************/
@@ -2294,45 +569,6 @@ static int efx_ioctl(struct net_device *net_dev, struct ifreq *ifr, int cmd)
/**************************************************************************
*
- * NAPI interface
- *
- **************************************************************************/
-
-static void efx_init_napi_channel(struct efx_channel *channel)
-{
- struct efx_nic *efx = channel->efx;
-
- channel->napi_dev = efx->net_dev;
- netif_napi_add(channel->napi_dev, &channel->napi_str,
- efx_poll, napi_weight);
-}
-
-static void efx_init_napi(struct efx_nic *efx)
-{
- struct efx_channel *channel;
-
- efx_for_each_channel(channel, efx)
- efx_init_napi_channel(channel);
-}
-
-static void efx_fini_napi_channel(struct efx_channel *channel)
-{
- if (channel->napi_dev)
- netif_napi_del(&channel->napi_str);
-
- channel->napi_dev = NULL;
-}
-
-static void efx_fini_napi(struct efx_nic *efx)
-{
- struct efx_channel *channel;
-
- efx_for_each_channel(channel, efx)
- efx_fini_napi_channel(channel);
-}
-
-/**************************************************************************
- *
* Kernel net device interface
*
*************************************************************************/
@@ -2382,19 +618,8 @@ int efx_net_stop(struct net_device *net_dev)
return 0;
}
-/* Context: process, dev_base_lock or RTNL held, non-blocking. */
-static void efx_net_stats(struct net_device *net_dev,
- struct rtnl_link_stats64 *stats)
-{
- struct efx_nic *efx = netdev_priv(net_dev);
-
- spin_lock_bh(&efx->stats_lock);
- efx->type->update_stats(efx, NULL, stats);
- spin_unlock_bh(&efx->stats_lock);
-}
-
/* Context: netif_tx_lock held, BHs disabled. */
-static void efx_watchdog(struct net_device *net_dev)
+static void efx_watchdog(struct net_device *net_dev, unsigned int txqueue)
{
struct efx_nic *efx = netdev_priv(net_dev);
@@ -2405,51 +630,6 @@ static void efx_watchdog(struct net_device *net_dev)
efx_schedule_reset(efx, RESET_TYPE_TX_WATCHDOG);
}
-static unsigned int efx_xdp_max_mtu(struct efx_nic *efx)
-{
- /* The maximum MTU that we can fit in a single page, allowing for
- * framing, overhead and XDP headroom.
- */
- int overhead = EFX_MAX_FRAME_LEN(0) + sizeof(struct efx_rx_page_state) +
- efx->rx_prefix_size + efx->type->rx_buffer_padding +
- efx->rx_ip_align + XDP_PACKET_HEADROOM;
-
- return PAGE_SIZE - overhead;
-}
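
The limit above is one page minus every fixed per-packet cost. With made-up overhead figures (the real values depend on the NIC's RX prefix, buffer padding and IP alignment):

#include <stdio.h>

int main(void)
{
	unsigned int page_size = 4096;
	unsigned int overhead = 18	/* L2 framing, hypothetical */
			      + 16	/* struct efx_rx_page_state, hypothetical */
			      + 14	/* RX prefix, hypothetical */
			      + 16	/* NIC buffer padding, hypothetical */
			      + 2	/* IP alignment, hypothetical */
			      + 256;	/* XDP_PACKET_HEADROOM */

	printf("max XDP MTU ~= %u bytes\n", page_size - overhead);
	return 0;
}
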
-
-/* Context: process, rtnl_lock() held. */
-static int efx_change_mtu(struct net_device *net_dev, int new_mtu)
-{
- struct efx_nic *efx = netdev_priv(net_dev);
- int rc;
-
- rc = efx_check_disabled(efx);
- if (rc)
- return rc;
-
- if (rtnl_dereference(efx->xdp_prog) &&
- new_mtu > efx_xdp_max_mtu(efx)) {
- netif_err(efx, drv, efx->net_dev,
- "Requested MTU of %d too big for XDP (max: %d)\n",
- new_mtu, efx_xdp_max_mtu(efx));
- return -EINVAL;
- }
-
- netif_dbg(efx, drv, efx->net_dev, "changing MTU to %d\n", new_mtu);
-
- efx_device_detach_sync(efx);
- efx_stop_all(efx);
-
- mutex_lock(&efx->mac_lock);
- net_dev->mtu = new_mtu;
- efx_mac_reconfigure(efx);
- mutex_unlock(&efx->mac_lock);
-
- efx_start_all(efx);
- efx_device_attach_if_not_resetting(efx);
- return 0;
-}
-
static int efx_set_mac_address(struct net_device *net_dev, void *data)
{
struct efx_nic *efx = netdev_priv(net_dev);
@@ -2726,28 +906,6 @@ show_phy_type(struct device *dev, struct device_attribute *attr, char *buf)
}
static DEVICE_ATTR(phy_type, 0444, show_phy_type, NULL);
-#ifdef CONFIG_SFC_MCDI_LOGGING
-static ssize_t show_mcdi_log(struct device *dev, struct device_attribute *attr,
- char *buf)
-{
- struct efx_nic *efx = dev_get_drvdata(dev);
- struct efx_mcdi_iface *mcdi = efx_mcdi(efx);
-
- return scnprintf(buf, PAGE_SIZE, "%d\n", mcdi->logging_enabled);
-}
-static ssize_t set_mcdi_log(struct device *dev, struct device_attribute *attr,
- const char *buf, size_t count)
-{
- struct efx_nic *efx = dev_get_drvdata(dev);
- struct efx_mcdi_iface *mcdi = efx_mcdi(efx);
- bool enable = count > 0 && *buf != '0';
-
- mcdi->logging_enabled = enable;
- return count;
-}
-static DEVICE_ATTR(mcdi_logging, 0644, show_mcdi_log, set_mcdi_log);
-#endif
-
static int efx_register_netdev(struct efx_nic *efx)
{
struct net_device *net_dev = efx->net_dev;
@@ -2807,21 +965,11 @@ static int efx_register_netdev(struct efx_nic *efx)
"failed to init net dev attributes\n");
goto fail_registered;
}
-#ifdef CONFIG_SFC_MCDI_LOGGING
- rc = device_create_file(&efx->pci_dev->dev, &dev_attr_mcdi_logging);
- if (rc) {
- netif_err(efx, drv, efx->net_dev,
- "failed to init net dev attributes\n");
- goto fail_attr_mcdi_logging;
- }
-#endif
+
+ efx_init_mcdi_logging(efx);
return 0;
-#ifdef CONFIG_SFC_MCDI_LOGGING
-fail_attr_mcdi_logging:
- device_remove_file(&efx->pci_dev->dev, &dev_attr_phy_type);
-#endif
fail_registered:
rtnl_lock();
efx_dissociate(efx);
@@ -2842,9 +990,7 @@ static void efx_unregister_netdev(struct efx_nic *efx)
if (efx_dev_registered(efx)) {
strlcpy(efx->name, pci_name(efx->pci_dev), sizeof(efx->name));
-#ifdef CONFIG_SFC_MCDI_LOGGING
- device_remove_file(&efx->pci_dev->dev, &dev_attr_mcdi_logging);
-#endif
+ efx_fini_mcdi_logging(efx);
device_remove_file(&efx->pci_dev->dev, &dev_attr_phy_type);
unregister_netdev(efx->net_dev);
}
@@ -2852,292 +998,6 @@ static void efx_unregister_netdev(struct efx_nic *efx)
/**************************************************************************
*
- * Device reset and suspend
- *
- **************************************************************************/
-
-/* Tears down the entire software state and most of the hardware state
- * before reset. */
-void efx_reset_down(struct efx_nic *efx, enum reset_type method)
-{
- EFX_ASSERT_RESET_SERIALISED(efx);
-
- if (method == RESET_TYPE_MCDI_TIMEOUT)
- efx->type->prepare_flr(efx);
-
- efx_stop_all(efx);
- efx_disable_interrupts(efx);
-
- mutex_lock(&efx->mac_lock);
- down_write(&efx->filter_sem);
- mutex_lock(&efx->rss_lock);
- if (efx->port_initialized && method != RESET_TYPE_INVISIBLE &&
- method != RESET_TYPE_DATAPATH)
- efx->phy_op->fini(efx);
- efx->type->fini(efx);
-}
-
-/* This function will always ensure that the locks acquired in
- * efx_reset_down() are released. A failure return code indicates
- * that we were unable to reinitialise the hardware, and the
- * driver should be disabled. If ok is false, then the rx and tx
- * engines are not restarted, pending a RESET_DISABLE. */
-int efx_reset_up(struct efx_nic *efx, enum reset_type method, bool ok)
-{
- int rc;
-
- EFX_ASSERT_RESET_SERIALISED(efx);
-
- if (method == RESET_TYPE_MCDI_TIMEOUT)
- efx->type->finish_flr(efx);
-
- /* Ensure that SRAM is initialised even if we're disabling the device */
- rc = efx->type->init(efx);
- if (rc) {
- netif_err(efx, drv, efx->net_dev, "failed to initialise NIC\n");
- goto fail;
- }
-
- if (!ok)
- goto fail;
-
- if (efx->port_initialized && method != RESET_TYPE_INVISIBLE &&
- method != RESET_TYPE_DATAPATH) {
- rc = efx->phy_op->init(efx);
- if (rc)
- goto fail;
- rc = efx->phy_op->reconfigure(efx);
- if (rc && rc != -EPERM)
- netif_err(efx, drv, efx->net_dev,
- "could not restore PHY settings\n");
- }
-
- rc = efx_enable_interrupts(efx);
- if (rc)
- goto fail;
-
-#ifdef CONFIG_SFC_SRIOV
- rc = efx->type->vswitching_restore(efx);
- if (rc) /* not fatal; the PF will still work fine */
- netif_warn(efx, probe, efx->net_dev,
- "failed to restore vswitching rc=%d;"
- " VFs may not function\n", rc);
-#endif
-
- if (efx->type->rx_restore_rss_contexts)
- efx->type->rx_restore_rss_contexts(efx);
- mutex_unlock(&efx->rss_lock);
- efx->type->filter_table_restore(efx);
- up_write(&efx->filter_sem);
- if (efx->type->sriov_reset)
- efx->type->sriov_reset(efx);
-
- mutex_unlock(&efx->mac_lock);
-
- efx_start_all(efx);
-
- if (efx->type->udp_tnl_push_ports)
- efx->type->udp_tnl_push_ports(efx);
-
- return 0;
-
-fail:
- efx->port_initialized = false;
-
- mutex_unlock(&efx->rss_lock);
- up_write(&efx->filter_sem);
- mutex_unlock(&efx->mac_lock);
-
- return rc;
-}
-
-/* Reset the NIC using the specified method. Note that the reset may
- * fail, in which case the card will be left in an unusable state.
- *
- * Caller must hold the rtnl_lock.
- */
-int efx_reset(struct efx_nic *efx, enum reset_type method)
-{
- int rc, rc2;
- bool disabled;
-
- netif_info(efx, drv, efx->net_dev, "resetting (%s)\n",
- RESET_TYPE(method));
-
- efx_device_detach_sync(efx);
- efx_reset_down(efx, method);
-
- rc = efx->type->reset(efx, method);
- if (rc) {
- netif_err(efx, drv, efx->net_dev, "failed to reset hardware\n");
- goto out;
- }
-
- /* Clear flags for the scopes we covered. We assume the NIC and
- * driver are now quiescent so that there is no race here.
- */
- if (method < RESET_TYPE_MAX_METHOD)
- efx->reset_pending &= -(1 << (method + 1));
- else /* it doesn't fit into the well-ordered scope hierarchy */
- __clear_bit(method, &efx->reset_pending);
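-	/* Illustrative worked example (assumed enum values): since
-	 * -(1 << (method + 1)) == ~((1 << (method + 1)) - 1), the mask has
-	 * bits 0..method clear. If RESET_TYPE_ALL were bit 1, a completed
-	 * ALL reset would also clear a pending INVISIBLE (bit 0) while
-	 * leaving wider scopes such as WORLD pending.
-	 */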
-
- /* Reinitialise bus-mastering, which may have been turned off before
-	 * the reset was scheduled. This is still appropriate even in the
-	 * RESET_TYPE_DISABLE case, since this driver generally assumes the
-	 * hardware can respond to requests. */
- pci_set_master(efx->pci_dev);
-
-out:
- /* Leave device stopped if necessary */
- disabled = rc ||
- method == RESET_TYPE_DISABLE ||
- method == RESET_TYPE_RECOVER_OR_DISABLE;
- rc2 = efx_reset_up(efx, method, !disabled);
- if (rc2) {
- disabled = true;
- if (!rc)
- rc = rc2;
- }
-
- if (disabled) {
- dev_close(efx->net_dev);
- netif_err(efx, drv, efx->net_dev, "has been disabled\n");
- efx->state = STATE_DISABLED;
- } else {
- netif_dbg(efx, drv, efx->net_dev, "reset complete\n");
- efx_device_attach_if_not_resetting(efx);
- }
- return rc;
-}
-
-/* Try recovery mechanisms.
- * For now only EEH is supported.
- * Returns 0 if the recovery mechanisms are unsuccessful.
- * Returns a non-zero value otherwise.
- */
-int efx_try_recovery(struct efx_nic *efx)
-{
-#ifdef CONFIG_EEH
- /* A PCI error can occur and not be seen by EEH because nothing
- * happens on the PCI bus. In this case the driver may fail and
- * schedule a 'recover or reset', leading to this recovery handler.
- * Manually call the eeh failure check function.
- */
- struct eeh_dev *eehdev = pci_dev_to_eeh_dev(efx->pci_dev);
- if (eeh_dev_check_failure(eehdev)) {
- /* The EEH mechanisms will handle the error and reset the
- * device if necessary.
- */
- return 1;
- }
-#endif
- return 0;
-}
-
-static void efx_wait_for_bist_end(struct efx_nic *efx)
-{
- int i;
-
- for (i = 0; i < BIST_WAIT_DELAY_COUNT; ++i) {
- if (efx_mcdi_poll_reboot(efx))
- goto out;
- msleep(BIST_WAIT_DELAY_MS);
- }
-
- netif_err(efx, drv, efx->net_dev, "Warning: No MC reboot after BIST mode\n");
-out:
- /* Either way unset the BIST flag. If we found no reboot we probably
- * won't recover, but we should try.
- */
- efx->mc_bist_for_other_fn = false;
-}
-
-/* The worker thread exists so that code that cannot sleep can
- * schedule a reset for later.
- */
-static void efx_reset_work(struct work_struct *data)
-{
- struct efx_nic *efx = container_of(data, struct efx_nic, reset_work);
- unsigned long pending;
- enum reset_type method;
-
- pending = READ_ONCE(efx->reset_pending);
- method = fls(pending) - 1;
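-	/* fls() returns the index of the highest set bit (1-based), so the
-	 * widest-scope pending reset is serviced first; e.g. (illustrative)
-	 * with both RESET_TYPE_ALL and RESET_TYPE_WORLD pending, WORLD is
-	 * chosen and ALL is subsumed by the scope-clearing in efx_reset().
-	 */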
-
- if (method == RESET_TYPE_MC_BIST)
- efx_wait_for_bist_end(efx);
-
- if ((method == RESET_TYPE_RECOVER_OR_DISABLE ||
- method == RESET_TYPE_RECOVER_OR_ALL) &&
- efx_try_recovery(efx))
- return;
-
- if (!pending)
- return;
-
- rtnl_lock();
-
- /* We checked the state in efx_schedule_reset() but it may
- * have changed by now. Now that we have the RTNL lock,
- * it cannot change again.
- */
- if (efx->state == STATE_READY)
- (void)efx_reset(efx, method);
-
- rtnl_unlock();
-}
-
-void efx_schedule_reset(struct efx_nic *efx, enum reset_type type)
-{
- enum reset_type method;
-
- if (efx->state == STATE_RECOVERY) {
- netif_dbg(efx, drv, efx->net_dev,
- "recovering: skip scheduling %s reset\n",
- RESET_TYPE(type));
- return;
- }
-
- switch (type) {
- case RESET_TYPE_INVISIBLE:
- case RESET_TYPE_ALL:
- case RESET_TYPE_RECOVER_OR_ALL:
- case RESET_TYPE_WORLD:
- case RESET_TYPE_DISABLE:
- case RESET_TYPE_RECOVER_OR_DISABLE:
- case RESET_TYPE_DATAPATH:
- case RESET_TYPE_MC_BIST:
- case RESET_TYPE_MCDI_TIMEOUT:
- method = type;
- netif_dbg(efx, drv, efx->net_dev, "scheduling %s reset\n",
- RESET_TYPE(method));
- break;
- default:
- method = efx->type->map_reset_reason(type);
- netif_dbg(efx, drv, efx->net_dev,
- "scheduling %s reset for %s\n",
- RESET_TYPE(method), RESET_TYPE(type));
- break;
- }
-
- set_bit(method, &efx->reset_pending);
- smp_mb(); /* ensure we change reset_pending before checking state */
-
- /* If we're not READY then just leave the flags set as the cue
- * to abort probing or reschedule the reset later.
- */
- if (READ_ONCE(efx->state) != STATE_READY)
- return;
-
- /* efx_process_channel() will no longer read events once a
-	 * reset is scheduled. So switch back to polled MCDI completions. */
- efx_mcdi_mode_poll(efx);
-
- queue_work(reset_workqueue, &efx->reset_work);
-}
-
-/**************************************************************************
- *
* List of NICs we support
*
**************************************************************************/
@@ -3169,139 +1029,10 @@ static const struct pci_device_id efx_pci_table[] = {
/**************************************************************************
*
- * Dummy PHY/MAC operations
- *
- * Can be used for some unimplemented operations
- * Needed so all function pointers are valid and do not have to be tested
- * before use
- *
- **************************************************************************/
-int efx_port_dummy_op_int(struct efx_nic *efx)
-{
- return 0;
-}
-void efx_port_dummy_op_void(struct efx_nic *efx) {}
-
-static bool efx_port_dummy_op_poll(struct efx_nic *efx)
-{
- return false;
-}
-
-static const struct efx_phy_operations efx_dummy_phy_operations = {
- .init = efx_port_dummy_op_int,
- .reconfigure = efx_port_dummy_op_int,
- .poll = efx_port_dummy_op_poll,
- .fini = efx_port_dummy_op_void,
-};
-
-/**************************************************************************
- *
* Data housekeeping
*
**************************************************************************/
-/* This zeroes out and then fills in the invariants in a struct
- * efx_nic (including all sub-structures).
- */
-static int efx_init_struct(struct efx_nic *efx,
- struct pci_dev *pci_dev, struct net_device *net_dev)
-{
- int rc = -ENOMEM, i;
-
- /* Initialise common structures */
- INIT_LIST_HEAD(&efx->node);
- INIT_LIST_HEAD(&efx->secondary_list);
- spin_lock_init(&efx->biu_lock);
-#ifdef CONFIG_SFC_MTD
- INIT_LIST_HEAD(&efx->mtd_list);
-#endif
- INIT_WORK(&efx->reset_work, efx_reset_work);
- INIT_DELAYED_WORK(&efx->monitor_work, efx_monitor);
- INIT_DELAYED_WORK(&efx->selftest_work, efx_selftest_async_work);
- efx->pci_dev = pci_dev;
- efx->msg_enable = debug;
- efx->state = STATE_UNINIT;
- strlcpy(efx->name, pci_name(pci_dev), sizeof(efx->name));
-
- efx->net_dev = net_dev;
- efx->rx_prefix_size = efx->type->rx_prefix_size;
- efx->rx_ip_align =
- NET_IP_ALIGN ? (efx->rx_prefix_size + NET_IP_ALIGN) % 4 : 0;
- efx->rx_packet_hash_offset =
- efx->type->rx_hash_offset - efx->type->rx_prefix_size;
- efx->rx_packet_ts_offset =
- efx->type->rx_ts_offset - efx->type->rx_prefix_size;
- INIT_LIST_HEAD(&efx->rss_context.list);
- mutex_init(&efx->rss_lock);
- spin_lock_init(&efx->stats_lock);
- efx->vi_stride = EFX_DEFAULT_VI_STRIDE;
- efx->num_mac_stats = MC_CMD_MAC_NSTATS;
- BUILD_BUG_ON(MC_CMD_MAC_NSTATS - 1 != MC_CMD_MAC_GENERATION_END);
- mutex_init(&efx->mac_lock);
-#ifdef CONFIG_RFS_ACCEL
- mutex_init(&efx->rps_mutex);
- spin_lock_init(&efx->rps_hash_lock);
- /* Failure to allocate is not fatal, but may degrade ARFS performance */
- efx->rps_hash_table = kcalloc(EFX_ARFS_HASH_TABLE_SIZE,
- sizeof(*efx->rps_hash_table), GFP_KERNEL);
-#endif
- efx->phy_op = &efx_dummy_phy_operations;
- efx->mdio.dev = net_dev;
- INIT_WORK(&efx->mac_work, efx_mac_work);
- init_waitqueue_head(&efx->flush_wq);
-
- for (i = 0; i < EFX_MAX_CHANNELS; i++) {
- efx->channel[i] = efx_alloc_channel(efx, i, NULL);
- if (!efx->channel[i])
- goto fail;
- efx->msi_context[i].efx = efx;
- efx->msi_context[i].index = i;
- }
-
- /* Higher numbered interrupt modes are less capable! */
- if (WARN_ON_ONCE(efx->type->max_interrupt_mode >
- efx->type->min_interrupt_mode)) {
- rc = -EIO;
- goto fail;
- }
- efx->interrupt_mode = max(efx->type->max_interrupt_mode,
- interrupt_mode);
- efx->interrupt_mode = min(efx->type->min_interrupt_mode,
- interrupt_mode);
-
- /* Would be good to use the net_dev name, but we're too early */
- snprintf(efx->workqueue_name, sizeof(efx->workqueue_name), "sfc%s",
- pci_name(pci_dev));
- efx->workqueue = create_singlethread_workqueue(efx->workqueue_name);
- if (!efx->workqueue)
- goto fail;
-
- return 0;
-
-fail:
- efx_fini_struct(efx);
- return rc;
-}
-
-static void efx_fini_struct(struct efx_nic *efx)
-{
- int i;
-
-#ifdef CONFIG_RFS_ACCEL
- kfree(efx->rps_hash_table);
-#endif
-
- for (i = 0; i < EFX_MAX_CHANNELS; i++)
- kfree(efx->channel[i]);
-
- kfree(efx->vpd_sn);
-
- if (efx->workqueue) {
- destroy_workqueue(efx->workqueue);
- efx->workqueue = NULL;
- }
-}
-
void efx_update_sw_stats(struct efx_nic *efx, u64 *stats)
{
u64 n_rx_nodesc_trunc = 0;
@@ -3313,197 +1044,6 @@ void efx_update_sw_stats(struct efx_nic *efx, u64 *stats)
stats[GENERIC_STAT_rx_noskb_drops] = atomic_read(&efx->n_rx_noskb_drops);
}
-bool efx_filter_spec_equal(const struct efx_filter_spec *left,
- const struct efx_filter_spec *right)
-{
- if ((left->match_flags ^ right->match_flags) |
- ((left->flags ^ right->flags) &
- (EFX_FILTER_FLAG_RX | EFX_FILTER_FLAG_TX)))
- return false;
-
- return memcmp(&left->outer_vid, &right->outer_vid,
- sizeof(struct efx_filter_spec) -
- offsetof(struct efx_filter_spec, outer_vid)) == 0;
-}
-
-u32 efx_filter_spec_hash(const struct efx_filter_spec *spec)
-{
- BUILD_BUG_ON(offsetof(struct efx_filter_spec, outer_vid) & 3);
- return jhash2((const u32 *)&spec->outer_vid,
- (sizeof(struct efx_filter_spec) -
- offsetof(struct efx_filter_spec, outer_vid)) / 4,
- 0);
-}
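-
-/* Note: the jhash2() above covers the same byte range (from ->outer_vid to
- * the end of the spec) as the memcmp() in efx_filter_spec_equal(), so any
- * two specs that compare equal are guaranteed to hash to the same bucket.
- */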
-
-#ifdef CONFIG_RFS_ACCEL
-bool efx_rps_check_rule(struct efx_arfs_rule *rule, unsigned int filter_idx,
- bool *force)
-{
- if (rule->filter_id == EFX_ARFS_FILTER_ID_PENDING) {
- /* ARFS is currently updating this entry, leave it */
- return false;
- }
- if (rule->filter_id == EFX_ARFS_FILTER_ID_ERROR) {
- /* ARFS tried and failed to update this, so it's probably out
- * of date. Remove the filter and the ARFS rule entry.
- */
- rule->filter_id = EFX_ARFS_FILTER_ID_REMOVING;
- *force = true;
- return true;
- } else if (WARN_ON(rule->filter_id != filter_idx)) { /* can't happen */
- /* ARFS has moved on, so old filter is not needed. Since we did
- * not mark the rule with EFX_ARFS_FILTER_ID_REMOVING, it will
- * not be removed by efx_rps_hash_del() subsequently.
- */
- *force = true;
- return true;
- }
- /* Remove it iff ARFS wants to. */
- return true;
-}
-
-static
-struct hlist_head *efx_rps_hash_bucket(struct efx_nic *efx,
- const struct efx_filter_spec *spec)
-{
- u32 hash = efx_filter_spec_hash(spec);
-
- lockdep_assert_held(&efx->rps_hash_lock);
- if (!efx->rps_hash_table)
- return NULL;
- return &efx->rps_hash_table[hash % EFX_ARFS_HASH_TABLE_SIZE];
-}
-
-struct efx_arfs_rule *efx_rps_hash_find(struct efx_nic *efx,
- const struct efx_filter_spec *spec)
-{
- struct efx_arfs_rule *rule;
- struct hlist_head *head;
- struct hlist_node *node;
-
- head = efx_rps_hash_bucket(efx, spec);
- if (!head)
- return NULL;
- hlist_for_each(node, head) {
- rule = container_of(node, struct efx_arfs_rule, node);
- if (efx_filter_spec_equal(spec, &rule->spec))
- return rule;
- }
- return NULL;
-}
-
-struct efx_arfs_rule *efx_rps_hash_add(struct efx_nic *efx,
- const struct efx_filter_spec *spec,
- bool *new)
-{
- struct efx_arfs_rule *rule;
- struct hlist_head *head;
- struct hlist_node *node;
-
- head = efx_rps_hash_bucket(efx, spec);
- if (!head)
- return NULL;
- hlist_for_each(node, head) {
- rule = container_of(node, struct efx_arfs_rule, node);
- if (efx_filter_spec_equal(spec, &rule->spec)) {
- *new = false;
- return rule;
- }
- }
- rule = kmalloc(sizeof(*rule), GFP_ATOMIC);
- *new = true;
- if (rule) {
- memcpy(&rule->spec, spec, sizeof(rule->spec));
- hlist_add_head(&rule->node, head);
- }
- return rule;
-}
-
-void efx_rps_hash_del(struct efx_nic *efx, const struct efx_filter_spec *spec)
-{
- struct efx_arfs_rule *rule;
- struct hlist_head *head;
- struct hlist_node *node;
-
- head = efx_rps_hash_bucket(efx, spec);
- if (WARN_ON(!head))
- return;
- hlist_for_each(node, head) {
- rule = container_of(node, struct efx_arfs_rule, node);
- if (efx_filter_spec_equal(spec, &rule->spec)) {
- /* Someone already reused the entry. We know that if
- * this check doesn't fire (i.e. filter_id == REMOVING)
- * then the REMOVING mark was put there by our caller,
- * because caller is holding a lock on filter table and
- * only holders of that lock set REMOVING.
- */
- if (rule->filter_id != EFX_ARFS_FILTER_ID_REMOVING)
- return;
- hlist_del(node);
- kfree(rule);
- return;
- }
- }
- /* We didn't find it. */
- WARN_ON(1);
-}
-#endif
-
-/* RSS contexts. We're using linked lists and crappy O(n) algorithms, because
- * (a) this is an infrequent control-plane operation and (b) n is small (at most 64).
- */
-struct efx_rss_context *efx_alloc_rss_context_entry(struct efx_nic *efx)
-{
- struct list_head *head = &efx->rss_context.list;
- struct efx_rss_context *ctx, *new;
- u32 id = 1; /* Don't use zero, that refers to the master RSS context */
-
- WARN_ON(!mutex_is_locked(&efx->rss_lock));
-
- /* Search for first gap in the numbering */
- list_for_each_entry(ctx, head, list) {
- if (ctx->user_id != id)
- break;
- id++;
- /* Check for wrap. If this happens, we have nearly 2^32
- * allocated RSS contexts, which seems unlikely.
- */
- if (WARN_ON_ONCE(!id))
- return NULL;
- }
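-	/* Illustrative example: with user_ids 1, 2 and 4 already allocated,
-	 * the loop exits at the entry with user_id 4 and the new context
-	 * takes id 3, so the list stays sorted by user_id.
-	 */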
-
- /* Create the new entry */
- new = kmalloc(sizeof(struct efx_rss_context), GFP_KERNEL);
- if (!new)
- return NULL;
- new->context_id = EFX_EF10_RSS_CONTEXT_INVALID;
- new->rx_hash_udp_4tuple = false;
-
- /* Insert the new entry into the gap */
- new->user_id = id;
- list_add_tail(&new->list, &ctx->list);
- return new;
-}
-
-struct efx_rss_context *efx_find_rss_context_entry(struct efx_nic *efx, u32 id)
-{
- struct list_head *head = &efx->rss_context.list;
- struct efx_rss_context *ctx;
-
- WARN_ON(!mutex_is_locked(&efx->rss_lock));
-
- list_for_each_entry(ctx, head, list)
- if (ctx->user_id == id)
- return ctx;
- return NULL;
-}
-
-void efx_free_rss_context_entry(struct efx_rss_context *ctx)
-{
- list_del(&ctx->list);
- kfree(ctx);
-}
-
/**************************************************************************
*
* PCI interface
@@ -3519,7 +1059,7 @@ static void efx_pci_remove_main(struct efx_nic *efx)
* are not READY.
*/
BUG_ON(efx->state == STATE_READY);
- cancel_work_sync(&efx->reset_work);
+ efx_flush_reset_workqueue(efx);
efx_disable_interrupts(efx);
efx_clear_interrupt_affinity(efx);
@@ -3559,7 +1099,7 @@ static void efx_pci_remove(struct pci_dev *pci_dev)
efx_pci_remove_main(efx);
- efx_fini_io(efx);
+ efx_fini_io(efx, efx->type->mem_bar(efx));
netif_dbg(efx, drv, efx->net_dev, "shutdown successful\n");
efx_fini_struct(efx);
@@ -3782,7 +1322,8 @@ static int efx_pci_probe(struct pci_dev *pci_dev,
efx_probe_vpd_strings(efx);
/* Set up basic I/O (BAR mappings etc) */
- rc = efx_init_io(efx);
+ rc = efx_init_io(efx, efx->type->mem_bar(efx), efx->type->max_dma_mask,
+ efx->type->mem_map_size(efx));
if (rc)
goto fail2;
@@ -3826,7 +1367,7 @@ static int efx_pci_probe(struct pci_dev *pci_dev,
return 0;
fail3:
- efx_fini_io(efx);
+ efx_fini_io(efx, efx->type->mem_bar(efx));
fail2:
efx_fini_struct(efx);
fail1:
@@ -3904,7 +1445,7 @@ static int efx_pm_thaw(struct device *dev)
rtnl_unlock();
/* Reschedule any quenched resets scheduled during efx_pm_freeze() */
- queue_work(reset_workqueue, &efx->reset_work);
+ efx_queue_reset_work(efx);
return 0;
@@ -4083,10 +1624,6 @@ static struct pci_driver efx_pci_driver = {
*
*************************************************************************/
-module_param(interrupt_mode, uint, 0444);
-MODULE_PARM_DESC(interrupt_mode,
- "Interrupt mode (0=>MSIX 1=>MSI 2=>legacy)");
-
static int __init efx_init_module(void)
{
int rc;
@@ -4103,11 +1640,9 @@ static int __init efx_init_module(void)
goto err_sriov;
#endif
- reset_workqueue = create_singlethread_workqueue("sfc_reset");
- if (!reset_workqueue) {
- rc = -ENOMEM;
+ rc = efx_create_reset_workqueue();
+ if (rc)
goto err_reset;
- }
rc = pci_register_driver(&efx_pci_driver);
if (rc < 0)
@@ -4116,7 +1651,7 @@ static int __init efx_init_module(void)
return 0;
err_pci:
- destroy_workqueue(reset_workqueue);
+ efx_destroy_reset_workqueue();
err_reset:
#ifdef CONFIG_SFC_SRIOV
efx_fini_sriov();
@@ -4132,7 +1667,7 @@ static void __exit efx_exit_module(void)
printk(KERN_INFO "Solarflare NET driver unloading\n");
pci_unregister_driver(&efx_pci_driver);
- destroy_workqueue(reset_workqueue);
+ efx_destroy_reset_workqueue();
#ifdef CONFIG_SFC_SRIOV
efx_fini_sriov();
#endif
diff --git a/drivers/net/ethernet/sfc/efx.h b/drivers/net/ethernet/sfc/efx.h
index 2dd8d5002315..f1bdb04efbe4 100644
--- a/drivers/net/ethernet/sfc/efx.h
+++ b/drivers/net/ethernet/sfc/efx.h
@@ -15,31 +15,17 @@ int efx_net_open(struct net_device *net_dev);
int efx_net_stop(struct net_device *net_dev);
/* TX */
-int efx_probe_tx_queue(struct efx_tx_queue *tx_queue);
-void efx_remove_tx_queue(struct efx_tx_queue *tx_queue);
-void efx_init_tx_queue(struct efx_tx_queue *tx_queue);
void efx_init_tx_queue_core_txq(struct efx_tx_queue *tx_queue);
-void efx_fini_tx_queue(struct efx_tx_queue *tx_queue);
netdev_tx_t efx_hard_start_xmit(struct sk_buff *skb,
struct net_device *net_dev);
netdev_tx_t efx_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb);
void efx_xmit_done(struct efx_tx_queue *tx_queue, unsigned int index);
int efx_setup_tc(struct net_device *net_dev, enum tc_setup_type type,
void *type_data);
-unsigned int efx_tx_max_skb_descs(struct efx_nic *efx);
extern unsigned int efx_piobuf_size;
extern bool efx_separate_tx_channels;
/* RX */
-void efx_set_default_rx_indir_table(struct efx_nic *efx,
- struct efx_rss_context *ctx);
-void efx_rx_config_page_split(struct efx_nic *efx);
-int efx_probe_rx_queue(struct efx_rx_queue *rx_queue);
-void efx_remove_rx_queue(struct efx_rx_queue *rx_queue);
-void efx_init_rx_queue(struct efx_rx_queue *rx_queue);
-void efx_fini_rx_queue(struct efx_rx_queue *rx_queue);
-void efx_fast_push_rx_descriptors(struct efx_rx_queue *rx_queue, bool atomic);
-void efx_rx_slow_fill(struct timer_list *t);
void __efx_rx_packet(struct efx_channel *channel);
void efx_rx_packet(struct efx_rx_queue *rx_queue, unsigned int index,
unsigned int n_frags, unsigned int len, u16 flags);
@@ -48,7 +34,6 @@ static inline void efx_rx_flush_packet(struct efx_channel *channel)
if (channel->rx_pkt_n_frags)
__efx_rx_packet(channel);
}
-void efx_schedule_slow_fill(struct efx_rx_queue *rx_queue);
#define EFX_MAX_DMAQ_SIZE 4096UL
#define EFX_DEFAULT_DMAQ_SIZE 1024UL
@@ -80,8 +65,6 @@ static inline bool efx_rss_enabled(struct efx_nic *efx)
/* Filters */
-void efx_mac_reconfigure(struct efx_nic *efx);
-
/**
* efx_filter_insert_filter - add or replace a filter
* @efx: NIC in which to insert the filter
@@ -186,58 +169,17 @@ static inline void efx_filter_rfs_expire(struct work_struct *data)
static inline void efx_filter_rfs_expire(struct work_struct *data) {}
#define efx_filter_rfs_enabled() 0
#endif
-bool efx_filter_is_mc_recipient(const struct efx_filter_spec *spec);
-
-bool efx_filter_spec_equal(const struct efx_filter_spec *left,
- const struct efx_filter_spec *right);
-u32 efx_filter_spec_hash(const struct efx_filter_spec *spec);
-
-#ifdef CONFIG_RFS_ACCEL
-bool efx_rps_check_rule(struct efx_arfs_rule *rule, unsigned int filter_idx,
- bool *force);
-
-struct efx_arfs_rule *efx_rps_hash_find(struct efx_nic *efx,
- const struct efx_filter_spec *spec);
-
-/* @new is written to indicate if entry was newly added (true) or if an old
- * entry was found and returned (false).
- */
-struct efx_arfs_rule *efx_rps_hash_add(struct efx_nic *efx,
- const struct efx_filter_spec *spec,
- bool *new);
-
-void efx_rps_hash_del(struct efx_nic *efx, const struct efx_filter_spec *spec);
-#endif
/* RSS contexts */
-struct efx_rss_context *efx_alloc_rss_context_entry(struct efx_nic *efx);
-struct efx_rss_context *efx_find_rss_context_entry(struct efx_nic *efx, u32 id);
-void efx_free_rss_context_entry(struct efx_rss_context *ctx);
static inline bool efx_rss_active(struct efx_rss_context *ctx)
{
- return ctx->context_id != EFX_EF10_RSS_CONTEXT_INVALID;
+ return ctx->context_id != EFX_MCDI_RSS_CONTEXT_INVALID;
}
-/* Channels */
-int efx_channel_dummy_op_int(struct efx_channel *channel);
-void efx_channel_dummy_op_void(struct efx_channel *channel);
-int efx_realloc_channels(struct efx_nic *efx, u32 rxq_entries, u32 txq_entries);
-
-/* Ports */
-int efx_reconfigure_port(struct efx_nic *efx);
-int __efx_reconfigure_port(struct efx_nic *efx);
-
/* Ethtool support */
extern const struct ethtool_ops efx_ethtool_ops;
-/* Reset handling */
-int efx_reset(struct efx_nic *efx, enum reset_type method);
-void efx_reset_down(struct efx_nic *efx, enum reset_type method);
-int efx_reset_up(struct efx_nic *efx, enum reset_type method, bool ok);
-int efx_try_recovery(struct efx_nic *efx);
-
/* Global */
-void efx_schedule_reset(struct efx_nic *efx, enum reset_type type);
unsigned int efx_usecs_to_ticks(struct efx_nic *efx, unsigned int usecs);
unsigned int efx_ticks_to_usecs(struct efx_nic *efx, unsigned int ticks);
int efx_init_irq_moderation(struct efx_nic *efx, unsigned int tx_usecs,
@@ -245,8 +187,6 @@ int efx_init_irq_moderation(struct efx_nic *efx, unsigned int tx_usecs,
bool rx_may_override_tx);
void efx_get_irq_moderation(struct efx_nic *efx, unsigned int *tx_usecs,
unsigned int *rx_usecs, bool *rx_adaptive);
-void efx_stop_eventq(struct efx_channel *channel);
-void efx_start_eventq(struct efx_channel *channel);
/* Dummy PHY ops for PHY drivers */
int efx_port_dummy_op_int(struct efx_nic *efx);
@@ -293,9 +233,6 @@ static inline void efx_schedule_channel_irq(struct efx_channel *channel)
efx_schedule_channel(channel);
}
-void efx_link_status_changed(struct efx_nic *efx);
-void efx_link_set_advertising(struct efx_nic *efx,
- const unsigned long *advertising);
void efx_link_clear_advertising(struct efx_nic *efx);
void efx_link_set_wanted_fc(struct efx_nic *efx, u8);
diff --git a/drivers/net/ethernet/sfc/efx_channels.c b/drivers/net/ethernet/sfc/efx_channels.c
new file mode 100644
index 000000000000..aeb5e8aa2f2a
--- /dev/null
+++ b/drivers/net/ethernet/sfc/efx_channels.c
@@ -0,0 +1,1234 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/****************************************************************************
+ * Driver for Solarflare network controllers and boards
+ * Copyright 2018 Solarflare Communications Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation, incorporated herein by reference.
+ */
+
+#include "net_driver.h"
+#include <linux/module.h>
+#include "efx_channels.h"
+#include "efx.h"
+#include "efx_common.h"
+#include "tx_common.h"
+#include "rx_common.h"
+#include "nic.h"
+#include "sriov.h"
+
+/* This is the first interrupt mode to try out of:
+ * 0 => MSI-X
+ * 1 => MSI
+ * 2 => legacy
+ */
+static unsigned int interrupt_mode;
+module_param(interrupt_mode, uint, 0444);
+MODULE_PARM_DESC(interrupt_mode,
+ "Interrupt mode (0=>MSIX 1=>MSI 2=>legacy)");
+
+/* This is the requested number of CPUs to use for Receive-Side Scaling (RSS),
+ * i.e. the number of CPUs among which we may distribute simultaneous
+ * interrupt handling.
+ *
+ * Cards without MSI-X will only target one CPU, via a legacy or MSI interrupt.
+ * The default (0) means to assign an interrupt to each core.
+ */
+static unsigned int rss_cpus;
+module_param(rss_cpus, uint, 0444);
+MODULE_PARM_DESC(rss_cpus, "Number of CPUs to use for Receive-Side Scaling");
+
+static unsigned int irq_adapt_low_thresh = 8000;
+module_param(irq_adapt_low_thresh, uint, 0644);
+MODULE_PARM_DESC(irq_adapt_low_thresh,
+ "Threshold score for reducing IRQ moderation");
+
+static unsigned int irq_adapt_high_thresh = 16000;
+module_param(irq_adapt_high_thresh, uint, 0644);
+MODULE_PARM_DESC(irq_adapt_high_thresh,
+ "Threshold score for increasing IRQ moderation");
+
+/* This is the weight assigned to each of the (per-channel) virtual
+ * NAPI devices.
+ */
+static int napi_weight = 64;
+
+/***************
+ * Housekeeping
+ ***************/
+
+int efx_channel_dummy_op_int(struct efx_channel *channel)
+{
+ return 0;
+}
+
+void efx_channel_dummy_op_void(struct efx_channel *channel)
+{
+}
+
+static const struct efx_channel_type efx_default_channel_type = {
+ .pre_probe = efx_channel_dummy_op_int,
+ .post_remove = efx_channel_dummy_op_void,
+ .get_name = efx_get_channel_name,
+ .copy = efx_copy_channel,
+ .want_txqs = efx_default_channel_want_txqs,
+ .keep_eventq = false,
+ .want_pio = true,
+};
+
+/*************
+ * INTERRUPTS
+ *************/
+
+static unsigned int efx_wanted_parallelism(struct efx_nic *efx)
+{
+ cpumask_var_t thread_mask;
+ unsigned int count;
+ int cpu;
+
+ if (rss_cpus) {
+ count = rss_cpus;
+ } else {
+ if (unlikely(!zalloc_cpumask_var(&thread_mask, GFP_KERNEL))) {
+ netif_warn(efx, probe, efx->net_dev,
+ "RSS disabled due to allocation failure\n");
+ return 1;
+ }
+
+ count = 0;
+ for_each_online_cpu(cpu) {
+ if (!cpumask_test_cpu(cpu, thread_mask)) {
+ ++count;
+ cpumask_or(thread_mask, thread_mask,
+ topology_sibling_cpumask(cpu));
+ }
+ }
+
+ free_cpumask_var(thread_mask);
+ }
+
+ if (count > EFX_MAX_RX_QUEUES) {
+ netif_cond_dbg(efx, probe, efx->net_dev, !rss_cpus, warn,
+ "Reducing number of rx queues from %u to %u.\n",
+ count, EFX_MAX_RX_QUEUES);
+ count = EFX_MAX_RX_QUEUES;
+ }
+
+ /* If RSS is requested for the PF *and* VFs then we can't write RSS
+ * table entries that are inaccessible to VFs
+ */
+#ifdef CONFIG_SFC_SRIOV
+ if (efx->type->sriov_wanted) {
+ if (efx->type->sriov_wanted(efx) && efx_vf_size(efx) > 1 &&
+ count > efx_vf_size(efx)) {
+ netif_warn(efx, probe, efx->net_dev,
+ "Reducing number of RSS channels from %u to %u for "
+ "VF support. Increase vf-msix-limit to use more "
+ "channels on the PF.\n",
+ count, efx_vf_size(efx));
+ count = efx_vf_size(efx);
+ }
+ }
+#endif
+
+ return count;
+}
+
+static int efx_allocate_msix_channels(struct efx_nic *efx,
+ unsigned int max_channels,
+ unsigned int extra_channels,
+ unsigned int parallelism)
+{
+ unsigned int n_channels = parallelism;
+ int vec_count;
+ int n_xdp_tx;
+ int n_xdp_ev;
+
+ if (efx_separate_tx_channels)
+ n_channels *= 2;
+ n_channels += extra_channels;
+
+ /* To allow XDP transmit to happen from arbitrary NAPI contexts
+ * we allocate a TX queue per CPU. We share event queues across
+ * multiple tx queues, assuming tx and ev queues are both
+ * maximum size.
+ */
+
+ n_xdp_tx = num_possible_cpus();
+ n_xdp_ev = DIV_ROUND_UP(n_xdp_tx, EFX_TXQ_TYPES);
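+	/* Illustrative example: with 16 possible CPUs and EFX_TXQ_TYPES == 4
+	 * (an assumed value), this requests 16 XDP TX queues backed by
+	 * DIV_ROUND_UP(16, 4) == 4 extra event queues.
+	 */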
+
+ vec_count = pci_msix_vec_count(efx->pci_dev);
+ if (vec_count < 0)
+ return vec_count;
+
+ max_channels = min_t(unsigned int, vec_count, max_channels);
+
+ /* Check resources.
+ * We need a channel per event queue, plus a VI per tx queue.
+ * This may be more pessimistic than it needs to be.
+ */
+ if (n_channels + n_xdp_ev > max_channels) {
+ netif_err(efx, drv, efx->net_dev,
+ "Insufficient resources for %d XDP event queues (%d other channels, max %d)\n",
+ n_xdp_ev, n_channels, max_channels);
+ efx->n_xdp_channels = 0;
+ efx->xdp_tx_per_channel = 0;
+ efx->xdp_tx_queue_count = 0;
+ } else {
+ efx->n_xdp_channels = n_xdp_ev;
+ efx->xdp_tx_per_channel = EFX_TXQ_TYPES;
+ efx->xdp_tx_queue_count = n_xdp_tx;
+ n_channels += n_xdp_ev;
+ netif_dbg(efx, drv, efx->net_dev,
+ "Allocating %d TX and %d event queues for XDP\n",
+ n_xdp_tx, n_xdp_ev);
+ }
+
+ if (vec_count < n_channels) {
+ netif_err(efx, drv, efx->net_dev,
+ "WARNING: Insufficient MSI-X vectors available (%d < %u).\n",
+ vec_count, n_channels);
+ netif_err(efx, drv, efx->net_dev,
+ "WARNING: Performance may be reduced.\n");
+ n_channels = vec_count;
+ }
+
+ n_channels = min(n_channels, max_channels);
+
+ efx->n_channels = n_channels;
+
+ /* Ignore XDP tx channels when creating rx channels. */
+ n_channels -= efx->n_xdp_channels;
+
+ if (efx_separate_tx_channels) {
+ efx->n_tx_channels =
+ min(max(n_channels / 2, 1U),
+ efx->max_tx_channels);
+ efx->tx_channel_offset =
+ n_channels - efx->n_tx_channels;
+ efx->n_rx_channels =
+ max(n_channels -
+ efx->n_tx_channels, 1U);
+ } else {
+ efx->n_tx_channels = min(n_channels, efx->max_tx_channels);
+ efx->tx_channel_offset = 0;
+ efx->n_rx_channels = n_channels;
+ }
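+	/* Illustrative example: with n_channels == 8 and separate TX
+	 * channels, the split above yields 4 TX channels at offset 4 and
+	 * 4 RX channels at offset 0 (assuming max_tx_channels >= 4).
+	 */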
+
+ efx->n_rx_channels = min(efx->n_rx_channels, parallelism);
+ efx->n_tx_channels = min(efx->n_tx_channels, parallelism);
+
+ efx->xdp_channel_offset = n_channels;
+
+ netif_dbg(efx, drv, efx->net_dev,
+ "Allocating %u RX channels\n",
+ efx->n_rx_channels);
+
+ return efx->n_channels;
+}
+
+/* Probe the number and type of interrupts we are able to obtain, and
+ * the resulting numbers of channels and RX queues.
+ */
+int efx_probe_interrupts(struct efx_nic *efx)
+{
+ unsigned int extra_channels = 0;
+ unsigned int rss_spread;
+ unsigned int i, j;
+ int rc;
+
+ for (i = 0; i < EFX_MAX_EXTRA_CHANNELS; i++)
+ if (efx->extra_channel_type[i])
+ ++extra_channels;
+
+ if (efx->interrupt_mode == EFX_INT_MODE_MSIX) {
+ unsigned int parallelism = efx_wanted_parallelism(efx);
+ struct msix_entry xentries[EFX_MAX_CHANNELS];
+ unsigned int n_channels;
+
+ rc = efx_allocate_msix_channels(efx, efx->max_channels,
+ extra_channels, parallelism);
+ if (rc >= 0) {
+ n_channels = rc;
+ for (i = 0; i < n_channels; i++)
+ xentries[i].entry = i;
+ rc = pci_enable_msix_range(efx->pci_dev, xentries, 1,
+ n_channels);
+ }
+ if (rc < 0) {
+ /* Fall back to single channel MSI */
+ netif_err(efx, drv, efx->net_dev,
+ "could not enable MSI-X\n");
+ if (efx->type->min_interrupt_mode >= EFX_INT_MODE_MSI)
+ efx->interrupt_mode = EFX_INT_MODE_MSI;
+ else
+ return rc;
+ } else if (rc < n_channels) {
+ netif_err(efx, drv, efx->net_dev,
+ "WARNING: Insufficient MSI-X vectors"
+ " available (%d < %u).\n", rc, n_channels);
+ netif_err(efx, drv, efx->net_dev,
+ "WARNING: Performance may be reduced.\n");
+ n_channels = rc;
+ }
+
+ if (rc > 0) {
+ for (i = 0; i < efx->n_channels; i++)
+ efx_get_channel(efx, i)->irq =
+ xentries[i].vector;
+ }
+ }
+
+ /* Try single interrupt MSI */
+ if (efx->interrupt_mode == EFX_INT_MODE_MSI) {
+ efx->n_channels = 1;
+ efx->n_rx_channels = 1;
+ efx->n_tx_channels = 1;
+ efx->n_xdp_channels = 0;
+ efx->xdp_channel_offset = efx->n_channels;
+ rc = pci_enable_msi(efx->pci_dev);
+ if (rc == 0) {
+ efx_get_channel(efx, 0)->irq = efx->pci_dev->irq;
+ } else {
+ netif_err(efx, drv, efx->net_dev,
+ "could not enable MSI\n");
+ if (efx->type->min_interrupt_mode >= EFX_INT_MODE_LEGACY)
+ efx->interrupt_mode = EFX_INT_MODE_LEGACY;
+ else
+ return rc;
+ }
+ }
+
+ /* Assume legacy interrupts */
+ if (efx->interrupt_mode == EFX_INT_MODE_LEGACY) {
+ efx->n_channels = 1 + (efx_separate_tx_channels ? 1 : 0);
+ efx->n_rx_channels = 1;
+ efx->n_tx_channels = 1;
+ efx->n_xdp_channels = 0;
+ efx->xdp_channel_offset = efx->n_channels;
+ efx->legacy_irq = efx->pci_dev->irq;
+ }
+
+ /* Assign extra channels if possible, before XDP channels */
+ efx->n_extra_tx_channels = 0;
+ j = efx->xdp_channel_offset;
+ for (i = 0; i < EFX_MAX_EXTRA_CHANNELS; i++) {
+ if (!efx->extra_channel_type[i])
+ continue;
+ if (j <= efx->tx_channel_offset + efx->n_tx_channels) {
+ efx->extra_channel_type[i]->handle_no_channel(efx);
+ } else {
+ --j;
+ efx_get_channel(efx, j)->type =
+ efx->extra_channel_type[i];
+ if (efx_channel_has_tx_queues(efx_get_channel(efx, j)))
+ efx->n_extra_tx_channels++;
+ }
+ }
+
+ rss_spread = efx->n_rx_channels;
+ /* RSS might be usable on VFs even if it is disabled on the PF */
+#ifdef CONFIG_SFC_SRIOV
+ if (efx->type->sriov_wanted) {
+ efx->rss_spread = ((rss_spread > 1 ||
+ !efx->type->sriov_wanted(efx)) ?
+ rss_spread : efx_vf_size(efx));
+ return 0;
+ }
+#endif
+ efx->rss_spread = rss_spread;
+
+ return 0;
+}
+
+#if defined(CONFIG_SMP)
+void efx_set_interrupt_affinity(struct efx_nic *efx)
+{
+ struct efx_channel *channel;
+ unsigned int cpu;
+
+ efx_for_each_channel(channel, efx) {
+ cpu = cpumask_local_spread(channel->channel,
+ pcibus_to_node(efx->pci_dev->bus));
+ irq_set_affinity_hint(channel->irq, cpumask_of(cpu));
+ }
+}
+
+void efx_clear_interrupt_affinity(struct efx_nic *efx)
+{
+ struct efx_channel *channel;
+
+ efx_for_each_channel(channel, efx)
+ irq_set_affinity_hint(channel->irq, NULL);
+}
+#else
+void
+efx_set_interrupt_affinity(struct efx_nic *efx __attribute__ ((unused)))
+{
+}
+
+void
+efx_clear_interrupt_affinity(struct efx_nic *efx __attribute__ ((unused)))
+{
+}
+#endif /* CONFIG_SMP */
+
+void efx_remove_interrupts(struct efx_nic *efx)
+{
+ struct efx_channel *channel;
+
+ /* Remove MSI/MSI-X interrupts */
+ efx_for_each_channel(channel, efx)
+ channel->irq = 0;
+ pci_disable_msi(efx->pci_dev);
+ pci_disable_msix(efx->pci_dev);
+
+ /* Remove legacy interrupt */
+ efx->legacy_irq = 0;
+}
+
+/***************
+ * EVENT QUEUES
+ ***************/
+
+/* Create event queue
+ * Event queue memory allocations are done only once. If the channel
+ * is reset, the memory buffer will be reused; this guards against
+ * errors during channel reset and also simplifies interrupt handling.
+ */
+int efx_probe_eventq(struct efx_channel *channel)
+{
+ struct efx_nic *efx = channel->efx;
+ unsigned long entries;
+
+ netif_dbg(efx, probe, efx->net_dev,
+ "chan %d create event queue\n", channel->channel);
+
+ /* Build an event queue with room for one event per tx and rx buffer,
+ * plus some extra for link state events and MCDI completions.
+ */
+ entries = roundup_pow_of_two(efx->rxq_entries + efx->txq_entries + 128);
+ EFX_WARN_ON_PARANOID(entries > EFX_MAX_EVQ_SIZE);
+ channel->eventq_mask = max(entries, EFX_MIN_EVQ_SIZE) - 1;
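+	/* Illustrative example: with the default 1024-entry RX and TX
+	 * queues this is roundup_pow_of_two(1024 + 1024 + 128) == 4096,
+	 * so eventq_mask == 4095.
+	 */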
+
+ return efx_nic_probe_eventq(channel);
+}
+
+/* Prepare channel's event queue */
+int efx_init_eventq(struct efx_channel *channel)
+{
+ struct efx_nic *efx = channel->efx;
+ int rc;
+
+ EFX_WARN_ON_PARANOID(channel->eventq_init);
+
+ netif_dbg(efx, drv, efx->net_dev,
+ "chan %d init event queue\n", channel->channel);
+
+ rc = efx_nic_init_eventq(channel);
+ if (rc == 0) {
+ efx->type->push_irq_moderation(channel);
+ channel->eventq_read_ptr = 0;
+ channel->eventq_init = true;
+ }
+ return rc;
+}
+
+/* Enable event queue processing and NAPI */
+void efx_start_eventq(struct efx_channel *channel)
+{
+ netif_dbg(channel->efx, ifup, channel->efx->net_dev,
+ "chan %d start event queue\n", channel->channel);
+
+ /* Make sure the NAPI handler sees the enabled flag set */
+ channel->enabled = true;
+ smp_wmb();
+
+ napi_enable(&channel->napi_str);
+ efx_nic_eventq_read_ack(channel);
+}
+
+/* Disable event queue processing and NAPI */
+void efx_stop_eventq(struct efx_channel *channel)
+{
+ if (!channel->enabled)
+ return;
+
+ napi_disable(&channel->napi_str);
+ channel->enabled = false;
+}
+
+void efx_fini_eventq(struct efx_channel *channel)
+{
+ if (!channel->eventq_init)
+ return;
+
+ netif_dbg(channel->efx, drv, channel->efx->net_dev,
+ "chan %d fini event queue\n", channel->channel);
+
+ efx_nic_fini_eventq(channel);
+ channel->eventq_init = false;
+}
+
+void efx_remove_eventq(struct efx_channel *channel)
+{
+ netif_dbg(channel->efx, drv, channel->efx->net_dev,
+ "chan %d remove event queue\n", channel->channel);
+
+ efx_nic_remove_eventq(channel);
+}
+
+/**************************************************************************
+ *
+ * Channel handling
+ *
+ *************************************************************************/
+
+/* Allocate and initialise a channel structure. */
+struct efx_channel *
+efx_alloc_channel(struct efx_nic *efx, int i, struct efx_channel *old_channel)
+{
+ struct efx_rx_queue *rx_queue;
+ struct efx_tx_queue *tx_queue;
+ struct efx_channel *channel;
+ int j;
+
+ channel = kzalloc(sizeof(*channel), GFP_KERNEL);
+ if (!channel)
+ return NULL;
+
+ channel->efx = efx;
+ channel->channel = i;
+ channel->type = &efx_default_channel_type;
+
+ for (j = 0; j < EFX_TXQ_TYPES; j++) {
+ tx_queue = &channel->tx_queue[j];
+ tx_queue->efx = efx;
+ tx_queue->queue = i * EFX_TXQ_TYPES + j;
+ tx_queue->channel = channel;
+ }
+
+#ifdef CONFIG_RFS_ACCEL
+ INIT_DELAYED_WORK(&channel->filter_work, efx_filter_rfs_expire);
+#endif
+
+ rx_queue = &channel->rx_queue;
+ rx_queue->efx = efx;
+ timer_setup(&rx_queue->slow_fill, efx_rx_slow_fill, 0);
+
+ return channel;
+}
+
+int efx_init_channels(struct efx_nic *efx)
+{
+ unsigned int i;
+
+ for (i = 0; i < EFX_MAX_CHANNELS; i++) {
+ efx->channel[i] = efx_alloc_channel(efx, i, NULL);
+ if (!efx->channel[i])
+ return -ENOMEM;
+ efx->msi_context[i].efx = efx;
+ efx->msi_context[i].index = i;
+ }
+
+ /* Higher numbered interrupt modes are less capable! */
+ if (WARN_ON_ONCE(efx->type->max_interrupt_mode >
+ efx->type->min_interrupt_mode)) {
+ return -EIO;
+ }
+ efx->interrupt_mode = max(efx->type->max_interrupt_mode,
+ interrupt_mode);
+ efx->interrupt_mode = min(efx->type->min_interrupt_mode,
+ interrupt_mode);
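+	/* Illustrative example: if the NIC type supports MSI-X (0) down to
+	 * MSI (1) only, a request for legacy (2) via the interrupt_mode
+	 * module parameter is clamped to MSI by the min() above.
+	 */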
+
+ return 0;
+}
+
+void efx_fini_channels(struct efx_nic *efx)
+{
+ unsigned int i;
+
+	for (i = 0; i < EFX_MAX_CHANNELS; i++) {
+		if (efx->channel[i]) {
+			kfree(efx->channel[i]);
+			efx->channel[i] = NULL;
+		}
+	}
+}
+
+/* Allocate and initialise a channel structure, copying parameters
+ * (but not resources) from an old channel structure.
+ */
+struct efx_channel *efx_copy_channel(const struct efx_channel *old_channel)
+{
+ struct efx_rx_queue *rx_queue;
+ struct efx_tx_queue *tx_queue;
+ struct efx_channel *channel;
+ int j;
+
+ channel = kmalloc(sizeof(*channel), GFP_KERNEL);
+ if (!channel)
+ return NULL;
+
+ *channel = *old_channel;
+
+ channel->napi_dev = NULL;
+ INIT_HLIST_NODE(&channel->napi_str.napi_hash_node);
+ channel->napi_str.napi_id = 0;
+ channel->napi_str.state = 0;
+ memset(&channel->eventq, 0, sizeof(channel->eventq));
+
+ for (j = 0; j < EFX_TXQ_TYPES; j++) {
+ tx_queue = &channel->tx_queue[j];
+ if (tx_queue->channel)
+ tx_queue->channel = channel;
+ tx_queue->buffer = NULL;
+ memset(&tx_queue->txd, 0, sizeof(tx_queue->txd));
+ }
+
+ rx_queue = &channel->rx_queue;
+ rx_queue->buffer = NULL;
+ memset(&rx_queue->rxd, 0, sizeof(rx_queue->rxd));
+ timer_setup(&rx_queue->slow_fill, efx_rx_slow_fill, 0);
+#ifdef CONFIG_RFS_ACCEL
+ INIT_DELAYED_WORK(&channel->filter_work, efx_filter_rfs_expire);
+#endif
+
+ return channel;
+}
+
+static int efx_probe_channel(struct efx_channel *channel)
+{
+ struct efx_tx_queue *tx_queue;
+ struct efx_rx_queue *rx_queue;
+ int rc;
+
+ netif_dbg(channel->efx, probe, channel->efx->net_dev,
+ "creating channel %d\n", channel->channel);
+
+ rc = channel->type->pre_probe(channel);
+ if (rc)
+ goto fail;
+
+ rc = efx_probe_eventq(channel);
+ if (rc)
+ goto fail;
+
+ efx_for_each_channel_tx_queue(tx_queue, channel) {
+ rc = efx_probe_tx_queue(tx_queue);
+ if (rc)
+ goto fail;
+ }
+
+ efx_for_each_channel_rx_queue(rx_queue, channel) {
+ rc = efx_probe_rx_queue(rx_queue);
+ if (rc)
+ goto fail;
+ }
+
+ channel->rx_list = NULL;
+
+ return 0;
+
+fail:
+ efx_remove_channel(channel);
+ return rc;
+}
+
+void efx_get_channel_name(struct efx_channel *channel, char *buf, size_t len)
+{
+ struct efx_nic *efx = channel->efx;
+ const char *type;
+ int number;
+
+ number = channel->channel;
+
+ if (number >= efx->xdp_channel_offset &&
+ !WARN_ON_ONCE(!efx->n_xdp_channels)) {
+ type = "-xdp";
+ number -= efx->xdp_channel_offset;
+ } else if (efx->tx_channel_offset == 0) {
+ type = "";
+ } else if (number < efx->tx_channel_offset) {
+ type = "-rx";
+ } else {
+ type = "-tx";
+ number -= efx->tx_channel_offset;
+ }
+ snprintf(buf, len, "%s%s-%d", efx->name, type, number);
+}
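+
+/* Illustrative example: on a device named "eth0" with separate TX channels,
+ * the names produced above look like "eth0-rx-0" and "eth0-tx-0", and XDP
+ * channels become "eth0-xdp-0" onwards.
+ */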
+
+void efx_set_channel_names(struct efx_nic *efx)
+{
+ struct efx_channel *channel;
+
+ efx_for_each_channel(channel, efx)
+ channel->type->get_name(channel,
+ efx->msi_context[channel->channel].name,
+ sizeof(efx->msi_context[0].name));
+}
+
+int efx_probe_channels(struct efx_nic *efx)
+{
+ struct efx_channel *channel;
+ int rc;
+
+ /* Restart special buffer allocation */
+ efx->next_buffer_table = 0;
+
+ /* Probe channels in reverse, so that any 'extra' channels
+ * use the start of the buffer table. This allows the traffic
+ * channels to be resized without moving them or wasting the
+ * entries before them.
+ */
+ efx_for_each_channel_rev(channel, efx) {
+ rc = efx_probe_channel(channel);
+ if (rc) {
+ netif_err(efx, probe, efx->net_dev,
+ "failed to create channel %d\n",
+ channel->channel);
+ goto fail;
+ }
+ }
+ efx_set_channel_names(efx);
+
+ return 0;
+
+fail:
+ efx_remove_channels(efx);
+ return rc;
+}
+
+void efx_remove_channel(struct efx_channel *channel)
+{
+ struct efx_tx_queue *tx_queue;
+ struct efx_rx_queue *rx_queue;
+
+ netif_dbg(channel->efx, drv, channel->efx->net_dev,
+ "destroy chan %d\n", channel->channel);
+
+ efx_for_each_channel_rx_queue(rx_queue, channel)
+ efx_remove_rx_queue(rx_queue);
+ efx_for_each_possible_channel_tx_queue(tx_queue, channel)
+ efx_remove_tx_queue(tx_queue);
+ efx_remove_eventq(channel);
+ channel->type->post_remove(channel);
+}
+
+void efx_remove_channels(struct efx_nic *efx)
+{
+ struct efx_channel *channel;
+
+ efx_for_each_channel(channel, efx)
+ efx_remove_channel(channel);
+
+ kfree(efx->xdp_tx_queues);
+}
+
+int efx_realloc_channels(struct efx_nic *efx, u32 rxq_entries, u32 txq_entries)
+{
+ struct efx_channel *other_channel[EFX_MAX_CHANNELS], *channel;
+ unsigned int i, next_buffer_table = 0;
+ u32 old_rxq_entries, old_txq_entries;
+ int rc, rc2;
+
+ rc = efx_check_disabled(efx);
+ if (rc)
+ return rc;
+
+	/* Channels without a ->copy operation cannot be reallocated;
+	 * we must avoid disturbing their buffer table entries, so carry
+	 * the existing allocations forward past them.
+	 */
+ efx_for_each_channel(channel, efx) {
+ struct efx_rx_queue *rx_queue;
+ struct efx_tx_queue *tx_queue;
+
+ if (channel->type->copy)
+ continue;
+ next_buffer_table = max(next_buffer_table,
+ channel->eventq.index +
+ channel->eventq.entries);
+ efx_for_each_channel_rx_queue(rx_queue, channel)
+ next_buffer_table = max(next_buffer_table,
+ rx_queue->rxd.index +
+ rx_queue->rxd.entries);
+ efx_for_each_channel_tx_queue(tx_queue, channel)
+ next_buffer_table = max(next_buffer_table,
+ tx_queue->txd.index +
+ tx_queue->txd.entries);
+ }
+
+ efx_device_detach_sync(efx);
+ efx_stop_all(efx);
+ efx_soft_disable_interrupts(efx);
+
+ /* Clone channels (where possible) */
+ memset(other_channel, 0, sizeof(other_channel));
+ for (i = 0; i < efx->n_channels; i++) {
+ channel = efx->channel[i];
+ if (channel->type->copy)
+ channel = channel->type->copy(channel);
+ if (!channel) {
+ rc = -ENOMEM;
+ goto out;
+ }
+ other_channel[i] = channel;
+ }
+
+ /* Swap entry counts and channel pointers */
+ old_rxq_entries = efx->rxq_entries;
+ old_txq_entries = efx->txq_entries;
+ efx->rxq_entries = rxq_entries;
+ efx->txq_entries = txq_entries;
+ for (i = 0; i < efx->n_channels; i++) {
+ channel = efx->channel[i];
+ efx->channel[i] = other_channel[i];
+ other_channel[i] = channel;
+ }
+
+ /* Restart buffer table allocation */
+ efx->next_buffer_table = next_buffer_table;
+
+ for (i = 0; i < efx->n_channels; i++) {
+ channel = efx->channel[i];
+ if (!channel->type->copy)
+ continue;
+ rc = efx_probe_channel(channel);
+ if (rc)
+ goto rollback;
+ efx_init_napi_channel(efx->channel[i]);
+ }
+
+out:
+ /* Destroy unused channel structures */
+ for (i = 0; i < efx->n_channels; i++) {
+ channel = other_channel[i];
+ if (channel && channel->type->copy) {
+ efx_fini_napi_channel(channel);
+ efx_remove_channel(channel);
+ kfree(channel);
+ }
+ }
+
+ rc2 = efx_soft_enable_interrupts(efx);
+ if (rc2) {
+ rc = rc ? rc : rc2;
+ netif_err(efx, drv, efx->net_dev,
+ "unable to restart interrupts on channel reallocation\n");
+ efx_schedule_reset(efx, RESET_TYPE_DISABLE);
+ } else {
+ efx_start_all(efx);
+ efx_device_attach_if_not_resetting(efx);
+ }
+ return rc;
+
+rollback:
+ /* Swap back */
+ efx->rxq_entries = old_rxq_entries;
+ efx->txq_entries = old_txq_entries;
+ for (i = 0; i < efx->n_channels; i++) {
+ channel = efx->channel[i];
+ efx->channel[i] = other_channel[i];
+ other_channel[i] = channel;
+ }
+ goto out;
+}
+
+int efx_set_channels(struct efx_nic *efx)
+{
+ struct efx_channel *channel;
+ struct efx_tx_queue *tx_queue;
+ int xdp_queue_number;
+
+ efx->tx_channel_offset =
+ efx_separate_tx_channels ?
+ efx->n_channels - efx->n_tx_channels : 0;
+
+ if (efx->xdp_tx_queue_count) {
+ EFX_WARN_ON_PARANOID(efx->xdp_tx_queues);
+
+ /* Allocate array for XDP TX queue lookup. */
+ efx->xdp_tx_queues = kcalloc(efx->xdp_tx_queue_count,
+ sizeof(*efx->xdp_tx_queues),
+ GFP_KERNEL);
+ if (!efx->xdp_tx_queues)
+ return -ENOMEM;
+ }
+
+ /* We need to mark which channels really have RX and TX
+ * queues, and adjust the TX queue numbers if we have separate
+ * RX-only and TX-only channels.
+ */
+ xdp_queue_number = 0;
+ efx_for_each_channel(channel, efx) {
+ if (channel->channel < efx->n_rx_channels)
+ channel->rx_queue.core_index = channel->channel;
+ else
+ channel->rx_queue.core_index = -1;
+
+ efx_for_each_channel_tx_queue(tx_queue, channel) {
+ tx_queue->queue -= (efx->tx_channel_offset *
+ EFX_TXQ_TYPES);
+
+ if (efx_channel_is_xdp_tx(channel) &&
+ xdp_queue_number < efx->xdp_tx_queue_count) {
+ efx->xdp_tx_queues[xdp_queue_number] = tx_queue;
+ xdp_queue_number++;
+ }
+ }
+ }
+ return 0;
+}
+
+bool efx_default_channel_want_txqs(struct efx_channel *channel)
+{
+ return channel->channel - channel->efx->tx_channel_offset <
+ channel->efx->n_tx_channels;
+}
+
+/*************
+ * START/STOP
+ *************/
+
+int efx_soft_enable_interrupts(struct efx_nic *efx)
+{
+ struct efx_channel *channel, *end_channel;
+ int rc;
+
+ BUG_ON(efx->state == STATE_DISABLED);
+
+ efx->irq_soft_enabled = true;
+ smp_wmb();
+
+ efx_for_each_channel(channel, efx) {
+ if (!channel->type->keep_eventq) {
+ rc = efx_init_eventq(channel);
+ if (rc)
+ goto fail;
+ }
+ efx_start_eventq(channel);
+ }
+
+ efx_mcdi_mode_event(efx);
+
+ return 0;
+fail:
+ end_channel = channel;
+ efx_for_each_channel(channel, efx) {
+ if (channel == end_channel)
+ break;
+ efx_stop_eventq(channel);
+ if (!channel->type->keep_eventq)
+ efx_fini_eventq(channel);
+ }
+
+ return rc;
+}
+
+void efx_soft_disable_interrupts(struct efx_nic *efx)
+{
+ struct efx_channel *channel;
+
+ if (efx->state == STATE_DISABLED)
+ return;
+
+ efx_mcdi_mode_poll(efx);
+
+ efx->irq_soft_enabled = false;
+ smp_wmb();
+
+ if (efx->legacy_irq)
+ synchronize_irq(efx->legacy_irq);
+
+ efx_for_each_channel(channel, efx) {
+ if (channel->irq)
+ synchronize_irq(channel->irq);
+
+ efx_stop_eventq(channel);
+ if (!channel->type->keep_eventq)
+ efx_fini_eventq(channel);
+ }
+
+ /* Flush the asynchronous MCDI request queue */
+ efx_mcdi_flush_async(efx);
+}
+
+int efx_enable_interrupts(struct efx_nic *efx)
+{
+ struct efx_channel *channel, *end_channel;
+ int rc;
+
+ /* TODO: Is this really a bug? */
+ BUG_ON(efx->state == STATE_DISABLED);
+
+ if (efx->eeh_disabled_legacy_irq) {
+ enable_irq(efx->legacy_irq);
+ efx->eeh_disabled_legacy_irq = false;
+ }
+
+ efx->type->irq_enable_master(efx);
+
+ efx_for_each_channel(channel, efx) {
+ if (channel->type->keep_eventq) {
+ rc = efx_init_eventq(channel);
+ if (rc)
+ goto fail;
+ }
+ }
+
+ rc = efx_soft_enable_interrupts(efx);
+ if (rc)
+ goto fail;
+
+ return 0;
+
+fail:
+ end_channel = channel;
+ efx_for_each_channel(channel, efx) {
+ if (channel == end_channel)
+ break;
+ if (channel->type->keep_eventq)
+ efx_fini_eventq(channel);
+ }
+
+ efx->type->irq_disable_non_ev(efx);
+
+ return rc;
+}
+
+void efx_disable_interrupts(struct efx_nic *efx)
+{
+ struct efx_channel *channel;
+
+ efx_soft_disable_interrupts(efx);
+
+ efx_for_each_channel(channel, efx) {
+ if (channel->type->keep_eventq)
+ efx_fini_eventq(channel);
+ }
+
+ efx->type->irq_disable_non_ev(efx);
+}
+
+void efx_start_channels(struct efx_nic *efx)
+{
+ struct efx_tx_queue *tx_queue;
+ struct efx_rx_queue *rx_queue;
+ struct efx_channel *channel;
+
+ efx_for_each_channel(channel, efx) {
+ efx_for_each_channel_tx_queue(tx_queue, channel) {
+ efx_init_tx_queue(tx_queue);
+ atomic_inc(&efx->active_queues);
+ }
+
+ efx_for_each_channel_rx_queue(rx_queue, channel) {
+ efx_init_rx_queue(rx_queue);
+ atomic_inc(&efx->active_queues);
+ efx_stop_eventq(channel);
+ efx_fast_push_rx_descriptors(rx_queue, false);
+ efx_start_eventq(channel);
+ }
+
+ WARN_ON(channel->rx_pkt_n_frags);
+ }
+}
+
+void efx_stop_channels(struct efx_nic *efx)
+{
+ struct efx_tx_queue *tx_queue;
+ struct efx_rx_queue *rx_queue;
+ struct efx_channel *channel;
+ int rc = 0;
+
+ /* Stop RX refill */
+ efx_for_each_channel(channel, efx) {
+ efx_for_each_channel_rx_queue(rx_queue, channel)
+ rx_queue->refill_enabled = false;
+ }
+
+ efx_for_each_channel(channel, efx) {
+ /* RX packet processing is pipelined, so wait for the
+ * NAPI handler to complete. At least event queue 0
+ * might be kept active by non-data events, so don't
+ * use napi_synchronize() but actually disable NAPI
+ * temporarily.
+ */
+ if (efx_channel_has_rx_queue(channel)) {
+ efx_stop_eventq(channel);
+ efx_start_eventq(channel);
+ }
+ }
+
+ if (efx->type->fini_dmaq)
+ rc = efx->type->fini_dmaq(efx);
+
+ if (rc) {
+ netif_err(efx, drv, efx->net_dev, "failed to flush queues\n");
+ } else {
+ netif_dbg(efx, drv, efx->net_dev,
+ "successfully flushed all queues\n");
+ }
+
+ efx_for_each_channel(channel, efx) {
+ efx_for_each_channel_rx_queue(rx_queue, channel)
+ efx_fini_rx_queue(rx_queue);
+ efx_for_each_possible_channel_tx_queue(tx_queue, channel)
+ efx_fini_tx_queue(tx_queue);
+ }
+}
+
+/**************************************************************************
+ *
+ * NAPI interface
+ *
+ *************************************************************************/
+
+/* Process channel's event queue
+ *
+ * This function is responsible for processing the event queue of a
+ * single channel. The caller must guarantee that this function will
+ * never be concurrently called more than once on the same channel,
+ * though different channels may be being processed concurrently.
+ */
+static int efx_process_channel(struct efx_channel *channel, int budget)
+{
+ struct efx_tx_queue *tx_queue;
+ struct list_head rx_list;
+ int spent;
+
+ if (unlikely(!channel->enabled))
+ return 0;
+
+ /* Prepare the batch receive list */
+ EFX_WARN_ON_PARANOID(channel->rx_list != NULL);
+ INIT_LIST_HEAD(&rx_list);
+ channel->rx_list = &rx_list;
+
+ efx_for_each_channel_tx_queue(tx_queue, channel) {
+ tx_queue->pkts_compl = 0;
+ tx_queue->bytes_compl = 0;
+ }
+
+ spent = efx_nic_process_eventq(channel, budget);
+ if (spent && efx_channel_has_rx_queue(channel)) {
+ struct efx_rx_queue *rx_queue =
+ efx_channel_get_rx_queue(channel);
+
+ efx_rx_flush_packet(channel);
+ efx_fast_push_rx_descriptors(rx_queue, true);
+ }
+
+ /* Update BQL */
+ efx_for_each_channel_tx_queue(tx_queue, channel) {
+ if (tx_queue->bytes_compl) {
+ netdev_tx_completed_queue(tx_queue->core_txq,
+ tx_queue->pkts_compl,
+ tx_queue->bytes_compl);
+ }
+ }
+
+ /* Receive any packets we queued up */
+ netif_receive_skb_list(channel->rx_list);
+ channel->rx_list = NULL;
+
+ return spent;
+}
+
+static void efx_update_irq_mod(struct efx_nic *efx, struct efx_channel *channel)
+{
+ int step = efx->irq_mod_step_us;
+
+ if (channel->irq_mod_score < irq_adapt_low_thresh) {
+ if (channel->irq_moderation_us > step) {
+ channel->irq_moderation_us -= step;
+ efx->type->push_irq_moderation(channel);
+ }
+ } else if (channel->irq_mod_score > irq_adapt_high_thresh) {
+ if (channel->irq_moderation_us <
+ efx->irq_rx_moderation_us) {
+ channel->irq_moderation_us += step;
+ efx->type->push_irq_moderation(channel);
+ }
+ }
+
+ channel->irq_count = 0;
+ channel->irq_mod_score = 0;
+}
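+
+/* Illustrative example: with irq_mod_step_us == 1 (an assumed value), a
+ * channel whose score stays below irq_adapt_low_thresh has its moderation
+ * reduced by 1us per adjustment period, while a score above
+ * irq_adapt_high_thresh ramps it back up towards irq_rx_moderation_us.
+ */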
+
+/* NAPI poll handler
+ *
+ * NAPI guarantees serialisation of polls of the same device, which
+ * provides the guarantee required by efx_process_channel().
+ */
+static int efx_poll(struct napi_struct *napi, int budget)
+{
+ struct efx_channel *channel =
+ container_of(napi, struct efx_channel, napi_str);
+ struct efx_nic *efx = channel->efx;
+ int spent;
+
+ netif_vdbg(efx, intr, efx->net_dev,
+ "channel %d NAPI poll executing on CPU %d\n",
+ channel->channel, raw_smp_processor_id());
+
+ spent = efx_process_channel(channel, budget);
+
+ xdp_do_flush_map();
+
+ if (spent < budget) {
+ if (efx_channel_has_rx_queue(channel) &&
+ efx->irq_rx_adaptive &&
+ unlikely(++channel->irq_count == 1000)) {
+ efx_update_irq_mod(efx, channel);
+ }
+
+#ifdef CONFIG_RFS_ACCEL
+ /* Perhaps expire some ARFS filters */
+ mod_delayed_work(system_wq, &channel->filter_work, 0);
+#endif
+
+ /* There is no race here; although napi_disable() will
+ * only wait for napi_complete(), this isn't a problem
+ * since efx_nic_eventq_read_ack() will have no effect if
+ * interrupts have already been disabled.
+ */
+ if (napi_complete_done(napi, spent))
+ efx_nic_eventq_read_ack(channel);
+ }
+
+ return spent;
+}
+
+void efx_init_napi_channel(struct efx_channel *channel)
+{
+ struct efx_nic *efx = channel->efx;
+
+ channel->napi_dev = efx->net_dev;
+ netif_napi_add(channel->napi_dev, &channel->napi_str,
+ efx_poll, napi_weight);
+}
+
+void efx_init_napi(struct efx_nic *efx)
+{
+ struct efx_channel *channel;
+
+ efx_for_each_channel(channel, efx)
+ efx_init_napi_channel(channel);
+}
+
+void efx_fini_napi_channel(struct efx_channel *channel)
+{
+ if (channel->napi_dev)
+ netif_napi_del(&channel->napi_str);
+
+ channel->napi_dev = NULL;
+}
+
+void efx_fini_napi(struct efx_nic *efx)
+{
+ struct efx_channel *channel;
+
+ efx_for_each_channel(channel, efx)
+ efx_fini_napi_channel(channel);
+}
diff --git a/drivers/net/ethernet/sfc/efx_channels.h b/drivers/net/ethernet/sfc/efx_channels.h
new file mode 100644
index 000000000000..8d7b8c4142d7
--- /dev/null
+++ b/drivers/net/ethernet/sfc/efx_channels.h
@@ -0,0 +1,55 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/****************************************************************************
+ * Driver for Solarflare network controllers and boards
+ * Copyright 2018 Solarflare Communications Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation, incorporated herein by reference.
+ */
+
+#ifndef EFX_CHANNELS_H
+#define EFX_CHANNELS_H
+
+int efx_probe_interrupts(struct efx_nic *efx);
+void efx_remove_interrupts(struct efx_nic *efx);
+int efx_soft_enable_interrupts(struct efx_nic *efx);
+void efx_soft_disable_interrupts(struct efx_nic *efx);
+int efx_enable_interrupts(struct efx_nic *efx);
+void efx_disable_interrupts(struct efx_nic *efx);
+
+void efx_set_interrupt_affinity(struct efx_nic *efx);
+void efx_clear_interrupt_affinity(struct efx_nic *efx);
+
+int efx_probe_eventq(struct efx_channel *channel);
+int efx_init_eventq(struct efx_channel *channel);
+void efx_start_eventq(struct efx_channel *channel);
+void efx_stop_eventq(struct efx_channel *channel);
+void efx_fini_eventq(struct efx_channel *channel);
+void efx_remove_eventq(struct efx_channel *channel);
+
+struct efx_channel *
+efx_alloc_channel(struct efx_nic *efx, int i, struct efx_channel *old_channel);
+int efx_realloc_channels(struct efx_nic *efx, u32 rxq_entries, u32 txq_entries);
+void efx_get_channel_name(struct efx_channel *channel, char *buf, size_t len);
+void efx_set_channel_names(struct efx_nic *efx);
+int efx_init_channels(struct efx_nic *efx);
+int efx_probe_channels(struct efx_nic *efx);
+int efx_set_channels(struct efx_nic *efx);
+bool efx_default_channel_want_txqs(struct efx_channel *channel);
+void efx_remove_channel(struct efx_channel *channel);
+void efx_remove_channels(struct efx_nic *efx);
+void efx_fini_channels(struct efx_nic *efx);
+struct efx_channel *efx_copy_channel(const struct efx_channel *old_channel);
+void efx_start_channels(struct efx_nic *efx);
+void efx_stop_channels(struct efx_nic *efx);
+
+void efx_init_napi_channel(struct efx_channel *channel);
+void efx_init_napi(struct efx_nic *efx);
+void efx_fini_napi_channel(struct efx_channel *channel);
+void efx_fini_napi(struct efx_nic *efx);
+
+int efx_channel_dummy_op_int(struct efx_channel *channel);
+void efx_channel_dummy_op_void(struct efx_channel *channel);
+
+#endif
diff --git a/drivers/net/ethernet/sfc/efx_common.c b/drivers/net/ethernet/sfc/efx_common.c
new file mode 100644
index 000000000000..b0d76bc19673
--- /dev/null
+++ b/drivers/net/ethernet/sfc/efx_common.c
@@ -0,0 +1,1102 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/****************************************************************************
+ * Driver for Solarflare network controllers and boards
+ * Copyright 2018 Solarflare Communications Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation, incorporated herein by reference.
+ */
+
+#include "net_driver.h"
+#include <linux/module.h>
+#include <linux/netdevice.h>
+#include "efx_common.h"
+#include "efx_channels.h"
+#include "efx.h"
+#include "mcdi.h"
+#include "selftest.h"
+#include "rx_common.h"
+#include "tx_common.h"
+#include "nic.h"
+#include "io.h"
+#include "mcdi_pcol.h"
+
+static unsigned int debug = (NETIF_MSG_DRV | NETIF_MSG_PROBE |
+ NETIF_MSG_LINK | NETIF_MSG_IFDOWN |
+ NETIF_MSG_IFUP | NETIF_MSG_RX_ERR |
+ NETIF_MSG_TX_ERR | NETIF_MSG_HW);
+module_param(debug, uint, 0);
+MODULE_PARM_DESC(debug, "Bitmapped debugging message enable value");
+
+/* This is the time (in jiffies) between invocations of the hardware
+ * monitor.
+ * On Falcon-based NICs, this will:
+ * - Check the on-board hardware monitor;
+ * - Poll the link state and reconfigure the hardware as necessary.
+ * On Siena-based NICs for power systems with EEH support, this will give EEH a
+ * chance to start.
+ */
+static unsigned int efx_monitor_interval = 1 * HZ;
+
+/* How often and how many times to poll for a reset while waiting for a
+ * BIST that another function started to complete (up to
+ * 100 * 100 ms = 10 seconds in total).
+ */
+#define BIST_WAIT_DELAY_MS 100
+#define BIST_WAIT_DELAY_COUNT 100
+
+/* Default stats update time */
+#define STATS_PERIOD_MS_DEFAULT 1000
+
+const unsigned int efx_reset_type_max = RESET_TYPE_MAX;
+const char *const efx_reset_type_names[] = {
+ [RESET_TYPE_INVISIBLE] = "INVISIBLE",
+ [RESET_TYPE_ALL] = "ALL",
+ [RESET_TYPE_RECOVER_OR_ALL] = "RECOVER_OR_ALL",
+ [RESET_TYPE_WORLD] = "WORLD",
+ [RESET_TYPE_RECOVER_OR_DISABLE] = "RECOVER_OR_DISABLE",
+ [RESET_TYPE_DATAPATH] = "DATAPATH",
+ [RESET_TYPE_MC_BIST] = "MC_BIST",
+ [RESET_TYPE_DISABLE] = "DISABLE",
+ [RESET_TYPE_TX_WATCHDOG] = "TX_WATCHDOG",
+ [RESET_TYPE_INT_ERROR] = "INT_ERROR",
+ [RESET_TYPE_DMA_ERROR] = "DMA_ERROR",
+ [RESET_TYPE_TX_SKIP] = "TX_SKIP",
+ [RESET_TYPE_MC_FAILURE] = "MC_FAILURE",
+ [RESET_TYPE_MCDI_TIMEOUT] = "MCDI_TIMEOUT (FLR)",
+};
+
+#define RESET_TYPE(type) \
+ STRING_TABLE_LOOKUP(type, efx_reset_type)
+
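
STRING_TABLE_LOOKUP() itself lives elsewhere in the driver (net_driver.h). Its assumed shape, pairing each _names[] array with its _max bound so out-of-range values cannot index past the table:

/* Assumed definition, reproduced here for context only */
#define STRING_TABLE_LOOKUP(val, member) \
	((val) < member ## _max ? member ## _names[val] : "(invalid)")
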
+/* Loopback mode names (see LOOPBACK_MODE()) */
+const unsigned int efx_loopback_mode_max = LOOPBACK_MAX;
+const char *const efx_loopback_mode_names[] = {
+ [LOOPBACK_NONE] = "NONE",
+ [LOOPBACK_DATA] = "DATAPATH",
+ [LOOPBACK_GMAC] = "GMAC",
+ [LOOPBACK_XGMII] = "XGMII",
+ [LOOPBACK_XGXS] = "XGXS",
+ [LOOPBACK_XAUI] = "XAUI",
+ [LOOPBACK_GMII] = "GMII",
+ [LOOPBACK_SGMII] = "SGMII",
+ [LOOPBACK_XGBR] = "XGBR",
+ [LOOPBACK_XFI] = "XFI",
+ [LOOPBACK_XAUI_FAR] = "XAUI_FAR",
+ [LOOPBACK_GMII_FAR] = "GMII_FAR",
+ [LOOPBACK_SGMII_FAR] = "SGMII_FAR",
+ [LOOPBACK_XFI_FAR] = "XFI_FAR",
+ [LOOPBACK_GPHY] = "GPHY",
+ [LOOPBACK_PHYXS] = "PHYXS",
+ [LOOPBACK_PCS] = "PCS",
+ [LOOPBACK_PMAPMD] = "PMA/PMD",
+ [LOOPBACK_XPORT] = "XPORT",
+ [LOOPBACK_XGMII_WS] = "XGMII_WS",
+ [LOOPBACK_XAUI_WS] = "XAUI_WS",
+ [LOOPBACK_XAUI_WS_FAR] = "XAUI_WS_FAR",
+ [LOOPBACK_XAUI_WS_NEAR] = "XAUI_WS_NEAR",
+ [LOOPBACK_GMII_WS] = "GMII_WS",
+ [LOOPBACK_XFI_WS] = "XFI_WS",
+ [LOOPBACK_XFI_WS_FAR] = "XFI_WS_FAR",
+ [LOOPBACK_PHYXS_WS] = "PHYXS_WS",
+};
+
+/* Reset workqueue. If any NIC has a hardware failure then a reset will be
+ * queued onto this work queue. This is not a per-nic work queue, because
+ * efx_reset_work() acquires the rtnl lock, so resets are naturally serialised.
+ */
+static struct workqueue_struct *reset_workqueue;
+
+int efx_create_reset_workqueue(void)
+{
+ reset_workqueue = create_singlethread_workqueue("sfc_reset");
+ if (!reset_workqueue) {
+ printk(KERN_ERR "Failed to create reset workqueue\n");
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+void efx_queue_reset_work(struct efx_nic *efx)
+{
+ queue_work(reset_workqueue, &efx->reset_work);
+}
+
+void efx_flush_reset_workqueue(struct efx_nic *efx)
+{
+ cancel_work_sync(&efx->reset_work);
+}
+
+void efx_destroy_reset_workqueue(void)
+{
+ if (reset_workqueue) {
+ destroy_workqueue(reset_workqueue);
+ reset_workqueue = NULL;
+ }
+}
+
+/* We assume that efx->type->reconfigure_mac will always try to sync RX
+ * filters and therefore needs to read-lock the filter table against freeing
+ */
+void efx_mac_reconfigure(struct efx_nic *efx)
+{
+ if (efx->type->reconfigure_mac) {
+ down_read(&efx->filter_sem);
+ efx->type->reconfigure_mac(efx);
+ up_read(&efx->filter_sem);
+ }
+}
+
+/* Asynchronous work item for changing MAC promiscuity and multicast
+ * hash. Avoid a drain/rx_ingress enable by reconfiguring the current
+ * MAC directly.
+ */
+static void efx_mac_work(struct work_struct *data)
+{
+ struct efx_nic *efx = container_of(data, struct efx_nic, mac_work);
+
+ mutex_lock(&efx->mac_lock);
+ if (efx->port_enabled)
+ efx_mac_reconfigure(efx);
+ mutex_unlock(&efx->mac_lock);
+}
+
+/* This ensures that the kernel is kept informed (via
+ * netif_carrier_on/off) of the link status, and also keeps the port's
+ * TX queue stopped while the link is down.
+ */
+void efx_link_status_changed(struct efx_nic *efx)
+{
+ struct efx_link_state *link_state = &efx->link_state;
+
+ /* SFC Bug 5356: A net_dev notifier is registered, so we must ensure
+ * that no events are triggered between unregister_netdev() and the
+ * driver unloading. A more general condition is that NETDEV_CHANGE
+ * can only be generated between NETDEV_UP and NETDEV_DOWN
+ */
+ if (!netif_running(efx->net_dev))
+ return;
+
+ if (link_state->up != netif_carrier_ok(efx->net_dev)) {
+ efx->n_link_state_changes++;
+
+ if (link_state->up)
+ netif_carrier_on(efx->net_dev);
+ else
+ netif_carrier_off(efx->net_dev);
+ }
+
+ /* Status message for kernel log */
+ if (link_state->up)
+ netif_info(efx, link, efx->net_dev,
+ "link up at %uMbps %s-duplex (MTU %d)\n",
+ link_state->speed, link_state->fd ? "full" : "half",
+ efx->net_dev->mtu);
+ else
+ netif_info(efx, link, efx->net_dev, "link down\n");
+}
+
+unsigned int efx_xdp_max_mtu(struct efx_nic *efx)
+{
+ /* The maximum MTU that we can fit in a single page, allowing for
+ * framing, overhead and XDP headroom.
+ */
+ int overhead = EFX_MAX_FRAME_LEN(0) + sizeof(struct efx_rx_page_state) +
+ efx->rx_prefix_size + efx->type->rx_buffer_padding +
+ efx->rx_ip_align + XDP_PACKET_HEADROOM;
+
+ return PAGE_SIZE - overhead;
+}
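
To put rough numbers on this (every value below is an assumption for illustration; the real ones come from the NIC type, and rx_buffer_padding/rx_ip_align are taken as zero here):

unsigned int page_size = 4096;	/* PAGE_SIZE on most architectures */
unsigned int framing   = 32;	/* EFX_MAX_FRAME_LEN(0): headers + FCS, padded */
unsigned int state     = 8;	/* sizeof(struct efx_rx_page_state) */
unsigned int prefix    = 14;	/* efx->rx_prefix_size */
unsigned int headroom  = 256;	/* XDP_PACKET_HEADROOM */

unsigned int max_mtu = page_size -
		       (framing + state + prefix + headroom);	/* 3786 */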
+
+/* Context: process, rtnl_lock() held. */
+int efx_change_mtu(struct net_device *net_dev, int new_mtu)
+{
+ struct efx_nic *efx = netdev_priv(net_dev);
+ int rc;
+
+ rc = efx_check_disabled(efx);
+ if (rc)
+ return rc;
+
+ if (rtnl_dereference(efx->xdp_prog) &&
+ new_mtu > efx_xdp_max_mtu(efx)) {
+ netif_err(efx, drv, efx->net_dev,
+ "Requested MTU of %d too big for XDP (max: %d)\n",
+ new_mtu, efx_xdp_max_mtu(efx));
+ return -EINVAL;
+ }
+
+ netif_dbg(efx, drv, efx->net_dev, "changing MTU to %d\n", new_mtu);
+
+ efx_device_detach_sync(efx);
+ efx_stop_all(efx);
+
+ mutex_lock(&efx->mac_lock);
+ net_dev->mtu = new_mtu;
+ efx_mac_reconfigure(efx);
+ mutex_unlock(&efx->mac_lock);
+
+ efx_start_all(efx);
+ efx_device_attach_if_not_resetting(efx);
+ return 0;
+}
+
+/**************************************************************************
+ *
+ * Hardware monitor
+ *
+ **************************************************************************/
+
+/* Run periodically off the general workqueue */
+static void efx_monitor(struct work_struct *data)
+{
+ struct efx_nic *efx = container_of(data, struct efx_nic,
+ monitor_work.work);
+
+ netif_vdbg(efx, timer, efx->net_dev,
+ "hardware monitor executing on CPU %d\n",
+ raw_smp_processor_id());
+ BUG_ON(efx->type->monitor == NULL);
+
+ /* If the mac_lock is already held then a port reconfiguration
+ * is probably already in progress, and that will do most of the
+ * work of monitor() anyway.
+ */
+ if (mutex_trylock(&efx->mac_lock)) {
+ if (efx->port_enabled && efx->type->monitor)
+ efx->type->monitor(efx);
+ mutex_unlock(&efx->mac_lock);
+ }
+
+ efx_start_monitor(efx);
+}
+
+void efx_start_monitor(struct efx_nic *efx)
+{
+ if (efx->type->monitor)
+ queue_delayed_work(efx->workqueue, &efx->monitor_work,
+ efx_monitor_interval);
+}
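
efx_monitor() and efx_start_monitor() form a self-rearming delayed-work loop: each run queues the next one, so the cancel_delayed_work_sync() in efx_stop_port() is what actually ends it. The same pattern in isolation (all names hypothetical):

#include <linux/workqueue.h>

struct example_dev {
	struct workqueue_struct *wq;
	struct delayed_work monitor_work;
};

static void example_monitor(struct work_struct *work)
{
	struct example_dev *dev =
		container_of(work, struct example_dev, monitor_work.work);

	/* ... poll hardware state here ... */

	/* Re-arm: the work item schedules its own next run */
	queue_delayed_work(dev->wq, &dev->monitor_work, HZ);
}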
+
+/**************************************************************************
+ *
+ * Event queue processing
+ *
+ *************************************************************************/
+
+/* Channels are shutdown and reinitialised whilst the NIC is running
+ * to propagate configuration changes (mtu, checksum offload), or
+ * to clear hardware error conditions
+ */
+static void efx_start_datapath(struct efx_nic *efx)
+{
+ netdev_features_t old_features = efx->net_dev->features;
+ bool old_rx_scatter = efx->rx_scatter;
+ size_t rx_buf_len;
+
+ /* Calculate the rx buffer allocation parameters required to
+ * support the current MTU, including padding for header
+ * alignment and overruns.
+ */
+ efx->rx_dma_len = (efx->rx_prefix_size +
+ EFX_MAX_FRAME_LEN(efx->net_dev->mtu) +
+ efx->type->rx_buffer_padding);
+ rx_buf_len = (sizeof(struct efx_rx_page_state) + XDP_PACKET_HEADROOM +
+ efx->rx_ip_align + efx->rx_dma_len);
+ if (rx_buf_len <= PAGE_SIZE) {
+ efx->rx_scatter = efx->type->always_rx_scatter;
+ efx->rx_buffer_order = 0;
+ } else if (efx->type->can_rx_scatter) {
+ BUILD_BUG_ON(EFX_RX_USR_BUF_SIZE % L1_CACHE_BYTES);
+ BUILD_BUG_ON(sizeof(struct efx_rx_page_state) +
+ 2 * ALIGN(NET_IP_ALIGN + EFX_RX_USR_BUF_SIZE,
+ EFX_RX_BUF_ALIGNMENT) >
+ PAGE_SIZE);
+ efx->rx_scatter = true;
+ efx->rx_dma_len = EFX_RX_USR_BUF_SIZE;
+ efx->rx_buffer_order = 0;
+ } else {
+ efx->rx_scatter = false;
+ efx->rx_buffer_order = get_order(rx_buf_len);
+ }
+
+ efx_rx_config_page_split(efx);
+ if (efx->rx_buffer_order)
+ netif_dbg(efx, drv, efx->net_dev,
+ "RX buf len=%u; page order=%u batch=%u\n",
+ efx->rx_dma_len, efx->rx_buffer_order,
+ efx->rx_pages_per_batch);
+ else
+ netif_dbg(efx, drv, efx->net_dev,
+ "RX buf len=%u step=%u bpp=%u; page batch=%u\n",
+ efx->rx_dma_len, efx->rx_page_buf_step,
+ efx->rx_bufs_per_page, efx->rx_pages_per_batch);
+
+ /* Restore previously fixed features in hw_features and remove
+ * features which are fixed now
+ */
+ efx->net_dev->hw_features |= efx->net_dev->features;
+ efx->net_dev->hw_features &= ~efx->fixed_features;
+ efx->net_dev->features |= efx->fixed_features;
+ if (efx->net_dev->features != old_features)
+ netdev_features_change(efx->net_dev);
+
+ /* RX filters may also have scatter-enabled flags */
+ if ((efx->rx_scatter != old_rx_scatter) &&
+ efx->type->filter_update_rx_scatter)
+ efx->type->filter_update_rx_scatter(efx);
+
+ /* We must keep at least one descriptor in a TX ring empty.
+ * We could avoid this when the queue size does not exactly
+ * match the hardware ring size, but it's not that important.
+ * Therefore we stop the queue when one more skb might fill
+ * the ring completely. We wake it when half way back to
+ * empty.
+ */
+ efx->txq_stop_thresh = efx->txq_entries - efx_tx_max_skb_descs(efx);
+ efx->txq_wake_thresh = efx->txq_stop_thresh / 2;
+
+ /* Initialise the channels */
+ efx_start_channels(efx);
+
+ efx_ptp_start_datapath(efx);
+
+ if (netif_device_present(efx->net_dev))
+ netif_tx_wake_all_queues(efx->net_dev);
+}
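
For concreteness (both numbers assumed, not taken from this patch): with a 1024-entry ring and a worst-case skb needing 18 descriptors, the queue stops at a fill level of 1006 and wakes again at 503:

/* Assumed example values; efx_tx_max_skb_descs() is NIC-specific */
unsigned int txq_entries   = 1024;
unsigned int max_skb_descs = 18;

unsigned int stop_thresh = txq_entries - max_skb_descs;	/* 1006 */
unsigned int wake_thresh = stop_thresh / 2;		/* 503 */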
+
+static void efx_stop_datapath(struct efx_nic *efx)
+{
+ EFX_ASSERT_RESET_SERIALISED(efx);
+ BUG_ON(efx->port_enabled);
+
+ efx_ptp_stop_datapath(efx);
+
+ efx_stop_channels(efx);
+}
+
+/**************************************************************************
+ *
+ * Port handling
+ *
+ **************************************************************************/
+
+static void efx_start_port(struct efx_nic *efx)
+{
+ netif_dbg(efx, ifup, efx->net_dev, "start port\n");
+ BUG_ON(efx->port_enabled);
+
+ mutex_lock(&efx->mac_lock);
+ efx->port_enabled = true;
+
+ /* Ensure MAC ingress/egress is enabled */
+ efx_mac_reconfigure(efx);
+
+ mutex_unlock(&efx->mac_lock);
+}
+
+/* Cancel work for MAC reconfiguration, periodic hardware monitoring
+ * and the async self-test, wait for them to finish and prevent them
+ * being scheduled again. This doesn't cover online resets, which
+ * should only be cancelled when removing the device.
+ */
+static void efx_stop_port(struct efx_nic *efx)
+{
+ netif_dbg(efx, ifdown, efx->net_dev, "stop port\n");
+
+ EFX_ASSERT_RESET_SERIALISED(efx);
+
+ mutex_lock(&efx->mac_lock);
+ efx->port_enabled = false;
+ mutex_unlock(&efx->mac_lock);
+
+ /* Serialise against efx_set_multicast_list() */
+ netif_addr_lock_bh(efx->net_dev);
+ netif_addr_unlock_bh(efx->net_dev);
+
+ cancel_delayed_work_sync(&efx->monitor_work);
+ efx_selftest_async_cancel(efx);
+ cancel_work_sync(&efx->mac_work);
+}
+
+/* If the interface is supposed to be running but is not, start
+ * the hardware and software data path, regular activity for the port
+ * (MAC statistics, link polling, etc.) and schedule the port to be
+ * reconfigured. Interrupts must already be enabled. This function
+ * is safe to call multiple times, so long as the NIC is not disabled.
+ * Requires the RTNL lock.
+ */
+void efx_start_all(struct efx_nic *efx)
+{
+ EFX_ASSERT_RESET_SERIALISED(efx);
+ BUG_ON(efx->state == STATE_DISABLED);
+
+ /* Check that it is appropriate to restart the interface. All
+ * of these flags are safe to read under just the rtnl lock
+ */
+ if (efx->port_enabled || !netif_running(efx->net_dev) ||
+ efx->reset_pending)
+ return;
+
+ efx_start_port(efx);
+ efx_start_datapath(efx);
+
+ /* Start the hardware monitor if there is one */
+ efx_start_monitor(efx);
+
+ /* Link state detection is normally event-driven; we have
+ * to poll now because we could have missed a change
+ */
+ mutex_lock(&efx->mac_lock);
+ if (efx->phy_op->poll(efx))
+ efx_link_status_changed(efx);
+ mutex_unlock(&efx->mac_lock);
+
+ if (efx->type->start_stats) {
+ efx->type->start_stats(efx);
+ efx->type->pull_stats(efx);
+ spin_lock_bh(&efx->stats_lock);
+ efx->type->update_stats(efx, NULL, NULL);
+ spin_unlock_bh(&efx->stats_lock);
+ }
+}
+
+/* Quiesce the hardware and software data path, and regular activity
+ * for the port without bringing the link down. Safe to call multiple
+ * times with the NIC in almost any state, but interrupts should be
+ * enabled. Requires the RTNL lock.
+ */
+void efx_stop_all(struct efx_nic *efx)
+{
+ EFX_ASSERT_RESET_SERIALISED(efx);
+
+ /* port_enabled can be read safely under the rtnl lock */
+ if (!efx->port_enabled)
+ return;
+
+ if (efx->type->update_stats) {
+ /* update stats before we go down so we can accurately count
+ * rx_nodesc_drops
+ */
+ efx->type->pull_stats(efx);
+ spin_lock_bh(&efx->stats_lock);
+ efx->type->update_stats(efx, NULL, NULL);
+ spin_unlock_bh(&efx->stats_lock);
+ efx->type->stop_stats(efx);
+ }
+
+ efx_stop_port(efx);
+
+ /* Stop the kernel transmit interface. This is only valid if
+ * the device is stopped or detached; otherwise the watchdog
+ * may fire immediately.
+ */
+ WARN_ON(netif_running(efx->net_dev) &&
+ netif_device_present(efx->net_dev));
+ netif_tx_disable(efx->net_dev);
+
+ efx_stop_datapath(efx);
+}
+
+/* Context: process, dev_base_lock or RTNL held, non-blocking. */
+void efx_net_stats(struct net_device *net_dev, struct rtnl_link_stats64 *stats)
+{
+ struct efx_nic *efx = netdev_priv(net_dev);
+
+ spin_lock_bh(&efx->stats_lock);
+ efx->type->update_stats(efx, NULL, stats);
+ spin_unlock_bh(&efx->stats_lock);
+}
+
+/* Push loopback/power/transmit disable settings to the PHY, and reconfigure
+ * the MAC appropriately. All other PHY configuration changes are pushed
+ * through phy_op->set_settings(), and pushed asynchronously to the MAC
+ * through efx_monitor().
+ *
+ * Callers must hold the mac_lock
+ */
+int __efx_reconfigure_port(struct efx_nic *efx)
+{
+ enum efx_phy_mode phy_mode;
+ int rc = 0;
+
+ WARN_ON(!mutex_is_locked(&efx->mac_lock));
+
+ /* Disable PHY transmit in mac level loopbacks */
+ phy_mode = efx->phy_mode;
+ if (LOOPBACK_INTERNAL(efx))
+ efx->phy_mode |= PHY_MODE_TX_DISABLED;
+ else
+ efx->phy_mode &= ~PHY_MODE_TX_DISABLED;
+
+ if (efx->type->reconfigure_port)
+ rc = efx->type->reconfigure_port(efx);
+
+ if (rc)
+ efx->phy_mode = phy_mode;
+
+ return rc;
+}
+
+/* Reinitialise the MAC to pick up new PHY settings, even if the port is
+ * disabled.
+ */
+int efx_reconfigure_port(struct efx_nic *efx)
+{
+ int rc;
+
+ EFX_ASSERT_RESET_SERIALISED(efx);
+
+ mutex_lock(&efx->mac_lock);
+ rc = __efx_reconfigure_port(efx);
+ mutex_unlock(&efx->mac_lock);
+
+ return rc;
+}
+
+/**************************************************************************
+ *
+ * Device reset and suspend
+ *
+ **************************************************************************/
+
+static void efx_wait_for_bist_end(struct efx_nic *efx)
+{
+ int i;
+
+ for (i = 0; i < BIST_WAIT_DELAY_COUNT; ++i) {
+ if (efx_mcdi_poll_reboot(efx))
+ goto out;
+ msleep(BIST_WAIT_DELAY_MS);
+ }
+
+ netif_err(efx, drv, efx->net_dev, "Warning: No MC reboot after BIST mode\n");
+out:
+ /* Either way unset the BIST flag. If we found no reboot we probably
+ * won't recover, but we should try.
+ */
+ efx->mc_bist_for_other_fn = false;
+}
+
+/* Try recovery mechanisms.
+ * For now only EEH is supported.
+ * Returns 0 if the recovery mechanisms are unsuccessful.
+ * Returns a non-zero value otherwise.
+ */
+int efx_try_recovery(struct efx_nic *efx)
+{
+#ifdef CONFIG_EEH
+ /* A PCI error can occur and not be seen by EEH because nothing
+ * happens on the PCI bus. In this case the driver may fail and
+ * schedule a 'recover or reset', leading to this recovery handler.
+ * Manually call the eeh failure check function.
+ */
+ struct eeh_dev *eehdev = pci_dev_to_eeh_dev(efx->pci_dev);
+ if (eeh_dev_check_failure(eehdev)) {
+ /* The EEH mechanisms will handle the error and reset the
+ * device if necessary.
+ */
+ return 1;
+ }
+#endif
+ return 0;
+}
+
+/* Tears down the entire software state and most of the hardware state
+ * before reset.
+ */
+void efx_reset_down(struct efx_nic *efx, enum reset_type method)
+{
+ EFX_ASSERT_RESET_SERIALISED(efx);
+
+ if (method == RESET_TYPE_MCDI_TIMEOUT)
+ efx->type->prepare_flr(efx);
+
+ efx_stop_all(efx);
+ efx_disable_interrupts(efx);
+
+ mutex_lock(&efx->mac_lock);
+ down_write(&efx->filter_sem);
+ mutex_lock(&efx->rss_lock);
+ if (efx->port_initialized && method != RESET_TYPE_INVISIBLE &&
+ method != RESET_TYPE_DATAPATH)
+ efx->phy_op->fini(efx);
+ efx->type->fini(efx);
+}
+
+/* This function will always ensure that the locks acquired in
+ * efx_reset_down() are released. A failure return code indicates
+ * that we were unable to reinitialise the hardware, and the
+ * driver should be disabled. If ok is false, then the rx and tx
+ * engines are not restarted, pending a RESET_DISABLE.
+ */
+int efx_reset_up(struct efx_nic *efx, enum reset_type method, bool ok)
+{
+ int rc;
+
+ EFX_ASSERT_RESET_SERIALISED(efx);
+
+ if (method == RESET_TYPE_MCDI_TIMEOUT)
+ efx->type->finish_flr(efx);
+
+ /* Ensure that SRAM is initialised even if we're disabling the device */
+ rc = efx->type->init(efx);
+ if (rc) {
+ netif_err(efx, drv, efx->net_dev, "failed to initialise NIC\n");
+ goto fail;
+ }
+
+ if (!ok)
+ goto fail;
+
+ if (efx->port_initialized && method != RESET_TYPE_INVISIBLE &&
+ method != RESET_TYPE_DATAPATH) {
+ rc = efx->phy_op->init(efx);
+ if (rc)
+ goto fail;
+ rc = efx->phy_op->reconfigure(efx);
+ if (rc && rc != -EPERM)
+ netif_err(efx, drv, efx->net_dev,
+ "could not restore PHY settings\n");
+ }
+
+ rc = efx_enable_interrupts(efx);
+ if (rc)
+ goto fail;
+
+#ifdef CONFIG_SFC_SRIOV
+ rc = efx->type->vswitching_restore(efx);
+ if (rc) /* not fatal; the PF will still work fine */
+ netif_warn(efx, probe, efx->net_dev,
+ "failed to restore vswitching rc=%d;"
+ " VFs may not function\n", rc);
+#endif
+
+ if (efx->type->rx_restore_rss_contexts)
+ efx->type->rx_restore_rss_contexts(efx);
+ mutex_unlock(&efx->rss_lock);
+ efx->type->filter_table_restore(efx);
+ up_write(&efx->filter_sem);
+ if (efx->type->sriov_reset)
+ efx->type->sriov_reset(efx);
+
+ mutex_unlock(&efx->mac_lock);
+
+ efx_start_all(efx);
+
+ if (efx->type->udp_tnl_push_ports)
+ efx->type->udp_tnl_push_ports(efx);
+
+ return 0;
+
+fail:
+ efx->port_initialized = false;
+
+ mutex_unlock(&efx->rss_lock);
+ up_write(&efx->filter_sem);
+ mutex_unlock(&efx->mac_lock);
+
+ return rc;
+}
+
+/* Reset the NIC using the specified method. Note that the reset may
+ * fail, in which case the card will be left in an unusable state.
+ *
+ * Caller must hold the rtnl_lock.
+ */
+int efx_reset(struct efx_nic *efx, enum reset_type method)
+{
+ bool disabled;
+ int rc, rc2;
+
+ netif_info(efx, drv, efx->net_dev, "resetting (%s)\n",
+ RESET_TYPE(method));
+
+ efx_device_detach_sync(efx);
+ efx_reset_down(efx, method);
+
+ rc = efx->type->reset(efx, method);
+ if (rc) {
+ netif_err(efx, drv, efx->net_dev, "failed to reset hardware\n");
+ goto out;
+ }
+
+ /* Clear flags for the scopes we covered. We assume the NIC and
+ * driver are now quiescent so that there is no race here.
+ */
+ if (method < RESET_TYPE_MAX_METHOD)
+ efx->reset_pending &= -(1 << (method + 1));
+ else /* it doesn't fit into the well-ordered scope hierarchy */
+ __clear_bit(method, &efx->reset_pending);
+
+ /* Reinitialise bus-mastering, which may have been turned off before
+ * the reset was scheduled. This is still appropriate, even in the
+ * RESET_TYPE_DISABLE since this driver generally assumes the hardware
+ * can respond to requests.
+ */
+ pci_set_master(efx->pci_dev);
+
+out:
+ /* Leave device stopped if necessary */
+ disabled = rc ||
+ method == RESET_TYPE_DISABLE ||
+ method == RESET_TYPE_RECOVER_OR_DISABLE;
+ rc2 = efx_reset_up(efx, method, !disabled);
+ if (rc2) {
+ disabled = true;
+ if (!rc)
+ rc = rc2;
+ }
+
+ if (disabled) {
+ dev_close(efx->net_dev);
+ netif_err(efx, drv, efx->net_dev, "has been disabled\n");
+ efx->state = STATE_DISABLED;
+ } else {
+ netif_dbg(efx, drv, efx->net_dev, "reset complete\n");
+ efx_device_attach_if_not_resetting(efx);
+ }
+ return rc;
+}
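
The flag clearing in efx_reset() relies on reset methods being numbered in scope order: -(1 << (method + 1)) is a mask with only the bits above method set, so one reset retires every pending request of equal or smaller scope. A worked example with method = 2:

/*   1 << (2 + 1)    == 0b00001000
 *  -(1 << (2 + 1))  == ...11111000  (two's complement)
 */
unsigned long pending = 0x16;	/* bits 1, 2 and 4 pending (assumed) */

pending &= -(1UL << (2 + 1));	/* clears bits 0..2, leaving 0x10 */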
+
+/* The worker thread exists so that code that cannot sleep can
+ * schedule a reset for later.
+ */
+static void efx_reset_work(struct work_struct *data)
+{
+ struct efx_nic *efx = container_of(data, struct efx_nic, reset_work);
+ unsigned long pending;
+ enum reset_type method;
+
+ pending = READ_ONCE(efx->reset_pending);
+ method = fls(pending) - 1;
+
+ if (method == RESET_TYPE_MC_BIST)
+ efx_wait_for_bist_end(efx);
+
+ if ((method == RESET_TYPE_RECOVER_OR_DISABLE ||
+ method == RESET_TYPE_RECOVER_OR_ALL) &&
+ efx_try_recovery(efx))
+ return;
+
+ if (!pending)
+ return;
+
+ rtnl_lock();
+
+ /* We checked the state in efx_schedule_reset() but it may
+ * have changed by now. Now that we have the RTNL lock,
+ * it cannot change again.
+ */
+ if (efx->state == STATE_READY)
+ (void)efx_reset(efx, method);
+
+ rtnl_unlock();
+}
+
+void efx_schedule_reset(struct efx_nic *efx, enum reset_type type)
+{
+ enum reset_type method;
+
+ if (efx->state == STATE_RECOVERY) {
+ netif_dbg(efx, drv, efx->net_dev,
+ "recovering: skip scheduling %s reset\n",
+ RESET_TYPE(type));
+ return;
+ }
+
+ switch (type) {
+ case RESET_TYPE_INVISIBLE:
+ case RESET_TYPE_ALL:
+ case RESET_TYPE_RECOVER_OR_ALL:
+ case RESET_TYPE_WORLD:
+ case RESET_TYPE_DISABLE:
+ case RESET_TYPE_RECOVER_OR_DISABLE:
+ case RESET_TYPE_DATAPATH:
+ case RESET_TYPE_MC_BIST:
+ case RESET_TYPE_MCDI_TIMEOUT:
+ method = type;
+ netif_dbg(efx, drv, efx->net_dev, "scheduling %s reset\n",
+ RESET_TYPE(method));
+ break;
+ default:
+ method = efx->type->map_reset_reason(type);
+ netif_dbg(efx, drv, efx->net_dev,
+ "scheduling %s reset for %s\n",
+ RESET_TYPE(method), RESET_TYPE(type));
+ break;
+ }
+
+ set_bit(method, &efx->reset_pending);
+ smp_mb(); /* ensure we change reset_pending before checking state */
+
+ /* If we're not READY then just leave the flags set as the cue
+ * to abort probing or reschedule the reset later.
+ */
+ if (READ_ONCE(efx->state) != STATE_READY)
+ return;
+
+ /* efx_process_channel() will no longer read events once a
+ * reset is scheduled. So switch back to poll'd MCDI completions.
+ */
+ efx_mcdi_mode_poll(efx);
+
+ efx_queue_reset_work(efx);
+}
+
+/**************************************************************************
+ *
+ * Dummy PHY/MAC operations
+ *
+ * Can be used for some unimplemented operations
+ * Needed so all function pointers are valid and do not have to be tested
+ * before use
+ *
+ **************************************************************************/
+int efx_port_dummy_op_int(struct efx_nic *efx)
+{
+ return 0;
+}
+void efx_port_dummy_op_void(struct efx_nic *efx) {}
+
+static bool efx_port_dummy_op_poll(struct efx_nic *efx)
+{
+ return false;
+}
+
+static const struct efx_phy_operations efx_dummy_phy_operations = {
+ .init = efx_port_dummy_op_int,
+ .reconfigure = efx_port_dummy_op_int,
+ .poll = efx_port_dummy_op_poll,
+ .fini = efx_port_dummy_op_void,
+};
+
+/**************************************************************************
+ *
+ * Data housekeeping
+ *
+ **************************************************************************/
+
+/* This fills in the invariants in a struct efx_nic (including all
+ * sub-structures); the structure itself is assumed to have been
+ * zeroed by the allocator.
+ */
+int efx_init_struct(struct efx_nic *efx,
+ struct pci_dev *pci_dev, struct net_device *net_dev)
+{
+ int rc = -ENOMEM;
+
+ /* Initialise common structures */
+ INIT_LIST_HEAD(&efx->node);
+ INIT_LIST_HEAD(&efx->secondary_list);
+ spin_lock_init(&efx->biu_lock);
+#ifdef CONFIG_SFC_MTD
+ INIT_LIST_HEAD(&efx->mtd_list);
+#endif
+ INIT_WORK(&efx->reset_work, efx_reset_work);
+ INIT_DELAYED_WORK(&efx->monitor_work, efx_monitor);
+ efx_selftest_async_init(efx);
+ efx->pci_dev = pci_dev;
+ efx->msg_enable = debug;
+ efx->state = STATE_UNINIT;
+ strlcpy(efx->name, pci_name(pci_dev), sizeof(efx->name));
+
+ efx->net_dev = net_dev;
+ efx->rx_prefix_size = efx->type->rx_prefix_size;
+ efx->rx_ip_align =
+ NET_IP_ALIGN ? (efx->rx_prefix_size + NET_IP_ALIGN) % 4 : 0;
+ efx->rx_packet_hash_offset =
+ efx->type->rx_hash_offset - efx->type->rx_prefix_size;
+ efx->rx_packet_ts_offset =
+ efx->type->rx_ts_offset - efx->type->rx_prefix_size;
+ INIT_LIST_HEAD(&efx->rss_context.list);
+ mutex_init(&efx->rss_lock);
+ spin_lock_init(&efx->stats_lock);
+ efx->vi_stride = EFX_DEFAULT_VI_STRIDE;
+ efx->num_mac_stats = MC_CMD_MAC_NSTATS;
+ BUILD_BUG_ON(MC_CMD_MAC_NSTATS - 1 != MC_CMD_MAC_GENERATION_END);
+ mutex_init(&efx->mac_lock);
+#ifdef CONFIG_RFS_ACCEL
+ mutex_init(&efx->rps_mutex);
+ spin_lock_init(&efx->rps_hash_lock);
+ /* Failure to allocate is not fatal, but may degrade ARFS performance */
+ efx->rps_hash_table = kcalloc(EFX_ARFS_HASH_TABLE_SIZE,
+ sizeof(*efx->rps_hash_table), GFP_KERNEL);
+#endif
+ efx->phy_op = &efx_dummy_phy_operations;
+ efx->mdio.dev = net_dev;
+ INIT_WORK(&efx->mac_work, efx_mac_work);
+ init_waitqueue_head(&efx->flush_wq);
+
+ rc = efx_init_channels(efx);
+ if (rc)
+ goto fail;
+
+ /* Would be good to use the net_dev name, but we're too early */
+ snprintf(efx->workqueue_name, sizeof(efx->workqueue_name), "sfc%s",
+ pci_name(pci_dev));
+ efx->workqueue = create_singlethread_workqueue(efx->workqueue_name);
+ if (!efx->workqueue) {
+ rc = -ENOMEM;
+ goto fail;
+ }
+
+ return 0;
+
+fail:
+ efx_fini_struct(efx);
+ return rc;
+}
+
+void efx_fini_struct(struct efx_nic *efx)
+{
+#ifdef CONFIG_RFS_ACCEL
+ kfree(efx->rps_hash_table);
+#endif
+
+ efx_fini_channels(efx);
+
+ kfree(efx->vpd_sn);
+
+ if (efx->workqueue) {
+ destroy_workqueue(efx->workqueue);
+ efx->workqueue = NULL;
+ }
+}
+
+/* This configures the PCI device to enable I/O and DMA. */
+int efx_init_io(struct efx_nic *efx, int bar, dma_addr_t dma_mask,
+ unsigned int mem_map_size)
+{
+ struct pci_dev *pci_dev = efx->pci_dev;
+ int rc;
+
+ netif_dbg(efx, probe, efx->net_dev, "initialising I/O\n");
+
+ rc = pci_enable_device(pci_dev);
+ if (rc) {
+ netif_err(efx, probe, efx->net_dev,
+ "failed to enable PCI device\n");
+ goto fail1;
+ }
+
+ pci_set_master(pci_dev);
+
+ /* Set the PCI DMA mask. Try all possibilities from our
+ * genuine mask down to 32 bits, because some architectures
+ * (e.g. x86_64 with iommu_sac_force set) will allow 40 bit
+ * masks even though they reject 46 bit masks.
+ */
+ while (dma_mask > 0x7fffffffUL) {
+ rc = dma_set_mask_and_coherent(&pci_dev->dev, dma_mask);
+ if (rc == 0)
+ break;
+ dma_mask >>= 1;
+ }
+ if (rc) {
+ netif_err(efx, probe, efx->net_dev,
+ "could not find a suitable DMA mask\n");
+ goto fail2;
+ }
+ netif_dbg(efx, probe, efx->net_dev,
+ "using DMA mask %llx\n", (unsigned long long)dma_mask);
+
+ efx->membase_phys = pci_resource_start(efx->pci_dev, bar);
+ if (!efx->membase_phys) {
+ netif_err(efx, probe, efx->net_dev,
+ "ERROR: No BAR%d mapping from the BIOS. "
+ "Try pci=realloc on the kernel command line\n", bar);
+ rc = -ENODEV;
+ goto fail3;
+ }
+
+ rc = pci_request_region(pci_dev, bar, "sfc");
+ if (rc) {
+ netif_err(efx, probe, efx->net_dev,
+ "request for memory BAR failed\n");
+ rc = -EIO;
+ goto fail3;
+ }
+
+ efx->membase = ioremap(efx->membase_phys, mem_map_size);
+ if (!efx->membase) {
+ netif_err(efx, probe, efx->net_dev,
+ "could not map memory BAR at %llx+%x\n",
+ (unsigned long long)efx->membase_phys, mem_map_size);
+ rc = -ENOMEM;
+ goto fail4;
+ }
+ netif_dbg(efx, probe, efx->net_dev,
+ "memory BAR at %llx+%x (virtual %p)\n",
+ (unsigned long long)efx->membase_phys, mem_map_size,
+ efx->membase);
+
+ return 0;
+
+fail4:
+ pci_release_region(efx->pci_dev, bar);
+fail3:
+ efx->membase_phys = 0;
+fail2:
+ pci_disable_device(efx->pci_dev);
+fail1:
+ return rc;
+}
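
A caller passes its widest supported mask and lets the loop degrade it. A hypothetical invocation (the real BAR number, mask width and map size are NIC-specific):

rc = efx_init_io(efx, 0, DMA_BIT_MASK(46), 1024 * 1024);
if (rc)
	goto fail;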
+
+void efx_fini_io(struct efx_nic *efx, int bar)
+{
+ netif_dbg(efx, drv, efx->net_dev, "shutting down I/O\n");
+
+ if (efx->membase) {
+ iounmap(efx->membase);
+ efx->membase = NULL;
+ }
+
+ if (efx->membase_phys) {
+ pci_release_region(efx->pci_dev, bar);
+ efx->membase_phys = 0;
+ }
+
+ /* Don't disable bus-mastering if VFs are assigned */
+ if (!pci_vfs_assigned(efx->pci_dev))
+ pci_disable_device(efx->pci_dev);
+}
+
+#ifdef CONFIG_SFC_MCDI_LOGGING
+static ssize_t show_mcdi_log(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct efx_nic *efx = dev_get_drvdata(dev);
+ struct efx_mcdi_iface *mcdi = efx_mcdi(efx);
+
+ return scnprintf(buf, PAGE_SIZE, "%d\n", mcdi->logging_enabled);
+}
+
+static ssize_t set_mcdi_log(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct efx_nic *efx = dev_get_drvdata(dev);
+ struct efx_mcdi_iface *mcdi = efx_mcdi(efx);
+ bool enable = count > 0 && *buf != '0';
+
+ mcdi->logging_enabled = enable;
+ return count;
+}
+
+static DEVICE_ATTR(mcdi_logging, 0644, show_mcdi_log, set_mcdi_log);
+
+void efx_init_mcdi_logging(struct efx_nic *efx)
+{
+ int rc = device_create_file(&efx->pci_dev->dev, &dev_attr_mcdi_logging);
+
+ if (rc) {
+ netif_warn(efx, drv, efx->net_dev,
+ "failed to init net dev attributes\n");
+ }
+}
+
+void efx_fini_mcdi_logging(struct efx_nic *efx)
+{
+ device_remove_file(&efx->pci_dev->dev, &dev_attr_mcdi_logging);
+}
+#endif
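
Once registered, mcdi_logging appears as a writable attribute in the PCI device's sysfs directory; per set_mcdi_log() above, writing any string whose first character is not '0' enables logging, and a leading '0' disables it.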
diff --git a/drivers/net/ethernet/sfc/efx_common.h b/drivers/net/ethernet/sfc/efx_common.h
new file mode 100644
index 000000000000..fa2fc681e7f9
--- /dev/null
+++ b/drivers/net/ethernet/sfc/efx_common.h
@@ -0,0 +1,73 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/****************************************************************************
+ * Driver for Solarflare network controllers and boards
+ * Copyright 2018 Solarflare Communications Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation, incorporated herein by reference.
+ */
+
+#ifndef EFX_COMMON_H
+#define EFX_COMMON_H
+
+int efx_init_io(struct efx_nic *efx, int bar, dma_addr_t dma_mask,
+ unsigned int mem_map_size);
+void efx_fini_io(struct efx_nic *efx, int bar);
+int efx_init_struct(struct efx_nic *efx, struct pci_dev *pci_dev,
+ struct net_device *net_dev);
+void efx_fini_struct(struct efx_nic *efx);
+
+void efx_start_all(struct efx_nic *efx);
+void efx_stop_all(struct efx_nic *efx);
+
+void efx_net_stats(struct net_device *net_dev, struct rtnl_link_stats64 *stats);
+
+int efx_create_reset_workqueue(void);
+void efx_queue_reset_work(struct efx_nic *efx);
+void efx_flush_reset_workqueue(struct efx_nic *efx);
+void efx_destroy_reset_workqueue(void);
+
+void efx_start_monitor(struct efx_nic *efx);
+
+int __efx_reconfigure_port(struct efx_nic *efx);
+int efx_reconfigure_port(struct efx_nic *efx);
+
+#define EFX_ASSERT_RESET_SERIALISED(efx) \
+ do { \
+ if ((efx->state == STATE_READY) || \
+ (efx->state == STATE_RECOVERY) || \
+ (efx->state == STATE_DISABLED)) \
+ ASSERT_RTNL(); \
+ } while (0)
+
+int efx_try_recovery(struct efx_nic *efx);
+void efx_reset_down(struct efx_nic *efx, enum reset_type method);
+int efx_reset_up(struct efx_nic *efx, enum reset_type method, bool ok);
+int efx_reset(struct efx_nic *efx, enum reset_type method);
+void efx_schedule_reset(struct efx_nic *efx, enum reset_type type);
+
+static inline int efx_check_disabled(struct efx_nic *efx)
+{
+ if (efx->state == STATE_DISABLED || efx->state == STATE_RECOVERY) {
+ netif_err(efx, drv, efx->net_dev,
+ "device is disabled due to earlier errors\n");
+ return -EIO;
+ }
+ return 0;
+}
+
+#ifdef CONFIG_SFC_MCDI_LOGGING
+void efx_init_mcdi_logging(struct efx_nic *efx);
+void efx_fini_mcdi_logging(struct efx_nic *efx);
+#else
+static inline void efx_init_mcdi_logging(struct efx_nic *efx) {}
+static inline void efx_fini_mcdi_logging(struct efx_nic *efx) {}
+#endif
+
+void efx_mac_reconfigure(struct efx_nic *efx);
+void efx_link_status_changed(struct efx_nic *efx);
+unsigned int efx_xdp_max_mtu(struct efx_nic *efx);
+int efx_change_mtu(struct net_device *net_dev, int new_mtu);
+
+#endif
diff --git a/drivers/net/ethernet/sfc/ethtool.c b/drivers/net/ethernet/sfc/ethtool.c
index b31032da4bcb..993b5769525b 100644
--- a/drivers/net/ethernet/sfc/ethtool.c
+++ b/drivers/net/ethernet/sfc/ethtool.c
@@ -13,92 +13,13 @@
#include "workarounds.h"
#include "selftest.h"
#include "efx.h"
+#include "efx_channels.h"
+#include "rx_common.h"
+#include "tx_common.h"
+#include "ethtool_common.h"
#include "filter.h"
#include "nic.h"
-struct efx_sw_stat_desc {
- const char *name;
- enum {
- EFX_ETHTOOL_STAT_SOURCE_nic,
- EFX_ETHTOOL_STAT_SOURCE_channel,
- EFX_ETHTOOL_STAT_SOURCE_tx_queue
- } source;
- unsigned offset;
- u64(*get_stat) (void *field); /* Reader function */
-};
-
-/* Initialiser for a struct efx_sw_stat_desc with type-checking */
-#define EFX_ETHTOOL_STAT(stat_name, source_name, field, field_type, \
- get_stat_function) { \
- .name = #stat_name, \
- .source = EFX_ETHTOOL_STAT_SOURCE_##source_name, \
- .offset = ((((field_type *) 0) == \
- &((struct efx_##source_name *)0)->field) ? \
- offsetof(struct efx_##source_name, field) : \
- offsetof(struct efx_##source_name, field)), \
- .get_stat = get_stat_function, \
-}
-
-static u64 efx_get_uint_stat(void *field)
-{
- return *(unsigned int *)field;
-}
-
-static u64 efx_get_atomic_stat(void *field)
-{
- return atomic_read((atomic_t *) field);
-}
-
-#define EFX_ETHTOOL_ATOMIC_NIC_ERROR_STAT(field) \
- EFX_ETHTOOL_STAT(field, nic, field, \
- atomic_t, efx_get_atomic_stat)
-
-#define EFX_ETHTOOL_UINT_CHANNEL_STAT(field) \
- EFX_ETHTOOL_STAT(field, channel, n_##field, \
- unsigned int, efx_get_uint_stat)
-#define EFX_ETHTOOL_UINT_CHANNEL_STAT_NO_N(field) \
- EFX_ETHTOOL_STAT(field, channel, field, \
- unsigned int, efx_get_uint_stat)
-
-#define EFX_ETHTOOL_UINT_TXQ_STAT(field) \
- EFX_ETHTOOL_STAT(tx_##field, tx_queue, field, \
- unsigned int, efx_get_uint_stat)
-
-static const struct efx_sw_stat_desc efx_sw_stat_desc[] = {
- EFX_ETHTOOL_UINT_TXQ_STAT(merge_events),
- EFX_ETHTOOL_UINT_TXQ_STAT(tso_bursts),
- EFX_ETHTOOL_UINT_TXQ_STAT(tso_long_headers),
- EFX_ETHTOOL_UINT_TXQ_STAT(tso_packets),
- EFX_ETHTOOL_UINT_TXQ_STAT(tso_fallbacks),
- EFX_ETHTOOL_UINT_TXQ_STAT(pushes),
- EFX_ETHTOOL_UINT_TXQ_STAT(pio_packets),
- EFX_ETHTOOL_UINT_TXQ_STAT(cb_packets),
- EFX_ETHTOOL_ATOMIC_NIC_ERROR_STAT(rx_reset),
- EFX_ETHTOOL_UINT_CHANNEL_STAT(rx_tobe_disc),
- EFX_ETHTOOL_UINT_CHANNEL_STAT(rx_ip_hdr_chksum_err),
- EFX_ETHTOOL_UINT_CHANNEL_STAT(rx_tcp_udp_chksum_err),
- EFX_ETHTOOL_UINT_CHANNEL_STAT(rx_inner_ip_hdr_chksum_err),
- EFX_ETHTOOL_UINT_CHANNEL_STAT(rx_inner_tcp_udp_chksum_err),
- EFX_ETHTOOL_UINT_CHANNEL_STAT(rx_outer_ip_hdr_chksum_err),
- EFX_ETHTOOL_UINT_CHANNEL_STAT(rx_outer_tcp_udp_chksum_err),
- EFX_ETHTOOL_UINT_CHANNEL_STAT(rx_eth_crc_err),
- EFX_ETHTOOL_UINT_CHANNEL_STAT(rx_mcast_mismatch),
- EFX_ETHTOOL_UINT_CHANNEL_STAT(rx_frm_trunc),
- EFX_ETHTOOL_UINT_CHANNEL_STAT(rx_merge_events),
- EFX_ETHTOOL_UINT_CHANNEL_STAT(rx_merge_packets),
- EFX_ETHTOOL_UINT_CHANNEL_STAT(rx_xdp_drops),
- EFX_ETHTOOL_UINT_CHANNEL_STAT(rx_xdp_bad_drops),
- EFX_ETHTOOL_UINT_CHANNEL_STAT(rx_xdp_tx),
- EFX_ETHTOOL_UINT_CHANNEL_STAT(rx_xdp_redirect),
-#ifdef CONFIG_RFS_ACCEL
- EFX_ETHTOOL_UINT_CHANNEL_STAT_NO_N(rfs_filter_count),
- EFX_ETHTOOL_UINT_CHANNEL_STAT(rfs_succeeded),
- EFX_ETHTOOL_UINT_CHANNEL_STAT(rfs_failed),
-#endif
-};
-
-#define EFX_ETHTOOL_SW_STAT_COUNT ARRAY_SIZE(efx_sw_stat_desc)
-
#define EFX_ETHTOOL_EEPROM_MAGIC 0xEFAB
/**************************************************************************
@@ -185,18 +106,6 @@ efx_ethtool_set_link_ksettings(struct net_device *net_dev,
return rc;
}
-static void efx_ethtool_get_drvinfo(struct net_device *net_dev,
- struct ethtool_drvinfo *info)
-{
- struct efx_nic *efx = netdev_priv(net_dev);
-
- strlcpy(info->driver, KBUILD_MODNAME, sizeof(info->driver));
- strlcpy(info->version, EFX_DRIVER_VERSION, sizeof(info->version));
- efx_mcdi_print_fwver(efx, info->fw_version,
- sizeof(info->fw_version));
- strlcpy(info->bus_info, pci_name(efx->pci_dev), sizeof(info->bus_info));
-}
-
static int efx_ethtool_get_regs_len(struct net_device *net_dev)
{
return efx_nic_get_regs_len(netdev_priv(net_dev));
@@ -211,341 +120,6 @@ static void efx_ethtool_get_regs(struct net_device *net_dev,
efx_nic_get_regs(efx, buf);
}
-static u32 efx_ethtool_get_msglevel(struct net_device *net_dev)
-{
- struct efx_nic *efx = netdev_priv(net_dev);
- return efx->msg_enable;
-}
-
-static void efx_ethtool_set_msglevel(struct net_device *net_dev, u32 msg_enable)
-{
- struct efx_nic *efx = netdev_priv(net_dev);
- efx->msg_enable = msg_enable;
-}
-
-/**
- * efx_fill_test - fill in an individual self-test entry
- * @test_index: Index of the test
- * @strings: Ethtool strings, or %NULL
- * @data: Ethtool test results, or %NULL
- * @test: Pointer to test result (used only if data != %NULL)
- * @unit_format: Unit name format (e.g. "chan\%d")
- * @unit_id: Unit id (e.g. 0 for "chan0")
- * @test_format: Test name format (e.g. "loopback.\%s.tx.sent")
- * @test_id: Test id (e.g. "PHYXS" for "loopback.PHYXS.tx_sent")
- *
- * Fill in an individual self-test entry.
- */
-static void efx_fill_test(unsigned int test_index, u8 *strings, u64 *data,
- int *test, const char *unit_format, int unit_id,
- const char *test_format, const char *test_id)
-{
- char unit_str[ETH_GSTRING_LEN], test_str[ETH_GSTRING_LEN];
-
- /* Fill data value, if applicable */
- if (data)
- data[test_index] = *test;
-
- /* Fill string, if applicable */
- if (strings) {
- if (strchr(unit_format, '%'))
- snprintf(unit_str, sizeof(unit_str),
- unit_format, unit_id);
- else
- strcpy(unit_str, unit_format);
- snprintf(test_str, sizeof(test_str), test_format, test_id);
- snprintf(strings + test_index * ETH_GSTRING_LEN,
- ETH_GSTRING_LEN,
- "%-6s %-24s", unit_str, test_str);
- }
-}
-
-#define EFX_CHANNEL_NAME(_channel) "chan%d", _channel->channel
-#define EFX_TX_QUEUE_NAME(_tx_queue) "txq%d", _tx_queue->queue
-#define EFX_RX_QUEUE_NAME(_rx_queue) "rxq%d", _rx_queue->queue
-#define EFX_LOOPBACK_NAME(_mode, _counter) \
- "loopback.%s." _counter, STRING_TABLE_LOOKUP(_mode, efx_loopback_mode)
-
-/**
- * efx_fill_loopback_test - fill in a block of loopback self-test entries
- * @efx: Efx NIC
- * @lb_tests: Efx loopback self-test results structure
- * @mode: Loopback test mode
- * @test_index: Starting index of the test
- * @strings: Ethtool strings, or %NULL
- * @data: Ethtool test results, or %NULL
- *
- * Fill in a block of loopback self-test entries. Return new test
- * index.
- */
-static int efx_fill_loopback_test(struct efx_nic *efx,
- struct efx_loopback_self_tests *lb_tests,
- enum efx_loopback_mode mode,
- unsigned int test_index,
- u8 *strings, u64 *data)
-{
- struct efx_channel *channel =
- efx_get_channel(efx, efx->tx_channel_offset);
- struct efx_tx_queue *tx_queue;
-
- efx_for_each_channel_tx_queue(tx_queue, channel) {
- efx_fill_test(test_index++, strings, data,
- &lb_tests->tx_sent[tx_queue->queue],
- EFX_TX_QUEUE_NAME(tx_queue),
- EFX_LOOPBACK_NAME(mode, "tx_sent"));
- efx_fill_test(test_index++, strings, data,
- &lb_tests->tx_done[tx_queue->queue],
- EFX_TX_QUEUE_NAME(tx_queue),
- EFX_LOOPBACK_NAME(mode, "tx_done"));
- }
- efx_fill_test(test_index++, strings, data,
- &lb_tests->rx_good,
- "rx", 0,
- EFX_LOOPBACK_NAME(mode, "rx_good"));
- efx_fill_test(test_index++, strings, data,
- &lb_tests->rx_bad,
- "rx", 0,
- EFX_LOOPBACK_NAME(mode, "rx_bad"));
-
- return test_index;
-}
-
-/**
- * efx_ethtool_fill_self_tests - get self-test details
- * @efx: Efx NIC
- * @tests: Efx self-test results structure, or %NULL
- * @strings: Ethtool strings, or %NULL
- * @data: Ethtool test results, or %NULL
- *
- * Get self-test number of strings, strings, and/or test results.
- * Return number of strings (== number of test results).
- *
- * The reason for merging these three functions is to make sure that
- * they can never be inconsistent.
- */
-static int efx_ethtool_fill_self_tests(struct efx_nic *efx,
- struct efx_self_tests *tests,
- u8 *strings, u64 *data)
-{
- struct efx_channel *channel;
- unsigned int n = 0, i;
- enum efx_loopback_mode mode;
-
- efx_fill_test(n++, strings, data, &tests->phy_alive,
- "phy", 0, "alive", NULL);
- efx_fill_test(n++, strings, data, &tests->nvram,
- "core", 0, "nvram", NULL);
- efx_fill_test(n++, strings, data, &tests->interrupt,
- "core", 0, "interrupt", NULL);
-
- /* Event queues */
- efx_for_each_channel(channel, efx) {
- efx_fill_test(n++, strings, data,
- &tests->eventq_dma[channel->channel],
- EFX_CHANNEL_NAME(channel),
- "eventq.dma", NULL);
- efx_fill_test(n++, strings, data,
- &tests->eventq_int[channel->channel],
- EFX_CHANNEL_NAME(channel),
- "eventq.int", NULL);
- }
-
- efx_fill_test(n++, strings, data, &tests->memory,
- "core", 0, "memory", NULL);
- efx_fill_test(n++, strings, data, &tests->registers,
- "core", 0, "registers", NULL);
-
- if (efx->phy_op->run_tests != NULL) {
- EFX_WARN_ON_PARANOID(efx->phy_op->test_name == NULL);
-
- for (i = 0; true; ++i) {
- const char *name;
-
- EFX_WARN_ON_PARANOID(i >= EFX_MAX_PHY_TESTS);
- name = efx->phy_op->test_name(efx, i);
- if (name == NULL)
- break;
-
- efx_fill_test(n++, strings, data, &tests->phy_ext[i],
- "phy", 0, name, NULL);
- }
- }
-
- /* Loopback tests */
- for (mode = LOOPBACK_NONE; mode <= LOOPBACK_TEST_MAX; mode++) {
- if (!(efx->loopback_modes & (1 << mode)))
- continue;
- n = efx_fill_loopback_test(efx,
- &tests->loopback[mode], mode, n,
- strings, data);
- }
-
- return n;
-}
-
-static size_t efx_describe_per_queue_stats(struct efx_nic *efx, u8 *strings)
-{
- size_t n_stats = 0;
- struct efx_channel *channel;
-
- efx_for_each_channel(channel, efx) {
- if (efx_channel_has_tx_queues(channel)) {
- n_stats++;
- if (strings != NULL) {
- snprintf(strings, ETH_GSTRING_LEN,
- "tx-%u.tx_packets",
- channel->tx_queue[0].queue /
- EFX_TXQ_TYPES);
-
- strings += ETH_GSTRING_LEN;
- }
- }
- }
- efx_for_each_channel(channel, efx) {
- if (efx_channel_has_rx_queue(channel)) {
- n_stats++;
- if (strings != NULL) {
- snprintf(strings, ETH_GSTRING_LEN,
- "rx-%d.rx_packets", channel->channel);
- strings += ETH_GSTRING_LEN;
- }
- }
- }
- if (efx->xdp_tx_queue_count && efx->xdp_tx_queues) {
- unsigned short xdp;
-
- for (xdp = 0; xdp < efx->xdp_tx_queue_count; xdp++) {
- n_stats++;
- if (strings) {
- snprintf(strings, ETH_GSTRING_LEN,
- "tx-xdp-cpu-%hu.tx_packets", xdp);
- strings += ETH_GSTRING_LEN;
- }
- }
- }
-
- return n_stats;
-}
-
-static int efx_ethtool_get_sset_count(struct net_device *net_dev,
- int string_set)
-{
- struct efx_nic *efx = netdev_priv(net_dev);
-
- switch (string_set) {
- case ETH_SS_STATS:
- return efx->type->describe_stats(efx, NULL) +
- EFX_ETHTOOL_SW_STAT_COUNT +
- efx_describe_per_queue_stats(efx, NULL) +
- efx_ptp_describe_stats(efx, NULL);
- case ETH_SS_TEST:
- return efx_ethtool_fill_self_tests(efx, NULL, NULL, NULL);
- default:
- return -EINVAL;
- }
-}
-
-static void efx_ethtool_get_strings(struct net_device *net_dev,
- u32 string_set, u8 *strings)
-{
- struct efx_nic *efx = netdev_priv(net_dev);
- int i;
-
- switch (string_set) {
- case ETH_SS_STATS:
- strings += (efx->type->describe_stats(efx, strings) *
- ETH_GSTRING_LEN);
- for (i = 0; i < EFX_ETHTOOL_SW_STAT_COUNT; i++)
- strlcpy(strings + i * ETH_GSTRING_LEN,
- efx_sw_stat_desc[i].name, ETH_GSTRING_LEN);
- strings += EFX_ETHTOOL_SW_STAT_COUNT * ETH_GSTRING_LEN;
- strings += (efx_describe_per_queue_stats(efx, strings) *
- ETH_GSTRING_LEN);
- efx_ptp_describe_stats(efx, strings);
- break;
- case ETH_SS_TEST:
- efx_ethtool_fill_self_tests(efx, NULL, strings, NULL);
- break;
- default:
- /* No other string sets */
- break;
- }
-}
-
-static void efx_ethtool_get_stats(struct net_device *net_dev,
- struct ethtool_stats *stats,
- u64 *data)
-{
- struct efx_nic *efx = netdev_priv(net_dev);
- const struct efx_sw_stat_desc *stat;
- struct efx_channel *channel;
- struct efx_tx_queue *tx_queue;
- struct efx_rx_queue *rx_queue;
- int i;
-
- spin_lock_bh(&efx->stats_lock);
-
- /* Get NIC statistics */
- data += efx->type->update_stats(efx, data, NULL);
-
- /* Get software statistics */
- for (i = 0; i < EFX_ETHTOOL_SW_STAT_COUNT; i++) {
- stat = &efx_sw_stat_desc[i];
- switch (stat->source) {
- case EFX_ETHTOOL_STAT_SOURCE_nic:
- data[i] = stat->get_stat((void *)efx + stat->offset);
- break;
- case EFX_ETHTOOL_STAT_SOURCE_channel:
- data[i] = 0;
- efx_for_each_channel(channel, efx)
- data[i] += stat->get_stat((void *)channel +
- stat->offset);
- break;
- case EFX_ETHTOOL_STAT_SOURCE_tx_queue:
- data[i] = 0;
- efx_for_each_channel(channel, efx) {
- efx_for_each_channel_tx_queue(tx_queue, channel)
- data[i] +=
- stat->get_stat((void *)tx_queue
- + stat->offset);
- }
- break;
- }
- }
- data += EFX_ETHTOOL_SW_STAT_COUNT;
-
- spin_unlock_bh(&efx->stats_lock);
-
- efx_for_each_channel(channel, efx) {
- if (efx_channel_has_tx_queues(channel)) {
- *data = 0;
- efx_for_each_channel_tx_queue(tx_queue, channel) {
- *data += tx_queue->tx_packets;
- }
- data++;
- }
- }
- efx_for_each_channel(channel, efx) {
- if (efx_channel_has_rx_queue(channel)) {
- *data = 0;
- efx_for_each_channel_rx_queue(rx_queue, channel) {
- *data += rx_queue->rx_packets;
- }
- data++;
- }
- }
- if (efx->xdp_tx_queue_count && efx->xdp_tx_queues) {
- int xdp;
-
- for (xdp = 0; xdp < efx->xdp_tx_queue_count; xdp++) {
- data[0] = efx->xdp_tx_queues[xdp]->tx_packets;
- data++;
- }
- }
-
- efx_ptp_update_stats(efx, data);
-}
-
static void efx_ethtool_self_test(struct net_device *net_dev,
struct ethtool_test *test, u64 *data)
{
@@ -787,16 +361,6 @@ out:
return rc;
}
-static void efx_ethtool_get_pauseparam(struct net_device *net_dev,
- struct ethtool_pauseparam *pause)
-{
- struct efx_nic *efx = netdev_priv(net_dev);
-
- pause->rx_pause = !!(efx->wanted_fc & EFX_FC_RX);
- pause->tx_pause = !!(efx->wanted_fc & EFX_FC_TX);
- pause->autoneg = !!(efx->wanted_fc & EFX_FC_AUTO);
-}
-
static void efx_ethtool_get_wol(struct net_device *net_dev,
struct ethtool_wolinfo *wol)
{
@@ -1456,7 +1020,7 @@ static int efx_ethtool_set_rxfh_context(struct net_device *net_dev,
rc = -ENOMEM;
goto out_unlock;
}
- ctx->context_id = EFX_EF10_RSS_CONTEXT_INVALID;
+ ctx->context_id = EFX_MCDI_RSS_CONTEXT_INVALID;
/* Initialise indir table and key to defaults */
efx_set_default_rx_indir_table(efx, ctx);
netdev_rss_key_fill(ctx->rx_hash_key, sizeof(ctx->rx_hash_key));
diff --git a/drivers/net/ethernet/sfc/ethtool_common.c b/drivers/net/ethernet/sfc/ethtool_common.c
new file mode 100644
index 000000000000..b8d281ab6c7a
--- /dev/null
+++ b/drivers/net/ethernet/sfc/ethtool_common.c
@@ -0,0 +1,457 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/****************************************************************************
+ * Driver for Solarflare network controllers and boards
+ * Copyright 2019 Solarflare Communications Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation, incorporated herein by reference.
+ */
+#include <linux/module.h>
+#include <linux/netdevice.h>
+#include "net_driver.h"
+#include "mcdi.h"
+#include "nic.h"
+#include "selftest.h"
+#include "ethtool_common.h"
+
+struct efx_sw_stat_desc {
+ const char *name;
+ enum {
+ EFX_ETHTOOL_STAT_SOURCE_nic,
+ EFX_ETHTOOL_STAT_SOURCE_channel,
+ EFX_ETHTOOL_STAT_SOURCE_tx_queue
+ } source;
+ unsigned int offset;
+ u64 (*get_stat)(void *field); /* Reader function */
+};
+
+/* Initialiser for a struct efx_sw_stat_desc with type-checking */
+#define EFX_ETHTOOL_STAT(stat_name, source_name, field, field_type, \
+ get_stat_function) { \
+ .name = #stat_name, \
+ .source = EFX_ETHTOOL_STAT_SOURCE_##source_name, \
+ .offset = ((((field_type *) 0) == \
+ &((struct efx_##source_name *)0)->field) ? \
+ offsetof(struct efx_##source_name, field) : \
+ offsetof(struct efx_##source_name, field)), \
+ .get_stat = get_stat_function, \
+}
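
The ternary above is deliberately redundant: both arms are the same offsetof(), and the comparison of a null field_type pointer against the real member's address exists solely so the compiler flags a mismatch between field and field_type. The idiom in isolation (hypothetical struct):

#include <stddef.h>

struct point { int x; };

/* Both arms are identical; the comparison is a compile-time type check.
 * Writing 'long' in place of 'int' below would draw a diagnostic about
 * comparing distinct pointer types.
 */
#define POINT_X_OFFSET						\
	((((int *)0) == &((struct point *)0)->x) ?		\
	 offsetof(struct point, x) : offsetof(struct point, x))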
+
+static u64 efx_get_uint_stat(void *field)
+{
+ return *(unsigned int *)field;
+}
+
+static u64 efx_get_atomic_stat(void *field)
+{
+ return atomic_read((atomic_t *) field);
+}
+
+#define EFX_ETHTOOL_ATOMIC_NIC_ERROR_STAT(field) \
+ EFX_ETHTOOL_STAT(field, nic, field, \
+ atomic_t, efx_get_atomic_stat)
+
+#define EFX_ETHTOOL_UINT_CHANNEL_STAT(field) \
+ EFX_ETHTOOL_STAT(field, channel, n_##field, \
+ unsigned int, efx_get_uint_stat)
+#define EFX_ETHTOOL_UINT_CHANNEL_STAT_NO_N(field) \
+ EFX_ETHTOOL_STAT(field, channel, field, \
+ unsigned int, efx_get_uint_stat)
+
+#define EFX_ETHTOOL_UINT_TXQ_STAT(field) \
+ EFX_ETHTOOL_STAT(tx_##field, tx_queue, field, \
+ unsigned int, efx_get_uint_stat)
+
+static const struct efx_sw_stat_desc efx_sw_stat_desc[] = {
+ EFX_ETHTOOL_UINT_TXQ_STAT(merge_events),
+ EFX_ETHTOOL_UINT_TXQ_STAT(tso_bursts),
+ EFX_ETHTOOL_UINT_TXQ_STAT(tso_long_headers),
+ EFX_ETHTOOL_UINT_TXQ_STAT(tso_packets),
+ EFX_ETHTOOL_UINT_TXQ_STAT(tso_fallbacks),
+ EFX_ETHTOOL_UINT_TXQ_STAT(pushes),
+ EFX_ETHTOOL_UINT_TXQ_STAT(pio_packets),
+ EFX_ETHTOOL_UINT_TXQ_STAT(cb_packets),
+ EFX_ETHTOOL_ATOMIC_NIC_ERROR_STAT(rx_reset),
+ EFX_ETHTOOL_UINT_CHANNEL_STAT(rx_tobe_disc),
+ EFX_ETHTOOL_UINT_CHANNEL_STAT(rx_ip_hdr_chksum_err),
+ EFX_ETHTOOL_UINT_CHANNEL_STAT(rx_tcp_udp_chksum_err),
+ EFX_ETHTOOL_UINT_CHANNEL_STAT(rx_inner_ip_hdr_chksum_err),
+ EFX_ETHTOOL_UINT_CHANNEL_STAT(rx_inner_tcp_udp_chksum_err),
+ EFX_ETHTOOL_UINT_CHANNEL_STAT(rx_outer_ip_hdr_chksum_err),
+ EFX_ETHTOOL_UINT_CHANNEL_STAT(rx_outer_tcp_udp_chksum_err),
+ EFX_ETHTOOL_UINT_CHANNEL_STAT(rx_eth_crc_err),
+ EFX_ETHTOOL_UINT_CHANNEL_STAT(rx_mcast_mismatch),
+ EFX_ETHTOOL_UINT_CHANNEL_STAT(rx_frm_trunc),
+ EFX_ETHTOOL_UINT_CHANNEL_STAT(rx_merge_events),
+ EFX_ETHTOOL_UINT_CHANNEL_STAT(rx_merge_packets),
+ EFX_ETHTOOL_UINT_CHANNEL_STAT(rx_xdp_drops),
+ EFX_ETHTOOL_UINT_CHANNEL_STAT(rx_xdp_bad_drops),
+ EFX_ETHTOOL_UINT_CHANNEL_STAT(rx_xdp_tx),
+ EFX_ETHTOOL_UINT_CHANNEL_STAT(rx_xdp_redirect),
+#ifdef CONFIG_RFS_ACCEL
+ EFX_ETHTOOL_UINT_CHANNEL_STAT_NO_N(rfs_filter_count),
+ EFX_ETHTOOL_UINT_CHANNEL_STAT(rfs_succeeded),
+ EFX_ETHTOOL_UINT_CHANNEL_STAT(rfs_failed),
+#endif
+};
+
+#define EFX_ETHTOOL_SW_STAT_COUNT ARRAY_SIZE(efx_sw_stat_desc)
+
+void efx_ethtool_get_drvinfo(struct net_device *net_dev,
+ struct ethtool_drvinfo *info)
+{
+ struct efx_nic *efx = netdev_priv(net_dev);
+
+ strlcpy(info->driver, KBUILD_MODNAME, sizeof(info->driver));
+ strlcpy(info->version, EFX_DRIVER_VERSION, sizeof(info->version));
+ efx_mcdi_print_fwver(efx, info->fw_version,
+ sizeof(info->fw_version));
+ strlcpy(info->bus_info, pci_name(efx->pci_dev), sizeof(info->bus_info));
+}
+
+u32 efx_ethtool_get_msglevel(struct net_device *net_dev)
+{
+ struct efx_nic *efx = netdev_priv(net_dev);
+
+ return efx->msg_enable;
+}
+
+void efx_ethtool_set_msglevel(struct net_device *net_dev, u32 msg_enable)
+{
+ struct efx_nic *efx = netdev_priv(net_dev);
+
+ efx->msg_enable = msg_enable;
+}
+
+void efx_ethtool_get_pauseparam(struct net_device *net_dev,
+ struct ethtool_pauseparam *pause)
+{
+ struct efx_nic *efx = netdev_priv(net_dev);
+
+ pause->rx_pause = !!(efx->wanted_fc & EFX_FC_RX);
+ pause->tx_pause = !!(efx->wanted_fc & EFX_FC_TX);
+ pause->autoneg = !!(efx->wanted_fc & EFX_FC_AUTO);
+}
+
+/**
+ * efx_fill_test - fill in an individual self-test entry
+ * @test_index: Index of the test
+ * @strings: Ethtool strings, or %NULL
+ * @data: Ethtool test results, or %NULL
+ * @test: Pointer to test result (used only if data != %NULL)
+ * @unit_format: Unit name format (e.g. "chan\%d")
+ * @unit_id: Unit id (e.g. 0 for "chan0")
+ * @test_format: Test name format (e.g. "loopback.\%s.tx.sent")
+ * @test_id: Test id (e.g. "PHYXS" for "loopback.PHYXS.tx_sent")
+ *
+ * Fill in an individual self-test entry.
+ */
+static void efx_fill_test(unsigned int test_index, u8 *strings, u64 *data,
+ int *test, const char *unit_format, int unit_id,
+ const char *test_format, const char *test_id)
+{
+ char unit_str[ETH_GSTRING_LEN], test_str[ETH_GSTRING_LEN];
+
+ /* Fill data value, if applicable */
+ if (data)
+ data[test_index] = *test;
+
+ /* Fill string, if applicable */
+ if (strings) {
+ if (strchr(unit_format, '%'))
+ snprintf(unit_str, sizeof(unit_str),
+ unit_format, unit_id);
+ else
+ strcpy(unit_str, unit_format);
+ snprintf(test_str, sizeof(test_str), test_format, test_id);
+ snprintf(strings + test_index * ETH_GSTRING_LEN,
+ ETH_GSTRING_LEN,
+ "%-6s %-24s", unit_str, test_str);
+ }
+}
+
+#define EFX_CHANNEL_NAME(_channel) "chan%d", _channel->channel
+#define EFX_TX_QUEUE_NAME(_tx_queue) "txq%d", _tx_queue->queue
+#define EFX_RX_QUEUE_NAME(_rx_queue) "rxq%d", _rx_queue->queue
+#define EFX_LOOPBACK_NAME(_mode, _counter) \
+ "loopback.%s." _counter, STRING_TABLE_LOOKUP(_mode, efx_loopback_mode)
+
+/**
+ * efx_fill_loopback_test - fill in a block of loopback self-test entries
+ * @efx: Efx NIC
+ * @lb_tests: Efx loopback self-test results structure
+ * @mode: Loopback test mode
+ * @test_index: Starting index of the test
+ * @strings: Ethtool strings, or %NULL
+ * @data: Ethtool test results, or %NULL
+ *
+ * Fill in a block of loopback self-test entries. Return new test
+ * index.
+ */
+static int efx_fill_loopback_test(struct efx_nic *efx,
+ struct efx_loopback_self_tests *lb_tests,
+ enum efx_loopback_mode mode,
+ unsigned int test_index,
+ u8 *strings, u64 *data)
+{
+ struct efx_channel *channel =
+ efx_get_channel(efx, efx->tx_channel_offset);
+ struct efx_tx_queue *tx_queue;
+
+ efx_for_each_channel_tx_queue(tx_queue, channel) {
+ efx_fill_test(test_index++, strings, data,
+ &lb_tests->tx_sent[tx_queue->queue],
+ EFX_TX_QUEUE_NAME(tx_queue),
+ EFX_LOOPBACK_NAME(mode, "tx_sent"));
+ efx_fill_test(test_index++, strings, data,
+ &lb_tests->tx_done[tx_queue->queue],
+ EFX_TX_QUEUE_NAME(tx_queue),
+ EFX_LOOPBACK_NAME(mode, "tx_done"));
+ }
+ efx_fill_test(test_index++, strings, data,
+ &lb_tests->rx_good,
+ "rx", 0,
+ EFX_LOOPBACK_NAME(mode, "rx_good"));
+ efx_fill_test(test_index++, strings, data,
+ &lb_tests->rx_bad,
+ "rx", 0,
+ EFX_LOOPBACK_NAME(mode, "rx_bad"));
+
+ return test_index;
+}
+
+/**
+ * efx_ethtool_fill_self_tests - get self-test details
+ * @efx: Efx NIC
+ * @tests: Efx self-test results structure, or %NULL
+ * @strings: Ethtool strings, or %NULL
+ * @data: Ethtool test results, or %NULL
+ *
+ * Get self-test number of strings, strings, and/or test results.
+ * Return number of strings (== number of test results).
+ *
+ * The reason for merging these three functions is to make sure that
+ * they can never be inconsistent.
+ */
+int efx_ethtool_fill_self_tests(struct efx_nic *efx,
+ struct efx_self_tests *tests,
+ u8 *strings, u64 *data)
+{
+ struct efx_channel *channel;
+ unsigned int n = 0, i;
+ enum efx_loopback_mode mode;
+
+ efx_fill_test(n++, strings, data, &tests->phy_alive,
+ "phy", 0, "alive", NULL);
+ efx_fill_test(n++, strings, data, &tests->nvram,
+ "core", 0, "nvram", NULL);
+ efx_fill_test(n++, strings, data, &tests->interrupt,
+ "core", 0, "interrupt", NULL);
+
+ /* Event queues */
+ efx_for_each_channel(channel, efx) {
+ efx_fill_test(n++, strings, data,
+ &tests->eventq_dma[channel->channel],
+ EFX_CHANNEL_NAME(channel),
+ "eventq.dma", NULL);
+ efx_fill_test(n++, strings, data,
+ &tests->eventq_int[channel->channel],
+ EFX_CHANNEL_NAME(channel),
+ "eventq.int", NULL);
+ }
+
+ efx_fill_test(n++, strings, data, &tests->memory,
+ "core", 0, "memory", NULL);
+ efx_fill_test(n++, strings, data, &tests->registers,
+ "core", 0, "registers", NULL);
+
+ if (efx->phy_op->run_tests != NULL) {
+ EFX_WARN_ON_PARANOID(efx->phy_op->test_name == NULL);
+
+ for (i = 0; true; ++i) {
+ const char *name;
+
+ EFX_WARN_ON_PARANOID(i >= EFX_MAX_PHY_TESTS);
+ name = efx->phy_op->test_name(efx, i);
+ if (name == NULL)
+ break;
+
+ efx_fill_test(n++, strings, data, &tests->phy_ext[i],
+ "phy", 0, name, NULL);
+ }
+ }
+
+ /* Loopback tests */
+ for (mode = LOOPBACK_NONE; mode <= LOOPBACK_TEST_MAX; mode++) {
+ if (!(efx->loopback_modes & (1 << mode)))
+ continue;
+ n = efx_fill_loopback_test(efx,
+ &tests->loopback[mode], mode, n,
+ strings, data);
+ }
+
+ return n;
+}
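+
+/*
+ * Note that efx_ethtool_fill_self_tests() may be called with @tests,
+ * @strings and @data all NULL, in which case it merely counts entries.
+ * efx_ethtool_get_sset_count() relies on this, so the reported string
+ * count always matches what efx_ethtool_get_strings() and the self-test
+ * results will actually produce.
+ */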
+
+static size_t efx_describe_per_queue_stats(struct efx_nic *efx, u8 *strings)
+{
+ size_t n_stats = 0;
+ struct efx_channel *channel;
+
+ efx_for_each_channel(channel, efx) {
+ if (efx_channel_has_tx_queues(channel)) {
+ n_stats++;
+ if (strings != NULL) {
+ snprintf(strings, ETH_GSTRING_LEN,
+ "tx-%u.tx_packets",
+ channel->tx_queue[0].queue /
+ EFX_TXQ_TYPES);
+
+ strings += ETH_GSTRING_LEN;
+ }
+ }
+ }
+ efx_for_each_channel(channel, efx) {
+ if (efx_channel_has_rx_queue(channel)) {
+ n_stats++;
+ if (strings != NULL) {
+ snprintf(strings, ETH_GSTRING_LEN,
+ "rx-%d.rx_packets", channel->channel);
+ strings += ETH_GSTRING_LEN;
+ }
+ }
+ }
+ if (efx->xdp_tx_queue_count && efx->xdp_tx_queues) {
+ unsigned short xdp;
+
+ for (xdp = 0; xdp < efx->xdp_tx_queue_count; xdp++) {
+ n_stats++;
+ if (strings) {
+ snprintf(strings, ETH_GSTRING_LEN,
+ "tx-xdp-cpu-%hu.tx_packets", xdp);
+ strings += ETH_GSTRING_LEN;
+ }
+ }
+ }
+
+ return n_stats;
+}
+
+int efx_ethtool_get_sset_count(struct net_device *net_dev, int string_set)
+{
+ struct efx_nic *efx = netdev_priv(net_dev);
+
+ switch (string_set) {
+ case ETH_SS_STATS:
+ return efx->type->describe_stats(efx, NULL) +
+ EFX_ETHTOOL_SW_STAT_COUNT +
+ efx_describe_per_queue_stats(efx, NULL) +
+ efx_ptp_describe_stats(efx, NULL);
+ case ETH_SS_TEST:
+ return efx_ethtool_fill_self_tests(efx, NULL, NULL, NULL);
+ default:
+ return -EINVAL;
+ }
+}
+
+void efx_ethtool_get_strings(struct net_device *net_dev,
+ u32 string_set, u8 *strings)
+{
+ struct efx_nic *efx = netdev_priv(net_dev);
+ int i;
+
+ switch (string_set) {
+ case ETH_SS_STATS:
+ strings += (efx->type->describe_stats(efx, strings) *
+ ETH_GSTRING_LEN);
+ for (i = 0; i < EFX_ETHTOOL_SW_STAT_COUNT; i++)
+ strlcpy(strings + i * ETH_GSTRING_LEN,
+ efx_sw_stat_desc[i].name, ETH_GSTRING_LEN);
+ strings += EFX_ETHTOOL_SW_STAT_COUNT * ETH_GSTRING_LEN;
+ strings += (efx_describe_per_queue_stats(efx, strings) *
+ ETH_GSTRING_LEN);
+ efx_ptp_describe_stats(efx, strings);
+ break;
+ case ETH_SS_TEST:
+ efx_ethtool_fill_self_tests(efx, NULL, strings, NULL);
+ break;
+ default:
+ /* No other string sets */
+ break;
+ }
+}
+
+void efx_ethtool_get_stats(struct net_device *net_dev,
+ struct ethtool_stats *stats,
+ u64 *data)
+{
+ struct efx_nic *efx = netdev_priv(net_dev);
+ const struct efx_sw_stat_desc *stat;
+ struct efx_channel *channel;
+ struct efx_tx_queue *tx_queue;
+ struct efx_rx_queue *rx_queue;
+ int i;
+
+ spin_lock_bh(&efx->stats_lock);
+
+ /* Get NIC statistics */
+ data += efx->type->update_stats(efx, data, NULL);
+
+ /* Get software statistics */
+ for (i = 0; i < EFX_ETHTOOL_SW_STAT_COUNT; i++) {
+ stat = &efx_sw_stat_desc[i];
+ switch (stat->source) {
+ case EFX_ETHTOOL_STAT_SOURCE_nic:
+ data[i] = stat->get_stat((void *)efx + stat->offset);
+ break;
+ case EFX_ETHTOOL_STAT_SOURCE_channel:
+ data[i] = 0;
+ efx_for_each_channel(channel, efx)
+ data[i] += stat->get_stat((void *)channel +
+ stat->offset);
+ break;
+ case EFX_ETHTOOL_STAT_SOURCE_tx_queue:
+ data[i] = 0;
+ efx_for_each_channel(channel, efx) {
+ efx_for_each_channel_tx_queue(tx_queue, channel)
+ data[i] +=
+ stat->get_stat((void *)tx_queue
+ + stat->offset);
+ }
+ break;
+ }
+ }
+ data += EFX_ETHTOOL_SW_STAT_COUNT;
+
+ spin_unlock_bh(&efx->stats_lock);
+
+ efx_for_each_channel(channel, efx) {
+ if (efx_channel_has_tx_queues(channel)) {
+ *data = 0;
+ efx_for_each_channel_tx_queue(tx_queue, channel) {
+ *data += tx_queue->tx_packets;
+ }
+ data++;
+ }
+ }
+ efx_for_each_channel(channel, efx) {
+ if (efx_channel_has_rx_queue(channel)) {
+ *data = 0;
+ efx_for_each_channel_rx_queue(rx_queue, channel) {
+ *data += rx_queue->rx_packets;
+ }
+ data++;
+ }
+ }
+ if (efx->xdp_tx_queue_count && efx->xdp_tx_queues) {
+ int xdp;
+
+ for (xdp = 0; xdp < efx->xdp_tx_queue_count; xdp++) {
+ data[0] = efx->xdp_tx_queues[xdp]->tx_packets;
+ data++;
+ }
+ }
+
+ efx_ptp_update_stats(efx, data);
+}
diff --git a/drivers/net/ethernet/sfc/ethtool_common.h b/drivers/net/ethernet/sfc/ethtool_common.h
new file mode 100644
index 000000000000..fa624313f330
--- /dev/null
+++ b/drivers/net/ethernet/sfc/ethtool_common.h
@@ -0,0 +1,30 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/****************************************************************************
+ * Driver for Solarflare network controllers and boards
+ * Copyright 2019 Solarflare Communications Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation, incorporated herein by reference.
+ */
+
+#ifndef EFX_ETHTOOL_COMMON_H
+#define EFX_ETHTOOL_COMMON_H
+
+void efx_ethtool_get_drvinfo(struct net_device *net_dev,
+ struct ethtool_drvinfo *info);
+u32 efx_ethtool_get_msglevel(struct net_device *net_dev);
+void efx_ethtool_set_msglevel(struct net_device *net_dev, u32 msg_enable);
+void efx_ethtool_get_pauseparam(struct net_device *net_dev,
+ struct ethtool_pauseparam *pause);
+int efx_ethtool_fill_self_tests(struct efx_nic *efx,
+ struct efx_self_tests *tests,
+ u8 *strings, u64 *data);
+int efx_ethtool_get_sset_count(struct net_device *net_dev, int string_set);
+void efx_ethtool_get_strings(struct net_device *net_dev, u32 string_set,
+ u8 *strings);
+void efx_ethtool_get_stats(struct net_device *net_dev,
+ struct ethtool_stats *stats __attribute__ ((unused)),
+ u64 *data);
+
+#endif
diff --git a/drivers/net/ethernet/sfc/falcon/efx.c b/drivers/net/ethernet/sfc/falcon/efx.c
index eecc348b1c32..42bcd34fc508 100644
--- a/drivers/net/ethernet/sfc/falcon/efx.c
+++ b/drivers/net/ethernet/sfc/falcon/efx.c
@@ -1265,7 +1265,7 @@ static int ef4_init_io(struct ef4_nic *efx)
rc = -EIO;
goto fail3;
}
- efx->membase = ioremap_nocache(efx->membase_phys, mem_map_size);
+ efx->membase = ioremap(efx->membase_phys, mem_map_size);
if (!efx->membase) {
netif_err(efx, probe, efx->net_dev,
"could not map memory BAR at %llx+%x\n",
@@ -2108,7 +2108,7 @@ static void ef4_net_stats(struct net_device *net_dev,
}
/* Context: netif_tx_lock held, BHs disabled. */
-static void ef4_watchdog(struct net_device *net_dev)
+static void ef4_watchdog(struct net_device *net_dev, unsigned int txqueue)
{
struct ef4_nic *efx = netdev_priv(net_dev);
diff --git a/drivers/net/ethernet/sfc/farch.c b/drivers/net/ethernet/sfc/farch.c
index eedd32e2bfcb..dbbb898adddb 100644
--- a/drivers/net/ethernet/sfc/farch.c
+++ b/drivers/net/ethernet/sfc/farch.c
@@ -15,6 +15,7 @@
#include "net_driver.h"
#include "bitfield.h"
#include "efx.h"
+#include "rx_common.h"
#include "nic.h"
#include "farch_regs.h"
#include "sriov.h"
diff --git a/drivers/net/ethernet/sfc/mcdi.h b/drivers/net/ethernet/sfc/mcdi.h
index 9081f84a2604..54a45010b576 100644
--- a/drivers/net/ethernet/sfc/mcdi.h
+++ b/drivers/net/ethernet/sfc/mcdi.h
@@ -346,11 +346,8 @@ int efx_mcdi_flush_rxqs(struct efx_nic *efx);
int efx_mcdi_port_probe(struct efx_nic *efx);
void efx_mcdi_port_remove(struct efx_nic *efx);
int efx_mcdi_port_reconfigure(struct efx_nic *efx);
-int efx_mcdi_port_get_number(struct efx_nic *efx);
u32 efx_mcdi_phy_get_caps(struct efx_nic *efx);
void efx_mcdi_process_link_change(struct efx_nic *efx, efx_qword_t *ev);
-int efx_mcdi_set_mac(struct efx_nic *efx);
-#define EFX_MC_STATS_GENERATION_INVALID ((__force __le64)(-1))
void efx_mcdi_mac_start_stats(struct efx_nic *efx);
void efx_mcdi_mac_stop_stats(struct efx_nic *efx);
void efx_mcdi_mac_pull_stats(struct efx_nic *efx);
diff --git a/drivers/net/ethernet/sfc/mcdi_filters.c b/drivers/net/ethernet/sfc/mcdi_filters.c
new file mode 100644
index 000000000000..4310ae5bd898
--- /dev/null
+++ b/drivers/net/ethernet/sfc/mcdi_filters.c
@@ -0,0 +1,2270 @@
+#include "mcdi_filters.h"
+#include "mcdi.h"
+#include "nic.h"
+#include "rx_common.h"
+
+/*
+ * The maximum size of a shared RSS context.
+ * TODO: this should really come from the MCDI protocol export.
+ */
+#define EFX_EF10_MAX_SHARED_RSS_CONTEXT_SIZE 64UL
+
+#define EFX_EF10_FILTER_ID_INVALID 0xffff
+
+/* An arbitrary search limit for the software hash table */
+#define EFX_EF10_FILTER_SEARCH_LIMIT 200
+
+static struct efx_filter_spec *
+efx_mcdi_filter_entry_spec(const struct efx_mcdi_filter_table *table,
+ unsigned int filter_idx)
+{
+ return (struct efx_filter_spec *)(table->entry[filter_idx].spec &
+ ~EFX_EF10_FILTER_FLAGS);
+}
+
+static unsigned int
+efx_mcdi_filter_entry_flags(const struct efx_mcdi_filter_table *table,
+ unsigned int filter_idx)
+{
+ return table->entry[filter_idx].spec & EFX_EF10_FILTER_FLAGS;
+}
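+
+/*
+ * A software table entry packs the pointer to the saved filter spec and a
+ * set of private flags into one word: the flag bits live in the low bits
+ * of entry[].spec, which are otherwise clear because the specs are
+ * kmalloc()ed and hence suitably aligned.  The two helpers above split
+ * that word back into its spec-pointer and flag parts; see also
+ * efx_mcdi_filter_set_entry(), which combines them.
+ */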
+
+static u32 efx_mcdi_filter_get_unsafe_id(u32 filter_id)
+{
+ WARN_ON_ONCE(filter_id == EFX_EF10_FILTER_ID_INVALID);
+ return filter_id & (EFX_MCDI_FILTER_TBL_ROWS - 1);
+}
+
+static unsigned int efx_mcdi_filter_get_unsafe_pri(u32 filter_id)
+{
+ return filter_id / (EFX_MCDI_FILTER_TBL_ROWS * 2);
+}
+
+static u32 efx_mcdi_filter_make_filter_id(unsigned int pri, u16 idx)
+{
+ return pri * EFX_MCDI_FILTER_TBL_ROWS * 2 + idx;
+}
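+
+/*
+ * A user-visible filter ID thus encodes both the match priority and the
+ * table row: id = pri * EFX_MCDI_FILTER_TBL_ROWS * 2 + idx.  Assuming
+ * EFX_MCDI_FILTER_TBL_ROWS is a power of two (the mask in
+ * efx_mcdi_filter_get_unsafe_id() depends on it), the row index is
+ * recovered with a mask and the priority with a divide, as above; e.g.
+ * pri 1, idx 5 maps to id EFX_MCDI_FILTER_TBL_ROWS * 2 + 5 and back.
+ */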
+
+/*
+ * Decide whether a filter should be exclusive or else should allow
+ * delivery to additional recipients. Currently we decide that
+ * filters for specific local unicast MAC and IP addresses are
+ * exclusive.
+ */
+static bool efx_mcdi_filter_is_exclusive(const struct efx_filter_spec *spec)
+{
+ if (spec->match_flags & EFX_FILTER_MATCH_LOC_MAC &&
+ !is_multicast_ether_addr(spec->loc_mac))
+ return true;
+
+ if ((spec->match_flags &
+ (EFX_FILTER_MATCH_ETHER_TYPE | EFX_FILTER_MATCH_LOC_HOST)) ==
+ (EFX_FILTER_MATCH_ETHER_TYPE | EFX_FILTER_MATCH_LOC_HOST)) {
+ if (spec->ether_type == htons(ETH_P_IP) &&
+ !ipv4_is_multicast(spec->loc_host[0]))
+ return true;
+ if (spec->ether_type == htons(ETH_P_IPV6) &&
+ ((const u8 *)spec->loc_host)[0] != 0xff)
+ return true;
+ }
+
+ return false;
+}
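+
+/*
+ * The exclusive/shared distinction chooses the MCDI operation used to
+ * install the filter: efx_mcdi_filter_push_prep_set_match_fields() issues
+ * MC_CMD_FILTER_OP_IN_OP_INSERT for exclusive filters and
+ * MC_CMD_FILTER_OP_IN_OP_SUBSCRIBE for shared ones, so multicast traffic
+ * can be delivered to several recipients while unicast matches stay unique.
+ */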
+
+static void
+efx_mcdi_filter_set_entry(struct efx_mcdi_filter_table *table,
+ unsigned int filter_idx,
+ const struct efx_filter_spec *spec,
+ unsigned int flags)
+{
+ table->entry[filter_idx].spec = (unsigned long)spec | flags;
+}
+
+static void
+efx_mcdi_filter_push_prep_set_match_fields(struct efx_nic *efx,
+ const struct efx_filter_spec *spec,
+ efx_dword_t *inbuf)
+{
+ enum efx_encap_type encap_type = efx_filter_get_encap_type(spec);
+ u32 match_fields = 0, uc_match, mc_match;
+
+ MCDI_SET_DWORD(inbuf, FILTER_OP_IN_OP,
+ efx_mcdi_filter_is_exclusive(spec) ?
+ MC_CMD_FILTER_OP_IN_OP_INSERT :
+ MC_CMD_FILTER_OP_IN_OP_SUBSCRIBE);
+
+ /*
+ * Convert match flags and values. Unlike almost
+ * everything else in MCDI, these fields are in
+ * network byte order.
+ */
+#define COPY_VALUE(value, mcdi_field) \
+ do { \
+ match_fields |= \
+ 1 << MC_CMD_FILTER_OP_IN_MATCH_ ## \
+ mcdi_field ## _LBN; \
+ BUILD_BUG_ON( \
+ MC_CMD_FILTER_OP_IN_ ## mcdi_field ## _LEN < \
+ sizeof(value)); \
+ memcpy(MCDI_PTR(inbuf, FILTER_OP_IN_ ## mcdi_field), \
+ &value, sizeof(value)); \
+ } while (0)
+#define COPY_FIELD(gen_flag, gen_field, mcdi_field) \
+ if (spec->match_flags & EFX_FILTER_MATCH_ ## gen_flag) { \
+ COPY_VALUE(spec->gen_field, mcdi_field); \
+ }
+ /*
+ * Handle encap filters first. They will always be mismatch
+ * (unknown UC or MC) filters
+ */
+ if (encap_type) {
+ /*
+ * ether_type and outer_ip_proto need to be variables
+ * because COPY_VALUE wants to memcpy them
+ */
+ __be16 ether_type =
+ htons(encap_type & EFX_ENCAP_FLAG_IPV6 ?
+ ETH_P_IPV6 : ETH_P_IP);
+ u8 vni_type = MC_CMD_FILTER_OP_EXT_IN_VNI_TYPE_GENEVE;
+ u8 outer_ip_proto;
+
+ switch (encap_type & EFX_ENCAP_TYPES_MASK) {
+ case EFX_ENCAP_TYPE_VXLAN:
+ vni_type = MC_CMD_FILTER_OP_EXT_IN_VNI_TYPE_VXLAN;
+ /* fallthrough */
+ case EFX_ENCAP_TYPE_GENEVE:
+ COPY_VALUE(ether_type, ETHER_TYPE);
+ outer_ip_proto = IPPROTO_UDP;
+ COPY_VALUE(outer_ip_proto, IP_PROTO);
+ /*
+ * We always need to set the type field, even
+ * though we're not matching on the TNI.
+ */
+ MCDI_POPULATE_DWORD_1(inbuf,
+ FILTER_OP_EXT_IN_VNI_OR_VSID,
+ FILTER_OP_EXT_IN_VNI_TYPE,
+ vni_type);
+ break;
+ case EFX_ENCAP_TYPE_NVGRE:
+ COPY_VALUE(ether_type, ETHER_TYPE);
+ outer_ip_proto = IPPROTO_GRE;
+ COPY_VALUE(outer_ip_proto, IP_PROTO);
+ break;
+ default:
+ WARN_ON(1);
+ }
+
+ uc_match = MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_UNKNOWN_UCAST_DST_LBN;
+ mc_match = MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_UNKNOWN_MCAST_DST_LBN;
+ } else {
+ uc_match = MC_CMD_FILTER_OP_EXT_IN_MATCH_UNKNOWN_UCAST_DST_LBN;
+ mc_match = MC_CMD_FILTER_OP_EXT_IN_MATCH_UNKNOWN_MCAST_DST_LBN;
+ }
+
+ if (spec->match_flags & EFX_FILTER_MATCH_LOC_MAC_IG)
+ match_fields |=
+ is_multicast_ether_addr(spec->loc_mac) ?
+ 1 << mc_match :
+ 1 << uc_match;
+ COPY_FIELD(REM_HOST, rem_host, SRC_IP);
+ COPY_FIELD(LOC_HOST, loc_host, DST_IP);
+ COPY_FIELD(REM_MAC, rem_mac, SRC_MAC);
+ COPY_FIELD(REM_PORT, rem_port, SRC_PORT);
+ COPY_FIELD(LOC_MAC, loc_mac, DST_MAC);
+ COPY_FIELD(LOC_PORT, loc_port, DST_PORT);
+ COPY_FIELD(ETHER_TYPE, ether_type, ETHER_TYPE);
+ COPY_FIELD(INNER_VID, inner_vid, INNER_VLAN);
+ COPY_FIELD(OUTER_VID, outer_vid, OUTER_VLAN);
+ COPY_FIELD(IP_PROTO, ip_proto, IP_PROTO);
+#undef COPY_FIELD
+#undef COPY_VALUE
+ MCDI_SET_DWORD(inbuf, FILTER_OP_IN_MATCH_FIELDS,
+ match_fields);
+}
+
+static void efx_mcdi_filter_push_prep(struct efx_nic *efx,
+ const struct efx_filter_spec *spec,
+ efx_dword_t *inbuf, u64 handle,
+ struct efx_rss_context *ctx,
+ bool replacing)
+{
+ struct efx_ef10_nic_data *nic_data = efx->nic_data;
+ u32 flags = spec->flags;
+
+ memset(inbuf, 0, MC_CMD_FILTER_OP_EXT_IN_LEN);
+
+ /* If this is an RSS filter, the caller must have supplied an RSS context */
+ if (flags & EFX_FILTER_FLAG_RX_RSS) {
+ /*
+ * We don't have the ability to return an error, so we'll just
+ * log a warning and disable RSS for the filter.
+ */
+ if (WARN_ON_ONCE(!ctx))
+ flags &= ~EFX_FILTER_FLAG_RX_RSS;
+ else if (WARN_ON_ONCE(ctx->context_id == EFX_MCDI_RSS_CONTEXT_INVALID))
+ flags &= ~EFX_FILTER_FLAG_RX_RSS;
+ }
+
+ if (replacing) {
+ MCDI_SET_DWORD(inbuf, FILTER_OP_IN_OP,
+ MC_CMD_FILTER_OP_IN_OP_REPLACE);
+ MCDI_SET_QWORD(inbuf, FILTER_OP_IN_HANDLE, handle);
+ } else {
+ efx_mcdi_filter_push_prep_set_match_fields(efx, spec, inbuf);
+ }
+
+ MCDI_SET_DWORD(inbuf, FILTER_OP_IN_PORT_ID, nic_data->vport_id);
+ MCDI_SET_DWORD(inbuf, FILTER_OP_IN_RX_DEST,
+ spec->dmaq_id == EFX_FILTER_RX_DMAQ_ID_DROP ?
+ MC_CMD_FILTER_OP_IN_RX_DEST_DROP :
+ MC_CMD_FILTER_OP_IN_RX_DEST_HOST);
+ MCDI_SET_DWORD(inbuf, FILTER_OP_IN_TX_DOMAIN, 0);
+ MCDI_SET_DWORD(inbuf, FILTER_OP_IN_TX_DEST,
+ MC_CMD_FILTER_OP_IN_TX_DEST_DEFAULT);
+ MCDI_SET_DWORD(inbuf, FILTER_OP_IN_RX_QUEUE,
+ spec->dmaq_id == EFX_FILTER_RX_DMAQ_ID_DROP ?
+ 0 : spec->dmaq_id);
+ MCDI_SET_DWORD(inbuf, FILTER_OP_IN_RX_MODE,
+ (flags & EFX_FILTER_FLAG_RX_RSS) ?
+ MC_CMD_FILTER_OP_IN_RX_MODE_RSS :
+ MC_CMD_FILTER_OP_IN_RX_MODE_SIMPLE);
+ if (flags & EFX_FILTER_FLAG_RX_RSS)
+ MCDI_SET_DWORD(inbuf, FILTER_OP_IN_RX_CONTEXT, ctx->context_id);
+}
+
+static int efx_mcdi_filter_push(struct efx_nic *efx,
+ const struct efx_filter_spec *spec, u64 *handle,
+ struct efx_rss_context *ctx, bool replacing)
+{
+ MCDI_DECLARE_BUF(inbuf, MC_CMD_FILTER_OP_EXT_IN_LEN);
+ MCDI_DECLARE_BUF(outbuf, MC_CMD_FILTER_OP_EXT_OUT_LEN);
+ size_t outlen;
+ int rc;
+
+ efx_mcdi_filter_push_prep(efx, spec, inbuf, *handle, ctx, replacing);
+ rc = efx_mcdi_rpc_quiet(efx, MC_CMD_FILTER_OP, inbuf, sizeof(inbuf),
+ outbuf, sizeof(outbuf), &outlen);
+ if (rc && spec->priority != EFX_FILTER_PRI_HINT)
+ efx_mcdi_display_error(efx, MC_CMD_FILTER_OP, sizeof(inbuf),
+ outbuf, outlen, rc);
+ if (rc == 0)
+ *handle = MCDI_QWORD(outbuf, FILTER_OP_OUT_HANDLE);
+ if (rc == -ENOSPC)
+ rc = -EBUSY; /* to match efx_farch_filter_insert() */
+ return rc;
+}
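+
+/*
+ * Two details of efx_mcdi_filter_push() are worth noting: errors are only
+ * logged for priorities above EFX_FILTER_PRI_HINT, so failed ARFS hint
+ * insertions do not flood the log; and -ENOSPC from the MC is rewritten
+ * to -EBUSY so callers see the same error that efx_farch_filter_insert()
+ * returns for a full table.
+ */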
+
+static u32 efx_mcdi_filter_mcdi_flags_from_spec(const struct efx_filter_spec *spec)
+{
+ enum efx_encap_type encap_type = efx_filter_get_encap_type(spec);
+ unsigned int match_flags = spec->match_flags;
+ unsigned int uc_match, mc_match;
+ u32 mcdi_flags = 0;
+
+#define MAP_FILTER_TO_MCDI_FLAG(gen_flag, mcdi_field, encap) { \
+ unsigned int old_match_flags = match_flags; \
+ match_flags &= ~EFX_FILTER_MATCH_ ## gen_flag; \
+ if (match_flags != old_match_flags) \
+ mcdi_flags |= \
+ (1 << ((encap) ? \
+ MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_ ## \
+ mcdi_field ## _LBN : \
+ MC_CMD_FILTER_OP_EXT_IN_MATCH_ ##\
+ mcdi_field ## _LBN)); \
+ }
+ /* inner or outer based on encap type */
+ MAP_FILTER_TO_MCDI_FLAG(REM_HOST, SRC_IP, encap_type);
+ MAP_FILTER_TO_MCDI_FLAG(LOC_HOST, DST_IP, encap_type);
+ MAP_FILTER_TO_MCDI_FLAG(REM_MAC, SRC_MAC, encap_type);
+ MAP_FILTER_TO_MCDI_FLAG(REM_PORT, SRC_PORT, encap_type);
+ MAP_FILTER_TO_MCDI_FLAG(LOC_MAC, DST_MAC, encap_type);
+ MAP_FILTER_TO_MCDI_FLAG(LOC_PORT, DST_PORT, encap_type);
+ MAP_FILTER_TO_MCDI_FLAG(ETHER_TYPE, ETHER_TYPE, encap_type);
+ MAP_FILTER_TO_MCDI_FLAG(IP_PROTO, IP_PROTO, encap_type);
+ /* always outer */
+ MAP_FILTER_TO_MCDI_FLAG(INNER_VID, INNER_VLAN, false);
+ MAP_FILTER_TO_MCDI_FLAG(OUTER_VID, OUTER_VLAN, false);
+#undef MAP_FILTER_TO_MCDI_FLAG
+
+ /* special handling for encap type, and mismatch */
+ if (encap_type) {
+ match_flags &= ~EFX_FILTER_MATCH_ENCAP_TYPE;
+ mcdi_flags |=
+ (1 << MC_CMD_FILTER_OP_EXT_IN_MATCH_ETHER_TYPE_LBN);
+ mcdi_flags |= (1 << MC_CMD_FILTER_OP_EXT_IN_MATCH_IP_PROTO_LBN);
+
+ uc_match = MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_UNKNOWN_UCAST_DST_LBN;
+ mc_match = MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_UNKNOWN_MCAST_DST_LBN;
+ } else {
+ uc_match = MC_CMD_FILTER_OP_EXT_IN_MATCH_UNKNOWN_UCAST_DST_LBN;
+ mc_match = MC_CMD_FILTER_OP_EXT_IN_MATCH_UNKNOWN_MCAST_DST_LBN;
+ }
+
+ if (match_flags & EFX_FILTER_MATCH_LOC_MAC_IG) {
+ match_flags &= ~EFX_FILTER_MATCH_LOC_MAC_IG;
+ mcdi_flags |=
+ is_multicast_ether_addr(spec->loc_mac) ?
+ 1 << mc_match :
+ 1 << uc_match;
+ }
+
+ /* Did we map them all? */
+ WARN_ON_ONCE(match_flags);
+
+ return mcdi_flags;
+}
+
+static int efx_mcdi_filter_pri(struct efx_mcdi_filter_table *table,
+ const struct efx_filter_spec *spec)
+{
+ u32 mcdi_flags = efx_mcdi_filter_mcdi_flags_from_spec(spec);
+ unsigned int match_pri;
+
+ for (match_pri = 0;
+ match_pri < table->rx_match_count;
+ match_pri++)
+ if (table->rx_match_mcdi_flags[match_pri] == mcdi_flags)
+ return match_pri;
+
+ return -EPROTONOSUPPORT;
+}
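+
+/*
+ * efx_mcdi_filter_pri() maps a spec to the index of its match type in the
+ * rx_match_mcdi_flags[] list discovered from the firmware at probe time
+ * (see efx_mcdi_filter_table_probe_matches()); -EPROTONOSUPPORT therefore
+ * means the firmware variant does not support this combination of match
+ * fields at all.
+ */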
+
+static s32 efx_mcdi_filter_insert_locked(struct efx_nic *efx,
+ struct efx_filter_spec *spec,
+ bool replace_equal)
+{
+ DECLARE_BITMAP(mc_rem_map, EFX_EF10_FILTER_SEARCH_LIMIT);
+ struct efx_ef10_nic_data *nic_data = efx->nic_data;
+ struct efx_mcdi_filter_table *table;
+ struct efx_filter_spec *saved_spec;
+ struct efx_rss_context *ctx = NULL;
+ unsigned int match_pri, hash;
+ unsigned int priv_flags;
+ bool rss_locked = false;
+ bool replacing = false;
+ unsigned int depth, i;
+ int ins_index = -1;
+ DEFINE_WAIT(wait);
+ bool is_mc_recip;
+ s32 rc;
+
+ WARN_ON(!rwsem_is_locked(&efx->filter_sem));
+ table = efx->filter_state;
+ down_write(&table->lock);
+
+ /* For now, only support RX filters */
+ if ((spec->flags & (EFX_FILTER_FLAG_RX | EFX_FILTER_FLAG_TX)) !=
+ EFX_FILTER_FLAG_RX) {
+ rc = -EINVAL;
+ goto out_unlock;
+ }
+
+ rc = efx_mcdi_filter_pri(table, spec);
+ if (rc < 0)
+ goto out_unlock;
+ match_pri = rc;
+
+ hash = efx_filter_spec_hash(spec);
+ is_mc_recip = efx_filter_is_mc_recipient(spec);
+ if (is_mc_recip)
+ bitmap_zero(mc_rem_map, EFX_EF10_FILTER_SEARCH_LIMIT);
+
+ if (spec->flags & EFX_FILTER_FLAG_RX_RSS) {
+ mutex_lock(&efx->rss_lock);
+ rss_locked = true;
+ if (spec->rss_context)
+ ctx = efx_find_rss_context_entry(efx, spec->rss_context);
+ else
+ ctx = &efx->rss_context;
+ if (!ctx) {
+ rc = -ENOENT;
+ goto out_unlock;
+ }
+ if (ctx->context_id == EFX_MCDI_RSS_CONTEXT_INVALID) {
+ rc = -EOPNOTSUPP;
+ goto out_unlock;
+ }
+ }
+
+ /* Find any existing filters with the same match tuple or
+ * else a free slot to insert at.
+ */
+ for (depth = 1; depth < EFX_EF10_FILTER_SEARCH_LIMIT; depth++) {
+ i = (hash + depth) & (EFX_MCDI_FILTER_TBL_ROWS - 1);
+ saved_spec = efx_mcdi_filter_entry_spec(table, i);
+
+ if (!saved_spec) {
+ if (ins_index < 0)
+ ins_index = i;
+ } else if (efx_filter_spec_equal(spec, saved_spec)) {
+ if (spec->priority < saved_spec->priority &&
+ spec->priority != EFX_FILTER_PRI_AUTO) {
+ rc = -EPERM;
+ goto out_unlock;
+ }
+ if (!is_mc_recip) {
+ /* This is the only one */
+ if (spec->priority ==
+ saved_spec->priority &&
+ !replace_equal) {
+ rc = -EEXIST;
+ goto out_unlock;
+ }
+ ins_index = i;
+ break;
+ } else if (spec->priority >
+ saved_spec->priority ||
+ (spec->priority ==
+ saved_spec->priority &&
+ replace_equal)) {
+ if (ins_index < 0)
+ ins_index = i;
+ else
+ __set_bit(depth, mc_rem_map);
+ }
+ }
+ }
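+
+ /*
+ * The loop above is open addressing with linear probing: starting
+ * from the spec hash it scans up to EFX_EF10_FILTER_SEARCH_LIMIT
+ * slots, remembering the first free slot as the insertion point.
+ * Multicast recipients may share a match tuple across several
+ * entries, so lower-priority duplicates are recorded in mc_rem_map
+ * and unsubscribed once the new filter has been pushed.
+ */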
+
+ /* Once we reach the maximum search depth, use the first suitable
+ * slot, or return -EBUSY if there is none.
+ */
+ if (ins_index < 0) {
+ rc = -EBUSY;
+ goto out_unlock;
+ }
+
+ /* Create a software table entry if necessary. */
+ saved_spec = efx_mcdi_filter_entry_spec(table, ins_index);
+ if (saved_spec) {
+ if (spec->priority == EFX_FILTER_PRI_AUTO &&
+ saved_spec->priority >= EFX_FILTER_PRI_AUTO) {
+ /* Just make sure it won't be removed */
+ if (saved_spec->priority > EFX_FILTER_PRI_AUTO)
+ saved_spec->flags |= EFX_FILTER_FLAG_RX_OVER_AUTO;
+ table->entry[ins_index].spec &=
+ ~EFX_EF10_FILTER_FLAG_AUTO_OLD;
+ rc = ins_index;
+ goto out_unlock;
+ }
+ replacing = true;
+ priv_flags = efx_mcdi_filter_entry_flags(table, ins_index);
+ } else {
+ saved_spec = kmalloc(sizeof(*spec), GFP_ATOMIC);
+ if (!saved_spec) {
+ rc = -ENOMEM;
+ goto out_unlock;
+ }
+ *saved_spec = *spec;
+ priv_flags = 0;
+ }
+ efx_mcdi_filter_set_entry(table, ins_index, saved_spec, priv_flags);
+
+ /* Actually insert the filter on the HW */
+ rc = efx_mcdi_filter_push(efx, spec, &table->entry[ins_index].handle,
+ ctx, replacing);
+
+ if (rc == -EINVAL && nic_data->must_realloc_vis)
+ /* The MC rebooted under us, causing it to reject our filter
+ * insertion as pointing to an invalid VI (spec->dmaq_id).
+ */
+ rc = -EAGAIN;
+
+ /* Finalise the software table entry */
+ if (rc == 0) {
+ if (replacing) {
+ /* Update the fields that may differ */
+ if (saved_spec->priority == EFX_FILTER_PRI_AUTO)
+ saved_spec->flags |=
+ EFX_FILTER_FLAG_RX_OVER_AUTO;
+ saved_spec->priority = spec->priority;
+ saved_spec->flags &= EFX_FILTER_FLAG_RX_OVER_AUTO;
+ saved_spec->flags |= spec->flags;
+ saved_spec->rss_context = spec->rss_context;
+ saved_spec->dmaq_id = spec->dmaq_id;
+ }
+ } else if (!replacing) {
+ kfree(saved_spec);
+ saved_spec = NULL;
+ } else {
+ /* We failed to replace, so the old filter is still present.
+ * Roll back the software table to reflect this. In fact the
+ * efx_mcdi_filter_set_entry() call below will do the right
+ * thing, so nothing extra is needed here.
+ */
+ }
+ efx_mcdi_filter_set_entry(table, ins_index, saved_spec, priv_flags);
+
+ /* Remove and finalise entries for lower-priority multicast
+ * recipients
+ */
+ if (is_mc_recip) {
+ MCDI_DECLARE_BUF(inbuf, MC_CMD_FILTER_OP_EXT_IN_LEN);
+ unsigned int depth, i;
+
+ memset(inbuf, 0, sizeof(inbuf));
+
+ for (depth = 0; depth < EFX_EF10_FILTER_SEARCH_LIMIT; depth++) {
+ if (!test_bit(depth, mc_rem_map))
+ continue;
+
+ i = (hash + depth) & (EFX_MCDI_FILTER_TBL_ROWS - 1);
+ saved_spec = efx_mcdi_filter_entry_spec(table, i);
+ priv_flags = efx_mcdi_filter_entry_flags(table, i);
+
+ if (rc == 0) {
+ MCDI_SET_DWORD(inbuf, FILTER_OP_IN_OP,
+ MC_CMD_FILTER_OP_IN_OP_UNSUBSCRIBE);
+ MCDI_SET_QWORD(inbuf, FILTER_OP_IN_HANDLE,
+ table->entry[i].handle);
+ rc = efx_mcdi_rpc(efx, MC_CMD_FILTER_OP,
+ inbuf, sizeof(inbuf),
+ NULL, 0, NULL);
+ }
+
+ if (rc == 0) {
+ kfree(saved_spec);
+ saved_spec = NULL;
+ priv_flags = 0;
+ }
+ efx_mcdi_filter_set_entry(table, i, saved_spec,
+ priv_flags);
+ }
+ }
+
+ /* If successful, return the inserted filter ID */
+ if (rc == 0)
+ rc = efx_mcdi_filter_make_filter_id(match_pri, ins_index);
+
+out_unlock:
+ if (rss_locked)
+ mutex_unlock(&efx->rss_lock);
+ up_write(&table->lock);
+ return rc;
+}
+
+s32 efx_mcdi_filter_insert(struct efx_nic *efx, struct efx_filter_spec *spec,
+ bool replace_equal)
+{
+ s32 ret;
+
+ down_read(&efx->filter_sem);
+ ret = efx_mcdi_filter_insert_locked(efx, spec, replace_equal);
+ up_read(&efx->filter_sem);
+
+ return ret;
+}
+
+/*
+ * Remove a filter: by ID if !by_index, or by table index if by_index.
+ * The filter ID may come from userland and must be range-checked.
+ * Caller must hold efx->filter_sem for read, and efx->filter_state->lock
+ * for write.
+ */
+static int efx_mcdi_filter_remove_internal(struct efx_nic *efx,
+ unsigned int priority_mask,
+ u32 filter_id, bool by_index)
+{
+ unsigned int filter_idx = efx_mcdi_filter_get_unsafe_id(filter_id);
+ struct efx_mcdi_filter_table *table = efx->filter_state;
+ MCDI_DECLARE_BUF(inbuf,
+ MC_CMD_FILTER_OP_IN_HANDLE_OFST +
+ MC_CMD_FILTER_OP_IN_HANDLE_LEN);
+ struct efx_filter_spec *spec;
+ DEFINE_WAIT(wait);
+ int rc;
+
+ spec = efx_mcdi_filter_entry_spec(table, filter_idx);
+ if (!spec ||
+ (!by_index &&
+ efx_mcdi_filter_pri(table, spec) !=
+ efx_mcdi_filter_get_unsafe_pri(filter_id)))
+ return -ENOENT;
+
+ if (spec->flags & EFX_FILTER_FLAG_RX_OVER_AUTO &&
+ priority_mask == (1U << EFX_FILTER_PRI_AUTO)) {
+ /* Just remove flags */
+ spec->flags &= ~EFX_FILTER_FLAG_RX_OVER_AUTO;
+ table->entry[filter_idx].spec &= ~EFX_EF10_FILTER_FLAG_AUTO_OLD;
+ return 0;
+ }
+
+ if (!(priority_mask & (1U << spec->priority)))
+ return -ENOENT;
+
+ if (spec->flags & EFX_FILTER_FLAG_RX_OVER_AUTO) {
+ /* Reset to an automatic filter */
+
+ struct efx_filter_spec new_spec = *spec;
+
+ new_spec.priority = EFX_FILTER_PRI_AUTO;
+ new_spec.flags = (EFX_FILTER_FLAG_RX |
+ (efx_rss_active(&efx->rss_context) ?
+ EFX_FILTER_FLAG_RX_RSS : 0));
+ new_spec.dmaq_id = 0;
+ new_spec.rss_context = 0;
+ rc = efx_mcdi_filter_push(efx, &new_spec,
+ &table->entry[filter_idx].handle,
+ &efx->rss_context,
+ true);
+
+ if (rc == 0)
+ *spec = new_spec;
+ } else {
+ /* Really remove the filter */
+
+ MCDI_SET_DWORD(inbuf, FILTER_OP_IN_OP,
+ efx_mcdi_filter_is_exclusive(spec) ?
+ MC_CMD_FILTER_OP_IN_OP_REMOVE :
+ MC_CMD_FILTER_OP_IN_OP_UNSUBSCRIBE);
+ MCDI_SET_QWORD(inbuf, FILTER_OP_IN_HANDLE,
+ table->entry[filter_idx].handle);
+ rc = efx_mcdi_rpc_quiet(efx, MC_CMD_FILTER_OP,
+ inbuf, sizeof(inbuf), NULL, 0, NULL);
+
+ if ((rc == 0) || (rc == -ENOENT)) {
+ /* Filter removed OK or didn't actually exist */
+ kfree(spec);
+ efx_mcdi_filter_set_entry(table, filter_idx, NULL, 0);
+ } else {
+ efx_mcdi_display_error(efx, MC_CMD_FILTER_OP,
+ MC_CMD_FILTER_OP_EXT_IN_LEN,
+ NULL, 0, rc);
+ }
+ }
+
+ return rc;
+}
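+
+/*
+ * Note the EFX_FILTER_FLAG_RX_OVER_AUTO path above: removing a user filter
+ * that overrode an automatic filter does not delete the entry, but resets
+ * it to an automatic filter by pushing a replacement spec, so the device
+ * regains the default behaviour it had before the override.
+ */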
+
+/* Remove filters that weren't renewed. */
+static void efx_mcdi_filter_remove_old(struct efx_nic *efx)
+{
+ struct efx_mcdi_filter_table *table = efx->filter_state;
+ int remove_failed = 0;
+ int remove_noent = 0;
+ int rc;
+ int i;
+
+ down_write(&table->lock);
+ for (i = 0; i < EFX_MCDI_FILTER_TBL_ROWS; i++) {
+ if (READ_ONCE(table->entry[i].spec) &
+ EFX_EF10_FILTER_FLAG_AUTO_OLD) {
+ rc = efx_mcdi_filter_remove_internal(efx,
+ 1U << EFX_FILTER_PRI_AUTO, i, true);
+ if (rc == -ENOENT)
+ remove_noent++;
+ else if (rc)
+ remove_failed++;
+ }
+ }
+ up_write(&table->lock);
+
+ if (remove_failed)
+ netif_info(efx, drv, efx->net_dev,
+ "%s: failed to remove %d filters\n",
+ __func__, remove_failed);
+ if (remove_noent)
+ netif_info(efx, drv, efx->net_dev,
+ "%s: failed to remove %d non-existent filters\n",
+ __func__, remove_noent);
+}
+
+int efx_mcdi_filter_remove_safe(struct efx_nic *efx,
+ enum efx_filter_priority priority,
+ u32 filter_id)
+{
+ struct efx_mcdi_filter_table *table;
+ int rc;
+
+ down_read(&efx->filter_sem);
+ table = efx->filter_state;
+ down_write(&table->lock);
+ rc = efx_mcdi_filter_remove_internal(efx, 1U << priority, filter_id,
+ false);
+ up_write(&table->lock);
+ up_read(&efx->filter_sem);
+ return rc;
+}
+
+/* Caller must hold efx->filter_sem for read */
+static void efx_mcdi_filter_remove_unsafe(struct efx_nic *efx,
+ enum efx_filter_priority priority,
+ u32 filter_id)
+{
+ struct efx_mcdi_filter_table *table = efx->filter_state;
+
+ if (filter_id == EFX_EF10_FILTER_ID_INVALID)
+ return;
+
+ down_write(&table->lock);
+ efx_mcdi_filter_remove_internal(efx, 1U << priority, filter_id,
+ true);
+ up_write(&table->lock);
+}
+
+int efx_mcdi_filter_get_safe(struct efx_nic *efx,
+ enum efx_filter_priority priority,
+ u32 filter_id, struct efx_filter_spec *spec)
+{
+ unsigned int filter_idx = efx_mcdi_filter_get_unsafe_id(filter_id);
+ const struct efx_filter_spec *saved_spec;
+ struct efx_mcdi_filter_table *table;
+ int rc;
+
+ down_read(&efx->filter_sem);
+ table = efx->filter_state;
+ down_read(&table->lock);
+ saved_spec = efx_mcdi_filter_entry_spec(table, filter_idx);
+ if (saved_spec && saved_spec->priority == priority &&
+ efx_mcdi_filter_pri(table, saved_spec) ==
+ efx_mcdi_filter_get_unsafe_pri(filter_id)) {
+ *spec = *saved_spec;
+ rc = 0;
+ } else {
+ rc = -ENOENT;
+ }
+ up_read(&table->lock);
+ up_read(&efx->filter_sem);
+ return rc;
+}
+
+static int efx_mcdi_filter_insert_addr_list(struct efx_nic *efx,
+ struct efx_mcdi_filter_vlan *vlan,
+ bool multicast, bool rollback)
+{
+ struct efx_mcdi_filter_table *table = efx->filter_state;
+ struct efx_mcdi_dev_addr *addr_list;
+ enum efx_filter_flags filter_flags;
+ struct efx_filter_spec spec;
+ u8 baddr[ETH_ALEN];
+ unsigned int i, j;
+ int addr_count;
+ u16 *ids;
+ int rc;
+
+ if (multicast) {
+ addr_list = table->dev_mc_list;
+ addr_count = table->dev_mc_count;
+ ids = vlan->mc;
+ } else {
+ addr_list = table->dev_uc_list;
+ addr_count = table->dev_uc_count;
+ ids = vlan->uc;
+ }
+
+ filter_flags = efx_rss_active(&efx->rss_context) ? EFX_FILTER_FLAG_RX_RSS : 0;
+
+ /* Insert/renew filters */
+ for (i = 0; i < addr_count; i++) {
+ EFX_WARN_ON_PARANOID(ids[i] != EFX_EF10_FILTER_ID_INVALID);
+ efx_filter_init_rx(&spec, EFX_FILTER_PRI_AUTO, filter_flags, 0);
+ efx_filter_set_eth_local(&spec, vlan->vid, addr_list[i].addr);
+ rc = efx_mcdi_filter_insert_locked(efx, &spec, true);
+ if (rc < 0) {
+ if (rollback) {
+ netif_info(efx, drv, efx->net_dev,
+ "efx_mcdi_filter_insert failed rc=%d\n",
+ rc);
+ /* Fall back to promiscuous */
+ for (j = 0; j < i; j++) {
+ efx_mcdi_filter_remove_unsafe(
+ efx, EFX_FILTER_PRI_AUTO,
+ ids[j]);
+ ids[j] = EFX_EF10_FILTER_ID_INVALID;
+ }
+ return rc;
+ } else {
+ /* keep invalid ID, and carry on */
+ }
+ } else {
+ ids[i] = efx_mcdi_filter_get_unsafe_id(rc);
+ }
+ }
+
+ if (multicast && rollback) {
+ /* Also need an Ethernet broadcast filter */
+ EFX_WARN_ON_PARANOID(vlan->default_filters[EFX_EF10_BCAST] !=
+ EFX_EF10_FILTER_ID_INVALID);
+ efx_filter_init_rx(&spec, EFX_FILTER_PRI_AUTO, filter_flags, 0);
+ eth_broadcast_addr(baddr);
+ efx_filter_set_eth_local(&spec, vlan->vid, baddr);
+ rc = efx_mcdi_filter_insert_locked(efx, &spec, true);
+ if (rc < 0) {
+ netif_warn(efx, drv, efx->net_dev,
+ "Broadcast filter insert failed rc=%d\n", rc);
+ /* Fall back to promiscuous */
+ for (j = 0; j < i; j++) {
+ efx_mcdi_filter_remove_unsafe(
+ efx, EFX_FILTER_PRI_AUTO,
+ ids[j]);
+ ids[j] = EFX_EF10_FILTER_ID_INVALID;
+ }
+ return rc;
+ } else {
+ vlan->default_filters[EFX_EF10_BCAST] =
+ efx_mcdi_filter_get_unsafe_id(rc);
+ }
+ }
+
+ return 0;
+}
+
+static int efx_mcdi_filter_insert_def(struct efx_nic *efx,
+ struct efx_mcdi_filter_vlan *vlan,
+ enum efx_encap_type encap_type,
+ bool multicast, bool rollback)
+{
+ struct efx_ef10_nic_data *nic_data = efx->nic_data;
+ enum efx_filter_flags filter_flags;
+ struct efx_filter_spec spec;
+ u8 baddr[ETH_ALEN];
+ int rc;
+ u16 *id;
+
+ filter_flags = efx_rss_active(&efx->rss_context) ? EFX_FILTER_FLAG_RX_RSS : 0;
+
+ efx_filter_init_rx(&spec, EFX_FILTER_PRI_AUTO, filter_flags, 0);
+
+ if (multicast)
+ efx_filter_set_mc_def(&spec);
+ else
+ efx_filter_set_uc_def(&spec);
+
+ if (encap_type) {
+ if (nic_data->datapath_caps &
+ (1 << MC_CMD_GET_CAPABILITIES_OUT_VXLAN_NVGRE_LBN))
+ efx_filter_set_encap_type(&spec, encap_type);
+ else
+ /*
+ * don't insert encap filters on non-supporting
+ * platforms. ID will be left as INVALID.
+ */
+ return 0;
+ }
+
+ if (vlan->vid != EFX_FILTER_VID_UNSPEC)
+ efx_filter_set_eth_local(&spec, vlan->vid, NULL);
+
+ rc = efx_mcdi_filter_insert_locked(efx, &spec, true);
+ if (rc < 0) {
+ const char *um = multicast ? "Multicast" : "Unicast";
+ const char *encap_name = "";
+ const char *encap_ipv = "";
+
+ if ((encap_type & EFX_ENCAP_TYPES_MASK) ==
+ EFX_ENCAP_TYPE_VXLAN)
+ encap_name = "VXLAN ";
+ else if ((encap_type & EFX_ENCAP_TYPES_MASK) ==
+ EFX_ENCAP_TYPE_NVGRE)
+ encap_name = "NVGRE ";
+ else if ((encap_type & EFX_ENCAP_TYPES_MASK) ==
+ EFX_ENCAP_TYPE_GENEVE)
+ encap_name = "GENEVE ";
+ if (encap_type & EFX_ENCAP_FLAG_IPV6)
+ encap_ipv = "IPv6 ";
+ else if (encap_type)
+ encap_ipv = "IPv4 ";
+
+ /*
+ * unprivileged functions can't insert mismatch filters
+ * for encapsulated or unicast traffic, so downgrade
+ * those warnings to debug.
+ */
+ netif_cond_dbg(efx, drv, efx->net_dev,
+ rc == -EPERM && (encap_type || !multicast), warn,
+ "%s%s%s mismatch filter insert failed rc=%d\n",
+ encap_name, encap_ipv, um, rc);
+ } else if (multicast) {
+ /* mapping from encap types to default filter IDs (multicast) */
+ static enum efx_mcdi_filter_default_filters map[] = {
+ [EFX_ENCAP_TYPE_NONE] = EFX_EF10_MCDEF,
+ [EFX_ENCAP_TYPE_VXLAN] = EFX_EF10_VXLAN4_MCDEF,
+ [EFX_ENCAP_TYPE_NVGRE] = EFX_EF10_NVGRE4_MCDEF,
+ [EFX_ENCAP_TYPE_GENEVE] = EFX_EF10_GENEVE4_MCDEF,
+ [EFX_ENCAP_TYPE_VXLAN | EFX_ENCAP_FLAG_IPV6] =
+ EFX_EF10_VXLAN6_MCDEF,
+ [EFX_ENCAP_TYPE_NVGRE | EFX_ENCAP_FLAG_IPV6] =
+ EFX_EF10_NVGRE6_MCDEF,
+ [EFX_ENCAP_TYPE_GENEVE | EFX_ENCAP_FLAG_IPV6] =
+ EFX_EF10_GENEVE6_MCDEF,
+ };
+
+ /* quick bounds check (BCAST result impossible) */
+ BUILD_BUG_ON(EFX_EF10_BCAST != 0);
+ if (encap_type >= ARRAY_SIZE(map) || map[encap_type] == 0) {
+ WARN_ON(1);
+ return -EINVAL;
+ }
+ /* then follow map */
+ id = &vlan->default_filters[map[encap_type]];
+
+ EFX_WARN_ON_PARANOID(*id != EFX_EF10_FILTER_ID_INVALID);
+ *id = efx_mcdi_filter_get_unsafe_id(rc);
+ if (!nic_data->workaround_26807 && !encap_type) {
+ /* Also need an Ethernet broadcast filter */
+ efx_filter_init_rx(&spec, EFX_FILTER_PRI_AUTO,
+ filter_flags, 0);
+ eth_broadcast_addr(baddr);
+ efx_filter_set_eth_local(&spec, vlan->vid, baddr);
+ rc = efx_mcdi_filter_insert_locked(efx, &spec, true);
+ if (rc < 0) {
+ netif_warn(efx, drv, efx->net_dev,
+ "Broadcast filter insert failed rc=%d\n",
+ rc);
+ if (rollback) {
+ /* Roll back the mc_def filter */
+ efx_mcdi_filter_remove_unsafe(
+ efx, EFX_FILTER_PRI_AUTO,
+ *id);
+ *id = EFX_EF10_FILTER_ID_INVALID;
+ return rc;
+ }
+ } else {
+ EFX_WARN_ON_PARANOID(
+ vlan->default_filters[EFX_EF10_BCAST] !=
+ EFX_EF10_FILTER_ID_INVALID);
+ vlan->default_filters[EFX_EF10_BCAST] =
+ efx_mcdi_filter_get_unsafe_id(rc);
+ }
+ }
+ rc = 0;
+ } else {
+ /* mapping from encap types to default filter IDs (unicast) */
+ static enum efx_mcdi_filter_default_filters map[] = {
+ [EFX_ENCAP_TYPE_NONE] = EFX_EF10_UCDEF,
+ [EFX_ENCAP_TYPE_VXLAN] = EFX_EF10_VXLAN4_UCDEF,
+ [EFX_ENCAP_TYPE_NVGRE] = EFX_EF10_NVGRE4_UCDEF,
+ [EFX_ENCAP_TYPE_GENEVE] = EFX_EF10_GENEVE4_UCDEF,
+ [EFX_ENCAP_TYPE_VXLAN | EFX_ENCAP_FLAG_IPV6] =
+ EFX_EF10_VXLAN6_UCDEF,
+ [EFX_ENCAP_TYPE_NVGRE | EFX_ENCAP_FLAG_IPV6] =
+ EFX_EF10_NVGRE6_UCDEF,
+ [EFX_ENCAP_TYPE_GENEVE | EFX_ENCAP_FLAG_IPV6] =
+ EFX_EF10_GENEVE6_UCDEF,
+ };
+
+ /* quick bounds check (BCAST result impossible) */
+ BUILD_BUG_ON(EFX_EF10_BCAST != 0);
+ if (encap_type >= ARRAY_SIZE(map) || map[encap_type] == 0) {
+ WARN_ON(1);
+ return -EINVAL;
+ }
+ /* then follow map */
+ id = &vlan->default_filters[map[encap_type]];
+ EFX_WARN_ON_PARANOID(*id != EFX_EF10_FILTER_ID_INVALID);
+ *id = rc;
+ rc = 0;
+ }
+ return rc;
+}
+
+/*
+ * Caller must hold efx->filter_sem for read if race against
+ * efx_mcdi_filter_table_remove() is possible
+ */
+static void efx_mcdi_filter_vlan_sync_rx_mode(struct efx_nic *efx,
+ struct efx_mcdi_filter_vlan *vlan)
+{
+ struct efx_mcdi_filter_table *table = efx->filter_state;
+ struct efx_ef10_nic_data *nic_data = efx->nic_data;
+
+ /*
+ * If VLAN filtering is enabled, skip the unspecified VID; if it is
+ * disabled, skip all specific VIDs and install only the unspecified one.
+ */
+ if ((vlan->vid == EFX_FILTER_VID_UNSPEC) == table->vlan_filter)
+ return;
+
+ /* Insert/renew unicast filters */
+ if (table->uc_promisc) {
+ efx_mcdi_filter_insert_def(efx, vlan, EFX_ENCAP_TYPE_NONE,
+ false, false);
+ efx_mcdi_filter_insert_addr_list(efx, vlan, false, false);
+ } else {
+ /*
+ * If any of the filters failed to insert, fall back to
+ * promiscuous mode - add in the uc_def filter. But keep
+ * our individual unicast filters.
+ */
+ if (efx_mcdi_filter_insert_addr_list(efx, vlan, false, false))
+ efx_mcdi_filter_insert_def(efx, vlan,
+ EFX_ENCAP_TYPE_NONE,
+ false, false);
+ }
+ efx_mcdi_filter_insert_def(efx, vlan, EFX_ENCAP_TYPE_VXLAN,
+ false, false);
+ efx_mcdi_filter_insert_def(efx, vlan, EFX_ENCAP_TYPE_VXLAN |
+ EFX_ENCAP_FLAG_IPV6,
+ false, false);
+ efx_mcdi_filter_insert_def(efx, vlan, EFX_ENCAP_TYPE_NVGRE,
+ false, false);
+ efx_mcdi_filter_insert_def(efx, vlan, EFX_ENCAP_TYPE_NVGRE |
+ EFX_ENCAP_FLAG_IPV6,
+ false, false);
+ efx_mcdi_filter_insert_def(efx, vlan, EFX_ENCAP_TYPE_GENEVE,
+ false, false);
+ efx_mcdi_filter_insert_def(efx, vlan, EFX_ENCAP_TYPE_GENEVE |
+ EFX_ENCAP_FLAG_IPV6,
+ false, false);
+
+ /*
+ * Insert/renew multicast filters
+ *
+ * If changing promiscuous state with cascaded multicast filters, remove
+ * old filters first, so that packets are dropped rather than duplicated
+ */
+ if (nic_data->workaround_26807 &&
+ table->mc_promisc_last != table->mc_promisc)
+ efx_mcdi_filter_remove_old(efx);
+ if (table->mc_promisc) {
+ if (nic_data->workaround_26807) {
+ /*
+ * If we failed to insert promiscuous filters, rollback
+ * and fall back to individual multicast filters
+ */
+ if (efx_mcdi_filter_insert_def(efx, vlan,
+ EFX_ENCAP_TYPE_NONE,
+ true, true)) {
+ /* Changing promisc state, so remove old filters */
+ efx_mcdi_filter_remove_old(efx);
+ efx_mcdi_filter_insert_addr_list(efx, vlan,
+ true, false);
+ }
+ } else {
+ /*
+ * If we failed to insert promiscuous filters, don't
+ * rollback. Regardless, also insert the mc_list,
+ * unless it's incomplete due to overflow
+ */
+ efx_mcdi_filter_insert_def(efx, vlan,
+ EFX_ENCAP_TYPE_NONE,
+ true, false);
+ if (!table->mc_overflow)
+ efx_mcdi_filter_insert_addr_list(efx, vlan,
+ true, false);
+ }
+ } else {
+ /*
+ * If any filters failed to insert, rollback and fall back to
+ * promiscuous mode - mc_def filter and maybe broadcast. If
+ * that fails, roll back again and insert as many of our
+ * individual multicast filters as we can.
+ */
+ if (efx_mcdi_filter_insert_addr_list(efx, vlan, true, true)) {
+ /* Changing promisc state, so remove old filters */
+ if (nic_data->workaround_26807)
+ efx_mcdi_filter_remove_old(efx);
+ if (efx_mcdi_filter_insert_def(efx, vlan,
+ EFX_ENCAP_TYPE_NONE,
+ true, true))
+ efx_mcdi_filter_insert_addr_list(efx, vlan,
+ true, false);
+ }
+ }
+ efx_mcdi_filter_insert_def(efx, vlan, EFX_ENCAP_TYPE_VXLAN,
+ true, false);
+ efx_mcdi_filter_insert_def(efx, vlan, EFX_ENCAP_TYPE_VXLAN |
+ EFX_ENCAP_FLAG_IPV6,
+ true, false);
+ efx_mcdi_filter_insert_def(efx, vlan, EFX_ENCAP_TYPE_NVGRE,
+ true, false);
+ efx_mcdi_filter_insert_def(efx, vlan, EFX_ENCAP_TYPE_NVGRE |
+ EFX_ENCAP_FLAG_IPV6,
+ true, false);
+ efx_mcdi_filter_insert_def(efx, vlan, EFX_ENCAP_TYPE_GENEVE,
+ true, false);
+ efx_mcdi_filter_insert_def(efx, vlan, EFX_ENCAP_TYPE_GENEVE |
+ EFX_ENCAP_FLAG_IPV6,
+ true, false);
+}
+
+int efx_mcdi_filter_clear_rx(struct efx_nic *efx,
+ enum efx_filter_priority priority)
+{
+ struct efx_mcdi_filter_table *table;
+ unsigned int priority_mask;
+ unsigned int i;
+ int rc;
+
+ priority_mask = (((1U << (priority + 1)) - 1) &
+ ~(1U << EFX_FILTER_PRI_AUTO));
+
+ down_read(&efx->filter_sem);
+ table = efx->filter_state;
+ down_write(&table->lock);
+ for (i = 0; i < EFX_MCDI_FILTER_TBL_ROWS; i++) {
+ rc = efx_mcdi_filter_remove_internal(efx, priority_mask,
+ i, true);
+ if (rc && rc != -ENOENT)
+ break;
+ rc = 0;
+ }
+
+ up_write(&table->lock);
+ up_read(&efx->filter_sem);
+ return rc;
+}
+
+u32 efx_mcdi_filter_count_rx_used(struct efx_nic *efx,
+ enum efx_filter_priority priority)
+{
+ struct efx_mcdi_filter_table *table;
+ unsigned int filter_idx;
+ s32 count = 0;
+
+ down_read(&efx->filter_sem);
+ table = efx->filter_state;
+ down_read(&table->lock);
+ for (filter_idx = 0; filter_idx < EFX_MCDI_FILTER_TBL_ROWS; filter_idx++) {
+ if (table->entry[filter_idx].spec &&
+ efx_mcdi_filter_entry_spec(table, filter_idx)->priority ==
+ priority)
+ ++count;
+ }
+ up_read(&table->lock);
+ up_read(&efx->filter_sem);
+ return count;
+}
+
+u32 efx_mcdi_filter_get_rx_id_limit(struct efx_nic *efx)
+{
+ struct efx_mcdi_filter_table *table = efx->filter_state;
+
+ return table->rx_match_count * EFX_MCDI_FILTER_TBL_ROWS * 2;
+}
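+
+/*
+ * This limit mirrors the ID encoding in efx_mcdi_filter_make_filter_id():
+ * with rx_match_count match priorities and EFX_MCDI_FILTER_TBL_ROWS * 2
+ * IDs per priority, every valid (pri, idx) pair maps to an ID below it.
+ */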
+
+s32 efx_mcdi_filter_get_rx_ids(struct efx_nic *efx,
+ enum efx_filter_priority priority,
+ u32 *buf, u32 size)
+{
+ struct efx_mcdi_filter_table *table;
+ struct efx_filter_spec *spec;
+ unsigned int filter_idx;
+ s32 count = 0;
+
+ down_read(&efx->filter_sem);
+ table = efx->filter_state;
+ down_read(&table->lock);
+
+ for (filter_idx = 0; filter_idx < EFX_MCDI_FILTER_TBL_ROWS; filter_idx++) {
+ spec = efx_mcdi_filter_entry_spec(table, filter_idx);
+ if (spec && spec->priority == priority) {
+ if (count == size) {
+ count = -EMSGSIZE;
+ break;
+ }
+ buf[count++] =
+ efx_mcdi_filter_make_filter_id(
+ efx_mcdi_filter_pri(table, spec),
+ filter_idx);
+ }
+ }
+ up_read(&table->lock);
+ up_read(&efx->filter_sem);
+ return count;
+}
+
+static int efx_mcdi_filter_match_flags_from_mcdi(bool encap, u32 mcdi_flags)
+{
+ int match_flags = 0;
+
+#define MAP_FLAG(gen_flag, mcdi_field) do { \
+ u32 old_mcdi_flags = mcdi_flags; \
+ mcdi_flags &= ~(1 << MC_CMD_FILTER_OP_EXT_IN_MATCH_ ## \
+ mcdi_field ## _LBN); \
+ if (mcdi_flags != old_mcdi_flags) \
+ match_flags |= EFX_FILTER_MATCH_ ## gen_flag; \
+ } while (0)
+
+ if (encap) {
+ /* encap filters must specify encap type */
+ match_flags |= EFX_FILTER_MATCH_ENCAP_TYPE;
+ /* and imply ethertype and ip proto */
+ mcdi_flags &=
+ ~(1 << MC_CMD_FILTER_OP_EXT_IN_MATCH_IP_PROTO_LBN);
+ mcdi_flags &=
+ ~(1 << MC_CMD_FILTER_OP_EXT_IN_MATCH_ETHER_TYPE_LBN);
+ /* VLAN tags refer to the outer packet */
+ MAP_FLAG(INNER_VID, INNER_VLAN);
+ MAP_FLAG(OUTER_VID, OUTER_VLAN);
+ /* everything else refers to the inner packet */
+ MAP_FLAG(LOC_MAC_IG, IFRM_UNKNOWN_UCAST_DST);
+ MAP_FLAG(LOC_MAC_IG, IFRM_UNKNOWN_MCAST_DST);
+ MAP_FLAG(REM_HOST, IFRM_SRC_IP);
+ MAP_FLAG(LOC_HOST, IFRM_DST_IP);
+ MAP_FLAG(REM_MAC, IFRM_SRC_MAC);
+ MAP_FLAG(REM_PORT, IFRM_SRC_PORT);
+ MAP_FLAG(LOC_MAC, IFRM_DST_MAC);
+ MAP_FLAG(LOC_PORT, IFRM_DST_PORT);
+ MAP_FLAG(ETHER_TYPE, IFRM_ETHER_TYPE);
+ MAP_FLAG(IP_PROTO, IFRM_IP_PROTO);
+ } else {
+ MAP_FLAG(LOC_MAC_IG, UNKNOWN_UCAST_DST);
+ MAP_FLAG(LOC_MAC_IG, UNKNOWN_MCAST_DST);
+ MAP_FLAG(REM_HOST, SRC_IP);
+ MAP_FLAG(LOC_HOST, DST_IP);
+ MAP_FLAG(REM_MAC, SRC_MAC);
+ MAP_FLAG(REM_PORT, SRC_PORT);
+ MAP_FLAG(LOC_MAC, DST_MAC);
+ MAP_FLAG(LOC_PORT, DST_PORT);
+ MAP_FLAG(ETHER_TYPE, ETHER_TYPE);
+ MAP_FLAG(INNER_VID, INNER_VLAN);
+ MAP_FLAG(OUTER_VID, OUTER_VLAN);
+ MAP_FLAG(IP_PROTO, IP_PROTO);
+ }
+#undef MAP_FLAG
+
+ /* Did we map them all? */
+ if (mcdi_flags)
+ return -EINVAL;
+
+ return match_flags;
+}
+
+bool efx_mcdi_filter_match_supported(struct efx_mcdi_filter_table *table,
+ bool encap,
+ enum efx_filter_match_flags match_flags)
+{
+ unsigned int match_pri;
+ int mf;
+
+ for (match_pri = 0;
+ match_pri < table->rx_match_count;
+ match_pri++) {
+ mf = efx_mcdi_filter_match_flags_from_mcdi(encap,
+ table->rx_match_mcdi_flags[match_pri]);
+ if (mf == match_flags)
+ return true;
+ }
+
+ return false;
+}
+
+static int
+efx_mcdi_filter_table_probe_matches(struct efx_nic *efx,
+ struct efx_mcdi_filter_table *table,
+ bool encap)
+{
+ MCDI_DECLARE_BUF(inbuf, MC_CMD_GET_PARSER_DISP_INFO_IN_LEN);
+ MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_PARSER_DISP_INFO_OUT_LENMAX);
+ unsigned int pd_match_pri, pd_match_count;
+ size_t outlen;
+ int rc;
+
+ /* Find out which RX filter types are supported, and their priorities */
+ MCDI_SET_DWORD(inbuf, GET_PARSER_DISP_INFO_IN_OP,
+ encap ?
+ MC_CMD_GET_PARSER_DISP_INFO_IN_OP_GET_SUPPORTED_ENCAP_RX_MATCHES :
+ MC_CMD_GET_PARSER_DISP_INFO_IN_OP_GET_SUPPORTED_RX_MATCHES);
+ rc = efx_mcdi_rpc(efx, MC_CMD_GET_PARSER_DISP_INFO,
+ inbuf, sizeof(inbuf), outbuf, sizeof(outbuf),
+ &outlen);
+ if (rc)
+ return rc;
+
+ pd_match_count = MCDI_VAR_ARRAY_LEN(
+ outlen, GET_PARSER_DISP_INFO_OUT_SUPPORTED_MATCHES);
+
+ for (pd_match_pri = 0; pd_match_pri < pd_match_count; pd_match_pri++) {
+ u32 mcdi_flags =
+ MCDI_ARRAY_DWORD(
+ outbuf,
+ GET_PARSER_DISP_INFO_OUT_SUPPORTED_MATCHES,
+ pd_match_pri);
+ rc = efx_mcdi_filter_match_flags_from_mcdi(encap, mcdi_flags);
+ if (rc < 0) {
+ netif_dbg(efx, probe, efx->net_dev,
+ "%s: fw flags %#x pri %u not supported in driver\n",
+ __func__, mcdi_flags, pd_match_pri);
+ } else {
+ netif_dbg(efx, probe, efx->net_dev,
+ "%s: fw flags %#x pri %u supported as driver flags %#x pri %u\n",
+ __func__, mcdi_flags, pd_match_pri,
+ rc, table->rx_match_count);
+ table->rx_match_mcdi_flags[table->rx_match_count] = mcdi_flags;
+ table->rx_match_count++;
+ }
+ }
+
+ return 0;
+}
+
+int efx_mcdi_filter_table_probe(struct efx_nic *efx)
+{
+ struct efx_ef10_nic_data *nic_data = efx->nic_data;
+ struct net_device *net_dev = efx->net_dev;
+ struct efx_mcdi_filter_table *table;
+ struct efx_mcdi_filter_vlan *vlan;
+ int rc;
+
+ if (!efx_rwsem_assert_write_locked(&efx->filter_sem))
+ return -EINVAL;
+
+ if (efx->filter_state) /* already probed */
+ return 0;
+
+ table = kzalloc(sizeof(*table), GFP_KERNEL);
+ if (!table)
+ return -ENOMEM;
+
+ table->rx_match_count = 0;
+ rc = efx_mcdi_filter_table_probe_matches(efx, table, false);
+ if (rc)
+ goto fail;
+ if (nic_data->datapath_caps &
+ (1 << MC_CMD_GET_CAPABILITIES_OUT_VXLAN_NVGRE_LBN))
+ rc = efx_mcdi_filter_table_probe_matches(efx, table, true);
+ if (rc)
+ goto fail;
+ if ((efx_supported_features(efx) & NETIF_F_HW_VLAN_CTAG_FILTER) &&
+ !(efx_mcdi_filter_match_supported(table, false,
+ (EFX_FILTER_MATCH_OUTER_VID | EFX_FILTER_MATCH_LOC_MAC)) &&
+ efx_mcdi_filter_match_supported(table, false,
+ (EFX_FILTER_MATCH_OUTER_VID | EFX_FILTER_MATCH_LOC_MAC_IG)))) {
+ netif_info(efx, probe, net_dev,
+ "VLAN filters are not supported in this firmware variant\n");
+ net_dev->features &= ~NETIF_F_HW_VLAN_CTAG_FILTER;
+ efx->fixed_features &= ~NETIF_F_HW_VLAN_CTAG_FILTER;
+ net_dev->hw_features &= ~NETIF_F_HW_VLAN_CTAG_FILTER;
+ }
+
+ table->entry = vzalloc(array_size(EFX_MCDI_FILTER_TBL_ROWS,
+ sizeof(*table->entry)));
+ if (!table->entry) {
+ rc = -ENOMEM;
+ goto fail;
+ }
+
+ table->mc_promisc_last = false;
+ table->vlan_filter =
+ !!(efx->net_dev->features & NETIF_F_HW_VLAN_CTAG_FILTER);
+ INIT_LIST_HEAD(&table->vlan_list);
+ init_rwsem(&table->lock);
+
+ efx->filter_state = table;
+
+ list_for_each_entry(vlan, &nic_data->vlan_list, list) {
+ rc = efx_mcdi_filter_add_vlan(efx, vlan->vid);
+ if (rc)
+ goto fail_add_vlan;
+ }
+
+ return 0;
+
+fail_add_vlan:
+ efx_mcdi_filter_cleanup_vlans(efx);
+ efx->filter_state = NULL;
+fail:
+ kfree(table);
+ return rc;
+}
+
+/*
+ * Caller must hold efx->filter_sem for read if race against
+ * efx_mcdi_filter_table_remove() is possible
+ */
+void efx_mcdi_filter_table_restore(struct efx_nic *efx)
+{
+ struct efx_mcdi_filter_table *table = efx->filter_state;
+ struct efx_ef10_nic_data *nic_data = efx->nic_data;
+ unsigned int invalid_filters = 0, failed = 0;
+ struct efx_mcdi_filter_vlan *vlan;
+ struct efx_filter_spec *spec;
+ struct efx_rss_context *ctx;
+ unsigned int filter_idx;
+ u32 mcdi_flags;
+ int match_pri;
+ int rc, i;
+
+ WARN_ON(!rwsem_is_locked(&efx->filter_sem));
+
+ if (!nic_data->must_restore_filters)
+ return;
+
+ if (!table)
+ return;
+
+ down_write(&table->lock);
+ mutex_lock(&efx->rss_lock);
+
+ for (filter_idx = 0; filter_idx < EFX_MCDI_FILTER_TBL_ROWS; filter_idx++) {
+ spec = efx_mcdi_filter_entry_spec(table, filter_idx);
+ if (!spec)
+ continue;
+
+ mcdi_flags = efx_mcdi_filter_mcdi_flags_from_spec(spec);
+ match_pri = 0;
+ while (match_pri < table->rx_match_count &&
+ table->rx_match_mcdi_flags[match_pri] != mcdi_flags)
+ ++match_pri;
+ if (match_pri >= table->rx_match_count) {
+ invalid_filters++;
+ goto not_restored;
+ }
+ if (spec->rss_context)
+ ctx = efx_find_rss_context_entry(efx, spec->rss_context);
+ else
+ ctx = &efx->rss_context;
+ if (spec->flags & EFX_FILTER_FLAG_RX_RSS) {
+ if (!ctx) {
+ netif_warn(efx, drv, efx->net_dev,
+ "Warning: unable to restore a filter with nonexistent RSS context %u.\n",
+ spec->rss_context);
+ invalid_filters++;
+ goto not_restored;
+ }
+ if (ctx->context_id == EFX_MCDI_RSS_CONTEXT_INVALID) {
+ netif_warn(efx, drv, efx->net_dev,
+ "Warning: unable to restore a filter with RSS context %u as it was not created.\n",
+ spec->rss_context);
+ invalid_filters++;
+ goto not_restored;
+ }
+ }
+
+ rc = efx_mcdi_filter_push(efx, spec,
+ &table->entry[filter_idx].handle,
+ ctx, false);
+ if (rc)
+ failed++;
+
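+ /*
+ * The failed++ check is kept separate from the block below so that
+ * jumps to not_restored (filters that are merely unsupported after
+ * an MC reboot) count as invalid_filters, not as push failures.
+ */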
+ if (rc) {
+not_restored:
+ list_for_each_entry(vlan, &table->vlan_list, list)
+ for (i = 0; i < EFX_EF10_NUM_DEFAULT_FILTERS; ++i)
+ if (vlan->default_filters[i] == filter_idx)
+ vlan->default_filters[i] =
+ EFX_EF10_FILTER_ID_INVALID;
+
+ kfree(spec);
+ efx_mcdi_filter_set_entry(table, filter_idx, NULL, 0);
+ }
+ }
+
+ mutex_unlock(&efx->rss_lock);
+ up_write(&table->lock);
+
+ /*
+ * This can happen validly if the MC's capabilities have changed, so
+ * is not an error.
+ */
+ if (invalid_filters)
+ netif_dbg(efx, drv, efx->net_dev,
+ "Did not restore %u filters that are now unsupported.\n",
+ invalid_filters);
+
+ if (failed)
+ netif_err(efx, hw, efx->net_dev,
+ "unable to restore %u filters\n", failed);
+ else
+ nic_data->must_restore_filters = false;
+}
+
+void efx_mcdi_filter_table_remove(struct efx_nic *efx)
+{
+ struct efx_mcdi_filter_table *table = efx->filter_state;
+ MCDI_DECLARE_BUF(inbuf, MC_CMD_FILTER_OP_EXT_IN_LEN);
+ struct efx_filter_spec *spec;
+ unsigned int filter_idx;
+ int rc;
+
+ efx_mcdi_filter_cleanup_vlans(efx);
+ efx->filter_state = NULL;
+ /*
+ * If we were called without locking, then it's not safe to free
+ * the table as others might be using it. So we just WARN, leak
+ * the memory, and potentially get an inconsistent filter table
+ * state.
+ * This should never actually happen.
+ */
+ if (!efx_rwsem_assert_write_locked(&efx->filter_sem))
+ return;
+
+ if (!table)
+ return;
+
+ for (filter_idx = 0; filter_idx < EFX_MCDI_FILTER_TBL_ROWS; filter_idx++) {
+ spec = efx_mcdi_filter_entry_spec(table, filter_idx);
+ if (!spec)
+ continue;
+
+ MCDI_SET_DWORD(inbuf, FILTER_OP_IN_OP,
+ efx_mcdi_filter_is_exclusive(spec) ?
+ MC_CMD_FILTER_OP_IN_OP_REMOVE :
+ MC_CMD_FILTER_OP_IN_OP_UNSUBSCRIBE);
+ MCDI_SET_QWORD(inbuf, FILTER_OP_IN_HANDLE,
+ table->entry[filter_idx].handle);
+ rc = efx_mcdi_rpc_quiet(efx, MC_CMD_FILTER_OP, inbuf,
+ sizeof(inbuf), NULL, 0, NULL);
+ if (rc)
+ netif_info(efx, drv, efx->net_dev,
+ "%s: filter %04x remove failed\n",
+ __func__, filter_idx);
+ kfree(spec);
+ }
+
+ vfree(table->entry);
+ kfree(table);
+}
+
+static void efx_mcdi_filter_mark_one_old(struct efx_nic *efx, uint16_t *id)
+{
+ struct efx_mcdi_filter_table *table = efx->filter_state;
+ unsigned int filter_idx;
+
+ efx_rwsem_assert_write_locked(&table->lock);
+
+ if (*id != EFX_EF10_FILTER_ID_INVALID) {
+ filter_idx = efx_mcdi_filter_get_unsafe_id(*id);
+ if (!table->entry[filter_idx].spec)
+ netif_dbg(efx, drv, efx->net_dev,
+ "marked null spec old %04x:%04x\n", *id,
+ filter_idx);
+ table->entry[filter_idx].spec |= EFX_EF10_FILTER_FLAG_AUTO_OLD;
+ *id = EFX_EF10_FILTER_ID_INVALID;
+ }
+}
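+
+/*
+ * Marking works hand in hand with efx_mcdi_filter_remove_old(): a sync
+ * first flags every automatic filter AUTO_OLD, re-inserting a still-wanted
+ * filter clears the flag again (see efx_mcdi_filter_insert_locked()), and
+ * whatever remains flagged afterwards is swept away.
+ */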
+
+/* Mark old per-VLAN filters that may need to be removed */
+static void _efx_mcdi_filter_vlan_mark_old(struct efx_nic *efx,
+ struct efx_mcdi_filter_vlan *vlan)
+{
+ struct efx_mcdi_filter_table *table = efx->filter_state;
+ unsigned int i;
+
+ for (i = 0; i < table->dev_uc_count; i++)
+ efx_mcdi_filter_mark_one_old(efx, &vlan->uc[i]);
+ for (i = 0; i < table->dev_mc_count; i++)
+ efx_mcdi_filter_mark_one_old(efx, &vlan->mc[i]);
+ for (i = 0; i < EFX_EF10_NUM_DEFAULT_FILTERS; i++)
+ efx_mcdi_filter_mark_one_old(efx, &vlan->default_filters[i]);
+}
+
+/*
+ * Mark old filters that may need to be removed.
+ * Caller must hold efx->filter_sem for read if race against
+ * efx_mcdi_filter_table_remove() is possible
+ */
+static void efx_mcdi_filter_mark_old(struct efx_nic *efx)
+{
+ struct efx_mcdi_filter_table *table = efx->filter_state;
+ struct efx_mcdi_filter_vlan *vlan;
+
+ down_write(&table->lock);
+ list_for_each_entry(vlan, &table->vlan_list, list)
+ _efx_mcdi_filter_vlan_mark_old(efx, vlan);
+ up_write(&table->lock);
+}
+
+int efx_mcdi_filter_add_vlan(struct efx_nic *efx, u16 vid)
+{
+ struct efx_mcdi_filter_table *table = efx->filter_state;
+ struct efx_mcdi_filter_vlan *vlan;
+ unsigned int i;
+
+ if (!efx_rwsem_assert_write_locked(&efx->filter_sem))
+ return -EINVAL;
+
+ vlan = efx_mcdi_filter_find_vlan(efx, vid);
+ if (WARN_ON(vlan)) {
+ netif_err(efx, drv, efx->net_dev,
+ "VLAN %u already added\n", vid);
+ return -EALREADY;
+ }
+
+ vlan = kzalloc(sizeof(*vlan), GFP_KERNEL);
+ if (!vlan)
+ return -ENOMEM;
+
+ vlan->vid = vid;
+
+ for (i = 0; i < ARRAY_SIZE(vlan->uc); i++)
+ vlan->uc[i] = EFX_EF10_FILTER_ID_INVALID;
+ for (i = 0; i < ARRAY_SIZE(vlan->mc); i++)
+ vlan->mc[i] = EFX_EF10_FILTER_ID_INVALID;
+ for (i = 0; i < EFX_EF10_NUM_DEFAULT_FILTERS; i++)
+ vlan->default_filters[i] = EFX_EF10_FILTER_ID_INVALID;
+
+ list_add_tail(&vlan->list, &table->vlan_list);
+
+ if (efx_dev_registered(efx))
+ efx_mcdi_filter_vlan_sync_rx_mode(efx, vlan);
+
+ return 0;
+}
+
+static void efx_mcdi_filter_del_vlan_internal(struct efx_nic *efx,
+ struct efx_mcdi_filter_vlan *vlan)
+{
+ unsigned int i;
+
+ /* See comment in efx_mcdi_filter_table_remove() */
+ if (!efx_rwsem_assert_write_locked(&efx->filter_sem))
+ return;
+
+ list_del(&vlan->list);
+
+ for (i = 0; i < ARRAY_SIZE(vlan->uc); i++)
+ efx_mcdi_filter_remove_unsafe(efx, EFX_FILTER_PRI_AUTO,
+ vlan->uc[i]);
+ for (i = 0; i < ARRAY_SIZE(vlan->mc); i++)
+ efx_mcdi_filter_remove_unsafe(efx, EFX_FILTER_PRI_AUTO,
+ vlan->mc[i]);
+ for (i = 0; i < EFX_EF10_NUM_DEFAULT_FILTERS; i++)
+ if (vlan->default_filters[i] != EFX_EF10_FILTER_ID_INVALID)
+ efx_mcdi_filter_remove_unsafe(efx, EFX_FILTER_PRI_AUTO,
+ vlan->default_filters[i]);
+
+ kfree(vlan);
+}
+
+void efx_mcdi_filter_del_vlan(struct efx_nic *efx, u16 vid)
+{
+ struct efx_mcdi_filter_vlan *vlan;
+
+ /* See comment in efx_mcdi_filter_table_remove() */
+ if (!efx_rwsem_assert_write_locked(&efx->filter_sem))
+ return;
+
+ vlan = efx_mcdi_filter_find_vlan(efx, vid);
+ if (!vlan) {
+ netif_err(efx, drv, efx->net_dev,
+ "VLAN %u not found in filter state\n", vid);
+ return;
+ }
+
+ efx_mcdi_filter_del_vlan_internal(efx, vlan);
+}
+
+struct efx_mcdi_filter_vlan *efx_mcdi_filter_find_vlan(struct efx_nic *efx,
+ u16 vid)
+{
+ struct efx_mcdi_filter_table *table = efx->filter_state;
+ struct efx_mcdi_filter_vlan *vlan;
+
+ WARN_ON(!rwsem_is_locked(&efx->filter_sem));
+
+ list_for_each_entry(vlan, &table->vlan_list, list) {
+ if (vlan->vid == vid)
+ return vlan;
+ }
+
+ return NULL;
+}
+
+void efx_mcdi_filter_cleanup_vlans(struct efx_nic *efx)
+{
+ struct efx_mcdi_filter_table *table = efx->filter_state;
+ struct efx_mcdi_filter_vlan *vlan, *next_vlan;
+
+ /* See comment in efx_mcdi_filter_table_remove() */
+ if (!efx_rwsem_assert_write_locked(&efx->filter_sem))
+ return;
+
+ if (!table)
+ return;
+
+ list_for_each_entry_safe(vlan, next_vlan, &table->vlan_list, list)
+ efx_mcdi_filter_del_vlan_internal(efx, vlan);
+}
+
+static void efx_mcdi_filter_uc_addr_list(struct efx_nic *efx)
+{
+ struct efx_mcdi_filter_table *table = efx->filter_state;
+ struct net_device *net_dev = efx->net_dev;
+ struct netdev_hw_addr *uc;
+ unsigned int i;
+
+ table->uc_promisc = !!(net_dev->flags & IFF_PROMISC);
+ ether_addr_copy(table->dev_uc_list[0].addr, net_dev->dev_addr);
+ i = 1;
+ netdev_for_each_uc_addr(uc, net_dev) {
+ if (i >= EFX_EF10_FILTER_DEV_UC_MAX) {
+ table->uc_promisc = true;
+ break;
+ }
+ ether_addr_copy(table->dev_uc_list[i].addr, uc->addr);
+ i++;
+ }
+
+ table->dev_uc_count = i;
+}
+
+static void efx_mcdi_filter_mc_addr_list(struct efx_nic *efx)
+{
+ struct efx_mcdi_filter_table *table = efx->filter_state;
+ struct net_device *net_dev = efx->net_dev;
+ struct netdev_hw_addr *mc;
+ unsigned int i;
+
+ table->mc_overflow = false;
+ table->mc_promisc = !!(net_dev->flags & (IFF_PROMISC | IFF_ALLMULTI));
+
+ i = 0;
+ netdev_for_each_mc_addr(mc, net_dev) {
+ if (i >= EFX_EF10_FILTER_DEV_MC_MAX) {
+ table->mc_promisc = true;
+ table->mc_overflow = true;
+ break;
+ }
+ ether_addr_copy(table->dev_mc_list[i].addr, mc->addr);
+ i++;
+ }
+
+ table->dev_mc_count = i;
+}
+
+/*
+ * The caller must hold efx->filter_sem for read if a race against
+ * efx_mcdi_filter_table_remove() is possible.
+ */
+void efx_mcdi_filter_sync_rx_mode(struct efx_nic *efx)
+{
+ struct efx_mcdi_filter_table *table = efx->filter_state;
+ struct net_device *net_dev = efx->net_dev;
+ struct efx_mcdi_filter_vlan *vlan;
+ bool vlan_filter;
+
+ if (!efx_dev_registered(efx))
+ return;
+
+ if (!table)
+ return;
+
+ efx_mcdi_filter_mark_old(efx);
+
+ /*
+ * Copy/convert the address lists; add the primary station
+ * address and broadcast address
+ */
+ netif_addr_lock_bh(net_dev);
+ efx_mcdi_filter_uc_addr_list(efx);
+ efx_mcdi_filter_mc_addr_list(efx);
+ netif_addr_unlock_bh(net_dev);
+
+ /*
+ * If VLAN filtering changes, all old filters are finally removed.
+ * Do it in advance to avoid conflicts for unicast untagged and
+ * VLAN 0 tagged filters.
+ */
+ vlan_filter = !!(net_dev->features & NETIF_F_HW_VLAN_CTAG_FILTER);
+ if (table->vlan_filter != vlan_filter) {
+ table->vlan_filter = vlan_filter;
+ efx_mcdi_filter_remove_old(efx);
+ }
+
+ list_for_each_entry(vlan, &table->vlan_list, list)
+ efx_mcdi_filter_vlan_sync_rx_mode(efx, vlan);
+
+ efx_mcdi_filter_remove_old(efx);
+ table->mc_promisc_last = table->mc_promisc;
+}
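+
+/*
+ * The function above is a mark-and-sweep pass; roughly (names as used in
+ * this file, details simplified):
+ *
+ *	efx_mcdi_filter_mark_old(efx);     (flag automatic filters AUTO_OLD)
+ *	...rebuild the uc/mc lists and re-insert the per-VLAN filters,
+ *	   which un-flags every filter that is still wanted...
+ *	efx_mcdi_filter_remove_old(efx);   (sweep whatever stayed flagged)
+ *
+ * The extra remove_old pass when vlan_filter changes runs early so that
+ * unicast untagged and VLAN 0 tagged filters do not conflict during
+ * re-insertion.
+ */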
+
+#ifdef CONFIG_RFS_ACCEL
+
+bool efx_mcdi_filter_rfs_expire_one(struct efx_nic *efx, u32 flow_id,
+ unsigned int filter_idx)
+{
+ struct efx_filter_spec *spec, saved_spec;
+ struct efx_mcdi_filter_table *table;
+ struct efx_arfs_rule *rule = NULL;
+ bool ret = true, force = false;
+ u16 arfs_id;
+
+ down_read(&efx->filter_sem);
+ table = efx->filter_state;
+ down_write(&table->lock);
+ spec = efx_mcdi_filter_entry_spec(table, filter_idx);
+
+ if (!spec || spec->priority != EFX_FILTER_PRI_HINT)
+ goto out_unlock;
+
+ spin_lock_bh(&efx->rps_hash_lock);
+ if (!efx->rps_hash_table) {
+ /* In the absence of the table, we always return 0 to ARFS. */
+ arfs_id = 0;
+ } else {
+ rule = efx_rps_hash_find(efx, spec);
+ if (!rule)
+ /* ARFS table doesn't know of this filter, so remove it */
+ goto expire;
+ arfs_id = rule->arfs_id;
+ ret = efx_rps_check_rule(rule, filter_idx, &force);
+ if (force)
+ goto expire;
+ if (!ret) {
+ spin_unlock_bh(&efx->rps_hash_lock);
+ goto out_unlock;
+ }
+ }
+ if (!rps_may_expire_flow(efx->net_dev, spec->dmaq_id, flow_id, arfs_id))
+ ret = false;
+ else if (rule)
+ rule->filter_id = EFX_ARFS_FILTER_ID_REMOVING;
+expire:
+ saved_spec = *spec; /* remove operation will kfree spec */
+ spin_unlock_bh(&efx->rps_hash_lock);
+ /*
+ * At this point (since we dropped the lock), another thread might queue
+ * up a fresh insertion request (but the actual insertion will be held
+ * up by our possession of the filter table lock). In that case, it
+ * will set rule->filter_id to EFX_ARFS_FILTER_ID_PENDING, meaning that
+ * the rule is not removed by efx_rps_hash_del() below.
+ */
+ if (ret)
+ ret = efx_mcdi_filter_remove_internal(efx, 1U << spec->priority,
+ filter_idx, true) == 0;
+ /*
+ * While we can't safely dereference rule (we dropped the lock), we can
+ * still test it for NULL.
+ */
+ if (ret && rule) {
+ /* Expiring, so remove entry from ARFS table */
+ spin_lock_bh(&efx->rps_hash_lock);
+ efx_rps_hash_del(efx, &saved_spec);
+ spin_unlock_bh(&efx->rps_hash_lock);
+ }
+out_unlock:
+ up_write(&table->lock);
+ up_read(&efx->filter_sem);
+ return ret;
+}
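+
+/*
+ * Lock ordering in the function above:
+ *	efx->filter_sem (read) -> table->lock (write) -> rps_hash_lock (bh)
+ * rps_hash_lock is always dropped before calling back into the filter
+ * table code, which is why saved_spec is copied out while it is held.
+ */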
+
+#endif /* CONFIG_RFS_ACCEL */
+
+#define RSS_MODE_HASH_ADDRS (1 << RSS_MODE_HASH_SRC_ADDR_LBN |\
+ 1 << RSS_MODE_HASH_DST_ADDR_LBN)
+#define RSS_MODE_HASH_PORTS (1 << RSS_MODE_HASH_SRC_PORT_LBN |\
+ 1 << RSS_MODE_HASH_DST_PORT_LBN)
+#define RSS_CONTEXT_FLAGS_DEFAULT (1 << MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_TOEPLITZ_IPV4_EN_LBN |\
+ 1 << MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_TOEPLITZ_TCPV4_EN_LBN |\
+ 1 << MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_TOEPLITZ_IPV6_EN_LBN |\
+ 1 << MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_TOEPLITZ_TCPV6_EN_LBN |\
+ (RSS_MODE_HASH_ADDRS | RSS_MODE_HASH_PORTS) << MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_TCP_IPV4_RSS_MODE_LBN |\
+ RSS_MODE_HASH_ADDRS << MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_UDP_IPV4_RSS_MODE_LBN |\
+ RSS_MODE_HASH_ADDRS << MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_OTHER_IPV4_RSS_MODE_LBN |\
+ (RSS_MODE_HASH_ADDRS | RSS_MODE_HASH_PORTS) << MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_TCP_IPV6_RSS_MODE_LBN |\
+ RSS_MODE_HASH_ADDRS << MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_UDP_IPV6_RSS_MODE_LBN |\
+ RSS_MODE_HASH_ADDRS << MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_OTHER_IPV6_RSS_MODE_LBN)
+
+int efx_mcdi_get_rss_context_flags(struct efx_nic *efx, u32 context, u32 *flags)
+{
+ /*
+ * Firmware had a bug (sfc bug 61952) where it would not actually
+ * fill in the flags field in the response to MC_CMD_RSS_CONTEXT_GET_FLAGS.
+ * This meant that it would always contain whatever was previously
+ * in the MCDI buffer. Fortunately, all firmware versions with
+ * this bug have the same default flags value for a newly-allocated
+ * RSS context, and the only time we want to get the flags is just
+ * after allocating. Moreover, the response has a 32-bit hole
+ * where the context ID would be in the request, so we can use an
+ * overlength buffer in the request and pre-fill the flags field
+ * with what we believe the default to be. Thus if the firmware
+ * has the bug, it will leave our pre-filled value in the flags
+ * field of the response, and we will get the right answer.
+ *
+ * However, this does mean that this function should NOT be used if
+ * the RSS context flags might not be their defaults - it is ONLY
+ * reliably correct for a newly-allocated RSS context.
+ */
+ MCDI_DECLARE_BUF(inbuf, MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_LEN);
+ MCDI_DECLARE_BUF(outbuf, MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_LEN);
+ size_t outlen;
+ int rc;
+
+ /* Check we have a hole for the context ID */
+ BUILD_BUG_ON(MC_CMD_RSS_CONTEXT_GET_FLAGS_IN_LEN != MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_FLAGS_OFST);
+ MCDI_SET_DWORD(inbuf, RSS_CONTEXT_GET_FLAGS_IN_RSS_CONTEXT_ID, context);
+ MCDI_SET_DWORD(inbuf, RSS_CONTEXT_GET_FLAGS_OUT_FLAGS,
+ RSS_CONTEXT_FLAGS_DEFAULT);
+ rc = efx_mcdi_rpc(efx, MC_CMD_RSS_CONTEXT_GET_FLAGS, inbuf,
+ sizeof(inbuf), outbuf, sizeof(outbuf), &outlen);
+ if (rc == 0) {
+ if (outlen < MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_LEN)
+ rc = -EIO;
+ else
+ *flags = MCDI_DWORD(outbuf, RSS_CONTEXT_GET_FLAGS_OUT_FLAGS);
+ }
+ return rc;
+}
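+
+/*
+ * A sketch of the workaround above (layouts per the BUILD_BUG_ON):
+ *
+ *	request:  [ RSS_CONTEXT_ID ][ FLAGS = assumed default ]
+ *	response: [ 32-bit hole    ][ FLAGS, possibly unwritten ]
+ *
+ * The request is declared with the response length so the default can be
+ * pre-filled where FLAGS lands in the response. Buggy firmware leaves the
+ * pre-filled value in place; fixed firmware overwrites it. Either way the
+ * value read back is usable.
+ */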
+
+/*
+ * Attempt to enable 4-tuple UDP hashing on the specified RSS context.
+ * If we fail, we just leave the RSS context at its default hash settings,
+ * which is safe but may slightly reduce performance.
+ * Defaults are 4-tuple for TCP and 2-tuple for UDP and other-IP, so we
+ * just need to set the UDP ports flags (for both IP versions).
+ */
+void efx_mcdi_set_rss_context_flags(struct efx_nic *efx,
+ struct efx_rss_context *ctx)
+{
+ MCDI_DECLARE_BUF(inbuf, MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_LEN);
+ u32 flags;
+
+ BUILD_BUG_ON(MC_CMD_RSS_CONTEXT_SET_FLAGS_OUT_LEN != 0);
+
+ if (efx_mcdi_get_rss_context_flags(efx, ctx->context_id, &flags) != 0)
+ return;
+ MCDI_SET_DWORD(inbuf, RSS_CONTEXT_SET_FLAGS_IN_RSS_CONTEXT_ID,
+ ctx->context_id);
+ flags |= RSS_MODE_HASH_PORTS << MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_UDP_IPV4_RSS_MODE_LBN;
+ flags |= RSS_MODE_HASH_PORTS << MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_UDP_IPV6_RSS_MODE_LBN;
+ MCDI_SET_DWORD(inbuf, RSS_CONTEXT_SET_FLAGS_IN_FLAGS, flags);
+ if (!efx_mcdi_rpc(efx, MC_CMD_RSS_CONTEXT_SET_FLAGS, inbuf, sizeof(inbuf),
+ NULL, 0, NULL))
+ /* Succeeded, so UDP 4-tuple is now enabled */
+ ctx->rx_hash_udp_4tuple = true;
+}
+
+static int efx_mcdi_filter_alloc_rss_context(struct efx_nic *efx, bool exclusive,
+ struct efx_rss_context *ctx,
+ unsigned *context_size)
+{
+ MCDI_DECLARE_BUF(inbuf, MC_CMD_RSS_CONTEXT_ALLOC_IN_LEN);
+ MCDI_DECLARE_BUF(outbuf, MC_CMD_RSS_CONTEXT_ALLOC_OUT_LEN);
+ struct efx_ef10_nic_data *nic_data = efx->nic_data;
+ size_t outlen;
+ int rc;
+ u32 alloc_type = exclusive ?
+ MC_CMD_RSS_CONTEXT_ALLOC_IN_TYPE_EXCLUSIVE :
+ MC_CMD_RSS_CONTEXT_ALLOC_IN_TYPE_SHARED;
+ unsigned rss_spread = exclusive ?
+ efx->rss_spread :
+ min(rounddown_pow_of_two(efx->rss_spread),
+ EFX_EF10_MAX_SHARED_RSS_CONTEXT_SIZE);
+
+ if (!exclusive && rss_spread == 1) {
+ ctx->context_id = EFX_MCDI_RSS_CONTEXT_INVALID;
+ if (context_size)
+ *context_size = 1;
+ return 0;
+ }
+
+ if (nic_data->datapath_caps &
+ 1 << MC_CMD_GET_CAPABILITIES_OUT_RX_RSS_LIMITED_LBN)
+ return -EOPNOTSUPP;
+
+ MCDI_SET_DWORD(inbuf, RSS_CONTEXT_ALLOC_IN_UPSTREAM_PORT_ID,
+ nic_data->vport_id);
+ MCDI_SET_DWORD(inbuf, RSS_CONTEXT_ALLOC_IN_TYPE, alloc_type);
+ MCDI_SET_DWORD(inbuf, RSS_CONTEXT_ALLOC_IN_NUM_QUEUES, rss_spread);
+
+ rc = efx_mcdi_rpc(efx, MC_CMD_RSS_CONTEXT_ALLOC, inbuf, sizeof(inbuf),
+ outbuf, sizeof(outbuf), &outlen);
+ if (rc != 0)
+ return rc;
+
+ if (outlen < MC_CMD_RSS_CONTEXT_ALLOC_OUT_LEN)
+ return -EIO;
+
+ ctx->context_id = MCDI_DWORD(outbuf, RSS_CONTEXT_ALLOC_OUT_RSS_CONTEXT_ID);
+
+ if (context_size)
+ *context_size = rss_spread;
+
+ if (nic_data->datapath_caps &
+ 1 << MC_CMD_GET_CAPABILITIES_OUT_ADDITIONAL_RSS_MODES_LBN)
+ efx_mcdi_set_rss_context_flags(efx, ctx);
+
+ return 0;
+}
+
+static int efx_mcdi_filter_free_rss_context(struct efx_nic *efx, u32 context)
+{
+ MCDI_DECLARE_BUF(inbuf, MC_CMD_RSS_CONTEXT_FREE_IN_LEN);
+
+ MCDI_SET_DWORD(inbuf, RSS_CONTEXT_FREE_IN_RSS_CONTEXT_ID,
+ context);
+ return efx_mcdi_rpc(efx, MC_CMD_RSS_CONTEXT_FREE, inbuf, sizeof(inbuf),
+ NULL, 0, NULL);
+}
+
+static int efx_mcdi_filter_populate_rss_table(struct efx_nic *efx, u32 context,
+ const u32 *rx_indir_table, const u8 *key)
+{
+ MCDI_DECLARE_BUF(tablebuf, MC_CMD_RSS_CONTEXT_SET_TABLE_IN_LEN);
+ MCDI_DECLARE_BUF(keybuf, MC_CMD_RSS_CONTEXT_SET_KEY_IN_LEN);
+ int i, rc;
+
+ MCDI_SET_DWORD(tablebuf, RSS_CONTEXT_SET_TABLE_IN_RSS_CONTEXT_ID,
+ context);
+ BUILD_BUG_ON(ARRAY_SIZE(efx->rss_context.rx_indir_table) !=
+ MC_CMD_RSS_CONTEXT_SET_TABLE_IN_INDIRECTION_TABLE_LEN);
+
+ /* This iterates over the length of efx->rss_context.rx_indir_table, but
+ * copies bytes from rx_indir_table. That's because the latter is a
+ * pointer rather than an array, but should have the same length.
+ * The efx->rss_context.rx_hash_key loop below is similar.
+ */
+ for (i = 0; i < ARRAY_SIZE(efx->rss_context.rx_indir_table); ++i)
+ MCDI_PTR(tablebuf,
+ RSS_CONTEXT_SET_TABLE_IN_INDIRECTION_TABLE)[i] =
+ (u8) rx_indir_table[i];
+
+ rc = efx_mcdi_rpc(efx, MC_CMD_RSS_CONTEXT_SET_TABLE, tablebuf,
+ sizeof(tablebuf), NULL, 0, NULL);
+ if (rc != 0)
+ return rc;
+
+ MCDI_SET_DWORD(keybuf, RSS_CONTEXT_SET_KEY_IN_RSS_CONTEXT_ID,
+ context);
+ BUILD_BUG_ON(ARRAY_SIZE(efx->rss_context.rx_hash_key) !=
+ MC_CMD_RSS_CONTEXT_SET_KEY_IN_TOEPLITZ_KEY_LEN);
+ for (i = 0; i < ARRAY_SIZE(efx->rss_context.rx_hash_key); ++i)
+ MCDI_PTR(keybuf, RSS_CONTEXT_SET_KEY_IN_TOEPLITZ_KEY)[i] = key[i];
+
+ return efx_mcdi_rpc(efx, MC_CMD_RSS_CONTEXT_SET_KEY, keybuf,
+ sizeof(keybuf), NULL, 0, NULL);
+}
+
+void efx_mcdi_rx_free_indir_table(struct efx_nic *efx)
+{
+ int rc;
+
+ if (efx->rss_context.context_id != EFX_MCDI_RSS_CONTEXT_INVALID) {
+ rc = efx_mcdi_filter_free_rss_context(efx, efx->rss_context.context_id);
+ WARN_ON(rc != 0);
+ }
+ efx->rss_context.context_id = EFX_MCDI_RSS_CONTEXT_INVALID;
+}
+
+static int efx_mcdi_filter_rx_push_shared_rss_config(struct efx_nic *efx,
+ unsigned *context_size)
+{
+ struct efx_ef10_nic_data *nic_data = efx->nic_data;
+ int rc = efx_mcdi_filter_alloc_rss_context(efx, false, &efx->rss_context,
+ context_size);
+
+ if (rc != 0)
+ return rc;
+
+ nic_data->rx_rss_context_exclusive = false;
+ efx_set_default_rx_indir_table(efx, &efx->rss_context);
+ return 0;
+}
+
+static int efx_mcdi_filter_rx_push_exclusive_rss_config(struct efx_nic *efx,
+ const u32 *rx_indir_table,
+ const u8 *key)
+{
+ u32 old_rx_rss_context = efx->rss_context.context_id;
+ struct efx_ef10_nic_data *nic_data = efx->nic_data;
+ int rc;
+
+ if (efx->rss_context.context_id == EFX_MCDI_RSS_CONTEXT_INVALID ||
+ !nic_data->rx_rss_context_exclusive) {
+ rc = efx_mcdi_filter_alloc_rss_context(efx, true, &efx->rss_context,
+ NULL);
+ if (rc == -EOPNOTSUPP)
+ return rc;
+ else if (rc != 0)
+ goto fail1;
+ }
+
+ rc = efx_mcdi_filter_populate_rss_table(efx, efx->rss_context.context_id,
+ rx_indir_table, key);
+ if (rc != 0)
+ goto fail2;
+
+ if (efx->rss_context.context_id != old_rx_rss_context &&
+ old_rx_rss_context != EFX_MCDI_RSS_CONTEXT_INVALID)
+ WARN_ON(efx_mcdi_filter_free_rss_context(efx, old_rx_rss_context) != 0);
+ nic_data->rx_rss_context_exclusive = true;
+ if (rx_indir_table != efx->rss_context.rx_indir_table)
+ memcpy(efx->rss_context.rx_indir_table, rx_indir_table,
+ sizeof(efx->rss_context.rx_indir_table));
+ if (key != efx->rss_context.rx_hash_key)
+ memcpy(efx->rss_context.rx_hash_key, key,
+ efx->type->rx_hash_key_size);
+
+ return 0;
+
+fail2:
+ if (old_rx_rss_context != efx->rss_context.context_id) {
+ WARN_ON(efx_mcdi_filter_free_rss_context(efx, efx->rss_context.context_id) != 0);
+ efx->rss_context.context_id = old_rx_rss_context;
+ }
+fail1:
+ netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
+ return rc;
+}
+
+int efx_mcdi_rx_push_rss_context_config(struct efx_nic *efx,
+ struct efx_rss_context *ctx,
+ const u32 *rx_indir_table,
+ const u8 *key)
+{
+ int rc;
+
+ WARN_ON(!mutex_is_locked(&efx->rss_lock));
+
+ if (ctx->context_id == EFX_MCDI_RSS_CONTEXT_INVALID) {
+ rc = efx_mcdi_filter_alloc_rss_context(efx, true, ctx, NULL);
+ if (rc)
+ return rc;
+ }
+
+ if (!rx_indir_table) /* Delete this context */
+ return efx_mcdi_filter_free_rss_context(efx, ctx->context_id);
+
+ rc = efx_mcdi_filter_populate_rss_table(efx, ctx->context_id,
+ rx_indir_table, key);
+ if (rc)
+ return rc;
+
+ memcpy(ctx->rx_indir_table, rx_indir_table,
+ sizeof(efx->rss_context.rx_indir_table));
+ memcpy(ctx->rx_hash_key, key, efx->type->rx_hash_key_size);
+
+ return 0;
+}
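+
+/*
+ * Caller sketch (illustrative, not part of this patch): per the
+ * "Delete this context" branch above, a context is torn down by pushing
+ * a NULL indirection table while holding efx->rss_lock:
+ *
+ *	mutex_lock(&efx->rss_lock);
+ *	rc = efx_mcdi_rx_push_rss_context_config(efx, ctx, NULL, NULL);
+ *	mutex_unlock(&efx->rss_lock);
+ */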
+
+int efx_mcdi_rx_pull_rss_context_config(struct efx_nic *efx,
+ struct efx_rss_context *ctx)
+{
+ MCDI_DECLARE_BUF(inbuf, MC_CMD_RSS_CONTEXT_GET_TABLE_IN_LEN);
+ MCDI_DECLARE_BUF(tablebuf, MC_CMD_RSS_CONTEXT_GET_TABLE_OUT_LEN);
+ MCDI_DECLARE_BUF(keybuf, MC_CMD_RSS_CONTEXT_GET_KEY_OUT_LEN);
+ size_t outlen;
+ int rc, i;
+
+ WARN_ON(!mutex_is_locked(&efx->rss_lock));
+
+ BUILD_BUG_ON(MC_CMD_RSS_CONTEXT_GET_TABLE_IN_LEN !=
+ MC_CMD_RSS_CONTEXT_GET_KEY_IN_LEN);
+
+ if (ctx->context_id == EFX_MCDI_RSS_CONTEXT_INVALID)
+ return -ENOENT;
+
+ MCDI_SET_DWORD(inbuf, RSS_CONTEXT_GET_TABLE_IN_RSS_CONTEXT_ID,
+ ctx->context_id);
+ BUILD_BUG_ON(ARRAY_SIZE(ctx->rx_indir_table) !=
+ MC_CMD_RSS_CONTEXT_GET_TABLE_OUT_INDIRECTION_TABLE_LEN);
+ rc = efx_mcdi_rpc(efx, MC_CMD_RSS_CONTEXT_GET_TABLE, inbuf, sizeof(inbuf),
+ tablebuf, sizeof(tablebuf), &outlen);
+ if (rc != 0)
+ return rc;
+
+ if (WARN_ON(outlen != MC_CMD_RSS_CONTEXT_GET_TABLE_OUT_LEN))
+ return -EIO;
+
+ for (i = 0; i < ARRAY_SIZE(ctx->rx_indir_table); i++)
+ ctx->rx_indir_table[i] = MCDI_PTR(tablebuf,
+ RSS_CONTEXT_GET_TABLE_OUT_INDIRECTION_TABLE)[i];
+
+ MCDI_SET_DWORD(inbuf, RSS_CONTEXT_GET_KEY_IN_RSS_CONTEXT_ID,
+ ctx->context_id);
+ BUILD_BUG_ON(ARRAY_SIZE(ctx->rx_hash_key) !=
+ MC_CMD_RSS_CONTEXT_SET_KEY_IN_TOEPLITZ_KEY_LEN);
+ rc = efx_mcdi_rpc(efx, MC_CMD_RSS_CONTEXT_GET_KEY, inbuf, sizeof(inbuf),
+ keybuf, sizeof(keybuf), &outlen);
+ if (rc != 0)
+ return rc;
+
+ if (WARN_ON(outlen != MC_CMD_RSS_CONTEXT_GET_KEY_OUT_LEN))
+ return -EIO;
+
+ for (i = 0; i < ARRAY_SIZE(ctx->rx_hash_key); ++i)
+ ctx->rx_hash_key[i] = MCDI_PTR(
+ keybuf, RSS_CONTEXT_GET_KEY_OUT_TOEPLITZ_KEY)[i];
+
+ return 0;
+}
+
+int efx_mcdi_rx_pull_rss_config(struct efx_nic *efx)
+{
+ int rc;
+
+ mutex_lock(&efx->rss_lock);
+ rc = efx_mcdi_rx_pull_rss_context_config(efx, &efx->rss_context);
+ mutex_unlock(&efx->rss_lock);
+ return rc;
+}
+
+void efx_mcdi_rx_restore_rss_contexts(struct efx_nic *efx)
+{
+ struct efx_ef10_nic_data *nic_data = efx->nic_data;
+ struct efx_rss_context *ctx;
+ int rc;
+
+ WARN_ON(!mutex_is_locked(&efx->rss_lock));
+
+ if (!nic_data->must_restore_rss_contexts)
+ return;
+
+ list_for_each_entry(ctx, &efx->rss_context.list, list) {
+ /* previous NIC RSS context is gone */
+ ctx->context_id = EFX_MCDI_RSS_CONTEXT_INVALID;
+ /* so try to allocate a new one */
+ rc = efx_mcdi_rx_push_rss_context_config(efx, ctx,
+ ctx->rx_indir_table,
+ ctx->rx_hash_key);
+ if (rc)
+ netif_warn(efx, probe, efx->net_dev,
+ "failed to restore RSS context %u, rc=%d; RSS filters may fail to be applied\n",
+ ctx->user_id, rc);
+ }
+ nic_data->must_restore_rss_contexts = false;
+}
+
+int efx_mcdi_pf_rx_push_rss_config(struct efx_nic *efx, bool user,
+ const u32 *rx_indir_table,
+ const u8 *key)
+{
+ int rc;
+
+ if (efx->rss_spread == 1)
+ return 0;
+
+ if (!key)
+ key = efx->rss_context.rx_hash_key;
+
+ rc = efx_mcdi_filter_rx_push_exclusive_rss_config(efx, rx_indir_table, key);
+
+ if (rc == -ENOBUFS && !user) {
+ unsigned context_size;
+ bool mismatch = false;
+ size_t i;
+
+ for (i = 0;
+ i < ARRAY_SIZE(efx->rss_context.rx_indir_table) && !mismatch;
+ i++)
+ mismatch = rx_indir_table[i] !=
+ ethtool_rxfh_indir_default(i, efx->rss_spread);
+
+ rc = efx_mcdi_filter_rx_push_shared_rss_config(efx, &context_size);
+ if (rc == 0) {
+ if (context_size != efx->rss_spread)
+ netif_warn(efx, probe, efx->net_dev,
+ "Could not allocate an exclusive RSS context; allocated a shared one of different size. Wanted %u, got %u.\n",
+ efx->rss_spread, context_size);
+ else if (mismatch)
+ netif_warn(efx, probe, efx->net_dev,
+ "Could not allocate an exclusive RSS context; allocated a shared one but could not apply custom indirection.\n");
+ else
+ netif_info(efx, probe, efx->net_dev,
+ "Could not allocate an exclusive RSS context; allocated a shared one.\n");
+ }
+ }
+ return rc;
+}
+
+int efx_mcdi_vf_rx_push_rss_config(struct efx_nic *efx, bool user,
+ const u32 *rx_indir_table
+ __attribute__ ((unused)),
+ const u8 *key
+ __attribute__ ((unused)))
+{
+ if (user)
+ return -EOPNOTSUPP;
+ if (efx->rss_context.context_id != EFX_MCDI_RSS_CONTEXT_INVALID)
+ return 0;
+ return efx_mcdi_filter_rx_push_shared_rss_config(efx, NULL);
+}
diff --git a/drivers/net/ethernet/sfc/mcdi_filters.h b/drivers/net/ethernet/sfc/mcdi_filters.h
new file mode 100644
index 000000000000..1837f4f5d661
--- /dev/null
+++ b/drivers/net/ethernet/sfc/mcdi_filters.h
@@ -0,0 +1,159 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/****************************************************************************
+ * Driver for Solarflare network controllers and boards
+ * Copyright 2019 Solarflare Communications Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation, incorporated herein by reference.
+ */
+#ifndef EFX_MCDI_FILTERS_H
+#define EFX_MCDI_FILTERS_H
+
+#include "net_driver.h"
+#include "filter.h"
+#include "mcdi_pcol.h"
+
+#define EFX_EF10_FILTER_DEV_UC_MAX 32
+#define EFX_EF10_FILTER_DEV_MC_MAX 256
+
+enum efx_mcdi_filter_default_filters {
+ EFX_EF10_BCAST,
+ EFX_EF10_UCDEF,
+ EFX_EF10_MCDEF,
+ EFX_EF10_VXLAN4_UCDEF,
+ EFX_EF10_VXLAN4_MCDEF,
+ EFX_EF10_VXLAN6_UCDEF,
+ EFX_EF10_VXLAN6_MCDEF,
+ EFX_EF10_NVGRE4_UCDEF,
+ EFX_EF10_NVGRE4_MCDEF,
+ EFX_EF10_NVGRE6_UCDEF,
+ EFX_EF10_NVGRE6_MCDEF,
+ EFX_EF10_GENEVE4_UCDEF,
+ EFX_EF10_GENEVE4_MCDEF,
+ EFX_EF10_GENEVE6_UCDEF,
+ EFX_EF10_GENEVE6_MCDEF,
+
+ EFX_EF10_NUM_DEFAULT_FILTERS
+};
+
+/* Per-VLAN filters information */
+struct efx_mcdi_filter_vlan {
+ struct list_head list;
+ u16 vid;
+ u16 uc[EFX_EF10_FILTER_DEV_UC_MAX];
+ u16 mc[EFX_EF10_FILTER_DEV_MC_MAX];
+ u16 default_filters[EFX_EF10_NUM_DEFAULT_FILTERS];
+};
+
+struct efx_mcdi_dev_addr {
+ u8 addr[ETH_ALEN];
+};
+
+struct efx_mcdi_filter_table {
+/* The MCDI match masks supported by this fw & hw, in order of priority */
+ u32 rx_match_mcdi_flags[
+ MC_CMD_GET_PARSER_DISP_INFO_OUT_SUPPORTED_MATCHES_MAXNUM * 2];
+ unsigned int rx_match_count;
+
+ struct rw_semaphore lock; /* Protects entries */
+ struct {
+ unsigned long spec; /* pointer to spec plus flag bits */
+/* AUTO_OLD is used to mark and sweep MAC filters for the device address lists. */
+/* unused flag 1UL */
+#define EFX_EF10_FILTER_FLAG_AUTO_OLD 2UL
+#define EFX_EF10_FILTER_FLAGS 3UL
+ u64 handle; /* firmware handle */
+ } *entry;
+/* Shadow of net_device address lists, guarded by mac_lock */
+ struct efx_mcdi_dev_addr dev_uc_list[EFX_EF10_FILTER_DEV_UC_MAX];
+ struct efx_mcdi_dev_addr dev_mc_list[EFX_EF10_FILTER_DEV_MC_MAX];
+ int dev_uc_count;
+ int dev_mc_count;
+ bool uc_promisc;
+ bool mc_promisc;
+/* Whether in multicast promiscuous mode when last changed */
+ bool mc_promisc_last;
+ bool mc_overflow; /* Too many MC addrs; should always imply mc_promisc */
+ bool vlan_filter;
+ struct list_head vlan_list;
+};
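+
+/*
+ * The flag bits above live in the low bits of entry->spec; masking with
+ * ~EFX_EF10_FILTER_FLAGS recovers the spec pointer. A sketch of the
+ * implied accessors (hypothetical names; the real helpers are in
+ * mcdi_filters.c):
+ *
+ *	spec  = (struct efx_filter_spec *)(entry->spec & ~EFX_EF10_FILTER_FLAGS);
+ *	flags = entry->spec & EFX_EF10_FILTER_FLAGS;
+ */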
+
+int efx_mcdi_filter_table_probe(struct efx_nic *efx);
+void efx_mcdi_filter_table_remove(struct efx_nic *efx);
+void efx_mcdi_filter_table_restore(struct efx_nic *efx);
+
+/*
+ * The filter table(s) are managed by firmware and we have write-only
+ * access. When removing filters we must identify them to the
+ * firmware by a 64-bit handle, but this is too wide for Linux kernel
+ * interfaces (32-bit for RX NFC, 16-bit for RFS). Also, we need to
+ * be able to tell in advance whether a requested insertion will
+ * replace an existing filter. Therefore we maintain a software hash
+ * table, which should be at least as large as the hardware hash
+ * table.
+ *
+ * Huntington has a single 8K filter table shared between all filter
+ * types and both ports.
+ */
+#define EFX_MCDI_FILTER_TBL_ROWS 8192
+
+bool efx_mcdi_filter_match_supported(struct efx_mcdi_filter_table *table,
+ bool encap,
+ enum efx_filter_match_flags match_flags);
+
+void efx_mcdi_filter_sync_rx_mode(struct efx_nic *efx);
+s32 efx_mcdi_filter_insert(struct efx_nic *efx, struct efx_filter_spec *spec,
+ bool replace_equal);
+int efx_mcdi_filter_remove_safe(struct efx_nic *efx,
+ enum efx_filter_priority priority,
+ u32 filter_id);
+int efx_mcdi_filter_get_safe(struct efx_nic *efx,
+ enum efx_filter_priority priority,
+ u32 filter_id, struct efx_filter_spec *spec);
+
+u32 efx_mcdi_filter_count_rx_used(struct efx_nic *efx,
+ enum efx_filter_priority priority);
+int efx_mcdi_filter_clear_rx(struct efx_nic *efx,
+ enum efx_filter_priority priority);
+u32 efx_mcdi_filter_get_rx_id_limit(struct efx_nic *efx);
+s32 efx_mcdi_filter_get_rx_ids(struct efx_nic *efx,
+ enum efx_filter_priority priority,
+ u32 *buf, u32 size);
+
+void efx_mcdi_filter_cleanup_vlans(struct efx_nic *efx);
+int efx_mcdi_filter_add_vlan(struct efx_nic *efx, u16 vid);
+struct efx_mcdi_filter_vlan *efx_mcdi_filter_find_vlan(struct efx_nic *efx, u16 vid);
+void efx_mcdi_filter_del_vlan(struct efx_nic *efx, u16 vid);
+
+void efx_mcdi_rx_free_indir_table(struct efx_nic *efx);
+int efx_mcdi_rx_push_rss_context_config(struct efx_nic *efx,
+ struct efx_rss_context *ctx,
+ const u32 *rx_indir_table,
+ const u8 *key);
+int efx_mcdi_pf_rx_push_rss_config(struct efx_nic *efx, bool user,
+ const u32 *rx_indir_table,
+ const u8 *key);
+int efx_mcdi_vf_rx_push_rss_config(struct efx_nic *efx, bool user,
+ const u32 *rx_indir_table
+ __attribute__ ((unused)),
+ const u8 *key
+ __attribute__ ((unused)));
+int efx_mcdi_rx_pull_rss_config(struct efx_nic *efx);
+int efx_mcdi_rx_pull_rss_context_config(struct efx_nic *efx,
+ struct efx_rss_context *ctx);
+int efx_mcdi_get_rss_context_flags(struct efx_nic *efx, u32 context,
+ u32 *flags);
+void efx_mcdi_set_rss_context_flags(struct efx_nic *efx,
+ struct efx_rss_context *ctx);
+void efx_mcdi_rx_restore_rss_contexts(struct efx_nic *efx);
+
+static inline void efx_mcdi_update_rx_scatter(struct efx_nic *efx)
+{
+ /* no need to do anything here */
+}
+
+bool efx_mcdi_filter_rfs_expire_one(struct efx_nic *efx, u32 flow_id,
+ unsigned int filter_idx);
+
+#endif
diff --git a/drivers/net/ethernet/sfc/mcdi_functions.c b/drivers/net/ethernet/sfc/mcdi_functions.c
new file mode 100644
index 000000000000..dcfe78b0fa5a
--- /dev/null
+++ b/drivers/net/ethernet/sfc/mcdi_functions.c
@@ -0,0 +1,386 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/****************************************************************************
+ * Driver for Solarflare network controllers and boards
+ * Copyright 2019 Solarflare Communications Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation, incorporated herein by reference.
+ */
+
+#include "net_driver.h"
+#include "efx.h"
+#include "nic.h"
+#include "mcdi_functions.h"
+#include "mcdi.h"
+#include "mcdi_pcol.h"
+
+int efx_mcdi_free_vis(struct efx_nic *efx)
+{
+ MCDI_DECLARE_BUF_ERR(outbuf);
+ size_t outlen;
+ int rc = efx_mcdi_rpc_quiet(efx, MC_CMD_FREE_VIS, NULL, 0,
+ outbuf, sizeof(outbuf), &outlen);
+
+ /* -EALREADY means nothing to free, so ignore */
+ if (rc == -EALREADY)
+ rc = 0;
+ if (rc)
+ efx_mcdi_display_error(efx, MC_CMD_FREE_VIS, 0, outbuf, outlen,
+ rc);
+ return rc;
+}
+
+int efx_mcdi_alloc_vis(struct efx_nic *efx, unsigned int min_vis,
+ unsigned int max_vis, unsigned int *vi_base,
+ unsigned int *allocated_vis)
+{
+ MCDI_DECLARE_BUF(outbuf, MC_CMD_ALLOC_VIS_OUT_LEN);
+ MCDI_DECLARE_BUF(inbuf, MC_CMD_ALLOC_VIS_IN_LEN);
+ size_t outlen;
+ int rc;
+
+ MCDI_SET_DWORD(inbuf, ALLOC_VIS_IN_MIN_VI_COUNT, min_vis);
+ MCDI_SET_DWORD(inbuf, ALLOC_VIS_IN_MAX_VI_COUNT, max_vis);
+ rc = efx_mcdi_rpc(efx, MC_CMD_ALLOC_VIS, inbuf, sizeof(inbuf),
+ outbuf, sizeof(outbuf), &outlen);
+ if (rc != 0)
+ return rc;
+
+ if (outlen < MC_CMD_ALLOC_VIS_OUT_LEN)
+ return -EIO;
+
+ netif_dbg(efx, drv, efx->net_dev, "base VI is 0x%03x\n",
+ MCDI_DWORD(outbuf, ALLOC_VIS_OUT_VI_BASE));
+
+ if (vi_base)
+ *vi_base = MCDI_DWORD(outbuf, ALLOC_VIS_OUT_VI_BASE);
+ if (allocated_vis)
+ *allocated_vis = MCDI_DWORD(outbuf, ALLOC_VIS_OUT_VI_COUNT);
+ return 0;
+}
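+
+/*
+ * Illustrative caller (hypothetical min_vis/max_vis; real callers derive
+ * them from channel and queue counts):
+ *
+ *	unsigned int vi_base, n_vis;
+ *
+ *	rc = efx_mcdi_alloc_vis(efx, min_vis, max_vis, &vi_base, &n_vis);
+ *	if (rc)
+ *		return rc;
+ *	(firmware may allocate anywhere between min_vis and max_vis)
+ */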
+
+int efx_mcdi_ev_probe(struct efx_channel *channel)
+{
+ return efx_nic_alloc_buffer(channel->efx, &channel->eventq.buf,
+ (channel->eventq_mask + 1) *
+ sizeof(efx_qword_t),
+ GFP_KERNEL);
+}
+
+int efx_mcdi_ev_init(struct efx_channel *channel, bool v1_cut_thru, bool v2)
+{
+ MCDI_DECLARE_BUF(inbuf,
+ MC_CMD_INIT_EVQ_V2_IN_LEN(EFX_MAX_EVQ_SIZE * 8 /
+ EFX_BUF_SIZE));
+ MCDI_DECLARE_BUF(outbuf, MC_CMD_INIT_EVQ_V2_OUT_LEN);
+ size_t entries = channel->eventq.buf.len / EFX_BUF_SIZE;
+ struct efx_nic *efx = channel->efx;
+ size_t inlen, outlen;
+ dma_addr_t dma_addr;
+ int rc, i;
+
+ /* Fill event queue with all ones (i.e. empty events) */
+ memset(channel->eventq.buf.addr, 0xff, channel->eventq.buf.len);
+
+ MCDI_SET_DWORD(inbuf, INIT_EVQ_IN_SIZE, channel->eventq_mask + 1);
+ MCDI_SET_DWORD(inbuf, INIT_EVQ_IN_INSTANCE, channel->channel);
+ /* INIT_EVQ expects index in vector table, not absolute */
+ MCDI_SET_DWORD(inbuf, INIT_EVQ_IN_IRQ_NUM, channel->channel);
+ MCDI_SET_DWORD(inbuf, INIT_EVQ_IN_TMR_MODE,
+ MC_CMD_INIT_EVQ_IN_TMR_MODE_DIS);
+ MCDI_SET_DWORD(inbuf, INIT_EVQ_IN_TMR_LOAD, 0);
+ MCDI_SET_DWORD(inbuf, INIT_EVQ_IN_TMR_RELOAD, 0);
+ MCDI_SET_DWORD(inbuf, INIT_EVQ_IN_COUNT_MODE,
+ MC_CMD_INIT_EVQ_IN_COUNT_MODE_DIS);
+ MCDI_SET_DWORD(inbuf, INIT_EVQ_IN_COUNT_THRSHLD, 0);
+
+ if (v2) {
+ /* Use the new generic approach to specifying event queue
+ * configuration, requesting lower latency or higher throughput.
+ * The options that actually get used appear in the output.
+ */
+ MCDI_POPULATE_DWORD_2(inbuf, INIT_EVQ_V2_IN_FLAGS,
+ INIT_EVQ_V2_IN_FLAG_INTERRUPTING, 1,
+ INIT_EVQ_V2_IN_FLAG_TYPE,
+ MC_CMD_INIT_EVQ_V2_IN_FLAG_TYPE_AUTO);
+ } else {
+ MCDI_POPULATE_DWORD_4(inbuf, INIT_EVQ_IN_FLAGS,
+ INIT_EVQ_IN_FLAG_INTERRUPTING, 1,
+ INIT_EVQ_IN_FLAG_RX_MERGE, 1,
+ INIT_EVQ_IN_FLAG_TX_MERGE, 1,
+ INIT_EVQ_IN_FLAG_CUT_THRU, v1_cut_thru);
+ }
+
+ dma_addr = channel->eventq.buf.dma_addr;
+ for (i = 0; i < entries; ++i) {
+ MCDI_SET_ARRAY_QWORD(inbuf, INIT_EVQ_IN_DMA_ADDR, i, dma_addr);
+ dma_addr += EFX_BUF_SIZE;
+ }
+
+ inlen = MC_CMD_INIT_EVQ_IN_LEN(entries);
+
+ rc = efx_mcdi_rpc(efx, MC_CMD_INIT_EVQ, inbuf, inlen,
+ outbuf, sizeof(outbuf), &outlen);
+
+ if (outlen >= MC_CMD_INIT_EVQ_V2_OUT_LEN)
+ netif_dbg(efx, drv, efx->net_dev,
+ "Channel %d using event queue flags %08x\n",
+ channel->channel,
+ MCDI_DWORD(outbuf, INIT_EVQ_V2_OUT_FLAGS));
+
+ return rc;
+}
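+
+/*
+ * The event queue memory is handed to firmware as a chain of
+ * EFX_BUF_SIZE-sized DMA buffers (the loop above); the allocation in
+ * efx_mcdi_ev_probe() works out to a whole number of such buffers.
+ * Whether to request the v2 format or cut-through is a capability
+ * decision left to the caller.
+ */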
+
+void efx_mcdi_ev_remove(struct efx_channel *channel)
+{
+ efx_nic_free_buffer(channel->efx, &channel->eventq.buf);
+}
+
+void efx_mcdi_ev_fini(struct efx_channel *channel)
+{
+ MCDI_DECLARE_BUF(inbuf, MC_CMD_FINI_EVQ_IN_LEN);
+ MCDI_DECLARE_BUF_ERR(outbuf);
+ struct efx_nic *efx = channel->efx;
+ size_t outlen;
+ int rc;
+
+ MCDI_SET_DWORD(inbuf, FINI_EVQ_IN_INSTANCE, channel->channel);
+
+ rc = efx_mcdi_rpc_quiet(efx, MC_CMD_FINI_EVQ, inbuf, sizeof(inbuf),
+ outbuf, sizeof(outbuf), &outlen);
+
+ if (rc && rc != -EALREADY)
+ goto fail;
+
+ return;
+
+fail:
+ efx_mcdi_display_error(efx, MC_CMD_FINI_EVQ, MC_CMD_FINI_EVQ_IN_LEN,
+ outbuf, outlen, rc);
+}
+
+int efx_mcdi_tx_init(struct efx_tx_queue *tx_queue, bool tso_v2)
+{
+ MCDI_DECLARE_BUF(inbuf, MC_CMD_INIT_TXQ_IN_LEN(EFX_MAX_DMAQ_SIZE * 8 /
+ EFX_BUF_SIZE));
+ bool csum_offload = tx_queue->queue & EFX_TXQ_TYPE_OFFLOAD;
+ size_t entries = tx_queue->txd.buf.len / EFX_BUF_SIZE;
+ struct efx_channel *channel = tx_queue->channel;
+ struct efx_nic *efx = tx_queue->efx;
+ struct efx_ef10_nic_data *nic_data;
+ dma_addr_t dma_addr;
+ size_t inlen;
+ int rc, i;
+
+ BUILD_BUG_ON(MC_CMD_INIT_TXQ_OUT_LEN != 0);
+
+ nic_data = efx->nic_data;
+
+ MCDI_SET_DWORD(inbuf, INIT_TXQ_IN_SIZE, tx_queue->ptr_mask + 1);
+ MCDI_SET_DWORD(inbuf, INIT_TXQ_IN_TARGET_EVQ, channel->channel);
+ MCDI_SET_DWORD(inbuf, INIT_TXQ_IN_LABEL, tx_queue->queue);
+ MCDI_SET_DWORD(inbuf, INIT_TXQ_IN_INSTANCE, tx_queue->queue);
+ MCDI_SET_DWORD(inbuf, INIT_TXQ_IN_OWNER_ID, 0);
+ MCDI_SET_DWORD(inbuf, INIT_TXQ_IN_PORT_ID, nic_data->vport_id);
+
+ dma_addr = tx_queue->txd.buf.dma_addr;
+
+ netif_dbg(efx, hw, efx->net_dev, "pushing TXQ %d. %zu entries (%llx)\n",
+ tx_queue->queue, entries, (u64)dma_addr);
+
+ for (i = 0; i < entries; ++i) {
+ MCDI_SET_ARRAY_QWORD(inbuf, INIT_TXQ_IN_DMA_ADDR, i, dma_addr);
+ dma_addr += EFX_BUF_SIZE;
+ }
+
+ inlen = MC_CMD_INIT_TXQ_IN_LEN(entries);
+
+ do {
+ MCDI_POPULATE_DWORD_4(inbuf, INIT_TXQ_IN_FLAGS,
+ /* This flag was removed from mcdi_pcol.h for
+ * the non-_EXT version of INIT_TXQ. However,
+ * firmware still honours it.
+ */
+ INIT_TXQ_EXT_IN_FLAG_TSOV2_EN, tso_v2,
+ INIT_TXQ_IN_FLAG_IP_CSUM_DIS, !csum_offload,
+ INIT_TXQ_IN_FLAG_TCP_CSUM_DIS, !csum_offload,
+ INIT_TXQ_EXT_IN_FLAG_TIMESTAMP,
+ tx_queue->timestamping);
+
+ rc = efx_mcdi_rpc_quiet(efx, MC_CMD_INIT_TXQ, inbuf, inlen,
+ NULL, 0, NULL);
+ if (rc == -ENOSPC && tso_v2) {
+ /* Retry without TSOv2 if we're short on contexts. */
+ tso_v2 = false;
+ netif_warn(efx, probe, efx->net_dev,
+ "TSOv2 context not available to segment in hardware. TCP performance may be reduced.\n");
+ } else if (rc) {
+ efx_mcdi_display_error(efx, MC_CMD_INIT_TXQ,
+ MC_CMD_INIT_TXQ_EXT_IN_LEN,
+ NULL, 0, rc);
+ goto fail;
+ }
+ } while (rc);
+
+ return 0;
+
+fail:
+ return rc;
+}
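+
+/*
+ * Design note on the do/while above: the body runs at most twice. A
+ * second pass can only happen when the first attempt fails with -ENOSPC
+ * while requesting TSOv2; tso_v2 is then cleared and the queue is
+ * initialised without it.
+ */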
+
+void efx_mcdi_tx_remove(struct efx_tx_queue *tx_queue)
+{
+ efx_nic_free_buffer(tx_queue->efx, &tx_queue->txd.buf);
+}
+
+void efx_mcdi_tx_fini(struct efx_tx_queue *tx_queue)
+{
+ MCDI_DECLARE_BUF(inbuf, MC_CMD_FINI_TXQ_IN_LEN);
+ MCDI_DECLARE_BUF_ERR(outbuf);
+ struct efx_nic *efx = tx_queue->efx;
+ size_t outlen;
+ int rc;
+
+ MCDI_SET_DWORD(inbuf, FINI_TXQ_IN_INSTANCE,
+ tx_queue->queue);
+
+ rc = efx_mcdi_rpc_quiet(efx, MC_CMD_FINI_TXQ, inbuf, sizeof(inbuf),
+ outbuf, sizeof(outbuf), &outlen);
+
+ if (rc && rc != -EALREADY)
+ goto fail;
+
+ return;
+
+fail:
+ efx_mcdi_display_error(efx, MC_CMD_FINI_TXQ, MC_CMD_FINI_TXQ_IN_LEN,
+ outbuf, outlen, rc);
+}
+
+int efx_mcdi_rx_probe(struct efx_rx_queue *rx_queue)
+{
+ return efx_nic_alloc_buffer(rx_queue->efx, &rx_queue->rxd.buf,
+ (rx_queue->ptr_mask + 1) *
+ sizeof(efx_qword_t),
+ GFP_KERNEL);
+}
+
+void efx_mcdi_rx_init(struct efx_rx_queue *rx_queue)
+{
+ MCDI_DECLARE_BUF(inbuf,
+ MC_CMD_INIT_RXQ_IN_LEN(EFX_MAX_DMAQ_SIZE * 8 /
+ EFX_BUF_SIZE));
+ struct efx_channel *channel = efx_rx_queue_channel(rx_queue);
+ size_t entries = rx_queue->rxd.buf.len / EFX_BUF_SIZE;
+ struct efx_nic *efx = rx_queue->efx;
+ struct efx_ef10_nic_data *nic_data = efx->nic_data;
+ dma_addr_t dma_addr;
+ size_t inlen;
+ int rc;
+ int i;
+ BUILD_BUG_ON(MC_CMD_INIT_RXQ_OUT_LEN != 0);
+
+ rx_queue->scatter_n = 0;
+ rx_queue->scatter_len = 0;
+
+ MCDI_SET_DWORD(inbuf, INIT_RXQ_IN_SIZE, rx_queue->ptr_mask + 1);
+ MCDI_SET_DWORD(inbuf, INIT_RXQ_IN_TARGET_EVQ, channel->channel);
+ MCDI_SET_DWORD(inbuf, INIT_RXQ_IN_LABEL, efx_rx_queue_index(rx_queue));
+ MCDI_SET_DWORD(inbuf, INIT_RXQ_IN_INSTANCE,
+ efx_rx_queue_index(rx_queue));
+ MCDI_POPULATE_DWORD_2(inbuf, INIT_RXQ_IN_FLAGS,
+ INIT_RXQ_IN_FLAG_PREFIX, 1,
+ INIT_RXQ_IN_FLAG_TIMESTAMP, 1);
+ MCDI_SET_DWORD(inbuf, INIT_RXQ_IN_OWNER_ID, 0);
+ MCDI_SET_DWORD(inbuf, INIT_RXQ_IN_PORT_ID, nic_data->vport_id);
+
+ dma_addr = rx_queue->rxd.buf.dma_addr;
+
+ netif_dbg(efx, hw, efx->net_dev, "pushing RXQ %d. %zu entries (%llx)\n",
+ efx_rx_queue_index(rx_queue), entries, (u64)dma_addr);
+
+ for (i = 0; i < entries; ++i) {
+ MCDI_SET_ARRAY_QWORD(inbuf, INIT_RXQ_IN_DMA_ADDR, i, dma_addr);
+ dma_addr += EFX_BUF_SIZE;
+ }
+
+ inlen = MC_CMD_INIT_RXQ_IN_LEN(entries);
+
+ rc = efx_mcdi_rpc(efx, MC_CMD_INIT_RXQ, inbuf, inlen,
+ NULL, 0, NULL);
+ if (rc)
+ netdev_WARN(efx->net_dev, "failed to initialise RXQ %d\n",
+ efx_rx_queue_index(rx_queue));
+}
+
+void efx_mcdi_rx_remove(struct efx_rx_queue *rx_queue)
+{
+ efx_nic_free_buffer(rx_queue->efx, &rx_queue->rxd.buf);
+}
+
+void efx_mcdi_rx_fini(struct efx_rx_queue *rx_queue)
+{
+ MCDI_DECLARE_BUF(inbuf, MC_CMD_FINI_RXQ_IN_LEN);
+ MCDI_DECLARE_BUF_ERR(outbuf);
+ struct efx_nic *efx = rx_queue->efx;
+ size_t outlen;
+ int rc;
+
+ MCDI_SET_DWORD(inbuf, FINI_RXQ_IN_INSTANCE,
+ efx_rx_queue_index(rx_queue));
+
+ rc = efx_mcdi_rpc_quiet(efx, MC_CMD_FINI_RXQ, inbuf, sizeof(inbuf),
+ outbuf, sizeof(outbuf), &outlen);
+
+ if (rc && rc != -EALREADY)
+ goto fail;
+
+ return;
+
+fail:
+ efx_mcdi_display_error(efx, MC_CMD_FINI_RXQ, MC_CMD_FINI_RXQ_IN_LEN,
+ outbuf, outlen, rc);
+}
+
+int efx_mcdi_window_mode_to_stride(struct efx_nic *efx, u8 vi_window_mode)
+{
+ switch (vi_window_mode) {
+ case MC_CMD_GET_CAPABILITIES_V3_OUT_VI_WINDOW_MODE_8K:
+ efx->vi_stride = 8192;
+ break;
+ case MC_CMD_GET_CAPABILITIES_V3_OUT_VI_WINDOW_MODE_16K:
+ efx->vi_stride = 16384;
+ break;
+ case MC_CMD_GET_CAPABILITIES_V3_OUT_VI_WINDOW_MODE_64K:
+ efx->vi_stride = 65536;
+ break;
+ default:
+ netif_err(efx, probe, efx->net_dev,
+ "Unrecognised VI window mode %d\n",
+ vi_window_mode);
+ return -EIO;
+ }
+ netif_dbg(efx, probe, efx->net_dev, "vi_stride = %u\n",
+ efx->vi_stride);
+ return 0;
+}
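+
+/*
+ * Usage sketch (assumptions noted): the window mode value typically comes
+ * from an MC_CMD_GET_CAPABILITIES_V3 response, as the constants above
+ * suggest, and the resulting efx->vi_stride is assumed to be the spacing
+ * of per-VI register windows in the BAR:
+ *
+ *	rc = efx_mcdi_window_mode_to_stride(efx, vi_window_mode);
+ *	if (rc)
+ *		return rc;
+ *	(VI n's registers then start at n * efx->vi_stride)
+ */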
+
+int efx_get_pf_index(struct efx_nic *efx, unsigned int *pf_index)
+{
+ MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_FUNCTION_INFO_OUT_LEN);
+ size_t outlen;
+ int rc;
+
+ rc = efx_mcdi_rpc(efx, MC_CMD_GET_FUNCTION_INFO, NULL, 0, outbuf,
+ sizeof(outbuf), &outlen);
+ if (rc)
+ return rc;
+ if (outlen < sizeof(outbuf))
+ return -EIO;
+
+ *pf_index = MCDI_DWORD(outbuf, GET_FUNCTION_INFO_OUT_PF);
+ return 0;
+}
diff --git a/drivers/net/ethernet/sfc/mcdi_functions.h b/drivers/net/ethernet/sfc/mcdi_functions.h
new file mode 100644
index 000000000000..ca4a5ac1a66b
--- /dev/null
+++ b/drivers/net/ethernet/sfc/mcdi_functions.h
@@ -0,0 +1,32 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/****************************************************************************
+ * Driver for Solarflare network controllers and boards
+ * Copyright 2018 Solarflare Communications Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation, incorporated herein by reference.
+ */
+#ifndef EFX_MCDI_FUNCTIONS_H
+#define EFX_MCDI_FUNCTIONS_H
+
+int efx_mcdi_alloc_vis(struct efx_nic *efx, unsigned int min_vis,
+ unsigned int max_vis, unsigned int *vi_base,
+ unsigned int *allocated_vis);
+int efx_mcdi_free_vis(struct efx_nic *efx);
+
+int efx_mcdi_ev_probe(struct efx_channel *channel);
+int efx_mcdi_ev_init(struct efx_channel *channel, bool v1_cut_thru, bool v2);
+void efx_mcdi_ev_remove(struct efx_channel *channel);
+void efx_mcdi_ev_fini(struct efx_channel *channel);
+int efx_mcdi_tx_init(struct efx_tx_queue *tx_queue, bool tso_v2);
+void efx_mcdi_tx_remove(struct efx_tx_queue *tx_queue);
+void efx_mcdi_tx_fini(struct efx_tx_queue *tx_queue);
+int efx_mcdi_rx_probe(struct efx_rx_queue *rx_queue);
+void efx_mcdi_rx_init(struct efx_rx_queue *rx_queue);
+void efx_mcdi_rx_remove(struct efx_rx_queue *rx_queue);
+void efx_mcdi_rx_fini(struct efx_rx_queue *rx_queue);
+int efx_mcdi_window_mode_to_stride(struct efx_nic *efx, u8 vi_window_mode);
+int efx_get_pf_index(struct efx_nic *efx, unsigned int *pf_index);
+
+#endif
diff --git a/drivers/net/ethernet/sfc/mcdi_port.c b/drivers/net/ethernet/sfc/mcdi_port.c
index fb7cde4980ed..ab5227b13ae6 100644
--- a/drivers/net/ethernet/sfc/mcdi_port.c
+++ b/drivers/net/ethernet/sfc/mcdi_port.c
@@ -14,106 +14,7 @@
#include "mcdi_pcol.h"
#include "nic.h"
#include "selftest.h"
-
-struct efx_mcdi_phy_data {
- u32 flags;
- u32 type;
- u32 supported_cap;
- u32 channel;
- u32 port;
- u32 stats_mask;
- u8 name[20];
- u32 media;
- u32 mmd_mask;
- u8 revision[20];
- u32 forced_cap;
-};
-
-static int
-efx_mcdi_get_phy_cfg(struct efx_nic *efx, struct efx_mcdi_phy_data *cfg)
-{
- MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_PHY_CFG_OUT_LEN);
- size_t outlen;
- int rc;
-
- BUILD_BUG_ON(MC_CMD_GET_PHY_CFG_IN_LEN != 0);
- BUILD_BUG_ON(MC_CMD_GET_PHY_CFG_OUT_NAME_LEN != sizeof(cfg->name));
-
- rc = efx_mcdi_rpc(efx, MC_CMD_GET_PHY_CFG, NULL, 0,
- outbuf, sizeof(outbuf), &outlen);
- if (rc)
- goto fail;
-
- if (outlen < MC_CMD_GET_PHY_CFG_OUT_LEN) {
- rc = -EIO;
- goto fail;
- }
-
- cfg->flags = MCDI_DWORD(outbuf, GET_PHY_CFG_OUT_FLAGS);
- cfg->type = MCDI_DWORD(outbuf, GET_PHY_CFG_OUT_TYPE);
- cfg->supported_cap =
- MCDI_DWORD(outbuf, GET_PHY_CFG_OUT_SUPPORTED_CAP);
- cfg->channel = MCDI_DWORD(outbuf, GET_PHY_CFG_OUT_CHANNEL);
- cfg->port = MCDI_DWORD(outbuf, GET_PHY_CFG_OUT_PRT);
- cfg->stats_mask = MCDI_DWORD(outbuf, GET_PHY_CFG_OUT_STATS_MASK);
- memcpy(cfg->name, MCDI_PTR(outbuf, GET_PHY_CFG_OUT_NAME),
- sizeof(cfg->name));
- cfg->media = MCDI_DWORD(outbuf, GET_PHY_CFG_OUT_MEDIA_TYPE);
- cfg->mmd_mask = MCDI_DWORD(outbuf, GET_PHY_CFG_OUT_MMD_MASK);
- memcpy(cfg->revision, MCDI_PTR(outbuf, GET_PHY_CFG_OUT_REVISION),
- sizeof(cfg->revision));
-
- return 0;
-
-fail:
- netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
- return rc;
-}
-
-static int efx_mcdi_set_link(struct efx_nic *efx, u32 capabilities,
- u32 flags, u32 loopback_mode,
- u32 loopback_speed)
-{
- MCDI_DECLARE_BUF(inbuf, MC_CMD_SET_LINK_IN_LEN);
- int rc;
-
- BUILD_BUG_ON(MC_CMD_SET_LINK_OUT_LEN != 0);
-
- MCDI_SET_DWORD(inbuf, SET_LINK_IN_CAP, capabilities);
- MCDI_SET_DWORD(inbuf, SET_LINK_IN_FLAGS, flags);
- MCDI_SET_DWORD(inbuf, SET_LINK_IN_LOOPBACK_MODE, loopback_mode);
- MCDI_SET_DWORD(inbuf, SET_LINK_IN_LOOPBACK_SPEED, loopback_speed);
-
- rc = efx_mcdi_rpc(efx, MC_CMD_SET_LINK, inbuf, sizeof(inbuf),
- NULL, 0, NULL);
- return rc;
-}
-
-static int efx_mcdi_loopback_modes(struct efx_nic *efx, u64 *loopback_modes)
-{
- MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_LOOPBACK_MODES_OUT_LEN);
- size_t outlen;
- int rc;
-
- rc = efx_mcdi_rpc(efx, MC_CMD_GET_LOOPBACK_MODES, NULL, 0,
- outbuf, sizeof(outbuf), &outlen);
- if (rc)
- goto fail;
-
- if (outlen < (MC_CMD_GET_LOOPBACK_MODES_OUT_SUGGESTED_OFST +
- MC_CMD_GET_LOOPBACK_MODES_OUT_SUGGESTED_LEN)) {
- rc = -EIO;
- goto fail;
- }
-
- *loopback_modes = MCDI_QWORD(outbuf, GET_LOOPBACK_MODES_OUT_SUGGESTED);
-
- return 0;
-
-fail:
- netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
- return rc;
-}
+#include "mcdi_port_common.h"
static int efx_mcdi_mdio_read(struct net_device *net_dev,
int prtad, int devad, u16 addr)
@@ -168,246 +69,6 @@ static int efx_mcdi_mdio_write(struct net_device *net_dev,
return 0;
}
-static void mcdi_to_ethtool_linkset(u32 media, u32 cap, unsigned long *linkset)
-{
- #define SET_BIT(name) __set_bit(ETHTOOL_LINK_MODE_ ## name ## _BIT, \
- linkset)
-
- bitmap_zero(linkset, __ETHTOOL_LINK_MODE_MASK_NBITS);
- switch (media) {
- case MC_CMD_MEDIA_KX4:
- SET_BIT(Backplane);
- if (cap & (1 << MC_CMD_PHY_CAP_1000FDX_LBN))
- SET_BIT(1000baseKX_Full);
- if (cap & (1 << MC_CMD_PHY_CAP_10000FDX_LBN))
- SET_BIT(10000baseKX4_Full);
- if (cap & (1 << MC_CMD_PHY_CAP_40000FDX_LBN))
- SET_BIT(40000baseKR4_Full);
- break;
-
- case MC_CMD_MEDIA_XFP:
- case MC_CMD_MEDIA_SFP_PLUS:
- case MC_CMD_MEDIA_QSFP_PLUS:
- SET_BIT(FIBRE);
- if (cap & (1 << MC_CMD_PHY_CAP_1000FDX_LBN))
- SET_BIT(1000baseT_Full);
- if (cap & (1 << MC_CMD_PHY_CAP_10000FDX_LBN))
- SET_BIT(10000baseT_Full);
- if (cap & (1 << MC_CMD_PHY_CAP_40000FDX_LBN))
- SET_BIT(40000baseCR4_Full);
- if (cap & (1 << MC_CMD_PHY_CAP_100000FDX_LBN))
- SET_BIT(100000baseCR4_Full);
- if (cap & (1 << MC_CMD_PHY_CAP_25000FDX_LBN))
- SET_BIT(25000baseCR_Full);
- if (cap & (1 << MC_CMD_PHY_CAP_50000FDX_LBN))
- SET_BIT(50000baseCR2_Full);
- break;
-
- case MC_CMD_MEDIA_BASE_T:
- SET_BIT(TP);
- if (cap & (1 << MC_CMD_PHY_CAP_10HDX_LBN))
- SET_BIT(10baseT_Half);
- if (cap & (1 << MC_CMD_PHY_CAP_10FDX_LBN))
- SET_BIT(10baseT_Full);
- if (cap & (1 << MC_CMD_PHY_CAP_100HDX_LBN))
- SET_BIT(100baseT_Half);
- if (cap & (1 << MC_CMD_PHY_CAP_100FDX_LBN))
- SET_BIT(100baseT_Full);
- if (cap & (1 << MC_CMD_PHY_CAP_1000HDX_LBN))
- SET_BIT(1000baseT_Half);
- if (cap & (1 << MC_CMD_PHY_CAP_1000FDX_LBN))
- SET_BIT(1000baseT_Full);
- if (cap & (1 << MC_CMD_PHY_CAP_10000FDX_LBN))
- SET_BIT(10000baseT_Full);
- break;
- }
-
- if (cap & (1 << MC_CMD_PHY_CAP_PAUSE_LBN))
- SET_BIT(Pause);
- if (cap & (1 << MC_CMD_PHY_CAP_ASYM_LBN))
- SET_BIT(Asym_Pause);
- if (cap & (1 << MC_CMD_PHY_CAP_AN_LBN))
- SET_BIT(Autoneg);
-
- #undef SET_BIT
-}
-
-static u32 ethtool_linkset_to_mcdi_cap(const unsigned long *linkset)
-{
- u32 result = 0;
-
- #define TEST_BIT(name) test_bit(ETHTOOL_LINK_MODE_ ## name ## _BIT, \
- linkset)
-
- if (TEST_BIT(10baseT_Half))
- result |= (1 << MC_CMD_PHY_CAP_10HDX_LBN);
- if (TEST_BIT(10baseT_Full))
- result |= (1 << MC_CMD_PHY_CAP_10FDX_LBN);
- if (TEST_BIT(100baseT_Half))
- result |= (1 << MC_CMD_PHY_CAP_100HDX_LBN);
- if (TEST_BIT(100baseT_Full))
- result |= (1 << MC_CMD_PHY_CAP_100FDX_LBN);
- if (TEST_BIT(1000baseT_Half))
- result |= (1 << MC_CMD_PHY_CAP_1000HDX_LBN);
- if (TEST_BIT(1000baseT_Full) || TEST_BIT(1000baseKX_Full))
- result |= (1 << MC_CMD_PHY_CAP_1000FDX_LBN);
- if (TEST_BIT(10000baseT_Full) || TEST_BIT(10000baseKX4_Full))
- result |= (1 << MC_CMD_PHY_CAP_10000FDX_LBN);
- if (TEST_BIT(40000baseCR4_Full) || TEST_BIT(40000baseKR4_Full))
- result |= (1 << MC_CMD_PHY_CAP_40000FDX_LBN);
- if (TEST_BIT(100000baseCR4_Full))
- result |= (1 << MC_CMD_PHY_CAP_100000FDX_LBN);
- if (TEST_BIT(25000baseCR_Full))
- result |= (1 << MC_CMD_PHY_CAP_25000FDX_LBN);
- if (TEST_BIT(50000baseCR2_Full))
- result |= (1 << MC_CMD_PHY_CAP_50000FDX_LBN);
- if (TEST_BIT(Pause))
- result |= (1 << MC_CMD_PHY_CAP_PAUSE_LBN);
- if (TEST_BIT(Asym_Pause))
- result |= (1 << MC_CMD_PHY_CAP_ASYM_LBN);
- if (TEST_BIT(Autoneg))
- result |= (1 << MC_CMD_PHY_CAP_AN_LBN);
-
- #undef TEST_BIT
-
- return result;
-}
-
-static u32 efx_get_mcdi_phy_flags(struct efx_nic *efx)
-{
- struct efx_mcdi_phy_data *phy_cfg = efx->phy_data;
- enum efx_phy_mode mode, supported;
- u32 flags;
-
- /* TODO: Advertise the capabilities supported by this PHY */
- supported = 0;
- if (phy_cfg->flags & (1 << MC_CMD_GET_PHY_CFG_OUT_TXDIS_LBN))
- supported |= PHY_MODE_TX_DISABLED;
- if (phy_cfg->flags & (1 << MC_CMD_GET_PHY_CFG_OUT_LOWPOWER_LBN))
- supported |= PHY_MODE_LOW_POWER;
- if (phy_cfg->flags & (1 << MC_CMD_GET_PHY_CFG_OUT_POWEROFF_LBN))
- supported |= PHY_MODE_OFF;
-
- mode = efx->phy_mode & supported;
-
- flags = 0;
- if (mode & PHY_MODE_TX_DISABLED)
- flags |= (1 << MC_CMD_SET_LINK_IN_TXDIS_LBN);
- if (mode & PHY_MODE_LOW_POWER)
- flags |= (1 << MC_CMD_SET_LINK_IN_LOWPOWER_LBN);
- if (mode & PHY_MODE_OFF)
- flags |= (1 << MC_CMD_SET_LINK_IN_POWEROFF_LBN);
-
- return flags;
-}
-
-static u8 mcdi_to_ethtool_media(u32 media)
-{
- switch (media) {
- case MC_CMD_MEDIA_XAUI:
- case MC_CMD_MEDIA_CX4:
- case MC_CMD_MEDIA_KX4:
- return PORT_OTHER;
-
- case MC_CMD_MEDIA_XFP:
- case MC_CMD_MEDIA_SFP_PLUS:
- case MC_CMD_MEDIA_QSFP_PLUS:
- return PORT_FIBRE;
-
- case MC_CMD_MEDIA_BASE_T:
- return PORT_TP;
-
- default:
- return PORT_OTHER;
- }
-}
-
-static void efx_mcdi_phy_decode_link(struct efx_nic *efx,
- struct efx_link_state *link_state,
- u32 speed, u32 flags, u32 fcntl)
-{
- switch (fcntl) {
- case MC_CMD_FCNTL_AUTO:
- WARN_ON(1); /* This is not a link mode */
- link_state->fc = EFX_FC_AUTO | EFX_FC_TX | EFX_FC_RX;
- break;
- case MC_CMD_FCNTL_BIDIR:
- link_state->fc = EFX_FC_TX | EFX_FC_RX;
- break;
- case MC_CMD_FCNTL_RESPOND:
- link_state->fc = EFX_FC_RX;
- break;
- default:
- WARN_ON(1);
- /* Fall through */
- case MC_CMD_FCNTL_OFF:
- link_state->fc = 0;
- break;
- }
-
- link_state->up = !!(flags & (1 << MC_CMD_GET_LINK_OUT_LINK_UP_LBN));
- link_state->fd = !!(flags & (1 << MC_CMD_GET_LINK_OUT_FULL_DUPLEX_LBN));
- link_state->speed = speed;
-}
-
-/* The semantics of the ethtool FEC mode bitmask are not well defined,
- * particularly the meaning of combinations of bits. Which means we get to
- * define our own semantics, as follows:
- * OFF overrides any other bits, and means "disable all FEC" (with the
- * exception of 25G KR4/CR4, where it is not possible to reject it if AN
- * partner requests it).
- * AUTO on its own means use cable requirements and link partner autoneg with
- * fw-default preferences for the cable type.
- * AUTO and either RS or BASER means use the specified FEC type if cable and
- * link partner support it, otherwise autoneg/fw-default.
- * RS or BASER alone means use the specified FEC type if cable and link partner
- * support it and either requests it, otherwise no FEC.
- * Both RS and BASER (whether AUTO or not) means use FEC if cable and link
- * partner support it, preferring RS to BASER.
- */
-static u32 ethtool_fec_caps_to_mcdi(u32 ethtool_cap)
-{
- u32 ret = 0;
-
- if (ethtool_cap & ETHTOOL_FEC_OFF)
- return 0;
-
- if (ethtool_cap & ETHTOOL_FEC_AUTO)
- ret |= (1 << MC_CMD_PHY_CAP_BASER_FEC_LBN) |
- (1 << MC_CMD_PHY_CAP_25G_BASER_FEC_LBN) |
- (1 << MC_CMD_PHY_CAP_RS_FEC_LBN);
- if (ethtool_cap & ETHTOOL_FEC_RS)
- ret |= (1 << MC_CMD_PHY_CAP_RS_FEC_LBN) |
- (1 << MC_CMD_PHY_CAP_RS_FEC_REQUESTED_LBN);
- if (ethtool_cap & ETHTOOL_FEC_BASER)
- ret |= (1 << MC_CMD_PHY_CAP_BASER_FEC_LBN) |
- (1 << MC_CMD_PHY_CAP_25G_BASER_FEC_LBN) |
- (1 << MC_CMD_PHY_CAP_BASER_FEC_REQUESTED_LBN) |
- (1 << MC_CMD_PHY_CAP_25G_BASER_FEC_REQUESTED_LBN);
- return ret;
-}
-
-/* Invert ethtool_fec_caps_to_mcdi. There are two combinations that function
- * can never produce, (baser xor rs) and neither req; the implementation below
- * maps both of those to AUTO. This should never matter, and it's not clear
- * what a better mapping would be anyway.
- */
-static u32 mcdi_fec_caps_to_ethtool(u32 caps, bool is_25g)
-{
- bool rs = caps & (1 << MC_CMD_PHY_CAP_RS_FEC_LBN),
- rs_req = caps & (1 << MC_CMD_PHY_CAP_RS_FEC_REQUESTED_LBN),
- baser = is_25g ? caps & (1 << MC_CMD_PHY_CAP_25G_BASER_FEC_LBN)
- : caps & (1 << MC_CMD_PHY_CAP_BASER_FEC_LBN),
- baser_req = is_25g ? caps & (1 << MC_CMD_PHY_CAP_25G_BASER_FEC_REQUESTED_LBN)
- : caps & (1 << MC_CMD_PHY_CAP_BASER_FEC_REQUESTED_LBN);
-
- if (!baser && !rs)
- return ETHTOOL_FEC_OFF;
- return (rs_req ? ETHTOOL_FEC_RS : 0) |
- (baser_req ? ETHTOOL_FEC_BASER : 0) |
- (baser == baser_req && rs == rs_req ? 0 : ETHTOOL_FEC_AUTO);
-}
-
static int efx_mcdi_phy_probe(struct efx_nic *efx)
{
struct efx_mcdi_phy_data *phy_data;
@@ -527,58 +188,6 @@ int efx_mcdi_port_reconfigure(struct efx_nic *efx)
efx->loopback_mode, 0);
}
-/* Verify that the forced flow control settings (!EFX_FC_AUTO) are
- * supported by the link partner. Warn the user if this isn't the case
- */
-static void efx_mcdi_phy_check_fcntl(struct efx_nic *efx, u32 lpa)
-{
- struct efx_mcdi_phy_data *phy_cfg = efx->phy_data;
- u32 rmtadv;
-
- /* The link partner capabilities are only relevant if the
- * link supports flow control autonegotiation */
- if (~phy_cfg->supported_cap & (1 << MC_CMD_PHY_CAP_AN_LBN))
- return;
-
- /* If flow control autoneg is supported and enabled, then fine */
- if (efx->wanted_fc & EFX_FC_AUTO)
- return;
-
- rmtadv = 0;
- if (lpa & (1 << MC_CMD_PHY_CAP_PAUSE_LBN))
- rmtadv |= ADVERTISED_Pause;
- if (lpa & (1 << MC_CMD_PHY_CAP_ASYM_LBN))
- rmtadv |= ADVERTISED_Asym_Pause;
-
- if ((efx->wanted_fc & EFX_FC_TX) && rmtadv == ADVERTISED_Asym_Pause)
- netif_err(efx, link, efx->net_dev,
- "warning: link partner doesn't support pause frames");
-}
-
-static bool efx_mcdi_phy_poll(struct efx_nic *efx)
-{
- struct efx_link_state old_state = efx->link_state;
- MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_LINK_OUT_LEN);
- int rc;
-
- WARN_ON(!mutex_is_locked(&efx->mac_lock));
-
- BUILD_BUG_ON(MC_CMD_GET_LINK_IN_LEN != 0);
-
- rc = efx_mcdi_rpc(efx, MC_CMD_GET_LINK, NULL, 0,
- outbuf, sizeof(outbuf), NULL);
- if (rc)
- efx->link_state.up = false;
- else
- efx_mcdi_phy_decode_link(
- efx, &efx->link_state,
- MCDI_DWORD(outbuf, GET_LINK_OUT_LINK_SPEED),
- MCDI_DWORD(outbuf, GET_LINK_OUT_FLAGS),
- MCDI_DWORD(outbuf, GET_LINK_OUT_FCNTL));
-
- return !efx_link_state_equal(&efx->link_state, &old_state);
-}
-
static void efx_mcdi_phy_remove(struct efx_nic *efx)
{
struct efx_mcdi_phy_data *phy_data = efx->phy_data;
@@ -666,58 +275,6 @@ efx_mcdi_phy_set_link_ksettings(struct efx_nic *efx,
return 0;
}
-static int efx_mcdi_phy_get_fecparam(struct efx_nic *efx,
- struct ethtool_fecparam *fec)
-{
- MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_LINK_OUT_V2_LEN);
- u32 caps, active, speed; /* MCDI format */
- bool is_25g = false;
- size_t outlen;
- int rc;
-
- BUILD_BUG_ON(MC_CMD_GET_LINK_IN_LEN != 0);
- rc = efx_mcdi_rpc(efx, MC_CMD_GET_LINK, NULL, 0,
- outbuf, sizeof(outbuf), &outlen);
- if (rc)
- return rc;
- if (outlen < MC_CMD_GET_LINK_OUT_V2_LEN)
- return -EOPNOTSUPP;
-
- /* behaviour for 25G/50G links depends on 25G BASER bit */
- speed = MCDI_DWORD(outbuf, GET_LINK_OUT_V2_LINK_SPEED);
- is_25g = speed == 25000 || speed == 50000;
-
- caps = MCDI_DWORD(outbuf, GET_LINK_OUT_V2_CAP);
- fec->fec = mcdi_fec_caps_to_ethtool(caps, is_25g);
- /* BASER is never supported on 100G */
- if (speed == 100000)
- fec->fec &= ~ETHTOOL_FEC_BASER;
-
- active = MCDI_DWORD(outbuf, GET_LINK_OUT_V2_FEC_TYPE);
- switch (active) {
- case MC_CMD_FEC_NONE:
- fec->active_fec = ETHTOOL_FEC_OFF;
- break;
- case MC_CMD_FEC_BASER:
- fec->active_fec = ETHTOOL_FEC_BASER;
- break;
- case MC_CMD_FEC_RS:
- fec->active_fec = ETHTOOL_FEC_RS;
- break;
- default:
- netif_warn(efx, hw, efx->net_dev,
- "Firmware reports unrecognised FEC_TYPE %u\n",
- active);
- /* We don't know what firmware has picked. AUTO is as good a
- * "can't happen" value as any other.
- */
- fec->active_fec = ETHTOOL_FEC_AUTO;
- break;
- }
-
- return 0;
-}
-
static int efx_mcdi_phy_set_fecparam(struct efx_nic *efx,
const struct ethtool_fecparam *fec)
{
@@ -745,27 +302,6 @@ static int efx_mcdi_phy_set_fecparam(struct efx_nic *efx,
return 0;
}
-static int efx_mcdi_phy_test_alive(struct efx_nic *efx)
-{
- MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_PHY_STATE_OUT_LEN);
- size_t outlen;
- int rc;
-
- BUILD_BUG_ON(MC_CMD_GET_PHY_STATE_IN_LEN != 0);
-
- rc = efx_mcdi_rpc(efx, MC_CMD_GET_PHY_STATE, NULL, 0,
- outbuf, sizeof(outbuf), &outlen);
- if (rc)
- return rc;
-
- if (outlen < MC_CMD_GET_PHY_STATE_OUT_LEN)
- return -EIO;
- if (MCDI_DWORD(outbuf, GET_PHY_STATE_OUT_STATE) != MC_CMD_PHY_STATE_OK)
- return -EINVAL;
-
- return 0;
-}
-
static const char *const mcdi_sft9001_cable_diag_names[] = {
"cable.pairA.length",
"cable.pairB.length",
@@ -1139,84 +675,6 @@ u32 efx_mcdi_phy_get_caps(struct efx_nic *efx)
return phy_data->supported_cap;
}
-static unsigned int efx_mcdi_event_link_speed[] = {
- [MCDI_EVENT_LINKCHANGE_SPEED_100M] = 100,
- [MCDI_EVENT_LINKCHANGE_SPEED_1G] = 1000,
- [MCDI_EVENT_LINKCHANGE_SPEED_10G] = 10000,
- [MCDI_EVENT_LINKCHANGE_SPEED_40G] = 40000,
- [MCDI_EVENT_LINKCHANGE_SPEED_25G] = 25000,
- [MCDI_EVENT_LINKCHANGE_SPEED_50G] = 50000,
- [MCDI_EVENT_LINKCHANGE_SPEED_100G] = 100000,
-};
-
-void efx_mcdi_process_link_change(struct efx_nic *efx, efx_qword_t *ev)
-{
- u32 flags, fcntl, speed, lpa;
-
- speed = EFX_QWORD_FIELD(*ev, MCDI_EVENT_LINKCHANGE_SPEED);
- EFX_WARN_ON_PARANOID(speed >= ARRAY_SIZE(efx_mcdi_event_link_speed));
- speed = efx_mcdi_event_link_speed[speed];
-
- flags = EFX_QWORD_FIELD(*ev, MCDI_EVENT_LINKCHANGE_LINK_FLAGS);
- fcntl = EFX_QWORD_FIELD(*ev, MCDI_EVENT_LINKCHANGE_FCNTL);
- lpa = EFX_QWORD_FIELD(*ev, MCDI_EVENT_LINKCHANGE_LP_CAP);
-
- /* efx->link_state is only modified by efx_mcdi_phy_get_link(),
- * which is only run after flushing the event queues. Therefore, it
- * is safe to modify the link state outside of the mac_lock here.
- */
- efx_mcdi_phy_decode_link(efx, &efx->link_state, speed, flags, fcntl);
-
- efx_mcdi_phy_check_fcntl(efx, lpa);
-
- efx_link_status_changed(efx);
-}
-
-int efx_mcdi_set_mac(struct efx_nic *efx)
-{
- u32 fcntl;
- MCDI_DECLARE_BUF(cmdbytes, MC_CMD_SET_MAC_IN_LEN);
-
- BUILD_BUG_ON(MC_CMD_SET_MAC_OUT_LEN != 0);
-
- /* This has no effect on EF10 */
- ether_addr_copy(MCDI_PTR(cmdbytes, SET_MAC_IN_ADDR),
- efx->net_dev->dev_addr);
-
- MCDI_SET_DWORD(cmdbytes, SET_MAC_IN_MTU,
- EFX_MAX_FRAME_LEN(efx->net_dev->mtu));
- MCDI_SET_DWORD(cmdbytes, SET_MAC_IN_DRAIN, 0);
-
- /* Set simple MAC filter for Siena */
- MCDI_POPULATE_DWORD_1(cmdbytes, SET_MAC_IN_REJECT,
- SET_MAC_IN_REJECT_UNCST, efx->unicast_filter);
-
- MCDI_POPULATE_DWORD_1(cmdbytes, SET_MAC_IN_FLAGS,
- SET_MAC_IN_FLAG_INCLUDE_FCS,
- !!(efx->net_dev->features & NETIF_F_RXFCS));
-
- switch (efx->wanted_fc) {
- case EFX_FC_RX | EFX_FC_TX:
- fcntl = MC_CMD_FCNTL_BIDIR;
- break;
- case EFX_FC_RX:
- fcntl = MC_CMD_FCNTL_RESPOND;
- break;
- default:
- fcntl = MC_CMD_FCNTL_OFF;
- break;
- }
- if (efx->wanted_fc & EFX_FC_AUTO)
- fcntl = MC_CMD_FCNTL_AUTO;
- if (efx->fc_disable)
- fcntl = MC_CMD_FCNTL_OFF;
-
- MCDI_SET_DWORD(cmdbytes, SET_MAC_IN_FCNTL, fcntl);
-
- return efx_mcdi_rpc(efx, MC_CMD_SET_MAC, cmdbytes, sizeof(cmdbytes),
- NULL, 0, NULL);
-}
-
bool efx_mcdi_mac_check_fault(struct efx_nic *efx)
{
MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_LINK_OUT_LEN);
@@ -1348,17 +806,3 @@ void efx_mcdi_port_remove(struct efx_nic *efx)
efx->phy_op->remove(efx);
efx_nic_free_buffer(efx, &efx->stats_buffer);
}
-
-/* Get physical port number (EF10 only; on Siena it is same as PF number) */
-int efx_mcdi_port_get_number(struct efx_nic *efx)
-{
- MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_PORT_ASSIGNMENT_OUT_LEN);
- int rc;
-
- rc = efx_mcdi_rpc(efx, MC_CMD_GET_PORT_ASSIGNMENT, NULL, 0,
- outbuf, sizeof(outbuf), NULL);
- if (rc)
- return rc;
-
- return MCDI_DWORD(outbuf, GET_PORT_ASSIGNMENT_OUT_PORT);
-}
diff --git a/drivers/net/ethernet/sfc/mcdi_port_common.c b/drivers/net/ethernet/sfc/mcdi_port_common.c
new file mode 100644
index 000000000000..a6a072ba46d3
--- /dev/null
+++ b/drivers/net/ethernet/sfc/mcdi_port_common.c
@@ -0,0 +1,568 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/****************************************************************************
+ * Driver for Solarflare network controllers and boards
+ * Copyright 2018 Solarflare Communications Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation, incorporated herein by reference.
+ */
+
+#include "mcdi_port_common.h"
+#include "efx_common.h"
+
+int efx_mcdi_get_phy_cfg(struct efx_nic *efx, struct efx_mcdi_phy_data *cfg)
+{
+ MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_PHY_CFG_OUT_LEN);
+ size_t outlen;
+ int rc;
+
+ BUILD_BUG_ON(MC_CMD_GET_PHY_CFG_IN_LEN != 0);
+ BUILD_BUG_ON(MC_CMD_GET_PHY_CFG_OUT_NAME_LEN != sizeof(cfg->name));
+
+ rc = efx_mcdi_rpc(efx, MC_CMD_GET_PHY_CFG, NULL, 0,
+ outbuf, sizeof(outbuf), &outlen);
+ if (rc)
+ goto fail;
+
+ if (outlen < MC_CMD_GET_PHY_CFG_OUT_LEN) {
+ rc = -EIO;
+ goto fail;
+ }
+
+ cfg->flags = MCDI_DWORD(outbuf, GET_PHY_CFG_OUT_FLAGS);
+ cfg->type = MCDI_DWORD(outbuf, GET_PHY_CFG_OUT_TYPE);
+ cfg->supported_cap =
+ MCDI_DWORD(outbuf, GET_PHY_CFG_OUT_SUPPORTED_CAP);
+ cfg->channel = MCDI_DWORD(outbuf, GET_PHY_CFG_OUT_CHANNEL);
+ cfg->port = MCDI_DWORD(outbuf, GET_PHY_CFG_OUT_PRT);
+ cfg->stats_mask = MCDI_DWORD(outbuf, GET_PHY_CFG_OUT_STATS_MASK);
+ memcpy(cfg->name, MCDI_PTR(outbuf, GET_PHY_CFG_OUT_NAME),
+ sizeof(cfg->name));
+ cfg->media = MCDI_DWORD(outbuf, GET_PHY_CFG_OUT_MEDIA_TYPE);
+ cfg->mmd_mask = MCDI_DWORD(outbuf, GET_PHY_CFG_OUT_MMD_MASK);
+ memcpy(cfg->revision, MCDI_PTR(outbuf, GET_PHY_CFG_OUT_REVISION),
+ sizeof(cfg->revision));
+
+ return 0;
+
+fail:
+ netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
+ return rc;
+}
+
+void efx_link_set_advertising(struct efx_nic *efx,
+ const unsigned long *advertising)
+{
+ memcpy(efx->link_advertising, advertising,
+ sizeof(__ETHTOOL_DECLARE_LINK_MODE_MASK()));
+
+ efx->link_advertising[0] |= ADVERTISED_Autoneg;
+ if (advertising[0] & ADVERTISED_Pause)
+ efx->wanted_fc |= (EFX_FC_TX | EFX_FC_RX);
+ else
+ efx->wanted_fc &= ~(EFX_FC_TX | EFX_FC_RX);
+ if (advertising[0] & ADVERTISED_Asym_Pause)
+ efx->wanted_fc ^= EFX_FC_TX;
+}
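+
+/* How the two advertised pause bits resolve, as a sketch of the
+ * |=, &= and ^= steps above (not an authoritative table):
+ *
+ *   Pause  Asym_Pause   wanted_fc
+ *     1        0        EFX_FC_TX | EFX_FC_RX
+ *     1        1        EFX_FC_RX (TX toggled back off)
+ *     0        1        EFX_FC_TX
+ *     0        0        0
+ */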
+
+int efx_mcdi_set_link(struct efx_nic *efx, u32 capabilities,
+ u32 flags, u32 loopback_mode, u32 loopback_speed)
+{
+ MCDI_DECLARE_BUF(inbuf, MC_CMD_SET_LINK_IN_LEN);
+ int rc;
+
+ BUILD_BUG_ON(MC_CMD_SET_LINK_OUT_LEN != 0);
+
+ MCDI_SET_DWORD(inbuf, SET_LINK_IN_CAP, capabilities);
+ MCDI_SET_DWORD(inbuf, SET_LINK_IN_FLAGS, flags);
+ MCDI_SET_DWORD(inbuf, SET_LINK_IN_LOOPBACK_MODE, loopback_mode);
+ MCDI_SET_DWORD(inbuf, SET_LINK_IN_LOOPBACK_SPEED, loopback_speed);
+
+ rc = efx_mcdi_rpc(efx, MC_CMD_SET_LINK, inbuf, sizeof(inbuf),
+ NULL, 0, NULL);
+ return rc;
+}
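+
+/* Illustrative call (a sketch, not the only caller): a reconfigure path
+ * can compose the capability word from the helpers in this file,
+ * roughly:
+ *
+ *	efx_mcdi_set_link(efx,
+ *			  ethtool_linkset_to_mcdi_cap(advertising) |
+ *			  ethtool_fec_caps_to_mcdi(fec),
+ *			  efx_get_mcdi_phy_flags(efx),
+ *			  efx->loopback_mode, 0);
+ */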
+
+int efx_mcdi_loopback_modes(struct efx_nic *efx, u64 *loopback_modes)
+{
+ MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_LOOPBACK_MODES_OUT_LEN);
+ size_t outlen;
+ int rc;
+
+ rc = efx_mcdi_rpc(efx, MC_CMD_GET_LOOPBACK_MODES, NULL, 0,
+ outbuf, sizeof(outbuf), &outlen);
+ if (rc)
+ goto fail;
+
+ if (outlen < (MC_CMD_GET_LOOPBACK_MODES_OUT_SUGGESTED_OFST +
+ MC_CMD_GET_LOOPBACK_MODES_OUT_SUGGESTED_LEN)) {
+ rc = -EIO;
+ goto fail;
+ }
+
+ *loopback_modes = MCDI_QWORD(outbuf, GET_LOOPBACK_MODES_OUT_SUGGESTED);
+
+ return 0;
+
+fail:
+ netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
+ return rc;
+}
+
+void mcdi_to_ethtool_linkset(u32 media, u32 cap, unsigned long *linkset)
+{
+ #define SET_BIT(name) __set_bit(ETHTOOL_LINK_MODE_ ## name ## _BIT, \
+ linkset)
+
+ bitmap_zero(linkset, __ETHTOOL_LINK_MODE_MASK_NBITS);
+ switch (media) {
+ case MC_CMD_MEDIA_KX4:
+ SET_BIT(Backplane);
+ if (cap & (1 << MC_CMD_PHY_CAP_1000FDX_LBN))
+ SET_BIT(1000baseKX_Full);
+ if (cap & (1 << MC_CMD_PHY_CAP_10000FDX_LBN))
+ SET_BIT(10000baseKX4_Full);
+ if (cap & (1 << MC_CMD_PHY_CAP_40000FDX_LBN))
+ SET_BIT(40000baseKR4_Full);
+ break;
+
+ case MC_CMD_MEDIA_XFP:
+ case MC_CMD_MEDIA_SFP_PLUS:
+ case MC_CMD_MEDIA_QSFP_PLUS:
+ SET_BIT(FIBRE);
+ if (cap & (1 << MC_CMD_PHY_CAP_1000FDX_LBN))
+ SET_BIT(1000baseT_Full);
+ if (cap & (1 << MC_CMD_PHY_CAP_10000FDX_LBN))
+ SET_BIT(10000baseT_Full);
+ if (cap & (1 << MC_CMD_PHY_CAP_40000FDX_LBN))
+ SET_BIT(40000baseCR4_Full);
+ if (cap & (1 << MC_CMD_PHY_CAP_100000FDX_LBN))
+ SET_BIT(100000baseCR4_Full);
+ if (cap & (1 << MC_CMD_PHY_CAP_25000FDX_LBN))
+ SET_BIT(25000baseCR_Full);
+ if (cap & (1 << MC_CMD_PHY_CAP_50000FDX_LBN))
+ SET_BIT(50000baseCR2_Full);
+ break;
+
+ case MC_CMD_MEDIA_BASE_T:
+ SET_BIT(TP);
+ if (cap & (1 << MC_CMD_PHY_CAP_10HDX_LBN))
+ SET_BIT(10baseT_Half);
+ if (cap & (1 << MC_CMD_PHY_CAP_10FDX_LBN))
+ SET_BIT(10baseT_Full);
+ if (cap & (1 << MC_CMD_PHY_CAP_100HDX_LBN))
+ SET_BIT(100baseT_Half);
+ if (cap & (1 << MC_CMD_PHY_CAP_100FDX_LBN))
+ SET_BIT(100baseT_Full);
+ if (cap & (1 << MC_CMD_PHY_CAP_1000HDX_LBN))
+ SET_BIT(1000baseT_Half);
+ if (cap & (1 << MC_CMD_PHY_CAP_1000FDX_LBN))
+ SET_BIT(1000baseT_Full);
+ if (cap & (1 << MC_CMD_PHY_CAP_10000FDX_LBN))
+ SET_BIT(10000baseT_Full);
+ break;
+ }
+
+ if (cap & (1 << MC_CMD_PHY_CAP_PAUSE_LBN))
+ SET_BIT(Pause);
+ if (cap & (1 << MC_CMD_PHY_CAP_ASYM_LBN))
+ SET_BIT(Asym_Pause);
+ if (cap & (1 << MC_CMD_PHY_CAP_AN_LBN))
+ SET_BIT(Autoneg);
+
+ #undef SET_BIT
+}
+
+u32 ethtool_linkset_to_mcdi_cap(const unsigned long *linkset)
+{
+ u32 result = 0;
+
+ #define TEST_BIT(name) test_bit(ETHTOOL_LINK_MODE_ ## name ## _BIT, \
+ linkset)
+
+ if (TEST_BIT(10baseT_Half))
+ result |= (1 << MC_CMD_PHY_CAP_10HDX_LBN);
+ if (TEST_BIT(10baseT_Full))
+ result |= (1 << MC_CMD_PHY_CAP_10FDX_LBN);
+ if (TEST_BIT(100baseT_Half))
+ result |= (1 << MC_CMD_PHY_CAP_100HDX_LBN);
+ if (TEST_BIT(100baseT_Full))
+ result |= (1 << MC_CMD_PHY_CAP_100FDX_LBN);
+ if (TEST_BIT(1000baseT_Half))
+ result |= (1 << MC_CMD_PHY_CAP_1000HDX_LBN);
+ if (TEST_BIT(1000baseT_Full) || TEST_BIT(1000baseKX_Full))
+ result |= (1 << MC_CMD_PHY_CAP_1000FDX_LBN);
+ if (TEST_BIT(10000baseT_Full) || TEST_BIT(10000baseKX4_Full))
+ result |= (1 << MC_CMD_PHY_CAP_10000FDX_LBN);
+ if (TEST_BIT(40000baseCR4_Full) || TEST_BIT(40000baseKR4_Full))
+ result |= (1 << MC_CMD_PHY_CAP_40000FDX_LBN);
+ if (TEST_BIT(100000baseCR4_Full))
+ result |= (1 << MC_CMD_PHY_CAP_100000FDX_LBN);
+ if (TEST_BIT(25000baseCR_Full))
+ result |= (1 << MC_CMD_PHY_CAP_25000FDX_LBN);
+ if (TEST_BIT(50000baseCR2_Full))
+ result |= (1 << MC_CMD_PHY_CAP_50000FDX_LBN);
+ if (TEST_BIT(Pause))
+ result |= (1 << MC_CMD_PHY_CAP_PAUSE_LBN);
+ if (TEST_BIT(Asym_Pause))
+ result |= (1 << MC_CMD_PHY_CAP_ASYM_LBN);
+ if (TEST_BIT(Autoneg))
+ result |= (1 << MC_CMD_PHY_CAP_AN_LBN);
+
+ #undef TEST_BIT
+
+ return result;
+}
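+
+/* Usage sketch (illustrative): the two conversions above are only
+ * approximate inverses. Several ethtool modes (e.g. 1000baseT_Full and
+ * 1000baseKX_Full) collapse onto one MCDI capability bit, and
+ * mcdi_to_ethtool_linkset() then re-expands it according to @media:
+ *
+ *	__ETHTOOL_DECLARE_LINK_MODE_MASK(modes);
+ *
+ *	linkmode_zero(modes);
+ *	linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseT_Full_BIT, modes);
+ *	cap = ethtool_linkset_to_mcdi_cap(modes);
+ *	mcdi_to_ethtool_linkset(MC_CMD_MEDIA_BASE_T, cap, modes);
+ */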
+
+u32 efx_get_mcdi_phy_flags(struct efx_nic *efx)
+{
+ struct efx_mcdi_phy_data *phy_cfg = efx->phy_data;
+ enum efx_phy_mode mode, supported;
+ u32 flags;
+
+ /* TODO: Advertise the capabilities supported by this PHY */
+ supported = 0;
+ if (phy_cfg->flags & (1 << MC_CMD_GET_PHY_CFG_OUT_TXDIS_LBN))
+ supported |= PHY_MODE_TX_DISABLED;
+ if (phy_cfg->flags & (1 << MC_CMD_GET_PHY_CFG_OUT_LOWPOWER_LBN))
+ supported |= PHY_MODE_LOW_POWER;
+ if (phy_cfg->flags & (1 << MC_CMD_GET_PHY_CFG_OUT_POWEROFF_LBN))
+ supported |= PHY_MODE_OFF;
+
+ mode = efx->phy_mode & supported;
+
+ flags = 0;
+ if (mode & PHY_MODE_TX_DISABLED)
+ flags |= (1 << MC_CMD_SET_LINK_IN_TXDIS_LBN);
+ if (mode & PHY_MODE_LOW_POWER)
+ flags |= (1 << MC_CMD_SET_LINK_IN_LOWPOWER_LBN);
+ if (mode & PHY_MODE_OFF)
+ flags |= (1 << MC_CMD_SET_LINK_IN_POWEROFF_LBN);
+
+ return flags;
+}
+
+u8 mcdi_to_ethtool_media(u32 media)
+{
+ switch (media) {
+ case MC_CMD_MEDIA_XAUI:
+ case MC_CMD_MEDIA_CX4:
+ case MC_CMD_MEDIA_KX4:
+ return PORT_OTHER;
+
+ case MC_CMD_MEDIA_XFP:
+ case MC_CMD_MEDIA_SFP_PLUS:
+ case MC_CMD_MEDIA_QSFP_PLUS:
+ return PORT_FIBRE;
+
+ case MC_CMD_MEDIA_BASE_T:
+ return PORT_TP;
+
+ default:
+ return PORT_OTHER;
+ }
+}
+
+void efx_mcdi_phy_decode_link(struct efx_nic *efx,
+ struct efx_link_state *link_state,
+ u32 speed, u32 flags, u32 fcntl)
+{
+ switch (fcntl) {
+ case MC_CMD_FCNTL_AUTO:
+ WARN_ON(1); /* This is not a link mode */
+ link_state->fc = EFX_FC_AUTO | EFX_FC_TX | EFX_FC_RX;
+ break;
+ case MC_CMD_FCNTL_BIDIR:
+ link_state->fc = EFX_FC_TX | EFX_FC_RX;
+ break;
+ case MC_CMD_FCNTL_RESPOND:
+ link_state->fc = EFX_FC_RX;
+ break;
+ default:
+ WARN_ON(1);
+ /* Fall through */
+ case MC_CMD_FCNTL_OFF:
+ link_state->fc = 0;
+ break;
+ }
+
+ link_state->up = !!(flags & (1 << MC_CMD_GET_LINK_OUT_LINK_UP_LBN));
+ link_state->fd = !!(flags & (1 << MC_CMD_GET_LINK_OUT_FULL_DUPLEX_LBN));
+ link_state->speed = speed;
+}
+
+/* The semantics of the ethtool FEC mode bitmask are not well defined,
+ * particularly the meaning of combinations of bits. Which means we get to
+ * define our own semantics, as follows:
+ * OFF overrides any other bits, and means "disable all FEC" (with the
+ * exception of 25G KR4/CR4, where it is not possible to reject it if AN
+ * partner requests it).
+ * AUTO on its own means use cable requirements and link partner autoneg with
+ * fw-default preferences for the cable type.
+ * AUTO and either RS or BASER means use the specified FEC type if cable and
+ * link partner support it, otherwise autoneg/fw-default.
+ * RS or BASER alone means use the specified FEC type if cable and link partner
+ * support it and either requests it, otherwise no FEC.
+ * Both RS and BASER (whether AUTO or not) means use FEC if cable and link
+ * partner support it, preferring RS to BASER.
+ */
+u32 ethtool_fec_caps_to_mcdi(u32 ethtool_cap)
+{
+ u32 ret = 0;
+
+ if (ethtool_cap & ETHTOOL_FEC_OFF)
+ return 0;
+
+ if (ethtool_cap & ETHTOOL_FEC_AUTO)
+ ret |= (1 << MC_CMD_PHY_CAP_BASER_FEC_LBN) |
+ (1 << MC_CMD_PHY_CAP_25G_BASER_FEC_LBN) |
+ (1 << MC_CMD_PHY_CAP_RS_FEC_LBN);
+ if (ethtool_cap & ETHTOOL_FEC_RS)
+ ret |= (1 << MC_CMD_PHY_CAP_RS_FEC_LBN) |
+ (1 << MC_CMD_PHY_CAP_RS_FEC_REQUESTED_LBN);
+ if (ethtool_cap & ETHTOOL_FEC_BASER)
+ ret |= (1 << MC_CMD_PHY_CAP_BASER_FEC_LBN) |
+ (1 << MC_CMD_PHY_CAP_25G_BASER_FEC_LBN) |
+ (1 << MC_CMD_PHY_CAP_BASER_FEC_REQUESTED_LBN) |
+ (1 << MC_CMD_PHY_CAP_25G_BASER_FEC_REQUESTED_LBN);
+ return ret;
+}
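+
+/* Worked example of the rules above (a sketch, not exhaustive):
+ * ETHTOOL_FEC_RS alone yields RS_FEC | RS_FEC_REQUESTED, i.e. "use RS if
+ * the cable and link partner support it and either end requests it".
+ * ETHTOOL_FEC_AUTO | ETHTOOL_FEC_RS additionally sets the BASER ability
+ * bits without their REQUESTED bits, leaving the final choice to cable
+ * requirements and autoneg.
+ */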
+
+/* Invert ethtool_fec_caps_to_mcdi. There are two combinations which that
+ * function can never produce, (baser xor rs) and neither req; the
+ * implementation below
+ * maps both of those to AUTO. This should never matter, and it's not clear
+ * what a better mapping would be anyway.
+ */
+u32 mcdi_fec_caps_to_ethtool(u32 caps, bool is_25g)
+{
+ bool rs = caps & (1 << MC_CMD_PHY_CAP_RS_FEC_LBN),
+ rs_req = caps & (1 << MC_CMD_PHY_CAP_RS_FEC_REQUESTED_LBN),
+ baser = is_25g ? caps & (1 << MC_CMD_PHY_CAP_25G_BASER_FEC_LBN)
+ : caps & (1 << MC_CMD_PHY_CAP_BASER_FEC_LBN),
+ baser_req = is_25g ? caps & (1 << MC_CMD_PHY_CAP_25G_BASER_FEC_REQUESTED_LBN)
+ : caps & (1 << MC_CMD_PHY_CAP_BASER_FEC_REQUESTED_LBN);
+
+ if (!baser && !rs)
+ return ETHTOOL_FEC_OFF;
+ return (rs_req ? ETHTOOL_FEC_RS : 0) |
+ (baser_req ? ETHTOOL_FEC_BASER : 0) |
+ (baser == baser_req && rs == rs_req ? 0 : ETHTOOL_FEC_AUTO);
+}
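+
+/* Example decode (illustrative): caps with RS_FEC and RS_FEC_REQUESTED
+ * set and both BASER bits clear map to ETHTOOL_FEC_RS. If the BASER
+ * ability bit is also set without its REQUESTED bit, ETHTOOL_FEC_AUTO is
+ * or'd in, because ethtool_fec_caps_to_mcdi() only produces that pattern
+ * from ETHTOOL_FEC_AUTO.
+ */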
+
+/* Verify that the forced flow control settings (!EFX_FC_AUTO) are
+ * supported by the link partner. Warn the user if this isn't the case
+ */
+void efx_mcdi_phy_check_fcntl(struct efx_nic *efx, u32 lpa)
+{
+ struct efx_mcdi_phy_data *phy_cfg = efx->phy_data;
+ u32 rmtadv;
+
+ /* The link partner capabilities are only relevant if the
+ * link supports flow control autonegotiation
+ */
+ if (~phy_cfg->supported_cap & (1 << MC_CMD_PHY_CAP_AN_LBN))
+ return;
+
+ /* If flow control autoneg is supported and enabled, then fine */
+ if (efx->wanted_fc & EFX_FC_AUTO)
+ return;
+
+ rmtadv = 0;
+ if (lpa & (1 << MC_CMD_PHY_CAP_PAUSE_LBN))
+ rmtadv |= ADVERTISED_Pause;
+ if (lpa & (1 << MC_CMD_PHY_CAP_ASYM_LBN))
+ rmtadv |= ADVERTISED_Asym_Pause;
+
+ if ((efx->wanted_fc & EFX_FC_TX) && rmtadv == ADVERTISED_Asym_Pause)
+ netif_err(efx, link, efx->net_dev,
+ "warning: link partner doesn't support pause frames");
+}
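+
+/* Illustration of the check above (a sketch): a partner advertising
+ * Asym_Pause but not Pause can generate pause frames yet will not honour
+ * incoming ones, so locally forcing EFX_FC_TX achieves nothing at the
+ * remote end; that is the one combination warned about here.
+ */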
+
+bool efx_mcdi_phy_poll(struct efx_nic *efx)
+{
+ struct efx_link_state old_state = efx->link_state;
+ MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_LINK_OUT_LEN);
+ int rc;
+
+ WARN_ON(!mutex_is_locked(&efx->mac_lock));
+
+ BUILD_BUG_ON(MC_CMD_GET_LINK_IN_LEN != 0);
+
+ rc = efx_mcdi_rpc(efx, MC_CMD_GET_LINK, NULL, 0,
+ outbuf, sizeof(outbuf), NULL);
+ if (rc)
+ efx->link_state.up = false;
+ else
+ efx_mcdi_phy_decode_link(
+ efx, &efx->link_state,
+ MCDI_DWORD(outbuf, GET_LINK_OUT_LINK_SPEED),
+ MCDI_DWORD(outbuf, GET_LINK_OUT_FLAGS),
+ MCDI_DWORD(outbuf, GET_LINK_OUT_FCNTL));
+
+ return !efx_link_state_equal(&efx->link_state, &old_state);
+}
+
+int efx_mcdi_phy_get_fecparam(struct efx_nic *efx, struct ethtool_fecparam *fec)
+{
+ MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_LINK_OUT_V2_LEN);
+ u32 caps, active, speed; /* MCDI format */
+ bool is_25g = false;
+ size_t outlen;
+ int rc;
+
+ BUILD_BUG_ON(MC_CMD_GET_LINK_IN_LEN != 0);
+ rc = efx_mcdi_rpc(efx, MC_CMD_GET_LINK, NULL, 0,
+ outbuf, sizeof(outbuf), &outlen);
+ if (rc)
+ return rc;
+ if (outlen < MC_CMD_GET_LINK_OUT_V2_LEN)
+ return -EOPNOTSUPP;
+
+ /* behaviour for 25G/50G links depends on 25G BASER bit */
+ speed = MCDI_DWORD(outbuf, GET_LINK_OUT_V2_LINK_SPEED);
+ is_25g = speed == 25000 || speed == 50000;
+
+ caps = MCDI_DWORD(outbuf, GET_LINK_OUT_V2_CAP);
+ fec->fec = mcdi_fec_caps_to_ethtool(caps, is_25g);
+ /* BASER is never supported on 100G */
+ if (speed == 100000)
+ fec->fec &= ~ETHTOOL_FEC_BASER;
+
+ active = MCDI_DWORD(outbuf, GET_LINK_OUT_V2_FEC_TYPE);
+ switch (active) {
+ case MC_CMD_FEC_NONE:
+ fec->active_fec = ETHTOOL_FEC_OFF;
+ break;
+ case MC_CMD_FEC_BASER:
+ fec->active_fec = ETHTOOL_FEC_BASER;
+ break;
+ case MC_CMD_FEC_RS:
+ fec->active_fec = ETHTOOL_FEC_RS;
+ break;
+ default:
+ netif_warn(efx, hw, efx->net_dev,
+ "Firmware reports unrecognised FEC_TYPE %u\n",
+ active);
+ /* We don't know what firmware has picked. AUTO is as good a
+ * "can't happen" value as any other.
+ */
+ fec->active_fec = ETHTOOL_FEC_AUTO;
+ break;
+ }
+
+ return 0;
+}
+
+int efx_mcdi_phy_test_alive(struct efx_nic *efx)
+{
+ MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_PHY_STATE_OUT_LEN);
+ size_t outlen;
+ int rc;
+
+ BUILD_BUG_ON(MC_CMD_GET_PHY_STATE_IN_LEN != 0);
+
+ rc = efx_mcdi_rpc(efx, MC_CMD_GET_PHY_STATE, NULL, 0,
+ outbuf, sizeof(outbuf), &outlen);
+ if (rc)
+ return rc;
+
+ if (outlen < MC_CMD_GET_PHY_STATE_OUT_LEN)
+ return -EIO;
+ if (MCDI_DWORD(outbuf, GET_PHY_STATE_OUT_STATE) != MC_CMD_PHY_STATE_OK)
+ return -EINVAL;
+
+ return 0;
+}
+
+int efx_mcdi_set_mac(struct efx_nic *efx)
+{
+ u32 fcntl;
+ MCDI_DECLARE_BUF(cmdbytes, MC_CMD_SET_MAC_IN_LEN);
+
+ BUILD_BUG_ON(MC_CMD_SET_MAC_OUT_LEN != 0);
+
+ /* This has no effect on EF10 */
+ ether_addr_copy(MCDI_PTR(cmdbytes, SET_MAC_IN_ADDR),
+ efx->net_dev->dev_addr);
+
+ MCDI_SET_DWORD(cmdbytes, SET_MAC_IN_MTU,
+ EFX_MAX_FRAME_LEN(efx->net_dev->mtu));
+ MCDI_SET_DWORD(cmdbytes, SET_MAC_IN_DRAIN, 0);
+
+ /* Set simple MAC filter for Siena */
+ MCDI_POPULATE_DWORD_1(cmdbytes, SET_MAC_IN_REJECT,
+ SET_MAC_IN_REJECT_UNCST, efx->unicast_filter);
+
+ MCDI_POPULATE_DWORD_1(cmdbytes, SET_MAC_IN_FLAGS,
+ SET_MAC_IN_FLAG_INCLUDE_FCS,
+ !!(efx->net_dev->features & NETIF_F_RXFCS));
+
+ switch (efx->wanted_fc) {
+ case EFX_FC_RX | EFX_FC_TX:
+ fcntl = MC_CMD_FCNTL_BIDIR;
+ break;
+ case EFX_FC_RX:
+ fcntl = MC_CMD_FCNTL_RESPOND;
+ break;
+ default:
+ fcntl = MC_CMD_FCNTL_OFF;
+ break;
+ }
+ if (efx->wanted_fc & EFX_FC_AUTO)
+ fcntl = MC_CMD_FCNTL_AUTO;
+ if (efx->fc_disable)
+ fcntl = MC_CMD_FCNTL_OFF;
+
+ MCDI_SET_DWORD(cmdbytes, SET_MAC_IN_FCNTL, fcntl);
+
+ return efx_mcdi_rpc(efx, MC_CMD_SET_MAC, cmdbytes, sizeof(cmdbytes),
+ NULL, 0, NULL);
+}
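+
+/* Summary of the fcntl mapping above (a sketch; EFX_FC_AUTO and
+ * fc_disable override the switch result):
+ *
+ *   EFX_FC_RX | EFX_FC_TX  ->  MC_CMD_FCNTL_BIDIR
+ *   EFX_FC_RX              ->  MC_CMD_FCNTL_RESPOND
+ *   anything else          ->  MC_CMD_FCNTL_OFF
+ */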
+
+/* Get physical port number (EF10 only; on Siena it is same as PF number) */
+int efx_mcdi_port_get_number(struct efx_nic *efx)
+{
+ MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_PORT_ASSIGNMENT_OUT_LEN);
+ int rc;
+
+ rc = efx_mcdi_rpc(efx, MC_CMD_GET_PORT_ASSIGNMENT, NULL, 0,
+ outbuf, sizeof(outbuf), NULL);
+ if (rc)
+ return rc;
+
+ return MCDI_DWORD(outbuf, GET_PORT_ASSIGNMENT_OUT_PORT);
+}
+
+static unsigned int efx_mcdi_event_link_speed[] = {
+ [MCDI_EVENT_LINKCHANGE_SPEED_100M] = 100,
+ [MCDI_EVENT_LINKCHANGE_SPEED_1G] = 1000,
+ [MCDI_EVENT_LINKCHANGE_SPEED_10G] = 10000,
+ [MCDI_EVENT_LINKCHANGE_SPEED_40G] = 40000,
+ [MCDI_EVENT_LINKCHANGE_SPEED_25G] = 25000,
+ [MCDI_EVENT_LINKCHANGE_SPEED_50G] = 50000,
+ [MCDI_EVENT_LINKCHANGE_SPEED_100G] = 100000,
+};
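+
+/* Example (illustrative): a LINKCHANGE event carries
+ * MCDI_EVENT_LINKCHANGE_SPEED_25G, an enum index rather than a rate;
+ * the table above translates it to 25000, the Mb/s value that
+ * efx_mcdi_phy_decode_link() and ethtool expect.
+ */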
+
+void efx_mcdi_process_link_change(struct efx_nic *efx, efx_qword_t *ev)
+{
+ u32 flags, fcntl, speed, lpa;
+
+ speed = EFX_QWORD_FIELD(*ev, MCDI_EVENT_LINKCHANGE_SPEED);
+ EFX_WARN_ON_PARANOID(speed >= ARRAY_SIZE(efx_mcdi_event_link_speed));
+ speed = efx_mcdi_event_link_speed[speed];
+
+ flags = EFX_QWORD_FIELD(*ev, MCDI_EVENT_LINKCHANGE_LINK_FLAGS);
+ fcntl = EFX_QWORD_FIELD(*ev, MCDI_EVENT_LINKCHANGE_FCNTL);
+ lpa = EFX_QWORD_FIELD(*ev, MCDI_EVENT_LINKCHANGE_LP_CAP);
+
+ /* efx->link_state is only modified by efx_mcdi_phy_get_link(),
+ * which is only run after flushing the event queues. Therefore, it
+ * is safe to modify the link state outside of the mac_lock here.
+ */
+ efx_mcdi_phy_decode_link(efx, &efx->link_state, speed, flags, fcntl);
+
+ efx_mcdi_phy_check_fcntl(efx, lpa);
+
+ efx_link_status_changed(efx);
+}
diff --git a/drivers/net/ethernet/sfc/mcdi_port_common.h b/drivers/net/ethernet/sfc/mcdi_port_common.h
new file mode 100644
index 000000000000..b16f11265269
--- /dev/null
+++ b/drivers/net/ethernet/sfc/mcdi_port_common.h
@@ -0,0 +1,57 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/****************************************************************************
+ * Driver for Solarflare network controllers and boards
+ * Copyright 2018 Solarflare Communications Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation, incorporated herein by reference.
+ */
+#ifndef EFX_MCDI_PORT_COMMON_H
+#define EFX_MCDI_PORT_COMMON_H
+
+#include "net_driver.h"
+#include "mcdi.h"
+#include "mcdi_pcol.h"
+
+struct efx_mcdi_phy_data {
+ u32 flags;
+ u32 type;
+ u32 supported_cap;
+ u32 channel;
+ u32 port;
+ u32 stats_mask;
+ u8 name[20];
+ u32 media;
+ u32 mmd_mask;
+ u8 revision[20];
+ u32 forced_cap;
+};
+
+#define EFX_MC_STATS_GENERATION_INVALID ((__force __le64)(-1))
+
+int efx_mcdi_get_phy_cfg(struct efx_nic *efx, struct efx_mcdi_phy_data *cfg);
+void efx_link_set_advertising(struct efx_nic *efx,
+ const unsigned long *advertising);
+int efx_mcdi_set_link(struct efx_nic *efx, u32 capabilities,
+ u32 flags, u32 loopback_mode, u32 loopback_speed);
+int efx_mcdi_loopback_modes(struct efx_nic *efx, u64 *loopback_modes);
+void mcdi_to_ethtool_linkset(u32 media, u32 cap, unsigned long *linkset);
+u32 ethtool_linkset_to_mcdi_cap(const unsigned long *linkset);
+u32 efx_get_mcdi_phy_flags(struct efx_nic *efx);
+u8 mcdi_to_ethtool_media(u32 media);
+void efx_mcdi_phy_decode_link(struct efx_nic *efx,
+ struct efx_link_state *link_state,
+ u32 speed, u32 flags, u32 fcntl);
+u32 ethtool_fec_caps_to_mcdi(u32 ethtool_cap);
+u32 mcdi_fec_caps_to_ethtool(u32 caps, bool is_25g);
+void efx_mcdi_phy_check_fcntl(struct efx_nic *efx, u32 lpa);
+bool efx_mcdi_phy_poll(struct efx_nic *efx);
+int efx_mcdi_phy_get_fecparam(struct efx_nic *efx,
+ struct ethtool_fecparam *fec);
+int efx_mcdi_phy_test_alive(struct efx_nic *efx);
+int efx_mcdi_set_mac(struct efx_nic *efx);
+int efx_mcdi_port_get_number(struct efx_nic *efx);
+void efx_mcdi_process_link_change(struct efx_nic *efx, efx_qword_t *ev);
+
+#endif
diff --git a/drivers/net/ethernet/sfc/net_driver.h b/drivers/net/ethernet/sfc/net_driver.h
index dfd5182d9e47..9f9886f222c8 100644
--- a/drivers/net/ethernet/sfc/net_driver.h
+++ b/drivers/net/ethernet/sfc/net_driver.h
@@ -24,7 +24,6 @@
#include <linux/mutex.h>
#include <linux/rwsem.h>
#include <linux/vmalloc.h>
-#include <linux/i2c.h>
#include <linux/mtd/mtd.h>
#include <net/busy_poll.h>
#include <net/xdp.h>
@@ -139,6 +138,8 @@ struct efx_special_buffer {
* freed when descriptor completes
* @xdpf: When @flags & %EFX_TX_BUF_XDP, the XDP frame information; its @data
* member is the associated buffer to drop a page reference on.
+ * @option: When @flags & %EFX_TX_BUF_OPTION, an EF10-specific option
+ * descriptor.
* @dma_addr: DMA address of the fragment.
* @flags: Flags for allocation and DMA mapping type
* @len: Length of this fragment.
@@ -153,7 +154,7 @@ struct efx_tx_buffer {
struct xdp_frame *xdpf;
};
union {
- efx_qword_t option;
+ efx_qword_t option; /* EF10 */
dma_addr_t dma_addr;
};
unsigned short flags;
@@ -743,13 +744,13 @@ union efx_multicast_hash {
struct vfdi_status;
/* The reserved RSS context value */
-#define EFX_EF10_RSS_CONTEXT_INVALID 0xffffffff
+#define EFX_MCDI_RSS_CONTEXT_INVALID 0xffffffff
/**
* struct efx_rss_context - A user-defined RSS context for filtering
* @list: node of linked list on which this struct is stored
* @context_id: the RSS_CONTEXT_ID returned by MC firmware, or
- * %EFX_EF10_RSS_CONTEXT_INVALID if this context is not present on the NIC.
- * For Siena, 0 if RSS is active, else %EFX_EF10_RSS_CONTEXT_INVALID.
+ * %EFX_MCDI_RSS_CONTEXT_INVALID if this context is not present on the NIC.
+ * For Siena, 0 if RSS is active, else %EFX_MCDI_RSS_CONTEXT_INVALID.
* @user_id: the rss_context ID exposed to userspace over ethtool.
* @rx_hash_udp_4tuple: UDP 4-tuple hashing enabled
* @rx_hash_key: Toeplitz hash key for this RSS context
@@ -1611,6 +1612,15 @@ static inline struct efx_rx_buffer *efx_rx_buffer(struct efx_rx_queue *rx_queue,
return &rx_queue->buffer[index];
}
+static inline struct efx_rx_buffer *
+efx_rx_buf_next(struct efx_rx_queue *rx_queue, struct efx_rx_buffer *rx_buf)
+{
+ if (unlikely(rx_buf == efx_rx_buffer(rx_queue, rx_queue->ptr_mask)))
+ return efx_rx_buffer(rx_queue, 0);
+ else
+ return rx_buf + 1;
+}
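+
+/* Example (a sketch): with ptr_mask == 511, the buffer at index 511
+ * wraps back to index 0; helpers such as efx_free_rx_buffers() walk the
+ * ring through this function instead of incrementing raw pointers.
+ */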
+
/**
* EFX_MAX_FRAME_LEN - calculate maximum frame length
*
diff --git a/drivers/net/ethernet/sfc/nic.h b/drivers/net/ethernet/sfc/nic.h
index 1f7c5717de75..6670fda8f35a 100644
--- a/drivers/net/ethernet/sfc/nic.h
+++ b/drivers/net/ethernet/sfc/nic.h
@@ -9,9 +9,9 @@
#define EFX_NIC_H
#include <linux/net_tstamp.h>
-#include <linux/i2c-algo-bit.h>
#include "net_driver.h"
#include "efx.h"
+#include "efx_common.h"
#include "mcdi.h"
enum {
@@ -506,6 +506,9 @@ static inline void efx_nic_push_buffers(struct efx_tx_queue *tx_queue)
tx_queue->efx->type->tx_write(tx_queue);
}
+int efx_enqueue_skb_tso(struct efx_tx_queue *tx_queue, struct sk_buff *skb,
+ bool *data_mapped);
+
/* RX data path */
static inline int efx_nic_probe_rx(struct efx_rx_queue *rx_queue)
{
@@ -554,6 +557,7 @@ static inline void efx_nic_eventq_read_ack(struct efx_channel *channel)
{
channel->efx->type->ev_read_ack(channel);
}
+
void efx_nic_event_test_start(struct efx_channel *channel);
/* Falcon/Siena queue operations */
@@ -671,6 +675,7 @@ struct efx_farch_register_test {
unsigned address;
efx_oword_t mask;
};
+
int efx_farch_test_registers(struct efx_nic *efx,
const struct efx_farch_register_test *regs,
size_t n_regs);
diff --git a/drivers/net/ethernet/sfc/rx.c b/drivers/net/ethernet/sfc/rx.c
index c29bf862a94c..a2042f16babc 100644
--- a/drivers/net/ethernet/sfc/rx.c
+++ b/drivers/net/ethernet/sfc/rx.c
@@ -21,6 +21,7 @@
#include <linux/bpf_trace.h>
#include "net_driver.h"
#include "efx.h"
+#include "rx_common.h"
#include "filter.h"
#include "nic.h"
#include "selftest.h"
@@ -32,60 +33,13 @@
/* Maximum rx prefix used by any architecture. */
#define EFX_MAX_RX_PREFIX_SIZE 16
-/* Number of RX buffers to recycle pages for. When creating the RX page recycle
- * ring, this number is divided by the number of buffers per page to calculate
- * the number of pages to store in the RX page recycle ring.
- */
-#define EFX_RECYCLE_RING_SIZE_IOMMU 4096
-#define EFX_RECYCLE_RING_SIZE_NOIOMMU (2 * EFX_RX_PREFERRED_BATCH)
-
/* Size of buffer allocated for skb header area. */
#define EFX_SKB_HEADERS 128u
-/* This is the percentage fill level below which new RX descriptors
- * will be added to the RX descriptor ring.
- */
-static unsigned int rx_refill_threshold;
-
/* Each packet can consume up to ceil(max_frame_len / buffer_size) buffers */
#define EFX_RX_MAX_FRAGS DIV_ROUND_UP(EFX_MAX_FRAME_LEN(EFX_MAX_MTU), \
EFX_RX_USR_BUF_SIZE)
-/*
- * RX maximum head room required.
- *
- * This must be at least 1 to prevent overflow, plus one packet-worth
- * to allow pipelined receives.
- */
-#define EFX_RXD_HEAD_ROOM (1 + EFX_RX_MAX_FRAGS)
-
-static inline u8 *efx_rx_buf_va(struct efx_rx_buffer *buf)
-{
- return page_address(buf->page) + buf->page_offset;
-}
-
-static inline u32 efx_rx_buf_hash(struct efx_nic *efx, const u8 *eh)
-{
-#if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)
- return __le32_to_cpup((const __le32 *)(eh + efx->rx_packet_hash_offset));
-#else
- const u8 *data = eh + efx->rx_packet_hash_offset;
- return (u32)data[0] |
- (u32)data[1] << 8 |
- (u32)data[2] << 16 |
- (u32)data[3] << 24;
-#endif
-}
-
-static inline struct efx_rx_buffer *
-efx_rx_buf_next(struct efx_rx_queue *rx_queue, struct efx_rx_buffer *rx_buf)
-{
- if (unlikely(rx_buf == efx_rx_buffer(rx_queue, rx_queue->ptr_mask)))
- return efx_rx_buffer(rx_queue, 0);
- else
- return rx_buf + 1;
-}
-
static inline void efx_sync_rx_buffer(struct efx_nic *efx,
struct efx_rx_buffer *rx_buf,
unsigned int len)
@@ -94,301 +48,6 @@ static inline void efx_sync_rx_buffer(struct efx_nic *efx,
DMA_FROM_DEVICE);
}
-void efx_rx_config_page_split(struct efx_nic *efx)
-{
- efx->rx_page_buf_step = ALIGN(efx->rx_dma_len + efx->rx_ip_align +
- XDP_PACKET_HEADROOM,
- EFX_RX_BUF_ALIGNMENT);
- efx->rx_bufs_per_page = efx->rx_buffer_order ? 1 :
- ((PAGE_SIZE - sizeof(struct efx_rx_page_state)) /
- efx->rx_page_buf_step);
- efx->rx_buffer_truesize = (PAGE_SIZE << efx->rx_buffer_order) /
- efx->rx_bufs_per_page;
- efx->rx_pages_per_batch = DIV_ROUND_UP(EFX_RX_PREFERRED_BATCH,
- efx->rx_bufs_per_page);
-}
-
-/* Check the RX page recycle ring for a page that can be reused. */
-static struct page *efx_reuse_page(struct efx_rx_queue *rx_queue)
-{
- struct efx_nic *efx = rx_queue->efx;
- struct page *page;
- struct efx_rx_page_state *state;
- unsigned index;
-
- index = rx_queue->page_remove & rx_queue->page_ptr_mask;
- page = rx_queue->page_ring[index];
- if (page == NULL)
- return NULL;
-
- rx_queue->page_ring[index] = NULL;
- /* page_remove cannot exceed page_add. */
- if (rx_queue->page_remove != rx_queue->page_add)
- ++rx_queue->page_remove;
-
- /* If page_count is 1 then we hold the only reference to this page. */
- if (page_count(page) == 1) {
- ++rx_queue->page_recycle_count;
- return page;
- } else {
- state = page_address(page);
- dma_unmap_page(&efx->pci_dev->dev, state->dma_addr,
- PAGE_SIZE << efx->rx_buffer_order,
- DMA_FROM_DEVICE);
- put_page(page);
- ++rx_queue->page_recycle_failed;
- }
-
- return NULL;
-}
-
-/**
- * efx_init_rx_buffers - create EFX_RX_BATCH page-based RX buffers
- *
- * @rx_queue: Efx RX queue
- *
- * This allocates a batch of pages, maps them for DMA, and populates
- * struct efx_rx_buffers for each one. Return a negative error code or
- * 0 on success. If a single page can be used for multiple buffers,
- * then the page will either be inserted fully, or not at all.
- */
-static int efx_init_rx_buffers(struct efx_rx_queue *rx_queue, bool atomic)
-{
- struct efx_nic *efx = rx_queue->efx;
- struct efx_rx_buffer *rx_buf;
- struct page *page;
- unsigned int page_offset;
- struct efx_rx_page_state *state;
- dma_addr_t dma_addr;
- unsigned index, count;
-
- count = 0;
- do {
- page = efx_reuse_page(rx_queue);
- if (page == NULL) {
- page = alloc_pages(__GFP_COMP |
- (atomic ? GFP_ATOMIC : GFP_KERNEL),
- efx->rx_buffer_order);
- if (unlikely(page == NULL))
- return -ENOMEM;
- dma_addr =
- dma_map_page(&efx->pci_dev->dev, page, 0,
- PAGE_SIZE << efx->rx_buffer_order,
- DMA_FROM_DEVICE);
- if (unlikely(dma_mapping_error(&efx->pci_dev->dev,
- dma_addr))) {
- __free_pages(page, efx->rx_buffer_order);
- return -EIO;
- }
- state = page_address(page);
- state->dma_addr = dma_addr;
- } else {
- state = page_address(page);
- dma_addr = state->dma_addr;
- }
-
- dma_addr += sizeof(struct efx_rx_page_state);
- page_offset = sizeof(struct efx_rx_page_state);
-
- do {
- index = rx_queue->added_count & rx_queue->ptr_mask;
- rx_buf = efx_rx_buffer(rx_queue, index);
- rx_buf->dma_addr = dma_addr + efx->rx_ip_align +
- XDP_PACKET_HEADROOM;
- rx_buf->page = page;
- rx_buf->page_offset = page_offset + efx->rx_ip_align +
- XDP_PACKET_HEADROOM;
- rx_buf->len = efx->rx_dma_len;
- rx_buf->flags = 0;
- ++rx_queue->added_count;
- get_page(page);
- dma_addr += efx->rx_page_buf_step;
- page_offset += efx->rx_page_buf_step;
- } while (page_offset + efx->rx_page_buf_step <= PAGE_SIZE);
-
- rx_buf->flags = EFX_RX_BUF_LAST_IN_PAGE;
- } while (++count < efx->rx_pages_per_batch);
-
- return 0;
-}
-
-/* Unmap a DMA-mapped page. This function is only called for the final RX
- * buffer in a page.
- */
-static void efx_unmap_rx_buffer(struct efx_nic *efx,
- struct efx_rx_buffer *rx_buf)
-{
- struct page *page = rx_buf->page;
-
- if (page) {
- struct efx_rx_page_state *state = page_address(page);
- dma_unmap_page(&efx->pci_dev->dev,
- state->dma_addr,
- PAGE_SIZE << efx->rx_buffer_order,
- DMA_FROM_DEVICE);
- }
-}
-
-static void efx_free_rx_buffers(struct efx_rx_queue *rx_queue,
- struct efx_rx_buffer *rx_buf,
- unsigned int num_bufs)
-{
- do {
- if (rx_buf->page) {
- put_page(rx_buf->page);
- rx_buf->page = NULL;
- }
- rx_buf = efx_rx_buf_next(rx_queue, rx_buf);
- } while (--num_bufs);
-}
-
-/* Attempt to recycle the page if there is an RX recycle ring; the page can
- * only be added if this is the final RX buffer, to prevent pages being used in
- * the descriptor ring and appearing in the recycle ring simultaneously.
- */
-static void efx_recycle_rx_page(struct efx_channel *channel,
- struct efx_rx_buffer *rx_buf)
-{
- struct page *page = rx_buf->page;
- struct efx_rx_queue *rx_queue = efx_channel_get_rx_queue(channel);
- struct efx_nic *efx = rx_queue->efx;
- unsigned index;
-
- /* Only recycle the page after processing the final buffer. */
- if (!(rx_buf->flags & EFX_RX_BUF_LAST_IN_PAGE))
- return;
-
- index = rx_queue->page_add & rx_queue->page_ptr_mask;
- if (rx_queue->page_ring[index] == NULL) {
- unsigned read_index = rx_queue->page_remove &
- rx_queue->page_ptr_mask;
-
- /* The next slot in the recycle ring is available, but
- * increment page_remove if the read pointer currently
- * points here.
- */
- if (read_index == index)
- ++rx_queue->page_remove;
- rx_queue->page_ring[index] = page;
- ++rx_queue->page_add;
- return;
- }
- ++rx_queue->page_recycle_full;
- efx_unmap_rx_buffer(efx, rx_buf);
- put_page(rx_buf->page);
-}
-
-static void efx_fini_rx_buffer(struct efx_rx_queue *rx_queue,
- struct efx_rx_buffer *rx_buf)
-{
- /* Release the page reference we hold for the buffer. */
- if (rx_buf->page)
- put_page(rx_buf->page);
-
- /* If this is the last buffer in a page, unmap and free it. */
- if (rx_buf->flags & EFX_RX_BUF_LAST_IN_PAGE) {
- efx_unmap_rx_buffer(rx_queue->efx, rx_buf);
- efx_free_rx_buffers(rx_queue, rx_buf, 1);
- }
- rx_buf->page = NULL;
-}
-
-/* Recycle the pages that are used by buffers that have just been received. */
-static void efx_recycle_rx_pages(struct efx_channel *channel,
- struct efx_rx_buffer *rx_buf,
- unsigned int n_frags)
-{
- struct efx_rx_queue *rx_queue = efx_channel_get_rx_queue(channel);
-
- do {
- efx_recycle_rx_page(channel, rx_buf);
- rx_buf = efx_rx_buf_next(rx_queue, rx_buf);
- } while (--n_frags);
-}
-
-static void efx_discard_rx_packet(struct efx_channel *channel,
- struct efx_rx_buffer *rx_buf,
- unsigned int n_frags)
-{
- struct efx_rx_queue *rx_queue = efx_channel_get_rx_queue(channel);
-
- efx_recycle_rx_pages(channel, rx_buf, n_frags);
-
- efx_free_rx_buffers(rx_queue, rx_buf, n_frags);
-}
-
-/**
- * efx_fast_push_rx_descriptors - push new RX descriptors quickly
- * @rx_queue: RX descriptor queue
- *
- * This will aim to fill the RX descriptor queue up to
- * @rx_queue->@max_fill. If there is insufficient atomic
- * memory to do so, a slow fill will be scheduled.
- *
- * The caller must provide serialisation (none is used here). In practise,
- * this means this function must run from the NAPI handler, or be called
- * when NAPI is disabled.
- */
-void efx_fast_push_rx_descriptors(struct efx_rx_queue *rx_queue, bool atomic)
-{
- struct efx_nic *efx = rx_queue->efx;
- unsigned int fill_level, batch_size;
- int space, rc = 0;
-
- if (!rx_queue->refill_enabled)
- return;
-
- /* Calculate current fill level, and exit if we don't need to fill */
- fill_level = (rx_queue->added_count - rx_queue->removed_count);
- EFX_WARN_ON_ONCE_PARANOID(fill_level > rx_queue->efx->rxq_entries);
- if (fill_level >= rx_queue->fast_fill_trigger)
- goto out;
-
- /* Record minimum fill level */
- if (unlikely(fill_level < rx_queue->min_fill)) {
- if (fill_level)
- rx_queue->min_fill = fill_level;
- }
-
- batch_size = efx->rx_pages_per_batch * efx->rx_bufs_per_page;
- space = rx_queue->max_fill - fill_level;
- EFX_WARN_ON_ONCE_PARANOID(space < batch_size);
-
- netif_vdbg(rx_queue->efx, rx_status, rx_queue->efx->net_dev,
- "RX queue %d fast-filling descriptor ring from"
- " level %d to level %d\n",
- efx_rx_queue_index(rx_queue), fill_level,
- rx_queue->max_fill);
-
-
- do {
- rc = efx_init_rx_buffers(rx_queue, atomic);
- if (unlikely(rc)) {
- /* Ensure that we don't leave the rx queue empty */
- efx_schedule_slow_fill(rx_queue);
- goto out;
- }
- } while ((space -= batch_size) >= batch_size);
-
- netif_vdbg(rx_queue->efx, rx_status, rx_queue->efx->net_dev,
- "RX queue %d fast-filled descriptor ring "
- "to level %d\n", efx_rx_queue_index(rx_queue),
- rx_queue->added_count - rx_queue->removed_count);
-
- out:
- if (rx_queue->notified_count != rx_queue->added_count)
- efx_nic_notify_rx_desc(rx_queue);
-}
-
-void efx_rx_slow_fill(struct timer_list *t)
-{
- struct efx_rx_queue *rx_queue = from_timer(rx_queue, t, slow_fill);
-
- /* Post an event to cause NAPI to run and refill the queue */
- efx_nic_generate_fill_event(rx_queue);
- ++rx_queue->slow_fill_count;
-}
-
static void efx_rx_packet__check_len(struct efx_rx_queue *rx_queue,
struct efx_rx_buffer *rx_buf,
int len)
@@ -412,53 +71,6 @@ static void efx_rx_packet__check_len(struct efx_rx_queue *rx_queue,
efx_rx_queue_channel(rx_queue)->n_rx_overlength++;
}
-/* Pass a received packet up through GRO. GRO can handle pages
- * regardless of checksum state and skbs with a good checksum.
- */
-static void
-efx_rx_packet_gro(struct efx_channel *channel, struct efx_rx_buffer *rx_buf,
- unsigned int n_frags, u8 *eh)
-{
- struct napi_struct *napi = &channel->napi_str;
- struct efx_nic *efx = channel->efx;
- struct sk_buff *skb;
-
- skb = napi_get_frags(napi);
- if (unlikely(!skb)) {
- struct efx_rx_queue *rx_queue;
-
- rx_queue = efx_channel_get_rx_queue(channel);
- efx_free_rx_buffers(rx_queue, rx_buf, n_frags);
- return;
- }
-
- if (efx->net_dev->features & NETIF_F_RXHASH)
- skb_set_hash(skb, efx_rx_buf_hash(efx, eh),
- PKT_HASH_TYPE_L3);
- skb->ip_summed = ((rx_buf->flags & EFX_RX_PKT_CSUMMED) ?
- CHECKSUM_UNNECESSARY : CHECKSUM_NONE);
- skb->csum_level = !!(rx_buf->flags & EFX_RX_PKT_CSUM_LEVEL);
-
- for (;;) {
- skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags,
- rx_buf->page, rx_buf->page_offset,
- rx_buf->len);
- rx_buf->page = NULL;
- skb->len += rx_buf->len;
- if (skb_shinfo(skb)->nr_frags == n_frags)
- break;
-
- rx_buf = efx_rx_buf_next(&channel->rx_queue, rx_buf);
- }
-
- skb->data_len = skb->len;
- skb->truesize += n_frags * efx->rx_buffer_truesize;
-
- skb_record_rx_queue(skb, channel->rx_queue.core_index);
-
- napi_gro_frags(napi);
-}
-
/* Allocate and construct an SKB around page fragments */
static struct sk_buff *efx_rx_mk_skb(struct efx_channel *channel,
struct efx_rx_buffer *rx_buf,
@@ -805,174 +417,6 @@ out:
channel->rx_pkt_n_frags = 0;
}
-int efx_probe_rx_queue(struct efx_rx_queue *rx_queue)
-{
- struct efx_nic *efx = rx_queue->efx;
- unsigned int entries;
- int rc;
-
- /* Create the smallest power-of-two aligned ring */
- entries = max(roundup_pow_of_two(efx->rxq_entries), EFX_MIN_DMAQ_SIZE);
- EFX_WARN_ON_PARANOID(entries > EFX_MAX_DMAQ_SIZE);
- rx_queue->ptr_mask = entries - 1;
-
- netif_dbg(efx, probe, efx->net_dev,
- "creating RX queue %d size %#x mask %#x\n",
- efx_rx_queue_index(rx_queue), efx->rxq_entries,
- rx_queue->ptr_mask);
-
- /* Allocate RX buffers */
- rx_queue->buffer = kcalloc(entries, sizeof(*rx_queue->buffer),
- GFP_KERNEL);
- if (!rx_queue->buffer)
- return -ENOMEM;
-
- rc = efx_nic_probe_rx(rx_queue);
- if (rc) {
- kfree(rx_queue->buffer);
- rx_queue->buffer = NULL;
- }
-
- return rc;
-}
-
-static void efx_init_rx_recycle_ring(struct efx_nic *efx,
- struct efx_rx_queue *rx_queue)
-{
- unsigned int bufs_in_recycle_ring, page_ring_size;
-
- /* Set the RX recycle ring size */
-#ifdef CONFIG_PPC64
- bufs_in_recycle_ring = EFX_RECYCLE_RING_SIZE_IOMMU;
-#else
- if (iommu_present(&pci_bus_type))
- bufs_in_recycle_ring = EFX_RECYCLE_RING_SIZE_IOMMU;
- else
- bufs_in_recycle_ring = EFX_RECYCLE_RING_SIZE_NOIOMMU;
-#endif /* CONFIG_PPC64 */
-
- page_ring_size = roundup_pow_of_two(bufs_in_recycle_ring /
- efx->rx_bufs_per_page);
- rx_queue->page_ring = kcalloc(page_ring_size,
- sizeof(*rx_queue->page_ring), GFP_KERNEL);
- rx_queue->page_ptr_mask = page_ring_size - 1;
-}
-
-void efx_init_rx_queue(struct efx_rx_queue *rx_queue)
-{
- struct efx_nic *efx = rx_queue->efx;
- unsigned int max_fill, trigger, max_trigger;
- int rc = 0;
-
- netif_dbg(rx_queue->efx, drv, rx_queue->efx->net_dev,
- "initialising RX queue %d\n", efx_rx_queue_index(rx_queue));
-
- /* Initialise ptr fields */
- rx_queue->added_count = 0;
- rx_queue->notified_count = 0;
- rx_queue->removed_count = 0;
- rx_queue->min_fill = -1U;
- efx_init_rx_recycle_ring(efx, rx_queue);
-
- rx_queue->page_remove = 0;
- rx_queue->page_add = rx_queue->page_ptr_mask + 1;
- rx_queue->page_recycle_count = 0;
- rx_queue->page_recycle_failed = 0;
- rx_queue->page_recycle_full = 0;
-
- /* Initialise limit fields */
- max_fill = efx->rxq_entries - EFX_RXD_HEAD_ROOM;
- max_trigger =
- max_fill - efx->rx_pages_per_batch * efx->rx_bufs_per_page;
- if (rx_refill_threshold != 0) {
- trigger = max_fill * min(rx_refill_threshold, 100U) / 100U;
- if (trigger > max_trigger)
- trigger = max_trigger;
- } else {
- trigger = max_trigger;
- }
-
- rx_queue->max_fill = max_fill;
- rx_queue->fast_fill_trigger = trigger;
- rx_queue->refill_enabled = true;
-
- /* Initialise XDP queue information */
- rc = xdp_rxq_info_reg(&rx_queue->xdp_rxq_info, efx->net_dev,
- rx_queue->core_index);
-
- if (rc) {
- netif_err(efx, rx_err, efx->net_dev,
- "Failure to initialise XDP queue information rc=%d\n",
- rc);
- efx->xdp_rxq_info_failed = true;
- } else {
- rx_queue->xdp_rxq_info_valid = true;
- }
-
- /* Set up RX descriptor ring */
- efx_nic_init_rx(rx_queue);
-}
-
-void efx_fini_rx_queue(struct efx_rx_queue *rx_queue)
-{
- int i;
- struct efx_nic *efx = rx_queue->efx;
- struct efx_rx_buffer *rx_buf;
-
- netif_dbg(rx_queue->efx, drv, rx_queue->efx->net_dev,
- "shutting down RX queue %d\n", efx_rx_queue_index(rx_queue));
-
- del_timer_sync(&rx_queue->slow_fill);
-
- /* Release RX buffers from the current read ptr to the write ptr */
- if (rx_queue->buffer) {
- for (i = rx_queue->removed_count; i < rx_queue->added_count;
- i++) {
- unsigned index = i & rx_queue->ptr_mask;
- rx_buf = efx_rx_buffer(rx_queue, index);
- efx_fini_rx_buffer(rx_queue, rx_buf);
- }
- }
-
- /* Unmap and release the pages in the recycle ring. Remove the ring. */
- for (i = 0; i <= rx_queue->page_ptr_mask; i++) {
- struct page *page = rx_queue->page_ring[i];
- struct efx_rx_page_state *state;
-
- if (page == NULL)
- continue;
-
- state = page_address(page);
- dma_unmap_page(&efx->pci_dev->dev, state->dma_addr,
- PAGE_SIZE << efx->rx_buffer_order,
- DMA_FROM_DEVICE);
- put_page(page);
- }
- kfree(rx_queue->page_ring);
- rx_queue->page_ring = NULL;
-
- if (rx_queue->xdp_rxq_info_valid)
- xdp_rxq_info_unreg(&rx_queue->xdp_rxq_info);
-
- rx_queue->xdp_rxq_info_valid = false;
-}
-
-void efx_remove_rx_queue(struct efx_rx_queue *rx_queue)
-{
- netif_dbg(rx_queue->efx, drv, rx_queue->efx->net_dev,
- "destroying RX queue %d\n", efx_rx_queue_index(rx_queue));
-
- efx_nic_remove_rx(rx_queue);
-
- kfree(rx_queue->buffer);
- rx_queue->buffer = NULL;
-}
-
-
-module_param(rx_refill_threshold, uint, 0444);
-MODULE_PARM_DESC(rx_refill_threshold,
- "RX descriptor ring refill threshold (%)");
-
#ifdef CONFIG_RFS_ACCEL
static void efx_filter_rfs_work(struct work_struct *data)
@@ -1206,37 +650,3 @@ bool __efx_filter_rfs_expire(struct efx_channel *channel, unsigned int quota)
}
#endif /* CONFIG_RFS_ACCEL */
-
-/**
- * efx_filter_is_mc_recipient - test whether spec is a multicast recipient
- * @spec: Specification to test
- *
- * Return: %true if the specification is a non-drop RX filter that
- * matches a local MAC address I/G bit value of 1 or matches a local
- * IPv4 or IPv6 address value in the respective multicast address
- * range. Otherwise %false.
- */
-bool efx_filter_is_mc_recipient(const struct efx_filter_spec *spec)
-{
- if (!(spec->flags & EFX_FILTER_FLAG_RX) ||
- spec->dmaq_id == EFX_FILTER_RX_DMAQ_ID_DROP)
- return false;
-
- if (spec->match_flags &
- (EFX_FILTER_MATCH_LOC_MAC | EFX_FILTER_MATCH_LOC_MAC_IG) &&
- is_multicast_ether_addr(spec->loc_mac))
- return true;
-
- if ((spec->match_flags &
- (EFX_FILTER_MATCH_ETHER_TYPE | EFX_FILTER_MATCH_LOC_HOST)) ==
- (EFX_FILTER_MATCH_ETHER_TYPE | EFX_FILTER_MATCH_LOC_HOST)) {
- if (spec->ether_type == htons(ETH_P_IP) &&
- ipv4_is_multicast(spec->loc_host[0]))
- return true;
- if (spec->ether_type == htons(ETH_P_IPV6) &&
- ((const u8 *)spec->loc_host)[0] == 0xff)
- return true;
- }
-
- return false;
-}
diff --git a/drivers/net/ethernet/sfc/rx_common.c b/drivers/net/ethernet/sfc/rx_common.c
new file mode 100644
index 000000000000..ee8beb87bdc1
--- /dev/null
+++ b/drivers/net/ethernet/sfc/rx_common.c
@@ -0,0 +1,851 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/****************************************************************************
+ * Driver for Solarflare network controllers and boards
+ * Copyright 2018 Solarflare Communications Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation, incorporated herein by reference.
+ */
+
+#include "net_driver.h"
+#include <linux/module.h>
+#include <linux/iommu.h>
+#include "efx.h"
+#include "nic.h"
+#include "rx_common.h"
+
+/* This is the percentage fill level below which new RX descriptors
+ * will be added to the RX descriptor ring.
+ */
+static unsigned int rx_refill_threshold;
+module_param(rx_refill_threshold, uint, 0444);
+MODULE_PARM_DESC(rx_refill_threshold,
+ "RX descriptor ring refill threshold (%)");
+
+/* Number of RX buffers to recycle pages for. When creating the RX page recycle
+ * ring, this number is divided by the number of buffers per page to calculate
+ * the number of pages to store in the RX page recycle ring.
+ */
+#define EFX_RECYCLE_RING_SIZE_IOMMU 4096
+#define EFX_RECYCLE_RING_SIZE_NOIOMMU (2 * EFX_RX_PREFERRED_BATCH)
+
+/* RX maximum head room required.
+ *
+ * This must be at least 1 to prevent overflow, plus one packet-worth
+ * to allow pipelined receives.
+ */
+#define EFX_RXD_HEAD_ROOM (1 + EFX_RX_MAX_FRAGS)
+
+/* Check the RX page recycle ring for a page that can be reused. */
+static struct page *efx_reuse_page(struct efx_rx_queue *rx_queue)
+{
+ struct efx_nic *efx = rx_queue->efx;
+ struct efx_rx_page_state *state;
+ unsigned int index;
+ struct page *page;
+
+ index = rx_queue->page_remove & rx_queue->page_ptr_mask;
+ page = rx_queue->page_ring[index];
+ if (page == NULL)
+ return NULL;
+
+ rx_queue->page_ring[index] = NULL;
+ /* page_remove cannot exceed page_add. */
+ if (rx_queue->page_remove != rx_queue->page_add)
+ ++rx_queue->page_remove;
+
+ /* If page_count is 1 then we hold the only reference to this page. */
+ if (page_count(page) == 1) {
+ ++rx_queue->page_recycle_count;
+ return page;
+ } else {
+ state = page_address(page);
+ dma_unmap_page(&efx->pci_dev->dev, state->dma_addr,
+ PAGE_SIZE << efx->rx_buffer_order,
+ DMA_FROM_DEVICE);
+ put_page(page);
+ ++rx_queue->page_recycle_failed;
+ }
+
+ return NULL;
+}
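+
+/* Note on the page_count() test above (illustrative): the recycle ring
+ * holds one reference per stored page, so a count of 1 means the stack
+ * has released every fragment and the DMA mapping can be reused as-is;
+ * any higher count means an skb still owns part of the page, so it is
+ * unmapped and dropped instead.
+ */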
+
+/* Attempt to recycle the page if there is an RX recycle ring; the page can
+ * only be added if this is the final RX buffer, to prevent pages being used in
+ * the descriptor ring and appearing in the recycle ring simultaneously.
+ */
+static void efx_recycle_rx_page(struct efx_channel *channel,
+ struct efx_rx_buffer *rx_buf)
+{
+ struct efx_rx_queue *rx_queue = efx_channel_get_rx_queue(channel);
+ struct efx_nic *efx = rx_queue->efx;
+ struct page *page = rx_buf->page;
+ unsigned int index;
+
+ /* Only recycle the page after processing the final buffer. */
+ if (!(rx_buf->flags & EFX_RX_BUF_LAST_IN_PAGE))
+ return;
+
+ index = rx_queue->page_add & rx_queue->page_ptr_mask;
+ if (rx_queue->page_ring[index] == NULL) {
+ unsigned int read_index = rx_queue->page_remove &
+ rx_queue->page_ptr_mask;
+
+ /* The next slot in the recycle ring is available, but
+ * increment page_remove if the read pointer currently
+ * points here.
+ */
+ if (read_index == index)
+ ++rx_queue->page_remove;
+ rx_queue->page_ring[index] = page;
+ ++rx_queue->page_add;
+ return;
+ }
+ ++rx_queue->page_recycle_full;
+ efx_unmap_rx_buffer(efx, rx_buf);
+ put_page(rx_buf->page);
+}
+
+/* Recycle the pages that are used by buffers that have just been received. */
+void efx_recycle_rx_pages(struct efx_channel *channel,
+ struct efx_rx_buffer *rx_buf,
+ unsigned int n_frags)
+{
+ struct efx_rx_queue *rx_queue = efx_channel_get_rx_queue(channel);
+
+ do {
+ efx_recycle_rx_page(channel, rx_buf);
+ rx_buf = efx_rx_buf_next(rx_queue, rx_buf);
+ } while (--n_frags);
+}
+
+void efx_discard_rx_packet(struct efx_channel *channel,
+ struct efx_rx_buffer *rx_buf,
+ unsigned int n_frags)
+{
+ struct efx_rx_queue *rx_queue = efx_channel_get_rx_queue(channel);
+
+ efx_recycle_rx_pages(channel, rx_buf, n_frags);
+
+ efx_free_rx_buffers(rx_queue, rx_buf, n_frags);
+}
+
+static void efx_init_rx_recycle_ring(struct efx_rx_queue *rx_queue)
+{
+ unsigned int bufs_in_recycle_ring, page_ring_size;
+ struct efx_nic *efx = rx_queue->efx;
+
+ /* Set the RX recycle ring size */
+#ifdef CONFIG_PPC64
+ bufs_in_recycle_ring = EFX_RECYCLE_RING_SIZE_IOMMU;
+#else
+ if (iommu_present(&pci_bus_type))
+ bufs_in_recycle_ring = EFX_RECYCLE_RING_SIZE_IOMMU;
+ else
+ bufs_in_recycle_ring = EFX_RECYCLE_RING_SIZE_NOIOMMU;
+#endif /* CONFIG_PPC64 */
+
+ page_ring_size = roundup_pow_of_two(bufs_in_recycle_ring /
+ efx->rx_bufs_per_page);
+ rx_queue->page_ring = kcalloc(page_ring_size,
+ sizeof(*rx_queue->page_ring), GFP_KERNEL);
+ rx_queue->page_ptr_mask = page_ring_size - 1;
+}
+
+static void efx_fini_rx_recycle_ring(struct efx_rx_queue *rx_queue)
+{
+ struct efx_nic *efx = rx_queue->efx;
+ int i;
+
+ /* Unmap and release the pages in the recycle ring. Remove the ring. */
+ for (i = 0; i <= rx_queue->page_ptr_mask; i++) {
+ struct page *page = rx_queue->page_ring[i];
+ struct efx_rx_page_state *state;
+
+ if (page == NULL)
+ continue;
+
+ state = page_address(page);
+ dma_unmap_page(&efx->pci_dev->dev, state->dma_addr,
+ PAGE_SIZE << efx->rx_buffer_order,
+ DMA_FROM_DEVICE);
+ put_page(page);
+ }
+ kfree(rx_queue->page_ring);
+ rx_queue->page_ring = NULL;
+}
+
+static void efx_fini_rx_buffer(struct efx_rx_queue *rx_queue,
+ struct efx_rx_buffer *rx_buf)
+{
+ /* Release the page reference we hold for the buffer. */
+ if (rx_buf->page)
+ put_page(rx_buf->page);
+
+ /* If this is the last buffer in a page, unmap and free it. */
+ if (rx_buf->flags & EFX_RX_BUF_LAST_IN_PAGE) {
+ efx_unmap_rx_buffer(rx_queue->efx, rx_buf);
+ efx_free_rx_buffers(rx_queue, rx_buf, 1);
+ }
+ rx_buf->page = NULL;
+}
+
+int efx_probe_rx_queue(struct efx_rx_queue *rx_queue)
+{
+ struct efx_nic *efx = rx_queue->efx;
+ unsigned int entries;
+ int rc;
+
+ /* Create the smallest power-of-two aligned ring */
+ entries = max(roundup_pow_of_two(efx->rxq_entries), EFX_MIN_DMAQ_SIZE);
+ EFX_WARN_ON_PARANOID(entries > EFX_MAX_DMAQ_SIZE);
+ rx_queue->ptr_mask = entries - 1;
+
+ netif_dbg(efx, probe, efx->net_dev,
+ "creating RX queue %d size %#x mask %#x\n",
+ efx_rx_queue_index(rx_queue), efx->rxq_entries,
+ rx_queue->ptr_mask);
+
+ /* Allocate RX buffers */
+ rx_queue->buffer = kcalloc(entries, sizeof(*rx_queue->buffer),
+ GFP_KERNEL);
+ if (!rx_queue->buffer)
+ return -ENOMEM;
+
+ rc = efx_nic_probe_rx(rx_queue);
+ if (rc) {
+ kfree(rx_queue->buffer);
+ rx_queue->buffer = NULL;
+ }
+
+ return rc;
+}
+
+void efx_init_rx_queue(struct efx_rx_queue *rx_queue)
+{
+ unsigned int max_fill, trigger, max_trigger;
+ struct efx_nic *efx = rx_queue->efx;
+ int rc = 0;
+
+ netif_dbg(rx_queue->efx, drv, rx_queue->efx->net_dev,
+ "initialising RX queue %d\n", efx_rx_queue_index(rx_queue));
+
+ /* Initialise ptr fields */
+ rx_queue->added_count = 0;
+ rx_queue->notified_count = 0;
+ rx_queue->removed_count = 0;
+ rx_queue->min_fill = -1U;
+ efx_init_rx_recycle_ring(rx_queue);
+
+ rx_queue->page_remove = 0;
+ rx_queue->page_add = rx_queue->page_ptr_mask + 1;
+ rx_queue->page_recycle_count = 0;
+ rx_queue->page_recycle_failed = 0;
+ rx_queue->page_recycle_full = 0;
+
+ /* Initialise limit fields */
+ max_fill = efx->rxq_entries - EFX_RXD_HEAD_ROOM;
+ max_trigger =
+ max_fill - efx->rx_pages_per_batch * efx->rx_bufs_per_page;
+ if (rx_refill_threshold != 0) {
+ trigger = max_fill * min(rx_refill_threshold, 100U) / 100U;
+ if (trigger > max_trigger)
+ trigger = max_trigger;
+ } else {
+ trigger = max_trigger;
+ }
+
+ rx_queue->max_fill = max_fill;
+ rx_queue->fast_fill_trigger = trigger;
+ rx_queue->refill_enabled = true;
+
+ /* Initialise XDP queue information */
+ rc = xdp_rxq_info_reg(&rx_queue->xdp_rxq_info, efx->net_dev,
+ rx_queue->core_index);
+
+ if (rc) {
+ netif_err(efx, rx_err, efx->net_dev,
+ "Failure to initialise XDP queue information rc=%d\n",
+ rc);
+ efx->xdp_rxq_info_failed = true;
+ } else {
+ rx_queue->xdp_rxq_info_valid = true;
+ }
+
+ /* Set up RX descriptor ring */
+ efx_nic_init_rx(rx_queue);
+}
+
+void efx_fini_rx_queue(struct efx_rx_queue *rx_queue)
+{
+ struct efx_rx_buffer *rx_buf;
+ int i;
+
+ netif_dbg(rx_queue->efx, drv, rx_queue->efx->net_dev,
+ "shutting down RX queue %d\n", efx_rx_queue_index(rx_queue));
+
+ del_timer_sync(&rx_queue->slow_fill);
+
+ /* Release RX buffers from the current read ptr to the write ptr */
+ if (rx_queue->buffer) {
+ for (i = rx_queue->removed_count; i < rx_queue->added_count;
+ i++) {
+ unsigned int index = i & rx_queue->ptr_mask;
+
+ rx_buf = efx_rx_buffer(rx_queue, index);
+ efx_fini_rx_buffer(rx_queue, rx_buf);
+ }
+ }
+
+ efx_fini_rx_recycle_ring(rx_queue);
+
+ if (rx_queue->xdp_rxq_info_valid)
+ xdp_rxq_info_unreg(&rx_queue->xdp_rxq_info);
+
+ rx_queue->xdp_rxq_info_valid = false;
+}
+
+void efx_remove_rx_queue(struct efx_rx_queue *rx_queue)
+{
+ netif_dbg(rx_queue->efx, drv, rx_queue->efx->net_dev,
+ "destroying RX queue %d\n", efx_rx_queue_index(rx_queue));
+
+ efx_nic_remove_rx(rx_queue);
+
+ kfree(rx_queue->buffer);
+ rx_queue->buffer = NULL;
+}
+
+/* Unmap a DMA-mapped page. This function is only called for the final RX
+ * buffer in a page.
+ */
+void efx_unmap_rx_buffer(struct efx_nic *efx,
+ struct efx_rx_buffer *rx_buf)
+{
+ struct page *page = rx_buf->page;
+
+ if (page) {
+ struct efx_rx_page_state *state = page_address(page);
+
+ dma_unmap_page(&efx->pci_dev->dev,
+ state->dma_addr,
+ PAGE_SIZE << efx->rx_buffer_order,
+ DMA_FROM_DEVICE);
+ }
+}
+
+void efx_free_rx_buffers(struct efx_rx_queue *rx_queue,
+ struct efx_rx_buffer *rx_buf,
+ unsigned int num_bufs)
+{
+ do {
+ if (rx_buf->page) {
+ put_page(rx_buf->page);
+ rx_buf->page = NULL;
+ }
+ rx_buf = efx_rx_buf_next(rx_queue, rx_buf);
+ } while (--num_bufs);
+}
+
+void efx_rx_slow_fill(struct timer_list *t)
+{
+ struct efx_rx_queue *rx_queue = from_timer(rx_queue, t, slow_fill);
+
+ /* Post an event to cause NAPI to run and refill the queue */
+ efx_nic_generate_fill_event(rx_queue);
+ ++rx_queue->slow_fill_count;
+}
+
+void efx_schedule_slow_fill(struct efx_rx_queue *rx_queue)
+{
+ mod_timer(&rx_queue->slow_fill, jiffies + msecs_to_jiffies(10));
+}
+
+/* efx_init_rx_buffers - create a batch of page-based RX buffers
+ *
+ * @rx_queue: Efx RX queue
+ * @atomic: use GFP_ATOMIC rather than GFP_KERNEL for page allocation
+ *
+ * This allocates a batch of rx_pages_per_batch pages, maps them for DMA,
+ * and populates a struct efx_rx_buffer for each one. Returns a negative
+ * error code or 0 on success. If a single page can be used for multiple
+ * buffers, then the page will either be inserted fully, or not at all.
+ */
+static int efx_init_rx_buffers(struct efx_rx_queue *rx_queue, bool atomic)
+{
+ unsigned int page_offset, index, count;
+ struct efx_nic *efx = rx_queue->efx;
+ struct efx_rx_page_state *state;
+ struct efx_rx_buffer *rx_buf;
+ dma_addr_t dma_addr;
+ struct page *page;
+
+ count = 0;
+ do {
+ page = efx_reuse_page(rx_queue);
+ if (page == NULL) {
+ page = alloc_pages(__GFP_COMP |
+ (atomic ? GFP_ATOMIC : GFP_KERNEL),
+ efx->rx_buffer_order);
+ if (unlikely(page == NULL))
+ return -ENOMEM;
+ dma_addr =
+ dma_map_page(&efx->pci_dev->dev, page, 0,
+ PAGE_SIZE << efx->rx_buffer_order,
+ DMA_FROM_DEVICE);
+ if (unlikely(dma_mapping_error(&efx->pci_dev->dev,
+ dma_addr))) {
+ __free_pages(page, efx->rx_buffer_order);
+ return -EIO;
+ }
+ state = page_address(page);
+ state->dma_addr = dma_addr;
+ } else {
+ state = page_address(page);
+ dma_addr = state->dma_addr;
+ }
+
+ dma_addr += sizeof(struct efx_rx_page_state);
+ page_offset = sizeof(struct efx_rx_page_state);
+
+ do {
+ index = rx_queue->added_count & rx_queue->ptr_mask;
+ rx_buf = efx_rx_buffer(rx_queue, index);
+ rx_buf->dma_addr = dma_addr + efx->rx_ip_align +
+ XDP_PACKET_HEADROOM;
+ rx_buf->page = page;
+ rx_buf->page_offset = page_offset + efx->rx_ip_align +
+ XDP_PACKET_HEADROOM;
+ rx_buf->len = efx->rx_dma_len;
+ rx_buf->flags = 0;
+ ++rx_queue->added_count;
+ get_page(page);
+ dma_addr += efx->rx_page_buf_step;
+ page_offset += efx->rx_page_buf_step;
+ } while (page_offset + efx->rx_page_buf_step <= PAGE_SIZE);
+
+ rx_buf->flags = EFX_RX_BUF_LAST_IN_PAGE;
+ } while (++count < efx->rx_pages_per_batch);
+
+ return 0;
+}
+
+void efx_rx_config_page_split(struct efx_nic *efx)
+{
+ efx->rx_page_buf_step = ALIGN(efx->rx_dma_len + efx->rx_ip_align +
+ XDP_PACKET_HEADROOM,
+ EFX_RX_BUF_ALIGNMENT);
+ efx->rx_bufs_per_page = efx->rx_buffer_order ? 1 :
+ ((PAGE_SIZE - sizeof(struct efx_rx_page_state)) /
+ efx->rx_page_buf_step);
+ efx->rx_buffer_truesize = (PAGE_SIZE << efx->rx_buffer_order) /
+ efx->rx_bufs_per_page;
+ efx->rx_pages_per_batch = DIV_ROUND_UP(EFX_RX_PREFERRED_BATCH,
+ efx->rx_bufs_per_page);
+}
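+
+/* Worked example (hypothetical numbers, assuming 4KiB pages and
+ * rx_buffer_order == 0): if rx_page_buf_step rounds up to 2048 then
+ * rx_bufs_per_page = (4096 - sizeof(struct efx_rx_page_state)) / 2048
+ * = 1, so each page backs a single buffer and truesize is the whole
+ * page; a smaller step would give two buffers per page.
+ */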
+
+/* efx_fast_push_rx_descriptors - push new RX descriptors quickly
+ * @rx_queue: RX descriptor queue
+ *
+ * This will aim to fill the RX descriptor queue up to
+ * @rx_queue->max_fill. If there is insufficient atomic
+ * memory to do so, a slow fill will be scheduled.
+ *
+ * The caller must provide serialisation (none is used here). In practice,
+ * this means this function must run from the NAPI handler, or be called
+ * when NAPI is disabled.
+ */
+void efx_fast_push_rx_descriptors(struct efx_rx_queue *rx_queue, bool atomic)
+{
+ struct efx_nic *efx = rx_queue->efx;
+ unsigned int fill_level, batch_size;
+ int space, rc = 0;
+
+ if (!rx_queue->refill_enabled)
+ return;
+
+ /* Calculate current fill level, and exit if we don't need to fill */
+ fill_level = (rx_queue->added_count - rx_queue->removed_count);
+ EFX_WARN_ON_ONCE_PARANOID(fill_level > rx_queue->efx->rxq_entries);
+ if (fill_level >= rx_queue->fast_fill_trigger)
+ goto out;
+
+ /* Record minimum fill level */
+ if (unlikely(fill_level < rx_queue->min_fill)) {
+ if (fill_level)
+ rx_queue->min_fill = fill_level;
+ }
+
+ batch_size = efx->rx_pages_per_batch * efx->rx_bufs_per_page;
+ space = rx_queue->max_fill - fill_level;
+ EFX_WARN_ON_ONCE_PARANOID(space < batch_size);
+
+ netif_vdbg(rx_queue->efx, rx_status, rx_queue->efx->net_dev,
+ "RX queue %d fast-filling descriptor ring from"
+ " level %d to level %d\n",
+ efx_rx_queue_index(rx_queue), fill_level,
+ rx_queue->max_fill);
+
+ do {
+ rc = efx_init_rx_buffers(rx_queue, atomic);
+ if (unlikely(rc)) {
+ /* Ensure that we don't leave the rx queue empty */
+ efx_schedule_slow_fill(rx_queue);
+ goto out;
+ }
+ } while ((space -= batch_size) >= batch_size);
+
+ netif_vdbg(rx_queue->efx, rx_status, rx_queue->efx->net_dev,
+ "RX queue %d fast-filled descriptor ring "
+ "to level %d\n", efx_rx_queue_index(rx_queue),
+ rx_queue->added_count - rx_queue->removed_count);
+
+ out:
+ if (rx_queue->notified_count != rx_queue->added_count)
+ efx_nic_notify_rx_desc(rx_queue);
+}
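
The fill level above is computed as added_count - removed_count on free-running unsigned counters, so the subtraction stays correct even after the counters wrap. A tiny standalone check of that property:

#include <assert.h>

int main(void)
{
	unsigned int added = 5;			/* wrapped past UINT_MAX */
	unsigned int removed = 0xfffffffbu;	/* 10 buffers behind */

	/* Unsigned subtraction is modular, so the distance survives wrap */
	assert(added - removed == 10);
	return 0;
}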
+
+/* Pass a received packet up through GRO. GRO can handle pages
+ * regardless of checksum state and skbs with a good checksum.
+ */
+void
+efx_rx_packet_gro(struct efx_channel *channel, struct efx_rx_buffer *rx_buf,
+ unsigned int n_frags, u8 *eh)
+{
+ struct napi_struct *napi = &channel->napi_str;
+ struct efx_nic *efx = channel->efx;
+ struct sk_buff *skb;
+
+ skb = napi_get_frags(napi);
+ if (unlikely(!skb)) {
+ struct efx_rx_queue *rx_queue;
+
+ rx_queue = efx_channel_get_rx_queue(channel);
+ efx_free_rx_buffers(rx_queue, rx_buf, n_frags);
+ return;
+ }
+
+ if (efx->net_dev->features & NETIF_F_RXHASH)
+ skb_set_hash(skb, efx_rx_buf_hash(efx, eh),
+ PKT_HASH_TYPE_L3);
+ skb->ip_summed = ((rx_buf->flags & EFX_RX_PKT_CSUMMED) ?
+ CHECKSUM_UNNECESSARY : CHECKSUM_NONE);
+ skb->csum_level = !!(rx_buf->flags & EFX_RX_PKT_CSUM_LEVEL);
+
+ for (;;) {
+ skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags,
+ rx_buf->page, rx_buf->page_offset,
+ rx_buf->len);
+ rx_buf->page = NULL;
+ skb->len += rx_buf->len;
+ if (skb_shinfo(skb)->nr_frags == n_frags)
+ break;
+
+ rx_buf = efx_rx_buf_next(&channel->rx_queue, rx_buf);
+ }
+
+ skb->data_len = skb->len;
+ skb->truesize += n_frags * efx->rx_buffer_truesize;
+
+ skb_record_rx_queue(skb, channel->rx_queue.core_index);
+
+ napi_gro_frags(napi);
+}
+
+/* RSS contexts. We're using linked lists and crappy O(n) algorithms, because
+ * (a) this is an infrequent control-plane operation and (b) n is small (max 64)
+ */
+struct efx_rss_context *efx_alloc_rss_context_entry(struct efx_nic *efx)
+{
+ struct list_head *head = &efx->rss_context.list;
+ struct efx_rss_context *ctx, *new;
+ u32 id = 1; /* Don't use zero, that refers to the master RSS context */
+
+ WARN_ON(!mutex_is_locked(&efx->rss_lock));
+
+ /* Search for first gap in the numbering */
+ list_for_each_entry(ctx, head, list) {
+ if (ctx->user_id != id)
+ break;
+ id++;
+ /* Check for wrap. If this happens, we have nearly 2^32
+ * allocated RSS contexts, which seems unlikely.
+ */
+ if (WARN_ON_ONCE(!id))
+ return NULL;
+ }
+
+ /* Create the new entry */
+ new = kmalloc(sizeof(*new), GFP_KERNEL);
+ if (!new)
+ return NULL;
+ new->context_id = EFX_MCDI_RSS_CONTEXT_INVALID;
+ new->rx_hash_udp_4tuple = false;
+
+ /* Insert the new entry into the gap */
+ new->user_id = id;
+ list_add_tail(&new->list, &ctx->list);
+ return new;
+}
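
A standalone sketch (with assumed names) of the first-gap search used above: user IDs are kept sorted in the list, so walking until the expected ID is free finds the smallest unused one, e.g. {1, 2, 4} allocates 3:

#include <stdio.h>

int main(void)
{
	unsigned int used[] = { 1, 2, 4 };	/* sorted, as in the list */
	unsigned int id = 1;
	size_t i;

	for (i = 0; i < sizeof(used) / sizeof(used[0]); i++) {
		if (used[i] != id)
			break;	/* found the gap; insert before used[i] */
		id++;
	}
	printf("allocated user_id %u\n", id);	/* prints 3 */
	return 0;
}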
+
+struct efx_rss_context *efx_find_rss_context_entry(struct efx_nic *efx, u32 id)
+{
+ struct list_head *head = &efx->rss_context.list;
+ struct efx_rss_context *ctx;
+
+ WARN_ON(!mutex_is_locked(&efx->rss_lock));
+
+ list_for_each_entry(ctx, head, list)
+ if (ctx->user_id == id)
+ return ctx;
+ return NULL;
+}
+
+void efx_free_rss_context_entry(struct efx_rss_context *ctx)
+{
+ list_del(&ctx->list);
+ kfree(ctx);
+}
+
+void efx_set_default_rx_indir_table(struct efx_nic *efx,
+ struct efx_rss_context *ctx)
+{
+ size_t i;
+
+ for (i = 0; i < ARRAY_SIZE(ctx->rx_indir_table); i++)
+ ctx->rx_indir_table[i] =
+ ethtool_rxfh_indir_default(i, efx->rss_spread);
+}
+
+/**
+ * efx_filter_is_mc_recipient - test whether spec is a multicast recipient
+ * @spec: Specification to test
+ *
+ * Return: %true if the specification is a non-drop RX filter that
+ * matches a local MAC address I/G bit value of 1 or matches a local
+ * IPv4 or IPv6 address value in the respective multicast address
+ * range. Otherwise %false.
+ */
+bool efx_filter_is_mc_recipient(const struct efx_filter_spec *spec)
+{
+ if (!(spec->flags & EFX_FILTER_FLAG_RX) ||
+ spec->dmaq_id == EFX_FILTER_RX_DMAQ_ID_DROP)
+ return false;
+
+ if (spec->match_flags &
+ (EFX_FILTER_MATCH_LOC_MAC | EFX_FILTER_MATCH_LOC_MAC_IG) &&
+ is_multicast_ether_addr(spec->loc_mac))
+ return true;
+
+ if ((spec->match_flags &
+ (EFX_FILTER_MATCH_ETHER_TYPE | EFX_FILTER_MATCH_LOC_HOST)) ==
+ (EFX_FILTER_MATCH_ETHER_TYPE | EFX_FILTER_MATCH_LOC_HOST)) {
+ if (spec->ether_type == htons(ETH_P_IP) &&
+ ipv4_is_multicast(spec->loc_host[0]))
+ return true;
+ if (spec->ether_type == htons(ETH_P_IPV6) &&
+ ((const u8 *)spec->loc_host)[0] == 0xff)
+ return true;
+ }
+
+ return false;
+}
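
For illustration, the three multicast tests above reduce to: the Ethernet I/G bit (lowest bit of the first octet), the IPv4 224.0.0.0/4 range, and the IPv6 ff00::/8 range. Plain-C restatements of the kernel helpers, for illustration only:

#include <assert.h>
#include <stdint.h>

static int eth_is_mc(const uint8_t *mac)  { return mac[0] & 1; }  /* I/G bit */
static int ip4_is_mc(const uint8_t *addr) { return (addr[0] & 0xf0) == 0xe0; }
static int ip6_is_mc(const uint8_t *addr) { return addr[0] == 0xff; }

int main(void)
{
	const uint8_t mac[6] = { 0x01, 0x00, 0x5e, 0x00, 0x00, 0x01 };
	const uint8_t v4[4]  = { 224, 0, 0, 1 };
	const uint8_t v6[16] = { 0xff, 0x02 };	/* ff02::... prefix */

	assert(eth_is_mc(mac) && ip4_is_mc(v4) && ip6_is_mc(v6));
	return 0;
}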
+
+bool efx_filter_spec_equal(const struct efx_filter_spec *left,
+ const struct efx_filter_spec *right)
+{
+ if ((left->match_flags ^ right->match_flags) |
+ ((left->flags ^ right->flags) &
+ (EFX_FILTER_FLAG_RX | EFX_FILTER_FLAG_TX)))
+ return false;
+
+ return memcmp(&left->outer_vid, &right->outer_vid,
+ sizeof(struct efx_filter_spec) -
+ offsetof(struct efx_filter_spec, outer_vid)) == 0;
+}
+
+u32 efx_filter_spec_hash(const struct efx_filter_spec *spec)
+{
+ BUILD_BUG_ON(offsetof(struct efx_filter_spec, outer_vid) & 3);
+ return jhash2((const u32 *)&spec->outer_vid,
+ (sizeof(struct efx_filter_spec) -
+ offsetof(struct efx_filter_spec, outer_vid)) / 4,
+ 0);
+}
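
A sketch of the "hash the struct tail" pattern above: everything from ->outer_vid onwards is treated as an array of u32 words, which is why the BUILD_BUG_ON insists the offset is 4-byte aligned. The layout and hash function below are stand-ins, not the real efx_filter_spec or jhash2:

#include <stddef.h>
#include <stdint.h>
#include <string.h>

struct spec {
	uint32_t match_flags;
	uint32_t flags;
	uint16_t outer_vid;	/* hashed region starts here */
	uint16_t inner_vid;
	uint8_t  loc_mac[6];
	uint8_t  pad[2];	/* keeps the tail a multiple of 4 bytes */
};

static uint32_t hash_words(const uint32_t *w, size_t n)
{
	uint32_t h = 0;		/* toy hash standing in for jhash2() */

	while (n--)
		h = h * 31 + *w++;
	return h;
}

static uint32_t spec_hash(const struct spec *s)
{
	size_t off = offsetof(struct spec, outer_vid);	/* 8: 4-aligned */
	uint32_t words[3];

	memcpy(words, (const char *)s + off, sizeof(words));
	return hash_words(words, sizeof(words) / 4);
}

int main(void)
{
	struct spec a = { .match_flags = 1, .outer_vid = 100 };
	struct spec b = a;

	return spec_hash(&a) == spec_hash(&b) ? 0 : 1;
}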
+
+#ifdef CONFIG_RFS_ACCEL
+bool efx_rps_check_rule(struct efx_arfs_rule *rule, unsigned int filter_idx,
+ bool *force)
+{
+ if (rule->filter_id == EFX_ARFS_FILTER_ID_PENDING) {
+ /* ARFS is currently updating this entry, leave it */
+ return false;
+ }
+ if (rule->filter_id == EFX_ARFS_FILTER_ID_ERROR) {
+ /* ARFS tried and failed to update this, so it's probably out
+ * of date. Remove the filter and the ARFS rule entry.
+ */
+ rule->filter_id = EFX_ARFS_FILTER_ID_REMOVING;
+ *force = true;
+ return true;
+ } else if (WARN_ON(rule->filter_id != filter_idx)) { /* can't happen */
+ /* ARFS has moved on, so old filter is not needed. Since we did
+ * not mark the rule with EFX_ARFS_FILTER_ID_REMOVING, it will
+ * not be removed by efx_rps_hash_del() subsequently.
+ */
+ *force = true;
+ return true;
+ }
+ /* Remove it iff ARFS wants to. */
+ return true;
+}
+
+static
+struct hlist_head *efx_rps_hash_bucket(struct efx_nic *efx,
+ const struct efx_filter_spec *spec)
+{
+ u32 hash = efx_filter_spec_hash(spec);
+
+ lockdep_assert_held(&efx->rps_hash_lock);
+ if (!efx->rps_hash_table)
+ return NULL;
+ return &efx->rps_hash_table[hash % EFX_ARFS_HASH_TABLE_SIZE];
+}
+
+struct efx_arfs_rule *efx_rps_hash_find(struct efx_nic *efx,
+ const struct efx_filter_spec *spec)
+{
+ struct efx_arfs_rule *rule;
+ struct hlist_head *head;
+ struct hlist_node *node;
+
+ head = efx_rps_hash_bucket(efx, spec);
+ if (!head)
+ return NULL;
+ hlist_for_each(node, head) {
+ rule = container_of(node, struct efx_arfs_rule, node);
+ if (efx_filter_spec_equal(spec, &rule->spec))
+ return rule;
+ }
+ return NULL;
+}
+
+struct efx_arfs_rule *efx_rps_hash_add(struct efx_nic *efx,
+ const struct efx_filter_spec *spec,
+ bool *new)
+{
+ struct efx_arfs_rule *rule;
+ struct hlist_head *head;
+ struct hlist_node *node;
+
+ head = efx_rps_hash_bucket(efx, spec);
+ if (!head)
+ return NULL;
+ hlist_for_each(node, head) {
+ rule = container_of(node, struct efx_arfs_rule, node);
+ if (efx_filter_spec_equal(spec, &rule->spec)) {
+ *new = false;
+ return rule;
+ }
+ }
+ rule = kmalloc(sizeof(*rule), GFP_ATOMIC);
+ *new = true;
+ if (rule) {
+ memcpy(&rule->spec, spec, sizeof(rule->spec));
+ hlist_add_head(&rule->node, head);
+ }
+ return rule;
+}
+
+void efx_rps_hash_del(struct efx_nic *efx, const struct efx_filter_spec *spec)
+{
+ struct efx_arfs_rule *rule;
+ struct hlist_head *head;
+ struct hlist_node *node;
+
+ head = efx_rps_hash_bucket(efx, spec);
+ if (WARN_ON(!head))
+ return;
+ hlist_for_each(node, head) {
+ rule = container_of(node, struct efx_arfs_rule, node);
+ if (efx_filter_spec_equal(spec, &rule->spec)) {
+ /* Someone already reused the entry. We know that if
+ * this check doesn't fire (i.e. filter_id == REMOVING)
+ * then the REMOVING mark was put there by our caller,
+ * because the caller holds a lock on the filter table and
+ * only holders of that lock set REMOVING.
+ */
+ if (rule->filter_id != EFX_ARFS_FILTER_ID_REMOVING)
+ return;
+ hlist_del(node);
+ kfree(rule);
+ return;
+ }
+ }
+ /* We didn't find it. */
+ WARN_ON(1);
+}
+#endif
+
+int efx_probe_filters(struct efx_nic *efx)
+{
+ int rc;
+
+ init_rwsem(&efx->filter_sem);
+ mutex_lock(&efx->mac_lock);
+ down_write(&efx->filter_sem);
+ rc = efx->type->filter_table_probe(efx);
+ if (rc)
+ goto out_unlock;
+
+#ifdef CONFIG_RFS_ACCEL
+ if (efx->type->offload_features & NETIF_F_NTUPLE) {
+ struct efx_channel *channel;
+ int i, success = 1;
+
+ efx_for_each_channel(channel, efx) {
+ channel->rps_flow_id =
+ kcalloc(efx->type->max_rx_ip_filters,
+ sizeof(*channel->rps_flow_id),
+ GFP_KERNEL);
+ if (!channel->rps_flow_id)
+ success = 0;
+ else
+ for (i = 0;
+ i < efx->type->max_rx_ip_filters;
+ ++i)
+ channel->rps_flow_id[i] =
+ RPS_FLOW_ID_INVALID;
+ channel->rfs_expire_index = 0;
+ channel->rfs_filter_count = 0;
+ }
+
+ if (!success) {
+ efx_for_each_channel(channel, efx)
+ kfree(channel->rps_flow_id);
+ efx->type->filter_table_remove(efx);
+ rc = -ENOMEM;
+ goto out_unlock;
+ }
+ }
+#endif
+out_unlock:
+ up_write(&efx->filter_sem);
+ mutex_unlock(&efx->mac_lock);
+ return rc;
+}
+
+void efx_remove_filters(struct efx_nic *efx)
+{
+#ifdef CONFIG_RFS_ACCEL
+ struct efx_channel *channel;
+
+ efx_for_each_channel(channel, efx) {
+ cancel_delayed_work_sync(&channel->filter_work);
+ kfree(channel->rps_flow_id);
+ }
+#endif
+ down_write(&efx->filter_sem);
+ efx->type->filter_table_remove(efx);
+ up_write(&efx->filter_sem);
+}
diff --git a/drivers/net/ethernet/sfc/rx_common.h b/drivers/net/ethernet/sfc/rx_common.h
new file mode 100644
index 000000000000..c41f12a89477
--- /dev/null
+++ b/drivers/net/ethernet/sfc/rx_common.h
@@ -0,0 +1,97 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/****************************************************************************
+ * Driver for Solarflare network controllers and boards
+ * Copyright 2018 Solarflare Communications Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation, incorporated herein by reference.
+ */
+
+#ifndef EFX_RX_COMMON_H
+#define EFX_RX_COMMON_H
+
+/* Preferred number of descriptors to fill at once */
+#define EFX_RX_PREFERRED_BATCH 8U
+
+/* Each packet can consume up to ceil(max_frame_len / buffer_size) buffers */
+#define EFX_RX_MAX_FRAGS DIV_ROUND_UP(EFX_MAX_FRAME_LEN(EFX_MAX_MTU), \
+ EFX_RX_USR_BUF_SIZE)
+
+static inline u8 *efx_rx_buf_va(struct efx_rx_buffer *buf)
+{
+ return page_address(buf->page) + buf->page_offset;
+}
+
+static inline u32 efx_rx_buf_hash(struct efx_nic *efx, const u8 *eh)
+{
+#if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)
+ return __le32_to_cpup((const __le32 *)(eh + efx->rx_packet_hash_offset));
+#else
+ const u8 *data = eh + efx->rx_packet_hash_offset;
+
+ return (u32)data[0] |
+ (u32)data[1] << 8 |
+ (u32)data[2] << 16 |
+ (u32)data[3] << 24;
+#endif
+}
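
The two branches above are equivalent: both read a little-endian 32-bit value from a possibly unaligned byte pointer. A minimal userspace check of that equivalence (the memcpy here stands in for __le32_to_cpup() and assumes a little-endian host):

#include <assert.h>
#include <stdint.h>
#include <string.h>

int main(void)
{
	const uint8_t bytes[4] = { 0x78, 0x56, 0x34, 0x12 };
	uint32_t direct, assembled;

	memcpy(&direct, bytes, 4);	/* stands in for __le32_to_cpup() */
	assembled = (uint32_t)bytes[0] |
		    (uint32_t)bytes[1] << 8 |
		    (uint32_t)bytes[2] << 16 |
		    (uint32_t)bytes[3] << 24;

	assert(assembled == 0x12345678u);
	assert(direct == assembled);	/* true on little-endian hosts */
	return 0;
}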
+
+void efx_rx_slow_fill(struct timer_list *t);
+
+void efx_recycle_rx_pages(struct efx_channel *channel,
+ struct efx_rx_buffer *rx_buf,
+ unsigned int n_frags);
+void efx_discard_rx_packet(struct efx_channel *channel,
+ struct efx_rx_buffer *rx_buf,
+ unsigned int n_frags);
+
+int efx_probe_rx_queue(struct efx_rx_queue *rx_queue);
+void efx_init_rx_queue(struct efx_rx_queue *rx_queue);
+void efx_fini_rx_queue(struct efx_rx_queue *rx_queue);
+void efx_remove_rx_queue(struct efx_rx_queue *rx_queue);
+void efx_destroy_rx_queue(struct efx_rx_queue *rx_queue);
+
+void efx_init_rx_buffer(struct efx_rx_queue *rx_queue,
+ struct page *page,
+ unsigned int page_offset,
+ u16 flags);
+void efx_unmap_rx_buffer(struct efx_nic *efx, struct efx_rx_buffer *rx_buf);
+void efx_free_rx_buffers(struct efx_rx_queue *rx_queue,
+ struct efx_rx_buffer *rx_buf,
+ unsigned int num_bufs);
+
+void efx_schedule_slow_fill(struct efx_rx_queue *rx_queue);
+void efx_rx_config_page_split(struct efx_nic *efx);
+void efx_fast_push_rx_descriptors(struct efx_rx_queue *rx_queue, bool atomic);
+
+void
+efx_rx_packet_gro(struct efx_channel *channel, struct efx_rx_buffer *rx_buf,
+ unsigned int n_frags, u8 *eh);
+
+struct efx_rss_context *efx_alloc_rss_context_entry(struct efx_nic *efx);
+struct efx_rss_context *efx_find_rss_context_entry(struct efx_nic *efx, u32 id);
+void efx_free_rss_context_entry(struct efx_rss_context *ctx);
+void efx_set_default_rx_indir_table(struct efx_nic *efx,
+ struct efx_rss_context *ctx);
+
+bool efx_filter_is_mc_recipient(const struct efx_filter_spec *spec);
+bool efx_filter_spec_equal(const struct efx_filter_spec *left,
+ const struct efx_filter_spec *right);
+u32 efx_filter_spec_hash(const struct efx_filter_spec *spec);
+
+#ifdef CONFIG_RFS_ACCEL
+bool efx_rps_check_rule(struct efx_arfs_rule *rule, unsigned int filter_idx,
+ bool *force);
+struct efx_arfs_rule *efx_rps_hash_find(struct efx_nic *efx,
+ const struct efx_filter_spec *spec);
+struct efx_arfs_rule *efx_rps_hash_add(struct efx_nic *efx,
+ const struct efx_filter_spec *spec,
+ bool *new);
+void efx_rps_hash_del(struct efx_nic *efx, const struct efx_filter_spec *spec);
+#endif
+
+int efx_probe_filters(struct efx_nic *efx);
+void efx_remove_filters(struct efx_nic *efx);
+
+#endif
diff --git a/drivers/net/ethernet/sfc/selftest.c b/drivers/net/ethernet/sfc/selftest.c
index 8474cf8ea7d3..1ae369022d7d 100644
--- a/drivers/net/ethernet/sfc/selftest.c
+++ b/drivers/net/ethernet/sfc/selftest.c
@@ -18,6 +18,8 @@
#include <linux/slab.h>
#include "net_driver.h"
#include "efx.h"
+#include "efx_common.h"
+#include "efx_channels.h"
#include "nic.h"
#include "selftest.h"
#include "workarounds.h"
@@ -783,7 +785,7 @@ void efx_selftest_async_cancel(struct efx_nic *efx)
cancel_delayed_work_sync(&efx->selftest_work);
}
-void efx_selftest_async_work(struct work_struct *data)
+static void efx_selftest_async_work(struct work_struct *data)
{
struct efx_nic *efx = container_of(data, struct efx_nic,
selftest_work.work);
@@ -802,3 +804,8 @@ void efx_selftest_async_work(struct work_struct *data)
channel->channel, cpu);
}
}
+
+void efx_selftest_async_init(struct efx_nic *efx)
+{
+ INIT_DELAYED_WORK(&efx->selftest_work, efx_selftest_async_work);
+}
diff --git a/drivers/net/ethernet/sfc/selftest.h b/drivers/net/ethernet/sfc/selftest.h
index a3553816d92c..ca88ebb4f6b1 100644
--- a/drivers/net/ethernet/sfc/selftest.h
+++ b/drivers/net/ethernet/sfc/selftest.h
@@ -45,8 +45,8 @@ void efx_loopback_rx_packet(struct efx_nic *efx, const char *buf_ptr,
int pkt_len);
int efx_selftest(struct efx_nic *efx, struct efx_self_tests *tests,
unsigned flags);
+void efx_selftest_async_init(struct efx_nic *efx);
void efx_selftest_async_start(struct efx_nic *efx);
void efx_selftest_async_cancel(struct efx_nic *efx);
-void efx_selftest_async_work(struct work_struct *data);
#endif /* EFX_SELFTEST_H */
diff --git a/drivers/net/ethernet/sfc/siena.c b/drivers/net/ethernet/sfc/siena.c
index 81499244a4b4..baa464161626 100644
--- a/drivers/net/ethernet/sfc/siena.c
+++ b/drivers/net/ethernet/sfc/siena.c
@@ -14,12 +14,14 @@
#include "net_driver.h"
#include "bitfield.h"
#include "efx.h"
+#include "efx_common.h"
#include "nic.h"
#include "farch_regs.h"
#include "io.h"
#include "workarounds.h"
#include "mcdi.h"
#include "mcdi_pcol.h"
+#include "mcdi_port_common.h"
#include "selftest.h"
#include "siena_sriov.h"
diff --git a/drivers/net/ethernet/sfc/siena_sriov.c b/drivers/net/ethernet/sfc/siena_sriov.c
index dfbdf05dcf79..83dcfcae3d4b 100644
--- a/drivers/net/ethernet/sfc/siena_sriov.c
+++ b/drivers/net/ethernet/sfc/siena_sriov.c
@@ -7,6 +7,7 @@
#include <linux/module.h>
#include "net_driver.h"
#include "efx.h"
+#include "efx_channels.h"
#include "nic.h"
#include "io.h"
#include "mcdi.h"
diff --git a/drivers/net/ethernet/sfc/tx.c b/drivers/net/ethernet/sfc/tx.c
index 00c1c4402451..04d7f41d7ed9 100644
--- a/drivers/net/ethernet/sfc/tx.c
+++ b/drivers/net/ethernet/sfc/tx.c
@@ -20,6 +20,7 @@
#include "io.h"
#include "nic.h"
#include "tx.h"
+#include "tx_common.h"
#include "workarounds.h"
#include "ef10_regs.h"
@@ -56,72 +57,6 @@ u8 *efx_tx_get_copy_buffer_limited(struct efx_tx_queue *tx_queue,
return efx_tx_get_copy_buffer(tx_queue, buffer);
}
-static void efx_dequeue_buffer(struct efx_tx_queue *tx_queue,
- struct efx_tx_buffer *buffer,
- unsigned int *pkts_compl,
- unsigned int *bytes_compl)
-{
- if (buffer->unmap_len) {
- struct device *dma_dev = &tx_queue->efx->pci_dev->dev;
- dma_addr_t unmap_addr = buffer->dma_addr - buffer->dma_offset;
- if (buffer->flags & EFX_TX_BUF_MAP_SINGLE)
- dma_unmap_single(dma_dev, unmap_addr, buffer->unmap_len,
- DMA_TO_DEVICE);
- else
- dma_unmap_page(dma_dev, unmap_addr, buffer->unmap_len,
- DMA_TO_DEVICE);
- buffer->unmap_len = 0;
- }
-
- if (buffer->flags & EFX_TX_BUF_SKB) {
- struct sk_buff *skb = (struct sk_buff *)buffer->skb;
-
- EFX_WARN_ON_PARANOID(!pkts_compl || !bytes_compl);
- (*pkts_compl)++;
- (*bytes_compl) += skb->len;
- if (tx_queue->timestamping &&
- (tx_queue->completed_timestamp_major ||
- tx_queue->completed_timestamp_minor)) {
- struct skb_shared_hwtstamps hwtstamp;
-
- hwtstamp.hwtstamp =
- efx_ptp_nic_to_kernel_time(tx_queue);
- skb_tstamp_tx(skb, &hwtstamp);
-
- tx_queue->completed_timestamp_major = 0;
- tx_queue->completed_timestamp_minor = 0;
- }
- dev_consume_skb_any((struct sk_buff *)buffer->skb);
- netif_vdbg(tx_queue->efx, tx_done, tx_queue->efx->net_dev,
- "TX queue %d transmission id %x complete\n",
- tx_queue->queue, tx_queue->read_count);
- } else if (buffer->flags & EFX_TX_BUF_XDP) {
- xdp_return_frame_rx_napi(buffer->xdpf);
- }
-
- buffer->len = 0;
- buffer->flags = 0;
-}
-
-unsigned int efx_tx_max_skb_descs(struct efx_nic *efx)
-{
- /* Header and payload descriptor for each output segment, plus
- * one for every input fragment boundary within a segment
- */
- unsigned int max_descs = EFX_TSO_MAX_SEGS * 2 + MAX_SKB_FRAGS;
-
- /* Possibly one more per segment for option descriptors */
- if (efx_nic_rev(efx) >= EFX_REV_HUNT_A0)
- max_descs += EFX_TSO_MAX_SEGS;
-
- /* Possibly more for PCIe page boundaries within input fragments */
- if (PAGE_SIZE > EFX_PAGE_SIZE)
- max_descs += max_t(unsigned int, MAX_SKB_FRAGS,
- DIV_ROUND_UP(GSO_MAX_SIZE, EFX_PAGE_SIZE));
-
- return max_descs;
-}
-
static void efx_tx_maybe_stop_queue(struct efx_tx_queue *txq1)
{
/* We need to consider both queues that the net core sees as one */
@@ -333,125 +268,6 @@ static int efx_enqueue_skb_pio(struct efx_tx_queue *tx_queue,
}
#endif /* EFX_USE_PIO */
-static struct efx_tx_buffer *efx_tx_map_chunk(struct efx_tx_queue *tx_queue,
- dma_addr_t dma_addr,
- size_t len)
-{
- const struct efx_nic_type *nic_type = tx_queue->efx->type;
- struct efx_tx_buffer *buffer;
- unsigned int dma_len;
-
- /* Map the fragment taking account of NIC-dependent DMA limits. */
- do {
- buffer = efx_tx_queue_get_insert_buffer(tx_queue);
- dma_len = nic_type->tx_limit_len(tx_queue, dma_addr, len);
-
- buffer->len = dma_len;
- buffer->dma_addr = dma_addr;
- buffer->flags = EFX_TX_BUF_CONT;
- len -= dma_len;
- dma_addr += dma_len;
- ++tx_queue->insert_count;
- } while (len);
-
- return buffer;
-}
-
-/* Map all data from an SKB for DMA and create descriptors on the queue.
- */
-static int efx_tx_map_data(struct efx_tx_queue *tx_queue, struct sk_buff *skb,
- unsigned int segment_count)
-{
- struct efx_nic *efx = tx_queue->efx;
- struct device *dma_dev = &efx->pci_dev->dev;
- unsigned int frag_index, nr_frags;
- dma_addr_t dma_addr, unmap_addr;
- unsigned short dma_flags;
- size_t len, unmap_len;
-
- nr_frags = skb_shinfo(skb)->nr_frags;
- frag_index = 0;
-
- /* Map header data. */
- len = skb_headlen(skb);
- dma_addr = dma_map_single(dma_dev, skb->data, len, DMA_TO_DEVICE);
- dma_flags = EFX_TX_BUF_MAP_SINGLE;
- unmap_len = len;
- unmap_addr = dma_addr;
-
- if (unlikely(dma_mapping_error(dma_dev, dma_addr)))
- return -EIO;
-
- if (segment_count) {
- /* For TSO we need to put the header in to a separate
- * descriptor. Map this separately if necessary.
- */
- size_t header_len = skb_transport_header(skb) - skb->data +
- (tcp_hdr(skb)->doff << 2u);
-
- if (header_len != len) {
- tx_queue->tso_long_headers++;
- efx_tx_map_chunk(tx_queue, dma_addr, header_len);
- len -= header_len;
- dma_addr += header_len;
- }
- }
-
- /* Add descriptors for each fragment. */
- do {
- struct efx_tx_buffer *buffer;
- skb_frag_t *fragment;
-
- buffer = efx_tx_map_chunk(tx_queue, dma_addr, len);
-
- /* The final descriptor for a fragment is responsible for
- * unmapping the whole fragment.
- */
- buffer->flags = EFX_TX_BUF_CONT | dma_flags;
- buffer->unmap_len = unmap_len;
- buffer->dma_offset = buffer->dma_addr - unmap_addr;
-
- if (frag_index >= nr_frags) {
- /* Store SKB details with the final buffer for
- * the completion.
- */
- buffer->skb = skb;
- buffer->flags = EFX_TX_BUF_SKB | dma_flags;
- return 0;
- }
-
- /* Move on to the next fragment. */
- fragment = &skb_shinfo(skb)->frags[frag_index++];
- len = skb_frag_size(fragment);
- dma_addr = skb_frag_dma_map(dma_dev, fragment,
- 0, len, DMA_TO_DEVICE);
- dma_flags = 0;
- unmap_len = len;
- unmap_addr = dma_addr;
-
- if (unlikely(dma_mapping_error(dma_dev, dma_addr)))
- return -EIO;
- } while (1);
-}
-
-/* Remove buffers put into a tx_queue for the current packet.
- * None of the buffers must have an skb attached.
- */
-static void efx_enqueue_unwind(struct efx_tx_queue *tx_queue,
- unsigned int insert_count)
-{
- struct efx_tx_buffer *buffer;
- unsigned int bytes_compl = 0;
- unsigned int pkts_compl = 0;
-
- /* Work backwards until we hit the original insert pointer value */
- while (tx_queue->insert_count != insert_count) {
- --tx_queue->insert_count;
- buffer = __efx_tx_queue_get_insert_buffer(tx_queue);
- efx_dequeue_buffer(tx_queue, buffer, &pkts_compl, &bytes_compl);
- }
-}
-
/*
* Fallback to software TSO.
*
@@ -473,12 +289,9 @@ static int efx_tx_tso_fallback(struct efx_tx_queue *tx_queue,
dev_consume_skb_any(skb);
skb = segments;
- while (skb) {
- next = skb->next;
- skb->next = NULL;
-
+ skb_list_walk_safe(skb, skb, next) {
+ skb_mark_not_on_list(skb);
efx_enqueue_skb(tx_queue, skb);
- skb = next;
}
return 0;
@@ -687,41 +500,6 @@ int efx_xdp_tx_buffers(struct efx_nic *efx, int n, struct xdp_frame **xdpfs,
return i;
}
-/* Remove packets from the TX queue
- *
- * This removes packets from the TX queue, up to and including the
- * specified index.
- */
-static void efx_dequeue_buffers(struct efx_tx_queue *tx_queue,
- unsigned int index,
- unsigned int *pkts_compl,
- unsigned int *bytes_compl)
-{
- struct efx_nic *efx = tx_queue->efx;
- unsigned int stop_index, read_ptr;
-
- stop_index = (index + 1) & tx_queue->ptr_mask;
- read_ptr = tx_queue->read_count & tx_queue->ptr_mask;
-
- while (read_ptr != stop_index) {
- struct efx_tx_buffer *buffer = &tx_queue->buffer[read_ptr];
-
- if (!(buffer->flags & EFX_TX_BUF_OPTION) &&
- unlikely(buffer->len == 0)) {
- netif_err(efx, tx_err, efx->net_dev,
- "TX queue %d spurious TX completion id %x\n",
- tx_queue->queue, read_ptr);
- efx_schedule_reset(efx, RESET_TYPE_TX_SKIP);
- return;
- }
-
- efx_dequeue_buffer(tx_queue, buffer, pkts_compl, bytes_compl);
-
- ++tx_queue->read_count;
- read_ptr = tx_queue->read_count & tx_queue->ptr_mask;
- }
-}
-
/* Initiate a packet transmission. We use one channel per CPU
* (sharing when we have more CPUs than channels). On Falcon, the TX
* completion events will be directed back to the CPU that transmitted
@@ -834,173 +612,3 @@ int efx_setup_tc(struct net_device *net_dev, enum tc_setup_type type,
net_dev->num_tc = num_tc;
return 0;
}
-
-void efx_xmit_done(struct efx_tx_queue *tx_queue, unsigned int index)
-{
- unsigned fill_level;
- struct efx_nic *efx = tx_queue->efx;
- struct efx_tx_queue *txq2;
- unsigned int pkts_compl = 0, bytes_compl = 0;
-
- EFX_WARN_ON_ONCE_PARANOID(index > tx_queue->ptr_mask);
-
- efx_dequeue_buffers(tx_queue, index, &pkts_compl, &bytes_compl);
- tx_queue->pkts_compl += pkts_compl;
- tx_queue->bytes_compl += bytes_compl;
-
- if (pkts_compl > 1)
- ++tx_queue->merge_events;
-
- /* See if we need to restart the netif queue. This memory
- * barrier ensures that we write read_count (inside
- * efx_dequeue_buffers()) before reading the queue status.
- */
- smp_mb();
- if (unlikely(netif_tx_queue_stopped(tx_queue->core_txq)) &&
- likely(efx->port_enabled) &&
- likely(netif_device_present(efx->net_dev))) {
- txq2 = efx_tx_queue_partner(tx_queue);
- fill_level = max(tx_queue->insert_count - tx_queue->read_count,
- txq2->insert_count - txq2->read_count);
- if (fill_level <= efx->txq_wake_thresh)
- netif_tx_wake_queue(tx_queue->core_txq);
- }
-
- /* Check whether the hardware queue is now empty */
- if ((int)(tx_queue->read_count - tx_queue->old_write_count) >= 0) {
- tx_queue->old_write_count = READ_ONCE(tx_queue->write_count);
- if (tx_queue->read_count == tx_queue->old_write_count) {
- smp_mb();
- tx_queue->empty_read_count =
- tx_queue->read_count | EFX_EMPTY_COUNT_VALID;
- }
- }
-}
-
-static unsigned int efx_tx_cb_page_count(struct efx_tx_queue *tx_queue)
-{
- return DIV_ROUND_UP(tx_queue->ptr_mask + 1, PAGE_SIZE >> EFX_TX_CB_ORDER);
-}
-
-int efx_probe_tx_queue(struct efx_tx_queue *tx_queue)
-{
- struct efx_nic *efx = tx_queue->efx;
- unsigned int entries;
- int rc;
-
- /* Create the smallest power-of-two aligned ring */
- entries = max(roundup_pow_of_two(efx->txq_entries), EFX_MIN_DMAQ_SIZE);
- EFX_WARN_ON_PARANOID(entries > EFX_MAX_DMAQ_SIZE);
- tx_queue->ptr_mask = entries - 1;
-
- netif_dbg(efx, probe, efx->net_dev,
- "creating TX queue %d size %#x mask %#x\n",
- tx_queue->queue, efx->txq_entries, tx_queue->ptr_mask);
-
- /* Allocate software ring */
- tx_queue->buffer = kcalloc(entries, sizeof(*tx_queue->buffer),
- GFP_KERNEL);
- if (!tx_queue->buffer)
- return -ENOMEM;
-
- tx_queue->cb_page = kcalloc(efx_tx_cb_page_count(tx_queue),
- sizeof(tx_queue->cb_page[0]), GFP_KERNEL);
- if (!tx_queue->cb_page) {
- rc = -ENOMEM;
- goto fail1;
- }
-
- /* Allocate hardware ring */
- rc = efx_nic_probe_tx(tx_queue);
- if (rc)
- goto fail2;
-
- return 0;
-
-fail2:
- kfree(tx_queue->cb_page);
- tx_queue->cb_page = NULL;
-fail1:
- kfree(tx_queue->buffer);
- tx_queue->buffer = NULL;
- return rc;
-}
-
-void efx_init_tx_queue(struct efx_tx_queue *tx_queue)
-{
- struct efx_nic *efx = tx_queue->efx;
-
- netif_dbg(efx, drv, efx->net_dev,
- "initialising TX queue %d\n", tx_queue->queue);
-
- tx_queue->insert_count = 0;
- tx_queue->write_count = 0;
- tx_queue->packet_write_count = 0;
- tx_queue->old_write_count = 0;
- tx_queue->read_count = 0;
- tx_queue->old_read_count = 0;
- tx_queue->empty_read_count = 0 | EFX_EMPTY_COUNT_VALID;
- tx_queue->xmit_more_available = false;
- tx_queue->timestamping = (efx_ptp_use_mac_tx_timestamps(efx) &&
- tx_queue->channel == efx_ptp_channel(efx));
- tx_queue->completed_desc_ptr = tx_queue->ptr_mask;
- tx_queue->completed_timestamp_major = 0;
- tx_queue->completed_timestamp_minor = 0;
-
- tx_queue->xdp_tx = efx_channel_is_xdp_tx(tx_queue->channel);
-
- /* Set up default function pointers. These may get replaced by
- * efx_nic_init_tx() based off NIC/queue capabilities.
- */
- tx_queue->handle_tso = efx_enqueue_skb_tso;
-
- /* Set up TX descriptor ring */
- efx_nic_init_tx(tx_queue);
-
- tx_queue->initialised = true;
-}
-
-void efx_fini_tx_queue(struct efx_tx_queue *tx_queue)
-{
- struct efx_tx_buffer *buffer;
-
- netif_dbg(tx_queue->efx, drv, tx_queue->efx->net_dev,
- "shutting down TX queue %d\n", tx_queue->queue);
-
- if (!tx_queue->buffer)
- return;
-
- /* Free any buffers left in the ring */
- while (tx_queue->read_count != tx_queue->write_count) {
- unsigned int pkts_compl = 0, bytes_compl = 0;
- buffer = &tx_queue->buffer[tx_queue->read_count & tx_queue->ptr_mask];
- efx_dequeue_buffer(tx_queue, buffer, &pkts_compl, &bytes_compl);
-
- ++tx_queue->read_count;
- }
- tx_queue->xmit_more_available = false;
- netdev_tx_reset_queue(tx_queue->core_txq);
-}
-
-void efx_remove_tx_queue(struct efx_tx_queue *tx_queue)
-{
- int i;
-
- if (!tx_queue->buffer)
- return;
-
- netif_dbg(tx_queue->efx, drv, tx_queue->efx->net_dev,
- "destroying TX queue %d\n", tx_queue->queue);
- efx_nic_remove_tx(tx_queue);
-
- if (tx_queue->cb_page) {
- for (i = 0; i < efx_tx_cb_page_count(tx_queue); i++)
- efx_nic_free_buffer(tx_queue->efx,
- &tx_queue->cb_page[i]);
- kfree(tx_queue->cb_page);
- tx_queue->cb_page = NULL;
- }
-
- kfree(tx_queue->buffer);
- tx_queue->buffer = NULL;
-}
diff --git a/drivers/net/ethernet/sfc/tx_common.c b/drivers/net/ethernet/sfc/tx_common.c
new file mode 100644
index 000000000000..b1571e9789d0
--- /dev/null
+++ b/drivers/net/ethernet/sfc/tx_common.c
@@ -0,0 +1,404 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/****************************************************************************
+ * Driver for Solarflare network controllers and boards
+ * Copyright 2018 Solarflare Communications Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation, incorporated herein by reference.
+ */
+
+#include "net_driver.h"
+#include "efx.h"
+#include "nic.h"
+#include "tx_common.h"
+
+static unsigned int efx_tx_cb_page_count(struct efx_tx_queue *tx_queue)
+{
+ return DIV_ROUND_UP(tx_queue->ptr_mask + 1,
+ PAGE_SIZE >> EFX_TX_CB_ORDER);
+}
+
+int efx_probe_tx_queue(struct efx_tx_queue *tx_queue)
+{
+ struct efx_nic *efx = tx_queue->efx;
+ unsigned int entries;
+ int rc;
+
+ /* Create the smallest power-of-two aligned ring */
+ entries = max(roundup_pow_of_two(efx->txq_entries), EFX_MIN_DMAQ_SIZE);
+ EFX_WARN_ON_PARANOID(entries > EFX_MAX_DMAQ_SIZE);
+ tx_queue->ptr_mask = entries - 1;
+
+ netif_dbg(efx, probe, efx->net_dev,
+ "creating TX queue %d size %#x mask %#x\n",
+ tx_queue->queue, efx->txq_entries, tx_queue->ptr_mask);
+
+ /* Allocate software ring */
+ tx_queue->buffer = kcalloc(entries, sizeof(*tx_queue->buffer),
+ GFP_KERNEL);
+ if (!tx_queue->buffer)
+ return -ENOMEM;
+
+ tx_queue->cb_page = kcalloc(efx_tx_cb_page_count(tx_queue),
+ sizeof(tx_queue->cb_page[0]), GFP_KERNEL);
+ if (!tx_queue->cb_page) {
+ rc = -ENOMEM;
+ goto fail1;
+ }
+
+ /* Allocate hardware ring */
+ rc = efx_nic_probe_tx(tx_queue);
+ if (rc)
+ goto fail2;
+
+ return 0;
+
+fail2:
+ kfree(tx_queue->cb_page);
+ tx_queue->cb_page = NULL;
+fail1:
+ kfree(tx_queue->buffer);
+ tx_queue->buffer = NULL;
+ return rc;
+}
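
The ring sizing above rounds txq_entries up to a power of two precisely so that ptr_mask = entries - 1 turns the free-running insert/read counters into ring indices with a single AND. A minimal sketch of that indexing:

#include <assert.h>

int main(void)
{
	unsigned int entries = 512;		/* power of two */
	unsigned int ptr_mask = entries - 1;
	unsigned int insert_count = 1030;	/* free-running, never reset */

	/* Masking is equivalent to modulo for power-of-two sizes */
	assert((insert_count & ptr_mask) == 1030 % 512);	/* index 6 */
	return 0;
}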
+
+void efx_init_tx_queue(struct efx_tx_queue *tx_queue)
+{
+ struct efx_nic *efx = tx_queue->efx;
+
+ netif_dbg(efx, drv, efx->net_dev,
+ "initialising TX queue %d\n", tx_queue->queue);
+
+ tx_queue->insert_count = 0;
+ tx_queue->write_count = 0;
+ tx_queue->packet_write_count = 0;
+ tx_queue->old_write_count = 0;
+ tx_queue->read_count = 0;
+ tx_queue->old_read_count = 0;
+ tx_queue->empty_read_count = 0 | EFX_EMPTY_COUNT_VALID;
+ tx_queue->xmit_more_available = false;
+ tx_queue->timestamping = (efx_ptp_use_mac_tx_timestamps(efx) &&
+ tx_queue->channel == efx_ptp_channel(efx));
+ tx_queue->completed_desc_ptr = tx_queue->ptr_mask;
+ tx_queue->completed_timestamp_major = 0;
+ tx_queue->completed_timestamp_minor = 0;
+
+ tx_queue->xdp_tx = efx_channel_is_xdp_tx(tx_queue->channel);
+
+ /* Set up default function pointers. These may get replaced by
+ * efx_nic_init_tx() based off NIC/queue capabilities.
+ */
+ tx_queue->handle_tso = efx_enqueue_skb_tso;
+
+ /* Set up TX descriptor ring */
+ efx_nic_init_tx(tx_queue);
+
+ tx_queue->initialised = true;
+}
+
+void efx_fini_tx_queue(struct efx_tx_queue *tx_queue)
+{
+ struct efx_tx_buffer *buffer;
+
+ netif_dbg(tx_queue->efx, drv, tx_queue->efx->net_dev,
+ "shutting down TX queue %d\n", tx_queue->queue);
+
+ if (!tx_queue->buffer)
+ return;
+
+ /* Free any buffers left in the ring */
+ while (tx_queue->read_count != tx_queue->write_count) {
+ unsigned int pkts_compl = 0, bytes_compl = 0;
+
+ buffer = &tx_queue->buffer[tx_queue->read_count & tx_queue->ptr_mask];
+ efx_dequeue_buffer(tx_queue, buffer, &pkts_compl, &bytes_compl);
+
+ ++tx_queue->read_count;
+ }
+ tx_queue->xmit_more_available = false;
+ netdev_tx_reset_queue(tx_queue->core_txq);
+}
+
+void efx_remove_tx_queue(struct efx_tx_queue *tx_queue)
+{
+ int i;
+
+ if (!tx_queue->buffer)
+ return;
+
+ netif_dbg(tx_queue->efx, drv, tx_queue->efx->net_dev,
+ "destroying TX queue %d\n", tx_queue->queue);
+ efx_nic_remove_tx(tx_queue);
+
+ if (tx_queue->cb_page) {
+ for (i = 0; i < efx_tx_cb_page_count(tx_queue); i++)
+ efx_nic_free_buffer(tx_queue->efx,
+ &tx_queue->cb_page[i]);
+ kfree(tx_queue->cb_page);
+ tx_queue->cb_page = NULL;
+ }
+
+ kfree(tx_queue->buffer);
+ tx_queue->buffer = NULL;
+}
+
+void efx_dequeue_buffer(struct efx_tx_queue *tx_queue,
+ struct efx_tx_buffer *buffer,
+ unsigned int *pkts_compl,
+ unsigned int *bytes_compl)
+{
+ if (buffer->unmap_len) {
+ struct device *dma_dev = &tx_queue->efx->pci_dev->dev;
+ dma_addr_t unmap_addr = buffer->dma_addr - buffer->dma_offset;
+
+ if (buffer->flags & EFX_TX_BUF_MAP_SINGLE)
+ dma_unmap_single(dma_dev, unmap_addr, buffer->unmap_len,
+ DMA_TO_DEVICE);
+ else
+ dma_unmap_page(dma_dev, unmap_addr, buffer->unmap_len,
+ DMA_TO_DEVICE);
+ buffer->unmap_len = 0;
+ }
+
+ if (buffer->flags & EFX_TX_BUF_SKB) {
+ struct sk_buff *skb = (struct sk_buff *)buffer->skb;
+
+ EFX_WARN_ON_PARANOID(!pkts_compl || !bytes_compl);
+ (*pkts_compl)++;
+ (*bytes_compl) += skb->len;
+ if (tx_queue->timestamping &&
+ (tx_queue->completed_timestamp_major ||
+ tx_queue->completed_timestamp_minor)) {
+ struct skb_shared_hwtstamps hwtstamp;
+
+ hwtstamp.hwtstamp =
+ efx_ptp_nic_to_kernel_time(tx_queue);
+ skb_tstamp_tx(skb, &hwtstamp);
+
+ tx_queue->completed_timestamp_major = 0;
+ tx_queue->completed_timestamp_minor = 0;
+ }
+ dev_consume_skb_any((struct sk_buff *)buffer->skb);
+ netif_vdbg(tx_queue->efx, tx_done, tx_queue->efx->net_dev,
+ "TX queue %d transmission id %x complete\n",
+ tx_queue->queue, tx_queue->read_count);
+ } else if (buffer->flags & EFX_TX_BUF_XDP) {
+ xdp_return_frame_rx_napi(buffer->xdpf);
+ }
+
+ buffer->len = 0;
+ buffer->flags = 0;
+}
+
+/* Remove packets from the TX queue
+ *
+ * This removes packets from the TX queue, up to and including the
+ * specified index.
+ */
+static void efx_dequeue_buffers(struct efx_tx_queue *tx_queue,
+ unsigned int index,
+ unsigned int *pkts_compl,
+ unsigned int *bytes_compl)
+{
+ struct efx_nic *efx = tx_queue->efx;
+ unsigned int stop_index, read_ptr;
+
+ stop_index = (index + 1) & tx_queue->ptr_mask;
+ read_ptr = tx_queue->read_count & tx_queue->ptr_mask;
+
+ while (read_ptr != stop_index) {
+ struct efx_tx_buffer *buffer = &tx_queue->buffer[read_ptr];
+
+ if (!(buffer->flags & EFX_TX_BUF_OPTION) &&
+ unlikely(buffer->len == 0)) {
+ netif_err(efx, tx_err, efx->net_dev,
+ "TX queue %d spurious TX completion id %x\n",
+ tx_queue->queue, read_ptr);
+ efx_schedule_reset(efx, RESET_TYPE_TX_SKIP);
+ return;
+ }
+
+ efx_dequeue_buffer(tx_queue, buffer, pkts_compl, bytes_compl);
+
+ ++tx_queue->read_count;
+ read_ptr = tx_queue->read_count & tx_queue->ptr_mask;
+ }
+}
+
+void efx_xmit_done(struct efx_tx_queue *tx_queue, unsigned int index)
+{
+ unsigned int fill_level, pkts_compl = 0, bytes_compl = 0;
+ struct efx_nic *efx = tx_queue->efx;
+ struct efx_tx_queue *txq2;
+
+ EFX_WARN_ON_ONCE_PARANOID(index > tx_queue->ptr_mask);
+
+ efx_dequeue_buffers(tx_queue, index, &pkts_compl, &bytes_compl);
+ tx_queue->pkts_compl += pkts_compl;
+ tx_queue->bytes_compl += bytes_compl;
+
+ if (pkts_compl > 1)
+ ++tx_queue->merge_events;
+
+ /* See if we need to restart the netif queue. This memory
+ * barrier ensures that we write read_count (inside
+ * efx_dequeue_buffers()) before reading the queue status.
+ */
+ smp_mb();
+ if (unlikely(netif_tx_queue_stopped(tx_queue->core_txq)) &&
+ likely(efx->port_enabled) &&
+ likely(netif_device_present(efx->net_dev))) {
+ txq2 = efx_tx_queue_partner(tx_queue);
+ fill_level = max(tx_queue->insert_count - tx_queue->read_count,
+ txq2->insert_count - txq2->read_count);
+ if (fill_level <= efx->txq_wake_thresh)
+ netif_tx_wake_queue(tx_queue->core_txq);
+ }
+
+ /* Check whether the hardware queue is now empty */
+ if ((int)(tx_queue->read_count - tx_queue->old_write_count) >= 0) {
+ tx_queue->old_write_count = READ_ONCE(tx_queue->write_count);
+ if (tx_queue->read_count == tx_queue->old_write_count) {
+ smp_mb();
+ tx_queue->empty_read_count =
+ tx_queue->read_count | EFX_EMPTY_COUNT_VALID;
+ }
+ }
+}
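
The "hardware queue now empty" test above uses a wrap-safe signed comparison: (int)(a - b) >= 0 means "a is not behind b" even when the u32 counters have wrapped. A small illustration:

#include <assert.h>

int main(void)
{
	unsigned int read_count = 3;		    /* wrapped past UINT_MAX */
	unsigned int old_write_count = 0xfffffffeu; /* just before the wrap */

	/* 3 - (UINT_MAX - 1) wraps to 5, which is non-negative as an int */
	assert((int)(read_count - old_write_count) >= 0);
	return 0;
}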
+
+/* Remove buffers put into a tx_queue for the current packet.
+ * None of the buffers must have an skb attached.
+ */
+void efx_enqueue_unwind(struct efx_tx_queue *tx_queue,
+ unsigned int insert_count)
+{
+ struct efx_tx_buffer *buffer;
+ unsigned int bytes_compl = 0;
+ unsigned int pkts_compl = 0;
+
+ /* Work backwards until we hit the original insert pointer value */
+ while (tx_queue->insert_count != insert_count) {
+ --tx_queue->insert_count;
+ buffer = __efx_tx_queue_get_insert_buffer(tx_queue);
+ efx_dequeue_buffer(tx_queue, buffer, &pkts_compl, &bytes_compl);
+ }
+}
+
+struct efx_tx_buffer *efx_tx_map_chunk(struct efx_tx_queue *tx_queue,
+ dma_addr_t dma_addr, size_t len)
+{
+ const struct efx_nic_type *nic_type = tx_queue->efx->type;
+ struct efx_tx_buffer *buffer;
+ unsigned int dma_len;
+
+ /* Map the fragment taking account of NIC-dependent DMA limits. */
+ do {
+ buffer = efx_tx_queue_get_insert_buffer(tx_queue);
+ dma_len = nic_type->tx_limit_len(tx_queue, dma_addr, len);
+
+ buffer->len = dma_len;
+ buffer->dma_addr = dma_addr;
+ buffer->flags = EFX_TX_BUF_CONT;
+ len -= dma_len;
+ dma_addr += dma_len;
+ ++tx_queue->insert_count;
+ } while (len);
+
+ return buffer;
+}
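
A sketch of the chunk-splitting loop above, with a simplified stand-in for the NIC's tx_limit_len() hook (the assumed NIC_DMA_LIMIT is illustrative; real hooks may also split on address boundaries): a fragment longer than the per-descriptor limit is emitted as several consecutive descriptors.

#include <stdio.h>

#define NIC_DMA_LIMIT 4096u	/* assumed per-descriptor DMA limit */

static unsigned int tx_limit_len(unsigned long dma_addr, unsigned int len)
{
	(void)dma_addr;
	return len < NIC_DMA_LIMIT ? len : NIC_DMA_LIMIT;
}

int main(void)
{
	unsigned long dma_addr = 0x10000;
	unsigned int len = 10000;

	do {
		unsigned int dma_len = tx_limit_len(dma_addr, len);

		printf("desc: addr=%#lx len=%u\n", dma_addr, dma_len);
		len -= dma_len;
		dma_addr += dma_len;
	} while (len);	/* emits 4096 + 4096 + 1808 */
	return 0;
}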
+
+/* Map all data from an SKB for DMA and create descriptors on the queue. */
+int efx_tx_map_data(struct efx_tx_queue *tx_queue, struct sk_buff *skb,
+ unsigned int segment_count)
+{
+ struct efx_nic *efx = tx_queue->efx;
+ struct device *dma_dev = &efx->pci_dev->dev;
+ unsigned int frag_index, nr_frags;
+ dma_addr_t dma_addr, unmap_addr;
+ unsigned short dma_flags;
+ size_t len, unmap_len;
+
+ nr_frags = skb_shinfo(skb)->nr_frags;
+ frag_index = 0;
+
+ /* Map header data. */
+ len = skb_headlen(skb);
+ dma_addr = dma_map_single(dma_dev, skb->data, len, DMA_TO_DEVICE);
+ dma_flags = EFX_TX_BUF_MAP_SINGLE;
+ unmap_len = len;
+ unmap_addr = dma_addr;
+
+ if (unlikely(dma_mapping_error(dma_dev, dma_addr)))
+ return -EIO;
+
+ if (segment_count) {
+ /* For TSO we need to put the header into a separate
+ * descriptor. Map this separately if necessary.
+ */
+ size_t header_len = skb_transport_header(skb) - skb->data +
+ (tcp_hdr(skb)->doff << 2u);
+
+ if (header_len != len) {
+ tx_queue->tso_long_headers++;
+ efx_tx_map_chunk(tx_queue, dma_addr, header_len);
+ len -= header_len;
+ dma_addr += header_len;
+ }
+ }
+
+ /* Add descriptors for each fragment. */
+ do {
+ struct efx_tx_buffer *buffer;
+ skb_frag_t *fragment;
+
+ buffer = efx_tx_map_chunk(tx_queue, dma_addr, len);
+
+ /* The final descriptor for a fragment is responsible for
+ * unmapping the whole fragment.
+ */
+ buffer->flags = EFX_TX_BUF_CONT | dma_flags;
+ buffer->unmap_len = unmap_len;
+ buffer->dma_offset = buffer->dma_addr - unmap_addr;
+
+ if (frag_index >= nr_frags) {
+ /* Store SKB details with the final buffer for
+ * the completion.
+ */
+ buffer->skb = skb;
+ buffer->flags = EFX_TX_BUF_SKB | dma_flags;
+ return 0;
+ }
+
+ /* Move on to the next fragment. */
+ fragment = &skb_shinfo(skb)->frags[frag_index++];
+ len = skb_frag_size(fragment);
+ dma_addr = skb_frag_dma_map(dma_dev, fragment, 0, len,
+ DMA_TO_DEVICE);
+ dma_flags = 0;
+ unmap_len = len;
+ unmap_addr = dma_addr;
+
+ if (unlikely(dma_mapping_error(dma_dev, dma_addr)))
+ return -EIO;
+ } while (1);
+}
+
+unsigned int efx_tx_max_skb_descs(struct efx_nic *efx)
+{
+ /* Header and payload descriptor for each output segment, plus
+ * one for every input fragment boundary within a segment
+ */
+ unsigned int max_descs = EFX_TSO_MAX_SEGS * 2 + MAX_SKB_FRAGS;
+
+ /* Possibly one more per segment for option descriptors */
+ if (efx_nic_rev(efx) >= EFX_REV_HUNT_A0)
+ max_descs += EFX_TSO_MAX_SEGS;
+
+ /* Possibly more for PCIe page boundaries within input fragments */
+ if (PAGE_SIZE > EFX_PAGE_SIZE)
+ max_descs += max_t(unsigned int, MAX_SKB_FRAGS,
+ DIV_ROUND_UP(GSO_MAX_SIZE, EFX_PAGE_SIZE));
+
+ return max_descs;
+}
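
A worked instance of the bound above, assuming EFX_TSO_MAX_SEGS = 100 and MAX_SKB_FRAGS = 17 (both values assumed here; check the driver and core headers):

#include <stdio.h>

int main(void)
{
	unsigned int tso_max_segs = 100;	/* assumed EFX_TSO_MAX_SEGS */
	unsigned int max_skb_frags = 17;	/* assumed MAX_SKB_FRAGS */

	/* Header + payload per segment, plus fragment boundaries */
	unsigned int max_descs = tso_max_segs * 2 + max_skb_frags;	/* 217 */

	max_descs += tso_max_segs;	/* option descriptors on EF10-class NICs */
	printf("worst case: %u descriptors\n", max_descs);	/* 317 */
	return 0;
}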
diff --git a/drivers/net/ethernet/sfc/tx_common.h b/drivers/net/ethernet/sfc/tx_common.h
new file mode 100644
index 000000000000..f92f1fe3a87f
--- /dev/null
+++ b/drivers/net/ethernet/sfc/tx_common.h
@@ -0,0 +1,36 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/****************************************************************************
+ * Driver for Solarflare network controllers and boards
+ * Copyright 2018 Solarflare Communications Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation, incorporated herein by reference.
+ */
+
+#ifndef EFX_TX_COMMON_H
+#define EFX_TX_COMMON_H
+
+int efx_probe_tx_queue(struct efx_tx_queue *tx_queue);
+void efx_init_tx_queue(struct efx_tx_queue *tx_queue);
+void efx_fini_tx_queue(struct efx_tx_queue *tx_queue);
+void efx_remove_tx_queue(struct efx_tx_queue *tx_queue);
+
+void efx_dequeue_buffer(struct efx_tx_queue *tx_queue,
+ struct efx_tx_buffer *buffer,
+ unsigned int *pkts_compl,
+ unsigned int *bytes_compl);
+
+void efx_xmit_done(struct efx_tx_queue *tx_queue, unsigned int index);
+
+void efx_enqueue_unwind(struct efx_tx_queue *tx_queue,
+ unsigned int insert_count);
+
+struct efx_tx_buffer *efx_tx_map_chunk(struct efx_tx_queue *tx_queue,
+ dma_addr_t dma_addr, size_t len);
+int efx_tx_map_data(struct efx_tx_queue *tx_queue, struct sk_buff *skb,
+ unsigned int segment_count);
+
+unsigned int efx_tx_max_skb_descs(struct efx_nic *efx);
+
+#endif
diff --git a/drivers/net/ethernet/sgi/Kconfig b/drivers/net/ethernet/sgi/Kconfig
index 37f048e1230c..bc26fa0d196f 100644
--- a/drivers/net/ethernet/sgi/Kconfig
+++ b/drivers/net/ethernet/sgi/Kconfig
@@ -6,7 +6,7 @@
config NET_VENDOR_SGI
bool "SGI devices"
default y
- depends on (PCI && SGI_IP27) || SGI_IP32
+ depends on (PCI && SGI_MFD_IOC3) || SGI_IP32
---help---
If you have a network (Ethernet) card belonging to this class, say Y.
@@ -19,7 +19,8 @@ if NET_VENDOR_SGI
config SGI_IOC3_ETH
bool "SGI IOC3 Ethernet"
- depends on PCI && SGI_IP27
+ depends on PCI && SGI_MFD_IOC3
+ select CRC16
select CRC32
select MII
---help---
diff --git a/drivers/net/ethernet/sgi/ioc3-eth.c b/drivers/net/ethernet/sgi/ioc3-eth.c
index d242906ae233..e61eb891c0f7 100644
--- a/drivers/net/ethernet/sgi/ioc3-eth.c
+++ b/drivers/net/ethernet/sgi/ioc3-eth.c
@@ -14,7 +14,6 @@
* o Use prefetching for large packets. What is a good lower limit for
* prefetching?
* o Use hardware checksums.
- * o Convert to using a IOC3 meta driver.
* o Which PHYs might possibly be attached to the IOC3 in real live,
* which workarounds are required for them? Do we ever have Lucent's?
* o For the 2.5 branch kill the mii-tool ioctls.
@@ -28,7 +27,8 @@
#include <linux/mm.h>
#include <linux/errno.h>
#include <linux/module.h>
-#include <linux/pci.h>
+#include <linux/init.h>
+#include <linux/crc16.h>
#include <linux/crc32.h>
#include <linux/mii.h>
#include <linux/in.h>
@@ -37,28 +37,22 @@
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/gfp.h>
-
-#ifdef CONFIG_SERIAL_8250
-#include <linux/serial_core.h>
-#include <linux/serial_8250.h>
-#include <linux/serial_reg.h>
-#endif
-
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/skbuff.h>
#include <linux/dma-mapping.h>
+#include <linux/platform_device.h>
+#include <linux/nvmem-consumer.h>
#include <net/ip.h>
-#include <asm/byteorder.h>
-#include <asm/pgtable.h>
-#include <linux/uaccess.h>
-#include <asm/sn/types.h>
#include <asm/sn/ioc3.h>
#include <asm/pci/bridge.h>
+#define CRC16_INIT 0
+#define CRC16_VALID 0xb001
+
/* Number of RX buffers. This is tunable in the range of 16 <= x < 512.
* The value must be a power of two.
*/
@@ -85,7 +79,6 @@
/* Private per NIC data of the driver. */
struct ioc3_private {
struct ioc3_ethregs *regs;
- struct ioc3 *all_regs;
struct device *dma_dev;
u32 *ssram;
unsigned long *rxr; /* pointer to receiver ring */
@@ -104,9 +97,6 @@ struct ioc3_private {
spinlock_t ioc3_lock;
struct mii_if_info mii;
- struct net_device *dev;
- struct pci_dev *pdev;
-
/* Members used by autonegotiation */
struct timer_list ioc3_timer;
};
@@ -114,7 +104,7 @@ struct ioc3_private {
static int ioc3_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
static void ioc3_set_multicast_list(struct net_device *dev);
static netdev_tx_t ioc3_start_xmit(struct sk_buff *skb, struct net_device *dev);
-static void ioc3_timeout(struct net_device *dev);
+static void ioc3_timeout(struct net_device *dev, unsigned int txqueue);
static inline unsigned int ioc3_hash(const unsigned char *addr);
static void ioc3_start(struct ioc3_private *ip);
static inline void ioc3_stop(struct ioc3_private *ip);
@@ -123,10 +113,8 @@ static int ioc3_alloc_rx_bufs(struct net_device *dev);
static void ioc3_free_rx_bufs(struct ioc3_private *ip);
static inline void ioc3_clean_tx_ring(struct ioc3_private *ip);
-static const char ioc3_str[] = "IOC3 Ethernet";
static const struct ethtool_ops ioc3_ethtool_ops;
-
static inline unsigned long aligned_rx_skb_addr(unsigned long addr)
{
return (~addr + 1) & (IOC3_DMA_XFER_LEN - 1UL);
@@ -179,225 +167,61 @@ static inline unsigned long ioc3_map(dma_addr_t addr, unsigned long attr)
#define ERBAR_VAL 0
#endif
-#define IOC3_SIZE 0x100000
-
-static inline u32 mcr_pack(u32 pulse, u32 sample)
-{
- return (pulse << 10) | (sample << 2);
-}
-
-static int nic_wait(u32 __iomem *mcr)
-{
- u32 m;
-
- do {
- m = readl(mcr);
- } while (!(m & 2));
-
- return m & 1;
-}
-
-static int nic_reset(u32 __iomem *mcr)
-{
- int presence;
-
- writel(mcr_pack(500, 65), mcr);
- presence = nic_wait(mcr);
-
- writel(mcr_pack(0, 500), mcr);
- nic_wait(mcr);
-
- return presence;
-}
-
-static inline int nic_read_bit(u32 __iomem *mcr)
-{
- int result;
-
- writel(mcr_pack(6, 13), mcr);
- result = nic_wait(mcr);
- writel(mcr_pack(0, 100), mcr);
- nic_wait(mcr);
-
- return result;
-}
-
-static inline void nic_write_bit(u32 __iomem *mcr, int bit)
+static int ioc3eth_nvmem_match(struct device *dev, const void *data)
{
- if (bit)
- writel(mcr_pack(6, 110), mcr);
- else
- writel(mcr_pack(80, 30), mcr);
+ const char *name = dev_name(dev);
+ const char *prefix = data;
+ int prefix_len;
- nic_wait(mcr);
-}
-
-/* Read a byte from an iButton device
- */
-static u32 nic_read_byte(u32 __iomem *mcr)
-{
- u32 result = 0;
- int i;
+ prefix_len = strlen(prefix);
+ if (strlen(name) < (prefix_len + 3))
+ return 0;
- for (i = 0; i < 8; i++)
- result = (result >> 1) | (nic_read_bit(mcr) << 7);
+ if (memcmp(prefix, name, prefix_len) != 0)
+ return 0;
- return result;
-}
-
-/* Write a byte to an iButton device
- */
-static void nic_write_byte(u32 __iomem *mcr, int byte)
-{
- int i, bit;
-
- for (i = 8; i; i--) {
- bit = byte & 1;
- byte >>= 1;
-
- nic_write_bit(mcr, bit);
- }
-}
-
-static u64 nic_find(u32 __iomem *mcr, int *last)
-{
- int a, b, index, disc;
- u64 address = 0;
-
- nic_reset(mcr);
- /* Search ROM. */
- nic_write_byte(mcr, 0xf0);
-
- /* Algorithm from ``Book of iButton Standards''. */
- for (index = 0, disc = 0; index < 64; index++) {
- a = nic_read_bit(mcr);
- b = nic_read_bit(mcr);
-
- if (a && b) {
- pr_warn("NIC search failed (not fatal).\n");
- *last = 0;
- return 0;
- }
-
- if (!a && !b) {
- if (index == *last) {
- address |= 1UL << index;
- } else if (index > *last) {
- address &= ~(1UL << index);
- disc = index;
- } else if ((address & (1UL << index)) == 0) {
- disc = index;
- }
- nic_write_bit(mcr, address & (1UL << index));
- continue;
- } else {
- if (a)
- address |= 1UL << index;
- else
- address &= ~(1UL << index);
- nic_write_bit(mcr, a);
- continue;
- }
- }
-
- *last = disc;
-
- return address;
-}
-
-static int nic_init(u32 __iomem *mcr)
-{
- const char *unknown = "unknown";
- const char *type = unknown;
- u8 crc;
- u8 serial[6];
- int save = 0, i;
-
- while (1) {
- u64 reg;
-
- reg = nic_find(mcr, &save);
-
- switch (reg & 0xff) {
- case 0x91:
- type = "DS1981U";
- break;
- default:
- if (save == 0) {
- /* Let the caller try again. */
- return -1;
- }
- continue;
- }
-
- nic_reset(mcr);
-
- /* Match ROM. */
- nic_write_byte(mcr, 0x55);
- for (i = 0; i < 8; i++)
- nic_write_byte(mcr, (reg >> (i << 3)) & 0xff);
-
- reg >>= 8; /* Shift out type. */
- for (i = 0; i < 6; i++) {
- serial[i] = reg & 0xff;
- reg >>= 8;
- }
- crc = reg & 0xff;
- break;
- }
-
- pr_info("Found %s NIC", type);
- if (type != unknown)
- pr_cont(" registration number %pM, CRC %02x", serial, crc);
- pr_cont(".\n");
+ /* Found an nvmem device attached to our IOC3;
+ * now check for one-wire family code 09, 89 or 91
+ */
+ if (memcmp(name + prefix_len, "09-", 3) == 0)
+ return 1;
+ if (memcmp(name + prefix_len, "89-", 3) == 0)
+ return 1;
+ if (memcmp(name + prefix_len, "91-", 3) == 0)
+ return 1;
return 0;
}
-/* Read the NIC (Number-In-a-Can) device used to store the MAC address on
- * SN0 / SN00 nodeboards and PCI cards.
- */
-static void ioc3_get_eaddr_nic(struct ioc3_private *ip)
+static int ioc3eth_get_mac_addr(struct resource *res, u8 mac_addr[6])
{
- u32 __iomem *mcr = &ip->all_regs->mcr;
- int tries = 2; /* There may be some problem with the battery? */
- u8 nic[14];
+ struct nvmem_device *nvmem;
+ char prefix[24];
+ u8 prom[16];
+ int ret;
int i;
- writel(1 << 21, &ip->all_regs->gpcr_s);
+ snprintf(prefix, sizeof(prefix), "ioc3-%012llx-",
+ res->start & ~0xffff);
- while (tries--) {
- if (!nic_init(mcr))
- break;
- udelay(500);
- }
-
- if (tries < 0) {
- pr_err("Failed to read MAC address\n");
- return;
- }
+ nvmem = nvmem_device_find(prefix, ioc3eth_nvmem_match);
+ if (IS_ERR(nvmem))
+ return PTR_ERR(nvmem);
- /* Read Memory. */
- nic_write_byte(mcr, 0xf0);
- nic_write_byte(mcr, 0x00);
- nic_write_byte(mcr, 0x00);
+ ret = nvmem_device_read(nvmem, 0, 16, prom);
+ nvmem_device_put(nvmem);
+ if (ret < 0)
+ return ret;
- for (i = 13; i >= 0; i--)
- nic[i] = nic_read_byte(mcr);
+ /* Check if the PROM content is valid */
+ if (prom[0] != 0x0a ||
+ crc16(CRC16_INIT, prom, 13) != CRC16_VALID)
+ return -EINVAL;
- for (i = 2; i < 8; i++)
- ip->dev->dev_addr[i - 2] = nic[i];
-}
-
-/* Ok, this is hosed by design. It's necessary to know what machine the
- * NIC is in in order to know how to read the NIC address. We also have
- * to know if it's a PCI card or a NIC in on the node board ...
- */
-static void ioc3_get_eaddr(struct ioc3_private *ip)
-{
- ioc3_get_eaddr_nic(ip);
+ for (i = 0; i < 6; i++)
+ mac_addr[i] = prom[10 - i];
- pr_info("Ethernet address is %pM.\n", ip->dev->dev_addr);
+ return 0;
}
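
For illustration, the PROM decode above implies this layout (inferred from the code, not from IOC3 documentation): byte 0 is a content-type tag (0x0a), bytes 0..12 are covered by the CRC16 check, and the six MAC bytes sit at offsets 10 down to 5, stored in reverse order. A standalone sketch of the byte reversal, using a made-up SGI-style address:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	/* Example PROM image: tag, padding, then MAC bytes reversed */
	uint8_t prom[16] = { 0x0a, 0, 0, 0, 0,
			     0x9e, 0x2b, 0x00, 0x69, 0x00, 0x08 };
	uint8_t mac[6];
	int i;

	for (i = 0; i < 6; i++)
		mac[i] = prom[10 - i];
	printf("%02x:%02x:%02x:%02x:%02x:%02x\n",
	       mac[0], mac[1], mac[2], mac[3], mac[4], mac[5]);
	/* prints 08:00:69:00:2b:9e (08:00:69 is an SGI OUI) */
	return 0;
}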
static void __ioc3_set_mac_address(struct net_device *dev)
@@ -770,7 +594,7 @@ static int ioc3_mii_init(struct ioc3_private *ip)
u16 word;
for (i = 0; i < 32; i++) {
- word = ioc3_mdio_read(ip->dev, i, MII_PHYSID1);
+ word = ioc3_mdio_read(ip->mii.dev, i, MII_PHYSID1);
if (word != 0xffff && word != 0x0000) {
found = 1;
@@ -975,12 +799,6 @@ static int ioc3_open(struct net_device *dev)
{
struct ioc3_private *ip = netdev_priv(dev);
- if (request_irq(dev->irq, ioc3_interrupt, IRQF_SHARED, ioc3_str, dev)) {
- netdev_err(dev, "Can't get irq %d\n", dev->irq);
-
- return -EAGAIN;
- }
-
ip->ehar_h = 0;
ip->ehar_l = 0;
@@ -1013,147 +831,6 @@ static int ioc3_close(struct net_device *dev)
return 0;
}
-/* MENET cards have four IOC3 chips, which are attached to two sets of
- * PCI slot resources each: the primary connections are on slots
- * 0..3 and the secondaries are on 4..7
- *
- * All four ethernets are brought out to connectors; six serial ports
- * (a pair from each of the first three IOC3s) are brought out to
- * MiniDINs; all other subdevices are left swinging in the wind, leave
- * them disabled.
- */
-
-static int ioc3_adjacent_is_ioc3(struct pci_dev *pdev, int slot)
-{
- struct pci_dev *dev = pci_get_slot(pdev->bus, PCI_DEVFN(slot, 0));
- int ret = 0;
-
- if (dev) {
- if (dev->vendor == PCI_VENDOR_ID_SGI &&
- dev->device == PCI_DEVICE_ID_SGI_IOC3)
- ret = 1;
- pci_dev_put(dev);
- }
-
- return ret;
-}
-
-static int ioc3_is_menet(struct pci_dev *pdev)
-{
- return !pdev->bus->parent &&
- ioc3_adjacent_is_ioc3(pdev, 0) &&
- ioc3_adjacent_is_ioc3(pdev, 1) &&
- ioc3_adjacent_is_ioc3(pdev, 2);
-}
-
-#ifdef CONFIG_SERIAL_8250
-/* Note about serial ports and consoles:
- * For console output, everyone uses the IOC3 UARTA (offset 0x178)
- * connected to the master node (look in ip27_setup_console() and
- * ip27prom_console_write()).
- *
- * For serial (/dev/ttyS0 etc), we can not have hardcoded serial port
- * addresses on a partitioned machine. Since we currently use the ioc3
- * serial ports, we use dynamic serial port discovery that the serial.c
- * driver uses for pci/pnp ports (there is an entry for the SGI ioc3
- * boards in pci_boards[]). Unfortunately, UARTA's pio address is greater
- * than UARTB's, although UARTA on o200s has traditionally been known as
- * port 0. So, we just use one serial port from each ioc3 (since the
- * serial driver adds addresses to get to higher ports).
- *
- * The first one to do a register_console becomes the preferred console
- * (if there is no kernel command line console= directive). /dev/console
- * (ie 5, 1) is then "aliased" into the device number returned by the
- * "device" routine referred to in this console structure
- * (ip27prom_console_dev).
- *
- * Also look in ip27-pci.c:pci_fixup_ioc3() for some comments on working
- * around ioc3 oddities in this respect.
- *
- * The IOC3 serials use a 22MHz clock rate with an additional divider which
- * can be programmed in the SCR register if the DLAB bit is set.
- *
- * Register to interrupt zero because we share the interrupt with
- * the serial driver which we don't properly support yet.
- *
- * Can't use UPF_IOREMAP as the whole of IOC3 resources have already been
- * registered.
- */
-static void ioc3_8250_register(struct ioc3_uartregs __iomem *uart)
-{
-#define COSMISC_CONSTANT 6
-
- struct uart_8250_port port = {
- .port = {
- .irq = 0,
- .flags = UPF_SKIP_TEST | UPF_BOOT_AUTOCONF,
- .iotype = UPIO_MEM,
- .regshift = 0,
- .uartclk = (22000000 << 1) / COSMISC_CONSTANT,
-
- .membase = (unsigned char __iomem *)uart,
- .mapbase = (unsigned long)uart,
- }
- };
- unsigned char lcr;
-
- lcr = readb(&uart->iu_lcr);
- writeb(lcr | UART_LCR_DLAB, &uart->iu_lcr);
- writeb(COSMISC_CONSTANT, &uart->iu_scr);
- writeb(lcr, &uart->iu_lcr);
- readb(&uart->iu_lcr);
- serial8250_register_8250_port(&port);
-}
-
-static void ioc3_serial_probe(struct pci_dev *pdev, struct ioc3 *ioc3)
-{
- u32 sio_iec;
-
- /* We need to recognice and treat the fourth MENET serial as it
- * does not have an SuperIO chip attached to it, therefore attempting
- * to access it will result in bus errors. We call something an
- * MENET if PCI slot 0, 1, 2 and 3 of a master PCI bus all have an IOC3
- * in it. This is paranoid but we want to avoid blowing up on a
- * showhorn PCI box that happens to have 4 IOC3 cards in it so it's
- * not paranoid enough ...
- */
- if (ioc3_is_menet(pdev) && PCI_SLOT(pdev->devfn) == 3)
- return;
-
- /* Switch IOC3 to PIO mode. It probably already was but let's be
- * paranoid
- */
- writel(GPCR_UARTA_MODESEL | GPCR_UARTB_MODESEL, &ioc3->gpcr_s);
- readl(&ioc3->gpcr_s);
- writel(0, &ioc3->gppr[6]);
- readl(&ioc3->gppr[6]);
- writel(0, &ioc3->gppr[7]);
- readl(&ioc3->gppr[7]);
- writel(readl(&ioc3->port_a.sscr) & ~SSCR_DMA_EN, &ioc3->port_a.sscr);
- readl(&ioc3->port_a.sscr);
- writel(readl(&ioc3->port_b.sscr) & ~SSCR_DMA_EN, &ioc3->port_b.sscr);
- readl(&ioc3->port_b.sscr);
- /* Disable all SA/B interrupts except for SA/B_INT in SIO_IEC. */
- sio_iec = readl(&ioc3->sio_iec);
- sio_iec &= ~(SIO_IR_SA_TX_MT | SIO_IR_SA_RX_FULL |
- SIO_IR_SA_RX_HIGH | SIO_IR_SA_RX_TIMER |
- SIO_IR_SA_DELTA_DCD | SIO_IR_SA_DELTA_CTS |
- SIO_IR_SA_TX_EXPLICIT | SIO_IR_SA_MEMERR);
- sio_iec |= SIO_IR_SA_INT;
- sio_iec &= ~(SIO_IR_SB_TX_MT | SIO_IR_SB_RX_FULL |
- SIO_IR_SB_RX_HIGH | SIO_IR_SB_RX_TIMER |
- SIO_IR_SB_DELTA_DCD | SIO_IR_SB_DELTA_CTS |
- SIO_IR_SB_TX_EXPLICIT | SIO_IR_SB_MEMERR);
- sio_iec |= SIO_IR_SB_INT;
- writel(sio_iec, &ioc3->sio_iec);
- writel(0, &ioc3->port_a.sscr);
- writel(0, &ioc3->port_b.sscr);
-
- ioc3_8250_register(&ioc3->sregs.uarta);
- ioc3_8250_register(&ioc3->sregs.uartb);
-}
-#endif
-
static const struct net_device_ops ioc3_netdev_ops = {
.ndo_open = ioc3_open,
.ndo_stop = ioc3_close,
@@ -1166,61 +843,52 @@ static const struct net_device_ops ioc3_netdev_ops = {
.ndo_set_mac_address = ioc3_set_mac_address,
};
-static int ioc3_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
+static int ioc3eth_probe(struct platform_device *pdev)
{
- unsigned int sw_physid1, sw_physid2;
- struct net_device *dev = NULL;
+ u32 sw_physid1, sw_physid2, vendor, model, rev;
struct ioc3_private *ip;
- struct ioc3 *ioc3;
- unsigned long ioc3_base, ioc3_size;
- u32 vendor, model, rev;
+ struct net_device *dev;
+ struct resource *regs;
+ u8 mac_addr[6];
int err;
- /* Configure DMA attributes. */
- err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
- if (err) {
- pr_err("%s: No usable DMA configuration, aborting.\n",
- pci_name(pdev));
- goto out;
- }
-
- if (pci_enable_device(pdev))
- return -ENODEV;
+ regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ /* get mac addr from one wire prom */
+ if (ioc3eth_get_mac_addr(regs, mac_addr))
+ return -EPROBE_DEFER; /* not available yet */
dev = alloc_etherdev(sizeof(struct ioc3_private));
- if (!dev) {
- err = -ENOMEM;
- goto out_disable;
- }
-
- err = pci_request_regions(pdev, "ioc3");
- if (err)
- goto out_free;
+ if (!dev)
+ return -ENOMEM;
SET_NETDEV_DEV(dev, &pdev->dev);
ip = netdev_priv(dev);
- ip->dev = dev;
- ip->dma_dev = &pdev->dev;
-
- dev->irq = pdev->irq;
+ ip->dma_dev = pdev->dev.parent;
+ ip->regs = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(ip->regs)) {
+ err = PTR_ERR(ip->regs);
+ goto out_free;
+ }
- ioc3_base = pci_resource_start(pdev, 0);
- ioc3_size = pci_resource_len(pdev, 0);
- ioc3 = (struct ioc3 *)ioremap(ioc3_base, ioc3_size);
- if (!ioc3) {
- pr_err("ioc3eth(%s): ioremap failed, goodbye.\n",
- pci_name(pdev));
+ ip->ssram = devm_platform_ioremap_resource(pdev, 1);
+ if (IS_ERR(ip->ssram)) {
- err = -ENOMEM;
- goto out_res;
+ err = PTR_ERR(ip->ssram);
+ goto out_free;
}
- ip->regs = &ioc3->eth;
- ip->ssram = ioc3->ssram;
- ip->all_regs = ioc3;
-#ifdef CONFIG_SERIAL_8250
- ioc3_serial_probe(pdev, ioc3);
-#endif
+ dev->irq = platform_get_irq(pdev, 0);
+ if (dev->irq < 0) {
+ err = dev->irq;
+ goto out_free;
+ }
+
+ if (devm_request_irq(&pdev->dev, dev->irq, ioc3_interrupt,
+ IRQF_SHARED, "ioc3-eth", dev)) {
+ dev_err(&pdev->dev, "Can't get irq %d\n", dev->irq);
+ err = -ENODEV;
+ goto out_free;
+ }
spin_lock_init(&ip->ioc3_lock);
timer_setup(&ip->ioc3_timer, ioc3_timer, 0);
@@ -1250,8 +918,6 @@ static int ioc3_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
ioc3_init(dev);
- ip->pdev = pdev;
-
ip->mii.phy_id_mask = 0x1f;
ip->mii.reg_num_mask = 0x1f;
ip->mii.dev = dev;
@@ -1261,15 +927,14 @@ static int ioc3_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
ioc3_mii_init(ip);
if (ip->mii.phy_id == -1) {
- pr_err("ioc3-eth(%s): Didn't find a PHY, goodbye.\n",
- pci_name(pdev));
+ netdev_err(dev, "Didn't find a PHY, goodbye.\n");
err = -ENODEV;
goto out_stop;
}
ioc3_mii_start(ip);
ioc3_ssram_disc(ip);
- ioc3_get_eaddr(ip);
+ memcpy(dev->dev_addr, mac_addr, ETH_ALEN);
/* The IOC3-specific entries in the device structure. */
dev->watchdog_timeo = 5 * HZ;
@@ -1306,21 +971,14 @@ out_stop:
if (ip->tx_ring)
dma_free_coherent(ip->dma_dev, TX_RING_SIZE, ip->tx_ring,
ip->txr_dma);
-out_res:
- pci_release_regions(pdev);
out_free:
free_netdev(dev);
-out_disable:
- /* We should call pci_disable_device(pdev); here if the IOC3 wasn't
- * such a weird device ...
- */
-out:
return err;
}
-static void ioc3_remove_one(struct pci_dev *pdev)
+static int ioc3eth_remove(struct platform_device *pdev)
{
- struct net_device *dev = pci_get_drvdata(pdev);
+ struct net_device *dev = platform_get_drvdata(pdev);
struct ioc3_private *ip = netdev_priv(dev);
dma_free_coherent(ip->dma_dev, RX_RING_SIZE, ip->rxr, ip->rxr_dma);
@@ -1328,27 +986,11 @@ static void ioc3_remove_one(struct pci_dev *pdev)
unregister_netdev(dev);
del_timer_sync(&ip->ioc3_timer);
-
- iounmap(ip->all_regs);
- pci_release_regions(pdev);
free_netdev(dev);
- /* We should call pci_disable_device(pdev); here if the IOC3 wasn't
- * such a weird device ...
- */
-}
-static const struct pci_device_id ioc3_pci_tbl[] = {
- { PCI_VENDOR_ID_SGI, PCI_DEVICE_ID_SGI_IOC3, PCI_ANY_ID, PCI_ANY_ID },
- { 0 }
-};
-MODULE_DEVICE_TABLE(pci, ioc3_pci_tbl);
+ return 0;
+}
-static struct pci_driver ioc3_driver = {
- .name = "ioc3-eth",
- .id_table = ioc3_pci_tbl,
- .probe = ioc3_probe,
- .remove = ioc3_remove_one,
-};
static netdev_tx_t ioc3_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
@@ -1479,7 +1121,7 @@ drop_packet:
return NETDEV_TX_OK;
}
-static void ioc3_timeout(struct net_device *dev)
+static void ioc3_timeout(struct net_device *dev, unsigned int txqueue)
{
struct ioc3_private *ip = netdev_priv(dev);
@@ -1530,11 +1172,10 @@ static inline unsigned int ioc3_hash(const unsigned char *addr)
static void ioc3_get_drvinfo(struct net_device *dev,
struct ethtool_drvinfo *info)
{
- struct ioc3_private *ip = netdev_priv(dev);
-
strlcpy(info->driver, IOC3_NAME, sizeof(info->driver));
strlcpy(info->version, IOC3_VERSION, sizeof(info->version));
- strlcpy(info->bus_info, pci_name(ip->pdev), sizeof(info->bus_info));
+ strlcpy(info->bus_info, pci_name(to_pci_dev(dev->dev.parent)),
+ sizeof(info->bus_info));
}
static int ioc3_get_link_ksettings(struct net_device *dev,
@@ -1646,7 +1287,16 @@ static void ioc3_set_multicast_list(struct net_device *dev)
spin_unlock_irq(&ip->ioc3_lock);
}
-module_pci_driver(ioc3_driver);
+static struct platform_driver ioc3eth_driver = {
+ .probe = ioc3eth_probe,
+ .remove = ioc3eth_remove,
+ .driver = {
+ .name = "ioc3-eth",
+ }
+};
+
+module_platform_driver(ioc3eth_driver);
+
MODULE_AUTHOR("Ralf Baechle <ralf@linux-mips.org>");
MODULE_DESCRIPTION("SGI IOC3 Ethernet driver");
MODULE_LICENSE("GPL");
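
Taken together, the hunks above turn ioc3-eth from a self-contained PCI
driver into a platform driver whose MMIO ranges and IRQ are handed down
by a parent device (an IOC3 multi-function parent in this series, which
is also why DMA now goes through pdev->dev.parent). A minimal sketch of
the pattern, under the assumption of such a parent; everything except
the core platform API is illustrative:

    #include <linux/module.h>
    #include <linux/platform_device.h>

    static int sketch_probe(struct platform_device *pdev)
    {
        void __iomem *regs;
        int irq;

        /* mapped via devres, unmapped automatically on detach */
        regs = devm_platform_ioremap_resource(pdev, 0);
        if (IS_ERR(regs))
            return PTR_ERR(regs);

        irq = platform_get_irq(pdev, 0);
        if (irq < 0)
            return irq;

        /* ... allocate the netdev, request the irq, register ... */
        return 0;
    }

    static int sketch_remove(struct platform_device *pdev)
    {
        /* devres tears down what probe set up */
        return 0;
    }

    static struct platform_driver sketch_driver = {
        .probe = sketch_probe,
        .remove = sketch_remove,
        .driver = { .name = "ioc3-eth" },
    };
    module_platform_driver(sketch_driver);
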
diff --git a/drivers/net/ethernet/sgi/meth.c b/drivers/net/ethernet/sgi/meth.c
index 539bc5db989c..0c396ecd3389 100644
--- a/drivers/net/ethernet/sgi/meth.c
+++ b/drivers/net/ethernet/sgi/meth.c
@@ -90,7 +90,7 @@ struct meth_private {
spinlock_t meth_lock;
};
-static void meth_tx_timeout(struct net_device *dev);
+static void meth_tx_timeout(struct net_device *dev, unsigned int txqueue);
static irqreturn_t meth_interrupt(int irq, void *dev_id);
/* global, initialized in ip32-setup.c */
@@ -727,7 +727,7 @@ static netdev_tx_t meth_tx(struct sk_buff *skb, struct net_device *dev)
/*
* Deal with a transmit timeout.
*/
-static void meth_tx_timeout(struct net_device *dev)
+static void meth_tx_timeout(struct net_device *dev, unsigned int txqueue)
{
struct meth_private *priv = netdev_priv(dev);
unsigned long flags;
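
The meth change above is the same mechanical adaptation every driver in
the rest of this patch makes: the core's ndo_tx_timeout hook now passes
the index of the TX queue whose watchdog fired, so multi-queue drivers
no longer have to scan all of their queues. A minimal sketch of the
updated hook for a single-queue device (the recovery steps are
illustrative):

    #include <linux/netdevice.h>

    static void sketch_tx_timeout(struct net_device *dev, unsigned int txqueue)
    {
        /* single-queue hardware: txqueue is always 0, ignore it */
        netdev_warn(dev, "TX watchdog fired on queue %u\n", txqueue);

        /* typical recovery: reset the MAC, then restart the queue */
        netif_wake_queue(dev);
    }
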
diff --git a/drivers/net/ethernet/silan/sc92031.c b/drivers/net/ethernet/silan/sc92031.c
index c7641a236eb8..cb043eb1bdc1 100644
--- a/drivers/net/ethernet/silan/sc92031.c
+++ b/drivers/net/ethernet/silan/sc92031.c
@@ -1078,7 +1078,7 @@ static void sc92031_set_multicast_list(struct net_device *dev)
spin_unlock_bh(&priv->lock);
}
-static void sc92031_tx_timeout(struct net_device *dev)
+static void sc92031_tx_timeout(struct net_device *dev, unsigned int txqueue)
{
struct sc92031_priv *priv = netdev_priv(dev);
diff --git a/drivers/net/ethernet/sis/sis190.c b/drivers/net/ethernet/sis/sis190.c
index 5b351beb78cb..5a4b6e3ab38f 100644
--- a/drivers/net/ethernet/sis/sis190.c
+++ b/drivers/net/ethernet/sis/sis190.c
@@ -1538,7 +1538,7 @@ err_out_0:
goto out;
}
-static void sis190_tx_timeout(struct net_device *dev)
+static void sis190_tx_timeout(struct net_device *dev, unsigned int txqueue)
{
struct sis190_private *tp = netdev_priv(dev);
void __iomem *ioaddr = tp->mmio_addr;
diff --git a/drivers/net/ethernet/sis/sis900.c b/drivers/net/ethernet/sis/sis900.c
index 85eaccbbbac1..81ed7589e33c 100644
--- a/drivers/net/ethernet/sis/sis900.c
+++ b/drivers/net/ethernet/sis/sis900.c
@@ -222,7 +222,7 @@ static int mdio_read(struct net_device *net_dev, int phy_id, int location);
static void mdio_write(struct net_device *net_dev, int phy_id, int location, int val);
static void sis900_timer(struct timer_list *t);
static void sis900_check_mode (struct net_device *net_dev, struct mii_phy *mii_phy);
-static void sis900_tx_timeout(struct net_device *net_dev);
+static void sis900_tx_timeout(struct net_device *net_dev, unsigned int txqueue);
static void sis900_init_tx_ring(struct net_device *net_dev);
static void sis900_init_rx_ring(struct net_device *net_dev);
static netdev_tx_t sis900_start_xmit(struct sk_buff *skb,
@@ -1537,7 +1537,7 @@ static void sis900_read_mode(struct net_device *net_dev, int *speed, int *duplex
* disable interrupts and do some tasks
*/
-static void sis900_tx_timeout(struct net_device *net_dev)
+static void sis900_tx_timeout(struct net_device *net_dev, unsigned int txqueue)
{
struct sis900_private *sis_priv = netdev_priv(net_dev);
void __iomem *ioaddr = sis_priv->ioaddr;
diff --git a/drivers/net/ethernet/smsc/epic100.c b/drivers/net/ethernet/smsc/epic100.c
index be47d864f8b9..61ddee0c2a2e 100644
--- a/drivers/net/ethernet/smsc/epic100.c
+++ b/drivers/net/ethernet/smsc/epic100.c
@@ -280,6 +280,7 @@ struct epic_private {
signed char phys[4]; /* MII device addresses. */
u16 advertising; /* NWay media advertisement */
int mii_phy_cnt;
+ u32 ethtool_ops_nesting;
struct mii_if_info mii;
unsigned int tx_full:1; /* The Tx queue is full. */
unsigned int default_port:4; /* Last dev->if_port value. */
@@ -291,7 +292,7 @@ static int mdio_read(struct net_device *dev, int phy_id, int location);
static void mdio_write(struct net_device *dev, int phy_id, int loc, int val);
static void epic_restart(struct net_device *dev);
static void epic_timer(struct timer_list *t);
-static void epic_tx_timeout(struct net_device *dev);
+static void epic_tx_timeout(struct net_device *dev, unsigned int txqueue);
static void epic_init_ring(struct net_device *dev);
static netdev_tx_t epic_start_xmit(struct sk_buff *skb,
struct net_device *dev);
@@ -861,7 +862,7 @@ static void epic_timer(struct timer_list *t)
add_timer(&ep->timer);
}
-static void epic_tx_timeout(struct net_device *dev)
+static void epic_tx_timeout(struct net_device *dev, unsigned int txqueue)
{
struct epic_private *ep = netdev_priv(dev);
void __iomem *ioaddr = ep->ioaddr;
@@ -1435,8 +1436,10 @@ static int ethtool_begin(struct net_device *dev)
struct epic_private *ep = netdev_priv(dev);
void __iomem *ioaddr = ep->ioaddr;
+ if (ep->ethtool_ops_nesting == U32_MAX)
+ return -EBUSY;
/* power-up, if interface is down */
- if (!netif_running(dev)) {
+ if (!ep->ethtool_ops_nesting++ && !netif_running(dev)) {
ew32(GENCTL, 0x0200);
ew32(NVCTL, (er32(NVCTL) & ~0x003c) | 0x4800);
}
@@ -1449,7 +1452,7 @@ static void ethtool_complete(struct net_device *dev)
void __iomem *ioaddr = ep->ioaddr;
/* power-down, if interface is down */
- if (!netif_running(dev)) {
+ if (!--ep->ethtool_ops_nesting && !netif_running(dev)) {
ew32(GENCTL, 0x0008);
ew32(NVCTL, (er32(NVCTL) & ~0x483c) | 0x0000);
}
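
The begin/complete pair used to key the chip's power state off
netif_running() alone, so nested ethtool operations could power the
EPIC down while an outer operation was still mid-flight. The new
counter makes the pair reentrant: only the outermost begin powers up
and only the matching final complete powers down. A stripped-down
sketch of the pattern, with the register writes elided:

    static int sketch_begin(struct epic_private *ep, bool running)
    {
        if (ep->ethtool_ops_nesting == U32_MAX)
            return -EBUSY;	/* overflow guard */
        /* power up only on the outermost begin, and only if the
         * interface is down (when up, the chip is already powered) */
        if (!ep->ethtool_ops_nesting++ && !running) {
            /* GENCTL/NVCTL power-up writes go here */
        }
        return 0;
    }

    static void sketch_complete(struct epic_private *ep, bool running)
    {
        /* power down only when the last nested operation finishes */
        if (!--ep->ethtool_ops_nesting && !running) {
            /* GENCTL/NVCTL power-down writes go here */
        }
    }
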
diff --git a/drivers/net/ethernet/smsc/smc911x.c b/drivers/net/ethernet/smsc/smc911x.c
index 8d88e4083456..186c0bddbe5f 100644
--- a/drivers/net/ethernet/smsc/smc911x.c
+++ b/drivers/net/ethernet/smsc/smc911x.c
@@ -936,7 +936,7 @@ static void smc911x_phy_configure(struct work_struct *work)
if (lp->ctl_rspeed != 100)
my_ad_caps &= ~(ADVERTISE_100BASE4|ADVERTISE_100FULL|ADVERTISE_100HALF);
- if (!lp->ctl_rfduplx)
+ if (!lp->ctl_rfduplx)
my_ad_caps &= ~(ADVERTISE_100FULL|ADVERTISE_10FULL);
/* Update our Auto-Neg Advertisement Register */
@@ -1245,7 +1245,7 @@ static void smc911x_poll_controller(struct net_device *dev)
#endif
/* Our watchdog timed out. Called by the networking layer */
-static void smc911x_timeout(struct net_device *dev)
+static void smc911x_timeout(struct net_device *dev, unsigned int txqueue)
{
struct smc911x_local *lp = netdev_priv(dev);
int status, mask;
diff --git a/drivers/net/ethernet/smsc/smc9194.c b/drivers/net/ethernet/smsc/smc9194.c
index d3bb2ba51f40..4b2330deed47 100644
--- a/drivers/net/ethernet/smsc/smc9194.c
+++ b/drivers/net/ethernet/smsc/smc9194.c
@@ -216,7 +216,7 @@ static int smc_open(struct net_device *dev);
/*
. Our watchdog timed out. Called by the networking layer
*/
-static void smc_timeout(struct net_device *dev);
+static void smc_timeout(struct net_device *dev, unsigned int txqueue);
/*
. This is called by the kernel in response to 'ifconfig ethX down'. It
@@ -1094,7 +1094,7 @@ static int smc_open(struct net_device *dev)
.--------------------------------------------------------
*/
-static void smc_timeout(struct net_device *dev)
+static void smc_timeout(struct net_device *dev, unsigned int txqueue)
{
/* If we get here, some higher level has decided we are broken.
There should really be a "kick me" function call instead. */
diff --git a/drivers/net/ethernet/smsc/smc91c92_cs.c b/drivers/net/ethernet/smsc/smc91c92_cs.c
index a55f430f6a7b..f2a50eb3c1e0 100644
--- a/drivers/net/ethernet/smsc/smc91c92_cs.c
+++ b/drivers/net/ethernet/smsc/smc91c92_cs.c
@@ -271,7 +271,7 @@ static void smc91c92_release(struct pcmcia_device *link);
static int smc_open(struct net_device *dev);
static int smc_close(struct net_device *dev);
static int smc_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
-static void smc_tx_timeout(struct net_device *dev);
+static void smc_tx_timeout(struct net_device *dev, unsigned int txqueue);
static netdev_tx_t smc_start_xmit(struct sk_buff *skb,
struct net_device *dev);
static irqreturn_t smc_interrupt(int irq, void *dev_id);
@@ -1178,7 +1178,7 @@ static void smc_hardware_send_packet(struct net_device * dev)
/*====================================================================*/
-static void smc_tx_timeout(struct net_device *dev)
+static void smc_tx_timeout(struct net_device *dev, unsigned int txqueue)
{
struct smc_private *smc = netdev_priv(dev);
unsigned int ioaddr = dev->base_addr;
diff --git a/drivers/net/ethernet/smsc/smc91x.c b/drivers/net/ethernet/smsc/smc91x.c
index 3a6761131f4c..90410f9d3b1a 100644
--- a/drivers/net/ethernet/smsc/smc91x.c
+++ b/drivers/net/ethernet/smsc/smc91x.c
@@ -1321,7 +1321,7 @@ static void smc_poll_controller(struct net_device *dev)
#endif
/* Our watchdog timed out. Called by the networking layer */
-static void smc_timeout(struct net_device *dev)
+static void smc_timeout(struct net_device *dev, unsigned int txqueue)
{
struct smc_local *lp = netdev_priv(dev);
void __iomem *ioaddr = lp->base;
diff --git a/drivers/net/ethernet/smsc/smsc911x.c b/drivers/net/ethernet/smsc/smsc911x.c
index 38068fc34141..49a6a9167af4 100644
--- a/drivers/net/ethernet/smsc/smsc911x.c
+++ b/drivers/net/ethernet/smsc/smsc911x.c
@@ -1943,15 +1943,6 @@ static int smsc911x_set_mac_address(struct net_device *dev, void *p)
return 0;
}
-/* Standard ioctls for mii-tool */
-static int smsc911x_do_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
-{
- if (!netif_running(dev) || !dev->phydev)
- return -EINVAL;
-
- return phy_mii_ioctl(dev->phydev, ifr, cmd);
-}
-
static void smsc911x_ethtool_getdrvinfo(struct net_device *dev,
struct ethtool_drvinfo *info)
{
@@ -2151,7 +2142,7 @@ static const struct net_device_ops smsc911x_netdev_ops = {
.ndo_start_xmit = smsc911x_hard_start_xmit,
.ndo_get_stats = smsc911x_get_stats,
.ndo_set_rx_mode = smsc911x_set_multicast_list,
- .ndo_do_ioctl = smsc911x_do_ioctl,
+ .ndo_do_ioctl = phy_do_ioctl_running,
.ndo_validate_addr = eth_validate_addr,
.ndo_set_mac_address = smsc911x_set_mac_address,
#ifdef CONFIG_NET_POLL_CONTROLLER
@@ -2454,7 +2445,7 @@ static int smsc911x_drv_probe(struct platform_device *pdev)
pdata = netdev_priv(dev);
dev->irq = irq;
- pdata->ioaddr = ioremap_nocache(res->start, res_size);
+ pdata->ioaddr = ioremap(res->start, res_size);
if (!pdata->ioaddr) {
retval = -ENOMEM;
goto out_ioremap_fail;
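
This driver and smsc9420 below drop their hand-rolled MII ioctl in
favour of the new phylib helper: phy_do_ioctl_running() performs the
same netif_running() and phydev checks the deleted functions did before
forwarding to phy_mii_ioctl() (the errno for the not-running case
becomes -ENODEV rather than -EINVAL), while phy_do_ioctl(), used by
netsec further down, skips the running check. Roughly, the helper the
drivers now point at behaves like this sketch:

    #include <linux/netdevice.h>
    #include <linux/phy.h>

    static int sketch_do_ioctl_running(struct net_device *dev,
                                       struct ifreq *ifr, int cmd)
    {
        /* the guard every converted driver used to open-code */
        if (!netif_running(dev) || !dev->phydev)
            return -ENODEV;

        return phy_mii_ioctl(dev->phydev, ifr, cmd);
    }

(The ioremap_nocache to ioremap change in the same file is part of the
treewide retirement of that alias; the two have long been equivalent on
the architectures this driver runs on.)
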
diff --git a/drivers/net/ethernet/smsc/smsc9420.c b/drivers/net/ethernet/smsc/smsc9420.c
index a6962a41c3d2..7312e522c022 100644
--- a/drivers/net/ethernet/smsc/smsc9420.c
+++ b/drivers/net/ethernet/smsc/smsc9420.c
@@ -210,15 +210,6 @@ static int smsc9420_eeprom_reload(struct smsc9420_pdata *pd)
return -EIO;
}
-/* Standard ioctls for mii-tool */
-static int smsc9420_do_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
-{
- if (!netif_running(dev) || !dev->phydev)
- return -EINVAL;
-
- return phy_mii_ioctl(dev->phydev, ifr, cmd);
-}
-
static void smsc9420_ethtool_get_drvinfo(struct net_device *netdev,
struct ethtool_drvinfo *drvinfo)
{
@@ -1504,7 +1495,7 @@ static const struct net_device_ops smsc9420_netdev_ops = {
.ndo_start_xmit = smsc9420_hard_start_xmit,
.ndo_get_stats = smsc9420_get_stats,
.ndo_set_rx_mode = smsc9420_set_multicast_list,
- .ndo_do_ioctl = smsc9420_do_ioctl,
+ .ndo_do_ioctl = phy_do_ioctl_running,
.ndo_validate_addr = eth_validate_addr,
.ndo_set_mac_address = eth_mac_addr,
#ifdef CONFIG_NET_POLL_CONTROLLER
diff --git a/drivers/net/ethernet/socionext/netsec.c b/drivers/net/ethernet/socionext/netsec.c
index 869a498e3b5e..e8224b543dfc 100644
--- a/drivers/net/ethernet/socionext/netsec.c
+++ b/drivers/net/ethernet/socionext/netsec.c
@@ -243,6 +243,7 @@
NET_IP_ALIGN)
#define NETSEC_RX_BUF_NON_DATA (NETSEC_RXBUF_HEADROOM + \
SKB_DATA_ALIGN(sizeof(struct skb_shared_info)))
+#define NETSEC_RX_BUF_SIZE (PAGE_SIZE - NETSEC_RX_BUF_NON_DATA)
#define DESC_SZ sizeof(struct netsec_de)
@@ -719,7 +720,6 @@ static void *netsec_alloc_rx_data(struct netsec_priv *priv,
{
struct netsec_desc_ring *dring = &priv->desc_ring[NETSEC_RING_RX];
- enum dma_data_direction dma_dir;
struct page *page;
page = page_pool_dev_alloc_pages(dring->page_pool);
@@ -734,9 +734,7 @@ static void *netsec_alloc_rx_data(struct netsec_priv *priv,
/* Make sure the incoming payload fits in the page for XDP and non-XDP
* cases and reserve enough space for headroom + skb_shared_info
*/
- *desc_len = PAGE_SIZE - NETSEC_RX_BUF_NON_DATA;
- dma_dir = page_pool_get_dma_dir(dring->page_pool);
- dma_sync_single_for_device(priv->dev, *dma_handle, *desc_len, dma_dir);
+ *desc_len = NETSEC_RX_BUF_SIZE;
return page_address(page);
}
@@ -883,6 +881,8 @@ static u32 netsec_xdp_xmit_back(struct netsec_priv *priv, struct xdp_buff *xdp)
static u32 netsec_run_xdp(struct netsec_priv *priv, struct bpf_prog *prog,
struct xdp_buff *xdp)
{
+ struct netsec_desc_ring *dring = &priv->desc_ring[NETSEC_RING_RX];
+ unsigned int len = xdp->data_end - xdp->data;
u32 ret = NETSEC_XDP_PASS;
int err;
u32 act;
@@ -896,7 +896,9 @@ static u32 netsec_run_xdp(struct netsec_priv *priv, struct bpf_prog *prog,
case XDP_TX:
ret = netsec_xdp_xmit_back(priv, xdp);
if (ret != NETSEC_XDP_TX)
- xdp_return_buff(xdp);
+ __page_pool_put_page(dring->page_pool,
+ virt_to_head_page(xdp->data),
+ len, true);
break;
case XDP_REDIRECT:
err = xdp_do_redirect(priv->ndev, xdp, prog);
@@ -904,7 +906,9 @@ static u32 netsec_run_xdp(struct netsec_priv *priv, struct bpf_prog *prog,
ret = NETSEC_XDP_REDIR;
} else {
ret = NETSEC_XDP_CONSUMED;
- xdp_return_buff(xdp);
+ __page_pool_put_page(dring->page_pool,
+ virt_to_head_page(xdp->data),
+ len, true);
}
break;
default:
@@ -915,7 +919,9 @@ static u32 netsec_run_xdp(struct netsec_priv *priv, struct bpf_prog *prog,
/* fall through -- handle aborts by dropping packet */
case XDP_DROP:
ret = NETSEC_XDP_CONSUMED;
- xdp_return_buff(xdp);
+ __page_pool_put_page(dring->page_pool,
+ virt_to_head_page(xdp->data),
+ len, true);
break;
}
@@ -929,7 +935,6 @@ static int netsec_process_rx(struct netsec_priv *priv, int budget)
struct netsec_rx_pkt_info rx_info;
enum dma_data_direction dma_dir;
struct bpf_prog *xdp_prog;
- struct sk_buff *skb = NULL;
u16 xdp_xmit = 0;
u32 xdp_act = 0;
int done = 0;
@@ -943,7 +948,8 @@ static int netsec_process_rx(struct netsec_priv *priv, int budget)
struct netsec_de *de = dring->vaddr + (DESC_SZ * idx);
struct netsec_desc *desc = &dring->desc[idx];
struct page *page = virt_to_page(desc->addr);
- u32 xdp_result = XDP_PASS;
+ u32 xdp_result = NETSEC_XDP_PASS;
+ struct sk_buff *skb = NULL;
u16 pkt_len, desc_len;
dma_addr_t dma_handle;
struct xdp_buff xdp;
@@ -1014,7 +1020,8 @@ static int netsec_process_rx(struct netsec_priv *priv, int budget)
* cache state. Since we paid the allocation cost if
* building an skb fails try to put the page into cache
*/
- page_pool_recycle_direct(dring->page_pool, page);
+ __page_pool_put_page(dring->page_pool, page,
+ pkt_len, true);
netif_err(priv, drv, priv->ndev,
"rx failed to build skb\n");
break;
@@ -1272,17 +1279,19 @@ static int netsec_setup_rx_dring(struct netsec_priv *priv)
{
struct netsec_desc_ring *dring = &priv->desc_ring[NETSEC_RING_RX];
struct bpf_prog *xdp_prog = READ_ONCE(priv->xdp_prog);
- struct page_pool_params pp_params = { 0 };
+ struct page_pool_params pp_params = {
+ .order = 0,
+ /* internal DMA mapping in page_pool */
+ .flags = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV,
+ .pool_size = DESC_NUM,
+ .nid = NUMA_NO_NODE,
+ .dev = priv->dev,
+ .dma_dir = xdp_prog ? DMA_BIDIRECTIONAL : DMA_FROM_DEVICE,
+ .offset = NETSEC_RXBUF_HEADROOM,
+ .max_len = NETSEC_RX_BUF_SIZE,
+ };
int i, err;
- pp_params.order = 0;
- /* internal DMA mapping in page_pool */
- pp_params.flags = PP_FLAG_DMA_MAP;
- pp_params.pool_size = DESC_NUM;
- pp_params.nid = cpu_to_node(0);
- pp_params.dev = priv->dev;
- pp_params.dma_dir = xdp_prog ? DMA_BIDIRECTIONAL : DMA_FROM_DEVICE;
-
dring->page_pool = page_pool_create(&pp_params);
if (IS_ERR(dring->page_pool)) {
err = PTR_ERR(dring->page_pool);
@@ -1731,12 +1740,6 @@ static int netsec_netdev_set_features(struct net_device *ndev,
return 0;
}
-static int netsec_netdev_ioctl(struct net_device *ndev, struct ifreq *ifr,
- int cmd)
-{
- return phy_mii_ioctl(ndev->phydev, ifr, cmd);
-}
-
static int netsec_xdp_xmit(struct net_device *ndev, int n,
struct xdp_frame **frames, u32 flags)
{
@@ -1821,7 +1824,7 @@ static const struct net_device_ops netsec_netdev_ops = {
.ndo_set_features = netsec_netdev_set_features,
.ndo_set_mac_address = eth_mac_addr,
.ndo_validate_addr = eth_validate_addr,
- .ndo_do_ioctl = netsec_netdev_ioctl,
+ .ndo_do_ioctl = phy_do_ioctl,
.ndo_xdp_xmit = netsec_xdp_xmit,
.ndo_bpf = netsec_xdp,
};
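
Two related changes in netsec: the pool is now created with
PP_FLAG_DMA_SYNC_DEV, so page_pool itself syncs recycled pages for the
device using the new .offset/.max_len bounds, and every return path
(XDP_TX failure, failed redirect, XDP_DROP, failed skb build) goes
through __page_pool_put_page() with the actual packet length, so only
the bytes the device may have dirtied get synced. A sketch of the
setup; the headroom and pool-size values are illustrative, not
netsec's:

    #include <linux/dma-mapping.h>
    #include <linux/bpf.h>
    #include <net/page_pool.h>

    static struct page_pool *sketch_create_pool(struct device *dev, bool xdp)
    {
        struct page_pool_params pp = {
            .order = 0,
            /* pool maps pages and syncs them for device on recycle */
            .flags = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV,
            .pool_size = 256,
            .nid = NUMA_NO_NODE,
            .dev = dev,
            .dma_dir = xdp ? DMA_BIDIRECTIONAL : DMA_FROM_DEVICE,
            /* sync window: [offset, offset + max_len) of each page */
            .offset = XDP_PACKET_HEADROOM,
            .max_len = PAGE_SIZE - XDP_PACKET_HEADROOM,
        };

        return page_pool_create(&pp);	/* ERR_PTR() on failure */
    }
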
diff --git a/drivers/net/ethernet/socionext/sni_ave.c b/drivers/net/ethernet/socionext/sni_ave.c
index f7e927ad67fa..b7032422393f 100644
--- a/drivers/net/ethernet/socionext/sni_ave.c
+++ b/drivers/net/ethernet/socionext/sni_ave.c
@@ -424,16 +424,22 @@ static void ave_ethtool_get_wol(struct net_device *ndev,
phy_ethtool_get_wol(ndev->phydev, wol);
}
-static int ave_ethtool_set_wol(struct net_device *ndev,
- struct ethtool_wolinfo *wol)
+static int __ave_ethtool_set_wol(struct net_device *ndev,
+ struct ethtool_wolinfo *wol)
{
- int ret;
-
if (!ndev->phydev ||
(wol->wolopts & (WAKE_ARP | WAKE_MAGICSECURE)))
return -EOPNOTSUPP;
- ret = phy_ethtool_set_wol(ndev->phydev, wol);
+ return phy_ethtool_set_wol(ndev->phydev, wol);
+}
+
+static int ave_ethtool_set_wol(struct net_device *ndev,
+ struct ethtool_wolinfo *wol)
+{
+ int ret;
+
+ ret = __ave_ethtool_set_wol(ndev, wol);
if (!ret)
device_set_wakeup_enable(&ndev->dev, !!wol->wolopts);
@@ -1216,7 +1222,7 @@ static int ave_init(struct net_device *ndev)
/* set wol initial state disabled */
wol.wolopts = 0;
- ave_ethtool_set_wol(ndev, &wol);
+ __ave_ethtool_set_wol(ndev, &wol);
if (!phy_interface_is_rgmii(phydev))
phy_set_max_speed(phydev, SPEED_100);
@@ -1768,7 +1774,7 @@ static int ave_resume(struct device *dev)
ave_ethtool_get_wol(ndev, &wol);
wol.wolopts = priv->wolopts;
- ave_ethtool_set_wol(ndev, &wol);
+ __ave_ethtool_set_wol(ndev, &wol);
if (ndev->phydev) {
ret = phy_resume(ndev->phydev);
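
The split exists because ave_ethtool_set_wol() does two jobs: program
the PHY and record the wakeup policy via device_set_wakeup_enable().
The second is only wanted when userspace asks through ethtool, not when
the driver programs WoL internally during init or resume, so internal
callers now use the double-underscore helper. The shape of the pattern,
as a sketch rather than the driver's exact code:

    #include <linux/ethtool.h>
    #include <linux/phy.h>

    static int __sketch_set_wol(struct net_device *ndev,
                                struct ethtool_wolinfo *wol)
    {
        return phy_ethtool_set_wol(ndev->phydev, wol);	/* hardware only */
    }

    static int sketch_set_wol(struct net_device *ndev,
                              struct ethtool_wolinfo *wol)
    {
        int ret = __sketch_set_wol(ndev, wol);

        /* ethtool path only: also record the wakeup policy */
        if (!ret)
            device_set_wakeup_enable(&ndev->dev, !!wol->wolopts);
        return ret;
    }
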
diff --git a/drivers/net/ethernet/stmicro/stmmac/common.h b/drivers/net/ethernet/stmicro/stmmac/common.h
index 94f94686cf7d..487099092693 100644
--- a/drivers/net/ethernet/stmicro/stmmac/common.h
+++ b/drivers/net/ethernet/stmicro/stmmac/common.h
@@ -363,6 +363,12 @@ struct dma_features {
unsigned int dvlan;
unsigned int l3l4fnum;
unsigned int arpoffsel;
+ /* TSN Features */
+ unsigned int estwid;
+ unsigned int estdep;
+ unsigned int estsel;
+ unsigned int fpesel;
+ unsigned int tbssel;
};
/* RX Buffer size must be multiple of 4/8/16 bytes */
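
These new dma_features fields cache the TSN capabilities the MAC
advertises: EST is 802.1Qbv gate scheduling (estwid/estdep describe the
gate control list word width and depth), FPE is 802.1Qbu frame
preemption, and TBS is time-based send. Later hunks parse them out of
HW_FEATURE3; callers are expected to gate configuration on them,
roughly as in this sketch (struct stmmac_est and the est_configure op
wiring are assumptions based on the prototypes added later in this
patch):

    static int sketch_enable_est(struct stmmac_priv *priv,
                                 struct stmmac_est *cfg)
    {
        if (!priv->dma_cap.estsel)	/* no EST engine in this MAC */
            return -EOPNOTSUPP;

        return priv->hw->mac->est_configure(priv->ioaddr, cfg,
                                            priv->plat->clk_ptp_rate);
    }
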
diff --git a/drivers/net/ethernet/stmicro/stmmac/descs.h b/drivers/net/ethernet/stmicro/stmmac/descs.h
index 9f0b9a9e63b3..49d6a866244f 100644
--- a/drivers/net/ethernet/stmicro/stmmac/descs.h
+++ b/drivers/net/ethernet/stmicro/stmmac/descs.h
@@ -171,6 +171,15 @@ struct dma_extended_desc {
__le32 des7; /* Tx/Rx Timestamp High */
};
+/* Enhanced descriptor for TBS */
+struct dma_edesc {
+ __le32 des4;
+ __le32 des5;
+ __le32 des6;
+ __le32 des7;
+ struct dma_desc basic;
+};
+
/* Transmit checksum insertion control */
#define TX_CIC_FULL 3 /* Include IP header and pseudoheader */
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-dwc-qos-eth.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-dwc-qos-eth.c
index dd9967aeda22..2342d497348e 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-dwc-qos-eth.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-dwc-qos-eth.c
@@ -40,7 +40,7 @@ struct tegra_eqos {
static int dwc_eth_dwmac_config_dt(struct platform_device *pdev,
struct plat_stmmacenet_data *plat_dat)
{
- struct device_node *np = pdev->dev.of_node;
+ struct device *dev = &pdev->dev;
u32 burst_map = 0;
u32 bit_index = 0;
u32 a_index = 0;
@@ -52,9 +52,10 @@ static int dwc_eth_dwmac_config_dt(struct platform_device *pdev,
return -ENOMEM;
}
- plat_dat->axi->axi_lpi_en = of_property_read_bool(np, "snps,en-lpi");
- if (of_property_read_u32(np, "snps,write-requests",
- &plat_dat->axi->axi_wr_osr_lmt)) {
+ plat_dat->axi->axi_lpi_en = device_property_read_bool(dev,
+ "snps,en-lpi");
+ if (device_property_read_u32(dev, "snps,write-requests",
+ &plat_dat->axi->axi_wr_osr_lmt)) {
/**
* Since the register has a reset value of 1, if property
* is missing, default to 1.
@@ -68,8 +69,8 @@ static int dwc_eth_dwmac_config_dt(struct platform_device *pdev,
plat_dat->axi->axi_wr_osr_lmt--;
}
- if (of_property_read_u32(np, "snps,read-requests",
- &plat_dat->axi->axi_rd_osr_lmt)) {
+ if (device_property_read_u32(dev, "snps,read-requests",
+ &plat_dat->axi->axi_rd_osr_lmt)) {
/**
* Since the register has a reset value of 1, if property
* is missing, default to 1.
@@ -82,7 +83,7 @@ static int dwc_eth_dwmac_config_dt(struct platform_device *pdev,
*/
plat_dat->axi->axi_rd_osr_lmt--;
}
- of_property_read_u32(np, "snps,burst-map", &burst_map);
+ device_property_read_u32(dev, "snps,burst-map", &burst_map);
/* converts burst-map bitmask to burst array */
for (bit_index = 0; bit_index < 7; bit_index++) {
@@ -270,6 +271,7 @@ static void *tegra_eqos_probe(struct platform_device *pdev,
struct plat_stmmacenet_data *data,
struct stmmac_resources *res)
{
+ struct device *dev = &pdev->dev;
struct tegra_eqos *eqos;
int err;
@@ -282,6 +284,9 @@ static void *tegra_eqos_probe(struct platform_device *pdev,
eqos->dev = &pdev->dev;
eqos->regs = res->addr;
+ if (!is_of_node(dev->fwnode))
+ goto bypass_clk_reset_gpio;
+
eqos->clk_master = devm_clk_get(&pdev->dev, "master_bus");
if (IS_ERR(eqos->clk_master)) {
err = PTR_ERR(eqos->clk_master);
@@ -354,6 +359,7 @@ static void *tegra_eqos_probe(struct platform_device *pdev,
usleep_range(2000, 4000);
+bypass_clk_reset_gpio:
data->fix_mac_speed = tegra_eqos_fix_speed;
data->init = tegra_eqos_init;
data->bsp_priv = eqos;
@@ -421,7 +427,7 @@ static int dwc_eth_dwmac_probe(struct platform_device *pdev)
void *priv;
int ret;
- data = of_device_get_match_data(&pdev->dev);
+ data = device_get_match_data(&pdev->dev);
memset(&stmmac_res, 0, sizeof(struct stmmac_resources));
@@ -478,7 +484,7 @@ static int dwc_eth_dwmac_remove(struct platform_device *pdev)
const struct dwc_eth_dwmac_data *data;
int err;
- data = of_device_get_match_data(&pdev->dev);
+ data = device_get_match_data(&pdev->dev);
err = stmmac_dvr_remove(&pdev->dev);
if (err < 0)
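
Swapping of_property_read_*() and of_device_get_match_data() for the
device_property_*() and device_get_match_data() equivalents makes this
glue layer firmware-agnostic: the same code now picks the bindings up
from devicetree or ACPI, which is also why the Tegra clock/reset/GPIO
lookups above are bypassed when the fwnode is not an OF node. The two
families are drop-in compatible, e.g.:

    #include <linux/property.h>

    /* works unchanged whether the device came from DT or ACPI _DSD */
    static u32 sketch_read_write_requests(struct device *dev)
    {
        u32 val;

        /* the register resets to 1, so default to 1 when absent */
        if (device_property_read_u32(dev, "snps,write-requests", &val))
            val = 1;

        return val;
    }
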
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-mediatek.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-mediatek.c
index bdb80421acac..9e4b83832938 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-mediatek.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-mediatek.c
@@ -55,6 +55,8 @@ struct mediatek_dwmac_plat_data {
struct regmap *peri_regmap;
struct device *dev;
phy_interface_t phy_mode;
+ int num_clks_to_config;
+ bool rmii_clk_from_mac;
bool rmii_rxc;
};
@@ -73,21 +75,33 @@ struct mediatek_dwmac_variant {
/* list of clocks required for mac */
static const char * const mt2712_dwmac_clk_l[] = {
- "axi", "apb", "mac_main", "ptp_ref"
+ "axi", "apb", "mac_main", "ptp_ref", "rmii_internal"
};
static int mt2712_set_interface(struct mediatek_dwmac_plat_data *plat)
{
+ int rmii_clk_from_mac = plat->rmii_clk_from_mac ? RMII_CLK_SRC_INTERNAL : 0;
int rmii_rxc = plat->rmii_rxc ? RMII_CLK_SRC_RXC : 0;
u32 intf_val = 0;
+ /* The clock labeled "rmii_internal" in mt2712_dwmac_clk_l is needed
+ * only in RMII mode when the MAC provides the reference clock; it is
+ * unused for RGMII/MII, and for RMII when the PHY provides the clock.
+ * num_clks_to_config holds the number of clocks that actually need to
+ * be configured: (plat->variant->num_clks - 1) by default, plus one
+ * more in the rmii_clk_from_mac case.
+ */
+ plat->num_clks_to_config = plat->variant->num_clks - 1;
+
/* select phy interface in top control domain */
switch (plat->phy_mode) {
case PHY_INTERFACE_MODE_MII:
intf_val |= PHY_INTF_MII;
break;
case PHY_INTERFACE_MODE_RMII:
- intf_val |= (PHY_INTF_RMII | rmii_rxc);
+ if (plat->rmii_clk_from_mac)
+ plat->num_clks_to_config++;
+ intf_val |= (PHY_INTF_RMII | rmii_rxc | rmii_clk_from_mac);
break;
case PHY_INTERFACE_MODE_RGMII:
case PHY_INTERFACE_MODE_RGMII_TXID:
@@ -173,35 +187,50 @@ static int mt2712_set_delay(struct mediatek_dwmac_plat_data *plat)
delay_val |= FIELD_PREP(ETH_DLY_RXC_INV, mac_delay->rx_inv);
break;
case PHY_INTERFACE_MODE_RMII:
- /* the rmii reference clock is from external phy,
- * and the property "rmii_rxc" indicates which pin(TXC/RXC)
- * the reference clk is connected to. The reference clock is a
- * received signal, so rx_delay/rx_inv are used to indicate
- * the reference clock timing adjustment
- */
- if (plat->rmii_rxc) {
- /* the rmii reference clock from outside is connected
- * to RXC pin, the reference clock will be adjusted
- * by RXC delay macro circuit.
- */
- delay_val |= FIELD_PREP(ETH_DLY_RXC_ENABLE, !!mac_delay->rx_delay);
- delay_val |= FIELD_PREP(ETH_DLY_RXC_STAGES, mac_delay->rx_delay);
- delay_val |= FIELD_PREP(ETH_DLY_RXC_INV, mac_delay->rx_inv);
- } else {
- /* the rmii reference clock from outside is connected
- * to TXC pin, the reference clock will be adjusted
- * by TXC delay macro circuit.
+ if (plat->rmii_clk_from_mac) {
+ /* case 1: mac provides the rmii reference clock,
+ * and the clock is output on the TXC pin.
+ * The egress timing can be adjusted by the GTXC delay macro circuit.
+ * The ingress timing can be adjusted by the TXC delay macro circuit.
*/
delay_val |= FIELD_PREP(ETH_DLY_TXC_ENABLE, !!mac_delay->rx_delay);
delay_val |= FIELD_PREP(ETH_DLY_TXC_STAGES, mac_delay->rx_delay);
delay_val |= FIELD_PREP(ETH_DLY_TXC_INV, mac_delay->rx_inv);
+
+ delay_val |= FIELD_PREP(ETH_DLY_GTXC_ENABLE, !!mac_delay->tx_delay);
+ delay_val |= FIELD_PREP(ETH_DLY_GTXC_STAGES, mac_delay->tx_delay);
+ delay_val |= FIELD_PREP(ETH_DLY_GTXC_INV, mac_delay->tx_inv);
+ } else {
+ /* case 2: the rmii reference clock is from external phy,
+ * and the property "rmii_rxc" indicates which pin(TXC/RXC)
+ * the reference clk is connected to. The reference clock is a
+ * received signal, so rx_delay/rx_inv are used to indicate
+ * the reference clock timing adjustment
+ */
+ if (plat->rmii_rxc) {
+ /* the rmii reference clock from outside is connected
+ * to RXC pin, the reference clock will be adjusted
+ * by RXC delay macro circuit.
+ */
+ delay_val |= FIELD_PREP(ETH_DLY_RXC_ENABLE, !!mac_delay->rx_delay);
+ delay_val |= FIELD_PREP(ETH_DLY_RXC_STAGES, mac_delay->rx_delay);
+ delay_val |= FIELD_PREP(ETH_DLY_RXC_INV, mac_delay->rx_inv);
+ } else {
+ /* the rmii reference clock from outside is connected
+ * to TXC pin, the reference clock will be adjusted
+ * by TXC delay macro circuit.
+ */
+ delay_val |= FIELD_PREP(ETH_DLY_TXC_ENABLE, !!mac_delay->rx_delay);
+ delay_val |= FIELD_PREP(ETH_DLY_TXC_STAGES, mac_delay->rx_delay);
+ delay_val |= FIELD_PREP(ETH_DLY_TXC_INV, mac_delay->rx_inv);
+ }
+ /* tx_inv will invert the tx clock inside the mac relative to the
+ * reference clock from the external phy,
+ * and this bit is located in the same register as the fine-tune bits
+ */
+ if (mac_delay->tx_inv)
+ fine_val = ETH_RMII_DLY_TX_INV;
}
- /* tx_inv will invert the tx clock inside the mac relative to the
- * reference clock from the external phy,
- * and this bit is located in the same register as the fine-tune bits
- */
- if (mac_delay->tx_inv)
- fine_val = ETH_RMII_DLY_TX_INV;
break;
case PHY_INTERFACE_MODE_RGMII:
case PHY_INTERFACE_MODE_RGMII_TXID:
@@ -278,6 +307,7 @@ static int mediatek_dwmac_config_dt(struct mediatek_dwmac_plat_data *plat)
mac_delay->tx_inv = of_property_read_bool(plat->np, "mediatek,txc-inverse");
mac_delay->rx_inv = of_property_read_bool(plat->np, "mediatek,rxc-inverse");
plat->rmii_rxc = of_property_read_bool(plat->np, "mediatek,rmii-rxc");
+ plat->rmii_clk_from_mac = of_property_read_bool(plat->np, "mediatek,rmii-clk-from-mac");
return 0;
}
@@ -294,6 +324,8 @@ static int mediatek_dwmac_clk_init(struct mediatek_dwmac_plat_data *plat)
for (i = 0; i < num; i++)
plat->clks[i].id = variant->clk_list[i];
+ plat->num_clks_to_config = variant->num_clks;
+
return devm_clk_bulk_get(plat->dev, num, plat->clks);
}
@@ -321,7 +353,7 @@ static int mediatek_dwmac_init(struct platform_device *pdev, void *priv)
return ret;
}
- ret = clk_bulk_prepare_enable(variant->num_clks, plat->clks);
+ ret = clk_bulk_prepare_enable(plat->num_clks_to_config, plat->clks);
if (ret) {
dev_err(plat->dev, "failed to enable clks, err = %d\n", ret);
return ret;
@@ -336,9 +368,8 @@ static int mediatek_dwmac_init(struct platform_device *pdev, void *priv)
static void mediatek_dwmac_exit(struct platform_device *pdev, void *priv)
{
struct mediatek_dwmac_plat_data *plat = priv;
- const struct mediatek_dwmac_variant *variant = plat->variant;
- clk_bulk_disable_unprepare(variant->num_clks, plat->clks);
+ clk_bulk_disable_unprepare(plat->num_clks_to_config, plat->clks);
pm_runtime_put_sync(&pdev->dev);
pm_runtime_disable(&pdev->dev);
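
The trick that makes num_clks_to_config work is ordering:
clk_bulk_prepare_enable() and clk_bulk_disable_unprepare() act on the
first n entries of the array, and "rmii_internal" is deliberately last
in mt2712_dwmac_clk_l, so including or excluding it is purely a count
adjustment. A sketch of the idea:

    #include <linux/clk.h>

    static const char * const sketch_clk_names[] = {
        "axi", "apb", "mac_main", "ptp_ref", "rmii_internal",
    };

    static int sketch_enable_clks(struct clk_bulk_data *clks,
                                  bool rmii_clk_from_mac)
    {
        /* default: everything except the optional trailing clock */
        int num = ARRAY_SIZE(sketch_clk_names) - 1;

        if (rmii_clk_from_mac)
            num++;	/* MAC drives the RMII reference clock */

        return clk_bulk_prepare_enable(num, clks);
    }
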
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c
index 01b484cb177e..58e0511badba 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c
@@ -335,14 +335,30 @@ static void sun8i_dwmac_dump_mac_regs(struct mac_device_info *hw,
}
}
-static void sun8i_dwmac_enable_dma_irq(void __iomem *ioaddr, u32 chan)
+static void sun8i_dwmac_enable_dma_irq(void __iomem *ioaddr, u32 chan,
+ bool rx, bool tx)
{
- writel(EMAC_RX_INT | EMAC_TX_INT, ioaddr + EMAC_INT_EN);
+ u32 value = readl(ioaddr + EMAC_INT_EN);
+
+ if (rx)
+ value |= EMAC_RX_INT;
+ if (tx)
+ value |= EMAC_TX_INT;
+
+ writel(value, ioaddr + EMAC_INT_EN);
}
-static void sun8i_dwmac_disable_dma_irq(void __iomem *ioaddr, u32 chan)
+static void sun8i_dwmac_disable_dma_irq(void __iomem *ioaddr, u32 chan,
+ bool rx, bool tx)
{
- writel(0, ioaddr + EMAC_INT_EN);
+ u32 value = readl(ioaddr + EMAC_INT_EN);
+
+ if (rx)
+ value &= ~EMAC_RX_INT;
+ if (tx)
+ value &= ~EMAC_TX_INT;
+
+ writel(value, ioaddr + EMAC_INT_EN);
}
static void sun8i_dwmac_dma_start_tx(void __iomem *ioaddr, u32 chan)
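
The same transformation is applied to every stmmac DMA backend in this
series (dwmac4_lib.c and dwmac_lib.c below get the identical
treatment): the hooks become read-modify-write accesses to the
interrupt-enable register with independent rx/tx arguments, which is
what lets NAPI throttle one direction without blinding the other. The
common shape, as a sketch with the enable bits passed in:

    #include <linux/io.h>

    static void sketch_set_dma_irq(void __iomem *reg, u32 rx_bits,
                                   u32 tx_bits, bool rx, bool tx, bool on)
    {
        u32 value = readl(reg);
        u32 bits = (rx ? rx_bits : 0) | (tx ? tx_bits : 0);

        if (on)
            value |= bits;	/* enable the requested directions */
        else
            value &= ~bits;	/* mask them, leaving the rest intact */

        writel(value, reg);
    }
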
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4.h b/drivers/net/ethernet/stmicro/stmmac/dwmac4.h
index 2dc70d104161..af50af27550b 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac4.h
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4.h
@@ -64,6 +64,8 @@
#define GMAC_RXQCTRL_MCBCQEN_SHIFT 20
#define GMAC_RXQCTRL_TACPQE BIT(21)
#define GMAC_RXQCTRL_TACPQE_SHIFT 21
+#define GMAC_RXQCTRL_FPRQ GENMASK(26, 24)
+#define GMAC_RXQCTRL_FPRQ_SHIFT 24
/* MAC Packet Filtering */
#define GMAC_PACKET_FILTER_PR BIT(0)
@@ -176,6 +178,8 @@ enum power_event {
#define GMAC_CONFIG_SARC GENMASK(30, 28)
#define GMAC_CONFIG_SARC_SHIFT 28
#define GMAC_CONFIG_IPC BIT(27)
+#define GMAC_CONFIG_IPG GENMASK(26, 24)
+#define GMAC_CONFIG_IPG_SHIFT 24
#define GMAC_CONFIG_2K BIT(22)
#define GMAC_CONFIG_ACS BIT(20)
#define GMAC_CONFIG_BE BIT(18)
@@ -183,6 +187,7 @@ enum power_event {
#define GMAC_CONFIG_JE BIT(16)
#define GMAC_CONFIG_PS BIT(15)
#define GMAC_CONFIG_FES BIT(14)
+#define GMAC_CONFIG_FES_SHIFT 14
#define GMAC_CONFIG_DM BIT(13)
#define GMAC_CONFIG_LM BIT(12)
#define GMAC_CONFIG_DCRS BIT(9)
@@ -190,6 +195,9 @@ enum power_event {
#define GMAC_CONFIG_RE BIT(0)
/* MAC extended config */
+#define GMAC_CONFIG_EIPG GENMASK(29, 25)
+#define GMAC_CONFIG_EIPG_SHIFT 25
+#define GMAC_CONFIG_EIPG_EN BIT(24)
#define GMAC_CONFIG_HDSMS GENMASK(22, 20)
#define GMAC_CONFIG_HDSMS_SHIFT 20
#define GMAC_CONFIG_HDSMS_256 (0x2 << GMAC_CONFIG_HDSMS_SHIFT)
@@ -231,6 +239,11 @@ enum power_event {
/* MAC HW features3 bitmap */
#define GMAC_HW_FEAT_ASP GENMASK(29, 28)
+#define GMAC_HW_FEAT_TBSSEL BIT(27)
+#define GMAC_HW_FEAT_FPESEL BIT(26)
+#define GMAC_HW_FEAT_ESTWID GENMASK(21, 20)
+#define GMAC_HW_FEAT_ESTDEP GENMASK(19, 17)
+#define GMAC_HW_FEAT_ESTSEL BIT(16)
#define GMAC_HW_FEAT_FRPES GENMASK(14, 13)
#define GMAC_HW_FEAT_FRPBS GENMASK(12, 11)
#define GMAC_HW_FEAT_FRPSEL BIT(10)
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c b/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c
index 40ca00e596dd..f0c0ea616032 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c
@@ -984,6 +984,8 @@ const struct stmmac_ops dwmac410_ops = {
.set_arp_offload = dwmac4_set_arp_offload,
.config_l3_filter = dwmac4_config_l3_filter,
.config_l4_filter = dwmac4_config_l4_filter,
+ .est_configure = dwmac5_est_configure,
+ .fpe_configure = dwmac5_fpe_configure,
};
const struct stmmac_ops dwmac510_ops = {
@@ -1027,6 +1029,8 @@ const struct stmmac_ops dwmac510_ops = {
.set_arp_offload = dwmac4_set_arp_offload,
.config_l3_filter = dwmac4_config_l3_filter,
.config_l4_filter = dwmac4_config_l4_filter,
+ .est_configure = dwmac5_est_configure,
+ .fpe_configure = dwmac5_fpe_configure,
};
int dwmac4_setup(struct stmmac_priv *priv)
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c b/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c
index 3e14da69f378..eff82065a501 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c
@@ -10,6 +10,7 @@
#include <linux/stmmac.h>
#include "common.h"
+#include "dwmac4.h"
#include "dwmac4_descs.h"
static int dwmac4_wrback_get_tx_status(void *data, struct stmmac_extra_stats *x,
@@ -505,6 +506,14 @@ static void dwmac4_set_sec_addr(struct dma_desc *p, dma_addr_t addr)
p->des3 = cpu_to_le32(upper_32_bits(addr) | RDES3_BUFFER2_VALID_ADDR);
}
+static void dwmac4_set_tbs(struct dma_edesc *p, u32 sec, u32 nsec)
+{
+ p->des4 = cpu_to_le32((sec & TDES4_LT) | TDES4_LTV);
+ p->des5 = cpu_to_le32(nsec & TDES5_LT);
+ p->des6 = 0;
+ p->des7 = 0;
+}
+
const struct stmmac_desc_ops dwmac4_desc_ops = {
.tx_status = dwmac4_wrback_get_tx_status,
.rx_status = dwmac4_wrback_get_rx_status,
@@ -534,6 +543,7 @@ const struct stmmac_desc_ops dwmac4_desc_ops = {
.set_vlan = dwmac4_set_vlan,
.get_rx_header_len = dwmac4_get_rx_header_len,
.set_sec_addr = dwmac4_set_sec_addr,
+ .set_tbs = dwmac4_set_tbs,
};
const struct stmmac_mode_ops dwmac4_ring_mode_ops = {
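
dwmac4_set_tbs() packs the launch time into the four extra words of the
new dma_edesc: the low 8 bits of the seconds value land in des4
together with the valid bit, and the nanoseconds land in des5 bits
31:8, i.e. with their low 8 bits truncated. That gives a 256 ns
launch-time granularity inside a window that wraps every 256 s. A
worked sketch of the encoding (macro names illustrative):

    #include <linux/bits.h>

    #define SK_TDES4_LTV	BIT(31)		/* launch time valid */
    #define SK_TDES4_LT	GENMASK(7, 0)	/* seconds, low 8 bits */
    #define SK_TDES5_LT	GENMASK(31, 8)	/* ns in 256 ns steps */

    static void sketch_encode_tbs(u32 *des4, u32 *des5, u32 sec, u32 nsec)
    {
        *des4 = (sec & SK_TDES4_LT) | SK_TDES4_LTV;
        *des5 = nsec & SK_TDES5_LT;	/* low 8 bits dropped */
    }
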
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.h b/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.h
index 6d92109dc9aa..6da070ccd737 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.h
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.h
@@ -73,6 +73,13 @@
#define TDES3_CONTEXT_TYPE BIT(30)
#define TDES3_CONTEXT_TYPE_SHIFT 30
+/* TDES4 */
+#define TDES4_LTV BIT(31)
+#define TDES4_LT GENMASK(7, 0)
+
+/* TDES5 */
+#define TDES5_LT GENMASK(31, 8)
+
/* TDS3 use for both format (read and write back) */
#define TDES3_OWN BIT(31)
#define TDES3_OWN_SHIFT 31
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.c b/drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.c
index c15409030710..bb29bfcd62c3 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.c
@@ -404,6 +404,11 @@ static void dwmac4_get_hw_feature(void __iomem *ioaddr,
/* 5.10 Features */
dma_cap->asp = (hw_cap & GMAC_HW_FEAT_ASP) >> 28;
+ dma_cap->tbssel = (hw_cap & GMAC_HW_FEAT_TBSSEL) >> 27;
+ dma_cap->fpesel = (hw_cap & GMAC_HW_FEAT_FPESEL) >> 26;
+ dma_cap->estwid = (hw_cap & GMAC_HW_FEAT_ESTWID) >> 20;
+ dma_cap->estdep = (hw_cap & GMAC_HW_FEAT_ESTDEP) >> 17;
+ dma_cap->estsel = (hw_cap & GMAC_HW_FEAT_ESTSEL) >> 16;
dma_cap->frpes = (hw_cap & GMAC_HW_FEAT_FRPES) >> 13;
dma_cap->frpbs = (hw_cap & GMAC_HW_FEAT_FRPBS) >> 11;
dma_cap->frpsel = (hw_cap & GMAC_HW_FEAT_FRPSEL) >> 10;
@@ -467,6 +472,25 @@ static void dwmac4_enable_sph(void __iomem *ioaddr, bool en, u32 chan)
writel(value, ioaddr + DMA_CHAN_CONTROL(chan));
}
+static int dwmac4_enable_tbs(void __iomem *ioaddr, bool en, u32 chan)
+{
+ u32 value = readl(ioaddr + DMA_CHAN_TX_CONTROL(chan));
+
+ if (en)
+ value |= DMA_CONTROL_EDSE;
+ else
+ value &= ~DMA_CONTROL_EDSE;
+
+ writel(value, ioaddr + DMA_CHAN_TX_CONTROL(chan));
+
+ value = readl(ioaddr + DMA_CHAN_TX_CONTROL(chan)) & DMA_CONTROL_EDSE;
+ if (en && !value)
+ return -EIO;
+
+ writel(DMA_TBS_DEF_FTOS, ioaddr + DMA_TBS_CTRL);
+ return 0;
+}
+
const struct stmmac_dma_ops dwmac4_dma_ops = {
.reset = dwmac4_dma_reset,
.init = dwmac4_dma_init,
@@ -523,4 +547,5 @@ const struct stmmac_dma_ops dwmac410_dma_ops = {
.qmode = dwmac4_qmode,
.set_bfsize = dwmac4_set_bfsize,
.enable_sph = dwmac4_enable_sph,
+ .enable_tbs = dwmac4_enable_tbs,
};
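
Note the read-back in dwmac4_enable_tbs(): on parts synthesized without
TBS the EDSE bit reads back as zero, so the -EIO return lets callers
detect the missing capability per channel instead of silently queueing
enhanced descriptors the DMA will never honour. A caller-side sketch,
assuming the dma_ops table with the enable_tbs hook added above:

    static bool sketch_chan_has_tbs(const struct stmmac_dma_ops *ops,
                                    void __iomem *ioaddr, u32 chan)
    {
        /* enable_tbs() verifies EDSE stuck; -EIO means no TBS here */
        return ops->enable_tbs && !ops->enable_tbs(ioaddr, true, chan);
    }
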
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.h b/drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.h
index 589931795847..8391ca63d943 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.h
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.h
@@ -22,6 +22,7 @@
#define DMA_DEBUG_STATUS_1 0x00001010
#define DMA_DEBUG_STATUS_2 0x00001014
#define DMA_AXI_BUS_MODE 0x00001028
+#define DMA_TBS_CTRL 0x00001050
/* DMA Bus Mode bitmap */
#define DMA_BUS_MODE_SFT_RESET BIT(0)
@@ -82,6 +83,11 @@
#define DMA_AXI_BURST_LEN_MASK 0x000000FE
+/* DMA TBS Control */
+#define DMA_TBS_FTOS GENMASK(31, 8)
+#define DMA_TBS_FTOV BIT(0)
+#define DMA_TBS_DEF_FTOS (DMA_TBS_FTOS | DMA_TBS_FTOV)
+
/* Following DMA defines are channel oriented */
#define DMA_CHAN_BASE_ADDR 0x00001100
#define DMA_CHAN_BASE_OFFSET 0x80
@@ -114,6 +120,7 @@
#define DMA_CONTROL_MSS_MASK GENMASK(13, 0)
/* DMA Tx Channel X Control register defines */
+#define DMA_CONTROL_EDSE BIT(28)
#define DMA_CONTROL_TSE BIT(12)
#define DMA_CONTROL_OSP BIT(4)
#define DMA_CONTROL_ST BIT(0)
@@ -168,6 +175,8 @@
/* DMA default interrupt mask for 4.00 */
#define DMA_CHAN_INTR_DEFAULT_MASK (DMA_CHAN_INTR_NORMAL | \
DMA_CHAN_INTR_ABNORMAL)
+#define DMA_CHAN_INTR_DEFAULT_RX (DMA_CHAN_INTR_ENA_RIE)
+#define DMA_CHAN_INTR_DEFAULT_TX (DMA_CHAN_INTR_ENA_TIE)
#define DMA_CHAN_INTR_NORMAL_4_10 (DMA_CHAN_INTR_ENA_NIE_4_10 | \
DMA_CHAN_INTR_ENA_RIE | \
@@ -178,6 +187,8 @@
/* DMA default interrupt mask for 4.10a */
#define DMA_CHAN_INTR_DEFAULT_MASK_4_10 (DMA_CHAN_INTR_NORMAL_4_10 | \
DMA_CHAN_INTR_ABNORMAL_4_10)
+#define DMA_CHAN_INTR_DEFAULT_RX_4_10 (DMA_CHAN_INTR_ENA_RIE)
+#define DMA_CHAN_INTR_DEFAULT_TX_4_10 (DMA_CHAN_INTR_ENA_TIE)
/* channel 0 specific fields */
#define DMA_CHAN0_DBG_STAT_TPS GENMASK(15, 12)
@@ -186,9 +197,10 @@
#define DMA_CHAN0_DBG_STAT_RPS_SHIFT 8
int dwmac4_dma_reset(void __iomem *ioaddr);
-void dwmac4_enable_dma_irq(void __iomem *ioaddr, u32 chan);
-void dwmac410_enable_dma_irq(void __iomem *ioaddr, u32 chan);
-void dwmac4_disable_dma_irq(void __iomem *ioaddr, u32 chan);
+void dwmac4_enable_dma_irq(void __iomem *ioaddr, u32 chan, bool rx, bool tx);
+void dwmac410_enable_dma_irq(void __iomem *ioaddr, u32 chan, bool rx, bool tx);
+void dwmac4_disable_dma_irq(void __iomem *ioaddr, u32 chan, bool rx, bool tx);
+void dwmac410_disable_dma_irq(void __iomem *ioaddr, u32 chan, bool rx, bool tx);
void dwmac4_dma_start_tx(void __iomem *ioaddr, u32 chan);
void dwmac4_dma_stop_tx(void __iomem *ioaddr, u32 chan);
void dwmac4_dma_start_rx(void __iomem *ioaddr, u32 chan);
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4_lib.c b/drivers/net/ethernet/stmicro/stmmac/dwmac4_lib.c
index f2a29a90e085..9becca280074 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac4_lib.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4_lib.c
@@ -97,21 +97,52 @@ void dwmac4_set_rx_ring_len(void __iomem *ioaddr, u32 len, u32 chan)
writel(len, ioaddr + DMA_CHAN_RX_RING_LEN(chan));
}
-void dwmac4_enable_dma_irq(void __iomem *ioaddr, u32 chan)
+void dwmac4_enable_dma_irq(void __iomem *ioaddr, u32 chan, bool rx, bool tx)
{
- writel(DMA_CHAN_INTR_DEFAULT_MASK, ioaddr +
- DMA_CHAN_INTR_ENA(chan));
+ u32 value = readl(ioaddr + DMA_CHAN_INTR_ENA(chan));
+
+ if (rx)
+ value |= DMA_CHAN_INTR_DEFAULT_RX;
+ if (tx)
+ value |= DMA_CHAN_INTR_DEFAULT_TX;
+
+ writel(value, ioaddr + DMA_CHAN_INTR_ENA(chan));
}
-void dwmac410_enable_dma_irq(void __iomem *ioaddr, u32 chan)
+void dwmac410_enable_dma_irq(void __iomem *ioaddr, u32 chan, bool rx, bool tx)
{
- writel(DMA_CHAN_INTR_DEFAULT_MASK_4_10,
- ioaddr + DMA_CHAN_INTR_ENA(chan));
+ u32 value = readl(ioaddr + DMA_CHAN_INTR_ENA(chan));
+
+ if (rx)
+ value |= DMA_CHAN_INTR_DEFAULT_RX_4_10;
+ if (tx)
+ value |= DMA_CHAN_INTR_DEFAULT_TX_4_10;
+
+ writel(value, ioaddr + DMA_CHAN_INTR_ENA(chan));
}
-void dwmac4_disable_dma_irq(void __iomem *ioaddr, u32 chan)
+void dwmac4_disable_dma_irq(void __iomem *ioaddr, u32 chan, bool rx, bool tx)
{
- writel(0, ioaddr + DMA_CHAN_INTR_ENA(chan));
+ u32 value = readl(ioaddr + DMA_CHAN_INTR_ENA(chan));
+
+ if (rx)
+ value &= ~DMA_CHAN_INTR_DEFAULT_RX;
+ if (tx)
+ value &= ~DMA_CHAN_INTR_DEFAULT_TX;
+
+ writel(value, ioaddr + DMA_CHAN_INTR_ENA(chan));
+}
+
+void dwmac410_disable_dma_irq(void __iomem *ioaddr, u32 chan, bool rx, bool tx)
+{
+ u32 value = readl(ioaddr + DMA_CHAN_INTR_ENA(chan));
+
+ if (rx)
+ value &= ~DMA_CHAN_INTR_DEFAULT_RX_4_10;
+ if (tx)
+ value &= ~DMA_CHAN_INTR_DEFAULT_TX_4_10;
+
+ writel(value, ioaddr + DMA_CHAN_INTR_ENA(chan));
}
int dwmac4_dma_interrupt(void __iomem *ioaddr,
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac5.c b/drivers/net/ethernet/stmicro/stmmac/dwmac5.c
index e436fa160c7d..494c859b4ade 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac5.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac5.c
@@ -550,3 +550,122 @@ int dwmac5_flex_pps_config(void __iomem *ioaddr, int index,
writel(val, ioaddr + MAC_PPS_CONTROL);
return 0;
}
+
+static int dwmac5_est_write(void __iomem *ioaddr, u32 reg, u32 val, bool gcl)
+{
+ u32 ctrl;
+
+ writel(val, ioaddr + MTL_EST_GCL_DATA);
+
+ ctrl = (reg << ADDR_SHIFT);
+ ctrl |= gcl ? 0 : GCRR;
+
+ writel(ctrl, ioaddr + MTL_EST_GCL_CONTROL);
+
+ ctrl |= SRWO;
+ writel(ctrl, ioaddr + MTL_EST_GCL_CONTROL);
+
+ return readl_poll_timeout(ioaddr + MTL_EST_GCL_CONTROL,
+ ctrl, !(ctrl & SRWO), 100, 5000);
+}
+
+int dwmac5_est_configure(void __iomem *ioaddr, struct stmmac_est *cfg,
+ unsigned int ptp_rate)
+{
+ u32 speed, total_offset, offset, ctrl, ctr_low;
+ u32 extcfg = readl(ioaddr + GMAC_EXT_CONFIG);
+ u32 mac_cfg = readl(ioaddr + GMAC_CONFIG);
+ int i, ret = 0x0;
+ u64 total_ctr;
+
+ if (extcfg & GMAC_CONFIG_EIPG_EN) {
+ offset = (extcfg & GMAC_CONFIG_EIPG) >> GMAC_CONFIG_EIPG_SHIFT;
+ offset = 104 + (offset * 8);
+ } else {
+ offset = (mac_cfg & GMAC_CONFIG_IPG) >> GMAC_CONFIG_IPG_SHIFT;
+ offset = 96 - (offset * 8);
+ }
+
+ speed = mac_cfg & (GMAC_CONFIG_PS | GMAC_CONFIG_FES);
+ speed = speed >> GMAC_CONFIG_FES_SHIFT;
+
+ switch (speed) {
+ case 0x0:
+ offset = offset * 1000; /* 1G */
+ break;
+ case 0x1:
+ offset = offset * 400; /* 2.5G */
+ break;
+ case 0x2:
+ offset = offset * 100000; /* 10M */
+ break;
+ case 0x3:
+ offset = offset * 10000; /* 100M */
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ offset = offset / 1000;
+
+ ret |= dwmac5_est_write(ioaddr, BTR_LOW, cfg->btr[0], false);
+ ret |= dwmac5_est_write(ioaddr, BTR_HIGH, cfg->btr[1], false);
+ ret |= dwmac5_est_write(ioaddr, TER, cfg->ter, false);
+ ret |= dwmac5_est_write(ioaddr, LLR, cfg->gcl_size, false);
+ if (ret)
+ return ret;
+
+ total_offset = 0;
+ for (i = 0; i < cfg->gcl_size; i++) {
+ ret = dwmac5_est_write(ioaddr, i, cfg->gcl[i] + offset, true);
+ if (ret)
+ return ret;
+
+ total_offset += offset;
+ }
+
+ total_ctr = cfg->ctr[0] + cfg->ctr[1] * 1000000000;
+ total_ctr += total_offset;
+
+ ctr_low = do_div(total_ctr, 1000000000);
+
+ ret |= dwmac5_est_write(ioaddr, CTR_LOW, ctr_low, false);
+ ret |= dwmac5_est_write(ioaddr, CTR_HIGH, total_ctr, false);
+ if (ret)
+ return ret;
+
+ ctrl = readl(ioaddr + MTL_EST_CONTROL);
+ ctrl &= ~PTOV;
+ ctrl |= ((1000000000 / ptp_rate) * 6) << PTOV_SHIFT;
+ if (cfg->enable)
+ ctrl |= EEST | SSWL;
+ else
+ ctrl &= ~EEST;
+
+ writel(ctrl, ioaddr + MTL_EST_CONTROL);
+ return 0;
+}
+
+void dwmac5_fpe_configure(void __iomem *ioaddr, u32 num_txq, u32 num_rxq,
+ bool enable)
+{
+ u32 value;
+
+ if (!enable) {
+ value = readl(ioaddr + MAC_FPE_CTRL_STS);
+
+ value &= ~EFPE;
+
+ writel(value, ioaddr + MAC_FPE_CTRL_STS);
+ return;
+ }
+
+ value = readl(ioaddr + GMAC_RXQ_CTRL1);
+ value &= ~GMAC_RXQCTRL_FPRQ;
+ value |= (num_rxq - 1) << GMAC_RXQCTRL_FPRQ_SHIFT;
+ writel(value, ioaddr + GMAC_RXQ_CTRL1);
+
+ value = readl(ioaddr + MAC_FPE_CTRL_STS);
+ value |= EFPE;
+ writel(value, ioaddr + MAC_FPE_CTRL_STS);
+}
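
The speed switch in dwmac5_est_configure() converts the inter-packet
gap from bit times to nanoseconds before folding it into every gate
control list entry: the gap is 96 bit times by default (or 104 plus
8 times the EIPG field when the extended IPG is enabled), and one bit
lasts 1 ns at 1G, 0.4 ns at 2.5G, 10 ns at 100M and 100 ns at 10M; the
multiply-then-divide-by-1000 keeps the intermediate math in integers.
A worked example at 100 Mb/s with the default gap:

    /* 96 bit times at 100 Mb/s (10 ns per bit) is 960 ns, matching
     * offset = (96 * 10000) / 1000 in dwmac5_est_configure(). */
    static u32 sketch_ipg_ns_100m(u32 ipg_bit_times)
    {
        return (ipg_bit_times * 10000) / 1000;	/* 96 -> 960 */
    }
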
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac5.h b/drivers/net/ethernet/stmicro/stmmac/dwmac5.h
index 23fecf68f781..3e8faa96b4d4 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac5.h
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac5.h
@@ -11,6 +11,9 @@
#define PRTYEN BIT(1)
#define TMOUTEN BIT(0)
+#define MAC_FPE_CTRL_STS 0x00000234
+#define EFPE BIT(0)
+
#define MAC_PPS_CONTROL 0x00000b70
#define PPS_MAXIDX(x) ((((x) + 1) * 8) - 1)
#define PPS_MINIDX(x) ((x) * 8)
@@ -30,6 +33,23 @@
#define MAC_PPSx_INTERVAL(x) (0x00000b88 + ((x) * 0x10))
#define MAC_PPSx_WIDTH(x) (0x00000b8c + ((x) * 0x10))
+#define MTL_EST_CONTROL 0x00000c50
+#define PTOV GENMASK(31, 24)
+#define PTOV_SHIFT 24
+#define SSWL BIT(1)
+#define EEST BIT(0)
+#define MTL_EST_GCL_CONTROL 0x00000c80
+#define BTR_LOW 0x0
+#define BTR_HIGH 0x1
+#define CTR_LOW 0x2
+#define CTR_HIGH 0x3
+#define TER 0x4
+#define LLR 0x5
+#define ADDR_SHIFT 8
+#define GCRR BIT(2)
+#define SRWO BIT(0)
+#define MTL_EST_GCL_DATA 0x00000c84
+
#define MTL_RXP_CONTROL_STATUS 0x00000ca0
#define RXPI BIT(31)
#define NPE GENMASK(23, 16)
@@ -83,5 +103,9 @@ int dwmac5_rxp_config(void __iomem *ioaddr, struct stmmac_tc_entry *entries,
int dwmac5_flex_pps_config(void __iomem *ioaddr, int index,
struct stmmac_pps_cfg *cfg, bool enable,
u32 sub_second_inc, u32 systime_flags);
+int dwmac5_est_configure(void __iomem *ioaddr, struct stmmac_est *cfg,
+ unsigned int ptp_rate);
+void dwmac5_fpe_configure(void __iomem *ioaddr, u32 num_txq, u32 num_rxq,
+ bool enable);
#endif /* __DWMAC5_H__ */
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac_dma.h b/drivers/net/ethernet/stmicro/stmmac/dwmac_dma.h
index 292b880f3f9f..e5dbd0bc257e 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac_dma.h
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac_dma.h
@@ -96,6 +96,8 @@
/* DMA default interrupt mask */
#define DMA_INTR_DEFAULT_MASK (DMA_INTR_NORMAL | DMA_INTR_ABNORMAL)
+#define DMA_INTR_DEFAULT_RX (DMA_INTR_ENA_RIE)
+#define DMA_INTR_DEFAULT_TX (DMA_INTR_ENA_TIE)
/* DMA Status register defines */
#define DMA_STATUS_GLPII 0x40000000 /* GMAC LPI interrupt */
@@ -130,8 +132,8 @@
#define NUM_DWMAC1000_DMA_REGS 23
void dwmac_enable_dma_transmission(void __iomem *ioaddr);
-void dwmac_enable_dma_irq(void __iomem *ioaddr, u32 chan);
-void dwmac_disable_dma_irq(void __iomem *ioaddr, u32 chan);
+void dwmac_enable_dma_irq(void __iomem *ioaddr, u32 chan, bool rx, bool tx);
+void dwmac_disable_dma_irq(void __iomem *ioaddr, u32 chan, bool rx, bool tx);
void dwmac_dma_start_tx(void __iomem *ioaddr, u32 chan);
void dwmac_dma_stop_tx(void __iomem *ioaddr, u32 chan);
void dwmac_dma_start_rx(void __iomem *ioaddr, u32 chan);
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac_lib.c b/drivers/net/ethernet/stmicro/stmmac/dwmac_lib.c
index 1bc25aa86dbd..688d36095333 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac_lib.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac_lib.c
@@ -37,14 +37,28 @@ void dwmac_enable_dma_transmission(void __iomem *ioaddr)
writel(1, ioaddr + DMA_XMT_POLL_DEMAND);
}
-void dwmac_enable_dma_irq(void __iomem *ioaddr, u32 chan)
+void dwmac_enable_dma_irq(void __iomem *ioaddr, u32 chan, bool rx, bool tx)
{
- writel(DMA_INTR_DEFAULT_MASK, ioaddr + DMA_INTR_ENA);
+ u32 value = readl(ioaddr + DMA_INTR_ENA);
+
+ if (rx)
+ value |= DMA_INTR_DEFAULT_RX;
+ if (tx)
+ value |= DMA_INTR_DEFAULT_TX;
+
+ writel(value, ioaddr + DMA_INTR_ENA);
}
-void dwmac_disable_dma_irq(void __iomem *ioaddr, u32 chan)
+void dwmac_disable_dma_irq(void __iomem *ioaddr, u32 chan, bool rx, bool tx)
{
- writel(0, ioaddr + DMA_INTR_ENA);
+ u32 value = readl(ioaddr + DMA_INTR_ENA);
+
+ if (rx)
+ value &= ~DMA_INTR_DEFAULT_RX;
+ if (tx)
+ value &= ~DMA_INTR_DEFAULT_TX;
+
+ writel(value, ioaddr + DMA_INTR_ENA);
}
void dwmac_dma_start_tx(void __iomem *ioaddr, u32 chan)
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwxgmac2.h b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2.h
index ef8a07c68ca7..6c3b8a950f58 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwxgmac2.h
+++ b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2.h
@@ -73,6 +73,9 @@
#define XGMAC_RXQ_CTRL0 0x000000a0
#define XGMAC_RXQEN(x) GENMASK((x) * 2 + 1, (x) * 2)
#define XGMAC_RXQEN_SHIFT(x) ((x) * 2)
+#define XGMAC_RXQ_CTRL1 0x000000a4
+#define XGMAC_RQ GENMASK(7, 4)
+#define XGMAC_RQ_SHIFT 4
#define XGMAC_RXQ_CTRL2 0x000000a8
#define XGMAC_RXQ_CTRL3 0x000000ac
#define XGMAC_PSRQ(x) GENMASK((x) * 8 + 7, (x) * 8)
@@ -136,6 +139,11 @@
#define XGMAC_HWFEAT_TXQCNT GENMASK(9, 6)
#define XGMAC_HWFEAT_RXQCNT GENMASK(3, 0)
#define XGMAC_HW_FEATURE3 0x00000128
+#define XGMAC_HWFEAT_TBSSEL BIT(27)
+#define XGMAC_HWFEAT_FPESEL BIT(26)
+#define XGMAC_HWFEAT_ESTWID GENMASK(24, 23)
+#define XGMAC_HWFEAT_ESTDEP GENMASK(22, 20)
+#define XGMAC_HWFEAT_ESTSEL BIT(19)
#define XGMAC_HWFEAT_ASP GENMASK(15, 14)
#define XGMAC_HWFEAT_DVLAN BIT(13)
#define XGMAC_HWFEAT_FRPES GENMASK(12, 11)
@@ -148,6 +156,8 @@
#define XGMAC_MDIO_ADDR 0x00000200
#define XGMAC_MDIO_DATA 0x00000204
#define XGMAC_MDIO_C22P 0x00000220
+#define XGMAC_FPE_CTRL_STS 0x00000280
+#define XGMAC_EFPE BIT(0)
#define XGMAC_ADDRx_HIGH(x) (0x00000300 + (x) * 0x8)
#define XGMAC_ADDR_MAX 32
#define XGMAC_AE BIT(31)
@@ -237,6 +247,22 @@
#define XGMAC_TC_PRTY_MAP1 0x00001044
#define XGMAC_PSTC(x) GENMASK((x) * 8 + 7, (x) * 8)
#define XGMAC_PSTC_SHIFT(x) ((x) * 8)
+#define XGMAC_MTL_EST_CONTROL 0x00001050
+#define XGMAC_PTOV GENMASK(31, 23)
+#define XGMAC_PTOV_SHIFT 23
+#define XGMAC_SSWL BIT(1)
+#define XGMAC_EEST BIT(0)
+#define XGMAC_MTL_EST_GCL_CONTROL 0x00001080
+#define XGMAC_BTR_LOW 0x0
+#define XGMAC_BTR_HIGH 0x1
+#define XGMAC_CTR_LOW 0x2
+#define XGMAC_CTR_HIGH 0x3
+#define XGMAC_TER 0x4
+#define XGMAC_LLR 0x5
+#define XGMAC_ADDR_SHIFT 8
+#define XGMAC_GCRR BIT(2)
+#define XGMAC_SRWO BIT(0)
+#define XGMAC_MTL_EST_GCL_DATA 0x00001084
#define XGMAC_MTL_RXP_CONTROL_STATUS 0x000010a0
#define XGMAC_RXPI BIT(31)
#define XGMAC_NPE GENMASK(23, 16)
@@ -321,6 +347,13 @@
#define XGMAC_TDPS GENMASK(29, 0)
#define XGMAC_RX_EDMA_CTRL 0x00003044
#define XGMAC_RDPS GENMASK(29, 0)
+#define XGMAC_DMA_TBS_CTRL0 0x00003054
+#define XGMAC_DMA_TBS_CTRL1 0x00003058
+#define XGMAC_DMA_TBS_CTRL2 0x0000305c
+#define XGMAC_DMA_TBS_CTRL3 0x00003060
+#define XGMAC_FTOS GENMASK(31, 8)
+#define XGMAC_FTOV BIT(0)
+#define XGMAC_DEF_FTOS (XGMAC_FTOS | XGMAC_FTOV)
#define XGMAC_DMA_SAFETY_INT_STATUS 0x00003064
#define XGMAC_MCSIS BIT(31)
#define XGMAC_MSUIS BIT(29)
@@ -335,6 +368,7 @@
#define XGMAC_SPH BIT(24)
#define XGMAC_PBLx8 BIT(16)
#define XGMAC_DMA_CH_TX_CONTROL(x) (0x00003104 + (0x80 * (x)))
+#define XGMAC_EDSE BIT(28)
#define XGMAC_TxPBL GENMASK(21, 16)
#define XGMAC_TxPBL_SHIFT 16
#define XGMAC_TSE BIT(12)
@@ -363,6 +397,8 @@
#define XGMAC_TIE BIT(0)
#define XGMAC_DMA_INT_DEFAULT_EN (XGMAC_NIE | XGMAC_AIE | XGMAC_RBUE | \
XGMAC_RIE | XGMAC_TIE)
+#define XGMAC_DMA_INT_DEFAULT_RX (XGMAC_RBUE | XGMAC_RIE)
+#define XGMAC_DMA_INT_DEFAULT_TX (XGMAC_TIE)
#define XGMAC_DMA_CH_Rx_WATCHDOG(x) (0x0000313c + (0x80 * (x)))
#define XGMAC_RWT GENMASK(7, 0)
#define XGMAC_DMA_CH_STATUS(x) (0x00003160 + (0x80 * (x)))
@@ -377,6 +413,9 @@
#define XGMAC_REGSIZE ((0x0000317c + (0x80 * 15)) / 4)
/* Descriptors */
+#define XGMAC_TDES0_LTV BIT(31)
+#define XGMAC_TDES0_LT GENMASK(7, 0)
+#define XGMAC_TDES1_LT GENMASK(31, 8)
#define XGMAC_TDES2_IVT GENMASK(31, 16)
#define XGMAC_TDES2_IVT_SHIFT 16
#define XGMAC_TDES2_IOC BIT(31)
@@ -395,6 +434,7 @@
#define XGMAC_TDES3_TCMSSV BIT(26)
#define XGMAC_TDES3_SAIC GENMASK(25, 23)
#define XGMAC_TDES3_SAIC_SHIFT 23
+#define XGMAC_TDES3_TBSV BIT(24)
#define XGMAC_TDES3_THL GENMASK(22, 19)
#define XGMAC_TDES3_THL_SHIFT 19
#define XGMAC_TDES3_IVTIR GENMASK(19, 18)
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_core.c b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_core.c
index 082f5ee9e525..2af3ac5409b7 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_core.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_core.c
@@ -1359,6 +1359,81 @@ static void dwxgmac2_set_arp_offload(struct mac_device_info *hw, bool en,
writel(value, ioaddr + XGMAC_RX_CONFIG);
}
+static int dwxgmac3_est_write(void __iomem *ioaddr, u32 reg, u32 val, bool gcl)
+{
+ u32 ctrl;
+
+ writel(val, ioaddr + XGMAC_MTL_EST_GCL_DATA);
+
+ ctrl = (reg << XGMAC_ADDR_SHIFT);
+ ctrl |= gcl ? 0 : XGMAC_GCRR;
+
+ writel(ctrl, ioaddr + XGMAC_MTL_EST_GCL_CONTROL);
+
+ ctrl |= XGMAC_SRWO;
+ writel(ctrl, ioaddr + XGMAC_MTL_EST_GCL_CONTROL);
+
+ return readl_poll_timeout_atomic(ioaddr + XGMAC_MTL_EST_GCL_CONTROL,
+ ctrl, !(ctrl & XGMAC_SRWO), 100, 5000);
+}
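
The helper above drives the indirect GCL access port: stage the value in GCL_DATA, latch the target address (GCRR selects the BTR/CTR/TER/LLR control words rather than a list slot), then set SRWO and poll until the hardware clears it. Below is a minimal host-side model of that handshake, with a plain array standing in for the register file and the hardware acknowledge folded into the helper; it is illustrative only, not kernel API.

/*
 * Host-side model of the GCL indirect-write handshake, for illustration
 * only: mmio[] stands in for the real register file, and the hardware
 * acknowledge (clearing SRWO) is folded into the helper. Names mirror
 * the defines above; none of this is kernel API.
 */
#include <stdio.h>

#define GCL_CONTROL	0
#define GCL_DATA	1
#define ADDR_SHIFT	8
#define GCRR		(1u << 2)
#define SRWO		(1u << 0)

static unsigned int mmio[2];

static void gcl_write(unsigned int reg, unsigned int val, int is_gcl_entry)
{
	unsigned int ctrl = reg << ADDR_SHIFT;

	if (!is_gcl_entry)
		ctrl |= GCRR;	/* address BTR/CTR/TER/LLR, not a list slot */

	mmio[GCL_DATA] = val;			/* stage the data word */
	mmio[GCL_CONTROL] = ctrl;		/* latch the address first... */
	mmio[GCL_CONTROL] = ctrl | SRWO;	/* ...then kick off the write */

	/* the real driver polls readl_poll_timeout_atomic() here */
	mmio[GCL_CONTROL] &= ~SRWO;		/* modeled hardware acknowledge */
}

int main(void)
{
	gcl_write(5, 4, 0);		/* LLR: gate list has 4 entries */
	gcl_write(0, 0x01000064, 1);	/* GCL entry 0 */
	printf("ctrl=%#x data=%#x\n", mmio[GCL_CONTROL], mmio[GCL_DATA]);
	return 0;
}

The same sequence serves both the control words and the per-entry gate list, which is why the gcl flag only toggles GCRR.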
+
+static int dwxgmac3_est_configure(void __iomem *ioaddr, struct stmmac_est *cfg,
+ unsigned int ptp_rate)
+{
+ int i, ret = 0;
+ u32 ctrl;
+
+ ret |= dwxgmac3_est_write(ioaddr, XGMAC_BTR_LOW, cfg->btr[0], false);
+ ret |= dwxgmac3_est_write(ioaddr, XGMAC_BTR_HIGH, cfg->btr[1], false);
+ ret |= dwxgmac3_est_write(ioaddr, XGMAC_TER, cfg->ter, false);
+ ret |= dwxgmac3_est_write(ioaddr, XGMAC_LLR, cfg->gcl_size, false);
+ ret |= dwxgmac3_est_write(ioaddr, XGMAC_CTR_LOW, cfg->ctr[0], false);
+ ret |= dwxgmac3_est_write(ioaddr, XGMAC_CTR_HIGH, cfg->ctr[1], false);
+ if (ret)
+ return ret;
+
+ for (i = 0; i < cfg->gcl_size; i++) {
+ ret = dwxgmac3_est_write(ioaddr, i, cfg->gcl[i], true);
+ if (ret)
+ return ret;
+ }
+
+ ctrl = readl(ioaddr + XGMAC_MTL_EST_CONTROL);
+ ctrl &= ~XGMAC_PTOV;
+ ctrl |= ((1000000000 / ptp_rate) * 9) << XGMAC_PTOV_SHIFT;
+ if (cfg->enable)
+ ctrl |= XGMAC_EEST | XGMAC_SSWL;
+ else
+ ctrl &= ~XGMAC_EEST;
+
+ writel(ctrl, ioaddr + XGMAC_MTL_EST_CONTROL);
+ return 0;
+}
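
The PTOV field programmed above is the PTP time offset: nine PTP clock periods expressed in nanoseconds, placed into bits 31:23 of the EST control register. A small sketch of the arithmetic, assuming a 250 MHz PTP clock (clk_ptp_rate is platform-dependent):

/*
 * Sketch of the PTOV arithmetic above, assuming a 250 MHz PTP clock:
 * the field holds nine PTP clock periods, expressed in nanoseconds,
 * before the shift into bits 31:23. Illustrative values only.
 */
#include <stdio.h>

int main(void)
{
	unsigned int ptp_rate = 250000000;		 /* assumed example rate */
	unsigned int period_ns = 1000000000u / ptp_rate; /* 4 ns per tick */
	unsigned int ptov = period_ns * 9;		 /* 36 ns */

	printf("PTOV field value = %u\n", ptov);
	return 0;
}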
+
+static void dwxgmac3_fpe_configure(void __iomem *ioaddr, u32 num_txq,
+ u32 num_rxq, bool enable)
+{
+ u32 value;
+
+ if (!enable) {
+ value = readl(ioaddr + XGMAC_FPE_CTRL_STS);
+
+ value &= ~XGMAC_EFPE;
+
+ writel(value, ioaddr + XGMAC_FPE_CTRL_STS);
+ return;
+ }
+
+ value = readl(ioaddr + XGMAC_RXQ_CTRL1);
+ value &= ~XGMAC_RQ;
+ value |= (num_rxq - 1) << XGMAC_RQ_SHIFT;
+ writel(value, ioaddr + XGMAC_RXQ_CTRL1);
+
+ value = readl(ioaddr + XGMAC_FPE_CTRL_STS);
+ value |= XGMAC_EFPE;
+ writel(value, ioaddr + XGMAC_FPE_CTRL_STS);
+}
+
const struct stmmac_ops dwxgmac210_ops = {
.core_init = dwxgmac2_core_init,
.set_mac = dwxgmac2_set_mac,
@@ -1402,6 +1477,8 @@ const struct stmmac_ops dwxgmac210_ops = {
.config_l3_filter = dwxgmac2_config_l3_filter,
.config_l4_filter = dwxgmac2_config_l4_filter,
.set_arp_offload = dwxgmac2_set_arp_offload,
+ .est_configure = dwxgmac3_est_configure,
+ .fpe_configure = dwxgmac3_fpe_configure,
};
int dwxgmac2_setup(struct stmmac_priv *priv)
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_descs.c b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_descs.c
index bd5838ce1e8a..c3d654cfa9ef 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_descs.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_descs.c
@@ -339,6 +339,14 @@ static void dwxgmac2_set_vlan(struct dma_desc *p, u32 type)
p->des2 |= cpu_to_le32(type & XGMAC_TDES2_VTIR);
}
+static void dwxgmac2_set_tbs(struct dma_edesc *p, u32 sec, u32 nsec)
+{
+ p->des4 = cpu_to_le32((sec & XGMAC_TDES0_LT) | XGMAC_TDES0_LTV);
+ p->des5 = cpu_to_le32(nsec & XGMAC_TDES1_LT);
+ p->des6 = 0;
+ p->des7 = 0;
+}
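
Given the masks above, the launch time is stored with limited precision: only the low eight bits of the seconds counter survive in TDES0, and the low eight bits of the nanoseconds are masked off in TDES1, i.e. roughly 256 ns granularity within a wrapping 256 s window. A host-side sketch of the packing, using an arbitrary example timestamp:

/*
 * Illustrative packing of a launch time per the TDES0/TDES1 masks
 * above. Host-side sketch with an arbitrary timestamp, not kernel code.
 */
#include <stdio.h>

int main(void)
{
	unsigned long long launch_ns = 1234567890123456789ull; /* example */
	unsigned int sec = launch_ns / 1000000000ull;
	unsigned int nsec = launch_ns % 1000000000ull;
	unsigned int des4 = (sec & 0xffu) | (1u << 31);	/* LT low 8 bits + LTV */
	unsigned int des5 = nsec & 0xffffff00u;		/* LT, low 8 ns bits dropped */

	printf("des4=%08x des5=%08x\n", des4, des5);
	return 0;
}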
+
const struct stmmac_desc_ops dwxgmac210_desc_ops = {
.tx_status = dwxgmac2_get_tx_status,
.rx_status = dwxgmac2_get_rx_status,
@@ -368,4 +376,5 @@ const struct stmmac_desc_ops dwxgmac210_desc_ops = {
.set_sarc = dwxgmac2_set_sarc,
.set_vlan_tag = dwxgmac2_set_vlan_tag,
.set_vlan = dwxgmac2_set_vlan,
+ .set_tbs = dwxgmac2_set_tbs,
};
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_dma.c b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_dma.c
index f3f08ccc379b..77308c5c5d29 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_dma.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_dma.c
@@ -248,14 +248,30 @@ static void dwxgmac2_dma_tx_mode(void __iomem *ioaddr, int mode,
writel(value, ioaddr + XGMAC_MTL_TXQ_OPMODE(channel));
}
-static void dwxgmac2_enable_dma_irq(void __iomem *ioaddr, u32 chan)
+static void dwxgmac2_enable_dma_irq(void __iomem *ioaddr, u32 chan,
+ bool rx, bool tx)
{
- writel(XGMAC_DMA_INT_DEFAULT_EN, ioaddr + XGMAC_DMA_CH_INT_EN(chan));
+ u32 value = readl(ioaddr + XGMAC_DMA_CH_INT_EN(chan));
+
+ if (rx)
+ value |= XGMAC_DMA_INT_DEFAULT_RX;
+ if (tx)
+ value |= XGMAC_DMA_INT_DEFAULT_TX;
+
+ writel(value, ioaddr + XGMAC_DMA_CH_INT_EN(chan));
}
-static void dwxgmac2_disable_dma_irq(void __iomem *ioaddr, u32 chan)
+static void dwxgmac2_disable_dma_irq(void __iomem *ioaddr, u32 chan,
+ bool rx, bool tx)
{
- writel(0, ioaddr + XGMAC_DMA_CH_INT_EN(chan));
+ u32 value = readl(ioaddr + XGMAC_DMA_CH_INT_EN(chan));
+
+ if (rx)
+ value &= ~XGMAC_DMA_INT_DEFAULT_RX;
+ if (tx)
+ value &= ~XGMAC_DMA_INT_DEFAULT_TX;
+
+ writel(value, ioaddr + XGMAC_DMA_CH_INT_EN(chan));
}
static void dwxgmac2_dma_start_tx(void __iomem *ioaddr, u32 chan)
@@ -413,6 +429,11 @@ static void dwxgmac2_get_hw_feature(void __iomem *ioaddr,
/* MAC HW feature 3 */
hw_cap = readl(ioaddr + XGMAC_HW_FEATURE3);
+ dma_cap->tbssel = (hw_cap & XGMAC_HWFEAT_TBSSEL) >> 27;
+ dma_cap->fpesel = (hw_cap & XGMAC_HWFEAT_FPESEL) >> 26;
+ dma_cap->estwid = (hw_cap & XGMAC_HWFEAT_ESTWID) >> 23;
+ dma_cap->estdep = (hw_cap & XGMAC_HWFEAT_ESTDEP) >> 20;
+ dma_cap->estsel = (hw_cap & XGMAC_HWFEAT_ESTSEL) >> 19;
dma_cap->asp = (hw_cap & XGMAC_HWFEAT_ASP) >> 14;
dma_cap->dvlan = (hw_cap & XGMAC_HWFEAT_DVLAN) >> 13;
dma_cap->frpes = (hw_cap & XGMAC_HWFEAT_FRPES) >> 11;
@@ -503,6 +524,28 @@ static void dwxgmac2_enable_sph(void __iomem *ioaddr, bool en, u32 chan)
writel(value, ioaddr + XGMAC_DMA_CH_CONTROL(chan));
}
+static int dwxgmac2_enable_tbs(void __iomem *ioaddr, bool en, u32 chan)
+{
+ u32 value = readl(ioaddr + XGMAC_DMA_CH_TX_CONTROL(chan));
+
+ if (en)
+ value |= XGMAC_EDSE;
+ else
+ value &= ~XGMAC_EDSE;
+
+ writel(value, ioaddr + XGMAC_DMA_CH_TX_CONTROL(chan));
+
+ value = readl(ioaddr + XGMAC_DMA_CH_TX_CONTROL(chan)) & XGMAC_EDSE;
+ if (en && !value)
+ return -EIO;
+
+ writel(XGMAC_DEF_FTOS, ioaddr + XGMAC_DMA_TBS_CTRL0);
+ writel(XGMAC_DEF_FTOS, ioaddr + XGMAC_DMA_TBS_CTRL1);
+ writel(XGMAC_DEF_FTOS, ioaddr + XGMAC_DMA_TBS_CTRL2);
+ writel(XGMAC_DEF_FTOS, ioaddr + XGMAC_DMA_TBS_CTRL3);
+ return 0;
+}
+
const struct stmmac_dma_ops dwxgmac210_dma_ops = {
.reset = dwxgmac2_dma_reset,
.init = dwxgmac2_dma_init,
@@ -530,4 +573,5 @@ const struct stmmac_dma_ops dwxgmac210_dma_ops = {
.qmode = dwxgmac2_qmode,
.set_bfsize = dwxgmac2_set_bfsize,
.enable_sph = dwxgmac2_enable_sph,
+ .enable_tbs = dwxgmac2_enable_tbs,
};
diff --git a/drivers/net/ethernet/stmicro/stmmac/hwif.h b/drivers/net/ethernet/stmicro/stmmac/hwif.h
index aa5b917398fe..df63b0367aff 100644
--- a/drivers/net/ethernet/stmicro/stmmac/hwif.h
+++ b/drivers/net/ethernet/stmicro/stmmac/hwif.h
@@ -29,6 +29,7 @@ struct stmmac_extra_stats;
struct stmmac_safety_stats;
struct dma_desc;
struct dma_extended_desc;
+struct dma_edesc;
/* Descriptors helpers */
struct stmmac_desc_ops {
@@ -95,6 +96,7 @@ struct stmmac_desc_ops {
void (*set_vlan_tag)(struct dma_desc *p, u16 tag, u16 inner_tag,
u32 inner_type);
void (*set_vlan)(struct dma_desc *p, u32 type);
+ void (*set_tbs)(struct dma_edesc *p, u32 sec, u32 nsec);
};
#define stmmac_init_rx_desc(__priv, __args...) \
@@ -157,6 +159,8 @@ struct stmmac_desc_ops {
stmmac_do_void_callback(__priv, desc, set_vlan_tag, __args)
#define stmmac_set_desc_vlan(__priv, __args...) \
stmmac_do_void_callback(__priv, desc, set_vlan, __args)
+#define stmmac_set_desc_tbs(__priv, __args...) \
+ stmmac_do_void_callback(__priv, desc, set_tbs, __args)
struct stmmac_dma_cfg;
struct dma_features;
@@ -187,8 +191,10 @@ struct stmmac_dma_ops {
void (*dma_diagnostic_fr) (void *data, struct stmmac_extra_stats *x,
void __iomem *ioaddr);
void (*enable_dma_transmission) (void __iomem *ioaddr);
- void (*enable_dma_irq)(void __iomem *ioaddr, u32 chan);
- void (*disable_dma_irq)(void __iomem *ioaddr, u32 chan);
+ void (*enable_dma_irq)(void __iomem *ioaddr, u32 chan,
+ bool rx, bool tx);
+ void (*disable_dma_irq)(void __iomem *ioaddr, u32 chan,
+ bool rx, bool tx);
void (*start_tx)(void __iomem *ioaddr, u32 chan);
void (*stop_tx)(void __iomem *ioaddr, u32 chan);
void (*start_rx)(void __iomem *ioaddr, u32 chan);
@@ -208,6 +214,7 @@ struct stmmac_dma_ops {
void (*qmode)(void __iomem *ioaddr, u32 channel, u8 qmode);
void (*set_bfsize)(void __iomem *ioaddr, int bfsize, u32 chan);
void (*enable_sph)(void __iomem *ioaddr, bool en, u32 chan);
+ int (*enable_tbs)(void __iomem *ioaddr, bool en, u32 chan);
};
#define stmmac_reset(__priv, __args...) \
@@ -266,6 +273,8 @@ struct stmmac_dma_ops {
stmmac_do_void_callback(__priv, dma, set_bfsize, __args)
#define stmmac_enable_sph(__priv, __args...) \
stmmac_do_void_callback(__priv, dma, enable_sph, __args)
+#define stmmac_enable_tbs(__priv, __args...) \
+ stmmac_do_callback(__priv, dma, enable_tbs, __args)
struct mac_device_info;
struct net_device;
@@ -274,6 +283,7 @@ struct stmmac_safety_stats;
struct stmmac_tc_entry;
struct stmmac_pps_cfg;
struct stmmac_rss;
+struct stmmac_est;
/* Helpers to program the MAC core */
struct stmmac_ops {
@@ -371,6 +381,10 @@ struct stmmac_ops {
bool en, bool udp, bool sa, bool inv,
u32 match);
void (*set_arp_offload)(struct mac_device_info *hw, bool en, u32 addr);
+ int (*est_configure)(void __iomem *ioaddr, struct stmmac_est *cfg,
+ unsigned int ptp_rate);
+ void (*fpe_configure)(void __iomem *ioaddr, u32 num_txq, u32 num_rxq,
+ bool enable);
};
#define stmmac_core_init(__priv, __args...) \
@@ -457,6 +471,10 @@ struct stmmac_ops {
stmmac_do_callback(__priv, mac, config_l4_filter, __args)
#define stmmac_set_arp_offload(__priv, __args...) \
stmmac_do_void_callback(__priv, mac, set_arp_offload, __args)
+#define stmmac_est_configure(__priv, __args...) \
+ stmmac_do_callback(__priv, mac, est_configure, __args)
+#define stmmac_fpe_configure(__priv, __args...) \
+ stmmac_do_void_callback(__priv, mac, fpe_configure, __args)
/* PTP and HW Timer helpers */
struct stmmac_hwtimestamp {
@@ -514,6 +532,8 @@ struct stmmac_priv;
struct tc_cls_u32_offload;
struct tc_cbs_qopt_offload;
struct flow_cls_offload;
+struct tc_taprio_qopt_offload;
+struct tc_etf_qopt_offload;
struct stmmac_tc_ops {
int (*init)(struct stmmac_priv *priv);
@@ -523,6 +543,10 @@ struct stmmac_tc_ops {
struct tc_cbs_qopt_offload *qopt);
int (*setup_cls)(struct stmmac_priv *priv,
struct flow_cls_offload *cls);
+ int (*setup_taprio)(struct stmmac_priv *priv,
+ struct tc_taprio_qopt_offload *qopt);
+ int (*setup_etf)(struct stmmac_priv *priv,
+ struct tc_etf_qopt_offload *qopt);
};
#define stmmac_tc_init(__priv, __args...) \
@@ -533,6 +557,10 @@ struct stmmac_tc_ops {
stmmac_do_callback(__priv, tc, setup_cbs, __args)
#define stmmac_tc_setup_cls(__priv, __args...) \
stmmac_do_callback(__priv, tc, setup_cls, __args)
+#define stmmac_tc_setup_taprio(__priv, __args...) \
+ stmmac_do_callback(__priv, tc, setup_taprio, __args)
+#define stmmac_tc_setup_etf(__priv, __args...) \
+ stmmac_do_callback(__priv, tc, setup_etf, __args)
struct stmmac_counters;
diff --git a/drivers/net/ethernet/stmicro/stmmac/mmc_core.c b/drivers/net/ethernet/stmicro/stmmac/mmc_core.c
index 252cf48c5816..a57b0fa815ab 100644
--- a/drivers/net/ethernet/stmicro/stmmac/mmc_core.c
+++ b/drivers/net/ethernet/stmicro/stmmac/mmc_core.c
@@ -119,6 +119,13 @@
#define MMC_RX_ICMP_GD_OCTETS 0x180
#define MMC_RX_ICMP_ERR_OCTETS 0x184
+#define MMC_TX_FPE_FRAG 0x1a8
+#define MMC_TX_HOLD_REQ 0x1ac
+#define MMC_RX_PKT_ASSEMBLY_ERR 0x1c8
+#define MMC_RX_PKT_SMD_ERR 0x1cc
+#define MMC_RX_PKT_ASSEMBLY_OK 0x1d0
+#define MMC_RX_FPE_FRAG 0x1d4
+
/* XGMAC MMC Registers */
#define MMC_XGMAC_TX_OCTET_GB 0x14
#define MMC_XGMAC_TX_PKT_GB 0x1c
@@ -315,6 +322,15 @@ static void dwmac_mmc_read(void __iomem *mmcaddr, struct stmmac_counters *mmc)
mmc->mmc_rx_tcp_err_octets += readl(mmcaddr + MMC_RX_TCP_ERR_OCTETS);
mmc->mmc_rx_icmp_gd_octets += readl(mmcaddr + MMC_RX_ICMP_GD_OCTETS);
mmc->mmc_rx_icmp_err_octets += readl(mmcaddr + MMC_RX_ICMP_ERR_OCTETS);
+
+ mmc->mmc_tx_fpe_fragment_cntr += readl(mmcaddr + MMC_TX_FPE_FRAG);
+ mmc->mmc_tx_hold_req_cntr += readl(mmcaddr + MMC_TX_HOLD_REQ);
+ mmc->mmc_rx_packet_assembly_err_cntr +=
+ readl(mmcaddr + MMC_RX_PKT_ASSEMBLY_ERR);
+ mmc->mmc_rx_packet_smd_err_cntr += readl(mmcaddr + MMC_RX_PKT_SMD_ERR);
+ mmc->mmc_rx_packet_assembly_ok_cntr +=
+ readl(mmcaddr + MMC_RX_PKT_ASSEMBLY_OK);
+ mmc->mmc_rx_fpe_fragment_cntr += readl(mmcaddr + MMC_RX_FPE_FRAG);
}
const struct stmmac_mmc_ops dwmac_mmc_ops = {
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac.h b/drivers/net/ethernet/stmicro/stmmac/stmmac.h
index d993fc7e82c3..9c02fc754bf1 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac.h
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac.h
@@ -39,13 +39,18 @@ struct stmmac_tx_info {
bool is_jumbo;
};
+#define STMMAC_TBS_AVAIL BIT(0)
+#define STMMAC_TBS_EN BIT(1)
+
/* Frequently used values are kept adjacent for cache effect */
struct stmmac_tx_queue {
u32 tx_count_frames;
+ int tbs;
struct timer_list txtimer;
u32 queue_index;
struct stmmac_priv *priv_data;
struct dma_extended_desc *dma_etx ____cacheline_aligned_in_smp;
+ struct dma_edesc *dma_entx;
struct dma_desc *dma_tx;
struct sk_buff **tx_skbuff;
struct stmmac_tx_info *tx_skbuff_dma;
@@ -88,6 +93,7 @@ struct stmmac_channel {
struct napi_struct rx_napi ____cacheline_aligned_in_smp;
struct napi_struct tx_napi ____cacheline_aligned_in_smp;
struct stmmac_priv *priv_data;
+ spinlock_t lock;
u32 index;
};
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
index 80d59b775907..ff1cbfc834b0 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
@@ -388,9 +388,8 @@ bool stmmac_eee_init(struct stmmac_priv *priv)
/* Using PCS we cannot deal with the PHY registers at this stage
* so we do not support extra features like EEE.
*/
- if ((priv->hw->pcs == STMMAC_PCS_RGMII) ||
- (priv->hw->pcs == STMMAC_PCS_TBI) ||
- (priv->hw->pcs == STMMAC_PCS_RTBI))
+ if (priv->hw->pcs == STMMAC_PCS_TBI ||
+ priv->hw->pcs == STMMAC_PCS_RTBI)
return false;
/* Check if MAC core supports the EEE feature. */
@@ -1090,6 +1089,8 @@ static void stmmac_display_tx_rings(struct stmmac_priv *priv)
if (priv->extend_desc)
head_tx = (void *)tx_q->dma_etx;
+ else if (tx_q->tbs & STMMAC_TBS_AVAIL)
+ head_tx = (void *)tx_q->dma_entx;
else
head_tx = (void *)tx_q->dma_tx;
@@ -1163,13 +1164,19 @@ static void stmmac_clear_tx_descriptors(struct stmmac_priv *priv, u32 queue)
int i;
/* Clear the TX descriptors */
- for (i = 0; i < DMA_TX_SIZE; i++)
+ for (i = 0; i < DMA_TX_SIZE; i++) {
+ int last = (i == (DMA_TX_SIZE - 1));
+ struct dma_desc *p;
+
if (priv->extend_desc)
- stmmac_init_tx_desc(priv, &tx_q->dma_etx[i].basic,
- priv->mode, (i == DMA_TX_SIZE - 1));
+ p = &tx_q->dma_etx[i].basic;
+ else if (tx_q->tbs & STMMAC_TBS_AVAIL)
+ p = &tx_q->dma_entx[i].basic;
else
- stmmac_init_tx_desc(priv, &tx_q->dma_tx[i],
- priv->mode, (i == DMA_TX_SIZE - 1));
+ p = &tx_q->dma_tx[i];
+
+ stmmac_init_tx_desc(priv, p, priv->mode, last);
+ }
}
/**
@@ -1383,7 +1390,7 @@ static int init_dma_tx_desc_rings(struct net_device *dev)
if (priv->extend_desc)
stmmac_mode_init(priv, tx_q->dma_etx,
tx_q->dma_tx_phy, DMA_TX_SIZE, 1);
- else
+ else if (!(tx_q->tbs & STMMAC_TBS_AVAIL))
stmmac_mode_init(priv, tx_q->dma_tx,
tx_q->dma_tx_phy, DMA_TX_SIZE, 0);
}
@@ -1392,6 +1399,8 @@ static int init_dma_tx_desc_rings(struct net_device *dev)
struct dma_desc *p;
if (priv->extend_desc)
p = &((tx_q->dma_etx + i)->basic);
+ else if (tx_q->tbs & STMMAC_TBS_AVAIL)
+ p = &((tx_q->dma_entx + i)->basic);
else
p = tx_q->dma_tx + i;
@@ -1511,19 +1520,26 @@ static void free_dma_tx_desc_resources(struct stmmac_priv *priv)
/* Free TX queue resources */
for (queue = 0; queue < tx_count; queue++) {
struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
+ size_t size;
+ void *addr;
/* Release the DMA TX socket buffers */
dma_free_tx_skbufs(priv, queue);
- /* Free DMA regions of consistent memory previously allocated */
- if (!priv->extend_desc)
- dma_free_coherent(priv->device,
- DMA_TX_SIZE * sizeof(struct dma_desc),
- tx_q->dma_tx, tx_q->dma_tx_phy);
- else
- dma_free_coherent(priv->device, DMA_TX_SIZE *
- sizeof(struct dma_extended_desc),
- tx_q->dma_etx, tx_q->dma_tx_phy);
+ if (priv->extend_desc) {
+ size = sizeof(struct dma_extended_desc);
+ addr = tx_q->dma_etx;
+ } else if (tx_q->tbs & STMMAC_TBS_AVAIL) {
+ size = sizeof(struct dma_edesc);
+ addr = tx_q->dma_entx;
+ } else {
+ size = sizeof(struct dma_desc);
+ addr = tx_q->dma_tx;
+ }
+
+ size *= DMA_TX_SIZE;
+
+ dma_free_coherent(priv->device, size, addr, tx_q->dma_tx_phy);
kfree(tx_q->tx_skbuff_dma);
kfree(tx_q->tx_skbuff);
@@ -1616,6 +1632,8 @@ static int alloc_dma_tx_desc_resources(struct stmmac_priv *priv)
/* TX queues buffers and DMA */
for (queue = 0; queue < tx_count; queue++) {
struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
+ size_t size;
+ void *addr;
tx_q->queue_index = queue;
tx_q->priv_data = priv;
@@ -1632,28 +1650,32 @@ static int alloc_dma_tx_desc_resources(struct stmmac_priv *priv)
if (!tx_q->tx_skbuff)
goto err_dma;
- if (priv->extend_desc) {
- tx_q->dma_etx = dma_alloc_coherent(priv->device,
- DMA_TX_SIZE * sizeof(struct dma_extended_desc),
- &tx_q->dma_tx_phy,
- GFP_KERNEL);
- if (!tx_q->dma_etx)
- goto err_dma;
- } else {
- tx_q->dma_tx = dma_alloc_coherent(priv->device,
- DMA_TX_SIZE * sizeof(struct dma_desc),
- &tx_q->dma_tx_phy,
- GFP_KERNEL);
- if (!tx_q->dma_tx)
- goto err_dma;
- }
+ if (priv->extend_desc)
+ size = sizeof(struct dma_extended_desc);
+ else if (tx_q->tbs & STMMAC_TBS_AVAIL)
+ size = sizeof(struct dma_edesc);
+ else
+ size = sizeof(struct dma_desc);
+
+ size *= DMA_TX_SIZE;
+
+ addr = dma_alloc_coherent(priv->device, size,
+ &tx_q->dma_tx_phy, GFP_KERNEL);
+ if (!addr)
+ goto err_dma;
+
+ if (priv->extend_desc)
+ tx_q->dma_etx = addr;
+ else if (tx_q->tbs & STMMAC_TBS_AVAIL)
+ tx_q->dma_entx = addr;
+ else
+ tx_q->dma_tx = addr;
}
return 0;
err_dma:
free_dma_tx_desc_resources(priv);
-
return ret;
}
@@ -1885,6 +1907,8 @@ static int stmmac_tx_clean(struct stmmac_priv *priv, int budget, u32 queue)
if (priv->extend_desc)
p = (struct dma_desc *)(tx_q->dma_etx + entry);
+ else if (tx_q->tbs & STMMAC_TBS_AVAIL)
+ p = &tx_q->dma_entx[entry].basic;
else
p = tx_q->dma_tx + entry;
@@ -1966,7 +1990,7 @@ static int stmmac_tx_clean(struct stmmac_priv *priv, int budget, u32 queue)
/* We still have pending packets, let's call for a new scheduling */
if (tx_q->dirty_tx != tx_q->cur_tx)
- mod_timer(&tx_q->txtimer, STMMAC_COAL_TIMER(10));
+ mod_timer(&tx_q->txtimer, STMMAC_COAL_TIMER(priv->tx_coal_timer));
__netif_tx_unlock_bh(netdev_get_tx_queue(priv->dev, queue));
@@ -1983,19 +2007,12 @@ static int stmmac_tx_clean(struct stmmac_priv *priv, int budget, u32 queue)
static void stmmac_tx_err(struct stmmac_priv *priv, u32 chan)
{
struct stmmac_tx_queue *tx_q = &priv->tx_queue[chan];
- int i;
netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, chan));
stmmac_stop_tx_dma(priv, chan);
dma_free_tx_skbufs(priv, chan);
- for (i = 0; i < DMA_TX_SIZE; i++)
- if (priv->extend_desc)
- stmmac_init_tx_desc(priv, &tx_q->dma_etx[i].basic,
- priv->mode, (i == DMA_TX_SIZE - 1));
- else
- stmmac_init_tx_desc(priv, &tx_q->dma_tx[i],
- priv->mode, (i == DMA_TX_SIZE - 1));
+ stmmac_clear_tx_descriptors(priv, chan);
tx_q->dirty_tx = 0;
tx_q->cur_tx = 0;
tx_q->mss = 0;
@@ -2060,17 +2077,25 @@ static int stmmac_napi_check(struct stmmac_priv *priv, u32 chan)
int status = stmmac_dma_interrupt_status(priv, priv->ioaddr,
&priv->xstats, chan);
struct stmmac_channel *ch = &priv->channel[chan];
+ unsigned long flags;
if ((status & handle_rx) && (chan < priv->plat->rx_queues_to_use)) {
if (napi_schedule_prep(&ch->rx_napi)) {
- stmmac_disable_dma_irq(priv, priv->ioaddr, chan);
+ spin_lock_irqsave(&ch->lock, flags);
+ stmmac_disable_dma_irq(priv, priv->ioaddr, chan, 1, 0);
+ spin_unlock_irqrestore(&ch->lock, flags);
__napi_schedule_irqoff(&ch->rx_napi);
- status |= handle_tx;
}
}
- if ((status & handle_tx) && (chan < priv->plat->tx_queues_to_use))
- napi_schedule_irqoff(&ch->tx_napi);
+ if ((status & handle_tx) && (chan < priv->plat->tx_queues_to_use)) {
+ if (napi_schedule_prep(&ch->tx_napi)) {
+ spin_lock_irqsave(&ch->lock, flags);
+ stmmac_disable_dma_irq(priv, priv->ioaddr, chan, 0, 1);
+ spin_unlock_irqrestore(&ch->lock, flags);
+ __napi_schedule_irqoff(&ch->tx_napi);
+ }
+ }
return status;
}
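
Both NAPI instances of a channel now read-modify-write the same XGMAC_DMA_CH_INT_EN register, which is why this patch adds a per-channel spinlock around every enable/disable call. Below, pthreads stand in for the rx and tx pollers in a host-side model of the hazard; without the mutex the unlocked |= may, timing permitting, lose one of the two updates. Illustrative only.

/*
 * Why the channel lock matters: rx and tx NAPI both read-modify-write
 * one shared interrupt-enable mask. Host-side model, not driver code.
 */
#include <pthread.h>
#include <stdio.h>

#define RX_BITS	0x1
#define TX_BITS	0x2

static unsigned int int_en;
static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

static void *enable_bits(void *arg)
{
	unsigned int bits = *(unsigned int *)arg;

	pthread_mutex_lock(&lock);
	int_en |= bits;		/* read-modify-write, as in the driver */
	pthread_mutex_unlock(&lock);
	return NULL;
}

int main(void)
{
	unsigned int rx = RX_BITS, tx = TX_BITS;
	pthread_t a, b;

	pthread_create(&a, NULL, enable_bits, &rx);
	pthread_create(&b, NULL, enable_bits, &tx);
	pthread_join(a, NULL);
	pthread_join(b, NULL);
	printf("INT_EN = %#x (expect 0x3)\n", int_en);
	return 0;
}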
@@ -2265,14 +2290,14 @@ static void stmmac_tx_timer(struct timer_list *t)
ch = &priv->channel[tx_q->queue_index];
- /*
- * If NAPI is already running we can miss some events. Let's rearm
- * the timer and try again.
- */
- if (likely(napi_schedule_prep(&ch->tx_napi)))
+ if (likely(napi_schedule_prep(&ch->tx_napi))) {
+ unsigned long flags;
+
+ spin_lock_irqsave(&ch->lock, flags);
+ stmmac_disable_dma_irq(priv, priv->ioaddr, ch->index, 0, 1);
+ spin_unlock_irqrestore(&ch->lock, flags);
__napi_schedule(&ch->tx_napi);
- else
- mod_timer(&tx_q->txtimer, STMMAC_COAL_TIMER(10));
+ }
}
/**
@@ -2624,6 +2649,14 @@ static int stmmac_hw_setup(struct net_device *dev, bool init_ptp)
if (priv->dma_cap.vlins)
stmmac_enable_vlan(priv, priv->hw, STMMAC_VLAN_INSERT);
+ /* TBS */
+ for (chan = 0; chan < tx_cnt; chan++) {
+ struct stmmac_tx_queue *tx_q = &priv->tx_queue[chan];
+ int enable = tx_q->tbs & STMMAC_TBS_AVAIL;
+
+ stmmac_enable_tbs(priv, priv->ioaddr, enable, chan);
+ }
+
/* Start the ball rolling... */
stmmac_start_all_dma(priv);
@@ -2653,8 +2686,7 @@ static int stmmac_open(struct net_device *dev)
u32 chan;
int ret;
- if (priv->hw->pcs != STMMAC_PCS_RGMII &&
- priv->hw->pcs != STMMAC_PCS_TBI &&
+ if (priv->hw->pcs != STMMAC_PCS_TBI &&
priv->hw->pcs != STMMAC_PCS_RTBI) {
ret = stmmac_init_phy(dev);
if (ret) {
@@ -2681,6 +2713,16 @@ static int stmmac_open(struct net_device *dev)
priv->rx_copybreak = STMMAC_RX_COPYBREAK;
+ /* Check TBS availability early, before DMA descriptors are allocated */
+ for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++) {
+ struct stmmac_tx_queue *tx_q = &priv->tx_queue[chan];
+ int tbs_en = priv->plat->tx_queues_cfg[chan].tbs_en;
+
+ tx_q->tbs |= tbs_en ? STMMAC_TBS_AVAIL : 0;
+ if (stmmac_enable_tbs(priv, priv->ioaddr, tbs_en, chan))
+ tx_q->tbs &= ~STMMAC_TBS_AVAIL;
+ }
+
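The tbs field uses two flag bits: STMMAC_TBS_AVAIL records that the platform requested TBS and the hardware accepted enhanced descriptors on that queue at open time, while STMMAC_TBS_EN is flipped later by the ETF qdisc offload; launch times are only written when both are set. A trivial sketch of the flag lifecycle, with the same bit values as the defines above:

/* Toy model of the two-level TBS flags; illustrative only. */
#include <stdio.h>

#define STMMAC_TBS_AVAIL	(1 << 0)
#define STMMAC_TBS_EN		(1 << 1)

int main(void)
{
	int tbs = 0;

	tbs |= STMMAC_TBS_AVAIL;	/* stmmac_open: hardware accepted EDSE */
	tbs |= STMMAC_TBS_EN;		/* tc_setup_etf: user enabled offload */
	printf("launch time used: %s\n", (tbs & STMMAC_TBS_EN) ? "yes" : "no");

	tbs &= ~STMMAC_TBS_EN;		/* ETF qdisc removed again */
	printf("launch time used: %s\n", (tbs & STMMAC_TBS_EN) ? "yes" : "no");
	return 0;
}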
ret = alloc_dma_desc_resources(priv);
if (ret < 0) {
netdev_err(priv->dev, "%s: DMA descriptors allocation failed\n",
@@ -2829,7 +2871,11 @@ static bool stmmac_vlan_insert(struct stmmac_priv *priv, struct sk_buff *skb,
tag = skb_vlan_tag_get(skb);
- p = tx_q->dma_tx + tx_q->cur_tx;
+ if (tx_q->tbs & STMMAC_TBS_AVAIL)
+ p = &tx_q->dma_entx[tx_q->cur_tx].basic;
+ else
+ p = &tx_q->dma_tx[tx_q->cur_tx];
+
if (stmmac_set_desc_vlan_tag(priv, p, tag, inner_tag, inner_type))
return false;
@@ -2864,7 +2910,11 @@ static void stmmac_tso_allocator(struct stmmac_priv *priv, dma_addr_t des,
tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, DMA_TX_SIZE);
WARN_ON(tx_q->tx_skbuff[tx_q->cur_tx]);
- desc = tx_q->dma_tx + tx_q->cur_tx;
+
+ if (tx_q->tbs & STMMAC_TBS_AVAIL)
+ desc = &tx_q->dma_entx[tx_q->cur_tx].basic;
+ else
+ desc = &tx_q->dma_tx[tx_q->cur_tx];
curr_addr = des + (total_len - tmp_len);
if (priv->dma_cap.addr64 <= 32)
@@ -2915,13 +2965,13 @@ static netdev_tx_t stmmac_tso_xmit(struct sk_buff *skb, struct net_device *dev)
{
struct dma_desc *desc, *first, *mss_desc = NULL;
struct stmmac_priv *priv = netdev_priv(dev);
+ int desc_size, tmp_pay_len = 0, first_tx;
int nfrags = skb_shinfo(skb)->nr_frags;
u32 queue = skb_get_queue_mapping(skb);
unsigned int first_entry, tx_packets;
- int tmp_pay_len = 0, first_tx;
struct stmmac_tx_queue *tx_q;
- u8 proto_hdr_len, hdr;
bool has_vlan, set_ic;
+ u8 proto_hdr_len, hdr;
u32 pay_len, mss;
dma_addr_t des;
int i;
@@ -2958,7 +3008,11 @@ static netdev_tx_t stmmac_tso_xmit(struct sk_buff *skb, struct net_device *dev)
/* set new MSS value if needed */
if (mss != tx_q->mss) {
- mss_desc = tx_q->dma_tx + tx_q->cur_tx;
+ if (tx_q->tbs & STMMAC_TBS_AVAIL)
+ mss_desc = &tx_q->dma_entx[tx_q->cur_tx].basic;
+ else
+ mss_desc = &tx_q->dma_tx[tx_q->cur_tx];
+
stmmac_set_mss(priv, mss_desc, mss);
tx_q->mss = mss;
tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, DMA_TX_SIZE);
@@ -2978,7 +3032,10 @@ static netdev_tx_t stmmac_tso_xmit(struct sk_buff *skb, struct net_device *dev)
first_entry = tx_q->cur_tx;
WARN_ON(tx_q->tx_skbuff[first_entry]);
- desc = tx_q->dma_tx + first_entry;
+ if (tx_q->tbs & STMMAC_TBS_AVAIL)
+ desc = &tx_q->dma_entx[first_entry].basic;
+ else
+ desc = &tx_q->dma_tx[first_entry];
first = desc;
if (has_vlan)
@@ -3050,7 +3107,11 @@ static netdev_tx_t stmmac_tso_xmit(struct sk_buff *skb, struct net_device *dev)
set_ic = false;
if (set_ic) {
- desc = &tx_q->dma_tx[tx_q->cur_tx];
+ if (tx_q->tbs & STMMAC_TBS_AVAIL)
+ desc = &tx_q->dma_entx[tx_q->cur_tx].basic;
+ else
+ desc = &tx_q->dma_tx[tx_q->cur_tx];
+
tx_q->tx_count_frames = 0;
stmmac_set_tx_ic(priv, desc);
priv->xstats.tx_set_ic_bit++;
@@ -3113,16 +3174,18 @@ static netdev_tx_t stmmac_tso_xmit(struct sk_buff *skb, struct net_device *dev)
pr_info("%s: curr=%d dirty=%d f=%d, e=%d, f_p=%p, nfrags %d\n",
__func__, tx_q->cur_tx, tx_q->dirty_tx, first_entry,
tx_q->cur_tx, first, nfrags);
-
- stmmac_display_ring(priv, (void *)tx_q->dma_tx, DMA_TX_SIZE, 0);
-
pr_info(">>> frame to be transmitted: ");
print_pkt(skb->data, skb_headlen(skb));
}
netdev_tx_sent_queue(netdev_get_tx_queue(dev, queue), skb->len);
- tx_q->tx_tail_addr = tx_q->dma_tx_phy + (tx_q->cur_tx * sizeof(*desc));
+ if (tx_q->tbs & STMMAC_TBS_AVAIL)
+ desc_size = sizeof(struct dma_edesc);
+ else
+ desc_size = sizeof(struct dma_desc);
+
+ tx_q->tx_tail_addr = tx_q->dma_tx_phy + (tx_q->cur_tx * desc_size);
stmmac_set_tx_tail_ptr(priv, priv->ioaddr, tx_q->tx_tail_addr, queue);
stmmac_tx_timer_arm(priv, queue);
@@ -3152,10 +3215,11 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
u32 queue = skb_get_queue_mapping(skb);
int nfrags = skb_shinfo(skb)->nr_frags;
int gso = skb_shinfo(skb)->gso_type;
+ struct dma_edesc *tbs_desc = NULL;
+ int entry, desc_size, first_tx;
struct dma_desc *desc, *first;
struct stmmac_tx_queue *tx_q;
bool has_vlan, set_ic;
- int entry, first_tx;
dma_addr_t des;
tx_q = &priv->tx_queue[queue];
@@ -3195,6 +3259,8 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
if (likely(priv->extend_desc))
desc = (struct dma_desc *)(tx_q->dma_etx + entry);
+ else if (tx_q->tbs & STMMAC_TBS_AVAIL)
+ desc = &tx_q->dma_entx[entry].basic;
else
desc = tx_q->dma_tx + entry;
@@ -3224,6 +3290,8 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
if (likely(priv->extend_desc))
desc = (struct dma_desc *)(tx_q->dma_etx + entry);
+ else if (tx_q->tbs & STMMAC_TBS_AVAIL)
+ desc = &tx_q->dma_entx[entry].basic;
else
desc = tx_q->dma_tx + entry;
@@ -3270,6 +3338,8 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
if (set_ic) {
if (likely(priv->extend_desc))
desc = &tx_q->dma_etx[entry].basic;
+ else if (tx_q->tbs & STMMAC_TBS_AVAIL)
+ desc = &tx_q->dma_entx[entry].basic;
else
desc = &tx_q->dma_tx[entry];
@@ -3287,20 +3357,11 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
tx_q->cur_tx = entry;
if (netif_msg_pktdata(priv)) {
- void *tx_head;
-
netdev_dbg(priv->dev,
"%s: curr=%d dirty=%d f=%d, e=%d, first=%p, nfrags=%d",
__func__, tx_q->cur_tx, tx_q->dirty_tx, first_entry,
entry, first, nfrags);
- if (priv->extend_desc)
- tx_head = (void *)tx_q->dma_etx;
- else
- tx_head = (void *)tx_q->dma_tx;
-
- stmmac_display_ring(priv, tx_head, DMA_TX_SIZE, false);
-
netdev_dbg(priv->dev, ">>> frame to be transmitted: ");
print_pkt(skb->data, skb->len);
}
@@ -3346,12 +3407,19 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
/* Prepare the first descriptor setting the OWN bit too */
stmmac_prepare_tx_desc(priv, first, 1, nopaged_len,
- csum_insertion, priv->mode, 1, last_segment,
+ csum_insertion, priv->mode, 0, last_segment,
skb->len);
- } else {
- stmmac_set_tx_owner(priv, first);
}
+ if (tx_q->tbs & STMMAC_TBS_EN) {
+ struct timespec64 ts = ns_to_timespec64(skb->tstamp);
+
+ tbs_desc = &tx_q->dma_entx[first_entry];
+ stmmac_set_desc_tbs(priv, tbs_desc, ts.tv_sec, ts.tv_nsec);
+ }
+
+ stmmac_set_tx_owner(priv, first);
+
/* The own bit must be the latest setting done when prepare the
* descriptor and then barrier is needed to make sure that
* all is coherent before granting the DMA engine.
@@ -3362,7 +3430,14 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
stmmac_enable_dma_transmission(priv, priv->ioaddr);
- tx_q->tx_tail_addr = tx_q->dma_tx_phy + (tx_q->cur_tx * sizeof(*desc));
+ if (likely(priv->extend_desc))
+ desc_size = sizeof(struct dma_extended_desc);
+ else if (tx_q->tbs & STMMAC_TBS_AVAIL)
+ desc_size = sizeof(struct dma_edesc);
+ else
+ desc_size = sizeof(struct dma_desc);
+
+ tx_q->tx_tail_addr = tx_q->dma_tx_phy + (tx_q->cur_tx * desc_size);
stmmac_set_tx_tail_ptr(priv, priv->ioaddr, tx_q->tx_tail_addr, queue);
stmmac_tx_timer_arm(priv, queue);
@@ -3751,8 +3826,14 @@ static int stmmac_napi_poll_rx(struct napi_struct *napi, int budget)
priv->xstats.napi_poll++;
work_done = stmmac_rx(priv, budget, chan);
- if (work_done < budget && napi_complete_done(napi, work_done))
- stmmac_enable_dma_irq(priv, priv->ioaddr, chan);
+ if (work_done < budget && napi_complete_done(napi, work_done)) {
+ unsigned long flags;
+
+ spin_lock_irqsave(&ch->lock, flags);
+ stmmac_enable_dma_irq(priv, priv->ioaddr, chan, 1, 0);
+ spin_unlock_irqrestore(&ch->lock, flags);
+ }
+
return work_done;
}
@@ -3761,7 +3842,6 @@ static int stmmac_napi_poll_tx(struct napi_struct *napi, int budget)
struct stmmac_channel *ch =
container_of(napi, struct stmmac_channel, tx_napi);
struct stmmac_priv *priv = ch->priv_data;
- struct stmmac_tx_queue *tx_q;
u32 chan = ch->index;
int work_done;
@@ -3770,15 +3850,12 @@ static int stmmac_napi_poll_tx(struct napi_struct *napi, int budget)
work_done = stmmac_tx_clean(priv, DMA_TX_SIZE, chan);
work_done = min(work_done, budget);
- if (work_done < budget)
- napi_complete_done(napi, work_done);
+ if (work_done < budget && napi_complete_done(napi, work_done)) {
+ unsigned long flags;
- /* Force transmission restart */
- tx_q = &priv->tx_queue[chan];
- if (tx_q->cur_tx != tx_q->dirty_tx) {
- stmmac_enable_dma_transmission(priv, priv->ioaddr);
- stmmac_set_tx_tail_ptr(priv, priv->ioaddr, tx_q->tx_tail_addr,
- chan);
+ spin_lock_irqsave(&ch->lock, flags);
+ stmmac_enable_dma_irq(priv, priv->ioaddr, chan, 0, 1);
+ spin_unlock_irqrestore(&ch->lock, flags);
}
return work_done;
@@ -3792,7 +3869,7 @@ static int stmmac_napi_poll_tx(struct napi_struct *napi, int budget)
* netdev structure and arrange for the device to be reset to a sane state
* in order to transmit a new packet.
*/
-static void stmmac_tx_timeout(struct net_device *dev)
+static void stmmac_tx_timeout(struct net_device *dev, unsigned int txqueue)
{
struct stmmac_priv *priv = netdev_priv(dev);
@@ -4078,6 +4155,10 @@ static int stmmac_setup_tc(struct net_device *ndev, enum tc_setup_type type,
priv, priv, true);
case TC_SETUP_QDISC_CBS:
return stmmac_tc_setup_cbs(priv, priv, type_data);
+ case TC_SETUP_QDISC_TAPRIO:
+ return stmmac_tc_setup_taprio(priv, priv, type_data);
+ case TC_SETUP_QDISC_ETF:
+ return stmmac_tc_setup_etf(priv, priv, type_data);
default:
return -EOPNOTSUPP;
}
@@ -4181,7 +4262,7 @@ static int stmmac_rings_status_show(struct seq_file *seq, void *v)
seq_printf(seq, "Extended descriptor ring:\n");
sysfs_display_ring((void *)tx_q->dma_etx,
DMA_TX_SIZE, 1, seq);
- } else {
+ } else if (!(tx_q->tbs & STMMAC_TBS_AVAIL)) {
seq_printf(seq, "Descriptor ring:\n");
sysfs_display_ring((void *)tx_q->dma_tx,
DMA_TX_SIZE, 0, seq);
@@ -4250,9 +4331,44 @@ static int stmmac_dma_cap_show(struct seq_file *seq, void *v)
priv->dma_cap.number_rx_channel);
seq_printf(seq, "\tNumber of Additional TX channel: %d\n",
priv->dma_cap.number_tx_channel);
+ seq_printf(seq, "\tNumber of Additional RX queues: %d\n",
+ priv->dma_cap.number_rx_queues);
+ seq_printf(seq, "\tNumber of Additional TX queues: %d\n",
+ priv->dma_cap.number_tx_queues);
seq_printf(seq, "\tEnhanced descriptors: %s\n",
(priv->dma_cap.enh_desc) ? "Y" : "N");
-
+ seq_printf(seq, "\tTX Fifo Size: %d\n", priv->dma_cap.tx_fifo_size);
+ seq_printf(seq, "\tRX Fifo Size: %d\n", priv->dma_cap.rx_fifo_size);
+ seq_printf(seq, "\tHash Table Size: %d\n", priv->dma_cap.hash_tb_sz);
+ seq_printf(seq, "\tTSO: %s\n", priv->dma_cap.tsoen ? "Y" : "N");
+ seq_printf(seq, "\tNumber of PPS Outputs: %d\n",
+ priv->dma_cap.pps_out_num);
+ seq_printf(seq, "\tSafety Features: %s\n",
+ priv->dma_cap.asp ? "Y" : "N");
+ seq_printf(seq, "\tFlexible RX Parser: %s\n",
+ priv->dma_cap.frpsel ? "Y" : "N");
+ seq_printf(seq, "\tEnhanced Addressing: %d\n",
+ priv->dma_cap.addr64);
+ seq_printf(seq, "\tReceive Side Scaling: %s\n",
+ priv->dma_cap.rssen ? "Y" : "N");
+ seq_printf(seq, "\tVLAN Hash Filtering: %s\n",
+ priv->dma_cap.vlhash ? "Y" : "N");
+ seq_printf(seq, "\tSplit Header: %s\n",
+ priv->dma_cap.sphen ? "Y" : "N");
+ seq_printf(seq, "\tVLAN TX Insertion: %s\n",
+ priv->dma_cap.vlins ? "Y" : "N");
+ seq_printf(seq, "\tDouble VLAN: %s\n",
+ priv->dma_cap.dvlan ? "Y" : "N");
+ seq_printf(seq, "\tNumber of L3/L4 Filters: %d\n",
+ priv->dma_cap.l3l4fnum);
+ seq_printf(seq, "\tARP Offloading: %s\n",
+ priv->dma_cap.arpoffsel ? "Y" : "N");
+ seq_printf(seq, "\tEnhancements to Scheduled Traffic (EST): %s\n",
+ priv->dma_cap.estsel ? "Y" : "N");
+ seq_printf(seq, "\tFrame Preemption (FPE): %s\n",
+ priv->dma_cap.fpesel ? "Y" : "N");
+ seq_printf(seq, "\tTime-Based Scheduling (TBS): %s\n",
+ priv->dma_cap.tbssel ? "Y" : "N");
return 0;
}
DEFINE_SHOW_ATTRIBUTE(stmmac_dma_cap);
@@ -4728,6 +4844,7 @@ int stmmac_dvr_probe(struct device *device,
for (queue = 0; queue < maxq; queue++) {
struct stmmac_channel *ch = &priv->channel[queue];
+ spin_lock_init(&ch->lock);
ch->priv_data = priv;
ch->index = queue;
@@ -4757,8 +4874,7 @@ int stmmac_dvr_probe(struct device *device,
stmmac_check_pcs_mode(priv);
- if (priv->hw->pcs != STMMAC_PCS_RGMII &&
- priv->hw->pcs != STMMAC_PCS_TBI &&
+ if (priv->hw->pcs != STMMAC_PCS_TBI &&
priv->hw->pcs != STMMAC_PCS_RTBI) {
/* MDIO bus Registration */
ret = stmmac_mdio_register(ndev);
@@ -4792,8 +4908,7 @@ int stmmac_dvr_probe(struct device *device,
error_netdev_register:
phylink_destroy(priv->phylink);
error_phy_setup:
- if (priv->hw->pcs != STMMAC_PCS_RGMII &&
- priv->hw->pcs != STMMAC_PCS_TBI &&
+ if (priv->hw->pcs != STMMAC_PCS_TBI &&
priv->hw->pcs != STMMAC_PCS_RTBI)
stmmac_mdio_unregister(ndev);
error_mdio_register:
@@ -4838,8 +4953,7 @@ int stmmac_dvr_remove(struct device *dev)
reset_control_assert(priv->plat->stmmac_rst);
clk_disable_unprepare(priv->plat->pclk);
clk_disable_unprepare(priv->plat->stmmac_clk);
- if (priv->hw->pcs != STMMAC_PCS_RGMII &&
- priv->hw->pcs != STMMAC_PCS_TBI &&
+ if (priv->hw->pcs != STMMAC_PCS_TBI &&
priv->hw->pcs != STMMAC_PCS_RTBI)
stmmac_mdio_unregister(ndev);
destroy_workqueue(priv->wq);
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c
index 8237dbc3e991..623521052152 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c
@@ -65,7 +65,6 @@ static void common_default_data(struct plat_stmmacenet_data *plat)
plat->force_sf_dma_mode = 1;
plat->mdio_bus_data->needs_reset = true;
- plat->mdio_bus_data->phy_mask = 0;
/* Set default value for multicast hash bins */
plat->multicast_filter_bins = HASH_TABLE_SIZE;
@@ -154,8 +153,6 @@ static int intel_mgbe_common_data(struct pci_dev *pdev,
plat->tx_queues_cfg[6].weight = 0x0F;
plat->tx_queues_cfg[7].weight = 0x10;
- plat->mdio_bus_data->phy_mask = 0;
-
plat->dma_cfg->pbl = 32;
plat->dma_cfg->pblx8 = true;
plat->dma_cfg->fixed_burst = 0;
@@ -386,8 +383,6 @@ static int snps_gmac5_default_data(struct pci_dev *pdev,
plat->tso_en = 1;
plat->pmt = 1;
- plat->mdio_bus_data->phy_mask = 0;
-
/* Set default value for multicast hash bins */
plat->multicast_filter_bins = HASH_TABLE_SIZE;
@@ -406,6 +401,8 @@ static int snps_gmac5_default_data(struct pci_dev *pdev,
plat->tx_queues_cfg[i].use_prio = false;
plat->tx_queues_cfg[i].mode_to_use = MTL_QUEUE_DCB;
plat->tx_queues_cfg[i].weight = 25;
+ if (i > 0)
+ plat->tx_queues_cfg[i].tbs_en = 1;
}
plat->rx_sched_algorithm = MTL_RX_ALGORITHM_SP;
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
index 4775f49d7f3b..d10ac54bf385 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
@@ -412,9 +412,9 @@ stmmac_probe_config_dt(struct platform_device *pdev, const char **mac)
*mac = NULL;
}
- rc = of_get_phy_mode(np, &plat->phy_interface);
- if (rc)
- return ERR_PTR(rc);
+ plat->phy_interface = device_get_phy_mode(&pdev->dev);
+ if (plat->phy_interface < 0)
+ return ERR_PTR(plat->phy_interface);
plat->interface = stmmac_of_get_mac_mode(np);
if (plat->interface < 0)
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_selftests.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_selftests.c
index 13227909287c..2aba2673d6c3 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_selftests.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_selftests.c
@@ -14,6 +14,7 @@
#include <linux/phy.h>
#include <linux/udp.h>
#include <net/pkt_cls.h>
+#include <net/pkt_sched.h>
#include <net/tcp.h>
#include <net/udp.h>
#include <net/tc_act/tc_gact.h>
@@ -50,6 +51,7 @@ struct stmmac_packet_attrs {
u8 id;
int sarc;
u16 queue_mapping;
+ u64 timestamp;
};
static u8 stmmac_test_next_id;
@@ -80,7 +82,7 @@ static struct sk_buff *stmmac_test_get_udp_skb(struct stmmac_priv *priv,
if (attr->max_size && (attr->max_size > size))
size = attr->max_size;
- skb = netdev_alloc_skb_ip_align(priv->dev, size);
+ skb = netdev_alloc_skb(priv->dev, size);
if (!skb)
return NULL;
@@ -208,6 +210,9 @@ static struct sk_buff *stmmac_test_get_udp_skb(struct stmmac_priv *priv,
skb->pkt_type = PACKET_HOST;
skb->dev = priv->dev;
+ if (attr->timestamp)
+ skb->tstamp = ns_to_ktime(attr->timestamp);
+
return skb;
}
@@ -244,6 +249,8 @@ static int stmmac_test_loopback_validate(struct sk_buff *skb,
struct net_device *orig_ndev)
{
struct stmmac_test_priv *tpriv = pt->af_packet_priv;
+ unsigned char *src = tpriv->packet->src;
+ unsigned char *dst = tpriv->packet->dst;
struct stmmachdr *shdr;
struct ethhdr *ehdr;
struct udphdr *uhdr;
@@ -260,15 +267,15 @@ static int stmmac_test_loopback_validate(struct sk_buff *skb,
goto out;
ehdr = (struct ethhdr *)skb_mac_header(skb);
- if (tpriv->packet->dst) {
- if (!ether_addr_equal(ehdr->h_dest, tpriv->packet->dst))
+ if (dst) {
+ if (!ether_addr_equal_unaligned(ehdr->h_dest, dst))
goto out;
}
if (tpriv->packet->sarc) {
- if (!ether_addr_equal(ehdr->h_source, ehdr->h_dest))
+ if (!ether_addr_equal_unaligned(ehdr->h_source, ehdr->h_dest))
goto out;
- } else if (tpriv->packet->src) {
- if (!ether_addr_equal(ehdr->h_source, tpriv->packet->src))
+ } else if (src) {
+ if (!ether_addr_equal_unaligned(ehdr->h_source, src))
goto out;
}
@@ -339,8 +346,7 @@ static int __stmmac_test_loopback(struct stmmac_priv *priv,
goto cleanup;
}
- skb_set_queue_mapping(skb, attr->queue_mapping);
- ret = dev_queue_xmit(skb);
+ ret = dev_direct_xmit(skb, attr->queue_mapping);
if (ret)
goto cleanup;
@@ -714,7 +720,7 @@ static int stmmac_test_flowctrl_validate(struct sk_buff *skb,
struct ethhdr *ehdr;
ehdr = (struct ethhdr *)skb_mac_header(skb);
- if (!ether_addr_equal(ehdr->h_source, orig_ndev->dev_addr))
+ if (!ether_addr_equal_unaligned(ehdr->h_source, orig_ndev->dev_addr))
goto out;
if (ehdr->h_proto != htons(ETH_P_PAUSE))
goto out;
@@ -851,12 +857,16 @@ static int stmmac_test_vlan_validate(struct sk_buff *skb,
if (tpriv->vlan_id) {
if (skb->vlan_proto != htons(proto))
goto out;
- if (skb->vlan_tci != tpriv->vlan_id)
+ if (skb->vlan_tci != tpriv->vlan_id) {
+ /* Means filter did not work. */
+ tpriv->ok = false;
+ complete(&tpriv->comp);
goto out;
+ }
}
ehdr = (struct ethhdr *)skb_mac_header(skb);
- if (!ether_addr_equal(ehdr->h_dest, tpriv->packet->dst))
+ if (!ether_addr_equal_unaligned(ehdr->h_dest, tpriv->packet->dst))
goto out;
ihdr = ip_hdr(skb);
@@ -926,8 +936,7 @@ static int __stmmac_test_vlanfilt(struct stmmac_priv *priv)
goto vlan_del;
}
- skb_set_queue_mapping(skb, 0);
- ret = dev_queue_xmit(skb);
+ ret = dev_direct_xmit(skb, 0);
if (ret)
goto vlan_del;
@@ -965,6 +974,9 @@ static int stmmac_test_vlanfilt_perfect(struct stmmac_priv *priv)
{
int ret, prev_cap = priv->dma_cap.vlhash;
+ if (!(priv->dev->features & NETIF_F_HW_VLAN_CTAG_FILTER))
+ return -EOPNOTSUPP;
+
priv->dma_cap.vlhash = 0;
ret = __stmmac_test_vlanfilt(priv);
priv->dma_cap.vlhash = prev_cap;
@@ -1018,8 +1030,7 @@ static int __stmmac_test_dvlanfilt(struct stmmac_priv *priv)
goto vlan_del;
}
- skb_set_queue_mapping(skb, 0);
- ret = dev_queue_xmit(skb);
+ ret = dev_direct_xmit(skb, 0);
if (ret)
goto vlan_del;
@@ -1057,6 +1068,9 @@ static int stmmac_test_dvlanfilt_perfect(struct stmmac_priv *priv)
{
int ret, prev_cap = priv->dma_cap.vlhash;
+ if (!(priv->dev->features & NETIF_F_HW_VLAN_STAG_FILTER))
+ return -EOPNOTSUPP;
+
priv->dma_cap.vlhash = 0;
ret = __stmmac_test_dvlanfilt(priv);
priv->dma_cap.vlhash = prev_cap;
@@ -1286,8 +1300,7 @@ static int stmmac_test_vlanoff_common(struct stmmac_priv *priv, bool svlan)
__vlan_hwaccel_put_tag(skb, htons(proto), tpriv->vlan_id);
skb->protocol = htons(proto);
- skb_set_queue_mapping(skb, 0);
- ret = dev_queue_xmit(skb);
+ ret = dev_direct_xmit(skb, 0);
if (ret)
goto vlan_del;
@@ -1323,16 +1336,19 @@ static int __stmmac_test_l3filt(struct stmmac_priv *priv, u32 dst, u32 src,
struct stmmac_packet_attrs attr = { };
struct flow_dissector *dissector;
struct flow_cls_offload *cls;
+ int ret, old_enable = 0;
struct flow_rule *rule;
- int ret;
if (!tc_can_offload(priv->dev))
return -EOPNOTSUPP;
if (!priv->dma_cap.l3l4fnum)
return -EOPNOTSUPP;
- if (priv->rss.enable)
+ if (priv->rss.enable) {
+ old_enable = priv->rss.enable;
+ priv->rss.enable = false;
stmmac_rss_configure(priv, priv->hw, NULL,
priv->plat->rx_queues_to_use);
+ }
dissector = kzalloc(sizeof(*dissector), GFP_KERNEL);
if (!dissector) {
@@ -1399,7 +1415,8 @@ cleanup_cls:
cleanup_dissector:
kfree(dissector);
cleanup_rss:
- if (priv->rss.enable) {
+ if (old_enable) {
+ priv->rss.enable = old_enable;
stmmac_rss_configure(priv, priv->hw, &priv->rss,
priv->plat->rx_queues_to_use);
}
@@ -1444,16 +1461,19 @@ static int __stmmac_test_l4filt(struct stmmac_priv *priv, u32 dst, u32 src,
struct stmmac_packet_attrs attr = { };
struct flow_dissector *dissector;
struct flow_cls_offload *cls;
+ int ret, old_enable = 0;
struct flow_rule *rule;
- int ret;
if (!tc_can_offload(priv->dev))
return -EOPNOTSUPP;
if (!priv->dma_cap.l3l4fnum)
return -EOPNOTSUPP;
- if (priv->rss.enable)
+ if (priv->rss.enable) {
+ old_enable = priv->rss.enable;
+ priv->rss.enable = false;
stmmac_rss_configure(priv, priv->hw, NULL,
priv->plat->rx_queues_to_use);
+ }
dissector = kzalloc(sizeof(*dissector), GFP_KERNEL);
if (!dissector) {
@@ -1525,7 +1545,8 @@ cleanup_cls:
cleanup_dissector:
kfree(dissector);
cleanup_rss:
- if (priv->rss.enable) {
+ if (old_enable) {
+ priv->rss.enable = old_enable;
stmmac_rss_configure(priv, priv->hw, &priv->rss,
priv->plat->rx_queues_to_use);
}
@@ -1578,7 +1599,7 @@ static int stmmac_test_arp_validate(struct sk_buff *skb,
struct arphdr *ahdr;
ehdr = (struct ethhdr *)skb_mac_header(skb);
- if (!ether_addr_equal(ehdr->h_dest, tpriv->packet->src))
+ if (!ether_addr_equal_unaligned(ehdr->h_dest, tpriv->packet->src))
goto out;
ahdr = arp_hdr(skb);
@@ -1639,8 +1660,7 @@ static int stmmac_test_arpoffload(struct stmmac_priv *priv)
if (ret)
goto cleanup;
- skb_set_queue_mapping(skb, 0);
- ret = dev_queue_xmit(skb);
+ ret = dev_direct_xmit(skb, 0);
if (ret)
goto cleanup_promisc;
@@ -1728,6 +1748,68 @@ static int stmmac_test_sph(struct stmmac_priv *priv)
return 0;
}
+static int stmmac_test_tbs(struct stmmac_priv *priv)
+{
+#define STMMAC_TBS_LT_OFFSET (500 * 1000 * 1000) /* 500 ms */
+ struct stmmac_packet_attrs attr = { };
+ struct tc_etf_qopt_offload qopt;
+ u64 start_time, curr_time = 0;
+ unsigned long flags;
+ int ret, i;
+
+ if (!priv->hwts_tx_en)
+ return -EOPNOTSUPP;
+
+ /* Find first TBS enabled Queue, if any */
+ for (i = 0; i < priv->plat->tx_queues_to_use; i++)
+ if (priv->tx_queue[i].tbs & STMMAC_TBS_AVAIL)
+ break;
+
+ if (i >= priv->plat->tx_queues_to_use)
+ return -EOPNOTSUPP;
+
+ qopt.enable = true;
+ qopt.queue = i;
+
+ ret = stmmac_tc_setup_etf(priv, priv, &qopt);
+ if (ret)
+ return ret;
+
+ spin_lock_irqsave(&priv->ptp_lock, flags);
+ stmmac_get_systime(priv, priv->ptpaddr, &curr_time);
+ spin_unlock_irqrestore(&priv->ptp_lock, flags);
+
+ if (!curr_time) {
+ ret = -EOPNOTSUPP;
+ goto fail_disable;
+ }
+
+ start_time = curr_time;
+ curr_time += STMMAC_TBS_LT_OFFSET;
+
+ attr.dst = priv->dev->dev_addr;
+ attr.timestamp = curr_time;
+ attr.timeout = nsecs_to_jiffies(2 * STMMAC_TBS_LT_OFFSET);
+ attr.queue_mapping = i;
+
+ ret = __stmmac_test_loopback(priv, &attr);
+ if (ret)
+ goto fail_disable;
+
+ /* Check if expected time has elapsed */
+ spin_lock_irqsave(&priv->ptp_lock, flags);
+ stmmac_get_systime(priv, priv->ptpaddr, &curr_time);
+ spin_unlock_irqrestore(&priv->ptp_lock, flags);
+
+ if ((curr_time - start_time) < STMMAC_TBS_LT_OFFSET)
+ ret = -EINVAL;
+
+fail_disable:
+ qopt.enable = false;
+ stmmac_tc_setup_etf(priv, priv, &qopt);
+ return ret;
+}
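
The pass criterion above is simply that the PTP clock advanced by at least the programmed launch offset before the looped-back frame arrived; a frame transmitted immediately would come back well inside 500 ms. A trivial model of that check, with assumed timestamps:

/*
 * Model of the selftest's pass criterion: the frame is scheduled
 * STMMAC_TBS_LT_OFFSET ns into the future, so the PTP clock must have
 * advanced at least that much when the echo lands. Assumed values.
 */
#include <stdio.h>

#define LT_OFFSET_NS	(500ull * 1000 * 1000)	/* 500 ms */

int main(void)
{
	unsigned long long start = 1000000000ull;		/* assumed t0 */
	unsigned long long end = start + LT_OFFSET_NS + 12345;	/* assumed t1 */

	printf("TBS selftest: %s\n",
	       (end - start) < LT_OFFSET_NS ? "FAIL (sent too early)" : "ok");
	return 0;
}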
+
#define STMMAC_LOOPBACK_NONE 0
#define STMMAC_LOOPBACK_MAC 1
#define STMMAC_LOOPBACK_PHY 2
@@ -1861,6 +1943,10 @@ static const struct stmmac_test {
.name = "Split Header ",
.lb = STMMAC_LOOPBACK_PHY,
.fn = stmmac_test_sph,
+ }, {
+ .name = "TBS (ETF Scheduler) ",
+ .lb = STMMAC_LOOPBACK_PHY,
+ .fn = stmmac_test_tbs,
},
};
@@ -1869,7 +1955,6 @@ void stmmac_selftest_run(struct net_device *dev,
{
struct stmmac_priv *priv = netdev_priv(dev);
int count = stmmac_selftest_get_count(priv);
- int carrier = netif_carrier_ok(dev);
int i, ret;
memset(buf, 0, sizeof(*buf) * count);
@@ -1879,15 +1964,12 @@ void stmmac_selftest_run(struct net_device *dev,
netdev_err(priv->dev, "Only offline tests are supported\n");
etest->flags |= ETH_TEST_FL_FAILED;
return;
- } else if (!carrier) {
+ } else if (!netif_carrier_ok(dev)) {
netdev_err(priv->dev, "You need valid Link to execute tests\n");
etest->flags |= ETH_TEST_FL_FAILED;
return;
}
- /* We don't want extra traffic */
- netif_carrier_off(dev);
-
/* Wait for queues drain */
msleep(200);
@@ -1942,10 +2024,6 @@ void stmmac_selftest_run(struct net_device *dev,
break;
}
}
-
- /* Restart everything */
- if (carrier)
- netif_carrier_on(dev);
}
void stmmac_selftest_get_strings(struct stmmac_priv *priv, u8 *data)
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c
index 7d972e0fd2b0..7a01dee2f9a8 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c
@@ -577,6 +577,10 @@ static int tc_setup_cls(struct stmmac_priv *priv,
{
int ret = 0;
+ /* When RSS is enabled, the filtering will be bypassed */
+ if (priv->rss.enable)
+ return -EBUSY;
+
switch (cls->command) {
case FLOW_CLS_REPLACE:
ret = tc_add_flow(priv, cls);
@@ -591,9 +595,167 @@ static int tc_setup_cls(struct stmmac_priv *priv,
return ret;
}
+static int tc_setup_taprio(struct stmmac_priv *priv,
+ struct tc_taprio_qopt_offload *qopt)
+{
+ u32 size, wid = priv->dma_cap.estwid, dep = priv->dma_cap.estdep;
+ struct plat_stmmacenet_data *plat = priv->plat;
+ struct timespec64 time;
+ bool fpe = false;
+ int i, ret = 0;
+ u64 ctr;
+
+ if (!priv->dma_cap.estsel)
+ return -EOPNOTSUPP;
+
+ switch (wid) {
+ case 0x1:
+ wid = 16;
+ break;
+ case 0x2:
+ wid = 20;
+ break;
+ case 0x3:
+ wid = 24;
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ switch (dep) {
+ case 0x1:
+ dep = 64;
+ break;
+ case 0x2:
+ dep = 128;
+ break;
+ case 0x3:
+ dep = 256;
+ break;
+ case 0x4:
+ dep = 512;
+ break;
+ case 0x5:
+ dep = 1024;
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ if (!qopt->enable)
+ goto disable;
+ if (qopt->num_entries >= dep)
+ return -EINVAL;
+ if (!qopt->base_time)
+ return -ERANGE;
+ if (!qopt->cycle_time)
+ return -ERANGE;
+
+ if (!plat->est) {
+ plat->est = devm_kzalloc(priv->device, sizeof(*plat->est),
+ GFP_KERNEL);
+ if (!plat->est)
+ return -ENOMEM;
+ } else {
+ memset(plat->est, 0, sizeof(*plat->est));
+ }
+
+ size = qopt->num_entries;
+
+ priv->plat->est->gcl_size = size;
+ priv->plat->est->enable = qopt->enable;
+
+ for (i = 0; i < size; i++) {
+ s64 delta_ns = qopt->entries[i].interval;
+ u32 gates = qopt->entries[i].gate_mask;
+
+ if (delta_ns > GENMASK(wid, 0))
+ return -ERANGE;
+ if (gates > GENMASK(31 - wid, 0))
+ return -ERANGE;
+
+ switch (qopt->entries[i].command) {
+ case TC_TAPRIO_CMD_SET_GATES:
+ if (fpe)
+ return -EINVAL;
+ break;
+ case TC_TAPRIO_CMD_SET_AND_HOLD:
+ gates |= BIT(0);
+ fpe = true;
+ break;
+ case TC_TAPRIO_CMD_SET_AND_RELEASE:
+ gates &= ~BIT(0);
+ fpe = true;
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ priv->plat->est->gcl[i] = delta_ns | (gates << wid);
+ }
+
+ /* Adjust for real system time */
+ time = ktime_to_timespec64(qopt->base_time);
+ priv->plat->est->btr[0] = (u32)time.tv_nsec;
+ priv->plat->est->btr[1] = (u32)time.tv_sec;
+
+ ctr = qopt->cycle_time;
+ priv->plat->est->ctr[0] = do_div(ctr, NSEC_PER_SEC);
+ priv->plat->est->ctr[1] = (u32)ctr;
+
+ if (fpe && !priv->dma_cap.fpesel)
+ return -EOPNOTSUPP;
+
+ ret = stmmac_fpe_configure(priv, priv->ioaddr,
+ priv->plat->tx_queues_to_use,
+ priv->plat->rx_queues_to_use, fpe);
+ if (ret && fpe) {
+ netdev_err(priv->dev, "failed to enable Frame Preemption\n");
+ return ret;
+ }
+
+ ret = stmmac_est_configure(priv, priv->ioaddr, priv->plat->est,
+ priv->plat->clk_ptp_rate);
+ if (ret) {
+ netdev_err(priv->dev, "failed to configure EST\n");
+ goto disable;
+ }
+
+ netdev_info(priv->dev, "configured EST\n");
+ return 0;
+
+disable:
+ /* est is NULL when taprio was never enabled before this call */
+ if (priv->plat->est) {
+ priv->plat->est->enable = false;
+ stmmac_est_configure(priv, priv->ioaddr, priv->plat->est,
+ priv->plat->clk_ptp_rate);
+ }
+ return ret;
+}
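
Each GCL word packs the time interval into the low wid bits and the gate mask into the remaining high bits, with gate bit 0 doubling as the hold/release flag when frame preemption is in use. A worked example, assuming ESTWID decodes to 24 interval bits; the bounds checks mirror the -ERANGE checks above in spirit:

/* Worked example of the GCL entry packing; assumed wid = 24. */
#include <stdio.h>

int main(void)
{
	unsigned int wid = 24;		/* assumed ESTWID decode */
	unsigned int delta_ns = 250000;	/* 250 us time slot */
	unsigned int gates = 0x5;	/* queues 0 and 2 open */
	unsigned int entry;

	if (delta_ns >= (1u << wid) || gates >= (1u << (32 - wid)))
		return 1;		/* interval or gates out of range */

	entry = delta_ns | (gates << wid);
	printf("gcl entry = %#010x\n", entry);
	return 0;
}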
+
+static int tc_setup_etf(struct stmmac_priv *priv,
+ struct tc_etf_qopt_offload *qopt)
+{
+ if (!priv->dma_cap.tbssel)
+ return -EOPNOTSUPP;
+ if (qopt->queue >= priv->plat->tx_queues_to_use)
+ return -EINVAL;
+ if (!(priv->tx_queue[qopt->queue].tbs & STMMAC_TBS_AVAIL))
+ return -EINVAL;
+
+ if (qopt->enable)
+ priv->tx_queue[qopt->queue].tbs |= STMMAC_TBS_EN;
+ else
+ priv->tx_queue[qopt->queue].tbs &= ~STMMAC_TBS_EN;
+
+ netdev_info(priv->dev, "%s ETF for Queue %d\n",
+ qopt->enable ? "enabled" : "disabled", qopt->queue);
+ return 0;
+}
+
const struct stmmac_tc_ops dwmac510_tc_ops = {
.init = tc_init,
.setup_cls_u32 = tc_setup_cls_u32,
.setup_cbs = tc_setup_cbs,
.setup_cls = tc_setup_cls,
+ .setup_taprio = tc_setup_taprio,
+ .setup_etf = tc_setup_etf,
};
diff --git a/drivers/net/ethernet/sun/cassini.c b/drivers/net/ethernet/sun/cassini.c
index c91876f8c536..6ec9163e232c 100644
--- a/drivers/net/ethernet/sun/cassini.c
+++ b/drivers/net/ethernet/sun/cassini.c
@@ -2666,7 +2666,7 @@ static void cas_netpoll(struct net_device *dev)
}
#endif
-static void cas_tx_timeout(struct net_device *dev)
+static void cas_tx_timeout(struct net_device *dev, unsigned int txqueue)
{
struct cas *cp = netdev_priv(dev);
diff --git a/drivers/net/ethernet/sun/niu.c b/drivers/net/ethernet/sun/niu.c
index f5fd1f3c07cc..9a5004f674c7 100644
--- a/drivers/net/ethernet/sun/niu.c
+++ b/drivers/net/ethernet/sun/niu.c
@@ -6517,7 +6517,7 @@ static void niu_reset_task(struct work_struct *work)
spin_unlock_irqrestore(&np->lock, flags);
}
-static void niu_tx_timeout(struct net_device *dev)
+static void niu_tx_timeout(struct net_device *dev, unsigned int txqueue)
{
struct niu *np = netdev_priv(dev);
diff --git a/drivers/net/ethernet/sun/sunbmac.c b/drivers/net/ethernet/sun/sunbmac.c
index e9b757b03b56..c5add0b45eed 100644
--- a/drivers/net/ethernet/sun/sunbmac.c
+++ b/drivers/net/ethernet/sun/sunbmac.c
@@ -941,7 +941,7 @@ static int bigmac_close(struct net_device *dev)
return 0;
}
-static void bigmac_tx_timeout(struct net_device *dev)
+static void bigmac_tx_timeout(struct net_device *dev, unsigned int txqueue)
{
struct bigmac *bp = netdev_priv(dev);
diff --git a/drivers/net/ethernet/sun/sungem.c b/drivers/net/ethernet/sun/sungem.c
index 3e7631160384..8358064fbd48 100644
--- a/drivers/net/ethernet/sun/sungem.c
+++ b/drivers/net/ethernet/sun/sungem.c
@@ -970,7 +970,7 @@ static void gem_poll_controller(struct net_device *dev)
}
#endif
-static void gem_tx_timeout(struct net_device *dev)
+static void gem_tx_timeout(struct net_device *dev, unsigned int txqueue)
{
struct gem *gp = netdev_priv(dev);
diff --git a/drivers/net/ethernet/sun/sunhme.c b/drivers/net/ethernet/sun/sunhme.c
index d007dfeba5c3..f0fe7bb2a750 100644
--- a/drivers/net/ethernet/sun/sunhme.c
+++ b/drivers/net/ethernet/sun/sunhme.c
@@ -2246,7 +2246,7 @@ static int happy_meal_close(struct net_device *dev)
#define SXD(x)
#endif
-static void happy_meal_tx_timeout(struct net_device *dev)
+static void happy_meal_tx_timeout(struct net_device *dev, unsigned int txqueue)
{
struct happy_meal *hp = netdev_priv(dev);
diff --git a/drivers/net/ethernet/sun/sunqe.c b/drivers/net/ethernet/sun/sunqe.c
index 1468fa0a54e9..2102b95ec347 100644
--- a/drivers/net/ethernet/sun/sunqe.c
+++ b/drivers/net/ethernet/sun/sunqe.c
@@ -544,7 +544,7 @@ static void qe_tx_reclaim(struct sunqe *qep)
qep->tx_old = elem;
}
-static void qe_tx_timeout(struct net_device *dev)
+static void qe_tx_timeout(struct net_device *dev, unsigned int txqueue)
{
struct sunqe *qep = netdev_priv(dev);
int tx_full;
diff --git a/drivers/net/ethernet/sun/sunvnet_common.c b/drivers/net/ethernet/sun/sunvnet_common.c
index 8b94d9ad9e2b..c23ce838ff63 100644
--- a/drivers/net/ethernet/sun/sunvnet_common.c
+++ b/drivers/net/ethernet/sun/sunvnet_common.c
@@ -1223,7 +1223,7 @@ vnet_handle_offloads(struct vnet_port *port, struct sk_buff *skb,
{
struct net_device *dev = VNET_PORT_TO_NET_DEVICE(port);
struct vio_dring_state *dr = &port->vio.drings[VIO_DRIVER_TX_RING];
- struct sk_buff *segs;
+ struct sk_buff *segs, *curr, *next;
int maclen, datalen;
int status;
int gso_size, gso_type, gso_segs;
@@ -1282,11 +1282,8 @@ vnet_handle_offloads(struct vnet_port *port, struct sk_buff *skb,
skb_reset_mac_header(skb);
status = 0;
- while (segs) {
- struct sk_buff *curr = segs;
-
- segs = segs->next;
- curr->next = NULL;
+ skb_list_walk_safe(segs, curr, next) {
+ skb_mark_not_on_list(curr);
if (port->tso && curr->len > dev->mtu) {
skb_shinfo(curr)->gso_size = gso_size;
skb_shinfo(curr)->gso_type = gso_type;
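skb_list_walk_safe() replaces the open-coded while loop above: it caches the next pointer before the body runs, so the current segment can be unlinked and freed safely. A standalone C model of the same pattern on a plain singly linked list, not the kernel macro itself:

/*
 * Standalone model of the safe list-walk pattern: cache next before
 * the body may consume the current node. Plain C, illustrative only.
 */
#include <stdio.h>
#include <stdlib.h>

struct node {
	struct node *next;
	int val;
};

#define list_walk_safe(head, cur, nxt) \
	for ((cur) = (head); (cur) && ((nxt) = (cur)->next, 1); (cur) = (nxt))

int main(void)
{
	struct node *head = NULL, *cur, *nxt;
	int i;

	for (i = 3; i > 0; i--) {	/* build the list 1 -> 2 -> 3 */
		cur = malloc(sizeof(*cur));
		cur->val = i;
		cur->next = head;
		head = cur;
	}

	list_walk_safe(head, cur, nxt) {
		cur->next = NULL;	/* "mark not on list", then consume */
		printf("%d\n", cur->val);
		free(cur);
	}
	return 0;
}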
@@ -1539,7 +1536,7 @@ out_dropped:
}
EXPORT_SYMBOL_GPL(sunvnet_start_xmit_common);
-void sunvnet_tx_timeout_common(struct net_device *dev)
+void sunvnet_tx_timeout_common(struct net_device *dev, unsigned int txqueue)
{
/* XXX Implement me XXX */
}
diff --git a/drivers/net/ethernet/sun/sunvnet_common.h b/drivers/net/ethernet/sun/sunvnet_common.h
index 2b808d2482d6..5416a3cb9e7d 100644
--- a/drivers/net/ethernet/sun/sunvnet_common.h
+++ b/drivers/net/ethernet/sun/sunvnet_common.h
@@ -135,7 +135,7 @@ int sunvnet_open_common(struct net_device *dev);
int sunvnet_close_common(struct net_device *dev);
void sunvnet_set_rx_mode_common(struct net_device *dev, struct vnet *vp);
int sunvnet_set_mac_addr_common(struct net_device *dev, void *p);
-void sunvnet_tx_timeout_common(struct net_device *dev);
+void sunvnet_tx_timeout_common(struct net_device *dev, unsigned int txqueue);
netdev_tx_t
sunvnet_start_xmit_common(struct sk_buff *skb, struct net_device *dev,
struct vnet_port *(*vnet_tx_port)
diff --git a/drivers/net/ethernet/synopsys/dwc-xlgmac-net.c b/drivers/net/ethernet/synopsys/dwc-xlgmac-net.c
index a1f5a1e61040..07046a2370b3 100644
--- a/drivers/net/ethernet/synopsys/dwc-xlgmac-net.c
+++ b/drivers/net/ethernet/synopsys/dwc-xlgmac-net.c
@@ -689,7 +689,7 @@ static int xlgmac_close(struct net_device *netdev)
return 0;
}
-static void xlgmac_tx_timeout(struct net_device *netdev)
+static void xlgmac_tx_timeout(struct net_device *netdev, unsigned int txqueue)
{
struct xlgmac_pdata *pdata = netdev_priv(netdev);
diff --git a/drivers/net/ethernet/ti/cpmac.c b/drivers/net/ethernet/ti/cpmac.c
index 3a655a4dc10e..a530afe3ce12 100644
--- a/drivers/net/ethernet/ti/cpmac.c
+++ b/drivers/net/ethernet/ti/cpmac.c
@@ -797,7 +797,7 @@ static irqreturn_t cpmac_irq(int irq, void *dev_id)
return IRQ_HANDLED;
}
-static void cpmac_tx_timeout(struct net_device *dev)
+static void cpmac_tx_timeout(struct net_device *dev, unsigned int txqueue)
{
struct cpmac_priv *priv = netdev_priv(dev);
@@ -816,16 +816,6 @@ static void cpmac_tx_timeout(struct net_device *dev)
netif_tx_wake_all_queues(priv->dev);
}
-static int cpmac_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
-{
- if (!(netif_running(dev)))
- return -EINVAL;
- if (!dev->phydev)
- return -EINVAL;
-
- return phy_mii_ioctl(dev->phydev, ifr, cmd);
-}
-
static void cpmac_get_ringparam(struct net_device *dev,
struct ethtool_ringparam *ring)
{
@@ -1054,7 +1044,7 @@ static const struct net_device_ops cpmac_netdev_ops = {
.ndo_start_xmit = cpmac_start_xmit,
.ndo_tx_timeout = cpmac_tx_timeout,
.ndo_set_rx_mode = cpmac_set_multicast_list,
- .ndo_do_ioctl = cpmac_ioctl,
+ .ndo_do_ioctl = phy_do_ioctl_running,
.ndo_validate_addr = eth_validate_addr,
.ndo_set_mac_address = eth_mac_addr,
};
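Several drivers in this patch (cpmac here, tc35815 and ll_temac below) delete identical hand-rolled MII ioctl wrappers and plug the shared phylib helper straight into .ndo_do_ioctl. Roughly, phy_do_ioctl_running() behaves like the sketch below, though the exact error codes may differ from the wrappers it replaces (the deleted cpmac version returned -EINVAL in both failure cases):

static int do_ioctl_running_sketch(struct net_device *dev,
				   struct ifreq *ifr, int cmd)
{
	if (!netif_running(dev))
		return -ENODEV;
	if (!dev->phydev)
		return -ENODEV;
	return phy_mii_ioctl(dev->phydev, ifr, cmd);
}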
diff --git a/drivers/net/ethernet/ti/cpsw_priv.c b/drivers/net/ethernet/ti/cpsw_priv.c
index 707d5eb480ce..97a058ca60ac 100644
--- a/drivers/net/ethernet/ti/cpsw_priv.c
+++ b/drivers/net/ethernet/ti/cpsw_priv.c
@@ -272,7 +272,7 @@ void soft_reset(const char *module, void __iomem *reg)
WARN(readl_relaxed(reg) & 1, "failed to soft-reset %s\n", module);
}
-void cpsw_ndo_tx_timeout(struct net_device *ndev)
+void cpsw_ndo_tx_timeout(struct net_device *ndev, unsigned int txqueue)
{
struct cpsw_priv *priv = netdev_priv(ndev);
struct cpsw_common *cpsw = priv->cpsw;
diff --git a/drivers/net/ethernet/ti/cpsw_priv.h b/drivers/net/ethernet/ti/cpsw_priv.h
index bc726356a72c..b8d7b924ee3d 100644
--- a/drivers/net/ethernet/ti/cpsw_priv.h
+++ b/drivers/net/ethernet/ti/cpsw_priv.h
@@ -449,7 +449,7 @@ int cpsw_rx_poll(struct napi_struct *napi_rx, int budget);
void cpsw_rx_vlan_encap(struct sk_buff *skb);
void soft_reset(const char *module, void __iomem *reg);
void cpsw_set_slave_mac(struct cpsw_slave *slave, struct cpsw_priv *priv);
-void cpsw_ndo_tx_timeout(struct net_device *ndev);
+void cpsw_ndo_tx_timeout(struct net_device *ndev, unsigned int txqueue);
int cpsw_need_resplit(struct cpsw_common *cpsw);
int cpsw_ndo_ioctl(struct net_device *dev, struct ifreq *req, int cmd);
int cpsw_ndo_set_tx_maxrate(struct net_device *ndev, int queue, u32 rate);
diff --git a/drivers/net/ethernet/ti/davinci_emac.c b/drivers/net/ethernet/ti/davinci_emac.c
index ae27be85e363..75d4e16c692b 100644
--- a/drivers/net/ethernet/ti/davinci_emac.c
+++ b/drivers/net/ethernet/ti/davinci_emac.c
@@ -983,7 +983,7 @@ fail_tx:
* error and re-initialize the TX channel for hardware operation
*
*/
-static void emac_dev_tx_timeout(struct net_device *ndev)
+static void emac_dev_tx_timeout(struct net_device *ndev, unsigned int txqueue)
{
struct emac_priv *priv = netdev_priv(ndev);
struct device *emac_dev = &ndev->dev;
diff --git a/drivers/net/ethernet/ti/netcp_core.c b/drivers/net/ethernet/ti/netcp_core.c
index 1b2702f74455..d7a144b4a09f 100644
--- a/drivers/net/ethernet/ti/netcp_core.c
+++ b/drivers/net/ethernet/ti/netcp_core.c
@@ -1811,7 +1811,7 @@ out:
return (ret == 0) ? 0 : err;
}
-static void netcp_ndo_tx_timeout(struct net_device *ndev)
+static void netcp_ndo_tx_timeout(struct net_device *ndev, unsigned int txqueue)
{
struct netcp_intf *netcp = netdev_priv(ndev);
unsigned int descs = knav_pool_count(netcp->tx_pool);
@@ -2019,7 +2019,7 @@ static int netcp_create_interface(struct netcp_device *netcp_device,
goto quit;
}
- efuse = devm_ioremap_nocache(dev, res.start, size);
+ efuse = devm_ioremap(dev, res.start, size);
if (!efuse) {
dev_err(dev, "could not map resource\n");
devm_release_mem_region(dev, res.start, size);
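ioremap_nocache() has long been an alias for ioremap() on the architectures these drivers run on, so the conversions here and below are mechanical renames. Note that devm_ioremap() reports failure with NULL rather than an ERR_PTR (the netcp code above checks !efuse correctly; the IS_ERR() checks in the ll_temac hunks below predate this patch). Probe paths that own the whole region can use the combined helper instead, sketched here with hypothetical foo_* names:

static int foo_probe(struct platform_device *pdev)
{
	struct resource *res;
	void __iomem *regs;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	regs = devm_ioremap_resource(&pdev->dev, res);	/* request + map */
	if (IS_ERR(regs))
		return PTR_ERR(regs);	/* this helper does return ERR_PTRs */
	return 0;
}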
diff --git a/drivers/net/ethernet/ti/netcp_ethss.c b/drivers/net/ethernet/ti/netcp_ethss.c
index d6a192c1f337..fb36115e9c51 100644
--- a/drivers/net/ethernet/ti/netcp_ethss.c
+++ b/drivers/net/ethernet/ti/netcp_ethss.c
@@ -2533,8 +2533,6 @@ static int gbe_del_vid(void *intf_priv, int vid)
}
#if IS_ENABLED(CONFIG_TI_CPTS)
-#define HAS_PHY_TXTSTAMP(p) ((p)->drv && (p)->drv->txtstamp)
-#define HAS_PHY_RXTSTAMP(p) ((p)->drv && (p)->drv->rxtstamp)
static void gbe_txtstamp(void *context, struct sk_buff *skb)
{
@@ -2566,7 +2564,7 @@ static int gbe_txtstamp_mark_pkt(struct gbe_intf *gbe_intf,
* We mark it here because skb_tx_timestamp() is called
* after all the txhooks are called.
*/
- if (phydev && HAS_PHY_TXTSTAMP(phydev)) {
+ if (phy_has_txtstamp(phydev)) {
skb_shinfo(p_info->skb)->tx_flags |= SKBTX_IN_PROGRESS;
return 0;
}
@@ -2588,7 +2586,7 @@ static int gbe_rxtstamp(struct gbe_intf *gbe_intf, struct netcp_packet *p_info)
if (p_info->rxtstamp_complete)
return 0;
- if (phydev && HAS_PHY_RXTSTAMP(phydev)) {
+ if (phy_has_rxtstamp(phydev)) {
p_info->rxtstamp_complete = true;
return 0;
}
@@ -2830,7 +2828,7 @@ static int gbe_ioctl(void *intf_priv, struct ifreq *req, int cmd)
struct gbe_intf *gbe_intf = intf_priv;
struct phy_device *phy = gbe_intf->slave->phy;
- if (!phy || !phy->drv->hwtstamp) {
+ if (!phy_has_hwtstamp(phy)) {
switch (cmd) {
case SIOCGHWTSTAMP:
return gbe_hwtstamp_get(gbe_intf, req);
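The open-coded HAS_PHY_TXTSTAMP/HAS_PHY_RXTSTAMP macros are dropped in favor of the phy.h helpers, which fold in the NULL check on the phydev itself, so the `phydev &&` guard disappears from the call sites. Assuming the mii_timestamper interface these helpers were introduced alongside, they check approximately this:

static inline bool phy_has_txtstamp_sketch(struct phy_device *phydev)
{
	return phydev && phydev->mii_ts && phydev->mii_ts->txtstamp;
}

static inline bool phy_has_hwtstamp_sketch(struct phy_device *phydev)
{
	return phydev && phydev->mii_ts && phydev->mii_ts->hwtstamp;
}

This also hardens the gbe_ioctl() case above, which previously dereferenced phy->drv without checking it for NULL.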
diff --git a/drivers/net/ethernet/ti/tlan.c b/drivers/net/ethernet/ti/tlan.c
index 78f0f2d59e22..ad465202980a 100644
--- a/drivers/net/ethernet/ti/tlan.c
+++ b/drivers/net/ethernet/ti/tlan.c
@@ -161,7 +161,7 @@ static void tlan_set_multicast_list(struct net_device *);
static int tlan_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
static int tlan_probe1(struct pci_dev *pdev, long ioaddr,
int irq, int rev, const struct pci_device_id *ent);
-static void tlan_tx_timeout(struct net_device *dev);
+static void tlan_tx_timeout(struct net_device *dev, unsigned int txqueue);
static void tlan_tx_timeout_work(struct work_struct *work);
static int tlan_init_one(struct pci_dev *pdev,
const struct pci_device_id *ent);
@@ -997,7 +997,7 @@ static int tlan_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
*
**************************************************************/
-static void tlan_tx_timeout(struct net_device *dev)
+static void tlan_tx_timeout(struct net_device *dev, unsigned int txqueue)
{
TLAN_DBG(TLAN_DEBUG_GNRL, "%s: Transmit timed out.\n", dev->name);
@@ -1028,7 +1028,7 @@ static void tlan_tx_timeout_work(struct work_struct *work)
struct tlan_priv *priv =
container_of(work, struct tlan_priv, tlan_tqueue);
- tlan_tx_timeout(priv->dev);
+ tlan_tx_timeout(priv->dev, UINT_MAX);
}
diff --git a/drivers/net/ethernet/toshiba/ps3_gelic_net.c b/drivers/net/ethernet/toshiba/ps3_gelic_net.c
index 9d9f8acb7ee3..070dd6fa9401 100644
--- a/drivers/net/ethernet/toshiba/ps3_gelic_net.c
+++ b/drivers/net/ethernet/toshiba/ps3_gelic_net.c
@@ -1405,7 +1405,7 @@ out:
*
* called, if tx hangs. Schedules a task that resets the interface
*/
-void gelic_net_tx_timeout(struct net_device *netdev)
+void gelic_net_tx_timeout(struct net_device *netdev, unsigned int txqueue)
{
struct gelic_card *card;
diff --git a/drivers/net/ethernet/toshiba/ps3_gelic_net.h b/drivers/net/ethernet/toshiba/ps3_gelic_net.h
index 051033580f0a..805903dbddcc 100644
--- a/drivers/net/ethernet/toshiba/ps3_gelic_net.h
+++ b/drivers/net/ethernet/toshiba/ps3_gelic_net.h
@@ -359,7 +359,7 @@ int gelic_net_open(struct net_device *netdev);
int gelic_net_stop(struct net_device *netdev);
netdev_tx_t gelic_net_xmit(struct sk_buff *skb, struct net_device *netdev);
void gelic_net_set_multi(struct net_device *netdev);
-void gelic_net_tx_timeout(struct net_device *netdev);
+void gelic_net_tx_timeout(struct net_device *netdev, unsigned int txqueue);
int gelic_net_setup_netdev(struct net_device *netdev, struct gelic_card *card);
/* shared ethtool ops */
diff --git a/drivers/net/ethernet/toshiba/spider_net.c b/drivers/net/ethernet/toshiba/spider_net.c
index 538e70810d3d..6576271642c1 100644
--- a/drivers/net/ethernet/toshiba/spider_net.c
+++ b/drivers/net/ethernet/toshiba/spider_net.c
@@ -2180,7 +2180,7 @@ out:
* called, if tx hangs. Schedules a task that resets the interface
*/
static void
-spider_net_tx_timeout(struct net_device *netdev)
+spider_net_tx_timeout(struct net_device *netdev, unsigned int txqueue)
{
struct spider_net_card *card;
diff --git a/drivers/net/ethernet/toshiba/tc35815.c b/drivers/net/ethernet/toshiba/tc35815.c
index 12466a72cefc..3fd43d30b20d 100644
--- a/drivers/net/ethernet/toshiba/tc35815.c
+++ b/drivers/net/ethernet/toshiba/tc35815.c
@@ -483,8 +483,7 @@ static void tc35815_txdone(struct net_device *dev);
static int tc35815_close(struct net_device *dev);
static struct net_device_stats *tc35815_get_stats(struct net_device *dev);
static void tc35815_set_multicast_list(struct net_device *dev);
-static void tc35815_tx_timeout(struct net_device *dev);
-static int tc35815_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
+static void tc35815_tx_timeout(struct net_device *dev, unsigned int txqueue);
#ifdef CONFIG_NET_POLL_CONTROLLER
static void tc35815_poll_controller(struct net_device *dev);
#endif
@@ -751,7 +750,7 @@ static const struct net_device_ops tc35815_netdev_ops = {
.ndo_get_stats = tc35815_get_stats,
.ndo_set_rx_mode = tc35815_set_multicast_list,
.ndo_tx_timeout = tc35815_tx_timeout,
- .ndo_do_ioctl = tc35815_ioctl,
+ .ndo_do_ioctl = phy_do_ioctl_running,
.ndo_validate_addr = eth_validate_addr,
.ndo_set_mac_address = eth_mac_addr,
#ifdef CONFIG_NET_POLL_CONTROLLER
@@ -1189,7 +1188,7 @@ static void tc35815_schedule_restart(struct net_device *dev)
spin_unlock_irqrestore(&lp->lock, flags);
}
-static void tc35815_tx_timeout(struct net_device *dev)
+static void tc35815_tx_timeout(struct net_device *dev, unsigned int txqueue)
{
struct tc35815_regs __iomem *tr =
(struct tc35815_regs __iomem *)dev->base_addr;
@@ -2009,15 +2008,6 @@ static const struct ethtool_ops tc35815_ethtool_ops = {
.set_link_ksettings = phy_ethtool_set_link_ksettings,
};
-static int tc35815_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
-{
- if (!netif_running(dev))
- return -EINVAL;
- if (!dev->phydev)
- return -ENODEV;
- return phy_mii_ioctl(dev->phydev, rq, cmd);
-}
-
static void tc35815_chip_reset(struct net_device *dev)
{
struct tc35815_regs __iomem *tr =
diff --git a/drivers/net/ethernet/via/via-rhine.c b/drivers/net/ethernet/via/via-rhine.c
index ed12dbd156f0..803247d51fe9 100644
--- a/drivers/net/ethernet/via/via-rhine.c
+++ b/drivers/net/ethernet/via/via-rhine.c
@@ -506,7 +506,7 @@ static void mdio_write(struct net_device *dev, int phy_id, int location, int val
static int rhine_open(struct net_device *dev);
static void rhine_reset_task(struct work_struct *work);
static void rhine_slow_event_task(struct work_struct *work);
-static void rhine_tx_timeout(struct net_device *dev);
+static void rhine_tx_timeout(struct net_device *dev, unsigned int txqueue);
static netdev_tx_t rhine_start_tx(struct sk_buff *skb,
struct net_device *dev);
static irqreturn_t rhine_interrupt(int irq, void *dev_instance);
@@ -1761,7 +1761,7 @@ out_unlock:
mutex_unlock(&rp->task_lock);
}
-static void rhine_tx_timeout(struct net_device *dev)
+static void rhine_tx_timeout(struct net_device *dev, unsigned int txqueue)
{
struct rhine_private *rp = netdev_priv(dev);
void __iomem *ioaddr = rp->base;
diff --git a/drivers/net/ethernet/via/via-velocity.c b/drivers/net/ethernet/via/via-velocity.c
index 346e44115c4e..4b556b74541a 100644
--- a/drivers/net/ethernet/via/via-velocity.c
+++ b/drivers/net/ethernet/via/via-velocity.c
@@ -3257,12 +3257,16 @@ static struct platform_driver velocity_platform_driver = {
* @dev: network device
*
* Called before an ethtool operation. We need to make sure the
- * chip is out of D3 state before we poke at it.
+ * chip is out of D3 state before we poke at it. In case of ethtool
+ * ops nesting, only wake the device up in the outermost block.
*/
static int velocity_ethtool_up(struct net_device *dev)
{
struct velocity_info *vptr = netdev_priv(dev);
- if (!netif_running(dev))
+
+ if (vptr->ethtool_ops_nesting == U32_MAX)
+ return -EBUSY;
+ if (!vptr->ethtool_ops_nesting++ && !netif_running(dev))
velocity_set_power_state(vptr, PCI_D0);
return 0;
}
@@ -3272,12 +3276,14 @@ static int velocity_ethtool_up(struct net_device *dev)
* @dev: network device
*
* Called after an ethtool operation. Restore the chip back to D3
- * state if it isn't running.
+ * state if it isn't running. In case of ethtool ops nesting, only
+ * put the device to sleep in the outermost block.
*/
static void velocity_ethtool_down(struct net_device *dev)
{
struct velocity_info *vptr = netdev_priv(dev);
- if (!netif_running(dev))
+
+ if (!--vptr->ethtool_ops_nesting && !netif_running(dev))
velocity_set_power_state(vptr, PCI_D3hot);
}
diff --git a/drivers/net/ethernet/via/via-velocity.h b/drivers/net/ethernet/via/via-velocity.h
index cdfe7809e3c1..f196e71d2c04 100644
--- a/drivers/net/ethernet/via/via-velocity.h
+++ b/drivers/net/ethernet/via/via-velocity.h
@@ -1483,6 +1483,7 @@ struct velocity_info {
struct velocity_context context;
u32 ticks;
+ u32 ethtool_ops_nesting;
u8 rev_id;
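The via-velocity change turns the ethtool wake/sleep bracket into a reference count: nested ethtool operations each call velocity_ethtool_up()/down(), but the D0/D3 transition happens only when the counter crosses zero, and a saturated counter is rejected rather than wrapped. The pattern, reduced to a sketch with illustrative wake_device()/sleep_device() placeholders:

static int ops_begin(struct velocity_info *vptr)
{
	if (vptr->ethtool_ops_nesting == U32_MAX)
		return -EBUSY;			/* counter would overflow */
	if (!vptr->ethtool_ops_nesting++)	/* 0 -> 1: outermost entry */
		wake_device(vptr);		/* illustrative */
	return 0;
}

static void ops_end(struct velocity_info *vptr)
{
	if (!--vptr->ethtool_ops_nesting)	/* 1 -> 0: outermost exit */
		sleep_device(vptr);		/* illustrative */
}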
diff --git a/drivers/net/ethernet/wiznet/w5100.c b/drivers/net/ethernet/wiznet/w5100.c
index bede1ff289c5..c0d181a7f83a 100644
--- a/drivers/net/ethernet/wiznet/w5100.c
+++ b/drivers/net/ethernet/wiznet/w5100.c
@@ -790,7 +790,7 @@ static void w5100_restart_work(struct work_struct *work)
w5100_restart(priv->ndev);
}
-static void w5100_tx_timeout(struct net_device *ndev)
+static void w5100_tx_timeout(struct net_device *ndev, unsigned int txqueue)
{
struct w5100_priv *priv = netdev_priv(ndev);
diff --git a/drivers/net/ethernet/wiznet/w5300.c b/drivers/net/ethernet/wiznet/w5300.c
index 6ba2747779ce..46aae30c4636 100644
--- a/drivers/net/ethernet/wiznet/w5300.c
+++ b/drivers/net/ethernet/wiznet/w5300.c
@@ -341,7 +341,7 @@ static void w5300_get_regs(struct net_device *ndev,
}
}
-static void w5300_tx_timeout(struct net_device *ndev)
+static void w5300_tx_timeout(struct net_device *ndev, unsigned int txqueue)
{
struct w5300_priv *priv = netdev_priv(ndev);
diff --git a/drivers/net/ethernet/xilinx/ll_temac_main.c b/drivers/net/ethernet/xilinx/ll_temac_main.c
index 21c1b4322ea7..6f11f52c9a9e 100644
--- a/drivers/net/ethernet/xilinx/ll_temac_main.c
+++ b/drivers/net/ethernet/xilinx/ll_temac_main.c
@@ -1080,17 +1080,6 @@ temac_poll_controller(struct net_device *ndev)
}
#endif
-static int temac_ioctl(struct net_device *ndev, struct ifreq *rq, int cmd)
-{
- if (!netif_running(ndev))
- return -EINVAL;
-
- if (!ndev->phydev)
- return -EINVAL;
-
- return phy_mii_ioctl(ndev->phydev, rq, cmd);
-}
-
static const struct net_device_ops temac_netdev_ops = {
.ndo_open = temac_open,
.ndo_stop = temac_stop,
@@ -1098,7 +1087,7 @@ static const struct net_device_ops temac_netdev_ops = {
.ndo_set_rx_mode = temac_set_multicast_list,
.ndo_set_mac_address = temac_set_mac_address,
.ndo_validate_addr = eth_validate_addr,
- .ndo_do_ioctl = temac_ioctl,
+ .ndo_do_ioctl = phy_do_ioctl_running,
#ifdef CONFIG_NET_POLL_CONTROLLER
.ndo_poll_controller = temac_poll_controller,
#endif
@@ -1202,7 +1191,7 @@ static int temac_probe(struct platform_device *pdev)
/* map device registers */
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- lp->regs = devm_ioremap_nocache(&pdev->dev, res->start,
+ lp->regs = devm_ioremap(&pdev->dev, res->start,
resource_size(res));
if (IS_ERR(lp->regs)) {
dev_err(&pdev->dev, "could not map TEMAC registers\n");
@@ -1296,7 +1285,7 @@ static int temac_probe(struct platform_device *pdev)
} else if (pdata) {
/* 2nd memory resource specifies DMA registers */
res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
- lp->sdma_regs = devm_ioremap_nocache(&pdev->dev, res->start,
+ lp->sdma_regs = devm_ioremap(&pdev->dev, res->start,
resource_size(res));
if (IS_ERR(lp->sdma_regs)) {
dev_err(&pdev->dev,
diff --git a/drivers/net/ethernet/xilinx/xilinx_emaclite.c b/drivers/net/ethernet/xilinx/xilinx_emaclite.c
index 0de52e70abcc..0c26f5bcc523 100644
--- a/drivers/net/ethernet/xilinx/xilinx_emaclite.c
+++ b/drivers/net/ethernet/xilinx/xilinx_emaclite.c
@@ -521,7 +521,7 @@ static int xemaclite_set_mac_address(struct net_device *dev, void *address)
*
* This function is called when Tx time out occurs for Emaclite device.
*/
-static void xemaclite_tx_timeout(struct net_device *dev)
+static void xemaclite_tx_timeout(struct net_device *dev, unsigned int txqueue)
{
struct net_local *lp = netdev_priv(dev);
unsigned long flags;
diff --git a/drivers/net/ethernet/xircom/xirc2ps_cs.c b/drivers/net/ethernet/xircom/xirc2ps_cs.c
index fd5288ff53b5..480ab7251515 100644
--- a/drivers/net/ethernet/xircom/xirc2ps_cs.c
+++ b/drivers/net/ethernet/xircom/xirc2ps_cs.c
@@ -288,7 +288,7 @@ struct local_info {
*/
static netdev_tx_t do_start_xmit(struct sk_buff *skb,
struct net_device *dev);
-static void xirc_tx_timeout(struct net_device *dev);
+static void xirc_tx_timeout(struct net_device *dev, unsigned int txqueue);
static void xirc2ps_tx_timeout_task(struct work_struct *work);
static void set_addresses(struct net_device *dev);
static void set_multicast_list(struct net_device *dev);
@@ -1203,7 +1203,7 @@ xirc2ps_tx_timeout_task(struct work_struct *work)
}
static void
-xirc_tx_timeout(struct net_device *dev)
+xirc_tx_timeout(struct net_device *dev, unsigned int txqueue)
{
struct local_info *lp = netdev_priv(dev);
dev->stats.tx_errors++;
diff --git a/drivers/net/ethernet/xscale/Kconfig b/drivers/net/ethernet/xscale/Kconfig
index cd0a8f46e7c6..98aa7b8ddb06 100644
--- a/drivers/net/ethernet/xscale/Kconfig
+++ b/drivers/net/ethernet/xscale/Kconfig
@@ -27,4 +27,18 @@ config IXP4XX_ETH
Say Y here if you want to use built-in Ethernet ports
on IXP4xx processor.
+config PTP_1588_CLOCK_IXP46X
+ tristate "Intel IXP46x as PTP clock"
+ depends on IXP4XX_ETH
+ depends on PTP_1588_CLOCK
+ default y
+ help
+ This driver adds support for using the IXP46X as a PTP
+ clock. This clock is only useful if your PTP programs are
+ getting hardware time stamps on the PTP Ethernet packets
+ using the SO_TIMESTAMPING API.
+
+ To compile this driver as a module, choose M here: the module
+ will be called ptp_ixp46x.
+
endif # NET_VENDOR_XSCALE
diff --git a/drivers/net/ethernet/xscale/Makefile b/drivers/net/ethernet/xscale/Makefile
index 794a519d07b3..607f91b1e878 100644
--- a/drivers/net/ethernet/xscale/Makefile
+++ b/drivers/net/ethernet/xscale/Makefile
@@ -3,4 +3,5 @@
# Makefile for the Intel XScale IXP device drivers.
#
-obj-$(CONFIG_IXP4XX_ETH) += ixp4xx_eth.o
+obj-$(CONFIG_IXP4XX_ETH) += ixp4xx_eth.o
+obj-$(CONFIG_PTP_1588_CLOCK_IXP46X) += ptp_ixp46x.o
diff --git a/drivers/net/ethernet/xscale/ixp46x_ts.h b/drivers/net/ethernet/xscale/ixp46x_ts.h
new file mode 100644
index 000000000000..d792130e27b0
--- /dev/null
+++ b/drivers/net/ethernet/xscale/ixp46x_ts.h
@@ -0,0 +1,68 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * PTP 1588 clock using the IXP46X
+ *
+ * Copyright (C) 2010 OMICRON electronics GmbH
+ */
+
+#ifndef _IXP46X_TS_H_
+#define _IXP46X_TS_H_
+
+#define DEFAULT_ADDEND 0xF0000029
+#define TICKS_NS_SHIFT 4
+
+struct ixp46x_channel_ctl {
+ u32 ch_control; /* 0x40 Time Synchronization Channel Control */
+ u32 ch_event; /* 0x44 Time Synchronization Channel Event */
+ u32 tx_snap_lo; /* 0x48 Transmit Snapshot Low Register */
+ u32 tx_snap_hi; /* 0x4C Transmit Snapshot High Register */
+ u32 rx_snap_lo; /* 0x50 Receive Snapshot Low Register */
+ u32 rx_snap_hi; /* 0x54 Receive Snapshot High Register */
+ u32 src_uuid_lo; /* 0x58 Source UUID0 Low Register */
+ u32 src_uuid_hi; /* 0x5C Sequence Identifier/Source UUID0 High */
+};
+
+struct ixp46x_ts_regs {
+ u32 control; /* 0x00 Time Sync Control Register */
+ u32 event; /* 0x04 Time Sync Event Register */
+ u32 addend; /* 0x08 Time Sync Addend Register */
+ u32 accum; /* 0x0C Time Sync Accumulator Register */
+ u32 test; /* 0x10 Time Sync Test Register */
+ u32 unused; /* 0x14 */
+ u32 rsystime_lo; /* 0x18 RawSystemTime_Low Register */
+ u32 rsystime_hi; /* 0x1C RawSystemTime_High Register */
+ u32 systime_lo; /* 0x20 SystemTime_Low Register */
+ u32 systime_hi; /* 0x24 SystemTime_High Register */
+ u32 trgt_lo; /* 0x28 TargetTime_Low Register */
+ u32 trgt_hi; /* 0x2C TargetTime_High Register */
+ u32 asms_lo; /* 0x30 Auxiliary Slave Mode Snapshot Low */
+ u32 asms_hi; /* 0x34 Auxiliary Slave Mode Snapshot High */
+ u32 amms_lo; /* 0x38 Auxiliary Master Mode Snapshot Low */
+ u32 amms_hi; /* 0x3C Auxiliary Master Mode Snapshot High */
+
+ struct ixp46x_channel_ctl channel[3];
+};
+
+/* 0x00 Time Sync Control Register Bits */
+#define TSCR_AMM (1<<3)
+#define TSCR_ASM (1<<2)
+#define TSCR_TTM (1<<1)
+#define TSCR_RST (1<<0)
+
+/* 0x04 Time Sync Event Register Bits */
+#define TSER_SNM (1<<3)
+#define TSER_SNS (1<<2)
+#define TTIPEND (1<<1)
+
+/* 0x40 Time Synchronization Channel Control Register Bits */
+#define MASTER_MODE (1<<0)
+#define TIMESTAMP_ALL (1<<1)
+
+/* 0x44 Time Synchronization Channel Event Register Bits */
+#define TX_SNAPSHOT_LOCKED (1<<0)
+#define RX_SNAPSHOT_LOCKED (1<<1)
+
+/* The ptp_ixp46x module will set this variable */
+extern int ixp46x_phc_index;
+
+#endif
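The snapshot registers in this layout split 64-bit timestamps into lo/hi word pairs. A sketch of assembling one, assuming the channel block has already been ioremapped; the helper name is illustrative:

static u64 ixp_read_tx_snapshot(struct ixp46x_channel_ctl __iomem *ch)
{
	u64 lo = __raw_readl(&ch->tx_snap_lo);
	u64 hi = __raw_readl(&ch->tx_snap_hi);

	/* raw ticks; the PTP driver shifts by TICKS_NS_SHIFT for ns */
	return (hi << 32) | lo;
}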
diff --git a/drivers/net/ethernet/xscale/ixp4xx_eth.c b/drivers/net/ethernet/xscale/ixp4xx_eth.c
index 6fc04ffb22c2..269596c15133 100644
--- a/drivers/net/ethernet/xscale/ixp4xx_eth.c
+++ b/drivers/net/ethernet/xscale/ixp4xx_eth.c
@@ -29,14 +29,16 @@
#include <linux/net_tstamp.h>
#include <linux/of.h>
#include <linux/phy.h>
+#include <linux/platform_data/eth_ixp4xx.h>
#include <linux/platform_device.h>
#include <linux/ptp_classify.h>
#include <linux/slab.h>
#include <linux/module.h>
-#include <mach/ixp46x_ts.h>
#include <linux/soc/ixp4xx/npe.h>
#include <linux/soc/ixp4xx/qmgr.h>
+#include "ixp46x_ts.h"
+
#define DEBUG_DESC 0
#define DEBUG_RX 0
#define DEBUG_TX 0
@@ -517,25 +519,14 @@ static int ixp4xx_mdio_write(struct mii_bus *bus, int phy_id, int location,
return ret;
}
-static int ixp4xx_mdio_register(void)
+static int ixp4xx_mdio_register(struct eth_regs __iomem *regs)
{
int err;
if (!(mdio_bus = mdiobus_alloc()))
return -ENOMEM;
- if (cpu_is_ixp43x()) {
- /* IXP43x lacks NPE-B and uses NPE-C for MII PHY access */
- if (!(ixp4xx_read_feature_bits() & IXP4XX_FEATURE_NPEC_ETH))
- return -ENODEV;
- mdio_regs = (struct eth_regs __iomem *)IXP4XX_EthC_BASE_VIRT;
- } else {
- /* All MII PHY accesses use NPE-B Ethernet registers */
- if (!(ixp4xx_read_feature_bits() & IXP4XX_FEATURE_NPEB_ETH0))
- return -ENODEV;
- mdio_regs = (struct eth_regs __iomem *)IXP4XX_EthB_BASE_VIRT;
- }
-
+ mdio_regs = regs;
__raw_writel(DEFAULT_CORE_CNTRL, &mdio_regs->core_control);
spin_lock_init(&mdio_lock);
mdio_bus->name = "IXP4xx MII Bus";
@@ -581,8 +572,8 @@ static void ixp4xx_adjust_link(struct net_device *dev)
__raw_writel(DEFAULT_TX_CNTRL0 | TX_CNTRL0_HALFDUPLEX,
&port->regs->tx_control[0]);
- printk(KERN_INFO "%s: link up, speed %u Mb/s, %s duplex\n",
- dev->name, port->speed, port->duplex ? "full" : "half");
+ netdev_info(dev, "link up, speed %u Mb/s, %s duplex\n",
+ port->speed, port->duplex ? "full" : "half");
}
@@ -592,7 +583,7 @@ static inline void debug_pkt(struct net_device *dev, const char *func,
#if DEBUG_PKT_BYTES
int i;
- printk(KERN_DEBUG "%s: %s(%i) ", dev->name, func, len);
+ netdev_dbg(dev, "%s(%i) ", func, len);
for (i = 0; i < len; i++) {
if (i >= DEBUG_PKT_BYTES)
break;
@@ -683,7 +674,7 @@ static int eth_poll(struct napi_struct *napi, int budget)
int received = 0;
#if DEBUG_RX
- printk(KERN_DEBUG "%s: eth_poll\n", dev->name);
+ netdev_dbg(dev, "eth_poll\n");
#endif
while (received < budget) {
@@ -697,23 +688,20 @@ static int eth_poll(struct napi_struct *napi, int budget)
if ((n = queue_get_desc(rxq, port, 0)) < 0) {
#if DEBUG_RX
- printk(KERN_DEBUG "%s: eth_poll napi_complete\n",
- dev->name);
+ netdev_dbg(dev, "eth_poll napi_complete\n");
#endif
napi_complete(napi);
qmgr_enable_irq(rxq);
if (!qmgr_stat_below_low_watermark(rxq) &&
napi_reschedule(napi)) { /* not empty again */
#if DEBUG_RX
- printk(KERN_DEBUG "%s: eth_poll napi_reschedule succeeded\n",
- dev->name);
+ netdev_dbg(dev, "eth_poll napi_reschedule succeeded\n");
#endif
qmgr_disable_irq(rxq);
continue;
}
#if DEBUG_RX
- printk(KERN_DEBUG "%s: eth_poll all done\n",
- dev->name);
+ netdev_dbg(dev, "eth_poll all done\n");
#endif
return received; /* all work done */
}
@@ -778,7 +766,7 @@ static int eth_poll(struct napi_struct *napi, int budget)
}
#if DEBUG_RX
- printk(KERN_DEBUG "eth_poll(): end, not all work done\n");
+ netdev_dbg(dev, "eth_poll(): end, not all work done\n");
#endif
return received; /* not all work done */
}
@@ -842,7 +830,7 @@ static int eth_xmit(struct sk_buff *skb, struct net_device *dev)
struct desc *desc;
#if DEBUG_TX
- printk(KERN_DEBUG "%s: eth_xmit\n", dev->name);
+ netdev_dbg(dev, "eth_xmit\n");
#endif
if (unlikely(skb->len > MAX_MRU)) {
@@ -897,22 +885,21 @@ static int eth_xmit(struct sk_buff *skb, struct net_device *dev)
if (qmgr_stat_below_low_watermark(txreadyq)) { /* empty */
#if DEBUG_TX
- printk(KERN_DEBUG "%s: eth_xmit queue full\n", dev->name);
+ netdev_dbg(dev, "eth_xmit queue full\n");
#endif
netif_stop_queue(dev);
/* we could miss TX ready interrupt */
/* really empty in fact */
if (!qmgr_stat_below_low_watermark(txreadyq)) {
#if DEBUG_TX
- printk(KERN_DEBUG "%s: eth_xmit ready again\n",
- dev->name);
+ netdev_dbg(dev, "eth_xmit ready again\n");
#endif
netif_wake_queue(dev);
}
}
#if DEBUG_TX
- printk(KERN_DEBUG "%s: eth_xmit end\n", dev->name);
+ netdev_dbg(dev, "eth_xmit end\n");
#endif
ixp_tx_timestamp(port, skb);
@@ -1099,7 +1086,7 @@ static int init_queues(struct port *port)
int i;
if (!ports_open) {
- dma_pool = dma_pool_create(DRV_NAME, &port->netdev->dev,
+ dma_pool = dma_pool_create(DRV_NAME, port->netdev->dev.parent,
POOL_ALLOC_SIZE, 32, 0);
if (!dma_pool)
return -ENOMEM;
@@ -1186,8 +1173,7 @@ static int eth_open(struct net_device *dev)
return err;
if (npe_recv_message(npe, &msg, "ETH_GET_STATUS")) {
- printk(KERN_ERR "%s: %s not responding\n", dev->name,
- npe_name(npe));
+ netdev_err(dev, "%s not responding\n", npe_name(npe));
return -EIO;
}
port->firmware[0] = msg.byte4;
@@ -1299,7 +1285,7 @@ static int eth_close(struct net_device *dev)
msg.eth_id = port->id;
msg.byte3 = 1;
if (npe_send_recv_message(port->npe, &msg, "ETH_ENABLE_LOOPBACK"))
- printk(KERN_CRIT "%s: unable to enable loopback\n", dev->name);
+ netdev_crit(dev, "unable to enable loopback\n");
i = 0;
do { /* drain RX buffers */
@@ -1323,11 +1309,11 @@ static int eth_close(struct net_device *dev)
} while (++i < MAX_CLOSE_WAIT);
if (buffs)
- printk(KERN_CRIT "%s: unable to drain RX queue, %i buffer(s)"
- " left in NPE\n", dev->name, buffs);
+ netdev_crit(dev, "unable to drain RX queue, %i buffer(s) left in NPE\n",
+ buffs);
#if DEBUG_CLOSE
if (!buffs)
- printk(KERN_DEBUG "Draining RX queue took %i cycles\n", i);
+ netdev_dbg(dev, "draining RX queue took %i cycles\n", i);
#endif
buffs = TX_DESCS;
@@ -1343,17 +1329,16 @@ static int eth_close(struct net_device *dev)
} while (++i < MAX_CLOSE_WAIT);
if (buffs)
- printk(KERN_CRIT "%s: unable to drain TX queue, %i buffer(s) "
- "left in NPE\n", dev->name, buffs);
+ netdev_crit(dev, "unable to drain TX queue, %i buffer(s) left in NPE\n",
+ buffs);
#if DEBUG_CLOSE
if (!buffs)
- printk(KERN_DEBUG "Draining TX queues took %i cycles\n", i);
+ netdev_dbg(dev, "draining TX queues took %i cycles\n", i);
#endif
msg.byte3 = 0;
if (npe_send_recv_message(port->npe, &msg, "ETH_DISABLE_LOOPBACK"))
- printk(KERN_CRIT "%s: unable to disable loopback\n",
- dev->name);
+ netdev_crit(dev, "unable to disable loopback\n");
phy_stop(dev->phydev);
@@ -1374,54 +1359,88 @@ static const struct net_device_ops ixp4xx_netdev_ops = {
.ndo_validate_addr = eth_validate_addr,
};
-static int eth_init_one(struct platform_device *pdev)
+static int ixp4xx_eth_probe(struct platform_device *pdev)
{
- struct port *port;
- struct net_device *dev;
- struct eth_plat_info *plat = dev_get_platdata(&pdev->dev);
- struct phy_device *phydev = NULL;
- u32 regs_phys;
char phy_id[MII_BUS_ID_SIZE + 3];
+ struct phy_device *phydev = NULL;
+ struct device *dev = &pdev->dev;
+ struct eth_plat_info *plat;
+ resource_size_t regs_phys;
+ struct net_device *ndev;
+ struct resource *res;
+ struct port *port;
int err;
- if (!(dev = alloc_etherdev(sizeof(struct port))))
+ plat = dev_get_platdata(dev);
+
+ if (!(ndev = devm_alloc_etherdev(dev, sizeof(struct port))))
return -ENOMEM;
- SET_NETDEV_DEV(dev, &pdev->dev);
- port = netdev_priv(dev);
- port->netdev = dev;
+ SET_NETDEV_DEV(ndev, dev);
+ port = netdev_priv(ndev);
+ port->netdev = ndev;
port->id = pdev->id;
+ /* Get the port resource and remap */
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res)
+ return -ENODEV;
+ regs_phys = res->start;
+ port->regs = devm_ioremap_resource(dev, res);
+
switch (port->id) {
case IXP4XX_ETH_NPEA:
- port->regs = (struct eth_regs __iomem *)IXP4XX_EthA_BASE_VIRT;
- regs_phys = IXP4XX_EthA_BASE_PHYS;
+ /* If the MDIO bus is not up yet, defer probe */
+ if (!mdio_bus)
+ return -EPROBE_DEFER;
break;
case IXP4XX_ETH_NPEB:
- port->regs = (struct eth_regs __iomem *)IXP4XX_EthB_BASE_VIRT;
- regs_phys = IXP4XX_EthB_BASE_PHYS;
+ /*
+ * On all except IXP43x, NPE-B is used for the MDIO bus.
+ * If there is no NPE-B in the feature set, bail out, else
+ * register the MDIO bus.
+ */
+ if (!cpu_is_ixp43x()) {
+ if (!(ixp4xx_read_feature_bits() &
+ IXP4XX_FEATURE_NPEB_ETH0))
+ return -ENODEV;
+ /* Else register the MDIO bus on NPE-B */
+ if ((err = ixp4xx_mdio_register(port->regs)))
+ return err;
+ }
+ if (!mdio_bus)
+ return -EPROBE_DEFER;
break;
case IXP4XX_ETH_NPEC:
- port->regs = (struct eth_regs __iomem *)IXP4XX_EthC_BASE_VIRT;
- regs_phys = IXP4XX_EthC_BASE_PHYS;
+ /*
+ * IXP43x lacks NPE-B and uses NPE-C for MDIO bus access;
+ * if there is no NPE-C there is no bus, nothing works, so bail out.
+ */
+ if (cpu_is_ixp43x()) {
+ if (!(ixp4xx_read_feature_bits() &
+ IXP4XX_FEATURE_NPEC_ETH))
+ return -ENODEV;
+ /* Else register the MDIO bus on NPE-C */
+ if ((err = ixp4xx_mdio_register(port->regs)))
+ return err;
+ }
+ if (!mdio_bus)
+ return -EPROBE_DEFER;
break;
default:
- err = -ENODEV;
- goto err_free;
+ return -ENODEV;
}
- dev->netdev_ops = &ixp4xx_netdev_ops;
- dev->ethtool_ops = &ixp4xx_ethtool_ops;
- dev->tx_queue_len = 100;
+ ndev->netdev_ops = &ixp4xx_netdev_ops;
+ ndev->ethtool_ops = &ixp4xx_ethtool_ops;
+ ndev->tx_queue_len = 100;
- netif_napi_add(dev, &port->napi, eth_poll, NAPI_WEIGHT);
+ netif_napi_add(ndev, &port->napi, eth_poll, NAPI_WEIGHT);
- if (!(port->npe = npe_request(NPE_ID(port->id)))) {
- err = -EIO;
- goto err_free;
- }
+ if (!(port->npe = npe_request(NPE_ID(port->id))))
+ return -EIO;
- port->mem_res = request_mem_region(regs_phys, REGS_SIZE, dev->name);
+ port->mem_res = request_mem_region(regs_phys, REGS_SIZE, ndev->name);
if (!port->mem_res) {
err = -EBUSY;
goto err_npe_rel;
@@ -1429,9 +1448,9 @@ static int eth_init_one(struct platform_device *pdev)
port->plat = plat;
npe_port_tab[NPE_ID(port->id)] = port;
- memcpy(dev->dev_addr, plat->hwaddr, ETH_ALEN);
+ memcpy(ndev->dev_addr, plat->hwaddr, ETH_ALEN);
- platform_set_drvdata(pdev, dev);
+ platform_set_drvdata(pdev, ndev);
__raw_writel(DEFAULT_CORE_CNTRL | CORE_RESET,
&port->regs->core_control);
@@ -1441,7 +1460,7 @@ static int eth_init_one(struct platform_device *pdev)
snprintf(phy_id, MII_BUS_ID_SIZE + 3, PHY_ID_FMT,
mdio_bus->id, plat->phy);
- phydev = phy_connect(dev, phy_id, &ixp4xx_adjust_link,
+ phydev = phy_connect(ndev, phy_id, &ixp4xx_adjust_link,
PHY_INTERFACE_MODE_MII);
if (IS_ERR(phydev)) {
err = PTR_ERR(phydev);
@@ -1450,11 +1469,11 @@ static int eth_init_one(struct platform_device *pdev)
phydev->irq = PHY_POLL;
- if ((err = register_netdev(dev)))
+ if ((err = register_netdev(ndev)))
goto err_phy_dis;
- printk(KERN_INFO "%s: MII PHY %i on %s\n", dev->name, plat->phy,
- npe_name(port->npe));
+ netdev_info(ndev, "MII PHY %i on %s\n", plat->phy,
+ npe_name(port->npe));
return 0;
@@ -1465,58 +1484,32 @@ err_free_mem:
release_resource(port->mem_res);
err_npe_rel:
npe_release(port->npe);
-err_free:
- free_netdev(dev);
return err;
}
-static int eth_remove_one(struct platform_device *pdev)
+static int ixp4xx_eth_remove(struct platform_device *pdev)
{
- struct net_device *dev = platform_get_drvdata(pdev);
- struct phy_device *phydev = dev->phydev;
- struct port *port = netdev_priv(dev);
+ struct net_device *ndev = platform_get_drvdata(pdev);
+ struct phy_device *phydev = ndev->phydev;
+ struct port *port = netdev_priv(ndev);
- unregister_netdev(dev);
+ unregister_netdev(ndev);
phy_disconnect(phydev);
+ ixp4xx_mdio_remove();
npe_port_tab[NPE_ID(port->id)] = NULL;
npe_release(port->npe);
release_resource(port->mem_res);
- free_netdev(dev);
return 0;
}
static struct platform_driver ixp4xx_eth_driver = {
.driver.name = DRV_NAME,
- .probe = eth_init_one,
- .remove = eth_remove_one,
+ .probe = ixp4xx_eth_probe,
+ .remove = ixp4xx_eth_remove,
};
-
-static int __init eth_init_module(void)
-{
- int err;
-
- /*
- * FIXME: we bail out on device tree boot but this really needs
- * to be fixed in a nicer way: this registers the MDIO bus before
- * even matching the driver infrastructure, we should only probe
- * detected hardware.
- */
- if (of_have_populated_dt())
- return -ENODEV;
- if ((err = ixp4xx_mdio_register()))
- return err;
- return platform_driver_register(&ixp4xx_eth_driver);
-}
-
-static void __exit eth_cleanup_module(void)
-{
- platform_driver_unregister(&ixp4xx_eth_driver);
- ixp4xx_mdio_remove();
-}
+module_platform_driver(ixp4xx_eth_driver);
MODULE_AUTHOR("Krzysztof Halasa");
MODULE_DESCRIPTION("Intel IXP4xx Ethernet driver");
MODULE_LICENSE("GPL v2");
MODULE_ALIAS("platform:ixp4xx_eth");
-module_init(eth_init_module);
-module_exit(eth_cleanup_module);
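With MDIO registration moved into probe (deferring with -EPROBE_DEFER until the bus-owning NPE probes first), the hand-written init/exit pair and its device-tree bail-out become unnecessary, and module_platform_driver() supplies the boilerplate. The macro expands to roughly:

static int __init ixp4xx_eth_driver_init(void)
{
	return platform_driver_register(&ixp4xx_eth_driver);
}
module_init(ixp4xx_eth_driver_init);

static void __exit ixp4xx_eth_driver_exit(void)
{
	platform_driver_unregister(&ixp4xx_eth_driver);
}
module_exit(ixp4xx_eth_driver_exit);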
diff --git a/drivers/ptp/ptp_ixp46x.c b/drivers/net/ethernet/xscale/ptp_ixp46x.c
index 67028484e9a0..9ecc395239e9 100644
--- a/drivers/ptp/ptp_ixp46x.c
+++ b/drivers/net/ethernet/xscale/ptp_ixp46x.c
@@ -15,7 +15,8 @@
#include <linux/module.h>
#include <linux/ptp_clock_kernel.h>
-#include <mach/ixp46x_ts.h>
+
+#include "ixp46x_ts.h"
#define DRIVER "ptp_ixp46x"
#define N_EXT_TS 2
diff --git a/drivers/net/fddi/defxx.c b/drivers/net/fddi/defxx.c
index 56b7791911bf..077c68498f04 100644
--- a/drivers/net/fddi/defxx.c
+++ b/drivers/net/fddi/defxx.c
@@ -614,7 +614,7 @@ static int dfx_register(struct device *bdev)
/* Set up I/O base address. */
if (dfx_use_mmio) {
- bp->base.mem = ioremap_nocache(bar_start[0], bar_len[0]);
+ bp->base.mem = ioremap(bar_start[0], bar_len[0]);
if (!bp->base.mem) {
printk(KERN_ERR "%s: Cannot map MMIO\n", print_name);
err = -ENOMEM;
diff --git a/drivers/net/fddi/defza.c b/drivers/net/fddi/defza.c
index 060712c666bf..eaf85db53a5e 100644
--- a/drivers/net/fddi/defza.c
+++ b/drivers/net/fddi/defza.c
@@ -1318,7 +1318,7 @@ static int fza_probe(struct device *bdev)
}
/* MMIO mapping setup. */
- mmio = ioremap_nocache(start, len);
+ mmio = ioremap(start, len);
if (!mmio) {
pr_err("%s: cannot map MMIO\n", fp->name);
ret = -ENOMEM;
diff --git a/drivers/net/fddi/skfp/skfddi.c b/drivers/net/fddi/skfp/skfddi.c
index 15754451cb87..69c29a2ef95d 100644
--- a/drivers/net/fddi/skfp/skfddi.c
+++ b/drivers/net/fddi/skfp/skfddi.c
@@ -1520,22 +1520,8 @@ void mac_drv_tx_complete(struct s_smc *smc, volatile struct s_smt_fp_txd *txd)
#ifdef DUMPPACKETS
void dump_data(unsigned char *Data, int length)
{
- int i, j;
- unsigned char s[255], sh[10];
- if (length > 64) {
- length = 64;
- }
printk(KERN_INFO "---Packet start---\n");
- for (i = 0, j = 0; i < length / 8; i++, j += 8)
- printk(KERN_INFO "%02x %02x %02x %02x %02x %02x %02x %02x\n",
- Data[j + 0], Data[j + 1], Data[j + 2], Data[j + 3],
- Data[j + 4], Data[j + 5], Data[j + 6], Data[j + 7]);
- strcpy(s, "");
- for (i = 0; i < length % 8; i++) {
- sprintf(sh, "%02x ", Data[j + i]);
- strcat(s, sh);
- }
- printk(KERN_INFO "%s\n", s);
+ print_hex_dump(KERN_INFO, "", DUMP_PREFIX_NONE, 16, 1, Data, min_t(size_t, length, 64), false);
printk(KERN_INFO "------------------\n");
} // dump_data
#else
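The hand-rolled hex formatter is replaced by the kernel's print_hex_dump(), which handles row layout, grouping, and truncation in one call. A usage sketch showing the knobs (this variant adds an offset prefix and ASCII column; the skfddi call above passes DUMP_PREFIX_NONE and ascii=false):

static void dump_example(const u8 *buf, size_t len)
{
	/* 16 bytes per row, 1-byte groups, offsets, trailing ASCII */
	print_hex_dump(KERN_INFO, "pkt: ", DUMP_PREFIX_OFFSET,
		       16, 1, buf, len, true);
}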
diff --git a/drivers/net/fjes/fjes_hw.c b/drivers/net/fjes/fjes_hw.c
index 8a4fbfacad7e..065bb0a40b1d 100644
--- a/drivers/net/fjes/fjes_hw.c
+++ b/drivers/net/fjes/fjes_hw.c
@@ -40,7 +40,7 @@ static u8 *fjes_hw_iomap(struct fjes_hw *hw)
return NULL;
}
- base = (u8 *)ioremap_nocache(hw->hw_res.start, hw->hw_res.size);
+ base = (u8 *)ioremap(hw->hw_res.start, hw->hw_res.size);
return base;
}
diff --git a/drivers/net/fjes/fjes_main.c b/drivers/net/fjes/fjes_main.c
index 91a1059517f5..8c810edece86 100644
--- a/drivers/net/fjes/fjes_main.c
+++ b/drivers/net/fjes/fjes_main.c
@@ -48,7 +48,7 @@ static void fjes_get_stats64(struct net_device *, struct rtnl_link_stats64 *);
static int fjes_change_mtu(struct net_device *, int);
static int fjes_vlan_rx_add_vid(struct net_device *, __be16 proto, u16);
static int fjes_vlan_rx_kill_vid(struct net_device *, __be16 proto, u16);
-static void fjes_tx_retry(struct net_device *);
+static void fjes_tx_retry(struct net_device *, unsigned int txqueue);
static int fjes_acpi_add(struct acpi_device *);
static int fjes_acpi_remove(struct acpi_device *);
@@ -795,7 +795,7 @@ fjes_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
return ret;
}
-static void fjes_tx_retry(struct net_device *netdev)
+static void fjes_tx_retry(struct net_device *netdev, unsigned int txqueue)
{
struct netdev_queue *queue = netdev_get_tx_queue(netdev, 0);
diff --git a/drivers/net/fjes/fjes_trace.h b/drivers/net/fjes/fjes_trace.h
index c611b6a80b20..9237b69d8e21 100644
--- a/drivers/net/fjes/fjes_trace.h
+++ b/drivers/net/fjes/fjes_trace.h
@@ -28,7 +28,7 @@ TRACE_EVENT(fjes_hw_issue_request_command,
__field(u8, cs_busy)
__field(u8, cs_complete)
__field(int, timeout)
- __field(int, ret);
+ __field(int, ret)
),
TP_fast_assign(
__entry->cr_req = cr->bits.req_code;
diff --git a/drivers/net/gtp.c b/drivers/net/gtp.c
index f6222ada6818..7032a2405e1a 100644
--- a/drivers/net/gtp.c
+++ b/drivers/net/gtp.c
@@ -804,19 +804,21 @@ static struct sock *gtp_encap_enable_socket(int fd, int type,
return NULL;
}
- if (sock->sk->sk_protocol != IPPROTO_UDP) {
+ sk = sock->sk;
+ if (sk->sk_protocol != IPPROTO_UDP ||
+ sk->sk_type != SOCK_DGRAM ||
+ (sk->sk_family != AF_INET && sk->sk_family != AF_INET6)) {
pr_debug("socket fd=%d not UDP\n", fd);
sk = ERR_PTR(-EINVAL);
goto out_sock;
}
- lock_sock(sock->sk);
- if (sock->sk->sk_user_data) {
+ lock_sock(sk);
+ if (sk->sk_user_data) {
sk = ERR_PTR(-EBUSY);
goto out_rel_sock;
}
- sk = sock->sk;
sock_hold(sk);
tuncfg.sk_user_data = gtp;
@@ -852,8 +854,7 @@ static int gtp_encap_enable(struct gtp_dev *gtp, struct nlattr *data[])
sk1u = gtp_encap_enable_socket(fd1, UDP_ENCAP_GTP1U, gtp);
if (IS_ERR(sk1u)) {
- if (sk0)
- gtp_encap_disable_sock(sk0);
+ gtp_encap_disable_sock(sk0);
return PTR_ERR(sk1u);
}
}
@@ -861,10 +862,8 @@ static int gtp_encap_enable(struct gtp_dev *gtp, struct nlattr *data[])
if (data[IFLA_GTP_ROLE]) {
role = nla_get_u32(data[IFLA_GTP_ROLE]);
if (role > GTP_ROLE_SGSN) {
- if (sk0)
- gtp_encap_disable_sock(sk0);
- if (sk1u)
- gtp_encap_disable_sock(sk1u);
+ gtp_encap_disable_sock(sk0);
+ gtp_encap_disable_sock(sk1u);
return -EINVAL;
}
}
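The gtp change tightens the encap-socket check from protocol-only to protocol, type, and family, and drops the NULL guards around gtp_encap_disable_sock() on the error paths, which implies that helper tolerates a NULL socket. The validation amounts to this sketch (the helper name is illustrative):

static bool gtp_sock_ok_sketch(struct sock *sk)
{
	return sk->sk_protocol == IPPROTO_UDP &&
	       sk->sk_type == SOCK_DGRAM &&
	       (sk->sk_family == AF_INET || sk->sk_family == AF_INET6);
}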
diff --git a/drivers/net/hamradio/hdlcdrv.c b/drivers/net/hamradio/hdlcdrv.c
index df495b5595f5..e7413a643929 100644
--- a/drivers/net/hamradio/hdlcdrv.c
+++ b/drivers/net/hamradio/hdlcdrv.c
@@ -687,8 +687,6 @@ struct net_device *hdlcdrv_register(const struct hdlcdrv_ops *ops,
struct hdlcdrv_state *s;
int err;
- BUG_ON(ops == NULL);
-
if (privsize < sizeof(struct hdlcdrv_state))
privsize = sizeof(struct hdlcdrv_state);
diff --git a/drivers/net/hyperv/Makefile b/drivers/net/hyperv/Makefile
index 3a2aa0708166..0db7ccaec4a4 100644
--- a/drivers/net/hyperv/Makefile
+++ b/drivers/net/hyperv/Makefile
@@ -1,4 +1,4 @@
# SPDX-License-Identifier: GPL-2.0-only
obj-$(CONFIG_HYPERV_NET) += hv_netvsc.o
-hv_netvsc-y := netvsc_drv.o netvsc.o rndis_filter.o netvsc_trace.o
+hv_netvsc-y := netvsc_drv.o netvsc.o rndis_filter.o netvsc_trace.o netvsc_bpf.o
diff --git a/drivers/net/hyperv/hyperv_net.h b/drivers/net/hyperv/hyperv_net.h
index dc44819946e6..abda736e7c7d 100644
--- a/drivers/net/hyperv/hyperv_net.h
+++ b/drivers/net/hyperv/hyperv_net.h
@@ -142,6 +142,8 @@ struct netvsc_device_info {
u32 send_section_size;
u32 recv_section_size;
+ struct bpf_prog *bprog;
+
u8 rss_key[NETVSC_HASH_KEYLEN];
};
@@ -189,7 +191,8 @@ int netvsc_send(struct net_device *net,
struct hv_netvsc_packet *packet,
struct rndis_message *rndis_msg,
struct hv_page_buffer *page_buffer,
- struct sk_buff *skb);
+ struct sk_buff *skb,
+ bool xdp_tx);
void netvsc_linkstatus_callback(struct net_device *net,
struct rndis_message *resp);
int netvsc_recv_callback(struct net_device *net,
@@ -198,6 +201,16 @@ int netvsc_recv_callback(struct net_device *net,
void netvsc_channel_cb(void *context);
int netvsc_poll(struct napi_struct *napi, int budget);
+u32 netvsc_run_xdp(struct net_device *ndev, struct netvsc_channel *nvchan,
+ struct xdp_buff *xdp);
+unsigned int netvsc_xdp_fraglen(unsigned int len);
+struct bpf_prog *netvsc_xdp_get(struct netvsc_device *nvdev);
+int netvsc_xdp_set(struct net_device *dev, struct bpf_prog *prog,
+ struct netlink_ext_ack *extack,
+ struct netvsc_device *nvdev);
+int netvsc_vf_setxdp(struct net_device *vf_netdev, struct bpf_prog *prog);
+int netvsc_bpf(struct net_device *dev, struct netdev_bpf *bpf);
+
int rndis_set_subchannel(struct net_device *ndev,
struct netvsc_device *nvdev,
struct netvsc_device_info *dev_info);
@@ -832,6 +845,8 @@ struct nvsp_message {
#define RNDIS_MAX_PKT_DEFAULT 8
#define RNDIS_PKT_ALIGN_DEFAULT 8
+#define NETVSC_XDP_HDRM 256
+
struct multi_send_data {
struct sk_buff *skb; /* skb containing the pkt */
struct hv_netvsc_packet *pkt; /* netvsc pkt pending */
@@ -867,6 +882,7 @@ struct netvsc_stats {
u64 bytes;
u64 broadcast;
u64 multicast;
+ u64 xdp_drop;
struct u64_stats_sync syncp;
};
@@ -972,6 +988,9 @@ struct netvsc_channel {
atomic_t queue_sends;
struct nvsc_rsc rsc;
+ struct bpf_prog __rcu *bpf_prog;
+ struct xdp_rxq_info xdp_rxq;
+
struct netvsc_stats tx_stats;
struct netvsc_stats rx_stats;
};
diff --git a/drivers/net/hyperv/netvsc.c b/drivers/net/hyperv/netvsc.c
index eab83e71567a..ae3f3084c2ed 100644
--- a/drivers/net/hyperv/netvsc.c
+++ b/drivers/net/hyperv/netvsc.c
@@ -122,8 +122,10 @@ static void free_netvsc_device(struct rcu_head *head)
vfree(nvdev->send_buf);
kfree(nvdev->send_section_map);
- for (i = 0; i < VRSS_CHANNEL_MAX; i++)
+ for (i = 0; i < VRSS_CHANNEL_MAX; i++) {
+ xdp_rxq_info_unreg(&nvdev->chan_table[i].xdp_rxq);
vfree(nvdev->chan_table[i].mrc.slots);
+ }
kfree(nvdev);
}
@@ -900,7 +902,8 @@ int netvsc_send(struct net_device *ndev,
struct hv_netvsc_packet *packet,
struct rndis_message *rndis_msg,
struct hv_page_buffer *pb,
- struct sk_buff *skb)
+ struct sk_buff *skb,
+ bool xdp_tx)
{
struct net_device_context *ndev_ctx = netdev_priv(ndev);
struct netvsc_device *net_device
@@ -923,10 +926,11 @@ int netvsc_send(struct net_device *ndev,
packet->send_buf_index = NETVSC_INVALID_INDEX;
packet->cp_partial = false;
- /* Send control message directly without accessing msd (Multi-Send
- * Data) field which may be changed during data packet processing.
+ /* Send a control message or XDP packet directly without accessing
+ * msd (Multi-Send Data) field which may be changed during data packet
+ * processing.
*/
- if (!skb)
+ if (!skb || xdp_tx)
return netvsc_send_pkt(device, packet, net_device, pb, skb);
/* batch packets in send buffer if possible */
@@ -1392,6 +1396,21 @@ struct netvsc_device *netvsc_device_add(struct hv_device *device,
nvchan->net_device = net_device;
u64_stats_init(&nvchan->tx_stats.syncp);
u64_stats_init(&nvchan->rx_stats.syncp);
+
+ ret = xdp_rxq_info_reg(&nvchan->xdp_rxq, ndev, i);
+
+ if (ret) {
+ netdev_err(ndev, "xdp_rxq_info_reg fail: %d\n", ret);
+ goto cleanup2;
+ }
+
+ ret = xdp_rxq_info_reg_mem_model(&nvchan->xdp_rxq,
+ MEM_TYPE_PAGE_SHARED, NULL);
+
+ if (ret) {
+ netdev_err(ndev, "xdp reg_mem_model fail: %d\n", ret);
+ goto cleanup2;
+ }
}
/* Enable NAPI handler before init callbacks */
@@ -1437,6 +1456,8 @@ close:
cleanup:
netif_napi_del(&net_device->chan_table[0].napi);
+
+cleanup2:
free_netvsc_device(&net_device->rcu);
return ERR_PTR(ret);
diff --git a/drivers/net/hyperv/netvsc_bpf.c b/drivers/net/hyperv/netvsc_bpf.c
new file mode 100644
index 000000000000..20adfe544294
--- /dev/null
+++ b/drivers/net/hyperv/netvsc_bpf.c
@@ -0,0 +1,209 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright (c) 2019, Microsoft Corporation.
+ *
+ * Author:
+ * Haiyang Zhang <haiyangz@microsoft.com>
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/ethtool.h>
+#include <linux/bpf.h>
+#include <linux/bpf_trace.h>
+#include <linux/kernel.h>
+#include <net/xdp.h>
+
+#include <linux/mutex.h>
+#include <linux/rtnetlink.h>
+
+#include "hyperv_net.h"
+
+u32 netvsc_run_xdp(struct net_device *ndev, struct netvsc_channel *nvchan,
+ struct xdp_buff *xdp)
+{
+ void *data = nvchan->rsc.data[0];
+ u32 len = nvchan->rsc.len[0];
+ struct page *page = NULL;
+ struct bpf_prog *prog;
+ u32 act = XDP_PASS;
+
+ xdp->data_hard_start = NULL;
+
+ rcu_read_lock();
+ prog = rcu_dereference(nvchan->bpf_prog);
+
+ if (!prog)
+ goto out;
+
+ /* allocate page buffer for data */
+ page = alloc_page(GFP_ATOMIC);
+ if (!page) {
+ act = XDP_DROP;
+ goto out;
+ }
+
+ xdp->data_hard_start = page_address(page);
+ xdp->data = xdp->data_hard_start + NETVSC_XDP_HDRM;
+ xdp_set_data_meta_invalid(xdp);
+ xdp->data_end = xdp->data + len;
+ xdp->rxq = &nvchan->xdp_rxq;
+ xdp->handle = 0;
+
+ memcpy(xdp->data, data, len);
+
+ act = bpf_prog_run_xdp(prog, xdp);
+
+ switch (act) {
+ case XDP_PASS:
+ case XDP_TX:
+ case XDP_DROP:
+ break;
+
+ case XDP_ABORTED:
+ trace_xdp_exception(ndev, prog, act);
+ break;
+
+ default:
+ bpf_warn_invalid_xdp_action(act);
+ }
+
+out:
+ rcu_read_unlock();
+
+ if (page && act != XDP_PASS && act != XDP_TX) {
+ __free_page(page);
+ xdp->data_hard_start = NULL;
+ }
+
+ return act;
+}
+
+unsigned int netvsc_xdp_fraglen(unsigned int len)
+{
+ return SKB_DATA_ALIGN(len) +
+ SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
+}
+
+struct bpf_prog *netvsc_xdp_get(struct netvsc_device *nvdev)
+{
+ return rtnl_dereference(nvdev->chan_table[0].bpf_prog);
+}
+
+int netvsc_xdp_set(struct net_device *dev, struct bpf_prog *prog,
+ struct netlink_ext_ack *extack,
+ struct netvsc_device *nvdev)
+{
+ struct bpf_prog *old_prog;
+ int buf_max, i;
+
+ old_prog = netvsc_xdp_get(nvdev);
+
+ if (!old_prog && !prog)
+ return 0;
+
+ buf_max = NETVSC_XDP_HDRM + netvsc_xdp_fraglen(dev->mtu + ETH_HLEN);
+ if (prog && buf_max > PAGE_SIZE) {
+ netdev_err(dev, "XDP: mtu:%u too large, buf_max:%u\n",
+ dev->mtu, buf_max);
+ NL_SET_ERR_MSG_MOD(extack, "XDP: mtu too large");
+
+ return -EOPNOTSUPP;
+ }
+
+ if (prog && (dev->features & NETIF_F_LRO)) {
+ netdev_err(dev, "XDP: LRO is not supported\n");
+ NL_SET_ERR_MSG_MOD(extack, "XDP: LRO is not supported");
+
+ return -EOPNOTSUPP;
+ }
+
+ if (prog)
+ bpf_prog_add(prog, nvdev->num_chn);
+
+ for (i = 0; i < nvdev->num_chn; i++)
+ rcu_assign_pointer(nvdev->chan_table[i].bpf_prog, prog);
+
+ if (old_prog)
+ for (i = 0; i < nvdev->num_chn; i++)
+ bpf_prog_put(old_prog);
+
+ return 0;
+}
+
+int netvsc_vf_setxdp(struct net_device *vf_netdev, struct bpf_prog *prog)
+{
+ struct netdev_bpf xdp;
+ bpf_op_t ndo_bpf;
+
+ ASSERT_RTNL();
+
+ if (!vf_netdev)
+ return 0;
+
+ ndo_bpf = vf_netdev->netdev_ops->ndo_bpf;
+ if (!ndo_bpf)
+ return 0;
+
+ memset(&xdp, 0, sizeof(xdp));
+
+ xdp.command = XDP_SETUP_PROG;
+ xdp.prog = prog;
+
+ return ndo_bpf(vf_netdev, &xdp);
+}
+
+static u32 netvsc_xdp_query(struct netvsc_device *nvdev)
+{
+ struct bpf_prog *prog = netvsc_xdp_get(nvdev);
+
+ if (prog)
+ return prog->aux->id;
+
+ return 0;
+}
+
+int netvsc_bpf(struct net_device *dev, struct netdev_bpf *bpf)
+{
+ struct net_device_context *ndevctx = netdev_priv(dev);
+ struct netvsc_device *nvdev = rtnl_dereference(ndevctx->nvdev);
+ struct net_device *vf_netdev = rtnl_dereference(ndevctx->vf_netdev);
+ struct netlink_ext_ack *extack = bpf->extack;
+ int ret;
+
+ if (!nvdev || nvdev->destroy) {
+ if (bpf->command == XDP_QUERY_PROG) {
+ bpf->prog_id = 0;
+ return 0; /* Query must always succeed */
+ } else {
+ return -ENODEV;
+ }
+ }
+
+ switch (bpf->command) {
+ case XDP_SETUP_PROG:
+ ret = netvsc_xdp_set(dev, bpf->prog, extack, nvdev);
+
+ if (ret)
+ return ret;
+
+ ret = netvsc_vf_setxdp(vf_netdev, bpf->prog);
+
+ if (ret) {
+ netdev_err(dev, "vf_setxdp failed:%d\n", ret);
+ NL_SET_ERR_MSG_MOD(extack, "vf_setxdp failed");
+
+ netvsc_xdp_set(dev, NULL, extack, nvdev);
+ }
+
+ return ret;
+
+ case XDP_QUERY_PROG:
+ bpf->prog_id = netvsc_xdp_query(nvdev);
+ return 0;
+
+ default:
+ return -EINVAL;
+ }
+}
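netvsc_run_xdp() above copies the received frame into a freshly allocated page and runs whatever BPF program is attached to the channel. For reference, a minimal XDP program of the kind it would execute, dropping runt frames and passing everything else; build with clang -O2 -target bpf, assuming libbpf's headers:

#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

SEC("xdp")
int xdp_min(struct xdp_md *ctx)
{
	void *data = (void *)(long)ctx->data;
	void *data_end = (void *)(long)ctx->data_end;

	if (data + 14 > data_end)	/* shorter than an Ethernet header */
		return XDP_DROP;
	return XDP_PASS;
}

char _license[] SEC("license") = "GPL";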
diff --git a/drivers/net/hyperv/netvsc_drv.c b/drivers/net/hyperv/netvsc_drv.c
index f3f9eb8a402a..8fc71bd49894 100644
--- a/drivers/net/hyperv/netvsc_drv.c
+++ b/drivers/net/hyperv/netvsc_drv.c
@@ -25,6 +25,7 @@
#include <linux/slab.h>
#include <linux/rtnetlink.h>
#include <linux/netpoll.h>
+#include <linux/bpf.h>
#include <net/arp.h>
#include <net/route.h>
@@ -519,7 +520,7 @@ static int netvsc_vf_xmit(struct net_device *net, struct net_device *vf_netdev,
return rc;
}
-static int netvsc_start_xmit(struct sk_buff *skb, struct net_device *net)
+static int netvsc_xmit(struct sk_buff *skb, struct net_device *net, bool xdp_tx)
{
struct net_device_context *net_device_ctx = netdev_priv(net);
struct hv_netvsc_packet *packet = NULL;
@@ -686,7 +687,7 @@ static int netvsc_start_xmit(struct sk_buff *skb, struct net_device *net)
/* timestamp packet in software */
skb_tx_timestamp(skb);
- ret = netvsc_send(net, packet, rndis_msg, pb, skb);
+ ret = netvsc_send(net, packet, rndis_msg, pb, skb, xdp_tx);
if (likely(ret == 0))
return NETDEV_TX_OK;
@@ -709,6 +710,11 @@ no_memory:
goto drop;
}
+static int netvsc_start_xmit(struct sk_buff *skb, struct net_device *ndev)
+{
+ return netvsc_xmit(skb, ndev, false);
+}
+
/*
* netvsc_linkstatus_callback - Link up/down notification
*/
@@ -751,6 +757,22 @@ void netvsc_linkstatus_callback(struct net_device *net,
schedule_delayed_work(&ndev_ctx->dwork, 0);
}
+static void netvsc_xdp_xmit(struct sk_buff *skb, struct net_device *ndev)
+{
+ int rc;
+
+ skb->queue_mapping = skb_get_rx_queue(skb);
+ __skb_push(skb, ETH_HLEN);
+
+ rc = netvsc_xmit(skb, ndev, true);
+
+ if (dev_xmit_complete(rc))
+ return;
+
+ dev_kfree_skb_any(skb);
+ ndev->stats.tx_dropped++;
+}
+
static void netvsc_comp_ipcsum(struct sk_buff *skb)
{
struct iphdr *iph = (struct iphdr *)skb->data;
@@ -760,7 +782,8 @@ static void netvsc_comp_ipcsum(struct sk_buff *skb)
}
static struct sk_buff *netvsc_alloc_recv_skb(struct net_device *net,
- struct netvsc_channel *nvchan)
+ struct netvsc_channel *nvchan,
+ struct xdp_buff *xdp)
{
struct napi_struct *napi = &nvchan->napi;
const struct ndis_pkt_8021q_info *vlan = nvchan->rsc.vlan;
@@ -768,18 +791,37 @@ static struct sk_buff *netvsc_alloc_recv_skb(struct net_device *net,
nvchan->rsc.csum_info;
const u32 *hash_info = nvchan->rsc.hash_info;
struct sk_buff *skb;
+ void *xbuf = xdp->data_hard_start;
int i;
- skb = napi_alloc_skb(napi, nvchan->rsc.pktlen);
- if (!skb)
- return skb;
+ if (xbuf) {
+ unsigned int hdroom = xdp->data - xdp->data_hard_start;
+ unsigned int xlen = xdp->data_end - xdp->data;
+ unsigned int frag_size = netvsc_xdp_fraglen(hdroom + xlen);
- /*
- * Copy to skb. This copy is needed here since the memory pointed by
- * hv_netvsc_packet cannot be deallocated
- */
- for (i = 0; i < nvchan->rsc.cnt; i++)
- skb_put_data(skb, nvchan->rsc.data[i], nvchan->rsc.len[i]);
+ skb = build_skb(xbuf, frag_size);
+
+ if (!skb) {
+ __free_page(virt_to_page(xbuf));
+ return NULL;
+ }
+
+ skb_reserve(skb, hdroom);
+ skb_put(skb, xlen);
+ skb->dev = napi->dev;
+ } else {
+ skb = napi_alloc_skb(napi, nvchan->rsc.pktlen);
+
+ if (!skb)
+ return NULL;
+
+ /* Copy to skb. This copy is needed here since the memory
+ * pointed by hv_netvsc_packet cannot be deallocated.
+ */
+ for (i = 0; i < nvchan->rsc.cnt; i++)
+ skb_put_data(skb, nvchan->rsc.data[i],
+ nvchan->rsc.len[i]);
+ }
skb->protocol = eth_type_trans(skb, net);
@@ -829,13 +871,25 @@ int netvsc_recv_callback(struct net_device *net,
struct vmbus_channel *channel = nvchan->channel;
u16 q_idx = channel->offermsg.offer.sub_channel_index;
struct sk_buff *skb;
- struct netvsc_stats *rx_stats;
+ struct netvsc_stats *rx_stats = &nvchan->rx_stats;
+ struct xdp_buff xdp;
+ u32 act;
if (net->reg_state != NETREG_REGISTERED)
return NVSP_STAT_FAIL;
+ act = netvsc_run_xdp(net, nvchan, &xdp);
+
+ if (act != XDP_PASS && act != XDP_TX) {
+ u64_stats_update_begin(&rx_stats->syncp);
+ rx_stats->xdp_drop++;
+ u64_stats_update_end(&rx_stats->syncp);
+
+ return NVSP_STAT_SUCCESS; /* consumed by XDP */
+ }
+
/* Allocate a skb - TODO direct I/O to pages? */
- skb = netvsc_alloc_recv_skb(net, nvchan);
+ skb = netvsc_alloc_recv_skb(net, nvchan, &xdp);
if (unlikely(!skb)) {
++net_device_ctx->eth_stats.rx_no_memory;
@@ -849,7 +903,6 @@ int netvsc_recv_callback(struct net_device *net,
* on the synthetic device because modifying the VF device
* statistics will not work correctly.
*/
- rx_stats = &nvchan->rx_stats;
u64_stats_update_begin(&rx_stats->syncp);
rx_stats->packets++;
rx_stats->bytes += nvchan->rsc.pktlen;
@@ -860,6 +913,11 @@ int netvsc_recv_callback(struct net_device *net,
++rx_stats->multicast;
u64_stats_update_end(&rx_stats->syncp);
+ if (act == XDP_TX) {
+ netvsc_xdp_xmit(skb, net);
+ return NVSP_STAT_SUCCESS;
+ }
+
napi_gro_receive(&nvchan->napi, skb);
return NVSP_STAT_SUCCESS;
}
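The new per-queue xdp_drop counter follows the u64_stats_sync discipline already used for packets/bytes: the NAPI writer brackets its updates, and readers (netvsc_get_ethtool_stats further down) loop until they observe a consistent snapshot. The two halves of the pattern:

	/* writer, in NAPI context */
	u64_stats_update_begin(&rx_stats->syncp);
	rx_stats->xdp_drop++;
	u64_stats_update_end(&rx_stats->syncp);

	/* reader */
	unsigned int start;
	u64 xdp_drop;

	do {
		start = u64_stats_fetch_begin_irq(&rx_stats->syncp);
		xdp_drop = rx_stats->xdp_drop;
	} while (u64_stats_fetch_retry_irq(&rx_stats->syncp, start));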
@@ -886,10 +944,11 @@ static void netvsc_get_channels(struct net_device *net,
/* Alloc struct netvsc_device_info, and initialize it from either existing
* struct netvsc_device, or from default values.
*/
-static struct netvsc_device_info *netvsc_devinfo_get
- (struct netvsc_device *nvdev)
+static
+struct netvsc_device_info *netvsc_devinfo_get(struct netvsc_device *nvdev)
{
struct netvsc_device_info *dev_info;
+ struct bpf_prog *prog;
dev_info = kzalloc(sizeof(*dev_info), GFP_ATOMIC);
@@ -897,6 +956,8 @@ static struct netvsc_device_info *netvsc_devinfo_get
return NULL;
if (nvdev) {
+ ASSERT_RTNL();
+
dev_info->num_chn = nvdev->num_chn;
dev_info->send_sections = nvdev->send_section_cnt;
dev_info->send_section_size = nvdev->send_section_size;
@@ -905,6 +966,12 @@ static struct netvsc_device_info *netvsc_devinfo_get
memcpy(dev_info->rss_key, nvdev->extension->rss_key,
NETVSC_HASH_KEYLEN);
+
+ prog = netvsc_xdp_get(nvdev);
+ if (prog) {
+ bpf_prog_inc(prog);
+ dev_info->bprog = prog;
+ }
} else {
dev_info->num_chn = VRSS_CHANNEL_DEFAULT;
dev_info->send_sections = NETVSC_DEFAULT_TX;
@@ -916,6 +983,17 @@ static struct netvsc_device_info *netvsc_devinfo_get
return dev_info;
}
+/* Free struct netvsc_device_info */
+static void netvsc_devinfo_put(struct netvsc_device_info *dev_info)
+{
+ if (dev_info->bprog) {
+ ASSERT_RTNL();
+ bpf_prog_put(dev_info->bprog);
+ }
+
+ kfree(dev_info);
+}
+
static int netvsc_detach(struct net_device *ndev,
struct netvsc_device *nvdev)
{
@@ -927,6 +1005,8 @@ static int netvsc_detach(struct net_device *ndev,
if (cancel_work_sync(&nvdev->subchan_work))
nvdev->num_chn = 1;
+ netvsc_xdp_set(ndev, NULL, NULL, nvdev);
+
/* If device was up (receiving) then shutdown */
if (netif_running(ndev)) {
netvsc_tx_disable(nvdev, ndev);
@@ -960,7 +1040,8 @@ static int netvsc_attach(struct net_device *ndev,
struct hv_device *hdev = ndev_ctx->device_ctx;
struct netvsc_device *nvdev;
struct rndis_device *rdev;
- int ret;
+ struct bpf_prog *prog;
+ int ret = 0;
nvdev = rndis_filter_device_add(hdev, dev_info);
if (IS_ERR(nvdev))
@@ -976,6 +1057,13 @@ static int netvsc_attach(struct net_device *ndev,
}
}
+ prog = dev_info->bprog;
+ if (prog) {
+ ret = netvsc_xdp_set(ndev, prog, NULL, nvdev);
+ if (ret)
+ goto err1;
+ }
+
/* In any case device is now ready */
netif_device_attach(ndev);
@@ -985,7 +1073,7 @@ static int netvsc_attach(struct net_device *ndev,
if (netif_running(ndev)) {
ret = rndis_filter_open(nvdev);
if (ret)
- goto err;
+ goto err2;
rdev = nvdev->extension;
if (!rdev->link_state)
@@ -994,9 +1082,10 @@ static int netvsc_attach(struct net_device *ndev,
return 0;
-err:
+err2:
netif_device_detach(ndev);
+err1:
rndis_filter_device_remove(hdev, nvdev);
return ret;
@@ -1046,7 +1135,7 @@ static int netvsc_set_channels(struct net_device *net,
}
out:
- kfree(device_info);
+ netvsc_devinfo_put(device_info);
return ret;
}
@@ -1153,7 +1242,7 @@ rollback_vf:
dev_set_mtu(vf_netdev, orig_mtu);
out:
- kfree(device_info);
+ netvsc_devinfo_put(device_info);
return ret;
}
@@ -1378,8 +1467,8 @@ static const struct {
/* statistics per queue (rx/tx packets/bytes) */
#define NETVSC_PCPU_STATS_LEN (num_present_cpus() * ARRAY_SIZE(pcpu_stats))
-/* 4 statistics per queue (rx/tx packets/bytes) */
-#define NETVSC_QUEUE_STATS_LEN(dev) ((dev)->num_chn * 4)
+/* 5 statistics per queue (rx/tx packets/bytes, rx xdp_drop) */
+#define NETVSC_QUEUE_STATS_LEN(dev) ((dev)->num_chn * 5)
static int netvsc_get_sset_count(struct net_device *dev, int string_set)
{
@@ -1411,6 +1500,7 @@ static void netvsc_get_ethtool_stats(struct net_device *dev,
struct netvsc_ethtool_pcpu_stats *pcpu_sum;
unsigned int start;
u64 packets, bytes;
+ u64 xdp_drop;
int i, j, cpu;
if (!nvdev)
@@ -1439,9 +1529,11 @@ static void netvsc_get_ethtool_stats(struct net_device *dev,
start = u64_stats_fetch_begin_irq(&qstats->syncp);
packets = qstats->packets;
bytes = qstats->bytes;
+ xdp_drop = qstats->xdp_drop;
} while (u64_stats_fetch_retry_irq(&qstats->syncp, start));
data[i++] = packets;
data[i++] = bytes;
+ data[i++] = xdp_drop;
}
pcpu_sum = kvmalloc_array(num_possible_cpus(),
@@ -1489,6 +1581,8 @@ static void netvsc_get_strings(struct net_device *dev, u32 stringset, u8 *data)
p += ETH_GSTRING_LEN;
sprintf(p, "rx_queue_%u_bytes", i);
p += ETH_GSTRING_LEN;
+ sprintf(p, "rx_queue_%u_xdp_drop", i);
+ p += ETH_GSTRING_LEN;
}
for_each_present_cpu(cpu) {
@@ -1785,10 +1879,27 @@ static int netvsc_set_ringparam(struct net_device *ndev,
}
out:
- kfree(device_info);
+ netvsc_devinfo_put(device_info);
return ret;
}
+static netdev_features_t netvsc_fix_features(struct net_device *ndev,
+ netdev_features_t features)
+{
+ struct net_device_context *ndevctx = netdev_priv(ndev);
+ struct netvsc_device *nvdev = rtnl_dereference(ndevctx->nvdev);
+
+ if (!nvdev || nvdev->destroy)
+ return features;
+
+ if ((features & NETIF_F_LRO) && netvsc_xdp_get(nvdev)) {
+ features ^= NETIF_F_LRO;
+ netdev_info(ndev, "Skip LRO - unsupported with XDP\n");
+ }
+
+ return features;
+}
+
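
netvsc_fix_features() hooks the RTNL-protected feature recomputation: whenever the stack recalculates features, the driver can veto bits it cannot honor, and here LRO is masked out while an XDP program is attached (XDP expects unmodified, per-packet frames, which LRO aggregation would break). A sketch of how an XDP install path would trigger the re-evaluation (hypothetical helper; assumes rtnl_lock() is held):

    /* Sketch: after swapping the program, ask the core to recompute
     * features so ndo_fix_features() can strip NETIF_F_LRO.
     */
    static int example_xdp_install(struct net_device *ndev,
                                   struct bpf_prog *prog)
    {
            struct net_device_context *ndevctx = netdev_priv(ndev);
            struct netvsc_device *nvdev = rtnl_dereference(ndevctx->nvdev);
            int ret;

            if (!nvdev || nvdev->destroy)
                    return -ENODEV;

            ret = netvsc_xdp_set(ndev, prog, NULL, nvdev);
            if (ret)
                    return ret;

            netdev_update_features(ndev);   /* re-runs ndo_fix_features */
            return 0;
    }
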
static int netvsc_set_features(struct net_device *ndev,
netdev_features_t features)
{
@@ -1875,12 +1986,14 @@ static const struct net_device_ops device_ops = {
.ndo_start_xmit = netvsc_start_xmit,
.ndo_change_rx_flags = netvsc_change_rx_flags,
.ndo_set_rx_mode = netvsc_set_rx_mode,
+ .ndo_fix_features = netvsc_fix_features,
.ndo_set_features = netvsc_set_features,
.ndo_change_mtu = netvsc_change_mtu,
.ndo_validate_addr = eth_validate_addr,
.ndo_set_mac_address = netvsc_set_mac_addr,
.ndo_select_queue = netvsc_select_queue,
.ndo_get_stats64 = netvsc_get_stats64,
+ .ndo_bpf = netvsc_bpf,
};
/*
@@ -2167,6 +2280,7 @@ static int netvsc_register_vf(struct net_device *vf_netdev)
{
struct net_device_context *net_device_ctx;
struct netvsc_device *netvsc_dev;
+ struct bpf_prog *prog;
struct net_device *ndev;
int ret;
@@ -2211,6 +2325,9 @@ static int netvsc_register_vf(struct net_device *vf_netdev)
vf_netdev->wanted_features = ndev->features;
netdev_update_features(vf_netdev);
+ prog = netvsc_xdp_get(netvsc_dev);
+ netvsc_vf_setxdp(vf_netdev, prog);
+
return NOTIFY_OK;
}
@@ -2252,6 +2369,8 @@ static int netvsc_unregister_vf(struct net_device *vf_netdev)
netdev_info(ndev, "VF unregistering: %s\n", vf_netdev->name);
+ netvsc_vf_setxdp(vf_netdev, NULL);
+
netdev_rx_handler_unregister(vf_netdev);
netdev_upper_dev_unlink(vf_netdev, ndev);
RCU_INIT_POINTER(net_device_ctx->vf_netdev, NULL);
@@ -2363,14 +2482,14 @@ static int netvsc_probe(struct hv_device *dev,
list_add(&net_device_ctx->list, &netvsc_dev_list);
rtnl_unlock();
- kfree(device_info);
+ netvsc_devinfo_put(device_info);
return 0;
register_failed:
rtnl_unlock();
rndis_filter_device_remove(dev, nvdev);
rndis_failed:
- kfree(device_info);
+ netvsc_devinfo_put(device_info);
devinfo_failed:
free_percpu(net_device_ctx->vf_stats);
no_stats:
@@ -2398,8 +2517,10 @@ static int netvsc_remove(struct hv_device *dev)
rtnl_lock();
nvdev = rtnl_dereference(ndev_ctx->nvdev);
- if (nvdev)
+ if (nvdev) {
cancel_work_sync(&nvdev->subchan_work);
+ netvsc_xdp_set(net, NULL, NULL, nvdev);
+ }
/*
* Call to the vsc driver to let it know that the device is being
@@ -2472,11 +2593,11 @@ static int netvsc_resume(struct hv_device *dev)
ret = netvsc_attach(net, device_info);
- rtnl_unlock();
-
- kfree(device_info);
+ netvsc_devinfo_put(device_info);
net_device_ctx->saved_netvsc_dev_info = NULL;
+ rtnl_unlock();
+
return ret;
}
static const struct hv_vmbus_device_id id_table[] = {
diff --git a/drivers/net/hyperv/rndis_filter.c b/drivers/net/hyperv/rndis_filter.c
index 857c4bea451c..b81ceba38218 100644
--- a/drivers/net/hyperv/rndis_filter.c
+++ b/drivers/net/hyperv/rndis_filter.c
@@ -235,7 +235,7 @@ static int rndis_filter_send_request(struct rndis_device *dev,
trace_rndis_send(dev->ndev, 0, &req->request_msg);
rcu_read_lock_bh();
- ret = netvsc_send(dev->ndev, packet, NULL, pb, NULL);
+ ret = netvsc_send(dev->ndev, packet, NULL, pb, NULL, false);
rcu_read_unlock_bh();
return ret;
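
netvsc_send() has grown a trailing boolean that, in this series, appears to distinguish XDP-originated transmits from regular stack traffic; control-path callers such as this RNDIS request simply pass false. A sketch of the XDP-side counterpart (hypothetical wrapper; the real caller lives in the new netvsc_bpf.c):

    /* Sketch: an XDP_TX-style sender would pass true in the new
     * argument so the core can account the frame accordingly.
     */
    static inline int example_xdp_xmit(struct net_device *ndev,
                                       struct hv_netvsc_packet *pkt,
                                       struct hv_page_buffer *pb)
    {
            return netvsc_send(ndev, pkt, NULL, pb, NULL, true);
    }
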
@@ -1443,8 +1443,6 @@ void rndis_filter_device_remove(struct hv_device *dev,
/* Halt and release the rndis device */
rndis_filter_halt_device(net_dev, rndis_dev);
- net_dev->extension = NULL;
-
netvsc_device_remove(dev);
}
diff --git a/drivers/net/macsec.c b/drivers/net/macsec.c
index afd8b2a08245..45bfd99f17fa 100644
--- a/drivers/net/macsec.c
+++ b/drivers/net/macsec.c
@@ -11,16 +11,17 @@
#include <linux/module.h>
#include <crypto/aead.h>
#include <linux/etherdevice.h>
+#include <linux/netdevice.h>
#include <linux/rtnetlink.h>
#include <linux/refcount.h>
#include <net/genetlink.h>
#include <net/sock.h>
#include <net/gro_cells.h>
+#include <net/macsec.h>
+#include <linux/phy.h>
#include <uapi/linux/if_macsec.h>
-typedef u64 __bitwise sci_t;
-
#define MACSEC_SCI_LEN 8
/* SecTAG length = macsec_eth_header without the optional SCI */
@@ -58,8 +59,6 @@ struct macsec_eth_header {
#define GCM_AES_IV_LEN 12
#define DEFAULT_ICV_LEN 16
-#define MACSEC_NUM_AN 4 /* 2 bits for the association number */
-
#define for_each_rxsc(secy, sc) \
for (sc = rcu_dereference_bh(secy->rx_sc); \
sc; \
@@ -77,49 +76,6 @@ struct gcm_iv {
__be32 pn;
};
-/**
- * struct macsec_key - SA key
- * @id: user-provided key identifier
- * @tfm: crypto struct, key storage
- */
-struct macsec_key {
- u8 id[MACSEC_KEYID_LEN];
- struct crypto_aead *tfm;
-};
-
-struct macsec_rx_sc_stats {
- __u64 InOctetsValidated;
- __u64 InOctetsDecrypted;
- __u64 InPktsUnchecked;
- __u64 InPktsDelayed;
- __u64 InPktsOK;
- __u64 InPktsInvalid;
- __u64 InPktsLate;
- __u64 InPktsNotValid;
- __u64 InPktsNotUsingSA;
- __u64 InPktsUnusedSA;
-};
-
-struct macsec_rx_sa_stats {
- __u32 InPktsOK;
- __u32 InPktsInvalid;
- __u32 InPktsNotValid;
- __u32 InPktsNotUsingSA;
- __u32 InPktsUnusedSA;
-};
-
-struct macsec_tx_sa_stats {
- __u32 OutPktsProtected;
- __u32 OutPktsEncrypted;
-};
-
-struct macsec_tx_sc_stats {
- __u64 OutPktsProtected;
- __u64 OutPktsEncrypted;
- __u64 OutOctetsProtected;
- __u64 OutOctetsEncrypted;
-};
-
struct macsec_dev_stats {
__u64 OutPktsUntagged;
__u64 InPktsUntagged;
@@ -131,124 +87,8 @@ struct macsec_dev_stats {
__u64 InPktsOverrun;
};
-/**
- * struct macsec_rx_sa - receive secure association
- * @active:
- * @next_pn: packet number expected for the next packet
- * @lock: protects next_pn manipulations
- * @key: key structure
- * @stats: per-SA stats
- */
-struct macsec_rx_sa {
- struct macsec_key key;
- spinlock_t lock;
- u32 next_pn;
- refcount_t refcnt;
- bool active;
- struct macsec_rx_sa_stats __percpu *stats;
- struct macsec_rx_sc *sc;
- struct rcu_head rcu;
-};
-
-struct pcpu_rx_sc_stats {
- struct macsec_rx_sc_stats stats;
- struct u64_stats_sync syncp;
-};
-
-/**
- * struct macsec_rx_sc - receive secure channel
- * @sci: secure channel identifier for this SC
- * @active: channel is active
- * @sa: array of secure associations
- * @stats: per-SC stats
- */
-struct macsec_rx_sc {
- struct macsec_rx_sc __rcu *next;
- sci_t sci;
- bool active;
- struct macsec_rx_sa __rcu *sa[MACSEC_NUM_AN];
- struct pcpu_rx_sc_stats __percpu *stats;
- refcount_t refcnt;
- struct rcu_head rcu_head;
-};
-
-/**
- * struct macsec_tx_sa - transmit secure association
- * @active:
- * @next_pn: packet number to use for the next packet
- * @lock: protects next_pn manipulations
- * @key: key structure
- * @stats: per-SA stats
- */
-struct macsec_tx_sa {
- struct macsec_key key;
- spinlock_t lock;
- u32 next_pn;
- refcount_t refcnt;
- bool active;
- struct macsec_tx_sa_stats __percpu *stats;
- struct rcu_head rcu;
-};
-
-struct pcpu_tx_sc_stats {
- struct macsec_tx_sc_stats stats;
- struct u64_stats_sync syncp;
-};
-
-/**
- * struct macsec_tx_sc - transmit secure channel
- * @active:
- * @encoding_sa: association number of the SA currently in use
- * @encrypt: encrypt packets on transmit, or authenticate only
- * @send_sci: always include the SCI in the SecTAG
- * @end_station:
- * @scb: single copy broadcast flag
- * @sa: array of secure associations
- * @stats: stats for this TXSC
- */
-struct macsec_tx_sc {
- bool active;
- u8 encoding_sa;
- bool encrypt;
- bool send_sci;
- bool end_station;
- bool scb;
- struct macsec_tx_sa __rcu *sa[MACSEC_NUM_AN];
- struct pcpu_tx_sc_stats __percpu *stats;
-};
-
#define MACSEC_VALIDATE_DEFAULT MACSEC_VALIDATE_STRICT
-/**
- * struct macsec_secy - MACsec Security Entity
- * @netdev: netdevice for this SecY
- * @n_rx_sc: number of receive secure channels configured on this SecY
- * @sci: secure channel identifier used for tx
- * @key_len: length of keys used by the cipher suite
- * @icv_len: length of ICV used by the cipher suite
- * @validate_frames: validation mode
- * @operational: MAC_Operational flag
- * @protect_frames: enable protection for this SecY
- * @replay_protect: enable packet number checks on receive
- * @replay_window: size of the replay window
- * @tx_sc: transmit secure channel
- * @rx_sc: linked list of receive secure channels
- */
-struct macsec_secy {
- struct net_device *netdev;
- unsigned int n_rx_sc;
- sci_t sci;
- u16 key_len;
- u16 icv_len;
- enum macsec_validation_type validate_frames;
- bool operational;
- bool protect_frames;
- bool replay_protect;
- u32 replay_window;
- struct macsec_tx_sc tx_sc;
- struct macsec_rx_sc __rcu *rx_sc;
-};
-
struct pcpu_secy_stats {
struct macsec_dev_stats stats;
struct u64_stats_sync syncp;
@@ -260,6 +100,7 @@ struct pcpu_secy_stats {
* @real_dev: pointer to underlying netdevice
* @stats: MACsec device stats
* @secys: linked list of SecY's on the underlying device
+ * @offload: status of offloading on the MACsec device
*/
struct macsec_dev {
struct macsec_secy secy;
@@ -267,6 +108,7 @@ struct macsec_dev {
struct pcpu_secy_stats __percpu *stats;
struct list_head secys;
struct gro_cells gro_cells;
+ enum macsec_offload offload;
};
/**
@@ -480,6 +322,56 @@ static void macsec_set_shortlen(struct macsec_eth_header *h, size_t data_len)
h->short_length = data_len;
}
+/* Checks if a MACsec interface is being offloaded to a hardware engine */
+static bool macsec_is_offloaded(struct macsec_dev *macsec)
+{
+ if (macsec->offload == MACSEC_OFFLOAD_PHY)
+ return true;
+
+ return false;
+}
+
+/* Checks if underlying layers implement MACsec offloading functions. */
+static bool macsec_check_offload(enum macsec_offload offload,
+ struct macsec_dev *macsec)
+{
+ if (!macsec || !macsec->real_dev)
+ return false;
+
+ if (offload == MACSEC_OFFLOAD_PHY)
+ return macsec->real_dev->phydev &&
+ macsec->real_dev->phydev->macsec_ops;
+
+ return false;
+}
+
+static const struct macsec_ops *__macsec_get_ops(enum macsec_offload offload,
+ struct macsec_dev *macsec,
+ struct macsec_context *ctx)
+{
+ if (ctx) {
+ memset(ctx, 0, sizeof(*ctx));
+ ctx->offload = offload;
+
+ if (offload == MACSEC_OFFLOAD_PHY)
+ ctx->phydev = macsec->real_dev->phydev;
+ }
+
+ return macsec->real_dev->phydev->macsec_ops;
+}
+
+/* Returns a pointer to the MACsec ops struct, if any, and updates the
+ * MACsec context device reference if one is provided.
+ */
+static const struct macsec_ops *macsec_get_ops(struct macsec_dev *macsec,
+ struct macsec_context *ctx)
+{
+ if (!macsec_check_offload(macsec->offload, macsec))
+ return NULL;
+
+ return __macsec_get_ops(macsec->offload, macsec, ctx);
+}
+
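
Ops discovery is deliberately two-step: macsec_check_offload() answers whether the underlying layers can offload at all (for the PHY case, real_dev->phydev->macsec_ops must exist), while __macsec_get_ops() additionally initializes the caller-provided macsec_context. Every offload-aware path in this file then follows the same shape; a condensed sketch (macsec_offload() is defined further down):

    /* Sketch: the call-site pattern used by the netlink handlers below. */
    static int example_propagate_rxsc(struct macsec_dev *macsec,
                                      struct macsec_rx_sc *rx_sc)
    {
            const struct macsec_ops *ops;
            struct macsec_context ctx;

            if (!macsec_is_offloaded(macsec))
                    return 0;               /* pure software path */

            ops = macsec_get_ops(macsec, &ctx);
            if (!ops)
                    return -EOPNOTSUPP;

            ctx.rx_sc = rx_sc;
            return macsec_offload(ops->mdo_add_rxsc, &ctx);
    }
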
/* validate MACsec packet according to IEEE 802.1AE-2006 9.12 */
static bool macsec_validate_skb(struct sk_buff *skb, u16 icv_len)
{
@@ -532,6 +424,23 @@ static struct macsec_eth_header *macsec_ethhdr(struct sk_buff *skb)
return (struct macsec_eth_header *)skb_mac_header(skb);
}
+static void __macsec_pn_wrapped(struct macsec_secy *secy,
+ struct macsec_tx_sa *tx_sa)
+{
+ pr_debug("PN wrapped, transitioning to !oper\n");
+ tx_sa->active = false;
+ if (secy->protect_frames)
+ secy->operational = false;
+}
+
+void macsec_pn_wrapped(struct macsec_secy *secy, struct macsec_tx_sa *tx_sa)
+{
+ spin_lock_bh(&tx_sa->lock);
+ __macsec_pn_wrapped(secy, tx_sa);
+ spin_unlock_bh(&tx_sa->lock);
+}
+EXPORT_SYMBOL_GPL(macsec_pn_wrapped);
+
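
Splitting the wrap handling into a locked and an unlocked half lets offloading drivers report packet-number exhaustion themselves: when the hardware PN counter wraps, the driver calls the exported macsec_pn_wrapped(), which deactivates the SA and, when frames are protected, clears MAC_Operational. A sketch of the expected driver usage (hypothetical event handler):

    /* Sketch: hypothetical PHY driver reacting to a PN-exhaustion event. */
    static void example_pn_exhausted(struct macsec_secy *secy,
                                     struct macsec_tx_sa *tx_sa)
    {
            macsec_pn_wrapped(secy, tx_sa);     /* takes tx_sa->lock itself */
    }
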
static u32 tx_sa_update_pn(struct macsec_tx_sa *tx_sa, struct macsec_secy *secy)
{
u32 pn;
@@ -540,12 +449,8 @@ static u32 tx_sa_update_pn(struct macsec_tx_sa *tx_sa, struct macsec_secy *secy)
pn = tx_sa->next_pn;
tx_sa->next_pn++;
- if (tx_sa->next_pn == 0) {
- pr_debug("PN wrapped, transitioning to !oper\n");
- tx_sa->active = false;
- if (secy->protect_frames)
- secy->operational = false;
- }
+ if (tx_sa->next_pn == 0)
+ __macsec_pn_wrapped(secy, tx_sa);
spin_unlock_bh(&tx_sa->lock);
return pn;
@@ -1029,8 +934,10 @@ static struct macsec_rx_sc *find_rx_sc_rtnl(struct macsec_secy *secy, sci_t sci)
return NULL;
}
-static void handle_not_macsec(struct sk_buff *skb)
+static enum rx_handler_result handle_not_macsec(struct sk_buff *skb)
{
+ /* Deliver to the uncontrolled port by default */
+ enum rx_handler_result ret = RX_HANDLER_PASS;
struct macsec_rxh_data *rxd;
struct macsec_dev *macsec;
@@ -1045,7 +952,8 @@ static void handle_not_macsec(struct sk_buff *skb)
struct sk_buff *nskb;
struct pcpu_secy_stats *secy_stats = this_cpu_ptr(macsec->stats);
- if (macsec->secy.validate_frames == MACSEC_VALIDATE_STRICT) {
+ if (!macsec_is_offloaded(macsec) &&
+ macsec->secy.validate_frames == MACSEC_VALIDATE_STRICT) {
u64_stats_update_begin(&secy_stats->syncp);
secy_stats->stats.InPktsNoTag++;
u64_stats_update_end(&secy_stats->syncp);
@@ -1064,9 +972,17 @@ static void handle_not_macsec(struct sk_buff *skb)
secy_stats->stats.InPktsUntagged++;
u64_stats_update_end(&secy_stats->syncp);
}
+
+ if (netif_running(macsec->secy.netdev) &&
+ macsec_is_offloaded(macsec)) {
+ ret = RX_HANDLER_EXACT;
+ goto out;
+ }
}
+out:
rcu_read_unlock();
+ return ret;
}
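
Returning a rx_handler_result from handle_not_macsec() lets the offload case steer delivery: RX_HANDLER_PASS keeps the old behavior (deliver to the uncontrolled port), while RX_HANDLER_EXACT, used when a running, offloaded MACsec device sits on the port, restricts delivery to upper devices with an exact MAC match, since the hardware has already validated and stripped the SecTAG. In miniature:

    /* Sketch: the two outcomes of the handler above. */
    static rx_handler_result_t example_handler(struct sk_buff **pskb)
    {
            bool hw_macsec_active = false;  /* stand-in for the checks above */

            if (hw_macsec_active)
                    return RX_HANDLER_EXACT; /* exact-match delivery only */

            return RX_HANDLER_PASS;          /* uncontrolled port, as before */
    }
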
static rx_handler_result_t macsec_handle_frame(struct sk_buff **pskb)
@@ -1091,12 +1007,8 @@ static rx_handler_result_t macsec_handle_frame(struct sk_buff **pskb)
goto drop_direct;
hdr = macsec_ethhdr(skb);
- if (hdr->eth.h_proto != htons(ETH_P_MACSEC)) {
- handle_not_macsec(skb);
-
- /* and deliver to the uncontrolled port */
- return RX_HANDLER_PASS;
- }
+ if (hdr->eth.h_proto != htons(ETH_P_MACSEC))
+ return handle_not_macsec(skb);
skb = skb_unshare(skb, GFP_ATOMIC);
*pskb = skb;
@@ -1585,6 +1497,7 @@ static const struct nla_policy macsec_genl_policy[NUM_MACSEC_ATTR] = {
[MACSEC_ATTR_IFINDEX] = { .type = NLA_U32 },
[MACSEC_ATTR_RXSC_CONFIG] = { .type = NLA_NESTED },
[MACSEC_ATTR_SA_CONFIG] = { .type = NLA_NESTED },
+ [MACSEC_ATTR_OFFLOAD] = { .type = NLA_NESTED },
};
static const struct nla_policy macsec_genl_rxsc_policy[NUM_MACSEC_RXSC_ATTR] = {
@@ -1602,6 +1515,44 @@ static const struct nla_policy macsec_genl_sa_policy[NUM_MACSEC_SA_ATTR] = {
.len = MACSEC_MAX_KEY_LEN, },
};
+static const struct nla_policy macsec_genl_offload_policy[NUM_MACSEC_OFFLOAD_ATTR] = {
+ [MACSEC_OFFLOAD_ATTR_TYPE] = { .type = NLA_U8 },
+};
+
+/* Offloads an operation to a device driver */
+static int macsec_offload(int (* const func)(struct macsec_context *),
+ struct macsec_context *ctx)
+{
+ int ret;
+
+ if (unlikely(!func))
+ return 0;
+
+ if (ctx->offload == MACSEC_OFFLOAD_PHY)
+ mutex_lock(&ctx->phydev->lock);
+
+ /* Phase I: prepare. The driver should fail here if there are going to be
+ * issues in the commit phase.
+ */
+ ctx->prepare = true;
+ ret = (*func)(ctx);
+ if (ret)
+ goto phy_unlock;
+
+ /* Phase II: commit. This step cannot fail. */
+ ctx->prepare = false;
+ ret = (*func)(ctx);
+ /* This should never happen: commit is not allowed to fail */
+ if (unlikely(ret))
+ WARN(1, "MACsec offloading commit failed (%d)\n", ret);
+
+phy_unlock:
+ if (ctx->offload == MACSEC_OFFLOAD_PHY)
+ mutex_unlock(&ctx->phydev->lock);
+
+ return ret;
+}
+
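
macsec_offload() implements a two-phase transaction against the driver: the hook is first invoked with ctx->prepare set, where it may validate and fail, and then invoked again to commit, where failure is a bug (hence the WARN). A driver-side sketch of an mdo_* hook honoring that contract (hypothetical driver; the example_* helpers are placeholders):

    /* Sketch: hypothetical driver hook under the prepare/commit scheme. */
    static int example_mdo_add_rxsc(struct macsec_context *ctx)
    {
            if (ctx->prepare)
                    /* validate only - no state changes, may return -errno */
                    return example_validate_rxsc(ctx->rx_sc);

            /* commit - program the hardware, must not fail */
            example_program_rxsc(ctx->phydev, ctx->rx_sc);
            return 0;
    }
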
static int parse_sa_config(struct nlattr **attrs, struct nlattr **tb_sa)
{
if (!attrs[MACSEC_ATTR_SA_CONFIG])
@@ -1717,13 +1668,40 @@ static int macsec_add_rxsa(struct sk_buff *skb, struct genl_info *info)
if (tb_sa[MACSEC_SA_ATTR_ACTIVE])
rx_sa->active = !!nla_get_u8(tb_sa[MACSEC_SA_ATTR_ACTIVE]);
- nla_memcpy(rx_sa->key.id, tb_sa[MACSEC_SA_ATTR_KEYID], MACSEC_KEYID_LEN);
rx_sa->sc = rx_sc;
+
+ /* If h/w offloading is available, propagate to the device */
+ if (macsec_is_offloaded(netdev_priv(dev))) {
+ const struct macsec_ops *ops;
+ struct macsec_context ctx;
+
+ ops = macsec_get_ops(netdev_priv(dev), &ctx);
+ if (!ops) {
+ err = -EOPNOTSUPP;
+ goto cleanup;
+ }
+
+ ctx.sa.assoc_num = assoc_num;
+ ctx.sa.rx_sa = rx_sa;
+ memcpy(ctx.sa.key, nla_data(tb_sa[MACSEC_SA_ATTR_KEY]),
+ MACSEC_KEYID_LEN);
+
+ err = macsec_offload(ops->mdo_add_rxsa, &ctx);
+ if (err)
+ goto cleanup;
+ }
+
+ nla_memcpy(rx_sa->key.id, tb_sa[MACSEC_SA_ATTR_KEYID], MACSEC_KEYID_LEN);
rcu_assign_pointer(rx_sc->sa[assoc_num], rx_sa);
rtnl_unlock();
return 0;
+
+cleanup:
+ kfree(rx_sa);
+ rtnl_unlock();
+ return err;
}
static bool validate_add_rxsc(struct nlattr **attrs)
@@ -1746,6 +1724,8 @@ static int macsec_add_rxsc(struct sk_buff *skb, struct genl_info *info)
struct nlattr **attrs = info->attrs;
struct macsec_rx_sc *rx_sc;
struct nlattr *tb_rxsc[MACSEC_RXSC_ATTR_MAX + 1];
+ bool was_active;
+ int ret;
if (!attrs[MACSEC_ATTR_IFINDEX])
return -EINVAL;
@@ -1771,12 +1751,35 @@ static int macsec_add_rxsc(struct sk_buff *skb, struct genl_info *info)
return PTR_ERR(rx_sc);
}
+ was_active = rx_sc->active;
if (tb_rxsc[MACSEC_RXSC_ATTR_ACTIVE])
rx_sc->active = !!nla_get_u8(tb_rxsc[MACSEC_RXSC_ATTR_ACTIVE]);
+ if (macsec_is_offloaded(netdev_priv(dev))) {
+ const struct macsec_ops *ops;
+ struct macsec_context ctx;
+
+ ops = macsec_get_ops(netdev_priv(dev), &ctx);
+ if (!ops) {
+ ret = -EOPNOTSUPP;
+ goto cleanup;
+ }
+
+ ctx.rx_sc = rx_sc;
+
+ ret = macsec_offload(ops->mdo_add_rxsc, &ctx);
+ if (ret)
+ goto cleanup;
+ }
+
rtnl_unlock();
return 0;
+
+cleanup:
+ rx_sc->active = was_active;
+ rtnl_unlock();
+ return ret;
}
static bool validate_add_txsa(struct nlattr **attrs)
@@ -1813,6 +1816,7 @@ static int macsec_add_txsa(struct sk_buff *skb, struct genl_info *info)
struct macsec_tx_sa *tx_sa;
unsigned char assoc_num;
struct nlattr *tb_sa[MACSEC_SA_ATTR_MAX + 1];
+ bool was_operational;
int err;
if (!attrs[MACSEC_ATTR_IFINDEX])
@@ -1863,8 +1867,6 @@ static int macsec_add_txsa(struct sk_buff *skb, struct genl_info *info)
return err;
}
- nla_memcpy(tx_sa->key.id, tb_sa[MACSEC_SA_ATTR_KEYID], MACSEC_KEYID_LEN);
-
spin_lock_bh(&tx_sa->lock);
tx_sa->next_pn = nla_get_u32(tb_sa[MACSEC_SA_ATTR_PN]);
spin_unlock_bh(&tx_sa->lock);
@@ -1872,14 +1874,43 @@ static int macsec_add_txsa(struct sk_buff *skb, struct genl_info *info)
if (tb_sa[MACSEC_SA_ATTR_ACTIVE])
tx_sa->active = !!nla_get_u8(tb_sa[MACSEC_SA_ATTR_ACTIVE]);
+ was_operational = secy->operational;
if (assoc_num == tx_sc->encoding_sa && tx_sa->active)
secy->operational = true;
+ /* If h/w offloading is available, propagate to the device */
+ if (macsec_is_offloaded(netdev_priv(dev))) {
+ const struct macsec_ops *ops;
+ struct macsec_context ctx;
+
+ ops = macsec_get_ops(netdev_priv(dev), &ctx);
+ if (!ops) {
+ err = -EOPNOTSUPP;
+ goto cleanup;
+ }
+
+ ctx.sa.assoc_num = assoc_num;
+ ctx.sa.tx_sa = tx_sa;
+ memcpy(ctx.sa.key, nla_data(tb_sa[MACSEC_SA_ATTR_KEY]),
+ MACSEC_KEYID_LEN);
+
+ err = macsec_offload(ops->mdo_add_txsa, &ctx);
+ if (err)
+ goto cleanup;
+ }
+
+ nla_memcpy(tx_sa->key.id, tb_sa[MACSEC_SA_ATTR_KEYID], MACSEC_KEYID_LEN);
rcu_assign_pointer(tx_sc->sa[assoc_num], tx_sa);
rtnl_unlock();
return 0;
+
+cleanup:
+ secy->operational = was_operational;
+ kfree(tx_sa);
+ rtnl_unlock();
+ return err;
}
static int macsec_del_rxsa(struct sk_buff *skb, struct genl_info *info)
@@ -1892,6 +1923,7 @@ static int macsec_del_rxsa(struct sk_buff *skb, struct genl_info *info)
u8 assoc_num;
struct nlattr *tb_rxsc[MACSEC_RXSC_ATTR_MAX + 1];
struct nlattr *tb_sa[MACSEC_SA_ATTR_MAX + 1];
+ int ret;
if (!attrs[MACSEC_ATTR_IFINDEX])
return -EINVAL;
@@ -1915,12 +1947,35 @@ static int macsec_del_rxsa(struct sk_buff *skb, struct genl_info *info)
return -EBUSY;
}
+ /* If h/w offloading is available, propagate to the device */
+ if (macsec_is_offloaded(netdev_priv(dev))) {
+ const struct macsec_ops *ops;
+ struct macsec_context ctx;
+
+ ops = macsec_get_ops(netdev_priv(dev), &ctx);
+ if (!ops) {
+ ret = -EOPNOTSUPP;
+ goto cleanup;
+ }
+
+ ctx.sa.assoc_num = assoc_num;
+ ctx.sa.rx_sa = rx_sa;
+
+ ret = macsec_offload(ops->mdo_del_rxsa, &ctx);
+ if (ret)
+ goto cleanup;
+ }
+
RCU_INIT_POINTER(rx_sc->sa[assoc_num], NULL);
clear_rx_sa(rx_sa);
rtnl_unlock();
return 0;
+
+cleanup:
+ rtnl_unlock();
+ return ret;
}
static int macsec_del_rxsc(struct sk_buff *skb, struct genl_info *info)
@@ -1931,6 +1986,7 @@ static int macsec_del_rxsc(struct sk_buff *skb, struct genl_info *info)
struct macsec_rx_sc *rx_sc;
sci_t sci;
struct nlattr *tb_rxsc[MACSEC_RXSC_ATTR_MAX + 1];
+ int ret;
if (!attrs[MACSEC_ATTR_IFINDEX])
return -EINVAL;
@@ -1957,10 +2013,31 @@ static int macsec_del_rxsc(struct sk_buff *skb, struct genl_info *info)
return -ENODEV;
}
+ /* If h/w offloading is available, propagate to the device */
+ if (macsec_is_offloaded(netdev_priv(dev))) {
+ const struct macsec_ops *ops;
+ struct macsec_context ctx;
+
+ ops = macsec_get_ops(netdev_priv(dev), &ctx);
+ if (!ops) {
+ ret = -EOPNOTSUPP;
+ goto cleanup;
+ }
+
+ ctx.rx_sc = rx_sc;
+ ret = macsec_offload(ops->mdo_del_rxsc, &ctx);
+ if (ret)
+ goto cleanup;
+ }
+
free_rx_sc(rx_sc);
rtnl_unlock();
return 0;
+
+cleanup:
+ rtnl_unlock();
+ return ret;
}
static int macsec_del_txsa(struct sk_buff *skb, struct genl_info *info)
@@ -1972,6 +2049,7 @@ static int macsec_del_txsa(struct sk_buff *skb, struct genl_info *info)
struct macsec_tx_sa *tx_sa;
u8 assoc_num;
struct nlattr *tb_sa[MACSEC_SA_ATTR_MAX + 1];
+ int ret;
if (!attrs[MACSEC_ATTR_IFINDEX])
return -EINVAL;
@@ -1992,12 +2070,35 @@ static int macsec_del_txsa(struct sk_buff *skb, struct genl_info *info)
return -EBUSY;
}
+ /* If h/w offloading is available, propagate to the device */
+ if (macsec_is_offloaded(netdev_priv(dev))) {
+ const struct macsec_ops *ops;
+ struct macsec_context ctx;
+
+ ops = macsec_get_ops(netdev_priv(dev), &ctx);
+ if (!ops) {
+ ret = -EOPNOTSUPP;
+ goto cleanup;
+ }
+
+ ctx.sa.assoc_num = assoc_num;
+ ctx.sa.tx_sa = tx_sa;
+
+ ret = macsec_offload(ops->mdo_del_txsa, &ctx);
+ if (ret)
+ goto cleanup;
+ }
+
RCU_INIT_POINTER(tx_sc->sa[assoc_num], NULL);
clear_tx_sa(tx_sa);
rtnl_unlock();
return 0;
+
+cleanup:
+ rtnl_unlock();
+ return ret;
}
static bool validate_upd_sa(struct nlattr **attrs)
@@ -2030,6 +2131,9 @@ static int macsec_upd_txsa(struct sk_buff *skb, struct genl_info *info)
struct macsec_tx_sa *tx_sa;
u8 assoc_num;
struct nlattr *tb_sa[MACSEC_SA_ATTR_MAX + 1];
+ bool was_operational, was_active;
+ u32 prev_pn = 0;
+ int ret = 0;
if (!attrs[MACSEC_ATTR_IFINDEX])
return -EINVAL;
@@ -2050,19 +2154,52 @@ static int macsec_upd_txsa(struct sk_buff *skb, struct genl_info *info)
if (tb_sa[MACSEC_SA_ATTR_PN]) {
spin_lock_bh(&tx_sa->lock);
+ prev_pn = tx_sa->next_pn;
tx_sa->next_pn = nla_get_u32(tb_sa[MACSEC_SA_ATTR_PN]);
spin_unlock_bh(&tx_sa->lock);
}
+ was_active = tx_sa->active;
if (tb_sa[MACSEC_SA_ATTR_ACTIVE])
tx_sa->active = nla_get_u8(tb_sa[MACSEC_SA_ATTR_ACTIVE]);
+ was_operational = secy->operational;
if (assoc_num == tx_sc->encoding_sa)
secy->operational = tx_sa->active;
+ /* If h/w offloading is available, propagate to the device */
+ if (macsec_is_offloaded(netdev_priv(dev))) {
+ const struct macsec_ops *ops;
+ struct macsec_context ctx;
+
+ ops = macsec_get_ops(netdev_priv(dev), &ctx);
+ if (!ops) {
+ ret = -EOPNOTSUPP;
+ goto cleanup;
+ }
+
+ ctx.sa.assoc_num = assoc_num;
+ ctx.sa.tx_sa = tx_sa;
+
+ ret = macsec_offload(ops->mdo_upd_txsa, &ctx);
+ if (ret)
+ goto cleanup;
+ }
+
rtnl_unlock();
return 0;
+
+cleanup:
+ if (tb_sa[MACSEC_SA_ATTR_PN]) {
+ spin_lock_bh(&tx_sa->lock);
+ tx_sa->next_pn = prev_pn;
+ spin_unlock_bh(&tx_sa->lock);
+ }
+ tx_sa->active = was_active;
+ secy->operational = was_operational;
+ rtnl_unlock();
+ return ret;
}
static int macsec_upd_rxsa(struct sk_buff *skb, struct genl_info *info)
@@ -2075,6 +2212,9 @@ static int macsec_upd_rxsa(struct sk_buff *skb, struct genl_info *info)
u8 assoc_num;
struct nlattr *tb_rxsc[MACSEC_RXSC_ATTR_MAX + 1];
struct nlattr *tb_sa[MACSEC_SA_ATTR_MAX + 1];
+ bool was_active;
+ u32 prev_pn = 0;
+ int ret = 0;
if (!attrs[MACSEC_ATTR_IFINDEX])
return -EINVAL;
@@ -2098,15 +2238,46 @@ static int macsec_upd_rxsa(struct sk_buff *skb, struct genl_info *info)
if (tb_sa[MACSEC_SA_ATTR_PN]) {
spin_lock_bh(&rx_sa->lock);
+ prev_pn = rx_sa->next_pn;
rx_sa->next_pn = nla_get_u32(tb_sa[MACSEC_SA_ATTR_PN]);
spin_unlock_bh(&rx_sa->lock);
}
+ was_active = rx_sa->active;
if (tb_sa[MACSEC_SA_ATTR_ACTIVE])
rx_sa->active = nla_get_u8(tb_sa[MACSEC_SA_ATTR_ACTIVE]);
+ /* If h/w offloading is available, propagate to the device */
+ if (macsec_is_offloaded(netdev_priv(dev))) {
+ const struct macsec_ops *ops;
+ struct macsec_context ctx;
+
+ ops = macsec_get_ops(netdev_priv(dev), &ctx);
+ if (!ops) {
+ ret = -EOPNOTSUPP;
+ goto cleanup;
+ }
+
+ ctx.sa.assoc_num = assoc_num;
+ ctx.sa.rx_sa = rx_sa;
+
+ ret = macsec_offload(ops->mdo_upd_rxsa, &ctx);
+ if (ret)
+ goto cleanup;
+ }
+
rtnl_unlock();
return 0;
+
+cleanup:
+ if (tb_sa[MACSEC_SA_ATTR_PN]) {
+ spin_lock_bh(&rx_sa->lock);
+ rx_sa->next_pn = prev_pn;
+ spin_unlock_bh(&rx_sa->lock);
+ }
+ rx_sa->active = was_active;
+ rtnl_unlock();
+ return ret;
}
static int macsec_upd_rxsc(struct sk_buff *skb, struct genl_info *info)
@@ -2116,6 +2287,9 @@ static int macsec_upd_rxsc(struct sk_buff *skb, struct genl_info *info)
struct macsec_secy *secy;
struct macsec_rx_sc *rx_sc;
struct nlattr *tb_rxsc[MACSEC_RXSC_ATTR_MAX + 1];
+ unsigned int prev_n_rx_sc;
+ bool was_active;
+ int ret;
if (!attrs[MACSEC_ATTR_IFINDEX])
return -EINVAL;
@@ -2133,6 +2307,8 @@ static int macsec_upd_rxsc(struct sk_buff *skb, struct genl_info *info)
return PTR_ERR(rx_sc);
}
+ was_active = rx_sc->active;
+ prev_n_rx_sc = secy->n_rx_sc;
if (tb_rxsc[MACSEC_RXSC_ATTR_ACTIVE]) {
bool new = !!nla_get_u8(tb_rxsc[MACSEC_RXSC_ATTR_ACTIVE]);
@@ -2142,9 +2318,153 @@ static int macsec_upd_rxsc(struct sk_buff *skb, struct genl_info *info)
rx_sc->active = new;
}
+ /* If h/w offloading is available, propagate to the device */
+ if (macsec_is_offloaded(netdev_priv(dev))) {
+ const struct macsec_ops *ops;
+ struct macsec_context ctx;
+
+ ops = macsec_get_ops(netdev_priv(dev), &ctx);
+ if (!ops) {
+ ret = -EOPNOTSUPP;
+ goto cleanup;
+ }
+
+ ctx.rx_sc = rx_sc;
+
+ ret = macsec_offload(ops->mdo_upd_rxsc, &ctx);
+ if (ret)
+ goto cleanup;
+ }
+
+ rtnl_unlock();
+
+ return 0;
+
+cleanup:
+ secy->n_rx_sc = prev_n_rx_sc;
+ rx_sc->active = was_active;
rtnl_unlock();
+ return ret;
+}
+
+static bool macsec_is_configured(struct macsec_dev *macsec)
+{
+ struct macsec_secy *secy = &macsec->secy;
+ struct macsec_tx_sc *tx_sc = &secy->tx_sc;
+ int i;
+
+ if (secy->n_rx_sc > 0)
+ return true;
+
+ for (i = 0; i < MACSEC_NUM_AN; i++)
+ if (tx_sc->sa[i])
+ return true;
+
+ return false;
+}
+
+static int macsec_upd_offload(struct sk_buff *skb, struct genl_info *info)
+{
+ struct nlattr *tb_offload[MACSEC_OFFLOAD_ATTR_MAX + 1];
+ enum macsec_offload offload, prev_offload;
+ int (*func)(struct macsec_context *ctx);
+ struct nlattr **attrs = info->attrs;
+ struct net_device *dev, *loop_dev;
+ const struct macsec_ops *ops;
+ struct macsec_context ctx;
+ struct macsec_dev *macsec;
+ struct net *loop_net;
+ int ret;
+
+ if (!attrs[MACSEC_ATTR_IFINDEX])
+ return -EINVAL;
+
+ if (!attrs[MACSEC_ATTR_OFFLOAD])
+ return -EINVAL;
+
+ if (nla_parse_nested_deprecated(tb_offload, MACSEC_OFFLOAD_ATTR_MAX,
+ attrs[MACSEC_ATTR_OFFLOAD],
+ macsec_genl_offload_policy, NULL))
+ return -EINVAL;
+
+ dev = get_dev_from_nl(genl_info_net(info), attrs);
+ if (IS_ERR(dev))
+ return PTR_ERR(dev);
+ macsec = macsec_priv(dev);
+
+ offload = nla_get_u8(tb_offload[MACSEC_OFFLOAD_ATTR_TYPE]);
+ if (macsec->offload == offload)
+ return 0;
+
+ /* Check if the offloading mode is supported by the underlying layers */
+ if (offload != MACSEC_OFFLOAD_OFF &&
+ !macsec_check_offload(offload, macsec))
+ return -EOPNOTSUPP;
+
+ if (offload == MACSEC_OFFLOAD_OFF)
+ goto skip_limitation;
+
+ /* Check that the physical interface isn't already offloading another
+ * MACsec interface.
+ */
+ for_each_net(loop_net) {
+ for_each_netdev(loop_net, loop_dev) {
+ struct macsec_dev *priv;
+
+ if (!netif_is_macsec(loop_dev))
+ continue;
+
+ priv = macsec_priv(loop_dev);
+
+ if (priv->real_dev == macsec->real_dev &&
+ priv->offload != MACSEC_OFFLOAD_OFF)
+ return -EBUSY;
+ }
+ }
+
+skip_limitation:
+ /* Check if the net device is busy. */
+ if (netif_running(dev))
+ return -EBUSY;
+
+ rtnl_lock();
+
+ prev_offload = macsec->offload;
+ macsec->offload = offload;
+
+ /* Check if the device already has rules configured: we do not support
+ * rules migration.
+ */
+ if (macsec_is_configured(macsec)) {
+ ret = -EBUSY;
+ goto rollback;
+ }
+
+ ops = __macsec_get_ops(offload == MACSEC_OFFLOAD_OFF ? prev_offload : offload,
+ macsec, &ctx);
+ if (!ops) {
+ ret = -EOPNOTSUPP;
+ goto rollback;
+ }
+
+ if (prev_offload == MACSEC_OFFLOAD_OFF)
+ func = ops->mdo_add_secy;
+ else
+ func = ops->mdo_del_secy;
+
+ ctx.secy = &macsec->secy;
+ ret = macsec_offload(func, &ctx);
+ if (ret)
+ goto rollback;
+
+ rtnl_unlock();
return 0;
+
+rollback:
+ macsec->offload = prev_offload;
+
+ rtnl_unlock();
+ return ret;
}
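
Toggling offload is deliberately restrictive: it refuses while the interface is up, while any SC/SA is configured (no rule migration), and while the physical device already offloads another SecY; the direction of the switch decides whether the SecY is added to or deleted from the hardware, and macsec->offload is restored on any failure. The enable half, reduced to its skeleton (a sketch; the busy checks above are elided):

    /* Sketch: why the flag flips before the hardware call - every
     * helper here resolves ops through macsec->offload, so flag and
     * hardware state must move together, with rollback on failure.
     */
    static int example_enable_offload(struct macsec_dev *macsec,
                                      enum macsec_offload offload)
    {
            const struct macsec_ops *ops;
            struct macsec_context ctx;
            int ret;

            if (macsec_is_configured(macsec))
                    return -EBUSY;          /* no rule migration */

            macsec->offload = offload;
            ops = macsec_get_ops(macsec, &ctx);
            if (!ops) {
                    macsec->offload = MACSEC_OFFLOAD_OFF;
                    return -EOPNOTSUPP;
            }

            ctx.secy = &macsec->secy;
            ret = macsec_offload(ops->mdo_add_secy, &ctx);
            if (ret)
                    macsec->offload = MACSEC_OFFLOAD_OFF;   /* roll back */
            return ret;
    }
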
static int copy_tx_sa_stats(struct sk_buff *skb,
@@ -2408,12 +2728,13 @@ static noinline_for_stack int
dump_secy(struct macsec_secy *secy, struct net_device *dev,
struct sk_buff *skb, struct netlink_callback *cb)
{
- struct macsec_rx_sc *rx_sc;
+ struct macsec_dev *macsec = netdev_priv(dev);
struct macsec_tx_sc *tx_sc = &secy->tx_sc;
struct nlattr *txsa_list, *rxsc_list;
- int i, j;
- void *hdr;
+ struct macsec_rx_sc *rx_sc;
struct nlattr *attr;
+ void *hdr;
+ int i, j;
hdr = genlmsg_put(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq,
&macsec_fam, NLM_F_MULTI, MACSEC_CMD_GET_TXSC);
@@ -2425,6 +2746,13 @@ dump_secy(struct macsec_secy *secy, struct net_device *dev,
if (nla_put_u32(skb, MACSEC_ATTR_IFINDEX, dev->ifindex))
goto nla_put_failure;
+ attr = nla_nest_start_noflag(skb, MACSEC_ATTR_OFFLOAD);
+ if (!attr)
+ goto nla_put_failure;
+ if (nla_put_u8(skb, MACSEC_OFFLOAD_ATTR_TYPE, macsec->offload))
+ goto nla_put_failure;
+ nla_nest_end(skb, attr);
+
if (nla_put_secy(secy, skb))
goto nla_put_failure;
@@ -2690,6 +3018,12 @@ static const struct genl_ops macsec_genl_ops[] = {
.doit = macsec_upd_rxsa,
.flags = GENL_ADMIN_PERM,
},
+ {
+ .cmd = MACSEC_CMD_UPD_OFFLOAD,
+ .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
+ .doit = macsec_upd_offload,
+ .flags = GENL_ADMIN_PERM,
+ },
};
static struct genl_family macsec_fam __ro_after_init = {
@@ -2712,6 +3046,11 @@ static netdev_tx_t macsec_start_xmit(struct sk_buff *skb,
struct pcpu_secy_stats *secy_stats;
int ret, len;
+ if (macsec_is_offloaded(netdev_priv(dev))) {
+ skb->dev = macsec->real_dev;
+ return dev_queue_xmit(skb);
+ }
+
/* 10.5 */
if (!secy->protect_frames) {
secy_stats = this_cpu_ptr(macsec->stats);
@@ -2825,6 +3164,22 @@ static int macsec_dev_open(struct net_device *dev)
goto clear_allmulti;
}
+ /* If h/w offloading is available, propagate to the device */
+ if (macsec_is_offloaded(macsec)) {
+ const struct macsec_ops *ops;
+ struct macsec_context ctx;
+
+ ops = macsec_get_ops(netdev_priv(dev), &ctx);
+ if (!ops) {
+ err = -EOPNOTSUPP;
+ goto clear_allmulti;
+ }
+
+ err = macsec_offload(ops->mdo_dev_open, &ctx);
+ if (err)
+ goto clear_allmulti;
+ }
+
if (netif_carrier_ok(real_dev))
netif_carrier_on(dev);
@@ -2845,6 +3200,16 @@ static int macsec_dev_stop(struct net_device *dev)
netif_carrier_off(dev);
+ /* If h/w offloading is available, propagate to the device */
+ if (macsec_is_offloaded(macsec)) {
+ const struct macsec_ops *ops;
+ struct macsec_context ctx;
+
+ ops = macsec_get_ops(macsec, &ctx);
+ if (ops)
+ macsec_offload(ops->mdo_dev_stop, &ctx);
+ }
+
dev_mc_unsync(real_dev, dev);
dev_uc_unsync(real_dev, dev);
@@ -3076,6 +3441,11 @@ static int macsec_changelink(struct net_device *dev, struct nlattr *tb[],
struct nlattr *data[],
struct netlink_ext_ack *extack)
{
+ struct macsec_dev *macsec = macsec_priv(dev);
+ struct macsec_tx_sc tx_sc;
+ struct macsec_secy secy;
+ int ret;
+
if (!data)
return 0;
@@ -3085,7 +3455,41 @@ static int macsec_changelink(struct net_device *dev, struct nlattr *tb[],
data[IFLA_MACSEC_PORT])
return -EINVAL;
- return macsec_changelink_common(dev, data);
+ /* Keep a copy of unmodified secy and tx_sc, in case the offload
+ * propagation fails, to revert macsec_changelink_common.
+ */
+ memcpy(&secy, &macsec->secy, sizeof(secy));
+ memcpy(&tx_sc, &macsec->secy.tx_sc, sizeof(tx_sc));
+
+ ret = macsec_changelink_common(dev, data);
+ if (ret)
+ return ret;
+
+ /* If h/w offloading is available, propagate to the device */
+ if (macsec_is_offloaded(macsec)) {
+ const struct macsec_ops *ops;
+ struct macsec_context ctx;
+
+ ops = macsec_get_ops(netdev_priv(dev), &ctx);
+ if (!ops) {
+ ret = -EOPNOTSUPP;
+ goto cleanup;
+ }
+
+ ctx.secy = &macsec->secy;
+ ret = macsec_offload(ops->mdo_upd_secy, &ctx);
+ if (ret)
+ goto cleanup;
+ }
+
+ return 0;
+
+cleanup:
+ memcpy(&macsec->secy.tx_sc, &tx_sc, sizeof(tx_sc));
+ memcpy(&macsec->secy, &secy, sizeof(secy));
+
+ return ret;
}
static void macsec_del_dev(struct macsec_dev *macsec)
@@ -3128,6 +3532,18 @@ static void macsec_dellink(struct net_device *dev, struct list_head *head)
struct net_device *real_dev = macsec->real_dev;
struct macsec_rxh_data *rxd = macsec_data_rtnl(real_dev);
+ /* If h/w offloading is available, propagate to the device */
+ if (macsec_is_offloaded(macsec)) {
+ const struct macsec_ops *ops;
+ struct macsec_context ctx;
+
+ ops = macsec_get_ops(netdev_priv(dev), &ctx);
+ if (ops) {
+ ctx.secy = &macsec->secy;
+ macsec_offload(ops->mdo_del_secy, &ctx);
+ }
+ }
+
macsec_common_dellink(dev, head);
if (list_empty(&rxd->secys)) {
@@ -3239,6 +3655,9 @@ static int macsec_newlink(struct net *net, struct net_device *dev,
macsec->real_dev = real_dev;
+ /* MACsec offloading is off by default */
+ macsec->offload = MACSEC_OFFLOAD_OFF;
+
if (data && data[IFLA_MACSEC_ICV_LEN])
icv_len = nla_get_u8(data[IFLA_MACSEC_ICV_LEN]);
dev->mtu = real_dev->mtu - icv_len - macsec_extra_len(true);
diff --git a/drivers/net/macvlan.c b/drivers/net/macvlan.c
index 747c0542a53c..81aa7adf4801 100644
--- a/drivers/net/macvlan.c
+++ b/drivers/net/macvlan.c
@@ -259,7 +259,7 @@ static void macvlan_broadcast(struct sk_buff *skb,
struct net_device *src,
enum macvlan_mode mode)
{
- const struct ethhdr *eth = skb_eth_hdr(skb);
+ const struct ethhdr *eth = eth_hdr(skb);
const struct macvlan_dev *vlan;
struct sk_buff *nskb;
unsigned int i;
@@ -513,10 +513,11 @@ static int macvlan_queue_xmit(struct sk_buff *skb, struct net_device *dev)
const struct macvlan_dev *dest;
if (vlan->mode == MACVLAN_MODE_BRIDGE) {
- const struct ethhdr *eth = (void *)skb->data;
+ const struct ethhdr *eth = skb_eth_hdr(skb);
/* send to other bridge ports directly */
if (is_multicast_ether_addr(eth->h_dest)) {
+ skb_reset_mac_header(skb);
macvlan_broadcast(skb, port, dev, MACVLAN_MODE_BRIDGE);
goto xmit_world;
}
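
The accessor swap here is load-bearing. On receive, the mac-header offset is already set, so macvlan_broadcast() may use eth_hdr(), which reads skb_mac_header(). On transmit, skb->data points at the just-built Ethernet header but mac_header can be stale, so the xmit path reads the header via skb_eth_hdr() and calls skb_reset_mac_header() before handing the skb to macvlan_broadcast(). For reference, the two helpers (as defined in include/linux/if_ether.h):

    /* eth_hdr() trusts the recorded mac-header offset (RX path)... */
    static inline struct ethhdr *eth_hdr(const struct sk_buff *skb)
    {
            return (struct ethhdr *)skb_mac_header(skb);
    }

    /* ...while skb_eth_hdr() trusts skb->data (TX path, header just built) */
    static inline struct ethhdr *skb_eth_hdr(const struct sk_buff *skb)
    {
            return (struct ethhdr *)skb->data;
    }
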
@@ -1036,8 +1037,8 @@ static int macvlan_ethtool_get_ts_info(struct net_device *dev,
const struct ethtool_ops *ops = real_dev->ethtool_ops;
struct phy_device *phydev = real_dev->phydev;
- if (phydev && phydev->drv && phydev->drv->ts_info) {
- return phydev->drv->ts_info(phydev, info);
+ if (phy_has_tsinfo(phydev)) {
+ return phy_ts_info(phydev, info);
} else if (ops->get_ts_info) {
return ops->get_ts_info(real_dev, info);
} else {
diff --git a/drivers/net/netdevsim/dev.c b/drivers/net/netdevsim/dev.c
index 059711edfc61..b53fbc06e104 100644
--- a/drivers/net/netdevsim/dev.c
+++ b/drivers/net/netdevsim/dev.c
@@ -53,7 +53,7 @@ static ssize_t nsim_dev_take_snapshot_write(struct file *file,
get_random_bytes(dummy_data, NSIM_DEV_DUMMY_REGION_SIZE);
- id = devlink_region_shapshot_id_get(priv_to_devlink(nsim_dev));
+ id = devlink_region_snapshot_id_get(priv_to_devlink(nsim_dev));
err = devlink_region_snapshot_create(nsim_dev->dummy_region,
dummy_data, id, kfree);
if (err) {
@@ -270,7 +270,7 @@ struct nsim_trap_data {
};
/* All driver-specific traps must be documented in
- * Documentation/networking/devlink-trap-netdevsim.rst
+ * Documentation/networking/devlink/netdevsim.rst
*/
enum {
NSIM_TRAP_ID_BASE = DEVLINK_TRAP_GENERIC_ID_MAX,
diff --git a/drivers/net/netdevsim/fib.c b/drivers/net/netdevsim/fib.c
index 13540dee7364..f32d56ac3e80 100644
--- a/drivers/net/netdevsim/fib.c
+++ b/drivers/net/netdevsim/fib.c
@@ -14,6 +14,12 @@
* THE COST OF ALL NECESSARY SERVICING, REPAIR OR CORRECTION.
*/
+#include <linux/in6.h>
+#include <linux/kernel.h>
+#include <linux/list.h>
+#include <linux/rhashtable.h>
+#include <linux/spinlock_types.h>
+#include <linux/types.h>
#include <net/fib_notifier.h>
#include <net/ip_fib.h>
#include <net/ip6_fib.h>
@@ -36,6 +42,48 @@ struct nsim_fib_data {
struct notifier_block fib_nb;
struct nsim_per_fib_data ipv4;
struct nsim_per_fib_data ipv6;
+ struct rhashtable fib_rt_ht;
+ struct list_head fib_rt_list;
+ spinlock_t fib_lock; /* Protects hashtable, list and accounting */
+ struct devlink *devlink;
+};
+
+struct nsim_fib_rt_key {
+ unsigned char addr[sizeof(struct in6_addr)];
+ unsigned char prefix_len;
+ int family;
+ u32 tb_id;
+};
+
+struct nsim_fib_rt {
+ struct nsim_fib_rt_key key;
+ struct rhash_head ht_node;
+ struct list_head list; /* Member of fib_rt_list */
+};
+
+struct nsim_fib4_rt {
+ struct nsim_fib_rt common;
+ struct fib_info *fi;
+ u8 tos;
+ u8 type;
+};
+
+struct nsim_fib6_rt {
+ struct nsim_fib_rt common;
+ struct list_head nh_list;
+ unsigned int nhs;
+};
+
+struct nsim_fib6_rt_nh {
+ struct list_head list; /* Member of nh_list */
+ struct fib6_info *rt;
+};
+
+static const struct rhashtable_params nsim_fib_rt_ht_params = {
+ .key_offset = offsetof(struct nsim_fib_rt, key),
+ .head_offset = offsetof(struct nsim_fib_rt, ht_node),
+ .key_len = sizeof(struct nsim_fib_rt_key),
+ .automatic_shrinking = true,
};
u64 nsim_fib_get_val(struct nsim_fib_data *fib_data,
@@ -144,18 +192,556 @@ static int nsim_fib_account(struct nsim_fib_entry *entry, bool add,
return err;
}
+static void nsim_fib_rt_init(struct nsim_fib_data *data,
+ struct nsim_fib_rt *fib_rt, const void *addr,
+ size_t addr_len, unsigned int prefix_len,
+ int family, u32 tb_id)
+{
+ memcpy(fib_rt->key.addr, addr, addr_len);
+ fib_rt->key.prefix_len = prefix_len;
+ fib_rt->key.family = family;
+ fib_rt->key.tb_id = tb_id;
+ list_add(&fib_rt->list, &data->fib_rt_list);
+}
+
+static void nsim_fib_rt_fini(struct nsim_fib_rt *fib_rt)
+{
+ list_del(&fib_rt->list);
+}
+
+static struct nsim_fib_rt *nsim_fib_rt_lookup(struct rhashtable *fib_rt_ht,
+ const void *addr, size_t addr_len,
+ unsigned int prefix_len,
+ int family, u32 tb_id)
+{
+ struct nsim_fib_rt_key key;
+
+ memset(&key, 0, sizeof(key));
+ memcpy(key.addr, addr, addr_len);
+ key.prefix_len = prefix_len;
+ key.family = family;
+ key.tb_id = tb_id;
+
+ return rhashtable_lookup_fast(fib_rt_ht, &key, nsim_fib_rt_ht_params);
+}
+
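
The hash key is compared as raw bytes (key_len spans the whole struct nsim_fib_rt_key), which has two quiet consequences visible here: lookups memset() the on-stack key before filling it, because an IPv4 address occupies only 4 of the 16 address bytes and stray stack garbage in the rest (or in struct padding) would defeat the comparison; and inserted entries rely on kzalloc() for the same guarantee. A usage sketch for the IPv4 case:

    /* Sketch: looking up an IPv4 route through the family-agnostic
     * table; dst is the prefix exactly as delivered by the FIB notifier.
     */
    static struct nsim_fib_rt *example_lookup_v4(struct nsim_fib_data *data,
                                                 u32 dst, unsigned int plen,
                                                 u32 tb_id)
    {
            return nsim_fib_rt_lookup(&data->fib_rt_ht, &dst, sizeof(dst),
                                      plen, AF_INET, tb_id);
    }
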
+static struct nsim_fib4_rt *
+nsim_fib4_rt_create(struct nsim_fib_data *data,
+ struct fib_entry_notifier_info *fen_info)
+{
+ struct nsim_fib4_rt *fib4_rt;
+
+ fib4_rt = kzalloc(sizeof(*fib4_rt), GFP_ATOMIC);
+ if (!fib4_rt)
+ return NULL;
+
+ nsim_fib_rt_init(data, &fib4_rt->common, &fen_info->dst, sizeof(u32),
+ fen_info->dst_len, AF_INET, fen_info->tb_id);
+
+ fib4_rt->fi = fen_info->fi;
+ fib_info_hold(fib4_rt->fi);
+ fib4_rt->tos = fen_info->tos;
+ fib4_rt->type = fen_info->type;
+
+ return fib4_rt;
+}
+
+static void nsim_fib4_rt_destroy(struct nsim_fib4_rt *fib4_rt)
+{
+ fib_info_put(fib4_rt->fi);
+ nsim_fib_rt_fini(&fib4_rt->common);
+ kfree(fib4_rt);
+}
+
+static struct nsim_fib4_rt *
+nsim_fib4_rt_lookup(struct rhashtable *fib_rt_ht,
+ const struct fib_entry_notifier_info *fen_info)
+{
+ struct nsim_fib_rt *fib_rt;
+
+ fib_rt = nsim_fib_rt_lookup(fib_rt_ht, &fen_info->dst, sizeof(u32),
+ fen_info->dst_len, AF_INET,
+ fen_info->tb_id);
+ if (!fib_rt)
+ return NULL;
+
+ return container_of(fib_rt, struct nsim_fib4_rt, common);
+}
+
+static void nsim_fib4_rt_hw_flags_set(struct net *net,
+ const struct nsim_fib4_rt *fib4_rt,
+ bool trap)
+{
+ u32 *p_dst = (u32 *) fib4_rt->common.key.addr;
+ int dst_len = fib4_rt->common.key.prefix_len;
+ struct fib_rt_info fri;
+
+ fri.fi = fib4_rt->fi;
+ fri.tb_id = fib4_rt->common.key.tb_id;
+ fri.dst = cpu_to_be32(*p_dst);
+ fri.dst_len = dst_len;
+ fri.tos = fib4_rt->tos;
+ fri.type = fib4_rt->type;
+ fri.offload = false;
+ fri.trap = trap;
+ fib_alias_hw_flags_set(net, &fri);
+}
+
+static int nsim_fib4_rt_add(struct nsim_fib_data *data,
+ struct nsim_fib4_rt *fib4_rt,
+ struct netlink_ext_ack *extack)
+{
+ struct net *net = devlink_net(data->devlink);
+ int err;
+
+ err = nsim_fib_account(&data->ipv4.fib, true, extack);
+ if (err)
+ return err;
+
+ err = rhashtable_insert_fast(&data->fib_rt_ht,
+ &fib4_rt->common.ht_node,
+ nsim_fib_rt_ht_params);
+ if (err) {
+ NL_SET_ERR_MSG_MOD(extack, "Failed to insert IPv4 route");
+ goto err_fib_dismiss;
+ }
+
+ nsim_fib4_rt_hw_flags_set(net, fib4_rt, true);
+
+ return 0;
+
+err_fib_dismiss:
+ nsim_fib_account(&data->ipv4.fib, false, extack);
+ return err;
+}
+
+static int nsim_fib4_rt_replace(struct nsim_fib_data *data,
+ struct nsim_fib4_rt *fib4_rt,
+ struct nsim_fib4_rt *fib4_rt_old,
+ struct netlink_ext_ack *extack)
+{
+ struct net *net = devlink_net(data->devlink);
+ int err;
+
+ /* We are replacing a route, so no need to change the accounting. */
+ err = rhashtable_replace_fast(&data->fib_rt_ht,
+ &fib4_rt_old->common.ht_node,
+ &fib4_rt->common.ht_node,
+ nsim_fib_rt_ht_params);
+ if (err) {
+ NL_SET_ERR_MSG_MOD(extack, "Failed to replace IPv4 route");
+ return err;
+ }
+
+ nsim_fib4_rt_hw_flags_set(net, fib4_rt, true);
+
+ nsim_fib4_rt_hw_flags_set(net, fib4_rt_old, false);
+ nsim_fib4_rt_destroy(fib4_rt_old);
+
+ return 0;
+}
+
+static int nsim_fib4_rt_insert(struct nsim_fib_data *data,
+ struct fib_entry_notifier_info *fen_info)
+{
+ struct netlink_ext_ack *extack = fen_info->info.extack;
+ struct nsim_fib4_rt *fib4_rt, *fib4_rt_old;
+ int err;
+
+ fib4_rt = nsim_fib4_rt_create(data, fen_info);
+ if (!fib4_rt)
+ return -ENOMEM;
+
+ fib4_rt_old = nsim_fib4_rt_lookup(&data->fib_rt_ht, fen_info);
+ if (!fib4_rt_old)
+ err = nsim_fib4_rt_add(data, fib4_rt, extack);
+ else
+ err = nsim_fib4_rt_replace(data, fib4_rt, fib4_rt_old, extack);
+
+ if (err)
+ nsim_fib4_rt_destroy(fib4_rt);
+
+ return err;
+}
+
+static void nsim_fib4_rt_remove(struct nsim_fib_data *data,
+ const struct fib_entry_notifier_info *fen_info)
+{
+ struct netlink_ext_ack *extack = fen_info->info.extack;
+ struct nsim_fib4_rt *fib4_rt;
+
+ fib4_rt = nsim_fib4_rt_lookup(&data->fib_rt_ht, fen_info);
+ if (WARN_ON_ONCE(!fib4_rt))
+ return;
+
+ rhashtable_remove_fast(&data->fib_rt_ht, &fib4_rt->common.ht_node,
+ nsim_fib_rt_ht_params);
+ nsim_fib_account(&data->ipv4.fib, false, extack);
+ nsim_fib4_rt_destroy(fib4_rt);
+}
+
+static int nsim_fib4_event(struct nsim_fib_data *data,
+ struct fib_notifier_info *info,
+ unsigned long event)
+{
+ struct fib_entry_notifier_info *fen_info;
+ int err = 0;
+
+ fen_info = container_of(info, struct fib_entry_notifier_info, info);
+
+ if (fen_info->fi->nh) {
+ NL_SET_ERR_MSG_MOD(info->extack, "IPv4 route with nexthop objects is not supported");
+ return 0;
+ }
+
+ switch (event) {
+ case FIB_EVENT_ENTRY_REPLACE:
+ err = nsim_fib4_rt_insert(data, fen_info);
+ break;
+ case FIB_EVENT_ENTRY_DEL:
+ nsim_fib4_rt_remove(data, fen_info);
+ break;
+ default:
+ break;
+ }
+
+ return err;
+}
+
+static struct nsim_fib6_rt_nh *
+nsim_fib6_rt_nh_find(const struct nsim_fib6_rt *fib6_rt,
+ const struct fib6_info *rt)
+{
+ struct nsim_fib6_rt_nh *fib6_rt_nh;
+
+ list_for_each_entry(fib6_rt_nh, &fib6_rt->nh_list, list) {
+ if (fib6_rt_nh->rt == rt)
+ return fib6_rt_nh;
+ }
+
+ return NULL;
+}
+
+static int nsim_fib6_rt_nh_add(struct nsim_fib6_rt *fib6_rt,
+ struct fib6_info *rt)
+{
+ struct nsim_fib6_rt_nh *fib6_rt_nh;
+
+ fib6_rt_nh = kzalloc(sizeof(*fib6_rt_nh), GFP_ATOMIC);
+ if (!fib6_rt_nh)
+ return -ENOMEM;
+
+ fib6_info_hold(rt);
+ fib6_rt_nh->rt = rt;
+ list_add_tail(&fib6_rt_nh->list, &fib6_rt->nh_list);
+ fib6_rt->nhs++;
+
+ return 0;
+}
+
+static void nsim_fib6_rt_nh_del(struct nsim_fib6_rt *fib6_rt,
+ const struct fib6_info *rt)
+{
+ struct nsim_fib6_rt_nh *fib6_rt_nh;
+
+ fib6_rt_nh = nsim_fib6_rt_nh_find(fib6_rt, rt);
+ if (WARN_ON_ONCE(!fib6_rt_nh))
+ return;
+
+ fib6_rt->nhs--;
+ list_del(&fib6_rt_nh->list);
+#if IS_ENABLED(CONFIG_IPV6)
+ fib6_info_release(fib6_rt_nh->rt);
+#endif
+ kfree(fib6_rt_nh);
+}
+
+static struct nsim_fib6_rt *
+nsim_fib6_rt_create(struct nsim_fib_data *data,
+ struct fib6_entry_notifier_info *fen6_info)
+{
+ struct fib6_info *iter, *rt = fen6_info->rt;
+ struct nsim_fib6_rt *fib6_rt;
+ int i = 0;
+ int err;
+
+ fib6_rt = kzalloc(sizeof(*fib6_rt), GFP_ATOMIC);
+ if (!fib6_rt)
+ return ERR_PTR(-ENOMEM);
+
+ nsim_fib_rt_init(data, &fib6_rt->common, &rt->fib6_dst.addr,
+ sizeof(rt->fib6_dst.addr), rt->fib6_dst.plen, AF_INET6,
+ rt->fib6_table->tb6_id);
+
+ /* We consider a multipath IPv6 route as one entry, but it can be made
+ * up of several fib6_info structs (one for each nexthop), so we
+ * add them all to the same list under the entry.
+ */
+ INIT_LIST_HEAD(&fib6_rt->nh_list);
+
+ err = nsim_fib6_rt_nh_add(fib6_rt, rt);
+ if (err)
+ goto err_fib_rt_fini;
+
+ if (!fen6_info->nsiblings)
+ return fib6_rt;
+
+ list_for_each_entry(iter, &rt->fib6_siblings, fib6_siblings) {
+ if (i == fen6_info->nsiblings)
+ break;
+
+ err = nsim_fib6_rt_nh_add(fib6_rt, iter);
+ if (err)
+ goto err_fib6_rt_nh_del;
+ i++;
+ }
+ WARN_ON_ONCE(i != fen6_info->nsiblings);
+
+ return fib6_rt;
+
+err_fib6_rt_nh_del:
+ list_for_each_entry_continue_reverse(iter, &rt->fib6_siblings,
+ fib6_siblings)
+ nsim_fib6_rt_nh_del(fib6_rt, iter);
+ nsim_fib6_rt_nh_del(fib6_rt, rt);
+err_fib_rt_fini:
+ nsim_fib_rt_fini(&fib6_rt->common);
+ kfree(fib6_rt);
+ return ERR_PTR(err);
+}
+
+static void nsim_fib6_rt_destroy(struct nsim_fib6_rt *fib6_rt)
+{
+ struct nsim_fib6_rt_nh *iter, *tmp;
+
+ list_for_each_entry_safe(iter, tmp, &fib6_rt->nh_list, list)
+ nsim_fib6_rt_nh_del(fib6_rt, iter->rt);
+ WARN_ON_ONCE(!list_empty(&fib6_rt->nh_list));
+ nsim_fib_rt_fini(&fib6_rt->common);
+ kfree(fib6_rt);
+}
+
+static struct nsim_fib6_rt *
+nsim_fib6_rt_lookup(struct rhashtable *fib_rt_ht, const struct fib6_info *rt)
+{
+ struct nsim_fib_rt *fib_rt;
+
+ fib_rt = nsim_fib_rt_lookup(fib_rt_ht, &rt->fib6_dst.addr,
+ sizeof(rt->fib6_dst.addr),
+ rt->fib6_dst.plen, AF_INET6,
+ rt->fib6_table->tb6_id);
+ if (!fib_rt)
+ return NULL;
+
+ return container_of(fib_rt, struct nsim_fib6_rt, common);
+}
+
+static int nsim_fib6_rt_append(struct nsim_fib_data *data,
+ struct fib6_entry_notifier_info *fen6_info)
+{
+ struct fib6_info *iter, *rt = fen6_info->rt;
+ struct nsim_fib6_rt *fib6_rt;
+ int i = 0;
+ int err;
+
+ fib6_rt = nsim_fib6_rt_lookup(&data->fib_rt_ht, rt);
+ if (WARN_ON_ONCE(!fib6_rt))
+ return -EINVAL;
+
+ err = nsim_fib6_rt_nh_add(fib6_rt, rt);
+ if (err)
+ return err;
+ rt->trap = true;
+
+ if (!fen6_info->nsiblings)
+ return 0;
+
+ list_for_each_entry(iter, &rt->fib6_siblings, fib6_siblings) {
+ if (i == fen6_info->nsiblings)
+ break;
+
+ err = nsim_fib6_rt_nh_add(fib6_rt, iter);
+ if (err)
+ goto err_fib6_rt_nh_del;
+ iter->trap = true;
+ i++;
+ }
+ WARN_ON_ONCE(i != fen6_info->nsiblings);
+
+ return 0;
+
+err_fib6_rt_nh_del:
+ list_for_each_entry_continue_reverse(iter, &rt->fib6_siblings,
+ fib6_siblings) {
+ iter->trap = false;
+ nsim_fib6_rt_nh_del(fib6_rt, iter);
+ }
+ rt->trap = false;
+ nsim_fib6_rt_nh_del(fib6_rt, rt);
+ return err;
+}
+
+static void nsim_fib6_rt_hw_flags_set(const struct nsim_fib6_rt *fib6_rt,
+ bool trap)
+{
+ struct nsim_fib6_rt_nh *fib6_rt_nh;
+
+ list_for_each_entry(fib6_rt_nh, &fib6_rt->nh_list, list)
+ fib6_info_hw_flags_set(fib6_rt_nh->rt, false, trap);
+}
+
+static int nsim_fib6_rt_add(struct nsim_fib_data *data,
+ struct nsim_fib6_rt *fib6_rt,
+ struct netlink_ext_ack *extack)
+{
+ int err;
+
+ err = nsim_fib_account(&data->ipv6.fib, true, extack);
+ if (err)
+ return err;
+
+ err = rhashtable_insert_fast(&data->fib_rt_ht,
+ &fib6_rt->common.ht_node,
+ nsim_fib_rt_ht_params);
+ if (err) {
+ NL_SET_ERR_MSG_MOD(extack, "Failed to insert IPv6 route");
+ goto err_fib_dismiss;
+ }
+
+ nsim_fib6_rt_hw_flags_set(fib6_rt, true);
+
+ return 0;
+
+err_fib_dismiss:
+ nsim_fib_account(&data->ipv6.fib, false, extack);
+ return err;
+}
+
+static int nsim_fib6_rt_replace(struct nsim_fib_data *data,
+ struct nsim_fib6_rt *fib6_rt,
+ struct nsim_fib6_rt *fib6_rt_old,
+ struct netlink_ext_ack *extack)
+{
+ int err;
+
+ /* We are replacing a route, so no need to change the accounting. */
+ err = rhashtable_replace_fast(&data->fib_rt_ht,
+ &fib6_rt_old->common.ht_node,
+ &fib6_rt->common.ht_node,
+ nsim_fib_rt_ht_params);
+ if (err) {
+ NL_SET_ERR_MSG_MOD(extack, "Failed to replace IPv6 route");
+ return err;
+ }
+
+ nsim_fib6_rt_hw_flags_set(fib6_rt, true);
+
+ nsim_fib6_rt_hw_flags_set(fib6_rt_old, false);
+ nsim_fib6_rt_destroy(fib6_rt_old);
+
+ return 0;
+}
+
+static int nsim_fib6_rt_insert(struct nsim_fib_data *data,
+ struct fib6_entry_notifier_info *fen6_info)
+{
+ struct netlink_ext_ack *extack = fen6_info->info.extack;
+ struct nsim_fib6_rt *fib6_rt, *fib6_rt_old;
+ int err;
+
+ fib6_rt = nsim_fib6_rt_create(data, fen6_info);
+ if (IS_ERR(fib6_rt))
+ return PTR_ERR(fib6_rt);
+
+ fib6_rt_old = nsim_fib6_rt_lookup(&data->fib_rt_ht, fen6_info->rt);
+ if (!fib6_rt_old)
+ err = nsim_fib6_rt_add(data, fib6_rt, extack);
+ else
+ err = nsim_fib6_rt_replace(data, fib6_rt, fib6_rt_old, extack);
+
+ if (err)
+ nsim_fib6_rt_destroy(fib6_rt);
+
+ return err;
+}
+
+static void
+nsim_fib6_rt_remove(struct nsim_fib_data *data,
+ const struct fib6_entry_notifier_info *fen6_info)
+{
+ struct netlink_ext_ack *extack = fen6_info->info.extack;
+ struct nsim_fib6_rt *fib6_rt;
+
+ /* Multipath routes are first added to the FIB trie and only then
+ * notified. If we vetoed the addition, we will get a delete
+ * notification for a route we do not have. Therefore, do not warn if
+ * route was not found.
+ */
+ fib6_rt = nsim_fib6_rt_lookup(&data->fib_rt_ht, fen6_info->rt);
+ if (!fib6_rt)
+ return;
+
+ /* If not all the nexthops are deleted, then only reduce the nexthop
+ * group.
+ */
+ if (fen6_info->nsiblings + 1 != fib6_rt->nhs) {
+ nsim_fib6_rt_nh_del(fib6_rt, fen6_info->rt);
+ return;
+ }
+
+ rhashtable_remove_fast(&data->fib_rt_ht, &fib6_rt->common.ht_node,
+ nsim_fib_rt_ht_params);
+ nsim_fib_account(&data->ipv6.fib, false, extack);
+ nsim_fib6_rt_destroy(fib6_rt);
+}
+
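
Deletion distinguishes shrinking a nexthop group from dropping the route: fen6_info->nsiblings counts the siblings of the route being deleted, so nsiblings + 1 is the number of nexthops the notification covers. For a stored multipath route with fib6_rt->nhs == 3, deleting a single nexthop arrives with nsiblings == 0 (0 + 1 != 3, so only that nexthop is unlinked), while deleting the whole route arrives with nsiblings == 2 (2 + 1 == 3, so the entry leaves the hash table and the accounting is decremented).
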
+static int nsim_fib6_event(struct nsim_fib_data *data,
+ struct fib_notifier_info *info,
+ unsigned long event)
+{
+ struct fib6_entry_notifier_info *fen6_info;
+ int err = 0;
+
+ fen6_info = container_of(info, struct fib6_entry_notifier_info, info);
+
+ if (fen6_info->rt->nh) {
+ NL_SET_ERR_MSG_MOD(info->extack, "IPv6 route with nexthop objects is not supported");
+ return 0;
+ }
+
+ if (fen6_info->rt->fib6_src.plen) {
+ NL_SET_ERR_MSG_MOD(info->extack, "IPv6 source-specific route is not supported");
+ return 0;
+ }
+
+ switch (event) {
+ case FIB_EVENT_ENTRY_REPLACE:
+ err = nsim_fib6_rt_insert(data, fen6_info);
+ break;
+ case FIB_EVENT_ENTRY_APPEND:
+ err = nsim_fib6_rt_append(data, fen6_info);
+ break;
+ case FIB_EVENT_ENTRY_DEL:
+ nsim_fib6_rt_remove(data, fen6_info);
+ break;
+ default:
+ break;
+ }
+
+ return err;
+}
+
static int nsim_fib_event(struct nsim_fib_data *data,
- struct fib_notifier_info *info, bool add)
+ struct fib_notifier_info *info, unsigned long event)
{
- struct netlink_ext_ack *extack = info->extack;
int err = 0;
switch (info->family) {
case AF_INET:
- err = nsim_fib_account(&data->ipv4.fib, add, extack);
+ err = nsim_fib4_event(data, info, event);
break;
case AF_INET6:
- err = nsim_fib_account(&data->ipv6.fib, add, extack);
+ err = nsim_fib6_event(data, info, event);
break;
}
@@ -170,6 +756,9 @@ static int nsim_fib_event_nb(struct notifier_block *nb, unsigned long event,
struct fib_notifier_info *info = ptr;
int err = 0;
+ /* IPv6 routes can be added via RAs from softIRQ. */
+ spin_lock_bh(&data->fib_lock);
+
switch (event) {
case FIB_EVENT_RULE_ADD: /* fall through */
case FIB_EVENT_RULE_DEL:
@@ -177,25 +766,75 @@ static int nsim_fib_event_nb(struct notifier_block *nb, unsigned long event,
event == FIB_EVENT_RULE_ADD);
break;
- case FIB_EVENT_ENTRY_ADD: /* fall through */
+ case FIB_EVENT_ENTRY_REPLACE: /* fall through */
+ case FIB_EVENT_ENTRY_APPEND: /* fall through */
case FIB_EVENT_ENTRY_DEL:
- err = nsim_fib_event(data, info,
- event == FIB_EVENT_ENTRY_ADD);
+ err = nsim_fib_event(data, info, event);
break;
}
+ spin_unlock_bh(&data->fib_lock);
+
return notifier_from_errno(err);
}
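
The _bh lock variant is what makes the mixed calling contexts above safe: the notifier may run from softirq when IPv6 router advertisements add routes, and from process context for everything else, so the process-context side must disable bottom halves while holding the lock or the softirq path could deadlock against it on the same CPU. It is also why every allocation reachable under the lock uses GFP_ATOMIC. The discipline, spelled out:

    /* Sketch: the locking/allocation rules assumed throughout this file. */
    static void example_locked_mutation(struct nsim_fib_data *data)
    {
            spin_lock_bh(&data->fib_lock);  /* excludes the softirq caller */
            /* hashtable, list and accounting updates happen here;
             * nothing may sleep, hence GFP_ATOMIC at the alloc sites.
             */
            spin_unlock_bh(&data->fib_lock);
    }
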
+static void nsim_fib4_rt_free(struct nsim_fib_rt *fib_rt,
+ struct nsim_fib_data *data)
+{
+ struct devlink *devlink = data->devlink;
+ struct nsim_fib4_rt *fib4_rt;
+
+ fib4_rt = container_of(fib_rt, struct nsim_fib4_rt, common);
+ nsim_fib4_rt_hw_flags_set(devlink_net(devlink), fib4_rt, false);
+ nsim_fib_account(&data->ipv4.fib, false, NULL);
+ nsim_fib4_rt_destroy(fib4_rt);
+}
+
+static void nsim_fib6_rt_free(struct nsim_fib_rt *fib_rt,
+ struct nsim_fib_data *data)
+{
+ struct nsim_fib6_rt *fib6_rt;
+
+ fib6_rt = container_of(fib_rt, struct nsim_fib6_rt, common);
+ nsim_fib6_rt_hw_flags_set(fib6_rt, false);
+ nsim_fib_account(&data->ipv6.fib, false, NULL);
+ nsim_fib6_rt_destroy(fib6_rt);
+}
+
+static void nsim_fib_rt_free(void *ptr, void *arg)
+{
+ struct nsim_fib_rt *fib_rt = ptr;
+ struct nsim_fib_data *data = arg;
+
+ switch (fib_rt->key.family) {
+ case AF_INET:
+ nsim_fib4_rt_free(fib_rt, data);
+ break;
+ case AF_INET6:
+ nsim_fib6_rt_free(fib_rt, data);
+ break;
+ default:
+ WARN_ON_ONCE(1);
+ }
+}
+
/* inconsistent dump, trying again */
static void nsim_fib_dump_inconsistent(struct notifier_block *nb)
{
struct nsim_fib_data *data = container_of(nb, struct nsim_fib_data,
fib_nb);
+ struct nsim_fib_rt *fib_rt, *fib_rt_tmp;
+
+ /* The notifier block is still not registered, so we do not need to
+ * take any locks here.
+ */
+ list_for_each_entry_safe(fib_rt, fib_rt_tmp, &data->fib_rt_list, list) {
+ rhashtable_remove_fast(&data->fib_rt_ht, &fib_rt->ht_node,
+ nsim_fib_rt_ht_params);
+ nsim_fib_rt_free(fib_rt, data);
+ }
- data->ipv4.fib.num = 0ULL;
data->ipv4.rules.num = 0ULL;
- data->ipv6.fib.num = 0ULL;
data->ipv6.rules.num = 0ULL;
}
@@ -256,6 +895,13 @@ struct nsim_fib_data *nsim_fib_create(struct devlink *devlink,
data = kzalloc(sizeof(*data), GFP_KERNEL);
if (!data)
return ERR_PTR(-ENOMEM);
+ data->devlink = devlink;
+
+ spin_lock_init(&data->fib_lock);
+ INIT_LIST_HEAD(&data->fib_rt_list);
+ err = rhashtable_init(&data->fib_rt_ht, &nsim_fib_rt_ht_params);
+ if (err)
+ goto err_data_free;
nsim_fib_set_max_all(data, devlink);
@@ -264,7 +910,7 @@ struct nsim_fib_data *nsim_fib_create(struct devlink *devlink,
nsim_fib_dump_inconsistent, extack);
if (err) {
pr_err("Failed to register fib notifier\n");
- goto err_out;
+ goto err_rhashtable_destroy;
}
devlink_resource_occ_get_register(devlink,
@@ -285,7 +931,10 @@ struct nsim_fib_data *nsim_fib_create(struct devlink *devlink,
data);
return data;
-err_out:
+err_rhashtable_destroy:
+ rhashtable_free_and_destroy(&data->fib_rt_ht, nsim_fib_rt_free,
+ data);
+err_data_free:
kfree(data);
return ERR_PTR(err);
}
@@ -301,5 +950,8 @@ void nsim_fib_destroy(struct devlink *devlink, struct nsim_fib_data *data)
devlink_resource_occ_get_unregister(devlink,
NSIM_RESOURCE_IPV4_FIB);
unregister_fib_notifier(devlink_net(devlink), &data->fib_nb);
+ rhashtable_free_and_destroy(&data->fib_rt_ht, nsim_fib_rt_free,
+ data);
+ WARN_ON_ONCE(!list_empty(&data->fib_rt_list));
kfree(data);
}
diff --git a/drivers/net/phy/Kconfig b/drivers/net/phy/Kconfig
index 5848219005d7..9dabe03a668c 100644
--- a/drivers/net/phy/Kconfig
+++ b/drivers/net/phy/Kconfig
@@ -38,6 +38,7 @@ config MDIO_BCM_IPROC
tristate "Broadcom iProc MDIO bus controller"
depends on ARCH_BCM_IPROC || COMPILE_TEST
depends on HAS_IOMEM && OF_MDIO
+ default ARCH_BCM_IPROC
help
This module provides a driver for the MDIO buses found in the
Broadcom iProc SoCs.
@@ -324,6 +325,12 @@ config BROADCOM_PHY
Currently supports the BCM5411, BCM5421, BCM5461, BCM54616S, BCM5464,
BCM5481, BCM54810 and BCM5482 PHYs.
+config BCM84881_PHY
+ bool "Broadcom BCM84881 PHY"
+ depends on PHYLIB=y
+ ---help---
+ Support the Broadcom BCM84881 PHY.
+
config CICADA_PHY
tristate "Cicada PHYs"
---help---
@@ -340,14 +347,15 @@ config DAVICOM_PHY
Currently supports dm9161e and dm9131
config DP83822_PHY
- tristate "Texas Instruments DP83822 PHY"
+ tristate "Texas Instruments DP83822/825/826 PHYs"
---help---
- Supports the DP83822 PHY.
+ Supports the DP83822, DP83825I, DP83825CM, DP83825CS, DP83825S,
+ DP83826C and DP83826NC PHYs.
config DP83TC811_PHY
- tristate "Texas Instruments DP83TC822 PHY"
+ tristate "Texas Instruments DP83TC811 PHY"
---help---
- Supports the DP83TC822 PHY.
+ Supports the DP83TC811 PHY.
config DP83848_PHY
tristate "Texas Instruments DP83848 PHY"
@@ -431,6 +439,9 @@ config MICROCHIP_T1_PHY
config MICROSEMI_PHY
tristate "Microsemi PHYs"
+ depends on MACSEC || MACSEC=n
+ select CRYPTO_AES
+ select CRYPTO_ECB
---help---
Currently supports VSC8514, VSC8530, VSC8531, VSC8540 and VSC8541 PHYs
diff --git a/drivers/net/phy/Makefile b/drivers/net/phy/Makefile
index b433ec3bf9a6..fe5badf13b65 100644
--- a/drivers/net/phy/Makefile
+++ b/drivers/net/phy/Makefile
@@ -43,6 +43,8 @@ obj-$(CONFIG_MDIO_SUN4I) += mdio-sun4i.o
obj-$(CONFIG_MDIO_THUNDER) += mdio-thunder.o
obj-$(CONFIG_MDIO_XGENE) += mdio-xgene.o
+obj-$(CONFIG_NETWORK_PHY_TIMESTAMPING) += mii_timestamper.o
+
obj-$(CONFIG_SFP) += sfp.o
sfp-obj-$(CONFIG_SFP) += sfp-bus.o
obj-y += $(sfp-obj-y) $(sfp-obj-m)
@@ -62,6 +64,7 @@ obj-$(CONFIG_BCM87XX_PHY) += bcm87xx.o
obj-$(CONFIG_BCM_CYGNUS_PHY) += bcm-cygnus.o
obj-$(CONFIG_BCM_NET_PHYLIB) += bcm-phy-lib.o
obj-$(CONFIG_BROADCOM_PHY) += broadcom.o
+obj-$(CONFIG_BCM84881_PHY) += bcm84881.o
obj-$(CONFIG_CICADA_PHY) += cicada.o
obj-$(CONFIG_CORTINA_PHY) += cortina.o
obj-$(CONFIG_DAVICOM_PHY) += davicom.o
diff --git a/drivers/net/phy/adin.c b/drivers/net/phy/adin.c
index cf5a391c93e6..c7eabe4382fb 100644
--- a/drivers/net/phy/adin.c
+++ b/drivers/net/phy/adin.c
@@ -145,7 +145,7 @@ struct adin_clause45_mmd_map {
u16 adin_regnum;
};
-static struct adin_clause45_mmd_map adin_clause45_mmd_map[] = {
+static const struct adin_clause45_mmd_map adin_clause45_mmd_map[] = {
{ MDIO_MMD_PCS, MDIO_PCS_EEE_ABLE, ADIN1300_EEE_CAP_REG },
{ MDIO_MMD_AN, MDIO_AN_EEE_LPABLE, ADIN1300_EEE_LPABLE_REG },
{ MDIO_MMD_AN, MDIO_AN_EEE_ADV, ADIN1300_EEE_ADV_REG },
@@ -159,7 +159,7 @@ struct adin_hw_stat {
u16 reg2;
};
-static struct adin_hw_stat adin_hw_stats[] = {
+static const struct adin_hw_stat adin_hw_stats[] = {
{ "total_frames_checked_count", 0x940A, 0x940B }, /* hi + lo */
{ "length_error_frames_count", 0x940C },
{ "alignment_error_frames_count", 0x940D },
@@ -456,7 +456,7 @@ static int adin_phy_config_intr(struct phy_device *phydev)
static int adin_cl45_to_adin_reg(struct phy_device *phydev, int devad,
u16 cl45_regnum)
{
- struct adin_clause45_mmd_map *m;
+ const struct adin_clause45_mmd_map *m;
int i;
if (devad == MDIO_MMD_VEND1)
@@ -625,7 +625,7 @@ static int adin_soft_reset(struct phy_device *phydev)
if (rc < 0)
return rc;
- msleep(10);
+ msleep(20);
/* If we get a read error something may be wrong */
rc = phy_read_mmd(phydev, MDIO_MMD_VEND1,
@@ -650,7 +650,7 @@ static void adin_get_strings(struct phy_device *phydev, u8 *data)
}
static int adin_read_mmd_stat_regs(struct phy_device *phydev,
- struct adin_hw_stat *stat,
+ const struct adin_hw_stat *stat,
u32 *val)
{
int ret;
@@ -676,7 +676,7 @@ static int adin_read_mmd_stat_regs(struct phy_device *phydev,
static u64 adin_get_stat(struct phy_device *phydev, int i)
{
- struct adin_hw_stat *stat = &adin_hw_stats[i];
+ const struct adin_hw_stat *stat = &adin_hw_stats[i];
struct adin_priv *priv = phydev->priv;
u32 val;
int ret;
diff --git a/drivers/net/phy/aquantia_main.c b/drivers/net/phy/aquantia_main.c
index 975789d9349d..31927b2c7d5a 100644
--- a/drivers/net/phy/aquantia_main.c
+++ b/drivers/net/phy/aquantia_main.c
@@ -358,9 +358,11 @@ static int aqr107_read_status(struct phy_device *phydev)
switch (FIELD_GET(MDIO_PHYXS_VEND_IF_STATUS_TYPE_MASK, val)) {
case MDIO_PHYXS_VEND_IF_STATUS_TYPE_KR:
- case MDIO_PHYXS_VEND_IF_STATUS_TYPE_XFI:
phydev->interface = PHY_INTERFACE_MODE_10GKR;
break;
+ case MDIO_PHYXS_VEND_IF_STATUS_TYPE_XFI:
+ phydev->interface = PHY_INTERFACE_MODE_10GBASER;
+ break;
case MDIO_PHYXS_VEND_IF_STATUS_TYPE_USXGMII:
phydev->interface = PHY_INTERFACE_MODE_USXGMII;
break;
@@ -493,7 +495,8 @@ static int aqr107_config_init(struct phy_device *phydev)
phydev->interface != PHY_INTERFACE_MODE_2500BASEX &&
phydev->interface != PHY_INTERFACE_MODE_XGMII &&
phydev->interface != PHY_INTERFACE_MODE_USXGMII &&
- phydev->interface != PHY_INTERFACE_MODE_10GKR)
+ phydev->interface != PHY_INTERFACE_MODE_10GKR &&
+ phydev->interface != PHY_INTERFACE_MODE_10GBASER)
return -ENODEV;
WARN(phydev->interface == PHY_INTERFACE_MODE_XGMII,
diff --git a/drivers/net/phy/bcm84881.c b/drivers/net/phy/bcm84881.c
new file mode 100644
index 000000000000..14d55a77eb28
--- /dev/null
+++ b/drivers/net/phy/bcm84881.c
@@ -0,0 +1,269 @@
+// SPDX-License-Identifier: GPL-2.0
+// Broadcom BCM84881 NBASE-T PHY driver, as found on an SFP+ module.
+// Copyright (C) 2019 Russell King, Deep Blue Solutions Ltd.
+//
+// Like the Marvell 88x3310, the Broadcom 84881 changes its host-side
+// interface according to the operating speed between 10GBASE-R,
+// 2500BASE-X and SGMII (but unlike the 88x3310, without the control
+// word).
+//
+// This driver only supports those aspects of the PHY that I'm able to
+// observe and test with the SFP+ module, which is an incomplete subset
+// of what this PHY is able to support. For example, I only assume it
+// supports a single lane Serdes connection, but it may be that the PHY
+// is able to support more than that.
+#include <linux/delay.h>
+#include <linux/module.h>
+#include <linux/phy.h>
+
+enum {
+ MDIO_AN_C22 = 0xffe0,
+};
+
+static int bcm84881_wait_init(struct phy_device *phydev)
+{
+ unsigned int tries = 20;
+ int ret, val;
+
+ do {
+ val = phy_read_mmd(phydev, MDIO_MMD_PMAPMD, MDIO_CTRL1);
+ if (val < 0) {
+ ret = val;
+ break;
+ }
+ if (!(val & MDIO_CTRL1_RESET)) {
+ ret = 0;
+ break;
+ }
+ if (!--tries) {
+ ret = -ETIMEDOUT;
+ break;
+ }
+ msleep(100);
+ } while (1);
+
+ if (ret)
+ phydev_err(phydev, "%s failed: %d\n", __func__, ret);
+
+ return ret;
+}
+
+static int bcm84881_config_init(struct phy_device *phydev)
+{
+ switch (phydev->interface) {
+ case PHY_INTERFACE_MODE_SGMII:
+ case PHY_INTERFACE_MODE_2500BASEX:
+ case PHY_INTERFACE_MODE_10GBASER:
+ break;
+ default:
+ return -ENODEV;
+ }
+ return 0;
+}
+
+static int bcm84881_probe(struct phy_device *phydev)
+{
+ /* This driver requires PMAPMD and AN blocks */
+ const u32 mmd_mask = MDIO_DEVS_PMAPMD | MDIO_DEVS_AN;
+
+ if (!phydev->is_c45 ||
+ (phydev->c45_ids.devices_in_package & mmd_mask) != mmd_mask)
+ return -ENODEV;
+
+ return 0;
+}
+
+static int bcm84881_get_features(struct phy_device *phydev)
+{
+ int ret;
+
+ ret = genphy_c45_pma_read_abilities(phydev);
+ if (ret)
+ return ret;
+
+ /* Although the PHY sets bit 1.11.8, it does not support 10M modes */
+ linkmode_clear_bit(ETHTOOL_LINK_MODE_10baseT_Half_BIT,
+ phydev->supported);
+ linkmode_clear_bit(ETHTOOL_LINK_MODE_10baseT_Full_BIT,
+ phydev->supported);
+
+ return 0;
+}
+
+static int bcm84881_config_aneg(struct phy_device *phydev)
+{
+ bool changed = false;
+ u32 adv;
+ int ret;
+
+ /* Wait for the PHY to finish initialising, otherwise our
+ * advertisement may be overwritten.
+ */
+ ret = bcm84881_wait_init(phydev);
+ if (ret)
+ return ret;
+
+ /* We don't support manual MDI control */
+ phydev->mdix_ctrl = ETH_TP_MDI_AUTO;
+
+ /* disabled autoneg doesn't seem to work with this PHY */
+ if (phydev->autoneg == AUTONEG_DISABLE)
+ return -EINVAL;
+
+ ret = genphy_c45_an_config_aneg(phydev);
+ if (ret < 0)
+ return ret;
+ if (ret > 0)
+ changed = true;
+
+ adv = linkmode_adv_to_mii_ctrl1000_t(phydev->advertising);
+ ret = phy_modify_mmd_changed(phydev, MDIO_MMD_AN,
+ MDIO_AN_C22 + MII_CTRL1000,
+ ADVERTISE_1000FULL | ADVERTISE_1000HALF,
+ adv);
+ if (ret < 0)
+ return ret;
+ if (ret > 0)
+ changed = true;
+
+ return genphy_c45_check_and_restart_aneg(phydev, changed);
+}
+
+static int bcm84881_aneg_done(struct phy_device *phydev)
+{
+ int bmsr, val;
+
+ val = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_STAT1);
+ if (val < 0)
+ return val;
+
+ bmsr = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_AN_C22 + MII_BMSR);
+ if (bmsr < 0)
+ return bmsr;
+
+ return !!(val & MDIO_AN_STAT1_COMPLETE) &&
+ !!(bmsr & BMSR_ANEGCOMPLETE);
+}
+
+static int bcm84881_read_status(struct phy_device *phydev)
+{
+ unsigned int mode;
+ int bmsr, val;
+
+ val = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_CTRL1);
+ if (val < 0)
+ return val;
+
+ if (val & MDIO_AN_CTRL1_RESTART) {
+ phydev->link = 0;
+ return 0;
+ }
+
+ val = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_STAT1);
+ if (val < 0)
+ return val;
+
+ bmsr = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_AN_C22 + MII_BMSR);
+ if (bmsr < 0)
+ return bmsr;
+
+ phydev->autoneg_complete = !!(val & MDIO_AN_STAT1_COMPLETE) &&
+ !!(bmsr & BMSR_ANEGCOMPLETE);
+ phydev->link = !!(val & MDIO_STAT1_LSTATUS) &&
+ !!(bmsr & BMSR_LSTATUS);
+ if (phydev->autoneg == AUTONEG_ENABLE && !phydev->autoneg_complete)
+ phydev->link = false;
+
+ if (!phydev->link)
+ return 0;
+
+ linkmode_zero(phydev->lp_advertising);
+ phydev->speed = SPEED_UNKNOWN;
+ phydev->duplex = DUPLEX_UNKNOWN;
+ phydev->pause = 0;
+ phydev->asym_pause = 0;
+ phydev->mdix = 0;
+
+ if (phydev->autoneg_complete) {
+ val = genphy_c45_read_lpa(phydev);
+ if (val < 0)
+ return val;
+
+ val = phy_read_mmd(phydev, MDIO_MMD_AN,
+ MDIO_AN_C22 + MII_STAT1000);
+ if (val < 0)
+ return val;
+
+ mii_stat1000_mod_linkmode_lpa_t(phydev->lp_advertising, val);
+
+ if (phydev->autoneg == AUTONEG_ENABLE)
+ phy_resolve_aneg_linkmode(phydev);
+ }
+
+ if (phydev->autoneg == AUTONEG_DISABLE) {
+ /* disabled autoneg doesn't seem to work, so force the link
+ * down.
+ */
+ phydev->link = 0;
+ return 0;
+ }
+
+ /* Set the host link mode - we set the phy interface mode and
+ * the speed according to this register so that downshift works.
+ * The duplex setting is left as resolved above.
+ */
+ val = phy_read_mmd(phydev, MDIO_MMD_VEND1, 0x4011);
+ mode = (val & 0x1e) >> 1;
+ if (mode == 1 || mode == 2)
+ phydev->interface = PHY_INTERFACE_MODE_SGMII;
+ else if (mode == 3)
+ phydev->interface = PHY_INTERFACE_MODE_10GBASER;
+ else if (mode == 4)
+ phydev->interface = PHY_INTERFACE_MODE_2500BASEX;
+ switch (mode & 7) {
+ case 1:
+ phydev->speed = SPEED_100;
+ break;
+ case 2:
+ phydev->speed = SPEED_1000;
+ break;
+ case 3:
+ phydev->speed = SPEED_10000;
+ break;
+ case 4:
+ phydev->speed = SPEED_2500;
+ break;
+ case 5:
+ phydev->speed = SPEED_5000;
+ break;
+ }
+
+ return genphy_c45_read_mdix(phydev);
+}
+
+static struct phy_driver bcm84881_drivers[] = {
+ {
+ .phy_id = 0xae025150,
+ .phy_id_mask = 0xfffffff0,
+ .name = "Broadcom BCM84881",
+ .config_init = bcm84881_config_init,
+ .probe = bcm84881_probe,
+ .get_features = bcm84881_get_features,
+ .config_aneg = bcm84881_config_aneg,
+ .aneg_done = bcm84881_aneg_done,
+ .read_status = bcm84881_read_status,
+ },
+};
+
+module_phy_driver(bcm84881_drivers);
+
+/* FIXME: module auto-loading for Clause 45 PHYs seems non-functional */
+static struct mdio_device_id __maybe_unused bcm84881_tbl[] = {
+ { 0xae025150, 0xfffffff0 },
+ { },
+};
+MODULE_AUTHOR("Russell King");
+MODULE_DESCRIPTION("Broadcom BCM84881 PHY driver");
+MODULE_DEVICE_TABLE(mdio, bcm84881_tbl);
+MODULE_LICENSE("GPL");
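
bcm84881_wait_init() above is a bounded poll for a self-clearing reset bit: read MDIO_CTRL1 in the PMA/PMD MMD, stop when MDIO_CTRL1_RESET drops, and give up after 20 reads spaced 100 ms apart. The same pattern in isolation, as a hedged sketch with a demo_ name:

    #include <linux/delay.h>
    #include <linux/errno.h>
    #include <linux/mdio.h>
    #include <linux/phy.h>

    /* Bounded poll of a self-clearing reset bit, as in
     * bcm84881_wait_init() above: up to 20 reads, 100 ms apart,
     * for roughly a 2 s budget.
     */
    static int demo_wait_reset(struct phy_device *phydev)
    {
            unsigned int tries = 20;
            int val;

            do {
                    val = phy_read_mmd(phydev, MDIO_MMD_PMAPMD, MDIO_CTRL1);
                    if (val < 0)
                            return val;             /* MDIO read failed */
                    if (!(val & MDIO_CTRL1_RESET))
                            return 0;               /* reset complete */
                    msleep(100);
            } while (--tries);

            return -ETIMEDOUT;
    }
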
diff --git a/drivers/net/phy/dp83640.c b/drivers/net/phy/dp83640.c
index 8f241b57fcf6..ac72a324fcd1 100644
--- a/drivers/net/phy/dp83640.c
+++ b/drivers/net/phy/dp83640.c
@@ -98,6 +98,7 @@ struct dp83640_private {
struct list_head list;
struct dp83640_clock *clock;
struct phy_device *phydev;
+ struct mii_timestamper mii_ts;
struct delayed_work ts_work;
int hwts_tx_en;
int hwts_rx_en;
@@ -1131,96 +1132,6 @@ static void dp83640_clock_put(struct dp83640_clock *clock)
mutex_unlock(&clock->clock_lock);
}
-static int dp83640_probe(struct phy_device *phydev)
-{
- struct dp83640_clock *clock;
- struct dp83640_private *dp83640;
- int err = -ENOMEM, i;
-
- if (phydev->mdio.addr == BROADCAST_ADDR)
- return 0;
-
- clock = dp83640_clock_get_bus(phydev->mdio.bus);
- if (!clock)
- goto no_clock;
-
- dp83640 = kzalloc(sizeof(struct dp83640_private), GFP_KERNEL);
- if (!dp83640)
- goto no_memory;
-
- dp83640->phydev = phydev;
- INIT_DELAYED_WORK(&dp83640->ts_work, rx_timestamp_work);
-
- INIT_LIST_HEAD(&dp83640->rxts);
- INIT_LIST_HEAD(&dp83640->rxpool);
- for (i = 0; i < MAX_RXTS; i++)
- list_add(&dp83640->rx_pool_data[i].list, &dp83640->rxpool);
-
- phydev->priv = dp83640;
-
- spin_lock_init(&dp83640->rx_lock);
- skb_queue_head_init(&dp83640->rx_queue);
- skb_queue_head_init(&dp83640->tx_queue);
-
- dp83640->clock = clock;
-
- if (choose_this_phy(clock, phydev)) {
- clock->chosen = dp83640;
- clock->ptp_clock = ptp_clock_register(&clock->caps,
- &phydev->mdio.dev);
- if (IS_ERR(clock->ptp_clock)) {
- err = PTR_ERR(clock->ptp_clock);
- goto no_register;
- }
- } else
- list_add_tail(&dp83640->list, &clock->phylist);
-
- dp83640_clock_put(clock);
- return 0;
-
-no_register:
- clock->chosen = NULL;
- kfree(dp83640);
-no_memory:
- dp83640_clock_put(clock);
-no_clock:
- return err;
-}
-
-static void dp83640_remove(struct phy_device *phydev)
-{
- struct dp83640_clock *clock;
- struct list_head *this, *next;
- struct dp83640_private *tmp, *dp83640 = phydev->priv;
-
- if (phydev->mdio.addr == BROADCAST_ADDR)
- return;
-
- enable_status_frames(phydev, false);
- cancel_delayed_work_sync(&dp83640->ts_work);
-
- skb_queue_purge(&dp83640->rx_queue);
- skb_queue_purge(&dp83640->tx_queue);
-
- clock = dp83640_clock_get(dp83640->clock);
-
- if (dp83640 == clock->chosen) {
- ptp_clock_unregister(clock->ptp_clock);
- clock->chosen = NULL;
- } else {
- list_for_each_safe(this, next, &clock->phylist) {
- tmp = list_entry(this, struct dp83640_private, list);
- if (tmp == dp83640) {
- list_del_init(&tmp->list);
- break;
- }
- }
- }
-
- dp83640_clock_put(clock);
- kfree(dp83640);
-}
-
static int dp83640_soft_reset(struct phy_device *phydev)
{
int ret;
@@ -1319,9 +1230,10 @@ static int dp83640_config_intr(struct phy_device *phydev)
}
}
-static int dp83640_hwtstamp(struct phy_device *phydev, struct ifreq *ifr)
+static int dp83640_hwtstamp(struct mii_timestamper *mii_ts, struct ifreq *ifr)
{
- struct dp83640_private *dp83640 = phydev->priv;
+ struct dp83640_private *dp83640 =
+ container_of(mii_ts, struct dp83640_private, mii_ts);
struct hwtstamp_config cfg;
u16 txcfg0, rxcfg0;
@@ -1397,8 +1309,8 @@ static int dp83640_hwtstamp(struct phy_device *phydev, struct ifreq *ifr)
mutex_lock(&dp83640->clock->extreg_lock);
- ext_write(0, phydev, PAGE5, PTP_TXCFG0, txcfg0);
- ext_write(0, phydev, PAGE5, PTP_RXCFG0, rxcfg0);
+ ext_write(0, dp83640->phydev, PAGE5, PTP_TXCFG0, txcfg0);
+ ext_write(0, dp83640->phydev, PAGE5, PTP_RXCFG0, rxcfg0);
mutex_unlock(&dp83640->clock->extreg_lock);
@@ -1428,10 +1340,11 @@ static void rx_timestamp_work(struct work_struct *work)
schedule_delayed_work(&dp83640->ts_work, SKB_TIMESTAMP_TIMEOUT);
}
-static bool dp83640_rxtstamp(struct phy_device *phydev,
+static bool dp83640_rxtstamp(struct mii_timestamper *mii_ts,
struct sk_buff *skb, int type)
{
- struct dp83640_private *dp83640 = phydev->priv;
+ struct dp83640_private *dp83640 =
+ container_of(mii_ts, struct dp83640_private, mii_ts);
struct dp83640_skb_info *skb_info = (struct dp83640_skb_info *)skb->cb;
struct list_head *this, *next;
struct rxts *rxts;
@@ -1477,11 +1390,12 @@ static bool dp83640_rxtstamp(struct phy_device *phydev,
return true;
}
-static void dp83640_txtstamp(struct phy_device *phydev,
+static void dp83640_txtstamp(struct mii_timestamper *mii_ts,
struct sk_buff *skb, int type)
{
struct dp83640_skb_info *skb_info = (struct dp83640_skb_info *)skb->cb;
- struct dp83640_private *dp83640 = phydev->priv;
+ struct dp83640_private *dp83640 =
+ container_of(mii_ts, struct dp83640_private, mii_ts);
switch (dp83640->hwts_tx_en) {
@@ -1504,9 +1418,11 @@ static void dp83640_txtstamp(struct phy_device *phydev,
}
}
-static int dp83640_ts_info(struct phy_device *dev, struct ethtool_ts_info *info)
+static int dp83640_ts_info(struct mii_timestamper *mii_ts,
+ struct ethtool_ts_info *info)
{
- struct dp83640_private *dp83640 = dev->priv;
+ struct dp83640_private *dp83640 =
+ container_of(mii_ts, struct dp83640_private, mii_ts);
info->so_timestamping =
SOF_TIMESTAMPING_TX_HARDWARE |
@@ -1526,6 +1442,103 @@ static int dp83640_ts_info(struct phy_device *dev, struct ethtool_ts_info *info)
return 0;
}
+static int dp83640_probe(struct phy_device *phydev)
+{
+ struct dp83640_clock *clock;
+ struct dp83640_private *dp83640;
+ int err = -ENOMEM, i;
+
+ if (phydev->mdio.addr == BROADCAST_ADDR)
+ return 0;
+
+ clock = dp83640_clock_get_bus(phydev->mdio.bus);
+ if (!clock)
+ goto no_clock;
+
+ dp83640 = kzalloc(sizeof(struct dp83640_private), GFP_KERNEL);
+ if (!dp83640)
+ goto no_memory;
+
+ dp83640->phydev = phydev;
+ dp83640->mii_ts.rxtstamp = dp83640_rxtstamp;
+ dp83640->mii_ts.txtstamp = dp83640_txtstamp;
+ dp83640->mii_ts.hwtstamp = dp83640_hwtstamp;
+ dp83640->mii_ts.ts_info = dp83640_ts_info;
+
+ INIT_DELAYED_WORK(&dp83640->ts_work, rx_timestamp_work);
+ INIT_LIST_HEAD(&dp83640->rxts);
+ INIT_LIST_HEAD(&dp83640->rxpool);
+ for (i = 0; i < MAX_RXTS; i++)
+ list_add(&dp83640->rx_pool_data[i].list, &dp83640->rxpool);
+
+ phydev->mii_ts = &dp83640->mii_ts;
+ phydev->priv = dp83640;
+
+ spin_lock_init(&dp83640->rx_lock);
+ skb_queue_head_init(&dp83640->rx_queue);
+ skb_queue_head_init(&dp83640->tx_queue);
+
+ dp83640->clock = clock;
+
+ if (choose_this_phy(clock, phydev)) {
+ clock->chosen = dp83640;
+ clock->ptp_clock = ptp_clock_register(&clock->caps,
+ &phydev->mdio.dev);
+ if (IS_ERR(clock->ptp_clock)) {
+ err = PTR_ERR(clock->ptp_clock);
+ goto no_register;
+ }
+ } else
+ list_add_tail(&dp83640->list, &clock->phylist);
+
+ dp83640_clock_put(clock);
+ return 0;
+
+no_register:
+ clock->chosen = NULL;
+ kfree(dp83640);
+no_memory:
+ dp83640_clock_put(clock);
+no_clock:
+ return err;
+}
+
+static void dp83640_remove(struct phy_device *phydev)
+{
+ struct dp83640_clock *clock;
+ struct list_head *this, *next;
+ struct dp83640_private *tmp, *dp83640 = phydev->priv;
+
+ if (phydev->mdio.addr == BROADCAST_ADDR)
+ return;
+
+ phydev->mii_ts = NULL;
+
+ enable_status_frames(phydev, false);
+ cancel_delayed_work_sync(&dp83640->ts_work);
+
+ skb_queue_purge(&dp83640->rx_queue);
+ skb_queue_purge(&dp83640->tx_queue);
+
+ clock = dp83640_clock_get(dp83640->clock);
+
+ if (dp83640 == clock->chosen) {
+ ptp_clock_unregister(clock->ptp_clock);
+ clock->chosen = NULL;
+ } else {
+ list_for_each_safe(this, next, &clock->phylist) {
+ tmp = list_entry(this, struct dp83640_private, list);
+ if (tmp == dp83640) {
+ list_del_init(&tmp->list);
+ break;
+ }
+ }
+ }
+
+ dp83640_clock_put(clock);
+ kfree(dp83640);
+}
+
static struct phy_driver dp83640_driver = {
.phy_id = DP83640_PHY_ID,
.phy_id_mask = 0xfffffff0,
@@ -1537,10 +1550,6 @@ static struct phy_driver dp83640_driver = {
.config_init = dp83640_config_init,
.ack_interrupt = dp83640_ack_interrupt,
.config_intr = dp83640_config_intr,
- .ts_info = dp83640_ts_info,
- .hwtstamp = dp83640_hwtstamp,
- .rxtstamp = dp83640_rxtstamp,
- .txtstamp = dp83640_txtstamp,
};
static int __init dp83640_init(void)
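
The dp83640 rework above is the template for converting a PHY driver to the mii_timestamper interface: embed a struct mii_timestamper in the private data, fill in the callbacks, point phydev->mii_ts at it, and recover the private state with container_of(), since the callbacks no longer receive a phy_device. A stripped-down sketch of that shape (the demo_* names are illustrative):

    #include <linux/mii_timestamper.h>
    #include <linux/phy.h>
    #include <linux/slab.h>

    struct demo_priv {
            struct phy_device *phydev;
            struct mii_timestamper mii_ts;  /* embedded ops + state */
    };

    static bool demo_rxtstamp(struct mii_timestamper *mii_ts,
                              struct sk_buff *skb, int type)
    {
            /* The callback gets the mii_timestamper, not the phy_device,
             * so recover the private struct via container_of().
             */
            struct demo_priv *priv = container_of(mii_ts, struct demo_priv,
                                                  mii_ts);

            /* A real driver would match skb/type against its filters and
             * defer the skb for a hardware timestamp; this sketch just
             * declines, so the stack delivers the skb unmodified.
             */
            phydev_dbg(priv->phydev, "not timestamping rx type %x\n", type);
            return false;
    }

    static int demo_probe(struct phy_device *phydev)
    {
            struct demo_priv *priv = kzalloc(sizeof(*priv), GFP_KERNEL);

            if (!priv)
                    return -ENOMEM;

            priv->phydev = phydev;
            priv->mii_ts.rxtstamp = demo_rxtstamp;
            phydev->mii_ts = &priv->mii_ts; /* stack calls through this */
            phydev->priv = priv;
            return 0;
    }
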
diff --git a/drivers/net/phy/dp83822.c b/drivers/net/phy/dp83822.c
index 8a4b1d167ce2..fe9aa3ad52a7 100644
--- a/drivers/net/phy/dp83822.c
+++ b/drivers/net/phy/dp83822.c
@@ -1,6 +1,5 @@
// SPDX-License-Identifier: GPL-2.0
-/*
- * Driver for the Texas Instruments DP83822 PHY
+/* Driver for the Texas Instruments DP83822, DP83825 and DP83826 PHYs.
*
* Copyright (C) 2017 Texas Instruments Inc.
*/
@@ -15,7 +14,12 @@
#include <linux/netdevice.h>
#define DP83822_PHY_ID 0x2000a240
+#define DP83825S_PHY_ID 0x2000a140
#define DP83825I_PHY_ID 0x2000a150
+#define DP83825CM_PHY_ID 0x2000a160
+#define DP83825CS_PHY_ID 0x2000a170
+#define DP83826C_PHY_ID 0x2000a130
+#define DP83826NC_PHY_ID 0x2000a110
#define DP83822_DEVADDR 0x1f
@@ -319,12 +323,22 @@ static int dp83822_resume(struct phy_device *phydev)
static struct phy_driver dp83822_driver[] = {
DP83822_PHY_DRIVER(DP83822_PHY_ID, "TI DP83822"),
DP83822_PHY_DRIVER(DP83825I_PHY_ID, "TI DP83825I"),
+ DP83822_PHY_DRIVER(DP83826C_PHY_ID, "TI DP83826C"),
+ DP83822_PHY_DRIVER(DP83826NC_PHY_ID, "TI DP83826NC"),
+ DP83822_PHY_DRIVER(DP83825S_PHY_ID, "TI DP83825S"),
+ DP83822_PHY_DRIVER(DP83825CM_PHY_ID, "TI DP83825M"),
+ DP83822_PHY_DRIVER(DP83825CS_PHY_ID, "TI DP83825CS"),
};
module_phy_driver(dp83822_driver);
static struct mdio_device_id __maybe_unused dp83822_tbl[] = {
{ DP83822_PHY_ID, 0xfffffff0 },
{ DP83825I_PHY_ID, 0xfffffff0 },
+ { DP83826C_PHY_ID, 0xfffffff0 },
+ { DP83826NC_PHY_ID, 0xfffffff0 },
+ { DP83825S_PHY_ID, 0xfffffff0 },
+ { DP83825CM_PHY_ID, 0xfffffff0 },
+ { DP83825CS_PHY_ID, 0xfffffff0 },
{ },
};
MODULE_DEVICE_TABLE(mdio, dp83822_tbl);
diff --git a/drivers/net/phy/dp83867.c b/drivers/net/phy/dp83867.c
index 9cd9dcee4eb2..967f57ed0b65 100644
--- a/drivers/net/phy/dp83867.c
+++ b/drivers/net/phy/dp83867.c
@@ -93,10 +93,13 @@
#define DP83867_STRAP_STS2_CLK_SKEW_NONE BIT(2)
/* PHY CTRL bits */
-#define DP83867_PHYCR_FIFO_DEPTH_SHIFT 14
+#define DP83867_PHYCR_TX_FIFO_DEPTH_SHIFT 14
+#define DP83867_PHYCR_RX_FIFO_DEPTH_SHIFT 12
#define DP83867_PHYCR_FIFO_DEPTH_MAX 0x03
-#define DP83867_PHYCR_FIFO_DEPTH_MASK GENMASK(15, 14)
+#define DP83867_PHYCR_TX_FIFO_DEPTH_MASK GENMASK(15, 14)
+#define DP83867_PHYCR_RX_FIFO_DEPTH_MASK GENMASK(13, 12)
#define DP83867_PHYCR_RESERVED_MASK BIT(11)
+#define DP83867_PHYCR_FORCE_LINK_GOOD BIT(10)
/* RGMIIDCTL bits */
#define DP83867_RGMII_TX_CLK_DELAY_MAX 0xf
@@ -131,7 +134,8 @@ enum {
struct dp83867_private {
u32 rx_id_delay;
u32 tx_id_delay;
- u32 fifo_depth;
+ u32 tx_fifo_depth;
+ u32 rx_fifo_depth;
int io_impedance;
int port_mirroring;
bool rxctrl_strap_quirk;
@@ -408,18 +412,32 @@ static int dp83867_of_init(struct phy_device *phydev)
dp83867->port_mirroring = DP83867_PORT_MIRROING_DIS;
ret = of_property_read_u32(of_node, "ti,fifo-depth",
- &dp83867->fifo_depth);
+ &dp83867->tx_fifo_depth);
if (ret) {
- phydev_err(phydev,
- "ti,fifo-depth property is required\n");
- return ret;
+ ret = of_property_read_u32(of_node, "tx-fifo-depth",
+ &dp83867->tx_fifo_depth);
+ if (ret)
+ dp83867->tx_fifo_depth =
+ DP83867_PHYCR_FIFO_DEPTH_4_B_NIB;
}
- if (dp83867->fifo_depth > DP83867_PHYCR_FIFO_DEPTH_MAX) {
- phydev_err(phydev,
- "ti,fifo-depth value %u out of range\n",
- dp83867->fifo_depth);
+
+ if (dp83867->tx_fifo_depth > DP83867_PHYCR_FIFO_DEPTH_MAX) {
+ phydev_err(phydev, "tx-fifo-depth value %u out of range\n",
+ dp83867->tx_fifo_depth);
return -EINVAL;
}
+
+ ret = of_property_read_u32(of_node, "rx-fifo-depth",
+ &dp83867->rx_fifo_depth);
+ if (ret)
+ dp83867->rx_fifo_depth = DP83867_PHYCR_FIFO_DEPTH_4_B_NIB;
+
+ if (dp83867->rx_fifo_depth > DP83867_PHYCR_FIFO_DEPTH_MAX) {
+ phydev_err(phydev, "rx-fifo-depth value %u out of range\n",
+ dp83867->rx_fifo_depth);
+ return -EINVAL;
+ }
+
return 0;
}
#else
@@ -458,12 +476,31 @@ static int dp83867_config_init(struct phy_device *phydev)
phy_clear_bits_mmd(phydev, DP83867_DEVADDR, DP83867_CFG4,
BIT(7));
+ if (phy_interface_is_rgmii(phydev) ||
+ phydev->interface == PHY_INTERFACE_MODE_SGMII) {
+ val = phy_read(phydev, MII_DP83867_PHYCTRL);
+ if (val < 0)
+ return val;
+
+ val &= ~DP83867_PHYCR_TX_FIFO_DEPTH_MASK;
+ val |= (dp83867->tx_fifo_depth <<
+ DP83867_PHYCR_TX_FIFO_DEPTH_SHIFT);
+
+ if (phydev->interface == PHY_INTERFACE_MODE_SGMII) {
+ val &= ~DP83867_PHYCR_RX_FIFO_DEPTH_MASK;
+ val |= (dp83867->rx_fifo_depth <<
+ DP83867_PHYCR_RX_FIFO_DEPTH_SHIFT);
+ }
+
+ ret = phy_write(phydev, MII_DP83867_PHYCTRL, val);
+ if (ret)
+ return ret;
+ }
+
if (phy_interface_is_rgmii(phydev)) {
val = phy_read(phydev, MII_DP83867_PHYCTRL);
if (val < 0)
return val;
- val &= ~DP83867_PHYCR_FIFO_DEPTH_MASK;
- val |= (dp83867->fifo_depth << DP83867_PHYCR_FIFO_DEPTH_SHIFT);
/* The code below checks if "port mirroring" N/A MODE4 has been
* enabled during power on bootstrap.
@@ -599,7 +636,12 @@ static int dp83867_phy_reset(struct phy_device *phydev)
usleep_range(10, 20);
- return 0;
+ /* After reset, the FORCE_LINK_GOOD bit is set even though its
+ * default value should be unset. Clear FORCE_LINK_GOOD for the
+ * PHY to work properly.
+ */
+ return phy_modify(phydev, MII_DP83867_PHYCTRL,
+ DP83867_PHYCR_FORCE_LINK_GOOD, 0);
}
static struct phy_driver dp83867_driver[] = {
diff --git a/drivers/net/phy/dp83869.c b/drivers/net/phy/dp83869.c
index 93021904c5e4..7996a4aea8d2 100644
--- a/drivers/net/phy/dp83869.c
+++ b/drivers/net/phy/dp83869.c
@@ -334,7 +334,7 @@ static int dp83869_configure_mode(struct phy_device *phydev,
break;
default:
return -EINVAL;
- };
+ }
return ret;
}
diff --git a/drivers/net/phy/fixed_phy.c b/drivers/net/phy/fixed_phy.c
index 7c5265fd2b94..4a3d34f40cb9 100644
--- a/drivers/net/phy/fixed_phy.c
+++ b/drivers/net/phy/fixed_phy.c
@@ -210,18 +210,15 @@ static struct gpio_desc *fixed_phy_get_gpiod(struct device_node *np)
* Linux device associated with it, we simply have obtain
* the GPIO descriptor from the device tree like this.
*/
- gpiod = gpiod_get_from_of_node(fixed_link_node, "link-gpios", 0,
- GPIOD_IN, "mdio");
- of_node_put(fixed_link_node);
- if (IS_ERR(gpiod)) {
- if (PTR_ERR(gpiod) == -EPROBE_DEFER)
- return gpiod;
-
+ gpiod = fwnode_gpiod_get_index(of_fwnode_handle(fixed_link_node),
+ "link", 0, GPIOD_IN, "mdio");
+ if (IS_ERR(gpiod) && PTR_ERR(gpiod) != -EPROBE_DEFER) {
if (PTR_ERR(gpiod) != -ENOENT)
pr_err("error getting GPIO for fixed link %pOF, proceed without\n",
fixed_link_node);
gpiod = NULL;
}
+ of_node_put(fixed_link_node);
return gpiod;
}
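
The fixed_phy change above treats the link GPIO as optional: -EPROBE_DEFER is propagated, -ENOENT is silent, and any other error is logged and then ignored. That optional-resource pattern looks like this on its own (a sketch, with a hypothetical consumer label):

    #include <linux/gpio/consumer.h>
    #include <linux/of.h>

    /* Fetch an optional "link" GPIO: propagate probe deferral, warn and
     * continue without the GPIO on any other error.
     */
    static struct gpio_desc *demo_get_link_gpio(struct device_node *np)
    {
            struct gpio_desc *gpiod;

            gpiod = fwnode_gpiod_get_index(of_fwnode_handle(np), "link", 0,
                                           GPIOD_IN, "demo");
            if (IS_ERR(gpiod) && PTR_ERR(gpiod) != -EPROBE_DEFER) {
                    if (PTR_ERR(gpiod) != -ENOENT)
                            pr_err("error getting GPIO for %pOF\n", np);
                    gpiod = NULL;   /* treat as absent */
            }

            return gpiod;   /* NULL, valid descriptor, or -EPROBE_DEFER */
    }
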
diff --git a/drivers/net/phy/lxt.c b/drivers/net/phy/lxt.c
index 356bd6472f49..fec58ad69e02 100644
--- a/drivers/net/phy/lxt.c
+++ b/drivers/net/phy/lxt.c
@@ -190,27 +190,11 @@ static int lxt973a2_read_status(struct phy_device *phydev)
phydev->duplex = DUPLEX_FULL;
}
- if (phydev->duplex == DUPLEX_FULL) {
- phydev->pause = lpa & LPA_PAUSE_CAP ? 1 : 0;
- phydev->asym_pause = lpa & LPA_PAUSE_ASYM ? 1 : 0;
- }
+ phy_resolve_aneg_pause(phydev);
} else {
- int bmcr = phy_read(phydev, MII_BMCR);
-
- if (bmcr < 0)
- return bmcr;
-
- if (bmcr & BMCR_FULLDPLX)
- phydev->duplex = DUPLEX_FULL;
- else
- phydev->duplex = DUPLEX_HALF;
-
- if (bmcr & BMCR_SPEED1000)
- phydev->speed = SPEED_1000;
- else if (bmcr & BMCR_SPEED100)
- phydev->speed = SPEED_100;
- else
- phydev->speed = SPEED_10;
+ err = genphy_read_status_fixed(phydev);
+ if (err < 0)
+ return err;
phydev->pause = phydev->asym_pause = 0;
linkmode_zero(phydev->lp_advertising);
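
phy_resolve_aneg_pause(), which replaces the open-coded LPA_PAUSE_* checks above, derives pause/asym_pause from the link partner advertisement once duplex is known. Roughly its semantics, as a sketch rather than a copy of the helper:

    #include <linux/linkmode.h>
    #include <linux/phy.h>

    /* Roughly what phy_resolve_aneg_pause() does: pause is only
     * meaningful on full-duplex links, and is taken from the link
     * partner's advertisement recorded in lp_advertising.
     */
    static void demo_resolve_pause(struct phy_device *phydev)
    {
            if (phydev->duplex == DUPLEX_FULL) {
                    phydev->pause = linkmode_test_bit(
                            ETHTOOL_LINK_MODE_Pause_BIT,
                            phydev->lp_advertising);
                    phydev->asym_pause = linkmode_test_bit(
                            ETHTOOL_LINK_MODE_Asym_Pause_BIT,
                            phydev->lp_advertising);
            }
    }
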
diff --git a/drivers/net/phy/marvell.c b/drivers/net/phy/marvell.c
index b1fbd1937328..28e33ece4ce1 100644
--- a/drivers/net/phy/marvell.c
+++ b/drivers/net/phy/marvell.c
@@ -162,19 +162,9 @@
#define MII_88E1510_GEN_CTRL_REG_1_MODE_SGMII 0x1 /* SGMII to copper */
#define MII_88E1510_GEN_CTRL_REG_1_RESET 0x8000 /* Soft reset */
-#define LPA_FIBER_1000HALF 0x40
-#define LPA_FIBER_1000FULL 0x20
-
#define LPA_PAUSE_FIBER 0x180
#define LPA_PAUSE_ASYM_FIBER 0x100
-#define ADVERTISE_FIBER_1000HALF 0x40
-#define ADVERTISE_FIBER_1000FULL 0x20
-
-#define ADVERTISE_PAUSE_FIBER 0x180
-#define ADVERTISE_PAUSE_ASYM_FIBER 0x100
-
-#define REGISTER_LINK_STATUS 0x400
#define NB_FIBER_STATS 1
MODULE_DESCRIPTION("Marvell PHY driver");
@@ -497,16 +487,15 @@ static inline u32 linkmode_adv_to_fiber_adv_t(unsigned long *advertise)
u32 result = 0;
if (linkmode_test_bit(ETHTOOL_LINK_MODE_1000baseT_Half_BIT, advertise))
- result |= ADVERTISE_FIBER_1000HALF;
+ result |= ADVERTISE_1000XHALF;
if (linkmode_test_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT, advertise))
- result |= ADVERTISE_FIBER_1000FULL;
+ result |= ADVERTISE_1000XFULL;
if (linkmode_test_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT, advertise) &&
linkmode_test_bit(ETHTOOL_LINK_MODE_Pause_BIT, advertise))
- result |= LPA_PAUSE_ASYM_FIBER;
+ result |= ADVERTISE_1000XPSE_ASYM;
else if (linkmode_test_bit(ETHTOOL_LINK_MODE_Pause_BIT, advertise))
- result |= (ADVERTISE_PAUSE_FIBER
- & (~ADVERTISE_PAUSE_ASYM_FIBER));
+ result |= ADVERTISE_1000XPAUSE;
return result;
}
@@ -524,7 +513,7 @@ static int marvell_config_aneg_fiber(struct phy_device *phydev)
{
int changed = 0;
int err;
- int adv, oldadv;
+ u16 adv;
if (phydev->autoneg != AUTONEG_ENABLE)
return genphy_setup_forced(phydev);
@@ -533,44 +522,19 @@ static int marvell_config_aneg_fiber(struct phy_device *phydev)
linkmode_and(phydev->advertising, phydev->advertising,
phydev->supported);
- /* Setup fiber advertisement */
- adv = phy_read(phydev, MII_ADVERTISE);
- if (adv < 0)
- return adv;
-
- oldadv = adv;
- adv &= ~(ADVERTISE_FIBER_1000HALF | ADVERTISE_FIBER_1000FULL
- | LPA_PAUSE_FIBER);
- adv |= linkmode_adv_to_fiber_adv_t(phydev->advertising);
-
- if (adv != oldadv) {
- err = phy_write(phydev, MII_ADVERTISE, adv);
- if (err < 0)
- return err;
+ adv = linkmode_adv_to_fiber_adv_t(phydev->advertising);
+ /* Setup fiber advertisement */
+ err = phy_modify_changed(phydev, MII_ADVERTISE,
+ ADVERTISE_1000XHALF | ADVERTISE_1000XFULL |
+ ADVERTISE_1000XPAUSE | ADVERTISE_1000XPSE_ASYM,
+ adv);
+ if (err < 0)
+ return err;
+ if (err > 0)
changed = 1;
- }
-
- if (changed == 0) {
- /* Advertisement hasn't changed, but maybe aneg was never on to
- * begin with? Or maybe phy was isolated?
- */
- int ctl = phy_read(phydev, MII_BMCR);
-
- if (ctl < 0)
- return ctl;
-
- if (!(ctl & BMCR_ANENABLE) || (ctl & BMCR_ISOLATE))
- changed = 1; /* do restart aneg */
- }
- /* Only restart aneg if we are advertising something different
- * than we were before.
- */
- if (changed > 0)
- changed = genphy_restart_aneg(phydev);
-
- return changed;
+ return genphy_check_and_restart_aneg(phydev, changed);
}
static int m88e1510_config_aneg(struct phy_device *phydev)
@@ -1302,93 +1266,29 @@ static int m88e6390_config_aneg(struct phy_device *phydev)
static void fiber_lpa_mod_linkmode_lpa_t(unsigned long *advertising, u32 lpa)
{
linkmode_mod_bit(ETHTOOL_LINK_MODE_1000baseT_Half_BIT,
- advertising, lpa & LPA_FIBER_1000HALF);
+ advertising, lpa & LPA_1000XHALF);
linkmode_mod_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT,
- advertising, lpa & LPA_FIBER_1000FULL);
-}
-
-/**
- * marvell_update_link - update link status in real time in @phydev
- * @phydev: target phy_device struct
- *
- * Description: Update the value in phydev->link to reflect the
- * current link value.
- */
-static int marvell_update_link(struct phy_device *phydev, int fiber)
-{
- int status;
-
- /* Use the generic register for copper link, or specific
- * register for fiber case
- */
- if (fiber) {
- status = phy_read(phydev, MII_M1011_PHY_STATUS);
- if (status < 0)
- return status;
-
- if ((status & REGISTER_LINK_STATUS) == 0)
- phydev->link = 0;
- else
- phydev->link = 1;
- } else {
- return genphy_update_link(phydev);
- }
-
- return 0;
+ advertising, lpa & LPA_1000XFULL);
}
static int marvell_read_status_page_an(struct phy_device *phydev,
- int fiber)
+ int fiber, int status)
{
- int status;
int lpa;
- int lpagb;
-
- status = phy_read(phydev, MII_M1011_PHY_STATUS);
- if (status < 0)
- return status;
-
- lpa = phy_read(phydev, MII_LPA);
- if (lpa < 0)
- return lpa;
-
- lpagb = phy_read(phydev, MII_STAT1000);
- if (lpagb < 0)
- return lpagb;
-
- if (status & MII_M1011_PHY_STATUS_FULLDUPLEX)
- phydev->duplex = DUPLEX_FULL;
- else
- phydev->duplex = DUPLEX_HALF;
-
- status = status & MII_M1011_PHY_STATUS_SPD_MASK;
- phydev->pause = 0;
- phydev->asym_pause = 0;
-
- switch (status) {
- case MII_M1011_PHY_STATUS_1000:
- phydev->speed = SPEED_1000;
- break;
-
- case MII_M1011_PHY_STATUS_100:
- phydev->speed = SPEED_100;
- break;
-
- default:
- phydev->speed = SPEED_10;
- break;
- }
+ int err;
if (!fiber) {
- mii_lpa_to_linkmode_lpa_t(phydev->lp_advertising, lpa);
- mii_stat1000_mod_linkmode_lpa_t(phydev->lp_advertising, lpagb);
+ err = genphy_read_lpa(phydev);
+ if (err < 0)
+ return err;
- if (phydev->duplex == DUPLEX_FULL) {
- phydev->pause = lpa & LPA_PAUSE_CAP ? 1 : 0;
- phydev->asym_pause = lpa & LPA_PAUSE_ASYM ? 1 : 0;
- }
+ phy_resolve_aneg_pause(phydev);
} else {
+ lpa = phy_read(phydev, MII_LPA);
+ if (lpa < 0)
+ return lpa;
+
/* The fiber link is only 1000M capable */
fiber_lpa_mod_linkmode_lpa_t(phydev->lp_advertising, lpa);
@@ -1405,31 +1305,25 @@ static int marvell_read_status_page_an(struct phy_device *phydev,
}
}
}
- return 0;
-}
-static int marvell_read_status_page_fixed(struct phy_device *phydev)
-{
- int bmcr = phy_read(phydev, MII_BMCR);
-
- if (bmcr < 0)
- return bmcr;
-
- if (bmcr & BMCR_FULLDPLX)
+ if (status & MII_M1011_PHY_STATUS_FULLDUPLEX)
phydev->duplex = DUPLEX_FULL;
else
phydev->duplex = DUPLEX_HALF;
- if (bmcr & BMCR_SPEED1000)
+ switch (status & MII_M1011_PHY_STATUS_SPD_MASK) {
+ case MII_M1011_PHY_STATUS_1000:
phydev->speed = SPEED_1000;
- else if (bmcr & BMCR_SPEED100)
+ break;
+
+ case MII_M1011_PHY_STATUS_100:
phydev->speed = SPEED_100;
- else
- phydev->speed = SPEED_10;
+ break;
- phydev->pause = 0;
- phydev->asym_pause = 0;
- linkmode_zero(phydev->lp_advertising);
+ default:
+ phydev->speed = SPEED_10;
+ break;
+ }
return 0;
}
@@ -1444,25 +1338,38 @@ static int marvell_read_status_page_fixed(struct phy_device *phydev)
*/
static int marvell_read_status_page(struct phy_device *phydev, int page)
{
+ int status;
int fiber;
int err;
- /* Detect and update the link, but return if there
- * was an error
+ status = phy_read(phydev, MII_M1011_PHY_STATUS);
+ if (status < 0)
+ return status;
+
+ /* Use the generic register for copper link status,
+ * and the PHY status register for fiber link status.
*/
+ if (page == MII_MARVELL_FIBER_PAGE) {
+ phydev->link = !!(status & MII_M1011_PHY_STATUS_LINK);
+ } else {
+ err = genphy_update_link(phydev);
+ if (err)
+ return err;
+ }
+
if (page == MII_MARVELL_FIBER_PAGE)
fiber = 1;
else
fiber = 0;
- err = marvell_update_link(phydev, fiber);
- if (err)
- return err;
+ linkmode_zero(phydev->lp_advertising);
+ phydev->pause = 0;
+ phydev->asym_pause = 0;
if (phydev->autoneg == AUTONEG_ENABLE)
- err = marvell_read_status_page_an(phydev, fiber);
+ err = marvell_read_status_page_an(phydev, fiber, status);
else
- err = marvell_read_status_page_fixed(phydev);
+ err = genphy_read_status_fixed(phydev);
return err;
}
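
The fiber aneg rewrite above hinges on phy_modify_changed(), which returns a negative errno, 0 if the register already held the value, or 1 if it was rewritten, so the caller can restart autonegotiation only on a real change. The pattern in a minimal sketch:

    #include <linux/mii.h>
    #include <linux/phy.h>

    /* Update the 1000BASE-X advertisement bits and only restart
     * autonegotiation when something actually changed, mirroring
     * marvell_config_aneg_fiber() above.
     */
    static int demo_update_fiber_adv(struct phy_device *phydev, u16 adv)
    {
            int ret;

            ret = phy_modify_changed(phydev, MII_ADVERTISE,
                                     ADVERTISE_1000XHALF | ADVERTISE_1000XFULL |
                                     ADVERTISE_1000XPAUSE | ADVERTISE_1000XPSE_ASYM,
                                     adv);
            if (ret < 0)
                    return ret;             /* MDIO error */

            /* ret == 1 means the register was rewritten */
            return genphy_check_and_restart_aneg(phydev, ret > 0);
    }
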
diff --git a/drivers/net/phy/marvell10g.c b/drivers/net/phy/marvell10g.c
index 1bf13017d288..64c9f3bba2cd 100644
--- a/drivers/net/phy/marvell10g.c
+++ b/drivers/net/phy/marvell10g.c
@@ -214,9 +214,9 @@ static int mv3310_sfp_insert(void *upstream, const struct sfp_eeprom_id *id)
phy_interface_t iface;
sfp_parse_support(phydev->sfp_bus, id, support);
- iface = sfp_select_interface(phydev->sfp_bus, id, support);
+ iface = sfp_select_interface(phydev->sfp_bus, support);
- if (iface != PHY_INTERFACE_MODE_10GKR) {
+ if (iface != PHY_INTERFACE_MODE_10GBASER) {
dev_err(&phydev->mdio.dev, "incompatible SFP module inserted\n");
return -EINVAL;
}
@@ -304,7 +304,7 @@ static int mv3310_config_init(struct phy_device *phydev)
phydev->interface != PHY_INTERFACE_MODE_2500BASEX &&
phydev->interface != PHY_INTERFACE_MODE_XAUI &&
phydev->interface != PHY_INTERFACE_MODE_RXAUI &&
- phydev->interface != PHY_INTERFACE_MODE_10GKR)
+ phydev->interface != PHY_INTERFACE_MODE_10GBASER)
return -ENODEV;
return 0;
@@ -386,16 +386,17 @@ static void mv3310_update_interface(struct phy_device *phydev)
{
if ((phydev->interface == PHY_INTERFACE_MODE_SGMII ||
phydev->interface == PHY_INTERFACE_MODE_2500BASEX ||
- phydev->interface == PHY_INTERFACE_MODE_10GKR) && phydev->link) {
+ phydev->interface == PHY_INTERFACE_MODE_10GBASER) &&
+ phydev->link) {
/* The PHY automatically switches its serdes interface (and
- * active PHYXS instance) between Cisco SGMII, 10GBase-KR and
+ * active PHYXS instance) between Cisco SGMII, 10GBase-R and
* 2500BaseX modes according to the speed. Florian suggests
* setting phydev->interface to communicate this to the MAC.
* Only do this if we are already in one of the above modes.
*/
switch (phydev->speed) {
case SPEED_10000:
- phydev->interface = PHY_INTERFACE_MODE_10GKR;
+ phydev->interface = PHY_INTERFACE_MODE_10GBASER;
break;
case SPEED_2500:
phydev->interface = PHY_INTERFACE_MODE_2500BASEX;
diff --git a/drivers/net/phy/mdio-i2c.c b/drivers/net/phy/mdio-i2c.c
index 0dce67672548..0746e2cc39ae 100644
--- a/drivers/net/phy/mdio-i2c.c
+++ b/drivers/net/phy/mdio-i2c.c
@@ -33,17 +33,24 @@ static int i2c_mii_read(struct mii_bus *bus, int phy_id, int reg)
{
struct i2c_adapter *i2c = bus->priv;
struct i2c_msg msgs[2];
- u8 data[2], dev_addr = reg;
+ u8 addr[3], data[2], *p;
int bus_addr, ret;
if (!i2c_mii_valid_phy_id(phy_id))
return 0xffff;
+ p = addr;
+ if (reg & MII_ADDR_C45) {
+ *p++ = 0x20 | ((reg >> 16) & 31);
+ *p++ = reg >> 8;
+ }
+ *p++ = reg;
+
bus_addr = i2c_mii_phy_addr(phy_id);
msgs[0].addr = bus_addr;
msgs[0].flags = 0;
- msgs[0].len = 1;
- msgs[0].buf = &dev_addr;
+ msgs[0].len = p - addr;
+ msgs[0].buf = addr;
msgs[1].addr = bus_addr;
msgs[1].flags = I2C_M_RD;
msgs[1].len = sizeof(data);
@@ -61,18 +68,23 @@ static int i2c_mii_write(struct mii_bus *bus, int phy_id, int reg, u16 val)
struct i2c_adapter *i2c = bus->priv;
struct i2c_msg msg;
int ret;
- u8 data[3];
+ u8 data[5], *p;
if (!i2c_mii_valid_phy_id(phy_id))
return 0;
- data[0] = reg;
- data[1] = val >> 8;
- data[2] = val;
+ p = data;
+ if (reg & MII_ADDR_C45) {
+ *p++ = (reg >> 16) & 31;
+ *p++ = reg >> 8;
+ }
+ *p++ = reg;
+ *p++ = val >> 8;
+ *p++ = val;
msg.addr = i2c_mii_phy_addr(phy_id);
msg.flags = 0;
- msg.len = 3;
+ msg.len = p - data;
msg.buf = data;
ret = i2c_transfer(i2c, &msg, 1);
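
The Clause 45 support here decodes phylib's packed register cookie: MII_ADDR_C45 flags a C45 access, the MMD (device address) sits in bits 20:16, and the 16-bit register in bits 15:0; i2c_mii_read() additionally ORs 0x20 into the first framing byte. A sketch of the encode/decode:

    #include <linux/mdio.h>

    /* Build the phylib Clause 45 register cookie that i2c_mii_read()
     * above unpacks: flag | devad | regnum.
     */
    static u32 demo_c45_addr(int devad, u16 regnum)
    {
            return MII_ADDR_C45 | (devad << 16) | regnum;
    }

    /* The inverse, matching the unpacking above: bits 20:16 hold the
     * MMD (device address), bits 15:0 the register number.
     */
    static void demo_c45_decode(u32 reg, u8 *devad, u16 *regnum)
    {
            *devad = (reg >> 16) & 31;
            *regnum = reg & 0xffff;
    }
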
diff --git a/drivers/net/phy/mdio_bus.c b/drivers/net/phy/mdio_bus.c
index 229e480179ff..9bb9f37f21dc 100644
--- a/drivers/net/phy/mdio_bus.c
+++ b/drivers/net/phy/mdio_bus.c
@@ -59,17 +59,11 @@ static int mdiobus_register_gpiod(struct mdio_device *mdiodev)
static int mdiobus_register_reset(struct mdio_device *mdiodev)
{
- struct reset_control *reset = NULL;
-
- if (mdiodev->dev.of_node)
- reset = of_reset_control_get_exclusive(mdiodev->dev.of_node,
- "phy");
- if (IS_ERR(reset)) {
- if (PTR_ERR(reset) == -ENOENT || PTR_ERR(reset) == -ENOTSUPP)
- reset = NULL;
- else
- return PTR_ERR(reset);
- }
+ struct reset_control *reset;
+
+ reset = reset_control_get_optional_exclusive(&mdiodev->dev, "phy");
+ if (IS_ERR(reset))
+ return PTR_ERR(reset);
mdiodev->reset_ctrl = reset;
@@ -164,9 +158,11 @@ struct mii_bus *mdiobus_alloc_size(size_t size)
if (size)
bus->priv = (void *)bus + aligned_size;
- /* Initialise the interrupts to polling */
- for (i = 0; i < PHY_MAX_ADDR; i++)
+ /* Initialise the interrupts to polling and 64-bit seqcounts */
+ for (i = 0; i < PHY_MAX_ADDR; i++) {
bus->irq[i] = PHY_POLL;
+ u64_stats_init(&bus->stats[i].syncp);
+ }
return bus;
}
@@ -255,9 +251,215 @@ static void mdiobus_release(struct device *d)
kfree(bus);
}
+struct mdio_bus_stat_attr {
+ int addr;
+ unsigned int field_offset;
+};
+
+static u64 mdio_bus_get_stat(struct mdio_bus_stats *s, unsigned int offset)
+{
+ const char *p = (const char *)s + offset;
+ unsigned int start;
+ u64 val = 0;
+
+ do {
+ start = u64_stats_fetch_begin(&s->syncp);
+ val = u64_stats_read((const u64_stats_t *)p);
+ } while (u64_stats_fetch_retry(&s->syncp, start));
+
+ return val;
+}
+
+static u64 mdio_bus_get_global_stat(struct mii_bus *bus, unsigned int offset)
+{
+ unsigned int i;
+ u64 val = 0;
+
+ for (i = 0; i < PHY_MAX_ADDR; i++)
+ val += mdio_bus_get_stat(&bus->stats[i], offset);
+
+ return val;
+}
+
+static ssize_t mdio_bus_stat_field_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct mii_bus *bus = to_mii_bus(dev);
+ struct mdio_bus_stat_attr *sattr;
+ struct dev_ext_attribute *eattr;
+ u64 val;
+
+ eattr = container_of(attr, struct dev_ext_attribute, attr);
+ sattr = eattr->var;
+
+ if (sattr->addr < 0)
+ val = mdio_bus_get_global_stat(bus, sattr->field_offset);
+ else
+ val = mdio_bus_get_stat(&bus->stats[sattr->addr],
+ sattr->field_offset);
+
+ return sprintf(buf, "%llu\n", val);
+}
+
+static ssize_t mdio_bus_device_stat_field_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct mdio_device *mdiodev = to_mdio_device(dev);
+ struct mii_bus *bus = mdiodev->bus;
+ struct mdio_bus_stat_attr *sattr;
+ struct dev_ext_attribute *eattr;
+ int addr = mdiodev->addr;
+ u64 val;
+
+ eattr = container_of(attr, struct dev_ext_attribute, attr);
+ sattr = eattr->var;
+
+ val = mdio_bus_get_stat(&bus->stats[addr], sattr->field_offset);
+
+ return sprintf(buf, "%llu\n", val);
+}
+
+#define MDIO_BUS_STATS_ATTR_DECL(field, file) \
+static struct dev_ext_attribute dev_attr_mdio_bus_##field = { \
+ .attr = { .attr = { .name = file, .mode = 0444 }, \
+ .show = mdio_bus_stat_field_show, \
+ }, \
+ .var = &((struct mdio_bus_stat_attr) { \
+ -1, offsetof(struct mdio_bus_stats, field) \
+ }), \
+}; \
+static struct dev_ext_attribute dev_attr_mdio_bus_device_##field = { \
+ .attr = { .attr = { .name = file, .mode = 0444 }, \
+ .show = mdio_bus_device_stat_field_show, \
+ }, \
+ .var = &((struct mdio_bus_stat_attr) { \
+ -1, offsetof(struct mdio_bus_stats, field) \
+ }), \
+};
+
+#define MDIO_BUS_STATS_ATTR(field) \
+ MDIO_BUS_STATS_ATTR_DECL(field, __stringify(field))
+
+MDIO_BUS_STATS_ATTR(transfers);
+MDIO_BUS_STATS_ATTR(errors);
+MDIO_BUS_STATS_ATTR(writes);
+MDIO_BUS_STATS_ATTR(reads);
+
+#define MDIO_BUS_STATS_ADDR_ATTR_DECL(field, addr, file) \
+static struct dev_ext_attribute dev_attr_mdio_bus_addr_##field##_##addr = { \
+ .attr = { .attr = { .name = file, .mode = 0444 }, \
+ .show = mdio_bus_stat_field_show, \
+ }, \
+ .var = &((struct mdio_bus_stat_attr) { \
+ addr, offsetof(struct mdio_bus_stats, field) \
+ }), \
+}
+
+#define MDIO_BUS_STATS_ADDR_ATTR(field, addr) \
+ MDIO_BUS_STATS_ADDR_ATTR_DECL(field, addr, \
+ __stringify(field) "_" __stringify(addr))
+
+#define MDIO_BUS_STATS_ADDR_ATTR_GROUP_DECL(addr) \
+ MDIO_BUS_STATS_ADDR_ATTR(transfers, addr); \
+ MDIO_BUS_STATS_ADDR_ATTR(errors, addr); \
+ MDIO_BUS_STATS_ADDR_ATTR(writes, addr); \
+ MDIO_BUS_STATS_ADDR_ATTR(reads, addr) \
+
+MDIO_BUS_STATS_ADDR_ATTR_GROUP_DECL(0);
+MDIO_BUS_STATS_ADDR_ATTR_GROUP_DECL(1);
+MDIO_BUS_STATS_ADDR_ATTR_GROUP_DECL(2);
+MDIO_BUS_STATS_ADDR_ATTR_GROUP_DECL(3);
+MDIO_BUS_STATS_ADDR_ATTR_GROUP_DECL(4);
+MDIO_BUS_STATS_ADDR_ATTR_GROUP_DECL(5);
+MDIO_BUS_STATS_ADDR_ATTR_GROUP_DECL(6);
+MDIO_BUS_STATS_ADDR_ATTR_GROUP_DECL(7);
+MDIO_BUS_STATS_ADDR_ATTR_GROUP_DECL(8);
+MDIO_BUS_STATS_ADDR_ATTR_GROUP_DECL(9);
+MDIO_BUS_STATS_ADDR_ATTR_GROUP_DECL(10);
+MDIO_BUS_STATS_ADDR_ATTR_GROUP_DECL(11);
+MDIO_BUS_STATS_ADDR_ATTR_GROUP_DECL(12);
+MDIO_BUS_STATS_ADDR_ATTR_GROUP_DECL(13);
+MDIO_BUS_STATS_ADDR_ATTR_GROUP_DECL(14);
+MDIO_BUS_STATS_ADDR_ATTR_GROUP_DECL(15);
+MDIO_BUS_STATS_ADDR_ATTR_GROUP_DECL(16);
+MDIO_BUS_STATS_ADDR_ATTR_GROUP_DECL(17);
+MDIO_BUS_STATS_ADDR_ATTR_GROUP_DECL(18);
+MDIO_BUS_STATS_ADDR_ATTR_GROUP_DECL(19);
+MDIO_BUS_STATS_ADDR_ATTR_GROUP_DECL(20);
+MDIO_BUS_STATS_ADDR_ATTR_GROUP_DECL(21);
+MDIO_BUS_STATS_ADDR_ATTR_GROUP_DECL(22);
+MDIO_BUS_STATS_ADDR_ATTR_GROUP_DECL(23);
+MDIO_BUS_STATS_ADDR_ATTR_GROUP_DECL(24);
+MDIO_BUS_STATS_ADDR_ATTR_GROUP_DECL(25);
+MDIO_BUS_STATS_ADDR_ATTR_GROUP_DECL(26);
+MDIO_BUS_STATS_ADDR_ATTR_GROUP_DECL(27);
+MDIO_BUS_STATS_ADDR_ATTR_GROUP_DECL(28);
+MDIO_BUS_STATS_ADDR_ATTR_GROUP_DECL(29);
+MDIO_BUS_STATS_ADDR_ATTR_GROUP_DECL(30);
+MDIO_BUS_STATS_ADDR_ATTR_GROUP_DECL(31);
+
+#define MDIO_BUS_STATS_ADDR_ATTR_GROUP(addr) \
+ &dev_attr_mdio_bus_addr_transfers_##addr.attr.attr, \
+ &dev_attr_mdio_bus_addr_errors_##addr.attr.attr, \
+ &dev_attr_mdio_bus_addr_writes_##addr.attr.attr, \
+ &dev_attr_mdio_bus_addr_reads_##addr.attr.attr \
+
+static struct attribute *mdio_bus_statistics_attrs[] = {
+ &dev_attr_mdio_bus_transfers.attr.attr,
+ &dev_attr_mdio_bus_errors.attr.attr,
+ &dev_attr_mdio_bus_writes.attr.attr,
+ &dev_attr_mdio_bus_reads.attr.attr,
+ MDIO_BUS_STATS_ADDR_ATTR_GROUP(0),
+ MDIO_BUS_STATS_ADDR_ATTR_GROUP(1),
+ MDIO_BUS_STATS_ADDR_ATTR_GROUP(2),
+ MDIO_BUS_STATS_ADDR_ATTR_GROUP(3),
+ MDIO_BUS_STATS_ADDR_ATTR_GROUP(4),
+ MDIO_BUS_STATS_ADDR_ATTR_GROUP(5),
+ MDIO_BUS_STATS_ADDR_ATTR_GROUP(6),
+ MDIO_BUS_STATS_ADDR_ATTR_GROUP(7),
+ MDIO_BUS_STATS_ADDR_ATTR_GROUP(8),
+ MDIO_BUS_STATS_ADDR_ATTR_GROUP(9),
+ MDIO_BUS_STATS_ADDR_ATTR_GROUP(10),
+ MDIO_BUS_STATS_ADDR_ATTR_GROUP(11),
+ MDIO_BUS_STATS_ADDR_ATTR_GROUP(12),
+ MDIO_BUS_STATS_ADDR_ATTR_GROUP(13),
+ MDIO_BUS_STATS_ADDR_ATTR_GROUP(14),
+ MDIO_BUS_STATS_ADDR_ATTR_GROUP(15),
+ MDIO_BUS_STATS_ADDR_ATTR_GROUP(16),
+ MDIO_BUS_STATS_ADDR_ATTR_GROUP(17),
+ MDIO_BUS_STATS_ADDR_ATTR_GROUP(18),
+ MDIO_BUS_STATS_ADDR_ATTR_GROUP(19),
+ MDIO_BUS_STATS_ADDR_ATTR_GROUP(20),
+ MDIO_BUS_STATS_ADDR_ATTR_GROUP(21),
+ MDIO_BUS_STATS_ADDR_ATTR_GROUP(22),
+ MDIO_BUS_STATS_ADDR_ATTR_GROUP(23),
+ MDIO_BUS_STATS_ADDR_ATTR_GROUP(24),
+ MDIO_BUS_STATS_ADDR_ATTR_GROUP(25),
+ MDIO_BUS_STATS_ADDR_ATTR_GROUP(26),
+ MDIO_BUS_STATS_ADDR_ATTR_GROUP(27),
+ MDIO_BUS_STATS_ADDR_ATTR_GROUP(28),
+ MDIO_BUS_STATS_ADDR_ATTR_GROUP(29),
+ MDIO_BUS_STATS_ADDR_ATTR_GROUP(30),
+ MDIO_BUS_STATS_ADDR_ATTR_GROUP(31),
+ NULL,
+};
+
+static const struct attribute_group mdio_bus_statistics_group = {
+ .name = "statistics",
+ .attrs = mdio_bus_statistics_attrs,
+};
+
+static const struct attribute_group *mdio_bus_groups[] = {
+ &mdio_bus_statistics_group,
+ NULL,
+};
+
static struct class mdio_bus_class = {
.name = "mdio_bus",
.dev_release = mdiobus_release,
+ .dev_groups = mdio_bus_groups,
};
#if IS_ENABLED(CONFIG_OF_MDIO)
@@ -536,6 +738,24 @@ struct phy_device *mdiobus_scan(struct mii_bus *bus, int addr)
}
EXPORT_SYMBOL(mdiobus_scan);
+static void mdiobus_stats_acct(struct mdio_bus_stats *stats, bool op, int ret)
+{
+ u64_stats_update_begin(&stats->syncp);
+
+ u64_stats_inc(&stats->transfers);
+ if (ret < 0) {
+ u64_stats_inc(&stats->errors);
+ goto out;
+ }
+
+ if (op)
+ u64_stats_inc(&stats->reads);
+ else
+ u64_stats_inc(&stats->writes);
+out:
+ u64_stats_update_end(&stats->syncp);
+}
+
/**
* __mdiobus_read - Unlocked version of the mdiobus_read function
* @bus: the mii_bus struct
@@ -555,6 +775,7 @@ int __mdiobus_read(struct mii_bus *bus, int addr, u32 regnum)
retval = bus->read(bus, addr, regnum);
trace_mdio_access(bus, 1, addr, regnum, retval, retval);
+ mdiobus_stats_acct(&bus->stats[addr], true, retval);
return retval;
}
@@ -580,6 +801,7 @@ int __mdiobus_write(struct mii_bus *bus, int addr, u32 regnum, u16 val)
err = bus->write(bus, addr, regnum, val);
trace_mdio_access(bus, 0, addr, regnum, val, err);
+ mdiobus_stats_acct(&bus->stats[addr], false, err);
return err;
}
@@ -725,8 +947,27 @@ static int mdio_uevent(struct device *dev, struct kobj_uevent_env *env)
return 0;
}
+static struct attribute *mdio_bus_device_statistics_attrs[] = {
+ &dev_attr_mdio_bus_device_transfers.attr.attr,
+ &dev_attr_mdio_bus_device_errors.attr.attr,
+ &dev_attr_mdio_bus_device_writes.attr.attr,
+ &dev_attr_mdio_bus_device_reads.attr.attr,
+ NULL,
+};
+
+static const struct attribute_group mdio_bus_device_statistics_group = {
+ .name = "statistics",
+ .attrs = mdio_bus_device_statistics_attrs,
+};
+
+static const struct attribute_group *mdio_bus_dev_groups[] = {
+ &mdio_bus_device_statistics_group,
+ NULL,
+};
+
struct bus_type mdio_bus_type = {
.name = "mdio_bus",
+ .dev_groups = mdio_bus_dev_groups,
.match = mdio_bus_match,
.uevent = mdio_uevent,
};
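
mdio_bus_get_stat() above is the reader half of the u64_stats seqcount protocol, and mdiobus_stats_acct() further down is the writer half; readers retry their snapshot until it did not race a writer, which is what keeps the counters tear-free on 32-bit hosts. The paired pattern in isolation (a sketch):

    #include <linux/u64_stats_sync.h>

    struct demo_stats {
            u64_stats_t packets;
            struct u64_stats_sync syncp;
    };

    /* Writer: bracket updates so 32-bit readers never see a torn u64. */
    static void demo_stats_inc(struct demo_stats *s)
    {
            u64_stats_update_begin(&s->syncp);
            u64_stats_inc(&s->packets);
            u64_stats_update_end(&s->syncp);
    }

    /* Reader: retry until the snapshot did not overlap a writer. */
    static u64 demo_stats_read(struct demo_stats *s)
    {
            unsigned int start;
            u64 val;

            do {
                    start = u64_stats_fetch_begin(&s->syncp);
                    val = u64_stats_read(&s->packets);
            } while (u64_stats_fetch_retry(&s->syncp, start));

            return val;
    }
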
diff --git a/drivers/net/phy/mii_timestamper.c b/drivers/net/phy/mii_timestamper.c
new file mode 100644
index 000000000000..2f12c5d901df
--- /dev/null
+++ b/drivers/net/phy/mii_timestamper.c
@@ -0,0 +1,125 @@
+// SPDX-License-Identifier: GPL-2.0
+//
+// Support for generic time stamping devices on MII buses.
+// Copyright (C) 2018 Richard Cochran <richardcochran@gmail.com>
+//
+
+#include <linux/mii_timestamper.h>
+
+static LIST_HEAD(mii_timestamping_devices);
+static DEFINE_MUTEX(tstamping_devices_lock);
+
+struct mii_timestamping_desc {
+ struct list_head list;
+ struct mii_timestamping_ctrl *ctrl;
+ struct device *device;
+};
+
+/**
+ * register_mii_tstamp_controller() - registers an MII time stamping device.
+ *
+ * @device: The device to be registered.
+ * @ctrl: Pointer to device's control interface.
+ *
+ * Returns zero on success or non-zero on failure.
+ */
+int register_mii_tstamp_controller(struct device *device,
+ struct mii_timestamping_ctrl *ctrl)
+{
+ struct mii_timestamping_desc *desc;
+
+ desc = kzalloc(sizeof(*desc), GFP_KERNEL);
+ if (!desc)
+ return -ENOMEM;
+
+ INIT_LIST_HEAD(&desc->list);
+ desc->ctrl = ctrl;
+ desc->device = device;
+
+ mutex_lock(&tstamping_devices_lock);
+ list_add_tail(&desc->list, &mii_timestamping_devices);
+ mutex_unlock(&tstamping_devices_lock);
+
+ return 0;
+}
+EXPORT_SYMBOL(register_mii_tstamp_controller);
+
+/**
+ * unregister_mii_tstamp_controller() - unregisters an MII time stamping device.
+ *
+ * @device: A device previously passed to register_mii_tstamp_controller().
+ */
+void unregister_mii_tstamp_controller(struct device *device)
+{
+ struct mii_timestamping_desc *desc;
+ struct list_head *this, *next;
+
+ mutex_lock(&tstamping_devices_lock);
+ list_for_each_safe(this, next, &mii_timestamping_devices) {
+ desc = list_entry(this, struct mii_timestamping_desc, list);
+ if (desc->device == device) {
+ list_del_init(&desc->list);
+ kfree(desc);
+ break;
+ }
+ }
+ mutex_unlock(&tstamping_devices_lock);
+}
+EXPORT_SYMBOL(unregister_mii_tstamp_controller);
+
+/**
+ * register_mii_timestamper - Enables a given port of an MII time stamper.
+ *
+ * @node: The device tree node of the MII time stamp controller.
+ * @port: The index of the port to be enabled.
+ *
+ * Returns a valid interface on success or ERR_PTR otherwise.
+ */
+struct mii_timestamper *register_mii_timestamper(struct device_node *node,
+ unsigned int port)
+{
+ struct mii_timestamper *mii_ts = NULL;
+ struct mii_timestamping_desc *desc;
+ struct list_head *this;
+
+ mutex_lock(&tstamping_devices_lock);
+ list_for_each(this, &mii_timestamping_devices) {
+ desc = list_entry(this, struct mii_timestamping_desc, list);
+ if (desc->device->of_node == node) {
+ mii_ts = desc->ctrl->probe_channel(desc->device, port);
+ if (!IS_ERR(mii_ts)) {
+ mii_ts->device = desc->device;
+ get_device(desc->device);
+ }
+ break;
+ }
+ }
+ mutex_unlock(&tstamping_devices_lock);
+
+ return mii_ts ? mii_ts : ERR_PTR(-EPROBE_DEFER);
+}
+EXPORT_SYMBOL(register_mii_timestamper);
+
+/**
+ * unregister_mii_timestamper - Disables a given MII time stamper.
+ *
+ * @mii_ts: An interface obtained via register_mii_timestamper().
+ */
+void unregister_mii_timestamper(struct mii_timestamper *mii_ts)
+{
+ struct mii_timestamping_desc *desc;
+ struct list_head *this;
+
+ mutex_lock(&tstamping_devices_lock);
+ list_for_each(this, &mii_timestamping_devices) {
+ desc = list_entry(this, struct mii_timestamping_desc, list);
+ if (desc->device == mii_ts->device) {
+ desc->ctrl->release_channel(desc->device, mii_ts);
+ put_device(desc->device);
+ break;
+ }
+ }
+ mutex_unlock(&tstamping_devices_lock);
+}
+EXPORT_SYMBOL(unregister_mii_timestamper);
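
A consumer of this registry, typically the PHY attach path, resolves the controller by device-tree node and claims one channel; register_mii_timestamper() returns -EPROBE_DEFER until the controller has registered. A hedged sketch of such a caller; the "timestamper" phandle name is an illustrative assumption, not defined by this file:

    #include <linux/mii_timestamper.h>
    #include <linux/of.h>
    #include <linux/phy.h>

    /* Attach channel @port of the time stamping controller referenced
     * by a DT phandle to @phydev.
     */
    static int demo_attach_timestamper(struct phy_device *phydev,
                                       struct device_node *np,
                                       unsigned int port)
    {
            struct mii_timestamper *mii_ts;
            struct device_node *ts_node;

            ts_node = of_parse_phandle(np, "timestamper", 0);
            if (!ts_node)
                    return -ENODEV;

            /* -EPROBE_DEFER until the controller has registered. */
            mii_ts = register_mii_timestamper(ts_node, port);
            of_node_put(ts_node);
            if (IS_ERR(mii_ts))
                    return PTR_ERR(mii_ts);

            phydev->mii_ts = mii_ts;
            return 0;
    }
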
diff --git a/drivers/net/phy/mscc.c b/drivers/net/phy/mscc.c
index d5f8f351d9ef..937ac7da2789 100644
--- a/drivers/net/phy/mscc.c
+++ b/drivers/net/phy/mscc.c
@@ -18,6 +18,17 @@
#include <linux/netdevice.h>
#include <dt-bindings/net/mscc-phy-vsc8531.h>
+#include <linux/scatterlist.h>
+#include <crypto/skcipher.h>
+
+#if IS_ENABLED(CONFIG_MACSEC)
+#include <net/macsec.h>
+#endif
+
+#include "mscc_macsec.h"
+#include "mscc_mac.h"
+#include "mscc_fc_buffer.h"
+
enum rgmii_rx_clock_delay {
RGMII_RX_CLK_DELAY_0_2_NS = 0,
RGMII_RX_CLK_DELAY_0_8_NS = 1,
@@ -69,7 +80,7 @@ enum rgmii_rx_clock_delay {
#define MSCC_PHY_EXT_PHY_CNTL_2 24
#define MII_VSC85XX_INT_MASK 25
-#define MII_VSC85XX_INT_MASK_MASK 0xa000
+#define MII_VSC85XX_INT_MASK_MASK 0xa020
#define MII_VSC85XX_INT_MASK_WOL 0x0040
#define MII_VSC85XX_INT_STATUS 26
@@ -121,6 +132,26 @@ enum rgmii_rx_clock_delay {
#define PHY_S6G_PLL_FSM_CTRL_DATA_POS 8
#define PHY_S6G_PLL_FSM_ENA_POS 7
+#define MSCC_EXT_PAGE_MACSEC_17 17
+#define MSCC_EXT_PAGE_MACSEC_18 18
+
+#define MSCC_EXT_PAGE_MACSEC_19 19
+#define MSCC_PHY_MACSEC_19_REG_ADDR(x) (x)
+#define MSCC_PHY_MACSEC_19_TARGET(x) ((x) << 12)
+#define MSCC_PHY_MACSEC_19_READ BIT(14)
+#define MSCC_PHY_MACSEC_19_CMD BIT(15)
+
+#define MSCC_EXT_PAGE_MACSEC_20 20
+#define MSCC_PHY_MACSEC_20_TARGET(x) (x)
+enum macsec_bank {
+ FC_BUFFER = 0x04,
+ HOST_MAC = 0x05,
+ LINE_MAC = 0x06,
+ IP_1588 = 0x0e,
+ MACSEC_INGR = 0x38,
+ MACSEC_EGR = 0x3c,
+};
+
#define MSCC_EXT_PAGE_ACCESS 31
#define MSCC_PHY_PAGE_STANDARD 0x0000 /* Standard registers */
#define MSCC_PHY_PAGE_EXTENDED 0x0001 /* Extended registers */
@@ -128,6 +159,7 @@ enum rgmii_rx_clock_delay {
#define MSCC_PHY_PAGE_EXTENDED_3 0x0003 /* Extended reg - page 3 */
#define MSCC_PHY_PAGE_EXTENDED_4 0x0004 /* Extended reg - page 4 */
#define MSCC_PHY_PAGE_CSR_CNTL MSCC_PHY_PAGE_EXTENDED_4
+#define MSCC_PHY_PAGE_MACSEC MSCC_PHY_PAGE_EXTENDED_4
/* Extended reg - GPIO; this is a bank of registers that are shared for all PHYs
* in the same package.
*/
@@ -175,6 +207,9 @@ enum rgmii_rx_clock_delay {
#define SECURE_ON_ENABLE 0x8000
#define SECURE_ON_PASSWD_LEN_4 0x4000
+#define MSCC_PHY_EXTENDED_INT 28
+#define MSCC_PHY_EXTENDED_INT_MS_EGR BIT(9)
+
/* Extended Page 3 Registers */
#define MSCC_PHY_SERDES_TX_VALID_CNT 21
#define MSCC_PHY_SERDES_TX_CRC_ERR_CNT 22
@@ -411,6 +446,44 @@ static const struct vsc85xx_hw_stat vsc8584_hw_stats[] = {
},
};
+#if IS_ENABLED(CONFIG_MACSEC)
+struct macsec_flow {
+ struct list_head list;
+ enum mscc_macsec_destination_ports port;
+ enum macsec_bank bank;
+ u32 index;
+ int assoc_num;
+ bool has_transformation;
+
+ /* Highest takes precedence [0..15] */
+ u8 priority;
+
+ u8 key[MACSEC_KEYID_LEN];
+
+ union {
+ struct macsec_rx_sa *rx_sa;
+ struct macsec_tx_sa *tx_sa;
+ };
+
+ /* Matching */
+ struct {
+ u8 sci:1;
+ u8 tagged:1;
+ u8 untagged:1;
+ u8 etype:1;
+ } match;
+
+ u16 etype;
+
+ /* Action */
+ struct {
+ u8 bypass:1;
+ u8 drop:1;
+ } action;
+
+};
+#endif
+
struct vsc8531_private {
int rate_magic;
u16 supp_led_modes;
@@ -424,6 +497,19 @@ struct vsc8531_private {
* package.
*/
unsigned int base_addr;
+
+#if IS_ENABLED(CONFIG_MACSEC)
+ /* MACsec fields:
+ * - One SecY per device (enforced at the s/w implementation level)
+ * - macsec_flows: list of h/w flows
+ * - ingr_flows: bitmap of ingress flows
+ * - egr_flows: bitmap of egress flows
+ */
+ struct macsec_secy *secy;
+ struct list_head macsec_flows;
+ unsigned long ingr_flows;
+ unsigned long egr_flows;
+#endif
};
#ifdef CONFIG_OF_MDIO
@@ -1584,6 +1670,978 @@ out:
return ret;
}
+#if IS_ENABLED(CONFIG_MACSEC)
+static u32 vsc8584_macsec_phy_read(struct phy_device *phydev,
+ enum macsec_bank bank, u32 reg)
+{
+ u32 val, val_l = 0, val_h = 0;
+ unsigned long deadline;
+ int rc;
+
+ rc = phy_select_page(phydev, MSCC_PHY_PAGE_MACSEC);
+ if (rc < 0)
+ goto failed;
+
+ __phy_write(phydev, MSCC_EXT_PAGE_MACSEC_20,
+ MSCC_PHY_MACSEC_20_TARGET(bank >> 2));
+
+ if (bank >> 2 == 0x1)
+ /* non-MACsec access */
+ bank &= 0x3;
+ else
+ bank = 0;
+
+ __phy_write(phydev, MSCC_EXT_PAGE_MACSEC_19,
+ MSCC_PHY_MACSEC_19_CMD | MSCC_PHY_MACSEC_19_READ |
+ MSCC_PHY_MACSEC_19_REG_ADDR(reg) |
+ MSCC_PHY_MACSEC_19_TARGET(bank));
+
+ deadline = jiffies + msecs_to_jiffies(PROC_CMD_NCOMPLETED_TIMEOUT_MS);
+ do {
+ val = __phy_read(phydev, MSCC_EXT_PAGE_MACSEC_19);
+ } while (time_before(jiffies, deadline) && !(val & MSCC_PHY_MACSEC_19_CMD));
+
+ val_l = __phy_read(phydev, MSCC_EXT_PAGE_MACSEC_17);
+ val_h = __phy_read(phydev, MSCC_EXT_PAGE_MACSEC_18);
+
+failed:
+ phy_restore_page(phydev, rc, rc);
+
+ return (val_h << 16) | val_l;
+}
+
+static void vsc8584_macsec_phy_write(struct phy_device *phydev,
+ enum macsec_bank bank, u32 reg, u32 val)
+{
+ unsigned long deadline;
+ int rc;
+
+ rc = phy_select_page(phydev, MSCC_PHY_PAGE_MACSEC);
+ if (rc < 0)
+ goto failed;
+
+ __phy_write(phydev, MSCC_EXT_PAGE_MACSEC_20,
+ MSCC_PHY_MACSEC_20_TARGET(bank >> 2));
+
+ if ((bank >> 2 == 0x1) || (bank >> 2 == 0x3))
+ bank &= 0x3;
+ else
+ /* MACsec access */
+ bank = 0;
+
+ __phy_write(phydev, MSCC_EXT_PAGE_MACSEC_17, (u16)val);
+ __phy_write(phydev, MSCC_EXT_PAGE_MACSEC_18, (u16)(val >> 16));
+
+ __phy_write(phydev, MSCC_EXT_PAGE_MACSEC_19,
+ MSCC_PHY_MACSEC_19_CMD | MSCC_PHY_MACSEC_19_REG_ADDR(reg) |
+ MSCC_PHY_MACSEC_19_TARGET(bank));
+
+ deadline = jiffies + msecs_to_jiffies(PROC_CMD_NCOMPLETED_TIMEOUT_MS);
+ do {
+ val = __phy_read(phydev, MSCC_EXT_PAGE_MACSEC_19);
+ } while (time_before(jiffies, deadline) && !(val & MSCC_PHY_MACSEC_19_CMD));
+
+failed:
+ phy_restore_page(phydev, rc, rc);
+}
+
+static void vsc8584_macsec_classification(struct phy_device *phydev,
+ enum macsec_bank bank)
+{
+ /* enable VLAN tag parsing */
+ vsc8584_macsec_phy_write(phydev, bank, MSCC_MS_SAM_CP_TAG,
+ MSCC_MS_SAM_CP_TAG_PARSE_STAG |
+ MSCC_MS_SAM_CP_TAG_PARSE_QTAG |
+ MSCC_MS_SAM_CP_TAG_PARSE_QINQ);
+}
+
+static void vsc8584_macsec_flow_default_action(struct phy_device *phydev,
+ enum macsec_bank bank,
+ bool block)
+{
+ u32 port = (bank == MACSEC_INGR) ?
+ MSCC_MS_PORT_UNCONTROLLED : MSCC_MS_PORT_COMMON;
+ u32 action = MSCC_MS_FLOW_BYPASS;
+
+ if (block)
+ action = MSCC_MS_FLOW_DROP;
+
+ vsc8584_macsec_phy_write(phydev, bank, MSCC_MS_SAM_NM_FLOW_NCP,
+ /* MACsec untagged */
+ MSCC_MS_SAM_NM_FLOW_NCP_UNTAGGED_FLOW_TYPE(action) |
+ MSCC_MS_SAM_NM_FLOW_NCP_UNTAGGED_DROP_ACTION(MSCC_MS_ACTION_DROP) |
+ MSCC_MS_SAM_NM_FLOW_NCP_UNTAGGED_DEST_PORT(port) |
+ /* MACsec tagged */
+ MSCC_MS_SAM_NM_FLOW_NCP_TAGGED_FLOW_TYPE(action) |
+ MSCC_MS_SAM_NM_FLOW_NCP_TAGGED_DROP_ACTION(MSCC_MS_ACTION_DROP) |
+ MSCC_MS_SAM_NM_FLOW_NCP_TAGGED_DEST_PORT(port) |
+ /* Bad tag */
+ MSCC_MS_SAM_NM_FLOW_NCP_BADTAG_FLOW_TYPE(action) |
+ MSCC_MS_SAM_NM_FLOW_NCP_BADTAG_DROP_ACTION(MSCC_MS_ACTION_DROP) |
+ MSCC_MS_SAM_NM_FLOW_NCP_BADTAG_DEST_PORT(port) |
+ /* KaY tag */
+ MSCC_MS_SAM_NM_FLOW_NCP_KAY_FLOW_TYPE(action) |
+ MSCC_MS_SAM_NM_FLOW_NCP_KAY_DROP_ACTION(MSCC_MS_ACTION_DROP) |
+ MSCC_MS_SAM_NM_FLOW_NCP_KAY_DEST_PORT(port));
+ vsc8584_macsec_phy_write(phydev, bank, MSCC_MS_SAM_NM_FLOW_CP,
+ /* MACsec untagged */
+ MSCC_MS_SAM_NM_FLOW_CP_UNTAGGED_FLOW_TYPE(action) |
+ MSCC_MS_SAM_NM_FLOW_CP_UNTAGGED_DROP_ACTION(MSCC_MS_ACTION_DROP) |
+ MSCC_MS_SAM_NM_FLOW_CP_UNTAGGED_DEST_PORT(port) |
+ /* MACsec tagged */
+ MSCC_MS_SAM_NM_FLOW_CP_TAGGED_FLOW_TYPE(action) |
+ MSCC_MS_SAM_NM_FLOW_CP_TAGGED_DROP_ACTION(MSCC_MS_ACTION_DROP) |
+ MSCC_MS_SAM_NM_FLOW_CP_TAGGED_DEST_PORT(port) |
+ /* Bad tag */
+ MSCC_MS_SAM_NM_FLOW_CP_BADTAG_FLOW_TYPE(action) |
+ MSCC_MS_SAM_NM_FLOW_CP_BADTAG_DROP_ACTION(MSCC_MS_ACTION_DROP) |
+ MSCC_MS_SAM_NM_FLOW_CP_BADTAG_DEST_PORT(port) |
+ /* KaY tag */
+ MSCC_MS_SAM_NM_FLOW_CP_KAY_FLOW_TYPE(action) |
+ MSCC_MS_SAM_NM_FLOW_CP_KAY_DROP_ACTION(MSCC_MS_ACTION_DROP) |
+ MSCC_MS_SAM_NM_FLOW_CP_KAY_DEST_PORT(port));
+}
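+
+/* Naming note (interpretation of the register map): SAM_NM_FLOW_NCP and
+ * SAM_NM_FLOW_CP hold the "non-matching" flow policy, i.e. what happens to
+ * non-control (NCP) and control (CP) packets that hit none of the SA
+ * matching rules; both get the same bypass-or-drop treatment here.
+ */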
+
+static void vsc8584_macsec_integrity_checks(struct phy_device *phydev,
+ enum macsec_bank bank)
+{
+ u32 val;
+
+ if (bank != MACSEC_INGR)
+ return;
+
+ /* Set default rules to pass unmatched frames */
+ val = vsc8584_macsec_phy_read(phydev, bank,
+ MSCC_MS_PARAMS2_IG_CC_CONTROL);
+ val |= MSCC_MS_PARAMS2_IG_CC_CONTROL_NON_MATCH_CTRL_ACT |
+ MSCC_MS_PARAMS2_IG_CC_CONTROL_NON_MATCH_ACT;
+ vsc8584_macsec_phy_write(phydev, bank, MSCC_MS_PARAMS2_IG_CC_CONTROL,
+ val);
+
+ vsc8584_macsec_phy_write(phydev, bank, MSCC_MS_PARAMS2_IG_CP_TAG,
+ MSCC_MS_PARAMS2_IG_CP_TAG_PARSE_STAG |
+ MSCC_MS_PARAMS2_IG_CP_TAG_PARSE_QTAG |
+ MSCC_MS_PARAMS2_IG_CP_TAG_PARSE_QINQ);
+}
+
+static void vsc8584_macsec_block_init(struct phy_device *phydev,
+ enum macsec_bank bank)
+{
+ u32 val;
+ int i;
+
+ vsc8584_macsec_phy_write(phydev, bank, MSCC_MS_ENA_CFG,
+ MSCC_MS_ENA_CFG_SW_RST |
+ MSCC_MS_ENA_CFG_MACSEC_BYPASS_ENA);
+
+ /* Take the MACsec block out of s/w reset and enable clocks */
+ vsc8584_macsec_phy_write(phydev, bank, MSCC_MS_ENA_CFG,
+ MSCC_MS_ENA_CFG_CLK_ENA);
+
+ vsc8584_macsec_phy_write(phydev, bank, MSCC_MS_STATUS_CONTEXT_CTRL,
+ bank == MACSEC_INGR ? 0xe5880214 : 0xe5880218);
+ vsc8584_macsec_phy_write(phydev, bank, MSCC_MS_MISC_CONTROL,
+ MSCC_MS_MISC_CONTROL_MC_LATENCY_FIX(bank == MACSEC_INGR ? 57 : 40) |
+ MSCC_MS_MISC_CONTROL_XFORM_REC_SIZE(bank == MACSEC_INGR ? 1 : 2));
+
+ /* Clear the counters */
+ val = vsc8584_macsec_phy_read(phydev, bank, MSCC_MS_COUNT_CONTROL);
+ val |= MSCC_MS_COUNT_CONTROL_AUTO_CNTR_RESET;
+ vsc8584_macsec_phy_write(phydev, bank, MSCC_MS_COUNT_CONTROL, val);
+
+ /* Enable octet increment mode */
+ vsc8584_macsec_phy_write(phydev, bank, MSCC_MS_PP_CTRL,
+ MSCC_MS_PP_CTRL_MACSEC_OCTET_INCR_MODE);
+
+ vsc8584_macsec_phy_write(phydev, bank, MSCC_MS_BLOCK_CTX_UPDATE, 0x3);
+
+ val = vsc8584_macsec_phy_read(phydev, bank, MSCC_MS_COUNT_CONTROL);
+ val |= MSCC_MS_COUNT_CONTROL_RESET_ALL;
+ vsc8584_macsec_phy_write(phydev, bank, MSCC_MS_COUNT_CONTROL, val);
+
+ /* Set the MTU */
+ vsc8584_macsec_phy_write(phydev, bank, MSCC_MS_NON_VLAN_MTU_CHECK,
+ MSCC_MS_NON_VLAN_MTU_CHECK_NV_MTU_COMPARE(32761) |
+ MSCC_MS_NON_VLAN_MTU_CHECK_NV_MTU_COMP_DROP);
+
+ for (i = 0; i < 8; i++)
+ vsc8584_macsec_phy_write(phydev, bank, MSCC_MS_VLAN_MTU_CHECK(i),
+ MSCC_MS_VLAN_MTU_CHECK_MTU_COMPARE(32761) |
+ MSCC_MS_VLAN_MTU_CHECK_MTU_COMP_DROP);
+
+ if (bank == MACSEC_EGR) {
+ val = vsc8584_macsec_phy_read(phydev, bank, MSCC_MS_INTR_CTRL_STATUS);
+ val &= ~MSCC_MS_INTR_CTRL_STATUS_INTR_ENABLE_M;
+ vsc8584_macsec_phy_write(phydev, bank, MSCC_MS_INTR_CTRL_STATUS, val);
+
+ vsc8584_macsec_phy_write(phydev, bank, MSCC_MS_FC_CFG,
+ MSCC_MS_FC_CFG_FCBUF_ENA |
+ MSCC_MS_FC_CFG_LOW_THRESH(0x1) |
+ MSCC_MS_FC_CFG_HIGH_THRESH(0x4) |
+ MSCC_MS_FC_CFG_LOW_BYTES_VAL(0x4) |
+ MSCC_MS_FC_CFG_HIGH_BYTES_VAL(0x6));
+ }
+
+ vsc8584_macsec_classification(phydev, bank);
+ vsc8584_macsec_flow_default_action(phydev, bank, false);
+ vsc8584_macsec_integrity_checks(phydev, bank);
+
+ /* Enable the MACsec block */
+ vsc8584_macsec_phy_write(phydev, bank, MSCC_MS_ENA_CFG,
+ MSCC_MS_ENA_CFG_CLK_ENA |
+ MSCC_MS_ENA_CFG_MACSEC_ENA |
+ MSCC_MS_ENA_CFG_MACSEC_SPEED_MODE(0x5));
+}
+
+static void vsc8584_macsec_mac_init(struct phy_device *phydev,
+ enum macsec_bank bank)
+{
+ u32 val;
+ int i;
+
+ /* Clear host & line stats */
+ for (i = 0; i < 36; i++)
+ vsc8584_macsec_phy_write(phydev, bank, 0x1c + i, 0);
+
+ val = vsc8584_macsec_phy_read(phydev, bank,
+ MSCC_MAC_PAUSE_CFG_TX_FRAME_CTRL);
+ val &= ~MSCC_MAC_PAUSE_CFG_TX_FRAME_CTRL_PAUSE_MODE_M;
+ val |= MSCC_MAC_PAUSE_CFG_TX_FRAME_CTRL_PAUSE_MODE(2) |
+ MSCC_MAC_PAUSE_CFG_TX_FRAME_CTRL_PAUSE_VALUE(0xffff);
+ vsc8584_macsec_phy_write(phydev, bank,
+ MSCC_MAC_PAUSE_CFG_TX_FRAME_CTRL, val);
+
+ val = vsc8584_macsec_phy_read(phydev, bank,
+ MSCC_MAC_PAUSE_CFG_TX_FRAME_CTRL_2);
+ val |= 0xffff;
+ vsc8584_macsec_phy_write(phydev, bank,
+ MSCC_MAC_PAUSE_CFG_TX_FRAME_CTRL_2, val);
+
+ val = vsc8584_macsec_phy_read(phydev, bank,
+ MSCC_MAC_PAUSE_CFG_RX_FRAME_CTRL);
+ if (bank == HOST_MAC)
+ val |= MSCC_MAC_PAUSE_CFG_RX_FRAME_CTRL_PAUSE_TIMER_ENA |
+ MSCC_MAC_PAUSE_CFG_RX_FRAME_CTRL_PAUSE_FRAME_DROP_ENA;
+ else
+ val |= MSCC_MAC_PAUSE_CFG_RX_FRAME_CTRL_PAUSE_REACT_ENA |
+ MSCC_MAC_PAUSE_CFG_RX_FRAME_CTRL_PAUSE_FRAME_DROP_ENA |
+ MSCC_MAC_PAUSE_CFG_RX_FRAME_CTRL_PAUSE_MODE |
+ MSCC_MAC_PAUSE_CFG_RX_FRAME_CTRL_EARLY_PAUSE_DETECT_ENA;
+ vsc8584_macsec_phy_write(phydev, bank,
+ MSCC_MAC_PAUSE_CFG_RX_FRAME_CTRL, val);
+
+ vsc8584_macsec_phy_write(phydev, bank, MSCC_MAC_CFG_PKTINF_CFG,
+ MSCC_MAC_CFG_PKTINF_CFG_STRIP_FCS_ENA |
+ MSCC_MAC_CFG_PKTINF_CFG_INSERT_FCS_ENA |
+ MSCC_MAC_CFG_PKTINF_CFG_LPI_RELAY_ENA |
+ MSCC_MAC_CFG_PKTINF_CFG_STRIP_PREAMBLE_ENA |
+ MSCC_MAC_CFG_PKTINF_CFG_INSERT_PREAMBLE_ENA |
+ (bank == HOST_MAC ?
+ MSCC_MAC_CFG_PKTINF_CFG_ENABLE_TX_PADDING : 0));
+
+ val = vsc8584_macsec_phy_read(phydev, bank, MSCC_MAC_CFG_MODE_CFG);
+ val &= ~MSCC_MAC_CFG_MODE_CFG_DISABLE_DIC;
+ vsc8584_macsec_phy_write(phydev, bank, MSCC_MAC_CFG_MODE_CFG, val);
+
+ val = vsc8584_macsec_phy_read(phydev, bank, MSCC_MAC_CFG_MAXLEN_CFG);
+ val &= ~MSCC_MAC_CFG_MAXLEN_CFG_MAX_LEN_M;
+ val |= MSCC_MAC_CFG_MAXLEN_CFG_MAX_LEN(10240);
+ vsc8584_macsec_phy_write(phydev, bank, MSCC_MAC_CFG_MAXLEN_CFG, val);
+
+ vsc8584_macsec_phy_write(phydev, bank, MSCC_MAC_CFG_ADV_CHK_CFG,
+ MSCC_MAC_CFG_ADV_CHK_CFG_SFD_CHK_ENA |
+ MSCC_MAC_CFG_ADV_CHK_CFG_PRM_CHK_ENA |
+ MSCC_MAC_CFG_ADV_CHK_CFG_OOR_ERR_ENA |
+ MSCC_MAC_CFG_ADV_CHK_CFG_INR_ERR_ENA);
+
+ val = vsc8584_macsec_phy_read(phydev, bank, MSCC_MAC_CFG_LFS_CFG);
+ val &= ~MSCC_MAC_CFG_LFS_CFG_LFS_MODE_ENA;
+ vsc8584_macsec_phy_write(phydev, bank, MSCC_MAC_CFG_LFS_CFG, val);
+
+ vsc8584_macsec_phy_write(phydev, bank, MSCC_MAC_CFG_ENA_CFG,
+ MSCC_MAC_CFG_ENA_CFG_RX_CLK_ENA |
+ MSCC_MAC_CFG_ENA_CFG_TX_CLK_ENA |
+ MSCC_MAC_CFG_ENA_CFG_RX_ENA |
+ MSCC_MAC_CFG_ENA_CFG_TX_ENA);
+}
+
+/* Must be called with mdio_lock taken */
+static int vsc8584_macsec_init(struct phy_device *phydev)
+{
+ u32 val;
+
+ vsc8584_macsec_block_init(phydev, MACSEC_INGR);
+ vsc8584_macsec_block_init(phydev, MACSEC_EGR);
+ vsc8584_macsec_mac_init(phydev, HOST_MAC);
+ vsc8584_macsec_mac_init(phydev, LINE_MAC);
+
+ vsc8584_macsec_phy_write(phydev, FC_BUFFER,
+ MSCC_FCBUF_FC_READ_THRESH_CFG,
+ MSCC_FCBUF_FC_READ_THRESH_CFG_TX_THRESH(4) |
+ MSCC_FCBUF_FC_READ_THRESH_CFG_RX_THRESH(5));
+
+ val = vsc8584_macsec_phy_read(phydev, FC_BUFFER, MSCC_FCBUF_MODE_CFG);
+ val |= MSCC_FCBUF_MODE_CFG_PAUSE_GEN_ENA |
+ MSCC_FCBUF_MODE_CFG_RX_PPM_RATE_ADAPT_ENA |
+ MSCC_FCBUF_MODE_CFG_TX_PPM_RATE_ADAPT_ENA;
+ vsc8584_macsec_phy_write(phydev, FC_BUFFER, MSCC_FCBUF_MODE_CFG, val);
+
+ vsc8584_macsec_phy_write(phydev, FC_BUFFER, MSCC_FCBUF_PPM_RATE_ADAPT_THRESH_CFG,
+ MSCC_FCBUF_PPM_RATE_ADAPT_THRESH_CFG_TX_THRESH(8) |
+ MSCC_FCBUF_PPM_RATE_ADAPT_THRESH_CFG_TX_OFFSET(9));
+
+ val = vsc8584_macsec_phy_read(phydev, FC_BUFFER,
+ MSCC_FCBUF_TX_DATA_QUEUE_CFG);
+ val &= ~(MSCC_FCBUF_TX_DATA_QUEUE_CFG_START_M |
+ MSCC_FCBUF_TX_DATA_QUEUE_CFG_END_M);
+ val |= MSCC_FCBUF_TX_DATA_QUEUE_CFG_START(0) |
+ MSCC_FCBUF_TX_DATA_QUEUE_CFG_END(5119);
+ vsc8584_macsec_phy_write(phydev, FC_BUFFER,
+ MSCC_FCBUF_TX_DATA_QUEUE_CFG, val);
+
+ val = vsc8584_macsec_phy_read(phydev, FC_BUFFER, MSCC_FCBUF_ENA_CFG);
+ val |= MSCC_FCBUF_ENA_CFG_TX_ENA | MSCC_FCBUF_ENA_CFG_RX_ENA;
+ vsc8584_macsec_phy_write(phydev, FC_BUFFER, MSCC_FCBUF_ENA_CFG, val);
+
+ val = vsc8584_macsec_phy_read(phydev, IP_1588,
+ MSCC_PROC_0_IP_1588_TOP_CFG_STAT_MODE_CTL);
+ val &= ~MSCC_PROC_0_IP_1588_TOP_CFG_STAT_MODE_CTL_PROTOCOL_MODE_M;
+ val |= MSCC_PROC_0_IP_1588_TOP_CFG_STAT_MODE_CTL_PROTOCOL_MODE(4);
+ vsc8584_macsec_phy_write(phydev, IP_1588,
+ MSCC_PROC_0_IP_1588_TOP_CFG_STAT_MODE_CTL, val);
+
+ return 0;
+}
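+
+/* Bring-up order used above: take both MACsec engines (ingress and egress)
+ * out of reset and configure them, then the host- and line-side MACs, then
+ * the flow control buffer sitting between the two MACs, and finally switch
+ * the 1588 block to the expected protocol mode.
+ */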
+
+static void vsc8584_macsec_flow(struct phy_device *phydev,
+ struct macsec_flow *flow)
+{
+ struct vsc8531_private *priv = phydev->priv;
+ enum macsec_bank bank = flow->bank;
+ u32 val, match = 0, mask = 0, action = 0, idx = flow->index;
+
+ if (flow->match.tagged)
+ match |= MSCC_MS_SAM_MISC_MATCH_TAGGED;
+ if (flow->match.untagged)
+ match |= MSCC_MS_SAM_MISC_MATCH_UNTAGGED;
+
+ if (bank == MACSEC_INGR && flow->assoc_num >= 0) {
+ match |= MSCC_MS_SAM_MISC_MATCH_AN(flow->assoc_num);
+ mask |= MSCC_MS_SAM_MASK_AN_MASK(0x3);
+ }
+
+ if (bank == MACSEC_INGR && flow->match.sci && flow->rx_sa->sc->sci) {
+ match |= MSCC_MS_SAM_MISC_MATCH_TCI(BIT(3));
+ mask |= MSCC_MS_SAM_MASK_TCI_MASK(BIT(3)) |
+ MSCC_MS_SAM_MASK_SCI_MASK;
+
+ vsc8584_macsec_phy_write(phydev, bank, MSCC_MS_SAM_MATCH_SCI_LO(idx),
+ lower_32_bits(flow->rx_sa->sc->sci));
+ vsc8584_macsec_phy_write(phydev, bank, MSCC_MS_SAM_MATCH_SCI_HI(idx),
+ upper_32_bits(flow->rx_sa->sc->sci));
+ }
+
+ if (flow->match.etype) {
+ mask |= MSCC_MS_SAM_MASK_MAC_ETYPE_MASK;
+
+ vsc8584_macsec_phy_write(phydev, bank, MSCC_MS_SAM_MAC_SA_MATCH_HI(idx),
+ MSCC_MS_SAM_MAC_SA_MATCH_HI_ETYPE(htons(flow->etype)));
+ }
+
+ match |= MSCC_MS_SAM_MISC_MATCH_PRIORITY(flow->priority);
+
+ vsc8584_macsec_phy_write(phydev, bank, MSCC_MS_SAM_MISC_MATCH(idx), match);
+ vsc8584_macsec_phy_write(phydev, bank, MSCC_MS_SAM_MASK(idx), mask);
+
+ /* Action for matching packets */
+ if (flow->action.drop)
+ action = MSCC_MS_FLOW_DROP;
+ else if (flow->action.bypass || flow->port == MSCC_MS_PORT_UNCONTROLLED)
+ action = MSCC_MS_FLOW_BYPASS;
+ else
+ action = (bank == MACSEC_INGR) ?
+ MSCC_MS_FLOW_INGRESS : MSCC_MS_FLOW_EGRESS;
+
+ val = MSCC_MS_SAM_FLOW_CTRL_FLOW_TYPE(action) |
+ MSCC_MS_SAM_FLOW_CTRL_DROP_ACTION(MSCC_MS_ACTION_DROP) |
+ MSCC_MS_SAM_FLOW_CTRL_DEST_PORT(flow->port);
+
+ if (action == MSCC_MS_FLOW_BYPASS)
+ goto write_ctrl;
+
+ if (bank == MACSEC_INGR) {
+ if (priv->secy->replay_protect)
+ val |= MSCC_MS_SAM_FLOW_CTRL_REPLAY_PROTECT;
+ if (priv->secy->validate_frames == MACSEC_VALIDATE_STRICT)
+ val |= MSCC_MS_SAM_FLOW_CTRL_VALIDATE_FRAMES(MSCC_MS_VALIDATE_STRICT);
+ else if (priv->secy->validate_frames == MACSEC_VALIDATE_CHECK)
+ val |= MSCC_MS_SAM_FLOW_CTRL_VALIDATE_FRAMES(MSCC_MS_VALIDATE_CHECK);
+ } else if (bank == MACSEC_EGR) {
+ if (priv->secy->protect_frames)
+ val |= MSCC_MS_SAM_FLOW_CTRL_PROTECT_FRAME;
+ if (priv->secy->tx_sc.encrypt)
+ val |= MSCC_MS_SAM_FLOW_CTRL_CONF_PROTECT;
+ if (priv->secy->tx_sc.send_sci)
+ val |= MSCC_MS_SAM_FLOW_CTRL_INCLUDE_SCI;
+ }
+
+write_ctrl:
+ vsc8584_macsec_phy_write(phydev, bank, MSCC_MS_SAM_FLOW_CTRL(idx), val);
+}
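+
+/* Worked example (illustrative): for the default ingress MKA rule built in
+ * vsc8584_macsec_default_flows() below, the above resolves to match =
+ * ..._MATCH_TAGGED | ..._MATCH_UNTAGGED | ..._MATCH_PRIORITY(15), mask =
+ * ..._MAC_ETYPE_MASK with ETH_P_PAE programmed into SAM_MAC_SA_MATCH_HI, and
+ * a MSCC_MS_FLOW_BYPASS action towards the uncontrolled port, so EAPOL
+ * frames skip the MACsec engine entirely.
+ */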
+
+static struct macsec_flow *vsc8584_macsec_find_flow(struct macsec_context *ctx,
+ enum macsec_bank bank)
+{
+ struct vsc8531_private *priv = ctx->phydev->priv;
+ struct macsec_flow *pos, *tmp;
+
+ list_for_each_entry_safe(pos, tmp, &priv->macsec_flows, list)
+ if (pos->assoc_num == ctx->sa.assoc_num && pos->bank == bank)
+ return pos;
+
+ return ERR_PTR(-ENOENT);
+}
+
+static void vsc8584_macsec_flow_enable(struct phy_device *phydev,
+ struct macsec_flow *flow)
+{
+ enum macsec_bank bank = flow->bank;
+ u32 val, idx = flow->index;
+
+ if ((flow->bank == MACSEC_INGR && flow->rx_sa && !flow->rx_sa->active) ||
+ (flow->bank == MACSEC_EGR && flow->tx_sa && !flow->tx_sa->active))
+ return;
+
+ /* Enable */
+ vsc8584_macsec_phy_write(phydev, bank, MSCC_MS_SAM_ENTRY_SET1, BIT(idx));
+
+ /* Set in-use */
+ val = vsc8584_macsec_phy_read(phydev, bank, MSCC_MS_SAM_FLOW_CTRL(idx));
+ val |= MSCC_MS_SAM_FLOW_CTRL_SA_IN_USE;
+ vsc8584_macsec_phy_write(phydev, bank, MSCC_MS_SAM_FLOW_CTRL(idx), val);
+}
+
+static void vsc8584_macsec_flow_disable(struct phy_device *phydev,
+ struct macsec_flow *flow)
+{
+ enum macsec_bank bank = flow->bank;
+ u32 val, idx = flow->index;
+
+ /* Disable */
+ vsc8584_macsec_phy_write(phydev, bank, MSCC_MS_SAM_ENTRY_CLEAR1, BIT(idx));
+
+ /* Clear in-use */
+ val = vsc8584_macsec_phy_read(phydev, bank, MSCC_MS_SAM_FLOW_CTRL(idx));
+ val &= ~MSCC_MS_SAM_FLOW_CTRL_SA_IN_USE;
+ vsc8584_macsec_phy_write(phydev, bank, MSCC_MS_SAM_FLOW_CTRL(idx), val);
+}
+
+static u32 vsc8584_macsec_flow_context_id(struct macsec_flow *flow)
+{
+ if (flow->bank == MACSEC_INGR)
+ return flow->index + MSCC_MS_MAX_FLOWS;
+
+ return flow->index;
+}
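+
+/* e.g. egress flow 3 gets context ID 3 while ingress flow 3 gets
+ * 3 + MSCC_MS_MAX_FLOWS = 19, keeping context IDs unique across both banks.
+ */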
+
+/* Derive the hash authentication key from the AES key */
+static int vsc8584_macsec_derive_key(const u8 key[MACSEC_KEYID_LEN],
+ u16 key_len, u8 hkey[16])
+{
+ struct crypto_skcipher *tfm = crypto_alloc_skcipher("ecb(aes)", 0, 0);
+ struct skcipher_request *req = NULL;
+ struct scatterlist src, dst;
+ DECLARE_CRYPTO_WAIT(wait);
+ u32 input[4] = {0};
+ int ret;
+
+ if (IS_ERR(tfm))
+ return PTR_ERR(tfm);
+
+ req = skcipher_request_alloc(tfm, GFP_KERNEL);
+ if (!req) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ skcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG |
+ CRYPTO_TFM_REQ_MAY_SLEEP, crypto_req_done,
+ &wait);
+ ret = crypto_skcipher_setkey(tfm, key, key_len);
+ if (ret < 0)
+ goto out;
+
+ sg_init_one(&src, input, 16);
+ sg_init_one(&dst, hkey, 16);
+ skcipher_request_set_crypt(req, &src, &dst, 16, NULL);
+
+ ret = crypto_wait_req(crypto_skcipher_encrypt(req), &wait);
+
+out:
+ skcipher_request_free(req);
+ crypto_free_skcipher(tfm);
+ return ret;
+}
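+
+/* For reference: the all-zero 16-byte block in 'input' is encrypted with the
+ * raw key under AES-ECB, i.e. hkey = AES_K(0^128), which is the GHASH subkey
+ * H that AES-GCM uses for authentication.
+ */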
+
+static int vsc8584_macsec_transformation(struct phy_device *phydev,
+ struct macsec_flow *flow)
+{
+ struct vsc8531_private *priv = phydev->priv;
+ enum macsec_bank bank = flow->bank;
+ int i, ret, index = flow->index;
+ u32 rec = 0, control = 0;
+ u8 hkey[16];
+ sci_t sci;
+
+ ret = vsc8584_macsec_derive_key(flow->key, priv->secy->key_len, hkey);
+ if (ret)
+ return ret;
+
+ switch (priv->secy->key_len) {
+ case 16:
+ control |= CONTROL_CRYPTO_ALG(CRYPTO_ALG_AES_CTR_128);
+ break;
+ case 32:
+ control |= CONTROL_CRYPTO_ALG(CRYPTO_ALG_AES_CTR_256);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ control |= (bank == MACSEC_EGR) ?
+ (CONTROL_TYPE_EGRESS | CONTROL_AN(priv->secy->tx_sc.encoding_sa)) :
+ (CONTROL_TYPE_INGRESS | CONTROL_SEQ_MASK);
+
+ control |= CONTROL_UPDATE_SEQ | CONTROL_ENCRYPT_AUTH | CONTROL_KEY_IN_CTX |
+ CONTROL_IV0 | CONTROL_IV1 | CONTROL_IV_IN_SEQ |
+ CONTROL_DIGEST_TYPE(0x2) | CONTROL_SEQ_TYPE(0x1) |
+ CONTROL_AUTH_ALG(AUTH_ALG_AES_GHASH) | CONTROL_CONTEXT_ID;
+
+ /* Set the control word */
+ vsc8584_macsec_phy_write(phydev, bank, MSCC_MS_XFORM_REC(index, rec++),
+ control);
+
+ /* Set the context ID. Must be unique. */
+ vsc8584_macsec_phy_write(phydev, bank, MSCC_MS_XFORM_REC(index, rec++),
+ vsc8584_macsec_flow_context_id(flow));
+
+ /* Set the encryption/decryption key */
+ for (i = 0; i < priv->secy->key_len / sizeof(u32); i++)
+ vsc8584_macsec_phy_write(phydev, bank,
+ MSCC_MS_XFORM_REC(index, rec++),
+ ((u32 *)flow->key)[i]);
+
+ /* Set the authentication key */
+ for (i = 0; i < 4; i++)
+ vsc8584_macsec_phy_write(phydev, bank,
+ MSCC_MS_XFORM_REC(index, rec++),
+ ((u32 *)hkey)[i]);
+
+ /* Initial sequence number */
+ vsc8584_macsec_phy_write(phydev, bank, MSCC_MS_XFORM_REC(index, rec++),
+ bank == MACSEC_INGR ?
+ flow->rx_sa->next_pn : flow->tx_sa->next_pn);
+
+ if (bank == MACSEC_INGR)
+ /* Set the mask (replay window size) */
+ vsc8584_macsec_phy_write(phydev, bank,
+ MSCC_MS_XFORM_REC(index, rec++),
+ priv->secy->replay_window);
+
+ /* Set the input vectors */
+ sci = bank == MACSEC_INGR ? flow->rx_sa->sc->sci : priv->secy->sci;
+ vsc8584_macsec_phy_write(phydev, bank, MSCC_MS_XFORM_REC(index, rec++),
+ lower_32_bits(sci));
+ vsc8584_macsec_phy_write(phydev, bank, MSCC_MS_XFORM_REC(index, rec++),
+ upper_32_bits(sci));
+
+ while (rec < 20)
+ vsc8584_macsec_phy_write(phydev, bank, MSCC_MS_XFORM_REC(index, rec++),
+ 0);
+
+ flow->has_transformation = true;
+ return 0;
+}
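+
+/* Resulting record layout for a 128-bit key (word offsets within the 32-word
+ * transform record built above): 0 = control word, 1 = context ID, 2-5 = AES
+ * key, 6-9 = derived hash key, 10 = next_pn, then (ingress only) the replay
+ * window, then SCI low/high, zero-padded through word 19. The sequence
+ * number word thus always lands at offset 6 + key_len / 4, which is the word
+ * vsc8584_handle_interrupt() reads back to detect a PN rollover.
+ */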
+
+static struct macsec_flow *vsc8584_macsec_alloc_flow(struct vsc8531_private *priv,
+ enum macsec_bank bank)
+{
+ unsigned long *bitmap = bank == MACSEC_INGR ?
+ &priv->ingr_flows : &priv->egr_flows;
+ struct macsec_flow *flow;
+ int index;
+
+ index = find_first_zero_bit(bitmap, MSCC_MS_MAX_FLOWS);
+
+ if (index == MSCC_MS_MAX_FLOWS)
+ return ERR_PTR(-ENOMEM);
+
+ flow = kzalloc(sizeof(*flow), GFP_KERNEL);
+ if (!flow)
+ return ERR_PTR(-ENOMEM);
+
+ set_bit(index, bitmap);
+ flow->index = index;
+ flow->bank = bank;
+ flow->priority = 8;
+ flow->assoc_num = -1;
+
+ list_add_tail(&flow->list, &priv->macsec_flows);
+ return flow;
+}
+
+static void vsc8584_macsec_free_flow(struct vsc8531_private *priv,
+ struct macsec_flow *flow)
+{
+ unsigned long *bitmap = flow->bank == MACSEC_INGR ?
+ &priv->ingr_flows : &priv->egr_flows;
+
+ list_del(&flow->list);
+ clear_bit(flow->index, bitmap);
+ kfree(flow);
+}
+
+static int vsc8584_macsec_add_flow(struct phy_device *phydev,
+ struct macsec_flow *flow, bool update)
+{
+ int ret;
+
+ flow->port = MSCC_MS_PORT_CONTROLLED;
+ vsc8584_macsec_flow(phydev, flow);
+
+ if (update)
+ return 0;
+
+ ret = vsc8584_macsec_transformation(phydev, flow);
+ if (ret) {
+ vsc8584_macsec_free_flow(phydev->priv, flow);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int vsc8584_macsec_default_flows(struct phy_device *phydev)
+{
+ struct macsec_flow *flow;
+
+ /* Add a rule to let the MKA traffic go through, ingress */
+ flow = vsc8584_macsec_alloc_flow(phydev->priv, MACSEC_INGR);
+ if (IS_ERR(flow))
+ return PTR_ERR(flow);
+
+ flow->priority = 15;
+ flow->port = MSCC_MS_PORT_UNCONTROLLED;
+ flow->match.tagged = 1;
+ flow->match.untagged = 1;
+ flow->match.etype = 1;
+ flow->etype = ETH_P_PAE;
+ flow->action.bypass = 1;
+
+ vsc8584_macsec_flow(phydev, flow);
+ vsc8584_macsec_flow_enable(phydev, flow);
+
+ /* Add a rule to let the MKA traffic go through, egress */
+ flow = vsc8584_macsec_alloc_flow(phydev->priv, MACSEC_EGR);
+ if (IS_ERR(flow))
+ return PTR_ERR(flow);
+
+ flow->priority = 15;
+ flow->port = MSCC_MS_PORT_COMMON;
+ flow->match.untagged = 1;
+ flow->match.etype = 1;
+ flow->etype = ETH_P_PAE;
+ flow->action.bypass = 1;
+
+ vsc8584_macsec_flow(phydev, flow);
+ vsc8584_macsec_flow_enable(phydev, flow);
+
+ return 0;
+}
+
+static void vsc8584_macsec_del_flow(struct phy_device *phydev,
+ struct macsec_flow *flow)
+{
+ vsc8584_macsec_flow_disable(phydev, flow);
+ vsc8584_macsec_free_flow(phydev->priv, flow);
+}
+
+static int __vsc8584_macsec_add_rxsa(struct macsec_context *ctx,
+ struct macsec_flow *flow, bool update)
+{
+ struct phy_device *phydev = ctx->phydev;
+ struct vsc8531_private *priv = phydev->priv;
+
+ if (!flow) {
+ flow = vsc8584_macsec_alloc_flow(priv, MACSEC_INGR);
+ if (IS_ERR(flow))
+ return PTR_ERR(flow);
+
+ memcpy(flow->key, ctx->sa.key, priv->secy->key_len);
+ }
+
+ flow->assoc_num = ctx->sa.assoc_num;
+ flow->rx_sa = ctx->sa.rx_sa;
+
+ /* Always match tagged packets on ingress */
+ flow->match.tagged = 1;
+ flow->match.sci = 1;
+
+ if (priv->secy->validate_frames != MACSEC_VALIDATE_DISABLED)
+ flow->match.untagged = 1;
+
+ return vsc8584_macsec_add_flow(phydev, flow, update);
+}
+
+static int __vsc8584_macsec_add_txsa(struct macsec_context *ctx,
+ struct macsec_flow *flow, bool update)
+{
+ struct phy_device *phydev = ctx->phydev;
+ struct vsc8531_private *priv = phydev->priv;
+
+ if (!flow) {
+ flow = vsc8584_macsec_alloc_flow(priv, MACSEC_EGR);
+ if (IS_ERR(flow))
+ return PTR_ERR(flow);
+
+ memcpy(flow->key, ctx->sa.key, priv->secy->key_len);
+ }
+
+ flow->assoc_num = ctx->sa.assoc_num;
+ flow->tx_sa = ctx->sa.tx_sa;
+
+ /* Always match untagged packets on egress */
+ flow->match.untagged = 1;
+
+ return vsc8584_macsec_add_flow(phydev, flow, update);
+}
+
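+/* The mdo_* callbacks below follow the two-phase contract of the MACsec
+ * offload API: the core invokes each op once with ctx->prepare set (the
+ * preparation step, where resources are allocated and errors may be
+ * returned) and then a second time with it cleared (the commit step, which
+ * is expected not to fail).
+ */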
+static int vsc8584_macsec_dev_open(struct macsec_context *ctx)
+{
+ struct vsc8531_private *priv = ctx->phydev->priv;
+ struct macsec_flow *flow, *tmp;
+
+ /* No operation to perform before the commit step */
+ if (ctx->prepare)
+ return 0;
+
+ list_for_each_entry_safe(flow, tmp, &priv->macsec_flows, list)
+ vsc8584_macsec_flow_enable(ctx->phydev, flow);
+
+ return 0;
+}
+
+static int vsc8584_macsec_dev_stop(struct macsec_context *ctx)
+{
+ struct vsc8531_private *priv = ctx->phydev->priv;
+ struct macsec_flow *flow, *tmp;
+
+ /* No operation to perform before the commit step */
+ if (ctx->prepare)
+ return 0;
+
+ list_for_each_entry_safe(flow, tmp, &priv->macsec_flows, list)
+ vsc8584_macsec_flow_disable(ctx->phydev, flow);
+
+ return 0;
+}
+
+static int vsc8584_macsec_add_secy(struct macsec_context *ctx)
+{
+ struct vsc8531_private *priv = ctx->phydev->priv;
+ struct macsec_secy *secy = ctx->secy;
+
+ if (ctx->prepare) {
+ if (priv->secy)
+ return -EEXIST;
+
+ return 0;
+ }
+
+ priv->secy = secy;
+
+ vsc8584_macsec_flow_default_action(ctx->phydev, MACSEC_EGR,
+ secy->validate_frames != MACSEC_VALIDATE_DISABLED);
+ vsc8584_macsec_flow_default_action(ctx->phydev, MACSEC_INGR,
+ secy->validate_frames != MACSEC_VALIDATE_DISABLED);
+
+ return vsc8584_macsec_default_flows(ctx->phydev);
+}
+
+static int vsc8584_macsec_del_secy(struct macsec_context *ctx)
+{
+ struct vsc8531_private *priv = ctx->phydev->priv;
+ struct macsec_flow *flow, *tmp;
+
+ /* No operation to perform before the commit step */
+ if (ctx->prepare)
+ return 0;
+
+ list_for_each_entry_safe(flow, tmp, &priv->macsec_flows, list)
+ vsc8584_macsec_del_flow(ctx->phydev, flow);
+
+ vsc8584_macsec_flow_default_action(ctx->phydev, MACSEC_EGR, false);
+ vsc8584_macsec_flow_default_action(ctx->phydev, MACSEC_INGR, false);
+
+ priv->secy = NULL;
+ return 0;
+}
+
+static int vsc8584_macsec_upd_secy(struct macsec_context *ctx)
+{
+ /* No operation to perform before the commit step */
+ if (ctx->prepare)
+ return 0;
+
+ vsc8584_macsec_del_secy(ctx);
+ return vsc8584_macsec_add_secy(ctx);
+}
+
+static int vsc8584_macsec_add_rxsc(struct macsec_context *ctx)
+{
+ /* Nothing to do */
+ return 0;
+}
+
+static int vsc8584_macsec_upd_rxsc(struct macsec_context *ctx)
+{
+ return -EOPNOTSUPP;
+}
+
+static int vsc8584_macsec_del_rxsc(struct macsec_context *ctx)
+{
+ struct vsc8531_private *priv = ctx->phydev->priv;
+ struct macsec_flow *flow, *tmp;
+
+ /* No operation to perform before the commit step */
+ if (ctx->prepare)
+ return 0;
+
+ list_for_each_entry_safe(flow, tmp, &priv->macsec_flows, list) {
+ if (flow->bank == MACSEC_INGR && flow->rx_sa &&
+ flow->rx_sa->sc->sci == ctx->rx_sc->sci)
+ vsc8584_macsec_del_flow(ctx->phydev, flow);
+ }
+
+ return 0;
+}
+
+static int vsc8584_macsec_add_rxsa(struct macsec_context *ctx)
+{
+ struct macsec_flow *flow = NULL;
+
+ if (ctx->prepare)
+ return __vsc8584_macsec_add_rxsa(ctx, flow, false);
+
+ flow = vsc8584_macsec_find_flow(ctx, MACSEC_INGR);
+ if (IS_ERR(flow))
+ return PTR_ERR(flow);
+
+ vsc8584_macsec_flow_enable(ctx->phydev, flow);
+ return 0;
+}
+
+static int vsc8584_macsec_upd_rxsa(struct macsec_context *ctx)
+{
+ struct macsec_flow *flow;
+
+ flow = vsc8584_macsec_find_flow(ctx, MACSEC_INGR);
+ if (IS_ERR(flow))
+ return PTR_ERR(flow);
+
+ if (ctx->prepare) {
+ /* Make sure the flow is disabled before updating it */
+ vsc8584_macsec_flow_disable(ctx->phydev, flow);
+
+ return __vsc8584_macsec_add_rxsa(ctx, flow, true);
+ }
+
+ vsc8584_macsec_flow_enable(ctx->phydev, flow);
+ return 0;
+}
+
+static int vsc8584_macsec_del_rxsa(struct macsec_context *ctx)
+{
+ struct macsec_flow *flow;
+
+ flow = vsc8584_macsec_find_flow(ctx, MACSEC_INGR);
+
+ if (IS_ERR(flow))
+ return PTR_ERR(flow);
+ if (ctx->prepare)
+ return 0;
+
+ vsc8584_macsec_del_flow(ctx->phydev, flow);
+ return 0;
+}
+
+static int vsc8584_macsec_add_txsa(struct macsec_context *ctx)
+{
+ struct macsec_flow *flow = NULL;
+
+ if (ctx->prepare)
+ return __vsc8584_macsec_add_txsa(ctx, flow, false);
+
+ flow = vsc8584_macsec_find_flow(ctx, MACSEC_EGR);
+ if (IS_ERR(flow))
+ return PTR_ERR(flow);
+
+ vsc8584_macsec_flow_enable(ctx->phydev, flow);
+ return 0;
+}
+
+static int vsc8584_macsec_upd_txsa(struct macsec_context *ctx)
+{
+ struct macsec_flow *flow;
+
+ flow = vsc8584_macsec_find_flow(ctx, MACSEC_EGR);
+ if (IS_ERR(flow))
+ return PTR_ERR(flow);
+
+ if (ctx->prepare) {
+ /* Make sure the flow is disabled before updating it */
+ vsc8584_macsec_flow_disable(ctx->phydev, flow);
+
+ return __vsc8584_macsec_add_txsa(ctx, flow, true);
+ }
+
+ vsc8584_macsec_flow_enable(ctx->phydev, flow);
+ return 0;
+}
+
+static int vsc8584_macsec_del_txsa(struct macsec_context *ctx)
+{
+ struct macsec_flow *flow;
+
+ flow = vsc8584_macsec_find_flow(ctx, MACSEC_EGR);
+
+ if (IS_ERR(flow))
+ return PTR_ERR(flow);
+ if (ctx->prepare)
+ return 0;
+
+ vsc8584_macsec_del_flow(ctx->phydev, flow);
+ return 0;
+}
+
+static struct macsec_ops vsc8584_macsec_ops = {
+ .mdo_dev_open = vsc8584_macsec_dev_open,
+ .mdo_dev_stop = vsc8584_macsec_dev_stop,
+ .mdo_add_secy = vsc8584_macsec_add_secy,
+ .mdo_upd_secy = vsc8584_macsec_upd_secy,
+ .mdo_del_secy = vsc8584_macsec_del_secy,
+ .mdo_add_rxsc = vsc8584_macsec_add_rxsc,
+ .mdo_upd_rxsc = vsc8584_macsec_upd_rxsc,
+ .mdo_del_rxsc = vsc8584_macsec_del_rxsc,
+ .mdo_add_rxsa = vsc8584_macsec_add_rxsa,
+ .mdo_upd_rxsa = vsc8584_macsec_upd_rxsa,
+ .mdo_del_rxsa = vsc8584_macsec_del_rxsa,
+ .mdo_add_txsa = vsc8584_macsec_add_txsa,
+ .mdo_upd_txsa = vsc8584_macsec_upd_txsa,
+ .mdo_del_txsa = vsc8584_macsec_del_txsa,
+};
+#endif /* CONFIG_MACSEC */
+
/* Check if one PHY has already done the init of the parts common to all PHYs
* in the Quad PHY package.
*/
@@ -1733,6 +2791,24 @@ static int vsc8584_config_init(struct phy_device *phydev)
mutex_unlock(&phydev->mdio.bus->mdio_lock);
+#if IS_ENABLED(CONFIG_MACSEC)
+ /* MACsec */
+ switch (phydev->phy_id & phydev->drv->phy_id_mask) {
+ case PHY_ID_VSC856X:
+ case PHY_ID_VSC8575:
+ case PHY_ID_VSC8582:
+ case PHY_ID_VSC8584:
+ INIT_LIST_HEAD(&vsc8531->macsec_flows);
+ vsc8531->secy = NULL;
+
+ phydev->macsec_ops = &vsc8584_macsec_ops;
+
+ ret = vsc8584_macsec_init(phydev);
+ if (ret)
+ goto err;
+ }
+#endif
+
phy_write(phydev, MSCC_EXT_PAGE_ACCESS, MSCC_PHY_PAGE_STANDARD);
val = phy_read(phydev, MSCC_PHY_EXT_PHY_CNTL_1);
@@ -1758,6 +2834,43 @@ err:
return ret;
}
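+/* PN rollover handling sketch: the egress engine updates next_pn in place in
+ * each transform record and raises the ROLLOVER interrupt when a packet
+ * number is exhausted; the handler below looks for the egress SA whose
+ * packet number reads back as 0xffffffff, disables its flow and asks the
+ * MACsec core to rotate keys via macsec_pn_wrapped().
+ */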
+static int vsc8584_handle_interrupt(struct phy_device *phydev)
+{
+#if IS_ENABLED(CONFIG_MACSEC)
+ struct vsc8531_private *priv = phydev->priv;
+ struct macsec_flow *flow, *tmp;
+ u32 cause, rec;
+
+ /* Check MACsec PN rollover */
+ cause = vsc8584_macsec_phy_read(phydev, MACSEC_EGR,
+ MSCC_MS_INTR_CTRL_STATUS);
+ cause &= MSCC_MS_INTR_CTRL_STATUS_INTR_CLR_STATUS_M;
+ if (!(cause & MACSEC_INTR_CTRL_STATUS_ROLLOVER))
+ goto skip_rollover;
+
+ rec = 6 + priv->secy->key_len / sizeof(u32);
+ list_for_each_entry_safe(flow, tmp, &priv->macsec_flows, list) {
+ u32 val;
+
+ if (flow->bank != MACSEC_EGR || !flow->has_transformation)
+ continue;
+
+ val = vsc8584_macsec_phy_read(phydev, MACSEC_EGR,
+ MSCC_MS_XFORM_REC(flow->index, rec));
+ if (val == 0xffffffff) {
+ vsc8584_macsec_flow_disable(phydev, flow);
+ macsec_pn_wrapped(priv->secy, flow->tx_sa);
+ break;
+ }
+ }
+
+skip_rollover:
+#endif
+
+ phy_mac_interrupt(phydev);
+ return 0;
+}
+
static int vsc85xx_config_init(struct phy_device *phydev)
{
int rc, i, phy_id;
@@ -2201,6 +3314,20 @@ static int vsc85xx_config_intr(struct phy_device *phydev)
int rc;
if (phydev->interrupts == PHY_INTERRUPT_ENABLED) {
+#if IS_ENABLED(CONFIG_MACSEC)
+ phy_write(phydev, MSCC_EXT_PAGE_ACCESS,
+ MSCC_PHY_PAGE_EXTENDED_2);
+ phy_write(phydev, MSCC_PHY_EXTENDED_INT,
+ MSCC_PHY_EXTENDED_INT_MS_EGR);
+ phy_write(phydev, MSCC_EXT_PAGE_ACCESS,
+ MSCC_PHY_PAGE_STANDARD);
+
+ vsc8584_macsec_phy_write(phydev, MACSEC_EGR,
+ MSCC_MS_AIC_CTRL, 0xf);
+ vsc8584_macsec_phy_write(phydev, MACSEC_EGR,
+ MSCC_MS_INTR_CTRL_STATUS,
+ MSCC_MS_INTR_CTRL_STATUS_INTR_ENABLE(MACSEC_INTR_CTRL_STATUS_ROLLOVER));
+#endif
rc = phy_write(phydev, MII_VSC85XX_INT_MASK,
MII_VSC85XX_INT_MASK_MASK);
} else {
@@ -2404,7 +3531,6 @@ static struct phy_driver vsc85xx_driver[] = {
.soft_reset = &genphy_soft_reset,
.config_init = &vsc85xx_config_init,
.config_aneg = &vsc85xx_config_aneg,
- .aneg_done = &genphy_aneg_done,
.read_status = &vsc85xx_read_status,
.ack_interrupt = &vsc85xx_ack_interrupt,
.config_intr = &vsc85xx_config_intr,
@@ -2429,7 +3555,6 @@ static struct phy_driver vsc85xx_driver[] = {
.soft_reset = &genphy_soft_reset,
.config_init = &vsc85xx_config_init,
.config_aneg = &vsc85xx_config_aneg,
- .aneg_done = &genphy_aneg_done,
.read_status = &vsc85xx_read_status,
.ack_interrupt = &vsc85xx_ack_interrupt,
.config_intr = &vsc85xx_config_intr,
@@ -2454,7 +3579,6 @@ static struct phy_driver vsc85xx_driver[] = {
.soft_reset = &genphy_soft_reset,
.config_init = &vsc85xx_config_init,
.config_aneg = &vsc85xx_config_aneg,
- .aneg_done = &genphy_aneg_done,
.read_status = &vsc85xx_read_status,
.ack_interrupt = &vsc85xx_ack_interrupt,
.config_intr = &vsc85xx_config_intr,
@@ -2479,7 +3603,6 @@ static struct phy_driver vsc85xx_driver[] = {
.soft_reset = &genphy_soft_reset,
.config_init = &vsc85xx_config_init,
.config_aneg = &vsc85xx_config_aneg,
- .aneg_done = &genphy_aneg_done,
.read_status = &vsc85xx_read_status,
.ack_interrupt = &vsc85xx_ack_interrupt,
.config_intr = &vsc85xx_config_intr,
@@ -2504,7 +3627,6 @@ static struct phy_driver vsc85xx_driver[] = {
.soft_reset = &genphy_soft_reset,
.config_init = &vsc8584_config_init,
.config_aneg = &vsc85xx_config_aneg,
- .aneg_done = &genphy_aneg_done,
.read_status = &vsc85xx_read_status,
.ack_interrupt = &vsc85xx_ack_interrupt,
.config_intr = &vsc85xx_config_intr,
@@ -2530,7 +3652,6 @@ static struct phy_driver vsc85xx_driver[] = {
.soft_reset = &genphy_soft_reset,
.config_init = &vsc8584_config_init,
.config_aneg = &vsc85xx_config_aneg,
- .aneg_done = &genphy_aneg_done,
.read_status = &vsc85xx_read_status,
.ack_interrupt = &vsc85xx_ack_interrupt,
.config_intr = &vsc85xx_config_intr,
@@ -2556,6 +3677,7 @@ static struct phy_driver vsc85xx_driver[] = {
.config_aneg = &vsc85xx_config_aneg,
.aneg_done = &genphy_aneg_done,
.read_status = &vsc85xx_read_status,
+ .handle_interrupt = &vsc8584_handle_interrupt,
.ack_interrupt = &vsc85xx_ack_interrupt,
.config_intr = &vsc85xx_config_intr,
.did_interrupt = &vsc8584_did_interrupt,
@@ -2608,6 +3730,7 @@ static struct phy_driver vsc85xx_driver[] = {
.config_aneg = &vsc85xx_config_aneg,
.aneg_done = &genphy_aneg_done,
.read_status = &vsc85xx_read_status,
+ .handle_interrupt = &vsc8584_handle_interrupt,
.ack_interrupt = &vsc85xx_ack_interrupt,
.config_intr = &vsc85xx_config_intr,
.did_interrupt = &vsc8584_did_interrupt,
@@ -2632,6 +3755,7 @@ static struct phy_driver vsc85xx_driver[] = {
.config_aneg = &vsc85xx_config_aneg,
.aneg_done = &genphy_aneg_done,
.read_status = &vsc85xx_read_status,
+ .handle_interrupt = &vsc8584_handle_interrupt,
.ack_interrupt = &vsc85xx_ack_interrupt,
.config_intr = &vsc85xx_config_intr,
.did_interrupt = &vsc8584_did_interrupt,
@@ -2656,6 +3780,7 @@ static struct phy_driver vsc85xx_driver[] = {
.config_aneg = &vsc85xx_config_aneg,
.aneg_done = &genphy_aneg_done,
.read_status = &vsc85xx_read_status,
+ .handle_interrupt = &vsc8584_handle_interrupt,
.ack_interrupt = &vsc85xx_ack_interrupt,
.config_intr = &vsc85xx_config_intr,
.did_interrupt = &vsc8584_did_interrupt,
diff --git a/drivers/net/phy/mscc_fc_buffer.h b/drivers/net/phy/mscc_fc_buffer.h
new file mode 100644
index 000000000000..7e9c0e877895
--- /dev/null
+++ b/drivers/net/phy/mscc_fc_buffer.h
@@ -0,0 +1,64 @@
+/* SPDX-License-Identifier: (GPL-2.0 OR MIT) */
+/*
+ * Microsemi Ocelot Switch driver
+ *
+ * Copyright (C) 2019 Microsemi Corporation
+ */
+
+#ifndef _MSCC_OCELOT_FC_BUFFER_H_
+#define _MSCC_OCELOT_FC_BUFFER_H_
+
+#define MSCC_FCBUF_ENA_CFG 0x00
+#define MSCC_FCBUF_MODE_CFG 0x01
+#define MSCC_FCBUF_PPM_RATE_ADAPT_THRESH_CFG 0x02
+#define MSCC_FCBUF_TX_CTRL_QUEUE_CFG 0x03
+#define MSCC_FCBUF_TX_DATA_QUEUE_CFG 0x04
+#define MSCC_FCBUF_RX_DATA_QUEUE_CFG 0x05
+#define MSCC_FCBUF_TX_BUFF_XON_XOFF_THRESH_CFG 0x06
+#define MSCC_FCBUF_FC_READ_THRESH_CFG 0x07
+#define MSCC_FCBUF_TX_FRM_GAP_COMP 0x08
+
+#define MSCC_FCBUF_ENA_CFG_TX_ENA BIT(0)
+#define MSCC_FCBUF_ENA_CFG_RX_ENA BIT(4)
+
+#define MSCC_FCBUF_MODE_CFG_DROP_BEHAVIOUR BIT(4)
+#define MSCC_FCBUF_MODE_CFG_PAUSE_REACT_ENA BIT(8)
+#define MSCC_FCBUF_MODE_CFG_RX_PPM_RATE_ADAPT_ENA BIT(12)
+#define MSCC_FCBUF_MODE_CFG_TX_PPM_RATE_ADAPT_ENA BIT(16)
+#define MSCC_FCBUF_MODE_CFG_TX_CTRL_QUEUE_ENA BIT(20)
+#define MSCC_FCBUF_MODE_CFG_PAUSE_GEN_ENA BIT(24)
+#define MSCC_FCBUF_MODE_CFG_INCLUDE_PAUSE_RCVD_IN_PAUSE_GEN BIT(28)
+
+#define MSCC_FCBUF_PPM_RATE_ADAPT_THRESH_CFG_TX_THRESH(x) (x)
+#define MSCC_FCBUF_PPM_RATE_ADAPT_THRESH_CFG_TX_THRESH_M GENMASK(15, 0)
+#define MSCC_FCBUF_PPM_RATE_ADAPT_THRESH_CFG_TX_OFFSET(x) ((x) << 16)
+#define MSCC_FCBUF_PPM_RATE_ADAPT_THRESH_CFG_TX_OFFSET_M GENMASK(19, 16)
+#define MSCC_FCBUF_PPM_RATE_ADAPT_THRESH_CFG_RX_THRESH(x) ((x) << 20)
+#define MSCC_FCBUF_PPM_RATE_ADAPT_THRESH_CFG_RX_THRESH_M GENMASK(31, 20)
+
+#define MSCC_FCBUF_TX_CTRL_QUEUE_CFG_START(x) (x)
+#define MSCC_FCBUF_TX_CTRL_QUEUE_CFG_START_M GENMASK(15, 0)
+#define MSCC_FCBUF_TX_CTRL_QUEUE_CFG_END(x) ((x) << 16)
+#define MSCC_FCBUF_TX_CTRL_QUEUE_CFG_END_M GENMASK(31, 16)
+
+#define MSCC_FCBUF_TX_DATA_QUEUE_CFG_START(x) (x)
+#define MSCC_FCBUF_TX_DATA_QUEUE_CFG_START_M GENMASK(15, 0)
+#define MSCC_FCBUF_TX_DATA_QUEUE_CFG_END(x) ((x) << 16)
+#define MSCC_FCBUF_TX_DATA_QUEUE_CFG_END_M GENMASK(31, 16)
+
+#define MSCC_FCBUF_RX_DATA_QUEUE_CFG_START(x) (x)
+#define MSCC_FCBUF_RX_DATA_QUEUE_CFG_START_M GENMASK(15, 0)
+#define MSCC_FCBUF_RX_DATA_QUEUE_CFG_END(x) ((x) << 16)
+#define MSCC_FCBUF_RX_DATA_QUEUE_CFG_END_M GENMASK(31, 16)
+
+#define MSCC_FCBUF_TX_BUFF_XON_XOFF_THRESH_CFG_XOFF_THRESH(x) (x)
+#define MSCC_FCBUF_TX_BUFF_XON_XOFF_THRESH_CFG_XOFF_THRESH_M GENMASK(15, 0)
+#define MSCC_FCBUF_TX_BUFF_XON_XOFF_THRESH_CFG_XON_THRESH(x) ((x) << 16)
+#define MSCC_FCBUF_TX_BUFF_XON_XOFF_THRESH_CFG_XON_THRESH_M GENMASK(31, 16)
+
+#define MSCC_FCBUF_FC_READ_THRESH_CFG_TX_THRESH(x) (x)
+#define MSCC_FCBUF_FC_READ_THRESH_CFG_TX_THRESH_M GENMASK(15, 0)
+#define MSCC_FCBUF_FC_READ_THRESH_CFG_RX_THRESH(x) ((x) << 16)
+#define MSCC_FCBUF_FC_READ_THRESH_CFG_RX_THRESH_M GENMASK(31, 16)
+
+#endif
diff --git a/drivers/net/phy/mscc_mac.h b/drivers/net/phy/mscc_mac.h
new file mode 100644
index 000000000000..9420ee5175a6
--- /dev/null
+++ b/drivers/net/phy/mscc_mac.h
@@ -0,0 +1,159 @@
+/* SPDX-License-Identifier: (GPL-2.0 OR MIT) */
+/*
+ * Microsemi Ocelot Switch driver
+ *
+ * Copyright (c) 2017 Microsemi Corporation
+ */
+
+#ifndef _MSCC_OCELOT_LINE_MAC_H_
+#define _MSCC_OCELOT_LINE_MAC_H_
+
+#define MSCC_MAC_CFG_ENA_CFG 0x00
+#define MSCC_MAC_CFG_MODE_CFG 0x01
+#define MSCC_MAC_CFG_MAXLEN_CFG 0x02
+#define MSCC_MAC_CFG_NUM_TAGS_CFG 0x03
+#define MSCC_MAC_CFG_TAGS_CFG 0x04
+#define MSCC_MAC_CFG_ADV_CHK_CFG 0x07
+#define MSCC_MAC_CFG_LFS_CFG 0x08
+#define MSCC_MAC_CFG_LB_CFG 0x09
+#define MSCC_MAC_CFG_PKTINF_CFG 0x0a
+#define MSCC_MAC_PAUSE_CFG_TX_FRAME_CTRL 0x0b
+#define MSCC_MAC_PAUSE_CFG_TX_FRAME_CTRL_2 0x0c
+#define MSCC_MAC_PAUSE_CFG_RX_FRAME_CTRL 0x0d
+#define MSCC_MAC_PAUSE_CFG_STATE 0x0e
+#define MSCC_MAC_PAUSE_CFG_MAC_ADDRESS_LSB 0x0f
+#define MSCC_MAC_PAUSE_CFG_MAC_ADDRESS_MSB 0x10
+#define MSCC_MAC_STATUS_RX_LANE_STICKY_0 0x11
+#define MSCC_MAC_STATUS_RX_LANE_STICKY_1 0x12
+#define MSCC_MAC_STATUS_TX_MONITOR_STICKY 0x13
+#define MSCC_MAC_STATUS_TX_MONITOR_STICKY_MASK 0x14
+#define MSCC_MAC_STATUS_STICKY 0x15
+#define MSCC_MAC_STATUS_STICKY_MASK 0x16
+#define MSCC_MAC_STATS_32BIT_RX_HIH_CKSM_ERR_CNT 0x17
+#define MSCC_MAC_STATS_32BIT_RX_XGMII_PROT_ERR_CNT 0x18
+#define MSCC_MAC_STATS_32BIT_RX_SYMBOL_ERR_CNT 0x19
+#define MSCC_MAC_STATS_32BIT_RX_PAUSE_CNT 0x1a
+#define MSCC_MAC_STATS_32BIT_RX_UNSUP_OPCODE_CNT 0x1b
+#define MSCC_MAC_STATS_32BIT_RX_UC_CNT 0x1c
+#define MSCC_MAC_STATS_32BIT_RX_MC_CNT 0x1d
+#define MSCC_MAC_STATS_32BIT_RX_BC_CNT 0x1e
+#define MSCC_MAC_STATS_32BIT_RX_CRC_ERR_CNT 0x1f
+#define MSCC_MAC_STATS_32BIT_RX_UNDERSIZE_CNT 0x20
+#define MSCC_MAC_STATS_32BIT_RX_FRAGMENTS_CNT 0x21
+#define MSCC_MAC_STATS_32BIT_RX_IN_RANGE_LEN_ERR_CNT 0x22
+#define MSCC_MAC_STATS_32BIT_RX_OUT_OF_RANGE_LEN_ERR_CNT 0x23
+#define MSCC_MAC_STATS_32BIT_RX_OVERSIZE_CNT 0x24
+#define MSCC_MAC_STATS_32BIT_RX_JABBERS_CNT 0x25
+#define MSCC_MAC_STATS_32BIT_RX_SIZE64_CNT 0x26
+#define MSCC_MAC_STATS_32BIT_RX_SIZE65TO127_CNT 0x27
+#define MSCC_MAC_STATS_32BIT_RX_SIZE128TO255_CNT 0x28
+#define MSCC_MAC_STATS_32BIT_RX_SIZE256TO511_CNT 0x29
+#define MSCC_MAC_STATS_32BIT_RX_SIZE512TO1023_CNT 0x2a
+#define MSCC_MAC_STATS_32BIT_RX_SIZE1024TO1518_CNT 0x2b
+#define MSCC_MAC_STATS_32BIT_RX_SIZE1519TOMAX_CNT 0x2c
+#define MSCC_MAC_STATS_32BIT_RX_IPG_SHRINK_CNT 0x2d
+#define MSCC_MAC_STATS_32BIT_TX_PAUSE_CNT 0x2e
+#define MSCC_MAC_STATS_32BIT_TX_UC_CNT 0x2f
+#define MSCC_MAC_STATS_32BIT_TX_MC_CNT 0x30
+#define MSCC_MAC_STATS_32BIT_TX_BC_CNT 0x31
+#define MSCC_MAC_STATS_32BIT_TX_SIZE64_CNT 0x32
+#define MSCC_MAC_STATS_32BIT_TX_SIZE65TO127_CNT 0x33
+#define MSCC_MAC_STATS_32BIT_TX_SIZE128TO255_CNT 0x34
+#define MSCC_MAC_STATS_32BIT_TX_SIZE256TO511_CNT 0x35
+#define MSCC_MAC_STATS_32BIT_TX_SIZE512TO1023_CNT 0x36
+#define MSCC_MAC_STATS_32BIT_TX_SIZE1024TO1518_CNT 0x37
+#define MSCC_MAC_STATS_32BIT_TX_SIZE1519TOMAX_CNT 0x38
+#define MSCC_MAC_STATS_40BIT_RX_BAD_BYTES_CNT 0x39
+#define MSCC_MAC_STATS_40BIT_RX_BAD_BYTES_MSB_CNT 0x3a
+#define MSCC_MAC_STATS_40BIT_RX_OK_BYTES_CNT 0x3b
+#define MSCC_MAC_STATS_40BIT_RX_OK_BYTES_MSB_CNT 0x3c
+#define MSCC_MAC_STATS_40BIT_RX_IN_BYTES_CNT 0x3d
+#define MSCC_MAC_STATS_40BIT_RX_IN_BYTES_MSB_CNT 0x3e
+#define MSCC_MAC_STATS_40BIT_TX_OK_BYTES_CNT 0x3f
+#define MSCC_MAC_STATS_40BIT_TX_OK_BYTES_MSB_CNT 0x40
+#define MSCC_MAC_STATS_40BIT_TX_OUT_BYTES_CNT 0x41
+#define MSCC_MAC_STATS_40BIT_TX_OUT_BYTES_MSB_CNT 0x42
+
+#define MSCC_MAC_CFG_ENA_CFG_RX_CLK_ENA BIT(0)
+#define MSCC_MAC_CFG_ENA_CFG_TX_CLK_ENA BIT(4)
+#define MSCC_MAC_CFG_ENA_CFG_RX_SW_RST BIT(8)
+#define MSCC_MAC_CFG_ENA_CFG_TX_SW_RST BIT(12)
+#define MSCC_MAC_CFG_ENA_CFG_RX_ENA BIT(16)
+#define MSCC_MAC_CFG_ENA_CFG_TX_ENA BIT(20)
+
+#define MSCC_MAC_CFG_MODE_CFG_FORCE_CW_UPDATE_INTERVAL(x) ((x) << 20)
+#define MSCC_MAC_CFG_MODE_CFG_FORCE_CW_UPDATE_INTERVAL_M GENMASK(29, 20)
+#define MSCC_MAC_CFG_MODE_CFG_FORCE_CW_UPDATE BIT(16)
+#define MSCC_MAC_CFG_MODE_CFG_TUNNEL_PAUSE_FRAMES BIT(14)
+#define MSCC_MAC_CFG_MODE_CFG_MAC_PREAMBLE_CFG(x) ((x) << 10)
+#define MSCC_MAC_CFG_MODE_CFG_MAC_PREAMBLE_CFG_M GENMASK(12, 10)
+#define MSCC_MAC_CFG_MODE_CFG_MAC_IPG_CFG BIT(6)
+#define MSCC_MAC_CFG_MODE_CFG_XGMII_GEN_MODE_ENA BIT(4)
+#define MSCC_MAC_CFG_MODE_CFG_HIH_CRC_CHECK BIT(2)
+#define MSCC_MAC_CFG_MODE_CFG_UNDERSIZED_FRAME_DROP_DIS BIT(1)
+#define MSCC_MAC_CFG_MODE_CFG_DISABLE_DIC BIT(0)
+
+#define MSCC_MAC_CFG_MAXLEN_CFG_MAX_LEN_TAG_CHK BIT(16)
+#define MSCC_MAC_CFG_MAXLEN_CFG_MAX_LEN(x) (x)
+#define MSCC_MAC_CFG_MAXLEN_CFG_MAX_LEN_M GENMASK(15, 0)
+
+#define MSCC_MAC_CFG_TAGS_CFG_RSZ 0x4
+#define MSCC_MAC_CFG_TAGS_CFG_TAG_ID(x) ((x) << 16)
+#define MSCC_MAC_CFG_TAGS_CFG_TAG_ID_M GENMASK(31, 16)
+#define MSCC_MAC_CFG_TAGS_CFG_TAG_ENA BIT(4)
+
+#define MSCC_MAC_CFG_ADV_CHK_CFG_EXT_EOP_CHK_ENA BIT(24)
+#define MSCC_MAC_CFG_ADV_CHK_CFG_EXT_SOP_CHK_ENA BIT(20)
+#define MSCC_MAC_CFG_ADV_CHK_CFG_SFD_CHK_ENA BIT(16)
+#define MSCC_MAC_CFG_ADV_CHK_CFG_PRM_SHK_CHK_DIS BIT(12)
+#define MSCC_MAC_CFG_ADV_CHK_CFG_PRM_CHK_ENA BIT(8)
+#define MSCC_MAC_CFG_ADV_CHK_CFG_OOR_ERR_ENA BIT(4)
+#define MSCC_MAC_CFG_ADV_CHK_CFG_INR_ERR_ENA BIT(0)
+
+#define MSCC_MAC_CFG_LFS_CFG_LFS_INH_TX BIT(8)
+#define MSCC_MAC_CFG_LFS_CFG_LFS_DIS_TX BIT(4)
+#define MSCC_MAC_CFG_LFS_CFG_LFS_UNIDIR_ENA BIT(3)
+#define MSCC_MAC_CFG_LFS_CFG_USE_LEADING_EDGE_DETECT BIT(2)
+#define MSCC_MAC_CFG_LFS_CFG_SPURIOUS_Q_DIS BIT(1)
+#define MSCC_MAC_CFG_LFS_CFG_LFS_MODE_ENA BIT(0)
+
+#define MSCC_MAC_CFG_LB_CFG_XGMII_HOST_LB_ENA BIT(4)
+#define MSCC_MAC_CFG_LB_CFG_XGMII_PHY_LB_ENA BIT(0)
+
+#define MSCC_MAC_CFG_PKTINF_CFG_STRIP_FCS_ENA BIT(0)
+#define MSCC_MAC_CFG_PKTINF_CFG_INSERT_FCS_ENA BIT(4)
+#define MSCC_MAC_CFG_PKTINF_CFG_STRIP_PREAMBLE_ENA BIT(8)
+#define MSCC_MAC_CFG_PKTINF_CFG_INSERT_PREAMBLE_ENA BIT(12)
+#define MSCC_MAC_CFG_PKTINF_CFG_LPI_RELAY_ENA BIT(16)
+#define MSCC_MAC_CFG_PKTINF_CFG_LF_RELAY_ENA BIT(20)
+#define MSCC_MAC_CFG_PKTINF_CFG_RF_RELAY_ENA BIT(24)
+#define MSCC_MAC_CFG_PKTINF_CFG_ENABLE_TX_PADDING BIT(25)
+#define MSCC_MAC_CFG_PKTINF_CFG_ENABLE_RX_PADDING BIT(26)
+#define MSCC_MAC_CFG_PKTINF_CFG_ENABLE_4BYTE_PREAMBLE BIT(27)
+#define MSCC_MAC_CFG_PKTINF_CFG_MACSEC_BYPASS_NUM_PTP_STALL_CLKS(x) ((x) << 28)
+#define MSCC_MAC_CFG_PKTINF_CFG_MACSEC_BYPASS_NUM_PTP_STALL_CLKS_M GENMASK(30, 28)
+
+#define MSCC_MAC_PAUSE_CFG_TX_FRAME_CTRL_PAUSE_VALUE(x) ((x) << 16)
+#define MSCC_MAC_PAUSE_CFG_TX_FRAME_CTRL_PAUSE_VALUE_M GENMASK(31, 16)
+#define MSCC_MAC_PAUSE_CFG_TX_FRAME_CTRL_WAIT_FOR_LPI_LOW BIT(12)
+#define MSCC_MAC_PAUSE_CFG_TX_FRAME_CTRL_USE_PAUSE_STALL_ENA BIT(8)
+#define MSCC_MAC_PAUSE_CFG_TX_FRAME_CTRL_PAUSE_REPL_MODE BIT(4)
+#define MSCC_MAC_PAUSE_CFG_TX_FRAME_CTRL_PAUSE_FRC_FRAME BIT(2)
+#define MSCC_MAC_PAUSE_CFG_TX_FRAME_CTRL_PAUSE_MODE(x) (x)
+#define MSCC_MAC_PAUSE_CFG_TX_FRAME_CTRL_PAUSE_MODE_M GENMASK(1, 0)
+
+#define MSCC_MAC_PAUSE_CFG_RX_FRAME_CTRL_EARLY_PAUSE_DETECT_ENA BIT(16)
+#define MSCC_MAC_PAUSE_CFG_RX_FRAME_CTRL_PRE_CRC_MODE BIT(20)
+#define MSCC_MAC_PAUSE_CFG_RX_FRAME_CTRL_PAUSE_TIMER_ENA BIT(12)
+#define MSCC_MAC_PAUSE_CFG_RX_FRAME_CTRL_PAUSE_REACT_ENA BIT(8)
+#define MSCC_MAC_PAUSE_CFG_RX_FRAME_CTRL_PAUSE_FRAME_DROP_ENA BIT(4)
+#define MSCC_MAC_PAUSE_CFG_RX_FRAME_CTRL_PAUSE_MODE BIT(0)
+
+#define MSCC_MAC_PAUSE_CFG_STATE_PAUSE_STATE BIT(0)
+#define MSCC_MAC_PAUSE_CFG_STATE_MAC_TX_PAUSE_GEN BIT(4)
+
+#define MSCC_PROC_0_IP_1588_TOP_CFG_STAT_MODE_CTL 0x2
+#define MSCC_PROC_0_IP_1588_TOP_CFG_STAT_MODE_CTL_PROTOCOL_MODE(x) (x)
+#define MSCC_PROC_0_IP_1588_TOP_CFG_STAT_MODE_CTL_PROTOCOL_MODE_M GENMASK(2, 0)
+
+#endif /* _MSCC_OCELOT_LINE_MAC_H_ */
diff --git a/drivers/net/phy/mscc_macsec.h b/drivers/net/phy/mscc_macsec.h
new file mode 100644
index 000000000000..d9ab6aba7482
--- /dev/null
+++ b/drivers/net/phy/mscc_macsec.h
@@ -0,0 +1,266 @@
+/* SPDX-License-Identifier: (GPL-2.0 OR MIT) */
+/*
+ * Microsemi Ocelot Switch driver
+ *
+ * Copyright (c) 2018 Microsemi Corporation
+ */
+
+#ifndef _MSCC_OCELOT_MACSEC_H_
+#define _MSCC_OCELOT_MACSEC_H_
+
+#define MSCC_MS_MAX_FLOWS 16
+
+#define CONTROL_TYPE_EGRESS 0x6
+#define CONTROL_TYPE_INGRESS 0xf
+#define CONTROL_IV0 BIT(5)
+#define CONTROL_IV1 BIT(6)
+#define CONTROL_IV2 BIT(7)
+#define CONTROL_UPDATE_SEQ BIT(13)
+#define CONTROL_IV_IN_SEQ BIT(14)
+#define CONTROL_ENCRYPT_AUTH BIT(15)
+#define CONTROL_KEY_IN_CTX BIT(16)
+#define CONTROL_CRYPTO_ALG(x) ((x) << 17)
+#define CRYPTO_ALG_AES_CTR_128 0x5
+#define CRYPTO_ALG_AES_CTR_192 0x6
+#define CRYPTO_ALG_AES_CTR_256 0x7
+#define CONTROL_DIGEST_TYPE(x) ((x) << 21)
+#define CONTROL_AUTH_ALG(x) ((x) << 23)
+#define AUTH_ALG_AES_GHASH 0x4
+#define CONTROL_AN(x) ((x) << 26)
+#define CONTROL_SEQ_TYPE(x) ((x) << 28)
+#define CONTROL_SEQ_MASK BIT(30)
+#define CONTROL_CONTEXT_ID BIT(31)
+
+enum mscc_macsec_destination_ports {
+ MSCC_MS_PORT_COMMON = 0,
+ MSCC_MS_PORT_RSVD = 1,
+ MSCC_MS_PORT_CONTROLLED = 2,
+ MSCC_MS_PORT_UNCONTROLLED = 3,
+};
+
+enum mscc_macsec_drop_actions {
+ MSCC_MS_ACTION_BYPASS_CRC = 0,
+ MSCC_MS_ACTION_BYPASS_BAD = 1,
+ MSCC_MS_ACTION_DROP = 2,
+ MSCC_MS_ACTION_BYPASS = 3,
+};
+
+enum mscc_macsec_flow_types {
+ MSCC_MS_FLOW_BYPASS = 0,
+ MSCC_MS_FLOW_DROP = 1,
+ MSCC_MS_FLOW_INGRESS = 2,
+ MSCC_MS_FLOW_EGRESS = 3,
+};
+
+enum mscc_macsec_validate_levels {
+ MSCC_MS_VALIDATE_DISABLED = 0,
+ MSCC_MS_VALIDATE_CHECK = 1,
+ MSCC_MS_VALIDATE_STRICT = 2,
+};
+
+#define MSCC_MS_XFORM_REC(x, y) (((x) << 5) + (y))
+#define MSCC_MS_ENA_CFG 0x800
+#define MSCC_MS_FC_CFG 0x804
+#define MSCC_MS_SAM_MAC_SA_MATCH_LO(x) (0x1000 + ((x) << 4))
+#define MSCC_MS_SAM_MAC_SA_MATCH_HI(x) (0x1001 + ((x) << 4))
+#define MSCC_MS_SAM_MISC_MATCH(x) (0x1004 + ((x) << 4))
+#define MSCC_MS_SAM_MATCH_SCI_LO(x) (0x1005 + ((x) << 4))
+#define MSCC_MS_SAM_MATCH_SCI_HI(x) (0x1006 + ((x) << 4))
+#define MSCC_MS_SAM_MASK(x) (0x1007 + ((x) << 4))
+#define MSCC_MS_SAM_ENTRY_SET1 0x1808
+#define MSCC_MS_SAM_ENTRY_CLEAR1 0x180c
+#define MSCC_MS_SAM_FLOW_CTRL(x) (0x1c00 + (x))
+#define MSCC_MS_SAM_CP_TAG 0x1e40
+#define MSCC_MS_SAM_NM_FLOW_NCP 0x1e51
+#define MSCC_MS_SAM_NM_FLOW_CP 0x1e52
+#define MSCC_MS_MISC_CONTROL 0x1e5f
+#define MSCC_MS_COUNT_CONTROL 0x3204
+#define MSCC_MS_PARAMS2_IG_CC_CONTROL 0x3a10
+#define MSCC_MS_PARAMS2_IG_CP_TAG 0x3a14
+#define MSCC_MS_VLAN_MTU_CHECK(x) (0x3c40 + (x))
+#define MSCC_MS_NON_VLAN_MTU_CHECK 0x3c48
+#define MSCC_MS_PP_CTRL 0x3c4b
+#define MSCC_MS_STATUS_CONTEXT_CTRL 0x3d02
+#define MSCC_MS_INTR_CTRL_STATUS 0x3d04
+#define MSCC_MS_BLOCK_CTX_UPDATE 0x3d0c
+#define MSCC_MS_AIC_CTRL 0x3e02
+
+/* MACSEC_ENA_CFG */
+#define MSCC_MS_ENA_CFG_CLK_ENA BIT(0)
+#define MSCC_MS_ENA_CFG_SW_RST BIT(1)
+#define MSCC_MS_ENA_CFG_MACSEC_BYPASS_ENA BIT(8)
+#define MSCC_MS_ENA_CFG_MACSEC_ENA BIT(9)
+#define MSCC_MS_ENA_CFG_MACSEC_SPEED_MODE(x) ((x) << 10)
+#define MSCC_MS_ENA_CFG_MACSEC_SPEED_MODE_M GENMASK(12, 10)
+
+/* MACSEC_FC_CFG */
+#define MSCC_MS_FC_CFG_FCBUF_ENA BIT(0)
+#define MSCC_MS_FC_CFG_USE_PKT_EXPANSION_INDICATION BIT(1)
+#define MSCC_MS_FC_CFG_LOW_THRESH(x) ((x) << 4)
+#define MSCC_MS_FC_CFG_LOW_THRESH_M GENMASK(7, 4)
+#define MSCC_MS_FC_CFG_HIGH_THRESH(x) ((x) << 8)
+#define MSCC_MS_FC_CFG_HIGH_THRESH_M GENMASK(11, 8)
+#define MSCC_MS_FC_CFG_LOW_BYTES_VAL(x) ((x) << 12)
+#define MSCC_MS_FC_CFG_LOW_BYTES_VAL_M GENMASK(14, 12)
+#define MSCC_MS_FC_CFG_HIGH_BYTES_VAL(x) ((x) << 16)
+#define MSCC_MS_FC_CFG_HIGH_BYTES_VAL_M GENMASK(18, 16)
+
+/* MSCC_MS_SAM_MAC_SA_MATCH_HI */
+#define MSCC_MS_SAM_MAC_SA_MATCH_HI_ETYPE(x) ((x) << 16)
+#define MSCC_MS_SAM_MAC_SA_MATCH_HI_ETYPE_M GENMASK(31, 16)
+
+/* MACSEC_SAM_MISC_MATCH */
+#define MSCC_MS_SAM_MISC_MATCH_VLAN_VALID BIT(0)
+#define MSCC_MS_SAM_MISC_MATCH_QINQ_FOUND BIT(1)
+#define MSCC_MS_SAM_MISC_MATCH_STAG_VALID BIT(2)
+#define MSCC_MS_SAM_MISC_MATCH_QTAG_VALID BIT(3)
+#define MSCC_MS_SAM_MISC_MATCH_VLAN_UP(x) ((x) << 4)
+#define MSCC_MS_SAM_MISC_MATCH_VLAN_UP_M GENMASK(6, 4)
+#define MSCC_MS_SAM_MISC_MATCH_CONTROL_PACKET BIT(7)
+#define MSCC_MS_SAM_MISC_MATCH_UNTAGGED BIT(8)
+#define MSCC_MS_SAM_MISC_MATCH_TAGGED BIT(9)
+#define MSCC_MS_SAM_MISC_MATCH_BAD_TAG BIT(10)
+#define MSCC_MS_SAM_MISC_MATCH_KAY_TAG BIT(11)
+#define MSCC_MS_SAM_MISC_MATCH_SOURCE_PORT(x) ((x) << 12)
+#define MSCC_MS_SAM_MISC_MATCH_SOURCE_PORT_M GENMASK(13, 12)
+#define MSCC_MS_SAM_MISC_MATCH_PRIORITY(x) ((x) << 16)
+#define MSCC_MS_SAM_MISC_MATCH_PRIORITY_M GENMASK(19, 16)
+#define MSCC_MS_SAM_MISC_MATCH_AN(x) ((x) << 24)
+#define MSCC_MS_SAM_MISC_MATCH_TCI(x) ((x) << 26)
+
+/* MACSEC_SAM_MASK */
+#define MSCC_MS_SAM_MASK_MAC_SA_MASK(x) (x)
+#define MSCC_MS_SAM_MASK_MAC_SA_MASK_M GENMASK(5, 0)
+#define MSCC_MS_SAM_MASK_MAC_DA_MASK(x) ((x) << 6)
+#define MSCC_MS_SAM_MASK_MAC_DA_MASK_M GENMASK(11, 6)
+#define MSCC_MS_SAM_MASK_MAC_ETYPE_MASK BIT(12)
+#define MSCC_MS_SAM_MASK_VLAN_VLD_MASK BIT(13)
+#define MSCC_MS_SAM_MASK_QINQ_FOUND_MASK BIT(14)
+#define MSCC_MS_SAM_MASK_STAG_VLD_MASK BIT(15)
+#define MSCC_MS_SAM_MASK_QTAG_VLD_MASK BIT(16)
+#define MSCC_MS_SAM_MASK_VLAN_UP_MASK BIT(17)
+#define MSCC_MS_SAM_MASK_VLAN_ID_MASK BIT(18)
+#define MSCC_MS_SAM_MASK_SOURCE_PORT_MASK BIT(19)
+#define MSCC_MS_SAM_MASK_CTL_PACKET_MASK BIT(20)
+#define MSCC_MS_SAM_MASK_VLAN_UP_INNER_MASK BIT(21)
+#define MSCC_MS_SAM_MASK_VLAN_ID_INNER_MASK BIT(22)
+#define MSCC_MS_SAM_MASK_SCI_MASK BIT(23)
+#define MSCC_MS_SAM_MASK_AN_MASK(x) ((x) << 24)
+#define MSCC_MS_SAM_MASK_TCI_MASK(x) ((x) << 26)
+
+/* MACSEC_SAM_FLOW_CTRL_EGR */
+#define MSCC_MS_SAM_FLOW_CTRL_FLOW_TYPE(x) (x)
+#define MSCC_MS_SAM_FLOW_CTRL_FLOW_TYPE_M GENMASK(1, 0)
+#define MSCC_MS_SAM_FLOW_CTRL_DEST_PORT(x) ((x) << 2)
+#define MSCC_MS_SAM_FLOW_CTRL_DEST_PORT_M GENMASK(3, 2)
+#define MSCC_MS_SAM_FLOW_CTRL_RESV_4 BIT(4)
+#define MSCC_MS_SAM_FLOW_CTRL_FLOW_CRYPT_AUTH BIT(5)
+#define MSCC_MS_SAM_FLOW_CTRL_DROP_ACTION(x) ((x) << 6)
+#define MSCC_MS_SAM_FLOW_CTRL_DROP_ACTION_M GENMASK(7, 6)
+#define MSCC_MS_SAM_FLOW_CTRL_RESV_15_TO_8(x) ((x) << 8)
+#define MSCC_MS_SAM_FLOW_CTRL_RESV_15_TO_8_M GENMASK(15, 8)
+#define MSCC_MS_SAM_FLOW_CTRL_PROTECT_FRAME BIT(16)
+#define MSCC_MS_SAM_FLOW_CTRL_REPLAY_PROTECT BIT(16)
+#define MSCC_MS_SAM_FLOW_CTRL_SA_IN_USE BIT(17)
+#define MSCC_MS_SAM_FLOW_CTRL_INCLUDE_SCI BIT(18)
+#define MSCC_MS_SAM_FLOW_CTRL_USE_ES BIT(19)
+#define MSCC_MS_SAM_FLOW_CTRL_USE_SCB BIT(20)
+#define MSCC_MS_SAM_FLOW_CTRL_VALIDATE_FRAMES(x) ((x) << 19)
+#define MSCC_MS_SAM_FLOW_CTRL_TAG_BYPASS_SIZE(x) ((x) << 21)
+#define MSCC_MS_SAM_FLOW_CTRL_TAG_BYPASS_SIZE_M GENMASK(22, 21)
+#define MSCC_MS_SAM_FLOW_CTRL_RESV_23 BIT(23)
+#define MSCC_MS_SAM_FLOW_CTRL_CONFIDENTIALITY_OFFSET(x) ((x) << 24)
+#define MSCC_MS_SAM_FLOW_CTRL_CONFIDENTIALITY_OFFSET_M GENMASK(30, 24)
+#define MSCC_MS_SAM_FLOW_CTRL_CONF_PROTECT BIT(31)
+
+/* MACSEC_SAM_CP_TAG */
+#define MSCC_MS_SAM_CP_TAG_MAP_TBL(x) (x)
+#define MSCC_MS_SAM_CP_TAG_MAP_TBL_M GENMASK(23, 0)
+#define MSCC_MS_SAM_CP_TAG_DEF_UP(x) ((x) << 24)
+#define MSCC_MS_SAM_CP_TAG_DEF_UP_M GENMASK(26, 24)
+#define MSCC_MS_SAM_CP_TAG_STAG_UP_EN BIT(27)
+#define MSCC_MS_SAM_CP_TAG_QTAG_UP_EN BIT(28)
+#define MSCC_MS_SAM_CP_TAG_PARSE_QINQ BIT(29)
+#define MSCC_MS_SAM_CP_TAG_PARSE_STAG BIT(30)
+#define MSCC_MS_SAM_CP_TAG_PARSE_QTAG BIT(31)
+
+/* MACSEC_SAM_NM_FLOW_NCP */
+#define MSCC_MS_SAM_NM_FLOW_NCP_UNTAGGED_FLOW_TYPE(x) (x)
+#define MSCC_MS_SAM_NM_FLOW_NCP_UNTAGGED_DEST_PORT(x) ((x) << 2)
+#define MSCC_MS_SAM_NM_FLOW_NCP_UNTAGGED_DROP_ACTION(x) ((x) << 6)
+#define MSCC_MS_SAM_NM_FLOW_NCP_TAGGED_FLOW_TYPE(x) ((x) << 8)
+#define MSCC_MS_SAM_NM_FLOW_NCP_TAGGED_DEST_PORT(x) ((x) << 10)
+#define MSCC_MS_SAM_NM_FLOW_NCP_TAGGED_DROP_ACTION(x) ((x) << 14)
+#define MSCC_MS_SAM_NM_FLOW_NCP_BADTAG_FLOW_TYPE(x) ((x) << 16)
+#define MSCC_MS_SAM_NM_FLOW_NCP_BADTAG_DEST_PORT(x) ((x) << 18)
+#define MSCC_MS_SAM_NM_FLOW_NCP_BADTAG_DROP_ACTION(x) ((x) << 22)
+#define MSCC_MS_SAM_NM_FLOW_NCP_KAY_FLOW_TYPE(x) ((x) << 24)
+#define MSCC_MS_SAM_NM_FLOW_NCP_KAY_DEST_PORT(x) ((x) << 26)
+#define MSCC_MS_SAM_NM_FLOW_NCP_KAY_DROP_ACTION(x) ((x) << 30)
+
+/* MACSEC_SAM_NM_FLOW_CP */
+#define MSCC_MS_SAM_NM_FLOW_CP_UNTAGGED_FLOW_TYPE(x) (x)
+#define MSCC_MS_SAM_NM_FLOW_CP_UNTAGGED_DEST_PORT(x) ((x) << 2)
+#define MSCC_MS_SAM_NM_FLOW_CP_UNTAGGED_DROP_ACTION(x) ((x) << 6)
+#define MSCC_MS_SAM_NM_FLOW_CP_TAGGED_FLOW_TYPE(x) ((x) << 8)
+#define MSCC_MS_SAM_NM_FLOW_CP_TAGGED_DEST_PORT(x) ((x) << 10)
+#define MSCC_MS_SAM_NM_FLOW_CP_TAGGED_DROP_ACTION(x) ((x) << 14)
+#define MSCC_MS_SAM_NM_FLOW_CP_BADTAG_FLOW_TYPE(x) ((x) << 16)
+#define MSCC_MS_SAM_NM_FLOW_CP_BADTAG_DEST_PORT(x) ((x) << 18)
+#define MSCC_MS_SAM_NM_FLOW_CP_BADTAG_DROP_ACTION(x) ((x) << 22)
+#define MSCC_MS_SAM_NM_FLOW_CP_KAY_FLOW_TYPE(x) ((x) << 24)
+#define MSCC_MS_SAM_NM_FLOW_CP_KAY_DEST_PORT(x) ((x) << 26)
+#define MSCC_MS_SAM_NM_FLOW_CP_KAY_DROP_ACTION(x) ((x) << 30)
+
+/* MACSEC_MISC_CONTROL */
+#define MSCC_MS_MISC_CONTROL_MC_LATENCY_FIX(x) (x)
+#define MSCC_MS_MISC_CONTROL_MC_LATENCY_FIX_M GENMASK(5, 0)
+#define MSCC_MS_MISC_CONTROL_STATIC_BYPASS BIT(8)
+#define MSCC_MS_MISC_CONTROL_NM_MACSEC_EN BIT(9)
+#define MSCC_MS_MISC_CONTROL_VALIDATE_FRAMES(x) ((x) << 10)
+#define MSCC_MS_MISC_CONTROL_VALIDATE_FRAMES_M GENMASK(11, 10)
+#define MSCC_MS_MISC_CONTROL_XFORM_REC_SIZE(x) ((x) << 24)
+#define MSCC_MS_MISC_CONTROL_XFORM_REC_SIZE_M GENMASK(25, 24)
+
+/* MACSEC_COUNT_CONTROL */
+#define MSCC_MS_COUNT_CONTROL_RESET_ALL BIT(0)
+#define MSCC_MS_COUNT_CONTROL_DEBUG_ACCESS BIT(1)
+#define MSCC_MS_COUNT_CONTROL_SATURATE_CNTRS BIT(2)
+#define MSCC_MS_COUNT_CONTROL_AUTO_CNTR_RESET BIT(3)
+
+/* MACSEC_PARAMS2_IG_CC_CONTROL */
+#define MSCC_MS_PARAMS2_IG_CC_CONTROL_NON_MATCH_CTRL_ACT BIT(14)
+#define MSCC_MS_PARAMS2_IG_CC_CONTROL_NON_MATCH_ACT BIT(15)
+
+/* MACSEC_PARAMS2_IG_CP_TAG */
+#define MSCC_MS_PARAMS2_IG_CP_TAG_MAP_TBL(x) (x)
+#define MSCC_MS_PARAMS2_IG_CP_TAG_MAP_TBL_M GENMASK(23, 0)
+#define MSCC_MS_PARAMS2_IG_CP_TAG_DEF_UP(x) ((x) << 24)
+#define MSCC_MS_PARAMS2_IG_CP_TAG_DEF_UP_M GENMASK(26, 24)
+#define MSCC_MS_PARAMS2_IG_CP_TAG_STAG_UP_EN BIT(27)
+#define MSCC_MS_PARAMS2_IG_CP_TAG_QTAG_UP_EN BIT(28)
+#define MSCC_MS_PARAMS2_IG_CP_TAG_PARSE_QINQ BIT(29)
+#define MSCC_MS_PARAMS2_IG_CP_TAG_PARSE_STAG BIT(30)
+#define MSCC_MS_PARAMS2_IG_CP_TAG_PARSE_QTAG BIT(31)
+
+/* MACSEC_VLAN_MTU_CHECK */
+#define MSCC_MS_VLAN_MTU_CHECK_MTU_COMPARE(x) (x)
+#define MSCC_MS_VLAN_MTU_CHECK_MTU_COMPARE_M GENMASK(14, 0)
+#define MSCC_MS_VLAN_MTU_CHECK_MTU_COMP_DROP BIT(15)
+
+/* MACSEC_NON_VLAN_MTU_CHECK */
+#define MSCC_MS_NON_VLAN_MTU_CHECK_NV_MTU_COMPARE(x) (x)
+#define MSCC_MS_NON_VLAN_MTU_CHECK_NV_MTU_COMPARE_M GENMASK(14, 0)
+#define MSCC_MS_NON_VLAN_MTU_CHECK_NV_MTU_COMP_DROP BIT(15)
+
+/* MACSEC_PP_CTRL */
+#define MSCC_MS_PP_CTRL_MACSEC_OCTET_INCR_MODE BIT(0)
+
+/* MACSEC_INTR_CTRL_STATUS */
+#define MSCC_MS_INTR_CTRL_STATUS_INTR_CLR_STATUS(x) (x)
+#define MSCC_MS_INTR_CTRL_STATUS_INTR_CLR_STATUS_M GENMASK(15, 0)
+#define MSCC_MS_INTR_CTRL_STATUS_INTR_ENABLE(x) ((x) << 16)
+#define MSCC_MS_INTR_CTRL_STATUS_INTR_ENABLE_M GENMASK(31, 16)
+#define MACSEC_INTR_CTRL_STATUS_ROLLOVER BIT(5)
+
+#endif
diff --git a/drivers/net/phy/phy-core.c b/drivers/net/phy/phy-core.c
index 769a076514b0..a4d2d59fceca 100644
--- a/drivers/net/phy/phy-core.c
+++ b/drivers/net/phy/phy-core.c
@@ -387,7 +387,7 @@ int __phy_read_mmd(struct phy_device *phydev, int devad, u32 regnum)
if (regnum > (u16)~0 || devad > 32)
return -EINVAL;
- if (phydev->drv->read_mmd) {
+ if (phydev->drv && phydev->drv->read_mmd) {
val = phydev->drv->read_mmd(phydev, devad, regnum);
} else if (phydev->is_c45) {
u32 addr = MII_ADDR_C45 | (devad << 16) | (regnum & 0xffff);
@@ -444,7 +444,7 @@ int __phy_write_mmd(struct phy_device *phydev, int devad, u32 regnum, u16 val)
if (regnum > (u16)~0 || devad > 32)
return -EINVAL;
- if (phydev->drv->write_mmd) {
+ if (phydev->drv && phydev->drv->write_mmd) {
ret = phydev->drv->write_mmd(phydev, devad, regnum, val);
} else if (phydev->is_c45) {
u32 addr = MII_ADDR_C45 | (devad << 16) | (regnum & 0xffff);
diff --git a/drivers/net/phy/phy.c b/drivers/net/phy/phy.c
index 80be4d691e5b..d76e038cf2cb 100644
--- a/drivers/net/phy/phy.c
+++ b/drivers/net/phy/phy.c
@@ -422,8 +422,8 @@ int phy_mii_ioctl(struct phy_device *phydev, struct ifreq *ifr, int cmd)
return 0;
case SIOCSHWTSTAMP:
- if (phydev->drv && phydev->drv->hwtstamp)
- return phydev->drv->hwtstamp(phydev, ifr);
+ if (phydev->mii_ts && phydev->mii_ts->hwtstamp)
+ return phydev->mii_ts->hwtstamp(phydev->mii_ts, ifr);
/* fall through */
default:
@@ -432,6 +432,31 @@ int phy_mii_ioctl(struct phy_device *phydev, struct ifreq *ifr, int cmd)
}
EXPORT_SYMBOL(phy_mii_ioctl);
+/**
+ * phy_do_ioctl - generic ndo_do_ioctl implementation
+ * @dev: the net_device struct
+ * @ifr: &struct ifreq for socket ioctl's
+ * @cmd: ioctl cmd to execute
+ */
+int phy_do_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
+{
+ if (!dev->phydev)
+ return -ENODEV;
+
+ return phy_mii_ioctl(dev->phydev, ifr, cmd);
+}
+EXPORT_SYMBOL(phy_do_ioctl);
+
+/* Same as phy_do_ioctl(), but ensures that the net_device is running */
+int phy_do_ioctl_running(struct net_device *dev, struct ifreq *ifr, int cmd)
+{
+ if (!netif_running(dev))
+ return -ENODEV;
+
+ return phy_do_ioctl(dev, ifr, cmd);
+}
+EXPORT_SYMBOL(phy_do_ioctl_running);
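+
+/* Typical wiring (illustrative sketch, not taken from this patch): a MAC
+ * driver can point its ioctl hook straight at these helpers instead of
+ * open-coding the phydev / netif_running() checks:
+ *
+ *   static const struct net_device_ops foo_netdev_ops = {
+ *           .ndo_do_ioctl = phy_do_ioctl_running,
+ *   };
+ */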
+
void phy_queue_state_machine(struct phy_device *phydev, unsigned long jiffies)
{
mod_delayed_work(system_power_efficient_wq, &phydev->state_queue,
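The intended consumer pattern for the new helper is shown by the ax88172a and lan78xx conversions later in this patch; distilled into a sketch (the ops name here is hypothetical):

static const struct net_device_ops foo_netdev_ops = {
	/* ... other ops elided ... */
	/* Replaces a hand-rolled handler that open-coded the
	 * netif_running() and dev->phydev checks around phy_mii_ioctl().
	 */
	.ndo_do_ioctl = phy_do_ioctl_running,
};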
diff --git a/drivers/net/phy/phy_device.c b/drivers/net/phy/phy_device.c
index b13c52873ef5..6a5056e0ae77 100644
--- a/drivers/net/phy/phy_device.c
+++ b/drivers/net/phy/phy_device.c
@@ -881,6 +881,9 @@ EXPORT_SYMBOL(phy_device_register);
*/
void phy_device_remove(struct phy_device *phydev)
{
+ if (phydev->mii_ts)
+ unregister_mii_timestamper(phydev->mii_ts);
+
device_del(&phydev->mdio.dev);
/* Assert the reset signal */
@@ -919,6 +922,8 @@ static void phy_link_change(struct phy_device *phydev, bool up, bool do_carrier)
netif_carrier_off(netdev);
}
phydev->adjust_link(netdev);
+ if (phydev->mii_ts && phydev->mii_ts->link_state)
+ phydev->mii_ts->link_state(phydev->mii_ts, phydev);
}
/**
@@ -1102,9 +1107,8 @@ void phy_attached_info(struct phy_device *phydev)
EXPORT_SYMBOL(phy_attached_info);
#define ATTACHED_FMT "attached PHY driver [%s] (mii_bus:phy_addr=%s, irq=%s)"
-void phy_attached_print(struct phy_device *phydev, const char *fmt, ...)
+char *phy_attached_info_irq(struct phy_device *phydev)
{
- const char *drv_name = phydev->drv ? phydev->drv->name : "unbound";
char *irq_str;
char irq_num[8];
@@ -1121,6 +1125,14 @@ void phy_attached_print(struct phy_device *phydev, const char *fmt, ...)
break;
}
+ return kasprintf(GFP_KERNEL, "%s", irq_str);
+}
+EXPORT_SYMBOL(phy_attached_info_irq);
+
+void phy_attached_print(struct phy_device *phydev, const char *fmt, ...)
+{
+ const char *drv_name = phydev->drv ? phydev->drv->name : "unbound";
+ char *irq_str = phy_attached_info_irq(phydev);
if (!fmt) {
phydev_info(phydev, ATTACHED_FMT "\n",
@@ -1137,6 +1149,7 @@ void phy_attached_print(struct phy_device *phydev, const char *fmt, ...)
vprintk(fmt, ap);
va_end(ap);
}
+ kfree(irq_str);
}
EXPORT_SYMBOL(phy_attached_print);
@@ -1771,6 +1784,36 @@ int genphy_restart_aneg(struct phy_device *phydev)
EXPORT_SYMBOL(genphy_restart_aneg);
/**
+ * genphy_check_and_restart_aneg - Enable and restart auto-negotiation
+ * @phydev: target phy_device struct
+ * @restart: whether aneg restart is requested
+ *
+ * Check, and restart auto-negotiation if needed.
+ */
+int genphy_check_and_restart_aneg(struct phy_device *phydev, bool restart)
+{
+ int ret = 0;
+
+ if (!restart) {
+ /* Advertisement hasn't changed, but maybe aneg was never on to
+ * begin with? Or maybe phy was isolated?
+ */
+ ret = phy_read(phydev, MII_BMCR);
+ if (ret < 0)
+ return ret;
+
+ if (!(ret & BMCR_ANENABLE) || (ret & BMCR_ISOLATE))
+ restart = true;
+ }
+
+ if (restart)
+ ret = genphy_restart_aneg(phydev);
+
+ return ret;
+}
+EXPORT_SYMBOL(genphy_check_and_restart_aneg);
+
+/**
* __genphy_config_aneg - restart auto-negotiation or write BMCR
* @phydev: target phy_device struct
* @changed: whether autoneg is requested
@@ -1795,23 +1838,7 @@ int __genphy_config_aneg(struct phy_device *phydev, bool changed)
else if (err)
changed = true;
- if (!changed) {
- /* Advertisement hasn't changed, but maybe aneg was never on to
- * begin with? Or maybe phy was isolated?
- */
- int ctl = phy_read(phydev, MII_BMCR);
-
- if (ctl < 0)
- return ctl;
-
- if (!(ctl & BMCR_ANENABLE) || (ctl & BMCR_ISOLATE))
- changed = true; /* do restart aneg */
- }
-
- /* Only restart aneg if we are advertising something different
- * than we were before.
- */
- return changed ? genphy_restart_aneg(phydev) : 0;
+ return genphy_check_and_restart_aneg(phydev, changed);
}
EXPORT_SYMBOL(__genphy_config_aneg);
@@ -1979,6 +2006,36 @@ int genphy_read_lpa(struct phy_device *phydev)
EXPORT_SYMBOL(genphy_read_lpa);
/**
+ * genphy_read_status_fixed - read the link parameters for !aneg mode
+ * @phydev: target phy_device struct
+ *
+ * Read the current duplex and speed state for a PHY operating with
+ * autonegotiation disabled.
+ */
+int genphy_read_status_fixed(struct phy_device *phydev)
+{
+ int bmcr = phy_read(phydev, MII_BMCR);
+
+ if (bmcr < 0)
+ return bmcr;
+
+ if (bmcr & BMCR_FULLDPLX)
+ phydev->duplex = DUPLEX_FULL;
+ else
+ phydev->duplex = DUPLEX_HALF;
+
+ if (bmcr & BMCR_SPEED1000)
+ phydev->speed = SPEED_1000;
+ else if (bmcr & BMCR_SPEED100)
+ phydev->speed = SPEED_100;
+ else
+ phydev->speed = SPEED_10;
+
+ return 0;
+}
+EXPORT_SYMBOL(genphy_read_status_fixed);
+
+/**
* genphy_read_status - check the link status and update current link state
* @phydev: target phy_device struct
*
@@ -2012,22 +2069,9 @@ int genphy_read_status(struct phy_device *phydev)
if (phydev->autoneg == AUTONEG_ENABLE && phydev->autoneg_complete) {
phy_resolve_aneg_linkmode(phydev);
} else if (phydev->autoneg == AUTONEG_DISABLE) {
- int bmcr = phy_read(phydev, MII_BMCR);
-
- if (bmcr < 0)
- return bmcr;
-
- if (bmcr & BMCR_FULLDPLX)
- phydev->duplex = DUPLEX_FULL;
- else
- phydev->duplex = DUPLEX_HALF;
-
- if (bmcr & BMCR_SPEED1000)
- phydev->speed = SPEED_1000;
- else if (bmcr & BMCR_SPEED100)
- phydev->speed = SPEED_100;
- else
- phydev->speed = SPEED_10;
+ err = genphy_read_status_fixed(phydev);
+ if (err < 0)
+ return err;
}
return 0;
@@ -2575,7 +2619,6 @@ static struct phy_driver genphy_driver = {
.name = "Generic PHY",
.soft_reset = genphy_no_soft_reset,
.get_features = genphy_read_abilities,
- .aneg_done = genphy_aneg_done,
.suspend = genphy_suspend,
.resume = genphy_resume,
.set_loopback = genphy_loopback,
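A hedged sketch of how a PHY driver might use the two helpers factored out above; foo_read_status and the vendor-specific step are hypothetical:

static int foo_read_status(struct phy_device *phydev)
{
	int err = genphy_update_link(phydev);

	if (err)
		return err;

	phydev->speed = SPEED_UNKNOWN;
	phydev->duplex = DUPLEX_UNKNOWN;

	/* With autoneg off, speed/duplex come straight from BMCR. */
	if (phydev->autoneg == AUTONEG_DISABLE)
		return genphy_read_status_fixed(phydev);

	/* ... vendor-specific decoding of the aneg result ... */
	return 0;
}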
diff --git a/drivers/net/phy/phylink.c b/drivers/net/phy/phylink.c
index ee7a718662c6..70b9a143db84 100644
--- a/drivers/net/phy/phylink.c
+++ b/drivers/net/phy/phylink.c
@@ -48,7 +48,8 @@ struct phylink {
unsigned long phylink_disable_state; /* bitmask of disables */
struct phy_device *phydev;
phy_interface_t link_interface; /* PHY_INTERFACE_xxx */
- u8 link_an_mode; /* MLO_AN_xxx */
+ u8 cfg_link_an_mode; /* MLO_AN_xxx */
+ u8 cur_link_an_mode; /* MLO_AN_xxx, mode currently in use */
u8 link_port; /* The current non-phy ethtool port */
__ETHTOOL_DECLARE_LINK_MODE_MASK(supported);
@@ -71,6 +72,9 @@ struct phylink {
bool mac_link_dropped;
struct sfp_bus *sfp_bus;
+ bool sfp_may_have_phy;
+ __ETHTOOL_DECLARE_LINK_MODE_MASK(sfp_support);
+ u8 sfp_port;
};
#define phylink_printk(level, pl, fmt, ...) \
@@ -182,8 +186,8 @@ static int phylink_parse_fixedlink(struct phylink *pl,
pl->link_config.pause |= MLO_PAUSE_ASYM;
if (ret == 0) {
- desc = fwnode_get_named_gpiod(fixed_node, "link-gpios",
- 0, GPIOD_IN, "?");
+ desc = fwnode_gpiod_get_index(fixed_node, "link", 0,
+ GPIOD_IN, "?");
if (!IS_ERR(desc))
pl->link_gpio = desc;
@@ -256,12 +260,12 @@ static int phylink_parse_mode(struct phylink *pl, struct fwnode_handle *fwnode)
dn = fwnode_get_named_child_node(fwnode, "fixed-link");
if (dn || fwnode_property_present(fwnode, "fixed-link"))
- pl->link_an_mode = MLO_AN_FIXED;
+ pl->cfg_link_an_mode = MLO_AN_FIXED;
fwnode_handle_put(dn);
if (fwnode_property_read_string(fwnode, "managed", &managed) == 0 &&
strcmp(managed, "in-band-status") == 0) {
- if (pl->link_an_mode == MLO_AN_FIXED) {
+ if (pl->cfg_link_an_mode == MLO_AN_FIXED) {
phylink_err(pl,
"can't use both fixed-link and in-band-status\n");
return -EINVAL;
@@ -273,10 +277,11 @@ static int phylink_parse_mode(struct phylink *pl, struct fwnode_handle *fwnode)
phylink_set(pl->supported, Asym_Pause);
phylink_set(pl->supported, Pause);
pl->link_config.an_enabled = true;
- pl->link_an_mode = MLO_AN_INBAND;
+ pl->cfg_link_an_mode = MLO_AN_INBAND;
switch (pl->link_config.interface) {
case PHY_INTERFACE_MODE_SGMII:
+ case PHY_INTERFACE_MODE_QSGMII:
phylink_set(pl->supported, 10baseT_Half);
phylink_set(pl->supported, 10baseT_Full);
phylink_set(pl->supported, 100baseT_Half);
@@ -293,7 +298,9 @@ static int phylink_parse_mode(struct phylink *pl, struct fwnode_handle *fwnode)
phylink_set(pl->supported, 2500baseX_Full);
break;
+ case PHY_INTERFACE_MODE_USXGMII:
case PHY_INTERFACE_MODE_10GKR:
+ case PHY_INTERFACE_MODE_10GBASER:
phylink_set(pl->supported, 10baseT_Half);
phylink_set(pl->supported, 10baseT_Full);
phylink_set(pl->supported, 100baseT_Half);
@@ -301,6 +308,10 @@ static int phylink_parse_mode(struct phylink *pl, struct fwnode_handle *fwnode)
phylink_set(pl->supported, 1000baseT_Half);
phylink_set(pl->supported, 1000baseT_Full);
phylink_set(pl->supported, 1000baseX_Full);
+ phylink_set(pl->supported, 2500baseT_Full);
+ phylink_set(pl->supported, 2500baseX_Full);
+ phylink_set(pl->supported, 5000baseT_Full);
+ phylink_set(pl->supported, 10000baseT_Full);
phylink_set(pl->supported, 10000baseKR_Full);
phylink_set(pl->supported, 10000baseCR_Full);
phylink_set(pl->supported, 10000baseSR_Full);
@@ -333,14 +344,14 @@ static void phylink_mac_config(struct phylink *pl,
{
phylink_dbg(pl,
"%s: mode=%s/%s/%s/%s adv=%*pb pause=%02x link=%u an=%u\n",
- __func__, phylink_an_mode_str(pl->link_an_mode),
+ __func__, phylink_an_mode_str(pl->cur_link_an_mode),
phy_modes(state->interface),
phy_speed_to_str(state->speed),
phy_duplex_to_str(state->duplex),
__ETHTOOL_LINK_MODE_MASK_NBITS, state->advertising,
state->pause, state->link, state->an_enabled);
- pl->ops->mac_config(pl->config, pl->link_an_mode, state);
+ pl->ops->mac_config(pl->config, pl->cur_link_an_mode, state);
}
static void phylink_mac_config_up(struct phylink *pl,
@@ -441,7 +452,7 @@ static void phylink_mac_link_up(struct phylink *pl,
struct net_device *ndev = pl->netdev;
pl->cur_interface = link_state.interface;
- pl->ops->mac_link_up(pl->config, pl->link_an_mode,
+ pl->ops->mac_link_up(pl->config, pl->cur_link_an_mode,
pl->cur_interface, pl->phydev);
if (ndev)
@@ -460,7 +471,7 @@ static void phylink_mac_link_down(struct phylink *pl)
if (ndev)
netif_carrier_off(ndev);
- pl->ops->mac_link_down(pl->config, pl->link_an_mode,
+ pl->ops->mac_link_down(pl->config, pl->cur_link_an_mode,
pl->cur_interface);
phylink_info(pl, "Link is Down\n");
}
@@ -479,7 +490,7 @@ static void phylink_resolve(struct work_struct *w)
} else if (pl->mac_link_dropped) {
link_state.link = false;
} else {
- switch (pl->link_an_mode) {
+ switch (pl->cur_link_an_mode) {
case MLO_AN_PHY:
link_state = pl->phy_state;
phylink_resolve_flow(pl, &link_state);
@@ -650,7 +661,7 @@ struct phylink *phylink_create(struct phylink_config *config,
return ERR_PTR(ret);
}
- if (pl->link_an_mode == MLO_AN_FIXED) {
+ if (pl->cfg_link_an_mode == MLO_AN_FIXED) {
ret = phylink_parse_fixedlink(pl, fwnode);
if (ret < 0) {
kfree(pl);
@@ -658,6 +669,8 @@ struct phylink *phylink_create(struct phylink_config *config,
}
}
+ pl->cur_link_an_mode = pl->cfg_link_an_mode;
+
ret = phylink_register_sfp(pl, fwnode);
if (ret < 0) {
kfree(pl);
@@ -713,10 +726,12 @@ static void phylink_phy_change(struct phy_device *phydev, bool up,
phy_duplex_to_str(phydev->duplex));
}
-static int phylink_bringup_phy(struct phylink *pl, struct phy_device *phy)
+static int phylink_bringup_phy(struct phylink *pl, struct phy_device *phy,
+ phy_interface_t interface)
{
struct phylink_link_state config;
__ETHTOOL_DECLARE_LINK_MODE_MASK(supported);
+ char *irq_str;
int ret;
/*
@@ -731,7 +746,19 @@ static int phylink_bringup_phy(struct phylink *pl, struct phy_device *phy)
memset(&config, 0, sizeof(config));
linkmode_copy(supported, phy->supported);
linkmode_copy(config.advertising, phy->advertising);
- config.interface = pl->link_config.interface;
+
+ /* Clause 45 PHYs switch their Serdes lane between several different
+ * modes, normally 10GBASE-R and SGMII. Some use 2500BASE-X for 2.5G
+ * speeds. We really need to know which interface modes the PHY and
+ * MAC support to properly work out which linkmodes can be supported.
+ */
+ if (phy->is_c45 &&
+ interface != PHY_INTERFACE_MODE_RXAUI &&
+ interface != PHY_INTERFACE_MODE_XAUI &&
+ interface != PHY_INTERFACE_MODE_USXGMII)
+ config.interface = PHY_INTERFACE_MODE_NA;
+ else
+ config.interface = interface;
ret = phylink_validate(pl, supported, &config);
if (ret)
@@ -740,13 +767,16 @@ static int phylink_bringup_phy(struct phylink *pl, struct phy_device *phy)
phy->phylink = pl;
phy->phy_link_change = phylink_phy_change;
+ irq_str = phy_attached_info_irq(phy);
phylink_info(pl,
- "PHY [%s] driver [%s]\n", dev_name(&phy->mdio.dev),
- phy->drv->name);
+ "PHY [%s] driver [%s] (irq=%s)\n",
+ dev_name(&phy->mdio.dev), phy->drv->name, irq_str);
+ kfree(irq_str);
mutex_lock(&phy->lock);
mutex_lock(&pl->state_mutex);
pl->phydev = phy;
+ pl->phy_state.interface = interface;
linkmode_copy(pl->supported, supported);
linkmode_copy(pl->link_config.advertising, config.advertising);
@@ -766,28 +796,18 @@ static int phylink_bringup_phy(struct phylink *pl, struct phy_device *phy)
return 0;
}
-static int __phylink_connect_phy(struct phylink *pl, struct phy_device *phy,
- phy_interface_t interface)
+static int phylink_attach_phy(struct phylink *pl, struct phy_device *phy,
+ phy_interface_t interface)
{
- int ret;
-
- if (WARN_ON(pl->link_an_mode == MLO_AN_FIXED ||
- (pl->link_an_mode == MLO_AN_INBAND &&
+ if (WARN_ON(pl->cfg_link_an_mode == MLO_AN_FIXED ||
+ (pl->cfg_link_an_mode == MLO_AN_INBAND &&
phy_interface_mode_is_8023z(interface))))
return -EINVAL;
if (pl->phydev)
return -EBUSY;
- ret = phy_attach_direct(pl->netdev, phy, 0, interface);
- if (ret)
- return ret;
-
- ret = phylink_bringup_phy(pl, phy);
- if (ret)
- phy_detach(phy);
-
- return ret;
+ return phy_attach_direct(pl->netdev, phy, 0, interface);
}
/**
@@ -807,13 +827,23 @@ static int __phylink_connect_phy(struct phylink *pl, struct phy_device *phy,
*/
int phylink_connect_phy(struct phylink *pl, struct phy_device *phy)
{
+ int ret;
+
/* Use PHY device/driver interface */
if (pl->link_interface == PHY_INTERFACE_MODE_NA) {
pl->link_interface = phy->interface;
pl->link_config.interface = pl->link_interface;
}
- return __phylink_connect_phy(pl, phy, pl->link_interface);
+ ret = phylink_attach_phy(pl, phy, pl->link_interface);
+ if (ret < 0)
+ return ret;
+
+ ret = phylink_bringup_phy(pl, phy, pl->link_config.interface);
+ if (ret)
+ phy_detach(phy);
+
+ return ret;
}
EXPORT_SYMBOL_GPL(phylink_connect_phy);
@@ -837,8 +867,8 @@ int phylink_of_phy_connect(struct phylink *pl, struct device_node *dn,
int ret;
/* Fixed links and 802.3z are handled without needing a PHY */
- if (pl->link_an_mode == MLO_AN_FIXED ||
- (pl->link_an_mode == MLO_AN_INBAND &&
+ if (pl->cfg_link_an_mode == MLO_AN_FIXED ||
+ (pl->cfg_link_an_mode == MLO_AN_INBAND &&
phy_interface_mode_is_8023z(pl->link_interface)))
return 0;
@@ -849,20 +879,23 @@ int phylink_of_phy_connect(struct phylink *pl, struct device_node *dn,
phy_node = of_parse_phandle(dn, "phy-device", 0);
if (!phy_node) {
- if (pl->link_an_mode == MLO_AN_PHY)
+ if (pl->cfg_link_an_mode == MLO_AN_PHY)
return -ENODEV;
return 0;
}
- phy_dev = of_phy_attach(pl->netdev, phy_node, flags,
- pl->link_interface);
+ phy_dev = of_phy_find_device(phy_node);
/* We're done with the phy_node handle */
of_node_put(phy_node);
-
if (!phy_dev)
return -ENODEV;
- ret = phylink_bringup_phy(pl, phy_dev);
+ ret = phy_attach_direct(pl->netdev, phy_dev, flags,
+ pl->link_interface);
+ if (ret)
+ return ret;
+
+ ret = phylink_bringup_phy(pl, phy_dev, pl->link_config.interface);
if (ret)
phy_detach(phy_dev);
@@ -912,7 +945,7 @@ int phylink_fixed_state_cb(struct phylink *pl,
/* It does not make sense to let the link be overridden unless we use
* MLO_AN_FIXED
*/
- if (pl->link_an_mode != MLO_AN_FIXED)
+ if (pl->cfg_link_an_mode != MLO_AN_FIXED)
return -EINVAL;
mutex_lock(&pl->state_mutex);
@@ -962,7 +995,7 @@ void phylink_start(struct phylink *pl)
ASSERT_RTNL();
phylink_info(pl, "configuring for %s/%s link mode\n",
- phylink_an_mode_str(pl->link_an_mode),
+ phylink_an_mode_str(pl->cur_link_an_mode),
phy_modes(pl->link_config.interface));
/* Always set the carrier off */
@@ -985,7 +1018,7 @@ void phylink_start(struct phylink *pl)
clear_bit(PHYLINK_DISABLE_STOPPED, &pl->phylink_disable_state);
phylink_run_resolve(pl);
- if (pl->link_an_mode == MLO_AN_FIXED && pl->link_gpio) {
+ if (pl->cfg_link_an_mode == MLO_AN_FIXED && pl->link_gpio) {
int irq = gpiod_to_irq(pl->link_gpio);
if (irq > 0) {
@@ -1000,7 +1033,8 @@ void phylink_start(struct phylink *pl)
if (irq <= 0)
mod_timer(&pl->link_poll, jiffies + HZ);
}
- if (pl->link_an_mode == MLO_AN_FIXED && pl->get_fixed_state)
+ if ((pl->cfg_link_an_mode == MLO_AN_FIXED && pl->get_fixed_state) ||
+ pl->config->pcs_poll)
mod_timer(&pl->link_poll, jiffies + HZ);
if (pl->phydev)
phy_start(pl->phydev);
@@ -1127,7 +1161,7 @@ int phylink_ethtool_ksettings_get(struct phylink *pl,
linkmode_copy(kset->link_modes.supported, pl->supported);
- switch (pl->link_an_mode) {
+ switch (pl->cur_link_an_mode) {
case MLO_AN_FIXED:
/* We are using fixed settings. Report these as the
* current link settings - and note that these also
@@ -1199,7 +1233,7 @@ int phylink_ethtool_ksettings_set(struct phylink *pl,
/* If we have a fixed link (as specified by firmware), refuse
* to change link parameters.
*/
- if (pl->link_an_mode == MLO_AN_FIXED &&
+ if (pl->cur_link_an_mode == MLO_AN_FIXED &&
(s->speed != pl->link_config.speed ||
s->duplex != pl->link_config.duplex))
return -EINVAL;
@@ -1211,7 +1245,7 @@ int phylink_ethtool_ksettings_set(struct phylink *pl,
__clear_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, config.advertising);
} else {
/* If we have a fixed link, refuse to enable autonegotiation */
- if (pl->link_an_mode == MLO_AN_FIXED)
+ if (pl->cur_link_an_mode == MLO_AN_FIXED)
return -EINVAL;
config.speed = SPEED_UNKNOWN;
@@ -1221,44 +1255,66 @@ int phylink_ethtool_ksettings_set(struct phylink *pl,
__set_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, config.advertising);
}
- if (phylink_validate(pl, support, &config))
- return -EINVAL;
-
- /* If autonegotiation is enabled, we must have an advertisement */
- if (config.an_enabled && phylink_is_empty_linkmode(config.advertising))
- return -EINVAL;
-
- our_kset = *kset;
- linkmode_copy(our_kset.link_modes.advertising, config.advertising);
- our_kset.base.speed = config.speed;
- our_kset.base.duplex = config.duplex;
-
- /* If we have a PHY, configure the phy */
if (pl->phydev) {
+ /* If we have a PHY, we process the kset change via phylib.
+ * phylib will call our link state function if the PHY
+ * parameters have changed, which will trigger a resolve
+ * and update the MAC configuration.
+ */
+ our_kset = *kset;
+ linkmode_copy(our_kset.link_modes.advertising,
+ config.advertising);
+ our_kset.base.speed = config.speed;
+ our_kset.base.duplex = config.duplex;
+
ret = phy_ethtool_ksettings_set(pl->phydev, &our_kset);
if (ret)
return ret;
- }
- mutex_lock(&pl->state_mutex);
- /* Configure the MAC to match the new settings */
- linkmode_copy(pl->link_config.advertising, our_kset.link_modes.advertising);
- pl->link_config.interface = config.interface;
- pl->link_config.speed = our_kset.base.speed;
- pl->link_config.duplex = our_kset.base.duplex;
- pl->link_config.an_enabled = our_kset.base.autoneg != AUTONEG_DISABLE;
+ mutex_lock(&pl->state_mutex);
+ /* Save the new configuration */
+ linkmode_copy(pl->link_config.advertising,
+ our_kset.link_modes.advertising);
+ pl->link_config.interface = config.interface;
+ pl->link_config.speed = our_kset.base.speed;
+ pl->link_config.duplex = our_kset.base.duplex;
+ pl->link_config.an_enabled = our_kset.base.autoneg !=
+ AUTONEG_DISABLE;
+ mutex_unlock(&pl->state_mutex);
+ } else {
+ /* For a fixed link, this isn't able to change any parameters,
+ * which just leaves inband mode.
+ */
+ if (phylink_validate(pl, support, &config))
+ return -EINVAL;
- /* If we have a PHY, phylib will call our link state function if the
- * mode has changed, which will trigger a resolve and update the MAC
- * configuration. For a fixed link, this isn't able to change any
- * parameters, which just leaves inband mode.
- */
- if (pl->link_an_mode == MLO_AN_INBAND &&
- !test_bit(PHYLINK_DISABLE_STOPPED, &pl->phylink_disable_state)) {
- phylink_mac_config(pl, &pl->link_config);
- phylink_mac_an_restart(pl);
+ /* If autonegotiation is enabled, we must have an advertisement */
+ if (config.an_enabled &&
+ phylink_is_empty_linkmode(config.advertising))
+ return -EINVAL;
+
+ mutex_lock(&pl->state_mutex);
+ linkmode_copy(pl->link_config.advertising, config.advertising);
+ pl->link_config.interface = config.interface;
+ pl->link_config.speed = config.speed;
+ pl->link_config.duplex = config.duplex;
+ pl->link_config.an_enabled = kset->base.autoneg !=
+ AUTONEG_DISABLE;
+
+ if (pl->cur_link_an_mode == MLO_AN_INBAND &&
+ !test_bit(PHYLINK_DISABLE_STOPPED,
+ &pl->phylink_disable_state)) {
+ /* If in 802.3z mode, this updates the advertisement.
+ *
+ * If we are in SGMII mode without a PHY, there is no
+ * advertisement; the only things we have are the pause
+ * modes, which can only come from a PHY.
+ */
+ phylink_mac_config(pl, &pl->link_config);
+ phylink_mac_an_restart(pl);
+ }
+ mutex_unlock(&pl->state_mutex);
}
- mutex_unlock(&pl->state_mutex);
return 0;
}
@@ -1343,7 +1399,7 @@ int phylink_ethtool_set_pauseparam(struct phylink *pl,
pause->tx_pause);
} else if (!test_bit(PHYLINK_DISABLE_STOPPED,
&pl->phylink_disable_state)) {
- switch (pl->link_an_mode) {
+ switch (pl->cur_link_an_mode) {
case MLO_AN_FIXED:
/* Should we allow fixed links to change against the config? */
phylink_resolve_flow(pl, config);
@@ -1550,7 +1606,7 @@ static int phylink_mii_read(struct phylink *pl, unsigned int phy_id,
struct phylink_link_state state;
int val = 0xffff;
- switch (pl->link_an_mode) {
+ switch (pl->cur_link_an_mode) {
case MLO_AN_FIXED:
if (phy_id == 0) {
phylink_get_fixed_state(pl, &state);
@@ -1575,7 +1631,7 @@ static int phylink_mii_read(struct phylink *pl, unsigned int phy_id,
static int phylink_mii_write(struct phylink *pl, unsigned int phy_id,
unsigned int reg, unsigned int val)
{
- switch (pl->link_an_mode) {
+ switch (pl->cur_link_an_mode) {
case MLO_AN_FIXED:
break;
@@ -1681,25 +1737,21 @@ static void phylink_sfp_detach(void *upstream, struct sfp_bus *bus)
pl->netdev->sfp_bus = NULL;
}
-static int phylink_sfp_module_insert(void *upstream,
- const struct sfp_eeprom_id *id)
+static int phylink_sfp_config(struct phylink *pl, u8 mode,
+ const unsigned long *supported,
+ const unsigned long *advertising)
{
- struct phylink *pl = upstream;
- __ETHTOOL_DECLARE_LINK_MODE_MASK(support) = { 0, };
__ETHTOOL_DECLARE_LINK_MODE_MASK(support1);
+ __ETHTOOL_DECLARE_LINK_MODE_MASK(support);
struct phylink_link_state config;
phy_interface_t iface;
- int ret = 0;
bool changed;
- u8 port;
-
- ASSERT_RTNL();
+ int ret;
- sfp_parse_support(pl->sfp_bus, id, support);
- port = sfp_parse_port(pl->sfp_bus, id, support);
+ linkmode_copy(support, supported);
memset(&config, 0, sizeof(config));
- linkmode_copy(config.advertising, support);
+ linkmode_copy(config.advertising, advertising);
config.interface = PHY_INTERFACE_MODE_NA;
config.speed = SPEED_UNKNOWN;
config.duplex = DUPLEX_UNKNOWN;
@@ -1714,9 +1766,7 @@ static int phylink_sfp_module_insert(void *upstream,
return ret;
}
- linkmode_copy(support1, support);
-
- iface = sfp_select_interface(pl->sfp_bus, id, config.advertising);
+ iface = sfp_select_interface(pl->sfp_bus, config.advertising);
if (iface == PHY_INTERFACE_MODE_NA) {
phylink_err(pl,
"selection of interface failed, advertisement %*pb\n",
@@ -1725,18 +1775,18 @@ static int phylink_sfp_module_insert(void *upstream,
}
config.interface = iface;
+ linkmode_copy(support1, support);
ret = phylink_validate(pl, support1, &config);
if (ret) {
phylink_err(pl, "validation of %s/%s with support %*pb failed: %d\n",
- phylink_an_mode_str(MLO_AN_INBAND),
+ phylink_an_mode_str(mode),
phy_modes(config.interface),
__ETHTOOL_LINK_MODE_MASK_NBITS, support, ret);
return ret;
}
phylink_dbg(pl, "requesting link mode %s/%s with support %*pb\n",
- phylink_an_mode_str(MLO_AN_INBAND),
- phy_modes(config.interface),
+ phylink_an_mode_str(mode), phy_modes(config.interface),
__ETHTOOL_LINK_MODE_MASK_NBITS, support);
if (phy_interface_mode_is_8023z(iface) && pl->phydev)
@@ -1748,19 +1798,19 @@ static int phylink_sfp_module_insert(void *upstream,
linkmode_copy(pl->link_config.advertising, config.advertising);
}
- if (pl->link_an_mode != MLO_AN_INBAND ||
+ if (pl->cur_link_an_mode != mode ||
pl->link_config.interface != config.interface) {
pl->link_config.interface = config.interface;
- pl->link_an_mode = MLO_AN_INBAND;
+ pl->cur_link_an_mode = mode;
changed = true;
phylink_info(pl, "switched to %s/%s link mode\n",
- phylink_an_mode_str(MLO_AN_INBAND),
+ phylink_an_mode_str(mode),
phy_modes(config.interface));
}
- pl->link_port = port;
+ pl->link_port = pl->sfp_port;
if (changed && !test_bit(PHYLINK_DISABLE_STOPPED,
&pl->phylink_disable_state))
@@ -1769,6 +1819,55 @@ static int phylink_sfp_module_insert(void *upstream,
return ret;
}
+static int phylink_sfp_module_insert(void *upstream,
+ const struct sfp_eeprom_id *id)
+{
+ struct phylink *pl = upstream;
+ unsigned long *support = pl->sfp_support;
+
+ ASSERT_RTNL();
+
+ linkmode_zero(support);
+ sfp_parse_support(pl->sfp_bus, id, support);
+ pl->sfp_port = sfp_parse_port(pl->sfp_bus, id, support);
+
+ /* If this module may have a PHY connecting later, defer the MAC configuration */
+ pl->sfp_may_have_phy = sfp_may_have_phy(pl->sfp_bus, id);
+ if (pl->sfp_may_have_phy)
+ return 0;
+
+ return phylink_sfp_config(pl, MLO_AN_INBAND, support, support);
+}
+
+static int phylink_sfp_module_start(void *upstream)
+{
+ struct phylink *pl = upstream;
+
+ /* If this SFP module has a PHY, start the PHY now. */
+ if (pl->phydev) {
+ phy_start(pl->phydev);
+ return 0;
+ }
+
+ /* If the module may have a PHY but we didn't detect one, we
+ * need to configure the MAC here.
+ */
+ if (!pl->sfp_may_have_phy)
+ return 0;
+
+ return phylink_sfp_config(pl, MLO_AN_INBAND,
+ pl->sfp_support, pl->sfp_support);
+}
+
+static void phylink_sfp_module_stop(void *upstream)
+{
+ struct phylink *pl = upstream;
+
+ /* If this SFP module has a PHY, stop it. */
+ if (pl->phydev)
+ phy_stop(pl->phydev);
+}
+
static void phylink_sfp_link_down(void *upstream)
{
struct phylink *pl = upstream;
@@ -1788,11 +1887,51 @@ static void phylink_sfp_link_up(void *upstream)
phylink_run_resolve(pl);
}
+/* The Broadcom BCM84881 in the Methode DM7052 is unable to provide an SGMII
+ * or 802.3z control word, so inband will not work.
+ */
+static bool phylink_phy_no_inband(struct phy_device *phy)
+{
+ return phy->is_c45 &&
+ (phy->c45_ids.device_ids[1] & 0xfffffff0) == 0xae025150;
+}
+
static int phylink_sfp_connect_phy(void *upstream, struct phy_device *phy)
{
struct phylink *pl = upstream;
+ phy_interface_t interface;
+ u8 mode;
+ int ret;
- return __phylink_connect_phy(upstream, phy, pl->link_config.interface);
+ /*
+ * This is the new way of dealing with flow control for PHYs,
+ * as described by Timur Tabi in commit 529ed1275263 ("net: phy:
+ * phy drivers should not set SUPPORTED_[Asym_]Pause"), except
+ * that, using our validate call to the MAC, we rely upon the MAC
+ * to clear the bits from both the supported and advertising fields.
+ */
+ phy_support_asym_pause(phy);
+
+ if (phylink_phy_no_inband(phy))
+ mode = MLO_AN_PHY;
+ else
+ mode = MLO_AN_INBAND;
+
+ /* Do the initial configuration */
+ ret = phylink_sfp_config(pl, mode, phy->supported, phy->advertising);
+ if (ret < 0)
+ return ret;
+
+ interface = pl->link_config.interface;
+ ret = phylink_attach_phy(pl, phy, interface);
+ if (ret < 0)
+ return ret;
+
+ ret = phylink_bringup_phy(pl, phy, interface);
+ if (ret)
+ phy_detach(phy);
+
+ return ret;
}
static void phylink_sfp_disconnect_phy(void *upstream)
@@ -1804,6 +1943,8 @@ static const struct sfp_upstream_ops sfp_phylink_ops = {
.attach = phylink_sfp_attach,
.detach = phylink_sfp_detach,
.module_insert = phylink_sfp_module_insert,
+ .module_start = phylink_sfp_module_start,
+ .module_stop = phylink_sfp_module_stop,
.link_up = phylink_sfp_link_up,
.link_down = phylink_sfp_link_down,
.connect_phy = phylink_sfp_connect_phy,
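The two new upstream hooks bracket the period during which the module is operational; a sketch of how another consumer would wire them up, with all foo_* names hypothetical:

static int foo_sfp_module_start(void *upstream)
{
	/* MAC configuration that must wait until the module (and any
	 * on-module PHY) has finished initialising goes here.
	 */
	return 0;
}

static void foo_sfp_module_stop(void *upstream)
{
	/* Undo foo_sfp_module_start() before the module goes away. */
}

static const struct sfp_upstream_ops foo_sfp_ops = {
	/* .attach, .detach, .module_insert, ... as before */
	.module_start	= foo_sfp_module_start,
	.module_stop	= foo_sfp_module_stop,
};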
diff --git a/drivers/net/phy/realtek.c b/drivers/net/phy/realtek.c
index 476db5345e1a..f5fa2fff3ddc 100644
--- a/drivers/net/phy/realtek.c
+++ b/drivers/net/phy/realtek.c
@@ -29,6 +29,8 @@
#define RTL8211F_INSR 0x1d
#define RTL8211F_TX_DELAY BIT(8)
+#define RTL8211F_RX_DELAY BIT(3)
+
#define RTL8211E_TX_DELAY BIT(1)
#define RTL8211E_RX_DELAY BIT(2)
#define RTL8211E_MODE_MII_GMII BIT(3)
@@ -171,25 +173,66 @@ static int rtl8211c_config_init(struct phy_device *phydev)
static int rtl8211f_config_init(struct phy_device *phydev)
{
- u16 val;
+ struct device *dev = &phydev->mdio.dev;
+ u16 val_txdly, val_rxdly;
+ int ret;
- /* enable TX-delay for rgmii-{id,txid}, and disable it for rgmii and
- * rgmii-rxid. The RX-delay can be enabled by the external RXDLY pin.
- */
switch (phydev->interface) {
case PHY_INTERFACE_MODE_RGMII:
+ val_txdly = 0;
+ val_rxdly = 0;
+ break;
+
case PHY_INTERFACE_MODE_RGMII_RXID:
- val = 0;
+ val_txdly = 0;
+ val_rxdly = RTL8211F_RX_DELAY;
break;
- case PHY_INTERFACE_MODE_RGMII_ID:
+
case PHY_INTERFACE_MODE_RGMII_TXID:
- val = RTL8211F_TX_DELAY;
+ val_txdly = RTL8211F_TX_DELAY;
+ val_rxdly = 0;
+ break;
+
+ case PHY_INTERFACE_MODE_RGMII_ID:
+ val_txdly = RTL8211F_TX_DELAY;
+ val_rxdly = RTL8211F_RX_DELAY;
break;
+
default: /* the rest of the modes imply leaving delay as is. */
return 0;
}
- return phy_modify_paged(phydev, 0xd08, 0x11, RTL8211F_TX_DELAY, val);
+ ret = phy_modify_paged_changed(phydev, 0xd08, 0x11, RTL8211F_TX_DELAY,
+ val_txdly);
+ if (ret < 0) {
+ dev_err(dev, "Failed to update the TX delay register\n");
+ return ret;
+ } else if (ret) {
+ dev_dbg(dev,
+ "%s 2ns TX delay (and changing the value from pin-strapping RXD1 or the bootloader)\n",
+ val_txdly ? "Enabling" : "Disabling");
+ } else {
+ dev_dbg(dev,
+ "2ns TX delay was already %s (by pin-strapping RXD1 or bootloader configuration)\n",
+ val_txdly ? "enabled" : "disabled");
+ }
+
+ ret = phy_modify_paged_changed(phydev, 0xd08, 0x15, RTL8211F_RX_DELAY,
+ val_rxdly);
+ if (ret < 0) {
+ dev_err(dev, "Failed to update the RX delay register\n");
+ return ret;
+ } else if (ret) {
+ dev_dbg(dev,
+ "%s 2ns RX delay (and changing the value from pin-strapping RXD0 or the bootloader)\n",
+ val_rxdly ? "Enabling" : "Disabling");
+ } else {
+ dev_dbg(dev,
+ "2ns RX delay was already %s (by pin-strapping RXD0 or bootloader configuration)\n",
+ val_rxdly ? "enabled" : "disabled");
+ }
+
+ return 0;
}
static int rtl8211e_config_init(struct phy_device *phydev)
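The tri-state return of phy_modify_paged_changed() — negative on error, 0 when the register already held the requested value, positive when it actually changed — is what lets the driver distinguish a reconfiguration from matching pin-strapping. A condensed sketch with a hypothetical wrapper:

static int foo_set_tx_delay(struct phy_device *phydev, u16 val)
{
	int ret = phy_modify_paged_changed(phydev, 0xd08, 0x11,
					   RTL8211F_TX_DELAY, val);

	if (ret < 0)
		return ret;		/* MDIO access failed */

	if (ret)			/* value actually changed */
		phydev_dbg(phydev, "TX delay reconfigured\n");
	else				/* already as requested */
		phydev_dbg(phydev, "TX delay left as strapped\n");

	return 0;
}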
diff --git a/drivers/net/phy/sfp-bus.c b/drivers/net/phy/sfp-bus.c
index 5a72093ab6e7..d949ea7b4f8c 100644
--- a/drivers/net/phy/sfp-bus.c
+++ b/drivers/net/phy/sfp-bus.c
@@ -103,6 +103,7 @@ static const struct sfp_quirk *sfp_lookup_quirk(const struct sfp_eeprom_id *id)
return NULL;
}
+
/**
* sfp_parse_port() - Parse the EEPROM base ID, setting the port type
* @bus: a pointer to the &struct sfp_bus structure for the sfp module
@@ -124,35 +125,35 @@ int sfp_parse_port(struct sfp_bus *bus, const struct sfp_eeprom_id *id,
/* port is the physical connector, set this from the connector field. */
switch (id->base.connector) {
- case SFP_CONNECTOR_SC:
- case SFP_CONNECTOR_FIBERJACK:
- case SFP_CONNECTOR_LC:
- case SFP_CONNECTOR_MT_RJ:
- case SFP_CONNECTOR_MU:
- case SFP_CONNECTOR_OPTICAL_PIGTAIL:
+ case SFF8024_CONNECTOR_SC:
+ case SFF8024_CONNECTOR_FIBERJACK:
+ case SFF8024_CONNECTOR_LC:
+ case SFF8024_CONNECTOR_MT_RJ:
+ case SFF8024_CONNECTOR_MU:
+ case SFF8024_CONNECTOR_OPTICAL_PIGTAIL:
+ case SFF8024_CONNECTOR_MPO_1X12:
+ case SFF8024_CONNECTOR_MPO_2X16:
port = PORT_FIBRE;
break;
- case SFP_CONNECTOR_RJ45:
+ case SFF8024_CONNECTOR_RJ45:
port = PORT_TP;
break;
- case SFP_CONNECTOR_COPPER_PIGTAIL:
+ case SFF8024_CONNECTOR_COPPER_PIGTAIL:
port = PORT_DA;
break;
- case SFP_CONNECTOR_UNSPEC:
+ case SFF8024_CONNECTOR_UNSPEC:
if (id->base.e1000_base_t) {
port = PORT_TP;
break;
}
/* fallthrough */
- case SFP_CONNECTOR_SG: /* guess */
- case SFP_CONNECTOR_MPO_1X12:
- case SFP_CONNECTOR_MPO_2X16:
- case SFP_CONNECTOR_HSSDC_II:
- case SFP_CONNECTOR_NOSEPARATE:
- case SFP_CONNECTOR_MXC_2X16:
+ case SFF8024_CONNECTOR_SG: /* guess */
+ case SFF8024_CONNECTOR_HSSDC_II:
+ case SFF8024_CONNECTOR_NOSEPARATE:
+ case SFF8024_CONNECTOR_MXC_2X16:
port = PORT_OTHER;
break;
default:
@@ -179,6 +180,33 @@ int sfp_parse_port(struct sfp_bus *bus, const struct sfp_eeprom_id *id,
EXPORT_SYMBOL_GPL(sfp_parse_port);
/**
+ * sfp_may_have_phy() - indicate whether the module may have a PHY
+ * @bus: a pointer to the &struct sfp_bus structure for the sfp module
+ * @id: a pointer to the module's &struct sfp_eeprom_id
+ *
+ * Parse the EEPROM identification given in @id, and return whether
+ * this module may have a PHY.
+ */
+bool sfp_may_have_phy(struct sfp_bus *bus, const struct sfp_eeprom_id *id)
+{
+ if (id->base.e1000_base_t)
+ return true;
+
+ if (id->base.phys_id != SFF8024_ID_DWDM_SFP) {
+ switch (id->base.extended_cc) {
+ case SFF8024_ECC_10GBASE_T_SFI:
+ case SFF8024_ECC_10GBASE_T_SR:
+ case SFF8024_ECC_5GBASE_T:
+ case SFF8024_ECC_2_5GBASE_T:
+ return true;
+ }
+ }
+
+ return false;
+}
+EXPORT_SYMBOL_GPL(sfp_may_have_phy);
+
+/**
* sfp_parse_support() - Parse the eeprom id for supported link modes
* @bus: a pointer to the &struct sfp_bus structure for the sfp module
* @id: a pointer to the module's &struct sfp_eeprom_id
@@ -261,22 +289,33 @@ void sfp_parse_support(struct sfp_bus *bus, const struct sfp_eeprom_id *id,
}
switch (id->base.extended_cc) {
- case 0x00: /* Unspecified */
+ case SFF8024_ECC_UNSPEC:
break;
- case 0x02: /* 100Gbase-SR4 or 25Gbase-SR */
+ case SFF8024_ECC_100GBASE_SR4_25GBASE_SR:
phylink_set(modes, 100000baseSR4_Full);
phylink_set(modes, 25000baseSR_Full);
break;
- case 0x03: /* 100Gbase-LR4 or 25Gbase-LR */
- case 0x04: /* 100Gbase-ER4 or 25Gbase-ER */
+ case SFF8024_ECC_100GBASE_LR4_25GBASE_LR:
+ case SFF8024_ECC_100GBASE_ER4_25GBASE_ER:
phylink_set(modes, 100000baseLR4_ER4_Full);
break;
- case 0x0b: /* 100Gbase-CR4 or 25Gbase-CR CA-L */
- case 0x0c: /* 25Gbase-CR CA-S */
- case 0x0d: /* 25Gbase-CR CA-N */
+ case SFF8024_ECC_100GBASE_CR4:
phylink_set(modes, 100000baseCR4_Full);
+ /* fallthrough */
+ case SFF8024_ECC_25GBASE_CR_S:
+ case SFF8024_ECC_25GBASE_CR_N:
phylink_set(modes, 25000baseCR_Full);
break;
+ case SFF8024_ECC_10GBASE_T_SFI:
+ case SFF8024_ECC_10GBASE_T_SR:
+ phylink_set(modes, 10000baseT_Full);
+ break;
+ case SFF8024_ECC_5GBASE_T:
+ phylink_set(modes, 5000baseT_Full);
+ break;
+ case SFF8024_ECC_2_5GBASE_T:
+ phylink_set(modes, 2500baseT_Full);
+ break;
default:
dev_warn(bus->sfp_dev,
"Unknown/unsupported extended compliance code: 0x%02x\n",
@@ -301,7 +340,7 @@ void sfp_parse_support(struct sfp_bus *bus, const struct sfp_eeprom_id *id,
*/
if (bitmap_empty(modes, __ETHTOOL_LINK_MODE_MASK_NBITS)) {
/* If the encoding and bit rate allow 1000baseX */
- if (id->base.encoding == SFP_ENCODING_8B10B && br_nom &&
+ if (id->base.encoding == SFF8024_ENCODING_8B10B && br_nom &&
br_min <= 1300 && br_max >= 1200)
phylink_set(modes, 1000baseX_Full);
}
@@ -320,31 +359,27 @@ EXPORT_SYMBOL_GPL(sfp_parse_support);
/**
* sfp_select_interface() - Select appropriate phy_interface_t mode
* @bus: a pointer to the &struct sfp_bus structure for the sfp module
- * @id: a pointer to the module's &struct sfp_eeprom_id
* @link_modes: ethtool link modes mask
*
- * Derive the phy_interface_t mode for the information found in the
- * module's identifying EEPROM and the link modes mask. There is no
- * standard or defined way to derive this information, so we decide
- * based upon the link mode mask.
+ * Derive the phy_interface_t mode for the SFP module from the link
+ * modes mask.
*/
phy_interface_t sfp_select_interface(struct sfp_bus *bus,
- const struct sfp_eeprom_id *id,
unsigned long *link_modes)
{
if (phylink_test(link_modes, 10000baseCR_Full) ||
phylink_test(link_modes, 10000baseSR_Full) ||
phylink_test(link_modes, 10000baseLR_Full) ||
phylink_test(link_modes, 10000baseLRM_Full) ||
- phylink_test(link_modes, 10000baseER_Full))
- return PHY_INTERFACE_MODE_10GKR;
+ phylink_test(link_modes, 10000baseER_Full) ||
+ phylink_test(link_modes, 10000baseT_Full))
+ return PHY_INTERFACE_MODE_10GBASER;
if (phylink_test(link_modes, 2500baseX_Full))
return PHY_INTERFACE_MODE_2500BASEX;
- if (id->base.e1000_base_t ||
- id->base.e100_base_lx ||
- id->base.e100_base_fx)
+ if (phylink_test(link_modes, 1000baseT_Half) ||
+ phylink_test(link_modes, 1000baseT_Full))
return PHY_INTERFACE_MODE_SGMII;
if (phylink_test(link_modes, 1000baseX_Full))
@@ -705,6 +740,27 @@ void sfp_module_remove(struct sfp_bus *bus)
}
EXPORT_SYMBOL_GPL(sfp_module_remove);
+int sfp_module_start(struct sfp_bus *bus)
+{
+ const struct sfp_upstream_ops *ops = sfp_get_upstream_ops(bus);
+ int ret = 0;
+
+ if (ops && ops->module_start)
+ ret = ops->module_start(bus->upstream);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(sfp_module_start);
+
+void sfp_module_stop(struct sfp_bus *bus)
+{
+ const struct sfp_upstream_ops *ops = sfp_get_upstream_ops(bus);
+
+ if (ops && ops->module_stop)
+ ops->module_stop(bus->upstream);
+}
+EXPORT_SYMBOL_GPL(sfp_module_stop);
+
static void sfp_socket_clear(struct sfp_bus *bus)
{
bus->sfp_dev = NULL;
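A sketch of the call sequence an upstream is expected to follow with the reworked API, mirroring the phylink changes earlier in this patch (foo_handle_module is hypothetical; error handling elided):

static void foo_handle_module(struct sfp_bus *bus,
			      const struct sfp_eeprom_id *id,
			      unsigned long *support)
{
	phy_interface_t iface;

	sfp_parse_support(bus, id, support);

	/* Defer if a PHY may turn up later and refine the link modes. */
	if (sfp_may_have_phy(bus, id))
		return;

	iface = sfp_select_interface(bus, support);
	/* ... validate iface against the MAC and configure it ... */
}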
diff --git a/drivers/net/phy/sfp.c b/drivers/net/phy/sfp.c
index c0b9a8e4e65a..73c2969f11a4 100644
--- a/drivers/net/phy/sfp.c
+++ b/drivers/net/phy/sfp.c
@@ -59,8 +59,10 @@ enum {
SFP_DEV_UP,
SFP_S_DOWN = 0,
+ SFP_S_FAIL,
SFP_S_WAIT,
SFP_S_INIT,
+ SFP_S_INIT_PHY,
SFP_S_INIT_TX_FAULT,
SFP_S_WAIT_LOS,
SFP_S_LINK_UP,
@@ -122,8 +124,10 @@ static const char *event_to_str(unsigned short event)
static const char * const sm_state_strings[] = {
[SFP_S_DOWN] = "down",
+ [SFP_S_FAIL] = "fail",
[SFP_S_WAIT] = "wait",
[SFP_S_INIT] = "init",
+ [SFP_S_INIT_PHY] = "init_phy",
[SFP_S_INIT_TX_FAULT] = "init_tx_fault",
[SFP_S_WAIT_LOS] = "wait_los",
[SFP_S_LINK_UP] = "link_up",
@@ -155,10 +159,34 @@ static const enum gpiod_flags gpio_flags[] = {
GPIOD_ASIS,
};
-#define T_WAIT msecs_to_jiffies(50)
-#define T_INIT_JIFFIES msecs_to_jiffies(300)
-#define T_RESET_US 10
-#define T_FAULT_RECOVER msecs_to_jiffies(1000)
+/* t_start_up (SFF-8431) or t_init (SFF-8472) is the time required for a
+ * non-cooled module to initialise its laser safety circuitry. We wait
+ * an initial T_WAIT period before we check the tx fault to give any PHY
+ * on board (for a copper SFP) time to initialise.
+ */
+#define T_WAIT msecs_to_jiffies(50)
+#define T_START_UP msecs_to_jiffies(300)
+#define T_START_UP_BAD_GPON msecs_to_jiffies(60000)
+
+/* t_reset is the time required to assert the TX_DISABLE signal to reset
+ * an indicated TX_FAULT.
+ */
+#define T_RESET_US 10
+#define T_FAULT_RECOVER msecs_to_jiffies(1000)
+
+/* N_FAULT_INIT is the number of recovery attempts at module initialisation
+ * time. If the TX_FAULT signal is not deasserted after this number of
+ * attempts at clearing it, we decide that the module is faulty.
+ * N_FAULT is the same but after the module has initialised.
+ */
+#define N_FAULT_INIT 5
+#define N_FAULT 5
+
+/* T_PHY_RETRY is the time interval between attempts to probe the PHY.
+ * R_PHY_RETRY is the number of attempts.
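+ * Together these allow roughly 12 * 50ms = 600ms for a PHY to appear.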
+ */
+#define T_PHY_RETRY msecs_to_jiffies(50)
+#define R_PHY_RETRY 12
/* SFP module presence detection is poor: the three MOD DEF signals are
* the same length on the PCB, which means it's possible for MOD DEF 0 to
@@ -214,10 +242,12 @@ struct sfp {
unsigned char sm_mod_tries;
unsigned char sm_dev_state;
unsigned short sm_state;
- unsigned int sm_retries;
+ unsigned char sm_fault_retries;
+ unsigned char sm_phy_retries;
struct sfp_eeprom_id id;
unsigned int module_power_mW;
+ unsigned int module_t_start_up;
#if IS_ENABLED(CONFIG_HWMON)
struct sfp_diag diag;
@@ -231,7 +261,7 @@ struct sfp {
static bool sff_module_supported(const struct sfp_eeprom_id *id)
{
- return id->base.phys_id == SFP_PHYS_ID_SFF &&
+ return id->base.phys_id == SFF8024_ID_SFF_8472 &&
id->base.phys_ext_id == SFP_PHYS_EXT_ID_SFP;
}
@@ -242,7 +272,7 @@ static const struct sff_data sff_data = {
static bool sfp_module_supported(const struct sfp_eeprom_id *id)
{
- return id->base.phys_id == SFP_PHYS_ID_SFP &&
+ return id->base.phys_id == SFF8024_ID_SFP &&
id->base.phys_ext_id == SFP_PHYS_EXT_ID_SFP;
}
@@ -412,13 +442,20 @@ static unsigned int sfp_soft_get_state(struct sfp *sfp)
{
unsigned int state = 0;
u8 status;
+ int ret;
- if (sfp_read(sfp, true, SFP_STATUS, &status, sizeof(status)) ==
- sizeof(status)) {
+ ret = sfp_read(sfp, true, SFP_STATUS, &status, sizeof(status));
+ if (ret == sizeof(status)) {
if (status & SFP_STATUS_RX_LOS)
state |= SFP_F_LOS;
if (status & SFP_STATUS_TX_FAULT)
state |= SFP_F_TX_FAULT;
+ } else {
+ dev_err_ratelimited(sfp->dev,
+ "failed to read SFP soft status: %d\n",
+ ret);
+ /* Preserve the current state */
+ state = sfp->state;
}
return state & sfp->state_soft_mask;
@@ -1383,26 +1420,30 @@ static void sfp_sm_mod_next(struct sfp *sfp, unsigned int state,
static void sfp_sm_phy_detach(struct sfp *sfp)
{
- phy_stop(sfp->mod_phy);
sfp_remove_phy(sfp->sfp_bus);
phy_device_remove(sfp->mod_phy);
phy_device_free(sfp->mod_phy);
sfp->mod_phy = NULL;
}
-static void sfp_sm_probe_phy(struct sfp *sfp)
+static int sfp_sm_probe_phy(struct sfp *sfp, bool is_c45)
{
struct phy_device *phy;
int err;
- phy = mdiobus_scan(sfp->i2c_mii, SFP_PHY_ADDR);
- if (phy == ERR_PTR(-ENODEV)) {
- dev_info(sfp->dev, "no PHY detected\n");
- return;
- }
+ phy = get_phy_device(sfp->i2c_mii, SFP_PHY_ADDR, is_c45);
+ if (phy == ERR_PTR(-ENODEV))
+ return PTR_ERR(phy);
if (IS_ERR(phy)) {
dev_err(sfp->dev, "mdiobus scan returned %ld\n", PTR_ERR(phy));
- return;
+ return PTR_ERR(phy);
+ }
+
+ err = phy_device_register(phy);
+ if (err) {
+ phy_device_free(phy);
+ dev_err(sfp->dev, "phy_device_register failed: %d\n", err);
+ return err;
}
err = sfp_add_phy(sfp->sfp_bus, phy);
@@ -1410,11 +1451,12 @@ static void sfp_sm_probe_phy(struct sfp *sfp)
phy_device_remove(phy);
phy_device_free(phy);
dev_err(sfp->dev, "sfp_add_phy failed: %d\n", err);
- return;
+ return err;
}
sfp->mod_phy = phy;
- phy_start(phy);
+
+ return 0;
}
static void sfp_sm_link_up(struct sfp *sfp)
@@ -1464,7 +1506,7 @@ static bool sfp_los_event_inactive(struct sfp *sfp, unsigned int event)
static void sfp_sm_fault(struct sfp *sfp, unsigned int next_state, bool warn)
{
- if (sfp->sm_retries && !--sfp->sm_retries) {
+ if (sfp->sm_fault_retries && !--sfp->sm_fault_retries) {
dev_err(sfp->dev,
"module persistently indicates fault, disabling\n");
sfp_sm_next(sfp, SFP_S_TX_DISABLE, 0);
@@ -1476,21 +1518,35 @@ static void sfp_sm_fault(struct sfp *sfp, unsigned int next_state, bool warn)
}
}
-static void sfp_sm_probe_for_phy(struct sfp *sfp)
+/* Probe an SFP for a PHY device if the module supports copper - the PHY
+ * normally sits at I2C bus address 0x56, and may either be a clause 22
+ * or clause 45 PHY.
+ *
+ * Clause 22 copper SFP modules normally operate in Cisco SGMII mode with
+ * negotiation enabled, but some may be in 1000base-X - which is for the
+ * PHY driver to determine.
+ *
+ * Clause 45 copper SFP+ modules (10G) appear to switch their interface
+ * mode according to the negotiated line speed.
+ */
+static int sfp_sm_probe_for_phy(struct sfp *sfp)
{
- /* Setting the serdes link mode is guesswork: there's no
- * field in the EEPROM which indicates what mode should
- * be used.
- *
- * If it's a gigabit-only fiber module, it probably does
- * not have a PHY, so switch to 802.3z negotiation mode.
- * Otherwise, switch to SGMII mode (which is required to
- * support non-gigabit speeds) and probe for a PHY.
- */
- if (sfp->id.base.e1000_base_t ||
- sfp->id.base.e100_base_lx ||
- sfp->id.base.e100_base_fx)
- sfp_sm_probe_phy(sfp);
+ int err = 0;
+
+ switch (sfp->id.base.extended_cc) {
+ case SFF8024_ECC_10GBASE_T_SFI:
+ case SFF8024_ECC_10GBASE_T_SR:
+ case SFF8024_ECC_5GBASE_T:
+ case SFF8024_ECC_2_5GBASE_T:
+ err = sfp_sm_probe_phy(sfp, true);
+ break;
+
+ default:
+ if (sfp->id.base.e1000_base_t)
+ err = sfp_sm_probe_phy(sfp, false);
+ break;
+ }
+ return err;
}
static int sfp_module_parse_power(struct sfp *sfp)
@@ -1550,6 +1606,13 @@ static int sfp_sm_mod_hpower(struct sfp *sfp, bool enable)
return -EAGAIN;
}
+ /* DM7052 reports as a high power module, responds to reads (with
+ * all bytes 0xff) at 0x51 but does not accept writes. In any case,
+ * if the bit is already set, we're already in high power mode.
+ */
+ if (!!(val & BIT(0)) == enable)
+ return 0;
+
if (enable)
val |= BIT(0);
else
@@ -1655,6 +1718,12 @@ static int sfp_sm_mod_probe(struct sfp *sfp, bool report)
if (ret < 0)
return ret;
+ if (!memcmp(id.base.vendor_name, "ALCATELLUCENT ", 16) &&
+ !memcmp(id.base.vendor_pn, "3FE46541AA ", 16))
+ sfp->module_t_start_up = T_START_UP_BAD_GPON;
+ else
+ sfp->module_t_start_up = T_START_UP;
+
return 0;
}
@@ -1812,6 +1881,7 @@ static void sfp_sm_module(struct sfp *sfp, unsigned int event)
static void sfp_sm_main(struct sfp *sfp, unsigned int event)
{
unsigned long timeout;
+ int ret;
/* Some events are global */
if (sfp->sm_state != SFP_S_DOWN &&
@@ -1820,6 +1890,8 @@ static void sfp_sm_main(struct sfp *sfp, unsigned int event)
if (sfp->sm_state == SFP_S_LINK_UP &&
sfp->sm_dev_state == SFP_DEV_UP)
sfp_sm_link_down(sfp);
+ if (sfp->sm_state > SFP_S_INIT)
+ sfp_module_stop(sfp->sfp_bus);
if (sfp->mod_phy)
sfp_sm_phy_detach(sfp);
sfp_module_tx_disable(sfp);
@@ -1841,7 +1913,7 @@ static void sfp_sm_main(struct sfp *sfp, unsigned int event)
sfp_module_tx_enable(sfp);
/* Initialise the fault clearance retries */
- sfp->sm_retries = 5;
+ sfp->sm_fault_retries = N_FAULT_INIT;
/* We need to check the TX_FAULT state, which is not defined
* while TX_DISABLE is asserted. The earliest we want to do
@@ -1855,11 +1927,12 @@ static void sfp_sm_main(struct sfp *sfp, unsigned int event)
break;
if (sfp->state & SFP_F_TX_FAULT) {
- /* Wait t_init before indicating that the link is up,
- * provided the current state indicates no TX_FAULT. If
- * TX_FAULT clears before this time, that's fine too.
+ /* Wait up to t_init (SFF-8472) or t_start_up (SFF-8431)
+ * from the TX_DISABLE deassertion for the module to
+ * initialise, which is indicated by TX_FAULT
+ * deasserting.
*/
- timeout = T_INIT_JIFFIES;
+ timeout = sfp->module_t_start_up;
if (timeout > T_WAIT)
timeout -= T_WAIT;
else
@@ -1876,27 +1949,51 @@ static void sfp_sm_main(struct sfp *sfp, unsigned int event)
case SFP_S_INIT:
if (event == SFP_E_TIMEOUT && sfp->state & SFP_F_TX_FAULT) {
- /* TX_FAULT is still asserted after t_init, so assume
- * there is a fault.
+ /* TX_FAULT is still asserted after t_init or
+ * t_start_up, so assume there is a fault.
*/
sfp_sm_fault(sfp, SFP_S_INIT_TX_FAULT,
- sfp->sm_retries == 5);
+ sfp->sm_fault_retries == N_FAULT_INIT);
} else if (event == SFP_E_TIMEOUT || event == SFP_E_TX_CLEAR) {
- init_done: /* TX_FAULT deasserted or we timed out with TX_FAULT
- * clear. Probe for the PHY and check the LOS state.
- */
- sfp_sm_probe_for_phy(sfp);
- sfp_sm_link_check_los(sfp);
+ init_done:
+ sfp->sm_phy_retries = R_PHY_RETRY;
+ goto phy_probe;
+ }
+ break;
- /* Reset the fault retry count */
- sfp->sm_retries = 5;
+ case SFP_S_INIT_PHY:
+ if (event != SFP_E_TIMEOUT)
+ break;
+ phy_probe:
+ /* TX_FAULT deasserted or we timed out with TX_FAULT
+ * clear. Probe for the PHY and check the LOS state.
+ */
+ ret = sfp_sm_probe_for_phy(sfp);
+ if (ret == -ENODEV) {
+ if (--sfp->sm_phy_retries) {
+ sfp_sm_next(sfp, SFP_S_INIT_PHY, T_PHY_RETRY);
+ break;
+ } else {
+ dev_info(sfp->dev, "no PHY detected\n");
+ }
+ } else if (ret) {
+ sfp_sm_next(sfp, SFP_S_FAIL, 0);
+ break;
}
+ if (sfp_module_start(sfp->sfp_bus)) {
+ sfp_sm_next(sfp, SFP_S_FAIL, 0);
+ break;
+ }
+ sfp_sm_link_check_los(sfp);
+
+ /* Reset the fault retry count */
+ sfp->sm_fault_retries = N_FAULT;
break;
case SFP_S_INIT_TX_FAULT:
if (event == SFP_E_TIMEOUT) {
sfp_module_tx_fault_reset(sfp);
- sfp_sm_next(sfp, SFP_S_INIT, T_INIT_JIFFIES);
+ sfp_sm_next(sfp, SFP_S_INIT, sfp->module_t_start_up);
}
break;
@@ -1920,7 +2017,7 @@ static void sfp_sm_main(struct sfp *sfp, unsigned int event)
case SFP_S_TX_FAULT:
if (event == SFP_E_TIMEOUT) {
sfp_module_tx_fault_reset(sfp);
- sfp_sm_next(sfp, SFP_S_REINIT, T_INIT_JIFFIES);
+ sfp_sm_next(sfp, SFP_S_REINIT, sfp->module_t_start_up);
}
break;
diff --git a/drivers/net/phy/sfp.h b/drivers/net/phy/sfp.h
index 64f54b0bbd8c..b83f70526270 100644
--- a/drivers/net/phy/sfp.h
+++ b/drivers/net/phy/sfp.h
@@ -22,6 +22,8 @@ void sfp_link_up(struct sfp_bus *bus);
void sfp_link_down(struct sfp_bus *bus);
int sfp_module_insert(struct sfp_bus *bus, const struct sfp_eeprom_id *id);
void sfp_module_remove(struct sfp_bus *bus);
+int sfp_module_start(struct sfp_bus *bus);
+void sfp_module_stop(struct sfp_bus *bus);
int sfp_link_configure(struct sfp_bus *bus, const struct sfp_eeprom_id *id);
struct sfp_bus *sfp_register_socket(struct device *dev, struct sfp *sfp,
const struct sfp_socket_ops *ops);
diff --git a/drivers/net/phy/uPD60620.c b/drivers/net/phy/uPD60620.c
index a32b3fd8a370..38834347a427 100644
--- a/drivers/net/phy/uPD60620.c
+++ b/drivers/net/phy/uPD60620.c
@@ -68,12 +68,7 @@ static int upd60620_read_status(struct phy_device *phydev)
mii_lpa_to_linkmode_lpa_t(phydev->lp_advertising,
phy_state);
- if (phydev->duplex == DUPLEX_FULL) {
- if (phy_state & LPA_PAUSE_CAP)
- phydev->pause = 1;
- if (phy_state & LPA_PAUSE_ASYM)
- phydev->asym_pause = 1;
- }
+ phy_resolve_aneg_pause(phydev);
}
}
return 0;
diff --git a/drivers/net/ppp/ppp_async.c b/drivers/net/ppp/ppp_async.c
index a7b9cf3269bf..29a0917a81e6 100644
--- a/drivers/net/ppp/ppp_async.c
+++ b/drivers/net/ppp/ppp_async.c
@@ -874,15 +874,15 @@ ppp_async_input(struct asyncppp *ap, const unsigned char *buf,
skb = dev_alloc_skb(ap->mru + PPP_HDRLEN + 2);
if (!skb)
goto nomem;
- ap->rpkt = skb;
- }
- if (skb->len == 0) {
- /* Try to get the payload 4-byte aligned.
- * This should match the
- * PPP_ALLSTATIONS/PPP_UI/compressed tests in
- * process_input_packet, but we do not have
- * enough chars here to test buf[1] and buf[2].
- */
+ ap->rpkt = skb;
+ }
+ if (skb->len == 0) {
+ /* Try to get the payload 4-byte aligned.
+ * This should match the
+ * PPP_ALLSTATIONS/PPP_UI/compressed tests in
+ * process_input_packet, but we do not have
+ * enough chars here to test buf[1] and buf[2].
+ */
if (buf[0] != PPP_ALLSTATIONS)
skb_reserve(skb, 2 + (buf[0] & 1));
}
diff --git a/drivers/net/ppp/ppp_generic.c b/drivers/net/ppp/ppp_generic.c
index 3bf8a8b42983..22cc2cb9d878 100644
--- a/drivers/net/ppp/ppp_generic.c
+++ b/drivers/net/ppp/ppp_generic.c
@@ -296,8 +296,6 @@ static struct class *ppp_class;
/* per net-namespace data */
static inline struct ppp_net *ppp_pernet(struct net *net)
{
- BUG_ON(!net);
-
return net_generic(net, ppp_net_id);
}
diff --git a/drivers/net/ppp/pptp.c b/drivers/net/ppp/pptp.c
index e1fabb3e3246..acccb747aeda 100644
--- a/drivers/net/ppp/pptp.c
+++ b/drivers/net/ppp/pptp.c
@@ -155,7 +155,7 @@ static int pptp_xmit(struct ppp_channel *chan, struct sk_buff *skb)
opt->dst_addr.sin_addr.s_addr,
opt->src_addr.sin_addr.s_addr,
0, 0, IPPROTO_GRE,
- RT_TOS(0), 0);
+ RT_TOS(0), sk->sk_bound_dev_if);
if (IS_ERR(rt))
goto tx_error;
@@ -444,7 +444,8 @@ static int pptp_connect(struct socket *sock, struct sockaddr *uservaddr,
opt->dst_addr.sin_addr.s_addr,
opt->src_addr.sin_addr.s_addr,
0, 0,
- IPPROTO_GRE, RT_CONN_FLAGS(sk), 0);
+ IPPROTO_GRE, RT_CONN_FLAGS(sk),
+ sk->sk_bound_dev_if);
if (IS_ERR(rt)) {
error = -EHOSTUNREACH;
goto end;
diff --git a/drivers/net/slip/slip.c b/drivers/net/slip/slip.c
index 2a91c192659f..6f4d7ba8b109 100644
--- a/drivers/net/slip/slip.c
+++ b/drivers/net/slip/slip.c
@@ -452,12 +452,19 @@ static void slip_transmit(struct work_struct *work)
*/
static void slip_write_wakeup(struct tty_struct *tty)
{
- struct slip *sl = tty->disc_data;
+ struct slip *sl;
+
+ rcu_read_lock();
+ sl = rcu_dereference(tty->disc_data);
+ if (!sl)
+ goto out;
schedule_work(&sl->tx_work);
+out:
+ rcu_read_unlock();
}
-static void sl_tx_timeout(struct net_device *dev)
+static void sl_tx_timeout(struct net_device *dev, unsigned int txqueue)
{
struct slip *sl = netdev_priv(dev);
@@ -882,10 +889,11 @@ static void slip_close(struct tty_struct *tty)
return;
spin_lock_bh(&sl->lock);
- tty->disc_data = NULL;
+ rcu_assign_pointer(tty->disc_data, NULL);
sl->tty = NULL;
spin_unlock_bh(&sl->lock);
+ synchronize_rcu();
flush_work(&sl->tx_work);
/* VSV = very important to remove timers */
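The slip changes above follow the standard RCU retire pattern for tty->disc_data; a distilled sketch of both sides, with the ordering assumptions spelled out in comments (foo_* names hypothetical):

/* Writer (close path): unpublish first, then wait out all readers
 * before tearing down anything a reader might still use.
 */
static void foo_retire(struct tty_struct *tty, struct slip *sl)
{
	rcu_assign_pointer(tty->disc_data, NULL);
	synchronize_rcu();	/* all rcu_read_lock() readers are done */
	flush_work(&sl->tx_work);
}

/* Reader (wakeup path): dereference only inside an RCU read section. */
static void foo_wakeup(struct tty_struct *tty)
{
	struct slip *sl;

	rcu_read_lock();
	sl = rcu_dereference(tty->disc_data);
	if (sl)
		schedule_work(&sl->tx_work);
	rcu_read_unlock();
}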
diff --git a/drivers/net/tap.c b/drivers/net/tap.c
index a6d63665ad03..1f4bdd94407a 100644
--- a/drivers/net/tap.c
+++ b/drivers/net/tap.c
@@ -341,6 +341,7 @@ rx_handler_result_t tap_handle_frame(struct sk_buff **pskb)
features |= tap->tap_features;
if (netif_needs_gso(skb, features)) {
struct sk_buff *segs = __skb_gso_segment(skb, features, false);
+ struct sk_buff *next;
if (IS_ERR(segs))
goto drop;
@@ -352,16 +353,13 @@ rx_handler_result_t tap_handle_frame(struct sk_buff **pskb)
}
consume_skb(skb);
- while (segs) {
- struct sk_buff *nskb = segs->next;
-
- segs->next = NULL;
- if (ptr_ring_produce(&q->ring, segs)) {
- kfree_skb(segs);
- kfree_skb_list(nskb);
+ skb_list_walk_safe(segs, skb, next) {
+ skb_mark_not_on_list(skb);
+ if (ptr_ring_produce(&q->ring, skb)) {
+ kfree_skb(skb);
+ kfree_skb_list(next);
break;
}
- segs = nskb;
}
} else {
/* If we receive a partial checksum and the tap side
diff --git a/drivers/net/tun.c b/drivers/net/tun.c
index 683d371e6e82..650c937ed56b 100644
--- a/drivers/net/tun.c
+++ b/drivers/net/tun.c
@@ -1718,7 +1718,7 @@ static struct sk_buff *tun_build_skb(struct tun_struct *tun,
if (err < 0)
goto err_xdp;
if (err == XDP_REDIRECT)
- xdp_do_flush_map();
+ xdp_do_flush();
if (err != XDP_PASS)
goto out;
@@ -1936,6 +1936,10 @@ drop:
if (ret != XDP_PASS) {
rcu_read_unlock();
local_bh_enable();
+ if (frags) {
+ tfile->napi.skb = NULL;
+ mutex_unlock(&tfile->napi_mutex);
+ }
return total_len;
}
}
@@ -2549,7 +2553,7 @@ static int tun_sendmsg(struct socket *sock, struct msghdr *m, size_t total_len)
}
if (flush)
- xdp_do_flush_map();
+ xdp_do_flush();
rcu_read_unlock();
local_bh_enable();
diff --git a/drivers/net/usb/ax88172a.c b/drivers/net/usb/ax88172a.c
index af3994e0853b..4e514f5d7c6c 100644
--- a/drivers/net/usb/ax88172a.c
+++ b/drivers/net/usb/ax88172a.c
@@ -39,17 +39,6 @@ static int asix_mdio_bus_write(struct mii_bus *bus, int phy_id, int regnum,
return 0;
}
-static int ax88172a_ioctl(struct net_device *net, struct ifreq *rq, int cmd)
-{
- if (!netif_running(net))
- return -EINVAL;
-
- if (!net->phydev)
- return -ENODEV;
-
- return phy_mii_ioctl(net->phydev, rq, cmd);
-}
-
/* set MAC link settings according to information from phylib */
static void ax88172a_adjust_link(struct net_device *netdev)
{
@@ -134,7 +123,7 @@ static const struct net_device_ops ax88172a_netdev_ops = {
.ndo_get_stats64 = usbnet_get_stats64,
.ndo_set_mac_address = asix_set_mac_address,
.ndo_validate_addr = eth_validate_addr,
- .ndo_do_ioctl = ax88172a_ioctl,
+ .ndo_do_ioctl = phy_do_ioctl_running,
.ndo_set_rx_mode = asix_set_multicast,
};
diff --git a/drivers/net/usb/catc.c b/drivers/net/usb/catc.c
index 1e58702c737f..d387bc7ac1b6 100644
--- a/drivers/net/usb/catc.c
+++ b/drivers/net/usb/catc.c
@@ -447,7 +447,7 @@ static netdev_tx_t catc_start_xmit(struct sk_buff *skb,
return NETDEV_TX_OK;
}
-static void catc_tx_timeout(struct net_device *netdev)
+static void catc_tx_timeout(struct net_device *netdev, unsigned int txqueue)
{
struct catc *catc = netdev_priv(netdev);
diff --git a/drivers/net/usb/ch9200.c b/drivers/net/usb/ch9200.c
index 9df3c1ffff35..d7f3b70d5477 100644
--- a/drivers/net/usb/ch9200.c
+++ b/drivers/net/usb/ch9200.c
@@ -111,8 +111,8 @@ static int control_read(struct usbnet *dev,
request_type = (USB_DIR_IN | USB_TYPE_VENDOR |
USB_RECIP_DEVICE);
- netdev_dbg(dev->net, "Control_read() index=0x%02x size=%d\n",
- index, size);
+ netdev_dbg(dev->net, "%s() index=0x%02x size=%d\n",
+ __func__, index, size);
buf = kmalloc(size, GFP_KERNEL);
if (!buf) {
@@ -130,8 +130,6 @@ static int control_read(struct usbnet *dev,
err = -EINVAL;
kfree(buf);
- return err;
-
err_out:
return err;
}
@@ -151,8 +149,8 @@ static int control_write(struct usbnet *dev, unsigned char request,
request_type = (USB_DIR_OUT | USB_TYPE_VENDOR |
USB_RECIP_DEVICE);
- netdev_dbg(dev->net, "Control_write() index=0x%02x size=%d\n",
- index, size);
+ netdev_dbg(dev->net, "%s() index=0x%02x size=%d\n",
+ __func__, index, size);
if (data) {
buf = kmemdup(data, size, GFP_KERNEL);
@@ -181,8 +179,8 @@ static int ch9200_mdio_read(struct net_device *netdev, int phy_id, int loc)
struct usbnet *dev = netdev_priv(netdev);
unsigned char buff[2];
- netdev_dbg(netdev, "ch9200_mdio_read phy_id:%02x loc:%02x\n",
- phy_id, loc);
+ netdev_dbg(netdev, "%s phy_id:%02x loc:%02x\n",
+ __func__, phy_id, loc);
if (phy_id != 0)
return -ENODEV;
@@ -199,8 +197,8 @@ static void ch9200_mdio_write(struct net_device *netdev,
struct usbnet *dev = netdev_priv(netdev);
unsigned char buff[2];
- netdev_dbg(netdev, "ch9200_mdio_write() phy_id=%02x loc:%02x\n",
- phy_id, loc);
+ netdev_dbg(netdev, "%s() phy_id=%02x loc:%02x\n",
+ __func__, phy_id, loc);
if (phy_id != 0)
return;
@@ -219,8 +217,8 @@ static int ch9200_link_reset(struct usbnet *dev)
mii_check_media(&dev->mii, 1, 1);
mii_ethtool_gset(&dev->mii, &ecmd);
- netdev_dbg(dev->net, "link_reset() speed:%d duplex:%d\n",
- ecmd.speed, ecmd.duplex);
+ netdev_dbg(dev->net, "%s() speed:%d duplex:%d\n",
+ __func__, ecmd.speed, ecmd.duplex);
return 0;
}
@@ -309,7 +307,7 @@ static int get_mac_address(struct usbnet *dev, unsigned char *data)
unsigned char mac_addr[0x06];
int rd_mac_len = 0;
- netdev_dbg(dev->net, "get_mac_address:\n\tusbnet VID:%0x PID:%0x\n",
+ netdev_dbg(dev->net, "%s:\n\tusbnet VID:%0x PID:%0x\n", __func__,
le16_to_cpu(dev->udev->descriptor.idVendor),
le16_to_cpu(dev->udev->descriptor.idProduct));
diff --git a/drivers/net/usb/hso.c b/drivers/net/usb/hso.c
index ca827802f291..417e42c9fd03 100644
--- a/drivers/net/usb/hso.c
+++ b/drivers/net/usb/hso.c
@@ -820,7 +820,7 @@ static const struct ethtool_ops ops = {
};
/* called when a packet did not ack after watchdog timeout */
-static void hso_net_tx_timeout(struct net_device *net)
+static void hso_net_tx_timeout(struct net_device *net, unsigned int txqueue)
{
struct hso_net *odev = netdev_priv(net);
diff --git a/drivers/net/usb/ipheth.c b/drivers/net/usb/ipheth.c
index 8c01fbf68a89..c792d65dd7b4 100644
--- a/drivers/net/usb/ipheth.c
+++ b/drivers/net/usb/ipheth.c
@@ -400,7 +400,7 @@ static int ipheth_tx(struct sk_buff *skb, struct net_device *net)
return NETDEV_TX_OK;
}
-static void ipheth_tx_timeout(struct net_device *net)
+static void ipheth_tx_timeout(struct net_device *net, unsigned int txqueue)
{
struct ipheth_device *dev = netdev_priv(net);
diff --git a/drivers/net/usb/kaweth.c b/drivers/net/usb/kaweth.c
index 8e210ba4a313..ed01dc964c99 100644
--- a/drivers/net/usb/kaweth.c
+++ b/drivers/net/usb/kaweth.c
@@ -894,7 +894,7 @@ static void kaweth_async_set_rx_mode(struct kaweth_device *kaweth)
/****************************************************************
* kaweth_tx_timeout
****************************************************************/
-static void kaweth_tx_timeout(struct net_device *net)
+static void kaweth_tx_timeout(struct net_device *net, unsigned int txqueue)
{
struct kaweth_device *kaweth = netdev_priv(net);
diff --git a/drivers/net/usb/lan78xx.c b/drivers/net/usb/lan78xx.c
index fb4781080d6d..eccbf4cd7149 100644
--- a/drivers/net/usb/lan78xx.c
+++ b/drivers/net/usb/lan78xx.c
@@ -20,6 +20,7 @@
#include <linux/mdio.h>
#include <linux/phy.h>
#include <net/ip6_checksum.h>
+#include <net/vxlan.h>
#include <linux/interrupt.h>
#include <linux/irqdomain.h>
#include <linux/irq.h>
@@ -1663,14 +1664,6 @@ static const struct ethtool_ops lan78xx_ethtool_ops = {
.get_regs = lan78xx_get_regs,
};
-static int lan78xx_ioctl(struct net_device *netdev, struct ifreq *rq, int cmd)
-{
- if (!netif_running(netdev))
- return -EINVAL;
-
- return phy_mii_ioctl(netdev->phydev, rq, cmd);
-}
-
static void lan78xx_init_mac_address(struct lan78xx_net *dev)
{
u32 addr_lo, addr_hi;
@@ -3660,7 +3653,7 @@ static void lan78xx_disconnect(struct usb_interface *intf)
usb_put_dev(udev);
}
-static void lan78xx_tx_timeout(struct net_device *net)
+static void lan78xx_tx_timeout(struct net_device *net, unsigned int txqueue)
{
struct lan78xx_net *dev = netdev_priv(net);
@@ -3668,6 +3661,19 @@ static void lan78xx_tx_timeout(struct net_device *net)
tasklet_schedule(&dev->bh);
}
+static netdev_features_t lan78xx_features_check(struct sk_buff *skb,
+ struct net_device *netdev,
+ netdev_features_t features)
+{
+ if (skb->len + TX_OVERHEAD > MAX_SINGLE_PACKET_SIZE)
+ features &= ~NETIF_F_GSO_MASK;
+
+ features = vlan_features_check(skb, features);
+ features = vxlan_features_check(skb, features);
+
+ return features;
+}
+
static const struct net_device_ops lan78xx_netdev_ops = {
.ndo_open = lan78xx_open,
.ndo_stop = lan78xx_stop,
@@ -3676,11 +3682,12 @@ static const struct net_device_ops lan78xx_netdev_ops = {
.ndo_change_mtu = lan78xx_change_mtu,
.ndo_set_mac_address = lan78xx_set_mac_addr,
.ndo_validate_addr = eth_validate_addr,
- .ndo_do_ioctl = lan78xx_ioctl,
+ .ndo_do_ioctl = phy_do_ioctl_running,
.ndo_set_rx_mode = lan78xx_set_multicast,
.ndo_set_features = lan78xx_set_features,
.ndo_vlan_rx_add_vid = lan78xx_vlan_rx_add_vid,
.ndo_vlan_rx_kill_vid = lan78xx_vlan_rx_kill_vid,
+ .ndo_features_check = lan78xx_features_check,
};
static void lan78xx_stat_monitor(struct timer_list *t)
@@ -3750,6 +3757,7 @@ static int lan78xx_probe(struct usb_interface *intf,
/* MTU range: 68 - 9000 */
netdev->max_mtu = MAX_SINGLE_PACKET_SIZE;
+ netif_set_gso_max_size(netdev, MAX_SINGLE_PACKET_SIZE - MAX_HEADER);
dev->ep_blkin = (intf->cur_altsetting)->endpoint + 0;
dev->ep_blkout = (intf->cur_altsetting)->endpoint + 1;
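The new lan78xx_features_check() and the netif_set_gso_max_size() call attack the same limit from two sides: the hardware moves at most MAX_SINGLE_PACKET_SIZE bytes per transfer, so the GSO size cap is lowered up front and, as a backstop, GSO is stripped from any skb whose frame plus TX_OVERHEAD would still overflow. The hook is consulted by the core on every transmit, roughly like this (condensed from netif_skb_features(); not the exact kernel body):

    /* Condensed sketch of how the core applies ndo_features_check. */
    static netdev_features_t sketch_netif_skb_features(struct sk_buff *skb)
    {
            struct net_device *dev = skb->dev;
            netdev_features_t features = dev->features;

            if (dev->netdev_ops->ndo_features_check)
                    features &= dev->netdev_ops->ndo_features_check(skb, dev,
                                                                    features);
            return features;
    }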
diff --git a/drivers/net/usb/pegasus.c b/drivers/net/usb/pegasus.c
index f7d117d80cfb..8783e2ab3ec0 100644
--- a/drivers/net/usb/pegasus.c
+++ b/drivers/net/usb/pegasus.c
@@ -693,7 +693,7 @@ static void intr_callback(struct urb *urb)
"can't resubmit interrupt urb, %d\n", res);
}
-static void pegasus_tx_timeout(struct net_device *net)
+static void pegasus_tx_timeout(struct net_device *net, unsigned int txqueue)
{
pegasus_t *pegasus = netdev_priv(net);
netif_warn(pegasus, timer, net, "tx timeout\n");
diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c
index 4196c0e32740..9485c8d1de8a 100644
--- a/drivers/net/usb/qmi_wwan.c
+++ b/drivers/net/usb/qmi_wwan.c
@@ -1062,6 +1062,7 @@ static const struct usb_device_id products[] = {
{QMI_QUIRK_QUECTEL_DYNCFG(0x2c7c, 0x0125)}, /* Quectel EC25, EC20 R2.0 Mini PCIe */
{QMI_QUIRK_QUECTEL_DYNCFG(0x2c7c, 0x0306)}, /* Quectel EP06/EG06/EM06 */
{QMI_QUIRK_QUECTEL_DYNCFG(0x2c7c, 0x0512)}, /* Quectel EG12/EM12 */
+ {QMI_QUIRK_QUECTEL_DYNCFG(0x2c7c, 0x0800)}, /* Quectel RM500Q-GL */
/* 3. Combined interface devices matching on interface number */
{QMI_FIXED_INTF(0x0408, 0xea42, 4)}, /* Yota / Megafon M100-1 */
diff --git a/drivers/net/usb/r8152.c b/drivers/net/usb/r8152.c
index c5ebf35d2488..e8cd8c05b156 100644
--- a/drivers/net/usb/r8152.c
+++ b/drivers/net/usb/r8152.c
@@ -31,7 +31,7 @@
#define NETNEXT_VERSION "11"
/* Information for net */
-#define NET_VERSION "10"
+#define NET_VERSION "11"
#define DRIVER_VERSION "v1." NETNEXT_VERSION "." NET_VERSION
#define DRIVER_AUTHOR "Realtek linux nic maintainers <nic_swsd@realtek.com>"
@@ -68,6 +68,7 @@
#define PLA_LED_FEATURE 0xdd92
#define PLA_PHYAR 0xde00
#define PLA_BOOT_CTRL 0xe004
+#define PLA_LWAKE_CTRL_REG 0xe007
#define PLA_GPHY_INTR_IMR 0xe022
#define PLA_EEE_CR 0xe040
#define PLA_EEEP_CR 0xe080
@@ -95,6 +96,7 @@
#define PLA_TALLYCNT 0xe890
#define PLA_SFF_STS_7 0xe8de
#define PLA_PHYSTATUS 0xe908
+#define PLA_CONFIG6 0xe90a /* CONFIG6 */
#define PLA_BP_BA 0xfc26
#define PLA_BP_0 0xfc28
#define PLA_BP_1 0xfc2a
@@ -107,6 +109,7 @@
#define PLA_BP_EN 0xfc38
#define USB_USB2PHY 0xb41e
+#define USB_SSPHYLINK1 0xb426
#define USB_SSPHYLINK2 0xb428
#define USB_U2P3_CTRL 0xb460
#define USB_CSR_DUMMY1 0xb464
@@ -300,6 +303,9 @@
#define LINK_ON_WAKE_EN 0x0010
#define LINK_OFF_WAKE_EN 0x0008
+/* PLA_CONFIG6 */
+#define LANWAKE_CLR_EN BIT(0)
+
/* PLA_CONFIG5 */
#define BWF_EN 0x0040
#define MWF_EN 0x0020
@@ -312,6 +318,7 @@
/* PLA_PHY_PWR */
#define TX_10M_IDLE_EN 0x0080
#define PFM_PWM_SWITCH 0x0040
+#define TEST_IO_OFF BIT(4)
/* PLA_MAC_PWR_CTRL */
#define D3_CLK_GATED_EN 0x00004000
@@ -324,6 +331,7 @@
#define MAC_CLK_SPDWN_EN BIT(15)
/* PLA_MAC_PWR_CTRL3 */
+#define PLA_MCU_SPDWN_EN BIT(14)
#define PKT_AVAIL_SPDWN_EN 0x0100
#define SUSPEND_SPDWN_EN 0x0004
#define U1U2_SPDWN_EN 0x0002
@@ -354,6 +362,9 @@
/* PLA_BOOT_CTRL */
#define AUTOLOAD_DONE 0x0002
+/* PLA_LWAKE_CTRL_REG */
+#define LANWAKE_PIN BIT(7)
+
/* PLA_SUSPEND_FLAG */
#define LINK_CHG_EVENT BIT(0)
@@ -365,13 +376,18 @@
#define DEBUG_LTSSM 0x0082
/* PLA_EXTRA_STATUS */
+#define CUR_LINK_OK BIT(15)
#define U3P3_CHECK_EN BIT(7) /* RTL_VER_05 only */
#define LINK_CHANGE_FLAG BIT(8)
+#define POLL_LINK_CHG BIT(0)
/* USB_USB2PHY */
#define USB2PHY_SUSPEND 0x0001
#define USB2PHY_L1 0x0002
+/* USB_SSPHYLINK1 */
+#define DELAY_PHY_PWR_CHG BIT(1)
+
/* USB_SSPHYLINK2 */
#define pwd_dn_scale_mask 0x3ffe
#define pwd_dn_scale(x) ((x) << 1)
@@ -1897,8 +1913,8 @@ static void r8152_csum_workaround(struct r8152 *tp, struct sk_buff *skb,
{
if (skb_shinfo(skb)->gso_size) {
netdev_features_t features = tp->netdev->features;
+ struct sk_buff *segs, *seg, *next;
struct sk_buff_head seg_list;
- struct sk_buff *segs, *nskb;
features &= ~(NETIF_F_SG | NETIF_F_IPV6_CSUM | NETIF_F_TSO6);
segs = skb_gso_segment(skb, features);
@@ -1907,12 +1923,10 @@ static void r8152_csum_workaround(struct r8152 *tp, struct sk_buff *skb,
__skb_queue_head_init(&seg_list);
- do {
- nskb = segs;
- segs = segs->next;
- nskb->next = NULL;
- __skb_queue_tail(&seg_list, nskb);
- } while (segs);
+ skb_list_walk_safe(segs, seg, next) {
+ skb_mark_not_on_list(seg);
+ __skb_queue_tail(&seg_list, seg);
+ }
skb_queue_splice(&seg_list, list);
dev_kfree_skb(skb);
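The open-coded do/while that detached each GSO segment is replaced by the skb_list_walk_safe() iterator plus skb_mark_not_on_list(), which stays safe while the current entry is unlinked inside the loop body. The iterator expands to roughly:

    /* Approximate expansion of skb_list_walk_safe() from linux/skbuff.h:
     * walk an skb->next chain while the current entry may be removed.
     */
    #define sketch_skb_list_walk_safe(first, skb, next_skb)                  \
            for ((skb) = (first), (next_skb) = (skb) ? (skb)->next : NULL;   \
                 (skb);                                                      \
                 (skb) = (next_skb), (next_skb) = (skb) ? (skb)->next : NULL)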
@@ -2507,7 +2521,7 @@ static void rtl_drop_queued_tx(struct r8152 *tp)
}
}
-static void rtl8152_tx_timeout(struct net_device *netdev)
+static void rtl8152_tx_timeout(struct net_device *netdev, unsigned int txqueue)
{
struct r8152 *tp = netdev_priv(netdev);
@@ -2863,6 +2877,17 @@ static int rtl8153_enable(struct r8152 *tp)
r8153_set_rx_early_timeout(tp);
r8153_set_rx_early_size(tp);
+ if (tp->version == RTL_VER_09) {
+ u32 ocp_data;
+
+ ocp_data = ocp_read_word(tp, MCU_TYPE_USB, USB_FW_TASK);
+ ocp_data &= ~FC_PATCH_TASK;
+ ocp_write_word(tp, MCU_TYPE_USB, USB_FW_TASK, ocp_data);
+ usleep_range(1000, 2000);
+ ocp_data |= FC_PATCH_TASK;
+ ocp_write_word(tp, MCU_TYPE_USB, USB_FW_TASK, ocp_data);
+ }
+
return rtl_enable(tp);
}
@@ -3376,8 +3401,8 @@ static void rtl8153b_runtime_enable(struct r8152 *tp, bool enable)
r8153b_ups_en(tp, false);
r8153_queue_wake(tp, false);
rtl_runtime_suspend_enable(tp, false);
- r8153_u2p3en(tp, true);
- r8153b_u1u2en(tp, true);
+ if (tp->udev->speed != USB_SPEED_HIGH)
+ r8153b_u1u2en(tp, true);
}
}
@@ -4675,7 +4700,6 @@ static void r8153b_hw_phy_cfg(struct r8152 *tp)
r8153_aldps_en(tp, true);
r8152b_enable_fc(tp);
- r8153_u2p3en(tp, true);
set_bit(PHY_RESET, &tp->flags);
}
@@ -4954,6 +4978,8 @@ static void rtl8152_down(struct r8152 *tp)
static void rtl8153_up(struct r8152 *tp)
{
+ u32 ocp_data;
+
if (test_bit(RTL8152_UNPLUG, &tp->flags))
return;
@@ -4961,6 +4987,19 @@ static void rtl8153_up(struct r8152 *tp)
r8153_u2p3en(tp, false);
r8153_aldps_en(tp, false);
r8153_first_init(tp);
+
+ ocp_data = ocp_read_byte(tp, MCU_TYPE_PLA, PLA_CONFIG6);
+ ocp_data |= LANWAKE_CLR_EN;
+ ocp_write_byte(tp, MCU_TYPE_PLA, PLA_CONFIG6, ocp_data);
+
+ ocp_data = ocp_read_byte(tp, MCU_TYPE_PLA, PLA_LWAKE_CTRL_REG);
+ ocp_data &= ~LANWAKE_PIN;
+ ocp_write_byte(tp, MCU_TYPE_PLA, PLA_LWAKE_CTRL_REG, ocp_data);
+
+ ocp_data = ocp_read_word(tp, MCU_TYPE_USB, USB_SSPHYLINK1);
+ ocp_data &= ~DELAY_PHY_PWR_CHG;
+ ocp_write_word(tp, MCU_TYPE_USB, USB_SSPHYLINK1, ocp_data);
+
r8153_aldps_en(tp, true);
switch (tp->version) {
@@ -4979,11 +5018,17 @@ static void rtl8153_up(struct r8152 *tp)
static void rtl8153_down(struct r8152 *tp)
{
+ u32 ocp_data;
+
if (test_bit(RTL8152_UNPLUG, &tp->flags)) {
rtl_drop_queued_tx(tp);
return;
}
+ ocp_data = ocp_read_byte(tp, MCU_TYPE_PLA, PLA_CONFIG6);
+ ocp_data &= ~LANWAKE_CLR_EN;
+ ocp_write_byte(tp, MCU_TYPE_PLA, PLA_CONFIG6, ocp_data);
+
r8153_u1u2en(tp, false);
r8153_u2p3en(tp, false);
r8153_power_cut_en(tp, false);
@@ -4994,6 +5039,8 @@ static void rtl8153_down(struct r8152 *tp)
static void rtl8153b_up(struct r8152 *tp)
{
+ u32 ocp_data;
+
if (test_bit(RTL8152_UNPLUG, &tp->flags))
return;
@@ -5004,18 +5051,29 @@ static void rtl8153b_up(struct r8152 *tp)
r8153_first_init(tp);
ocp_write_dword(tp, MCU_TYPE_USB, USB_RX_BUF_TH, RX_THR_B);
+ ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_MAC_PWR_CTRL3);
+ ocp_data &= ~PLA_MCU_SPDWN_EN;
+ ocp_write_word(tp, MCU_TYPE_PLA, PLA_MAC_PWR_CTRL3, ocp_data);
+
r8153_aldps_en(tp, true);
- r8153_u2p3en(tp, true);
- r8153b_u1u2en(tp, true);
+
+ if (tp->udev->speed != USB_SPEED_HIGH)
+ r8153b_u1u2en(tp, true);
}
static void rtl8153b_down(struct r8152 *tp)
{
+ u32 ocp_data;
+
if (test_bit(RTL8152_UNPLUG, &tp->flags)) {
rtl_drop_queued_tx(tp);
return;
}
+ ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_MAC_PWR_CTRL3);
+ ocp_data |= PLA_MCU_SPDWN_EN;
+ ocp_write_word(tp, MCU_TYPE_PLA, PLA_MAC_PWR_CTRL3, ocp_data);
+
r8153b_u1u2en(tp, false);
r8153_u2p3en(tp, false);
r8153b_power_cut_en(tp, false);
@@ -5387,6 +5445,16 @@ static void r8153_init(struct r8152 *tp)
else
ocp_data |= DYNAMIC_BURST;
ocp_write_byte(tp, MCU_TYPE_USB, USB_CSR_DUMMY1, ocp_data);
+
+ r8153_queue_wake(tp, false);
+
+ ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_EXTRA_STATUS);
+ if (rtl8152_get_speed(tp) & LINK_STATUS)
+ ocp_data |= CUR_LINK_OK;
+ else
+ ocp_data &= ~CUR_LINK_OK;
+ ocp_data |= POLL_LINK_CHG;
+ ocp_write_word(tp, MCU_TYPE_PLA, PLA_EXTRA_STATUS, ocp_data);
}
ocp_data = ocp_read_byte(tp, MCU_TYPE_USB, USB_CSR_DUMMY2);
@@ -5416,10 +5484,19 @@ static void r8153_init(struct r8152 *tp)
ocp_write_word(tp, MCU_TYPE_USB, USB_CONNECT_TIMER, 0x0001);
r8153_power_cut_en(tp, false);
+ rtl_runtime_suspend_enable(tp, false);
r8153_u1u2en(tp, true);
r8153_mac_clk_spd(tp, false);
usb_enable_lpm(tp->udev);
+ ocp_data = ocp_read_byte(tp, MCU_TYPE_PLA, PLA_CONFIG6);
+ ocp_data |= LANWAKE_CLR_EN;
+ ocp_write_byte(tp, MCU_TYPE_PLA, PLA_CONFIG6, ocp_data);
+
+ ocp_data = ocp_read_byte(tp, MCU_TYPE_PLA, PLA_LWAKE_CTRL_REG);
+ ocp_data &= ~LANWAKE_PIN;
+ ocp_write_byte(tp, MCU_TYPE_PLA, PLA_LWAKE_CTRL_REG, ocp_data);
+
/* rx aggregation */
ocp_data = ocp_read_word(tp, MCU_TYPE_USB, USB_USB_CTRL);
ocp_data &= ~(RX_AGG_DISABLE | RX_ZERO_EN);
@@ -5484,7 +5561,17 @@ static void r8153b_init(struct r8152 *tp)
r8153b_ups_en(tp, false);
r8153_queue_wake(tp, false);
rtl_runtime_suspend_enable(tp, false);
- r8153b_u1u2en(tp, true);
+
+ ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_EXTRA_STATUS);
+ if (rtl8152_get_speed(tp) & LINK_STATUS)
+ ocp_data |= CUR_LINK_OK;
+ else
+ ocp_data &= ~CUR_LINK_OK;
+ ocp_data |= POLL_LINK_CHG;
+ ocp_write_word(tp, MCU_TYPE_PLA, PLA_EXTRA_STATUS, ocp_data);
+
+ if (tp->udev->speed != USB_SPEED_HIGH)
+ r8153b_u1u2en(tp, true);
usb_enable_lpm(tp->udev);
/* MAC clock speed down */
@@ -5492,6 +5579,19 @@ static void r8153b_init(struct r8152 *tp)
ocp_data |= MAC_CLK_SPDWN_EN;
ocp_write_word(tp, MCU_TYPE_PLA, PLA_MAC_PWR_CTRL2, ocp_data);
+ ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_MAC_PWR_CTRL3);
+ ocp_data &= ~PLA_MCU_SPDWN_EN;
+ ocp_write_word(tp, MCU_TYPE_PLA, PLA_MAC_PWR_CTRL3, ocp_data);
+
+ if (tp->version == RTL_VER_09) {
+ /* Disable Test IO for 32QFN */
+ if (ocp_read_byte(tp, MCU_TYPE_PLA, 0xdc00) & BIT(5)) {
+ ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_PHY_PWR);
+ ocp_data |= TEST_IO_OFF;
+ ocp_write_word(tp, MCU_TYPE_PLA, PLA_PHY_PWR, ocp_data);
+ }
+ }
+
set_bit(GREEN_ETHERNET, &tp->flags);
/* rx aggregation */
@@ -6597,6 +6697,9 @@ static int rtl8152_probe(struct usb_interface *intf,
return -ENODEV;
}
+ if (intf->cur_altsetting->desc.bNumEndpoints < 3)
+ return -ENODEV;
+
usb_reset_device(udev);
netdev = alloc_etherdev(sizeof(struct r8152));
if (!netdev) {
@@ -6704,6 +6807,11 @@ static int rtl8152_probe(struct usb_interface *intf,
intf->needs_remote_wakeup = 1;
+ if (!rtl_can_wakeup(tp))
+ __rtl_set_wol(tp, 0);
+ else
+ tp->saved_wolopts = __rtl_get_wol(tp);
+
tp->rtl_ops.init(tp);
#if IS_BUILTIN(CONFIG_USB_RTL8152)
/* Retry in case request_firmware() is not ready yet. */
@@ -6721,10 +6829,6 @@ static int rtl8152_probe(struct usb_interface *intf,
goto out1;
}
- if (!rtl_can_wakeup(tp))
- __rtl_set_wol(tp, 0);
-
- tp->saved_wolopts = __rtl_get_wol(tp);
if (tp->saved_wolopts)
device_set_wakeup_enable(&udev->dev, true);
else
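Almost every new r8152 hunk above follows the same read-modify-write idiom against the MAC's on-chip (OCP) registers, using accessors the driver already defines. The recurring shape, as a sketch:

    /* Sketch of the recurring OCP read-modify-write pattern. */
    static void sketch_pla_update_word(struct r8152 *tp, u16 reg,
                                       u32 set, u32 clear)
    {
            u32 ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, reg);

            ocp_data &= ~clear;
            ocp_data |= set;
            ocp_write_word(tp, MCU_TYPE_PLA, reg, ocp_data);
    }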
diff --git a/drivers/net/usb/rtl8150.c b/drivers/net/usb/rtl8150.c
index 13e51ccf0214..e7c630d37589 100644
--- a/drivers/net/usb/rtl8150.c
+++ b/drivers/net/usb/rtl8150.c
@@ -655,7 +655,7 @@ static void disable_net_traffic(rtl8150_t * dev)
set_registers(dev, CR, 1, &cr);
}
-static void rtl8150_tx_timeout(struct net_device *netdev)
+static void rtl8150_tx_timeout(struct net_device *netdev, unsigned int txqueue)
{
rtl8150_t *dev = netdev_priv(netdev);
dev_warn(&netdev->dev, "Tx timeout.\n");
diff --git a/drivers/net/usb/usbnet.c b/drivers/net/usb/usbnet.c
index 9ce6d30576dd..5ec97def3513 100644
--- a/drivers/net/usb/usbnet.c
+++ b/drivers/net/usb/usbnet.c
@@ -1293,7 +1293,7 @@ static void tx_complete (struct urb *urb)
/*-------------------------------------------------------------------------*/
-void usbnet_tx_timeout (struct net_device *net)
+void usbnet_tx_timeout (struct net_device *net, unsigned int txqueue)
{
struct usbnet *dev = netdev_priv(net);
diff --git a/drivers/net/veth.c b/drivers/net/veth.c
index a552df37a347..8cdc4415fa70 100644
--- a/drivers/net/veth.c
+++ b/drivers/net/veth.c
@@ -377,6 +377,7 @@ static int veth_xdp_xmit(struct net_device *dev, int n,
unsigned int max_len;
struct veth_rq *rq;
+ rcu_read_lock();
if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK)) {
ret = -EINVAL;
goto drop;
@@ -418,11 +419,14 @@ static int veth_xdp_xmit(struct net_device *dev, int n,
if (flags & XDP_XMIT_FLUSH)
__veth_xdp_flush(rq);
- if (likely(!drops))
+ if (likely(!drops)) {
+ rcu_read_unlock();
return n;
+ }
ret = n - drops;
drop:
+ rcu_read_unlock();
atomic64_add(drops, &priv->dropped);
return ret;
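The veth fix is purely about scope: veth_xdp_xmit() dereferences RCU-protected state (the peer device and its receive queues) but can be entered outside any RCU read-side section, so the whole function is now bracketed, and both the early success return and the drop path unlock. The locking shape, with peer_ok() and send_frames() as hypothetical stand-ins:

    /* Sketch: every exit path pairs with the rcu_read_lock(). */
    static int sketch_xdp_xmit(int n)
    {
            int ret;

            rcu_read_lock();
            if (!peer_ok()) {
                    ret = -ENXIO;
                    goto out;
            }
            ret = send_frames(n);
    out:
            rcu_read_unlock();
            return ret;
    }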
@@ -769,7 +773,7 @@ static int veth_poll(struct napi_struct *napi, int budget)
if (xdp_xmit & VETH_XDP_TX)
veth_xdp_flush(rq->dev, &bq);
if (xdp_xmit & VETH_XDP_REDIR)
- xdp_do_flush_map();
+ xdp_do_flush();
xdp_clear_return_frame_no_direct();
return done;
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index 4d7d5434cc5d..2fe7a3188282 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -501,7 +501,7 @@ static int virtnet_xdp_xmit(struct net_device *dev,
/* Only allow ndo_xdp_xmit if XDP is loaded on dev, as this
* indicate XDP resources have been successfully allocated.
*/
- xdp_prog = rcu_dereference(rq->xdp_prog);
+ xdp_prog = rcu_access_pointer(rq->xdp_prog);
if (!xdp_prog)
return -ENXIO;
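rcu_access_pointer() is the right primitive here because the value is only compared against NULL, never dereferenced; unlike rcu_dereference(), it carries no requirement to be inside a read-side critical section. A minimal contrast, with gp and use() as hypothetical names:

    struct foo __rcu *gp;   /* hypothetical RCU-protected pointer */

    static void sketch_contrast(void)
    {
            struct foo *p;
            bool attached;

            /* Fine anywhere: only tests whether something is attached. */
            attached = rcu_access_pointer(gp) != NULL;

            /* Needs rcu_read_lock(): the result is actually used. */
            rcu_read_lock();
            p = rcu_dereference(gp);
            if (p)
                    use(p); /* hypothetical consumer */
            rcu_read_unlock();
    }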
@@ -1432,7 +1432,7 @@ static int virtnet_poll(struct napi_struct *napi, int budget)
virtqueue_napi_complete(napi, rq->vq, received);
if (xdp_xmit & VIRTIO_XDP_REDIR)
- xdp_do_flush_map();
+ xdp_do_flush();
if (xdp_xmit & VIRTIO_XDP_TX) {
sq = virtnet_xdp_sq(vi);
diff --git a/drivers/net/vmxnet3/vmxnet3_drv.c b/drivers/net/vmxnet3/vmxnet3_drv.c
index 216acf37ca7c..18f152fa0068 100644
--- a/drivers/net/vmxnet3/vmxnet3_drv.c
+++ b/drivers/net/vmxnet3/vmxnet3_drv.c
@@ -3198,7 +3198,7 @@ vmxnet3_free_intr_resources(struct vmxnet3_adapter *adapter)
static void
-vmxnet3_tx_timeout(struct net_device *netdev)
+vmxnet3_tx_timeout(struct net_device *netdev, unsigned int txqueue)
{
struct vmxnet3_adapter *adapter = netdev_priv(netdev);
adapter->tx_timeout_count++;
diff --git a/drivers/net/vmxnet3/vmxnet3_ethtool.c b/drivers/net/vmxnet3/vmxnet3_ethtool.c
index 0a38c76688ab..1e4b9ba70983 100644
--- a/drivers/net/vmxnet3/vmxnet3_ethtool.c
+++ b/drivers/net/vmxnet3/vmxnet3_ethtool.c
@@ -555,10 +555,8 @@ vmxnet3_set_ringparam(struct net_device *netdev,
}
if (VMXNET3_VERSION_GE_3(adapter)) {
- if (param->rx_mini_pending < 0 ||
- param->rx_mini_pending > VMXNET3_RXDATA_DESC_MAX_SIZE) {
+ if (param->rx_mini_pending > VMXNET3_RXDATA_DESC_MAX_SIZE)
return -EINVAL;
- }
} else if (param->rx_mini_pending != 0) {
return -EINVAL;
}
diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c
index 1c5159dcc720..d3b08b76b1ec 100644
--- a/drivers/net/vxlan.c
+++ b/drivers/net/vxlan.c
@@ -1060,6 +1060,7 @@ static int vxlan_fdb_parse(struct nlattr *tb[], struct vxlan_dev *vxlan,
return err;
} else {
union vxlan_addr *remote = &vxlan->default_dst.remote_ip;
+
if (remote->sa.sa_family == AF_INET) {
ip->sin.sin_addr.s_addr = htonl(INADDR_ANY);
ip->sa.sa_family = AF_INET;
@@ -1696,7 +1697,7 @@ static int vxlan_rcv(struct sock *sk, struct sk_buff *skb)
if (__iptunnel_pull_header(skb, VXLAN_HLEN, protocol, raw_proto,
!net_eq(vxlan->net, dev_net(vxlan->dev))))
- goto drop;
+ goto drop;
if (vxlan_collect_metadata(vs)) {
struct metadata_dst *tun_dst;
@@ -4128,30 +4129,30 @@ static int vxlan_fill_info(struct sk_buff *skb, const struct net_device *dev)
nla_put_u8(skb, IFLA_VXLAN_DF, vxlan->cfg.df) ||
nla_put_be32(skb, IFLA_VXLAN_LABEL, vxlan->cfg.label) ||
nla_put_u8(skb, IFLA_VXLAN_LEARNING,
- !!(vxlan->cfg.flags & VXLAN_F_LEARN)) ||
+ !!(vxlan->cfg.flags & VXLAN_F_LEARN)) ||
nla_put_u8(skb, IFLA_VXLAN_PROXY,
- !!(vxlan->cfg.flags & VXLAN_F_PROXY)) ||
+ !!(vxlan->cfg.flags & VXLAN_F_PROXY)) ||
nla_put_u8(skb, IFLA_VXLAN_RSC,
!!(vxlan->cfg.flags & VXLAN_F_RSC)) ||
nla_put_u8(skb, IFLA_VXLAN_L2MISS,
- !!(vxlan->cfg.flags & VXLAN_F_L2MISS)) ||
+ !!(vxlan->cfg.flags & VXLAN_F_L2MISS)) ||
nla_put_u8(skb, IFLA_VXLAN_L3MISS,
- !!(vxlan->cfg.flags & VXLAN_F_L3MISS)) ||
+ !!(vxlan->cfg.flags & VXLAN_F_L3MISS)) ||
nla_put_u8(skb, IFLA_VXLAN_COLLECT_METADATA,
!!(vxlan->cfg.flags & VXLAN_F_COLLECT_METADATA)) ||
nla_put_u32(skb, IFLA_VXLAN_AGEING, vxlan->cfg.age_interval) ||
nla_put_u32(skb, IFLA_VXLAN_LIMIT, vxlan->cfg.addrmax) ||
nla_put_be16(skb, IFLA_VXLAN_PORT, vxlan->cfg.dst_port) ||
nla_put_u8(skb, IFLA_VXLAN_UDP_CSUM,
- !(vxlan->cfg.flags & VXLAN_F_UDP_ZERO_CSUM_TX)) ||
+ !(vxlan->cfg.flags & VXLAN_F_UDP_ZERO_CSUM_TX)) ||
nla_put_u8(skb, IFLA_VXLAN_UDP_ZERO_CSUM6_TX,
- !!(vxlan->cfg.flags & VXLAN_F_UDP_ZERO_CSUM6_TX)) ||
+ !!(vxlan->cfg.flags & VXLAN_F_UDP_ZERO_CSUM6_TX)) ||
nla_put_u8(skb, IFLA_VXLAN_UDP_ZERO_CSUM6_RX,
- !!(vxlan->cfg.flags & VXLAN_F_UDP_ZERO_CSUM6_RX)) ||
+ !!(vxlan->cfg.flags & VXLAN_F_UDP_ZERO_CSUM6_RX)) ||
nla_put_u8(skb, IFLA_VXLAN_REMCSUM_TX,
- !!(vxlan->cfg.flags & VXLAN_F_REMCSUM_TX)) ||
+ !!(vxlan->cfg.flags & VXLAN_F_REMCSUM_TX)) ||
nla_put_u8(skb, IFLA_VXLAN_REMCSUM_RX,
- !!(vxlan->cfg.flags & VXLAN_F_REMCSUM_RX)))
+ !!(vxlan->cfg.flags & VXLAN_F_REMCSUM_RX)))
goto nla_put_failure;
if (nla_put(skb, IFLA_VXLAN_PORT_RANGE, sizeof(ports), &ports))
diff --git a/drivers/net/wan/Kconfig b/drivers/net/wan/Kconfig
index dd1a147f2971..4530840e15ef 100644
--- a/drivers/net/wan/Kconfig
+++ b/drivers/net/wan/Kconfig
@@ -315,7 +315,8 @@ config DSCC4_PCI_RST
config IXP4XX_HSS
tristate "Intel IXP4xx HSS (synchronous serial port) support"
- depends on HDLC && ARM && ARCH_IXP4XX && IXP4XX_NPE && IXP4XX_QMGR
+ depends on HDLC && IXP4XX_NPE && IXP4XX_QMGR
+ depends on ARCH_IXP4XX
help
Say Y here if you want to use built-in HSS ports
on the IXP4xx processor.
diff --git a/drivers/net/wan/cosa.c b/drivers/net/wan/cosa.c
index af539151d663..5d6532ad6b78 100644
--- a/drivers/net/wan/cosa.c
+++ b/drivers/net/wan/cosa.c
@@ -268,7 +268,7 @@ static int cosa_net_attach(struct net_device *dev, unsigned short encoding,
unsigned short parity);
static int cosa_net_open(struct net_device *d);
static int cosa_net_close(struct net_device *d);
-static void cosa_net_timeout(struct net_device *d);
+static void cosa_net_timeout(struct net_device *d, unsigned int txqueue);
static netdev_tx_t cosa_net_tx(struct sk_buff *skb, struct net_device *d);
static char *cosa_net_setup_rx(struct channel_data *channel, int size);
static int cosa_net_rx_done(struct channel_data *channel);
@@ -670,7 +670,7 @@ static netdev_tx_t cosa_net_tx(struct sk_buff *skb,
return NETDEV_TX_OK;
}
-static void cosa_net_timeout(struct net_device *dev)
+static void cosa_net_timeout(struct net_device *dev, unsigned int txqueue)
{
struct channel_data *chan = dev_to_chan(dev);
diff --git a/drivers/net/wan/farsync.c b/drivers/net/wan/farsync.c
index 1901ec7948d8..7916efce7188 100644
--- a/drivers/net/wan/farsync.c
+++ b/drivers/net/wan/farsync.c
@@ -2239,7 +2239,7 @@ fst_attach(struct net_device *dev, unsigned short encoding, unsigned short parit
}
static void
-fst_tx_timeout(struct net_device *dev)
+fst_tx_timeout(struct net_device *dev, unsigned int txqueue)
{
struct fst_port_info *port;
struct fst_card_info *card;
diff --git a/drivers/net/wan/fsl_ucc_hdlc.c b/drivers/net/wan/fsl_ucc_hdlc.c
index ca0f3be2b6bf..3998cac49d7f 100644
--- a/drivers/net/wan/fsl_ucc_hdlc.c
+++ b/drivers/net/wan/fsl_ucc_hdlc.c
@@ -73,7 +73,7 @@ static struct ucc_tdm_info utdm_primary_info = {
},
};
-static struct ucc_tdm_info utdm_info[MAX_HDLC_NUM];
+static struct ucc_tdm_info utdm_info[UCC_MAX_NUM];
static int uhdlc_init(struct ucc_hdlc_private *priv)
{
@@ -635,11 +635,9 @@ static irqreturn_t ucc_hdlc_irq_handler(int irq, void *dev_id)
struct ucc_hdlc_private *priv = (struct ucc_hdlc_private *)dev_id;
struct net_device *dev = priv->ndev;
struct ucc_fast_private *uccf;
- struct ucc_tdm_info *ut_info;
u32 ucce;
u32 uccm;
- ut_info = priv->ut_info;
uccf = priv->uccf;
ucce = ioread32be(uccf->p_ucce);
@@ -872,7 +870,6 @@ static void resume_clk_config(struct ucc_hdlc_private *priv)
static int uhdlc_suspend(struct device *dev)
{
struct ucc_hdlc_private *priv = dev_get_drvdata(dev);
- struct ucc_tdm_info *ut_info;
struct ucc_fast __iomem *uf_regs;
if (!priv)
@@ -884,7 +881,6 @@ static int uhdlc_suspend(struct device *dev)
netif_device_detach(priv->ndev);
napi_disable(&priv->napi);
- ut_info = priv->ut_info;
uf_regs = priv->uf_regs;
/* backup gumr guemr*/
@@ -917,7 +913,7 @@ static int uhdlc_resume(struct device *dev)
struct ucc_fast __iomem *uf_regs;
struct ucc_fast_private *uccf;
struct ucc_fast_info *uf_info;
- int ret, i;
+ int i;
u32 cecr_subblock;
u16 bd_status;
@@ -962,16 +958,16 @@ static int uhdlc_resume(struct device *dev)
/* Write to QE CECR, UCCx channel to Stop Transmission */
cecr_subblock = ucc_fast_get_qe_cr_subblock(uf_info->ucc_num);
- ret = qe_issue_cmd(QE_STOP_TX, cecr_subblock,
- (u8)QE_CR_PROTOCOL_UNSPECIFIED, 0);
+ qe_issue_cmd(QE_STOP_TX, cecr_subblock,
+ (u8)QE_CR_PROTOCOL_UNSPECIFIED, 0);
/* Set UPSMR normal mode */
iowrite32be(0, &uf_regs->upsmr);
/* init parameter base */
cecr_subblock = ucc_fast_get_qe_cr_subblock(uf_info->ucc_num);
- ret = qe_issue_cmd(QE_ASSIGN_PAGE_TO_DEVICE, cecr_subblock,
- QE_CR_PROTOCOL_UNSPECIFIED, priv->ucc_pram_offset);
+ qe_issue_cmd(QE_ASSIGN_PAGE_TO_DEVICE, cecr_subblock,
+ QE_CR_PROTOCOL_UNSPECIFIED, priv->ucc_pram_offset);
priv->ucc_pram = (struct ucc_hdlc_param __iomem *)
qe_muram_addr(priv->ucc_pram_offset);
@@ -1039,7 +1035,7 @@ static const struct dev_pm_ops uhdlc_pm_ops = {
#define HDLC_PM_OPS NULL
#endif
-static void uhdlc_tx_timeout(struct net_device *ndev)
+static void uhdlc_tx_timeout(struct net_device *ndev, unsigned int txqueue)
{
netdev_err(ndev, "%s\n", __func__);
}
diff --git a/drivers/net/wan/hdlc_cisco.c b/drivers/net/wan/hdlc_cisco.c
index a030f5aa6b95..d8cba3625c18 100644
--- a/drivers/net/wan/hdlc_cisco.c
+++ b/drivers/net/wan/hdlc_cisco.c
@@ -75,7 +75,7 @@ static int cisco_hard_header(struct sk_buff *skb, struct net_device *dev,
{
struct hdlc_header *data;
#ifdef DEBUG_HARD_HEADER
- printk(KERN_DEBUG "%s: cisco_hard_header called\n", dev->name);
+ netdev_dbg(dev, "%s called\n", __func__);
#endif
skb_push(skb, sizeof(struct hdlc_header));
@@ -101,7 +101,7 @@ static void cisco_keepalive_send(struct net_device *dev, u32 type,
skb = dev_alloc_skb(sizeof(struct hdlc_header) +
sizeof(struct cisco_packet));
if (!skb) {
- netdev_warn(dev, "Memory squeeze on cisco_keepalive_send()\n");
+ netdev_warn(dev, "Memory squeeze on %s()\n", __func__);
return;
}
skb_reserve(skb, 4);
diff --git a/drivers/net/wan/hdlc_x25.c b/drivers/net/wan/hdlc_x25.c
index 5643675ff724..c84536b03aa8 100644
--- a/drivers/net/wan/hdlc_x25.c
+++ b/drivers/net/wan/hdlc_x25.c
@@ -21,8 +21,17 @@
#include <linux/skbuff.h>
#include <net/x25device.h>
+struct x25_state {
+ x25_hdlc_proto settings;
+};
+
static int x25_ioctl(struct net_device *dev, struct ifreq *ifr);
+static struct x25_state *state(hdlc_device *hdlc)
+{
+ return hdlc->state;
+}
+
/* These functions are callbacks called by LAPB layer */
static void x25_connect_disconnect(struct net_device *dev, int reason, int code)
@@ -62,11 +71,12 @@ static int x25_data_indication(struct net_device *dev, struct sk_buff *skb)
{
unsigned char *ptr;
- skb_push(skb, 1);
-
if (skb_cow(skb, 1))
return NET_RX_DROP;
+ skb_push(skb, 1);
+ skb_reset_network_header(skb);
+
ptr = skb->data;
*ptr = X25_IFACE_DATA;
@@ -79,6 +89,13 @@ static int x25_data_indication(struct net_device *dev, struct sk_buff *skb)
static void x25_data_transmit(struct net_device *dev, struct sk_buff *skb)
{
hdlc_device *hdlc = dev_to_hdlc(dev);
+
+ skb_reset_network_header(skb);
+ skb->protocol = hdlc_type_trans(skb, dev);
+
+ if (dev_nit_active(dev))
+ dev_queue_xmit_nit(skb, dev);
+
hdlc->xmit(skb, dev); /* Ignore return value :-( */
}
@@ -93,6 +110,7 @@ static netdev_tx_t x25_xmit(struct sk_buff *skb, struct net_device *dev)
switch (skb->data[0]) {
case X25_IFACE_DATA: /* Data to be transmitted */
skb_pull(skb, 1);
+ skb_reset_network_header(skb);
if ((result = lapb_data_request(dev, skb)) != LAPB_OK)
dev_kfree_skb(skb);
return NETDEV_TX_OK;
@@ -131,7 +149,6 @@ static netdev_tx_t x25_xmit(struct sk_buff *skb, struct net_device *dev)
static int x25_open(struct net_device *dev)
{
- int result;
static const struct lapb_register_struct cb = {
.connect_confirmation = x25_connected,
.connect_indication = x25_connected,
@@ -140,10 +157,33 @@ static int x25_open(struct net_device *dev)
.data_indication = x25_data_indication,
.data_transmit = x25_data_transmit,
};
+ hdlc_device *hdlc = dev_to_hdlc(dev);
+ struct lapb_parms_struct params;
+ int result;
result = lapb_register(dev, &cb);
if (result != LAPB_OK)
return result;
+
+ result = lapb_getparms(dev, &params);
+ if (result != LAPB_OK)
+ return result;
+
+ if (state(hdlc)->settings.dce)
+ params.mode = params.mode | LAPB_DCE;
+
+ if (state(hdlc)->settings.modulo == 128)
+ params.mode = params.mode | LAPB_EXTENDED;
+
+ params.window = state(hdlc)->settings.window;
+ params.t1 = state(hdlc)->settings.t1;
+ params.t2 = state(hdlc)->settings.t2;
+ params.n2 = state(hdlc)->settings.n2;
+
+ result = lapb_setparms(dev, &params);
+ if (result != LAPB_OK)
+ return result;
+
return 0;
}
@@ -186,7 +226,10 @@ static struct hdlc_proto proto = {
static int x25_ioctl(struct net_device *dev, struct ifreq *ifr)
{
+ x25_hdlc_proto __user *x25_s = ifr->ifr_settings.ifs_ifsu.x25;
+ const size_t size = sizeof(x25_hdlc_proto);
hdlc_device *hdlc = dev_to_hdlc(dev);
+ x25_hdlc_proto new_settings;
int result;
switch (ifr->ifr_settings.type) {
@@ -194,7 +237,13 @@ static int x25_ioctl(struct net_device *dev, struct ifreq *ifr)
if (dev_to_hdlc(dev)->proto != &proto)
return -EINVAL;
ifr->ifr_settings.type = IF_PROTO_X25;
- return 0; /* return protocol only, no settable parameters */
+ if (ifr->ifr_settings.size < size) {
+ ifr->ifr_settings.size = size; /* data size wanted */
+ return -ENOBUFS;
+ }
+ if (copy_to_user(x25_s, &state(hdlc)->settings, size))
+ return -EFAULT;
+ return 0;
case IF_PROTO_X25:
if (!capable(CAP_NET_ADMIN))
@@ -203,12 +252,46 @@ static int x25_ioctl(struct net_device *dev, struct ifreq *ifr)
if (dev->flags & IFF_UP)
return -EBUSY;
+ /* backward compatibility */
+ if (ifr->ifr_settings.size == 0) {
+ new_settings.dce = 0;
+ new_settings.modulo = 8;
+ new_settings.window = 7;
+ new_settings.t1 = 3;
+ new_settings.t2 = 1;
+ new_settings.n2 = 10;
+ } else {
+ if (copy_from_user(&new_settings, x25_s, size))
+ return -EFAULT;
+
+ if ((new_settings.dce != 0 &&
+ new_settings.dce != 1) ||
+ (new_settings.modulo != 8 &&
+ new_settings.modulo != 128) ||
+ new_settings.window < 1 ||
+ (new_settings.modulo == 8 &&
+ new_settings.window > 7) ||
+ (new_settings.modulo == 128 &&
+ new_settings.window > 127) ||
+ new_settings.t1 < 1 ||
+ new_settings.t1 > 255 ||
+ new_settings.t2 < 1 ||
+ new_settings.t2 > 255 ||
+ new_settings.n2 < 1 ||
+ new_settings.n2 > 255)
+ return -EINVAL;
+ }
+
result=hdlc->attach(dev, ENCODING_NRZ,PARITY_CRC16_PR1_CCITT);
if (result)
return result;
- if ((result = attach_hdlc_protocol(dev, &proto, 0)))
+ if ((result = attach_hdlc_protocol(dev, &proto,
+ sizeof(struct x25_state))))
return result;
+
+ memcpy(&state(hdlc)->settings, &new_settings, size);
dev->type = ARPHRD_X25;
call_netdevice_notifiers(NETDEV_POST_TYPE_CHANGE, dev);
netif_dormant_off(dev);
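With settings now flowing through ifr_settings in both directions, userspace can query and program the LAPB parameters. A hypothetical sketch of enabling modulo-128 operation over SIOCWANDEV, using the x25_hdlc_proto layout and the ifs_ifsu.x25 member this series introduces (header names are an assumption of where the uapi definitions land):

    /* Hypothetical userspace usage; field names per this series. */
    #include <string.h>
    #include <sys/ioctl.h>
    #include <linux/if.h>
    #include <linux/sockios.h>
    #include <linux/hdlc/ioctl.h>

    static int set_x25_modulo128(int fd, const char *ifname)
    {
            x25_hdlc_proto x25 = {
                    .dce = 0, .modulo = 128, .window = 7,
                    .t1 = 3, .t2 = 1, .n2 = 10,
            };
            struct ifreq ifr;

            memset(&ifr, 0, sizeof(ifr));
            strncpy(ifr.ifr_name, ifname, IFNAMSIZ - 1);
            ifr.ifr_settings.type = IF_PROTO_X25;
            ifr.ifr_settings.size = sizeof(x25);
            ifr.ifr_settings.ifs_ifsu.x25 = &x25;
            return ioctl(fd, SIOCWANDEV, &ifr);
    }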
diff --git a/drivers/net/wan/ixp4xx_hss.c b/drivers/net/wan/ixp4xx_hss.c
index ea6ee6a608ce..7c5cf77e9ef1 100644
--- a/drivers/net/wan/ixp4xx_hss.c
+++ b/drivers/net/wan/ixp4xx_hss.c
@@ -17,6 +17,7 @@
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/platform_device.h>
+#include <linux/platform_data/wan_ixp4xx_hss.h>
#include <linux/poll.h>
#include <linux/slab.h>
#include <linux/soc/ixp4xx/npe.h>
@@ -258,7 +259,7 @@ struct port {
struct hss_plat_info *plat;
buffer_t *rx_buff_tab[RX_DESCS], *tx_buff_tab[TX_DESCS];
struct desc *desc_tab; /* coherent */
- u32 desc_tab_phys;
+ dma_addr_t desc_tab_phys;
unsigned int id;
unsigned int clock_type, clock_rate, loopback;
unsigned int initialized, carrier;
@@ -858,7 +859,7 @@ static int hss_hdlc_xmit(struct sk_buff *skb, struct net_device *dev)
dev->stats.tx_dropped++;
return NETDEV_TX_OK;
}
- memcpy_swab32(mem, (u32 *)((int)skb->data & ~3), bytes / 4);
+ memcpy_swab32(mem, (u32 *)((uintptr_t)skb->data & ~3), bytes / 4);
dev_kfree_skb(skb);
#endif
@@ -1182,14 +1183,14 @@ static int hss_hdlc_attach(struct net_device *dev, unsigned short encoding,
}
}
-static u32 check_clock(u32 rate, u32 a, u32 b, u32 c,
+static u32 check_clock(u32 timer_freq, u32 rate, u32 a, u32 b, u32 c,
u32 *best, u32 *best_diff, u32 *reg)
{
/* a is 10-bit, b is 10-bit, c is 12-bit */
u64 new_rate;
u32 new_diff;
- new_rate = ixp4xx_timer_freq * (u64)(c + 1);
+ new_rate = timer_freq * (u64)(c + 1);
do_div(new_rate, a * (c + 1) + b + 1);
new_diff = abs((u32)new_rate - rate);
@@ -1201,40 +1202,43 @@ static u32 check_clock(u32 rate, u32 a, u32 b, u32 c,
return new_diff;
}
-static void find_best_clock(u32 rate, u32 *best, u32 *reg)
+static void find_best_clock(u32 timer_freq, u32 rate, u32 *best, u32 *reg)
{
u32 a, b, diff = 0xFFFFFFFF;
- a = ixp4xx_timer_freq / rate;
+ a = timer_freq / rate;
if (a > 0x3FF) { /* 10-bit value - we can go as slow as ca. 65 kb/s */
- check_clock(rate, 0x3FF, 1, 1, best, &diff, reg);
+ check_clock(timer_freq, rate, 0x3FF, 1, 1, best, &diff, reg);
return;
}
if (a == 0) { /* > 66.666 MHz */
a = 1; /* minimum divider is 1 (a = 0, b = 1, c = 1) */
- rate = ixp4xx_timer_freq;
+ rate = timer_freq;
}
- if (rate * a == ixp4xx_timer_freq) { /* don't divide by 0 later */
- check_clock(rate, a - 1, 1, 1, best, &diff, reg);
+ if (rate * a == timer_freq) { /* don't divide by 0 later */
+ check_clock(timer_freq, rate, a - 1, 1, 1, best, &diff, reg);
return;
}
for (b = 0; b < 0x400; b++) {
u64 c = (b + 1) * (u64)rate;
- do_div(c, ixp4xx_timer_freq - rate * a);
+ do_div(c, timer_freq - rate * a);
c--;
if (c >= 0xFFF) { /* 12-bit - no need to check more 'b's */
if (b == 0 && /* also try a bit higher rate */
- !check_clock(rate, a - 1, 1, 1, best, &diff, reg))
+ !check_clock(timer_freq, rate, a - 1, 1, 1, best,
+ &diff, reg))
return;
- check_clock(rate, a, b, 0xFFF, best, &diff, reg);
+ check_clock(timer_freq, rate, a, b, 0xFFF, best,
+ &diff, reg);
return;
}
- if (!check_clock(rate, a, b, c, best, &diff, reg))
+ if (!check_clock(timer_freq, rate, a, b, c, best, &diff, reg))
return;
- if (!check_clock(rate, a, b, c + 1, best, &diff, reg))
+ if (!check_clock(timer_freq, rate, a, b, c + 1, best, &diff,
+ reg))
return;
}
}
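The clock generator implements rate ≈ timer_freq * (c + 1) / (a * (c + 1) + b + 1), with a and b 10-bit and c 12-bit; check_clock() merely scores one (a, b, c) triple against the request, and the change threads the board's timer frequency through instead of the old ixp4xx_timer_freq global. A worked instance, assuming a 66.666 MHz timer and a requested 2048 kbit/s: a = 66666000 / 2048000 = 32, and trying b = 0, c = 1 gives 66666000 * 2 / (32 * 2 + 0 + 1) = 2051261 Hz, an error of about 3.3 kHz for the search loop to improve on:

    /* Worked instance of check_clock()'s arithmetic (sketch). */
    static void sketch_clock_example(void)
    {
            u64 new_rate = 66666000ULL * (1 + 1);   /* timer_freq * (c + 1) */

            do_div(new_rate, 32 * (1 + 1) + 0 + 1); /* a * (c + 1) + b + 1  */
            /* new_rate == 2051261; |2051261 - 2048000| == 3261 Hz off */
    }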
@@ -1285,8 +1289,9 @@ static int hss_hdlc_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
port->clock_type = clk; /* Update settings */
if (clk == CLOCK_INT)
- find_best_clock(new_line.clock_rate, &port->clock_rate,
- &port->clock_reg);
+ find_best_clock(port->plat->timer_freq,
+ new_line.clock_rate,
+ &port->clock_rate, &port->clock_reg);
else {
port->clock_rate = 0;
port->clock_reg = CLK42X_SPEED_2048KHZ;
diff --git a/drivers/net/wan/lapbether.c b/drivers/net/wan/lapbether.c
index 0f1217b506ad..e30d91a38cfb 100644
--- a/drivers/net/wan/lapbether.c
+++ b/drivers/net/wan/lapbether.c
@@ -64,7 +64,7 @@ static struct lapbethdev *lapbeth_get_x25_dev(struct net_device *dev)
{
struct lapbethdev *lapbeth;
- list_for_each_entry_rcu(lapbeth, &lapbeth_devices, node) {
+ list_for_each_entry_rcu(lapbeth, &lapbeth_devices, node, lockdep_rtnl_is_held()) {
if (lapbeth->ethdev == dev)
return lapbeth;
}
diff --git a/drivers/net/wan/lmc/lmc_main.c b/drivers/net/wan/lmc/lmc_main.c
index 0e6a51525d91..a20f467ca48a 100644
--- a/drivers/net/wan/lmc/lmc_main.c
+++ b/drivers/net/wan/lmc/lmc_main.c
@@ -99,7 +99,7 @@ static int lmc_ifdown(struct net_device * const);
static void lmc_watchdog(struct timer_list *t);
static void lmc_reset(lmc_softc_t * const sc);
static void lmc_dec_reset(lmc_softc_t * const sc);
-static void lmc_driver_timeout(struct net_device *dev);
+static void lmc_driver_timeout(struct net_device *dev, unsigned int txqueue);
/*
* linux reserves 16 device specific IOCTLs. We call them
@@ -2044,7 +2044,7 @@ static void lmc_initcsrs(lmc_softc_t * const sc, lmc_csrptr_t csr_base, /*fold00
lmc_trace(sc->lmc_device, "lmc_initcsrs out");
}
-static void lmc_driver_timeout(struct net_device *dev)
+static void lmc_driver_timeout(struct net_device *dev, unsigned int txqueue)
{
lmc_softc_t *sc = dev_to_sc(dev);
u32 csr6;
diff --git a/drivers/net/wan/wanxl.c b/drivers/net/wan/wanxl.c
index 34e94ee806d6..23f93f1c815d 100644
--- a/drivers/net/wan/wanxl.c
+++ b/drivers/net/wan/wanxl.c
@@ -635,7 +635,7 @@ static int wanxl_pci_init_one(struct pci_dev *pdev,
/* set up PLX mapping */
plx_phy = pci_resource_start(pdev, 0);
- card->plx = ioremap_nocache(plx_phy, 0x70);
+ card->plx = ioremap(plx_phy, 0x70);
if (!card->plx) {
pr_err("ioremap() failed\n");
wanxl_pci_remove_one(pdev);
@@ -704,7 +704,7 @@ static int wanxl_pci_init_one(struct pci_dev *pdev,
PCI_DMA_FROMDEVICE);
}
- mem = ioremap_nocache(mem_phy, PDM_OFFSET + sizeof(firmware));
+ mem = ioremap(mem_phy, PDM_OFFSET + sizeof(firmware));
if (!mem) {
pr_err("ioremap() failed\n");
wanxl_pci_remove_one(pdev);
diff --git a/drivers/net/wan/x25_asy.c b/drivers/net/wan/x25_asy.c
index 914be5847386..69773d228ec1 100644
--- a/drivers/net/wan/x25_asy.c
+++ b/drivers/net/wan/x25_asy.c
@@ -276,7 +276,7 @@ static void x25_asy_write_wakeup(struct tty_struct *tty)
sl->xhead += actual;
}
-static void x25_asy_timeout(struct net_device *dev)
+static void x25_asy_timeout(struct net_device *dev, unsigned int txqueue)
{
struct x25_asy *sl = netdev_priv(dev);
diff --git a/drivers/net/wimax/i2400m/netdev.c b/drivers/net/wimax/i2400m/netdev.c
index a5db3c06b646..a7fcbceb6e6b 100644
--- a/drivers/net/wimax/i2400m/netdev.c
+++ b/drivers/net/wimax/i2400m/netdev.c
@@ -380,7 +380,7 @@ drop:
static
-void i2400m_tx_timeout(struct net_device *net_dev)
+void i2400m_tx_timeout(struct net_device *net_dev, unsigned int txqueue)
{
/*
* We might want to kick the device
diff --git a/drivers/net/wireguard/Makefile b/drivers/net/wireguard/Makefile
new file mode 100644
index 000000000000..fc52b2cb500b
--- /dev/null
+++ b/drivers/net/wireguard/Makefile
@@ -0,0 +1,18 @@
+ccflags-y := -O3
+ccflags-y += -D'pr_fmt(fmt)=KBUILD_MODNAME ": " fmt'
+ccflags-$(CONFIG_WIREGUARD_DEBUG) += -DDEBUG
+wireguard-y := main.o
+wireguard-y += noise.o
+wireguard-y += device.o
+wireguard-y += peer.o
+wireguard-y += timers.o
+wireguard-y += queueing.o
+wireguard-y += send.o
+wireguard-y += receive.o
+wireguard-y += socket.o
+wireguard-y += peerlookup.o
+wireguard-y += allowedips.o
+wireguard-y += ratelimiter.o
+wireguard-y += cookie.o
+wireguard-y += netlink.o
+obj-$(CONFIG_WIREGUARD) := wireguard.o
diff --git a/drivers/net/wireguard/allowedips.c b/drivers/net/wireguard/allowedips.c
new file mode 100644
index 000000000000..121d9ea0f135
--- /dev/null
+++ b/drivers/net/wireguard/allowedips.c
@@ -0,0 +1,376 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2015-2019 Jason A. Donenfeld <Jason@zx2c4.com>. All Rights Reserved.
+ */
+
+#include "allowedips.h"
+#include "peer.h"
+
+static void swap_endian(u8 *dst, const u8 *src, u8 bits)
+{
+ if (bits == 32) {
+ *(u32 *)dst = be32_to_cpu(*(const __be32 *)src);
+ } else if (bits == 128) {
+ ((u64 *)dst)[0] = be64_to_cpu(((const __be64 *)src)[0]);
+ ((u64 *)dst)[1] = be64_to_cpu(((const __be64 *)src)[1]);
+ }
+}
+
+static void copy_and_assign_cidr(struct allowedips_node *node, const u8 *src,
+ u8 cidr, u8 bits)
+{
+ node->cidr = cidr;
+ node->bit_at_a = cidr / 8U;
+#ifdef __LITTLE_ENDIAN
+ node->bit_at_a ^= (bits / 8U - 1U) % 8U;
+#endif
+ node->bit_at_b = 7U - (cidr % 8U);
+ node->bitlen = bits;
+ memcpy(node->bits, src, bits / 8U);
+}
+#define CHOOSE_NODE(parent, key) \
+ parent->bit[(key[parent->bit_at_a] >> parent->bit_at_b) & 1]
+
+static void push_rcu(struct allowedips_node **stack,
+ struct allowedips_node __rcu *p, unsigned int *len)
+{
+ if (rcu_access_pointer(p)) {
+ WARN_ON(IS_ENABLED(DEBUG) && *len >= 128);
+ stack[(*len)++] = rcu_dereference_raw(p);
+ }
+}
+
+static void root_free_rcu(struct rcu_head *rcu)
+{
+ struct allowedips_node *node, *stack[128] = {
+ container_of(rcu, struct allowedips_node, rcu) };
+ unsigned int len = 1;
+
+ while (len > 0 && (node = stack[--len])) {
+ push_rcu(stack, node->bit[0], &len);
+ push_rcu(stack, node->bit[1], &len);
+ kfree(node);
+ }
+}
+
+static void root_remove_peer_lists(struct allowedips_node *root)
+{
+ struct allowedips_node *node, *stack[128] = { root };
+ unsigned int len = 1;
+
+ while (len > 0 && (node = stack[--len])) {
+ push_rcu(stack, node->bit[0], &len);
+ push_rcu(stack, node->bit[1], &len);
+ if (rcu_access_pointer(node->peer))
+ list_del(&node->peer_list);
+ }
+}
+
+static void walk_remove_by_peer(struct allowedips_node __rcu **top,
+ struct wg_peer *peer, struct mutex *lock)
+{
+#define REF(p) rcu_access_pointer(p)
+#define DEREF(p) rcu_dereference_protected(*(p), lockdep_is_held(lock))
+#define PUSH(p) ({ \
+ WARN_ON(IS_ENABLED(DEBUG) && len >= 128); \
+ stack[len++] = p; \
+ })
+
+ struct allowedips_node __rcu **stack[128], **nptr;
+ struct allowedips_node *node, *prev;
+ unsigned int len;
+
+ if (unlikely(!peer || !REF(*top)))
+ return;
+
+ for (prev = NULL, len = 0, PUSH(top); len > 0; prev = node) {
+ nptr = stack[len - 1];
+ node = DEREF(nptr);
+ if (!node) {
+ --len;
+ continue;
+ }
+ if (!prev || REF(prev->bit[0]) == node ||
+ REF(prev->bit[1]) == node) {
+ if (REF(node->bit[0]))
+ PUSH(&node->bit[0]);
+ else if (REF(node->bit[1]))
+ PUSH(&node->bit[1]);
+ } else if (REF(node->bit[0]) == prev) {
+ if (REF(node->bit[1]))
+ PUSH(&node->bit[1]);
+ } else {
+ if (rcu_dereference_protected(node->peer,
+ lockdep_is_held(lock)) == peer) {
+ RCU_INIT_POINTER(node->peer, NULL);
+ list_del_init(&node->peer_list);
+ if (!node->bit[0] || !node->bit[1]) {
+ rcu_assign_pointer(*nptr, DEREF(
+ &node->bit[!REF(node->bit[0])]));
+ kfree_rcu(node, rcu);
+ node = DEREF(nptr);
+ }
+ }
+ --len;
+ }
+ }
+
+#undef REF
+#undef DEREF
+#undef PUSH
+}
+
+static unsigned int fls128(u64 a, u64 b)
+{
+ return a ? fls64(a) + 64U : fls64(b);
+}
+
+static u8 common_bits(const struct allowedips_node *node, const u8 *key,
+ u8 bits)
+{
+ if (bits == 32)
+ return 32U - fls(*(const u32 *)node->bits ^ *(const u32 *)key);
+ else if (bits == 128)
+ return 128U - fls128(
+ *(const u64 *)&node->bits[0] ^ *(const u64 *)&key[0],
+ *(const u64 *)&node->bits[8] ^ *(const u64 *)&key[8]);
+ return 0;
+}
+
+static bool prefix_matches(const struct allowedips_node *node, const u8 *key,
+ u8 bits)
+{
+ /* This could be much faster if it actually just compared the common
+ * bits properly, by precomputing a mask bswap(~0 << (32 - cidr)), and
+ * the rest, but it turns out that common_bits is already super fast on
+ * modern processors, even taking into account the unfortunate bswap.
+ * So, we just inline it like this instead.
+ */
+ return common_bits(node, key, bits) >= node->cidr;
+}
+
+static struct allowedips_node *find_node(struct allowedips_node *trie, u8 bits,
+ const u8 *key)
+{
+ struct allowedips_node *node = trie, *found = NULL;
+
+ while (node && prefix_matches(node, key, bits)) {
+ if (rcu_access_pointer(node->peer))
+ found = node;
+ if (node->cidr == bits)
+ break;
+ node = rcu_dereference_bh(CHOOSE_NODE(node, key));
+ }
+ return found;
+}
+
+/* Returns a strong reference to a peer */
+static struct wg_peer *lookup(struct allowedips_node __rcu *root, u8 bits,
+ const void *be_ip)
+{
+ /* Aligned so it can be passed to fls/fls64 */
+ u8 ip[16] __aligned(__alignof(u64));
+ struct allowedips_node *node;
+ struct wg_peer *peer = NULL;
+
+ swap_endian(ip, be_ip, bits);
+
+ rcu_read_lock_bh();
+retry:
+ node = find_node(rcu_dereference_bh(root), bits, ip);
+ if (node) {
+ peer = wg_peer_get_maybe_zero(rcu_dereference_bh(node->peer));
+ if (!peer)
+ goto retry;
+ }
+ rcu_read_unlock_bh();
+ return peer;
+}
+
+static bool node_placement(struct allowedips_node __rcu *trie, const u8 *key,
+ u8 cidr, u8 bits, struct allowedips_node **rnode,
+ struct mutex *lock)
+{
+ struct allowedips_node *node = rcu_dereference_protected(trie,
+ lockdep_is_held(lock));
+ struct allowedips_node *parent = NULL;
+ bool exact = false;
+
+ while (node && node->cidr <= cidr && prefix_matches(node, key, bits)) {
+ parent = node;
+ if (parent->cidr == cidr) {
+ exact = true;
+ break;
+ }
+ node = rcu_dereference_protected(CHOOSE_NODE(parent, key),
+ lockdep_is_held(lock));
+ }
+ *rnode = parent;
+ return exact;
+}
+
+static int add(struct allowedips_node __rcu **trie, u8 bits, const u8 *key,
+ u8 cidr, struct wg_peer *peer, struct mutex *lock)
+{
+ struct allowedips_node *node, *parent, *down, *newnode;
+
+ if (unlikely(cidr > bits || !peer))
+ return -EINVAL;
+
+ if (!rcu_access_pointer(*trie)) {
+ node = kzalloc(sizeof(*node), GFP_KERNEL);
+ if (unlikely(!node))
+ return -ENOMEM;
+ RCU_INIT_POINTER(node->peer, peer);
+ list_add_tail(&node->peer_list, &peer->allowedips_list);
+ copy_and_assign_cidr(node, key, cidr, bits);
+ rcu_assign_pointer(*trie, node);
+ return 0;
+ }
+ if (node_placement(*trie, key, cidr, bits, &node, lock)) {
+ rcu_assign_pointer(node->peer, peer);
+ list_move_tail(&node->peer_list, &peer->allowedips_list);
+ return 0;
+ }
+
+ newnode = kzalloc(sizeof(*newnode), GFP_KERNEL);
+ if (unlikely(!newnode))
+ return -ENOMEM;
+ RCU_INIT_POINTER(newnode->peer, peer);
+ list_add_tail(&newnode->peer_list, &peer->allowedips_list);
+ copy_and_assign_cidr(newnode, key, cidr, bits);
+
+ if (!node) {
+ down = rcu_dereference_protected(*trie, lockdep_is_held(lock));
+ } else {
+ down = rcu_dereference_protected(CHOOSE_NODE(node, key),
+ lockdep_is_held(lock));
+ if (!down) {
+ rcu_assign_pointer(CHOOSE_NODE(node, key), newnode);
+ return 0;
+ }
+ }
+ cidr = min(cidr, common_bits(down, key, bits));
+ parent = node;
+
+ if (newnode->cidr == cidr) {
+ rcu_assign_pointer(CHOOSE_NODE(newnode, down->bits), down);
+ if (!parent)
+ rcu_assign_pointer(*trie, newnode);
+ else
+ rcu_assign_pointer(CHOOSE_NODE(parent, newnode->bits),
+ newnode);
+ } else {
+ node = kzalloc(sizeof(*node), GFP_KERNEL);
+ if (unlikely(!node)) {
+ kfree(newnode);
+ return -ENOMEM;
+ }
+ INIT_LIST_HEAD(&node->peer_list);
+ copy_and_assign_cidr(node, newnode->bits, cidr, bits);
+
+ rcu_assign_pointer(CHOOSE_NODE(node, down->bits), down);
+ rcu_assign_pointer(CHOOSE_NODE(node, newnode->bits), newnode);
+ if (!parent)
+ rcu_assign_pointer(*trie, node);
+ else
+ rcu_assign_pointer(CHOOSE_NODE(parent, node->bits),
+ node);
+ }
+ return 0;
+}
+
+void wg_allowedips_init(struct allowedips *table)
+{
+ table->root4 = table->root6 = NULL;
+ table->seq = 1;
+}
+
+void wg_allowedips_free(struct allowedips *table, struct mutex *lock)
+{
+ struct allowedips_node __rcu *old4 = table->root4, *old6 = table->root6;
+
+ ++table->seq;
+ RCU_INIT_POINTER(table->root4, NULL);
+ RCU_INIT_POINTER(table->root6, NULL);
+ if (rcu_access_pointer(old4)) {
+ struct allowedips_node *node = rcu_dereference_protected(old4,
+ lockdep_is_held(lock));
+
+ root_remove_peer_lists(node);
+ call_rcu(&node->rcu, root_free_rcu);
+ }
+ if (rcu_access_pointer(old6)) {
+ struct allowedips_node *node = rcu_dereference_protected(old6,
+ lockdep_is_held(lock));
+
+ root_remove_peer_lists(node);
+ call_rcu(&node->rcu, root_free_rcu);
+ }
+}
+
+int wg_allowedips_insert_v4(struct allowedips *table, const struct in_addr *ip,
+ u8 cidr, struct wg_peer *peer, struct mutex *lock)
+{
+ /* Aligned so it can be passed to fls */
+ u8 key[4] __aligned(__alignof(u32));
+
+ ++table->seq;
+ swap_endian(key, (const u8 *)ip, 32);
+ return add(&table->root4, 32, key, cidr, peer, lock);
+}
+
+int wg_allowedips_insert_v6(struct allowedips *table, const struct in6_addr *ip,
+ u8 cidr, struct wg_peer *peer, struct mutex *lock)
+{
+ /* Aligned so it can be passed to fls64 */
+ u8 key[16] __aligned(__alignof(u64));
+
+ ++table->seq;
+ swap_endian(key, (const u8 *)ip, 128);
+ return add(&table->root6, 128, key, cidr, peer, lock);
+}
+
+void wg_allowedips_remove_by_peer(struct allowedips *table,
+ struct wg_peer *peer, struct mutex *lock)
+{
+ ++table->seq;
+ walk_remove_by_peer(&table->root4, peer, lock);
+ walk_remove_by_peer(&table->root6, peer, lock);
+}
+
+int wg_allowedips_read_node(struct allowedips_node *node, u8 ip[16], u8 *cidr)
+{
+ const unsigned int cidr_bytes = DIV_ROUND_UP(node->cidr, 8U);
+ swap_endian(ip, node->bits, node->bitlen);
+ memset(ip + cidr_bytes, 0, node->bitlen / 8U - cidr_bytes);
+ if (node->cidr)
+ ip[cidr_bytes - 1U] &= ~0U << (-node->cidr % 8U);
+
+ *cidr = node->cidr;
+ return node->bitlen == 32 ? AF_INET : AF_INET6;
+}
+
+/* Returns a strong reference to a peer */
+struct wg_peer *wg_allowedips_lookup_dst(struct allowedips *table,
+ struct sk_buff *skb)
+{
+ if (skb->protocol == htons(ETH_P_IP))
+ return lookup(table->root4, 32, &ip_hdr(skb)->daddr);
+ else if (skb->protocol == htons(ETH_P_IPV6))
+ return lookup(table->root6, 128, &ipv6_hdr(skb)->daddr);
+ return NULL;
+}
+
+/* Returns a strong reference to a peer */
+struct wg_peer *wg_allowedips_lookup_src(struct allowedips *table,
+ struct sk_buff *skb)
+{
+ if (skb->protocol == htons(ETH_P_IP))
+ return lookup(table->root4, 32, &ip_hdr(skb)->saddr);
+ else if (skb->protocol == htons(ETH_P_IPV6))
+ return lookup(table->root6, 128, &ipv6_hdr(skb)->saddr);
+ return NULL;
+}
+
+#include "selftest/allowedips.c"
diff --git a/drivers/net/wireguard/allowedips.h b/drivers/net/wireguard/allowedips.h
new file mode 100644
index 000000000000..e5c83cafcef4
--- /dev/null
+++ b/drivers/net/wireguard/allowedips.h
@@ -0,0 +1,59 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2015-2019 Jason A. Donenfeld <Jason@zx2c4.com>. All Rights Reserved.
+ */
+
+#ifndef _WG_ALLOWEDIPS_H
+#define _WG_ALLOWEDIPS_H
+
+#include <linux/mutex.h>
+#include <linux/ip.h>
+#include <linux/ipv6.h>
+
+struct wg_peer;
+
+struct allowedips_node {
+ struct wg_peer __rcu *peer;
+ struct allowedips_node __rcu *bit[2];
+ /* While it may seem scandalous that we waste space for v4,
+ * we're alloc'ing to the nearest power of 2 anyway, so this
+ * doesn't actually make a difference.
+ */
+ u8 bits[16] __aligned(__alignof(u64));
+ u8 cidr, bit_at_a, bit_at_b, bitlen;
+
+ /* Keep rarely used list at bottom to be beyond cache line. */
+ union {
+ struct list_head peer_list;
+ struct rcu_head rcu;
+ };
+};
+
+struct allowedips {
+ struct allowedips_node __rcu *root4;
+ struct allowedips_node __rcu *root6;
+ u64 seq;
+};
+
+void wg_allowedips_init(struct allowedips *table);
+void wg_allowedips_free(struct allowedips *table, struct mutex *mutex);
+int wg_allowedips_insert_v4(struct allowedips *table, const struct in_addr *ip,
+ u8 cidr, struct wg_peer *peer, struct mutex *lock);
+int wg_allowedips_insert_v6(struct allowedips *table, const struct in6_addr *ip,
+ u8 cidr, struct wg_peer *peer, struct mutex *lock);
+void wg_allowedips_remove_by_peer(struct allowedips *table,
+ struct wg_peer *peer, struct mutex *lock);
+/* The ip input pointer should be __aligned(__alignof(u64))) */
+int wg_allowedips_read_node(struct allowedips_node *node, u8 ip[16], u8 *cidr);
+
+/* These return a strong reference to a peer: */
+struct wg_peer *wg_allowedips_lookup_dst(struct allowedips *table,
+ struct sk_buff *skb);
+struct wg_peer *wg_allowedips_lookup_src(struct allowedips *table,
+ struct sk_buff *skb);
+
+#ifdef DEBUG
+bool wg_allowedips_selftest(void);
+#endif
+
+#endif /* _WG_ALLOWEDIPS_H */
diff --git a/drivers/net/wireguard/cookie.c b/drivers/net/wireguard/cookie.c
new file mode 100644
index 000000000000..4956f0499c19
--- /dev/null
+++ b/drivers/net/wireguard/cookie.c
@@ -0,0 +1,236 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2015-2019 Jason A. Donenfeld <Jason@zx2c4.com>. All Rights Reserved.
+ */
+
+#include "cookie.h"
+#include "peer.h"
+#include "device.h"
+#include "messages.h"
+#include "ratelimiter.h"
+#include "timers.h"
+
+#include <crypto/blake2s.h>
+#include <crypto/chacha20poly1305.h>
+
+#include <net/ipv6.h>
+#include <crypto/algapi.h>
+
+void wg_cookie_checker_init(struct cookie_checker *checker,
+ struct wg_device *wg)
+{
+ init_rwsem(&checker->secret_lock);
+ checker->secret_birthdate = ktime_get_coarse_boottime_ns();
+ get_random_bytes(checker->secret, NOISE_HASH_LEN);
+ checker->device = wg;
+}
+
+enum { COOKIE_KEY_LABEL_LEN = 8 };
+static const u8 mac1_key_label[COOKIE_KEY_LABEL_LEN] = "mac1----";
+static const u8 cookie_key_label[COOKIE_KEY_LABEL_LEN] = "cookie--";
+
+static void precompute_key(u8 key[NOISE_SYMMETRIC_KEY_LEN],
+ const u8 pubkey[NOISE_PUBLIC_KEY_LEN],
+ const u8 label[COOKIE_KEY_LABEL_LEN])
+{
+ struct blake2s_state blake;
+
+ blake2s_init(&blake, NOISE_SYMMETRIC_KEY_LEN);
+ blake2s_update(&blake, label, COOKIE_KEY_LABEL_LEN);
+ blake2s_update(&blake, pubkey, NOISE_PUBLIC_KEY_LEN);
+ blake2s_final(&blake, key);
+}
+
+/* Must hold peer->handshake.static_identity->lock */
+void wg_cookie_checker_precompute_device_keys(struct cookie_checker *checker)
+{
+ if (likely(checker->device->static_identity.has_identity)) {
+ precompute_key(checker->cookie_encryption_key,
+ checker->device->static_identity.static_public,
+ cookie_key_label);
+ precompute_key(checker->message_mac1_key,
+ checker->device->static_identity.static_public,
+ mac1_key_label);
+ } else {
+ memset(checker->cookie_encryption_key, 0,
+ NOISE_SYMMETRIC_KEY_LEN);
+ memset(checker->message_mac1_key, 0, NOISE_SYMMETRIC_KEY_LEN);
+ }
+}
+
+void wg_cookie_checker_precompute_peer_keys(struct wg_peer *peer)
+{
+ precompute_key(peer->latest_cookie.cookie_decryption_key,
+ peer->handshake.remote_static, cookie_key_label);
+ precompute_key(peer->latest_cookie.message_mac1_key,
+ peer->handshake.remote_static, mac1_key_label);
+}
+
+void wg_cookie_init(struct cookie *cookie)
+{
+ memset(cookie, 0, sizeof(*cookie));
+ init_rwsem(&cookie->lock);
+}
+
+static void compute_mac1(u8 mac1[COOKIE_LEN], const void *message, size_t len,
+ const u8 key[NOISE_SYMMETRIC_KEY_LEN])
+{
+ len = len - sizeof(struct message_macs) +
+ offsetof(struct message_macs, mac1);
+ blake2s(mac1, message, key, COOKIE_LEN, len, NOISE_SYMMETRIC_KEY_LEN);
+}
+
+static void compute_mac2(u8 mac2[COOKIE_LEN], const void *message, size_t len,
+ const u8 cookie[COOKIE_LEN])
+{
+ len = len - sizeof(struct message_macs) +
+ offsetof(struct message_macs, mac2);
+ blake2s(mac2, message, cookie, COOKIE_LEN, len, COOKIE_LEN);
+}
+
+static void make_cookie(u8 cookie[COOKIE_LEN], struct sk_buff *skb,
+ struct cookie_checker *checker)
+{
+ struct blake2s_state state;
+
+ if (wg_birthdate_has_expired(checker->secret_birthdate,
+ COOKIE_SECRET_MAX_AGE)) {
+ down_write(&checker->secret_lock);
+ checker->secret_birthdate = ktime_get_coarse_boottime_ns();
+ get_random_bytes(checker->secret, NOISE_HASH_LEN);
+ up_write(&checker->secret_lock);
+ }
+
+ down_read(&checker->secret_lock);
+
+ blake2s_init_key(&state, COOKIE_LEN, checker->secret, NOISE_HASH_LEN);
+ if (skb->protocol == htons(ETH_P_IP))
+ blake2s_update(&state, (u8 *)&ip_hdr(skb)->saddr,
+ sizeof(struct in_addr));
+ else if (skb->protocol == htons(ETH_P_IPV6))
+ blake2s_update(&state, (u8 *)&ipv6_hdr(skb)->saddr,
+ sizeof(struct in6_addr));
+ blake2s_update(&state, (u8 *)&udp_hdr(skb)->source, sizeof(__be16));
+ blake2s_final(&state, cookie);
+
+ up_read(&checker->secret_lock);
+}
+
+enum cookie_mac_state wg_cookie_validate_packet(struct cookie_checker *checker,
+ struct sk_buff *skb,
+ bool check_cookie)
+{
+ struct message_macs *macs = (struct message_macs *)
+ (skb->data + skb->len - sizeof(*macs));
+ enum cookie_mac_state ret;
+ u8 computed_mac[COOKIE_LEN];
+ u8 cookie[COOKIE_LEN];
+
+ ret = INVALID_MAC;
+ compute_mac1(computed_mac, skb->data, skb->len,
+ checker->message_mac1_key);
+ if (crypto_memneq(computed_mac, macs->mac1, COOKIE_LEN))
+ goto out;
+
+ ret = VALID_MAC_BUT_NO_COOKIE;
+
+ if (!check_cookie)
+ goto out;
+
+ make_cookie(cookie, skb, checker);
+
+ compute_mac2(computed_mac, skb->data, skb->len, cookie);
+ if (crypto_memneq(computed_mac, macs->mac2, COOKIE_LEN))
+ goto out;
+
+ ret = VALID_MAC_WITH_COOKIE_BUT_RATELIMITED;
+ if (!wg_ratelimiter_allow(skb, dev_net(checker->device->dev)))
+ goto out;
+
+ ret = VALID_MAC_WITH_COOKIE;
+
+out:
+ return ret;
+}
+
+void wg_cookie_add_mac_to_packet(void *message, size_t len,
+ struct wg_peer *peer)
+{
+ struct message_macs *macs = (struct message_macs *)
+ ((u8 *)message + len - sizeof(*macs));
+
+ down_write(&peer->latest_cookie.lock);
+ compute_mac1(macs->mac1, message, len,
+ peer->latest_cookie.message_mac1_key);
+ memcpy(peer->latest_cookie.last_mac1_sent, macs->mac1, COOKIE_LEN);
+ peer->latest_cookie.have_sent_mac1 = true;
+ up_write(&peer->latest_cookie.lock);
+
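+ /* mac2 can only be computed if we hold a valid cookie that is still
+ * fresh enough, leaving COOKIE_SECRET_LATENCY of slack before the
+ * responder rotates its secret; otherwise it is left zeroed.
+ */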
+ down_read(&peer->latest_cookie.lock);
+ if (peer->latest_cookie.is_valid &&
+ !wg_birthdate_has_expired(peer->latest_cookie.birthdate,
+ COOKIE_SECRET_MAX_AGE - COOKIE_SECRET_LATENCY))
+ compute_mac2(macs->mac2, message, len,
+ peer->latest_cookie.cookie);
+ else
+ memset(macs->mac2, 0, COOKIE_LEN);
+ up_read(&peer->latest_cookie.lock);
+}
+
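+/* The reply encrypts a fresh cookie with XChaCha20-Poly1305, using the
+ * mac1 of the message being replied to as the associated data, so the
+ * cookie is bound to the sender's specific handshake attempt.
+ */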
+void wg_cookie_message_create(struct message_handshake_cookie *dst,
+ struct sk_buff *skb, __le32 index,
+ struct cookie_checker *checker)
+{
+ struct message_macs *macs = (struct message_macs *)
+ ((u8 *)skb->data + skb->len - sizeof(*macs));
+ u8 cookie[COOKIE_LEN];
+
+ dst->header.type = cpu_to_le32(MESSAGE_HANDSHAKE_COOKIE);
+ dst->receiver_index = index;
+ get_random_bytes_wait(dst->nonce, COOKIE_NONCE_LEN);
+
+ make_cookie(cookie, skb, checker);
+ xchacha20poly1305_encrypt(dst->encrypted_cookie, cookie, COOKIE_LEN,
+ macs->mac1, COOKIE_LEN, dst->nonce,
+ checker->cookie_encryption_key);
+}
+
+void wg_cookie_message_consume(struct message_handshake_cookie *src,
+ struct wg_device *wg)
+{
+ struct wg_peer *peer = NULL;
+ u8 cookie[COOKIE_LEN];
+ bool ret;
+
+ if (unlikely(!wg_index_hashtable_lookup(wg->index_hashtable,
+ INDEX_HASHTABLE_HANDSHAKE |
+ INDEX_HASHTABLE_KEYPAIR,
+ src->receiver_index, &peer)))
+ return;
+
+ down_read(&peer->latest_cookie.lock);
+ if (unlikely(!peer->latest_cookie.have_sent_mac1)) {
+ up_read(&peer->latest_cookie.lock);
+ goto out;
+ }
+ ret = xchacha20poly1305_decrypt(
+ cookie, src->encrypted_cookie, sizeof(src->encrypted_cookie),
+ peer->latest_cookie.last_mac1_sent, COOKIE_LEN, src->nonce,
+ peer->latest_cookie.cookie_decryption_key);
+ up_read(&peer->latest_cookie.lock);
+
+ if (ret) {
+ down_write(&peer->latest_cookie.lock);
+ memcpy(peer->latest_cookie.cookie, cookie, COOKIE_LEN);
+ peer->latest_cookie.birthdate = ktime_get_coarse_boottime_ns();
+ peer->latest_cookie.is_valid = true;
+ peer->latest_cookie.have_sent_mac1 = false;
+ up_write(&peer->latest_cookie.lock);
+ } else {
+ net_dbg_ratelimited("%s: Could not decrypt invalid cookie response\n",
+ wg->dev->name);
+ }
+
+out:
+ wg_peer_put(peer);
+}
diff --git a/drivers/net/wireguard/cookie.h b/drivers/net/wireguard/cookie.h
new file mode 100644
index 000000000000..c4bd61ca03f2
--- /dev/null
+++ b/drivers/net/wireguard/cookie.h
@@ -0,0 +1,59 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2015-2019 Jason A. Donenfeld <Jason@zx2c4.com>. All Rights Reserved.
+ */
+
+#ifndef _WG_COOKIE_H
+#define _WG_COOKIE_H
+
+#include "messages.h"
+#include <linux/rwsem.h>
+
+struct wg_peer;
+
+struct cookie_checker {
+ u8 secret[NOISE_HASH_LEN];
+ u8 cookie_encryption_key[NOISE_SYMMETRIC_KEY_LEN];
+ u8 message_mac1_key[NOISE_SYMMETRIC_KEY_LEN];
+ u64 secret_birthdate;
+ struct rw_semaphore secret_lock;
+ struct wg_device *device;
+};
+
+struct cookie {
+ u64 birthdate;
+ bool is_valid;
+ u8 cookie[COOKIE_LEN];
+ bool have_sent_mac1;
+ u8 last_mac1_sent[COOKIE_LEN];
+ u8 cookie_decryption_key[NOISE_SYMMETRIC_KEY_LEN];
+ u8 message_mac1_key[NOISE_SYMMETRIC_KEY_LEN];
+ struct rw_semaphore lock;
+};
+
+enum cookie_mac_state {
+ INVALID_MAC,
+ VALID_MAC_BUT_NO_COOKIE,
+ VALID_MAC_WITH_COOKIE_BUT_RATELIMITED,
+ VALID_MAC_WITH_COOKIE
+};
+
+void wg_cookie_checker_init(struct cookie_checker *checker,
+ struct wg_device *wg);
+void wg_cookie_checker_precompute_device_keys(struct cookie_checker *checker);
+void wg_cookie_checker_precompute_peer_keys(struct wg_peer *peer);
+void wg_cookie_init(struct cookie *cookie);
+
+enum cookie_mac_state wg_cookie_validate_packet(struct cookie_checker *checker,
+ struct sk_buff *skb,
+ bool check_cookie);
+void wg_cookie_add_mac_to_packet(void *message, size_t len,
+ struct wg_peer *peer);
+
+void wg_cookie_message_create(struct message_handshake_cookie *dst,
+ struct sk_buff *skb, __le32 index,
+ struct cookie_checker *checker);
+void wg_cookie_message_consume(struct message_handshake_cookie *src,
+ struct wg_device *wg);
+
+#endif /* _WG_COOKIE_H */
diff --git a/drivers/net/wireguard/device.c b/drivers/net/wireguard/device.c
new file mode 100644
index 000000000000..16b19824b9ad
--- /dev/null
+++ b/drivers/net/wireguard/device.c
@@ -0,0 +1,458 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2015-2019 Jason A. Donenfeld <Jason@zx2c4.com>. All Rights Reserved.
+ */
+
+#include "queueing.h"
+#include "socket.h"
+#include "timers.h"
+#include "device.h"
+#include "ratelimiter.h"
+#include "peer.h"
+#include "messages.h"
+
+#include <linux/module.h>
+#include <linux/rtnetlink.h>
+#include <linux/inet.h>
+#include <linux/netdevice.h>
+#include <linux/inetdevice.h>
+#include <linux/if_arp.h>
+#include <linux/icmp.h>
+#include <linux/suspend.h>
+#include <net/icmp.h>
+#include <net/rtnetlink.h>
+#include <net/ip_tunnels.h>
+#include <net/addrconf.h>
+
+static LIST_HEAD(device_list);
+
+static int wg_open(struct net_device *dev)
+{
+ struct in_device *dev_v4 = __in_dev_get_rtnl(dev);
+ struct inet6_dev *dev_v6 = __in6_dev_get(dev);
+ struct wg_device *wg = netdev_priv(dev);
+ struct wg_peer *peer;
+ int ret;
+
+ if (dev_v4) {
+ /* At some point we might put this check near the ip_rt_send_
+ * redirect call of ip_forward in net/ipv4/ip_forward.c, similar
+ * to the current secpath check.
+ */
+ IN_DEV_CONF_SET(dev_v4, SEND_REDIRECTS, false);
+ IPV4_DEVCONF_ALL(dev_net(dev), SEND_REDIRECTS) = false;
+ }
+ if (dev_v6)
+ dev_v6->cnf.addr_gen_mode = IN6_ADDR_GEN_MODE_NONE;
+
+ ret = wg_socket_init(wg, wg->incoming_port);
+ if (ret < 0)
+ return ret;
+ mutex_lock(&wg->device_update_lock);
+ list_for_each_entry(peer, &wg->peer_list, peer_list) {
+ wg_packet_send_staged_packets(peer);
+ if (peer->persistent_keepalive_interval)
+ wg_packet_send_keepalive(peer);
+ }
+ mutex_unlock(&wg->device_update_lock);
+ return 0;
+}
+
+#ifdef CONFIG_PM_SLEEP
+static int wg_pm_notification(struct notifier_block *nb, unsigned long action,
+ void *data)
+{
+ struct wg_device *wg;
+ struct wg_peer *peer;
+
+ /* If the machine is constantly suspending and resuming, as part of
+ * its normal operation rather than as a somewhat rare event, then we
+ * don't actually want to clear keys.
+ */
+ if (IS_ENABLED(CONFIG_PM_AUTOSLEEP) || IS_ENABLED(CONFIG_ANDROID))
+ return 0;
+
+ if (action != PM_HIBERNATION_PREPARE && action != PM_SUSPEND_PREPARE)
+ return 0;
+
+ rtnl_lock();
+ list_for_each_entry(wg, &device_list, device_list) {
+ mutex_lock(&wg->device_update_lock);
+ list_for_each_entry(peer, &wg->peer_list, peer_list) {
+ del_timer(&peer->timer_zero_key_material);
+ wg_noise_handshake_clear(&peer->handshake);
+ wg_noise_keypairs_clear(&peer->keypairs);
+ }
+ mutex_unlock(&wg->device_update_lock);
+ }
+ rtnl_unlock();
+ rcu_barrier();
+ return 0;
+}
+
+static struct notifier_block pm_notifier = { .notifier_call = wg_pm_notification };
+#endif
+
+static int wg_stop(struct net_device *dev)
+{
+ struct wg_device *wg = netdev_priv(dev);
+ struct wg_peer *peer;
+
+ mutex_lock(&wg->device_update_lock);
+ list_for_each_entry(peer, &wg->peer_list, peer_list) {
+ wg_packet_purge_staged_packets(peer);
+ wg_timers_stop(peer);
+ wg_noise_handshake_clear(&peer->handshake);
+ wg_noise_keypairs_clear(&peer->keypairs);
+ wg_noise_reset_last_sent_handshake(&peer->last_sent_handshake);
+ }
+ mutex_unlock(&wg->device_update_lock);
+ skb_queue_purge(&wg->incoming_handshakes);
+ wg_socket_reinit(wg, NULL, NULL);
+ return 0;
+}
+
+static netdev_tx_t wg_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+ struct wg_device *wg = netdev_priv(dev);
+ struct sk_buff_head packets;
+ struct wg_peer *peer;
+ struct sk_buff *next;
+ sa_family_t family;
+ u32 mtu;
+ int ret;
+
+ if (unlikely(wg_skb_examine_untrusted_ip_hdr(skb) != skb->protocol)) {
+ ret = -EPROTONOSUPPORT;
+ net_dbg_ratelimited("%s: Invalid IP packet\n", dev->name);
+ goto err;
+ }
+
+ peer = wg_allowedips_lookup_dst(&wg->peer_allowedips, skb);
+ if (unlikely(!peer)) {
+ ret = -ENOKEY;
+ if (skb->protocol == htons(ETH_P_IP))
+ net_dbg_ratelimited("%s: No peer has allowed IPs matching %pI4\n",
+ dev->name, &ip_hdr(skb)->daddr);
+ else if (skb->protocol == htons(ETH_P_IPV6))
+ net_dbg_ratelimited("%s: No peer has allowed IPs matching %pI6\n",
+ dev->name, &ipv6_hdr(skb)->daddr);
+ goto err;
+ }
+
+ family = READ_ONCE(peer->endpoint.addr.sa_family);
+ if (unlikely(family != AF_INET && family != AF_INET6)) {
+ ret = -EDESTADDRREQ;
+ net_dbg_ratelimited("%s: No valid endpoint has been configured or discovered for peer %llu\n",
+ dev->name, peer->internal_id);
+ goto err_peer;
+ }
+
+ mtu = skb_dst(skb) ? dst_mtu(skb_dst(skb)) : dev->mtu;
+
+ __skb_queue_head_init(&packets);
+ if (!skb_is_gso(skb)) {
+ skb_mark_not_on_list(skb);
+ } else {
+ struct sk_buff *segs = skb_gso_segment(skb, 0);
+
+ if (unlikely(IS_ERR(segs))) {
+ ret = PTR_ERR(segs);
+ goto err_peer;
+ }
+ dev_kfree_skb(skb);
+ skb = segs;
+ }
+
+ skb_list_walk_safe(skb, skb, next) {
+ skb_mark_not_on_list(skb);
+
+ skb = skb_share_check(skb, GFP_ATOMIC);
+ if (unlikely(!skb))
+ continue;
+
+ /* We only need to keep the original dst around for icmp,
+ * so at this point we're in a position to drop it.
+ */
+ skb_dst_drop(skb);
+
+ PACKET_CB(skb)->mtu = mtu;
+
+ __skb_queue_tail(&packets, skb);
+ }
+
+ spin_lock_bh(&peer->staged_packet_queue.lock);
+ /* If the queue is getting too big, we start removing the oldest packets
+ * until it's small again. We do this before adding the new packet, so
+ * we don't remove GSO segments that are in excess.
+ */
+ while (skb_queue_len(&peer->staged_packet_queue) > MAX_STAGED_PACKETS) {
+ dev_kfree_skb(__skb_dequeue(&peer->staged_packet_queue));
+ ++dev->stats.tx_dropped;
+ }
+ skb_queue_splice_tail(&packets, &peer->staged_packet_queue);
+ spin_unlock_bh(&peer->staged_packet_queue.lock);
+
+ wg_packet_send_staged_packets(peer);
+
+ wg_peer_put(peer);
+ return NETDEV_TX_OK;
+
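+ /* The error paths below answer the packet with an ICMP unreachable,
+ * so the sending stack fails fast rather than waiting on timeouts.
+ */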
+err_peer:
+ wg_peer_put(peer);
+err:
+ ++dev->stats.tx_errors;
+ if (skb->protocol == htons(ETH_P_IP))
+ icmp_send(skb, ICMP_DEST_UNREACH, ICMP_HOST_UNREACH, 0);
+ else if (skb->protocol == htons(ETH_P_IPV6))
+ icmpv6_send(skb, ICMPV6_DEST_UNREACH, ICMPV6_ADDR_UNREACH, 0);
+ kfree_skb(skb);
+ return ret;
+}
+
+static const struct net_device_ops netdev_ops = {
+ .ndo_open = wg_open,
+ .ndo_stop = wg_stop,
+ .ndo_start_xmit = wg_xmit,
+ .ndo_get_stats64 = ip_tunnel_get_stats64
+};
+
+static void wg_destruct(struct net_device *dev)
+{
+ struct wg_device *wg = netdev_priv(dev);
+
+ rtnl_lock();
+ list_del(&wg->device_list);
+ rtnl_unlock();
+ mutex_lock(&wg->device_update_lock);
+ wg->incoming_port = 0;
+ wg_socket_reinit(wg, NULL, NULL);
+ /* The final references are cleared in the below calls to destroy_workqueue. */
+ wg_peer_remove_all(wg);
+ destroy_workqueue(wg->handshake_receive_wq);
+ destroy_workqueue(wg->handshake_send_wq);
+ destroy_workqueue(wg->packet_crypt_wq);
+ wg_packet_queue_free(&wg->decrypt_queue, true);
+ wg_packet_queue_free(&wg->encrypt_queue, true);
+ rcu_barrier(); /* Wait for all the peers to be actually freed. */
+ wg_ratelimiter_uninit();
+ memzero_explicit(&wg->static_identity, sizeof(wg->static_identity));
+ skb_queue_purge(&wg->incoming_handshakes);
+ free_percpu(dev->tstats);
+ free_percpu(wg->incoming_handshakes_worker);
+ if (wg->have_creating_net_ref)
+ put_net(wg->creating_net);
+ kvfree(wg->index_hashtable);
+ kvfree(wg->peer_hashtable);
+ mutex_unlock(&wg->device_update_lock);
+
+ pr_debug("%s: Interface deleted\n", dev->name);
+ free_netdev(dev);
+}
+
+static const struct device_type device_type = { .name = KBUILD_MODNAME };
+
+static void wg_setup(struct net_device *dev)
+{
+ struct wg_device *wg = netdev_priv(dev);
+ enum { WG_NETDEV_FEATURES = NETIF_F_HW_CSUM | NETIF_F_RXCSUM |
+ NETIF_F_SG | NETIF_F_GSO |
+ NETIF_F_GSO_SOFTWARE | NETIF_F_HIGHDMA };
+
+ dev->netdev_ops = &netdev_ops;
+ dev->hard_header_len = 0;
+ dev->addr_len = 0;
+ dev->needed_headroom = DATA_PACKET_HEAD_ROOM;
+ dev->needed_tailroom = noise_encrypted_len(MESSAGE_PADDING_MULTIPLE);
+ dev->type = ARPHRD_NONE;
+ dev->flags = IFF_POINTOPOINT | IFF_NOARP;
+ dev->priv_flags |= IFF_NO_QUEUE;
+ dev->features |= NETIF_F_LLTX;
+ dev->features |= WG_NETDEV_FEATURES;
+ dev->hw_features |= WG_NETDEV_FEATURES;
+ dev->hw_enc_features |= WG_NETDEV_FEATURES;
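+ /* The default MTU accounts for the worst-case outer headers: with a
+ * 1500-byte Ethernet payload this evaluates to the familiar 1420,
+ * i.e. 1500 - 32 (message overhead) - 8 (UDP) - 40 (IPv6).
+ */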
+ dev->mtu = ETH_DATA_LEN - MESSAGE_MINIMUM_LENGTH -
+ sizeof(struct udphdr) -
+ max(sizeof(struct ipv6hdr), sizeof(struct iphdr));
+
+ SET_NETDEV_DEVTYPE(dev, &device_type);
+
+ /* We need to keep the dst around in case of icmp replies. */
+ netif_keep_dst(dev);
+
+ memset(wg, 0, sizeof(*wg));
+ wg->dev = dev;
+}
+
+static int wg_newlink(struct net *src_net, struct net_device *dev,
+ struct nlattr *tb[], struct nlattr *data[],
+ struct netlink_ext_ack *extack)
+{
+ struct wg_device *wg = netdev_priv(dev);
+ int ret = -ENOMEM;
+
+ wg->creating_net = src_net;
+ init_rwsem(&wg->static_identity.lock);
+ mutex_init(&wg->socket_update_lock);
+ mutex_init(&wg->device_update_lock);
+ skb_queue_head_init(&wg->incoming_handshakes);
+ wg_allowedips_init(&wg->peer_allowedips);
+ wg_cookie_checker_init(&wg->cookie_checker, wg);
+ INIT_LIST_HEAD(&wg->peer_list);
+ wg->device_update_gen = 1;
+
+ wg->peer_hashtable = wg_pubkey_hashtable_alloc();
+ if (!wg->peer_hashtable)
+ return ret;
+
+ wg->index_hashtable = wg_index_hashtable_alloc();
+ if (!wg->index_hashtable)
+ goto err_free_peer_hashtable;
+
+ dev->tstats = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats);
+ if (!dev->tstats)
+ goto err_free_index_hashtable;
+
+ wg->incoming_handshakes_worker =
+ wg_packet_percpu_multicore_worker_alloc(
+ wg_packet_handshake_receive_worker, wg);
+ if (!wg->incoming_handshakes_worker)
+ goto err_free_tstats;
+
+ wg->handshake_receive_wq = alloc_workqueue("wg-kex-%s",
+ WQ_CPU_INTENSIVE | WQ_FREEZABLE, 0, dev->name);
+ if (!wg->handshake_receive_wq)
+ goto err_free_incoming_handshakes;
+
+ wg->handshake_send_wq = alloc_workqueue("wg-kex-%s",
+ WQ_UNBOUND | WQ_FREEZABLE, 0, dev->name);
+ if (!wg->handshake_send_wq)
+ goto err_destroy_handshake_receive;
+
+ wg->packet_crypt_wq = alloc_workqueue("wg-crypt-%s",
+ WQ_CPU_INTENSIVE | WQ_MEM_RECLAIM, 0, dev->name);
+ if (!wg->packet_crypt_wq)
+ goto err_destroy_handshake_send;
+
+ ret = wg_packet_queue_init(&wg->encrypt_queue, wg_packet_encrypt_worker,
+ true, MAX_QUEUED_PACKETS);
+ if (ret < 0)
+ goto err_destroy_packet_crypt;
+
+ ret = wg_packet_queue_init(&wg->decrypt_queue, wg_packet_decrypt_worker,
+ true, MAX_QUEUED_PACKETS);
+ if (ret < 0)
+ goto err_free_encrypt_queue;
+
+ ret = wg_ratelimiter_init();
+ if (ret < 0)
+ goto err_free_decrypt_queue;
+
+ ret = register_netdevice(dev);
+ if (ret < 0)
+ goto err_uninit_ratelimiter;
+
+ list_add(&wg->device_list, &device_list);
+
+ /* We wait until the end to assign priv_destructor, so that
+ * register_netdevice doesn't call it for us if it fails.
+ */
+ dev->priv_destructor = wg_destruct;
+
+ pr_debug("%s: Interface created\n", dev->name);
+ return ret;
+
+err_uninit_ratelimiter:
+ wg_ratelimiter_uninit();
+err_free_decrypt_queue:
+ wg_packet_queue_free(&wg->decrypt_queue, true);
+err_free_encrypt_queue:
+ wg_packet_queue_free(&wg->encrypt_queue, true);
+err_destroy_packet_crypt:
+ destroy_workqueue(wg->packet_crypt_wq);
+err_destroy_handshake_send:
+ destroy_workqueue(wg->handshake_send_wq);
+err_destroy_handshake_receive:
+ destroy_workqueue(wg->handshake_receive_wq);
+err_free_incoming_handshakes:
+ free_percpu(wg->incoming_handshakes_worker);
+err_free_tstats:
+ free_percpu(dev->tstats);
+err_free_index_hashtable:
+ kvfree(wg->index_hashtable);
+err_free_peer_hashtable:
+ kvfree(wg->peer_hashtable);
+ return ret;
+}
+
+static struct rtnl_link_ops link_ops __read_mostly = {
+ .kind = KBUILD_MODNAME,
+ .priv_size = sizeof(struct wg_device),
+ .setup = wg_setup,
+ .newlink = wg_newlink,
+};
+
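+/* Hold a reference on the creating netns only while the device lives in a
+ * different netns, so that an unmoved device does not pin its own
+ * namespace.
+ */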
+static int wg_netdevice_notification(struct notifier_block *nb,
+ unsigned long action, void *data)
+{
+ struct net_device *dev = ((struct netdev_notifier_info *)data)->dev;
+ struct wg_device *wg = netdev_priv(dev);
+
+ ASSERT_RTNL();
+
+ if (action != NETDEV_REGISTER || dev->netdev_ops != &netdev_ops)
+ return 0;
+
+ if (dev_net(dev) == wg->creating_net && wg->have_creating_net_ref) {
+ put_net(wg->creating_net);
+ wg->have_creating_net_ref = false;
+ } else if (dev_net(dev) != wg->creating_net &&
+ !wg->have_creating_net_ref) {
+ wg->have_creating_net_ref = true;
+ get_net(wg->creating_net);
+ }
+ return 0;
+}
+
+static struct notifier_block netdevice_notifier = {
+ .notifier_call = wg_netdevice_notification
+};
+
+int __init wg_device_init(void)
+{
+ int ret;
+
+#ifdef CONFIG_PM_SLEEP
+ ret = register_pm_notifier(&pm_notifier);
+ if (ret)
+ return ret;
+#endif
+
+ ret = register_netdevice_notifier(&netdevice_notifier);
+ if (ret)
+ goto error_pm;
+
+ ret = rtnl_link_register(&link_ops);
+ if (ret)
+ goto error_netdevice;
+
+ return 0;
+
+error_netdevice:
+ unregister_netdevice_notifier(&netdevice_notifier);
+error_pm:
+#ifdef CONFIG_PM_SLEEP
+ unregister_pm_notifier(&pm_notifier);
+#endif
+ return ret;
+}
+
+void wg_device_uninit(void)
+{
+ rtnl_link_unregister(&link_ops);
+ unregister_netdevice_notifier(&netdevice_notifier);
+#ifdef CONFIG_PM_SLEEP
+ unregister_pm_notifier(&pm_notifier);
+#endif
+ rcu_barrier();
+}
diff --git a/drivers/net/wireguard/device.h b/drivers/net/wireguard/device.h
new file mode 100644
index 000000000000..b15a8be9d816
--- /dev/null
+++ b/drivers/net/wireguard/device.h
@@ -0,0 +1,65 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2015-2019 Jason A. Donenfeld <Jason@zx2c4.com>. All Rights Reserved.
+ */
+
+#ifndef _WG_DEVICE_H
+#define _WG_DEVICE_H
+
+#include "noise.h"
+#include "allowedips.h"
+#include "peerlookup.h"
+#include "cookie.h"
+
+#include <linux/types.h>
+#include <linux/netdevice.h>
+#include <linux/workqueue.h>
+#include <linux/mutex.h>
+#include <linux/net.h>
+#include <linux/ptr_ring.h>
+
+struct wg_device;
+
+struct multicore_worker {
+ void *ptr;
+ struct work_struct work;
+};
+
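+/* A crypt_queue is consumed either by per-CPU workers, with last_cpu
+ * tracking the most recent dispatch target so work can be spread across
+ * CPUs, or by a single work item; hence the union.
+ */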
+struct crypt_queue {
+ struct ptr_ring ring;
+ union {
+ struct {
+ struct multicore_worker __percpu *worker;
+ int last_cpu;
+ };
+ struct work_struct work;
+ };
+};
+
+struct wg_device {
+ struct net_device *dev;
+ struct crypt_queue encrypt_queue, decrypt_queue;
+ struct sock __rcu *sock4, *sock6;
+ struct net *creating_net;
+ struct noise_static_identity static_identity;
+ struct workqueue_struct *handshake_receive_wq, *handshake_send_wq;
+ struct workqueue_struct *packet_crypt_wq;
+ struct sk_buff_head incoming_handshakes;
+ int incoming_handshake_cpu;
+ struct multicore_worker __percpu *incoming_handshakes_worker;
+ struct cookie_checker cookie_checker;
+ struct pubkey_hashtable *peer_hashtable;
+ struct index_hashtable *index_hashtable;
+ struct allowedips peer_allowedips;
+ struct mutex device_update_lock, socket_update_lock;
+ struct list_head device_list, peer_list;
+ unsigned int num_peers, device_update_gen;
+ u32 fwmark;
+ u16 incoming_port;
+ bool have_creating_net_ref;
+};
+
+int wg_device_init(void);
+void wg_device_uninit(void);
+
+#endif /* _WG_DEVICE_H */
diff --git a/drivers/net/wireguard/main.c b/drivers/net/wireguard/main.c
new file mode 100644
index 000000000000..7a7d5f1a80fc
--- /dev/null
+++ b/drivers/net/wireguard/main.c
@@ -0,0 +1,63 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2015-2019 Jason A. Donenfeld <Jason@zx2c4.com>. All Rights Reserved.
+ */
+
+#include "version.h"
+#include "device.h"
+#include "noise.h"
+#include "queueing.h"
+#include "ratelimiter.h"
+#include "netlink.h"
+
+#include <uapi/linux/wireguard.h>
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/genetlink.h>
+#include <net/rtnetlink.h>
+
+static int __init mod_init(void)
+{
+ int ret;
+
+#ifdef DEBUG
+ if (!wg_allowedips_selftest() || !wg_packet_counter_selftest() ||
+ !wg_ratelimiter_selftest())
+ return -ENOTRECOVERABLE;
+#endif
+ wg_noise_init();
+
+ ret = wg_device_init();
+ if (ret < 0)
+ goto err_device;
+
+ ret = wg_genetlink_init();
+ if (ret < 0)
+ goto err_netlink;
+
+ pr_info("WireGuard " WIREGUARD_VERSION " loaded. See www.wireguard.com for information.\n");
+ pr_info("Copyright (C) 2015-2019 Jason A. Donenfeld <Jason@zx2c4.com>. All Rights Reserved.\n");
+
+ return 0;
+
+err_netlink:
+ wg_device_uninit();
+err_device:
+ return ret;
+}
+
+static void __exit mod_exit(void)
+{
+ wg_genetlink_uninit();
+ wg_device_uninit();
+}
+
+module_init(mod_init);
+module_exit(mod_exit);
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("WireGuard secure network tunnel");
+MODULE_AUTHOR("Jason A. Donenfeld <Jason@zx2c4.com>");
+MODULE_VERSION(WIREGUARD_VERSION);
+MODULE_ALIAS_RTNL_LINK(KBUILD_MODNAME);
+MODULE_ALIAS_GENL_FAMILY(WG_GENL_NAME);
diff --git a/drivers/net/wireguard/messages.h b/drivers/net/wireguard/messages.h
new file mode 100644
index 000000000000..b8a7b9ce32ba
--- /dev/null
+++ b/drivers/net/wireguard/messages.h
@@ -0,0 +1,128 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2015-2019 Jason A. Donenfeld <Jason@zx2c4.com>. All Rights Reserved.
+ */
+
+#ifndef _WG_MESSAGES_H
+#define _WG_MESSAGES_H
+
+#include <crypto/curve25519.h>
+#include <crypto/chacha20poly1305.h>
+#include <crypto/blake2s.h>
+
+#include <linux/kernel.h>
+#include <linux/param.h>
+#include <linux/skbuff.h>
+
+enum noise_lengths {
+ NOISE_PUBLIC_KEY_LEN = CURVE25519_KEY_SIZE,
+ NOISE_SYMMETRIC_KEY_LEN = CHACHA20POLY1305_KEY_SIZE,
+ NOISE_TIMESTAMP_LEN = sizeof(u64) + sizeof(u32),
+ NOISE_AUTHTAG_LEN = CHACHA20POLY1305_AUTHTAG_SIZE,
+ NOISE_HASH_LEN = BLAKE2S_HASH_SIZE
+};
+
+#define noise_encrypted_len(plain_len) ((plain_len) + NOISE_AUTHTAG_LEN)
+
+enum cookie_values {
+ COOKIE_SECRET_MAX_AGE = 2 * 60,
+ COOKIE_SECRET_LATENCY = 5,
+ COOKIE_NONCE_LEN = XCHACHA20POLY1305_NONCE_SIZE,
+ COOKIE_LEN = 16
+};
+
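+/* Replay protection uses a sliding window bitmap of COUNTER_BITS_TOTAL
+ * bits; one machine word of it is treated as redundant so the window can
+ * slide a word at a time, giving the usable COUNTER_WINDOW_SIZE.
+ */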
+enum counter_values {
+ COUNTER_BITS_TOTAL = 2048,
+ COUNTER_REDUNDANT_BITS = BITS_PER_LONG,
+ COUNTER_WINDOW_SIZE = COUNTER_BITS_TOTAL - COUNTER_REDUNDANT_BITS
+};
+
+enum limits {
+ REKEY_AFTER_MESSAGES = 1ULL << 60,
+ REJECT_AFTER_MESSAGES = U64_MAX - COUNTER_WINDOW_SIZE - 1,
+ REKEY_TIMEOUT = 5,
+ REKEY_TIMEOUT_JITTER_MAX_JIFFIES = HZ / 3,
+ REKEY_AFTER_TIME = 120,
+ REJECT_AFTER_TIME = 180,
+ INITIATIONS_PER_SECOND = 50,
+ MAX_PEERS_PER_DEVICE = 1U << 20,
+ KEEPALIVE_TIMEOUT = 10,
+ MAX_TIMER_HANDSHAKES = 90 / REKEY_TIMEOUT,
+ MAX_QUEUED_INCOMING_HANDSHAKES = 4096, /* TODO: replace this with DQL */
+ MAX_STAGED_PACKETS = 128,
+ MAX_QUEUED_PACKETS = 1024 /* TODO: replace this with DQL */
+};
+
+enum message_type {
+ MESSAGE_INVALID = 0,
+ MESSAGE_HANDSHAKE_INITIATION = 1,
+ MESSAGE_HANDSHAKE_RESPONSE = 2,
+ MESSAGE_HANDSHAKE_COOKIE = 3,
+ MESSAGE_DATA = 4
+};
+
+struct message_header {
+ /* The actual layout of this that we want is:
+ * u8 type
+ * u8 reserved_zero[3]
+ *
+ * But it turns out that by encoding this as little endian,
+ * we achieve the same thing, and it makes checking faster.
+ */
+ __le32 type;
+};
+
+struct message_macs {
+ u8 mac1[COOKIE_LEN];
+ u8 mac2[COOKIE_LEN];
+};
+
+struct message_handshake_initiation {
+ struct message_header header;
+ __le32 sender_index;
+ u8 unencrypted_ephemeral[NOISE_PUBLIC_KEY_LEN];
+ u8 encrypted_static[noise_encrypted_len(NOISE_PUBLIC_KEY_LEN)];
+ u8 encrypted_timestamp[noise_encrypted_len(NOISE_TIMESTAMP_LEN)];
+ struct message_macs macs;
+};
+
+struct message_handshake_response {
+ struct message_header header;
+ __le32 sender_index;
+ __le32 receiver_index;
+ u8 unencrypted_ephemeral[NOISE_PUBLIC_KEY_LEN];
+ u8 encrypted_nothing[noise_encrypted_len(0)];
+ struct message_macs macs;
+};
+
+struct message_handshake_cookie {
+ struct message_header header;
+ __le32 receiver_index;
+ u8 nonce[COOKIE_NONCE_LEN];
+ u8 encrypted_cookie[noise_encrypted_len(COOKIE_LEN)];
+};
+
+struct message_data {
+ struct message_header header;
+ __le32 key_idx;
+ __le64 counter;
+ u8 encrypted_data[];
+};
+
+#define message_data_len(plain_len) \
+ (noise_encrypted_len(plain_len) + sizeof(struct message_data))
+
+enum message_alignments {
+ MESSAGE_PADDING_MULTIPLE = 16,
+ MESSAGE_MINIMUM_LENGTH = message_data_len(0)
+};
+
+#define SKB_HEADER_LEN \
+ (max(sizeof(struct iphdr), sizeof(struct ipv6hdr)) + \
+ sizeof(struct udphdr) + NET_SKB_PAD)
+#define DATA_PACKET_HEAD_ROOM \
+ ALIGN(sizeof(struct message_data) + SKB_HEADER_LEN, 4)
+
+enum { HANDSHAKE_DSCP = 0x88 /* AF41, plus 00 ECN */ };
+
+#endif /* _WG_MESSAGES_H */
diff --git a/drivers/net/wireguard/netlink.c b/drivers/net/wireguard/netlink.c
new file mode 100644
index 000000000000..0fdbd1c45977
--- /dev/null
+++ b/drivers/net/wireguard/netlink.c
@@ -0,0 +1,642 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2015-2019 Jason A. Donenfeld <Jason@zx2c4.com>. All Rights Reserved.
+ */
+
+#include "netlink.h"
+#include "device.h"
+#include "peer.h"
+#include "socket.h"
+#include "queueing.h"
+#include "messages.h"
+
+#include <uapi/linux/wireguard.h>
+
+#include <linux/if.h>
+#include <net/genetlink.h>
+#include <net/sock.h>
+#include <crypto/algapi.h>
+
+static struct genl_family genl_family;
+
+static const struct nla_policy device_policy[WGDEVICE_A_MAX + 1] = {
+ [WGDEVICE_A_IFINDEX] = { .type = NLA_U32 },
+ [WGDEVICE_A_IFNAME] = { .type = NLA_NUL_STRING, .len = IFNAMSIZ - 1 },
+ [WGDEVICE_A_PRIVATE_KEY] = { .type = NLA_EXACT_LEN, .len = NOISE_PUBLIC_KEY_LEN },
+ [WGDEVICE_A_PUBLIC_KEY] = { .type = NLA_EXACT_LEN, .len = NOISE_PUBLIC_KEY_LEN },
+ [WGDEVICE_A_FLAGS] = { .type = NLA_U32 },
+ [WGDEVICE_A_LISTEN_PORT] = { .type = NLA_U16 },
+ [WGDEVICE_A_FWMARK] = { .type = NLA_U32 },
+ [WGDEVICE_A_PEERS] = { .type = NLA_NESTED }
+};
+
+static const struct nla_policy peer_policy[WGPEER_A_MAX + 1] = {
+ [WGPEER_A_PUBLIC_KEY] = { .type = NLA_EXACT_LEN, .len = NOISE_PUBLIC_KEY_LEN },
+ [WGPEER_A_PRESHARED_KEY] = { .type = NLA_EXACT_LEN, .len = NOISE_SYMMETRIC_KEY_LEN },
+ [WGPEER_A_FLAGS] = { .type = NLA_U32 },
+ [WGPEER_A_ENDPOINT] = { .type = NLA_MIN_LEN, .len = sizeof(struct sockaddr) },
+ [WGPEER_A_PERSISTENT_KEEPALIVE_INTERVAL] = { .type = NLA_U16 },
+ [WGPEER_A_LAST_HANDSHAKE_TIME] = { .type = NLA_EXACT_LEN, .len = sizeof(struct __kernel_timespec) },
+ [WGPEER_A_RX_BYTES] = { .type = NLA_U64 },
+ [WGPEER_A_TX_BYTES] = { .type = NLA_U64 },
+ [WGPEER_A_ALLOWEDIPS] = { .type = NLA_NESTED },
+ [WGPEER_A_PROTOCOL_VERSION] = { .type = NLA_U32 }
+};
+
+static const struct nla_policy allowedip_policy[WGALLOWEDIP_A_MAX + 1] = {
+ [WGALLOWEDIP_A_FAMILY] = { .type = NLA_U16 },
+ [WGALLOWEDIP_A_IPADDR] = { .type = NLA_MIN_LEN, .len = sizeof(struct in_addr) },
+ [WGALLOWEDIP_A_CIDR_MASK] = { .type = NLA_U8 }
+};
+
+static struct wg_device *lookup_interface(struct nlattr **attrs,
+ struct sk_buff *skb)
+{
+ struct net_device *dev = NULL;
+
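+ /* Exactly one of ifindex and ifname may be specified; !a == !b is
+ * true when both or neither are present.
+ */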
+ if (!attrs[WGDEVICE_A_IFINDEX] == !attrs[WGDEVICE_A_IFNAME])
+ return ERR_PTR(-EBADR);
+ if (attrs[WGDEVICE_A_IFINDEX])
+ dev = dev_get_by_index(sock_net(skb->sk),
+ nla_get_u32(attrs[WGDEVICE_A_IFINDEX]));
+ else if (attrs[WGDEVICE_A_IFNAME])
+ dev = dev_get_by_name(sock_net(skb->sk),
+ nla_data(attrs[WGDEVICE_A_IFNAME]));
+ if (!dev)
+ return ERR_PTR(-ENODEV);
+ if (!dev->rtnl_link_ops || !dev->rtnl_link_ops->kind ||
+ strcmp(dev->rtnl_link_ops->kind, KBUILD_MODNAME)) {
+ dev_put(dev);
+ return ERR_PTR(-EOPNOTSUPP);
+ }
+ return netdev_priv(dev);
+}
+
+static int get_allowedips(struct sk_buff *skb, const u8 *ip, u8 cidr,
+ int family)
+{
+ struct nlattr *allowedip_nest;
+
+ allowedip_nest = nla_nest_start(skb, 0);
+ if (!allowedip_nest)
+ return -EMSGSIZE;
+
+ if (nla_put_u8(skb, WGALLOWEDIP_A_CIDR_MASK, cidr) ||
+ nla_put_u16(skb, WGALLOWEDIP_A_FAMILY, family) ||
+ nla_put(skb, WGALLOWEDIP_A_IPADDR, family == AF_INET6 ?
+ sizeof(struct in6_addr) : sizeof(struct in_addr), ip)) {
+ nla_nest_cancel(skb, allowedip_nest);
+ return -EMSGSIZE;
+ }
+
+ nla_nest_end(skb, allowedip_nest);
+ return 0;
+}
+
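+/* The dump context is stashed in netlink_callback->args, so a multi-part
+ * dump can resume from the exact peer and allowed-ip where the previous
+ * message ran out of room.
+ */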
+struct dump_ctx {
+ struct wg_device *wg;
+ struct wg_peer *next_peer;
+ u64 allowedips_seq;
+ struct allowedips_node *next_allowedip;
+};
+
+#define DUMP_CTX(cb) ((struct dump_ctx *)(cb)->args)
+
+static int
+get_peer(struct wg_peer *peer, struct sk_buff *skb, struct dump_ctx *ctx)
+{
+ struct nlattr *allowedips_nest, *peer_nest = nla_nest_start(skb, 0);
+ struct allowedips_node *allowedips_node = ctx->next_allowedip;
+ bool fail;
+
+ if (!peer_nest)
+ return -EMSGSIZE;
+
+ down_read(&peer->handshake.lock);
+ fail = nla_put(skb, WGPEER_A_PUBLIC_KEY, NOISE_PUBLIC_KEY_LEN,
+ peer->handshake.remote_static);
+ up_read(&peer->handshake.lock);
+ if (fail)
+ goto err;
+
+ if (!allowedips_node) {
+ const struct __kernel_timespec last_handshake = {
+ .tv_sec = peer->walltime_last_handshake.tv_sec,
+ .tv_nsec = peer->walltime_last_handshake.tv_nsec
+ };
+
+ down_read(&peer->handshake.lock);
+ fail = nla_put(skb, WGPEER_A_PRESHARED_KEY,
+ NOISE_SYMMETRIC_KEY_LEN,
+ peer->handshake.preshared_key);
+ up_read(&peer->handshake.lock);
+ if (fail)
+ goto err;
+
+ if (nla_put(skb, WGPEER_A_LAST_HANDSHAKE_TIME,
+ sizeof(last_handshake), &last_handshake) ||
+ nla_put_u16(skb, WGPEER_A_PERSISTENT_KEEPALIVE_INTERVAL,
+ peer->persistent_keepalive_interval) ||
+ nla_put_u64_64bit(skb, WGPEER_A_TX_BYTES, peer->tx_bytes,
+ WGPEER_A_UNSPEC) ||
+ nla_put_u64_64bit(skb, WGPEER_A_RX_BYTES, peer->rx_bytes,
+ WGPEER_A_UNSPEC) ||
+ nla_put_u32(skb, WGPEER_A_PROTOCOL_VERSION, 1))
+ goto err;
+
+ read_lock_bh(&peer->endpoint_lock);
+ if (peer->endpoint.addr.sa_family == AF_INET)
+ fail = nla_put(skb, WGPEER_A_ENDPOINT,
+ sizeof(peer->endpoint.addr4),
+ &peer->endpoint.addr4);
+ else if (peer->endpoint.addr.sa_family == AF_INET6)
+ fail = nla_put(skb, WGPEER_A_ENDPOINT,
+ sizeof(peer->endpoint.addr6),
+ &peer->endpoint.addr6);
+ read_unlock_bh(&peer->endpoint_lock);
+ if (fail)
+ goto err;
+ allowedips_node =
+ list_first_entry_or_null(&peer->allowedips_list,
+ struct allowedips_node, peer_list);
+ }
+ if (!allowedips_node)
+ goto no_allowedips;
+ if (!ctx->allowedips_seq)
+ ctx->allowedips_seq = peer->device->peer_allowedips.seq;
+ else if (ctx->allowedips_seq != peer->device->peer_allowedips.seq)
+ goto no_allowedips;
+
+ allowedips_nest = nla_nest_start(skb, WGPEER_A_ALLOWEDIPS);
+ if (!allowedips_nest)
+ goto err;
+
+ list_for_each_entry_from(allowedips_node, &peer->allowedips_list,
+ peer_list) {
+ u8 cidr, ip[16] __aligned(__alignof(u64));
+ int family;
+
+ family = wg_allowedips_read_node(allowedips_node, ip, &cidr);
+ if (get_allowedips(skb, ip, cidr, family)) {
+ nla_nest_end(skb, allowedips_nest);
+ nla_nest_end(skb, peer_nest);
+ ctx->next_allowedip = allowedips_node;
+ return -EMSGSIZE;
+ }
+ }
+ nla_nest_end(skb, allowedips_nest);
+no_allowedips:
+ nla_nest_end(skb, peer_nest);
+ ctx->next_allowedip = NULL;
+ ctx->allowedips_seq = 0;
+ return 0;
+err:
+ nla_nest_cancel(skb, peer_nest);
+ return -EMSGSIZE;
+}
+
+static int wg_get_device_start(struct netlink_callback *cb)
+{
+ struct wg_device *wg;
+
+ wg = lookup_interface(genl_dumpit_info(cb)->attrs, cb->skb);
+ if (IS_ERR(wg))
+ return PTR_ERR(wg);
+ DUMP_CTX(cb)->wg = wg;
+ return 0;
+}
+
+static int wg_get_device_dump(struct sk_buff *skb, struct netlink_callback *cb)
+{
+ struct wg_peer *peer, *next_peer_cursor;
+ struct dump_ctx *ctx = DUMP_CTX(cb);
+ struct wg_device *wg = ctx->wg;
+ struct nlattr *peers_nest;
+ int ret = -EMSGSIZE;
+ bool done = true;
+ void *hdr;
+
+ rtnl_lock();
+ mutex_lock(&wg->device_update_lock);
+ cb->seq = wg->device_update_gen;
+ next_peer_cursor = ctx->next_peer;
+
+ hdr = genlmsg_put(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq,
+ &genl_family, NLM_F_MULTI, WG_CMD_GET_DEVICE);
+ if (!hdr)
+ goto out;
+ genl_dump_check_consistent(cb, hdr);
+
+ if (!ctx->next_peer) {
+ if (nla_put_u16(skb, WGDEVICE_A_LISTEN_PORT,
+ wg->incoming_port) ||
+ nla_put_u32(skb, WGDEVICE_A_FWMARK, wg->fwmark) ||
+ nla_put_u32(skb, WGDEVICE_A_IFINDEX, wg->dev->ifindex) ||
+ nla_put_string(skb, WGDEVICE_A_IFNAME, wg->dev->name))
+ goto out;
+
+ down_read(&wg->static_identity.lock);
+ if (wg->static_identity.has_identity) {
+ if (nla_put(skb, WGDEVICE_A_PRIVATE_KEY,
+ NOISE_PUBLIC_KEY_LEN,
+ wg->static_identity.static_private) ||
+ nla_put(skb, WGDEVICE_A_PUBLIC_KEY,
+ NOISE_PUBLIC_KEY_LEN,
+ wg->static_identity.static_public)) {
+ up_read(&wg->static_identity.lock);
+ goto out;
+ }
+ }
+ up_read(&wg->static_identity.lock);
+ }
+
+ peers_nest = nla_nest_start(skb, WGDEVICE_A_PEERS);
+ if (!peers_nest)
+ goto out;
+ ret = 0;
+ /* If the last cursor was removed via list_del_init in peer_remove, then
+ * we just treat this the same as there being no more peers left. The
+ * reason is that seq_nr should indicate to userspace that this isn't a
+ * coherent dump anyway, so they'll try again.
+ */
+ if (list_empty(&wg->peer_list) ||
+ (ctx->next_peer && list_empty(&ctx->next_peer->peer_list))) {
+ nla_nest_cancel(skb, peers_nest);
+ goto out;
+ }
+ lockdep_assert_held(&wg->device_update_lock);
+ peer = list_prepare_entry(ctx->next_peer, &wg->peer_list, peer_list);
+ list_for_each_entry_continue(peer, &wg->peer_list, peer_list) {
+ if (get_peer(peer, skb, ctx)) {
+ done = false;
+ break;
+ }
+ next_peer_cursor = peer;
+ }
+ nla_nest_end(skb, peers_nest);
+
+out:
+ if (!ret && !done && next_peer_cursor)
+ wg_peer_get(next_peer_cursor);
+ wg_peer_put(ctx->next_peer);
+ mutex_unlock(&wg->device_update_lock);
+ rtnl_unlock();
+
+ if (ret) {
+ genlmsg_cancel(skb, hdr);
+ return ret;
+ }
+ genlmsg_end(skb, hdr);
+ if (done) {
+ ctx->next_peer = NULL;
+ return 0;
+ }
+ ctx->next_peer = next_peer_cursor;
+ return skb->len;
+
+ /* At this point, we can't really deal ourselves with safely zeroing out
+ * the private key material after usage. This will need an additional API
+ * in the kernel for marking skbs as zero_on_free.
+ */
+}
+
+static int wg_get_device_done(struct netlink_callback *cb)
+{
+ struct dump_ctx *ctx = DUMP_CTX(cb);
+
+ if (ctx->wg)
+ dev_put(ctx->wg->dev);
+ wg_peer_put(ctx->next_peer);
+ return 0;
+}
+
+static int set_port(struct wg_device *wg, u16 port)
+{
+ struct wg_peer *peer;
+
+ if (wg->incoming_port == port)
+ return 0;
+ list_for_each_entry(peer, &wg->peer_list, peer_list)
+ wg_socket_clear_peer_endpoint_src(peer);
+ if (!netif_running(wg->dev)) {
+ wg->incoming_port = port;
+ return 0;
+ }
+ return wg_socket_init(wg, port);
+}
+
+static int set_allowedip(struct wg_peer *peer, struct nlattr **attrs)
+{
+ int ret = -EINVAL;
+ u16 family;
+ u8 cidr;
+
+ if (!attrs[WGALLOWEDIP_A_FAMILY] || !attrs[WGALLOWEDIP_A_IPADDR] ||
+ !attrs[WGALLOWEDIP_A_CIDR_MASK])
+ return ret;
+ family = nla_get_u16(attrs[WGALLOWEDIP_A_FAMILY]);
+ cidr = nla_get_u8(attrs[WGALLOWEDIP_A_CIDR_MASK]);
+
+ if (family == AF_INET && cidr <= 32 &&
+ nla_len(attrs[WGALLOWEDIP_A_IPADDR]) == sizeof(struct in_addr))
+ ret = wg_allowedips_insert_v4(
+ &peer->device->peer_allowedips,
+ nla_data(attrs[WGALLOWEDIP_A_IPADDR]), cidr, peer,
+ &peer->device->device_update_lock);
+ else if (family == AF_INET6 && cidr <= 128 &&
+ nla_len(attrs[WGALLOWEDIP_A_IPADDR]) == sizeof(struct in6_addr))
+ ret = wg_allowedips_insert_v6(
+ &peer->device->peer_allowedips,
+ nla_data(attrs[WGALLOWEDIP_A_IPADDR]), cidr, peer,
+ &peer->device->device_update_lock);
+
+ return ret;
+}
+
+static int set_peer(struct wg_device *wg, struct nlattr **attrs)
+{
+ u8 *public_key = NULL, *preshared_key = NULL;
+ struct wg_peer *peer = NULL;
+ u32 flags = 0;
+ int ret;
+
+ ret = -EINVAL;
+ if (attrs[WGPEER_A_PUBLIC_KEY] &&
+ nla_len(attrs[WGPEER_A_PUBLIC_KEY]) == NOISE_PUBLIC_KEY_LEN)
+ public_key = nla_data(attrs[WGPEER_A_PUBLIC_KEY]);
+ else
+ goto out;
+ if (attrs[WGPEER_A_PRESHARED_KEY] &&
+ nla_len(attrs[WGPEER_A_PRESHARED_KEY]) == NOISE_SYMMETRIC_KEY_LEN)
+ preshared_key = nla_data(attrs[WGPEER_A_PRESHARED_KEY]);
+
+ if (attrs[WGPEER_A_FLAGS])
+ flags = nla_get_u32(attrs[WGPEER_A_FLAGS]);
+ ret = -EOPNOTSUPP;
+ if (flags & ~__WGPEER_F_ALL)
+ goto out;
+
+ ret = -EPFNOSUPPORT;
+ if (attrs[WGPEER_A_PROTOCOL_VERSION]) {
+ if (nla_get_u32(attrs[WGPEER_A_PROTOCOL_VERSION]) != 1)
+ goto out;
+ }
+
+ peer = wg_pubkey_hashtable_lookup(wg->peer_hashtable,
+ nla_data(attrs[WGPEER_A_PUBLIC_KEY]));
+ ret = 0;
+ if (!peer) { /* Peer doesn't exist yet. Add a new one. */
+ if (flags & (WGPEER_F_REMOVE_ME | WGPEER_F_UPDATE_ONLY))
+ goto out;
+
+ /* The peer is new, so there aren't allowed IPs to remove. */
+ flags &= ~WGPEER_F_REPLACE_ALLOWEDIPS;
+
+ down_read(&wg->static_identity.lock);
+ if (wg->static_identity.has_identity &&
+ !memcmp(nla_data(attrs[WGPEER_A_PUBLIC_KEY]),
+ wg->static_identity.static_public,
+ NOISE_PUBLIC_KEY_LEN)) {
+ /* We silently ignore peers that have the same public
+ * key as the device. The reason we do it silently is
+ * that we'd like for people to be able to reuse the
+ * same set of API calls across peers.
+ */
+ up_read(&wg->static_identity.lock);
+ ret = 0;
+ goto out;
+ }
+ up_read(&wg->static_identity.lock);
+
+ peer = wg_peer_create(wg, public_key, preshared_key);
+ if (IS_ERR(peer)) {
+ /* Similar to the above, if the key is invalid, we skip
+ * it without fanfare, so that services don't need to
+ * worry about doing key validation themselves.
+ */
+ ret = PTR_ERR(peer) == -EKEYREJECTED ? 0 : PTR_ERR(peer);
+ peer = NULL;
+ goto out;
+ }
+ /* Take additional reference, as though we've just been
+ * looked up.
+ */
+ wg_peer_get(peer);
+ }
+
+ if (flags & WGPEER_F_REMOVE_ME) {
+ wg_peer_remove(peer);
+ goto out;
+ }
+
+ if (preshared_key) {
+ down_write(&peer->handshake.lock);
+ memcpy(&peer->handshake.preshared_key, preshared_key,
+ NOISE_SYMMETRIC_KEY_LEN);
+ up_write(&peer->handshake.lock);
+ }
+
+ if (attrs[WGPEER_A_ENDPOINT]) {
+ struct sockaddr *addr = nla_data(attrs[WGPEER_A_ENDPOINT]);
+ size_t len = nla_len(attrs[WGPEER_A_ENDPOINT]);
+
+ if ((len == sizeof(struct sockaddr_in) &&
+ addr->sa_family == AF_INET) ||
+ (len == sizeof(struct sockaddr_in6) &&
+ addr->sa_family == AF_INET6)) {
+ struct endpoint endpoint = { { { 0 } } };
+
+ memcpy(&endpoint.addr, addr, len);
+ wg_socket_set_peer_endpoint(peer, &endpoint);
+ }
+ }
+
+ if (flags & WGPEER_F_REPLACE_ALLOWEDIPS)
+ wg_allowedips_remove_by_peer(&wg->peer_allowedips, peer,
+ &wg->device_update_lock);
+
+ if (attrs[WGPEER_A_ALLOWEDIPS]) {
+ struct nlattr *attr, *allowedip[WGALLOWEDIP_A_MAX + 1];
+ int rem;
+
+ nla_for_each_nested(attr, attrs[WGPEER_A_ALLOWEDIPS], rem) {
+ ret = nla_parse_nested(allowedip, WGALLOWEDIP_A_MAX,
+ attr, allowedip_policy, NULL);
+ if (ret < 0)
+ goto out;
+ ret = set_allowedip(peer, allowedip);
+ if (ret < 0)
+ goto out;
+ }
+ }
+
+ if (attrs[WGPEER_A_PERSISTENT_KEEPALIVE_INTERVAL]) {
+ const u16 persistent_keepalive_interval = nla_get_u16(
+ attrs[WGPEER_A_PERSISTENT_KEEPALIVE_INTERVAL]);
+ const bool send_keepalive =
+ !peer->persistent_keepalive_interval &&
+ persistent_keepalive_interval &&
+ netif_running(wg->dev);
+
+ peer->persistent_keepalive_interval = persistent_keepalive_interval;
+ if (send_keepalive)
+ wg_packet_send_keepalive(peer);
+ }
+
+ if (netif_running(wg->dev))
+ wg_packet_send_staged_packets(peer);
+
+out:
+ wg_peer_put(peer);
+ if (attrs[WGPEER_A_PRESHARED_KEY])
+ memzero_explicit(nla_data(attrs[WGPEER_A_PRESHARED_KEY]),
+ nla_len(attrs[WGPEER_A_PRESHARED_KEY]));
+ return ret;
+}
+
+static int wg_set_device(struct sk_buff *skb, struct genl_info *info)
+{
+ struct wg_device *wg = lookup_interface(info->attrs, skb);
+ u32 flags = 0;
+ int ret;
+
+ if (IS_ERR(wg)) {
+ ret = PTR_ERR(wg);
+ goto out_nodev;
+ }
+
+ rtnl_lock();
+ mutex_lock(&wg->device_update_lock);
+
+ if (info->attrs[WGDEVICE_A_FLAGS])
+ flags = nla_get_u32(info->attrs[WGDEVICE_A_FLAGS]);
+ ret = -EOPNOTSUPP;
+ if (flags & ~__WGDEVICE_F_ALL)
+ goto out;
+
+ ret = -EPERM;
+ if ((info->attrs[WGDEVICE_A_LISTEN_PORT] ||
+ info->attrs[WGDEVICE_A_FWMARK]) &&
+ !ns_capable(wg->creating_net->user_ns, CAP_NET_ADMIN))
+ goto out;
+
+ ++wg->device_update_gen;
+
+ if (info->attrs[WGDEVICE_A_FWMARK]) {
+ struct wg_peer *peer;
+
+ wg->fwmark = nla_get_u32(info->attrs[WGDEVICE_A_FWMARK]);
+ list_for_each_entry(peer, &wg->peer_list, peer_list)
+ wg_socket_clear_peer_endpoint_src(peer);
+ }
+
+ if (info->attrs[WGDEVICE_A_LISTEN_PORT]) {
+ ret = set_port(wg,
+ nla_get_u16(info->attrs[WGDEVICE_A_LISTEN_PORT]));
+ if (ret)
+ goto out;
+ }
+
+ if (flags & WGDEVICE_F_REPLACE_PEERS)
+ wg_peer_remove_all(wg);
+
+ if (info->attrs[WGDEVICE_A_PRIVATE_KEY] &&
+ nla_len(info->attrs[WGDEVICE_A_PRIVATE_KEY]) ==
+ NOISE_PUBLIC_KEY_LEN) {
+ u8 *private_key = nla_data(info->attrs[WGDEVICE_A_PRIVATE_KEY]);
+ u8 public_key[NOISE_PUBLIC_KEY_LEN];
+ struct wg_peer *peer, *temp;
+
+ if (!crypto_memneq(wg->static_identity.static_private,
+ private_key, NOISE_PUBLIC_KEY_LEN))
+ goto skip_set_private_key;
+
+ /* We remove before setting, to prevent race, which means doing
+ * two 25519-genpub ops.
+ */
+ if (curve25519_generate_public(public_key, private_key)) {
+ peer = wg_pubkey_hashtable_lookup(wg->peer_hashtable,
+ public_key);
+ if (peer) {
+ wg_peer_put(peer);
+ wg_peer_remove(peer);
+ }
+ }
+
+ down_write(&wg->static_identity.lock);
+ wg_noise_set_static_identity_private_key(&wg->static_identity,
+ private_key);
+ list_for_each_entry_safe(peer, temp, &wg->peer_list,
+ peer_list) {
+ if (wg_noise_precompute_static_static(peer))
+ wg_noise_expire_current_peer_keypairs(peer);
+ else
+ wg_peer_remove(peer);
+ }
+ wg_cookie_checker_precompute_device_keys(&wg->cookie_checker);
+ up_write(&wg->static_identity.lock);
+ }
+skip_set_private_key:
+
+ if (info->attrs[WGDEVICE_A_PEERS]) {
+ struct nlattr *attr, *peer[WGPEER_A_MAX + 1];
+ int rem;
+
+ nla_for_each_nested(attr, info->attrs[WGDEVICE_A_PEERS], rem) {
+ ret = nla_parse_nested(peer, WGPEER_A_MAX, attr,
+ peer_policy, NULL);
+ if (ret < 0)
+ goto out;
+ ret = set_peer(wg, peer);
+ if (ret < 0)
+ goto out;
+ }
+ }
+ ret = 0;
+
+out:
+ mutex_unlock(&wg->device_update_lock);
+ rtnl_unlock();
+ dev_put(wg->dev);
+out_nodev:
+ if (info->attrs[WGDEVICE_A_PRIVATE_KEY])
+ memzero_explicit(nla_data(info->attrs[WGDEVICE_A_PRIVATE_KEY]),
+ nla_len(info->attrs[WGDEVICE_A_PRIVATE_KEY]));
+ return ret;
+}
+
+static const struct genl_ops genl_ops[] = {
+ {
+ .cmd = WG_CMD_GET_DEVICE,
+ .start = wg_get_device_start,
+ .dumpit = wg_get_device_dump,
+ .done = wg_get_device_done,
+ .flags = GENL_UNS_ADMIN_PERM
+ }, {
+ .cmd = WG_CMD_SET_DEVICE,
+ .doit = wg_set_device,
+ .flags = GENL_UNS_ADMIN_PERM
+ }
+};
+
+static struct genl_family genl_family __ro_after_init = {
+ .ops = genl_ops,
+ .n_ops = ARRAY_SIZE(genl_ops),
+ .name = WG_GENL_NAME,
+ .version = WG_GENL_VERSION,
+ .maxattr = WGDEVICE_A_MAX,
+ .module = THIS_MODULE,
+ .policy = device_policy,
+ .netnsok = true
+};
+
+int __init wg_genetlink_init(void)
+{
+ return genl_register_family(&genl_family);
+}
+
+void __exit wg_genetlink_uninit(void)
+{
+ genl_unregister_family(&genl_family);
+}
diff --git a/drivers/net/wireguard/netlink.h b/drivers/net/wireguard/netlink.h
new file mode 100644
index 000000000000..15100d92e2e3
--- /dev/null
+++ b/drivers/net/wireguard/netlink.h
@@ -0,0 +1,12 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2015-2019 Jason A. Donenfeld <Jason@zx2c4.com>. All Rights Reserved.
+ */
+
+#ifndef _WG_NETLINK_H
+#define _WG_NETLINK_H
+
+int wg_genetlink_init(void);
+void wg_genetlink_uninit(void);
+
+#endif /* _WG_NETLINK_H */
diff --git a/drivers/net/wireguard/noise.c b/drivers/net/wireguard/noise.c
new file mode 100644
index 000000000000..d71c8db68a8c
--- /dev/null
+++ b/drivers/net/wireguard/noise.c
@@ -0,0 +1,828 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2015-2019 Jason A. Donenfeld <Jason@zx2c4.com>. All Rights Reserved.
+ */
+
+#include "noise.h"
+#include "device.h"
+#include "peer.h"
+#include "messages.h"
+#include "queueing.h"
+#include "peerlookup.h"
+
+#include <linux/rcupdate.h>
+#include <linux/slab.h>
+#include <linux/bitmap.h>
+#include <linux/scatterlist.h>
+#include <linux/highmem.h>
+#include <crypto/algapi.h>
+
+/* This implements Noise_IKpsk2:
+ *
+ * <- s
+ * ******
+ * -> e, es, s, ss, {t}
+ * <- e, ee, se, psk, {}
+ */
+
+static const u8 handshake_name[37] = "Noise_IKpsk2_25519_ChaChaPoly_BLAKE2s";
+static const u8 identifier_name[34] = "WireGuard v1 zx2c4 Jason@zx2c4.com";
+static u8 handshake_init_hash[NOISE_HASH_LEN] __ro_after_init;
+static u8 handshake_init_chaining_key[NOISE_HASH_LEN] __ro_after_init;
+static atomic64_t keypair_counter = ATOMIC64_INIT(0);
+
+void __init wg_noise_init(void)
+{
+ struct blake2s_state blake;
+
+ blake2s(handshake_init_chaining_key, handshake_name, NULL,
+ NOISE_HASH_LEN, sizeof(handshake_name), 0);
+ blake2s_init(&blake, NOISE_HASH_LEN);
+ blake2s_update(&blake, handshake_init_chaining_key, NOISE_HASH_LEN);
+ blake2s_update(&blake, identifier_name, sizeof(identifier_name));
+ blake2s_final(&blake, handshake_init_hash);
+}
+
+/* Must hold peer->handshake.static_identity->lock */
+bool wg_noise_precompute_static_static(struct wg_peer *peer)
+{
+ bool ret = true;
+
+ down_write(&peer->handshake.lock);
+ if (peer->handshake.static_identity->has_identity)
+ ret = curve25519(
+ peer->handshake.precomputed_static_static,
+ peer->handshake.static_identity->static_private,
+ peer->handshake.remote_static);
+ else
+ memset(peer->handshake.precomputed_static_static, 0,
+ NOISE_PUBLIC_KEY_LEN);
+ up_write(&peer->handshake.lock);
+ return ret;
+}
+
+bool wg_noise_handshake_init(struct noise_handshake *handshake,
+ struct noise_static_identity *static_identity,
+ const u8 peer_public_key[NOISE_PUBLIC_KEY_LEN],
+ const u8 peer_preshared_key[NOISE_SYMMETRIC_KEY_LEN],
+ struct wg_peer *peer)
+{
+ memset(handshake, 0, sizeof(*handshake));
+ init_rwsem(&handshake->lock);
+ handshake->entry.type = INDEX_HASHTABLE_HANDSHAKE;
+ handshake->entry.peer = peer;
+ memcpy(handshake->remote_static, peer_public_key, NOISE_PUBLIC_KEY_LEN);
+ if (peer_preshared_key)
+ memcpy(handshake->preshared_key, peer_preshared_key,
+ NOISE_SYMMETRIC_KEY_LEN);
+ handshake->static_identity = static_identity;
+ handshake->state = HANDSHAKE_ZEROED;
+ return wg_noise_precompute_static_static(peer);
+}
+
+static void handshake_zero(struct noise_handshake *handshake)
+{
+ memset(&handshake->ephemeral_private, 0, NOISE_PUBLIC_KEY_LEN);
+ memset(&handshake->remote_ephemeral, 0, NOISE_PUBLIC_KEY_LEN);
+ memset(&handshake->hash, 0, NOISE_HASH_LEN);
+ memset(&handshake->chaining_key, 0, NOISE_HASH_LEN);
+ handshake->remote_index = 0;
+ handshake->state = HANDSHAKE_ZEROED;
+}
+
+void wg_noise_handshake_clear(struct noise_handshake *handshake)
+{
+ wg_index_hashtable_remove(
+ handshake->entry.peer->device->index_hashtable,
+ &handshake->entry);
+ down_write(&handshake->lock);
+ handshake_zero(handshake);
+ up_write(&handshake->lock);
+ wg_index_hashtable_remove(
+ handshake->entry.peer->device->index_hashtable,
+ &handshake->entry);
+}
+
+static struct noise_keypair *keypair_create(struct wg_peer *peer)
+{
+ struct noise_keypair *keypair = kzalloc(sizeof(*keypair), GFP_KERNEL);
+
+ if (unlikely(!keypair))
+ return NULL;
+ keypair->internal_id = atomic64_inc_return(&keypair_counter);
+ keypair->entry.type = INDEX_HASHTABLE_KEYPAIR;
+ keypair->entry.peer = peer;
+ kref_init(&keypair->refcount);
+ return keypair;
+}
+
+static void keypair_free_rcu(struct rcu_head *rcu)
+{
+ kzfree(container_of(rcu, struct noise_keypair, rcu));
+}
+
+static void keypair_free_kref(struct kref *kref)
+{
+ struct noise_keypair *keypair =
+ container_of(kref, struct noise_keypair, refcount);
+
+ net_dbg_ratelimited("%s: Keypair %llu destroyed for peer %llu\n",
+ keypair->entry.peer->device->dev->name,
+ keypair->internal_id,
+ keypair->entry.peer->internal_id);
+ wg_index_hashtable_remove(keypair->entry.peer->device->index_hashtable,
+ &keypair->entry);
+ call_rcu(&keypair->rcu, keypair_free_rcu);
+}
+
+void wg_noise_keypair_put(struct noise_keypair *keypair, bool unreference_now)
+{
+ if (unlikely(!keypair))
+ return;
+ if (unlikely(unreference_now))
+ wg_index_hashtable_remove(
+ keypair->entry.peer->device->index_hashtable,
+ &keypair->entry);
+ kref_put(&keypair->refcount, keypair_free_kref);
+}
+
+struct noise_keypair *wg_noise_keypair_get(struct noise_keypair *keypair)
+{
+ RCU_LOCKDEP_WARN(!rcu_read_lock_bh_held(),
+ "Taking noise keypair reference without holding the RCU BH read lock");
+ if (unlikely(!keypair || !kref_get_unless_zero(&keypair->refcount)))
+ return NULL;
+ return keypair;
+}
+
+void wg_noise_keypairs_clear(struct noise_keypairs *keypairs)
+{
+ struct noise_keypair *old;
+
+ spin_lock_bh(&keypairs->keypair_update_lock);
+
+ /* We zero the next_keypair before zeroing the others, so that
+ * wg_noise_received_with_keypair returns early before subsequent ones
+ * are zeroed.
+ */
+ old = rcu_dereference_protected(keypairs->next_keypair,
+ lockdep_is_held(&keypairs->keypair_update_lock));
+ RCU_INIT_POINTER(keypairs->next_keypair, NULL);
+ wg_noise_keypair_put(old, true);
+
+ old = rcu_dereference_protected(keypairs->previous_keypair,
+ lockdep_is_held(&keypairs->keypair_update_lock));
+ RCU_INIT_POINTER(keypairs->previous_keypair, NULL);
+ wg_noise_keypair_put(old, true);
+
+ old = rcu_dereference_protected(keypairs->current_keypair,
+ lockdep_is_held(&keypairs->keypair_update_lock));
+ RCU_INIT_POINTER(keypairs->current_keypair, NULL);
+ wg_noise_keypair_put(old, true);
+
+ spin_unlock_bh(&keypairs->keypair_update_lock);
+}
+
+void wg_noise_expire_current_peer_keypairs(struct wg_peer *peer)
+{
+ struct noise_keypair *keypair;
+
+ wg_noise_handshake_clear(&peer->handshake);
+ wg_noise_reset_last_sent_handshake(&peer->last_sent_handshake);
+
+ spin_lock_bh(&peer->keypairs.keypair_update_lock);
+ keypair = rcu_dereference_protected(peer->keypairs.next_keypair,
+ lockdep_is_held(&peer->keypairs.keypair_update_lock));
+ if (keypair)
+ keypair->sending.is_valid = false;
+ keypair = rcu_dereference_protected(peer->keypairs.current_keypair,
+ lockdep_is_held(&peer->keypairs.keypair_update_lock));
+ if (keypair)
+ keypair->sending.is_valid = false;
+ spin_unlock_bh(&peer->keypairs.keypair_update_lock);
+}
+
+static void add_new_keypair(struct noise_keypairs *keypairs,
+ struct noise_keypair *new_keypair)
+{
+ struct noise_keypair *previous_keypair, *next_keypair, *current_keypair;
+
+ spin_lock_bh(&keypairs->keypair_update_lock);
+ previous_keypair = rcu_dereference_protected(keypairs->previous_keypair,
+ lockdep_is_held(&keypairs->keypair_update_lock));
+ next_keypair = rcu_dereference_protected(keypairs->next_keypair,
+ lockdep_is_held(&keypairs->keypair_update_lock));
+ current_keypair = rcu_dereference_protected(keypairs->current_keypair,
+ lockdep_is_held(&keypairs->keypair_update_lock));
+ if (new_keypair->i_am_the_initiator) {
+ /* If we're the initiator, it means we've sent a handshake, and
+ * received a confirmation response, which means this new
+ * keypair can now be used.
+ */
+ if (next_keypair) {
+ /* If there already was a next keypair pending, we
+ * demote it to be the previous keypair, and free the
+ * existing current. Note that this means KCI can result
+ * in this transition. It would perhaps be more sound to
+ * always just get rid of the unused next keypair
+ * instead of putting it in the previous slot, but this
+ * might be a bit less robust. Something to think about
+ * for the future.
+ */
+ RCU_INIT_POINTER(keypairs->next_keypair, NULL);
+ rcu_assign_pointer(keypairs->previous_keypair,
+ next_keypair);
+ wg_noise_keypair_put(current_keypair, true);
+ } else /* If there wasn't an existing next keypair, we replace
+ * the previous with the current one.
+ */
+ rcu_assign_pointer(keypairs->previous_keypair,
+ current_keypair);
+ /* At this point we can get rid of the old previous keypair, and
+ * set up the new keypair.
+ */
+ wg_noise_keypair_put(previous_keypair, true);
+ rcu_assign_pointer(keypairs->current_keypair, new_keypair);
+ } else {
+ /* If we're the responder, it means we can't use the new keypair
+ * until we receive confirmation via the first data packet, so
+ * we get rid of the existing previous one, the possibly
+ * existing next one, and slide in the new next one.
+ */
+ rcu_assign_pointer(keypairs->next_keypair, new_keypair);
+ wg_noise_keypair_put(next_keypair, true);
+ RCU_INIT_POINTER(keypairs->previous_keypair, NULL);
+ wg_noise_keypair_put(previous_keypair, true);
+ }
+ spin_unlock_bh(&keypairs->keypair_update_lock);
+}
+
+bool wg_noise_received_with_keypair(struct noise_keypairs *keypairs,
+ struct noise_keypair *received_keypair)
+{
+ struct noise_keypair *old_keypair;
+ bool key_is_new;
+
+ /* We first check without taking the spinlock. */
+ key_is_new = received_keypair ==
+ rcu_access_pointer(keypairs->next_keypair);
+ if (likely(!key_is_new))
+ return false;
+
+ spin_lock_bh(&keypairs->keypair_update_lock);
+ /* After locking, we double check that things didn't change from
+ * beneath us.
+ */
+ if (unlikely(received_keypair !=
+ rcu_dereference_protected(keypairs->next_keypair,
+ lockdep_is_held(&keypairs->keypair_update_lock)))) {
+ spin_unlock_bh(&keypairs->keypair_update_lock);
+ return false;
+ }
+
+ /* When we've finally received the confirmation, we slide the next
+ * into the current, the current into the previous, and get rid of
+ * the old previous.
+ */
+ old_keypair = rcu_dereference_protected(keypairs->previous_keypair,
+ lockdep_is_held(&keypairs->keypair_update_lock));
+ rcu_assign_pointer(keypairs->previous_keypair,
+ rcu_dereference_protected(keypairs->current_keypair,
+ lockdep_is_held(&keypairs->keypair_update_lock)));
+ wg_noise_keypair_put(old_keypair, true);
+ rcu_assign_pointer(keypairs->current_keypair, received_keypair);
+ RCU_INIT_POINTER(keypairs->next_keypair, NULL);
+
+ spin_unlock_bh(&keypairs->keypair_update_lock);
+ return true;
+}
+
+/* Must hold static_identity->lock */
+void wg_noise_set_static_identity_private_key(
+ struct noise_static_identity *static_identity,
+ const u8 private_key[NOISE_PUBLIC_KEY_LEN])
+{
+ memcpy(static_identity->static_private, private_key,
+ NOISE_PUBLIC_KEY_LEN);
+ curve25519_clamp_secret(static_identity->static_private);
+ static_identity->has_identity = curve25519_generate_public(
+ static_identity->static_public, private_key);
+}
+
+/* This is Hugo Krawczyk's HKDF:
+ * - https://eprint.iacr.org/2010/264.pdf
+ * - https://tools.ietf.org/html/rfc5869
+ */
+static void kdf(u8 *first_dst, u8 *second_dst, u8 *third_dst, const u8 *data,
+ size_t first_len, size_t second_len, size_t third_len,
+ size_t data_len, const u8 chaining_key[NOISE_HASH_LEN])
+{
+ u8 output[BLAKE2S_HASH_SIZE + 1];
+ u8 secret[BLAKE2S_HASH_SIZE];
+
+ WARN_ON(IS_ENABLED(DEBUG) &&
+ (first_len > BLAKE2S_HASH_SIZE ||
+ second_len > BLAKE2S_HASH_SIZE ||
+ third_len > BLAKE2S_HASH_SIZE ||
+ ((second_len || second_dst || third_len || third_dst) &&
+ (!first_len || !first_dst)) ||
+ ((third_len || third_dst) && (!second_len || !second_dst))));
+
+ /* Extract entropy from data into secret */
+ blake2s256_hmac(secret, data, chaining_key, data_len, NOISE_HASH_LEN);
+
+ if (!first_dst || !first_len)
+ goto out;
+
+ /* Expand first key: key = secret, data = 0x1 */
+ output[0] = 1;
+ blake2s256_hmac(output, output, secret, 1, BLAKE2S_HASH_SIZE);
+ memcpy(first_dst, output, first_len);
+
+ if (!second_dst || !second_len)
+ goto out;
+
+ /* Expand second key: key = secret, data = first-key || 0x2 */
+ output[BLAKE2S_HASH_SIZE] = 2;
+ blake2s256_hmac(output, output, secret, BLAKE2S_HASH_SIZE + 1,
+ BLAKE2S_HASH_SIZE);
+ memcpy(second_dst, output, second_len);
+
+ if (!third_dst || !third_len)
+ goto out;
+
+ /* Expand third key: key = secret, data = second-key || 0x3 */
+ output[BLAKE2S_HASH_SIZE] = 3;
+ blake2s256_hmac(output, output, secret, BLAKE2S_HASH_SIZE + 1,
+ BLAKE2S_HASH_SIZE);
+ memcpy(third_dst, output, third_len);
+
+out:
+ /* Clear sensitive data from stack */
+ memzero_explicit(secret, BLAKE2S_HASH_SIZE);
+ memzero_explicit(output, BLAKE2S_HASH_SIZE + 1);
+}
+
+static void symmetric_key_init(struct noise_symmetric_key *key)
+{
+ spin_lock_init(&key->counter.receive.lock);
+ atomic64_set(&key->counter.counter, 0);
+ memset(key->counter.receive.backtrack, 0,
+ sizeof(key->counter.receive.backtrack));
+ key->birthdate = ktime_get_coarse_boottime_ns();
+ key->is_valid = true;
+}
+
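+/* The final KDF of the handshake: the chaining key is split into the two
+ * transport keys, which the caller assigns to sending and receiving
+ * according to its handshake role.
+ */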
+static void derive_keys(struct noise_symmetric_key *first_dst,
+ struct noise_symmetric_key *second_dst,
+ const u8 chaining_key[NOISE_HASH_LEN])
+{
+ kdf(first_dst->key, second_dst->key, NULL, NULL,
+ NOISE_SYMMETRIC_KEY_LEN, NOISE_SYMMETRIC_KEY_LEN, 0, 0,
+ chaining_key);
+ symmetric_key_init(first_dst);
+ symmetric_key_init(second_dst);
+}
+
+static bool __must_check mix_dh(u8 chaining_key[NOISE_HASH_LEN],
+ u8 key[NOISE_SYMMETRIC_KEY_LEN],
+ const u8 private[NOISE_PUBLIC_KEY_LEN],
+ const u8 public[NOISE_PUBLIC_KEY_LEN])
+{
+ u8 dh_calculation[NOISE_PUBLIC_KEY_LEN];
+
+ if (unlikely(!curve25519(dh_calculation, private, public)))
+ return false;
+ kdf(chaining_key, key, NULL, dh_calculation, NOISE_HASH_LEN,
+ NOISE_SYMMETRIC_KEY_LEN, 0, NOISE_PUBLIC_KEY_LEN, chaining_key);
+ memzero_explicit(dh_calculation, NOISE_PUBLIC_KEY_LEN);
+ return true;
+}
+
+static void mix_hash(u8 hash[NOISE_HASH_LEN], const u8 *src, size_t src_len)
+{
+ struct blake2s_state blake;
+
+ blake2s_init(&blake, NOISE_HASH_LEN);
+ blake2s_update(&blake, hash, NOISE_HASH_LEN);
+ blake2s_update(&blake, src, src_len);
+ blake2s_final(&blake, hash);
+}
+
+static void mix_psk(u8 chaining_key[NOISE_HASH_LEN], u8 hash[NOISE_HASH_LEN],
+ u8 key[NOISE_SYMMETRIC_KEY_LEN],
+ const u8 psk[NOISE_SYMMETRIC_KEY_LEN])
+{
+ u8 temp_hash[NOISE_HASH_LEN];
+
+ kdf(chaining_key, temp_hash, key, psk, NOISE_HASH_LEN, NOISE_HASH_LEN,
+ NOISE_SYMMETRIC_KEY_LEN, NOISE_SYMMETRIC_KEY_LEN, chaining_key);
+ mix_hash(hash, temp_hash, NOISE_HASH_LEN);
+ memzero_explicit(temp_hash, NOISE_HASH_LEN);
+}
+
+static void handshake_init(u8 chaining_key[NOISE_HASH_LEN],
+ u8 hash[NOISE_HASH_LEN],
+ const u8 remote_static[NOISE_PUBLIC_KEY_LEN])
+{
+ memcpy(hash, handshake_init_hash, NOISE_HASH_LEN);
+ memcpy(chaining_key, handshake_init_chaining_key, NOISE_HASH_LEN);
+ mix_hash(hash, remote_static, NOISE_PUBLIC_KEY_LEN);
+}
+
+static void message_encrypt(u8 *dst_ciphertext, const u8 *src_plaintext,
+ size_t src_len, u8 key[NOISE_SYMMETRIC_KEY_LEN],
+ u8 hash[NOISE_HASH_LEN])
+{
+ chacha20poly1305_encrypt(dst_ciphertext, src_plaintext, src_len, hash,
+ NOISE_HASH_LEN,
+ 0 /* Always zero for Noise_IK */, key);
+ mix_hash(hash, dst_ciphertext, noise_encrypted_len(src_len));
+}
+
+static bool message_decrypt(u8 *dst_plaintext, const u8 *src_ciphertext,
+ size_t src_len, u8 key[NOISE_SYMMETRIC_KEY_LEN],
+ u8 hash[NOISE_HASH_LEN])
+{
+ if (!chacha20poly1305_decrypt(dst_plaintext, src_ciphertext, src_len,
+ hash, NOISE_HASH_LEN,
+ 0 /* Always zero for Noise_IK */, key))
+ return false;
+ mix_hash(hash, src_ciphertext, src_len);
+ return true;
+}
+
+static void message_ephemeral(u8 ephemeral_dst[NOISE_PUBLIC_KEY_LEN],
+ const u8 ephemeral_src[NOISE_PUBLIC_KEY_LEN],
+ u8 chaining_key[NOISE_HASH_LEN],
+ u8 hash[NOISE_HASH_LEN])
+{
+ if (ephemeral_dst != ephemeral_src)
+ memcpy(ephemeral_dst, ephemeral_src, NOISE_PUBLIC_KEY_LEN);
+ mix_hash(hash, ephemeral_src, NOISE_PUBLIC_KEY_LEN);
+ kdf(chaining_key, NULL, NULL, ephemeral_src, NOISE_HASH_LEN, 0, 0,
+ NOISE_PUBLIC_KEY_LEN, chaining_key);
+}
+
+static void tai64n_now(u8 output[NOISE_TIMESTAMP_LEN])
+{
+ struct timespec64 now;
+
+ ktime_get_real_ts64(&now);
+
+ /* In order to prevent some sort of infoleak from precise timers, we
+	 * round down the nanoseconds part to a multiple of the largest power
+	 * of two that fits within the interval between allowed initiations,
+	 * since the implementation rate-limits initiations per second anyway.
+ */
+ now.tv_nsec = ALIGN_DOWN(now.tv_nsec,
+ rounddown_pow_of_two(NSEC_PER_SEC / INITIATIONS_PER_SECOND));
+
+ /* https://cr.yp.to/libtai/tai64.html */
+ *(__be64 *)output = cpu_to_be64(0x400000000000000aULL + now.tv_sec);
+ *(__be32 *)(output + sizeof(__be64)) = cpu_to_be32(now.tv_nsec);
+}
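
The resulting 12 bytes form a TAI64N label: an 8-byte big-endian second count offset by 2^62 (plus the 10-second TAI offset), followed by 4 big-endian nanosecond bytes. The sketch below reproduces the encoding and the nanosecond truncation in plain C, assuming INITIATIONS_PER_SECOND is 50 (its value elsewhere in this patch), which makes the granularity rounddown_pow_of_two(10^9 / 50) = 2^24 ns:

#include <stdint.h>
#include <stdio.h>
#include <time.h>

int main(void)
{
	const long granularity = 1L << 24;	/* rounddown_pow_of_two(10^9 / 50) */
	uint8_t out[12];
	struct timespec now;
	uint64_t sec;
	uint32_t nsec;
	int i;

	clock_gettime(CLOCK_REALTIME, &now);
	sec = 0x400000000000000aULL + (uint64_t)now.tv_sec;
	nsec = (uint32_t)(now.tv_nsec & ~(granularity - 1));	/* ALIGN_DOWN */

	for (i = 0; i < 8; ++i)		/* big-endian seconds */
		out[i] = (uint8_t)(sec >> (56 - 8 * i));
	for (i = 0; i < 4; ++i)		/* big-endian nanoseconds */
		out[8 + i] = (uint8_t)(nsec >> (24 - 8 * i));

	for (i = 0; i < 12; ++i)
		printf("%02x", out[i]);
	printf("\n");
}
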
+
+bool
+wg_noise_handshake_create_initiation(struct message_handshake_initiation *dst,
+ struct noise_handshake *handshake)
+{
+ u8 timestamp[NOISE_TIMESTAMP_LEN];
+ u8 key[NOISE_SYMMETRIC_KEY_LEN];
+ bool ret = false;
+
+ /* We need to wait for crng _before_ taking any locks, since
+ * curve25519_generate_secret uses get_random_bytes_wait.
+ */
+ wait_for_random_bytes();
+
+ down_read(&handshake->static_identity->lock);
+ down_write(&handshake->lock);
+
+ if (unlikely(!handshake->static_identity->has_identity))
+ goto out;
+
+ dst->header.type = cpu_to_le32(MESSAGE_HANDSHAKE_INITIATION);
+
+ handshake_init(handshake->chaining_key, handshake->hash,
+ handshake->remote_static);
+
+ /* e */
+ curve25519_generate_secret(handshake->ephemeral_private);
+ if (!curve25519_generate_public(dst->unencrypted_ephemeral,
+ handshake->ephemeral_private))
+ goto out;
+ message_ephemeral(dst->unencrypted_ephemeral,
+ dst->unencrypted_ephemeral, handshake->chaining_key,
+ handshake->hash);
+
+ /* es */
+ if (!mix_dh(handshake->chaining_key, key, handshake->ephemeral_private,
+ handshake->remote_static))
+ goto out;
+
+ /* s */
+ message_encrypt(dst->encrypted_static,
+ handshake->static_identity->static_public,
+ NOISE_PUBLIC_KEY_LEN, key, handshake->hash);
+
+ /* ss */
+ kdf(handshake->chaining_key, key, NULL,
+ handshake->precomputed_static_static, NOISE_HASH_LEN,
+ NOISE_SYMMETRIC_KEY_LEN, 0, NOISE_PUBLIC_KEY_LEN,
+ handshake->chaining_key);
+
+ /* {t} */
+ tai64n_now(timestamp);
+ message_encrypt(dst->encrypted_timestamp, timestamp,
+ NOISE_TIMESTAMP_LEN, key, handshake->hash);
+
+ dst->sender_index = wg_index_hashtable_insert(
+ handshake->entry.peer->device->index_hashtable,
+ &handshake->entry);
+
+ handshake->state = HANDSHAKE_CREATED_INITIATION;
+ ret = true;
+
+out:
+ up_write(&handshake->lock);
+ up_read(&handshake->static_identity->lock);
+ memzero_explicit(key, NOISE_SYMMETRIC_KEY_LEN);
+ return ret;
+}
+
+struct wg_peer *
+wg_noise_handshake_consume_initiation(struct message_handshake_initiation *src,
+ struct wg_device *wg)
+{
+ struct wg_peer *peer = NULL, *ret_peer = NULL;
+ struct noise_handshake *handshake;
+ bool replay_attack, flood_attack;
+ u8 key[NOISE_SYMMETRIC_KEY_LEN];
+ u8 chaining_key[NOISE_HASH_LEN];
+ u8 hash[NOISE_HASH_LEN];
+ u8 s[NOISE_PUBLIC_KEY_LEN];
+ u8 e[NOISE_PUBLIC_KEY_LEN];
+ u8 t[NOISE_TIMESTAMP_LEN];
+ u64 initiation_consumption;
+
+ down_read(&wg->static_identity.lock);
+ if (unlikely(!wg->static_identity.has_identity))
+ goto out;
+
+ handshake_init(chaining_key, hash, wg->static_identity.static_public);
+
+ /* e */
+ message_ephemeral(e, src->unencrypted_ephemeral, chaining_key, hash);
+
+ /* es */
+ if (!mix_dh(chaining_key, key, wg->static_identity.static_private, e))
+ goto out;
+
+ /* s */
+ if (!message_decrypt(s, src->encrypted_static,
+ sizeof(src->encrypted_static), key, hash))
+ goto out;
+
+	/* Look up which peer we're actually talking to */
+ peer = wg_pubkey_hashtable_lookup(wg->peer_hashtable, s);
+ if (!peer)
+ goto out;
+ handshake = &peer->handshake;
+
+ /* ss */
+ kdf(chaining_key, key, NULL, handshake->precomputed_static_static,
+ NOISE_HASH_LEN, NOISE_SYMMETRIC_KEY_LEN, 0, NOISE_PUBLIC_KEY_LEN,
+ chaining_key);
+
+ /* {t} */
+ if (!message_decrypt(t, src->encrypted_timestamp,
+ sizeof(src->encrypted_timestamp), key, hash))
+ goto out;
+
+ down_read(&handshake->lock);
+ replay_attack = memcmp(t, handshake->latest_timestamp,
+ NOISE_TIMESTAMP_LEN) <= 0;
+ flood_attack = (s64)handshake->last_initiation_consumption +
+ NSEC_PER_SEC / INITIATIONS_PER_SECOND >
+ (s64)ktime_get_coarse_boottime_ns();
+ up_read(&handshake->lock);
+ if (replay_attack || flood_attack)
+ goto out;
+
+ /* Success! Copy everything to peer */
+ down_write(&handshake->lock);
+ memcpy(handshake->remote_ephemeral, e, NOISE_PUBLIC_KEY_LEN);
+ if (memcmp(t, handshake->latest_timestamp, NOISE_TIMESTAMP_LEN) > 0)
+ memcpy(handshake->latest_timestamp, t, NOISE_TIMESTAMP_LEN);
+ memcpy(handshake->hash, hash, NOISE_HASH_LEN);
+ memcpy(handshake->chaining_key, chaining_key, NOISE_HASH_LEN);
+ handshake->remote_index = src->sender_index;
+ if ((s64)(handshake->last_initiation_consumption -
+ (initiation_consumption = ktime_get_coarse_boottime_ns())) < 0)
+ handshake->last_initiation_consumption = initiation_consumption;
+ handshake->state = HANDSHAKE_CONSUMED_INITIATION;
+ up_write(&handshake->lock);
+ ret_peer = peer;
+
+out:
+ memzero_explicit(key, NOISE_SYMMETRIC_KEY_LEN);
+ memzero_explicit(hash, NOISE_HASH_LEN);
+ memzero_explicit(chaining_key, NOISE_HASH_LEN);
+ up_read(&wg->static_identity.lock);
+ if (!ret_peer)
+ wg_peer_put(peer);
+ return ret_peer;
+}
+
+bool wg_noise_handshake_create_response(struct message_handshake_response *dst,
+ struct noise_handshake *handshake)
+{
+ u8 key[NOISE_SYMMETRIC_KEY_LEN];
+ bool ret = false;
+
+ /* We need to wait for crng _before_ taking any locks, since
+ * curve25519_generate_secret uses get_random_bytes_wait.
+ */
+ wait_for_random_bytes();
+
+ down_read(&handshake->static_identity->lock);
+ down_write(&handshake->lock);
+
+ if (handshake->state != HANDSHAKE_CONSUMED_INITIATION)
+ goto out;
+
+ dst->header.type = cpu_to_le32(MESSAGE_HANDSHAKE_RESPONSE);
+ dst->receiver_index = handshake->remote_index;
+
+ /* e */
+ curve25519_generate_secret(handshake->ephemeral_private);
+ if (!curve25519_generate_public(dst->unencrypted_ephemeral,
+ handshake->ephemeral_private))
+ goto out;
+ message_ephemeral(dst->unencrypted_ephemeral,
+ dst->unencrypted_ephemeral, handshake->chaining_key,
+ handshake->hash);
+
+ /* ee */
+ if (!mix_dh(handshake->chaining_key, NULL, handshake->ephemeral_private,
+ handshake->remote_ephemeral))
+ goto out;
+
+ /* se */
+ if (!mix_dh(handshake->chaining_key, NULL, handshake->ephemeral_private,
+ handshake->remote_static))
+ goto out;
+
+ /* psk */
+ mix_psk(handshake->chaining_key, handshake->hash, key,
+ handshake->preshared_key);
+
+ /* {} */
+ message_encrypt(dst->encrypted_nothing, NULL, 0, key, handshake->hash);
+
+ dst->sender_index = wg_index_hashtable_insert(
+ handshake->entry.peer->device->index_hashtable,
+ &handshake->entry);
+
+ handshake->state = HANDSHAKE_CREATED_RESPONSE;
+ ret = true;
+
+out:
+ up_write(&handshake->lock);
+ up_read(&handshake->static_identity->lock);
+ memzero_explicit(key, NOISE_SYMMETRIC_KEY_LEN);
+ return ret;
+}
+
+struct wg_peer *
+wg_noise_handshake_consume_response(struct message_handshake_response *src,
+ struct wg_device *wg)
+{
+ enum noise_handshake_state state = HANDSHAKE_ZEROED;
+ struct wg_peer *peer = NULL, *ret_peer = NULL;
+ struct noise_handshake *handshake;
+ u8 key[NOISE_SYMMETRIC_KEY_LEN];
+ u8 hash[NOISE_HASH_LEN];
+ u8 chaining_key[NOISE_HASH_LEN];
+ u8 e[NOISE_PUBLIC_KEY_LEN];
+ u8 ephemeral_private[NOISE_PUBLIC_KEY_LEN];
+ u8 static_private[NOISE_PUBLIC_KEY_LEN];
+
+ down_read(&wg->static_identity.lock);
+
+ if (unlikely(!wg->static_identity.has_identity))
+ goto out;
+
+ handshake = (struct noise_handshake *)wg_index_hashtable_lookup(
+ wg->index_hashtable, INDEX_HASHTABLE_HANDSHAKE,
+ src->receiver_index, &peer);
+ if (unlikely(!handshake))
+ goto out;
+
+ down_read(&handshake->lock);
+ state = handshake->state;
+ memcpy(hash, handshake->hash, NOISE_HASH_LEN);
+ memcpy(chaining_key, handshake->chaining_key, NOISE_HASH_LEN);
+ memcpy(ephemeral_private, handshake->ephemeral_private,
+ NOISE_PUBLIC_KEY_LEN);
+ up_read(&handshake->lock);
+
+ if (state != HANDSHAKE_CREATED_INITIATION)
+ goto fail;
+
+ /* e */
+ message_ephemeral(e, src->unencrypted_ephemeral, chaining_key, hash);
+
+ /* ee */
+ if (!mix_dh(chaining_key, NULL, ephemeral_private, e))
+ goto fail;
+
+ /* se */
+ if (!mix_dh(chaining_key, NULL, wg->static_identity.static_private, e))
+ goto fail;
+
+ /* psk */
+ mix_psk(chaining_key, hash, key, handshake->preshared_key);
+
+ /* {} */
+ if (!message_decrypt(NULL, src->encrypted_nothing,
+ sizeof(src->encrypted_nothing), key, hash))
+ goto fail;
+
+ /* Success! Copy everything to peer */
+ down_write(&handshake->lock);
+ /* It's important to check that the state is still the same, while we
+ * have an exclusive lock.
+ */
+ if (handshake->state != state) {
+ up_write(&handshake->lock);
+ goto fail;
+ }
+ memcpy(handshake->remote_ephemeral, e, NOISE_PUBLIC_KEY_LEN);
+ memcpy(handshake->hash, hash, NOISE_HASH_LEN);
+ memcpy(handshake->chaining_key, chaining_key, NOISE_HASH_LEN);
+ handshake->remote_index = src->sender_index;
+ handshake->state = HANDSHAKE_CONSUMED_RESPONSE;
+ up_write(&handshake->lock);
+ ret_peer = peer;
+ goto out;
+
+fail:
+ wg_peer_put(peer);
+out:
+ memzero_explicit(key, NOISE_SYMMETRIC_KEY_LEN);
+ memzero_explicit(hash, NOISE_HASH_LEN);
+ memzero_explicit(chaining_key, NOISE_HASH_LEN);
+ memzero_explicit(ephemeral_private, NOISE_PUBLIC_KEY_LEN);
+ memzero_explicit(static_private, NOISE_PUBLIC_KEY_LEN);
+ up_read(&wg->static_identity.lock);
+ return ret_peer;
+}
+
+bool wg_noise_handshake_begin_session(struct noise_handshake *handshake,
+ struct noise_keypairs *keypairs)
+{
+ struct noise_keypair *new_keypair;
+ bool ret = false;
+
+ down_write(&handshake->lock);
+ if (handshake->state != HANDSHAKE_CREATED_RESPONSE &&
+ handshake->state != HANDSHAKE_CONSUMED_RESPONSE)
+ goto out;
+
+ new_keypair = keypair_create(handshake->entry.peer);
+ if (!new_keypair)
+ goto out;
+ new_keypair->i_am_the_initiator = handshake->state ==
+ HANDSHAKE_CONSUMED_RESPONSE;
+ new_keypair->remote_index = handshake->remote_index;
+
+ if (new_keypair->i_am_the_initiator)
+ derive_keys(&new_keypair->sending, &new_keypair->receiving,
+ handshake->chaining_key);
+ else
+ derive_keys(&new_keypair->receiving, &new_keypair->sending,
+ handshake->chaining_key);
+
+ handshake_zero(handshake);
+ rcu_read_lock_bh();
+ if (likely(!READ_ONCE(container_of(handshake, struct wg_peer,
+ handshake)->is_dead))) {
+ add_new_keypair(keypairs, new_keypair);
+ net_dbg_ratelimited("%s: Keypair %llu created for peer %llu\n",
+ handshake->entry.peer->device->dev->name,
+ new_keypair->internal_id,
+ handshake->entry.peer->internal_id);
+ ret = wg_index_hashtable_replace(
+ handshake->entry.peer->device->index_hashtable,
+ &handshake->entry, &new_keypair->entry);
+ } else {
+ kzfree(new_keypair);
+ }
+ rcu_read_unlock_bh();
+
+out:
+ up_write(&handshake->lock);
+ return ret;
+}
diff --git a/drivers/net/wireguard/noise.h b/drivers/net/wireguard/noise.h
new file mode 100644
index 000000000000..138a07bb817c
--- /dev/null
+++ b/drivers/net/wireguard/noise.h
@@ -0,0 +1,137 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2015-2019 Jason A. Donenfeld <Jason@zx2c4.com>. All Rights Reserved.
+ */
+#ifndef _WG_NOISE_H
+#define _WG_NOISE_H
+
+#include "messages.h"
+#include "peerlookup.h"
+
+#include <linux/types.h>
+#include <linux/spinlock.h>
+#include <linux/atomic.h>
+#include <linux/rwsem.h>
+#include <linux/mutex.h>
+#include <linux/kref.h>
+
+union noise_counter {
+ struct {
+ u64 counter;
+ unsigned long backtrack[COUNTER_BITS_TOTAL / BITS_PER_LONG];
+ spinlock_t lock;
+ } receive;
+ atomic64_t counter;
+};
+
+struct noise_symmetric_key {
+ u8 key[NOISE_SYMMETRIC_KEY_LEN];
+ union noise_counter counter;
+ u64 birthdate;
+ bool is_valid;
+};
+
+struct noise_keypair {
+ struct index_hashtable_entry entry;
+ struct noise_symmetric_key sending;
+ struct noise_symmetric_key receiving;
+ __le32 remote_index;
+ bool i_am_the_initiator;
+ struct kref refcount;
+ struct rcu_head rcu;
+ u64 internal_id;
+};
+
+struct noise_keypairs {
+ struct noise_keypair __rcu *current_keypair;
+ struct noise_keypair __rcu *previous_keypair;
+ struct noise_keypair __rcu *next_keypair;
+ spinlock_t keypair_update_lock;
+};
+
+struct noise_static_identity {
+ u8 static_public[NOISE_PUBLIC_KEY_LEN];
+ u8 static_private[NOISE_PUBLIC_KEY_LEN];
+ struct rw_semaphore lock;
+ bool has_identity;
+};
+
+enum noise_handshake_state {
+ HANDSHAKE_ZEROED,
+ HANDSHAKE_CREATED_INITIATION,
+ HANDSHAKE_CONSUMED_INITIATION,
+ HANDSHAKE_CREATED_RESPONSE,
+ HANDSHAKE_CONSUMED_RESPONSE
+};
+
+struct noise_handshake {
+ struct index_hashtable_entry entry;
+
+ enum noise_handshake_state state;
+ u64 last_initiation_consumption;
+
+ struct noise_static_identity *static_identity;
+
+ u8 ephemeral_private[NOISE_PUBLIC_KEY_LEN];
+ u8 remote_static[NOISE_PUBLIC_KEY_LEN];
+ u8 remote_ephemeral[NOISE_PUBLIC_KEY_LEN];
+ u8 precomputed_static_static[NOISE_PUBLIC_KEY_LEN];
+
+ u8 preshared_key[NOISE_SYMMETRIC_KEY_LEN];
+
+ u8 hash[NOISE_HASH_LEN];
+ u8 chaining_key[NOISE_HASH_LEN];
+
+ u8 latest_timestamp[NOISE_TIMESTAMP_LEN];
+ __le32 remote_index;
+
+	/* Protects all members except the following, which are immutable after
+	 * wg_noise_handshake_init: remote_static, precomputed_static_static,
+	 * static_identity.
+ */
+ struct rw_semaphore lock;
+};
+
+struct wg_device;
+
+void wg_noise_init(void);
+bool wg_noise_handshake_init(struct noise_handshake *handshake,
+ struct noise_static_identity *static_identity,
+ const u8 peer_public_key[NOISE_PUBLIC_KEY_LEN],
+ const u8 peer_preshared_key[NOISE_SYMMETRIC_KEY_LEN],
+ struct wg_peer *peer);
+void wg_noise_handshake_clear(struct noise_handshake *handshake);
+static inline void wg_noise_reset_last_sent_handshake(atomic64_t *handshake_ns)
+{
+ atomic64_set(handshake_ns, ktime_get_coarse_boottime_ns() -
+ (u64)(REKEY_TIMEOUT + 1) * NSEC_PER_SEC);
+}
+
+void wg_noise_keypair_put(struct noise_keypair *keypair, bool unreference_now);
+struct noise_keypair *wg_noise_keypair_get(struct noise_keypair *keypair);
+void wg_noise_keypairs_clear(struct noise_keypairs *keypairs);
+bool wg_noise_received_with_keypair(struct noise_keypairs *keypairs,
+ struct noise_keypair *received_keypair);
+void wg_noise_expire_current_peer_keypairs(struct wg_peer *peer);
+
+void wg_noise_set_static_identity_private_key(
+ struct noise_static_identity *static_identity,
+ const u8 private_key[NOISE_PUBLIC_KEY_LEN]);
+bool wg_noise_precompute_static_static(struct wg_peer *peer);
+
+bool
+wg_noise_handshake_create_initiation(struct message_handshake_initiation *dst,
+ struct noise_handshake *handshake);
+struct wg_peer *
+wg_noise_handshake_consume_initiation(struct message_handshake_initiation *src,
+ struct wg_device *wg);
+
+bool wg_noise_handshake_create_response(struct message_handshake_response *dst,
+ struct noise_handshake *handshake);
+struct wg_peer *
+wg_noise_handshake_consume_response(struct message_handshake_response *src,
+ struct wg_device *wg);
+
+bool wg_noise_handshake_begin_session(struct noise_handshake *handshake,
+ struct noise_keypairs *keypairs);
+
+#endif /* _WG_NOISE_H */
diff --git a/drivers/net/wireguard/peer.c b/drivers/net/wireguard/peer.c
new file mode 100644
index 000000000000..071eedf33f5a
--- /dev/null
+++ b/drivers/net/wireguard/peer.c
@@ -0,0 +1,240 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2015-2019 Jason A. Donenfeld <Jason@zx2c4.com>. All Rights Reserved.
+ */
+
+#include "peer.h"
+#include "device.h"
+#include "queueing.h"
+#include "timers.h"
+#include "peerlookup.h"
+#include "noise.h"
+
+#include <linux/kref.h>
+#include <linux/lockdep.h>
+#include <linux/rcupdate.h>
+#include <linux/list.h>
+
+static atomic64_t peer_counter = ATOMIC64_INIT(0);
+
+struct wg_peer *wg_peer_create(struct wg_device *wg,
+ const u8 public_key[NOISE_PUBLIC_KEY_LEN],
+ const u8 preshared_key[NOISE_SYMMETRIC_KEY_LEN])
+{
+ struct wg_peer *peer;
+ int ret = -ENOMEM;
+
+ lockdep_assert_held(&wg->device_update_lock);
+
+ if (wg->num_peers >= MAX_PEERS_PER_DEVICE)
+ return ERR_PTR(ret);
+
+ peer = kzalloc(sizeof(*peer), GFP_KERNEL);
+ if (unlikely(!peer))
+ return ERR_PTR(ret);
+ peer->device = wg;
+
+ if (!wg_noise_handshake_init(&peer->handshake, &wg->static_identity,
+ public_key, preshared_key, peer)) {
+ ret = -EKEYREJECTED;
+ goto err_1;
+ }
+ if (dst_cache_init(&peer->endpoint_cache, GFP_KERNEL))
+ goto err_1;
+ if (wg_packet_queue_init(&peer->tx_queue, wg_packet_tx_worker, false,
+ MAX_QUEUED_PACKETS))
+ goto err_2;
+ if (wg_packet_queue_init(&peer->rx_queue, NULL, false,
+ MAX_QUEUED_PACKETS))
+ goto err_3;
+
+ peer->internal_id = atomic64_inc_return(&peer_counter);
+ peer->serial_work_cpu = nr_cpumask_bits;
+ wg_cookie_init(&peer->latest_cookie);
+ wg_timers_init(peer);
+ wg_cookie_checker_precompute_peer_keys(peer);
+ spin_lock_init(&peer->keypairs.keypair_update_lock);
+ INIT_WORK(&peer->transmit_handshake_work,
+ wg_packet_handshake_send_worker);
+ rwlock_init(&peer->endpoint_lock);
+ kref_init(&peer->refcount);
+ skb_queue_head_init(&peer->staged_packet_queue);
+ wg_noise_reset_last_sent_handshake(&peer->last_sent_handshake);
+ set_bit(NAPI_STATE_NO_BUSY_POLL, &peer->napi.state);
+ netif_napi_add(wg->dev, &peer->napi, wg_packet_rx_poll,
+ NAPI_POLL_WEIGHT);
+ napi_enable(&peer->napi);
+ list_add_tail(&peer->peer_list, &wg->peer_list);
+ INIT_LIST_HEAD(&peer->allowedips_list);
+ wg_pubkey_hashtable_add(wg->peer_hashtable, peer);
+ ++wg->num_peers;
+ pr_debug("%s: Peer %llu created\n", wg->dev->name, peer->internal_id);
+ return peer;
+
+err_3:
+ wg_packet_queue_free(&peer->tx_queue, false);
+err_2:
+ dst_cache_destroy(&peer->endpoint_cache);
+err_1:
+ kfree(peer);
+ return ERR_PTR(ret);
+}
+
+struct wg_peer *wg_peer_get_maybe_zero(struct wg_peer *peer)
+{
+ RCU_LOCKDEP_WARN(!rcu_read_lock_bh_held(),
+ "Taking peer reference without holding the RCU read lock");
+ if (unlikely(!peer || !kref_get_unless_zero(&peer->refcount)))
+ return NULL;
+ return peer;
+}
+
+static void peer_make_dead(struct wg_peer *peer)
+{
+ /* Remove from configuration-time lookup structures. */
+ list_del_init(&peer->peer_list);
+ wg_allowedips_remove_by_peer(&peer->device->peer_allowedips, peer,
+ &peer->device->device_update_lock);
+ wg_pubkey_hashtable_remove(peer->device->peer_hashtable, peer);
+
+ /* Mark as dead, so that we don't allow jumping contexts after. */
+ WRITE_ONCE(peer->is_dead, true);
+
+ /* The caller must now synchronize_rcu() for this to take effect. */
+}
+
+static void peer_remove_after_dead(struct wg_peer *peer)
+{
+ WARN_ON(!peer->is_dead);
+
+ /* No more keypairs can be created for this peer, since is_dead protects
+ * add_new_keypair, so we can now destroy existing ones.
+ */
+ wg_noise_keypairs_clear(&peer->keypairs);
+
+ /* Destroy all ongoing timers that were in-flight at the beginning of
+ * this function.
+ */
+ wg_timers_stop(peer);
+
+ /* The transition between packet encryption/decryption queues isn't
+ * guarded by is_dead, but each reference's life is strictly bounded by
+ * two generations: once for parallel crypto and once for serial
+ * ingestion, so we can simply flush twice, and be sure that we no
+ * longer have references inside these queues.
+ */
+
+ /* a) For encrypt/decrypt. */
+ flush_workqueue(peer->device->packet_crypt_wq);
+ /* b.1) For send (but not receive, since that's napi). */
+ flush_workqueue(peer->device->packet_crypt_wq);
+ /* b.2.1) For receive (but not send, since that's wq). */
+ napi_disable(&peer->napi);
+	/* b.2.2) It's now safe to remove the napi struct, which must be done
+ * here from process context.
+ */
+ netif_napi_del(&peer->napi);
+
+ /* Ensure any workstructs we own (like transmit_handshake_work or
+ * clear_peer_work) no longer are in use.
+ */
+ flush_workqueue(peer->device->handshake_send_wq);
+
+ /* After the above flushes, a peer might still be active in a few
+ * different contexts: 1) from xmit(), before hitting is_dead and
+ * returning, 2) from wg_packet_consume_data(), before hitting is_dead
+ * and returning, 3) from wg_receive_handshake_packet() after a point
+ * where it has processed an incoming handshake packet, but where
+	 * all calls to pass it off to timers fail because of is_dead. We won't
+ * have new references in (1) eventually, because we're removed from
+ * allowedips; we won't have new references in (2) eventually, because
+ * wg_index_hashtable_lookup will always return NULL, since we removed
+ * all existing keypairs and no more can be created; we won't have new
+ * references in (3) eventually, because we're removed from the pubkey
+ * hash table, which allows for a maximum of one handshake response,
+ * via the still-uncleared index hashtable entry, but not more than one,
+ * and in wg_cookie_message_consume, the lookup eventually gets a peer
+ * with a refcount of zero, so no new reference is taken.
+ */
+
+ --peer->device->num_peers;
+ wg_peer_put(peer);
+}
+
+/* We have a separate "remove" function to make sure that all active places
+ * a peer is currently operating will eventually come to an end and not pass
+ * their reference onto another context.
+ */
+void wg_peer_remove(struct wg_peer *peer)
+{
+ if (unlikely(!peer))
+ return;
+ lockdep_assert_held(&peer->device->device_update_lock);
+
+ peer_make_dead(peer);
+ synchronize_rcu();
+ peer_remove_after_dead(peer);
+}
+
+void wg_peer_remove_all(struct wg_device *wg)
+{
+ struct wg_peer *peer, *temp;
+ LIST_HEAD(dead_peers);
+
+ lockdep_assert_held(&wg->device_update_lock);
+
+ /* Avoid having to traverse individually for each one. */
+ wg_allowedips_free(&wg->peer_allowedips, &wg->device_update_lock);
+
+ list_for_each_entry_safe(peer, temp, &wg->peer_list, peer_list) {
+ peer_make_dead(peer);
+ list_add_tail(&peer->peer_list, &dead_peers);
+ }
+ synchronize_rcu();
+ list_for_each_entry_safe(peer, temp, &dead_peers, peer_list)
+ peer_remove_after_dead(peer);
+}
+
+static void rcu_release(struct rcu_head *rcu)
+{
+ struct wg_peer *peer = container_of(rcu, struct wg_peer, rcu);
+
+ dst_cache_destroy(&peer->endpoint_cache);
+ wg_packet_queue_free(&peer->rx_queue, false);
+ wg_packet_queue_free(&peer->tx_queue, false);
+
+ /* The final zeroing takes care of clearing any remaining handshake key
+ * material and other potentially sensitive information.
+ */
+ kzfree(peer);
+}
+
+static void kref_release(struct kref *refcount)
+{
+ struct wg_peer *peer = container_of(refcount, struct wg_peer, refcount);
+
+ pr_debug("%s: Peer %llu (%pISpfsc) destroyed\n",
+ peer->device->dev->name, peer->internal_id,
+ &peer->endpoint.addr);
+
+	/* Remove ourselves from dynamic runtime lookup structures, now that the
+ * last reference is gone.
+ */
+ wg_index_hashtable_remove(peer->device->index_hashtable,
+ &peer->handshake.entry);
+
+ /* Remove any lingering packets that didn't have a chance to be
+ * transmitted.
+ */
+ wg_packet_purge_staged_packets(peer);
+
+ /* Free the memory used. */
+ call_rcu(&peer->rcu, rcu_release);
+}
+
+void wg_peer_put(struct wg_peer *peer)
+{
+ if (unlikely(!peer))
+ return;
+ kref_put(&peer->refcount, kref_release);
+}
diff --git a/drivers/net/wireguard/peer.h b/drivers/net/wireguard/peer.h
new file mode 100644
index 000000000000..23af40922997
--- /dev/null
+++ b/drivers/net/wireguard/peer.h
@@ -0,0 +1,83 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2015-2019 Jason A. Donenfeld <Jason@zx2c4.com>. All Rights Reserved.
+ */
+
+#ifndef _WG_PEER_H
+#define _WG_PEER_H
+
+#include "device.h"
+#include "noise.h"
+#include "cookie.h"
+
+#include <linux/types.h>
+#include <linux/netfilter.h>
+#include <linux/spinlock.h>
+#include <linux/kref.h>
+#include <net/dst_cache.h>
+
+struct wg_device;
+
+struct endpoint {
+ union {
+ struct sockaddr addr;
+ struct sockaddr_in addr4;
+ struct sockaddr_in6 addr6;
+ };
+ union {
+ struct {
+ struct in_addr src4;
+ /* Essentially the same as addr6->scope_id */
+ int src_if4;
+ };
+ struct in6_addr src6;
+ };
+};
+
+struct wg_peer {
+ struct wg_device *device;
+ struct crypt_queue tx_queue, rx_queue;
+ struct sk_buff_head staged_packet_queue;
+ int serial_work_cpu;
+ struct noise_keypairs keypairs;
+ struct endpoint endpoint;
+ struct dst_cache endpoint_cache;
+ rwlock_t endpoint_lock;
+ struct noise_handshake handshake;
+ atomic64_t last_sent_handshake;
+ struct work_struct transmit_handshake_work, clear_peer_work;
+ struct cookie latest_cookie;
+ struct hlist_node pubkey_hash;
+ u64 rx_bytes, tx_bytes;
+ struct timer_list timer_retransmit_handshake, timer_send_keepalive;
+ struct timer_list timer_new_handshake, timer_zero_key_material;
+ struct timer_list timer_persistent_keepalive;
+ unsigned int timer_handshake_attempts;
+ u16 persistent_keepalive_interval;
+ bool timer_need_another_keepalive;
+ bool sent_lastminute_handshake;
+ struct timespec64 walltime_last_handshake;
+ struct kref refcount;
+ struct rcu_head rcu;
+ struct list_head peer_list;
+ struct list_head allowedips_list;
+ u64 internal_id;
+ struct napi_struct napi;
+ bool is_dead;
+};
+
+struct wg_peer *wg_peer_create(struct wg_device *wg,
+ const u8 public_key[NOISE_PUBLIC_KEY_LEN],
+ const u8 preshared_key[NOISE_SYMMETRIC_KEY_LEN]);
+
+struct wg_peer *__must_check wg_peer_get_maybe_zero(struct wg_peer *peer);
+static inline struct wg_peer *wg_peer_get(struct wg_peer *peer)
+{
+ kref_get(&peer->refcount);
+ return peer;
+}
+void wg_peer_put(struct wg_peer *peer);
+void wg_peer_remove(struct wg_peer *peer);
+void wg_peer_remove_all(struct wg_device *wg);
+
+#endif /* _WG_PEER_H */
diff --git a/drivers/net/wireguard/peerlookup.c b/drivers/net/wireguard/peerlookup.c
new file mode 100644
index 000000000000..e4deb331476b
--- /dev/null
+++ b/drivers/net/wireguard/peerlookup.c
@@ -0,0 +1,221 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2015-2019 Jason A. Donenfeld <Jason@zx2c4.com>. All Rights Reserved.
+ */
+
+#include "peerlookup.h"
+#include "peer.h"
+#include "noise.h"
+
+static struct hlist_head *pubkey_bucket(struct pubkey_hashtable *table,
+ const u8 pubkey[NOISE_PUBLIC_KEY_LEN])
+{
+	/* siphash gives us a secure 64-bit number based on a random key. Since
+ * the bits are uniformly distributed, we can then mask off to get the
+ * bits we need.
+ */
+ const u64 hash = siphash(pubkey, NOISE_PUBLIC_KEY_LEN, &table->key);
+
+ return &table->hashtable[hash & (HASH_SIZE(table->hashtable) - 1)];
+}
+
+struct pubkey_hashtable *wg_pubkey_hashtable_alloc(void)
+{
+ struct pubkey_hashtable *table = kvmalloc(sizeof(*table), GFP_KERNEL);
+
+ if (!table)
+ return NULL;
+
+ get_random_bytes(&table->key, sizeof(table->key));
+ hash_init(table->hashtable);
+ mutex_init(&table->lock);
+ return table;
+}
+
+void wg_pubkey_hashtable_add(struct pubkey_hashtable *table,
+ struct wg_peer *peer)
+{
+ mutex_lock(&table->lock);
+ hlist_add_head_rcu(&peer->pubkey_hash,
+ pubkey_bucket(table, peer->handshake.remote_static));
+ mutex_unlock(&table->lock);
+}
+
+void wg_pubkey_hashtable_remove(struct pubkey_hashtable *table,
+ struct wg_peer *peer)
+{
+ mutex_lock(&table->lock);
+ hlist_del_init_rcu(&peer->pubkey_hash);
+ mutex_unlock(&table->lock);
+}
+
+/* Returns a strong reference to a peer */
+struct wg_peer *
+wg_pubkey_hashtable_lookup(struct pubkey_hashtable *table,
+ const u8 pubkey[NOISE_PUBLIC_KEY_LEN])
+{
+ struct wg_peer *iter_peer, *peer = NULL;
+
+ rcu_read_lock_bh();
+ hlist_for_each_entry_rcu_bh(iter_peer, pubkey_bucket(table, pubkey),
+ pubkey_hash) {
+ if (!memcmp(pubkey, iter_peer->handshake.remote_static,
+ NOISE_PUBLIC_KEY_LEN)) {
+ peer = iter_peer;
+ break;
+ }
+ }
+ peer = wg_peer_get_maybe_zero(peer);
+ rcu_read_unlock_bh();
+ return peer;
+}
+
+static struct hlist_head *index_bucket(struct index_hashtable *table,
+ const __le32 index)
+{
+ /* Since the indices are random and thus all bits are uniformly
+ * distributed, we can find its bucket simply by masking.
+ */
+ return &table->hashtable[(__force u32)index &
+ (HASH_SIZE(table->hashtable) - 1)];
+}
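
Both bucket helpers lean on the same property: the tables have power-of-two sizes, so masking with size - 1 picks a bucket in constant time and is exactly equivalent to a modulo, without the division. A short demonstration, using the 13-bit size that peerlookup.h declares for the index table:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	const uint64_t hash = 0x9e3779b97f4a7c15ULL;	/* any uniform hash */
	const unsigned int size = 1U << 13;		/* 8192 buckets */

	/* Both expressions print the same bucket index. */
	printf("%llu == %llu\n",
	       (unsigned long long)(hash & (size - 1)),
	       (unsigned long long)(hash % size));
}
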
+
+struct index_hashtable *wg_index_hashtable_alloc(void)
+{
+ struct index_hashtable *table = kvmalloc(sizeof(*table), GFP_KERNEL);
+
+ if (!table)
+ return NULL;
+
+ hash_init(table->hashtable);
+ spin_lock_init(&table->lock);
+ return table;
+}
+
+/* At the moment, we limit ourselves to 2^20 total peers, which generally might
+ * amount to 2^20*3 items in this hashtable. The algorithm below works by
+ * picking a random number and testing it. We can see that these limits mean we
+ * usually succeed pretty quickly:
+ *
+ * >>> def calculation(tries, size):
+ * ... return (size / 2**32)**(tries - 1) * (1 - (size / 2**32))
+ * ...
+ * >>> calculation(1, 2**20 * 3)
+ * 0.999267578125
+ * >>> calculation(2, 2**20 * 3)
+ * 0.0007318854331970215
+ * >>> calculation(3, 2**20 * 3)
+ * 5.360489012673497e-07
+ * >>> calculation(4, 2**20 * 3)
+ * 3.9261394135792216e-10
+ *
+ * At the moment, we don't do any masking, so this algorithm isn't exactly
+ * constant time in either the random guessing or in the hash list lookup. We
+ * could require a minimum of 3 tries, which would successfully mask the
+ * guessing. This would not, however, help with the growing hash lengths, which
+ * is another thing to consider moving forward.
+ */
+
+__le32 wg_index_hashtable_insert(struct index_hashtable *table,
+ struct index_hashtable_entry *entry)
+{
+ struct index_hashtable_entry *existing_entry;
+
+ spin_lock_bh(&table->lock);
+ hlist_del_init_rcu(&entry->index_hash);
+ spin_unlock_bh(&table->lock);
+
+ rcu_read_lock_bh();
+
+search_unused_slot:
+ /* First we try to find an unused slot, randomly, while unlocked. */
+ entry->index = (__force __le32)get_random_u32();
+ hlist_for_each_entry_rcu_bh(existing_entry,
+ index_bucket(table, entry->index),
+ index_hash) {
+ if (existing_entry->index == entry->index)
+ /* If it's already in use, we continue searching. */
+ goto search_unused_slot;
+ }
+
+ /* Once we've found an unused slot, we lock it, and then double-check
+ * that nobody else stole it from us.
+ */
+ spin_lock_bh(&table->lock);
+ hlist_for_each_entry_rcu_bh(existing_entry,
+ index_bucket(table, entry->index),
+ index_hash) {
+ if (existing_entry->index == entry->index) {
+ spin_unlock_bh(&table->lock);
+ /* If it was stolen, we start over. */
+ goto search_unused_slot;
+ }
+ }
+ /* Otherwise, we know we have it exclusively (since we're locked),
+ * so we insert.
+ */
+ hlist_add_head_rcu(&entry->index_hash,
+ index_bucket(table, entry->index));
+ spin_unlock_bh(&table->lock);
+
+ rcu_read_unlock_bh();
+
+ return entry->index;
+}
+
+bool wg_index_hashtable_replace(struct index_hashtable *table,
+ struct index_hashtable_entry *old,
+ struct index_hashtable_entry *new)
+{
+ if (unlikely(hlist_unhashed(&old->index_hash)))
+ return false;
+ spin_lock_bh(&table->lock);
+ new->index = old->index;
+ hlist_replace_rcu(&old->index_hash, &new->index_hash);
+
+ /* Calling init here NULLs out index_hash, and in fact after this
+ * function returns, it's theoretically possible for this to get
+ * reinserted elsewhere. That means the RCU lookup below might either
+ * terminate early or jump between buckets, in which case the packet
+ * simply gets dropped, which isn't terrible.
+ */
+ INIT_HLIST_NODE(&old->index_hash);
+ spin_unlock_bh(&table->lock);
+ return true;
+}
+
+void wg_index_hashtable_remove(struct index_hashtable *table,
+ struct index_hashtable_entry *entry)
+{
+ spin_lock_bh(&table->lock);
+ hlist_del_init_rcu(&entry->index_hash);
+ spin_unlock_bh(&table->lock);
+}
+
+/* Returns a strong reference to an entry->peer */
+struct index_hashtable_entry *
+wg_index_hashtable_lookup(struct index_hashtable *table,
+ const enum index_hashtable_type type_mask,
+ const __le32 index, struct wg_peer **peer)
+{
+ struct index_hashtable_entry *iter_entry, *entry = NULL;
+
+ rcu_read_lock_bh();
+ hlist_for_each_entry_rcu_bh(iter_entry, index_bucket(table, index),
+ index_hash) {
+ if (iter_entry->index == index) {
+ if (likely(iter_entry->type & type_mask))
+ entry = iter_entry;
+ break;
+ }
+ }
+ if (likely(entry)) {
+ entry->peer = wg_peer_get_maybe_zero(entry->peer);
+ if (likely(entry->peer))
+ *peer = entry->peer;
+ else
+ entry = NULL;
+ }
+ rcu_read_unlock_bh();
+ return entry;
+}
diff --git a/drivers/net/wireguard/peerlookup.h b/drivers/net/wireguard/peerlookup.h
new file mode 100644
index 000000000000..ced811797680
--- /dev/null
+++ b/drivers/net/wireguard/peerlookup.h
@@ -0,0 +1,64 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2015-2019 Jason A. Donenfeld <Jason@zx2c4.com>. All Rights Reserved.
+ */
+
+#ifndef _WG_PEERLOOKUP_H
+#define _WG_PEERLOOKUP_H
+
+#include "messages.h"
+
+#include <linux/hashtable.h>
+#include <linux/mutex.h>
+#include <linux/siphash.h>
+
+struct wg_peer;
+
+struct pubkey_hashtable {
+ /* TODO: move to rhashtable */
+ DECLARE_HASHTABLE(hashtable, 11);
+ siphash_key_t key;
+ struct mutex lock;
+};
+
+struct pubkey_hashtable *wg_pubkey_hashtable_alloc(void);
+void wg_pubkey_hashtable_add(struct pubkey_hashtable *table,
+ struct wg_peer *peer);
+void wg_pubkey_hashtable_remove(struct pubkey_hashtable *table,
+ struct wg_peer *peer);
+struct wg_peer *
+wg_pubkey_hashtable_lookup(struct pubkey_hashtable *table,
+ const u8 pubkey[NOISE_PUBLIC_KEY_LEN]);
+
+struct index_hashtable {
+ /* TODO: move to rhashtable */
+ DECLARE_HASHTABLE(hashtable, 13);
+ spinlock_t lock;
+};
+
+enum index_hashtable_type {
+ INDEX_HASHTABLE_HANDSHAKE = 1U << 0,
+ INDEX_HASHTABLE_KEYPAIR = 1U << 1
+};
+
+struct index_hashtable_entry {
+ struct wg_peer *peer;
+ struct hlist_node index_hash;
+ enum index_hashtable_type type;
+ __le32 index;
+};
+
+struct index_hashtable *wg_index_hashtable_alloc(void);
+__le32 wg_index_hashtable_insert(struct index_hashtable *table,
+ struct index_hashtable_entry *entry);
+bool wg_index_hashtable_replace(struct index_hashtable *table,
+ struct index_hashtable_entry *old,
+ struct index_hashtable_entry *new);
+void wg_index_hashtable_remove(struct index_hashtable *table,
+ struct index_hashtable_entry *entry);
+struct index_hashtable_entry *
+wg_index_hashtable_lookup(struct index_hashtable *table,
+ const enum index_hashtable_type type_mask,
+ const __le32 index, struct wg_peer **peer);
+
+#endif /* _WG_PEERLOOKUP_H */
diff --git a/drivers/net/wireguard/queueing.c b/drivers/net/wireguard/queueing.c
new file mode 100644
index 000000000000..5c964fcb994e
--- /dev/null
+++ b/drivers/net/wireguard/queueing.c
@@ -0,0 +1,53 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2015-2019 Jason A. Donenfeld <Jason@zx2c4.com>. All Rights Reserved.
+ */
+
+#include "queueing.h"
+
+struct multicore_worker __percpu *
+wg_packet_percpu_multicore_worker_alloc(work_func_t function, void *ptr)
+{
+ int cpu;
+ struct multicore_worker __percpu *worker =
+ alloc_percpu(struct multicore_worker);
+
+ if (!worker)
+ return NULL;
+
+ for_each_possible_cpu(cpu) {
+ per_cpu_ptr(worker, cpu)->ptr = ptr;
+ INIT_WORK(&per_cpu_ptr(worker, cpu)->work, function);
+ }
+ return worker;
+}
+
+int wg_packet_queue_init(struct crypt_queue *queue, work_func_t function,
+ bool multicore, unsigned int len)
+{
+ int ret;
+
+ memset(queue, 0, sizeof(*queue));
+ ret = ptr_ring_init(&queue->ring, len, GFP_KERNEL);
+ if (ret)
+ return ret;
+ if (function) {
+ if (multicore) {
+ queue->worker = wg_packet_percpu_multicore_worker_alloc(
+ function, queue);
+ if (!queue->worker)
+ return -ENOMEM;
+ } else {
+ INIT_WORK(&queue->work, function);
+ }
+ }
+ return 0;
+}
+
+void wg_packet_queue_free(struct crypt_queue *queue, bool multicore)
+{
+ if (multicore)
+ free_percpu(queue->worker);
+ WARN_ON(!__ptr_ring_empty(&queue->ring));
+ ptr_ring_cleanup(&queue->ring, NULL);
+}
diff --git a/drivers/net/wireguard/queueing.h b/drivers/net/wireguard/queueing.h
new file mode 100644
index 000000000000..fecb559cbdb6
--- /dev/null
+++ b/drivers/net/wireguard/queueing.h
@@ -0,0 +1,194 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2015-2019 Jason A. Donenfeld <Jason@zx2c4.com>. All Rights Reserved.
+ */
+
+#ifndef _WG_QUEUEING_H
+#define _WG_QUEUEING_H
+
+#include "peer.h"
+#include <linux/types.h>
+#include <linux/skbuff.h>
+#include <linux/ip.h>
+#include <linux/ipv6.h>
+
+struct wg_device;
+struct wg_peer;
+struct multicore_worker;
+struct crypt_queue;
+struct sk_buff;
+
+/* queueing.c APIs: */
+int wg_packet_queue_init(struct crypt_queue *queue, work_func_t function,
+ bool multicore, unsigned int len);
+void wg_packet_queue_free(struct crypt_queue *queue, bool multicore);
+struct multicore_worker __percpu *
+wg_packet_percpu_multicore_worker_alloc(work_func_t function, void *ptr);
+
+/* receive.c APIs: */
+void wg_packet_receive(struct wg_device *wg, struct sk_buff *skb);
+void wg_packet_handshake_receive_worker(struct work_struct *work);
+/* NAPI poll function: */
+int wg_packet_rx_poll(struct napi_struct *napi, int budget);
+/* Workqueue worker: */
+void wg_packet_decrypt_worker(struct work_struct *work);
+
+/* send.c APIs: */
+void wg_packet_send_queued_handshake_initiation(struct wg_peer *peer,
+ bool is_retry);
+void wg_packet_send_handshake_response(struct wg_peer *peer);
+void wg_packet_send_handshake_cookie(struct wg_device *wg,
+ struct sk_buff *initiating_skb,
+ __le32 sender_index);
+void wg_packet_send_keepalive(struct wg_peer *peer);
+void wg_packet_purge_staged_packets(struct wg_peer *peer);
+void wg_packet_send_staged_packets(struct wg_peer *peer);
+/* Workqueue workers: */
+void wg_packet_handshake_send_worker(struct work_struct *work);
+void wg_packet_tx_worker(struct work_struct *work);
+void wg_packet_encrypt_worker(struct work_struct *work);
+
+enum packet_state {
+ PACKET_STATE_UNCRYPTED,
+ PACKET_STATE_CRYPTED,
+ PACKET_STATE_DEAD
+};
+
+struct packet_cb {
+ u64 nonce;
+ struct noise_keypair *keypair;
+ atomic_t state;
+ u32 mtu;
+ u8 ds;
+};
+
+#define PACKET_CB(skb) ((struct packet_cb *)((skb)->cb))
+#define PACKET_PEER(skb) (PACKET_CB(skb)->keypair->entry.peer)
+
+/* Returns either the correct skb->protocol value, or 0 if invalid. */
+static inline __be16 wg_skb_examine_untrusted_ip_hdr(struct sk_buff *skb)
+{
+ if (skb_network_header(skb) >= skb->head &&
+ (skb_network_header(skb) + sizeof(struct iphdr)) <=
+ skb_tail_pointer(skb) &&
+ ip_hdr(skb)->version == 4)
+ return htons(ETH_P_IP);
+ if (skb_network_header(skb) >= skb->head &&
+ (skb_network_header(skb) + sizeof(struct ipv6hdr)) <=
+ skb_tail_pointer(skb) &&
+ ipv6_hdr(skb)->version == 6)
+ return htons(ETH_P_IPV6);
+ return 0;
+}
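
The version test above reads only the first nibble of the bounds-checked header, which is all that distinguishes the two address families at this point. In isolation, the check amounts to this:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint8_t v4_first_byte = 0x45;	/* version 4, IHL 5 */
	uint8_t v6_first_byte = 0x60;	/* version 6, traffic class 0 */

	/* The IP version is the top nibble of the first header byte. */
	printf("%u %u\n", v4_first_byte >> 4, v6_first_byte >> 4);	/* 4 6 */
}
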
+
+static inline void wg_reset_packet(struct sk_buff *skb)
+{
+ skb_scrub_packet(skb, true);
+ memset(&skb->headers_start, 0,
+ offsetof(struct sk_buff, headers_end) -
+ offsetof(struct sk_buff, headers_start));
+ skb->queue_mapping = 0;
+ skb->nohdr = 0;
+ skb->peeked = 0;
+ skb->mac_len = 0;
+ skb->dev = NULL;
+#ifdef CONFIG_NET_SCHED
+ skb->tc_index = 0;
+ skb_reset_tc(skb);
+#endif
+ skb->hdr_len = skb_headroom(skb);
+ skb_reset_mac_header(skb);
+ skb_reset_network_header(skb);
+ skb_reset_transport_header(skb);
+ skb_probe_transport_header(skb);
+ skb_reset_inner_headers(skb);
+}
+
+static inline int wg_cpumask_choose_online(int *stored_cpu, unsigned int id)
+{
+ unsigned int cpu = *stored_cpu, cpu_index, i;
+
+ if (unlikely(cpu == nr_cpumask_bits ||
+ !cpumask_test_cpu(cpu, cpu_online_mask))) {
+ cpu_index = id % cpumask_weight(cpu_online_mask);
+ cpu = cpumask_first(cpu_online_mask);
+ for (i = 0; i < cpu_index; ++i)
+ cpu = cpumask_next(cpu, cpu_online_mask);
+ *stored_cpu = cpu;
+ }
+ return cpu;
+}
+
+/* This function is racy, in the sense that next is unlocked, so it could return
+ * the same CPU twice. A race-free version of this would be to instead store an
+ * atomic sequence number, do an increment-and-return, and then iterate through
+ * every possible CPU until we get to that index, as
+ * wg_cpumask_choose_online() above does. However, that's
+ * a bit slower, and it doesn't seem like this potential race actually
+ * introduces any performance loss, so we live with it.
+ */
+static inline int wg_cpumask_next_online(int *next)
+{
+ int cpu = *next;
+
+ while (unlikely(!cpumask_test_cpu(cpu, cpu_online_mask)))
+ cpu = cpumask_next(cpu, cpu_online_mask) % nr_cpumask_bits;
+ *next = cpumask_next(cpu, cpu_online_mask) % nr_cpumask_bits;
+ return cpu;
+}
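
For comparison, the race-free alternative the comment describes would look roughly like the sketch below in userspace terms: a shared counter handed out with an atomic increment, then mapped onto the online CPUs. It trades the benign race for a contended atomic on every packet, which is the cost the comment decides against (choose_cpu_race_free is hypothetical, not kernel API):

#include <stdatomic.h>
#include <stdio.h>

static atomic_uint sequence;

/* Hypothetical race-free variant: concurrent callers always receive
 * distinct sequence numbers, so they only share a CPU once the counter
 * wraps around the CPU count. */
static int choose_cpu_race_free(int num_online_cpus)
{
	return atomic_fetch_add_explicit(&sequence, 1,
					 memory_order_relaxed) %
	       (unsigned int)num_online_cpus;
}

int main(void)
{
	int i;

	for (i = 0; i < 8; ++i)
		printf("%d ", choose_cpu_race_free(4));	/* 0 1 2 3 0 1 2 3 */
	printf("\n");
}
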
+
+static inline int wg_queue_enqueue_per_device_and_peer(
+ struct crypt_queue *device_queue, struct crypt_queue *peer_queue,
+ struct sk_buff *skb, struct workqueue_struct *wq, int *next_cpu)
+{
+ int cpu;
+
+ atomic_set_release(&PACKET_CB(skb)->state, PACKET_STATE_UNCRYPTED);
+	/* We first queue this up for the peer ingestion, but the consumer
+	 * will wait for the state to change to CRYPTED or DEAD before using it.
+ */
+ if (unlikely(ptr_ring_produce_bh(&peer_queue->ring, skb)))
+ return -ENOSPC;
+ /* Then we queue it up in the device queue, which consumes the
+ * packet as soon as it can.
+ */
+ cpu = wg_cpumask_next_online(next_cpu);
+ if (unlikely(ptr_ring_produce_bh(&device_queue->ring, skb)))
+ return -EPIPE;
+ queue_work_on(cpu, wq, &per_cpu_ptr(device_queue->worker, cpu)->work);
+ return 0;
+}
+
+static inline void wg_queue_enqueue_per_peer(struct crypt_queue *queue,
+ struct sk_buff *skb,
+ enum packet_state state)
+{
+ /* We take a reference, because as soon as we call atomic_set, the
+ * peer can be freed from below us.
+ */
+ struct wg_peer *peer = wg_peer_get(PACKET_PEER(skb));
+
+ atomic_set_release(&PACKET_CB(skb)->state, state);
+ queue_work_on(wg_cpumask_choose_online(&peer->serial_work_cpu,
+ peer->internal_id),
+ peer->device->packet_crypt_wq, &queue->work);
+ wg_peer_put(peer);
+}
+
+static inline void wg_queue_enqueue_per_peer_napi(struct sk_buff *skb,
+ enum packet_state state)
+{
+ /* We take a reference, because as soon as we call atomic_set, the
+ * peer can be freed from below us.
+ */
+ struct wg_peer *peer = wg_peer_get(PACKET_PEER(skb));
+
+ atomic_set_release(&PACKET_CB(skb)->state, state);
+ napi_schedule(&peer->napi);
+ wg_peer_put(peer);
+}
+
+#ifdef DEBUG
+bool wg_packet_counter_selftest(void);
+#endif
+
+#endif /* _WG_QUEUEING_H */
diff --git a/drivers/net/wireguard/ratelimiter.c b/drivers/net/wireguard/ratelimiter.c
new file mode 100644
index 000000000000..3fedd1d21f5e
--- /dev/null
+++ b/drivers/net/wireguard/ratelimiter.c
@@ -0,0 +1,223 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2015-2019 Jason A. Donenfeld <Jason@zx2c4.com>. All Rights Reserved.
+ */
+
+#include "ratelimiter.h"
+#include <linux/siphash.h>
+#include <linux/mm.h>
+#include <linux/slab.h>
+#include <net/ip.h>
+
+static struct kmem_cache *entry_cache;
+static hsiphash_key_t key;
+static spinlock_t table_lock = __SPIN_LOCK_UNLOCKED("ratelimiter_table_lock");
+static DEFINE_MUTEX(init_lock);
+static u64 init_refcnt; /* Protected by init_lock, hence not atomic. */
+static atomic_t total_entries = ATOMIC_INIT(0);
+static unsigned int max_entries, table_size;
+static void wg_ratelimiter_gc_entries(struct work_struct *);
+static DECLARE_DEFERRABLE_WORK(gc_work, wg_ratelimiter_gc_entries);
+static struct hlist_head *table_v4;
+#if IS_ENABLED(CONFIG_IPV6)
+static struct hlist_head *table_v6;
+#endif
+
+struct ratelimiter_entry {
+ u64 last_time_ns, tokens, ip;
+ void *net;
+ spinlock_t lock;
+ struct hlist_node hash;
+ struct rcu_head rcu;
+};
+
+enum {
+ PACKETS_PER_SECOND = 20,
+ PACKETS_BURSTABLE = 5,
+ PACKET_COST = NSEC_PER_SEC / PACKETS_PER_SECOND,
+ TOKEN_MAX = PACKET_COST * PACKETS_BURSTABLE
+};
+
+static void entry_free(struct rcu_head *rcu)
+{
+ kmem_cache_free(entry_cache,
+ container_of(rcu, struct ratelimiter_entry, rcu));
+ atomic_dec(&total_entries);
+}
+
+static void entry_uninit(struct ratelimiter_entry *entry)
+{
+ hlist_del_rcu(&entry->hash);
+ call_rcu(&entry->rcu, entry_free);
+}
+
+/* Calling this function with a NULL work uninits all entries. */
+static void wg_ratelimiter_gc_entries(struct work_struct *work)
+{
+ const u64 now = ktime_get_coarse_boottime_ns();
+ struct ratelimiter_entry *entry;
+ struct hlist_node *temp;
+ unsigned int i;
+
+ for (i = 0; i < table_size; ++i) {
+ spin_lock(&table_lock);
+ hlist_for_each_entry_safe(entry, temp, &table_v4[i], hash) {
+ if (unlikely(!work) ||
+ now - entry->last_time_ns > NSEC_PER_SEC)
+ entry_uninit(entry);
+ }
+#if IS_ENABLED(CONFIG_IPV6)
+ hlist_for_each_entry_safe(entry, temp, &table_v6[i], hash) {
+ if (unlikely(!work) ||
+ now - entry->last_time_ns > NSEC_PER_SEC)
+ entry_uninit(entry);
+ }
+#endif
+ spin_unlock(&table_lock);
+ if (likely(work))
+ cond_resched();
+ }
+ if (likely(work))
+ queue_delayed_work(system_power_efficient_wq, &gc_work, HZ);
+}
+
+bool wg_ratelimiter_allow(struct sk_buff *skb, struct net *net)
+{
+ /* We only take the bottom half of the net pointer, so that we can hash
+ * 3 words in the end. This way, siphash's len param fits into the final
+ * u32, and we don't incur an extra round.
+ */
+ const u32 net_word = (unsigned long)net;
+ struct ratelimiter_entry *entry;
+ struct hlist_head *bucket;
+ u64 ip;
+
+ if (skb->protocol == htons(ETH_P_IP)) {
+ ip = (u64 __force)ip_hdr(skb)->saddr;
+ bucket = &table_v4[hsiphash_2u32(net_word, ip, &key) &
+ (table_size - 1)];
+ }
+#if IS_ENABLED(CONFIG_IPV6)
+ else if (skb->protocol == htons(ETH_P_IPV6)) {
+ /* Only use 64 bits, so as to ratelimit the whole /64. */
+ memcpy(&ip, &ipv6_hdr(skb)->saddr, sizeof(ip));
+ bucket = &table_v6[hsiphash_3u32(net_word, ip >> 32, ip, &key) &
+ (table_size - 1)];
+ }
+#endif
+ else
+ return false;
+ rcu_read_lock();
+ hlist_for_each_entry_rcu(entry, bucket, hash) {
+ if (entry->net == net && entry->ip == ip) {
+ u64 now, tokens;
+ bool ret;
+ /* Quasi-inspired by nft_limit.c, but this is actually a
+ * slightly different algorithm. Namely, we incorporate
+ * the burst as part of the maximum tokens, rather than
+ * as part of the rate.
+ */
+ spin_lock(&entry->lock);
+ now = ktime_get_coarse_boottime_ns();
+ tokens = min_t(u64, TOKEN_MAX,
+ entry->tokens + now -
+ entry->last_time_ns);
+ entry->last_time_ns = now;
+ ret = tokens >= PACKET_COST;
+ entry->tokens = ret ? tokens - PACKET_COST : tokens;
+ spin_unlock(&entry->lock);
+ rcu_read_unlock();
+ return ret;
+ }
+ }
+ rcu_read_unlock();
+
+ if (atomic_inc_return(&total_entries) > max_entries)
+ goto err_oom;
+
+ entry = kmem_cache_alloc(entry_cache, GFP_KERNEL);
+ if (unlikely(!entry))
+ goto err_oom;
+
+ entry->net = net;
+ entry->ip = ip;
+ INIT_HLIST_NODE(&entry->hash);
+ spin_lock_init(&entry->lock);
+ entry->last_time_ns = ktime_get_coarse_boottime_ns();
+ entry->tokens = TOKEN_MAX - PACKET_COST;
+ spin_lock(&table_lock);
+ hlist_add_head_rcu(&entry->hash, bucket);
+ spin_unlock(&table_lock);
+ return true;
+
+err_oom:
+ atomic_dec(&total_entries);
+ return false;
+}
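
The token-bucket arithmetic above is the part worth internalizing, so here is a freestanding model with the same constants. Note how PACKETS_BURSTABLE lives in the token ceiling rather than in the refill rate, which is the difference from nft_limit.c that the comment alludes to:

#include <stdint.h>
#include <stdio.h>

#define PACKET_COST (1000000000ULL / 20)	/* NSEC_PER_SEC / PACKETS_PER_SECOND */
#define TOKEN_MAX   (PACKET_COST * 5)		/* PACKET_COST * PACKETS_BURSTABLE */

static uint64_t tokens = TOKEN_MAX, last_time_ns;

static int allow(uint64_t now_ns)
{
	uint64_t t = tokens + (now_ns - last_time_ns);

	if (t > TOKEN_MAX)
		t = TOKEN_MAX;	/* the burst is capped, not added to the rate */
	last_time_ns = now_ns;
	if (t < PACKET_COST) {
		tokens = t;
		return 0;
	}
	tokens = t - PACKET_COST;
	return 1;
}

int main(void)
{
	int i;

	/* Ten packets in the same nanosecond: the 5-packet burst passes,
	 * everything after it is dropped. */
	for (i = 0; i < 10; ++i)
		printf("packet %d: %s\n", i, allow(0) ? "allowed" : "dropped");
	/* 50 ms later, exactly one packet's worth of tokens has refilled. */
	printf("after 50ms: %s\n", allow(PACKET_COST) ? "allowed" : "dropped");
}
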
+
+int wg_ratelimiter_init(void)
+{
+ mutex_lock(&init_lock);
+ if (++init_refcnt != 1)
+ goto out;
+
+ entry_cache = KMEM_CACHE(ratelimiter_entry, 0);
+ if (!entry_cache)
+ goto err;
+
+ /* xt_hashlimit.c uses a slightly different algorithm for ratelimiting,
+	 * but what it has in common is that it uses a massive hashtable. So,
+ * we borrow their wisdom about good table sizes on different systems
+ * dependent on RAM. This calculation here comes from there.
+ */
+ table_size = (totalram_pages() > (1U << 30) / PAGE_SIZE) ? 8192 :
+ max_t(unsigned long, 16, roundup_pow_of_two(
+ (totalram_pages() << PAGE_SHIFT) /
+ (1U << 14) / sizeof(struct hlist_head)));
+ max_entries = table_size * 8;
+
+ table_v4 = kvzalloc(table_size * sizeof(*table_v4), GFP_KERNEL);
+ if (unlikely(!table_v4))
+ goto err_kmemcache;
+
+#if IS_ENABLED(CONFIG_IPV6)
+ table_v6 = kvzalloc(table_size * sizeof(*table_v6), GFP_KERNEL);
+ if (unlikely(!table_v6)) {
+ kvfree(table_v4);
+ goto err_kmemcache;
+ }
+#endif
+
+ queue_delayed_work(system_power_efficient_wq, &gc_work, HZ);
+ get_random_bytes(&key, sizeof(key));
+out:
+ mutex_unlock(&init_lock);
+ return 0;
+
+err_kmemcache:
+ kmem_cache_destroy(entry_cache);
+err:
+ --init_refcnt;
+ mutex_unlock(&init_lock);
+ return -ENOMEM;
+}
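
To make the sizing heuristic concrete: on a hypothetical 64-bit box with 256 MiB of RAM (below the 1 GiB cutoff, so the computed path is taken) and 8-byte hlist heads, the formula works out as follows:

#include <stdio.h>

int main(void)
{
	/* (totalram_pages() << PAGE_SHIFT) is total RAM in bytes; divide by
	 * 2^14 and by sizeof(struct hlist_head), 8 bytes on 64-bit. */
	unsigned long totalram_bytes = 256UL << 20;
	unsigned long buckets = totalram_bytes / (1UL << 14) / 8;

	printf("%lu buckets, %lu max entries\n", buckets, buckets * 8);
	/* 2048 buckets (already a power of two here), 16384 max entries.
	 * Boxes with more than 1 GiB of RAM are simply pinned at 8192. */
}
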
+
+void wg_ratelimiter_uninit(void)
+{
+ mutex_lock(&init_lock);
+ if (!init_refcnt || --init_refcnt)
+ goto out;
+
+ cancel_delayed_work_sync(&gc_work);
+ wg_ratelimiter_gc_entries(NULL);
+ rcu_barrier();
+ kvfree(table_v4);
+#if IS_ENABLED(CONFIG_IPV6)
+ kvfree(table_v6);
+#endif
+ kmem_cache_destroy(entry_cache);
+out:
+ mutex_unlock(&init_lock);
+}
+
+#include "selftest/ratelimiter.c"
diff --git a/drivers/net/wireguard/ratelimiter.h b/drivers/net/wireguard/ratelimiter.h
new file mode 100644
index 000000000000..83067f71ea99
--- /dev/null
+++ b/drivers/net/wireguard/ratelimiter.h
@@ -0,0 +1,19 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2015-2019 Jason A. Donenfeld <Jason@zx2c4.com>. All Rights Reserved.
+ */
+
+#ifndef _WG_RATELIMITER_H
+#define _WG_RATELIMITER_H
+
+#include <linux/skbuff.h>
+
+int wg_ratelimiter_init(void);
+void wg_ratelimiter_uninit(void);
+bool wg_ratelimiter_allow(struct sk_buff *skb, struct net *net);
+
+#ifdef DEBUG
+bool wg_ratelimiter_selftest(void);
+#endif
+
+#endif /* _WG_RATELIMITER_H */
diff --git a/drivers/net/wireguard/receive.c b/drivers/net/wireguard/receive.c
new file mode 100644
index 000000000000..9c6bab9c981f
--- /dev/null
+++ b/drivers/net/wireguard/receive.c
@@ -0,0 +1,595 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2015-2019 Jason A. Donenfeld <Jason@zx2c4.com>. All Rights Reserved.
+ */
+
+#include "queueing.h"
+#include "device.h"
+#include "peer.h"
+#include "timers.h"
+#include "messages.h"
+#include "cookie.h"
+#include "socket.h"
+
+#include <linux/ip.h>
+#include <linux/ipv6.h>
+#include <linux/udp.h>
+#include <net/ip_tunnels.h>
+
+/* Must be called with bh disabled. */
+static void update_rx_stats(struct wg_peer *peer, size_t len)
+{
+ struct pcpu_sw_netstats *tstats =
+ get_cpu_ptr(peer->device->dev->tstats);
+
+ u64_stats_update_begin(&tstats->syncp);
+ ++tstats->rx_packets;
+ tstats->rx_bytes += len;
+ peer->rx_bytes += len;
+ u64_stats_update_end(&tstats->syncp);
+ put_cpu_ptr(tstats);
+}
+
+#define SKB_TYPE_LE32(skb) (((struct message_header *)(skb)->data)->type)
+
+static size_t validate_header_len(struct sk_buff *skb)
+{
+ if (unlikely(skb->len < sizeof(struct message_header)))
+ return 0;
+ if (SKB_TYPE_LE32(skb) == cpu_to_le32(MESSAGE_DATA) &&
+ skb->len >= MESSAGE_MINIMUM_LENGTH)
+ return sizeof(struct message_data);
+ if (SKB_TYPE_LE32(skb) == cpu_to_le32(MESSAGE_HANDSHAKE_INITIATION) &&
+ skb->len == sizeof(struct message_handshake_initiation))
+ return sizeof(struct message_handshake_initiation);
+ if (SKB_TYPE_LE32(skb) == cpu_to_le32(MESSAGE_HANDSHAKE_RESPONSE) &&
+ skb->len == sizeof(struct message_handshake_response))
+ return sizeof(struct message_handshake_response);
+ if (SKB_TYPE_LE32(skb) == cpu_to_le32(MESSAGE_HANDSHAKE_COOKIE) &&
+ skb->len == sizeof(struct message_handshake_cookie))
+ return sizeof(struct message_handshake_cookie);
+ return 0;
+}
+
+static int prepare_skb_header(struct sk_buff *skb, struct wg_device *wg)
+{
+ size_t data_offset, data_len, header_len;
+ struct udphdr *udp;
+
+ if (unlikely(wg_skb_examine_untrusted_ip_hdr(skb) != skb->protocol ||
+ skb_transport_header(skb) < skb->head ||
+ (skb_transport_header(skb) + sizeof(struct udphdr)) >
+ skb_tail_pointer(skb)))
+ return -EINVAL; /* Bogus IP header */
+ udp = udp_hdr(skb);
+ data_offset = (u8 *)udp - skb->data;
+ if (unlikely(data_offset > U16_MAX ||
+ data_offset + sizeof(struct udphdr) > skb->len))
+ /* Packet has offset at impossible location or isn't big enough
+ * to have UDP fields.
+ */
+ return -EINVAL;
+ data_len = ntohs(udp->len);
+ if (unlikely(data_len < sizeof(struct udphdr) ||
+ data_len > skb->len - data_offset))
+		/* UDP packet is reporting too small a size or lying about
+ * its size.
+ */
+ return -EINVAL;
+ data_len -= sizeof(struct udphdr);
+ data_offset = (u8 *)udp + sizeof(struct udphdr) - skb->data;
+ if (unlikely(!pskb_may_pull(skb,
+ data_offset + sizeof(struct message_header)) ||
+ pskb_trim(skb, data_len + data_offset) < 0))
+ return -EINVAL;
+ skb_pull(skb, data_offset);
+ if (unlikely(skb->len != data_len))
+ /* Final len does not agree with calculated len */
+ return -EINVAL;
+ header_len = validate_header_len(skb);
+ if (unlikely(!header_len))
+ return -EINVAL;
+ __skb_push(skb, data_offset);
+ if (unlikely(!pskb_may_pull(skb, data_offset + header_len)))
+ return -EINVAL;
+ __skb_pull(skb, data_offset);
+ return 0;
+}
+
+static void wg_receive_handshake_packet(struct wg_device *wg,
+ struct sk_buff *skb)
+{
+ enum cookie_mac_state mac_state;
+ struct wg_peer *peer = NULL;
+ /* This is global, so that our load calculation applies to the whole
+ * system. We don't care about races with it at all.
+ */
+ static u64 last_under_load;
+ bool packet_needs_cookie;
+ bool under_load;
+
+ if (SKB_TYPE_LE32(skb) == cpu_to_le32(MESSAGE_HANDSHAKE_COOKIE)) {
+ net_dbg_skb_ratelimited("%s: Receiving cookie response from %pISpfsc\n",
+ wg->dev->name, skb);
+ wg_cookie_message_consume(
+ (struct message_handshake_cookie *)skb->data, wg);
+ return;
+ }
+
+ under_load = skb_queue_len(&wg->incoming_handshakes) >=
+ MAX_QUEUED_INCOMING_HANDSHAKES / 8;
+ if (under_load)
+ last_under_load = ktime_get_coarse_boottime_ns();
+ else if (last_under_load)
+ under_load = !wg_birthdate_has_expired(last_under_load, 1);
+ mac_state = wg_cookie_validate_packet(&wg->cookie_checker, skb,
+ under_load);
+ if ((under_load && mac_state == VALID_MAC_WITH_COOKIE) ||
+ (!under_load && mac_state == VALID_MAC_BUT_NO_COOKIE)) {
+ packet_needs_cookie = false;
+ } else if (under_load && mac_state == VALID_MAC_BUT_NO_COOKIE) {
+ packet_needs_cookie = true;
+ } else {
+ net_dbg_skb_ratelimited("%s: Invalid MAC of handshake, dropping packet from %pISpfsc\n",
+ wg->dev->name, skb);
+ return;
+ }
+
+ switch (SKB_TYPE_LE32(skb)) {
+ case cpu_to_le32(MESSAGE_HANDSHAKE_INITIATION): {
+ struct message_handshake_initiation *message =
+ (struct message_handshake_initiation *)skb->data;
+
+ if (packet_needs_cookie) {
+ wg_packet_send_handshake_cookie(wg, skb,
+ message->sender_index);
+ return;
+ }
+ peer = wg_noise_handshake_consume_initiation(message, wg);
+ if (unlikely(!peer)) {
+ net_dbg_skb_ratelimited("%s: Invalid handshake initiation from %pISpfsc\n",
+ wg->dev->name, skb);
+ return;
+ }
+ wg_socket_set_peer_endpoint_from_skb(peer, skb);
+ net_dbg_ratelimited("%s: Receiving handshake initiation from peer %llu (%pISpfsc)\n",
+ wg->dev->name, peer->internal_id,
+ &peer->endpoint.addr);
+ wg_packet_send_handshake_response(peer);
+ break;
+ }
+ case cpu_to_le32(MESSAGE_HANDSHAKE_RESPONSE): {
+ struct message_handshake_response *message =
+ (struct message_handshake_response *)skb->data;
+
+ if (packet_needs_cookie) {
+ wg_packet_send_handshake_cookie(wg, skb,
+ message->sender_index);
+ return;
+ }
+ peer = wg_noise_handshake_consume_response(message, wg);
+ if (unlikely(!peer)) {
+ net_dbg_skb_ratelimited("%s: Invalid handshake response from %pISpfsc\n",
+ wg->dev->name, skb);
+ return;
+ }
+ wg_socket_set_peer_endpoint_from_skb(peer, skb);
+ net_dbg_ratelimited("%s: Receiving handshake response from peer %llu (%pISpfsc)\n",
+ wg->dev->name, peer->internal_id,
+ &peer->endpoint.addr);
+ if (wg_noise_handshake_begin_session(&peer->handshake,
+ &peer->keypairs)) {
+ wg_timers_session_derived(peer);
+ wg_timers_handshake_complete(peer);
+ /* Calling this function will either send any existing
+ * packets in the queue and not send a keepalive, which
+			 * is the best case; or, if there's nothing in the
+			 * queue, it will send a keepalive in order to give
+ * immediate confirmation of the session.
+ */
+ wg_packet_send_keepalive(peer);
+ }
+ break;
+ }
+ }
+
+ if (unlikely(!peer)) {
+ WARN(1, "Somehow a wrong type of packet wound up in the handshake queue!\n");
+ return;
+ }
+
+ local_bh_disable();
+ update_rx_stats(peer, skb->len);
+ local_bh_enable();
+
+ wg_timers_any_authenticated_packet_received(peer);
+ wg_timers_any_authenticated_packet_traversal(peer);
+ wg_peer_put(peer);
+}
+
+void wg_packet_handshake_receive_worker(struct work_struct *work)
+{
+ struct wg_device *wg = container_of(work, struct multicore_worker,
+ work)->ptr;
+ struct sk_buff *skb;
+
+ while ((skb = skb_dequeue(&wg->incoming_handshakes)) != NULL) {
+ wg_receive_handshake_packet(wg, skb);
+ dev_kfree_skb(skb);
+ cond_resched();
+ }
+}
+
+static void keep_key_fresh(struct wg_peer *peer)
+{
+ struct noise_keypair *keypair;
+ bool send = false;
+
+ if (peer->sent_lastminute_handshake)
+ return;
+
+ rcu_read_lock_bh();
+ keypair = rcu_dereference_bh(peer->keypairs.current_keypair);
+ if (likely(keypair && READ_ONCE(keypair->sending.is_valid)) &&
+ keypair->i_am_the_initiator &&
+ unlikely(wg_birthdate_has_expired(keypair->sending.birthdate,
+ REJECT_AFTER_TIME - KEEPALIVE_TIMEOUT - REKEY_TIMEOUT)))
+ send = true;
+ rcu_read_unlock_bh();
+
+ if (send) {
+ peer->sent_lastminute_handshake = true;
+ wg_packet_send_queued_handshake_initiation(peer, false);
+ }
+}
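+
+/* With the protocol's usual constants (REJECT_AFTER_TIME = 180 s,
+ * KEEPALIVE_TIMEOUT = 10 s, REKEY_TIMEOUT = 5 s; values assumed here for
+ * illustration), the check above fires once a keypair we initiated is
+ * older than 165 seconds, which leaves enough margin to complete a fresh
+ * handshake before the old keypair would be rejected outright.
+ */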
+
+static bool decrypt_packet(struct sk_buff *skb, struct noise_symmetric_key *key)
+{
+ struct scatterlist sg[MAX_SKB_FRAGS + 8];
+ struct sk_buff *trailer;
+ unsigned int offset;
+ int num_frags;
+
+ if (unlikely(!key))
+ return false;
+
+ if (unlikely(!READ_ONCE(key->is_valid) ||
+ wg_birthdate_has_expired(key->birthdate, REJECT_AFTER_TIME) ||
+ key->counter.receive.counter >= REJECT_AFTER_MESSAGES)) {
+ WRITE_ONCE(key->is_valid, false);
+ return false;
+ }
+
+ PACKET_CB(skb)->nonce =
+ le64_to_cpu(((struct message_data *)skb->data)->counter);
+
+ /* We ensure that the network header is part of the packet before we
+ * call skb_cow_data, so that there's no chance that data is removed
+	 * from the skb, which would prevent us from extracting the original
+	 * endpoint later.
+ */
+ offset = skb->data - skb_network_header(skb);
+ skb_push(skb, offset);
+ num_frags = skb_cow_data(skb, 0, &trailer);
+ offset += sizeof(struct message_data);
+ skb_pull(skb, offset);
+ if (unlikely(num_frags < 0 || num_frags > ARRAY_SIZE(sg)))
+ return false;
+
+ sg_init_table(sg, num_frags);
+ if (skb_to_sgvec(skb, sg, 0, skb->len) <= 0)
+ return false;
+
+ if (!chacha20poly1305_decrypt_sg_inplace(sg, skb->len, NULL, 0,
+ PACKET_CB(skb)->nonce,
+ key->key))
+ return false;
+
+ /* Another ugly situation of pushing and pulling the header so as to
+ * keep endpoint information intact.
+ */
+ skb_push(skb, offset);
+ if (pskb_trim(skb, skb->len - noise_encrypted_len(0)))
+ return false;
+ skb_pull(skb, offset);
+
+ return true;
+}
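+
+/* Length bookkeeping example (illustrative numbers): a data message whose
+ * skb holds the 16-byte message_data header plus 1040 encrypted bytes
+ * (1024 bytes of plaintext plus the 16-byte tag) enters with
+ * skb->len == 1056; after the header pull and the tag trim, it leaves
+ * with skb->data at the plaintext and skb->len == 1024.
+ */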
+
+/* This is RFC 6479, a replay-detection bitmap algorithm that avoids
+ * bitshifts.
+ */
+static bool counter_validate(union noise_counter *counter, u64 their_counter)
+{
+ unsigned long index, index_current, top, i;
+ bool ret = false;
+
+ spin_lock_bh(&counter->receive.lock);
+
+ if (unlikely(counter->receive.counter >= REJECT_AFTER_MESSAGES + 1 ||
+ their_counter >= REJECT_AFTER_MESSAGES))
+ goto out;
+
+ ++their_counter;
+
+ if (unlikely((COUNTER_WINDOW_SIZE + their_counter) <
+ counter->receive.counter))
+ goto out;
+
+ index = their_counter >> ilog2(BITS_PER_LONG);
+
+ if (likely(their_counter > counter->receive.counter)) {
+ index_current = counter->receive.counter >> ilog2(BITS_PER_LONG);
+ top = min_t(unsigned long, index - index_current,
+ COUNTER_BITS_TOTAL / BITS_PER_LONG);
+ for (i = 1; i <= top; ++i)
+ counter->receive.backtrack[(i + index_current) &
+ ((COUNTER_BITS_TOTAL / BITS_PER_LONG) - 1)] = 0;
+ counter->receive.counter = their_counter;
+ }
+
+ index &= (COUNTER_BITS_TOTAL / BITS_PER_LONG) - 1;
+ ret = !test_and_set_bit(their_counter & (BITS_PER_LONG - 1),
+ &counter->receive.backtrack[index]);
+
+out:
+ spin_unlock_bh(&counter->receive.lock);
+ return ret;
+}
+
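+/* Worked example of the window logic above (assuming BITS_PER_LONG == 64;
+ * the window size itself comes from COUNTER_BITS_TOTAL in messages.h):
+ * receiving counters 0, 1, 9, 8, 1 in that order accepts 0, 1, 9 and 8,
+ * but rejects the replayed 1, since bit 1 of backtrack word 0 is already
+ * set. Any counter more than COUNTER_WINDOW_SIZE behind the highest value
+ * seen so far fails the early check and never touches the bitmap, which
+ * is what bounds the state to a fixed-size sliding window.
+ */
+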
+#include "selftest/counter.c"
+
+static void wg_packet_consume_data_done(struct wg_peer *peer,
+ struct sk_buff *skb,
+ struct endpoint *endpoint)
+{
+ struct net_device *dev = peer->device->dev;
+ unsigned int len, len_before_trim;
+ struct wg_peer *routed_peer;
+
+ wg_socket_set_peer_endpoint(peer, endpoint);
+
+ if (unlikely(wg_noise_received_with_keypair(&peer->keypairs,
+ PACKET_CB(skb)->keypair))) {
+ wg_timers_handshake_complete(peer);
+ wg_packet_send_staged_packets(peer);
+ }
+
+ keep_key_fresh(peer);
+
+ wg_timers_any_authenticated_packet_received(peer);
+ wg_timers_any_authenticated_packet_traversal(peer);
+
+ /* A packet with length 0 is a keepalive packet */
+ if (unlikely(!skb->len)) {
+ update_rx_stats(peer, message_data_len(0));
+ net_dbg_ratelimited("%s: Receiving keepalive packet from peer %llu (%pISpfsc)\n",
+ dev->name, peer->internal_id,
+ &peer->endpoint.addr);
+ goto packet_processed;
+ }
+
+ wg_timers_data_received(peer);
+
+ if (unlikely(skb_network_header(skb) < skb->head))
+ goto dishonest_packet_size;
+ if (unlikely(!(pskb_network_may_pull(skb, sizeof(struct iphdr)) &&
+ (ip_hdr(skb)->version == 4 ||
+ (ip_hdr(skb)->version == 6 &&
+ pskb_network_may_pull(skb, sizeof(struct ipv6hdr)))))))
+ goto dishonest_packet_type;
+
+ skb->dev = dev;
+ /* We've already verified the Poly1305 auth tag, which means this packet
+ * was not modified in transit. We can therefore tell the networking
+ * stack that all checksums of every layer of encapsulation have already
+ * been checked "by the hardware" and therefore is unnecessary to check
+ * again in software.
+ */
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
+ skb->csum_level = ~0; /* All levels */
+ skb->protocol = wg_skb_examine_untrusted_ip_hdr(skb);
+ if (skb->protocol == htons(ETH_P_IP)) {
+ len = ntohs(ip_hdr(skb)->tot_len);
+ if (unlikely(len < sizeof(struct iphdr)))
+ goto dishonest_packet_size;
+ if (INET_ECN_is_ce(PACKET_CB(skb)->ds))
+ IP_ECN_set_ce(ip_hdr(skb));
+ } else if (skb->protocol == htons(ETH_P_IPV6)) {
+ len = ntohs(ipv6_hdr(skb)->payload_len) +
+ sizeof(struct ipv6hdr);
+ if (INET_ECN_is_ce(PACKET_CB(skb)->ds))
+ IP6_ECN_set_ce(skb, ipv6_hdr(skb));
+ } else {
+ goto dishonest_packet_type;
+ }
+
+ if (unlikely(len > skb->len))
+ goto dishonest_packet_size;
+ len_before_trim = skb->len;
+ if (unlikely(pskb_trim(skb, len)))
+ goto packet_processed;
+
+ routed_peer = wg_allowedips_lookup_src(&peer->device->peer_allowedips,
+ skb);
+ wg_peer_put(routed_peer); /* We don't need the extra reference. */
+
+ if (unlikely(routed_peer != peer))
+ goto dishonest_packet_peer;
+
+ if (unlikely(napi_gro_receive(&peer->napi, skb) == GRO_DROP)) {
+ ++dev->stats.rx_dropped;
+ net_dbg_ratelimited("%s: Failed to give packet to userspace from peer %llu (%pISpfsc)\n",
+ dev->name, peer->internal_id,
+ &peer->endpoint.addr);
+ } else {
+ update_rx_stats(peer, message_data_len(len_before_trim));
+ }
+ return;
+
+dishonest_packet_peer:
+ net_dbg_skb_ratelimited("%s: Packet has unallowed src IP (%pISc) from peer %llu (%pISpfsc)\n",
+ dev->name, skb, peer->internal_id,
+ &peer->endpoint.addr);
+ ++dev->stats.rx_errors;
+ ++dev->stats.rx_frame_errors;
+ goto packet_processed;
+dishonest_packet_type:
+ net_dbg_ratelimited("%s: Packet is neither ipv4 nor ipv6 from peer %llu (%pISpfsc)\n",
+ dev->name, peer->internal_id, &peer->endpoint.addr);
+ ++dev->stats.rx_errors;
+ ++dev->stats.rx_frame_errors;
+ goto packet_processed;
+dishonest_packet_size:
+ net_dbg_ratelimited("%s: Packet has incorrect size from peer %llu (%pISpfsc)\n",
+ dev->name, peer->internal_id, &peer->endpoint.addr);
+ ++dev->stats.rx_errors;
+ ++dev->stats.rx_length_errors;
+ goto packet_processed;
+packet_processed:
+ dev_kfree_skb(skb);
+}
+
+int wg_packet_rx_poll(struct napi_struct *napi, int budget)
+{
+ struct wg_peer *peer = container_of(napi, struct wg_peer, napi);
+ struct crypt_queue *queue = &peer->rx_queue;
+ struct noise_keypair *keypair;
+ struct endpoint endpoint;
+ enum packet_state state;
+ struct sk_buff *skb;
+ int work_done = 0;
+ bool free;
+
+ if (unlikely(budget <= 0))
+ return 0;
+
+ while ((skb = __ptr_ring_peek(&queue->ring)) != NULL &&
+ (state = atomic_read_acquire(&PACKET_CB(skb)->state)) !=
+ PACKET_STATE_UNCRYPTED) {
+ __ptr_ring_discard_one(&queue->ring);
+ peer = PACKET_PEER(skb);
+ keypair = PACKET_CB(skb)->keypair;
+ free = true;
+
+ if (unlikely(state != PACKET_STATE_CRYPTED))
+ goto next;
+
+ if (unlikely(!counter_validate(&keypair->receiving.counter,
+ PACKET_CB(skb)->nonce))) {
+ net_dbg_ratelimited("%s: Packet has invalid nonce %llu (max %llu)\n",
+ peer->device->dev->name,
+ PACKET_CB(skb)->nonce,
+ keypair->receiving.counter.receive.counter);
+ goto next;
+ }
+
+ if (unlikely(wg_socket_endpoint_from_skb(&endpoint, skb)))
+ goto next;
+
+ wg_reset_packet(skb);
+ wg_packet_consume_data_done(peer, skb, &endpoint);
+ free = false;
+
+next:
+ wg_noise_keypair_put(keypair, false);
+ wg_peer_put(peer);
+ if (unlikely(free))
+ dev_kfree_skb(skb);
+
+ if (++work_done >= budget)
+ break;
+ }
+
+ if (work_done < budget)
+ napi_complete_done(napi, work_done);
+
+ return work_done;
+}
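+
+/* Note that the loop above peeks at the ring and stops at the first entry
+ * still in PACKET_STATE_UNCRYPTED, so packets are handed to the stack in
+ * the order they arrived even though decryption is parallelized across
+ * CPUs: a packet that finishes decrypting early simply waits in the ring
+ * until its predecessors are done.
+ */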
+
+void wg_packet_decrypt_worker(struct work_struct *work)
+{
+ struct crypt_queue *queue = container_of(work, struct multicore_worker,
+ work)->ptr;
+ struct sk_buff *skb;
+
+ while ((skb = ptr_ring_consume_bh(&queue->ring)) != NULL) {
+ enum packet_state state = likely(decrypt_packet(skb,
+ &PACKET_CB(skb)->keypair->receiving)) ?
+ PACKET_STATE_CRYPTED : PACKET_STATE_DEAD;
+ wg_queue_enqueue_per_peer_napi(skb, state);
+ }
+}
+
+static void wg_packet_consume_data(struct wg_device *wg, struct sk_buff *skb)
+{
+ __le32 idx = ((struct message_data *)skb->data)->key_idx;
+ struct wg_peer *peer = NULL;
+ int ret;
+
+ rcu_read_lock_bh();
+ PACKET_CB(skb)->keypair =
+ (struct noise_keypair *)wg_index_hashtable_lookup(
+ wg->index_hashtable, INDEX_HASHTABLE_KEYPAIR, idx,
+ &peer);
+ if (unlikely(!wg_noise_keypair_get(PACKET_CB(skb)->keypair)))
+ goto err_keypair;
+
+ if (unlikely(READ_ONCE(peer->is_dead)))
+ goto err;
+
+ ret = wg_queue_enqueue_per_device_and_peer(&wg->decrypt_queue,
+ &peer->rx_queue, skb,
+ wg->packet_crypt_wq,
+ &wg->decrypt_queue.last_cpu);
+ if (unlikely(ret == -EPIPE))
+ wg_queue_enqueue_per_peer_napi(skb, PACKET_STATE_DEAD);
+ if (likely(!ret || ret == -EPIPE)) {
+ rcu_read_unlock_bh();
+ return;
+ }
+err:
+ wg_noise_keypair_put(PACKET_CB(skb)->keypair, false);
+err_keypair:
+ rcu_read_unlock_bh();
+ wg_peer_put(peer);
+ dev_kfree_skb(skb);
+}
+
+void wg_packet_receive(struct wg_device *wg, struct sk_buff *skb)
+{
+ if (unlikely(prepare_skb_header(skb, wg) < 0))
+ goto err;
+ switch (SKB_TYPE_LE32(skb)) {
+ case cpu_to_le32(MESSAGE_HANDSHAKE_INITIATION):
+ case cpu_to_le32(MESSAGE_HANDSHAKE_RESPONSE):
+ case cpu_to_le32(MESSAGE_HANDSHAKE_COOKIE): {
+ int cpu;
+
+ if (skb_queue_len(&wg->incoming_handshakes) >
+ MAX_QUEUED_INCOMING_HANDSHAKES ||
+ unlikely(!rng_is_initialized())) {
+ net_dbg_skb_ratelimited("%s: Dropping handshake packet from %pISpfsc\n",
+ wg->dev->name, skb);
+ goto err;
+ }
+ skb_queue_tail(&wg->incoming_handshakes, skb);
+		/* Queues up a call to wg_packet_handshake_receive_worker():
+		 */
+ cpu = wg_cpumask_next_online(&wg->incoming_handshake_cpu);
+ queue_work_on(cpu, wg->handshake_receive_wq,
+ &per_cpu_ptr(wg->incoming_handshakes_worker, cpu)->work);
+ break;
+ }
+ case cpu_to_le32(MESSAGE_DATA):
+ PACKET_CB(skb)->ds = ip_tunnel_get_dsfield(ip_hdr(skb), skb);
+ wg_packet_consume_data(wg, skb);
+ break;
+ default:
+ net_dbg_skb_ratelimited("%s: Invalid packet from %pISpfsc\n",
+ wg->dev->name, skb);
+ goto err;
+ }
+ return;
+
+err:
+ dev_kfree_skb(skb);
+}
diff --git a/drivers/net/wireguard/selftest/allowedips.c b/drivers/net/wireguard/selftest/allowedips.c
new file mode 100644
index 000000000000..846db14cb046
--- /dev/null
+++ b/drivers/net/wireguard/selftest/allowedips.c
@@ -0,0 +1,683 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2015-2019 Jason A. Donenfeld <Jason@zx2c4.com>. All Rights Reserved.
+ *
+ * This contains some basic static unit tests for the allowedips data structure.
+ * It also has two additional modes that are disabled and meant to be used by
+ * folks directly playing with this file. If you define the macro
+ * DEBUG_PRINT_TRIE_GRAPHVIZ to be 1, then every time there's a full tree in
+ * memory, it will be printed out as KERN_DEBUG in a format that can be passed
+ * to graphviz (the dot command) to visualize it. If you define the macro
+ * DEBUG_RANDOM_TRIE to be 1, then there will be an extremely costly set of
+ * randomized tests done against a trivial implementation, which may take
+ * upwards of a half-hour to complete. There's no set of users who should be
+ * enabling these, and the only developers who should go anywhere near these
+ * knobs are the ones who are reading this comment.
+ */
+
+#ifdef DEBUG
+
+#include <linux/siphash.h>
+
+static __init void swap_endian_and_apply_cidr(u8 *dst, const u8 *src, u8 bits,
+ u8 cidr)
+{
+ swap_endian(dst, src, bits);
+ memset(dst + (cidr + 7) / 8, 0, bits / 8 - (cidr + 7) / 8);
+ if (cidr)
+ dst[(cidr + 7) / 8 - 1] &= ~0U << ((8 - (cidr % 8)) % 8);
+}
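+
+/* For instance (illustrative values): with bits == 32 and cidr == 21, the
+ * first ceil(21/8) == 3 bytes are kept, the remaining byte is zeroed, and
+ * the boundary byte is masked with 0xff << 3 == 0xf8, so 192.168.67.89/21
+ * comes out as 192.168.64.0.
+ */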
+
+static __init void print_node(struct allowedips_node *node, u8 bits)
+{
+ char *fmt_connection = KERN_DEBUG "\t\"%p/%d\" -> \"%p/%d\";\n";
+ char *fmt_declaration = KERN_DEBUG
+ "\t\"%p/%d\"[style=%s, color=\"#%06x\"];\n";
+ char *style = "dotted";
+ u8 ip1[16], ip2[16];
+ u32 color = 0;
+
+ if (bits == 32) {
+ fmt_connection = KERN_DEBUG "\t\"%pI4/%d\" -> \"%pI4/%d\";\n";
+ fmt_declaration = KERN_DEBUG
+ "\t\"%pI4/%d\"[style=%s, color=\"#%06x\"];\n";
+ } else if (bits == 128) {
+ fmt_connection = KERN_DEBUG "\t\"%pI6/%d\" -> \"%pI6/%d\";\n";
+ fmt_declaration = KERN_DEBUG
+ "\t\"%pI6/%d\"[style=%s, color=\"#%06x\"];\n";
+ }
+ if (node->peer) {
+ hsiphash_key_t key = { { 0 } };
+
+ memcpy(&key, &node->peer, sizeof(node->peer));
+ color = hsiphash_1u32(0xdeadbeef, &key) % 200 << 16 |
+ hsiphash_1u32(0xbabecafe, &key) % 200 << 8 |
+ hsiphash_1u32(0xabad1dea, &key) % 200;
+ style = "bold";
+ }
+ swap_endian_and_apply_cidr(ip1, node->bits, bits, node->cidr);
+ printk(fmt_declaration, ip1, node->cidr, style, color);
+ if (node->bit[0]) {
+ swap_endian_and_apply_cidr(ip2,
+ rcu_dereference_raw(node->bit[0])->bits, bits,
+ node->cidr);
+ printk(fmt_connection, ip1, node->cidr, ip2,
+ rcu_dereference_raw(node->bit[0])->cidr);
+ print_node(rcu_dereference_raw(node->bit[0]), bits);
+ }
+ if (node->bit[1]) {
+ swap_endian_and_apply_cidr(ip2,
+ rcu_dereference_raw(node->bit[1])->bits,
+ bits, node->cidr);
+ printk(fmt_connection, ip1, node->cidr, ip2,
+ rcu_dereference_raw(node->bit[1])->cidr);
+ print_node(rcu_dereference_raw(node->bit[1]), bits);
+ }
+}
+
+static __init void print_tree(struct allowedips_node __rcu *top, u8 bits)
+{
+ printk(KERN_DEBUG "digraph trie {\n");
+ print_node(rcu_dereference_raw(top), bits);
+ printk(KERN_DEBUG "}\n");
+}
+
+enum {
+ NUM_PEERS = 2000,
+ NUM_RAND_ROUTES = 400,
+ NUM_MUTATED_ROUTES = 100,
+ NUM_QUERIES = NUM_RAND_ROUTES * NUM_MUTATED_ROUTES * 30
+};
+
+struct horrible_allowedips {
+ struct hlist_head head;
+};
+
+struct horrible_allowedips_node {
+ struct hlist_node table;
+ union nf_inet_addr ip;
+ union nf_inet_addr mask;
+ u8 ip_version;
+ void *value;
+};
+
+static __init void horrible_allowedips_init(struct horrible_allowedips *table)
+{
+ INIT_HLIST_HEAD(&table->head);
+}
+
+static __init void horrible_allowedips_free(struct horrible_allowedips *table)
+{
+ struct horrible_allowedips_node *node;
+ struct hlist_node *h;
+
+ hlist_for_each_entry_safe(node, h, &table->head, table) {
+ hlist_del(&node->table);
+ kfree(node);
+ }
+}
+
+static __init inline union nf_inet_addr horrible_cidr_to_mask(u8 cidr)
+{
+ union nf_inet_addr mask;
+
+ memset(&mask, 0x00, 128 / 8);
+ memset(&mask, 0xff, cidr / 8);
+ if (cidr % 32)
+ mask.all[cidr / 32] = (__force u32)htonl(
+ (0xFFFFFFFFUL << (32 - (cidr % 32))) & 0xFFFFFFFFUL);
+ return mask;
+}
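+
+/* E.g. (illustrative value): cidr == 20 first fills 20/8 == 2 whole bytes
+ * with 0xff and then stores htonl(0xFFFFFFFF << 12) == htonl(0xFFFFF000)
+ * in mask.all[0], yielding the familiar 255.255.240.0.
+ */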
+
+static __init inline u8 horrible_mask_to_cidr(union nf_inet_addr subnet)
+{
+ return hweight32(subnet.all[0]) + hweight32(subnet.all[1]) +
+ hweight32(subnet.all[2]) + hweight32(subnet.all[3]);
+}
+
+static __init inline void
+horrible_mask_self(struct horrible_allowedips_node *node)
+{
+ if (node->ip_version == 4) {
+ node->ip.ip &= node->mask.ip;
+ } else if (node->ip_version == 6) {
+ node->ip.ip6[0] &= node->mask.ip6[0];
+ node->ip.ip6[1] &= node->mask.ip6[1];
+ node->ip.ip6[2] &= node->mask.ip6[2];
+ node->ip.ip6[3] &= node->mask.ip6[3];
+ }
+}
+
+static __init inline bool
+horrible_match_v4(const struct horrible_allowedips_node *node,
+ struct in_addr *ip)
+{
+ return (ip->s_addr & node->mask.ip) == node->ip.ip;
+}
+
+static __init inline bool
+horrible_match_v6(const struct horrible_allowedips_node *node,
+ struct in6_addr *ip)
+{
+ return (ip->in6_u.u6_addr32[0] & node->mask.ip6[0]) ==
+ node->ip.ip6[0] &&
+ (ip->in6_u.u6_addr32[1] & node->mask.ip6[1]) ==
+ node->ip.ip6[1] &&
+ (ip->in6_u.u6_addr32[2] & node->mask.ip6[2]) ==
+ node->ip.ip6[2] &&
+ (ip->in6_u.u6_addr32[3] & node->mask.ip6[3]) == node->ip.ip6[3];
+}
+
+static __init void
+horrible_insert_ordered(struct horrible_allowedips *table,
+ struct horrible_allowedips_node *node)
+{
+ struct horrible_allowedips_node *other = NULL, *where = NULL;
+ u8 my_cidr = horrible_mask_to_cidr(node->mask);
+
+ hlist_for_each_entry(other, &table->head, table) {
+ if (!memcmp(&other->mask, &node->mask,
+ sizeof(union nf_inet_addr)) &&
+ !memcmp(&other->ip, &node->ip,
+ sizeof(union nf_inet_addr)) &&
+ other->ip_version == node->ip_version) {
+ other->value = node->value;
+ kfree(node);
+ return;
+ }
+ where = other;
+ if (horrible_mask_to_cidr(other->mask) <= my_cidr)
+ break;
+ }
+ if (!other && !where)
+ hlist_add_head(&node->table, &table->head);
+ else if (!other)
+ hlist_add_behind(&node->table, &where->table);
+ else
+ hlist_add_before(&node->table, &where->table);
+}
+
+static __init int
+horrible_allowedips_insert_v4(struct horrible_allowedips *table,
+ struct in_addr *ip, u8 cidr, void *value)
+{
+ struct horrible_allowedips_node *node = kzalloc(sizeof(*node),
+ GFP_KERNEL);
+
+ if (unlikely(!node))
+ return -ENOMEM;
+ node->ip.in = *ip;
+ node->mask = horrible_cidr_to_mask(cidr);
+ node->ip_version = 4;
+ node->value = value;
+ horrible_mask_self(node);
+ horrible_insert_ordered(table, node);
+ return 0;
+}
+
+static __init int
+horrible_allowedips_insert_v6(struct horrible_allowedips *table,
+ struct in6_addr *ip, u8 cidr, void *value)
+{
+ struct horrible_allowedips_node *node = kzalloc(sizeof(*node),
+ GFP_KERNEL);
+
+ if (unlikely(!node))
+ return -ENOMEM;
+ node->ip.in6 = *ip;
+ node->mask = horrible_cidr_to_mask(cidr);
+ node->ip_version = 6;
+ node->value = value;
+ horrible_mask_self(node);
+ horrible_insert_ordered(table, node);
+ return 0;
+}
+
+static __init void *
+horrible_allowedips_lookup_v4(struct horrible_allowedips *table,
+ struct in_addr *ip)
+{
+ struct horrible_allowedips_node *node;
+ void *ret = NULL;
+
+ hlist_for_each_entry(node, &table->head, table) {
+ if (node->ip_version != 4)
+ continue;
+ if (horrible_match_v4(node, ip)) {
+ ret = node->value;
+ break;
+ }
+ }
+ return ret;
+}
+
+static __init void *
+horrible_allowedips_lookup_v6(struct horrible_allowedips *table,
+ struct in6_addr *ip)
+{
+ struct horrible_allowedips_node *node;
+ void *ret = NULL;
+
+ hlist_for_each_entry(node, &table->head, table) {
+ if (node->ip_version != 6)
+ continue;
+ if (horrible_match_v6(node, ip)) {
+ ret = node->value;
+ break;
+ }
+ }
+ return ret;
+}
+
+static __init bool randomized_test(void)
+{
+ unsigned int i, j, k, mutate_amount, cidr;
+ u8 ip[16], mutate_mask[16], mutated[16];
+ struct wg_peer **peers, *peer;
+ struct horrible_allowedips h;
+ DEFINE_MUTEX(mutex);
+ struct allowedips t;
+ bool ret = false;
+
+ mutex_init(&mutex);
+
+ wg_allowedips_init(&t);
+ horrible_allowedips_init(&h);
+
+ peers = kcalloc(NUM_PEERS, sizeof(*peers), GFP_KERNEL);
+ if (unlikely(!peers)) {
+ pr_err("allowedips random self-test malloc: FAIL\n");
+ goto free;
+ }
+ for (i = 0; i < NUM_PEERS; ++i) {
+ peers[i] = kzalloc(sizeof(*peers[i]), GFP_KERNEL);
+ if (unlikely(!peers[i])) {
+ pr_err("allowedips random self-test malloc: FAIL\n");
+ goto free;
+ }
+ kref_init(&peers[i]->refcount);
+ }
+
+ mutex_lock(&mutex);
+
+ for (i = 0; i < NUM_RAND_ROUTES; ++i) {
+ prandom_bytes(ip, 4);
+ cidr = prandom_u32_max(32) + 1;
+ peer = peers[prandom_u32_max(NUM_PEERS)];
+ if (wg_allowedips_insert_v4(&t, (struct in_addr *)ip, cidr,
+ peer, &mutex) < 0) {
+ pr_err("allowedips random self-test malloc: FAIL\n");
+ goto free_locked;
+ }
+ if (horrible_allowedips_insert_v4(&h, (struct in_addr *)ip,
+ cidr, peer) < 0) {
+ pr_err("allowedips random self-test malloc: FAIL\n");
+ goto free_locked;
+ }
+ for (j = 0; j < NUM_MUTATED_ROUTES; ++j) {
+ memcpy(mutated, ip, 4);
+ prandom_bytes(mutate_mask, 4);
+ mutate_amount = prandom_u32_max(32);
+ for (k = 0; k < mutate_amount / 8; ++k)
+ mutate_mask[k] = 0xff;
+ mutate_mask[k] = 0xff
+ << ((8 - (mutate_amount % 8)) % 8);
+ for (; k < 4; ++k)
+ mutate_mask[k] = 0;
+ for (k = 0; k < 4; ++k)
+ mutated[k] = (mutated[k] & mutate_mask[k]) |
+ (~mutate_mask[k] &
+ prandom_u32_max(256));
+ cidr = prandom_u32_max(32) + 1;
+ peer = peers[prandom_u32_max(NUM_PEERS)];
+ if (wg_allowedips_insert_v4(&t,
+ (struct in_addr *)mutated,
+ cidr, peer, &mutex) < 0) {
+ pr_err("allowedips random malloc: FAIL\n");
+ goto free_locked;
+ }
+ if (horrible_allowedips_insert_v4(&h,
+ (struct in_addr *)mutated, cidr, peer)) {
+ pr_err("allowedips random self-test malloc: FAIL\n");
+ goto free_locked;
+ }
+ }
+ }
+
+ for (i = 0; i < NUM_RAND_ROUTES; ++i) {
+ prandom_bytes(ip, 16);
+ cidr = prandom_u32_max(128) + 1;
+ peer = peers[prandom_u32_max(NUM_PEERS)];
+ if (wg_allowedips_insert_v6(&t, (struct in6_addr *)ip, cidr,
+ peer, &mutex) < 0) {
+ pr_err("allowedips random self-test malloc: FAIL\n");
+ goto free_locked;
+ }
+ if (horrible_allowedips_insert_v6(&h, (struct in6_addr *)ip,
+ cidr, peer) < 0) {
+ pr_err("allowedips random self-test malloc: FAIL\n");
+ goto free_locked;
+ }
+ for (j = 0; j < NUM_MUTATED_ROUTES; ++j) {
+ memcpy(mutated, ip, 16);
+ prandom_bytes(mutate_mask, 16);
+ mutate_amount = prandom_u32_max(128);
+ for (k = 0; k < mutate_amount / 8; ++k)
+ mutate_mask[k] = 0xff;
+ mutate_mask[k] = 0xff
+ << ((8 - (mutate_amount % 8)) % 8);
+			for (; k < 16; ++k)
+				mutate_mask[k] = 0;
+			for (k = 0; k < 16; ++k)
+ mutated[k] = (mutated[k] & mutate_mask[k]) |
+ (~mutate_mask[k] &
+ prandom_u32_max(256));
+ cidr = prandom_u32_max(128) + 1;
+ peer = peers[prandom_u32_max(NUM_PEERS)];
+ if (wg_allowedips_insert_v6(&t,
+ (struct in6_addr *)mutated,
+ cidr, peer, &mutex) < 0) {
+ pr_err("allowedips random self-test malloc: FAIL\n");
+ goto free_locked;
+ }
+ if (horrible_allowedips_insert_v6(
+ &h, (struct in6_addr *)mutated, cidr,
+ peer)) {
+ pr_err("allowedips random self-test malloc: FAIL\n");
+ goto free_locked;
+ }
+ }
+ }
+
+ mutex_unlock(&mutex);
+
+ if (IS_ENABLED(DEBUG_PRINT_TRIE_GRAPHVIZ)) {
+ print_tree(t.root4, 32);
+ print_tree(t.root6, 128);
+ }
+
+ for (i = 0; i < NUM_QUERIES; ++i) {
+ prandom_bytes(ip, 4);
+ if (lookup(t.root4, 32, ip) !=
+ horrible_allowedips_lookup_v4(&h, (struct in_addr *)ip)) {
+ pr_err("allowedips random self-test: FAIL\n");
+ goto free;
+ }
+ }
+
+ for (i = 0; i < NUM_QUERIES; ++i) {
+ prandom_bytes(ip, 16);
+ if (lookup(t.root6, 128, ip) !=
+ horrible_allowedips_lookup_v6(&h, (struct in6_addr *)ip)) {
+ pr_err("allowedips random self-test: FAIL\n");
+ goto free;
+ }
+ }
+ ret = true;
+
+free:
+ mutex_lock(&mutex);
+free_locked:
+ wg_allowedips_free(&t, &mutex);
+ mutex_unlock(&mutex);
+ horrible_allowedips_free(&h);
+ if (peers) {
+ for (i = 0; i < NUM_PEERS; ++i)
+ kfree(peers[i]);
+ }
+ kfree(peers);
+ return ret;
+}
+
+static __init inline struct in_addr *ip4(u8 a, u8 b, u8 c, u8 d)
+{
+ static struct in_addr ip;
+ u8 *split = (u8 *)&ip;
+
+ split[0] = a;
+ split[1] = b;
+ split[2] = c;
+ split[3] = d;
+ return &ip;
+}
+
+static __init inline struct in6_addr *ip6(u32 a, u32 b, u32 c, u32 d)
+{
+ static struct in6_addr ip;
+ __be32 *split = (__be32 *)&ip;
+
+ split[0] = cpu_to_be32(a);
+ split[1] = cpu_to_be32(b);
+ split[2] = cpu_to_be32(c);
+ split[3] = cpu_to_be32(d);
+ return &ip;
+}
+
+static __init struct wg_peer *init_peer(void)
+{
+ struct wg_peer *peer = kzalloc(sizeof(*peer), GFP_KERNEL);
+
+ if (!peer)
+ return NULL;
+ kref_init(&peer->refcount);
+ INIT_LIST_HEAD(&peer->allowedips_list);
+ return peer;
+}
+
+#define insert(version, mem, ipa, ipb, ipc, ipd, cidr) \
+ wg_allowedips_insert_v##version(&t, ip##version(ipa, ipb, ipc, ipd), \
+ cidr, mem, &mutex)
+
+#define maybe_fail() do { \
+ ++i; \
+ if (!_s) { \
+ pr_info("allowedips self-test %zu: FAIL\n", i); \
+ success = false; \
+ } \
+ } while (0)
+
+#define test(version, mem, ipa, ipb, ipc, ipd) do { \
+ bool _s = lookup(t.root##version, (version) == 4 ? 32 : 128, \
+ ip##version(ipa, ipb, ipc, ipd)) == (mem); \
+ maybe_fail(); \
+ } while (0)
+
+#define test_negative(version, mem, ipa, ipb, ipc, ipd) do { \
+ bool _s = lookup(t.root##version, (version) == 4 ? 32 : 128, \
+ ip##version(ipa, ipb, ipc, ipd)) != (mem); \
+ maybe_fail(); \
+ } while (0)
+
+#define test_boolean(cond) do { \
+ bool _s = (cond); \
+ maybe_fail(); \
+ } while (0)
+
+bool __init wg_allowedips_selftest(void)
+{
+ bool found_a = false, found_b = false, found_c = false, found_d = false,
+ found_e = false, found_other = false;
+ struct wg_peer *a = init_peer(), *b = init_peer(), *c = init_peer(),
+ *d = init_peer(), *e = init_peer(), *f = init_peer(),
+ *g = init_peer(), *h = init_peer();
+ struct allowedips_node *iter_node;
+ bool success = false;
+ struct allowedips t;
+ DEFINE_MUTEX(mutex);
+ struct in6_addr ip;
+ size_t i = 0, count = 0;
+ __be64 part;
+
+ mutex_init(&mutex);
+ mutex_lock(&mutex);
+ wg_allowedips_init(&t);
+
+ if (!a || !b || !c || !d || !e || !f || !g || !h) {
+ pr_err("allowedips self-test malloc: FAIL\n");
+ goto free;
+ }
+
+ insert(4, a, 192, 168, 4, 0, 24);
+ insert(4, b, 192, 168, 4, 4, 32);
+ insert(4, c, 192, 168, 0, 0, 16);
+ insert(4, d, 192, 95, 5, 64, 27);
+ /* replaces previous entry, and maskself is required */
+ insert(4, c, 192, 95, 5, 65, 27);
+ insert(6, d, 0x26075300, 0x60006b00, 0, 0xc05f0543, 128);
+ insert(6, c, 0x26075300, 0x60006b00, 0, 0, 64);
+ insert(4, e, 0, 0, 0, 0, 0);
+ insert(6, e, 0, 0, 0, 0, 0);
+ /* replaces previous entry */
+ insert(6, f, 0, 0, 0, 0, 0);
+ insert(6, g, 0x24046800, 0, 0, 0, 32);
+ /* maskself is required */
+ insert(6, h, 0x24046800, 0x40040800, 0xdeadbeef, 0xdeadbeef, 64);
+ insert(6, a, 0x24046800, 0x40040800, 0xdeadbeef, 0xdeadbeef, 128);
+ insert(6, c, 0x24446800, 0x40e40800, 0xdeaebeef, 0xdefbeef, 128);
+ insert(6, b, 0x24446800, 0xf0e40800, 0xeeaebeef, 0, 98);
+ insert(4, g, 64, 15, 112, 0, 20);
+ /* maskself is required */
+ insert(4, h, 64, 15, 123, 211, 25);
+ insert(4, a, 10, 0, 0, 0, 25);
+ insert(4, b, 10, 0, 0, 128, 25);
+ insert(4, a, 10, 1, 0, 0, 30);
+ insert(4, b, 10, 1, 0, 4, 30);
+ insert(4, c, 10, 1, 0, 8, 29);
+ insert(4, d, 10, 1, 0, 16, 29);
+
+ if (IS_ENABLED(DEBUG_PRINT_TRIE_GRAPHVIZ)) {
+ print_tree(t.root4, 32);
+ print_tree(t.root6, 128);
+ }
+
+ success = true;
+
+ test(4, a, 192, 168, 4, 20);
+ test(4, a, 192, 168, 4, 0);
+ test(4, b, 192, 168, 4, 4);
+ test(4, c, 192, 168, 200, 182);
+ test(4, c, 192, 95, 5, 68);
+ test(4, e, 192, 95, 5, 96);
+ test(6, d, 0x26075300, 0x60006b00, 0, 0xc05f0543);
+ test(6, c, 0x26075300, 0x60006b00, 0, 0xc02e01ee);
+ test(6, f, 0x26075300, 0x60006b01, 0, 0);
+ test(6, g, 0x24046800, 0x40040806, 0, 0x1006);
+ test(6, g, 0x24046800, 0x40040806, 0x1234, 0x5678);
+ test(6, f, 0x240467ff, 0x40040806, 0x1234, 0x5678);
+ test(6, f, 0x24046801, 0x40040806, 0x1234, 0x5678);
+ test(6, h, 0x24046800, 0x40040800, 0x1234, 0x5678);
+ test(6, h, 0x24046800, 0x40040800, 0, 0);
+ test(6, h, 0x24046800, 0x40040800, 0x10101010, 0x10101010);
+ test(6, a, 0x24046800, 0x40040800, 0xdeadbeef, 0xdeadbeef);
+ test(4, g, 64, 15, 116, 26);
+ test(4, g, 64, 15, 127, 3);
+ test(4, g, 64, 15, 123, 1);
+ test(4, h, 64, 15, 123, 128);
+ test(4, h, 64, 15, 123, 129);
+ test(4, a, 10, 0, 0, 52);
+ test(4, b, 10, 0, 0, 220);
+ test(4, a, 10, 1, 0, 2);
+ test(4, b, 10, 1, 0, 6);
+ test(4, c, 10, 1, 0, 10);
+ test(4, d, 10, 1, 0, 20);
+
+ insert(4, a, 1, 0, 0, 0, 32);
+ insert(4, a, 64, 0, 0, 0, 32);
+ insert(4, a, 128, 0, 0, 0, 32);
+ insert(4, a, 192, 0, 0, 0, 32);
+ insert(4, a, 255, 0, 0, 0, 32);
+ wg_allowedips_remove_by_peer(&t, a, &mutex);
+ test_negative(4, a, 1, 0, 0, 0);
+ test_negative(4, a, 64, 0, 0, 0);
+ test_negative(4, a, 128, 0, 0, 0);
+ test_negative(4, a, 192, 0, 0, 0);
+ test_negative(4, a, 255, 0, 0, 0);
+
+ wg_allowedips_free(&t, &mutex);
+ wg_allowedips_init(&t);
+ insert(4, a, 192, 168, 0, 0, 16);
+ insert(4, a, 192, 168, 0, 0, 24);
+ wg_allowedips_remove_by_peer(&t, a, &mutex);
+ test_negative(4, a, 192, 168, 0, 1);
+
+ /* These will hit the WARN_ON(len >= 128) in free_node if something
+ * goes wrong.
+ */
+ for (i = 0; i < 128; ++i) {
+ part = cpu_to_be64(~(1LLU << (i % 64)));
+ memset(&ip, 0xff, 16);
+ memcpy((u8 *)&ip + (i < 64) * 8, &part, 8);
+ wg_allowedips_insert_v6(&t, &ip, 128, a, &mutex);
+ }
+
+ wg_allowedips_free(&t, &mutex);
+
+ wg_allowedips_init(&t);
+ insert(4, a, 192, 95, 5, 93, 27);
+ insert(6, a, 0x26075300, 0x60006b00, 0, 0xc05f0543, 128);
+ insert(4, a, 10, 1, 0, 20, 29);
+ insert(6, a, 0x26075300, 0x6d8a6bf8, 0xdab1f1df, 0xc05f1523, 83);
+ insert(6, a, 0x26075300, 0x6d8a6bf8, 0xdab1f1df, 0xc05f1523, 21);
+ list_for_each_entry(iter_node, &a->allowedips_list, peer_list) {
+ u8 cidr, ip[16] __aligned(__alignof(u64));
+ int family = wg_allowedips_read_node(iter_node, ip, &cidr);
+
+ count++;
+
+ if (cidr == 27 && family == AF_INET &&
+ !memcmp(ip, ip4(192, 95, 5, 64), sizeof(struct in_addr)))
+ found_a = true;
+ else if (cidr == 128 && family == AF_INET6 &&
+ !memcmp(ip, ip6(0x26075300, 0x60006b00, 0, 0xc05f0543),
+ sizeof(struct in6_addr)))
+ found_b = true;
+ else if (cidr == 29 && family == AF_INET &&
+ !memcmp(ip, ip4(10, 1, 0, 16), sizeof(struct in_addr)))
+ found_c = true;
+ else if (cidr == 83 && family == AF_INET6 &&
+ !memcmp(ip, ip6(0x26075300, 0x6d8a6bf8, 0xdab1e000, 0),
+ sizeof(struct in6_addr)))
+ found_d = true;
+ else if (cidr == 21 && family == AF_INET6 &&
+ !memcmp(ip, ip6(0x26075000, 0, 0, 0),
+ sizeof(struct in6_addr)))
+ found_e = true;
+ else
+ found_other = true;
+ }
+ test_boolean(count == 5);
+ test_boolean(found_a);
+ test_boolean(found_b);
+ test_boolean(found_c);
+ test_boolean(found_d);
+ test_boolean(found_e);
+ test_boolean(!found_other);
+
+ if (IS_ENABLED(DEBUG_RANDOM_TRIE) && success)
+ success = randomized_test();
+
+ if (success)
+ pr_info("allowedips self-tests: pass\n");
+
+free:
+ wg_allowedips_free(&t, &mutex);
+ kfree(a);
+ kfree(b);
+ kfree(c);
+ kfree(d);
+ kfree(e);
+ kfree(f);
+ kfree(g);
+ kfree(h);
+ mutex_unlock(&mutex);
+
+ return success;
+}
+
+#undef test_negative
+#undef test
+#undef remove
+#undef insert
+#undef init_peer
+
+#endif
diff --git a/drivers/net/wireguard/selftest/counter.c b/drivers/net/wireguard/selftest/counter.c
new file mode 100644
index 000000000000..f4fbb9072ed7
--- /dev/null
+++ b/drivers/net/wireguard/selftest/counter.c
@@ -0,0 +1,104 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2015-2019 Jason A. Donenfeld <Jason@zx2c4.com>. All Rights Reserved.
+ */
+
+#ifdef DEBUG
+bool __init wg_packet_counter_selftest(void)
+{
+ unsigned int test_num = 0, i;
+ union noise_counter counter;
+ bool success = true;
+
+#define T_INIT do { \
+ memset(&counter, 0, sizeof(union noise_counter)); \
+ spin_lock_init(&counter.receive.lock); \
+ } while (0)
+#define T_LIM (COUNTER_WINDOW_SIZE + 1)
+#define T(n, v) do { \
+ ++test_num; \
+ if (counter_validate(&counter, n) != (v)) { \
+ pr_err("nonce counter self-test %u: FAIL\n", \
+ test_num); \
+ success = false; \
+ } \
+ } while (0)
+
+ T_INIT;
+ /* 1 */ T(0, true);
+ /* 2 */ T(1, true);
+ /* 3 */ T(1, false);
+ /* 4 */ T(9, true);
+ /* 5 */ T(8, true);
+ /* 6 */ T(7, true);
+ /* 7 */ T(7, false);
+ /* 8 */ T(T_LIM, true);
+ /* 9 */ T(T_LIM - 1, true);
+ /* 10 */ T(T_LIM - 1, false);
+ /* 11 */ T(T_LIM - 2, true);
+ /* 12 */ T(2, true);
+ /* 13 */ T(2, false);
+ /* 14 */ T(T_LIM + 16, true);
+ /* 15 */ T(3, false);
+ /* 16 */ T(T_LIM + 16, false);
+ /* 17 */ T(T_LIM * 4, true);
+ /* 18 */ T(T_LIM * 4 - (T_LIM - 1), true);
+ /* 19 */ T(10, false);
+ /* 20 */ T(T_LIM * 4 - T_LIM, false);
+ /* 21 */ T(T_LIM * 4 - (T_LIM + 1), false);
+ /* 22 */ T(T_LIM * 4 - (T_LIM - 2), true);
+ /* 23 */ T(T_LIM * 4 + 1 - T_LIM, false);
+ /* 24 */ T(0, false);
+ /* 25 */ T(REJECT_AFTER_MESSAGES, false);
+ /* 26 */ T(REJECT_AFTER_MESSAGES - 1, true);
+ /* 27 */ T(REJECT_AFTER_MESSAGES, false);
+ /* 28 */ T(REJECT_AFTER_MESSAGES - 1, false);
+ /* 29 */ T(REJECT_AFTER_MESSAGES - 2, true);
+ /* 30 */ T(REJECT_AFTER_MESSAGES + 1, false);
+ /* 31 */ T(REJECT_AFTER_MESSAGES + 2, false);
+ /* 32 */ T(REJECT_AFTER_MESSAGES - 2, false);
+ /* 33 */ T(REJECT_AFTER_MESSAGES - 3, true);
+ /* 34 */ T(0, false);
+
+ T_INIT;
+ for (i = 1; i <= COUNTER_WINDOW_SIZE; ++i)
+ T(i, true);
+ T(0, true);
+ T(0, false);
+
+ T_INIT;
+ for (i = 2; i <= COUNTER_WINDOW_SIZE + 1; ++i)
+ T(i, true);
+ T(1, true);
+ T(0, false);
+
+ T_INIT;
+ for (i = COUNTER_WINDOW_SIZE + 1; i-- > 0;)
+ T(i, true);
+
+ T_INIT;
+ for (i = COUNTER_WINDOW_SIZE + 2; i-- > 1;)
+ T(i, true);
+ T(0, false);
+
+ T_INIT;
+ for (i = COUNTER_WINDOW_SIZE + 1; i-- > 1;)
+ T(i, true);
+ T(COUNTER_WINDOW_SIZE + 1, true);
+ T(0, false);
+
+ T_INIT;
+ for (i = COUNTER_WINDOW_SIZE + 1; i-- > 1;)
+ T(i, true);
+ T(0, true);
+ T(COUNTER_WINDOW_SIZE + 1, true);
+
+#undef T
+#undef T_LIM
+#undef T_INIT
+
+ if (success)
+ pr_info("nonce counter self-tests: pass\n");
+ return success;
+}
+#endif
diff --git a/drivers/net/wireguard/selftest/ratelimiter.c b/drivers/net/wireguard/selftest/ratelimiter.c
new file mode 100644
index 000000000000..bcd6462e4540
--- /dev/null
+++ b/drivers/net/wireguard/selftest/ratelimiter.c
@@ -0,0 +1,226 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2015-2019 Jason A. Donenfeld <Jason@zx2c4.com>. All Rights Reserved.
+ */
+
+#ifdef DEBUG
+
+#include <linux/jiffies.h>
+
+static const struct {
+ bool result;
+ unsigned int msec_to_sleep_before;
+} expected_results[] __initconst = {
+ [0 ... PACKETS_BURSTABLE - 1] = { true, 0 },
+ [PACKETS_BURSTABLE] = { false, 0 },
+ [PACKETS_BURSTABLE + 1] = { true, MSEC_PER_SEC / PACKETS_PER_SECOND },
+ [PACKETS_BURSTABLE + 2] = { false, 0 },
+ [PACKETS_BURSTABLE + 3] = { true, (MSEC_PER_SEC / PACKETS_PER_SECOND) * 2 },
+ [PACKETS_BURSTABLE + 4] = { true, 0 },
+ [PACKETS_BURSTABLE + 5] = { false, 0 }
+};
+
+static __init unsigned int maximum_jiffies_at_index(int index)
+{
+ unsigned int total_msecs = 2 * MSEC_PER_SEC / PACKETS_PER_SECOND / 3;
+ int i;
+
+ for (i = 0; i <= index; ++i)
+ total_msecs += expected_results[i].msec_to_sleep_before;
+ return msecs_to_jiffies(total_msecs);
+}
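+
+/* Assuming the ratelimiter's usual parameters (PACKETS_PER_SECOND == 20,
+ * i.e. one token every 50 ms), the deadline starts with 2 * 50 / 3 == 33 ms
+ * of slack and grows by each entry's msec_to_sleep_before, so e.g. index
+ * PACKETS_BURSTABLE + 1 is allowed 33 + 50 == 83 ms from loop start.
+ */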
+
+static __init int timings_test(struct sk_buff *skb4, struct iphdr *hdr4,
+ struct sk_buff *skb6, struct ipv6hdr *hdr6,
+ int *test)
+{
+ unsigned long loop_start_time;
+ int i;
+
+ wg_ratelimiter_gc_entries(NULL);
+ rcu_barrier();
+ loop_start_time = jiffies;
+
+ for (i = 0; i < ARRAY_SIZE(expected_results); ++i) {
+ if (expected_results[i].msec_to_sleep_before)
+ msleep(expected_results[i].msec_to_sleep_before);
+
+ if (time_is_before_jiffies(loop_start_time +
+ maximum_jiffies_at_index(i)))
+ return -ETIMEDOUT;
+ if (wg_ratelimiter_allow(skb4, &init_net) !=
+ expected_results[i].result)
+ return -EXFULL;
+ ++(*test);
+
+ hdr4->saddr = htonl(ntohl(hdr4->saddr) + i + 1);
+ if (time_is_before_jiffies(loop_start_time +
+ maximum_jiffies_at_index(i)))
+ return -ETIMEDOUT;
+ if (!wg_ratelimiter_allow(skb4, &init_net))
+ return -EXFULL;
+ ++(*test);
+
+ hdr4->saddr = htonl(ntohl(hdr4->saddr) - i - 1);
+
+#if IS_ENABLED(CONFIG_IPV6)
+ hdr6->saddr.in6_u.u6_addr32[2] = htonl(i);
+ hdr6->saddr.in6_u.u6_addr32[3] = htonl(i);
+ if (time_is_before_jiffies(loop_start_time +
+ maximum_jiffies_at_index(i)))
+ return -ETIMEDOUT;
+ if (wg_ratelimiter_allow(skb6, &init_net) !=
+ expected_results[i].result)
+ return -EXFULL;
+ ++(*test);
+
+ hdr6->saddr.in6_u.u6_addr32[0] =
+ htonl(ntohl(hdr6->saddr.in6_u.u6_addr32[0]) + i + 1);
+ if (time_is_before_jiffies(loop_start_time +
+ maximum_jiffies_at_index(i)))
+ return -ETIMEDOUT;
+ if (!wg_ratelimiter_allow(skb6, &init_net))
+ return -EXFULL;
+ ++(*test);
+
+ hdr6->saddr.in6_u.u6_addr32[0] =
+ htonl(ntohl(hdr6->saddr.in6_u.u6_addr32[0]) - i - 1);
+
+ if (time_is_before_jiffies(loop_start_time +
+ maximum_jiffies_at_index(i)))
+ return -ETIMEDOUT;
+#endif
+ }
+ return 0;
+}
+
+static __init int capacity_test(struct sk_buff *skb4, struct iphdr *hdr4,
+ int *test)
+{
+ int i;
+
+ wg_ratelimiter_gc_entries(NULL);
+ rcu_barrier();
+
+ if (atomic_read(&total_entries))
+ return -EXFULL;
+ ++(*test);
+
+ for (i = 0; i <= max_entries; ++i) {
+ hdr4->saddr = htonl(i);
+ if (wg_ratelimiter_allow(skb4, &init_net) != (i != max_entries))
+ return -EXFULL;
+ ++(*test);
+ }
+ return 0;
+}
+
+bool __init wg_ratelimiter_selftest(void)
+{
+ enum { TRIALS_BEFORE_GIVING_UP = 5000 };
+ bool success = false;
+ int test = 0, trials;
+ struct sk_buff *skb4, *skb6;
+ struct iphdr *hdr4;
+ struct ipv6hdr *hdr6;
+
+ if (IS_ENABLED(CONFIG_KASAN) || IS_ENABLED(CONFIG_UBSAN))
+ return true;
+
+ BUILD_BUG_ON(MSEC_PER_SEC % PACKETS_PER_SECOND != 0);
+
+ if (wg_ratelimiter_init())
+ goto out;
+ ++test;
+ if (wg_ratelimiter_init()) {
+ wg_ratelimiter_uninit();
+ goto out;
+ }
+ ++test;
+ if (wg_ratelimiter_init()) {
+ wg_ratelimiter_uninit();
+ wg_ratelimiter_uninit();
+ goto out;
+ }
+ ++test;
+
+ skb4 = alloc_skb(sizeof(struct iphdr), GFP_KERNEL);
+ if (unlikely(!skb4))
+ goto err_nofree;
+ skb4->protocol = htons(ETH_P_IP);
+ hdr4 = (struct iphdr *)skb_put(skb4, sizeof(*hdr4));
+ hdr4->saddr = htonl(8182);
+ skb_reset_network_header(skb4);
+ ++test;
+
+#if IS_ENABLED(CONFIG_IPV6)
+ skb6 = alloc_skb(sizeof(struct ipv6hdr), GFP_KERNEL);
+ if (unlikely(!skb6)) {
+ kfree_skb(skb4);
+ goto err_nofree;
+ }
+ skb6->protocol = htons(ETH_P_IPV6);
+ hdr6 = (struct ipv6hdr *)skb_put(skb6, sizeof(*hdr6));
+ hdr6->saddr.in6_u.u6_addr32[0] = htonl(1212);
+ hdr6->saddr.in6_u.u6_addr32[1] = htonl(289188);
+ skb_reset_network_header(skb6);
+ ++test;
+#endif
+
+ for (trials = TRIALS_BEFORE_GIVING_UP;;) {
+ int test_count = 0, ret;
+
+ ret = timings_test(skb4, hdr4, skb6, hdr6, &test_count);
+ if (ret == -ETIMEDOUT) {
+ if (!trials--) {
+ test += test_count;
+ goto err;
+ }
+ msleep(500);
+ continue;
+ } else if (ret < 0) {
+ test += test_count;
+ goto err;
+ } else {
+ test += test_count;
+ break;
+ }
+ }
+
+ for (trials = TRIALS_BEFORE_GIVING_UP;;) {
+ int test_count = 0;
+
+ if (capacity_test(skb4, hdr4, &test_count) < 0) {
+ if (!trials--) {
+ test += test_count;
+ goto err;
+ }
+ msleep(50);
+ continue;
+ }
+ test += test_count;
+ break;
+ }
+
+ success = true;
+
+err:
+ kfree_skb(skb4);
+#if IS_ENABLED(CONFIG_IPV6)
+ kfree_skb(skb6);
+#endif
+err_nofree:
+ wg_ratelimiter_uninit();
+ wg_ratelimiter_uninit();
+ wg_ratelimiter_uninit();
+ /* Uninit one extra time to check underflow detection. */
+ wg_ratelimiter_uninit();
+out:
+ if (success)
+ pr_info("ratelimiter self-tests: pass\n");
+ else
+ pr_err("ratelimiter self-test %d: FAIL\n", test);
+
+ return success;
+}
+#endif
diff --git a/drivers/net/wireguard/send.c b/drivers/net/wireguard/send.c
new file mode 100644
index 000000000000..c13260563446
--- /dev/null
+++ b/drivers/net/wireguard/send.c
@@ -0,0 +1,413 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2015-2019 Jason A. Donenfeld <Jason@zx2c4.com>. All Rights Reserved.
+ */
+
+#include "queueing.h"
+#include "timers.h"
+#include "device.h"
+#include "peer.h"
+#include "socket.h"
+#include "messages.h"
+#include "cookie.h"
+
+#include <linux/uio.h>
+#include <linux/inetdevice.h>
+#include <linux/socket.h>
+#include <net/ip_tunnels.h>
+#include <net/udp.h>
+#include <net/sock.h>
+
+static void wg_packet_send_handshake_initiation(struct wg_peer *peer)
+{
+ struct message_handshake_initiation packet;
+
+ if (!wg_birthdate_has_expired(atomic64_read(&peer->last_sent_handshake),
+ REKEY_TIMEOUT))
+ return; /* This function is rate limited. */
+
+ atomic64_set(&peer->last_sent_handshake, ktime_get_coarse_boottime_ns());
+ net_dbg_ratelimited("%s: Sending handshake initiation to peer %llu (%pISpfsc)\n",
+ peer->device->dev->name, peer->internal_id,
+ &peer->endpoint.addr);
+
+ if (wg_noise_handshake_create_initiation(&packet, &peer->handshake)) {
+ wg_cookie_add_mac_to_packet(&packet, sizeof(packet), peer);
+ wg_timers_any_authenticated_packet_traversal(peer);
+ wg_timers_any_authenticated_packet_sent(peer);
+ atomic64_set(&peer->last_sent_handshake,
+ ktime_get_coarse_boottime_ns());
+ wg_socket_send_buffer_to_peer(peer, &packet, sizeof(packet),
+ HANDSHAKE_DSCP);
+ wg_timers_handshake_initiated(peer);
+ }
+}
+
+void wg_packet_handshake_send_worker(struct work_struct *work)
+{
+ struct wg_peer *peer = container_of(work, struct wg_peer,
+ transmit_handshake_work);
+
+ wg_packet_send_handshake_initiation(peer);
+ wg_peer_put(peer);
+}
+
+void wg_packet_send_queued_handshake_initiation(struct wg_peer *peer,
+ bool is_retry)
+{
+ if (!is_retry)
+ peer->timer_handshake_attempts = 0;
+
+ rcu_read_lock_bh();
+	/* We check last_sent_handshake here in addition to inside the function
+ * we're queueing up, so that we don't queue things if not strictly
+ * necessary:
+ */
+ if (!wg_birthdate_has_expired(atomic64_read(&peer->last_sent_handshake),
+ REKEY_TIMEOUT) ||
+ unlikely(READ_ONCE(peer->is_dead)))
+ goto out;
+
+ wg_peer_get(peer);
+	/* Queues up a call to wg_packet_handshake_send_worker(), which does a
+	 * wg_peer_put(peer) when it's done:
+	 */
+ if (!queue_work(peer->device->handshake_send_wq,
+ &peer->transmit_handshake_work))
+ /* If the work was already queued, we want to drop the
+ * extra reference:
+ */
+ wg_peer_put(peer);
+out:
+ rcu_read_unlock_bh();
+}
+
+void wg_packet_send_handshake_response(struct wg_peer *peer)
+{
+ struct message_handshake_response packet;
+
+ atomic64_set(&peer->last_sent_handshake, ktime_get_coarse_boottime_ns());
+ net_dbg_ratelimited("%s: Sending handshake response to peer %llu (%pISpfsc)\n",
+ peer->device->dev->name, peer->internal_id,
+ &peer->endpoint.addr);
+
+ if (wg_noise_handshake_create_response(&packet, &peer->handshake)) {
+ wg_cookie_add_mac_to_packet(&packet, sizeof(packet), peer);
+ if (wg_noise_handshake_begin_session(&peer->handshake,
+ &peer->keypairs)) {
+ wg_timers_session_derived(peer);
+ wg_timers_any_authenticated_packet_traversal(peer);
+ wg_timers_any_authenticated_packet_sent(peer);
+ atomic64_set(&peer->last_sent_handshake,
+ ktime_get_coarse_boottime_ns());
+ wg_socket_send_buffer_to_peer(peer, &packet,
+ sizeof(packet),
+ HANDSHAKE_DSCP);
+ }
+ }
+}
+
+void wg_packet_send_handshake_cookie(struct wg_device *wg,
+ struct sk_buff *initiating_skb,
+ __le32 sender_index)
+{
+ struct message_handshake_cookie packet;
+
+ net_dbg_skb_ratelimited("%s: Sending cookie response for denied handshake message for %pISpfsc\n",
+ wg->dev->name, initiating_skb);
+ wg_cookie_message_create(&packet, initiating_skb, sender_index,
+ &wg->cookie_checker);
+ wg_socket_send_buffer_as_reply_to_skb(wg, initiating_skb, &packet,
+ sizeof(packet));
+}
+
+static void keep_key_fresh(struct wg_peer *peer)
+{
+ struct noise_keypair *keypair;
+ bool send = false;
+
+ rcu_read_lock_bh();
+ keypair = rcu_dereference_bh(peer->keypairs.current_keypair);
+ if (likely(keypair && READ_ONCE(keypair->sending.is_valid)) &&
+ (unlikely(atomic64_read(&keypair->sending.counter.counter) >
+ REKEY_AFTER_MESSAGES) ||
+ (keypair->i_am_the_initiator &&
+ unlikely(wg_birthdate_has_expired(keypair->sending.birthdate,
+ REKEY_AFTER_TIME)))))
+ send = true;
+ rcu_read_unlock_bh();
+
+ if (send)
+ wg_packet_send_queued_handshake_initiation(peer, false);
+}
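+
+/* Under the protocol's usual thresholds (REKEY_AFTER_MESSAGES == 1 << 60,
+ * REKEY_AFTER_TIME == 120 s; assumed here for illustration), this
+ * proactively kicks off a new handshake well before the hard
+ * REJECT_AFTER_MESSAGES / REJECT_AFTER_TIME limits would force packets to
+ * be dropped.
+ */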
+
+static unsigned int calculate_skb_padding(struct sk_buff *skb)
+{
+ /* We do this modulo business with the MTU, just in case the networking
+ * layer gives us a packet that's bigger than the MTU. In that case, we
+	 * wouldn't want the final subtraction to wrap around when padded_size
+	 * is clamped.
+ */
+ unsigned int last_unit = skb->len % PACKET_CB(skb)->mtu;
+ unsigned int padded_size = ALIGN(last_unit, MESSAGE_PADDING_MULTIPLE);
+
+ if (padded_size > PACKET_CB(skb)->mtu)
+ padded_size = PACKET_CB(skb)->mtu;
+ return padded_size - last_unit;
+}
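+
+/* E.g. (assuming MESSAGE_PADDING_MULTIPLE == 16): a 100-byte packet on a
+ * 1420-byte MTU has last_unit == 100 and padded_size == 112, and so gets
+ * 12 bytes of zero padding; a zero-length keepalive stays at 0.
+ */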
+
+static bool encrypt_packet(struct sk_buff *skb, struct noise_keypair *keypair)
+{
+ unsigned int padding_len, plaintext_len, trailer_len;
+ struct scatterlist sg[MAX_SKB_FRAGS + 8];
+ struct message_data *header;
+ struct sk_buff *trailer;
+ int num_frags;
+
+ /* Calculate lengths. */
+ padding_len = calculate_skb_padding(skb);
+ trailer_len = padding_len + noise_encrypted_len(0);
+ plaintext_len = skb->len + padding_len;
+
+ /* Expand data section to have room for padding and auth tag. */
+ num_frags = skb_cow_data(skb, trailer_len, &trailer);
+ if (unlikely(num_frags < 0 || num_frags > ARRAY_SIZE(sg)))
+ return false;
+
+ /* Set the padding to zeros, and make sure it and the auth tag are part
+ * of the skb.
+ */
+ memset(skb_tail_pointer(trailer), 0, padding_len);
+
+ /* Expand head section to have room for our header and the network
+ * stack's headers.
+ */
+ if (unlikely(skb_cow_head(skb, DATA_PACKET_HEAD_ROOM) < 0))
+ return false;
+
+ /* Finalize checksum calculation for the inner packet, if required. */
+ if (unlikely(skb->ip_summed == CHECKSUM_PARTIAL &&
+ skb_checksum_help(skb)))
+ return false;
+
+ /* Only after checksumming can we safely add on the padding at the end
+ * and the header.
+ */
+ skb_set_inner_network_header(skb, 0);
+ header = (struct message_data *)skb_push(skb, sizeof(*header));
+ header->header.type = cpu_to_le32(MESSAGE_DATA);
+ header->key_idx = keypair->remote_index;
+ header->counter = cpu_to_le64(PACKET_CB(skb)->nonce);
+ pskb_put(skb, trailer, trailer_len);
+
+ /* Now we can encrypt the scattergather segments */
+ sg_init_table(sg, num_frags);
+ if (skb_to_sgvec(skb, sg, sizeof(struct message_data),
+ noise_encrypted_len(plaintext_len)) <= 0)
+ return false;
+ return chacha20poly1305_encrypt_sg_inplace(sg, plaintext_len, NULL, 0,
+ PACKET_CB(skb)->nonce,
+ keypair->sending.key);
+}
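+
+/* The resulting wire format is, roughly (assuming noise_encrypted_len(n)
+ * == n + 16 for the Poly1305 tag):
+ *
+ *   type (le32) | receiver index (le32) | counter (le64) |
+ *   ciphertext of (plaintext || zero padding) | 16-byte tag
+ *
+ * so the 100-byte packet with 12 bytes of padding from the example above
+ * goes out as a 16 + 112 + 16 == 144-byte data message.
+ */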
+
+void wg_packet_send_keepalive(struct wg_peer *peer)
+{
+ struct sk_buff *skb;
+
+ if (skb_queue_empty(&peer->staged_packet_queue)) {
+ skb = alloc_skb(DATA_PACKET_HEAD_ROOM + MESSAGE_MINIMUM_LENGTH,
+ GFP_ATOMIC);
+ if (unlikely(!skb))
+ return;
+ skb_reserve(skb, DATA_PACKET_HEAD_ROOM);
+ skb->dev = peer->device->dev;
+ PACKET_CB(skb)->mtu = skb->dev->mtu;
+ skb_queue_tail(&peer->staged_packet_queue, skb);
+ net_dbg_ratelimited("%s: Sending keepalive packet to peer %llu (%pISpfsc)\n",
+ peer->device->dev->name, peer->internal_id,
+ &peer->endpoint.addr);
+ }
+
+ wg_packet_send_staged_packets(peer);
+}
+
+static void wg_packet_create_data_done(struct sk_buff *first,
+ struct wg_peer *peer)
+{
+ struct sk_buff *skb, *next;
+ bool is_keepalive, data_sent = false;
+
+ wg_timers_any_authenticated_packet_traversal(peer);
+ wg_timers_any_authenticated_packet_sent(peer);
+ skb_list_walk_safe(first, skb, next) {
+ is_keepalive = skb->len == message_data_len(0);
+ if (likely(!wg_socket_send_skb_to_peer(peer, skb,
+ PACKET_CB(skb)->ds) && !is_keepalive))
+ data_sent = true;
+ }
+
+ if (likely(data_sent))
+ wg_timers_data_sent(peer);
+
+ keep_key_fresh(peer);
+}
+
+void wg_packet_tx_worker(struct work_struct *work)
+{
+ struct crypt_queue *queue = container_of(work, struct crypt_queue,
+ work);
+ struct noise_keypair *keypair;
+ enum packet_state state;
+ struct sk_buff *first;
+ struct wg_peer *peer;
+
+ while ((first = __ptr_ring_peek(&queue->ring)) != NULL &&
+ (state = atomic_read_acquire(&PACKET_CB(first)->state)) !=
+ PACKET_STATE_UNCRYPTED) {
+ __ptr_ring_discard_one(&queue->ring);
+ peer = PACKET_PEER(first);
+ keypair = PACKET_CB(first)->keypair;
+
+ if (likely(state == PACKET_STATE_CRYPTED))
+ wg_packet_create_data_done(first, peer);
+ else
+ kfree_skb_list(first);
+
+ wg_noise_keypair_put(keypair, false);
+ wg_peer_put(peer);
+ }
+}
+
+void wg_packet_encrypt_worker(struct work_struct *work)
+{
+ struct crypt_queue *queue = container_of(work, struct multicore_worker,
+ work)->ptr;
+ struct sk_buff *first, *skb, *next;
+
+ while ((first = ptr_ring_consume_bh(&queue->ring)) != NULL) {
+ enum packet_state state = PACKET_STATE_CRYPTED;
+
+ skb_list_walk_safe(first, skb, next) {
+ if (likely(encrypt_packet(skb,
+ PACKET_CB(first)->keypair))) {
+ wg_reset_packet(skb);
+ } else {
+ state = PACKET_STATE_DEAD;
+ break;
+ }
+ }
+ wg_queue_enqueue_per_peer(&PACKET_PEER(first)->tx_queue, first,
+ state);
+ }
+}
+
+static void wg_packet_create_data(struct sk_buff *first)
+{
+ struct wg_peer *peer = PACKET_PEER(first);
+ struct wg_device *wg = peer->device;
+ int ret = -EINVAL;
+
+ rcu_read_lock_bh();
+ if (unlikely(READ_ONCE(peer->is_dead)))
+ goto err;
+
+ ret = wg_queue_enqueue_per_device_and_peer(&wg->encrypt_queue,
+ &peer->tx_queue, first,
+ wg->packet_crypt_wq,
+ &wg->encrypt_queue.last_cpu);
+ if (unlikely(ret == -EPIPE))
+ wg_queue_enqueue_per_peer(&peer->tx_queue, first,
+ PACKET_STATE_DEAD);
+err:
+ rcu_read_unlock_bh();
+ if (likely(!ret || ret == -EPIPE))
+ return;
+ wg_noise_keypair_put(PACKET_CB(first)->keypair, false);
+ wg_peer_put(peer);
+ kfree_skb_list(first);
+}
+
+void wg_packet_purge_staged_packets(struct wg_peer *peer)
+{
+ spin_lock_bh(&peer->staged_packet_queue.lock);
+ peer->device->dev->stats.tx_dropped += peer->staged_packet_queue.qlen;
+ __skb_queue_purge(&peer->staged_packet_queue);
+ spin_unlock_bh(&peer->staged_packet_queue.lock);
+}
+
+void wg_packet_send_staged_packets(struct wg_peer *peer)
+{
+ struct noise_symmetric_key *key;
+ struct noise_keypair *keypair;
+ struct sk_buff_head packets;
+ struct sk_buff *skb;
+
+ /* Steal the current queue into our local one. */
+ __skb_queue_head_init(&packets);
+ spin_lock_bh(&peer->staged_packet_queue.lock);
+ skb_queue_splice_init(&peer->staged_packet_queue, &packets);
+ spin_unlock_bh(&peer->staged_packet_queue.lock);
+ if (unlikely(skb_queue_empty(&packets)))
+ return;
+
+ /* First we make sure we have a valid reference to a valid key. */
+ rcu_read_lock_bh();
+ keypair = wg_noise_keypair_get(
+ rcu_dereference_bh(peer->keypairs.current_keypair));
+ rcu_read_unlock_bh();
+ if (unlikely(!keypair))
+ goto out_nokey;
+ key = &keypair->sending;
+ if (unlikely(!READ_ONCE(key->is_valid)))
+ goto out_nokey;
+ if (unlikely(wg_birthdate_has_expired(key->birthdate,
+ REJECT_AFTER_TIME)))
+ goto out_invalid;
+
+ /* After we know we have a somewhat valid key, we now try to assign
+ * nonces to all of the packets in the queue. If we can't assign nonces
+ * for all of them, we just consider it a failure and wait for the next
+ * handshake.
+ */
+ skb_queue_walk(&packets, skb) {
+ /* 0 for no outer TOS: no leak. TODO: at some later point, we
+ * might consider using flowi->tos as outer instead.
+ */
+ PACKET_CB(skb)->ds = ip_tunnel_ecn_encap(0, ip_hdr(skb), skb);
+ PACKET_CB(skb)->nonce =
+ atomic64_inc_return(&key->counter.counter) - 1;
+ if (unlikely(PACKET_CB(skb)->nonce >= REJECT_AFTER_MESSAGES))
+ goto out_invalid;
+ }
+
+ packets.prev->next = NULL;
+ wg_peer_get(keypair->entry.peer);
+ PACKET_CB(packets.next)->keypair = keypair;
+ wg_packet_create_data(packets.next);
+ return;
+
+out_invalid:
+ WRITE_ONCE(key->is_valid, false);
+out_nokey:
+ wg_noise_keypair_put(keypair, false);
+
+ /* We orphan the packets if we're waiting on a handshake, so that they
+ * don't block a socket's pool.
+ */
+ skb_queue_walk(&packets, skb)
+ skb_orphan(skb);
+ /* Then we put them back on the top of the queue. We're not too
+ * concerned about accidentally getting things a little out of order if
+	 * packets are being added really fast, because this queue only holds
+	 * packets from before they can even be sent, and it's small anyway.
+ */
+ spin_lock_bh(&peer->staged_packet_queue.lock);
+ skb_queue_splice(&packets, &peer->staged_packet_queue);
+ spin_unlock_bh(&peer->staged_packet_queue.lock);
+
+ /* If we're exiting because there's something wrong with the key, it
+ * means we should initiate a new handshake.
+ */
+ wg_packet_send_queued_handshake_initiation(peer, false);
+}
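+
+/* A note on ordering: nonces are reserved above with atomic64_inc_return()
+ * before the batch is handed to the encryption workers, so every packet
+ * owns a unique counter value even when several CPUs enter this path at
+ * once; in-order transmission is then restored by wg_packet_tx_worker()
+ * draining the peer's tx_queue ring.
+ */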
diff --git a/drivers/net/wireguard/socket.c b/drivers/net/wireguard/socket.c
new file mode 100644
index 000000000000..262f3b5c819d
--- /dev/null
+++ b/drivers/net/wireguard/socket.c
@@ -0,0 +1,438 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2015-2019 Jason A. Donenfeld <Jason@zx2c4.com>. All Rights Reserved.
+ */
+
+#include "device.h"
+#include "peer.h"
+#include "socket.h"
+#include "queueing.h"
+#include "messages.h"
+
+#include <linux/ctype.h>
+#include <linux/net.h>
+#include <linux/if_vlan.h>
+#include <linux/if_ether.h>
+#include <linux/inetdevice.h>
+#include <net/udp_tunnel.h>
+#include <net/ipv6.h>
+
+static int send4(struct wg_device *wg, struct sk_buff *skb,
+ struct endpoint *endpoint, u8 ds, struct dst_cache *cache)
+{
+ struct flowi4 fl = {
+ .saddr = endpoint->src4.s_addr,
+ .daddr = endpoint->addr4.sin_addr.s_addr,
+ .fl4_dport = endpoint->addr4.sin_port,
+ .flowi4_mark = wg->fwmark,
+ .flowi4_proto = IPPROTO_UDP
+ };
+ struct rtable *rt = NULL;
+ struct sock *sock;
+ int ret = 0;
+
+ skb_mark_not_on_list(skb);
+ skb->dev = wg->dev;
+ skb->mark = wg->fwmark;
+
+ rcu_read_lock_bh();
+ sock = rcu_dereference_bh(wg->sock4);
+
+ if (unlikely(!sock)) {
+ ret = -ENONET;
+ goto err;
+ }
+
+ fl.fl4_sport = inet_sk(sock)->inet_sport;
+
+ if (cache)
+ rt = dst_cache_get_ip4(cache, &fl.saddr);
+
+ if (!rt) {
+ security_sk_classify_flow(sock, flowi4_to_flowi(&fl));
+ if (unlikely(!inet_confirm_addr(sock_net(sock), NULL, 0,
+ fl.saddr, RT_SCOPE_HOST))) {
+ endpoint->src4.s_addr = 0;
+ *(__force __be32 *)&endpoint->src_if4 = 0;
+ fl.saddr = 0;
+ if (cache)
+ dst_cache_reset(cache);
+ }
+ rt = ip_route_output_flow(sock_net(sock), &fl, sock);
+ if (unlikely(endpoint->src_if4 && ((IS_ERR(rt) &&
+ PTR_ERR(rt) == -EINVAL) || (!IS_ERR(rt) &&
+ rt->dst.dev->ifindex != endpoint->src_if4)))) {
+ endpoint->src4.s_addr = 0;
+ *(__force __be32 *)&endpoint->src_if4 = 0;
+ fl.saddr = 0;
+ if (cache)
+ dst_cache_reset(cache);
+ if (!IS_ERR(rt))
+ ip_rt_put(rt);
+ rt = ip_route_output_flow(sock_net(sock), &fl, sock);
+ }
+ if (unlikely(IS_ERR(rt))) {
+ ret = PTR_ERR(rt);
+ net_dbg_ratelimited("%s: No route to %pISpfsc, error %d\n",
+ wg->dev->name, &endpoint->addr, ret);
+ goto err;
+ } else if (unlikely(rt->dst.dev == skb->dev)) {
+ ip_rt_put(rt);
+ ret = -ELOOP;
+ net_dbg_ratelimited("%s: Avoiding routing loop to %pISpfsc\n",
+ wg->dev->name, &endpoint->addr);
+ goto err;
+ }
+ if (cache)
+ dst_cache_set_ip4(cache, &rt->dst, fl.saddr);
+ }
+
+ skb->ignore_df = 1;
+ udp_tunnel_xmit_skb(rt, sock, skb, fl.saddr, fl.daddr, ds,
+ ip4_dst_hoplimit(&rt->dst), 0, fl.fl4_sport,
+ fl.fl4_dport, false, false);
+ goto out;
+
+err:
+ kfree_skb(skb);
+out:
+ rcu_read_unlock_bh();
+ return ret;
+}
+
+static int send6(struct wg_device *wg, struct sk_buff *skb,
+ struct endpoint *endpoint, u8 ds, struct dst_cache *cache)
+{
+#if IS_ENABLED(CONFIG_IPV6)
+ struct flowi6 fl = {
+ .saddr = endpoint->src6,
+ .daddr = endpoint->addr6.sin6_addr,
+ .fl6_dport = endpoint->addr6.sin6_port,
+ .flowi6_mark = wg->fwmark,
+ .flowi6_oif = endpoint->addr6.sin6_scope_id,
+ .flowi6_proto = IPPROTO_UDP
+ /* TODO: addr->sin6_flowinfo */
+ };
+ struct dst_entry *dst = NULL;
+ struct sock *sock;
+ int ret = 0;
+
+ skb_mark_not_on_list(skb);
+ skb->dev = wg->dev;
+ skb->mark = wg->fwmark;
+
+ rcu_read_lock_bh();
+ sock = rcu_dereference_bh(wg->sock6);
+
+ if (unlikely(!sock)) {
+ ret = -ENONET;
+ goto err;
+ }
+
+ fl.fl6_sport = inet_sk(sock)->inet_sport;
+
+ if (cache)
+ dst = dst_cache_get_ip6(cache, &fl.saddr);
+
+ if (!dst) {
+ security_sk_classify_flow(sock, flowi6_to_flowi(&fl));
+ if (unlikely(!ipv6_addr_any(&fl.saddr) &&
+ !ipv6_chk_addr(sock_net(sock), &fl.saddr, NULL, 0))) {
+ endpoint->src6 = fl.saddr = in6addr_any;
+ if (cache)
+ dst_cache_reset(cache);
+ }
+ dst = ipv6_stub->ipv6_dst_lookup_flow(sock_net(sock), sock, &fl,
+ NULL);
+ if (unlikely(IS_ERR(dst))) {
+ ret = PTR_ERR(dst);
+ net_dbg_ratelimited("%s: No route to %pISpfsc, error %d\n",
+ wg->dev->name, &endpoint->addr, ret);
+ goto err;
+ } else if (unlikely(dst->dev == skb->dev)) {
+ dst_release(dst);
+ ret = -ELOOP;
+ net_dbg_ratelimited("%s: Avoiding routing loop to %pISpfsc\n",
+ wg->dev->name, &endpoint->addr);
+ goto err;
+ }
+ if (cache)
+ dst_cache_set_ip6(cache, dst, &fl.saddr);
+ }
+
+ skb->ignore_df = 1;
+ udp_tunnel6_xmit_skb(dst, sock, skb, skb->dev, &fl.saddr, &fl.daddr, ds,
+ ip6_dst_hoplimit(dst), 0, fl.fl6_sport,
+ fl.fl6_dport, false);
+ goto out;
+
+err:
+ kfree_skb(skb);
+out:
+ rcu_read_unlock_bh();
+ return ret;
+#else
+ return -EAFNOSUPPORT;
+#endif
+}
+
+int wg_socket_send_skb_to_peer(struct wg_peer *peer, struct sk_buff *skb, u8 ds)
+{
+ size_t skb_len = skb->len;
+ int ret = -EAFNOSUPPORT;
+
+ read_lock_bh(&peer->endpoint_lock);
+ if (peer->endpoint.addr.sa_family == AF_INET)
+ ret = send4(peer->device, skb, &peer->endpoint, ds,
+ &peer->endpoint_cache);
+ else if (peer->endpoint.addr.sa_family == AF_INET6)
+ ret = send6(peer->device, skb, &peer->endpoint, ds,
+ &peer->endpoint_cache);
+ else
+ dev_kfree_skb(skb);
+ if (likely(!ret))
+ peer->tx_bytes += skb_len;
+ read_unlock_bh(&peer->endpoint_lock);
+
+ return ret;
+}
+
+int wg_socket_send_buffer_to_peer(struct wg_peer *peer, void *buffer,
+ size_t len, u8 ds)
+{
+ struct sk_buff *skb = alloc_skb(len + SKB_HEADER_LEN, GFP_ATOMIC);
+
+ if (unlikely(!skb))
+ return -ENOMEM;
+
+ skb_reserve(skb, SKB_HEADER_LEN);
+ skb_set_inner_network_header(skb, 0);
+ skb_put_data(skb, buffer, len);
+ return wg_socket_send_skb_to_peer(peer, skb, ds);
+}
+
+int wg_socket_send_buffer_as_reply_to_skb(struct wg_device *wg,
+ struct sk_buff *in_skb, void *buffer,
+ size_t len)
+{
+ int ret = 0;
+ struct sk_buff *skb;
+ struct endpoint endpoint;
+
+ if (unlikely(!in_skb))
+ return -EINVAL;
+ ret = wg_socket_endpoint_from_skb(&endpoint, in_skb);
+ if (unlikely(ret < 0))
+ return ret;
+
+ skb = alloc_skb(len + SKB_HEADER_LEN, GFP_ATOMIC);
+ if (unlikely(!skb))
+ return -ENOMEM;
+ skb_reserve(skb, SKB_HEADER_LEN);
+ skb_set_inner_network_header(skb, 0);
+ skb_put_data(skb, buffer, len);
+
+ if (endpoint.addr.sa_family == AF_INET)
+ ret = send4(wg, skb, &endpoint, 0, NULL);
+ else if (endpoint.addr.sa_family == AF_INET6)
+ ret = send6(wg, skb, &endpoint, 0, NULL);
+ /* No other possibilities if the endpoint is valid, which it is,
+ * as we checked above.
+ */
+
+ return ret;
+}
+
+int wg_socket_endpoint_from_skb(struct endpoint *endpoint,
+ const struct sk_buff *skb)
+{
+ memset(endpoint, 0, sizeof(*endpoint));
+ if (skb->protocol == htons(ETH_P_IP)) {
+ endpoint->addr4.sin_family = AF_INET;
+ endpoint->addr4.sin_port = udp_hdr(skb)->source;
+ endpoint->addr4.sin_addr.s_addr = ip_hdr(skb)->saddr;
+ endpoint->src4.s_addr = ip_hdr(skb)->daddr;
+ endpoint->src_if4 = skb->skb_iif;
+ } else if (skb->protocol == htons(ETH_P_IPV6)) {
+ endpoint->addr6.sin6_family = AF_INET6;
+ endpoint->addr6.sin6_port = udp_hdr(skb)->source;
+ endpoint->addr6.sin6_addr = ipv6_hdr(skb)->saddr;
+ endpoint->addr6.sin6_scope_id = ipv6_iface_scope_id(
+ &ipv6_hdr(skb)->saddr, skb->skb_iif);
+ endpoint->src6 = ipv6_hdr(skb)->daddr;
+ } else {
+ return -EINVAL;
+ }
+ return 0;
+}
+
+static bool endpoint_eq(const struct endpoint *a, const struct endpoint *b)
+{
+ return (a->addr.sa_family == AF_INET && b->addr.sa_family == AF_INET &&
+ a->addr4.sin_port == b->addr4.sin_port &&
+ a->addr4.sin_addr.s_addr == b->addr4.sin_addr.s_addr &&
+ a->src4.s_addr == b->src4.s_addr && a->src_if4 == b->src_if4) ||
+ (a->addr.sa_family == AF_INET6 &&
+ b->addr.sa_family == AF_INET6 &&
+ a->addr6.sin6_port == b->addr6.sin6_port &&
+ ipv6_addr_equal(&a->addr6.sin6_addr, &b->addr6.sin6_addr) &&
+ a->addr6.sin6_scope_id == b->addr6.sin6_scope_id &&
+ ipv6_addr_equal(&a->src6, &b->src6)) ||
+ unlikely(!a->addr.sa_family && !b->addr.sa_family);
+}
+
+void wg_socket_set_peer_endpoint(struct wg_peer *peer,
+ const struct endpoint *endpoint)
+{
+ /* First we check unlocked, in order to optimize, since it's pretty rare
+ * that an endpoint will change. If we happen to be mid-write, and two
+ * CPUs wind up writing the same thing or something slightly different,
+ * it doesn't really matter much either.
+ */
+ if (endpoint_eq(endpoint, &peer->endpoint))
+ return;
+ write_lock_bh(&peer->endpoint_lock);
+ if (endpoint->addr.sa_family == AF_INET) {
+ peer->endpoint.addr4 = endpoint->addr4;
+ peer->endpoint.src4 = endpoint->src4;
+ peer->endpoint.src_if4 = endpoint->src_if4;
+ } else if (endpoint->addr.sa_family == AF_INET6) {
+ peer->endpoint.addr6 = endpoint->addr6;
+ peer->endpoint.src6 = endpoint->src6;
+ } else {
+ goto out;
+ }
+ dst_cache_reset(&peer->endpoint_cache);
+out:
+ write_unlock_bh(&peer->endpoint_lock);
+}
+
+void wg_socket_set_peer_endpoint_from_skb(struct wg_peer *peer,
+ const struct sk_buff *skb)
+{
+ struct endpoint endpoint;
+
+ if (!wg_socket_endpoint_from_skb(&endpoint, skb))
+ wg_socket_set_peer_endpoint(peer, &endpoint);
+}
+
+void wg_socket_clear_peer_endpoint_src(struct wg_peer *peer)
+{
+ write_lock_bh(&peer->endpoint_lock);
+ memset(&peer->endpoint.src6, 0, sizeof(peer->endpoint.src6));
+ dst_cache_reset(&peer->endpoint_cache);
+ write_unlock_bh(&peer->endpoint_lock);
+}
+
+static int wg_receive(struct sock *sk, struct sk_buff *skb)
+{
+ struct wg_device *wg;
+
+ if (unlikely(!sk))
+ goto err;
+ wg = sk->sk_user_data;
+ if (unlikely(!wg))
+ goto err;
+ skb_mark_not_on_list(skb);
+ wg_packet_receive(wg, skb);
+ return 0;
+
+err:
+ kfree_skb(skb);
+ return 0;
+}
+
+static void sock_free(struct sock *sock)
+{
+ if (unlikely(!sock))
+ return;
+ sk_clear_memalloc(sock);
+ udp_tunnel_sock_release(sock->sk_socket);
+}
+
+static void set_sock_opts(struct socket *sock)
+{
+ sock->sk->sk_allocation = GFP_ATOMIC;
+ sock->sk->sk_sndbuf = INT_MAX;
+ sk_set_memalloc(sock->sk);
+}
+
+int wg_socket_init(struct wg_device *wg, u16 port)
+{
+ int ret;
+ struct udp_tunnel_sock_cfg cfg = {
+ .sk_user_data = wg,
+ .encap_type = 1,
+ .encap_rcv = wg_receive
+ };
+ struct socket *new4 = NULL, *new6 = NULL;
+ struct udp_port_cfg port4 = {
+ .family = AF_INET,
+ .local_ip.s_addr = htonl(INADDR_ANY),
+ .local_udp_port = htons(port),
+ .use_udp_checksums = true
+ };
+#if IS_ENABLED(CONFIG_IPV6)
+ int retries = 0;
+ struct udp_port_cfg port6 = {
+ .family = AF_INET6,
+ .local_ip6 = IN6ADDR_ANY_INIT,
+ .use_udp6_tx_checksums = true,
+ .use_udp6_rx_checksums = true,
+ .ipv6_v6only = true
+ };
+#endif
+
+#if IS_ENABLED(CONFIG_IPV6)
+retry:
+#endif
+
+ ret = udp_sock_create(wg->creating_net, &port4, &new4);
+ if (ret < 0) {
+ pr_err("%s: Could not create IPv4 socket\n", wg->dev->name);
+ return ret;
+ }
+ set_sock_opts(new4);
+ setup_udp_tunnel_sock(wg->creating_net, new4, &cfg);
+
+#if IS_ENABLED(CONFIG_IPV6)
+ if (ipv6_mod_enabled()) {
+ port6.local_udp_port = inet_sk(new4->sk)->inet_sport;
+ ret = udp_sock_create(wg->creating_net, &port6, &new6);
+ if (ret < 0) {
+ udp_tunnel_sock_release(new4);
+ if (ret == -EADDRINUSE && !port && retries++ < 100)
+ goto retry;
+ pr_err("%s: Could not create IPv6 socket\n",
+ wg->dev->name);
+ return ret;
+ }
+ set_sock_opts(new6);
+ setup_udp_tunnel_sock(wg->creating_net, new6, &cfg);
+ }
+#endif
+
+ wg_socket_reinit(wg, new4->sk, new6 ? new6->sk : NULL);
+ return 0;
+}
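/* Editorial note (not part of the patch): when @port is 0 the kernel
 * picks an ephemeral port for the v4 socket, and the v6 socket must then
 * bind that exact same port number. If another socket grabs the port in
 * the meantime (-EADDRINUSE), the v4 socket is released and the whole
 * sequence is retried with a fresh ephemeral port, up to 100 times.
 */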
+
+void wg_socket_reinit(struct wg_device *wg, struct sock *new4,
+ struct sock *new6)
+{
+ struct sock *old4, *old6;
+
+ mutex_lock(&wg->socket_update_lock);
+ old4 = rcu_dereference_protected(wg->sock4,
+ lockdep_is_held(&wg->socket_update_lock));
+ old6 = rcu_dereference_protected(wg->sock6,
+ lockdep_is_held(&wg->socket_update_lock));
+ rcu_assign_pointer(wg->sock4, new4);
+ rcu_assign_pointer(wg->sock6, new6);
+ if (new4)
+ wg->incoming_port = ntohs(inet_sk(new4)->inet_sport);
+ mutex_unlock(&wg->socket_update_lock);
+ synchronize_rcu();
+ synchronize_net();
+ sock_free(old4);
+ sock_free(old6);
+}
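/* Editorial note (not part of the patch): wg_socket_reinit() is the
 * classic RCU publish-then-reclaim sequence. Readers in send4()/send6()
 * only touch wg->sock4/wg->sock6 via rcu_dereference_bh() under
 * rcu_read_lock_bh(), so once synchronize_rcu() and synchronize_net()
 * return, no reader can still hold old4/old6 and sock_free() may safely
 * release them.
 */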
diff --git a/drivers/net/wireguard/socket.h b/drivers/net/wireguard/socket.h
new file mode 100644
index 000000000000..bab5848efbcd
--- /dev/null
+++ b/drivers/net/wireguard/socket.h
@@ -0,0 +1,44 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2015-2019 Jason A. Donenfeld <Jason@zx2c4.com>. All Rights Reserved.
+ */
+
+#ifndef _WG_SOCKET_H
+#define _WG_SOCKET_H
+
+#include <linux/netdevice.h>
+#include <linux/udp.h>
+#include <linux/if_vlan.h>
+#include <linux/if_ether.h>
+
+int wg_socket_init(struct wg_device *wg, u16 port);
+void wg_socket_reinit(struct wg_device *wg, struct sock *new4,
+ struct sock *new6);
+int wg_socket_send_buffer_to_peer(struct wg_peer *peer, void *data,
+ size_t len, u8 ds);
+int wg_socket_send_skb_to_peer(struct wg_peer *peer, struct sk_buff *skb,
+ u8 ds);
+int wg_socket_send_buffer_as_reply_to_skb(struct wg_device *wg,
+ struct sk_buff *in_skb,
+ void *out_buffer, size_t len);
+
+int wg_socket_endpoint_from_skb(struct endpoint *endpoint,
+ const struct sk_buff *skb);
+void wg_socket_set_peer_endpoint(struct wg_peer *peer,
+ const struct endpoint *endpoint);
+void wg_socket_set_peer_endpoint_from_skb(struct wg_peer *peer,
+ const struct sk_buff *skb);
+void wg_socket_clear_peer_endpoint_src(struct wg_peer *peer);
+
+#if defined(CONFIG_DYNAMIC_DEBUG) || defined(DEBUG)
+#define net_dbg_skb_ratelimited(fmt, dev, skb, ...) do { \
+ struct endpoint __endpoint; \
+ wg_socket_endpoint_from_skb(&__endpoint, skb); \
+ net_dbg_ratelimited(fmt, dev, &__endpoint.addr, \
+ ##__VA_ARGS__); \
+ } while (0)
+#else
+#define net_dbg_skb_ratelimited(fmt, dev, skb, ...)
+#endif
+
+#endif /* _WG_SOCKET_H */
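A minimal usage sketch for the send half of this API (hedged: the calling
function and its error handling are hypothetical; only the
wg_socket_send_buffer_to_peer() signature comes from the header above):

	/* Send a small control buffer to a peer with default DSCP (ds = 0). */
	static int example_send_control(struct wg_peer *peer, void *buf, size_t len)
	{
		int ret = wg_socket_send_buffer_to_peer(peer, buf, len, 0);

		if (ret)
			net_dbg_ratelimited("control send failed: %d\n", ret);
		return ret;
	}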
diff --git a/drivers/net/wireguard/timers.c b/drivers/net/wireguard/timers.c
new file mode 100644
index 000000000000..d54d32ac9bc4
--- /dev/null
+++ b/drivers/net/wireguard/timers.c
@@ -0,0 +1,243 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2015-2019 Jason A. Donenfeld <Jason@zx2c4.com>. All Rights Reserved.
+ */
+
+#include "timers.h"
+#include "device.h"
+#include "peer.h"
+#include "queueing.h"
+#include "socket.h"
+
+/*
+ * - Timer for retransmitting the handshake if we don't hear back after
+ * `REKEY_TIMEOUT + jitter` ms.
+ *
+ * - Timer for sending an empty packet if we have received a packet but have
+ * not sent one back within `KEEPALIVE_TIMEOUT` ms.
+ *
+ * - Timer for initiating a new handshake if we have sent a packet but have
+ * not received one back (even an empty one) within `(KEEPALIVE_TIMEOUT +
+ * REKEY_TIMEOUT) + jitter` ms.
+ *
+ * - Timer for zeroing out all ephemeral keys after `(REJECT_AFTER_TIME * 3)` ms
+ * if no new keys have been received.
+ *
+ * - Timer for, if enabled, sending an empty authenticated packet every user-
+ * specified seconds.
+ */
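/* Editorial sketch (not part of the patch): in jiffies, the two jittered
 * deadlines described above work out to
 *
 *	retransmit    = jiffies + REKEY_TIMEOUT * HZ +
 *			prandom_u32_max(REKEY_TIMEOUT_JITTER_MAX_JIFFIES);
 *	new_handshake = jiffies + (KEEPALIVE_TIMEOUT + REKEY_TIMEOUT) * HZ +
 *			prandom_u32_max(REKEY_TIMEOUT_JITTER_MAX_JIFFIES);
 *
 * matching the mod_peer_timer() calls in wg_timers_handshake_initiated()
 * and wg_timers_data_sent() below.
 */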
+
+static inline void mod_peer_timer(struct wg_peer *peer,
+ struct timer_list *timer,
+ unsigned long expires)
+{
+ rcu_read_lock_bh();
+ if (likely(netif_running(peer->device->dev) &&
+ !READ_ONCE(peer->is_dead)))
+ mod_timer(timer, expires);
+ rcu_read_unlock_bh();
+}
+
+static void wg_expired_retransmit_handshake(struct timer_list *timer)
+{
+ struct wg_peer *peer = from_timer(peer, timer,
+ timer_retransmit_handshake);
+
+ if (peer->timer_handshake_attempts > MAX_TIMER_HANDSHAKES) {
+ pr_debug("%s: Handshake for peer %llu (%pISpfsc) did not complete after %d attempts, giving up\n",
+ peer->device->dev->name, peer->internal_id,
+ &peer->endpoint.addr, MAX_TIMER_HANDSHAKES + 2);
+
+ del_timer(&peer->timer_send_keepalive);
+ /* If we have tried unsuccessfully for too long to make a handshake,
+ * we drop all packets without a keypair and don't try again.
+ */
+ wg_packet_purge_staged_packets(peer);
+
+ /* We set a timer for destroying any residue that might be left
+ * of a partial exchange.
+ */
+ if (!timer_pending(&peer->timer_zero_key_material))
+ mod_peer_timer(peer, &peer->timer_zero_key_material,
+ jiffies + REJECT_AFTER_TIME * 3 * HZ);
+ } else {
+ ++peer->timer_handshake_attempts;
+ pr_debug("%s: Handshake for peer %llu (%pISpfsc) did not complete after %d seconds, retrying (try %d)\n",
+ peer->device->dev->name, peer->internal_id,
+ &peer->endpoint.addr, REKEY_TIMEOUT,
+ peer->timer_handshake_attempts + 1);
+
+ /* We clear the endpoint's source address, in case this is
+ * the cause of trouble.
+ */
+ wg_socket_clear_peer_endpoint_src(peer);
+
+ wg_packet_send_queued_handshake_initiation(peer, true);
+ }
+}
+
+static void wg_expired_send_keepalive(struct timer_list *timer)
+{
+ struct wg_peer *peer = from_timer(peer, timer, timer_send_keepalive);
+
+ wg_packet_send_keepalive(peer);
+ if (peer->timer_need_another_keepalive) {
+ peer->timer_need_another_keepalive = false;
+ mod_peer_timer(peer, &peer->timer_send_keepalive,
+ jiffies + KEEPALIVE_TIMEOUT * HZ);
+ }
+}
+
+static void wg_expired_new_handshake(struct timer_list *timer)
+{
+ struct wg_peer *peer = from_timer(peer, timer, timer_new_handshake);
+
+ pr_debug("%s: Retrying handshake with peer %llu (%pISpfsc) because we stopped hearing back after %d seconds\n",
+ peer->device->dev->name, peer->internal_id,
+ &peer->endpoint.addr, KEEPALIVE_TIMEOUT + REKEY_TIMEOUT);
+ /* We clear the endpoint's source address, in case this is the cause
+ * of trouble.
+ */
+ wg_socket_clear_peer_endpoint_src(peer);
+ wg_packet_send_queued_handshake_initiation(peer, false);
+}
+
+static void wg_expired_zero_key_material(struct timer_list *timer)
+{
+ struct wg_peer *peer = from_timer(peer, timer, timer_zero_key_material);
+
+ rcu_read_lock_bh();
+ if (!READ_ONCE(peer->is_dead)) {
+ wg_peer_get(peer);
+ if (!queue_work(peer->device->handshake_send_wq,
+ &peer->clear_peer_work))
+ /* If the work was already on the queue, we want to drop
+ * the extra reference.
+ */
+ wg_peer_put(peer);
+ }
+ rcu_read_unlock_bh();
+}
+
+static void wg_queued_expired_zero_key_material(struct work_struct *work)
+{
+ struct wg_peer *peer = container_of(work, struct wg_peer,
+ clear_peer_work);
+
+ pr_debug("%s: Zeroing out all keys for peer %llu (%pISpfsc), since we haven't received a new one in %d seconds\n",
+ peer->device->dev->name, peer->internal_id,
+ &peer->endpoint.addr, REJECT_AFTER_TIME * 3);
+ wg_noise_handshake_clear(&peer->handshake);
+ wg_noise_keypairs_clear(&peer->keypairs);
+ wg_peer_put(peer);
+}
+
+static void wg_expired_send_persistent_keepalive(struct timer_list *timer)
+{
+ struct wg_peer *peer = from_timer(peer, timer,
+ timer_persistent_keepalive);
+
+ if (likely(peer->persistent_keepalive_interval))
+ wg_packet_send_keepalive(peer);
+}
+
+/* Should be called after an authenticated data packet is sent. */
+void wg_timers_data_sent(struct wg_peer *peer)
+{
+ if (!timer_pending(&peer->timer_new_handshake))
+ mod_peer_timer(peer, &peer->timer_new_handshake,
+ jiffies + (KEEPALIVE_TIMEOUT + REKEY_TIMEOUT) * HZ +
+ prandom_u32_max(REKEY_TIMEOUT_JITTER_MAX_JIFFIES));
+}
+
+/* Should be called after an authenticated data packet is received. */
+void wg_timers_data_received(struct wg_peer *peer)
+{
+ if (likely(netif_running(peer->device->dev))) {
+ if (!timer_pending(&peer->timer_send_keepalive))
+ mod_peer_timer(peer, &peer->timer_send_keepalive,
+ jiffies + KEEPALIVE_TIMEOUT * HZ);
+ else
+ peer->timer_need_another_keepalive = true;
+ }
+}
+
+/* Should be called after any type of authenticated packet is sent, whether
+ * keepalive, data, or handshake.
+ */
+void wg_timers_any_authenticated_packet_sent(struct wg_peer *peer)
+{
+ del_timer(&peer->timer_send_keepalive);
+}
+
+/* Should be called after any type of authenticated packet is received, whether
+ * keepalive, data, or handshake.
+ */
+void wg_timers_any_authenticated_packet_received(struct wg_peer *peer)
+{
+ del_timer(&peer->timer_new_handshake);
+}
+
+/* Should be called after a handshake initiation message is sent. */
+void wg_timers_handshake_initiated(struct wg_peer *peer)
+{
+ mod_peer_timer(peer, &peer->timer_retransmit_handshake,
+ jiffies + REKEY_TIMEOUT * HZ +
+ prandom_u32_max(REKEY_TIMEOUT_JITTER_MAX_JIFFIES));
+}
+
+/* Should be called after a handshake response message is received and processed
+ * or when getting key confirmation via the first data message.
+ */
+void wg_timers_handshake_complete(struct wg_peer *peer)
+{
+ del_timer(&peer->timer_retransmit_handshake);
+ peer->timer_handshake_attempts = 0;
+ peer->sent_lastminute_handshake = false;
+ ktime_get_real_ts64(&peer->walltime_last_handshake);
+}
+
+/* Should be called after an ephemeral key is created, which is before sending a
+ * handshake response or after receiving a handshake response.
+ */
+void wg_timers_session_derived(struct wg_peer *peer)
+{
+ mod_peer_timer(peer, &peer->timer_zero_key_material,
+ jiffies + REJECT_AFTER_TIME * 3 * HZ);
+}
+
+/* Should be called before a packet with authentication, whether
+ * keepalive, data, or handshake, is sent, or after one is received.
+ */
+void wg_timers_any_authenticated_packet_traversal(struct wg_peer *peer)
+{
+ if (peer->persistent_keepalive_interval)
+ mod_peer_timer(peer, &peer->timer_persistent_keepalive,
+ jiffies + peer->persistent_keepalive_interval * HZ);
+}
+
+void wg_timers_init(struct wg_peer *peer)
+{
+ timer_setup(&peer->timer_retransmit_handshake,
+ wg_expired_retransmit_handshake, 0);
+ timer_setup(&peer->timer_send_keepalive, wg_expired_send_keepalive, 0);
+ timer_setup(&peer->timer_new_handshake, wg_expired_new_handshake, 0);
+ timer_setup(&peer->timer_zero_key_material,
+ wg_expired_zero_key_material, 0);
+ timer_setup(&peer->timer_persistent_keepalive,
+ wg_expired_send_persistent_keepalive, 0);
+ INIT_WORK(&peer->clear_peer_work, wg_queued_expired_zero_key_material);
+ peer->timer_handshake_attempts = 0;
+ peer->sent_lastminute_handshake = false;
+ peer->timer_need_another_keepalive = false;
+}
+
+void wg_timers_stop(struct wg_peer *peer)
+{
+ del_timer_sync(&peer->timer_retransmit_handshake);
+ del_timer_sync(&peer->timer_send_keepalive);
+ del_timer_sync(&peer->timer_new_handshake);
+ del_timer_sync(&peer->timer_zero_key_material);
+ del_timer_sync(&peer->timer_persistent_keepalive);
+ flush_work(&peer->clear_peer_work);
+}
diff --git a/drivers/net/wireguard/timers.h b/drivers/net/wireguard/timers.h
new file mode 100644
index 000000000000..f0653dcb1326
--- /dev/null
+++ b/drivers/net/wireguard/timers.h
@@ -0,0 +1,31 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2015-2019 Jason A. Donenfeld <Jason@zx2c4.com>. All Rights Reserved.
+ */
+
+#ifndef _WG_TIMERS_H
+#define _WG_TIMERS_H
+
+#include <linux/ktime.h>
+
+struct wg_peer;
+
+void wg_timers_init(struct wg_peer *peer);
+void wg_timers_stop(struct wg_peer *peer);
+void wg_timers_data_sent(struct wg_peer *peer);
+void wg_timers_data_received(struct wg_peer *peer);
+void wg_timers_any_authenticated_packet_sent(struct wg_peer *peer);
+void wg_timers_any_authenticated_packet_received(struct wg_peer *peer);
+void wg_timers_handshake_initiated(struct wg_peer *peer);
+void wg_timers_handshake_complete(struct wg_peer *peer);
+void wg_timers_session_derived(struct wg_peer *peer);
+void wg_timers_any_authenticated_packet_traversal(struct wg_peer *peer);
+
+static inline bool wg_birthdate_has_expired(u64 birthday_nanoseconds,
+ u64 expiration_seconds)
+{
+ return (s64)(birthday_nanoseconds + expiration_seconds * NSEC_PER_SEC)
+ <= (s64)ktime_get_coarse_boottime_ns();
+}
+
+#endif /* _WG_TIMERS_H */
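A hedged sketch of the intended use of the inline above (the helper name
and variable are illustrative; REJECT_AFTER_TIME comes from the series'
messages.h, where WireGuard defines it as 180 seconds):

	/* True once birthdate_ns + REJECT_AFTER_TIME * NSEC_PER_SEC <= now,
	 * where both stamps come from ktime_get_coarse_boottime_ns(). The
	 * s64 casts keep the comparison correct across u64 wraparound.
	 */
	static bool example_keypair_stale(u64 birthdate_ns)
	{
		return wg_birthdate_has_expired(birthdate_ns, REJECT_AFTER_TIME);
	}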
diff --git a/drivers/net/wireguard/version.h b/drivers/net/wireguard/version.h
new file mode 100644
index 000000000000..a1a269a11634
--- /dev/null
+++ b/drivers/net/wireguard/version.h
@@ -0,0 +1 @@
+#define WIREGUARD_VERSION "1.0.0"
diff --git a/drivers/net/wireless/ath/Kconfig b/drivers/net/wireless/ath/Kconfig
index 7b90b8546162..b10972b6cba4 100644
--- a/drivers/net/wireless/ath/Kconfig
+++ b/drivers/net/wireless/ath/Kconfig
@@ -62,5 +62,6 @@ source "drivers/net/wireless/ath/ar5523/Kconfig"
source "drivers/net/wireless/ath/wil6210/Kconfig"
source "drivers/net/wireless/ath/ath10k/Kconfig"
source "drivers/net/wireless/ath/wcn36xx/Kconfig"
+source "drivers/net/wireless/ath/ath11k/Kconfig"
endif
diff --git a/drivers/net/wireless/ath/Makefile b/drivers/net/wireless/ath/Makefile
index ee2b2431e5a3..8e4ae9de5ae4 100644
--- a/drivers/net/wireless/ath/Makefile
+++ b/drivers/net/wireless/ath/Makefile
@@ -7,6 +7,7 @@ obj-$(CONFIG_AR5523) += ar5523/
obj-$(CONFIG_WIL6210) += wil6210/
obj-$(CONFIG_ATH10K) += ath10k/
obj-$(CONFIG_WCN36XX) += wcn36xx/
+obj-$(CONFIG_ATH11K) += ath11k/
obj-$(CONFIG_ATH_COMMON) += ath.o
diff --git a/drivers/net/wireless/ath/ar5523/ar5523.c b/drivers/net/wireless/ath/ar5523/ar5523.c
index da2d179430ca..49cc4b7ed516 100644
--- a/drivers/net/wireless/ath/ar5523/ar5523.c
+++ b/drivers/net/wireless/ath/ar5523/ar5523.c
@@ -74,7 +74,7 @@ static void ar5523_read_reply(struct ar5523 *ar, struct ar5523_cmd_hdr *hdr,
if (cmd->odata) {
if (cmd->olen < olen) {
- ar5523_err(ar, "olen to small %d < %d\n",
+ ar5523_err(ar, "olen too small %d < %d\n",
cmd->olen, olen);
cmd->olen = 0;
cmd->res = -EOVERFLOW;
@@ -1770,6 +1770,8 @@ static const struct usb_device_id ar5523_id_table[] = {
AR5523_DEVICE_UX(0x0846, 0x4300), /* Netgear / WG111U */
AR5523_DEVICE_UG(0x0846, 0x4250), /* Netgear / WG111T */
AR5523_DEVICE_UG(0x0846, 0x5f00), /* Netgear / WPN111 */
+ AR5523_DEVICE_UG(0x083a, 0x4506), /* SMC / EZ Connect
+ SMCWUSBT-G2 */
AR5523_DEVICE_UG(0x157e, 0x3006), /* Umedia / AR5523_1 */
AR5523_DEVICE_UX(0x157e, 0x3205), /* Umedia / AR5523_2 */
AR5523_DEVICE_UG(0x157e, 0x3006), /* Umedia / TEW444UBEU */
diff --git a/drivers/net/wireless/ath/ath10k/ahb.c b/drivers/net/wireless/ath/ath10k/ahb.c
index f80854180e21..ed87bc00f2aa 100644
--- a/drivers/net/wireless/ath/ath10k/ahb.c
+++ b/drivers/net/wireless/ath/ath10k/ahb.c
@@ -458,7 +458,7 @@ static int ath10k_ahb_resource_init(struct ath10k *ar)
ar_ahb->mem_len = resource_size(res);
- ar_ahb->gcc_mem = ioremap_nocache(ATH10K_GCC_REG_BASE,
+ ar_ahb->gcc_mem = ioremap(ATH10K_GCC_REG_BASE,
ATH10K_GCC_REG_SIZE);
if (!ar_ahb->gcc_mem) {
ath10k_err(ar, "gcc mem ioremap error\n");
@@ -466,7 +466,7 @@ static int ath10k_ahb_resource_init(struct ath10k *ar)
goto err_mem_unmap;
}
- ar_ahb->tcsr_mem = ioremap_nocache(ATH10K_TCSR_REG_BASE,
+ ar_ahb->tcsr_mem = ioremap(ATH10K_TCSR_REG_BASE,
ATH10K_TCSR_REG_SIZE);
if (!ar_ahb->tcsr_mem) {
ath10k_err(ar, "tcsr mem ioremap error\n");
diff --git a/drivers/net/wireless/ath/ath10k/bmi.c b/drivers/net/wireless/ath/ath10k/bmi.c
index 95dc4be82e5c..ea908107581d 100644
--- a/drivers/net/wireless/ath/ath10k/bmi.c
+++ b/drivers/net/wireless/ath/ath10k/bmi.c
@@ -346,6 +346,52 @@ int ath10k_bmi_execute(struct ath10k *ar, u32 address, u32 param, u32 *result)
return 0;
}
+static int ath10k_bmi_lz_data_large(struct ath10k *ar, const void *buffer, u32 length)
+{
+ struct bmi_cmd *cmd;
+ u32 hdrlen = sizeof(cmd->id) + sizeof(cmd->lz_data);
+ u32 txlen;
+ int ret;
+ size_t buf_len;
+
+ ath10k_dbg(ar, ATH10K_DBG_BMI, "large bmi lz data buffer 0x%pK length %d\n",
+ buffer, length);
+
+ if (ar->bmi.done_sent) {
+ ath10k_warn(ar, "command disallowed\n");
+ return -EBUSY;
+ }
+
+ buf_len = sizeof(*cmd) + BMI_MAX_LARGE_DATA_SIZE - BMI_MAX_DATA_SIZE;
+ cmd = kzalloc(buf_len, GFP_KERNEL);
+ if (!cmd)
+ return -ENOMEM;
+
+ while (length) {
+ txlen = min(length, BMI_MAX_LARGE_DATA_SIZE - hdrlen);
+
+ WARN_ON_ONCE(txlen & 3);
+
+ cmd->id = __cpu_to_le32(BMI_LZ_DATA);
+ cmd->lz_data.len = __cpu_to_le32(txlen);
+ memcpy(cmd->lz_data.payload, buffer, txlen);
+
+ ret = ath10k_hif_exchange_bmi_msg(ar, cmd, hdrlen + txlen,
+ NULL, NULL);
+ if (ret) {
+ ath10k_warn(ar, "unable to write to the device\n");
+ kfree(cmd);
+ return ret;
+ }
+
+ buffer += txlen;
+ length -= txlen;
+ }
+
+ kfree(cmd);
+
+ return 0;
+}
+
int ath10k_bmi_lz_data(struct ath10k *ar, const void *buffer, u32 length)
{
struct bmi_cmd cmd;
@@ -430,7 +476,11 @@ int ath10k_bmi_fast_download(struct ath10k *ar,
if (trailer_len > 0)
memcpy(trailer, buffer + head_len, trailer_len);
- ret = ath10k_bmi_lz_data(ar, buffer, head_len);
+ if (ar->hw_params.bmi_large_size_download)
+ ret = ath10k_bmi_lz_data_large(ar, buffer, head_len);
+ else
+ ret = ath10k_bmi_lz_data(ar, buffer, head_len);
+
if (ret)
return ret;
diff --git a/drivers/net/wireless/ath/ath10k/bmi.h b/drivers/net/wireless/ath/ath10k/bmi.h
index ef3bdba43bed..f6fadcbdd86e 100644
--- a/drivers/net/wireless/ath/ath10k/bmi.h
+++ b/drivers/net/wireless/ath/ath10k/bmi.h
@@ -45,6 +45,15 @@
sizeof(u32) + \
sizeof(u32))
+/* Maximum data size used for large BMI transfers */
+#define BMI_MAX_LARGE_DATA_SIZE 2048
+
+/* len = cmd + addr + length */
+#define BMI_MAX_LARGE_CMDBUF_SIZE (BMI_MAX_LARGE_DATA_SIZE + \
+ sizeof(u32) + \
+ sizeof(u32) + \
+ sizeof(u32))
+
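/* Editorial note (not part of the patch): with three u32 header fields
 * (command id, address, length), BMI_MAX_LARGE_CMDBUF_SIZE works out to
 * 2048 + 12 = 2060 bytes; this is the size allocated for
 * ar_sdio->bmi_buf in the sdio.c hunk later in this patch.
 */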
/* BMI Commands */
enum bmi_cmd_id {
@@ -258,6 +267,7 @@ int ath10k_bmi_write_memory(struct ath10k *ar, u32 address,
int ath10k_bmi_execute(struct ath10k *ar, u32 address, u32 param, u32 *result);
int ath10k_bmi_lz_stream_start(struct ath10k *ar, u32 address);
int ath10k_bmi_lz_data(struct ath10k *ar, const void *buffer, u32 length);
+
int ath10k_bmi_fast_download(struct ath10k *ar, u32 address,
const void *buffer, u32 length);
int ath10k_bmi_read_soc_reg(struct ath10k *ar, u32 address, u32 *reg_val);
diff --git a/drivers/net/wireless/ath/ath10k/core.c b/drivers/net/wireless/ath/ath10k/core.c
index 4f76ba5d78a9..5ec16ce19b69 100644
--- a/drivers/net/wireless/ath/ath10k/core.c
+++ b/drivers/net/wireless/ath/ath10k/core.c
@@ -189,6 +189,7 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
.num_wds_entries = 0x20,
.uart_pin_workaround = true,
.tx_stats_over_pktlog = false,
+ .bmi_large_size_download = true,
},
{
.id = QCA6174_HW_2_1_VERSION,
@@ -714,18 +715,6 @@ static int ath10k_init_sdio(struct ath10k *ar, enum ath10k_firmware_mode mode)
if (ret)
return ret;
- /* Explicitly set fwlog prints to zero as target may turn it on
- * based on scratch registers.
- */
- ret = ath10k_bmi_read32(ar, hi_option_flag, &param);
- if (ret)
- return ret;
-
- param |= HI_OPTION_DISABLE_DBGLOG;
- ret = ath10k_bmi_write32(ar, hi_option_flag, param);
- if (ret)
- return ret;
-
return 0;
}
@@ -3231,6 +3220,8 @@ struct ath10k *ath10k_core_create(size_t priv_size, struct device *dev,
init_waitqueue_head(&ar->htt.empty_tx_wq);
init_waitqueue_head(&ar->wmi.tx_credits_wq);
+ skb_queue_head_init(&ar->htt.rx_indication_head);
+
init_completion(&ar->offchan_tx_completed);
INIT_WORK(&ar->offchan_tx_work, ath10k_offchan_tx_work);
skb_queue_head_init(&ar->offchan_tx_queue);
diff --git a/drivers/net/wireless/ath/ath10k/core.h b/drivers/net/wireless/ath/ath10k/core.h
index af68eb5d0776..5101bf2b5b15 100644
--- a/drivers/net/wireless/ath/ath10k/core.h
+++ b/drivers/net/wireless/ath/ath10k/core.h
@@ -124,6 +124,7 @@ struct ath10k_skb_cb {
struct ath10k_skb_rxcb {
dma_addr_t paddr;
struct hlist_node hlist;
+ u8 eid;
};
static inline struct ath10k_skb_cb *ATH10K_SKB_CB(struct sk_buff *skb)
@@ -1180,6 +1181,7 @@ struct ath10k {
struct {
/* protected by data_lock */
+ u32 rx_crc_err_drop;
u32 fw_crash_counter;
u32 fw_warm_reset_counter;
u32 fw_cold_reset_counter;
diff --git a/drivers/net/wireless/ath/ath10k/debug.c b/drivers/net/wireless/ath/ath10k/debug.c
index 04c50a26a4f4..e000677ac516 100644
--- a/drivers/net/wireless/ath/ath10k/debug.c
+++ b/drivers/net/wireless/ath/ath10k/debug.c
@@ -1094,6 +1094,7 @@ static const char ath10k_gstrings_stats[][ETH_GSTRING_LEN] = {
"d_rts_good",
"d_tx_power", /* in .5 dbM I think */
"d_rx_crc_err", /* fcs_bad */
+ "d_rx_crc_err_drop", /* frame with FCS error, dropped late in kernel */
"d_no_beacon",
"d_tx_mpdus_queued",
"d_tx_msdu_queued",
@@ -1193,6 +1194,7 @@ void ath10k_debug_get_et_stats(struct ieee80211_hw *hw,
data[i++] = pdev_stats->rts_good;
data[i++] = pdev_stats->chan_tx_power;
data[i++] = pdev_stats->fcs_bad;
+ data[i++] = ar->stats.rx_crc_err_drop;
data[i++] = pdev_stats->no_beacons;
data[i++] = pdev_stats->mpdu_enqued;
data[i++] = pdev_stats->msdu_enqued;
diff --git a/drivers/net/wireless/ath/ath10k/htc.c b/drivers/net/wireless/ath/ath10k/htc.c
index 1d4d1a1992fe..2248d6c022f4 100644
--- a/drivers/net/wireless/ath/ath10k/htc.c
+++ b/drivers/net/wireless/ath/ath10k/htc.c
@@ -270,7 +270,7 @@ ath10k_htc_process_lookahead_bundle(struct ath10k_htc *htc,
struct ath10k *ar = htc->ar;
int bundle_cnt = len / sizeof(*report);
- if (!bundle_cnt || (bundle_cnt > HTC_HOST_MAX_MSG_PER_RX_BUNDLE)) {
+ if (!bundle_cnt || (bundle_cnt > htc->max_msgs_per_htc_bundle)) {
ath10k_warn(ar, "Invalid lookahead bundle count: %d\n",
bundle_cnt);
return -EINVAL;
@@ -800,8 +800,8 @@ setup:
&ep->ul_pipe_id,
&ep->dl_pipe_id);
if (status) {
- ath10k_warn(ar, "unsupported HTC service id: %d\n",
- ep->service_id);
+ ath10k_dbg(ar, ATH10K_DBG_BOOT, "unsupported HTC service id: %d\n",
+ ep->service_id);
return status;
}
@@ -878,8 +878,8 @@ static bool ath10k_htc_pktlog_svc_supported(struct ath10k *ar)
&ul_pipe_id,
&dl_pipe_id);
if (status) {
- ath10k_warn(ar, "unsupported HTC service id: %d\n",
- ATH10K_HTC_SVC_ID_HTT_LOG_MSG);
+ ath10k_dbg(ar, ATH10K_DBG_BOOT, "unsupported HTC pktlog service id: %d\n",
+ ATH10K_HTC_SVC_ID_HTT_LOG_MSG);
return false;
}
diff --git a/drivers/net/wireless/ath/ath10k/htc.h b/drivers/net/wireless/ath/ath10k/htc.h
index f55d3caec61f..065c82d9d689 100644
--- a/drivers/net/wireless/ath/ath10k/htc.h
+++ b/drivers/net/wireless/ath/ath10k/htc.h
@@ -12,6 +12,7 @@
#include <linux/bug.h>
#include <linux/skbuff.h>
#include <linux/timer.h>
+#include <linux/bitfield.h>
struct ath10k;
@@ -39,7 +40,7 @@ struct ath10k;
* 4-byte aligned.
*/
-#define HTC_HOST_MAX_MSG_PER_RX_BUNDLE 8
+#define HTC_HOST_MAX_MSG_PER_RX_BUNDLE 32
enum ath10k_htc_tx_flags {
ATH10K_HTC_FLAG_NEED_CREDIT_UPDATE = 0x01,
@@ -49,9 +50,27 @@ enum ath10k_htc_tx_flags {
enum ath10k_htc_rx_flags {
ATH10K_HTC_FLAGS_RECV_1MORE_BLOCK = 0x01,
ATH10K_HTC_FLAG_TRAILER_PRESENT = 0x02,
- ATH10K_HTC_FLAG_BUNDLE_MASK = 0xF0
};
+#define ATH10K_HTC_FLAG_BUNDLE_MASK GENMASK(7, 4)
+
+/* flag bits 2-3 carry bits 4-5 of the extra bundle count */
+#define ATH10K_HTC_BUNDLE_EXTRA_MASK GENMASK(3, 2)
+#define ATH10K_HTC_BUNDLE_EXTRA_SHIFT 4
+
+static inline unsigned int ath10k_htc_get_bundle_count(u8 max_msgs, u8 flags)
+{
+ unsigned int count, extra_count = 0;
+
+ count = FIELD_GET(ATH10K_HTC_FLAG_BUNDLE_MASK, flags);
+
+ if (max_msgs > 16)
+ extra_count = FIELD_GET(ATH10K_HTC_BUNDLE_EXTRA_MASK, flags) <<
+ ATH10K_HTC_BUNDLE_EXTRA_SHIFT;
+
+ return count + extra_count;
+}
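/* Worked example (editorial, not part of the patch): with
 * max_msgs_per_htc_bundle = 32 and flags = 0x24, bits 7-4 yield a base
 * count of 2 and bits 3-2 yield 0b01, contributing 1 << 4 = 16, so
 * ath10k_htc_get_bundle_count() returns 18. With max_msgs <= 16 the
 * extra bits are ignored and the result would be just 2.
 */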
+
struct ath10k_htc_hdr {
u8 eid; /* @enum ath10k_htc_ep_id */
u8 flags; /* @enum ath10k_htc_tx_flags, ath10k_htc_rx_flags */
diff --git a/drivers/net/wireless/ath/ath10k/htt.h b/drivers/net/wireless/ath/ath10k/htt.h
index 30c080094af1..4a12564fc30e 100644
--- a/drivers/net/wireless/ath/ath10k/htt.h
+++ b/drivers/net/wireless/ath/ath10k/htt.h
@@ -1869,6 +1869,8 @@ struct ath10k_htt {
struct ath10k *ar;
enum ath10k_htc_ep_id eid;
+ struct sk_buff_head rx_indication_head;
+
u8 target_version_major;
u8 target_version_minor;
struct completion target_version_received;
@@ -2283,6 +2285,7 @@ int ath10k_htt_mgmt_tx(struct ath10k_htt *htt, struct sk_buff *msdu);
void ath10k_htt_rx_pktlog_completion_handler(struct ath10k *ar,
struct sk_buff *skb);
int ath10k_htt_txrx_compl_task(struct ath10k *ar, int budget);
+int ath10k_htt_rx_hl_indication(struct ath10k *ar, int budget);
void ath10k_htt_set_tx_ops(struct ath10k_htt *htt);
void ath10k_htt_set_rx_ops(struct ath10k_htt *htt);
#endif
diff --git a/drivers/net/wireless/ath/ath10k/htt_rx.c b/drivers/net/wireless/ath/ath10k/htt_rx.c
index d95b63f133ab..38a5814cf345 100644
--- a/drivers/net/wireless/ath/ath10k/htt_rx.c
+++ b/drivers/net/wireless/ath/ath10k/htt_rx.c
@@ -1285,6 +1285,13 @@ static void ath10k_process_rx(struct ath10k *ar, struct sk_buff *skb)
status = IEEE80211_SKB_RXCB(skb);
+ if (!(ar->filter_flags & FIF_FCSFAIL) &&
+ status->flag & RX_FLAG_FAILED_FCS_CRC) {
+ ar->stats.rx_crc_err_drop++;
+ dev_kfree_skb_any(skb);
+ return;
+ }
+
ath10k_dbg(ar, ATH10K_DBG_DATA,
"rx skb %pK len %u peer %pM %s %s sn %u %s%s%s%s%s%s %srate_idx %u vht_nss %u freq %u band %u flag 0x%x fcs-err %i mic-err %i amsdu-more %i\n",
skb,
@@ -2133,7 +2140,7 @@ static bool ath10k_htt_rx_pn_check_replay_hl(struct ath10k *ar,
if (last_pn_valid)
pn_invalid = ath10k_htt_rx_pn_cmp48(&new_pn, last_pn);
else
- peer->tids_last_pn_valid[tid] = 1;
+ peer->tids_last_pn_valid[tid] = true;
if (!pn_invalid)
last_pn->pn48 = new_pn.pn48;
@@ -2196,8 +2203,8 @@ static bool ath10k_htt_rx_proc_rx_ind_hl(struct ath10k_htt *htt,
HTT_RX_IND_MPDU_STATUS_OK &&
mpdu_ranges->mpdu_range_status !=
HTT_RX_IND_MPDU_STATUS_TKIP_MIC_ERR) {
- ath10k_warn(ar, "MPDU range status: %d\n",
- mpdu_ranges->mpdu_range_status);
+ ath10k_dbg(ar, ATH10K_DBG_HTT, "htt mpdu_range_status %d\n",
+ mpdu_ranges->mpdu_range_status);
goto err;
}
@@ -2235,8 +2242,10 @@ static bool ath10k_htt_rx_proc_rx_ind_hl(struct ath10k_htt *htt,
hdr = (struct ieee80211_hdr *)skb->data;
qos = ieee80211_is_data_qos(hdr->frame_control);
+
rx_status = IEEE80211_SKB_RXCB(skb);
- rx_status->chains |= BIT(0);
+ memset(rx_status, 0, sizeof(*rx_status));
+
if (rx->ppdu.combined_rssi == 0) {
/* SDIO firmware does not provide signal */
rx_status->signal = 0;
@@ -2350,7 +2359,10 @@ static bool ath10k_htt_rx_proc_rx_ind_hl(struct ath10k_htt *htt,
memcpy(skb->data + offset, &qos_ctrl, IEEE80211_QOS_CTL_LEN);
}
- ieee80211_rx_ni(ar->hw, skb);
+ if (ar->napi.dev)
+ ieee80211_rx_napi(ar->hw, NULL, skb, &ar->napi);
+ else
+ ieee80211_rx_ni(ar->hw, skb);
/* We have delivered the skb to the upper layers (mac80211) so we
* must not free it.
@@ -3751,14 +3763,12 @@ bool ath10k_htt_t2h_msg_handler(struct ath10k *ar, struct sk_buff *skb)
break;
}
case HTT_T2H_MSG_TYPE_RX_IND:
- if (ar->bus_param.dev_type == ATH10K_DEV_TYPE_HL)
- return ath10k_htt_rx_proc_rx_ind_hl(htt,
- &resp->rx_ind_hl,
- skb,
- HTT_RX_PN_CHECK,
- HTT_RX_NON_TKIP_MIC);
- else
+ if (ar->bus_param.dev_type != ATH10K_DEV_TYPE_HL) {
ath10k_htt_rx_proc_rx_ind_ll(htt, &resp->rx_ind);
+ } else {
+ skb_queue_tail(&htt->rx_indication_head, skb);
+ return false;
+ }
break;
case HTT_T2H_MSG_TYPE_PEER_MAP: {
struct htt_peer_map_event ev = {
@@ -3948,6 +3958,37 @@ static int ath10k_htt_rx_deliver_msdu(struct ath10k *ar, int quota, int budget)
return quota;
}
+int ath10k_htt_rx_hl_indication(struct ath10k *ar, int budget)
+{
+ struct htt_resp *resp;
+ struct ath10k_htt *htt = &ar->htt;
+ struct sk_buff *skb;
+ bool release;
+ int quota;
+
+ for (quota = 0; quota < budget; quota++) {
+ skb = skb_dequeue(&htt->rx_indication_head);
+ if (!skb)
+ break;
+
+ resp = (struct htt_resp *)skb->data;
+
+ release = ath10k_htt_rx_proc_rx_ind_hl(htt,
+ &resp->rx_ind_hl,
+ skb,
+ HTT_RX_PN_CHECK,
+ HTT_RX_NON_TKIP_MIC);
+
+ if (release)
+ dev_kfree_skb_any(skb);
+
+ ath10k_dbg(ar, ATH10K_DBG_HTT, "rx indication poll pending count:%d\n",
+ skb_queue_len(&htt->rx_indication_head));
+ }
+ return quota;
+}
+EXPORT_SYMBOL(ath10k_htt_rx_hl_indication);
+
int ath10k_htt_txrx_compl_task(struct ath10k *ar, int budget)
{
struct ath10k_htt *htt = &ar->htt;
diff --git a/drivers/net/wireless/ath/ath10k/hw.h b/drivers/net/wireless/ath/ath10k/hw.h
index 35a362329a4f..775fd62fb92d 100644
--- a/drivers/net/wireless/ath/ath10k/hw.h
+++ b/drivers/net/wireless/ath/ath10k/hw.h
@@ -613,6 +613,9 @@ struct ath10k_hw_params {
/* target supporting fw download via diag ce */
bool fw_diag_ce_download;
+ /* target supporting fw download via large size BMI */
+ bool bmi_large_size_download;
+
/* need to set uart pin if disable uart print, workaround for a
* firmware bug
*/
@@ -813,7 +816,7 @@ ath10k_is_rssi_enable(struct ath10k_hw_params *hw,
#define TARGET_10_4_TX_DBG_LOG_SIZE 1024
#define TARGET_10_4_NUM_WDS_ENTRIES 32
-#define TARGET_10_4_DMA_BURST_SIZE 0
+#define TARGET_10_4_DMA_BURST_SIZE 1
#define TARGET_10_4_MAC_AGGR_DELIM 0
#define TARGET_10_4_RX_SKIP_DEFRAG_TIMEOUT_DUP_DETECTION_CHECK 1
#define TARGET_10_4_VOW_CONFIG 0
diff --git a/drivers/net/wireless/ath/ath10k/mac.c b/drivers/net/wireless/ath/ath10k/mac.c
index 978f0037ed52..7fee35ff966b 100644
--- a/drivers/net/wireless/ath/ath10k/mac.c
+++ b/drivers/net/wireless/ath/ath10k/mac.c
@@ -1098,7 +1098,7 @@ static int ath10k_monitor_vdev_stop(struct ath10k *ar)
ret = ath10k_wmi_vdev_stop(ar, ar->monitor_vdev_id);
if (ret)
- ath10k_warn(ar, "failed to to request monitor vdev %i stop: %d\n",
+ ath10k_warn(ar, "failed to request monitor vdev %i stop: %d\n",
ar->monitor_vdev_id, ret);
ret = ath10k_vdev_setup_sync(ar);
@@ -6329,6 +6329,9 @@ static int ath10k_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
if (sta && sta->tdls)
ath10k_wmi_peer_set_param(ar, arvif->vdev_id, sta->addr,
ar->wmi.peer_param->authorize, 1);
+ else if (sta && cmd == SET_KEY && (key->flags & IEEE80211_KEY_FLAG_PAIRWISE))
+ ath10k_wmi_peer_set_param(ar, arvif->vdev_id, peer_addr,
+ ar->wmi.peer_param->authorize, 1);
exit:
mutex_unlock(&ar->conf_mutex);
@@ -8908,6 +8911,7 @@ int ath10k_mac_register(struct ath10k *ar)
WMI_PNO_MAX_SCHED_SCAN_PLAN_INT;
ar->hw->wiphy->max_sched_scan_plan_iterations =
WMI_PNO_MAX_SCHED_SCAN_PLAN_ITRNS;
+ ar->hw->wiphy->features |= NL80211_FEATURE_ND_RANDOM_MAC_ADDR;
}
ar->hw->vif_data_size = sizeof(struct ath10k_vif);
diff --git a/drivers/net/wireless/ath/ath10k/pci.c b/drivers/net/wireless/ath/ath10k/pci.c
index bb44f5a0941b..ded7a220a4aa 100644
--- a/drivers/net/wireless/ath/ath10k/pci.c
+++ b/drivers/net/wireless/ath/ath10k/pci.c
@@ -1578,7 +1578,7 @@ static int ath10k_pci_set_ram_config(struct ath10k *ar, u32 config)
return 0;
}
-/* if an error happened returns < 0, otherwise the length */
+/* Always returns the length */
static int ath10k_pci_dump_memory_sram(struct ath10k *ar,
const struct ath10k_mem_region *region,
u8 *buf)
@@ -1604,11 +1604,22 @@ static int ath10k_pci_dump_memory_reg(struct ath10k *ar,
{
struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
u32 i;
+ int ret;
+
+ mutex_lock(&ar->conf_mutex);
+ if (ar->state != ATH10K_STATE_ON) {
+ ath10k_warn(ar, "Skipping pci_dump_memory_reg invalid state\n");
+ ret = -EIO;
+ goto done;
+ }
for (i = 0; i < region->len; i += 4)
*(u32 *)(buf + i) = ioread32(ar_pci->mem + region->start + i);
- return region->len;
+ ret = region->len;
+done:
+ mutex_unlock(&ar->conf_mutex);
+ return ret;
}
/* if an error happened returns < 0, otherwise the length */
@@ -1704,7 +1715,11 @@ static void ath10k_pci_dump_memory(struct ath10k *ar,
count = ath10k_pci_dump_memory_sram(ar, current_region, buf);
break;
case ATH10K_MEM_REGION_TYPE_IOREG:
- count = ath10k_pci_dump_memory_reg(ar, current_region, buf);
+ ret = ath10k_pci_dump_memory_reg(ar, current_region, buf);
+ if (ret < 0)
+ break;
+
+ count = ret;
break;
default:
ret = ath10k_pci_dump_memory_generic(ar, current_region, buf);
diff --git a/drivers/net/wireless/ath/ath10k/qmi.c b/drivers/net/wireless/ath/ath10k/qmi.c
index a0ba07b85362..85dce43c5439 100644
--- a/drivers/net/wireless/ath/ath10k/qmi.c
+++ b/drivers/net/wireless/ath/ath10k/qmi.c
@@ -84,6 +84,9 @@ static int ath10k_qmi_setup_msa_permissions(struct ath10k_qmi *qmi)
int ret;
int i;
+ if (qmi->msa_fixed_perm)
+ return 0;
+
for (i = 0; i < qmi->nr_mem_region; i++) {
ret = ath10k_qmi_map_msa_permission(qmi, &qmi->mem_region[i]);
if (ret)
@@ -102,6 +105,9 @@ static void ath10k_qmi_remove_msa_permission(struct ath10k_qmi *qmi)
{
int i;
+ if (qmi->msa_fixed_perm)
+ return;
+
for (i = 0; i < qmi->nr_mem_region; i++)
ath10k_qmi_unmap_msa_permission(qmi, &qmi->mem_region[i]);
}
@@ -279,7 +285,15 @@ static int ath10k_qmi_bdf_dnld_send_sync(struct ath10k_qmi *qmi)
if (ret < 0)
goto out;
- if (resp.resp.result != QMI_RESULT_SUCCESS_V01) {
+ /* end = 1 triggers a CRC check on the BDF. If this fails, we
+ * get a QMI_ERR_MALFORMED_MSG_V01 error, but the FW is still
+ * willing to use the BDF. For some platforms, all the valid
+ * released BDFs fail this CRC check, so attempt to detect this
+ * scenario and treat it as non-fatal.
+ */
+ if (resp.resp.result != QMI_RESULT_SUCCESS_V01 &&
+ !(req->end == 1 &&
+ resp.resp.result == QMI_ERR_MALFORMED_MSG_V01)) {
ath10k_err(ar, "failed to download board data file: %d\n",
resp.resp.error);
ret = -EINVAL;
@@ -635,7 +649,9 @@ static int ath10k_qmi_host_cap_send_sync(struct ath10k_qmi *qmi)
if (ret < 0)
goto out;
- if (resp.resp.result != QMI_RESULT_SUCCESS_V01) {
+ /* older FW didn't support this request, which is not fatal */
+ if (resp.resp.result != QMI_RESULT_SUCCESS_V01 &&
+ resp.resp.error != QMI_ERR_NOT_SUPPORTED_V01) {
ath10k_err(ar, "host capability request rejected: %d\n", resp.resp.error);
ret = -EINVAL;
goto out;
@@ -1025,6 +1041,9 @@ static int ath10k_qmi_setup_msa_resources(struct ath10k_qmi *qmi, u32 msa_size)
qmi->msa_mem_size = msa_size;
}
+ if (of_property_read_bool(dev->of_node, "qcom,msa-fixed-perm"))
+ qmi->msa_fixed_perm = true;
+
ath10k_dbg(ar, ATH10K_DBG_QMI, "msa pa: %pad , msa va: 0x%p\n",
&qmi->msa_pa,
qmi->msa_va);
diff --git a/drivers/net/wireless/ath/ath10k/qmi.h b/drivers/net/wireless/ath/ath10k/qmi.h
index 40aafb875ed0..dc257375f161 100644
--- a/drivers/net/wireless/ath/ath10k/qmi.h
+++ b/drivers/net/wireless/ath/ath10k/qmi.h
@@ -104,6 +104,7 @@ struct ath10k_qmi {
bool fw_ready;
char fw_build_timestamp[MAX_TIMESTAMP_LEN + 1];
struct ath10k_qmi_cal_data cal_data[MAX_NUM_CAL_V01];
+ bool msa_fixed_perm;
};
int ath10k_qmi_wlan_enable(struct ath10k *ar,
diff --git a/drivers/net/wireless/ath/ath10k/sdio.c b/drivers/net/wireless/ath/ath10k/sdio.c
index 120200a93bcc..e5316b911e1d 100644
--- a/drivers/net/wireless/ath/ath10k/sdio.c
+++ b/drivers/net/wireless/ath/ath10k/sdio.c
@@ -24,6 +24,8 @@
#include "trace.h"
#include "sdio.h"
+#define ATH10K_SDIO_VSG_BUF_SIZE (64 * 1024)
+
/* inlined helper functions */
static inline int ath10k_sdio_calc_txrx_padded_len(struct ath10k_sdio *ar_sdio,
@@ -417,6 +419,7 @@ static int ath10k_sdio_mbox_rx_process_packets(struct ath10k *ar,
struct ath10k_htc *htc = &ar->htc;
struct ath10k_sdio_rx_data *pkt;
struct ath10k_htc_ep *ep;
+ struct ath10k_skb_rxcb *cb;
enum ath10k_htc_ep_id id;
int ret, i, *n_lookahead_local;
u32 *lookaheads_local;
@@ -462,10 +465,16 @@ static int ath10k_sdio_mbox_rx_process_packets(struct ath10k *ar,
if (ret)
goto out;
- if (!pkt->trailer_only)
- ep->ep_ops.ep_rx_complete(ar_sdio->ar, pkt->skb);
- else
+ if (!pkt->trailer_only) {
+ cb = ATH10K_SKB_RXCB(pkt->skb);
+ cb->eid = id;
+
+ skb_queue_tail(&ar_sdio->rx_head, pkt->skb);
+ queue_work(ar->workqueue_aux,
+ &ar_sdio->async_work_rx);
+ } else {
kfree_skb(pkt->skb);
+ }
/* The RX complete handler now owns the skb...*/
pkt->skb = NULL;
@@ -484,21 +493,22 @@ out:
return ret;
}
-static int ath10k_sdio_mbox_alloc_pkt_bundle(struct ath10k *ar,
- struct ath10k_sdio_rx_data *rx_pkts,
- struct ath10k_htc_hdr *htc_hdr,
- size_t full_len, size_t act_len,
- size_t *bndl_cnt)
+static int ath10k_sdio_mbox_alloc_bundle(struct ath10k *ar,
+ struct ath10k_sdio_rx_data *rx_pkts,
+ struct ath10k_htc_hdr *htc_hdr,
+ size_t full_len, size_t act_len,
+ size_t *bndl_cnt)
{
int ret, i;
+ u8 max_msgs = ar->htc.max_msgs_per_htc_bundle;
- *bndl_cnt = FIELD_GET(ATH10K_HTC_FLAG_BUNDLE_MASK, htc_hdr->flags);
+ *bndl_cnt = ath10k_htc_get_bundle_count(max_msgs, htc_hdr->flags);
- if (*bndl_cnt > HTC_HOST_MAX_MSG_PER_RX_BUNDLE) {
+ if (*bndl_cnt > max_msgs) {
ath10k_warn(ar,
"HTC bundle length %u exceeds maximum %u\n",
le16_to_cpu(htc_hdr->len),
- HTC_HOST_MAX_MSG_PER_RX_BUNDLE);
+ max_msgs);
return -ENOMEM;
}
@@ -529,12 +539,11 @@ static int ath10k_sdio_mbox_rx_alloc(struct ath10k *ar,
size_t full_len, act_len;
bool last_in_bundle;
int ret, i;
+ int pkt_cnt = 0;
if (n_lookaheads > ATH10K_SDIO_MAX_RX_MSGS) {
- ath10k_warn(ar,
- "the total number of pkgs to be fetched (%u) exceeds maximum %u\n",
- n_lookaheads,
- ATH10K_SDIO_MAX_RX_MSGS);
+ ath10k_warn(ar, "the total number of pkgs to be fetched (%u) exceeds maximum %u\n",
+ n_lookaheads, ATH10K_SDIO_MAX_RX_MSGS);
ret = -ENOMEM;
goto err;
}
@@ -543,10 +552,8 @@ static int ath10k_sdio_mbox_rx_alloc(struct ath10k *ar,
htc_hdr = (struct ath10k_htc_hdr *)&lookaheads[i];
last_in_bundle = false;
- if (le16_to_cpu(htc_hdr->len) >
- ATH10K_HTC_MBOX_MAX_PAYLOAD_LENGTH) {
- ath10k_warn(ar,
- "payload length %d exceeds max htc length: %zu\n",
+ if (le16_to_cpu(htc_hdr->len) > ATH10K_HTC_MBOX_MAX_PAYLOAD_LENGTH) {
+ ath10k_warn(ar, "payload length %d exceeds max htc length: %zu\n",
le16_to_cpu(htc_hdr->len),
ATH10K_HTC_MBOX_MAX_PAYLOAD_LENGTH);
ret = -ENOMEM;
@@ -557,36 +564,37 @@ static int ath10k_sdio_mbox_rx_alloc(struct ath10k *ar,
full_len = ath10k_sdio_calc_txrx_padded_len(ar_sdio, act_len);
if (full_len > ATH10K_SDIO_MAX_BUFFER_SIZE) {
- ath10k_warn(ar,
- "rx buffer requested with invalid htc_hdr length (%d, 0x%x): %d\n",
+ ath10k_warn(ar, "rx buffer requested with invalid htc_hdr length (%d, 0x%x): %d\n",
htc_hdr->eid, htc_hdr->flags,
le16_to_cpu(htc_hdr->len));
ret = -EINVAL;
goto err;
}
- if (htc_hdr->flags & ATH10K_HTC_FLAG_BUNDLE_MASK) {
+ if (ath10k_htc_get_bundle_count(
+ ar->htc.max_msgs_per_htc_bundle, htc_hdr->flags)) {
/* HTC header indicates that every packet to follow
* has the same padded length so that it can be
* optimally fetched as a full bundle.
*/
size_t bndl_cnt;
- ret = ath10k_sdio_mbox_alloc_pkt_bundle(ar,
- &ar_sdio->rx_pkts[i],
- htc_hdr,
- full_len,
- act_len,
- &bndl_cnt);
+ ret = ath10k_sdio_mbox_alloc_bundle(ar,
+ &ar_sdio->rx_pkts[pkt_cnt],
+ htc_hdr,
+ full_len,
+ act_len,
+ &bndl_cnt);
if (ret) {
- ath10k_warn(ar, "alloc_bundle error %d\n", ret);
+ ath10k_warn(ar, "failed to allocate a bundle: %d\n",
+ ret);
goto err;
}
- n_lookaheads += bndl_cnt;
- i += bndl_cnt;
- /*Next buffer will be the last in the bundle */
+ pkt_cnt += bndl_cnt;
+
+ /* next buffer will be the last in the bundle */
last_in_bundle = true;
}
@@ -597,7 +605,7 @@ static int ath10k_sdio_mbox_rx_alloc(struct ath10k *ar,
if (htc_hdr->flags & ATH10K_HTC_FLAGS_RECV_1MORE_BLOCK)
full_len += ATH10K_HIF_MBOX_BLOCK_SIZE;
- ret = ath10k_sdio_mbox_alloc_rx_pkt(&ar_sdio->rx_pkts[i],
+ ret = ath10k_sdio_mbox_alloc_rx_pkt(&ar_sdio->rx_pkts[pkt_cnt],
act_len,
full_len,
last_in_bundle,
@@ -606,9 +614,11 @@ static int ath10k_sdio_mbox_rx_alloc(struct ath10k *ar,
ath10k_warn(ar, "alloc_rx_pkt error %d\n", ret);
goto err;
}
+
+ pkt_cnt++;
}
- ar_sdio->n_rx_pkts = i;
+ ar_sdio->n_rx_pkts = pkt_cnt;
return 0;
@@ -622,10 +632,10 @@ err:
return ret;
}
-static int ath10k_sdio_mbox_rx_packet(struct ath10k *ar,
- struct ath10k_sdio_rx_data *pkt)
+static int ath10k_sdio_mbox_rx_fetch(struct ath10k *ar)
{
struct ath10k_sdio *ar_sdio = ath10k_sdio_priv(ar);
+ struct ath10k_sdio_rx_data *pkt = &ar_sdio->rx_pkts[0];
struct sk_buff *skb = pkt->skb;
struct ath10k_htc_hdr *htc_hdr;
int ret;
@@ -633,48 +643,75 @@ static int ath10k_sdio_mbox_rx_packet(struct ath10k *ar,
ret = ath10k_sdio_readsb(ar, ar_sdio->mbox_info.htc_addr,
skb->data, pkt->alloc_len);
if (ret)
- goto out;
+ goto err;
- /* Update actual length. The original length may be incorrect,
- * as the FW will bundle multiple packets as long as their sizes
- * fit within the same aligned length (pkt->alloc_len).
- */
htc_hdr = (struct ath10k_htc_hdr *)skb->data;
pkt->act_len = le16_to_cpu(htc_hdr->len) + sizeof(*htc_hdr);
+
if (pkt->act_len > pkt->alloc_len) {
- ath10k_warn(ar, "rx packet too large (%zu > %zu)\n",
- pkt->act_len, pkt->alloc_len);
- ret = -EMSGSIZE;
- goto out;
+ ret = -EINVAL;
+ goto err;
}
skb_put(skb, pkt->act_len);
+ return 0;
-out:
- pkt->status = ret;
+err:
+ ar_sdio->n_rx_pkts = 0;
+ ath10k_sdio_mbox_free_rx_pkt(pkt);
return ret;
}
-static int ath10k_sdio_mbox_rx_fetch(struct ath10k *ar)
+static int ath10k_sdio_mbox_rx_fetch_bundle(struct ath10k *ar)
{
struct ath10k_sdio *ar_sdio = ath10k_sdio_priv(ar);
+ struct ath10k_sdio_rx_data *pkt;
+ struct ath10k_htc_hdr *htc_hdr;
int ret, i;
+ u32 pkt_offset, virt_pkt_len;
+
+ virt_pkt_len = 0;
+ for (i = 0; i < ar_sdio->n_rx_pkts; i++)
+ virt_pkt_len += ar_sdio->rx_pkts[i].alloc_len;
+
+ if (virt_pkt_len > ATH10K_SDIO_VSG_BUF_SIZE) {
+ ath10k_warn(ar, "sdio vsg buffer size limit: %d\n", virt_pkt_len);
+ ret = -E2BIG;
+ goto err;
+ }
+ ret = ath10k_sdio_readsb(ar, ar_sdio->mbox_info.htc_addr,
+ ar_sdio->vsg_buffer, virt_pkt_len);
+ if (ret) {
+ ath10k_warn(ar, "failed to read bundle packets: %d", ret);
+ goto err;
+ }
+
+ pkt_offset = 0;
for (i = 0; i < ar_sdio->n_rx_pkts; i++) {
- ret = ath10k_sdio_mbox_rx_packet(ar,
- &ar_sdio->rx_pkts[i]);
- if (ret)
+ pkt = &ar_sdio->rx_pkts[i];
+ htc_hdr = (struct ath10k_htc_hdr *)(ar_sdio->vsg_buffer + pkt_offset);
+ pkt->act_len = le16_to_cpu(htc_hdr->len) + sizeof(*htc_hdr);
+
+ if (pkt->act_len > pkt->alloc_len) {
+ ret = -EINVAL;
goto err;
+ }
+
+ skb_put_data(pkt->skb, htc_hdr, pkt->act_len);
+ pkt_offset += pkt->alloc_len;
}
return 0;
err:
 /* Free all packets that were not successfully fetched. */
- for (; i < ar_sdio->n_rx_pkts; i++)
+ for (i = 0; i < ar_sdio->n_rx_pkts; i++)
ath10k_sdio_mbox_free_rx_pkt(&ar_sdio->rx_pkts[i]);
+ ar_sdio->n_rx_pkts = 0;
+
return ret;
}
@@ -717,7 +754,10 @@ static int ath10k_sdio_mbox_rxmsg_pending_handler(struct ath10k *ar,
*/
*done = false;
- ret = ath10k_sdio_mbox_rx_fetch(ar);
+ if (ar_sdio->n_rx_pkts > 1)
+ ret = ath10k_sdio_mbox_rx_fetch_bundle(ar);
+ else
+ ret = ath10k_sdio_mbox_rx_fetch(ar);
/* Process fetched packets. This will potentially update
* n_lookaheads depending on if the packets contain lookahead
@@ -1293,6 +1333,31 @@ static void __ath10k_sdio_write_async(struct ath10k *ar,
ath10k_sdio_free_bus_req(ar, req);
}
+/* To improve throughput, use a workqueue to deliver packets to the HTC
+ * layer; this way the SDIO bus is utilized much better.
+ */
+static void ath10k_rx_indication_async_work(struct work_struct *work)
+{
+ struct ath10k_sdio *ar_sdio = container_of(work, struct ath10k_sdio,
+ async_work_rx);
+ struct ath10k *ar = ar_sdio->ar;
+ struct ath10k_htc_ep *ep;
+ struct ath10k_skb_rxcb *cb;
+ struct sk_buff *skb;
+
+ while (true) {
+ skb = skb_dequeue(&ar_sdio->rx_head);
+ if (!skb)
+ break;
+ cb = ATH10K_SKB_RXCB(skb);
+ ep = &ar->htc.endpoint[cb->eid];
+ ep->ep_ops.ep_rx_complete(ar, skb);
+ }
+
+ if (test_bit(ATH10K_FLAG_CORE_REGISTERED, &ar->dev_flags))
+ napi_schedule(&ar->napi);
+}
+
static void ath10k_sdio_write_async_work(struct work_struct *work)
{
struct ath10k_sdio *ar_sdio = container_of(work, struct ath10k_sdio,
@@ -1681,6 +1746,8 @@ static int ath10k_sdio_hif_start(struct ath10k *ar)
struct ath10k_sdio *ar_sdio = ath10k_sdio_priv(ar);
int ret;
+ napi_enable(&ar->napi);
+
/* Sleep 20 ms before HIF interrupts are disabled.
* This will give target plenty of time to process the BMI done
* request before interrupts are disabled.
@@ -1805,13 +1872,16 @@ static void ath10k_sdio_hif_stop(struct ath10k *ar)
}
spin_unlock_bh(&ar_sdio->wr_async_lock);
+
+ napi_synchronize(&ar->napi);
+ napi_disable(&ar->napi);
}
#ifdef CONFIG_PM
static int ath10k_sdio_hif_suspend(struct ath10k *ar)
{
- return -EOPNOTSUPP;
+ return 0;
}
static int ath10k_sdio_hif_resume(struct ath10k *ar)
@@ -1961,7 +2031,26 @@ static const struct ath10k_hif_ops ath10k_sdio_hif_ops = {
*/
static int ath10k_sdio_pm_suspend(struct device *device)
{
- return 0;
+ struct sdio_func *func = dev_to_sdio_func(device);
+ struct ath10k_sdio *ar_sdio = sdio_get_drvdata(func);
+ struct ath10k *ar = ar_sdio->ar;
+ mmc_pm_flag_t pm_flag, pm_caps;
+ int ret;
+
+ if (!device_may_wakeup(ar->dev))
+ return 0;
+
+ pm_flag = MMC_PM_KEEP_POWER;
+
+ ret = sdio_set_host_pm_flags(func, pm_flag);
+ if (ret) {
+ pm_caps = sdio_get_host_pm_caps(func);
+ ath10k_warn(ar, "failed to set sdio host pm flags (0x%x, 0x%x): %d\n",
+ pm_flag, pm_caps, ret);
+ return ret;
+ }
+
+ return ret;
}
static int ath10k_sdio_pm_resume(struct device *device)
@@ -1980,6 +2069,20 @@ static SIMPLE_DEV_PM_OPS(ath10k_sdio_pm_ops, ath10k_sdio_pm_suspend,
#endif /* CONFIG_PM_SLEEP */
+static int ath10k_sdio_napi_poll(struct napi_struct *ctx, int budget)
+{
+ struct ath10k *ar = container_of(ctx, struct ath10k, napi);
+ int done;
+
+ done = ath10k_htt_rx_hl_indication(ar, budget);
+ ath10k_dbg(ar, ATH10K_DBG_SDIO, "napi poll: done: %d, budget:%d\n", done, budget);
+
+ if (done < budget)
+ napi_complete_done(ctx, done);
+
+ return done;
+}
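/* Editorial note (not part of the patch): this follows the standard NAPI
 * contract -- consume at most @budget indications, call
 * napi_complete_done() only when the work ran out early, and return
 * done == budget to stay scheduled for another pass.
 */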
+
static int ath10k_sdio_probe(struct sdio_func *func,
const struct sdio_device_id *id)
{
@@ -2005,6 +2108,9 @@ static int ath10k_sdio_probe(struct sdio_func *func,
return -ENOMEM;
}
+ netif_napi_add(&ar->napi_dev, &ar->napi, ath10k_sdio_napi_poll,
+ ATH10K_NAPI_BUDGET);
+
ath10k_dbg(ar, ATH10K_DBG_BOOT,
"sdio new func %d vendor 0x%x device 0x%x block 0x%x/0x%x\n",
func->num, func->vendor, func->device,
@@ -2020,6 +2126,12 @@ static int ath10k_sdio_probe(struct sdio_func *func,
goto err_core_destroy;
}
+ ar_sdio->vsg_buffer = devm_kmalloc(ar->dev, ATH10K_SDIO_VSG_BUF_SIZE, GFP_KERNEL);
+ if (!ar_sdio->vsg_buffer) {
+ ret = -ENOMEM;
+ goto err_core_destroy;
+ }
+
ar_sdio->irq_data.irq_en_reg =
devm_kzalloc(ar->dev, sizeof(struct ath10k_sdio_irq_enable_regs),
GFP_KERNEL);
@@ -2028,7 +2140,7 @@ static int ath10k_sdio_probe(struct sdio_func *func,
goto err_core_destroy;
}
- ar_sdio->bmi_buf = devm_kzalloc(ar->dev, BMI_MAX_CMDBUF_SIZE, GFP_KERNEL);
+ ar_sdio->bmi_buf = devm_kzalloc(ar->dev, BMI_MAX_LARGE_CMDBUF_SIZE, GFP_KERNEL);
if (!ar_sdio->bmi_buf) {
ret = -ENOMEM;
goto err_core_destroy;
@@ -2057,6 +2169,9 @@ static int ath10k_sdio_probe(struct sdio_func *func,
for (i = 0; i < ATH10K_SDIO_BUS_REQUEST_MAX_NUM; i++)
ath10k_sdio_free_bus_req(ar, &ar_sdio->bus_req[i]);
+ skb_queue_head_init(&ar_sdio->rx_head);
+ INIT_WORK(&ar_sdio->async_work_rx, ath10k_rx_indication_async_work);
+
dev_id_base = FIELD_GET(QCA_MANUFACTURER_ID_BASE, id->device);
switch (dev_id_base) {
case QCA_MANUFACTURER_ID_AR6005_BASE:
@@ -2080,6 +2195,8 @@ static int ath10k_sdio_probe(struct sdio_func *func,
bus_params.chip_id = 0;
bus_params.hl_msdu_ids = true;
+ ar->hw->max_mtu = ETH_DATA_LEN;
+
ret = ath10k_core_register(ar, &bus_params);
if (ret) {
ath10k_err(ar, "failed to register driver core: %d\n", ret);
@@ -2106,6 +2223,9 @@ static void ath10k_sdio_remove(struct sdio_func *func)
func->num, func->vendor, func->device);
ath10k_core_unregister(ar);
+
+ netif_napi_del(&ar->napi);
+
ath10k_core_destroy(ar);
flush_workqueue(ar_sdio->workqueue);
diff --git a/drivers/net/wireless/ath/ath10k/sdio.h b/drivers/net/wireless/ath/ath10k/sdio.h
index b8c7ac0330bd..33195f49acab 100644
--- a/drivers/net/wireless/ath/ath10k/sdio.h
+++ b/drivers/net/wireless/ath/ath10k/sdio.h
@@ -89,10 +89,10 @@
* to the maximum value (HTC_HOST_MAX_MSG_PER_RX_BUNDLE).
*
* in this case the driver must allocate
- * (HTC_HOST_MAX_MSG_PER_RX_BUNDLE * HTC_HOST_MAX_MSG_PER_RX_BUNDLE) skb's.
+ * (HTC_HOST_MAX_MSG_PER_RX_BUNDLE * 2) skb's.
*/
#define ATH10K_SDIO_MAX_RX_MSGS \
- (HTC_HOST_MAX_MSG_PER_RX_BUNDLE * HTC_HOST_MAX_MSG_PER_RX_BUNDLE)
+ (HTC_HOST_MAX_MSG_PER_RX_BUNDLE * 2)
#define ATH10K_FIFO_TIMEOUT_AND_CHIP_CONTROL 0x00000868u
#define ATH10K_FIFO_TIMEOUT_AND_CHIP_CONTROL_DISABLE_SLEEP_OFF 0xFFFEFFFF
@@ -126,7 +126,6 @@ struct ath10k_sdio_rx_data {
bool part_of_bundle;
bool last_in_bundle;
bool trailer_only;
- int status;
};
struct ath10k_sdio_irq_proc_regs {
@@ -138,8 +137,8 @@ struct ath10k_sdio_irq_proc_regs {
u8 rx_lookahead_valid;
u8 host_int_status2;
u8 gmbox_rx_avail;
- __le32 rx_lookahead[2];
- __le32 rx_gmbox_lookahead_alias[2];
+ __le32 rx_lookahead[2 * ATH10K_HIF_MBOX_NUM_MAX];
+ __le32 int_status_enable;
};
struct ath10k_sdio_irq_enable_regs {
@@ -187,6 +186,9 @@ struct ath10k_sdio {
struct ath10k_sdio_bus_request bus_req[ATH10K_SDIO_BUS_REQUEST_MAX_NUM];
/* free list of bus requests */
struct list_head bus_req_freeq;
+
+ struct sk_buff_head rx_head;
+
/* protects access to bus_req_freeq */
spinlock_t lock;
@@ -196,6 +198,13 @@ struct ath10k_sdio {
struct ath10k *ar;
struct ath10k_sdio_irq_data irq_data;
+ /* temporary buffer for sdio read.
+ * It is allocated at probe time and used to receive bundled packets;
+ * reads of bundled packets are not done in parallel, so it does not
+ * need locking.
+ */
+ u8 *vsg_buffer;
+
/* temporary buffer for BMI requests */
u8 *bmi_buf;
@@ -206,6 +215,8 @@ struct ath10k_sdio {
struct list_head wr_asyncq;
/* protects access to wr_asyncq */
spinlock_t wr_async_lock;
+
+ struct work_struct async_work_rx;
};
static inline struct ath10k_sdio *ath10k_sdio_priv(struct ath10k *ar)
diff --git a/drivers/net/wireless/ath/ath10k/snoc.c b/drivers/net/wireless/ath/ath10k/snoc.c
index 16177497bba7..21081b4a27d7 100644
--- a/drivers/net/wireless/ath/ath10k/snoc.c
+++ b/drivers/net/wireless/ath/ath10k/snoc.c
@@ -46,7 +46,7 @@ static const char * const ath10k_regulators[] = {
};
static const char * const ath10k_clocks[] = {
- "cxo_ref_clk_pin",
+ "cxo_ref_clk_pin", "qdss",
};
static void ath10k_snoc_htc_tx_cb(struct ath10k_ce_pipe *ce_state);
@@ -582,7 +582,7 @@ static void ath10k_snoc_process_rx_cb(struct ath10k_ce_pipe *ce_state,
max_nbytes, DMA_FROM_DEVICE);
if (unlikely(max_nbytes < nbytes)) {
- ath10k_warn(ar, "rxed more than expected (nbytes %d, max %d)",
+ ath10k_warn(ar, "rxed more than expected (nbytes %d, max %d)\n",
nbytes, max_nbytes);
dev_kfree_skb_any(skb);
continue;
@@ -1201,7 +1201,7 @@ static int ath10k_snoc_request_irq(struct ath10k *ar)
irqflags, ce_name[id], ar);
if (ret) {
ath10k_err(ar,
- "failed to register IRQ handler for CE %d: %d",
+ "failed to register IRQ handler for CE %d: %d\n",
id, ret);
goto err_irq;
}
@@ -1466,7 +1466,6 @@ MODULE_DEVICE_TABLE(of, ath10k_snoc_dt_match);
static int ath10k_snoc_probe(struct platform_device *pdev)
{
const struct ath10k_snoc_drv_priv *drv_data;
- const struct of_device_id *of_id;
struct ath10k_snoc *ar_snoc;
struct device *dev;
struct ath10k *ar;
@@ -1474,18 +1473,16 @@ static int ath10k_snoc_probe(struct platform_device *pdev)
int ret;
u32 i;
- of_id = of_match_device(ath10k_snoc_dt_match, &pdev->dev);
- if (!of_id) {
- dev_err(&pdev->dev, "failed to find matching device tree id\n");
+ dev = &pdev->dev;
+ drv_data = device_get_match_data(dev);
+ if (!drv_data) {
+ dev_err(dev, "failed to find matching device tree id\n");
return -EINVAL;
}
- drv_data = of_id->data;
- dev = &pdev->dev;
-
ret = dma_set_mask_and_coherent(dev, drv_data->dma_mask);
if (ret) {
- dev_err(dev, "failed to set dma mask: %d", ret);
+ dev_err(dev, "failed to set dma mask: %d\n", ret);
return ret;
}
@@ -1563,13 +1560,16 @@ static int ath10k_snoc_probe(struct platform_device *pdev)
ret = ath10k_qmi_init(ar, msa_size);
if (ret) {
ath10k_warn(ar, "failed to register wlfw qmi client: %d\n", ret);
- goto err_core_destroy;
+ goto err_power_off;
}
ath10k_dbg(ar, ATH10K_DBG_SNOC, "snoc probe\n");
return 0;
+err_power_off:
+ ath10k_hw_power_off(ar);
+
err_free_irq:
ath10k_snoc_free_irq(ar);
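The probe conversion above swaps of_match_device() for device_get_match_data(), which performs the match lookup and returns the entry's .data pointer in one step (and also works with non-OF firmware nodes). A minimal sketch of the resulting probe shape, using a hypothetical example_probe() name:

#include <linux/property.h>	/* device_get_match_data() */

static int example_probe(struct platform_device *pdev)
{
	const struct ath10k_snoc_drv_priv *drv_data;

	drv_data = device_get_match_data(&pdev->dev);
	if (!drv_data)
		return -EINVAL;	/* no match entry, or entry has no .data */

	return dma_set_mask_and_coherent(&pdev->dev, drv_data->dma_mask);
}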
diff --git a/drivers/net/wireless/ath/ath10k/testmode.c b/drivers/net/wireless/ath/ath10k/testmode.c
index 1bffe3fbea3f..7a9b9bbcdbfc 100644
--- a/drivers/net/wireless/ath/ath10k/testmode.c
+++ b/drivers/net/wireless/ath/ath10k/testmode.c
@@ -65,7 +65,7 @@ bool ath10k_tm_event_wmi(struct ath10k *ar, u32 cmd_id, struct sk_buff *skb)
ret = nla_put_u32(nl_skb, ATH10K_TM_ATTR_CMD, ATH10K_TM_CMD_WMI);
if (ret) {
ath10k_warn(ar,
- "failed to to put testmode wmi event cmd attribute: %d\n",
+ "failed to put testmode wmi event cmd attribute: %d\n",
ret);
kfree_skb(nl_skb);
goto out;
@@ -74,7 +74,7 @@ bool ath10k_tm_event_wmi(struct ath10k *ar, u32 cmd_id, struct sk_buff *skb)
ret = nla_put_u32(nl_skb, ATH10K_TM_ATTR_WMI_CMDID, cmd_id);
if (ret) {
ath10k_warn(ar,
- "failed to to put testmode wmi even cmd_id: %d\n",
+ "failed to put testmode wmi event cmd_id: %d\n",
ret);
kfree_skb(nl_skb);
goto out;
diff --git a/drivers/net/wireless/ath/ath10k/trace.h b/drivers/net/wireless/ath/ath10k/trace.h
index ab916459d237..842e42ec814f 100644
--- a/drivers/net/wireless/ath/ath10k/trace.h
+++ b/drivers/net/wireless/ath/ath10k/trace.h
@@ -239,7 +239,7 @@ TRACE_EVENT(ath10k_wmi_dbglog,
TP_STRUCT__entry(
__string(device, dev_name(ar->dev))
__string(driver, dev_driver_string(ar->dev))
- __field(u8, hw_type);
+ __field(u8, hw_type)
__field(size_t, buf_len)
__dynamic_array(u8, buf, buf_len)
),
@@ -269,7 +269,7 @@ TRACE_EVENT(ath10k_htt_pktlog,
TP_STRUCT__entry(
__string(device, dev_name(ar->dev))
__string(driver, dev_driver_string(ar->dev))
- __field(u8, hw_type);
+ __field(u8, hw_type)
__field(u16, buf_len)
__dynamic_array(u8, pktlog, buf_len)
),
@@ -435,7 +435,7 @@ TRACE_EVENT(ath10k_htt_rx_desc,
TP_STRUCT__entry(
__string(device, dev_name(ar->dev))
__string(driver, dev_driver_string(ar->dev))
- __field(u8, hw_type);
+ __field(u8, hw_type)
__field(u16, len)
__dynamic_array(u8, rxdesc, len)
),
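The three hunks above drop trailing semicolons after __field(): TP_STRUCT__entry() is re-expanded several times by the tracing core (as a struct definition, as a field-description array, and so on), and the helpers already emit their own separators in each context, so extra ';' tokens leave stray empty declarations behind. A minimal, correctly punctuated skeleton with a hypothetical event name:

TRACE_EVENT(ath_example,
	TP_PROTO(u8 hw_type, u16 len),
	TP_ARGS(hw_type, len),
	TP_STRUCT__entry(
		__field(u8, hw_type)
		__field(u16, len)
	),
	TP_fast_assign(
		__entry->hw_type = hw_type;
		__entry->len = len;
	),
	TP_printk("hw_type %u len %u", __entry->hw_type, __entry->len)
);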
diff --git a/drivers/net/wireless/ath/ath10k/wmi-tlv.c b/drivers/net/wireless/ath/ath10k/wmi-tlv.c
index 69a1ec53df29..4e68debda9bf 100644
--- a/drivers/net/wireless/ath/ath10k/wmi-tlv.c
+++ b/drivers/net/wireless/ath/ath10k/wmi-tlv.c
@@ -841,7 +841,7 @@ static int ath10k_wmi_tlv_op_pull_mgmt_rx_ev(struct ath10k *ar,
const struct wmi_tlv_mgmt_rx_ev *ev;
const u8 *frame;
u32 msdu_len;
- int ret;
+ int ret, i;
tb = ath10k_wmi_tlv_parse_alloc(ar, skb->data, skb->len, GFP_ATOMIC);
if (IS_ERR(tb)) {
@@ -865,6 +865,9 @@ static int ath10k_wmi_tlv_op_pull_mgmt_rx_ev(struct ath10k *ar,
arg->phy_mode = ev->phy_mode;
arg->rate = ev->rate;
+ for (i = 0; i < ARRAY_SIZE(ev->rssi); i++)
+ arg->rssi[i] = ev->rssi[i];
+
msdu_len = __le32_to_cpu(arg->buf_len);
if (skb->len < (frame - skb->data) + msdu_len) {
@@ -3707,6 +3710,7 @@ ath10k_wmi_tlv_op_gen_config_pno_start(struct ath10k *ar,
struct wmi_tlv *tlv;
struct sk_buff *skb;
__le32 *channel_list;
+ u16 tlv_len;
size_t len;
void *ptr;
u32 i;
@@ -3764,10 +3768,12 @@ ath10k_wmi_tlv_op_gen_config_pno_start(struct ath10k *ar,
/* nlo_configured_parameters(nlo_list) */
cmd->no_of_ssids = __cpu_to_le32(min_t(u8, pno->uc_networks_count,
WMI_NLO_MAX_SSIDS));
+ tlv_len = __le32_to_cpu(cmd->no_of_ssids) *
+ sizeof(struct nlo_configured_parameters);
tlv = ptr;
tlv->tag = __cpu_to_le16(WMI_TLV_TAG_ARRAY_STRUCT);
- tlv->len = __cpu_to_le16(len);
+ tlv->len = __cpu_to_le16(tlv_len);
ptr += sizeof(*tlv);
nlo_list = ptr;
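The fix above corrects the array TLV's length field: tlv->len must describe only that TLV's payload, i.e. the nlo_configured_parameters entries that follow the header, whereas the old code stored the whole command length `len`. The corrected sizing in isolation:

/* Sketch of the corrected sizing: the ARRAY_STRUCT TLV covers only
 * the nlo_list payload, never the full command buffer.
 */
u16 tlv_len = min_t(u8, pno->uc_networks_count, WMI_NLO_MAX_SSIDS) *
	      sizeof(struct nlo_configured_parameters);

tlv->tag = __cpu_to_le16(WMI_TLV_TAG_ARRAY_STRUCT);
tlv->len = __cpu_to_le16(tlv_len);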
diff --git a/drivers/net/wireless/ath/ath10k/wmi.c b/drivers/net/wireless/ath/ath10k/wmi.c
index 9f564e2b7a14..61885d4d662c 100644
--- a/drivers/net/wireless/ath/ath10k/wmi.c
+++ b/drivers/net/wireless/ath/ath10k/wmi.c
@@ -2463,10 +2463,10 @@ int ath10k_wmi_event_mgmt_rx(struct ath10k *ar, struct sk_buff *skb)
u32 rx_status;
u32 channel;
u32 phy_mode;
- u32 snr;
+ u32 snr, rssi;
u32 rate;
u16 fc;
- int ret;
+ int ret, i;
ret = ath10k_wmi_pull_mgmt_rx(ar, skb, &arg);
if (ret) {
@@ -2525,6 +2525,20 @@ int ath10k_wmi_event_mgmt_rx(struct ath10k *ar, struct sk_buff *skb)
status->freq = ieee80211_channel_to_frequency(channel, status->band);
status->signal = snr + ATH10K_DEFAULT_NOISE_FLOOR;
+
+ BUILD_BUG_ON(ARRAY_SIZE(status->chain_signal) != ARRAY_SIZE(arg.rssi));
+
+ for (i = 0; i < ARRAY_SIZE(status->chain_signal); i++) {
+ status->chains &= ~BIT(i);
+ rssi = __le32_to_cpu(arg.rssi[i]);
+ ath10k_dbg(ar, ATH10K_DBG_MGMT, "mgmt rssi[%d]:%d\n", i, rssi);
+
+ if (rssi != ATH10K_INVALID_RSSI && rssi != 0) {
+ status->chain_signal[i] = ATH10K_DEFAULT_NOISE_FLOOR + rssi;
+ status->chains |= BIT(i);
+ }
+ }
+
status->rate_idx = ath10k_mac_bitrate_to_idx(sband, rate / 100);
hdr = (struct ieee80211_hdr *)skb->data;
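The loop above exports per-chain signal strength to mac80211: every valid, non-zero rssi entry (firmware reports rssi in dB above the noise floor) becomes a chain_signal value and its chain bit is set. Worked numbers, assuming ATH10K_DEFAULT_NOISE_FLOOR is -95 dBm:

/* Worked example (values assumed for illustration) */
int noise_floor = ATH10K_DEFAULT_NOISE_FLOOR;	/* -95 dBm */
u32 rssi = 35;					/* dB above the floor */

status->chain_signal[0] = noise_floor + rssi;	/* -60 dBm */
status->chains |= BIT(0);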
@@ -9476,7 +9490,7 @@ static int ath10k_wmi_mgmt_tx_clean_up_pending(int msdu_id, void *ptr,
msdu = pkt_addr->vaddr;
dma_unmap_single(ar->dev, pkt_addr->paddr,
- msdu->len, DMA_FROM_DEVICE);
+ msdu->len, DMA_TO_DEVICE);
ieee80211_free_txskb(ar->hw, msdu);
return 0;
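The one-line change above fixes a DMA-API direction mismatch: dma_unmap_single() must use the same direction the buffer was mapped with, and management tx frames are mapped DMA_TO_DEVICE at submit time, so unmapping them as DMA_FROM_DEVICE was wrong (CONFIG_DMA_API_DEBUG flags exactly this class of bug). The matched pair, in sketch form:

/* Sketch: map and unmap directions must agree for tx buffers. */
paddr = dma_map_single(ar->dev, skb->data, skb->len, DMA_TO_DEVICE);
if (dma_mapping_error(ar->dev, paddr))
	return -EIO;
/* ... frame ownership passes to the target ... */
dma_unmap_single(ar->dev, paddr, skb->len, DMA_TO_DEVICE);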
diff --git a/drivers/net/wireless/ath/ath10k/wmi.h b/drivers/net/wireless/ath/ath10k/wmi.h
index 74adce1dd3a9..972d53d77654 100644
--- a/drivers/net/wireless/ath/ath10k/wmi.h
+++ b/drivers/net/wireless/ath/ath10k/wmi.h
@@ -6786,6 +6786,7 @@ struct wmi_peer_delete_resp_ev_arg {
struct wmi_mac_addr peer_addr;
};
+#define WMI_MGMT_RX_NUM_RSSI 4
struct wmi_mgmt_rx_ev_arg {
__le32 channel;
__le32 snr;
@@ -6794,6 +6795,7 @@ struct wmi_mgmt_rx_ev_arg {
__le32 buf_len;
__le32 status; /* %WMI_RX_STATUS_ */
struct wmi_mgmt_rx_ext_info ext_info;
+ __le32 rssi[WMI_MGMT_RX_NUM_RSSI];
};
struct wmi_ch_info_ev_arg {
diff --git a/drivers/net/wireless/ath/ath11k/Kconfig b/drivers/net/wireless/ath/ath11k/Kconfig
new file mode 100644
index 000000000000..c88e16d4022b
--- /dev/null
+++ b/drivers/net/wireless/ath/ath11k/Kconfig
@@ -0,0 +1,35 @@
+# SPDX-License-Identifier: BSD-3-Clause-Clear
+config ATH11K
+ tristate "Qualcomm Technologies 802.11ax chipset support"
+ depends on MAC80211 && HAS_DMA
+ depends on REMOTEPROC
+ depends on ARCH_QCOM || COMPILE_TEST
+ select ATH_COMMON
+ select QCOM_QMI_HELPERS
+ ---help---
+ This module adds support for Qualcomm Technologies 802.11ax family of
+ chipsets.
+
+ If you choose to build a module, it'll be called ath11k.
+
+config ATH11K_DEBUG
+ bool "QCA ath11k debugging"
+ depends on ATH11K
+ ---help---
+ Enables debug support
+
+ If unsure, say Y to make it easier to debug problems.
+
+config ATH11K_DEBUGFS
+ bool "QCA ath11k debugfs support"
+ depends on ATH11K && DEBUG_FS && MAC80211_DEBUGFS
+ ---help---
+ Enable ath11k debugfs support
+
+ If unsure, say Y to make it easier to debug problems.
+
+config ATH11K_TRACING
+ bool "ath11k tracing support"
+ depends on ATH11K && EVENT_TRACING
+ ---help---
+ Select this to use ath11k tracing infrastructure.
diff --git a/drivers/net/wireless/ath/ath11k/Makefile b/drivers/net/wireless/ath/ath11k/Makefile
new file mode 100644
index 000000000000..2761d07d938e
--- /dev/null
+++ b/drivers/net/wireless/ath/ath11k/Makefile
@@ -0,0 +1,25 @@
+# SPDX-License-Identifier: BSD-3-Clause-Clear
+obj-$(CONFIG_ATH11K) += ath11k.o
+ath11k-y += core.o \
+ hal.o \
+ hal_tx.o \
+ hal_rx.o \
+ ahb.o \
+ wmi.o \
+ mac.o \
+ reg.o \
+ htc.o \
+ qmi.o \
+ dp.o \
+ dp_tx.o \
+ dp_rx.o \
+ debug.o \
+ ce.o \
+ peer.o
+
+ath11k-$(CONFIG_ATH11K_DEBUGFS) += debug_htt_stats.o debugfs_sta.o
+ath11k-$(CONFIG_NL80211_TESTMODE) += testmode.o
+ath11k-$(CONFIG_ATH11K_TRACING) += trace.o
+
+# for tracing framework to find trace.h
+CFLAGS_trace.o := -I$(src)
diff --git a/drivers/net/wireless/ath/ath11k/ahb.c b/drivers/net/wireless/ath/ath11k/ahb.c
new file mode 100644
index 000000000000..e7e3e64c07aa
--- /dev/null
+++ b/drivers/net/wireless/ath/ath11k/ahb.c
@@ -0,0 +1,1003 @@
+// SPDX-License-Identifier: BSD-3-Clause-Clear
+/*
+ * Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
+ */
+
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/of_device.h>
+#include <linux/of.h>
+#include <linux/dma-mapping.h>
+#include "ahb.h"
+#include "debug.h"
+#include <linux/remoteproc.h>
+
+static const struct of_device_id ath11k_ahb_of_match[] = {
+ /* TODO: Should we change the compatible string to something similar
+ * to one that ath10k uses?
+ */
+ { .compatible = "qcom,ipq8074-wifi",
+ .data = (void *)ATH11K_HW_IPQ8074,
+ },
+ { }
+};
+
+MODULE_DEVICE_TABLE(of, ath11k_ahb_of_match);
+
+/* Target firmware's Copy Engine configuration. */
+static const struct ce_pipe_config target_ce_config_wlan[] = {
+ /* CE0: host->target HTC control and raw streams */
+ {
+ .pipenum = __cpu_to_le32(0),
+ .pipedir = __cpu_to_le32(PIPEDIR_OUT),
+ .nentries = __cpu_to_le32(32),
+ .nbytes_max = __cpu_to_le32(2048),
+ .flags = __cpu_to_le32(CE_ATTR_FLAGS),
+ .reserved = __cpu_to_le32(0),
+ },
+
+ /* CE1: target->host HTT + HTC control */
+ {
+ .pipenum = __cpu_to_le32(1),
+ .pipedir = __cpu_to_le32(PIPEDIR_IN),
+ .nentries = __cpu_to_le32(32),
+ .nbytes_max = __cpu_to_le32(2048),
+ .flags = __cpu_to_le32(CE_ATTR_FLAGS),
+ .reserved = __cpu_to_le32(0),
+ },
+
+ /* CE2: target->host WMI */
+ {
+ .pipenum = __cpu_to_le32(2),
+ .pipedir = __cpu_to_le32(PIPEDIR_IN),
+ .nentries = __cpu_to_le32(32),
+ .nbytes_max = __cpu_to_le32(2048),
+ .flags = __cpu_to_le32(CE_ATTR_FLAGS),
+ .reserved = __cpu_to_le32(0),
+ },
+
+ /* CE3: host->target WMI */
+ {
+ .pipenum = __cpu_to_le32(3),
+ .pipedir = __cpu_to_le32(PIPEDIR_OUT),
+ .nentries = __cpu_to_le32(32),
+ .nbytes_max = __cpu_to_le32(2048),
+ .flags = __cpu_to_le32(CE_ATTR_FLAGS),
+ .reserved = __cpu_to_le32(0),
+ },
+
+ /* CE4: host->target HTT */
+ {
+ .pipenum = __cpu_to_le32(4),
+ .pipedir = __cpu_to_le32(PIPEDIR_OUT),
+ .nentries = __cpu_to_le32(256),
+ .nbytes_max = __cpu_to_le32(256),
+ .flags = __cpu_to_le32(CE_ATTR_FLAGS | CE_ATTR_DIS_INTR),
+ .reserved = __cpu_to_le32(0),
+ },
+
+ /* CE5: target->host Pktlog */
+ {
+ .pipenum = __cpu_to_le32(5),
+ .pipedir = __cpu_to_le32(PIPEDIR_IN),
+ .nentries = __cpu_to_le32(32),
+ .nbytes_max = __cpu_to_le32(2048),
+ .flags = __cpu_to_le32(0),
+ .reserved = __cpu_to_le32(0),
+ },
+
+ /* CE6: Reserved for target autonomous hif_memcpy */
+ {
+ .pipenum = __cpu_to_le32(6),
+ .pipedir = __cpu_to_le32(PIPEDIR_INOUT),
+ .nentries = __cpu_to_le32(32),
+ .nbytes_max = __cpu_to_le32(65535),
+ .flags = __cpu_to_le32(CE_ATTR_FLAGS),
+ .reserved = __cpu_to_le32(0),
+ },
+
+ /* CE7 used only by Host */
+ {
+ .pipenum = __cpu_to_le32(7),
+ .pipedir = __cpu_to_le32(PIPEDIR_OUT),
+ .nentries = __cpu_to_le32(32),
+ .nbytes_max = __cpu_to_le32(2048),
+ .flags = __cpu_to_le32(CE_ATTR_FLAGS),
+ .reserved = __cpu_to_le32(0),
+ },
+
+ /* CE8 target->host used only by IPA */
+ {
+ .pipenum = __cpu_to_le32(8),
+ .pipedir = __cpu_to_le32(PIPEDIR_INOUT),
+ .nentries = __cpu_to_le32(32),
+ .nbytes_max = __cpu_to_le32(65535),
+ .flags = __cpu_to_le32(CE_ATTR_FLAGS),
+ .reserved = __cpu_to_le32(0),
+ },
+
+ /* CE9 host->target HTT */
+ {
+ .pipenum = __cpu_to_le32(9),
+ .pipedir = __cpu_to_le32(PIPEDIR_OUT),
+ .nentries = __cpu_to_le32(32),
+ .nbytes_max = __cpu_to_le32(2048),
+ .flags = __cpu_to_le32(CE_ATTR_FLAGS),
+ .reserved = __cpu_to_le32(0),
+ },
+
+ /* CE10 target->host HTT */
+ {
+ .pipenum = __cpu_to_le32(10),
+ .pipedir = __cpu_to_le32(PIPEDIR_INOUT_H2H),
+ .nentries = __cpu_to_le32(0),
+ .nbytes_max = __cpu_to_le32(0),
+ .flags = __cpu_to_le32(CE_ATTR_FLAGS),
+ .reserved = __cpu_to_le32(0),
+ },
+
+ /* CE11 Not used */
+ {
+ .pipenum = __cpu_to_le32(0),
+ .pipedir = __cpu_to_le32(0),
+ .nentries = __cpu_to_le32(0),
+ .nbytes_max = __cpu_to_le32(0),
+ .flags = __cpu_to_le32(CE_ATTR_FLAGS),
+ .reserved = __cpu_to_le32(0),
+ },
+};
+
+/* Map from service/endpoint to Copy Engine.
+ * This table is derived from the CE_PCI TABLE, above.
+ * It is passed to the Target at startup for use by firmware.
+ */
+static const struct service_to_pipe target_service_to_ce_map_wlan[] = {
+ {
+ .service_id = __cpu_to_le32(ATH11K_HTC_SVC_ID_WMI_DATA_VO),
+ .pipedir = __cpu_to_le32(PIPEDIR_OUT), /* out = UL = host -> target */
+ .pipenum = __cpu_to_le32(3),
+ },
+ {
+ .service_id = __cpu_to_le32(ATH11K_HTC_SVC_ID_WMI_DATA_VO),
+ .pipedir = __cpu_to_le32(PIPEDIR_IN), /* in = DL = target -> host */
+ .pipenum = __cpu_to_le32(2),
+ },
+ {
+ .service_id = __cpu_to_le32(ATH11K_HTC_SVC_ID_WMI_DATA_BK),
+ .pipedir = __cpu_to_le32(PIPEDIR_OUT), /* out = UL = host -> target */
+ .pipenum = __cpu_to_le32(3),
+ },
+ {
+ .service_id = __cpu_to_le32(ATH11K_HTC_SVC_ID_WMI_DATA_BK),
+ .pipedir = __cpu_to_le32(PIPEDIR_IN), /* in = DL = target -> host */
+ .pipenum = __cpu_to_le32(2),
+ },
+ {
+ .service_id = __cpu_to_le32(ATH11K_HTC_SVC_ID_WMI_DATA_BE),
+ .pipedir = __cpu_to_le32(PIPEDIR_OUT), /* out = UL = host -> target */
+ .pipenum = __cpu_to_le32(3),
+ },
+ {
+ .service_id = __cpu_to_le32(ATH11K_HTC_SVC_ID_WMI_DATA_BE),
+ .pipedir = __cpu_to_le32(PIPEDIR_IN), /* in = DL = target -> host */
+ .pipenum = __cpu_to_le32(2),
+ },
+ {
+ .service_id = __cpu_to_le32(ATH11K_HTC_SVC_ID_WMI_DATA_VI),
+ .pipedir = __cpu_to_le32(PIPEDIR_OUT), /* out = UL = host -> target */
+ .pipenum = __cpu_to_le32(3),
+ },
+ {
+ .service_id = __cpu_to_le32(ATH11K_HTC_SVC_ID_WMI_DATA_VI),
+ .pipedir = __cpu_to_le32(PIPEDIR_IN), /* in = DL = target -> host */
+ .pipenum = __cpu_to_le32(2),
+ },
+ {
+ .service_id = __cpu_to_le32(ATH11K_HTC_SVC_ID_WMI_CONTROL),
+ .pipedir = __cpu_to_le32(PIPEDIR_OUT), /* out = UL = host -> target */
+ .pipenum = __cpu_to_le32(3),
+ },
+ {
+ .service_id = __cpu_to_le32(ATH11K_HTC_SVC_ID_WMI_CONTROL),
+ .pipedir = __cpu_to_le32(PIPEDIR_IN), /* in = DL = target -> host */
+ .pipenum = __cpu_to_le32(2),
+ },
+ {
+ .service_id = __cpu_to_le32(ATH11K_HTC_SVC_ID_WMI_CONTROL_MAC1),
+ .pipedir = __cpu_to_le32(PIPEDIR_OUT), /* out = UL = host -> target */
+ .pipenum = __cpu_to_le32(7),
+ },
+ {
+ .service_id = __cpu_to_le32(ATH11K_HTC_SVC_ID_WMI_CONTROL_MAC1),
+ .pipedir = __cpu_to_le32(PIPEDIR_IN), /* in = DL = target -> host */
+ .pipenum = __cpu_to_le32(2),
+ },
+ {
+ .service_id = __cpu_to_le32(ATH11K_HTC_SVC_ID_WMI_CONTROL_MAC2),
+ .pipedir = __cpu_to_le32(PIPEDIR_OUT), /* out = UL = host -> target */
+ .pipenum = __cpu_to_le32(9),
+ },
+ {
+ .service_id = __cpu_to_le32(ATH11K_HTC_SVC_ID_WMI_CONTROL_MAC2),
+ .pipedir = __cpu_to_le32(PIPEDIR_IN), /* in = DL = target -> host */
+ .pipenum = __cpu_to_le32(2),
+ },
+ {
+ .service_id = __cpu_to_le32(ATH11K_HTC_SVC_ID_RSVD_CTRL),
+ .pipedir = __cpu_to_le32(PIPEDIR_OUT), /* out = UL = host -> target */
+ .pipenum = __cpu_to_le32(0),
+ },
+ {
+ .service_id = __cpu_to_le32(ATH11K_HTC_SVC_ID_RSVD_CTRL),
+ .pipedir = __cpu_to_le32(PIPEDIR_IN), /* in = DL = target -> host */
+ .pipenum = __cpu_to_le32(1),
+ },
+ { /* not used */
+ .service_id = __cpu_to_le32(ATH11K_HTC_SVC_ID_TEST_RAW_STREAMS),
+ .pipedir = __cpu_to_le32(PIPEDIR_OUT), /* out = UL = host -> target */
+ .pipenum = __cpu_to_le32(0),
+ },
+ { /* not used */
+ .service_id = __cpu_to_le32(ATH11K_HTC_SVC_ID_TEST_RAW_STREAMS),
+ .pipedir = __cpu_to_le32(PIPEDIR_IN), /* in = DL = target -> host */
+ .pipenum = __cpu_to_le32(1),
+ },
+ {
+ .service_id = __cpu_to_le32(ATH11K_HTC_SVC_ID_HTT_DATA_MSG),
+ .pipedir = __cpu_to_le32(PIPEDIR_OUT), /* out = UL = host -> target */
+ .pipenum = __cpu_to_le32(4),
+ },
+ {
+ .service_id = __cpu_to_le32(ATH11K_HTC_SVC_ID_HTT_DATA_MSG),
+ .pipedir = __cpu_to_le32(PIPEDIR_IN), /* in = DL = target -> host */
+ .pipenum = __cpu_to_le32(1),
+ },
+ {
+ .service_id = __cpu_to_le32(ATH11K_HTC_SVC_ID_PKT_LOG),
+ .pipedir = __cpu_to_le32(PIPEDIR_IN), /* in = DL = target -> host */
+ .pipenum = __cpu_to_le32(5),
+ },
+
+ /* (Additions here) */
+
+ { /* terminator entry */ }
+};
+
+#define ATH11K_IRQ_CE0_OFFSET 4
+
+static const char *irq_name[ATH11K_IRQ_NUM_MAX] = {
+ "misc-pulse1",
+ "misc-latch",
+ "sw-exception",
+ "watchdog",
+ "ce0",
+ "ce1",
+ "ce2",
+ "ce3",
+ "ce4",
+ "ce5",
+ "ce6",
+ "ce7",
+ "ce8",
+ "ce9",
+ "ce10",
+ "ce11",
+ "host2wbm-desc-feed",
+ "host2reo-re-injection",
+ "host2reo-command",
+ "host2rxdma-monitor-ring3",
+ "host2rxdma-monitor-ring2",
+ "host2rxdma-monitor-ring1",
+ "reo2ost-exception",
+ "wbm2host-rx-release",
+ "reo2host-status",
+ "reo2host-destination-ring4",
+ "reo2host-destination-ring3",
+ "reo2host-destination-ring2",
+ "reo2host-destination-ring1",
+ "rxdma2host-monitor-destination-mac3",
+ "rxdma2host-monitor-destination-mac2",
+ "rxdma2host-monitor-destination-mac1",
+ "ppdu-end-interrupts-mac3",
+ "ppdu-end-interrupts-mac2",
+ "ppdu-end-interrupts-mac1",
+ "rxdma2host-monitor-status-ring-mac3",
+ "rxdma2host-monitor-status-ring-mac2",
+ "rxdma2host-monitor-status-ring-mac1",
+ "host2rxdma-host-buf-ring-mac3",
+ "host2rxdma-host-buf-ring-mac2",
+ "host2rxdma-host-buf-ring-mac1",
+ "rxdma2host-destination-ring-mac3",
+ "rxdma2host-destination-ring-mac2",
+ "rxdma2host-destination-ring-mac1",
+ "host2tcl-input-ring4",
+ "host2tcl-input-ring3",
+ "host2tcl-input-ring2",
+ "host2tcl-input-ring1",
+ "wbm2host-tx-completions-ring3",
+ "wbm2host-tx-completions-ring2",
+ "wbm2host-tx-completions-ring1",
+ "tcl2host-status-ring",
+};
+
+#define ATH11K_TX_RING_MASK_0 0x1
+#define ATH11K_TX_RING_MASK_1 0x2
+#define ATH11K_TX_RING_MASK_2 0x4
+
+#define ATH11K_RX_RING_MASK_0 0x1
+#define ATH11K_RX_RING_MASK_1 0x2
+#define ATH11K_RX_RING_MASK_2 0x4
+#define ATH11K_RX_RING_MASK_3 0x8
+
+#define ATH11K_RX_ERR_RING_MASK_0 0x1
+
+#define ATH11K_RX_WBM_REL_RING_MASK_0 0x1
+
+#define ATH11K_REO_STATUS_RING_MASK_0 0x1
+
+#define ATH11K_RXDMA2HOST_RING_MASK_0 0x1
+#define ATH11K_RXDMA2HOST_RING_MASK_1 0x2
+#define ATH11K_RXDMA2HOST_RING_MASK_2 0x4
+
+#define ATH11K_HOST2RXDMA_RING_MASK_0 0x1
+#define ATH11K_HOST2RXDMA_RING_MASK_1 0x2
+#define ATH11K_HOST2RXDMA_RING_MASK_2 0x4
+
+#define ATH11K_RX_MON_STATUS_RING_MASK_0 0x1
+#define ATH11K_RX_MON_STATUS_RING_MASK_1 0x2
+#define ATH11K_RX_MON_STATUS_RING_MASK_2 0x4
+
+const u8 ath11k_tx_ring_mask[ATH11K_EXT_IRQ_GRP_NUM_MAX] = {
+ ATH11K_TX_RING_MASK_0,
+ ATH11K_TX_RING_MASK_1,
+ ATH11K_TX_RING_MASK_2,
+};
+
+const u8 rx_mon_status_ring_mask[ATH11K_EXT_IRQ_GRP_NUM_MAX] = {
+ 0, 0, 0, 0,
+ ATH11K_RX_MON_STATUS_RING_MASK_0,
+ ATH11K_RX_MON_STATUS_RING_MASK_1,
+ ATH11K_RX_MON_STATUS_RING_MASK_2,
+};
+
+const u8 ath11k_rx_ring_mask[ATH11K_EXT_IRQ_GRP_NUM_MAX] = {
+ 0, 0, 0, 0, 0, 0, 0,
+ ATH11K_RX_RING_MASK_0,
+ ATH11K_RX_RING_MASK_1,
+ ATH11K_RX_RING_MASK_2,
+ ATH11K_RX_RING_MASK_3,
+};
+
+const u8 ath11k_rx_err_ring_mask[ATH11K_EXT_IRQ_GRP_NUM_MAX] = {
+ ATH11K_RX_ERR_RING_MASK_0,
+};
+
+const u8 ath11k_rx_wbm_rel_ring_mask[ATH11K_EXT_IRQ_GRP_NUM_MAX] = {
+ ATH11K_RX_WBM_REL_RING_MASK_0,
+};
+
+const u8 ath11k_reo_status_ring_mask[ATH11K_EXT_IRQ_GRP_NUM_MAX] = {
+ ATH11K_REO_STATUS_RING_MASK_0,
+};
+
+const u8 ath11k_rxdma2host_ring_mask[ATH11K_EXT_IRQ_GRP_NUM_MAX] = {
+ ATH11K_RXDMA2HOST_RING_MASK_0,
+ ATH11K_RXDMA2HOST_RING_MASK_1,
+ ATH11K_RXDMA2HOST_RING_MASK_2,
+};
+
+const u8 ath11k_host2rxdma_ring_mask[ATH11K_EXT_IRQ_GRP_NUM_MAX] = {
+ ATH11K_HOST2RXDMA_RING_MASK_0,
+ ATH11K_HOST2RXDMA_RING_MASK_1,
+ ATH11K_HOST2RXDMA_RING_MASK_2,
+};
+
+/* enum ext_irq_num - irq numbers that can be used by external modules
+ * like datapath
+ */
+enum ext_irq_num {
+ host2wbm_desc_feed = 16,
+ host2reo_re_injection,
+ host2reo_command,
+ host2rxdma_monitor_ring3,
+ host2rxdma_monitor_ring2,
+ host2rxdma_monitor_ring1,
+ reo2host_exception,
+ wbm2host_rx_release,
+ reo2host_status,
+ reo2host_destination_ring4,
+ reo2host_destination_ring3,
+ reo2host_destination_ring2,
+ reo2host_destination_ring1,
+ rxdma2host_monitor_destination_mac3,
+ rxdma2host_monitor_destination_mac2,
+ rxdma2host_monitor_destination_mac1,
+ ppdu_end_interrupts_mac3,
+ ppdu_end_interrupts_mac2,
+ ppdu_end_interrupts_mac1,
+ rxdma2host_monitor_status_ring_mac3,
+ rxdma2host_monitor_status_ring_mac2,
+ rxdma2host_monitor_status_ring_mac1,
+ host2rxdma_host_buf_ring_mac3,
+ host2rxdma_host_buf_ring_mac2,
+ host2rxdma_host_buf_ring_mac1,
+ rxdma2host_destination_ring_mac3,
+ rxdma2host_destination_ring_mac2,
+ rxdma2host_destination_ring_mac1,
+ host2tcl_input_ring4,
+ host2tcl_input_ring3,
+ host2tcl_input_ring2,
+ host2tcl_input_ring1,
+ wbm2host_tx_completions_ring3,
+ wbm2host_tx_completions_ring2,
+ wbm2host_tx_completions_ring1,
+ tcl2host_status_ring,
+};
+
+static void ath11k_ahb_kill_tasklets(struct ath11k_base *ab)
+{
+ int i;
+
+ for (i = 0; i < CE_COUNT; i++) {
+ struct ath11k_ce_pipe *ce_pipe = &ab->ce.ce_pipe[i];
+
+ if (ath11k_ce_get_attr_flags(i) & CE_ATTR_DIS_INTR)
+ continue;
+
+ tasklet_kill(&ce_pipe->intr_tq);
+ }
+}
+
+static void ath11k_ahb_ext_grp_disable(struct ath11k_ext_irq_grp *irq_grp)
+{
+ int i;
+
+ for (i = 0; i < irq_grp->num_irq; i++)
+ disable_irq_nosync(irq_grp->ab->irq_num[irq_grp->irqs[i]]);
+}
+
+static void __ath11k_ahb_ext_irq_disable(struct ath11k_base *ab)
+{
+ struct sk_buff *skb;
+ int i;
+
+ for (i = 0; i < ATH11K_EXT_IRQ_GRP_NUM_MAX; i++) {
+ struct ath11k_ext_irq_grp *irq_grp = &ab->ext_irq_grp[i];
+
+ ath11k_ahb_ext_grp_disable(irq_grp);
+
+ napi_synchronize(&irq_grp->napi);
+ napi_disable(&irq_grp->napi);
+
+ while ((skb = __skb_dequeue(&irq_grp->pending_q)))
+ dev_kfree_skb_any(skb);
+ }
+}
+
+static void ath11k_ahb_ext_grp_enable(struct ath11k_ext_irq_grp *irq_grp)
+{
+ int i;
+
+ for (i = 0; i < irq_grp->num_irq; i++)
+ enable_irq(irq_grp->ab->irq_num[irq_grp->irqs[i]]);
+}
+
+static void ath11k_ahb_setbit32(struct ath11k_base *ab, u8 bit, u32 offset)
+{
+ u32 val;
+
+ val = ath11k_ahb_read32(ab, offset);
+ ath11k_ahb_write32(ab, offset, val | BIT(bit));
+}
+
+static void ath11k_ahb_clearbit32(struct ath11k_base *ab, u8 bit, u32 offset)
+{
+ u32 val;
+
+ val = ath11k_ahb_read32(ab, offset);
+ ath11k_ahb_write32(ab, offset, val & ~BIT(bit));
+}
+
+static void ath11k_ahb_ce_irq_enable(struct ath11k_base *ab, u16 ce_id)
+{
+ const struct ce_pipe_config *ce_config;
+
+ ce_config = &target_ce_config_wlan[ce_id];
+ if (__le32_to_cpu(ce_config->pipedir) & PIPEDIR_OUT)
+ ath11k_ahb_setbit32(ab, ce_id, CE_HOST_IE_ADDRESS);
+
+ if (__le32_to_cpu(ce_config->pipedir) & PIPEDIR_IN) {
+ ath11k_ahb_setbit32(ab, ce_id, CE_HOST_IE_2_ADDRESS);
+ ath11k_ahb_setbit32(ab, ce_id + CE_HOST_IE_3_SHIFT,
+ CE_HOST_IE_3_ADDRESS);
+ }
+}
+
+static void ath11k_ahb_ce_irq_disable(struct ath11k_base *ab, u16 ce_id)
+{
+ const struct ce_pipe_config *ce_config;
+
+ ce_config = &target_ce_config_wlan[ce_id];
+ if (__le32_to_cpu(ce_config->pipedir) & PIPEDIR_OUT)
+ ath11k_ahb_clearbit32(ab, ce_id, CE_HOST_IE_ADDRESS);
+
+ if (__le32_to_cpu(ce_config->pipedir) & PIPEDIR_IN) {
+ ath11k_ahb_clearbit32(ab, ce_id, CE_HOST_IE_2_ADDRESS);
+ ath11k_ahb_clearbit32(ab, ce_id + CE_HOST_IE_3_SHIFT,
+ CE_HOST_IE_3_ADDRESS);
+ }
+}
+
+static void ath11k_ahb_sync_ce_irqs(struct ath11k_base *ab)
+{
+ int i;
+ int irq_idx;
+
+ for (i = 0; i < CE_COUNT; i++) {
+ if (ath11k_ce_get_attr_flags(i) & CE_ATTR_DIS_INTR)
+ continue;
+
+ irq_idx = ATH11K_IRQ_CE0_OFFSET + i;
+ synchronize_irq(ab->irq_num[irq_idx]);
+ }
+}
+
+static void ath11k_ahb_sync_ext_irqs(struct ath11k_base *ab)
+{
+ int i, j;
+ int irq_idx;
+
+ for (i = 0; i < ATH11K_EXT_IRQ_GRP_NUM_MAX; i++) {
+ struct ath11k_ext_irq_grp *irq_grp = &ab->ext_irq_grp[i];
+
+ for (j = 0; j < irq_grp->num_irq; j++) {
+ irq_idx = irq_grp->irqs[j];
+ synchronize_irq(ab->irq_num[irq_idx]);
+ }
+ }
+}
+
+static void ath11k_ahb_ce_irqs_enable(struct ath11k_base *ab)
+{
+ int i;
+
+ for (i = 0; i < CE_COUNT; i++) {
+ if (ath11k_ce_get_attr_flags(i) & CE_ATTR_DIS_INTR)
+ continue;
+ ath11k_ahb_ce_irq_enable(ab, i);
+ }
+}
+
+static void ath11k_ahb_ce_irqs_disable(struct ath11k_base *ab)
+{
+ int i;
+
+ for (i = 0; i < CE_COUNT; i++) {
+ if (ath11k_ce_get_attr_flags(i) & CE_ATTR_DIS_INTR)
+ continue;
+ ath11k_ahb_ce_irq_disable(ab, i);
+ }
+}
+
+int ath11k_ahb_start(struct ath11k_base *ab)
+{
+ ath11k_ahb_ce_irqs_enable(ab);
+ ath11k_ce_rx_post_buf(ab);
+
+ return 0;
+}
+
+void ath11k_ahb_ext_irq_enable(struct ath11k_base *ab)
+{
+ int i;
+
+ for (i = 0; i < ATH11K_EXT_IRQ_GRP_NUM_MAX; i++) {
+ struct ath11k_ext_irq_grp *irq_grp = &ab->ext_irq_grp[i];
+
+ napi_enable(&irq_grp->napi);
+ ath11k_ahb_ext_grp_enable(irq_grp);
+ }
+}
+
+void ath11k_ahb_ext_irq_disable(struct ath11k_base *ab)
+{
+ __ath11k_ahb_ext_irq_disable(ab);
+ ath11k_ahb_sync_ext_irqs(ab);
+}
+
+void ath11k_ahb_stop(struct ath11k_base *ab)
+{
+ if (!test_bit(ATH11K_FLAG_CRASH_FLUSH, &ab->dev_flags))
+ ath11k_ahb_ce_irqs_disable(ab);
+ ath11k_ahb_sync_ce_irqs(ab);
+ ath11k_ahb_kill_tasklets(ab);
+ del_timer_sync(&ab->rx_replenish_retry);
+ ath11k_ce_cleanup_pipes(ab);
+}
+
+int ath11k_ahb_power_up(struct ath11k_base *ab)
+{
+ int ret;
+
+ ret = rproc_boot(ab->tgt_rproc);
+ if (ret)
+ ath11k_err(ab, "failed to boot the remote processor Q6\n");
+
+ return ret;
+}
+
+void ath11k_ahb_power_down(struct ath11k_base *ab)
+{
+ rproc_shutdown(ab->tgt_rproc);
+}
+
+static void ath11k_ahb_init_qmi_ce_config(struct ath11k_base *ab)
+{
+ struct ath11k_qmi_ce_cfg *cfg = &ab->qmi.ce_cfg;
+
+ cfg->tgt_ce_len = ARRAY_SIZE(target_ce_config_wlan) - 1;
+ cfg->tgt_ce = target_ce_config_wlan;
+ cfg->svc_to_ce_map_len = ARRAY_SIZE(target_service_to_ce_map_wlan);
+ cfg->svc_to_ce_map = target_service_to_ce_map_wlan;
+}
+
+static void ath11k_ahb_free_ext_irq(struct ath11k_base *ab)
+{
+ int i, j;
+
+ for (i = 0; i < ATH11K_EXT_IRQ_GRP_NUM_MAX; i++) {
+ struct ath11k_ext_irq_grp *irq_grp = &ab->ext_irq_grp[i];
+
+ for (j = 0; j < irq_grp->num_irq; j++)
+ free_irq(ab->irq_num[irq_grp->irqs[j]], irq_grp);
+ }
+}
+
+static void ath11k_ahb_free_irq(struct ath11k_base *ab)
+{
+ int irq_idx;
+ int i;
+
+ for (i = 0; i < CE_COUNT; i++) {
+ if (ath11k_ce_get_attr_flags(i) & CE_ATTR_DIS_INTR)
+ continue;
+ irq_idx = ATH11K_IRQ_CE0_OFFSET + i;
+ free_irq(ab->irq_num[irq_idx], &ab->ce.ce_pipe[i]);
+ }
+
+ ath11k_ahb_free_ext_irq(ab);
+}
+
+static void ath11k_ahb_ce_tasklet(unsigned long data)
+{
+ struct ath11k_ce_pipe *ce_pipe = (struct ath11k_ce_pipe *)data;
+
+ ath11k_ce_per_engine_service(ce_pipe->ab, ce_pipe->pipe_num);
+
+ ath11k_ahb_ce_irq_enable(ce_pipe->ab, ce_pipe->pipe_num);
+}
+
+static irqreturn_t ath11k_ahb_ce_interrupt_handler(int irq, void *arg)
+{
+ struct ath11k_ce_pipe *ce_pipe = arg;
+
+ ath11k_ahb_ce_irq_disable(ce_pipe->ab, ce_pipe->pipe_num);
+
+ tasklet_schedule(&ce_pipe->intr_tq);
+
+ return IRQ_HANDLED;
+}
+
+static int ath11k_ahb_ext_grp_napi_poll(struct napi_struct *napi, int budget)
+{
+ struct ath11k_ext_irq_grp *irq_grp = container_of(napi,
+ struct ath11k_ext_irq_grp,
+ napi);
+ struct ath11k_base *ab = irq_grp->ab;
+ int work_done;
+
+ work_done = ath11k_dp_service_srng(ab, irq_grp, budget);
+ if (work_done < budget) {
+ napi_complete_done(napi, work_done);
+ ath11k_ahb_ext_grp_enable(irq_grp);
+ }
+
+ if (work_done > budget)
+ work_done = budget;
+
+ return work_done;
+}
+
+static irqreturn_t ath11k_ahb_ext_interrupt_handler(int irq, void *arg)
+{
+ struct ath11k_ext_irq_grp *irq_grp = arg;
+
+ ath11k_ahb_ext_grp_disable(irq_grp);
+
+ napi_schedule(&irq_grp->napi);
+
+ return IRQ_HANDLED;
+}
+
+static int ath11k_ahb_ext_irq_config(struct ath11k_base *ab)
+{
+ int i, j;
+ int irq;
+ int ret;
+
+ for (i = 0; i < ATH11K_EXT_IRQ_GRP_NUM_MAX; i++) {
+ struct ath11k_ext_irq_grp *irq_grp = &ab->ext_irq_grp[i];
+ u32 num_irq = 0;
+
+ irq_grp->ab = ab;
+ irq_grp->grp_id = i;
+ init_dummy_netdev(&irq_grp->napi_ndev);
+ netif_napi_add(&irq_grp->napi_ndev, &irq_grp->napi,
+ ath11k_ahb_ext_grp_napi_poll, NAPI_POLL_WEIGHT);
+ __skb_queue_head_init(&irq_grp->pending_q);
+
+ for (j = 0; j < ATH11K_EXT_IRQ_NUM_MAX; j++) {
+ if (ath11k_tx_ring_mask[i] & BIT(j)) {
+ irq_grp->irqs[num_irq++] =
+ wbm2host_tx_completions_ring1 - j;
+ }
+
+ if (ath11k_rx_ring_mask[i] & BIT(j)) {
+ irq_grp->irqs[num_irq++] =
+ reo2host_destination_ring1 - j;
+ }
+
+ if (ath11k_rx_err_ring_mask[i] & BIT(j))
+ irq_grp->irqs[num_irq++] = reo2host_exception;
+
+ if (ath11k_rx_wbm_rel_ring_mask[i] & BIT(j))
+ irq_grp->irqs[num_irq++] = wbm2host_rx_release;
+
+ if (ath11k_reo_status_ring_mask[i] & BIT(j))
+ irq_grp->irqs[num_irq++] = reo2host_status;
+
+ if (j < MAX_RADIOS) {
+ if (ath11k_rxdma2host_ring_mask[i] & BIT(j)) {
+ irq_grp->irqs[num_irq++] =
+ rxdma2host_destination_ring_mac1
+ - ath11k_core_get_hw_mac_id(ab, j);
+ }
+
+ if (ath11k_host2rxdma_ring_mask[i] & BIT(j)) {
+ irq_grp->irqs[num_irq++] =
+ host2rxdma_host_buf_ring_mac1
+ - ath11k_core_get_hw_mac_id(ab, j);
+ }
+
+ if (rx_mon_status_ring_mask[i] & BIT(j)) {
+ irq_grp->irqs[num_irq++] =
+ ppdu_end_interrupts_mac1 -
+ ath11k_core_get_hw_mac_id(ab, j);
+ irq_grp->irqs[num_irq++] =
+ rxdma2host_monitor_status_ring_mac1 -
+ ath11k_core_get_hw_mac_id(ab, j);
+ }
+ }
+ }
+ irq_grp->num_irq = num_irq;
+
+ for (j = 0; j < irq_grp->num_irq; j++) {
+ int irq_idx = irq_grp->irqs[j];
+
+ irq = platform_get_irq_byname(ab->pdev,
+ irq_name[irq_idx]);
+ ab->irq_num[irq_idx] = irq;
+ irq_set_status_flags(irq, IRQ_NOAUTOEN);
+ ret = request_irq(irq, ath11k_ahb_ext_interrupt_handler,
+ IRQF_TRIGGER_RISING,
+ irq_name[irq_idx], irq_grp);
+ if (ret) {
+ ath11k_err(ab, "failed request_irq for %d\n",
+ irq);
+ }
+ }
+ }
+
+ return 0;
+}
+
+static int ath11k_ahb_config_irq(struct ath11k_base *ab)
+{
+ int irq, irq_idx, i;
+ int ret;
+
+ /* Configure CE irqs */
+ for (i = 0; i < CE_COUNT; i++) {
+ struct ath11k_ce_pipe *ce_pipe = &ab->ce.ce_pipe[i];
+
+ if (ath11k_ce_get_attr_flags(i) & CE_ATTR_DIS_INTR)
+ continue;
+
+ irq_idx = ATH11K_IRQ_CE0_OFFSET + i;
+
+ tasklet_init(&ce_pipe->intr_tq, ath11k_ahb_ce_tasklet,
+ (unsigned long)ce_pipe);
+ irq = platform_get_irq_byname(ab->pdev, irq_name[irq_idx]);
+ ret = request_irq(irq, ath11k_ahb_ce_interrupt_handler,
+ IRQF_TRIGGER_RISING, irq_name[irq_idx],
+ ce_pipe);
+ if (ret)
+ return ret;
+
+ ab->irq_num[irq_idx] = irq;
+ }
+
+ /* Configure external interrupts */
+ ret = ath11k_ahb_ext_irq_config(ab);
+
+ return ret;
+}
+
+int ath11k_ahb_map_service_to_pipe(struct ath11k_base *ab, u16 service_id,
+ u8 *ul_pipe, u8 *dl_pipe)
+{
+ const struct service_to_pipe *entry;
+ bool ul_set = false, dl_set = false;
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(target_service_to_ce_map_wlan); i++) {
+ entry = &target_service_to_ce_map_wlan[i];
+
+ if (__le32_to_cpu(entry->service_id) != service_id)
+ continue;
+
+ switch (__le32_to_cpu(entry->pipedir)) {
+ case PIPEDIR_NONE:
+ break;
+ case PIPEDIR_IN:
+ WARN_ON(dl_set);
+ *dl_pipe = __le32_to_cpu(entry->pipenum);
+ dl_set = true;
+ break;
+ case PIPEDIR_OUT:
+ WARN_ON(ul_set);
+ *ul_pipe = __le32_to_cpu(entry->pipenum);
+ ul_set = true;
+ break;
+ case PIPEDIR_INOUT:
+ WARN_ON(dl_set);
+ WARN_ON(ul_set);
+ *dl_pipe = __le32_to_cpu(entry->pipenum);
+ *ul_pipe = __le32_to_cpu(entry->pipenum);
+ dl_set = true;
+ ul_set = true;
+ break;
+ }
+ }
+
+ if (WARN_ON(!ul_set || !dl_set))
+ return -ENOENT;
+
+ return 0;
+}
+
+static int ath11k_ahb_probe(struct platform_device *pdev)
+{
+ struct ath11k_base *ab;
+ const struct of_device_id *of_id;
+ struct resource *mem_res;
+ void __iomem *mem;
+ int ret;
+
+ of_id = of_match_device(ath11k_ahb_of_match, &pdev->dev);
+ if (!of_id) {
+ dev_err(&pdev->dev, "failed to find matching device tree id\n");
+ return -EINVAL;
+ }
+
+ mem_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!mem_res) {
+ dev_err(&pdev->dev, "failed to get IO memory resource\n");
+ return -ENXIO;
+ }
+
+ mem = devm_ioremap_resource(&pdev->dev, mem_res);
+ if (IS_ERR(mem)) {
+ dev_err(&pdev->dev, "ioremap error\n");
+ return PTR_ERR(mem);
+ }
+
+ ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
+ if (ret) {
+ dev_err(&pdev->dev, "failed to set 32-bit consistent dma\n");
+ return ret;
+ }
+
+ ab = ath11k_core_alloc(&pdev->dev);
+ if (!ab) {
+ dev_err(&pdev->dev, "failed to allocate ath11k base\n");
+ return -ENOMEM;
+ }
+
+ ab->pdev = pdev;
+ ab->hw_rev = (enum ath11k_hw_rev)of_id->data;
+ ab->mem = mem;
+ ab->mem_len = resource_size(mem_res);
+ platform_set_drvdata(pdev, ab);
+
+ ret = ath11k_hal_srng_init(ab);
+ if (ret)
+ goto err_core_free;
+
+ ret = ath11k_ce_alloc_pipes(ab);
+ if (ret) {
+ ath11k_err(ab, "failed to allocate ce pipes: %d\n", ret);
+ goto err_hal_srng_deinit;
+ }
+
+ ath11k_ahb_init_qmi_ce_config(ab);
+
+ ret = ath11k_ahb_config_irq(ab);
+ if (ret) {
+ ath11k_err(ab, "failed to configure irq: %d\n", ret);
+ goto err_ce_free;
+ }
+
+ ret = ath11k_core_init(ab);
+ if (ret) {
+ ath11k_err(ab, "failed to init core: %d\n", ret);
+ goto err_ce_free;
+ }
+
+ return 0;
+
+err_ce_free:
+ ath11k_ce_free_pipes(ab);
+
+err_hal_srng_deinit:
+ ath11k_hal_srng_deinit(ab);
+
+err_core_free:
+ ath11k_core_free(ab);
+ platform_set_drvdata(pdev, NULL);
+
+ return ret;
+}
+
+static int ath11k_ahb_remove(struct platform_device *pdev)
+{
+ struct ath11k_base *ab = platform_get_drvdata(pdev);
+
+ reinit_completion(&ab->driver_recovery);
+
+ if (test_bit(ATH11K_FLAG_RECOVERY, &ab->dev_flags))
+ wait_for_completion_timeout(&ab->driver_recovery,
+ ATH11K_AHB_RECOVERY_TIMEOUT);
+
+ set_bit(ATH11K_FLAG_UNREGISTERING, &ab->dev_flags);
+ cancel_work_sync(&ab->restart_work);
+
+ ath11k_core_deinit(ab);
+ ath11k_ahb_free_irq(ab);
+
+ ath11k_hal_srng_deinit(ab);
+ ath11k_ce_free_pipes(ab);
+ ath11k_core_free(ab);
+ platform_set_drvdata(pdev, NULL);
+
+ return 0;
+}
+
+static struct platform_driver ath11k_ahb_driver = {
+ .driver = {
+ .name = "ath11k",
+ .of_match_table = ath11k_ahb_of_match,
+ },
+ .probe = ath11k_ahb_probe,
+ .remove = ath11k_ahb_remove,
+};
+
+int ath11k_ahb_init(void)
+{
+ return platform_driver_register(&ath11k_ahb_driver);
+}
+
+void ath11k_ahb_exit(void)
+{
+ platform_driver_unregister(&ath11k_ahb_driver);
+}
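Two contracts in the file above are easy to miss. The NAPI poll handler re-enables the group's interrupts only when work_done < budget, as the NAPI API requires; and ath11k_ahb_map_service_to_pipe() resolves an HTC service against the static table. An illustrative lookup from a hypothetical caller:

u8 ul_pipe, dl_pipe;
int ret;

ret = ath11k_ahb_map_service_to_pipe(ab, ATH11K_HTC_SVC_ID_WMI_CONTROL,
				     &ul_pipe, &dl_pipe);
if (!ret)
	ath11k_dbg(ab, ATH11K_DBG_AHB, "wmi control ul pipe %u dl pipe %u\n",
		   ul_pipe, dl_pipe);
/* with the table above this resolves to ul_pipe == 3, dl_pipe == 2 */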
diff --git a/drivers/net/wireless/ath/ath11k/ahb.h b/drivers/net/wireless/ath/ath11k/ahb.h
new file mode 100644
index 000000000000..93f46dfe22df
--- /dev/null
+++ b/drivers/net/wireless/ath/ath11k/ahb.h
@@ -0,0 +1,35 @@
+/* SPDX-License-Identifier: BSD-3-Clause-Clear */
+/*
+ * Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
+ */
+#ifndef ATH11K_AHB_H
+#define ATH11K_AHB_H
+
+#include "core.h"
+
+#define ATH11K_AHB_RECOVERY_TIMEOUT (3 * HZ)
+struct ath11k_base;
+
+static inline u32 ath11k_ahb_read32(struct ath11k_base *ab, u32 offset)
+{
+ return ioread32(ab->mem + offset);
+}
+
+static inline void ath11k_ahb_write32(struct ath11k_base *ab, u32 offset, u32 value)
+{
+ iowrite32(value, ab->mem + offset);
+}
+
+void ath11k_ahb_ext_irq_enable(struct ath11k_base *ab);
+void ath11k_ahb_ext_irq_disable(struct ath11k_base *ab);
+int ath11k_ahb_start(struct ath11k_base *ab);
+void ath11k_ahb_stop(struct ath11k_base *ab);
+int ath11k_ahb_power_up(struct ath11k_base *ab);
+void ath11k_ahb_power_down(struct ath11k_base *ab);
+int ath11k_ahb_map_service_to_pipe(struct ath11k_base *ab, u16 service_id,
+ u8 *ul_pipe, u8 *dl_pipe);
+
+int ath11k_ahb_init(void);
+void ath11k_ahb_exit(void);
+
+#endif
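The accessors above are thin ioread32()/iowrite32() wrappers over the device's mapped register window; ahb.c composes them into read-modify-write helpers such as ath11k_ahb_setbit32(). A sketch of the same pattern generalized to a mask, with a hypothetical helper name:

static inline void example_ahb_set_mask32(struct ath11k_base *ab,
					  u32 offset, u32 mask)
{
	u32 val = ath11k_ahb_read32(ab, offset);

	ath11k_ahb_write32(ab, offset, val | mask);
}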
diff --git a/drivers/net/wireless/ath/ath11k/ce.c b/drivers/net/wireless/ath/ath11k/ce.c
new file mode 100644
index 000000000000..cdd40c8fc867
--- /dev/null
+++ b/drivers/net/wireless/ath/ath11k/ce.c
@@ -0,0 +1,808 @@
+// SPDX-License-Identifier: BSD-3-Clause-Clear
+/*
+ * Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
+ */
+
+#include "dp_rx.h"
+#include "debug.h"
+
+static const struct ce_attr host_ce_config_wlan[] = {
+ /* CE0: host->target HTC control and raw streams */
+ {
+ .flags = CE_ATTR_FLAGS,
+ .src_nentries = 16,
+ .src_sz_max = 2048,
+ .dest_nentries = 0,
+ },
+
+ /* CE1: target->host HTT + HTC control */
+ {
+ .flags = CE_ATTR_FLAGS,
+ .src_nentries = 0,
+ .src_sz_max = 2048,
+ .dest_nentries = 512,
+ .recv_cb = ath11k_htc_rx_completion_handler,
+ },
+
+ /* CE2: target->host WMI */
+ {
+ .flags = CE_ATTR_FLAGS,
+ .src_nentries = 0,
+ .src_sz_max = 2048,
+ .dest_nentries = 512,
+ .recv_cb = ath11k_htc_rx_completion_handler,
+ },
+
+ /* CE3: host->target WMI (mac0) */
+ {
+ .flags = CE_ATTR_FLAGS,
+ .src_nentries = 32,
+ .src_sz_max = 2048,
+ .dest_nentries = 0,
+ },
+
+ /* CE4: host->target HTT */
+ {
+ .flags = CE_ATTR_FLAGS | CE_ATTR_DIS_INTR,
+ .src_nentries = 2048,
+ .src_sz_max = 256,
+ .dest_nentries = 0,
+ },
+
+ /* CE5: target->host pktlog */
+ {
+ .flags = CE_ATTR_FLAGS,
+ .src_nentries = 0,
+ .src_sz_max = 2048,
+ .dest_nentries = 512,
+ .recv_cb = ath11k_dp_htt_htc_t2h_msg_handler,
+ },
+
+ /* CE6: target autonomous hif_memcpy */
+ {
+ .flags = CE_ATTR_FLAGS | CE_ATTR_DIS_INTR,
+ .src_nentries = 0,
+ .src_sz_max = 0,
+ .dest_nentries = 0,
+ },
+
+ /* CE7: host->target WMI (mac1) */
+ {
+ .flags = CE_ATTR_FLAGS,
+ .src_nentries = 32,
+ .src_sz_max = 2048,
+ .dest_nentries = 0,
+ },
+
+ /* CE8: target autonomous hif_memcpy */
+ {
+ .flags = CE_ATTR_FLAGS,
+ .src_nentries = 0,
+ .src_sz_max = 0,
+ .dest_nentries = 0,
+ },
+
+ /* CE9: host->target WMI (mac2) */
+ {
+ .flags = CE_ATTR_FLAGS,
+ .src_nentries = 32,
+ .src_sz_max = 2048,
+ .dest_nentries = 0,
+ },
+
+ /* CE10: target->host HTT */
+ {
+ .flags = CE_ATTR_FLAGS,
+ .src_nentries = 0,
+ .src_sz_max = 2048,
+ .dest_nentries = 512,
+ .recv_cb = ath11k_htc_rx_completion_handler,
+ },
+
+ /* CE11: Not used */
+ {
+ .flags = CE_ATTR_FLAGS,
+ .src_nentries = 0,
+ .src_sz_max = 0,
+ .dest_nentries = 0,
+ },
+};
+
+static int ath11k_ce_rx_buf_enqueue_pipe(struct ath11k_ce_pipe *pipe,
+ struct sk_buff *skb, dma_addr_t paddr)
+{
+ struct ath11k_base *ab = pipe->ab;
+ struct ath11k_ce_ring *ring = pipe->dest_ring;
+ struct hal_srng *srng;
+ unsigned int write_index;
+ unsigned int nentries_mask = ring->nentries_mask;
+ u32 *desc;
+ int ret;
+
+ lockdep_assert_held(&ab->ce.ce_lock);
+
+ write_index = ring->write_index;
+
+ srng = &ab->hal.srng_list[ring->hal_ring_id];
+
+ spin_lock_bh(&srng->lock);
+
+ ath11k_hal_srng_access_begin(ab, srng);
+
+ if (unlikely(ath11k_hal_srng_src_num_free(ab, srng, false) < 1)) {
+ ret = -ENOSPC;
+ goto exit;
+ }
+
+ desc = ath11k_hal_srng_src_get_next_entry(ab, srng);
+ if (!desc) {
+ ret = -ENOSPC;
+ goto exit;
+ }
+
+ ath11k_hal_ce_dst_set_desc(desc, paddr);
+
+ ring->skb[write_index] = skb;
+ write_index = CE_RING_IDX_INCR(nentries_mask, write_index);
+ ring->write_index = write_index;
+
+ pipe->rx_buf_needed--;
+
+ ret = 0;
+exit:
+ ath11k_hal_srng_access_end(ab, srng);
+
+ spin_unlock_bh(&srng->lock);
+
+ return ret;
+}
+
+static int ath11k_ce_rx_post_pipe(struct ath11k_ce_pipe *pipe)
+{
+ struct ath11k_base *ab = pipe->ab;
+ struct sk_buff *skb;
+ dma_addr_t paddr;
+ int ret = 0;
+
+ if (!(pipe->dest_ring || pipe->status_ring))
+ return 0;
+
+ spin_lock_bh(&ab->ce.ce_lock);
+ while (pipe->rx_buf_needed) {
+ skb = dev_alloc_skb(pipe->buf_sz);
+ if (!skb) {
+ ret = -ENOMEM;
+ goto exit;
+ }
+
+ WARN_ON_ONCE(!IS_ALIGNED((unsigned long)skb->data, 4));
+
+ paddr = dma_map_single(ab->dev, skb->data,
+ skb->len + skb_tailroom(skb),
+ DMA_FROM_DEVICE);
+ if (unlikely(dma_mapping_error(ab->dev, paddr))) {
+ ath11k_warn(ab, "failed to dma map ce rx buf\n");
+ dev_kfree_skb_any(skb);
+ ret = -EIO;
+ goto exit;
+ }
+
+ ATH11K_SKB_RXCB(skb)->paddr = paddr;
+
+ ret = ath11k_ce_rx_buf_enqueue_pipe(pipe, skb, paddr);
+
+ if (ret) {
+ ath11k_warn(ab, "failed to enqueue rx buf: %d\n", ret);
+ dma_unmap_single(ab->dev, paddr,
+ skb->len + skb_tailroom(skb),
+ DMA_FROM_DEVICE);
+ dev_kfree_skb_any(skb);
+ goto exit;
+ }
+ }
+
+exit:
+ spin_unlock_bh(&ab->ce.ce_lock);
+ return ret;
+}
+
+static int ath11k_ce_completed_recv_next(struct ath11k_ce_pipe *pipe,
+ struct sk_buff **skb, int *nbytes)
+{
+ struct ath11k_base *ab = pipe->ab;
+ struct hal_srng *srng;
+ unsigned int sw_index;
+ unsigned int nentries_mask;
+ u32 *desc;
+ int ret = 0;
+
+ spin_lock_bh(&ab->ce.ce_lock);
+
+ sw_index = pipe->dest_ring->sw_index;
+ nentries_mask = pipe->dest_ring->nentries_mask;
+
+ srng = &ab->hal.srng_list[pipe->status_ring->hal_ring_id];
+
+ spin_lock_bh(&srng->lock);
+
+ ath11k_hal_srng_access_begin(ab, srng);
+
+ desc = ath11k_hal_srng_dst_get_next_entry(ab, srng);
+ if (!desc) {
+ ret = -EIO;
+ goto err;
+ }
+
+ *nbytes = ath11k_hal_ce_dst_status_get_length(desc);
+ if (*nbytes == 0) {
+ ret = -EIO;
+ goto err;
+ }
+
+ *skb = pipe->dest_ring->skb[sw_index];
+ pipe->dest_ring->skb[sw_index] = NULL;
+
+ sw_index = CE_RING_IDX_INCR(nentries_mask, sw_index);
+ pipe->dest_ring->sw_index = sw_index;
+
+ pipe->rx_buf_needed++;
+err:
+ ath11k_hal_srng_access_end(ab, srng);
+
+ spin_unlock_bh(&srng->lock);
+
+ spin_unlock_bh(&ab->ce.ce_lock);
+
+ return ret;
+}
+
+static void ath11k_ce_recv_process_cb(struct ath11k_ce_pipe *pipe)
+{
+ struct ath11k_base *ab = pipe->ab;
+ struct sk_buff *skb;
+ struct sk_buff_head list;
+ unsigned int nbytes, max_nbytes;
+ int ret;
+
+ __skb_queue_head_init(&list);
+ while (ath11k_ce_completed_recv_next(pipe, &skb, &nbytes) == 0) {
+ max_nbytes = skb->len + skb_tailroom(skb);
+ dma_unmap_single(ab->dev, ATH11K_SKB_RXCB(skb)->paddr,
+ max_nbytes, DMA_FROM_DEVICE);
+
+ if (unlikely(max_nbytes < nbytes)) {
+ ath11k_warn(ab, "rxed more than expected (nbytes %d, max %d)\n",
+ nbytes, max_nbytes);
+ dev_kfree_skb_any(skb);
+ continue;
+ }
+
+ skb_put(skb, nbytes);
+ __skb_queue_tail(&list, skb);
+ }
+
+ while ((skb = __skb_dequeue(&list))) {
+ ath11k_dbg(ab, ATH11K_DBG_AHB, "rx ce pipe %d len %d\n",
+ pipe->pipe_num, skb->len);
+ pipe->recv_cb(ab, skb);
+ }
+
+ ret = ath11k_ce_rx_post_pipe(pipe);
+ if (ret && ret != -ENOSPC) {
+ ath11k_warn(ab, "failed to post rx buf to pipe: %d err: %d\n",
+ pipe->pipe_num, ret);
+ mod_timer(&ab->rx_replenish_retry,
+ jiffies + ATH11K_CE_RX_POST_RETRY_JIFFIES);
+ }
+}
+
+static struct sk_buff *ath11k_ce_completed_send_next(struct ath11k_ce_pipe *pipe)
+{
+ struct ath11k_base *ab = pipe->ab;
+ struct hal_srng *srng;
+ unsigned int sw_index;
+ unsigned int nentries_mask;
+ struct sk_buff *skb;
+ u32 *desc;
+
+ spin_lock_bh(&ab->ce.ce_lock);
+
+ sw_index = pipe->src_ring->sw_index;
+ nentries_mask = pipe->src_ring->nentries_mask;
+
+ srng = &ab->hal.srng_list[pipe->src_ring->hal_ring_id];
+
+ spin_lock_bh(&srng->lock);
+
+ ath11k_hal_srng_access_begin(ab, srng);
+
+ desc = ath11k_hal_srng_src_reap_next(ab, srng);
+ if (!desc) {
+ skb = ERR_PTR(-EIO);
+ goto err_unlock;
+ }
+
+ skb = pipe->src_ring->skb[sw_index];
+
+ pipe->src_ring->skb[sw_index] = NULL;
+
+ sw_index = CE_RING_IDX_INCR(nentries_mask, sw_index);
+ pipe->src_ring->sw_index = sw_index;
+
+err_unlock:
+ spin_unlock_bh(&srng->lock);
+
+ spin_unlock_bh(&ab->ce.ce_lock);
+
+ return skb;
+}
+
+static void ath11k_ce_send_done_cb(struct ath11k_ce_pipe *pipe)
+{
+ struct ath11k_base *ab = pipe->ab;
+ struct sk_buff *skb;
+
+ while (!IS_ERR(skb = ath11k_ce_completed_send_next(pipe))) {
+ if (!skb)
+ continue;
+
+ dma_unmap_single(ab->dev, ATH11K_SKB_CB(skb)->paddr, skb->len,
+ DMA_TO_DEVICE);
+ dev_kfree_skb_any(skb);
+ }
+}
+
+static int ath11k_ce_init_ring(struct ath11k_base *ab,
+ struct ath11k_ce_ring *ce_ring,
+ int ce_id, enum hal_ring_type type)
+{
+ struct hal_srng_params params = { 0 };
+ int ret;
+
+ params.ring_base_paddr = ce_ring->base_addr_ce_space;
+ params.ring_base_vaddr = ce_ring->base_addr_owner_space;
+ params.num_entries = ce_ring->nentries;
+
+ switch (type) {
+ case HAL_CE_SRC:
+ if (!(CE_ATTR_DIS_INTR & host_ce_config_wlan[ce_id].flags))
+ params.intr_batch_cntr_thres_entries = 1;
+ break;
+ case HAL_CE_DST:
+ params.max_buffer_len = host_ce_config_wlan[ce_id].src_sz_max;
+ if (!(host_ce_config_wlan[ce_id].flags & CE_ATTR_DIS_INTR)) {
+ params.intr_timer_thres_us = 1024;
+ params.flags |= HAL_SRNG_FLAGS_LOW_THRESH_INTR_EN;
+ params.low_threshold = ce_ring->nentries - 3;
+ }
+ break;
+ case HAL_CE_DST_STATUS:
+ if (!(host_ce_config_wlan[ce_id].flags & CE_ATTR_DIS_INTR)) {
+ params.intr_batch_cntr_thres_entries = 1;
+ params.intr_timer_thres_us = 0x1000;
+ }
+ break;
+ default:
+ ath11k_warn(ab, "Invalid CE ring type %d\n", type);
+ return -EINVAL;
+ }
+
+ /* TODO: Init other params needed by HAL to init the ring */
+
+ ret = ath11k_hal_srng_setup(ab, type, ce_id, 0, &params);
+ if (ret < 0) {
+ ath11k_warn(ab, "failed to setup srng: %d ring_id %d\n",
+ ret, ce_id);
+ return ret;
+ }
+ ce_ring->hal_ring_id = ret;
+
+ return 0;
+}
+
+static struct ath11k_ce_ring *
+ath11k_ce_alloc_ring(struct ath11k_base *ab, int nentries, int desc_sz)
+{
+ struct ath11k_ce_ring *ce_ring;
+ dma_addr_t base_addr;
+
+ ce_ring = kzalloc(struct_size(ce_ring, skb, nentries), GFP_KERNEL);
+ if (!ce_ring)
+ return ERR_PTR(-ENOMEM);
+
+ ce_ring->nentries = nentries;
+ ce_ring->nentries_mask = nentries - 1;
+
+ /* Legacy platforms that do not support cache
+ * coherent DMA are unsupported
+ */
+ ce_ring->base_addr_owner_space_unaligned =
+ dma_alloc_coherent(ab->dev,
+ nentries * desc_sz + CE_DESC_RING_ALIGN,
+ &base_addr, GFP_KERNEL);
+ if (!ce_ring->base_addr_owner_space_unaligned) {
+ kfree(ce_ring);
+ return ERR_PTR(-ENOMEM);
+ }
+
+ ce_ring->base_addr_ce_space_unaligned = base_addr;
+
+ ce_ring->base_addr_owner_space = PTR_ALIGN(
+ ce_ring->base_addr_owner_space_unaligned,
+ CE_DESC_RING_ALIGN);
+ ce_ring->base_addr_ce_space = ALIGN(
+ ce_ring->base_addr_ce_space_unaligned,
+ CE_DESC_RING_ALIGN);
+
+ return ce_ring;
+}
+
+static int ath11k_ce_alloc_pipe(struct ath11k_base *ab, int ce_id)
+{
+ struct ath11k_ce_pipe *pipe = &ab->ce.ce_pipe[ce_id];
+ const struct ce_attr *attr = &host_ce_config_wlan[ce_id];
+ struct ath11k_ce_ring *ring;
+ int nentries;
+ int desc_sz;
+
+ pipe->attr_flags = attr->flags;
+
+ if (attr->src_nentries) {
+ pipe->send_cb = ath11k_ce_send_done_cb;
+ nentries = roundup_pow_of_two(attr->src_nentries);
+ desc_sz = ath11k_hal_ce_get_desc_size(HAL_CE_DESC_SRC);
+ ring = ath11k_ce_alloc_ring(ab, nentries, desc_sz);
+ if (IS_ERR(ring))
+ return PTR_ERR(ring);
+ pipe->src_ring = ring;
+ }
+
+ if (attr->dest_nentries) {
+ pipe->recv_cb = attr->recv_cb;
+ nentries = roundup_pow_of_two(attr->dest_nentries);
+ desc_sz = ath11k_hal_ce_get_desc_size(HAL_CE_DESC_DST);
+ ring = ath11k_ce_alloc_ring(ab, nentries, desc_sz);
+ if (IS_ERR(ring))
+ return PTR_ERR(ring);
+ pipe->dest_ring = ring;
+
+ desc_sz = ath11k_hal_ce_get_desc_size(HAL_CE_DESC_DST_STATUS);
+ ring = ath11k_ce_alloc_ring(ab, nentries, desc_sz);
+ if (IS_ERR(ring))
+ return PTR_ERR(ring);
+ pipe->status_ring = ring;
+ }
+
+ return 0;
+}
+
+void ath11k_ce_per_engine_service(struct ath11k_base *ab, u16 ce_id)
+{
+ struct ath11k_ce_pipe *pipe = &ab->ce.ce_pipe[ce_id];
+
+ if (pipe->send_cb)
+ pipe->send_cb(pipe);
+
+ if (pipe->recv_cb)
+ ath11k_ce_recv_process_cb(pipe);
+}
+
+void ath11k_ce_poll_send_completed(struct ath11k_base *ab, u8 pipe_id)
+{
+ struct ath11k_ce_pipe *pipe = &ab->ce.ce_pipe[pipe_id];
+
+ if ((pipe->attr_flags & CE_ATTR_DIS_INTR) && pipe->send_cb)
+ pipe->send_cb(pipe);
+}
+
+int ath11k_ce_send(struct ath11k_base *ab, struct sk_buff *skb, u8 pipe_id,
+ u16 transfer_id)
+{
+ struct ath11k_ce_pipe *pipe = &ab->ce.ce_pipe[pipe_id];
+ struct hal_srng *srng;
+ u32 *desc;
+ unsigned int write_index, sw_index;
+ unsigned int nentries_mask;
+ int ret = 0;
+ u8 byte_swap_data = 0;
+ int num_used;
+
+ /* Check if some entries could be regained by handling tx completion if
+ * the CE has interrupts disabled and the used entries is more than the
+ * defined usage threshold.
+ */
+ if (pipe->attr_flags & CE_ATTR_DIS_INTR) {
+ spin_lock_bh(&ab->ce.ce_lock);
+ write_index = pipe->src_ring->write_index;
+
+ sw_index = pipe->src_ring->sw_index;
+
+ if (write_index >= sw_index)
+ num_used = write_index - sw_index;
+ else
+ num_used = pipe->src_ring->nentries - sw_index +
+ write_index;
+
+ spin_unlock_bh(&ab->ce.ce_lock);
+
+ if (num_used > ATH11K_CE_USAGE_THRESHOLD)
+ ath11k_ce_poll_send_completed(ab, pipe->pipe_num);
+ }
+
+ if (test_bit(ATH11K_FLAG_CRASH_FLUSH, &ab->dev_flags))
+ return -ESHUTDOWN;
+
+ spin_lock_bh(&ab->ce.ce_lock);
+
+ write_index = pipe->src_ring->write_index;
+ nentries_mask = pipe->src_ring->nentries_mask;
+
+ srng = &ab->hal.srng_list[pipe->src_ring->hal_ring_id];
+
+ spin_lock_bh(&srng->lock);
+
+ ath11k_hal_srng_access_begin(ab, srng);
+
+ if (unlikely(ath11k_hal_srng_src_num_free(ab, srng, false) < 1)) {
+ ath11k_hal_srng_access_end(ab, srng);
+ ret = -ENOBUFS;
+ goto err_unlock;
+ }
+
+ desc = ath11k_hal_srng_src_get_next_reaped(ab, srng);
+ if (!desc) {
+ ath11k_hal_srng_access_end(ab, srng);
+ ret = -ENOBUFS;
+ goto err_unlock;
+ }
+
+ if (pipe->attr_flags & CE_ATTR_BYTE_SWAP_DATA)
+ byte_swap_data = 1;
+
+ ath11k_hal_ce_src_set_desc(desc, ATH11K_SKB_CB(skb)->paddr,
+ skb->len, transfer_id, byte_swap_data);
+
+ pipe->src_ring->skb[write_index] = skb;
+ pipe->src_ring->write_index = CE_RING_IDX_INCR(nentries_mask,
+ write_index);
+
+ ath11k_hal_srng_access_end(ab, srng);
+
+ spin_unlock_bh(&srng->lock);
+
+ spin_unlock_bh(&ab->ce.ce_lock);
+
+ return 0;
+
+err_unlock:
+ spin_unlock_bh(&srng->lock);
+
+ spin_unlock_bh(&ab->ce.ce_lock);
+
+ return ret;
+}
+
+static void ath11k_ce_rx_pipe_cleanup(struct ath11k_ce_pipe *pipe)
+{
+ struct ath11k_base *ab = pipe->ab;
+ struct ath11k_ce_ring *ring = pipe->dest_ring;
+ struct sk_buff *skb;
+ int i;
+
+ if (!(ring && pipe->buf_sz))
+ return;
+
+ for (i = 0; i < ring->nentries; i++) {
+ skb = ring->skb[i];
+ if (!skb)
+ continue;
+
+ ring->skb[i] = NULL;
+ dma_unmap_single(ab->dev, ATH11K_SKB_RXCB(skb)->paddr,
+ skb->len + skb_tailroom(skb), DMA_FROM_DEVICE);
+ dev_kfree_skb_any(skb);
+ }
+}
+
+void ath11k_ce_cleanup_pipes(struct ath11k_base *ab)
+{
+ struct ath11k_ce_pipe *pipe;
+ int pipe_num;
+
+ for (pipe_num = 0; pipe_num < CE_COUNT; pipe_num++) {
+ pipe = &ab->ce.ce_pipe[pipe_num];
+ ath11k_ce_rx_pipe_cleanup(pipe);
+
+ /* Cleanup any src CE's which have interrupts disabled */
+ ath11k_ce_poll_send_completed(ab, pipe_num);
+
+ /* NOTE: Should we also clean up tx buffer in all pipes? */
+ }
+}
+
+void ath11k_ce_rx_post_buf(struct ath11k_base *ab)
+{
+ struct ath11k_ce_pipe *pipe;
+ int i;
+ int ret;
+
+ for (i = 0; i < CE_COUNT; i++) {
+ pipe = &ab->ce.ce_pipe[i];
+ ret = ath11k_ce_rx_post_pipe(pipe);
+ if (ret) {
+ if (ret == -ENOSPC)
+ continue;
+
+ ath11k_warn(ab, "failed to post rx buf to pipe: %d err: %d\n",
+ i, ret);
+ mod_timer(&ab->rx_replenish_retry,
+ jiffies + ATH11K_CE_RX_POST_RETRY_JIFFIES);
+
+ return;
+ }
+ }
+}
+
+void ath11k_ce_rx_replenish_retry(struct timer_list *t)
+{
+ struct ath11k_base *ab = from_timer(ab, t, rx_replenish_retry);
+
+ ath11k_ce_rx_post_buf(ab);
+}
+
+int ath11k_ce_init_pipes(struct ath11k_base *ab)
+{
+ struct ath11k_ce_pipe *pipe;
+ int i;
+ int ret;
+
+ for (i = 0; i < CE_COUNT; i++) {
+ pipe = &ab->ce.ce_pipe[i];
+
+ if (pipe->src_ring) {
+ ret = ath11k_ce_init_ring(ab, pipe->src_ring, i,
+ HAL_CE_SRC);
+ if (ret) {
+ ath11k_warn(ab, "failed to init src ring: %d\n",
+ ret);
+ /* Should we clear any partial init */
+ return ret;
+ }
+
+ pipe->src_ring->write_index = 0;
+ pipe->src_ring->sw_index = 0;
+ }
+
+ if (pipe->dest_ring) {
+ ret = ath11k_ce_init_ring(ab, pipe->dest_ring, i,
+ HAL_CE_DST);
+ if (ret) {
+ ath11k_warn(ab, "failed to init dest ring: %d\n",
+ ret);
+ /* Should we clear any partial init */
+ return ret;
+ }
+
+ pipe->rx_buf_needed = pipe->dest_ring->nentries ?
+ pipe->dest_ring->nentries - 2 : 0;
+
+ pipe->dest_ring->write_index = 0;
+ pipe->dest_ring->sw_index = 0;
+ }
+
+ if (pipe->status_ring) {
+ ret = ath11k_ce_init_ring(ab, pipe->status_ring, i,
+ HAL_CE_DST_STATUS);
+ if (ret) {
+ ath11k_warn(ab, "failed to init dest status ring: %d\n",
+ ret);
+ /* Should we clear any partial init */
+ return ret;
+ }
+
+ pipe->status_ring->write_index = 0;
+ pipe->status_ring->sw_index = 0;
+ }
+ }
+
+ return 0;
+}
+
+void ath11k_ce_free_pipes(struct ath11k_base *ab)
+{
+ struct ath11k_ce_pipe *pipe;
+ int desc_sz;
+ int i;
+
+ for (i = 0; i < CE_COUNT; i++) {
+ pipe = &ab->ce.ce_pipe[i];
+
+ if (pipe->src_ring) {
+ desc_sz = ath11k_hal_ce_get_desc_size(HAL_CE_DESC_SRC);
+ dma_free_coherent(ab->dev,
+ pipe->src_ring->nentries * desc_sz +
+ CE_DESC_RING_ALIGN,
+ pipe->src_ring->base_addr_owner_space,
+ pipe->src_ring->base_addr_ce_space);
+ kfree(pipe->src_ring);
+ pipe->src_ring = NULL;
+ }
+
+ if (pipe->dest_ring) {
+ desc_sz = ath11k_hal_ce_get_desc_size(HAL_CE_DESC_DST);
+ dma_free_coherent(ab->dev,
+ pipe->dest_ring->nentries * desc_sz +
+ CE_DESC_RING_ALIGN,
+ pipe->dest_ring->base_addr_owner_space,
+ pipe->dest_ring->base_addr_ce_space);
+ kfree(pipe->dest_ring);
+ pipe->dest_ring = NULL;
+ }
+
+ if (pipe->status_ring) {
+ desc_sz =
+ ath11k_hal_ce_get_desc_size(HAL_CE_DESC_DST_STATUS);
+ dma_free_coherent(ab->dev,
+ pipe->status_ring->nentries * desc_sz +
+ CE_DESC_RING_ALIGN,
+ pipe->status_ring->base_addr_owner_space,
+ pipe->status_ring->base_addr_ce_space);
+ kfree(pipe->status_ring);
+ pipe->status_ring = NULL;
+ }
+ }
+}
+
+int ath11k_ce_alloc_pipes(struct ath11k_base *ab)
+{
+ struct ath11k_ce_pipe *pipe;
+ int i;
+ int ret;
+ const struct ce_attr *attr;
+
+ spin_lock_init(&ab->ce.ce_lock);
+
+ for (i = 0; i < CE_COUNT; i++) {
+ attr = &host_ce_config_wlan[i];
+ pipe = &ab->ce.ce_pipe[i];
+ pipe->pipe_num = i;
+ pipe->ab = ab;
+ pipe->buf_sz = attr->src_sz_max;
+
+ ret = ath11k_ce_alloc_pipe(ab, i);
+ if (ret) {
+ /* Free any partially successful allocation */
+ ath11k_ce_free_pipes(ab);
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+/* For Big Endian Host, Copy Engine byte_swap is enabled
+ * When Copy Engine does byte_swap, need to byte swap again for the
+ * Host to get/put buffer content in the correct byte order
+ */
+void ath11k_ce_byte_swap(void *mem, u32 len)
+{
+ int i;
+
+ if (IS_ENABLED(CONFIG_CPU_BIG_ENDIAN)) {
+ if (!mem)
+ return;
+
+ for (i = 0; i < (len / 4); i++) {
+ *(u32 *)mem = swab32(*(u32 *)mem);
+ mem += 4;
+ }
+ }
+}
+
+int ath11k_ce_get_attr_flags(int ce_id)
+{
+ if (ce_id >= CE_COUNT)
+ return -EINVAL;
+
+ return host_ce_config_wlan[ce_id].flags;
+}
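All the ring bookkeeping above relies on power-of-two ring sizes (enforced via roundup_pow_of_two() in ath11k_ce_alloc_pipe()), which lets CE_RING_IDX_INCR wrap an index with a mask instead of a modulo. A worked example:

/* Worked example of the masked increment (CE_RING_IDX_INCR, ce.h) */
unsigned int nentries = 512;
unsigned int nentries_mask = nentries - 1;	/* 0x1ff */
unsigned int idx = 511;

idx = CE_RING_IDX_INCR(nentries_mask, idx);	/* (511 + 1) & 0x1ff == 0 */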
diff --git a/drivers/net/wireless/ath/ath11k/ce.h b/drivers/net/wireless/ath/ath11k/ce.h
new file mode 100644
index 000000000000..e355dfda48bf
--- /dev/null
+++ b/drivers/net/wireless/ath/ath11k/ce.h
@@ -0,0 +1,183 @@
+/* SPDX-License-Identifier: BSD-3-Clause-Clear */
+/*
+ * Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
+ */
+
+#ifndef ATH11K_CE_H
+#define ATH11K_CE_H
+
+#define CE_COUNT 12
+
+/* Byte swap data words */
+#define CE_ATTR_BYTE_SWAP_DATA 2
+
+/* no interrupt on copy completion */
+#define CE_ATTR_DIS_INTR 8
+
+/* Host software's Copy Engine configuration. */
+#ifdef __BIG_ENDIAN
+#define CE_ATTR_FLAGS CE_ATTR_BYTE_SWAP_DATA
+#else
+#define CE_ATTR_FLAGS 0
+#endif
+
+/* Threshold to poll for tx completion in case of Interrupt disabled CE's */
+#define ATH11K_CE_USAGE_THRESHOLD 32
+
+void ath11k_ce_byte_swap(void *mem, u32 len);
+
+/*
+ * Directions for interconnect pipe configuration.
+ * These definitions may be used during configuration and are shared
+ * between Host and Target.
+ *
+ * Pipe Directions are relative to the Host, so PIPEDIR_IN means
+ * "coming IN over air through Target to Host" as with a WiFi Rx operation.
+ * Conversely, PIPEDIR_OUT means "going OUT from Host through Target over air"
+ * as with a WiFi Tx operation. This is somewhat awkward for the "middle-man"
+ * Target since things that are "PIPEDIR_OUT" are coming IN to the Target
+ * over the interconnect.
+ */
+#define PIPEDIR_NONE 0
+#define PIPEDIR_IN 1 /* Target-->Host, WiFi Rx direction */
+#define PIPEDIR_OUT 2 /* Host->Target, WiFi Tx direction */
+#define PIPEDIR_INOUT 3 /* bidirectional */
+#define PIPEDIR_INOUT_H2H 4 /* bidirectional, host to host */
+
+/* CE address/mask */
+#define CE_HOST_IE_ADDRESS 0x00A1803C
+#define CE_HOST_IE_2_ADDRESS 0x00A18040
+#define CE_HOST_IE_3_ADDRESS CE_HOST_IE_ADDRESS
+
+#define CE_HOST_IE_3_SHIFT 0xC
+
+#define CE_RING_IDX_INCR(nentries_mask, idx) (((idx) + 1) & (nentries_mask))
+
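+/* Worked example (illustrative, not from the original patch): with
+ * nentries = 8 the mask is 7, so CE_RING_IDX_INCR(7, 7) wraps to 0 and
+ * CE_RING_IDX_INCR(7, 3) advances to 4; the power-of-2 ring size turns
+ * the modulo into a single AND.
+ */
+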
+#define ATH11K_CE_RX_POST_RETRY_JIFFIES 50
+
+struct ath11k_base;
+
+/*
+ * Establish a mapping between a service/direction and a pipe.
+ * Configuration information for a Copy Engine pipe and services.
+ * Passed from Host to Target through QMI message and must be in
+ * little endian format.
+ */
+struct service_to_pipe {
+ __le32 service_id;
+ __le32 pipedir;
+ __le32 pipenum;
+};
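+
+/* Illustrative sketch (hypothetical service id, not from this patch):
+ * each entry is built in little endian before being sent over QMI, e.g.
+ *
+ *	static const struct service_to_pipe example_map = {
+ *		.service_id = __cpu_to_le32(0x100),
+ *		.pipedir = __cpu_to_le32(PIPEDIR_OUT),
+ *		.pipenum = __cpu_to_le32(3),
+ *	};
+ */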
+
+/*
+ * Configuration information for a Copy Engine pipe.
+ * Passed from Host to Target through QMI message during startup (one per CE).
+ *
+ * NOTE: Structure is shared between Host software and Target firmware!
+ */
+struct ce_pipe_config {
+ __le32 pipenum;
+ __le32 pipedir;
+ __le32 nentries;
+ __le32 nbytes_max;
+ __le32 flags;
+ __le32 reserved;
+};
+
+struct ce_attr {
+ /* CE_ATTR_* values */
+ unsigned int flags;
+
+ /* #entries in source ring - Must be a power of 2 */
+ unsigned int src_nentries;
+
+ /*
+ * Max source send size for this CE.
+ * This is also the minimum size of a destination buffer.
+ */
+ unsigned int src_sz_max;
+
+ /* #entries in destination ring - Must be a power of 2 */
+ unsigned int dest_nentries;
+
+ void (*recv_cb)(struct ath11k_base *, struct sk_buff *);
+};
+
+#define CE_DESC_RING_ALIGN 8
+
+struct ath11k_ce_ring {
+ /* Number of entries in this ring; must be power of 2 */
+ unsigned int nentries;
+ unsigned int nentries_mask;
+
+ /* For dest ring, this is the next index to be processed
+ * by software after it was/is received into.
+ *
+ * For src ring, this is the last descriptor that was sent
+ * and completion processed by software.
+ *
+ * Regardless of src or dest ring, this is an invariant
+ * (modulo ring size):
+ * write index >= read index >= sw_index
+ */
+ unsigned int sw_index;
+ /* cached copy */
+ unsigned int write_index;
+
+ /* Start of DMA-coherent area reserved for descriptors */
+ /* Host address space */
+ void *base_addr_owner_space_unaligned;
+ /* CE address space */
+ u32 base_addr_ce_space_unaligned;
+
+ /* Actual start of descriptors.
+ * Aligned to descriptor-size boundary.
+ * Points into reserved DMA-coherent area, above.
+ */
+ /* Host address space */
+ void *base_addr_owner_space;
+
+ /* CE address space */
+ u32 base_addr_ce_space;
+
+ /* HAL ring id */
+ u32 hal_ring_id;
+
+ /* keep last */
+ struct sk_buff *skb[];
+};
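+
+/* Illustrative helper (a sketch, not part of the original patch): given
+ * the index invariant documented above and the power-of-2 ring size,
+ * the number of entries the producer has written but software has not
+ * yet processed is a masked difference.
+ */
+static inline unsigned int ath11k_ce_ring_pending(struct ath11k_ce_ring *ring)
+{
+ return (ring->write_index - ring->sw_index) & ring->nentries_mask;
+}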
+
+struct ath11k_ce_pipe {
+ struct ath11k_base *ab;
+ u16 pipe_num;
+ unsigned int attr_flags;
+ unsigned int buf_sz;
+ unsigned int rx_buf_needed;
+
+ void (*send_cb)(struct ath11k_ce_pipe *);
+ void (*recv_cb)(struct ath11k_base *, struct sk_buff *);
+
+ struct tasklet_struct intr_tq;
+ struct ath11k_ce_ring *src_ring;
+ struct ath11k_ce_ring *dest_ring;
+ struct ath11k_ce_ring *status_ring;
+};
+
+struct ath11k_ce {
+ struct ath11k_ce_pipe ce_pipe[CE_COUNT];
+ /* Protects rings of all ce pipes */
+ spinlock_t ce_lock;
+};
+
+void ath11k_ce_cleanup_pipes(struct ath11k_base *ab);
+void ath11k_ce_rx_replenish_retry(struct timer_list *t);
+void ath11k_ce_per_engine_service(struct ath11k_base *ab, u16 ce_id);
+int ath11k_ce_send(struct ath11k_base *ab, struct sk_buff *skb, u8 pipe_id,
+ u16 transfer_id);
+void ath11k_ce_rx_post_buf(struct ath11k_base *ab);
+int ath11k_ce_init_pipes(struct ath11k_base *ab);
+int ath11k_ce_alloc_pipes(struct ath11k_base *ab);
+void ath11k_ce_free_pipes(struct ath11k_base *ab);
+int ath11k_ce_get_attr_flags(int ce_id);
+void ath11k_ce_poll_send_completed(struct ath11k_base *ab, u8 pipe_id);
+#endif
diff --git a/drivers/net/wireless/ath/ath11k/core.c b/drivers/net/wireless/ath/ath11k/core.c
new file mode 100644
index 000000000000..9e823056e673
--- /dev/null
+++ b/drivers/net/wireless/ath/ath11k/core.c
@@ -0,0 +1,795 @@
+// SPDX-License-Identifier: BSD-3-Clause-Clear
+/*
+ * Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
+ */
+
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/remoteproc.h>
+#include <linux/firmware.h>
+#include "ahb.h"
+#include "core.h"
+#include "dp_tx.h"
+#include "dp_rx.h"
+#include "debug.h"
+
+unsigned int ath11k_debug_mask;
+module_param_named(debug_mask, ath11k_debug_mask, uint, 0644);
+MODULE_PARM_DESC(debug_mask, "Debugging mask");
+
+static const struct ath11k_hw_params ath11k_hw_params = {
+ .name = "ipq8074",
+ .fw = {
+ .dir = IPQ8074_FW_DIR,
+ .board_size = IPQ8074_MAX_BOARD_DATA_SZ,
+ .cal_size = IPQ8074_MAX_CAL_DATA_SZ,
+ },
+};
+
+/* Map from pdev index to hw mac index */
+u8 ath11k_core_get_hw_mac_id(struct ath11k_base *ab, int pdev_idx)
+{
+ switch (pdev_idx) {
+ case 0:
+ return 0;
+ case 1:
+ return 2;
+ case 2:
+ return 1;
+ default:
+ ath11k_warn(ab, "invalid pdev idx %d\n", pdev_idx);
+ return ATH11K_INVALID_HW_MAC_ID;
+ }
+}
+
+static int ath11k_core_create_board_name(struct ath11k_base *ab, char *name,
+ size_t name_len)
+{
+ /* Note: the bus is fixed to AHB for now. Make this dynamic once
+ * other bus types are supported.
+ */
+ scnprintf(name, name_len,
+ "bus=ahb,qmi-chip-id=%d,qmi-board-id=%d",
+ ab->qmi.target.chip_id,
+ ab->qmi.target.board_id);
+
+ ath11k_dbg(ab, ATH11K_DBG_BOOT, "boot using board name '%s'\n", name);
+
+ return 0;
+}
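+
+/* For example (values illustrative), a target reporting chip_id 0 and
+ * board_id 255 produces the board name:
+ *
+ * bus=ahb,qmi-chip-id=0,qmi-board-id=255
+ */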
+
+static const struct firmware *ath11k_fetch_fw_file(struct ath11k_base *ab,
+ const char *dir,
+ const char *file)
+{
+ char filename[100];
+ const struct firmware *fw;
+ int ret;
+
+ if (file == NULL)
+ return ERR_PTR(-ENOENT);
+
+ if (dir == NULL)
+ dir = ".";
+
+ snprintf(filename, sizeof(filename), "%s/%s", dir, file);
+ ret = firmware_request_nowarn(&fw, filename, ab->dev);
+ ath11k_dbg(ab, ATH11K_DBG_BOOT, "boot fw request '%s': %d\n",
+ filename, ret);
+
+ if (ret)
+ return ERR_PTR(ret);
+ ath11k_dbg(ab, ATH11K_DBG_BOOT, "boot fw file '%s', size: %zu\n",
+ filename, fw->size);
+
+ return fw;
+}
+
+void ath11k_core_free_bdf(struct ath11k_base *ab, struct ath11k_board_data *bd)
+{
+ if (!IS_ERR(bd->fw))
+ release_firmware(bd->fw);
+
+ memset(bd, 0, sizeof(*bd));
+}
+
+static int ath11k_core_parse_bd_ie_board(struct ath11k_base *ab,
+ struct ath11k_board_data *bd,
+ const void *buf, size_t buf_len,
+ const char *boardname,
+ int bd_ie_type)
+{
+ const struct ath11k_fw_ie *hdr;
+ bool name_match_found;
+ int ret, board_ie_id;
+ size_t board_ie_len;
+ const void *board_ie_data;
+
+ name_match_found = false;
+
+ /* go through ATH11K_BD_IE_BOARD_ elements */
+ while (buf_len > sizeof(struct ath11k_fw_ie)) {
+ hdr = buf;
+ board_ie_id = le32_to_cpu(hdr->id);
+ board_ie_len = le32_to_cpu(hdr->len);
+ board_ie_data = hdr->data;
+
+ buf_len -= sizeof(*hdr);
+ buf += sizeof(*hdr);
+
+ if (buf_len < ALIGN(board_ie_len, 4)) {
+ ath11k_err(ab, "invalid ATH11K_BD_IE_BOARD length: %zu < %zu\n",
+ buf_len, ALIGN(board_ie_len, 4));
+ ret = -EINVAL;
+ goto out;
+ }
+
+ switch (board_ie_id) {
+ case ATH11K_BD_IE_BOARD_NAME:
+ ath11k_dbg_dump(ab, ATH11K_DBG_BOOT, "board name", "",
+ board_ie_data, board_ie_len);
+
+ if (board_ie_len != strlen(boardname))
+ break;
+
+ ret = memcmp(board_ie_data, boardname, strlen(boardname));
+ if (ret)
+ break;
+
+ name_match_found = true;
+ ath11k_dbg(ab, ATH11K_DBG_BOOT,
+ "boot found match for name '%s'",
+ boardname);
+ break;
+ case ATH11K_BD_IE_BOARD_DATA:
+ if (!name_match_found)
+ /* no match found */
+ break;
+
+ ath11k_dbg(ab, ATH11K_DBG_BOOT,
+ "boot found board data for '%s'", boardname);
+
+ bd->data = board_ie_data;
+ bd->len = board_ie_len;
+
+ ret = 0;
+ goto out;
+ default:
+ ath11k_warn(ab, "unknown ATH11K_BD_IE_BOARD found: %d\n",
+ board_ie_id);
+ break;
+ }
+
+ /* jump over the padding */
+ board_ie_len = ALIGN(board_ie_len, 4);
+
+ buf_len -= board_ie_len;
+ buf += board_ie_len;
+ }
+
+ /* no match found */
+ ret = -ENOENT;
+
+out:
+ return ret;
+}
+
+static int ath11k_core_fetch_board_data_api_n(struct ath11k_base *ab,
+ struct ath11k_board_data *bd,
+ const char *boardname)
+{
+ size_t len, magic_len;
+ const u8 *data;
+ char *filename = ATH11K_BOARD_API2_FILE;
+ size_t ie_len;
+ struct ath11k_fw_ie *hdr;
+ int ret, ie_id;
+
+ if (!bd->fw)
+ bd->fw = ath11k_fetch_fw_file(ab,
+ ab->hw_params.fw.dir,
+ filename);
+ if (IS_ERR(bd->fw))
+ return PTR_ERR(bd->fw);
+
+ data = bd->fw->data;
+ len = bd->fw->size;
+
+ /* the stored magic string includes a trailing NUL byte */
+ magic_len = strlen(ATH11K_BOARD_MAGIC) + 1;
+ if (len < magic_len) {
+ ath11k_err(ab, "failed to find magic value in %s/%s, file too short: %zu\n",
+ ab->hw_params.fw.dir, filename, len);
+ ret = -EINVAL;
+ goto err;
+ }
+
+ if (memcmp(data, ATH11K_BOARD_MAGIC, magic_len)) {
+ ath11k_err(ab, "found invalid board magic\n");
+ ret = -EINVAL;
+ goto err;
+ }
+
+ /* magic is padded to 4 bytes */
+ magic_len = ALIGN(magic_len, 4);
+ if (len < magic_len) {
+ ath11k_err(ab, "failed: %s/%s too small to contain board data, len: %zu\n",
+ ab->hw_params.fw.dir, filename, len);
+ ret = -EINVAL;
+ goto err;
+ }
+
+ data += magic_len;
+ len -= magic_len;
+
+ while (len > sizeof(struct ath11k_fw_ie)) {
+ hdr = (struct ath11k_fw_ie *)data;
+ ie_id = le32_to_cpu(hdr->id);
+ ie_len = le32_to_cpu(hdr->len);
+
+ len -= sizeof(*hdr);
+ data = hdr->data;
+
+ if (len < ALIGN(ie_len, 4)) {
+ ath11k_err(ab, "invalid length for board ie_id %d ie_len %zu len %zu\n",
+ ie_id, ie_len, len);
+ ret = -EINVAL;
+ goto err;
+ }
+
+ switch (ie_id) {
+ case ATH11K_BD_IE_BOARD:
+ ret = ath11k_core_parse_bd_ie_board(ab, bd, data,
+ ie_len,
+ boardname,
+ ATH11K_BD_IE_BOARD);
+ if (ret == -ENOENT)
+ /* no match found, continue */
+ break;
+ else if (ret)
+ /* there was an error, bail out */
+ goto err;
+ /* either found or error, so stop searching */
+ goto out;
+ }
+
+ /* jump over the padding */
+ ie_len = ALIGN(ie_len, 4);
+
+ len -= ie_len;
+ data += ie_len;
+ }
+
+out:
+ if (!bd->data || !bd->len) {
+ ath11k_err(ab,
+ "failed to fetch board data for %s from %s/%s\n",
+ boardname, ab->hw_params.fw.dir, filename);
+ ret = -ENODATA;
+ goto err;
+ }
+
+ return 0;
+
+err:
+ ath11k_core_free_bdf(ab, bd);
+ return ret;
+}
+
+static int ath11k_core_fetch_board_data_api_1(struct ath11k_base *ab,
+ struct ath11k_board_data *bd)
+{
+ bd->fw = ath11k_fetch_fw_file(ab,
+ ab->hw_params.fw.dir,
+ ATH11K_DEFAULT_BOARD_FILE);
+ if (IS_ERR(bd->fw))
+ return PTR_ERR(bd->fw);
+
+ bd->data = bd->fw->data;
+ bd->len = bd->fw->size;
+
+ return 0;
+}
+
+#define BOARD_NAME_SIZE 100
+int ath11k_core_fetch_bdf(struct ath11k_base *ab, struct ath11k_board_data *bd)
+{
+ char boardname[BOARD_NAME_SIZE];
+ int ret;
+
+ ret = ath11k_core_create_board_name(ab, boardname, BOARD_NAME_SIZE);
+ if (ret) {
+ ath11k_err(ab, "failed to create board name: %d\n", ret);
+ return ret;
+ }
+
+ ab->bd_api = 2;
+ ret = ath11k_core_fetch_board_data_api_n(ab, bd, boardname);
+ if (!ret)
+ goto success;
+
+ ab->bd_api = 1;
+ ret = ath11k_core_fetch_board_data_api_1(ab, bd);
+ if (ret) {
+ ath11k_err(ab, "failed to fetch board-2.bin or board.bin from %s\n",
+ ab->hw_params.fw.dir);
+ return ret;
+ }
+
+success:
+ ath11k_dbg(ab, ATH11K_DBG_BOOT, "using board api %d\n", ab->bd_api);
+ return 0;
+}
+
+static void ath11k_core_stop(struct ath11k_base *ab)
+{
+ if (!test_bit(ATH11K_FLAG_CRASH_FLUSH, &ab->dev_flags))
+ ath11k_qmi_firmware_stop(ab);
+ ath11k_ahb_stop(ab);
+ ath11k_wmi_detach(ab);
+ ath11k_dp_pdev_reo_cleanup(ab);
+
+ /* De-Init of components as needed */
+}
+
+static int ath11k_core_soc_create(struct ath11k_base *ab)
+{
+ int ret;
+
+ ret = ath11k_qmi_init_service(ab);
+ if (ret) {
+ ath11k_err(ab, "failed to initialize qmi: %d\n", ret);
+ return ret;
+ }
+
+ ret = ath11k_debug_soc_create(ab);
+ if (ret) {
+ ath11k_err(ab, "failed to create ath11k debugfs\n");
+ goto err_qmi_deinit;
+ }
+
+ ret = ath11k_ahb_power_up(ab);
+ if (ret) {
+ ath11k_err(ab, "failed to power up: %d\n", ret);
+ goto err_debugfs_reg;
+ }
+
+ return 0;
+
+err_debugfs_reg:
+ ath11k_debug_soc_destroy(ab);
+err_qmi_deinit:
+ ath11k_qmi_deinit_service(ab);
+ return ret;
+}
+
+static void ath11k_core_soc_destroy(struct ath11k_base *ab)
+{
+ ath11k_debug_soc_destroy(ab);
+ ath11k_dp_free(ab);
+ ath11k_reg_free(ab);
+ ath11k_qmi_deinit_service(ab);
+}
+
+static int ath11k_core_pdev_create(struct ath11k_base *ab)
+{
+ int ret;
+
+ ret = ath11k_debug_pdev_create(ab);
+ if (ret) {
+ ath11k_err(ab, "failed to create core pdev debugfs: %d\n", ret);
+ return ret;
+ }
+
+ ret = ath11k_mac_register(ab);
+ if (ret) {
+ ath11k_err(ab, "failed to register the radio with mac80211: %d\n", ret);
+ goto err_pdev_debug;
+ }
+
+ ret = ath11k_dp_pdev_alloc(ab);
+ if (ret) {
+ ath11k_err(ab, "failed to attach DP pdev: %d\n", ret);
+ goto err_mac_unregister;
+ }
+
+ return 0;
+
+err_mac_unregister:
+ ath11k_mac_unregister(ab);
+
+err_pdev_debug:
+ ath11k_debug_pdev_destroy(ab);
+
+ return ret;
+}
+
+static void ath11k_core_pdev_destroy(struct ath11k_base *ab)
+{
+ ath11k_mac_unregister(ab);
+ ath11k_ahb_ext_irq_disable(ab);
+ ath11k_dp_pdev_free(ab);
+ ath11k_debug_pdev_destroy(ab);
+}
+
+static int ath11k_core_start(struct ath11k_base *ab,
+ enum ath11k_firmware_mode mode)
+{
+ int ret;
+
+ ret = ath11k_qmi_firmware_start(ab, mode);
+ if (ret) {
+ ath11k_err(ab, "failed to start firmware: %d\n", ret);
+ return ret;
+ }
+
+ ret = ath11k_wmi_attach(ab);
+ if (ret) {
+ ath11k_err(ab, "failed to attach wmi: %d\n", ret);
+ goto err_firmware_stop;
+ }
+
+ ret = ath11k_htc_init(ab);
+ if (ret) {
+ ath11k_err(ab, "failed to init htc: %d\n", ret);
+ goto err_wmi_detach;
+ }
+
+ ret = ath11k_ahb_start(ab);
+ if (ret) {
+ ath11k_err(ab, "failed to start HIF: %d\n", ret);
+ goto err_wmi_detach;
+ }
+
+ ret = ath11k_htc_wait_target(&ab->htc);
+ if (ret) {
+ ath11k_err(ab, "failed to connect to HTC: %d\n", ret);
+ goto err_hif_stop;
+ }
+
+ ret = ath11k_dp_htt_connect(&ab->dp);
+ if (ret) {
+ ath11k_err(ab, "failed to connect to HTT: %d\n", ret);
+ goto err_hif_stop;
+ }
+
+ ret = ath11k_wmi_connect(ab);
+ if (ret) {
+ ath11k_err(ab, "failed to connect wmi: %d\n", ret);
+ goto err_hif_stop;
+ }
+
+ ret = ath11k_htc_start(&ab->htc);
+ if (ret) {
+ ath11k_err(ab, "failed to start HTC: %d\n", ret);
+ goto err_hif_stop;
+ }
+
+ ret = ath11k_wmi_wait_for_service_ready(ab);
+ if (ret) {
+ ath11k_err(ab, "failed to receive wmi service ready event: %d\n",
+ ret);
+ goto err_hif_stop;
+ }
+
+ ret = ath11k_mac_allocate(ab);
+ if (ret) {
+ ath11k_err(ab, "failed to create new hw device with mac80211: %d\n",
+ ret);
+ goto err_hif_stop;
+ }
+
+ ath11k_dp_pdev_pre_alloc(ab);
+
+ ret = ath11k_dp_pdev_reo_setup(ab);
+ if (ret) {
+ ath11k_err(ab, "failed to initialize reo destination rings: %d\n", ret);
+ goto err_mac_destroy;
+ }
+
+ ret = ath11k_wmi_cmd_init(ab);
+ if (ret) {
+ ath11k_err(ab, "failed to send wmi init cmd: %d\n", ret);
+ goto err_reo_cleanup;
+ }
+
+ ret = ath11k_wmi_wait_for_unified_ready(ab);
+ if (ret) {
+ ath11k_err(ab, "failed to receive wmi unified ready event: %d\n",
+ ret);
+ goto err_reo_cleanup;
+ }
+
+ ret = ath11k_dp_tx_htt_h2t_ver_req_msg(ab);
+ if (ret) {
+ ath11k_err(ab, "failed to send htt version request message: %d\n",
+ ret);
+ goto err_reo_cleanup;
+ }
+
+ return 0;
+
+err_reo_cleanup:
+ ath11k_dp_pdev_reo_cleanup(ab);
+err_mac_destroy:
+ ath11k_mac_destroy(ab);
+err_hif_stop:
+ ath11k_ahb_stop(ab);
+err_wmi_detach:
+ ath11k_wmi_detach(ab);
+err_firmware_stop:
+ ath11k_qmi_firmware_stop(ab);
+
+ return ret;
+}
+
+int ath11k_core_qmi_firmware_ready(struct ath11k_base *ab)
+{
+ int ret;
+
+ ret = ath11k_ce_init_pipes(ab);
+ if (ret) {
+ ath11k_err(ab, "failed to initialize CE: %d\n", ret);
+ return ret;
+ }
+
+ ret = ath11k_dp_alloc(ab);
+ if (ret) {
+ ath11k_err(ab, "failed to init DP: %d\n", ret);
+ return ret;
+ }
+
+ mutex_lock(&ab->core_lock);
+ ret = ath11k_core_start(ab, ATH11K_FIRMWARE_MODE_NORMAL);
+ if (ret) {
+ ath11k_err(ab, "failed to start core: %d\n", ret);
+ goto err_dp_free;
+ }
+
+ ret = ath11k_core_pdev_create(ab);
+ if (ret) {
+ ath11k_err(ab, "failed to create pdev core: %d\n", ret);
+ goto err_core_stop;
+ }
+ ath11k_ahb_ext_irq_enable(ab);
+ mutex_unlock(&ab->core_lock);
+
+ return 0;
+
+err_core_stop:
+ ath11k_core_stop(ab);
+ ath11k_mac_destroy(ab);
+err_dp_free:
+ ath11k_dp_free(ab);
+ mutex_unlock(&ab->core_lock);
+ return ret;
+}
+
+static int ath11k_core_reconfigure_on_crash(struct ath11k_base *ab)
+{
+ int ret;
+
+ mutex_lock(&ab->core_lock);
+ ath11k_ahb_ext_irq_disable(ab);
+ ath11k_dp_pdev_free(ab);
+ ath11k_ahb_stop(ab);
+ ath11k_wmi_detach(ab);
+ ath11k_dp_pdev_reo_cleanup(ab);
+ mutex_unlock(&ab->core_lock);
+
+ ath11k_dp_free(ab);
+ ath11k_hal_srng_deinit(ab);
+
+ ab->free_vdev_map = (1LL << (ab->num_radios * TARGET_NUM_VDEVS)) - 1;
+
+ ret = ath11k_hal_srng_init(ab);
+ if (ret)
+ return ret;
+
+ clear_bit(ATH11K_FLAG_CRASH_FLUSH, &ab->dev_flags);
+
+ ret = ath11k_core_qmi_firmware_ready(ab);
+ if (ret)
+ goto err_hal_srng_deinit;
+
+ clear_bit(ATH11K_FLAG_RECOVERY, &ab->dev_flags);
+
+ return 0;
+
+err_hal_srng_deinit:
+ ath11k_hal_srng_deinit(ab);
+ return ret;
+}
+
+void ath11k_core_halt(struct ath11k *ar)
+{
+ struct ath11k_base *ab = ar->ab;
+
+ lockdep_assert_held(&ar->conf_mutex);
+
+ ar->num_created_vdevs = 0;
+
+ ath11k_mac_scan_finish(ar);
+ ath11k_mac_peer_cleanup_all(ar);
+ cancel_delayed_work_sync(&ar->scan.timeout);
+ cancel_work_sync(&ar->regd_update_work);
+
+ rcu_assign_pointer(ab->pdevs_active[ar->pdev_idx], NULL);
+ synchronize_rcu();
+ INIT_LIST_HEAD(&ar->arvifs);
+ idr_init(&ar->txmgmt_idr);
+}
+
+static void ath11k_core_restart(struct work_struct *work)
+{
+ struct ath11k_base *ab = container_of(work, struct ath11k_base, restart_work);
+ struct ath11k *ar;
+ struct ath11k_pdev *pdev;
+ int i, ret = 0;
+
+ spin_lock_bh(&ab->base_lock);
+ ab->stats.fw_crash_counter++;
+ spin_unlock_bh(&ab->base_lock);
+
+ for (i = 0; i < ab->num_radios; i++) {
+ pdev = &ab->pdevs[i];
+ ar = pdev->ar;
+ if (!ar || ar->state == ATH11K_STATE_OFF)
+ continue;
+
+ ieee80211_stop_queues(ar->hw);
+ ath11k_mac_drain_tx(ar);
+ complete(&ar->scan.started);
+ complete(&ar->scan.completed);
+ complete(&ar->peer_assoc_done);
+ complete(&ar->install_key_done);
+ complete(&ar->vdev_setup_done);
+ complete(&ar->bss_survey_done);
+
+ wake_up(&ar->dp.tx_empty_waitq);
+ idr_for_each(&ar->txmgmt_idr,
+ ath11k_mac_tx_mgmt_pending_free, ar);
+ idr_destroy(&ar->txmgmt_idr);
+ }
+
+ wake_up(&ab->wmi_ab.tx_credits_wq);
+ wake_up(&ab->peer_mapping_wq);
+
+ ret = ath11k_core_reconfigure_on_crash(ab);
+ if (ret) {
+ ath11k_err(ab, "failed to reconfigure driver on crash recovery\n");
+ return;
+ }
+
+ for (i = 0; i < ab->num_radios; i++) {
+ pdev = &ab->pdevs[i];
+ ar = pdev->ar;
+ if (!ar || ar->state == ATH11K_STATE_OFF)
+ continue;
+
+ mutex_lock(&ar->conf_mutex);
+
+ switch (ar->state) {
+ case ATH11K_STATE_ON:
+ ar->state = ATH11K_STATE_RESTARTING;
+ ath11k_core_halt(ar);
+ ieee80211_restart_hw(ar->hw);
+ break;
+ case ATH11K_STATE_OFF:
+ ath11k_warn(ab,
+ "cannot restart radio %d that hasn't been started\n",
+ i);
+ break;
+ case ATH11K_STATE_RESTARTING:
+ break;
+ case ATH11K_STATE_RESTARTED:
+ ar->state = ATH11K_STATE_WEDGED;
+ /* fall through */
+ case ATH11K_STATE_WEDGED:
+ ath11k_warn(ab,
+ "device is wedged, will not restart radio %d\n", i);
+ break;
+ }
+ mutex_unlock(&ar->conf_mutex);
+ }
+ complete(&ab->driver_recovery);
+}
+
+int ath11k_core_init(struct ath11k_base *ab)
+{
+ struct device *dev = ab->dev;
+ struct rproc *prproc;
+ phandle rproc_phandle;
+ int ret;
+
+ if (of_property_read_u32(dev->of_node, "qcom,rproc", &rproc_phandle)) {
+ ath11k_err(ab, "failed to get q6_rproc handle\n");
+ return -ENOENT;
+ }
+
+ prproc = rproc_get_by_phandle(rproc_phandle);
+ if (!prproc) {
+ ath11k_err(ab, "failed to get rproc\n");
+ return -EINVAL;
+ }
+ ab->tgt_rproc = prproc;
+ ab->hw_params = ath11k_hw_params;
+
+ ret = ath11k_core_soc_create(ab);
+ if (ret) {
+ ath11k_err(ab, "failed to create soc core: %d\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+void ath11k_core_deinit(struct ath11k_base *ab)
+{
+ mutex_lock(&ab->core_lock);
+
+ ath11k_core_pdev_destroy(ab);
+ ath11k_core_stop(ab);
+
+ mutex_unlock(&ab->core_lock);
+
+ ath11k_ahb_power_down(ab);
+ ath11k_mac_destroy(ab);
+ ath11k_core_soc_destroy(ab);
+}
+
+void ath11k_core_free(struct ath11k_base *ab)
+{
+ kfree(ab);
+}
+
+struct ath11k_base *ath11k_core_alloc(struct device *dev)
+{
+ struct ath11k_base *ab;
+
+ ab = kzalloc(sizeof(*ab), GFP_KERNEL);
+ if (!ab)
+ return NULL;
+
+ init_completion(&ab->driver_recovery);
+
+ ab->workqueue = create_singlethread_workqueue("ath11k_wq");
+ if (!ab->workqueue)
+ goto err_sc_free;
+
+ mutex_init(&ab->core_lock);
+ spin_lock_init(&ab->base_lock);
+
+ INIT_LIST_HEAD(&ab->peers);
+ init_waitqueue_head(&ab->peer_mapping_wq);
+ init_waitqueue_head(&ab->wmi_ab.tx_credits_wq);
+ INIT_WORK(&ab->restart_work, ath11k_core_restart);
+ timer_setup(&ab->rx_replenish_retry, ath11k_ce_rx_replenish_retry, 0);
+ ab->dev = dev;
+
+ return ab;
+
+err_sc_free:
+ kfree(ab);
+ return NULL;
+}
+
+static int __init ath11k_init(void)
+{
+ int ret;
+
+ ret = ath11k_ahb_init();
+ if (ret)
+ pr_err("failed to register ath11k ahb driver: %d\n",
+ ret);
+ return ret;
+}
+module_init(ath11k_init);
+
+static void __exit ath11k_exit(void)
+{
+ ath11k_ahb_exit();
+}
+module_exit(ath11k_exit);
+
+MODULE_DESCRIPTION("Driver support for Qualcomm Technologies 802.11ax wireless chip");
+MODULE_LICENSE("Dual BSD/GPL");
diff --git a/drivers/net/wireless/ath/ath11k/core.h b/drivers/net/wireless/ath/ath11k/core.h
new file mode 100644
index 000000000000..25cdcf71d0c4
--- /dev/null
+++ b/drivers/net/wireless/ath/ath11k/core.h
@@ -0,0 +1,826 @@
+/* SPDX-License-Identifier: BSD-3-Clause-Clear */
+/*
+ * Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
+ */
+
+#ifndef ATH11K_CORE_H
+#define ATH11K_CORE_H
+
+#include <linux/types.h>
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/bitfield.h>
+#include "qmi.h"
+#include "htc.h"
+#include "wmi.h"
+#include "hal.h"
+#include "dp.h"
+#include "ce.h"
+#include "mac.h"
+#include "hw.h"
+#include "hal_rx.h"
+#include "reg.h"
+
+#define SM(_v, _f) (((_v) << _f##_LSB) & _f##_MASK)
+
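+/* Worked example (hypothetical field, not from this patch): with
+ *	#define EXAMPLE_FIELD_LSB	4
+ *	#define EXAMPLE_FIELD_MASK	0xf0
+ * SM(3, EXAMPLE_FIELD) expands to ((3 << 4) & 0xf0) == 0x30.
+ */
+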
+#define ATH11K_TX_MGMT_NUM_PENDING_MAX 512
+
+#define ATH11K_TX_MGMT_TARGET_MAX_SUPPORT_WMI 64
+
+/* Pending management packets threshold for dropping probe responses */
+#define ATH11K_PRB_RSP_DROP_THRESHOLD ((ATH11K_TX_MGMT_TARGET_MAX_SUPPORT_WMI * 3) / 4)
+
+#define ATH11K_INVALID_HW_MAC_ID 0xFF
+
+enum ath11k_supported_bw {
+ ATH11K_BW_20 = 0,
+ ATH11K_BW_40 = 1,
+ ATH11K_BW_80 = 2,
+ ATH11K_BW_160 = 3,
+};
+
+enum wme_ac {
+ WME_AC_BE,
+ WME_AC_BK,
+ WME_AC_VI,
+ WME_AC_VO,
+ WME_NUM_AC
+};
+
+#define ATH11K_HT_MCS_MAX 7
+#define ATH11K_VHT_MCS_MAX 9
+#define ATH11K_HE_MCS_MAX 11
+
+static inline enum wme_ac ath11k_tid_to_ac(u32 tid)
+{
+ return (((tid == 0) || (tid == 3)) ? WME_AC_BE :
+ ((tid == 1) || (tid == 2)) ? WME_AC_BK :
+ ((tid == 4) || (tid == 5)) ? WME_AC_VI :
+ WME_AC_VO);
+}
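+
+/* Example (illustrative): TIDs 0 and 3 map to best effort, so
+ * ath11k_tid_to_ac(5) returns WME_AC_VI and ath11k_tid_to_ac(7)
+ * returns WME_AC_VO.
+ */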
+
+struct ath11k_skb_cb {
+ dma_addr_t paddr;
+ u8 eid;
+ struct ath11k *ar;
+ struct ieee80211_vif *vif;
+} __packed;
+
+struct ath11k_skb_rxcb {
+ dma_addr_t paddr;
+ bool is_first_msdu;
+ bool is_last_msdu;
+ bool is_continuation;
+ struct hal_rx_desc *rx_desc;
+ u8 err_rel_src;
+ u8 err_code;
+ u8 mac_id;
+ u8 unmapped;
+};
+
+enum ath11k_hw_rev {
+ ATH11K_HW_IPQ8074,
+};
+
+enum ath11k_firmware_mode {
+ /* the default mode, standard 802.11 functionality */
+ ATH11K_FIRMWARE_MODE_NORMAL,
+
+ /* factory tests etc */
+ ATH11K_FIRMWARE_MODE_FTM,
+};
+
+#define ATH11K_IRQ_NUM_MAX 52
+#define ATH11K_EXT_IRQ_GRP_NUM_MAX 11
+#define ATH11K_EXT_IRQ_NUM_MAX 16
+
+extern const u8 ath11k_reo_status_ring_mask[ATH11K_EXT_IRQ_GRP_NUM_MAX];
+extern const u8 ath11k_tx_ring_mask[ATH11K_EXT_IRQ_GRP_NUM_MAX];
+extern const u8 ath11k_rx_ring_mask[ATH11K_EXT_IRQ_GRP_NUM_MAX];
+extern const u8 ath11k_rx_err_ring_mask[ATH11K_EXT_IRQ_GRP_NUM_MAX];
+extern const u8 ath11k_rx_wbm_rel_ring_mask[ATH11K_EXT_IRQ_GRP_NUM_MAX];
+extern const u8 ath11k_rxdma2host_ring_mask[ATH11K_EXT_IRQ_GRP_NUM_MAX];
+extern const u8 ath11k_host2rxdma_ring_mask[ATH11K_EXT_IRQ_GRP_NUM_MAX];
+extern const u8 rx_mon_status_ring_mask[ATH11K_EXT_IRQ_GRP_NUM_MAX];
+
+struct ath11k_ext_irq_grp {
+ struct ath11k_base *ab;
+ u32 irqs[ATH11K_EXT_IRQ_NUM_MAX];
+ u32 num_irq;
+ u32 grp_id;
+ struct napi_struct napi;
+ struct net_device napi_ndev;
+ /* Queue of pending packets, not expected to be accessed concurrently
+ * to avoid locking overhead.
+ */
+ struct sk_buff_head pending_q;
+};
+
+#define HEHANDLE_CAP_PHYINFO_SIZE 3
+#define HECAP_PHYINFO_SIZE 9
+#define HECAP_MACINFO_SIZE 5
+#define HECAP_TXRX_MCS_NSS_SIZE 2
+#define HECAP_PPET16_PPET8_MAX_SIZE 25
+
+#define HE_PPET16_PPET8_SIZE 8
+
+/* 802.11ax PPE (PPDU Packet Extension) threshold */
+struct he_ppe_threshold {
+ u32 numss_m1;
+ u32 ru_mask;
+ u32 ppet16_ppet8_ru3_ru0[HE_PPET16_PPET8_SIZE];
+};
+
+struct ath11k_he {
+ u8 hecap_macinfo[HECAP_MACINFO_SIZE];
+ u32 hecap_rxmcsnssmap;
+ u32 hecap_txmcsnssmap;
+ u32 hecap_phyinfo[HEHANDLE_CAP_PHYINFO_SIZE];
+ struct he_ppe_threshold hecap_ppet;
+ u32 heop_param;
+};
+
+#define MAX_RADIOS 3
+
+enum {
+ WMI_HOST_TP_SCALE_MAX = 0,
+ WMI_HOST_TP_SCALE_50 = 1,
+ WMI_HOST_TP_SCALE_25 = 2,
+ WMI_HOST_TP_SCALE_12 = 3,
+ WMI_HOST_TP_SCALE_MIN = 4,
+ WMI_HOST_TP_SCALE_SIZE = 5,
+};
+
+enum ath11k_scan_state {
+ ATH11K_SCAN_IDLE,
+ ATH11K_SCAN_STARTING,
+ ATH11K_SCAN_RUNNING,
+ ATH11K_SCAN_ABORTING,
+};
+
+enum ath11k_dev_flags {
+ ATH11K_CAC_RUNNING,
+ ATH11K_FLAG_CORE_REGISTERED,
+ ATH11K_FLAG_CRASH_FLUSH,
+ ATH11K_FLAG_RAW_MODE,
+ ATH11K_FLAG_HW_CRYPTO_DISABLED,
+ ATH11K_FLAG_BTCOEX,
+ ATH11K_FLAG_RECOVERY,
+ ATH11K_FLAG_UNREGISTERING,
+ ATH11K_FLAG_REGISTERED,
+};
+
+enum ath11k_monitor_flags {
+ ATH11K_FLAG_MONITOR_ENABLED,
+};
+
+struct ath11k_vif {
+ u32 vdev_id;
+ enum wmi_vdev_type vdev_type;
+ enum wmi_vdev_subtype vdev_subtype;
+ u32 beacon_interval;
+ u32 dtim_period;
+ u16 ast_hash;
+ u16 tcl_metadata;
+ u8 hal_addr_search_flags;
+ u8 search_type;
+
+ struct ath11k *ar;
+ struct ieee80211_vif *vif;
+
+ u16 tx_seq_no;
+ struct wmi_wmm_params_all_arg wmm_params;
+ struct list_head list;
+ union {
+ struct {
+ u32 uapsd;
+ } sta;
+ struct {
+ /* 127 stations; wmi limit */
+ u8 tim_bitmap[16];
+ u8 tim_len;
+ u32 ssid_len;
+ u8 ssid[IEEE80211_MAX_SSID_LEN];
+ bool hidden_ssid;
+ /* P2P_IE with NoA attribute for P2P_GO case */
+ u32 noa_len;
+ u8 *noa_data;
+ } ap;
+ } u;
+
+ bool is_started;
+ bool is_up;
+ u32 aid;
+ u8 bssid[ETH_ALEN];
+ struct cfg80211_bitrate_mask bitrate_mask;
+ int num_legacy_stations;
+ int rtscts_prot_mode;
+ int txpower;
+};
+
+struct ath11k_vif_iter {
+ u32 vdev_id;
+ struct ath11k_vif *arvif;
+};
+
+struct ath11k_rx_peer_stats {
+ u64 num_msdu;
+ u64 num_mpdu_fcs_ok;
+ u64 num_mpdu_fcs_err;
+ u64 tcp_msdu_count;
+ u64 udp_msdu_count;
+ u64 other_msdu_count;
+ u64 ampdu_msdu_count;
+ u64 non_ampdu_msdu_count;
+ u64 stbc_count;
+ u64 beamformed_count;
+ u64 mcs_count[HAL_RX_MAX_MCS + 1];
+ u64 nss_count[HAL_RX_MAX_NSS];
+ u64 bw_count[HAL_RX_BW_MAX];
+ u64 gi_count[HAL_RX_GI_MAX];
+ u64 coding_count[HAL_RX_SU_MU_CODING_MAX];
+ u64 tid_count[IEEE80211_NUM_TIDS + 1];
+ u64 pream_cnt[HAL_RX_PREAMBLE_MAX];
+ u64 reception_type[HAL_RX_RECEPTION_TYPE_MAX];
+ u64 rx_duration;
+};
+
+#define ATH11K_HE_MCS_NUM 12
+#define ATH11K_VHT_MCS_NUM 10
+#define ATH11K_BW_NUM 4
+#define ATH11K_NSS_NUM 4
+#define ATH11K_LEGACY_NUM 12
+#define ATH11K_GI_NUM 4
+#define ATH11K_HT_MCS_NUM 32
+
+enum ath11k_pkt_rx_err {
+ ATH11K_PKT_RX_ERR_FCS,
+ ATH11K_PKT_RX_ERR_TKIP,
+ ATH11K_PKT_RX_ERR_CRYPT,
+ ATH11K_PKT_RX_ERR_PEER_IDX_INVAL,
+ ATH11K_PKT_RX_ERR_MAX,
+};
+
+enum ath11k_ampdu_subfrm_num {
+ ATH11K_AMPDU_SUBFRM_NUM_10,
+ ATH11K_AMPDU_SUBFRM_NUM_20,
+ ATH11K_AMPDU_SUBFRM_NUM_30,
+ ATH11K_AMPDU_SUBFRM_NUM_40,
+ ATH11K_AMPDU_SUBFRM_NUM_50,
+ ATH11K_AMPDU_SUBFRM_NUM_60,
+ ATH11K_AMPDU_SUBFRM_NUM_MORE,
+ ATH11K_AMPDU_SUBFRM_NUM_MAX,
+};
+
+enum ath11k_amsdu_subfrm_num {
+ ATH11K_AMSDU_SUBFRM_NUM_1,
+ ATH11K_AMSDU_SUBFRM_NUM_2,
+ ATH11K_AMSDU_SUBFRM_NUM_3,
+ ATH11K_AMSDU_SUBFRM_NUM_4,
+ ATH11K_AMSDU_SUBFRM_NUM_MORE,
+ ATH11K_AMSDU_SUBFRM_NUM_MAX,
+};
+
+enum ath11k_counter_type {
+ ATH11K_COUNTER_TYPE_BYTES,
+ ATH11K_COUNTER_TYPE_PKTS,
+ ATH11K_COUNTER_TYPE_MAX,
+};
+
+enum ath11k_stats_type {
+ ATH11K_STATS_TYPE_SUCC,
+ ATH11K_STATS_TYPE_FAIL,
+ ATH11K_STATS_TYPE_RETRY,
+ ATH11K_STATS_TYPE_AMPDU,
+ ATH11K_STATS_TYPE_MAX,
+};
+
+struct ath11k_htt_data_stats {
+ u64 legacy[ATH11K_COUNTER_TYPE_MAX][ATH11K_LEGACY_NUM];
+ u64 ht[ATH11K_COUNTER_TYPE_MAX][ATH11K_HT_MCS_NUM];
+ u64 vht[ATH11K_COUNTER_TYPE_MAX][ATH11K_VHT_MCS_NUM];
+ u64 he[ATH11K_COUNTER_TYPE_MAX][ATH11K_HE_MCS_NUM];
+ u64 bw[ATH11K_COUNTER_TYPE_MAX][ATH11K_BW_NUM];
+ u64 nss[ATH11K_COUNTER_TYPE_MAX][ATH11K_NSS_NUM];
+ u64 gi[ATH11K_COUNTER_TYPE_MAX][ATH11K_GI_NUM];
+};
+
+struct ath11k_htt_tx_stats {
+ struct ath11k_htt_data_stats stats[ATH11K_STATS_TYPE_MAX];
+ u64 tx_duration;
+ u64 ba_fails;
+ u64 ack_fails;
+};
+
+struct ath11k_per_ppdu_tx_stats {
+ u16 succ_pkts;
+ u16 failed_pkts;
+ u16 retry_pkts;
+ u32 succ_bytes;
+ u32 failed_bytes;
+ u32 retry_bytes;
+};
+
+struct ath11k_sta {
+ struct ath11k_vif *arvif;
+
+ /* the following are protected by ar->data_lock */
+ u32 changed; /* IEEE80211_RC_* */
+ u32 bw;
+ u32 nss;
+ u32 smps;
+
+ struct work_struct update_wk;
+ struct ieee80211_tx_info tx_info;
+ struct rate_info txrate;
+ struct rate_info last_txrate;
+ u64 rx_duration;
+ u64 tx_duration;
+ u8 rssi_comb;
+ struct ath11k_htt_tx_stats *tx_stats;
+ struct ath11k_rx_peer_stats *rx_stats;
+};
+
+#define ATH11K_NUM_CHANS 41
+#define ATH11K_MAX_5G_CHAN 173
+
+enum ath11k_state {
+ ATH11K_STATE_OFF,
+ ATH11K_STATE_ON,
+ ATH11K_STATE_RESTARTING,
+ ATH11K_STATE_RESTARTED,
+ ATH11K_STATE_WEDGED,
+ /* Add other states as required */
+};
+
+/* Antenna noise floor */
+#define ATH11K_DEFAULT_NOISE_FLOOR -95
+
+struct ath11k_fw_stats {
+ struct dentry *debugfs_fwstats;
+ u32 pdev_id;
+ u32 stats_id;
+ struct list_head pdevs;
+ struct list_head vdevs;
+ struct list_head bcn;
+};
+
+struct ath11k_dbg_htt_stats {
+ u8 type;
+ u8 reset;
+ struct debug_htt_stats_req *stats_req;
+ /* protects shared stats req buffer */
+ spinlock_t lock;
+};
+
+struct ath11k_debug {
+ struct dentry *debugfs_pdev;
+ struct ath11k_dbg_htt_stats htt_stats;
+ u32 extd_tx_stats;
+ struct ath11k_fw_stats fw_stats;
+ struct completion fw_stats_complete;
+ bool fw_stats_done;
+ u32 extd_rx_stats;
+ u32 pktlog_filter;
+ u32 pktlog_mode;
+ u32 pktlog_peer_valid;
+ u8 pktlog_peer_addr[ETH_ALEN];
+};
+
+struct ath11k_per_peer_tx_stats {
+ u32 succ_bytes;
+ u32 retry_bytes;
+ u32 failed_bytes;
+ u16 succ_pkts;
+ u16 retry_pkts;
+ u16 failed_pkts;
+ u32 duration;
+ u8 ba_fails;
+ bool is_ampdu;
+};
+
+#define ATH11K_FLUSH_TIMEOUT (5 * HZ)
+
+struct ath11k_vdev_stop_status {
+ bool stop_in_progress;
+ u32 vdev_id;
+};
+
+struct ath11k {
+ struct ath11k_base *ab;
+ struct ath11k_pdev *pdev;
+ struct ieee80211_hw *hw;
+ struct ieee80211_ops *ops;
+ struct ath11k_pdev_wmi *wmi;
+ struct ath11k_pdev_dp dp;
+ u8 mac_addr[ETH_ALEN];
+ u32 ht_cap_info;
+ u32 vht_cap_info;
+ struct ath11k_he ar_he;
+ enum ath11k_state state;
+ struct {
+ struct completion started;
+ struct completion completed;
+ struct completion on_channel;
+ struct delayed_work timeout;
+ enum ath11k_scan_state state;
+ bool is_roc;
+ int vdev_id;
+ int roc_freq;
+ bool roc_notify;
+ } scan;
+
+ struct {
+ struct ieee80211_supported_band sbands[NUM_NL80211_BANDS];
+ struct ieee80211_sband_iftype_data
+ iftype[NUM_NL80211_BANDS][NUM_NL80211_IFTYPES];
+ } mac;
+ unsigned long dev_flags;
+ unsigned int filter_flags;
+ unsigned long monitor_flags;
+ u32 min_tx_power;
+ u32 max_tx_power;
+ u32 txpower_limit_2g;
+ u32 txpower_limit_5g;
+ u32 txpower_scale;
+ u32 power_scale;
+ u32 chan_tx_pwr;
+ u32 num_stations;
+ u32 max_num_stations;
+ bool monitor_present;
+ /* To synchronize concurrent synchronous mac80211 callback operations,
+ * concurrent debugfs configuration and concurrent FW statistics events.
+ */
+ struct mutex conf_mutex;
+ /* protects the radio specific data like debug stats, ppdu_stats_info stats,
+ * vdev_stop_status info, scan data, ath11k_sta info, ath11k_vif info,
+ * channel context data, survey info, test mode data.
+ */
+ spinlock_t data_lock;
+
+ struct list_head arvifs;
+ /* should never be NULL; needed for regular htt rx */
+ struct ieee80211_channel *rx_channel;
+
+ /* valid during scan; needed for mgmt rx during scan */
+ struct ieee80211_channel *scan_channel;
+
+ u8 cfg_tx_chainmask;
+ u8 cfg_rx_chainmask;
+ u8 num_rx_chains;
+ u8 num_tx_chains;
+ /* pdev_idx starts from 0 whereas pdev->pdev_id starts with 1 */
+ u8 pdev_idx;
+ u8 lmac_id;
+
+ struct completion peer_assoc_done;
+
+ int install_key_status;
+ struct completion install_key_done;
+
+ int last_wmi_vdev_start_status;
+ struct ath11k_vdev_stop_status vdev_stop_status;
+ struct completion vdev_setup_done;
+
+ int num_peers;
+ int max_num_peers;
+ u32 num_started_vdevs;
+ u32 num_created_vdevs;
+
+ struct idr txmgmt_idr;
+ /* protects txmgmt_idr data */
+ spinlock_t txmgmt_idr_lock;
+ atomic_t num_pending_mgmt_tx;
+
+ /* cycle count is reported twice for each visited channel during scan.
+ * access protected by data_lock
+ */
+ u32 survey_last_rx_clear_count;
+ u32 survey_last_cycle_count;
+
+ /* Channel info events are expected to come in pairs without and with
+ * COMPLETE flag set respectively for each channel visit during scan.
+ *
+ * However there are deviations from this rule. This flag is used to
+ * avoid reporting garbage data.
+ */
+ bool ch_info_can_report_survey;
+ struct survey_info survey[ATH11K_NUM_CHANS];
+ struct completion bss_survey_done;
+
+ struct work_struct regd_update_work;
+
+ struct work_struct wmi_mgmt_tx_work;
+ struct sk_buff_head wmi_mgmt_tx_queue;
+
+ struct ath11k_per_peer_tx_stats peer_tx_stats;
+ struct list_head ppdu_stats_info;
+ u32 ppdu_stat_list_depth;
+
+ struct ath11k_per_peer_tx_stats cached_stats;
+ u32 last_ppdu_id;
+ u32 cached_ppdu_id;
+#ifdef CONFIG_ATH11K_DEBUGFS
+ struct ath11k_debug debug;
+#endif
+ bool dfs_block_radar_events;
+};
+
+struct ath11k_band_cap {
+ u32 max_bw_supported;
+ u32 ht_cap_info;
+ u32 he_cap_info[2];
+ u32 he_mcs;
+ u32 he_cap_phy_info[PSOC_HOST_MAX_PHY_SIZE];
+ struct ath11k_ppe_threshold he_ppet;
+};
+
+struct ath11k_pdev_cap {
+ u32 supported_bands;
+ u32 ampdu_density;
+ u32 vht_cap;
+ u32 vht_mcs;
+ u32 he_mcs;
+ u32 tx_chain_mask;
+ u32 rx_chain_mask;
+ u32 tx_chain_mask_shift;
+ u32 rx_chain_mask_shift;
+ struct ath11k_band_cap band[NUM_NL80211_BANDS];
+};
+
+struct ath11k_pdev {
+ struct ath11k *ar;
+ u32 pdev_id;
+ struct ath11k_pdev_cap cap;
+ u8 mac_addr[ETH_ALEN];
+};
+
+struct ath11k_board_data {
+ const struct firmware *fw;
+ const void *data;
+ size_t len;
+};
+
+/* IPQ8074 HW channel counters frequency value in hertz */
+#define IPQ8074_CC_FREQ_HERTZ 320000
+
+struct ath11k_soc_dp_rx_stats {
+ u32 err_ring_pkts;
+ u32 invalid_rbm;
+ u32 rxdma_error[HAL_REO_ENTR_RING_RXDMA_ECODE_MAX];
+ u32 reo_error[HAL_REO_DEST_RING_ERROR_CODE_MAX];
+ u32 hal_reo_error[DP_REO_DST_RING_MAX];
+};
+
+/* Master structure to hold the hw data which may be used in core module */
+struct ath11k_base {
+ enum ath11k_hw_rev hw_rev;
+ struct platform_device *pdev;
+ struct device *dev;
+ struct ath11k_qmi qmi;
+ struct ath11k_wmi_base wmi_ab;
+ struct completion fw_ready;
+ struct rproc *tgt_rproc;
+ int num_radios;
+ /* HW channel counters frequency value in hertz common to all MACs */
+ u32 cc_freq_hz;
+
+ struct ath11k_htc htc;
+
+ struct ath11k_dp dp;
+
+ void __iomem *mem;
+ unsigned long mem_len;
+
+ const struct ath11k_hif_ops *hif_ops;
+
+ struct ath11k_ce ce;
+ struct timer_list rx_replenish_retry;
+ struct ath11k_hal hal;
+ /* To synchronize core_start/core_stop */
+ struct mutex core_lock;
+ /* Protects data like peers */
+ spinlock_t base_lock;
+ struct ath11k_pdev pdevs[MAX_RADIOS];
+ struct ath11k_pdev __rcu *pdevs_active[MAX_RADIOS];
+ struct ath11k_hal_reg_capabilities_ext hal_reg_cap[MAX_RADIOS];
+ unsigned long long free_vdev_map;
+ struct list_head peers;
+ wait_queue_head_t peer_mapping_wq;
+ u8 mac_addr[ETH_ALEN];
+ bool wmi_ready;
+ u32 wlan_init_status;
+ int irq_num[ATH11K_IRQ_NUM_MAX];
+ struct ath11k_ext_irq_grp ext_irq_grp[ATH11K_EXT_IRQ_GRP_NUM_MAX];
+ struct napi_struct *napi;
+ struct ath11k_targ_cap target_caps;
+ u32 ext_service_bitmap[WMI_SERVICE_EXT_BM_SIZE];
+ bool pdevs_macaddr_valid;
+ int bd_api;
+ struct ath11k_hw_params hw_params;
+ const struct firmware *cal_file;
+
+ /* Below regd's are protected by ab->data_lock */
+ /* This is the regd set for every radio
+ * by the firmware during initialization
+ */
+ struct ieee80211_regdomain *default_regd[MAX_RADIOS];
+ /* This regd is set during dynamic country setting
+ * This may or may not be used during the runtime
+ */
+ struct ieee80211_regdomain *new_regd[MAX_RADIOS];
+
+ /* Current DFS Regulatory */
+ enum ath11k_dfs_region dfs_region;
+#ifdef CONFIG_ATH11K_DEBUGFS
+ struct dentry *debugfs_soc;
+ struct dentry *debugfs_ath11k;
+#endif
+ struct ath11k_soc_dp_rx_stats soc_stats;
+
+ unsigned long dev_flags;
+ struct completion driver_recovery;
+ struct workqueue_struct *workqueue;
+ struct work_struct restart_work;
+ struct {
+ /* protected by data_lock */
+ u32 fw_crash_counter;
+ } stats;
+};
+
+struct ath11k_fw_stats_pdev {
+ struct list_head list;
+
+ /* PDEV stats */
+ s32 ch_noise_floor;
+ /* Cycles spent transmitting frames */
+ u32 tx_frame_count;
+ /* Cycles spent receiving frames */
+ u32 rx_frame_count;
+ /* Total channel busy time */
+ u32 rx_clear_count;
+ /* Total on-channel time */
+ u32 cycle_count;
+ u32 phy_err_count;
+ u32 chan_tx_power;
+ u32 ack_rx_bad;
+ u32 rts_bad;
+ u32 rts_good;
+ u32 fcs_bad;
+ u32 no_beacons;
+ u32 mib_int_count;
+
+ /* PDEV TX stats */
+ /* Num HTT cookies queued to dispatch list */
+ s32 comp_queued;
+ /* Num HTT cookies dispatched */
+ s32 comp_delivered;
+ /* Num MSDU queued to WAL */
+ s32 msdu_enqued;
+ /* Num MPDU queue to WAL */
+ s32 mpdu_enqued;
+ /* Num MSDUs dropped by WMM limit */
+ s32 wmm_drop;
+ /* Num Local frames queued */
+ s32 local_enqued;
+ /* Num Local frames done */
+ s32 local_freed;
+ /* Num queued to HW */
+ s32 hw_queued;
+ /* Num PPDU reaped from HW */
+ s32 hw_reaped;
+ /* Num underruns */
+ s32 underrun;
+ /* Num PPDUs cleaned up in TX abort */
+ s32 tx_abort;
+ /* Num MPDUs requed by SW */
+ s32 mpdus_requed;
+ /* excessive retries */
+ u32 tx_ko;
+ /* data hw rate code */
+ u32 data_rc;
+ /* Scheduler self triggers */
+ u32 self_triggers;
+ /* frames dropped due to excessive sw retries */
+ u32 sw_retry_failure;
+ /* illegal rate phy errors */
+ u32 illgl_rate_phy_err;
+ /* wal pdev continuous xretry */
+ u32 pdev_cont_xretry;
+ /* wal pdev tx timeouts */
+ u32 pdev_tx_timeout;
+ /* wal pdev resets */
+ u32 pdev_resets;
+ /* frames dropped due to non-availability of stateless TIDs */
+ u32 stateless_tid_alloc_failure;
+ /* PHY/BB underrun */
+ u32 phy_underrun;
+ /* MPDU is more than txop limit */
+ u32 txop_ovf;
+
+ /* PDEV RX stats */
+ /* Counts any change in ring routing mid-ppdu */
+ s32 mid_ppdu_route_change;
+ /* Total number of statuses processed */
+ s32 status_rcvd;
+ /* Extra frags on rings 0-3 */
+ s32 r0_frags;
+ s32 r1_frags;
+ s32 r2_frags;
+ s32 r3_frags;
+ /* MSDUs / MPDUs delivered to HTT */
+ s32 htt_msdus;
+ s32 htt_mpdus;
+ /* MSDUs / MPDUs delivered to local stack */
+ s32 loc_msdus;
+ s32 loc_mpdus;
+ /* AMSDUs that have more MSDUs than the status ring size */
+ s32 oversize_amsdu;
+ /* Number of PHY errors */
+ s32 phy_errs;
+ /* Number of PHY errors drops */
+ s32 phy_err_drop;
+ /* Number of mpdu errors - FCS, MIC, ENC etc. */
+ s32 mpdu_errs;
+};
+
+struct ath11k_fw_stats_vdev {
+ struct list_head list;
+
+ u32 vdev_id;
+ u32 beacon_snr;
+ u32 data_snr;
+ u32 num_tx_frames[WLAN_MAX_AC];
+ u32 num_rx_frames;
+ u32 num_tx_frames_retries[WLAN_MAX_AC];
+ u32 num_tx_frames_failures[WLAN_MAX_AC];
+ u32 num_rts_fail;
+ u32 num_rts_success;
+ u32 num_rx_err;
+ u32 num_rx_discard;
+ u32 num_tx_not_acked;
+ u32 tx_rate_history[MAX_TX_RATE_VALUES];
+ u32 beacon_rssi_history[MAX_TX_RATE_VALUES];
+};
+
+struct ath11k_fw_stats_bcn {
+ struct list_head list;
+
+ u32 vdev_id;
+ u32 tx_bcn_succ_cnt;
+ u32 tx_bcn_outage_cnt;
+};
+
+void ath11k_peer_unmap_event(struct ath11k_base *ab, u16 peer_id);
+void ath11k_peer_map_event(struct ath11k_base *ab, u8 vdev_id, u16 peer_id,
+ u8 *mac_addr, u16 ast_hash);
+struct ath11k_peer *ath11k_peer_find(struct ath11k_base *ab, int vdev_id,
+ const u8 *addr);
+struct ath11k_peer *ath11k_peer_find_by_addr(struct ath11k_base *ab,
+ const u8 *addr);
+struct ath11k_peer *ath11k_peer_find_by_id(struct ath11k_base *ab, int peer_id);
+int ath11k_core_qmi_firmware_ready(struct ath11k_base *ab);
+int ath11k_core_init(struct ath11k_base *ath11k);
+void ath11k_core_deinit(struct ath11k_base *ath11k);
+struct ath11k_base *ath11k_core_alloc(struct device *dev);
+void ath11k_core_free(struct ath11k_base *ath11k);
+int ath11k_core_fetch_bdf(struct ath11k_base *ath11k,
+ struct ath11k_board_data *bd);
+void ath11k_core_free_bdf(struct ath11k_base *ab, struct ath11k_board_data *bd);
+
+void ath11k_core_halt(struct ath11k *ar);
+u8 ath11k_core_get_hw_mac_id(struct ath11k_base *ab, int pdev_idx);
+
+static inline const char *ath11k_scan_state_str(enum ath11k_scan_state state)
+{
+ switch (state) {
+ case ATH11K_SCAN_IDLE:
+ return "idle";
+ case ATH11K_SCAN_STARTING:
+ return "starting";
+ case ATH11K_SCAN_RUNNING:
+ return "running";
+ case ATH11K_SCAN_ABORTING:
+ return "aborting";
+ }
+
+ return "unknown";
+}
+
+static inline struct ath11k_skb_cb *ATH11K_SKB_CB(struct sk_buff *skb)
+{
+ return (struct ath11k_skb_cb *)&IEEE80211_SKB_CB(skb)->driver_data;
+}
+
+static inline struct ath11k_skb_rxcb *ATH11K_SKB_RXCB(struct sk_buff *skb)
+{
+ BUILD_BUG_ON(sizeof(struct ath11k_skb_rxcb) > sizeof(skb->cb));
+ return (struct ath11k_skb_rxcb *)skb->cb;
+}
+
+static inline struct ath11k_vif *ath11k_vif_to_arvif(struct ieee80211_vif *vif)
+{
+ return (struct ath11k_vif *)vif->drv_priv;
+}
+
+#endif /* ATH11K_CORE_H */
diff --git a/drivers/net/wireless/ath/ath11k/debug.c b/drivers/net/wireless/ath/ath11k/debug.c
new file mode 100644
index 000000000000..8d485171b0b3
--- /dev/null
+++ b/drivers/net/wireless/ath/ath11k/debug.c
@@ -0,0 +1,1075 @@
+// SPDX-License-Identifier: BSD-3-Clause-Clear
+/*
+ * Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
+ */
+
+#include <linux/vmalloc.h>
+#include "core.h"
+#include "debug.h"
+#include "wmi.h"
+#include "hal_rx.h"
+#include "dp_tx.h"
+#include "debug_htt_stats.h"
+#include "peer.h"
+
+void ath11k_info(struct ath11k_base *ab, const char *fmt, ...)
+{
+ struct va_format vaf = {
+ .fmt = fmt,
+ };
+ va_list args;
+
+ va_start(args, fmt);
+ vaf.va = &args;
+ dev_info(ab->dev, "%pV", &vaf);
+ /* TODO: Trace the log */
+ va_end(args);
+}
+
+void ath11k_err(struct ath11k_base *ab, const char *fmt, ...)
+{
+ struct va_format vaf = {
+ .fmt = fmt,
+ };
+ va_list args;
+
+ va_start(args, fmt);
+ vaf.va = &args;
+ dev_err(ab->dev, "%pV", &vaf);
+ /* TODO: Trace the log */
+ va_end(args);
+}
+
+void ath11k_warn(struct ath11k_base *ab, const char *fmt, ...)
+{
+ struct va_format vaf = {
+ .fmt = fmt,
+ };
+ va_list args;
+
+ va_start(args, fmt);
+ vaf.va = &args;
+ dev_warn_ratelimited(ab->dev, "%pV", &vaf);
+ /* TODO: Trace the log */
+ va_end(args);
+}
+
+#ifdef CONFIG_ATH11K_DEBUG
+void __ath11k_dbg(struct ath11k_base *ab, enum ath11k_debug_mask mask,
+ const char *fmt, ...)
+{
+ struct va_format vaf;
+ va_list args;
+
+ va_start(args, fmt);
+
+ vaf.fmt = fmt;
+ vaf.va = &args;
+
+ if (ath11k_debug_mask & mask)
+ dev_printk(KERN_DEBUG, ab->dev, "%pV", &vaf);
+
+ /* TODO: trace log */
+
+ va_end(args);
+}
+
+void ath11k_dbg_dump(struct ath11k_base *ab,
+ enum ath11k_debug_mask mask,
+ const char *msg, const char *prefix,
+ const void *buf, size_t len)
+{
+ char linebuf[256];
+ size_t linebuflen;
+ const void *ptr;
+
+ if (ath11k_debug_mask & mask) {
+ if (msg)
+ __ath11k_dbg(ab, mask, "%s\n", msg);
+
+ for (ptr = buf; (ptr - buf) < len; ptr += 16) {
+ linebuflen = 0;
+ linebuflen += scnprintf(linebuf + linebuflen,
+ sizeof(linebuf) - linebuflen,
+ "%s%08x: ",
+ (prefix ? prefix : ""),
+ (unsigned int)(ptr - buf));
+ hex_dump_to_buffer(ptr, len - (ptr - buf), 16, 1,
+ linebuf + linebuflen,
+ sizeof(linebuf) - linebuflen, true);
+ dev_printk(KERN_DEBUG, ab->dev, "%s\n", linebuf);
+ }
+ }
+}
+
+#endif
+
+#ifdef CONFIG_ATH11K_DEBUGFS
+static void ath11k_fw_stats_pdevs_free(struct list_head *head)
+{
+ struct ath11k_fw_stats_pdev *i, *tmp;
+
+ list_for_each_entry_safe(i, tmp, head, list) {
+ list_del(&i->list);
+ kfree(i);
+ }
+}
+
+static void ath11k_fw_stats_vdevs_free(struct list_head *head)
+{
+ struct ath11k_fw_stats_vdev *i, *tmp;
+
+ list_for_each_entry_safe(i, tmp, head, list) {
+ list_del(&i->list);
+ kfree(i);
+ }
+}
+
+static void ath11k_fw_stats_bcn_free(struct list_head *head)
+{
+ struct ath11k_fw_stats_bcn *i, *tmp;
+
+ list_for_each_entry_safe(i, tmp, head, list) {
+ list_del(&i->list);
+ kfree(i);
+ }
+}
+
+static void ath11k_debug_fw_stats_reset(struct ath11k *ar)
+{
+ spin_lock_bh(&ar->data_lock);
+ ar->debug.fw_stats_done = false;
+ ath11k_fw_stats_pdevs_free(&ar->debug.fw_stats.pdevs);
+ ath11k_fw_stats_vdevs_free(&ar->debug.fw_stats.vdevs);
+ spin_unlock_bh(&ar->data_lock);
+}
+
+void ath11k_debug_fw_stats_process(struct ath11k_base *ab, struct sk_buff *skb)
+{
+ struct ath11k_fw_stats stats = {};
+ struct ath11k *ar;
+ struct ath11k_pdev *pdev;
+ bool is_end;
+ static unsigned int num_vdev, num_bcn;
+ size_t total_vdevs_started = 0;
+ int i, ret;
+
+ INIT_LIST_HEAD(&stats.pdevs);
+ INIT_LIST_HEAD(&stats.vdevs);
+ INIT_LIST_HEAD(&stats.bcn);
+
+ ret = ath11k_wmi_pull_fw_stats(ab, skb, &stats);
+ if (ret) {
+ ath11k_warn(ab, "failed to pull fw stats: %d\n", ret);
+ goto free;
+ }
+
+ rcu_read_lock();
+ ar = ath11k_mac_get_ar_by_pdev_id(ab, stats.pdev_id);
+ if (!ar) {
+ rcu_read_unlock();
+ ath11k_warn(ab, "failed to get ar for pdev_id %d\n",
+ stats.pdev_id);
+ goto free;
+ }
+
+ spin_lock_bh(&ar->data_lock);
+
+ if (stats.stats_id == WMI_REQUEST_PDEV_STAT) {
+ list_splice_tail_init(&stats.pdevs, &ar->debug.fw_stats.pdevs);
+ ar->debug.fw_stats_done = true;
+ goto complete;
+ }
+
+ if (stats.stats_id == WMI_REQUEST_VDEV_STAT) {
+ if (list_empty(&stats.vdevs)) {
+ ath11k_warn(ab, "empty vdev stats\n");
+ goto complete;
+ }
+ /* FW sends all the active VDEV stats irrespective of PDEV,
+ * hence limit until the count of all VDEVs started
+ */
+ for (i = 0; i < ab->num_radios; i++) {
+ pdev = rcu_dereference(ab->pdevs_active[i]);
+ if (pdev && pdev->ar)
+ total_vdevs_started += pdev->ar->num_started_vdevs;
+ }
+
+ is_end = ((++num_vdev) == total_vdevs_started);
+
+ list_splice_tail_init(&stats.vdevs,
+ &ar->debug.fw_stats.vdevs);
+
+ if (is_end) {
+ ar->debug.fw_stats_done = true;
+ num_vdev = 0;
+ }
+ goto complete;
+ }
+
+ if (stats.stats_id == WMI_REQUEST_BCN_STAT) {
+ if (list_empty(&stats.bcn)) {
+ ath11k_warn(ab, "empty bcn stats\n");
+ goto complete;
+ }
+ /* Mark end until we reached the count of all started VDEVs
+ * within the PDEV
+ */
+ is_end = ((++num_bcn) == ar->num_started_vdevs);
+
+ list_splice_tail_init(&stats.bcn,
+ &ar->debug.fw_stats.bcn);
+
+ if (is_end) {
+ ar->debug.fw_stats_done = true;
+ num_bcn = 0;
+ }
+ }
+complete:
+ complete(&ar->debug.fw_stats_complete);
+ spin_unlock_bh(&ar->data_lock);
+ rcu_read_unlock();
+
+free:
+ ath11k_fw_stats_pdevs_free(&stats.pdevs);
+ ath11k_fw_stats_vdevs_free(&stats.vdevs);
+ ath11k_fw_stats_bcn_free(&stats.bcn);
+}
+
+static int ath11k_debug_fw_stats_request(struct ath11k *ar,
+ struct stats_request_params *req_param)
+{
+ struct ath11k_base *ab = ar->ab;
+ unsigned long timeout, time_left;
+ int ret;
+
+ lockdep_assert_held(&ar->conf_mutex);
+
+ /* FW stats can get split when exceeding the stats data buffer limit.
+ * In that case, since there is no end marker for the back-to-back
+ * received 'update stats' events, keep a 3 second timeout in case
+ * fw_stats_done is never marked.
+ */
+ timeout = jiffies + 3 * HZ;
+
+ ath11k_debug_fw_stats_reset(ar);
+
+ reinit_completion(&ar->debug.fw_stats_complete);
+
+ ret = ath11k_wmi_send_stats_request_cmd(ar, req_param);
+ if (ret) {
+ ath11k_warn(ab, "failed to request fw stats: %d\n",
+ ret);
+ return ret;
+ }
+
+ time_left =
+ wait_for_completion_timeout(&ar->debug.fw_stats_complete,
+ 1 * HZ);
+ if (!time_left)
+ return -ETIMEDOUT;
+
+ for (;;) {
+ if (time_after(jiffies, timeout))
+ break;
+
+ spin_lock_bh(&ar->data_lock);
+ if (ar->debug.fw_stats_done) {
+ spin_unlock_bh(&ar->data_lock);
+ break;
+ }
+ spin_unlock_bh(&ar->data_lock);
+ }
+ return 0;
+}
+
+static int ath11k_open_pdev_stats(struct inode *inode, struct file *file)
+{
+ struct ath11k *ar = inode->i_private;
+ struct ath11k_base *ab = ar->ab;
+ struct stats_request_params req_param;
+ void *buf = NULL;
+ int ret;
+
+ mutex_lock(&ar->conf_mutex);
+
+ if (ar->state != ATH11K_STATE_ON) {
+ ret = -ENETDOWN;
+ goto err_unlock;
+ }
+
+ buf = vmalloc(ATH11K_FW_STATS_BUF_SIZE);
+ if (!buf) {
+ ret = -ENOMEM;
+ goto err_unlock;
+ }
+
+ req_param.pdev_id = ar->pdev->pdev_id;
+ req_param.vdev_id = 0;
+ req_param.stats_id = WMI_REQUEST_PDEV_STAT;
+
+ ret = ath11k_debug_fw_stats_request(ar, &req_param);
+ if (ret) {
+ ath11k_warn(ab, "failed to request fw pdev stats: %d\n", ret);
+ goto err_free;
+ }
+
+ ath11k_wmi_fw_stats_fill(ar, &ar->debug.fw_stats, req_param.stats_id,
+ buf);
+
+ file->private_data = buf;
+
+ mutex_unlock(&ar->conf_mutex);
+ return 0;
+
+err_free:
+ vfree(buf);
+
+err_unlock:
+ mutex_unlock(&ar->conf_mutex);
+ return ret;
+}
+
+static int ath11k_release_pdev_stats(struct inode *inode, struct file *file)
+{
+ vfree(file->private_data);
+
+ return 0;
+}
+
+static ssize_t ath11k_read_pdev_stats(struct file *file,
+ char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ const char *buf = file->private_data;
+ size_t len = strlen(buf);
+
+ return simple_read_from_buffer(user_buf, count, ppos, buf, len);
+}
+
+static const struct file_operations fops_pdev_stats = {
+ .open = ath11k_open_pdev_stats,
+ .release = ath11k_release_pdev_stats,
+ .read = ath11k_read_pdev_stats,
+ .owner = THIS_MODULE,
+ .llseek = default_llseek,
+};
+
+static int ath11k_open_vdev_stats(struct inode *inode, struct file *file)
+{
+ struct ath11k *ar = inode->i_private;
+ struct stats_request_params req_param;
+ void *buf = NULL;
+ int ret;
+
+ mutex_lock(&ar->conf_mutex);
+
+ if (ar->state != ATH11K_STATE_ON) {
+ ret = -ENETDOWN;
+ goto err_unlock;
+ }
+
+ buf = vmalloc(ATH11K_FW_STATS_BUF_SIZE);
+ if (!buf) {
+ ret = -ENOMEM;
+ goto err_unlock;
+ }
+
+ req_param.pdev_id = ar->pdev->pdev_id;
+ /* VDEV stats is always sent for all active VDEVs from FW */
+ req_param.vdev_id = 0;
+ req_param.stats_id = WMI_REQUEST_VDEV_STAT;
+
+ ret = ath11k_debug_fw_stats_request(ar, &req_param);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to request fw vdev stats: %d\n", ret);
+ goto err_free;
+ }
+
+ ath11k_wmi_fw_stats_fill(ar, &ar->debug.fw_stats, req_param.stats_id,
+ buf);
+
+ file->private_data = buf;
+
+ mutex_unlock(&ar->conf_mutex);
+ return 0;
+
+err_free:
+ vfree(buf);
+
+err_unlock:
+ mutex_unlock(&ar->conf_mutex);
+ return ret;
+}
+
+static int ath11k_release_vdev_stats(struct inode *inode, struct file *file)
+{
+ vfree(file->private_data);
+
+ return 0;
+}
+
+static ssize_t ath11k_read_vdev_stats(struct file *file,
+ char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ const char *buf = file->private_data;
+ size_t len = strlen(buf);
+
+ return simple_read_from_buffer(user_buf, count, ppos, buf, len);
+}
+
+static const struct file_operations fops_vdev_stats = {
+ .open = ath11k_open_vdev_stats,
+ .release = ath11k_release_vdev_stats,
+ .read = ath11k_read_vdev_stats,
+ .owner = THIS_MODULE,
+ .llseek = default_llseek,
+};
+
+static int ath11k_open_bcn_stats(struct inode *inode, struct file *file)
+{
+ struct ath11k *ar = inode->i_private;
+ struct ath11k_vif *arvif;
+ struct stats_request_params req_param;
+ void *buf = NULL;
+ int ret;
+
+ mutex_lock(&ar->conf_mutex);
+
+ if (ar->state != ATH11K_STATE_ON) {
+ ret = -ENETDOWN;
+ goto err_unlock;
+ }
+
+ buf = vmalloc(ATH11K_FW_STATS_BUF_SIZE);
+ if (!buf) {
+ ret = -ENOMEM;
+ goto err_unlock;
+ }
+
+ req_param.stats_id = WMI_REQUEST_BCN_STAT;
+ req_param.pdev_id = ar->pdev->pdev_id;
+
+ /* loop all active VDEVs for bcn stats */
+ list_for_each_entry(arvif, &ar->arvifs, list) {
+ if (!arvif->is_up)
+ continue;
+
+ req_param.vdev_id = arvif->vdev_id;
+ ret = ath11k_debug_fw_stats_request(ar, &req_param);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to request fw bcn stats: %d\n", ret);
+ goto err_free;
+ }
+ }
+
+ ath11k_wmi_fw_stats_fill(ar, &ar->debug.fw_stats, req_param.stats_id,
+ buf);
+
+ /* since beacon stats request is looped for all active VDEVs, saved fw
+ * stats is not freed for each request until done for all active VDEVs
+ */
+ spin_lock_bh(&ar->data_lock);
+ ath11k_fw_stats_bcn_free(&ar->debug.fw_stats.bcn);
+ spin_unlock_bh(&ar->data_lock);
+
+ file->private_data = buf;
+
+ mutex_unlock(&ar->conf_mutex);
+ return 0;
+
+err_free:
+ vfree(buf);
+
+err_unlock:
+ mutex_unlock(&ar->conf_mutex);
+ return ret;
+}
+
+static int ath11k_release_bcn_stats(struct inode *inode, struct file *file)
+{
+ vfree(file->private_data);
+
+ return 0;
+}
+
+static ssize_t ath11k_read_bcn_stats(struct file *file,
+ char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ const char *buf = file->private_data;
+ size_t len = strlen(buf);
+
+ return simple_read_from_buffer(user_buf, count, ppos, buf, len);
+}
+
+static const struct file_operations fops_bcn_stats = {
+ .open = ath11k_open_bcn_stats,
+ .release = ath11k_release_bcn_stats,
+ .read = ath11k_read_bcn_stats,
+ .owner = THIS_MODULE,
+ .llseek = default_llseek,
+};
+
+static ssize_t ath11k_read_simulate_fw_crash(struct file *file,
+ char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ const char buf[] =
+ "To simulate firmware crash write one of the keywords to this file:\n"
+ "`assert` - this will send WMI_FORCE_FW_HANG_CMDID to firmware to cause assert.\n"
+ "`hw-restart` - this will simply queue hw restart without fw/hw actually crashing.\n";
+
+ return simple_read_from_buffer(user_buf, count, ppos, buf, strlen(buf));
+}
+
+/* Simulate firmware crash:
+ * 'assert': send WMI_FORCE_FW_HANG_CMDID to the firmware to cause an
+ * assert; this hang is recoverable by a warm firmware reset.
+ * 'hw-restart': advertised in the read handler but not handled below
+ * yet; it would queue a hw restart without fw/hw actually crashing.
+ */
+static ssize_t ath11k_write_simulate_fw_crash(struct file *file,
+ const char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct ath11k_base *ab = file->private_data;
+ struct ath11k_pdev *pdev;
+ struct ath11k *ar = ab->pdevs[0].ar;
+ char buf[32] = {0};
+ ssize_t rc;
+ int i, ret, radioup = 0;
+
+ for (i = 0; i < ab->num_radios; i++) {
+ pdev = &ab->pdevs[i];
+ ar = pdev->ar;
+ if (ar && ar->state == ATH11K_STATE_ON) {
+ radioup = 1;
+ break;
+ }
+ }
+ /* filter partial writes and invalid commands */
+ if (*ppos != 0 || count >= sizeof(buf) || count == 0)
+ return -EINVAL;
+
+ rc = simple_write_to_buffer(buf, sizeof(buf) - 1, ppos, user_buf, count);
+ if (rc < 0)
+ return rc;
+
+ /* drop the possible '\n' from the end */
+ if (buf[*ppos - 1] == '\n')
+ buf[*ppos - 1] = '\0';
+
+ if (radioup == 0) {
+ ret = -ENETDOWN;
+ goto exit;
+ }
+
+ if (!strcmp(buf, "assert")) {
+ ath11k_info(ab, "simulating firmware assert crash\n");
+ ret = ath11k_wmi_force_fw_hang_cmd(ar,
+ ATH11K_WMI_FW_HANG_ASSERT_TYPE,
+ ATH11K_WMI_FW_HANG_DELAY);
+ } else {
+ ret = -EINVAL;
+ goto exit;
+ }
+
+ if (ret) {
+ ath11k_warn(ab, "failed to simulate firmware crash: %d\n", ret);
+ goto exit;
+ }
+
+ ret = count;
+
+exit:
+ return ret;
+}
+
+static const struct file_operations fops_simulate_fw_crash = {
+ .read = ath11k_read_simulate_fw_crash,
+ .write = ath11k_write_simulate_fw_crash,
+ .open = simple_open,
+ .owner = THIS_MODULE,
+ .llseek = default_llseek,
+};
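+
+/* Illustrative usage, assuming the default debugfs mount point and a
+ * <soc> directory named after hw_params.name (see
+ * ath11k_debug_pdev_create()):
+ *
+ *   echo assert > /sys/kernel/debug/ath11k/<soc>/simulate_fw_crash
+ *
+ * The write fails with -ENETDOWN unless at least one radio is up.
+ */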
+
+static ssize_t ath11k_write_enable_extd_tx_stats(struct file *file,
+ const char __user *ubuf,
+ size_t count, loff_t *ppos)
+{
+ struct ath11k *ar = file->private_data;
+ u32 filter;
+ int ret;
+
+ if (kstrtouint_from_user(ubuf, count, 0, &filter))
+ return -EINVAL;
+
+ mutex_lock(&ar->conf_mutex);
+
+ if (ar->state != ATH11K_STATE_ON) {
+ ret = -ENETDOWN;
+ goto out;
+ }
+
+ if (filter == ar->debug.extd_tx_stats) {
+ ret = count;
+ goto out;
+ }
+
+ ar->debug.extd_tx_stats = filter;
+ ret = count;
+
+out:
+ mutex_unlock(&ar->conf_mutex);
+ return ret;
+}
+
+static ssize_t ath11k_read_enable_extd_tx_stats(struct file *file,
+ char __user *ubuf,
+ size_t count, loff_t *ppos)
+{
+ char buf[32] = {0};
+ struct ath11k *ar = file->private_data;
+ int len = 0;
+
+ mutex_lock(&ar->conf_mutex);
+ len = scnprintf(buf, sizeof(buf), "%08x\n",
+ ar->debug.extd_tx_stats);
+ mutex_unlock(&ar->conf_mutex);
+
+ return simple_read_from_buffer(ubuf, count, ppos, buf, len);
+}
+
+static const struct file_operations fops_extd_tx_stats = {
+ .read = ath11k_read_enable_extd_tx_stats,
+ .write = ath11k_write_enable_extd_tx_stats,
+ .open = simple_open,
+};
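+
+/* Illustrative usage (hypothetical path, assuming the default debugfs
+ * mount and pdev index 0):
+ *
+ *   echo 1 > /sys/kernel/debug/ath11k/<soc>/mac0/ext_tx_stats
+ *   cat /sys/kernel/debug/ath11k/<soc>/mac0/ext_tx_stats
+ *
+ * The radio must be up (ATH11K_STATE_ON), otherwise the write fails
+ * with -ENETDOWN.
+ */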
+
+static ssize_t ath11k_write_extd_rx_stats(struct file *file,
+ const char __user *ubuf,
+ size_t count, loff_t *ppos)
+{
+ struct ath11k *ar = file->private_data;
+ struct htt_rx_ring_tlv_filter tlv_filter = {0};
+ u32 enable, rx_filter = 0, ring_id;
+ int ret;
+
+ if (kstrtouint_from_user(ubuf, count, 0, &enable))
+ return -EINVAL;
+
+ mutex_lock(&ar->conf_mutex);
+
+ if (ar->state != ATH11K_STATE_ON) {
+ ret = -ENETDOWN;
+ goto exit;
+ }
+
+ if (enable > 1) {
+ ret = -EINVAL;
+ goto exit;
+ }
+
+ if (enable == ar->debug.extd_rx_stats) {
+ ret = count;
+ goto exit;
+ }
+
+ if (enable) {
+ rx_filter = HTT_RX_FILTER_TLV_FLAGS_MPDU_START;
+ rx_filter |= HTT_RX_FILTER_TLV_FLAGS_PPDU_START;
+ rx_filter |= HTT_RX_FILTER_TLV_FLAGS_PPDU_END;
+ rx_filter |= HTT_RX_FILTER_TLV_FLAGS_PPDU_END_USER_STATS;
+ rx_filter |= HTT_RX_FILTER_TLV_FLAGS_PPDU_END_USER_STATS_EXT;
+ rx_filter |= HTT_RX_FILTER_TLV_FLAGS_PPDU_END_STATUS_DONE;
+
+ tlv_filter.rx_filter = rx_filter;
+ tlv_filter.pkt_filter_flags0 = HTT_RX_FP_MGMT_FILTER_FLAGS0;
+ tlv_filter.pkt_filter_flags1 = HTT_RX_FP_MGMT_FILTER_FLAGS1;
+ tlv_filter.pkt_filter_flags2 = HTT_RX_FP_CTRL_FILTER_FLASG2;
+ tlv_filter.pkt_filter_flags3 = HTT_RX_FP_CTRL_FILTER_FLASG3 |
+ HTT_RX_FP_DATA_FILTER_FLASG3;
+ } else {
+ tlv_filter = ath11k_mac_mon_status_filter_default;
+ }
+
+ ring_id = ar->dp.rx_mon_status_refill_ring.refill_buf_ring.ring_id;
+ ret = ath11k_dp_tx_htt_rx_filter_setup(ar->ab, ring_id, ar->dp.mac_id,
+ HAL_RXDMA_MONITOR_STATUS,
+ DP_RX_BUFFER_SIZE, &tlv_filter);
+ if (ret) {
+ ath11k_warn(ar->ab,
+ "failed to set rx filter for monitor status ring: %d\n", ret);
+ goto exit;
+ }
+
+ ar->debug.extd_rx_stats = enable;
+ ret = count;
+exit:
+ mutex_unlock(&ar->conf_mutex);
+ return ret;
+}
+
+static ssize_t ath11k_read_extd_rx_stats(struct file *file,
+ char __user *ubuf,
+ size_t count, loff_t *ppos)
+{
+ struct ath11k *ar = file->private_data;
+ char buf[32];
+ int len = 0;
+
+ mutex_lock(&ar->conf_mutex);
+ len = scnprintf(buf, sizeof(buf), "%d\n",
+ ar->debug.extd_rx_stats);
+ mutex_unlock(&ar->conf_mutex);
+
+ return simple_read_from_buffer(ubuf, count, ppos, buf, len);
+}
+
+static const struct file_operations fops_extd_rx_stats = {
+ .read = ath11k_read_extd_rx_stats,
+ .write = ath11k_write_extd_rx_stats,
+ .open = simple_open,
+};
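+
+/* Illustrative usage (hypothetical path): only 0 or 1 is accepted, and
+ * enabling reprograms the rx filter on the monitor status ring:
+ *
+ *   echo 1 > /sys/kernel/debug/ath11k/<soc>/mac0/ext_rx_stats
+ */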
+
+static ssize_t ath11k_debug_dump_soc_rx_stats(struct file *file,
+ char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct ath11k_base *ab = file->private_data;
+ struct ath11k_soc_dp_rx_stats *soc_stats = &ab->soc_stats;
+ int len = 0, i, retval;
+ const int size = 4096;
+ static const char *rxdma_err[HAL_REO_ENTR_RING_RXDMA_ECODE_MAX] = {
+ "Overflow", "MPDU len", "FCS", "Decrypt", "TKIP MIC",
+ "Unencrypt", "MSDU len", "MSDU limit", "WiFi parse",
+ "AMSDU parse", "SA timeout", "DA timeout",
+ "Flow timeout", "Flush req"};
+ static const char *reo_err[HAL_REO_DEST_RING_ERROR_CODE_MAX] = {
+ "Desc addr zero", "Desc inval", "AMPDU in non BA",
+ "Non BA dup", "BA dup", "Frame 2k jump", "BAR 2k jump",
+ "Frame OOR", "BAR OOR", "No BA session",
+ "Frame SN equal SSN", "PN check fail", "2k err",
+ "PN err", "Desc blocked"};
+
+ char *buf;
+
+ buf = kzalloc(size, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
+ len += scnprintf(buf + len, size - len, "SOC RX STATS:\n\n");
+ len += scnprintf(buf + len, size - len, "err ring pkts: %u\n",
+ soc_stats->err_ring_pkts);
+ len += scnprintf(buf + len, size - len, "Invalid RBM: %u\n\n",
+ soc_stats->invalid_rbm);
+ len += scnprintf(buf + len, size - len, "RXDMA errors:\n");
+ for (i = 0; i < HAL_REO_ENTR_RING_RXDMA_ECODE_MAX; i++)
+ len += scnprintf(buf + len, size - len, "%s: %u\n",
+ rxdma_err[i], soc_stats->rxdma_error[i]);
+
+ len += scnprintf(buf + len, size - len, "\nREO errors:\n");
+ for (i = 0; i < HAL_REO_DEST_RING_ERROR_CODE_MAX; i++)
+ len += scnprintf(buf + len, size - len, "%s: %u\n",
+ reo_err[i], soc_stats->reo_error[i]);
+
+ len += scnprintf(buf + len, size - len, "\nHAL REO errors:\n");
+ len += scnprintf(buf + len, size - len,
+ "ring0: %u\nring1: %u\nring2: %u\nring3: %u\n",
+ soc_stats->hal_reo_error[0],
+ soc_stats->hal_reo_error[1],
+ soc_stats->hal_reo_error[2],
+ soc_stats->hal_reo_error[3]);
+
+ if (len > size)
+ len = size;
+ retval = simple_read_from_buffer(user_buf, count, ppos, buf, len);
+ kfree(buf);
+
+ return retval;
+}
+
+static const struct file_operations fops_soc_rx_stats = {
+ .read = ath11k_debug_dump_soc_rx_stats,
+ .open = simple_open,
+ .owner = THIS_MODULE,
+ .llseek = default_llseek,
+};
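+
+/* Read-only; e.g. "cat /sys/kernel/debug/ath11k/<soc>/soc_rx_stats"
+ * dumps the accumulated SOC RX error counters formatted above.
+ */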
+
+int ath11k_debug_pdev_create(struct ath11k_base *ab)
+{
+ ab->debugfs_soc = debugfs_create_dir(ab->hw_params.name, ab->debugfs_ath11k);
+
+ if (IS_ERR_OR_NULL(ab->debugfs_soc)) {
+ if (IS_ERR(ab->debugfs_soc))
+ return PTR_ERR(ab->debugfs_soc);
+ return -ENOMEM;
+ }
+
+ debugfs_create_file("simulate_fw_crash", 0600, ab->debugfs_soc, ab,
+ &fops_simulate_fw_crash);
+
+ debugfs_create_file("soc_rx_stats", 0600, ab->debugfs_soc, ab,
+ &fops_soc_rx_stats);
+
+ return 0;
+}
+
+void ath11k_debug_pdev_destroy(struct ath11k_base *ab)
+{
+ debugfs_remove_recursive(ab->debugfs_ath11k);
+ ab->debugfs_ath11k = NULL;
+}
+
+int ath11k_debug_soc_create(struct ath11k_base *ab)
+{
+ ab->debugfs_ath11k = debugfs_create_dir("ath11k", NULL);
+
+ if (IS_ERR_OR_NULL(ab->debugfs_ath11k)) {
+ if (IS_ERR(ab->debugfs_ath11k))
+ return PTR_ERR(ab->debugfs_ath11k);
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+void ath11k_debug_soc_destroy(struct ath11k_base *ab)
+{
+ debugfs_remove_recursive(ab->debugfs_soc);
+ ab->debugfs_soc = NULL;
+}
+
+void ath11k_debug_fw_stats_init(struct ath11k *ar)
+{
+ struct dentry *fwstats_dir = debugfs_create_dir("fw_stats",
+ ar->debug.debugfs_pdev);
+
+ ar->debug.fw_stats.debugfs_fwstats = fwstats_dir;
+
+ /* all fw stats debugfs files are created under the per-pdev
+ * "fw_stats" directory
+ */
+ debugfs_create_file("pdev_stats", 0600, fwstats_dir, ar,
+ &fops_pdev_stats);
+ debugfs_create_file("vdev_stats", 0600, fwstats_dir, ar,
+ &fops_vdev_stats);
+ debugfs_create_file("beacon_stats", 0600, fwstats_dir, ar,
+ &fops_bcn_stats);
+
+ INIT_LIST_HEAD(&ar->debug.fw_stats.pdevs);
+ INIT_LIST_HEAD(&ar->debug.fw_stats.vdevs);
+ INIT_LIST_HEAD(&ar->debug.fw_stats.bcn);
+
+ init_completion(&ar->debug.fw_stats_complete);
+}
+
+static ssize_t ath11k_write_pktlog_filter(struct file *file,
+ const char __user *ubuf,
+ size_t count, loff_t *ppos)
+{
+ struct ath11k *ar = file->private_data;
+ struct htt_rx_ring_tlv_filter tlv_filter = {0};
+ u32 rx_filter = 0, ring_id, filter, mode;
+ char buf[128] = {0};
+ int ret;
+ ssize_t rc;
+
+ mutex_lock(&ar->conf_mutex);
+ if (ar->state != ATH11K_STATE_ON) {
+ ret = -ENETDOWN;
+ goto out;
+ }
+
+ rc = simple_write_to_buffer(buf, sizeof(buf) - 1, ppos, ubuf, count);
+ if (rc < 0) {
+ ret = rc;
+ goto out;
+ }
+ buf[rc] = '\0';
+
+ ret = sscanf(buf, "0x%x %u", &filter, &mode);
+ if (ret != 2) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ if (filter) {
+ ret = ath11k_wmi_pdev_pktlog_enable(ar, filter);
+ if (ret) {
+ ath11k_warn(ar->ab,
+ "failed to enable pktlog filter %x: %d\n",
+ ar->debug.pktlog_filter, ret);
+ goto out;
+ }
+ } else {
+ ret = ath11k_wmi_pdev_pktlog_disable(ar);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to disable pktlog: %d\n", ret);
+ goto out;
+ }
+ }
+
+#define HTT_RX_FILTER_TLV_LITE_MODE \
+ (HTT_RX_FILTER_TLV_FLAGS_PPDU_START | \
+ HTT_RX_FILTER_TLV_FLAGS_PPDU_END | \
+ HTT_RX_FILTER_TLV_FLAGS_PPDU_END_USER_STATS | \
+ HTT_RX_FILTER_TLV_FLAGS_PPDU_END_USER_STATS_EXT | \
+ HTT_RX_FILTER_TLV_FLAGS_PPDU_END_STATUS_DONE | \
+ HTT_RX_FILTER_TLV_FLAGS_MPDU_START)
+
+ if (mode == ATH11K_PKTLOG_MODE_FULL) {
+ rx_filter = HTT_RX_FILTER_TLV_LITE_MODE |
+ HTT_RX_FILTER_TLV_FLAGS_MSDU_START |
+ HTT_RX_FILTER_TLV_FLAGS_MSDU_END |
+ HTT_RX_FILTER_TLV_FLAGS_MPDU_END |
+ HTT_RX_FILTER_TLV_FLAGS_PACKET_HEADER |
+ HTT_RX_FILTER_TLV_FLAGS_ATTENTION;
+ } else if (mode == ATH11K_PKTLOG_MODE_LITE) {
+ ret = ath11k_dp_tx_htt_h2t_ppdu_stats_req(ar,
+ HTT_PPDU_STATS_TAG_PKTLOG);
+ if (ret) {
+ ath11k_err(ar->ab, "failed to enable pktlog lite: %d\n", ret);
+ goto out;
+ }
+
+ rx_filter = HTT_RX_FILTER_TLV_LITE_MODE;
+ } else {
+ ret = ath11k_dp_tx_htt_h2t_ppdu_stats_req(ar,
+ HTT_PPDU_STATS_TAG_DEFAULT);
+ if (ret) {
+ ath11k_err(ar->ab, "failed to send htt ppdu stats req: %d\n",
+ ret);
+ goto out;
+ }
+ }
+
+ tlv_filter.rx_filter = rx_filter;
+ if (rx_filter) {
+ tlv_filter.pkt_filter_flags0 = HTT_RX_FP_MGMT_FILTER_FLAGS0;
+ tlv_filter.pkt_filter_flags1 = HTT_RX_FP_MGMT_FILTER_FLAGS1;
+ tlv_filter.pkt_filter_flags2 = HTT_RX_FP_CTRL_FILTER_FLASG2;
+ tlv_filter.pkt_filter_flags3 = HTT_RX_FP_CTRL_FILTER_FLASG3 |
+ HTT_RX_FP_DATA_FILTER_FLASG3;
+ }
+
+ ring_id = ar->dp.rx_mon_status_refill_ring.refill_buf_ring.ring_id;
+ ret = ath11k_dp_tx_htt_rx_filter_setup(ar->ab, ring_id, ar->dp.mac_id,
+ HAL_RXDMA_MONITOR_STATUS,
+ DP_RX_BUFFER_SIZE, &tlv_filter);
+ if (ret) {
+ ath11k_warn(ar->ab,
+ "failed to set rx filter for monitor status ring: %d\n", ret);
+ goto out;
+ }
+
+ ath11k_dbg(ar->ab, ATH11K_DBG_WMI, "pktlog filter 0x%x mode %s\n",
+ filter, ((mode == ATH11K_PKTLOG_MODE_FULL) ? "full" : "lite"));
+
+ ar->debug.pktlog_filter = filter;
+ ar->debug.pktlog_mode = mode;
+ ret = count;
+
+out:
+ mutex_unlock(&ar->conf_mutex);
+ return ret;
+}
+
+static ssize_t ath11k_read_pktlog_filter(struct file *file,
+ char __user *ubuf,
+ size_t count, loff_t *ppos)
+{
+ char buf[32] = {0};
+ struct ath11k *ar = file->private_data;
+ int len = 0;
+
+ mutex_lock(&ar->conf_mutex);
+ len = scnprintf(buf, sizeof(buf), "%08x %08x\n",
+ ar->debug.pktlog_filter,
+ ar->debug.pktlog_mode);
+ mutex_unlock(&ar->conf_mutex);
+
+ return simple_read_from_buffer(ubuf, count, ppos, buf, len);
+}
+
+static const struct file_operations fops_pktlog_filter = {
+ .read = ath11k_read_pktlog_filter,
+ .write = ath11k_write_pktlog_filter,
+ .open = simple_open,
+};
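+
+/* Illustrative usage, assuming the default debugfs mount. The write
+ * expects "0x%x %u": a hex filter bitmap (see enum ath11k_pktlog_filter)
+ * followed by a decimal mode (1 = lite, 2 = full, see enum
+ * ath11k_pktlog_mode). For example, rx+tx pktlog in lite mode:
+ *
+ *   echo "0x3 1" > /sys/kernel/debug/ath11k/<soc>/mac0/pktlog_filter
+ *
+ * A zero filter disables pktlog via ath11k_wmi_pdev_pktlog_disable().
+ */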
+
+static ssize_t ath11k_write_simulate_radar(struct file *file,
+ const char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct ath11k *ar = file->private_data;
+ int ret;
+
+ ret = ath11k_wmi_simulate_radar(ar);
+ if (ret)
+ return ret;
+
+ return count;
+}
+
+static const struct file_operations fops_simulate_radar = {
+ .write = ath11k_write_simulate_radar,
+ .open = simple_open,
+};
+
+int ath11k_debug_register(struct ath11k *ar)
+{
+ struct ath11k_base *ab = ar->ab;
+ char pdev_name[5];
+ char buf[100] = {0};
+
+ snprintf(pdev_name, sizeof(pdev_name), "%s%d", "mac", ar->pdev_idx);
+
+ ar->debug.debugfs_pdev = debugfs_create_dir(pdev_name, ab->debugfs_soc);
+
+ if (IS_ERR_OR_NULL(ar->debug.debugfs_pdev)) {
+ if (IS_ERR(ar->debug.debugfs_pdev))
+ return PTR_ERR(ar->debug.debugfs_pdev);
+
+ return -ENOMEM;
+ }
+
+ /* Create a symlink under ieee80211/phy* */
+ snprintf(buf, sizeof(buf), "../../ath11k/%pd2", ar->debug.debugfs_pdev);
+ debugfs_create_symlink("ath11k", ar->hw->wiphy->debugfsdir, buf);
+
+ ath11k_debug_htt_stats_init(ar);
+
+ ath11k_debug_fw_stats_init(ar);
+
+ debugfs_create_file("ext_tx_stats", 0644,
+ ar->debug.debugfs_pdev, ar,
+ &fops_extd_tx_stats);
+ debugfs_create_file("ext_rx_stats", 0644,
+ ar->debug.debugfs_pdev, ar,
+ &fops_extd_rx_stats);
+ debugfs_create_file("pktlog_filter", 0644,
+ ar->debug.debugfs_pdev, ar,
+ &fops_pktlog_filter);
+
+ if (ar->hw->wiphy->bands[NL80211_BAND_5GHZ]) {
+ debugfs_create_file("dfs_simulate_radar", 0200,
+ ar->debug.debugfs_pdev, ar,
+ &fops_simulate_radar);
+ debugfs_create_bool("dfs_block_radar_events", 0200,
+ ar->debug.debugfs_pdev,
+ &ar->dfs_block_radar_events);
+ }
+
+ return 0;
+}
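+
+/* Resulting per-device debugfs layout, as a sketch (directory names
+ * depend on hw_params.name and pdev_idx; htt stats entries registered
+ * by ath11k_debug_htt_stats_init() are omitted):
+ *
+ *   ath11k/<soc>/simulate_fw_crash
+ *   ath11k/<soc>/soc_rx_stats
+ *   ath11k/<soc>/mac<N>/{ext_tx_stats,ext_rx_stats,pktlog_filter}
+ *   ath11k/<soc>/mac<N>/fw_stats/{pdev_stats,vdev_stats,beacon_stats}
+ *   ath11k/<soc>/mac<N>/dfs_simulate_radar       (5 GHz only)
+ *   ath11k/<soc>/mac<N>/dfs_block_radar_events   (5 GHz only)
+ */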
+
+void ath11k_debug_unregister(struct ath11k *ar)
+{
+}
+#endif /* CONFIG_ATH11K_DEBUGFS */
diff --git a/drivers/net/wireless/ath/ath11k/debug.h b/drivers/net/wireless/ath/ath11k/debug.h
new file mode 100644
index 000000000000..8e8d5588b541
--- /dev/null
+++ b/drivers/net/wireless/ath/ath11k/debug.h
@@ -0,0 +1,279 @@
+/* SPDX-License-Identifier: BSD-3-Clause-Clear */
+/*
+ * Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
+ */
+
+#ifndef _ATH11K_DEBUG_H_
+#define _ATH11K_DEBUG_H_
+
+#include "hal_tx.h"
+#include "trace.h"
+
+#define ATH11K_TX_POWER_MAX_VAL 70
+#define ATH11K_TX_POWER_MIN_VAL 0
+
+enum ath11k_debug_mask {
+ ATH11K_DBG_AHB = 0x00000001,
+ ATH11K_DBG_WMI = 0x00000002,
+ ATH11K_DBG_HTC = 0x00000004,
+ ATH11K_DBG_DP_HTT = 0x00000008,
+ ATH11K_DBG_MAC = 0x00000010,
+ ATH11K_DBG_BOOT = 0x00000020,
+ ATH11K_DBG_QMI = 0x00000040,
+ ATH11K_DBG_DATA = 0x00000080,
+ ATH11K_DBG_MGMT = 0x00000100,
+ ATH11K_DBG_REG = 0x00000200,
+ ATH11K_DBG_TESTMODE = 0x00000400,
+ ATH11k_DBG_HAL = 0x00000800,
+ ATH11K_DBG_ANY = 0xffffffff,
+};
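+
+/* The masks combine with bitwise OR; e.g. setting ath11k_debug_mask to
+ * 0x12 enables ATH11K_DBG_WMI | ATH11K_DBG_MAC (an illustrative value,
+ * not a recommendation).
+ */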
+
+/* htt_dbg_ext_stats_type */
+enum ath11k_dbg_htt_ext_stats_type {
+ ATH11K_DBG_HTT_EXT_STATS_RESET = 0,
+ ATH11K_DBG_HTT_EXT_STATS_PDEV_TX = 1,
+ ATH11K_DBG_HTT_EXT_STATS_PDEV_RX = 2,
+ ATH11K_DBG_HTT_EXT_STATS_PDEV_TX_HWQ = 3,
+ ATH11K_DBG_HTT_EXT_STATS_PDEV_TX_SCHED = 4,
+ ATH11K_DBG_HTT_EXT_STATS_PDEV_ERROR = 5,
+ ATH11K_DBG_HTT_EXT_STATS_PDEV_TQM = 6,
+ ATH11K_DBG_HTT_EXT_STATS_TQM_CMDQ = 7,
+ ATH11K_DBG_HTT_EXT_STATS_TX_DE_INFO = 8,
+ ATH11K_DBG_HTT_EXT_STATS_PDEV_TX_RATE = 9,
+ ATH11K_DBG_HTT_EXT_STATS_PDEV_RX_RATE = 10,
+ ATH11K_DBG_HTT_EXT_STATS_PEER_INFO = 11,
+ ATH11K_DBG_HTT_EXT_STATS_TX_SELFGEN_INFO = 12,
+ ATH11K_DBG_HTT_EXT_STATS_TX_MU_HWQ = 13,
+ ATH11K_DBG_HTT_EXT_STATS_RING_IF_INFO = 14,
+ ATH11K_DBG_HTT_EXT_STATS_SRNG_INFO = 15,
+ ATH11K_DBG_HTT_EXT_STATS_SFM_INFO = 16,
+ ATH11K_DBG_HTT_EXT_STATS_PDEV_TX_MU = 17,
+ ATH11K_DBG_HTT_EXT_STATS_ACTIVE_PEERS_LIST = 18,
+ ATH11K_DBG_HTT_EXT_STATS_PDEV_CCA_STATS = 19,
+ ATH11K_DBG_HTT_EXT_STATS_TWT_SESSIONS = 20,
+ ATH11K_DBG_HTT_EXT_STATS_REO_RESOURCE_STATS = 21,
+ ATH11K_DBG_HTT_EXT_STATS_TX_SOUNDING_INFO = 22,
+
+ /* keep this last */
+ ATH11K_DBG_HTT_NUM_EXT_STATS,
+};
+
+struct debug_htt_stats_req {
+ bool done;
+ u8 pdev_id;
+ u8 type;
+ u8 peer_addr[ETH_ALEN];
+ struct completion cmpln;
+ u32 buf_len;
+ u8 buf[];
+};
+
+#define ATH11K_HTT_STATS_BUF_SIZE (1024 * 512)
+
+#define ATH11K_FW_STATS_BUF_SIZE (1024 * 1024)
+
+#define ATH11K_HTT_PKTLOG_MAX_SIZE 2048
+
+enum ath11k_pktlog_filter {
+ ATH11K_PKTLOG_RX = 0x000000001,
+ ATH11K_PKTLOG_TX = 0x000000002,
+ ATH11K_PKTLOG_RCFIND = 0x000000004,
+ ATH11K_PKTLOG_RCUPDATE = 0x000000008,
+ ATH11K_PKTLOG_EVENT_SMART_ANT = 0x000000020,
+ ATH11K_PKTLOG_EVENT_SW = 0x000000040,
+ ATH11K_PKTLOG_ANY = 0x00000006f,
+};
+
+enum ath11k_pktlog_mode {
+ ATH11K_PKTLOG_MODE_LITE = 1,
+ ATH11K_PKTLOG_MODE_FULL = 2,
+};
+
+enum ath11k_pktlog_enum {
+ ATH11K_PKTLOG_TYPE_TX_CTRL = 1,
+ ATH11K_PKTLOG_TYPE_TX_STAT = 2,
+ ATH11K_PKTLOG_TYPE_TX_MSDU_ID = 3,
+ ATH11K_PKTLOG_TYPE_RX_STAT = 5,
+ ATH11K_PKTLOG_TYPE_RC_FIND = 6,
+ ATH11K_PKTLOG_TYPE_RC_UPDATE = 7,
+ ATH11K_PKTLOG_TYPE_TX_VIRT_ADDR = 8,
+ ATH11K_PKTLOG_TYPE_RX_CBF = 10,
+ ATH11K_PKTLOG_TYPE_RX_STATBUF = 22,
+ ATH11K_PKTLOG_TYPE_PPDU_STATS = 23,
+ ATH11K_PKTLOG_TYPE_LITE_RX = 24,
+};
+
+__printf(2, 3) void ath11k_info(struct ath11k_base *ab, const char *fmt, ...);
+__printf(2, 3) void ath11k_err(struct ath11k_base *ab, const char *fmt, ...);
+__printf(2, 3) void ath11k_warn(struct ath11k_base *ab, const char *fmt, ...);
+
+extern unsigned int ath11k_debug_mask;
+
+#ifdef CONFIG_ATH11K_DEBUG
+__printf(3, 4) void __ath11k_dbg(struct ath11k_base *ab,
+ enum ath11k_debug_mask mask,
+ const char *fmt, ...);
+void ath11k_dbg_dump(struct ath11k_base *ab,
+ enum ath11k_debug_mask mask,
+ const char *msg, const char *prefix,
+ const void *buf, size_t len);
+#else /* CONFIG_ATH11K_DEBUG */
+static inline void __ath11k_dbg(struct ath11k_base *ab,
+ enum ath11k_debug_mask dbg_mask,
+ const char *fmt, ...)
+{
+}
+
+static inline void ath11k_dbg_dump(struct ath11k_base *ab,
+ enum ath11k_debug_mask mask,
+ const char *msg, const char *prefix,
+ const void *buf, size_t len)
+{
+}
+#endif /* CONFIG_ATH11K_DEBUG */
+
+#ifdef CONFIG_ATH11K_DEBUGFS
+int ath11k_debug_soc_create(struct ath11k_base *ab);
+void ath11k_debug_soc_destroy(struct ath11k_base *ab);
+int ath11k_debug_pdev_create(struct ath11k_base *ab);
+void ath11k_debug_pdev_destroy(struct ath11k_base *ab);
+int ath11k_debug_register(struct ath11k *ar);
+void ath11k_debug_unregister(struct ath11k *ar);
+void ath11k_dbg_htt_ext_stats_handler(struct ath11k_base *ab,
+ struct sk_buff *skb);
+void ath11k_debug_fw_stats_process(struct ath11k_base *ab, struct sk_buff *skb);
+
+void ath11k_debug_fw_stats_init(struct ath11k *ar);
+int ath11k_dbg_htt_stats_req(struct ath11k *ar);
+
+static inline bool ath11k_debug_is_pktlog_lite_mode_enabled(struct ath11k *ar)
+{
+ return (ar->debug.pktlog_mode == ATH11K_PKTLOG_MODE_LITE);
+}
+
+static inline bool ath11k_debug_is_pktlog_rx_stats_enabled(struct ath11k *ar)
+{
+ return (!ar->debug.pktlog_peer_valid && ar->debug.pktlog_mode);
+}
+
+static inline bool ath11k_debug_is_pktlog_peer_valid(struct ath11k *ar, u8 *addr)
+{
+ return (ar->debug.pktlog_peer_valid && ar->debug.pktlog_mode &&
+ ether_addr_equal(addr, ar->debug.pktlog_peer_addr));
+}
+
+static inline int ath11k_debug_is_extd_tx_stats_enabled(struct ath11k *ar)
+{
+ return ar->debug.extd_tx_stats;
+}
+
+static inline int ath11k_debug_is_extd_rx_stats_enabled(struct ath11k *ar)
+{
+ return ar->debug.extd_rx_stats;
+}
+
+void ath11k_sta_add_debugfs(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta, struct dentry *dir);
+void
+ath11k_accumulate_per_peer_tx_stats(struct ath11k_sta *arsta,
+ struct ath11k_per_peer_tx_stats *peer_stats,
+ u8 legacy_rate_idx);
+void ath11k_update_per_peer_stats_from_txcompl(struct ath11k *ar,
+ struct sk_buff *msdu,
+ struct hal_tx_status *ts);
+#else
+static inline int ath11k_debug_soc_create(struct ath11k_base *ab)
+{
+ return 0;
+}
+
+static inline void ath11k_debug_soc_destroy(struct ath11k_base *ab)
+{
+}
+
+static inline int ath11k_debug_pdev_create(struct ath11k_base *ab)
+{
+ return 0;
+}
+
+static inline void ath11k_debug_pdev_destroy(struct ath11k_base *ab)
+{
+}
+
+static inline int ath11k_debug_register(struct ath11k *ar)
+{
+ return 0;
+}
+
+static inline void ath11k_debug_unregister(struct ath11k *ar)
+{
+}
+
+static inline void ath11k_dbg_htt_ext_stats_handler(struct ath11k_base *ab,
+ struct sk_buff *skb)
+{
+}
+
+static inline void ath11k_debug_fw_stats_process(struct ath11k_base *ab,
+ struct sk_buff *skb)
+{
+}
+
+static inline void ath11k_debug_fw_stats_init(struct ath11k *ar)
+{
+}
+
+static inline int ath11k_debug_is_extd_tx_stats_enabled(struct ath11k *ar)
+{
+ return 0;
+}
+
+static inline int ath11k_debug_is_extd_rx_stats_enabled(struct ath11k *ar)
+{
+ return 0;
+}
+
+static inline int ath11k_dbg_htt_stats_req(struct ath11k *ar)
+{
+ return 0;
+}
+
+static inline bool ath11k_debug_is_pktlog_lite_mode_enabled(struct ath11k *ar)
+{
+ return false;
+}
+
+static inline bool ath11k_debug_is_pktlog_rx_stats_enabled(struct ath11k *ar)
+{
+ return false;
+}
+
+static inline bool ath11k_debug_is_pktlog_peer_valid(struct ath11k *ar, u8 *addr)
+{
+ return false;
+}
+
+static inline void
+ath11k_accumulate_per_peer_tx_stats(struct ath11k_sta *arsta,
+ struct ath11k_per_peer_tx_stats *peer_stats,
+ u8 legacy_rate_idx)
+{
+}
+
+static inline void
+ath11k_update_per_peer_stats_from_txcompl(struct ath11k *ar,
+ struct sk_buff *msdu,
+ struct hal_tx_status *ts)
+{
+}
+
+#endif /* CONFIG_ATH11K_DEBUGFS */
+
+#define ath11k_dbg(ar, dbg_mask, fmt, ...) \
+do { \
+ if (ath11k_debug_mask & dbg_mask) \
+ __ath11k_dbg(ar, dbg_mask, fmt, ##__VA_ARGS__); \
+} while (0)
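+
+/* An illustrative call site (hypothetical message); callers pass the
+ * struct ath11k_base pointer despite the parameter being named "ar":
+ *
+ *   ath11k_dbg(ab, ATH11K_DBG_BOOT, "boot started\n");
+ */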
+
+#endif /* _ATH11K_DEBUG_H_ */
diff --git a/drivers/net/wireless/ath/ath11k/debug_htt_stats.c b/drivers/net/wireless/ath/ath11k/debug_htt_stats.c
new file mode 100644
index 000000000000..9939e909628f
--- /dev/null
+++ b/drivers/net/wireless/ath/ath11k/debug_htt_stats.c
@@ -0,0 +1,4570 @@
+// SPDX-License-Identifier: BSD-3-Clause-Clear
+/*
+ * Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
+ */
+
+#include <linux/vmalloc.h>
+#include "core.h"
+#include "dp_tx.h"
+#include "dp_rx.h"
+#include "debug.h"
+#include "debug_htt_stats.h"
+
+#define HTT_DBG_OUT(buf, len, fmt, ...) \
+ scnprintf(buf, len, fmt "\n", ##__VA_ARGS__)
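+
+/* A minimal example of the semantics: HTT_DBG_OUT(buf, 32, "count = %u", 5)
+ * writes "count = 5\n" into buf and returns the number of characters
+ * written (scnprintf() semantics, so output is safely truncated to the
+ * given size).
+ */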
+
+#define HTT_MAX_STRING_LEN 256
+#define HTT_MAX_PRINT_CHAR_PER_ELEM 15
+
+#define HTT_TLV_HDR_LEN 4
+
+#define ARRAY_TO_STRING(out, arr, len) \
+ do { \
+ int index = 0; u8 i; \
+ for (i = 0; i < len; i++) { \
+ index += snprintf(out + index, HTT_MAX_STRING_LEN - index, \
+ " %u:%u,", i, arr[i]); \
+ if (index < 0 || index >= HTT_MAX_STRING_LEN) \
+ break; \
+ } \
+ } while (0)
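+
+/* For example, with arr = {7, 9} and len = 2, ARRAY_TO_STRING() leaves
+ * " 0:7, 1:9," in out; each element is rendered as " index:value," and
+ * the loop stops before HTT_MAX_STRING_LEN would be exceeded.
+ */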
+
+static inline void htt_print_stats_string_tlv(const void *tag_buf,
+ u16 tag_len,
+ struct debug_htt_stats_req *stats_req)
+{
+ const struct htt_stats_string_tlv *htt_stats_buf = tag_buf;
+ u8 *buf = stats_req->buf;
+ u32 len = stats_req->buf_len;
+ u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
+ u8 i;
+ u16 index = 0;
+ char data[HTT_MAX_STRING_LEN] = {0};
+
+ tag_len = tag_len >> 2;
+
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "HTT_STATS_STRING_TLV:");
+
+ for (i = 0; i < tag_len; i++) {
+ index += snprintf(&data[index],
+ HTT_MAX_STRING_LEN - index,
+ "%.*s", 4, (char *)&(htt_stats_buf->data[i]));
+ if (index >= HTT_MAX_STRING_LEN)
+ break;
+ }
+
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "data = %s\n", data);
+
+ if (len >= buf_len)
+ buf[buf_len - 1] = 0;
+ else
+ buf[len] = 0;
+
+ stats_req->buf_len = len;
+}
+
+static inline void htt_print_tx_pdev_stats_cmn_tlv(const void *tag_buf,
+ struct debug_htt_stats_req *stats_req)
+{
+ const struct htt_tx_pdev_stats_cmn_tlv *htt_stats_buf = tag_buf;
+ u8 *buf = stats_req->buf;
+ u32 len = stats_req->buf_len;
+ u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
+
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "HTT_TX_PDEV_STATS_CMN_TLV:");
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "mac_id = %u",
+ htt_stats_buf->mac_id__word & 0xFF);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "hw_queued = %u",
+ htt_stats_buf->hw_queued);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "hw_reaped = %u",
+ htt_stats_buf->hw_reaped);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "underrun = %u",
+ htt_stats_buf->underrun);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "hw_paused = %u",
+ htt_stats_buf->hw_paused);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "hw_flush = %u",
+ htt_stats_buf->hw_flush);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "hw_filt = %u",
+ htt_stats_buf->hw_filt);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "tx_abort = %u",
+ htt_stats_buf->tx_abort);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "mpdu_requeued = %u",
+ htt_stats_buf->mpdu_requed);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "tx_xretry = %u",
+ htt_stats_buf->tx_xretry);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "data_rc = %u",
+ htt_stats_buf->data_rc);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "mpdu_dropped_xretry = %u",
+ htt_stats_buf->mpdu_dropped_xretry);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "illegal_rate_phy_err = %u",
+ htt_stats_buf->illgl_rate_phy_err);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "cont_xretry = %u",
+ htt_stats_buf->cont_xretry);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "tx_timeout = %u",
+ htt_stats_buf->tx_timeout);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "pdev_resets = %u",
+ htt_stats_buf->pdev_resets);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "phy_underrun = %u",
+ htt_stats_buf->phy_underrun);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "txop_ovf = %u",
+ htt_stats_buf->txop_ovf);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "seq_posted = %u",
+ htt_stats_buf->seq_posted);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "seq_failed_queueing = %u",
+ htt_stats_buf->seq_failed_queueing);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "seq_completed = %u",
+ htt_stats_buf->seq_completed);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "seq_restarted = %u",
+ htt_stats_buf->seq_restarted);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "mu_seq_posted = %u",
+ htt_stats_buf->mu_seq_posted);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "seq_switch_hw_paused = %u",
+ htt_stats_buf->seq_switch_hw_paused);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "next_seq_posted_dsr = %u",
+ htt_stats_buf->next_seq_posted_dsr);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "seq_posted_isr = %u",
+ htt_stats_buf->seq_posted_isr);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "seq_ctrl_cached = %u",
+ htt_stats_buf->seq_ctrl_cached);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "mpdu_count_tqm = %u",
+ htt_stats_buf->mpdu_count_tqm);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "msdu_count_tqm = %u",
+ htt_stats_buf->msdu_count_tqm);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "mpdu_removed_tqm = %u",
+ htt_stats_buf->mpdu_removed_tqm);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "msdu_removed_tqm = %u",
+ htt_stats_buf->msdu_removed_tqm);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "mpdus_sw_flush = %u",
+ htt_stats_buf->mpdus_sw_flush);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "mpdus_hw_filter = %u",
+ htt_stats_buf->mpdus_hw_filter);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "mpdus_truncated = %u",
+ htt_stats_buf->mpdus_truncated);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "mpdus_ack_failed = %u",
+ htt_stats_buf->mpdus_ack_failed);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "mpdus_expired = %u",
+ htt_stats_buf->mpdus_expired);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "mpdus_seq_hw_retry = %u",
+ htt_stats_buf->mpdus_seq_hw_retry);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "ack_tlv_proc = %u",
+ htt_stats_buf->ack_tlv_proc);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "coex_abort_mpdu_cnt_valid = %u",
+ htt_stats_buf->coex_abort_mpdu_cnt_valid);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "coex_abort_mpdu_cnt = %u",
+ htt_stats_buf->coex_abort_mpdu_cnt);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "num_total_ppdus_tried_ota = %u",
+ htt_stats_buf->num_total_ppdus_tried_ota);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "num_data_ppdus_tried_ota = %u",
+ htt_stats_buf->num_data_ppdus_tried_ota);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "local_ctrl_mgmt_enqued = %u",
+ htt_stats_buf->local_ctrl_mgmt_enqued);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "local_ctrl_mgmt_freed = %u",
+ htt_stats_buf->local_ctrl_mgmt_freed);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "local_data_enqued = %u",
+ htt_stats_buf->local_data_enqued);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "local_data_freed = %u",
+ htt_stats_buf->local_data_freed);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "mpdu_tried = %u",
+ htt_stats_buf->mpdu_tried);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "isr_wait_seq_posted = %u",
+ htt_stats_buf->isr_wait_seq_posted);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "tx_active_dur_us_low = %u",
+ htt_stats_buf->tx_active_dur_us_low);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "tx_active_dur_us_high = %u\n",
+ htt_stats_buf->tx_active_dur_us_high);
+
+ if (len >= buf_len)
+ buf[buf_len - 1] = 0;
+ else
+ buf[len] = 0;
+
+ stats_req->buf_len = len;
+}
+
+static inline void
+htt_print_tx_pdev_stats_urrn_tlv_v(const void *tag_buf,
+ u16 tag_len,
+ struct debug_htt_stats_req *stats_req)
+{
+ const struct htt_tx_pdev_stats_urrn_tlv_v *htt_stats_buf = tag_buf;
+ u8 *buf = stats_req->buf;
+ u32 len = stats_req->buf_len;
+ u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
+ char urrn_stats[HTT_MAX_STRING_LEN] = {0};
+ u16 num_elems = min_t(u16, (tag_len >> 2), HTT_TX_PDEV_MAX_URRN_STATS);
+
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "HTT_TX_PDEV_STATS_URRN_TLV_V:");
+
+ ARRAY_TO_STRING(urrn_stats, htt_stats_buf->urrn_stats, num_elems);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "urrn_stats = %s\n", urrn_stats);
+
+ if (len >= buf_len)
+ buf[buf_len - 1] = 0;
+ else
+ buf[len] = 0;
+
+ stats_req->buf_len = len;
+}
+
+static inline void
+htt_print_tx_pdev_stats_flush_tlv_v(const void *tag_buf,
+ u16 tag_len,
+ struct debug_htt_stats_req *stats_req)
+{
+ const struct htt_tx_pdev_stats_flush_tlv_v *htt_stats_buf = tag_buf;
+ u8 *buf = stats_req->buf;
+ u32 len = stats_req->buf_len;
+ u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
+ char flush_errs[HTT_MAX_STRING_LEN] = {0};
+ u16 num_elems = min_t(u16, (tag_len >> 2), HTT_TX_PDEV_MAX_FLUSH_REASON_STATS);
+
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "HTT_TX_PDEV_STATS_FLUSH_TLV_V:");
+
+ ARRAY_TO_STRING(flush_errs, htt_stats_buf->flush_errs, num_elems);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "flush_errs = %s\n", flush_errs);
+
+ if (len >= buf_len)
+ buf[buf_len - 1] = 0;
+ else
+ buf[len] = 0;
+
+ stats_req->buf_len = len;
+}
+
+static inline void
+htt_print_tx_pdev_stats_sifs_tlv_v(const void *tag_buf,
+ u16 tag_len,
+ struct debug_htt_stats_req *stats_req)
+{
+ const struct htt_tx_pdev_stats_sifs_tlv_v *htt_stats_buf = tag_buf;
+ u8 *buf = stats_req->buf;
+ u32 len = stats_req->buf_len;
+ u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
+ char sifs_status[HTT_MAX_STRING_LEN] = {0};
+ u16 num_elems = min_t(u16, (tag_len >> 2), HTT_TX_PDEV_MAX_SIFS_BURST_STATS);
+
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "HTT_TX_PDEV_STATS_SIFS_TLV_V:");
+
+ ARRAY_TO_STRING(sifs_status, htt_stats_buf->sifs_status, num_elems);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "sifs_status = %s\n",
+ sifs_status);
+
+ if (len >= buf_len)
+ buf[buf_len - 1] = 0;
+ else
+ buf[len] = 0;
+
+ stats_req->buf_len = len;
+}
+
+static inline void
+htt_print_tx_pdev_stats_phy_err_tlv_v(const void *tag_buf,
+ u16 tag_len,
+ struct debug_htt_stats_req *stats_req)
+{
+ const struct htt_tx_pdev_stats_phy_err_tlv_v *htt_stats_buf = tag_buf;
+ u8 *buf = stats_req->buf;
+ u32 len = stats_req->buf_len;
+ u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
+ char phy_errs[HTT_MAX_STRING_LEN] = {0};
+ u16 num_elems = min_t(u16, (tag_len >> 2), HTT_TX_PDEV_MAX_PHY_ERR_STATS);
+
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "HTT_TX_PDEV_STATS_PHY_ERR_TLV_V:");
+
+ ARRAY_TO_STRING(phy_errs, htt_stats_buf->phy_errs, num_elems);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "phy_errs = %s\n", phy_errs);
+
+ if (len >= buf_len)
+ buf[buf_len - 1] = 0;
+ else
+ buf[len] = 0;
+
+ stats_req->buf_len = len;
+}
+
+static inline void
+htt_print_tx_pdev_stats_sifs_hist_tlv_v(const void *tag_buf,
+ u16 tag_len,
+ struct debug_htt_stats_req *stats_req)
+{
+ const struct htt_tx_pdev_stats_sifs_hist_tlv_v *htt_stats_buf = tag_buf;
+ u8 *buf = stats_req->buf;
+ u32 len = stats_req->buf_len;
+ u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
+ char sifs_hist_status[HTT_MAX_STRING_LEN] = {0};
+ u16 num_elems = min_t(u16, (tag_len >> 2), HTT_TX_PDEV_MAX_SIFS_BURST_HIST_STATS);
+
+ len += HTT_DBG_OUT(buf + len, buf_len - len,
+ "HTT_TX_PDEV_STATS_SIFS_HIST_TLV_V:");
+
+ ARRAY_TO_STRING(sifs_hist_status, htt_stats_buf->sifs_hist_status, num_elems);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "sifs_hist_status = %s\n",
+ sifs_hist_status);
+
+ if (len >= buf_len)
+ buf[buf_len - 1] = 0;
+ else
+ buf[len] = 0;
+
+ stats_req->buf_len = len;
+}
+
+static inline void
+htt_print_tx_pdev_stats_tx_ppdu_stats_tlv_v(const void *tag_buf,
+ struct debug_htt_stats_req *stats_req)
+{
+ const struct htt_tx_pdev_stats_tx_ppdu_stats_tlv_v *htt_stats_buf = tag_buf;
+ u8 *buf = stats_req->buf;
+ u32 len = stats_req->buf_len;
+ u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
+
+ len += HTT_DBG_OUT(buf + len, buf_len - len,
+ "HTT_TX_PDEV_STATS_TX_PPDU_STATS_TLV_V:");
+
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "num_data_ppdus_legacy_su = %u",
+ htt_stats_buf->num_data_ppdus_legacy_su);
+
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "num_data_ppdus_ac_su = %u",
+ htt_stats_buf->num_data_ppdus_ac_su);
+
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "num_data_ppdus_ax_su = %u",
+ htt_stats_buf->num_data_ppdus_ax_su);
+
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "num_data_ppdus_ac_su_txbf = %u",
+ htt_stats_buf->num_data_ppdus_ac_su_txbf);
+
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "num_data_ppdus_ax_su_txbf = %u\n",
+ htt_stats_buf->num_data_ppdus_ax_su_txbf);
+
+ if (len >= buf_len)
+ buf[buf_len - 1] = 0;
+ else
+ buf[len] = 0;
+
+ stats_req->buf_len = len;
+}
+
+static inline void
+htt_print_tx_pdev_stats_tried_mpdu_cnt_hist_tlv_v(const void *tag_buf,
+ u16 tag_len,
+ struct debug_htt_stats_req *stats_req)
+{
+ const struct htt_tx_pdev_stats_tried_mpdu_cnt_hist_tlv_v *htt_stats_buf = tag_buf;
+ u8 *buf = stats_req->buf;
+ u32 len = stats_req->buf_len;
+ u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
+ char tried_mpdu_cnt_hist[HTT_MAX_STRING_LEN] = {0};
+ u32 num_elements = ((tag_len - sizeof(htt_stats_buf->hist_bin_size)) >> 2);
+ u32 required_buffer_size = HTT_MAX_PRINT_CHAR_PER_ELEM * num_elements;
+
+ len += HTT_DBG_OUT(buf + len, buf_len - len,
+ "HTT_TX_PDEV_STATS_TRIED_MPDU_CNT_HIST_TLV_V:");
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "TRIED_MPDU_CNT_HIST_BIN_SIZE : %u",
+ htt_stats_buf->hist_bin_size);
+
+ if (required_buffer_size < HTT_MAX_STRING_LEN) {
+ ARRAY_TO_STRING(tried_mpdu_cnt_hist,
+ htt_stats_buf->tried_mpdu_cnt_hist,
+ num_elements);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "tried_mpdu_cnt_hist = %s\n",
+ tried_mpdu_cnt_hist);
+ } else {
+ len += HTT_DBG_OUT(buf + len, buf_len - len,
+ "INSUFFICIENT PRINT BUFFER\n");
+ }
+
+ if (len >= buf_len)
+ buf[buf_len - 1] = 0;
+ else
+ buf[len] = 0;
+
+ stats_req->buf_len = len;
+}
+
+static inline void htt_print_hw_stats_intr_misc_tlv(const void *tag_buf,
+ struct debug_htt_stats_req *stats_req)
+{
+ const struct htt_hw_stats_intr_misc_tlv *htt_stats_buf = tag_buf;
+ u8 *buf = stats_req->buf;
+ u32 len = stats_req->buf_len;
+ u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
+ char hw_intr_name[HTT_STATS_MAX_HW_INTR_NAME_LEN + 1] = {0};
+
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "HTT_HW_STATS_INTR_MISC_TLV:");
+ memcpy(hw_intr_name, &(htt_stats_buf->hw_intr_name[0]),
+ HTT_STATS_MAX_HW_INTR_NAME_LEN);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "hw_intr_name = %s ", hw_intr_name);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "mask = %u",
+ htt_stats_buf->mask);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "count = %u\n",
+ htt_stats_buf->count);
+
+ if (len >= buf_len)
+ buf[buf_len - 1] = 0;
+ else
+ buf[len] = 0;
+
+ stats_req->buf_len = len;
+}
+
+static inline void
+htt_print_hw_stats_wd_timeout_tlv(const void *tag_buf,
+ struct debug_htt_stats_req *stats_req)
+{
+ const struct htt_hw_stats_wd_timeout_tlv *htt_stats_buf = tag_buf;
+ u8 *buf = stats_req->buf;
+ u32 len = stats_req->buf_len;
+ u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
+ char hw_module_name[HTT_STATS_MAX_HW_MODULE_NAME_LEN + 1] = {0};
+
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "HTT_HW_STATS_WD_TIMEOUT_TLV:");
+ memcpy(hw_module_name, &(htt_stats_buf->hw_module_name[0]),
+ HTT_STATS_MAX_HW_MODULE_NAME_LEN);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "hw_module_name = %s ",
+ hw_module_name);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "count = %u",
+ htt_stats_buf->count);
+
+ if (len >= buf_len)
+ buf[buf_len - 1] = 0;
+ else
+ buf[len] = 0;
+
+ stats_req->buf_len = len;
+}
+
+static inline void htt_print_hw_stats_pdev_errs_tlv(const void *tag_buf,
+ struct debug_htt_stats_req *stats_req)
+{
+ const struct htt_hw_stats_pdev_errs_tlv *htt_stats_buf = tag_buf;
+ u8 *buf = stats_req->buf;
+ u32 len = stats_req->buf_len;
+ u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
+
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "HTT_HW_STATS_PDEV_ERRS_TLV:");
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "mac_id = %u",
+ htt_stats_buf->mac_id__word & 0xFF);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "tx_abort = %u",
+ htt_stats_buf->tx_abort);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "tx_abort_fail_count = %u",
+ htt_stats_buf->tx_abort_fail_count);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "rx_abort = %u",
+ htt_stats_buf->rx_abort);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "rx_abort_fail_count = %u",
+ htt_stats_buf->rx_abort_fail_count);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "warm_reset = %u",
+ htt_stats_buf->warm_reset);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "cold_reset = %u",
+ htt_stats_buf->cold_reset);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "tx_flush = %u",
+ htt_stats_buf->tx_flush);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "tx_glb_reset = %u",
+ htt_stats_buf->tx_glb_reset);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "tx_txq_reset = %u",
+ htt_stats_buf->tx_txq_reset);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "rx_timeout_reset = %u\n",
+ htt_stats_buf->rx_timeout_reset);
+
+ if (len >= buf_len)
+ buf[buf_len - 1] = 0;
+ else
+ buf[len] = 0;
+
+ stats_req->buf_len = len;
+}
+
+static inline void htt_print_msdu_flow_stats_tlv(const void *tag_buf,
+ struct debug_htt_stats_req *stats_req)
+{
+ const struct htt_msdu_flow_stats_tlv *htt_stats_buf = tag_buf;
+ u8 *buf = stats_req->buf;
+ u32 len = stats_req->buf_len;
+ u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
+
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "HTT_MSDU_FLOW_STATS_TLV:");
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "last_update_timestamp = %u",
+ htt_stats_buf->last_update_timestamp);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "last_add_timestamp = %u",
+ htt_stats_buf->last_add_timestamp);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "last_remove_timestamp = %u",
+ htt_stats_buf->last_remove_timestamp);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "total_processed_msdu_count = %u",
+ htt_stats_buf->total_processed_msdu_count);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "cur_msdu_count_in_flowq = %u",
+ htt_stats_buf->cur_msdu_count_in_flowq);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "sw_peer_id = %u",
+ htt_stats_buf->sw_peer_id);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "tx_flow_no = %u",
+ htt_stats_buf->tx_flow_no__tid_num__drop_rule & 0xFFFF);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "tid_num = %u",
+ (htt_stats_buf->tx_flow_no__tid_num__drop_rule & 0xF0000) >>
+ 16);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "drop_rule = %u",
+ (htt_stats_buf->tx_flow_no__tid_num__drop_rule & 0x100000) >>
+ 20);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "last_cycle_enqueue_count = %u",
+ htt_stats_buf->last_cycle_enqueue_count);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "last_cycle_dequeue_count = %u",
+ htt_stats_buf->last_cycle_dequeue_count);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "last_cycle_drop_count = %u",
+ htt_stats_buf->last_cycle_drop_count);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "current_drop_th = %u\n",
+ htt_stats_buf->current_drop_th);
+
+ if (len >= buf_len)
+ buf[buf_len - 1] = 0;
+ else
+ buf[len] = 0;
+
+ stats_req->buf_len = len;
+}
+
+static inline void htt_print_tx_tid_stats_tlv(const void *tag_buf,
+ struct debug_htt_stats_req *stats_req)
+{
+ const struct htt_tx_tid_stats_tlv *htt_stats_buf = tag_buf;
+ u8 *buf = stats_req->buf;
+ u32 len = stats_req->buf_len;
+ u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
+ char tid_name[MAX_HTT_TID_NAME + 1] = {0};
+
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "HTT_TX_TID_STATS_TLV:");
+ memcpy(tid_name, &(htt_stats_buf->tid_name[0]), MAX_HTT_TID_NAME);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "tid_name = %s ", tid_name);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "sw_peer_id = %u",
+ htt_stats_buf->sw_peer_id__tid_num & 0xFFFF);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "tid_num = %u",
+ (htt_stats_buf->sw_peer_id__tid_num & 0xFFFF0000) >> 16);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "num_sched_pending = %u",
+ htt_stats_buf->num_sched_pending__num_ppdu_in_hwq & 0xFF);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "num_ppdu_in_hwq = %u",
+ (htt_stats_buf->num_sched_pending__num_ppdu_in_hwq &
+ 0xFF00) >> 8);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "tid_flags = 0x%x",
+ htt_stats_buf->tid_flags);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "hw_queued = %u",
+ htt_stats_buf->hw_queued);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "hw_reaped = %u",
+ htt_stats_buf->hw_reaped);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "mpdus_hw_filter = %u",
+ htt_stats_buf->mpdus_hw_filter);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "qdepth_bytes = %u",
+ htt_stats_buf->qdepth_bytes);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "qdepth_num_msdu = %u",
+ htt_stats_buf->qdepth_num_msdu);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "qdepth_num_mpdu = %u",
+ htt_stats_buf->qdepth_num_mpdu);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "last_scheduled_tsmp = %u",
+ htt_stats_buf->last_scheduled_tsmp);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "pause_module_id = %u",
+ htt_stats_buf->pause_module_id);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "block_module_id = %u\n",
+ htt_stats_buf->block_module_id);
+
+ if (len >= buf_len)
+ buf[buf_len - 1] = 0;
+ else
+ buf[len] = 0;
+
+ stats_req->buf_len = len;
+}
+
+static inline void htt_print_tx_tid_stats_v1_tlv(const void *tag_buf,
+ struct debug_htt_stats_req *stats_req)
+{
+ const struct htt_tx_tid_stats_v1_tlv *htt_stats_buf = tag_buf;
+ u8 *buf = stats_req->buf;
+ u32 len = stats_req->buf_len;
+ u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
+ char tid_name[MAX_HTT_TID_NAME + 1] = {0};
+
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "HTT_TX_TID_STATS_V1_TLV:");
+ memcpy(tid_name, &(htt_stats_buf->tid_name[0]), MAX_HTT_TID_NAME);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "tid_name = %s ", tid_name);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "sw_peer_id = %u",
+ htt_stats_buf->sw_peer_id__tid_num & 0xFFFF);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "tid_num = %u",
+ (htt_stats_buf->sw_peer_id__tid_num & 0xFFFF0000) >> 16);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "num_sched_pending = %u",
+ htt_stats_buf->num_sched_pending__num_ppdu_in_hwq & 0xFF);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "num_ppdu_in_hwq = %u",
+ (htt_stats_buf->num_sched_pending__num_ppdu_in_hwq &
+ 0xFF00) >> 8);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "tid_flags = 0x%x",
+ htt_stats_buf->tid_flags);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "max_qdepth_bytes = %u",
+ htt_stats_buf->max_qdepth_bytes);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "max_qdepth_n_msdus = %u",
+ htt_stats_buf->max_qdepth_n_msdus);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "rsvd = %u",
+ htt_stats_buf->rsvd);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "qdepth_bytes = %u",
+ htt_stats_buf->qdepth_bytes);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "qdepth_num_msdu = %u",
+ htt_stats_buf->qdepth_num_msdu);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "qdepth_num_mpdu = %u",
+ htt_stats_buf->qdepth_num_mpdu);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "last_scheduled_tsmp = %u",
+ htt_stats_buf->last_scheduled_tsmp);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "pause_module_id = %u",
+ htt_stats_buf->pause_module_id);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "block_module_id = %u",
+ htt_stats_buf->block_module_id);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "allow_n_flags = 0x%x",
+ htt_stats_buf->allow_n_flags);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "sendn_frms_allowed = %u\n",
+ htt_stats_buf->sendn_frms_allowed);
+
+ if (len >= buf_len)
+ buf[buf_len - 1] = 0;
+ else
+ buf[len] = 0;
+
+ stats_req->buf_len = len;
+}
+
+static inline void htt_print_rx_tid_stats_tlv(const void *tag_buf,
+ struct debug_htt_stats_req *stats_req)
+{
+ const struct htt_rx_tid_stats_tlv *htt_stats_buf = tag_buf;
+ u8 *buf = stats_req->buf;
+ u32 len = stats_req->buf_len;
+ u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
+ char tid_name[MAX_HTT_TID_NAME + 1] = {0};
+
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "HTT_RX_TID_STATS_TLV:");
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "sw_peer_id = %u",
+ htt_stats_buf->sw_peer_id__tid_num & 0xFFFF);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "tid_num = %u",
+ (htt_stats_buf->sw_peer_id__tid_num & 0xFFFF0000) >> 16);
+ memcpy(tid_name, &(htt_stats_buf->tid_name[0]), MAX_HTT_TID_NAME);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "tid_name = %s ", tid_name);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "dup_in_reorder = %u",
+ htt_stats_buf->dup_in_reorder);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "dup_past_outside_window = %u",
+ htt_stats_buf->dup_past_outside_window);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "dup_past_within_window = %u",
+ htt_stats_buf->dup_past_within_window);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "rxdesc_err_decrypt = %u\n",
+ htt_stats_buf->rxdesc_err_decrypt);
+
+ if (len >= buf_len)
+ buf[buf_len - 1] = 0;
+ else
+ buf[len] = 0;
+
+ stats_req->buf_len = len;
+}
+
+static inline void htt_print_counter_tlv(const void *tag_buf,
+ struct debug_htt_stats_req *stats_req)
+{
+ const struct htt_counter_tlv *htt_stats_buf = tag_buf;
+ u8 *buf = stats_req->buf;
+ u32 len = stats_req->buf_len;
+ u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
+ char counter_name[HTT_MAX_STRING_LEN] = {0};
+
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "HTT_COUNTER_TLV:");
+
+ ARRAY_TO_STRING(counter_name,
+ htt_stats_buf->counter_name,
+ HTT_MAX_COUNTER_NAME);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "counter_name = %s ", counter_name);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "count = %u\n",
+ htt_stats_buf->count);
+
+ if (len >= buf_len)
+ buf[buf_len - 1] = 0;
+ else
+ buf[len] = 0;
+
+ stats_req->buf_len = len;
+}
+
+static inline void htt_print_peer_stats_cmn_tlv(const void *tag_buf,
+ struct debug_htt_stats_req *stats_req)
+{
+ const struct htt_peer_stats_cmn_tlv *htt_stats_buf = tag_buf;
+ u8 *buf = stats_req->buf;
+ u32 len = stats_req->buf_len;
+ u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
+
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "HTT_PEER_STATS_CMN_TLV:");
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "ppdu_cnt = %u",
+ htt_stats_buf->ppdu_cnt);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "mpdu_cnt = %u",
+ htt_stats_buf->mpdu_cnt);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "msdu_cnt = %u",
+ htt_stats_buf->msdu_cnt);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "pause_bitmap = %u",
+ htt_stats_buf->pause_bitmap);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "block_bitmap = %u",
+ htt_stats_buf->block_bitmap);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "last_rssi = %d",
+ htt_stats_buf->rssi);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "enqueued_count = %llu",
+ htt_stats_buf->peer_enqueued_count_low |
+ ((u64)htt_stats_buf->peer_enqueued_count_high << 32));
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "dequeued_count = %llu",
+ htt_stats_buf->peer_dequeued_count_low |
+ ((u64)htt_stats_buf->peer_dequeued_count_high << 32));
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "dropped_count = %llu",
+ htt_stats_buf->peer_dropped_count_low |
+ ((u64)htt_stats_buf->peer_dropped_count_high << 32));
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "transmitted_ppdu_bytes = %llu",
+ htt_stats_buf->ppdu_transmitted_bytes_low |
+ ((u64)htt_stats_buf->ppdu_transmitted_bytes_high << 32));
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "ttl_removed_count = %u",
+ htt_stats_buf->peer_ttl_removed_count);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "inactive_time = %u\n",
+ htt_stats_buf->inactive_time);
+
+ if (len >= buf_len)
+ buf[buf_len - 1] = 0;
+ else
+ buf[len] = 0;
+
+ stats_req->buf_len = len;
+}
+
+static inline void htt_print_peer_details_tlv(const void *tag_buf,
+ struct debug_htt_stats_req *stats_req)
+{
+ const struct htt_peer_details_tlv *htt_stats_buf = tag_buf;
+ u8 *buf = stats_req->buf;
+ u32 len = stats_req->buf_len;
+ u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
+
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "HTT_PEER_DETAILS_TLV:");
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "peer_type = %u",
+ htt_stats_buf->peer_type);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "sw_peer_id = %u",
+ htt_stats_buf->sw_peer_id);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "vdev_id = %u",
+ htt_stats_buf->vdev_pdev_ast_idx & 0xFF);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "pdev_id = %u",
+ (htt_stats_buf->vdev_pdev_ast_idx & 0xFF00) >> 8);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "ast_idx = %u",
+ (htt_stats_buf->vdev_pdev_ast_idx & 0xFFFF0000) >> 16);
+ len += HTT_DBG_OUT(buf + len, buf_len - len,
+ "mac_addr = %02x:%02x:%02x:%02x:%02x:%02x",
+ htt_stats_buf->mac_addr.mac_addr_l32 & 0xFF,
+ (htt_stats_buf->mac_addr.mac_addr_l32 & 0xFF00) >> 8,
+ (htt_stats_buf->mac_addr.mac_addr_l32 & 0xFF0000) >> 16,
+ (htt_stats_buf->mac_addr.mac_addr_l32 & 0xFF000000) >> 24,
+ (htt_stats_buf->mac_addr.mac_addr_h16 & 0xFF),
+ (htt_stats_buf->mac_addr.mac_addr_h16 & 0xFF00) >> 8);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "peer_flags = 0x%x",
+ htt_stats_buf->peer_flags);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "qpeer_flags = 0x%x\n",
+ htt_stats_buf->qpeer_flags);
+
+ if (len >= buf_len)
+ buf[buf_len - 1] = 0;
+ else
+ buf[len] = 0;
+
+ stats_req->buf_len = len;
+}
+
+static inline void htt_print_tx_peer_rate_stats_tlv(const void *tag_buf,
+ struct debug_htt_stats_req *stats_req)
+{
+ const struct htt_tx_peer_rate_stats_tlv *htt_stats_buf = tag_buf;
+ u8 *buf = stats_req->buf;
+ u32 len = stats_req->buf_len;
+ u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
+ char str_buf[HTT_MAX_STRING_LEN] = {0};
+ char *tx_gi[HTT_TX_PEER_STATS_NUM_GI_COUNTERS] = {NULL};
+ u8 j;
+
+ for (j = 0; j < HTT_TX_PEER_STATS_NUM_GI_COUNTERS; j++) {
+ tx_gi[j] = kmalloc(HTT_MAX_STRING_LEN, GFP_ATOMIC);
+ if (!tx_gi[j])
+ goto fail;
+ }
+
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "HTT_TX_PEER_RATE_STATS_TLV:");
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "tx_ldpc = %u",
+ htt_stats_buf->tx_ldpc);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "rts_cnt = %u",
+ htt_stats_buf->rts_cnt);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "ack_rssi = %u",
+ htt_stats_buf->ack_rssi);
+
+ memset(str_buf, 0x0, HTT_MAX_STRING_LEN);
+ ARRAY_TO_STRING(str_buf, htt_stats_buf->tx_mcs,
+ HTT_TX_PDEV_STATS_NUM_MCS_COUNTERS);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "tx_mcs = %s ", str_buf);
+
+ memset(str_buf, 0x0, HTT_MAX_STRING_LEN);
+ ARRAY_TO_STRING(str_buf, htt_stats_buf->tx_su_mcs,
+ HTT_TX_PDEV_STATS_NUM_MCS_COUNTERS);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "tx_su_mcs = %s ", str_buf);
+
+ memset(str_buf, 0x0, HTT_MAX_STRING_LEN);
+ ARRAY_TO_STRING(str_buf, htt_stats_buf->tx_mu_mcs,
+ HTT_TX_PDEV_STATS_NUM_MCS_COUNTERS);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "tx_mu_mcs = %s ", str_buf);
+
+ memset(str_buf, 0x0, HTT_MAX_STRING_LEN);
+ ARRAY_TO_STRING(str_buf,
+ htt_stats_buf->tx_nss,
+ HTT_TX_PDEV_STATS_NUM_SPATIAL_STREAMS);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "tx_nss = %s ", str_buf);
+
+ memset(str_buf, 0x0, HTT_MAX_STRING_LEN);
+ ARRAY_TO_STRING(str_buf,
+ htt_stats_buf->tx_bw,
+ HTT_TX_PDEV_STATS_NUM_BW_COUNTERS);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "tx_bw = %s ", str_buf);
+
+ memset(str_buf, 0x0, HTT_MAX_STRING_LEN);
+ ARRAY_TO_STRING(str_buf, htt_stats_buf->tx_stbc,
+ HTT_TX_PDEV_STATS_NUM_MCS_COUNTERS);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "tx_stbc = %s ", str_buf);
+
+ memset(str_buf, 0x0, HTT_MAX_STRING_LEN);
+ ARRAY_TO_STRING(str_buf, htt_stats_buf->tx_pream,
+ HTT_TX_PDEV_STATS_NUM_PREAMBLE_TYPES);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "tx_pream = %s ", str_buf);
+
+ for (j = 0; j < HTT_TX_PEER_STATS_NUM_GI_COUNTERS; j++) {
+ ARRAY_TO_STRING(tx_gi[j],
+ htt_stats_buf->tx_gi[j],
+ HTT_TX_PEER_STATS_NUM_MCS_COUNTERS);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "tx_gi[%u] = %s ",
+ j, tx_gi[j]);
+ }
+
+ memset(str_buf, 0x0, HTT_MAX_STRING_LEN);
+ ARRAY_TO_STRING(str_buf,
+ htt_stats_buf->tx_dcm,
+ HTT_TX_PDEV_STATS_NUM_DCM_COUNTERS);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "tx_dcm = %s\n", str_buf);
+
+ if (len >= buf_len)
+ buf[buf_len - 1] = 0;
+ else
+ buf[len] = 0;
+
+ stats_req->buf_len = len;
+
+fail:
+ for (j = 0; j < HTT_TX_PEER_STATS_NUM_GI_COUNTERS; j++)
+ kfree(tx_gi[j]);
+}
+
+static inline void htt_print_rx_peer_rate_stats_tlv(const void *tag_buf,
+ struct debug_htt_stats_req *stats_req)
+{
+ const struct htt_rx_peer_rate_stats_tlv *htt_stats_buf = tag_buf;
+ u8 *buf = stats_req->buf;
+ u32 len = stats_req->buf_len;
+ u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
+ u8 j;
+ char *rssi_chain[HTT_RX_PEER_STATS_NUM_SPATIAL_STREAMS] = {NULL};
+ char *rx_gi[HTT_RX_PEER_STATS_NUM_GI_COUNTERS] = {NULL};
+ char str_buf[HTT_MAX_STRING_LEN] = {0};
+
+ for (j = 0; j < HTT_RX_PEER_STATS_NUM_SPATIAL_STREAMS; j++) {
+ rssi_chain[j] = kmalloc(HTT_MAX_STRING_LEN, GFP_ATOMIC);
+ if (!rssi_chain[j])
+ goto fail;
+ }
+
+ for (j = 0; j < HTT_RX_PEER_STATS_NUM_GI_COUNTERS; j++) {
+ rx_gi[j] = kmalloc(HTT_MAX_STRING_LEN, GFP_ATOMIC);
+ if (!rx_gi[j])
+ goto fail;
+ }
+
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "HTT_RX_PEER_RATE_STATS_TLV:");
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "nsts = %u",
+ htt_stats_buf->nsts);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "rx_ldpc = %u",
+ htt_stats_buf->rx_ldpc);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "rts_cnt = %u",
+ htt_stats_buf->rts_cnt);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "rssi_mgmt = %u",
+ htt_stats_buf->rssi_mgmt);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "rssi_data = %u",
+ htt_stats_buf->rssi_data);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "rssi_comb = %u",
+ htt_stats_buf->rssi_comb);
+
+ memset(str_buf, 0x0, HTT_MAX_STRING_LEN);
+ ARRAY_TO_STRING(str_buf, htt_stats_buf->rx_mcs,
+ HTT_RX_PDEV_STATS_NUM_MCS_COUNTERS);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "rx_mcs = %s ", str_buf);
+
+ memset(str_buf, 0x0, HTT_MAX_STRING_LEN);
+ ARRAY_TO_STRING(str_buf, htt_stats_buf->rx_nss,
+ HTT_RX_PDEV_STATS_NUM_SPATIAL_STREAMS);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "rx_nss = %s ", str_buf);
+
+ memset(str_buf, 0x0, HTT_MAX_STRING_LEN);
+ ARRAY_TO_STRING(str_buf, htt_stats_buf->rx_dcm,
+ HTT_RX_PDEV_STATS_NUM_DCM_COUNTERS);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "rx_dcm = %s ", str_buf);
+
+ memset(str_buf, 0x0, HTT_MAX_STRING_LEN);
+ ARRAY_TO_STRING(str_buf, htt_stats_buf->rx_stbc,
+ HTT_RX_PDEV_STATS_NUM_MCS_COUNTERS);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "rx_stbc = %s ", str_buf);
+
+ memset(str_buf, 0x0, HTT_MAX_STRING_LEN);
+ ARRAY_TO_STRING(str_buf, htt_stats_buf->rx_bw,
+ HTT_RX_PDEV_STATS_NUM_BW_COUNTERS);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "rx_bw = %s ", str_buf);
+
+ for (j = 0; j < HTT_RX_PEER_STATS_NUM_SPATIAL_STREAMS; j++) {
+ ARRAY_TO_STRING(rssi_chain[j], htt_stats_buf->rssi_chain[j],
+ HTT_RX_PEER_STATS_NUM_BW_COUNTERS);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "rssi_chain[%u] = %s ",
+ j, rssi_chain[j]);
+ }
+
+ for (j = 0; j < HTT_RX_PEER_STATS_NUM_GI_COUNTERS; j++) {
+ ARRAY_TO_STRING(rx_gi[j], htt_stats_buf->rx_gi[j],
+ HTT_RX_PDEV_STATS_NUM_MCS_COUNTERS);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "rx_gi[%u] = %s ",
+ j, rx_gi[j]);
+ }
+
+ memset(str_buf, 0x0, HTT_MAX_STRING_LEN);
+ ARRAY_TO_STRING(str_buf, htt_stats_buf->rx_pream,
+ HTT_RX_PDEV_STATS_NUM_PREAMBLE_TYPES);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "rx_pream = %s\n", str_buf);
+
+ if (len >= buf_len)
+ buf[buf_len - 1] = 0;
+ else
+ buf[len] = 0;
+
+ stats_req->buf_len = len;
+
+fail:
+ for (j = 0; j < HTT_RX_PEER_STATS_NUM_SPATIAL_STREAMS; j++)
+ kfree(rssi_chain[j]);
+
+ for (j = 0; j < HTT_RX_PEER_STATS_NUM_GI_COUNTERS; j++)
+ kfree(rx_gi[j]);
+}
+
+static inline void
+htt_print_tx_hwq_mu_mimo_sch_stats_tlv(const void *tag_buf,
+ struct debug_htt_stats_req *stats_req)
+{
+ const struct htt_tx_hwq_mu_mimo_sch_stats_tlv *htt_stats_buf = tag_buf;
+ u8 *buf = stats_req->buf;
+ u32 len = stats_req->buf_len;
+ u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
+
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "HTT_TX_HWQ_MU_MIMO_SCH_STATS_TLV:");
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "mu_mimo_sch_posted = %u",
+ htt_stats_buf->mu_mimo_sch_posted);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "mu_mimo_sch_failed = %u",
+ htt_stats_buf->mu_mimo_sch_failed);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "mu_mimo_ppdu_posted = %u\n",
+ htt_stats_buf->mu_mimo_ppdu_posted);
+
+ if (len >= buf_len)
+ buf[buf_len - 1] = 0;
+ else
+ buf[len] = 0;
+
+ stats_req->buf_len = len;
+}
+
+static inline void
+htt_print_tx_hwq_mu_mimo_mpdu_stats_tlv(const void *tag_buf,
+ struct debug_htt_stats_req *stats_req)
+{
+ const struct htt_tx_hwq_mu_mimo_mpdu_stats_tlv *htt_stats_buf = tag_buf;
+ u8 *buf = stats_req->buf;
+ u32 len = stats_req->buf_len;
+ u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
+
+ len += HTT_DBG_OUT(buf + len, buf_len - len,
+ "HTT_TX_HWQ_MU_MIMO_MPDU_STATS_TLV:");
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "mu_mimo_mpdus_queued_usr = %u",
+ htt_stats_buf->mu_mimo_mpdus_queued_usr);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "mu_mimo_mpdus_tried_usr = %u",
+ htt_stats_buf->mu_mimo_mpdus_tried_usr);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "mu_mimo_mpdus_failed_usr = %u",
+ htt_stats_buf->mu_mimo_mpdus_failed_usr);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "mu_mimo_mpdus_requeued_usr = %u",
+ htt_stats_buf->mu_mimo_mpdus_requeued_usr);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "mu_mimo_err_no_ba_usr = %u",
+ htt_stats_buf->mu_mimo_err_no_ba_usr);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "mu_mimo_mpdu_underrun_usr = %u",
+ htt_stats_buf->mu_mimo_mpdu_underrun_usr);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "mu_mimo_ampdu_underrun_usr = %u\n",
+ htt_stats_buf->mu_mimo_ampdu_underrun_usr);
+
+ if (len >= buf_len)
+ buf[buf_len - 1] = 0;
+ else
+ buf[len] = 0;
+
+ stats_req->buf_len = len;
+}
+
+static inline void
+htt_print_tx_hwq_mu_mimo_cmn_stats_tlv(const void *tag_buf,
+ struct debug_htt_stats_req *stats_req)
+{
+ const struct htt_tx_hwq_mu_mimo_cmn_stats_tlv *htt_stats_buf = tag_buf;
+ u8 *buf = stats_req->buf;
+ u32 len = stats_req->buf_len;
+ u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
+
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "HTT_TX_HWQ_MU_MIMO_CMN_STATS_TLV:");
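+ /* mac_id__hwq_id__word packs mac_id in bits 7:0 and hwq_id in bits 15:8 */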
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "mac_id = %u",
+ htt_stats_buf->mac_id__hwq_id__word & 0xFF);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "hwq_id = %u\n",
+ (htt_stats_buf->mac_id__hwq_id__word & 0xFF00) >> 8);
+
+ if (len >= buf_len)
+ buf[buf_len - 1] = 0;
+ else
+ buf[len] = 0;
+
+ stats_req->buf_len = len;
+}
+
+static inline void
+htt_print_tx_hwq_stats_cmn_tlv(const void *tag_buf, struct debug_htt_stats_req *stats_req)
+{
+ const struct htt_tx_hwq_stats_cmn_tlv *htt_stats_buf = tag_buf;
+ u8 *buf = stats_req->buf;
+ u32 len = stats_req->buf_len;
+ u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
+
+ /* TODO: HKDBG */
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "HTT_TX_HWQ_STATS_CMN_TLV:");
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "mac_id = %u",
+ htt_stats_buf->mac_id__hwq_id__word & 0xFF);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "hwq_id = %u",
+ (htt_stats_buf->mac_id__hwq_id__word & 0xFF00) >> 8);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "xretry = %u",
+ htt_stats_buf->xretry);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "underrun_cnt = %u",
+ htt_stats_buf->underrun_cnt);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "flush_cnt = %u",
+ htt_stats_buf->flush_cnt);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "filt_cnt = %u",
+ htt_stats_buf->filt_cnt);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "null_mpdu_bmap = %u",
+ htt_stats_buf->null_mpdu_bmap);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "user_ack_failure = %u",
+ htt_stats_buf->user_ack_failure);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "ack_tlv_proc = %u",
+ htt_stats_buf->ack_tlv_proc);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "sched_id_proc = %u",
+ htt_stats_buf->sched_id_proc);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "null_mpdu_tx_count = %u",
+ htt_stats_buf->null_mpdu_tx_count);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "mpdu_bmap_not_recvd = %u",
+ htt_stats_buf->mpdu_bmap_not_recvd);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "num_bar = %u",
+ htt_stats_buf->num_bar);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "rts = %u",
+ htt_stats_buf->rts);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "cts2self = %u",
+ htt_stats_buf->cts2self);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "qos_null = %u",
+ htt_stats_buf->qos_null);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "mpdu_tried_cnt = %u",
+ htt_stats_buf->mpdu_tried_cnt);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "mpdu_queued_cnt = %u",
+ htt_stats_buf->mpdu_queued_cnt);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "mpdu_ack_fail_cnt = %u",
+ htt_stats_buf->mpdu_ack_fail_cnt);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "mpdu_filt_cnt = %u",
+ htt_stats_buf->mpdu_filt_cnt);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "false_mpdu_ack_count = %u",
+ htt_stats_buf->false_mpdu_ack_count);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "txq_timeout = %u\n",
+ htt_stats_buf->txq_timeout);
+
+ if (len >= buf_len)
+ buf[buf_len - 1] = 0;
+ else
+ buf[len] = 0;
+
+ stats_req->buf_len = len;
+}
+
+static inline void
+htt_print_tx_hwq_difs_latency_stats_tlv_v(const void *tag_buf,
+ u16 tag_len,
+ struct debug_htt_stats_req *stats_req)
+{
+ const struct htt_tx_hwq_difs_latency_stats_tlv_v *htt_stats_buf = tag_buf;
+ u8 *buf = stats_req->buf;
+ u32 len = stats_req->buf_len;
+ u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
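+ /* tag_len is in bytes and each bin is a u32, so tag_len >> 2 is the
+ * element count, clamped to the TLV's maximum number of bins.
+ */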
+ u16 data_len = min_t(u16, (tag_len >> 2), HTT_TX_HWQ_MAX_DIFS_LATENCY_BINS);
+ char difs_latency_hist[HTT_MAX_STRING_LEN] = {0};
+
+ len += HTT_DBG_OUT(buf + len, buf_len - len,
+ "HTT_TX_HWQ_DIFS_LATENCY_STATS_TLV_V:");
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "hist_intvl = %u",
+ htt_stats_buf->hist_intvl);
+
+ ARRAY_TO_STRING(difs_latency_hist, htt_stats_buf->difs_latency_hist,
+ data_len);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "difs_latency_hist = %s\n",
+ difs_latency_hist);
+
+ if (len >= buf_len)
+ buf[buf_len - 1] = 0;
+ else
+ buf[len] = 0;
+
+ stats_req->buf_len = len;
+}
+
+static inline void
+htt_print_tx_hwq_cmd_result_stats_tlv_v(const void *tag_buf,
+ u16 tag_len,
+ struct debug_htt_stats_req *stats_req)
+{
+ const struct htt_tx_hwq_cmd_result_stats_tlv_v *htt_stats_buf = tag_buf;
+ u8 *buf = stats_req->buf;
+ u32 len = stats_req->buf_len;
+ u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
+ u16 data_len;
+ char cmd_result[HTT_MAX_STRING_LEN] = {0};
+
+ data_len = min_t(u16, (tag_len >> 2), HTT_TX_HWQ_MAX_CMD_RESULT_STATS);
+
+ len += HTT_DBG_OUT(buf + len, buf_len - len,
+ "HTT_TX_HWQ_CMD_RESULT_STATS_TLV_V:");
+
+ ARRAY_TO_STRING(cmd_result, htt_stats_buf->cmd_result, data_len);
+
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "cmd_result = %s\n", cmd_result);
+
+ if (len >= buf_len)
+ buf[buf_len - 1] = 0;
+ else
+ buf[len] = 0;
+
+ stats_req->buf_len = len;
+}
+
+static inline void
+htt_print_tx_hwq_cmd_stall_stats_tlv_v(const void *tag_buf,
+ u16 tag_len,
+ struct debug_htt_stats_req *stats_req)
+{
+ const struct htt_tx_hwq_cmd_stall_stats_tlv_v *htt_stats_buf = tag_buf;
+ u8 *buf = stats_req->buf;
+ u32 len = stats_req->buf_len;
+ u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
+ u16 num_elems;
+ char cmd_stall_status[HTT_MAX_STRING_LEN] = {0};
+
+ num_elems = min_t(u16, (tag_len >> 2), HTT_TX_HWQ_MAX_CMD_STALL_STATS);
+
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "HTT_TX_HWQ_CMD_STALL_STATS_TLV_V:");
+
+ ARRAY_TO_STRING(cmd_stall_status, htt_stats_buf->cmd_stall_status, num_elems);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "cmd_stall_status = %s\n",
+ cmd_stall_status);
+
+ if (len >= buf_len)
+ buf[buf_len - 1] = 0;
+ else
+ buf[len] = 0;
+
+ stats_req->buf_len = len;
+}
+
+static inline void
+htt_print_tx_hwq_fes_result_stats_tlv_v(const void *tag_buf,
+ u16 tag_len,
+ struct debug_htt_stats_req *stats_req)
+{
+ const struct htt_tx_hwq_fes_result_stats_tlv_v *htt_stats_buf = tag_buf;
+ u8 *buf = stats_req->buf;
+ u32 len = stats_req->buf_len;
+ u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
+ u16 num_elems;
+ char fes_result[HTT_MAX_STRING_LEN] = {0};
+
+ num_elems = min_t(u16, (tag_len >> 2), HTT_TX_HWQ_MAX_FES_RESULT_STATS);
+
+ len += HTT_DBG_OUT(buf + len, buf_len - len,
+ "HTT_TX_HWQ_FES_RESULT_STATS_TLV_V:");
+
+ ARRAY_TO_STRING(fes_result, htt_stats_buf->fes_result, num_elems);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "fes_result = %s\n", fes_result);
+
+ if (len >= buf_len)
+ buf[buf_len - 1] = 0;
+ else
+ buf[len] = 0;
+
+ stats_req->buf_len = len;
+}
+
+static inline void
+htt_print_tx_hwq_tried_mpdu_cnt_hist_tlv_v(const void *tag_buf,
+ u16 tag_len,
+ struct debug_htt_stats_req *stats_req)
+{
+ const struct htt_tx_hwq_tried_mpdu_cnt_hist_tlv_v *htt_stats_buf = tag_buf;
+ u8 *buf = stats_req->buf;
+ u32 len = stats_req->buf_len;
+ u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
+ char tried_mpdu_cnt_hist[HTT_MAX_STRING_LEN] = {0};
+ u32 num_elements = ((tag_len -
+ sizeof(htt_stats_buf->hist_bin_size)) >> 2);
+ u32 required_buffer_size = HTT_MAX_PRINT_CHAR_PER_ELEM * num_elements;
+
+ len += HTT_DBG_OUT(buf + len, buf_len - len,
+ "HTT_TX_HWQ_TRIED_MPDU_CNT_HIST_TLV_V:");
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "TRIED_MPDU_CNT_HIST_BIN_SIZE : %u",
+ htt_stats_buf->hist_bin_size);
+
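+ /* Each element can take up to HTT_MAX_PRINT_CHAR_PER_ELEM characters;
+ * dump the histogram only if it fits in the local string buffer.
+ */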
+ if (required_buffer_size < HTT_MAX_STRING_LEN) {
+ ARRAY_TO_STRING(tried_mpdu_cnt_hist,
+ htt_stats_buf->tried_mpdu_cnt_hist,
+ num_elements);
+ len += HTT_DBG_OUT(buf + len, buf_len - len,
+ "tried_mpdu_cnt_hist = %s\n",
+ tried_mpdu_cnt_hist);
+ } else {
+ len += HTT_DBG_OUT(buf + len, buf_len - len,
+ "INSUFFICIENT PRINT BUFFER ");
+ }
+
+ if (len >= buf_len)
+ buf[buf_len - 1] = 0;
+ else
+ buf[len] = 0;
+
+ stats_req->buf_len = len;
+}
+
+static inline void
+htt_print_tx_hwq_txop_used_cnt_hist_tlv_v(const void *tag_buf,
+ u16 tag_len,
+ struct debug_htt_stats_req *stats_req)
+{
+ const struct htt_tx_hwq_txop_used_cnt_hist_tlv_v *htt_stats_buf = tag_buf;
+ u8 *buf = stats_req->buf;
+ u32 len = stats_req->buf_len;
+ u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
+ char txop_used_cnt_hist[HTT_MAX_STRING_LEN] = {0};
+ u32 num_elements = tag_len >> 2;
+ u32 required_buffer_size = HTT_MAX_PRINT_CHAR_PER_ELEM * num_elements;
+
+ len += HTT_DBG_OUT(buf + len, buf_len - len,
+ "HTT_TX_HWQ_TXOP_USED_CNT_HIST_TLV_V:");
+
+ if (required_buffer_size < HTT_MAX_STRING_LEN) {
+ ARRAY_TO_STRING(txop_used_cnt_hist,
+ htt_stats_buf->txop_used_cnt_hist,
+ num_elements);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "txop_used_cnt_hist = %s\n",
+ txop_used_cnt_hist);
+ } else {
+ len += HTT_DBG_OUT(buf + len, buf_len - len,
+ "INSUFFICIENT PRINT BUFFER ");
+ }
+
+ if (len >= buf_len)
+ buf[buf_len - 1] = 0;
+ else
+ buf[len] = 0;
+
+ stats_req->buf_len = len;
+}
+
+static inline void htt_print_tx_sounding_stats_tlv(const void *tag_buf,
+ struct debug_htt_stats_req *stats_req)
+{
+ s32 i;
+ const struct htt_tx_sounding_stats_tlv *htt_stats_buf = tag_buf;
+ u8 *buf = stats_req->buf;
+ u32 len = stats_req->buf_len;
+ u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
+ const u32 *cbf_20 = htt_stats_buf->cbf_20;
+ const u32 *cbf_40 = htt_stats_buf->cbf_40;
+ const u32 *cbf_80 = htt_stats_buf->cbf_80;
+ const u32 *cbf_160 = htt_stats_buf->cbf_160;
+
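+ /* One TLV carries either 11ac or 11ax sounding counters;
+ * tx_sounding_mode selects which block is valid.
+ */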
+ if (htt_stats_buf->tx_sounding_mode == HTT_TX_AC_SOUNDING_MODE) {
+ len += HTT_DBG_OUT(buf + len, buf_len - len,
+ "\nHTT_TX_AC_SOUNDING_STATS_TLV:\n");
+ len += HTT_DBG_OUT(buf + len, buf_len - len,
+ "ac_cbf_20 = IBF : %u, SU_SIFS : %u, SU_RBO : %u, MU_SIFS : %u, MU_RBO : %u ",
+ cbf_20[HTT_IMPLICIT_TXBF_STEER_STATS],
+ cbf_20[HTT_EXPLICIT_TXBF_SU_SIFS_STEER_STATS],
+ cbf_20[HTT_EXPLICIT_TXBF_SU_RBO_STEER_STATS],
+ cbf_20[HTT_EXPLICIT_TXBF_MU_SIFS_STEER_STATS],
+ cbf_20[HTT_EXPLICIT_TXBF_MU_RBO_STEER_STATS]);
+ len += HTT_DBG_OUT(buf + len, buf_len - len,
+ "ac_cbf_40 = IBF : %u, SU_SIFS : %u, SU_RBO : %u, MU_SIFS : %u, MU_RBO : %u",
+ cbf_40[HTT_IMPLICIT_TXBF_STEER_STATS],
+ cbf_40[HTT_EXPLICIT_TXBF_SU_SIFS_STEER_STATS],
+ cbf_40[HTT_EXPLICIT_TXBF_SU_RBO_STEER_STATS],
+ cbf_40[HTT_EXPLICIT_TXBF_MU_SIFS_STEER_STATS],
+ cbf_40[HTT_EXPLICIT_TXBF_MU_RBO_STEER_STATS]);
+ len += HTT_DBG_OUT(buf + len, buf_len - len,
+ "ac_cbf_80 = IBF : %u, SU_SIFS : %u, SU_RBO : %u, MU_SIFS : %u, MU_RBO : %u",
+ cbf_80[HTT_IMPLICIT_TXBF_STEER_STATS],
+ cbf_80[HTT_EXPLICIT_TXBF_SU_SIFS_STEER_STATS],
+ cbf_80[HTT_EXPLICIT_TXBF_SU_RBO_STEER_STATS],
+ cbf_80[HTT_EXPLICIT_TXBF_MU_SIFS_STEER_STATS],
+ cbf_80[HTT_EXPLICIT_TXBF_MU_RBO_STEER_STATS]);
+ len += HTT_DBG_OUT(buf + len, buf_len - len,
+ "ac_cbf_160 = IBF : %u, SU_SIFS : %u, SU_RBO : %u, MU_SIFS : %u, MU_RBO : %u",
+ cbf_160[HTT_IMPLICIT_TXBF_STEER_STATS],
+ cbf_160[HTT_EXPLICIT_TXBF_SU_SIFS_STEER_STATS],
+ cbf_160[HTT_EXPLICIT_TXBF_SU_RBO_STEER_STATS],
+ cbf_160[HTT_EXPLICIT_TXBF_MU_SIFS_STEER_STATS],
+ cbf_160[HTT_EXPLICIT_TXBF_MU_RBO_STEER_STATS]);
+
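+ /* Assumed layout: sounding[] stores four bandwidth counters
+ * (20/40/80/160 MHz) per user, flattened as [user][bw].
+ */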
+ for (i = 0; i < HTT_TX_PDEV_STATS_NUM_AC_MUMIMO_USER_STATS; i++) {
+ len += HTT_DBG_OUT(buf + len, buf_len - len,
+ "Sounding User %u = 20MHz: %u, 40MHz : %u, 80MHz: %u, 160MHz: %u ",
+ i,
+ htt_stats_buf->sounding[i * 4],
+ htt_stats_buf->sounding[i * 4 + 1],
+ htt_stats_buf->sounding[i * 4 + 2],
+ htt_stats_buf->sounding[i * 4 + 3]);
+ }
+ } else if (htt_stats_buf->tx_sounding_mode == HTT_TX_AX_SOUNDING_MODE) {
+ len += HTT_DBG_OUT(buf + len, buf_len - len,
+ "\nHTT_TX_AX_SOUNDING_STATS_TLV:\n");
+ len += HTT_DBG_OUT(buf + len, buf_len - len,
+ "ax_cbf_20 = IBF : %u, SU_SIFS : %u, SU_RBO : %u, MU_SIFS : %u, MU_RBO : %u ",
+ cbf_20[HTT_IMPLICIT_TXBF_STEER_STATS],
+ cbf_20[HTT_EXPLICIT_TXBF_SU_SIFS_STEER_STATS],
+ cbf_20[HTT_EXPLICIT_TXBF_SU_RBO_STEER_STATS],
+ cbf_20[HTT_EXPLICIT_TXBF_MU_SIFS_STEER_STATS],
+ cbf_20[HTT_EXPLICIT_TXBF_MU_RBO_STEER_STATS]);
+ len += HTT_DBG_OUT(buf + len, buf_len - len,
+ "ax_cbf_40 = IBF : %u, SU_SIFS : %u, SU_RBO : %u, MU_SIFS : %u, MU_RBO : %u",
+ cbf_40[HTT_IMPLICIT_TXBF_STEER_STATS],
+ cbf_40[HTT_EXPLICIT_TXBF_SU_SIFS_STEER_STATS],
+ cbf_40[HTT_EXPLICIT_TXBF_SU_RBO_STEER_STATS],
+ cbf_40[HTT_EXPLICIT_TXBF_MU_SIFS_STEER_STATS],
+ cbf_40[HTT_EXPLICIT_TXBF_MU_RBO_STEER_STATS]);
+ len += HTT_DBG_OUT(buf + len, buf_len - len,
+ "ax_cbf_80 = IBF : %u, SU_SIFS : %u, SU_RBO : %u, MU_SIFS : %u, MU_RBO : %u",
+ cbf_80[HTT_IMPLICIT_TXBF_STEER_STATS],
+ cbf_80[HTT_EXPLICIT_TXBF_SU_SIFS_STEER_STATS],
+ cbf_80[HTT_EXPLICIT_TXBF_SU_RBO_STEER_STATS],
+ cbf_80[HTT_EXPLICIT_TXBF_MU_SIFS_STEER_STATS],
+ cbf_80[HTT_EXPLICIT_TXBF_MU_RBO_STEER_STATS]);
+ len += HTT_DBG_OUT(buf + len, buf_len - len,
+ "ax_cbf_160 = IBF : %u, SU_SIFS : %u, SU_RBO : %u, MU_SIFS : %u, MU_RBO : %u",
+ cbf_160[HTT_IMPLICIT_TXBF_STEER_STATS],
+ cbf_160[HTT_EXPLICIT_TXBF_SU_SIFS_STEER_STATS],
+ cbf_160[HTT_EXPLICIT_TXBF_SU_RBO_STEER_STATS],
+ cbf_160[HTT_EXPLICIT_TXBF_MU_SIFS_STEER_STATS],
+ cbf_160[HTT_EXPLICIT_TXBF_MU_RBO_STEER_STATS]);
+
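+ /* Same assumed [user][bw] layout as the 11ac loop above */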
+ for (i = 0; i < HTT_TX_PDEV_STATS_NUM_AX_MUMIMO_USER_STATS; i++) {
+ len += HTT_DBG_OUT(buf + len, buf_len - len,
+ "Sounding User %u = 20MHz: %u, 40MHz : %u, 80MHz: %u, 160MHz: %u ",
+ i,
+ htt_stats_buf->sounding[i * 4],
+ htt_stats_buf->sounding[i * 4 + 1],
+ htt_stats_buf->sounding[i * 4 + 2],
+ htt_stats_buf->sounding[i * 4 + 3]);
+ }
+ }
+
+ if (len >= buf_len)
+ buf[buf_len - 1] = 0;
+ else
+ buf[len] = 0;
+
+ stats_req->buf_len = len;
+}
+
+static inline void
+htt_print_tx_selfgen_cmn_stats_tlv(const void *tag_buf,
+ struct debug_htt_stats_req *stats_req)
+{
+ const struct htt_tx_selfgen_cmn_stats_tlv *htt_stats_buf = tag_buf;
+ u8 *buf = stats_req->buf;
+ u32 len = stats_req->buf_len;
+ u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
+
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "HTT_TX_SELFGEN_CMN_STATS_TLV:");
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "mac_id = %u",
+ htt_stats_buf->mac_id__word & 0xFF);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "su_bar = %u",
+ htt_stats_buf->su_bar);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "rts = %u",
+ htt_stats_buf->rts);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "cts2self = %u",
+ htt_stats_buf->cts2self);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "qos_null = %u",
+ htt_stats_buf->qos_null);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "delayed_bar_1 = %u",
+ htt_stats_buf->delayed_bar_1);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "delayed_bar_2 = %u",
+ htt_stats_buf->delayed_bar_2);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "delayed_bar_3 = %u",
+ htt_stats_buf->delayed_bar_3);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "delayed_bar_4 = %u",
+ htt_stats_buf->delayed_bar_4);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "delayed_bar_5 = %u",
+ htt_stats_buf->delayed_bar_5);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "delayed_bar_6 = %u",
+ htt_stats_buf->delayed_bar_6);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "delayed_bar_7 = %u\n",
+ htt_stats_buf->delayed_bar_7);
+
+ if (len >= buf_len)
+ buf[buf_len - 1] = 0;
+ else
+ buf[len] = 0;
+
+ stats_req->buf_len = len;
+}
+
+static inline void
+htt_print_tx_selfgen_ac_stats_tlv(const void *tag_buf,
+ struct debug_htt_stats_req *stats_req)
+{
+ const struct htt_tx_selfgen_ac_stats_tlv *htt_stats_buf = tag_buf;
+ u8 *buf = stats_req->buf;
+ u32 len = stats_req->buf_len;
+ u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
+
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "HTT_TX_SELFGEN_AC_STATS_TLV:");
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "ac_su_ndpa = %u",
+ htt_stats_buf->ac_su_ndpa);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "ac_su_ndp = %u",
+ htt_stats_buf->ac_su_ndp);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "ac_mu_mimo_ndpa = %u",
+ htt_stats_buf->ac_mu_mimo_ndpa);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "ac_mu_mimo_ndp = %u",
+ htt_stats_buf->ac_mu_mimo_ndp);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "ac_mu_mimo_brpoll_1 = %u",
+ htt_stats_buf->ac_mu_mimo_brpoll_1);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "ac_mu_mimo_brpoll_2 = %u",
+ htt_stats_buf->ac_mu_mimo_brpoll_2);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "ac_mu_mimo_brpoll_3 = %u\n",
+ htt_stats_buf->ac_mu_mimo_brpoll_3);
+
+ if (len >= buf_len)
+ buf[buf_len - 1] = 0;
+ else
+ buf[len] = 0;
+
+ stats_req->buf_len = len;
+}
+
+static inline void
+htt_print_tx_selfgen_ax_stats_tlv(const void *tag_buf,
+ struct debug_htt_stats_req *stats_req)
+{
+ const struct htt_tx_selfgen_ax_stats_tlv *htt_stats_buf = tag_buf;
+ u8 *buf = stats_req->buf;
+ u32 len = stats_req->buf_len;
+ u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
+
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "HTT_TX_SELFGEN_AX_STATS_TLV:");
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "ax_su_ndpa = %u",
+ htt_stats_buf->ax_su_ndpa);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "ax_su_ndp = %u",
+ htt_stats_buf->ax_su_ndp);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "ax_mu_mimo_ndpa = %u",
+ htt_stats_buf->ax_mu_mimo_ndpa);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "ax_mu_mimo_ndp = %u",
+ htt_stats_buf->ax_mu_mimo_ndp);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "ax_mu_mimo_brpoll_1 = %u",
+ htt_stats_buf->ax_mu_mimo_brpoll_1);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "ax_mu_mimo_brpoll_2 = %u",
+ htt_stats_buf->ax_mu_mimo_brpoll_2);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "ax_mu_mimo_brpoll_3 = %u",
+ htt_stats_buf->ax_mu_mimo_brpoll_3);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "ax_mu_mimo_brpoll_4 = %u",
+ htt_stats_buf->ax_mu_mimo_brpoll_4);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "ax_mu_mimo_brpoll_5 = %u",
+ htt_stats_buf->ax_mu_mimo_brpoll_5);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "ax_mu_mimo_brpoll_6 = %u",
+ htt_stats_buf->ax_mu_mimo_brpoll_6);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "ax_mu_mimo_brpoll_7 = %u",
+ htt_stats_buf->ax_mu_mimo_brpoll_7);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "ax_basic_trigger = %u",
+ htt_stats_buf->ax_basic_trigger);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "ax_bsr_trigger = %u",
+ htt_stats_buf->ax_bsr_trigger);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "ax_mu_bar_trigger = %u",
+ htt_stats_buf->ax_mu_bar_trigger);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "ax_mu_rts_trigger = %u\n",
+ htt_stats_buf->ax_mu_rts_trigger);
+
+ if (len >= buf_len)
+ buf[buf_len - 1] = 0;
+ else
+ buf[len] = 0;
+
+ stats_req->buf_len = len;
+}
+
+static inline void
+htt_print_tx_selfgen_ac_err_stats_tlv(const void *tag_buf,
+ struct debug_htt_stats_req *stats_req)
+{
+ const struct htt_tx_selfgen_ac_err_stats_tlv *htt_stats_buf = tag_buf;
+ u8 *buf = stats_req->buf;
+ u32 len = stats_req->buf_len;
+ u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
+
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "HTT_TX_SELFGEN_AC_ERR_STATS_TLV:");
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "ac_su_ndp_err = %u",
+ htt_stats_buf->ac_su_ndp_err);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "ac_su_ndpa_err = %u",
+ htt_stats_buf->ac_su_ndpa_err);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "ac_mu_mimo_ndpa_err = %u",
+ htt_stats_buf->ac_mu_mimo_ndpa_err);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "ac_mu_mimo_ndp_err = %u",
+ htt_stats_buf->ac_mu_mimo_ndp_err);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "ac_mu_mimo_brp1_err = %u",
+ htt_stats_buf->ac_mu_mimo_brp1_err);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "ac_mu_mimo_brp2_err = %u",
+ htt_stats_buf->ac_mu_mimo_brp2_err);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "ac_mu_mimo_brp3_err = %u\n",
+ htt_stats_buf->ac_mu_mimo_brp3_err);
+
+ if (len >= buf_len)
+ buf[buf_len - 1] = 0;
+ else
+ buf[len] = 0;
+
+ stats_req->buf_len = len;
+}
+
+static inline void
+htt_print_tx_selfgen_ax_err_stats_tlv(const void *tag_buf,
+ struct debug_htt_stats_req *stats_req)
+{
+ const struct htt_tx_selfgen_ax_err_stats_tlv *htt_stats_buf = tag_buf;
+ u8 *buf = stats_req->buf;
+ u32 len = stats_req->buf_len;
+ u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
+
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "HTT_TX_SELFGEN_AX_ERR_STATS_TLV:");
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "ax_su_ndp_err = %u",
+ htt_stats_buf->ax_su_ndp_err);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "ax_su_ndpa_err = %u",
+ htt_stats_buf->ax_su_ndpa_err);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "ax_mu_mimo_ndpa_err = %u",
+ htt_stats_buf->ax_mu_mimo_ndpa_err);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "ax_mu_mimo_ndp_err = %u",
+ htt_stats_buf->ax_mu_mimo_ndp_err);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "ax_mu_mimo_brp1_err = %u",
+ htt_stats_buf->ax_mu_mimo_brp1_err);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "ax_mu_mimo_brp2_err = %u",
+ htt_stats_buf->ax_mu_mimo_brp2_err);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "ax_mu_mimo_brp3_err = %u",
+ htt_stats_buf->ax_mu_mimo_brp3_err);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "ax_mu_mimo_brp4_err = %u",
+ htt_stats_buf->ax_mu_mimo_brp4_err);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "ax_mu_mimo_brp5_err = %u",
+ htt_stats_buf->ax_mu_mimo_brp5_err);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "ax_mu_mimo_brp6_err = %u",
+ htt_stats_buf->ax_mu_mimo_brp6_err);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "ax_mu_mimo_brp7_err = %u",
+ htt_stats_buf->ax_mu_mimo_brp7_err);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "ax_basic_trigger_err = %u",
+ htt_stats_buf->ax_basic_trigger_err);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "ax_bsr_trigger_err = %u",
+ htt_stats_buf->ax_bsr_trigger_err);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "ax_mu_bar_trigger_err = %u",
+ htt_stats_buf->ax_mu_bar_trigger_err);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "ax_mu_rts_trigger_err = %u\n",
+ htt_stats_buf->ax_mu_rts_trigger_err);
+
+ if (len >= buf_len)
+ buf[buf_len - 1] = 0;
+ else
+ buf[len] = 0;
+
+ stats_req->buf_len = len;
+}
+
+static inline void
+htt_print_tx_pdev_mu_mimo_sch_stats_tlv(const void *tag_buf,
+ struct debug_htt_stats_req *stats_req)
+{
+ const struct htt_tx_pdev_mu_mimo_sch_stats_tlv *htt_stats_buf = tag_buf;
+ u8 *buf = stats_req->buf;
+ u32 len = stats_req->buf_len;
+ u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
+ u8 i;
+
+ len += HTT_DBG_OUT(buf + len, buf_len - len,
+ "HTT_TX_PDEV_MU_MIMO_SCH_STATS_TLV:");
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "mu_mimo_sch_posted = %u",
+ htt_stats_buf->mu_mimo_sch_posted);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "mu_mimo_sch_failed = %u",
+ htt_stats_buf->mu_mimo_sch_failed);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "mu_mimo_ppdu_posted = %u\n",
+ htt_stats_buf->mu_mimo_ppdu_posted);
+
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "11ac MU_MIMO SCH STATS:");
+
+ for (i = 0; i < HTT_TX_PDEV_STATS_NUM_AC_MUMIMO_USER_STATS; i++)
+ len += HTT_DBG_OUT(buf + len, buf_len - len,
+ "ac_mu_mimo_sch_nusers_%u = %u",
+ i, htt_stats_buf->ac_mu_mimo_sch_nusers[i]);
+
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "\n11ax MU_MIMO SCH STATS:");
+
+ for (i = 0; i < HTT_TX_PDEV_STATS_NUM_AX_MUMIMO_USER_STATS; i++)
+ len += HTT_DBG_OUT(buf + len, buf_len - len,
+ "ax_mu_mimo_sch_nusers_%u = %u",
+ i, htt_stats_buf->ax_mu_mimo_sch_nusers[i]);
+
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "\n11ax OFDMA SCH STATS:");
+
+ for (i = 0; i < HTT_TX_PDEV_STATS_NUM_OFDMA_USER_STATS; i++)
+ len += HTT_DBG_OUT(buf + len, buf_len - len,
+ "ax_ofdma_sch_nusers_%u = %u",
+ i, htt_stats_buf->ax_ofdma_sch_nusers[i]);
+
+ if (len >= buf_len)
+ buf[buf_len - 1] = 0;
+ else
+ buf[len] = 0;
+
+ stats_req->buf_len = len;
+}
+
+static inline void
+htt_print_tx_pdev_mu_mimo_mpdu_stats_tlv(const void *tag_buf,
+ struct debug_htt_stats_req *stats_req)
+{
+ const struct htt_tx_pdev_mpdu_stats_tlv *htt_stats_buf = tag_buf;
+ u8 *buf = stats_req->buf;
+ u32 len = stats_req->buf_len;
+ u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
+
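+ /* Each instance of this TLV reports counters for a single user_index
+ * under one scheduling mode; the section header is printed only for
+ * user 0.
+ */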
+ if (htt_stats_buf->tx_sched_mode == HTT_STATS_TX_SCHED_MODE_MU_MIMO_AC) {
+ if (!htt_stats_buf->user_index)
+ len += HTT_DBG_OUT(buf + len, buf_len - len,
+ "HTT_TX_PDEV_MU_MIMO_AC_MPDU_STATS:\n");
+
+ if (htt_stats_buf->user_index <
+ HTT_TX_PDEV_STATS_NUM_AC_MUMIMO_USER_STATS) {
+ len += HTT_DBG_OUT(buf + len, buf_len - len,
+ "ac_mu_mimo_mpdus_queued_usr_%u = %u",
+ htt_stats_buf->user_index,
+ htt_stats_buf->mpdus_queued_usr);
+ len += HTT_DBG_OUT(buf + len, buf_len - len,
+ "ac_mu_mimo_mpdus_tried_usr_%u = %u",
+ htt_stats_buf->user_index,
+ htt_stats_buf->mpdus_tried_usr);
+ len += HTT_DBG_OUT(buf + len, buf_len - len,
+ "ac_mu_mimo_mpdus_failed_usr_%u = %u",
+ htt_stats_buf->user_index,
+ htt_stats_buf->mpdus_failed_usr);
+ len += HTT_DBG_OUT(buf + len, buf_len - len,
+ "ac_mu_mimo_mpdus_requeued_usr_%u = %u",
+ htt_stats_buf->user_index,
+ htt_stats_buf->mpdus_requeued_usr);
+ len += HTT_DBG_OUT(buf + len, buf_len - len,
+ "ac_mu_mimo_err_no_ba_usr_%u = %u",
+ htt_stats_buf->user_index,
+ htt_stats_buf->err_no_ba_usr);
+ len += HTT_DBG_OUT(buf + len, buf_len - len,
+ "ac_mu_mimo_mpdu_underrun_usr_%u = %u",
+ htt_stats_buf->user_index,
+ htt_stats_buf->mpdu_underrun_usr);
+ len += HTT_DBG_OUT(buf + len, buf_len - len,
+ "ac_mu_mimo_ampdu_underrun_usr_%u = %u\n",
+ htt_stats_buf->user_index,
+ htt_stats_buf->ampdu_underrun_usr);
+ }
+ }
+
+ if (htt_stats_buf->tx_sched_mode == HTT_STATS_TX_SCHED_MODE_MU_MIMO_AX) {
+ if (!htt_stats_buf->user_index)
+ len += HTT_DBG_OUT(buf + len, buf_len - len,
+ "HTT_TX_PDEV_MU_MIMO_AX_MPDU_STATS:\n");
+
+ if (htt_stats_buf->user_index <
+ HTT_TX_PDEV_STATS_NUM_AX_MUMIMO_USER_STATS) {
+ len += HTT_DBG_OUT(buf + len, buf_len - len,
+ "ax_mu_mimo_mpdus_queued_usr_%u = %u",
+ htt_stats_buf->user_index,
+ htt_stats_buf->mpdus_queued_usr);
+ len += HTT_DBG_OUT(buf + len, buf_len - len,
+ "ax_mu_mimo_mpdus_tried_usr_%u = %u",
+ htt_stats_buf->user_index,
+ htt_stats_buf->mpdus_tried_usr);
+ len += HTT_DBG_OUT(buf + len, buf_len - len,
+ "ax_mu_mimo_mpdus_failed_usr_%u = %u",
+ htt_stats_buf->user_index,
+ htt_stats_buf->mpdus_failed_usr);
+ len += HTT_DBG_OUT(buf + len, buf_len - len,
+ "ax_mu_mimo_mpdus_requeued_usr_%u = %u",
+ htt_stats_buf->user_index,
+ htt_stats_buf->mpdus_requeued_usr);
+ len += HTT_DBG_OUT(buf + len, buf_len - len,
+ "ax_mu_mimo_err_no_ba_usr_%u = %u",
+ htt_stats_buf->user_index,
+ htt_stats_buf->err_no_ba_usr);
+ len += HTT_DBG_OUT(buf + len, buf_len - len,
+ "ax_mu_mimo_mpdu_underrun_usr_%u = %u",
+ htt_stats_buf->user_index,
+ htt_stats_buf->mpdu_underrun_usr);
+ len += HTT_DBG_OUT(buf + len, buf_len - len,
+ "ax_mu_mimo_ampdu_underrun_usr_%u = %u\n",
+ htt_stats_buf->user_index,
+ htt_stats_buf->ampdu_underrun_usr);
+ }
+ }
+
+ if (htt_stats_buf->tx_sched_mode == HTT_STATS_TX_SCHED_MODE_MU_OFDMA_AX) {
+ if (!htt_stats_buf->user_index)
+ len += HTT_DBG_OUT(buf + len, buf_len - len,
+ "HTT_TX_PDEV_AX_MU_OFDMA_MPDU_STATS:\n");
+
+ if (htt_stats_buf->user_index < HTT_TX_PDEV_STATS_NUM_OFDMA_USER_STATS) {
+ len += HTT_DBG_OUT(buf + len, buf_len - len,
+ "ax_mu_ofdma_mpdus_queued_usr_%u = %u",
+ htt_stats_buf->user_index,
+ htt_stats_buf->mpdus_queued_usr);
+ len += HTT_DBG_OUT(buf + len, buf_len - len,
+ "ax_mu_ofdma_mpdus_tried_usr_%u = %u",
+ htt_stats_buf->user_index,
+ htt_stats_buf->mpdus_tried_usr);
+ len += HTT_DBG_OUT(buf + len, buf_len - len,
+ "ax_mu_ofdma_mpdus_failed_usr_%u = %u",
+ htt_stats_buf->user_index,
+ htt_stats_buf->mpdus_failed_usr);
+ len += HTT_DBG_OUT(buf + len, buf_len - len,
+ "ax_mu_ofdma_mpdus_requeued_usr_%u = %u",
+ htt_stats_buf->user_index,
+ htt_stats_buf->mpdus_requeued_usr);
+ len += HTT_DBG_OUT(buf + len, buf_len - len,
+ "ax_mu_ofdma_err_no_ba_usr_%u = %u",
+ htt_stats_buf->user_index,
+ htt_stats_buf->err_no_ba_usr);
+ len += HTT_DBG_OUT(buf + len, buf_len - len,
+ "ax_mu_ofdma_mpdu_underrun_usr_%u = %u",
+ htt_stats_buf->user_index,
+ htt_stats_buf->mpdu_underrun_usr);
+ len += HTT_DBG_OUT(buf + len, buf_len - len,
+ "ax_mu_ofdma_ampdu_underrun_usr_%u = %u\n",
+ htt_stats_buf->user_index,
+ htt_stats_buf->ampdu_underrun_usr);
+ }
+ }
+
+ if (len >= buf_len)
+ buf[buf_len - 1] = 0;
+ else
+ buf[len] = 0;
+
+ stats_req->buf_len = len;
+}
+
+static inline void
+htt_print_sched_txq_cmd_posted_tlv_v(const void *tag_buf,
+ u16 tag_len,
+ struct debug_htt_stats_req *stats_req)
+{
+ const struct htt_sched_txq_cmd_posted_tlv_v *htt_stats_buf = tag_buf;
+ u8 *buf = stats_req->buf;
+ u32 len = stats_req->buf_len;
+ u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
+ char sched_cmd_posted[HTT_MAX_STRING_LEN] = {0};
+ u16 num_elements = min_t(u16, (tag_len >> 2), HTT_TX_PDEV_SCHED_TX_MODE_MAX);
+
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "HTT_SCHED_TXQ_CMD_POSTED_TLV_V:");
+
+ ARRAY_TO_STRING(sched_cmd_posted, htt_stats_buf->sched_cmd_posted,
+ num_elements);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "sched_cmd_posted = %s\n",
+ sched_cmd_posted);
+
+ if (len >= buf_len)
+ buf[buf_len - 1] = 0;
+ else
+ buf[len] = 0;
+
+ stats_req->buf_len = len;
+}
+
+static inline void
+htt_print_sched_txq_cmd_reaped_tlv_v(const void *tag_buf,
+ u16 tag_len,
+ struct debug_htt_stats_req *stats_req)
+{
+ const struct htt_sched_txq_cmd_reaped_tlv_v *htt_stats_buf = tag_buf;
+ u8 *buf = stats_req->buf;
+ u32 len = stats_req->buf_len;
+ u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
+ char sched_cmd_reaped[HTT_MAX_STRING_LEN] = {0};
+ u16 num_elements = min_t(u16, (tag_len >> 2), HTT_TX_PDEV_SCHED_TX_MODE_MAX);
+
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "HTT_SCHED_TXQ_CMD_REAPED_TLV_V:");
+
+ ARRAY_TO_STRING(sched_cmd_reaped, htt_stats_buf->sched_cmd_reaped,
+ num_elements);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "sched_cmd_reaped = %s\n",
+ sched_cmd_reaped);
+
+ if (len >= buf_len)
+ buf[buf_len - 1] = 0;
+ else
+ buf[len] = 0;
+
+ stats_req->buf_len = len;
+}
+
+static inline void
+htt_print_sched_txq_sched_order_su_tlv_v(const void *tag_buf,
+ u16 tag_len,
+ struct debug_htt_stats_req *stats_req)
+{
+ const struct htt_sched_txq_sched_order_su_tlv_v *htt_stats_buf = tag_buf;
+ u8 *buf = stats_req->buf;
+ u32 len = stats_req->buf_len;
+ u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
+ char sched_order_su[HTT_MAX_STRING_LEN] = {0};
+ /* each entry is u32, i.e. 4 bytes */
+ u32 sched_order_su_num_entries =
+ min_t(u32, (tag_len >> 2), HTT_TX_PDEV_NUM_SCHED_ORDER_LOG);
+
+ len += HTT_DBG_OUT(buf + len, buf_len - len,
+ "HTT_SCHED_TXQ_SCHED_ORDER_SU_TLV_V:");
+
+ ARRAY_TO_STRING(sched_order_su, htt_stats_buf->sched_order_su,
+ sched_order_su_num_entries);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "sched_order_su = %s\n",
+ sched_order_su);
+
+ if (len >= buf_len)
+ buf[buf_len - 1] = 0;
+ else
+ buf[len] = 0;
+
+ stats_req->buf_len = len;
+}
+
+static inline void
+htt_print_sched_txq_sched_ineligibility_tlv_v(const void *tag_buf,
+ u16 tag_len,
+ struct debug_htt_stats_req *stats_req)
+{
+ const struct htt_sched_txq_sched_ineligibility_tlv_v *htt_stats_buf = tag_buf;
+ u8 *buf = stats_req->buf;
+ u32 len = stats_req->buf_len;
+ u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
+ char sched_ineligibility[HTT_MAX_STRING_LEN] = {0};
+ /* each entry is u32, i.e. 4 bytes */
+ u32 sched_ineligibility_num_entries = tag_len >> 2;
+
+ len += HTT_DBG_OUT(buf + len, buf_len - len,
+ "HTT_SCHED_TXQ_SCHED_INELIGIBILITY_V:");
+
+ ARRAY_TO_STRING(sched_ineligibility, htt_stats_buf->sched_ineligibility,
+ sched_ineligibility_num_entries);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "sched_ineligibility = %s\n",
+ sched_ineligibility);
+
+ if (len >= buf_len)
+ buf[buf_len - 1] = 0;
+ else
+ buf[len] = 0;
+
+ stats_req->buf_len = len;
+}
+
+static inline void
+htt_print_tx_pdev_stats_sched_per_txq_tlv(const void *tag_buf,
+ struct debug_htt_stats_req *stats_req)
+{
+ const struct htt_tx_pdev_stats_sched_per_txq_tlv *htt_stats_buf = tag_buf;
+ u8 *buf = stats_req->buf;
+ u32 len = stats_req->buf_len;
+ u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
+
+ len += HTT_DBG_OUT(buf + len, buf_len - len,
+ "HTT_TX_PDEV_STATS_SCHED_PER_TXQ_TLV:");
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "mac_id = %u",
+ htt_stats_buf->mac_id__txq_id__word & 0xFF);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "txq_id = %u",
+ (htt_stats_buf->mac_id__txq_id__word & 0xFF00) >> 8);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "sched_policy = %u",
+ htt_stats_buf->sched_policy);
+ len += HTT_DBG_OUT(buf + len, buf_len - len,
+ "last_sched_cmd_posted_timestamp = %u",
+ htt_stats_buf->last_sched_cmd_posted_timestamp);
+ len += HTT_DBG_OUT(buf + len, buf_len - len,
+ "last_sched_cmd_compl_timestamp = %u",
+ htt_stats_buf->last_sched_cmd_compl_timestamp);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "sched_2_tac_lwm_count = %u",
+ htt_stats_buf->sched_2_tac_lwm_count);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "sched_2_tac_ring_full = %u",
+ htt_stats_buf->sched_2_tac_ring_full);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "sched_cmd_post_failure = %u",
+ htt_stats_buf->sched_cmd_post_failure);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "num_active_tids = %u",
+ htt_stats_buf->num_active_tids);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "num_ps_schedules = %u",
+ htt_stats_buf->num_ps_schedules);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "sched_cmds_pending = %u",
+ htt_stats_buf->sched_cmds_pending);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "num_tid_register = %u",
+ htt_stats_buf->num_tid_register);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "num_tid_unregister = %u",
+ htt_stats_buf->num_tid_unregister);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "num_qstats_queried = %u",
+ htt_stats_buf->num_qstats_queried);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "qstats_update_pending = %u",
+ htt_stats_buf->qstats_update_pending);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "last_qstats_query_timestamp = %u",
+ htt_stats_buf->last_qstats_query_timestamp);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "num_tqm_cmdq_full = %u",
+ htt_stats_buf->num_tqm_cmdq_full);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "num_de_sched_algo_trigger = %u",
+ htt_stats_buf->num_de_sched_algo_trigger);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "num_rt_sched_algo_trigger = %u",
+ htt_stats_buf->num_rt_sched_algo_trigger);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "num_tqm_sched_algo_trigger = %u",
+ htt_stats_buf->num_tqm_sched_algo_trigger);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "notify_sched = %u",
+ htt_stats_buf->notify_sched);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "dur_based_sendn_term = %u\n",
+ htt_stats_buf->dur_based_sendn_term);
+
+ if (len >= buf_len)
+ buf[buf_len - 1] = 0;
+ else
+ buf[len] = 0;
+
+ stats_req->buf_len = len;
+}
+
+static inline void htt_print_stats_tx_sched_cmn_tlv(const void *tag_buf,
+ struct debug_htt_stats_req *stats_req)
+{
+ const struct htt_stats_tx_sched_cmn_tlv *htt_stats_buf = tag_buf;
+ u8 *buf = stats_req->buf;
+ u32 len = stats_req->buf_len;
+ u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
+
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "HTT_STATS_TX_SCHED_CMN_TLV:");
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "mac_id = %u",
+ htt_stats_buf->mac_id__word & 0xFF);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "current_timestamp = %u\n",
+ htt_stats_buf->current_timestamp);
+
+ if (len >= buf_len)
+ buf[buf_len - 1] = 0;
+ else
+ buf[len] = 0;
+
+ stats_req->buf_len = len;
+}
+
+static inline void
+htt_print_tx_tqm_gen_mpdu_stats_tlv_v(const void *tag_buf,
+ u16 tag_len,
+ struct debug_htt_stats_req *stats_req)
+{
+ const struct htt_tx_tqm_gen_mpdu_stats_tlv_v *htt_stats_buf = tag_buf;
+ u8 *buf = stats_req->buf;
+ u32 len = stats_req->buf_len;
+ u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
+ char gen_mpdu_end_reason[HTT_MAX_STRING_LEN] = {0};
+ u16 num_elements = min_t(u16, (tag_len >> 2),
+ HTT_TX_TQM_MAX_LIST_MPDU_END_REASON);
+
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "HTT_TX_TQM_GEN_MPDU_STATS_TLV_V:");
+
+ ARRAY_TO_STRING(gen_mpdu_end_reason, htt_stats_buf->gen_mpdu_end_reason,
+ num_elements);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "gen_mpdu_end_reason = %s\n",
+ gen_mpdu_end_reason);
+
+ if (len >= buf_len)
+ buf[buf_len - 1] = 0;
+ else
+ buf[len] = 0;
+
+ stats_req->buf_len = len;
+}
+
+static inline void
+htt_print_tx_tqm_list_mpdu_stats_tlv_v(const void *tag_buf,
+ u16 tag_len,
+ struct debug_htt_stats_req *stats_req)
+{
+ const struct htt_tx_tqm_list_mpdu_stats_tlv_v *htt_stats_buf = tag_buf;
+ u8 *buf = stats_req->buf;
+ u32 len = stats_req->buf_len;
+ u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
+ char list_mpdu_end_reason[HTT_MAX_STRING_LEN] = {0};
+ u16 num_elems = min_t(u16, (tag_len >> 2), HTT_TX_TQM_MAX_LIST_MPDU_END_REASON);
+
+ len += HTT_DBG_OUT(buf + len, buf_len - len,
+ "HTT_TX_TQM_LIST_MPDU_STATS_TLV_V:");
+
+ ARRAY_TO_STRING(list_mpdu_end_reason, htt_stats_buf->list_mpdu_end_reason,
+ num_elems);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "list_mpdu_end_reason = %s\n",
+ list_mpdu_end_reason);
+
+ if (len >= buf_len)
+ buf[buf_len - 1] = 0;
+ else
+ buf[len] = 0;
+
+ stats_req->buf_len = len;
+}
+
+static inline void
+htt_print_tx_tqm_list_mpdu_cnt_tlv_v(const void *tag_buf,
+ u16 tag_len,
+ struct debug_htt_stats_req *stats_req)
+{
+ const struct htt_tx_tqm_list_mpdu_cnt_tlv_v *htt_stats_buf = tag_buf;
+ u8 *buf = stats_req->buf;
+ u32 len = stats_req->buf_len;
+ u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
+ char list_mpdu_cnt_hist[HTT_MAX_STRING_LEN] = {0};
+ u16 num_elems = min_t(u16, (tag_len >> 2),
+ HTT_TX_TQM_MAX_LIST_MPDU_CNT_HISTOGRAM_BINS);
+
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "HTT_TX_TQM_LIST_MPDU_CNT_TLV_V:");
+
+ ARRAY_TO_STRING(list_mpdu_cnt_hist, htt_stats_buf->list_mpdu_cnt_hist,
+ num_elems);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "list_mpdu_cnt_hist = %s\n",
+ list_mpdu_cnt_hist);
+
+ if (len >= buf_len)
+ buf[buf_len - 1] = 0;
+ else
+ buf[len] = 0;
+
+ stats_req->buf_len = len;
+}
+
+static inline void
+htt_print_tx_tqm_pdev_stats_tlv_v(const void *tag_buf,
+ struct debug_htt_stats_req *stats_req)
+{
+ const struct htt_tx_tqm_pdev_stats_tlv_v *htt_stats_buf = tag_buf;
+ u8 *buf = stats_req->buf;
+ u32 len = stats_req->buf_len;
+ u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
+
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "HTT_TX_TQM_PDEV_STATS_TLV_V:");
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "msdu_count = %u",
+ htt_stats_buf->msdu_count);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "mpdu_count = %u",
+ htt_stats_buf->mpdu_count);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "remove_msdu = %u",
+ htt_stats_buf->remove_msdu);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "remove_mpdu = %u",
+ htt_stats_buf->remove_mpdu);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "remove_msdu_ttl = %u",
+ htt_stats_buf->remove_msdu_ttl);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "send_bar = %u",
+ htt_stats_buf->send_bar);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "bar_sync = %u",
+ htt_stats_buf->bar_sync);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "notify_mpdu = %u",
+ htt_stats_buf->notify_mpdu);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "sync_cmd = %u",
+ htt_stats_buf->sync_cmd);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "write_cmd = %u",
+ htt_stats_buf->write_cmd);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "hwsch_trigger = %u",
+ htt_stats_buf->hwsch_trigger);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "ack_tlv_proc = %u",
+ htt_stats_buf->ack_tlv_proc);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "gen_mpdu_cmd = %u",
+ htt_stats_buf->gen_mpdu_cmd);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "gen_list_cmd = %u",
+ htt_stats_buf->gen_list_cmd);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "remove_mpdu_cmd = %u",
+ htt_stats_buf->remove_mpdu_cmd);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "remove_mpdu_tried_cmd = %u",
+ htt_stats_buf->remove_mpdu_tried_cmd);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "mpdu_queue_stats_cmd = %u",
+ htt_stats_buf->mpdu_queue_stats_cmd);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "mpdu_head_info_cmd = %u",
+ htt_stats_buf->mpdu_head_info_cmd);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "msdu_flow_stats_cmd = %u",
+ htt_stats_buf->msdu_flow_stats_cmd);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "remove_msdu_cmd = %u",
+ htt_stats_buf->remove_msdu_cmd);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "remove_msdu_ttl_cmd = %u",
+ htt_stats_buf->remove_msdu_ttl_cmd);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "flush_cache_cmd = %u",
+ htt_stats_buf->flush_cache_cmd);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "update_mpduq_cmd = %u",
+ htt_stats_buf->update_mpduq_cmd);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "enqueue = %u",
+ htt_stats_buf->enqueue);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "enqueue_notify = %u",
+ htt_stats_buf->enqueue_notify);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "notify_mpdu_at_head = %u",
+ htt_stats_buf->notify_mpdu_at_head);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "notify_mpdu_state_valid = %u",
+ htt_stats_buf->notify_mpdu_state_valid);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "sched_udp_notify1 = %u",
+ htt_stats_buf->sched_udp_notify1);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "sched_udp_notify2 = %u",
+ htt_stats_buf->sched_udp_notify2);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "sched_nonudp_notify1 = %u",
+ htt_stats_buf->sched_nonudp_notify1);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "sched_nonudp_notify2 = %u\n",
+ htt_stats_buf->sched_nonudp_notify2);
+
+ if (len >= buf_len)
+ buf[buf_len - 1] = 0;
+ else
+ buf[len] = 0;
+
+ stats_req->buf_len = len;
+}
+
+static inline void htt_print_tx_tqm_cmn_stats_tlv(const void *tag_buf,
+ struct debug_htt_stats_req *stats_req)
+{
+ const struct htt_tx_tqm_cmn_stats_tlv *htt_stats_buf = tag_buf;
+ u8 *buf = stats_req->buf;
+ u32 len = stats_req->buf_len;
+ u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
+
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "HTT_TX_TQM_CMN_STATS_TLV:");
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "mac_id = %u",
+ htt_stats_buf->mac_id__word & 0xFF);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "max_cmdq_id = %u",
+ htt_stats_buf->max_cmdq_id);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "list_mpdu_cnt_hist_intvl = %u",
+ htt_stats_buf->list_mpdu_cnt_hist_intvl);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "add_msdu = %u",
+ htt_stats_buf->add_msdu);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "q_empty = %u",
+ htt_stats_buf->q_empty);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "q_not_empty = %u",
+ htt_stats_buf->q_not_empty);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "drop_notification = %u",
+ htt_stats_buf->drop_notification);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "desc_threshold = %u\n",
+ htt_stats_buf->desc_threshold);
+
+ if (len >= buf_len)
+ buf[buf_len - 1] = 0;
+ else
+ buf[len] = 0;
+
+ stats_req->buf_len = len;
+}
+
+static inline void htt_print_tx_tqm_error_stats_tlv(const void *tag_buf,
+ struct debug_htt_stats_req *stats_req)
+{
+ const struct htt_tx_tqm_error_stats_tlv *htt_stats_buf = tag_buf;
+ u8 *buf = stats_req->buf;
+ u32 len = stats_req->buf_len;
+ u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
+
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "HTT_TX_TQM_ERROR_STATS_TLV:");
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "q_empty_failure = %u",
+ htt_stats_buf->q_empty_failure);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "q_not_empty_failure = %u",
+ htt_stats_buf->q_not_empty_failure);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "add_msdu_failure = %u\n",
+ htt_stats_buf->add_msdu_failure);
+
+ if (len >= buf_len)
+ buf[buf_len - 1] = 0;
+ else
+ buf[len] = 0;
+
+ stats_req->buf_len = len;
+}
+
+static inline void htt_print_tx_tqm_cmdq_status_tlv(const void *tag_buf,
+ struct debug_htt_stats_req *stats_req)
+{
+ const struct htt_tx_tqm_cmdq_status_tlv *htt_stats_buf = tag_buf;
+ u8 *buf = stats_req->buf;
+ u32 len = stats_req->buf_len;
+ u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
+
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "HTT_TX_TQM_CMDQ_STATUS_TLV:");
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "mac_id = %u",
+ htt_stats_buf->mac_id__cmdq_id__word & 0xFF);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "cmdq_id = %u",
+ (htt_stats_buf->mac_id__cmdq_id__word & 0xFF00) >> 8);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "sync_cmd = %u",
+ htt_stats_buf->sync_cmd);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "write_cmd = %u",
+ htt_stats_buf->write_cmd);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "gen_mpdu_cmd = %u",
+ htt_stats_buf->gen_mpdu_cmd);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "mpdu_queue_stats_cmd = %u",
+ htt_stats_buf->mpdu_queue_stats_cmd);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "mpdu_head_info_cmd = %u",
+ htt_stats_buf->mpdu_head_info_cmd);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "msdu_flow_stats_cmd = %u",
+ htt_stats_buf->msdu_flow_stats_cmd);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "remove_mpdu_cmd = %u",
+ htt_stats_buf->remove_mpdu_cmd);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "remove_msdu_cmd = %u",
+ htt_stats_buf->remove_msdu_cmd);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "flush_cache_cmd = %u",
+ htt_stats_buf->flush_cache_cmd);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "update_mpduq_cmd = %u",
+ htt_stats_buf->update_mpduq_cmd);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "update_msduq_cmd = %u\n",
+ htt_stats_buf->update_msduq_cmd);
+
+ if (len >= buf_len)
+ buf[buf_len - 1] = 0;
+ else
+ buf[len] = 0;
+
+ stats_req->buf_len = len;
+}
+
+static inline void
+htt_print_tx_de_eapol_packets_stats_tlv(const void *tag_buf,
+ struct debug_htt_stats_req *stats_req)
+{
+ const struct htt_tx_de_eapol_packets_stats_tlv *htt_stats_buf = tag_buf;
+ u8 *buf = stats_req->buf;
+ u32 len = stats_req->buf_len;
+ u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
+
+ len += HTT_DBG_OUT(buf + len, buf_len - len,
+ "HTT_TX_DE_EAPOL_PACKETS_STATS_TLV:");
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "m1_packets = %u",
+ htt_stats_buf->m1_packets);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "m2_packets = %u",
+ htt_stats_buf->m2_packets);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "m3_packets = %u",
+ htt_stats_buf->m3_packets);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "m4_packets = %u",
+ htt_stats_buf->m4_packets);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "g1_packets = %u",
+ htt_stats_buf->g1_packets);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "g2_packets = %u\n",
+ htt_stats_buf->g2_packets);
+
+ if (len >= buf_len)
+ buf[buf_len - 1] = 0;
+ else
+ buf[len] = 0;
+
+ stats_req->buf_len = len;
+}
+
+static inline void
+htt_print_tx_de_classify_failed_stats_tlv(const void *tag_buf,
+ struct debug_htt_stats_req *stats_req)
+{
+ const struct htt_tx_de_classify_failed_stats_tlv *htt_stats_buf = tag_buf;
+ u8 *buf = stats_req->buf;
+ u32 len = stats_req->buf_len;
+ u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
+
+ len += HTT_DBG_OUT(buf + len, buf_len - len,
+ "HTT_TX_DE_CLASSIFY_FAILED_STATS_TLV:");
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "ap_bss_peer_not_found = %u",
+ htt_stats_buf->ap_bss_peer_not_found);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "ap_bcast_mcast_no_peer = %u",
+ htt_stats_buf->ap_bcast_mcast_no_peer);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "sta_delete_in_progress = %u",
+ htt_stats_buf->sta_delete_in_progress);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "ibss_no_bss_peer = %u",
+ htt_stats_buf->ibss_no_bss_peer);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "invalid_vdev_type = %u",
+ htt_stats_buf->invalid_vdev_type);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "invalid_ast_peer_entry = %u",
+ htt_stats_buf->invalid_ast_peer_entry);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "peer_entry_invalid = %u",
+ htt_stats_buf->peer_entry_invalid);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "ethertype_not_ip = %u",
+ htt_stats_buf->ethertype_not_ip);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "eapol_lookup_failed = %u",
+ htt_stats_buf->eapol_lookup_failed);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "qpeer_not_allow_data = %u",
+ htt_stats_buf->qpeer_not_allow_data);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "fse_tid_override = %u",
+ htt_stats_buf->fse_tid_override);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "ipv6_jumbogram_zero_length = %u",
+ htt_stats_buf->ipv6_jumbogram_zero_length);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "qos_to_non_qos_in_prog = %u\n",
+ htt_stats_buf->qos_to_non_qos_in_prog);
+
+ if (len >= buf_len)
+ buf[buf_len - 1] = 0;
+ else
+ buf[len] = 0;
+
+ stats_req->buf_len = len;
+}
+
+static inline void
+htt_print_tx_de_classify_stats_tlv(const void *tag_buf,
+ struct debug_htt_stats_req *stats_req)
+{
+ const struct htt_tx_de_classify_stats_tlv *htt_stats_buf = tag_buf;
+ u8 *buf = stats_req->buf;
+ u32 len = stats_req->buf_len;
+ u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
+
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "HTT_TX_DE_CLASSIFY_STATS_TLV:");
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "arp_packets = %u",
+ htt_stats_buf->arp_packets);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "igmp_packets = %u",
+ htt_stats_buf->igmp_packets);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "dhcp_packets = %u",
+ htt_stats_buf->dhcp_packets);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "host_inspected = %u",
+ htt_stats_buf->host_inspected);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "htt_included = %u",
+ htt_stats_buf->htt_included);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "htt_valid_mcs = %u",
+ htt_stats_buf->htt_valid_mcs);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "htt_valid_nss = %u",
+ htt_stats_buf->htt_valid_nss);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "htt_valid_preamble_type = %u",
+ htt_stats_buf->htt_valid_preamble_type);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "htt_valid_chainmask = %u",
+ htt_stats_buf->htt_valid_chainmask);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "htt_valid_guard_interval = %u",
+ htt_stats_buf->htt_valid_guard_interval);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "htt_valid_retries = %u",
+ htt_stats_buf->htt_valid_retries);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "htt_valid_bw_info = %u",
+ htt_stats_buf->htt_valid_bw_info);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "htt_valid_power = %u",
+ htt_stats_buf->htt_valid_power);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "htt_valid_key_flags = 0x%x",
+ htt_stats_buf->htt_valid_key_flags);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "htt_valid_no_encryption = %u",
+ htt_stats_buf->htt_valid_no_encryption);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "fse_entry_count = %u",
+ htt_stats_buf->fse_entry_count);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "fse_priority_be = %u",
+ htt_stats_buf->fse_priority_be);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "fse_priority_high = %u",
+ htt_stats_buf->fse_priority_high);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "fse_priority_low = %u",
+ htt_stats_buf->fse_priority_low);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "fse_traffic_ptrn_be = %u",
+ htt_stats_buf->fse_traffic_ptrn_be);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "fse_traffic_ptrn_over_sub = %u",
+ htt_stats_buf->fse_traffic_ptrn_over_sub);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "fse_traffic_ptrn_bursty = %u",
+ htt_stats_buf->fse_traffic_ptrn_bursty);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "fse_traffic_ptrn_interactive = %u",
+ htt_stats_buf->fse_traffic_ptrn_interactive);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "fse_traffic_ptrn_periodic = %u",
+ htt_stats_buf->fse_traffic_ptrn_periodic);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "fse_hwqueue_alloc = %u",
+ htt_stats_buf->fse_hwqueue_alloc);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "fse_hwqueue_created = %u",
+ htt_stats_buf->fse_hwqueue_created);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "fse_hwqueue_send_to_host = %u",
+ htt_stats_buf->fse_hwqueue_send_to_host);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "mcast_entry = %u",
+ htt_stats_buf->mcast_entry);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "bcast_entry = %u",
+ htt_stats_buf->bcast_entry);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "htt_update_peer_cache = %u",
+ htt_stats_buf->htt_update_peer_cache);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "htt_learning_frame = %u",
+ htt_stats_buf->htt_learning_frame);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "fse_invalid_peer = %u",
+ htt_stats_buf->fse_invalid_peer);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "mec_notify = %u\n",
+ htt_stats_buf->mec_notify);
+
+ if (len >= buf_len)
+ buf[buf_len - 1] = 0;
+ else
+ buf[len] = 0;
+
+ stats_req->buf_len = len;
+}
+
+static inline void
+htt_print_tx_de_classify_status_stats_tlv(const void *tag_buf,
+ struct debug_htt_stats_req *stats_req)
+{
+ const struct htt_tx_de_classify_status_stats_tlv *htt_stats_buf = tag_buf;
+ u8 *buf = stats_req->buf;
+ u32 len = stats_req->buf_len;
+ u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
+
+ len += HTT_DBG_OUT(buf + len, buf_len - len,
+ "HTT_TX_DE_CLASSIFY_STATUS_STATS_TLV:");
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "eok = %u",
+ htt_stats_buf->eok);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "classify_done = %u",
+ htt_stats_buf->classify_done);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "lookup_failed = %u",
+ htt_stats_buf->lookup_failed);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "send_host_dhcp = %u",
+ htt_stats_buf->send_host_dhcp);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "send_host_mcast = %u",
+ htt_stats_buf->send_host_mcast);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "send_host_unknown_dest = %u",
+ htt_stats_buf->send_host_unknown_dest);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "send_host = %u",
+ htt_stats_buf->send_host);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "status_invalid = %u\n",
+ htt_stats_buf->status_invalid);
+
+ if (len >= buf_len)
+ buf[buf_len - 1] = 0;
+ else
+ buf[len] = 0;
+
+ stats_req->buf_len = len;
+}
+
+static inline void
+htt_print_tx_de_enqueue_packets_stats_tlv(const void *tag_buf,
+ struct debug_htt_stats_req *stats_req)
+{
+ const struct htt_tx_de_enqueue_packets_stats_tlv *htt_stats_buf = tag_buf;
+ u8 *buf = stats_req->buf;
+ u32 len = stats_req->buf_len;
+ u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
+
+ len += HTT_DBG_OUT(buf + len, buf_len - len,
+ "HTT_TX_DE_ENQUEUE_PACKETS_STATS_TLV:");
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "enqueued_pkts = %u",
+ htt_stats_buf->enqueued_pkts);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "to_tqm = %u",
+ htt_stats_buf->to_tqm);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "to_tqm_bypass = %u\n",
+ htt_stats_buf->to_tqm_bypass);
+
+ if (len >= buf_len)
+ buf[buf_len - 1] = 0;
+ else
+ buf[len] = 0;
+
+ stats_req->buf_len = len;
+}
+
+static inline void
+htt_print_tx_de_enqueue_discard_stats_tlv(const void *tag_buf,
+ struct debug_htt_stats_req *stats_req)
+{
+ const struct htt_tx_de_enqueue_discard_stats_tlv *htt_stats_buf = tag_buf;
+ u8 *buf = stats_req->buf;
+ u32 len = stats_req->buf_len;
+ u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
+
+ len += HTT_DBG_OUT(buf + len, buf_len - len,
+ "HTT_TX_DE_ENQUEUE_DISCARD_STATS_TLV:");
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "discarded_pkts = %u",
+ htt_stats_buf->discarded_pkts);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "local_frames = %u",
+ htt_stats_buf->local_frames);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "is_ext_msdu = %u\n",
+ htt_stats_buf->is_ext_msdu);
+
+ if (len >= buf_len)
+ buf[buf_len - 1] = 0;
+ else
+ buf[len] = 0;
+
+ stats_req->buf_len = len;
+}
+
+static inline void htt_print_tx_de_compl_stats_tlv(const void *tag_buf,
+ struct debug_htt_stats_req *stats_req)
+{
+ const struct htt_tx_de_compl_stats_tlv *htt_stats_buf = tag_buf;
+ u8 *buf = stats_req->buf;
+ u32 len = stats_req->buf_len;
+ u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
+
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "HTT_TX_DE_COMPL_STATS_TLV:");
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "tcl_dummy_frame = %u",
+ htt_stats_buf->tcl_dummy_frame);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "tqm_dummy_frame = %u",
+ htt_stats_buf->tqm_dummy_frame);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "tqm_notify_frame = %u",
+ htt_stats_buf->tqm_notify_frame);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "fw2wbm_enq = %u",
+ htt_stats_buf->fw2wbm_enq);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "tqm_bypass_frame = %u\n",
+ htt_stats_buf->tqm_bypass_frame);
+
+ if (len >= buf_len)
+ buf[buf_len - 1] = 0;
+ else
+ buf[len] = 0;
+
+ stats_req->buf_len = len;
+}
+
+static inline void
+htt_print_tx_de_fw2wbm_ring_full_hist_tlv(const void *tag_buf,
+ u16 tag_len,
+ struct debug_htt_stats_req *stats_req)
+{
+ const struct htt_tx_de_fw2wbm_ring_full_hist_tlv *htt_stats_buf = tag_buf;
+ u8 *buf = stats_req->buf;
+ u32 len = stats_req->buf_len;
+ u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
+ char fw2wbm_ring_full_hist[HTT_MAX_STRING_LEN] = {0};
+ u16 num_elements = tag_len >> 2;
+ u32 required_buffer_size = HTT_MAX_PRINT_CHAR_PER_ELEM * num_elements;
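+ /* tag_len is in bytes and each histogram bin is a 4-byte dword, so
+ * tag_len >> 2 is the element count; the guard below refuses to print
+ * when the rendered string could not fit in the local buffer.
+ */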
+
+ len += HTT_DBG_OUT(buf + len, buf_len - len,
+ "HTT_TX_DE_FW2WBM_RING_FULL_HIST_TLV");
+
+ if (required_buffer_size < HTT_MAX_STRING_LEN) {
+ ARRAY_TO_STRING(fw2wbm_ring_full_hist,
+ htt_stats_buf->fw2wbm_ring_full_hist,
+ num_elements);
+ len += HTT_DBG_OUT(buf + len, buf_len - len,
+ "fw2wbm_ring_full_hist = %s\n",
+ fw2wbm_ring_full_hist);
+ } else {
+ len += HTT_DBG_OUT(buf + len, buf_len - len,
+ "INSUFFICIENT PRINT BUFFER ");
+ }
+
+ if (len >= buf_len)
+ buf[buf_len - 1] = 0;
+ else
+ buf[len] = 0;
+
+ stats_req->buf_len = len;
+}
+
+static inline void
+htt_print_tx_de_cmn_stats_tlv(const void *tag_buf, struct debug_htt_stats_req *stats_req)
+{
+ const struct htt_tx_de_cmn_stats_tlv *htt_stats_buf = tag_buf;
+ u8 *buf = stats_req->buf;
+ u32 len = stats_req->buf_len;
+ u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
+
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "HTT_TX_DE_CMN_STATS_TLV:");
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "mac_id = %u",
+ htt_stats_buf->mac_id__word & 0xFF);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "tcl2fw_entry_count = %u",
+ htt_stats_buf->tcl2fw_entry_count);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "not_to_fw = %u",
+ htt_stats_buf->not_to_fw);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "invalid_pdev_vdev_peer = %u",
+ htt_stats_buf->invalid_pdev_vdev_peer);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "tcl_res_invalid_addrx = %u",
+ htt_stats_buf->tcl_res_invalid_addrx);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "wbm2fw_entry_count = %u",
+ htt_stats_buf->wbm2fw_entry_count);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "invalid_pdev = %u\n",
+ htt_stats_buf->invalid_pdev);
+
+ if (len >= buf_len)
+ buf[buf_len - 1] = 0;
+ else
+ buf[len] = 0;
+
+ stats_req->buf_len = len;
+}
+
+static inline void htt_print_ring_if_stats_tlv(const void *tag_buf,
+ struct debug_htt_stats_req *stats_req)
+{
+ const struct htt_ring_if_stats_tlv *htt_stats_buf = tag_buf;
+ u8 *buf = stats_req->buf;
+ u32 len = stats_req->buf_len;
+ u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
+ char low_wm_hit_count[HTT_MAX_STRING_LEN] = {0};
+ char high_wm_hit_count[HTT_MAX_STRING_LEN] = {0};
+
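+ /* Most fields below pack two values per u32 and are unpacked with
+ * open-coded mask/shift pairs. A hypothetical equivalent using the
+ * kernel's bitfield helpers (linux/bitfield.h, not used in this file):
+ *
+ *	num_elems = FIELD_GET(GENMASK(15, 0),
+ *			      htt_stats_buf->num_elems__prefetch_tail_idx);
+ *	prefetch_tail_idx = FIELD_GET(GENMASK(31, 16),
+ *			      htt_stats_buf->num_elems__prefetch_tail_idx);
+ */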
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "HTT_RING_IF_STATS_TLV:");
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "base_addr = %u",
+ htt_stats_buf->base_addr);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "elem_size = %u",
+ htt_stats_buf->elem_size);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "num_elems = %u",
+ htt_stats_buf->num_elems__prefetch_tail_idx & 0xFFFF);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "prefetch_tail_idx = %u",
+ (htt_stats_buf->num_elems__prefetch_tail_idx &
+ 0xFFFF0000) >> 16);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "head_idx = %u",
+ htt_stats_buf->head_idx__tail_idx & 0xFFFF);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "tail_idx = %u",
+ (htt_stats_buf->head_idx__tail_idx & 0xFFFF0000) >> 16);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "shadow_head_idx = %u",
+ htt_stats_buf->shadow_head_idx__shadow_tail_idx & 0xFFFF);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "shadow_tail_idx = %u",
+ (htt_stats_buf->shadow_head_idx__shadow_tail_idx &
+ 0xFFFF0000) >> 16);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "num_tail_incr = %u",
+ htt_stats_buf->num_tail_incr);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "lwm_thresh = %u",
+ htt_stats_buf->lwm_thresh__hwm_thresh & 0xFFFF);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "hwm_thresh = %u",
+ (htt_stats_buf->lwm_thresh__hwm_thresh & 0xFFFF0000) >> 16);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "overrun_hit_count = %u",
+ htt_stats_buf->overrun_hit_count);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "underrun_hit_count = %u",
+ htt_stats_buf->underrun_hit_count);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "prod_blockwait_count = %u",
+ htt_stats_buf->prod_blockwait_count);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "cons_blockwait_count = %u",
+ htt_stats_buf->cons_blockwait_count);
+
+ ARRAY_TO_STRING(low_wm_hit_count, htt_stats_buf->low_wm_hit_count,
+ HTT_STATS_LOW_WM_BINS);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "low_wm_hit_count = %s ",
+ low_wm_hit_count);
+
+ ARRAY_TO_STRING(high_wm_hit_count, htt_stats_buf->high_wm_hit_count,
+ HTT_STATS_HIGH_WM_BINS);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "high_wm_hit_count = %s\n",
+ high_wm_hit_count);
+
+ if (len >= buf_len)
+ buf[buf_len - 1] = 0;
+ else
+ buf[len] = 0;
+
+ stats_req->buf_len = len;
+}
+
+static inline void htt_print_ring_if_cmn_tlv(const void *tag_buf,
+ struct debug_htt_stats_req *stats_req)
+{
+ const struct htt_ring_if_cmn_tlv *htt_stats_buf = tag_buf;
+ u8 *buf = stats_req->buf;
+ u32 len = stats_req->buf_len;
+ u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
+
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "HTT_RING_IF_CMN_TLV:");
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "mac_id = %u",
+ htt_stats_buf->mac_id__word & 0xFF);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "num_records = %u\n",
+ htt_stats_buf->num_records);
+
+ if (len >= buf_len)
+ buf[buf_len - 1] = 0;
+ else
+ buf[len] = 0;
+
+ stats_req->buf_len = len;
+}
+
+static inline void htt_print_sfm_client_user_tlv_v(const void *tag_buf,
+ u16 tag_len,
+ struct debug_htt_stats_req *stats_req)
+{
+ const struct htt_sfm_client_user_tlv_v *htt_stats_buf = tag_buf;
+ u8 *buf = stats_req->buf;
+ u32 len = stats_req->buf_len;
+ u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
+ char dwords_used_by_user_n[HTT_MAX_STRING_LEN] = {0};
+ u16 num_elems = tag_len >> 2;
+
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "HTT_SFM_CLIENT_USER_TLV_V:");
+
+ ARRAY_TO_STRING(dwords_used_by_user_n,
+ htt_stats_buf->dwords_used_by_user_n,
+ num_elems);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "dwords_used_by_user_n = %s\n",
+ dwords_used_by_user_n);
+
+ if (len >= buf_len)
+ buf[buf_len - 1] = 0;
+ else
+ buf[len] = 0;
+
+ stats_req->buf_len = len;
+}
+
+static inline void htt_print_sfm_client_tlv(const void *tag_buf,
+ struct debug_htt_stats_req *stats_req)
+{
+ const struct htt_sfm_client_tlv *htt_stats_buf = tag_buf;
+ u8 *buf = stats_req->buf;
+ u32 len = stats_req->buf_len;
+ u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
+
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "HTT_SFM_CLIENT_TLV:");
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "client_id = %u",
+ htt_stats_buf->client_id);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "buf_min = %u",
+ htt_stats_buf->buf_min);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "buf_max = %u",
+ htt_stats_buf->buf_max);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "buf_busy = %u",
+ htt_stats_buf->buf_busy);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "buf_alloc = %u",
+ htt_stats_buf->buf_alloc);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "buf_avail = %u",
+ htt_stats_buf->buf_avail);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "num_users = %u\n",
+ htt_stats_buf->num_users);
+
+ if (len >= buf_len)
+ buf[buf_len - 1] = 0;
+ else
+ buf[len] = 0;
+
+ stats_req->buf_len = len;
+}
+
+static inline void htt_print_sfm_cmn_tlv(const void *tag_buf,
+ struct debug_htt_stats_req *stats_req)
+{
+ const struct htt_sfm_cmn_tlv *htt_stats_buf = tag_buf;
+ u8 *buf = stats_req->buf;
+ u32 len = stats_req->buf_len;
+ u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
+
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "HTT_SFM_CMN_TLV:");
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "mac_id = %u",
+ htt_stats_buf->mac_id__word & 0xFF);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "buf_total = %u",
+ htt_stats_buf->buf_total);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "mem_empty = %u",
+ htt_stats_buf->mem_empty);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "deallocate_bufs = %u",
+ htt_stats_buf->deallocate_bufs);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "num_records = %u\n",
+ htt_stats_buf->num_records);
+
+ if (len >= buf_len)
+ buf[buf_len - 1] = 0;
+ else
+ buf[len] = 0;
+
+ stats_req->buf_len = len;
+}
+
+static inline void htt_print_sring_stats_tlv(const void *tag_buf,
+ struct debug_htt_stats_req *stats_req)
+{
+ const struct htt_sring_stats_tlv *htt_stats_buf = tag_buf;
+ u8 *buf = stats_req->buf;
+ u32 len = stats_req->buf_len;
+ u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
+
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "HTT_SRING_STATS_TLV:");
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "mac_id = %u",
+ htt_stats_buf->mac_id__ring_id__arena__ep & 0xFF);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "ring_id = %u",
+ (htt_stats_buf->mac_id__ring_id__arena__ep & 0xFF00) >> 8);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "arena = %u",
+ (htt_stats_buf->mac_id__ring_id__arena__ep & 0xFF0000) >> 16);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "ep = %u",
+ (htt_stats_buf->mac_id__ring_id__arena__ep & 0x1000000) >> 24);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "base_addr_lsb = 0x%x",
+ htt_stats_buf->base_addr_lsb);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "base_addr_msb = 0x%x",
+ htt_stats_buf->base_addr_msb);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "ring_size = %u",
+ htt_stats_buf->ring_size);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "elem_size = %u",
+ htt_stats_buf->elem_size);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "num_avail_words = %u",
+ htt_stats_buf->num_avail_words__num_valid_words & 0xFFFF);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "num_valid_words = %u",
+ (htt_stats_buf->num_avail_words__num_valid_words &
+ 0xFFFF0000) >> 16);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "head_ptr = %u",
+ htt_stats_buf->head_ptr__tail_ptr & 0xFFFF);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "tail_ptr = %u",
+ (htt_stats_buf->head_ptr__tail_ptr & 0xFFFF0000) >> 16);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "consumer_empty = %u",
+ htt_stats_buf->consumer_empty__producer_full & 0xFFFF);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "producer_full = %u",
+ (htt_stats_buf->consumer_empty__producer_full &
+ 0xFFFF0000) >> 16);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "prefetch_count = %u",
+ htt_stats_buf->prefetch_count__internal_tail_ptr & 0xFFFF);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "internal_tail_ptr = %u\n",
+ (htt_stats_buf->prefetch_count__internal_tail_ptr &
+ 0xFFFF0000) >> 16);
+
+ if (len >= buf_len)
+ buf[buf_len - 1] = 0;
+ else
+ buf[len] = 0;
+
+ stats_req->buf_len = len;
+}
+
+static inline void htt_print_sring_cmn_tlv(const void *tag_buf,
+ struct debug_htt_stats_req *stats_req)
+{
+ const struct htt_sring_cmn_tlv *htt_stats_buf = tag_buf;
+ u8 *buf = stats_req->buf;
+ u32 len = stats_req->buf_len;
+ u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
+
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "HTT_SRING_CMN_TLV:");
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "num_records = %u\n",
+ htt_stats_buf->num_records);
+
+ if (len >= buf_len)
+ buf[buf_len - 1] = 0;
+ else
+ buf[len] = 0;
+
+ stats_req->buf_len = len;
+}
+
+static inline void htt_print_tx_pdev_rate_stats_tlv(const void *tag_buf,
+ struct debug_htt_stats_req *stats_req)
+{
+ const struct htt_tx_pdev_rate_stats_tlv *htt_stats_buf = tag_buf;
+ u8 *buf = stats_req->buf;
+ u32 len = stats_req->buf_len;
+ u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
+ u8 j;
+ char str_buf[HTT_MAX_STRING_LEN] = {0};
+ char *tx_gi[HTT_TX_PDEV_STATS_NUM_GI_COUNTERS] = {NULL};
+
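+ /* The per-GI scratch strings are kmalloc'ed rather than kept on the
+ * stack to keep the frame small; GFP_ATOMIC is presumably used because
+ * this can be reached from a context where sleeping is not allowed.
+ * Sized with the PDEV GI-counter count to match the print loops below.
+ */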
+ for (j = 0; j < HTT_TX_PDEV_STATS_NUM_GI_COUNTERS; j++) {
+ tx_gi[j] = kmalloc(HTT_MAX_STRING_LEN, GFP_ATOMIC);
+ if (!tx_gi[j])
+ goto fail;
+ }
+
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "HTT_TX_PDEV_RATE_STATS_TLV:");
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "mac_id = %u",
+ htt_stats_buf->mac_id__word & 0xFF);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "tx_ldpc = %u",
+ htt_stats_buf->tx_ldpc);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "ac_mu_mimo_tx_ldpc = %u",
+ htt_stats_buf->ac_mu_mimo_tx_ldpc);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "ax_mu_mimo_tx_ldpc = %u",
+ htt_stats_buf->ax_mu_mimo_tx_ldpc);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "ofdma_tx_ldpc = %u",
+ htt_stats_buf->ofdma_tx_ldpc);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "rts_cnt = %u",
+ htt_stats_buf->rts_cnt);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "rts_success = %u",
+ htt_stats_buf->rts_success);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "ack_rssi = %u",
+ htt_stats_buf->ack_rssi);
+
+ len += HTT_DBG_OUT(buf + len, buf_len - len,
+ "Legacy CCK Rates: 1 Mbps: %u, 2 Mbps: %u, 5.5 Mbps: %u, 11 Mbps: %u",
+ htt_stats_buf->tx_legacy_cck_rate[0],
+ htt_stats_buf->tx_legacy_cck_rate[1],
+ htt_stats_buf->tx_legacy_cck_rate[2],
+ htt_stats_buf->tx_legacy_cck_rate[3]);
+
+ len += HTT_DBG_OUT(buf + len, buf_len - len,
+ "Legacy OFDM Rates: 6 Mbps: %u, 9 Mbps: %u, 12 Mbps: %u, 18 Mbps: %u\n"
+ " 24 Mbps: %u, 36 Mbps: %u, 48 Mbps: %u, 54 Mbps: %u",
+ htt_stats_buf->tx_legacy_ofdm_rate[0],
+ htt_stats_buf->tx_legacy_ofdm_rate[1],
+ htt_stats_buf->tx_legacy_ofdm_rate[2],
+ htt_stats_buf->tx_legacy_ofdm_rate[3],
+ htt_stats_buf->tx_legacy_ofdm_rate[4],
+ htt_stats_buf->tx_legacy_ofdm_rate[5],
+ htt_stats_buf->tx_legacy_ofdm_rate[6],
+ htt_stats_buf->tx_legacy_ofdm_rate[7]);
+
+ memset(str_buf, 0x0, HTT_MAX_STRING_LEN);
+ ARRAY_TO_STRING(str_buf, htt_stats_buf->tx_mcs,
+ HTT_TX_PDEV_STATS_NUM_MCS_COUNTERS);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "tx_mcs = %s ", str_buf);
+
+ memset(str_buf, 0x0, HTT_MAX_STRING_LEN);
+ ARRAY_TO_STRING(str_buf, htt_stats_buf->ac_mu_mimo_tx_mcs,
+ HTT_TX_PDEV_STATS_NUM_MCS_COUNTERS);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "ac_mu_mimo_tx_mcs = %s ", str_buf);
+
+ memset(str_buf, 0x0, HTT_MAX_STRING_LEN);
+ ARRAY_TO_STRING(str_buf, htt_stats_buf->ax_mu_mimo_tx_mcs,
+ HTT_TX_PDEV_STATS_NUM_MCS_COUNTERS);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "ax_mu_mimo_tx_mcs = %s ", str_buf);
+
+ memset(str_buf, 0x0, HTT_MAX_STRING_LEN);
+ ARRAY_TO_STRING(str_buf, htt_stats_buf->ofdma_tx_mcs,
+ HTT_TX_PDEV_STATS_NUM_MCS_COUNTERS);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "ofdma_tx_mcs = %s ", str_buf);
+
+ memset(str_buf, 0x0, HTT_MAX_STRING_LEN);
+ ARRAY_TO_STRING(str_buf, htt_stats_buf->tx_nss,
+ HTT_TX_PDEV_STATS_NUM_SPATIAL_STREAMS);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "tx_nss = %s ", str_buf);
+
+ memset(str_buf, 0x0, HTT_MAX_STRING_LEN);
+ ARRAY_TO_STRING(str_buf, htt_stats_buf->ac_mu_mimo_tx_nss,
+ HTT_TX_PDEV_STATS_NUM_SPATIAL_STREAMS);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "ac_mu_mimo_tx_nss = %s ", str_buf);
+
+ memset(str_buf, 0x0, HTT_MAX_STRING_LEN);
+ ARRAY_TO_STRING(str_buf, htt_stats_buf->ax_mu_mimo_tx_nss,
+ HTT_TX_PDEV_STATS_NUM_SPATIAL_STREAMS);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "ax_mu_mimo_tx_nss = %s ", str_buf);
+
+ memset(str_buf, 0x0, HTT_MAX_STRING_LEN);
+ ARRAY_TO_STRING(str_buf, htt_stats_buf->ofdma_tx_nss,
+ HTT_TX_PDEV_STATS_NUM_SPATIAL_STREAMS);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "ofdma_tx_nss = %s ", str_buf);
+
+ memset(str_buf, 0x0, HTT_MAX_STRING_LEN);
+ ARRAY_TO_STRING(str_buf, htt_stats_buf->tx_bw,
+ HTT_TX_PDEV_STATS_NUM_BW_COUNTERS);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "tx_bw = %s ", str_buf);
+
+ memset(str_buf, 0x0, HTT_MAX_STRING_LEN);
+ ARRAY_TO_STRING(str_buf, htt_stats_buf->ac_mu_mimo_tx_bw,
+ HTT_TX_PDEV_STATS_NUM_BW_COUNTERS);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "ac_mu_mimo_tx_bw = %s ", str_buf);
+
+ memset(str_buf, 0x0, HTT_MAX_STRING_LEN);
+ ARRAY_TO_STRING(str_buf, htt_stats_buf->ax_mu_mimo_tx_bw,
+ HTT_TX_PDEV_STATS_NUM_BW_COUNTERS);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "ax_mu_mimo_tx_bw = %s ", str_buf);
+
+ memset(str_buf, 0x0, HTT_MAX_STRING_LEN);
+ ARRAY_TO_STRING(str_buf, htt_stats_buf->ofdma_tx_bw,
+ HTT_TX_PDEV_STATS_NUM_BW_COUNTERS);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "ofdma_tx_bw = %s ", str_buf);
+
+ memset(str_buf, 0x0, HTT_MAX_STRING_LEN);
+ ARRAY_TO_STRING(str_buf, htt_stats_buf->tx_stbc,
+ HTT_TX_PDEV_STATS_NUM_MCS_COUNTERS);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "tx_stbc = %s ", str_buf);
+
+ memset(str_buf, 0x0, HTT_MAX_STRING_LEN);
+ ARRAY_TO_STRING(str_buf, htt_stats_buf->tx_pream,
+ HTT_TX_PDEV_STATS_NUM_PREAMBLE_TYPES);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "tx_pream = %s ", str_buf);
+
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "HE LTF: 1x: %u, 2x: %u, 4x: %u",
+ htt_stats_buf->tx_he_ltf[1],
+ htt_stats_buf->tx_he_ltf[2],
+ htt_stats_buf->tx_he_ltf[3]);
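+ /* Only tx_he_ltf[1..3] are printed; index 0 appears to be unused
+ * (slots 1-3 count 1x/2x/4x LTF respectively).
+ */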
+
+ /* SU GI Stats */
+ for (j = 0; j < HTT_TX_PDEV_STATS_NUM_GI_COUNTERS; j++) {
+ ARRAY_TO_STRING(tx_gi[j], htt_stats_buf->tx_gi[j],
+ HTT_TX_PDEV_STATS_NUM_MCS_COUNTERS);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "tx_gi[%u] = %s ",
+ j, tx_gi[j]);
+ }
+
+ /* AC MU-MIMO GI Stats */
+ for (j = 0; j < HTT_TX_PDEV_STATS_NUM_GI_COUNTERS; j++) {
+ ARRAY_TO_STRING(tx_gi[j], htt_stats_buf->ac_mu_mimo_tx_gi[j],
+ HTT_TX_PDEV_STATS_NUM_MCS_COUNTERS);
+ len += HTT_DBG_OUT(buf + len, buf_len - len,
+ "ac_mu_mimo_tx_gi[%u] = %s ",
+ j, tx_gi[j]);
+ }
+
+ /* AX MU-MIMO GI Stats */
+ for (j = 0; j < HTT_TX_PDEV_STATS_NUM_GI_COUNTERS; j++) {
+ ARRAY_TO_STRING(tx_gi[j], htt_stats_buf->ax_mu_mimo_tx_gi[j],
+ HTT_TX_PDEV_STATS_NUM_MCS_COUNTERS);
+ len += HTT_DBG_OUT(buf + len, buf_len - len,
+ "ax_mu_mimo_tx_gi[%u] = %s ",
+ j, tx_gi[j]);
+ }
+
+ /* DL OFDMA GI Stats */
+ for (j = 0; j < HTT_TX_PDEV_STATS_NUM_GI_COUNTERS; j++) {
+ ARRAY_TO_STRING(tx_gi[j], htt_stats_buf->ofdma_tx_gi[j],
+ HTT_TX_PDEV_STATS_NUM_MCS_COUNTERS);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "ofdma_tx_gi[%u] = %s ",
+ j, tx_gi[j]);
+ }
+
+ memset(str_buf, 0x0, HTT_MAX_STRING_LEN);
+ ARRAY_TO_STRING(str_buf, htt_stats_buf->tx_dcm,
+ HTT_TX_PDEV_STATS_NUM_DCM_COUNTERS);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "tx_dcm = %s\n", str_buf);
+
+ if (len >= buf_len)
+ buf[buf_len - 1] = 0;
+ else
+ buf[len] = 0;
+
+ stats_req->buf_len = len;
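+ /* The success path intentionally falls through to the cleanup label:
+ * kfree(NULL) is a no-op, so freeing every slot is safe even when an
+ * allocation failed part-way through.
+ */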
+fail:
+ for (j = 0; j < HTT_TX_PDEV_STATS_NUM_GI_COUNTERS; j++)
+ kfree(tx_gi[j]);
+}
+
+static inline void htt_print_rx_pdev_rate_stats_tlv(const void *tag_buf,
+ struct debug_htt_stats_req *stats_req)
+{
+ const struct htt_rx_pdev_rate_stats_tlv *htt_stats_buf = tag_buf;
+ u8 *buf = stats_req->buf;
+ u32 len = stats_req->buf_len;
+ u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
+ u8 i, j;
+ u16 index = 0;
+ char *rssi_chain[HTT_RX_PDEV_STATS_NUM_SPATIAL_STREAMS] = {NULL};
+ char *rx_gi[HTT_RX_PDEV_STATS_NUM_GI_COUNTERS] = {NULL};
+ char str_buf[HTT_MAX_STRING_LEN] = {0};
+ char *rx_pilot_evm_db[HTT_RX_PDEV_STATS_NUM_SPATIAL_STREAMS] = {NULL};
+
+ for (j = 0; j < HTT_RX_PDEV_STATS_NUM_SPATIAL_STREAMS; j++) {
+ rssi_chain[j] = kmalloc(HTT_MAX_STRING_LEN, GFP_ATOMIC);
+ if (!rssi_chain[j])
+ goto fail;
+ }
+
+ for (j = 0; j < HTT_RX_PDEV_STATS_NUM_GI_COUNTERS; j++) {
+ rx_gi[j] = kmalloc(HTT_MAX_STRING_LEN, GFP_ATOMIC);
+ if (!rx_gi[j])
+ goto fail;
+ }
+
+ for (j = 0; j < HTT_RX_PDEV_STATS_NUM_SPATIAL_STREAMS; j++) {
+ rx_pilot_evm_db[j] = kmalloc(HTT_MAX_STRING_LEN, GFP_ATOMIC);
+ if (!rx_pilot_evm_db[j])
+ goto fail;
+ }
+
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "HTT_RX_PDEV_RATE_STATS_TLV:");
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "mac_id = %u",
+ htt_stats_buf->mac_id__word & 0xFF);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "nsts = %u",
+ htt_stats_buf->nsts);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "rx_ldpc = %u",
+ htt_stats_buf->rx_ldpc);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "rts_cnt = %u",
+ htt_stats_buf->rts_cnt);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "rssi_mgmt = %u",
+ htt_stats_buf->rssi_mgmt);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "rssi_data = %u",
+ htt_stats_buf->rssi_data);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "rssi_comb = %u",
+ htt_stats_buf->rssi_comb);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "rssi_in_dbm = %d",
+ htt_stats_buf->rssi_in_dbm);
+
+ memset(str_buf, 0x0, HTT_MAX_STRING_LEN);
+ ARRAY_TO_STRING(str_buf, htt_stats_buf->rx_mcs,
+ HTT_RX_PDEV_STATS_NUM_MCS_COUNTERS);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "rx_mcs = %s ", str_buf);
+
+ memset(str_buf, 0x0, HTT_MAX_STRING_LEN);
+ ARRAY_TO_STRING(str_buf, htt_stats_buf->rx_nss,
+ HTT_RX_PDEV_STATS_NUM_SPATIAL_STREAMS);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "rx_nss = %s ", str_buf);
+
+ memset(str_buf, 0x0, HTT_MAX_STRING_LEN);
+ ARRAY_TO_STRING(str_buf, htt_stats_buf->rx_dcm,
+ HTT_RX_PDEV_STATS_NUM_DCM_COUNTERS);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "rx_dcm = %s ", str_buf);
+
+ memset(str_buf, 0x0, HTT_MAX_STRING_LEN);
+ ARRAY_TO_STRING(str_buf, htt_stats_buf->rx_stbc,
+ HTT_RX_PDEV_STATS_NUM_MCS_COUNTERS);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "rx_stbc = %s ", str_buf);
+
+ memset(str_buf, 0x0, HTT_MAX_STRING_LEN);
+ ARRAY_TO_STRING(str_buf, htt_stats_buf->rx_bw,
+ HTT_RX_PDEV_STATS_NUM_BW_COUNTERS);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "rx_bw = %s ", str_buf);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "rx_evm_nss_count = %u",
+ htt_stats_buf->nss_count);
+
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "rx_evm_pilot_count = %u",
+ htt_stats_buf->pilot_count);
+
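+ /* Note: snprintf() returns the length that *would* have been written,
+ * so if a line ever exceeded HTT_MAX_STRING_LEN the accumulated index
+ * could pass the buffer size and the size argument would wrap;
+ * scnprintf() would bound index to the space actually consumed.
+ */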
+ for (j = 0; j < HTT_RX_PDEV_STATS_NUM_SPATIAL_STREAMS; j++) {
+ index = 0;
+
+ for (i = 0; i < HTT_RX_PDEV_STATS_RXEVM_MAX_PILOTS_PER_NSS; i++)
+ index += snprintf(&rx_pilot_evm_db[j][index],
+ HTT_MAX_STRING_LEN - index,
+ " %u:%d,",
+ i,
+ htt_stats_buf->rx_pilot_evm_db[j][i]);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "pilot_evm_dB[%u] = %s ",
+ j, rx_pilot_evm_db[j]);
+ }
+
+ index = 0;
+ memset(str_buf, 0x0, HTT_MAX_STRING_LEN);
+ for (i = 0; i < HTT_RX_PDEV_STATS_NUM_SPATIAL_STREAMS; i++)
+ index += snprintf(&str_buf[index],
+ HTT_MAX_STRING_LEN - index,
+ " %u:%d,", i, htt_stats_buf->rx_pilot_evm_db_mean[i]);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "pilot_evm_dB_mean = %s ", str_buf);
+
+ for (j = 0; j < HTT_RX_PDEV_STATS_NUM_SPATIAL_STREAMS; j++) {
+ ARRAY_TO_STRING(rssi_chain[j], htt_stats_buf->rssi_chain[j],
+ HTT_RX_PDEV_STATS_NUM_BW_COUNTERS);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "rssi_chain[%u] = %s ",
+ j, rssi_chain[j]);
+ }
+
+ for (j = 0; j < HTT_RX_PDEV_STATS_NUM_GI_COUNTERS; j++) {
+ ARRAY_TO_STRING(rx_gi[j], htt_stats_buf->rx_gi[j],
+ HTT_RX_PDEV_STATS_NUM_MCS_COUNTERS);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "rx_gi[%u] = %s ",
+ j, rx_gi[j]);
+ }
+
+ memset(str_buf, 0x0, HTT_MAX_STRING_LEN);
+ ARRAY_TO_STRING(str_buf, htt_stats_buf->rx_pream,
+ HTT_RX_PDEV_STATS_NUM_PREAMBLE_TYPES);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "rx_pream = %s", str_buf);
+
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "rx_11ax_su_ext = %u",
+ htt_stats_buf->rx_11ax_su_ext);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "rx_11ac_mumimo = %u",
+ htt_stats_buf->rx_11ac_mumimo);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "rx_11ax_mumimo = %u",
+ htt_stats_buf->rx_11ax_mumimo);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "rx_11ax_ofdma = %u",
+ htt_stats_buf->rx_11ax_ofdma);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "txbf = %u",
+ htt_stats_buf->txbf);
+
+ memset(str_buf, 0x0, HTT_MAX_STRING_LEN);
+ ARRAY_TO_STRING(str_buf, htt_stats_buf->rx_legacy_cck_rate,
+ HTT_RX_PDEV_STATS_NUM_LEGACY_CCK_STATS);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "rx_legacy_cck_rate = %s ",
+ str_buf);
+
+ memset(str_buf, 0x0, HTT_MAX_STRING_LEN);
+ ARRAY_TO_STRING(str_buf, htt_stats_buf->rx_legacy_ofdm_rate,
+ HTT_RX_PDEV_STATS_NUM_LEGACY_OFDM_STATS);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "rx_legacy_ofdm_rate = %s ",
+ str_buf);
+
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "rx_active_dur_us_low = %u",
+ htt_stats_buf->rx_active_dur_us_low);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "rx_active_dur_us_high = %u",
+ htt_stats_buf->rx_active_dur_us_high);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "rx_11ax_ul_ofdma = %u",
+ htt_stats_buf->rx_11ax_ul_ofdma);
+
+ memset(str_buf, 0x0, HTT_MAX_STRING_LEN);
+ ARRAY_TO_STRING(str_buf, htt_stats_buf->ul_ofdma_rx_mcs,
+ HTT_RX_PDEV_STATS_NUM_MCS_COUNTERS);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "ul_ofdma_rx_mcs = %s ", str_buf);
+
+ for (j = 0; j < HTT_RX_PDEV_STATS_NUM_GI_COUNTERS; j++) {
+ ARRAY_TO_STRING(rx_gi[j], htt_stats_buf->ul_ofdma_rx_gi[j],
+ HTT_RX_PDEV_STATS_NUM_MCS_COUNTERS);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "ul_ofdma_rx_gi[%u] = %s ",
+ j, rx_gi[j]);
+ }
+
+ memset(str_buf, 0x0, HTT_MAX_STRING_LEN);
+ ARRAY_TO_STRING(str_buf, htt_stats_buf->ul_ofdma_rx_nss,
+ HTT_RX_PDEV_STATS_NUM_SPATIAL_STREAMS);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "ul_ofdma_rx_nss = %s ", str_buf);
+
+ memset(str_buf, 0x0, HTT_MAX_STRING_LEN);
+ ARRAY_TO_STRING(str_buf, htt_stats_buf->ul_ofdma_rx_bw,
+ HTT_RX_PDEV_STATS_NUM_BW_COUNTERS);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "ul_ofdma_rx_bw = %s ", str_buf);
+
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "ul_ofdma_rx_stbc = %u",
+ htt_stats_buf->ul_ofdma_rx_stbc);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "ul_ofdma_rx_ldpc = %u",
+ htt_stats_buf->ul_ofdma_rx_ldpc);
+
+ memset(str_buf, 0x0, HTT_MAX_STRING_LEN);
+ ARRAY_TO_STRING(str_buf, htt_stats_buf->rx_ulofdma_non_data_ppdu,
+ HTT_RX_PDEV_MAX_OFDMA_NUM_USER);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "rx_ulofdma_non_data_ppdu = %s ",
+ str_buf);
+
+ memset(str_buf, 0x0, HTT_MAX_STRING_LEN);
+ ARRAY_TO_STRING(str_buf, htt_stats_buf->rx_ulofdma_data_ppdu,
+ HTT_RX_PDEV_MAX_OFDMA_NUM_USER);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "rx_ulofdma_data_ppdu = %s ",
+ str_buf);
+
+ memset(str_buf, 0x0, HTT_MAX_STRING_LEN);
+ ARRAY_TO_STRING(str_buf, htt_stats_buf->rx_ulofdma_mpdu_ok,
+ HTT_RX_PDEV_MAX_OFDMA_NUM_USER);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "rx_ulofdma_mpdu_ok = %s ", str_buf);
+
+ memset(str_buf, 0x0, HTT_MAX_STRING_LEN);
+ ARRAY_TO_STRING(str_buf, htt_stats_buf->rx_ulofdma_mpdu_fail,
+ HTT_RX_PDEV_MAX_OFDMA_NUM_USER);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "rx_ulofdma_mpdu_fail = %s",
+ str_buf);
+
+ for (j = 0; j < HTT_RX_PDEV_STATS_NUM_SPATIAL_STREAMS; j++) {
+ index = 0;
+ memset(str_buf, 0x0, HTT_MAX_STRING_LEN);
+ for (i = 0; i < HTT_RX_PDEV_MAX_OFDMA_NUM_USER; i++)
+ index += snprintf(&str_buf[index],
+ HTT_MAX_STRING_LEN - index,
+ " %u:%d,",
+ i, htt_stats_buf->rx_ul_fd_rssi[j][i]);
+ len += HTT_DBG_OUT(buf + len, buf_len - len,
+ "rx_ul_fd_rssi: nss[%u] = %s", j, str_buf);
+ }
+
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "per_chain_rssi_pkt_type = %#x",
+ htt_stats_buf->per_chain_rssi_pkt_type);
+
+ for (j = 0; j < HTT_RX_PDEV_STATS_NUM_SPATIAL_STREAMS; j++) {
+ index = 0;
+ memset(str_buf, 0x0, HTT_MAX_STRING_LEN);
+ for (i = 0; i < HTT_RX_PDEV_STATS_NUM_BW_COUNTERS; i++)
+ index += snprintf(&str_buf[index],
+ HTT_MAX_STRING_LEN - index,
+ " %u:%d,",
+ i,
+ htt_stats_buf->rx_per_chain_rssi_in_dbm[j][i]);
+ len += HTT_DBG_OUT(buf + len, buf_len - len,
+ "rx_per_chain_rssi_in_dbm[%u] = %s ", j, str_buf);
+ }
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "\n");
+
+ if (len >= buf_len)
+ buf[buf_len - 1] = 0;
+ else
+ buf[len] = 0;
+
+ stats_req->buf_len = len;
+
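+ /* As above: fall through to free the scratch buffers on both paths. */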
+fail:
+ for (j = 0; j < HTT_RX_PDEV_STATS_NUM_SPATIAL_STREAMS; j++)
+ kfree(rssi_chain[j]);
+
+ for (j = 0; j < HTT_RX_PDEV_STATS_NUM_SPATIAL_STREAMS; j++)
+ kfree(rx_pilot_evm_db[j]);
+
+ for (i = 0; i < HTT_RX_PDEV_STATS_NUM_GI_COUNTERS; i++)
+ kfree(rx_gi[i]);
+}
+
+static inline void htt_print_rx_soc_fw_stats_tlv(const void *tag_buf,
+ struct debug_htt_stats_req *stats_req)
+{
+ const struct htt_rx_soc_fw_stats_tlv *htt_stats_buf = tag_buf;
+ u8 *buf = stats_req->buf;
+ u32 len = stats_req->buf_len;
+ u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
+
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "HTT_RX_SOC_FW_STATS_TLV:");
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "fw_reo_ring_data_msdu = %u",
+ htt_stats_buf->fw_reo_ring_data_msdu);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "fw_to_host_data_msdu_bcmc = %u",
+ htt_stats_buf->fw_to_host_data_msdu_bcmc);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "fw_to_host_data_msdu_uc = %u",
+ htt_stats_buf->fw_to_host_data_msdu_uc);
+ len += HTT_DBG_OUT(buf + len, buf_len - len,
+ "ofld_remote_data_buf_recycle_cnt = %u",
+ htt_stats_buf->ofld_remote_data_buf_recycle_cnt);
+ len += HTT_DBG_OUT(buf + len, buf_len - len,
+ "ofld_remote_free_buf_indication_cnt = %u",
+ htt_stats_buf->ofld_remote_free_buf_indication_cnt);
+ len += HTT_DBG_OUT(buf + len, buf_len - len,
+ "ofld_buf_to_host_data_msdu_uc = %u",
+ htt_stats_buf->ofld_buf_to_host_data_msdu_uc);
+ len += HTT_DBG_OUT(buf + len, buf_len - len,
+ "reo_fw_ring_to_host_data_msdu_uc = %u",
+ htt_stats_buf->reo_fw_ring_to_host_data_msdu_uc);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "wbm_sw_ring_reap = %u",
+ htt_stats_buf->wbm_sw_ring_reap);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "wbm_forward_to_host_cnt = %u",
+ htt_stats_buf->wbm_forward_to_host_cnt);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "wbm_target_recycle_cnt = %u",
+ htt_stats_buf->wbm_target_recycle_cnt);
+ len += HTT_DBG_OUT(buf + len, buf_len - len,
+ "target_refill_ring_recycle_cnt = %u",
+ htt_stats_buf->target_refill_ring_recycle_cnt);
+
+ if (len >= buf_len)
+ buf[buf_len - 1] = 0;
+ else
+ buf[len] = 0;
+
+ stats_req->buf_len = len;
+}
+
+static inline void
+htt_print_rx_soc_fw_refill_ring_empty_tlv_v(const void *tag_buf,
+ u16 tag_len,
+ struct debug_htt_stats_req *stats_req)
+{
+ const struct htt_rx_soc_fw_refill_ring_empty_tlv_v *htt_stats_buf = tag_buf;
+ u8 *buf = stats_req->buf;
+ u32 len = stats_req->buf_len;
+ u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
+ char refill_ring_empty_cnt[HTT_MAX_STRING_LEN] = {0};
+ u16 num_elems = min_t(u16, (tag_len >> 2), HTT_RX_STATS_REFILL_MAX_RING);
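+ /* Clamp the element count derived from the TLV length so an oversized
+ * TLV from firmware cannot index past the fixed-size ring array; the
+ * rxdma-err, reo-err and num-refill printers below use the same clamp.
+ */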
+
+ len += HTT_DBG_OUT(buf + len, buf_len - len,
+ "HTT_RX_SOC_FW_REFILL_RING_EMPTY_TLV_V:");
+
+ ARRAY_TO_STRING(refill_ring_empty_cnt,
+ htt_stats_buf->refill_ring_empty_cnt,
+ num_elems);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "refill_ring_empty_cnt = %s\n",
+ refill_ring_empty_cnt);
+
+ if (len >= buf_len)
+ buf[buf_len - 1] = 0;
+ else
+ buf[len] = 0;
+
+ stats_req->buf_len = len;
+}
+
+static inline void
+htt_print_rx_soc_fw_refill_ring_num_rxdma_err_tlv_v(const void *tag_buf,
+ u16 tag_len,
+ struct debug_htt_stats_req *stats_req)
+{
+ const struct htt_rx_soc_fw_refill_ring_num_rxdma_err_tlv_v *htt_stats_buf =
+ tag_buf;
+ u8 *buf = stats_req->buf;
+ u32 len = stats_req->buf_len;
+ u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
+ char rxdma_err_cnt[HTT_MAX_STRING_LEN] = {0};
+ u16 num_elems = min_t(u16, (tag_len >> 2), HTT_RX_RXDMA_MAX_ERR_CODE);
+
+ len += HTT_DBG_OUT(buf + len, buf_len - len,
+ "HTT_RX_SOC_FW_REFILL_RING_NUM_RXDMA_ERR_TLV_V:");
+
+ ARRAY_TO_STRING(rxdma_err_cnt,
+ htt_stats_buf->rxdma_err,
+ num_elems);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "rxdma_err = %s\n",
+ rxdma_err_cnt);
+
+ if (len >= buf_len)
+ buf[buf_len - 1] = 0;
+ else
+ buf[len] = 0;
+
+ stats_req->buf_len = len;
+}
+
+static inline void
+htt_print_rx_soc_fw_refill_ring_num_reo_err_tlv_v(const void *tag_buf,
+ u16 tag_len,
+ struct debug_htt_stats_req *stats_req)
+{
+ const struct htt_rx_soc_fw_refill_ring_num_reo_err_tlv_v *htt_stats_buf = tag_buf;
+ u8 *buf = stats_req->buf;
+ u32 len = stats_req->buf_len;
+ u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
+ char reo_err_cnt[HTT_MAX_STRING_LEN] = {0};
+ u16 num_elems = min_t(u16, (tag_len >> 2), HTT_RX_REO_MAX_ERR_CODE);
+
+ len += HTT_DBG_OUT(buf + len, buf_len - len,
+ "HTT_RX_SOC_FW_REFILL_RING_NUM_REO_ERR_TLV_V:");
+
+ ARRAY_TO_STRING(reo_err_cnt,
+ htt_stats_buf->reo_err,
+ num_elems);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "reo_err = %s\n",
+ reo_err_cnt);
+
+ if (len >= buf_len)
+ buf[buf_len - 1] = 0;
+ else
+ buf[len] = 0;
+
+ stats_req->buf_len = len;
+}
+
+static inline void
+htt_print_rx_reo_debug_stats_tlv_v(const void *tag_buf,
+ struct debug_htt_stats_req *stats_req)
+{
+ const struct htt_rx_reo_resource_stats_tlv_v *htt_stats_buf = tag_buf;
+ u8 *buf = stats_req->buf;
+ u32 len = stats_req->buf_len;
+ u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
+
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "HTT_RX_REO_RESOURCE_STATS_TLV:");
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "sample_id = %u",
+ htt_stats_buf->sample_id);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "total_max = %u",
+ htt_stats_buf->total_max);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "total_avg = %u",
+ htt_stats_buf->total_avg);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "total_sample = %u",
+ htt_stats_buf->total_sample);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "non_zeros_avg = %u",
+ htt_stats_buf->non_zeros_avg);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "non_zeros_sample = %u",
+ htt_stats_buf->non_zeros_sample);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "last_non_zeros_max = %u",
+ htt_stats_buf->last_non_zeros_max);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "last_non_zeros_min %u",
+ htt_stats_buf->last_non_zeros_min);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "last_non_zeros_avg %u",
+ htt_stats_buf->last_non_zeros_avg);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "last_non_zeros_sample %u\n",
+ htt_stats_buf->last_non_zeros_sample);
+
+ if (len >= buf_len)
+ buf[buf_len - 1] = 0;
+ else
+ buf[len] = 0;
+
+ stats_req->buf_len = len;
+}
+
+static inline void
+htt_print_rx_soc_fw_refill_ring_num_refill_tlv_v(const void *tag_buf,
+ u16 tag_len,
+ struct debug_htt_stats_req *stats_req)
+{
+ const struct htt_rx_soc_fw_refill_ring_num_refill_tlv_v *htt_stats_buf = tag_buf;
+ u8 *buf = stats_req->buf;
+ u32 len = stats_req->buf_len;
+ u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
+ char refill_ring_num_refill[HTT_MAX_STRING_LEN] = {0};
+ u16 num_elems = min_t(u16, (tag_len >> 2), HTT_RX_STATS_REFILL_MAX_RING);
+
+ len += HTT_DBG_OUT(buf + len, buf_len - len,
+ "HTT_RX_SOC_FW_REFILL_RING_NUM_REFILL_TLV_V:");
+
+ ARRAY_TO_STRING(refill_ring_num_refill,
+ htt_stats_buf->refill_ring_num_refill,
+ num_elems);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "refill_ring_num_refill = %s\n",
+ refill_ring_num_refill);
+
+ if (len >= buf_len)
+ buf[buf_len - 1] = 0;
+ else
+ buf[len] = 0;
+
+ stats_req->buf_len = len;
+}
+
+static inline void htt_print_rx_pdev_fw_stats_tlv(const void *tag_buf,
+ struct debug_htt_stats_req *stats_req)
+{
+ const struct htt_rx_pdev_fw_stats_tlv *htt_stats_buf = tag_buf;
+ u8 *buf = stats_req->buf;
+ u32 len = stats_req->buf_len;
+ u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
+ char fw_ring_mgmt_subtype[HTT_MAX_STRING_LEN] = {0};
+ char fw_ring_ctrl_subtype[HTT_MAX_STRING_LEN] = {0};
+
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "HTT_RX_PDEV_FW_STATS_TLV:");
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "mac_id = %u",
+ htt_stats_buf->mac_id__word & 0xFF);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "ppdu_recvd = %u",
+ htt_stats_buf->ppdu_recvd);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "mpdu_cnt_fcs_ok = %u",
+ htt_stats_buf->mpdu_cnt_fcs_ok);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "mpdu_cnt_fcs_err = %u",
+ htt_stats_buf->mpdu_cnt_fcs_err);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "tcp_msdu_cnt = %u",
+ htt_stats_buf->tcp_msdu_cnt);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "tcp_ack_msdu_cnt = %u",
+ htt_stats_buf->tcp_ack_msdu_cnt);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "udp_msdu_cnt = %u",
+ htt_stats_buf->udp_msdu_cnt);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "other_msdu_cnt = %u",
+ htt_stats_buf->other_msdu_cnt);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "fw_ring_mpdu_ind = %u",
+ htt_stats_buf->fw_ring_mpdu_ind);
+
+ ARRAY_TO_STRING(fw_ring_mgmt_subtype,
+ htt_stats_buf->fw_ring_mgmt_subtype,
+ HTT_STATS_SUBTYPE_MAX);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "fw_ring_mgmt_subtype = %s ",
+ fw_ring_mgmt_subtype);
+
+ ARRAY_TO_STRING(fw_ring_ctrl_subtype,
+ htt_stats_buf->fw_ring_ctrl_subtype,
+ HTT_STATS_SUBTYPE_MAX);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "fw_ring_ctrl_subtype = %s ",
+ fw_ring_ctrl_subtype);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "fw_ring_mcast_data_msdu = %u",
+ htt_stats_buf->fw_ring_mcast_data_msdu);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "fw_ring_bcast_data_msdu = %u",
+ htt_stats_buf->fw_ring_bcast_data_msdu);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "fw_ring_ucast_data_msdu = %u",
+ htt_stats_buf->fw_ring_ucast_data_msdu);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "fw_ring_null_data_msdu = %u",
+ htt_stats_buf->fw_ring_null_data_msdu);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "fw_ring_mpdu_drop = %u",
+ htt_stats_buf->fw_ring_mpdu_drop);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "ofld_local_data_ind_cnt = %u",
+ htt_stats_buf->ofld_local_data_ind_cnt);
+ len += HTT_DBG_OUT(buf + len, buf_len - len,
+ "ofld_local_data_buf_recycle_cnt = %u",
+ htt_stats_buf->ofld_local_data_buf_recycle_cnt);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "drx_local_data_ind_cnt = %u",
+ htt_stats_buf->drx_local_data_ind_cnt);
+ len += HTT_DBG_OUT(buf + len, buf_len - len,
+ "drx_local_data_buf_recycle_cnt = %u",
+ htt_stats_buf->drx_local_data_buf_recycle_cnt);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "local_nondata_ind_cnt = %u",
+ htt_stats_buf->local_nondata_ind_cnt);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "local_nondata_buf_recycle_cnt = %u",
+ htt_stats_buf->local_nondata_buf_recycle_cnt);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "fw_status_buf_ring_refill_cnt = %u",
+ htt_stats_buf->fw_status_buf_ring_refill_cnt);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "fw_status_buf_ring_empty_cnt = %u",
+ htt_stats_buf->fw_status_buf_ring_empty_cnt);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "fw_pkt_buf_ring_refill_cnt = %u",
+ htt_stats_buf->fw_pkt_buf_ring_refill_cnt);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "fw_pkt_buf_ring_empty_cnt = %u",
+ htt_stats_buf->fw_pkt_buf_ring_empty_cnt);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "fw_link_buf_ring_refill_cnt = %u",
+ htt_stats_buf->fw_link_buf_ring_refill_cnt);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "fw_link_buf_ring_empty_cnt = %u",
+ htt_stats_buf->fw_link_buf_ring_empty_cnt);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "host_pkt_buf_ring_refill_cnt = %u",
+ htt_stats_buf->host_pkt_buf_ring_refill_cnt);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "host_pkt_buf_ring_empty_cnt = %u",
+ htt_stats_buf->host_pkt_buf_ring_empty_cnt);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "mon_pkt_buf_ring_refill_cnt = %u",
+ htt_stats_buf->mon_pkt_buf_ring_refill_cnt);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "mon_pkt_buf_ring_empty_cnt = %u",
+ htt_stats_buf->mon_pkt_buf_ring_empty_cnt);
+ len += HTT_DBG_OUT(buf + len, buf_len - len,
+ "mon_status_buf_ring_refill_cnt = %u",
+ htt_stats_buf->mon_status_buf_ring_refill_cnt);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "mon_status_buf_ring_empty_cnt = %u",
+ htt_stats_buf->mon_status_buf_ring_empty_cnt);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "mon_desc_buf_ring_refill_cnt = %u",
+ htt_stats_buf->mon_desc_buf_ring_refill_cnt);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "mon_desc_buf_ring_empty_cnt = %u",
+ htt_stats_buf->mon_desc_buf_ring_empty_cnt);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "mon_dest_ring_update_cnt = %u",
+ htt_stats_buf->mon_dest_ring_update_cnt);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "mon_dest_ring_full_cnt = %u",
+ htt_stats_buf->mon_dest_ring_full_cnt);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "rx_suspend_cnt = %u",
+ htt_stats_buf->rx_suspend_cnt);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "rx_suspend_fail_cnt = %u",
+ htt_stats_buf->rx_suspend_fail_cnt);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "rx_resume_cnt = %u",
+ htt_stats_buf->rx_resume_cnt);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "rx_resume_fail_cnt = %u",
+ htt_stats_buf->rx_resume_fail_cnt);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "rx_ring_switch_cnt = %u",
+ htt_stats_buf->rx_ring_switch_cnt);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "rx_ring_restore_cnt = %u",
+ htt_stats_buf->rx_ring_restore_cnt);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "rx_flush_cnt = %u",
+ htt_stats_buf->rx_flush_cnt);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "rx_recovery_reset_cnt = %u\n",
+ htt_stats_buf->rx_recovery_reset_cnt);
+
+ if (len >= buf_len)
+ buf[buf_len - 1] = 0;
+ else
+ buf[len] = 0;
+
+ stats_req->buf_len = len;
+}
+
+static inline void
+htt_print_rx_pdev_fw_ring_mpdu_err_tlv_v(const void *tag_buf,
+ struct debug_htt_stats_req *stats_req)
+{
+ const struct htt_rx_pdev_fw_ring_mpdu_err_tlv_v *htt_stats_buf = tag_buf;
+ u8 *buf = stats_req->buf;
+ u32 len = stats_req->buf_len;
+ u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
+ char fw_ring_mpdu_err[HTT_MAX_STRING_LEN] = {0};
+
+ len += HTT_DBG_OUT(buf + len, buf_len - len,
+ "HTT_RX_PDEV_FW_RING_MPDU_ERR_TLV_V:");
+
+ ARRAY_TO_STRING(fw_ring_mpdu_err,
+ htt_stats_buf->fw_ring_mpdu_err,
+ HTT_RX_STATS_RXDMA_MAX_ERR);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "fw_ring_mpdu_err = %s\n",
+ fw_ring_mpdu_err);
+
+ if (len >= buf_len)
+ buf[buf_len - 1] = 0;
+ else
+ buf[len] = 0;
+
+ stats_req->buf_len = len;
+}
+
+static inline void
+htt_print_rx_pdev_fw_mpdu_drop_tlv_v(const void *tag_buf,
+ u16 tag_len,
+ struct debug_htt_stats_req *stats_req)
+{
+ const struct htt_rx_pdev_fw_mpdu_drop_tlv_v *htt_stats_buf = tag_buf;
+ u8 *buf = stats_req->buf;
+ u32 len = stats_req->buf_len;
+ u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
+ char fw_mpdu_drop[HTT_MAX_STRING_LEN] = {0};
+ u16 num_elems = min_t(u16, (tag_len >> 2), HTT_RX_STATS_FW_DROP_REASON_MAX);
+
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "HTT_RX_PDEV_FW_MPDU_DROP_TLV_V:");
+
+ ARRAY_TO_STRING(fw_mpdu_drop,
+ htt_stats_buf->fw_mpdu_drop,
+ num_elems);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "fw_mpdu_drop = %s\n", fw_mpdu_drop);
+
+ if (len >= buf_len)
+ buf[buf_len - 1] = 0;
+ else
+ buf[len] = 0;
+
+ stats_req->buf_len = len;
+}
+
+static inline void
+htt_print_rx_pdev_fw_stats_phy_err_tlv(const void *tag_buf,
+ struct debug_htt_stats_req *stats_req)
+{
+ const struct htt_rx_pdev_fw_stats_phy_err_tlv *htt_stats_buf = tag_buf;
+ u8 *buf = stats_req->buf;
+ u32 len = stats_req->buf_len;
+ u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
+ char phy_errs[HTT_MAX_STRING_LEN] = {0};
+
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "HTT_RX_PDEV_FW_STATS_PHY_ERR_TLV:");
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "mac_id__word = %u",
+ htt_stats_buf->mac_id__word);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "total_phy_err_nct = %u",
+ htt_stats_buf->total_phy_err_cnt);
+
+ ARRAY_TO_STRING(phy_errs,
+ htt_stats_buf->phy_err,
+ HTT_STATS_PHY_ERR_MAX);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "phy_errs = %s\n", phy_errs);
+
+ if (len >= buf_len)
+ buf[buf_len - 1] = 0;
+ else
+ buf[len] = 0;
+
+ stats_req->buf_len = len;
+}
+
+static inline void
+htt_print_pdev_cca_stats_hist_tlv(const void *tag_buf,
+ struct debug_htt_stats_req *stats_req)
+{
+ const struct htt_pdev_cca_stats_hist_v1_tlv *htt_stats_buf = tag_buf;
+ u8 *buf = stats_req->buf;
+ u32 len = stats_req->buf_len;
+ u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
+
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "\nHTT_PDEV_CCA_STATS_HIST_TLV:");
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "chan_num = %u",
+ htt_stats_buf->chan_num);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "num_records = %u",
+ htt_stats_buf->num_records);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "valid_cca_counters_bitmap = 0x%x",
+ htt_stats_buf->valid_cca_counters_bitmap);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "collection_interval = %u\n",
+ htt_stats_buf->collection_interval);
+
+ len += HTT_DBG_OUT(buf + len, buf_len - len,
+ "HTT_PDEV_STATS_CCA_COUNTERS_TLV:(in usec)");
+ len += HTT_DBG_OUT(buf + len, buf_len - len,
+ "| tx_frame| rx_frame| rx_clear| my_rx_frame| cnt| med_rx_idle| med_tx_idle_global| cca_obss|");
+
+ if (len >= buf_len)
+ buf[buf_len - 1] = 0;
+ else
+ buf[len] = 0;
+
+ stats_req->buf_len = len;
+}
+
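+ /* Each CCA counters TLV is rendered as one table row under the column
+ * header emitted by htt_print_pdev_cca_stats_hist_tlv() above.
+ */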
+static inline void
+htt_print_pdev_stats_cca_counters_tlv(const void *tag_buf,
+ struct debug_htt_stats_req *stats_req)
+{
+ const struct htt_pdev_stats_cca_counters_tlv *htt_stats_buf = tag_buf;
+ u8 *buf = stats_req->buf;
+ u32 len = stats_req->buf_len;
+ u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
+
+ len += HTT_DBG_OUT(buf + len, buf_len - len,
+ "|%10u| %10u| %10u| %11u| %10u| %11u| %18u| %10u|",
+ htt_stats_buf->tx_frame_usec,
+ htt_stats_buf->rx_frame_usec,
+ htt_stats_buf->rx_clear_usec,
+ htt_stats_buf->my_rx_frame_usec,
+ htt_stats_buf->usec_cnt,
+ htt_stats_buf->med_rx_idle_usec,
+ htt_stats_buf->med_tx_idle_global_usec,
+ htt_stats_buf->cca_obss_usec);
+
+ if (len >= buf_len)
+ buf[buf_len - 1] = 0;
+ else
+ buf[len] = 0;
+
+ stats_req->buf_len = len;
+}
+
+static inline void htt_print_hw_stats_whal_tx_tlv(const void *tag_buf,
+ struct debug_htt_stats_req *stats_req)
+{
+ const struct htt_hw_stats_whal_tx_tlv *htt_stats_buf = tag_buf;
+ u8 *buf = stats_req->buf;
+ u32 len = stats_req->buf_len;
+ u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
+
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "HTT_HW_STATS_WHAL_TX_TLV:");
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "mac_id = %u",
+ htt_stats_buf->mac_id__word & 0xFF);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "last_unpause_ppdu_id = %u",
+ htt_stats_buf->last_unpause_ppdu_id);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "hwsch_unpause_wait_tqm_write = %u",
+ htt_stats_buf->hwsch_unpause_wait_tqm_write);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "hwsch_dummy_tlv_skipped = %u",
+ htt_stats_buf->hwsch_dummy_tlv_skipped);
+ len += HTT_DBG_OUT(buf + len, buf_len - len,
+ "hwsch_misaligned_offset_received = %u",
+ htt_stats_buf->hwsch_misaligned_offset_received);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "hwsch_reset_count = %u",
+ htt_stats_buf->hwsch_reset_count);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "hwsch_dev_reset_war = %u",
+ htt_stats_buf->hwsch_dev_reset_war);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "hwsch_delayed_pause = %u",
+ htt_stats_buf->hwsch_delayed_pause);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "hwsch_long_delayed_pause = %u",
+ htt_stats_buf->hwsch_long_delayed_pause);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "sch_rx_ppdu_no_response = %u",
+ htt_stats_buf->sch_rx_ppdu_no_response);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "sch_selfgen_response = %u",
+ htt_stats_buf->sch_selfgen_response);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "sch_rx_sifs_resp_trigger= %u\n",
+ htt_stats_buf->sch_rx_sifs_resp_trigger);
+
+ if (len >= buf_len)
+ buf[buf_len - 1] = 0;
+ else
+ buf[len] = 0;
+
+ stats_req->buf_len = len;
+}
+
+static inline void
+htt_print_pdev_stats_twt_sessions_tlv(const void *tag_buf,
+ struct debug_htt_stats_req *stats_req)
+{
+ const struct htt_pdev_stats_twt_sessions_tlv *htt_stats_buf = tag_buf;
+ u8 *buf = stats_req->buf;
+ u32 len = stats_req->buf_len;
+ u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
+
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "HTT_PDEV_STATS_TWT_SESSIONS_TLV:");
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "pdev_id = %u",
+ htt_stats_buf->pdev_id);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "num_sessions = %u\n",
+ htt_stats_buf->num_sessions);
+
+ if (len >= buf_len)
+ buf[buf_len - 1] = 0;
+ else
+ buf[len] = 0;
+
+ stats_req->buf_len = len;
+}
+
+static inline void
+htt_print_pdev_stats_twt_session_tlv(const void *tag_buf,
+ struct debug_htt_stats_req *stats_req)
+{
+ const struct htt_pdev_stats_twt_session_tlv *htt_stats_buf = tag_buf;
+ u8 *buf = stats_req->buf;
+ u32 len = stats_req->buf_len;
+ u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
+
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "HTT_PDEV_STATS_TWT_SESSION_TLV:");
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "vdev_id = %u",
+ htt_stats_buf->vdev_id);
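+ /* The MAC address is packed into two words (mac_addr_l32 carries
+ * bytes 0-3, mac_addr_h16 bytes 4-5); peel the bytes off with masks to
+ * print the usual xx:xx:xx:xx:xx:xx form.
+ */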
+ len += HTT_DBG_OUT(buf + len, buf_len - len,
+ "peer_mac = %02x:%02x:%02x:%02x:%02x:%02x",
+ htt_stats_buf->peer_mac.mac_addr_l32 & 0xFF,
+ (htt_stats_buf->peer_mac.mac_addr_l32 & 0xFF00) >> 8,
+ (htt_stats_buf->peer_mac.mac_addr_l32 & 0xFF0000) >> 16,
+ (htt_stats_buf->peer_mac.mac_addr_l32 & 0xFF000000) >> 24,
+ (htt_stats_buf->peer_mac.mac_addr_h16 & 0xFF),
+ (htt_stats_buf->peer_mac.mac_addr_h16 & 0xFF00) >> 8);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "flow_id_flags = %u",
+ htt_stats_buf->flow_id_flags);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "dialog_id = %u",
+ htt_stats_buf->dialog_id);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "wake_dura_us = %u",
+ htt_stats_buf->wake_dura_us);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "wake_intvl_us = %u",
+ htt_stats_buf->wake_intvl_us);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "sp_offset_us = %u\n",
+ htt_stats_buf->sp_offset_us);
+
+ if (len >= buf_len)
+ buf[buf_len - 1] = 0;
+ else
+ buf[len] = 0;
+
+ stats_req->buf_len = len;
+}
+
+static inline void
+htt_print_pdev_obss_pd_stats_tlv_v(const void *tag_buf,
+ struct debug_htt_stats_req *stats_req)
+{
+ const struct htt_pdev_obss_pd_stats_tlv *htt_stats_buf = tag_buf;
+ u8 *buf = stats_req->buf;
+ u32 len = stats_req->buf_len;
+ u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
+
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "OBSS Tx success PPDU = %u",
+ htt_stats_buf->num_obss_tx_ppdu_success);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "OBSS Tx failures PPDU = %u\n",
+ htt_stats_buf->num_obss_tx_ppdu_failure);
+
+ if (len >= buf_len)
+ buf[buf_len - 1] = 0;
+ else
+ buf[len] = 0;
+
+ stats_req->buf_len = len;
+}
+
+static inline void htt_htt_stats_debug_dump(const u32 *tag_buf,
+ struct debug_htt_stats_req *stats_req)
+{
+ u8 *buf = stats_req->buf;
+ u32 len = stats_req->buf_len;
+ u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
+ u32 tlv_len = 0, i = 0, word_len = 0;
+
+ tlv_len = FIELD_GET(HTT_TLV_LEN, *tag_buf) + HTT_TLV_HDR_LEN;
+ word_len = (tlv_len % 4) == 0 ? (tlv_len / 4) : ((tlv_len / 4) + 1);
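+ /* Round the TLV byte length (header included) up to whole 32-bit
+ * words; equivalent to DIV_ROUND_UP(tlv_len, 4).
+ */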
+ len += HTT_DBG_OUT(buf + len, buf_len - len,
+ "============================================");
+ len += HTT_DBG_OUT(buf + len, buf_len - len,
+ "HKDBG TLV DUMP: (tag_len=%u bytes, words=%u)",
+ tlv_len, word_len);
+
+ for (i = 0; i + 3 < word_len; i += 4) {
+ len += HTT_DBG_OUT(buf + len, buf_len - len,
+ "0x%08x 0x%08x 0x%08x 0x%08x",
+ tag_buf[i], tag_buf[i + 1],
+ tag_buf[i + 2], tag_buf[i + 3]);
+ }
+
+ if (i + 3 == word_len) {
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "0x%08x 0x%08x 0x%08x ",
+ tag_buf[i], tag_buf[i + 1], tag_buf[i + 2]);
+ } else if (i + 2 == word_len) {
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "0x%08x 0x%08x ",
+ tag_buf[i], tag_buf[i + 1]);
+ } else if (i + 1 == word_len) {
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "0x%08x ",
+ tag_buf[i]);
+ }
+ len += HTT_DBG_OUT(buf + len, buf_len - len,
+ "============================================");
+
+ if (len >= buf_len)
+ buf[buf_len - 1] = 0;
+ else
+ buf[len] = 0;
+
+ stats_req->buf_len = len;
+}
+
+static int ath11k_dbg_htt_ext_stats_parse(struct ath11k_base *ab,
+ u16 tag, u16 len, const void *tag_buf,
+ void *user_data)
+{
+ struct debug_htt_stats_req *stats_req = user_data;
+
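+ /* TLV dispatcher: invoked once per TLV of an extended-stats event with
+ * the tag, payload length and payload; user_data carries the debugfs
+ * request whose buffer the printers above append to.
+ */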
+ switch (tag) {
+ case HTT_STATS_TX_PDEV_CMN_TAG:
+ htt_print_tx_pdev_stats_cmn_tlv(tag_buf, stats_req);
+ break;
+ case HTT_STATS_TX_PDEV_UNDERRUN_TAG:
+ htt_print_tx_pdev_stats_urrn_tlv_v(tag_buf, len, stats_req);
+ break;
+ case HTT_STATS_TX_PDEV_SIFS_TAG:
+ htt_print_tx_pdev_stats_sifs_tlv_v(tag_buf, len, stats_req);
+ break;
+ case HTT_STATS_TX_PDEV_FLUSH_TAG:
+ htt_print_tx_pdev_stats_flush_tlv_v(tag_buf, len, stats_req);
+ break;
+ case HTT_STATS_TX_PDEV_PHY_ERR_TAG:
+ htt_print_tx_pdev_stats_phy_err_tlv_v(tag_buf, len, stats_req);
+ break;
+ case HTT_STATS_TX_PDEV_SIFS_HIST_TAG:
+ htt_print_tx_pdev_stats_sifs_hist_tlv_v(tag_buf, len, stats_req);
+ break;
+
+ case HTT_STATS_TX_PDEV_TX_PPDU_STATS_TAG:
+ htt_print_tx_pdev_stats_tx_ppdu_stats_tlv_v(tag_buf, stats_req);
+ break;
+
+ case HTT_STATS_TX_PDEV_TRIED_MPDU_CNT_HIST_TAG:
+ htt_print_tx_pdev_stats_tried_mpdu_cnt_hist_tlv_v(tag_buf, len,
+ stats_req);
+ break;
+
+ case HTT_STATS_STRING_TAG:
+ htt_print_stats_string_tlv(tag_buf, len, stats_req);
+ break;
+
+ case HTT_STATS_TX_HWQ_CMN_TAG:
+ htt_print_tx_hwq_stats_cmn_tlv(tag_buf, stats_req);
+ break;
+
+ case HTT_STATS_TX_HWQ_DIFS_LATENCY_TAG:
+ htt_print_tx_hwq_difs_latency_stats_tlv_v(tag_buf, len, stats_req);
+ break;
+
+ case HTT_STATS_TX_HWQ_CMD_RESULT_TAG:
+ htt_print_tx_hwq_cmd_result_stats_tlv_v(tag_buf, len, stats_req);
+ break;
+
+ case HTT_STATS_TX_HWQ_CMD_STALL_TAG:
+ htt_print_tx_hwq_cmd_stall_stats_tlv_v(tag_buf, len, stats_req);
+ break;
+
+ case HTT_STATS_TX_HWQ_FES_STATUS_TAG:
+ htt_print_tx_hwq_fes_result_stats_tlv_v(tag_buf, len, stats_req);
+ break;
+
+ case HTT_STATS_TX_HWQ_TRIED_MPDU_CNT_HIST_TAG:
+ htt_print_tx_hwq_tried_mpdu_cnt_hist_tlv_v(tag_buf, len, stats_req);
+ break;
+
+ case HTT_STATS_TX_HWQ_TXOP_USED_CNT_HIST_TAG:
+ htt_print_tx_hwq_txop_used_cnt_hist_tlv_v(tag_buf, len, stats_req);
+ break;
+ case HTT_STATS_TX_TQM_GEN_MPDU_TAG:
+ htt_print_tx_tqm_gen_mpdu_stats_tlv_v(tag_buf, len, stats_req);
+ break;
+
+ case HTT_STATS_TX_TQM_LIST_MPDU_TAG:
+ htt_print_tx_tqm_list_mpdu_stats_tlv_v(tag_buf, len, stats_req);
+ break;
+
+ case HTT_STATS_TX_TQM_LIST_MPDU_CNT_TAG:
+ htt_print_tx_tqm_list_mpdu_cnt_tlv_v(tag_buf, len, stats_req);
+ break;
+
+ case HTT_STATS_TX_TQM_CMN_TAG:
+ htt_print_tx_tqm_cmn_stats_tlv(tag_buf, stats_req);
+ break;
+
+ case HTT_STATS_TX_TQM_PDEV_TAG:
+ htt_print_tx_tqm_pdev_stats_tlv_v(tag_buf, stats_req);
+ break;
+
+ case HTT_STATS_TX_TQM_CMDQ_STATUS_TAG:
+ htt_print_tx_tqm_cmdq_status_tlv(tag_buf, stats_req);
+ break;
+
+ case HTT_STATS_TX_DE_EAPOL_PACKETS_TAG:
+ htt_print_tx_de_eapol_packets_stats_tlv(tag_buf, stats_req);
+ break;
+
+ case HTT_STATS_TX_DE_CLASSIFY_FAILED_TAG:
+ htt_print_tx_de_classify_failed_stats_tlv(tag_buf, stats_req);
+ break;
+
+ case HTT_STATS_TX_DE_CLASSIFY_STATS_TAG:
+ htt_print_tx_de_classify_stats_tlv(tag_buf, stats_req);
+ break;
+
+ case HTT_STATS_TX_DE_CLASSIFY_STATUS_TAG:
+ htt_print_tx_de_classify_status_stats_tlv(tag_buf, stats_req);
+ break;
+
+ case HTT_STATS_TX_DE_ENQUEUE_PACKETS_TAG:
+ htt_print_tx_de_enqueue_packets_stats_tlv(tag_buf, stats_req);
+ break;
+
+ case HTT_STATS_TX_DE_ENQUEUE_DISCARD_TAG:
+ htt_print_tx_de_enqueue_discard_stats_tlv(tag_buf, stats_req);
+ break;
+
+ case HTT_STATS_TX_DE_FW2WBM_RING_FULL_HIST_TAG:
+ htt_print_tx_de_fw2wbm_ring_full_hist_tlv(tag_buf, len, stats_req);
+ break;
+
+ case HTT_STATS_TX_DE_CMN_TAG:
+ htt_print_tx_de_cmn_stats_tlv(tag_buf, stats_req);
+ break;
+
+ case HTT_STATS_RING_IF_TAG:
+ htt_print_ring_if_stats_tlv(tag_buf, stats_req);
+ break;
+
+ case HTT_STATS_TX_PDEV_MU_MIMO_STATS_TAG:
+ htt_print_tx_pdev_mu_mimo_sch_stats_tlv(tag_buf, stats_req);
+ break;
+
+ case HTT_STATS_SFM_CMN_TAG:
+ htt_print_sfm_cmn_tlv(tag_buf, stats_req);
+ break;
+
+ case HTT_STATS_SRING_STATS_TAG:
+ htt_print_sring_stats_tlv(tag_buf, stats_req);
+ break;
+
+ case HTT_STATS_RX_PDEV_FW_STATS_TAG:
+ htt_print_rx_pdev_fw_stats_tlv(tag_buf, stats_req);
+ break;
+
+ case HTT_STATS_RX_PDEV_FW_RING_MPDU_ERR_TAG:
+ htt_print_rx_pdev_fw_ring_mpdu_err_tlv_v(tag_buf, stats_req);
+ break;
+
+ case HTT_STATS_RX_PDEV_FW_MPDU_DROP_TAG:
+ htt_print_rx_pdev_fw_mpdu_drop_tlv_v(tag_buf, len, stats_req);
+ break;
+
+ case HTT_STATS_RX_SOC_FW_STATS_TAG:
+ htt_print_rx_soc_fw_stats_tlv(tag_buf, stats_req);
+ break;
+
+ case HTT_STATS_RX_SOC_FW_REFILL_RING_EMPTY_TAG:
+ htt_print_rx_soc_fw_refill_ring_empty_tlv_v(tag_buf, len, stats_req);
+ break;
+
+ case HTT_STATS_RX_SOC_FW_REFILL_RING_NUM_REFILL_TAG:
+ htt_print_rx_soc_fw_refill_ring_num_refill_tlv_v(
+ tag_buf, len, stats_req);
+ break;
+ case HTT_STATS_RX_REFILL_RXDMA_ERR_TAG:
+ htt_print_rx_soc_fw_refill_ring_num_rxdma_err_tlv_v(
+ tag_buf, len, stats_req);
+ break;
+
+ case HTT_STATS_RX_REFILL_REO_ERR_TAG:
+ htt_print_rx_soc_fw_refill_ring_num_reo_err_tlv_v(
+ tag_buf, len, stats_req);
+ break;
+
+ case HTT_STATS_RX_REO_RESOURCE_STATS_TAG:
+		htt_print_rx_reo_debug_stats_tlv_v(tag_buf, stats_req);
+ break;
+ case HTT_STATS_RX_PDEV_FW_STATS_PHY_ERR_TAG:
+ htt_print_rx_pdev_fw_stats_phy_err_tlv(tag_buf, stats_req);
+ break;
+
+ case HTT_STATS_TX_PDEV_RATE_STATS_TAG:
+ htt_print_tx_pdev_rate_stats_tlv(tag_buf, stats_req);
+ break;
+
+ case HTT_STATS_RX_PDEV_RATE_STATS_TAG:
+ htt_print_rx_pdev_rate_stats_tlv(tag_buf, stats_req);
+ break;
+
+ case HTT_STATS_TX_PDEV_SCHEDULER_TXQ_STATS_TAG:
+ htt_print_tx_pdev_stats_sched_per_txq_tlv(tag_buf, stats_req);
+ break;
+ case HTT_STATS_TX_SCHED_CMN_TAG:
+ htt_print_stats_tx_sched_cmn_tlv(tag_buf, stats_req);
+ break;
+
+ case HTT_STATS_TX_PDEV_MPDU_STATS_TAG:
+ htt_print_tx_pdev_mu_mimo_mpdu_stats_tlv(tag_buf, stats_req);
+ break;
+
+ case HTT_STATS_SCHED_TXQ_CMD_POSTED_TAG:
+ htt_print_sched_txq_cmd_posted_tlv_v(tag_buf, len, stats_req);
+ break;
+
+ case HTT_STATS_RING_IF_CMN_TAG:
+ htt_print_ring_if_cmn_tlv(tag_buf, stats_req);
+ break;
+
+ case HTT_STATS_SFM_CLIENT_USER_TAG:
+ htt_print_sfm_client_user_tlv_v(tag_buf, len, stats_req);
+ break;
+
+ case HTT_STATS_SFM_CLIENT_TAG:
+ htt_print_sfm_client_tlv(tag_buf, stats_req);
+ break;
+
+ case HTT_STATS_TX_TQM_ERROR_STATS_TAG:
+ htt_print_tx_tqm_error_stats_tlv(tag_buf, stats_req);
+ break;
+
+ case HTT_STATS_SCHED_TXQ_CMD_REAPED_TAG:
+ htt_print_sched_txq_cmd_reaped_tlv_v(tag_buf, len, stats_req);
+ break;
+
+ case HTT_STATS_SRING_CMN_TAG:
+ htt_print_sring_cmn_tlv(tag_buf, stats_req);
+ break;
+
+ case HTT_STATS_TX_SOUNDING_STATS_TAG:
+ htt_print_tx_sounding_stats_tlv(tag_buf, stats_req);
+ break;
+
+ case HTT_STATS_TX_SELFGEN_AC_ERR_STATS_TAG:
+ htt_print_tx_selfgen_ac_err_stats_tlv(tag_buf, stats_req);
+ break;
+
+ case HTT_STATS_TX_SELFGEN_CMN_STATS_TAG:
+ htt_print_tx_selfgen_cmn_stats_tlv(tag_buf, stats_req);
+ break;
+
+ case HTT_STATS_TX_SELFGEN_AC_STATS_TAG:
+ htt_print_tx_selfgen_ac_stats_tlv(tag_buf, stats_req);
+ break;
+
+ case HTT_STATS_TX_SELFGEN_AX_STATS_TAG:
+ htt_print_tx_selfgen_ax_stats_tlv(tag_buf, stats_req);
+ break;
+
+ case HTT_STATS_TX_SELFGEN_AX_ERR_STATS_TAG:
+ htt_print_tx_selfgen_ax_err_stats_tlv(tag_buf, stats_req);
+ break;
+
+ case HTT_STATS_TX_HWQ_MUMIMO_SCH_STATS_TAG:
+ htt_print_tx_hwq_mu_mimo_sch_stats_tlv(tag_buf, stats_req);
+ break;
+
+ case HTT_STATS_TX_HWQ_MUMIMO_MPDU_STATS_TAG:
+ htt_print_tx_hwq_mu_mimo_mpdu_stats_tlv(tag_buf, stats_req);
+ break;
+
+ case HTT_STATS_TX_HWQ_MUMIMO_CMN_STATS_TAG:
+ htt_print_tx_hwq_mu_mimo_cmn_stats_tlv(tag_buf, stats_req);
+ break;
+
+ case HTT_STATS_HW_INTR_MISC_TAG:
+ htt_print_hw_stats_intr_misc_tlv(tag_buf, stats_req);
+ break;
+
+ case HTT_STATS_HW_WD_TIMEOUT_TAG:
+ htt_print_hw_stats_wd_timeout_tlv(tag_buf, stats_req);
+ break;
+
+ case HTT_STATS_HW_PDEV_ERRS_TAG:
+ htt_print_hw_stats_pdev_errs_tlv(tag_buf, stats_req);
+ break;
+
+ case HTT_STATS_COUNTER_NAME_TAG:
+ htt_print_counter_tlv(tag_buf, stats_req);
+ break;
+
+ case HTT_STATS_TX_TID_DETAILS_TAG:
+ htt_print_tx_tid_stats_tlv(tag_buf, stats_req);
+ break;
+
+ case HTT_STATS_TX_TID_DETAILS_V1_TAG:
+ htt_print_tx_tid_stats_v1_tlv(tag_buf, stats_req);
+ break;
+
+ case HTT_STATS_RX_TID_DETAILS_TAG:
+ htt_print_rx_tid_stats_tlv(tag_buf, stats_req);
+ break;
+
+ case HTT_STATS_PEER_STATS_CMN_TAG:
+ htt_print_peer_stats_cmn_tlv(tag_buf, stats_req);
+ break;
+
+ case HTT_STATS_PEER_DETAILS_TAG:
+ htt_print_peer_details_tlv(tag_buf, stats_req);
+ break;
+
+ case HTT_STATS_PEER_MSDU_FLOWQ_TAG:
+ htt_print_msdu_flow_stats_tlv(tag_buf, stats_req);
+ break;
+
+ case HTT_STATS_PEER_TX_RATE_STATS_TAG:
+ htt_print_tx_peer_rate_stats_tlv(tag_buf, stats_req);
+ break;
+
+ case HTT_STATS_PEER_RX_RATE_STATS_TAG:
+ htt_print_rx_peer_rate_stats_tlv(tag_buf, stats_req);
+ break;
+
+ case HTT_STATS_TX_DE_COMPL_STATS_TAG:
+ htt_print_tx_de_compl_stats_tlv(tag_buf, stats_req);
+ break;
+
+ case HTT_STATS_PDEV_CCA_1SEC_HIST_TAG:
+ case HTT_STATS_PDEV_CCA_100MSEC_HIST_TAG:
+ case HTT_STATS_PDEV_CCA_STAT_CUMULATIVE_TAG:
+ htt_print_pdev_cca_stats_hist_tlv(tag_buf, stats_req);
+ break;
+
+ case HTT_STATS_PDEV_CCA_COUNTERS_TAG:
+ htt_print_pdev_stats_cca_counters_tlv(tag_buf, stats_req);
+ break;
+
+ case HTT_STATS_WHAL_TX_TAG:
+ htt_print_hw_stats_whal_tx_tlv(tag_buf, stats_req);
+ break;
+
+ case HTT_STATS_PDEV_TWT_SESSIONS_TAG:
+ htt_print_pdev_stats_twt_sessions_tlv(tag_buf, stats_req);
+ break;
+
+ case HTT_STATS_PDEV_TWT_SESSION_TAG:
+ htt_print_pdev_stats_twt_session_tlv(tag_buf, stats_req);
+ break;
+
+ case HTT_STATS_SCHED_TXQ_SCHED_ORDER_SU_TAG:
+ htt_print_sched_txq_sched_order_su_tlv_v(tag_buf, len, stats_req);
+ break;
+
+ case HTT_STATS_SCHED_TXQ_SCHED_INELIGIBILITY_TAG:
+ htt_print_sched_txq_sched_ineligibility_tlv_v(tag_buf, len, stats_req);
+ break;
+
+ case HTT_STATS_PDEV_OBSS_PD_TAG:
+ htt_print_pdev_obss_pd_stats_tlv_v(tag_buf, stats_req);
+ break;
+ default:
+ break;
+ }
+
+ return 0;
+}
+
+void ath11k_dbg_htt_ext_stats_handler(struct ath11k_base *ab,
+ struct sk_buff *skb)
+{
+ struct ath11k_htt_extd_stats_msg *msg;
+ struct debug_htt_stats_req *stats_req;
+ struct ath11k *ar;
+ u32 len;
+ u64 cookie;
+ int ret;
+ u8 pdev_id;
+
+ msg = (struct ath11k_htt_extd_stats_msg *)skb->data;
+ cookie = msg->cookie;
+
+ if (FIELD_GET(HTT_STATS_COOKIE_MSB, cookie) != HTT_STATS_MAGIC_VALUE) {
+ ath11k_warn(ab, "received invalid htt ext stats event\n");
+ return;
+ }
+
+ pdev_id = FIELD_GET(HTT_STATS_COOKIE_LSB, cookie);
+ rcu_read_lock();
+ ar = ath11k_mac_get_ar_by_pdev_id(ab, pdev_id);
+ rcu_read_unlock();
+ if (!ar) {
+ ath11k_warn(ab, "failed to get ar for pdev_id %d\n", pdev_id);
+ return;
+ }
+
+ stats_req = ar->debug.htt_stats.stats_req;
+ if (!stats_req)
+ return;
+
+ spin_lock_bh(&ar->debug.htt_stats.lock);
+ if (stats_req->done) {
+ spin_unlock_bh(&ar->debug.htt_stats.lock);
+ return;
+ }
+ stats_req->done = true;
+ spin_unlock_bh(&ar->debug.htt_stats.lock);
+
+ len = FIELD_GET(HTT_T2H_EXT_STATS_INFO1_LENGTH, msg->info1);
+ ret = ath11k_dp_htt_tlv_iter(ab, msg->data, len,
+ ath11k_dbg_htt_ext_stats_parse,
+ stats_req);
+ if (ret)
+		ath11k_warn(ab, "failed to parse tlv %d\n", ret);
+
+ complete(&stats_req->cmpln);
+}
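+
+/*
+ * Editorial sketch, not part of the patch: the 64-bit cookie checked in
+ * the handler above is self-describing. The magic value occupies the
+ * upper 32 bits and the pdev_id the lower 32, so FIELD_GET() on the
+ * event side recovers exactly what FIELD_PREP() packed when the request
+ * was sent (see ath11k_dbg_htt_stats_req() below). The helper name here
+ * is hypothetical:
+ */
+static inline bool ath11k_dbg_htt_stats_cookie_valid(u64 cookie, u8 pdev_id)
+{
+	/* Both fields must match for the event to be accepted */
+	return FIELD_GET(HTT_STATS_COOKIE_MSB, cookie) == HTT_STATS_MAGIC_VALUE &&
+	       FIELD_GET(HTT_STATS_COOKIE_LSB, cookie) == pdev_id;
+}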
+
+static ssize_t ath11k_read_htt_stats_type(struct file *file,
+ char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct ath11k *ar = file->private_data;
+ char buf[32];
+ size_t len;
+
+ len = scnprintf(buf, sizeof(buf), "%u\n", ar->debug.htt_stats.type);
+
+ return simple_read_from_buffer(user_buf, count, ppos, buf, len);
+}
+
+static ssize_t ath11k_write_htt_stats_type(struct file *file,
+ const char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct ath11k *ar = file->private_data;
+ u8 type;
+ int ret;
+
+ ret = kstrtou8_from_user(user_buf, count, 0, &type);
+ if (ret)
+ return ret;
+
+ if (type >= ATH11K_DBG_HTT_NUM_EXT_STATS)
+ return -E2BIG;
+
+ if (type == ATH11K_DBG_HTT_EXT_STATS_RESET ||
+ type == ATH11K_DBG_HTT_EXT_STATS_PEER_INFO)
+ return -EPERM;
+
+ ar->debug.htt_stats.type = type;
+
+	return count;
+}
+
+static const struct file_operations fops_htt_stats_type = {
+ .read = ath11k_read_htt_stats_type,
+ .write = ath11k_write_htt_stats_type,
+ .open = simple_open,
+ .owner = THIS_MODULE,
+ .llseek = default_llseek,
+};
+
+static int ath11k_prep_htt_stats_cfg_params(struct ath11k *ar, u8 type,
+ const u8 *mac_addr,
+ struct htt_ext_stats_cfg_params *cfg_params)
+{
+ if (!cfg_params)
+ return -EINVAL;
+
+ switch (type) {
+ case ATH11K_DBG_HTT_EXT_STATS_PDEV_TX_HWQ:
+ case ATH11K_DBG_HTT_EXT_STATS_TX_MU_HWQ:
+ cfg_params->cfg0 = HTT_STAT_DEFAULT_CFG0_ALL_HWQS;
+ break;
+ case ATH11K_DBG_HTT_EXT_STATS_PDEV_TX_SCHED:
+ cfg_params->cfg0 = HTT_STAT_DEFAULT_CFG0_ALL_TXQS;
+ break;
+ case ATH11K_DBG_HTT_EXT_STATS_TQM_CMDQ:
+ cfg_params->cfg0 = HTT_STAT_DEFAULT_CFG0_ALL_CMDQS;
+ break;
+ case ATH11K_DBG_HTT_EXT_STATS_PEER_INFO:
+ cfg_params->cfg0 = HTT_STAT_PEER_INFO_MAC_ADDR;
+ cfg_params->cfg0 |= FIELD_PREP(GENMASK(15, 1),
+ HTT_PEER_STATS_REQ_MODE_FLUSH_TQM);
+ cfg_params->cfg1 = HTT_STAT_DEFAULT_PEER_REQ_TYPE;
+ cfg_params->cfg2 |= FIELD_PREP(GENMASK(7, 0), mac_addr[0]);
+ cfg_params->cfg2 |= FIELD_PREP(GENMASK(15, 8), mac_addr[1]);
+ cfg_params->cfg2 |= FIELD_PREP(GENMASK(23, 16), mac_addr[2]);
+ cfg_params->cfg2 |= FIELD_PREP(GENMASK(31, 24), mac_addr[3]);
+ cfg_params->cfg3 |= FIELD_PREP(GENMASK(7, 0), mac_addr[4]);
+ cfg_params->cfg3 |= FIELD_PREP(GENMASK(15, 8), mac_addr[5]);
+ break;
+ case ATH11K_DBG_HTT_EXT_STATS_RING_IF_INFO:
+ case ATH11K_DBG_HTT_EXT_STATS_SRNG_INFO:
+ cfg_params->cfg0 = HTT_STAT_DEFAULT_CFG0_ALL_RINGS;
+ break;
+ case ATH11K_DBG_HTT_EXT_STATS_ACTIVE_PEERS_LIST:
+ cfg_params->cfg0 = HTT_STAT_DEFAULT_CFG0_ACTIVE_PEERS;
+ break;
+ case ATH11K_DBG_HTT_EXT_STATS_PDEV_CCA_STATS:
+ cfg_params->cfg0 = HTT_STAT_DEFAULT_CFG0_CCA_CUMULATIVE;
+ break;
+ case ATH11K_DBG_HTT_EXT_STATS_TX_SOUNDING_INFO:
+ cfg_params->cfg0 = HTT_STAT_DEFAULT_CFG0_ACTIVE_VDEVS;
+ break;
+ default:
+ break;
+ }
+
+ return 0;
+}
+
+int ath11k_dbg_htt_stats_req(struct ath11k *ar)
+{
+ struct debug_htt_stats_req *stats_req = ar->debug.htt_stats.stats_req;
+ u8 type = stats_req->type;
+ u64 cookie = 0;
+ int ret, pdev_id = ar->pdev->pdev_id;
+ struct htt_ext_stats_cfg_params cfg_params = { 0 };
+
+ init_completion(&stats_req->cmpln);
+
+ stats_req->done = false;
+ stats_req->pdev_id = pdev_id;
+
+ cookie = FIELD_PREP(HTT_STATS_COOKIE_MSB, HTT_STATS_MAGIC_VALUE) |
+ FIELD_PREP(HTT_STATS_COOKIE_LSB, pdev_id);
+
+ ret = ath11k_prep_htt_stats_cfg_params(ar, type, stats_req->peer_addr,
+ &cfg_params);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to set htt stats cfg params: %d\n", ret);
+ return ret;
+ }
+
+ ret = ath11k_dp_tx_htt_h2t_ext_stats_req(ar, type, &cfg_params, cookie);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to send htt stats request: %d\n", ret);
+ return ret;
+ }
+
+ while (!wait_for_completion_timeout(&stats_req->cmpln, 3 * HZ)) {
+ spin_lock_bh(&ar->debug.htt_stats.lock);
+ if (!stats_req->done) {
+ stats_req->done = true;
+ spin_unlock_bh(&ar->debug.htt_stats.lock);
+ ath11k_warn(ar->ab, "stats request timed out\n");
+ return -ETIMEDOUT;
+ }
+ spin_unlock_bh(&ar->debug.htt_stats.lock);
+ }
+
+ return 0;
+}
+
+static int ath11k_open_htt_stats(struct inode *inode, struct file *file)
+{
+ struct ath11k *ar = inode->i_private;
+ struct debug_htt_stats_req *stats_req;
+ u8 type = ar->debug.htt_stats.type;
+ int ret;
+
+ if (type == ATH11K_DBG_HTT_EXT_STATS_RESET)
+ return -EPERM;
+
+ stats_req = vzalloc(sizeof(*stats_req) + ATH11K_HTT_STATS_BUF_SIZE);
+ if (!stats_req)
+ return -ENOMEM;
+
+ mutex_lock(&ar->conf_mutex);
+ ar->debug.htt_stats.stats_req = stats_req;
+ stats_req->type = type;
+ ret = ath11k_dbg_htt_stats_req(ar);
+ mutex_unlock(&ar->conf_mutex);
+ if (ret < 0)
+ goto out;
+
+ file->private_data = stats_req;
+ return 0;
+out:
+	/* Clear the pointer before freeing so a late HTT event can't use it */
+	ar->debug.htt_stats.stats_req = NULL;
+	vfree(stats_req);
+	return ret;
+}
+
+static int ath11k_release_htt_stats(struct inode *inode, struct file *file)
+{
+	struct ath11k *ar = inode->i_private;
+
+	/* Drop the per-pdev reference so the handler can't touch freed memory */
+	mutex_lock(&ar->conf_mutex);
+	ar->debug.htt_stats.stats_req = NULL;
+	mutex_unlock(&ar->conf_mutex);
+	vfree(file->private_data);
+	return 0;
+}
+
+static ssize_t ath11k_read_htt_stats(struct file *file,
+ char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct debug_htt_stats_req *stats_req = file->private_data;
+ char *buf;
+ u32 length = 0;
+
+ buf = stats_req->buf;
+ length = min_t(u32, stats_req->buf_len, ATH11K_HTT_STATS_BUF_SIZE);
+ return simple_read_from_buffer(user_buf, count, ppos, buf, length);
+}
+
+static const struct file_operations fops_dump_htt_stats = {
+ .open = ath11k_open_htt_stats,
+ .release = ath11k_release_htt_stats,
+ .read = ath11k_read_htt_stats,
+ .owner = THIS_MODULE,
+ .llseek = default_llseek,
+};
+
+static ssize_t ath11k_read_htt_stats_reset(struct file *file,
+ char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct ath11k *ar = file->private_data;
+ char buf[32];
+ size_t len;
+
+ len = scnprintf(buf, sizeof(buf), "%u\n", ar->debug.htt_stats.reset);
+
+ return simple_read_from_buffer(user_buf, count, ppos, buf, len);
+}
+
+static ssize_t ath11k_write_htt_stats_reset(struct file *file,
+ const char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct ath11k *ar = file->private_data;
+ u8 type;
+ struct htt_ext_stats_cfg_params cfg_params = { 0 };
+ int ret;
+
+ ret = kstrtou8_from_user(user_buf, count, 0, &type);
+ if (ret)
+ return ret;
+
+ if (type >= ATH11K_DBG_HTT_NUM_EXT_STATS ||
+ type == ATH11K_DBG_HTT_EXT_STATS_RESET)
+ return -E2BIG;
+
+ mutex_lock(&ar->conf_mutex);
+ cfg_params.cfg0 = HTT_STAT_DEFAULT_RESET_START_OFFSET;
+ cfg_params.cfg1 = 1 << (cfg_params.cfg0 + type);
+ ret = ath11k_dp_tx_htt_h2t_ext_stats_req(ar,
+ ATH11K_DBG_HTT_EXT_STATS_RESET,
+ &cfg_params,
+ 0ULL);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to send htt stats request: %d\n", ret);
+ mutex_unlock(&ar->conf_mutex);
+ return ret;
+ }
+
+ ar->debug.htt_stats.reset = type;
+ mutex_unlock(&ar->conf_mutex);
+
+	return count;
+}
+
+static const struct file_operations fops_htt_stats_reset = {
+ .read = ath11k_read_htt_stats_reset,
+ .write = ath11k_write_htt_stats_reset,
+ .open = simple_open,
+ .owner = THIS_MODULE,
+ .llseek = default_llseek,
+};
+
+void ath11k_debug_htt_stats_init(struct ath11k *ar)
+{
+ spin_lock_init(&ar->debug.htt_stats.lock);
+ debugfs_create_file("htt_stats_type", 0600, ar->debug.debugfs_pdev,
+ ar, &fops_htt_stats_type);
+ debugfs_create_file("htt_stats", 0400, ar->debug.debugfs_pdev,
+ ar, &fops_dump_htt_stats);
+ debugfs_create_file("htt_stats_reset", 0600, ar->debug.debugfs_pdev,
+ ar, &fops_htt_stats_reset);
+}
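+
+/*
+ * Editorial note, not part of the patch: the intended debugfs flow is
+ * roughly the following (the exact path is platform dependent and shown
+ * only as an example):
+ *
+ *	echo 1 > /sys/kernel/debug/ath11k/.../htt_stats_type
+ *	cat /sys/kernel/debug/ath11k/.../htt_stats
+ *
+ * open() on htt_stats issues the HTT request and blocks for up to three
+ * seconds waiting for the firmware event; read() then returns the
+ * buffer formatted by the htt_print_*() helpers above.
+ */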
diff --git a/drivers/net/wireless/ath/ath11k/debug_htt_stats.h b/drivers/net/wireless/ath/ath11k/debug_htt_stats.h
new file mode 100644
index 000000000000..4bdb62dd7b8d
--- /dev/null
+++ b/drivers/net/wireless/ath/ath11k/debug_htt_stats.h
@@ -0,0 +1,1662 @@
+/* SPDX-License-Identifier: BSD-3-Clause-Clear */
+/*
+ * Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
+ */
+
+#ifndef DEBUG_HTT_STATS_H
+#define DEBUG_HTT_STATS_H
+
+#define HTT_STATS_COOKIE_LSB GENMASK_ULL(31, 0)
+#define HTT_STATS_COOKIE_MSB GENMASK_ULL(63, 32)
+#define HTT_STATS_MAGIC_VALUE 0xF0F0F0F0
+
+enum htt_tlv_tag_t {
+ HTT_STATS_TX_PDEV_CMN_TAG = 0,
+ HTT_STATS_TX_PDEV_UNDERRUN_TAG = 1,
+ HTT_STATS_TX_PDEV_SIFS_TAG = 2,
+ HTT_STATS_TX_PDEV_FLUSH_TAG = 3,
+ HTT_STATS_TX_PDEV_PHY_ERR_TAG = 4,
+ HTT_STATS_STRING_TAG = 5,
+ HTT_STATS_TX_HWQ_CMN_TAG = 6,
+ HTT_STATS_TX_HWQ_DIFS_LATENCY_TAG = 7,
+ HTT_STATS_TX_HWQ_CMD_RESULT_TAG = 8,
+ HTT_STATS_TX_HWQ_CMD_STALL_TAG = 9,
+ HTT_STATS_TX_HWQ_FES_STATUS_TAG = 10,
+ HTT_STATS_TX_TQM_GEN_MPDU_TAG = 11,
+ HTT_STATS_TX_TQM_LIST_MPDU_TAG = 12,
+ HTT_STATS_TX_TQM_LIST_MPDU_CNT_TAG = 13,
+ HTT_STATS_TX_TQM_CMN_TAG = 14,
+ HTT_STATS_TX_TQM_PDEV_TAG = 15,
+ HTT_STATS_TX_TQM_CMDQ_STATUS_TAG = 16,
+ HTT_STATS_TX_DE_EAPOL_PACKETS_TAG = 17,
+ HTT_STATS_TX_DE_CLASSIFY_FAILED_TAG = 18,
+ HTT_STATS_TX_DE_CLASSIFY_STATS_TAG = 19,
+ HTT_STATS_TX_DE_CLASSIFY_STATUS_TAG = 20,
+ HTT_STATS_TX_DE_ENQUEUE_PACKETS_TAG = 21,
+ HTT_STATS_TX_DE_ENQUEUE_DISCARD_TAG = 22,
+ HTT_STATS_TX_DE_CMN_TAG = 23,
+ HTT_STATS_RING_IF_TAG = 24,
+ HTT_STATS_TX_PDEV_MU_MIMO_STATS_TAG = 25,
+ HTT_STATS_SFM_CMN_TAG = 26,
+ HTT_STATS_SRING_STATS_TAG = 27,
+ HTT_STATS_RX_PDEV_FW_STATS_TAG = 28,
+ HTT_STATS_RX_PDEV_FW_RING_MPDU_ERR_TAG = 29,
+ HTT_STATS_RX_PDEV_FW_MPDU_DROP_TAG = 30,
+ HTT_STATS_RX_SOC_FW_STATS_TAG = 31,
+ HTT_STATS_RX_SOC_FW_REFILL_RING_EMPTY_TAG = 32,
+ HTT_STATS_RX_SOC_FW_REFILL_RING_NUM_REFILL_TAG = 33,
+ HTT_STATS_TX_PDEV_RATE_STATS_TAG = 34,
+ HTT_STATS_RX_PDEV_RATE_STATS_TAG = 35,
+ HTT_STATS_TX_PDEV_SCHEDULER_TXQ_STATS_TAG = 36,
+ HTT_STATS_TX_SCHED_CMN_TAG = 37,
+ HTT_STATS_TX_PDEV_MUMIMO_MPDU_STATS_TAG = 38,
+ HTT_STATS_SCHED_TXQ_CMD_POSTED_TAG = 39,
+ HTT_STATS_RING_IF_CMN_TAG = 40,
+ HTT_STATS_SFM_CLIENT_USER_TAG = 41,
+ HTT_STATS_SFM_CLIENT_TAG = 42,
+ HTT_STATS_TX_TQM_ERROR_STATS_TAG = 43,
+ HTT_STATS_SCHED_TXQ_CMD_REAPED_TAG = 44,
+ HTT_STATS_SRING_CMN_TAG = 45,
+ HTT_STATS_TX_SELFGEN_AC_ERR_STATS_TAG = 46,
+ HTT_STATS_TX_SELFGEN_CMN_STATS_TAG = 47,
+ HTT_STATS_TX_SELFGEN_AC_STATS_TAG = 48,
+ HTT_STATS_TX_SELFGEN_AX_STATS_TAG = 49,
+ HTT_STATS_TX_SELFGEN_AX_ERR_STATS_TAG = 50,
+ HTT_STATS_TX_HWQ_MUMIMO_SCH_STATS_TAG = 51,
+ HTT_STATS_TX_HWQ_MUMIMO_MPDU_STATS_TAG = 52,
+ HTT_STATS_TX_HWQ_MUMIMO_CMN_STATS_TAG = 53,
+ HTT_STATS_HW_INTR_MISC_TAG = 54,
+ HTT_STATS_HW_WD_TIMEOUT_TAG = 55,
+ HTT_STATS_HW_PDEV_ERRS_TAG = 56,
+ HTT_STATS_COUNTER_NAME_TAG = 57,
+ HTT_STATS_TX_TID_DETAILS_TAG = 58,
+ HTT_STATS_RX_TID_DETAILS_TAG = 59,
+ HTT_STATS_PEER_STATS_CMN_TAG = 60,
+ HTT_STATS_PEER_DETAILS_TAG = 61,
+ HTT_STATS_PEER_TX_RATE_STATS_TAG = 62,
+ HTT_STATS_PEER_RX_RATE_STATS_TAG = 63,
+ HTT_STATS_PEER_MSDU_FLOWQ_TAG = 64,
+ HTT_STATS_TX_DE_COMPL_STATS_TAG = 65,
+ HTT_STATS_WHAL_TX_TAG = 66,
+ HTT_STATS_TX_PDEV_SIFS_HIST_TAG = 67,
+ HTT_STATS_RX_PDEV_FW_STATS_PHY_ERR_TAG = 68,
+ HTT_STATS_TX_TID_DETAILS_V1_TAG = 69,
+ HTT_STATS_PDEV_CCA_1SEC_HIST_TAG = 70,
+ HTT_STATS_PDEV_CCA_100MSEC_HIST_TAG = 71,
+ HTT_STATS_PDEV_CCA_STAT_CUMULATIVE_TAG = 72,
+ HTT_STATS_PDEV_CCA_COUNTERS_TAG = 73,
+ HTT_STATS_TX_PDEV_MPDU_STATS_TAG = 74,
+ HTT_STATS_PDEV_TWT_SESSIONS_TAG = 75,
+ HTT_STATS_PDEV_TWT_SESSION_TAG = 76,
+ HTT_STATS_RX_REFILL_RXDMA_ERR_TAG = 77,
+ HTT_STATS_RX_REFILL_REO_ERR_TAG = 78,
+ HTT_STATS_RX_REO_RESOURCE_STATS_TAG = 79,
+ HTT_STATS_TX_SOUNDING_STATS_TAG = 80,
+ HTT_STATS_TX_PDEV_TX_PPDU_STATS_TAG = 81,
+ HTT_STATS_TX_PDEV_TRIED_MPDU_CNT_HIST_TAG = 82,
+ HTT_STATS_TX_HWQ_TRIED_MPDU_CNT_HIST_TAG = 83,
+ HTT_STATS_TX_HWQ_TXOP_USED_CNT_HIST_TAG = 84,
+ HTT_STATS_TX_DE_FW2WBM_RING_FULL_HIST_TAG = 85,
+ HTT_STATS_SCHED_TXQ_SCHED_ORDER_SU_TAG = 86,
+ HTT_STATS_SCHED_TXQ_SCHED_INELIGIBILITY_TAG = 87,
+ HTT_STATS_PDEV_OBSS_PD_TAG = 88,
+
+ HTT_STATS_MAX_TAG,
+};
+
+#define HTT_STATS_MAX_STRING_SZ32 4
+#define HTT_STATS_MACID_INVALID 0xff
+#define HTT_TX_HWQ_MAX_DIFS_LATENCY_BINS 10
+#define HTT_TX_HWQ_MAX_CMD_RESULT_STATS 13
+#define HTT_TX_HWQ_MAX_CMD_STALL_STATS 5
+#define HTT_TX_HWQ_MAX_FES_RESULT_STATS 10
+
+enum htt_tx_pdev_underrun_enum {
+ HTT_STATS_TX_PDEV_NO_DATA_UNDERRUN = 0,
+ HTT_STATS_TX_PDEV_DATA_UNDERRUN_BETWEEN_MPDU = 1,
+ HTT_STATS_TX_PDEV_DATA_UNDERRUN_WITHIN_MPDU = 2,
+ HTT_TX_PDEV_MAX_URRN_STATS = 3,
+};
+
+#define HTT_TX_PDEV_MAX_FLUSH_REASON_STATS 71
+#define HTT_TX_PDEV_MAX_SIFS_BURST_STATS 9
+#define HTT_TX_PDEV_MAX_SIFS_BURST_HIST_STATS 10
+#define HTT_TX_PDEV_MAX_PHY_ERR_STATS 18
+#define HTT_TX_PDEV_SCHED_TX_MODE_MAX 4
+#define HTT_TX_PDEV_NUM_SCHED_ORDER_LOG 20
+
+#define HTT_RX_STATS_REFILL_MAX_RING 4
+#define HTT_RX_STATS_RXDMA_MAX_ERR 16
+#define HTT_RX_STATS_FW_DROP_REASON_MAX 16
+
+/* Bytes stored in little endian order */
+/* Length should be multiple of DWORD */
+struct htt_stats_string_tlv {
+ u32 data[0]; /* Can be variable length */
+} __packed;
+
+/* == TX PDEV STATS == */
+struct htt_tx_pdev_stats_cmn_tlv {
+ u32 mac_id__word;
+ u32 hw_queued;
+ u32 hw_reaped;
+ u32 underrun;
+ u32 hw_paused;
+ u32 hw_flush;
+ u32 hw_filt;
+ u32 tx_abort;
+ u32 mpdu_requed;
+ u32 tx_xretry;
+ u32 data_rc;
+ u32 mpdu_dropped_xretry;
+ u32 illgl_rate_phy_err;
+ u32 cont_xretry;
+ u32 tx_timeout;
+ u32 pdev_resets;
+ u32 phy_underrun;
+ u32 txop_ovf;
+ u32 seq_posted;
+ u32 seq_failed_queueing;
+ u32 seq_completed;
+ u32 seq_restarted;
+ u32 mu_seq_posted;
+ u32 seq_switch_hw_paused;
+ u32 next_seq_posted_dsr;
+ u32 seq_posted_isr;
+ u32 seq_ctrl_cached;
+ u32 mpdu_count_tqm;
+ u32 msdu_count_tqm;
+ u32 mpdu_removed_tqm;
+ u32 msdu_removed_tqm;
+ u32 mpdus_sw_flush;
+ u32 mpdus_hw_filter;
+ u32 mpdus_truncated;
+ u32 mpdus_ack_failed;
+ u32 mpdus_expired;
+ u32 mpdus_seq_hw_retry;
+ u32 ack_tlv_proc;
+ u32 coex_abort_mpdu_cnt_valid;
+ u32 coex_abort_mpdu_cnt;
+ u32 num_total_ppdus_tried_ota;
+ u32 num_data_ppdus_tried_ota;
+ u32 local_ctrl_mgmt_enqued;
+ u32 local_ctrl_mgmt_freed;
+ u32 local_data_enqued;
+ u32 local_data_freed;
+ u32 mpdu_tried;
+ u32 isr_wait_seq_posted;
+
+ u32 tx_active_dur_us_low;
+ u32 tx_active_dur_us_high;
+};
+
+/* NOTE: Variable length TLV, use length spec to infer array size */
+struct htt_tx_pdev_stats_urrn_tlv_v {
+ u32 urrn_stats[0]; /* HTT_TX_PDEV_MAX_URRN_STATS */
+};
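+
+/* Editorial sketch, not part of the patch: for the variable-length
+ * "_tlv_v" structures in this file, the element count is not carried in
+ * the struct itself but derived from the TLV byte length, capped at the
+ * per-TLV maximum noted in each comment. A hypothetical helper:
+ */
+static inline u16 htt_stats_tlv_num_elems(u16 tag_len, u16 max_elems)
+{
+	/* tag_len is in bytes; each element is a 4-byte dword */
+	return min_t(u16, tag_len >> 2, max_elems);
+}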
+
+/* NOTE: Variable length TLV, use length spec to infer array size */
+struct htt_tx_pdev_stats_flush_tlv_v {
+ u32 flush_errs[0]; /* HTT_TX_PDEV_MAX_FLUSH_REASON_STATS */
+};
+
+/* NOTE: Variable length TLV, use length spec to infer array size */
+struct htt_tx_pdev_stats_sifs_tlv_v {
+ u32 sifs_status[0]; /* HTT_TX_PDEV_MAX_SIFS_BURST_STATS */
+};
+
+/* NOTE: Variable length TLV, use length spec to infer array size */
+struct htt_tx_pdev_stats_phy_err_tlv_v {
+ u32 phy_errs[0]; /* HTT_TX_PDEV_MAX_PHY_ERR_STATS */
+};
+
+/* NOTE: Variable length TLV, use length spec to infer array size */
+struct htt_tx_pdev_stats_sifs_hist_tlv_v {
+	u32 sifs_hist_status[0]; /* HTT_TX_PDEV_MAX_SIFS_BURST_HIST_STATS */
+};
+
+struct htt_tx_pdev_stats_tx_ppdu_stats_tlv_v {
+ u32 num_data_ppdus_legacy_su;
+ u32 num_data_ppdus_ac_su;
+ u32 num_data_ppdus_ax_su;
+ u32 num_data_ppdus_ac_su_txbf;
+ u32 num_data_ppdus_ax_su_txbf;
+};
+
+/* NOTE: Variable length TLV, use length spec to infer array size.
+ *
+ * Tried_mpdu_cnt_hist is the histogram of MPDU tries per HWQ.
+ * The tries here is the count of the MPDUs within a PPDU that the
+ * HW had attempted to transmit on air, for the HWSCH schedule
+ * command submitted by FW. It is not the retry attempts.
+ * The histogram bins are 0-29, 30-59, 60-89 and so on. There are
+ * 10 bins in this histogram. They are defined in FW using the
+ * following macros:
+ * #define WAL_MAX_TRIED_MPDU_CNT_HISTOGRAM 9
+ * #define WAL_TRIED_MPDU_CNT_HISTOGRAM_INTERVAL 30
+ */
+struct htt_tx_pdev_stats_tried_mpdu_cnt_hist_tlv_v {
+ u32 hist_bin_size;
+ u32 tried_mpdu_cnt_hist[0]; /* HTT_TX_PDEV_TRIED_MPDU_CNT_HIST */
+};
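+
+/* Editorial sketch, not part of the patch: with the 30-wide bins
+ * described above, a tried-MPDU count would map to its histogram bin
+ * roughly as follows (the helper name is hypothetical):
+ */
+static inline u32 htt_tried_mpdu_cnt_bin(u32 tried_mpdu_cnt)
+{
+	/* 0-29 -> bin 0, 30-59 -> bin 1, ..., capped at the last bin */
+	return min_t(u32, tried_mpdu_cnt / 30, 9);
+}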
+
+/* == SOC ERROR STATS == */
+
+/* =============== PDEV ERROR STATS ============== */
+#define HTT_STATS_MAX_HW_INTR_NAME_LEN 8
+struct htt_hw_stats_intr_misc_tlv {
+ /* Stored as little endian */
+ u8 hw_intr_name[HTT_STATS_MAX_HW_INTR_NAME_LEN];
+ u32 mask;
+ u32 count;
+};
+
+#define HTT_STATS_MAX_HW_MODULE_NAME_LEN 8
+struct htt_hw_stats_wd_timeout_tlv {
+ /* Stored as little endian */
+ u8 hw_module_name[HTT_STATS_MAX_HW_MODULE_NAME_LEN];
+ u32 count;
+};
+
+struct htt_hw_stats_pdev_errs_tlv {
+ u32 mac_id__word; /* BIT [ 7 : 0] : mac_id */
+ u32 tx_abort;
+ u32 tx_abort_fail_count;
+ u32 rx_abort;
+ u32 rx_abort_fail_count;
+ u32 warm_reset;
+ u32 cold_reset;
+ u32 tx_flush;
+ u32 tx_glb_reset;
+ u32 tx_txq_reset;
+ u32 rx_timeout_reset;
+};
+
+struct htt_hw_stats_whal_tx_tlv {
+ u32 mac_id__word;
+ u32 last_unpause_ppdu_id;
+ u32 hwsch_unpause_wait_tqm_write;
+ u32 hwsch_dummy_tlv_skipped;
+ u32 hwsch_misaligned_offset_received;
+ u32 hwsch_reset_count;
+ u32 hwsch_dev_reset_war;
+ u32 hwsch_delayed_pause;
+ u32 hwsch_long_delayed_pause;
+ u32 sch_rx_ppdu_no_response;
+ u32 sch_selfgen_response;
+ u32 sch_rx_sifs_resp_trigger;
+};
+
+/* ============ PEER STATS ============ */
+struct htt_msdu_flow_stats_tlv {
+ u32 last_update_timestamp;
+ u32 last_add_timestamp;
+ u32 last_remove_timestamp;
+ u32 total_processed_msdu_count;
+ u32 cur_msdu_count_in_flowq;
+ u32 sw_peer_id;
+ u32 tx_flow_no__tid_num__drop_rule;
+ u32 last_cycle_enqueue_count;
+ u32 last_cycle_dequeue_count;
+ u32 last_cycle_drop_count;
+ u32 current_drop_th;
+};
+
+#define MAX_HTT_TID_NAME 8
+
+/* Tidq stats */
+struct htt_tx_tid_stats_tlv {
+ /* Stored as little endian */
+ u8 tid_name[MAX_HTT_TID_NAME];
+ u32 sw_peer_id__tid_num;
+ u32 num_sched_pending__num_ppdu_in_hwq;
+ u32 tid_flags;
+ u32 hw_queued;
+ u32 hw_reaped;
+ u32 mpdus_hw_filter;
+
+ u32 qdepth_bytes;
+ u32 qdepth_num_msdu;
+ u32 qdepth_num_mpdu;
+ u32 last_scheduled_tsmp;
+ u32 pause_module_id;
+ u32 block_module_id;
+ u32 tid_tx_airtime;
+};
+
+/* Tidq stats */
+struct htt_tx_tid_stats_v1_tlv {
+ /* Stored as little endian */
+ u8 tid_name[MAX_HTT_TID_NAME];
+ u32 sw_peer_id__tid_num;
+ u32 num_sched_pending__num_ppdu_in_hwq;
+ u32 tid_flags;
+ u32 max_qdepth_bytes;
+ u32 max_qdepth_n_msdus;
+ u32 rsvd;
+
+ u32 qdepth_bytes;
+ u32 qdepth_num_msdu;
+ u32 qdepth_num_mpdu;
+ u32 last_scheduled_tsmp;
+ u32 pause_module_id;
+ u32 block_module_id;
+ u32 tid_tx_airtime;
+ u32 allow_n_flags;
+ u32 sendn_frms_allowed;
+};
+
+struct htt_rx_tid_stats_tlv {
+ u32 sw_peer_id__tid_num;
+ u8 tid_name[MAX_HTT_TID_NAME];
+ u32 dup_in_reorder;
+ u32 dup_past_outside_window;
+ u32 dup_past_within_window;
+ u32 rxdesc_err_decrypt;
+ u32 tid_rx_airtime;
+};
+
+#define HTT_MAX_COUNTER_NAME 8
+struct htt_counter_tlv {
+ u8 counter_name[HTT_MAX_COUNTER_NAME];
+ u32 count;
+};
+
+struct htt_peer_stats_cmn_tlv {
+ u32 ppdu_cnt;
+ u32 mpdu_cnt;
+ u32 msdu_cnt;
+ u32 pause_bitmap;
+ u32 block_bitmap;
+ u32 current_timestamp;
+ u32 peer_tx_airtime;
+ u32 peer_rx_airtime;
+ s32 rssi;
+ u32 peer_enqueued_count_low;
+ u32 peer_enqueued_count_high;
+ u32 peer_dequeued_count_low;
+ u32 peer_dequeued_count_high;
+ u32 peer_dropped_count_low;
+ u32 peer_dropped_count_high;
+ u32 ppdu_transmitted_bytes_low;
+ u32 ppdu_transmitted_bytes_high;
+ u32 peer_ttl_removed_count;
+ u32 inactive_time;
+};
+
+struct htt_peer_details_tlv {
+ u32 peer_type;
+ u32 sw_peer_id;
+ u32 vdev_pdev_ast_idx;
+ struct htt_mac_addr mac_addr;
+ u32 peer_flags;
+ u32 qpeer_flags;
+};
+
+enum htt_stats_param_type {
+ HTT_STATS_PREAM_OFDM,
+ HTT_STATS_PREAM_CCK,
+ HTT_STATS_PREAM_HT,
+ HTT_STATS_PREAM_VHT,
+ HTT_STATS_PREAM_HE,
+ HTT_STATS_PREAM_RSVD,
+ HTT_STATS_PREAM_RSVD1,
+
+ HTT_STATS_PREAM_COUNT,
+};
+
+#define HTT_TX_PEER_STATS_NUM_MCS_COUNTERS 12
+#define HTT_TX_PEER_STATS_NUM_GI_COUNTERS 4
+#define HTT_TX_PEER_STATS_NUM_DCM_COUNTERS 5
+#define HTT_TX_PEER_STATS_NUM_BW_COUNTERS 4
+#define HTT_TX_PEER_STATS_NUM_SPATIAL_STREAMS 8
+#define HTT_TX_PEER_STATS_NUM_PREAMBLE_TYPES HTT_STATS_PREAM_COUNT
+
+struct htt_tx_peer_rate_stats_tlv {
+ u32 tx_ldpc;
+ u32 rts_cnt;
+ u32 ack_rssi;
+
+ u32 tx_mcs[HTT_TX_PEER_STATS_NUM_MCS_COUNTERS];
+ u32 tx_su_mcs[HTT_TX_PEER_STATS_NUM_MCS_COUNTERS];
+ u32 tx_mu_mcs[HTT_TX_PEER_STATS_NUM_MCS_COUNTERS];
+ /* element 0,1, ...7 -> NSS 1,2, ...8 */
+ u32 tx_nss[HTT_TX_PEER_STATS_NUM_SPATIAL_STREAMS];
+ /* element 0: 20 MHz, 1: 40 MHz, 2: 80 MHz, 3: 160 and 80+80 MHz */
+ u32 tx_bw[HTT_TX_PEER_STATS_NUM_BW_COUNTERS];
+ u32 tx_stbc[HTT_TX_PEER_STATS_NUM_MCS_COUNTERS];
+ u32 tx_pream[HTT_TX_PEER_STATS_NUM_PREAMBLE_TYPES];
+
+ /* Counters to track number of tx packets in each GI
+ * (400us, 800us, 1600us & 3200us) in each mcs (0-11)
+ */
+ u32 tx_gi[HTT_TX_PEER_STATS_NUM_GI_COUNTERS][HTT_TX_PEER_STATS_NUM_MCS_COUNTERS];
+
+ /* Counters to track packets in dcm mcs (MCS 0, 1, 3, 4) */
+ u32 tx_dcm[HTT_TX_PEER_STATS_NUM_DCM_COUNTERS];
+};
+
+#define HTT_RX_PEER_STATS_NUM_MCS_COUNTERS 12
+#define HTT_RX_PEER_STATS_NUM_GI_COUNTERS 4
+#define HTT_RX_PEER_STATS_NUM_DCM_COUNTERS 5
+#define HTT_RX_PEER_STATS_NUM_BW_COUNTERS 4
+#define HTT_RX_PEER_STATS_NUM_SPATIAL_STREAMS 8
+#define HTT_RX_PEER_STATS_NUM_PREAMBLE_TYPES HTT_STATS_PREAM_COUNT
+
+struct htt_rx_peer_rate_stats_tlv {
+ u32 nsts;
+
+ /* Number of rx ldpc packets */
+ u32 rx_ldpc;
+ /* Number of rx rts packets */
+ u32 rts_cnt;
+
+ u32 rssi_mgmt; /* units = dB above noise floor */
+ u32 rssi_data; /* units = dB above noise floor */
+ u32 rssi_comb; /* units = dB above noise floor */
+ u32 rx_mcs[HTT_RX_PEER_STATS_NUM_MCS_COUNTERS];
+ /* element 0,1, ...7 -> NSS 1,2, ...8 */
+ u32 rx_nss[HTT_RX_PEER_STATS_NUM_SPATIAL_STREAMS];
+ u32 rx_dcm[HTT_RX_PEER_STATS_NUM_DCM_COUNTERS];
+ u32 rx_stbc[HTT_RX_PEER_STATS_NUM_MCS_COUNTERS];
+ /* element 0: 20 MHz, 1: 40 MHz, 2: 80 MHz, 3: 160 and 80+80 MHz */
+ u32 rx_bw[HTT_RX_PEER_STATS_NUM_BW_COUNTERS];
+ u32 rx_pream[HTT_RX_PEER_STATS_NUM_PREAMBLE_TYPES];
+ /* units = dB above noise floor */
+ u8 rssi_chain[HTT_RX_PEER_STATS_NUM_SPATIAL_STREAMS]
+ [HTT_RX_PEER_STATS_NUM_BW_COUNTERS];
+
+ /* Counters to track number of rx packets in each GI in each mcs (0-11) */
+ u32 rx_gi[HTT_RX_PEER_STATS_NUM_GI_COUNTERS]
+ [HTT_RX_PEER_STATS_NUM_MCS_COUNTERS];
+};
+
+enum htt_peer_stats_req_mode {
+ HTT_PEER_STATS_REQ_MODE_NO_QUERY,
+ HTT_PEER_STATS_REQ_MODE_QUERY_TQM,
+ HTT_PEER_STATS_REQ_MODE_FLUSH_TQM,
+};
+
+enum htt_peer_stats_tlv_enum {
+ HTT_PEER_STATS_CMN_TLV = 0,
+ HTT_PEER_DETAILS_TLV = 1,
+ HTT_TX_PEER_RATE_STATS_TLV = 2,
+ HTT_RX_PEER_RATE_STATS_TLV = 3,
+ HTT_TX_TID_STATS_TLV = 4,
+ HTT_RX_TID_STATS_TLV = 5,
+ HTT_MSDU_FLOW_STATS_TLV = 6,
+
+ HTT_PEER_STATS_MAX_TLV = 31,
+};
+
+/* =========== MUMIMO HWQ stats =========== */
+/* MU MIMO stats per hwQ */
+struct htt_tx_hwq_mu_mimo_sch_stats_tlv {
+ u32 mu_mimo_sch_posted;
+ u32 mu_mimo_sch_failed;
+ u32 mu_mimo_ppdu_posted;
+};
+
+struct htt_tx_hwq_mu_mimo_mpdu_stats_tlv {
+ u32 mu_mimo_mpdus_queued_usr;
+ u32 mu_mimo_mpdus_tried_usr;
+ u32 mu_mimo_mpdus_failed_usr;
+ u32 mu_mimo_mpdus_requeued_usr;
+ u32 mu_mimo_err_no_ba_usr;
+ u32 mu_mimo_mpdu_underrun_usr;
+ u32 mu_mimo_ampdu_underrun_usr;
+};
+
+struct htt_tx_hwq_mu_mimo_cmn_stats_tlv {
+ u32 mac_id__hwq_id__word;
+};
+
+/* == TX HWQ STATS == */
+struct htt_tx_hwq_stats_cmn_tlv {
+ u32 mac_id__hwq_id__word;
+
+ /* PPDU level stats */
+ u32 xretry;
+ u32 underrun_cnt;
+ u32 flush_cnt;
+ u32 filt_cnt;
+ u32 null_mpdu_bmap;
+ u32 user_ack_failure;
+ u32 ack_tlv_proc;
+ u32 sched_id_proc;
+ u32 null_mpdu_tx_count;
+ u32 mpdu_bmap_not_recvd;
+
+ /* Selfgen stats per hwQ */
+ u32 num_bar;
+ u32 rts;
+ u32 cts2self;
+ u32 qos_null;
+
+ /* MPDU level stats */
+ u32 mpdu_tried_cnt;
+ u32 mpdu_queued_cnt;
+ u32 mpdu_ack_fail_cnt;
+ u32 mpdu_filt_cnt;
+ u32 false_mpdu_ack_count;
+
+ u32 txq_timeout;
+};
+
+/* NOTE: Variable length TLV, use length spec to infer array size */
+struct htt_tx_hwq_difs_latency_stats_tlv_v {
+ u32 hist_intvl;
+	/* histogram of ppdu post to hwsch -> cmd status received */
+ u32 difs_latency_hist[0]; /* HTT_TX_HWQ_MAX_DIFS_LATENCY_BINS */
+};
+
+/* NOTE: Variable length TLV, use length spec to infer array size */
+struct htt_tx_hwq_cmd_result_stats_tlv_v {
+ /* Histogram of sched cmd result */
+ u32 cmd_result[0]; /* HTT_TX_HWQ_MAX_CMD_RESULT_STATS */
+};
+
+/* NOTE: Variable length TLV, use length spec to infer array size */
+struct htt_tx_hwq_cmd_stall_stats_tlv_v {
+	/* Histogram of various pause conditions */
+ u32 cmd_stall_status[0]; /* HTT_TX_HWQ_MAX_CMD_STALL_STATS */
+};
+
+/* NOTE: Variable length TLV, use length spec to infer array size */
+struct htt_tx_hwq_fes_result_stats_tlv_v {
+ /* Histogram of number of user fes result */
+ u32 fes_result[0]; /* HTT_TX_HWQ_MAX_FES_RESULT_STATS */
+};
+
+/* NOTE: Variable length TLV, use length spec to infer array size
+ *
+ * The hwq_tried_mpdu_cnt_hist is a histogram of MPDU tries per HWQ.
+ * The tries here is the count of the MPDUs within a PPDU that the HW
+ * had attempted to transmit on air, for the HWSCH schedule command
+ * submitted by FW in this HWQ. It is not the retry attempts. The
+ * histogram bins are 0-29, 30-59, 60-89 and so on. There are 10 bins
+ * in this histogram.
+ * They are defined in FW using the following macros:
+ * #define WAL_MAX_TRIED_MPDU_CNT_HISTOGRAM 9
+ * #define WAL_TRIED_MPDU_CNT_HISTOGRAM_INTERVAL 30
+ */
+struct htt_tx_hwq_tried_mpdu_cnt_hist_tlv_v {
+ u32 hist_bin_size;
+ /* Histogram of number of mpdus on tried mpdu */
+ u32 tried_mpdu_cnt_hist[0]; /* HTT_TX_HWQ_TRIED_MPDU_CNT_HIST */
+};
+
+/* NOTE: Variable length TLV, use length spec to infer array size
+ *
+ * The txop_used_cnt_hist is the histogram of txop per burst. After
+ * completing the burst, we identify the txop used in the burst and
+ * increment the corresponding bin.
+ * Each bin represents 1 ms and we have 10 bins in this histogram.
+ * They are defined in FW using the following macros:
+ * #define WAL_MAX_TXOP_USED_CNT_HISTOGRAM 10
+ * #define WAL_TXOP_USED_HISTOGRAM_INTERVAL 1000 (1 ms)
+ */
+struct htt_tx_hwq_txop_used_cnt_hist_tlv_v {
+ /* Histogram of txop used cnt */
+ u32 txop_used_cnt_hist[0]; /* HTT_TX_HWQ_TXOP_USED_CNT_HIST */
+};
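+
+/* Editorial sketch, not part of the patch: with the 1 ms bins described
+ * above, a burst's txop usage in microseconds would be binned as (the
+ * helper name is hypothetical):
+ */
+static inline u32 htt_txop_used_bin(u32 txop_used_us)
+{
+	/* 0-999 us -> bin 0, 1000-1999 us -> bin 1, ..., capped at bin 9 */
+	return min_t(u32, txop_used_us / 1000, 9);
+}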
+
+/* == TX SELFGEN STATS == */
+struct htt_tx_selfgen_cmn_stats_tlv {
+ u32 mac_id__word;
+ u32 su_bar;
+ u32 rts;
+ u32 cts2self;
+ u32 qos_null;
+ u32 delayed_bar_1; /* MU user 1 */
+ u32 delayed_bar_2; /* MU user 2 */
+ u32 delayed_bar_3; /* MU user 3 */
+ u32 delayed_bar_4; /* MU user 4 */
+ u32 delayed_bar_5; /* MU user 5 */
+ u32 delayed_bar_6; /* MU user 6 */
+ u32 delayed_bar_7; /* MU user 7 */
+};
+
+struct htt_tx_selfgen_ac_stats_tlv {
+ /* 11AC */
+ u32 ac_su_ndpa;
+ u32 ac_su_ndp;
+ u32 ac_mu_mimo_ndpa;
+ u32 ac_mu_mimo_ndp;
+ u32 ac_mu_mimo_brpoll_1; /* MU user 1 */
+ u32 ac_mu_mimo_brpoll_2; /* MU user 2 */
+ u32 ac_mu_mimo_brpoll_3; /* MU user 3 */
+};
+
+struct htt_tx_selfgen_ax_stats_tlv {
+ /* 11AX */
+ u32 ax_su_ndpa;
+ u32 ax_su_ndp;
+ u32 ax_mu_mimo_ndpa;
+ u32 ax_mu_mimo_ndp;
+ u32 ax_mu_mimo_brpoll_1; /* MU user 1 */
+ u32 ax_mu_mimo_brpoll_2; /* MU user 2 */
+ u32 ax_mu_mimo_brpoll_3; /* MU user 3 */
+ u32 ax_mu_mimo_brpoll_4; /* MU user 4 */
+ u32 ax_mu_mimo_brpoll_5; /* MU user 5 */
+ u32 ax_mu_mimo_brpoll_6; /* MU user 6 */
+ u32 ax_mu_mimo_brpoll_7; /* MU user 7 */
+ u32 ax_basic_trigger;
+ u32 ax_bsr_trigger;
+ u32 ax_mu_bar_trigger;
+ u32 ax_mu_rts_trigger;
+};
+
+struct htt_tx_selfgen_ac_err_stats_tlv {
+ /* 11AC error stats */
+ u32 ac_su_ndp_err;
+ u32 ac_su_ndpa_err;
+ u32 ac_mu_mimo_ndpa_err;
+ u32 ac_mu_mimo_ndp_err;
+ u32 ac_mu_mimo_brp1_err;
+ u32 ac_mu_mimo_brp2_err;
+ u32 ac_mu_mimo_brp3_err;
+};
+
+struct htt_tx_selfgen_ax_err_stats_tlv {
+ /* 11AX error stats */
+ u32 ax_su_ndp_err;
+ u32 ax_su_ndpa_err;
+ u32 ax_mu_mimo_ndpa_err;
+ u32 ax_mu_mimo_ndp_err;
+ u32 ax_mu_mimo_brp1_err;
+ u32 ax_mu_mimo_brp2_err;
+ u32 ax_mu_mimo_brp3_err;
+ u32 ax_mu_mimo_brp4_err;
+ u32 ax_mu_mimo_brp5_err;
+ u32 ax_mu_mimo_brp6_err;
+ u32 ax_mu_mimo_brp7_err;
+ u32 ax_basic_trigger_err;
+ u32 ax_bsr_trigger_err;
+ u32 ax_mu_bar_trigger_err;
+ u32 ax_mu_rts_trigger_err;
+};
+
+/* == TX MU STATS == */
+#define HTT_TX_PDEV_STATS_NUM_AC_MUMIMO_USER_STATS 4
+#define HTT_TX_PDEV_STATS_NUM_AX_MUMIMO_USER_STATS 8
+#define HTT_TX_PDEV_STATS_NUM_OFDMA_USER_STATS 74
+
+struct htt_tx_pdev_mu_mimo_sch_stats_tlv {
+ /* mu-mimo sw sched cmd stats */
+ u32 mu_mimo_sch_posted;
+ u32 mu_mimo_sch_failed;
+ /* MU PPDU stats per hwQ */
+ u32 mu_mimo_ppdu_posted;
+ /*
+ * Counts the number of users in each transmission of
+ * the given TX mode.
+ *
+ * Index is the number of users - 1.
+ */
+ u32 ac_mu_mimo_sch_nusers[HTT_TX_PDEV_STATS_NUM_AC_MUMIMO_USER_STATS];
+ u32 ax_mu_mimo_sch_nusers[HTT_TX_PDEV_STATS_NUM_AX_MUMIMO_USER_STATS];
+ u32 ax_ofdma_sch_nusers[HTT_TX_PDEV_STATS_NUM_OFDMA_USER_STATS];
+};
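+
+/* Editorial sketch, not part of the patch: since the array index is the
+ * number of users minus one, firmware would account a 3-user 11ac
+ * MU-MIMO transmission as below (the function is hypothetical):
+ */
+static inline void
+htt_count_ac_mu_mimo_users(struct htt_tx_pdev_mu_mimo_sch_stats_tlv *tlv,
+			   u32 nusers)
+{
+	/* element N counts transmissions carrying N + 1 users */
+	tlv->ac_mu_mimo_sch_nusers[nusers - 1]++;
+}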
+
+struct htt_tx_pdev_mu_mimo_mpdu_stats_tlv {
+ u32 mu_mimo_mpdus_queued_usr;
+ u32 mu_mimo_mpdus_tried_usr;
+ u32 mu_mimo_mpdus_failed_usr;
+ u32 mu_mimo_mpdus_requeued_usr;
+ u32 mu_mimo_err_no_ba_usr;
+ u32 mu_mimo_mpdu_underrun_usr;
+ u32 mu_mimo_ampdu_underrun_usr;
+
+ u32 ax_mu_mimo_mpdus_queued_usr;
+ u32 ax_mu_mimo_mpdus_tried_usr;
+ u32 ax_mu_mimo_mpdus_failed_usr;
+ u32 ax_mu_mimo_mpdus_requeued_usr;
+ u32 ax_mu_mimo_err_no_ba_usr;
+ u32 ax_mu_mimo_mpdu_underrun_usr;
+ u32 ax_mu_mimo_ampdu_underrun_usr;
+
+ u32 ax_ofdma_mpdus_queued_usr;
+ u32 ax_ofdma_mpdus_tried_usr;
+ u32 ax_ofdma_mpdus_failed_usr;
+ u32 ax_ofdma_mpdus_requeued_usr;
+ u32 ax_ofdma_err_no_ba_usr;
+ u32 ax_ofdma_mpdu_underrun_usr;
+ u32 ax_ofdma_ampdu_underrun_usr;
+};
+
+#define HTT_STATS_TX_SCHED_MODE_MU_MIMO_AC 1
+#define HTT_STATS_TX_SCHED_MODE_MU_MIMO_AX 2
+#define HTT_STATS_TX_SCHED_MODE_MU_OFDMA_AX 3
+
+struct htt_tx_pdev_mpdu_stats_tlv {
+ /* mpdu level stats */
+ u32 mpdus_queued_usr;
+ u32 mpdus_tried_usr;
+ u32 mpdus_failed_usr;
+ u32 mpdus_requeued_usr;
+ u32 err_no_ba_usr;
+ u32 mpdu_underrun_usr;
+ u32 ampdu_underrun_usr;
+ u32 user_index;
+ u32 tx_sched_mode; /* HTT_STATS_TX_SCHED_MODE_xxx */
+};
+
+/* == TX SCHED STATS == */
+/* NOTE: Variable length TLV, use length spec to infer array size */
+struct htt_sched_txq_cmd_posted_tlv_v {
+ u32 sched_cmd_posted[0]; /* HTT_TX_PDEV_SCHED_TX_MODE_MAX */
+};
+
+/* NOTE: Variable length TLV, use length spec to infer array size */
+struct htt_sched_txq_cmd_reaped_tlv_v {
+ u32 sched_cmd_reaped[0]; /* HTT_TX_PDEV_SCHED_TX_MODE_MAX */
+};
+
+/* NOTE: Variable length TLV, use length spec to infer array size */
+struct htt_sched_txq_sched_order_su_tlv_v {
+ u32 sched_order_su[0]; /* HTT_TX_PDEV_NUM_SCHED_ORDER_LOG */
+};
+
+enum htt_sched_txq_sched_ineligibility_tlv_enum {
+ HTT_SCHED_TID_SKIP_SCHED_MASK_DISABLED = 0,
+ HTT_SCHED_TID_SKIP_NOTIFY_MPDU,
+ HTT_SCHED_TID_SKIP_MPDU_STATE_INVALID,
+ HTT_SCHED_TID_SKIP_SCHED_DISABLED,
+ HTT_SCHED_TID_SKIP_TQM_BYPASS_CMD_PENDING,
+ HTT_SCHED_TID_SKIP_SECOND_SU_SCHEDULE,
+
+ HTT_SCHED_TID_SKIP_CMD_SLOT_NOT_AVAIL,
+ HTT_SCHED_TID_SKIP_NO_ENQ,
+ HTT_SCHED_TID_SKIP_LOW_ENQ,
+ HTT_SCHED_TID_SKIP_PAUSED,
+ HTT_SCHED_TID_SKIP_UL,
+ HTT_SCHED_TID_REMOVE_PAUSED,
+ HTT_SCHED_TID_REMOVE_NO_ENQ,
+ HTT_SCHED_TID_REMOVE_UL,
+ HTT_SCHED_TID_QUERY,
+ HTT_SCHED_TID_SU_ONLY,
+ HTT_SCHED_TID_ELIGIBLE,
+ HTT_SCHED_INELIGIBILITY_MAX,
+};
+
+/* NOTE: Variable length TLV, use length spec to infer array size */
+struct htt_sched_txq_sched_ineligibility_tlv_v {
+ /* indexed by htt_sched_txq_sched_ineligibility_tlv_enum */
+ u32 sched_ineligibility[0];
+};
+
+struct htt_tx_pdev_stats_sched_per_txq_tlv {
+ u32 mac_id__txq_id__word;
+ u32 sched_policy;
+ u32 last_sched_cmd_posted_timestamp;
+ u32 last_sched_cmd_compl_timestamp;
+ u32 sched_2_tac_lwm_count;
+ u32 sched_2_tac_ring_full;
+ u32 sched_cmd_post_failure;
+ u32 num_active_tids;
+ u32 num_ps_schedules;
+ u32 sched_cmds_pending;
+ u32 num_tid_register;
+ u32 num_tid_unregister;
+ u32 num_qstats_queried;
+ u32 qstats_update_pending;
+ u32 last_qstats_query_timestamp;
+ u32 num_tqm_cmdq_full;
+ u32 num_de_sched_algo_trigger;
+ u32 num_rt_sched_algo_trigger;
+ u32 num_tqm_sched_algo_trigger;
+ u32 notify_sched;
+ u32 dur_based_sendn_term;
+};
+
+struct htt_stats_tx_sched_cmn_tlv {
+ /* BIT [ 7 : 0] :- mac_id
+ * BIT [31 : 8] :- reserved
+ */
+ u32 mac_id__word;
+ /* Current timestamp */
+ u32 current_timestamp;
+};
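+
+/* Editorial sketch, not part of the patch: packed "__word" fields such
+ * as mac_id__word carry sub-fields in the bit ranges documented above
+ * and are unpacked with the usual bitfield helpers, e.g.:
+ */
+static inline u8 htt_stats_tx_sched_cmn_mac_id(u32 mac_id__word)
+{
+	/* BIT [7 : 0] of the word holds the mac_id */
+	return FIELD_GET(GENMASK(7, 0), mac_id__word);
+}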
+
+/* == TQM STATS == */
+#define HTT_TX_TQM_MAX_GEN_MPDU_END_REASON 16
+#define HTT_TX_TQM_MAX_LIST_MPDU_END_REASON 16
+#define HTT_TX_TQM_MAX_LIST_MPDU_CNT_HISTOGRAM_BINS 16
+
+/* NOTE: Variable length TLV, use length spec to infer array size */
+struct htt_tx_tqm_gen_mpdu_stats_tlv_v {
+ u32 gen_mpdu_end_reason[0]; /* HTT_TX_TQM_MAX_GEN_MPDU_END_REASON */
+};
+
+/* NOTE: Variable length TLV, use length spec to infer array size */
+struct htt_tx_tqm_list_mpdu_stats_tlv_v {
+ u32 list_mpdu_end_reason[0]; /* HTT_TX_TQM_MAX_LIST_MPDU_END_REASON */
+};
+
+/* NOTE: Variable length TLV, use length spec to infer array size */
+struct htt_tx_tqm_list_mpdu_cnt_tlv_v {
+ u32 list_mpdu_cnt_hist[0];
+ /* HTT_TX_TQM_MAX_LIST_MPDU_CNT_HISTOGRAM_BINS */
+};
+
+struct htt_tx_tqm_pdev_stats_tlv_v {
+ u32 msdu_count;
+ u32 mpdu_count;
+ u32 remove_msdu;
+ u32 remove_mpdu;
+ u32 remove_msdu_ttl;
+ u32 send_bar;
+ u32 bar_sync;
+ u32 notify_mpdu;
+ u32 sync_cmd;
+ u32 write_cmd;
+ u32 hwsch_trigger;
+ u32 ack_tlv_proc;
+ u32 gen_mpdu_cmd;
+ u32 gen_list_cmd;
+ u32 remove_mpdu_cmd;
+ u32 remove_mpdu_tried_cmd;
+ u32 mpdu_queue_stats_cmd;
+ u32 mpdu_head_info_cmd;
+ u32 msdu_flow_stats_cmd;
+ u32 remove_msdu_cmd;
+ u32 remove_msdu_ttl_cmd;
+ u32 flush_cache_cmd;
+ u32 update_mpduq_cmd;
+ u32 enqueue;
+ u32 enqueue_notify;
+ u32 notify_mpdu_at_head;
+ u32 notify_mpdu_state_valid;
+	/*
+	 * On receiving TQM_FLOW_NOT_EMPTY_STATUS from TQM (sent when MSDUs are
+	 * enqueued and the flow is non-empty), if the number of MSDUs exceeds
+	 * the threshold, the matching notify counter is incremented. The
+	 * UDP_THRESH counters are for UDP MSDUs, the NONUDP ones for non-UDP
+	 * MSDUs.
+	 * MSDUQ_SWNOTIFY_UDP_THRESH1 threshold - sched_udp_notify1 is incremented
+	 * MSDUQ_SWNOTIFY_UDP_THRESH2 threshold - sched_udp_notify2 is incremented
+	 * MSDUQ_SWNOTIFY_NONUDP_THRESH1 threshold - sched_nonudp_notify1 is incremented
+	 * MSDUQ_SWNOTIFY_NONUDP_THRESH2 threshold - sched_nonudp_notify2 is incremented
+	 *
+	 * A notify signifies that we trigger the scheduler.
+	 */
+ u32 sched_udp_notify1;
+ u32 sched_udp_notify2;
+ u32 sched_nonudp_notify1;
+ u32 sched_nonudp_notify2;
+};
+
+struct htt_tx_tqm_cmn_stats_tlv {
+ u32 mac_id__word;
+ u32 max_cmdq_id;
+ u32 list_mpdu_cnt_hist_intvl;
+
+ /* Global stats */
+ u32 add_msdu;
+ u32 q_empty;
+ u32 q_not_empty;
+ u32 drop_notification;
+ u32 desc_threshold;
+};
+
+struct htt_tx_tqm_error_stats_tlv {
+ /* Error stats */
+ u32 q_empty_failure;
+ u32 q_not_empty_failure;
+ u32 add_msdu_failure;
+};
+
+/* == TQM CMDQ stats == */
+struct htt_tx_tqm_cmdq_status_tlv {
+ u32 mac_id__cmdq_id__word;
+ u32 sync_cmd;
+ u32 write_cmd;
+ u32 gen_mpdu_cmd;
+ u32 mpdu_queue_stats_cmd;
+ u32 mpdu_head_info_cmd;
+ u32 msdu_flow_stats_cmd;
+ u32 remove_mpdu_cmd;
+ u32 remove_msdu_cmd;
+ u32 flush_cache_cmd;
+ u32 update_mpduq_cmd;
+ u32 update_msduq_cmd;
+};
+
+/* == TX-DE STATS == */
+/* Structures for tx de stats */
+struct htt_tx_de_eapol_packets_stats_tlv {
+ u32 m1_packets;
+ u32 m2_packets;
+ u32 m3_packets;
+ u32 m4_packets;
+ u32 g1_packets;
+ u32 g2_packets;
+};
+
+struct htt_tx_de_classify_failed_stats_tlv {
+ u32 ap_bss_peer_not_found;
+ u32 ap_bcast_mcast_no_peer;
+ u32 sta_delete_in_progress;
+ u32 ibss_no_bss_peer;
+ u32 invalid_vdev_type;
+ u32 invalid_ast_peer_entry;
+ u32 peer_entry_invalid;
+ u32 ethertype_not_ip;
+ u32 eapol_lookup_failed;
+ u32 qpeer_not_allow_data;
+ u32 fse_tid_override;
+ u32 ipv6_jumbogram_zero_length;
+ u32 qos_to_non_qos_in_prog;
+};
+
+struct htt_tx_de_classify_stats_tlv {
+ u32 arp_packets;
+ u32 igmp_packets;
+ u32 dhcp_packets;
+ u32 host_inspected;
+ u32 htt_included;
+ u32 htt_valid_mcs;
+ u32 htt_valid_nss;
+ u32 htt_valid_preamble_type;
+ u32 htt_valid_chainmask;
+ u32 htt_valid_guard_interval;
+ u32 htt_valid_retries;
+ u32 htt_valid_bw_info;
+ u32 htt_valid_power;
+ u32 htt_valid_key_flags;
+ u32 htt_valid_no_encryption;
+ u32 fse_entry_count;
+ u32 fse_priority_be;
+ u32 fse_priority_high;
+ u32 fse_priority_low;
+ u32 fse_traffic_ptrn_be;
+ u32 fse_traffic_ptrn_over_sub;
+ u32 fse_traffic_ptrn_bursty;
+ u32 fse_traffic_ptrn_interactive;
+ u32 fse_traffic_ptrn_periodic;
+ u32 fse_hwqueue_alloc;
+ u32 fse_hwqueue_created;
+ u32 fse_hwqueue_send_to_host;
+ u32 mcast_entry;
+ u32 bcast_entry;
+ u32 htt_update_peer_cache;
+ u32 htt_learning_frame;
+ u32 fse_invalid_peer;
+ /*
+ * mec_notify is HTT TX WBM multicast echo check notification
+ * from firmware to host. FW sends SA addresses to host for all
+ * multicast/broadcast packets received on STA side.
+ */
+ u32 mec_notify;
+};
+
+struct htt_tx_de_classify_status_stats_tlv {
+ u32 eok;
+ u32 classify_done;
+ u32 lookup_failed;
+ u32 send_host_dhcp;
+ u32 send_host_mcast;
+ u32 send_host_unknown_dest;
+ u32 send_host;
+ u32 status_invalid;
+};
+
+struct htt_tx_de_enqueue_packets_stats_tlv {
+ u32 enqueued_pkts;
+ u32 to_tqm;
+ u32 to_tqm_bypass;
+};
+
+struct htt_tx_de_enqueue_discard_stats_tlv {
+ u32 discarded_pkts;
+ u32 local_frames;
+ u32 is_ext_msdu;
+};
+
+struct htt_tx_de_compl_stats_tlv {
+ u32 tcl_dummy_frame;
+ u32 tqm_dummy_frame;
+ u32 tqm_notify_frame;
+ u32 fw2wbm_enq;
+ u32 tqm_bypass_frame;
+};
+
+/*
+ * The htt_tx_de_fw2wbm_ring_full_hist_tlv is a histogram of the time spent
+ * waiting for the fw2wbm ring buffer. A buffer is requested in the FW2WBM
+ * release ring, which may fail when no buffer is available; FW then sleeps
+ * for 200 us and requests it again. This is a histogram of the total time
+ * waited, with bins of 200 ms; there are 10 bins (2 seconds max).
+ * They are defined by the following macros in FW:
+ * #define ENTRIES_PER_BIN_COUNT 1000 // per bin 1000 * 200us = 200ms
+ * #define RING_FULL_BIN_ENTRIES (WAL_TX_DE_FW2WBM_ALLOC_TIMEOUT_COUNT /
+ *                                ENTRIES_PER_BIN_COUNT)
+ */
+struct htt_tx_de_fw2wbm_ring_full_hist_tlv {
+ u32 fw2wbm_ring_full_hist[0];
+};
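+
+/* Editorial sketch, not part of the patch: given the FW macros quoted
+ * above (200 us per retry, 1000 retries per 200 ms bin), the number of
+ * failed allocation retries maps to a bin as (hypothetical helper):
+ */
+static inline u32 htt_fw2wbm_ring_full_bin(u32 alloc_retries)
+{
+	/* 10 bins of 200 ms each cover waits of up to 2 seconds */
+	return min_t(u32, alloc_retries / 1000, 9);
+}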
+
+struct htt_tx_de_cmn_stats_tlv {
+ u32 mac_id__word;
+
+ /* Global Stats */
+ u32 tcl2fw_entry_count;
+ u32 not_to_fw;
+ u32 invalid_pdev_vdev_peer;
+ u32 tcl_res_invalid_addrx;
+ u32 wbm2fw_entry_count;
+ u32 invalid_pdev;
+};
+
+/* == RING-IF STATS == */
+#define HTT_STATS_LOW_WM_BINS 5
+#define HTT_STATS_HIGH_WM_BINS 5
+
+struct htt_ring_if_stats_tlv {
+ u32 base_addr; /* DWORD aligned base memory address of the ring */
+ u32 elem_size;
+ u32 num_elems__prefetch_tail_idx;
+ u32 head_idx__tail_idx;
+ u32 shadow_head_idx__shadow_tail_idx;
+ u32 num_tail_incr;
+ u32 lwm_thresh__hwm_thresh;
+ u32 overrun_hit_count;
+ u32 underrun_hit_count;
+ u32 prod_blockwait_count;
+ u32 cons_blockwait_count;
+ u32 low_wm_hit_count[HTT_STATS_LOW_WM_BINS];
+ u32 high_wm_hit_count[HTT_STATS_HIGH_WM_BINS];
+};
+
+struct htt_ring_if_cmn_tlv {
+ u32 mac_id__word;
+ u32 num_records;
+};
+
+/* == SFM STATS == */
+/* NOTE: Variable length TLV, use length spec to infer array size */
+struct htt_sfm_client_user_tlv_v {
+ /* Number of DWORDS used per user and per client */
+ u32 dwords_used_by_user_n[0];
+};
+
+struct htt_sfm_client_tlv {
+ /* Client ID */
+ u32 client_id;
+ /* Minimum number of buffers */
+ u32 buf_min;
+ /* Maximum number of buffers */
+ u32 buf_max;
+ /* Number of Busy buffers */
+ u32 buf_busy;
+ /* Number of Allocated buffers */
+ u32 buf_alloc;
+ /* Number of Available/Usable buffers */
+ u32 buf_avail;
+ /* Number of users */
+ u32 num_users;
+};
+
+struct htt_sfm_cmn_tlv {
+ u32 mac_id__word;
+ /* Indicates the total number of 128 byte buffers
+ * in the CMEM that are available for buffer sharing
+ */
+ u32 buf_total;
+	/* Indicates that, for a certain client or for all clients,
+	 * there is no dword saved in SFM, refer to SFM_R1_MEM_EMPTY
+	 */
+ u32 mem_empty;
+ /* DEALLOCATE_BUFFERS, refer to register SFM_R0_DEALLOCATE_BUFFERS */
+ u32 deallocate_bufs;
+ /* Number of Records */
+ u32 num_records;
+};
+
+/* == SRNG STATS == */
+struct htt_sring_stats_tlv {
+ u32 mac_id__ring_id__arena__ep;
+ u32 base_addr_lsb; /* DWORD aligned base memory address of the ring */
+ u32 base_addr_msb;
+ u32 ring_size;
+ u32 elem_size;
+
+ u32 num_avail_words__num_valid_words;
+ u32 head_ptr__tail_ptr;
+ u32 consumer_empty__producer_full;
+ u32 prefetch_count__internal_tail_ptr;
+};
+
+struct htt_sring_cmn_tlv {
+ u32 num_records;
+};
+
+/* == PDEV TX RATE CTRL STATS == */
+#define HTT_TX_PDEV_STATS_NUM_MCS_COUNTERS 12
+#define HTT_TX_PDEV_STATS_NUM_GI_COUNTERS 4
+#define HTT_TX_PDEV_STATS_NUM_DCM_COUNTERS 5
+#define HTT_TX_PDEV_STATS_NUM_BW_COUNTERS 4
+#define HTT_TX_PDEV_STATS_NUM_SPATIAL_STREAMS 8
+#define HTT_TX_PDEV_STATS_NUM_PREAMBLE_TYPES HTT_STATS_PREAM_COUNT
+#define HTT_TX_PDEV_STATS_NUM_LEGACY_CCK_STATS 4
+#define HTT_TX_PDEV_STATS_NUM_LEGACY_OFDM_STATS 8
+#define HTT_TX_PDEV_STATS_NUM_LTF 4
+
+#define HTT_TX_NUM_OF_SOUNDING_STATS_WORDS \
+ (HTT_TX_PDEV_STATS_NUM_BW_COUNTERS * \
+ HTT_TX_PDEV_STATS_NUM_AX_MUMIMO_USER_STATS)
+
+struct htt_tx_pdev_rate_stats_tlv {
+ u32 mac_id__word;
+ u32 tx_ldpc;
+ u32 rts_cnt;
+ /* RSSI value of last ack packet (units = dB above noise floor) */
+ u32 ack_rssi;
+
+ u32 tx_mcs[HTT_TX_PDEV_STATS_NUM_MCS_COUNTERS];
+
+ u32 tx_su_mcs[HTT_TX_PDEV_STATS_NUM_MCS_COUNTERS];
+ u32 tx_mu_mcs[HTT_TX_PDEV_STATS_NUM_MCS_COUNTERS];
+
+ /* element 0,1, ...7 -> NSS 1,2, ...8 */
+ u32 tx_nss[HTT_TX_PDEV_STATS_NUM_SPATIAL_STREAMS];
+ /* element 0: 20 MHz, 1: 40 MHz, 2: 80 MHz, 3: 160 and 80+80 MHz */
+ u32 tx_bw[HTT_TX_PDEV_STATS_NUM_BW_COUNTERS];
+ u32 tx_stbc[HTT_TX_PDEV_STATS_NUM_MCS_COUNTERS];
+ u32 tx_pream[HTT_TX_PDEV_STATS_NUM_PREAMBLE_TYPES];
+
+ /* Counters to track number of tx packets
+ * in each GI (400us, 800us, 1600us & 3200us) in each mcs (0-11)
+ */
+ u32 tx_gi[HTT_TX_PDEV_STATS_NUM_GI_COUNTERS][HTT_TX_PDEV_STATS_NUM_MCS_COUNTERS];
+
+ /* Counters to track packets in dcm mcs (MCS 0, 1, 3, 4) */
+ u32 tx_dcm[HTT_TX_PDEV_STATS_NUM_DCM_COUNTERS];
+ /* Number of CTS-acknowledged RTS packets */
+ u32 rts_success;
+
+ /*
+ * Counters for legacy 11a and 11b transmissions.
+ *
+ * The index corresponds to:
+ *
+ * CCK: 0: 1 Mbps, 1: 2 Mbps, 2: 5.5 Mbps, 3: 11 Mbps
+ *
+ * OFDM: 0: 6 Mbps, 1: 9 Mbps, 2: 12 Mbps, 3: 18 Mbps,
+ * 4: 24 Mbps, 5: 36 Mbps, 6: 48 Mbps, 7: 54 Mbps
+ */
+ u32 tx_legacy_cck_rate[HTT_TX_PDEV_STATS_NUM_LEGACY_CCK_STATS];
+ u32 tx_legacy_ofdm_rate[HTT_TX_PDEV_STATS_NUM_LEGACY_OFDM_STATS];
+
+ u32 ac_mu_mimo_tx_ldpc;
+ u32 ax_mu_mimo_tx_ldpc;
+ u32 ofdma_tx_ldpc;
+
+ /*
+ * Counters for 11ax HE LTF selection during TX.
+ *
+ * The index corresponds to:
+ *
+ * 0: unused, 1: 1x LTF, 2: 2x LTF, 3: 4x LTF
+ */
+ u32 tx_he_ltf[HTT_TX_PDEV_STATS_NUM_LTF];
+
+ u32 ac_mu_mimo_tx_mcs[HTT_TX_PDEV_STATS_NUM_MCS_COUNTERS];
+ u32 ax_mu_mimo_tx_mcs[HTT_TX_PDEV_STATS_NUM_MCS_COUNTERS];
+ u32 ofdma_tx_mcs[HTT_TX_PDEV_STATS_NUM_MCS_COUNTERS];
+
+ u32 ac_mu_mimo_tx_nss[HTT_TX_PDEV_STATS_NUM_SPATIAL_STREAMS];
+ u32 ax_mu_mimo_tx_nss[HTT_TX_PDEV_STATS_NUM_SPATIAL_STREAMS];
+ u32 ofdma_tx_nss[HTT_TX_PDEV_STATS_NUM_SPATIAL_STREAMS];
+
+ u32 ac_mu_mimo_tx_bw[HTT_TX_PDEV_STATS_NUM_BW_COUNTERS];
+ u32 ax_mu_mimo_tx_bw[HTT_TX_PDEV_STATS_NUM_BW_COUNTERS];
+ u32 ofdma_tx_bw[HTT_TX_PDEV_STATS_NUM_BW_COUNTERS];
+
+ u32 ac_mu_mimo_tx_gi[HTT_TX_PDEV_STATS_NUM_GI_COUNTERS]
+ [HTT_TX_PDEV_STATS_NUM_MCS_COUNTERS];
+ u32 ax_mu_mimo_tx_gi[HTT_TX_PDEV_STATS_NUM_GI_COUNTERS]
+ [HTT_TX_PDEV_STATS_NUM_MCS_COUNTERS];
+ u32 ofdma_tx_gi[HTT_TX_PDEV_STATS_NUM_GI_COUNTERS]
+ [HTT_TX_PDEV_STATS_NUM_MCS_COUNTERS];
+};
+
+/* == PDEV RX RATE CTRL STATS == */
+#define HTT_RX_PDEV_STATS_NUM_LEGACY_CCK_STATS 4
+#define HTT_RX_PDEV_STATS_NUM_LEGACY_OFDM_STATS 8
+#define HTT_RX_PDEV_STATS_NUM_MCS_COUNTERS 12
+#define HTT_RX_PDEV_STATS_NUM_GI_COUNTERS 4
+#define HTT_RX_PDEV_STATS_NUM_DCM_COUNTERS 5
+#define HTT_RX_PDEV_STATS_NUM_BW_COUNTERS 4
+#define HTT_RX_PDEV_STATS_NUM_SPATIAL_STREAMS 8
+#define HTT_RX_PDEV_STATS_NUM_PREAMBLE_TYPES HTT_STATS_PREAM_COUNT
+#define HTT_RX_PDEV_MAX_OFDMA_NUM_USER 8
+#define HTT_RX_PDEV_STATS_RXEVM_MAX_PILOTS_PER_NSS 16
+
+struct htt_rx_pdev_rate_stats_tlv {
+ u32 mac_id__word;
+ u32 nsts;
+
+ u32 rx_ldpc;
+ u32 rts_cnt;
+
+ u32 rssi_mgmt; /* units = dB above noise floor */
+ u32 rssi_data; /* units = dB above noise floor */
+ u32 rssi_comb; /* units = dB above noise floor */
+ u32 rx_mcs[HTT_RX_PDEV_STATS_NUM_MCS_COUNTERS];
+ /* element 0,1, ...7 -> NSS 1,2, ...8 */
+ u32 rx_nss[HTT_RX_PDEV_STATS_NUM_SPATIAL_STREAMS];
+ u32 rx_dcm[HTT_RX_PDEV_STATS_NUM_DCM_COUNTERS];
+ u32 rx_stbc[HTT_RX_PDEV_STATS_NUM_MCS_COUNTERS];
+ /* element 0: 20 MHz, 1: 40 MHz, 2: 80 MHz, 3: 160 and 80+80 MHz */
+ u32 rx_bw[HTT_RX_PDEV_STATS_NUM_BW_COUNTERS];
+ u32 rx_pream[HTT_RX_PDEV_STATS_NUM_PREAMBLE_TYPES];
+ u8 rssi_chain[HTT_RX_PDEV_STATS_NUM_SPATIAL_STREAMS]
+ [HTT_RX_PDEV_STATS_NUM_BW_COUNTERS];
+ /* units = dB above noise floor */
+
+ /* Counters to track number of rx packets
+ * in each GI in each mcs (0-11)
+ */
+ u32 rx_gi[HTT_RX_PDEV_STATS_NUM_GI_COUNTERS][HTT_RX_PDEV_STATS_NUM_MCS_COUNTERS];
+ s32 rssi_in_dbm; /* rx Signal Strength value in dBm unit */
+
+ u32 rx_11ax_su_ext;
+ u32 rx_11ac_mumimo;
+ u32 rx_11ax_mumimo;
+ u32 rx_11ax_ofdma;
+ u32 txbf;
+ u32 rx_legacy_cck_rate[HTT_RX_PDEV_STATS_NUM_LEGACY_CCK_STATS];
+ u32 rx_legacy_ofdm_rate[HTT_RX_PDEV_STATS_NUM_LEGACY_OFDM_STATS];
+ u32 rx_active_dur_us_low;
+ u32 rx_active_dur_us_high;
+
+ u32 rx_11ax_ul_ofdma;
+
+ u32 ul_ofdma_rx_mcs[HTT_RX_PDEV_STATS_NUM_MCS_COUNTERS];
+	u32 ul_ofdma_rx_gi[HTT_RX_PDEV_STATS_NUM_GI_COUNTERS]
+			  [HTT_RX_PDEV_STATS_NUM_MCS_COUNTERS];
+	u32 ul_ofdma_rx_nss[HTT_RX_PDEV_STATS_NUM_SPATIAL_STREAMS];
+	u32 ul_ofdma_rx_bw[HTT_RX_PDEV_STATS_NUM_BW_COUNTERS];
+ u32 ul_ofdma_rx_stbc;
+ u32 ul_ofdma_rx_ldpc;
+
+ /* record the stats for each user index */
+ u32 rx_ulofdma_non_data_ppdu[HTT_RX_PDEV_MAX_OFDMA_NUM_USER]; /* ppdu level */
+ u32 rx_ulofdma_data_ppdu[HTT_RX_PDEV_MAX_OFDMA_NUM_USER]; /* ppdu level */
+ u32 rx_ulofdma_mpdu_ok[HTT_RX_PDEV_MAX_OFDMA_NUM_USER]; /* mpdu level */
+ u32 rx_ulofdma_mpdu_fail[HTT_RX_PDEV_MAX_OFDMA_NUM_USER]; /* mpdu level */
+
+ u32 nss_count;
+ u32 pilot_count;
+ /* RxEVM stats in dB */
+ s32 rx_pilot_evm_db[HTT_RX_PDEV_STATS_NUM_SPATIAL_STREAMS]
+ [HTT_RX_PDEV_STATS_RXEVM_MAX_PILOTS_PER_NSS];
+ /* rx_pilot_evm_db_mean:
+ * EVM mean across pilots, computed as
+ * mean(10*log10(rx_pilot_evm_linear)) = mean(rx_pilot_evm_db)
+ */
+ s32 rx_pilot_evm_db_mean[HTT_RX_PDEV_STATS_NUM_SPATIAL_STREAMS];
+ s8 rx_ul_fd_rssi[HTT_RX_PDEV_STATS_NUM_SPATIAL_STREAMS]
+ [HTT_RX_PDEV_MAX_OFDMA_NUM_USER]; /* dBm units */
+ /* per_chain_rssi_pkt_type:
+ * This field shows what type of rx frame the per-chain RSSI was computed
+ * on, by recording the frame type and sub-type as bit-fields within this
+ * field:
+ * BIT [3 : 0] :- IEEE80211_FC0_TYPE
+ * BIT [7 : 4] :- IEEE80211_FC0_SUBTYPE
+ * BIT [31 : 8] :- Reserved
+ */
+ u32 per_chain_rssi_pkt_type;
+ s8 rx_per_chain_rssi_in_dbm[HTT_RX_PDEV_STATS_NUM_SPATIAL_STREAMS]
+ [HTT_RX_PDEV_STATS_NUM_BW_COUNTERS];
+};
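+
+/* Editorial sketch, not part of the patch: decoding the frame type and
+ * sub-type recorded in per_chain_rssi_pkt_type, per the bit layout
+ * documented in the struct above (the helper name is hypothetical):
+ */
+static inline void htt_rssi_pkt_type_decode(u32 per_chain_rssi_pkt_type,
+					    u8 *fc0_type, u8 *fc0_subtype)
+{
+	*fc0_type = FIELD_GET(GENMASK(3, 0), per_chain_rssi_pkt_type);
+	*fc0_subtype = FIELD_GET(GENMASK(7, 4), per_chain_rssi_pkt_type);
+}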
+
+/* == RX PDEV/SOC STATS == */
+struct htt_rx_soc_fw_stats_tlv {
+ u32 fw_reo_ring_data_msdu;
+ u32 fw_to_host_data_msdu_bcmc;
+ u32 fw_to_host_data_msdu_uc;
+ u32 ofld_remote_data_buf_recycle_cnt;
+ u32 ofld_remote_free_buf_indication_cnt;
+
+ u32 ofld_buf_to_host_data_msdu_uc;
+ u32 reo_fw_ring_to_host_data_msdu_uc;
+
+ u32 wbm_sw_ring_reap;
+ u32 wbm_forward_to_host_cnt;
+ u32 wbm_target_recycle_cnt;
+
+ u32 target_refill_ring_recycle_cnt;
+};
+
+/* NOTE: Variable length TLV, use length spec to infer array size */
+struct htt_rx_soc_fw_refill_ring_empty_tlv_v {
+ u32 refill_ring_empty_cnt[0]; /* HTT_RX_STATS_REFILL_MAX_RING */
+};
+
+/* NOTE: Variable length TLV, use length spec to infer array size */
+struct htt_rx_soc_fw_refill_ring_num_refill_tlv_v {
+ u32 refill_ring_num_refill[0]; /* HTT_RX_STATS_REFILL_MAX_RING */
+};
+
+/* RXDMA error code from WBM released packets */
+enum htt_rx_rxdma_error_code_enum {
+ HTT_RX_RXDMA_OVERFLOW_ERR = 0,
+ HTT_RX_RXDMA_MPDU_LENGTH_ERR = 1,
+ HTT_RX_RXDMA_FCS_ERR = 2,
+ HTT_RX_RXDMA_DECRYPT_ERR = 3,
+ HTT_RX_RXDMA_TKIP_MIC_ERR = 4,
+ HTT_RX_RXDMA_UNECRYPTED_ERR = 5,
+ HTT_RX_RXDMA_MSDU_LEN_ERR = 6,
+ HTT_RX_RXDMA_MSDU_LIMIT_ERR = 7,
+ HTT_RX_RXDMA_WIFI_PARSE_ERR = 8,
+ HTT_RX_RXDMA_AMSDU_PARSE_ERR = 9,
+ HTT_RX_RXDMA_SA_TIMEOUT_ERR = 10,
+ HTT_RX_RXDMA_DA_TIMEOUT_ERR = 11,
+ HTT_RX_RXDMA_FLOW_TIMEOUT_ERR = 12,
+ HTT_RX_RXDMA_FLUSH_REQUEST = 13,
+ HTT_RX_RXDMA_ERR_CODE_RVSD0 = 14,
+ HTT_RX_RXDMA_ERR_CODE_RVSD1 = 15,
+
+ /* This MAX_ERR_CODE should not be used in any host/target messages,
+ * so that even though it is defined within a host/target interface
+ * definition header file, it isn't actually part of the host/target
+ * interface, and thus can be modified.
+ */
+ HTT_RX_RXDMA_MAX_ERR_CODE
+};
+
+/* NOTE: Variable length TLV, use length spec to infer array size */
+struct htt_rx_soc_fw_refill_ring_num_rxdma_err_tlv_v {
+ u32 rxdma_err[0]; /* HTT_RX_RXDMA_MAX_ERR_CODE */
+};
+
+/* REO error code from WBM released packets */
+enum htt_rx_reo_error_code_enum {
+ HTT_RX_REO_QUEUE_DESC_ADDR_ZERO = 0,
+ HTT_RX_REO_QUEUE_DESC_NOT_VALID = 1,
+ HTT_RX_AMPDU_IN_NON_BA = 2,
+ HTT_RX_NON_BA_DUPLICATE = 3,
+ HTT_RX_BA_DUPLICATE = 4,
+ HTT_RX_REGULAR_FRAME_2K_JUMP = 5,
+ HTT_RX_BAR_FRAME_2K_JUMP = 6,
+ HTT_RX_REGULAR_FRAME_OOR = 7,
+ HTT_RX_BAR_FRAME_OOR = 8,
+ HTT_RX_BAR_FRAME_NO_BA_SESSION = 9,
+ HTT_RX_BAR_FRAME_SN_EQUALS_SSN = 10,
+ HTT_RX_PN_CHECK_FAILED = 11,
+ HTT_RX_2K_ERROR_HANDLING_FLAG_SET = 12,
+ HTT_RX_PN_ERROR_HANDLING_FLAG_SET = 13,
+ HTT_RX_QUEUE_DESCRIPTOR_BLOCKED_SET = 14,
+ HTT_RX_REO_ERR_CODE_RVSD = 15,
+
+ /* This MAX_ERR_CODE should not be used in any host/target messages,
+ * so that even though it is defined within a host/target interface
+ * definition header file, it isn't actually part of the host/target
+ * interface, and thus can be modified.
+ */
+ HTT_RX_REO_MAX_ERR_CODE
+};
+
+/* NOTE: Variable length TLV, use length spec to infer array size */
+struct htt_rx_soc_fw_refill_ring_num_reo_err_tlv_v {
+ u32 reo_err[0]; /* HTT_RX_REO_MAX_ERR_CODE */
+};
+
+/* == RX PDEV STATS == */
+#define HTT_STATS_SUBTYPE_MAX 16
+
+struct htt_rx_pdev_fw_stats_tlv {
+ u32 mac_id__word;
+ u32 ppdu_recvd;
+ u32 mpdu_cnt_fcs_ok;
+ u32 mpdu_cnt_fcs_err;
+ u32 tcp_msdu_cnt;
+ u32 tcp_ack_msdu_cnt;
+ u32 udp_msdu_cnt;
+ u32 other_msdu_cnt;
+ u32 fw_ring_mpdu_ind;
+ u32 fw_ring_mgmt_subtype[HTT_STATS_SUBTYPE_MAX];
+ u32 fw_ring_ctrl_subtype[HTT_STATS_SUBTYPE_MAX];
+ u32 fw_ring_mcast_data_msdu;
+ u32 fw_ring_bcast_data_msdu;
+ u32 fw_ring_ucast_data_msdu;
+ u32 fw_ring_null_data_msdu;
+ u32 fw_ring_mpdu_drop;
+ u32 ofld_local_data_ind_cnt;
+ u32 ofld_local_data_buf_recycle_cnt;
+ u32 drx_local_data_ind_cnt;
+ u32 drx_local_data_buf_recycle_cnt;
+ u32 local_nondata_ind_cnt;
+ u32 local_nondata_buf_recycle_cnt;
+
+ u32 fw_status_buf_ring_refill_cnt;
+ u32 fw_status_buf_ring_empty_cnt;
+ u32 fw_pkt_buf_ring_refill_cnt;
+ u32 fw_pkt_buf_ring_empty_cnt;
+ u32 fw_link_buf_ring_refill_cnt;
+ u32 fw_link_buf_ring_empty_cnt;
+
+ u32 host_pkt_buf_ring_refill_cnt;
+ u32 host_pkt_buf_ring_empty_cnt;
+ u32 mon_pkt_buf_ring_refill_cnt;
+ u32 mon_pkt_buf_ring_empty_cnt;
+ u32 mon_status_buf_ring_refill_cnt;
+ u32 mon_status_buf_ring_empty_cnt;
+ u32 mon_desc_buf_ring_refill_cnt;
+ u32 mon_desc_buf_ring_empty_cnt;
+ u32 mon_dest_ring_update_cnt;
+ u32 mon_dest_ring_full_cnt;
+
+ u32 rx_suspend_cnt;
+ u32 rx_suspend_fail_cnt;
+ u32 rx_resume_cnt;
+ u32 rx_resume_fail_cnt;
+ u32 rx_ring_switch_cnt;
+ u32 rx_ring_restore_cnt;
+ u32 rx_flush_cnt;
+ u32 rx_recovery_reset_cnt;
+};
+
+#define HTT_STATS_PHY_ERR_MAX 43
+
+struct htt_rx_pdev_fw_stats_phy_err_tlv {
+ u32 mac_id__word;
+ u32 total_phy_err_cnt;
+ /* Counts of different types of phy errs
+ * The mapping of PHY error types to phy_err array elements is HW dependent.
+ * The only currently-supported mapping is shown below:
+ *
+ * 0 phyrx_err_phy_off Reception aborted due to receiving a PHY_OFF TLV
+ * 1 phyrx_err_synth_off
+ * 2 phyrx_err_ofdma_timing
+ * 3 phyrx_err_ofdma_signal_parity
+ * 4 phyrx_err_ofdma_rate_illegal
+ * 5 phyrx_err_ofdma_length_illegal
+ * 6 phyrx_err_ofdma_restart
+ * 7 phyrx_err_ofdma_service
+ * 8 phyrx_err_ppdu_ofdma_power_drop
+ * 9 phyrx_err_cck_blokker
+ * 10 phyrx_err_cck_timing
+ * 11 phyrx_err_cck_header_crc
+ * 12 phyrx_err_cck_rate_illegal
+ * 13 phyrx_err_cck_length_illegal
+ * 14 phyrx_err_cck_restart
+ * 15 phyrx_err_cck_service
+ * 16 phyrx_err_cck_power_drop
+ * 17 phyrx_err_ht_crc_err
+ * 18 phyrx_err_ht_length_illegal
+ * 19 phyrx_err_ht_rate_illegal
+ * 20 phyrx_err_ht_zlf
+ * 21 phyrx_err_false_radar_ext
+ * 22 phyrx_err_green_field
+ * 23 phyrx_err_bw_gt_dyn_bw
+ * 24 phyrx_err_leg_ht_mismatch
+ * 25 phyrx_err_vht_crc_error
+ * 26 phyrx_err_vht_siga_unsupported
+ * 27 phyrx_err_vht_lsig_len_invalid
+ * 28 phyrx_err_vht_ndp_or_zlf
+ * 29 phyrx_err_vht_nsym_lt_zero
+ * 30 phyrx_err_vht_rx_extra_symbol_mismatch
+ * 31 phyrx_err_vht_rx_skip_group_id0
+ * 32 phyrx_err_vht_rx_skip_group_id1to62
+ * 33 phyrx_err_vht_rx_skip_group_id63
+ * 34 phyrx_err_ofdm_ldpc_decoder_disabled
+ * 35 phyrx_err_defer_nap
+ * 36 phyrx_err_fdomain_timeout
+ * 37 phyrx_err_lsig_rel_check
+ * 38 phyrx_err_bt_collision
+ * 39 phyrx_err_unsupported_mu_feedback
+ * 40 phyrx_err_ppdu_tx_interrupt_rx
+ * 41 phyrx_err_unsupported_cbf
+ * 42 phyrx_err_other
+ */
+ u32 phy_err[HTT_STATS_PHY_ERR_MAX];
+};
+
+/* NOTE: Variable length TLV, use length spec to infer array size */
+struct htt_rx_pdev_fw_ring_mpdu_err_tlv_v {
+ /* Num error MPDU for each RxDMA error type */
+ u32 fw_ring_mpdu_err[0]; /* HTT_RX_STATS_RXDMA_MAX_ERR */
+};
+
+/* NOTE: Variable length TLV, use length spec to infer array size */
+struct htt_rx_pdev_fw_mpdu_drop_tlv_v {
+ /* Num MPDU dropped */
+ u32 fw_mpdu_drop[0]; /* HTT_RX_STATS_FW_DROP_REASON_MAX */
+};
+
+#define HTT_PDEV_CCA_STATS_TX_FRAME_INFO_PRESENT (0x1)
+#define HTT_PDEV_CCA_STATS_RX_FRAME_INFO_PRESENT (0x2)
+#define HTT_PDEV_CCA_STATS_RX_CLEAR_INFO_PRESENT (0x4)
+#define HTT_PDEV_CCA_STATS_MY_RX_FRAME_INFO_PRESENT (0x8)
+#define HTT_PDEV_CCA_STATS_USEC_CNT_INFO_PRESENT (0x10)
+#define HTT_PDEV_CCA_STATS_MED_RX_IDLE_INFO_PRESENT (0x20)
+#define HTT_PDEV_CCA_STATS_MED_TX_IDLE_GLOBAL_INFO_PRESENT (0x40)
+#define HTT_PDEV_CCA_STATS_CCA_OBBS_USEC_INFO_PRESENT (0x80)
+
+struct htt_pdev_stats_cca_counters_tlv {
+ /* Below values are obtained from the HW Cycles counter registers */
+ u32 tx_frame_usec;
+ u32 rx_frame_usec;
+ u32 rx_clear_usec;
+ u32 my_rx_frame_usec;
+ u32 usec_cnt;
+ u32 med_rx_idle_usec;
+ u32 med_tx_idle_global_usec;
+ u32 cca_obss_usec;
+};
+
+struct htt_pdev_cca_stats_hist_v1_tlv {
+ u32 chan_num;
+ /* Number of CCA records (number of htt_pdev_stats_cca_counters_tlv) */
+ u32 num_records;
+ u32 valid_cca_counters_bitmap;
+ u32 collection_interval;
+
+ /* This struct is followed by an array containing the CCA stats
+ * collected over the last N intervals (when the indication covers
+ * the last N intervals' CCA stats). pdev_cca_stats[0] holds the
+ * oldest CCA stats and pdev_cca_stats[N-1] the most recent:
+ * htt_pdev_stats_cca_counters_tlv cca_hist_tlv[1];
+ */
+};
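
The trailing record array described in the comment can be walked once
num_records is known; a sketch assuming the counter TLVs are packed
directly after the header (the walk and the helper name are
illustrative, not the driver's parser):

/* pdev_cca_stats[0] is the oldest interval, [num_records - 1] the most
 * recent; valid_cca_counters_bitmap gates which counters the firmware
 * actually reported (HTT_PDEV_CCA_STATS_*_INFO_PRESENT above).
 */
static void walk_cca_hist(const struct htt_pdev_cca_stats_hist_v1_tlv *hist)
{
        const struct htt_pdev_stats_cca_counters_tlv *rec =
                (const void *)(hist + 1);
        u32 i;

        for (i = 0; i < hist->num_records; i++, rec++) {
                if (!(hist->valid_cca_counters_bitmap &
                      HTT_PDEV_CCA_STATS_RX_CLEAR_INFO_PRESENT))
                        continue;
                /* rec->rx_clear_usec is valid for this interval */
        }
}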
+
+struct htt_pdev_stats_twt_session_tlv {
+ u32 vdev_id;
+ struct htt_mac_addr peer_mac;
+ u32 flow_id_flags;
+
+ /* TWT_DIALOG_ID_UNAVAILABLE is used
+ * when TWT session is not initiated by host
+ */
+ u32 dialog_id;
+ u32 wake_dura_us;
+ u32 wake_intvl_us;
+ u32 sp_offset_us;
+};
+
+struct htt_pdev_stats_twt_sessions_tlv {
+ u32 pdev_id;
+ u32 num_sessions;
+ struct htt_pdev_stats_twt_session_tlv twt_session[0];
+};
+
+enum htt_rx_reo_resource_sample_id_enum {
+ /* Global link descriptor queued in REO */
+ HTT_RX_REO_RESOURCE_GLOBAL_LINK_DESC_COUNT_0 = 0,
+ HTT_RX_REO_RESOURCE_GLOBAL_LINK_DESC_COUNT_1 = 1,
+ HTT_RX_REO_RESOURCE_GLOBAL_LINK_DESC_COUNT_2 = 2,
+ /* Total number of MSDUs buffered in AC */
+ HTT_RX_REO_RESOURCE_BUFFERS_USED_AC0 = 3,
+ HTT_RX_REO_RESOURCE_BUFFERS_USED_AC1 = 4,
+ HTT_RX_REO_RESOURCE_BUFFERS_USED_AC2 = 5,
+ HTT_RX_REO_RESOURCE_BUFFERS_USED_AC3 = 6,
+ /* Number of queue descriptors of this aging group */
+ HTT_RX_REO_RESOURCE_AGING_NUM_QUEUES_AC0 = 7,
+ HTT_RX_REO_RESOURCE_AGING_NUM_QUEUES_AC1 = 8,
+ HTT_RX_REO_RESOURCE_AGING_NUM_QUEUES_AC2 = 9,
+ HTT_RX_REO_RESOURCE_AGING_NUM_QUEUES_AC3 = 10,
+
+ HTT_RX_REO_RESOURCE_STATS_MAX = 16
+};
+
+struct htt_rx_reo_resource_stats_tlv_v {
+ /* Variable number of records, up to HTT_RX_REO_RESOURCE_STATS_MAX */
+ u32 sample_id;
+ u32 total_max;
+ u32 total_avg;
+ u32 total_sample;
+ u32 non_zeros_avg;
+ u32 non_zeros_sample;
+ u32 last_non_zeros_max;
+ u32 last_non_zeros_min;
+ u32 last_non_zeros_avg;
+ u32 last_non_zeros_sample;
+};
+
+/* == TX SOUNDING STATS == */
+
+enum htt_txbf_sound_steer_modes {
+ HTT_IMPLICIT_TXBF_STEER_STATS = 0,
+ HTT_EXPLICIT_TXBF_SU_SIFS_STEER_STATS = 1,
+ HTT_EXPLICIT_TXBF_SU_RBO_STEER_STATS = 2,
+ HTT_EXPLICIT_TXBF_MU_SIFS_STEER_STATS = 3,
+ HTT_EXPLICIT_TXBF_MU_RBO_STEER_STATS = 4,
+ HTT_TXBF_MAX_NUM_OF_MODES = 5
+};
+
+enum htt_stats_sounding_tx_mode {
+ HTT_TX_AC_SOUNDING_MODE = 0,
+ HTT_TX_AX_SOUNDING_MODE = 1,
+};
+
+struct htt_tx_sounding_stats_tlv {
+ u32 tx_sounding_mode; /* HTT_TX_XX_SOUNDING_MODE */
+ /* Counts number of soundings for all steering modes in each bw */
+ u32 cbf_20[HTT_TXBF_MAX_NUM_OF_MODES];
+ u32 cbf_40[HTT_TXBF_MAX_NUM_OF_MODES];
+ u32 cbf_80[HTT_TXBF_MAX_NUM_OF_MODES];
+ u32 cbf_160[HTT_TXBF_MAX_NUM_OF_MODES];
+ /*
+ * The sounding array is a 2-D array stored as a 1-D array of
+ * u32. The stats for a particular user/bw combination are
+ * referenced with the following:
+ *
+ * sounding[(user * max_bw) + bw]
+ *
+ * ... where max_bw == 4 for 160 MHz
+ */
+ u32 sounding[HTT_TX_NUM_OF_SOUNDING_STATS_WORDS];
+};
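
A tiny helper makes the flattened indexing from the comment explicit
(the helper name is illustrative; max_bw == 4 per the comment):

/* sounding[(user * max_bw) + bw], with max_bw == 4 (20/40/80/160 MHz) */
static inline u32 sounding_idx(u32 user, u32 bw)
{
        return (user * 4) + bw;
}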
+
+struct htt_pdev_obss_pd_stats_tlv {
+ u32 num_obss_tx_ppdu_success;
+ u32 num_obss_tx_ppdu_failure;
+};
+
+void ath11k_debug_htt_stats_init(struct ath11k *ar);
+#endif
diff --git a/drivers/net/wireless/ath/ath11k/debugfs_sta.c b/drivers/net/wireless/ath/ath11k/debugfs_sta.c
new file mode 100644
index 000000000000..743760c9bcae
--- /dev/null
+++ b/drivers/net/wireless/ath/ath11k/debugfs_sta.c
@@ -0,0 +1,543 @@
+// SPDX-License-Identifier: BSD-3-Clause-Clear
+/*
+ * Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
+ */
+
+#include <linux/vmalloc.h>
+
+#include "core.h"
+#include "peer.h"
+#include "debug.h"
+
+void
+ath11k_accumulate_per_peer_tx_stats(struct ath11k_sta *arsta,
+ struct ath11k_per_peer_tx_stats *peer_stats,
+ u8 legacy_rate_idx)
+{
+ struct rate_info *txrate = &arsta->txrate;
+ struct ath11k_htt_tx_stats *tx_stats;
+ int gi, mcs, bw, nss;
+
+ if (!arsta->tx_stats)
+ return;
+
+ tx_stats = arsta->tx_stats;
+ gi = FIELD_GET(RATE_INFO_FLAGS_SHORT_GI, arsta->txrate.flags);
+ mcs = txrate->mcs;
+ bw = txrate->bw;
+ nss = txrate->nss - 1;
+
+#define STATS_OP_FMT(name) tx_stats->stats[ATH11K_STATS_TYPE_##name]
+
+ if (txrate->flags & RATE_INFO_FLAGS_HE_MCS) {
+ STATS_OP_FMT(SUCC).he[0][mcs] += peer_stats->succ_bytes;
+ STATS_OP_FMT(SUCC).he[1][mcs] += peer_stats->succ_pkts;
+ STATS_OP_FMT(FAIL).he[0][mcs] += peer_stats->failed_bytes;
+ STATS_OP_FMT(FAIL).he[1][mcs] += peer_stats->failed_pkts;
+ STATS_OP_FMT(RETRY).he[0][mcs] += peer_stats->retry_bytes;
+ STATS_OP_FMT(RETRY).he[1][mcs] += peer_stats->retry_pkts;
+ } else if (txrate->flags & RATE_INFO_FLAGS_VHT_MCS) {
+ STATS_OP_FMT(SUCC).vht[0][mcs] += peer_stats->succ_bytes;
+ STATS_OP_FMT(SUCC).vht[1][mcs] += peer_stats->succ_pkts;
+ STATS_OP_FMT(FAIL).vht[0][mcs] += peer_stats->failed_bytes;
+ STATS_OP_FMT(FAIL).vht[1][mcs] += peer_stats->failed_pkts;
+ STATS_OP_FMT(RETRY).vht[0][mcs] += peer_stats->retry_bytes;
+ STATS_OP_FMT(RETRY).vht[1][mcs] += peer_stats->retry_pkts;
+ } else if (txrate->flags & RATE_INFO_FLAGS_MCS) {
+ STATS_OP_FMT(SUCC).ht[0][mcs] += peer_stats->succ_bytes;
+ STATS_OP_FMT(SUCC).ht[1][mcs] += peer_stats->succ_pkts;
+ STATS_OP_FMT(FAIL).ht[0][mcs] += peer_stats->failed_bytes;
+ STATS_OP_FMT(FAIL).ht[1][mcs] += peer_stats->failed_pkts;
+ STATS_OP_FMT(RETRY).ht[0][mcs] += peer_stats->retry_bytes;
+ STATS_OP_FMT(RETRY).ht[1][mcs] += peer_stats->retry_pkts;
+ } else {
+ mcs = legacy_rate_idx;
+
+ STATS_OP_FMT(SUCC).legacy[0][mcs] += peer_stats->succ_bytes;
+ STATS_OP_FMT(SUCC).legacy[1][mcs] += peer_stats->succ_pkts;
+ STATS_OP_FMT(FAIL).legacy[0][mcs] += peer_stats->failed_bytes;
+ STATS_OP_FMT(FAIL).legacy[1][mcs] += peer_stats->failed_pkts;
+ STATS_OP_FMT(RETRY).legacy[0][mcs] += peer_stats->retry_bytes;
+ STATS_OP_FMT(RETRY).legacy[1][mcs] += peer_stats->retry_pkts;
+ }
+
+ if (peer_stats->is_ampdu) {
+ tx_stats->ba_fails += peer_stats->ba_fails;
+
+ if (txrate->flags & RATE_INFO_FLAGS_HE_MCS) {
+ STATS_OP_FMT(AMPDU).he[0][mcs] +=
+ peer_stats->succ_bytes + peer_stats->retry_bytes;
+ STATS_OP_FMT(AMPDU).he[1][mcs] +=
+ peer_stats->succ_pkts + peer_stats->retry_pkts;
+ } else if (txrate->flags & RATE_INFO_FLAGS_MCS) {
+ STATS_OP_FMT(AMPDU).ht[0][mcs] +=
+ peer_stats->succ_bytes + peer_stats->retry_bytes;
+ STATS_OP_FMT(AMPDU).ht[1][mcs] +=
+ peer_stats->succ_pkts + peer_stats->retry_pkts;
+ } else {
+ STATS_OP_FMT(AMPDU).vht[0][mcs] +=
+ peer_stats->succ_bytes + peer_stats->retry_bytes;
+ STATS_OP_FMT(AMPDU).vht[1][mcs] +=
+ peer_stats->succ_pkts + peer_stats->retry_pkts;
+ }
+ STATS_OP_FMT(AMPDU).bw[0][bw] +=
+ peer_stats->succ_bytes + peer_stats->retry_bytes;
+ STATS_OP_FMT(AMPDU).nss[0][nss] +=
+ peer_stats->succ_bytes + peer_stats->retry_bytes;
+ STATS_OP_FMT(AMPDU).gi[0][gi] +=
+ peer_stats->succ_bytes + peer_stats->retry_bytes;
+ STATS_OP_FMT(AMPDU).bw[1][bw] +=
+ peer_stats->succ_pkts + peer_stats->retry_pkts;
+ STATS_OP_FMT(AMPDU).nss[1][nss] +=
+ peer_stats->succ_pkts + peer_stats->retry_pkts;
+ STATS_OP_FMT(AMPDU).gi[1][gi] +=
+ peer_stats->succ_pkts + peer_stats->retry_pkts;
+ } else {
+ tx_stats->ack_fails += peer_stats->ba_fails;
+ }
+
+ STATS_OP_FMT(SUCC).bw[0][bw] += peer_stats->succ_bytes;
+ STATS_OP_FMT(SUCC).nss[0][nss] += peer_stats->succ_bytes;
+ STATS_OP_FMT(SUCC).gi[0][gi] += peer_stats->succ_bytes;
+
+ STATS_OP_FMT(SUCC).bw[1][bw] += peer_stats->succ_pkts;
+ STATS_OP_FMT(SUCC).nss[1][nss] += peer_stats->succ_pkts;
+ STATS_OP_FMT(SUCC).gi[1][gi] += peer_stats->succ_pkts;
+
+ STATS_OP_FMT(FAIL).bw[0][bw] += peer_stats->failed_bytes;
+ STATS_OP_FMT(FAIL).nss[0][nss] += peer_stats->failed_bytes;
+ STATS_OP_FMT(FAIL).gi[0][gi] += peer_stats->failed_bytes;
+
+ STATS_OP_FMT(FAIL).bw[1][bw] += peer_stats->failed_pkts;
+ STATS_OP_FMT(FAIL).nss[1][nss] += peer_stats->failed_pkts;
+ STATS_OP_FMT(FAIL).gi[1][gi] += peer_stats->failed_pkts;
+
+ STATS_OP_FMT(RETRY).bw[0][bw] += peer_stats->retry_bytes;
+ STATS_OP_FMT(RETRY).nss[0][nss] += peer_stats->retry_bytes;
+ STATS_OP_FMT(RETRY).gi[0][gi] += peer_stats->retry_bytes;
+
+ STATS_OP_FMT(RETRY).bw[1][bw] += peer_stats->retry_pkts;
+ STATS_OP_FMT(RETRY).nss[1][nss] += peer_stats->retry_pkts;
+ STATS_OP_FMT(RETRY).gi[1][gi] += peer_stats->retry_pkts;
+
+ tx_stats->tx_duration += peer_stats->duration;
+}
+
+void ath11k_update_per_peer_stats_from_txcompl(struct ath11k *ar,
+ struct sk_buff *msdu,
+ struct hal_tx_status *ts)
+{
+ struct ath11k_base *ab = ar->ab;
+ struct ath11k_per_peer_tx_stats *peer_stats = &ar->cached_stats;
+ enum hal_tx_rate_stats_pkt_type pkt_type;
+ enum hal_tx_rate_stats_sgi sgi;
+ enum hal_tx_rate_stats_bw bw;
+ struct ath11k_peer *peer;
+ struct ath11k_sta *arsta;
+ struct ieee80211_sta *sta;
+ u16 rate;
+ u8 rate_idx;
+ int ret;
+ u8 mcs;
+
+ rcu_read_lock();
+ spin_lock_bh(&ab->base_lock);
+ peer = ath11k_peer_find_by_id(ab, ts->peer_id);
+ if (!peer || !peer->sta) {
+ ath11k_warn(ab, "failed to find the peer\n");
+ spin_unlock_bh(&ab->base_lock);
+ rcu_read_unlock();
+ return;
+ }
+
+ sta = peer->sta;
+ arsta = (struct ath11k_sta *)sta->drv_priv;
+
+ memset(&arsta->txrate, 0, sizeof(arsta->txrate));
+ pkt_type = FIELD_GET(HAL_TX_RATE_STATS_INFO0_PKT_TYPE,
+ ts->rate_stats);
+ mcs = FIELD_GET(HAL_TX_RATE_STATS_INFO0_MCS,
+ ts->rate_stats);
+ sgi = FIELD_GET(HAL_TX_RATE_STATS_INFO0_SGI,
+ ts->rate_stats);
+ bw = FIELD_GET(HAL_TX_RATE_STATS_INFO0_BW, ts->rate_stats);
+
+ if (pkt_type == HAL_TX_RATE_STATS_PKT_TYPE_11A ||
+ pkt_type == HAL_TX_RATE_STATS_PKT_TYPE_11B) {
+ ret = ath11k_mac_hw_ratecode_to_legacy_rate(mcs,
+ pkt_type,
+ &rate_idx,
+ &rate);
+ if (ret < 0)
+ goto err_out;
+ arsta->txrate.legacy = rate;
+ } else if (pkt_type == HAL_TX_RATE_STATS_PKT_TYPE_11N) {
+ if (mcs > 7) {
+ ath11k_warn(ab, "Invalid HT mcs index %d\n", mcs);
+ goto err_out;
+ }
+
+ arsta->txrate.mcs = mcs + 8 * (arsta->last_txrate.nss - 1);
+ arsta->txrate.flags = RATE_INFO_FLAGS_MCS;
+ if (sgi)
+ arsta->txrate.flags |= RATE_INFO_FLAGS_SHORT_GI;
+ } else if (pkt_type == HAL_TX_RATE_STATS_PKT_TYPE_11AC) {
+ if (mcs > 9) {
+ ath11k_warn(ab, "Invalid VHT mcs index %d\n", mcs);
+ goto err_out;
+ }
+
+ arsta->txrate.mcs = mcs;
+ arsta->txrate.flags = RATE_INFO_FLAGS_VHT_MCS;
+ if (sgi)
+ arsta->txrate.flags |= RATE_INFO_FLAGS_SHORT_GI;
+ } else if (pkt_type == HAL_TX_RATE_STATS_PKT_TYPE_11AX) {
+ /* TODO */
+ }
+
+ arsta->txrate.nss = arsta->last_txrate.nss;
+ arsta->txrate.bw = ath11k_mac_bw_to_mac80211_bw(bw);
+
+ ath11k_accumulate_per_peer_tx_stats(arsta, peer_stats, rate_idx);
+err_out:
+ spin_unlock_bh(&ab->base_lock);
+ rcu_read_unlock();
+}
+
+static ssize_t ath11k_dbg_sta_dump_tx_stats(struct file *file,
+ char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct ieee80211_sta *sta = file->private_data;
+ struct ath11k_sta *arsta = (struct ath11k_sta *)sta->drv_priv;
+ struct ath11k *ar = arsta->arvif->ar;
+ struct ath11k_htt_data_stats *stats;
+ static const char *str_name[ATH11K_STATS_TYPE_MAX] = {"succ", "fail",
+ "retry", "ampdu"};
+ static const char *str[ATH11K_COUNTER_TYPE_MAX] = {"bytes", "packets"};
+ int len = 0, i, j, k, retval = 0;
+ const int size = 2 * 4096;
+ char *buf;
+
+ buf = kzalloc(size, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
+ mutex_lock(&ar->conf_mutex);
+
+ spin_lock_bh(&ar->data_lock);
+ for (k = 0; k < ATH11K_STATS_TYPE_MAX; k++) {
+ for (j = 0; j < ATH11K_COUNTER_TYPE_MAX; j++) {
+ stats = &arsta->tx_stats->stats[k];
+ len += scnprintf(buf + len, size - len, "%s_%s\n",
+ str_name[k],
+ str[j]);
+ len += scnprintf(buf + len, size - len,
+ " HE MCS %s\n",
+ str[j]);
+ for (i = 0; i < ATH11K_HE_MCS_NUM; i++)
+ len += scnprintf(buf + len, size - len,
+ " %llu ",
+ stats->he[j][i]);
+ len += scnprintf(buf + len, size - len, "\n");
+ len += scnprintf(buf + len, size - len,
+ " VHT MCS %s\n",
+ str[j]);
+ for (i = 0; i < ATH11K_VHT_MCS_NUM; i++)
+ len += scnprintf(buf + len, size - len,
+ " %llu ",
+ stats->vht[j][i]);
+ len += scnprintf(buf + len, size - len, "\n");
+ len += scnprintf(buf + len, size - len, " HT MCS %s\n",
+ str[j]);
+ for (i = 0; i < ATH11K_HT_MCS_NUM; i++)
+ len += scnprintf(buf + len, size - len,
+ " %llu ", stats->ht[j][i]);
+ len += scnprintf(buf + len, size - len, "\n");
+ len += scnprintf(buf + len, size - len,
+ " BW %s (20,40,80,160 MHz)\n", str[j]);
+ len += scnprintf(buf + len, size - len,
+ " %llu %llu %llu %llu\n",
+ stats->bw[j][0], stats->bw[j][1],
+ stats->bw[j][2], stats->bw[j][3]);
+ len += scnprintf(buf + len, size - len,
+ " NSS %s (1x1,2x2,3x3,4x4)\n", str[j]);
+ len += scnprintf(buf + len, size - len,
+ " %llu %llu %llu %llu\n",
+ stats->nss[j][0], stats->nss[j][1],
+ stats->nss[j][2], stats->nss[j][3]);
+ len += scnprintf(buf + len, size - len,
+ " GI %s (0.4us,0.8us,1.6us,3.2us)\n",
+ str[j]);
+ len += scnprintf(buf + len, size - len,
+ " %llu %llu %llu %llu\n",
+ stats->gi[j][0], stats->gi[j][1],
+ stats->gi[j][2], stats->gi[j][3]);
+ len += scnprintf(buf + len, size - len,
+ " legacy rate %s (1,2 ... Mbps)\n ",
+ str[j]);
+ for (i = 0; i < ATH11K_LEGACY_NUM; i++)
+ len += scnprintf(buf + len, size - len, "%llu ",
+ stats->legacy[j][i]);
+ len += scnprintf(buf + len, size - len, "\n");
+ }
+ }
+
+ len += scnprintf(buf + len, size - len,
+ "\nTX duration\n %llu usecs\n",
+ arsta->tx_stats->tx_duration);
+ len += scnprintf(buf + len, size - len,
+ "BA fails\n %llu\n", arsta->tx_stats->ba_fails);
+ len += scnprintf(buf + len, size - len,
+ "ack fails\n %llu\n", arsta->tx_stats->ack_fails);
+ spin_unlock_bh(&ar->data_lock);
+
+ if (len > size)
+ len = size;
+ retval = simple_read_from_buffer(user_buf, count, ppos, buf, len);
+ kfree(buf);
+
+ mutex_unlock(&ar->conf_mutex);
+ return retval;
+}
+
+static const struct file_operations fops_tx_stats = {
+ .read = ath11k_dbg_sta_dump_tx_stats,
+ .open = simple_open,
+ .owner = THIS_MODULE,
+ .llseek = default_llseek,
+};
+
+static ssize_t ath11k_dbg_sta_dump_rx_stats(struct file *file,
+ char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct ieee80211_sta *sta = file->private_data;
+ struct ath11k_sta *arsta = (struct ath11k_sta *)sta->drv_priv;
+ struct ath11k *ar = arsta->arvif->ar;
+ struct ath11k_rx_peer_stats *rx_stats = arsta->rx_stats;
+ int len = 0, i, retval = 0;
+ const int size = 4096;
+ char *buf;
+
+ if (!rx_stats)
+ return -ENOENT;
+
+ buf = kzalloc(size, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
+ mutex_lock(&ar->conf_mutex);
+ spin_lock_bh(&ar->ab->base_lock);
+
+ len += scnprintf(buf + len, size - len, "RX peer stats:\n");
+ len += scnprintf(buf + len, size - len, "Num of MSDUs: %llu\n",
+ rx_stats->num_msdu);
+ len += scnprintf(buf + len, size - len, "Num of MSDUs with TCP L4: %llu\n",
+ rx_stats->tcp_msdu_count);
+ len += scnprintf(buf + len, size - len, "Num of MSDUs with UDP L4: %llu\n",
+ rx_stats->udp_msdu_count);
+ len += scnprintf(buf + len, size - len, "Num of MSDUs part of AMPDU: %llu\n",
+ rx_stats->ampdu_msdu_count);
+ len += scnprintf(buf + len, size - len, "Num of MSDUs not part of AMPDU: %llu\n",
+ rx_stats->non_ampdu_msdu_count);
+ len += scnprintf(buf + len, size - len, "Num of MSDUs using STBC: %llu\n",
+ rx_stats->stbc_count);
+ len += scnprintf(buf + len, size - len, "Num of MSDUs beamformed: %llu\n",
+ rx_stats->beamformed_count);
+ len += scnprintf(buf + len, size - len, "Num of MPDUs with FCS ok: %llu\n",
+ rx_stats->num_mpdu_fcs_ok);
+ len += scnprintf(buf + len, size - len, "Num of MPDUs with FCS error: %llu\n",
+ rx_stats->num_mpdu_fcs_err);
+ len += scnprintf(buf + len, size - len,
+ "GI: 0.8us %llu 0.4us %llu 1.6us %llu 3.2us %llu\n",
+ rx_stats->gi_count[0], rx_stats->gi_count[1],
+ rx_stats->gi_count[2], rx_stats->gi_count[3]);
+ len += scnprintf(buf + len, size - len,
+ "BW: 20Mhz %llu 40Mhz %llu 80Mhz %llu 160Mhz %llu\n",
+ rx_stats->bw_count[0], rx_stats->bw_count[1],
+ rx_stats->bw_count[2], rx_stats->bw_count[3]);
+ len += scnprintf(buf + len, size - len, "BCC %llu LDPC %llu\n",
+ rx_stats->coding_count[0], rx_stats->coding_count[1]);
+ len += scnprintf(buf + len, size - len,
+ "preamble: 11A %llu 11B %llu 11N %llu 11AC %llu 11AX %llu\n",
+ rx_stats->pream_cnt[0], rx_stats->pream_cnt[1],
+ rx_stats->pream_cnt[2], rx_stats->pream_cnt[3],
+ rx_stats->pream_cnt[4]);
+ len += scnprintf(buf + len, size - len,
+ "reception type: SU %llu MU_MIMO %llu MU_OFDMA %llu MU_OFDMA_MIMO %llu\n",
+ rx_stats->reception_type[0], rx_stats->reception_type[1],
+ rx_stats->reception_type[2], rx_stats->reception_type[3]);
+ len += scnprintf(buf + len, size - len, "TID(0-15) Legacy TID(16):");
+ for (i = 0; i <= IEEE80211_NUM_TIDS; i++)
+ len += scnprintf(buf + len, size - len, "%llu ", rx_stats->tid_count[i]);
+ len += scnprintf(buf + len, size - len, "\nMCS(0-11) Legacy MCS(12):");
+ for (i = 0; i < HAL_RX_MAX_MCS + 1; i++)
+ len += scnprintf(buf + len, size - len, "%llu ", rx_stats->mcs_count[i]);
+ len += scnprintf(buf + len, size - len, "\nNSS(1-8):");
+ for (i = 0; i < HAL_RX_MAX_NSS; i++)
+ len += scnprintf(buf + len, size - len, "%llu ", rx_stats->nss_count[i]);
+ len += scnprintf(buf + len, size - len, "\nRX Duration:%llu ",
+ rx_stats->rx_duration);
+ len += scnprintf(buf + len, size - len, "\n");
+
+ spin_unlock_bh(&ar->ab->base_lock);
+
+ if (len > size)
+ len = size;
+ retval = simple_read_from_buffer(user_buf, count, ppos, buf, len);
+ kfree(buf);
+
+ mutex_unlock(&ar->conf_mutex);
+ return retval;
+}
+
+static const struct file_operations fops_rx_stats = {
+ .read = ath11k_dbg_sta_dump_rx_stats,
+ .open = simple_open,
+ .owner = THIS_MODULE,
+ .llseek = default_llseek,
+};
+
+static int
+ath11k_dbg_sta_open_htt_peer_stats(struct inode *inode, struct file *file)
+{
+ struct ieee80211_sta *sta = inode->i_private;
+ struct ath11k_sta *arsta = (struct ath11k_sta *)sta->drv_priv;
+ struct ath11k *ar = arsta->arvif->ar;
+ struct debug_htt_stats_req *stats_req;
+ int ret;
+
+ stats_req = vzalloc(sizeof(*stats_req) + ATH11K_HTT_STATS_BUF_SIZE);
+ if (!stats_req)
+ return -ENOMEM;
+
+ mutex_lock(&ar->conf_mutex);
+ ar->debug.htt_stats.stats_req = stats_req;
+ stats_req->type = ATH11K_DBG_HTT_EXT_STATS_PEER_INFO;
+ memcpy(stats_req->peer_addr, sta->addr, ETH_ALEN);
+ ret = ath11k_dbg_htt_stats_req(ar);
+ mutex_unlock(&ar->conf_mutex);
+ if (ret < 0)
+ goto out;
+
+ file->private_data = stats_req;
+ return 0;
+out:
+ vfree(stats_req);
+ return ret;
+}
+
+static int
+ath11k_dbg_sta_release_htt_peer_stats(struct inode *inode, struct file *file)
+{
+ vfree(file->private_data);
+ return 0;
+}
+
+static ssize_t ath11k_dbg_sta_read_htt_peer_stats(struct file *file,
+ char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct debug_htt_stats_req *stats_req = file->private_data;
+ char *buf;
+ u32 length = 0;
+
+ buf = stats_req->buf;
+ length = min_t(u32, stats_req->buf_len, ATH11K_HTT_STATS_BUF_SIZE);
+ return simple_read_from_buffer(user_buf, count, ppos, buf, length);
+}
+
+static const struct file_operations fops_htt_peer_stats = {
+ .open = ath11k_dbg_sta_open_htt_peer_stats,
+ .release = ath11k_dbg_sta_release_htt_peer_stats,
+ .read = ath11k_dbg_sta_read_htt_peer_stats,
+ .owner = THIS_MODULE,
+ .llseek = default_llseek,
+};
+
+static ssize_t ath11k_dbg_sta_write_peer_pktlog(struct file *file,
+ const char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ struct ieee80211_sta *sta = file->private_data;
+ struct ath11k_sta *arsta = (struct ath11k_sta *)sta->drv_priv;
+ struct ath11k *ar = arsta->arvif->ar;
+ int ret, enable;
+
+ mutex_lock(&ar->conf_mutex);
+
+ if (ar->state != ATH11K_STATE_ON) {
+ ret = -ENETDOWN;
+ goto out;
+ }
+
+ ret = kstrtoint_from_user(buf, count, 0, &enable);
+ if (ret)
+ goto out;
+
+ ar->debug.pktlog_peer_valid = enable;
+ memcpy(ar->debug.pktlog_peer_addr, sta->addr, ETH_ALEN);
+
+ /* Send peer based pktlog enable/disable */
+ ret = ath11k_wmi_pdev_peer_pktlog_filter(ar, sta->addr, enable);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to set peer pktlog filter %pM: %d\n",
+ sta->addr, ret);
+ goto out;
+ }
+
+ ath11k_dbg(ar->ab, ATH11K_DBG_WMI, "peer pktlog filter set to %d\n",
+ enable);
+ ret = count;
+
+out:
+ mutex_unlock(&ar->conf_mutex);
+ return ret;
+}
+
+static ssize_t ath11k_dbg_sta_read_peer_pktlog(struct file *file,
+ char __user *ubuf,
+ size_t count, loff_t *ppos)
+{
+ struct ieee80211_sta *sta = file->private_data;
+ struct ath11k_sta *arsta = (struct ath11k_sta *)sta->drv_priv;
+ struct ath11k *ar = arsta->arvif->ar;
+ char buf[32] = {0};
+ int len;
+
+ mutex_lock(&ar->conf_mutex);
+ len = scnprintf(buf, sizeof(buf), "%08x %pM\n",
+ ar->debug.pktlog_peer_valid,
+ ar->debug.pktlog_peer_addr);
+ mutex_unlock(&ar->conf_mutex);
+
+ return simple_read_from_buffer(ubuf, count, ppos, buf, len);
+}
+
+static const struct file_operations fops_peer_pktlog = {
+ .write = ath11k_dbg_sta_write_peer_pktlog,
+ .read = ath11k_dbg_sta_read_peer_pktlog,
+ .open = simple_open,
+ .owner = THIS_MODULE,
+ .llseek = default_llseek,
+};
+
+void ath11k_sta_add_debugfs(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta, struct dentry *dir)
+{
+ struct ath11k *ar = hw->priv;
+
+ if (ath11k_debug_is_extd_tx_stats_enabled(ar))
+ debugfs_create_file("tx_stats", 0400, dir, sta,
+ &fops_tx_stats);
+ if (ath11k_debug_is_extd_rx_stats_enabled(ar))
+ debugfs_create_file("rx_stats", 0400, dir, sta,
+ &fops_rx_stats);
+
+ debugfs_create_file("htt_peer_stats", 0400, dir, sta,
+ &fops_htt_peer_stats);
+
+ debugfs_create_file("peer_pktlog", 0644, dir, sta,
+ &fops_peer_pktlog);
+}
diff --git a/drivers/net/wireless/ath/ath11k/dp.c b/drivers/net/wireless/ath/ath11k/dp.c
new file mode 100644
index 000000000000..b112825a52ed
--- /dev/null
+++ b/drivers/net/wireless/ath/ath11k/dp.c
@@ -0,0 +1,899 @@
+// SPDX-License-Identifier: BSD-3-Clause-Clear
+/*
+ * Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
+ */
+
+#include "core.h"
+#include "dp_tx.h"
+#include "hal_tx.h"
+#include "debug.h"
+#include "dp_rx.h"
+#include "peer.h"
+
+static void ath11k_dp_htt_htc_tx_complete(struct ath11k_base *ab,
+ struct sk_buff *skb)
+{
+ dev_kfree_skb_any(skb);
+}
+
+void ath11k_dp_peer_cleanup(struct ath11k *ar, int vdev_id, const u8 *addr)
+{
+ struct ath11k_base *ab = ar->ab;
+ struct ath11k_peer *peer;
+
+ /* TODO: Any other peer specific DP cleanup */
+
+ spin_lock_bh(&ab->base_lock);
+ peer = ath11k_peer_find(ab, vdev_id, addr);
+ if (!peer) {
+ ath11k_warn(ab, "failed to lookup peer %pM on vdev %d\n",
+ addr, vdev_id);
+ spin_unlock_bh(&ab->base_lock);
+ return;
+ }
+
+ ath11k_peer_rx_tid_cleanup(ar, peer);
+ spin_unlock_bh(&ab->base_lock);
+}
+
+int ath11k_dp_peer_setup(struct ath11k *ar, int vdev_id, const u8 *addr)
+{
+ struct ath11k_base *ab = ar->ab;
+ u32 reo_dest;
+ int ret;
+
+ /* NOTE: reo_dest ring id starts from 1 unlike mac_id which starts from 0 */
+ reo_dest = ar->dp.mac_id + 1;
+ ret = ath11k_wmi_set_peer_param(ar, addr, vdev_id,
+ WMI_PEER_SET_DEFAULT_ROUTING,
+ DP_RX_HASH_ENABLE | (reo_dest << 1));
+
+ if (ret) {
+ ath11k_warn(ab, "failed to set default routing %d peer :%pM vdev_id :%d\n",
+ ret, addr, vdev_id);
+ return ret;
+ }
+
+ ret = ath11k_peer_rx_tid_setup(ar, addr, vdev_id,
+ HAL_DESC_REO_NON_QOS_TID, 1, 0);
+ if (ret) {
+ ath11k_warn(ab, "failed to setup rxd tid queue for non-qos tid %d\n",
+ ret);
+ return ret;
+ }
+
+ ret = ath11k_peer_rx_tid_setup(ar, addr, vdev_id, 0, 1, 0);
+ if (ret) {
+ ath11k_warn(ab, "failed to setup rxd tid queue for tid 0 %d\n",
+ ret);
+ return ret;
+ }
+
+ /* TODO: Setup other peer specific resource used in data path */
+
+ return 0;
+}
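
The routing value passed to ath11k_wmi_set_peer_param() above packs the
hash-enable flag into bit 0 and the 1-based reo_dest ring id into the
bits above it; a sketch of the encoding (the helper name is
illustrative; DP_RX_HASH_ENABLE is 0 in this driver, i.e. hash steering
disabled):

/* mac_id 0 -> reo_dest 1 -> value 0x2; mac_id 1 -> reo_dest 2 -> 0x4 */
static u32 peer_default_routing(int mac_id)
{
        u32 reo_dest = mac_id + 1; /* reo_dest ring ids are 1-based */

        return DP_RX_HASH_ENABLE | (reo_dest << 1);
}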
+
+void ath11k_dp_srng_cleanup(struct ath11k_base *ab, struct dp_srng *ring)
+{
+ if (!ring->vaddr_unaligned)
+ return;
+
+ dma_free_coherent(ab->dev, ring->size, ring->vaddr_unaligned,
+ ring->paddr_unaligned);
+
+ ring->vaddr_unaligned = NULL;
+}
+
+int ath11k_dp_srng_setup(struct ath11k_base *ab, struct dp_srng *ring,
+ enum hal_ring_type type, int ring_num,
+ int mac_id, int num_entries)
+{
+ struct hal_srng_params params = { 0 };
+ int entry_sz = ath11k_hal_srng_get_entrysize(type);
+ int max_entries = ath11k_hal_srng_get_max_entries(type);
+ int ret;
+
+ if (max_entries < 0 || entry_sz < 0)
+ return -EINVAL;
+
+ if (num_entries > max_entries)
+ num_entries = max_entries;
+
+ ring->size = (num_entries * entry_sz) + HAL_RING_BASE_ALIGN - 1;
+ ring->vaddr_unaligned = dma_alloc_coherent(ab->dev, ring->size,
+ &ring->paddr_unaligned,
+ GFP_KERNEL);
+ if (!ring->vaddr_unaligned)
+ return -ENOMEM;
+
+ ring->vaddr = PTR_ALIGN(ring->vaddr_unaligned, HAL_RING_BASE_ALIGN);
+ ring->paddr = ring->paddr_unaligned + ((unsigned long)ring->vaddr -
+ (unsigned long)ring->vaddr_unaligned);
+
+ params.ring_base_vaddr = ring->vaddr;
+ params.ring_base_paddr = ring->paddr;
+ params.num_entries = num_entries;
+
+ switch (type) {
+ case HAL_REO_DST:
+ params.intr_batch_cntr_thres_entries =
+ HAL_SRNG_INT_BATCH_THRESHOLD_RX;
+ params.intr_timer_thres_us = HAL_SRNG_INT_TIMER_THRESHOLD_RX;
+ break;
+ case HAL_RXDMA_BUF:
+ case HAL_RXDMA_MONITOR_BUF:
+ case HAL_RXDMA_MONITOR_STATUS:
+ params.low_threshold = num_entries >> 3;
+ params.flags |= HAL_SRNG_FLAGS_LOW_THRESH_INTR_EN;
+ params.intr_batch_cntr_thres_entries = 0;
+ params.intr_timer_thres_us = HAL_SRNG_INT_TIMER_THRESHOLD_RX;
+ break;
+ case HAL_WBM2SW_RELEASE:
+ if (ring_num < 3) {
+ params.intr_batch_cntr_thres_entries =
+ HAL_SRNG_INT_BATCH_THRESHOLD_TX;
+ params.intr_timer_thres_us =
+ HAL_SRNG_INT_TIMER_THRESHOLD_TX;
+ break;
+ }
+ /* fall through when ring_num >= 3 */
+ /* fall through */
+ case HAL_REO_EXCEPTION:
+ case HAL_REO_REINJECT:
+ case HAL_REO_CMD:
+ case HAL_REO_STATUS:
+ case HAL_TCL_DATA:
+ case HAL_TCL_CMD:
+ case HAL_TCL_STATUS:
+ case HAL_WBM_IDLE_LINK:
+ case HAL_SW2WBM_RELEASE:
+ case HAL_RXDMA_DST:
+ case HAL_RXDMA_MONITOR_DST:
+ case HAL_RXDMA_MONITOR_DESC:
+ case HAL_RXDMA_DIR_BUF:
+ params.intr_batch_cntr_thres_entries =
+ HAL_SRNG_INT_BATCH_THRESHOLD_OTHER;
+ params.intr_timer_thres_us = HAL_SRNG_INT_TIMER_THRESHOLD_OTHER;
+ break;
+ default:
+ ath11k_warn(ab, "Not a valid ring type in dp :%d\n", type);
+ return -EINVAL;
+ }
+
+ ret = ath11k_hal_srng_setup(ab, type, ring_num, mac_id, &params);
+ if (ret < 0) {
+ ath11k_warn(ab, "failed to setup srng: %d ring_id %d\n",
+ ret, ring_num);
+ return ret;
+ }
+
+ ring->ring_id = ret;
+
+ return 0;
+}
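
The over-allocate-and-align pattern used for the ring base above is
plain pointer arithmetic; a standalone sketch of the same computation
(names are illustrative; align must be a power of two, as
HAL_RING_BASE_ALIGN is):

#include <stdint.h>

/* Allocate size + align - 1 bytes, round the CPU address up to the
 * boundary, and advance the DMA address by the same byte offset.
 */
static void ring_align(uintptr_t va_unaligned, uint64_t pa_unaligned,
                       uintptr_t align, uintptr_t *va, uint64_t *pa)
{
        *va = (va_unaligned + align - 1) & ~(align - 1);
        *pa = pa_unaligned + (*va - va_unaligned);
}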
+
+static void ath11k_dp_srng_common_cleanup(struct ath11k_base *ab)
+{
+ struct ath11k_dp *dp = &ab->dp;
+ int i;
+
+ ath11k_dp_srng_cleanup(ab, &dp->wbm_desc_rel_ring);
+ ath11k_dp_srng_cleanup(ab, &dp->tcl_cmd_ring);
+ ath11k_dp_srng_cleanup(ab, &dp->tcl_status_ring);
+ for (i = 0; i < DP_TCL_NUM_RING_MAX; i++) {
+ ath11k_dp_srng_cleanup(ab, &dp->tx_ring[i].tcl_data_ring);
+ ath11k_dp_srng_cleanup(ab, &dp->tx_ring[i].tcl_comp_ring);
+ }
+ ath11k_dp_srng_cleanup(ab, &dp->reo_reinject_ring);
+ ath11k_dp_srng_cleanup(ab, &dp->rx_rel_ring);
+ ath11k_dp_srng_cleanup(ab, &dp->reo_except_ring);
+ ath11k_dp_srng_cleanup(ab, &dp->reo_cmd_ring);
+ ath11k_dp_srng_cleanup(ab, &dp->reo_status_ring);
+}
+
+static int ath11k_dp_srng_common_setup(struct ath11k_base *ab)
+{
+ struct ath11k_dp *dp = &ab->dp;
+ struct hal_srng *srng;
+ int i, ret;
+
+ ret = ath11k_dp_srng_setup(ab, &dp->wbm_desc_rel_ring,
+ HAL_SW2WBM_RELEASE, 0, 0,
+ DP_WBM_RELEASE_RING_SIZE);
+ if (ret) {
+ ath11k_warn(ab, "failed to set up wbm2sw_release ring :%d\n",
+ ret);
+ goto err;
+ }
+
+ ret = ath11k_dp_srng_setup(ab, &dp->tcl_cmd_ring, HAL_TCL_CMD, 0, 0,
+ DP_TCL_CMD_RING_SIZE);
+ if (ret) {
+ ath11k_warn(ab, "failed to set up tcl_cmd ring :%d\n", ret);
+ goto err;
+ }
+
+ ret = ath11k_dp_srng_setup(ab, &dp->tcl_status_ring, HAL_TCL_STATUS,
+ 0, 0, DP_TCL_STATUS_RING_SIZE);
+ if (ret) {
+ ath11k_warn(ab, "failed to set up tcl_status ring :%d\n", ret);
+ goto err;
+ }
+
+ for (i = 0; i < DP_TCL_NUM_RING_MAX; i++) {
+ ret = ath11k_dp_srng_setup(ab, &dp->tx_ring[i].tcl_data_ring,
+ HAL_TCL_DATA, i, 0,
+ DP_TCL_DATA_RING_SIZE);
+ if (ret) {
+ ath11k_warn(ab, "failed to set up tcl_data ring (%d) :%d\n",
+ i, ret);
+ goto err;
+ }
+
+ ret = ath11k_dp_srng_setup(ab, &dp->tx_ring[i].tcl_comp_ring,
+ HAL_WBM2SW_RELEASE, i, 0,
+ DP_TX_COMP_RING_SIZE);
+ if (ret) {
+ ath11k_warn(ab, "failed to set up tcl_comp ring ring (%d) :%d\n",
+ i, ret);
+ goto err;
+ }
+
+ srng = &ab->hal.srng_list[dp->tx_ring[i].tcl_data_ring.ring_id];
+ ath11k_hal_tx_init_data_ring(ab, srng);
+ }
+
+ ret = ath11k_dp_srng_setup(ab, &dp->reo_reinject_ring, HAL_REO_REINJECT,
+ 0, 0, DP_REO_REINJECT_RING_SIZE);
+ if (ret) {
+ ath11k_warn(ab, "failed to set up reo_reinject ring :%d\n",
+ ret);
+ goto err;
+ }
+
+ ret = ath11k_dp_srng_setup(ab, &dp->rx_rel_ring, HAL_WBM2SW_RELEASE,
+ 3, 0, DP_RX_RELEASE_RING_SIZE);
+ if (ret) {
+ ath11k_warn(ab, "failed to set up rx_rel ring :%d\n", ret);
+ goto err;
+ }
+
+ ret = ath11k_dp_srng_setup(ab, &dp->reo_except_ring, HAL_REO_EXCEPTION,
+ 0, 0, DP_REO_EXCEPTION_RING_SIZE);
+ if (ret) {
+ ath11k_warn(ab, "failed to set up reo_exception ring :%d\n",
+ ret);
+ goto err;
+ }
+
+ ret = ath11k_dp_srng_setup(ab, &dp->reo_cmd_ring, HAL_REO_CMD,
+ 0, 0, DP_REO_CMD_RING_SIZE);
+ if (ret) {
+ ath11k_warn(ab, "failed to set up reo_cmd ring :%d\n", ret);
+ goto err;
+ }
+
+ srng = &ab->hal.srng_list[dp->reo_cmd_ring.ring_id];
+ ath11k_hal_reo_init_cmd_ring(ab, srng);
+
+ ret = ath11k_dp_srng_setup(ab, &dp->reo_status_ring, HAL_REO_STATUS,
+ 0, 0, DP_REO_STATUS_RING_SIZE);
+ if (ret) {
+ ath11k_warn(ab, "failed to set up reo_status ring :%d\n", ret);
+ goto err;
+ }
+
+ ath11k_hal_reo_hw_setup(ab);
+
+ return 0;
+
+err:
+ ath11k_dp_srng_common_cleanup(ab);
+
+ return ret;
+}
+
+static void ath11k_dp_scatter_idle_link_desc_cleanup(struct ath11k_base *ab)
+{
+ struct ath11k_dp *dp = &ab->dp;
+ struct hal_wbm_idle_scatter_list *slist = dp->scatter_list;
+ int i;
+
+ for (i = 0; i < DP_IDLE_SCATTER_BUFS_MAX; i++) {
+ if (!slist[i].vaddr)
+ continue;
+
+ dma_free_coherent(ab->dev, HAL_WBM_IDLE_SCATTER_BUF_SIZE_MAX,
+ slist[i].vaddr, slist[i].paddr);
+ slist[i].vaddr = NULL;
+ }
+}
+
+static int ath11k_dp_scatter_idle_link_desc_setup(struct ath11k_base *ab,
+ int size,
+ u32 n_link_desc_bank,
+ u32 n_link_desc,
+ u32 last_bank_sz)
+{
+ struct ath11k_dp *dp = &ab->dp;
+ struct dp_link_desc_bank *link_desc_banks = dp->link_desc_banks;
+ struct hal_wbm_idle_scatter_list *slist = dp->scatter_list;
+ u32 n_entries_per_buf;
+ int num_scatter_buf, scatter_idx;
+ struct hal_wbm_link_desc *scatter_buf;
+ int align_bytes, n_entries;
+ dma_addr_t paddr;
+ int rem_entries;
+ int i;
+ int ret = 0;
+ u32 end_offset;
+
+ n_entries_per_buf = HAL_WBM_IDLE_SCATTER_BUF_SIZE /
+ ath11k_hal_srng_get_entrysize(HAL_WBM_IDLE_LINK);
+ num_scatter_buf = DIV_ROUND_UP(size, HAL_WBM_IDLE_SCATTER_BUF_SIZE);
+
+ if (num_scatter_buf > DP_IDLE_SCATTER_BUFS_MAX)
+ return -EINVAL;
+
+ for (i = 0; i < num_scatter_buf; i++) {
+ slist[i].vaddr = dma_alloc_coherent(ab->dev,
+ HAL_WBM_IDLE_SCATTER_BUF_SIZE_MAX,
+ &slist[i].paddr, GFP_KERNEL);
+ if (!slist[i].vaddr) {
+ ret = -ENOMEM;
+ goto err;
+ }
+ }
+
+ scatter_idx = 0;
+ scatter_buf = slist[scatter_idx].vaddr;
+ rem_entries = n_entries_per_buf;
+
+ for (i = 0; i < n_link_desc_bank; i++) {
+ align_bytes = link_desc_banks[i].vaddr -
+ link_desc_banks[i].vaddr_unaligned;
+ n_entries = (DP_LINK_DESC_ALLOC_SIZE_THRESH - align_bytes) /
+ HAL_LINK_DESC_SIZE;
+ paddr = link_desc_banks[i].paddr;
+ while (n_entries) {
+ ath11k_hal_set_link_desc_addr(scatter_buf, i, paddr);
+ n_entries--;
+ paddr += HAL_LINK_DESC_SIZE;
+ if (rem_entries) {
+ rem_entries--;
+ scatter_buf++;
+ continue;
+ }
+
+ rem_entries = n_entries_per_buf;
+ scatter_idx++;
+ scatter_buf = slist[scatter_idx].vaddr;
+ }
+ }
+
+ end_offset = (scatter_buf - slist[scatter_idx].vaddr) *
+ sizeof(struct hal_wbm_link_desc);
+ ath11k_hal_setup_link_idle_list(ab, slist, num_scatter_buf,
+ n_link_desc, end_offset);
+
+ return 0;
+
+err:
+ ath11k_dp_scatter_idle_link_desc_cleanup(ab);
+
+ return ret;
+}
+
+static void
+ath11k_dp_link_desc_bank_free(struct ath11k_base *ab,
+ struct dp_link_desc_bank *link_desc_banks)
+{
+ int i;
+
+ for (i = 0; i < DP_LINK_DESC_BANKS_MAX; i++) {
+ if (link_desc_banks[i].vaddr_unaligned) {
+ dma_free_coherent(ab->dev,
+ link_desc_banks[i].size,
+ link_desc_banks[i].vaddr_unaligned,
+ link_desc_banks[i].paddr_unaligned);
+ link_desc_banks[i].vaddr_unaligned = NULL;
+ }
+ }
+}
+
+static int ath11k_dp_link_desc_bank_alloc(struct ath11k_base *ab,
+ struct dp_link_desc_bank *desc_bank,
+ int n_link_desc_bank,
+ int last_bank_sz)
+{
+ struct ath11k_dp *dp = &ab->dp;
+ int i;
+ int ret = 0;
+ int desc_sz = DP_LINK_DESC_ALLOC_SIZE_THRESH;
+
+ for (i = 0; i < n_link_desc_bank; i++) {
+ if (i == (n_link_desc_bank - 1) && last_bank_sz)
+ desc_sz = last_bank_sz;
+
+ desc_bank[i].vaddr_unaligned =
+ dma_alloc_coherent(ab->dev, desc_sz,
+ &desc_bank[i].paddr_unaligned,
+ GFP_KERNEL);
+ if (!desc_bank[i].vaddr_unaligned) {
+ ret = -ENOMEM;
+ goto err;
+ }
+
+ desc_bank[i].vaddr = PTR_ALIGN(desc_bank[i].vaddr_unaligned,
+ HAL_LINK_DESC_ALIGN);
+ desc_bank[i].paddr = desc_bank[i].paddr_unaligned +
+ ((unsigned long)desc_bank[i].vaddr -
+ (unsigned long)desc_bank[i].vaddr_unaligned);
+ desc_bank[i].size = desc_sz;
+ }
+
+ return 0;
+
+err:
+ ath11k_dp_link_desc_bank_free(ab, dp->link_desc_banks);
+
+ return ret;
+}
+
+void ath11k_dp_link_desc_cleanup(struct ath11k_base *ab,
+ struct dp_link_desc_bank *desc_bank,
+ u32 ring_type, struct dp_srng *ring)
+{
+ ath11k_dp_link_desc_bank_free(ab, desc_bank);
+
+ if (ring_type != HAL_RXDMA_MONITOR_DESC) {
+ ath11k_dp_srng_cleanup(ab, ring);
+ ath11k_dp_scatter_idle_link_desc_cleanup(ab);
+ }
+}
+
+static int ath11k_wbm_idle_ring_setup(struct ath11k_base *ab, u32 *n_link_desc)
+{
+ struct ath11k_dp *dp = &ab->dp;
+ u32 n_mpdu_link_desc, n_mpdu_queue_desc;
+ u32 n_tx_msdu_link_desc, n_rx_msdu_link_desc;
+ int ret = 0;
+
+ n_mpdu_link_desc = (DP_NUM_TIDS_MAX * DP_AVG_MPDUS_PER_TID_MAX) /
+ HAL_NUM_MPDUS_PER_LINK_DESC;
+
+ n_mpdu_queue_desc = n_mpdu_link_desc /
+ HAL_NUM_MPDU_LINKS_PER_QUEUE_DESC;
+
+ n_tx_msdu_link_desc = (DP_NUM_TIDS_MAX * DP_AVG_FLOWS_PER_TID *
+ DP_AVG_MSDUS_PER_FLOW) /
+ HAL_NUM_TX_MSDUS_PER_LINK_DESC;
+
+ n_rx_msdu_link_desc = (DP_NUM_TIDS_MAX * DP_AVG_MPDUS_PER_TID_MAX *
+ DP_AVG_MSDUS_PER_MPDU) /
+ HAL_NUM_RX_MSDUS_PER_LINK_DESC;
+
+ *n_link_desc = n_mpdu_link_desc + n_mpdu_queue_desc +
+ n_tx_msdu_link_desc + n_rx_msdu_link_desc;
+
+ if (*n_link_desc & (*n_link_desc - 1))
+ *n_link_desc = 1 << fls(*n_link_desc);
+
+ ret = ath11k_dp_srng_setup(ab, &dp->wbm_idle_ring,
+ HAL_WBM_IDLE_LINK, 0, 0, *n_link_desc);
+ if (ret) {
+ ath11k_warn(ab, "failed to setup wbm_idle_ring: %d\n", ret);
+ return ret;
+ }
+ return ret;
+}
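
The n & (n - 1) test above is the standard power-of-two check, and
1 << fls(n) rounds a non-power-of-two count up to the next power of two;
a standalone illustration with fls open-coded (e.g. 1536 rounds to 2048
while 2048 is left alone; n > 0 assumed):

#include <stdint.h>

static uint32_t next_pow2(uint32_t n)
{
        uint32_t p = 1;

        if ((n & (n - 1)) == 0) /* already a power of two */
                return n;
        while (p < n)
                p <<= 1;
        return p; /* matches 1 << fls(n) for non-powers of two */
}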
+
+int ath11k_dp_link_desc_setup(struct ath11k_base *ab,
+ struct dp_link_desc_bank *link_desc_banks,
+ u32 ring_type, struct hal_srng *srng,
+ u32 n_link_desc)
+{
+ u32 tot_mem_sz;
+ u32 n_link_desc_bank, last_bank_sz;
+ u32 entry_sz, align_bytes, n_entries;
+ u32 paddr;
+ u32 *desc;
+ int i, ret;
+
+ tot_mem_sz = n_link_desc * HAL_LINK_DESC_SIZE;
+ tot_mem_sz += HAL_LINK_DESC_ALIGN;
+
+ if (tot_mem_sz <= DP_LINK_DESC_ALLOC_SIZE_THRESH) {
+ n_link_desc_bank = 1;
+ last_bank_sz = tot_mem_sz;
+ } else {
+ n_link_desc_bank = tot_mem_sz /
+ (DP_LINK_DESC_ALLOC_SIZE_THRESH -
+ HAL_LINK_DESC_ALIGN);
+ last_bank_sz = tot_mem_sz %
+ (DP_LINK_DESC_ALLOC_SIZE_THRESH -
+ HAL_LINK_DESC_ALIGN);
+
+ if (last_bank_sz)
+ n_link_desc_bank += 1;
+ }
+
+ if (n_link_desc_bank > DP_LINK_DESC_BANKS_MAX)
+ return -EINVAL;
+
+ ret = ath11k_dp_link_desc_bank_alloc(ab, link_desc_banks,
+ n_link_desc_bank, last_bank_sz);
+ if (ret)
+ return ret;
+
+ /* Setup link desc idle list for HW internal usage */
+ entry_sz = ath11k_hal_srng_get_entrysize(ring_type);
+ tot_mem_sz = entry_sz * n_link_desc;
+
+ /* Set up the scatter desc list when the total memory requirement
+ * exceeds the bank allocation size threshold
+ */
+ if (tot_mem_sz > DP_LINK_DESC_ALLOC_SIZE_THRESH &&
+ ring_type != HAL_RXDMA_MONITOR_DESC) {
+ ret = ath11k_dp_scatter_idle_link_desc_setup(ab, tot_mem_sz,
+ n_link_desc_bank,
+ n_link_desc,
+ last_bank_sz);
+ if (ret) {
+ ath11k_warn(ab, "failed to setup scatting idle list descriptor :%d\n",
+ ret);
+ goto fail_desc_bank_free;
+ }
+
+ return 0;
+ }
+
+ spin_lock_bh(&srng->lock);
+
+ ath11k_hal_srng_access_begin(ab, srng);
+
+ for (i = 0; i < n_link_desc_bank; i++) {
+ align_bytes = link_desc_banks[i].vaddr -
+ link_desc_banks[i].vaddr_unaligned;
+ n_entries = (link_desc_banks[i].size - align_bytes) /
+ HAL_LINK_DESC_SIZE;
+ paddr = link_desc_banks[i].paddr;
+ while (n_entries &&
+ (desc = ath11k_hal_srng_src_get_next_entry(ab, srng))) {
+ ath11k_hal_set_link_desc_addr((struct hal_wbm_link_desc *)desc,
+ i, paddr);
+ n_entries--;
+ paddr += HAL_LINK_DESC_SIZE;
+ }
+ }
+
+ ath11k_hal_srng_access_end(ab, srng);
+
+ spin_unlock_bh(&srng->lock);
+
+ return 0;
+
+fail_desc_bank_free:
+ ath11k_dp_link_desc_bank_free(ab, link_desc_banks);
+
+ return ret;
+}
+
+int ath11k_dp_service_srng(struct ath11k_base *ab,
+ struct ath11k_ext_irq_grp *irq_grp,
+ int budget)
+{
+ struct napi_struct *napi = &irq_grp->napi;
+ int grp_id = irq_grp->grp_id;
+ int work_done = 0;
+ int i = 0;
+ int tot_work_done = 0;
+
+ while (ath11k_tx_ring_mask[grp_id] >> i) {
+ if (ath11k_tx_ring_mask[grp_id] & BIT(i))
+ ath11k_dp_tx_completion_handler(ab, i);
+ i++;
+ }
+
+ if (ath11k_rx_err_ring_mask[grp_id]) {
+ work_done = ath11k_dp_process_rx_err(ab, napi, budget);
+ budget -= work_done;
+ tot_work_done += work_done;
+ if (budget <= 0)
+ goto done;
+ }
+
+ if (ath11k_rx_wbm_rel_ring_mask[grp_id]) {
+ work_done = ath11k_dp_rx_process_wbm_err(ab,
+ napi,
+ budget);
+ budget -= work_done;
+ tot_work_done += work_done;
+
+ if (budget <= 0)
+ goto done;
+ }
+
+ if (ath11k_rx_ring_mask[grp_id]) {
+ for (i = 0; i < ab->num_radios; i++) {
+ if (ath11k_rx_ring_mask[grp_id] & BIT(i)) {
+ work_done = ath11k_dp_process_rx(ab, i, napi,
+ &irq_grp->pending_q,
+ budget);
+ budget -= work_done;
+ tot_work_done += work_done;
+ }
+ if (budget <= 0)
+ goto done;
+ }
+ }
+
+ if (rx_mon_status_ring_mask[grp_id]) {
+ for (i = 0; i < ab->num_radios; i++) {
+ if (rx_mon_status_ring_mask[grp_id] & BIT(i)) {
+ work_done =
+ ath11k_dp_rx_process_mon_rings(ab,
+ i, napi,
+ budget);
+ budget -= work_done;
+ tot_work_done += work_done;
+ }
+ if (budget <= 0)
+ goto done;
+ }
+ }
+
+ if (ath11k_reo_status_ring_mask[grp_id])
+ ath11k_dp_process_reo_status(ab);
+
+ for (i = 0; i < ab->num_radios; i++) {
+ if (ath11k_rxdma2host_ring_mask[grp_id] & BIT(i)) {
+ work_done = ath11k_dp_process_rxdma_err(ab, i, budget);
+ budget -= work_done;
+ tot_work_done += work_done;
+ }
+
+ if (budget <= 0)
+ goto done;
+
+ if (ath11k_host2rxdma_ring_mask[grp_id] & BIT(i)) {
+ struct ath11k_pdev_dp *dp = &ab->pdevs[i].ar->dp;
+ struct dp_rxdma_ring *rx_ring = &dp->rx_refill_buf_ring;
+
+ ath11k_dp_rxbufs_replenish(ab, i, rx_ring, 0,
+ HAL_RX_BUF_RBM_SW3_BM,
+ GFP_ATOMIC);
+ }
+ }
+ /* TODO: Implement handler for other interrupts */
+
+done:
+ return tot_work_done;
+}
+
+void ath11k_dp_pdev_free(struct ath11k_base *ab)
+{
+ struct ath11k *ar;
+ int i;
+
+ for (i = 0; i < ab->num_radios; i++) {
+ ar = ab->pdevs[i].ar;
+ ath11k_dp_rx_pdev_free(ab, i);
+ ath11k_debug_unregister(ar);
+ ath11k_dp_rx_pdev_mon_detach(ar);
+ }
+}
+
+void ath11k_dp_pdev_pre_alloc(struct ath11k_base *ab)
+{
+ struct ath11k *ar;
+ struct ath11k_pdev_dp *dp;
+ int i;
+
+ for (i = 0; i < ab->num_radios; i++) {
+ ar = ab->pdevs[i].ar;
+ dp = &ar->dp;
+ dp->mac_id = i;
+ idr_init(&dp->rx_refill_buf_ring.bufs_idr);
+ spin_lock_init(&dp->rx_refill_buf_ring.idr_lock);
+ atomic_set(&dp->num_tx_pending, 0);
+ init_waitqueue_head(&dp->tx_empty_waitq);
+ idr_init(&dp->rx_mon_status_refill_ring.bufs_idr);
+ spin_lock_init(&dp->rx_mon_status_refill_ring.idr_lock);
+ idr_init(&dp->rxdma_mon_buf_ring.bufs_idr);
+ spin_lock_init(&dp->rxdma_mon_buf_ring.idr_lock);
+ }
+}
+
+int ath11k_dp_pdev_alloc(struct ath11k_base *ab)
+{
+ struct ath11k *ar;
+ int ret;
+ int i;
+
+ /* TODO: Per-pdev rx ring, unlike tx ring which is mapped to different ACs */
+ for (i = 0; i < ab->num_radios; i++) {
+ ar = ab->pdevs[i].ar;
+ ret = ath11k_dp_rx_pdev_alloc(ab, i);
+ if (ret) {
+ ath11k_warn(ab, "failed to allocate pdev rx for pdev_id :%d\n",
+ i);
+ goto err;
+ }
+ ret = ath11k_dp_rx_pdev_mon_attach(ar);
+ if (ret) {
+ ath11k_warn(ab, "failed to initialize mon pdev %d\n",
+ i);
+ goto err;
+ }
+ }
+
+ return 0;
+
+err:
+ ath11k_dp_pdev_free(ab);
+
+ return ret;
+}
+
+int ath11k_dp_htt_connect(struct ath11k_dp *dp)
+{
+ struct ath11k_htc_svc_conn_req conn_req;
+ struct ath11k_htc_svc_conn_resp conn_resp;
+ int status;
+
+ memset(&conn_req, 0, sizeof(conn_req));
+ memset(&conn_resp, 0, sizeof(conn_resp));
+
+ conn_req.ep_ops.ep_tx_complete = ath11k_dp_htt_htc_tx_complete;
+ conn_req.ep_ops.ep_rx_complete = ath11k_dp_htt_htc_t2h_msg_handler;
+
+ /* connect to control service */
+ conn_req.service_id = ATH11K_HTC_SVC_ID_HTT_DATA_MSG;
+
+ status = ath11k_htc_connect_service(&dp->ab->htc, &conn_req,
+ &conn_resp);
+
+ if (status)
+ return status;
+
+ dp->eid = conn_resp.eid;
+
+ return 0;
+}
+
+static void ath11k_dp_update_vdev_search(struct ath11k_vif *arvif)
+{
+ /* For STA mode, enable address search index,
+ * tcl uses ast_hash value in the descriptor.
+ */
+ switch (arvif->vdev_type) {
+ case WMI_VDEV_TYPE_STA:
+ arvif->hal_addr_search_flags = HAL_TX_ADDRX_EN;
+ arvif->search_type = HAL_TX_ADDR_SEARCH_INDEX;
+ break;
+ case WMI_VDEV_TYPE_AP:
+ case WMI_VDEV_TYPE_IBSS:
+ arvif->hal_addr_search_flags = HAL_TX_ADDRX_EN;
+ arvif->search_type = HAL_TX_ADDR_SEARCH_DEFAULT;
+ break;
+ case WMI_VDEV_TYPE_MONITOR:
+ default:
+ return;
+ }
+}
+
+void ath11k_dp_vdev_tx_attach(struct ath11k *ar, struct ath11k_vif *arvif)
+{
+ arvif->tcl_metadata |= FIELD_PREP(HTT_TCL_META_DATA_TYPE, 1) |
+ FIELD_PREP(HTT_TCL_META_DATA_VDEV_ID,
+ arvif->vdev_id) |
+ FIELD_PREP(HTT_TCL_META_DATA_PDEV_ID,
+ ar->pdev->pdev_id);
+
+ /* set HTT extension valid bit to 0 by default */
+ arvif->tcl_metadata &= ~HTT_TCL_META_DATA_VALID_HTT;
+
+ ath11k_dp_update_vdev_search(arvif);
+}
+
+static int ath11k_dp_tx_pending_cleanup(int buf_id, void *skb, void *ctx)
+{
+ struct ath11k_base *ab = (struct ath11k_base *)ctx;
+ struct sk_buff *msdu = skb;
+
+ dma_unmap_single(ab->dev, ATH11K_SKB_CB(msdu)->paddr, msdu->len,
+ DMA_TO_DEVICE);
+
+ dev_kfree_skb_any(msdu);
+
+ return 0;
+}
+
+void ath11k_dp_free(struct ath11k_base *ab)
+{
+ struct ath11k_dp *dp = &ab->dp;
+ int i;
+
+ ath11k_dp_link_desc_cleanup(ab, dp->link_desc_banks,
+ HAL_WBM_IDLE_LINK, &dp->wbm_idle_ring);
+
+ ath11k_dp_srng_common_cleanup(ab);
+
+ ath11k_dp_reo_cmd_list_cleanup(ab);
+
+ for (i = 0; i < DP_TCL_NUM_RING_MAX; i++) {
+ spin_lock_bh(&dp->tx_ring[i].tx_idr_lock);
+ idr_for_each(&dp->tx_ring[i].txbuf_idr,
+ ath11k_dp_tx_pending_cleanup, ab);
+ idr_destroy(&dp->tx_ring[i].txbuf_idr);
+ spin_unlock_bh(&dp->tx_ring[i].tx_idr_lock);
+ kfree(dp->tx_ring[i].tx_status);
+ }
+
+ /* Deinit any SOC level resource */
+}
+
+int ath11k_dp_alloc(struct ath11k_base *ab)
+{
+ struct ath11k_dp *dp = &ab->dp;
+ struct hal_srng *srng = NULL;
+ size_t size = 0;
+ u32 n_link_desc = 0;
+ int ret;
+ int i;
+
+ dp->ab = ab;
+
+ INIT_LIST_HEAD(&dp->reo_cmd_list);
+ INIT_LIST_HEAD(&dp->reo_cmd_cache_flush_list);
+ spin_lock_init(&dp->reo_cmd_lock);
+
+ ret = ath11k_wbm_idle_ring_setup(ab, &n_link_desc);
+ if (ret) {
+ ath11k_warn(ab, "failed to setup wbm_idle_ring: %d\n", ret);
+ return ret;
+ }
+
+ srng = &ab->hal.srng_list[dp->wbm_idle_ring.ring_id];
+
+ ret = ath11k_dp_link_desc_setup(ab, dp->link_desc_banks,
+ HAL_WBM_IDLE_LINK, srng, n_link_desc);
+ if (ret) {
+ ath11k_warn(ab, "failed to setup link desc: %d\n", ret);
+ return ret;
+ }
+
+ ret = ath11k_dp_srng_common_setup(ab);
+ if (ret)
+ goto fail_link_desc_cleanup;
+
+ size = sizeof(struct hal_wbm_release_ring) * DP_TX_COMP_RING_SIZE;
+
+ for (i = 0; i < DP_TCL_NUM_RING_MAX; i++) {
+ idr_init(&dp->tx_ring[i].txbuf_idr);
+ spin_lock_init(&dp->tx_ring[i].tx_idr_lock);
+ dp->tx_ring[i].tcl_data_ring_id = i;
+
+ dp->tx_ring[i].tx_status_head = 0;
+ dp->tx_ring[i].tx_status_tail = DP_TX_COMP_RING_SIZE - 1;
+ dp->tx_ring[i].tx_status = kmalloc(size, GFP_KERNEL);
+ if (!dp->tx_ring[i].tx_status)
+ goto fail_cmn_srng_cleanup;
+ }
+
+ for (i = 0; i < HAL_DSCP_TID_MAP_TBL_NUM_ENTRIES_MAX; i++)
+ ath11k_hal_tx_set_dscp_tid_map(ab, i);
+
+ /* Init any SOC level resource for DP */
+
+ return 0;
+
+fail_cmn_srng_cleanup:
+ ath11k_dp_srng_common_cleanup(ab);
+
+fail_link_desc_cleanup:
+ ath11k_dp_link_desc_cleanup(ab, dp->link_desc_banks,
+ HAL_WBM_IDLE_LINK, &dp->wbm_idle_ring);
+
+ return ret;
+}
diff --git a/drivers/net/wireless/ath/ath11k/dp.h b/drivers/net/wireless/ath/ath11k/dp.h
new file mode 100644
index 000000000000..6ef5be4201b2
--- /dev/null
+++ b/drivers/net/wireless/ath/ath11k/dp.h
@@ -0,0 +1,1535 @@
+/* SPDX-License-Identifier: BSD-3-Clause-Clear */
+/*
+ * Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
+ */
+
+#ifndef ATH11K_DP_H
+#define ATH11K_DP_H
+
+#include "hal_rx.h"
+
+struct ath11k_base;
+struct ath11k_peer;
+struct ath11k_dp;
+struct ath11k_vif;
+struct hal_tcl_status_ring;
+struct ath11k_ext_irq_grp;
+
+struct dp_rx_tid {
+ u8 tid;
+ u32 *vaddr;
+ dma_addr_t paddr;
+ u32 size;
+ u32 ba_win_sz;
+ bool active;
+};
+
+#define DP_REO_DESC_FREE_TIMEOUT_MS 1000
+
+struct dp_reo_cache_flush_elem {
+ struct list_head list;
+ struct dp_rx_tid data;
+ unsigned long ts;
+};
+
+struct dp_reo_cmd {
+ struct list_head list;
+ struct dp_rx_tid data;
+ int cmd_num;
+ void (*handler)(struct ath11k_dp *, void *,
+ enum hal_reo_cmd_status status);
+};
+
+struct dp_srng {
+ u32 *vaddr_unaligned;
+ u32 *vaddr;
+ dma_addr_t paddr_unaligned;
+ dma_addr_t paddr;
+ int size;
+ u32 ring_id;
+};
+
+struct dp_rxdma_ring {
+ struct dp_srng refill_buf_ring;
+ struct idr bufs_idr;
+ /* Protects bufs_idr */
+ spinlock_t idr_lock;
+ int bufs_max;
+};
+
+#define ATH11K_TX_COMPL_NEXT(x) (((x) + 1) % DP_TX_COMP_RING_SIZE)
+
+struct dp_tx_ring {
+ u8 tcl_data_ring_id;
+ struct dp_srng tcl_data_ring;
+ struct dp_srng tcl_comp_ring;
+ struct idr txbuf_idr;
+ /* Protects txbuf_idr and num_pending */
+ spinlock_t tx_idr_lock;
+ struct hal_wbm_release_ring *tx_status;
+ int tx_status_head;
+ int tx_status_tail;
+};
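
ATH11K_TX_COMPL_NEXT and the head/tail indices in dp_tx_ring implement a
fixed-size circular buffer of cached completion descriptors; a minimal
sketch of one producer step (a guess at the usage pattern, not the
driver's actual completion path):

static bool tx_status_push(struct dp_tx_ring *tx_ring,
                           const struct hal_wbm_release_ring *desc)
{
        int next = ATH11K_TX_COMPL_NEXT(tx_ring->tx_status_head);

        if (next == tx_ring->tx_status_tail)
                return false; /* ring full */

        tx_ring->tx_status[next] = *desc;
        tx_ring->tx_status_head = next;
        return true;
}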
+
+struct ath11k_pdev_mon_stats {
+ u32 status_ppdu_state;
+ u32 status_ppdu_start;
+ u32 status_ppdu_end;
+ u32 status_ppdu_compl;
+ u32 status_ppdu_start_mis;
+ u32 status_ppdu_end_mis;
+ u32 status_ppdu_done;
+ u32 dest_ppdu_done;
+ u32 dest_mpdu_done;
+ u32 dest_mpdu_drop;
+ u32 dup_mon_linkdesc_cnt;
+ u32 dup_mon_buf_cnt;
+};
+
+struct dp_link_desc_bank {
+ void *vaddr_unaligned;
+ void *vaddr;
+ dma_addr_t paddr_unaligned;
+ dma_addr_t paddr;
+ u32 size;
+};
+
+/* Size to enforce scatter idle list mode */
+#define DP_LINK_DESC_ALLOC_SIZE_THRESH 0x200000
+#define DP_LINK_DESC_BANKS_MAX 8
+
+#define DP_RX_DESC_COOKIE_INDEX_MAX 0x3ffff
+#define DP_RX_DESC_COOKIE_POOL_ID_MAX 0x1c0000
+#define DP_RX_DESC_COOKIE_MAX \
+ (DP_RX_DESC_COOKIE_INDEX_MAX | DP_RX_DESC_COOKIE_POOL_ID_MAX)
+#define DP_NOT_PPDU_ID_WRAP_AROUND 20000
+
+enum ath11k_dp_ppdu_state {
+ DP_PPDU_STATUS_START,
+ DP_PPDU_STATUS_DONE,
+};
+
+struct ath11k_mon_data {
+ struct dp_link_desc_bank link_desc_banks[DP_LINK_DESC_BANKS_MAX];
+ struct hal_rx_mon_ppdu_info mon_ppdu_info;
+
+ u32 mon_ppdu_status;
+ u32 mon_last_buf_cookie;
+ u64 mon_last_linkdesc_paddr;
+ u16 chan_noise_floor;
+
+ struct ath11k_pdev_mon_stats rx_mon_stats;
+ /* lock for monitor data */
+ spinlock_t mon_lock;
+ struct sk_buff_head rx_status_q;
+};
+
+struct ath11k_pdev_dp {
+ u32 mac_id;
+ atomic_t num_tx_pending;
+ wait_queue_head_t tx_empty_waitq;
+ struct dp_srng reo_dst_ring;
+ struct dp_rxdma_ring rx_refill_buf_ring;
+ struct dp_srng rxdma_err_dst_ring;
+ struct dp_srng rxdma_mon_dst_ring;
+ struct dp_srng rxdma_mon_desc_ring;
+
+ struct dp_rxdma_ring rxdma_mon_buf_ring;
+ struct dp_rxdma_ring rx_mon_status_refill_ring;
+ struct ieee80211_rx_status rx_status;
+ struct ath11k_mon_data mon_data;
+};
+
+#define DP_NUM_CLIENTS_MAX 64
+#define DP_AVG_TIDS_PER_CLIENT 2
+#define DP_NUM_TIDS_MAX (DP_NUM_CLIENTS_MAX * DP_AVG_TIDS_PER_CLIENT)
+#define DP_AVG_MSDUS_PER_FLOW 128
+#define DP_AVG_FLOWS_PER_TID 2
+#define DP_AVG_MPDUS_PER_TID_MAX 128
+#define DP_AVG_MSDUS_PER_MPDU 4
+
+#define DP_RX_HASH_ENABLE 0 /* Disable hash based Rx steering */
+
+#define DP_BA_WIN_SZ_MAX 256
+
+#define DP_TCL_NUM_RING_MAX 3
+
+#define DP_IDLE_SCATTER_BUFS_MAX 16
+
+#define DP_WBM_RELEASE_RING_SIZE 64
+#define DP_TCL_DATA_RING_SIZE 512
+#define DP_TX_COMP_RING_SIZE 8192
+#define DP_TX_IDR_SIZE (DP_TX_COMP_RING_SIZE << 1)
+#define DP_TCL_CMD_RING_SIZE 32
+#define DP_TCL_STATUS_RING_SIZE 32
+#define DP_REO_DST_RING_MAX 4
+#define DP_REO_DST_RING_SIZE 2048
+#define DP_REO_REINJECT_RING_SIZE 32
+#define DP_RX_RELEASE_RING_SIZE 1024
+#define DP_REO_EXCEPTION_RING_SIZE 128
+#define DP_REO_CMD_RING_SIZE 128
+#define DP_REO_STATUS_RING_SIZE 256
+#define DP_RXDMA_BUF_RING_SIZE 4096
+#define DP_RXDMA_REFILL_RING_SIZE 2048
+#define DP_RXDMA_ERR_DST_RING_SIZE 1024
+#define DP_RXDMA_MON_STATUS_RING_SIZE 1024
+#define DP_RXDMA_MONITOR_BUF_RING_SIZE 4096
+#define DP_RXDMA_MONITOR_DST_RING_SIZE 2048
+#define DP_RXDMA_MONITOR_DESC_RING_SIZE 4096
+
+#define DP_RX_BUFFER_SIZE 2048
+#define DP_RX_BUFFER_ALIGN_SIZE 128
+
+#define DP_RXDMA_BUF_COOKIE_BUF_ID GENMASK(17, 0)
+#define DP_RXDMA_BUF_COOKIE_PDEV_ID GENMASK(20, 18)
+
+#define DP_HW2SW_MACID(mac_id) ((mac_id) ? ((mac_id) - 1) : 0)
+#define DP_SW2HW_MACID(mac_id) ((mac_id) + 1)
+
+#define DP_TX_DESC_ID_MAC_ID GENMASK(1, 0)
+#define DP_TX_DESC_ID_MSDU_ID GENMASK(18, 2)
+#define DP_TX_DESC_ID_POOL_ID GENMASK(20, 19)
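
These GENMASK fields are meant to be packed and unpacked with
FIELD_PREP()/FIELD_GET() from <linux/bitfield.h>; a small sketch for the
rx buffer cookie (the helper names are illustrative):

#include <linux/bitfield.h>

static inline u32 rx_buf_cookie(u32 pdev_id, u32 buf_id)
{
        return FIELD_PREP(DP_RXDMA_BUF_COOKIE_PDEV_ID, pdev_id) |
               FIELD_PREP(DP_RXDMA_BUF_COOKIE_BUF_ID, buf_id);
}

static inline u32 rx_buf_cookie_buf_id(u32 cookie)
{
        return FIELD_GET(DP_RXDMA_BUF_COOKIE_BUF_ID, cookie);
}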
+
+struct ath11k_dp {
+ struct ath11k_base *ab;
+ enum ath11k_htc_ep_id eid;
+ struct completion htt_tgt_version_received;
+ u8 htt_tgt_ver_major;
+ u8 htt_tgt_ver_minor;
+ struct dp_link_desc_bank link_desc_banks[DP_LINK_DESC_BANKS_MAX];
+ struct dp_srng wbm_idle_ring;
+ struct dp_srng wbm_desc_rel_ring;
+ struct dp_srng tcl_cmd_ring;
+ struct dp_srng tcl_status_ring;
+ struct dp_srng reo_reinject_ring;
+ struct dp_srng rx_rel_ring;
+ struct dp_srng reo_except_ring;
+ struct dp_srng reo_cmd_ring;
+ struct dp_srng reo_status_ring;
+ struct dp_tx_ring tx_ring[DP_TCL_NUM_RING_MAX];
+ struct hal_wbm_idle_scatter_list scatter_list[DP_IDLE_SCATTER_BUFS_MAX];
+ struct list_head reo_cmd_list;
+ struct list_head reo_cmd_cache_flush_list;
+ /* protects access to reo_cmd_list and reo_cmd_cache_flush_list */
+ spinlock_t reo_cmd_lock;
+};
+
+/* HTT definitions */
+
+#define HTT_TCL_META_DATA_TYPE BIT(0)
+#define HTT_TCL_META_DATA_VALID_HTT BIT(1)
+
+/* vdev meta data */
+#define HTT_TCL_META_DATA_VDEV_ID GENMASK(9, 2)
+#define HTT_TCL_META_DATA_PDEV_ID GENMASK(11, 10)
+#define HTT_TCL_META_DATA_HOST_INSPECTED BIT(12)
+
+/* peer meta data */
+#define HTT_TCL_META_DATA_PEER_ID GENMASK(15, 2)
+
+#define HTT_TX_WBM_COMP_STATUS_OFFSET 8
+
+ /* HTT tx completion is overlaid in wbm_release_ring */
+#define HTT_TX_WBM_COMP_INFO0_STATUS GENMASK(12, 9)
+#define HTT_TX_WBM_COMP_INFO0_REINJECT_REASON GENMASK(16, 13)
+
+#define HTT_TX_WBM_COMP_INFO1_ACK_RSSI GENMASK(31, 24)
+
+struct htt_tx_wbm_completion {
+ u32 info0;
+ u32 info1;
+ u32 info2;
+ u32 info3;
+} __packed;
+
+enum htt_h2t_msg_type {
+ HTT_H2T_MSG_TYPE_VERSION_REQ = 0,
+ HTT_H2T_MSG_TYPE_SRING_SETUP = 0xb,
+ HTT_H2T_MSG_TYPE_RX_RING_SELECTION_CFG = 0xc,
+ HTT_H2T_MSG_TYPE_EXT_STATS_CFG = 0x10,
+ HTT_H2T_MSG_TYPE_PPDU_STATS_CFG = 0x11,
+};
+
+#define HTT_VER_REQ_INFO_MSG_ID GENMASK(7, 0)
+
+struct htt_ver_req_cmd {
+ u32 ver_reg_info;
+} __packed;
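
The version request is a single word with the message id in its low
byte; a sketch of filling it with the mask above (hedged: the actual
request path lives elsewhere in the driver):

#include <linux/bitfield.h>

static void htt_fill_ver_req(struct htt_ver_req_cmd *cmd)
{
        cmd->ver_reg_info = FIELD_PREP(HTT_VER_REQ_INFO_MSG_ID,
                                       HTT_H2T_MSG_TYPE_VERSION_REQ);
}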
+
+enum htt_srng_ring_type {
+ HTT_HW_TO_SW_RING,
+ HTT_SW_TO_HW_RING,
+ HTT_SW_TO_SW_RING,
+};
+
+enum htt_srng_ring_id {
+ HTT_RXDMA_HOST_BUF_RING,
+ HTT_RXDMA_MONITOR_STATUS_RING,
+ HTT_RXDMA_MONITOR_BUF_RING,
+ HTT_RXDMA_MONITOR_DESC_RING,
+ HTT_RXDMA_MONITOR_DEST_RING,
+ HTT_HOST1_TO_FW_RXBUF_RING,
+ HTT_HOST2_TO_FW_RXBUF_RING,
+ HTT_RXDMA_NON_MONITOR_DEST_RING,
+};
+
+/* host -> target HTT_SRING_SETUP message
+ *
+ * After the target has booted up, the host can send an SRING setup message for
+ * each host-facing LMAC SRING. The target sets up HW registers based
+ * on the setup message and confirms back to the host if response_required is
+ * set. The host should wait for the confirmation message before sending a new
+ * SRING setup message.
+ *
+ * The message would appear as follows:
+ *
+ * |31 24|23 20|19|18 16|15|14 8|7 0|
+ * |-----------------+-----------------+----------------+------------------|
+ * | ring_type | ring_id | pdev_id | msg_type |
+ * |----------------------------------------------------------------------|
+ * | ring_base_addr_lo |
+ * |----------------------------------------------------------------------|
+ * | ring_base_addr_hi |
+ * |----------------------------------------------------------------------|
+ * |ring_misc_cfg_flag|ring_entry_size| ring_size |
+ * |----------------------------------------------------------------------|
+ * | ring_head_offset32_remote_addr_lo |
+ * |----------------------------------------------------------------------|
+ * | ring_head_offset32_remote_addr_hi |
+ * |----------------------------------------------------------------------|
+ * | ring_tail_offset32_remote_addr_lo |
+ * |----------------------------------------------------------------------|
+ * | ring_tail_offset32_remote_addr_hi |
+ * |----------------------------------------------------------------------|
+ * | ring_msi_addr_lo |
+ * |----------------------------------------------------------------------|
+ * | ring_msi_addr_hi |
+ * |----------------------------------------------------------------------|
+ * | ring_msi_data |
+ * |----------------------------------------------------------------------|
+ * | intr_timer_th |IM| intr_batch_counter_th |
+ * |----------------------------------------------------------------------|
+ * | reserved |RR|PTCF| intr_low_threshold |
+ * |----------------------------------------------------------------------|
+ * Where
+ * IM = sw_intr_mode
+ * RR = response_required
+ * PTCF = prefetch_timer_cfg
+ *
+ * The message is interpreted as follows:
+ * dword0 - b'0:7 - msg_type: This will be set to
+ * HTT_H2T_MSG_TYPE_SRING_SETUP
+ * b'8:15 - pdev_id:
+ * 0 (for rings at SOC/UMAC level),
+ * 1/2/3 mac id (for rings at LMAC level)
+ * b'16:23 - ring_id: identifies which ring is to be set up;
+ * see enum htt_srng_ring_id for more details
+ * b'24:31 - ring_type: identifies the type of host ring;
+ * see enum htt_srng_ring_type for more details
+ * dword1 - b'0:31 - ring_base_addr_lo: Lower 32bits of ring base address
+ * dword2 - b'0:31 - ring_base_addr_hi: Upper 32bits of ring base address
+ * dword3 - b'0:15 - ring_size: size of the ring in units of 4-byte words
+ * b'16:23 - ring_entry_size: Size of each entry in 4-byte word units
+ * b'24:31 - ring_misc_cfg_flag: Valid only for HW_TO_SW_RING and
+ * SW_TO_HW_RING.
+ * Refer to HTT_SRING_SETUP_RING_MISC_CFG_RING defs.
+ * dword4 - b'0:31 - ring_head_off32_remote_addr_lo:
+ * Lower 32 bits of memory address of the remote variable
+ * storing the 4-byte word offset that identifies the head
+ * element within the ring.
+ * (The head offset variable has type u32.)
+ * Valid for HW_TO_SW and SW_TO_SW rings.
+ * dword5 - b'0:31 - ring_head_off32_remote_addr_hi:
+ * Upper 32 bits of memory address of the remote variable
+ * storing the 4-byte word offset that identifies the head
+ * element within the ring.
+ * (The head offset variable has type u32.)
+ * Valid for HW_TO_SW and SW_TO_SW rings.
+ * dword6 - b'0:31 - ring_tail_off32_remote_addr_lo:
+ * Lower 32 bits of memory address of the remote variable
+ * storing the 4-byte word offset that identifies the tail
+ * element within the ring.
+ * (The tail offset variable has type u32.)
+ * Valid for HW_TO_SW and SW_TO_SW rings.
+ * dword7 - b'0:31 - ring_tail_off32_remote_addr_hi:
+ * Upper 32 bits of memory address of the remote variable
+ * storing the 4-byte word offset that identifies the tail
+ * element within the ring.
+ * (The tail offset variable has type u32.)
+ * Valid for HW_TO_SW and SW_TO_SW rings.
+ * dword8 - b'0:31 - ring_msi_addr_lo: Lower 32bits of MSI cfg address
+ * valid only for HW_TO_SW_RING and SW_TO_HW_RING
+ * dword9 - b'0:31 - ring_msi_addr_hi: Upper 32bits of MSI cfg address
+ * valid only for HW_TO_SW_RING and SW_TO_HW_RING
+ * dword10 - b'0:31 - ring_msi_data: MSI data
+ * Refer to HTT_SRING_SETUP_RING_MSC_CFG_xxx defs
+ * valid only for HW_TO_SW_RING and SW_TO_HW_RING
+ * dword11 - b'0:14 - intr_batch_counter_th:
+ * batch counter threshold is in units of 4-byte words.
+ * HW internally maintains and increments batch count.
+ * (see the SRING spec for a detailed description).
+ * When batch count reaches threshold value, an interrupt
+ * is generated by HW.
+ * b'15 - sw_intr_mode:
+ * This configuration shall be static.
+ * Only programmed at power up.
+ * 0: generate pulse style sw interrupts
+ * 1: generate level style sw interrupts
+ * b'16:31 - intr_timer_th:
+ * The timer init value when timer is idle or is
+ * initialized to start downcounting.
+ * In 8us units (to cover a range of 0 to 524 ms)
+ * dword12 - b'0:15 - intr_low_threshold:
+ * Used only by Consumer ring to generate ring_sw_int_p.
+ * Ring entries low threshold watermark, used
+ * in combination with the interrupt timer as well as
+ * the clearing of the level interrupt.
+ * b'16:18 - prefetch_timer_cfg:
+ * Used only by Consumer ring to set timer mode to
+ * support Application prefetch handling.
+ * The external tail offset/pointer will be updated
+ * at following intervals:
+ * 3'b000: (Prefetch feature disabled; used only for debug)
+ * 3'b001: 1 usec
+ * 3'b010: 4 usec
+ * 3'b011: 8 usec (default)
+ * 3'b100: 16 usec
+ * Others: Reserved
+ * b'19 - response_required:
+ * Host needs HTT_T2H_MSG_TYPE_SRING_SETUP_DONE as response
+ * b'20:31 - reserved: reserved for future use
+ */
+
+#define HTT_SRNG_SETUP_CMD_INFO0_MSG_TYPE GENMASK(7, 0)
+#define HTT_SRNG_SETUP_CMD_INFO0_PDEV_ID GENMASK(15, 8)
+#define HTT_SRNG_SETUP_CMD_INFO0_RING_ID GENMASK(23, 16)
+#define HTT_SRNG_SETUP_CMD_INFO0_RING_TYPE GENMASK(31, 24)
+
+#define HTT_SRNG_SETUP_CMD_INFO1_RING_SIZE GENMASK(15, 0)
+#define HTT_SRNG_SETUP_CMD_INFO1_RING_ENTRY_SIZE GENMASK(23, 16)
+#define HTT_SRNG_SETUP_CMD_INFO1_RING_LOOP_CNT_DIS BIT(25)
+#define HTT_SRNG_SETUP_CMD_INFO1_RING_FLAGS_MSI_SWAP BIT(27)
+#define HTT_SRNG_SETUP_CMD_INFO1_RING_FLAGS_HOST_FW_SWAP BIT(28)
+#define HTT_SRNG_SETUP_CMD_INFO1_RING_FLAGS_TLV_SWAP BIT(29)
+
+#define HTT_SRNG_SETUP_CMD_INTR_INFO_BATCH_COUNTER_THRESH GENMASK(14, 0)
+#define HTT_SRNG_SETUP_CMD_INTR_INFO_SW_INTR_MODE BIT(15)
+#define HTT_SRNG_SETUP_CMD_INTR_INFO_INTR_TIMER_THRESH GENMASK(31, 16)
+
+#define HTT_SRNG_SETUP_CMD_INFO2_INTR_LOW_THRESH GENMASK(15, 0)
+#define HTT_SRNG_SETUP_CMD_INFO2_PRE_FETCH_TIMER_CFG BIT(16)
+#define HTT_SRNG_SETUP_CMD_INFO2_RESPONSE_REQUIRED BIT(19)
+
+struct htt_srng_setup_cmd {
+ u32 info0;
+ u32 ring_base_addr_lo;
+ u32 ring_base_addr_hi;
+ u32 info1;
+ u32 ring_head_off32_remote_addr_lo;
+ u32 ring_head_off32_remote_addr_hi;
+ u32 ring_tail_off32_remote_addr_lo;
+ u32 ring_tail_off32_remote_addr_hi;
+ u32 ring_msi_addr_lo;
+ u32 ring_msi_addr_hi;
+ u32 msi_data;
+ u32 intr_info;
+ u32 info2;
+} __packed;
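+
+/* Illustrative sketch (comment only): filling the first word of
+ * struct htt_srng_setup_cmd per the layout documented above, assuming
+ * FIELD_PREP from <linux/bitfield.h> (pdev_id/ring_id/ring_type are
+ * hypothetical variables):
+ *
+ *	cmd->info0 = FIELD_PREP(HTT_SRNG_SETUP_CMD_INFO0_MSG_TYPE,
+ *				HTT_H2T_MSG_TYPE_SRING_SETUP) |
+ *		     FIELD_PREP(HTT_SRNG_SETUP_CMD_INFO0_PDEV_ID, pdev_id) |
+ *		     FIELD_PREP(HTT_SRNG_SETUP_CMD_INFO0_RING_ID, ring_id) |
+ *		     FIELD_PREP(HTT_SRNG_SETUP_CMD_INFO0_RING_TYPE, ring_type);
+ */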
+
+/* host -> target FW PPDU_STATS config message
+ *
+ * @details
+ * The following field definitions describe the format of the HTT host
+ * to target FW for PPDU_STATS_CFG msg.
+ * The message allows the host to configure the PPDU_STATS_IND messages
+ * produced by the target.
+ *
+ * |31 24|23 16|15 8|7 0|
+ * |-----------------------------------------------------------|
+ * | REQ bit mask | pdev_mask | msg type |
+ * |-----------------------------------------------------------|
+ * Header fields:
+ * - MSG_TYPE
+ * Bits 7:0
+ * Purpose: identifies this is a req to configure ppdu_stats_ind from target
+ * Value: 0x11
+ * - PDEV_MASK
+ * Bits 8:15
+ * Purpose: identifies which pdevs this PPDU stats configuration applies to
+ * Value: This is an overloaded field, refer to usage and interpretation of
+ * PDEV in interface document.
+ * Bit 8 : Reserved for SOC stats
+ * Bit 9 - 15 : Indicates PDEV_MASK in DBDC
+ * Indicates MACID_MASK in DBS
+ * - REQ_TLV_BIT_MASK
+ * Bits 16:31
+ * Purpose: each set bit indicates the corresponding PPDU stats TLV type
+ * needs to be included in the target's PPDU_STATS_IND messages.
+ * Value: refer to htt_ppdu_stats_tlv_tag_t
+ *
+ */
+
+struct htt_ppdu_stats_cfg_cmd {
+ u32 msg;
+} __packed;
+
+#define HTT_PPDU_STATS_CFG_MSG_TYPE GENMASK(7, 0)
+#define HTT_PPDU_STATS_CFG_PDEV_ID GENMASK(16, 9)
+#define HTT_PPDU_STATS_CFG_TLV_TYPE_BITMASK GENMASK(31, 16)
+
+enum htt_ppdu_stats_tag_type {
+ HTT_PPDU_STATS_TAG_COMMON,
+ HTT_PPDU_STATS_TAG_USR_COMMON,
+ HTT_PPDU_STATS_TAG_USR_RATE,
+ HTT_PPDU_STATS_TAG_USR_MPDU_ENQ_BITMAP_64,
+ HTT_PPDU_STATS_TAG_USR_MPDU_ENQ_BITMAP_256,
+ HTT_PPDU_STATS_TAG_SCH_CMD_STATUS,
+ HTT_PPDU_STATS_TAG_USR_COMPLTN_COMMON,
+ HTT_PPDU_STATS_TAG_USR_COMPLTN_BA_BITMAP_64,
+ HTT_PPDU_STATS_TAG_USR_COMPLTN_BA_BITMAP_256,
+ HTT_PPDU_STATS_TAG_USR_COMPLTN_ACK_BA_STATUS,
+ HTT_PPDU_STATS_TAG_USR_COMPLTN_FLUSH,
+ HTT_PPDU_STATS_TAG_USR_COMMON_ARRAY,
+ HTT_PPDU_STATS_TAG_INFO,
+ HTT_PPDU_STATS_TAG_TX_MGMTCTRL_PAYLOAD,
+
+ /* New TLVs are added above this line */
+ HTT_PPDU_STATS_TAG_MAX,
+};
+
+#define HTT_PPDU_STATS_TAG_DEFAULT (BIT(HTT_PPDU_STATS_TAG_COMMON) \
+ | BIT(HTT_PPDU_STATS_TAG_USR_COMMON) \
+ | BIT(HTT_PPDU_STATS_TAG_USR_RATE) \
+ | BIT(HTT_PPDU_STATS_TAG_SCH_CMD_STATUS) \
+ | BIT(HTT_PPDU_STATS_TAG_USR_COMPLTN_COMMON) \
+ | BIT(HTT_PPDU_STATS_TAG_USR_COMPLTN_ACK_BA_STATUS) \
+ | BIT(HTT_PPDU_STATS_TAG_USR_COMPLTN_FLUSH) \
+ | BIT(HTT_PPDU_STATS_TAG_USR_COMMON_ARRAY))
+
+#define HTT_PPDU_STATS_TAG_PKTLOG (BIT(HTT_PPDU_STATS_TAG_USR_MPDU_ENQ_BITMAP_64) | \
+ BIT(HTT_PPDU_STATS_TAG_USR_MPDU_ENQ_BITMAP_256) | \
+ BIT(HTT_PPDU_STATS_TAG_USR_COMPLTN_BA_BITMAP_64) | \
+ BIT(HTT_PPDU_STATS_TAG_USR_COMPLTN_BA_BITMAP_256) | \
+ BIT(HTT_PPDU_STATS_TAG_INFO) | \
+ BIT(HTT_PPDU_STATS_TAG_TX_MGMTCTRL_PAYLOAD) | \
+ HTT_PPDU_STATS_TAG_DEFAULT)
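+
+/* Illustrative sketch (comment only): composing the single-word PPDU stats
+ * config message from the masks above (pdev_mask is a hypothetical
+ * variable):
+ *
+ *	cmd->msg = FIELD_PREP(HTT_PPDU_STATS_CFG_MSG_TYPE,
+ *			      HTT_H2T_MSG_TYPE_PPDU_STATS_CFG) |
+ *		   FIELD_PREP(HTT_PPDU_STATS_CFG_PDEV_ID, pdev_mask) |
+ *		   FIELD_PREP(HTT_PPDU_STATS_CFG_TLV_TYPE_BITMASK,
+ *			      HTT_PPDU_STATS_TAG_DEFAULT);
+ */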
+
+/* HTT_H2T_MSG_TYPE_RX_RING_SELECTION_CFG Message
+ *
+ * details:
+ * HTT_H2T_MSG_TYPE_RX_RING_SELECTION_CFG message is sent by host to
+ * configure RXDMA rings.
+ * The configuration is per ring based and includes both packet subtypes
+ * and PPDU/MPDU TLVs.
+ *
+ * The message would appear as follows:
+ *
+ * |31 26|25|24|23 16|15 8|7 0|
+ * |-----------------+----------------+----------------+---------------|
+ * | rsvd1 |PS|SS| ring_id | pdev_id | msg_type |
+ * |-------------------------------------------------------------------|
+ * | rsvd2 | ring_buffer_size |
+ * |-------------------------------------------------------------------|
+ * | packet_type_enable_flags_0 |
+ * |-------------------------------------------------------------------|
+ * | packet_type_enable_flags_1 |
+ * |-------------------------------------------------------------------|
+ * | packet_type_enable_flags_2 |
+ * |-------------------------------------------------------------------|
+ * | packet_type_enable_flags_3 |
+ * |-------------------------------------------------------------------|
+ * | tlv_filter_in_flags |
+ * |-------------------------------------------------------------------|
+ * Where:
+ * PS = pkt_swap
+ * SS = status_swap
+ * The message is interpreted as follows:
+ * dword0 - b'0:7 - msg_type: This will be set to
+ * HTT_H2T_MSG_TYPE_RX_RING_SELECTION_CFG
+ * b'8:15 - pdev_id:
+ * 0 (for rings at SOC/UMAC level),
+ * 1/2/3 mac id (for rings at LMAC level)
+ * b'16:23 - ring_id : Identifies the ring to configure.
+ * See enum htt_srng_ring_id for more details.
+ * b'24 - status_swap: 1 is to swap status TLV
+ * b'25 - pkt_swap: 1 is to swap packet TLV
+ * b'26:31 - rsvd1: reserved for future use
+ * dword1 - b'0:15 - ring_buffer_size: size of buffers referenced by rx ring,
+ * in byte units.
+ * Valid only for HW_TO_SW_RING and SW_TO_HW_RING
+ * - b'16:31 - rsvd2: Reserved for future use
+ * dword2 - b'0:31 - packet_type_enable_flags_0:
+ * Enable MGMT packet from 0b0000 to 0b1001
+ * bits from low to high: FP, MD, MO - 3 bits
+ * FP: Filter_Pass
+ * MD: Monitor_Direct
+ * MO: Monitor_Other
+ * 10 mgmt subtypes * 3 bits -> 30 bits
+ * Refer to PKT_TYPE_ENABLE_FLAG0_xxx_MGMT_xxx defs
+ * dword3 - b'0:31 - packet_type_enable_flags_1:
+ * Enable MGMT packet from 0b1010 to 0b1111
+ * bits from low to high: FP, MD, MO - 3 bits
+ * Refer to PKT_TYPE_ENABLE_FLAG1_xxx_MGMT_xxx defs
+ * dword4 - b'0:31 - packet_type_enable_flags_2:
+ * Enable CTRL packet from 0b0000 to 0b1001
+ * bits from low to high: FP, MD, MO - 3 bits
+ * Refer to PKT_TYPE_ENABLE_FLAG2_xxx_CTRL_xxx defs
+ * dword5 - b'0:31 - packet_type_enable_flags_3:
+ * Enable CTRL packet from 0b1010 to 0b1111,
+ * MCAST_DATA, UCAST_DATA, NULL_DATA
+ * bits from low to high: FP, MD, MO - 3 bits
+ * Refer to PKT_TYPE_ENABLE_FLAG3_xxx_CTRL_xxx defs
+ * dword6 - b'0:31 - tlv_filter_in_flags:
+ * Filter in Attention/MPDU/PPDU/Header/User tlvs
+ * Refer to CFG_TLV_FILTER_IN_FLAG defs
+ */
+
+#define HTT_RX_RING_SELECTION_CFG_CMD_INFO0_MSG_TYPE GENMASK(7, 0)
+#define HTT_RX_RING_SELECTION_CFG_CMD_INFO0_PDEV_ID GENMASK(15, 8)
+#define HTT_RX_RING_SELECTION_CFG_CMD_INFO0_RING_ID GENMASK(23, 16)
+#define HTT_RX_RING_SELECTION_CFG_CMD_INFO0_SS BIT(24)
+#define HTT_RX_RING_SELECTION_CFG_CMD_INFO0_PS BIT(25)
+
+#define HTT_RX_RING_SELECTION_CFG_CMD_INFO1_BUF_SIZE GENMASK(15, 0)
+
+enum htt_rx_filter_tlv_flags {
+ HTT_RX_FILTER_TLV_FLAGS_MPDU_START = BIT(0),
+ HTT_RX_FILTER_TLV_FLAGS_MSDU_START = BIT(1),
+ HTT_RX_FILTER_TLV_FLAGS_RX_PACKET = BIT(2),
+ HTT_RX_FILTER_TLV_FLAGS_MSDU_END = BIT(3),
+ HTT_RX_FILTER_TLV_FLAGS_MPDU_END = BIT(4),
+ HTT_RX_FILTER_TLV_FLAGS_PACKET_HEADER = BIT(5),
+ HTT_RX_FILTER_TLV_FLAGS_PER_MSDU_HEADER = BIT(6),
+ HTT_RX_FILTER_TLV_FLAGS_ATTENTION = BIT(7),
+ HTT_RX_FILTER_TLV_FLAGS_PPDU_START = BIT(8),
+ HTT_RX_FILTER_TLV_FLAGS_PPDU_END = BIT(9),
+ HTT_RX_FILTER_TLV_FLAGS_PPDU_END_USER_STATS = BIT(10),
+ HTT_RX_FILTER_TLV_FLAGS_PPDU_END_USER_STATS_EXT = BIT(11),
+ HTT_RX_FILTER_TLV_FLAGS_PPDU_END_STATUS_DONE = BIT(12),
+};
+
+enum htt_rx_mgmt_pkt_filter_tlv_flags0 {
+ HTT_RX_FP_MGMT_PKT_FILTER_TLV_FLAGS0_ASSOC_REQ = BIT(0),
+ HTT_RX_MD_MGMT_PKT_FILTER_TLV_FLAGS0_ASSOC_REQ = BIT(1),
+ HTT_RX_MO_MGMT_PKT_FILTER_TLV_FLAGS0_ASSOC_REQ = BIT(2),
+ HTT_RX_FP_MGMT_PKT_FILTER_TLV_FLAGS0_ASSOC_RESP = BIT(3),
+ HTT_RX_MD_MGMT_PKT_FILTER_TLV_FLAGS0_ASSOC_RESP = BIT(4),
+ HTT_RX_MO_MGMT_PKT_FILTER_TLV_FLAGS0_ASSOC_RESP = BIT(5),
+ HTT_RX_FP_MGMT_PKT_FILTER_TLV_FLAGS0_REASSOC_REQ = BIT(6),
+ HTT_RX_MD_MGMT_PKT_FILTER_TLV_FLAGS0_REASSOC_REQ = BIT(7),
+ HTT_RX_MO_MGMT_PKT_FILTER_TLV_FLAGS0_REASSOC_REQ = BIT(8),
+ HTT_RX_FP_MGMT_PKT_FILTER_TLV_FLAGS0_REASSOC_RESP = BIT(9),
+ HTT_RX_MD_MGMT_PKT_FILTER_TLV_FLAGS0_REASSOC_RESP = BIT(10),
+ HTT_RX_MO_MGMT_PKT_FILTER_TLV_FLAGS0_REASSOC_RESP = BIT(11),
+ HTT_RX_FP_MGMT_PKT_FILTER_TLV_FLAGS0_PROBE_REQ = BIT(12),
+ HTT_RX_MD_MGMT_PKT_FILTER_TLV_FLAGS0_PROBE_REQ = BIT(13),
+ HTT_RX_MO_MGMT_PKT_FILTER_TLV_FLAGS0_PROBE_REQ = BIT(14),
+ HTT_RX_FP_MGMT_PKT_FILTER_TLV_FLAGS0_PROBE_RESP = BIT(15),
+ HTT_RX_MD_MGMT_PKT_FILTER_TLV_FLAGS0_PROBE_RESP = BIT(16),
+ HTT_RX_MO_MGMT_PKT_FILTER_TLV_FLAGS0_PROBE_RESP = BIT(17),
+ HTT_RX_FP_MGMT_PKT_FILTER_TLV_FLAGS0_PROBE_TIMING_ADV = BIT(18),
+ HTT_RX_MD_MGMT_PKT_FILTER_TLV_FLAGS0_PROBE_TIMING_ADV = BIT(19),
+ HTT_RX_MO_MGMT_PKT_FILTER_TLV_FLAGS0_PROBE_TIMING_ADV = BIT(20),
+ HTT_RX_FP_MGMT_PKT_FILTER_TLV_FLAGS0_RESERVED_7 = BIT(21),
+ HTT_RX_MD_MGMT_PKT_FILTER_TLV_FLAGS0_RESERVED_7 = BIT(22),
+ HTT_RX_MO_MGMT_PKT_FILTER_TLV_FLAGS0_RESERVED_7 = BIT(23),
+ HTT_RX_FP_MGMT_PKT_FILTER_TLV_FLAGS0_BEACON = BIT(24),
+ HTT_RX_MD_MGMT_PKT_FILTER_TLV_FLAGS0_BEACON = BIT(25),
+ HTT_RX_MO_MGMT_PKT_FILTER_TLV_FLAGS0_BEACON = BIT(26),
+ HTT_RX_FP_MGMT_PKT_FILTER_TLV_FLAGS0_ATIM = BIT(27),
+ HTT_RX_MD_MGMT_PKT_FILTER_TLV_FLAGS0_ATIM = BIT(28),
+ HTT_RX_MO_MGMT_PKT_FILTER_TLV_FLAGS0_ATIM = BIT(29),
+};
+
+enum htt_rx_mgmt_pkt_filter_tlv_flags1 {
+ HTT_RX_FP_MGMT_PKT_FILTER_TLV_FLAGS1_DISASSOC = BIT(0),
+ HTT_RX_MD_MGMT_PKT_FILTER_TLV_FLAGS1_DISASSOC = BIT(1),
+ HTT_RX_MO_MGMT_PKT_FILTER_TLV_FLAGS1_DISASSOC = BIT(2),
+ HTT_RX_FP_MGMT_PKT_FILTER_TLV_FLAGS1_AUTH = BIT(3),
+ HTT_RX_MD_MGMT_PKT_FILTER_TLV_FLAGS1_AUTH = BIT(4),
+ HTT_RX_MO_MGMT_PKT_FILTER_TLV_FLAGS1_AUTH = BIT(5),
+ HTT_RX_FP_MGMT_PKT_FILTER_TLV_FLAGS1_DEAUTH = BIT(6),
+ HTT_RX_MD_MGMT_PKT_FILTER_TLV_FLAGS1_DEAUTH = BIT(7),
+ HTT_RX_MO_MGMT_PKT_FILTER_TLV_FLAGS1_DEAUTH = BIT(8),
+ HTT_RX_FP_MGMT_PKT_FILTER_TLV_FLAGS1_ACTION = BIT(9),
+ HTT_RX_MD_MGMT_PKT_FILTER_TLV_FLAGS1_ACTION = BIT(10),
+ HTT_RX_MO_MGMT_PKT_FILTER_TLV_FLAGS1_ACTION = BIT(11),
+ HTT_RX_FP_MGMT_PKT_FILTER_TLV_FLAGS1_ACTION_NOACK = BIT(12),
+ HTT_RX_MD_MGMT_PKT_FILTER_TLV_FLAGS1_ACTION_NOACK = BIT(13),
+ HTT_RX_MO_MGMT_PKT_FILTER_TLV_FLAGS1_ACTION_NOACK = BIT(14),
+ HTT_RX_FP_MGMT_PKT_FILTER_TLV_FLAGS1_RESERVED_15 = BIT(15),
+ HTT_RX_MD_MGMT_PKT_FILTER_TLV_FLAGS1_RESERVED_15 = BIT(16),
+ HTT_RX_MO_MGMT_PKT_FILTER_TLV_FLAGS1_RESERVED_15 = BIT(17),
+};
+
+enum htt_rx_ctrl_pkt_filter_tlv_flags2 {
+ HTT_RX_FP_CTRL_PKT_FILTER_TLV_FLAGS2_CTRL_RESERVED_1 = BIT(0),
+ HTT_RX_MD_CTRL_PKT_FILTER_TLV_FLAGS2_CTRL_RESERVED_1 = BIT(1),
+ HTT_RX_MO_CTRL_PKT_FILTER_TLV_FLAGS2_CTRL_RESERVED_1 = BIT(2),
+ HTT_RX_FP_CTRL_PKT_FILTER_TLV_FLAGS2_CTRL_RESERVED_2 = BIT(3),
+ HTT_RX_MD_CTRL_PKT_FILTER_TLV_FLAGS2_CTRL_RESERVED_2 = BIT(4),
+ HTT_RX_MO_CTRL_PKT_FILTER_TLV_FLAGS2_CTRL_RESERVED_2 = BIT(5),
+ HTT_RX_FP_CTRL_PKT_FILTER_TLV_FLAGS2_CTRL_TRIGGER = BIT(6),
+ HTT_RX_MD_CTRL_PKT_FILTER_TLV_FLAGS2_CTRL_TRIGGER = BIT(7),
+ HTT_RX_MO_CTRL_PKT_FILTER_TLV_FLAGS2_CTRL_TRIGGER = BIT(8),
+ HTT_RX_FP_CTRL_PKT_FILTER_TLV_FLAGS2_CTRL_RESERVED_4 = BIT(9),
+ HTT_RX_MD_CTRL_PKT_FILTER_TLV_FLAGS2_CTRL_RESERVED_4 = BIT(10),
+ HTT_RX_MO_CTRL_PKT_FILTER_TLV_FLAGS2_CTRL_RESERVED_4 = BIT(11),
+ HTT_RX_FP_CTRL_PKT_FILTER_TLV_FLAGS2_CTRL_BF_REP_POLL = BIT(12),
+ HTT_RX_MD_CTRL_PKT_FILTER_TLV_FLAGS2_CTRL_BF_REP_POLL = BIT(13),
+ HTT_RX_MO_CTRL_PKT_FILTER_TLV_FLAGS2_CTRL_BF_REP_POLL = BIT(14),
+ HTT_RX_FP_CTRL_PKT_FILTER_TLV_FLAGS2_CTRL_VHT_NDP = BIT(15),
+ HTT_RX_MD_CTRL_PKT_FILTER_TLV_FLAGS2_CTRL_VHT_NDP = BIT(16),
+ HTT_RX_MO_CTRL_PKT_FILTER_TLV_FLAGS2_CTRL_VHT_NDP = BIT(17),
+ HTT_RX_FP_CTRL_PKT_FILTER_TLV_FLAGS2_CTRL_FRAME_EXT = BIT(18),
+ HTT_RX_MD_CTRL_PKT_FILTER_TLV_FLAGS2_CTRL_FRAME_EXT = BIT(19),
+ HTT_RX_MO_CTRL_PKT_FILTER_TLV_FLAGS2_CTRL_FRAME_EXT = BIT(20),
+ HTT_RX_FP_CTRL_PKT_FILTER_TLV_FLAGS2_CTRL_WRAPPER = BIT(21),
+ HTT_RX_MD_CTRL_PKT_FILTER_TLV_FLAGS2_CTRL_WRAPPER = BIT(22),
+ HTT_RX_MO_CTRL_PKT_FILTER_TLV_FLAGS2_CTRL_WRAPPER = BIT(23),
+ HTT_RX_FP_CTRL_PKT_FILTER_TLV_FLAGS2_BAR = BIT(24),
+ HTT_RX_MD_CTRL_PKT_FILTER_TLV_FLAGS2_BAR = BIT(25),
+ HTT_RX_MO_CTRL_PKT_FILTER_TLV_FLAGS2_BAR = BIT(26),
+ HTT_RX_FP_CTRL_PKT_FILTER_TLV_FLAGS2_BA = BIT(27),
+ HTT_RX_MD_CTRL_PKT_FILTER_TLV_FLAGS2_BA = BIT(28),
+ HTT_RX_MO_CTRL_PKT_FILTER_TLV_FLAGS2_BA = BIT(29),
+};
+
+enum htt_rx_ctrl_pkt_filter_tlv_flags3 {
+ HTT_RX_FP_CTRL_PKT_FILTER_TLV_FLAGS3_PSPOLL = BIT(0),
+ HTT_RX_MD_CTRL_PKT_FILTER_TLV_FLAGS3_PSPOLL = BIT(1),
+ HTT_RX_MO_CTRL_PKT_FILTER_TLV_FLAGS3_PSPOLL = BIT(2),
+ HTT_RX_FP_CTRL_PKT_FILTER_TLV_FLAGS3_RTS = BIT(3),
+ HTT_RX_MD_CTRL_PKT_FILTER_TLV_FLAGS3_RTS = BIT(4),
+ HTT_RX_MO_CTRL_PKT_FILTER_TLV_FLAGS3_RTS = BIT(5),
+ HTT_RX_FP_CTRL_PKT_FILTER_TLV_FLAGS3_CTS = BIT(6),
+ HTT_RX_MD_CTRL_PKT_FILTER_TLV_FLAGS3_CTS = BIT(7),
+ HTT_RX_MO_CTRL_PKT_FILTER_TLV_FLAGS3_CTS = BIT(8),
+ HTT_RX_FP_CTRL_PKT_FILTER_TLV_FLAGS3_ACK = BIT(9),
+ HTT_RX_MD_CTRL_PKT_FILTER_TLV_FLAGS3_ACK = BIT(10),
+ HTT_RX_MO_CTRL_PKT_FILTER_TLV_FLAGS3_ACK = BIT(11),
+ HTT_RX_FP_CTRL_PKT_FILTER_TLV_FLAGS3_CFEND = BIT(12),
+ HTT_RX_MD_CTRL_PKT_FILTER_TLV_FLAGS3_CFEND = BIT(13),
+ HTT_RX_MO_CTRL_PKT_FILTER_TLV_FLAGS3_CFEND = BIT(14),
+ HTT_RX_FP_CTRL_PKT_FILTER_TLV_FLAGS3_CFEND_ACK = BIT(15),
+ HTT_RX_MD_CTRL_PKT_FILTER_TLV_FLAGS3_CFEND_ACK = BIT(16),
+ HTT_RX_MO_CTRL_PKT_FILTER_TLV_FLAGS3_CFEND_ACK = BIT(17),
+};
+
+enum htt_rx_data_pkt_filter_tlv_flasg3 {
+ HTT_RX_FP_DATA_PKT_FILTER_TLV_FLASG3_MCAST = BIT(18),
+ HTT_RX_MD_DATA_PKT_FILTER_TLV_FLASG3_MCAST = BIT(19),
+ HTT_RX_MO_DATA_PKT_FILTER_TLV_FLASG3_MCAST = BIT(20),
+ HTT_RX_FP_DATA_PKT_FILTER_TLV_FLASG3_UCAST = BIT(21),
+ HTT_RX_MD_DATA_PKT_FILTER_TLV_FLASG3_UCAST = BIT(22),
+ HTT_RX_MO_DATA_PKT_FILTER_TLV_FLASG3_UCAST = BIT(23),
+ HTT_RX_FP_DATA_PKT_FILTER_TLV_FLASG3_NULL_DATA = BIT(24),
+ HTT_RX_MD_DATA_PKT_FILTER_TLV_FLASG3_NULL_DATA = BIT(25),
+ HTT_RX_MO_DATA_PKT_FILTER_TLV_FLASG3_NULL_DATA = BIT(26),
+};
+
+#define HTT_RX_FP_MGMT_FILTER_FLAGS0 \
+ (HTT_RX_FP_MGMT_PKT_FILTER_TLV_FLAGS0_ASSOC_REQ \
+ | HTT_RX_FP_MGMT_PKT_FILTER_TLV_FLAGS0_ASSOC_RESP \
+ | HTT_RX_FP_MGMT_PKT_FILTER_TLV_FLAGS0_REASSOC_REQ \
+ | HTT_RX_FP_MGMT_PKT_FILTER_TLV_FLAGS0_REASSOC_RESP \
+ | HTT_RX_FP_MGMT_PKT_FILTER_TLV_FLAGS0_PROBE_REQ \
+ | HTT_RX_FP_MGMT_PKT_FILTER_TLV_FLAGS0_PROBE_RESP \
+ | HTT_RX_FP_MGMT_PKT_FILTER_TLV_FLAGS0_PROBE_TIMING_ADV \
+ | HTT_RX_FP_MGMT_PKT_FILTER_TLV_FLAGS0_BEACON \
+ | HTT_RX_FP_MGMT_PKT_FILTER_TLV_FLAGS0_ATIM)
+
+#define HTT_RX_MD_MGMT_FILTER_FLAGS0 \
+ (HTT_RX_MD_MGMT_PKT_FILTER_TLV_FLAGS0_ASSOC_REQ \
+ | HTT_RX_MD_MGMT_PKT_FILTER_TLV_FLAGS0_ASSOC_RESP \
+ | HTT_RX_MD_MGMT_PKT_FILTER_TLV_FLAGS0_REASSOC_REQ \
+ | HTT_RX_MD_MGMT_PKT_FILTER_TLV_FLAGS0_REASSOC_RESP \
+ | HTT_RX_MD_MGMT_PKT_FILTER_TLV_FLAGS0_PROBE_REQ \
+ | HTT_RX_MD_MGMT_PKT_FILTER_TLV_FLAGS0_PROBE_RESP \
+ | HTT_RX_MD_MGMT_PKT_FILTER_TLV_FLAGS0_PROBE_TIMING_ADV \
+ | HTT_RX_MD_MGMT_PKT_FILTER_TLV_FLAGS0_BEACON \
+ | HTT_RX_MD_MGMT_PKT_FILTER_TLV_FLAGS0_ATIM)
+
+#define HTT_RX_MO_MGMT_FILTER_FLAGS0 \
+ (HTT_RX_MO_MGMT_PKT_FILTER_TLV_FLAGS0_ASSOC_REQ \
+ | HTT_RX_MO_MGMT_PKT_FILTER_TLV_FLAGS0_ASSOC_RESP \
+ | HTT_RX_MO_MGMT_PKT_FILTER_TLV_FLAGS0_REASSOC_REQ \
+ | HTT_RX_MO_MGMT_PKT_FILTER_TLV_FLAGS0_REASSOC_RESP \
+ | HTT_RX_MO_MGMT_PKT_FILTER_TLV_FLAGS0_PROBE_REQ \
+ | HTT_RX_MO_MGMT_PKT_FILTER_TLV_FLAGS0_PROBE_RESP \
+ | HTT_RX_MO_MGMT_PKT_FILTER_TLV_FLAGS0_PROBE_TIMING_ADV \
+ | HTT_RX_MO_MGMT_PKT_FILTER_TLV_FLAGS0_BEACON \
+ | HTT_RX_MO_MGMT_PKT_FILTER_TLV_FLAGS0_ATIM)
+
+#define HTT_RX_FP_MGMT_FILTER_FLAGS1 (HTT_RX_FP_MGMT_PKT_FILTER_TLV_FLAGS1_DISASSOC \
+ | HTT_RX_FP_MGMT_PKT_FILTER_TLV_FLAGS1_AUTH \
+ | HTT_RX_FP_MGMT_PKT_FILTER_TLV_FLAGS1_DEAUTH \
+ | HTT_RX_FP_MGMT_PKT_FILTER_TLV_FLAGS1_ACTION \
+ | HTT_RX_FP_MGMT_PKT_FILTER_TLV_FLAGS1_ACTION_NOACK)
+
+#define HTT_RX_MD_MGMT_FILTER_FLAGS1 (HTT_RX_MD_MGMT_PKT_FILTER_TLV_FLAGS1_DISASSOC \
+ | HTT_RX_MD_MGMT_PKT_FILTER_TLV_FLAGS1_AUTH \
+ | HTT_RX_MD_MGMT_PKT_FILTER_TLV_FLAGS1_DEAUTH \
+ | HTT_RX_MD_MGMT_PKT_FILTER_TLV_FLAGS1_ACTION \
+ | HTT_RX_MD_MGMT_PKT_FILTER_TLV_FLAGS1_ACTION_NOACK)
+
+#define HTT_RX_MO_MGMT_FILTER_FLAGS1 (HTT_RX_MO_MGMT_PKT_FILTER_TLV_FLAGS1_DISASSOC \
+ | HTT_RX_MO_MGMT_PKT_FILTER_TLV_FLAGS1_AUTH \
+ | HTT_RX_MO_MGMT_PKT_FILTER_TLV_FLAGS1_DEAUTH \
+ | HTT_RX_MO_MGMT_PKT_FILTER_TLV_FLAGS1_ACTION \
+ | HTT_RX_MO_MGMT_PKT_FILTER_TLV_FLAGS1_ACTION_NOACK)
+
+#define HTT_RX_FP_CTRL_FILTER_FLASG2 (HTT_RX_FP_CTRL_PKT_FILTER_TLV_FLAGS2_CTRL_WRAPPER \
+ | HTT_RX_FP_CTRL_PKT_FILTER_TLV_FLAGS2_BAR \
+ | HTT_RX_FP_CTRL_PKT_FILTER_TLV_FLAGS2_BA)
+
+#define HTT_RX_MD_CTRL_FILTER_FLASG2 (HTT_RX_MD_CTRL_PKT_FILTER_TLV_FLAGS2_CTRL_WRAPPER \
+ | HTT_RX_MD_CTRL_PKT_FILTER_TLV_FLAGS2_BAR \
+ | HTT_RX_MD_CTRL_PKT_FILTER_TLV_FLAGS2_BA)
+
+#define HTT_RX_MO_CTRL_FILTER_FLASG2 (HTT_RX_MO_CTRL_PKT_FILTER_TLV_FLAGS2_CTRL_WRAPPER \
+ | HTT_RX_MO_CTRL_PKT_FILTER_TLV_FLAGS2_BAR \
+ | HTT_RX_MO_CTRL_PKT_FILTER_TLV_FLAGS2_BA)
+
+#define HTT_RX_FP_CTRL_FILTER_FLASG3 (HTT_RX_FP_CTRL_PKT_FILTER_TLV_FLAGS3_PSPOLL \
+ | HTT_RX_FP_CTRL_PKT_FILTER_TLV_FLAGS3_RTS \
+ | HTT_RX_FP_CTRL_PKT_FILTER_TLV_FLAGS3_CTS \
+ | HTT_RX_FP_CTRL_PKT_FILTER_TLV_FLAGS3_ACK \
+ | HTT_RX_FP_CTRL_PKT_FILTER_TLV_FLAGS3_CFEND \
+ | HTT_RX_FP_CTRL_PKT_FILTER_TLV_FLAGS3_CFEND_ACK)
+
+#define HTT_RX_MD_CTRL_FILTER_FLASG3 (HTT_RX_MD_CTRL_PKT_FILTER_TLV_FLAGS3_PSPOLL \
+ | HTT_RX_MD_CTRL_PKT_FILTER_TLV_FLAGS3_RTS \
+ | HTT_RX_MD_CTRL_PKT_FILTER_TLV_FLAGS3_CTS \
+ | HTT_RX_MD_CTRL_PKT_FILTER_TLV_FLAGS3_ACK \
+ | HTT_RX_MD_CTRL_PKT_FILTER_TLV_FLAGS3_CFEND \
+ | HTT_RX_MD_CTRL_PKT_FILTER_TLV_FLAGS3_CFEND_ACK)
+
+#define HTT_RX_MO_CTRL_FILTER_FLASG3 (HTT_RX_MO_CTRL_PKT_FILTER_TLV_FLAGS3_PSPOLL \
+ | HTT_RX_MO_CTRL_PKT_FILTER_TLV_FLAGS3_RTS \
+ | HTT_RX_MO_CTRL_PKT_FILTER_TLV_FLAGS3_CTS \
+ | HTT_RX_MO_CTRL_PKT_FILTER_TLV_FLAGS3_ACK \
+ | HTT_RX_MO_CTRL_PKT_FILTER_TLV_FLAGS3_CFEND \
+ | HTT_RX_MO_CTRL_PKT_FILTER_TLV_FLAGS3_CFEND_ACK)
+
+#define HTT_RX_FP_DATA_FILTER_FLASG3 (HTT_RX_FP_DATA_PKT_FILTER_TLV_FLASG3_MCAST \
+ | HTT_RX_FP_DATA_PKT_FILTER_TLV_FLASG3_UCAST \
+ | HTT_RX_FP_DATA_PKT_FILTER_TLV_FLASG3_NULL_DATA)
+
+#define HTT_RX_MD_DATA_FILTER_FLASG3 (HTT_RX_MD_DATA_PKT_FILTER_TLV_FLASG3_MCAST \
+ | HTT_RX_MD_DATA_PKT_FILTER_TLV_FLASG3_UCAST \
+ | HTT_RX_MD_DATA_PKT_FILTER_TLV_FLASG3_NULL_DATA)
+
+#define HTT_RX_MO_DATA_FILTER_FLASG3 (HTT_RX_MO_DATA_PKT_FILTER_TLV_FLASG3_MCAST \
+ | HTT_RX_MO_DATA_PKT_FILTER_TLV_FLASG3_UCAST \
+ | HTT_RX_MO_DATA_PKT_FILTER_TLV_FLASG3_NULL_DATA)
+
+#define HTT_RX_MON_FP_MGMT_FILTER_FLAGS0 \
+ (HTT_RX_FP_MGMT_FILTER_FLAGS0 | \
+ HTT_RX_FP_MGMT_PKT_FILTER_TLV_FLAGS0_RESERVED_7)
+
+#define HTT_RX_MON_MO_MGMT_FILTER_FLAGS0 \
+ (HTT_RX_MO_MGMT_FILTER_FLAGS0 | \
+ HTT_RX_MO_MGMT_PKT_FILTER_TLV_FLAGS0_RESERVED_7)
+
+#define HTT_RX_MON_FP_MGMT_FILTER_FLAGS1 \
+ (HTT_RX_FP_MGMT_FILTER_FLAGS1 | \
+ HTT_RX_FP_MGMT_PKT_FILTER_TLV_FLAGS1_RESERVED_15)
+
+#define HTT_RX_MON_MO_MGMT_FILTER_FLAGS1 \
+ (HTT_RX_MO_MGMT_FILTER_FLAGS1 | \
+ HTT_RX_MO_MGMT_PKT_FILTER_TLV_FLAGS1_RESERVED_15)
+
+#define HTT_RX_MON_FP_CTRL_FILTER_FLASG2 \
+ (HTT_RX_FP_CTRL_FILTER_FLASG2 | \
+ HTT_RX_FP_CTRL_PKT_FILTER_TLV_FLAGS2_CTRL_RESERVED_1 | \
+ HTT_RX_FP_CTRL_PKT_FILTER_TLV_FLAGS2_CTRL_RESERVED_2 | \
+ HTT_RX_FP_CTRL_PKT_FILTER_TLV_FLAGS2_CTRL_TRIGGER | \
+ HTT_RX_FP_CTRL_PKT_FILTER_TLV_FLAGS2_CTRL_RESERVED_4 | \
+ HTT_RX_FP_CTRL_PKT_FILTER_TLV_FLAGS2_CTRL_BF_REP_POLL | \
+ HTT_RX_FP_CTRL_PKT_FILTER_TLV_FLAGS2_CTRL_VHT_NDP | \
+ HTT_RX_FP_CTRL_PKT_FILTER_TLV_FLAGS2_CTRL_FRAME_EXT)
+
+#define HTT_RX_MON_MO_CTRL_FILTER_FLASG2 \
+ (HTT_RX_MO_CTRL_FILTER_FLASG2 | \
+ HTT_RX_MO_CTRL_PKT_FILTER_TLV_FLAGS2_CTRL_RESERVED_1 | \
+ HTT_RX_MO_CTRL_PKT_FILTER_TLV_FLAGS2_CTRL_RESERVED_2 | \
+ HTT_RX_MO_CTRL_PKT_FILTER_TLV_FLAGS2_CTRL_TRIGGER | \
+ HTT_RX_MO_CTRL_PKT_FILTER_TLV_FLAGS2_CTRL_RESERVED_4 | \
+ HTT_RX_MO_CTRL_PKT_FILTER_TLV_FLAGS2_CTRL_BF_REP_POLL | \
+ HTT_RX_MO_CTRL_PKT_FILTER_TLV_FLAGS2_CTRL_VHT_NDP | \
+ HTT_RX_MO_CTRL_PKT_FILTER_TLV_FLAGS2_CTRL_FRAME_EXT)
+
+#define HTT_RX_MON_FP_CTRL_FILTER_FLASG3 HTT_RX_FP_CTRL_FILTER_FLASG3
+
+#define HTT_RX_MON_MO_CTRL_FILTER_FLASG3 HTT_RX_MO_CTRL_FILTER_FLASG3
+
+#define HTT_RX_MON_FP_DATA_FILTER_FLASG3 HTT_RX_FP_DATA_FILTER_FLASG3
+
+#define HTT_RX_MON_MO_DATA_FILTER_FLASG3 HTT_RX_MO_DATA_FILTER_FLASG3
+
+#define HTT_RX_MON_FILTER_TLV_FLAGS \
+ (HTT_RX_FILTER_TLV_FLAGS_MPDU_START | \
+ HTT_RX_FILTER_TLV_FLAGS_PPDU_START | \
+ HTT_RX_FILTER_TLV_FLAGS_PPDU_END | \
+ HTT_RX_FILTER_TLV_FLAGS_PPDU_END_USER_STATS | \
+ HTT_RX_FILTER_TLV_FLAGS_PPDU_END_USER_STATS_EXT | \
+ HTT_RX_FILTER_TLV_FLAGS_PPDU_END_STATUS_DONE)
+
+#define HTT_RX_MON_FILTER_TLV_FLAGS_MON_STATUS_RING \
+ (HTT_RX_FILTER_TLV_FLAGS_MPDU_START | \
+ HTT_RX_FILTER_TLV_FLAGS_PPDU_START | \
+ HTT_RX_FILTER_TLV_FLAGS_PPDU_END | \
+ HTT_RX_FILTER_TLV_FLAGS_PPDU_END_USER_STATS | \
+ HTT_RX_FILTER_TLV_FLAGS_PPDU_END_USER_STATS_EXT | \
+ HTT_RX_FILTER_TLV_FLAGS_PPDU_END_STATUS_DONE)
+
+#define HTT_RX_MON_FILTER_TLV_FLAGS_MON_BUF_RING \
+ (HTT_RX_FILTER_TLV_FLAGS_MPDU_START | \
+ HTT_RX_FILTER_TLV_FLAGS_MSDU_START | \
+ HTT_RX_FILTER_TLV_FLAGS_RX_PACKET | \
+ HTT_RX_FILTER_TLV_FLAGS_MSDU_END | \
+ HTT_RX_FILTER_TLV_FLAGS_MPDU_END | \
+ HTT_RX_FILTER_TLV_FLAGS_PACKET_HEADER | \
+ HTT_RX_FILTER_TLV_FLAGS_PER_MSDU_HEADER | \
+ HTT_RX_FILTER_TLV_FLAGS_ATTENTION)
+
+struct htt_rx_ring_selection_cfg_cmd {
+ u32 info0;
+ u32 info1;
+ u32 pkt_type_en_flags0;
+ u32 pkt_type_en_flags1;
+ u32 pkt_type_en_flags2;
+ u32 pkt_type_en_flags3;
+ u32 rx_filter_tlv;
+} __packed;
+
+struct htt_rx_ring_tlv_filter {
+ u32 rx_filter; /* see htt_rx_filter_tlv_flags */
+ u32 pkt_filter_flags0; /* MGMT */
+ u32 pkt_filter_flags1; /* MGMT */
+ u32 pkt_filter_flags2; /* CTRL */
+ u32 pkt_filter_flags3; /* DATA */
+};
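+
+/* Illustrative sketch (comment only): a monitor status ring filter built
+ * from the macro groups above might look like this (the variable is
+ * hypothetical):
+ *
+ *	struct htt_rx_ring_tlv_filter tlv_filter = {
+ *		.rx_filter = HTT_RX_MON_FILTER_TLV_FLAGS_MON_STATUS_RING,
+ *		.pkt_filter_flags0 = HTT_RX_MON_FP_MGMT_FILTER_FLAGS0,
+ *		.pkt_filter_flags1 = HTT_RX_MON_FP_MGMT_FILTER_FLAGS1,
+ *		.pkt_filter_flags2 = HTT_RX_MON_FP_CTRL_FILTER_FLASG2,
+ *		.pkt_filter_flags3 = HTT_RX_MON_FP_CTRL_FILTER_FLASG3 |
+ *				     HTT_RX_MON_FP_DATA_FILTER_FLASG3,
+ *	};
+ */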
+
+/* HTT message target->host */
+
+enum htt_t2h_msg_type {
+ HTT_T2H_MSG_TYPE_VERSION_CONF,
+ HTT_T2H_MSG_TYPE_RX_ADDBA = 0x5,
+ HTT_T2H_MSG_TYPE_PKTLOG = 0x8,
+ HTT_T2H_MSG_TYPE_SEC_IND = 0xb,
+ HTT_T2H_MSG_TYPE_EXT_STATS_CONF = 0x1c,
+ HTT_T2H_MSG_TYPE_PPDU_STATS_IND = 0x1d,
+ HTT_T2H_MSG_TYPE_PEER_MAP = 0x1e,
+ HTT_T2H_MSG_TYPE_PEER_UNMAP = 0x1f,
+};
+
+#define HTT_TARGET_VERSION_MAJOR 3
+
+#define HTT_T2H_MSG_TYPE GENMASK(7, 0)
+#define HTT_T2H_VERSION_CONF_MINOR GENMASK(15, 8)
+#define HTT_T2H_VERSION_CONF_MAJOR GENMASK(23, 16)
+
+struct htt_t2h_version_conf_msg {
+ u32 version;
+} __packed;
+
+#define HTT_T2H_PEER_MAP_INFO_VDEV_ID GENMASK(15, 8)
+#define HTT_T2H_PEER_MAP_INFO_PEER_ID GENMASK(31, 16)
+#define HTT_T2H_PEER_MAP_INFO1_MAC_ADDR_H16 GENMASK(15, 0)
+#define HTT_T2H_PEER_MAP_INFO1_HW_PEER_ID GENMASK(31, 16)
+#define HTT_T2H_PEER_MAP_INFO2_AST_HASH_VAL GENMASK(15, 0)
+#define HTT_T2H_PEER_MAP_INFO2_NEXT_HOP_M BIT(16)
+#define HTT_T2H_PEER_MAP_INFO2_NEXT_HOP_S 16
+
+struct htt_t2h_peer_map_event {
+ u32 info;
+ u32 mac_addr_l32;
+ u32 info1;
+ u32 info2;
+} __packed;
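+
+/* Illustrative sketch (comment only): unpacking the peer map event with
+ * FIELD_GET from <linux/bitfield.h> ('ev' is a hypothetical pointer):
+ *
+ *	u8 vdev_id = FIELD_GET(HTT_T2H_PEER_MAP_INFO_VDEV_ID, ev->info);
+ *	u16 peer_id = FIELD_GET(HTT_T2H_PEER_MAP_INFO_PEER_ID, ev->info);
+ *	u16 hw_peer_id = FIELD_GET(HTT_T2H_PEER_MAP_INFO1_HW_PEER_ID,
+ *				   ev->info1);
+ */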
+
+#define HTT_T2H_PEER_UNMAP_INFO_VDEV_ID HTT_T2H_PEER_MAP_INFO_VDEV_ID
+#define HTT_T2H_PEER_UNMAP_INFO_PEER_ID HTT_T2H_PEER_MAP_INFO_PEER_ID
+#define HTT_T2H_PEER_UNMAP_INFO1_MAC_ADDR_H16 \
+ HTT_T2H_PEER_MAP_INFO1_MAC_ADDR_H16
+#define HTT_T2H_PEER_MAP_INFO1_NEXT_HOP_M HTT_T2H_PEER_MAP_INFO2_NEXT_HOP_M
+#define HTT_T2H_PEER_MAP_INFO1_NEXT_HOP_S HTT_T2H_PEER_MAP_INFO2_NEXT_HOP_S
+
+struct htt_t2h_peer_unmap_event {
+ u32 info;
+ u32 mac_addr_l32;
+ u32 info1;
+} __packed;
+
+struct htt_resp_msg {
+ union {
+ struct htt_t2h_version_conf_msg version_msg;
+ struct htt_t2h_peer_map_event peer_map_ev;
+ struct htt_t2h_peer_unmap_event peer_unmap_ev;
+ };
+} __packed;
+
+/* ppdu stats
+ *
+ * @details
+ * The following field definitions describe the format of the HTT target
+ * to host ppdu stats indication message.
+ *
+ *
+ * |31 16|15 12|11 10|9 8|7 0 |
+ * |----------------------------------------------------------------------|
+ * | payload_size | rsvd |pdev_id|mac_id | msg type |
+ * |----------------------------------------------------------------------|
+ * | ppdu_id |
+ * |----------------------------------------------------------------------|
+ * | Timestamp in us |
+ * |----------------------------------------------------------------------|
+ * | reserved |
+ * |----------------------------------------------------------------------|
+ * | type-specific stats info |
+ * | (see htt_ppdu_stats.h) |
+ * |----------------------------------------------------------------------|
+ * Header fields:
+ * - MSG_TYPE
+ * Bits 7:0
+ * Purpose: Identifies this is a PPDU STATS indication
+ * message.
+ * Value: 0x1d
+ * - mac_id
+ * Bits 9:8
+ * Purpose: mac_id of this ppdu_id
+ * Value: 0-3
+ * - pdev_id
+ * Bits 11:10
+ * Purpose: pdev_id of this ppdu_id
+ * Value: 0-3
+ * 0 (for rings at SOC level),
+ * 1/2/3 PDEV -> 0/1/2
+ * - payload_size
+ * Bits 31:16
+ * Purpose: total tlv size
+ * Value: payload_size in bytes
+ */
+
+#define HTT_T2H_PPDU_STATS_INFO_PDEV_ID GENMASK(11, 10)
+#define HTT_T2H_PPDU_STATS_INFO_PAYLOAD_SIZE GENMASK(31, 16)
+
+struct ath11k_htt_ppdu_stats_msg {
+ u32 info;
+ u32 ppdu_id;
+ u32 timestamp;
+ u32 rsvd;
+ u8 data[0];
+} __packed;
+
+struct htt_tlv {
+ u32 header;
+ u8 value[0];
+} __packed;
+
+#define HTT_TLV_TAG GENMASK(11, 0)
+#define HTT_TLV_LEN GENMASK(23, 12)
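+
+/* Illustrative sketch (comment only), assuming the usual HTT TLV stream
+ * layout: each 4-byte header carries a tag and the length of the value
+ * that follows, so walking a buffer of 'len' bytes looks roughly like
+ * this ('ptr'/'len' are hypothetical; real code must also bounds-check
+ * tlv_len against the remaining length):
+ *
+ *	while (len >= sizeof(struct htt_tlv)) {
+ *		struct htt_tlv *tlv = (struct htt_tlv *)ptr;
+ *		u16 tag = FIELD_GET(HTT_TLV_TAG, tlv->header);
+ *		u16 tlv_len = FIELD_GET(HTT_TLV_LEN, tlv->header);
+ *
+ *		ptr += sizeof(*tlv) + tlv_len;
+ *		len -= sizeof(*tlv) + tlv_len;
+ *	}
+ */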
+
+enum HTT_PPDU_STATS_BW {
+ HTT_PPDU_STATS_BANDWIDTH_5MHZ = 0,
+ HTT_PPDU_STATS_BANDWIDTH_10MHZ = 1,
+ HTT_PPDU_STATS_BANDWIDTH_20MHZ = 2,
+ HTT_PPDU_STATS_BANDWIDTH_40MHZ = 3,
+ HTT_PPDU_STATS_BANDWIDTH_80MHZ = 4,
+ HTT_PPDU_STATS_BANDWIDTH_160MHZ = 5, /* includes 80+80 */
+ HTT_PPDU_STATS_BANDWIDTH_DYN = 6,
+};
+
+#define HTT_PPDU_STATS_CMN_FLAGS_FRAME_TYPE_M GENMASK(7, 0)
+#define HTT_PPDU_STATS_CMN_FLAGS_QUEUE_TYPE_M GENMASK(15, 8)
+/* bw - HTT_PPDU_STATS_BW */
+#define HTT_PPDU_STATS_CMN_FLAGS_BW_M GENMASK(19, 16)
+
+struct htt_ppdu_stats_common {
+ u32 ppdu_id;
+ u16 sched_cmdid;
+ u8 ring_id;
+ u8 num_users;
+ u32 flags; /* %HTT_PPDU_STATS_COMMON_FLAGS_*/
+ u32 chain_mask;
+ u32 fes_duration_us; /* frame exchange sequence */
+ u32 ppdu_sch_eval_start_tstmp_us;
+ u32 ppdu_sch_end_tstmp_us;
+ u32 ppdu_start_tstmp_us;
+ /* BIT [15 : 0] - phy mode (WLAN_PHY_MODE) with which ppdu was transmitted
+ * BIT [31 : 16] - bandwidth (in MHz) with which ppdu was transmitted
+ */
+ u16 phy_mode;
+ u16 bw_mhz;
+} __packed;
+
+#define HTT_PPDU_STATS_USER_RATE_INFO0_USER_POS_M GENMASK(3, 0)
+#define HTT_PPDU_STATS_USER_RATE_INFO0_MU_GROUP_ID_M GENMASK(11, 4)
+
+#define HTT_PPDU_STATS_USER_RATE_INFO1_RESP_TYPE_VALD_M BIT(0)
+#define HTT_PPDU_STATS_USER_RATE_INFO1_PPDU_TYPE_M GENMASK(5, 1)
+
+#define HTT_PPDU_STATS_USER_RATE_FLAGS_LTF_SIZE_M GENMASK(1, 0)
+#define HTT_PPDU_STATS_USER_RATE_FLAGS_STBC_M BIT(2)
+#define HTT_PPDU_STATS_USER_RATE_FLAGS_HE_RE_M BIT(3)
+#define HTT_PPDU_STATS_USER_RATE_FLAGS_TXBF_M GENMASK(7, 4)
+#define HTT_PPDU_STATS_USER_RATE_FLAGS_BW_M GENMASK(11, 8)
+#define HTT_PPDU_STATS_USER_RATE_FLAGS_NSS_M GENMASK(15, 12)
+#define HTT_PPDU_STATS_USER_RATE_FLAGS_MCS_M GENMASK(19, 16)
+#define HTT_PPDU_STATS_USER_RATE_FLAGS_PREAMBLE_M GENMASK(23, 20)
+#define HTT_PPDU_STATS_USER_RATE_FLAGS_GI_M GENMASK(27, 24)
+#define HTT_PPDU_STATS_USER_RATE_FLAGS_DCM_M BIT(28)
+#define HTT_PPDU_STATS_USER_RATE_FLAGS_LDPC_M BIT(29)
+
+#define HTT_USR_RATE_PREAMBLE(_val) \
+ FIELD_GET(HTT_PPDU_STATS_USER_RATE_FLAGS_PREAMBLE_M, _val)
+#define HTT_USR_RATE_BW(_val) \
+ FIELD_GET(HTT_PPDU_STATS_USER_RATE_FLAGS_BW_M, _val)
+#define HTT_USR_RATE_NSS(_val) \
+ FIELD_GET(HTT_PPDU_STATS_USER_RATE_FLAGS_NSS_M, _val)
+#define HTT_USR_RATE_MCS(_val) \
+ FIELD_GET(HTT_PPDU_STATS_USER_RATE_FLAGS_MCS_M, _val)
+#define HTT_USR_RATE_GI(_val) \
+ FIELD_GET(HTT_PPDU_STATS_USER_RATE_FLAGS_GI_M, _val)
+
+#define HTT_PPDU_STATS_USER_RATE_RESP_FLAGS_LTF_SIZE_M GENMASK(1, 0)
+#define HTT_PPDU_STATS_USER_RATE_RESP_FLAGS_STBC_M BIT(2)
+#define HTT_PPDU_STATS_USER_RATE_RESP_FLAGS_HE_RE_M BIT(3)
+#define HTT_PPDU_STATS_USER_RATE_RESP_FLAGS_TXBF_M GENMASK(7, 4)
+#define HTT_PPDU_STATS_USER_RATE_RESP_FLAGS_BW_M GENMASK(11, 8)
+#define HTT_PPDU_STATS_USER_RATE_RESP_FLAGS_NSS_M GENMASK(15, 12)
+#define HTT_PPDU_STATS_USER_RATE_RESP_FLAGS_MCS_M GENMASK(19, 16)
+#define HTT_PPDU_STATS_USER_RATE_RESP_FLAGS_PREAMBLE_M GENMASK(23, 20)
+#define HTT_PPDU_STATS_USER_RATE_RESP_FLAGS_GI_M GENMASK(27, 24)
+#define HTT_PPDU_STATS_USER_RATE_RESP_FLAGS_DCM_M BIT(28)
+#define HTT_PPDU_STATS_USER_RATE_RESP_FLAGS_LDPC_M BIT(29)
+
+struct htt_ppdu_stats_user_rate {
+ u8 tid_num;
+ u8 reserved0;
+ u16 sw_peer_id;
+ u32 info0; /* %HTT_PPDU_STATS_USER_RATE_INFO0_*/
+ u16 ru_end;
+ u16 ru_start;
+ u16 resp_ru_end;
+ u16 resp_ru_start;
+ u32 info1; /* %HTT_PPDU_STATS_USER_RATE_INFO1_ */
+ u32 rate_flags; /* %HTT_PPDU_STATS_USER_RATE_FLAGS_ */
+ /* Note: resp_rate_flags is only valid if resp_type is UL */
+ u32 resp_rate_flags; /* %HTT_PPDU_STATS_USER_RATE_RESP_FLAGS_ */
+} __packed;
+
+#define HTT_PPDU_STATS_TX_INFO_FLAGS_RATECODE_M GENMASK(7, 0)
+#define HTT_PPDU_STATS_TX_INFO_FLAGS_IS_AMPDU_M BIT(8)
+#define HTT_PPDU_STATS_TX_INFO_FLAGS_BA_ACK_FAILED_M GENMASK(10, 9)
+#define HTT_PPDU_STATS_TX_INFO_FLAGS_BW_M GENMASK(13, 11)
+#define HTT_PPDU_STATS_TX_INFO_FLAGS_SGI_M BIT(14)
+#define HTT_PPDU_STATS_TX_INFO_FLAGS_PEERID_M GENMASK(31, 16)
+
+#define HTT_TX_INFO_IS_AMSDU(_flags) \
+ FIELD_GET(HTT_PPDU_STATS_TX_INFO_FLAGS_IS_AMPDU_M, _flags)
+#define HTT_TX_INFO_BA_ACK_FAILED(_flags) \
+ FIELD_GET(HTT_PPDU_STATS_TX_INFO_FLAGS_BA_ACK_FAILED_M, _flags)
+#define HTT_TX_INFO_RATECODE(_flags) \
+ FIELD_GET(HTT_PPDU_STATS_TX_INFO_FLAGS_RATECODE_M, _flags)
+#define HTT_TX_INFO_PEERID(_flags) \
+ FIELD_GET(HTT_PPDU_STATS_TX_INFO_FLAGS_PEERID_M, _flags)
+
+struct htt_tx_ppdu_stats_info {
+ struct htt_tlv tlv_hdr;
+ u32 tx_success_bytes;
+ u32 tx_retry_bytes;
+ u32 tx_failed_bytes;
+ u32 flags; /* %HTT_PPDU_STATS_TX_INFO_FLAGS_ */
+ u16 tx_success_msdus;
+ u16 tx_retry_msdus;
+ u16 tx_failed_msdus;
+ u16 tx_duration; /* in units of us */
+} __packed;
+
+enum htt_ppdu_stats_usr_compln_status {
+ HTT_PPDU_STATS_USER_STATUS_OK,
+ HTT_PPDU_STATS_USER_STATUS_FILTERED,
+ HTT_PPDU_STATS_USER_STATUS_RESP_TIMEOUT,
+ HTT_PPDU_STATS_USER_STATUS_RESP_MISMATCH,
+ HTT_PPDU_STATS_USER_STATUS_ABORT,
+};
+
+#define HTT_PPDU_STATS_USR_CMPLTN_CMN_FLAGS_LONG_RETRY_M GENMASK(3, 0)
+#define HTT_PPDU_STATS_USR_CMPLTN_CMN_FLAGS_SHORT_RETRY_M GENMASK(7, 4)
+#define HTT_PPDU_STATS_USR_CMPLTN_CMN_FLAGS_IS_AMPDU_M BIT(8)
+#define HTT_PPDU_STATS_USR_CMPLTN_CMN_FLAGS_RESP_TYPE_M GENMASK(12, 9)
+
+#define HTT_USR_CMPLTN_IS_AMPDU(_val) \
+ FIELD_GET(HTT_PPDU_STATS_USR_CMPLTN_CMN_FLAGS_IS_AMPDU_M, _val)
+#define HTT_USR_CMPLTN_LONG_RETRY(_val) \
+ FIELD_GET(HTT_PPDU_STATS_USR_CMPLTN_CMN_FLAGS_LONG_RETRY_M, _val)
+#define HTT_USR_CMPLTN_SHORT_RETRY(_val) \
+ FIELD_GET(HTT_PPDU_STATS_USR_CMPLTN_CMN_FLAGS_SHORT_RETRY_M, _val)
+
+struct htt_ppdu_stats_usr_cmpltn_cmn {
+ u8 status;
+ u8 tid_num;
+ u16 sw_peer_id;
+ /* RSSI value of last ack packet (units = dB above noise floor) */
+ u32 ack_rssi;
+ u16 mpdu_tried;
+ u16 mpdu_success;
+ u32 flags; /* %HTT_PPDU_STATS_USR_CMPLTN_CMN_FLAGS_ */
+} __packed;
+
+#define HTT_PPDU_STATS_ACK_BA_INFO_NUM_MPDU_M GENMASK(8, 0)
+#define HTT_PPDU_STATS_ACK_BA_INFO_NUM_MSDU_M GENMASK(24, 9)
+#define HTT_PPDU_STATS_ACK_BA_INFO_TID_NUM GENMASK(31, 25)
+
+#define HTT_PPDU_STATS_NON_QOS_TID 16
+
+struct htt_ppdu_stats_usr_cmpltn_ack_ba_status {
+ u32 ppdu_id;
+ u16 sw_peer_id;
+ u16 reserved0;
+ u32 info; /* %HTT_PPDU_STATS_USR_CMPLTN_CMN_INFO_ */
+ u16 current_seq;
+ u16 start_seq;
+ u32 success_bytes;
+} __packed;
+
+struct htt_ppdu_stats_usr_cmn_array {
+ struct htt_tlv tlv_hdr;
+ u32 num_ppdu_stats;
+ /* tx_ppdu_stats_info is filled with multiple struct htt_tx_ppdu_stats_info
+ * elements.
+ * tx_ppdu_stats_info is variable length, with length =
+ * num_ppdu_stats * sizeof(struct htt_tx_ppdu_stats_info)
+ */
+ struct htt_tx_ppdu_stats_info tx_ppdu_info[0];
+} __packed;
+
+struct htt_ppdu_user_stats {
+ u16 peer_id;
+ u32 tlv_flags;
+ bool is_valid_peer_id;
+ struct htt_ppdu_stats_user_rate rate;
+ struct htt_ppdu_stats_usr_cmpltn_cmn cmpltn_cmn;
+ struct htt_ppdu_stats_usr_cmpltn_ack_ba_status ack_ba;
+};
+
+#define HTT_PPDU_STATS_MAX_USERS 8
+#define HTT_PPDU_DESC_MAX_DEPTH 16
+
+struct htt_ppdu_stats {
+ struct htt_ppdu_stats_common common;
+ struct htt_ppdu_user_stats user_stats[HTT_PPDU_STATS_MAX_USERS];
+};
+
+struct htt_ppdu_stats_info {
+ u32 ppdu_id;
+ struct htt_ppdu_stats ppdu_stats;
+ struct list_head list;
+};
+
+/**
+ * @brief target -> host packet log message
+ *
+ * @details
+ * The following field definitions describe the format of the packet log
+ * message sent from the target to the host.
+ * The message consists of a 4-octet header, followed by a variable number
+ * of 32-bit character values.
+ *
+ * |31 16|15 12|11 10|9 8|7 0|
+ * |------------------------------------------------------------------|
+ * | payload_size | rsvd |pdev_id|mac_id| msg type |
+ * |------------------------------------------------------------------|
+ * | payload |
+ * |------------------------------------------------------------------|
+ * - MSG_TYPE
+ * Bits 7:0
+ * Purpose: identifies this as a pktlog message
+ * Value: HTT_T2H_MSG_TYPE_PKTLOG
+ * - mac_id
+ * Bits 9:8
+ * Purpose: identifies which MAC/PHY instance generated this pktlog info
+ * Value: 0-3
+ * - pdev_id
+ * Bits 11:10
+ * Purpose: pdev_id
+ * Value: 0-3
+ * 0 (for rings at SOC level),
+ * 1/2/3 PDEV -> 0/1/2
+ * - payload_size
+ * Bits 31:16
+ * Purpose: explicitly specify the payload size
+ * Value: payload size in bytes (payload size is a multiple of 4 bytes)
+ */
+struct htt_pktlog_msg {
+ u32 hdr;
+ u8 payload[0];
+};
+
+/**
+ * @brief host -> target FW extended statistics retrieve
+ *
+ * @details
+ * The following field definitions describe the format of the HTT host
+ * to target FW extended stats retrieve message.
+ * The message specifies the type of stats the host wants to retrieve.
+ *
+ * |31 24|23 16|15 8|7 0|
+ * |-----------------------------------------------------------|
+ * | reserved | stats type | pdev_mask | msg type |
+ * |-----------------------------------------------------------|
+ * | config param [0] |
+ * |-----------------------------------------------------------|
+ * | config param [1] |
+ * |-----------------------------------------------------------|
+ * | config param [2] |
+ * |-----------------------------------------------------------|
+ * | config param [3] |
+ * |-----------------------------------------------------------|
+ * | reserved |
+ * |-----------------------------------------------------------|
+ * | cookie LSBs |
+ * |-----------------------------------------------------------|
+ * | cookie MSBs |
+ * |-----------------------------------------------------------|
+ * Header fields:
+ * - MSG_TYPE
+ * Bits 7:0
+ * Purpose: identifies this as an extended stats upload request message
+ * Value: 0x10
+ * - PDEV_MASK
+ * Bits 8:15
+ * Purpose: identifies the mask of PDEVs to retrieve stats from
+ * Value: This is an overloaded field, refer to usage and interpretation of
+ * PDEV in interface document.
+ * Bit 8 : Reserved for SOC stats
+ * Bit 9 - 15 : Indicates PDEV_MASK in DBDC
+ * Indicates MACID_MASK in DBS
+ * - STATS_TYPE
+ * Bits 23:16
+ * Purpose: identifies which FW statistics to upload
+ * Value: Defined by htt_dbg_ext_stats_type (see htt_stats.h)
+ * - Reserved
+ * Bits 31:24
+ * - CONFIG_PARAM [0]
+ * Bits 31:0
+ * Purpose: give an opaque configuration value to the specified stats type
+ * Value: stats-type specific configuration value
+ * Refer to htt_stats.h for interpretation for each stats sub_type
+ * - CONFIG_PARAM [1]
+ * Bits 31:0
+ * Purpose: give an opaque configuration value to the specified stats type
+ * Value: stats-type specific configuration value
+ * Refer to htt_stats.h for interpretation for each stats sub_type
+ * - CONFIG_PARAM [2]
+ * Bits 31:0
+ * Purpose: give an opaque configuration value to the specified stats type
+ * Value: stats-type specific configuration value
+ * Refer to htt_stats.h for interpretation for each stats sub_type
+ * - CONFIG_PARAM [3]
+ * Bits 31:0
+ * Purpose: give an opaque configuration value to the specified stats type
+ * Value: stats-type specific configuration value
+ * Refer to htt_stats.h for interpretation for each stats sub_type
+ * - Reserved [31:0] for future use.
+ * - COOKIE_LSBS
+ * Bits 31:0
+ * Purpose: Provide a mechanism to match a target->host stats confirmation
+ * message with its preceding host->target stats request message.
+ * Value: LSBs of the opaque cookie specified by the host-side requestor
+ * - COOKIE_MSBS
+ * Bits 31:0
+ * Purpose: Provide a mechanism to match a target->host stats confirmation
+ * message with its preceding host->target stats request message.
+ * Value: MSBs of the opaque cookie specified by the host-side requestor
+ */
+
+struct htt_ext_stats_cfg_hdr {
+ u8 msg_type;
+ u8 pdev_mask;
+ u8 stats_type;
+ u8 reserved;
+} __packed;
+
+struct htt_ext_stats_cfg_cmd {
+ struct htt_ext_stats_cfg_hdr hdr;
+ u32 cfg_param0;
+ u32 cfg_param1;
+ u32 cfg_param2;
+ u32 cfg_param3;
+ u32 reserved;
+ u32 cookie_lsb;
+ u32 cookie_msb;
+} __packed;
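+
+/* Illustrative sketch (comment only): a complete extended stats request
+ * per the layout documented above. Stats types come from htt_stats.h;
+ * 'pdev_idx', 'type' and 'cookie' are hypothetical. Bit 0 of pdev_mask
+ * is reserved for SOC stats, hence the "+ 1" shift.
+ *
+ *	cmd->hdr.msg_type = HTT_H2T_MSG_TYPE_EXT_STATS_CFG;
+ *	cmd->hdr.pdev_mask = 1 << (pdev_idx + 1);
+ *	cmd->hdr.stats_type = type;
+ *	cmd->cookie_lsb = lower_32_bits(cookie);
+ *	cmd->cookie_msb = upper_32_bits(cookie);
+ */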
+
+/* htt stats config default params */
+#define HTT_STAT_DEFAULT_RESET_START_OFFSET 0
+#define HTT_STAT_DEFAULT_CFG0_ALL_HWQS 0xffffffff
+#define HTT_STAT_DEFAULT_CFG0_ALL_TXQS 0xffffffff
+#define HTT_STAT_DEFAULT_CFG0_ALL_CMDQS 0xffff
+#define HTT_STAT_DEFAULT_CFG0_ALL_RINGS 0xffff
+#define HTT_STAT_DEFAULT_CFG0_ACTIVE_PEERS 0xff
+#define HTT_STAT_DEFAULT_CFG0_CCA_CUMULATIVE 0x00
+#define HTT_STAT_DEFAULT_CFG0_ACTIVE_VDEVS 0x00
+
+/* HTT_DBG_EXT_STATS_PEER_INFO
+ * PARAMS:
+ * @config_param0:
+ * [Bit0] - [0] for sw_peer_id, [1] for mac_addr based request
+ * [Bit15 : Bit 1] htt_peer_stats_req_mode_t
+ * [Bit31 : Bit16] sw_peer_id
+ * @config_param1:
+ * peer_stats_req_type_mask:32 (enum htt_peer_stats_tlv_enum)
+ * 0 bit htt_peer_stats_cmn_tlv
+ * 1 bit htt_peer_details_tlv
+ * 2 bit htt_tx_peer_rate_stats_tlv
+ * 3 bit htt_rx_peer_rate_stats_tlv
+ * 4 bit htt_tx_tid_stats_tlv/htt_tx_tid_stats_v1_tlv
+ * 5 bit htt_rx_tid_stats_tlv
+ * 6 bit htt_msdu_flow_stats_tlv
+ * @config_param2: [Bit31 : Bit0] mac_addr31to0
+ * @config_param3: [Bit15 : Bit0] mac_addr47to32
+ * [Bit31 : Bit16] reserved
+ */
+#define HTT_STAT_PEER_INFO_MAC_ADDR BIT(0)
+#define HTT_STAT_DEFAULT_PEER_REQ_TYPE 0x7f
+
+/* Used to set different configs to the specified stats type.*/
+struct htt_ext_stats_cfg_params {
+ u32 cfg0;
+ u32 cfg1;
+ u32 cfg2;
+ u32 cfg3;
+};
+
+/**
+ * @brief target -> host extended statistics upload
+ *
+ * @details
+ * The following field definitions describe the format of the HTT target
+ * to host stats upload confirmation message.
+ * The message contains a cookie echoed from the HTT host->target stats
+ * upload request, which identifies which request the confirmation is
+ * for. A single stats request can span multiple HTT stats indications
+ * due to the HTT message size limitation, so every HTT ext stats
+ * indication carries tag-length-value stats information elements.
+ * The tag-length header for each HTT stats IND message also includes a
+ * status field, to indicate whether the request for the stat type in
+ * question was fully met, partially met, unable to be met, or invalid
+ * (if the stat type in question is disabled in the target).
+ * A Done bit set to 1 indicates the end of the stats info elements.
+ *
+ *
+ * |31 16|15 12|11|10 8|7 5|4 0|
+ * |--------------------------------------------------------------|
+ * | reserved | msg type |
+ * |--------------------------------------------------------------|
+ * | cookie LSBs |
+ * |--------------------------------------------------------------|
+ * | cookie MSBs |
+ * |--------------------------------------------------------------|
+ * | stats entry length | rsvd | D| S | stat type |
+ * |--------------------------------------------------------------|
+ * | type-specific stats info |
+ * | (see htt_stats.h) |
+ * |--------------------------------------------------------------|
+ * Header fields:
+ * - MSG_TYPE
+ * Bits 7:0
+ * Purpose: Identifies this as an extended statistics upload confirmation
+ * message.
+ * Value: 0x1c
+ * - COOKIE_LSBS
+ * Bits 31:0
+ * Purpose: Provide a mechanism to match a target->host stats confirmation
+ * message with its preceding host->target stats request message.
+ * Value: LSBs of the opaque cookie specified by the host-side requestor
+ * - COOKIE_MSBS
+ * Bits 31:0
+ * Purpose: Provide a mechanism to match a target->host stats confirmation
+ * message with its preceding host->target stats request message.
+ * Value: MSBs of the opaque cookie specified by the host-side requestor
+ *
+ * Stats Information Element tag-length header fields:
+ * - STAT_TYPE
+ * Bits 7:0
+ * Purpose: identifies the type of statistics info held in the
+ * following information element
+ * Value: htt_dbg_ext_stats_type
+ * - STATUS
+ * Bits 10:8
+ * Purpose: indicate whether the requested stats are present
+ * Value: htt_dbg_ext_stats_status
+ * - DONE
+ * Bit 11
+ * Purpose:
+ * Indicates the completion of the stats entry; this will be the last
+ * stats conf HTT segment for the requested stats type.
+ * Value:
+ * 0 -> the stats retrieval is ongoing
+ * 1 -> the stats retrieval is complete
+ * - LENGTH
+ * Bits 31:16
+ * Purpose: indicate the stats information size
+ * Value: This field specifies the number of bytes of stats information
+ * that follows the element tag-length header.
+ * It is expected but not required that this length is a multiple of
+ * 4 bytes.
+ */
+
+#define HTT_T2H_EXT_STATS_INFO1_LENGTH GENMASK(31, 16)
+
+struct ath11k_htt_extd_stats_msg {
+ u32 info0;
+ u64 cookie;
+ u32 info1;
+ u8 data[0];
+} __packed;
+
+struct htt_mac_addr {
+ u32 mac_addr_l32;
+ u32 mac_addr_h16;
+};
+
+static inline void ath11k_dp_get_mac_addr(u32 addr_l32, u16 addr_h16, u8 *addr)
+{
+ if (IS_ENABLED(CONFIG_CPU_BIG_ENDIAN)) {
+ addr_l32 = swab32(addr_l32);
+ addr_h16 = swab16(addr_h16);
+ }
+
+ memcpy(addr, &addr_l32, 4);
+ memcpy(addr + 4, &addr_h16, ETH_ALEN - 4);
+}
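+
+/* Illustrative usage (comment only): reconstructing the peer MAC address
+ * reported in a peer map event ('ev' and 'mac_addr' are hypothetical):
+ *
+ *	u8 mac_addr[ETH_ALEN];
+ *	u16 mac_h16 = FIELD_GET(HTT_T2H_PEER_MAP_INFO1_MAC_ADDR_H16,
+ *				ev->info1);
+ *
+ *	ath11k_dp_get_mac_addr(ev->mac_addr_l32, mac_h16, mac_addr);
+ */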
+
+int ath11k_dp_service_srng(struct ath11k_base *ab,
+ struct ath11k_ext_irq_grp *irq_grp,
+ int budget);
+int ath11k_dp_htt_connect(struct ath11k_dp *dp);
+void ath11k_dp_vdev_tx_attach(struct ath11k *ar, struct ath11k_vif *arvif);
+void ath11k_dp_free(struct ath11k_base *ab);
+int ath11k_dp_alloc(struct ath11k_base *ab);
+int ath11k_dp_pdev_alloc(struct ath11k_base *ab);
+void ath11k_dp_pdev_pre_alloc(struct ath11k_base *ab);
+void ath11k_dp_pdev_free(struct ath11k_base *ab);
+int ath11k_dp_tx_htt_srng_setup(struct ath11k_base *ab, u32 ring_id,
+ int mac_id, enum hal_ring_type ring_type);
+int ath11k_dp_peer_setup(struct ath11k *ar, int vdev_id, const u8 *addr);
+void ath11k_dp_peer_cleanup(struct ath11k *ar, int vdev_id, const u8 *addr);
+void ath11k_dp_srng_cleanup(struct ath11k_base *ab, struct dp_srng *ring);
+int ath11k_dp_srng_setup(struct ath11k_base *ab, struct dp_srng *ring,
+ enum hal_ring_type type, int ring_num,
+ int mac_id, int num_entries);
+void ath11k_dp_link_desc_cleanup(struct ath11k_base *ab,
+ struct dp_link_desc_bank *desc_bank,
+ u32 ring_type, struct dp_srng *ring);
+int ath11k_dp_link_desc_setup(struct ath11k_base *ab,
+ struct dp_link_desc_bank *link_desc_banks,
+ u32 ring_type, struct hal_srng *srng,
+ u32 n_link_desc);
+
+#endif
diff --git a/drivers/net/wireless/ath/ath11k/dp_rx.c b/drivers/net/wireless/ath/ath11k/dp_rx.c
new file mode 100644
index 000000000000..b08da839b7d9
--- /dev/null
+++ b/drivers/net/wireless/ath/ath11k/dp_rx.c
@@ -0,0 +1,4195 @@
+// SPDX-License-Identifier: BSD-3-Clause-Clear
+/*
+ * Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
+ */
+
+#include <linux/ieee80211.h>
+#include "core.h"
+#include "debug.h"
+#include "hal_desc.h"
+#include "hw.h"
+#include "dp_rx.h"
+#include "hal_rx.h"
+#include "dp_tx.h"
+#include "peer.h"
+
+static u8 *ath11k_dp_rx_h_80211_hdr(struct hal_rx_desc *desc)
+{
+ return desc->hdr_status;
+}
+
+static enum hal_encrypt_type ath11k_dp_rx_h_mpdu_start_enctype(struct hal_rx_desc *desc)
+{
+ if (!(__le32_to_cpu(desc->mpdu_start.info1) &
+ RX_MPDU_START_INFO1_ENCRYPT_INFO_VALID))
+ return HAL_ENCRYPT_TYPE_OPEN;
+
+ return FIELD_GET(RX_MPDU_START_INFO2_ENC_TYPE,
+ __le32_to_cpu(desc->mpdu_start.info2));
+}
+
+static u8 ath11k_dp_rx_h_mpdu_start_decap_type(struct hal_rx_desc *desc)
+{
+ return FIELD_GET(RX_MPDU_START_INFO5_DECAP_TYPE,
+ __le32_to_cpu(desc->mpdu_start.info5));
+}
+
+static bool ath11k_dp_rx_h_attn_msdu_done(struct hal_rx_desc *desc)
+{
+ return !!FIELD_GET(RX_ATTENTION_INFO2_MSDU_DONE,
+ __le32_to_cpu(desc->attention.info2));
+}
+
+static bool ath11k_dp_rx_h_attn_first_mpdu(struct hal_rx_desc *desc)
+{
+ return !!FIELD_GET(RX_ATTENTION_INFO1_FIRST_MPDU,
+ __le32_to_cpu(desc->attention.info1));
+}
+
+static bool ath11k_dp_rx_h_attn_l4_cksum_fail(struct hal_rx_desc *desc)
+{
+ return !!FIELD_GET(RX_ATTENTION_INFO1_TCP_UDP_CKSUM_FAIL,
+ __le32_to_cpu(desc->attention.info1));
+}
+
+static bool ath11k_dp_rx_h_attn_ip_cksum_fail(struct hal_rx_desc *desc)
+{
+ return !!FIELD_GET(RX_ATTENTION_INFO1_IP_CKSUM_FAIL,
+ __le32_to_cpu(desc->attention.info1));
+}
+
+static bool ath11k_dp_rx_h_attn_is_decrypted(struct hal_rx_desc *desc)
+{
+ return (FIELD_GET(RX_ATTENTION_INFO2_DCRYPT_STATUS_CODE,
+ __le32_to_cpu(desc->attention.info2)) ==
+ RX_DESC_DECRYPT_STATUS_CODE_OK);
+}
+
+static u32 ath11k_dp_rx_h_attn_mpdu_err(struct hal_rx_desc *desc)
+{
+ u32 info = __le32_to_cpu(desc->attention.info1);
+ u32 errmap = 0;
+
+ if (info & RX_ATTENTION_INFO1_FCS_ERR)
+ errmap |= DP_RX_MPDU_ERR_FCS;
+
+ if (info & RX_ATTENTION_INFO1_DECRYPT_ERR)
+ errmap |= DP_RX_MPDU_ERR_DECRYPT;
+
+ if (info & RX_ATTENTION_INFO1_TKIP_MIC_ERR)
+ errmap |= DP_RX_MPDU_ERR_TKIP_MIC;
+
+ if (info & RX_ATTENTION_INFO1_A_MSDU_ERROR)
+ errmap |= DP_RX_MPDU_ERR_AMSDU_ERR;
+
+ if (info & RX_ATTENTION_INFO1_OVERFLOW_ERR)
+ errmap |= DP_RX_MPDU_ERR_OVERFLOW;
+
+ if (info & RX_ATTENTION_INFO1_MSDU_LEN_ERR)
+ errmap |= DP_RX_MPDU_ERR_MSDU_LEN;
+
+ if (info & RX_ATTENTION_INFO1_MPDU_LEN_ERR)
+ errmap |= DP_RX_MPDU_ERR_MPDU_LEN;
+
+ return errmap;
+}
+
+static u16 ath11k_dp_rx_h_msdu_start_msdu_len(struct hal_rx_desc *desc)
+{
+ return FIELD_GET(RX_MSDU_START_INFO1_MSDU_LENGTH,
+ __le32_to_cpu(desc->msdu_start.info1));
+}
+
+static u8 ath11k_dp_rx_h_msdu_start_sgi(struct hal_rx_desc *desc)
+{
+ return FIELD_GET(RX_MSDU_START_INFO3_SGI,
+ __le32_to_cpu(desc->msdu_start.info3));
+}
+
+static u8 ath11k_dp_rx_h_msdu_start_rate_mcs(struct hal_rx_desc *desc)
+{
+ return FIELD_GET(RX_MSDU_START_INFO3_RATE_MCS,
+ __le32_to_cpu(desc->msdu_start.info3));
+}
+
+static u8 ath11k_dp_rx_h_msdu_start_rx_bw(struct hal_rx_desc *desc)
+{
+ return FIELD_GET(RX_MSDU_START_INFO3_RECV_BW,
+ __le32_to_cpu(desc->msdu_start.info3));
+}
+
+static u32 ath11k_dp_rx_h_msdu_start_freq(struct hal_rx_desc *desc)
+{
+ return __le32_to_cpu(desc->msdu_start.phy_meta_data);
+}
+
+static u8 ath11k_dp_rx_h_msdu_start_pkt_type(struct hal_rx_desc *desc)
+{
+ return FIELD_GET(RX_MSDU_START_INFO3_PKT_TYPE,
+ __le32_to_cpu(desc->msdu_start.info3));
+}
+
+static u8 ath11k_dp_rx_h_msdu_start_nss(struct hal_rx_desc *desc)
+{
+ u8 mimo_ss_bitmap = FIELD_GET(RX_MSDU_START_INFO3_MIMO_SS_BITMAP,
+ __le32_to_cpu(desc->msdu_start.info3));
+
+ return hweight8(mimo_ss_bitmap);
+}
+
+static u8 ath11k_dp_rx_h_msdu_end_l3pad(struct hal_rx_desc *desc)
+{
+ return FIELD_GET(RX_MSDU_END_INFO2_L3_HDR_PADDING,
+ __le32_to_cpu(desc->msdu_end.info2));
+}
+
+static bool ath11k_dp_rx_h_msdu_end_first_msdu(struct hal_rx_desc *desc)
+{
+ return !!FIELD_GET(RX_MSDU_END_INFO2_FIRST_MSDU,
+ __le32_to_cpu(desc->msdu_end.info2));
+}
+
+static bool ath11k_dp_rx_h_msdu_end_last_msdu(struct hal_rx_desc *desc)
+{
+ return !!FIELD_GET(RX_MSDU_END_INFO2_LAST_MSDU,
+ __le32_to_cpu(desc->msdu_end.info2));
+}
+
+static void ath11k_dp_rx_desc_end_tlv_copy(struct hal_rx_desc *fdesc,
+ struct hal_rx_desc *ldesc)
+{
+ memcpy((u8 *)&fdesc->msdu_end, (u8 *)&ldesc->msdu_end,
+ sizeof(struct rx_msdu_end));
+ memcpy((u8 *)&fdesc->attention, (u8 *)&ldesc->attention,
+ sizeof(struct rx_attention));
+ memcpy((u8 *)&fdesc->mpdu_end, (u8 *)&ldesc->mpdu_end,
+ sizeof(struct rx_mpdu_end));
+}
+
+static u32 ath11k_dp_rxdesc_get_mpdulen_err(struct hal_rx_desc *rx_desc)
+{
+ struct rx_attention *rx_attn;
+
+ rx_attn = &rx_desc->attention;
+
+ return FIELD_GET(RX_ATTENTION_INFO1_MPDU_LEN_ERR,
+ __le32_to_cpu(rx_attn->info1));
+}
+
+static u32 ath11k_dp_rxdesc_get_decap_format(struct hal_rx_desc *rx_desc)
+{
+ struct rx_msdu_start *rx_msdu_start;
+
+ rx_msdu_start = &rx_desc->msdu_start;
+
+ return FIELD_GET(RX_MSDU_START_INFO2_DECAP_FORMAT,
+ __le32_to_cpu(rx_msdu_start->info2));
+}
+
+static u8 *ath11k_dp_rxdesc_get_80211hdr(struct hal_rx_desc *rx_desc)
+{
+ u8 *rx_pkt_hdr;
+
+ rx_pkt_hdr = &rx_desc->msdu_payload[0];
+
+ return rx_pkt_hdr;
+}
+
+static bool ath11k_dp_rxdesc_mpdu_valid(struct hal_rx_desc *rx_desc)
+{
+ u32 tlv_tag;
+
+ tlv_tag = FIELD_GET(HAL_TLV_HDR_TAG,
+ __le32_to_cpu(rx_desc->mpdu_start_tag));
+
+ return tlv_tag == HAL_RX_MPDU_START;
+}
+
+static u32 ath11k_dp_rxdesc_get_ppduid(struct hal_rx_desc *rx_desc)
+{
+ return __le16_to_cpu(rx_desc->mpdu_start.phy_ppdu_id);
+}
+
+/* Returns number of Rx buffers replenished */
+int ath11k_dp_rxbufs_replenish(struct ath11k_base *ab, int mac_id,
+ struct dp_rxdma_ring *rx_ring,
+ int req_entries,
+ enum hal_rx_buf_return_buf_manager mgr,
+ gfp_t gfp)
+{
+ struct hal_srng *srng;
+ u32 *desc;
+ struct sk_buff *skb;
+ int num_free;
+ int num_remain;
+ int buf_id;
+ u32 cookie;
+ dma_addr_t paddr;
+
+ req_entries = min(req_entries, rx_ring->bufs_max);
+
+ srng = &ab->hal.srng_list[rx_ring->refill_buf_ring.ring_id];
+
+ spin_lock_bh(&srng->lock);
+
+ ath11k_hal_srng_access_begin(ab, srng);
+
+ num_free = ath11k_hal_srng_src_num_free(ab, srng, true);
+ if (!req_entries && (num_free > (rx_ring->bufs_max * 3) / 4))
+ req_entries = num_free;
+
+ req_entries = min(num_free, req_entries);
+ num_remain = req_entries;
+
+ while (num_remain > 0) {
+ skb = dev_alloc_skb(DP_RX_BUFFER_SIZE +
+ DP_RX_BUFFER_ALIGN_SIZE);
+ if (!skb)
+ break;
+
+ if (!IS_ALIGNED((unsigned long)skb->data,
+ DP_RX_BUFFER_ALIGN_SIZE)) {
+ skb_pull(skb,
+ PTR_ALIGN(skb->data, DP_RX_BUFFER_ALIGN_SIZE) -
+ skb->data);
+ }
+
+ paddr = dma_map_single(ab->dev, skb->data,
+ skb->len + skb_tailroom(skb),
+ DMA_FROM_DEVICE);
+ if (dma_mapping_error(ab->dev, paddr))
+ goto fail_free_skb;
+
+ spin_lock_bh(&rx_ring->idr_lock);
+ buf_id = idr_alloc(&rx_ring->bufs_idr, skb, 0,
+ rx_ring->bufs_max * 3, gfp);
+ spin_unlock_bh(&rx_ring->idr_lock);
+ if (buf_id < 0)
+ goto fail_dma_unmap;
+
+ desc = ath11k_hal_srng_src_get_next_entry(ab, srng);
+ if (!desc)
+ goto fail_idr_remove;
+
+ ATH11K_SKB_RXCB(skb)->paddr = paddr;
+
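+ /* Pack the pdev (mac) id and the idr buf_id into the cookie so the
+ * reap path can map the ring descriptor back to this skb.
+ */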
+ cookie = FIELD_PREP(DP_RXDMA_BUF_COOKIE_PDEV_ID, mac_id) |
+ FIELD_PREP(DP_RXDMA_BUF_COOKIE_BUF_ID, buf_id);
+
+ num_remain--;
+
+ ath11k_hal_rx_buf_addr_info_set(desc, paddr, cookie, mgr);
+ }
+
+ ath11k_hal_srng_access_end(ab, srng);
+
+ spin_unlock_bh(&srng->lock);
+
+ return req_entries - num_remain;
+
+fail_idr_remove:
+ spin_lock_bh(&rx_ring->idr_lock);
+ idr_remove(&rx_ring->bufs_idr, buf_id);
+ spin_unlock_bh(&rx_ring->idr_lock);
+fail_dma_unmap:
+ dma_unmap_single(ab->dev, paddr, skb->len + skb_tailroom(skb),
+ DMA_FROM_DEVICE);
+fail_free_skb:
+ dev_kfree_skb_any(skb);
+
+ ath11k_hal_srng_access_end(ab, srng);
+
+ spin_unlock_bh(&srng->lock);
+
+ return req_entries - num_remain;
+}
+
+static int ath11k_dp_rxdma_buf_ring_free(struct ath11k *ar,
+ struct dp_rxdma_ring *rx_ring)
+{
+ struct ath11k_pdev_dp *dp = &ar->dp;
+ struct sk_buff *skb;
+ int buf_id;
+
+ spin_lock_bh(&rx_ring->idr_lock);
+ idr_for_each_entry(&rx_ring->bufs_idr, skb, buf_id) {
+ idr_remove(&rx_ring->bufs_idr, buf_id);
+ /* TODO: Understand where the internal driver does this dma_unmap of
+ * the rxdma_buffer.
+ */
+ dma_unmap_single(ar->ab->dev, ATH11K_SKB_RXCB(skb)->paddr,
+ skb->len + skb_tailroom(skb), DMA_FROM_DEVICE);
+ dev_kfree_skb_any(skb);
+ }
+
+ idr_destroy(&rx_ring->bufs_idr);
+ spin_unlock_bh(&rx_ring->idr_lock);
+
+ rx_ring = &dp->rx_mon_status_refill_ring;
+
+ spin_lock_bh(&rx_ring->idr_lock);
+ idr_for_each_entry(&rx_ring->bufs_idr, skb, buf_id) {
+ idr_remove(&rx_ring->bufs_idr, buf_id);
+ /* XXX: Understand where the internal driver does this dma_unmap of
+ * the rxdma_buffer.
+ */
+ dma_unmap_single(ar->ab->dev, ATH11K_SKB_RXCB(skb)->paddr,
+ skb->len + skb_tailroom(skb), DMA_BIDIRECTIONAL);
+ dev_kfree_skb_any(skb);
+ }
+
+ idr_destroy(&rx_ring->bufs_idr);
+ spin_unlock_bh(&rx_ring->idr_lock);
+ return 0;
+}
+
+static int ath11k_dp_rxdma_pdev_buf_free(struct ath11k *ar)
+{
+ struct ath11k_pdev_dp *dp = &ar->dp;
+ struct dp_rxdma_ring *rx_ring = &dp->rx_refill_buf_ring;
+
+ ath11k_dp_rxdma_buf_ring_free(ar, rx_ring);
+
+ rx_ring = &dp->rxdma_mon_buf_ring;
+ ath11k_dp_rxdma_buf_ring_free(ar, rx_ring);
+
+ rx_ring = &dp->rx_mon_status_refill_ring;
+ ath11k_dp_rxdma_buf_ring_free(ar, rx_ring);
+ return 0;
+}
+
+static int ath11k_dp_rxdma_ring_buf_setup(struct ath11k *ar,
+ struct dp_rxdma_ring *rx_ring,
+ u32 ringtype)
+{
+ struct ath11k_pdev_dp *dp = &ar->dp;
+ int num_entries;
+
+ num_entries = rx_ring->refill_buf_ring.size /
+ ath11k_hal_srng_get_entrysize(ringtype);
+
+ rx_ring->bufs_max = num_entries;
+ ath11k_dp_rxbufs_replenish(ar->ab, dp->mac_id, rx_ring, num_entries,
+ HAL_RX_BUF_RBM_SW3_BM, GFP_KERNEL);
+ return 0;
+}
+
+static int ath11k_dp_rxdma_pdev_buf_setup(struct ath11k *ar)
+{
+ struct ath11k_pdev_dp *dp = &ar->dp;
+ struct dp_rxdma_ring *rx_ring = &dp->rx_refill_buf_ring;
+
+ ath11k_dp_rxdma_ring_buf_setup(ar, rx_ring, HAL_RXDMA_BUF);
+
+ rx_ring = &dp->rxdma_mon_buf_ring;
+ ath11k_dp_rxdma_ring_buf_setup(ar, rx_ring, HAL_RXDMA_MONITOR_BUF);
+
+ rx_ring = &dp->rx_mon_status_refill_ring;
+ ath11k_dp_rxdma_ring_buf_setup(ar, rx_ring, HAL_RXDMA_MONITOR_STATUS);
+
+ return 0;
+}
+
+static void ath11k_dp_rx_pdev_srng_free(struct ath11k *ar)
+{
+ struct ath11k_pdev_dp *dp = &ar->dp;
+
+ ath11k_dp_srng_cleanup(ar->ab, &dp->rx_refill_buf_ring.refill_buf_ring);
+ ath11k_dp_srng_cleanup(ar->ab, &dp->rxdma_err_dst_ring);
+ ath11k_dp_srng_cleanup(ar->ab, &dp->rx_mon_status_refill_ring.refill_buf_ring);
+ ath11k_dp_srng_cleanup(ar->ab, &dp->rxdma_mon_buf_ring.refill_buf_ring);
+}
+
+void ath11k_dp_pdev_reo_cleanup(struct ath11k_base *ab)
+{
+ struct ath11k_pdev_dp *dp;
+ struct ath11k *ar;
+ int i;
+
+ for (i = 0; i < ab->num_radios; i++) {
+ ar = ab->pdevs[i].ar;
+ dp = &ar->dp;
+ ath11k_dp_srng_cleanup(ab, &dp->reo_dst_ring);
+ }
+}
+
+int ath11k_dp_pdev_reo_setup(struct ath11k_base *ab)
+{
+ struct ath11k *ar;
+ struct ath11k_pdev_dp *dp;
+ int ret;
+ int i;
+
+ for (i = 0; i < ab->num_radios; i++) {
+ ar = ab->pdevs[i].ar;
+ dp = &ar->dp;
+ ret = ath11k_dp_srng_setup(ab, &dp->reo_dst_ring, HAL_REO_DST,
+ dp->mac_id, dp->mac_id,
+ DP_REO_DST_RING_SIZE);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to setup reo_dst_ring\n");
+ goto err_reo_cleanup;
+ }
+ }
+
+ return 0;
+
+err_reo_cleanup:
+ ath11k_dp_pdev_reo_cleanup(ab);
+
+ return ret;
+}
+
+static int ath11k_dp_rx_pdev_srng_alloc(struct ath11k *ar)
+{
+ struct ath11k_pdev_dp *dp = &ar->dp;
+ struct dp_srng *srng = NULL;
+ int ret;
+
+ ret = ath11k_dp_srng_setup(ar->ab,
+ &dp->rx_refill_buf_ring.refill_buf_ring,
+ HAL_RXDMA_BUF, 0,
+ dp->mac_id, DP_RXDMA_BUF_RING_SIZE);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to setup rx_refill_buf_ring\n");
+ return ret;
+ }
+
+ ret = ath11k_dp_srng_setup(ar->ab, &dp->rxdma_err_dst_ring,
+ HAL_RXDMA_DST, 0, dp->mac_id,
+ DP_RXDMA_ERR_DST_RING_SIZE);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to setup rxdma_err_dst_ring\n");
+ return ret;
+ }
+
+ srng = &dp->rx_mon_status_refill_ring.refill_buf_ring;
+ ret = ath11k_dp_srng_setup(ar->ab,
+ srng,
+ HAL_RXDMA_MONITOR_STATUS, 0, dp->mac_id,
+ DP_RXDMA_MON_STATUS_RING_SIZE);
+ if (ret) {
+ ath11k_warn(ar->ab,
+ "failed to setup rx_mon_status_refill_ring\n");
+ return ret;
+ }
+ ret = ath11k_dp_srng_setup(ar->ab,
+ &dp->rxdma_mon_buf_ring.refill_buf_ring,
+ HAL_RXDMA_MONITOR_BUF, 0, dp->mac_id,
+ DP_RXDMA_MONITOR_BUF_RING_SIZE);
+ if (ret) {
+ ath11k_warn(ar->ab,
+ "failed to setup HAL_RXDMA_MONITOR_BUF\n");
+ return ret;
+ }
+
+ ret = ath11k_dp_srng_setup(ar->ab, &dp->rxdma_mon_dst_ring,
+ HAL_RXDMA_MONITOR_DST, 0, dp->mac_id,
+ DP_RXDMA_MONITOR_DST_RING_SIZE);
+ if (ret) {
+ ath11k_warn(ar->ab,
+ "failed to setup HAL_RXDMA_MONITOR_DST\n");
+ return ret;
+ }
+
+ ret = ath11k_dp_srng_setup(ar->ab, &dp->rxdma_mon_desc_ring,
+ HAL_RXDMA_MONITOR_DESC, 0, dp->mac_id,
+ DP_RXDMA_MONITOR_DESC_RING_SIZE);
+ if (ret) {
+ ath11k_warn(ar->ab,
+ "failed to setup HAL_RXDMA_MONITOR_DESC\n");
+ return ret;
+ }
+
+ return 0;
+}
+
+void ath11k_dp_reo_cmd_list_cleanup(struct ath11k_base *ab)
+{
+ struct ath11k_dp *dp = &ab->dp;
+ struct dp_reo_cmd *cmd, *tmp;
+ struct dp_reo_cache_flush_elem *cmd_cache, *tmp_cache;
+
+ spin_lock_bh(&dp->reo_cmd_lock);
+ list_for_each_entry_safe(cmd, tmp, &dp->reo_cmd_list, list) {
+ list_del(&cmd->list);
+ dma_unmap_single(ab->dev, cmd->data.paddr,
+ cmd->data.size, DMA_BIDIRECTIONAL);
+ kfree(cmd->data.vaddr);
+ kfree(cmd);
+ }
+
+ list_for_each_entry_safe(cmd_cache, tmp_cache,
+ &dp->reo_cmd_cache_flush_list, list) {
+ list_del(&cmd_cache->list);
+ dma_unmap_single(ab->dev, cmd_cache->data.paddr,
+ cmd_cache->data.size, DMA_BIDIRECTIONAL);
+ kfree(cmd_cache->data.vaddr);
+ kfree(cmd_cache);
+ }
+ spin_unlock_bh(&dp->reo_cmd_lock);
+}
+
+static void ath11k_dp_reo_cmd_free(struct ath11k_dp *dp, void *ctx,
+ enum hal_reo_cmd_status status)
+{
+ struct dp_rx_tid *rx_tid = ctx;
+
+ if (status != HAL_REO_CMD_SUCCESS)
+ ath11k_warn(dp->ab, "failed to flush rx tid hw desc, tid %d status %d\n",
+ rx_tid->tid, status);
+
+ dma_unmap_single(dp->ab->dev, rx_tid->paddr, rx_tid->size,
+ DMA_BIDIRECTIONAL);
+ kfree(rx_tid->vaddr);
+}
+
+static void ath11k_dp_reo_cache_flush(struct ath11k_base *ab,
+ struct dp_rx_tid *rx_tid)
+{
+ struct ath11k_hal_reo_cmd cmd = {0};
+ unsigned long tot_desc_sz, desc_sz;
+ int ret;
+
+ tot_desc_sz = rx_tid->size;
+ desc_sz = ath11k_hal_reo_qdesc_size(0, HAL_DESC_REO_NON_QOS_TID);
+
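+ /* Flush the queue descriptor from the HW cache in desc_sz chunks,
+ * working backwards from the end; the base chunk is flushed last,
+ * below, with a status callback that frees the host memory.
+ */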
+ while (tot_desc_sz > desc_sz) {
+ tot_desc_sz -= desc_sz;
+ cmd.addr_lo = lower_32_bits(rx_tid->paddr + tot_desc_sz);
+ cmd.addr_hi = upper_32_bits(rx_tid->paddr);
+ ret = ath11k_dp_tx_send_reo_cmd(ab, rx_tid,
+ HAL_REO_CMD_FLUSH_CACHE, &cmd,
+ NULL);
+ if (ret)
+ ath11k_warn(ab,
+ "failed to send HAL_REO_CMD_FLUSH_CACHE, tid %d (%d)\n",
+ rx_tid->tid, ret);
+ }
+
+ memset(&cmd, 0, sizeof(cmd));
+ cmd.addr_lo = lower_32_bits(rx_tid->paddr);
+ cmd.addr_hi = upper_32_bits(rx_tid->paddr);
+ cmd.flag |= HAL_REO_CMD_FLG_NEED_STATUS;
+ ret = ath11k_dp_tx_send_reo_cmd(ab, rx_tid,
+ HAL_REO_CMD_FLUSH_CACHE,
+ &cmd, ath11k_dp_reo_cmd_free);
+ if (ret) {
+ ath11k_err(ab, "failed to send HAL_REO_CMD_FLUSH_CACHE cmd, tid %d (%d)\n",
+ rx_tid->tid, ret);
+ dma_unmap_single(ab->dev, rx_tid->paddr, rx_tid->size,
+ DMA_BIDIRECTIONAL);
+ kfree(rx_tid->vaddr);
+ }
+}
+
+static void ath11k_dp_rx_tid_del_func(struct ath11k_dp *dp, void *ctx,
+ enum hal_reo_cmd_status status)
+{
+ struct ath11k_base *ab = dp->ab;
+ struct dp_rx_tid *rx_tid = ctx;
+ struct dp_reo_cache_flush_elem *elem, *tmp;
+
+ if (status == HAL_REO_CMD_DRAIN) {
+ goto free_desc;
+ } else if (status != HAL_REO_CMD_SUCCESS) {
+ /* Shouldn't happen! Cleanup in case of other failure? */
+ ath11k_warn(ab, "failed to delete rx tid %d hw descriptor %d\n",
+ rx_tid->tid, status);
+ return;
+ }
+
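+ /* The HW cache may still hold this queue descriptor, so it cannot be
+ * freed immediately; queue it and age it out via a cache flush below.
+ */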
+ elem = kzalloc(sizeof(*elem), GFP_ATOMIC);
+ if (!elem)
+ goto free_desc;
+
+ elem->ts = jiffies;
+ memcpy(&elem->data, rx_tid, sizeof(*rx_tid));
+
+ spin_lock_bh(&dp->reo_cmd_lock);
+ list_add_tail(&elem->list, &dp->reo_cmd_cache_flush_list);
+ spin_unlock_bh(&dp->reo_cmd_lock);
+
+ /* Flush and invalidate aged REO desc from HW cache */
+ spin_lock_bh(&dp->reo_cmd_lock);
+ list_for_each_entry_safe(elem, tmp, &dp->reo_cmd_cache_flush_list,
+ list) {
+ if (time_after(jiffies, elem->ts +
+ msecs_to_jiffies(DP_REO_DESC_FREE_TIMEOUT_MS))) {
+ list_del(&elem->list);
+ spin_unlock_bh(&dp->reo_cmd_lock);
+
+ ath11k_dp_reo_cache_flush(ab, &elem->data);
+ kfree(elem);
+ spin_lock_bh(&dp->reo_cmd_lock);
+ }
+ }
+ spin_unlock_bh(&dp->reo_cmd_lock);
+
+ return;
+free_desc:
+ dma_unmap_single(ab->dev, rx_tid->paddr, rx_tid->size,
+ DMA_BIDIRECTIONAL);
+ kfree(rx_tid->vaddr);
+}
+
+static void ath11k_peer_rx_tid_delete(struct ath11k *ar,
+ struct ath11k_peer *peer, u8 tid)
+{
+ struct ath11k_hal_reo_cmd cmd = {0};
+ struct dp_rx_tid *rx_tid = &peer->rx_tid[tid];
+ int ret;
+
+ if (!rx_tid->active)
+ return;
+
+ cmd.flag = HAL_REO_CMD_FLG_NEED_STATUS;
+ cmd.addr_lo = lower_32_bits(rx_tid->paddr);
+ cmd.addr_hi = upper_32_bits(rx_tid->paddr);
+ cmd.upd0 |= HAL_REO_CMD_UPD0_VLD;
+ ret = ath11k_dp_tx_send_reo_cmd(ar->ab, rx_tid,
+ HAL_REO_CMD_UPDATE_RX_QUEUE, &cmd,
+ ath11k_dp_rx_tid_del_func);
+ if (ret) {
+ ath11k_err(ar->ab, "failed to send HAL_REO_CMD_UPDATE_RX_QUEUE cmd, tid %d (%d)\n",
+ tid, ret);
+ dma_unmap_single(ar->ab->dev, rx_tid->paddr, rx_tid->size,
+ DMA_BIDIRECTIONAL);
+ kfree(rx_tid->vaddr);
+ }
+
+ rx_tid->active = false;
+}
+
+void ath11k_peer_rx_tid_cleanup(struct ath11k *ar, struct ath11k_peer *peer)
+{
+ int i;
+
+ for (i = 0; i <= IEEE80211_NUM_TIDS; i++)
+ ath11k_peer_rx_tid_delete(ar, peer, i);
+}
+
+static int ath11k_peer_rx_tid_reo_update(struct ath11k *ar,
+ struct ath11k_peer *peer,
+ struct dp_rx_tid *rx_tid,
+ u32 ba_win_sz, u16 ssn,
+ bool update_ssn)
+{
+ struct ath11k_hal_reo_cmd cmd = {0};
+ int ret;
+
+ cmd.addr_lo = lower_32_bits(rx_tid->paddr);
+ cmd.addr_hi = upper_32_bits(rx_tid->paddr);
+ cmd.flag = HAL_REO_CMD_FLG_NEED_STATUS;
+ cmd.upd0 = HAL_REO_CMD_UPD0_BA_WINDOW_SIZE;
+ cmd.ba_window_size = ba_win_sz;
+
+ if (update_ssn) {
+ cmd.upd0 |= HAL_REO_CMD_UPD0_SSN;
+ cmd.upd2 = FIELD_PREP(HAL_REO_CMD_UPD2_SSN, ssn);
+ }
+
+ ret = ath11k_dp_tx_send_reo_cmd(ar->ab, rx_tid,
+ HAL_REO_CMD_UPDATE_RX_QUEUE, &cmd,
+ NULL);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to update rx tid queue, tid %d (%d)\n",
+ rx_tid->tid, ret);
+ return ret;
+ }
+
+ rx_tid->ba_win_sz = ba_win_sz;
+
+ return 0;
+}
+
+static void ath11k_dp_rx_tid_mem_free(struct ath11k_base *ab,
+ const u8 *peer_mac, int vdev_id, u8 tid)
+{
+ struct ath11k_peer *peer;
+ struct dp_rx_tid *rx_tid;
+
+ spin_lock_bh(&ab->base_lock);
+
+ peer = ath11k_peer_find(ab, vdev_id, peer_mac);
+ if (!peer) {
+ ath11k_warn(ab, "failed to find the peer to free up rx tid mem\n");
+ goto unlock_exit;
+ }
+
+ rx_tid = &peer->rx_tid[tid];
+ if (!rx_tid->active)
+ goto unlock_exit;
+
+ dma_unmap_single(ab->dev, rx_tid->paddr, rx_tid->size,
+ DMA_BIDIRECTIONAL);
+ kfree(rx_tid->vaddr);
+
+ rx_tid->active = false;
+
+unlock_exit:
+ spin_unlock_bh(&ab->base_lock);
+}
+
+int ath11k_peer_rx_tid_setup(struct ath11k *ar, const u8 *peer_mac, int vdev_id,
+ u8 tid, u32 ba_win_sz, u16 ssn)
+{
+ struct ath11k_base *ab = ar->ab;
+ struct ath11k_peer *peer;
+ struct dp_rx_tid *rx_tid;
+ u32 hw_desc_sz;
+ u32 *addr_aligned;
+ void *vaddr;
+ dma_addr_t paddr;
+ int ret;
+
+ spin_lock_bh(&ab->base_lock);
+
+ peer = ath11k_peer_find(ab, vdev_id, peer_mac);
+ if (!peer) {
+ ath11k_warn(ab, "failed to find the peer to set up rx tid\n");
+ spin_unlock_bh(&ab->base_lock);
+ return -ENOENT;
+ }
+
+ rx_tid = &peer->rx_tid[tid];
+ /* Update the tid queue if it is already setup */
+ if (rx_tid->active) {
+ paddr = rx_tid->paddr;
+ ret = ath11k_peer_rx_tid_reo_update(ar, peer, rx_tid,
+ ba_win_sz, ssn, true);
+ spin_unlock_bh(&ab->base_lock);
+ if (ret) {
+ ath11k_warn(ab, "failed to update reo for rx tid %d\n", tid);
+ return ret;
+ }
+
+ ret = ath11k_wmi_peer_rx_reorder_queue_setup(ar, vdev_id,
+ peer_mac, paddr,
+ tid, 1, ba_win_sz);
+ if (ret)
+ ath11k_warn(ab, "failed to send wmi command to update rx reorder queue, tid :%d (%d)\n",
+ tid, ret);
+ return ret;
+ }
+
+ rx_tid->tid = tid;
+
+ rx_tid->ba_win_sz = ba_win_sz;
+
+ /* TODO: Optimize the memory allocation for qos tid based on
+ * the actual BA window size in the REO tid update path.
+ */
+ if (tid == HAL_DESC_REO_NON_QOS_TID)
+ hw_desc_sz = ath11k_hal_reo_qdesc_size(ba_win_sz, tid);
+ else
+ hw_desc_sz = ath11k_hal_reo_qdesc_size(DP_BA_WIN_SZ_MAX, tid);
+
+ vaddr = kzalloc(hw_desc_sz + HAL_LINK_DESC_ALIGN - 1, GFP_KERNEL);
+ if (!vaddr) {
+ spin_unlock_bh(&ab->base_lock);
+ return -ENOMEM;
+ }
+
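+ /* The REO queue descriptor must be HAL_LINK_DESC_ALIGN aligned; the
+ * allocation above is padded so we can align within the buffer.
+ */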
+ addr_aligned = PTR_ALIGN(vaddr, HAL_LINK_DESC_ALIGN);
+
+ ath11k_hal_reo_qdesc_setup(addr_aligned, tid, ba_win_sz, ssn);
+
+ paddr = dma_map_single(ab->dev, addr_aligned, hw_desc_sz,
+ DMA_BIDIRECTIONAL);
+
+ ret = dma_mapping_error(ab->dev, paddr);
+ if (ret) {
+ spin_unlock_bh(&ab->base_lock);
+ goto err_mem_free;
+ }
+
+ rx_tid->vaddr = vaddr;
+ rx_tid->paddr = paddr;
+ rx_tid->size = hw_desc_sz;
+ rx_tid->active = true;
+
+ spin_unlock_bh(&ab->base_lock);
+
+ ret = ath11k_wmi_peer_rx_reorder_queue_setup(ar, vdev_id, peer_mac,
+ paddr, tid, 1, ba_win_sz);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to setup rx reorder queue, tid :%d (%d)\n",
+ tid, ret);
+ ath11k_dp_rx_tid_mem_free(ab, peer_mac, vdev_id, tid);
+ }
+
+ return ret;
+
+err_mem_free:
+ kfree(vaddr);
+
+ return ret;
+}
+
+int ath11k_dp_rx_ampdu_start(struct ath11k *ar,
+ struct ieee80211_ampdu_params *params)
+{
+ struct ath11k_base *ab = ar->ab;
+ struct ath11k_sta *arsta = (void *)params->sta->drv_priv;
+ int vdev_id = arsta->arvif->vdev_id;
+ int ret;
+
+ ret = ath11k_peer_rx_tid_setup(ar, params->sta->addr, vdev_id,
+ params->tid, params->buf_size,
+ params->ssn);
+ if (ret)
+ ath11k_warn(ab, "failed to setup rx tid %d\n", ret);
+
+ return ret;
+}
+
+int ath11k_dp_rx_ampdu_stop(struct ath11k *ar,
+ struct ieee80211_ampdu_params *params)
+{
+ struct ath11k_base *ab = ar->ab;
+ struct ath11k_peer *peer;
+ struct ath11k_sta *arsta = (void *)params->sta->drv_priv;
+ int vdev_id = arsta->arvif->vdev_id;
+ dma_addr_t paddr;
+ bool active;
+ int ret;
+
+ spin_lock_bh(&ab->base_lock);
+
+ peer = ath11k_peer_find(ab, vdev_id, params->sta->addr);
+ if (!peer) {
+ ath11k_warn(ab, "failed to find the peer to stop rx aggregation\n");
+ spin_unlock_bh(&ab->base_lock);
+ return -ENOENT;
+ }
+
+ paddr = peer->rx_tid[params->tid].paddr;
+ active = peer->rx_tid[params->tid].active;
+
+ if (!active) {
+ spin_unlock_bh(&ab->base_lock);
+ return 0;
+ }
+
+ ret = ath11k_peer_rx_tid_reo_update(ar, peer, &peer->rx_tid[params->tid],
+ 1, 0, false);
+ spin_unlock_bh(&ab->base_lock);
+ if (ret) {
+ ath11k_warn(ab, "failed to update reo for rx tid %d: %d\n",
+ params->tid, ret);
+ return ret;
+ }
+
+ ret = ath11k_wmi_peer_rx_reorder_queue_setup(ar, vdev_id,
+ params->sta->addr, paddr,
+ params->tid, 1, 1);
+ if (ret)
+ ath11k_warn(ab, "failed to send wmi to delete rx tid %d\n",
+ ret);
+
+ return ret;
+}
+
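+/* Return the user_stats slot already assigned to this peer_id, or the
+ * first unclaimed slot; -EINVAL when the user table is full.
+ */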
+static int ath11k_get_ppdu_user_index(struct htt_ppdu_stats *ppdu_stats,
+ u16 peer_id)
+{
+ int i;
+
+ for (i = 0; i < HTT_PPDU_STATS_MAX_USERS - 1; i++) {
+ if (ppdu_stats->user_stats[i].is_valid_peer_id) {
+ if (peer_id == ppdu_stats->user_stats[i].peer_id)
+ return i;
+ } else {
+ return i;
+ }
+ }
+
+ return -EINVAL;
+}
+
+static int ath11k_htt_tlv_ppdu_stats_parse(struct ath11k_base *ab,
+ u16 tag, u16 len, const void *ptr,
+ void *data)
+{
+ struct htt_ppdu_stats_info *ppdu_info;
+ struct htt_ppdu_user_stats *user_stats;
+ int cur_user;
+ u16 peer_id;
+
+ ppdu_info = (struct htt_ppdu_stats_info *)data;
+
+ switch (tag) {
+ case HTT_PPDU_STATS_TAG_COMMON:
+ if (len < sizeof(struct htt_ppdu_stats_common)) {
+ ath11k_warn(ab, "Invalid len %d for the tag 0x%x\n",
+ len, tag);
+ return -EINVAL;
+ }
+ memcpy((void *)&ppdu_info->ppdu_stats.common, ptr,
+ sizeof(struct htt_ppdu_stats_common));
+ break;
+ case HTT_PPDU_STATS_TAG_USR_RATE:
+ if (len < sizeof(struct htt_ppdu_stats_user_rate)) {
+ ath11k_warn(ab, "Invalid len %d for the tag 0x%x\n",
+ len, tag);
+ return -EINVAL;
+ }
+
+ peer_id = ((struct htt_ppdu_stats_user_rate *)ptr)->sw_peer_id;
+ cur_user = ath11k_get_ppdu_user_index(&ppdu_info->ppdu_stats,
+ peer_id);
+ if (cur_user < 0)
+ return -EINVAL;
+ user_stats = &ppdu_info->ppdu_stats.user_stats[cur_user];
+ user_stats->peer_id = peer_id;
+ user_stats->is_valid_peer_id = true;
+ memcpy((void *)&user_stats->rate, ptr,
+ sizeof(struct htt_ppdu_stats_user_rate));
+ user_stats->tlv_flags |= BIT(tag);
+ break;
+ case HTT_PPDU_STATS_TAG_USR_COMPLTN_COMMON:
+ if (len < sizeof(struct htt_ppdu_stats_usr_cmpltn_cmn)) {
+ ath11k_warn(ab, "Invalid len %d for the tag 0x%x\n",
+ len, tag);
+ return -EINVAL;
+ }
+
+ peer_id = ((struct htt_ppdu_stats_usr_cmpltn_cmn *)ptr)->sw_peer_id;
+ cur_user = ath11k_get_ppdu_user_index(&ppdu_info->ppdu_stats,
+ peer_id);
+ if (cur_user < 0)
+ return -EINVAL;
+ user_stats = &ppdu_info->ppdu_stats.user_stats[cur_user];
+ user_stats->peer_id = peer_id;
+ user_stats->is_valid_peer_id = true;
+ memcpy((void *)&user_stats->cmpltn_cmn, ptr,
+ sizeof(struct htt_ppdu_stats_usr_cmpltn_cmn));
+ user_stats->tlv_flags |= BIT(tag);
+ break;
+ case HTT_PPDU_STATS_TAG_USR_COMPLTN_ACK_BA_STATUS:
+ if (len <
+ sizeof(struct htt_ppdu_stats_usr_cmpltn_ack_ba_status)) {
+ ath11k_warn(ab, "Invalid len %d for the tag 0x%x\n",
+ len, tag);
+ return -EINVAL;
+ }
+
+ peer_id =
+ ((struct htt_ppdu_stats_usr_cmpltn_ack_ba_status *)ptr)->sw_peer_id;
+ cur_user = ath11k_get_ppdu_user_index(&ppdu_info->ppdu_stats,
+ peer_id);
+ if (cur_user < 0)
+ return -EINVAL;
+ user_stats = &ppdu_info->ppdu_stats.user_stats[cur_user];
+ user_stats->peer_id = peer_id;
+ user_stats->is_valid_peer_id = true;
+ memcpy((void *)&user_stats->ack_ba, ptr,
+ sizeof(struct htt_ppdu_stats_usr_cmpltn_ack_ba_status));
+ user_stats->tlv_flags |= BIT(tag);
+ break;
+ }
+ return 0;
+}
+
+int ath11k_dp_htt_tlv_iter(struct ath11k_base *ab, const void *ptr, size_t len,
+ int (*iter)(struct ath11k_base *ar, u16 tag, u16 len,
+ const void *ptr, void *data),
+ void *data)
+{
+ const struct htt_tlv *tlv;
+ const void *begin = ptr;
+ u16 tlv_tag, tlv_len;
+ int ret = -EINVAL;
+
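+ /* Each HTT TLV is a 32-bit header carrying a tag and a payload
+ * length, followed by the payload itself; hand each payload to the
+ * caller's iterator.
+ */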
+ while (len > 0) {
+ if (len < sizeof(*tlv)) {
+ ath11k_err(ab, "htt tlv parse failure at byte %zd (%zu bytes left, %zu expected)\n",
+ ptr - begin, len, sizeof(*tlv));
+ return -EINVAL;
+ }
+ tlv = (struct htt_tlv *)ptr;
+ tlv_tag = FIELD_GET(HTT_TLV_TAG, tlv->header);
+ tlv_len = FIELD_GET(HTT_TLV_LEN, tlv->header);
+ ptr += sizeof(*tlv);
+ len -= sizeof(*tlv);
+
+ if (tlv_len > len) {
+ ath11k_err(ab, "htt tlv parse failure of tag %hhu at byte %zd (%zu bytes left, %hhu expected)\n",
+ tlv_tag, ptr - begin, len, tlv_len);
+ return -EINVAL;
+ }
+ ret = iter(ab, tlv_tag, tlv_len, ptr, data);
+ if (ret == -ENOMEM)
+ return ret;
+
+ ptr += tlv_len;
+ len -= tlv_len;
+ }
+ return 0;
+}
+
+static u32 ath11k_bw_to_mac80211_bwflags(u8 bw)
+{
+ u32 bwflags = 0;
+
+ switch (bw) {
+ case ATH11K_BW_40:
+ bwflags = IEEE80211_TX_RC_40_MHZ_WIDTH;
+ break;
+ case ATH11K_BW_80:
+ bwflags = IEEE80211_TX_RC_80_MHZ_WIDTH;
+ break;
+ case ATH11K_BW_160:
+ bwflags = IEEE80211_TX_RC_160_MHZ_WIDTH;
+ break;
+ }
+
+ return bwflags;
+}
+
+static void
+ath11k_update_per_peer_tx_stats(struct ath11k *ar,
+ struct htt_ppdu_stats *ppdu_stats, u8 user)
+{
+ struct ath11k_base *ab = ar->ab;
+ struct ath11k_peer *peer;
+ struct ieee80211_sta *sta;
+ struct ath11k_sta *arsta;
+ struct htt_ppdu_stats_user_rate *user_rate;
+ struct ieee80211_chanctx_conf *conf = NULL;
+ struct ath11k_per_peer_tx_stats *peer_stats = &ar->peer_tx_stats;
+ struct htt_ppdu_user_stats *usr_stats = &ppdu_stats->user_stats[user];
+ struct htt_ppdu_stats_common *common = &ppdu_stats->common;
+ int ret;
+ u8 flags, mcs, nss, bw, sgi, rate_idx = 0;
+ u32 succ_bytes = 0;
+ u16 rate = 0, succ_pkts = 0;
+ u32 tx_duration = 0;
+ u8 tid = HTT_PPDU_STATS_NON_QOS_TID;
+ bool is_ampdu = false;
+
+ if (!usr_stats)
+ return;
+
+ if (!(usr_stats->tlv_flags & BIT(HTT_PPDU_STATS_TAG_USR_RATE)))
+ return;
+
+ if (usr_stats->tlv_flags & BIT(HTT_PPDU_STATS_TAG_USR_COMPLTN_COMMON))
+ is_ampdu =
+ HTT_USR_CMPLTN_IS_AMPDU(usr_stats->cmpltn_cmn.flags);
+
+ if (usr_stats->tlv_flags &
+ BIT(HTT_PPDU_STATS_TAG_USR_COMPLTN_ACK_BA_STATUS)) {
+ succ_bytes = usr_stats->ack_ba.success_bytes;
+ succ_pkts = FIELD_GET(HTT_PPDU_STATS_ACK_BA_INFO_NUM_MSDU_M,
+ usr_stats->ack_ba.info);
+ tid = FIELD_GET(HTT_PPDU_STATS_ACK_BA_INFO_TID_NUM,
+ usr_stats->ack_ba.info);
+ }
+
+ if (common->fes_duration_us)
+ tx_duration = common->fes_duration_us;
+
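+ /* Unpack the rate TLV; note the bandwidth field is encoded with an
+ * offset of 2 and the nss field is zero-based.
+ */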
+ user_rate = &usr_stats->rate;
+ flags = HTT_USR_RATE_PREAMBLE(user_rate->rate_flags);
+ bw = HTT_USR_RATE_BW(user_rate->rate_flags) - 2;
+ nss = HTT_USR_RATE_NSS(user_rate->rate_flags) + 1;
+ mcs = HTT_USR_RATE_MCS(user_rate->rate_flags);
+ sgi = HTT_USR_RATE_GI(user_rate->rate_flags);
+
+ /* Note: If the host has configured fixed rates, or in some other
+ * special cases, broadcast/management frames are sent at different
+ * rates. Should firmware rate control be skipped for those?
+ */
+
+ if (flags == WMI_RATE_PREAMBLE_VHT && mcs > 9) {
+ ath11k_warn(ab, "Invalid VHT mcs %hhd peer stats", mcs);
+ return;
+ }
+
+ if (flags == WMI_RATE_PREAMBLE_HT && (mcs > 7 || nss < 1)) {
+ ath11k_warn(ab, "Invalid HT mcs %hhd nss %hhd peer stats",
+ mcs, nss);
+ return;
+ }
+
+ if (flags == WMI_RATE_PREAMBLE_CCK || flags == WMI_RATE_PREAMBLE_OFDM) {
+ ret = ath11k_mac_hw_ratecode_to_legacy_rate(mcs,
+ flags,
+ &rate_idx,
+ &rate);
+ if (ret < 0)
+ return;
+ }
+
+ rcu_read_lock();
+ spin_lock_bh(&ab->base_lock);
+ peer = ath11k_peer_find_by_id(ab, usr_stats->peer_id);
+
+ if (!peer || !peer->sta) {
+ spin_unlock_bh(&ab->base_lock);
+ rcu_read_unlock();
+ return;
+ }
+
+ sta = peer->sta;
+ arsta = (struct ath11k_sta *)sta->drv_priv;
+
+ memset(&arsta->txrate, 0, sizeof(arsta->txrate));
+ memset(&arsta->tx_info.status, 0, sizeof(arsta->tx_info.status));
+
+ switch (flags) {
+ case WMI_RATE_PREAMBLE_OFDM:
+ arsta->txrate.legacy = rate;
+ if (arsta->arvif && arsta->arvif->vif)
+ conf = rcu_dereference(arsta->arvif->vif->chanctx_conf);
+ if (conf && conf->def.chan->band == NL80211_BAND_5GHZ)
+ arsta->tx_info.status.rates[0].idx = rate_idx - 4;
+ break;
+ case WMI_RATE_PREAMBLE_CCK:
+ arsta->txrate.legacy = rate;
+ arsta->tx_info.status.rates[0].idx = rate_idx;
+ if (mcs > ATH11K_HW_RATE_CCK_LP_1M &&
+ mcs <= ATH11K_HW_RATE_CCK_SP_2M)
+ arsta->tx_info.status.rates[0].flags |=
+ IEEE80211_TX_RC_USE_SHORT_PREAMBLE;
+ break;
+ case WMI_RATE_PREAMBLE_HT:
+ arsta->txrate.mcs = mcs + 8 * (nss - 1);
+ arsta->tx_info.status.rates[0].idx = arsta->txrate.mcs;
+ arsta->txrate.flags = RATE_INFO_FLAGS_MCS;
+ arsta->tx_info.status.rates[0].flags |= IEEE80211_TX_RC_MCS;
+ if (sgi) {
+ arsta->txrate.flags |= RATE_INFO_FLAGS_SHORT_GI;
+ arsta->tx_info.status.rates[0].flags |=
+ IEEE80211_TX_RC_SHORT_GI;
+ }
+ break;
+ case WMI_RATE_PREAMBLE_VHT:
+ arsta->txrate.mcs = mcs;
+ ieee80211_rate_set_vht(&arsta->tx_info.status.rates[0], mcs, nss);
+ arsta->txrate.flags = RATE_INFO_FLAGS_VHT_MCS;
+ arsta->tx_info.status.rates[0].flags |= IEEE80211_TX_RC_VHT_MCS;
+ if (sgi) {
+ arsta->txrate.flags |= RATE_INFO_FLAGS_SHORT_GI;
+ arsta->tx_info.status.rates[0].flags |=
+ IEEE80211_TX_RC_SHORT_GI;
+ }
+ break;
+ }
+
+ arsta->txrate.nss = nss;
+ arsta->txrate.bw = ath11k_mac_bw_to_mac80211_bw(bw);
+ arsta->tx_info.status.rates[0].flags |= ath11k_bw_to_mac80211_bwflags(bw);
+ arsta->tx_duration += tx_duration;
+ memcpy(&arsta->last_txrate, &arsta->txrate, sizeof(struct rate_info));
+
+ if (succ_pkts) {
+ arsta->tx_info.flags = IEEE80211_TX_STAT_ACK;
+ arsta->tx_info.status.rates[0].count = 1;
+ ieee80211_tx_rate_update(ar->hw, sta, &arsta->tx_info);
+ }
+
+ /* PPDU stats reported for mgmt packet doesn't have valid tx bytes.
+ * So skip peer stats update for mgmt packets.
+ */
+ if (tid < HTT_PPDU_STATS_NON_QOS_TID) {
+ memset(peer_stats, 0, sizeof(*peer_stats));
+ peer_stats->succ_pkts = succ_pkts;
+ peer_stats->succ_bytes = succ_bytes;
+ peer_stats->is_ampdu = is_ampdu;
+ peer_stats->duration = tx_duration;
+ peer_stats->ba_fails =
+ HTT_USR_CMPLTN_LONG_RETRY(usr_stats->cmpltn_cmn.flags) +
+ HTT_USR_CMPLTN_SHORT_RETRY(usr_stats->cmpltn_cmn.flags);
+
+ if (ath11k_debug_is_extd_tx_stats_enabled(ar))
+ ath11k_accumulate_per_peer_tx_stats(arsta,
+ peer_stats, rate_idx);
+ }
+
+ spin_unlock_bh(&ab->base_lock);
+ rcu_read_unlock();
+}
+
+static void ath11k_htt_update_ppdu_stats(struct ath11k *ar,
+ struct htt_ppdu_stats *ppdu_stats)
+{
+ u8 user;
+
+ for (user = 0; user < HTT_PPDU_STATS_MAX_USERS - 1; user++)
+ ath11k_update_per_peer_tx_stats(ar, ppdu_stats, user);
+}
+
+static
+struct htt_ppdu_stats_info *ath11k_dp_htt_get_ppdu_desc(struct ath11k *ar,
+ u32 ppdu_id)
+{
+ struct htt_ppdu_stats_info *ppdu_info;
+
+ spin_lock_bh(&ar->data_lock);
+ if (!list_empty(&ar->ppdu_stats_info)) {
+ list_for_each_entry(ppdu_info, &ar->ppdu_stats_info, list) {
+ if (ppdu_info->ppdu_id == ppdu_id) {
+ spin_unlock_bh(&ar->data_lock);
+ return ppdu_info;
+ }
+ }
+
+ if (ar->ppdu_stat_list_depth > HTT_PPDU_DESC_MAX_DEPTH) {
+ ppdu_info = list_first_entry(&ar->ppdu_stats_info,
+ typeof(*ppdu_info), list);
+ list_del(&ppdu_info->list);
+ ar->ppdu_stat_list_depth--;
+ ath11k_htt_update_ppdu_stats(ar, &ppdu_info->ppdu_stats);
+ kfree(ppdu_info);
+ }
+ }
+ spin_unlock_bh(&ar->data_lock);
+
+ ppdu_info = kzalloc(sizeof(*ppdu_info), GFP_KERNEL);
+ if (!ppdu_info)
+ return NULL;
+
+ spin_lock_bh(&ar->data_lock);
+ list_add_tail(&ppdu_info->list, &ar->ppdu_stats_info);
+ ar->ppdu_stat_list_depth++;
+ spin_unlock_bh(&ar->data_lock);
+
+ return ppdu_info;
+}
+
+static int ath11k_htt_pull_ppdu_stats(struct ath11k_base *ab,
+ struct sk_buff *skb)
+{
+ struct ath11k_htt_ppdu_stats_msg *msg;
+ struct htt_ppdu_stats_info *ppdu_info;
+ struct ath11k *ar;
+ int ret;
+ u8 pdev_id;
+ u32 ppdu_id, len;
+
+ msg = (struct ath11k_htt_ppdu_stats_msg *)skb->data;
+ len = FIELD_GET(HTT_T2H_PPDU_STATS_INFO_PAYLOAD_SIZE, msg->info);
+ pdev_id = FIELD_GET(HTT_T2H_PPDU_STATS_INFO_PDEV_ID, msg->info);
+ ppdu_id = msg->ppdu_id;
+
+ rcu_read_lock();
+ ar = ath11k_mac_get_ar_by_pdev_id(ab, pdev_id);
+ if (!ar) {
+ ret = -EINVAL;
+ goto exit;
+ }
+
+ if (ath11k_debug_is_pktlog_lite_mode_enabled(ar))
+ trace_ath11k_htt_ppdu_stats(ar, skb->data, len);
+
+ ppdu_info = ath11k_dp_htt_get_ppdu_desc(ar, ppdu_id);
+ if (!ppdu_info) {
+ ret = -EINVAL;
+ goto exit;
+ }
+
+ ppdu_info->ppdu_id = ppdu_id;
+ ret = ath11k_dp_htt_tlv_iter(ab, msg->data, len,
+ ath11k_htt_tlv_ppdu_stats_parse,
+ (void *)ppdu_info);
+ if (ret) {
+ ath11k_warn(ab, "Failed to parse tlv %d\n", ret);
+ goto exit;
+ }
+
+exit:
+ rcu_read_unlock();
+
+ return ret;
+}
+
+static void ath11k_htt_pktlog(struct ath11k_base *ab, struct sk_buff *skb)
+{
+ struct htt_pktlog_msg *data = (struct htt_pktlog_msg *)skb->data;
+ struct ath11k *ar;
+ u32 len;
+ u8 pdev_id;
+
+ len = FIELD_GET(HTT_T2H_PPDU_STATS_INFO_PAYLOAD_SIZE, data->hdr);
+ if (len > ATH11K_HTT_PKTLOG_MAX_SIZE) {
+ ath11k_warn(ab, "htt pktlog buffer size %d, expected < %d\n",
+ len,
+ ATH11K_HTT_PKTLOG_MAX_SIZE);
+ return;
+ }
+
+ pdev_id = FIELD_GET(HTT_T2H_PPDU_STATS_INFO_PDEV_ID, data->hdr);
+ ar = ath11k_mac_get_ar_by_pdev_id(ab, pdev_id);
+ if (!ar) {
+ ath11k_warn(ab, "invalid pdev id %d on htt pktlog\n", pdev_id);
+ return;
+ }
+
+ trace_ath11k_htt_pktlog(ar, data->payload, len);
+}
+
+void ath11k_dp_htt_htc_t2h_msg_handler(struct ath11k_base *ab,
+ struct sk_buff *skb)
+{
+ struct ath11k_dp *dp = &ab->dp;
+ struct htt_resp_msg *resp = (struct htt_resp_msg *)skb->data;
+ enum htt_t2h_msg_type type = FIELD_GET(HTT_T2H_MSG_TYPE, *(u32 *)resp);
+ u16 peer_id;
+ u8 vdev_id;
+ u8 mac_addr[ETH_ALEN];
+ u16 peer_mac_h16;
+ u16 ast_hash;
+
+ ath11k_dbg(ab, ATH11K_DBG_DP_HTT, "dp_htt rx msg type 0x%x\n", type);
+
+ switch (type) {
+ case HTT_T2H_MSG_TYPE_VERSION_CONF:
+ dp->htt_tgt_ver_major = FIELD_GET(HTT_T2H_VERSION_CONF_MAJOR,
+ resp->version_msg.version);
+ dp->htt_tgt_ver_minor = FIELD_GET(HTT_T2H_VERSION_CONF_MINOR,
+ resp->version_msg.version);
+ complete(&dp->htt_tgt_version_received);
+ break;
+ case HTT_T2H_MSG_TYPE_PEER_MAP:
+ vdev_id = FIELD_GET(HTT_T2H_PEER_MAP_INFO_VDEV_ID,
+ resp->peer_map_ev.info);
+ peer_id = FIELD_GET(HTT_T2H_PEER_MAP_INFO_PEER_ID,
+ resp->peer_map_ev.info);
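+ /* The peer MAC address arrives split across the event: the lower
+ * 32 bits in mac_addr_l32 and the upper 16 bits in info1.
+ */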
+ peer_mac_h16 = FIELD_GET(HTT_T2H_PEER_MAP_INFO1_MAC_ADDR_H16,
+ resp->peer_map_ev.info1);
+ ath11k_dp_get_mac_addr(resp->peer_map_ev.mac_addr_l32,
+ peer_mac_h16, mac_addr);
+ ast_hash = FIELD_GET(HTT_T2H_PEER_MAP_INFO2_AST_HASH_VAL,
+ resp->peer_map_ev.info2);
+ ath11k_peer_map_event(ab, vdev_id, peer_id, mac_addr, ast_hash);
+ break;
+ case HTT_T2H_MSG_TYPE_PEER_UNMAP:
+ peer_id = FIELD_GET(HTT_T2H_PEER_UNMAP_INFO_PEER_ID,
+ resp->peer_unmap_ev.info);
+ ath11k_peer_unmap_event(ab, peer_id);
+ break;
+ case HTT_T2H_MSG_TYPE_PPDU_STATS_IND:
+ ath11k_htt_pull_ppdu_stats(ab, skb);
+ break;
+ case HTT_T2H_MSG_TYPE_EXT_STATS_CONF:
+ ath11k_dbg_htt_ext_stats_handler(ab, skb);
+ break;
+ case HTT_T2H_MSG_TYPE_PKTLOG:
+ ath11k_htt_pktlog(ab, skb);
+ break;
+ default:
+ ath11k_warn(ab, "htt event %d not handled\n", type);
+ break;
+ }
+
+ dev_kfree_skb_any(skb);
+}
+
+static int ath11k_dp_rx_msdu_coalesce(struct ath11k *ar,
+ struct sk_buff_head *msdu_list,
+ struct sk_buff *first, struct sk_buff *last,
+ u8 l3pad_bytes, int msdu_len)
+{
+ struct sk_buff *skb;
+ struct ath11k_skb_rxcb *rxcb = ATH11K_SKB_RXCB(first);
+ int buf_first_hdr_len, buf_first_len;
+ struct hal_rx_desc *ldesc;
+ int space_extra;
+ int rem_len;
+ int buf_len;
+
+ /* As the msdu is spread across multiple rx buffers,
+ * find the offset to the start of msdu for computing
+ * the length of the msdu in the first buffer.
+ */
+ buf_first_hdr_len = HAL_RX_DESC_SIZE + l3pad_bytes;
+ buf_first_len = DP_RX_BUFFER_SIZE - buf_first_hdr_len;
+
+ if (WARN_ON_ONCE(msdu_len <= buf_first_len)) {
+ skb_put(first, buf_first_hdr_len + msdu_len);
+ skb_pull(first, buf_first_hdr_len);
+ return 0;
+ }
+
+ ldesc = (struct hal_rx_desc *)last->data;
+ rxcb->is_first_msdu = ath11k_dp_rx_h_msdu_end_first_msdu(ldesc);
+ rxcb->is_last_msdu = ath11k_dp_rx_h_msdu_end_last_msdu(ldesc);
+
+ /* MSDU spans over multiple buffers because the length of the MSDU
+ * exceeds DP_RX_BUFFER_SIZE - HAL_RX_DESC_SIZE. So assume the data
+ * in the first buf is of length DP_RX_BUFFER_SIZE - HAL_RX_DESC_SIZE.
+ */
+ skb_put(first, DP_RX_BUFFER_SIZE);
+ skb_pull(first, buf_first_hdr_len);
+
+ /* When an MSDU spans multiple buffers, the attention, MSDU_END and
+ * MPDU_END TLVs are valid only in the last buffer. Copy those TLVs.
+ */
+ ath11k_dp_rx_desc_end_tlv_copy(rxcb->rx_desc, ldesc);
+
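+ /* Grow the first skb's tailroom if the remaining MSDU bytes will not
+ * fit; on allocation failure every buffer of this MSDU is dropped.
+ */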
+ space_extra = msdu_len - (buf_first_len + skb_tailroom(first));
+ if (space_extra > 0 &&
+ (pskb_expand_head(first, 0, space_extra, GFP_ATOMIC) < 0)) {
+ /* Free up all buffers of the MSDU */
+ while ((skb = __skb_dequeue(msdu_list)) != NULL) {
+ rxcb = ATH11K_SKB_RXCB(skb);
+ if (!rxcb->is_continuation) {
+ dev_kfree_skb_any(skb);
+ break;
+ }
+ dev_kfree_skb_any(skb);
+ }
+ return -ENOMEM;
+ }
+
+ rem_len = msdu_len - buf_first_len;
+ while ((skb = __skb_dequeue(msdu_list)) != NULL && rem_len > 0) {
+ rxcb = ATH11K_SKB_RXCB(skb);
+ if (rxcb->is_continuation)
+ buf_len = DP_RX_BUFFER_SIZE - HAL_RX_DESC_SIZE;
+ else
+ buf_len = rem_len;
+
+ if (buf_len > (DP_RX_BUFFER_SIZE - HAL_RX_DESC_SIZE)) {
+ WARN_ON_ONCE(1);
+ dev_kfree_skb_any(skb);
+ return -EINVAL;
+ }
+
+ skb_put(skb, buf_len + HAL_RX_DESC_SIZE);
+ skb_pull(skb, HAL_RX_DESC_SIZE);
+ skb_copy_from_linear_data(skb, skb_put(first, buf_len),
+ buf_len);
+ dev_kfree_skb_any(skb);
+
+ rem_len -= buf_len;
+ if (!rxcb->is_continuation)
+ break;
+ }
+
+ return 0;
+}
+
+static struct sk_buff *ath11k_dp_rx_get_msdu_last_buf(struct sk_buff_head *msdu_list,
+ struct sk_buff *first)
+{
+ struct sk_buff *skb;
+ struct ath11k_skb_rxcb *rxcb = ATH11K_SKB_RXCB(first);
+
+ if (!rxcb->is_continuation)
+ return first;
+
+ skb_queue_walk(msdu_list, skb) {
+ rxcb = ATH11K_SKB_RXCB(skb);
+ if (!rxcb->is_continuation)
+ return skb;
+ }
+
+ return NULL;
+}
+
+static int ath11k_dp_rx_retrieve_amsdu(struct ath11k *ar,
+ struct sk_buff_head *msdu_list,
+ struct sk_buff_head *amsdu_list)
+{
+ struct sk_buff *msdu = skb_peek(msdu_list);
+ struct sk_buff *last_buf;
+ struct ath11k_skb_rxcb *rxcb;
+ struct ieee80211_hdr *hdr;
+ struct hal_rx_desc *rx_desc, *lrx_desc;
+ u16 msdu_len;
+ u8 l3_pad_bytes;
+ u8 *hdr_status;
+ int ret;
+
+ if (!msdu)
+ return -ENOENT;
+
+ rx_desc = (struct hal_rx_desc *)msdu->data;
+ hdr_status = ath11k_dp_rx_h_80211_hdr(rx_desc);
+ hdr = (struct ieee80211_hdr *)hdr_status;
+ /* Process only data frames */
+ if (!ieee80211_is_data(hdr->frame_control)) {
+ __skb_unlink(msdu, msdu_list);
+ dev_kfree_skb_any(msdu);
+ return -EINVAL;
+ }
+
+ do {
+ __skb_unlink(msdu, msdu_list);
+ last_buf = ath11k_dp_rx_get_msdu_last_buf(msdu_list, msdu);
+ if (!last_buf) {
+ ath11k_warn(ar->ab,
+ "No valid Rx buffer to access Atten/MSDU_END/MPDU_END tlvs\n");
+ ret = -EIO;
+ goto free_out;
+ }
+
+ rx_desc = (struct hal_rx_desc *)msdu->data;
+ lrx_desc = (struct hal_rx_desc *)last_buf->data;
+
+ if (!ath11k_dp_rx_h_attn_msdu_done(lrx_desc)) {
+ ath11k_warn(ar->ab, "msdu_done bit in attention is not set\n");
+ ret = -EIO;
+ goto free_out;
+ }
+
+ rxcb = ATH11K_SKB_RXCB(msdu);
+ rxcb->rx_desc = rx_desc;
+ msdu_len = ath11k_dp_rx_h_msdu_start_msdu_len(rx_desc);
+ l3_pad_bytes = ath11k_dp_rx_h_msdu_end_l3pad(lrx_desc);
+
+ if (!rxcb->is_continuation) {
+ skb_put(msdu, HAL_RX_DESC_SIZE + l3_pad_bytes + msdu_len);
+ skb_pull(msdu, HAL_RX_DESC_SIZE + l3_pad_bytes);
+ } else {
+ ret = ath11k_dp_rx_msdu_coalesce(ar, msdu_list,
+ msdu, last_buf,
+ l3_pad_bytes, msdu_len);
+ if (ret) {
+ ath11k_warn(ar->ab,
+ "failed to coalesce msdu rx buffer%d\n", ret);
+ goto free_out;
+ }
+ }
+ __skb_queue_tail(amsdu_list, msdu);
+
+ /* Should we also consider msdu_cnt from mpdu_meta while
+ * preparing amsdu list?
+ */
+ if (rxcb->is_last_msdu)
+ break;
+ } while ((msdu = skb_peek(msdu_list)) != NULL);
+
+ return 0;
+
+free_out:
+ dev_kfree_skb_any(msdu);
+ __skb_queue_purge(amsdu_list);
+
+ return ret;
+}
+
+static void ath11k_dp_rx_h_csum_offload(struct sk_buff *msdu)
+{
+ struct ath11k_skb_rxcb *rxcb = ATH11K_SKB_RXCB(msdu);
+ bool ip_csum_fail, l4_csum_fail;
+
+ ip_csum_fail = ath11k_dp_rx_h_attn_ip_cksum_fail(rxcb->rx_desc);
+ l4_csum_fail = ath11k_dp_rx_h_attn_l4_cksum_fail(rxcb->rx_desc);
+
+ msdu->ip_summed = (ip_csum_fail || l4_csum_fail) ?
+ CHECKSUM_NONE : CHECKSUM_UNNECESSARY;
+}
+
+static int ath11k_dp_rx_crypto_mic_len(struct ath11k *ar,
+ enum hal_encrypt_type enctype)
+{
+ switch (enctype) {
+ case HAL_ENCRYPT_TYPE_OPEN:
+ case HAL_ENCRYPT_TYPE_TKIP_NO_MIC:
+ case HAL_ENCRYPT_TYPE_TKIP_MIC:
+ return 0;
+ case HAL_ENCRYPT_TYPE_CCMP_128:
+ return IEEE80211_CCMP_MIC_LEN;
+ case HAL_ENCRYPT_TYPE_CCMP_256:
+ return IEEE80211_CCMP_256_MIC_LEN;
+ case HAL_ENCRYPT_TYPE_GCMP_128:
+ case HAL_ENCRYPT_TYPE_AES_GCMP_256:
+ return IEEE80211_GCMP_MIC_LEN;
+ case HAL_ENCRYPT_TYPE_WEP_40:
+ case HAL_ENCRYPT_TYPE_WEP_104:
+ case HAL_ENCRYPT_TYPE_WEP_128:
+ case HAL_ENCRYPT_TYPE_WAPI_GCM_SM4:
+ case HAL_ENCRYPT_TYPE_WAPI:
+ break;
+ }
+
+ ath11k_warn(ar->ab, "unsupported encryption type %d for mic len\n", enctype);
+ return 0;
+}
+
+static int ath11k_dp_rx_crypto_param_len(struct ath11k *ar,
+ enum hal_encrypt_type enctype)
+{
+ switch (enctype) {
+ case HAL_ENCRYPT_TYPE_OPEN:
+ return 0;
+ case HAL_ENCRYPT_TYPE_TKIP_NO_MIC:
+ case HAL_ENCRYPT_TYPE_TKIP_MIC:
+ return IEEE80211_TKIP_IV_LEN;
+ case HAL_ENCRYPT_TYPE_CCMP_128:
+ return IEEE80211_CCMP_HDR_LEN;
+ case HAL_ENCRYPT_TYPE_CCMP_256:
+ return IEEE80211_CCMP_256_HDR_LEN;
+ case HAL_ENCRYPT_TYPE_GCMP_128:
+ case HAL_ENCRYPT_TYPE_AES_GCMP_256:
+ return IEEE80211_GCMP_HDR_LEN;
+ case HAL_ENCRYPT_TYPE_WEP_40:
+ case HAL_ENCRYPT_TYPE_WEP_104:
+ case HAL_ENCRYPT_TYPE_WEP_128:
+ case HAL_ENCRYPT_TYPE_WAPI_GCM_SM4:
+ case HAL_ENCRYPT_TYPE_WAPI:
+ break;
+ }
+
+ ath11k_warn(ar->ab, "unsupported encryption type %d\n", enctype);
+ return 0;
+}
+
+static int ath11k_dp_rx_crypto_icv_len(struct ath11k *ar,
+ enum hal_encrypt_type enctype)
+{
+ switch (enctype) {
+ case HAL_ENCRYPT_TYPE_OPEN:
+ case HAL_ENCRYPT_TYPE_CCMP_128:
+ case HAL_ENCRYPT_TYPE_CCMP_256:
+ case HAL_ENCRYPT_TYPE_GCMP_128:
+ case HAL_ENCRYPT_TYPE_AES_GCMP_256:
+ return 0;
+ case HAL_ENCRYPT_TYPE_TKIP_NO_MIC:
+ case HAL_ENCRYPT_TYPE_TKIP_MIC:
+ return IEEE80211_TKIP_ICV_LEN;
+ case HAL_ENCRYPT_TYPE_WEP_40:
+ case HAL_ENCRYPT_TYPE_WEP_104:
+ case HAL_ENCRYPT_TYPE_WEP_128:
+ case HAL_ENCRYPT_TYPE_WAPI_GCM_SM4:
+ case HAL_ENCRYPT_TYPE_WAPI:
+ break;
+ }
+
+ ath11k_warn(ar->ab, "unsupported encryption type %d\n", enctype);
+ return 0;
+}
+
+static void ath11k_dp_rx_h_undecap_nwifi(struct ath11k *ar,
+ struct sk_buff *msdu,
+ u8 *first_hdr,
+ enum hal_encrypt_type enctype,
+ struct ieee80211_rx_status *status)
+{
+ struct ieee80211_hdr *hdr;
+ size_t hdr_len;
+ u8 da[ETH_ALEN];
+ u8 sa[ETH_ALEN];
+
+ /* pull decapped header and copy SA & DA */
+ hdr = (struct ieee80211_hdr *)msdu->data;
+ ether_addr_copy(da, ieee80211_get_DA(hdr));
+ ether_addr_copy(sa, ieee80211_get_SA(hdr));
+ skb_pull(msdu, ieee80211_hdrlen(hdr->frame_control));
+
+ /* push original 802.11 header */
+ hdr = (struct ieee80211_hdr *)first_hdr;
+ hdr_len = ieee80211_hdrlen(hdr->frame_control);
+
+ if (!(status->flag & RX_FLAG_IV_STRIPPED)) {
+ memcpy(skb_push(msdu,
+ ath11k_dp_rx_crypto_param_len(ar, enctype)),
+ (void *)hdr + hdr_len,
+ ath11k_dp_rx_crypto_param_len(ar, enctype));
+ }
+
+ memcpy(skb_push(msdu, hdr_len), hdr, hdr_len);
+
+ /* original 802.11 header has a different DA and in
+ * case of 4addr it may also have different SA
+ */
+ hdr = (struct ieee80211_hdr *)msdu->data;
+ ether_addr_copy(ieee80211_get_DA(hdr), da);
+ ether_addr_copy(ieee80211_get_SA(hdr), sa);
+}
+
+static void ath11k_dp_rx_h_undecap_raw(struct ath11k *ar, struct sk_buff *msdu,
+ enum hal_encrypt_type enctype,
+ struct ieee80211_rx_status *status,
+ bool decrypted)
+{
+ struct ath11k_skb_rxcb *rxcb = ATH11K_SKB_RXCB(msdu);
+ struct ieee80211_hdr *hdr;
+ size_t hdr_len;
+ size_t crypto_len;
+
+ if (!(rxcb->is_first_msdu && rxcb->is_last_msdu)) {
+ WARN_ON_ONCE(1);
+ return;
+ }
+
+ skb_trim(msdu, msdu->len - FCS_LEN);
+
+ if (!decrypted)
+ return;
+
+ hdr = (void *)msdu->data;
+
+ /* Tail */
+ if (status->flag & RX_FLAG_IV_STRIPPED) {
+ skb_trim(msdu, msdu->len -
+ ath11k_dp_rx_crypto_mic_len(ar, enctype));
+
+ skb_trim(msdu, msdu->len -
+ ath11k_dp_rx_crypto_icv_len(ar, enctype));
+ } else {
+ /* MIC */
+ if (status->flag & RX_FLAG_MIC_STRIPPED)
+ skb_trim(msdu, msdu->len -
+ ath11k_dp_rx_crypto_mic_len(ar, enctype));
+
+ /* ICV */
+ if (status->flag & RX_FLAG_ICV_STRIPPED)
+ skb_trim(msdu, msdu->len -
+ ath11k_dp_rx_crypto_icv_len(ar, enctype));
+ }
+
+ /* MMIC */
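+ /* (The Michael MIC is 8 bytes, the same length as the CCMP MIC.) */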
+ if ((status->flag & RX_FLAG_MMIC_STRIPPED) &&
+ !ieee80211_has_morefrags(hdr->frame_control) &&
+ enctype == HAL_ENCRYPT_TYPE_TKIP_MIC)
+ skb_trim(msdu, msdu->len - IEEE80211_CCMP_MIC_LEN);
+
+ /* Head */
+ if (status->flag & RX_FLAG_IV_STRIPPED) {
+ hdr_len = ieee80211_hdrlen(hdr->frame_control);
+ crypto_len = ath11k_dp_rx_crypto_param_len(ar, enctype);
+
+ memmove((void *)msdu->data + crypto_len,
+ (void *)msdu->data, hdr_len);
+ skb_pull(msdu, crypto_len);
+ }
+}
+
+static void *ath11k_dp_rx_h_find_rfc1042(struct ath11k *ar,
+ struct sk_buff *msdu,
+ enum hal_encrypt_type enctype)
+{
+ struct ath11k_skb_rxcb *rxcb = ATH11K_SKB_RXCB(msdu);
+ struct ieee80211_hdr *hdr;
+ size_t hdr_len, crypto_len;
+ void *rfc1042;
+ bool is_amsdu;
+
+ is_amsdu = !(rxcb->is_first_msdu && rxcb->is_last_msdu);
+ hdr = (struct ieee80211_hdr *)ath11k_dp_rx_h_80211_hdr(rxcb->rx_desc);
+ rfc1042 = hdr;
+
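+ /* In the first MSDU the RFC 1042 header follows the 802.11 header
+ * and any crypto params; in an A-MSDU it additionally follows the
+ * subframe header.
+ */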
+ if (rxcb->is_first_msdu) {
+ hdr_len = ieee80211_hdrlen(hdr->frame_control);
+ crypto_len = ath11k_dp_rx_crypto_param_len(ar, enctype);
+
+ rfc1042 += hdr_len + crypto_len;
+ }
+
+ if (is_amsdu)
+ rfc1042 += sizeof(struct ath11k_dp_amsdu_subframe_hdr);
+
+ return rfc1042;
+}
+
+static void ath11k_dp_rx_h_undecap_eth(struct ath11k *ar,
+ struct sk_buff *msdu,
+ u8 *first_hdr,
+ enum hal_encrypt_type enctype,
+ struct ieee80211_rx_status *status)
+{
+ struct ieee80211_hdr *hdr;
+ struct ethhdr *eth;
+ size_t hdr_len;
+ u8 da[ETH_ALEN];
+ u8 sa[ETH_ALEN];
+ void *rfc1042;
+
+ rfc1042 = ath11k_dp_rx_h_find_rfc1042(ar, msdu, enctype);
+ if (WARN_ON_ONCE(!rfc1042))
+ return;
+
+ /* pull decapped header and copy SA & DA */
+ eth = (struct ethhdr *)msdu->data;
+ ether_addr_copy(da, eth->h_dest);
+ ether_addr_copy(sa, eth->h_source);
+ skb_pull(msdu, sizeof(struct ethhdr));
+
+ /* push rfc1042/llc/snap */
+ memcpy(skb_push(msdu, sizeof(struct ath11k_dp_rfc1042_hdr)), rfc1042,
+ sizeof(struct ath11k_dp_rfc1042_hdr));
+
+ /* push original 802.11 header */
+ hdr = (struct ieee80211_hdr *)first_hdr;
+ hdr_len = ieee80211_hdrlen(hdr->frame_control);
+
+ if (!(status->flag & RX_FLAG_IV_STRIPPED)) {
+ memcpy(skb_push(msdu,
+ ath11k_dp_rx_crypto_param_len(ar, enctype)),
+ (void *)hdr + hdr_len,
+ ath11k_dp_rx_crypto_param_len(ar, enctype));
+ }
+
+ memcpy(skb_push(msdu, hdr_len), hdr, hdr_len);
+
+ /* original 802.11 header has a different DA and in
+ * case of 4addr it may also have different SA
+ */
+ hdr = (struct ieee80211_hdr *)msdu->data;
+ ether_addr_copy(ieee80211_get_DA(hdr), da);
+ ether_addr_copy(ieee80211_get_SA(hdr), sa);
+}
+
+static void ath11k_dp_rx_h_undecap(struct ath11k *ar, struct sk_buff *msdu,
+ struct hal_rx_desc *rx_desc,
+ enum hal_encrypt_type enctype,
+ struct ieee80211_rx_status *status,
+ bool decrypted)
+{
+ u8 *first_hdr;
+ u8 decap;
+
+ first_hdr = ath11k_dp_rx_h_80211_hdr(rx_desc);
+ decap = ath11k_dp_rx_h_mpdu_start_decap_type(rx_desc);
+
+ switch (decap) {
+ case DP_RX_DECAP_TYPE_NATIVE_WIFI:
+ ath11k_dp_rx_h_undecap_nwifi(ar, msdu, first_hdr,
+ enctype, status);
+ break;
+ case DP_RX_DECAP_TYPE_RAW:
+ ath11k_dp_rx_h_undecap_raw(ar, msdu, enctype, status,
+ decrypted);
+ break;
+ case DP_RX_DECAP_TYPE_ETHERNET2_DIX:
+ ath11k_dp_rx_h_undecap_eth(ar, msdu, first_hdr,
+ enctype, status);
+ break;
+ case DP_RX_DECAP_TYPE_8023:
+ /* TODO: Handle undecap for these formats */
+ break;
+ }
+}
+
+static void ath11k_dp_rx_h_mpdu(struct ath11k *ar,
+ struct sk_buff_head *amsdu_list,
+ struct hal_rx_desc *rx_desc,
+ struct ieee80211_rx_status *rx_status)
+{
+ struct ieee80211_hdr *hdr;
+ enum hal_encrypt_type enctype;
+ struct sk_buff *last_msdu;
+ struct sk_buff *msdu;
+ struct ath11k_skb_rxcb *last_rxcb;
+ bool is_decrypted;
+ u32 err_bitmap;
+ u8 *qos;
+
+ if (skb_queue_empty(amsdu_list))
+ return;
+
+ hdr = (struct ieee80211_hdr *)ath11k_dp_rx_h_80211_hdr(rx_desc);
+
+ /* Each A-MSDU subframe will use the original header as the base and be
+ * reported as a separate MSDU so strip the A-MSDU bit from QoS Ctl.
+ */
+ if (ieee80211_is_data_qos(hdr->frame_control)) {
+ qos = ieee80211_get_qos_ctl(hdr);
+ qos[0] &= ~IEEE80211_QOS_CTL_A_MSDU_PRESENT;
+ }
+
+ is_decrypted = ath11k_dp_rx_h_attn_is_decrypted(rx_desc);
+ enctype = ath11k_dp_rx_h_mpdu_start_enctype(rx_desc);
+
+ /* Some attention flags are valid only in the last MSDU. */
+ last_msdu = skb_peek_tail(amsdu_list);
+ last_rxcb = ATH11K_SKB_RXCB(last_msdu);
+
+ err_bitmap = ath11k_dp_rx_h_attn_mpdu_err(last_rxcb->rx_desc);
+
+ /* Clear per-MPDU flags while leaving per-PPDU flags intact. */
+ rx_status->flag &= ~(RX_FLAG_FAILED_FCS_CRC |
+ RX_FLAG_MMIC_ERROR |
+ RX_FLAG_DECRYPTED |
+ RX_FLAG_IV_STRIPPED |
+ RX_FLAG_MMIC_STRIPPED);
+
+ if (err_bitmap & DP_RX_MPDU_ERR_FCS)
+ rx_status->flag |= RX_FLAG_FAILED_FCS_CRC;
+
+ if (err_bitmap & DP_RX_MPDU_ERR_TKIP_MIC)
+ rx_status->flag |= RX_FLAG_MMIC_ERROR;
+
+ if (is_decrypted)
+ rx_status->flag |= RX_FLAG_DECRYPTED | RX_FLAG_MMIC_STRIPPED |
+ RX_FLAG_MIC_STRIPPED | RX_FLAG_ICV_STRIPPED;
+
+ skb_queue_walk(amsdu_list, msdu) {
+ ath11k_dp_rx_h_csum_offload(msdu);
+ ath11k_dp_rx_h_undecap(ar, msdu, rx_desc,
+ enctype, rx_status, is_decrypted);
+ }
+}
+
+static void ath11k_dp_rx_h_rate(struct ath11k *ar, struct hal_rx_desc *rx_desc,
+ struct ieee80211_rx_status *rx_status)
+{
+ struct ieee80211_supported_band *sband;
+ enum rx_msdu_start_pkt_type pkt_type;
+ u8 bw;
+ u8 rate_mcs, nss;
+ u8 sgi;
+ bool is_cck;
+
+ pkt_type = ath11k_dp_rx_h_msdu_start_pkt_type(rx_desc);
+ bw = ath11k_dp_rx_h_msdu_start_rx_bw(rx_desc);
+ rate_mcs = ath11k_dp_rx_h_msdu_start_rate_mcs(rx_desc);
+ nss = ath11k_dp_rx_h_msdu_start_nss(rx_desc);
+ sgi = ath11k_dp_rx_h_msdu_start_sgi(rx_desc);
+
+ switch (pkt_type) {
+ case RX_MSDU_START_PKT_TYPE_11A:
+ case RX_MSDU_START_PKT_TYPE_11B:
+ is_cck = (pkt_type == RX_MSDU_START_PKT_TYPE_11B);
+ sband = &ar->mac.sbands[rx_status->band];
+ rx_status->rate_idx = ath11k_mac_hw_rate_to_idx(sband, rate_mcs,
+ is_cck);
+ break;
+ case RX_MSDU_START_PKT_TYPE_11N:
+ rx_status->encoding = RX_ENC_HT;
+ if (rate_mcs > ATH11K_HT_MCS_MAX) {
+ ath11k_warn(ar->ab,
+ "Received with invalid mcs in HT mode %d\n",
+ rate_mcs);
+ break;
+ }
+ rx_status->rate_idx = rate_mcs + (8 * (nss - 1));
+ if (sgi)
+ rx_status->enc_flags |= RX_ENC_FLAG_SHORT_GI;
+ rx_status->bw = ath11k_mac_bw_to_mac80211_bw(bw);
+ break;
+ case RX_MSDU_START_PKT_TYPE_11AC:
+ rx_status->encoding = RX_ENC_VHT;
+ rx_status->rate_idx = rate_mcs;
+ if (rate_mcs > ATH11K_VHT_MCS_MAX) {
+ ath11k_warn(ar->ab,
+ "Received with invalid mcs in VHT mode %d\n",
+ rate_mcs);
+ break;
+ }
+ rx_status->nss = nss;
+ if (sgi)
+ rx_status->enc_flags |= RX_ENC_FLAG_SHORT_GI;
+ rx_status->bw = ath11k_mac_bw_to_mac80211_bw(bw);
+ break;
+ case RX_MSDU_START_PKT_TYPE_11AX:
+ rx_status->rate_idx = rate_mcs;
+ if (rate_mcs > ATH11K_HE_MCS_MAX) {
+ ath11k_warn(ar->ab,
+ "Received with invalid mcs in HE mode %d\n",
+ rate_mcs);
+ break;
+ }
+ rx_status->encoding = RX_ENC_HE;
+ rx_status->nss = nss;
+ rx_status->bw = ath11k_mac_bw_to_mac80211_bw(bw);
+ break;
+ }
+}
+
+static void ath11k_dp_rx_h_ppdu(struct ath11k *ar, struct hal_rx_desc *rx_desc,
+ struct ieee80211_rx_status *rx_status)
+{
+ u8 channel_num;
+
+ rx_status->freq = 0;
+ rx_status->rate_idx = 0;
+ rx_status->nss = 0;
+ rx_status->encoding = RX_ENC_LEGACY;
+ rx_status->bw = RATE_INFO_BW_20;
+
+ rx_status->flag |= RX_FLAG_NO_SIGNAL_VAL;
+
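+ /* Despite the accessor's name, the PHY meta data here carries a
+ * channel number rather than a frequency; map it to a band first.
+ */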
+ channel_num = ath11k_dp_rx_h_msdu_start_freq(rx_desc);
+
+ if (channel_num >= 1 && channel_num <= 14) {
+ rx_status->band = NL80211_BAND_2GHZ;
+ } else if (channel_num >= 36 && channel_num <= 173) {
+ rx_status->band = NL80211_BAND_5GHZ;
+ } else {
+ ath11k_warn(ar->ab, "Unsupported Channel info received %d\n",
+ channel_num);
+ return;
+ }
+
+ rx_status->freq = ieee80211_channel_to_frequency(channel_num,
+ rx_status->band);
+
+ ath11k_dp_rx_h_rate(ar, rx_desc, rx_status);
+}
+
+static void ath11k_dp_rx_process_amsdu(struct ath11k *ar,
+ struct sk_buff_head *amsdu_list,
+ struct ieee80211_rx_status *rx_status)
+{
+ struct sk_buff *first;
+ struct ath11k_skb_rxcb *rxcb;
+ struct hal_rx_desc *rx_desc;
+ bool first_mpdu;
+
+ if (skb_queue_empty(amsdu_list))
+ return;
+
+ first = skb_peek(amsdu_list);
+ rxcb = ATH11K_SKB_RXCB(first);
+ rx_desc = rxcb->rx_desc;
+
+ first_mpdu = ath11k_dp_rx_h_attn_first_mpdu(rx_desc);
+ if (first_mpdu)
+ ath11k_dp_rx_h_ppdu(ar, rx_desc, rx_status);
+
+ ath11k_dp_rx_h_mpdu(ar, amsdu_list, rx_desc, rx_status);
+}
+
+static char *ath11k_print_get_tid(struct ieee80211_hdr *hdr, char *out,
+ size_t size)
+{
+ u8 *qc;
+ int tid;
+
+ if (!ieee80211_is_data_qos(hdr->frame_control))
+ return "";
+
+ qc = ieee80211_get_qos_ctl(hdr);
+ tid = *qc & IEEE80211_QOS_CTL_TID_MASK;
+ snprintf(out, size, "tid %d", tid);
+
+ return out;
+}
+
+static void ath11k_dp_rx_deliver_msdu(struct ath11k *ar, struct napi_struct *napi,
+ struct sk_buff *msdu)
+{
+ static const struct ieee80211_radiotap_he known = {
+ .data1 = cpu_to_le16(IEEE80211_RADIOTAP_HE_DATA1_DATA_MCS_KNOWN |
+ IEEE80211_RADIOTAP_HE_DATA1_BW_RU_ALLOC_KNOWN),
+ .data2 = cpu_to_le16(IEEE80211_RADIOTAP_HE_DATA2_GI_KNOWN),
+ };
+ struct ieee80211_rx_status *status;
+ struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)msdu->data;
+ struct ieee80211_radiotap_he *he = NULL;
+ char tid[32];
+
+ status = IEEE80211_SKB_RXCB(msdu);
+ if (status->encoding == RX_ENC_HE) {
+ he = skb_push(msdu, sizeof(known));
+ memcpy(he, &known, sizeof(known));
+ status->flag |= RX_FLAG_RADIOTAP_HE;
+ }
+
+ ath11k_dbg(ar->ab, ATH11K_DBG_DATA,
+ "rx skb %pK len %u peer %pM %s %s sn %u %s%s%s%s%s%s%s %srate_idx %u vht_nss %u freq %u band %u flag 0x%x fcs-err %i mic-err %i amsdu-more %i\n",
+ msdu,
+ msdu->len,
+ ieee80211_get_SA(hdr),
+ ath11k_print_get_tid(hdr, tid, sizeof(tid)),
+ is_multicast_ether_addr(ieee80211_get_DA(hdr)) ?
+ "mcast" : "ucast",
+ (__le16_to_cpu(hdr->seq_ctrl) & IEEE80211_SCTL_SEQ) >> 4,
+ (status->encoding == RX_ENC_LEGACY) ? "legacy" : "",
+ (status->encoding == RX_ENC_HT) ? "ht" : "",
+ (status->encoding == RX_ENC_VHT) ? "vht" : "",
+ (status->encoding == RX_ENC_HE) ? "he" : "",
+ (status->bw == RATE_INFO_BW_40) ? "40" : "",
+ (status->bw == RATE_INFO_BW_80) ? "80" : "",
+ (status->bw == RATE_INFO_BW_160) ? "160" : "",
+ status->enc_flags & RX_ENC_FLAG_SHORT_GI ? "sgi " : "",
+ status->rate_idx,
+ status->nss,
+ status->freq,
+ status->band, status->flag,
+ !!(status->flag & RX_FLAG_FAILED_FCS_CRC),
+ !!(status->flag & RX_FLAG_MMIC_ERROR),
+ !!(status->flag & RX_FLAG_AMSDU_MORE));
+
+ /* TODO: trace rx packet */
+
+ ieee80211_rx_napi(ar->hw, NULL, msdu, napi);
+}
+
+static void ath11k_dp_rx_pre_deliver_amsdu(struct ath11k *ar,
+ struct sk_buff_head *amsdu_list,
+ struct ieee80211_rx_status *rxs)
+{
+ struct sk_buff *msdu;
+ struct sk_buff *first_subframe;
+ struct ieee80211_rx_status *status;
+
+ first_subframe = skb_peek(amsdu_list);
+
+ skb_queue_walk(amsdu_list, msdu) {
+ /* Setup per-MSDU flags */
+ if (skb_queue_is_last(amsdu_list, msdu))
+ rxs->flag &= ~RX_FLAG_AMSDU_MORE;
+ else
+ rxs->flag |= RX_FLAG_AMSDU_MORE;
+
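+ /* Subframes of one A-MSDU share a single PN, so let mac80211 skip
+ * the replay check on every subframe after the first.
+ */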
+ if (msdu == first_subframe) {
+ first_subframe = NULL;
+ rxs->flag &= ~RX_FLAG_ALLOW_SAME_PN;
+ } else {
+ rxs->flag |= RX_FLAG_ALLOW_SAME_PN;
+ }
+ rxs->flag |= RX_FLAG_SKIP_MONITOR;
+
+ status = IEEE80211_SKB_RXCB(msdu);
+ *status = *rxs;
+ }
+}
+
+static void ath11k_dp_rx_process_pending_packets(struct ath11k_base *ab,
+ struct napi_struct *napi,
+ struct sk_buff_head *pending_q,
+ int *quota, u8 mac_id)
+{
+ struct ath11k *ar;
+ struct sk_buff *msdu;
+ struct ath11k_pdev *pdev;
+
+ if (skb_queue_empty(pending_q))
+ return;
+
+ ar = ab->pdevs[mac_id].ar;
+
+ rcu_read_lock();
+ pdev = rcu_dereference(ab->pdevs_active[mac_id]);
+
+ while (*quota && (msdu = __skb_dequeue(pending_q))) {
+ if (!pdev) {
+ dev_kfree_skb_any(msdu);
+ continue;
+ }
+
+ ath11k_dp_rx_deliver_msdu(ar, napi, msdu);
+ (*quota)--;
+ }
+ rcu_read_unlock();
+}
+
+int ath11k_dp_process_rx(struct ath11k_base *ab, int mac_id,
+ struct napi_struct *napi, struct sk_buff_head *pending_q,
+ int budget)
+{
+ struct ath11k *ar = ab->pdevs[mac_id].ar;
+ struct ath11k_pdev_dp *dp = &ar->dp;
+ struct ieee80211_rx_status *rx_status = &dp->rx_status;
+ struct dp_rxdma_ring *rx_ring = &dp->rx_refill_buf_ring;
+ struct hal_srng *srng;
+ struct sk_buff *msdu;
+ struct sk_buff_head msdu_list;
+ struct sk_buff_head amsdu_list;
+ struct ath11k_skb_rxcb *rxcb;
+ u32 *rx_desc;
+ int buf_id;
+ int num_buffs_reaped = 0;
+ int quota = budget;
+ int ret;
+ bool done = false;
+
+ /* Process any pending packets from the previous napi poll.
+ * Note: All MSDUs in this pending_q correspond to the same mac id
+ * because of the pdev-based reo dest mapping and because each irq
+ * group id maps to a specific reo dest ring.
+ */
+ ath11k_dp_rx_process_pending_packets(ab, napi, pending_q, &quota,
+ mac_id);
+
+ /* If all quota is exhausted by processing the pending_q,
+ * wait for the next napi poll to reap the new entries.
+ */
+ if (!quota)
+ goto exit;
+
+ __skb_queue_head_init(&msdu_list);
+
+ srng = &ab->hal.srng_list[dp->reo_dst_ring.ring_id];
+
+ spin_lock_bh(&srng->lock);
+
+ ath11k_hal_srng_access_begin(ab, srng);
+
+try_again:
+ while ((rx_desc = ath11k_hal_srng_dst_get_next_entry(ab, srng))) {
+ struct hal_reo_dest_ring *desc = (struct hal_reo_dest_ring *)rx_desc;
+ enum hal_reo_dest_ring_push_reason push_reason;
+ u32 cookie;
+
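+ /* Recover the idr buf_id packed into the cookie at replenish time
+ * and look up the skb that was mapped for this descriptor.
+ */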
+ cookie = FIELD_GET(BUFFER_ADDR_INFO1_SW_COOKIE,
+ desc->buf_addr_info.info1);
+ buf_id = FIELD_GET(DP_RXDMA_BUF_COOKIE_BUF_ID,
+ cookie);
+ spin_lock_bh(&rx_ring->idr_lock);
+ msdu = idr_find(&rx_ring->bufs_idr, buf_id);
+ if (!msdu) {
+ ath11k_warn(ab, "frame rx with invalid buf_id %d\n",
+ buf_id);
+ spin_unlock_bh(&rx_ring->idr_lock);
+ continue;
+ }
+
+ idr_remove(&rx_ring->bufs_idr, buf_id);
+ spin_unlock_bh(&rx_ring->idr_lock);
+
+ rxcb = ATH11K_SKB_RXCB(msdu);
+ dma_unmap_single(ab->dev, rxcb->paddr,
+ msdu->len + skb_tailroom(msdu),
+ DMA_FROM_DEVICE);
+
+ num_buffs_reaped++;
+
+ push_reason = FIELD_GET(HAL_REO_DEST_RING_INFO0_PUSH_REASON,
+ desc->info0);
+ if (push_reason !=
+ HAL_REO_DEST_RING_PUSH_REASON_ROUTING_INSTRUCTION) {
+ /* TODO: Check if the msdu can be sent up for processing */
+ dev_kfree_skb_any(msdu);
+ ab->soc_stats.hal_reo_error[dp->reo_dst_ring.ring_id]++;
+ continue;
+ }
+
+ rxcb->is_first_msdu = !!(desc->rx_msdu_info.info0 &
+ RX_MSDU_DESC_INFO0_FIRST_MSDU_IN_MPDU);
+ rxcb->is_last_msdu = !!(desc->rx_msdu_info.info0 &
+ RX_MSDU_DESC_INFO0_LAST_MSDU_IN_MPDU);
+ rxcb->is_continuation = !!(desc->rx_msdu_info.info0 &
+ RX_MSDU_DESC_INFO0_MSDU_CONTINUATION);
+ rxcb->mac_id = mac_id;
+ __skb_queue_tail(&msdu_list, msdu);
+
+ /* Stop reaping from the ring once quota is exhausted and all
+ * MSDUs of the current A-MSDU have been received. Any MSDUs
+ * reaped in excess of the quota here are pushed into the pending
+ * queue to be processed during the next napi poll.
+ * Note: more profiling could assess the impact on pending_q and
+ * throughput under various traffic patterns and densities, and
+ * whether using budget instead of the remaining quota matters.
+ */
+ if (num_buffs_reaped >= quota && rxcb->is_last_msdu &&
+ !rxcb->is_continuation) {
+ done = true;
+ break;
+ }
+ }
+
+ /* Hw might have updated the head pointer after we cached it.
+ * In this case, even though there are entries in the ring we'll
+ * get rx_desc NULL. Give the read another try with updated cached
+ * head pointer so that we can reap complete MPDU in the current
+ * rx processing.
+ */
+ if (!done && ath11k_hal_srng_dst_num_free(ab, srng, true)) {
+ ath11k_hal_srng_access_end(ab, srng);
+ goto try_again;
+ }
+
+ ath11k_hal_srng_access_end(ab, srng);
+
+ spin_unlock_bh(&srng->lock);
+
+ if (!num_buffs_reaped)
+ goto exit;
+
+ /* Should we reschedule it later if we are not able to replenish all
+ * the buffers?
+ */
+ ath11k_dp_rxbufs_replenish(ab, mac_id, rx_ring, num_buffs_reaped,
+ HAL_RX_BUF_RBM_SW3_BM, GFP_ATOMIC);
+
+ rcu_read_lock();
+ if (!rcu_dereference(ab->pdevs_active[mac_id])) {
+ __skb_queue_purge(&msdu_list);
+ goto rcu_unlock;
+ }
+
+ if (test_bit(ATH11K_CAC_RUNNING, &ar->dev_flags)) {
+ __skb_queue_purge(&msdu_list);
+ goto rcu_unlock;
+ }
+
+ while (!skb_queue_empty(&msdu_list)) {
+ __skb_queue_head_init(&amsdu_list);
+ ret = ath11k_dp_rx_retrieve_amsdu(ar, &msdu_list, &amsdu_list);
+ if (ret) {
+ if (ret == -EIO) {
+ ath11k_err(ab, "rx ring got corrupted %d\n", ret);
+ __skb_queue_purge(&msdu_list);
+				/* Should we stop processing any further rx
+				 * from this ring in the future?
+				 */
+ goto rcu_unlock;
+ }
+
+			/* A-MSDU retrieval failed due to a non-fatal
+			 * condition; continue processing with the next msdu.
+			 */
+ continue;
+ }
+
+ ath11k_dp_rx_process_amsdu(ar, &amsdu_list, rx_status);
+
+ ath11k_dp_rx_pre_deliver_amsdu(ar, &amsdu_list, rx_status);
+ skb_queue_splice_tail(&amsdu_list, pending_q);
+ }
+
+ while (quota && (msdu = __skb_dequeue(pending_q))) {
+ ath11k_dp_rx_deliver_msdu(ar, napi, msdu);
+ quota--;
+ }
+
+rcu_unlock:
+ rcu_read_unlock();
+exit:
+ return budget - quota;
+}
+
+static void ath11k_dp_rx_update_peer_stats(struct ath11k_sta *arsta,
+ struct hal_rx_mon_ppdu_info *ppdu_info)
+{
+ struct ath11k_rx_peer_stats *rx_stats = arsta->rx_stats;
+ u32 num_msdu;
+
+ if (!rx_stats)
+ return;
+
+ num_msdu = ppdu_info->tcp_msdu_count + ppdu_info->tcp_ack_msdu_count +
+ ppdu_info->udp_msdu_count + ppdu_info->other_msdu_count;
+
+ rx_stats->num_msdu += num_msdu;
+ rx_stats->tcp_msdu_count += ppdu_info->tcp_msdu_count +
+ ppdu_info->tcp_ack_msdu_count;
+ rx_stats->udp_msdu_count += ppdu_info->udp_msdu_count;
+ rx_stats->other_msdu_count += ppdu_info->other_msdu_count;
+
+ if (ppdu_info->preamble_type == HAL_RX_PREAMBLE_11A ||
+ ppdu_info->preamble_type == HAL_RX_PREAMBLE_11B) {
+ ppdu_info->nss = 1;
+ ppdu_info->mcs = HAL_RX_MAX_MCS;
+ ppdu_info->tid = IEEE80211_NUM_TIDS;
+ }
+
+ if (ppdu_info->nss > 0 && ppdu_info->nss <= HAL_RX_MAX_NSS)
+ rx_stats->nss_count[ppdu_info->nss - 1] += num_msdu;
+
+ if (ppdu_info->mcs <= HAL_RX_MAX_MCS)
+ rx_stats->mcs_count[ppdu_info->mcs] += num_msdu;
+
+ if (ppdu_info->gi < HAL_RX_GI_MAX)
+ rx_stats->gi_count[ppdu_info->gi] += num_msdu;
+
+ if (ppdu_info->bw < HAL_RX_BW_MAX)
+ rx_stats->bw_count[ppdu_info->bw] += num_msdu;
+
+ if (ppdu_info->ldpc < HAL_RX_SU_MU_CODING_MAX)
+ rx_stats->coding_count[ppdu_info->ldpc] += num_msdu;
+
+ if (ppdu_info->tid <= IEEE80211_NUM_TIDS)
+ rx_stats->tid_count[ppdu_info->tid] += num_msdu;
+
+ if (ppdu_info->preamble_type < HAL_RX_PREAMBLE_MAX)
+ rx_stats->pream_cnt[ppdu_info->preamble_type] += num_msdu;
+
+ if (ppdu_info->reception_type < HAL_RX_RECEPTION_TYPE_MAX)
+ rx_stats->reception_type[ppdu_info->reception_type] += num_msdu;
+
+ if (ppdu_info->is_stbc)
+ rx_stats->stbc_count += num_msdu;
+
+ if (ppdu_info->beamformed)
+ rx_stats->beamformed_count += num_msdu;
+
+ if (ppdu_info->num_mpdu_fcs_ok > 1)
+ rx_stats->ampdu_msdu_count += num_msdu;
+ else
+ rx_stats->non_ampdu_msdu_count += num_msdu;
+
+ rx_stats->num_mpdu_fcs_ok += ppdu_info->num_mpdu_fcs_ok;
+ rx_stats->num_mpdu_fcs_err += ppdu_info->num_mpdu_fcs_err;
+
+ arsta->rssi_comb = ppdu_info->rssi_comb;
+ rx_stats->rx_duration += ppdu_info->rx_duration;
+ arsta->rx_duration = rx_stats->rx_duration;
+}
+
+static struct sk_buff *ath11k_dp_rx_alloc_mon_status_buf(struct ath11k_base *ab,
+ struct dp_rxdma_ring *rx_ring,
+ int *buf_id, gfp_t gfp)
+{
+ struct sk_buff *skb;
+ dma_addr_t paddr;
+
+ skb = dev_alloc_skb(DP_RX_BUFFER_SIZE +
+ DP_RX_BUFFER_ALIGN_SIZE);
+
+ if (!skb)
+ goto fail_alloc_skb;
+
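+	/* The status buffer is expected to be DP_RX_BUFFER_ALIGN_SIZE
+	 * aligned; advance skb->data to the next boundary if needed.
+	 */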
+ if (!IS_ALIGNED((unsigned long)skb->data,
+ DP_RX_BUFFER_ALIGN_SIZE)) {
+ skb_pull(skb, PTR_ALIGN(skb->data, DP_RX_BUFFER_ALIGN_SIZE) -
+ skb->data);
+ }
+
+ paddr = dma_map_single(ab->dev, skb->data,
+ skb->len + skb_tailroom(skb),
+ DMA_BIDIRECTIONAL);
+ if (unlikely(dma_mapping_error(ab->dev, paddr)))
+ goto fail_free_skb;
+
+ spin_lock_bh(&rx_ring->idr_lock);
+ *buf_id = idr_alloc(&rx_ring->bufs_idr, skb, 0,
+ rx_ring->bufs_max, gfp);
+ spin_unlock_bh(&rx_ring->idr_lock);
+ if (*buf_id < 0)
+ goto fail_dma_unmap;
+
+ ATH11K_SKB_RXCB(skb)->paddr = paddr;
+ return skb;
+
+fail_dma_unmap:
+ dma_unmap_single(ab->dev, paddr, skb->len + skb_tailroom(skb),
+ DMA_BIDIRECTIONAL);
+fail_free_skb:
+ dev_kfree_skb_any(skb);
+fail_alloc_skb:
+ return NULL;
+}
+
+int ath11k_dp_rx_mon_status_bufs_replenish(struct ath11k_base *ab, int mac_id,
+ struct dp_rxdma_ring *rx_ring,
+ int req_entries,
+ enum hal_rx_buf_return_buf_manager mgr,
+ gfp_t gfp)
+{
+ struct hal_srng *srng;
+ u32 *desc;
+ struct sk_buff *skb;
+ int num_free;
+ int num_remain;
+ int buf_id;
+ u32 cookie;
+ dma_addr_t paddr;
+
+ req_entries = min(req_entries, rx_ring->bufs_max);
+
+ srng = &ab->hal.srng_list[rx_ring->refill_buf_ring.ring_id];
+
+ spin_lock_bh(&srng->lock);
+
+ ath11k_hal_srng_access_begin(ab, srng);
+
+ num_free = ath11k_hal_srng_src_num_free(ab, srng, true);
+
+ req_entries = min(num_free, req_entries);
+ num_remain = req_entries;
+
+ while (num_remain > 0) {
+ skb = ath11k_dp_rx_alloc_mon_status_buf(ab, rx_ring,
+ &buf_id, gfp);
+ if (!skb)
+ break;
+ paddr = ATH11K_SKB_RXCB(skb)->paddr;
+
+ desc = ath11k_hal_srng_src_get_next_entry(ab, srng);
+ if (!desc)
+ goto fail_desc_get;
+
+ cookie = FIELD_PREP(DP_RXDMA_BUF_COOKIE_PDEV_ID, mac_id) |
+ FIELD_PREP(DP_RXDMA_BUF_COOKIE_BUF_ID, buf_id);
+
+ num_remain--;
+
+ ath11k_hal_rx_buf_addr_info_set(desc, paddr, cookie, mgr);
+ }
+
+ ath11k_hal_srng_access_end(ab, srng);
+
+ spin_unlock_bh(&srng->lock);
+
+ return req_entries - num_remain;
+
+fail_desc_get:
+ spin_lock_bh(&rx_ring->idr_lock);
+ idr_remove(&rx_ring->bufs_idr, buf_id);
+ spin_unlock_bh(&rx_ring->idr_lock);
+ dma_unmap_single(ab->dev, paddr, skb->len + skb_tailroom(skb),
+ DMA_BIDIRECTIONAL);
+ dev_kfree_skb_any(skb);
+ ath11k_hal_srng_access_end(ab, srng);
+ spin_unlock_bh(&srng->lock);
+
+ return req_entries - num_remain;
+}
+
+static int ath11k_dp_rx_reap_mon_status_ring(struct ath11k_base *ab, int mac_id,
+ int *budget, struct sk_buff_head *skb_list)
+{
+ struct ath11k *ar = ab->pdevs[mac_id].ar;
+ struct ath11k_pdev_dp *dp = &ar->dp;
+ struct dp_rxdma_ring *rx_ring = &dp->rx_mon_status_refill_ring;
+ struct hal_srng *srng;
+ void *rx_mon_status_desc;
+ struct sk_buff *skb;
+ struct ath11k_skb_rxcb *rxcb;
+ struct hal_tlv_hdr *tlv;
+ u32 cookie;
+ int buf_id;
+ dma_addr_t paddr;
+ u8 rbm;
+ int num_buffs_reaped = 0;
+
+ srng = &ab->hal.srng_list[rx_ring->refill_buf_ring.ring_id];
+
+ spin_lock_bh(&srng->lock);
+
+ ath11k_hal_srng_access_begin(ab, srng);
+ while (*budget) {
+ *budget -= 1;
+ rx_mon_status_desc =
+ ath11k_hal_srng_src_peek(ab, srng);
+ if (!rx_mon_status_desc)
+ break;
+
+ ath11k_hal_rx_buf_addr_info_get(rx_mon_status_desc, &paddr,
+ &cookie, &rbm);
+ if (paddr) {
+ buf_id = FIELD_GET(DP_RXDMA_BUF_COOKIE_BUF_ID, cookie);
+
+ spin_lock_bh(&rx_ring->idr_lock);
+ skb = idr_find(&rx_ring->bufs_idr, buf_id);
+ if (!skb) {
+ ath11k_warn(ab, "rx monitor status with invalid buf_id %d\n",
+ buf_id);
+ spin_unlock_bh(&rx_ring->idr_lock);
+ continue;
+ }
+
+ idr_remove(&rx_ring->bufs_idr, buf_id);
+ spin_unlock_bh(&rx_ring->idr_lock);
+
+ rxcb = ATH11K_SKB_RXCB(skb);
+
+ dma_sync_single_for_cpu(ab->dev, rxcb->paddr,
+ skb->len + skb_tailroom(skb),
+ DMA_FROM_DEVICE);
+
+ dma_unmap_single(ab->dev, rxcb->paddr,
+ skb->len + skb_tailroom(skb),
+ DMA_BIDIRECTIONAL);
+
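+			/* Only buffers that HW has tagged with
+			 * HAL_RX_STATUS_BUFFER_DONE carry a complete
+			 * status TLV stream; skip any others.
+			 */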
+ tlv = (struct hal_tlv_hdr *)skb->data;
+ if (FIELD_GET(HAL_TLV_HDR_TAG, tlv->tl) !=
+ HAL_RX_STATUS_BUFFER_DONE) {
+ ath11k_hal_srng_src_get_next_entry(ab, srng);
+ continue;
+ }
+
+ __skb_queue_tail(skb_list, skb);
+ }
+
+ skb = ath11k_dp_rx_alloc_mon_status_buf(ab, rx_ring,
+ &buf_id, GFP_ATOMIC);
+
+ if (!skb) {
+ ath11k_hal_rx_buf_addr_info_set(rx_mon_status_desc, 0, 0,
+ HAL_RX_BUF_RBM_SW3_BM);
+ num_buffs_reaped++;
+ break;
+ }
+ rxcb = ATH11K_SKB_RXCB(skb);
+
+ cookie = FIELD_PREP(DP_RXDMA_BUF_COOKIE_PDEV_ID, mac_id) |
+ FIELD_PREP(DP_RXDMA_BUF_COOKIE_BUF_ID, buf_id);
+
+ ath11k_hal_rx_buf_addr_info_set(rx_mon_status_desc, rxcb->paddr,
+ cookie, HAL_RX_BUF_RBM_SW3_BM);
+ ath11k_hal_srng_src_get_next_entry(ab, srng);
+ num_buffs_reaped++;
+ }
+ ath11k_hal_srng_access_end(ab, srng);
+ spin_unlock_bh(&srng->lock);
+
+ return num_buffs_reaped;
+}
+
+int ath11k_dp_rx_process_mon_status(struct ath11k_base *ab, int mac_id,
+ struct napi_struct *napi, int budget)
+{
+ struct ath11k *ar = ab->pdevs[mac_id].ar;
+ enum hal_rx_mon_status hal_status;
+ struct sk_buff *skb;
+ struct sk_buff_head skb_list;
+ struct hal_rx_mon_ppdu_info ppdu_info;
+ struct ath11k_peer *peer;
+ struct ath11k_sta *arsta;
+ int num_buffs_reaped = 0;
+
+ __skb_queue_head_init(&skb_list);
+
+ num_buffs_reaped = ath11k_dp_rx_reap_mon_status_ring(ab, mac_id, &budget,
+ &skb_list);
+ if (!num_buffs_reaped)
+ goto exit;
+
+ while ((skb = __skb_dequeue(&skb_list))) {
+ memset(&ppdu_info, 0, sizeof(ppdu_info));
+ ppdu_info.peer_id = HAL_INVALID_PEERID;
+
+ if (ath11k_debug_is_pktlog_rx_stats_enabled(ar))
+ trace_ath11k_htt_rxdesc(ar, skb->data, DP_RX_BUFFER_SIZE);
+
+ hal_status = ath11k_hal_rx_parse_mon_status(ab, &ppdu_info, skb);
+
+ if (ppdu_info.peer_id == HAL_INVALID_PEERID ||
+ hal_status != HAL_RX_MON_STATUS_PPDU_DONE) {
+ dev_kfree_skb_any(skb);
+ continue;
+ }
+
+ rcu_read_lock();
+ spin_lock_bh(&ab->base_lock);
+ peer = ath11k_peer_find_by_id(ab, ppdu_info.peer_id);
+
+ if (!peer || !peer->sta) {
+ ath11k_dbg(ab, ATH11K_DBG_DATA,
+ "failed to find the peer with peer_id %d\n",
+ ppdu_info.peer_id);
+ spin_unlock_bh(&ab->base_lock);
+ rcu_read_unlock();
+ dev_kfree_skb_any(skb);
+ continue;
+ }
+
+ arsta = (struct ath11k_sta *)peer->sta->drv_priv;
+ ath11k_dp_rx_update_peer_stats(arsta, &ppdu_info);
+
+ if (ath11k_debug_is_pktlog_peer_valid(ar, peer->addr))
+ trace_ath11k_htt_rxdesc(ar, skb->data, DP_RX_BUFFER_SIZE);
+
+ spin_unlock_bh(&ab->base_lock);
+ rcu_read_unlock();
+
+ dev_kfree_skb_any(skb);
+ }
+exit:
+ return num_buffs_reaped;
+}
+
+static int ath11k_dp_rx_link_desc_return(struct ath11k_base *ab,
+ u32 *link_desc,
+ enum hal_wbm_rel_bm_act action)
+{
+ struct ath11k_dp *dp = &ab->dp;
+ struct hal_srng *srng;
+ u32 *desc;
+ int ret = 0;
+
+ srng = &ab->hal.srng_list[dp->wbm_desc_rel_ring.ring_id];
+
+ spin_lock_bh(&srng->lock);
+
+ ath11k_hal_srng_access_begin(ab, srng);
+
+ desc = ath11k_hal_srng_src_get_next_entry(ab, srng);
+ if (!desc) {
+ ret = -ENOBUFS;
+ goto exit;
+ }
+
+ ath11k_hal_rx_msdu_link_desc_set(ab, (void *)desc, (void *)link_desc,
+ action);
+
+exit:
+ ath11k_hal_srng_access_end(ab, srng);
+
+ spin_unlock_bh(&srng->lock);
+
+ return ret;
+}
+
+static void ath11k_dp_rx_frag_h_mpdu(struct ath11k *ar,
+ struct sk_buff *msdu,
+ struct hal_rx_desc *rx_desc,
+ struct ieee80211_rx_status *rx_status)
+{
+ u8 rx_channel;
+ enum hal_encrypt_type enctype;
+ bool is_decrypted;
+ u32 err_bitmap;
+
+ is_decrypted = ath11k_dp_rx_h_attn_is_decrypted(rx_desc);
+ enctype = ath11k_dp_rx_h_mpdu_start_enctype(rx_desc);
+ err_bitmap = ath11k_dp_rx_h_attn_mpdu_err(rx_desc);
+
+ if (err_bitmap & DP_RX_MPDU_ERR_FCS)
+ rx_status->flag |= RX_FLAG_FAILED_FCS_CRC;
+
+ if (err_bitmap & DP_RX_MPDU_ERR_TKIP_MIC)
+ rx_status->flag |= RX_FLAG_MMIC_ERROR;
+
+ rx_status->encoding = RX_ENC_LEGACY;
+ rx_status->bw = RATE_INFO_BW_20;
+
+ rx_status->flag |= RX_FLAG_NO_SIGNAL_VAL;
+
+ rx_channel = ath11k_dp_rx_h_msdu_start_freq(rx_desc);
+
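+	/* Only the channel number is available in the rx descriptor here;
+	 * map 1-14 to 2 GHz and 36-173 to 5 GHz to recover the band.
+	 */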
+ if (rx_channel >= 1 && rx_channel <= 14) {
+ rx_status->band = NL80211_BAND_2GHZ;
+ } else if (rx_channel >= 36 && rx_channel <= 173) {
+ rx_status->band = NL80211_BAND_5GHZ;
+ } else {
+ ath11k_warn(ar->ab, "Unsupported Channel info received %d\n",
+ rx_channel);
+ return;
+ }
+
+ rx_status->freq = ieee80211_channel_to_frequency(rx_channel,
+ rx_status->band);
+ ath11k_dp_rx_h_rate(ar, rx_desc, rx_status);
+
+ /* Rx fragments are received in raw mode */
+ skb_trim(msdu, msdu->len - FCS_LEN);
+
+ if (is_decrypted) {
+ rx_status->flag |= RX_FLAG_DECRYPTED | RX_FLAG_MIC_STRIPPED;
+ skb_trim(msdu, msdu->len -
+ ath11k_dp_rx_crypto_mic_len(ar, enctype));
+ }
+}
+
+static int
+ath11k_dp_process_rx_err_buf(struct ath11k *ar, struct napi_struct *napi,
+ int buf_id, bool frag)
+{
+ struct ath11k_pdev_dp *dp = &ar->dp;
+ struct dp_rxdma_ring *rx_ring = &dp->rx_refill_buf_ring;
+ struct ieee80211_rx_status rx_status = {0};
+ struct sk_buff *msdu;
+ struct ath11k_skb_rxcb *rxcb;
+ struct ieee80211_rx_status *status;
+ struct hal_rx_desc *rx_desc;
+ u16 msdu_len;
+
+ spin_lock_bh(&rx_ring->idr_lock);
+ msdu = idr_find(&rx_ring->bufs_idr, buf_id);
+ if (!msdu) {
+ ath11k_warn(ar->ab, "rx err buf with invalid buf_id %d\n",
+ buf_id);
+ spin_unlock_bh(&rx_ring->idr_lock);
+ return -EINVAL;
+ }
+
+ idr_remove(&rx_ring->bufs_idr, buf_id);
+ spin_unlock_bh(&rx_ring->idr_lock);
+
+ rxcb = ATH11K_SKB_RXCB(msdu);
+ dma_unmap_single(ar->ab->dev, rxcb->paddr,
+ msdu->len + skb_tailroom(msdu),
+ DMA_FROM_DEVICE);
+
+ if (!frag) {
+		/* Only rx fragments are processed below; msdus
+		 * indicated for error reasons are dropped.
+		 */
+ dev_kfree_skb_any(msdu);
+ return 0;
+ }
+
+ rcu_read_lock();
+ if (!rcu_dereference(ar->ab->pdevs_active[ar->pdev_idx])) {
+ dev_kfree_skb_any(msdu);
+ goto exit;
+ }
+
+ if (test_bit(ATH11K_CAC_RUNNING, &ar->dev_flags)) {
+ dev_kfree_skb_any(msdu);
+ goto exit;
+ }
+
+ rx_desc = (struct hal_rx_desc *)msdu->data;
+ msdu_len = ath11k_dp_rx_h_msdu_start_msdu_len(rx_desc);
+ skb_put(msdu, HAL_RX_DESC_SIZE + msdu_len);
+ skb_pull(msdu, HAL_RX_DESC_SIZE);
+
+ ath11k_dp_rx_frag_h_mpdu(ar, msdu, rx_desc, &rx_status);
+
+ status = IEEE80211_SKB_RXCB(msdu);
+
+ *status = rx_status;
+
+ ath11k_dp_rx_deliver_msdu(ar, napi, msdu);
+
+exit:
+ rcu_read_unlock();
+ return 0;
+}
+
+int ath11k_dp_process_rx_err(struct ath11k_base *ab, struct napi_struct *napi,
+ int budget)
+{
+ u32 msdu_cookies[HAL_NUM_RX_MSDUS_PER_LINK_DESC];
+ struct dp_link_desc_bank *link_desc_banks;
+ enum hal_rx_buf_return_buf_manager rbm;
+ int tot_n_bufs_reaped, quota, ret, i;
+ int n_bufs_reaped[MAX_RADIOS] = {0};
+ struct dp_rxdma_ring *rx_ring;
+ struct dp_srng *reo_except;
+ u32 desc_bank, num_msdus;
+ struct hal_srng *srng;
+ struct ath11k_dp *dp;
+ void *link_desc_va;
+ int buf_id, mac_id;
+ struct ath11k *ar;
+ dma_addr_t paddr;
+ u32 *desc;
+ bool is_frag;
+
+ tot_n_bufs_reaped = 0;
+ quota = budget;
+
+ dp = &ab->dp;
+ reo_except = &dp->reo_except_ring;
+ link_desc_banks = dp->link_desc_banks;
+
+ srng = &ab->hal.srng_list[reo_except->ring_id];
+
+ spin_lock_bh(&srng->lock);
+
+ ath11k_hal_srng_access_begin(ab, srng);
+
+ while (budget &&
+ (desc = ath11k_hal_srng_dst_get_next_entry(ab, srng))) {
+ struct hal_reo_dest_ring *reo_desc = (struct hal_reo_dest_ring *)desc;
+
+ ab->soc_stats.err_ring_pkts++;
+ ret = ath11k_hal_desc_reo_parse_err(ab, desc, &paddr,
+ &desc_bank);
+ if (ret) {
+ ath11k_warn(ab, "failed to parse error reo desc %d\n",
+ ret);
+ continue;
+ }
+ link_desc_va = link_desc_banks[desc_bank].vaddr +
+ (paddr - link_desc_banks[desc_bank].paddr);
+ ath11k_hal_rx_msdu_link_info_get(link_desc_va, &num_msdus, msdu_cookies,
+ &rbm);
+ if (rbm != HAL_RX_BUF_RBM_WBM_IDLE_DESC_LIST &&
+ rbm != HAL_RX_BUF_RBM_SW3_BM) {
+ ab->soc_stats.invalid_rbm++;
+ ath11k_warn(ab, "invalid return buffer manager %d\n", rbm);
+ ath11k_dp_rx_link_desc_return(ab, desc,
+ HAL_WBM_REL_BM_ACT_REL_MSDU);
+ continue;
+ }
+
+ is_frag = !!(reo_desc->rx_mpdu_info.info0 & RX_MPDU_DESC_INFO0_FRAG_FLAG);
+
+ /* Return the link desc back to wbm idle list */
+ ath11k_dp_rx_link_desc_return(ab, desc,
+ HAL_WBM_REL_BM_ACT_PUT_IN_IDLE);
+
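+		/* Each link descriptor carries up to
+		 * HAL_NUM_RX_MSDUS_PER_LINK_DESC msdu cookies; free or
+		 * deliver each of the referenced buffers.
+		 */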
+ for (i = 0; i < num_msdus; i++) {
+ buf_id = FIELD_GET(DP_RXDMA_BUF_COOKIE_BUF_ID,
+ msdu_cookies[i]);
+
+ mac_id = FIELD_GET(DP_RXDMA_BUF_COOKIE_PDEV_ID,
+ msdu_cookies[i]);
+
+ ar = ab->pdevs[mac_id].ar;
+
+ if (!ath11k_dp_process_rx_err_buf(ar, napi, buf_id,
+ is_frag)) {
+ n_bufs_reaped[mac_id]++;
+ tot_n_bufs_reaped++;
+ }
+ }
+
+ if (tot_n_bufs_reaped >= quota) {
+ tot_n_bufs_reaped = quota;
+ goto exit;
+ }
+
+ budget = quota - tot_n_bufs_reaped;
+ }
+
+exit:
+ ath11k_hal_srng_access_end(ab, srng);
+
+ spin_unlock_bh(&srng->lock);
+
+ for (i = 0; i < ab->num_radios; i++) {
+ if (!n_bufs_reaped[i])
+ continue;
+
+ ar = ab->pdevs[i].ar;
+ rx_ring = &ar->dp.rx_refill_buf_ring;
+
+ ath11k_dp_rxbufs_replenish(ab, i, rx_ring, n_bufs_reaped[i],
+ HAL_RX_BUF_RBM_SW3_BM, GFP_ATOMIC);
+ }
+
+ return tot_n_bufs_reaped;
+}
+
+static void ath11k_dp_rx_null_q_desc_sg_drop(struct ath11k *ar,
+ int msdu_len,
+ struct sk_buff_head *msdu_list)
+{
+ struct sk_buff *skb, *tmp;
+ struct ath11k_skb_rxcb *rxcb;
+ int n_buffs;
+
+ n_buffs = DIV_ROUND_UP(msdu_len,
+ (DP_RX_BUFFER_SIZE - HAL_RX_DESC_SIZE));
+
+ skb_queue_walk_safe(msdu_list, skb, tmp) {
+ rxcb = ATH11K_SKB_RXCB(skb);
+ if (rxcb->err_rel_src == HAL_WBM_REL_SRC_MODULE_REO &&
+ rxcb->err_code == HAL_REO_DEST_RING_ERROR_CODE_DESC_ADDR_ZERO) {
+ if (!n_buffs)
+ break;
+ __skb_unlink(skb, msdu_list);
+ dev_kfree_skb_any(skb);
+ n_buffs--;
+ }
+ }
+}
+
+static int ath11k_dp_rx_h_null_q_desc(struct ath11k *ar, struct sk_buff *msdu,
+ struct ieee80211_rx_status *status,
+ struct sk_buff_head *msdu_list)
+{
+ struct sk_buff_head amsdu_list;
+ u16 msdu_len;
+ struct hal_rx_desc *desc = (struct hal_rx_desc *)msdu->data;
+ u8 l3pad_bytes;
+ struct ath11k_skb_rxcb *rxcb = ATH11K_SKB_RXCB(msdu);
+
+ msdu_len = ath11k_dp_rx_h_msdu_start_msdu_len(desc);
+
+ if ((msdu_len + HAL_RX_DESC_SIZE) > DP_RX_BUFFER_SIZE) {
+		/* The first buffer will be freed by the caller, so deduct its length */
+ msdu_len = msdu_len - (DP_RX_BUFFER_SIZE - HAL_RX_DESC_SIZE);
+ ath11k_dp_rx_null_q_desc_sg_drop(ar, msdu_len, msdu_list);
+ return -EINVAL;
+ }
+
+ if (!ath11k_dp_rx_h_attn_msdu_done(desc)) {
+ ath11k_warn(ar->ab,
+ "msdu_done bit not set in null_q_des processing\n");
+ __skb_queue_purge(msdu_list);
+ return -EIO;
+ }
+
+	/* Handle NULL queue descriptor violations arising out of a missing
+	 * REO queue for a given peer or TID. This typically happens if a
+	 * packet is received on a QoS-enabled TID before the ADDBA
+	 * negotiation for that TID has set up the TID queue. It may also
+	 * happen for MC/BC frames if they are not routed to the non-QoS TID
+	 * queue, in the absence of any other default TID queue.
+	 * This error can show up in both the REO destination and WBM release
+	 * rings.
+	 */
+
+ __skb_queue_head_init(&amsdu_list);
+
+ rxcb->is_first_msdu = ath11k_dp_rx_h_msdu_end_first_msdu(desc);
+ rxcb->is_last_msdu = ath11k_dp_rx_h_msdu_end_last_msdu(desc);
+
+ l3pad_bytes = ath11k_dp_rx_h_msdu_end_l3pad(desc);
+
+ if ((HAL_RX_DESC_SIZE + l3pad_bytes + msdu_len) > DP_RX_BUFFER_SIZE)
+ return -EINVAL;
+
+ skb_put(msdu, HAL_RX_DESC_SIZE + l3pad_bytes + msdu_len);
+ skb_pull(msdu, HAL_RX_DESC_SIZE + l3pad_bytes);
+
+ ath11k_dp_rx_h_ppdu(ar, desc, status);
+
+ __skb_queue_tail(&amsdu_list, msdu);
+
+ ath11k_dp_rx_h_mpdu(ar, &amsdu_list, desc, status);
+
+	/* Note that the caller still has access to the msdu and completes
+	 * rx with mac80211, so there is no need to clean up amsdu_list here.
+	 */
+
+ return 0;
+}
+
+static bool ath11k_dp_rx_h_reo_err(struct ath11k *ar, struct sk_buff *msdu,
+ struct ieee80211_rx_status *status,
+ struct sk_buff_head *msdu_list)
+{
+ struct ath11k_skb_rxcb *rxcb = ATH11K_SKB_RXCB(msdu);
+ bool drop = false;
+
+ ar->ab->soc_stats.reo_error[rxcb->err_code]++;
+
+ switch (rxcb->err_code) {
+ case HAL_REO_DEST_RING_ERROR_CODE_DESC_ADDR_ZERO:
+ if (ath11k_dp_rx_h_null_q_desc(ar, msdu, status, msdu_list))
+ drop = true;
+ break;
+ default:
+		/* TODO: Review the other error codes and report them to
+		 * mac80211 as appropriate.
+		 */
+ drop = true;
+ break;
+ }
+
+ return drop;
+}
+
+static void ath11k_dp_rx_h_tkip_mic_err(struct ath11k *ar, struct sk_buff *msdu,
+ struct ieee80211_rx_status *status)
+{
+ u16 msdu_len;
+ struct hal_rx_desc *desc = (struct hal_rx_desc *)msdu->data;
+ u8 l3pad_bytes;
+ struct ath11k_skb_rxcb *rxcb = ATH11K_SKB_RXCB(msdu);
+
+ rxcb->is_first_msdu = ath11k_dp_rx_h_msdu_end_first_msdu(desc);
+ rxcb->is_last_msdu = ath11k_dp_rx_h_msdu_end_last_msdu(desc);
+
+ l3pad_bytes = ath11k_dp_rx_h_msdu_end_l3pad(desc);
+ msdu_len = ath11k_dp_rx_h_msdu_start_msdu_len(desc);
+ skb_put(msdu, HAL_RX_DESC_SIZE + l3pad_bytes + msdu_len);
+ skb_pull(msdu, HAL_RX_DESC_SIZE + l3pad_bytes);
+
+ ath11k_dp_rx_h_ppdu(ar, desc, status);
+
+ status->flag |= (RX_FLAG_MMIC_STRIPPED | RX_FLAG_MMIC_ERROR |
+ RX_FLAG_DECRYPTED);
+
+ ath11k_dp_rx_h_undecap(ar, msdu, desc,
+ HAL_ENCRYPT_TYPE_TKIP_MIC, status, false);
+}
+
+static bool ath11k_dp_rx_h_rxdma_err(struct ath11k *ar, struct sk_buff *msdu,
+ struct ieee80211_rx_status *status)
+{
+ struct ath11k_skb_rxcb *rxcb = ATH11K_SKB_RXCB(msdu);
+ bool drop = false;
+
+ ar->ab->soc_stats.rxdma_error[rxcb->err_code]++;
+
+ switch (rxcb->err_code) {
+ case HAL_REO_ENTR_RING_RXDMA_ECODE_TKIP_MIC_ERR:
+ ath11k_dp_rx_h_tkip_mic_err(ar, msdu, status);
+ break;
+ default:
+		/* TODO: Review the other rxdma error codes to check if
+		 * anything is worth reporting to mac80211.
+		 */
+ drop = true;
+ break;
+ }
+
+ return drop;
+}
+
+static void ath11k_dp_rx_wbm_err(struct ath11k *ar,
+ struct napi_struct *napi,
+ struct sk_buff *msdu,
+ struct sk_buff_head *msdu_list)
+{
+ struct ath11k_skb_rxcb *rxcb = ATH11K_SKB_RXCB(msdu);
+ struct ieee80211_rx_status rxs = {0};
+ struct ieee80211_rx_status *status;
+ bool drop = true;
+
+ switch (rxcb->err_rel_src) {
+ case HAL_WBM_REL_SRC_MODULE_REO:
+ drop = ath11k_dp_rx_h_reo_err(ar, msdu, &rxs, msdu_list);
+ break;
+ case HAL_WBM_REL_SRC_MODULE_RXDMA:
+ drop = ath11k_dp_rx_h_rxdma_err(ar, msdu, &rxs);
+ break;
+ default:
+ /* msdu will get freed */
+ break;
+ }
+
+ if (drop) {
+ dev_kfree_skb_any(msdu);
+ return;
+ }
+
+ status = IEEE80211_SKB_RXCB(msdu);
+ *status = rxs;
+
+ ath11k_dp_rx_deliver_msdu(ar, napi, msdu);
+}
+
+int ath11k_dp_rx_process_wbm_err(struct ath11k_base *ab,
+ struct napi_struct *napi, int budget)
+{
+ struct ath11k *ar;
+ struct ath11k_dp *dp = &ab->dp;
+ struct dp_rxdma_ring *rx_ring;
+ struct hal_rx_wbm_rel_info err_info;
+ struct hal_srng *srng;
+ struct sk_buff *msdu;
+ struct sk_buff_head msdu_list[MAX_RADIOS];
+ struct ath11k_skb_rxcb *rxcb;
+ u32 *rx_desc;
+ int buf_id, mac_id;
+ int num_buffs_reaped[MAX_RADIOS] = {0};
+ int total_num_buffs_reaped = 0;
+ int ret, i;
+
+ for (i = 0; i < MAX_RADIOS; i++)
+ __skb_queue_head_init(&msdu_list[i]);
+
+ srng = &ab->hal.srng_list[dp->rx_rel_ring.ring_id];
+
+ spin_lock_bh(&srng->lock);
+
+ ath11k_hal_srng_access_begin(ab, srng);
+
+ while (budget) {
+ rx_desc = ath11k_hal_srng_dst_get_next_entry(ab, srng);
+ if (!rx_desc)
+ break;
+
+ ret = ath11k_hal_wbm_desc_parse_err(ab, rx_desc, &err_info);
+ if (ret) {
+ ath11k_warn(ab,
+ "failed to parse rx error in wbm_rel ring desc %d\n",
+ ret);
+ continue;
+ }
+
+ buf_id = FIELD_GET(DP_RXDMA_BUF_COOKIE_BUF_ID, err_info.cookie);
+ mac_id = FIELD_GET(DP_RXDMA_BUF_COOKIE_PDEV_ID, err_info.cookie);
+
+ ar = ab->pdevs[mac_id].ar;
+ rx_ring = &ar->dp.rx_refill_buf_ring;
+
+ spin_lock_bh(&rx_ring->idr_lock);
+ msdu = idr_find(&rx_ring->bufs_idr, buf_id);
+ if (!msdu) {
+ ath11k_warn(ab, "frame rx with invalid buf_id %d pdev %d\n",
+ buf_id, mac_id);
+ spin_unlock_bh(&rx_ring->idr_lock);
+ continue;
+ }
+
+ idr_remove(&rx_ring->bufs_idr, buf_id);
+ spin_unlock_bh(&rx_ring->idr_lock);
+
+ rxcb = ATH11K_SKB_RXCB(msdu);
+ dma_unmap_single(ab->dev, rxcb->paddr,
+ msdu->len + skb_tailroom(msdu),
+ DMA_FROM_DEVICE);
+
+ num_buffs_reaped[mac_id]++;
+ total_num_buffs_reaped++;
+ budget--;
+
+ if (err_info.push_reason !=
+ HAL_REO_DEST_RING_PUSH_REASON_ERR_DETECTED) {
+ dev_kfree_skb_any(msdu);
+ continue;
+ }
+
+ rxcb->err_rel_src = err_info.err_rel_src;
+ rxcb->err_code = err_info.err_code;
+ rxcb->rx_desc = (struct hal_rx_desc *)msdu->data;
+ __skb_queue_tail(&msdu_list[mac_id], msdu);
+ }
+
+ ath11k_hal_srng_access_end(ab, srng);
+
+ spin_unlock_bh(&srng->lock);
+
+ if (!total_num_buffs_reaped)
+ goto done;
+
+ for (i = 0; i < ab->num_radios; i++) {
+ if (!num_buffs_reaped[i])
+ continue;
+
+ ar = ab->pdevs[i].ar;
+ rx_ring = &ar->dp.rx_refill_buf_ring;
+
+ ath11k_dp_rxbufs_replenish(ab, i, rx_ring, num_buffs_reaped[i],
+ HAL_RX_BUF_RBM_SW3_BM, GFP_ATOMIC);
+ }
+
+ rcu_read_lock();
+ for (i = 0; i < ab->num_radios; i++) {
+ if (!rcu_dereference(ab->pdevs_active[i])) {
+ __skb_queue_purge(&msdu_list[i]);
+ continue;
+ }
+
+ ar = ab->pdevs[i].ar;
+
+ if (test_bit(ATH11K_CAC_RUNNING, &ar->dev_flags)) {
+ __skb_queue_purge(&msdu_list[i]);
+ continue;
+ }
+
+ while ((msdu = __skb_dequeue(&msdu_list[i])) != NULL)
+ ath11k_dp_rx_wbm_err(ar, napi, msdu, &msdu_list[i]);
+ }
+ rcu_read_unlock();
+done:
+ return total_num_buffs_reaped;
+}
+
+int ath11k_dp_process_rxdma_err(struct ath11k_base *ab, int mac_id, int budget)
+{
+ struct ath11k *ar = ab->pdevs[mac_id].ar;
+ struct dp_srng *err_ring = &ar->dp.rxdma_err_dst_ring;
+ struct dp_rxdma_ring *rx_ring = &ar->dp.rx_refill_buf_ring;
+ struct dp_link_desc_bank *link_desc_banks = ab->dp.link_desc_banks;
+ struct hal_srng *srng;
+ u32 msdu_cookies[HAL_NUM_RX_MSDUS_PER_LINK_DESC];
+ enum hal_rx_buf_return_buf_manager rbm;
+ enum hal_reo_entr_rxdma_ecode rxdma_err_code;
+ struct ath11k_skb_rxcb *rxcb;
+ struct sk_buff *skb;
+ struct hal_reo_entrance_ring *entr_ring;
+ void *desc;
+ int num_buf_freed = 0;
+ int quota = budget;
+ dma_addr_t paddr;
+ u32 desc_bank;
+ void *link_desc_va;
+ int num_msdus;
+ int i;
+ int buf_id;
+
+ srng = &ab->hal.srng_list[err_ring->ring_id];
+
+ spin_lock_bh(&srng->lock);
+
+ ath11k_hal_srng_access_begin(ab, srng);
+
+	while (quota &&
+	       (desc = ath11k_hal_srng_dst_get_next_entry(ab, srng))) {
+		quota--;
+ ath11k_hal_rx_reo_ent_paddr_get(ab, desc, &paddr, &desc_bank);
+
+ entr_ring = (struct hal_reo_entrance_ring *)desc;
+ rxdma_err_code =
+ FIELD_GET(HAL_REO_ENTR_RING_INFO1_RXDMA_ERROR_CODE,
+ entr_ring->info1);
+ ab->soc_stats.rxdma_error[rxdma_err_code]++;
+
+ link_desc_va = link_desc_banks[desc_bank].vaddr +
+ (paddr - link_desc_banks[desc_bank].paddr);
+ ath11k_hal_rx_msdu_link_info_get(link_desc_va, &num_msdus,
+ msdu_cookies, &rbm);
+
+ for (i = 0; i < num_msdus; i++) {
+ buf_id = FIELD_GET(DP_RXDMA_BUF_COOKIE_BUF_ID,
+ msdu_cookies[i]);
+
+ spin_lock_bh(&rx_ring->idr_lock);
+ skb = idr_find(&rx_ring->bufs_idr, buf_id);
+ if (!skb) {
+ ath11k_warn(ab, "rxdma error with invalid buf_id %d\n",
+ buf_id);
+ spin_unlock_bh(&rx_ring->idr_lock);
+ continue;
+ }
+
+ idr_remove(&rx_ring->bufs_idr, buf_id);
+ spin_unlock_bh(&rx_ring->idr_lock);
+
+ rxcb = ATH11K_SKB_RXCB(skb);
+ dma_unmap_single(ab->dev, rxcb->paddr,
+ skb->len + skb_tailroom(skb),
+ DMA_FROM_DEVICE);
+ dev_kfree_skb_any(skb);
+
+ num_buf_freed++;
+ }
+
+ ath11k_dp_rx_link_desc_return(ab, desc,
+ HAL_WBM_REL_BM_ACT_PUT_IN_IDLE);
+ }
+
+ ath11k_hal_srng_access_end(ab, srng);
+
+ spin_unlock_bh(&srng->lock);
+
+ if (num_buf_freed)
+ ath11k_dp_rxbufs_replenish(ab, mac_id, rx_ring, num_buf_freed,
+ HAL_RX_BUF_RBM_SW3_BM, GFP_ATOMIC);
+
+ return budget - quota;
+}
+
+void ath11k_dp_process_reo_status(struct ath11k_base *ab)
+{
+ struct ath11k_dp *dp = &ab->dp;
+ struct hal_srng *srng;
+ struct dp_reo_cmd *cmd, *tmp;
+ bool found = false;
+ u32 *reo_desc;
+ u16 tag;
+ struct hal_reo_status reo_status;
+
+ srng = &ab->hal.srng_list[dp->reo_status_ring.ring_id];
+
+ memset(&reo_status, 0, sizeof(reo_status));
+
+ spin_lock_bh(&srng->lock);
+
+ ath11k_hal_srng_access_begin(ab, srng);
+
+ while ((reo_desc = ath11k_hal_srng_dst_get_next_entry(ab, srng))) {
+ tag = FIELD_GET(HAL_SRNG_TLV_HDR_TAG, *reo_desc);
+
+ switch (tag) {
+ case HAL_REO_GET_QUEUE_STATS_STATUS:
+ ath11k_hal_reo_status_queue_stats(ab, reo_desc,
+ &reo_status);
+ break;
+ case HAL_REO_FLUSH_QUEUE_STATUS:
+ ath11k_hal_reo_flush_queue_status(ab, reo_desc,
+ &reo_status);
+ break;
+ case HAL_REO_FLUSH_CACHE_STATUS:
+ ath11k_hal_reo_flush_cache_status(ab, reo_desc,
+ &reo_status);
+ break;
+ case HAL_REO_UNBLOCK_CACHE_STATUS:
+ ath11k_hal_reo_unblk_cache_status(ab, reo_desc,
+ &reo_status);
+ break;
+ case HAL_REO_FLUSH_TIMEOUT_LIST_STATUS:
+ ath11k_hal_reo_flush_timeout_list_status(ab, reo_desc,
+ &reo_status);
+ break;
+ case HAL_REO_DESCRIPTOR_THRESHOLD_REACHED_STATUS:
+ ath11k_hal_reo_desc_thresh_reached_status(ab, reo_desc,
+ &reo_status);
+ break;
+ case HAL_REO_UPDATE_RX_REO_QUEUE_STATUS:
+ ath11k_hal_reo_update_rx_reo_queue_status(ab, reo_desc,
+ &reo_status);
+ break;
+ default:
+ ath11k_warn(ab, "Unknown reo status type %d\n", tag);
+ continue;
+ }
+
+ spin_lock_bh(&dp->reo_cmd_lock);
+ list_for_each_entry_safe(cmd, tmp, &dp->reo_cmd_list, list) {
+ if (reo_status.uniform_hdr.cmd_num == cmd->cmd_num) {
+ found = true;
+ list_del(&cmd->list);
+ break;
+ }
+ }
+ spin_unlock_bh(&dp->reo_cmd_lock);
+
+ if (found) {
+ cmd->handler(dp, (void *)&cmd->data,
+ reo_status.uniform_hdr.cmd_status);
+ kfree(cmd);
+ }
+
+ found = false;
+ }
+
+ ath11k_hal_srng_access_end(ab, srng);
+
+ spin_unlock_bh(&srng->lock);
+}
+
+void ath11k_dp_rx_pdev_free(struct ath11k_base *ab, int mac_id)
+{
+ struct ath11k *ar = ab->pdevs[mac_id].ar;
+
+ ath11k_dp_rx_pdev_srng_free(ar);
+ ath11k_dp_rxdma_pdev_buf_free(ar);
+}
+
+int ath11k_dp_rx_pdev_alloc(struct ath11k_base *ab, int mac_id)
+{
+ struct ath11k *ar = ab->pdevs[mac_id].ar;
+ struct ath11k_pdev_dp *dp = &ar->dp;
+ u32 ring_id;
+ int ret;
+
+ ret = ath11k_dp_rx_pdev_srng_alloc(ar);
+ if (ret) {
+ ath11k_warn(ab, "failed to setup rx srngs\n");
+ return ret;
+ }
+
+ ret = ath11k_dp_rxdma_pdev_buf_setup(ar);
+ if (ret) {
+ ath11k_warn(ab, "failed to setup rxdma ring\n");
+ return ret;
+ }
+
+ ring_id = dp->rx_refill_buf_ring.refill_buf_ring.ring_id;
+ ret = ath11k_dp_tx_htt_srng_setup(ab, ring_id, mac_id, HAL_RXDMA_BUF);
+ if (ret) {
+ ath11k_warn(ab, "failed to configure rx_refill_buf_ring %d\n",
+ ret);
+ return ret;
+ }
+
+ ring_id = dp->rxdma_err_dst_ring.ring_id;
+ ret = ath11k_dp_tx_htt_srng_setup(ab, ring_id, mac_id, HAL_RXDMA_DST);
+ if (ret) {
+ ath11k_warn(ab, "failed to configure rxdma_err_dest_ring %d\n",
+ ret);
+ return ret;
+ }
+
+ ring_id = dp->rxdma_mon_buf_ring.refill_buf_ring.ring_id;
+ ret = ath11k_dp_tx_htt_srng_setup(ab, ring_id,
+ mac_id, HAL_RXDMA_MONITOR_BUF);
+ if (ret) {
+ ath11k_warn(ab, "failed to configure rxdma_mon_buf_ring %d\n",
+ ret);
+ return ret;
+ }
+ ret = ath11k_dp_tx_htt_srng_setup(ab,
+ dp->rxdma_mon_dst_ring.ring_id,
+ mac_id, HAL_RXDMA_MONITOR_DST);
+ if (ret) {
+ ath11k_warn(ab, "failed to configure rxdma_mon_dst_ring %d\n",
+ ret);
+ return ret;
+ }
+ ret = ath11k_dp_tx_htt_srng_setup(ab,
+ dp->rxdma_mon_desc_ring.ring_id,
+ mac_id, HAL_RXDMA_MONITOR_DESC);
+ if (ret) {
+ ath11k_warn(ab, "failed to configure rxdma_mon_dst_ring %d\n",
+ ret);
+ return ret;
+ }
+ ring_id = dp->rx_mon_status_refill_ring.refill_buf_ring.ring_id;
+ ret = ath11k_dp_tx_htt_srng_setup(ab, ring_id, mac_id,
+ HAL_RXDMA_MONITOR_STATUS);
+ if (ret) {
+ ath11k_warn(ab,
+ "failed to configure mon_status_refill_ring %d\n",
+ ret);
+ return ret;
+ }
+ return 0;
+}
+
+static void ath11k_dp_mon_set_frag_len(u32 *total_len, u32 *frag_len)
+{
+ if (*total_len >= (DP_RX_BUFFER_SIZE - sizeof(struct hal_rx_desc))) {
+ *frag_len = DP_RX_BUFFER_SIZE - sizeof(struct hal_rx_desc);
+ *total_len -= *frag_len;
+ } else {
+ *frag_len = *total_len;
+ *total_len = 0;
+ }
+}
+
+static
+int ath11k_dp_rx_monitor_link_desc_return(struct ath11k *ar,
+ void *p_last_buf_addr_info,
+ u8 mac_id)
+{
+ struct ath11k_pdev_dp *dp = &ar->dp;
+ struct dp_srng *dp_srng;
+ void *hal_srng;
+ void *src_srng_desc;
+ int ret = 0;
+
+ dp_srng = &dp->rxdma_mon_desc_ring;
+ hal_srng = &ar->ab->hal.srng_list[dp_srng->ring_id];
+
+ ath11k_hal_srng_access_begin(ar->ab, hal_srng);
+
+ src_srng_desc = ath11k_hal_srng_src_get_next_entry(ar->ab, hal_srng);
+
+ if (src_srng_desc) {
+ struct ath11k_buffer_addr *src_desc =
+ (struct ath11k_buffer_addr *)src_srng_desc;
+
+ *src_desc = *((struct ath11k_buffer_addr *)p_last_buf_addr_info);
+ } else {
+ ath11k_dbg(ar->ab, ATH11K_DBG_DATA,
+ "Monitor Link Desc Ring %d Full", mac_id);
+ ret = -ENOMEM;
+ }
+
+ ath11k_hal_srng_access_end(ar->ab, hal_srng);
+ return ret;
+}
+
+static
+void ath11k_dp_rx_mon_next_link_desc_get(void *rx_msdu_link_desc,
+ dma_addr_t *paddr, u32 *sw_cookie,
+ void **pp_buf_addr_info)
+{
+ struct hal_rx_msdu_link *msdu_link =
+ (struct hal_rx_msdu_link *)rx_msdu_link_desc;
+ struct ath11k_buffer_addr *buf_addr_info;
+ u8 rbm = 0;
+
+ buf_addr_info = (struct ath11k_buffer_addr *)&msdu_link->buf_addr_info;
+
+ ath11k_hal_rx_buf_addr_info_get(buf_addr_info, paddr, sw_cookie, &rbm);
+
+ *pp_buf_addr_info = (void *)buf_addr_info;
+}
+
+static int ath11k_dp_pkt_set_pktlen(struct sk_buff *skb, u32 len)
+{
+ if (skb->len > len) {
+ skb_trim(skb, len);
+ } else {
+ if (skb_tailroom(skb) < len - skb->len) {
+ if ((pskb_expand_head(skb, 0,
+ len - skb->len - skb_tailroom(skb),
+ GFP_ATOMIC))) {
+ dev_kfree_skb_any(skb);
+ return -ENOMEM;
+ }
+ }
+ skb_put(skb, (len - skb->len));
+ }
+ return 0;
+}
+
+static void ath11k_hal_rx_msdu_list_get(struct ath11k *ar,
+ void *msdu_link_desc,
+ struct hal_rx_msdu_list *msdu_list,
+ u16 *num_msdus)
+{
+ struct hal_rx_msdu_details *msdu_details = NULL;
+ struct rx_msdu_desc *msdu_desc_info = NULL;
+ struct hal_rx_msdu_link *msdu_link = NULL;
+ int i;
+ u32 last = FIELD_PREP(RX_MSDU_DESC_INFO0_LAST_MSDU_IN_MPDU, 1);
+ u32 first = FIELD_PREP(RX_MSDU_DESC_INFO0_FIRST_MSDU_IN_MPDU, 1);
+ u8 tmp = 0;
+
+ msdu_link = (struct hal_rx_msdu_link *)msdu_link_desc;
+ msdu_details = &msdu_link->msdu_link[0];
+
+ for (i = 0; i < HAL_RX_NUM_MSDU_DESC; i++) {
+ if (FIELD_GET(BUFFER_ADDR_INFO0_ADDR,
+ msdu_details[i].buf_addr_info.info0) == 0) {
+ msdu_desc_info = &msdu_details[i - 1].rx_msdu_info;
+			msdu_desc_info->info0 |= last;
+ break;
+ }
+ msdu_desc_info = &msdu_details[i].rx_msdu_info;
+
+ if (!i)
+ msdu_desc_info->info0 |= first;
+ else if (i == (HAL_RX_NUM_MSDU_DESC - 1))
+ msdu_desc_info->info0 |= last;
+ msdu_list->msdu_info[i].msdu_flags = msdu_desc_info->info0;
+ msdu_list->msdu_info[i].msdu_len =
+ HAL_RX_MSDU_PKT_LENGTH_GET(msdu_desc_info->info0);
+ msdu_list->sw_cookie[i] =
+ FIELD_GET(BUFFER_ADDR_INFO1_SW_COOKIE,
+ msdu_details[i].buf_addr_info.info1);
+ tmp = FIELD_GET(BUFFER_ADDR_INFO1_RET_BUF_MGR,
+ msdu_details[i].buf_addr_info.info1);
+ msdu_list->rbm[i] = tmp;
+ }
+ *num_msdus = i;
+}
+
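+/* Compare the ppdu_id popped from the mon destination ring against the one
+ * tracked from the status ring. Advance *ppdu_id when the msdu's id is newer
+ * (within the wrap-around window); when the destination ring has fallen
+ * behind, account the buffer as consumed so it gets freed and skipped.
+ */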
+static u32 ath11k_dp_rx_mon_comp_ppduid(u32 msdu_ppdu_id, u32 *ppdu_id,
+ u32 *rx_bufs_used)
+{
+ u32 ret = 0;
+
+ if ((*ppdu_id < msdu_ppdu_id) &&
+ ((msdu_ppdu_id - *ppdu_id) < DP_NOT_PPDU_ID_WRAP_AROUND)) {
+ *ppdu_id = msdu_ppdu_id;
+ ret = msdu_ppdu_id;
+ } else if ((*ppdu_id > msdu_ppdu_id) &&
+ ((*ppdu_id - msdu_ppdu_id) > DP_NOT_PPDU_ID_WRAP_AROUND)) {
+		/* mon_dst is behind mon_status;
+		 * skip this dst_ring entry and free it
+		 */
+ *rx_bufs_used += 1;
+ *ppdu_id = msdu_ppdu_id;
+ ret = msdu_ppdu_id;
+ }
+ return ret;
+}
+
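+/* An msdu with the CONTINUATION flag set spans multiple rx buffers: the
+ * total msdu length is accumulated on the first fragment and doled out as
+ * per-buffer frag_len values. msdu_cnt is decremented only once the final
+ * buffer of an msdu has been seen.
+ */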
+static void ath11k_dp_mon_get_buf_len(struct hal_rx_msdu_desc_info *info,
+ bool *is_frag, u32 *total_len,
+ u32 *frag_len, u32 *msdu_cnt)
+{
+ if (info->msdu_flags & RX_MSDU_DESC_INFO0_MSDU_CONTINUATION) {
+ if (!*is_frag) {
+ *total_len = info->msdu_len;
+ *is_frag = true;
+ }
+ ath11k_dp_mon_set_frag_len(total_len,
+ frag_len);
+ } else {
+ if (*is_frag) {
+ ath11k_dp_mon_set_frag_len(total_len,
+ frag_len);
+ } else {
+ *frag_len = info->msdu_len;
+ }
+ *is_frag = false;
+ *msdu_cnt -= 1;
+ }
+}
+
+static u32
+ath11k_dp_rx_mon_mpdu_pop(struct ath11k *ar,
+ void *ring_entry, struct sk_buff **head_msdu,
+ struct sk_buff **tail_msdu, u32 *npackets,
+ u32 *ppdu_id)
+{
+ struct ath11k_pdev_dp *dp = &ar->dp;
+ struct ath11k_mon_data *pmon = (struct ath11k_mon_data *)&dp->mon_data;
+ struct dp_rxdma_ring *rx_ring = &dp->rxdma_mon_buf_ring;
+ struct sk_buff *msdu = NULL, *last = NULL;
+ struct hal_rx_msdu_list msdu_list;
+ void *p_buf_addr_info, *p_last_buf_addr_info;
+ struct hal_rx_desc *rx_desc;
+ void *rx_msdu_link_desc;
+ dma_addr_t paddr;
+ u16 num_msdus = 0;
+ u32 rx_buf_size, rx_pkt_offset, sw_cookie;
+ u32 rx_bufs_used = 0, i = 0;
+ u32 msdu_ppdu_id = 0, msdu_cnt = 0;
+ u32 total_len = 0, frag_len = 0;
+ bool is_frag, is_first_msdu;
+ bool drop_mpdu = false;
+ struct ath11k_skb_rxcb *rxcb;
+ struct hal_reo_entrance_ring *ent_desc =
+ (struct hal_reo_entrance_ring *)ring_entry;
+ int buf_id;
+
+ ath11k_hal_rx_reo_ent_buf_paddr_get(ring_entry, &paddr,
+ &sw_cookie, &p_last_buf_addr_info,
+ &msdu_cnt);
+
+ if (FIELD_GET(HAL_REO_ENTR_RING_INFO1_RXDMA_PUSH_REASON,
+ ent_desc->info1) ==
+ HAL_REO_DEST_RING_PUSH_REASON_ERR_DETECTED) {
+ u8 rxdma_err =
+ FIELD_GET(HAL_REO_ENTR_RING_INFO1_RXDMA_ERROR_CODE,
+ ent_desc->info1);
+ if (rxdma_err == HAL_REO_ENTR_RING_RXDMA_ECODE_FLUSH_REQUEST_ERR ||
+ rxdma_err == HAL_REO_ENTR_RING_RXDMA_ECODE_MPDU_LEN_ERR ||
+ rxdma_err == HAL_REO_ENTR_RING_RXDMA_ECODE_OVERFLOW_ERR) {
+ drop_mpdu = true;
+ pmon->rx_mon_stats.dest_mpdu_drop++;
+ }
+ }
+
+ is_frag = false;
+ is_first_msdu = true;
+
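+	/* Walk the chain of msdu link descriptors for this PPDU, popping
+	 * each msdu buffer and stitching the skbs into a head..tail list.
+	 */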
+ do {
+ if (pmon->mon_last_linkdesc_paddr == paddr) {
+ pmon->rx_mon_stats.dup_mon_linkdesc_cnt++;
+ return rx_bufs_used;
+ }
+
+ rx_msdu_link_desc =
+ (void *)pmon->link_desc_banks[sw_cookie].vaddr +
+ (paddr - pmon->link_desc_banks[sw_cookie].paddr);
+
+ ath11k_hal_rx_msdu_list_get(ar, rx_msdu_link_desc, &msdu_list,
+ &num_msdus);
+
+ for (i = 0; i < num_msdus; i++) {
+ u32 l2_hdr_offset;
+
+ if (pmon->mon_last_buf_cookie == msdu_list.sw_cookie[i]) {
+ ath11k_dbg(ar->ab, ATH11K_DBG_DATA,
+ "i %d last_cookie %d is same\n",
+ i, pmon->mon_last_buf_cookie);
+ drop_mpdu = true;
+ pmon->rx_mon_stats.dup_mon_buf_cnt++;
+ continue;
+ }
+ buf_id = FIELD_GET(DP_RXDMA_BUF_COOKIE_BUF_ID,
+ msdu_list.sw_cookie[i]);
+
+ spin_lock_bh(&rx_ring->idr_lock);
+ msdu = idr_find(&rx_ring->bufs_idr, buf_id);
+ spin_unlock_bh(&rx_ring->idr_lock);
+ if (!msdu) {
+ ath11k_dbg(ar->ab, ATH11K_DBG_DATA,
+ "msdu_pop: invalid buf_id %d\n", buf_id);
+ break;
+ }
+ rxcb = ATH11K_SKB_RXCB(msdu);
+ if (!rxcb->unmapped) {
+ dma_unmap_single(ar->ab->dev, rxcb->paddr,
+ msdu->len +
+ skb_tailroom(msdu),
+ DMA_FROM_DEVICE);
+ rxcb->unmapped = 1;
+ }
+ if (drop_mpdu) {
+ ath11k_dbg(ar->ab, ATH11K_DBG_DATA,
+ "i %d drop msdu %p *ppdu_id %x\n",
+ i, msdu, *ppdu_id);
+ dev_kfree_skb_any(msdu);
+ msdu = NULL;
+ goto next_msdu;
+ }
+
+ rx_desc = (struct hal_rx_desc *)msdu->data;
+
+ rx_pkt_offset = sizeof(struct hal_rx_desc);
+ l2_hdr_offset = ath11k_dp_rx_h_msdu_end_l3pad(rx_desc);
+
+ if (is_first_msdu) {
+ if (!ath11k_dp_rxdesc_mpdu_valid(rx_desc)) {
+ drop_mpdu = true;
+ dev_kfree_skb_any(msdu);
+ msdu = NULL;
+ pmon->mon_last_linkdesc_paddr = paddr;
+ goto next_msdu;
+ }
+
+ msdu_ppdu_id =
+ ath11k_dp_rxdesc_get_ppduid(rx_desc);
+
+ if (ath11k_dp_rx_mon_comp_ppduid(msdu_ppdu_id,
+ ppdu_id,
+ &rx_bufs_used)) {
+ if (rx_bufs_used) {
+ drop_mpdu = true;
+ dev_kfree_skb_any(msdu);
+ msdu = NULL;
+ goto next_msdu;
+ }
+ return rx_bufs_used;
+ }
+ pmon->mon_last_linkdesc_paddr = paddr;
+ is_first_msdu = false;
+ }
+ ath11k_dp_mon_get_buf_len(&msdu_list.msdu_info[i],
+ &is_frag, &total_len,
+ &frag_len, &msdu_cnt);
+ rx_buf_size = rx_pkt_offset + l2_hdr_offset + frag_len;
+
+ ath11k_dp_pkt_set_pktlen(msdu, rx_buf_size);
+
+ if (!(*head_msdu))
+ *head_msdu = msdu;
+ else if (last)
+ last->next = msdu;
+
+ last = msdu;
+next_msdu:
+ pmon->mon_last_buf_cookie = msdu_list.sw_cookie[i];
+ rx_bufs_used++;
+ spin_lock_bh(&rx_ring->idr_lock);
+ idr_remove(&rx_ring->bufs_idr, buf_id);
+ spin_unlock_bh(&rx_ring->idr_lock);
+ }
+
+ ath11k_dp_rx_mon_next_link_desc_get(rx_msdu_link_desc, &paddr,
+ &sw_cookie,
+ &p_buf_addr_info);
+
+ if (ath11k_dp_rx_monitor_link_desc_return(ar,
+ p_last_buf_addr_info,
+ dp->mac_id))
+ ath11k_dbg(ar->ab, ATH11K_DBG_DATA,
+ "dp_rx_monitor_link_desc_return failed");
+
+ p_last_buf_addr_info = p_buf_addr_info;
+
+ } while (paddr && msdu_cnt);
+
+ if (last)
+ last->next = NULL;
+
+ *tail_msdu = msdu;
+
+ if (msdu_cnt == 0)
+ *npackets = 1;
+
+ return rx_bufs_used;
+}
+
+static void ath11k_dp_rx_msdus_set_payload(struct sk_buff *msdu)
+{
+ u32 rx_pkt_offset, l2_hdr_offset;
+
+ rx_pkt_offset = sizeof(struct hal_rx_desc);
+ l2_hdr_offset = ath11k_dp_rx_h_msdu_end_l3pad((struct hal_rx_desc *)msdu->data);
+ skb_pull(msdu, rx_pkt_offset + l2_hdr_offset);
+}
+
+static struct sk_buff *
+ath11k_dp_rx_mon_merg_msdus(struct ath11k *ar,
+ u32 mac_id, struct sk_buff *head_msdu,
+ struct sk_buff *last_msdu,
+ struct ieee80211_rx_status *rxs)
+{
+ struct sk_buff *msdu, *mpdu_buf, *prev_buf;
+ u32 decap_format, wifi_hdr_len;
+ struct hal_rx_desc *rx_desc;
+ char *hdr_desc;
+ u8 *dest;
+ struct ieee80211_hdr_3addr *wh;
+
+ mpdu_buf = NULL;
+
+ if (!head_msdu)
+ goto err_merge_fail;
+
+ rx_desc = (struct hal_rx_desc *)head_msdu->data;
+
+ if (ath11k_dp_rxdesc_get_mpdulen_err(rx_desc))
+ return NULL;
+
+ decap_format = ath11k_dp_rxdesc_get_decap_format(rx_desc);
+
+ ath11k_dp_rx_h_ppdu(ar, rx_desc, rxs);
+
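+	/* In raw decap the buffers already carry full 802.11 frames; just
+	 * strip the rx descriptor from each msdu and trim the FCS from the
+	 * last one. Native wifi decap requires the 802.11 (and QoS) header
+	 * to be re-pushed in front of each msdu payload.
+	 */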
+ if (decap_format == DP_RX_DECAP_TYPE_RAW) {
+ ath11k_dp_rx_msdus_set_payload(head_msdu);
+
+ prev_buf = head_msdu;
+ msdu = head_msdu->next;
+
+ while (msdu) {
+ ath11k_dp_rx_msdus_set_payload(msdu);
+
+ prev_buf = msdu;
+ msdu = msdu->next;
+ }
+
+ prev_buf->next = NULL;
+
+ skb_trim(prev_buf, prev_buf->len - HAL_RX_FCS_LEN);
+ } else if (decap_format == DP_RX_DECAP_TYPE_NATIVE_WIFI) {
+ __le16 qos_field;
+ u8 qos_pkt = 0;
+
+ rx_desc = (struct hal_rx_desc *)head_msdu->data;
+ hdr_desc = ath11k_dp_rxdesc_get_80211hdr(rx_desc);
+
+ /* Base size */
+ wifi_hdr_len = sizeof(struct ieee80211_hdr_3addr);
+ wh = (struct ieee80211_hdr_3addr *)hdr_desc;
+
+ if (ieee80211_is_data_qos(wh->frame_control)) {
+ struct ieee80211_qos_hdr *qwh =
+ (struct ieee80211_qos_hdr *)hdr_desc;
+
+ qos_field = qwh->qos_ctrl;
+ qos_pkt = 1;
+ }
+ msdu = head_msdu;
+
+ while (msdu) {
+ rx_desc = (struct hal_rx_desc *)msdu->data;
+ hdr_desc = ath11k_dp_rxdesc_get_80211hdr(rx_desc);
+
+ if (qos_pkt) {
+ dest = skb_push(msdu, sizeof(__le16));
+ if (!dest)
+ goto err_merge_fail;
+ memcpy(dest, hdr_desc, wifi_hdr_len);
+ memcpy(dest + wifi_hdr_len,
+ (u8 *)&qos_field, sizeof(__le16));
+ }
+ ath11k_dp_rx_msdus_set_payload(msdu);
+ prev_buf = msdu;
+ msdu = msdu->next;
+ }
+ dest = skb_put(prev_buf, HAL_RX_FCS_LEN);
+ if (!dest)
+ goto err_merge_fail;
+
+ ath11k_dbg(ar->ab, ATH11K_DBG_DATA,
+ "mpdu_buf %pK mpdu_buf->len %u",
+ prev_buf, prev_buf->len);
+ } else {
+ ath11k_dbg(ar->ab, ATH11K_DBG_DATA,
+ "decap format %d is not supported!\n",
+ decap_format);
+ goto err_merge_fail;
+ }
+
+ return head_msdu;
+
+err_merge_fail:
+ if (mpdu_buf && decap_format != DP_RX_DECAP_TYPE_RAW) {
+ ath11k_dbg(ar->ab, ATH11K_DBG_DATA,
+ "err_merge_fail mpdu_buf %pK", mpdu_buf);
+ /* Free the head buffer */
+ dev_kfree_skb_any(mpdu_buf);
+ }
+ return NULL;
+}
+
+static int ath11k_dp_rx_mon_deliver(struct ath11k *ar, u32 mac_id,
+ struct sk_buff *head_msdu,
+ struct sk_buff *tail_msdu,
+ struct napi_struct *napi)
+{
+ struct ath11k_pdev_dp *dp = &ar->dp;
+ struct sk_buff *mon_skb, *skb_next, *header;
+ struct ieee80211_rx_status *rxs = &dp->rx_status, *status;
+
+ mon_skb = ath11k_dp_rx_mon_merg_msdus(ar, mac_id, head_msdu,
+ tail_msdu, rxs);
+
+ if (!mon_skb)
+ goto mon_deliver_fail;
+
+ header = mon_skb;
+
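+	/* Deliver each buffer of the merged chain separately: every buffer
+	 * except the last carries RX_FLAG_AMSDU_MORE, and only the first
+	 * (header) buffer is subject to the same-PN check.
+	 */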
+ rxs->flag = 0;
+ do {
+ skb_next = mon_skb->next;
+ if (!skb_next)
+ rxs->flag &= ~RX_FLAG_AMSDU_MORE;
+ else
+ rxs->flag |= RX_FLAG_AMSDU_MORE;
+
+ if (mon_skb == header) {
+ header = NULL;
+ rxs->flag &= ~RX_FLAG_ALLOW_SAME_PN;
+ } else {
+ rxs->flag |= RX_FLAG_ALLOW_SAME_PN;
+ }
+ rxs->flag |= RX_FLAG_ONLY_MONITOR;
+
+ status = IEEE80211_SKB_RXCB(mon_skb);
+ *status = *rxs;
+
+ ath11k_dp_rx_deliver_msdu(ar, napi, mon_skb);
+ mon_skb = skb_next;
+ } while (mon_skb);
+ rxs->flag = 0;
+
+ return 0;
+
+mon_deliver_fail:
+ mon_skb = head_msdu;
+ while (mon_skb) {
+ skb_next = mon_skb->next;
+ dev_kfree_skb_any(mon_skb);
+ mon_skb = skb_next;
+ }
+ return -EINVAL;
+}
+
+static void ath11k_dp_rx_mon_dest_process(struct ath11k *ar, u32 quota,
+ struct napi_struct *napi)
+{
+ struct ath11k_pdev_dp *dp = &ar->dp;
+ struct ath11k_mon_data *pmon = (struct ath11k_mon_data *)&dp->mon_data;
+ void *ring_entry;
+ void *mon_dst_srng;
+ u32 ppdu_id;
+ u32 rx_bufs_used;
+ struct ath11k_pdev_mon_stats *rx_mon_stats;
+ u32 npackets = 0;
+
+ mon_dst_srng = &ar->ab->hal.srng_list[dp->rxdma_mon_dst_ring.ring_id];
+
+ if (!mon_dst_srng) {
+ ath11k_warn(ar->ab,
+ "HAL Monitor Destination Ring Init Failed -- %pK",
+ mon_dst_srng);
+ return;
+ }
+
+ spin_lock_bh(&pmon->mon_lock);
+
+ ath11k_hal_srng_access_begin(ar->ab, mon_dst_srng);
+
+ ppdu_id = pmon->mon_ppdu_info.ppdu_id;
+ rx_bufs_used = 0;
+ rx_mon_stats = &pmon->rx_mon_stats;
+
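+	/* Reap the mon destination ring until the ppdu_id moves past the
+	 * PPDU currently being assembled from the status TLVs.
+	 */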
+ while ((ring_entry = ath11k_hal_srng_dst_peek(ar->ab, mon_dst_srng))) {
+ struct sk_buff *head_msdu, *tail_msdu;
+
+ head_msdu = NULL;
+ tail_msdu = NULL;
+
+ rx_bufs_used += ath11k_dp_rx_mon_mpdu_pop(ar, ring_entry,
+ &head_msdu,
+ &tail_msdu,
+ &npackets, &ppdu_id);
+
+ if (ppdu_id != pmon->mon_ppdu_info.ppdu_id) {
+ pmon->mon_ppdu_status = DP_PPDU_STATUS_START;
+ ath11k_dbg(ar->ab, ATH11K_DBG_DATA,
+ "dest_rx: new ppdu_id %x != status ppdu_id %x",
+ ppdu_id, pmon->mon_ppdu_info.ppdu_id);
+ break;
+ }
+ if (head_msdu && tail_msdu) {
+ ath11k_dp_rx_mon_deliver(ar, dp->mac_id, head_msdu,
+ tail_msdu, napi);
+ rx_mon_stats->dest_mpdu_done++;
+ }
+
+ ring_entry = ath11k_hal_srng_dst_get_next_entry(ar->ab,
+ mon_dst_srng);
+ }
+ ath11k_hal_srng_access_end(ar->ab, mon_dst_srng);
+
+ spin_unlock_bh(&pmon->mon_lock);
+
+ if (rx_bufs_used) {
+ rx_mon_stats->dest_ppdu_done++;
+ ath11k_dp_rxbufs_replenish(ar->ab, dp->mac_id,
+ &dp->rxdma_mon_buf_ring,
+ rx_bufs_used,
+ HAL_RX_BUF_RBM_SW3_BM, GFP_ATOMIC);
+ }
+}
+
+static void ath11k_dp_rx_mon_status_process_tlv(struct ath11k *ar,
+ u32 quota,
+ struct napi_struct *napi)
+{
+ struct ath11k_pdev_dp *dp = &ar->dp;
+ struct ath11k_mon_data *pmon = (struct ath11k_mon_data *)&dp->mon_data;
+ struct hal_rx_mon_ppdu_info *ppdu_info;
+ struct sk_buff *status_skb;
+ u32 tlv_status = HAL_TLV_STATUS_BUF_DONE;
+ struct ath11k_pdev_mon_stats *rx_mon_stats;
+
+ ppdu_info = &pmon->mon_ppdu_info;
+ rx_mon_stats = &pmon->rx_mon_stats;
+
+ if (pmon->mon_ppdu_status != DP_PPDU_STATUS_START)
+ return;
+
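+	/* Parse the queued status buffers into ppdu_info; once a complete
+	 * PPDU has been parsed, process the matching destination ring
+	 * entries and rearm for the next PPDU.
+	 */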
+ while (!skb_queue_empty(&pmon->rx_status_q)) {
+ status_skb = skb_dequeue(&pmon->rx_status_q);
+
+ tlv_status = ath11k_hal_rx_parse_mon_status(ar->ab, ppdu_info,
+ status_skb);
+ if (tlv_status == HAL_TLV_STATUS_PPDU_DONE) {
+ rx_mon_stats->status_ppdu_done++;
+ pmon->mon_ppdu_status = DP_PPDU_STATUS_DONE;
+ ath11k_dp_rx_mon_dest_process(ar, quota, napi);
+ pmon->mon_ppdu_status = DP_PPDU_STATUS_START;
+ }
+ dev_kfree_skb_any(status_skb);
+ }
+}
+
+static int ath11k_dp_mon_process_rx(struct ath11k_base *ab, int mac_id,
+ struct napi_struct *napi, int budget)
+{
+ struct ath11k *ar = ab->pdevs[mac_id].ar;
+ struct ath11k_pdev_dp *dp = &ar->dp;
+ struct ath11k_mon_data *pmon = (struct ath11k_mon_data *)&dp->mon_data;
+ int num_buffs_reaped = 0;
+
+ num_buffs_reaped = ath11k_dp_rx_reap_mon_status_ring(ar->ab, dp->mac_id, &budget,
+ &pmon->rx_status_q);
+ if (num_buffs_reaped)
+ ath11k_dp_rx_mon_status_process_tlv(ar, budget, napi);
+
+ return num_buffs_reaped;
+}
+
+int ath11k_dp_rx_process_mon_rings(struct ath11k_base *ab, int mac_id,
+ struct napi_struct *napi, int budget)
+{
+ struct ath11k *ar = ab->pdevs[mac_id].ar;
+ int ret = 0;
+
+ if (test_bit(ATH11K_FLAG_MONITOR_ENABLED, &ar->monitor_flags))
+ ret = ath11k_dp_mon_process_rx(ab, mac_id, napi, budget);
+ else
+ ret = ath11k_dp_rx_process_mon_status(ab, mac_id, napi, budget);
+ return ret;
+}
+
+static int ath11k_dp_rx_pdev_mon_status_attach(struct ath11k *ar)
+{
+ struct ath11k_pdev_dp *dp = &ar->dp;
+ struct ath11k_mon_data *pmon = (struct ath11k_mon_data *)&dp->mon_data;
+
+ skb_queue_head_init(&pmon->rx_status_q);
+
+ pmon->mon_ppdu_status = DP_PPDU_STATUS_START;
+
+ memset(&pmon->rx_mon_stats, 0,
+ sizeof(pmon->rx_mon_stats));
+ return 0;
+}
+
+int ath11k_dp_rx_pdev_mon_attach(struct ath11k *ar)
+{
+ struct ath11k_pdev_dp *dp = &ar->dp;
+ struct ath11k_mon_data *pmon = &dp->mon_data;
+ struct hal_srng *mon_desc_srng = NULL;
+ struct dp_srng *dp_srng;
+ int ret = 0;
+ u32 n_link_desc = 0;
+
+ ret = ath11k_dp_rx_pdev_mon_status_attach(ar);
+ if (ret) {
+ ath11k_warn(ar->ab, "pdev_mon_status_attach() failed");
+ return ret;
+ }
+
+ dp_srng = &dp->rxdma_mon_desc_ring;
+ n_link_desc = dp_srng->size /
+ ath11k_hal_srng_get_entrysize(HAL_RXDMA_MONITOR_DESC);
+ mon_desc_srng =
+ &ar->ab->hal.srng_list[dp->rxdma_mon_desc_ring.ring_id];
+
+ ret = ath11k_dp_link_desc_setup(ar->ab, pmon->link_desc_banks,
+ HAL_RXDMA_MONITOR_DESC, mon_desc_srng,
+ n_link_desc);
+ if (ret) {
+ ath11k_warn(ar->ab, "mon_link_desc_pool_setup() failed");
+ return ret;
+ }
+ pmon->mon_last_linkdesc_paddr = 0;
+ pmon->mon_last_buf_cookie = DP_RX_DESC_COOKIE_MAX + 1;
+ spin_lock_init(&pmon->mon_lock);
+ return 0;
+}
+
+static int ath11k_dp_mon_link_free(struct ath11k *ar)
+{
+ struct ath11k_pdev_dp *dp = &ar->dp;
+ struct ath11k_mon_data *pmon = &dp->mon_data;
+
+ ath11k_dp_link_desc_cleanup(ar->ab, pmon->link_desc_banks,
+ HAL_RXDMA_MONITOR_DESC,
+ &dp->rxdma_mon_desc_ring);
+ return 0;
+}
+
+int ath11k_dp_rx_pdev_mon_detach(struct ath11k *ar)
+{
+ ath11k_dp_mon_link_free(ar);
+ return 0;
+}
diff --git a/drivers/net/wireless/ath/ath11k/dp_rx.h b/drivers/net/wireless/ath/ath11k/dp_rx.h
new file mode 100644
index 000000000000..eec5deaa59ad
--- /dev/null
+++ b/drivers/net/wireless/ath/ath11k/dp_rx.h
@@ -0,0 +1,86 @@
+/* SPDX-License-Identifier: BSD-3-Clause-Clear */
+/*
+ * Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
+ */
+#ifndef ATH11K_DP_RX_H
+#define ATH11K_DP_RX_H
+
+#include "core.h"
+#include "rx_desc.h"
+#include "debug.h"
+
+#define DP_RX_MPDU_ERR_FCS BIT(0)
+#define DP_RX_MPDU_ERR_DECRYPT BIT(1)
+#define DP_RX_MPDU_ERR_TKIP_MIC BIT(2)
+#define DP_RX_MPDU_ERR_AMSDU_ERR BIT(3)
+#define DP_RX_MPDU_ERR_OVERFLOW BIT(4)
+#define DP_RX_MPDU_ERR_MSDU_LEN BIT(5)
+#define DP_RX_MPDU_ERR_MPDU_LEN BIT(6)
+#define DP_RX_MPDU_ERR_UNENCRYPTED_FRAME BIT(7)
+
+enum dp_rx_decap_type {
+ DP_RX_DECAP_TYPE_RAW,
+ DP_RX_DECAP_TYPE_NATIVE_WIFI,
+ DP_RX_DECAP_TYPE_ETHERNET2_DIX,
+ DP_RX_DECAP_TYPE_8023,
+};
+
+struct ath11k_dp_amsdu_subframe_hdr {
+ u8 dst[ETH_ALEN];
+ u8 src[ETH_ALEN];
+ __be16 len;
+} __packed;
+
+struct ath11k_dp_rfc1042_hdr {
+ u8 llc_dsap;
+ u8 llc_ssap;
+ u8 llc_ctrl;
+ u8 snap_oui[3];
+ __be16 snap_type;
+} __packed;
+
+int ath11k_dp_rx_ampdu_start(struct ath11k *ar,
+ struct ieee80211_ampdu_params *params);
+int ath11k_dp_rx_ampdu_stop(struct ath11k *ar,
+ struct ieee80211_ampdu_params *params);
+void ath11k_peer_rx_tid_cleanup(struct ath11k *ar, struct ath11k_peer *peer);
+int ath11k_peer_rx_tid_setup(struct ath11k *ar, const u8 *peer_mac, int vdev_id,
+ u8 tid, u32 ba_win_sz, u16 ssn);
+void ath11k_dp_htt_htc_t2h_msg_handler(struct ath11k_base *ab,
+ struct sk_buff *skb);
+int ath11k_dp_pdev_reo_setup(struct ath11k_base *ab);
+void ath11k_dp_pdev_reo_cleanup(struct ath11k_base *ab);
+int ath11k_dp_rx_pdev_alloc(struct ath11k_base *ab, int pdev_idx);
+void ath11k_dp_rx_pdev_free(struct ath11k_base *ab, int pdev_idx);
+void ath11k_dp_reo_cmd_list_cleanup(struct ath11k_base *ab);
+void ath11k_dp_process_reo_status(struct ath11k_base *ab);
+int ath11k_dp_process_rxdma_err(struct ath11k_base *ab, int mac_id, int budget);
+int ath11k_dp_rx_process_wbm_err(struct ath11k_base *ab,
+ struct napi_struct *napi, int budget);
+int ath11k_dp_process_rx_err(struct ath11k_base *ab, struct napi_struct *napi,
+ int budget);
+int ath11k_dp_process_rx(struct ath11k_base *ab, int mac_id,
+ struct napi_struct *napi, struct sk_buff_head *pending_q,
+ int budget);
+int ath11k_dp_rxbufs_replenish(struct ath11k_base *ab, int mac_id,
+ struct dp_rxdma_ring *rx_ring,
+ int req_entries,
+ enum hal_rx_buf_return_buf_manager mgr,
+ gfp_t gfp);
+int ath11k_dp_htt_tlv_iter(struct ath11k_base *ab, const void *ptr, size_t len,
+ int (*iter)(struct ath11k_base *ar, u16 tag, u16 len,
+ const void *ptr, void *data),
+ void *data);
+int ath11k_dp_rx_process_mon_rings(struct ath11k_base *ab, int mac_id,
+ struct napi_struct *napi, int budget);
+int ath11k_dp_rx_process_mon_status(struct ath11k_base *ab, int mac_id,
+ struct napi_struct *napi, int budget);
+int ath11k_dp_rx_mon_status_bufs_replenish(struct ath11k_base *ab, int mac_id,
+ struct dp_rxdma_ring *rx_ring,
+ int req_entries,
+ enum hal_rx_buf_return_buf_manager mgr,
+ gfp_t gfp);
+int ath11k_dp_rx_pdev_mon_detach(struct ath11k *ar);
+int ath11k_dp_rx_pdev_mon_attach(struct ath11k *ar);
+
+#endif /* ATH11K_DP_RX_H */
diff --git a/drivers/net/wireless/ath/ath11k/dp_tx.c b/drivers/net/wireless/ath/ath11k/dp_tx.c
new file mode 100644
index 000000000000..6d7d33761caf
--- /dev/null
+++ b/drivers/net/wireless/ath/ath11k/dp_tx.c
@@ -0,0 +1,962 @@
+// SPDX-License-Identifier: BSD-3-Clause-Clear
+/*
+ * Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
+ */
+
+#include "core.h"
+#include "dp_tx.h"
+#include "debug.h"
+#include "hw.h"
+
+/* NOTE: None of the mapped ring id values may exceed DP_TCL_NUM_RING_MAX */
+static const u8
+ath11k_txq_tcl_ring_map[ATH11K_HW_MAX_QUEUES] = { 0x0, 0x1, 0x2, 0x2 };
+
+static enum hal_tcl_encap_type
+ath11k_dp_tx_get_encap_type(struct ath11k_vif *arvif, struct sk_buff *skb)
+{
+ /* TODO: Determine encap type based on vif_type and configuration */
+ return HAL_TCL_ENCAP_TYPE_NATIVE_WIFI;
+}
+
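+/* Native wifi encap expects the 802.11 header without a QoS control field:
+ * move the header forward over the 2-byte QoS ctl and clear the QoS-data
+ * subtype bit in frame_control.
+ */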
+static void ath11k_dp_tx_encap_nwifi(struct sk_buff *skb)
+{
+ struct ieee80211_hdr *hdr = (void *)skb->data;
+ u8 *qos_ctl;
+
+ if (!ieee80211_is_data_qos(hdr->frame_control))
+ return;
+
+ qos_ctl = ieee80211_get_qos_ctl(hdr);
+ memmove(skb->data + IEEE80211_QOS_CTL_LEN,
+ skb->data, (void *)qos_ctl - (void *)skb->data);
+ skb_pull(skb, IEEE80211_QOS_CTL_LEN);
+
+ hdr = (void *)skb->data;
+ hdr->frame_control &= ~__cpu_to_le16(IEEE80211_STYPE_QOS_DATA);
+}
+
+static u8 ath11k_dp_tx_get_tid(struct sk_buff *skb)
+{
+ struct ieee80211_hdr *hdr = (void *)skb->data;
+
+ if (!ieee80211_is_data_qos(hdr->frame_control))
+ return HAL_DESC_REO_NON_QOS_TID;
+ else
+ return skb->priority & IEEE80211_QOS_CTL_TID_MASK;
+}
+
+static enum hal_encrypt_type ath11k_dp_tx_get_encrypt_type(u32 cipher)
+{
+ switch (cipher) {
+ case WLAN_CIPHER_SUITE_WEP40:
+ return HAL_ENCRYPT_TYPE_WEP_40;
+ case WLAN_CIPHER_SUITE_WEP104:
+ return HAL_ENCRYPT_TYPE_WEP_104;
+ case WLAN_CIPHER_SUITE_TKIP:
+ return HAL_ENCRYPT_TYPE_TKIP_MIC;
+ case WLAN_CIPHER_SUITE_CCMP:
+ return HAL_ENCRYPT_TYPE_CCMP_128;
+ case WLAN_CIPHER_SUITE_CCMP_256:
+ return HAL_ENCRYPT_TYPE_CCMP_256;
+ case WLAN_CIPHER_SUITE_GCMP:
+ return HAL_ENCRYPT_TYPE_GCMP_128;
+ case WLAN_CIPHER_SUITE_GCMP_256:
+ return HAL_ENCRYPT_TYPE_AES_GCMP_256;
+ default:
+ return HAL_ENCRYPT_TYPE_OPEN;
+ }
+}
+
+int ath11k_dp_tx(struct ath11k *ar, struct ath11k_vif *arvif,
+ struct sk_buff *skb)
+{
+ struct ath11k_base *ab = ar->ab;
+ struct ath11k_dp *dp = &ab->dp;
+ struct hal_tx_info ti = {0};
+ struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+ struct ath11k_skb_cb *skb_cb = ATH11K_SKB_CB(skb);
+ struct hal_srng *tcl_ring;
+ struct ieee80211_hdr *hdr = (void *)skb->data;
+ struct dp_tx_ring *tx_ring;
+ void *hal_tcl_desc;
+ u8 pool_id;
+ u8 hal_ring_id;
+ int ret;
+
+ if (test_bit(ATH11K_FLAG_CRASH_FLUSH, &ar->ab->dev_flags))
+ return -ESHUTDOWN;
+
+ if (!ieee80211_is_data(hdr->frame_control))
+ return -ENOTSUPP;
+
+ pool_id = skb_get_queue_mapping(skb) & (ATH11K_HW_MAX_QUEUES - 1);
+ ti.ring_id = ath11k_txq_tcl_ring_map[pool_id];
+
+ tx_ring = &dp->tx_ring[ti.ring_id];
+
+ spin_lock_bh(&tx_ring->tx_idr_lock);
+ ret = idr_alloc(&tx_ring->txbuf_idr, skb, 0,
+ DP_TX_IDR_SIZE - 1, GFP_ATOMIC);
+ spin_unlock_bh(&tx_ring->tx_idr_lock);
+
+ if (ret < 0)
+ return -ENOSPC;
+
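+	/* Pack pdev, idr-allocated msdu id and pool id into desc_id so the
+	 * tx completion handler can locate this skb again.
+	 */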
+ ti.desc_id = FIELD_PREP(DP_TX_DESC_ID_MAC_ID, ar->pdev_idx) |
+ FIELD_PREP(DP_TX_DESC_ID_MSDU_ID, ret) |
+ FIELD_PREP(DP_TX_DESC_ID_POOL_ID, pool_id);
+ ti.encap_type = ath11k_dp_tx_get_encap_type(arvif, skb);
+ ti.meta_data_flags = arvif->tcl_metadata;
+
+ if (info->control.hw_key)
+ ti.encrypt_type =
+ ath11k_dp_tx_get_encrypt_type(info->control.hw_key->cipher);
+ else
+ ti.encrypt_type = HAL_ENCRYPT_TYPE_OPEN;
+
+ ti.addr_search_flags = arvif->hal_addr_search_flags;
+ ti.search_type = arvif->search_type;
+ ti.type = HAL_TCL_DESC_TYPE_BUFFER;
+ ti.pkt_offset = 0;
+ ti.lmac_id = ar->lmac_id;
+ ti.bss_ast_hash = arvif->ast_hash;
+ ti.dscp_tid_tbl_idx = 0;
+
+ if (skb->ip_summed == CHECKSUM_PARTIAL) {
+ ti.flags0 |= FIELD_PREP(HAL_TCL_DATA_CMD_INFO1_IP4_CKSUM_EN, 1) |
+ FIELD_PREP(HAL_TCL_DATA_CMD_INFO1_UDP4_CKSUM_EN, 1) |
+ FIELD_PREP(HAL_TCL_DATA_CMD_INFO1_UDP6_CKSUM_EN, 1) |
+ FIELD_PREP(HAL_TCL_DATA_CMD_INFO1_TCP4_CKSUM_EN, 1) |
+ FIELD_PREP(HAL_TCL_DATA_CMD_INFO1_TCP6_CKSUM_EN, 1);
+ }
+
+ if (ieee80211_vif_is_mesh(arvif->vif))
+ ti.flags1 |= FIELD_PREP(HAL_TCL_DATA_CMD_INFO2_MESH_ENABLE, 1);
+
+ ti.flags1 |= FIELD_PREP(HAL_TCL_DATA_CMD_INFO2_TID_OVERWRITE, 1);
+
+ ti.tid = ath11k_dp_tx_get_tid(skb);
+
+ switch (ti.encap_type) {
+ case HAL_TCL_ENCAP_TYPE_NATIVE_WIFI:
+ ath11k_dp_tx_encap_nwifi(skb);
+ break;
+ case HAL_TCL_ENCAP_TYPE_RAW:
+ /* TODO: for CHECKSUM_PARTIAL case in raw mode, HW checksum offload
+ * is not applicable, hence manual checksum calculation using
+ * skb_checksum_help() is needed
+ */
+ case HAL_TCL_ENCAP_TYPE_ETHERNET:
+ case HAL_TCL_ENCAP_TYPE_802_3:
+ /* TODO: Take care of other encap modes as well */
+ ret = -EINVAL;
+ goto fail_remove_idr;
+ }
+
+ ti.paddr = dma_map_single(ab->dev, skb->data, skb->len, DMA_TO_DEVICE);
+ if (dma_mapping_error(ab->dev, ti.paddr)) {
+ ath11k_warn(ab, "failed to DMA map data Tx buffer\n");
+ ret = -ENOMEM;
+ goto fail_remove_idr;
+ }
+
+ ti.data_len = skb->len;
+ skb_cb->paddr = ti.paddr;
+ skb_cb->vif = arvif->vif;
+ skb_cb->ar = ar;
+
+ hal_ring_id = tx_ring->tcl_data_ring.ring_id;
+ tcl_ring = &ab->hal.srng_list[hal_ring_id];
+
+ spin_lock_bh(&tcl_ring->lock);
+
+ ath11k_hal_srng_access_begin(ab, tcl_ring);
+
+ hal_tcl_desc = (void *)ath11k_hal_srng_src_get_next_entry(ab, tcl_ring);
+ if (!hal_tcl_desc) {
+ /* NOTE: It is highly unlikely we'll be running out of tcl_ring
+ * desc because the desc is directly enqueued onto hw queue.
+ * So add tx packet throttling logic in future if required.
+ */
+ ath11k_hal_srng_access_end(ab, tcl_ring);
+ spin_unlock_bh(&tcl_ring->lock);
+ ret = -ENOMEM;
+ goto fail_unmap_dma;
+ }
+
+ ath11k_hal_tx_cmd_desc_setup(ab, hal_tcl_desc +
+ sizeof(struct hal_tlv_hdr), &ti);
+
+ ath11k_hal_srng_access_end(ab, tcl_ring);
+
+ spin_unlock_bh(&tcl_ring->lock);
+
+ atomic_inc(&ar->dp.num_tx_pending);
+
+ return 0;
+
+fail_unmap_dma:
+ dma_unmap_single(ab->dev, ti.paddr, ti.data_len, DMA_TO_DEVICE);
+
+fail_remove_idr:
+ spin_lock_bh(&tx_ring->tx_idr_lock);
+ idr_remove(&tx_ring->txbuf_idr,
+ FIELD_GET(DP_TX_DESC_ID_MSDU_ID, ti.desc_id));
+ spin_unlock_bh(&tx_ring->tx_idr_lock);
+
+ return ret;
+}
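+
+/* Illustrative sketch only, not part of the driver: the desc_id cookie
+ * built above is a plain bitfield round trip. FIELD_GET() with the same
+ * mask undoes FIELD_PREP(), which is how the completion path later
+ * recovers mac_id and msdu_id from the sw cookie in the WBM release
+ * descriptor.
+ */
+#if 0	/* hypothetical values for illustration */
+	u32 desc_id = FIELD_PREP(DP_TX_DESC_ID_MAC_ID, 1) |
+		      FIELD_PREP(DP_TX_DESC_ID_MSDU_ID, 42) |
+		      FIELD_PREP(DP_TX_DESC_ID_POOL_ID, 3);
+
+	WARN_ON(FIELD_GET(DP_TX_DESC_ID_MAC_ID, desc_id) != 1);
+	WARN_ON(FIELD_GET(DP_TX_DESC_ID_MSDU_ID, desc_id) != 42);
+	WARN_ON(FIELD_GET(DP_TX_DESC_ID_POOL_ID, desc_id) != 3);
+#endif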
+
+static void ath11k_dp_tx_free_txbuf(struct ath11k_base *ab, u8 mac_id,
+ int msdu_id,
+ struct dp_tx_ring *tx_ring)
+{
+ struct ath11k *ar;
+ struct sk_buff *msdu;
+ struct ath11k_skb_cb *skb_cb;
+
+ spin_lock_bh(&tx_ring->tx_idr_lock);
+ msdu = idr_find(&tx_ring->txbuf_idr, msdu_id);
+ if (!msdu) {
+ ath11k_warn(ab, "tx completion for unknown msdu_id %d\n",
+ msdu_id);
+ spin_unlock_bh(&tx_ring->tx_idr_lock);
+ return;
+ }
+
+ skb_cb = ATH11K_SKB_CB(msdu);
+
+ idr_remove(&tx_ring->txbuf_idr, msdu_id);
+ spin_unlock_bh(&tx_ring->tx_idr_lock);
+
+ dma_unmap_single(ab->dev, skb_cb->paddr, msdu->len, DMA_TO_DEVICE);
+ dev_kfree_skb_any(msdu);
+
+ ar = ab->pdevs[mac_id].ar;
+ if (atomic_dec_and_test(&ar->dp.num_tx_pending))
+ wake_up(&ar->dp.tx_empty_waitq);
+}
+
+static void
+ath11k_dp_tx_htt_tx_complete_buf(struct ath11k_base *ab,
+ struct dp_tx_ring *tx_ring,
+ struct ath11k_dp_htt_wbm_tx_status *ts)
+{
+ struct sk_buff *msdu;
+ struct ieee80211_tx_info *info;
+ struct ath11k_skb_cb *skb_cb;
+ struct ath11k *ar;
+
+ spin_lock_bh(&tx_ring->tx_idr_lock);
+ msdu = idr_find(&tx_ring->txbuf_idr, ts->msdu_id);
+ if (!msdu) {
+ ath11k_warn(ab, "htt tx completion for unknown msdu_id %d\n",
+ ts->msdu_id);
+ spin_unlock_bh(&tx_ring->tx_idr_lock);
+ return;
+ }
+
+ skb_cb = ATH11K_SKB_CB(msdu);
+ info = IEEE80211_SKB_CB(msdu);
+
+ ar = skb_cb->ar;
+
+ idr_remove(&tx_ring->txbuf_idr, ts->msdu_id);
+ spin_unlock_bh(&tx_ring->tx_idr_lock);
+
+ if (atomic_dec_and_test(&ar->dp.num_tx_pending))
+ wake_up(&ar->dp.tx_empty_waitq);
+
+ dma_unmap_single(ab->dev, skb_cb->paddr, msdu->len, DMA_TO_DEVICE);
+
+ memset(&info->status, 0, sizeof(info->status));
+
+ if (ts->acked) {
+ if (!(info->flags & IEEE80211_TX_CTL_NO_ACK)) {
+ info->flags |= IEEE80211_TX_STAT_ACK;
+ info->status.ack_signal = ATH11K_DEFAULT_NOISE_FLOOR +
+ ts->ack_rssi;
+ info->status.is_valid_ack_signal = true;
+ } else {
+ info->flags |= IEEE80211_TX_STAT_NOACK_TRANSMITTED;
+ }
+ }
+
+ ieee80211_tx_status(ar->hw, msdu);
+}
+
+static void
+ath11k_dp_tx_process_htt_tx_complete(struct ath11k_base *ab,
+ void *desc, u8 mac_id,
+ u32 msdu_id, struct dp_tx_ring *tx_ring)
+{
+ struct htt_tx_wbm_completion *status_desc;
+ struct ath11k_dp_htt_wbm_tx_status ts = {0};
+ enum hal_wbm_htt_tx_comp_status wbm_status;
+
+ status_desc = desc + HTT_TX_WBM_COMP_STATUS_OFFSET;
+
+ wbm_status = FIELD_GET(HTT_TX_WBM_COMP_INFO0_STATUS,
+ status_desc->info0);
+
+ switch (wbm_status) {
+ case HAL_WBM_REL_HTT_TX_COMP_STATUS_OK:
+ case HAL_WBM_REL_HTT_TX_COMP_STATUS_DROP:
+ case HAL_WBM_REL_HTT_TX_COMP_STATUS_TTL:
+ ts.acked = (wbm_status == HAL_WBM_REL_HTT_TX_COMP_STATUS_OK);
+ ts.msdu_id = msdu_id;
+ ts.ack_rssi = FIELD_GET(HTT_TX_WBM_COMP_INFO1_ACK_RSSI,
+ status_desc->info1);
+ ath11k_dp_tx_htt_tx_complete_buf(ab, tx_ring, &ts);
+ break;
+ case HAL_WBM_REL_HTT_TX_COMP_STATUS_REINJ:
+ case HAL_WBM_REL_HTT_TX_COMP_STATUS_INSPECT:
+ ath11k_dp_tx_free_txbuf(ab, mac_id, msdu_id, tx_ring);
+ break;
+ case HAL_WBM_REL_HTT_TX_COMP_STATUS_MEC_NOTIFY:
+ /* This event is to be handled only when the driver decides to
+ * use WDS offload functionality.
+ */
+ break;
+ default:
+ ath11k_warn(ab, "Unknown htt tx status %d\n", wbm_status);
+ break;
+ }
+}
+
+static void ath11k_dp_tx_cache_peer_stats(struct ath11k *ar,
+ struct sk_buff *msdu,
+ struct hal_tx_status *ts)
+{
+ struct ath11k_per_peer_tx_stats *peer_stats = &ar->cached_stats;
+
+ if (ts->try_cnt > 1) {
+ peer_stats->retry_pkts += ts->try_cnt - 1;
+ peer_stats->retry_bytes += (ts->try_cnt - 1) * msdu->len;
+
+ if (ts->status != HAL_WBM_TQM_REL_REASON_FRAME_ACKED) {
+ peer_stats->failed_pkts += 1;
+ peer_stats->failed_bytes += msdu->len;
+ }
+ }
+}
+
+static void ath11k_dp_tx_complete_msdu(struct ath11k *ar,
+ struct sk_buff *msdu,
+ struct hal_tx_status *ts)
+{
+ struct ath11k_base *ab = ar->ab;
+ struct ieee80211_tx_info *info;
+ struct ath11k_skb_cb *skb_cb;
+
+ if (WARN_ON_ONCE(ts->buf_rel_source != HAL_WBM_REL_SRC_MODULE_TQM)) {
+ /* Must not happen */
+ return;
+ }
+
+ skb_cb = ATH11K_SKB_CB(msdu);
+
+ dma_unmap_single(ab->dev, skb_cb->paddr, msdu->len, DMA_TO_DEVICE);
+
+ rcu_read_lock();
+
+ if (!rcu_dereference(ab->pdevs_active[ar->pdev_idx])) {
+ dev_kfree_skb_any(msdu);
+ goto exit;
+ }
+
+ if (!skb_cb->vif) {
+ dev_kfree_skb_any(msdu);
+ goto exit;
+ }
+
+ info = IEEE80211_SKB_CB(msdu);
+ memset(&info->status, 0, sizeof(info->status));
+
+ /* skip tx rate update from ieee80211_status */
+ info->status.rates[0].idx = -1;
+
+ if (ts->status == HAL_WBM_TQM_REL_REASON_FRAME_ACKED &&
+ !(info->flags & IEEE80211_TX_CTL_NO_ACK)) {
+ info->flags |= IEEE80211_TX_STAT_ACK;
+ info->status.ack_signal = ATH11K_DEFAULT_NOISE_FLOOR +
+ ts->ack_rssi;
+ info->status.is_valid_ack_signal = true;
+ }
+
+ if (ts->status == HAL_WBM_TQM_REL_REASON_CMD_REMOVE_TX &&
+ (info->flags & IEEE80211_TX_CTL_NO_ACK))
+ info->flags |= IEEE80211_TX_STAT_NOACK_TRANSMITTED;
+
+ if (ath11k_debug_is_extd_tx_stats_enabled(ar)) {
+ if (ts->flags & HAL_TX_STATUS_FLAGS_FIRST_MSDU) {
+ if (ar->last_ppdu_id == 0) {
+ ar->last_ppdu_id = ts->ppdu_id;
+ } else if (ar->last_ppdu_id == ts->ppdu_id ||
+ ar->cached_ppdu_id == ar->last_ppdu_id) {
+ ar->cached_ppdu_id = ar->last_ppdu_id;
+ ar->cached_stats.is_ampdu = true;
+ ath11k_update_per_peer_stats_from_txcompl(ar, msdu, ts);
+ memset(&ar->cached_stats, 0,
+ sizeof(struct ath11k_per_peer_tx_stats));
+ } else {
+ ar->cached_stats.is_ampdu = false;
+ ath11k_update_per_peer_stats_from_txcompl(ar, msdu, ts);
+ memset(&ar->cached_stats, 0,
+ sizeof(struct ath11k_per_peer_tx_stats));
+ }
+ ar->last_ppdu_id = ts->ppdu_id;
+ }
+
+ ath11k_dp_tx_cache_peer_stats(ar, msdu, ts);
+ }
+
+ /* NOTE: Tx rate status reporting. Tx completion status does not have
+ * the necessary information (for example, nss) to build the tx rate.
+ * Might end up reporting it out-of-band from HTT stats.
+ */
+
+ ieee80211_tx_status(ar->hw, msdu);
+
+exit:
+ rcu_read_unlock();
+}
+
+static inline void ath11k_dp_tx_status_parse(struct ath11k_base *ab,
+ struct hal_wbm_release_ring *desc,
+ struct hal_tx_status *ts)
+{
+ ts->buf_rel_source =
+ FIELD_GET(HAL_WBM_RELEASE_INFO0_REL_SRC_MODULE, desc->info0);
+ if (ts->buf_rel_source != HAL_WBM_REL_SRC_MODULE_FW &&
+ ts->buf_rel_source != HAL_WBM_REL_SRC_MODULE_TQM)
+ return;
+
+ if (ts->buf_rel_source == HAL_WBM_REL_SRC_MODULE_FW)
+ return;
+
+ ts->status = FIELD_GET(HAL_WBM_RELEASE_INFO0_TQM_RELEASE_REASON,
+ desc->info0);
+ ts->ppdu_id = FIELD_GET(HAL_WBM_RELEASE_INFO1_TQM_STATUS_NUMBER,
+ desc->info1);
+ ts->try_cnt = FIELD_GET(HAL_WBM_RELEASE_INFO1_TRANSMIT_COUNT,
+ desc->info1);
+ ts->ack_rssi = FIELD_GET(HAL_WBM_RELEASE_INFO2_ACK_FRAME_RSSI,
+ desc->info2);
+ if (desc->info2 & HAL_WBM_RELEASE_INFO2_FIRST_MSDU)
+ ts->flags |= HAL_TX_STATUS_FLAGS_FIRST_MSDU;
+ ts->peer_id = FIELD_GET(HAL_WBM_RELEASE_INFO3_PEER_ID, desc->info3);
+ ts->tid = FIELD_GET(HAL_WBM_RELEASE_INFO3_TID, desc->info3);
+ if (desc->rate_stats.info0 & HAL_TX_RATE_STATS_INFO0_VALID)
+ ts->rate_stats = desc->rate_stats.info0;
+ else
+ ts->rate_stats = 0;
+}
+
+void ath11k_dp_tx_completion_handler(struct ath11k_base *ab, int ring_id)
+{
+ struct ath11k *ar;
+ struct ath11k_dp *dp = &ab->dp;
+ int hal_ring_id = dp->tx_ring[ring_id].tcl_comp_ring.ring_id;
+ struct hal_srng *status_ring = &ab->hal.srng_list[hal_ring_id];
+ struct sk_buff *msdu;
+ struct hal_tx_status ts = { 0 };
+ struct dp_tx_ring *tx_ring = &dp->tx_ring[ring_id];
+ u32 *desc;
+ u32 msdu_id;
+ u8 mac_id;
+
+ ath11k_hal_srng_access_begin(ab, status_ring);
+
+ while ((ATH11K_TX_COMPL_NEXT(tx_ring->tx_status_head) !=
+ tx_ring->tx_status_tail) &&
+ (desc = ath11k_hal_srng_dst_get_next_entry(ab, status_ring))) {
+ memcpy(&tx_ring->tx_status[tx_ring->tx_status_head],
+ desc, sizeof(struct hal_wbm_release_ring));
+ tx_ring->tx_status_head =
+ ATH11K_TX_COMPL_NEXT(tx_ring->tx_status_head);
+ }
+
+ if ((ath11k_hal_srng_dst_peek(ab, status_ring) != NULL) &&
+ (ATH11K_TX_COMPL_NEXT(tx_ring->tx_status_head) == tx_ring->tx_status_tail)) {
+ /* TODO: Process pending tx_status messages when kfifo_is_full() */
+ ath11k_warn(ab, "Unable to process some of the tx_status ring desc because status_fifo is full\n");
+ }
+
+ ath11k_hal_srng_access_end(ab, status_ring);
+
+ while (ATH11K_TX_COMPL_NEXT(tx_ring->tx_status_tail) != tx_ring->tx_status_head) {
+ struct hal_wbm_release_ring *tx_status;
+ u32 desc_id;
+
+ tx_ring->tx_status_tail =
+ ATH11K_TX_COMPL_NEXT(tx_ring->tx_status_tail);
+ tx_status = &tx_ring->tx_status[tx_ring->tx_status_tail];
+ ath11k_dp_tx_status_parse(ab, tx_status, &ts);
+
+ desc_id = FIELD_GET(BUFFER_ADDR_INFO1_SW_COOKIE,
+ tx_status->buf_addr_info.info1);
+ mac_id = FIELD_GET(DP_TX_DESC_ID_MAC_ID, desc_id);
+ msdu_id = FIELD_GET(DP_TX_DESC_ID_MSDU_ID, desc_id);
+
+ if (ts.buf_rel_source == HAL_WBM_REL_SRC_MODULE_FW) {
+ ath11k_dp_tx_process_htt_tx_complete(ab,
+ (void *)tx_status,
+ mac_id, msdu_id,
+ tx_ring);
+ continue;
+ }
+
+ spin_lock_bh(&tx_ring->tx_idr_lock);
+ msdu = idr_find(&tx_ring->txbuf_idr, msdu_id);
+ if (!msdu) {
+ ath11k_warn(ab, "tx completion for unknown msdu_id %d\n",
+ msdu_id);
+ spin_unlock_bh(&tx_ring->tx_idr_lock);
+ continue;
+ }
+ idr_remove(&tx_ring->txbuf_idr, msdu_id);
+ spin_unlock_bh(&tx_ring->tx_idr_lock);
+
+ ar = ab->pdevs[mac_id].ar;
+
+ if (atomic_dec_and_test(&ar->dp.num_tx_pending))
+ wake_up(&ar->dp.tx_empty_waitq);
+
+ ath11k_dp_tx_complete_msdu(ar, msdu, &ts);
+ }
+}
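+
+/* Illustrative sketch only: the tx_status array above is used as a ring
+ * buffer, assuming ATH11K_TX_COMPL_NEXT() is a modular increment over the
+ * array (which is how it is used here). One slot always stays unused so
+ * that the full and empty states are distinguishable.
+ */
+#if 0
+	/* producer (under the srng lock):
+	 *	while (NEXT(head) != tail) { status[head] = *desc; head = NEXT(head); }
+	 * consumer (lock dropped):
+	 *	while (NEXT(tail) != head) { tail = NEXT(tail); process(status[tail]); }
+	 * full:  NEXT(head) == tail	empty: NEXT(tail) == head
+	 */
+#endif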
+
+int ath11k_dp_tx_send_reo_cmd(struct ath11k_base *ab, struct dp_rx_tid *rx_tid,
+ enum hal_reo_cmd_type type,
+ struct ath11k_hal_reo_cmd *cmd,
+ void (*cb)(struct ath11k_dp *, void *,
+ enum hal_reo_cmd_status))
+{
+ struct ath11k_dp *dp = &ab->dp;
+ struct dp_reo_cmd *dp_cmd;
+ struct hal_srng *cmd_ring;
+ int cmd_num;
+
+ cmd_ring = &ab->hal.srng_list[dp->reo_cmd_ring.ring_id];
+ cmd_num = ath11k_hal_reo_cmd_send(ab, cmd_ring, type, cmd);
+
+ /* reo cmd ring descriptors have cmd_num starting from 1 */
+ if (cmd_num <= 0)
+ return -EINVAL;
+
+ if (!cb)
+ return 0;
+
+ /* Can this be optimized so that we keep the pending command list only
+ * for the tid delete command, to free up the resource on the command
+ * status indication?
+ */
+ dp_cmd = kzalloc(sizeof(*dp_cmd), GFP_ATOMIC);
+
+ if (!dp_cmd)
+ return -ENOMEM;
+
+ memcpy(&dp_cmd->data, rx_tid, sizeof(struct dp_rx_tid));
+ dp_cmd->cmd_num = cmd_num;
+ dp_cmd->handler = cb;
+
+ spin_lock_bh(&dp->reo_cmd_lock);
+ list_add_tail(&dp_cmd->list, &dp->reo_cmd_list);
+ spin_unlock_bh(&dp->reo_cmd_lock);
+
+ return 0;
+}
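+
+/* Illustrative sketch only: a hypothetical caller of the helper above. The
+ * command type and callback names are placeholders; the real callers live
+ * in the rx path. With a callback the command is tracked on
+ * dp->reo_cmd_list until the REO status ring reports completion for
+ * cmd_num; without one it is fire-and-forget.
+ */
+#if 0
+static void example_reo_cmd_done(struct ath11k_dp *dp, void *ctx,
+				 enum hal_reo_cmd_status status)
+{
+	/* ctx points at the dp_rx_tid copy stored in dp_cmd->data */
+}
+
+	/* ...then, from some rx-path context: */
+	ret = ath11k_dp_tx_send_reo_cmd(ab, rx_tid, HAL_REO_CMD_UPDATE_RX_QUEUE,
+					&cmd, example_reo_cmd_done);
+#endif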
+
+static int
+ath11k_dp_tx_get_ring_id_type(struct ath11k_base *ab,
+ int mac_id, u32 ring_id,
+ enum hal_ring_type ring_type,
+ enum htt_srng_ring_type *htt_ring_type,
+ enum htt_srng_ring_id *htt_ring_id)
+{
+ int lmac_ring_id_offset = 0;
+ int ret = 0;
+
+ switch (ring_type) {
+ case HAL_RXDMA_BUF:
+ lmac_ring_id_offset = mac_id * HAL_SRNG_RINGS_PER_LMAC;
+ if (!(ring_id == (HAL_SRNG_RING_ID_WMAC1_SW2RXDMA0_BUF +
+ lmac_ring_id_offset) ||
+ ring_id == (HAL_SRNG_RING_ID_WMAC1_SW2RXDMA1_BUF +
+ lmac_ring_id_offset))) {
+ ret = -EINVAL;
+ }
+ *htt_ring_id = HTT_RXDMA_HOST_BUF_RING;
+ *htt_ring_type = HTT_SW_TO_HW_RING;
+ break;
+ case HAL_RXDMA_DST:
+ *htt_ring_id = HTT_RXDMA_NON_MONITOR_DEST_RING;
+ *htt_ring_type = HTT_HW_TO_SW_RING;
+ break;
+ case HAL_RXDMA_MONITOR_BUF:
+ *htt_ring_id = HTT_RXDMA_MONITOR_BUF_RING;
+ *htt_ring_type = HTT_SW_TO_HW_RING;
+ break;
+ case HAL_RXDMA_MONITOR_STATUS:
+ *htt_ring_id = HTT_RXDMA_MONITOR_STATUS_RING;
+ *htt_ring_type = HTT_SW_TO_HW_RING;
+ break;
+ case HAL_RXDMA_MONITOR_DST:
+ *htt_ring_id = HTT_RXDMA_MONITOR_DEST_RING;
+ *htt_ring_type = HTT_HW_TO_SW_RING;
+ break;
+ case HAL_RXDMA_MONITOR_DESC:
+ *htt_ring_id = HTT_RXDMA_MONITOR_DESC_RING;
+ *htt_ring_type = HTT_SW_TO_HW_RING;
+ break;
+ default:
+ ath11k_warn(ab, "Unsupported ring type in DP: %d\n", ring_type);
+ ret = -EINVAL;
+ }
+ return ret;
+}
+
+int ath11k_dp_tx_htt_srng_setup(struct ath11k_base *ab, u32 ring_id,
+ int mac_id, enum hal_ring_type ring_type)
+{
+ struct htt_srng_setup_cmd *cmd;
+ struct hal_srng *srng = &ab->hal.srng_list[ring_id];
+ struct hal_srng_params params;
+ struct sk_buff *skb;
+ u32 ring_entry_sz;
+ int len = sizeof(*cmd);
+ dma_addr_t hp_addr, tp_addr;
+ enum htt_srng_ring_type htt_ring_type;
+ enum htt_srng_ring_id htt_ring_id;
+ int ret;
+
+ skb = ath11k_htc_alloc_skb(ab, len);
+ if (!skb)
+ return -ENOMEM;
+
+ memset(&params, 0, sizeof(params));
+ ath11k_hal_srng_get_params(ab, srng, &params);
+
+ hp_addr = ath11k_hal_srng_get_hp_addr(ab, srng);
+ tp_addr = ath11k_hal_srng_get_tp_addr(ab, srng);
+
+ ret = ath11k_dp_tx_get_ring_id_type(ab, mac_id, ring_id,
+ ring_type, &htt_ring_type,
+ &htt_ring_id);
+ if (ret)
+ goto err_free;
+
+ skb_put(skb, len);
+ cmd = (struct htt_srng_setup_cmd *)skb->data;
+ cmd->info0 = FIELD_PREP(HTT_SRNG_SETUP_CMD_INFO0_MSG_TYPE,
+ HTT_H2T_MSG_TYPE_SRING_SETUP);
+ if (htt_ring_type == HTT_SW_TO_HW_RING ||
+ htt_ring_type == HTT_HW_TO_SW_RING)
+ cmd->info0 |= FIELD_PREP(HTT_SRNG_SETUP_CMD_INFO0_PDEV_ID,
+ DP_SW2HW_MACID(mac_id));
+ else
+ cmd->info0 |= FIELD_PREP(HTT_SRNG_SETUP_CMD_INFO0_PDEV_ID,
+ mac_id);
+ cmd->info0 |= FIELD_PREP(HTT_SRNG_SETUP_CMD_INFO0_RING_TYPE,
+ htt_ring_type);
+ cmd->info0 |= FIELD_PREP(HTT_SRNG_SETUP_CMD_INFO0_RING_ID, htt_ring_id);
+
+ cmd->ring_base_addr_lo = params.ring_base_paddr &
+ HAL_ADDR_LSB_REG_MASK;
+
+ cmd->ring_base_addr_hi = (u64)params.ring_base_paddr >>
+ HAL_ADDR_MSB_REG_SHIFT;
+
+ ret = ath11k_hal_srng_get_entrysize(ring_type);
+ if (ret < 0)
+ goto err_free;
+
+ ring_entry_sz = ret;
+
+ ring_entry_sz >>= 2;
+ cmd->info1 = FIELD_PREP(HTT_SRNG_SETUP_CMD_INFO1_RING_ENTRY_SIZE,
+ ring_entry_sz);
+ cmd->info1 |= FIELD_PREP(HTT_SRNG_SETUP_CMD_INFO1_RING_SIZE,
+ params.num_entries * ring_entry_sz);
+ cmd->info1 |= FIELD_PREP(HTT_SRNG_SETUP_CMD_INFO1_RING_FLAGS_MSI_SWAP,
+ !!(params.flags & HAL_SRNG_FLAGS_MSI_SWAP));
+ cmd->info1 |= FIELD_PREP(
+ HTT_SRNG_SETUP_CMD_INFO1_RING_FLAGS_TLV_SWAP,
+ !!(params.flags & HAL_SRNG_FLAGS_DATA_TLV_SWAP));
+ cmd->info1 |= FIELD_PREP(
+ HTT_SRNG_SETUP_CMD_INFO1_RING_FLAGS_HOST_FW_SWAP,
+ !!(params.flags & HAL_SRNG_FLAGS_RING_PTR_SWAP));
+ if (htt_ring_type == HTT_SW_TO_HW_RING)
+ cmd->info1 |= HTT_SRNG_SETUP_CMD_INFO1_RING_LOOP_CNT_DIS;
+
+ cmd->ring_head_off32_remote_addr_lo = hp_addr & HAL_ADDR_LSB_REG_MASK;
+ cmd->ring_head_off32_remote_addr_hi = (u64)hp_addr >>
+ HAL_ADDR_MSB_REG_SHIFT;
+
+ cmd->ring_tail_off32_remote_addr_lo = tp_addr & HAL_ADDR_LSB_REG_MASK;
+ cmd->ring_tail_off32_remote_addr_hi = (u64)tp_addr >>
+ HAL_ADDR_MSB_REG_SHIFT;
+
+ cmd->ring_msi_addr_lo = 0;
+ cmd->ring_msi_addr_hi = 0;
+ cmd->msi_data = 0;
+
+ cmd->intr_info = FIELD_PREP(
+ HTT_SRNG_SETUP_CMD_INTR_INFO_BATCH_COUNTER_THRESH,
+ params.intr_batch_cntr_thres_entries * ring_entry_sz);
+ cmd->intr_info |= FIELD_PREP(
+ HTT_SRNG_SETUP_CMD_INTR_INFO_INTR_TIMER_THRESH,
+ params.intr_timer_thres_us >> 3);
+
+ cmd->info2 = 0;
+ if (params.flags & HAL_SRNG_FLAGS_LOW_THRESH_INTR_EN) {
+ cmd->info2 = FIELD_PREP(
+ HTT_SRNG_SETUP_CMD_INFO2_INTR_LOW_THRESH,
+ params.low_threshold);
+ }
+
+ ret = ath11k_htc_send(&ab->htc, ab->dp.eid, skb);
+ if (ret)
+ goto err_free;
+
+ return 0;
+
+err_free:
+ dev_kfree_skb_any(skb);
+
+ return ret;
+}
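+
+/* Illustrative note, not part of the driver: the HTT ring geometry fields
+ * above appear to be expressed in 32-bit words, hence the >> 2 on the
+ * byte-sized value returned by ath11k_hal_srng_get_entrysize().
+ */
+#if 0	/* hypothetical numbers */
+	u32 entry_bytes = 40;			/* from get_entrysize() */
+	u32 entry_dwords = entry_bytes >> 2;	/* 10, RING_ENTRY_SIZE field */
+	u32 ring_dwords = 1024 * entry_dwords;	/* 10240, RING_SIZE field */
+#endif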
+
+#define HTT_TARGET_VERSION_TIMEOUT_HZ (3 * HZ)
+
+int ath11k_dp_tx_htt_h2t_ver_req_msg(struct ath11k_base *ab)
+{
+ struct ath11k_dp *dp = &ab->dp;
+ struct sk_buff *skb;
+ struct htt_ver_req_cmd *cmd;
+ int len = sizeof(*cmd);
+ int ret;
+
+ init_completion(&dp->htt_tgt_version_received);
+
+ skb = ath11k_htc_alloc_skb(ab, len);
+ if (!skb)
+ return -ENOMEM;
+
+ skb_put(skb, len);
+ cmd = (struct htt_ver_req_cmd *)skb->data;
+ cmd->ver_reg_info = FIELD_PREP(HTT_VER_REQ_INFO_MSG_ID,
+ HTT_H2T_MSG_TYPE_VERSION_REQ);
+
+ ret = ath11k_htc_send(&ab->htc, dp->eid, skb);
+ if (ret) {
+ dev_kfree_skb_any(skb);
+ return ret;
+ }
+
+ ret = wait_for_completion_timeout(&dp->htt_tgt_version_received,
+ HTT_TARGET_VERSION_TIMEOUT_HZ);
+ if (ret == 0) {
+ ath11k_warn(ab, "htt target version request timed out\n");
+ return -ETIMEDOUT;
+ }
+
+ if (dp->htt_tgt_ver_major != HTT_TARGET_VERSION_MAJOR) {
+ ath11k_err(ab, "unsupported htt major version %d, supported version is %d\n",
+ dp->htt_tgt_ver_major, HTT_TARGET_VERSION_MAJOR);
+ return -ENOTSUPP;
+ }
+
+ return 0;
+}
+
+int ath11k_dp_tx_htt_h2t_ppdu_stats_req(struct ath11k *ar, u32 mask)
+{
+ struct ath11k_base *ab = ar->ab;
+ struct ath11k_dp *dp = &ab->dp;
+ struct sk_buff *skb;
+ struct htt_ppdu_stats_cfg_cmd *cmd;
+ int len = sizeof(*cmd);
+ u8 pdev_mask;
+ int ret;
+
+ skb = ath11k_htc_alloc_skb(ab, len);
+ if (!skb)
+ return -ENOMEM;
+
+ skb_put(skb, len);
+ cmd = (struct htt_ppdu_stats_cfg_cmd *)skb->data;
+ cmd->msg = FIELD_PREP(HTT_PPDU_STATS_CFG_MSG_TYPE,
+ HTT_H2T_MSG_TYPE_PPDU_STATS_CFG);
+
+ pdev_mask = 1 << (ar->pdev_idx);
+ cmd->msg |= FIELD_PREP(HTT_PPDU_STATS_CFG_PDEV_ID, pdev_mask);
+ cmd->msg |= FIELD_PREP(HTT_PPDU_STATS_CFG_TLV_TYPE_BITMASK, mask);
+
+ ret = ath11k_htc_send(&ab->htc, dp->eid, skb);
+ if (ret) {
+ dev_kfree_skb_any(skb);
+ return ret;
+ }
+
+ return 0;
+}
+
+int ath11k_dp_tx_htt_rx_filter_setup(struct ath11k_base *ab, u32 ring_id,
+ int mac_id, enum hal_ring_type ring_type,
+ int rx_buf_size,
+ struct htt_rx_ring_tlv_filter *tlv_filter)
+{
+ struct htt_rx_ring_selection_cfg_cmd *cmd;
+ struct hal_srng *srng = &ab->hal.srng_list[ring_id];
+ struct hal_srng_params params;
+ struct sk_buff *skb;
+ int len = sizeof(*cmd);
+ enum htt_srng_ring_type htt_ring_type;
+ enum htt_srng_ring_id htt_ring_id;
+ int ret;
+
+ skb = ath11k_htc_alloc_skb(ab, len);
+ if (!skb)
+ return -ENOMEM;
+
+ memset(&params, 0, sizeof(params));
+ ath11k_hal_srng_get_params(ab, srng, &params);
+
+ ret = ath11k_dp_tx_get_ring_id_type(ab, mac_id, ring_id,
+ ring_type, &htt_ring_type,
+ &htt_ring_id);
+ if (ret)
+ goto err_free;
+
+ skb_put(skb, len);
+ cmd = (struct htt_rx_ring_selection_cfg_cmd *)skb->data;
+ cmd->info0 = FIELD_PREP(HTT_RX_RING_SELECTION_CFG_CMD_INFO0_MSG_TYPE,
+ HTT_H2T_MSG_TYPE_RX_RING_SELECTION_CFG);
+ if (htt_ring_type == HTT_SW_TO_HW_RING ||
+ htt_ring_type == HTT_HW_TO_SW_RING)
+ cmd->info0 |=
+ FIELD_PREP(HTT_RX_RING_SELECTION_CFG_CMD_INFO0_PDEV_ID,
+ DP_SW2HW_MACID(mac_id));
+ else
+ cmd->info0 |=
+ FIELD_PREP(HTT_RX_RING_SELECTION_CFG_CMD_INFO0_PDEV_ID,
+ mac_id);
+ cmd->info0 |= FIELD_PREP(HTT_RX_RING_SELECTION_CFG_CMD_INFO0_RING_ID,
+ htt_ring_id);
+ cmd->info0 |= FIELD_PREP(HTT_RX_RING_SELECTION_CFG_CMD_INFO0_SS,
+ !!(params.flags & HAL_SRNG_FLAGS_MSI_SWAP));
+ cmd->info0 |= FIELD_PREP(HTT_RX_RING_SELECTION_CFG_CMD_INFO0_PS,
+ !!(params.flags & HAL_SRNG_FLAGS_DATA_TLV_SWAP));
+
+ cmd->info1 = FIELD_PREP(HTT_RX_RING_SELECTION_CFG_CMD_INFO1_BUF_SIZE,
+ rx_buf_size);
+ cmd->pkt_type_en_flags0 = tlv_filter->pkt_filter_flags0;
+ cmd->pkt_type_en_flags1 = tlv_filter->pkt_filter_flags1;
+ cmd->pkt_type_en_flags2 = tlv_filter->pkt_filter_flags2;
+ cmd->pkt_type_en_flags3 = tlv_filter->pkt_filter_flags3;
+ cmd->rx_filter_tlv = tlv_filter->rx_filter;
+
+ ret = ath11k_htc_send(&ab->htc, ab->dp.eid, skb);
+ if (ret)
+ goto err_free;
+
+ return 0;
+
+err_free:
+ dev_kfree_skb_any(skb);
+
+ return ret;
+}
+
+int
+ath11k_dp_tx_htt_h2t_ext_stats_req(struct ath11k *ar, u8 type,
+ struct htt_ext_stats_cfg_params *cfg_params,
+ u64 cookie)
+{
+ struct ath11k_base *ab = ar->ab;
+ struct ath11k_dp *dp = &ab->dp;
+ struct sk_buff *skb;
+ struct htt_ext_stats_cfg_cmd *cmd;
+ int len = sizeof(*cmd);
+ int ret;
+
+ skb = ath11k_htc_alloc_skb(ab, len);
+ if (!skb)
+ return -ENOMEM;
+
+ skb_put(skb, len);
+
+ cmd = (struct htt_ext_stats_cfg_cmd *)skb->data;
+ memset(cmd, 0, sizeof(*cmd));
+ cmd->hdr.msg_type = HTT_H2T_MSG_TYPE_EXT_STATS_CFG;
+
+ cmd->hdr.pdev_mask = 1 << ar->pdev->pdev_id;
+
+ cmd->hdr.stats_type = type;
+ cmd->cfg_param0 = cfg_params->cfg0;
+ cmd->cfg_param1 = cfg_params->cfg1;
+ cmd->cfg_param2 = cfg_params->cfg2;
+ cmd->cfg_param3 = cfg_params->cfg3;
+ cmd->cookie_lsb = lower_32_bits(cookie);
+ cmd->cookie_msb = upper_32_bits(cookie);
+
+ ret = ath11k_htc_send(&ab->htc, dp->eid, skb);
+ if (ret) {
+ ath11k_warn(ab, "failed to send htt type stats request: %d",
+ ret);
+ dev_kfree_skb_any(skb);
+ return ret;
+ }
+
+ return 0;
+}
+
+int ath11k_dp_tx_htt_monitor_mode_ring_config(struct ath11k *ar, bool reset)
+{
+ struct ath11k_pdev_dp *dp = &ar->dp;
+ struct htt_rx_ring_tlv_filter tlv_filter = {0};
+ int ret = 0, ring_id = 0;
+
+ ring_id = dp->rxdma_mon_buf_ring.refill_buf_ring.ring_id;
+
+ if (!reset) {
+ tlv_filter.rx_filter = HTT_RX_MON_FILTER_TLV_FLAGS_MON_BUF_RING;
+ tlv_filter.pkt_filter_flags0 =
+ HTT_RX_MON_FP_MGMT_FILTER_FLAGS0 |
+ HTT_RX_MON_MO_MGMT_FILTER_FLAGS0;
+ tlv_filter.pkt_filter_flags1 =
+ HTT_RX_MON_FP_MGMT_FILTER_FLAGS1 |
+ HTT_RX_MON_MO_MGMT_FILTER_FLAGS1;
+ tlv_filter.pkt_filter_flags2 =
+ HTT_RX_MON_FP_CTRL_FILTER_FLASG2 |
+ HTT_RX_MON_MO_CTRL_FILTER_FLASG2;
+ tlv_filter.pkt_filter_flags3 =
+ HTT_RX_MON_FP_CTRL_FILTER_FLASG3 |
+ HTT_RX_MON_MO_CTRL_FILTER_FLASG3 |
+ HTT_RX_MON_FP_DATA_FILTER_FLASG3 |
+ HTT_RX_MON_MO_DATA_FILTER_FLASG3;
+ }
+
+ ret = ath11k_dp_tx_htt_rx_filter_setup(ar->ab, ring_id, dp->mac_id,
+ HAL_RXDMA_MONITOR_BUF,
+ DP_RXDMA_REFILL_RING_SIZE,
+ &tlv_filter);
+ if (ret)
+ return ret;
+
+ ring_id = dp->rx_mon_status_refill_ring.refill_buf_ring.ring_id;
+ if (!reset)
+ tlv_filter.rx_filter =
+ HTT_RX_MON_FILTER_TLV_FLAGS_MON_STATUS_RING;
+ else
+ tlv_filter = ath11k_mac_mon_status_filter_default;
+
+ ret = ath11k_dp_tx_htt_rx_filter_setup(ar->ab, ring_id, dp->mac_id,
+ HAL_RXDMA_MONITOR_STATUS,
+ DP_RXDMA_REFILL_RING_SIZE,
+ &tlv_filter);
+ return ret;
+}
diff --git a/drivers/net/wireless/ath/ath11k/dp_tx.h b/drivers/net/wireless/ath/ath11k/dp_tx.h
new file mode 100644
index 000000000000..f8a9f9c8e444
--- /dev/null
+++ b/drivers/net/wireless/ath/ath11k/dp_tx.h
@@ -0,0 +1,40 @@
+/* SPDX-License-Identifier: BSD-3-Clause-Clear */
+/*
+ * Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
+ */
+
+#ifndef ATH11K_DP_TX_H
+#define ATH11K_DP_TX_H
+
+#include "core.h"
+#include "hal_tx.h"
+
+struct ath11k_dp_htt_wbm_tx_status {
+ u32 msdu_id;
+ bool acked;
+ int ack_rssi;
+};
+
+int ath11k_dp_tx_htt_h2t_ver_req_msg(struct ath11k_base *ab);
+int ath11k_dp_tx(struct ath11k *ar, struct ath11k_vif *arvif,
+ struct sk_buff *skb);
+void ath11k_dp_tx_completion_handler(struct ath11k_base *ab, int ring_id);
+int ath11k_dp_tx_send_reo_cmd(struct ath11k_base *ab, struct dp_rx_tid *rx_tid,
+ enum hal_reo_cmd_type type,
+ struct ath11k_hal_reo_cmd *cmd,
+ void (*func)(struct ath11k_dp *, void *,
+ enum hal_reo_cmd_status));
+
+int ath11k_dp_tx_htt_h2t_ppdu_stats_req(struct ath11k *ar, u32 mask);
+int
+ath11k_dp_tx_htt_h2t_ext_stats_req(struct ath11k *ar, u8 type,
+ struct htt_ext_stats_cfg_params *cfg_params,
+ u64 cookie);
+int ath11k_dp_tx_htt_monitor_mode_ring_config(struct ath11k *ar, bool reset);
+
+int ath11k_dp_tx_htt_rx_filter_setup(struct ath11k_base *ab, u32 ring_id,
+ int mac_id, enum hal_ring_type ring_type,
+ int rx_buf_size,
+ struct htt_rx_ring_tlv_filter *tlv_filter);
+
+#endif
diff --git a/drivers/net/wireless/ath/ath11k/hal.c b/drivers/net/wireless/ath/ath11k/hal.c
new file mode 100644
index 000000000000..b58ac11c2747
--- /dev/null
+++ b/drivers/net/wireless/ath/ath11k/hal.c
@@ -0,0 +1,1124 @@
+// SPDX-License-Identifier: BSD-3-Clause-Clear
+/*
+ * Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
+ */
+#include <linux/dma-mapping.h>
+#include "ahb.h"
+#include "hal_tx.h"
+#include "debug.h"
+#include "hal_desc.h"
+
+static const struct hal_srng_config hw_srng_config[] = {
+ /* TODO: max_rings can be populated by querying HW capabilities */
+ { /* REO_DST */
+ .start_ring_id = HAL_SRNG_RING_ID_REO2SW1,
+ .max_rings = 4,
+ .entry_size = sizeof(struct hal_reo_dest_ring) >> 2,
+ .lmac_ring = false,
+ .ring_dir = HAL_SRNG_DIR_DST,
+ .reg_start = {
+ HAL_SEQ_WCSS_UMAC_REO_REG + HAL_REO1_RING_BASE_LSB,
+ HAL_SEQ_WCSS_UMAC_REO_REG + HAL_REO1_RING_HP,
+ },
+ .reg_size = {
+ HAL_REO2_RING_BASE_LSB - HAL_REO1_RING_BASE_LSB,
+ HAL_REO2_RING_HP - HAL_REO1_RING_HP,
+ },
+ .max_size = HAL_REO_REO2SW1_RING_BASE_MSB_RING_SIZE,
+ },
+ { /* REO_EXCEPTION */
+ /* Designating REO2TCL ring as exception ring. This ring is
+ * similar to other REO2SW rings though it is named REO2TCL.
+ * Any of the REO2SW rings can be used as the exception ring.
+ */
+ .start_ring_id = HAL_SRNG_RING_ID_REO2TCL,
+ .max_rings = 1,
+ .entry_size = sizeof(struct hal_reo_dest_ring) >> 2,
+ .lmac_ring = false,
+ .ring_dir = HAL_SRNG_DIR_DST,
+ .reg_start = {
+ HAL_SEQ_WCSS_UMAC_REO_REG + HAL_REO_TCL_RING_BASE_LSB,
+ HAL_SEQ_WCSS_UMAC_REO_REG + HAL_REO_TCL_RING_HP,
+ },
+ .max_size = HAL_REO_REO2TCL_RING_BASE_MSB_RING_SIZE,
+ },
+ { /* REO_REINJECT */
+ .start_ring_id = HAL_SRNG_RING_ID_SW2REO,
+ .max_rings = 1,
+ .entry_size = sizeof(struct hal_reo_entrance_ring) >> 2,
+ .lmac_ring = false,
+ .ring_dir = HAL_SRNG_DIR_SRC,
+ .reg_start = {
+ HAL_SEQ_WCSS_UMAC_REO_REG + HAL_SW2REO_RING_BASE_LSB,
+ HAL_SEQ_WCSS_UMAC_REO_REG + HAL_SW2REO_RING_HP,
+ },
+ .max_size = HAL_REO_SW2REO_RING_BASE_MSB_RING_SIZE,
+ },
+ { /* REO_CMD */
+ .start_ring_id = HAL_SRNG_RING_ID_REO_CMD,
+ .max_rings = 1,
+ .entry_size = (sizeof(struct hal_tlv_hdr) +
+ sizeof(struct hal_reo_get_queue_stats)) >> 2,
+ .lmac_ring = false,
+ .ring_dir = HAL_SRNG_DIR_SRC,
+ .reg_start = {
+ HAL_SEQ_WCSS_UMAC_REO_REG + HAL_REO_CMD_RING_BASE_LSB,
+ HAL_SEQ_WCSS_UMAC_REO_REG + HAL_REO_CMD_HP,
+ },
+ .max_size = HAL_REO_CMD_RING_BASE_MSB_RING_SIZE,
+ },
+ { /* REO_STATUS */
+ .start_ring_id = HAL_SRNG_RING_ID_REO_STATUS,
+ .max_rings = 1,
+ .entry_size = (sizeof(struct hal_tlv_hdr) +
+ sizeof(struct hal_reo_get_queue_stats_status)) >> 2,
+ .lmac_ring = false,
+ .ring_dir = HAL_SRNG_DIR_DST,
+ .reg_start = {
+ HAL_SEQ_WCSS_UMAC_REO_REG +
+ HAL_REO_STATUS_RING_BASE_LSB,
+ HAL_SEQ_WCSS_UMAC_REO_REG + HAL_REO_STATUS_HP,
+ },
+ .max_size = HAL_REO_STATUS_RING_BASE_MSB_RING_SIZE,
+ },
+ { /* TCL_DATA */
+ .start_ring_id = HAL_SRNG_RING_ID_SW2TCL1,
+ .max_rings = 3,
+ .entry_size = (sizeof(struct hal_tlv_hdr) +
+ sizeof(struct hal_tcl_data_cmd)) >> 2,
+ .lmac_ring = false,
+ .ring_dir = HAL_SRNG_DIR_SRC,
+ .reg_start = {
+ HAL_SEQ_WCSS_UMAC_TCL_REG + HAL_TCL1_RING_BASE_LSB,
+ HAL_SEQ_WCSS_UMAC_TCL_REG + HAL_TCL1_RING_HP,
+ },
+ .reg_size = {
+ HAL_TCL2_RING_BASE_LSB - HAL_TCL1_RING_BASE_LSB,
+ HAL_TCL2_RING_HP - HAL_TCL1_RING_HP,
+ },
+ .max_size = HAL_SW2TCL1_RING_BASE_MSB_RING_SIZE,
+ },
+ { /* TCL_CMD */
+ .start_ring_id = HAL_SRNG_RING_ID_SW2TCL_CMD,
+ .max_rings = 1,
+ .entry_size = (sizeof(struct hal_tlv_hdr) +
+ sizeof(struct hal_tcl_gse_cmd)) >> 2,
+ .lmac_ring = false,
+ .ring_dir = HAL_SRNG_DIR_SRC,
+ .reg_start = {
+ HAL_SEQ_WCSS_UMAC_TCL_REG + HAL_TCL_RING_BASE_LSB,
+ HAL_SEQ_WCSS_UMAC_TCL_REG + HAL_TCL_RING_HP,
+ },
+ .max_size = HAL_SW2TCL1_CMD_RING_BASE_MSB_RING_SIZE,
+ },
+ { /* TCL_STATUS */
+ .start_ring_id = HAL_SRNG_RING_ID_TCL_STATUS,
+ .max_rings = 1,
+ .entry_size = (sizeof(struct hal_tlv_hdr) +
+ sizeof(struct hal_tcl_status_ring)) >> 2,
+ .lmac_ring = false,
+ .ring_dir = HAL_SRNG_DIR_DST,
+ .reg_start = {
+ HAL_SEQ_WCSS_UMAC_TCL_REG +
+ HAL_TCL_STATUS_RING_BASE_LSB,
+ HAL_SEQ_WCSS_UMAC_TCL_REG + HAL_TCL_STATUS_RING_HP,
+ },
+ .max_size = HAL_TCL_STATUS_RING_BASE_MSB_RING_SIZE,
+ },
+ { /* CE_SRC */
+ .start_ring_id = HAL_SRNG_RING_ID_CE0_SRC,
+ .max_rings = 12,
+ .entry_size = sizeof(struct hal_ce_srng_src_desc) >> 2,
+ .lmac_ring = false,
+ .ring_dir = HAL_SRNG_DIR_SRC,
+ .reg_start = {
+ (HAL_SEQ_WCSS_UMAC_CE0_SRC_REG +
+ HAL_CE_DST_RING_BASE_LSB),
+ HAL_SEQ_WCSS_UMAC_CE0_SRC_REG + HAL_CE_DST_RING_HP,
+ },
+ .reg_size = {
+ (HAL_SEQ_WCSS_UMAC_CE1_SRC_REG -
+ HAL_SEQ_WCSS_UMAC_CE0_SRC_REG),
+ (HAL_SEQ_WCSS_UMAC_CE1_SRC_REG -
+ HAL_SEQ_WCSS_UMAC_CE0_SRC_REG),
+ },
+ .max_size = HAL_CE_SRC_RING_BASE_MSB_RING_SIZE,
+ },
+ { /* CE_DST */
+ .start_ring_id = HAL_SRNG_RING_ID_CE0_DST,
+ .max_rings = 12,
+ .entry_size = sizeof(struct hal_ce_srng_dest_desc) >> 2,
+ .lmac_ring = false,
+ .ring_dir = HAL_SRNG_DIR_SRC,
+ .reg_start = {
+ (HAL_SEQ_WCSS_UMAC_CE0_DST_REG +
+ HAL_CE_DST_RING_BASE_LSB),
+ HAL_SEQ_WCSS_UMAC_CE0_DST_REG + HAL_CE_DST_RING_HP,
+ },
+ .reg_size = {
+ (HAL_SEQ_WCSS_UMAC_CE1_DST_REG -
+ HAL_SEQ_WCSS_UMAC_CE0_DST_REG),
+ (HAL_SEQ_WCSS_UMAC_CE1_DST_REG -
+ HAL_SEQ_WCSS_UMAC_CE0_DST_REG),
+ },
+ .max_size = HAL_CE_DST_RING_BASE_MSB_RING_SIZE,
+ },
+ { /* CE_DST_STATUS */
+ .start_ring_id = HAL_SRNG_RING_ID_CE0_DST_STATUS,
+ .max_rings = 12,
+ .entry_size = sizeof(struct hal_ce_srng_dst_status_desc) >> 2,
+ .lmac_ring = false,
+ .ring_dir = HAL_SRNG_DIR_DST,
+ .reg_start = {
+ (HAL_SEQ_WCSS_UMAC_CE0_DST_REG +
+ HAL_CE_DST_STATUS_RING_BASE_LSB),
+ (HAL_SEQ_WCSS_UMAC_CE0_DST_REG +
+ HAL_CE_DST_STATUS_RING_HP),
+ },
+ .reg_size = {
+ (HAL_SEQ_WCSS_UMAC_CE1_DST_REG -
+ HAL_SEQ_WCSS_UMAC_CE0_DST_REG),
+ (HAL_SEQ_WCSS_UMAC_CE1_DST_REG -
+ HAL_SEQ_WCSS_UMAC_CE0_DST_REG),
+ },
+ .max_size = HAL_CE_DST_STATUS_RING_BASE_MSB_RING_SIZE,
+ },
+ { /* WBM_IDLE_LINK */
+ .start_ring_id = HAL_SRNG_RING_ID_WBM_IDLE_LINK,
+ .max_rings = 1,
+ .entry_size = sizeof(struct hal_wbm_link_desc) >> 2,
+ .lmac_ring = false,
+ .ring_dir = HAL_SRNG_DIR_SRC,
+ .reg_start = {
+ (HAL_SEQ_WCSS_UMAC_WBM_REG +
+ HAL_WBM_IDLE_LINK_RING_BASE_LSB),
+ (HAL_SEQ_WCSS_UMAC_WBM_REG + HAL_WBM_IDLE_LINK_RING_HP),
+ },
+ .max_size = HAL_WBM_IDLE_LINK_RING_BASE_MSB_RING_SIZE,
+ },
+ { /* SW2WBM_RELEASE */
+ .start_ring_id = HAL_SRNG_RING_ID_WBM_SW_RELEASE,
+ .max_rings = 1,
+ .entry_size = sizeof(struct hal_wbm_release_ring) >> 2,
+ .lmac_ring = false,
+ .ring_dir = HAL_SRNG_DIR_SRC,
+ .reg_start = {
+ (HAL_SEQ_WCSS_UMAC_WBM_REG +
+ HAL_WBM_RELEASE_RING_BASE_LSB),
+ (HAL_SEQ_WCSS_UMAC_WBM_REG + HAL_WBM_RELEASE_RING_HP),
+ },
+ .max_size = HAL_SW2WBM_RELEASE_RING_BASE_MSB_RING_SIZE,
+ },
+ { /* WBM2SW_RELEASE */
+ .start_ring_id = HAL_SRNG_RING_ID_WBM2SW0_RELEASE,
+ .max_rings = 4,
+ .entry_size = sizeof(struct hal_wbm_release_ring) >> 2,
+ .lmac_ring = false,
+ .ring_dir = HAL_SRNG_DIR_DST,
+ .reg_start = {
+ (HAL_SEQ_WCSS_UMAC_WBM_REG +
+ HAL_WBM0_RELEASE_RING_BASE_LSB),
+ (HAL_SEQ_WCSS_UMAC_WBM_REG + HAL_WBM0_RELEASE_RING_HP),
+ },
+ .reg_size = {
+ (HAL_WBM1_RELEASE_RING_BASE_LSB -
+ HAL_WBM0_RELEASE_RING_BASE_LSB),
+ (HAL_WBM1_RELEASE_RING_HP - HAL_WBM0_RELEASE_RING_HP),
+ },
+ .max_size = HAL_WBM2SW_RELEASE_RING_BASE_MSB_RING_SIZE,
+ },
+ { /* RXDMA_BUF */
+ .start_ring_id = HAL_SRNG_RING_ID_WMAC1_SW2RXDMA0_BUF,
+ .max_rings = 2,
+ .entry_size = sizeof(struct hal_wbm_buffer_ring) >> 2,
+ .lmac_ring = true,
+ .ring_dir = HAL_SRNG_DIR_SRC,
+ .max_size = HAL_RXDMA_RING_MAX_SIZE,
+ },
+ { /* RXDMA_DST */
+ .start_ring_id = HAL_SRNG_RING_ID_WMAC1_RXDMA2SW0,
+ .max_rings = 1,
+ .entry_size = sizeof(struct hal_reo_entrance_ring) >> 2,
+ .lmac_ring = true,
+ .ring_dir = HAL_SRNG_DIR_DST,
+ .max_size = HAL_RXDMA_RING_MAX_SIZE,
+ },
+ { /* RXDMA_MONITOR_BUF */
+ .start_ring_id = HAL_SRNG_RING_ID_WMAC1_SW2RXDMA2_BUF,
+ .max_rings = 1,
+ .entry_size = sizeof(struct hal_wbm_buffer_ring) >> 2,
+ .lmac_ring = true,
+ .ring_dir = HAL_SRNG_DIR_SRC,
+ .max_size = HAL_RXDMA_RING_MAX_SIZE,
+ },
+ { /* RXDMA_MONITOR_STATUS */
+ .start_ring_id = HAL_SRNG_RING_ID_WMAC1_SW2RXDMA1_STATBUF,
+ .max_rings = 1,
+ .entry_size = sizeof(struct hal_wbm_buffer_ring) >> 2,
+ .lmac_ring = true,
+ .ring_dir = HAL_SRNG_DIR_SRC,
+ .max_size = HAL_RXDMA_RING_MAX_SIZE,
+ },
+ { /* RXDMA_MONITOR_DST */
+ .start_ring_id = HAL_SRNG_RING_ID_WMAC1_RXDMA2SW1,
+ .max_rings = 1,
+ .entry_size = sizeof(struct hal_reo_entrance_ring) >> 2,
+ .lmac_ring = true,
+ .ring_dir = HAL_SRNG_DIR_DST,
+ .max_size = HAL_RXDMA_RING_MAX_SIZE,
+ },
+ { /* RXDMA_MONITOR_DESC */
+ .start_ring_id = HAL_SRNG_RING_ID_WMAC1_SW2RXDMA1_DESC,
+ .max_rings = 1,
+ .entry_size = sizeof(struct hal_wbm_buffer_ring) >> 2,
+ .lmac_ring = true,
+ .ring_dir = HAL_SRNG_DIR_SRC,
+ .max_size = HAL_RXDMA_RING_MAX_SIZE,
+ },
+ { /* RXDMA DIR BUF */
+ .start_ring_id = HAL_SRNG_RING_ID_RXDMA_DIR_BUF,
+ .max_rings = 1,
+ .entry_size = 8 >> 2, /* TODO: Define the struct */
+ .lmac_ring = true,
+ .ring_dir = HAL_SRNG_DIR_SRC,
+ .max_size = HAL_RXDMA_RING_MAX_SIZE,
+ },
+};
+
+static int ath11k_hal_alloc_cont_rdp(struct ath11k_base *ab)
+{
+ struct ath11k_hal *hal = &ab->hal;
+ size_t size;
+
+ size = sizeof(u32) * HAL_SRNG_RING_ID_MAX;
+ hal->rdp.vaddr = dma_alloc_coherent(ab->dev, size, &hal->rdp.paddr,
+ GFP_KERNEL);
+ if (!hal->rdp.vaddr)
+ return -ENOMEM;
+
+ return 0;
+}
+
+static void ath11k_hal_free_cont_rdp(struct ath11k_base *ab)
+{
+ struct ath11k_hal *hal = &ab->hal;
+ size_t size;
+
+ if (!hal->rdp.vaddr)
+ return;
+
+ size = sizeof(u32) * HAL_SRNG_RING_ID_MAX;
+ dma_free_coherent(ab->dev, size,
+ hal->rdp.vaddr, hal->rdp.paddr);
+ hal->rdp.vaddr = NULL;
+}
+
+static int ath11k_hal_alloc_cont_wrp(struct ath11k_base *ab)
+{
+ struct ath11k_hal *hal = &ab->hal;
+ size_t size;
+
+ size = sizeof(u32) * HAL_SRNG_NUM_LMAC_RINGS;
+ hal->wrp.vaddr = dma_alloc_coherent(ab->dev, size, &hal->wrp.paddr,
+ GFP_KERNEL);
+ if (!hal->wrp.vaddr)
+ return -ENOMEM;
+
+ return 0;
+}
+
+static void ath11k_hal_free_cont_wrp(struct ath11k_base *ab)
+{
+ struct ath11k_hal *hal = &ab->hal;
+ size_t size;
+
+ if (!hal->wrp.vaddr)
+ return;
+
+ size = sizeof(u32) * HAL_SRNG_NUM_LMAC_RINGS;
+ dma_free_coherent(ab->dev, size,
+ hal->wrp.vaddr, hal->wrp.paddr);
+ hal->wrp.vaddr = NULL;
+}
+
+static void ath11k_hal_ce_dst_setup(struct ath11k_base *ab,
+ struct hal_srng *srng, int ring_num)
+{
+ const struct hal_srng_config *srng_config = &hw_srng_config[HAL_CE_DST];
+ u32 addr;
+ u32 val;
+
+ addr = HAL_CE_DST_RING_CTRL +
+ srng_config->reg_start[HAL_SRNG_REG_GRP_R0] +
+ ring_num * srng_config->reg_size[HAL_SRNG_REG_GRP_R0];
+ val = ath11k_ahb_read32(ab, addr);
+ val &= ~HAL_CE_DST_R0_DEST_CTRL_MAX_LEN;
+ val |= FIELD_PREP(HAL_CE_DST_R0_DEST_CTRL_MAX_LEN,
+ srng->u.dst_ring.max_buffer_length);
+ ath11k_ahb_write32(ab, addr, val);
+}
+
+static void ath11k_hal_srng_dst_hw_init(struct ath11k_base *ab,
+ struct hal_srng *srng)
+{
+ struct ath11k_hal *hal = &ab->hal;
+ u32 val;
+ u64 hp_addr;
+ u32 reg_base;
+
+ reg_base = srng->hwreg_base[HAL_SRNG_REG_GRP_R0];
+
+ if (srng->flags & HAL_SRNG_FLAGS_MSI_INTR) {
+ ath11k_ahb_write32(ab, reg_base +
+ HAL_REO1_RING_MSI1_BASE_LSB_OFFSET,
+ (u32)srng->msi_addr);
+
+ val = FIELD_PREP(HAL_REO1_RING_MSI1_BASE_MSB_ADDR,
+ ((u64)srng->msi_addr >>
+ HAL_ADDR_MSB_REG_SHIFT)) |
+ HAL_REO1_RING_MSI1_BASE_MSB_MSI1_ENABLE;
+ ath11k_ahb_write32(ab, reg_base +
+ HAL_REO1_RING_MSI1_BASE_MSB_OFFSET, val);
+
+ ath11k_ahb_write32(ab,
+ reg_base + HAL_REO1_RING_MSI1_DATA_OFFSET,
+ srng->msi_data);
+ }
+
+ ath11k_ahb_write32(ab, reg_base, (u32)srng->ring_base_paddr);
+
+ val = FIELD_PREP(HAL_REO1_RING_BASE_MSB_RING_BASE_ADDR_MSB,
+ ((u64)srng->ring_base_paddr >>
+ HAL_ADDR_MSB_REG_SHIFT)) |
+ FIELD_PREP(HAL_REO1_RING_BASE_MSB_RING_SIZE,
+ (srng->entry_size * srng->num_entries));
+ ath11k_ahb_write32(ab, reg_base + HAL_REO1_RING_BASE_MSB_OFFSET, val);
+
+ val = FIELD_PREP(HAL_REO1_RING_ID_RING_ID, srng->ring_id) |
+ FIELD_PREP(HAL_REO1_RING_ID_ENTRY_SIZE, srng->entry_size);
+ ath11k_ahb_write32(ab, reg_base + HAL_REO1_RING_ID_OFFSET, val);
+
+ /* interrupt setup */
+ val = FIELD_PREP(HAL_REO1_RING_PRDR_INT_SETUP_INTR_TMR_THOLD,
+ (srng->intr_timer_thres_us >> 3));
+
+ val |= FIELD_PREP(HAL_REO1_RING_PRDR_INT_SETUP_BATCH_COUNTER_THOLD,
+ (srng->intr_batch_cntr_thres_entries *
+ srng->entry_size));
+
+ ath11k_ahb_write32(ab,
+ reg_base + HAL_REO1_RING_PRODUCER_INT_SETUP_OFFSET,
+ val);
+
+ hp_addr = hal->rdp.paddr +
+ ((unsigned long)srng->u.dst_ring.hp_addr -
+ (unsigned long)hal->rdp.vaddr);
+ ath11k_ahb_write32(ab, reg_base + HAL_REO1_RING_HP_ADDR_LSB_OFFSET,
+ hp_addr & HAL_ADDR_LSB_REG_MASK);
+ ath11k_ahb_write32(ab, reg_base + HAL_REO1_RING_HP_ADDR_MSB_OFFSET,
+ hp_addr >> HAL_ADDR_MSB_REG_SHIFT);
+
+ /* Initialize head and tail pointers to indicate ring is empty */
+ reg_base = srng->hwreg_base[HAL_SRNG_REG_GRP_R2];
+ ath11k_ahb_write32(ab, reg_base, 0);
+ ath11k_ahb_write32(ab, reg_base + HAL_REO1_RING_TP_OFFSET, 0);
+ *srng->u.dst_ring.hp_addr = 0;
+
+ reg_base = srng->hwreg_base[HAL_SRNG_REG_GRP_R0];
+ val = 0;
+ if (srng->flags & HAL_SRNG_FLAGS_DATA_TLV_SWAP)
+ val |= HAL_REO1_RING_MISC_DATA_TLV_SWAP;
+ if (srng->flags & HAL_SRNG_FLAGS_RING_PTR_SWAP)
+ val |= HAL_REO1_RING_MISC_HOST_FW_SWAP;
+ if (srng->flags & HAL_SRNG_FLAGS_MSI_SWAP)
+ val |= HAL_REO1_RING_MISC_MSI_SWAP;
+ val |= HAL_REO1_RING_MISC_SRNG_ENABLE;
+
+ ath11k_ahb_write32(ab, reg_base + HAL_REO1_RING_MISC_OFFSET, val);
+}
+
+static void ath11k_hal_srng_src_hw_init(struct ath11k_base *ab,
+ struct hal_srng *srng)
+{
+ struct ath11k_hal *hal = &ab->hal;
+ u32 val;
+ u64 tp_addr;
+ u32 reg_base;
+
+ reg_base = srng->hwreg_base[HAL_SRNG_REG_GRP_R0];
+
+ if (srng->flags & HAL_SRNG_FLAGS_MSI_INTR) {
+ ath11k_ahb_write32(ab, reg_base +
+ HAL_TCL1_RING_MSI1_BASE_LSB_OFFSET,
+ (u32)srng->msi_addr);
+
+ val = FIELD_PREP(HAL_TCL1_RING_MSI1_BASE_MSB_ADDR,
+ ((u64)srng->msi_addr >>
+ HAL_ADDR_MSB_REG_SHIFT)) |
+ HAL_TCL1_RING_MSI1_BASE_MSB_MSI1_ENABLE;
+ ath11k_ahb_write32(ab, reg_base +
+ HAL_TCL1_RING_MSI1_BASE_MSB_OFFSET,
+ val);
+
+ ath11k_ahb_write32(ab, reg_base +
+ HAL_TCL1_RING_MSI1_DATA_OFFSET,
+ srng->msi_data);
+ }
+
+ ath11k_ahb_write32(ab, reg_base, (u32)srng->ring_base_paddr);
+
+ val = FIELD_PREP(HAL_TCL1_RING_BASE_MSB_RING_BASE_ADDR_MSB,
+ ((u64)srng->ring_base_paddr >>
+ HAL_ADDR_MSB_REG_SHIFT)) |
+ FIELD_PREP(HAL_TCL1_RING_BASE_MSB_RING_SIZE,
+ (srng->entry_size * srng->num_entries));
+ ath11k_ahb_write32(ab, reg_base + HAL_TCL1_RING_BASE_MSB_OFFSET, val);
+
+ val = FIELD_PREP(HAL_REO1_RING_ID_ENTRY_SIZE, srng->entry_size);
+ ath11k_ahb_write32(ab, reg_base + HAL_TCL1_RING_ID_OFFSET, val);
+
+ /* interrupt setup */
+ /* NOTE: IPQ8074 v2 requires the interrupt timer threshold in
+ * units of 8 usecs instead of 1 usec (as required by v1).
+ */
+ val = FIELD_PREP(HAL_TCL1_RING_CONSR_INT_SETUP_IX0_INTR_TMR_THOLD,
+ srng->intr_timer_thres_us);
+
+ val |= FIELD_PREP(HAL_TCL1_RING_CONSR_INT_SETUP_IX0_BATCH_COUNTER_THOLD,
+ (srng->intr_batch_cntr_thres_entries *
+ srng->entry_size));
+
+ ath11k_ahb_write32(ab,
+ reg_base + HAL_TCL1_RING_CONSR_INT_SETUP_IX0_OFFSET,
+ val);
+
+ val = 0;
+ if (srng->flags & HAL_SRNG_FLAGS_LOW_THRESH_INTR_EN) {
+ val |= FIELD_PREP(HAL_TCL1_RING_CONSR_INT_SETUP_IX1_LOW_THOLD,
+ srng->u.src_ring.low_threshold);
+ }
+ ath11k_ahb_write32(ab,
+ reg_base + HAL_TCL1_RING_CONSR_INT_SETUP_IX1_OFFSET,
+ val);
+
+ if (srng->ring_id != HAL_SRNG_RING_ID_WBM_IDLE_LINK) {
+ tp_addr = hal->rdp.paddr +
+ ((unsigned long)srng->u.src_ring.tp_addr -
+ (unsigned long)hal->rdp.vaddr);
+ ath11k_ahb_write32(ab,
+ reg_base + HAL_TCL1_RING_TP_ADDR_LSB_OFFSET,
+ tp_addr & HAL_ADDR_LSB_REG_MASK);
+ ath11k_ahb_write32(ab,
+ reg_base + HAL_TCL1_RING_TP_ADDR_MSB_OFFSET,
+ tp_addr >> HAL_ADDR_MSB_REG_SHIFT);
+ }
+
+ /* Initialize head and tail pointers to indicate ring is empty */
+ reg_base = srng->hwreg_base[HAL_SRNG_REG_GRP_R2];
+ ath11k_ahb_write32(ab, reg_base, 0);
+ ath11k_ahb_write32(ab, reg_base + HAL_TCL1_RING_TP_OFFSET, 0);
+ *srng->u.src_ring.tp_addr = 0;
+
+ reg_base = srng->hwreg_base[HAL_SRNG_REG_GRP_R0];
+ val = 0;
+ if (srng->flags & HAL_SRNG_FLAGS_DATA_TLV_SWAP)
+ val |= HAL_TCL1_RING_MISC_DATA_TLV_SWAP;
+ if (srng->flags & HAL_SRNG_FLAGS_RING_PTR_SWAP)
+ val |= HAL_TCL1_RING_MISC_HOST_FW_SWAP;
+ if (srng->flags & HAL_SRNG_FLAGS_MSI_SWAP)
+ val |= HAL_TCL1_RING_MISC_MSI_SWAP;
+
+ /* Loop count is not used for SRC rings */
+ val |= HAL_TCL1_RING_MISC_MSI_LOOPCNT_DISABLE;
+
+ val |= HAL_TCL1_RING_MISC_SRNG_ENABLE;
+
+ ath11k_ahb_write32(ab, reg_base + HAL_TCL1_RING_MISC_OFFSET, val);
+}
+
+static void ath11k_hal_srng_hw_init(struct ath11k_base *ab,
+ struct hal_srng *srng)
+{
+ if (srng->ring_dir == HAL_SRNG_DIR_SRC)
+ ath11k_hal_srng_src_hw_init(ab, srng);
+ else
+ ath11k_hal_srng_dst_hw_init(ab, srng);
+}
+
+static int ath11k_hal_srng_get_ring_id(struct ath11k_base *ab,
+ enum hal_ring_type type,
+ int ring_num, int mac_id)
+{
+ const struct hal_srng_config *srng_config = &hw_srng_config[type];
+ int ring_id;
+
+ if (ring_num >= srng_config->max_rings) {
+ ath11k_warn(ab, "invalid ring number: %d\n", ring_num);
+ return -EINVAL;
+ }
+
+ ring_id = srng_config->start_ring_id + ring_num;
+ if (srng_config->lmac_ring)
+ ring_id += mac_id * HAL_SRNG_RINGS_PER_LMAC;
+
+ if (WARN_ON(ring_id >= HAL_SRNG_RING_ID_MAX))
+ return -EINVAL;
+
+ return ring_id;
+}
+
+int ath11k_hal_srng_get_entrysize(u32 ring_type)
+{
+ const struct hal_srng_config *srng_config;
+
+ if (WARN_ON(ring_type >= HAL_MAX_RING_TYPES))
+ return -EINVAL;
+
+ srng_config = &hw_srng_config[ring_type];
+
+ return (srng_config->entry_size << 2);
+}
+
+int ath11k_hal_srng_get_max_entries(u32 ring_type)
+{
+ const struct hal_srng_config *srng_config;
+
+ if (WARN_ON(ring_type >= HAL_MAX_RING_TYPES))
+ return -EINVAL;
+
+ srng_config = &hw_srng_config[ring_type];
+
+ return (srng_config->max_size / srng_config->entry_size);
+}
+
+void ath11k_hal_srng_get_params(struct ath11k_base *ab, struct hal_srng *srng,
+ struct hal_srng_params *params)
+{
+ params->ring_base_paddr = srng->ring_base_paddr;
+ params->ring_base_vaddr = srng->ring_base_vaddr;
+ params->num_entries = srng->num_entries;
+ params->intr_timer_thres_us = srng->intr_timer_thres_us;
+ params->intr_batch_cntr_thres_entries =
+ srng->intr_batch_cntr_thres_entries;
+ params->low_threshold = srng->u.src_ring.low_threshold;
+ params->flags = srng->flags;
+}
+
+dma_addr_t ath11k_hal_srng_get_hp_addr(struct ath11k_base *ab,
+ struct hal_srng *srng)
+{
+ if (!(srng->flags & HAL_SRNG_FLAGS_LMAC_RING))
+ return 0;
+
+ if (srng->ring_dir == HAL_SRNG_DIR_SRC)
+ return ab->hal.wrp.paddr +
+ ((unsigned long)srng->u.src_ring.hp_addr -
+ (unsigned long)ab->hal.wrp.vaddr);
+ else
+ return ab->hal.rdp.paddr +
+ ((unsigned long)srng->u.dst_ring.hp_addr -
+ (unsigned long)ab->hal.rdp.vaddr);
+}
+
+dma_addr_t ath11k_hal_srng_get_tp_addr(struct ath11k_base *ab,
+ struct hal_srng *srng)
+{
+ if (!(srng->flags & HAL_SRNG_FLAGS_LMAC_RING))
+ return 0;
+
+ if (srng->ring_dir == HAL_SRNG_DIR_SRC)
+ return ab->hal.rdp.paddr +
+ ((unsigned long)srng->u.src_ring.tp_addr -
+ (unsigned long)ab->hal.rdp.vaddr);
+ else
+ return ab->hal.wrp.paddr +
+ ((unsigned long)srng->u.dst_ring.tp_addr -
+ (unsigned long)ab->hal.wrp.vaddr);
+}
+
+u32 ath11k_hal_ce_get_desc_size(enum hal_ce_desc type)
+{
+ switch (type) {
+ case HAL_CE_DESC_SRC:
+ return sizeof(struct hal_ce_srng_src_desc);
+ case HAL_CE_DESC_DST:
+ return sizeof(struct hal_ce_srng_dest_desc);
+ case HAL_CE_DESC_DST_STATUS:
+ return sizeof(struct hal_ce_srng_dst_status_desc);
+ }
+
+ return 0;
+}
+
+void ath11k_hal_ce_src_set_desc(void *buf, dma_addr_t paddr, u32 len, u32 id,
+ u8 byte_swap_data)
+{
+ struct hal_ce_srng_src_desc *desc = (struct hal_ce_srng_src_desc *)buf;
+
+ desc->buffer_addr_low = paddr & HAL_ADDR_LSB_REG_MASK;
+ desc->buffer_addr_info =
+ FIELD_PREP(HAL_CE_SRC_DESC_ADDR_INFO_ADDR_HI,
+ ((u64)paddr >> HAL_ADDR_MSB_REG_SHIFT)) |
+ FIELD_PREP(HAL_CE_SRC_DESC_ADDR_INFO_BYTE_SWAP,
+ byte_swap_data) |
+ FIELD_PREP(HAL_CE_SRC_DESC_ADDR_INFO_GATHER, 0) |
+ FIELD_PREP(HAL_CE_SRC_DESC_ADDR_INFO_LEN, len);
+ desc->meta_info = FIELD_PREP(HAL_CE_SRC_DESC_META_INFO_DATA, id);
+}
+
+void ath11k_hal_ce_dst_set_desc(void *buf, dma_addr_t paddr)
+{
+ struct hal_ce_srng_dest_desc *desc =
+ (struct hal_ce_srng_dest_desc *)buf;
+
+ desc->buffer_addr_low = paddr & HAL_ADDR_LSB_REG_MASK;
+ desc->buffer_addr_info =
+ FIELD_PREP(HAL_CE_DEST_DESC_ADDR_INFO_ADDR_HI,
+ ((u64)paddr >> HAL_ADDR_MSB_REG_SHIFT));
+}
+
+u32 ath11k_hal_ce_dst_status_get_length(void *buf)
+{
+ struct hal_ce_srng_dst_status_desc *desc =
+ (struct hal_ce_srng_dst_status_desc *)buf;
+ u32 len;
+
+ len = FIELD_GET(HAL_CE_DST_STATUS_DESC_FLAGS_LEN, desc->flags);
+ desc->flags &= ~HAL_CE_DST_STATUS_DESC_FLAGS_LEN;
+
+ return len;
+}
+
+void ath11k_hal_set_link_desc_addr(struct hal_wbm_link_desc *desc, u32 cookie,
+ dma_addr_t paddr)
+{
+ desc->buf_addr_info.info0 = FIELD_PREP(BUFFER_ADDR_INFO0_ADDR,
+ (paddr & HAL_ADDR_LSB_REG_MASK));
+ desc->buf_addr_info.info1 = FIELD_PREP(BUFFER_ADDR_INFO1_ADDR,
+ ((u64)paddr >> HAL_ADDR_MSB_REG_SHIFT)) |
+ FIELD_PREP(BUFFER_ADDR_INFO1_RET_BUF_MGR, 1) |
+ FIELD_PREP(BUFFER_ADDR_INFO1_SW_COOKIE, cookie);
+}
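+
+/* Illustrative sketch only: the LSB/MSB split used throughout this file.
+ * DMA addresses wider than 32 bits are programmed as a low word plus an
+ * upper field; masking and shifting back reassembles the original address
+ * (assuming HAL_ADDR_MSB_REG_SHIFT is the width of the low word).
+ */
+#if 0	/* hypothetical address for illustration */
+	dma_addr_t paddr = 0x0012345678abULL;
+	u32 lo = paddr & HAL_ADDR_LSB_REG_MASK;
+	u32 hi = (u64)paddr >> HAL_ADDR_MSB_REG_SHIFT;
+
+	WARN_ON((((u64)hi << HAL_ADDR_MSB_REG_SHIFT) | lo) != paddr);
+#endif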
+
+u32 *ath11k_hal_srng_dst_peek(struct ath11k_base *ab, struct hal_srng *srng)
+{
+ lockdep_assert_held(&srng->lock);
+
+ if (srng->u.dst_ring.tp != srng->u.dst_ring.cached_hp)
+ return (srng->ring_base_vaddr + srng->u.dst_ring.tp);
+
+ return NULL;
+}
+
+u32 *ath11k_hal_srng_dst_get_next_entry(struct ath11k_base *ab,
+ struct hal_srng *srng)
+{
+ u32 *desc;
+
+ lockdep_assert_held(&srng->lock);
+
+ if (srng->u.dst_ring.tp == srng->u.dst_ring.cached_hp)
+ return NULL;
+
+ desc = srng->ring_base_vaddr + srng->u.dst_ring.tp;
+
+ srng->u.dst_ring.tp = (srng->u.dst_ring.tp + srng->entry_size) %
+ srng->ring_size;
+
+ return desc;
+}
+
+int ath11k_hal_srng_dst_num_free(struct ath11k_base *ab, struct hal_srng *srng,
+ bool sync_hw_ptr)
+{
+ u32 tp, hp;
+
+ lockdep_assert_held(&srng->lock);
+
+ tp = srng->u.dst_ring.tp;
+
+ if (sync_hw_ptr) {
+ hp = *srng->u.dst_ring.hp_addr;
+ srng->u.dst_ring.cached_hp = hp;
+ } else {
+ hp = srng->u.dst_ring.cached_hp;
+ }
+
+ if (hp >= tp)
+ return (hp - tp) / srng->entry_size;
+ else
+ return (srng->ring_size - tp + hp) / srng->entry_size;
+}
+
+/* Returns number of available entries in src ring */
+int ath11k_hal_srng_src_num_free(struct ath11k_base *ab, struct hal_srng *srng,
+ bool sync_hw_ptr)
+{
+ u32 tp, hp;
+
+ lockdep_assert_held(&srng->lock);
+
+ hp = srng->u.src_ring.hp;
+
+ if (sync_hw_ptr) {
+ tp = *srng->u.src_ring.tp_addr;
+ srng->u.src_ring.cached_tp = tp;
+ } else {
+ tp = srng->u.src_ring.cached_tp;
+ }
+
+ if (tp > hp)
+ return ((tp - hp) / srng->entry_size) - 1;
+ else
+ return ((srng->ring_size - hp + tp) / srng->entry_size) - 1;
+}
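+
+/* Illustrative sketch only: why one entry is deducted above. A src ring is
+ * never allowed to fill completely, otherwise hp == tp would be ambiguous
+ * between full and empty; keeping one slot unused preserves hp == tp as
+ * "empty".
+ */
+#if 0	/* hypothetical numbers, all in dwords */
+	u32 ring_size = 32, entry_size = 4;
+	u32 hp = 8, tp = 20;				/* tp > hp case */
+	u32 free = ((tp - hp) / entry_size) - 1;	/* 2 usable entries */
+#endif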
+
+u32 *ath11k_hal_srng_src_get_next_entry(struct ath11k_base *ab,
+ struct hal_srng *srng)
+{
+ u32 *desc;
+ u32 next_hp;
+
+ lockdep_assert_held(&srng->lock);
+
+ /* TODO: Using % is expensive, but we have to do this since the size
+ * of some SRNG rings is not a power of 2 (due to descriptor sizes).
+ * Need to see if a separate function can be defined for rings with a
+ * power-of-2 ring size (TCL2SW, REO2SW, SW2RXDMA and CE rings) so
+ * that the overhead of % can be avoided with a mask (&); a sketch of
+ * that variant follows this function.
+ */
+ next_hp = (srng->u.src_ring.hp + srng->entry_size) % srng->ring_size;
+
+ if (next_hp == srng->u.src_ring.cached_tp)
+ return NULL;
+
+ desc = srng->ring_base_vaddr + srng->u.src_ring.hp;
+ srng->u.src_ring.hp = next_hp;
+
+ /* TODO: Reap functionality is not used by all rings. If a particular
+ * ring does not use it, we need not update reap_hp with the next_hp
+ * pointer. Make sure a separate function is in place before optimizing
+ * away the reap_hp update below.
+ */
+ srng->u.src_ring.reap_hp = next_hp;
+
+ return desc;
+}
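+
+/* Illustrative sketch only of the mask-based variant the TODO above refers
+ * to: when ring_size (in dwords) is a power of 2, the modulo can become a
+ * bitwise AND. Hypothetical helper, not wired up anywhere.
+ */
+#if 0
+static u32 ath11k_hal_srng_src_next_hp_pow2(struct hal_srng *srng)
+{
+	/* valid only when srng->ring_size is a power of 2 */
+	return (srng->u.src_ring.hp + srng->entry_size) &
+	       (srng->ring_size - 1);
+}
+#endif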
+
+u32 *ath11k_hal_srng_src_reap_next(struct ath11k_base *ab,
+ struct hal_srng *srng)
+{
+ u32 *desc;
+ u32 next_reap_hp;
+
+ lockdep_assert_held(&srng->lock);
+
+ next_reap_hp = (srng->u.src_ring.reap_hp + srng->entry_size) %
+ srng->ring_size;
+
+ if (next_reap_hp == srng->u.src_ring.cached_tp)
+ return NULL;
+
+ desc = srng->ring_base_vaddr + next_reap_hp;
+ srng->u.src_ring.reap_hp = next_reap_hp;
+
+ return desc;
+}
+
+u32 *ath11k_hal_srng_src_get_next_reaped(struct ath11k_base *ab,
+ struct hal_srng *srng)
+{
+ u32 *desc;
+
+ lockdep_assert_held(&srng->lock);
+
+ if (srng->u.src_ring.hp == srng->u.src_ring.reap_hp)
+ return NULL;
+
+ desc = srng->ring_base_vaddr + srng->u.src_ring.hp;
+ srng->u.src_ring.hp = (srng->u.src_ring.hp + srng->entry_size) %
+ srng->ring_size;
+
+ return desc;
+}
+
+u32 *ath11k_hal_srng_src_peek(struct ath11k_base *ab, struct hal_srng *srng)
+{
+ lockdep_assert_held(&srng->lock);
+
+ if (((srng->u.src_ring.hp + srng->entry_size) % srng->ring_size) ==
+ srng->u.src_ring.cached_tp)
+ return NULL;
+
+ return srng->ring_base_vaddr + srng->u.src_ring.hp;
+}
+
+void ath11k_hal_srng_access_begin(struct ath11k_base *ab, struct hal_srng *srng)
+{
+ lockdep_assert_held(&srng->lock);
+
+ if (srng->ring_dir == HAL_SRNG_DIR_SRC)
+ srng->u.src_ring.cached_tp =
+ *(volatile u32 *)srng->u.src_ring.tp_addr;
+ else
+ srng->u.dst_ring.cached_hp = *srng->u.dst_ring.hp_addr;
+}
+
+/* Update cached ring head/tail pointers to HW. ath11k_hal_srng_access_begin()
+ * should have been called before this.
+ */
+void ath11k_hal_srng_access_end(struct ath11k_base *ab, struct hal_srng *srng)
+{
+ lockdep_assert_held(&srng->lock);
+
+ /* TODO: See if we need a write memory barrier here */
+ if (srng->flags & HAL_SRNG_FLAGS_LMAC_RING) {
+ /* For LMAC rings, ring pointer updates are done through FW and
+ * hence written to a shared memory location that is read by FW
+ */
+ if (srng->ring_dir == HAL_SRNG_DIR_SRC)
+ *srng->u.src_ring.hp_addr = srng->u.src_ring.hp;
+ else
+ *srng->u.dst_ring.tp_addr = srng->u.dst_ring.tp;
+ } else {
+ if (srng->ring_dir == HAL_SRNG_DIR_SRC) {
+ ath11k_ahb_write32(ab,
+ (unsigned long)srng->u.src_ring.hp_addr -
+ (unsigned long)ab->mem,
+ srng->u.src_ring.hp);
+ } else {
+ ath11k_ahb_write32(ab,
+ (unsigned long)srng->u.dst_ring.tp_addr -
+ (unsigned long)ab->mem,
+ srng->u.dst_ring.tp);
+ }
+ }
+}
+
+void ath11k_hal_setup_link_idle_list(struct ath11k_base *ab,
+ struct hal_wbm_idle_scatter_list *sbuf,
+ u32 nsbufs, u32 tot_link_desc,
+ u32 end_offset)
+{
+ struct ath11k_buffer_addr *link_addr;
+ int i;
+ u32 reg_scatter_buf_sz = HAL_WBM_IDLE_SCATTER_BUF_SIZE / 64;
+
+ link_addr = (void *)sbuf[0].vaddr + HAL_WBM_IDLE_SCATTER_BUF_SIZE;
+
+ for (i = 1; i < nsbufs; i++) {
+ link_addr->info0 = sbuf[i].paddr & HAL_ADDR_LSB_REG_MASK;
+ link_addr->info1 = FIELD_PREP(
+ HAL_WBM_SCATTERED_DESC_MSB_BASE_ADDR_39_32,
+ (u64)sbuf[i].paddr >> HAL_ADDR_MSB_REG_SHIFT) |
+ FIELD_PREP(
+ HAL_WBM_SCATTERED_DESC_MSB_BASE_ADDR_MATCH_TAG,
+ BASE_ADDR_MATCH_TAG_VAL);
+
+ link_addr = (void *)sbuf[i].vaddr +
+ HAL_WBM_IDLE_SCATTER_BUF_SIZE;
+ }
+
+ ath11k_ahb_write32(ab,
+ HAL_SEQ_WCSS_UMAC_WBM_REG + HAL_WBM_R0_IDLE_LIST_CONTROL_ADDR,
+ FIELD_PREP(HAL_WBM_SCATTER_BUFFER_SIZE, reg_scatter_buf_sz) |
+ FIELD_PREP(HAL_WBM_LINK_DESC_IDLE_LIST_MODE, 0x1));
+ ath11k_ahb_write32(ab,
+ HAL_SEQ_WCSS_UMAC_WBM_REG + HAL_WBM_R0_IDLE_LIST_SIZE_ADDR,
+ FIELD_PREP(HAL_WBM_SCATTER_RING_SIZE_OF_IDLE_LINK_DESC_LIST,
+ reg_scatter_buf_sz * nsbufs));
+ ath11k_ahb_write32(ab,
+ HAL_SEQ_WCSS_UMAC_WBM_REG +
+ HAL_WBM_SCATTERED_RING_BASE_LSB,
+ FIELD_PREP(BUFFER_ADDR_INFO0_ADDR,
+ sbuf[0].paddr & HAL_ADDR_LSB_REG_MASK));
+ ath11k_ahb_write32(ab,
+ HAL_SEQ_WCSS_UMAC_WBM_REG +
+ HAL_WBM_SCATTERED_RING_BASE_MSB,
+ FIELD_PREP(
+ HAL_WBM_SCATTERED_DESC_MSB_BASE_ADDR_39_32,
+ (u64)sbuf[0].paddr >> HAL_ADDR_MSB_REG_SHIFT) |
+ FIELD_PREP(
+ HAL_WBM_SCATTERED_DESC_MSB_BASE_ADDR_MATCH_TAG,
+ BASE_ADDR_MATCH_TAG_VAL));
+
+ /* Setup head and tail pointers for the idle list */
+ ath11k_ahb_write32(ab,
+ HAL_SEQ_WCSS_UMAC_WBM_REG +
+ HAL_WBM_SCATTERED_DESC_PTR_HEAD_INFO_IX0,
+ FIELD_PREP(BUFFER_ADDR_INFO0_ADDR,
+ sbuf[nsbufs - 1].paddr));
+ ath11k_ahb_write32(ab,
+ HAL_SEQ_WCSS_UMAC_WBM_REG +
+ HAL_WBM_SCATTERED_DESC_PTR_HEAD_INFO_IX1,
+ FIELD_PREP(
+ HAL_WBM_SCATTERED_DESC_MSB_BASE_ADDR_39_32,
+ ((u64)sbuf[nsbufs - 1].paddr >>
+ HAL_ADDR_MSB_REG_SHIFT)) |
+ FIELD_PREP(HAL_WBM_SCATTERED_DESC_HEAD_P_OFFSET_IX1,
+ (end_offset >> 2)));
+ ath11k_ahb_write32(ab,
+ HAL_SEQ_WCSS_UMAC_WBM_REG +
+ HAL_WBM_SCATTERED_DESC_PTR_HEAD_INFO_IX0,
+ FIELD_PREP(BUFFER_ADDR_INFO0_ADDR,
+ sbuf[0].paddr));
+
+ ath11k_ahb_write32(ab,
+ HAL_SEQ_WCSS_UMAC_WBM_REG +
+ HAL_WBM_SCATTERED_DESC_PTR_TAIL_INFO_IX0,
+ FIELD_PREP(BUFFER_ADDR_INFO0_ADDR,
+ sbuf[0].paddr));
+ ath11k_ahb_write32(ab,
+ HAL_SEQ_WCSS_UMAC_WBM_REG +
+ HAL_WBM_SCATTERED_DESC_PTR_TAIL_INFO_IX1,
+ FIELD_PREP(
+ HAL_WBM_SCATTERED_DESC_MSB_BASE_ADDR_39_32,
+ ((u64)sbuf[0].paddr >> HAL_ADDR_MSB_REG_SHIFT)) |
+ FIELD_PREP(HAL_WBM_SCATTERED_DESC_TAIL_P_OFFSET_IX1,
+ 0));
+ ath11k_ahb_write32(ab,
+ HAL_SEQ_WCSS_UMAC_WBM_REG +
+ HAL_WBM_SCATTERED_DESC_PTR_HP_ADDR,
+ 2 * tot_link_desc);
+
+ /* Enable the SRNG */
+ ath11k_ahb_write32(ab,
+ HAL_SEQ_WCSS_UMAC_WBM_REG +
+ HAL_WBM_IDLE_LINK_RING_MISC_ADDR, 0x40);
+}
+
+int ath11k_hal_srng_setup(struct ath11k_base *ab, enum hal_ring_type type,
+ int ring_num, int mac_id,
+ struct hal_srng_params *params)
+{
+ struct ath11k_hal *hal = &ab->hal;
+ const struct hal_srng_config *srng_config = &hw_srng_config[type];
+ struct hal_srng *srng;
+ int ring_id;
+ u32 lmac_idx;
+ int i;
+ u32 reg_base;
+
+ ring_id = ath11k_hal_srng_get_ring_id(ab, type, ring_num, mac_id);
+ if (ring_id < 0)
+ return ring_id;
+
+ srng = &hal->srng_list[ring_id];
+
+ srng->ring_id = ring_id;
+ srng->ring_dir = srng_config->ring_dir;
+ srng->ring_base_paddr = params->ring_base_paddr;
+ srng->ring_base_vaddr = params->ring_base_vaddr;
+ srng->entry_size = srng_config->entry_size;
+ srng->num_entries = params->num_entries;
+ srng->ring_size = srng->entry_size * srng->num_entries;
+ srng->intr_batch_cntr_thres_entries =
+ params->intr_batch_cntr_thres_entries;
+ srng->intr_timer_thres_us = params->intr_timer_thres_us;
+ srng->flags = params->flags;
+ spin_lock_init(&srng->lock);
+
+ for (i = 0; i < HAL_SRNG_NUM_REG_GRP; i++) {
+ srng->hwreg_base[i] = srng_config->reg_start[i] +
+ (ring_num * srng_config->reg_size[i]);
+ }
+
+ memset(srng->ring_base_vaddr, 0,
+ (srng->entry_size * srng->num_entries) << 2);
+
+ /* TODO: Add comments on these swap configurations */
+ if (IS_ENABLED(CONFIG_CPU_BIG_ENDIAN))
+ srng->flags |= HAL_SRNG_FLAGS_MSI_SWAP | HAL_SRNG_FLAGS_DATA_TLV_SWAP |
+ HAL_SRNG_FLAGS_RING_PTR_SWAP;
+
+ reg_base = srng->hwreg_base[HAL_SRNG_REG_GRP_R2];
+
+ if (srng->ring_dir == HAL_SRNG_DIR_SRC) {
+ srng->u.src_ring.hp = 0;
+ srng->u.src_ring.cached_tp = 0;
+ srng->u.src_ring.reap_hp = srng->ring_size - srng->entry_size;
+ srng->u.src_ring.tp_addr = (void *)(hal->rdp.vaddr + ring_id);
+ srng->u.src_ring.low_threshold = params->low_threshold *
+ srng->entry_size;
+ if (srng_config->lmac_ring) {
+ lmac_idx = ring_id - HAL_SRNG_RING_ID_LMAC1_ID_START;
+ srng->u.src_ring.hp_addr = (void *)(hal->wrp.vaddr +
+ lmac_idx);
+ srng->flags |= HAL_SRNG_FLAGS_LMAC_RING;
+ } else {
+ srng->u.src_ring.hp_addr =
+ (u32 *)((unsigned long)ab->mem + reg_base);
+ }
+ } else {
+ /* During initialization, the loop count in all the descriptors
+ * will be set to zero, and HW will set it to 1 on completing
+ * the descriptor update in the first loop, incrementing it by 1
+ * on subsequent loops (the loop count wraps around after
+ * reaching 0xffff). The 'loop_cnt' in SW ring state is the
+ * expected loop count in descriptors updated by HW (to be
+ * processed by SW).
+ */
+ srng->u.dst_ring.loop_cnt = 1;
+ srng->u.dst_ring.tp = 0;
+ srng->u.dst_ring.cached_hp = 0;
+ srng->u.dst_ring.hp_addr = (void *)(hal->rdp.vaddr + ring_id);
+ if (srng_config->lmac_ring) {
+ /* For LMAC rings, tail pointer updates will be done
+ * through FW by writing to a shared memory location
+ */
+ lmac_idx = ring_id - HAL_SRNG_RING_ID_LMAC1_ID_START;
+ srng->u.dst_ring.tp_addr = (void *)(hal->wrp.vaddr +
+ lmac_idx);
+ srng->flags |= HAL_SRNG_FLAGS_LMAC_RING;
+ } else {
+ srng->u.dst_ring.tp_addr =
+ (u32 *)((unsigned long)ab->mem + reg_base +
+ (HAL_REO1_RING_TP - HAL_REO1_RING_HP));
+ }
+ }
+
+ if (srng_config->lmac_ring)
+ return ring_id;
+
+ ath11k_hal_srng_hw_init(ab, srng);
+
+ if (type == HAL_CE_DST) {
+ srng->u.dst_ring.max_buffer_length = params->max_buffer_len;
+ ath11k_hal_ce_dst_setup(ab, srng, ring_num);
+ }
+
+ return ring_id;
+}
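
As a usage sketch (everything here is illustrative: the real callers live elsewhere in the driver, and the ring memory is assumed to have been DMA-allocated already), a caller fills a struct hal_srng_params and hands the ring over:

	/* Hedged sketch, not driver code: set up a REO destination ring.
	 * 'vaddr'/'paddr' are assumed to come from dma_alloc_coherent().
	 */
	static int example_setup_reo_dst_ring(struct ath11k_base *ab,
					      u32 *vaddr, dma_addr_t paddr,
					      int num_entries)
	{
		struct hal_srng_params params = { 0 };
		int ring_id;

		params.ring_base_vaddr = vaddr;
		params.ring_base_paddr = paddr;
		params.num_entries = num_entries;
		params.intr_batch_cntr_thres_entries =
			HAL_SRNG_INT_BATCH_THRESHOLD_RX;
		params.intr_timer_thres_us = HAL_SRNG_INT_TIMER_THRESHOLD_RX;

		ring_id = ath11k_hal_srng_setup(ab, HAL_REO_DST, 0, 0, &params);

		return ring_id < 0 ? ring_id : 0;
	}
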
+
+int ath11k_hal_srng_init(struct ath11k_base *ab)
+{
+ struct ath11k_hal *hal = &ab->hal;
+ int ret;
+
+ memset(hal, 0, sizeof(*hal));
+
+ hal->srng_config = hw_srng_config;
+
+ ret = ath11k_hal_alloc_cont_rdp(ab);
+ if (ret)
+ goto err_hal;
+
+ ret = ath11k_hal_alloc_cont_wrp(ab);
+ if (ret)
+ goto err_free_cont_rdp;
+
+ return 0;
+
+err_free_cont_rdp:
+ ath11k_hal_free_cont_rdp(ab);
+
+err_hal:
+ return ret;
+}
+
+void ath11k_hal_srng_deinit(struct ath11k_base *ab)
+{
+ ath11k_hal_free_cont_rdp(ab);
+ ath11k_hal_free_cont_wrp(ab);
+}
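
The expected pairing of the two calls above, sketched from the probe/remove perspective (the actual call sites are in the core code, not part of this file):

	/* Illustrative ordering only: init allocates the shared RDP/WRP
	 * pointer pages, so it must precede any ath11k_hal_srng_setup()
	 * call; deinit frees them on teardown.
	 */
	ret = ath11k_hal_srng_init(ab);
	if (ret)
		return ret;

	/* ... ath11k_hal_srng_setup() for each ring ... */

	ath11k_hal_srng_deinit(ab);
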
diff --git a/drivers/net/wireless/ath/ath11k/hal.h b/drivers/net/wireless/ath/ath11k/hal.h
new file mode 100644
index 000000000000..5b13ccdf39e4
--- /dev/null
+++ b/drivers/net/wireless/ath/ath11k/hal.h
@@ -0,0 +1,897 @@
+/* SPDX-License-Identifier: BSD-3-Clause-Clear */
+/*
+ * Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
+ */
+
+#ifndef ATH11K_HAL_H
+#define ATH11K_HAL_H
+
+#include "hal_desc.h"
+#include "rx_desc.h"
+
+struct ath11k_base;
+
+#define HAL_LINK_DESC_SIZE (32 << 2)
+#define HAL_LINK_DESC_ALIGN 128
+#define HAL_NUM_MPDUS_PER_LINK_DESC 6
+#define HAL_NUM_TX_MSDUS_PER_LINK_DESC 7
+#define HAL_NUM_RX_MSDUS_PER_LINK_DESC 6
+#define HAL_NUM_MPDU_LINKS_PER_QUEUE_DESC 12
+#define HAL_MAX_AVAIL_BLK_RES 3
+
+#define HAL_RING_BASE_ALIGN 8
+
+#define HAL_WBM_IDLE_SCATTER_BUF_SIZE_MAX 32704
+/* TODO: Check with hw team on the supported scatter buf size */
+#define HAL_WBM_IDLE_SCATTER_NEXT_PTR_SIZE 8
+#define HAL_WBM_IDLE_SCATTER_BUF_SIZE (HAL_WBM_IDLE_SCATTER_BUF_SIZE_MAX - \
+ HAL_WBM_IDLE_SCATTER_NEXT_PTR_SIZE)
+
+#define HAL_DSCP_TID_MAP_TBL_NUM_ENTRIES_MAX 48
+#define HAL_DSCP_TID_TBL_SIZE 24
+
+/* calculate the register address from bar0 of shadow register x */
+#define SHADOW_BASE_ADDRESS 0x00003024
+#define SHADOW_NUM_REGISTERS 36
+
+/* WCSS Relative address */
+#define HAL_SEQ_WCSS_UMAC_REO_REG 0x00a38000
+#define HAL_SEQ_WCSS_UMAC_TCL_REG 0x00a44000
+#define HAL_SEQ_WCSS_UMAC_CE0_SRC_REG 0x00a00000
+#define HAL_SEQ_WCSS_UMAC_CE0_DST_REG 0x00a01000
+#define HAL_SEQ_WCSS_UMAC_CE1_SRC_REG 0x00a02000
+#define HAL_SEQ_WCSS_UMAC_CE1_DST_REG 0x00a03000
+#define HAL_SEQ_WCSS_UMAC_WBM_REG 0x00a34000
+
+/* SW2TCL(x) R0 ring configuration address */
+#define HAL_TCL1_RING_CMN_CTRL_REG 0x00000014
+#define HAL_TCL1_RING_DSCP_TID_MAP 0x0000002c
+#define HAL_TCL1_RING_BASE_LSB 0x00000510
+#define HAL_TCL1_RING_BASE_MSB 0x00000514
+#define HAL_TCL1_RING_ID 0x00000518
+#define HAL_TCL1_RING_MISC 0x00000520
+#define HAL_TCL1_RING_TP_ADDR_LSB 0x0000052c
+#define HAL_TCL1_RING_TP_ADDR_MSB 0x00000530
+#define HAL_TCL1_RING_CONSUMER_INT_SETUP_IX0 0x00000540
+#define HAL_TCL1_RING_CONSUMER_INT_SETUP_IX1 0x00000544
+#define HAL_TCL1_RING_MSI1_BASE_LSB 0x00000558
+#define HAL_TCL1_RING_MSI1_BASE_MSB 0x0000055c
+#define HAL_TCL1_RING_MSI1_DATA 0x00000560
+#define HAL_TCL2_RING_BASE_LSB 0x00000568
+#define HAL_TCL_RING_BASE_LSB 0x00000618
+
+#define HAL_TCL1_RING_MSI1_BASE_LSB_OFFSET \
+ (HAL_TCL1_RING_MSI1_BASE_LSB - HAL_TCL1_RING_BASE_LSB)
+#define HAL_TCL1_RING_MSI1_BASE_MSB_OFFSET \
+ (HAL_TCL1_RING_MSI1_BASE_MSB - HAL_TCL1_RING_BASE_LSB)
+#define HAL_TCL1_RING_MSI1_DATA_OFFSET \
+ (HAL_TCL1_RING_MSI1_DATA - HAL_TCL1_RING_BASE_LSB)
+#define HAL_TCL1_RING_BASE_MSB_OFFSET \
+ (HAL_TCL1_RING_BASE_MSB - HAL_TCL1_RING_BASE_LSB)
+#define HAL_TCL1_RING_ID_OFFSET \
+ (HAL_TCL1_RING_ID - HAL_TCL1_RING_BASE_LSB)
+#define HAL_TCL1_RING_CONSR_INT_SETUP_IX0_OFFSET \
+ (HAL_TCL1_RING_CONSUMER_INT_SETUP_IX0 - HAL_TCL1_RING_BASE_LSB)
+#define HAL_TCL1_RING_CONSR_INT_SETUP_IX1_OFFSET \
+ (HAL_TCL1_RING_CONSUMER_INT_SETUP_IX1 - HAL_TCL1_RING_BASE_LSB)
+#define HAL_TCL1_RING_TP_ADDR_LSB_OFFSET \
+ (HAL_TCL1_RING_TP_ADDR_LSB - HAL_TCL1_RING_BASE_LSB)
+#define HAL_TCL1_RING_TP_ADDR_MSB_OFFSET \
+ (HAL_TCL1_RING_TP_ADDR_MSB - HAL_TCL1_RING_BASE_LSB)
+#define HAL_TCL1_RING_MISC_OFFSET \
+ (HAL_TCL1_RING_MISC - HAL_TCL1_RING_BASE_LSB)
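
Only the first TCL ring's registers are spelled out; sibling rings are reached by a per-ring stride, and the _OFFSET macros above relocate an individual register relative to whatever base was computed. A sketch of the derivation (the stride expression is an assumption here; in the driver it comes from reg_size[] in the hal_srng_config table):

	/* Hypothetical: absolute address of TCL ring n's MSI1 data register,
	 * where 'n' is the ring number.
	 */
	u32 stride = HAL_TCL2_RING_BASE_LSB - HAL_TCL1_RING_BASE_LSB;
	u32 ring_base = HAL_SEQ_WCSS_UMAC_TCL_REG + HAL_TCL1_RING_BASE_LSB +
			n * stride;
	u32 msi1_data = ring_base + HAL_TCL1_RING_MSI1_DATA_OFFSET;
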
+
+/* SW2TCL(x) R2 ring pointers (head/tail) address */
+#define HAL_TCL1_RING_HP 0x00002000
+#define HAL_TCL1_RING_TP 0x00002004
+#define HAL_TCL2_RING_HP 0x00002008
+#define HAL_TCL_RING_HP 0x00002018
+
+#define HAL_TCL1_RING_TP_OFFSET \
+ (HAL_TCL1_RING_TP - HAL_TCL1_RING_HP)
+
+/* TCL STATUS ring address */
+#define HAL_TCL_STATUS_RING_BASE_LSB 0x00000720
+#define HAL_TCL_STATUS_RING_HP 0x00002030
+
+/* REO2SW(x) R0 ring configuration address */
+#define HAL_REO1_GEN_ENABLE 0x00000000
+#define HAL_REO1_DEST_RING_CTRL_IX_2 0x0000000c
+#define HAL_REO1_DEST_RING_CTRL_IX_3 0x00000010
+#define HAL_REO1_RING_BASE_LSB 0x0000029c
+#define HAL_REO1_RING_BASE_MSB 0x000002a0
+#define HAL_REO1_RING_ID 0x000002a4
+#define HAL_REO1_RING_MISC 0x000002ac
+#define HAL_REO1_RING_HP_ADDR_LSB 0x000002b0
+#define HAL_REO1_RING_HP_ADDR_MSB 0x000002b4
+#define HAL_REO1_RING_PRODUCER_INT_SETUP 0x000002c0
+#define HAL_REO1_RING_MSI1_BASE_LSB 0x000002e4
+#define HAL_REO1_RING_MSI1_BASE_MSB 0x000002e8
+#define HAL_REO1_RING_MSI1_DATA 0x000002ec
+#define HAL_REO2_RING_BASE_LSB 0x000002f4
+#define HAL_REO1_AGING_THRESH_IX_0 0x00000564
+#define HAL_REO1_AGING_THRESH_IX_1 0x00000568
+#define HAL_REO1_AGING_THRESH_IX_2 0x0000056c
+#define HAL_REO1_AGING_THRESH_IX_3 0x00000570
+
+#define HAL_REO1_RING_MSI1_BASE_LSB_OFFSET \
+ (HAL_REO1_RING_MSI1_BASE_LSB - HAL_REO1_RING_BASE_LSB)
+#define HAL_REO1_RING_MSI1_BASE_MSB_OFFSET \
+ (HAL_REO1_RING_MSI1_BASE_MSB - HAL_REO1_RING_BASE_LSB)
+#define HAL_REO1_RING_MSI1_DATA_OFFSET \
+ (HAL_REO1_RING_MSI1_DATA - HAL_REO1_RING_BASE_LSB)
+#define HAL_REO1_RING_BASE_MSB_OFFSET \
+ (HAL_REO1_RING_BASE_MSB - HAL_REO1_RING_BASE_LSB)
+#define HAL_REO1_RING_ID_OFFSET (HAL_REO1_RING_ID - HAL_REO1_RING_BASE_LSB)
+#define HAL_REO1_RING_PRODUCER_INT_SETUP_OFFSET \
+ (HAL_REO1_RING_PRODUCER_INT_SETUP - HAL_REO1_RING_BASE_LSB)
+#define HAL_REO1_RING_HP_ADDR_LSB_OFFSET \
+ (HAL_REO1_RING_HP_ADDR_LSB - HAL_REO1_RING_BASE_LSB)
+#define HAL_REO1_RING_HP_ADDR_MSB_OFFSET \
+ (HAL_REO1_RING_HP_ADDR_MSB - HAL_REO1_RING_BASE_LSB)
+#define HAL_REO1_RING_MISC_OFFSET (HAL_REO1_RING_MISC - HAL_REO1_RING_BASE_LSB)
+
+/* REO2SW(x) R2 ring pointers (head/tail) address */
+#define HAL_REO1_RING_HP 0x00003038
+#define HAL_REO1_RING_TP 0x0000303c
+#define HAL_REO2_RING_HP 0x00003040
+
+#define HAL_REO1_RING_TP_OFFSET (HAL_REO1_RING_TP - HAL_REO1_RING_HP)
+
+/* REO2TCL R0 ring configuration address */
+#define HAL_REO_TCL_RING_BASE_LSB 0x000003fc
+
+/* REO2TCL R2 ring pointer (head/tail) address */
+#define HAL_REO_TCL_RING_HP 0x00003058
+
+/* REO CMD R0 address */
+#define HAL_REO_CMD_RING_BASE_LSB 0x00000194
+
+/* REO CMD R2 address */
+#define HAL_REO_CMD_HP 0x00003020
+
+/* SW2REO R0 address */
+#define HAL_SW2REO_RING_BASE_LSB 0x000001ec
+
+/* SW2REO R2 address */
+#define HAL_SW2REO_RING_HP 0x00003028
+
+/* CE ring R0 address */
+#define HAL_CE_DST_RING_BASE_LSB 0x00000000
+#define HAL_CE_DST_STATUS_RING_BASE_LSB 0x00000058
+#define HAL_CE_DST_RING_CTRL 0x000000b0
+
+/* CE ring R2 address */
+#define HAL_CE_DST_RING_HP 0x00000400
+#define HAL_CE_DST_STATUS_RING_HP 0x00000408
+
+/* REO status address */
+#define HAL_REO_STATUS_RING_BASE_LSB 0x00000504
+#define HAL_REO_STATUS_HP 0x00003070
+
+/* WBM Idle R0 address */
+#define HAL_WBM_IDLE_LINK_RING_BASE_LSB 0x00000860
+#define HAL_WBM_IDLE_LINK_RING_MISC_ADDR 0x00000870
+#define HAL_WBM_R0_IDLE_LIST_CONTROL_ADDR 0x00000048
+#define HAL_WBM_R0_IDLE_LIST_SIZE_ADDR 0x0000004c
+#define HAL_WBM_SCATTERED_RING_BASE_LSB 0x00000058
+#define HAL_WBM_SCATTERED_RING_BASE_MSB 0x0000005c
+#define HAL_WBM_SCATTERED_DESC_PTR_HEAD_INFO_IX0 0x00000068
+#define HAL_WBM_SCATTERED_DESC_PTR_HEAD_INFO_IX1 0x0000006c
+#define HAL_WBM_SCATTERED_DESC_PTR_TAIL_INFO_IX0 0x00000078
+#define HAL_WBM_SCATTERED_DESC_PTR_TAIL_INFO_IX1 0x0000007c
+#define HAL_WBM_SCATTERED_DESC_PTR_HP_ADDR 0x00000084
+
+/* WBM Idle R2 address */
+#define HAL_WBM_IDLE_LINK_RING_HP 0x000030b0
+
+/* SW2WBM R0 release address */
+#define HAL_WBM_RELEASE_RING_BASE_LSB 0x000001d8
+
+/* SW2WBM R2 release address */
+#define HAL_WBM_RELEASE_RING_HP 0x00003018
+
+/* WBM2SW R0 release address */
+#define HAL_WBM0_RELEASE_RING_BASE_LSB 0x00000910
+#define HAL_WBM1_RELEASE_RING_BASE_LSB 0x00000968
+
+/* WBM2SW R2 release address */
+#define HAL_WBM0_RELEASE_RING_HP 0x000030c0
+#define HAL_WBM1_RELEASE_RING_HP 0x000030c8
+
+/* TCL ring field masks and offsets */
+#define HAL_TCL1_RING_BASE_MSB_RING_SIZE GENMASK(27, 8)
+#define HAL_TCL1_RING_BASE_MSB_RING_BASE_ADDR_MSB GENMASK(7, 0)
+#define HAL_TCL1_RING_ID_ENTRY_SIZE GENMASK(7, 0)
+#define HAL_TCL1_RING_MISC_MSI_LOOPCNT_DISABLE BIT(1)
+#define HAL_TCL1_RING_MISC_MSI_SWAP BIT(3)
+#define HAL_TCL1_RING_MISC_HOST_FW_SWAP BIT(4)
+#define HAL_TCL1_RING_MISC_DATA_TLV_SWAP BIT(5)
+#define HAL_TCL1_RING_MISC_SRNG_ENABLE BIT(6)
+#define HAL_TCL1_RING_CONSR_INT_SETUP_IX0_INTR_TMR_THOLD GENMASK(31, 16)
+#define HAL_TCL1_RING_CONSR_INT_SETUP_IX0_BATCH_COUNTER_THOLD GENMASK(14, 0)
+#define HAL_TCL1_RING_CONSR_INT_SETUP_IX1_LOW_THOLD GENMASK(15, 0)
+#define HAL_TCL1_RING_MSI1_BASE_MSB_MSI1_ENABLE BIT(8)
+#define HAL_TCL1_RING_MSI1_BASE_MSB_ADDR GENMASK(7, 0)
+#define HAL_TCL1_RING_CMN_CTRL_DSCP_TID_MAP_PROG_EN BIT(17)
+#define HAL_TCL1_RING_FIELD_DSCP_TID_MAP GENMASK(31, 0)
+#define HAL_TCL1_RING_FIELD_DSCP_TID_MAP0 GENMASK(2, 0)
+#define HAL_TCL1_RING_FIELD_DSCP_TID_MAP1 GENMASK(5, 3)
+#define HAL_TCL1_RING_FIELD_DSCP_TID_MAP2 GENMASK(8, 6)
+#define HAL_TCL1_RING_FIELD_DSCP_TID_MAP3 GENMASK(11, 9)
+#define HAL_TCL1_RING_FIELD_DSCP_TID_MAP4 GENMASK(14, 12)
+#define HAL_TCL1_RING_FIELD_DSCP_TID_MAP5 GENMASK(17, 15)
+#define HAL_TCL1_RING_FIELD_DSCP_TID_MAP6 GENMASK(20, 18)
+#define HAL_TCL1_RING_FIELD_DSCP_TID_MAP7 GENMASK(23, 21)
+
+/* REO ring field masks and offsets */
+#define HAL_REO1_RING_BASE_MSB_RING_SIZE GENMASK(27, 8)
+#define HAL_REO1_RING_BASE_MSB_RING_BASE_ADDR_MSB GENMASK(7, 0)
+#define HAL_REO1_RING_ID_RING_ID GENMASK(15, 8)
+#define HAL_REO1_RING_ID_ENTRY_SIZE GENMASK(7, 0)
+#define HAL_REO1_RING_MISC_MSI_SWAP BIT(3)
+#define HAL_REO1_RING_MISC_HOST_FW_SWAP BIT(4)
+#define HAL_REO1_RING_MISC_DATA_TLV_SWAP BIT(5)
+#define HAL_REO1_RING_MISC_SRNG_ENABLE BIT(6)
+#define HAL_REO1_RING_PRDR_INT_SETUP_INTR_TMR_THOLD GENMASK(31, 16)
+#define HAL_REO1_RING_PRDR_INT_SETUP_BATCH_COUNTER_THOLD GENMASK(14, 0)
+#define HAL_REO1_RING_MSI1_BASE_MSB_MSI1_ENABLE BIT(8)
+#define HAL_REO1_RING_MSI1_BASE_MSB_ADDR GENMASK(7, 0)
+#define HAL_REO1_GEN_ENABLE_FRAG_DST_RING GENMASK(25, 23)
+#define HAL_REO1_GEN_ENABLE_AGING_LIST_ENABLE BIT(2)
+#define HAL_REO1_GEN_ENABLE_AGING_FLUSH_ENABLE BIT(3)
+
+/* CE ring bit field mask and shift */
+#define HAL_CE_DST_R0_DEST_CTRL_MAX_LEN GENMASK(15, 0)
+
+#define HAL_ADDR_LSB_REG_MASK 0xffffffff
+
+#define HAL_ADDR_MSB_REG_SHIFT 32
+
+/* WBM ring bit field mask and shift */
+#define HAL_WBM_LINK_DESC_IDLE_LIST_MODE BIT(1)
+#define HAL_WBM_SCATTER_BUFFER_SIZE GENMASK(10, 2)
+#define HAL_WBM_SCATTER_RING_SIZE_OF_IDLE_LINK_DESC_LIST GENMASK(31, 16)
+#define HAL_WBM_SCATTERED_DESC_MSB_BASE_ADDR_39_32 GENMASK(7, 0)
+#define HAL_WBM_SCATTERED_DESC_MSB_BASE_ADDR_MATCH_TAG GENMASK(31, 8)
+
+#define HAL_WBM_SCATTERED_DESC_HEAD_P_OFFSET_IX1 GENMASK(20, 8)
+#define HAL_WBM_SCATTERED_DESC_TAIL_P_OFFSET_IX1 GENMASK(20, 8)
+
+#define BASE_ADDR_MATCH_TAG_VAL 0x5
+
+#define HAL_REO_REO2SW1_RING_BASE_MSB_RING_SIZE 0x000fffff
+#define HAL_REO_REO2TCL_RING_BASE_MSB_RING_SIZE 0x000fffff
+#define HAL_REO_SW2REO_RING_BASE_MSB_RING_SIZE 0x0000ffff
+#define HAL_REO_CMD_RING_BASE_MSB_RING_SIZE 0x0000ffff
+#define HAL_REO_STATUS_RING_BASE_MSB_RING_SIZE 0x0000ffff
+#define HAL_SW2TCL1_RING_BASE_MSB_RING_SIZE 0x000fffff
+#define HAL_SW2TCL1_CMD_RING_BASE_MSB_RING_SIZE 0x000fffff
+#define HAL_TCL_STATUS_RING_BASE_MSB_RING_SIZE 0x0000ffff
+#define HAL_CE_SRC_RING_BASE_MSB_RING_SIZE 0x0000ffff
+#define HAL_CE_DST_RING_BASE_MSB_RING_SIZE 0x0000ffff
+#define HAL_CE_DST_STATUS_RING_BASE_MSB_RING_SIZE 0x0000ffff
+#define HAL_WBM_IDLE_LINK_RING_BASE_MSB_RING_SIZE 0x0000ffff
+#define HAL_SW2WBM_RELEASE_RING_BASE_MSB_RING_SIZE 0x0000ffff
+#define HAL_WBM2SW_RELEASE_RING_BASE_MSB_RING_SIZE 0x000fffff
+#define HAL_RXDMA_RING_MAX_SIZE 0x0000ffff
+
+#define HAL_RX_DESC_SIZE (sizeof(struct hal_rx_desc))
+
+/* Add any other errors here and return them in
+ * ath11k_hal_rx_desc_get_err().
+ */
+
+enum hal_srng_ring_id {
+ HAL_SRNG_RING_ID_REO2SW1 = 0,
+ HAL_SRNG_RING_ID_REO2SW2,
+ HAL_SRNG_RING_ID_REO2SW3,
+ HAL_SRNG_RING_ID_REO2SW4,
+ HAL_SRNG_RING_ID_REO2TCL,
+ HAL_SRNG_RING_ID_SW2REO,
+
+ HAL_SRNG_RING_ID_REO_CMD = 8,
+ HAL_SRNG_RING_ID_REO_STATUS,
+
+ HAL_SRNG_RING_ID_SW2TCL1 = 16,
+ HAL_SRNG_RING_ID_SW2TCL2,
+ HAL_SRNG_RING_ID_SW2TCL3,
+ HAL_SRNG_RING_ID_SW2TCL4,
+
+ HAL_SRNG_RING_ID_SW2TCL_CMD = 24,
+ HAL_SRNG_RING_ID_TCL_STATUS,
+
+ HAL_SRNG_RING_ID_CE0_SRC = 32,
+ HAL_SRNG_RING_ID_CE1_SRC,
+ HAL_SRNG_RING_ID_CE2_SRC,
+ HAL_SRNG_RING_ID_CE3_SRC,
+ HAL_SRNG_RING_ID_CE4_SRC,
+ HAL_SRNG_RING_ID_CE5_SRC,
+ HAL_SRNG_RING_ID_CE6_SRC,
+ HAL_SRNG_RING_ID_CE7_SRC,
+ HAL_SRNG_RING_ID_CE8_SRC,
+ HAL_SRNG_RING_ID_CE9_SRC,
+ HAL_SRNG_RING_ID_CE10_SRC,
+ HAL_SRNG_RING_ID_CE11_SRC,
+
+ HAL_SRNG_RING_ID_CE0_DST = 56,
+ HAL_SRNG_RING_ID_CE1_DST,
+ HAL_SRNG_RING_ID_CE2_DST,
+ HAL_SRNG_RING_ID_CE3_DST,
+ HAL_SRNG_RING_ID_CE4_DST,
+ HAL_SRNG_RING_ID_CE5_DST,
+ HAL_SRNG_RING_ID_CE6_DST,
+ HAL_SRNG_RING_ID_CE7_DST,
+ HAL_SRNG_RING_ID_CE8_DST,
+ HAL_SRNG_RING_ID_CE9_DST,
+ HAL_SRNG_RING_ID_CE10_DST,
+ HAL_SRNG_RING_ID_CE11_DST,
+
+ HAL_SRNG_RING_ID_CE0_DST_STATUS = 80,
+ HAL_SRNG_RING_ID_CE1_DST_STATUS,
+ HAL_SRNG_RING_ID_CE2_DST_STATUS,
+ HAL_SRNG_RING_ID_CE3_DST_STATUS,
+ HAL_SRNG_RING_ID_CE4_DST_STATUS,
+ HAL_SRNG_RING_ID_CE5_DST_STATUS,
+ HAL_SRNG_RING_ID_CE6_DST_STATUS,
+ HAL_SRNG_RING_ID_CE7_DST_STATUS,
+ HAL_SRNG_RING_ID_CE8_DST_STATUS,
+ HAL_SRNG_RING_ID_CE9_DST_STATUS,
+ HAL_SRNG_RING_ID_CE10_DST_STATUS,
+ HAL_SRNG_RING_ID_CE11_DST_STATUS,
+
+ HAL_SRNG_RING_ID_WBM_IDLE_LINK = 104,
+ HAL_SRNG_RING_ID_WBM_SW_RELEASE,
+ HAL_SRNG_RING_ID_WBM2SW0_RELEASE,
+ HAL_SRNG_RING_ID_WBM2SW1_RELEASE,
+ HAL_SRNG_RING_ID_WBM2SW2_RELEASE,
+ HAL_SRNG_RING_ID_WBM2SW3_RELEASE,
+
+ HAL_SRNG_RING_ID_UMAC_ID_END = 127,
+ HAL_SRNG_RING_ID_LMAC1_ID_START,
+
+ HAL_SRNG_RING_ID_WMAC1_SW2RXDMA0_BUF = HAL_SRNG_RING_ID_LMAC1_ID_START,
+ HAL_SRNG_RING_ID_WMAC1_SW2RXDMA1_BUF,
+ HAL_SRNG_RING_ID_WMAC1_SW2RXDMA2_BUF,
+ HAL_SRNG_RING_ID_WMAC1_SW2RXDMA0_STATBUF,
+ HAL_SRNG_RING_ID_WMAC1_SW2RXDMA1_STATBUF,
+ HAL_SRNG_RING_ID_WMAC1_RXDMA2SW0,
+ HAL_SRNG_RING_ID_WMAC1_RXDMA2SW1,
+ HAL_SRNG_RING_ID_WMAC1_SW2RXDMA1_DESC,
+ HAL_SRNG_RING_ID_RXDMA_DIR_BUF,
+
+ HAL_SRNG_RING_ID_LMAC1_ID_END = 143
+};
+
+/* SRNG registers are split into two groups R0 and R2 */
+#define HAL_SRNG_REG_GRP_R0 0
+#define HAL_SRNG_REG_GRP_R2 1
+#define HAL_SRNG_NUM_REG_GRP 2
+
+#define HAL_SRNG_NUM_LMACS 3
+#define HAL_SRNG_REO_EXCEPTION HAL_SRNG_RING_ID_REO2SW1
+#define HAL_SRNG_RINGS_PER_LMAC (HAL_SRNG_RING_ID_LMAC1_ID_END - \
+ HAL_SRNG_RING_ID_LMAC1_ID_START)
+#define HAL_SRNG_NUM_LMAC_RINGS (HAL_SRNG_NUM_LMACS * HAL_SRNG_RINGS_PER_LMAC)
+#define HAL_SRNG_RING_ID_MAX (HAL_SRNG_RING_ID_UMAC_ID_END + \
+ HAL_SRNG_NUM_LMAC_RINGS)
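
Working the arithmetic out: LMAC ring ids span 128..143, so each LMAC owns 15 ids, and three LMACs add 45 on top of the 127 UMAC ids. A compile-time check one could add to pin that down:

	/* HAL_SRNG_RINGS_PER_LMAC = 143 - 128 = 15
	 * HAL_SRNG_NUM_LMAC_RINGS = 3 * 15    = 45
	 * HAL_SRNG_RING_ID_MAX    = 127 + 45  = 172
	 * i.e. srng_list[] below holds 172 entries.
	 */
	BUILD_BUG_ON(HAL_SRNG_RING_ID_MAX != 172);
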
+
+enum hal_ring_type {
+ HAL_REO_DST,
+ HAL_REO_EXCEPTION,
+ HAL_REO_REINJECT,
+ HAL_REO_CMD,
+ HAL_REO_STATUS,
+ HAL_TCL_DATA,
+ HAL_TCL_CMD,
+ HAL_TCL_STATUS,
+ HAL_CE_SRC,
+ HAL_CE_DST,
+ HAL_CE_DST_STATUS,
+ HAL_WBM_IDLE_LINK,
+ HAL_SW2WBM_RELEASE,
+ HAL_WBM2SW_RELEASE,
+ HAL_RXDMA_BUF,
+ HAL_RXDMA_DST,
+ HAL_RXDMA_MONITOR_BUF,
+ HAL_RXDMA_MONITOR_STATUS,
+ HAL_RXDMA_MONITOR_DST,
+ HAL_RXDMA_MONITOR_DESC,
+ HAL_RXDMA_DIR_BUF,
+ HAL_MAX_RING_TYPES,
+};
+
+#define HAL_RX_MAX_BA_WINDOW 256
+
+#define HAL_DEFAULT_REO_TIMEOUT_USEC (40 * 1000)
+
+/**
+ * enum hal_reo_cmd_type: Enum for REO command type
+ * @HAL_REO_CMD_GET_QUEUE_STATS: Get REO queue status/stats
+ * @HAL_REO_CMD_FLUSH_QUEUE: Flush all frames in REO queue
+ * @HAL_REO_CMD_FLUSH_CACHE: Flush descriptor entries in the cache
+ * @HAL_REO_CMD_UNBLOCK_CACHE: Unblock a descriptor's address that was blocked
+ * earlier with a 'REO_FLUSH_CACHE' command
+ * @HAL_REO_CMD_FLUSH_TIMEOUT_LIST: Flush buffers/descriptors from timeout list
+ * @HAL_REO_CMD_UPDATE_RX_QUEUE: Update REO queue settings
+ */
+enum hal_reo_cmd_type {
+ HAL_REO_CMD_GET_QUEUE_STATS = 0,
+ HAL_REO_CMD_FLUSH_QUEUE = 1,
+ HAL_REO_CMD_FLUSH_CACHE = 2,
+ HAL_REO_CMD_UNBLOCK_CACHE = 3,
+ HAL_REO_CMD_FLUSH_TIMEOUT_LIST = 4,
+ HAL_REO_CMD_UPDATE_RX_QUEUE = 5,
+};
+
+/**
+ * enum hal_reo_cmd_status: Enum for execution status of REO command
+ * @HAL_REO_CMD_SUCCESS: Command has successfully executed
+ * @HAL_REO_CMD_BLOCKED: Command could not be executed as the queue
+ * or cache was blocked
+ * @HAL_REO_CMD_FAILED: Command execution failed, could be due to
+ * invalid queue desc
+ * @HAL_REO_CMD_RESOURCE_BLOCKED: Command could not be executed because
+ * a required blocking resource was in use
+ * @HAL_REO_CMD_DRAIN: Command was drained without being executed
+ */
+enum hal_reo_cmd_status {
+ HAL_REO_CMD_SUCCESS = 0,
+ HAL_REO_CMD_BLOCKED = 1,
+ HAL_REO_CMD_FAILED = 2,
+ HAL_REO_CMD_RESOURCE_BLOCKED = 3,
+ HAL_REO_CMD_DRAIN = 0xff,
+};
+
+struct hal_wbm_idle_scatter_list {
+ dma_addr_t paddr;
+ struct hal_wbm_link_desc *vaddr;
+};
+
+struct hal_srng_params {
+ dma_addr_t ring_base_paddr;
+ u32 *ring_base_vaddr;
+ int num_entries;
+ u32 intr_batch_cntr_thres_entries;
+ u32 intr_timer_thres_us;
+ u32 flags;
+ u32 max_buffer_len;
+ u32 low_threshold;
+
+ /* Add more params as needed */
+};
+
+enum hal_srng_dir {
+ HAL_SRNG_DIR_SRC,
+ HAL_SRNG_DIR_DST
+};
+
+/* srng flags */
+#define HAL_SRNG_FLAGS_MSI_SWAP 0x00000008
+#define HAL_SRNG_FLAGS_RING_PTR_SWAP 0x00000010
+#define HAL_SRNG_FLAGS_DATA_TLV_SWAP 0x00000020
+#define HAL_SRNG_FLAGS_LOW_THRESH_INTR_EN 0x00010000
+#define HAL_SRNG_FLAGS_MSI_INTR 0x00020000
+#define HAL_SRNG_FLAGS_LMAC_RING 0x80000000
+
+#define HAL_SRNG_TLV_HDR_TAG GENMASK(9, 1)
+#define HAL_SRNG_TLV_HDR_LEN GENMASK(25, 10)
+
+/* Common SRNG ring structure for source and destination rings */
+struct hal_srng {
+ /* Unique SRNG ring ID */
+ u8 ring_id;
+
+ /* Ring initialization done */
+ u8 initialized;
+
+ /* Interrupt/MSI value assigned to this ring */
+ int irq;
+
+ /* Physical base address of the ring */
+ dma_addr_t ring_base_paddr;
+
+ /* Virtual base address of the ring */
+ u32 *ring_base_vaddr;
+
+ /* Number of entries in ring */
+ u32 num_entries;
+
+ /* Ring size */
+ u32 ring_size;
+
+ /* Ring size mask */
+ u32 ring_size_mask;
+
+ /* Size of ring entry */
+ u32 entry_size;
+
+ /* Interrupt timer threshold - in microseconds */
+ u32 intr_timer_thres_us;
+
+ /* Interrupt batch counter threshold - in number of ring entries */
+ u32 intr_batch_cntr_thres_entries;
+
+ /* MSI Address */
+ dma_addr_t msi_addr;
+
+ /* MSI data */
+ u32 msi_data;
+
+ /* Misc flags */
+ u32 flags;
+
+ /* Lock for serializing ring index updates */
+ spinlock_t lock;
+
+ /* Start offset of SRNG register groups for this ring
+ * TBD: See if this is required - register address can be derived
+ * from ring ID
+ */
+ u32 hwreg_base[HAL_SRNG_NUM_REG_GRP];
+
+ /* Source or Destination ring */
+ enum hal_srng_dir ring_dir;
+
+ union {
+ struct {
+ /* SW tail pointer */
+ u32 tp;
+
+ /* Shadow head pointer location to be updated by HW */
+ volatile u32 *hp_addr;
+
+ /* Cached head pointer */
+ u32 cached_hp;
+
+ /* Tail pointer location to be updated by SW - This
+ * will be a register address and need not be
+ * accessed through SW structure
+ */
+ u32 *tp_addr;
+
+ /* Current SW loop cnt */
+ u32 loop_cnt;
+
+ /* max transfer size */
+ u16 max_buffer_length;
+ } dst_ring;
+
+ struct {
+ /* SW head pointer */
+ u32 hp;
+
+ /* SW reap head pointer */
+ u32 reap_hp;
+
+ /* Shadow tail pointer location to be updated by HW */
+ u32 *tp_addr;
+
+ /* Cached tail pointer */
+ u32 cached_tp;
+
+ /* Head pointer location to be updated by SW - This
+ * will be a register address and need not be accessed
+ * through SW structure
+ */
+ u32 *hp_addr;
+
+ /* Low threshold - in number of ring entries */
+ u32 low_threshold;
+ } src_ring;
+ } u;
+};
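
To make the pointer bookkeeping concrete: on a source ring, free space falls out of hp and cached_tp with one wrap case. A hedged sketch of the computation (presumably close to what ath11k_hal_srng_src_num_free() computes; pointer values are in units of u32 words):

	/* Sketch, not the driver's implementation: one entry is kept
	 * unused so that hp == tp unambiguously means "empty".
	 */
	static u32 example_src_free_entries(struct hal_srng *srng)
	{
		u32 hp = srng->u.src_ring.hp;
		u32 tp = srng->u.src_ring.cached_tp;

		if (tp > hp)
			return (tp - hp) / srng->entry_size - 1;

		return (srng->ring_size - hp + tp) / srng->entry_size - 1;
	}
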
+
+/* Interrupt mitigation - Batch threshold in terms of number of frames */
+#define HAL_SRNG_INT_BATCH_THRESHOLD_TX 256
+#define HAL_SRNG_INT_BATCH_THRESHOLD_RX 128
+#define HAL_SRNG_INT_BATCH_THRESHOLD_OTHER 1
+
+/* Interrupt mitigation - timer threshold in us */
+#define HAL_SRNG_INT_TIMER_THRESHOLD_TX 1000
+#define HAL_SRNG_INT_TIMER_THRESHOLD_RX 500
+#define HAL_SRNG_INT_TIMER_THRESHOLD_OTHER 1000
+
+/* HW SRNG configuration table */
+struct hal_srng_config {
+ int start_ring_id;
+ u16 max_rings;
+ u16 entry_size;
+ u32 reg_start[HAL_SRNG_NUM_REG_GRP];
+ u16 reg_size[HAL_SRNG_NUM_REG_GRP];
+ u8 lmac_ring;
+ enum hal_srng_dir ring_dir;
+ u32 max_size;
+};
+
+/**
+ * enum hal_rx_buf_return_buf_manager
+ *
+ * @HAL_RX_BUF_RBM_WBM_IDLE_BUF_LIST: Buffer returned to WBM idle buffer list
+ * @HAL_RX_BUF_RBM_WBM_IDLE_DESC_LIST: Descriptor returned to WBM idle
+ * descriptor list.
+ * @HAL_RX_BUF_RBM_FW_BM: Buffer returned to FW
+ * @HAL_RX_BUF_RBM_SW0_BM: For Tx completion -- returned to host
+ * @HAL_RX_BUF_RBM_SW1_BM: For Tx completion -- returned to host
+ * @HAL_RX_BUF_RBM_SW2_BM: For Tx completion -- returned to host
+ * @HAL_RX_BUF_RBM_SW3_BM: For Rx release -- returned to host
+ */
+enum hal_rx_buf_return_buf_manager {
+ HAL_RX_BUF_RBM_WBM_IDLE_BUF_LIST,
+ HAL_RX_BUF_RBM_WBM_IDLE_DESC_LIST,
+ HAL_RX_BUF_RBM_FW_BM,
+ HAL_RX_BUF_RBM_SW0_BM,
+ HAL_RX_BUF_RBM_SW1_BM,
+ HAL_RX_BUF_RBM_SW2_BM,
+ HAL_RX_BUF_RBM_SW3_BM,
+};
+
+#define HAL_SRNG_DESC_LOOP_CNT 0xf0000000
+
+#define HAL_REO_CMD_FLG_NEED_STATUS BIT(0)
+#define HAL_REO_CMD_FLG_STATS_CLEAR BIT(1)
+#define HAL_REO_CMD_FLG_FLUSH_BLOCK_LATER BIT(2)
+#define HAL_REO_CMD_FLG_FLUSH_RELEASE_BLOCKING BIT(3)
+#define HAL_REO_CMD_FLG_FLUSH_NO_INVAL BIT(4)
+#define HAL_REO_CMD_FLG_FLUSH_FWD_ALL_MPDUS BIT(5)
+#define HAL_REO_CMD_FLG_FLUSH_ALL BIT(6)
+#define HAL_REO_CMD_FLG_UNBLK_RESOURCE BIT(7)
+#define HAL_REO_CMD_FLG_UNBLK_CACHE BIT(8)
+
+/* Should match the HAL_REO_UPD_RX_QUEUE_INFO0_UPD_* fields */
+#define HAL_REO_CMD_UPD0_RX_QUEUE_NUM BIT(8)
+#define HAL_REO_CMD_UPD0_VLD BIT(9)
+#define HAL_REO_CMD_UPD0_ALDC BIT(10)
+#define HAL_REO_CMD_UPD0_DIS_DUP_DETECTION BIT(11)
+#define HAL_REO_CMD_UPD0_SOFT_REORDER_EN BIT(12)
+#define HAL_REO_CMD_UPD0_AC BIT(13)
+#define HAL_REO_CMD_UPD0_BAR BIT(14)
+#define HAL_REO_CMD_UPD0_RETRY BIT(15)
+#define HAL_REO_CMD_UPD0_CHECK_2K_MODE BIT(16)
+#define HAL_REO_CMD_UPD0_OOR_MODE BIT(17)
+#define HAL_REO_CMD_UPD0_BA_WINDOW_SIZE BIT(18)
+#define HAL_REO_CMD_UPD0_PN_CHECK BIT(19)
+#define HAL_REO_CMD_UPD0_EVEN_PN BIT(20)
+#define HAL_REO_CMD_UPD0_UNEVEN_PN BIT(21)
+#define HAL_REO_CMD_UPD0_PN_HANDLE_ENABLE BIT(22)
+#define HAL_REO_CMD_UPD0_PN_SIZE BIT(23)
+#define HAL_REO_CMD_UPD0_IGNORE_AMPDU_FLG BIT(24)
+#define HAL_REO_CMD_UPD0_SVLD BIT(25)
+#define HAL_REO_CMD_UPD0_SSN BIT(26)
+#define HAL_REO_CMD_UPD0_SEQ_2K_ERR BIT(27)
+#define HAL_REO_CMD_UPD0_PN_ERR BIT(28)
+#define HAL_REO_CMD_UPD0_PN_VALID BIT(29)
+#define HAL_REO_CMD_UPD0_PN BIT(30)
+
+/* Should match the HAL_REO_UPD_RX_QUEUE_INFO1_* fields */
+#define HAL_REO_CMD_UPD1_VLD BIT(16)
+#define HAL_REO_CMD_UPD1_ALDC GENMASK(18, 17)
+#define HAL_REO_CMD_UPD1_DIS_DUP_DETECTION BIT(19)
+#define HAL_REO_CMD_UPD1_SOFT_REORDER_EN BIT(20)
+#define HAL_REO_CMD_UPD1_AC GENMASK(22, 21)
+#define HAL_REO_CMD_UPD1_BAR BIT(23)
+#define HAL_REO_CMD_UPD1_RETRY BIT(24)
+#define HAL_REO_CMD_UPD1_CHECK_2K_MODE BIT(25)
+#define HAL_REO_CMD_UPD1_OOR_MODE BIT(26)
+#define HAL_REO_CMD_UPD1_PN_CHECK BIT(27)
+#define HAL_REO_CMD_UPD1_EVEN_PN BIT(28)
+#define HAL_REO_CMD_UPD1_UNEVEN_PN BIT(29)
+#define HAL_REO_CMD_UPD1_PN_HANDLE_ENABLE BIT(30)
+#define HAL_REO_CMD_UPD1_IGNORE_AMPDU_FLG BIT(31)
+
+/* Should match the HAL_REO_UPD_RX_QUEUE_INFO2_* fields */
+#define HAL_REO_CMD_UPD2_SVLD BIT(10)
+#define HAL_REO_CMD_UPD2_SSN GENMASK(22, 11)
+#define HAL_REO_CMD_UPD2_SEQ_2K_ERR BIT(23)
+#define HAL_REO_CMD_UPD2_PN_ERR BIT(24)
+
+#define HAL_REO_DEST_RING_CTRL_HASH_RING_MAP GENMASK(31, 8)
+
+struct ath11k_hal_reo_cmd {
+ u32 addr_lo;
+ u32 flag;
+ u32 upd0;
+ u32 upd1;
+ u32 upd2;
+ u32 pn[4];
+ u16 rx_queue_num;
+ u16 min_rel;
+ u16 min_fwd;
+ u8 addr_hi;
+ u8 ac_list;
+ u8 blocking_idx;
+ u16 ba_window_size;
+ u8 pn_size;
+};
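
A hedged sketch of how the struct and the UPD0 bits compose when asking REO to update a single field of a queue ('qdesc_paddr' is a hypothetical queue-descriptor DMA address; the real encode path lives in the REO command code):

	struct ath11k_hal_reo_cmd cmd = { 0 };

	cmd.addr_lo = lower_32_bits(qdesc_paddr);
	cmd.addr_hi = upper_32_bits(qdesc_paddr);   /* upper 8 bits used */
	cmd.flag = HAL_REO_CMD_FLG_NEED_STATUS;     /* ask for a status TLV */
	cmd.upd0 = HAL_REO_CMD_UPD0_BA_WINDOW_SIZE; /* select field to update */
	cmd.ba_window_size = 64;                    /* new BA window */
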
+
+enum hal_pn_type {
+ HAL_PN_TYPE_NONE,
+ HAL_PN_TYPE_WPA,
+ HAL_PN_TYPE_WAPI_EVEN,
+ HAL_PN_TYPE_WAPI_UNEVEN,
+};
+
+enum hal_ce_desc {
+ HAL_CE_DESC_SRC,
+ HAL_CE_DESC_DST,
+ HAL_CE_DESC_DST_STATUS,
+};
+
+struct hal_reo_status_header {
+ u16 cmd_num;
+ enum hal_reo_cmd_status cmd_status;
+ u16 cmd_exe_time;
+ u32 timestamp;
+};
+
+struct hal_reo_status_queue_stats {
+ u16 ssn;
+ u16 curr_idx;
+ u32 pn[4];
+ u32 last_rx_queue_ts;
+ u32 last_rx_dequeue_ts;
+ u32 rx_bitmap[8]; /* Bitmap from 0-255 */
+ u32 curr_mpdu_cnt;
+ u32 curr_msdu_cnt;
+ u16 fwd_due_to_bar_cnt;
+ u16 dup_cnt;
+ u32 frames_in_order_cnt;
+ u32 num_mpdu_processed_cnt;
+ u32 num_msdu_processed_cnt;
+ u32 total_num_processed_byte_cnt;
+ u32 late_rx_mpdu_cnt;
+ u32 reorder_hole_cnt;
+ u8 timeout_cnt;
+ u8 bar_rx_cnt;
+ u8 num_window_2k_jump_cnt;
+};
+
+struct hal_reo_status_flush_queue {
+ bool err_detected;
+};
+
+enum hal_reo_status_flush_cache_err_code {
+ HAL_REO_STATUS_FLUSH_CACHE_ERR_CODE_SUCCESS,
+ HAL_REO_STATUS_FLUSH_CACHE_ERR_CODE_IN_USE,
+ HAL_REO_STATUS_FLUSH_CACHE_ERR_CODE_NOT_FOUND,
+};
+
+struct hal_reo_status_flush_cache {
+ bool err_detected;
+ enum hal_reo_status_flush_cache_err_code err_code;
+ bool cache_controller_flush_status_hit;
+ u8 cache_controller_flush_status_desc_type;
+ u8 cache_controller_flush_status_client_id;
+ u8 cache_controller_flush_status_err;
+ u8 cache_controller_flush_status_cnt;
+};
+
+enum hal_reo_status_unblock_cache_type {
+ HAL_REO_STATUS_UNBLOCK_BLOCKING_RESOURCE,
+ HAL_REO_STATUS_UNBLOCK_ENTIRE_CACHE_USAGE,
+};
+
+struct hal_reo_status_unblock_cache {
+ bool err_detected;
+ enum hal_reo_status_unblock_cache_type unblock_type;
+};
+
+struct hal_reo_status_flush_timeout_list {
+ bool err_detected;
+ bool list_empty;
+ u16 release_desc_cnt;
+ u16 fwd_buf_cnt;
+};
+
+enum hal_reo_threshold_idx {
+ HAL_REO_THRESHOLD_IDX_DESC_COUNTER0,
+ HAL_REO_THRESHOLD_IDX_DESC_COUNTER1,
+ HAL_REO_THRESHOLD_IDX_DESC_COUNTER2,
+ HAL_REO_THRESHOLD_IDX_DESC_COUNTER_SUM,
+};
+
+struct hal_reo_status_desc_thresh_reached {
+ enum hal_reo_threshold_idx threshold_idx;
+ u32 link_desc_counter0;
+ u32 link_desc_counter1;
+ u32 link_desc_counter2;
+ u32 link_desc_counter_sum;
+};
+
+struct hal_reo_status {
+ struct hal_reo_status_header uniform_hdr;
+ u8 loop_cnt;
+ union {
+ struct hal_reo_status_queue_stats queue_stats;
+ struct hal_reo_status_flush_queue flush_queue;
+ struct hal_reo_status_flush_cache flush_cache;
+ struct hal_reo_status_unblock_cache unblock_cache;
+ struct hal_reo_status_flush_timeout_list timeout_list;
+ struct hal_reo_status_desc_thresh_reached desc_thresh_reached;
+ } u;
+};
+
+/* HAL context used to access SRNG APIs (currently used by the data path
+ * and transport (CE) modules)
+ */
+struct ath11k_hal {
+ /* HAL internal state for all SRNG rings.
+ */
+ struct hal_srng srng_list[HAL_SRNG_RING_ID_MAX];
+
+ /* SRNG configuration table */
+ const struct hal_srng_config *srng_config;
+
+ /* Remote pointer memory for HW/FW updates */
+ struct {
+ u32 *vaddr;
+ dma_addr_t paddr;
+ } rdp;
+
+ /* Shared memory for ring pointer updates from host to FW */
+ struct {
+ u32 *vaddr;
+ dma_addr_t paddr;
+ } wrp;
+
+ /* Available REO blocking resources bitmap */
+ u8 avail_blk_resource;
+
+ u8 current_blk_index;
+
+ /* shadow register configuration */
+ u32 shadow_reg_addr[SHADOW_NUM_REGISTERS];
+ int num_shadow_reg_configured;
+};
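
The rdp block is what the per-ring hp_addr/tp_addr pointers computed in ath11k_hal_srng_setup() point into; a one-line sketch of the consumer side (roughly what the access_begin path has to do before reaping a destination ring):

	/* HW advances the head pointer by DMA into the shared RDP page;
	 * SW snapshots it once per access window.
	 */
	srng->u.dst_ring.cached_hp = *srng->u.dst_ring.hp_addr;
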
+
+u32 ath11k_hal_reo_qdesc_size(u32 ba_window_size, u8 tid);
+void ath11k_hal_reo_qdesc_setup(void *vaddr, int tid, u32 ba_window_size,
+ u32 start_seqtype);
+void ath11k_hal_reo_init_cmd_ring(struct ath11k_base *ab,
+ struct hal_srng *srng);
+void ath11k_hal_reo_hw_setup(struct ath11k_base *ab);
+void ath11k_hal_setup_link_idle_list(struct ath11k_base *ab,
+ struct hal_wbm_idle_scatter_list *sbuf,
+ u32 nsbufs, u32 tot_link_desc,
+ u32 end_offset);
+
+dma_addr_t ath11k_hal_srng_get_tp_addr(struct ath11k_base *ab,
+ struct hal_srng *srng);
+dma_addr_t ath11k_hal_srng_get_hp_addr(struct ath11k_base *ab,
+ struct hal_srng *srng);
+void ath11k_hal_set_link_desc_addr(struct hal_wbm_link_desc *desc, u32 cookie,
+ dma_addr_t paddr);
+u32 ath11k_hal_ce_get_desc_size(enum hal_ce_desc type);
+void ath11k_hal_ce_src_set_desc(void *buf, dma_addr_t paddr, u32 len, u32 id,
+ u8 byte_swap_data);
+void ath11k_hal_ce_dst_set_desc(void *buf, dma_addr_t paddr);
+u32 ath11k_hal_ce_dst_status_get_length(void *buf);
+int ath11k_hal_srng_get_entrysize(u32 ring_type);
+int ath11k_hal_srng_get_max_entries(u32 ring_type);
+void ath11k_hal_srng_get_params(struct ath11k_base *ab, struct hal_srng *srng,
+ struct hal_srng_params *params);
+u32 *ath11k_hal_srng_dst_get_next_entry(struct ath11k_base *ab,
+ struct hal_srng *srng);
+u32 *ath11k_hal_srng_dst_peek(struct ath11k_base *ab, struct hal_srng *srng);
+int ath11k_hal_srng_dst_num_free(struct ath11k_base *ab, struct hal_srng *srng,
+ bool sync_hw_ptr);
+u32 *ath11k_hal_srng_src_peek(struct ath11k_base *ab, struct hal_srng *srng);
+u32 *ath11k_hal_srng_src_get_next_reaped(struct ath11k_base *ab,
+ struct hal_srng *srng);
+u32 *ath11k_hal_srng_src_reap_next(struct ath11k_base *ab,
+ struct hal_srng *srng);
+u32 *ath11k_hal_srng_src_get_next_entry(struct ath11k_base *ab,
+ struct hal_srng *srng);
+int ath11k_hal_srng_src_num_free(struct ath11k_base *ab, struct hal_srng *srng,
+ bool sync_hw_ptr);
+void ath11k_hal_srng_access_begin(struct ath11k_base *ab,
+ struct hal_srng *srng);
+void ath11k_hal_srng_access_end(struct ath11k_base *ab, struct hal_srng *srng);
+int ath11k_hal_srng_setup(struct ath11k_base *ab, enum hal_ring_type type,
+ int ring_num, int mac_id,
+ struct hal_srng_params *params);
+int ath11k_hal_srng_init(struct ath11k_base *ath11k);
+void ath11k_hal_srng_deinit(struct ath11k_base *ath11k);
+
+#endif
diff --git a/drivers/net/wireless/ath/ath11k/hal_desc.h b/drivers/net/wireless/ath/ath11k/hal_desc.h
new file mode 100644
index 000000000000..5e200380cca4
--- /dev/null
+++ b/drivers/net/wireless/ath/ath11k/hal_desc.h
@@ -0,0 +1,2468 @@
+/* SPDX-License-Identifier: BSD-3-Clause-Clear */
+/*
+ * Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
+ */
+#ifndef ATH11K_HAL_DESC_H
+#define ATH11K_HAL_DESC_H
+
+#define BUFFER_ADDR_INFO0_ADDR GENMASK(31, 0)
+
+#define BUFFER_ADDR_INFO1_ADDR GENMASK(7, 0)
+#define BUFFER_ADDR_INFO1_RET_BUF_MGR GENMASK(10, 8)
+#define BUFFER_ADDR_INFO1_SW_COOKIE GENMASK(31, 11)
+
+struct ath11k_buffer_addr {
+ u32 info0;
+ u32 info1;
+} __packed;
+
+/* ath11k_buffer_addr
+ *
+ * info0
+ * Address (lower 32 bits) of the msdu buffer or msdu extension
+ * descriptor or Link descriptor
+ *
+ * addr
+ * Address (upper 8 bits) of the msdu buffer or msdu extension
+ * descriptor or Link descriptor
+ *
+ * return_buffer_manager (RBM)
+ * Consumer: WBM
+ * Producer: SW/FW
+ * Indicates to which buffer manager the buffer or MSDU_EXTENSION
+ * descriptor or link descriptor that is being pointed to shall be
+ * returned after the frame has been processed. It is used by WBM
+ * for routing purposes.
+ *
+ * Values are defined in enum %HAL_RX_BUF_RBM_
+ *
+ * sw_buffer_cookie
+ * Cookie field exclusively used by SW. HW ignores the contents,
+ * except that it passes the programmed value on to other
+ * descriptors together with the physical address.
+ *
+ * The field can be used by SW to, for example, associate the
+ * buffer's physical address with its virtual address.
+ */
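
Given those masks, filling the two words for a 40-bit DMA address, cookie and return-buffer-manager is two FIELD_PREP() compositions; a minimal sketch (presumably what ath11k_hal_set_link_desc_addr() amounts to for link descriptors):

	/* Sketch: pack a 40-bit address, SW cookie and RBM into an
	 * ath11k_buffer_addr, matching the masks defined above.
	 */
	static void example_set_buffer_addr(struct ath11k_buffer_addr *binfo,
					    dma_addr_t paddr, u32 cookie, u8 rbm)
	{
		binfo->info0 = FIELD_PREP(BUFFER_ADDR_INFO0_ADDR,
					  lower_32_bits(paddr));
		binfo->info1 = FIELD_PREP(BUFFER_ADDR_INFO1_ADDR,
					  upper_32_bits(paddr)) |
			       FIELD_PREP(BUFFER_ADDR_INFO1_RET_BUF_MGR, rbm) |
			       FIELD_PREP(BUFFER_ADDR_INFO1_SW_COOKIE, cookie);
	}
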
+
+enum hal_tlv_tag {
+ HAL_MACTX_CBF_START = 0 /* 0x0 */,
+ HAL_PHYRX_DATA = 1 /* 0x1 */,
+ HAL_PHYRX_CBF_DATA_RESP = 2 /* 0x2 */,
+ HAL_PHYRX_ABORT_REQUEST = 3 /* 0x3 */,
+ HAL_PHYRX_USER_ABORT_NOTIFICATION = 4 /* 0x4 */,
+ HAL_MACTX_DATA_RESP = 5 /* 0x5 */,
+ HAL_MACTX_CBF_DATA = 6 /* 0x6 */,
+ HAL_MACTX_CBF_DONE = 7 /* 0x7 */,
+ HAL_MACRX_CBF_READ_REQUEST = 8 /* 0x8 */,
+ HAL_MACRX_CBF_DATA_REQUEST = 9 /* 0x9 */,
+ HAL_MACRX_EXPECT_NDP_RECEPTION = 10 /* 0xa */,
+ HAL_MACRX_FREEZE_CAPTURE_CHANNEL = 11 /* 0xb */,
+ HAL_MACRX_NDP_TIMEOUT = 12 /* 0xc */,
+ HAL_MACRX_ABORT_ACK = 13 /* 0xd */,
+ HAL_MACRX_REQ_IMPLICIT_FB = 14 /* 0xe */,
+ HAL_MACRX_CHAIN_MASK = 15 /* 0xf */,
+ HAL_MACRX_NAP_USER = 16 /* 0x10 */,
+ HAL_MACRX_ABORT_REQUEST = 17 /* 0x11 */,
+ HAL_PHYTX_OTHER_TRANSMIT_INFO16 = 18 /* 0x12 */,
+ HAL_PHYTX_ABORT_ACK = 19 /* 0x13 */,
+ HAL_PHYTX_ABORT_REQUEST = 20 /* 0x14 */,
+ HAL_PHYTX_PKT_END = 21 /* 0x15 */,
+ HAL_PHYTX_PPDU_HEADER_INFO_REQUEST = 22 /* 0x16 */,
+ HAL_PHYTX_REQUEST_CTRL_INFO = 23 /* 0x17 */,
+ HAL_PHYTX_DATA_REQUEST = 24 /* 0x18 */,
+ HAL_PHYTX_BF_CV_LOADING_DONE = 25 /* 0x19 */,
+ HAL_PHYTX_NAP_ACK = 26 /* 0x1a */,
+ HAL_PHYTX_NAP_DONE = 27 /* 0x1b */,
+ HAL_PHYTX_OFF_ACK = 28 /* 0x1c */,
+ HAL_PHYTX_ON_ACK = 29 /* 0x1d */,
+ HAL_PHYTX_SYNTH_OFF_ACK = 30 /* 0x1e */,
+ HAL_PHYTX_DEBUG16 = 31 /* 0x1f */,
+ HAL_MACTX_ABORT_REQUEST = 32 /* 0x20 */,
+ HAL_MACTX_ABORT_ACK = 33 /* 0x21 */,
+ HAL_MACTX_PKT_END = 34 /* 0x22 */,
+ HAL_MACTX_PRE_PHY_DESC = 35 /* 0x23 */,
+ HAL_MACTX_BF_PARAMS_COMMON = 36 /* 0x24 */,
+ HAL_MACTX_BF_PARAMS_PER_USER = 37 /* 0x25 */,
+ HAL_MACTX_PREFETCH_CV = 38 /* 0x26 */,
+ HAL_MACTX_USER_DESC_COMMON = 39 /* 0x27 */,
+ HAL_MACTX_USER_DESC_PER_USER = 40 /* 0x28 */,
+ HAL_EXAMPLE_USER_TLV_16 = 41 /* 0x29 */,
+ HAL_EXAMPLE_TLV_16 = 42 /* 0x2a */,
+ HAL_MACTX_PHY_OFF = 43 /* 0x2b */,
+ HAL_MACTX_PHY_ON = 44 /* 0x2c */,
+ HAL_MACTX_SYNTH_OFF = 45 /* 0x2d */,
+ HAL_MACTX_EXPECT_CBF_COMMON = 46 /* 0x2e */,
+ HAL_MACTX_EXPECT_CBF_PER_USER = 47 /* 0x2f */,
+ HAL_MACTX_PHY_DESC = 48 /* 0x30 */,
+ HAL_MACTX_L_SIG_A = 49 /* 0x31 */,
+ HAL_MACTX_L_SIG_B = 50 /* 0x32 */,
+ HAL_MACTX_HT_SIG = 51 /* 0x33 */,
+ HAL_MACTX_VHT_SIG_A = 52 /* 0x34 */,
+ HAL_MACTX_VHT_SIG_B_SU20 = 53 /* 0x35 */,
+ HAL_MACTX_VHT_SIG_B_SU40 = 54 /* 0x36 */,
+ HAL_MACTX_VHT_SIG_B_SU80 = 55 /* 0x37 */,
+ HAL_MACTX_VHT_SIG_B_SU160 = 56 /* 0x38 */,
+ HAL_MACTX_VHT_SIG_B_MU20 = 57 /* 0x39 */,
+ HAL_MACTX_VHT_SIG_B_MU40 = 58 /* 0x3a */,
+ HAL_MACTX_VHT_SIG_B_MU80 = 59 /* 0x3b */,
+ HAL_MACTX_VHT_SIG_B_MU160 = 60 /* 0x3c */,
+ HAL_MACTX_SERVICE = 61 /* 0x3d */,
+ HAL_MACTX_HE_SIG_A_SU = 62 /* 0x3e */,
+ HAL_MACTX_HE_SIG_A_MU_DL = 63 /* 0x3f */,
+ HAL_MACTX_HE_SIG_A_MU_UL = 64 /* 0x40 */,
+ HAL_MACTX_HE_SIG_B1_MU = 65 /* 0x41 */,
+ HAL_MACTX_HE_SIG_B2_MU = 66 /* 0x42 */,
+ HAL_MACTX_HE_SIG_B2_OFDMA = 67 /* 0x43 */,
+ HAL_MACTX_DELETE_CV = 68 /* 0x44 */,
+ HAL_MACTX_MU_UPLINK_COMMON = 69 /* 0x45 */,
+ HAL_MACTX_MU_UPLINK_USER_SETUP = 70 /* 0x46 */,
+ HAL_MACTX_OTHER_TRANSMIT_INFO = 71 /* 0x47 */,
+ HAL_MACTX_PHY_NAP = 72 /* 0x48 */,
+ HAL_MACTX_DEBUG = 73 /* 0x49 */,
+ HAL_PHYRX_ABORT_ACK = 74 /* 0x4a */,
+ HAL_PHYRX_GENERATED_CBF_DETAILS = 75 /* 0x4b */,
+ HAL_PHYRX_RSSI_LEGACY = 76 /* 0x4c */,
+ HAL_PHYRX_RSSI_HT = 77 /* 0x4d */,
+ HAL_PHYRX_USER_INFO = 78 /* 0x4e */,
+ HAL_PHYRX_PKT_END = 79 /* 0x4f */,
+ HAL_PHYRX_DEBUG = 80 /* 0x50 */,
+ HAL_PHYRX_CBF_TRANSFER_DONE = 81 /* 0x51 */,
+ HAL_PHYRX_CBF_TRANSFER_ABORT = 82 /* 0x52 */,
+ HAL_PHYRX_L_SIG_A = 83 /* 0x53 */,
+ HAL_PHYRX_L_SIG_B = 84 /* 0x54 */,
+ HAL_PHYRX_HT_SIG = 85 /* 0x55 */,
+ HAL_PHYRX_VHT_SIG_A = 86 /* 0x56 */,
+ HAL_PHYRX_VHT_SIG_B_SU20 = 87 /* 0x57 */,
+ HAL_PHYRX_VHT_SIG_B_SU40 = 88 /* 0x58 */,
+ HAL_PHYRX_VHT_SIG_B_SU80 = 89 /* 0x59 */,
+ HAL_PHYRX_VHT_SIG_B_SU160 = 90 /* 0x5a */,
+ HAL_PHYRX_VHT_SIG_B_MU20 = 91 /* 0x5b */,
+ HAL_PHYRX_VHT_SIG_B_MU40 = 92 /* 0x5c */,
+ HAL_PHYRX_VHT_SIG_B_MU80 = 93 /* 0x5d */,
+ HAL_PHYRX_VHT_SIG_B_MU160 = 94 /* 0x5e */,
+ HAL_PHYRX_HE_SIG_A_SU = 95 /* 0x5f */,
+ HAL_PHYRX_HE_SIG_A_MU_DL = 96 /* 0x60 */,
+ HAL_PHYRX_HE_SIG_A_MU_UL = 97 /* 0x61 */,
+ HAL_PHYRX_HE_SIG_B1_MU = 98 /* 0x62 */,
+ HAL_PHYRX_HE_SIG_B2_MU = 99 /* 0x63 */,
+ HAL_PHYRX_HE_SIG_B2_OFDMA = 100 /* 0x64 */,
+ HAL_PHYRX_OTHER_RECEIVE_INFO = 101 /* 0x65 */,
+ HAL_PHYRX_COMMON_USER_INFO = 102 /* 0x66 */,
+ HAL_PHYRX_DATA_DONE = 103 /* 0x67 */,
+ HAL_RECEIVE_RSSI_INFO = 104 /* 0x68 */,
+ HAL_RECEIVE_USER_INFO = 105 /* 0x69 */,
+ HAL_MIMO_CONTROL_INFO = 106 /* 0x6a */,
+ HAL_RX_LOCATION_INFO = 107 /* 0x6b */,
+ HAL_COEX_TX_REQ = 108 /* 0x6c */,
+ HAL_DUMMY = 109 /* 0x6d */,
+ HAL_RX_TIMING_OFFSET_INFO = 110 /* 0x6e */,
+ HAL_EXAMPLE_TLV_32_NAME = 111 /* 0x6f */,
+ HAL_MPDU_LIMIT = 112 /* 0x70 */,
+ HAL_NA_LENGTH_END = 113 /* 0x71 */,
+ HAL_OLE_BUF_STATUS = 114 /* 0x72 */,
+ HAL_PCU_PPDU_SETUP_DONE = 115 /* 0x73 */,
+ HAL_PCU_PPDU_SETUP_END = 116 /* 0x74 */,
+ HAL_PCU_PPDU_SETUP_INIT = 117 /* 0x75 */,
+ HAL_PCU_PPDU_SETUP_START = 118 /* 0x76 */,
+ HAL_PDG_FES_SETUP = 119 /* 0x77 */,
+ HAL_PDG_RESPONSE = 120 /* 0x78 */,
+ HAL_PDG_TX_REQ = 121 /* 0x79 */,
+ HAL_SCH_WAIT_INSTR = 122 /* 0x7a */,
+ HAL_SCHEDULER_TLV = 123 /* 0x7b */,
+ HAL_TQM_FLOW_EMPTY_STATUS = 124 /* 0x7c */,
+ HAL_TQM_FLOW_NOT_EMPTY_STATUS = 125 /* 0x7d */,
+ HAL_TQM_GEN_MPDU_LENGTH_LIST = 126 /* 0x7e */,
+ HAL_TQM_GEN_MPDU_LENGTH_LIST_STATUS = 127 /* 0x7f */,
+ HAL_TQM_GEN_MPDUS = 128 /* 0x80 */,
+ HAL_TQM_GEN_MPDUS_STATUS = 129 /* 0x81 */,
+ HAL_TQM_REMOVE_MPDU = 130 /* 0x82 */,
+ HAL_TQM_REMOVE_MPDU_STATUS = 131 /* 0x83 */,
+ HAL_TQM_REMOVE_MSDU = 132 /* 0x84 */,
+ HAL_TQM_REMOVE_MSDU_STATUS = 133 /* 0x85 */,
+ HAL_TQM_UPDATE_TX_MPDU_COUNT = 134 /* 0x86 */,
+ HAL_TQM_WRITE_CMD = 135 /* 0x87 */,
+ HAL_OFDMA_TRIGGER_DETAILS = 136 /* 0x88 */,
+ HAL_TX_DATA = 137 /* 0x89 */,
+ HAL_TX_FES_SETUP = 138 /* 0x8a */,
+ HAL_RX_PACKET = 139 /* 0x8b */,
+ HAL_EXPECTED_RESPONSE = 140 /* 0x8c */,
+ HAL_TX_MPDU_END = 141 /* 0x8d */,
+ HAL_TX_MPDU_START = 142 /* 0x8e */,
+ HAL_TX_MSDU_END = 143 /* 0x8f */,
+ HAL_TX_MSDU_START = 144 /* 0x90 */,
+ HAL_TX_SW_MODE_SETUP = 145 /* 0x91 */,
+ HAL_TXPCU_BUFFER_STATUS = 146 /* 0x92 */,
+ HAL_TXPCU_USER_BUFFER_STATUS = 147 /* 0x93 */,
+ HAL_DATA_TO_TIME_CONFIG = 148 /* 0x94 */,
+ HAL_EXAMPLE_USER_TLV_32 = 149 /* 0x95 */,
+ HAL_MPDU_INFO = 150 /* 0x96 */,
+ HAL_PDG_USER_SETUP = 151 /* 0x97 */,
+ HAL_TX_11AH_SETUP = 152 /* 0x98 */,
+ HAL_REO_UPDATE_RX_REO_QUEUE_STATUS = 153 /* 0x99 */,
+ HAL_TX_PEER_ENTRY = 154 /* 0x9a */,
+ HAL_TX_RAW_OR_NATIVE_FRAME_SETUP = 155 /* 0x9b */,
+ HAL_EXAMPLE_STRUCT_NAME = 156 /* 0x9c */,
+ HAL_PCU_PPDU_SETUP_END_INFO = 157 /* 0x9d */,
+ HAL_PPDU_RATE_SETTING = 158 /* 0x9e */,
+ HAL_PROT_RATE_SETTING = 159 /* 0x9f */,
+ HAL_RX_MPDU_DETAILS = 160 /* 0xa0 */,
+ HAL_EXAMPLE_USER_TLV_42 = 161 /* 0xa1 */,
+ HAL_RX_MSDU_LINK = 162 /* 0xa2 */,
+ HAL_RX_REO_QUEUE = 163 /* 0xa3 */,
+ HAL_ADDR_SEARCH_ENTRY = 164 /* 0xa4 */,
+ HAL_SCHEDULER_CMD = 165 /* 0xa5 */,
+ HAL_TX_FLUSH = 166 /* 0xa6 */,
+ HAL_TQM_ENTRANCE_RING = 167 /* 0xa7 */,
+ HAL_TX_DATA_WORD = 168 /* 0xa8 */,
+ HAL_TX_MPDU_DETAILS = 169 /* 0xa9 */,
+ HAL_TX_MPDU_LINK = 170 /* 0xaa */,
+ HAL_TX_MPDU_LINK_PTR = 171 /* 0xab */,
+ HAL_TX_MPDU_QUEUE_HEAD = 172 /* 0xac */,
+ HAL_TX_MPDU_QUEUE_EXT = 173 /* 0xad */,
+ HAL_TX_MPDU_QUEUE_EXT_PTR = 174 /* 0xae */,
+ HAL_TX_MSDU_DETAILS = 175 /* 0xaf */,
+ HAL_TX_MSDU_EXTENSION = 176 /* 0xb0 */,
+ HAL_TX_MSDU_FLOW = 177 /* 0xb1 */,
+ HAL_TX_MSDU_LINK = 178 /* 0xb2 */,
+ HAL_TX_MSDU_LINK_ENTRY_PTR = 179 /* 0xb3 */,
+ HAL_RESPONSE_RATE_SETTING = 180 /* 0xb4 */,
+ HAL_TXPCU_BUFFER_BASICS = 181 /* 0xb5 */,
+ HAL_UNIFORM_DESCRIPTOR_HEADER = 182 /* 0xb6 */,
+ HAL_UNIFORM_TQM_CMD_HEADER = 183 /* 0xb7 */,
+ HAL_UNIFORM_TQM_STATUS_HEADER = 184 /* 0xb8 */,
+ HAL_USER_RATE_SETTING = 185 /* 0xb9 */,
+ HAL_WBM_BUFFER_RING = 186 /* 0xba */,
+ HAL_WBM_LINK_DESCRIPTOR_RING = 187 /* 0xbb */,
+ HAL_WBM_RELEASE_RING = 188 /* 0xbc */,
+ HAL_TX_FLUSH_REQ = 189 /* 0xbd */,
+ HAL_RX_MSDU_DETAILS = 190 /* 0xbe */,
+ HAL_TQM_WRITE_CMD_STATUS = 191 /* 0xbf */,
+ HAL_TQM_GET_MPDU_QUEUE_STATS = 192 /* 0xc0 */,
+ HAL_TQM_GET_MSDU_FLOW_STATS = 193 /* 0xc1 */,
+ HAL_EXAMPLE_USER_CTLV_32 = 194 /* 0xc2 */,
+ HAL_TX_FES_STATUS_START = 195 /* 0xc3 */,
+ HAL_TX_FES_STATUS_USER_PPDU = 196 /* 0xc4 */,
+ HAL_TX_FES_STATUS_USER_RESPONSE = 197 /* 0xc5 */,
+ HAL_TX_FES_STATUS_END = 198 /* 0xc6 */,
+ HAL_RX_TRIG_INFO = 199 /* 0xc7 */,
+ HAL_RXPCU_TX_SETUP_CLEAR = 200 /* 0xc8 */,
+ HAL_RX_FRAME_BITMAP_REQ = 201 /* 0xc9 */,
+ HAL_RX_FRAME_BITMAP_ACK = 202 /* 0xca */,
+ HAL_COEX_RX_STATUS = 203 /* 0xcb */,
+ HAL_RX_START_PARAM = 204 /* 0xcc */,
+ HAL_RX_PPDU_START = 205 /* 0xcd */,
+ HAL_RX_PPDU_END = 206 /* 0xce */,
+ HAL_RX_MPDU_START = 207 /* 0xcf */,
+ HAL_RX_MPDU_END = 208 /* 0xd0 */,
+ HAL_RX_MSDU_START = 209 /* 0xd1 */,
+ HAL_RX_MSDU_END = 210 /* 0xd2 */,
+ HAL_RX_ATTENTION = 211 /* 0xd3 */,
+ HAL_RECEIVED_RESPONSE_INFO = 212 /* 0xd4 */,
+ HAL_RX_PHY_SLEEP = 213 /* 0xd5 */,
+ HAL_RX_HEADER = 214 /* 0xd6 */,
+ HAL_RX_PEER_ENTRY = 215 /* 0xd7 */,
+ HAL_RX_FLUSH = 216 /* 0xd8 */,
+ HAL_RX_RESPONSE_REQUIRED_INFO = 217 /* 0xd9 */,
+ HAL_RX_FRAMELESS_BAR_DETAILS = 218 /* 0xda */,
+ HAL_TQM_GET_MPDU_QUEUE_STATS_STATUS = 219 /* 0xdb */,
+ HAL_TQM_GET_MSDU_FLOW_STATS_STATUS = 220 /* 0xdc */,
+ HAL_TX_CBF_INFO = 221 /* 0xdd */,
+ HAL_PCU_PPDU_SETUP_USER = 222 /* 0xde */,
+ HAL_RX_MPDU_PCU_START = 223 /* 0xdf */,
+ HAL_RX_PM_INFO = 224 /* 0xe0 */,
+ HAL_RX_USER_PPDU_END = 225 /* 0xe1 */,
+ HAL_RX_PRE_PPDU_START = 226 /* 0xe2 */,
+ HAL_RX_PREAMBLE = 227 /* 0xe3 */,
+ HAL_TX_FES_SETUP_COMPLETE = 228 /* 0xe4 */,
+ HAL_TX_LAST_MPDU_FETCHED = 229 /* 0xe5 */,
+ HAL_TXDMA_STOP_REQUEST = 230 /* 0xe6 */,
+ HAL_RXPCU_SETUP = 231 /* 0xe7 */,
+ HAL_RXPCU_USER_SETUP = 232 /* 0xe8 */,
+ HAL_TX_FES_STATUS_ACK_OR_BA = 233 /* 0xe9 */,
+ HAL_TQM_ACKED_MPDU = 234 /* 0xea */,
+ HAL_COEX_TX_RESP = 235 /* 0xeb */,
+ HAL_COEX_TX_STATUS = 236 /* 0xec */,
+ HAL_MACTX_COEX_PHY_CTRL = 237 /* 0xed */,
+ HAL_COEX_STATUS_BROADCAST = 238 /* 0xee */,
+ HAL_RESPONSE_START_STATUS = 239 /* 0xef */,
+ HAL_RESPONSE_END_STATUS = 240 /* 0xf0 */,
+ HAL_CRYPTO_STATUS = 241 /* 0xf1 */,
+ HAL_RECEIVED_TRIGGER_INFO = 242 /* 0xf2 */,
+ HAL_REO_ENTRANCE_RING = 243 /* 0xf3 */,
+ HAL_RX_MPDU_LINK = 244 /* 0xf4 */,
+ HAL_COEX_TX_STOP_CTRL = 245 /* 0xf5 */,
+ HAL_RX_PPDU_ACK_REPORT = 246 /* 0xf6 */,
+ HAL_RX_PPDU_NO_ACK_REPORT = 247 /* 0xf7 */,
+ HAL_SCH_COEX_STATUS = 248 /* 0xf8 */,
+ HAL_SCHEDULER_COMMAND_STATUS = 249 /* 0xf9 */,
+ HAL_SCHEDULER_RX_PPDU_NO_RESPONSE_STATUS = 250 /* 0xfa */,
+ HAL_TX_FES_STATUS_PROT = 251 /* 0xfb */,
+ HAL_TX_FES_STATUS_START_PPDU = 252 /* 0xfc */,
+ HAL_TX_FES_STATUS_START_PROT = 253 /* 0xfd */,
+ HAL_TXPCU_PHYTX_DEBUG32 = 254 /* 0xfe */,
+ HAL_TXPCU_PHYTX_OTHER_TRANSMIT_INFO32 = 255 /* 0xff */,
+ HAL_TX_MPDU_COUNT_TRANSFER_END = 256 /* 0x100 */,
+ HAL_WHO_ANCHOR_OFFSET = 257 /* 0x101 */,
+ HAL_WHO_ANCHOR_VALUE = 258 /* 0x102 */,
+ HAL_WHO_CCE_INFO = 259 /* 0x103 */,
+ HAL_WHO_COMMIT = 260 /* 0x104 */,
+ HAL_WHO_COMMIT_DONE = 261 /* 0x105 */,
+ HAL_WHO_FLUSH = 262 /* 0x106 */,
+ HAL_WHO_L2_LLC = 263 /* 0x107 */,
+ HAL_WHO_L2_PAYLOAD = 264 /* 0x108 */,
+ HAL_WHO_L3_CHECKSUM = 265 /* 0x109 */,
+ HAL_WHO_L3_INFO = 266 /* 0x10a */,
+ HAL_WHO_L4_CHECKSUM = 267 /* 0x10b */,
+ HAL_WHO_L4_INFO = 268 /* 0x10c */,
+ HAL_WHO_MSDU = 269 /* 0x10d */,
+ HAL_WHO_MSDU_MISC = 270 /* 0x10e */,
+ HAL_WHO_PACKET_DATA = 271 /* 0x10f */,
+ HAL_WHO_PACKET_HDR = 272 /* 0x110 */,
+ HAL_WHO_PPDU_END = 273 /* 0x111 */,
+ HAL_WHO_PPDU_START = 274 /* 0x112 */,
+ HAL_WHO_TSO = 275 /* 0x113 */,
+ HAL_WHO_WMAC_HEADER_PV0 = 276 /* 0x114 */,
+ HAL_WHO_WMAC_HEADER_PV1 = 277 /* 0x115 */,
+ HAL_WHO_WMAC_IV = 278 /* 0x116 */,
+ HAL_MPDU_INFO_END = 279 /* 0x117 */,
+ HAL_MPDU_INFO_BITMAP = 280 /* 0x118 */,
+ HAL_TX_QUEUE_EXTENSION = 281 /* 0x119 */,
+ HAL_RX_PEER_ENTRY_DETAILS = 282 /* 0x11a */,
+ HAL_RX_REO_QUEUE_REFERENCE = 283 /* 0x11b */,
+ HAL_RX_REO_QUEUE_EXT = 284 /* 0x11c */,
+ HAL_SCHEDULER_SELFGEN_RESPONSE_STATUS = 285 /* 0x11d */,
+ HAL_TQM_UPDATE_TX_MPDU_COUNT_STATUS = 286 /* 0x11e */,
+ HAL_TQM_ACKED_MPDU_STATUS = 287 /* 0x11f */,
+ HAL_TQM_ADD_MSDU_STATUS = 288 /* 0x120 */,
+ HAL_RX_MPDU_LINK_PTR = 289 /* 0x121 */,
+ HAL_REO_DESTINATION_RING = 290 /* 0x122 */,
+ HAL_TQM_LIST_GEN_DONE = 291 /* 0x123 */,
+ HAL_WHO_TERMINATE = 292 /* 0x124 */,
+ HAL_TX_LAST_MPDU_END = 293 /* 0x125 */,
+ HAL_TX_CV_DATA = 294 /* 0x126 */,
+ HAL_TCL_ENTRANCE_FROM_PPE_RING = 295 /* 0x127 */,
+ HAL_PPDU_TX_END = 296 /* 0x128 */,
+ HAL_PROT_TX_END = 297 /* 0x129 */,
+ HAL_PDG_RESPONSE_RATE_SETTING = 298 /* 0x12a */,
+ HAL_MPDU_INFO_GLOBAL_END = 299 /* 0x12b */,
+ HAL_TQM_SCH_INSTR_GLOBAL_END = 300 /* 0x12c */,
+ HAL_RX_PPDU_END_USER_STATS = 301 /* 0x12d */,
+ HAL_RX_PPDU_END_USER_STATS_EXT = 302 /* 0x12e */,
+ HAL_NO_ACK_REPORT = 303 /* 0x12f */,
+ HAL_ACK_REPORT = 304 /* 0x130 */,
+ HAL_UNIFORM_REO_CMD_HEADER = 305 /* 0x131 */,
+ HAL_REO_GET_QUEUE_STATS = 306 /* 0x132 */,
+ HAL_REO_FLUSH_QUEUE = 307 /* 0x133 */,
+ HAL_REO_FLUSH_CACHE = 308 /* 0x134 */,
+ HAL_REO_UNBLOCK_CACHE = 309 /* 0x135 */,
+ HAL_UNIFORM_REO_STATUS_HEADER = 310 /* 0x136 */,
+ HAL_REO_GET_QUEUE_STATS_STATUS = 311 /* 0x137 */,
+ HAL_REO_FLUSH_QUEUE_STATUS = 312 /* 0x138 */,
+ HAL_REO_FLUSH_CACHE_STATUS = 313 /* 0x139 */,
+ HAL_REO_UNBLOCK_CACHE_STATUS = 314 /* 0x13a */,
+ HAL_TQM_FLUSH_CACHE = 315 /* 0x13b */,
+ HAL_TQM_UNBLOCK_CACHE = 316 /* 0x13c */,
+ HAL_TQM_FLUSH_CACHE_STATUS = 317 /* 0x13d */,
+ HAL_TQM_UNBLOCK_CACHE_STATUS = 318 /* 0x13e */,
+ HAL_RX_PPDU_END_STATUS_DONE = 319 /* 0x13f */,
+ HAL_RX_STATUS_BUFFER_DONE = 320 /* 0x140 */,
+ HAL_BUFFER_ADDR_INFO = 321 /* 0x141 */,
+ HAL_RX_MSDU_DESC_INFO = 322 /* 0x142 */,
+ HAL_RX_MPDU_DESC_INFO = 323 /* 0x143 */,
+ HAL_TCL_DATA_CMD = 324 /* 0x144 */,
+ HAL_TCL_GSE_CMD = 325 /* 0x145 */,
+ HAL_TCL_EXIT_BASE = 326 /* 0x146 */,
+ HAL_TCL_COMPACT_EXIT_RING = 327 /* 0x147 */,
+ HAL_TCL_REGULAR_EXIT_RING = 328 /* 0x148 */,
+ HAL_TCL_EXTENDED_EXIT_RING = 329 /* 0x149 */,
+ HAL_UPLINK_COMMON_INFO = 330 /* 0x14a */,
+ HAL_UPLINK_USER_SETUP_INFO = 331 /* 0x14b */,
+ HAL_TX_DATA_SYNC = 332 /* 0x14c */,
+ HAL_PHYRX_CBF_READ_REQUEST_ACK = 333 /* 0x14d */,
+ HAL_TCL_STATUS_RING = 334 /* 0x14e */,
+ HAL_TQM_GET_MPDU_HEAD_INFO = 335 /* 0x14f */,
+ HAL_TQM_SYNC_CMD = 336 /* 0x150 */,
+ HAL_TQM_GET_MPDU_HEAD_INFO_STATUS = 337 /* 0x151 */,
+ HAL_TQM_SYNC_CMD_STATUS = 338 /* 0x152 */,
+ HAL_TQM_THRESHOLD_DROP_NOTIFICATION_STATUS = 339 /* 0x153 */,
+ HAL_TQM_DESCRIPTOR_THRESHOLD_REACHED_STATUS = 340 /* 0x154 */,
+ HAL_REO_FLUSH_TIMEOUT_LIST = 341 /* 0x155 */,
+ HAL_REO_FLUSH_TIMEOUT_LIST_STATUS = 342 /* 0x156 */,
+ HAL_REO_TO_PPE_RING = 343 /* 0x157 */,
+ HAL_RX_MPDU_INFO = 344 /* 0x158 */,
+ HAL_REO_DESCRIPTOR_THRESHOLD_REACHED_STATUS = 345 /* 0x159 */,
+ HAL_SCHEDULER_RX_SIFS_RESPONSE_TRIGGER_STATUS = 346 /* 0x15a */,
+ HAL_EXAMPLE_USER_TLV_32_NAME = 347 /* 0x15b */,
+ HAL_RX_PPDU_START_USER_INFO = 348 /* 0x15c */,
+ HAL_RX_RXPCU_CLASSIFICATION_OVERVIEW = 349 /* 0x15d */,
+ HAL_RX_RING_MASK = 350 /* 0x15e */,
+ HAL_WHO_CLASSIFY_INFO = 351 /* 0x15f */,
+ HAL_TXPT_CLASSIFY_INFO = 352 /* 0x160 */,
+ HAL_RXPT_CLASSIFY_INFO = 353 /* 0x161 */,
+ HAL_TX_FLOW_SEARCH_ENTRY = 354 /* 0x162 */,
+ HAL_RX_FLOW_SEARCH_ENTRY = 355 /* 0x163 */,
+ HAL_RECEIVED_TRIGGER_INFO_DETAILS = 356 /* 0x164 */,
+ HAL_COEX_MAC_NAP = 357 /* 0x165 */,
+ HAL_MACRX_ABORT_REQUEST_INFO = 358 /* 0x166 */,
+ HAL_MACTX_ABORT_REQUEST_INFO = 359 /* 0x167 */,
+ HAL_PHYRX_ABORT_REQUEST_INFO = 360 /* 0x168 */,
+ HAL_PHYTX_ABORT_REQUEST_INFO = 361 /* 0x169 */,
+ HAL_RXPCU_PPDU_END_INFO = 362 /* 0x16a */,
+ HAL_WHO_MESH_CONTROL = 363 /* 0x16b */,
+ HAL_L_SIG_A_INFO = 364 /* 0x16c */,
+ HAL_L_SIG_B_INFO = 365 /* 0x16d */,
+ HAL_HT_SIG_INFO = 366 /* 0x16e */,
+ HAL_VHT_SIG_A_INFO = 367 /* 0x16f */,
+ HAL_VHT_SIG_B_SU20_INFO = 368 /* 0x170 */,
+ HAL_VHT_SIG_B_SU40_INFO = 369 /* 0x171 */,
+ HAL_VHT_SIG_B_SU80_INFO = 370 /* 0x172 */,
+ HAL_VHT_SIG_B_SU160_INFO = 371 /* 0x173 */,
+ HAL_VHT_SIG_B_MU20_INFO = 372 /* 0x174 */,
+ HAL_VHT_SIG_B_MU40_INFO = 373 /* 0x175 */,
+ HAL_VHT_SIG_B_MU80_INFO = 374 /* 0x176 */,
+ HAL_VHT_SIG_B_MU160_INFO = 375 /* 0x177 */,
+ HAL_SERVICE_INFO = 376 /* 0x178 */,
+ HAL_HE_SIG_A_SU_INFO = 377 /* 0x179 */,
+ HAL_HE_SIG_A_MU_DL_INFO = 378 /* 0x17a */,
+ HAL_HE_SIG_A_MU_UL_INFO = 379 /* 0x17b */,
+ HAL_HE_SIG_B1_MU_INFO = 380 /* 0x17c */,
+ HAL_HE_SIG_B2_MU_INFO = 381 /* 0x17d */,
+ HAL_HE_SIG_B2_OFDMA_INFO = 382 /* 0x17e */,
+ HAL_PDG_SW_MODE_BW_START = 383 /* 0x17f */,
+ HAL_PDG_SW_MODE_BW_END = 384 /* 0x180 */,
+ HAL_PDG_WAIT_FOR_MAC_REQUEST = 385 /* 0x181 */,
+ HAL_PDG_WAIT_FOR_PHY_REQUEST = 386 /* 0x182 */,
+ HAL_SCHEDULER_END = 387 /* 0x183 */,
+ HAL_PEER_TABLE_ENTRY = 388 /* 0x184 */,
+ HAL_SW_PEER_INFO = 389 /* 0x185 */,
+ HAL_RXOLE_CCE_CLASSIFY_INFO = 390 /* 0x186 */,
+ HAL_TCL_CCE_CLASSIFY_INFO = 391 /* 0x187 */,
+ HAL_RXOLE_CCE_INFO = 392 /* 0x188 */,
+ HAL_TCL_CCE_INFO = 393 /* 0x189 */,
+ HAL_TCL_CCE_SUPERRULE = 394 /* 0x18a */,
+ HAL_CCE_RULE = 395 /* 0x18b */,
+ HAL_RX_PPDU_START_DROPPED = 396 /* 0x18c */,
+ HAL_RX_PPDU_END_DROPPED = 397 /* 0x18d */,
+ HAL_RX_PPDU_END_STATUS_DONE_DROPPED = 398 /* 0x18e */,
+ HAL_RX_MPDU_START_DROPPED = 399 /* 0x18f */,
+ HAL_RX_MSDU_START_DROPPED = 400 /* 0x190 */,
+ HAL_RX_MSDU_END_DROPPED = 401 /* 0x191 */,
+ HAL_RX_MPDU_END_DROPPED = 402 /* 0x192 */,
+ HAL_RX_ATTENTION_DROPPED = 403 /* 0x193 */,
+ HAL_TXPCU_USER_SETUP = 404 /* 0x194 */,
+ HAL_RXPCU_USER_SETUP_EXT = 405 /* 0x195 */,
+ HAL_CE_SRC_DESC = 406 /* 0x196 */,
+ HAL_CE_STAT_DESC = 407 /* 0x197 */,
+ HAL_RXOLE_CCE_SUPERRULE = 408 /* 0x198 */,
+ HAL_TX_RATE_STATS_INFO = 409 /* 0x199 */,
+ HAL_CMD_PART_0_END = 410 /* 0x19a */,
+ HAL_MACTX_SYNTH_ON = 411 /* 0x19b */,
+ HAL_SCH_CRITICAL_TLV_REFERENCE = 412 /* 0x19c */,
+ HAL_TQM_MPDU_GLOBAL_START = 413 /* 0x19d */,
+ HAL_EXAMPLE_TLV_32 = 414 /* 0x19e */,
+ HAL_TQM_UPDATE_TX_MSDU_FLOW = 415 /* 0x19f */,
+ HAL_TQM_UPDATE_TX_MPDU_QUEUE_HEAD = 416 /* 0x1a0 */,
+ HAL_TQM_UPDATE_TX_MSDU_FLOW_STATUS = 417 /* 0x1a1 */,
+ HAL_TQM_UPDATE_TX_MPDU_QUEUE_HEAD_STATUS = 418 /* 0x1a2 */,
+ HAL_REO_UPDATE_RX_REO_QUEUE = 419 /* 0x1a3 */,
+ HAL_CE_DST_DESC = 420 /* 0x1a4 */,
+ HAL_TLV_BASE = 511 /* 0x1ff */,
+};
+
+#define HAL_TLV_HDR_TAG GENMASK(9, 1)
+#define HAL_TLV_HDR_LEN GENMASK(25, 10)
+
+#define HAL_TLV_ALIGN 4
+
+struct hal_tlv_hdr {
+ u32 tl;
+ u8 value[0];
+} __packed;
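
A sketch of how a consumer walks a stream of these TLVs ('buf' and 'size' are hypothetical; the 4-byte padding rule is the HAL_TLV_ALIGN above):

	/* Illustrative TLV walk: each TLV is a 4-byte header followed by
	 * 'len' bytes of value, padded to 4-byte alignment.
	 */
	u8 *ptr = buf, *end = buf + size;

	while (ptr + sizeof(struct hal_tlv_hdr) <= end) {
		struct hal_tlv_hdr *tlv = (struct hal_tlv_hdr *)ptr;
		u16 tag = FIELD_GET(HAL_TLV_HDR_TAG, tlv->tl);
		u16 len = FIELD_GET(HAL_TLV_HDR_LEN, tlv->tl);

		/* dispatch on 'tag' (enum hal_tlv_tag); payload at tlv->value */

		ptr += sizeof(*tlv) + ALIGN(len, HAL_TLV_ALIGN);
	}
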
+
+#define RX_MPDU_DESC_INFO0_MSDU_COUNT GENMASK(7, 0)
+#define RX_MPDU_DESC_INFO0_SEQ_NUM GENMASK(19, 8)
+#define RX_MPDU_DESC_INFO0_FRAG_FLAG BIT(20)
+#define RX_MPDU_DESC_INFO0_MPDU_RETRY BIT(21)
+#define RX_MPDU_DESC_INFO0_AMPDU_FLAG BIT(22)
+#define RX_MPDU_DESC_INFO0_BAR_FRAME BIT(23)
+#define RX_MPDU_DESC_INFO0_VALID_PN BIT(24)
+#define RX_MPDU_DESC_INFO0_VALID_SA BIT(25)
+#define RX_MPDU_DESC_INFO0_SA_IDX_TIMEOUT BIT(26)
+#define RX_MPDU_DESC_INFO0_VALID_DA BIT(27)
+#define RX_MPDU_DESC_INFO0_DA_MCBC BIT(28)
+#define RX_MPDU_DESC_INFO0_DA_IDX_TIMEOUT BIT(29)
+#define RX_MPDU_DESC_INFO0_RAW_MPDU BIT(30)
+
+struct rx_mpdu_desc {
+ u32 info0; /* %RX_MPDU_DESC_INFO */
+ u32 meta_data;
+} __packed;
+
+/* rx_mpdu_desc
+ * Producer: RXDMA
+ * Consumer: REO/SW/FW
+ *
+ * msdu_count
+ * The number of MSDUs within the MPDU
+ *
+ * mpdu_sequence_number
+ * The field can have two different meanings based on the setting
+ * of field 'bar_frame'. If 'bar_frame' is set, it means the MPDU
+ * start sequence number from the BAR frame; otherwise it means
+ * the MPDU sequence number of the received frame.
+ *
+ * fragment_flag
+ * When set, this MPDU is a fragment and REO should forward this
+ * fragment MPDU to the REO destination ring without any reorder
+ * checks, pn checks or bitmap update. This implies that REO is
+ * forwarding the pointer to the MSDU link descriptor.
+ *
+ * mpdu_retry_bit
+ * The retry bit setting from the MPDU header of the received frame
+ *
+ * ampdu_flag
+ * Indicates the MPDU was received as part of an A-MPDU.
+ *
+ * bar_frame
+ * Indicates the received frame is a BAR frame. After processing,
+ * this frame shall be pushed to SW or deleted.
+ *
+ * valid_pn
+ * When not set, REO will not perform a PN sequence number check.
+ *
+ * valid_sa
+ * Indicates OLE found a valid SA entry for all MSDUs in this MPDU.
+ *
+ * sa_idx_timeout
+ * Indicates that at least 1 MSDU within the MPDU has an unsuccessful
+ * MAC source address search due to the expiration of the search timer.
+ *
+ * valid_da
+ * When set, OLE found a valid DA entry for all MSDUs in this MPDU.
+ *
+ * da_mcbc
+ * Field only valid if valid_da is set. Indicates at least one of
+ * the DA addresses is a Multicast or Broadcast address.
+ *
+ * da_idx_timeout
+ * Indicates that at least 1 MSDU within the MPDU has an unsuccessful
+ * MAC destination address search due to the expiration of the
+ * search timer.
+ *
+ * raw_mpdu
+ * Field only valid when first_msdu_in_mpdu_flag is set. Indicates
+ * the contents in the MSDU buffer contains a 'RAW' MPDU.
+ */
+
+enum hal_rx_msdu_desc_reo_dest_ind {
+ HAL_RX_MSDU_DESC_REO_DEST_IND_TCL,
+ HAL_RX_MSDU_DESC_REO_DEST_IND_SW1,
+ HAL_RX_MSDU_DESC_REO_DEST_IND_SW2,
+ HAL_RX_MSDU_DESC_REO_DEST_IND_SW3,
+ HAL_RX_MSDU_DESC_REO_DEST_IND_SW4,
+ HAL_RX_MSDU_DESC_REO_DEST_IND_RELEASE,
+ HAL_RX_MSDU_DESC_REO_DEST_IND_FW,
+};
+
+#define RX_MSDU_DESC_INFO0_FIRST_MSDU_IN_MPDU BIT(0)
+#define RX_MSDU_DESC_INFO0_LAST_MSDU_IN_MPDU BIT(1)
+#define RX_MSDU_DESC_INFO0_MSDU_CONTINUATION BIT(2)
+#define RX_MSDU_DESC_INFO0_MSDU_LENGTH GENMASK(16, 3)
+#define RX_MSDU_DESC_INFO0_REO_DEST_IND GENMASK(21, 17)
+#define RX_MSDU_DESC_INFO0_MSDU_DROP BIT(22)
+#define RX_MSDU_DESC_INFO0_VALID_SA BIT(23)
+#define RX_MSDU_DESC_INFO0_SA_IDX_TIMEOUT BIT(24)
+#define RX_MSDU_DESC_INFO0_VALID_DA BIT(25)
+#define RX_MSDU_DESC_INFO0_DA_MCBC BIT(26)
+#define RX_MSDU_DESC_INFO0_DA_IDX_TIMEOUT BIT(27)
+
+#define HAL_RX_MSDU_PKT_LENGTH_GET(val) \
+ (FIELD_GET(RX_MSDU_DESC_INFO0_MSDU_LENGTH, (val)))
+
+struct rx_msdu_desc {
+ u32 info0;
+ u32 rsvd0;
+} __packed;
+
+/* rx_msdu_desc
+ *
+ * first_msdu_in_mpdu
+ * Indicates first msdu in mpdu.
+ *
+ * last_msdu_in_mpdu
+ * Indicates the last MSDU in the MPDU. This flag can be set only
+ * when 'msdu_continuation' is 0. This implies that when an MSDU
+ * is spread out over multiple buffers and thus msdu_continuation
+ * is set, 'last_msdu_in_mpdu' can be set only for the very last
+ * buffer of the MSDU.
+ *
+ * When both first_msdu_in_mpdu and last_msdu_in_mpdu are set,
+ * the MPDU that this MSDU belongs to only contains a single MSDU.
+ *
+ * msdu_continuation
+ * When set, this MSDU buffer was not able to hold the entire MSDU.
+ * The next buffer will therefore contain additional information
+ * related to this MSDU.
+ *
+ * msdu_length
+ * Field is only valid in combination with the 'first_msdu_in_mpdu'
+ * being set. Full MSDU length in bytes after decapsulation. This
+ * field is still valid for MPDU frames without A-MSDU. It still
+ * represents the MSDU length after decapsulation. In the case of
+ * RAW MPDUs, it indicates the length of the entire MPDU (without
+ * the FCS field).
+ *
+ * reo_destination_indication
+ * The id of the reo exit ring where the msdu frame shall push
+ * after (MPDU level) reordering has finished. Values are defined
+ * in enum %HAL_RX_MSDU_DESC_REO_DEST_IND_.
+ *
+ * msdu_drop
+ * Indicates that REO shall drop this MSDU and not forward it to
+ * any other ring.
+ *
+ * valid_sa
+ * Indicates OLE found a valid SA entry for this MSDU.
+ *
+ * sa_idx_timeout
+ * Indicates an unsuccessful MAC source address search due to
+ * the expiration of the search timer for this MSDU.
+ *
+ * valid_da
+ * When set, OLE found a valid DA entry for this MSDU.
+ *
+ * da_mcbc
+ * Field only valid if valid_da is set. Indicates the DA address
+ * is a Multicast or Broadcast address for this MSDU.
+ *
+ * da_idx_timeout
+ * Indicates an unsuccessful MAC destination address search due
+ * to the expiration of the search timer for this MSDU.
+ */
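
Decoding is the symmetric FIELD_GET() on info0; a short sketch ('desc' is a hypothetical pointer to a received descriptor, and HAL_RX_MSDU_PKT_LENGTH_GET above is the same length extraction wrapped as a macro):

	u32 msdu_len = FIELD_GET(RX_MSDU_DESC_INFO0_MSDU_LENGTH, desc->info0);
	u8 reo_dest = FIELD_GET(RX_MSDU_DESC_INFO0_REO_DEST_IND, desc->info0);
	bool cont = !!(desc->info0 & RX_MSDU_DESC_INFO0_MSDU_CONTINUATION);
	bool first = !!(desc->info0 & RX_MSDU_DESC_INFO0_FIRST_MSDU_IN_MPDU);
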
+
+enum hal_reo_dest_ring_buffer_type {
+ HAL_REO_DEST_RING_BUFFER_TYPE_MSDU,
+ HAL_REO_DEST_RING_BUFFER_TYPE_LINK_DESC,
+};
+
+enum hal_reo_dest_ring_push_reason {
+ HAL_REO_DEST_RING_PUSH_REASON_ERR_DETECTED,
+ HAL_REO_DEST_RING_PUSH_REASON_ROUTING_INSTRUCTION,
+};
+
+enum hal_reo_dest_ring_error_code {
+ HAL_REO_DEST_RING_ERROR_CODE_DESC_ADDR_ZERO,
+ HAL_REO_DEST_RING_ERROR_CODE_DESC_INVALID,
+ HAL_REO_DEST_RING_ERROR_CODE_AMPDU_IN_NON_BA,
+ HAL_REO_DEST_RING_ERROR_CODE_NON_BA_DUPLICATE,
+ HAL_REO_DEST_RING_ERROR_CODE_BA_DUPLICATE,
+ HAL_REO_DEST_RING_ERROR_CODE_FRAME_2K_JUMP,
+ HAL_REO_DEST_RING_ERROR_CODE_BAR_2K_JUMP,
+ HAL_REO_DEST_RING_ERROR_CODE_FRAME_OOR,
+ HAL_REO_DEST_RING_ERROR_CODE_BAR_OOR,
+ HAL_REO_DEST_RING_ERROR_CODE_NO_BA_SESSION,
+ HAL_REO_DEST_RING_ERROR_CODE_FRAME_SN_EQUALS_SSN,
+ HAL_REO_DEST_RING_ERROR_CODE_PN_CHECK_FAILED,
+ HAL_REO_DEST_RING_ERROR_CODE_2K_ERR_FLAG_SET,
+ HAL_REO_DEST_RING_ERROR_CODE_PN_ERR_FLAG_SET,
+ HAL_REO_DEST_RING_ERROR_CODE_DESC_BLOCKED,
+ HAL_REO_DEST_RING_ERROR_CODE_MAX,
+};
+
+#define HAL_REO_DEST_RING_INFO0_QUEUE_ADDR_HI GENMASK(7, 0)
+#define HAL_REO_DEST_RING_INFO0_BUFFER_TYPE BIT(8)
+#define HAL_REO_DEST_RING_INFO0_PUSH_REASON GENMASK(10, 9)
+#define HAL_REO_DEST_RING_INFO0_ERROR_CODE GENMASK(15, 11)
+#define HAL_REO_DEST_RING_INFO0_RX_QUEUE_NUM GENMASK(31, 16)
+
+#define HAL_REO_DEST_RING_INFO1_REORDER_INFO_VALID BIT(0)
+#define HAL_REO_DEST_RING_INFO1_REORDER_OPCODE GENMASK(4, 1)
+#define HAL_REO_DEST_RING_INFO1_REORDER_SLOT_IDX GENMASK(12, 5)
+
+#define HAL_REO_DEST_RING_INFO2_RING_ID GENMASK(27, 20)
+#define HAL_REO_DEST_RING_INFO2_LOOPING_COUNT GENMASK(31, 28)
+
+struct hal_reo_dest_ring {
+ struct ath11k_buffer_addr buf_addr_info;
+ struct rx_mpdu_desc rx_mpdu_info;
+ struct rx_msdu_desc rx_msdu_info;
+ u32 queue_addr_lo;
+ u32 info0; /* %HAL_REO_DEST_RING_INFO0_ */
+ u32 info1; /* %HAL_REO_DEST_RING_INFO1_ */
+ u32 rsvd0;
+ u32 rsvd1;
+ u32 rsvd2;
+ u32 rsvd3;
+ u32 rsvd4;
+ u32 rsvd5;
+ u32 info2; /* %HAL_REO_DEST_RING_INFO2_ */
+} __packed;
+
+/* hal_reo_dest_ring
+ *
+ * Producer: RXDMA
+ * Consumer: REO/SW/FW
+ *
+ * buf_addr_info
+ * Details of the physical address of a buffer or MSDU
+ * link descriptor.
+ *
+ * rx_mpdu_info
+ * General information related to the MPDU that is passed
+ * on from REO entrance ring to the REO destination ring.
+ *
+ * rx_msdu_info
+ * General information related to the MSDU that is passed
+ * on from RXDMA all the way to the REO destination ring.
+ *
+ * queue_addr_lo
+ * Address (lower 32 bits) of the REO queue descriptor.
+ *
+ * queue_addr_hi
+ * Address (upper 8 bits) of the REO queue descriptor.
+ *
+ * buffer_type
+ * Indicates the type of address provided in the buf_addr_info.
+ * Values are defined in enum %HAL_REO_DEST_RING_BUFFER_TYPE_.
+ *
+ * push_reason
+ * Reason for pushing this frame to this exit ring. Values are
+ * defined in enum %HAL_REO_DEST_RING_PUSH_REASON_.
+ *
+ * error_code
+ * Valid only when 'push_reason' is set to 'error_detected'. All
+ * error codes are defined in enum %HAL_REO_DEST_RING_ERROR_CODE_.
+ *
+ * rx_queue_num
+ * Indicates the REO MPDU reorder queue id from which this frame
+ * originated.
+ *
+ * reorder_info_valid
+ * When set, REO has been instructed to not perform the actual
+ * re-ordering of frames for this queue, but just to insert
+ * the reorder opcodes.
+ *
+ * reorder_opcode
+ * Field is valid when 'reorder_info_valid' is set; it is also
+ * always populated for debug purposes.
+ *
+ * reorder_slot_idx
+ * Valid only when 'reorder_info_valid' is set.
+ *
+ * ring_id
+ * The buffer pointer ring id.
+ * 0 - Idle ring
+ * 1 - N refers to other rings.
+ *
+ * looping_count
+ * Indicates the number of times the producer of entries into
+ * this ring has looped around the ring.
+ */
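+
+/* Illustrative sketch, not part of the original patch (helper name
+ * hypothetical): the push_reason and error_code fields described above are
+ * meant to be read together when consuming this ring.
+ */
+static inline bool ath11k_hal_reo_dest_ring_err(struct hal_reo_dest_ring *desc,
+ u32 *err_code)
+{
+ if (FIELD_GET(HAL_REO_DEST_RING_INFO0_PUSH_REASON, desc->info0) !=
+     HAL_REO_DEST_RING_PUSH_REASON_ERR_DETECTED)
+  return false;
+
+ /* the error code is meaningful only on the error push reason */
+ *err_code = FIELD_GET(HAL_REO_DEST_RING_INFO0_ERROR_CODE, desc->info0);
+ return true;
+}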
+
+enum hal_reo_entr_rxdma_ecode {
+ HAL_REO_ENTR_RING_RXDMA_ECODE_OVERFLOW_ERR,
+ HAL_REO_ENTR_RING_RXDMA_ECODE_MPDU_LEN_ERR,
+ HAL_REO_ENTR_RING_RXDMA_ECODE_FCS_ERR,
+ HAL_REO_ENTR_RING_RXDMA_ECODE_DECRYPT_ERR,
+ HAL_REO_ENTR_RING_RXDMA_ECODE_TKIP_MIC_ERR,
+ HAL_REO_ENTR_RING_RXDMA_ECODE_UNECRYPTED_ERR,
+ HAL_REO_ENTR_RING_RXDMA_ECODE_MSDU_LEN_ERR,
+ HAL_REO_ENTR_RING_RXDMA_ECODE_MSDU_LIMIT_ERR,
+ HAL_REO_ENTR_RING_RXDMA_ECODE_WIFI_PARSE_ERR,
+ HAL_REO_ENTR_RING_RXDMA_ECODE_AMSDU_PARSE_ERR,
+ HAL_REO_ENTR_RING_RXDMA_ECODE_SA_TIMEOUT_ERR,
+ HAL_REO_ENTR_RING_RXDMA_ECODE_DA_TIMEOUT_ERR,
+ HAL_REO_ENTR_RING_RXDMA_ECODE_FLOW_TIMEOUT_ERR,
+ HAL_REO_ENTR_RING_RXDMA_ECODE_FLUSH_REQUEST_ERR,
+ HAL_REO_ENTR_RING_RXDMA_ECODE_MAX,
+};
+
+#define HAL_REO_ENTR_RING_INFO0_QUEUE_ADDR_HI GENMASK(7, 0)
+#define HAL_REO_ENTR_RING_INFO0_MPDU_BYTE_COUNT GENMASK(21, 8)
+#define HAL_REO_ENTR_RING_INFO0_DEST_IND GENMASK(26, 22)
+#define HAL_REO_ENTR_RING_INFO0_FRAMELESS_BAR BIT(27)
+
+#define HAL_REO_ENTR_RING_INFO1_RXDMA_PUSH_REASON GENMASK(1, 0)
+#define HAL_REO_ENTR_RING_INFO1_RXDMA_ERROR_CODE GENMASK(6, 2)
+
+struct hal_reo_entrance_ring {
+ struct ath11k_buffer_addr buf_addr_info;
+ struct rx_mpdu_desc rx_mpdu_info;
+ u32 queue_addr_lo;
+ u32 info0; /* %HAL_REO_ENTR_RING_INFO0_ */
+ u32 info1; /* %HAL_REO_ENTR_RING_INFO1_ */
+ u32 info2; /* %HAL_REO_DEST_RING_INFO2_ */
+} __packed;
+
+/* hal_reo_entrance_ring
+ *
+ * Producer: RXDMA
+ * Consumer: REO
+ *
+ * buf_addr_info
+ * Details of the physical address of a buffer or MSDU
+ * link descriptor.
+ *
+ * rx_mpdu_info
+ * General information related to the MPDU that is passed
+ * on from REO entrance ring to the REO destination ring.
+ *
+ * queue_addr_lo
+ * Address (lower 32 bits) of the REO queue descriptor.
+ *
+ * queue_addr_hi
+ * Address (upper 8 bits) of the REO queue descriptor.
+ *
+ * mpdu_byte_count
+ * An approximation of the number of bytes received in this MPDU.
+ * Used to keep stats on the amount of data flowing
+ * through a queue.
+ *
+ * reo_destination_indication
+ * The id of the REO exit ring to which the MSDU frame shall be
+ * pushed after (MPDU level) reordering has finished. Values are defined
+ * in enum %HAL_RX_MSDU_DESC_REO_DEST_IND_.
+ *
+ * frameless_bar
+ * Indicates that this REO entrance ring struct contains BAR info
+ * from a multi TID BAR frame. The original multi TID BAR frame
+ * itself contained all the REO info for the first TID, but all
+ * the subsequent TID info and their linkage to the REO descriptors
+ * is passed down as 'frameless' BAR info.
+ *
+ * The only fields valid in this descriptor when this bit is set
+ * are queue_addr_lo, queue_addr_hi, mpdu_sequence_number,
+ * bar_frame and peer_meta_data.
+ *
+ * rxdma_push_reason
+ * Reason for pushing this frame to this ring. Values are
+ * defined in enum %HAL_REO_DEST_RING_PUSH_REASON_.
+ *
+ * rxdma_error_code
+ * Valid only when 'rxdma_push_reason' is set to 'error_detected'.
+ * All error codes are defined in enum %HAL_REO_ENTR_RING_RXDMA_ECODE_.
+ *
+ * ring_id
+ * The buffer pointer ring id.
+ * 0 - Idle ring
+ * 1 - N refers to other rings.
+ *
+ * looping_count
+ * Indicates the number of times the producer of entries into
+ * this ring has looped around the ring.
+ */
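+
+/* Illustrative sketch, not part of the original patch (helper name
+ * hypothetical): extracting the RXDMA error code from info1; per the
+ * description above it is meaningful only when rxdma_push_reason reports
+ * an error.
+ */
+static inline bool
+ath11k_hal_reo_entr_ring_rxdma_err(struct hal_reo_entrance_ring *desc,
+ u32 *ecode)
+{
+ if (FIELD_GET(HAL_REO_ENTR_RING_INFO1_RXDMA_PUSH_REASON, desc->info1) !=
+     HAL_REO_DEST_RING_PUSH_REASON_ERR_DETECTED)
+  return false;
+
+ *ecode = FIELD_GET(HAL_REO_ENTR_RING_INFO1_RXDMA_ERROR_CODE, desc->info1);
+ return true;
+}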
+
+#define HAL_REO_CMD_HDR_INFO0_CMD_NUMBER GENMASK(15, 0)
+#define HAL_REO_CMD_HDR_INFO0_STATUS_REQUIRED BIT(16)
+
+struct hal_reo_cmd_hdr {
+ u32 info0;
+} __packed;
+
+#define HAL_REO_GET_QUEUE_STATS_INFO0_QUEUE_ADDR_HI GENMASK(7, 0)
+#define HAL_REO_GET_QUEUE_STATS_INFO0_CLEAR_STATS BIT(8)
+
+struct hal_reo_get_queue_stats {
+ struct hal_reo_cmd_hdr cmd;
+ u32 queue_addr_lo;
+ u32 info0;
+ u32 rsvd0[6];
+} __packed;
+
+/* hal_reo_get_queue_stats
+ * Producer: SW
+ * Consumer: REO
+ *
+ * cmd
+ * Details for command execution tracking purposes.
+ *
+ * queue_addr_lo
+ * Address (lower 32 bits) of the REO queue descriptor.
+ *
+ * queue_addr_hi
+ * Address (upper 8 bits) of the REO queue descriptor.
+ *
+ * clear_stats
+ * Clear stats setting. When set, clear the stats after
+ * generating the status.
+ *
+ * Following stats will be cleared.
+ * Timeout_count
+ * Forward_due_to_bar_count
+ * Duplicate_count
+ * Frames_in_order_count
+ * BAR_received_count
+ * MPDU_Frames_processed_count
+ * MSDU_Frames_processed_count
+ * Total_processed_byte_count
+ * Late_receive_MPDU_count
+ * window_jump_2k
+ * Hole_count
+ */
+
+#define HAL_REO_FLUSH_QUEUE_INFO0_DESC_ADDR_HI GENMASK(7, 0)
+#define HAL_REO_FLUSH_QUEUE_INFO0_BLOCK_DESC_ADDR BIT(8)
+#define HAL_REO_FLUSH_QUEUE_INFO0_BLOCK_RESRC_IDX GENMASK(10, 9)
+
+struct hal_reo_flush_queue {
+ struct hal_reo_cmd_hdr cmd;
+ u32 desc_addr_lo;
+ u32 info0;
+ u32 rsvd0[6];
+} __packed;
+
+#define HAL_REO_FLUSH_CACHE_INFO0_CACHE_ADDR_HI GENMASK(7, 0)
+#define HAL_REO_FLUSH_CACHE_INFO0_FWD_ALL_MPDUS BIT(8)
+#define HAL_REO_FLUSH_CACHE_INFO0_RELEASE_BLOCK_IDX BIT(9)
+#define HAL_REO_FLUSH_CACHE_INFO0_BLOCK_RESRC_IDX GENMASK(11, 10)
+#define HAL_REO_FLUSH_CACHE_INFO0_FLUSH_WO_INVALIDATE BIT(12)
+#define HAL_REO_FLUSH_CACHE_INFO0_BLOCK_CACHE_USAGE BIT(13)
+#define HAL_REO_FLUSH_CACHE_INFO0_FLUSH_ALL BIT(14)
+
+struct hal_reo_flush_cache {
+ struct hal_reo_cmd_hdr cmd;
+ u32 cache_addr_lo;
+ u32 info0;
+ u32 rsvd0[6];
+} __packed;
+
+#define HAL_TCL_DATA_CMD_INFO0_DESC_TYPE BIT(0)
+#define HAL_TCL_DATA_CMD_INFO0_EPD BIT(1)
+#define HAL_TCL_DATA_CMD_INFO0_ENCAP_TYPE GENMASK(3, 2)
+#define HAL_TCL_DATA_CMD_INFO0_ENCRYPT_TYPE GENMASK(7, 4)
+#define HAL_TCL_DATA_CMD_INFO0_SRC_BUF_SWAP BIT(8)
+#define HAL_TCL_DATA_CMD_INFO0_LNK_META_SWAP BIT(9)
+#define HAL_TCL_DATA_CMD_INFO0_SEARCH_TYPE GENMASK(13, 12)
+#define HAL_TCL_DATA_CMD_INFO0_ADDR_EN GENMASK(15, 14)
+#define HAL_TCL_DATA_CMD_INFO0_CMD_NUM GENMASK(31, 16)
+
+#define HAL_TCL_DATA_CMD_INFO1_DATA_LEN GENMASK(15, 0)
+#define HAL_TCL_DATA_CMD_INFO1_IP4_CKSUM_EN BIT(16)
+#define HAL_TCL_DATA_CMD_INFO1_UDP4_CKSUM_EN BIT(17)
+#define HAL_TCL_DATA_CMD_INFO1_UDP6_CKSUM_EN BIT(18)
+#define HAL_TCL_DATA_CMD_INFO1_TCP4_CKSUM_EN BIT(19)
+#define HAL_TCL_DATA_CMD_INFO1_TCP6_CKSUM_EN BIT(20)
+#define HAL_TCL_DATA_CMD_INFO1_TO_FW BIT(21)
+#define HAL_TCL_DATA_CMD_INFO1_PKT_OFFSET GENMASK(31, 23)
+
+#define HAL_TCL_DATA_CMD_INFO2_BUF_TIMESTAMP GENMASK(18, 0)
+#define HAL_TCL_DATA_CMD_INFO2_BUF_T_VALID BIT(19)
+#define HAL_TCL_DATA_CMD_INFO2_MESH_ENABLE BIT(20)
+#define HAL_TCL_DATA_CMD_INFO2_TID_OVERWRITE BIT(21)
+#define HAL_TCL_DATA_CMD_INFO2_TID GENMASK(25, 22)
+#define HAL_TCL_DATA_CMD_INFO2_LMAC_ID GENMASK(27, 26)
+
+#define HAL_TCL_DATA_CMD_INFO3_DSCP_TID_TABLE_IDX GENMASK(5, 0)
+#define HAL_TCL_DATA_CMD_INFO3_SEARCH_INDEX GENMASK(25, 6)
+#define HAL_TCL_DATA_CMD_INFO3_CACHE_SET_NUM GENMASK(29, 26)
+
+#define HAL_TCL_DATA_CMD_INFO4_RING_ID GENMASK(27, 20)
+#define HAL_TCL_DATA_CMD_INFO4_LOOPING_COUNT GENMASK(31, 28)
+
+enum hal_encrypt_type {
+ HAL_ENCRYPT_TYPE_WEP_40,
+ HAL_ENCRYPT_TYPE_WEP_104,
+ HAL_ENCRYPT_TYPE_TKIP_NO_MIC,
+ HAL_ENCRYPT_TYPE_WEP_128,
+ HAL_ENCRYPT_TYPE_TKIP_MIC,
+ HAL_ENCRYPT_TYPE_WAPI,
+ HAL_ENCRYPT_TYPE_CCMP_128,
+ HAL_ENCRYPT_TYPE_OPEN,
+ HAL_ENCRYPT_TYPE_CCMP_256,
+ HAL_ENCRYPT_TYPE_GCMP_128,
+ HAL_ENCRYPT_TYPE_AES_GCMP_256,
+ HAL_ENCRYPT_TYPE_WAPI_GCM_SM4,
+};
+
+enum hal_tcl_encap_type {
+ HAL_TCL_ENCAP_TYPE_RAW,
+ HAL_TCL_ENCAP_TYPE_NATIVE_WIFI,
+ HAL_TCL_ENCAP_TYPE_ETHERNET,
+ HAL_TCL_ENCAP_TYPE_802_3 = 3,
+};
+
+enum hal_tcl_desc_type {
+ HAL_TCL_DESC_TYPE_BUFFER,
+ HAL_TCL_DESC_TYPE_EXT_DESC,
+};
+
+enum hal_wbm_htt_tx_comp_status {
+ HAL_WBM_REL_HTT_TX_COMP_STATUS_OK,
+ HAL_WBM_REL_HTT_TX_COMP_STATUS_DROP,
+ HAL_WBM_REL_HTT_TX_COMP_STATUS_TTL,
+ HAL_WBM_REL_HTT_TX_COMP_STATUS_REINJ,
+ HAL_WBM_REL_HTT_TX_COMP_STATUS_INSPECT,
+ HAL_WBM_REL_HTT_TX_COMP_STATUS_MEC_NOTIFY,
+};
+
+struct hal_tcl_data_cmd {
+ struct ath11k_buffer_addr buf_addr_info;
+ u32 info0;
+ u32 info1;
+ u32 info2;
+ u32 info3;
+ u32 info4;
+} __packed;
+
+/* hal_tcl_data_cmd
+ *
+ * buf_addr_info
+ * Details of the physical address of a buffer or MSDU
+ * link descriptor.
+ *
+ * desc_type
+ * Indicates the type of address provided in the buf_addr_info.
+ * Values are defined in enum %HAL_REO_DEST_RING_BUFFER_TYPE_.
+ *
+ * epd
+ * When this bit is set, the input packet is of EPD type.
+ *
+ * encap_type
+ * Indicates the encapsulation that HW will perform. Values are
+ * defined in enum %HAL_TCL_ENCAP_TYPE_.
+ *
+ * encrypt_type
+ * Field only valid for encap_type: RAW
+ * Values are defined in enum %HAL_ENCRYPT_TYPE_.
+ *
+ * src_buffer_swap
+ * Treats source memory (packet buffer) organization as big-endian.
+ * 1'b0: Source memory is little endian
+ * 1'b1: Source memory is big endian
+ *
+ * link_meta_swap
+ * Treats link descriptor and Metadata as big-endian.
+ * 1'b0: memory is little endian
+ * 1'b1: memory is big endian
+ *
+ * search_type
+ * Search type select
+ * 0 - Normal search, 1 - Index based address search,
+ * 2 - Index based flow search
+ *
+ * addrx_en
+ * addry_en
+ * Address X/Y search enable in ASE correspondingly.
+ * 1'b0: Search disable
+ * 1'b1: Search Enable
+ *
+ * cmd_num
+ * This number can be used to match against status.
+ *
+ * data_length
+ * MSDU length in case of direct descriptor. Length of link
+ * extension descriptor in case of Link extension descriptor.
+ *
+ * *_checksum_en
+ * Enable checksum replacement for ipv4, udp_over_ipv4, ipv6,
+ * udp_over_ipv6, tcp_over_ipv4 and tcp_over_ipv6.
+ *
+ * to_fw
+ * Forward packet to FW along with classification result. The
+ * packet will not be forwarded to TQM when this bit is set.
+ * 1'b0: Use classification result to forward the packet.
+ * 1'b1: Override classification result & forward packet only to fw
+ *
+ * packet_offset
+ * Packet offset from Metadata in case of direct buffer descriptor.
+ *
+ * buffer_timestamp
+ * buffer_timestamp_valid
+ * Frame system entrance timestamp. It shall be filled in by the
+ * first module (SW, TCL or TQM) that sees the frame.
+ *
+ * mesh_enable
+ * For raw WiFi frames, this indicates transmission to a mesh STA,
+ * enabling the interpretation of the 'Mesh Control Present' bit
+ * (bit 8) of QoS Control.
+ * For native WiFi frames, this indicates that a 'Mesh Control'
+ * field is present between the header and the LLC.
+ *
+ * hlos_tid_overwrite
+ * When set, TCL shall ignore the IP DSCP and VLAN PCP
+ * fields and use HLOS_TID as the final TID. Otherwise TCL
+ * shall consider the DSCP and PCP fields as well as HLOS_TID
+ * and choose a final TID based on the configured priority.
+ *
+ * hlos_tid
+ * HLOS MSDU priority
+ * Field is used when HLOS_TID_overwrite is set.
+ *
+ * lmac_id
+ * TCL uses this LMAC_ID in address search, i.e., while
+ * finding a matching entry for the packet in the AST
+ * corresponding to the given LMAC_ID.
+ *
+ * If the LMAC ID is all 1s (=> value 3), it indicates a
+ * wildcard match for any MAC.
+ *
+ * dscp_tid_table_num
+ * DSCP to TID mapping table number that needs to be used
+ * for the MSDU.
+ *
+ * search_index
+ * The index that will be used for index based address or
+ * flow search. The field is valid when 'search_type' is 1 or 2.
+ *
+ * cache_set_num
+ * Cache set number that should be used to cache the index
+ * based search results, for address and flow search. This
+ * value should be equal to the LSB four bits of the hash value
+ * of the match data, in case the search index points to an
+ * entry which may also be used in content based search. The
+ * value can be anything when the entry pointed to by the
+ * search index will not be used for content based search.
+ *
+ * ring_id
+ * The buffer pointer ring ID.
+ * 0 refers to the IDLE ring
+ * 1 - N refers to other rings
+ *
+ * looping_count
+ * A count value that indicates the number of times the
+ * producer of entries into the Ring has looped around the
+ * ring.
+ *
+ * At initialization time, this value is set to 0. On the
+ * first loop, this value is set to 1. After the max value
+ * allowed by the number of bits for this field is reached,
+ * the count value continues with 0 again.
+ *
+ * In case SW is the consumer of the ring entries, it can
+ * use this field to figure out up to where the producer of
+ * entries has created new entries. This eliminates the need to
+ * check where the head pointer of the ring is located once
+ * the SW starts processing an interrupt indicating that new
+ * entries have been put into this ring.
+ *
+ * Also note that SW, if it wants, only needs to look at the
+ * LSB of this count value.
+ */
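+
+/* Illustrative sketch, not part of the original patch (helper name and
+ * field choices hypothetical): composing info0/info1 of a TCL data command
+ * with FIELD_PREP() and the masks above, for a single buffer carrying a
+ * native wifi frame with IPv4 checksum offload enabled.
+ */
+static inline void ath11k_hal_tcl_data_cmd_fill(struct hal_tcl_data_cmd *cmd,
+ u16 data_len)
+{
+ cmd->info0 = FIELD_PREP(HAL_TCL_DATA_CMD_INFO0_DESC_TYPE,
+                         HAL_TCL_DESC_TYPE_BUFFER) |
+              FIELD_PREP(HAL_TCL_DATA_CMD_INFO0_ENCAP_TYPE,
+                         HAL_TCL_ENCAP_TYPE_NATIVE_WIFI);
+ cmd->info1 = FIELD_PREP(HAL_TCL_DATA_CMD_INFO1_DATA_LEN, data_len) |
+              HAL_TCL_DATA_CMD_INFO1_IP4_CKSUM_EN;
+}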
+
+#define HAL_TCL_DESC_LEN sizeof(struct hal_tcl_data_cmd)
+
+enum hal_tcl_gse_ctrl {
+ HAL_TCL_GSE_CTRL_RD_STAT,
+ HAL_TCL_GSE_CTRL_SRCH_DIS,
+ HAL_TCL_GSE_CTRL_WR_BK_SINGLE,
+ HAL_TCL_GSE_CTRL_WR_BK_ALL,
+ HAL_TCL_GSE_CTRL_INVAL_SINGLE,
+ HAL_TCL_GSE_CTRL_INVAL_ALL,
+ HAL_TCL_GSE_CTRL_WR_BK_INVAL_SINGLE,
+ HAL_TCL_GSE_CTRL_WR_BK_INVAL_ALL,
+ HAL_TCL_GSE_CTRL_CLR_STAT_SINGLE,
+};
+
+/* hal_tcl_gse_ctrl
+ *
+ * rd_stat
+ * Report or Read statistics
+ * srch_dis
+ * Search disable. Report only Hash.
+ * wr_bk_single
+ * Write Back single entry
+ * wr_bk_all
+ * Write Back entire cache entry
+ * inval_single
+ * Invalidate single cache entry
+ * inval_all
+ * Invalidate entire cache
+ * wr_bk_inval_single
+ * Write back and invalidate single entry in cache
+ * wr_bk_inval_all
+ * Write back and invalidate entire cache
+ * clr_stat_single
+ * Clear statistics for single entry
+ */
+
+#define HAL_TCL_GSE_CMD_INFO0_CTRL_BUF_ADDR_HI GENMASK(7, 0)
+#define HAL_TCL_GSE_CMD_INFO0_GSE_CTRL GENMASK(11, 8)
+#define HAL_TCL_GSE_CMD_INFO0_GSE_SEL BIT(12)
+#define HAL_TCL_GSE_CMD_INFO0_STATUS_DEST_RING_ID BIT(13)
+#define HAL_TCL_GSE_CMD_INFO0_SWAP BIT(14)
+
+#define HAL_TCL_GSE_CMD_INFO1_RING_ID GENMASK(27, 20)
+#define HAL_TCL_GSE_CMD_INFO1_LOOPING_COUNT GENMASK(31, 28)
+
+struct hal_tcl_gse_cmd {
+ u32 ctrl_buf_addr_lo;
+ u32 info0;
+ u32 meta_data[2];
+ u32 rsvd0[2];
+ u32 info1;
+} __packed;
+
+/* hal_tcl_gse_cmd
+ *
+ * ctrl_buf_addr_lo, ctrl_buf_addr_hi
+ * Address of a control buffer containing additional info needed
+ * for this command execution.
+ *
+ * gse_ctrl
+ * GSE control operations. This includes cache operations and table
+ * entry statistics read/clear operation. Values are defined in
+ * enum %HAL_TCL_GSE_CTRL.
+ *
+ * gse_sel
+ * Selects the ASE/FSE to do the operation mentioned by gse_ctrl.
+ * 0: FSE select, 1: ASE select
+ *
+ * status_destination_ring_id
+ * TCL status ring to which the GSE status needs to be sent.
+ *
+ * swap
+ * Bit to enable byte swapping of contents of buffer.
+ *
+ * meta_data
+ * Meta data to be returned in the status descriptor
+ */
+
+enum hal_tcl_cache_op_res {
+ HAL_TCL_CACHE_OP_RES_DONE,
+ HAL_TCL_CACHE_OP_RES_NOT_FOUND,
+ HAL_TCL_CACHE_OP_RES_TIMEOUT,
+};
+
+#define HAL_TCL_STATUS_RING_INFO0_GSE_CTRL GENMASK(3, 0)
+#define HAL_TCL_STATUS_RING_INFO0_GSE_SEL BIT(4)
+#define HAL_TCL_STATUS_RING_INFO0_CACHE_OP_RES GENMASK(6, 5)
+#define HAL_TCL_STATUS_RING_INFO0_MSDU_CNT GENMASK(31, 8)
+
+#define HAL_TCL_STATUS_RING_INFO1_HASH_IDX GENMASK(19, 0)
+
+#define HAL_TCL_STATUS_RING_INFO2_RING_ID GENMASK(27, 20)
+#define HAL_TCL_STATUS_RING_INFO2_LOOPING_COUNT GENMASK(31, 28)
+
+struct hal_tcl_status_ring {
+ u32 info0;
+ u32 msdu_byte_count;
+ u32 msdu_timestamp;
+ u32 meta_data[2];
+ u32 info1;
+ u32 rsvd0;
+ u32 info2;
+} __packed;
+
+/* hal_tcl_status_ring
+ *
+ * gse_ctrl
+ * GSE control operations. This includes cache operations and table
+ * entry statistics read/clear operation. Values are defined in
+ * enum %HAL_TCL_GSE_CTRL.
+ *
+ * gse_sel
+ * Selects the ASE/FSE to do the operation mentioned by gse_ctrl.
+ * 0: FSE select, 1: ASE select
+ *
+ * cache_op_res
+ * Cache operation result. Values are defined in enum
+ * %HAL_TCL_CACHE_OP_RES_.
+ *
+ * msdu_cnt
+ * msdu_byte_count
+ * MSDU count of the entry and MSDU byte count for entry 1.
+ *
+ * hash_indx
+ * Hash value of the entry in case the search failed or is disabled.
+ */
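+
+/* Illustrative sketch, not part of the original patch (helper name
+ * hypothetical): reading back the cache operation result of a GSE command
+ * from the status descriptor described above.
+ */
+static inline enum hal_tcl_cache_op_res
+ath11k_hal_tcl_status_cache_op_res(struct hal_tcl_status_ring *status)
+{
+ return FIELD_GET(HAL_TCL_STATUS_RING_INFO0_CACHE_OP_RES, status->info0);
+}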
+
+#define HAL_CE_SRC_DESC_ADDR_INFO_ADDR_HI GENMASK(7, 0)
+#define HAL_CE_SRC_DESC_ADDR_INFO_HASH_EN BIT(8)
+#define HAL_CE_SRC_DESC_ADDR_INFO_BYTE_SWAP BIT(9)
+#define HAL_CE_SRC_DESC_ADDR_INFO_DEST_SWAP BIT(10)
+#define HAL_CE_SRC_DESC_ADDR_INFO_GATHER BIT(11)
+#define HAL_CE_SRC_DESC_ADDR_INFO_LEN GENMASK(31, 16)
+
+#define HAL_CE_SRC_DESC_META_INFO_DATA GENMASK(15, 0)
+
+#define HAL_CE_SRC_DESC_FLAGS_RING_ID GENMASK(27, 20)
+#define HAL_CE_SRC_DESC_FLAGS_LOOP_CNT HAL_SRNG_DESC_LOOP_CNT
+
+struct hal_ce_srng_src_desc {
+ u32 buffer_addr_low;
+ u32 buffer_addr_info; /* %HAL_CE_SRC_DESC_ADDR_INFO_ */
+ u32 meta_info; /* %HAL_CE_SRC_DESC_META_INFO_ */
+ u32 flags; /* %HAL_CE_SRC_DESC_FLAGS_ */
+} __packed;
+
+/*
+ * hal_ce_srng_src_desc
+ *
+ * buffer_addr_lo
+ * LSB 32 bits of the 40 Bit Pointer to the source buffer
+ *
+ * buffer_addr_hi
+ * MSB 8 bits of the 40 Bit Pointer to the source buffer
+ *
+ * toeplitz_en
+ * Enable generation of a 32-bit Toeplitz-LFSR hash for the
+ * data transfer. In case of gather, the field in the first
+ * source ring entry of the gather copy cycle is taken into
+ * account.
+ *
+ * src_swap
+ * Treats source memory organization as big-endian. For
+ * each dword read (4 bytes), the byte 0 is swapped with byte 3
+ * and byte 1 is swapped with byte 2.
+ * In case of gather, the field in the first source ring entry
+ * of the gather copy cycle is taken into account.
+ *
+ * dest_swap
+ * Treats destination memory organization as big-endian.
+ * For each dword write (4 bytes), the byte 0 is swapped with
+ * byte 3 and byte 1 is swapped with byte 2.
+ * In case of gather, the field in the first source ring entry
+ * of the gather copy cycle is taken into account.
+ *
+ * gather
+ * Enables gather of multiple copy engine source
+ * descriptors to one destination.
+ *
+ * ce_res_0
+ * Reserved
+ *
+ * length
+ * Length of the buffer, in octets, for the current descriptor.
+ *
+ * fw_metadata
+ * Meta data used by FW.
+ * In case of gather, the field in the first source ring entry
+ * of the gather copy cycle is taken into account.
+ *
+ * ce_res_1
+ * Reserved
+ *
+ * ce_res_2
+ * Reserved
+ *
+ * ring_id
+ * The buffer pointer ring ID.
+ * 0 refers to the IDLE ring
+ * 1 - N refers to other rings
+ * Helps with debugging when dumping ring contents.
+ *
+ * looping_count
+ * A count value that indicates the number of times the
+ * producer of entries into the Ring has looped around the
+ * ring.
+ *
+ * At initialization time, this value is set to 0. On the
+ * first loop, this value is set to 1. After the max value
+ * allowed by the number of bits for this field is reached,
+ * the count value continues with 0 again.
+ *
+ * In case SW is the consumer of the ring entries, it can
+ * use this field to figure out up to where the producer of
+ * entries has created new entries. This eliminates the need to
+ * check where the head pointer of the ring is located once
+ * the SW starts processing an interrupt indicating that new
+ * entries have been put into this ring.
+ *
+ * Also note that SW, if it wants, only needs to look at the
+ * LSB of this count value.
+ */
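+
+/* Illustrative sketch, not part of the original patch (helper name
+ * hypothetical; assumes the usual kernel lower_32_bits()/upper_32_bits()
+ * helpers are in scope): filling a CE source descriptor for a 40-bit DMA
+ * address, splitting it across buffer_addr_low and the addr_hi field as
+ * described above.
+ */
+static inline void ath11k_hal_ce_src_desc_fill(struct hal_ce_srng_src_desc *desc,
+ dma_addr_t paddr, u16 len, u16 meta)
+{
+ desc->buffer_addr_low = lower_32_bits(paddr);
+ desc->buffer_addr_info =
+	FIELD_PREP(HAL_CE_SRC_DESC_ADDR_INFO_ADDR_HI, upper_32_bits(paddr)) |
+	FIELD_PREP(HAL_CE_SRC_DESC_ADDR_INFO_LEN, len);
+ desc->meta_info = FIELD_PREP(HAL_CE_SRC_DESC_META_INFO_DATA, meta);
+}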
+
+#define HAL_CE_DEST_DESC_ADDR_INFO_ADDR_HI GENMASK(7, 0)
+#define HAL_CE_DEST_DESC_ADDR_INFO_RING_ID GENMASK(27, 20)
+#define HAL_CE_DEST_DESC_ADDR_INFO_LOOP_CNT HAL_SRNG_DESC_LOOP_CNT
+
+struct hal_ce_srng_dest_desc {
+ u32 buffer_addr_low;
+ u32 buffer_addr_info; /* %HAL_CE_DEST_DESC_ADDR_INFO_ */
+} __packed;
+
+/* hal_ce_srng_dest_desc
+ *
+ * dst_buffer_low
+ * LSB 32 bits of the 40 Bit Pointer to the Destination
+ * buffer
+ *
+ * dst_buffer_high
+ * MSB 8 bits of the 40 Bit Pointer to the Destination
+ * buffer
+ *
+ * ce_res_4
+ * Reserved
+ *
+ * ring_id
+ * The buffer pointer ring ID.
+ * 0 refers to the IDLE ring
+ * 1 - N refers to other rings
+ * Helps with debugging when dumping ring contents.
+ *
+ * looping_count
+ * A count value that indicates the number of times the
+ * producer of entries into the Ring has looped around the
+ * ring.
+ *
+ * At initialization time, this value is set to 0. On the
+ * first loop, this value is set to 1. After the max value
+ * allowed by the number of bits for this field is reached,
+ * the count value continues with 0 again.
+ *
+ * In case SW is the consumer of the ring entries, it can
+ * use this field to figure out up to where the producer of
+ * entries has created new entries. This eliminates the need to
+ * check where the head pointer of the ring is located once
+ * the SW starts processing an interrupt indicating that new
+ * entries have been put into this ring.
+ *
+ * Also note that SW, if it wants, only needs to look at the
+ * LSB of this count value.
+ */
+
+#define HAL_CE_DST_STATUS_DESC_FLAGS_HASH_EN BIT(8)
+#define HAL_CE_DST_STATUS_DESC_FLAGS_BYTE_SWAP BIT(9)
+#define HAL_CE_DST_STATUS_DESC_FLAGS_DEST_SWAP BIT(10)
+#define HAL_CE_DST_STATUS_DESC_FLAGS_GATHER BIT(11)
+#define HAL_CE_DST_STATUS_DESC_FLAGS_LEN GENMASK(31, 16)
+
+#define HAL_CE_DST_STATUS_DESC_META_INFO_DATA GENMASK(7, 0)
+#define HAL_CE_DST_STATUS_DESC_META_INFO_RING_ID GENMASK(27, 20)
+#define HAL_CE_DST_STATUS_DESC_META_INFO_LOOP_CNT HAL_SRNG_DESC_LOOP_CNT
+
+struct hal_ce_srng_dst_status_desc {
+ u32 flags; /* %HAL_CE_DST_STATUS_DESC_FLAGS_ */
+ u32 toeplitz_hash0;
+ u32 toeplitz_hash1;
+ u32 meta_info; /* HAL_CE_DST_STATUS_DESC_META_INFO_ */
+} __packed;
+
+/* hal_ce_srng_dst_status_desc
+ *
+ * ce_res_5
+ * Reserved
+ *
+ * toeplitz_en
+ * Toeplitz hash generation for the data transfer was enabled
+ * src_swap
+ * Source memory buffer swapped
+ *
+ * dest_swap
+ * Destination memory buffer swapped
+ *
+ * gather
+ * Gather of multiple copy engine source descriptors to one
+ * destination enabled
+ *
+ * ce_res_6
+ * Reserved
+ *
+ * length
+ * Sum of the lengths of all source descriptors in the
+ * gather chain
+ *
+ * toeplitz_hash_0
+ * 32 LS bits of 64 bit Toeplitz LFSR hash result
+ *
+ * toeplitz_hash_1
+ * 32 MS bits of 64 bit Toeplitz LFSR hash result
+ *
+ * fw_metadata
+ * Meta data used by FW
+ * In case of gather, the field in the first source ring entry
+ * of the gather copy cycle is taken into account.
+ *
+ * ce_res_7
+ * Reserved
+ *
+ * ring_id
+ * The buffer pointer ring ID.
+ * 0 refers to the IDLE ring
+ * 1 - N refers to other rings
+ * Helps with debugging when dumping ring contents.
+ *
+ * looping_count
+ * A count value that indicates the number of times the
+ * producer of entries into the Ring has looped around the
+ * ring.
+ *
+ * At initialization time, this value is set to 0. On the
+ * first loop, this value is set to 1. After the max value
+ * allowed by the number of bits for this field is reached,
+ * the count value continues with 0 again.
+ *
+ * In case SW is the consumer of the ring entries, it can
+ * use this field to figure out up to where the producer of
+ * entries has created new entries. This eliminates the need to
+ * check where the head pointer of the ring is located once
+ * the SW starts processing an interrupt indicating that new
+ * entries have been put into this ring.
+ *
+ * Also note that SW, if it wants, only needs to look at the
+ * LSB of this count value.
+ */
+
+#define HAL_TX_RATE_STATS_INFO0_VALID BIT(0)
+#define HAL_TX_RATE_STATS_INFO0_BW GENMASK(2, 1)
+#define HAL_TX_RATE_STATS_INFO0_PKT_TYPE GENMASK(6, 3)
+#define HAL_TX_RATE_STATS_INFO0_STBC BIT(7)
+#define HAL_TX_RATE_STATS_INFO0_LDPC BIT(8)
+#define HAL_TX_RATE_STATS_INFO0_SGI GENMASK(10, 9)
+#define HAL_TX_RATE_STATS_INFO0_MCS GENMASK(14, 11)
+#define HAL_TX_RATE_STATS_INFO0_OFDMA_TX BIT(15)
+#define HAL_TX_RATE_STATS_INFO0_TONES_IN_RU GENMASK(27, 16)
+
+enum hal_tx_rate_stats_bw {
+ HAL_TX_RATE_STATS_BW_20,
+ HAL_TX_RATE_STATS_BW_40,
+ HAL_TX_RATE_STATS_BW_80,
+ HAL_TX_RATE_STATS_BW_160,
+};
+
+enum hal_tx_rate_stats_pkt_type {
+ HAL_TX_RATE_STATS_PKT_TYPE_11A,
+ HAL_TX_RATE_STATS_PKT_TYPE_11B,
+ HAL_TX_RATE_STATS_PKT_TYPE_11N,
+ HAL_TX_RATE_STATS_PKT_TYPE_11AC,
+ HAL_TX_RATE_STATS_PKT_TYPE_11AX,
+};
+
+enum hal_tx_rate_stats_sgi {
+ HAL_TX_RATE_STATS_SGI_08US,
+ HAL_TX_RATE_STATS_SGI_04US,
+ HAL_TX_RATE_STATS_SGI_16US,
+ HAL_TX_RATE_STATS_SGI_32US,
+};
+
+struct hal_tx_rate_stats {
+ u32 info0;
+ u32 tsf;
+} __packed;
+
+struct hal_wbm_link_desc {
+ struct ath11k_buffer_addr buf_addr_info;
+} __packed;
+
+/* hal_wbm_link_desc
+ *
+ * Producer: WBM
+ * Consumer: WBM
+ *
+ * buf_addr_info
+ * Details of the physical address of a buffer or MSDU
+ * link descriptor.
+ */
+
+enum hal_wbm_rel_src_module {
+ HAL_WBM_REL_SRC_MODULE_TQM,
+ HAL_WBM_REL_SRC_MODULE_RXDMA,
+ HAL_WBM_REL_SRC_MODULE_REO,
+ HAL_WBM_REL_SRC_MODULE_FW,
+ HAL_WBM_REL_SRC_MODULE_SW,
+};
+
+enum hal_wbm_rel_desc_type {
+ HAL_WBM_REL_DESC_TYPE_REL_MSDU,
+ HAL_WBM_REL_DESC_TYPE_MSDU_LINK,
+ HAL_WBM_REL_DESC_TYPE_MPDU_LINK,
+ HAL_WBM_REL_DESC_TYPE_MSDU_EXT,
+ HAL_WBM_REL_DESC_TYPE_QUEUE_EXT,
+};
+
+/* hal_wbm_rel_desc_type
+ *
+ * msdu_buffer
+ * The address points to an MSDU buffer
+ *
+ * msdu_link_descriptor
+ * The address points to a Tx MSDU link descriptor
+ *
+ * mpdu_link_descriptor
+ * The address points to an MPDU link descriptor
+ *
+ * msdu_ext_descriptor
+ * The address points to an MSDU extension descriptor
+ *
+ * queue_ext_descriptor
+ * The address points to a TQM queue extension descriptor. WBM should
+ * treat this in the same way as a link descriptor.
+ */
+
+enum hal_wbm_rel_bm_act {
+ HAL_WBM_REL_BM_ACT_PUT_IN_IDLE,
+ HAL_WBM_REL_BM_ACT_REL_MSDU,
+};
+
+/* hal_wbm_rel_bm_act
+ *
+ * put_in_idle_list
+ * Put the buffer or descriptor back in the idle list. In case of MSDU or
+ * MPDU link descriptor, BM does not need to check to release any
+ * individual MSDU buffers.
+ *
+ * release_msdu_list
+ * This BM action can only be used in combination with desc_type being
+ * msdu_link_descriptor. Field first_msdu_index points out which MSDU
+ * pointer in the MSDU link descriptor is the first of an MPDU that is
+ * released. BM shall release all the MSDU buffers linked to this first
+ * MSDU buffer pointer. All related MSDU buffer pointer entries shall be
+ * set to value 0, which represents the 'NULL' pointer. When all MSDU
+ * buffer pointers in the MSDU link descriptor are 'NULL', the MSDU link
+ * descriptor itself shall also be released.
+ */
+
+#define HAL_WBM_RELEASE_INFO0_REL_SRC_MODULE GENMASK(2, 0)
+#define HAL_WBM_RELEASE_INFO0_BM_ACTION GENMASK(5, 3)
+#define HAL_WBM_RELEASE_INFO0_DESC_TYPE GENMASK(8, 6)
+#define HAL_WBM_RELEASE_INFO0_FIRST_MSDU_IDX GENMASK(12, 9)
+#define HAL_WBM_RELEASE_INFO0_TQM_RELEASE_REASON GENMASK(16, 13)
+#define HAL_WBM_RELEASE_INFO0_RXDMA_PUSH_REASON GENMASK(18, 17)
+#define HAL_WBM_RELEASE_INFO0_RXDMA_ERROR_CODE GENMASK(23, 19)
+#define HAL_WBM_RELEASE_INFO0_REO_PUSH_REASON GENMASK(25, 24)
+#define HAL_WBM_RELEASE_INFO0_REO_ERROR_CODE GENMASK(30, 26)
+#define HAL_WBM_RELEASE_INFO0_WBM_INTERNAL_ERROR BIT(31)
+
+#define HAL_WBM_RELEASE_INFO1_TQM_STATUS_NUMBER GENMASK(23, 0)
+#define HAL_WBM_RELEASE_INFO1_TRANSMIT_COUNT GENMASK(30, 24)
+
+#define HAL_WBM_RELEASE_INFO2_ACK_FRAME_RSSI GENMASK(7, 0)
+#define HAL_WBM_RELEASE_INFO2_SW_REL_DETAILS_VALID BIT(8)
+#define HAL_WBM_RELEASE_INFO2_FIRST_MSDU BIT(9)
+#define HAL_WBM_RELEASE_INFO2_LAST_MSDU BIT(10)
+#define HAL_WBM_RELEASE_INFO2_MSDU_IN_AMSDU BIT(11)
+#define HAL_WBM_RELEASE_INFO2_FW_TX_NOTIF_FRAME BIT(12)
+#define HAL_WBM_RELEASE_INFO2_BUFFER_TIMESTAMP GENMASK(31, 13)
+
+#define HAL_WBM_RELEASE_INFO3_PEER_ID GENMASK(15, 0)
+#define HAL_WBM_RELEASE_INFO3_TID GENMASK(19, 16)
+#define HAL_WBM_RELEASE_INFO3_RING_ID GENMASK(27, 20)
+#define HAL_WBM_RELEASE_INFO3_LOOPING_COUNT GENMASK(31, 28)
+
+#define HAL_WBM_REL_HTT_TX_COMP_INFO0_STATUS GENMASK(12, 9)
+#define HAL_WBM_REL_HTT_TX_COMP_INFO0_REINJ_REASON GENMASK(16, 13)
+#define HAL_WBM_REL_HTT_TX_COMP_INFO0_EXP_FRAME BIT(17)
+
+struct hal_wbm_release_ring {
+ struct ath11k_buffer_addr buf_addr_info;
+ u32 info0;
+ u32 info1;
+ u32 info2;
+ struct hal_tx_rate_stats rate_stats;
+ u32 info3;
+} __packed;
+
+/* hal_wbm_release_ring
+ *
+ * Producer: SW/TQM/RXDMA/REO/SWITCH
+ * Consumer: WBM/SW/FW
+ *
+ * HTT tx status is overlaid on wbm_release ring on 4-byte words 2, 3, 4 and 5
+ * for software based completions.
+ *
+ * buf_addr_info
+ * Details of the physical address of the buffer or link descriptor.
+ *
+ * release_source_module
+ * Indicates which module initiated the release of this buffer/descriptor.
+ * Values are defined in enum %HAL_WBM_REL_SRC_MODULE_.
+ *
+ * bm_action
+ * Field only valid when the field return_buffer_manager in
+ * Released_buff_or_desc_addr_info indicates:
+ * WBM_IDLE_BUF_LIST / WBM_IDLE_DESC_LIST
+ * Values are defined in enum %HAL_WBM_REL_BM_ACT_.
+ *
+ * buffer_or_desc_type
+ * Field only valid when WBM is marked as the return_buffer_manager in
+ * the Released_Buffer_address_info. Indicates the type of buffer or
+ * descriptor is being released. Values are in enum %HAL_WBM_REL_DESC_TYPE.
+ *
+ * first_msdu_index
+ * Field only valid for the bm_action release_msdu_list. The index of the
+ * first MSDU in an MSDU link descriptor all belonging to the same MPDU.
+ *
+ * tqm_release_reason
+ * Field only valid when Release_source_module is set to release_source_TQM
+ * Release reasons are defined in enum %HAL_WBM_TQM_REL_REASON_.
+ *
+ * rxdma_push_reason
+ * reo_push_reason
+ * Indicates why rxdma/reo pushed the frame to this ring and values are
+ * defined in enum %HAL_REO_DEST_RING_PUSH_REASON_.
+ *
+ * rxdma_error_code
+ * Field only valid when 'rxdma_push_reason' set to 'error_detected'.
+ * Values are defined in enum %HAL_REO_ENTR_RING_RXDMA_ECODE_.
+ *
+ * reo_error_code
+ * Field only valid when 'reo_push_reason' set to 'error_detected'. Values
+ * are defined in enum %HAL_REO_DEST_RING_ERROR_CODE_.
+ *
+ * wbm_internal_error
+ * Is set when WBM got a buffer pointer but the action was to push it to
+ * the idle link descriptor ring or do link related activity OR
+ * Is set when WBM got a link buffer pointer but the action was to push it
+ * to the buffer descriptor ring.
+ *
+ * tqm_status_number
+ * The value in this field is equal to tqm_cmd_number in TQM command. It is
+ * used to correlate the status with TQM commands. Only valid when
+ * release_source_module is TQM.
+ *
+ * transmit_count
+ * The number of times the frame has been transmitted, valid only when
+ * release source is TQM.
+ *
+ * ack_frame_rssi
+ * This field is only valid when the source is TQM. If this frame is
+ * removed as the result of the reception of an ACK or BA, this field
+ * indicates the RSSI of the received ACK or BA frame.
+ *
+ * sw_release_details_valid
+ * This is set when WBM got a 'release_msdu_list' command from TQM and
+ * the return buffer manager is not WBM. WBM will then de-aggregate all
+ * MSDUs and pass them one at a time on to the 'buffer owner'.
+ *
+ * first_msdu
+ * Field only valid when SW_release_details_valid is set.
+ * When set, this MSDU is the first MSDU pointed to in the
+ * 'release_msdu_list' command.
+ *
+ * last_msdu
+ * Field only valid when SW_release_details_valid is set.
+ * When set, this MSDU is the last MSDU pointed to in the
+ * 'release_msdu_list' command.
+ *
+ * msdu_part_of_amsdu
+ * Field only valid when SW_release_details_valid is set.
+ * When set, this MSDU was part of an A-MSDU in MPDU
+ *
+ * fw_tx_notify_frame
+ * Field only valid when SW_release_details_valid is set.
+ *
+ * buffer_timestamp
+ * Field only valid when SW_release_details_valid is set.
+ * This is the frame's Buffer_timestamp, in units of 1024 us.
+ *
+ * struct hal_tx_rate_stats rate_stats
+ * Transmit rate statistics for this frame.
+ *
+ * sw_peer_id
+ * tid
+ * Field only valid when Release_source_module is set to
+ * release_source_TQM
+ *
+ * 1) Release of MSDU buffer due to drop_frame = 1. Flow is
+ * not fetched and hence sw_peer_id and tid = 0
+ *
+ * buffer_or_desc_type = e_num 0 (MSDU_rel_buffer)
+ * tqm_release_reason = e_num 1 (tqm_rr_rem_cmd_rem)
+ *
+ * 2) Release of MSDU buffer due to Flow is not fetched and
+ * hence sw_peer_id and tid = 0
+ *
+ * buffer_or_desc_type = e_num 0 (MSDU_rel_buffer)
+ * tqm_release_reason = e_num 1 (tqm_rr_rem_cmd_rem)
+ *
+ * 3) Release of MSDU link due to remove_mpdu or acked_mpdu
+ * command.
+ *
+ * buffer_or_desc_type = e_num 1 (msdu_link_descriptor)
+ * tqm_release_reason can be: e_num 1 (tqm_rr_rem_cmd_rem),
+ * e_num 2 (tqm_rr_rem_cmd_tx), e_num 3 (tqm_rr_rem_cmd_notx),
+ * e_num 4 (tqm_rr_rem_cmd_aged)
+ *
+ * This field represents the TID from the TX_MSDU_FLOW
+ * descriptor or TX_MPDU_QUEUE descriptor.
+ *
+ * ring_id
+ * For debugging.
+ * This field is filled in by the SRNG module.
+ * It helps to identify the ring that is being looked at.
+ *
+ * looping_count
+ * A count value that indicates the number of times the
+ * producer of entries into the Buffer Manager Ring has looped
+ * around the ring.
+ *
+ * At initialization time, this value is set to 0. On the
+ * first loop, this value is set to 1. After the max value
+ * allowed by the number of bits for this field is reached,
+ * the count value continues with 0 again.
+ *
+ * In case SW is the consumer of the ring entries, it can
+ * use this field to figure out up to where the producer of
+ * entries has created new entries. This eliminates the need to
+ * check where the head pointer of the ring is located once
+ * the SW starts processing an interrupt indicating that new
+ * entries have been put into this ring.
+ *
+ * Also note that SW, if it wants, only needs to look at the
+ * LSB of this count value.
+ */
+
+/**
+ * enum hal_wbm_tqm_rel_reason - TQM release reason code
+ * @HAL_WBM_TQM_REL_REASON_FRAME_ACKED: ACK or BACK received for the frame
+ * @HAL_WBM_TQM_REL_REASON_CMD_REMOVE_MPDU: Command remove_mpdus initiated by SW
+ * @HAL_WBM_TQM_REL_REASON_CMD_REMOVE_TX: Command remove transmitted_mpdus
+ * initiated by sw.
+ * @HAL_WBM_TQM_REL_REASON_CMD_REMOVE_NOTX: Command remove untransmitted_mpdus
+ * initiated by sw.
+ * @HAL_WBM_TQM_REL_REASON_CMD_REMOVE_AGED_FRAMES: Command remove aged msdus or
+ * mpdus.
+ * @HAL_WBM_TQM_REL_REASON_CMD_REMOVE_RESEAON1: Remove command initiated by
+ * fw with fw_reason1.
+ * @HAL_WBM_TQM_REL_REASON_CMD_REMOVE_RESEAON2: Remove command initiated by
+ * fw with fw_reason2.
+ * @HAL_WBM_TQM_REL_REASON_CMD_REMOVE_RESEAON3: Remove command initiated by
+ * fw with fw_reason3.
+ */
+enum hal_wbm_tqm_rel_reason {
+ HAL_WBM_TQM_REL_REASON_FRAME_ACKED,
+ HAL_WBM_TQM_REL_REASON_CMD_REMOVE_MPDU,
+ HAL_WBM_TQM_REL_REASON_CMD_REMOVE_TX,
+ HAL_WBM_TQM_REL_REASON_CMD_REMOVE_NOTX,
+ HAL_WBM_TQM_REL_REASON_CMD_REMOVE_AGED_FRAMES,
+ HAL_WBM_TQM_REL_REASON_CMD_REMOVE_RESEAON1,
+ HAL_WBM_TQM_REL_REASON_CMD_REMOVE_RESEAON2,
+ HAL_WBM_TQM_REL_REASON_CMD_REMOVE_RESEAON3,
+};
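+
+/* Illustrative sketch, not part of the original patch (helper name
+ * hypothetical): a TX completion handler would typically check the release
+ * source first and only then interpret the TQM release reason, per the
+ * hal_wbm_release_ring field descriptions above.
+ */
+static inline bool ath11k_hal_wbm_rel_is_acked(struct hal_wbm_release_ring *desc)
+{
+ if (FIELD_GET(HAL_WBM_RELEASE_INFO0_REL_SRC_MODULE, desc->info0) !=
+     HAL_WBM_REL_SRC_MODULE_TQM)
+  return false;
+
+ return FIELD_GET(HAL_WBM_RELEASE_INFO0_TQM_RELEASE_REASON, desc->info0) ==
+        HAL_WBM_TQM_REL_REASON_FRAME_ACKED;
+}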
+
+struct hal_wbm_buffer_ring {
+ struct ath11k_buffer_addr buf_addr_info;
+};
+
+enum hal_desc_owner {
+ HAL_DESC_OWNER_WBM,
+ HAL_DESC_OWNER_SW,
+ HAL_DESC_OWNER_TQM,
+ HAL_DESC_OWNER_RXDMA,
+ HAL_DESC_OWNER_REO,
+ HAL_DESC_OWNER_SWITCH,
+};
+
+enum hal_desc_buf_type {
+ HAL_DESC_BUF_TYPE_TX_MSDU_LINK,
+ HAL_DESC_BUF_TYPE_TX_MPDU_LINK,
+ HAL_DESC_BUF_TYPE_TX_MPDU_QUEUE_HEAD,
+ HAL_DESC_BUF_TYPE_TX_MPDU_QUEUE_EXT,
+ HAL_DESC_BUF_TYPE_TX_FLOW,
+ HAL_DESC_BUF_TYPE_TX_BUFFER,
+ HAL_DESC_BUF_TYPE_RX_MSDU_LINK,
+ HAL_DESC_BUF_TYPE_RX_MPDU_LINK,
+ HAL_DESC_BUF_TYPE_RX_REO_QUEUE,
+ HAL_DESC_BUF_TYPE_RX_REO_QUEUE_EXT,
+ HAL_DESC_BUF_TYPE_RX_BUFFER,
+ HAL_DESC_BUF_TYPE_IDLE_LINK,
+};
+
+#define HAL_DESC_REO_OWNED 4
+#define HAL_DESC_REO_QUEUE_DESC 8
+#define HAL_DESC_REO_QUEUE_EXT_DESC 9
+#define HAL_DESC_REO_NON_QOS_TID 16
+
+#define HAL_DESC_HDR_INFO0_OWNER GENMASK(3, 0)
+#define HAL_DESC_HDR_INFO0_BUF_TYPE GENMASK(7, 4)
+#define HAL_DESC_HDR_INFO0_DBG_RESERVED GENMASK(31, 8)
+
+struct hal_desc_header {
+ u32 info0;
+} __packed;
+
+struct hal_rx_mpdu_link_ptr {
+ struct ath11k_buffer_addr addr_info;
+} __packed;
+
+struct hal_rx_msdu_details {
+ struct ath11k_buffer_addr buf_addr_info;
+ struct rx_msdu_desc rx_msdu_info;
+} __packed;
+
+#define HAL_RX_MSDU_LNK_INFO0_RX_QUEUE_NUMBER GENMASK(15, 0)
+#define HAL_RX_MSDU_LNK_INFO0_FIRST_MSDU_LNK BIT(16)
+
+struct hal_rx_msdu_link {
+ struct hal_desc_header desc_hdr;
+ struct ath11k_buffer_addr buf_addr_info;
+ u32 info0;
+ u32 pn[4];
+ struct hal_rx_msdu_details msdu_link[6];
+} __packed;
+
+struct hal_rx_reo_queue_ext {
+ struct hal_desc_header desc_hdr;
+ u32 rsvd;
+ struct hal_rx_mpdu_link_ptr mpdu_link[15];
+} __packed;
+
+/* hal_rx_reo_queue_ext
+ * Consumer: REO
+ * Producer: REO
+ *
+ * descriptor_header
+ * Details about which module owns this struct.
+ *
+ * mpdu_link
+ * Pointer to the next MPDU_link descriptor in the MPDU queue.
+ */
+
+enum hal_rx_reo_queue_pn_size {
+ HAL_RX_REO_QUEUE_PN_SIZE_24,
+ HAL_RX_REO_QUEUE_PN_SIZE_48,
+ HAL_RX_REO_QUEUE_PN_SIZE_128,
+};
+
+#define HAL_RX_REO_QUEUE_RX_QUEUE_NUMBER GENMASK(15, 0)
+
+#define HAL_RX_REO_QUEUE_INFO0_VLD BIT(0)
+#define HAL_RX_REO_QUEUE_INFO0_ASSOC_LNK_DESC_COUNTER GENMASK(2, 1)
+#define HAL_RX_REO_QUEUE_INFO0_DIS_DUP_DETECTION BIT(3)
+#define HAL_RX_REO_QUEUE_INFO0_SOFT_REORDER_EN BIT(4)
+#define HAL_RX_REO_QUEUE_INFO0_AC GENMASK(6, 5)
+#define HAL_RX_REO_QUEUE_INFO0_BAR BIT(7)
+#define HAL_RX_REO_QUEUE_INFO0_RETRY BIT(8)
+#define HAL_RX_REO_QUEUE_INFO0_CHECK_2K_MODE BIT(9)
+#define HAL_RX_REO_QUEUE_INFO0_OOR_MODE BIT(10)
+#define HAL_RX_REO_QUEUE_INFO0_BA_WINDOW_SIZE GENMASK(18, 11)
+#define HAL_RX_REO_QUEUE_INFO0_PN_CHECK BIT(19)
+#define HAL_RX_REO_QUEUE_INFO0_EVEN_PN BIT(20)
+#define HAL_RX_REO_QUEUE_INFO0_UNEVEN_PN BIT(21)
+#define HAL_RX_REO_QUEUE_INFO0_PN_HANDLE_ENABLE BIT(22)
+#define HAL_RX_REO_QUEUE_INFO0_PN_SIZE GENMASK(24, 23)
+#define HAL_RX_REO_QUEUE_INFO0_IGNORE_AMPDU_FLG BIT(25)
+
+#define HAL_RX_REO_QUEUE_INFO1_SVLD BIT(0)
+#define HAL_RX_REO_QUEUE_INFO1_SSN GENMASK(12, 1)
+#define HAL_RX_REO_QUEUE_INFO1_CURRENT_IDX GENMASK(20, 13)
+#define HAL_RX_REO_QUEUE_INFO1_SEQ_2K_ERR BIT(21)
+#define HAL_RX_REO_QUEUE_INFO1_PN_ERR BIT(22)
+#define HAL_RX_REO_QUEUE_INFO1_PN_VALID BIT(31)
+
+#define HAL_RX_REO_QUEUE_INFO2_MPDU_COUNT GENMASK(6, 0)
+#define HAL_RX_REO_QUEUE_INFO2_MSDU_COUNT GENMASK(31, 7)
+
+#define HAL_RX_REO_QUEUE_INFO3_TIMEOUT_COUNT GENMASK(9, 4)
+#define HAL_RX_REO_QUEUE_INFO3_FWD_DUE_TO_BAR_CNT GENMASK(15, 10)
+#define HAL_RX_REO_QUEUE_INFO3_DUPLICATE_COUNT GENMASK(31, 16)
+
+#define HAL_RX_REO_QUEUE_INFO4_FRAME_IN_ORD_COUNT GENMASK(23, 0)
+#define HAL_RX_REO_QUEUE_INFO4_BAR_RECVD_COUNT GENMASK(31, 24)
+
+#define HAL_RX_REO_QUEUE_INFO5_LATE_RX_MPDU_COUNT GENMASK(11, 0)
+#define HAL_RX_REO_QUEUE_INFO5_WINDOW_JUMP_2K GENMASK(15, 12)
+#define HAL_RX_REO_QUEUE_INFO5_HOLE_COUNT GENMASK(31, 16)
+
+struct hal_rx_reo_queue {
+ struct hal_desc_header desc_hdr;
+ u32 rx_queue_num;
+ u32 info0;
+ u32 info1;
+ u32 pn[4];
+ u32 last_rx_enqueue_timestamp;
+ u32 last_rx_dequeue_timestamp;
+ u32 next_aging_queue[2];
+ u32 prev_aging_queue[2];
+ u32 rx_bitmap[8];
+ u32 info2;
+ u32 info3;
+ u32 info4;
+ u32 processed_mpdus;
+ u32 processed_msdus;
+ u32 processed_total_bytes;
+ u32 info5;
+ u32 rsvd[3];
+ struct hal_rx_reo_queue_ext ext_desc[0];
+} __packed;
+
+/* hal_rx_reo_queue
+ *
+ * descriptor_header
+ * Details about which module owns this struct. Note that sub field
+ * Buffer_type shall be set to receive_reo_queue_descriptor.
+ *
+ * receive_queue_number
+ * Indicates the MPDU queue ID to which this MPDU link descriptor belongs.
+ *
+ * vld
+ * Valid bit indicating a session is established and the queue descriptor
+ * is valid.
+ * associated_link_descriptor_counter
+ * Indicates which of the 3 link descriptor counters shall be incremented
+ * or decremented when link descriptors are added or removed from this
+ * flow queue.
+ * disable_duplicate_detection
+ * When set, do not perform any duplicate detection.
+ * soft_reorder_enable
+ * When set, REO has been instructed to not perform the actual re-ordering
+ * of frames for this queue, but just to insert the reorder opcodes.
+ * ac
+ * Indicates the access category of the queue descriptor.
+ * bar
+ * Indicates if BAR has been received.
+ * retry
+ * Retry bit is checked if this bit is set.
+ * chk_2k_mode
+ * Indicates what type of operation is expected from REO when
+ * the received frame SN falls within the 2K window.
+ * oor_mode
+ * Indicates what type of operation is expected when the received frame
+ * falls within the OOR window.
+ * ba_window_size
+ * Indicates the negotiated (window size + 1). Max of 256 bits.
+ *
+ * A value of 255 means a 256-bit bitmap, 63 means a 64-bit bitmap,
+ * and 0 means a non-BA session with a window size of 0. These 3
+ * values are the main values validated, but other values should
+ * work as well.
+ *
+ * A BA window size of 0 (=> a one frame entry bitmap) means that
+ * there is no additional rx_reo_queue_ext desc. following
+ * rx_reo_queue in memory.
+ * A BA window size of 1 - 105 means that there is 1 rx_reo_queue_ext.
+ * A BA window size of 106 - 210 means that there are 2 rx_reo_queue_ext.
+ * A BA window size of 211 - 256 means that there are 3 rx_reo_queue_ext.
+ * pn_check_needed, pn_shall_be_even, pn_shall_be_uneven, pn_handling_enable,
+ * pn_size
+ * REO shall perform the PN increment check, even number check, uneven
+ * number check, PN error check and size of the PN field check.
+ * ignore_ampdu_flag
+ * REO shall ignore the ampdu_flag on entrance descriptor for this queue.
+ *
+ * svld
+ * The sequence number in the next field is a valid one.
+ * ssn
+ * Starting Sequence number of the session.
+ * current_index
+ * Points to last forwarded packet
+ * seq_2k_error_detected_flag
+ * REO has detected a 2k error jump in the sequence number and from that
+ * moment forward, all new frames are forwarded directly to FW, without
+ * duplicate detect, reordering, etc.
+ * pn_error_detected_flag
+ * REO has detected a PN error.
+ */
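+
+/* Illustrative sketch, not part of the original patch (helper name
+ * hypothetical; assumes DIV_ROUND_UP() from linux/kernel.h is in scope):
+ * the rx_reo_queue_ext sizing rule quoted above (none for a window of 0,
+ * then one per started block of 105 entries up to 256) reduces to a single
+ * rounded-up division.
+ */
+static inline u32 ath11k_hal_reo_queue_ext_cnt(u32 ba_window_size)
+{
+ return DIV_ROUND_UP(ba_window_size, 105);
+}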
+
+#define HAL_REO_UPD_RX_QUEUE_INFO0_QUEUE_ADDR_HI GENMASK(7, 0)
+#define HAL_REO_UPD_RX_QUEUE_INFO0_UPD_RX_QUEUE_NUM BIT(8)
+#define HAL_REO_UPD_RX_QUEUE_INFO0_UPD_VLD BIT(9)
+#define HAL_REO_UPD_RX_QUEUE_INFO0_UPD_ASSOC_LNK_DESC_CNT BIT(10)
+#define HAL_REO_UPD_RX_QUEUE_INFO0_UPD_DIS_DUP_DETECTION BIT(11)
+#define HAL_REO_UPD_RX_QUEUE_INFO0_UPD_SOFT_REORDER_EN BIT(12)
+#define HAL_REO_UPD_RX_QUEUE_INFO0_UPD_AC BIT(13)
+#define HAL_REO_UPD_RX_QUEUE_INFO0_UPD_BAR BIT(14)
+#define HAL_REO_UPD_RX_QUEUE_INFO0_UPD_RETRY BIT(15)
+#define HAL_REO_UPD_RX_QUEUE_INFO0_UPD_CHECK_2K_MODE BIT(16)
+#define HAL_REO_UPD_RX_QUEUE_INFO0_UPD_OOR_MODE BIT(17)
+#define HAL_REO_UPD_RX_QUEUE_INFO0_UPD_BA_WINDOW_SIZE BIT(18)
+#define HAL_REO_UPD_RX_QUEUE_INFO0_UPD_PN_CHECK BIT(19)
+#define HAL_REO_UPD_RX_QUEUE_INFO0_UPD_EVEN_PN BIT(20)
+#define HAL_REO_UPD_RX_QUEUE_INFO0_UPD_UNEVEN_PN BIT(21)
+#define HAL_REO_UPD_RX_QUEUE_INFO0_UPD_PN_HANDLE_ENABLE BIT(22)
+#define HAL_REO_UPD_RX_QUEUE_INFO0_UPD_PN_SIZE BIT(23)
+#define HAL_REO_UPD_RX_QUEUE_INFO0_UPD_IGNORE_AMPDU_FLG BIT(24)
+#define HAL_REO_UPD_RX_QUEUE_INFO0_UPD_SVLD BIT(25)
+#define HAL_REO_UPD_RX_QUEUE_INFO0_UPD_SSN BIT(26)
+#define HAL_REO_UPD_RX_QUEUE_INFO0_UPD_SEQ_2K_ERR BIT(27)
+#define HAL_REO_UPD_RX_QUEUE_INFO0_UPD_PN_ERR BIT(28)
+#define HAL_REO_UPD_RX_QUEUE_INFO0_UPD_PN_VALID BIT(29)
+#define HAL_REO_UPD_RX_QUEUE_INFO0_UPD_PN BIT(30)
+
+#define HAL_REO_UPD_RX_QUEUE_INFO1_RX_QUEUE_NUMBER GENMASK(15, 0)
+#define HAL_REO_UPD_RX_QUEUE_INFO1_VLD BIT(16)
+#define HAL_REO_UPD_RX_QUEUE_INFO1_ASSOC_LNK_DESC_COUNTER GENMASK(18, 17)
+#define HAL_REO_UPD_RX_QUEUE_INFO1_DIS_DUP_DETECTION BIT(19)
+#define HAL_REO_UPD_RX_QUEUE_INFO1_SOFT_REORDER_EN BIT(20)
+#define HAL_REO_UPD_RX_QUEUE_INFO1_AC GENMASK(22, 21)
+#define HAL_REO_UPD_RX_QUEUE_INFO1_BAR BIT(23)
+#define HAL_REO_UPD_RX_QUEUE_INFO1_RETRY BIT(24)
+#define HAL_REO_UPD_RX_QUEUE_INFO1_CHECK_2K_MODE BIT(25)
+#define HAL_REO_UPD_RX_QUEUE_INFO1_OOR_MODE BIT(26)
+#define HAL_REO_UPD_RX_QUEUE_INFO1_PN_CHECK BIT(27)
+#define HAL_REO_UPD_RX_QUEUE_INFO1_EVEN_PN BIT(28)
+#define HAL_REO_UPD_RX_QUEUE_INFO1_UNEVEN_PN BIT(29)
+#define HAL_REO_UPD_RX_QUEUE_INFO1_PN_HANDLE_ENABLE BIT(30)
+#define HAL_REO_UPD_RX_QUEUE_INFO1_IGNORE_AMPDU_FLG BIT(31)
+
+#define HAL_REO_UPD_RX_QUEUE_INFO2_BA_WINDOW_SIZE GENMASK(7, 0)
+#define HAL_REO_UPD_RX_QUEUE_INFO2_PN_SIZE GENMASK(9, 8)
+#define HAL_REO_UPD_RX_QUEUE_INFO2_SVLD BIT(10)
+#define HAL_REO_UPD_RX_QUEUE_INFO2_SSN GENMASK(22, 11)
+#define HAL_REO_UPD_RX_QUEUE_INFO2_SEQ_2K_ERR BIT(23)
+#define HAL_REO_UPD_RX_QUEUE_INFO2_PN_ERR BIT(24)
+#define HAL_REO_UPD_RX_QUEUE_INFO2_PN_VALID BIT(25)
+
+struct hal_reo_update_rx_queue {
+ struct hal_reo_cmd_hdr cmd;
+ u32 queue_addr_lo;
+ u32 info0;
+ u32 info1;
+ u32 info2;
+ u32 pn[4];
+} __packed;
+
+#define HAL_REO_UNBLOCK_CACHE_INFO0_UNBLK_CACHE BIT(0)
+#define HAL_REO_UNBLOCK_CACHE_INFO0_RESOURCE_IDX GENMASK(2, 1)
+
+struct hal_reo_unblock_cache {
+ struct hal_reo_cmd_hdr cmd;
+ u32 info0;
+ u32 rsvd[7];
+} __packed;
+
+enum hal_reo_exec_status {
+ HAL_REO_EXEC_STATUS_SUCCESS,
+ HAL_REO_EXEC_STATUS_BLOCKED,
+ HAL_REO_EXEC_STATUS_FAILED,
+ HAL_REO_EXEC_STATUS_RESOURCE_BLOCKED,
+};
+
+#define HAL_REO_STATUS_HDR_INFO0_STATUS_NUM GENMASK(15, 0)
+#define HAL_REO_STATUS_HDR_INFO0_EXEC_TIME GENMASK(25, 16)
+#define HAL_REO_STATUS_HDR_INFO0_EXEC_STATUS GENMASK(27, 26)
+
+struct hal_reo_status_hdr {
+ u32 info0;
+ u32 timestamp;
+} __packed;
+
+/* hal_reo_status_hdr
+ * Producer: REO
+ * Consumer: SW
+ *
+ * status_num
+ * The value in this field is equal to the value of the REO command
+ * number. This field helps to correlate the statuses with the REO
+ * commands.
+ *
+ * execution_time (in us)
+ * The amount of time REO took to execute the command. Note that
+ * this time does not include the duration of the command waiting
+ * in the command ring, before the execution started.
+ *
+ * execution_status
+ * Execution status of the command. Values are defined in
+ * enum %HAL_REO_EXEC_STATUS_.
+ */
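+
+/* Illustrative sketch, not part of the original patch (helper name
+ * hypothetical): correlating a status descriptor with its command number
+ * and checking the execution result.
+ */
+static inline bool ath11k_hal_reo_status_ok(struct hal_reo_status_hdr *hdr,
+ u16 *cmd_num)
+{
+ *cmd_num = FIELD_GET(HAL_REO_STATUS_HDR_INFO0_STATUS_NUM, hdr->info0);
+ return FIELD_GET(HAL_REO_STATUS_HDR_INFO0_EXEC_STATUS, hdr->info0) ==
+        HAL_REO_EXEC_STATUS_SUCCESS;
+}
+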
+#define HAL_REO_GET_QUEUE_STATS_STATUS_INFO0_SSN GENMASK(11, 0)
+#define HAL_REO_GET_QUEUE_STATS_STATUS_INFO0_CUR_IDX GENMASK(19, 12)
+
+#define HAL_REO_GET_QUEUE_STATS_STATUS_INFO1_MPDU_COUNT GENMASK(6, 0)
+#define HAL_REO_GET_QUEUE_STATS_STATUS_INFO1_MSDU_COUNT GENMASK(31, 7)
+
+#define HAL_REO_GET_QUEUE_STATS_STATUS_INFO2_TIMEOUT_COUNT GENMASK(9, 4)
+#define HAL_REO_GET_QUEUE_STATS_STATUS_INFO2_FDTB_COUNT GENMASK(15, 10)
+#define HAL_REO_GET_QUEUE_STATS_STATUS_INFO2_DUPLICATE_COUNT GENMASK(31, 16)
+
+#define HAL_REO_GET_QUEUE_STATS_STATUS_INFO3_FIO_COUNT GENMASK(23, 0)
+#define HAL_REO_GET_QUEUE_STATS_STATUS_INFO3_BAR_RCVD_CNT GENMASK(31, 24)
+
+#define HAL_REO_GET_QUEUE_STATS_STATUS_INFO4_LATE_RX_MPDU GENMASK(11, 0)
+#define HAL_REO_GET_QUEUE_STATS_STATUS_INFO4_WINDOW_JMP2K GENMASK(15, 12)
+#define HAL_REO_GET_QUEUE_STATS_STATUS_INFO4_HOLE_COUNT GENMASK(31, 16)
+
+#define HAL_REO_GET_QUEUE_STATS_STATUS_INFO5_LOOPING_CNT GENMASK(31, 28)
+
+struct hal_reo_get_queue_stats_status {
+ struct hal_reo_status_hdr hdr;
+ u32 info0;
+ u32 pn[4];
+ u32 last_rx_enqueue_timestamp;
+ u32 last_rx_dequeue_timestamp;
+ u32 rx_bitmap[8];
+ u32 info1;
+ u32 info2;
+ u32 info3;
+ u32 num_mpdu_frames;
+ u32 num_msdu_frames;
+ u32 total_bytes;
+ u32 info4;
+ u32 info5;
+} __packed;
+
+/* hal_reo_get_queue_stats_status
+ * Producer: REO
+ * Consumer: SW
+ *
+ * status_hdr
+ * Details that can link this status with the original command. It
+ * also contains info on how long REO took to execute this command.
+ *
+ * ssn
+ * Starting Sequence number of the session, this changes whenever
+ * window moves (can be filled by SW then maintained by REO).
+ *
+ * current_index
+ * Points to last forwarded packet.
+ *
+ * pn
+ * Bits of the PN number.
+ *
+ * last_rx_enqueue_timestamp
+ * last_rx_dequeue_timestamp
+ * Timestamp of arrival of the last MPDU for this queue and
+ * Timestamp of forwarding an MPDU accordingly.
+ *
+ * rx_bitmap
+ * When a bit is set, the corresponding frame is currently held
+ * in the re-order queue. The bitmap is fully managed by HW.
+ *
+ * current_mpdu_count
+ * current_msdu_count
+ * The number of MPDUs and MSDUs in the queue.
+ *
+ * timeout_count
+ * The number of times REO started forwarding frames even though
+ * there is a hole in the bitmap. Forwarding reason is timeout.
+ *
+ * forward_due_to_bar_count
+ * The number of times REO started forwarding frames even though
+ * there is a hole in the bitmap. Fwd reason is reception of BAR.
+ *
+ * duplicate_count
+ * The number of duplicate frames that have been detected.
+ *
+ * frames_in_order_count
+ * The number of frames that have been received in order (without
+ * a hole that prevented them from being forwarded immediately).
+ *
+ * bar_received_count
+ * The number of times a BAR frame is received.
+ *
+ * mpdu_frames_processed_count
+ * msdu_frames_processed_count
+ * The total number of MPDU/MSDU frames that have been processed.
+ *
+ * total_bytes
+ * An approximation of the number of bytes received for this queue.
+ *
+ * late_receive_mpdu_count
+ * The number of MPDUs received after the window had already moved
+ * on. The 'late' sequence window is defined as
+ * (Window SSN - 256) - (Window SSN - 1).
+ *
+ * window_jump_2k
+ * The number of times the window moved more than 2K
+ *
+ * hole_count
+ * The number of times a hole was created in the receive bitmap.
+ *
+ * looping_count
+ * A count value that indicates the number of times the producer of
+ * entries into this Ring has looped around the ring.
+ */
+
+#define HAL_REO_STATUS_LOOP_CNT GENMASK(31, 28)
+
+#define HAL_REO_FLUSH_QUEUE_INFO0_ERR_DETECTED BIT(0)
+#define HAL_REO_FLUSH_QUEUE_INFO0_RSVD GENMASK(31, 1)
+#define HAL_REO_FLUSH_QUEUE_INFO1_RSVD GENMASK(27, 0)
+
+struct hal_reo_flush_queue_status {
+ struct hal_reo_status_hdr hdr;
+ u32 info0;
+ u32 rsvd0[21];
+ u32 info1;
+} __packed;
+
+/* hal_reo_flush_queue_status
+ * Producer: REO
+ * Consumer: SW
+ *
+ * status_hdr
+ * Details that can link this status with the original command. It
+ * also contains info on how long REO took to execute this command.
+ *
+ * error_detected
+ * Status of blocking resource
+ *
+ * 0 - No error has been detected while executing this command
+ * 1 - Error detected. The resource to be used for blocking was
+ * already in use.
+ *
+ * looping_count
+ * A count value that indicates the number of times the producer of
+ * entries into this Ring has looped around the ring.
+ */
+
+#define HAL_REO_FLUSH_CACHE_STATUS_INFO0_IS_ERR BIT(0)
+#define HAL_REO_FLUSH_CACHE_STATUS_INFO0_BLOCK_ERR_CODE GENMASK(2, 1)
+#define HAL_REO_FLUSH_CACHE_STATUS_INFO0_FLUSH_STATUS_HIT BIT(8)
+#define HAL_REO_FLUSH_CACHE_STATUS_INFO0_FLUSH_DESC_TYPE GENMASK(11, 9)
+#define HAL_REO_FLUSH_CACHE_STATUS_INFO0_FLUSH_CLIENT_ID GENMASK(15, 12)
+#define HAL_REO_FLUSH_CACHE_STATUS_INFO0_FLUSH_ERR GENMASK(17, 16)
+#define HAL_REO_FLUSH_CACHE_STATUS_INFO0_FLUSH_COUNT GENMASK(25, 18)
+
+struct hal_reo_flush_cache_status {
+ struct hal_reo_status_hdr hdr;
+ u32 info0;
+ u32 rsvd0[21];
+ u32 info1;
+} __packed;
+
+/* hal_reo_flush_cache_status
+ * Producer: REO
+ * Consumer: SW
+ *
+ * status_hdr
+ * Details that can link this status with the original command. It
+ * also contains info on how long REO took to execute this command.
+ *
+ * error_detected
+ * Status for blocking resource handling
+ *
+ * 0 - No error has been detected while executing this command
+ * 1 - An error in the blocking resource management was detected
+ *
+ * block_error_details
+ * only valid when error_detected is set
+ *
+ * 0 - No blocking related errors found
+ * 1 - Blocking resource is already in use
+ * 2 - Resource requested to be unblocked, was not blocked
+ *
+ * cache_controller_flush_status_hit
+ * The status that the cache controller returned on executing the
+ * flush command.
+ *
+ * 0 - miss; 1 - hit
+ *
+ * cache_controller_flush_status_desc_type
+ * Flush descriptor type
+ *
+ * cache_controller_flush_status_client_id
+ * Module who made the flush request
+ *
+ * In REO, this is always 0
+ *
+ * cache_controller_flush_status_error
+ * Error condition
+ *
+ * 0 - No error found
+ * 1 - HW interface is still busy
+ * 2 - Line currently locked. Used for one line flush command
+ * 3 - At least one line is still locked.
+ * Used for cache flush command.
+ *
+ * cache_controller_flush_count
+ * The number of lines that were actually flushed out
+ *
+ * looping_count
+ * A count value that indicates the number of times the producer of
+ * entries into this Ring has looped around the ring.
+ */
+
+#define HAL_REO_UNBLOCK_CACHE_STATUS_INFO0_IS_ERR BIT(0)
+#define HAL_REO_UNBLOCK_CACHE_STATUS_INFO0_TYPE BIT(1)
+
+struct hal_reo_unblock_cache_status {
+ struct hal_reo_status_hdr hdr;
+ u32 info0;
+ u32 rsvd0[21];
+ u32 info1;
+} __packed;
+
+/* hal_reo_unblock_cache_status
+ * Producer: REO
+ * Consumer: SW
+ *
+ * status_hdr
+ * Details that can link this status with the original command. It
+ * also contains info on how long REO took to execute this command.
+ *
+ * error_detected
+ * 0 - No error has been detected while executing this command
+ * 1 - The blocking resource was not in use, and therefore it could
+ * not be unblocked.
+ *
+ * unblock_type
+ * Reference to the type of unblock command
+ * 0 - Unblock a blocking resource
+ * 1 - The entire cache usage is unblocked
+ *
+ * looping_count
+ * A count value that indicates the number of times the producer of
+ * entries into this Ring has looped around the ring.
+ */
+
+#define HAL_REO_FLUSH_TIMEOUT_STATUS_INFO0_IS_ERR BIT(0)
+#define HAL_REO_FLUSH_TIMEOUT_STATUS_INFO0_LIST_EMPTY BIT(1)
+
+#define HAL_REO_FLUSH_TIMEOUT_STATUS_INFO1_REL_DESC_COUNT GENMASK(15, 0)
+#define HAL_REO_FLUSH_TIMEOUT_STATUS_INFO1_FWD_BUF_COUNT GENMASK(31, 16)
+
+struct hal_reo_flush_timeout_list_status {
+ struct hal_reo_status_hdr hdr;
+ u32 info0;
+ u32 info1;
+ u32 rsvd0[20];
+ u32 info2;
+} __packed;
+
+/* hal_reo_flush_timeout_list_status
+ * Producer: REO
+ * Consumer: SW
+ *
+ * status_hdr
+ * Details that can link this status with the original command. It
+ * also contains info on how long REO took to execute this command.
+ *
+ * error_detected
+ * 0 - No error has been detected while executing this command
+ * 1 - Command not properly executed and returned with error
+ *
+ * timeout_list_empty
+ * When set, REO has depleted the timeout list and all entries are
+ * gone.
+ *
+ * release_desc_count
+ * Producer: SW; Consumer: REO
+ * The number of link descriptors released
+ *
+ * forward_buf_count
+ * Producer: SW; Consumer: REO
+ * The number of buffers forwarded to the REO destination rings
+ *
+ * looping_count
+ * A count value that indicates the number of times the producer of
+ * entries into this Ring has looped around the ring.
+ */
+
+#define HAL_REO_DESC_THRESH_STATUS_INFO0_THRESH_INDEX GENMASK(1, 0)
+#define HAL_REO_DESC_THRESH_STATUS_INFO1_LINK_DESC_COUNTER0 GENMASK(23, 0)
+#define HAL_REO_DESC_THRESH_STATUS_INFO2_LINK_DESC_COUNTER1 GENMASK(23, 0)
+#define HAL_REO_DESC_THRESH_STATUS_INFO3_LINK_DESC_COUNTER2 GENMASK(23, 0)
+#define HAL_REO_DESC_THRESH_STATUS_INFO4_LINK_DESC_COUNTER_SUM GENMASK(23, 0)
+
+struct hal_reo_desc_thresh_reached_status {
+ struct hal_reo_status_hdr hdr;
+ u32 info0;
+ u32 info1;
+ u32 info2;
+ u32 info3;
+ u32 info4;
+ u32 rsvd0[17];
+ u32 info5;
+} __packed;
+
+/* hal_reo_desc_thresh_reached_status
+ * Producer: REO
+ * Consumer: SW
+ *
+ * status_hdr
+ * Details that can link this status with the original command. It
+ * also contains info on how long REO took to execute this command.
+ *
+ * threshold_index
+ *	The index of the threshold register whose value was reached
+ *
+ * link_descriptor_counter0
+ * link_descriptor_counter1
+ * link_descriptor_counter2
+ * link_descriptor_counter_sum
+ * Value of the respective counters at generation of this message
+ *
+ * looping_count
+ * A count value that indicates the number of times the producer of
+ * entries into this Ring has looped around the ring.
+ */
+
+#endif /* ATH11K_HAL_DESC_H */
diff --git a/drivers/net/wireless/ath/ath11k/hal_rx.c b/drivers/net/wireless/ath/ath11k/hal_rx.c
new file mode 100644
index 000000000000..9e0f8064e427
--- /dev/null
+++ b/drivers/net/wireless/ath/ath11k/hal_rx.c
@@ -0,0 +1,1190 @@
+// SPDX-License-Identifier: BSD-3-Clause-Clear
+/*
+ * Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
+ */
+
+#include "ahb.h"
+#include "debug.h"
+#include "hal.h"
+#include "hal_tx.h"
+#include "hal_rx.h"
+#include "hal_desc.h"
+
+static void ath11k_hal_reo_set_desc_hdr(struct hal_desc_header *hdr,
+ u8 owner, u8 buffer_type, u32 magic)
+{
+ hdr->info0 = FIELD_PREP(HAL_DESC_HDR_INFO0_OWNER, owner) |
+ FIELD_PREP(HAL_DESC_HDR_INFO0_BUF_TYPE, buffer_type);
+
+ /* Magic pattern in reserved bits for debugging */
+ hdr->info0 |= FIELD_PREP(HAL_DESC_HDR_INFO0_DBG_RESERVED, magic);
+}
+
+static int ath11k_hal_reo_cmd_queue_stats(struct hal_tlv_hdr *tlv,
+ struct ath11k_hal_reo_cmd *cmd)
+{
+ struct hal_reo_get_queue_stats *desc;
+
+ tlv->tl = FIELD_PREP(HAL_TLV_HDR_TAG, HAL_REO_GET_QUEUE_STATS) |
+ FIELD_PREP(HAL_TLV_HDR_LEN, sizeof(*desc));
+
+ desc = (struct hal_reo_get_queue_stats *)tlv->value;
+ memset(&desc->queue_addr_lo, 0,
+ (sizeof(*desc) - sizeof(struct hal_reo_cmd_hdr)));
+
+ desc->cmd.info0 &= ~HAL_REO_CMD_HDR_INFO0_STATUS_REQUIRED;
+ if (cmd->flag & HAL_REO_CMD_FLG_NEED_STATUS)
+ desc->cmd.info0 |= HAL_REO_CMD_HDR_INFO0_STATUS_REQUIRED;
+
+ desc->queue_addr_lo = cmd->addr_lo;
+ desc->info0 = FIELD_PREP(HAL_REO_GET_QUEUE_STATS_INFO0_QUEUE_ADDR_HI,
+ cmd->addr_hi);
+ if (cmd->flag & HAL_REO_CMD_FLG_STATS_CLEAR)
+ desc->info0 |= HAL_REO_GET_QUEUE_STATS_INFO0_CLEAR_STATS;
+
+ return FIELD_GET(HAL_REO_CMD_HDR_INFO0_CMD_NUMBER, desc->cmd.info0);
+}
+
+static int ath11k_hal_reo_cmd_flush_cache(struct ath11k_hal *hal, struct hal_tlv_hdr *tlv,
+ struct ath11k_hal_reo_cmd *cmd)
+{
+ struct hal_reo_flush_cache *desc;
+ u8 avail_slot = ffz(hal->avail_blk_resource);
+
+ if (cmd->flag & HAL_REO_CMD_FLG_FLUSH_BLOCK_LATER) {
+ if (avail_slot >= HAL_MAX_AVAIL_BLK_RES)
+ return -ENOSPC;
+
+ hal->current_blk_index = avail_slot;
+ }
+
+ tlv->tl = FIELD_PREP(HAL_TLV_HDR_TAG, HAL_REO_FLUSH_CACHE) |
+ FIELD_PREP(HAL_TLV_HDR_LEN, sizeof(*desc));
+
+ desc = (struct hal_reo_flush_cache *)tlv->value;
+ memset(&desc->cache_addr_lo, 0,
+ (sizeof(*desc) - sizeof(struct hal_reo_cmd_hdr)));
+
+ desc->cmd.info0 &= ~HAL_REO_CMD_HDR_INFO0_STATUS_REQUIRED;
+ if (cmd->flag & HAL_REO_CMD_FLG_NEED_STATUS)
+ desc->cmd.info0 |= HAL_REO_CMD_HDR_INFO0_STATUS_REQUIRED;
+
+ desc->cache_addr_lo = cmd->addr_lo;
+ desc->info0 = FIELD_PREP(HAL_REO_FLUSH_CACHE_INFO0_CACHE_ADDR_HI,
+ cmd->addr_hi);
+
+ if (cmd->flag & HAL_REO_CMD_FLG_FLUSH_FWD_ALL_MPDUS)
+ desc->info0 |= HAL_REO_FLUSH_CACHE_INFO0_FWD_ALL_MPDUS;
+
+ if (cmd->flag & HAL_REO_CMD_FLG_FLUSH_BLOCK_LATER) {
+ desc->info0 |= HAL_REO_FLUSH_CACHE_INFO0_BLOCK_CACHE_USAGE;
+ desc->info0 |=
+ FIELD_PREP(HAL_REO_FLUSH_CACHE_INFO0_BLOCK_RESRC_IDX,
+ avail_slot);
+ }
+
+ if (cmd->flag & HAL_REO_CMD_FLG_FLUSH_NO_INVAL)
+ desc->info0 |= HAL_REO_FLUSH_CACHE_INFO0_FLUSH_WO_INVALIDATE;
+
+ if (cmd->flag & HAL_REO_CMD_FLG_FLUSH_ALL)
+ desc->info0 |= HAL_REO_FLUSH_CACHE_INFO0_FLUSH_ALL;
+
+ return FIELD_GET(HAL_REO_CMD_HDR_INFO0_CMD_NUMBER, desc->cmd.info0);
+}
+
+static int ath11k_hal_reo_cmd_update_rx_queue(struct hal_tlv_hdr *tlv,
+ struct ath11k_hal_reo_cmd *cmd)
+{
+ struct hal_reo_update_rx_queue *desc;
+
+ tlv->tl = FIELD_PREP(HAL_TLV_HDR_TAG, HAL_REO_UPDATE_RX_REO_QUEUE) |
+ FIELD_PREP(HAL_TLV_HDR_LEN, sizeof(*desc));
+
+ desc = (struct hal_reo_update_rx_queue *)tlv->value;
+ memset(&desc->queue_addr_lo, 0,
+ (sizeof(*desc) - sizeof(struct hal_reo_cmd_hdr)));
+
+ desc->cmd.info0 &= ~HAL_REO_CMD_HDR_INFO0_STATUS_REQUIRED;
+ if (cmd->flag & HAL_REO_CMD_FLG_NEED_STATUS)
+ desc->cmd.info0 |= HAL_REO_CMD_HDR_INFO0_STATUS_REQUIRED;
+
+ desc->queue_addr_lo = cmd->addr_lo;
+ desc->info0 =
+ FIELD_PREP(HAL_REO_UPD_RX_QUEUE_INFO0_QUEUE_ADDR_HI,
+ cmd->addr_hi) |
+ FIELD_PREP(HAL_REO_UPD_RX_QUEUE_INFO0_UPD_RX_QUEUE_NUM,
+ !!(cmd->upd0 & HAL_REO_CMD_UPD0_RX_QUEUE_NUM)) |
+ FIELD_PREP(HAL_REO_UPD_RX_QUEUE_INFO0_UPD_VLD,
+ !!(cmd->upd0 & HAL_REO_CMD_UPD0_VLD)) |
+ FIELD_PREP(HAL_REO_UPD_RX_QUEUE_INFO0_UPD_ASSOC_LNK_DESC_CNT,
+ !!(cmd->upd0 & HAL_REO_CMD_UPD0_ALDC)) |
+ FIELD_PREP(HAL_REO_UPD_RX_QUEUE_INFO0_UPD_DIS_DUP_DETECTION,
+ !!(cmd->upd0 & HAL_REO_CMD_UPD0_DIS_DUP_DETECTION)) |
+ FIELD_PREP(HAL_REO_UPD_RX_QUEUE_INFO0_UPD_SOFT_REORDER_EN,
+ !!(cmd->upd0 & HAL_REO_CMD_UPD0_SOFT_REORDER_EN)) |
+ FIELD_PREP(HAL_REO_UPD_RX_QUEUE_INFO0_UPD_AC,
+ !!(cmd->upd0 & HAL_REO_CMD_UPD0_AC)) |
+ FIELD_PREP(HAL_REO_UPD_RX_QUEUE_INFO0_UPD_BAR,
+ !!(cmd->upd0 & HAL_REO_CMD_UPD0_BAR)) |
+ FIELD_PREP(HAL_REO_UPD_RX_QUEUE_INFO0_UPD_RETRY,
+ !!(cmd->upd0 & HAL_REO_CMD_UPD0_RETRY)) |
+ FIELD_PREP(HAL_REO_UPD_RX_QUEUE_INFO0_UPD_CHECK_2K_MODE,
+ !!(cmd->upd0 & HAL_REO_CMD_UPD0_CHECK_2K_MODE)) |
+ FIELD_PREP(HAL_REO_UPD_RX_QUEUE_INFO0_UPD_OOR_MODE,
+ !!(cmd->upd0 & HAL_REO_CMD_UPD0_OOR_MODE)) |
+ FIELD_PREP(HAL_REO_UPD_RX_QUEUE_INFO0_UPD_BA_WINDOW_SIZE,
+ !!(cmd->upd0 & HAL_REO_CMD_UPD0_BA_WINDOW_SIZE)) |
+ FIELD_PREP(HAL_REO_UPD_RX_QUEUE_INFO0_UPD_PN_CHECK,
+ !!(cmd->upd0 & HAL_REO_CMD_UPD0_PN_CHECK)) |
+ FIELD_PREP(HAL_REO_UPD_RX_QUEUE_INFO0_UPD_EVEN_PN,
+ !!(cmd->upd0 & HAL_REO_CMD_UPD0_EVEN_PN)) |
+ FIELD_PREP(HAL_REO_UPD_RX_QUEUE_INFO0_UPD_UNEVEN_PN,
+ !!(cmd->upd0 & HAL_REO_CMD_UPD0_UNEVEN_PN)) |
+ FIELD_PREP(HAL_REO_UPD_RX_QUEUE_INFO0_UPD_PN_HANDLE_ENABLE,
+ !!(cmd->upd0 & HAL_REO_CMD_UPD0_PN_HANDLE_ENABLE)) |
+ FIELD_PREP(HAL_REO_UPD_RX_QUEUE_INFO0_UPD_PN_SIZE,
+ !!(cmd->upd0 & HAL_REO_CMD_UPD0_PN_SIZE)) |
+ FIELD_PREP(HAL_REO_UPD_RX_QUEUE_INFO0_UPD_IGNORE_AMPDU_FLG,
+ !!(cmd->upd0 & HAL_REO_CMD_UPD0_IGNORE_AMPDU_FLG)) |
+ FIELD_PREP(HAL_REO_UPD_RX_QUEUE_INFO0_UPD_SVLD,
+ !!(cmd->upd0 & HAL_REO_CMD_UPD0_SVLD)) |
+ FIELD_PREP(HAL_REO_UPD_RX_QUEUE_INFO0_UPD_SSN,
+ !!(cmd->upd0 & HAL_REO_CMD_UPD0_SSN)) |
+ FIELD_PREP(HAL_REO_UPD_RX_QUEUE_INFO0_UPD_SEQ_2K_ERR,
+ !!(cmd->upd0 & HAL_REO_CMD_UPD0_SEQ_2K_ERR)) |
+ FIELD_PREP(HAL_REO_UPD_RX_QUEUE_INFO0_UPD_PN_VALID,
+ !!(cmd->upd0 & HAL_REO_CMD_UPD0_PN_VALID)) |
+ FIELD_PREP(HAL_REO_UPD_RX_QUEUE_INFO0_UPD_PN,
+ !!(cmd->upd0 & HAL_REO_CMD_UPD0_PN));
+
+ desc->info1 =
+ FIELD_PREP(HAL_REO_UPD_RX_QUEUE_INFO1_RX_QUEUE_NUMBER,
+ cmd->rx_queue_num) |
+ FIELD_PREP(HAL_REO_UPD_RX_QUEUE_INFO1_VLD,
+ !!(cmd->upd1 & HAL_REO_CMD_UPD1_VLD)) |
+ FIELD_PREP(HAL_REO_UPD_RX_QUEUE_INFO1_ASSOC_LNK_DESC_COUNTER,
+ FIELD_GET(HAL_REO_CMD_UPD1_ALDC, cmd->upd1)) |
+ FIELD_PREP(HAL_REO_UPD_RX_QUEUE_INFO1_DIS_DUP_DETECTION,
+ !!(cmd->upd1 & HAL_REO_CMD_UPD1_DIS_DUP_DETECTION)) |
+ FIELD_PREP(HAL_REO_UPD_RX_QUEUE_INFO1_SOFT_REORDER_EN,
+ !!(cmd->upd1 & HAL_REO_CMD_UPD1_SOFT_REORDER_EN)) |
+ FIELD_PREP(HAL_REO_UPD_RX_QUEUE_INFO1_AC,
+ FIELD_GET(HAL_REO_CMD_UPD1_AC, cmd->upd1)) |
+ FIELD_PREP(HAL_REO_UPD_RX_QUEUE_INFO1_BAR,
+ !!(cmd->upd1 & HAL_REO_CMD_UPD1_BAR)) |
+ FIELD_PREP(HAL_REO_UPD_RX_QUEUE_INFO1_CHECK_2K_MODE,
+ !!(cmd->upd1 & HAL_REO_CMD_UPD1_CHECK_2K_MODE)) |
+ FIELD_PREP(HAL_REO_UPD_RX_QUEUE_INFO1_RETRY,
+ !!(cmd->upd1 & HAL_REO_CMD_UPD1_RETRY)) |
+ FIELD_PREP(HAL_REO_UPD_RX_QUEUE_INFO1_OOR_MODE,
+ !!(cmd->upd1 & HAL_REO_CMD_UPD1_OOR_MODE)) |
+ FIELD_PREP(HAL_REO_UPD_RX_QUEUE_INFO1_PN_CHECK,
+ !!(cmd->upd1 & HAL_REO_CMD_UPD1_PN_CHECK)) |
+ FIELD_PREP(HAL_REO_UPD_RX_QUEUE_INFO1_EVEN_PN,
+ !!(cmd->upd1 & HAL_REO_CMD_UPD1_EVEN_PN)) |
+ FIELD_PREP(HAL_REO_UPD_RX_QUEUE_INFO1_UNEVEN_PN,
+ !!(cmd->upd1 & HAL_REO_CMD_UPD1_UNEVEN_PN)) |
+ FIELD_PREP(HAL_REO_UPD_RX_QUEUE_INFO1_PN_HANDLE_ENABLE,
+ !!(cmd->upd1 & HAL_REO_CMD_UPD1_PN_HANDLE_ENABLE)) |
+ FIELD_PREP(HAL_REO_UPD_RX_QUEUE_INFO1_IGNORE_AMPDU_FLG,
+ !!(cmd->upd1 & HAL_REO_CMD_UPD1_IGNORE_AMPDU_FLG));
+
+ if (cmd->pn_size == 24)
+ cmd->pn_size = HAL_RX_REO_QUEUE_PN_SIZE_24;
+ else if (cmd->pn_size == 48)
+ cmd->pn_size = HAL_RX_REO_QUEUE_PN_SIZE_48;
+ else if (cmd->pn_size == 128)
+ cmd->pn_size = HAL_RX_REO_QUEUE_PN_SIZE_128;
+
+ if (cmd->ba_window_size < 1)
+ cmd->ba_window_size = 1;
+
+ if (cmd->ba_window_size == 1)
+ cmd->ba_window_size++;
+
+ desc->info2 =
+ FIELD_PREP(HAL_REO_UPD_RX_QUEUE_INFO2_BA_WINDOW_SIZE,
+ cmd->ba_window_size - 1) |
+ FIELD_PREP(HAL_REO_UPD_RX_QUEUE_INFO2_PN_SIZE, cmd->pn_size) |
+ FIELD_PREP(HAL_REO_UPD_RX_QUEUE_INFO2_SVLD,
+ !!(cmd->upd2 & HAL_REO_CMD_UPD2_SVLD)) |
+ FIELD_PREP(HAL_REO_UPD_RX_QUEUE_INFO2_SSN,
+ FIELD_GET(HAL_REO_CMD_UPD2_SSN, cmd->upd2)) |
+ FIELD_PREP(HAL_REO_UPD_RX_QUEUE_INFO2_SEQ_2K_ERR,
+ !!(cmd->upd2 & HAL_REO_CMD_UPD2_SEQ_2K_ERR)) |
+ FIELD_PREP(HAL_REO_UPD_RX_QUEUE_INFO2_PN_ERR,
+ !!(cmd->upd2 & HAL_REO_CMD_UPD2_PN_ERR));
+
+ return FIELD_GET(HAL_REO_CMD_HDR_INFO0_CMD_NUMBER, desc->cmd.info0);
+}
+
+int ath11k_hal_reo_cmd_send(struct ath11k_base *ab, struct hal_srng *srng,
+ enum hal_reo_cmd_type type,
+ struct ath11k_hal_reo_cmd *cmd)
+{
+ struct hal_tlv_hdr *reo_desc;
+ int ret;
+
+ spin_lock_bh(&srng->lock);
+
+ ath11k_hal_srng_access_begin(ab, srng);
+ reo_desc = (struct hal_tlv_hdr *)ath11k_hal_srng_src_get_next_entry(ab, srng);
+ if (!reo_desc) {
+ ret = -ENOBUFS;
+ goto out;
+ }
+
+ switch (type) {
+ case HAL_REO_CMD_GET_QUEUE_STATS:
+ ret = ath11k_hal_reo_cmd_queue_stats(reo_desc, cmd);
+ break;
+ case HAL_REO_CMD_FLUSH_CACHE:
+ ret = ath11k_hal_reo_cmd_flush_cache(&ab->hal, reo_desc, cmd);
+ break;
+ case HAL_REO_CMD_UPDATE_RX_QUEUE:
+ ret = ath11k_hal_reo_cmd_update_rx_queue(reo_desc, cmd);
+ break;
+ case HAL_REO_CMD_FLUSH_QUEUE:
+ case HAL_REO_CMD_UNBLOCK_CACHE:
+ case HAL_REO_CMD_FLUSH_TIMEOUT_LIST:
+ ath11k_warn(ab, "Unsupported reo command %d\n", type);
+ ret = -ENOTSUPP;
+ break;
+ default:
+ ath11k_warn(ab, "Unknown reo command %d\n", type);
+ ret = -EINVAL;
+ break;
+ }
+
+out:
+ ath11k_hal_srng_access_end(ab, srng);
+ spin_unlock_bh(&srng->lock);
+
+ return ret;
+}
+
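+/* Usage sketch (illustrative only, not part of the original patch): a caller
+ * invalidating a peer's RX queue through the REO command ring might do
+ *
+ *	struct ath11k_hal_reo_cmd cmd = {};
+ *
+ *	cmd.addr_lo = lower_32_bits(qbuf_paddr);
+ *	cmd.addr_hi = upper_32_bits(qbuf_paddr);
+ *	cmd.upd0 = HAL_REO_CMD_UPD0_VLD;
+ *	cmd.flag = HAL_REO_CMD_FLG_NEED_STATUS;
+ *	ret = ath11k_hal_reo_cmd_send(ab, srng, HAL_REO_CMD_UPDATE_RX_QUEUE,
+ *				      &cmd);
+ *
+ * where qbuf_paddr and srng are assumed to come from the caller's queue and
+ * ring bookkeeping. A non-negative return value is the command number used
+ * to match the status TLV later; negative values are errno codes (e.g.
+ * -ENOBUFS when the command ring is full).
+ */
+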
+void ath11k_hal_rx_buf_addr_info_set(void *desc, dma_addr_t paddr,
+ u32 cookie, u8 manager)
+{
+ struct ath11k_buffer_addr *binfo = (struct ath11k_buffer_addr *)desc;
+ u32 paddr_lo, paddr_hi;
+
+ paddr_lo = lower_32_bits(paddr);
+ paddr_hi = upper_32_bits(paddr);
+ binfo->info0 = FIELD_PREP(BUFFER_ADDR_INFO0_ADDR, paddr_lo);
+ binfo->info1 = FIELD_PREP(BUFFER_ADDR_INFO1_ADDR, paddr_hi) |
+ FIELD_PREP(BUFFER_ADDR_INFO1_SW_COOKIE, cookie) |
+ FIELD_PREP(BUFFER_ADDR_INFO1_RET_BUF_MGR, manager);
+}
+
+void ath11k_hal_rx_buf_addr_info_get(void *desc, dma_addr_t *paddr,
+ u32 *cookie, u8 *rbm)
+{
+ struct ath11k_buffer_addr *binfo = (struct ath11k_buffer_addr *)desc;
+
+ *paddr =
+ (((u64)FIELD_GET(BUFFER_ADDR_INFO1_ADDR, binfo->info1)) << 32) |
+ FIELD_GET(BUFFER_ADDR_INFO0_ADDR, binfo->info0);
+ *cookie = FIELD_GET(BUFFER_ADDR_INFO1_SW_COOKIE, binfo->info1);
+ *rbm = FIELD_GET(BUFFER_ADDR_INFO1_RET_BUF_MGR, binfo->info1);
+}
+
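+/* Layout note (illustrative): the DMA address is split across the two info
+ * words of struct ath11k_buffer_addr; the low 32 bits live in info0 and the
+ * remaining high bits in info1, next to the SW cookie and the
+ * return-buffer-manager field. That is what the set/get pair above packs
+ * and unpacks with FIELD_PREP()/FIELD_GET().
+ */
+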
+void ath11k_hal_rx_msdu_link_info_get(void *link_desc, u32 *num_msdus,
+ u32 *msdu_cookies,
+ enum hal_rx_buf_return_buf_manager *rbm)
+{
+ struct hal_rx_msdu_link *link = (struct hal_rx_msdu_link *)link_desc;
+ struct hal_rx_msdu_details *msdu;
+ int i;
+
+ *num_msdus = HAL_NUM_RX_MSDUS_PER_LINK_DESC;
+
+ msdu = &link->msdu_link[0];
+ *rbm = FIELD_GET(BUFFER_ADDR_INFO1_RET_BUF_MGR,
+ msdu->buf_addr_info.info1);
+
+ for (i = 0; i < *num_msdus; i++) {
+ msdu = &link->msdu_link[i];
+
+ if (!FIELD_GET(BUFFER_ADDR_INFO0_ADDR,
+ msdu->buf_addr_info.info0)) {
+ *num_msdus = i;
+ break;
+ }
+ *msdu_cookies = FIELD_GET(BUFFER_ADDR_INFO1_SW_COOKIE,
+ msdu->buf_addr_info.info1);
+ msdu_cookies++;
+ }
+}
+
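+/* Usage note (illustrative): callers are expected to pass an msdu_cookies
+ * array of at least HAL_NUM_RX_MSDUS_PER_LINK_DESC entries. The walk above
+ * stops at the first MSDU with a zero buffer address and reports the number
+ * of cookies actually filled in through *num_msdus.
+ */
+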
+int ath11k_hal_desc_reo_parse_err(struct ath11k_base *ab, u32 *rx_desc,
+ dma_addr_t *paddr, u32 *desc_bank)
+{
+ struct hal_reo_dest_ring *desc = (struct hal_reo_dest_ring *)rx_desc;
+ enum hal_reo_dest_ring_push_reason push_reason;
+ enum hal_reo_dest_ring_error_code err_code;
+
+ push_reason = FIELD_GET(HAL_REO_DEST_RING_INFO0_PUSH_REASON,
+ desc->info0);
+ err_code = FIELD_GET(HAL_REO_DEST_RING_INFO0_ERROR_CODE,
+ desc->info0);
+ ab->soc_stats.reo_error[err_code]++;
+
+ if (push_reason != HAL_REO_DEST_RING_PUSH_REASON_ERR_DETECTED &&
+ push_reason != HAL_REO_DEST_RING_PUSH_REASON_ROUTING_INSTRUCTION) {
+ ath11k_warn(ab, "expected error push reason code, received %d\n",
+ push_reason);
+ return -EINVAL;
+ }
+
+ if (FIELD_GET(HAL_REO_DEST_RING_INFO0_BUFFER_TYPE, desc->info0) !=
+ HAL_REO_DEST_RING_BUFFER_TYPE_LINK_DESC) {
+		ath11k_warn(ab, "expected buffer type link_desc\n");
+ return -EINVAL;
+ }
+
+ ath11k_hal_rx_reo_ent_paddr_get(ab, rx_desc, paddr, desc_bank);
+
+ return 0;
+}
+
+int ath11k_hal_wbm_desc_parse_err(struct ath11k_base *ab, void *desc,
+ struct hal_rx_wbm_rel_info *rel_info)
+{
+ struct hal_wbm_release_ring *wbm_desc = desc;
+ enum hal_wbm_rel_desc_type type;
+ enum hal_wbm_rel_src_module rel_src;
+
+ type = FIELD_GET(HAL_WBM_RELEASE_INFO0_DESC_TYPE,
+ wbm_desc->info0);
+ /* We expect only WBM_REL buffer type */
+ if (type != HAL_WBM_REL_DESC_TYPE_REL_MSDU) {
+ WARN_ON(1);
+ return -EINVAL;
+ }
+
+ rel_src = FIELD_GET(HAL_WBM_RELEASE_INFO0_REL_SRC_MODULE,
+ wbm_desc->info0);
+ if (rel_src != HAL_WBM_REL_SRC_MODULE_RXDMA &&
+ rel_src != HAL_WBM_REL_SRC_MODULE_REO)
+ return -EINVAL;
+
+ if (FIELD_GET(BUFFER_ADDR_INFO1_RET_BUF_MGR,
+ wbm_desc->buf_addr_info.info1) != HAL_RX_BUF_RBM_SW3_BM) {
+ ab->soc_stats.invalid_rbm++;
+ return -EINVAL;
+ }
+
+ rel_info->cookie = FIELD_GET(BUFFER_ADDR_INFO1_SW_COOKIE,
+ wbm_desc->buf_addr_info.info1);
+ rel_info->err_rel_src = rel_src;
+ if (rel_src == HAL_WBM_REL_SRC_MODULE_REO) {
+ rel_info->push_reason =
+ FIELD_GET(HAL_WBM_RELEASE_INFO0_REO_PUSH_REASON,
+ wbm_desc->info0);
+ rel_info->err_code =
+ FIELD_GET(HAL_WBM_RELEASE_INFO0_REO_ERROR_CODE,
+ wbm_desc->info0);
+ } else {
+ rel_info->push_reason =
+ FIELD_GET(HAL_WBM_RELEASE_INFO0_RXDMA_PUSH_REASON,
+ wbm_desc->info0);
+ rel_info->err_code =
+ FIELD_GET(HAL_WBM_RELEASE_INFO0_RXDMA_ERROR_CODE,
+ wbm_desc->info0);
+ }
+
+ rel_info->first_msdu = FIELD_GET(HAL_WBM_RELEASE_INFO2_FIRST_MSDU,
+ wbm_desc->info2);
+ rel_info->last_msdu = FIELD_GET(HAL_WBM_RELEASE_INFO2_LAST_MSDU,
+ wbm_desc->info2);
+ return 0;
+}
+
+void ath11k_hal_rx_reo_ent_paddr_get(struct ath11k_base *ab, void *desc,
+ dma_addr_t *paddr, u32 *desc_bank)
+{
+ struct ath11k_buffer_addr *buff_addr = desc;
+
+ *paddr = ((u64)(FIELD_GET(BUFFER_ADDR_INFO1_ADDR, buff_addr->info1)) << 32) |
+ FIELD_GET(BUFFER_ADDR_INFO0_ADDR, buff_addr->info0);
+
+ *desc_bank = FIELD_GET(BUFFER_ADDR_INFO1_SW_COOKIE, buff_addr->info1);
+}
+
+void ath11k_hal_rx_msdu_link_desc_set(struct ath11k_base *ab, void *desc,
+ void *link_desc,
+ enum hal_wbm_rel_bm_act action)
+{
+ struct hal_wbm_release_ring *dst_desc = desc;
+ struct hal_wbm_release_ring *src_desc = link_desc;
+
+ dst_desc->buf_addr_info = src_desc->buf_addr_info;
+ dst_desc->info0 |= FIELD_PREP(HAL_WBM_RELEASE_INFO0_REL_SRC_MODULE,
+ HAL_WBM_REL_SRC_MODULE_SW) |
+ FIELD_PREP(HAL_WBM_RELEASE_INFO0_BM_ACTION, action) |
+ FIELD_PREP(HAL_WBM_RELEASE_INFO0_DESC_TYPE,
+ HAL_WBM_REL_DESC_TYPE_MSDU_LINK);
+}
+
+void ath11k_hal_reo_status_queue_stats(struct ath11k_base *ab, u32 *reo_desc,
+ struct hal_reo_status *status)
+{
+ struct hal_tlv_hdr *tlv = (struct hal_tlv_hdr *)reo_desc;
+ struct hal_reo_get_queue_stats_status *desc =
+ (struct hal_reo_get_queue_stats_status *)tlv->value;
+
+ status->uniform_hdr.cmd_num =
+ FIELD_GET(HAL_REO_STATUS_HDR_INFO0_STATUS_NUM,
+ desc->hdr.info0);
+ status->uniform_hdr.cmd_status =
+ FIELD_GET(HAL_REO_STATUS_HDR_INFO0_EXEC_STATUS,
+ desc->hdr.info0);
+
+ ath11k_dbg(ab, ATH11k_DBG_HAL, "Queue stats status:\n");
+ ath11k_dbg(ab, ATH11k_DBG_HAL, "header: cmd_num %d status %d\n",
+ status->uniform_hdr.cmd_num,
+ status->uniform_hdr.cmd_status);
+ ath11k_dbg(ab, ATH11k_DBG_HAL, "ssn %ld cur_idx %ld\n",
+ FIELD_GET(HAL_REO_GET_QUEUE_STATS_STATUS_INFO0_SSN,
+ desc->info0),
+ FIELD_GET(HAL_REO_GET_QUEUE_STATS_STATUS_INFO0_CUR_IDX,
+ desc->info0));
+ ath11k_dbg(ab, ATH11k_DBG_HAL, "pn = [%08x, %08x, %08x, %08x]\n",
+ desc->pn[0], desc->pn[1], desc->pn[2], desc->pn[3]);
+ ath11k_dbg(ab, ATH11k_DBG_HAL, "last_rx: enqueue_tstamp %08x dequeue_tstamp %08x\n",
+ desc->last_rx_enqueue_timestamp,
+ desc->last_rx_dequeue_timestamp);
+ ath11k_dbg(ab, ATH11k_DBG_HAL, "rx_bitmap [%08x %08x %08x %08x %08x %08x %08x %08x]\n",
+ desc->rx_bitmap[0], desc->rx_bitmap[1], desc->rx_bitmap[2],
+ desc->rx_bitmap[3], desc->rx_bitmap[4], desc->rx_bitmap[5],
+ desc->rx_bitmap[6], desc->rx_bitmap[7]);
+ ath11k_dbg(ab, ATH11k_DBG_HAL, "count: cur_mpdu %ld cur_msdu %ld\n",
+ FIELD_GET(HAL_REO_GET_QUEUE_STATS_STATUS_INFO1_MPDU_COUNT,
+ desc->info1),
+ FIELD_GET(HAL_REO_GET_QUEUE_STATS_STATUS_INFO1_MSDU_COUNT,
+ desc->info1));
+ ath11k_dbg(ab, ATH11k_DBG_HAL, "fwd_timeout %ld fwd_bar %ld dup_count %ld\n",
+ FIELD_GET(HAL_REO_GET_QUEUE_STATS_STATUS_INFO2_TIMEOUT_COUNT,
+ desc->info2),
+ FIELD_GET(HAL_REO_GET_QUEUE_STATS_STATUS_INFO2_FDTB_COUNT,
+ desc->info2),
+ FIELD_GET(HAL_REO_GET_QUEUE_STATS_STATUS_INFO2_DUPLICATE_COUNT,
+ desc->info2));
+ ath11k_dbg(ab, ATH11k_DBG_HAL, "frames_in_order %ld bar_rcvd %ld\n",
+ FIELD_GET(HAL_REO_GET_QUEUE_STATS_STATUS_INFO3_FIO_COUNT,
+ desc->info3),
+ FIELD_GET(HAL_REO_GET_QUEUE_STATS_STATUS_INFO3_BAR_RCVD_CNT,
+ desc->info3));
+ ath11k_dbg(ab, ATH11k_DBG_HAL, "num_mpdus %d num_msdus %d total_bytes %d\n",
+ desc->num_mpdu_frames, desc->num_msdu_frames,
+ desc->total_bytes);
+ ath11k_dbg(ab, ATH11k_DBG_HAL, "late_rcvd %ld win_jump_2k %ld hole_cnt %ld\n",
+ FIELD_GET(HAL_REO_GET_QUEUE_STATS_STATUS_INFO4_LATE_RX_MPDU,
+ desc->info4),
+ FIELD_GET(HAL_REO_GET_QUEUE_STATS_STATUS_INFO4_WINDOW_JMP2K,
+ desc->info4),
+ FIELD_GET(HAL_REO_GET_QUEUE_STATS_STATUS_INFO4_HOLE_COUNT,
+ desc->info4));
+ ath11k_dbg(ab, ATH11k_DBG_HAL, "looping count %ld\n",
+ FIELD_GET(HAL_REO_GET_QUEUE_STATS_STATUS_INFO5_LOOPING_CNT,
+ desc->info5));
+}
+
+int ath11k_hal_reo_process_status(u8 *reo_desc, u8 *status)
+{
+ struct hal_tlv_hdr *tlv = (struct hal_tlv_hdr *)reo_desc;
+ struct hal_reo_status_hdr *hdr;
+
+ hdr = (struct hal_reo_status_hdr *)tlv->value;
+ *status = FIELD_GET(HAL_REO_STATUS_HDR_INFO0_EXEC_STATUS, hdr->info0);
+
+ return FIELD_GET(HAL_REO_STATUS_HDR_INFO0_STATUS_NUM, hdr->info0);
+}
+
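+/* Usage note (illustrative): a REO status ring handler would first call
+ * ath11k_hal_reo_process_status() to recover the command number and
+ * execution status, then dispatch on the TLV tag to the matching parser
+ * (queue stats, flush queue, flush cache and so on) to fill struct
+ * hal_reo_status.
+ */
+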
+void ath11k_hal_reo_flush_queue_status(struct ath11k_base *ab, u32 *reo_desc,
+ struct hal_reo_status *status)
+{
+ struct hal_tlv_hdr *tlv = (struct hal_tlv_hdr *)reo_desc;
+ struct hal_reo_flush_queue_status *desc =
+ (struct hal_reo_flush_queue_status *)tlv->value;
+
+ status->uniform_hdr.cmd_num =
+ FIELD_GET(HAL_REO_STATUS_HDR_INFO0_STATUS_NUM,
+ desc->hdr.info0);
+ status->uniform_hdr.cmd_status =
+ FIELD_GET(HAL_REO_STATUS_HDR_INFO0_EXEC_STATUS,
+ desc->hdr.info0);
+ status->u.flush_queue.err_detected =
+ FIELD_GET(HAL_REO_FLUSH_QUEUE_INFO0_ERR_DETECTED,
+ desc->info0);
+}
+
+void ath11k_hal_reo_flush_cache_status(struct ath11k_base *ab, u32 *reo_desc,
+ struct hal_reo_status *status)
+{
+ struct ath11k_hal *hal = &ab->hal;
+ struct hal_tlv_hdr *tlv = (struct hal_tlv_hdr *)reo_desc;
+ struct hal_reo_flush_cache_status *desc =
+ (struct hal_reo_flush_cache_status *)tlv->value;
+
+ status->uniform_hdr.cmd_num =
+ FIELD_GET(HAL_REO_STATUS_HDR_INFO0_STATUS_NUM,
+ desc->hdr.info0);
+ status->uniform_hdr.cmd_status =
+ FIELD_GET(HAL_REO_STATUS_HDR_INFO0_EXEC_STATUS,
+ desc->hdr.info0);
+
+ status->u.flush_cache.err_detected =
+ FIELD_GET(HAL_REO_FLUSH_CACHE_STATUS_INFO0_IS_ERR,
+ desc->info0);
+ status->u.flush_cache.err_code =
+ FIELD_GET(HAL_REO_FLUSH_CACHE_STATUS_INFO0_BLOCK_ERR_CODE,
+ desc->info0);
+ if (!status->u.flush_cache.err_code)
+ hal->avail_blk_resource |= BIT(hal->current_blk_index);
+
+ status->u.flush_cache.cache_controller_flush_status_hit =
+ FIELD_GET(HAL_REO_FLUSH_CACHE_STATUS_INFO0_FLUSH_STATUS_HIT,
+ desc->info0);
+
+ status->u.flush_cache.cache_controller_flush_status_desc_type =
+ FIELD_GET(HAL_REO_FLUSH_CACHE_STATUS_INFO0_FLUSH_DESC_TYPE,
+ desc->info0);
+ status->u.flush_cache.cache_controller_flush_status_client_id =
+ FIELD_GET(HAL_REO_FLUSH_CACHE_STATUS_INFO0_FLUSH_CLIENT_ID,
+ desc->info0);
+ status->u.flush_cache.cache_controller_flush_status_err =
+ FIELD_GET(HAL_REO_FLUSH_CACHE_STATUS_INFO0_FLUSH_ERR,
+ desc->info0);
+ status->u.flush_cache.cache_controller_flush_status_cnt =
+ FIELD_GET(HAL_REO_FLUSH_CACHE_STATUS_INFO0_FLUSH_COUNT,
+ desc->info0);
+}
+
+void ath11k_hal_reo_unblk_cache_status(struct ath11k_base *ab, u32 *reo_desc,
+ struct hal_reo_status *status)
+{
+ struct ath11k_hal *hal = &ab->hal;
+ struct hal_tlv_hdr *tlv = (struct hal_tlv_hdr *)reo_desc;
+ struct hal_reo_unblock_cache_status *desc =
+ (struct hal_reo_unblock_cache_status *)tlv->value;
+
+ status->uniform_hdr.cmd_num =
+ FIELD_GET(HAL_REO_STATUS_HDR_INFO0_STATUS_NUM,
+ desc->hdr.info0);
+ status->uniform_hdr.cmd_status =
+ FIELD_GET(HAL_REO_STATUS_HDR_INFO0_EXEC_STATUS,
+ desc->hdr.info0);
+
+ status->u.unblock_cache.err_detected =
+ FIELD_GET(HAL_REO_UNBLOCK_CACHE_STATUS_INFO0_IS_ERR,
+ desc->info0);
+ status->u.unblock_cache.unblock_type =
+ FIELD_GET(HAL_REO_UNBLOCK_CACHE_STATUS_INFO0_TYPE,
+ desc->info0);
+
+ if (!status->u.unblock_cache.err_detected &&
+ status->u.unblock_cache.unblock_type ==
+ HAL_REO_STATUS_UNBLOCK_BLOCKING_RESOURCE)
+ hal->avail_blk_resource &= ~BIT(hal->current_blk_index);
+}
+
+void ath11k_hal_reo_flush_timeout_list_status(struct ath11k_base *ab,
+ u32 *reo_desc,
+ struct hal_reo_status *status)
+{
+ struct hal_tlv_hdr *tlv = (struct hal_tlv_hdr *)reo_desc;
+ struct hal_reo_flush_timeout_list_status *desc =
+ (struct hal_reo_flush_timeout_list_status *)tlv->value;
+
+ status->uniform_hdr.cmd_num =
+ FIELD_GET(HAL_REO_STATUS_HDR_INFO0_STATUS_NUM,
+ desc->hdr.info0);
+ status->uniform_hdr.cmd_status =
+ FIELD_GET(HAL_REO_STATUS_HDR_INFO0_EXEC_STATUS,
+ desc->hdr.info0);
+
+ status->u.timeout_list.err_detected =
+ FIELD_GET(HAL_REO_FLUSH_TIMEOUT_STATUS_INFO0_IS_ERR,
+ desc->info0);
+ status->u.timeout_list.list_empty =
+ FIELD_GET(HAL_REO_FLUSH_TIMEOUT_STATUS_INFO0_LIST_EMPTY,
+ desc->info0);
+
+ status->u.timeout_list.release_desc_cnt =
+ FIELD_GET(HAL_REO_FLUSH_TIMEOUT_STATUS_INFO1_REL_DESC_COUNT,
+ desc->info1);
+ status->u.timeout_list.fwd_buf_cnt =
+ FIELD_GET(HAL_REO_FLUSH_TIMEOUT_STATUS_INFO1_FWD_BUF_COUNT,
+ desc->info1);
+}
+
+void ath11k_hal_reo_desc_thresh_reached_status(struct ath11k_base *ab,
+ u32 *reo_desc,
+ struct hal_reo_status *status)
+{
+ struct hal_tlv_hdr *tlv = (struct hal_tlv_hdr *)reo_desc;
+ struct hal_reo_desc_thresh_reached_status *desc =
+ (struct hal_reo_desc_thresh_reached_status *)tlv->value;
+
+ status->uniform_hdr.cmd_num =
+ FIELD_GET(HAL_REO_STATUS_HDR_INFO0_STATUS_NUM,
+ desc->hdr.info0);
+ status->uniform_hdr.cmd_status =
+ FIELD_GET(HAL_REO_STATUS_HDR_INFO0_EXEC_STATUS,
+ desc->hdr.info0);
+
+ status->u.desc_thresh_reached.threshold_idx =
+ FIELD_GET(HAL_REO_DESC_THRESH_STATUS_INFO0_THRESH_INDEX,
+ desc->info0);
+
+ status->u.desc_thresh_reached.link_desc_counter0 =
+ FIELD_GET(HAL_REO_DESC_THRESH_STATUS_INFO1_LINK_DESC_COUNTER0,
+ desc->info1);
+
+ status->u.desc_thresh_reached.link_desc_counter1 =
+ FIELD_GET(HAL_REO_DESC_THRESH_STATUS_INFO2_LINK_DESC_COUNTER1,
+ desc->info2);
+
+ status->u.desc_thresh_reached.link_desc_counter2 =
+ FIELD_GET(HAL_REO_DESC_THRESH_STATUS_INFO3_LINK_DESC_COUNTER2,
+ desc->info3);
+
+ status->u.desc_thresh_reached.link_desc_counter_sum =
+ FIELD_GET(HAL_REO_DESC_THRESH_STATUS_INFO4_LINK_DESC_COUNTER_SUM,
+ desc->info4);
+}
+
+void ath11k_hal_reo_update_rx_reo_queue_status(struct ath11k_base *ab,
+ u32 *reo_desc,
+ struct hal_reo_status *status)
+{
+ struct hal_tlv_hdr *tlv = (struct hal_tlv_hdr *)reo_desc;
+ struct hal_reo_status_hdr *desc =
+ (struct hal_reo_status_hdr *)tlv->value;
+
+ status->uniform_hdr.cmd_num =
+ FIELD_GET(HAL_REO_STATUS_HDR_INFO0_STATUS_NUM,
+ desc->info0);
+ status->uniform_hdr.cmd_status =
+ FIELD_GET(HAL_REO_STATUS_HDR_INFO0_EXEC_STATUS,
+ desc->info0);
+}
+
+u32 ath11k_hal_reo_qdesc_size(u32 ba_window_size, u8 tid)
+{
+ u32 num_ext_desc;
+
+ if (ba_window_size <= 1) {
+ if (tid != HAL_DESC_REO_NON_QOS_TID)
+ num_ext_desc = 1;
+ else
+ num_ext_desc = 0;
+ } else if (ba_window_size <= 105) {
+ num_ext_desc = 1;
+ } else if (ba_window_size <= 210) {
+ num_ext_desc = 2;
+ } else {
+ num_ext_desc = 3;
+ }
+
+ return sizeof(struct hal_rx_reo_queue) +
+ (num_ext_desc * sizeof(struct hal_rx_reo_queue_ext));
+}
+
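+/* Example (illustrative): a QoS TID with a BA window of 64 needs one
+ * extension descriptor, i.e. sizeof(struct hal_rx_reo_queue) +
+ * 1 * sizeof(struct hal_rx_reo_queue_ext); windows above 210 need three,
+ * and a non-QoS TID with a window of 1 needs none.
+ */
+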
+void ath11k_hal_reo_qdesc_setup(void *vaddr, int tid, u32 ba_window_size,
+ u32 start_seq)
+{
+ struct hal_rx_reo_queue *qdesc = (struct hal_rx_reo_queue *)vaddr;
+ struct hal_rx_reo_queue_ext *ext_desc;
+
+ memset(qdesc, 0, sizeof(*qdesc));
+
+ ath11k_hal_reo_set_desc_hdr(&qdesc->desc_hdr, HAL_DESC_REO_OWNED,
+ HAL_DESC_REO_QUEUE_DESC,
+ REO_QUEUE_DESC_MAGIC_DEBUG_PATTERN_0);
+
+ qdesc->rx_queue_num = FIELD_PREP(HAL_RX_REO_QUEUE_RX_QUEUE_NUMBER, tid);
+
+ qdesc->info0 =
+ FIELD_PREP(HAL_RX_REO_QUEUE_INFO0_VLD, 1) |
+ FIELD_PREP(HAL_RX_REO_QUEUE_INFO0_ASSOC_LNK_DESC_COUNTER, 1) |
+ FIELD_PREP(HAL_RX_REO_QUEUE_INFO0_AC, ath11k_tid_to_ac(tid));
+
+ if (ba_window_size < 1)
+ ba_window_size = 1;
+
+ if (ba_window_size == 1 && tid != HAL_DESC_REO_NON_QOS_TID)
+ ba_window_size++;
+
+ if (ba_window_size == 1)
+ qdesc->info0 |= FIELD_PREP(HAL_RX_REO_QUEUE_INFO0_RETRY, 1);
+
+ qdesc->info0 |= FIELD_PREP(HAL_RX_REO_QUEUE_INFO0_BA_WINDOW_SIZE,
+ ba_window_size - 1);
+
+ /* TODO: Set Ignore ampdu flags based on BA window size and/or
+ * AMPDU capabilities
+ */
+ qdesc->info0 |= FIELD_PREP(HAL_RX_REO_QUEUE_INFO0_IGNORE_AMPDU_FLG, 1);
+
+ qdesc->info1 |= FIELD_PREP(HAL_RX_REO_QUEUE_INFO1_SVLD, 0);
+
+ if (start_seq <= 0xfff)
+ qdesc->info1 = FIELD_PREP(HAL_RX_REO_QUEUE_INFO1_SSN,
+ start_seq);
+
+ if (tid == HAL_DESC_REO_NON_QOS_TID)
+ return;
+
+ ext_desc = qdesc->ext_desc;
+
+	/* TODO: HW queue descriptors are currently allocated for max BA
+	 * window size for all QOS TIDs so that the same descriptor can be
+	 * used later when an ADDBA request is received. This should be
+	 * changed to allocate HW queue descriptors based on the BA window
+	 * size being negotiated (0 for non-BA cases), reallocate when the
+	 * BA window size changes, and also send a WMI message to FW to
+	 * change the REO queue descriptor in the Rx peer entry as part of
+	 * dp_rx_tid_update.
+	 */
+ memset(ext_desc, 0, 3 * sizeof(*ext_desc));
+ ath11k_hal_reo_set_desc_hdr(&ext_desc->desc_hdr, HAL_DESC_REO_OWNED,
+ HAL_DESC_REO_QUEUE_EXT_DESC,
+ REO_QUEUE_DESC_MAGIC_DEBUG_PATTERN_1);
+ ext_desc++;
+ ath11k_hal_reo_set_desc_hdr(&ext_desc->desc_hdr, HAL_DESC_REO_OWNED,
+ HAL_DESC_REO_QUEUE_EXT_DESC,
+ REO_QUEUE_DESC_MAGIC_DEBUG_PATTERN_2);
+ ext_desc++;
+ ath11k_hal_reo_set_desc_hdr(&ext_desc->desc_hdr, HAL_DESC_REO_OWNED,
+ HAL_DESC_REO_QUEUE_EXT_DESC,
+ REO_QUEUE_DESC_MAGIC_DEBUG_PATTERN_3);
+}
+
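+/* Usage sketch (illustrative only): the data path would size and fill one
+ * queue descriptor per TID, e.g.
+ *
+ *	size = ath11k_hal_reo_qdesc_size(ba_win_sz, tid);
+ *	vaddr = dma_alloc_coherent(ab->dev, size, &paddr, GFP_KERNEL);
+ *	ath11k_hal_reo_qdesc_setup(vaddr, tid, ba_win_sz, ssn);
+ *
+ * where ba_win_sz, tid and ssn are assumed to come from the ADDBA state.
+ * The resulting paddr is then handed to the hardware as the RX queue
+ * address.
+ */
+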
+void ath11k_hal_reo_init_cmd_ring(struct ath11k_base *ab,
+ struct hal_srng *srng)
+{
+ struct hal_srng_params params;
+ struct hal_tlv_hdr *tlv;
+ struct hal_reo_get_queue_stats *desc;
+ int i, cmd_num = 1;
+ int entry_size;
+ u8 *entry;
+
+ memset(&params, 0, sizeof(params));
+
+ entry_size = ath11k_hal_srng_get_entrysize(HAL_REO_CMD);
+ ath11k_hal_srng_get_params(ab, srng, &params);
+ entry = (u8 *)params.ring_base_vaddr;
+
+ for (i = 0; i < params.num_entries; i++) {
+ tlv = (struct hal_tlv_hdr *)entry;
+ desc = (struct hal_reo_get_queue_stats *)tlv->value;
+ desc->cmd.info0 =
+ FIELD_PREP(HAL_REO_CMD_HDR_INFO0_CMD_NUMBER, cmd_num++);
+ entry += entry_size;
+ }
+}
+
+void ath11k_hal_reo_hw_setup(struct ath11k_base *ab)
+{
+ u32 reo_base = HAL_SEQ_WCSS_UMAC_REO_REG;
+ u32 val;
+
+ val = ath11k_ahb_read32(ab, reo_base + HAL_REO1_GEN_ENABLE);
+
+ val &= ~HAL_REO1_GEN_ENABLE_FRAG_DST_RING;
+ val |= FIELD_PREP(HAL_REO1_GEN_ENABLE_FRAG_DST_RING,
+ HAL_SRNG_RING_ID_REO2SW1) |
+ FIELD_PREP(HAL_REO1_GEN_ENABLE_AGING_LIST_ENABLE, 1) |
+ FIELD_PREP(HAL_REO1_GEN_ENABLE_AGING_FLUSH_ENABLE, 1);
+ ath11k_ahb_write32(ab, reo_base + HAL_REO1_GEN_ENABLE, val);
+
+ ath11k_ahb_write32(ab, reo_base + HAL_REO1_AGING_THRESH_IX_0,
+ HAL_DEFAULT_REO_TIMEOUT_USEC);
+ ath11k_ahb_write32(ab, reo_base + HAL_REO1_AGING_THRESH_IX_1,
+ HAL_DEFAULT_REO_TIMEOUT_USEC);
+ ath11k_ahb_write32(ab, reo_base + HAL_REO1_AGING_THRESH_IX_2,
+ HAL_DEFAULT_REO_TIMEOUT_USEC);
+ ath11k_ahb_write32(ab, reo_base + HAL_REO1_AGING_THRESH_IX_3,
+ HAL_DEFAULT_REO_TIMEOUT_USEC);
+}
+
+static enum hal_rx_mon_status
+ath11k_hal_rx_parse_mon_status_tlv(struct ath11k_base *ab,
+ struct hal_rx_mon_ppdu_info *ppdu_info,
+ u32 tlv_tag, u8 *tlv_data)
+{
+ u32 info0, info1;
+
+ switch (tlv_tag) {
+ case HAL_RX_PPDU_START: {
+ struct hal_rx_ppdu_start *ppdu_start =
+ (struct hal_rx_ppdu_start *)tlv_data;
+
+ ppdu_info->ppdu_id =
+ FIELD_GET(HAL_RX_PPDU_START_INFO0_PPDU_ID,
+ __le32_to_cpu(ppdu_start->info0));
+ ppdu_info->chan_num = __le32_to_cpu(ppdu_start->chan_num);
+ ppdu_info->ppdu_ts = __le32_to_cpu(ppdu_start->ppdu_start_ts);
+ break;
+ }
+ case HAL_RX_PPDU_END_USER_STATS: {
+ struct hal_rx_ppdu_end_user_stats *eu_stats =
+ (struct hal_rx_ppdu_end_user_stats *)tlv_data;
+
+ info0 = __le32_to_cpu(eu_stats->info0);
+ info1 = __le32_to_cpu(eu_stats->info1);
+
+ ppdu_info->tid =
+ ffs(FIELD_GET(HAL_RX_PPDU_END_USER_STATS_INFO6_TID_BITMAP,
+ __le32_to_cpu(eu_stats->info6))) - 1;
+ ppdu_info->tcp_msdu_count =
+ FIELD_GET(HAL_RX_PPDU_END_USER_STATS_INFO4_TCP_MSDU_CNT,
+ __le32_to_cpu(eu_stats->info4));
+ ppdu_info->udp_msdu_count =
+ FIELD_GET(HAL_RX_PPDU_END_USER_STATS_INFO4_UDP_MSDU_CNT,
+ __le32_to_cpu(eu_stats->info4));
+ ppdu_info->other_msdu_count =
+ FIELD_GET(HAL_RX_PPDU_END_USER_STATS_INFO5_OTHER_MSDU_CNT,
+ __le32_to_cpu(eu_stats->info5));
+ ppdu_info->tcp_ack_msdu_count =
+ FIELD_GET(HAL_RX_PPDU_END_USER_STATS_INFO5_TCP_ACK_MSDU_CNT,
+ __le32_to_cpu(eu_stats->info5));
+ ppdu_info->preamble_type =
+ FIELD_GET(HAL_RX_PPDU_END_USER_STATS_INFO1_PKT_TYPE, info1);
+ ppdu_info->num_mpdu_fcs_ok =
+ FIELD_GET(HAL_RX_PPDU_END_USER_STATS_INFO1_MPDU_CNT_FCS_OK,
+ info1);
+ ppdu_info->num_mpdu_fcs_err =
+ FIELD_GET(HAL_RX_PPDU_END_USER_STATS_INFO0_MPDU_CNT_FCS_ERR,
+ info0);
+ break;
+ }
+ case HAL_PHYRX_HT_SIG: {
+ struct hal_rx_ht_sig_info *ht_sig =
+ (struct hal_rx_ht_sig_info *)tlv_data;
+
+ info0 = __le32_to_cpu(ht_sig->info0);
+ info1 = __le32_to_cpu(ht_sig->info1);
+
+ ppdu_info->mcs = FIELD_GET(HAL_RX_HT_SIG_INFO_INFO0_MCS, info0);
+ ppdu_info->bw = FIELD_GET(HAL_RX_HT_SIG_INFO_INFO0_BW, info0);
+ ppdu_info->is_stbc = FIELD_GET(HAL_RX_HT_SIG_INFO_INFO1_STBC,
+ info1);
+ ppdu_info->ldpc = FIELD_GET(HAL_RX_HT_SIG_INFO_INFO1_FEC_CODING, info1);
+ ppdu_info->gi = info1 & HAL_RX_HT_SIG_INFO_INFO1_GI;
+
+ switch (ppdu_info->mcs) {
+ case 0 ... 7:
+ ppdu_info->nss = 1;
+ break;
+ case 8 ... 15:
+ ppdu_info->nss = 2;
+ break;
+ case 16 ... 23:
+ ppdu_info->nss = 3;
+ break;
+ case 24 ... 31:
+ ppdu_info->nss = 4;
+ break;
+ }
+
+ if (ppdu_info->nss > 1)
+ ppdu_info->mcs = ppdu_info->mcs % 8;
+
+ ppdu_info->reception_type = HAL_RX_RECEPTION_TYPE_SU;
+ break;
+ }
+ case HAL_PHYRX_L_SIG_B: {
+ struct hal_rx_lsig_b_info *lsigb =
+ (struct hal_rx_lsig_b_info *)tlv_data;
+
+ ppdu_info->rate = FIELD_GET(HAL_RX_LSIG_B_INFO_INFO0_RATE,
+ __le32_to_cpu(lsigb->info0));
+ ppdu_info->reception_type = HAL_RX_RECEPTION_TYPE_SU;
+ break;
+ }
+ case HAL_PHYRX_L_SIG_A: {
+ struct hal_rx_lsig_a_info *lsiga =
+ (struct hal_rx_lsig_a_info *)tlv_data;
+
+ ppdu_info->rate = FIELD_GET(HAL_RX_LSIG_A_INFO_INFO0_RATE,
+ __le32_to_cpu(lsiga->info0));
+ ppdu_info->reception_type = HAL_RX_RECEPTION_TYPE_SU;
+ break;
+ }
+ case HAL_PHYRX_VHT_SIG_A: {
+ struct hal_rx_vht_sig_a_info *vht_sig =
+ (struct hal_rx_vht_sig_a_info *)tlv_data;
+ u32 nsts;
+ u32 group_id;
+ u8 gi_setting;
+
+ info0 = __le32_to_cpu(vht_sig->info0);
+ info1 = __le32_to_cpu(vht_sig->info1);
+
+ ppdu_info->ldpc = FIELD_GET(HAL_RX_VHT_SIG_A_INFO_INFO1_SU_MU_CODING,
+					    info1);
+ ppdu_info->mcs = FIELD_GET(HAL_RX_VHT_SIG_A_INFO_INFO1_MCS,
+ info1);
+ gi_setting = FIELD_GET(HAL_RX_VHT_SIG_A_INFO_INFO1_GI_SETTING,
+ info1);
+ switch (gi_setting) {
+ case HAL_RX_VHT_SIG_A_NORMAL_GI:
+ ppdu_info->gi = HAL_RX_GI_0_8_US;
+ break;
+ case HAL_RX_VHT_SIG_A_SHORT_GI:
+ case HAL_RX_VHT_SIG_A_SHORT_GI_AMBIGUITY:
+ ppdu_info->gi = HAL_RX_GI_0_4_US;
+ break;
+ }
+
+ ppdu_info->is_stbc = info0 & HAL_RX_VHT_SIG_A_INFO_INFO0_STBC;
+ nsts = FIELD_GET(HAL_RX_VHT_SIG_A_INFO_INFO0_NSTS, info0);
+ if (ppdu_info->is_stbc && nsts > 0)
+ nsts = ((nsts + 1) >> 1) - 1;
+
+ ppdu_info->nss = (nsts & VHT_SIG_SU_NSS_MASK) + 1;
+ ppdu_info->bw = FIELD_GET(HAL_RX_VHT_SIG_A_INFO_INFO0_BW,
+ info0);
+ ppdu_info->beamformed = info1 &
+ HAL_RX_VHT_SIG_A_INFO_INFO1_BEAMFORMED;
+ group_id = FIELD_GET(HAL_RX_VHT_SIG_A_INFO_INFO0_GROUP_ID,
+ info0);
+ if (group_id == 0 || group_id == 63)
+ ppdu_info->reception_type = HAL_RX_RECEPTION_TYPE_SU;
+ else
+ ppdu_info->reception_type =
+ HAL_RX_RECEPTION_TYPE_MU_MIMO;
+ break;
+ }
+ case HAL_PHYRX_HE_SIG_A_SU: {
+ struct hal_rx_he_sig_a_su_info *he_sig_a =
+ (struct hal_rx_he_sig_a_su_info *)tlv_data;
+ u32 nsts, cp_ltf, dcm;
+
+ info0 = __le32_to_cpu(he_sig_a->info0);
+ info1 = __le32_to_cpu(he_sig_a->info1);
+
+ ppdu_info->mcs =
+ FIELD_GET(HAL_RX_HE_SIG_A_SU_INFO_INFO0_TRANSMIT_MCS,
+ info0);
+ ppdu_info->bw =
+ FIELD_GET(HAL_RX_HE_SIG_A_SU_INFO_INFO0_TRANSMIT_BW,
+ info0);
+		ppdu_info->ldpc = FIELD_GET(HAL_RX_HE_SIG_A_SU_INFO_INFO1_CODING, info1);
+ ppdu_info->is_stbc = info1 &
+ HAL_RX_HE_SIG_A_SU_INFO_INFO1_STBC;
+ ppdu_info->beamformed = info1 &
+ HAL_RX_HE_SIG_A_SU_INFO_INFO1_TXBF;
+ dcm = info0 & HAL_RX_HE_SIG_A_SU_INFO_INFO0_DCM;
+ cp_ltf = FIELD_GET(HAL_RX_HE_SIG_A_SU_INFO_INFO0_CP_LTF_SIZE,
+ info0);
+ nsts = FIELD_GET(HAL_RX_HE_SIG_A_SU_INFO_INFO0_NSTS, info0);
+
+ switch (cp_ltf) {
+ case 0:
+ case 1:
+ ppdu_info->gi = HAL_RX_GI_0_8_US;
+ break;
+ case 2:
+ ppdu_info->gi = HAL_RX_GI_1_6_US;
+ break;
+ case 3:
+ if (dcm && ppdu_info->is_stbc)
+ ppdu_info->gi = HAL_RX_GI_0_8_US;
+ else
+ ppdu_info->gi = HAL_RX_GI_3_2_US;
+ break;
+ }
+
+ ppdu_info->nss = nsts + 1;
+ ppdu_info->reception_type = HAL_RX_RECEPTION_TYPE_SU;
+ break;
+ }
+ case HAL_PHYRX_HE_SIG_A_MU_DL: {
+ struct hal_rx_he_sig_a_mu_dl_info *he_sig_a_mu_dl =
+ (struct hal_rx_he_sig_a_mu_dl_info *)tlv_data;
+
+ u32 cp_ltf;
+
+ info0 = __le32_to_cpu(he_sig_a_mu_dl->info0);
+ info1 = __le32_to_cpu(he_sig_a_mu_dl->info1);
+
+ ppdu_info->bw =
+ FIELD_GET(HAL_RX_HE_SIG_A_MU_DL_INFO_INFO0_TRANSMIT_BW,
+ info0);
+ cp_ltf = FIELD_GET(HAL_RX_HE_SIG_A_MU_DL_INFO_INFO0_CP_LTF_SIZE,
+ info0);
+
+ switch (cp_ltf) {
+ case 0:
+ case 1:
+ ppdu_info->gi = HAL_RX_GI_0_8_US;
+ break;
+ case 2:
+ ppdu_info->gi = HAL_RX_GI_1_6_US;
+ break;
+ case 3:
+ ppdu_info->gi = HAL_RX_GI_3_2_US;
+ break;
+ }
+
+ ppdu_info->is_stbc = info1 &
+ HAL_RX_HE_SIG_A_MU_DL_INFO_INFO1_STBC;
+ ppdu_info->reception_type = HAL_RX_RECEPTION_TYPE_MU_MIMO;
+ break;
+ }
+ case HAL_PHYRX_HE_SIG_B1_MU: {
+		/* TODO: Check if resource unit (RU) allocation stats
+		 * are required
+		 */
+ ppdu_info->reception_type = HAL_RX_RECEPTION_TYPE_MU_MIMO;
+ break;
+ }
+ case HAL_PHYRX_HE_SIG_B2_MU: {
+ struct hal_rx_he_sig_b2_mu_info *he_sig_b2_mu =
+ (struct hal_rx_he_sig_b2_mu_info *)tlv_data;
+
+ info0 = __le32_to_cpu(he_sig_b2_mu->info0);
+
+ ppdu_info->mcs =
+ FIELD_GET(HAL_RX_HE_SIG_B2_MU_INFO_INFO0_STA_MCS,
+ info0);
+ ppdu_info->nss =
+ FIELD_GET(HAL_RX_HE_SIG_B2_MU_INFO_INFO0_STA_NSTS,
+ info0) + 1;
+ ppdu_info->ldpc = FIELD_GET(HAL_RX_HE_SIG_B2_MU_INFO_INFO0_STA_CODING,
+ info0);
+ break;
+ }
+ case HAL_PHYRX_HE_SIG_B2_OFDMA: {
+ struct hal_rx_he_sig_b2_ofdma_info *he_sig_b2_ofdma =
+ (struct hal_rx_he_sig_b2_ofdma_info *)tlv_data;
+
+ info0 = __le32_to_cpu(he_sig_b2_ofdma->info0);
+
+ ppdu_info->mcs =
+ FIELD_GET(HAL_RX_HE_SIG_B2_OFDMA_INFO_INFO0_STA_MCS,
+ info0);
+ ppdu_info->nss =
+ FIELD_GET(HAL_RX_HE_SIG_B2_OFDMA_INFO_INFO0_STA_NSTS,
+ info0) + 1;
+ ppdu_info->beamformed =
+ info0 &
+ HAL_RX_HE_SIG_B2_OFDMA_INFO_INFO0_STA_TXBF;
+ ppdu_info->ldpc = FIELD_GET(HAL_RX_HE_SIG_B2_OFDMA_INFO_INFO0_STA_CODING,
+ info0);
+ ppdu_info->reception_type = HAL_RX_RECEPTION_TYPE_MU_OFDMA;
+ break;
+ }
+ case HAL_PHYRX_RSSI_LEGACY: {
+ struct hal_rx_phyrx_rssi_legacy_info *rssi =
+ (struct hal_rx_phyrx_rssi_legacy_info *)tlv_data;
+
+ /* TODO: Please note that the combined rssi will not be accurate
+		 * in the MU case. The RSSI in MU needs to be retrieved from
+		 * the PHYRX_OTHER_RECEIVE_INFO TLV.
+ */
+ ppdu_info->rssi_comb =
+ FIELD_GET(HAL_RX_PHYRX_RSSI_LEGACY_INFO_INFO1_RSSI_COMB,
+ __le32_to_cpu(rssi->info0));
+ break;
+ }
+ case HAL_RX_MPDU_START: {
+ struct hal_rx_mpdu_info *mpdu_info =
+ (struct hal_rx_mpdu_info *)tlv_data;
+ u16 peer_id;
+
+ peer_id = FIELD_GET(HAL_RX_MPDU_INFO_INFO0_PEERID,
+ __le32_to_cpu(mpdu_info->info0));
+ if (peer_id)
+ ppdu_info->peer_id = peer_id;
+ break;
+ }
+ case HAL_RXPCU_PPDU_END_INFO: {
+ struct hal_rx_ppdu_end_duration *ppdu_rx_duration =
+ (struct hal_rx_ppdu_end_duration *)tlv_data;
+ ppdu_info->rx_duration =
+ FIELD_GET(HAL_RX_PPDU_END_DURATION,
+ __le32_to_cpu(ppdu_rx_duration->info0));
+ break;
+ }
+ case HAL_DUMMY:
+ return HAL_RX_MON_STATUS_BUF_DONE;
+ case HAL_RX_PPDU_END_STATUS_DONE:
+ case 0:
+ return HAL_RX_MON_STATUS_PPDU_DONE;
+ default:
+ break;
+ }
+
+ return HAL_RX_MON_STATUS_PPDU_NOT_DONE;
+}
+
+enum hal_rx_mon_status
+ath11k_hal_rx_parse_mon_status(struct ath11k_base *ab,
+ struct hal_rx_mon_ppdu_info *ppdu_info,
+ struct sk_buff *skb)
+{
+ struct hal_tlv_hdr *tlv;
+ enum hal_rx_mon_status hal_status = HAL_RX_MON_STATUS_BUF_DONE;
+ u16 tlv_tag;
+ u16 tlv_len;
+ u8 *ptr = skb->data;
+
+ do {
+ tlv = (struct hal_tlv_hdr *)ptr;
+ tlv_tag = FIELD_GET(HAL_TLV_HDR_TAG, tlv->tl);
+ tlv_len = FIELD_GET(HAL_TLV_HDR_LEN, tlv->tl);
+ ptr += sizeof(*tlv);
+
+ /* The actual length of PPDU_END is the combined length of many PHY
+ * TLVs that follow. Skip the TLV header and
+ * rx_rxpcu_classification_overview that follows the header to get to
+		 * the next TLV.
+ */
+ if (tlv_tag == HAL_RX_PPDU_END)
+ tlv_len = sizeof(struct hal_rx_rxpcu_classification_overview);
+
+ hal_status = ath11k_hal_rx_parse_mon_status_tlv(ab, ppdu_info,
+ tlv_tag, ptr);
+ ptr += tlv_len;
+ ptr = PTR_ALIGN(ptr, HAL_TLV_ALIGN);
+
+ if ((ptr - skb->data) >= DP_RX_BUFFER_SIZE)
+ break;
+ } while (hal_status == HAL_RX_MON_STATUS_PPDU_NOT_DONE);
+
+ return hal_status;
+}
+
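+/* Buffer layout (illustrative): a monitor status buffer is a sequence of
+ * 4-byte-aligned TLVs,
+ *
+ *	[tl: tag | len][value ...][pad][tl: tag | len][value ...] ...
+ *
+ * which the parser above consumes until it sees HAL_RX_PPDU_END_STATUS_DONE
+ * (PPDU complete) or HAL_DUMMY (buffer exhausted), or until
+ * DP_RX_BUFFER_SIZE bytes have been walked.
+ */
+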
+void ath11k_hal_rx_reo_ent_buf_paddr_get(void *rx_desc, dma_addr_t *paddr,
+ u32 *sw_cookie, void **pp_buf_addr,
+ u32 *msdu_cnt)
+{
+ struct hal_reo_entrance_ring *reo_ent_ring =
+ (struct hal_reo_entrance_ring *)rx_desc;
+ struct ath11k_buffer_addr *buf_addr_info;
+ struct rx_mpdu_desc *rx_mpdu_desc_info_details;
+
+ rx_mpdu_desc_info_details =
+ (struct rx_mpdu_desc *)&reo_ent_ring->rx_mpdu_info;
+
+ *msdu_cnt = FIELD_GET(RX_MPDU_DESC_INFO0_MSDU_COUNT,
+ rx_mpdu_desc_info_details->info0);
+
+ buf_addr_info = (struct ath11k_buffer_addr *)&reo_ent_ring->buf_addr_info;
+
+ *paddr = (((u64)FIELD_GET(BUFFER_ADDR_INFO1_ADDR,
+ buf_addr_info->info1)) << 32) |
+ FIELD_GET(BUFFER_ADDR_INFO0_ADDR,
+ buf_addr_info->info0);
+
+ *sw_cookie = FIELD_GET(BUFFER_ADDR_INFO1_SW_COOKIE,
+ buf_addr_info->info1);
+
+ *pp_buf_addr = (void *)buf_addr_info;
+}
diff --git a/drivers/net/wireless/ath/ath11k/hal_rx.h b/drivers/net/wireless/ath/ath11k/hal_rx.h
new file mode 100644
index 000000000000..bb022c781c48
--- /dev/null
+++ b/drivers/net/wireless/ath/ath11k/hal_rx.h
@@ -0,0 +1,332 @@
+/* SPDX-License-Identifier: BSD-3-Clause-Clear */
+/*
+ * Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
+ */
+
+#ifndef ATH11K_HAL_RX_H
+#define ATH11K_HAL_RX_H
+
+struct hal_rx_wbm_rel_info {
+ u32 cookie;
+ enum hal_wbm_rel_src_module err_rel_src;
+ enum hal_reo_dest_ring_push_reason push_reason;
+ u32 err_code;
+ bool first_msdu;
+ bool last_msdu;
+};
+
+#define HAL_INVALID_PEERID 0xffff
+#define VHT_SIG_SU_NSS_MASK 0x7
+
+#define HAL_RX_MAX_MCS 12
+#define HAL_RX_MAX_NSS 8
+
+struct hal_rx_mon_status_tlv_hdr {
+ u32 hdr;
+ u8 value[0];
+};
+
+enum hal_rx_su_mu_coding {
+ HAL_RX_SU_MU_CODING_BCC,
+ HAL_RX_SU_MU_CODING_LDPC,
+ HAL_RX_SU_MU_CODING_MAX,
+};
+
+enum hal_rx_gi {
+ HAL_RX_GI_0_8_US,
+ HAL_RX_GI_0_4_US,
+ HAL_RX_GI_1_6_US,
+ HAL_RX_GI_3_2_US,
+ HAL_RX_GI_MAX,
+};
+
+enum hal_rx_bw {
+ HAL_RX_BW_20MHZ,
+ HAL_RX_BW_40MHZ,
+ HAL_RX_BW_80MHZ,
+ HAL_RX_BW_160MHZ,
+ HAL_RX_BW_MAX,
+};
+
+enum hal_rx_preamble {
+ HAL_RX_PREAMBLE_11A,
+ HAL_RX_PREAMBLE_11B,
+ HAL_RX_PREAMBLE_11N,
+ HAL_RX_PREAMBLE_11AC,
+ HAL_RX_PREAMBLE_11AX,
+ HAL_RX_PREAMBLE_MAX,
+};
+
+enum hal_rx_reception_type {
+ HAL_RX_RECEPTION_TYPE_SU,
+ HAL_RX_RECEPTION_TYPE_MU_MIMO,
+ HAL_RX_RECEPTION_TYPE_MU_OFDMA,
+ HAL_RX_RECEPTION_TYPE_MU_OFDMA_MIMO,
+ HAL_RX_RECEPTION_TYPE_MAX,
+};
+
+#define HAL_TLV_STATUS_PPDU_NOT_DONE 0
+#define HAL_TLV_STATUS_PPDU_DONE 1
+#define HAL_TLV_STATUS_BUF_DONE 2
+#define HAL_TLV_STATUS_PPDU_NON_STD_DONE 3
+#define HAL_RX_FCS_LEN 4
+
+enum hal_rx_mon_status {
+ HAL_RX_MON_STATUS_PPDU_NOT_DONE,
+ HAL_RX_MON_STATUS_PPDU_DONE,
+ HAL_RX_MON_STATUS_BUF_DONE,
+};
+
+struct hal_rx_mon_ppdu_info {
+ u32 ppdu_id;
+ u32 ppdu_ts;
+ u32 num_mpdu_fcs_ok;
+ u32 num_mpdu_fcs_err;
+ u32 preamble_type;
+ u16 chan_num;
+ u16 tcp_msdu_count;
+ u16 tcp_ack_msdu_count;
+ u16 udp_msdu_count;
+ u16 other_msdu_count;
+ u16 peer_id;
+ u8 rate;
+ u8 mcs;
+ u8 nss;
+ u8 bw;
+ u8 is_stbc;
+ u8 gi;
+ u8 ldpc;
+ u8 beamformed;
+ u8 rssi_comb;
+ u8 tid;
+ u8 reception_type;
+ u64 rx_duration;
+};
+
+#define HAL_RX_PPDU_START_INFO0_PPDU_ID GENMASK(15, 0)
+
+struct hal_rx_ppdu_start {
+ __le32 info0;
+ __le32 chan_num;
+ __le32 ppdu_start_ts;
+} __packed;
+
+#define HAL_RX_PPDU_END_USER_STATS_INFO0_MPDU_CNT_FCS_ERR GENMASK(25, 16)
+
+#define HAL_RX_PPDU_END_USER_STATS_INFO1_MPDU_CNT_FCS_OK GENMASK(8, 0)
+#define HAL_RX_PPDU_END_USER_STATS_INFO1_FC_VALID BIT(9)
+#define HAL_RX_PPDU_END_USER_STATS_INFO1_QOS_CTRL_VALID BIT(10)
+#define HAL_RX_PPDU_END_USER_STATS_INFO1_HT_CTRL_VALID BIT(11)
+#define HAL_RX_PPDU_END_USER_STATS_INFO1_PKT_TYPE GENMASK(23, 20)
+
+#define HAL_RX_PPDU_END_USER_STATS_INFO2_AST_INDEX GENMASK(15, 0)
+#define HAL_RX_PPDU_END_USER_STATS_INFO2_FRAME_CTRL GENMASK(31, 16)
+
+#define HAL_RX_PPDU_END_USER_STATS_INFO3_QOS_CTRL GENMASK(31, 16)
+
+#define HAL_RX_PPDU_END_USER_STATS_INFO4_UDP_MSDU_CNT GENMASK(15, 0)
+#define HAL_RX_PPDU_END_USER_STATS_INFO4_TCP_MSDU_CNT GENMASK(31, 16)
+
+#define HAL_RX_PPDU_END_USER_STATS_INFO5_OTHER_MSDU_CNT GENMASK(15, 0)
+#define HAL_RX_PPDU_END_USER_STATS_INFO5_TCP_ACK_MSDU_CNT GENMASK(31, 16)
+
+#define HAL_RX_PPDU_END_USER_STATS_INFO6_TID_BITMAP GENMASK(15, 0)
+#define HAL_RX_PPDU_END_USER_STATS_INFO6_TID_EOSP_BITMAP GENMASK(31, 16)
+
+struct hal_rx_ppdu_end_user_stats {
+ __le32 rsvd0[2];
+ __le32 info0;
+ __le32 info1;
+ __le32 info2;
+ __le32 info3;
+ __le32 ht_ctrl;
+ __le32 rsvd1[2];
+ __le32 info4;
+ __le32 info5;
+ __le32 info6;
+ __le32 rsvd2[11];
+} __packed;
+
+#define HAL_RX_HT_SIG_INFO_INFO0_MCS GENMASK(6, 0)
+#define HAL_RX_HT_SIG_INFO_INFO0_BW BIT(7)
+
+#define HAL_RX_HT_SIG_INFO_INFO1_STBC GENMASK(5, 4)
+#define HAL_RX_HT_SIG_INFO_INFO1_FEC_CODING BIT(6)
+#define HAL_RX_HT_SIG_INFO_INFO1_GI BIT(7)
+
+struct hal_rx_ht_sig_info {
+ __le32 info0;
+ __le32 info1;
+} __packed;
+
+#define HAL_RX_LSIG_B_INFO_INFO0_RATE GENMASK(3, 0)
+#define HAL_RX_LSIG_B_INFO_INFO0_LEN GENMASK(15, 4)
+
+struct hal_rx_lsig_b_info {
+ __le32 info0;
+} __packed;
+
+#define HAL_RX_LSIG_A_INFO_INFO0_RATE GENMASK(3, 0)
+#define HAL_RX_LSIG_A_INFO_INFO0_LEN GENMASK(16, 5)
+#define HAL_RX_LSIG_A_INFO_INFO0_PKT_TYPE GENMASK(27, 24)
+
+struct hal_rx_lsig_a_info {
+ __le32 info0;
+} __packed;
+
+#define HAL_RX_VHT_SIG_A_INFO_INFO0_BW GENMASK(1, 0)
+#define HAL_RX_VHT_SIG_A_INFO_INFO0_STBC BIT(3)
+#define HAL_RX_VHT_SIG_A_INFO_INFO0_GROUP_ID GENMASK(9, 4)
+#define HAL_RX_VHT_SIG_A_INFO_INFO0_NSTS GENMASK(21, 10)
+
+#define HAL_RX_VHT_SIG_A_INFO_INFO1_GI_SETTING GENMASK(1, 0)
+#define HAL_RX_VHT_SIG_A_INFO_INFO1_SU_MU_CODING BIT(2)
+#define HAL_RX_VHT_SIG_A_INFO_INFO1_MCS GENMASK(7, 4)
+#define HAL_RX_VHT_SIG_A_INFO_INFO1_BEAMFORMED BIT(8)
+
+struct hal_rx_vht_sig_a_info {
+ __le32 info0;
+ __le32 info1;
+} __packed;
+
+enum hal_rx_vht_sig_a_gi_setting {
+ HAL_RX_VHT_SIG_A_NORMAL_GI = 0,
+ HAL_RX_VHT_SIG_A_SHORT_GI = 1,
+ HAL_RX_VHT_SIG_A_SHORT_GI_AMBIGUITY = 3,
+};
+
+#define HAL_RX_HE_SIG_A_SU_INFO_INFO0_TRANSMIT_MCS GENMASK(6, 3)
+#define HAL_RX_HE_SIG_A_SU_INFO_INFO0_DCM BIT(7)
+#define HAL_RX_HE_SIG_A_SU_INFO_INFO0_TRANSMIT_BW GENMASK(20, 19)
+#define HAL_RX_HE_SIG_A_SU_INFO_INFO0_CP_LTF_SIZE GENMASK(22, 21)
+#define HAL_RX_HE_SIG_A_SU_INFO_INFO0_NSTS GENMASK(25, 23)
+
+#define HAL_RX_HE_SIG_A_SU_INFO_INFO1_CODING BIT(7)
+#define HAL_RX_HE_SIG_A_SU_INFO_INFO1_STBC BIT(9)
+#define HAL_RX_HE_SIG_A_SU_INFO_INFO1_TXBF BIT(10)
+
+struct hal_rx_he_sig_a_su_info {
+ __le32 info0;
+ __le32 info1;
+} __packed;
+
+#define HAL_RX_HE_SIG_A_MU_DL_INFO_INFO0_TRANSMIT_BW GENMASK(17, 15)
+#define HAL_RX_HE_SIG_A_MU_DL_INFO_INFO0_CP_LTF_SIZE GENMASK(24, 23)
+
+#define HAL_RX_HE_SIG_A_MU_DL_INFO_INFO1_STBC BIT(12)
+
+struct hal_rx_he_sig_a_mu_dl_info {
+ __le32 info0;
+ __le32 info1;
+} __packed;
+
+#define HAL_RX_HE_SIG_B1_MU_INFO_INFO0_RU_ALLOCATION GENMASK(7, 0)
+
+struct hal_rx_he_sig_b1_mu_info {
+ __le32 info0;
+} __packed;
+
+#define HAL_RX_HE_SIG_B2_MU_INFO_INFO0_STA_MCS GENMASK(18, 15)
+#define HAL_RX_HE_SIG_B2_MU_INFO_INFO0_STA_CODING BIT(20)
+#define HAL_RX_HE_SIG_B2_MU_INFO_INFO0_STA_NSTS GENMASK(31, 29)
+
+struct hal_rx_he_sig_b2_mu_info {
+ __le32 info0;
+} __packed;
+
+#define HAL_RX_HE_SIG_B2_OFDMA_INFO_INFO0_STA_NSTS GENMASK(13, 11)
+#define HAL_RX_HE_SIG_B2_OFDMA_INFO_INFO0_STA_TXBF	BIT(14)
+#define HAL_RX_HE_SIG_B2_OFDMA_INFO_INFO0_STA_MCS GENMASK(18, 15)
+#define HAL_RX_HE_SIG_B2_OFDMA_INFO_INFO0_STA_DCM BIT(19)
+#define HAL_RX_HE_SIG_B2_OFDMA_INFO_INFO0_STA_CODING BIT(20)
+
+struct hal_rx_he_sig_b2_ofdma_info {
+ __le32 info0;
+} __packed;
+
+#define HAL_RX_PHYRX_RSSI_LEGACY_INFO_INFO1_RSSI_COMB GENMASK(15, 8)
+
+struct hal_rx_phyrx_rssi_legacy_info {
+ __le32 rsvd[35];
+ __le32 info0;
+} __packed;
+
+#define HAL_RX_MPDU_INFO_INFO0_PEERID GENMASK(31, 16)
+struct hal_rx_mpdu_info {
+ __le32 rsvd0;
+ __le32 info0;
+ __le32 rsvd1[21];
+} __packed;
+
+#define HAL_RX_PPDU_END_DURATION GENMASK(23, 0)
+struct hal_rx_ppdu_end_duration {
+ __le32 rsvd0[9];
+ __le32 info0;
+ __le32 rsvd1[4];
+} __packed;
+
+struct hal_rx_rxpcu_classification_overview {
+ u32 rsvd0;
+} __packed;
+
+struct hal_rx_msdu_desc_info {
+ u32 msdu_flags;
+ u16 msdu_len; /* 14 bits for length */
+};
+
+#define HAL_RX_NUM_MSDU_DESC 6
+struct hal_rx_msdu_list {
+ struct hal_rx_msdu_desc_info msdu_info[HAL_RX_NUM_MSDU_DESC];
+ u32 sw_cookie[HAL_RX_NUM_MSDU_DESC];
+ u8 rbm[HAL_RX_NUM_MSDU_DESC];
+};
+
+void ath11k_hal_reo_status_queue_stats(struct ath11k_base *ab, u32 *reo_desc,
+ struct hal_reo_status *status);
+void ath11k_hal_reo_flush_queue_status(struct ath11k_base *ab, u32 *reo_desc,
+ struct hal_reo_status *status);
+void ath11k_hal_reo_flush_cache_status(struct ath11k_base *ab, u32 *reo_desc,
+ struct hal_reo_status *status);
+void ath11k_hal_reo_unblk_cache_status(struct ath11k_base *ab, u32 *reo_desc,
+ struct hal_reo_status *status);
+void ath11k_hal_reo_flush_timeout_list_status(struct ath11k_base *ab,
+ u32 *reo_desc,
+ struct hal_reo_status *status);
+void ath11k_hal_reo_desc_thresh_reached_status(struct ath11k_base *ab,
+ u32 *reo_desc,
+ struct hal_reo_status *status);
+void ath11k_hal_reo_update_rx_reo_queue_status(struct ath11k_base *ab,
+ u32 *reo_desc,
+ struct hal_reo_status *status);
+int ath11k_hal_reo_process_status(u8 *reo_desc, u8 *status);
+void ath11k_hal_rx_msdu_link_info_get(void *link_desc, u32 *num_msdus,
+ u32 *msdu_cookies,
+ enum hal_rx_buf_return_buf_manager *rbm);
+void ath11k_hal_rx_msdu_link_desc_set(struct ath11k_base *ab, void *desc,
+ void *link_desc,
+ enum hal_wbm_rel_bm_act action);
+void ath11k_hal_rx_buf_addr_info_set(void *desc, dma_addr_t paddr,
+ u32 cookie, u8 manager);
+void ath11k_hal_rx_buf_addr_info_get(void *desc, dma_addr_t *paddr,
+ u32 *cookie, u8 *rbm);
+int ath11k_hal_desc_reo_parse_err(struct ath11k_base *ab, u32 *rx_desc,
+ dma_addr_t *paddr, u32 *desc_bank);
+int ath11k_hal_wbm_desc_parse_err(struct ath11k_base *ab, void *desc,
+ struct hal_rx_wbm_rel_info *rel_info);
+void ath11k_hal_rx_reo_ent_paddr_get(struct ath11k_base *ab, void *desc,
+ dma_addr_t *paddr, u32 *desc_bank);
+void ath11k_hal_rx_reo_ent_buf_paddr_get(void *rx_desc,
+ dma_addr_t *paddr, u32 *sw_cookie,
+ void **pp_buf_addr_info,
+ u32 *msdu_cnt);
+enum hal_rx_mon_status
+ath11k_hal_rx_parse_mon_status(struct ath11k_base *ab,
+ struct hal_rx_mon_ppdu_info *ppdu_info,
+ struct sk_buff *skb);
+#define REO_QUEUE_DESC_MAGIC_DEBUG_PATTERN_0 0xDDBEEF
+#define REO_QUEUE_DESC_MAGIC_DEBUG_PATTERN_1 0xADBEEF
+#define REO_QUEUE_DESC_MAGIC_DEBUG_PATTERN_2 0xBDBEEF
+#define REO_QUEUE_DESC_MAGIC_DEBUG_PATTERN_3 0xCDBEEF
+#endif
diff --git a/drivers/net/wireless/ath/ath11k/hal_tx.c b/drivers/net/wireless/ath/ath11k/hal_tx.c
new file mode 100644
index 000000000000..e4aa7e8a1284
--- /dev/null
+++ b/drivers/net/wireless/ath/ath11k/hal_tx.c
@@ -0,0 +1,154 @@
+// SPDX-License-Identifier: BSD-3-Clause-Clear
+/*
+ * Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
+ */
+
+#include "ahb.h"
+#include "hal.h"
+#include "hal_tx.h"
+
+#define DSCP_TID_MAP_TBL_ENTRY_SIZE 64
+
+/* dscp_tid_map - Default DSCP-TID mapping
+ *
+ * DSCP TID
+ * 000000 0
+ * 001000 1
+ * 010000 2
+ * 011000 3
+ * 100000 4
+ * 101000 5
+ * 110000 6
+ * 111000 7
+ */
+static const u8 dscp_tid_map[DSCP_TID_MAP_TBL_ENTRY_SIZE] = {
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 1, 1, 1, 1, 1, 1, 1, 1,
+ 2, 2, 2, 2, 2, 2, 2, 2,
+ 3, 3, 3, 3, 3, 3, 3, 3,
+ 4, 4, 4, 4, 4, 4, 4, 4,
+ 5, 5, 5, 5, 5, 5, 5, 5,
+ 6, 6, 6, 6, 6, 6, 6, 6,
+ 7, 7, 7, 7, 7, 7, 7, 7,
+};
+
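+/* Example (illustrative): with this table the TID is effectively dscp >> 3,
+ * so DSCP 46 (Expedited Forwarding, 0b101110) falls into the 101xxx block
+ * and maps to TID 5.
+ */
+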
+void ath11k_hal_tx_cmd_desc_setup(struct ath11k_base *ab, void *cmd,
+ struct hal_tx_info *ti)
+{
+ struct hal_tcl_data_cmd *tcl_cmd = (struct hal_tcl_data_cmd *)cmd;
+
+ tcl_cmd->buf_addr_info.info0 =
+ FIELD_PREP(BUFFER_ADDR_INFO0_ADDR, ti->paddr);
+ tcl_cmd->buf_addr_info.info1 =
+ FIELD_PREP(BUFFER_ADDR_INFO1_ADDR,
+ ((uint64_t)ti->paddr >> HAL_ADDR_MSB_REG_SHIFT));
+ tcl_cmd->buf_addr_info.info1 |=
+ FIELD_PREP(BUFFER_ADDR_INFO1_RET_BUF_MGR,
+ (ti->ring_id + HAL_RX_BUF_RBM_SW0_BM)) |
+ FIELD_PREP(BUFFER_ADDR_INFO1_SW_COOKIE, ti->desc_id);
+
+ tcl_cmd->info0 =
+ FIELD_PREP(HAL_TCL_DATA_CMD_INFO0_DESC_TYPE, ti->type) |
+ FIELD_PREP(HAL_TCL_DATA_CMD_INFO0_ENCAP_TYPE, ti->encap_type) |
+ FIELD_PREP(HAL_TCL_DATA_CMD_INFO0_ENCRYPT_TYPE,
+ ti->encrypt_type) |
+ FIELD_PREP(HAL_TCL_DATA_CMD_INFO0_SEARCH_TYPE,
+ ti->search_type) |
+ FIELD_PREP(HAL_TCL_DATA_CMD_INFO0_ADDR_EN,
+ ti->addr_search_flags) |
+ FIELD_PREP(HAL_TCL_DATA_CMD_INFO0_CMD_NUM,
+ ti->meta_data_flags);
+
+ tcl_cmd->info1 = ti->flags0 |
+ FIELD_PREP(HAL_TCL_DATA_CMD_INFO1_DATA_LEN, ti->data_len) |
+ FIELD_PREP(HAL_TCL_DATA_CMD_INFO1_PKT_OFFSET, ti->pkt_offset);
+
+ tcl_cmd->info2 = ti->flags1 |
+ FIELD_PREP(HAL_TCL_DATA_CMD_INFO2_TID, ti->tid) |
+ FIELD_PREP(HAL_TCL_DATA_CMD_INFO2_LMAC_ID, ti->lmac_id);
+
+ tcl_cmd->info3 = FIELD_PREP(HAL_TCL_DATA_CMD_INFO3_DSCP_TID_TABLE_IDX,
+ ti->dscp_tid_tbl_idx) |
+ FIELD_PREP(HAL_TCL_DATA_CMD_INFO3_SEARCH_INDEX,
+ ti->bss_ast_hash);
+ tcl_cmd->info4 = 0;
+}
+
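+/* Usage sketch (illustrative only): the TX path would fill a struct
+ * hal_tx_info from the skb before calling the helper above, e.g.
+ *
+ *	struct hal_tx_info ti = {};
+ *
+ *	ti.paddr = skb_cb->paddr;
+ *	ti.data_len = skb->len;
+ *	ti.type = HAL_TCL_DESC_TYPE_BUFFER;
+ *	ti.encap_type = HAL_TCL_ENCAP_TYPE_NATIVE_WIFI;
+ *	ath11k_hal_tx_cmd_desc_setup(ab, tcl_desc, &ti);
+ *
+ * HAL_TCL_DESC_TYPE_BUFFER and HAL_TCL_ENCAP_TYPE_NATIVE_WIFI are assumed
+ * here to be members of the enums referenced by struct hal_tx_info.
+ */
+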
+void ath11k_hal_tx_set_dscp_tid_map(struct ath11k_base *ab, int id)
+{
+ u32 ctrl_reg_val;
+ u32 addr;
+ u8 hw_map_val[HAL_DSCP_TID_TBL_SIZE];
+ int i;
+ u32 value;
+ int cnt = 0;
+
+ ctrl_reg_val = ath11k_ahb_read32(ab, HAL_SEQ_WCSS_UMAC_TCL_REG +
+ HAL_TCL1_RING_CMN_CTRL_REG);
+ /* Enable read/write access */
+ ctrl_reg_val |= HAL_TCL1_RING_CMN_CTRL_DSCP_TID_MAP_PROG_EN;
+ ath11k_ahb_write32(ab, HAL_SEQ_WCSS_UMAC_TCL_REG +
+ HAL_TCL1_RING_CMN_CTRL_REG, ctrl_reg_val);
+
+ addr = HAL_SEQ_WCSS_UMAC_TCL_REG + HAL_TCL1_RING_DSCP_TID_MAP +
+ (4 * id * (HAL_DSCP_TID_TBL_SIZE / 4));
+
+	/* Each DSCP-TID mapping is three bits wide, so every iteration
+	 * configures eight mappings and fills three bytes.
+	 */
+ for (i = 0; i < DSCP_TID_MAP_TBL_ENTRY_SIZE; i += 8) {
+ value = FIELD_PREP(HAL_TCL1_RING_FIELD_DSCP_TID_MAP0,
+ dscp_tid_map[i]) |
+ FIELD_PREP(HAL_TCL1_RING_FIELD_DSCP_TID_MAP1,
+ dscp_tid_map[i + 1]) |
+ FIELD_PREP(HAL_TCL1_RING_FIELD_DSCP_TID_MAP2,
+ dscp_tid_map[i + 2]) |
+ FIELD_PREP(HAL_TCL1_RING_FIELD_DSCP_TID_MAP3,
+ dscp_tid_map[i + 3]) |
+ FIELD_PREP(HAL_TCL1_RING_FIELD_DSCP_TID_MAP4,
+ dscp_tid_map[i + 4]) |
+ FIELD_PREP(HAL_TCL1_RING_FIELD_DSCP_TID_MAP5,
+ dscp_tid_map[i + 5]) |
+ FIELD_PREP(HAL_TCL1_RING_FIELD_DSCP_TID_MAP6,
+ dscp_tid_map[i + 6]) |
+ FIELD_PREP(HAL_TCL1_RING_FIELD_DSCP_TID_MAP7,
+ dscp_tid_map[i + 7]);
+ memcpy(&hw_map_val[cnt], (u8 *)&value, 3);
+ cnt += 3;
+ }
+
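+	/* Illustrative arithmetic: 64 entries of 3 bits each is 192 bits, or
+	 * 24 bytes, which is assumed to be HAL_DSCP_TID_TBL_SIZE here and is
+	 * written out as six 32-bit accesses by the loop below.
+	 */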
+ for (i = 0; i < HAL_DSCP_TID_TBL_SIZE; i += 4) {
+ ath11k_ahb_write32(ab, addr, *(u32 *)&hw_map_val[i]);
+ addr += 4;
+ }
+
+ /* Disable read/write access */
+ ctrl_reg_val = ath11k_ahb_read32(ab, HAL_SEQ_WCSS_UMAC_TCL_REG +
+ HAL_TCL1_RING_CMN_CTRL_REG);
+ ctrl_reg_val &= ~HAL_TCL1_RING_CMN_CTRL_DSCP_TID_MAP_PROG_EN;
+ ath11k_ahb_write32(ab, HAL_SEQ_WCSS_UMAC_TCL_REG +
+ HAL_TCL1_RING_CMN_CTRL_REG,
+ ctrl_reg_val);
+}
+
+void ath11k_hal_tx_init_data_ring(struct ath11k_base *ab, struct hal_srng *srng)
+{
+ struct hal_srng_params params;
+ struct hal_tlv_hdr *tlv;
+ int i, entry_size;
+ u8 *desc;
+
+ memset(&params, 0, sizeof(params));
+
+ entry_size = ath11k_hal_srng_get_entrysize(HAL_TCL_DATA);
+ ath11k_hal_srng_get_params(ab, srng, &params);
+ desc = (u8 *)params.ring_base_vaddr;
+
+ for (i = 0; i < params.num_entries; i++) {
+ tlv = (struct hal_tlv_hdr *)desc;
+ tlv->tl = FIELD_PREP(HAL_TLV_HDR_TAG, HAL_TCL_DATA_CMD) |
+ FIELD_PREP(HAL_TLV_HDR_LEN,
+ sizeof(struct hal_tcl_data_cmd));
+ desc += entry_size;
+ }
+}
diff --git a/drivers/net/wireless/ath/ath11k/hal_tx.h b/drivers/net/wireless/ath/ath11k/hal_tx.h
new file mode 100644
index 000000000000..ce48a61bfb66
--- /dev/null
+++ b/drivers/net/wireless/ath/ath11k/hal_tx.h
@@ -0,0 +1,69 @@
+/* SPDX-License-Identifier: BSD-3-Clause-Clear */
+/*
+ * Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
+ */
+
+#ifndef ATH11K_HAL_TX_H
+#define ATH11K_HAL_TX_H
+
+#include "hal_desc.h"
+
+#define HAL_TX_ADDRX_EN 1
+#define HAL_TX_ADDRY_EN 2
+
+#define HAL_TX_ADDR_SEARCH_DEFAULT 0
+#define HAL_TX_ADDR_SEARCH_INDEX 1
+
+struct hal_tx_info {
+ u16 meta_data_flags; /* %HAL_TCL_DATA_CMD_INFO0_META_ */
+ u8 ring_id;
+ u32 desc_id;
+ enum hal_tcl_desc_type type;
+ enum hal_tcl_encap_type encap_type;
+ dma_addr_t paddr;
+ u32 data_len;
+ u32 pkt_offset;
+ enum hal_encrypt_type encrypt_type;
+ u32 flags0; /* %HAL_TCL_DATA_CMD_INFO1_ */
+ u32 flags1; /* %HAL_TCL_DATA_CMD_INFO2_ */
+ u16 addr_search_flags; /* %HAL_TCL_DATA_CMD_INFO0_ADDR(X/Y)_ */
+ u16 bss_ast_hash;
+ u8 tid;
+ u8 search_type; /* %HAL_TX_ADDR_SEARCH_ */
+ u8 lmac_id;
+ u8 dscp_tid_tbl_idx;
+};
+
+/* TODO: Check if the actual desc macros can be used instead */
+#define HAL_TX_STATUS_FLAGS_FIRST_MSDU BIT(0)
+#define HAL_TX_STATUS_FLAGS_LAST_MSDU BIT(1)
+#define HAL_TX_STATUS_FLAGS_MSDU_IN_AMSDU BIT(2)
+#define HAL_TX_STATUS_FLAGS_RATE_STATS_VALID BIT(3)
+#define HAL_TX_STATUS_FLAGS_RATE_LDPC BIT(4)
+#define HAL_TX_STATUS_FLAGS_RATE_STBC BIT(5)
+#define HAL_TX_STATUS_FLAGS_OFDMA BIT(6)
+
+#define HAL_TX_STATUS_DESC_LEN sizeof(struct hal_wbm_release_ring)
+
+/* Tx status parsed from srng desc */
+struct hal_tx_status {
+ enum hal_wbm_rel_src_module buf_rel_source;
+ enum hal_wbm_tqm_rel_reason status;
+ u8 ack_rssi;
+ u32 flags; /* %HAL_TX_STATUS_FLAGS_ */
+ u32 ppdu_id;
+ u8 try_cnt;
+ u8 tid;
+ u16 peer_id;
+ u32 rate_stats;
+};
+
+void ath11k_hal_tx_cmd_desc_setup(struct ath11k_base *ab, void *cmd,
+ struct hal_tx_info *ti);
+void ath11k_hal_tx_set_dscp_tid_map(struct ath11k_base *ab, int id);
+int ath11k_hal_reo_cmd_send(struct ath11k_base *ab, struct hal_srng *srng,
+ enum hal_reo_cmd_type type,
+ struct ath11k_hal_reo_cmd *cmd);
+void ath11k_hal_tx_init_data_ring(struct ath11k_base *ab,
+ struct hal_srng *srng);
+#endif
diff --git a/drivers/net/wireless/ath/ath11k/htc.c b/drivers/net/wireless/ath/ath11k/htc.c
new file mode 100644
index 000000000000..8f54f58b83e6
--- /dev/null
+++ b/drivers/net/wireless/ath/ath11k/htc.c
@@ -0,0 +1,773 @@
+// SPDX-License-Identifier: BSD-3-Clause-Clear
+/*
+ * Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
+ */
+#include <linux/skbuff.h>
+#include <linux/ctype.h>
+
+#include "ahb.h"
+#include "debug.h"
+
+struct sk_buff *ath11k_htc_alloc_skb(struct ath11k_base *ab, int size)
+{
+ struct sk_buff *skb;
+
+ skb = dev_alloc_skb(size + sizeof(struct ath11k_htc_hdr));
+ if (!skb)
+ return NULL;
+
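+	/* Leave headroom for the HTC header; ath11k_htc_send() pushes it
+	 * back on before the buffer is sent
+	 */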
+ skb_reserve(skb, sizeof(struct ath11k_htc_hdr));
+
+ /* FW/HTC requires 4-byte aligned streams */
+ if (!IS_ALIGNED((unsigned long)skb->data, 4))
+ ath11k_warn(ab, "Unaligned HTC tx skb\n");
+
+ return skb;
+}
+
+static void ath11k_htc_control_tx_complete(struct ath11k_base *ab,
+ struct sk_buff *skb)
+{
+ kfree_skb(skb);
+}
+
+static struct sk_buff *ath11k_htc_build_tx_ctrl_skb(void *ab)
+{
+ struct sk_buff *skb;
+ struct ath11k_skb_cb *skb_cb;
+
+ skb = dev_alloc_skb(ATH11K_HTC_CONTROL_BUFFER_SIZE);
+ if (!skb)
+ return NULL;
+
+ skb_reserve(skb, sizeof(struct ath11k_htc_hdr));
+ WARN_ON_ONCE(!IS_ALIGNED((unsigned long)skb->data, 4));
+
+ skb_cb = ATH11K_SKB_CB(skb);
+ memset(skb_cb, 0, sizeof(*skb_cb));
+
+ ath11k_dbg(ab, ATH11K_DBG_HTC, "%s: skb %pK\n", __func__, skb);
+ return skb;
+}
+
+static inline void ath11k_htc_restore_tx_skb(struct ath11k_htc *htc,
+ struct sk_buff *skb)
+{
+ struct ath11k_skb_cb *skb_cb = ATH11K_SKB_CB(skb);
+
+ dma_unmap_single(htc->ab->dev, skb_cb->paddr, skb->len, DMA_TO_DEVICE);
+ skb_pull(skb, sizeof(struct ath11k_htc_hdr));
+}
+
+static void ath11k_htc_prepare_tx_skb(struct ath11k_htc_ep *ep,
+ struct sk_buff *skb)
+{
+ struct ath11k_htc_hdr *hdr;
+
+ hdr = (struct ath11k_htc_hdr *)skb->data;
+
+ memset(hdr, 0, sizeof(*hdr));
+ hdr->htc_info = FIELD_PREP(HTC_HDR_ENDPOINTID, ep->eid) |
+ FIELD_PREP(HTC_HDR_PAYLOADLEN,
+ (skb->len - sizeof(*hdr))) |
+ FIELD_PREP(HTC_HDR_FLAGS,
+ ATH11K_HTC_FLAG_NEED_CREDIT_UPDATE);
+
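+	/* seq_no is per-endpoint state shared by all senders, so the
+	 * increment must be serialized under tx_lock
+	 */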
+ spin_lock_bh(&ep->htc->tx_lock);
+ hdr->ctrl_info = FIELD_PREP(HTC_HDR_CONTROLBYTES1, ep->seq_no++);
+ spin_unlock_bh(&ep->htc->tx_lock);
+}
+
+int ath11k_htc_send(struct ath11k_htc *htc,
+ enum ath11k_htc_ep_id eid,
+ struct sk_buff *skb)
+{
+ struct ath11k_htc_ep *ep = &htc->endpoint[eid];
+ struct ath11k_skb_cb *skb_cb = ATH11K_SKB_CB(skb);
+ struct device *dev = htc->ab->dev;
+ struct ath11k_base *ab = htc->ab;
+ int credits = 0;
+ int ret;
+
+ if (eid >= ATH11K_HTC_EP_COUNT) {
+ ath11k_warn(ab, "Invalid endpoint id: %d\n", eid);
+ return -ENOENT;
+ }
+
+ skb_push(skb, sizeof(struct ath11k_htc_hdr));
+
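+	/* One credit covers target_credit_size bytes of buffering on the
+	 * target, so reserve enough credits for the whole frame
+	 */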
+ if (ep->tx_credit_flow_enabled) {
+ credits = DIV_ROUND_UP(skb->len, htc->target_credit_size);
+ spin_lock_bh(&htc->tx_lock);
+ if (ep->tx_credits < credits) {
+ ath11k_dbg(ab, ATH11K_DBG_HTC,
+ "htc insufficient credits ep %d required %d available %d\n",
+ eid, credits, ep->tx_credits);
+ spin_unlock_bh(&htc->tx_lock);
+ ret = -EAGAIN;
+ goto err_pull;
+ }
+ ep->tx_credits -= credits;
+ ath11k_dbg(ab, ATH11K_DBG_HTC,
+ "htc ep %d consumed %d credits (total %d)\n",
+ eid, credits, ep->tx_credits);
+ spin_unlock_bh(&htc->tx_lock);
+ }
+
+ ath11k_htc_prepare_tx_skb(ep, skb);
+
+ skb_cb->eid = eid;
+ skb_cb->paddr = dma_map_single(dev, skb->data, skb->len, DMA_TO_DEVICE);
+ ret = dma_mapping_error(dev, skb_cb->paddr);
+ if (ret) {
+ ret = -EIO;
+ goto err_credits;
+ }
+
+ ret = ath11k_ce_send(htc->ab, skb, ep->ul_pipe_id, ep->eid);
+ if (ret)
+ goto err_unmap;
+
+ return 0;
+
+err_unmap:
+ dma_unmap_single(dev, skb_cb->paddr, skb->len, DMA_TO_DEVICE);
+err_credits:
+ if (ep->tx_credit_flow_enabled) {
+ spin_lock_bh(&htc->tx_lock);
+ ep->tx_credits += credits;
+ ath11k_dbg(ab, ATH11K_DBG_HTC,
+ "htc ep %d reverted %d credits back (total %d)\n",
+ eid, credits, ep->tx_credits);
+ spin_unlock_bh(&htc->tx_lock);
+
+ if (ep->ep_ops.ep_tx_credits)
+ ep->ep_ops.ep_tx_credits(htc->ab);
+ }
+err_pull:
+ skb_pull(skb, sizeof(struct ath11k_htc_hdr));
+ return ret;
+}
+
+static void
+ath11k_htc_process_credit_report(struct ath11k_htc *htc,
+ const struct ath11k_htc_credit_report *report,
+ int len,
+ enum ath11k_htc_ep_id eid)
+{
+ struct ath11k_base *ab = htc->ab;
+ struct ath11k_htc_ep *ep;
+ int i, n_reports;
+
+ if (len % sizeof(*report))
+ ath11k_warn(ab, "Uneven credit report len %d", len);
+
+ n_reports = len / sizeof(*report);
+
+ spin_lock_bh(&htc->tx_lock);
+ for (i = 0; i < n_reports; i++, report++) {
+ if (report->eid >= ATH11K_HTC_EP_COUNT)
+ break;
+
+ ep = &htc->endpoint[report->eid];
+ ep->tx_credits += report->credits;
+
+ ath11k_dbg(ab, ATH11K_DBG_HTC, "htc ep %d got %d credits (total %d)\n",
+ report->eid, report->credits, ep->tx_credits);
+
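+		/* Drop tx_lock across the callback: it may transmit via
+		 * ath11k_htc_send(), which takes tx_lock itself
+		 */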
+ if (ep->ep_ops.ep_tx_credits) {
+ spin_unlock_bh(&htc->tx_lock);
+ ep->ep_ops.ep_tx_credits(htc->ab);
+ spin_lock_bh(&htc->tx_lock);
+ }
+ }
+ spin_unlock_bh(&htc->tx_lock);
+}
+
+static int ath11k_htc_process_trailer(struct ath11k_htc *htc,
+ u8 *buffer,
+ int length,
+ enum ath11k_htc_ep_id src_eid)
+{
+ struct ath11k_base *ab = htc->ab;
+ int status = 0;
+ struct ath11k_htc_record *record;
+ size_t len;
+
+ while (length > 0) {
+ record = (struct ath11k_htc_record *)buffer;
+
+ if (length < sizeof(record->hdr)) {
+ status = -EINVAL;
+ break;
+ }
+
+ if (record->hdr.len > length) {
+ /* no room left in buffer for record */
+ ath11k_warn(ab, "Invalid record length: %d\n",
+ record->hdr.len);
+ status = -EINVAL;
+ break;
+ }
+
+ switch (record->hdr.id) {
+ case ATH11K_HTC_RECORD_CREDITS:
+ len = sizeof(struct ath11k_htc_credit_report);
+ if (record->hdr.len < len) {
+ ath11k_warn(ab, "Credit report too long\n");
+ status = -EINVAL;
+ break;
+ }
+ ath11k_htc_process_credit_report(htc,
+ record->credit_report,
+ record->hdr.len,
+ src_eid);
+ break;
+ default:
+ ath11k_warn(ab, "Unhandled record: id:%d length:%d\n",
+ record->hdr.id, record->hdr.len);
+ break;
+ }
+
+ if (status)
+ break;
+
+ /* multiple records may be present in a trailer */
+ buffer += sizeof(record->hdr) + record->hdr.len;
+ length -= sizeof(record->hdr) + record->hdr.len;
+ }
+
+ return status;
+}
+
+void ath11k_htc_rx_completion_handler(struct ath11k_base *ab,
+ struct sk_buff *skb)
+{
+ int status = 0;
+ struct ath11k_htc *htc = &ab->htc;
+ struct ath11k_htc_hdr *hdr;
+ struct ath11k_htc_ep *ep;
+ u16 payload_len;
+ u32 trailer_len = 0;
+ size_t min_len;
+ u8 eid;
+ bool trailer_present;
+
+ hdr = (struct ath11k_htc_hdr *)skb->data;
+ skb_pull(skb, sizeof(*hdr));
+
+ eid = FIELD_GET(HTC_HDR_ENDPOINTID, hdr->htc_info);
+
+ if (eid >= ATH11K_HTC_EP_COUNT) {
+ ath11k_warn(ab, "HTC Rx: invalid eid %d\n", eid);
+ goto out;
+ }
+
+ ep = &htc->endpoint[eid];
+
+ payload_len = FIELD_GET(HTC_HDR_PAYLOADLEN, hdr->htc_info);
+
+ if (payload_len + sizeof(*hdr) > ATH11K_HTC_MAX_LEN) {
+ ath11k_warn(ab, "HTC rx frame too long, len: %zu\n",
+ payload_len + sizeof(*hdr));
+ goto out;
+ }
+
+ if (skb->len < payload_len) {
+ ath11k_warn(ab, "HTC Rx: insufficient length, got %d, expected %d\n",
+ skb->len, payload_len);
+ goto out;
+ }
+
+ /* get flags to check for trailer */
+ trailer_present = (FIELD_GET(HTC_HDR_FLAGS, hdr->htc_info)) &
+ ATH11K_HTC_FLAG_TRAILER_PRESENT;
+
+ if (trailer_present) {
+ u8 *trailer;
+
+ trailer_len = FIELD_GET(HTC_HDR_CONTROLBYTES0, hdr->ctrl_info);
+ min_len = sizeof(struct ath11k_htc_record_hdr);
+
+ if ((trailer_len < min_len) ||
+ (trailer_len > payload_len)) {
+ ath11k_warn(ab, "Invalid trailer length: %d\n",
+ trailer_len);
+ goto out;
+ }
+
+ trailer = (u8 *)hdr;
+ trailer += sizeof(*hdr);
+ trailer += payload_len;
+ trailer -= trailer_len;
+ status = ath11k_htc_process_trailer(htc, trailer,
+ trailer_len, eid);
+ if (status)
+ goto out;
+
+ skb_trim(skb, skb->len - trailer_len);
+ }
+
+ if (trailer_len >= payload_len)
+ /* zero length packet with trailer data, just drop these */
+ goto out;
+
+ if (eid == ATH11K_HTC_EP_0) {
+ struct ath11k_htc_msg *msg = (struct ath11k_htc_msg *)skb->data;
+
+ switch (FIELD_GET(HTC_MSG_MESSAGEID, msg->msg_svc_id)) {
+ case ATH11K_HTC_MSG_READY_ID:
+ case ATH11K_HTC_MSG_CONNECT_SERVICE_RESP_ID:
+ /* handle HTC control message */
+ if (completion_done(&htc->ctl_resp)) {
+ /* this is a fatal error, the target should not
+ * be sending unsolicited messages on ep 0
+ */
+ ath11k_warn(ab, "HTC rx ctrl still processing\n");
+ complete(&htc->ctl_resp);
+ goto out;
+ }
+
+ htc->control_resp_len =
+ min_t(int, skb->len,
+ ATH11K_HTC_MAX_CTRL_MSG_LEN);
+
+ memcpy(htc->control_resp_buffer, skb->data,
+ htc->control_resp_len);
+
+ complete(&htc->ctl_resp);
+ break;
+ default:
+ ath11k_warn(ab, "ignoring unsolicited htc ep0 event\n");
+ break;
+ }
+ goto out;
+ }
+
+ ath11k_dbg(ab, ATH11K_DBG_HTC, "htc rx completion ep %d skb %pK\n",
+ eid, skb);
+ ep->ep_ops.ep_rx_complete(ab, skb);
+
+ /* poll tx completion for interrupt disabled CE's */
+ ath11k_ce_poll_send_completed(ab, ep->ul_pipe_id);
+
+ /* skb is now owned by the rx completion handler */
+ skb = NULL;
+out:
+ kfree_skb(skb);
+}
+
+static void ath11k_htc_control_rx_complete(struct ath11k_base *ab,
+ struct sk_buff *skb)
+{
+ /* This is unexpected. FW is not supposed to send regular rx on this
+ * endpoint.
+ */
+ ath11k_warn(ab, "unexpected htc rx\n");
+ kfree_skb(skb);
+}
+
+static const char *htc_service_name(enum ath11k_htc_svc_id id)
+{
+ switch (id) {
+ case ATH11K_HTC_SVC_ID_RESERVED:
+ return "Reserved";
+ case ATH11K_HTC_SVC_ID_RSVD_CTRL:
+ return "Control";
+ case ATH11K_HTC_SVC_ID_WMI_CONTROL:
+ return "WMI";
+ case ATH11K_HTC_SVC_ID_WMI_DATA_BE:
+ return "DATA BE";
+ case ATH11K_HTC_SVC_ID_WMI_DATA_BK:
+ return "DATA BK";
+ case ATH11K_HTC_SVC_ID_WMI_DATA_VI:
+ return "DATA VI";
+ case ATH11K_HTC_SVC_ID_WMI_DATA_VO:
+ return "DATA VO";
+ case ATH11K_HTC_SVC_ID_WMI_CONTROL_MAC1:
+ return "WMI MAC1";
+ case ATH11K_HTC_SVC_ID_WMI_CONTROL_MAC2:
+ return "WMI MAC2";
+ case ATH11K_HTC_SVC_ID_NMI_CONTROL:
+ return "NMI Control";
+ case ATH11K_HTC_SVC_ID_NMI_DATA:
+ return "NMI Data";
+ case ATH11K_HTC_SVC_ID_HTT_DATA_MSG:
+ return "HTT Data";
+ case ATH11K_HTC_SVC_ID_TEST_RAW_STREAMS:
+ return "RAW";
+ case ATH11K_HTC_SVC_ID_IPA_TX:
+ return "IPA TX";
+ case ATH11K_HTC_SVC_ID_PKT_LOG:
+ return "PKT LOG";
+ }
+
+ return "Unknown";
+}
+
+static void ath11k_htc_reset_endpoint_states(struct ath11k_htc *htc)
+{
+ struct ath11k_htc_ep *ep;
+ int i;
+
+ for (i = ATH11K_HTC_EP_0; i < ATH11K_HTC_EP_COUNT; i++) {
+ ep = &htc->endpoint[i];
+ ep->service_id = ATH11K_HTC_SVC_ID_UNUSED;
+ ep->max_ep_message_len = 0;
+ ep->max_tx_queue_depth = 0;
+ ep->eid = i;
+ ep->htc = htc;
+ ep->tx_credit_flow_enabled = true;
+ }
+}
+
+static u8 ath11k_htc_get_credit_allocation(struct ath11k_htc *htc,
+ u16 service_id)
+{
+ u8 i, allocation = 0;
+
+ for (i = 0; i < ATH11K_HTC_MAX_SERVICE_ALLOC_ENTRIES; i++) {
+ if (htc->service_alloc_table[i].service_id == service_id) {
+ allocation =
+ htc->service_alloc_table[i].credit_allocation;
+ }
+ }
+
+ return allocation;
+}
+
+static int ath11k_htc_setup_target_buffer_assignments(struct ath11k_htc *htc)
+{
+ struct ath11k_htc_svc_tx_credits *serv_entry;
+ u32 svc_id[] = {
+ ATH11K_HTC_SVC_ID_WMI_CONTROL,
+ ATH11K_HTC_SVC_ID_WMI_CONTROL_MAC1,
+ ATH11K_HTC_SVC_ID_WMI_CONTROL_MAC2,
+ };
+ int i, credits;
+
+ credits = htc->total_transmit_credits;
+ serv_entry = htc->service_alloc_table;
+
+ if ((htc->wmi_ep_count == 0) ||
+ (htc->wmi_ep_count > ARRAY_SIZE(svc_id)))
+ return -EINVAL;
+
+ /* Divide credits among number of endpoints for WMI */
+ credits = credits / htc->wmi_ep_count;
+ for (i = 0; i < htc->wmi_ep_count; i++) {
+ serv_entry[i].service_id = svc_id[i];
+ serv_entry[i].credit_allocation = credits;
+ }
+
+ return 0;
+}
+
+int ath11k_htc_wait_target(struct ath11k_htc *htc)
+{
+ int i, status = 0;
+ struct ath11k_base *ab = htc->ab;
+ unsigned long time_left;
+ struct ath11k_htc_ready *ready;
+ u16 message_id;
+ u16 credit_count;
+ u16 credit_size;
+
+ time_left = wait_for_completion_timeout(&htc->ctl_resp,
+ ATH11K_HTC_WAIT_TIMEOUT_HZ);
+ if (!time_left) {
+ ath11k_warn(ab, "failed to receive control response completion, polling..\n");
+
+ for (i = 0; i < CE_COUNT; i++)
+ ath11k_ce_per_engine_service(htc->ab, i);
+
+ time_left =
+ wait_for_completion_timeout(&htc->ctl_resp,
+ ATH11K_HTC_WAIT_TIMEOUT_HZ);
+
+ if (!time_left)
+ status = -ETIMEDOUT;
+ }
+
+ if (status < 0) {
+ ath11k_warn(ab, "ctl_resp never came in (%d)\n", status);
+ return status;
+ }
+
+ if (htc->control_resp_len < sizeof(*ready)) {
+ ath11k_warn(ab, "Invalid HTC ready msg len:%d\n",
+ htc->control_resp_len);
+ return -ECOMM;
+ }
+
+ ready = (struct ath11k_htc_ready *)htc->control_resp_buffer;
+ message_id = FIELD_GET(HTC_MSG_MESSAGEID, ready->id_credit_count);
+ credit_count = FIELD_GET(HTC_READY_MSG_CREDITCOUNT,
+ ready->id_credit_count);
+ credit_size = FIELD_GET(HTC_READY_MSG_CREDITSIZE, ready->size_ep);
+
+ if (message_id != ATH11K_HTC_MSG_READY_ID) {
+ ath11k_warn(ab, "Invalid HTC ready msg: 0x%x\n", message_id);
+ return -ECOMM;
+ }
+
+ htc->total_transmit_credits = credit_count;
+ htc->target_credit_size = credit_size;
+
+ ath11k_dbg(ab, ATH11K_DBG_HTC,
+ "Target ready! transmit resources: %d size:%d\n",
+ htc->total_transmit_credits, htc->target_credit_size);
+
+ if ((htc->total_transmit_credits == 0) ||
+ (htc->target_credit_size == 0)) {
+ ath11k_warn(ab, "Invalid credit size received\n");
+ return -ECOMM;
+ }
+
+ ath11k_htc_setup_target_buffer_assignments(htc);
+
+ return 0;
+}
+
+int ath11k_htc_connect_service(struct ath11k_htc *htc,
+ struct ath11k_htc_svc_conn_req *conn_req,
+ struct ath11k_htc_svc_conn_resp *conn_resp)
+{
+ struct ath11k_base *ab = htc->ab;
+ struct ath11k_htc_conn_svc *req_msg;
+ struct ath11k_htc_conn_svc_resp resp_msg_dummy;
+ struct ath11k_htc_conn_svc_resp *resp_msg = &resp_msg_dummy;
+ enum ath11k_htc_ep_id assigned_eid = ATH11K_HTC_EP_COUNT;
+ struct ath11k_htc_ep *ep;
+ struct sk_buff *skb;
+ unsigned int max_msg_size = 0;
+ int length, status;
+ unsigned long time_left;
+ bool disable_credit_flow_ctrl = false;
+ u16 message_id, service_id, flags = 0;
+ u8 tx_alloc = 0;
+
+ /* special case for HTC pseudo control service */
+ if (conn_req->service_id == ATH11K_HTC_SVC_ID_RSVD_CTRL) {
+ disable_credit_flow_ctrl = true;
+ assigned_eid = ATH11K_HTC_EP_0;
+ max_msg_size = ATH11K_HTC_MAX_CTRL_MSG_LEN;
+ memset(&resp_msg_dummy, 0, sizeof(resp_msg_dummy));
+ goto setup;
+ }
+
+ tx_alloc = ath11k_htc_get_credit_allocation(htc,
+ conn_req->service_id);
+ if (!tx_alloc)
+ ath11k_dbg(ab, ATH11K_DBG_BOOT,
+ "boot htc service %s does not allocate target credits\n",
+ htc_service_name(conn_req->service_id));
+
+ skb = ath11k_htc_build_tx_ctrl_skb(htc->ab);
+ if (!skb) {
+ ath11k_warn(ab, "Failed to allocate HTC packet\n");
+ return -ENOMEM;
+ }
+
+ length = sizeof(*req_msg);
+ skb_put(skb, length);
+ memset(skb->data, 0, length);
+
+ req_msg = (struct ath11k_htc_conn_svc *)skb->data;
+ req_msg->msg_svc_id = FIELD_PREP(HTC_MSG_MESSAGEID,
+ ATH11K_HTC_MSG_CONNECT_SERVICE_ID);
+
+ flags |= FIELD_PREP(ATH11K_HTC_CONN_FLAGS_RECV_ALLOC, tx_alloc);
+
+ /* Only enable credit flow control for WMI ctrl service */
+ if (!(conn_req->service_id == ATH11K_HTC_SVC_ID_WMI_CONTROL ||
+ conn_req->service_id == ATH11K_HTC_SVC_ID_WMI_CONTROL_MAC1 ||
+ conn_req->service_id == ATH11K_HTC_SVC_ID_WMI_CONTROL_MAC2)) {
+ flags |= ATH11K_HTC_CONN_FLAGS_DISABLE_CREDIT_FLOW_CTRL;
+ disable_credit_flow_ctrl = true;
+ }
+
+ req_msg->flags_len = FIELD_PREP(HTC_SVC_MSG_CONNECTIONFLAGS, flags);
+ req_msg->msg_svc_id |= FIELD_PREP(HTC_SVC_MSG_SERVICE_ID,
+ conn_req->service_id);
+
+ reinit_completion(&htc->ctl_resp);
+
+ status = ath11k_htc_send(htc, ATH11K_HTC_EP_0, skb);
+ if (status) {
+ kfree_skb(skb);
+ return status;
+ }
+
+ /* wait for response */
+ time_left = wait_for_completion_timeout(&htc->ctl_resp,
+ ATH11K_HTC_CONN_SVC_TIMEOUT_HZ);
+ if (!time_left) {
+ ath11k_err(ab, "Service connect timeout\n");
+ return -ETIMEDOUT;
+ }
+
+ /* we controlled the buffer creation, it's aligned */
+ resp_msg = (struct ath11k_htc_conn_svc_resp *)htc->control_resp_buffer;
+ message_id = FIELD_GET(HTC_MSG_MESSAGEID, resp_msg->msg_svc_id);
+ service_id = FIELD_GET(HTC_SVC_RESP_MSG_SERVICEID,
+ resp_msg->msg_svc_id);
+
+ if ((message_id != ATH11K_HTC_MSG_CONNECT_SERVICE_RESP_ID) ||
+ (htc->control_resp_len < sizeof(*resp_msg))) {
+ ath11k_err(ab, "Invalid resp message ID 0x%x", message_id);
+ return -EPROTO;
+ }
+
+ ath11k_dbg(ab, ATH11K_DBG_HTC,
+ "HTC Service %s connect response: status: 0x%lx, assigned ep: 0x%lx\n",
+ htc_service_name(service_id),
+ FIELD_GET(HTC_SVC_RESP_MSG_STATUS, resp_msg->flags_len),
+ FIELD_GET(HTC_SVC_RESP_MSG_ENDPOINTID, resp_msg->flags_len));
+
+ conn_resp->connect_resp_code = FIELD_GET(HTC_SVC_RESP_MSG_STATUS,
+ resp_msg->flags_len);
+
+ /* check response status */
+ if (conn_resp->connect_resp_code != ATH11K_HTC_CONN_SVC_STATUS_SUCCESS) {
+ ath11k_err(ab, "HTC Service %s connect request failed: 0x%x)\n",
+ htc_service_name(service_id),
+ conn_resp->connect_resp_code);
+ return -EPROTO;
+ }
+
+ assigned_eid = (enum ath11k_htc_ep_id)FIELD_GET(
+ HTC_SVC_RESP_MSG_ENDPOINTID,
+ resp_msg->flags_len);
+
+ max_msg_size = FIELD_GET(HTC_SVC_RESP_MSG_MAXMSGSIZE,
+ resp_msg->flags_len);
+
+setup:
+
+ if (assigned_eid >= ATH11K_HTC_EP_COUNT)
+ return -EPROTO;
+
+ if (max_msg_size == 0)
+ return -EPROTO;
+
+ ep = &htc->endpoint[assigned_eid];
+ ep->eid = assigned_eid;
+
+ if (ep->service_id != ATH11K_HTC_SVC_ID_UNUSED)
+ return -EPROTO;
+
+ /* return assigned endpoint to caller */
+ conn_resp->eid = assigned_eid;
+ conn_resp->max_msg_len = FIELD_GET(HTC_SVC_RESP_MSG_MAXMSGSIZE,
+ resp_msg->flags_len);
+
+ /* setup the endpoint */
+ ep->service_id = conn_req->service_id;
+ ep->max_tx_queue_depth = conn_req->max_send_queue_depth;
+ ep->max_ep_message_len = FIELD_GET(HTC_SVC_RESP_MSG_MAXMSGSIZE,
+ resp_msg->flags_len);
+ ep->tx_credits = tx_alloc;
+
+ /* copy all the callbacks */
+ ep->ep_ops = conn_req->ep_ops;
+
+ status = ath11k_ahb_map_service_to_pipe(htc->ab,
+ ep->service_id,
+ &ep->ul_pipe_id,
+ &ep->dl_pipe_id);
+ if (status)
+ return status;
+
+ ath11k_dbg(ab, ATH11K_DBG_BOOT,
+ "boot htc service '%s' ul pipe %d dl pipe %d eid %d ready\n",
+ htc_service_name(ep->service_id), ep->ul_pipe_id,
+ ep->dl_pipe_id, ep->eid);
+
+ if (disable_credit_flow_ctrl && ep->tx_credit_flow_enabled) {
+ ep->tx_credit_flow_enabled = false;
+ ath11k_dbg(ab, ATH11K_DBG_BOOT,
+ "boot htc service '%s' eid %d TX flow control disabled\n",
+ htc_service_name(ep->service_id), assigned_eid);
+ }
+
+ return status;
+}
+
+int ath11k_htc_start(struct ath11k_htc *htc)
+{
+ struct sk_buff *skb;
+ int status = 0;
+ struct ath11k_base *ab = htc->ab;
+ struct ath11k_htc_setup_complete_extended *msg;
+
+ skb = ath11k_htc_build_tx_ctrl_skb(htc->ab);
+ if (!skb)
+ return -ENOMEM;
+
+ skb_put(skb, sizeof(*msg));
+ memset(skb->data, 0, skb->len);
+
+ msg = (struct ath11k_htc_setup_complete_extended *)skb->data;
+ msg->msg_id = FIELD_PREP(HTC_MSG_MESSAGEID,
+ ATH11K_HTC_MSG_SETUP_COMPLETE_EX_ID);
+
+ ath11k_dbg(ab, ATH11K_DBG_HTC, "HTC is using TX credit flow control\n");
+
+ status = ath11k_htc_send(htc, ATH11K_HTC_EP_0, skb);
+ if (status) {
+ kfree_skb(skb);
+ return status;
+ }
+
+ return 0;
+}
+
+int ath11k_htc_init(struct ath11k_base *ab)
+{
+ struct ath11k_htc *htc = &ab->htc;
+ struct ath11k_htc_svc_conn_req conn_req;
+ struct ath11k_htc_svc_conn_resp conn_resp;
+ int ret;
+
+ spin_lock_init(&htc->tx_lock);
+
+ ath11k_htc_reset_endpoint_states(htc);
+
+ htc->ab = ab;
+
+ switch (ab->wmi_ab.preferred_hw_mode) {
+ case WMI_HOST_HW_MODE_SINGLE:
+ htc->wmi_ep_count = 1;
+ break;
+ case WMI_HOST_HW_MODE_DBS:
+ case WMI_HOST_HW_MODE_DBS_OR_SBS:
+ htc->wmi_ep_count = 2;
+ break;
+ case WMI_HOST_HW_MODE_DBS_SBS:
+ htc->wmi_ep_count = 3;
+ break;
+ default:
+ htc->wmi_ep_count = 3;
+ break;
+ }
+
+ /* setup our pseudo HTC control endpoint connection */
+ memset(&conn_req, 0, sizeof(conn_req));
+ memset(&conn_resp, 0, sizeof(conn_resp));
+ conn_req.ep_ops.ep_tx_complete = ath11k_htc_control_tx_complete;
+ conn_req.ep_ops.ep_rx_complete = ath11k_htc_control_rx_complete;
+ conn_req.max_send_queue_depth = ATH11K_NUM_CONTROL_TX_BUFFERS;
+ conn_req.service_id = ATH11K_HTC_SVC_ID_RSVD_CTRL;
+
+ /* connect fake service */
+ ret = ath11k_htc_connect_service(htc, &conn_req, &conn_resp);
+ if (ret) {
+ ath11k_err(ab, "could not connect to htc service (%d)\n", ret);
+ return ret;
+ }
+
+ init_completion(&htc->ctl_resp);
+
+ return 0;
+}
diff --git a/drivers/net/wireless/ath/ath11k/htc.h b/drivers/net/wireless/ath/ath11k/htc.h
new file mode 100644
index 000000000000..f0a3387567ca
--- /dev/null
+++ b/drivers/net/wireless/ath/ath11k/htc.h
@@ -0,0 +1,313 @@
+/* SPDX-License-Identifier: BSD-3-Clause-Clear */
+/*
+ * Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
+ */
+
+#ifndef ATH11K_HTC_H
+#define ATH11K_HTC_H
+
+#include <linux/kernel.h>
+#include <linux/list.h>
+#include <linux/bug.h>
+#include <linux/skbuff.h>
+#include <linux/timer.h>
+
+struct ath11k_base;
+
+#define HTC_HDR_ENDPOINTID GENMASK(7, 0)
+#define HTC_HDR_FLAGS GENMASK(15, 8)
+#define HTC_HDR_PAYLOADLEN GENMASK(31, 16)
+#define HTC_HDR_CONTROLBYTES0 GENMASK(7, 0)
+#define HTC_HDR_CONTROLBYTES1 GENMASK(15, 8)
+#define HTC_HDR_RESERVED GENMASK(31, 16)
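+/* The HTC header is two 32-bit words: the first carries the endpoint id,
+ * flags and payload length, while in the second CONTROLBYTES0 holds the
+ * trailer length on rx and CONTROLBYTES1 the tx sequence number.
+ */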
+
+#define HTC_SVC_MSG_SERVICE_ID GENMASK(31, 16)
+#define HTC_SVC_MSG_CONNECTIONFLAGS GENMASK(15, 0)
+#define HTC_SVC_MSG_SERVICEMETALENGTH GENMASK(23, 16)
+#define HTC_READY_MSG_CREDITCOUNT GENMASK(31, 16)
+#define HTC_READY_MSG_CREDITSIZE GENMASK(15, 0)
+#define HTC_READY_MSG_MAXENDPOINTS GENMASK(23, 16)
+
+#define HTC_READY_EX_MSG_HTCVERSION GENMASK(7, 0)
+#define HTC_READY_EX_MSG_MAXMSGSPERHTCBUNDLE GENMASK(15, 8)
+
+#define HTC_SVC_RESP_MSG_SERVICEID GENMASK(31, 16)
+#define HTC_SVC_RESP_MSG_STATUS GENMASK(7, 0)
+#define HTC_SVC_RESP_MSG_ENDPOINTID GENMASK(15, 8)
+#define HTC_SVC_RESP_MSG_MAXMSGSIZE GENMASK(31, 16)
+#define HTC_SVC_RESP_MSG_SERVICEMETALENGTH GENMASK(7, 0)
+
+#define HTC_MSG_MESSAGEID GENMASK(15, 0)
+#define HTC_SETUP_COMPLETE_EX_MSG_SETUPFLAGS GENMASK(31, 0)
+#define HTC_SETUP_COMPLETE_EX_MSG_MAXMSGSPERBUNDLEDRECV GENMASK(7, 0)
+#define HTC_SETUP_COMPLETE_EX_MSG_RSVD0 GENMASK(15, 8)
+#define HTC_SETUP_COMPLETE_EX_MSG_RSVD1 GENMASK(23, 16)
+#define HTC_SETUP_COMPLETE_EX_MSG_RSVD2 GENMASK(31, 24)
+
+enum ath11k_htc_tx_flags {
+ ATH11K_HTC_FLAG_NEED_CREDIT_UPDATE = 0x01,
+ ATH11K_HTC_FLAG_SEND_BUNDLE = 0x02
+};
+
+enum ath11k_htc_rx_flags {
+ ATH11K_HTC_FLAG_TRAILER_PRESENT = 0x02,
+ ATH11K_HTC_FLAG_BUNDLE_MASK = 0xF0
+};
+
+struct ath11k_htc_hdr {
+ u32 htc_info;
+ u32 ctrl_info;
+} __packed __aligned(4);
+
+enum ath11k_htc_msg_id {
+ ATH11K_HTC_MSG_READY_ID = 1,
+ ATH11K_HTC_MSG_CONNECT_SERVICE_ID = 2,
+ ATH11K_HTC_MSG_CONNECT_SERVICE_RESP_ID = 3,
+ ATH11K_HTC_MSG_SETUP_COMPLETE_ID = 4,
+ ATH11K_HTC_MSG_SETUP_COMPLETE_EX_ID = 5,
+ ATH11K_HTC_MSG_SEND_SUSPEND_COMPLETE = 6
+};
+
+enum ath11k_htc_version {
+ ATH11K_HTC_VERSION_2P0 = 0x00, /* 2.0 */
+ ATH11K_HTC_VERSION_2P1 = 0x01, /* 2.1 */
+};
+
+#define ATH11K_HTC_CONN_FLAGS_THRESHOLD_LEVEL_MASK GENMASK(1, 0)
+#define ATH11K_HTC_CONN_FLAGS_RECV_ALLOC GENMASK(15, 8)
+
+enum ath11k_htc_conn_flags {
+ ATH11K_HTC_CONN_FLAGS_THRESHOLD_LEVEL_ONE_FOURTH = 0x0,
+ ATH11K_HTC_CONN_FLAGS_THRESHOLD_LEVEL_ONE_HALF = 0x1,
+ ATH11K_HTC_CONN_FLAGS_THRESHOLD_LEVEL_THREE_FOURTHS = 0x2,
+ ATH11K_HTC_CONN_FLAGS_THRESHOLD_LEVEL_UNITY = 0x3,
+ ATH11K_HTC_CONN_FLAGS_REDUCE_CREDIT_DRIBBLE = 1 << 2,
+ ATH11K_HTC_CONN_FLAGS_DISABLE_CREDIT_FLOW_CTRL = 1 << 3
+};
+
+enum ath11k_htc_conn_svc_status {
+ ATH11K_HTC_CONN_SVC_STATUS_SUCCESS = 0,
+ ATH11K_HTC_CONN_SVC_STATUS_NOT_FOUND = 1,
+ ATH11K_HTC_CONN_SVC_STATUS_FAILED = 2,
+ ATH11K_HTC_CONN_SVC_STATUS_NO_RESOURCES = 3,
+ ATH11K_HTC_CONN_SVC_STATUS_NO_MORE_EP = 4
+};
+
+struct ath11k_htc_ready {
+ u32 id_credit_count;
+ u32 size_ep;
+} __packed;
+
+struct ath11k_htc_ready_extended {
+ struct ath11k_htc_ready base;
+ u32 ver_bundle;
+} __packed;
+
+struct ath11k_htc_conn_svc {
+ u32 msg_svc_id;
+ u32 flags_len;
+} __packed;
+
+struct ath11k_htc_conn_svc_resp {
+ u32 msg_svc_id;
+ u32 flags_len;
+ u32 svc_meta_pad;
+} __packed;
+
+struct ath11k_htc_setup_complete_extended {
+ u32 msg_id;
+ u32 flags;
+ u32 max_msgs_per_bundled_recv;
+} __packed;
+
+struct ath11k_htc_msg {
+ u32 msg_svc_id;
+ u32 flags_len;
+} __packed __aligned(4);
+
+enum ath11k_htc_record_id {
+ ATH11K_HTC_RECORD_NULL = 0,
+ ATH11K_HTC_RECORD_CREDITS = 1
+};
+
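+	/* Stamp each ring entry with a TCL data command TLV header so the
+	 * ring holds validly tagged descriptors from the start
+	 */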
+struct ath11k_htc_record_hdr {
+ u8 id; /* @enum ath11k_htc_record_id */
+ u8 len;
+ u8 pad0;
+ u8 pad1;
+} __packed;
+
+struct ath11k_htc_credit_report {
+ u8 eid; /* @enum ath11k_htc_ep_id */
+ u8 credits;
+ u8 pad0;
+ u8 pad1;
+} __packed;
+
+struct ath11k_htc_record {
+ struct ath11k_htc_record_hdr hdr;
+ union {
+ struct ath11k_htc_credit_report credit_report[0];
+ u8 payload[0];
+ };
+} __packed __aligned(4);
+
+/* Note: the trailer offset is dynamic, depending on the payload length;
+ * this struct is only a draft of the frame layout
+ */
+struct ath11k_htc_frame {
+ struct ath11k_htc_hdr hdr;
+ union {
+ struct ath11k_htc_msg msg;
+ u8 payload[0];
+ };
+ struct ath11k_htc_record trailer[0];
+} __packed __aligned(4);
+
+enum ath11k_htc_svc_gid {
+ ATH11K_HTC_SVC_GRP_RSVD = 0,
+ ATH11K_HTC_SVC_GRP_WMI = 1,
+ ATH11K_HTC_SVC_GRP_NMI = 2,
+ ATH11K_HTC_SVC_GRP_HTT = 3,
+ ATH11K_HTC_SVC_GRP_CFG = 4,
+ ATH11K_HTC_SVC_GRP_IPA = 5,
+ ATH11K_HTC_SVC_GRP_PKTLOG = 6,
+
+ ATH11K_HTC_SVC_GRP_TEST = 254,
+ ATH11K_HTC_SVC_GRP_LAST = 255,
+};
+
+#define SVC(group, idx) \
+ (int)(((int)(group) << 8) | (int)(idx))
+
+enum ath11k_htc_svc_id {
+ /* NOTE: service ID of 0x0000 is reserved and should never be used */
+ ATH11K_HTC_SVC_ID_RESERVED = 0x0000,
+ ATH11K_HTC_SVC_ID_UNUSED = ATH11K_HTC_SVC_ID_RESERVED,
+
+ ATH11K_HTC_SVC_ID_RSVD_CTRL = SVC(ATH11K_HTC_SVC_GRP_RSVD, 1),
+ ATH11K_HTC_SVC_ID_WMI_CONTROL = SVC(ATH11K_HTC_SVC_GRP_WMI, 0),
+ ATH11K_HTC_SVC_ID_WMI_DATA_BE = SVC(ATH11K_HTC_SVC_GRP_WMI, 1),
+ ATH11K_HTC_SVC_ID_WMI_DATA_BK = SVC(ATH11K_HTC_SVC_GRP_WMI, 2),
+ ATH11K_HTC_SVC_ID_WMI_DATA_VI = SVC(ATH11K_HTC_SVC_GRP_WMI, 3),
+ ATH11K_HTC_SVC_ID_WMI_DATA_VO = SVC(ATH11K_HTC_SVC_GRP_WMI, 4),
+ ATH11K_HTC_SVC_ID_WMI_CONTROL_MAC1 = SVC(ATH11K_HTC_SVC_GRP_WMI, 5),
+ ATH11K_HTC_SVC_ID_WMI_CONTROL_MAC2 = SVC(ATH11K_HTC_SVC_GRP_WMI, 6),
+
+ ATH11K_HTC_SVC_ID_NMI_CONTROL = SVC(ATH11K_HTC_SVC_GRP_NMI, 0),
+ ATH11K_HTC_SVC_ID_NMI_DATA = SVC(ATH11K_HTC_SVC_GRP_NMI, 1),
+
+ ATH11K_HTC_SVC_ID_HTT_DATA_MSG = SVC(ATH11K_HTC_SVC_GRP_HTT, 0),
+
+ /* raw stream service (i.e. flash, tcmd, calibration apps) */
+ ATH11K_HTC_SVC_ID_TEST_RAW_STREAMS = SVC(ATH11K_HTC_SVC_GRP_TEST, 0),
+ ATH11K_HTC_SVC_ID_IPA_TX = SVC(ATH11K_HTC_SVC_GRP_IPA, 0),
+ ATH11K_HTC_SVC_ID_PKT_LOG = SVC(ATH11K_HTC_SVC_GRP_PKTLOG, 0),
+};
+
+#undef SVC
+
+enum ath11k_htc_ep_id {
+ ATH11K_HTC_EP_UNUSED = -1,
+ ATH11K_HTC_EP_0 = 0,
+ ATH11K_HTC_EP_1 = 1,
+ ATH11K_HTC_EP_2,
+ ATH11K_HTC_EP_3,
+ ATH11K_HTC_EP_4,
+ ATH11K_HTC_EP_5,
+ ATH11K_HTC_EP_6,
+ ATH11K_HTC_EP_7,
+ ATH11K_HTC_EP_8,
+ ATH11K_HTC_EP_COUNT,
+};
+
+struct ath11k_htc_ops {
+ void (*target_send_suspend_complete)(struct ath11k_base *ab);
+};
+
+struct ath11k_htc_ep_ops {
+ void (*ep_tx_complete)(struct ath11k_base *, struct sk_buff *);
+ void (*ep_rx_complete)(struct ath11k_base *, struct sk_buff *);
+ void (*ep_tx_credits)(struct ath11k_base *);
+};
+
+/* service connection information */
+struct ath11k_htc_svc_conn_req {
+ u16 service_id;
+ struct ath11k_htc_ep_ops ep_ops;
+ int max_send_queue_depth;
+};
+
+/* service connection response information */
+struct ath11k_htc_svc_conn_resp {
+ u8 buffer_len;
+ u8 actual_len;
+ enum ath11k_htc_ep_id eid;
+ unsigned int max_msg_len;
+ u8 connect_resp_code;
+};
+
+#define ATH11K_NUM_CONTROL_TX_BUFFERS 2
+#define ATH11K_HTC_MAX_LEN 4096
+#define ATH11K_HTC_MAX_CTRL_MSG_LEN 256
+#define ATH11K_HTC_WAIT_TIMEOUT_HZ (1 * HZ)
+#define ATH11K_HTC_CONTROL_BUFFER_SIZE (ATH11K_HTC_MAX_CTRL_MSG_LEN + \
+ sizeof(struct ath11k_htc_hdr))
+#define ATH11K_HTC_CONN_SVC_TIMEOUT_HZ (1 * HZ)
+#define ATH11K_HTC_MAX_SERVICE_ALLOC_ENTRIES 8
+
+struct ath11k_htc_ep {
+ struct ath11k_htc *htc;
+ enum ath11k_htc_ep_id eid;
+ enum ath11k_htc_svc_id service_id;
+ struct ath11k_htc_ep_ops ep_ops;
+
+ int max_tx_queue_depth;
+ int max_ep_message_len;
+ u8 ul_pipe_id;
+ u8 dl_pipe_id;
+
+ u8 seq_no; /* for debugging */
+ int tx_credits;
+ bool tx_credit_flow_enabled;
+};
+
+struct ath11k_htc_svc_tx_credits {
+ u16 service_id;
+ u8 credit_allocation;
+};
+
+struct ath11k_htc {
+ struct ath11k_base *ab;
+ struct ath11k_htc_ep endpoint[ATH11K_HTC_EP_COUNT];
+
+ /* protects endpoints */
+ spinlock_t tx_lock;
+
+ struct ath11k_htc_ops htc_ops;
+
+ u8 control_resp_buffer[ATH11K_HTC_MAX_CTRL_MSG_LEN];
+ int control_resp_len;
+
+ struct completion ctl_resp;
+
+ int total_transmit_credits;
+ struct ath11k_htc_svc_tx_credits
+ service_alloc_table[ATH11K_HTC_MAX_SERVICE_ALLOC_ENTRIES];
+ int target_credit_size;
+ u8 wmi_ep_count;
+};
+
+int ath11k_htc_init(struct ath11k_base *ab);
+int ath11k_htc_wait_target(struct ath11k_htc *htc);
+int ath11k_htc_start(struct ath11k_htc *htc);
+int ath11k_htc_connect_service(struct ath11k_htc *htc,
+ struct ath11k_htc_svc_conn_req *conn_req,
+ struct ath11k_htc_svc_conn_resp *conn_resp);
+int ath11k_htc_send(struct ath11k_htc *htc, enum ath11k_htc_ep_id eid,
+ struct sk_buff *packet);
+struct sk_buff *ath11k_htc_alloc_skb(struct ath11k_base *ab, int size);
+void ath11k_htc_rx_completion_handler(struct ath11k_base *ab,
+ struct sk_buff *skb);
+
+#endif
diff --git a/drivers/net/wireless/ath/ath11k/hw.h b/drivers/net/wireless/ath/ath11k/hw.h
new file mode 100644
index 000000000000..dd39333ec0ea
--- /dev/null
+++ b/drivers/net/wireless/ath/ath11k/hw.h
@@ -0,0 +1,127 @@
+/* SPDX-License-Identifier: BSD-3-Clause-Clear */
+/*
+ * Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
+ */
+
+#ifndef ATH11K_HW_H
+#define ATH11K_HW_H
+
+/* Target configuration defines */
+
+/* Num VDEVS per radio */
+#define TARGET_NUM_VDEVS (16 + 1)
+
+#define TARGET_NUM_PEERS_PDEV (512 + TARGET_NUM_VDEVS)
+
+/* Num of peers for Single Radio mode */
+#define TARGET_NUM_PEERS_SINGLE (TARGET_NUM_PEERS_PDEV)
+
+/* Num of peers for DBS */
+#define TARGET_NUM_PEERS_DBS (2 * TARGET_NUM_PEERS_PDEV)
+
+/* Num of peers for DBS_SBS */
+#define TARGET_NUM_PEERS_DBS_SBS (3 * TARGET_NUM_PEERS_PDEV)
+
+/* Max num of stations (per radio) */
+#define TARGET_NUM_STATIONS 512
+
+#define TARGET_NUM_PEERS(x) TARGET_NUM_PEERS_##x
+#define TARGET_NUM_PEER_KEYS 2
+#define TARGET_NUM_TIDS(x) (2 * TARGET_NUM_PEERS(x) + \
+ 4 * TARGET_NUM_VDEVS + 8)
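+/* e.g. in DBS mode: 2 * (2 * 529) + 4 * 17 + 8 = 2192 TIDs */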
+
+#define TARGET_AST_SKID_LIMIT 16
+#define TARGET_NUM_OFFLD_PEERS 4
+#define TARGET_NUM_OFFLD_REORDER_BUFFS 4
+
+#define TARGET_TX_CHAIN_MASK (BIT(0) | BIT(1) | BIT(2) | BIT(4))
+#define TARGET_RX_CHAIN_MASK (BIT(0) | BIT(1) | BIT(2) | BIT(4))
+#define TARGET_RX_TIMEOUT_LO_PRI 100
+#define TARGET_RX_TIMEOUT_HI_PRI 40
+
+#define TARGET_DECAP_MODE_RAW 0
+#define TARGET_DECAP_MODE_NATIVE_WIFI 1
+#define TARGET_DECAP_MODE_ETH 2
+
+#define TARGET_SCAN_MAX_PENDING_REQS 4
+#define TARGET_BMISS_OFFLOAD_MAX_VDEV 3
+#define TARGET_ROAM_OFFLOAD_MAX_VDEV 3
+#define TARGET_ROAM_OFFLOAD_MAX_AP_PROFILES 8
+#define TARGET_GTK_OFFLOAD_MAX_VDEV 3
+#define TARGET_NUM_MCAST_GROUPS 12
+#define TARGET_NUM_MCAST_TABLE_ELEMS 64
+#define TARGET_MCAST2UCAST_MODE 2
+#define TARGET_TX_DBG_LOG_SIZE 1024
+#define TARGET_RX_SKIP_DEFRAG_TIMEOUT_DUP_DETECTION_CHECK 1
+#define TARGET_VOW_CONFIG 0
+#define TARGET_NUM_MSDU_DESC (2500)
+#define TARGET_MAX_FRAG_ENTRIES 6
+#define TARGET_MAX_BCN_OFFLD 16
+#define TARGET_NUM_WDS_ENTRIES 32
+#define TARGET_DMA_BURST_SIZE 1
+#define TARGET_RX_BATCHMODE 1
+
+#define ATH11K_HW_MAX_QUEUES 4
+
+#define ATH11K_HW_RATECODE_CCK_SHORT_PREAM_MASK 0x4
+
+#define ATH11K_FW_DIR "ath11k"
+
+/* IPQ8074 definitions */
+#define IPQ8074_FW_DIR "IPQ8074"
+#define IPQ8074_MAX_BOARD_DATA_SZ (256 * 1024)
+#define IPQ8074_MAX_CAL_DATA_SZ IPQ8074_MAX_BOARD_DATA_SZ
+
+#define ATH11K_BOARD_MAGIC "QCA-ATH11K-BOARD"
+#define ATH11K_BOARD_API2_FILE "board-2.bin"
+#define ATH11K_DEFAULT_BOARD_FILE "bdwlan.bin"
+#define ATH11K_DEFAULT_CAL_FILE "caldata.bin"
+
+enum ath11k_hw_rate_cck {
+ ATH11K_HW_RATE_CCK_LP_11M = 0,
+ ATH11K_HW_RATE_CCK_LP_5_5M,
+ ATH11K_HW_RATE_CCK_LP_2M,
+ ATH11K_HW_RATE_CCK_LP_1M,
+ ATH11K_HW_RATE_CCK_SP_11M,
+ ATH11K_HW_RATE_CCK_SP_5_5M,
+ ATH11K_HW_RATE_CCK_SP_2M,
+};
+
+enum ath11k_hw_rate_ofdm {
+ ATH11K_HW_RATE_OFDM_48M = 0,
+ ATH11K_HW_RATE_OFDM_24M,
+ ATH11K_HW_RATE_OFDM_12M,
+ ATH11K_HW_RATE_OFDM_6M,
+ ATH11K_HW_RATE_OFDM_54M,
+ ATH11K_HW_RATE_OFDM_36M,
+ ATH11K_HW_RATE_OFDM_18M,
+ ATH11K_HW_RATE_OFDM_9M,
+};
+
+struct ath11k_hw_params {
+ const char *name;
+ struct {
+ const char *dir;
+ size_t board_size;
+ size_t cal_size;
+ } fw;
+};
+
+struct ath11k_fw_ie {
+ __le32 id;
+ __le32 len;
+ u8 data[0];
+};
+
+enum ath11k_bd_ie_board_type {
+ ATH11K_BD_IE_BOARD_NAME = 0,
+ ATH11K_BD_IE_BOARD_DATA = 1,
+};
+
+enum ath11k_bd_ie_type {
+ /* contains sub IEs of enum ath11k_bd_ie_board_type */
+ ATH11K_BD_IE_BOARD = 0,
+ ATH11K_BD_IE_BOARD_EXT = 1,
+};
+
+#endif
diff --git a/drivers/net/wireless/ath/ath11k/mac.c b/drivers/net/wireless/ath/ath11k/mac.c
new file mode 100644
index 000000000000..6640662f5ede
--- /dev/null
+++ b/drivers/net/wireless/ath/ath11k/mac.c
@@ -0,0 +1,5907 @@
+// SPDX-License-Identifier: BSD-3-Clause-Clear
+/*
+ * Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
+ */
+
+#include <net/mac80211.h>
+#include <linux/etherdevice.h>
+#include "mac.h"
+#include "core.h"
+#include "debug.h"
+#include "wmi.h"
+#include "hw.h"
+#include "dp_tx.h"
+#include "dp_rx.h"
+#include "testmode.h"
+#include "peer.h"
+
+#define CHAN2G(_channel, _freq, _flags) { \
+ .band = NL80211_BAND_2GHZ, \
+ .hw_value = (_channel), \
+ .center_freq = (_freq), \
+ .flags = (_flags), \
+ .max_antenna_gain = 0, \
+ .max_power = 30, \
+}
+
+#define CHAN5G(_channel, _freq, _flags) { \
+ .band = NL80211_BAND_5GHZ, \
+ .hw_value = (_channel), \
+ .center_freq = (_freq), \
+ .flags = (_flags), \
+ .max_antenna_gain = 0, \
+ .max_power = 30, \
+}
+
+static const struct ieee80211_channel ath11k_2ghz_channels[] = {
+ CHAN2G(1, 2412, 0),
+ CHAN2G(2, 2417, 0),
+ CHAN2G(3, 2422, 0),
+ CHAN2G(4, 2427, 0),
+ CHAN2G(5, 2432, 0),
+ CHAN2G(6, 2437, 0),
+ CHAN2G(7, 2442, 0),
+ CHAN2G(8, 2447, 0),
+ CHAN2G(9, 2452, 0),
+ CHAN2G(10, 2457, 0),
+ CHAN2G(11, 2462, 0),
+ CHAN2G(12, 2467, 0),
+ CHAN2G(13, 2472, 0),
+ CHAN2G(14, 2484, 0),
+};
+
+static const struct ieee80211_channel ath11k_5ghz_channels[] = {
+ CHAN5G(36, 5180, 0),
+ CHAN5G(40, 5200, 0),
+ CHAN5G(44, 5220, 0),
+ CHAN5G(48, 5240, 0),
+ CHAN5G(52, 5260, 0),
+ CHAN5G(56, 5280, 0),
+ CHAN5G(60, 5300, 0),
+ CHAN5G(64, 5320, 0),
+ CHAN5G(100, 5500, 0),
+ CHAN5G(104, 5520, 0),
+ CHAN5G(108, 5540, 0),
+ CHAN5G(112, 5560, 0),
+ CHAN5G(116, 5580, 0),
+ CHAN5G(120, 5600, 0),
+ CHAN5G(124, 5620, 0),
+ CHAN5G(128, 5640, 0),
+ CHAN5G(132, 5660, 0),
+ CHAN5G(136, 5680, 0),
+ CHAN5G(140, 5700, 0),
+ CHAN5G(144, 5720, 0),
+ CHAN5G(149, 5745, 0),
+ CHAN5G(153, 5765, 0),
+ CHAN5G(157, 5785, 0),
+ CHAN5G(161, 5805, 0),
+ CHAN5G(165, 5825, 0),
+ CHAN5G(169, 5845, 0),
+ CHAN5G(173, 5865, 0),
+};
+
+static struct ieee80211_rate ath11k_legacy_rates[] = {
+ { .bitrate = 10,
+ .hw_value = ATH11K_HW_RATE_CCK_LP_1M },
+ { .bitrate = 20,
+ .hw_value = ATH11K_HW_RATE_CCK_LP_2M,
+ .hw_value_short = ATH11K_HW_RATE_CCK_SP_2M,
+ .flags = IEEE80211_RATE_SHORT_PREAMBLE },
+ { .bitrate = 55,
+ .hw_value = ATH11K_HW_RATE_CCK_LP_5_5M,
+ .hw_value_short = ATH11K_HW_RATE_CCK_SP_5_5M,
+ .flags = IEEE80211_RATE_SHORT_PREAMBLE },
+ { .bitrate = 110,
+ .hw_value = ATH11K_HW_RATE_CCK_LP_11M,
+ .hw_value_short = ATH11K_HW_RATE_CCK_SP_11M,
+ .flags = IEEE80211_RATE_SHORT_PREAMBLE },
+
+ { .bitrate = 60, .hw_value = ATH11K_HW_RATE_OFDM_6M },
+ { .bitrate = 90, .hw_value = ATH11K_HW_RATE_OFDM_9M },
+ { .bitrate = 120, .hw_value = ATH11K_HW_RATE_OFDM_12M },
+ { .bitrate = 180, .hw_value = ATH11K_HW_RATE_OFDM_18M },
+ { .bitrate = 240, .hw_value = ATH11K_HW_RATE_OFDM_24M },
+ { .bitrate = 360, .hw_value = ATH11K_HW_RATE_OFDM_36M },
+ { .bitrate = 480, .hw_value = ATH11K_HW_RATE_OFDM_48M },
+ { .bitrate = 540, .hw_value = ATH11K_HW_RATE_OFDM_54M },
+};
+
+static const int
+ath11k_phymodes[NUM_NL80211_BANDS][ATH11K_CHAN_WIDTH_NUM] = {
+ [NL80211_BAND_2GHZ] = {
+ [NL80211_CHAN_WIDTH_5] = MODE_UNKNOWN,
+ [NL80211_CHAN_WIDTH_10] = MODE_UNKNOWN,
+ [NL80211_CHAN_WIDTH_20_NOHT] = MODE_11AX_HE20_2G,
+ [NL80211_CHAN_WIDTH_20] = MODE_11AX_HE20_2G,
+ [NL80211_CHAN_WIDTH_40] = MODE_11AX_HE40_2G,
+ [NL80211_CHAN_WIDTH_80] = MODE_11AX_HE80_2G,
+ [NL80211_CHAN_WIDTH_80P80] = MODE_UNKNOWN,
+ [NL80211_CHAN_WIDTH_160] = MODE_UNKNOWN,
+ },
+ [NL80211_BAND_5GHZ] = {
+ [NL80211_CHAN_WIDTH_5] = MODE_UNKNOWN,
+ [NL80211_CHAN_WIDTH_10] = MODE_UNKNOWN,
+ [NL80211_CHAN_WIDTH_20_NOHT] = MODE_11AX_HE20,
+ [NL80211_CHAN_WIDTH_20] = MODE_11AX_HE20,
+ [NL80211_CHAN_WIDTH_40] = MODE_11AX_HE40,
+ [NL80211_CHAN_WIDTH_80] = MODE_11AX_HE80,
+ [NL80211_CHAN_WIDTH_160] = MODE_11AX_HE160,
+ [NL80211_CHAN_WIDTH_80P80] = MODE_11AX_HE80_80,
+ },
+};
+
+const struct htt_rx_ring_tlv_filter ath11k_mac_mon_status_filter_default = {
+ .rx_filter = HTT_RX_FILTER_TLV_FLAGS_MPDU_START |
+ HTT_RX_FILTER_TLV_FLAGS_PPDU_END |
+ HTT_RX_FILTER_TLV_FLAGS_PPDU_END_STATUS_DONE,
+ .pkt_filter_flags0 = HTT_RX_FP_MGMT_FILTER_FLAGS0,
+ .pkt_filter_flags1 = HTT_RX_FP_MGMT_FILTER_FLAGS1,
+ .pkt_filter_flags2 = HTT_RX_FP_CTRL_FILTER_FLASG2,
+ .pkt_filter_flags3 = HTT_RX_FP_DATA_FILTER_FLASG3 |
+ HTT_RX_FP_CTRL_FILTER_FLASG3
+};
+
+#define ATH11K_MAC_FIRST_OFDM_RATE_IDX 4
+#define ath11k_g_rates ath11k_legacy_rates
+#define ath11k_g_rates_size (ARRAY_SIZE(ath11k_legacy_rates))
+#define ath11k_a_rates (ath11k_legacy_rates + 4)
+#define ath11k_a_rates_size (ARRAY_SIZE(ath11k_legacy_rates) - 4)
+
+#define ATH11K_MAC_SCAN_TIMEOUT_MSECS 200 /* in msecs */
+
+static const u32 ath11k_smps_map[] = {
+ [WLAN_HT_CAP_SM_PS_STATIC] = WMI_PEER_SMPS_STATIC,
+ [WLAN_HT_CAP_SM_PS_DYNAMIC] = WMI_PEER_SMPS_DYNAMIC,
+ [WLAN_HT_CAP_SM_PS_INVALID] = WMI_PEER_SMPS_PS_NONE,
+ [WLAN_HT_CAP_SM_PS_DISABLED] = WMI_PEER_SMPS_PS_NONE,
+};
+
+u8 ath11k_mac_bw_to_mac80211_bw(u8 bw)
+{
+ u8 ret = 0;
+
+ switch (bw) {
+ case ATH11K_BW_20:
+ ret = RATE_INFO_BW_20;
+ break;
+ case ATH11K_BW_40:
+ ret = RATE_INFO_BW_40;
+ break;
+ case ATH11K_BW_80:
+ ret = RATE_INFO_BW_80;
+ break;
+ case ATH11K_BW_160:
+ ret = RATE_INFO_BW_160;
+ break;
+ }
+
+ return ret;
+}
+
+int ath11k_mac_hw_ratecode_to_legacy_rate(u8 hw_rc, u8 preamble, u8 *rateidx,
+ u16 *rate)
+{
+ /* Default to the OFDM rate table */
+ int i = ATH11K_MAC_FIRST_OFDM_RATE_IDX;
+ int max_rates_idx = ath11k_g_rates_size;
+
+ if (preamble == WMI_RATE_PREAMBLE_CCK) {
+ hw_rc &= ~ATH11k_HW_RATECODE_CCK_SHORT_PREAM_MASK;
+ i = 0;
+ max_rates_idx = ATH11K_MAC_FIRST_OFDM_RATE_IDX;
+ }
+
+ while (i < max_rates_idx) {
+ if (hw_rc == ath11k_legacy_rates[i].hw_value) {
+ *rateidx = i;
+ *rate = ath11k_legacy_rates[i].bitrate;
+ return 0;
+ }
+ i++;
+ }
+
+ return -EINVAL;
+}
+
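+/* Count the bits set in a chainmask; equivalent to hweight32() */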
+static int get_num_chains(u32 mask)
+{
+ int num_chains = 0;
+
+ while (mask) {
+ if (mask & BIT(0))
+ num_chains++;
+ mask >>= 1;
+ }
+
+ return num_chains;
+}
+
+u8 ath11k_mac_bitrate_to_idx(const struct ieee80211_supported_band *sband,
+ u32 bitrate)
+{
+ int i;
+
+ for (i = 0; i < sband->n_bitrates; i++)
+ if (sband->bitrates[i].bitrate == bitrate)
+ return i;
+
+ return 0;
+}
+
+static u32
+ath11k_mac_max_ht_nss(const u8 ht_mcs_mask[IEEE80211_HT_MCS_MASK_LEN])
+{
+ int nss;
+
+ for (nss = IEEE80211_HT_MCS_MASK_LEN - 1; nss >= 0; nss--)
+ if (ht_mcs_mask[nss])
+ return nss + 1;
+
+ return 1;
+}
+
+static u32
+ath11k_mac_max_vht_nss(const u16 vht_mcs_mask[NL80211_VHT_NSS_MAX])
+{
+ int nss;
+
+ for (nss = NL80211_VHT_NSS_MAX - 1; nss >= 0; nss--)
+ if (vht_mcs_mask[nss])
+ return nss + 1;
+
+ return 1;
+}
+
+static u8 ath11k_parse_mpdudensity(u8 mpdudensity)
+{
+/* 802.11n D2.0 defined values for "Minimum MPDU Start Spacing":
+ * 0 for no restriction
+ * 1 for 1/4 us
+ * 2 for 1/2 us
+ * 3 for 1 us
+ * 4 for 2 us
+ * 5 for 4 us
+ * 6 for 8 us
+ * 7 for 16 us
+ */
+ switch (mpdudensity) {
+ case 0:
+ return 0;
+ case 1:
+ case 2:
+ case 3:
+ /* Our lower layer calculations limit our precision to
+ * 1 microsecond
+ */
+ return 1;
+ case 4:
+ return 2;
+ case 5:
+ return 4;
+ case 6:
+ return 8;
+ case 7:
+ return 16;
+ default:
+ return 0;
+ }
+}
+
+static int ath11k_mac_vif_chan(struct ieee80211_vif *vif,
+ struct cfg80211_chan_def *def)
+{
+ struct ieee80211_chanctx_conf *conf;
+
+ rcu_read_lock();
+ conf = rcu_dereference(vif->chanctx_conf);
+ if (!conf) {
+ rcu_read_unlock();
+ return -ENOENT;
+ }
+
+ *def = conf->def;
+ rcu_read_unlock();
+
+ return 0;
+}
+
+static bool ath11k_mac_bitrate_is_cck(int bitrate)
+{
+ switch (bitrate) {
+ case 10:
+ case 20:
+ case 55:
+ case 110:
+ return true;
+ }
+
+ return false;
+}
+
+u8 ath11k_mac_hw_rate_to_idx(const struct ieee80211_supported_band *sband,
+ u8 hw_rate, bool cck)
+{
+ const struct ieee80211_rate *rate;
+ int i;
+
+ for (i = 0; i < sband->n_bitrates; i++) {
+ rate = &sband->bitrates[i];
+
+ if (ath11k_mac_bitrate_is_cck(rate->bitrate) != cck)
+ continue;
+
+ if (rate->hw_value == hw_rate)
+ return i;
+ else if (rate->flags & IEEE80211_RATE_SHORT_PREAMBLE &&
+ rate->hw_value_short == hw_rate)
+ return i;
+ }
+
+ return 0;
+}
+
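+/* Convert a mac80211 bitrate (100 kbps units) into the firmware rate
+ * format: 500 kbps units, with BIT(7) flagging CCK rates
+ */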
+static u8 ath11k_mac_bitrate_to_rate(int bitrate)
+{
+ return DIV_ROUND_UP(bitrate, 5) |
+ (ath11k_mac_bitrate_is_cck(bitrate) ? BIT(7) : 0);
+}
+
+static void ath11k_get_arvif_iter(void *data, u8 *mac,
+ struct ieee80211_vif *vif)
+{
+ struct ath11k_vif_iter *arvif_iter = data;
+ struct ath11k_vif *arvif = (void *)vif->drv_priv;
+
+ if (arvif->vdev_id == arvif_iter->vdev_id)
+ arvif_iter->arvif = arvif;
+}
+
+struct ath11k_vif *ath11k_mac_get_arvif(struct ath11k *ar, u32 vdev_id)
+{
+ struct ath11k_vif_iter arvif_iter;
+ u32 flags;
+
+ memset(&arvif_iter, 0, sizeof(struct ath11k_vif_iter));
+ arvif_iter.vdev_id = vdev_id;
+
+ flags = IEEE80211_IFACE_ITER_RESUME_ALL;
+ ieee80211_iterate_active_interfaces_atomic(ar->hw,
+ flags,
+ ath11k_get_arvif_iter,
+ &arvif_iter);
+ if (!arvif_iter.arvif)
+ return NULL;
+
+ return arvif_iter.arvif;
+}
+
+struct ath11k_vif *ath11k_mac_get_arvif_by_vdev_id(struct ath11k_base *ab,
+ u32 vdev_id)
+{
+ int i;
+ struct ath11k_pdev *pdev;
+ struct ath11k_vif *arvif;
+
+ for (i = 0; i < ab->num_radios; i++) {
+ pdev = rcu_dereference(ab->pdevs_active[i]);
+ if (pdev && pdev->ar) {
+ arvif = ath11k_mac_get_arvif(pdev->ar, vdev_id);
+ if (arvif)
+ return arvif;
+ }
+ }
+
+ return NULL;
+}
+
+struct ath11k *ath11k_mac_get_ar_by_vdev_id(struct ath11k_base *ab, u32 vdev_id)
+{
+ int i;
+ struct ath11k_pdev *pdev;
+ struct ath11k_vif *arvif;
+
+ for (i = 0; i < ab->num_radios; i++) {
+ pdev = rcu_dereference(ab->pdevs_active[i]);
+ if (pdev && pdev->ar) {
+ arvif = ath11k_mac_get_arvif(pdev->ar, vdev_id);
+ if (arvif)
+ return arvif->ar;
+ }
+ }
+
+ return NULL;
+}
+
+struct ath11k *ath11k_mac_get_ar_by_pdev_id(struct ath11k_base *ab, u32 pdev_id)
+{
+ int i;
+ struct ath11k_pdev *pdev;
+
+ if (WARN_ON(pdev_id > ab->num_radios))
+ return NULL;
+
+ for (i = 0; i < ab->num_radios; i++) {
+ pdev = rcu_dereference(ab->pdevs_active[i]);
+
+ if (pdev && pdev->pdev_id == pdev_id)
+ return (pdev->ar ? pdev->ar : NULL);
+ }
+
+ return NULL;
+}
+
+struct ath11k *ath11k_mac_get_ar_vdev_stop_status(struct ath11k_base *ab,
+ u32 vdev_id)
+{
+ int i;
+ struct ath11k_pdev *pdev;
+ struct ath11k *ar;
+
+ for (i = 0; i < ab->num_radios; i++) {
+ pdev = rcu_dereference(ab->pdevs_active[i]);
+ if (pdev && pdev->ar) {
+ ar = pdev->ar;
+
+ spin_lock_bh(&ar->data_lock);
+ if (ar->vdev_stop_status.stop_in_progress &&
+ ar->vdev_stop_status.vdev_id == vdev_id) {
+ ar->vdev_stop_status.stop_in_progress = false;
+ spin_unlock_bh(&ar->data_lock);
+ return ar;
+ }
+ spin_unlock_bh(&ar->data_lock);
+ }
+ }
+ return NULL;
+}
+
+static void ath11k_pdev_caps_update(struct ath11k *ar)
+{
+ struct ath11k_base *ab = ar->ab;
+
+ ar->max_tx_power = ab->target_caps.hw_max_tx_power;
+
+ /* FIXME Set min_tx_power to ab->target_caps.hw_min_tx_power.
+ * But since the received value in svcrdy is same as hw_max_tx_power,
+ * we can set ar->min_tx_power to 0 currently until
+ * this is fixed in firmware
+ */
+ ar->min_tx_power = 0;
+
+ ar->txpower_limit_2g = ar->max_tx_power;
+ ar->txpower_limit_5g = ar->max_tx_power;
+ ar->txpower_scale = WMI_HOST_TP_SCALE_MAX;
+}
+
+static int ath11k_mac_txpower_recalc(struct ath11k *ar)
+{
+ struct ath11k_pdev *pdev = ar->pdev;
+ struct ath11k_vif *arvif;
+ int ret, txpower = -1;
+ u32 param;
+
+ lockdep_assert_held(&ar->conf_mutex);
+
+ list_for_each_entry(arvif, &ar->arvifs, list) {
+ if (arvif->txpower <= 0)
+ continue;
+
+ if (txpower == -1)
+ txpower = arvif->txpower;
+ else
+ txpower = min(txpower, arvif->txpower);
+ }
+
+ if (txpower == -1)
+ return 0;
+
+ /* firmware expects tx power in half-dBm units (2 units per dBm) */
+ txpower = min_t(u32, max_t(u32, ar->min_tx_power, txpower),
+ ar->max_tx_power) * 2;
+
+ ath11k_dbg(ar->ab, ATH11K_DBG_MAC, "txpower to set in hw %d\n",
+ txpower / 2);
+
+ if ((pdev->cap.supported_bands & WMI_HOST_WLAN_2G_CAP) &&
+ ar->txpower_limit_2g != txpower) {
+ param = WMI_PDEV_PARAM_TXPOWER_LIMIT2G;
+ ret = ath11k_wmi_pdev_set_param(ar, param,
+ txpower, ar->pdev->pdev_id);
+ if (ret)
+ goto fail;
+ ar->txpower_limit_2g = txpower;
+ }
+
+ if ((pdev->cap.supported_bands & WMI_HOST_WLAN_5G_CAP) &&
+ ar->txpower_limit_5g != txpower) {
+ param = WMI_PDEV_PARAM_TXPOWER_LIMIT5G;
+ ret = ath11k_wmi_pdev_set_param(ar, param,
+ txpower, ar->pdev->pdev_id);
+ if (ret)
+ goto fail;
+ ar->txpower_limit_5g = txpower;
+ }
+
+ return 0;
+
+fail:
+ ath11k_warn(ar->ab, "failed to recalc txpower limit %d using pdev param %d: %d\n",
+ txpower / 2, param, ret);
+ return ret;
+}
+
+static int ath11k_recalc_rtscts_prot(struct ath11k_vif *arvif)
+{
+ struct ath11k *ar = arvif->ar;
+ u32 vdev_param, rts_cts = 0;
+ int ret;
+
+ lockdep_assert_held(&ar->conf_mutex);
+
+ vdev_param = WMI_VDEV_PARAM_ENABLE_RTSCTS;
+
+ /* Enable RTS/CTS protection for sw retries (when legacy stations
+ * are in BSS) or by default only for second rate series.
+ * TODO: Check if we need to enable CTS 2 Self in any case
+ */
+ rts_cts = WMI_USE_RTS_CTS;
+
+ if (arvif->num_legacy_stations > 0)
+ rts_cts |= WMI_RTSCTS_ACROSS_SW_RETRIES << 4;
+ else
+ rts_cts |= WMI_RTSCTS_FOR_SECOND_RATESERIES << 4;
+
+ /* Need not send duplicate param value to firmware */
+ if (arvif->rtscts_prot_mode == rts_cts)
+ return 0;
+
+ arvif->rtscts_prot_mode = rts_cts;
+
+ ath11k_dbg(ar->ab, ATH11K_DBG_MAC, "mac vdev %d recalc rts/cts prot %d\n",
+ arvif->vdev_id, rts_cts);
+
+ ret = ath11k_wmi_vdev_set_param_cmd(ar, arvif->vdev_id,
+ vdev_param, rts_cts);
+ if (ret)
+ ath11k_warn(ar->ab, "failed to recalculate rts/cts prot for vdev %d: %d\n",
+ arvif->vdev_id, ret);
+
+ return ret;
+}
+
+static int ath11k_mac_set_kickout(struct ath11k_vif *arvif)
+{
+ struct ath11k *ar = arvif->ar;
+ u32 param;
+ int ret;
+
+ ret = ath11k_wmi_pdev_set_param(ar, WMI_PDEV_PARAM_STA_KICKOUT_TH,
+ ATH11K_KICKOUT_THRESHOLD,
+ ar->pdev->pdev_id);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to set kickout threshold on vdev %i: %d\n",
+ arvif->vdev_id, ret);
+ return ret;
+ }
+
+ param = WMI_VDEV_PARAM_AP_KEEPALIVE_MIN_IDLE_INACTIVE_TIME_SECS;
+ ret = ath11k_wmi_vdev_set_param_cmd(ar, arvif->vdev_id, param,
+ ATH11K_KEEPALIVE_MIN_IDLE);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to set keepalive minimum idle time on vdev %i: %d\n",
+ arvif->vdev_id, ret);
+ return ret;
+ }
+
+ param = WMI_VDEV_PARAM_AP_KEEPALIVE_MAX_IDLE_INACTIVE_TIME_SECS;
+ ret = ath11k_wmi_vdev_set_param_cmd(ar, arvif->vdev_id, param,
+ ATH11K_KEEPALIVE_MAX_IDLE);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to set keepalive maximum idle time on vdev %i: %d\n",
+ arvif->vdev_id, ret);
+ return ret;
+ }
+
+ param = WMI_VDEV_PARAM_AP_KEEPALIVE_MAX_UNRESPONSIVE_TIME_SECS;
+ ret = ath11k_wmi_vdev_set_param_cmd(ar, arvif->vdev_id, param,
+ ATH11K_KEEPALIVE_MAX_UNRESPONSIVE);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to set keepalive maximum unresponsive time on vdev %i: %d\n",
+ arvif->vdev_id, ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+void ath11k_mac_peer_cleanup_all(struct ath11k *ar)
+{
+ struct ath11k_peer *peer, *tmp;
+ struct ath11k_base *ab = ar->ab;
+
+ lockdep_assert_held(&ar->conf_mutex);
+
+ spin_lock_bh(&ab->base_lock);
+ list_for_each_entry_safe(peer, tmp, &ab->peers, list) {
+ ath11k_peer_rx_tid_cleanup(ar, peer);
+ list_del(&peer->list);
+ kfree(peer);
+ }
+ spin_unlock_bh(&ab->base_lock);
+
+ ar->num_peers = 0;
+ ar->num_stations = 0;
+}
+
+static int ath11k_monitor_vdev_up(struct ath11k *ar, int vdev_id)
+{
+ int ret = 0;
+
+ ret = ath11k_wmi_vdev_up(ar, vdev_id, 0, ar->mac_addr);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to put up monitor vdev %i: %d\n",
+ vdev_id, ret);
+ return ret;
+ }
+
+ ath11k_dbg(ar->ab, ATH11K_DBG_MAC, "mac monitor vdev %i started\n",
+ vdev_id);
+ return 0;
+}
+
+static int ath11k_mac_op_config(struct ieee80211_hw *hw, u32 changed)
+{
+ struct ath11k *ar = hw->priv;
+ int ret = 0;
+
+ /* mac80211 requires this op to be present, hence this mostly empty
+ * function; it can be extended once configuration changes need to be
+ * handled.
+ */
+
+ mutex_lock(&ar->conf_mutex);
+
+ /* TODO: Handle configuration changes as appropriate */
+
+ mutex_unlock(&ar->conf_mutex);
+
+ return ret;
+}
+
+static int ath11k_mac_setup_bcn_tmpl(struct ath11k_vif *arvif)
+{
+ struct ath11k *ar = arvif->ar;
+ struct ath11k_base *ab = ar->ab;
+ struct ieee80211_hw *hw = ar->hw;
+ struct ieee80211_vif *vif = arvif->vif;
+ struct ieee80211_mutable_offsets offs = {};
+ struct sk_buff *bcn;
+ int ret;
+
+ if (arvif->vdev_type != WMI_VDEV_TYPE_AP)
+ return 0;
+
+ bcn = ieee80211_beacon_get_template(hw, vif, &offs);
+ if (!bcn) {
+ ath11k_warn(ab, "failed to get beacon template from mac80211\n");
+ return -EPERM;
+ }
+
+ ret = ath11k_wmi_bcn_tmpl(ar, arvif->vdev_id, &offs, bcn);
+
+ kfree_skb(bcn);
+
+ if (ret)
+ ath11k_warn(ab, "failed to submit beacon template command: %d\n",
+ ret);
+
+ return ret;
+}
+
+static void ath11k_control_beaconing(struct ath11k_vif *arvif,
+ struct ieee80211_bss_conf *info)
+{
+ struct ath11k *ar = arvif->ar;
+ int ret = 0;
+
+ lockdep_assert_held(&arvif->ar->conf_mutex);
+
+ if (!info->enable_beacon) {
+ ret = ath11k_wmi_vdev_down(ar, arvif->vdev_id);
+ if (ret)
+ ath11k_warn(ar->ab, "failed to down vdev_id %i: %d\n",
+ arvif->vdev_id, ret);
+
+ arvif->is_up = false;
+ return;
+ }
+
+ /* Install the beacon template to the FW */
+ ret = ath11k_mac_setup_bcn_tmpl(arvif);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to update bcn tmpl during vdev up: %d\n",
+ ret);
+ return;
+ }
+
+ arvif->tx_seq_no = 0x1000;
+
+ arvif->aid = 0;
+
+ ether_addr_copy(arvif->bssid, info->bssid);
+
+ ret = ath11k_wmi_vdev_up(arvif->ar, arvif->vdev_id, arvif->aid,
+ arvif->bssid);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to bring up vdev %d: %i\n",
+ arvif->vdev_id, ret);
+ return;
+ }
+
+ arvif->is_up = true;
+
+ ath11k_dbg(ar->ab, ATH11K_DBG_MAC, "mac vdev %d up\n", arvif->vdev_id);
+}
+
+static void ath11k_peer_assoc_h_basic(struct ath11k *ar,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta,
+ struct peer_assoc_params *arg)
+{
+ struct ath11k_vif *arvif = (void *)vif->drv_priv;
+ u32 aid;
+
+ lockdep_assert_held(&ar->conf_mutex);
+
+ if (vif->type == NL80211_IFTYPE_STATION)
+ aid = vif->bss_conf.aid;
+ else
+ aid = sta->aid;
+
+ ether_addr_copy(arg->peer_mac, sta->addr);
+ arg->vdev_id = arvif->vdev_id;
+ arg->peer_associd = aid;
+ arg->auth_flag = true;
+ /* TODO: STA WAR in ath10k for listen interval required? */
+ arg->peer_listen_intval = ar->hw->conf.listen_interval;
+ arg->peer_nss = 1;
+ arg->peer_caps = vif->bss_conf.assoc_capability;
+}
+
+static void ath11k_peer_assoc_h_crypto(struct ath11k *ar,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta,
+ struct peer_assoc_params *arg)
+{
+ struct ieee80211_bss_conf *info = &vif->bss_conf;
+ struct cfg80211_chan_def def;
+ struct cfg80211_bss *bss;
+ const u8 *rsnie = NULL;
+ const u8 *wpaie = NULL;
+
+ lockdep_assert_held(&ar->conf_mutex);
+
+ if (WARN_ON(ath11k_mac_vif_chan(vif, &def)))
+ return;
+
+ bss = cfg80211_get_bss(ar->hw->wiphy, def.chan, info->bssid, NULL, 0,
+ IEEE80211_BSS_TYPE_ANY, IEEE80211_PRIVACY_ANY);
+ if (bss) {
+ const struct cfg80211_bss_ies *ies;
+
+ rcu_read_lock();
+ rsnie = ieee80211_bss_get_ie(bss, WLAN_EID_RSN);
+
+ ies = rcu_dereference(bss->ies);
+
+ wpaie = cfg80211_find_vendor_ie(WLAN_OUI_MICROSOFT,
+ WLAN_OUI_TYPE_MICROSOFT_WPA,
+ ies->data,
+ ies->len);
+ rcu_read_unlock();
+ cfg80211_put_bss(ar->hw->wiphy, bss);
+ }
+
+ /* FIXME: is keying this off the RSN IE/WPA IE the right approach? */
+ if (rsnie || wpaie) {
+ ath11k_dbg(ar->ab, ATH11K_DBG_WMI,
+ "%s: rsn ie found\n", __func__);
+ arg->need_ptk_4_way = true;
+ }
+
+ if (wpaie) {
+ ath11k_dbg(ar->ab, ATH11K_DBG_WMI,
+ "%s: wpa ie found\n", __func__);
+ arg->need_gtk_2_way = true;
+ }
+
+ if (sta->mfp) {
+ /* TODO: Need to check if FW supports PMF? */
+ arg->is_pmf_enabled = true;
+ }
+
+ /* TODO: safe_mode_enabled (bypass 4-way handshake) flag req? */
+}
+
+static void ath11k_peer_assoc_h_rates(struct ath11k *ar,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta,
+ struct peer_assoc_params *arg)
+{
+ struct ath11k_vif *arvif = (void *)vif->drv_priv;
+ struct wmi_rate_set_arg *rateset = &arg->peer_legacy_rates;
+ struct cfg80211_chan_def def;
+ const struct ieee80211_supported_band *sband;
+ const struct ieee80211_rate *rates;
+ enum nl80211_band band;
+ u32 ratemask;
+ u8 rate;
+ int i;
+
+ lockdep_assert_held(&ar->conf_mutex);
+
+ if (WARN_ON(ath11k_mac_vif_chan(vif, &def)))
+ return;
+
+ band = def.chan->band;
+ sband = ar->hw->wiphy->bands[band];
+ ratemask = sta->supp_rates[band];
+ ratemask &= arvif->bitrate_mask.control[band].legacy;
+ rates = sband->bitrates;
+
+ rateset->num_rates = 0;
+
+ for (i = 0; i < 32; i++, ratemask >>= 1, rates++) {
+ if (!(ratemask & 1))
+ continue;
+
+ rate = ath11k_mac_bitrate_to_rate(rates->bitrate);
+ rateset->rates[rateset->num_rates] = rate;
+ rateset->num_rates++;
+ }
+}
+
+static bool
+ath11k_peer_assoc_h_ht_masked(const u8 ht_mcs_mask[IEEE80211_HT_MCS_MASK_LEN])
+{
+ int nss;
+
+ for (nss = 0; nss < IEEE80211_HT_MCS_MASK_LEN; nss++)
+ if (ht_mcs_mask[nss])
+ return false;
+
+ return true;
+}
+
+static bool
+ath11k_peer_assoc_h_vht_masked(const u16 vht_mcs_mask[NL80211_VHT_NSS_MAX])
+{
+ int nss;
+
+ for (nss = 0; nss < NL80211_VHT_NSS_MAX; nss++)
+ if (vht_mcs_mask[nss])
+ return false;
+
+ return true;
+}
+
+static void ath11k_peer_assoc_h_ht(struct ath11k *ar,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta,
+ struct peer_assoc_params *arg)
+{
+ const struct ieee80211_sta_ht_cap *ht_cap = &sta->ht_cap;
+ struct ath11k_vif *arvif = (void *)vif->drv_priv;
+ struct cfg80211_chan_def def;
+ enum nl80211_band band;
+ const u8 *ht_mcs_mask;
+ int i, n;
+ u8 max_nss;
+ u32 stbc;
+
+ lockdep_assert_held(&ar->conf_mutex);
+
+ if (WARN_ON(ath11k_mac_vif_chan(vif, &def)))
+ return;
+
+ if (!ht_cap->ht_supported)
+ return;
+
+ band = def.chan->band;
+ ht_mcs_mask = arvif->bitrate_mask.control[band].ht_mcs;
+
+ if (ath11k_peer_assoc_h_ht_masked(ht_mcs_mask))
+ return;
+
+ arg->ht_flag = true;
+
+ arg->peer_max_mpdu = (1 << (IEEE80211_HT_MAX_AMPDU_FACTOR +
+ ht_cap->ampdu_factor)) - 1;
+
+ arg->peer_mpdu_density =
+ ath11k_parse_mpdudensity(ht_cap->ampdu_density);
+
+ arg->peer_ht_caps = ht_cap->cap;
+ arg->peer_rate_caps |= WMI_HOST_RC_HT_FLAG;
+
+ if (ht_cap->cap & IEEE80211_HT_CAP_LDPC_CODING)
+ arg->ldpc_flag = true;
+
+ if (sta->bandwidth >= IEEE80211_STA_RX_BW_40) {
+ arg->bw_40 = true;
+ arg->peer_rate_caps |= WMI_HOST_RC_CW40_FLAG;
+ }
+
+ if (arvif->bitrate_mask.control[band].gi != NL80211_TXRATE_FORCE_LGI) {
+ if (ht_cap->cap & (IEEE80211_HT_CAP_SGI_20 |
+ IEEE80211_HT_CAP_SGI_40))
+ arg->peer_rate_caps |= WMI_HOST_RC_SGI_FLAG;
+ }
+
+ if (ht_cap->cap & IEEE80211_HT_CAP_TX_STBC) {
+ arg->peer_rate_caps |= WMI_HOST_RC_TX_STBC_FLAG;
+ arg->stbc_flag = true;
+ }
+
+ if (ht_cap->cap & IEEE80211_HT_CAP_RX_STBC) {
+ stbc = ht_cap->cap & IEEE80211_HT_CAP_RX_STBC;
+ stbc = stbc >> IEEE80211_HT_CAP_RX_STBC_SHIFT;
+ stbc = stbc << WMI_HOST_RC_RX_STBC_FLAG_S;
+ arg->peer_rate_caps |= stbc;
+ arg->stbc_flag = true;
+ }
+
+ if (ht_cap->mcs.rx_mask[1] && ht_cap->mcs.rx_mask[2])
+ arg->peer_rate_caps |= WMI_HOST_RC_TS_FLAG;
+ else if (ht_cap->mcs.rx_mask[1])
+ arg->peer_rate_caps |= WMI_HOST_RC_DS_FLAG;
+
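+	/* Intersect the peer's HT RX MCS map with the configured mask;
+	 * HT MCS index i belongs to spatial stream (i / 8) + 1.
+	 */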
+ for (i = 0, n = 0, max_nss = 0; i < IEEE80211_HT_MCS_MASK_LEN * 8; i++)
+ if ((ht_cap->mcs.rx_mask[i / 8] & BIT(i % 8)) &&
+ (ht_mcs_mask[i / 8] & BIT(i % 8))) {
+ max_nss = (i / 8) + 1;
+ arg->peer_ht_rates.rates[n++] = i;
+ }
+
+ /* This is a workaround for HT-enabled STAs which break the spec
+ * and have no HT capabilities RX mask (no HT RX MCS map).
+ *
+ * As per spec, in section 20.3.5 Modulation and coding scheme (MCS),
+ * MCS 0 through 7 are mandatory in 20MHz with 800 ns GI at all STAs.
+ *
+	 * Firmware asserts if such a situation occurs.
+ */
+ if (n == 0) {
+ arg->peer_ht_rates.num_rates = 8;
+ for (i = 0; i < arg->peer_ht_rates.num_rates; i++)
+ arg->peer_ht_rates.rates[i] = i;
+ } else {
+ arg->peer_ht_rates.num_rates = n;
+ arg->peer_nss = min(sta->rx_nss, max_nss);
+ }
+
+ ath11k_dbg(ar->ab, ATH11K_DBG_MAC, "mac ht peer %pM mcs cnt %d nss %d\n",
+ arg->peer_mac,
+ arg->peer_ht_rates.num_rates,
+ arg->peer_nss);
+}
+
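+/* The VHT MCS map carries a 2-bit max-MCS value per NSS; expand it into a
+ * bitmask of the MCS indices (0..9) usable at the given NSS.
+ */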
+static int ath11k_mac_get_max_vht_mcs_map(u16 mcs_map, int nss)
+{
+ switch ((mcs_map >> (2 * nss)) & 0x3) {
+ case IEEE80211_VHT_MCS_SUPPORT_0_7: return BIT(8) - 1;
+ case IEEE80211_VHT_MCS_SUPPORT_0_8: return BIT(9) - 1;
+ case IEEE80211_VHT_MCS_SUPPORT_0_9: return BIT(10) - 1;
+ }
+ return 0;
+}
+
+static u16
+ath11k_peer_assoc_h_vht_limit(u16 tx_mcs_set,
+ const u16 vht_mcs_limit[NL80211_VHT_NSS_MAX])
+{
+ int idx_limit;
+ int nss;
+ u16 mcs_map;
+ u16 mcs;
+
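+	/* For each NSS, clamp the advertised TX MCS map to the highest MCS
+	 * allowed by the user-configured mask and re-encode the 2-bit field.
+	 */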
+ for (nss = 0; nss < NL80211_VHT_NSS_MAX; nss++) {
+ mcs_map = ath11k_mac_get_max_vht_mcs_map(tx_mcs_set, nss) &
+ vht_mcs_limit[nss];
+
+ if (mcs_map)
+ idx_limit = fls(mcs_map) - 1;
+ else
+ idx_limit = -1;
+
+ switch (idx_limit) {
+ case 0: /* fall through */
+ case 1: /* fall through */
+ case 2: /* fall through */
+ case 3: /* fall through */
+ case 4: /* fall through */
+ case 5: /* fall through */
+ case 6: /* fall through */
+ case 7:
+ mcs = IEEE80211_VHT_MCS_SUPPORT_0_7;
+ break;
+ case 8:
+ mcs = IEEE80211_VHT_MCS_SUPPORT_0_8;
+ break;
+ case 9:
+ mcs = IEEE80211_VHT_MCS_SUPPORT_0_9;
+ break;
+ default:
+ WARN_ON(1);
+ /* fall through */
+ case -1:
+ mcs = IEEE80211_VHT_MCS_NOT_SUPPORTED;
+ break;
+ }
+
+ tx_mcs_set &= ~(0x3 << (nss * 2));
+ tx_mcs_set |= mcs << (nss * 2);
+ }
+
+ return tx_mcs_set;
+}
+
+static void ath11k_peer_assoc_h_vht(struct ath11k *ar,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta,
+ struct peer_assoc_params *arg)
+{
+ const struct ieee80211_sta_vht_cap *vht_cap = &sta->vht_cap;
+ struct ath11k_vif *arvif = (void *)vif->drv_priv;
+ struct cfg80211_chan_def def;
+ enum nl80211_band band;
+ const u16 *vht_mcs_mask;
+ u8 ampdu_factor;
+ u8 max_nss, vht_mcs;
+ int i;
+
+ if (WARN_ON(ath11k_mac_vif_chan(vif, &def)))
+ return;
+
+ if (!vht_cap->vht_supported)
+ return;
+
+ band = def.chan->band;
+ vht_mcs_mask = arvif->bitrate_mask.control[band].vht_mcs;
+
+ if (ath11k_peer_assoc_h_vht_masked(vht_mcs_mask))
+ return;
+
+ arg->vht_flag = true;
+
+ /* TODO: similar flags required? */
+ arg->vht_capable = true;
+
+ if (def.chan->band == NL80211_BAND_2GHZ)
+ arg->vht_ng_flag = true;
+
+ arg->peer_vht_caps = vht_cap->cap;
+
+ ampdu_factor = (vht_cap->cap &
+ IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_MASK) >>
+ IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_SHIFT;
+
+ /* Workaround: Some Netgear/Linksys 11ac APs set Rx A-MPDU factor to
+ * zero in VHT IE. Using it would result in degraded throughput.
+ * arg->peer_max_mpdu at this point contains HT max_mpdu so keep
+ * it if VHT max_mpdu is smaller.
+ */
+ arg->peer_max_mpdu = max(arg->peer_max_mpdu,
+ (1U << (IEEE80211_HT_MAX_AMPDU_FACTOR +
+ ampdu_factor)) - 1);
+
+ if (sta->bandwidth == IEEE80211_STA_RX_BW_80)
+ arg->bw_80 = true;
+
+ if (sta->bandwidth == IEEE80211_STA_RX_BW_160)
+ arg->bw_160 = true;
+
+ /* Calculate peer NSS capability from VHT capabilities if STA
+ * supports VHT.
+ */
+ for (i = 0, max_nss = 0, vht_mcs = 0; i < NL80211_VHT_NSS_MAX; i++) {
+ vht_mcs = __le16_to_cpu(vht_cap->vht_mcs.rx_mcs_map) >>
+ (2 * i) & 3;
+
+ if (vht_mcs != IEEE80211_VHT_MCS_NOT_SUPPORTED &&
+ vht_mcs_mask[i])
+ max_nss = i + 1;
+ }
+ arg->peer_nss = min(sta->rx_nss, max_nss);
+ arg->rx_max_rate = __le16_to_cpu(vht_cap->vht_mcs.rx_highest);
+ arg->rx_mcs_set = __le16_to_cpu(vht_cap->vht_mcs.rx_mcs_map);
+ arg->tx_max_rate = __le16_to_cpu(vht_cap->vht_mcs.tx_highest);
+ arg->tx_mcs_set = ath11k_peer_assoc_h_vht_limit(
+ __le16_to_cpu(vht_cap->vht_mcs.tx_mcs_map), vht_mcs_mask);
+
+	/* On the IPQ8074 platform, VHT MCS rates 10 and 11 are enabled by
+	 * default, but they are not part of the 11ac standard, so
+	 * explicitly disable them in 11ac mode.
+	 */
+ arg->tx_mcs_set &= ~IEEE80211_VHT_MCS_SUPPORT_0_11_MASK;
+ arg->tx_mcs_set |= IEEE80211_DISABLE_VHT_MCS_SUPPORT_0_11;
+
+ /* TODO: Check */
+ arg->tx_max_mcs_nss = 0xFF;
+
+ ath11k_dbg(ar->ab, ATH11K_DBG_MAC, "mac vht peer %pM max_mpdu %d flags 0x%x\n",
+ sta->addr, arg->peer_max_mpdu, arg->peer_flags);
+
+ /* TODO: rxnss_override */
+}
+
+static void ath11k_peer_assoc_h_he(struct ath11k *ar,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta,
+ struct peer_assoc_params *arg)
+{
+ const struct ieee80211_sta_he_cap *he_cap = &sta->he_cap;
+ u16 v;
+
+ if (!he_cap->has_he)
+ return;
+
+ arg->he_flag = true;
+
+ memcpy(&arg->peer_he_cap_macinfo, he_cap->he_cap_elem.mac_cap_info,
+ sizeof(arg->peer_he_cap_macinfo));
+ memcpy(&arg->peer_he_cap_phyinfo, he_cap->he_cap_elem.phy_cap_info,
+ sizeof(arg->peer_he_cap_phyinfo));
+ memcpy(&arg->peer_he_ops, &vif->bss_conf.he_operation,
+ sizeof(arg->peer_he_ops));
+
+	/* the topmost byte is used to indicate BSS color info */
+ arg->peer_he_ops &= 0xffffff;
+
+ if (he_cap->he_cap_elem.phy_cap_info[6] &
+ IEEE80211_HE_PHY_CAP6_PPE_THRESHOLD_PRESENT) {
+ int bit = 7;
+ int nss, ru;
+
+ arg->peer_ppet.numss_m1 = he_cap->ppe_thres[0] &
+ IEEE80211_PPE_THRES_NSS_MASK;
+ arg->peer_ppet.ru_bit_mask =
+ (he_cap->ppe_thres[0] &
+ IEEE80211_PPE_THRES_RU_INDEX_BITMASK_MASK) >>
+ IEEE80211_PPE_THRES_RU_INDEX_BITMASK_POS;
+
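+		/* The PPE Thresholds field packs one 6-bit PPET16/PPET8 pair
+		 * per (NSS, RU) combination, starting at bit 7; unpack them
+		 * into the 6-bit slots of ppet16_ppet8_ru3_ru0[nss].
+		 */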
+ for (nss = 0; nss <= arg->peer_ppet.numss_m1; nss++) {
+ for (ru = 0; ru < 4; ru++) {
+ u32 val = 0;
+ int i;
+
+ if ((arg->peer_ppet.ru_bit_mask & BIT(ru)) == 0)
+ continue;
+ for (i = 0; i < 6; i++) {
+ val >>= 1;
+ val |= ((he_cap->ppe_thres[bit / 8] >>
+ (bit % 8)) & 0x1) << 5;
+ bit++;
+ }
+ arg->peer_ppet.ppet16_ppet8_ru3_ru0[nss] |=
+ val << (ru * 6);
+ }
+ }
+ }
+
+ if (he_cap->he_cap_elem.mac_cap_info[0] & IEEE80211_HE_MAC_CAP0_TWT_RES)
+ arg->twt_responder = true;
+ if (he_cap->he_cap_elem.mac_cap_info[0] & IEEE80211_HE_MAC_CAP0_TWT_REQ)
+ arg->twt_requester = true;
+
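+	/* The HE MCS/NSS sets are cumulative: a 160 MHz (and 80+80 MHz)
+	 * capable peer also reports the 80 MHz set, hence the fall through
+	 * into the default case.
+	 */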
+ switch (sta->bandwidth) {
+ case IEEE80211_STA_RX_BW_160:
+ if (he_cap->he_cap_elem.phy_cap_info[0] &
+ IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_80PLUS80_MHZ_IN_5G) {
+ v = le16_to_cpu(he_cap->he_mcs_nss_supp.rx_mcs_80p80);
+ arg->peer_he_rx_mcs_set[WMI_HECAP_TXRX_MCS_NSS_IDX_80_80] = v;
+
+ v = le16_to_cpu(he_cap->he_mcs_nss_supp.tx_mcs_80p80);
+ arg->peer_he_tx_mcs_set[WMI_HECAP_TXRX_MCS_NSS_IDX_80_80] = v;
+
+ arg->peer_he_mcs_count++;
+ }
+ v = le16_to_cpu(he_cap->he_mcs_nss_supp.rx_mcs_160);
+ arg->peer_he_rx_mcs_set[WMI_HECAP_TXRX_MCS_NSS_IDX_160] = v;
+
+ v = le16_to_cpu(he_cap->he_mcs_nss_supp.tx_mcs_160);
+ arg->peer_he_tx_mcs_set[WMI_HECAP_TXRX_MCS_NSS_IDX_160] = v;
+
+ arg->peer_he_mcs_count++;
+ /* fall through */
+
+ default:
+ v = le16_to_cpu(he_cap->he_mcs_nss_supp.rx_mcs_80);
+ arg->peer_he_rx_mcs_set[WMI_HECAP_TXRX_MCS_NSS_IDX_80] = v;
+
+ v = le16_to_cpu(he_cap->he_mcs_nss_supp.tx_mcs_80);
+ arg->peer_he_tx_mcs_set[WMI_HECAP_TXRX_MCS_NSS_IDX_80] = v;
+
+ arg->peer_he_mcs_count++;
+ break;
+ }
+}
+
+static void ath11k_peer_assoc_h_smps(struct ieee80211_sta *sta,
+ struct peer_assoc_params *arg)
+{
+ const struct ieee80211_sta_ht_cap *ht_cap = &sta->ht_cap;
+ int smps;
+
+ if (!ht_cap->ht_supported)
+ return;
+
+ smps = ht_cap->cap & IEEE80211_HT_CAP_SM_PS;
+ smps >>= IEEE80211_HT_CAP_SM_PS_SHIFT;
+
+ switch (smps) {
+ case WLAN_HT_CAP_SM_PS_STATIC:
+ arg->static_mimops_flag = true;
+ break;
+ case WLAN_HT_CAP_SM_PS_DYNAMIC:
+ arg->dynamic_mimops_flag = true;
+ break;
+ case WLAN_HT_CAP_SM_PS_DISABLED:
+ arg->spatial_mux_flag = true;
+ break;
+ default:
+ break;
+ }
+}
+
+static void ath11k_peer_assoc_h_qos(struct ath11k *ar,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta,
+ struct peer_assoc_params *arg)
+{
+ struct ath11k_vif *arvif = (void *)vif->drv_priv;
+
+ switch (arvif->vdev_type) {
+ case WMI_VDEV_TYPE_AP:
+ if (sta->wme) {
+ /* TODO: Check WME vs QoS */
+ arg->is_wme_set = true;
+ arg->qos_flag = true;
+ }
+
+ if (sta->wme && sta->uapsd_queues) {
+ /* TODO: Check WME vs QoS */
+ arg->is_wme_set = true;
+ arg->apsd_flag = true;
+ arg->peer_rate_caps |= WMI_HOST_RC_UAPSD_FLAG;
+ }
+ break;
+ case WMI_VDEV_TYPE_STA:
+ if (sta->wme) {
+ arg->is_wme_set = true;
+ arg->qos_flag = true;
+ }
+ break;
+ default:
+ break;
+ }
+
+ ath11k_dbg(ar->ab, ATH11K_DBG_MAC, "mac peer %pM qos %d\n",
+ sta->addr, arg->qos_flag);
+}
+
+static int ath11k_peer_assoc_qos_ap(struct ath11k *ar,
+ struct ath11k_vif *arvif,
+ struct ieee80211_sta *sta)
+{
+ struct ap_ps_params params;
+ u32 max_sp;
+ u32 uapsd;
+ int ret;
+
+ lockdep_assert_held(&ar->conf_mutex);
+
+ params.vdev_id = arvif->vdev_id;
+
+ ath11k_dbg(ar->ab, ATH11K_DBG_MAC, "mac uapsd_queues 0x%x max_sp %d\n",
+ sta->uapsd_queues, sta->max_sp);
+
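+	/* Map the mac80211 WMM ACs onto the WMI AP PS UAPSD bits:
+	 * VO -> AC3, VI -> AC2, BK -> AC1, BE -> AC0.
+	 */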
+ uapsd = 0;
+ if (sta->uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_VO)
+ uapsd |= WMI_AP_PS_UAPSD_AC3_DELIVERY_EN |
+ WMI_AP_PS_UAPSD_AC3_TRIGGER_EN;
+ if (sta->uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_VI)
+ uapsd |= WMI_AP_PS_UAPSD_AC2_DELIVERY_EN |
+ WMI_AP_PS_UAPSD_AC2_TRIGGER_EN;
+ if (sta->uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_BK)
+ uapsd |= WMI_AP_PS_UAPSD_AC1_DELIVERY_EN |
+ WMI_AP_PS_UAPSD_AC1_TRIGGER_EN;
+ if (sta->uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_BE)
+ uapsd |= WMI_AP_PS_UAPSD_AC0_DELIVERY_EN |
+ WMI_AP_PS_UAPSD_AC0_TRIGGER_EN;
+
+ max_sp = 0;
+ if (sta->max_sp < MAX_WMI_AP_PS_PEER_PARAM_MAX_SP)
+ max_sp = sta->max_sp;
+
+ params.param = WMI_AP_PS_PEER_PARAM_UAPSD;
+ params.value = uapsd;
+ ret = ath11k_wmi_send_set_ap_ps_param_cmd(ar, sta->addr, &params);
+ if (ret)
+ goto err;
+
+ params.param = WMI_AP_PS_PEER_PARAM_MAX_SP;
+ params.value = max_sp;
+ ret = ath11k_wmi_send_set_ap_ps_param_cmd(ar, sta->addr, &params);
+ if (ret)
+ goto err;
+
+ /* TODO revisit during testing */
+ params.param = WMI_AP_PS_PEER_PARAM_SIFS_RESP_FRMTYPE;
+ params.value = DISABLE_SIFS_RESPONSE_TRIGGER;
+ ret = ath11k_wmi_send_set_ap_ps_param_cmd(ar, sta->addr, &params);
+ if (ret)
+ goto err;
+
+ params.param = WMI_AP_PS_PEER_PARAM_SIFS_RESP_UAPSD;
+ params.value = DISABLE_SIFS_RESPONSE_TRIGGER;
+ ret = ath11k_wmi_send_set_ap_ps_param_cmd(ar, sta->addr, &params);
+ if (ret)
+ goto err;
+
+ return 0;
+
+err:
+ ath11k_warn(ar->ab, "failed to set ap ps peer param %d for vdev %i: %d\n",
+ params.param, arvif->vdev_id, ret);
+ return ret;
+}
+
+static bool ath11k_mac_sta_has_ofdm_only(struct ieee80211_sta *sta)
+{
+ return sta->supp_rates[NL80211_BAND_2GHZ] >>
+ ATH11K_MAC_FIRST_OFDM_RATE_IDX;
+}
+
+static enum wmi_phy_mode ath11k_mac_get_phymode_vht(struct ath11k *ar,
+ struct ieee80211_sta *sta)
+{
+ if (sta->bandwidth == IEEE80211_STA_RX_BW_160) {
+ switch (sta->vht_cap.cap &
+ IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_MASK) {
+ case IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_160MHZ:
+ return MODE_11AC_VHT160;
+ case IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_160_80PLUS80MHZ:
+ return MODE_11AC_VHT80_80;
+ default:
+ /* not sure if this is a valid case? */
+ return MODE_11AC_VHT160;
+ }
+ }
+
+ if (sta->bandwidth == IEEE80211_STA_RX_BW_80)
+ return MODE_11AC_VHT80;
+
+ if (sta->bandwidth == IEEE80211_STA_RX_BW_40)
+ return MODE_11AC_VHT40;
+
+ if (sta->bandwidth == IEEE80211_STA_RX_BW_20)
+ return MODE_11AC_VHT20;
+
+ return MODE_UNKNOWN;
+}
+
+static enum wmi_phy_mode ath11k_mac_get_phymode_he(struct ath11k *ar,
+ struct ieee80211_sta *sta)
+{
+ if (sta->bandwidth == IEEE80211_STA_RX_BW_160) {
+ if (sta->he_cap.he_cap_elem.phy_cap_info[0] &
+ IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_160MHZ_IN_5G)
+ return MODE_11AX_HE160;
+ else if (sta->he_cap.he_cap_elem.phy_cap_info[0] &
+ IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_80PLUS80_MHZ_IN_5G)
+ return MODE_11AX_HE80_80;
+ /* not sure if this is a valid case? */
+ return MODE_11AX_HE160;
+ }
+
+ if (sta->bandwidth == IEEE80211_STA_RX_BW_80)
+ return MODE_11AX_HE80;
+
+ if (sta->bandwidth == IEEE80211_STA_RX_BW_40)
+ return MODE_11AX_HE40;
+
+ if (sta->bandwidth == IEEE80211_STA_RX_BW_20)
+ return MODE_11AX_HE20;
+
+ return MODE_UNKNOWN;
+}
+
+static void ath11k_peer_assoc_h_phymode(struct ath11k *ar,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta,
+ struct peer_assoc_params *arg)
+{
+ struct ath11k_vif *arvif = (void *)vif->drv_priv;
+ struct cfg80211_chan_def def;
+ enum nl80211_band band;
+ const u8 *ht_mcs_mask;
+ const u16 *vht_mcs_mask;
+ enum wmi_phy_mode phymode = MODE_UNKNOWN;
+
+ if (WARN_ON(ath11k_mac_vif_chan(vif, &def)))
+ return;
+
+ band = def.chan->band;
+ ht_mcs_mask = arvif->bitrate_mask.control[band].ht_mcs;
+ vht_mcs_mask = arvif->bitrate_mask.control[band].vht_mcs;
+
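+	/* Pick the newest PHY mode both ends support: HE over VHT over HT,
+	 * and OFDM over CCK on 2 GHz.
+	 */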
+ switch (band) {
+ case NL80211_BAND_2GHZ:
+ if (sta->he_cap.has_he) {
+ if (sta->bandwidth == IEEE80211_STA_RX_BW_80)
+ phymode = MODE_11AX_HE80_2G;
+ else if (sta->bandwidth == IEEE80211_STA_RX_BW_40)
+ phymode = MODE_11AX_HE40_2G;
+ else
+ phymode = MODE_11AX_HE20_2G;
+ } else if (sta->vht_cap.vht_supported &&
+ !ath11k_peer_assoc_h_vht_masked(vht_mcs_mask)) {
+ if (sta->bandwidth == IEEE80211_STA_RX_BW_40)
+ phymode = MODE_11AC_VHT40;
+ else
+ phymode = MODE_11AC_VHT20;
+ } else if (sta->ht_cap.ht_supported &&
+ !ath11k_peer_assoc_h_ht_masked(ht_mcs_mask)) {
+ if (sta->bandwidth == IEEE80211_STA_RX_BW_40)
+ phymode = MODE_11NG_HT40;
+ else
+ phymode = MODE_11NG_HT20;
+ } else if (ath11k_mac_sta_has_ofdm_only(sta)) {
+ phymode = MODE_11G;
+ } else {
+ phymode = MODE_11B;
+ }
+ break;
+ case NL80211_BAND_5GHZ:
+ /* Check HE first */
+ if (sta->he_cap.has_he) {
+ phymode = ath11k_mac_get_phymode_he(ar, sta);
+ } else if (sta->vht_cap.vht_supported &&
+ !ath11k_peer_assoc_h_vht_masked(vht_mcs_mask)) {
+ phymode = ath11k_mac_get_phymode_vht(ar, sta);
+ } else if (sta->ht_cap.ht_supported &&
+ !ath11k_peer_assoc_h_ht_masked(ht_mcs_mask)) {
+ if (sta->bandwidth >= IEEE80211_STA_RX_BW_40)
+ phymode = MODE_11NA_HT40;
+ else
+ phymode = MODE_11NA_HT20;
+ } else {
+ phymode = MODE_11A;
+ }
+ break;
+ default:
+ break;
+ }
+
+ ath11k_dbg(ar->ab, ATH11K_DBG_MAC, "mac peer %pM phymode %s\n",
+ sta->addr, ath11k_wmi_phymode_str(phymode));
+
+ arg->peer_phymode = phymode;
+ WARN_ON(phymode == MODE_UNKNOWN);
+}
+
+static void ath11k_peer_assoc_prepare(struct ath11k *ar,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta,
+ struct peer_assoc_params *arg,
+ bool reassoc)
+{
+ lockdep_assert_held(&ar->conf_mutex);
+
+ memset(arg, 0, sizeof(*arg));
+
+ reinit_completion(&ar->peer_assoc_done);
+
+ arg->peer_new_assoc = !reassoc;
+ ath11k_peer_assoc_h_basic(ar, vif, sta, arg);
+ ath11k_peer_assoc_h_crypto(ar, vif, sta, arg);
+ ath11k_peer_assoc_h_rates(ar, vif, sta, arg);
+ ath11k_peer_assoc_h_ht(ar, vif, sta, arg);
+ ath11k_peer_assoc_h_vht(ar, vif, sta, arg);
+ ath11k_peer_assoc_h_he(ar, vif, sta, arg);
+ ath11k_peer_assoc_h_qos(ar, vif, sta, arg);
+ ath11k_peer_assoc_h_phymode(ar, vif, sta, arg);
+ ath11k_peer_assoc_h_smps(sta, arg);
+
+ /* TODO: amsdu_disable req? */
+}
+
+static int ath11k_setup_peer_smps(struct ath11k *ar, struct ath11k_vif *arvif,
+ const u8 *addr,
+ const struct ieee80211_sta_ht_cap *ht_cap)
+{
+ int smps;
+
+ if (!ht_cap->ht_supported)
+ return 0;
+
+ smps = ht_cap->cap & IEEE80211_HT_CAP_SM_PS;
+ smps >>= IEEE80211_HT_CAP_SM_PS_SHIFT;
+
+ if (smps >= ARRAY_SIZE(ath11k_smps_map))
+ return -EINVAL;
+
+ return ath11k_wmi_set_peer_param(ar, addr, arvif->vdev_id,
+ WMI_PEER_MIMO_PS_STATE,
+ ath11k_smps_map[smps]);
+}
+
+static void ath11k_bss_assoc(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_bss_conf *bss_conf)
+{
+ struct ath11k *ar = hw->priv;
+ struct ath11k_vif *arvif = (void *)vif->drv_priv;
+ struct peer_assoc_params peer_arg;
+ struct ieee80211_sta *ap_sta;
+ int ret;
+
+ lockdep_assert_held(&ar->conf_mutex);
+
+ ath11k_dbg(ar->ab, ATH11K_DBG_MAC, "mac vdev %i assoc bssid %pM aid %d\n",
+ arvif->vdev_id, arvif->bssid, arvif->aid);
+
+ rcu_read_lock();
+
+ ap_sta = ieee80211_find_sta(vif, bss_conf->bssid);
+ if (!ap_sta) {
+ ath11k_warn(ar->ab, "failed to find station entry for bss %pM vdev %i\n",
+ bss_conf->bssid, arvif->vdev_id);
+ rcu_read_unlock();
+ return;
+ }
+
+ ath11k_peer_assoc_prepare(ar, vif, ap_sta, &peer_arg, false);
+
+ rcu_read_unlock();
+
+ ret = ath11k_wmi_send_peer_assoc_cmd(ar, &peer_arg);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to run peer assoc for %pM vdev %i: %d\n",
+ bss_conf->bssid, arvif->vdev_id, ret);
+ return;
+ }
+
+ if (!wait_for_completion_timeout(&ar->peer_assoc_done, 1 * HZ)) {
+ ath11k_warn(ar->ab, "failed to get peer assoc conf event for %pM vdev %i\n",
+ bss_conf->bssid, arvif->vdev_id);
+ return;
+ }
+
+ ret = ath11k_setup_peer_smps(ar, arvif, bss_conf->bssid,
+ &ap_sta->ht_cap);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to setup peer SMPS for vdev %d: %d\n",
+ arvif->vdev_id, ret);
+ return;
+ }
+
+ WARN_ON(arvif->is_up);
+
+ arvif->aid = bss_conf->aid;
+ ether_addr_copy(arvif->bssid, bss_conf->bssid);
+
+ ret = ath11k_wmi_vdev_up(ar, arvif->vdev_id, arvif->aid, arvif->bssid);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to set vdev %d up: %d\n",
+ arvif->vdev_id, ret);
+ return;
+ }
+
+ arvif->is_up = true;
+
+ ath11k_dbg(ar->ab, ATH11K_DBG_MAC,
+ "mac vdev %d up (associated) bssid %pM aid %d\n",
+ arvif->vdev_id, bss_conf->bssid, bss_conf->aid);
+
+ /* Authorize BSS Peer */
+ ret = ath11k_wmi_set_peer_param(ar, arvif->bssid,
+ arvif->vdev_id,
+ WMI_PEER_AUTHORIZE,
+ 1);
+ if (ret)
+ ath11k_warn(ar->ab, "Unable to authorize BSS peer: %d\n", ret);
+
+ ret = ath11k_wmi_send_obss_spr_cmd(ar, arvif->vdev_id,
+ &bss_conf->he_obss_pd);
+ if (ret)
+ ath11k_warn(ar->ab, "failed to set vdev %i OBSS PD parameters: %d\n",
+ arvif->vdev_id, ret);
+}
+
+static void ath11k_bss_disassoc(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif)
+{
+ struct ath11k *ar = hw->priv;
+ struct ath11k_vif *arvif = (void *)vif->drv_priv;
+ int ret;
+
+ lockdep_assert_held(&ar->conf_mutex);
+
+ ath11k_dbg(ar->ab, ATH11K_DBG_MAC, "mac vdev %i disassoc bssid %pM\n",
+ arvif->vdev_id, arvif->bssid);
+
+ ret = ath11k_wmi_vdev_down(ar, arvif->vdev_id);
+ if (ret)
+ ath11k_warn(ar->ab, "failed to down vdev %i: %d\n",
+ arvif->vdev_id, ret);
+
+ arvif->is_up = false;
+
+ /* TODO: cancel connection_loss_work */
+}
+
+static int ath11k_mac_get_rate_hw_value(int bitrate)
+{
+ u32 preamble;
+ u16 hw_value;
+ int rate;
+ size_t i;
+
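+	/* Look up the hw value for the given legacy bitrate and pack it
+	 * together with the preamble type into an ath11k hw rate code.
+	 */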
+ if (ath11k_mac_bitrate_is_cck(bitrate))
+ preamble = WMI_RATE_PREAMBLE_CCK;
+ else
+ preamble = WMI_RATE_PREAMBLE_OFDM;
+
+ for (i = 0; i < ARRAY_SIZE(ath11k_legacy_rates); i++) {
+ if (ath11k_legacy_rates[i].bitrate != bitrate)
+ continue;
+
+ hw_value = ath11k_legacy_rates[i].hw_value;
+ rate = ATH11K_HW_RATE_CODE(hw_value, 0, preamble);
+
+ return rate;
+ }
+
+ return -EINVAL;
+}
+
+static void ath11k_recalculate_mgmt_rate(struct ath11k *ar,
+ struct ieee80211_vif *vif,
+ struct cfg80211_chan_def *def)
+{
+ struct ath11k_vif *arvif = (void *)vif->drv_priv;
+ const struct ieee80211_supported_band *sband;
+ u8 basic_rate_idx;
+ int hw_rate_code;
+ u32 vdev_param;
+ u16 bitrate;
+ int ret;
+
+ lockdep_assert_held(&ar->conf_mutex);
+
+ sband = ar->hw->wiphy->bands[def->chan->band];
+ basic_rate_idx = ffs(vif->bss_conf.basic_rates) - 1;
+ bitrate = sband->bitrates[basic_rate_idx].bitrate;
+
+ hw_rate_code = ath11k_mac_get_rate_hw_value(bitrate);
+ if (hw_rate_code < 0) {
+ ath11k_warn(ar->ab, "bitrate not supported %d\n", bitrate);
+ return;
+ }
+
+ vdev_param = WMI_VDEV_PARAM_MGMT_RATE;
+ ret = ath11k_wmi_vdev_set_param_cmd(ar, arvif->vdev_id, vdev_param,
+ hw_rate_code);
+ if (ret)
+ ath11k_warn(ar->ab, "failed to set mgmt tx rate %d\n", ret);
+
+ vdev_param = WMI_VDEV_PARAM_BEACON_RATE;
+ ret = ath11k_wmi_vdev_set_param_cmd(ar, arvif->vdev_id, vdev_param,
+ hw_rate_code);
+ if (ret)
+ ath11k_warn(ar->ab, "failed to set beacon tx rate %d\n", ret);
+}
+
+static void ath11k_mac_op_bss_info_changed(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_bss_conf *info,
+ u32 changed)
+{
+ struct ath11k *ar = hw->priv;
+ struct ath11k_vif *arvif = ath11k_vif_to_arvif(vif);
+ struct cfg80211_chan_def def;
+ u32 param_id, param_value;
+ enum nl80211_band band;
+ u32 vdev_param;
+ int mcast_rate;
+ u32 preamble;
+ u16 hw_value;
+ u16 bitrate;
+ int ret = 0;
+ u8 rateidx;
+ u32 rate;
+
+ mutex_lock(&ar->conf_mutex);
+
+ if (changed & BSS_CHANGED_BEACON_INT) {
+ arvif->beacon_interval = info->beacon_int;
+
+ param_id = WMI_VDEV_PARAM_BEACON_INTERVAL;
+ ret = ath11k_wmi_vdev_set_param_cmd(ar, arvif->vdev_id,
+ param_id,
+ arvif->beacon_interval);
+ if (ret)
+ ath11k_warn(ar->ab, "Failed to set beacon interval for VDEV: %d\n",
+ arvif->vdev_id);
+ else
+ ath11k_dbg(ar->ab, ATH11K_DBG_MAC,
+ "Beacon interval: %d set for VDEV: %d\n",
+ arvif->beacon_interval, arvif->vdev_id);
+ }
+
+ if (changed & BSS_CHANGED_BEACON) {
+ param_id = WMI_PDEV_PARAM_BEACON_TX_MODE;
+ param_value = WMI_BEACON_STAGGERED_MODE;
+ ret = ath11k_wmi_pdev_set_param(ar, param_id,
+ param_value, ar->pdev->pdev_id);
+ if (ret)
+ ath11k_warn(ar->ab, "Failed to set beacon mode for VDEV: %d\n",
+ arvif->vdev_id);
+ else
+ ath11k_dbg(ar->ab, ATH11K_DBG_MAC,
+ "Set staggered beacon mode for VDEV: %d\n",
+ arvif->vdev_id);
+
+ ret = ath11k_mac_setup_bcn_tmpl(arvif);
+ if (ret)
+ ath11k_warn(ar->ab, "failed to update bcn template: %d\n",
+ ret);
+
+ if (vif->bss_conf.he_support) {
+ ret = ath11k_wmi_vdev_set_param_cmd(ar, arvif->vdev_id,
+ WMI_VDEV_PARAM_BA_MODE,
+ WMI_BA_MODE_BUFFER_SIZE_256);
+ if (ret)
+ ath11k_warn(ar->ab,
+ "failed to set BA BUFFER SIZE 256 for vdev: %d\n",
+ arvif->vdev_id);
+ else
+ ath11k_dbg(ar->ab, ATH11K_DBG_MAC,
+ "Set BA BUFFER SIZE 256 for VDEV: %d\n",
+ arvif->vdev_id);
+ }
+ }
+
+ if (changed & (BSS_CHANGED_BEACON_INFO | BSS_CHANGED_BEACON)) {
+ arvif->dtim_period = info->dtim_period;
+
+ param_id = WMI_VDEV_PARAM_DTIM_PERIOD;
+ ret = ath11k_wmi_vdev_set_param_cmd(ar, arvif->vdev_id,
+ param_id,
+ arvif->dtim_period);
+
+ if (ret)
+ ath11k_warn(ar->ab, "Failed to set dtim period for VDEV %d: %i\n",
+ arvif->vdev_id, ret);
+ else
+ ath11k_dbg(ar->ab, ATH11K_DBG_MAC,
+ "DTIM period: %d set for VDEV: %d\n",
+ arvif->dtim_period, arvif->vdev_id);
+ }
+
+ if (changed & BSS_CHANGED_SSID &&
+ vif->type == NL80211_IFTYPE_AP) {
+ arvif->u.ap.ssid_len = info->ssid_len;
+ if (info->ssid_len)
+ memcpy(arvif->u.ap.ssid, info->ssid, info->ssid_len);
+ arvif->u.ap.hidden_ssid = info->hidden_ssid;
+ }
+
+ if (changed & BSS_CHANGED_BSSID && !is_zero_ether_addr(info->bssid))
+ ether_addr_copy(arvif->bssid, info->bssid);
+
+ if (changed & BSS_CHANGED_BEACON_ENABLED)
+ ath11k_control_beaconing(arvif, info);
+
+ if (changed & BSS_CHANGED_ERP_CTS_PROT) {
+ u32 cts_prot;
+
+ cts_prot = !!(info->use_cts_prot);
+ param_id = WMI_VDEV_PARAM_PROTECTION_MODE;
+
+ if (arvif->is_started) {
+ ret = ath11k_wmi_vdev_set_param_cmd(ar, arvif->vdev_id,
+ param_id, cts_prot);
+ if (ret)
+ ath11k_warn(ar->ab, "Failed to set CTS prot for VDEV: %d\n",
+ arvif->vdev_id);
+ else
+ ath11k_dbg(ar->ab, ATH11K_DBG_MAC, "Set CTS prot: %d for VDEV: %d\n",
+ cts_prot, arvif->vdev_id);
+ } else {
+ ath11k_dbg(ar->ab, ATH11K_DBG_MAC, "defer protection mode setup, vdev is not ready yet\n");
+ }
+ }
+
+ if (changed & BSS_CHANGED_ERP_SLOT) {
+ u32 slottime;
+
+		if (info->use_short_slot)
+			slottime = WMI_VDEV_SLOT_TIME_SHORT; /* 9us */
+		else
+			slottime = WMI_VDEV_SLOT_TIME_LONG; /* 20us */
+
+ param_id = WMI_VDEV_PARAM_SLOT_TIME;
+ ret = ath11k_wmi_vdev_set_param_cmd(ar, arvif->vdev_id,
+ param_id, slottime);
+ if (ret)
+ ath11k_warn(ar->ab, "Failed to set erp slot for VDEV: %d\n",
+ arvif->vdev_id);
+ else
+ ath11k_dbg(ar->ab, ATH11K_DBG_MAC,
+ "Set slottime: %d for VDEV: %d\n",
+ slottime, arvif->vdev_id);
+ }
+
+ if (changed & BSS_CHANGED_ERP_PREAMBLE) {
+ u32 preamble;
+
+ if (info->use_short_preamble)
+ preamble = WMI_VDEV_PREAMBLE_SHORT;
+ else
+ preamble = WMI_VDEV_PREAMBLE_LONG;
+
+ param_id = WMI_VDEV_PARAM_PREAMBLE;
+ ret = ath11k_wmi_vdev_set_param_cmd(ar, arvif->vdev_id,
+ param_id, preamble);
+ if (ret)
+ ath11k_warn(ar->ab, "Failed to set preamble for VDEV: %d\n",
+ arvif->vdev_id);
+ else
+ ath11k_dbg(ar->ab, ATH11K_DBG_MAC,
+ "Set preamble: %d for VDEV: %d\n",
+ preamble, arvif->vdev_id);
+ }
+
+ if (changed & BSS_CHANGED_ASSOC) {
+ if (info->assoc)
+ ath11k_bss_assoc(hw, vif, info);
+ else
+ ath11k_bss_disassoc(hw, vif);
+ }
+
+ if (changed & BSS_CHANGED_TXPOWER) {
+ ath11k_dbg(ar->ab, ATH11K_DBG_MAC, "mac vdev_id %i txpower %d\n",
+ arvif->vdev_id, info->txpower);
+
+ arvif->txpower = info->txpower;
+ ath11k_mac_txpower_recalc(ar);
+ }
+
+ if (changed & BSS_CHANGED_MCAST_RATE &&
+ !ath11k_mac_vif_chan(arvif->vif, &def)) {
+ band = def.chan->band;
+ mcast_rate = vif->bss_conf.mcast_rate[band];
+
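+		/* bss_conf.mcast_rate is a 1-based index into the band's
+		 * legacy rates; 0 means none was configured, so fall back to
+		 * the lowest basic rate. 5 GHz capable pdevs skip the CCK
+		 * entries at the start of ath11k_legacy_rates.
+		 */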
+ if (mcast_rate > 0)
+ rateidx = mcast_rate - 1;
+ else
+ rateidx = ffs(vif->bss_conf.basic_rates) - 1;
+
+ if (ar->pdev->cap.supported_bands & WMI_HOST_WLAN_5G_CAP)
+ rateidx += ATH11K_MAC_FIRST_OFDM_RATE_IDX;
+
+ bitrate = ath11k_legacy_rates[rateidx].bitrate;
+ hw_value = ath11k_legacy_rates[rateidx].hw_value;
+
+ if (ath11k_mac_bitrate_is_cck(bitrate))
+ preamble = WMI_RATE_PREAMBLE_CCK;
+ else
+ preamble = WMI_RATE_PREAMBLE_OFDM;
+
+ rate = ATH11K_HW_RATE_CODE(hw_value, 0, preamble);
+
+ ath11k_dbg(ar->ab, ATH11K_DBG_MAC,
+ "mac vdev %d mcast_rate %x\n",
+ arvif->vdev_id, rate);
+
+ vdev_param = WMI_VDEV_PARAM_MCAST_DATA_RATE;
+ ret = ath11k_wmi_vdev_set_param_cmd(ar, arvif->vdev_id,
+ vdev_param, rate);
+ if (ret)
+ ath11k_warn(ar->ab,
+ "failed to set mcast rate on vdev %i: %d\n",
+ arvif->vdev_id, ret);
+
+ vdev_param = WMI_VDEV_PARAM_BCAST_DATA_RATE;
+ ret = ath11k_wmi_vdev_set_param_cmd(ar, arvif->vdev_id,
+ vdev_param, rate);
+ if (ret)
+ ath11k_warn(ar->ab,
+ "failed to set bcast rate on vdev %i: %d\n",
+ arvif->vdev_id, ret);
+ }
+
+ if (changed & BSS_CHANGED_BASIC_RATES &&
+ !ath11k_mac_vif_chan(arvif->vif, &def))
+ ath11k_recalculate_mgmt_rate(ar, vif, &def);
+
+ if (changed & BSS_CHANGED_TWT) {
+ if (info->twt_requester || info->twt_responder)
+ ath11k_wmi_send_twt_enable_cmd(ar, ar->pdev->pdev_id);
+ else
+ ath11k_wmi_send_twt_disable_cmd(ar, ar->pdev->pdev_id);
+ }
+
+ if (changed & BSS_CHANGED_HE_OBSS_PD)
+ ath11k_wmi_send_obss_spr_cmd(ar, arvif->vdev_id,
+ &info->he_obss_pd);
+
+ mutex_unlock(&ar->conf_mutex);
+}
+
+void __ath11k_mac_scan_finish(struct ath11k *ar)
+{
+ lockdep_assert_held(&ar->data_lock);
+
+ switch (ar->scan.state) {
+ case ATH11K_SCAN_IDLE:
+ break;
+ case ATH11K_SCAN_RUNNING:
+ case ATH11K_SCAN_ABORTING:
+ if (!ar->scan.is_roc) {
+ struct cfg80211_scan_info info = {
+ .aborted = (ar->scan.state ==
+ ATH11K_SCAN_ABORTING),
+ };
+
+ ieee80211_scan_completed(ar->hw, &info);
+ } else if (ar->scan.roc_notify) {
+ ieee80211_remain_on_channel_expired(ar->hw);
+ }
+ /* fall through */
+ case ATH11K_SCAN_STARTING:
+ ar->scan.state = ATH11K_SCAN_IDLE;
+ ar->scan_channel = NULL;
+ ar->scan.roc_freq = 0;
+ cancel_delayed_work(&ar->scan.timeout);
+ complete(&ar->scan.completed);
+ break;
+ }
+}
+
+void ath11k_mac_scan_finish(struct ath11k *ar)
+{
+ spin_lock_bh(&ar->data_lock);
+ __ath11k_mac_scan_finish(ar);
+ spin_unlock_bh(&ar->data_lock);
+}
+
+static int ath11k_scan_stop(struct ath11k *ar)
+{
+ struct scan_cancel_param arg = {
+ .req_type = WLAN_SCAN_CANCEL_SINGLE,
+ .scan_id = ATH11K_SCAN_ID,
+ };
+ int ret;
+
+ lockdep_assert_held(&ar->conf_mutex);
+
+ /* TODO: Fill other STOP Params */
+ arg.pdev_id = ar->pdev->pdev_id;
+
+ ret = ath11k_wmi_send_scan_stop_cmd(ar, &arg);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to stop wmi scan: %d\n", ret);
+ goto out;
+ }
+
+ ret = wait_for_completion_timeout(&ar->scan.completed, 3 * HZ);
+ if (ret == 0) {
+ ath11k_warn(ar->ab,
+			    "failed to receive scan abort completion: timed out\n");
+ ret = -ETIMEDOUT;
+ } else if (ret > 0) {
+ ret = 0;
+ }
+
+out:
+ /* Scan state should be updated upon scan completion but in case
+ * firmware fails to deliver the event (for whatever reason) it is
+ * desired to clean up scan state anyway. Firmware may have just
+ * dropped the scan completion event delivery due to transport pipe
+	 * being overrun with data and/or it can recover on its own before
+ * next scan request is submitted.
+ */
+ spin_lock_bh(&ar->data_lock);
+ if (ar->scan.state != ATH11K_SCAN_IDLE)
+ __ath11k_mac_scan_finish(ar);
+ spin_unlock_bh(&ar->data_lock);
+
+ return ret;
+}
+
+static void ath11k_scan_abort(struct ath11k *ar)
+{
+ int ret;
+
+ lockdep_assert_held(&ar->conf_mutex);
+
+ spin_lock_bh(&ar->data_lock);
+
+ switch (ar->scan.state) {
+ case ATH11K_SCAN_IDLE:
+ /* This can happen if timeout worker kicked in and called
+ * abortion while scan completion was being processed.
+ */
+ break;
+ case ATH11K_SCAN_STARTING:
+ case ATH11K_SCAN_ABORTING:
+ ath11k_warn(ar->ab, "refusing scan abortion due to invalid scan state: %d\n",
+ ar->scan.state);
+ break;
+ case ATH11K_SCAN_RUNNING:
+ ar->scan.state = ATH11K_SCAN_ABORTING;
+ spin_unlock_bh(&ar->data_lock);
+
+ ret = ath11k_scan_stop(ar);
+ if (ret)
+ ath11k_warn(ar->ab, "failed to abort scan: %d\n", ret);
+
+ spin_lock_bh(&ar->data_lock);
+ break;
+ }
+
+ spin_unlock_bh(&ar->data_lock);
+}
+
+static void ath11k_scan_timeout_work(struct work_struct *work)
+{
+ struct ath11k *ar = container_of(work, struct ath11k,
+ scan.timeout.work);
+
+ mutex_lock(&ar->conf_mutex);
+ ath11k_scan_abort(ar);
+ mutex_unlock(&ar->conf_mutex);
+}
+
+static int ath11k_start_scan(struct ath11k *ar,
+ struct scan_req_params *arg)
+{
+ int ret;
+
+ lockdep_assert_held(&ar->conf_mutex);
+
+ ret = ath11k_wmi_send_scan_start_cmd(ar, arg);
+ if (ret)
+ return ret;
+
+ ret = wait_for_completion_timeout(&ar->scan.started, 1 * HZ);
+ if (ret == 0) {
+ ret = ath11k_scan_stop(ar);
+ if (ret)
+ ath11k_warn(ar->ab, "failed to stop scan: %d\n", ret);
+
+ return -ETIMEDOUT;
+ }
+
+ /* If we failed to start the scan, return error code at
+ * this point. This is probably due to some issue in the
+ * firmware, but no need to wedge the driver due to that...
+ */
+ spin_lock_bh(&ar->data_lock);
+ if (ar->scan.state == ATH11K_SCAN_IDLE) {
+ spin_unlock_bh(&ar->data_lock);
+ return -EINVAL;
+ }
+ spin_unlock_bh(&ar->data_lock);
+
+ return 0;
+}
+
+static int ath11k_mac_op_hw_scan(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_scan_request *hw_req)
+{
+ struct ath11k *ar = hw->priv;
+ struct ath11k_vif *arvif = ath11k_vif_to_arvif(vif);
+ struct cfg80211_scan_request *req = &hw_req->req;
+ struct scan_req_params arg;
+ int ret = 0;
+ int i;
+
+ mutex_lock(&ar->conf_mutex);
+
+ spin_lock_bh(&ar->data_lock);
+ switch (ar->scan.state) {
+ case ATH11K_SCAN_IDLE:
+ reinit_completion(&ar->scan.started);
+ reinit_completion(&ar->scan.completed);
+ ar->scan.state = ATH11K_SCAN_STARTING;
+ ar->scan.is_roc = false;
+ ar->scan.vdev_id = arvif->vdev_id;
+ ret = 0;
+ break;
+ case ATH11K_SCAN_STARTING:
+ case ATH11K_SCAN_RUNNING:
+ case ATH11K_SCAN_ABORTING:
+ ret = -EBUSY;
+ break;
+ }
+ spin_unlock_bh(&ar->data_lock);
+
+ if (ret)
+ goto exit;
+
+ memset(&arg, 0, sizeof(arg));
+ ath11k_wmi_start_scan_init(ar, &arg);
+ arg.vdev_id = arvif->vdev_id;
+ arg.scan_id = ATH11K_SCAN_ID;
+
+	if (req->ie_len) {
+		arg.extraie.len = req->ie_len;
+		arg.extraie.ptr = kzalloc(req->ie_len, GFP_KERNEL);
+		if (!arg.extraie.ptr) {
+			ret = -ENOMEM;
+			goto exit;
+		}
+		memcpy(arg.extraie.ptr, req->ie, req->ie_len);
+	}
+
+ if (req->n_ssids) {
+ arg.num_ssids = req->n_ssids;
+ for (i = 0; i < arg.num_ssids; i++) {
+ arg.ssid[i].length = req->ssids[i].ssid_len;
+ memcpy(&arg.ssid[i].ssid, req->ssids[i].ssid,
+ req->ssids[i].ssid_len);
+ }
+ } else {
+ arg.scan_flags |= WMI_SCAN_FLAG_PASSIVE;
+ }
+
+ if (req->n_channels) {
+ arg.num_chan = req->n_channels;
+ for (i = 0; i < arg.num_chan; i++)
+ arg.chan_list[i] = req->channels[i]->center_freq;
+ }
+
+ ret = ath11k_start_scan(ar, &arg);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to start hw scan: %d\n", ret);
+ spin_lock_bh(&ar->data_lock);
+ ar->scan.state = ATH11K_SCAN_IDLE;
+ spin_unlock_bh(&ar->data_lock);
+ }
+
+ /* Add a 200ms margin to account for event/command processing */
+ ieee80211_queue_delayed_work(ar->hw, &ar->scan.timeout,
+ msecs_to_jiffies(arg.max_scan_time +
+ ATH11K_MAC_SCAN_TIMEOUT_MSECS));
+
+exit:
+ if (req->ie_len)
+ kfree(arg.extraie.ptr);
+
+ mutex_unlock(&ar->conf_mutex);
+ return ret;
+}
+
+static void ath11k_mac_op_cancel_hw_scan(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif)
+{
+ struct ath11k *ar = hw->priv;
+
+ mutex_lock(&ar->conf_mutex);
+ ath11k_scan_abort(ar);
+ mutex_unlock(&ar->conf_mutex);
+
+ cancel_delayed_work_sync(&ar->scan.timeout);
+}
+
+static int ath11k_install_key(struct ath11k_vif *arvif,
+ struct ieee80211_key_conf *key,
+ enum set_key_cmd cmd,
+ const u8 *macaddr, u32 flags)
+{
+ int ret;
+ struct ath11k *ar = arvif->ar;
+ struct wmi_vdev_install_key_arg arg = {
+ .vdev_id = arvif->vdev_id,
+ .key_idx = key->keyidx,
+ .key_len = key->keylen,
+ .key_data = key->key,
+ .key_flags = flags,
+ .macaddr = macaddr,
+ };
+
+ lockdep_assert_held(&arvif->ar->conf_mutex);
+
+ reinit_completion(&ar->install_key_done);
+
+ if (cmd == DISABLE_KEY) {
+ /* TODO: Check if FW expects value other than NONE for del */
+ /* arg.key_cipher = WMI_CIPHER_NONE; */
+ arg.key_len = 0;
+ arg.key_data = NULL;
+ goto install;
+ }
+
+ switch (key->cipher) {
+ case WLAN_CIPHER_SUITE_CCMP:
+ arg.key_cipher = WMI_CIPHER_AES_CCM;
+ /* TODO: Re-check if flag is valid */
+ key->flags |= IEEE80211_KEY_FLAG_GENERATE_IV_MGMT;
+ break;
+ case WLAN_CIPHER_SUITE_TKIP:
+ arg.key_cipher = WMI_CIPHER_TKIP;
+ arg.key_txmic_len = 8;
+ arg.key_rxmic_len = 8;
+ break;
+ case WLAN_CIPHER_SUITE_CCMP_256:
+ arg.key_cipher = WMI_CIPHER_AES_CCM;
+ break;
+ case WLAN_CIPHER_SUITE_GCMP:
+ case WLAN_CIPHER_SUITE_GCMP_256:
+ arg.key_cipher = WMI_CIPHER_AES_GCM;
+ break;
+ default:
+ ath11k_warn(ar->ab, "cipher %d is not supported\n", key->cipher);
+ return -EOPNOTSUPP;
+ }
+
+install:
+ ret = ath11k_wmi_vdev_install_key(arvif->ar, &arg);
+ if (ret)
+ return ret;
+
+ if (!wait_for_completion_timeout(&ar->install_key_done, 1 * HZ))
+ return -ETIMEDOUT;
+
+ return ar->install_key_status ? -EINVAL : 0;
+}
+
+static int ath11k_clear_peer_keys(struct ath11k_vif *arvif,
+ const u8 *addr)
+{
+ struct ath11k *ar = arvif->ar;
+ struct ath11k_base *ab = ar->ab;
+ struct ath11k_peer *peer;
+ int first_errno = 0;
+ int ret;
+ int i;
+ u32 flags = 0;
+
+ lockdep_assert_held(&ar->conf_mutex);
+
+ spin_lock_bh(&ab->base_lock);
+ peer = ath11k_peer_find(ab, arvif->vdev_id, addr);
+ spin_unlock_bh(&ab->base_lock);
+
+ if (!peer)
+ return -ENOENT;
+
+ for (i = 0; i < ARRAY_SIZE(peer->keys); i++) {
+ if (!peer->keys[i])
+ continue;
+
+ /* key flags are not required to delete the key */
+ ret = ath11k_install_key(arvif, peer->keys[i],
+ DISABLE_KEY, addr, flags);
+ if (ret < 0 && first_errno == 0)
+ first_errno = ret;
+
+ if (ret < 0)
+ ath11k_warn(ab, "failed to remove peer key %d: %d\n",
+ i, ret);
+
+ spin_lock_bh(&ab->base_lock);
+ peer->keys[i] = NULL;
+ spin_unlock_bh(&ab->base_lock);
+ }
+
+ return first_errno;
+}
+
+static int ath11k_mac_op_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
+ struct ieee80211_vif *vif, struct ieee80211_sta *sta,
+ struct ieee80211_key_conf *key)
+{
+ struct ath11k *ar = hw->priv;
+ struct ath11k_base *ab = ar->ab;
+ struct ath11k_vif *arvif = ath11k_vif_to_arvif(vif);
+ struct ath11k_peer *peer;
+ const u8 *peer_addr;
+ int ret = 0;
+ u32 flags = 0;
+
+ /* BIP needs to be done in software */
+ if (key->cipher == WLAN_CIPHER_SUITE_AES_CMAC ||
+ key->cipher == WLAN_CIPHER_SUITE_BIP_GMAC_128 ||
+ key->cipher == WLAN_CIPHER_SUITE_BIP_GMAC_256 ||
+ key->cipher == WLAN_CIPHER_SUITE_BIP_CMAC_256)
+ return 1;
+
+ if (key->keyidx > WMI_MAX_KEY_INDEX)
+ return -ENOSPC;
+
+ mutex_lock(&ar->conf_mutex);
+
+ if (sta)
+ peer_addr = sta->addr;
+ else if (arvif->vdev_type == WMI_VDEV_TYPE_STA)
+ peer_addr = vif->bss_conf.bssid;
+ else
+ peer_addr = vif->addr;
+
+ key->hw_key_idx = key->keyidx;
+
+	/* The peer should not disappear midway (unless FW goes awry) since
+	 * we already hold conf_mutex; just make sure it is there now.
+	 */
+ spin_lock_bh(&ab->base_lock);
+ peer = ath11k_peer_find(ab, arvif->vdev_id, peer_addr);
+ spin_unlock_bh(&ab->base_lock);
+
+ if (!peer) {
+ if (cmd == SET_KEY) {
+ ath11k_warn(ab, "cannot install key for non-existent peer %pM\n",
+ peer_addr);
+ ret = -EOPNOTSUPP;
+ goto exit;
+ } else {
+ /* if the peer doesn't exist there is no key to disable
+ * anymore
+ */
+ goto exit;
+ }
+ }
+
+ if (key->flags & IEEE80211_KEY_FLAG_PAIRWISE)
+ flags |= WMI_KEY_PAIRWISE;
+ else
+ flags |= WMI_KEY_GROUP;
+
+ ret = ath11k_install_key(arvif, key, cmd, peer_addr, flags);
+ if (ret) {
+ ath11k_warn(ab, "ath11k_install_key failed (%d)\n", ret);
+ goto exit;
+ }
+
+ spin_lock_bh(&ab->base_lock);
+ peer = ath11k_peer_find(ab, arvif->vdev_id, peer_addr);
+ if (peer && cmd == SET_KEY)
+ peer->keys[key->keyidx] = key;
+ else if (peer && cmd == DISABLE_KEY)
+ peer->keys[key->keyidx] = NULL;
+ else if (!peer)
+ /* impossible unless FW goes crazy */
+ ath11k_warn(ab, "peer %pM disappeared!\n", peer_addr);
+ spin_unlock_bh(&ab->base_lock);
+
+exit:
+ mutex_unlock(&ar->conf_mutex);
+ return ret;
+}
+
+static int
+ath11k_mac_bitrate_mask_num_vht_rates(struct ath11k *ar,
+ enum nl80211_band band,
+ const struct cfg80211_bitrate_mask *mask)
+{
+ int num_rates = 0;
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(mask->control[band].vht_mcs); i++)
+ num_rates += hweight16(mask->control[band].vht_mcs[i]);
+
+ return num_rates;
+}
+
+static int
+ath11k_mac_set_peer_vht_fixed_rate(struct ath11k_vif *arvif,
+ struct ieee80211_sta *sta,
+ const struct cfg80211_bitrate_mask *mask,
+ enum nl80211_band band)
+{
+ struct ath11k *ar = arvif->ar;
+ u8 vht_rate, nss;
+ u32 rate_code;
+ int ret, i;
+
+ lockdep_assert_held(&ar->conf_mutex);
+
+ nss = 0;
+
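+	/* The caller ensures only a single VHT MCS bit is set across the
+	 * per-NSS masks; find that (NSS, MCS) pair.
+	 */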
+ for (i = 0; i < ARRAY_SIZE(mask->control[band].vht_mcs); i++) {
+ if (hweight16(mask->control[band].vht_mcs[i]) == 1) {
+ nss = i + 1;
+ vht_rate = ffs(mask->control[band].vht_mcs[i]) - 1;
+ }
+ }
+
+ if (!nss) {
+ ath11k_warn(ar->ab, "No single VHT Fixed rate found to set for %pM",
+ sta->addr);
+ return -EINVAL;
+ }
+
+ ath11k_dbg(ar->ab, ATH11K_DBG_MAC,
+ "Setting Fixed VHT Rate for peer %pM. Device will not switch to any other selected rates",
+ sta->addr);
+
+ rate_code = ATH11K_HW_RATE_CODE(vht_rate, nss - 1,
+ WMI_RATE_PREAMBLE_VHT);
+ ret = ath11k_wmi_set_peer_param(ar, sta->addr,
+ arvif->vdev_id,
+ WMI_PEER_PARAM_FIXED_RATE,
+ rate_code);
+ if (ret)
+ ath11k_warn(ar->ab,
+ "failed to update STA %pM Fixed Rate %d: %d\n",
+ sta->addr, rate_code, ret);
+
+ return ret;
+}
+
+static int ath11k_station_assoc(struct ath11k *ar,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta,
+ bool reassoc)
+{
+ struct ath11k_vif *arvif = ath11k_vif_to_arvif(vif);
+ struct peer_assoc_params peer_arg;
+ int ret = 0;
+ struct cfg80211_chan_def def;
+ enum nl80211_band band;
+ struct cfg80211_bitrate_mask *mask;
+ u8 num_vht_rates;
+
+ lockdep_assert_held(&ar->conf_mutex);
+
+ if (WARN_ON(ath11k_mac_vif_chan(vif, &def)))
+ return -EPERM;
+
+ band = def.chan->band;
+ mask = &arvif->bitrate_mask;
+
+ ath11k_peer_assoc_prepare(ar, vif, sta, &peer_arg, reassoc);
+
+ ret = ath11k_wmi_send_peer_assoc_cmd(ar, &peer_arg);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to run peer assoc for STA %pM vdev %i: %d\n",
+ sta->addr, arvif->vdev_id, ret);
+ return ret;
+ }
+
+ if (!wait_for_completion_timeout(&ar->peer_assoc_done, 1 * HZ)) {
+ ath11k_warn(ar->ab, "failed to get peer assoc conf event for %pM vdev %i\n",
+ sta->addr, arvif->vdev_id);
+ return -ETIMEDOUT;
+ }
+
+ num_vht_rates = ath11k_mac_bitrate_mask_num_vht_rates(ar, band, mask);
+
+	/* If a single VHT rate is configured (via set_bitrate_mask()),
+	 * peer_assoc will disable VHT, so re-enable it here with a
+	 * peer-specific fixed rate parameter.
+	 * Note that all other rates and NSS will be disabled for this peer.
+	 */
+ if (sta->vht_cap.vht_supported && num_vht_rates == 1) {
+ ret = ath11k_mac_set_peer_vht_fixed_rate(arvif, sta, mask,
+ band);
+ if (ret)
+ return ret;
+ }
+
+	/* Re-assoc is run only to update supported rates for a given
+	 * station. It doesn't make much sense to reconfigure the peer
+	 * completely.
+	 */
+ if (reassoc)
+ return 0;
+
+ ret = ath11k_setup_peer_smps(ar, arvif, sta->addr,
+ &sta->ht_cap);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to setup peer SMPS for vdev %d: %d\n",
+ arvif->vdev_id, ret);
+ return ret;
+ }
+
+ if (!sta->wme) {
+ arvif->num_legacy_stations++;
+ ret = ath11k_recalc_rtscts_prot(arvif);
+ if (ret)
+ return ret;
+ }
+
+ if (sta->wme && sta->uapsd_queues) {
+ ret = ath11k_peer_assoc_qos_ap(ar, arvif, sta);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to set qos params for STA %pM for vdev %i: %d\n",
+ sta->addr, arvif->vdev_id, ret);
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+static int ath11k_station_disassoc(struct ath11k *ar,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta)
+{
+ struct ath11k_vif *arvif = (void *)vif->drv_priv;
+ int ret = 0;
+
+ lockdep_assert_held(&ar->conf_mutex);
+
+ if (!sta->wme) {
+ arvif->num_legacy_stations--;
+ ret = ath11k_recalc_rtscts_prot(arvif);
+ if (ret)
+ return ret;
+ }
+
+ ret = ath11k_clear_peer_keys(arvif, sta->addr);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to clear all peer keys for vdev %i: %d\n",
+ arvif->vdev_id, ret);
+ return ret;
+ }
+ return 0;
+}
+
+static void ath11k_sta_rc_update_wk(struct work_struct *wk)
+{
+ struct ath11k *ar;
+ struct ath11k_vif *arvif;
+ struct ath11k_sta *arsta;
+ struct ieee80211_sta *sta;
+ struct cfg80211_chan_def def;
+ enum nl80211_band band;
+ const u8 *ht_mcs_mask;
+ const u16 *vht_mcs_mask;
+ u32 changed, bw, nss, smps;
+ int err, num_vht_rates;
+ const struct cfg80211_bitrate_mask *mask;
+ struct peer_assoc_params peer_arg;
+
+ arsta = container_of(wk, struct ath11k_sta, update_wk);
+ sta = container_of((void *)arsta, struct ieee80211_sta, drv_priv);
+ arvif = arsta->arvif;
+ ar = arvif->ar;
+
+ if (WARN_ON(ath11k_mac_vif_chan(arvif->vif, &def)))
+ return;
+
+ band = def.chan->band;
+ ht_mcs_mask = arvif->bitrate_mask.control[band].ht_mcs;
+ vht_mcs_mask = arvif->bitrate_mask.control[band].vht_mcs;
+
+ spin_lock_bh(&ar->data_lock);
+
+ changed = arsta->changed;
+ arsta->changed = 0;
+
+ bw = arsta->bw;
+ nss = arsta->nss;
+ smps = arsta->smps;
+
+ spin_unlock_bh(&ar->data_lock);
+
+ mutex_lock(&ar->conf_mutex);
+
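+	/* Clamp the reported NSS to what the configured HT/VHT MCS masks
+	 * actually allow.
+	 */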
+ nss = max_t(u32, 1, nss);
+ nss = min(nss, max(ath11k_mac_max_ht_nss(ht_mcs_mask),
+ ath11k_mac_max_vht_nss(vht_mcs_mask)));
+
+ if (changed & IEEE80211_RC_BW_CHANGED) {
+ err = ath11k_wmi_set_peer_param(ar, sta->addr, arvif->vdev_id,
+ WMI_PEER_CHWIDTH, bw);
+ if (err)
+ ath11k_warn(ar->ab, "failed to update STA %pM peer bw %d: %d\n",
+ sta->addr, bw, err);
+ }
+
+ if (changed & IEEE80211_RC_NSS_CHANGED) {
+ ath11k_dbg(ar->ab, ATH11K_DBG_MAC, "mac update sta %pM nss %d\n",
+ sta->addr, nss);
+
+ err = ath11k_wmi_set_peer_param(ar, sta->addr, arvif->vdev_id,
+ WMI_PEER_NSS, nss);
+ if (err)
+ ath11k_warn(ar->ab, "failed to update STA %pM nss %d: %d\n",
+ sta->addr, nss, err);
+ }
+
+ if (changed & IEEE80211_RC_SMPS_CHANGED) {
+ ath11k_dbg(ar->ab, ATH11K_DBG_MAC, "mac update sta %pM smps %d\n",
+ sta->addr, smps);
+
+ err = ath11k_wmi_set_peer_param(ar, sta->addr, arvif->vdev_id,
+ WMI_PEER_MIMO_PS_STATE, smps);
+ if (err)
+ ath11k_warn(ar->ab, "failed to update STA %pM smps %d: %d\n",
+ sta->addr, smps, err);
+ }
+
+ if (changed & IEEE80211_RC_SUPP_RATES_CHANGED) {
+ mask = &arvif->bitrate_mask;
+ num_vht_rates = ath11k_mac_bitrate_mask_num_vht_rates(ar, band,
+ mask);
+
+		/* peer_assoc_prepare() rejects VHT rates in the bitrate mask
+		 * if they cannot be expressed in range format and marks the
+		 * VHT tx rateset as unsupported, so setting multiple VHT
+		 * MCSes (e.g. MCS 4,5,6) per peer is not supported here.
+		 * A single rate in the VHT mask, however, can be set as a
+		 * per-peer fixed rate. Note that even if HT rates are
+		 * configured in the bitrate mask, the device will not switch
+		 * to those rates while a per-peer fixed rate is set.
+		 * TODO: Check RATEMASK_CMDID to support automatic rate
+		 * selection across HT/VHT and multiple VHT MCSes.
+		 */
+ if (sta->vht_cap.vht_supported && num_vht_rates == 1) {
+ ath11k_mac_set_peer_vht_fixed_rate(arvif, sta, mask,
+ band);
+ } else {
+ /* If the peer is non-VHT or no fixed VHT rate
+ * is provided in the new bitrate mask we set the
+ * other rates using peer_assoc command.
+ */
+ ath11k_peer_assoc_prepare(ar, arvif->vif, sta,
+ &peer_arg, true);
+
+ err = ath11k_wmi_send_peer_assoc_cmd(ar, &peer_arg);
+ if (err)
+ ath11k_warn(ar->ab, "failed to run peer assoc for STA %pM vdev %i: %d\n",
+ sta->addr, arvif->vdev_id, err);
+
+ if (!wait_for_completion_timeout(&ar->peer_assoc_done, 1 * HZ))
+ ath11k_warn(ar->ab, "failed to get peer assoc conf event for %pM vdev %i\n",
+ sta->addr, arvif->vdev_id);
+ }
+ }
+
+ mutex_unlock(&ar->conf_mutex);
+}
+
+static int ath11k_mac_inc_num_stations(struct ath11k_vif *arvif,
+ struct ieee80211_sta *sta)
+{
+ struct ath11k *ar = arvif->ar;
+
+ lockdep_assert_held(&ar->conf_mutex);
+
+ if (arvif->vdev_type == WMI_VDEV_TYPE_STA && !sta->tdls)
+ return 0;
+
+ if (ar->num_stations >= ar->max_num_stations)
+ return -ENOBUFS;
+
+ ar->num_stations++;
+
+ return 0;
+}
+
+static void ath11k_mac_dec_num_stations(struct ath11k_vif *arvif,
+ struct ieee80211_sta *sta)
+{
+ struct ath11k *ar = arvif->ar;
+
+ lockdep_assert_held(&ar->conf_mutex);
+
+ if (arvif->vdev_type == WMI_VDEV_TYPE_STA && !sta->tdls)
+ return;
+
+ ar->num_stations--;
+}
+
+static int ath11k_mac_station_add(struct ath11k *ar,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta)
+{
+ struct ath11k_base *ab = ar->ab;
+ struct ath11k_vif *arvif = ath11k_vif_to_arvif(vif);
+ struct ath11k_sta *arsta = (struct ath11k_sta *)sta->drv_priv;
+ struct peer_create_params peer_param;
+ int ret;
+
+ lockdep_assert_held(&ar->conf_mutex);
+
+ ret = ath11k_mac_inc_num_stations(arvif, sta);
+ if (ret) {
+ ath11k_warn(ab, "refusing to associate station: too many connected already (%d)\n",
+ ar->max_num_stations);
+ goto exit;
+ }
+
+ arsta->rx_stats = kzalloc(sizeof(*arsta->rx_stats), GFP_KERNEL);
+ if (!arsta->rx_stats) {
+ ret = -ENOMEM;
+ goto dec_num_station;
+ }
+
+ peer_param.vdev_id = arvif->vdev_id;
+ peer_param.peer_addr = sta->addr;
+ peer_param.peer_type = WMI_PEER_TYPE_DEFAULT;
+
+ ret = ath11k_peer_create(ar, arvif, sta, &peer_param);
+ if (ret) {
+ ath11k_warn(ab, "Failed to add peer: %pM for VDEV: %d\n",
+ sta->addr, arvif->vdev_id);
+ goto free_rx_stats;
+ }
+
+ ath11k_dbg(ab, ATH11K_DBG_MAC, "Added peer: %pM for VDEV: %d\n",
+ sta->addr, arvif->vdev_id);
+
+ if (ath11k_debug_is_extd_tx_stats_enabled(ar)) {
+ arsta->tx_stats = kzalloc(sizeof(*arsta->tx_stats), GFP_KERNEL);
+ if (!arsta->tx_stats) {
+ ret = -ENOMEM;
+ goto free_peer;
+ }
+ }
+
+ if (ieee80211_vif_is_mesh(vif)) {
+ ret = ath11k_wmi_set_peer_param(ar, sta->addr,
+ arvif->vdev_id,
+ WMI_PEER_USE_4ADDR, 1);
+ if (ret) {
+			ath11k_warn(ab, "failed to set STA %pM 4addr capability: %d\n",
+ sta->addr, ret);
+ goto free_tx_stats;
+ }
+ }
+
+ ret = ath11k_dp_peer_setup(ar, arvif->vdev_id, sta->addr);
+ if (ret) {
+ ath11k_warn(ab, "failed to setup dp for peer %pM on vdev %i (%d)\n",
+ sta->addr, arvif->vdev_id, ret);
+ goto free_tx_stats;
+ }
+
+ return 0;
+
+free_tx_stats:
+ kfree(arsta->tx_stats);
+ arsta->tx_stats = NULL;
+free_peer:
+ ath11k_peer_delete(ar, arvif->vdev_id, sta->addr);
+free_rx_stats:
+ kfree(arsta->rx_stats);
+ arsta->rx_stats = NULL;
+dec_num_station:
+ ath11k_mac_dec_num_stations(arvif, sta);
+exit:
+ return ret;
+}
+
+static int ath11k_mac_op_sta_state(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta,
+ enum ieee80211_sta_state old_state,
+ enum ieee80211_sta_state new_state)
+{
+ struct ath11k *ar = hw->priv;
+ struct ath11k_vif *arvif = ath11k_vif_to_arvif(vif);
+ struct ath11k_sta *arsta = (struct ath11k_sta *)sta->drv_priv;
+ int ret = 0;
+
+ /* cancel must be done outside the mutex to avoid deadlock */
+ if ((old_state == IEEE80211_STA_NONE &&
+ new_state == IEEE80211_STA_NOTEXIST))
+ cancel_work_sync(&arsta->update_wk);
+
+ mutex_lock(&ar->conf_mutex);
+
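+	/* Station lifecycle: NOTEXIST->NONE creates the peer,
+	 * NONE->NOTEXIST tears it down, and AUTH<->ASSOC handles
+	 * (dis)association on AP, mesh and IBSS interfaces.
+	 */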
+ if (old_state == IEEE80211_STA_NOTEXIST &&
+ new_state == IEEE80211_STA_NONE) {
+ memset(arsta, 0, sizeof(*arsta));
+ arsta->arvif = arvif;
+ INIT_WORK(&arsta->update_wk, ath11k_sta_rc_update_wk);
+
+ ret = ath11k_mac_station_add(ar, vif, sta);
+ if (ret)
+ ath11k_warn(ar->ab, "Failed to add station: %pM for VDEV: %d\n",
+ sta->addr, arvif->vdev_id);
+ } else if ((old_state == IEEE80211_STA_NONE &&
+ new_state == IEEE80211_STA_NOTEXIST)) {
+ ath11k_dp_peer_cleanup(ar, arvif->vdev_id, sta->addr);
+
+ ret = ath11k_peer_delete(ar, arvif->vdev_id, sta->addr);
+ if (ret)
+ ath11k_warn(ar->ab, "Failed to delete peer: %pM for VDEV: %d\n",
+ sta->addr, arvif->vdev_id);
+ else
+ ath11k_dbg(ar->ab, ATH11K_DBG_MAC, "Removed peer: %pM for VDEV: %d\n",
+ sta->addr, arvif->vdev_id);
+
+ ath11k_mac_dec_num_stations(arvif, sta);
+
+ kfree(arsta->tx_stats);
+ arsta->tx_stats = NULL;
+
+ kfree(arsta->rx_stats);
+ arsta->rx_stats = NULL;
+ } else if (old_state == IEEE80211_STA_AUTH &&
+ new_state == IEEE80211_STA_ASSOC &&
+ (vif->type == NL80211_IFTYPE_AP ||
+ vif->type == NL80211_IFTYPE_MESH_POINT ||
+ vif->type == NL80211_IFTYPE_ADHOC)) {
+ ret = ath11k_station_assoc(ar, vif, sta, false);
+ if (ret)
+ ath11k_warn(ar->ab, "Failed to associate station: %pM\n",
+ sta->addr);
+ else
+ ath11k_info(ar->ab,
+ "Station %pM moved to assoc state\n",
+ sta->addr);
+ } else if (old_state == IEEE80211_STA_ASSOC &&
+ new_state == IEEE80211_STA_AUTH &&
+ (vif->type == NL80211_IFTYPE_AP ||
+ vif->type == NL80211_IFTYPE_MESH_POINT ||
+ vif->type == NL80211_IFTYPE_ADHOC)) {
+ ret = ath11k_station_disassoc(ar, vif, sta);
+ if (ret)
+ ath11k_warn(ar->ab, "Failed to disassociate station: %pM\n",
+ sta->addr);
+ else
+ ath11k_info(ar->ab,
+ "Station %pM moved to disassociated state\n",
+ sta->addr);
+ }
+
+ mutex_unlock(&ar->conf_mutex);
+ return ret;
+}
+
+static int ath11k_mac_op_sta_set_txpwr(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta)
+{
+ struct ath11k *ar = hw->priv;
+ struct ath11k_vif *arvif = (void *)vif->drv_priv;
+ int ret = 0;
+ s16 txpwr;
+
+ if (sta->txpwr.type == NL80211_TX_POWER_AUTOMATIC) {
+ txpwr = 0;
+ } else {
+ txpwr = sta->txpwr.power;
+ if (!txpwr)
+ return -EINVAL;
+ }
+
+ if (txpwr > ATH11K_TX_POWER_MAX_VAL || txpwr < ATH11K_TX_POWER_MIN_VAL)
+ return -EINVAL;
+
+ mutex_lock(&ar->conf_mutex);
+
+ ret = ath11k_wmi_set_peer_param(ar, sta->addr, arvif->vdev_id,
+ WMI_PEER_USE_FIXED_PWR, txpwr);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to set tx power for station ret: %d\n",
+ ret);
+ goto out;
+ }
+
+out:
+ mutex_unlock(&ar->conf_mutex);
+ return ret;
+}
+
+static void ath11k_mac_op_sta_rc_update(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta,
+ u32 changed)
+{
+ struct ath11k *ar = hw->priv;
+ struct ath11k_sta *arsta = (struct ath11k_sta *)sta->drv_priv;
+ struct ath11k_vif *arvif = (void *)vif->drv_priv;
+ struct ath11k_peer *peer;
+ u32 bw, smps;
+
+ spin_lock_bh(&ar->ab->base_lock);
+
+ peer = ath11k_peer_find(ar->ab, arvif->vdev_id, sta->addr);
+ if (!peer) {
+ spin_unlock_bh(&ar->ab->base_lock);
+ ath11k_warn(ar->ab, "mac sta rc update failed to find peer %pM on vdev %i\n",
+ sta->addr, arvif->vdev_id);
+ return;
+ }
+
+ spin_unlock_bh(&ar->ab->base_lock);
+
+ ath11k_dbg(ar->ab, ATH11K_DBG_MAC,
+ "mac sta rc update for %pM changed %08x bw %d nss %d smps %d\n",
+ sta->addr, changed, sta->bandwidth, sta->rx_nss,
+ sta->smps_mode);
+
+ spin_lock_bh(&ar->data_lock);
+
+ if (changed & IEEE80211_RC_BW_CHANGED) {
+ bw = WMI_PEER_CHWIDTH_20MHZ;
+
+ switch (sta->bandwidth) {
+ case IEEE80211_STA_RX_BW_20:
+ bw = WMI_PEER_CHWIDTH_20MHZ;
+ break;
+ case IEEE80211_STA_RX_BW_40:
+ bw = WMI_PEER_CHWIDTH_40MHZ;
+ break;
+ case IEEE80211_STA_RX_BW_80:
+ bw = WMI_PEER_CHWIDTH_80MHZ;
+ break;
+ case IEEE80211_STA_RX_BW_160:
+ bw = WMI_PEER_CHWIDTH_160MHZ;
+ break;
+ default:
+ ath11k_warn(ar->ab, "Invalid bandwidth %d in rc update for %pM\n",
+ sta->bandwidth, sta->addr);
+ bw = WMI_PEER_CHWIDTH_20MHZ;
+ break;
+ }
+
+ arsta->bw = bw;
+ }
+
+ if (changed & IEEE80211_RC_NSS_CHANGED)
+ arsta->nss = sta->rx_nss;
+
+ if (changed & IEEE80211_RC_SMPS_CHANGED) {
+ smps = WMI_PEER_SMPS_PS_NONE;
+
+ switch (sta->smps_mode) {
+ case IEEE80211_SMPS_AUTOMATIC:
+ case IEEE80211_SMPS_OFF:
+ smps = WMI_PEER_SMPS_PS_NONE;
+ break;
+ case IEEE80211_SMPS_STATIC:
+ smps = WMI_PEER_SMPS_STATIC;
+ break;
+ case IEEE80211_SMPS_DYNAMIC:
+ smps = WMI_PEER_SMPS_DYNAMIC;
+ break;
+ default:
+ ath11k_warn(ar->ab, "Invalid smps %d in sta rc update for %pM\n",
+ sta->smps_mode, sta->addr);
+ smps = WMI_PEER_SMPS_PS_NONE;
+ break;
+ }
+
+ arsta->smps = smps;
+ }
+
+ arsta->changed |= changed;
+
+ spin_unlock_bh(&ar->data_lock);
+
+ ieee80211_queue_work(hw, &arsta->update_wk);
+}
+
+static int ath11k_conf_tx_uapsd(struct ath11k *ar, struct ieee80211_vif *vif,
+ u16 ac, bool enable)
+{
+ struct ath11k_vif *arvif = ath11k_vif_to_arvif(vif);
+ u32 value = 0;
+ int ret = 0;
+
+ if (arvif->vdev_type != WMI_VDEV_TYPE_STA)
+ return 0;
+
+ switch (ac) {
+ case IEEE80211_AC_VO:
+ value = WMI_STA_PS_UAPSD_AC3_DELIVERY_EN |
+ WMI_STA_PS_UAPSD_AC3_TRIGGER_EN;
+ break;
+ case IEEE80211_AC_VI:
+ value = WMI_STA_PS_UAPSD_AC2_DELIVERY_EN |
+ WMI_STA_PS_UAPSD_AC2_TRIGGER_EN;
+ break;
+ case IEEE80211_AC_BE:
+ value = WMI_STA_PS_UAPSD_AC1_DELIVERY_EN |
+ WMI_STA_PS_UAPSD_AC1_TRIGGER_EN;
+ break;
+ case IEEE80211_AC_BK:
+ value = WMI_STA_PS_UAPSD_AC0_DELIVERY_EN |
+ WMI_STA_PS_UAPSD_AC0_TRIGGER_EN;
+ break;
+ }
+
+ if (enable)
+ arvif->u.sta.uapsd |= value;
+ else
+ arvif->u.sta.uapsd &= ~value;
+
+ ret = ath11k_wmi_set_sta_ps_param(ar, arvif->vdev_id,
+ WMI_STA_PS_PARAM_UAPSD,
+ arvif->u.sta.uapsd);
+ if (ret) {
+ ath11k_warn(ar->ab, "could not set uapsd params %d\n", ret);
+ goto exit;
+ }
+
+ if (arvif->u.sta.uapsd)
+ value = WMI_STA_PS_RX_WAKE_POLICY_POLL_UAPSD;
+ else
+ value = WMI_STA_PS_RX_WAKE_POLICY_WAKE;
+
+ ret = ath11k_wmi_set_sta_ps_param(ar, arvif->vdev_id,
+ WMI_STA_PS_PARAM_RX_WAKE_POLICY,
+ value);
+ if (ret)
+ ath11k_warn(ar->ab, "could not set rx wake param %d\n", ret);
+
+exit:
+ return ret;
+}
+
+static int ath11k_mac_op_conf_tx(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif, u16 ac,
+ const struct ieee80211_tx_queue_params *params)
+{
+ struct ath11k *ar = hw->priv;
+ struct ath11k_vif *arvif = (void *)vif->drv_priv;
+ struct wmi_wmm_params_arg *p = NULL;
+ int ret;
+
+ mutex_lock(&ar->conf_mutex);
+
+ switch (ac) {
+ case IEEE80211_AC_VO:
+ p = &arvif->wmm_params.ac_vo;
+ break;
+ case IEEE80211_AC_VI:
+ p = &arvif->wmm_params.ac_vi;
+ break;
+ case IEEE80211_AC_BE:
+ p = &arvif->wmm_params.ac_be;
+ break;
+ case IEEE80211_AC_BK:
+ p = &arvif->wmm_params.ac_bk;
+ break;
+ }
+
+ if (WARN_ON(!p)) {
+ ret = -EINVAL;
+ goto exit;
+ }
+
+ p->cwmin = params->cw_min;
+ p->cwmax = params->cw_max;
+ p->aifs = params->aifs;
+ p->txop = params->txop;
+
+ ret = ath11k_wmi_send_wmm_update_cmd_tlv(ar, arvif->vdev_id,
+ &arvif->wmm_params);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to set wmm params: %d\n", ret);
+ goto exit;
+ }
+
+ ret = ath11k_conf_tx_uapsd(ar, vif, ac, params->uapsd);
+
+ if (ret)
+ ath11k_warn(ar->ab, "failed to set sta uapsd: %d\n", ret);
+
+exit:
+ mutex_unlock(&ar->conf_mutex);
+ return ret;
+}
+
+static struct ieee80211_sta_ht_cap
+ath11k_create_ht_cap(struct ath11k *ar, u32 ar_ht_cap, u32 rate_cap_rx_chainmask)
+{
+ int i;
+ struct ieee80211_sta_ht_cap ht_cap = {0};
+ u32 ar_vht_cap = ar->pdev->cap.vht_cap;
+
+ if (!(ar_ht_cap & WMI_HT_CAP_ENABLED))
+ return ht_cap;
+
+ ht_cap.ht_supported = 1;
+ ht_cap.ampdu_factor = IEEE80211_HT_MAX_AMPDU_64K;
+ ht_cap.ampdu_density = IEEE80211_HT_MPDU_DENSITY_NONE;
+ ht_cap.cap |= IEEE80211_HT_CAP_SUP_WIDTH_20_40;
+ ht_cap.cap |= IEEE80211_HT_CAP_DSSSCCK40;
+ ht_cap.cap |= WLAN_HT_CAP_SM_PS_STATIC << IEEE80211_HT_CAP_SM_PS_SHIFT;
+
+ if (ar_ht_cap & WMI_HT_CAP_HT20_SGI)
+ ht_cap.cap |= IEEE80211_HT_CAP_SGI_20;
+
+ if (ar_ht_cap & WMI_HT_CAP_HT40_SGI)
+ ht_cap.cap |= IEEE80211_HT_CAP_SGI_40;
+
+ if (ar_ht_cap & WMI_HT_CAP_DYNAMIC_SMPS) {
+ u32 smps;
+
+ smps = WLAN_HT_CAP_SM_PS_DYNAMIC;
+ smps <<= IEEE80211_HT_CAP_SM_PS_SHIFT;
+
+ ht_cap.cap |= smps;
+ }
+
+ if (ar_ht_cap & WMI_HT_CAP_TX_STBC)
+ ht_cap.cap |= IEEE80211_HT_CAP_TX_STBC;
+
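+ /* Re-encode the Rx STBC stream count from the WMI capability field
+ * into the IEEE HT capability layout.
+ */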
+ if (ar_ht_cap & WMI_HT_CAP_RX_STBC) {
+ u32 stbc;
+
+ stbc = ar_ht_cap;
+ stbc &= WMI_HT_CAP_RX_STBC;
+ stbc >>= WMI_HT_CAP_RX_STBC_MASK_SHIFT;
+ stbc <<= IEEE80211_HT_CAP_RX_STBC_SHIFT;
+ stbc &= IEEE80211_HT_CAP_RX_STBC;
+
+ ht_cap.cap |= stbc;
+ }
+
+ if (ar_ht_cap & WMI_HT_CAP_RX_LDPC)
+ ht_cap.cap |= IEEE80211_HT_CAP_LDPC_CODING;
+
+ if (ar_ht_cap & WMI_HT_CAP_L_SIG_TXOP_PROT)
+ ht_cap.cap |= IEEE80211_HT_CAP_LSIG_TXOP_PROT;
+
+ if (ar_vht_cap & WMI_VHT_CAP_MAX_MPDU_LEN_MASK)
+ ht_cap.cap |= IEEE80211_HT_CAP_MAX_AMSDU;
+
+ for (i = 0; i < ar->num_rx_chains; i++) {
+ if (rate_cap_rx_chainmask & BIT(i))
+ ht_cap.mcs.rx_mask[i] = 0xFF;
+ }
+
+ ht_cap.mcs.tx_params |= IEEE80211_HT_MCS_TX_DEFINED;
+
+ return ht_cap;
+}
+
+static int ath11k_mac_set_txbf_conf(struct ath11k_vif *arvif)
+{
+ u32 value = 0;
+ struct ath11k *ar = arvif->ar;
+ int nsts;
+ int sound_dim;
+ u32 vht_cap = ar->pdev->cap.vht_cap;
+ u32 vdev_param = WMI_VDEV_PARAM_TXBF;
+
+ if (vht_cap & (IEEE80211_VHT_CAP_SU_BEAMFORMEE_CAPABLE)) {
+ nsts = vht_cap & IEEE80211_VHT_CAP_BEAMFORMEE_STS_MASK;
+ nsts >>= IEEE80211_VHT_CAP_BEAMFORMEE_STS_SHIFT;
+ value |= SM(nsts, WMI_TXBF_STS_CAP_OFFSET);
+ }
+
+ if (vht_cap & (IEEE80211_VHT_CAP_SU_BEAMFORMER_CAPABLE)) {
+ sound_dim = vht_cap &
+ IEEE80211_VHT_CAP_SOUNDING_DIMENSIONS_MASK;
+ sound_dim >>= IEEE80211_VHT_CAP_SOUNDING_DIMENSIONS_SHIFT;
+ if (sound_dim > (ar->num_tx_chains - 1))
+ sound_dim = ar->num_tx_chains - 1;
+ value |= SM(sound_dim, WMI_BF_SOUND_DIM_OFFSET);
+ }
+
+ if (!value)
+ return 0;
+
+ if (vht_cap & IEEE80211_VHT_CAP_SU_BEAMFORMER_CAPABLE) {
+ value |= WMI_VDEV_PARAM_TXBF_SU_TX_BFER;
+
+ if ((vht_cap & IEEE80211_VHT_CAP_MU_BEAMFORMER_CAPABLE) &&
+ arvif->vdev_type == WMI_VDEV_TYPE_AP)
+ value |= WMI_VDEV_PARAM_TXBF_MU_TX_BFER;
+ }
+
+ /* TODO: SUBFEE not validated in HK, disable here until validated? */
+
+ if (vht_cap & IEEE80211_VHT_CAP_SU_BEAMFORMEE_CAPABLE) {
+ value |= WMI_VDEV_PARAM_TXBF_SU_TX_BFEE;
+
+ if ((vht_cap & IEEE80211_VHT_CAP_MU_BEAMFORMEE_CAPABLE) &&
+ arvif->vdev_type == WMI_VDEV_TYPE_STA)
+ value |= WMI_VDEV_PARAM_TXBF_MU_TX_BFEE;
+ }
+
+ return ath11k_wmi_vdev_set_param_cmd(ar, arvif->vdev_id,
+ vdev_param, value);
+}
+
+static void ath11k_set_vht_txbf_cap(struct ath11k *ar, u32 *vht_cap)
+{
+ bool subfer, subfee;
+ int sound_dim = 0;
+
+ subfer = !!(*vht_cap & (IEEE80211_VHT_CAP_SU_BEAMFORMER_CAPABLE));
+ subfee = !!(*vht_cap & (IEEE80211_VHT_CAP_SU_BEAMFORMEE_CAPABLE));
+
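+ /* Beamforming needs at least two Tx chains; otherwise strip the SU
+ * beamformer capability.
+ */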
+ if (ar->num_tx_chains < 2) {
+ *vht_cap &= ~(IEEE80211_VHT_CAP_SU_BEAMFORMER_CAPABLE);
+ subfer = false;
+ }
+
+ /* If SU Beamformer is not set, then disable MU Beamformer Capability */
+ if (!subfer)
+ *vht_cap &= ~(IEEE80211_VHT_CAP_MU_BEAMFORMER_CAPABLE);
+
+ /* If SU Beamformee is not set, then disable MU Beamformee Capability */
+ if (!subfee)
+ *vht_cap &= ~(IEEE80211_VHT_CAP_MU_BEAMFORMEE_CAPABLE);
+
+ sound_dim = (*vht_cap & IEEE80211_VHT_CAP_SOUNDING_DIMENSIONS_MASK);
+ sound_dim >>= IEEE80211_VHT_CAP_SOUNDING_DIMENSIONS_SHIFT;
+ *vht_cap &= ~IEEE80211_VHT_CAP_SOUNDING_DIMENSIONS_MASK;
+
+ /* TODO: Need to check invalid STS and Sound_dim values set by FW? */
+
+ /* Enable Sounding Dimension Field only if SU BF is enabled */
+ if (subfer) {
+ if (sound_dim > (ar->num_tx_chains - 1))
+ sound_dim = ar->num_tx_chains - 1;
+
+ sound_dim <<= IEEE80211_VHT_CAP_SOUNDING_DIMENSIONS_SHIFT;
+ sound_dim &= IEEE80211_VHT_CAP_SOUNDING_DIMENSIONS_MASK;
+ *vht_cap |= sound_dim;
+ }
+
+ /* Use the STS advertised by FW unless SU Beamformee is not supported */
+ if (!subfee)
+ *vht_cap &= ~(IEEE80211_VHT_CAP_BEAMFORMEE_STS_MASK);
+}
+
+static struct ieee80211_sta_vht_cap
+ath11k_create_vht_cap(struct ath11k *ar, u32 rate_cap_tx_chainmask,
+ u32 rate_cap_rx_chainmask)
+{
+ struct ieee80211_sta_vht_cap vht_cap = {0};
+ u16 txmcs_map, rxmcs_map;
+ int i;
+
+ vht_cap.vht_supported = 1;
+ vht_cap.cap = ar->pdev->cap.vht_cap;
+
+ ath11k_set_vht_txbf_cap(ar, &vht_cap.cap);
+
+ /* TODO: Re-enable VHT160 mode once association issues are fixed */
+ /* Disabling VHT160 and VHT80+80 modes */
+ vht_cap.cap &= ~IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_MASK;
+ vht_cap.cap &= ~IEEE80211_VHT_CAP_SHORT_GI_160;
+
+ rxmcs_map = 0;
+ txmcs_map = 0;
+ for (i = 0; i < 8; i++) {
+ if (i < ar->num_tx_chains && rate_cap_tx_chainmask & BIT(i))
+ txmcs_map |= IEEE80211_VHT_MCS_SUPPORT_0_9 << (i * 2);
+ else
+ txmcs_map |= IEEE80211_VHT_MCS_NOT_SUPPORTED << (i * 2);
+
+ if (i < ar->num_rx_chains && rate_cap_rx_chainmask & BIT(i))
+ rxmcs_map |= IEEE80211_VHT_MCS_SUPPORT_0_9 << (i * 2);
+ else
+ rxmcs_map |= IEEE80211_VHT_MCS_NOT_SUPPORTED << (i * 2);
+ }
+
+ if (rate_cap_tx_chainmask <= 1)
+ vht_cap.cap &= ~IEEE80211_VHT_CAP_TXSTBC;
+
+ vht_cap.vht_mcs.rx_mcs_map = cpu_to_le16(rxmcs_map);
+ vht_cap.vht_mcs.tx_mcs_map = cpu_to_le16(txmcs_map);
+
+ return vht_cap;
+}
+
+static void ath11k_mac_setup_ht_vht_cap(struct ath11k *ar,
+ struct ath11k_pdev_cap *cap,
+ u32 *ht_cap_info)
+{
+ struct ieee80211_supported_band *band;
+ u32 rate_cap_tx_chainmask;
+ u32 rate_cap_rx_chainmask;
+ u32 ht_cap;
+
+ rate_cap_tx_chainmask = ar->cfg_tx_chainmask >> cap->tx_chain_mask_shift;
+ rate_cap_rx_chainmask = ar->cfg_rx_chainmask >> cap->rx_chain_mask_shift;
+
+ if (cap->supported_bands & WMI_HOST_WLAN_2G_CAP) {
+ band = &ar->mac.sbands[NL80211_BAND_2GHZ];
+ ht_cap = cap->band[NL80211_BAND_2GHZ].ht_cap_info;
+ if (ht_cap_info)
+ *ht_cap_info = ht_cap;
+ band->ht_cap = ath11k_create_ht_cap(ar, ht_cap,
+ rate_cap_rx_chainmask);
+ }
+
+ if (cap->supported_bands & WMI_HOST_WLAN_5G_CAP) {
+ band = &ar->mac.sbands[NL80211_BAND_5GHZ];
+ ht_cap = cap->band[NL80211_BAND_5GHZ].ht_cap_info;
+ if (ht_cap_info)
+ *ht_cap_info = ht_cap;
+ band->ht_cap = ath11k_create_ht_cap(ar, ht_cap,
+ rate_cap_rx_chainmask);
+ band->vht_cap = ath11k_create_vht_cap(ar, rate_cap_tx_chainmask,
+ rate_cap_rx_chainmask);
+ }
+}
+
+static int ath11k_check_chain_mask(struct ath11k *ar, u32 ant, bool is_tx_ant)
+{
+ /* TODO: Check the requested chainmask against the supported
+ * chainmask table advertised in the extended service ready event
+ */
+
+ return 0;
+}
+
+static void ath11k_gen_ppe_thresh(struct ath11k_ppe_threshold *fw_ppet,
+ u8 *he_ppet)
+{
+ int nss, ru;
+ u8 bit = 7;
+
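+ /* Pack the PPE thresholds IEEE-style: byte 0 carries the NSS count
+ * and the RU index bitmask, followed by one 6-bit PPET16/PPET8 pair
+ * per (NSS, RU) combination, with the two 3-bit halves swapped from
+ * the firmware ordering.
+ */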
+ he_ppet[0] = fw_ppet->numss_m1 & IEEE80211_PPE_THRES_NSS_MASK;
+ he_ppet[0] |= (fw_ppet->ru_bit_mask <<
+ IEEE80211_PPE_THRES_RU_INDEX_BITMASK_POS) &
+ IEEE80211_PPE_THRES_RU_INDEX_BITMASK_MASK;
+ for (nss = 0; nss <= fw_ppet->numss_m1; nss++) {
+ for (ru = 0; ru < 4; ru++) {
+ u8 val;
+ int i;
+
+ if ((fw_ppet->ru_bit_mask & BIT(ru)) == 0)
+ continue;
+ val = (fw_ppet->ppet16_ppet8_ru3_ru0[nss] >> (ru * 6)) &
+ 0x3f;
+ val = ((val >> 3) & 0x7) | ((val & 0x7) << 3);
+ for (i = 5; i >= 0; i--) {
+ he_ppet[bit / 8] |=
+ ((val >> i) & 0x1) << ((bit % 8));
+ bit++;
+ }
+ }
+ }
+}
+
+static void
+ath11k_mac_filter_he_cap_mesh(struct ieee80211_he_cap_elem *he_cap_elem)
+{
+ u8 m;
+
+ m = IEEE80211_HE_MAC_CAP0_TWT_RES |
+ IEEE80211_HE_MAC_CAP0_TWT_REQ;
+ he_cap_elem->mac_cap_info[0] &= ~m;
+
+ m = IEEE80211_HE_MAC_CAP2_TRS |
+ IEEE80211_HE_MAC_CAP2_BCAST_TWT |
+ IEEE80211_HE_MAC_CAP2_MU_CASCADING;
+ he_cap_elem->mac_cap_info[2] &= ~m;
+
+ m = IEEE80211_HE_MAC_CAP3_FLEX_TWT_SCHED |
+ IEEE80211_HE_MAC_CAP2_BCAST_TWT |
+ IEEE80211_HE_MAC_CAP2_MU_CASCADING;
+ he_cap_elem->mac_cap_info[3] &= ~m;
+
+ m = IEEE80211_HE_MAC_CAP4_BSRP_BQRP_A_MPDU_AGG |
+ IEEE80211_HE_MAC_CAP4_BQR;
+ he_cap_elem->mac_cap_info[4] &= ~m;
+
+ m = IEEE80211_HE_MAC_CAP5_SUBCHAN_SELECVITE_TRANSMISSION |
+ IEEE80211_HE_MAC_CAP5_UL_2x996_TONE_RU |
+ IEEE80211_HE_MAC_CAP5_PUNCTURED_SOUNDING |
+ IEEE80211_HE_MAC_CAP5_HT_VHT_TRIG_FRAME_RX;
+ he_cap_elem->mac_cap_info[5] &= ~m;
+
+ m = IEEE80211_HE_PHY_CAP2_UL_MU_FULL_MU_MIMO |
+ IEEE80211_HE_PHY_CAP2_UL_MU_PARTIAL_MU_MIMO;
+ he_cap_elem->phy_cap_info[2] &= ~m;
+
+ m = IEEE80211_HE_PHY_CAP3_RX_HE_MU_PPDU_FROM_NON_AP_STA |
+ IEEE80211_HE_PHY_CAP3_DCM_MAX_CONST_TX_MASK |
+ IEEE80211_HE_PHY_CAP3_DCM_MAX_CONST_RX_MASK;
+ he_cap_elem->phy_cap_info[3] &= ~m;
+
+ m = IEEE80211_HE_PHY_CAP4_MU_BEAMFORMER;
+ he_cap_elem->phy_cap_info[4] &= ~m;
+
+ m = IEEE80211_HE_PHY_CAP5_NG16_MU_FEEDBACK;
+ he_cap_elem->phy_cap_info[5] &= ~m;
+
+ m = IEEE80211_HE_PHY_CAP6_CODEBOOK_SIZE_75_MU |
+ IEEE80211_HE_PHY_CAP6_TRIG_MU_BEAMFORMER_FB |
+ IEEE80211_HE_PHY_CAP6_TRIG_CQI_FB |
+ IEEE80211_HE_PHY_CAP6_PARTIAL_BANDWIDTH_DL_MUMIMO;
+ he_cap_elem->phy_cap_info[6] &= ~m;
+
+ m = IEEE80211_HE_PHY_CAP7_SRP_BASED_SR |
+ IEEE80211_HE_PHY_CAP7_POWER_BOOST_FACTOR_AR |
+ IEEE80211_HE_PHY_CAP7_STBC_TX_ABOVE_80MHZ |
+ IEEE80211_HE_PHY_CAP7_STBC_RX_ABOVE_80MHZ;
+ he_cap_elem->phy_cap_info[7] &= ~m;
+
+ m = IEEE80211_HE_PHY_CAP8_HE_ER_SU_PPDU_4XLTF_AND_08_US_GI |
+ IEEE80211_HE_PHY_CAP8_20MHZ_IN_40MHZ_HE_PPDU_IN_2G |
+ IEEE80211_HE_PHY_CAP8_20MHZ_IN_160MHZ_HE_PPDU |
+ IEEE80211_HE_PHY_CAP8_80MHZ_IN_160MHZ_HE_PPDU;
+ he_cap_elem->phy_cap_info[8] &= ~m;
+
+ m = IEEE80211_HE_PHY_CAP9_LONGER_THAN_16_SIGB_OFDM_SYM |
+ IEEE80211_HE_PHY_CAP9_NON_TRIGGERED_CQI_FEEDBACK |
+ IEEE80211_HE_PHY_CAP9_RX_1024_QAM_LESS_THAN_242_TONE_RU |
+ IEEE80211_HE_PHY_CAP9_TX_1024_QAM_LESS_THAN_242_TONE_RU |
+ IEEE80211_HE_PHY_CAP9_RX_FULL_BW_SU_USING_MU_WITH_COMP_SIGB |
+ IEEE80211_HE_PHY_CAP9_RX_FULL_BW_SU_USING_MU_WITH_NON_COMP_SIGB;
+ he_cap_elem->phy_cap_info[9] &= ~m;
+}
+
+static int ath11k_mac_copy_he_cap(struct ath11k *ar,
+ struct ath11k_pdev_cap *cap,
+ struct ieee80211_sband_iftype_data *data,
+ int band)
+{
+ int i, idx = 0;
+
+ for (i = 0; i < NUM_NL80211_IFTYPES; i++) {
+ struct ieee80211_sta_he_cap *he_cap = &data[idx].he_cap;
+ struct ath11k_band_cap *band_cap = &cap->band[band];
+ struct ieee80211_he_cap_elem *he_cap_elem =
+ &he_cap->he_cap_elem;
+
+ switch (i) {
+ case NL80211_IFTYPE_STATION:
+ case NL80211_IFTYPE_AP:
+ case NL80211_IFTYPE_MESH_POINT:
+ break;
+
+ default:
+ continue;
+ }
+
+ data[idx].types_mask = BIT(i);
+ he_cap->has_he = true;
+ memcpy(he_cap_elem->mac_cap_info, band_cap->he_cap_info,
+ sizeof(he_cap_elem->mac_cap_info));
+ memcpy(he_cap_elem->phy_cap_info, band_cap->he_cap_phy_info,
+ sizeof(he_cap_elem->phy_cap_info));
+
+ he_cap_elem->mac_cap_info[1] |=
+ IEEE80211_HE_MAC_CAP1_TF_MAC_PAD_DUR_MASK;
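+ /* Override the max-STS and sounding-dimension fields with values
+ * derived from the local Tx chain count.
+ */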
+ he_cap_elem->phy_cap_info[4] &=
+ ~IEEE80211_HE_PHY_CAP4_BEAMFORMEE_MAX_STS_UNDER_80MHZ_MASK;
+ he_cap_elem->phy_cap_info[4] &=
+ ~IEEE80211_HE_PHY_CAP4_BEAMFORMEE_MAX_STS_ABOVE_80MHZ_MASK;
+ he_cap_elem->phy_cap_info[4] |= (ar->num_tx_chains - 1) << 2;
+
+ he_cap_elem->phy_cap_info[5] &=
+ ~IEEE80211_HE_PHY_CAP5_BEAMFORMEE_NUM_SND_DIM_UNDER_80MHZ_MASK;
+ he_cap_elem->phy_cap_info[5] &=
+ ~IEEE80211_HE_PHY_CAP5_BEAMFORMEE_NUM_SND_DIM_ABOVE_80MHZ_MASK;
+ he_cap_elem->phy_cap_info[5] |= ar->num_tx_chains - 1;
+
+ switch (i) {
+ case NL80211_IFTYPE_AP:
+ he_cap_elem->phy_cap_info[9] |=
+ IEEE80211_HE_PHY_CAP9_RX_1024_QAM_LESS_THAN_242_TONE_RU;
+ break;
+ case NL80211_IFTYPE_STATION:
+ he_cap_elem->mac_cap_info[0] &=
+ ~IEEE80211_HE_MAC_CAP0_TWT_RES;
+ he_cap_elem->mac_cap_info[0] |=
+ IEEE80211_HE_MAC_CAP0_TWT_REQ;
+ he_cap_elem->phy_cap_info[9] |=
+ IEEE80211_HE_PHY_CAP9_TX_1024_QAM_LESS_THAN_242_TONE_RU;
+ break;
+ case NL80211_IFTYPE_MESH_POINT:
+ ath11k_mac_filter_he_cap_mesh(he_cap_elem);
+ break;
+ }
+
+ he_cap->he_mcs_nss_supp.rx_mcs_80 =
+ cpu_to_le16(band_cap->he_mcs & 0xffff);
+ he_cap->he_mcs_nss_supp.tx_mcs_80 =
+ cpu_to_le16(band_cap->he_mcs & 0xffff);
+ he_cap->he_mcs_nss_supp.rx_mcs_160 =
+ cpu_to_le16((band_cap->he_mcs >> 16) & 0xffff);
+ he_cap->he_mcs_nss_supp.tx_mcs_160 =
+ cpu_to_le16((band_cap->he_mcs >> 16) & 0xffff);
+ he_cap->he_mcs_nss_supp.rx_mcs_80p80 =
+ cpu_to_le16((band_cap->he_mcs >> 16) & 0xffff);
+ he_cap->he_mcs_nss_supp.tx_mcs_80p80 =
+ cpu_to_le16((band_cap->he_mcs >> 16) & 0xffff);
+
+ memset(he_cap->ppe_thres, 0, sizeof(he_cap->ppe_thres));
+ if (he_cap_elem->phy_cap_info[6] &
+ IEEE80211_HE_PHY_CAP6_PPE_THRESHOLD_PRESENT)
+ ath11k_gen_ppe_thresh(&band_cap->he_ppet,
+ he_cap->ppe_thres);
+ idx++;
+ }
+
+ return idx;
+}
+
+static void ath11k_mac_setup_he_cap(struct ath11k *ar,
+ struct ath11k_pdev_cap *cap)
+{
+ struct ieee80211_supported_band *band;
+ int count;
+
+ if (cap->supported_bands & WMI_HOST_WLAN_2G_CAP) {
+ count = ath11k_mac_copy_he_cap(ar, cap,
+ ar->mac.iftype[NL80211_BAND_2GHZ],
+ NL80211_BAND_2GHZ);
+ band = &ar->mac.sbands[NL80211_BAND_2GHZ];
+ band->iftype_data = ar->mac.iftype[NL80211_BAND_2GHZ];
+ band->n_iftype_data = count;
+ }
+
+ if (cap->supported_bands & WMI_HOST_WLAN_5G_CAP) {
+ count = ath11k_mac_copy_he_cap(ar, cap,
+ ar->mac.iftype[NL80211_BAND_5GHZ],
+ NL80211_BAND_5GHZ);
+ band = &ar->mac.sbands[NL80211_BAND_5GHZ];
+ band->iftype_data = ar->mac.iftype[NL80211_BAND_5GHZ];
+ band->n_iftype_data = count;
+ }
+}
+
+static int __ath11k_set_antenna(struct ath11k *ar, u32 tx_ant, u32 rx_ant)
+{
+ int ret;
+
+ lockdep_assert_held(&ar->conf_mutex);
+
+ if (ath11k_check_chain_mask(ar, tx_ant, true))
+ return -EINVAL;
+
+ if (ath11k_check_chain_mask(ar, rx_ant, false))
+ return -EINVAL;
+
+ ar->cfg_tx_chainmask = tx_ant;
+ ar->cfg_rx_chainmask = rx_ant;
+
+ if (ar->state != ATH11K_STATE_ON &&
+ ar->state != ATH11K_STATE_RESTARTED)
+ return 0;
+
+ ret = ath11k_wmi_pdev_set_param(ar, WMI_PDEV_PARAM_TX_CHAIN_MASK,
+ tx_ant, ar->pdev->pdev_id);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to set tx-chainmask: %d, req 0x%x\n",
+ ret, tx_ant);
+ return ret;
+ }
+
+ ar->num_tx_chains = get_num_chains(tx_ant);
+
+ ret = ath11k_wmi_pdev_set_param(ar, WMI_PDEV_PARAM_RX_CHAIN_MASK,
+ rx_ant, ar->pdev->pdev_id);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to set rx-chainmask: %d, req 0x%x\n",
+ ret, rx_ant);
+ return ret;
+ }
+
+ ar->num_rx_chains = get_num_chains(rx_ant);
+
+ /* Reload HT/VHT/HE capability */
+ ath11k_mac_setup_ht_vht_cap(ar, &ar->pdev->cap, NULL);
+ ath11k_mac_setup_he_cap(ar, &ar->pdev->cap);
+
+ return 0;
+}
+
+int ath11k_mac_tx_mgmt_pending_free(int buf_id, void *skb, void *ctx)
+{
+ struct ath11k *ar = ctx;
+ struct ath11k_base *ab = ar->ab;
+ struct sk_buff *msdu = skb;
+ struct ieee80211_tx_info *info;
+
+ spin_lock_bh(&ar->txmgmt_idr_lock);
+ idr_remove(&ar->txmgmt_idr, buf_id);
+ spin_unlock_bh(&ar->txmgmt_idr_lock);
+ dma_unmap_single(ab->dev, ATH11K_SKB_CB(msdu)->paddr, msdu->len,
+ DMA_TO_DEVICE);
+
+ info = IEEE80211_SKB_CB(msdu);
+ memset(&info->status, 0, sizeof(info->status));
+
+ ieee80211_free_txskb(ar->hw, msdu);
+
+ return 0;
+}
+
+static int ath11k_mac_vif_txmgmt_idr_remove(int buf_id, void *skb, void *ctx)
+{
+ struct ieee80211_vif *vif = ctx;
+ struct ath11k_skb_cb *skb_cb = ATH11K_SKB_CB((struct sk_buff *)skb);
+ struct sk_buff *msdu = skb;
+ struct ath11k *ar = skb_cb->ar;
+ struct ath11k_base *ab = ar->ab;
+
+ if (skb_cb->vif == vif) {
+ spin_lock_bh(&ar->txmgmt_idr_lock);
+ idr_remove(&ar->txmgmt_idr, buf_id);
+ spin_unlock_bh(&ar->txmgmt_idr_lock);
+ dma_unmap_single(ab->dev, skb_cb->paddr, msdu->len,
+ DMA_TO_DEVICE);
+ }
+
+ return 0;
+}
+
+static int ath11k_mac_mgmt_tx_wmi(struct ath11k *ar, struct ath11k_vif *arvif,
+ struct sk_buff *skb)
+{
+ struct ath11k_base *ab = ar->ab;
+ struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
+ dma_addr_t paddr;
+ int buf_id;
+ int ret;
+
+ spin_lock_bh(&ar->txmgmt_idr_lock);
+ buf_id = idr_alloc(&ar->txmgmt_idr, skb, 0,
+ ATH11K_TX_MGMT_NUM_PENDING_MAX, GFP_ATOMIC);
+ spin_unlock_bh(&ar->txmgmt_idr_lock);
+ if (buf_id < 0)
+ return -ENOSPC;
+
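+ /* Protected robust mgmt frames need room for the CCMP MIC appended
+ * during encryption, so extend the skb before DMA mapping.
+ */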
+ if ((ieee80211_is_action(hdr->frame_control) ||
+ ieee80211_is_deauth(hdr->frame_control) ||
+ ieee80211_is_disassoc(hdr->frame_control)) &&
+ ieee80211_has_protected(hdr->frame_control)) {
+ skb_put(skb, IEEE80211_CCMP_MIC_LEN);
+ }
+
+ paddr = dma_map_single(ab->dev, skb->data, skb->len, DMA_TO_DEVICE);
+ if (dma_mapping_error(ab->dev, paddr)) {
+ ath11k_warn(ab, "failed to DMA map mgmt Tx buffer\n");
+ ret = -EIO;
+ goto err_free_idr;
+ }
+
+ ATH11K_SKB_CB(skb)->paddr = paddr;
+
+ ret = ath11k_wmi_mgmt_send(ar, arvif->vdev_id, buf_id, skb);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to send mgmt frame: %d\n", ret);
+ goto err_unmap_buf;
+ }
+
+ return 0;
+
+err_unmap_buf:
+ dma_unmap_single(ab->dev, ATH11K_SKB_CB(skb)->paddr,
+ skb->len, DMA_TO_DEVICE);
+err_free_idr:
+ spin_lock_bh(&ar->txmgmt_idr_lock);
+ idr_remove(&ar->txmgmt_idr, buf_id);
+ spin_unlock_bh(&ar->txmgmt_idr_lock);
+
+ return ret;
+}
+
+static void ath11k_mgmt_over_wmi_tx_purge(struct ath11k *ar)
+{
+ struct sk_buff *skb;
+
+ while ((skb = skb_dequeue(&ar->wmi_mgmt_tx_queue)) != NULL)
+ ieee80211_free_txskb(ar->hw, skb);
+}
+
+static void ath11k_mgmt_over_wmi_tx_work(struct work_struct *work)
+{
+ struct ath11k *ar = container_of(work, struct ath11k, wmi_mgmt_tx_work);
+ struct ieee80211_tx_info *info;
+ struct ath11k_vif *arvif;
+ struct sk_buff *skb;
+ int ret;
+
+ while ((skb = skb_dequeue(&ar->wmi_mgmt_tx_queue)) != NULL) {
+ info = IEEE80211_SKB_CB(skb);
+ arvif = ath11k_vif_to_arvif(info->control.vif);
+
+ ret = ath11k_mac_mgmt_tx_wmi(ar, arvif, skb);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to transmit management frame %d\n",
+ ret);
+ ieee80211_free_txskb(ar->hw, skb);
+ } else {
+ atomic_inc(&ar->num_pending_mgmt_tx);
+ }
+ }
+}
+
+static int ath11k_mac_mgmt_tx(struct ath11k *ar, struct sk_buff *skb,
+ bool is_prb_rsp)
+{
+ struct sk_buff_head *q = &ar->wmi_mgmt_tx_queue;
+
+ if (test_bit(ATH11K_FLAG_CRASH_FLUSH, &ar->ab->dev_flags))
+ return -ESHUTDOWN;
+
+ /* Drop probe response packets when the pending management tx
+ * count reaches a certain threshold, so that other mgmt frames
+ * like auth and assoc are sent on time and connections can be
+ * established successfully.
+ */
+ if (is_prb_rsp &&
+ atomic_read(&ar->num_pending_mgmt_tx) > ATH11K_PRB_RSP_DROP_THRESHOLD) {
+ ath11k_warn(ar->ab,
+ "dropping probe response as pending queue is almost full\n");
+ return -ENOSPC;
+ }
+
+ if (skb_queue_len(q) == ATH11K_TX_MGMT_NUM_PENDING_MAX) {
+ ath11k_warn(ar->ab, "mgmt tx queue is full\n");
+ return -ENOSPC;
+ }
+
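+ /* Queue the frame and let the wmi mgmt tx worker send it to firmware. */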
+ skb_queue_tail(q, skb);
+ ieee80211_queue_work(ar->hw, &ar->wmi_mgmt_tx_work);
+
+ return 0;
+}
+
+static void ath11k_mac_op_tx(struct ieee80211_hw *hw,
+ struct ieee80211_tx_control *control,
+ struct sk_buff *skb)
+{
+ struct ath11k *ar = hw->priv;
+ struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+ struct ieee80211_vif *vif = info->control.vif;
+ struct ath11k_vif *arvif = ath11k_vif_to_arvif(vif);
+ struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
+ bool is_prb_rsp;
+ int ret;
+
+ if (ieee80211_is_mgmt(hdr->frame_control)) {
+ is_prb_rsp = ieee80211_is_probe_resp(hdr->frame_control);
+ ret = ath11k_mac_mgmt_tx(ar, skb, is_prb_rsp);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to queue management frame %d\n",
+ ret);
+ ieee80211_free_txskb(ar->hw, skb);
+ }
+ return;
+ }
+
+ ret = ath11k_dp_tx(ar, arvif, skb);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to transmit frame %d\n", ret);
+ ieee80211_free_txskb(ar->hw, skb);
+ }
+}
+
+void ath11k_mac_drain_tx(struct ath11k *ar)
+{
+ /* make sure rcu-protected mac80211 tx path itself is drained */
+ synchronize_net();
+
+ cancel_work_sync(&ar->wmi_mgmt_tx_work);
+ ath11k_mgmt_over_wmi_tx_purge(ar);
+}
+
+static int ath11k_mac_config_mon_status_default(struct ath11k *ar, bool enable)
+{
+ struct htt_rx_ring_tlv_filter tlv_filter = {0};
+ u32 ring_id;
+
+ if (enable)
+ tlv_filter = ath11k_mac_mon_status_filter_default;
+
+ ring_id = ar->dp.rx_mon_status_refill_ring.refill_buf_ring.ring_id;
+
+ return ath11k_dp_tx_htt_rx_filter_setup(ar->ab, ring_id, ar->dp.mac_id,
+ HAL_RXDMA_MONITOR_STATUS,
+ DP_RX_BUFFER_SIZE, &tlv_filter);
+}
+
+static int ath11k_mac_op_start(struct ieee80211_hw *hw)
+{
+ struct ath11k *ar = hw->priv;
+ struct ath11k_base *ab = ar->ab;
+ struct ath11k_pdev *pdev = ar->pdev;
+ int ret;
+
+ ath11k_mac_drain_tx(ar);
+ mutex_lock(&ar->conf_mutex);
+
+ switch (ar->state) {
+ case ATH11K_STATE_OFF:
+ ar->state = ATH11K_STATE_ON;
+ break;
+ case ATH11K_STATE_RESTARTING:
+ ar->state = ATH11K_STATE_RESTARTED;
+ break;
+ case ATH11K_STATE_RESTARTED:
+ case ATH11K_STATE_WEDGED:
+ case ATH11K_STATE_ON:
+ WARN_ON(1);
+ ret = -EINVAL;
+ goto err;
+ }
+
+ ret = ath11k_wmi_pdev_set_param(ar, WMI_PDEV_PARAM_PMF_QOS,
+ 1, pdev->pdev_id);
+ if (ret) {
+ ath11k_err(ar->ab, "failed to enable PMF QOS: (%d\n", ret);
+ goto err;
+ }
+
+ ret = ath11k_wmi_pdev_set_param(ar, WMI_PDEV_PARAM_DYNAMIC_BW, 1,
+ pdev->pdev_id);
+ if (ret) {
+ ath11k_err(ar->ab, "failed to enable dynamic bw: %d\n", ret);
+ goto err;
+ }
+
+ ret = ath11k_wmi_pdev_set_param(ar, WMI_PDEV_PARAM_ARP_AC_OVERRIDE,
+ 0, pdev->pdev_id);
+ if (ret) {
+ ath11k_err(ab, "failed to set ac override for ARP: %d\n",
+ ret);
+ goto err;
+ }
+
+ ret = ath11k_wmi_send_dfs_phyerr_offload_enable_cmd(ar, pdev->pdev_id);
+ if (ret) {
+ ath11k_err(ab, "failed to offload radar detection: %d\n",
+ ret);
+ goto err;
+ }
+
+ ret = ath11k_dp_tx_htt_h2t_ppdu_stats_req(ar,
+ HTT_PPDU_STATS_TAG_DEFAULT);
+ if (ret) {
+ ath11k_err(ab, "failed to req ppdu stats: %d\n", ret);
+ goto err;
+ }
+
+ ret = ath11k_wmi_pdev_set_param(ar, WMI_PDEV_PARAM_MESH_MCAST_ENABLE,
+ 1, pdev->pdev_id);
+ if (ret) {
+ ath11k_err(ar->ab, "failed to enable MESH MCAST ENABLE: (%d\n", ret);
+ goto err;
+ }
+
+ __ath11k_set_antenna(ar, ar->cfg_tx_chainmask, ar->cfg_rx_chainmask);
+
+ /* TODO: Do we need to enable ANI? */
+
+ ath11k_reg_update_chan_list(ar);
+
+ ar->num_started_vdevs = 0;
+ ar->num_created_vdevs = 0;
+ ar->num_peers = 0;
+
+ /* Configure monitor status ring with default rx_filter to get rx status
+ * such as rssi, rx_duration.
+ */
+ ret = ath11k_mac_config_mon_status_default(ar, true);
+ if (ret) {
+ ath11k_err(ab, "failed to configure monitor status ring with default rx_filter: (%d)\n",
+ ret);
+ goto err;
+ }
+
+ mutex_unlock(&ar->conf_mutex);
+
+ rcu_assign_pointer(ab->pdevs_active[ar->pdev_idx],
+ &ab->pdevs[ar->pdev_idx]);
+
+ return 0;
+
+err:
+ ar->state = ATH11K_STATE_OFF;
+ mutex_unlock(&ar->conf_mutex);
+
+ return ret;
+}
+
+static void ath11k_mac_op_stop(struct ieee80211_hw *hw)
+{
+ struct ath11k *ar = hw->priv;
+ struct htt_ppdu_stats_info *ppdu_stats, *tmp;
+ int ret;
+
+ ath11k_mac_drain_tx(ar);
+
+ mutex_lock(&ar->conf_mutex);
+ ret = ath11k_mac_config_mon_status_default(ar, false);
+ if (ret)
+ ath11k_err(ar->ab, "failed to clear rx_filter for monitor status ring: (%d)\n",
+ ret);
+
+ clear_bit(ATH11K_CAC_RUNNING, &ar->dev_flags);
+ ar->state = ATH11K_STATE_OFF;
+ mutex_unlock(&ar->conf_mutex);
+
+ cancel_delayed_work_sync(&ar->scan.timeout);
+ cancel_work_sync(&ar->regd_update_work);
+
+ spin_lock_bh(&ar->data_lock);
+ list_for_each_entry_safe(ppdu_stats, tmp, &ar->ppdu_stats_info, list) {
+ list_del(&ppdu_stats->list);
+ kfree(ppdu_stats);
+ }
+ spin_unlock_bh(&ar->data_lock);
+
+ rcu_assign_pointer(ar->ab->pdevs_active[ar->pdev_idx], NULL);
+
+ synchronize_rcu();
+
+ atomic_set(&ar->num_pending_mgmt_tx, 0);
+}
+
+static void
+ath11k_mac_setup_vdev_create_params(struct ath11k_vif *arvif,
+ struct vdev_create_params *params)
+{
+ struct ath11k *ar = arvif->ar;
+ struct ath11k_pdev *pdev = ar->pdev;
+
+ params->if_id = arvif->vdev_id;
+ params->type = arvif->vdev_type;
+ params->subtype = arvif->vdev_subtype;
+ params->pdev_id = pdev->pdev_id;
+
+ if (pdev->cap.supported_bands & WMI_HOST_WLAN_2G_CAP) {
+ params->chains[NL80211_BAND_2GHZ].tx = ar->num_tx_chains;
+ params->chains[NL80211_BAND_2GHZ].rx = ar->num_rx_chains;
+ }
+ if (pdev->cap.supported_bands & WMI_HOST_WLAN_5G_CAP) {
+ params->chains[NL80211_BAND_5GHZ].tx = ar->num_tx_chains;
+ params->chains[NL80211_BAND_5GHZ].rx = ar->num_rx_chains;
+ }
+}
+
+static u32
+ath11k_mac_prepare_he_mode(struct ath11k_pdev *pdev, u32 viftype)
+{
+ struct ath11k_pdev_cap *pdev_cap = &pdev->cap;
+ struct ath11k_band_cap *cap_band = NULL;
+ u32 *hecap_phy_ptr = NULL;
+ u32 hemode = 0;
+
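+ /* Pick the HE PHY capability info from whichever band this pdev
+ * supports.
+ */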
+ if (pdev->cap.supported_bands & WMI_HOST_WLAN_2G_CAP)
+ cap_band = &pdev_cap->band[NL80211_BAND_2GHZ];
+ else
+ cap_band = &pdev_cap->band[NL80211_BAND_5GHZ];
+
+ hecap_phy_ptr = &cap_band->he_cap_phy_info[0];
+
+ hemode = FIELD_PREP(HE_MODE_SU_TX_BFEE, HE_SU_BFEE_ENABLE) |
+ FIELD_PREP(HE_MODE_SU_TX_BFER, HECAP_PHY_SUBFMR_GET(hecap_phy_ptr)) |
+ FIELD_PREP(HE_MODE_UL_MUMIMO, HECAP_PHY_ULMUMIMO_GET(hecap_phy_ptr));
+
+ /* TODO WDS and other modes */
+ if (viftype == NL80211_IFTYPE_AP) {
+ hemode |= FIELD_PREP(HE_MODE_MU_TX_BFER,
+ HECAP_PHY_MUBFMR_GET(hecap_phy_ptr)) |
+ FIELD_PREP(HE_MODE_DL_OFDMA, HE_DL_MUOFDMA_ENABLE) |
+ FIELD_PREP(HE_MODE_UL_OFDMA, HE_UL_MUOFDMA_ENABLE);
+ } else {
+ hemode |= FIELD_PREP(HE_MODE_MU_TX_BFEE, HE_MU_BFEE_ENABLE);
+ }
+
+ return hemode;
+}
+
+static int ath11k_set_he_mu_sounding_mode(struct ath11k *ar,
+ struct ath11k_vif *arvif)
+{
+ u32 param_id, param_value;
+ struct ath11k_base *ab = ar->ab;
+ int ret = 0;
+
+ param_id = WMI_VDEV_PARAM_SET_HEMU_MODE;
+ param_value = ath11k_mac_prepare_he_mode(ar->pdev, arvif->vif->type);
+ ret = ath11k_wmi_vdev_set_param_cmd(ar, arvif->vdev_id,
+ param_id, param_value);
+ if (ret) {
+ ath11k_warn(ab, "failed to set vdev %d HE MU mode: %d param_value %x\n",
+ arvif->vdev_id, ret, param_value);
+ return ret;
+ }
+ param_id = WMI_VDEV_PARAM_SET_HE_SOUNDING_MODE;
+ param_value =
+ FIELD_PREP(HE_VHT_SOUNDING_MODE, HE_VHT_SOUNDING_MODE_ENABLE) |
+ FIELD_PREP(HE_TRIG_NONTRIG_SOUNDING_MODE,
+ HE_TRIG_NONTRIG_SOUNDING_MODE_ENABLE);
+ ret = ath11k_wmi_vdev_set_param_cmd(ar, arvif->vdev_id,
+ param_id, param_value);
+ if (ret) {
+ ath11k_warn(ab, "failed to set vdev %d HE MU mode: %d\n",
+ arvif->vdev_id, ret);
+ return ret;
+ }
+ return ret;
+}
+
+static int ath11k_mac_op_add_interface(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif)
+{
+ struct ath11k *ar = hw->priv;
+ struct ath11k_base *ab = ar->ab;
+ struct ath11k_vif *arvif = ath11k_vif_to_arvif(vif);
+ struct vdev_create_params vdev_param = {0};
+ struct peer_create_params peer_param;
+ u32 param_id, param_value;
+ u16 nss;
+ int i;
+ int ret;
+ int bit;
+
+ vif->driver_flags |= IEEE80211_VIF_SUPPORTS_UAPSD;
+
+ mutex_lock(&ar->conf_mutex);
+
+ if (vif->type == NL80211_IFTYPE_AP &&
+ ar->num_peers > (ar->max_num_peers - 1)) {
+ ath11k_warn(ab, "failed to create vdev due to insufficient peer entry resource in firmware\n");
+ ret = -ENOBUFS;
+ goto err;
+ }
+
+ if (ar->num_created_vdevs > (TARGET_NUM_VDEVS - 1)) {
+ ath11k_warn(ab, "failed to create vdev, reached max vdev limit %d\n",
+ TARGET_NUM_VDEVS);
+ ret = -EBUSY;
+ goto err;
+ }
+
+ memset(arvif, 0, sizeof(*arvif));
+
+ arvif->ar = ar;
+ arvif->vif = vif;
+
+ INIT_LIST_HEAD(&arvif->list);
+
+ /* Should we initialize any worker to handle connection loss indication
+ * from firmware in sta mode?
+ */
+
+ for (i = 0; i < ARRAY_SIZE(arvif->bitrate_mask.control); i++) {
+ arvif->bitrate_mask.control[i].legacy = 0xffffffff;
+ memset(arvif->bitrate_mask.control[i].ht_mcs, 0xff,
+ sizeof(arvif->bitrate_mask.control[i].ht_mcs));
+ memset(arvif->bitrate_mask.control[i].vht_mcs, 0xff,
+ sizeof(arvif->bitrate_mask.control[i].vht_mcs));
+ }
+
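+ /* Grab the lowest free vdev id from the 64-bit free vdev map. */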
+ bit = __ffs64(ab->free_vdev_map);
+
+ arvif->vdev_id = bit;
+ arvif->vdev_subtype = WMI_VDEV_SUBTYPE_NONE;
+
+ switch (vif->type) {
+ case NL80211_IFTYPE_UNSPECIFIED:
+ case NL80211_IFTYPE_STATION:
+ arvif->vdev_type = WMI_VDEV_TYPE_STA;
+ break;
+ case NL80211_IFTYPE_MESH_POINT:
+ arvif->vdev_subtype = WMI_VDEV_SUBTYPE_MESH_11S;
+ /* fall through */
+ case NL80211_IFTYPE_AP:
+ arvif->vdev_type = WMI_VDEV_TYPE_AP;
+ break;
+ case NL80211_IFTYPE_MONITOR:
+ arvif->vdev_type = WMI_VDEV_TYPE_MONITOR;
+ break;
+ default:
+ WARN_ON(1);
+ break;
+ }
+
+ ath11k_dbg(ar->ab, ATH11K_DBG_MAC, "mac add interface id %d type %d subtype %d map %llx\n",
+ arvif->vdev_id, arvif->vdev_type, arvif->vdev_subtype,
+ ab->free_vdev_map);
+
+ vif->cab_queue = arvif->vdev_id % (ATH11K_HW_MAX_QUEUES - 1);
+ for (i = 0; i < ARRAY_SIZE(vif->hw_queue); i++)
+ vif->hw_queue[i] = i % (ATH11K_HW_MAX_QUEUES - 1);
+
+ ath11k_mac_setup_vdev_create_params(arvif, &vdev_param);
+
+ ret = ath11k_wmi_vdev_create(ar, vif->addr, &vdev_param);
+ if (ret) {
+ ath11k_warn(ab, "failed to create WMI vdev %d: %d\n",
+ arvif->vdev_id, ret);
+ goto err;
+ }
+
+ ar->num_created_vdevs++;
+
+ ab->free_vdev_map &= ~(1LL << arvif->vdev_id);
+ spin_lock_bh(&ar->data_lock);
+ list_add(&arvif->list, &ar->arvifs);
+ spin_unlock_bh(&ar->data_lock);
+
+ param_id = WMI_VDEV_PARAM_TX_ENCAP_TYPE;
+ param_value = ATH11K_HW_TXRX_NATIVE_WIFI;
+ ret = ath11k_wmi_vdev_set_param_cmd(ar, arvif->vdev_id,
+ param_id, param_value);
+ if (ret) {
+ ath11k_warn(ab, "failed to set vdev %d tx encap mode: %d\n",
+ arvif->vdev_id, ret);
+ goto err_vdev_del;
+ }
+
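+ /* Derive NSS from the configured Tx chainmask, defaulting to 1. */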
+ nss = get_num_chains(ar->cfg_tx_chainmask) ? : 1;
+ ret = ath11k_wmi_vdev_set_param_cmd(ar, arvif->vdev_id,
+ WMI_VDEV_PARAM_NSS, nss);
+ if (ret) {
+ ath11k_warn(ab, "failed to set vdev %d chainmask 0x%x, nss %d :%d\n",
+ arvif->vdev_id, ar->cfg_tx_chainmask, nss, ret);
+ goto err_vdev_del;
+ }
+
+ switch (arvif->vdev_type) {
+ case WMI_VDEV_TYPE_AP:
+ peer_param.vdev_id = arvif->vdev_id;
+ peer_param.peer_addr = vif->addr;
+ peer_param.peer_type = WMI_PEER_TYPE_DEFAULT;
+ ret = ath11k_peer_create(ar, arvif, NULL, &peer_param);
+ if (ret) {
+ ath11k_warn(ab, "failed to vdev %d create peer for AP: %d\n",
+ arvif->vdev_id, ret);
+ goto err_vdev_del;
+ }
+
+ ret = ath11k_mac_set_kickout(arvif);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to set vdev %i kickout parameters: %d\n",
+ arvif->vdev_id, ret);
+ goto err_peer_del;
+ }
+ break;
+ case WMI_VDEV_TYPE_STA:
+ param_id = WMI_STA_PS_PARAM_RX_WAKE_POLICY;
+ param_value = WMI_STA_PS_RX_WAKE_POLICY_WAKE;
+ ret = ath11k_wmi_set_sta_ps_param(ar, arvif->vdev_id,
+ param_id, param_value);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to set vdev %d RX wake policy: %d\n",
+ arvif->vdev_id, ret);
+ goto err_peer_del;
+ }
+
+ param_id = WMI_STA_PS_PARAM_TX_WAKE_THRESHOLD;
+ param_value = WMI_STA_PS_TX_WAKE_THRESHOLD_ALWAYS;
+ ret = ath11k_wmi_set_sta_ps_param(ar, arvif->vdev_id,
+ param_id, param_value);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to set vdev %d TX wake threshold: %d\n",
+ arvif->vdev_id, ret);
+ goto err_peer_del;
+ }
+
+ param_id = WMI_STA_PS_PARAM_PSPOLL_COUNT;
+ param_value = WMI_STA_PS_PSPOLL_COUNT_NO_MAX;
+ ret = ath11k_wmi_set_sta_ps_param(ar, arvif->vdev_id,
+ param_id, param_value);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to set vdev %d pspoll count: %d\n",
+ arvif->vdev_id, ret);
+ goto err_peer_del;
+ }
+
+ ret = ath11k_wmi_pdev_set_ps_mode(ar, arvif->vdev_id, false);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to disable vdev %d ps mode: %d\n",
+ arvif->vdev_id, ret);
+ goto err_peer_del;
+ }
+ break;
+ default:
+ break;
+ }
+
+ arvif->txpower = vif->bss_conf.txpower;
+ ret = ath11k_mac_txpower_recalc(ar);
+ if (ret)
+ goto err_peer_del;
+
+ param_id = WMI_VDEV_PARAM_RTS_THRESHOLD;
+ param_value = ar->hw->wiphy->rts_threshold;
+ ret = ath11k_wmi_vdev_set_param_cmd(ar, arvif->vdev_id,
+ param_id, param_value);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to set rts threshold for vdev %d: %d\n",
+ arvif->vdev_id, ret);
+ }
+
+ ath11k_dp_vdev_tx_attach(ar, arvif);
+
+ mutex_unlock(&ar->conf_mutex);
+
+ return 0;
+
+err_peer_del:
+ if (arvif->vdev_type == WMI_VDEV_TYPE_AP) {
+ ar->num_peers--;
+ ath11k_wmi_send_peer_delete_cmd(ar, vif->addr, arvif->vdev_id);
+ }
+
+err_vdev_del:
+ ath11k_wmi_vdev_delete(ar, arvif->vdev_id);
+ ar->num_created_vdevs--;
+ ab->free_vdev_map |= 1LL << arvif->vdev_id;
+ spin_lock_bh(&ar->data_lock);
+ list_del(&arvif->list);
+ spin_unlock_bh(&ar->data_lock);
+
+err:
+ mutex_unlock(&ar->conf_mutex);
+
+ return ret;
+}
+
+static int ath11k_mac_vif_unref(int buf_id, void *skb, void *ctx)
+{
+ struct ieee80211_vif *vif = (struct ieee80211_vif *)ctx;
+ struct ath11k_skb_cb *skb_cb = ATH11K_SKB_CB((struct sk_buff *)skb);
+
+ if (skb_cb->vif == vif)
+ skb_cb->vif = NULL;
+
+ return 0;
+}
+
+static void ath11k_mac_op_remove_interface(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif)
+{
+ struct ath11k *ar = hw->priv;
+ struct ath11k_vif *arvif = ath11k_vif_to_arvif(vif);
+ struct ath11k_base *ab = ar->ab;
+ int ret;
+ int i;
+
+ mutex_lock(&ar->conf_mutex);
+
+ ath11k_dbg(ab, ATH11K_DBG_MAC, "mac remove interface (vdev %d)\n",
+ arvif->vdev_id);
+
+ ab->free_vdev_map |= 1LL << (arvif->vdev_id);
+ spin_lock_bh(&ar->data_lock);
+ list_del(&arvif->list);
+ spin_unlock_bh(&ar->data_lock);
+
+ if (arvif->vdev_type == WMI_VDEV_TYPE_AP) {
+ ret = ath11k_peer_delete(ar, arvif->vdev_id, vif->addr);
+ if (ret)
+ ath11k_warn(ab, "failed to submit AP self-peer removal on vdev %d: %d\n",
+ arvif->vdev_id, ret);
+ }
+
+ ret = ath11k_wmi_vdev_delete(ar, arvif->vdev_id);
+ if (ret)
+ ath11k_warn(ab, "failed to delete WMI vdev %d: %d\n",
+ arvif->vdev_id, ret);
+
+ ar->num_created_vdevs--;
+
+ ath11k_peer_cleanup(ar, arvif->vdev_id);
+
+ idr_for_each(&ar->txmgmt_idr,
+ ath11k_mac_vif_txmgmt_idr_remove, vif);
+
+ for (i = 0; i < DP_TCL_NUM_RING_MAX; i++) {
+ spin_lock_bh(&ab->dp.tx_ring[i].tx_idr_lock);
+ idr_for_each(&ab->dp.tx_ring[i].txbuf_idr,
+ ath11k_mac_vif_unref, vif);
+ spin_unlock_bh(&ab->dp.tx_ring[i].tx_idr_lock);
+ }
+
+ /* Recalc txpower for remaining vdev */
+ ath11k_mac_txpower_recalc(ar);
+ clear_bit(ATH11K_FLAG_MONITOR_ENABLED, &ar->monitor_flags);
+
+ /* TODO: recalculate traffic pause state based on the available vdevs */
+
+ mutex_unlock(&ar->conf_mutex);
+}
+
+/* FIXME: Has to be verified. */
+#define SUPPORTED_FILTERS \
+ (FIF_ALLMULTI | \
+ FIF_CONTROL | \
+ FIF_PSPOLL | \
+ FIF_OTHER_BSS | \
+ FIF_BCN_PRBRESP_PROMISC | \
+ FIF_PROBE_REQ | \
+ FIF_FCSFAIL)
+
+static void ath11k_mac_op_configure_filter(struct ieee80211_hw *hw,
+ unsigned int changed_flags,
+ unsigned int *total_flags,
+ u64 multicast)
+{
+ struct ath11k *ar = hw->priv;
+ bool reset_flag = false;
+ int ret = 0;
+
+ mutex_lock(&ar->conf_mutex);
+
+ changed_flags &= SUPPORTED_FILTERS;
+ *total_flags &= SUPPORTED_FILTERS;
+ ar->filter_flags = *total_flags;
+
+ /* Monitor mode is driven by FIF_BCN_PRBRESP_PROMISC: enable the
+ * monitor ring config when it is set, reset it otherwise.
+ */
+ reset_flag = !(ar->filter_flags & FIF_BCN_PRBRESP_PROMISC);
+
+ ret = ath11k_dp_tx_htt_monitor_mode_ring_config(ar, reset_flag);
+ if (!ret) {
+ if (!reset_flag)
+ set_bit(ATH11K_FLAG_MONITOR_ENABLED, &ar->monitor_flags);
+ else
+ clear_bit(ATH11K_FLAG_MONITOR_ENABLED, &ar->monitor_flags);
+ } else {
+ ath11k_warn(ar->ab,
+ "fail to set monitor filter: %d\n", ret);
+ }
+ mutex_unlock(&ar->conf_mutex);
+}
+
+static int ath11k_mac_op_get_antenna(struct ieee80211_hw *hw, u32 *tx_ant, u32 *rx_ant)
+{
+ struct ath11k *ar = hw->priv;
+
+ mutex_lock(&ar->conf_mutex);
+
+ *tx_ant = ar->cfg_tx_chainmask;
+ *rx_ant = ar->cfg_rx_chainmask;
+
+ mutex_unlock(&ar->conf_mutex);
+
+ return 0;
+}
+
+static int ath11k_mac_op_set_antenna(struct ieee80211_hw *hw, u32 tx_ant, u32 rx_ant)
+{
+ struct ath11k *ar = hw->priv;
+ int ret;
+
+ mutex_lock(&ar->conf_mutex);
+ ret = __ath11k_set_antenna(ar, tx_ant, rx_ant);
+ mutex_unlock(&ar->conf_mutex);
+
+ return ret;
+}
+
+static int ath11k_mac_op_ampdu_action(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_ampdu_params *params)
+{
+ struct ath11k *ar = hw->priv;
+ int ret = -EINVAL;
+
+ mutex_lock(&ar->conf_mutex);
+
+ switch (params->action) {
+ case IEEE80211_AMPDU_RX_START:
+ ret = ath11k_dp_rx_ampdu_start(ar, params);
+ break;
+ case IEEE80211_AMPDU_RX_STOP:
+ ret = ath11k_dp_rx_ampdu_stop(ar, params);
+ break;
+ case IEEE80211_AMPDU_TX_START:
+ case IEEE80211_AMPDU_TX_STOP_CONT:
+ case IEEE80211_AMPDU_TX_STOP_FLUSH:
+ case IEEE80211_AMPDU_TX_STOP_FLUSH_CONT:
+ case IEEE80211_AMPDU_TX_OPERATIONAL:
+ /* Tx A-MPDU aggregation offloaded to hw/fw so deny mac80211
+ * Tx aggregation requests.
+ */
+ ret = -EOPNOTSUPP;
+ break;
+ }
+
+ mutex_unlock(&ar->conf_mutex);
+
+ return ret;
+}
+
+static int ath11k_mac_op_add_chanctx(struct ieee80211_hw *hw,
+ struct ieee80211_chanctx_conf *ctx)
+{
+ struct ath11k *ar = hw->priv;
+ struct ath11k_base *ab = ar->ab;
+
+ ath11k_dbg(ab, ATH11K_DBG_MAC,
+ "mac chanctx add freq %hu width %d ptr %pK\n",
+ ctx->def.chan->center_freq, ctx->def.width, ctx);
+
+ mutex_lock(&ar->conf_mutex);
+
+ spin_lock_bh(&ar->data_lock);
+ /* TODO: With multiple channel contexts, populate rx_channel from
+ * the Rx PPDU desc information.
+ */
+ ar->rx_channel = ctx->def.chan;
+ spin_unlock_bh(&ar->data_lock);
+
+ mutex_unlock(&ar->conf_mutex);
+
+ return 0;
+}
+
+static void ath11k_mac_op_remove_chanctx(struct ieee80211_hw *hw,
+ struct ieee80211_chanctx_conf *ctx)
+{
+ struct ath11k *ar = hw->priv;
+ struct ath11k_base *ab = ar->ab;
+
+ ath11k_dbg(ab, ATH11K_DBG_MAC,
+ "mac chanctx remove freq %hu width %d ptr %pK\n",
+ ctx->def.chan->center_freq, ctx->def.width, ctx);
+
+ mutex_lock(&ar->conf_mutex);
+
+ spin_lock_bh(&ar->data_lock);
+ /* TODO: If another channel context remains, populate rx_channel
+ * with the channel of that remaining context.
+ */
+ ar->rx_channel = NULL;
+ spin_unlock_bh(&ar->data_lock);
+
+ mutex_unlock(&ar->conf_mutex);
+}
+
+static inline int ath11k_mac_vdev_setup_sync(struct ath11k *ar)
+{
+ lockdep_assert_held(&ar->conf_mutex);
+
+ if (test_bit(ATH11K_FLAG_CRASH_FLUSH, &ar->ab->dev_flags))
+ return -ESHUTDOWN;
+
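+ /* The vdev start/restart/stop response from firmware completes
+ * vdev_setup_done.
+ */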
+ if (!wait_for_completion_timeout(&ar->vdev_setup_done,
+ ATH11K_VDEV_SETUP_TIMEOUT_HZ))
+ return -ETIMEDOUT;
+
+ return ar->last_wmi_vdev_start_status ? -EINVAL : 0;
+}
+
+static int
+ath11k_mac_vdev_start_restart(struct ath11k_vif *arvif,
+ const struct cfg80211_chan_def *chandef,
+ bool restart)
+{
+ struct ath11k *ar = arvif->ar;
+ struct ath11k_base *ab = ar->ab;
+ struct wmi_vdev_start_req_arg arg = {};
+ int he_support = arvif->vif->bss_conf.he_support;
+ int ret = 0;
+
+ lockdep_assert_held(&ar->conf_mutex);
+
+ reinit_completion(&ar->vdev_setup_done);
+
+ arg.vdev_id = arvif->vdev_id;
+ arg.dtim_period = arvif->dtim_period;
+ arg.bcn_intval = arvif->beacon_interval;
+
+ arg.channel.freq = chandef->chan->center_freq;
+ arg.channel.band_center_freq1 = chandef->center_freq1;
+ arg.channel.band_center_freq2 = chandef->center_freq2;
+ arg.channel.mode =
+ ath11k_phymodes[chandef->chan->band][chandef->width];
+
+ arg.channel.min_power = 0;
+ arg.channel.max_power = chandef->chan->max_power * 2;
+ arg.channel.max_reg_power = chandef->chan->max_reg_power * 2;
+ arg.channel.max_antenna_gain = chandef->chan->max_antenna_gain * 2;
+
+ arg.pref_tx_streams = ar->num_tx_chains;
+ arg.pref_rx_streams = ar->num_rx_chains;
+
+ if (arvif->vdev_type == WMI_VDEV_TYPE_AP) {
+ arg.ssid = arvif->u.ap.ssid;
+ arg.ssid_len = arvif->u.ap.ssid_len;
+ arg.hidden_ssid = arvif->u.ap.hidden_ssid;
+
+ /* For now allow DFS for AP mode */
+ arg.channel.chan_radar =
+ !!(chandef->chan->flags & IEEE80211_CHAN_RADAR);
+
+ arg.channel.passive = arg.channel.chan_radar;
+
+ spin_lock_bh(&ab->base_lock);
+ arg.regdomain = ar->ab->dfs_region;
+ spin_unlock_bh(&ab->base_lock);
+
+ /* TODO: Notify if secondary 80MHz also needs radar detection */
+ if (he_support) {
+ ret = ath11k_set_he_mu_sounding_mode(ar, arvif);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to set he mode vdev %i\n",
+ arg.vdev_id);
+ return ret;
+ }
+ }
+ }
+
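+ /* NO_IR channels must be treated as passive. */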
+ arg.channel.passive |= !!(chandef->chan->flags & IEEE80211_CHAN_NO_IR);
+
+ ath11k_dbg(ab, ATH11K_DBG_MAC,
+ "mac vdev %d start center_freq %d phymode %s\n",
+ arg.vdev_id, arg.channel.freq,
+ ath11k_wmi_phymode_str(arg.channel.mode));
+
+ ret = ath11k_wmi_vdev_start(ar, &arg, restart);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to %s WMI vdev %i\n",
+ restart ? "restart" : "start", arg.vdev_id);
+ return ret;
+ }
+
+ ret = ath11k_mac_vdev_setup_sync(ar);
+ if (ret) {
+ ath11k_warn(ab, "failed to synchronize setup for vdev %i %s: %d\n",
+ arg.vdev_id, restart ? "restart" : "start", ret);
+ return ret;
+ }
+
+ ar->num_started_vdevs++;
+
+ /* Enable the CAC flag when the channel's DFS CAC time (dfs_cac_ms)
+ * is valid, which is the case only for radar channels, and the DFS
+ * state is NL80211_DFS_USABLE, meaning CAC must complete before the
+ * channel can be used. The flag is used to drop rx packets during
+ * CAC.
+ */
+ /* TODO Set the flag for other interface types as required */
+ if (arvif->vdev_type == WMI_VDEV_TYPE_AP &&
+ chandef->chan->dfs_cac_ms &&
+ chandef->chan->dfs_state == NL80211_DFS_USABLE) {
+ set_bit(ATH11K_CAC_RUNNING, &ar->dev_flags);
+ ath11k_dbg(ab, ATH11K_DBG_MAC,
+ "CAC Started in chan_freq %d for vdev %d\n",
+ arg.channel.freq, arg.vdev_id);
+ }
+
+ ret = ath11k_mac_set_txbf_conf(arvif);
+ if (ret)
+ ath11k_warn(ab, "failed to set txbf conf for vdev %d: %d\n",
+ arvif->vdev_id, ret);
+
+ return 0;
+}
+
+static int ath11k_mac_vdev_stop(struct ath11k_vif *arvif)
+{
+ struct ath11k *ar = arvif->ar;
+ int ret;
+
+ lockdep_assert_held(&ar->conf_mutex);
+
+ reinit_completion(&ar->vdev_setup_done);
+
+ spin_lock_bh(&ar->data_lock);
+
+ ar->vdev_stop_status.stop_in_progress = true;
+ ar->vdev_stop_status.vdev_id = arvif->vdev_id;
+
+ spin_unlock_bh(&ar->data_lock);
+
+ ret = ath11k_wmi_vdev_stop(ar, arvif->vdev_id);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to stop WMI vdev %i: %d\n",
+ arvif->vdev_id, ret);
+ goto err;
+ }
+
+ ret = ath11k_mac_vdev_setup_sync(ar);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to synchronize setup for vdev %i: %d\n",
+ arvif->vdev_id, ret);
+ goto err;
+ }
+
+ WARN_ON(ar->num_started_vdevs == 0);
+
+ ar->num_started_vdevs--;
+
+ if (test_bit(ATH11K_CAC_RUNNING, &ar->dev_flags)) {
+ clear_bit(ATH11K_CAC_RUNNING, &ar->dev_flags);
+ ath11k_dbg(ar->ab, ATH11K_DBG_MAC, "CAC Stopped for vdev %d\n",
+ arvif->vdev_id);
+ }
+
+ return 0;
+err:
+ spin_lock_bh(&ar->data_lock);
+ ar->vdev_stop_status.stop_in_progress = false;
+ spin_unlock_bh(&ar->data_lock);
+
+ return ret;
+}
+
+static int ath11k_mac_vdev_start(struct ath11k_vif *arvif,
+ const struct cfg80211_chan_def *chandef)
+{
+ return ath11k_mac_vdev_start_restart(arvif, chandef, false);
+}
+
+static int ath11k_mac_vdev_restart(struct ath11k_vif *arvif,
+ const struct cfg80211_chan_def *chandef)
+{
+ return ath11k_mac_vdev_start_restart(arvif, chandef, true);
+}
+
+struct ath11k_mac_change_chanctx_arg {
+ struct ieee80211_chanctx_conf *ctx;
+ struct ieee80211_vif_chanctx_switch *vifs;
+ int n_vifs;
+ int next_vif;
+};
+
+static void
+ath11k_mac_change_chanctx_cnt_iter(void *data, u8 *mac,
+ struct ieee80211_vif *vif)
+{
+ struct ath11k_mac_change_chanctx_arg *arg = data;
+
+ if (rcu_access_pointer(vif->chanctx_conf) != arg->ctx)
+ return;
+
+ arg->n_vifs++;
+}
+
+static void
+ath11k_mac_change_chanctx_fill_iter(void *data, u8 *mac,
+ struct ieee80211_vif *vif)
+{
+ struct ath11k_mac_change_chanctx_arg *arg = data;
+ struct ieee80211_chanctx_conf *ctx;
+
+ ctx = rcu_access_pointer(vif->chanctx_conf);
+ if (ctx != arg->ctx)
+ return;
+
+ if (WARN_ON(arg->next_vif == arg->n_vifs))
+ return;
+
+ arg->vifs[arg->next_vif].vif = vif;
+ arg->vifs[arg->next_vif].old_ctx = ctx;
+ arg->vifs[arg->next_vif].new_ctx = ctx;
+ arg->next_vif++;
+}
+
+static void
+ath11k_mac_update_vif_chan(struct ath11k *ar,
+ struct ieee80211_vif_chanctx_switch *vifs,
+ int n_vifs)
+{
+ struct ath11k_base *ab = ar->ab;
+ struct ath11k_vif *arvif;
+ int ret;
+ int i;
+
+ lockdep_assert_held(&ar->conf_mutex);
+
+ for (i = 0; i < n_vifs; i++) {
+ arvif = (void *)vifs[i].vif->drv_priv;
+
+ ath11k_dbg(ab, ATH11K_DBG_MAC,
+ "mac chanctx switch vdev_id %i freq %hu->%hu width %d->%d\n",
+ arvif->vdev_id,
+ vifs[i].old_ctx->def.chan->center_freq,
+ vifs[i].new_ctx->def.chan->center_freq,
+ vifs[i].old_ctx->def.width,
+ vifs[i].new_ctx->def.width);
+
+ if (WARN_ON(!arvif->is_started))
+ continue;
+
+ if (WARN_ON(!arvif->is_up))
+ continue;
+
+ ret = ath11k_wmi_vdev_down(ar, arvif->vdev_id);
+ if (ret) {
+ ath11k_warn(ab, "failed to down vdev %d: %d\n",
+ arvif->vdev_id, ret);
+ continue;
+ }
+ }
+
+ /* All relevant vdevs are downed and associated channel resources
+ * should be available for the channel switch now.
+ */
+
+ /* TODO: Update ar->rx_channel */
+
+ for (i = 0; i < n_vifs; i++) {
+ arvif = (void *)vifs[i].vif->drv_priv;
+
+ if (WARN_ON(!arvif->is_started))
+ continue;
+
+ if (WARN_ON(!arvif->is_up))
+ continue;
+
+ ret = ath11k_mac_setup_bcn_tmpl(arvif);
+ if (ret)
+ ath11k_warn(ab, "failed to update bcn tmpl during csa: %d\n",
+ ret);
+
+ ret = ath11k_mac_vdev_restart(arvif, &vifs[i].new_ctx->def);
+ if (ret) {
+ ath11k_warn(ab, "failed to restart vdev %d: %d\n",
+ arvif->vdev_id, ret);
+ continue;
+ }
+
+ ret = ath11k_wmi_vdev_up(arvif->ar, arvif->vdev_id, arvif->aid,
+ arvif->bssid);
+ if (ret) {
+ ath11k_warn(ab, "failed to bring vdev up %d: %d\n",
+ arvif->vdev_id, ret);
+ continue;
+ }
+ }
+}
+
+static void
+ath11k_mac_update_active_vif_chan(struct ath11k *ar,
+ struct ieee80211_chanctx_conf *ctx)
+{
+ struct ath11k_mac_change_chanctx_arg arg = { .ctx = ctx };
+
+ lockdep_assert_held(&ar->conf_mutex);
+
+ ieee80211_iterate_active_interfaces_atomic(ar->hw,
+ IEEE80211_IFACE_ITER_NORMAL,
+ ath11k_mac_change_chanctx_cnt_iter,
+ &arg);
+ if (arg.n_vifs == 0)
+ return;
+
+ arg.vifs = kcalloc(arg.n_vifs, sizeof(arg.vifs[0]), GFP_KERNEL);
+ if (!arg.vifs)
+ return;
+
+ ieee80211_iterate_active_interfaces_atomic(ar->hw,
+ IEEE80211_IFACE_ITER_NORMAL,
+ ath11k_mac_change_chanctx_fill_iter,
+ &arg);
+
+ ath11k_mac_update_vif_chan(ar, arg.vifs, arg.n_vifs);
+
+ kfree(arg.vifs);
+}
+
+static void ath11k_mac_op_change_chanctx(struct ieee80211_hw *hw,
+ struct ieee80211_chanctx_conf *ctx,
+ u32 changed)
+{
+ struct ath11k *ar = hw->priv;
+ struct ath11k_base *ab = ar->ab;
+
+ mutex_lock(&ar->conf_mutex);
+
+ ath11k_dbg(ab, ATH11K_DBG_MAC,
+ "mac chanctx change freq %hu width %d ptr %pK changed %x\n",
+ ctx->def.chan->center_freq, ctx->def.width, ctx, changed);
+
+ /* This shouldn't really happen because channel switching should use
+ * switch_vif_chanctx().
+ */
+ if (WARN_ON(changed & IEEE80211_CHANCTX_CHANGE_CHANNEL))
+ goto unlock;
+
+ if (changed & IEEE80211_CHANCTX_CHANGE_WIDTH)
+ ath11k_mac_update_active_vif_chan(ar, ctx);
+
+ /* TODO: Recalc radar detection */
+
+unlock:
+ mutex_unlock(&ar->conf_mutex);
+}
+
+static int
+ath11k_mac_op_assign_vif_chanctx(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_chanctx_conf *ctx)
+{
+ struct ath11k *ar = hw->priv;
+ struct ath11k_base *ab = ar->ab;
+ struct ath11k_vif *arvif = (void *)vif->drv_priv;
+ int ret;
+
+ mutex_lock(&ar->conf_mutex);
+
+ ath11k_dbg(ab, ATH11K_DBG_MAC,
+ "mac chanctx assign ptr %pK vdev_id %i\n",
+ ctx, arvif->vdev_id);
+
+ if (WARN_ON(arvif->is_started)) {
+ mutex_unlock(&ar->conf_mutex);
+ return -EBUSY;
+ }
+
+ ret = ath11k_mac_vdev_start(arvif, &ctx->def);
+ if (ret) {
+ ath11k_warn(ab, "failed to start vdev %i addr %pM on freq %d: %d\n",
+ arvif->vdev_id, vif->addr,
+ ctx->def.chan->center_freq, ret);
+ goto err;
+ }
+ if (arvif->vdev_type == WMI_VDEV_TYPE_MONITOR) {
+ ret = ath11k_monitor_vdev_up(ar, arvif->vdev_id);
+ if (ret)
+ goto err;
+ }
+
+ arvif->is_started = true;
+
+ /* TODO: Setup ps and cts/rts protection */
+
+ mutex_unlock(&ar->conf_mutex);
+
+ return 0;
+
+err:
+ mutex_unlock(&ar->conf_mutex);
+
+ return ret;
+}
+
+static void
+ath11k_mac_op_unassign_vif_chanctx(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_chanctx_conf *ctx)
+{
+ struct ath11k *ar = hw->priv;
+ struct ath11k_base *ab = ar->ab;
+ struct ath11k_vif *arvif = (void *)vif->drv_priv;
+ int ret;
+
+ mutex_lock(&ar->conf_mutex);
+
+ ath11k_dbg(ab, ATH11K_DBG_MAC,
+ "mac chanctx unassign ptr %pK vdev_id %i\n",
+ ctx, arvif->vdev_id);
+
+ WARN_ON(!arvif->is_started);
+
+ ret = ath11k_mac_vdev_stop(arvif);
+ if (ret)
+ ath11k_warn(ab, "failed to stop vdev %i: %d\n",
+ arvif->vdev_id, ret);
+
+ arvif->is_started = false;
+
+ mutex_unlock(&ar->conf_mutex);
+}
+
+static int
+ath11k_mac_op_switch_vif_chanctx(struct ieee80211_hw *hw,
+ struct ieee80211_vif_chanctx_switch *vifs,
+ int n_vifs,
+ enum ieee80211_chanctx_switch_mode mode)
+{
+ struct ath11k *ar = hw->priv;
+
+ mutex_lock(&ar->conf_mutex);
+
+ ath11k_dbg(ar->ab, ATH11K_DBG_MAC,
+ "mac chanctx switch n_vifs %d mode %d\n",
+ n_vifs, mode);
+ ath11k_mac_update_vif_chan(ar, vifs, n_vifs);
+
+ mutex_unlock(&ar->conf_mutex);
+
+ return 0;
+}
+
+static int
+ath11k_set_vdev_param_to_all_vifs(struct ath11k *ar, int param, u32 value)
+{
+ struct ath11k_vif *arvif;
+ int ret = 0;
+
+ mutex_lock(&ar->conf_mutex);
+ list_for_each_entry(arvif, &ar->arvifs, list) {
+ ath11k_dbg(ar->ab, ATH11K_DBG_MAC, "setting mac vdev %d param %d value %d\n",
+ param, arvif->vdev_id, value);
+
+ ret = ath11k_wmi_vdev_set_param_cmd(ar, arvif->vdev_id,
+ param, value);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to set param %d for vdev %d: %d\n",
+ param, arvif->vdev_id, ret);
+ break;
+ }
+ }
+ mutex_unlock(&ar->conf_mutex);
+ return ret;
+}
+
+/* mac80211 stores the RTS/fragmentation threshold per device, while the
+ * firmware expects it per vdev, so the driver applies it to every interface.
+ */
+static int ath11k_mac_op_set_rts_threshold(struct ieee80211_hw *hw, u32 value)
+{
+ struct ath11k *ar = hw->priv;
+ int param_id = WMI_VDEV_PARAM_RTS_THRESHOLD;
+
+ return ath11k_set_vdev_param_to_all_vifs(ar, param_id, value);
+}
+
+static int ath11k_mac_op_set_frag_threshold(struct ieee80211_hw *hw, u32 value)
+{
+ /* Even though there's a WMI vdev param for fragmentation threshold no
+ * known firmware actually implements it. Moreover it is not possible to
+ * leave frame fragmentation to mac80211 because firmware clears the
+ * "more fragments" bit in frame control making it impossible for remote
+ * devices to reassemble frames.
+ *
+ * Hence implement a dummy callback just to say fragmentation isn't
+ * supported. This effectively prevents mac80211 from doing frame
+ * fragmentation in software.
+ */
+ return -EOPNOTSUPP;
+}
+
+static void ath11k_mac_op_flush(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ u32 queues, bool drop)
+{
+ struct ath11k *ar = hw->priv;
+ long time_left;
+
+ if (drop)
+ return;
+
+ time_left = wait_event_timeout(ar->dp.tx_empty_waitq,
+ (atomic_read(&ar->dp.num_tx_pending) == 0),
+ ATH11K_FLUSH_TIMEOUT);
+ if (time_left == 0)
+ ath11k_warn(ar->ab, "failed to flush transmit queue %ld\n", time_left);
+}
+
+static int
+ath11k_mac_bitrate_mask_num_ht_rates(struct ath11k *ar,
+ enum nl80211_band band,
+ const struct cfg80211_bitrate_mask *mask)
+{
+ int num_rates = 0;
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(mask->control[band].ht_mcs); i++)
+ num_rates += hweight16(mask->control[band].ht_mcs[i]);
+
+ return num_rates;
+}
+
+static bool
+ath11k_mac_has_single_legacy_rate(struct ath11k *ar,
+ enum nl80211_band band,
+ const struct cfg80211_bitrate_mask *mask)
+{
+ int num_rates = 0;
+
+ num_rates = hweight32(mask->control[band].legacy);
+
+ if (ath11k_mac_bitrate_mask_num_ht_rates(ar, band, mask))
+ return false;
+
+ if (ath11k_mac_bitrate_mask_num_vht_rates(ar, band, mask))
+ return false;
+
+ return num_rates == 1;
+}
+
+static bool
+ath11k_mac_bitrate_mask_get_single_nss(struct ath11k *ar,
+ enum nl80211_band band,
+ const struct cfg80211_bitrate_mask *mask,
+ int *nss)
+{
+ struct ieee80211_supported_band *sband = &ar->mac.sbands[band];
+ u16 vht_mcs_map = le16_to_cpu(sband->vht_cap.vht_mcs.tx_mcs_map);
+ u8 ht_nss_mask = 0;
+ u8 vht_nss_mask = 0;
+ int i;
+
+ /* No need to consider legacy here. Basic rates are always present
+ * in bitrate mask
+ */
+
+ for (i = 0; i < ARRAY_SIZE(mask->control[band].ht_mcs); i++) {
+ if (mask->control[band].ht_mcs[i] == 0)
+ continue;
+ else if (mask->control[band].ht_mcs[i] ==
+ sband->ht_cap.mcs.rx_mask[i])
+ ht_nss_mask |= BIT(i);
+ else
+ return false;
+ }
+
+ for (i = 0; i < ARRAY_SIZE(mask->control[band].vht_mcs); i++) {
+ if (mask->control[band].vht_mcs[i] == 0)
+ continue;
+ else if (mask->control[band].vht_mcs[i] ==
+ ath11k_mac_get_max_vht_mcs_map(vht_mcs_map, i))
+ vht_nss_mask |= BIT(i);
+ else
+ return false;
+ }
+
+ if (ht_nss_mask != vht_nss_mask)
+ return false;
+
+ if (ht_nss_mask == 0)
+ return false;
+
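+ /* Require a contiguous NSS mask starting from NSS 1, i.e. of the
+ * form 2^n - 1.
+ */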
+ if (BIT(fls(ht_nss_mask)) - 1 != ht_nss_mask)
+ return false;
+
+ *nss = fls(ht_nss_mask);
+
+ return true;
+}
+
+static int
+ath11k_mac_get_single_legacy_rate(struct ath11k *ar,
+ enum nl80211_band band,
+ const struct cfg80211_bitrate_mask *mask,
+ u32 *rate, u8 *nss)
+{
+ int rate_idx;
+ u16 bitrate;
+ u8 preamble;
+ u8 hw_rate;
+
+ if (hweight32(mask->control[band].legacy) != 1)
+ return -EINVAL;
+
+ rate_idx = ffs(mask->control[band].legacy) - 1;
+
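+ /* The 5 GHz band has no CCK rates, so skip them in the rate table. */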
+ if (band == NL80211_BAND_5GHZ)
+ rate_idx += ATH11K_MAC_FIRST_OFDM_RATE_IDX;
+
+ hw_rate = ath11k_legacy_rates[rate_idx].hw_value;
+ bitrate = ath11k_legacy_rates[rate_idx].bitrate;
+
+ if (ath11k_mac_bitrate_is_cck(bitrate))
+ preamble = WMI_RATE_PREAMBLE_CCK;
+ else
+ preamble = WMI_RATE_PREAMBLE_OFDM;
+
+ *nss = 1;
+ *rate = ATH11K_HW_RATE_CODE(hw_rate, 0, preamble);
+
+ return 0;
+}
+
+static int ath11k_mac_set_fixed_rate_params(struct ath11k_vif *arvif,
+ u32 rate, u8 nss, u8 sgi, u8 ldpc)
+{
+ struct ath11k *ar = arvif->ar;
+ u32 vdev_param;
+ int ret;
+
+ lockdep_assert_held(&ar->conf_mutex);
+
+ ath11k_dbg(ar->ab, ATH11K_DBG_MAC, "mac set fixed rate params vdev %i rate 0x%02hhx nss %hhu sgi %hhu\n",
+ arvif->vdev_id, rate, nss, sgi);
+
+ vdev_param = WMI_VDEV_PARAM_FIXED_RATE;
+ ret = ath11k_wmi_vdev_set_param_cmd(ar, arvif->vdev_id,
+ vdev_param, rate);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to set fixed rate param 0x%02x: %d\n",
+ rate, ret);
+ return ret;
+ }
+
+ vdev_param = WMI_VDEV_PARAM_NSS;
+ ret = ath11k_wmi_vdev_set_param_cmd(ar, arvif->vdev_id,
+ vdev_param, nss);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to set nss param %d: %d\n",
+ nss, ret);
+ return ret;
+ }
+
+ vdev_param = WMI_VDEV_PARAM_SGI;
+ ret = ath11k_wmi_vdev_set_param_cmd(ar, arvif->vdev_id,
+ vdev_param, sgi);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to set sgi param %d: %d\n",
+ sgi, ret);
+ return ret;
+ }
+
+ vdev_param = WMI_VDEV_PARAM_LDPC;
+ ret = ath11k_wmi_vdev_set_param_cmd(ar, arvif->vdev_id,
+ vdev_param, ldpc);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to set ldpc param %d: %d\n",
+ ldpc, ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+static bool
+ath11k_mac_vht_mcs_range_present(struct ath11k *ar,
+ enum nl80211_band band,
+ const struct cfg80211_bitrate_mask *mask)
+{
+ int i;
+ u16 vht_mcs;
+
+ for (i = 0; i < NL80211_VHT_NSS_MAX; i++) {
+ vht_mcs = mask->control[band].vht_mcs[i];
+
+ switch (vht_mcs) {
+ case 0:
+ case BIT(8) - 1:
+ case BIT(9) - 1:
+ case BIT(10) - 1:
+ break;
+ default:
+ return false;
+ }
+ }
+
+ return true;
+}
+
+static void ath11k_mac_set_bitrate_mask_iter(void *data,
+ struct ieee80211_sta *sta)
+{
+ struct ath11k_vif *arvif = data;
+ struct ath11k_sta *arsta = (struct ath11k_sta *)sta->drv_priv;
+ struct ath11k *ar = arvif->ar;
+
+ spin_lock_bh(&ar->data_lock);
+ arsta->changed |= IEEE80211_RC_SUPP_RATES_CHANGED;
+ spin_unlock_bh(&ar->data_lock);
+
+ ieee80211_queue_work(ar->hw, &arsta->update_wk);
+}
+
+static void ath11k_mac_disable_peer_fixed_rate(void *data,
+ struct ieee80211_sta *sta)
+{
+ struct ath11k_vif *arvif = data;
+ struct ath11k *ar = arvif->ar;
+ int ret;
+
+ ret = ath11k_wmi_set_peer_param(ar, sta->addr,
+ arvif->vdev_id,
+ WMI_PEER_PARAM_FIXED_RATE,
+ WMI_FIXED_RATE_NONE);
+ if (ret)
+ ath11k_warn(ar->ab,
+ "failed to disable peer fixed rate for STA %pM ret %d\n",
+ sta->addr, ret);
+}
+
+static int
+ath11k_mac_op_set_bitrate_mask(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ const struct cfg80211_bitrate_mask *mask)
+{
+ struct ath11k_vif *arvif = (void *)vif->drv_priv;
+ struct cfg80211_chan_def def;
+ struct ath11k *ar = arvif->ar;
+ enum nl80211_band band;
+ const u8 *ht_mcs_mask;
+ const u16 *vht_mcs_mask;
+ u32 rate;
+ u8 nss;
+ u8 sgi;
+ u8 ldpc;
+ int single_nss;
+ int ret;
+ int num_rates;
+
+ if (ath11k_mac_vif_chan(vif, &def))
+ return -EPERM;
+
+ band = def.chan->band;
+ ht_mcs_mask = mask->control[band].ht_mcs;
+ vht_mcs_mask = mask->control[band].vht_mcs;
+ ldpc = !!(ar->ht_cap_info & WMI_HT_CAP_LDPC);
+
+ sgi = mask->control[band].gi;
+ if (sgi == NL80211_TXRATE_FORCE_LGI)
+ return -EINVAL;
+
+ /* mac80211 doesn't support sending a fixed HT/VHT MCS alone, rather it
+ * requires passing at least one of the used basic rates along with them.
+ * Fixed rate setting across different preambles (legacy, HT, VHT) is
+ * not supported by the FW. Hence use of FIXED_RATE vdev param is not
+ * suitable for setting single HT/VHT rates.
+ * But, there could be a single basic rate passed from userspace which
+ * can be done through the FIXED_RATE param.
+ */
+ if (ath11k_mac_has_single_legacy_rate(ar, band, mask)) {
+ ret = ath11k_mac_get_single_legacy_rate(ar, band, mask, &rate,
+ &nss);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to get single legacy rate for vdev %i: %d\n",
+ arvif->vdev_id, ret);
+ return ret;
+ }
+ ieee80211_iterate_stations_atomic(ar->hw,
+ ath11k_mac_disable_peer_fixed_rate,
+ arvif);
+ } else if (ath11k_mac_bitrate_mask_get_single_nss(ar, band, mask,
+ &single_nss)) {
+ rate = WMI_FIXED_RATE_NONE;
+ nss = single_nss;
+ } else {
+ rate = WMI_FIXED_RATE_NONE;
+ nss = min_t(u32, ar->num_tx_chains,
+ max(ath11k_mac_max_ht_nss(ht_mcs_mask),
+ ath11k_mac_max_vht_nss(vht_mcs_mask)));
+
+ /* If multiple rates across different preambles are given, we can
+ * reconfigure this info for all peers using the PEER_ASSOC command,
+ * with the exceptions below.
+ * - Single VHT rate: the peer_assoc command accommodates only MCS
+ * range values, i.e. 0-7, 0-8, 0-9 for VHT. Though mac80211
+ * mandates passing basic rates along with HT/VHT rates, the FW
+ * doesn't allow switching from VHT to legacy. Hence, instead of
+ * setting legacy and VHT rates using the RATEMASK_CMD vdev cmd,
+ * we set this VHT rate as the peer fixed rate param, which
+ * overrides the FIXED rate and the FW rate control algorithm.
+ * If a single VHT rate is passed along with HT rates, we select
+ * the VHT rate as the fixed rate for VHT peers.
+ * - Multiple VHT rates: when multiple VHT rates are given, this
+ * can be set using the RATEMASK CMD, which uses the FW rate-ctl
+ * algorithm.
+ * TODO: Setting multiple VHT MCS values and replacing peer_assoc
+ * with RATEMASK_CMDID could cover all use cases of setting rates
+ * across multiple preambles and rates within the same type, but
+ * this requires more validation of the command at this point.
+ */
+
+ num_rates = ath11k_mac_bitrate_mask_num_vht_rates(ar, band,
+ mask);
+
+ if (!ath11k_mac_vht_mcs_range_present(ar, band, mask) &&
+ num_rates > 1) {
+ /* TODO: Handle multiple VHT MCS values setting using
+ * RATEMASK CMD
+ */
+ ath11k_warn(ar->ab,
+ "Setting more than one MCS Value in bitrate mask not supported\n");
+ return -EINVAL;
+ }
+
+ ieee80211_iterate_stations_atomic(ar->hw,
+ ath11k_mac_disable_peer_fixed_rate,
+ arvif);
+
+ mutex_lock(&ar->conf_mutex);
+
+ arvif->bitrate_mask = *mask;
+ ieee80211_iterate_stations_atomic(ar->hw,
+ ath11k_mac_set_bitrate_mask_iter,
+ arvif);
+
+ mutex_unlock(&ar->conf_mutex);
+ }
+
+ mutex_lock(&ar->conf_mutex);
+
+ ret = ath11k_mac_set_fixed_rate_params(arvif, rate, nss, sgi, ldpc);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to set fixed rate params on vdev %i: %d\n",
+ arvif->vdev_id, ret);
+ }
+
+ mutex_unlock(&ar->conf_mutex);
+
+ return ret;
+}
+
+static void
+ath11k_mac_op_reconfig_complete(struct ieee80211_hw *hw,
+ enum ieee80211_reconfig_type reconfig_type)
+{
+ struct ath11k *ar = hw->priv;
+
+ if (reconfig_type != IEEE80211_RECONFIG_TYPE_RESTART)
+ return;
+
+ mutex_lock(&ar->conf_mutex);
+
+ if (ar->state == ATH11K_STATE_RESTARTED) {
+ ath11k_warn(ar->ab, "pdev %d successfully recovered\n",
+ ar->pdev->pdev_id);
+ ar->state = ATH11K_STATE_ON;
+ ieee80211_wake_queues(ar->hw);
+ }
+
+ mutex_unlock(&ar->conf_mutex);
+}
+
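+/* Request BSS channel info from firmware for the current rx channel
+ * and wait (up to 3 s) for the survey results; skipped while a scan
+ * is in progress or when the firmware lacks the service.
+ */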
+static void
+ath11k_mac_update_bss_chan_survey(struct ath11k *ar,
+ struct ieee80211_channel *channel)
+{
+ int ret;
+ enum wmi_bss_chan_info_req_type type = WMI_BSS_SURVEY_REQ_TYPE_READ;
+
+ lockdep_assert_held(&ar->conf_mutex);
+
+ if (!test_bit(WMI_TLV_SERVICE_BSS_CHANNEL_INFO_64, ar->ab->wmi_ab.svc_map) ||
+ ar->rx_channel != channel)
+ return;
+
+ if (ar->scan.state != ATH11K_SCAN_IDLE) {
+ ath11k_dbg(ar->ab, ATH11K_DBG_MAC,
+ "ignoring bss chan info req while scanning..\n");
+ return;
+ }
+
+ reinit_completion(&ar->bss_survey_done);
+
+ ret = ath11k_wmi_pdev_bss_chan_info_request(ar, type);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to send pdev bss chan info request\n");
+ return;
+ }
+
+ ret = wait_for_completion_timeout(&ar->bss_survey_done, 3 * HZ);
+ if (ret == 0)
+ ath11k_warn(ar->ab, "bss channel survey timed out\n");
+}
+
+static int ath11k_mac_op_get_survey(struct ieee80211_hw *hw, int idx,
+ struct survey_info *survey)
+{
+ struct ath11k *ar = hw->priv;
+ struct ieee80211_supported_band *sband;
+ struct survey_info *ar_survey;
+ int ret = 0;
+
+ if (idx >= ATH11K_NUM_CHANS)
+ return -ENOENT;
+
+ ar_survey = &ar->survey[idx];
+
+ mutex_lock(&ar->conf_mutex);
+
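+ /* idx is a flat index over the 2 GHz channels first, then 5 GHz */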
+ sband = hw->wiphy->bands[NL80211_BAND_2GHZ];
+ if (sband && idx >= sband->n_channels) {
+ idx -= sband->n_channels;
+ sband = NULL;
+ }
+
+ if (!sband)
+ sband = hw->wiphy->bands[NL80211_BAND_5GHZ];
+
+ if (!sband || idx >= sband->n_channels) {
+ ret = -ENOENT;
+ goto exit;
+ }
+
+ ath11k_mac_update_bss_chan_survey(ar, &sband->channels[idx]);
+
+ spin_lock_bh(&ar->data_lock);
+ memcpy(survey, ar_survey, sizeof(*survey));
+ spin_unlock_bh(&ar->data_lock);
+
+ survey->channel = &sband->channels[idx];
+
+ if (ar->rx_channel == survey->channel)
+ survey->filled |= SURVEY_INFO_IN_USE;
+
+exit:
+ mutex_unlock(&ar->conf_mutex);
+ return ret;
+}
+
+static void ath11k_mac_op_sta_statistics(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta,
+ struct station_info *sinfo)
+{
+ struct ath11k_sta *arsta = (struct ath11k_sta *)sta->drv_priv;
+
+ sinfo->rx_duration = arsta->rx_duration;
+ sinfo->filled |= BIT_ULL(NL80211_STA_INFO_RX_DURATION);
+
+ sinfo->tx_duration = arsta->tx_duration;
+ sinfo->filled |= BIT_ULL(NL80211_STA_INFO_TX_DURATION);
+
+ if (!arsta->txrate.legacy && !arsta->txrate.nss)
+ return;
+
+ if (arsta->txrate.legacy) {
+ sinfo->txrate.legacy = arsta->txrate.legacy;
+ } else {
+ sinfo->txrate.mcs = arsta->txrate.mcs;
+ sinfo->txrate.nss = arsta->txrate.nss;
+ sinfo->txrate.bw = arsta->txrate.bw;
+ sinfo->txrate.he_gi = arsta->txrate.he_gi;
+ sinfo->txrate.he_dcm = arsta->txrate.he_dcm;
+ sinfo->txrate.he_ru_alloc = arsta->txrate.he_ru_alloc;
+ }
+ sinfo->txrate.flags = arsta->txrate.flags;
+ sinfo->filled |= BIT_ULL(NL80211_STA_INFO_TX_BITRATE);
+
+ /* TODO: Use real NF instead of default one. */
+ sinfo->signal = arsta->rssi_comb + ATH11K_DEFAULT_NOISE_FLOOR;
+}
+
+static const struct ieee80211_ops ath11k_ops = {
+ .tx = ath11k_mac_op_tx,
+ .start = ath11k_mac_op_start,
+ .stop = ath11k_mac_op_stop,
+ .reconfig_complete = ath11k_mac_op_reconfig_complete,
+ .add_interface = ath11k_mac_op_add_interface,
+ .remove_interface = ath11k_mac_op_remove_interface,
+ .config = ath11k_mac_op_config,
+ .bss_info_changed = ath11k_mac_op_bss_info_changed,
+ .configure_filter = ath11k_mac_op_configure_filter,
+ .hw_scan = ath11k_mac_op_hw_scan,
+ .cancel_hw_scan = ath11k_mac_op_cancel_hw_scan,
+ .set_key = ath11k_mac_op_set_key,
+ .sta_state = ath11k_mac_op_sta_state,
+ .sta_set_txpwr = ath11k_mac_op_sta_set_txpwr,
+ .sta_rc_update = ath11k_mac_op_sta_rc_update,
+ .conf_tx = ath11k_mac_op_conf_tx,
+ .set_antenna = ath11k_mac_op_set_antenna,
+ .get_antenna = ath11k_mac_op_get_antenna,
+ .ampdu_action = ath11k_mac_op_ampdu_action,
+ .add_chanctx = ath11k_mac_op_add_chanctx,
+ .remove_chanctx = ath11k_mac_op_remove_chanctx,
+ .change_chanctx = ath11k_mac_op_change_chanctx,
+ .assign_vif_chanctx = ath11k_mac_op_assign_vif_chanctx,
+ .unassign_vif_chanctx = ath11k_mac_op_unassign_vif_chanctx,
+ .switch_vif_chanctx = ath11k_mac_op_switch_vif_chanctx,
+ .set_rts_threshold = ath11k_mac_op_set_rts_threshold,
+ .set_frag_threshold = ath11k_mac_op_set_frag_threshold,
+ .set_bitrate_mask = ath11k_mac_op_set_bitrate_mask,
+ .get_survey = ath11k_mac_op_get_survey,
+ .flush = ath11k_mac_op_flush,
+ .sta_statistics = ath11k_mac_op_sta_statistics,
+ CFG80211_TESTMODE_CMD(ath11k_tm_cmd)
+#ifdef CONFIG_ATH11K_DEBUGFS
+ .sta_add_debugfs = ath11k_sta_add_debugfs,
+#endif
+};
+
+static const struct ieee80211_iface_limit ath11k_if_limits[] = {
+ {
+ .max = 1,
+ .types = BIT(NL80211_IFTYPE_STATION),
+ },
+ {
+ .max = 16,
+ .types = BIT(NL80211_IFTYPE_AP)
+#ifdef CONFIG_MAC80211_MESH
+ | BIT(NL80211_IFTYPE_MESH_POINT)
+#endif
+ },
+};
+
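+/* One channel context shared by up to 16 interfaces (at most one
+ * station plus APs/mesh points), with beacon intervals constrained
+ * to a minimum GCD of 100 TU.
+ */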
+static const struct ieee80211_iface_combination ath11k_if_comb[] = {
+ {
+ .limits = ath11k_if_limits,
+ .n_limits = ARRAY_SIZE(ath11k_if_limits),
+ .max_interfaces = 16,
+ .num_different_channels = 1,
+ .beacon_int_infra_match = true,
+ .beacon_int_min_gcd = 100,
+ .radar_detect_widths = BIT(NL80211_CHAN_WIDTH_20_NOHT) |
+ BIT(NL80211_CHAN_WIDTH_20) |
+ BIT(NL80211_CHAN_WIDTH_40) |
+ BIT(NL80211_CHAN_WIDTH_80),
+ },
+};
+
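+/* Disable any channels that fall outside the frequency range
+ * reported by the hardware's regulatory capabilities.
+ */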
+static void ath11k_mac_update_ch_list(struct ath11k *ar,
+ struct ieee80211_supported_band *band,
+ u32 freq_low, u32 freq_high)
+{
+ int i;
+
+ if (!(freq_low && freq_high))
+ return;
+
+ for (i = 0; i < band->n_channels; i++) {
+ if (band->channels[i].center_freq < freq_low ||
+ band->channels[i].center_freq > freq_high)
+ band->channels[i].flags |= IEEE80211_CHAN_DISABLED;
+ }
+}
+
+static int ath11k_mac_setup_channels_rates(struct ath11k *ar,
+ u32 supported_bands)
+{
+ struct ieee80211_supported_band *band;
+ struct ath11k_hal_reg_capabilities_ext *reg_cap;
+ void *channels;
+
+ BUILD_BUG_ON((ARRAY_SIZE(ath11k_2ghz_channels) +
+ ARRAY_SIZE(ath11k_5ghz_channels)) !=
+ ATH11K_NUM_CHANS);
+
+ reg_cap = &ar->ab->hal_reg_cap[ar->pdev_idx];
+
+ if (supported_bands & WMI_HOST_WLAN_2G_CAP) {
+ channels = kmemdup(ath11k_2ghz_channels,
+ sizeof(ath11k_2ghz_channels),
+ GFP_KERNEL);
+ if (!channels)
+ return -ENOMEM;
+
+ band = &ar->mac.sbands[NL80211_BAND_2GHZ];
+ band->n_channels = ARRAY_SIZE(ath11k_2ghz_channels);
+ band->channels = channels;
+ band->n_bitrates = ath11k_g_rates_size;
+ band->bitrates = ath11k_g_rates;
+ ar->hw->wiphy->bands[NL80211_BAND_2GHZ] = band;
+ ath11k_mac_update_ch_list(ar, band,
+ reg_cap->low_2ghz_chan,
+ reg_cap->high_2ghz_chan);
+ }
+
+ if (supported_bands & WMI_HOST_WLAN_5G_CAP) {
+ channels = kmemdup(ath11k_5ghz_channels,
+ sizeof(ath11k_5ghz_channels),
+ GFP_KERNEL);
+ if (!channels) {
+ kfree(ar->mac.sbands[NL80211_BAND_2GHZ].channels);
+ return -ENOMEM;
+ }
+
+ band = &ar->mac.sbands[NL80211_BAND_5GHZ];
+ band->n_channels = ARRAY_SIZE(ath11k_5ghz_channels);
+ band->channels = channels;
+ band->n_bitrates = ath11k_a_rates_size;
+ band->bitrates = ath11k_a_rates;
+ ar->hw->wiphy->bands[NL80211_BAND_5GHZ] = band;
+ ath11k_mac_update_ch_list(ar, band,
+ reg_cap->low_5ghz_chan,
+ reg_cap->high_5ghz_chan);
+ }
+
+ return 0;
+}
+
+static const u8 ath11k_if_types_ext_capa[] = {
+ [0] = WLAN_EXT_CAPA1_EXT_CHANNEL_SWITCHING,
+ [7] = WLAN_EXT_CAPA8_OPMODE_NOTIF,
+};
+
+static const u8 ath11k_if_types_ext_capa_sta[] = {
+ [0] = WLAN_EXT_CAPA1_EXT_CHANNEL_SWITCHING,
+ [7] = WLAN_EXT_CAPA8_OPMODE_NOTIF,
+ [9] = WLAN_EXT_CAPA10_TWT_REQUESTER_SUPPORT,
+};
+
+static const u8 ath11k_if_types_ext_capa_ap[] = {
+ [0] = WLAN_EXT_CAPA1_EXT_CHANNEL_SWITCHING,
+ [7] = WLAN_EXT_CAPA8_OPMODE_NOTIF,
+ [9] = WLAN_EXT_CAPA10_TWT_RESPONDER_SUPPORT,
+};
+
+static const struct wiphy_iftype_ext_capab ath11k_iftypes_ext_capa[] = {
+ {
+ .extended_capabilities = ath11k_if_types_ext_capa,
+ .extended_capabilities_mask = ath11k_if_types_ext_capa,
+ .extended_capabilities_len = sizeof(ath11k_if_types_ext_capa),
+ }, {
+ .iftype = NL80211_IFTYPE_STATION,
+ .extended_capabilities = ath11k_if_types_ext_capa_sta,
+ .extended_capabilities_mask = ath11k_if_types_ext_capa_sta,
+ .extended_capabilities_len =
+ sizeof(ath11k_if_types_ext_capa_sta),
+ }, {
+ .iftype = NL80211_IFTYPE_AP,
+ .extended_capabilities = ath11k_if_types_ext_capa_ap,
+ .extended_capabilities_mask = ath11k_if_types_ext_capa_ap,
+ .extended_capabilities_len =
+ sizeof(ath11k_if_types_ext_capa_ap),
+ },
+};
+
+static void __ath11k_mac_unregister(struct ath11k *ar)
+{
+ cancel_work_sync(&ar->regd_update_work);
+
+ ieee80211_unregister_hw(ar->hw);
+
+ idr_for_each(&ar->txmgmt_idr, ath11k_mac_tx_mgmt_pending_free, ar);
+ idr_destroy(&ar->txmgmt_idr);
+
+ kfree(ar->mac.sbands[NL80211_BAND_2GHZ].channels);
+ kfree(ar->mac.sbands[NL80211_BAND_5GHZ].channels);
+
+ SET_IEEE80211_DEV(ar->hw, NULL);
+}
+
+void ath11k_mac_unregister(struct ath11k_base *ab)
+{
+ struct ath11k *ar;
+ struct ath11k_pdev *pdev;
+ int i;
+
+ for (i = 0; i < ab->num_radios; i++) {
+ pdev = &ab->pdevs[i];
+ ar = pdev->ar;
+ if (!ar)
+ continue;
+
+ __ath11k_mac_unregister(ar);
+ }
+}
+
+static int __ath11k_mac_register(struct ath11k *ar)
+{
+ struct ath11k_base *ab = ar->ab;
+ struct ath11k_pdev_cap *cap = &ar->pdev->cap;
+ static const u32 cipher_suites[] = {
+ WLAN_CIPHER_SUITE_TKIP,
+ WLAN_CIPHER_SUITE_CCMP,
+ WLAN_CIPHER_SUITE_AES_CMAC,
+ WLAN_CIPHER_SUITE_BIP_CMAC_256,
+ WLAN_CIPHER_SUITE_BIP_GMAC_128,
+ WLAN_CIPHER_SUITE_BIP_GMAC_256,
+ WLAN_CIPHER_SUITE_GCMP,
+ WLAN_CIPHER_SUITE_GCMP_256,
+ WLAN_CIPHER_SUITE_CCMP_256,
+ };
+ int ret;
+ u32 ht_cap = 0;
+
+ ath11k_pdev_caps_update(ar);
+
+ SET_IEEE80211_PERM_ADDR(ar->hw, ar->mac_addr);
+
+ SET_IEEE80211_DEV(ar->hw, ab->dev);
+
+ ret = ath11k_mac_setup_channels_rates(ar,
+ cap->supported_bands);
+ if (ret)
+ goto err_free;
+
+ ath11k_mac_setup_ht_vht_cap(ar, cap, &ht_cap);
+ ath11k_mac_setup_he_cap(ar, cap);
+
+ ar->hw->wiphy->available_antennas_rx = cap->rx_chain_mask;
+ ar->hw->wiphy->available_antennas_tx = cap->tx_chain_mask;
+
+ ar->hw->wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION) |
+ BIT(NL80211_IFTYPE_AP) |
+ BIT(NL80211_IFTYPE_MESH_POINT);
+
+ ieee80211_hw_set(ar->hw, SIGNAL_DBM);
+ ieee80211_hw_set(ar->hw, SUPPORTS_PS);
+ ieee80211_hw_set(ar->hw, SUPPORTS_DYNAMIC_PS);
+ ieee80211_hw_set(ar->hw, MFP_CAPABLE);
+ ieee80211_hw_set(ar->hw, REPORTS_TX_ACK_STATUS);
+ ieee80211_hw_set(ar->hw, HAS_RATE_CONTROL);
+ ieee80211_hw_set(ar->hw, AP_LINK_PS);
+ ieee80211_hw_set(ar->hw, SPECTRUM_MGMT);
+ ieee80211_hw_set(ar->hw, SUPPORT_FAST_XMIT);
+ ieee80211_hw_set(ar->hw, CONNECTION_MONITOR);
+ ieee80211_hw_set(ar->hw, SUPPORTS_PER_STA_GTK);
+ ieee80211_hw_set(ar->hw, WANT_MONITOR_VIF);
+ ieee80211_hw_set(ar->hw, CHANCTX_STA_CSA);
+ ieee80211_hw_set(ar->hw, QUEUE_CONTROL);
+ ieee80211_hw_set(ar->hw, SUPPORTS_TX_FRAG);
+ ieee80211_hw_set(ar->hw, REPORTS_LOW_ACK);
+ if (ht_cap & WMI_HT_CAP_ENABLED) {
+ ieee80211_hw_set(ar->hw, AMPDU_AGGREGATION);
+ ieee80211_hw_set(ar->hw, TX_AMPDU_SETUP_IN_HW);
+ ieee80211_hw_set(ar->hw, SUPPORTS_REORDERING_BUFFER);
+ ieee80211_hw_set(ar->hw, SUPPORTS_AMSDU_IN_AMPDU);
+ }
+
+ ar->hw->wiphy->features |= NL80211_FEATURE_STATIC_SMPS;
+ ar->hw->wiphy->flags |= WIPHY_FLAG_IBSS_RSN;
+
+ /* TODO: Check whether the HT capability advertised by firmware
+ * differs per band on a dual band capable radio. Handling a
+ * per-band HT capability will be tricky.
+ */
+ if (ht_cap & WMI_HT_CAP_DYNAMIC_SMPS)
+ ar->hw->wiphy->features |= NL80211_FEATURE_DYNAMIC_SMPS;
+
+ ar->hw->wiphy->max_scan_ssids = WLAN_SCAN_PARAMS_MAX_SSID;
+ ar->hw->wiphy->max_scan_ie_len = WLAN_SCAN_PARAMS_MAX_IE_LEN;
+
+ ar->hw->max_listen_interval = ATH11K_MAX_HW_LISTEN_INTERVAL;
+
+ ar->hw->wiphy->flags |= WIPHY_FLAG_HAS_REMAIN_ON_CHANNEL;
+ ar->hw->wiphy->flags |= WIPHY_FLAG_HAS_CHANNEL_SWITCH;
+ ar->hw->wiphy->max_remain_on_channel_duration = 5000;
+
+ ar->hw->wiphy->flags |= WIPHY_FLAG_AP_UAPSD;
+ ar->hw->wiphy->features |= NL80211_FEATURE_AP_MODE_CHAN_WIDTH_CHANGE |
+ NL80211_FEATURE_AP_SCAN;
+
+ ar->max_num_stations = TARGET_NUM_STATIONS;
+ ar->max_num_peers = TARGET_NUM_PEERS_PDEV;
+
+ ar->hw->wiphy->max_ap_assoc_sta = ar->max_num_stations;
+
+ ar->hw->queues = ATH11K_HW_MAX_QUEUES;
+ ar->hw->offchannel_tx_hw_queue = ATH11K_HW_MAX_QUEUES - 1;
+ ar->hw->max_rx_aggregation_subframes = IEEE80211_MAX_AMPDU_BUF;
+
+ ar->hw->vif_data_size = sizeof(struct ath11k_vif);
+ ar->hw->sta_data_size = sizeof(struct ath11k_sta);
+
+ ar->hw->wiphy->iface_combinations = ath11k_if_comb;
+ ar->hw->wiphy->n_iface_combinations = ARRAY_SIZE(ath11k_if_comb);
+
+ wiphy_ext_feature_set(ar->hw->wiphy, NL80211_EXT_FEATURE_CQM_RSSI_LIST);
+ wiphy_ext_feature_set(ar->hw->wiphy, NL80211_EXT_FEATURE_STA_TX_PWR);
+
+ ar->hw->wiphy->cipher_suites = cipher_suites;
+ ar->hw->wiphy->n_cipher_suites = ARRAY_SIZE(cipher_suites);
+
+ ar->hw->wiphy->iftype_ext_capab = ath11k_iftypes_ext_capa;
+ ar->hw->wiphy->num_iftype_ext_capab =
+ ARRAY_SIZE(ath11k_iftypes_ext_capa);
+
+ ath11k_reg_init(ar);
+
+ /* advertise HW checksum offload capabilities */
+ ar->hw->netdev_features = NETIF_F_HW_CSUM;
+
+ ret = ieee80211_register_hw(ar->hw);
+ if (ret) {
+ ath11k_err(ar->ab, "ieee80211 registration failed: %d\n", ret);
+ goto err_free;
+ }
+
+ /* Apply the regd received during initialization */
+ ret = ath11k_regd_update(ar, true);
+ if (ret) {
+ ath11k_err(ar->ab, "ath11k regd update failed: %d\n", ret);
+ goto err_free;
+ }
+
+ ret = ath11k_debug_register(ar);
+ if (ret) {
+ ath11k_err(ar->ab, "debugfs registration failed: %d\n", ret);
+ goto err_free;
+ }
+
+ return 0;
+
+err_free:
+ kfree(ar->mac.sbands[NL80211_BAND_2GHZ].channels);
+ kfree(ar->mac.sbands[NL80211_BAND_5GHZ].channels);
+
+ SET_IEEE80211_DEV(ar->hw, NULL);
+ return ret;
+}
+
+int ath11k_mac_register(struct ath11k_base *ab)
+{
+ struct ath11k *ar;
+ struct ath11k_pdev *pdev;
+ int i;
+ int ret;
+
+ for (i = 0; i < ab->num_radios; i++) {
+ pdev = &ab->pdevs[i];
+ ar = pdev->ar;
+ if (ab->pdevs_macaddr_valid) {
+ ether_addr_copy(ar->mac_addr, pdev->mac_addr);
+ } else {
+ ether_addr_copy(ar->mac_addr, ab->mac_addr);
+ ar->mac_addr[4] += i;
+ }
+
+ ret = __ath11k_mac_register(ar);
+ if (ret)
+ goto err_cleanup;
+
+ idr_init(&ar->txmgmt_idr);
+ spin_lock_init(&ar->txmgmt_idr_lock);
+ }
+
+ /* Initialize channel counters frequency value in hertz */
+ ab->cc_freq_hz = IPQ8074_CC_FREQ_HERTZ;
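+ /* one bit per vdev across all radios; a set bit marks the vdev id
+ * as free
+ */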
+ ab->free_vdev_map = (1LL << (ab->num_radios * TARGET_NUM_VDEVS)) - 1;
+
+ return 0;
+
+err_cleanup:
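+ /* unwind: unregister only the radios registered so far */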
+ for (i = i - 1; i >= 0; i--) {
+ pdev = &ab->pdevs[i];
+ ar = pdev->ar;
+ __ath11k_mac_unregister(ar);
+ }
+
+ return ret;
+}
+
+int ath11k_mac_allocate(struct ath11k_base *ab)
+{
+ struct ieee80211_hw *hw;
+ struct ath11k *ar;
+ struct ath11k_pdev *pdev;
+ int ret;
+ int i;
+
+ if (test_bit(ATH11K_FLAG_REGISTERED, &ab->dev_flags))
+ return 0;
+
+ for (i = 0; i < ab->num_radios; i++) {
+ pdev = &ab->pdevs[i];
+ hw = ieee80211_alloc_hw(sizeof(struct ath11k), &ath11k_ops);
+ if (!hw) {
+ ath11k_warn(ab, "failed to allocate mac80211 hw device\n");
+ ret = -ENOMEM;
+ goto err_free_mac;
+ }
+
+ ar = hw->priv;
+ ar->hw = hw;
+ ar->ab = ab;
+ ar->pdev = pdev;
+ ar->pdev_idx = i;
+ ar->lmac_id = ath11k_core_get_hw_mac_id(ab, i);
+
+ ar->wmi = &ab->wmi_ab.wmi[i];
+ /* FIXME: wmi[0] is already initialized during attach;
+ * should we do this again?
+ */
+ ath11k_wmi_pdev_attach(ab, i);
+
+ ar->cfg_tx_chainmask = pdev->cap.tx_chain_mask;
+ ar->cfg_rx_chainmask = pdev->cap.rx_chain_mask;
+ ar->num_tx_chains = get_num_chains(pdev->cap.tx_chain_mask);
+ ar->num_rx_chains = get_num_chains(pdev->cap.rx_chain_mask);
+
+ pdev->ar = ar;
+ spin_lock_init(&ar->data_lock);
+ INIT_LIST_HEAD(&ar->arvifs);
+ INIT_LIST_HEAD(&ar->ppdu_stats_info);
+ mutex_init(&ar->conf_mutex);
+ init_completion(&ar->vdev_setup_done);
+ init_completion(&ar->peer_assoc_done);
+ init_completion(&ar->install_key_done);
+ init_completion(&ar->bss_survey_done);
+ init_completion(&ar->scan.started);
+ init_completion(&ar->scan.completed);
+ INIT_DELAYED_WORK(&ar->scan.timeout, ath11k_scan_timeout_work);
+ INIT_WORK(&ar->regd_update_work, ath11k_regd_update_work);
+
+ INIT_WORK(&ar->wmi_mgmt_tx_work, ath11k_mgmt_over_wmi_tx_work);
+ skb_queue_head_init(&ar->wmi_mgmt_tx_queue);
+ clear_bit(ATH11K_FLAG_MONITOR_ENABLED, &ar->monitor_flags);
+ }
+
+ return 0;
+
+err_free_mac:
+ ath11k_mac_destroy(ab);
+
+ return ret;
+}
+
+void ath11k_mac_destroy(struct ath11k_base *ab)
+{
+ struct ath11k *ar;
+ struct ath11k_pdev *pdev;
+ int i;
+
+ for (i = 0; i < ab->num_radios; i++) {
+ pdev = &ab->pdevs[i];
+ ar = pdev->ar;
+ if (!ar)
+ continue;
+
+ ieee80211_free_hw(ar->hw);
+ pdev->ar = NULL;
+ }
+}
diff --git a/drivers/net/wireless/ath/ath11k/mac.h b/drivers/net/wireless/ath/ath11k/mac.h
new file mode 100644
index 000000000000..f286531cdd78
--- /dev/null
+++ b/drivers/net/wireless/ath/ath11k/mac.h
@@ -0,0 +1,147 @@
+/* SPDX-License-Identifier: BSD-3-Clause-Clear */
+/*
+ * Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
+ */
+
+#ifndef ATH11K_MAC_H
+#define ATH11K_MAC_H
+
+#include <net/mac80211.h>
+#include <net/cfg80211.h>
+
+struct ath11k;
+struct ath11k_base;
+
+struct ath11k_generic_iter {
+ struct ath11k *ar;
+ int ret;
+};
+
+/* number of failed packets (20 packets with 16 sw retries each) */
+#define ATH11K_KICKOUT_THRESHOLD (20 * 16)
+
+/* Use insanely high numbers to make sure that the firmware implementation
+ * won't start; we already have the same functionality in hostapd. Unit
+ * is seconds.
+ */
+#define ATH11K_KEEPALIVE_MIN_IDLE 3747
+#define ATH11K_KEEPALIVE_MAX_IDLE 3895
+#define ATH11K_KEEPALIVE_MAX_UNRESPONSIVE 3900
+
+#define WMI_HOST_RC_DS_FLAG 0x01
+#define WMI_HOST_RC_CW40_FLAG 0x02
+#define WMI_HOST_RC_SGI_FLAG 0x04
+#define WMI_HOST_RC_HT_FLAG 0x08
+#define WMI_HOST_RC_RTSCTS_FLAG 0x10
+#define WMI_HOST_RC_TX_STBC_FLAG 0x20
+#define WMI_HOST_RC_RX_STBC_FLAG 0xC0
+#define WMI_HOST_RC_RX_STBC_FLAG_S 6
+#define WMI_HOST_RC_WEP_TKIP_FLAG 0x100
+#define WMI_HOST_RC_TS_FLAG 0x200
+#define WMI_HOST_RC_UAPSD_FLAG 0x400
+
+#define WMI_HT_CAP_ENABLED 0x0001
+#define WMI_HT_CAP_HT20_SGI 0x0002
+#define WMI_HT_CAP_DYNAMIC_SMPS 0x0004
+#define WMI_HT_CAP_TX_STBC 0x0008
+#define WMI_HT_CAP_TX_STBC_MASK_SHIFT 3
+#define WMI_HT_CAP_RX_STBC 0x0030
+#define WMI_HT_CAP_RX_STBC_MASK_SHIFT 4
+#define WMI_HT_CAP_LDPC 0x0040
+#define WMI_HT_CAP_L_SIG_TXOP_PROT 0x0080
+#define WMI_HT_CAP_MPDU_DENSITY 0x0700
+#define WMI_HT_CAP_MPDU_DENSITY_MASK_SHIFT 8
+#define WMI_HT_CAP_HT40_SGI 0x0800
+#define WMI_HT_CAP_RX_LDPC 0x1000
+#define WMI_HT_CAP_TX_LDPC 0x2000
+#define WMI_HT_CAP_IBF_BFER 0x4000
+
+/* These macros should be used when we wish to advertise STBC support for
+ * only 1SS or 2SS or 3SS.
+ */
+#define WMI_HT_CAP_RX_STBC_1SS 0x0010
+#define WMI_HT_CAP_RX_STBC_2SS 0x0020
+#define WMI_HT_CAP_RX_STBC_3SS 0x0030
+
+#define WMI_HT_CAP_DEFAULT_ALL (WMI_HT_CAP_ENABLED | \
+ WMI_HT_CAP_HT20_SGI | \
+ WMI_HT_CAP_HT40_SGI | \
+ WMI_HT_CAP_TX_STBC | \
+ WMI_HT_CAP_RX_STBC | \
+ WMI_HT_CAP_LDPC)
+
+#define WMI_VHT_CAP_MAX_MPDU_LEN_MASK 0x00000003
+#define WMI_VHT_CAP_RX_LDPC 0x00000010
+#define WMI_VHT_CAP_SGI_80MHZ 0x00000020
+#define WMI_VHT_CAP_SGI_160MHZ 0x00000040
+#define WMI_VHT_CAP_TX_STBC 0x00000080
+#define WMI_VHT_CAP_RX_STBC_MASK 0x00000300
+#define WMI_VHT_CAP_RX_STBC_MASK_SHIFT 8
+#define WMI_VHT_CAP_SU_BFER 0x00000800
+#define WMI_VHT_CAP_SU_BFEE 0x00001000
+#define WMI_VHT_CAP_MAX_CS_ANT_MASK 0x0000E000
+#define WMI_VHT_CAP_MAX_CS_ANT_MASK_SHIFT 13
+#define WMI_VHT_CAP_MAX_SND_DIM_MASK 0x00070000
+#define WMI_VHT_CAP_MAX_SND_DIM_MASK_SHIFT 16
+#define WMI_VHT_CAP_MU_BFER 0x00080000
+#define WMI_VHT_CAP_MU_BFEE 0x00100000
+#define WMI_VHT_CAP_MAX_AMPDU_LEN_EXP 0x03800000
+#define WMI_VHT_CAP_MAX_AMPDU_LEN_EXP_SHIFT 23
+#define WMI_VHT_CAP_RX_FIXED_ANT 0x10000000
+#define WMI_VHT_CAP_TX_FIXED_ANT 0x20000000
+
+#define WMI_VHT_CAP_MAX_MPDU_LEN_11454 0x00000002
+
+/* These macros should be used when we wish to advertise STBC support for
+ * only 1SS or 2SS or 3SS.
+ */
+#define WMI_VHT_CAP_RX_STBC_1SS 0x00000100
+#define WMI_VHT_CAP_RX_STBC_2SS 0x00000200
+#define WMI_VHT_CAP_RX_STBC_3SS 0x00000300
+
+#define WMI_VHT_CAP_DEFAULT_ALL (WMI_VHT_CAP_MAX_MPDU_LEN_11454 | \
+ WMI_VHT_CAP_SGI_80MHZ | \
+ WMI_VHT_CAP_TX_STBC | \
+ WMI_VHT_CAP_RX_STBC_MASK | \
+ WMI_VHT_CAP_RX_LDPC | \
+ WMI_VHT_CAP_MAX_AMPDU_LEN_EXP | \
+ WMI_VHT_CAP_RX_FIXED_ANT | \
+ WMI_VHT_CAP_TX_FIXED_ANT)
+
+/* FIXME: should these be in ieee80211.h? */
+#define IEEE80211_VHT_MCS_SUPPORT_0_11_MASK GENMASK(23, 16)
+#define IEEE80211_DISABLE_VHT_MCS_SUPPORT_0_11 BIT(24)
+
+#define WMI_MAX_SPATIAL_STREAM 3
+
+#define ATH11K_CHAN_WIDTH_NUM 8
+
+extern const struct htt_rx_ring_tlv_filter ath11k_mac_mon_status_filter_default;
+
+void ath11k_mac_destroy(struct ath11k_base *ab);
+void ath11k_mac_unregister(struct ath11k_base *ab);
+int ath11k_mac_register(struct ath11k_base *ab);
+int ath11k_mac_allocate(struct ath11k_base *ab);
+int ath11k_mac_hw_ratecode_to_legacy_rate(u8 hw_rc, u8 preamble, u8 *rateidx,
+ u16 *rate);
+u8 ath11k_mac_bitrate_to_idx(const struct ieee80211_supported_band *sband,
+ u32 bitrate);
+u8 ath11k_mac_hw_rate_to_idx(const struct ieee80211_supported_band *sband,
+ u8 hw_rate, bool cck);
+
+void __ath11k_mac_scan_finish(struct ath11k *ar);
+void ath11k_mac_scan_finish(struct ath11k *ar);
+
+struct ath11k_vif *ath11k_mac_get_arvif(struct ath11k *ar, u32 vdev_id);
+struct ath11k_vif *ath11k_mac_get_arvif_by_vdev_id(struct ath11k_base *ab,
+ u32 vdev_id);
+struct ath11k *ath11k_mac_get_ar_by_vdev_id(struct ath11k_base *ab, u32 vdev_id);
+struct ath11k *ath11k_mac_get_ar_by_pdev_id(struct ath11k_base *ab, u32 pdev_id);
+struct ath11k *ath11k_mac_get_ar_vdev_stop_status(struct ath11k_base *ab,
+ u32 vdev_id);
+
+void ath11k_mac_drain_tx(struct ath11k *ar);
+void ath11k_mac_peer_cleanup_all(struct ath11k *ar);
+int ath11k_mac_tx_mgmt_pending_free(int buf_id, void *skb, void *ctx);
+u8 ath11k_mac_bw_to_mac80211_bw(u8 bw);
+#endif
diff --git a/drivers/net/wireless/ath/ath11k/peer.c b/drivers/net/wireless/ath/ath11k/peer.c
new file mode 100644
index 000000000000..4bf1dfa498b6
--- /dev/null
+++ b/drivers/net/wireless/ath/ath11k/peer.c
@@ -0,0 +1,236 @@
+// SPDX-License-Identifier: BSD-3-Clause-Clear
+/*
+ * Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
+ */
+
+#include "core.h"
+#include "peer.h"
+#include "debug.h"
+
+struct ath11k_peer *ath11k_peer_find(struct ath11k_base *ab, int vdev_id,
+ const u8 *addr)
+{
+ struct ath11k_peer *peer;
+
+ lockdep_assert_held(&ab->base_lock);
+
+ list_for_each_entry(peer, &ab->peers, list) {
+ if (peer->vdev_id != vdev_id)
+ continue;
+ if (memcmp(peer->addr, addr, ETH_ALEN))
+ continue;
+
+ return peer;
+ }
+
+ return NULL;
+}
+
+struct ath11k_peer *ath11k_peer_find_by_addr(struct ath11k_base *ab,
+ const u8 *addr)
+{
+ struct ath11k_peer *peer;
+
+ lockdep_assert_held(&ab->base_lock);
+
+ list_for_each_entry(peer, &ab->peers, list) {
+ if (memcmp(peer->addr, addr, ETH_ALEN))
+ continue;
+
+ return peer;
+ }
+
+ return NULL;
+}
+
+struct ath11k_peer *ath11k_peer_find_by_id(struct ath11k_base *ab,
+ int peer_id)
+{
+ struct ath11k_peer *peer;
+
+ lockdep_assert_held(&ab->base_lock);
+
+ list_for_each_entry(peer, &ab->peers, list)
+ if (peer_id == peer->peer_id)
+ return peer;
+
+ return NULL;
+}
+
+void ath11k_peer_unmap_event(struct ath11k_base *ab, u16 peer_id)
+{
+ struct ath11k_peer *peer;
+
+ spin_lock_bh(&ab->base_lock);
+
+ peer = ath11k_peer_find_by_id(ab, peer_id);
+ if (!peer) {
+ ath11k_warn(ab, "peer-unmap-event: unknown peer id %d\n",
+ peer_id);
+ goto exit;
+ }
+
+ ath11k_dbg(ab, ATH11K_DBG_DP_HTT, "htt peer unmap vdev %d peer %pM id %d\n",
+ peer->vdev_id, peer->addr, peer_id);
+
+ list_del(&peer->list);
+ kfree(peer);
+ wake_up(&ab->peer_mapping_wq);
+
+exit:
+ spin_unlock_bh(&ab->base_lock);
+}
+
+void ath11k_peer_map_event(struct ath11k_base *ab, u8 vdev_id, u16 peer_id,
+ u8 *mac_addr, u16 ast_hash)
+{
+ struct ath11k_peer *peer;
+
+ spin_lock_bh(&ab->base_lock);
+ peer = ath11k_peer_find(ab, vdev_id, mac_addr);
+ if (!peer) {
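+ /* allocate atomically since ab->base_lock is held */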
+ peer = kzalloc(sizeof(*peer), GFP_ATOMIC);
+ if (!peer)
+ goto exit;
+
+ peer->vdev_id = vdev_id;
+ peer->peer_id = peer_id;
+ peer->ast_hash = ast_hash;
+ ether_addr_copy(peer->addr, mac_addr);
+ list_add(&peer->list, &ab->peers);
+ wake_up(&ab->peer_mapping_wq);
+ }
+
+ ath11k_dbg(ab, ATH11K_DBG_DP_HTT, "htt peer map vdev %d peer %pM id %d\n",
+ vdev_id, mac_addr, peer_id);
+
+exit:
+ spin_unlock_bh(&ab->base_lock);
+}
+
+static int ath11k_wait_for_peer_common(struct ath11k_base *ab, int vdev_id,
+ const u8 *addr, bool expect_mapped)
+{
+ int ret;
+
+ ret = wait_event_timeout(ab->peer_mapping_wq, ({
+ bool mapped;
+
+ spin_lock_bh(&ab->base_lock);
+ mapped = !!ath11k_peer_find(ab, vdev_id, addr);
+ spin_unlock_bh(&ab->base_lock);
+
+ (mapped == expect_mapped ||
+ test_bit(ATH11K_FLAG_CRASH_FLUSH, &ab->dev_flags));
+ }), 3 * HZ);
+
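+ /* wait_event_timeout() returns 0 if the timeout elapsed */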
+ if (ret <= 0)
+ return -ETIMEDOUT;
+
+ return 0;
+}
+
+void ath11k_peer_cleanup(struct ath11k *ar, u32 vdev_id)
+{
+ struct ath11k_peer *peer, *tmp;
+ struct ath11k_base *ab = ar->ab;
+
+ lockdep_assert_held(&ar->conf_mutex);
+
+ spin_lock_bh(&ab->base_lock);
+ list_for_each_entry_safe(peer, tmp, &ab->peers, list) {
+ if (peer->vdev_id != vdev_id)
+ continue;
+
+ ath11k_warn(ab, "removing stale peer %pM from vdev_id %d\n",
+ peer->addr, vdev_id);
+
+ list_del(&peer->list);
+ kfree(peer);
+ ar->num_peers--;
+ }
+
+ spin_unlock_bh(&ab->base_lock);
+}
+
+static int ath11k_wait_for_peer_deleted(struct ath11k *ar, int vdev_id, const u8 *addr)
+{
+ return ath11k_wait_for_peer_common(ar->ab, vdev_id, addr, false);
+}
+
+int ath11k_peer_delete(struct ath11k *ar, u32 vdev_id, u8 *addr)
+{
+ int ret;
+
+ lockdep_assert_held(&ar->conf_mutex);
+
+ ret = ath11k_wmi_send_peer_delete_cmd(ar, addr, vdev_id);
+ if (ret) {
+ ath11k_warn(ar->ab,
+ "failed to delete peer vdev_id %d addr %pM ret %d\n",
+ vdev_id, addr, ret);
+ return ret;
+ }
+
+ ret = ath11k_wait_for_peer_deleted(ar, vdev_id, addr);
+ if (ret)
+ return ret;
+
+ ar->num_peers--;
+
+ return 0;
+}
+
+static int ath11k_wait_for_peer_created(struct ath11k *ar, int vdev_id, const u8 *addr)
+{
+ return ath11k_wait_for_peer_common(ar->ab, vdev_id, addr, true);
+}
+
+int ath11k_peer_create(struct ath11k *ar, struct ath11k_vif *arvif,
+ struct ieee80211_sta *sta, struct peer_create_params *param)
+{
+ struct ath11k_peer *peer;
+ int ret;
+
+ lockdep_assert_held(&ar->conf_mutex);
+
+ if (ar->num_peers > (ar->max_num_peers - 1)) {
+ ath11k_warn(ar->ab,
+ "failed to create peer due to insufficient peer entry resource in firmware\n");
+ return -ENOBUFS;
+ }
+
+ ret = ath11k_wmi_send_peer_create_cmd(ar, param);
+ if (ret) {
+ ath11k_warn(ar->ab,
+ "failed to send peer create vdev_id %d ret %d\n",
+ param->vdev_id, ret);
+ return ret;
+ }
+
+ ret = ath11k_wait_for_peer_created(ar, param->vdev_id,
+ param->peer_addr);
+ if (ret)
+ return ret;
+
+ spin_lock_bh(&ar->ab->base_lock);
+
+ peer = ath11k_peer_find(ar->ab, param->vdev_id, param->peer_addr);
+ if (!peer) {
+ spin_unlock_bh(&ar->ab->base_lock);
+ ath11k_warn(ar->ab, "failed to find peer %pM on vdev %i after creation\n",
+ param->peer_addr, param->vdev_id);
+ ath11k_wmi_send_peer_delete_cmd(ar, param->peer_addr,
+ param->vdev_id);
+ return -ENOENT;
+ }
+
+ peer->sta = sta;
+ arvif->ast_hash = peer->ast_hash;
+
+ ar->num_peers++;
+
+ spin_unlock_bh(&ar->ab->base_lock);
+
+ return 0;
+}
diff --git a/drivers/net/wireless/ath/ath11k/peer.h b/drivers/net/wireless/ath/ath11k/peer.h
new file mode 100644
index 000000000000..9a40d1f6ccd9
--- /dev/null
+++ b/drivers/net/wireless/ath/ath11k/peer.h
@@ -0,0 +1,35 @@
+/* SPDX-License-Identifier: BSD-3-Clause-Clear */
+/*
+ * Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
+ */
+
+#ifndef ATH11K_PEER_H
+#define ATH11K_PEER_H
+
+struct ath11k_peer {
+ struct list_head list;
+ struct ieee80211_sta *sta;
+ int vdev_id;
+ u8 addr[ETH_ALEN];
+ int peer_id;
+ u16 ast_hash;
+
+ /* protected by ab->data_lock */
+ struct ieee80211_key_conf *keys[WMI_MAX_KEY_INDEX + 1];
+ struct dp_rx_tid rx_tid[IEEE80211_NUM_TIDS + 1];
+};
+
+void ath11k_peer_unmap_event(struct ath11k_base *ab, u16 peer_id);
+void ath11k_peer_map_event(struct ath11k_base *ab, u8 vdev_id, u16 peer_id,
+ u8 *mac_addr, u16 ast_hash);
+struct ath11k_peer *ath11k_peer_find(struct ath11k_base *ab, int vdev_id,
+ const u8 *addr);
+struct ath11k_peer *ath11k_peer_find_by_addr(struct ath11k_base *ab,
+ const u8 *addr);
+struct ath11k_peer *ath11k_peer_find_by_id(struct ath11k_base *ab, int peer_id);
+void ath11k_peer_cleanup(struct ath11k *ar, u32 vdev_id);
+int ath11k_peer_delete(struct ath11k *ar, u32 vdev_id, u8 *addr);
+int ath11k_peer_create(struct ath11k *ar, struct ath11k_vif *arvif,
+ struct ieee80211_sta *sta, struct peer_create_params *param);
+
+#endif /* ATH11K_PEER_H */
diff --git a/drivers/net/wireless/ath/ath11k/qmi.c b/drivers/net/wireless/ath/ath11k/qmi.c
new file mode 100644
index 000000000000..2377895a58ec
--- /dev/null
+++ b/drivers/net/wireless/ath/ath11k/qmi.c
@@ -0,0 +1,2433 @@
+// SPDX-License-Identifier: BSD-3-Clause-Clear
+/*
+ * Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
+ */
+
+#include "qmi.h"
+#include "core.h"
+#include "debug.h"
+#include <linux/of.h>
+#include <linux/firmware.h>
+
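+/* QMI message encoding/decoding rules: each optional TLV is described
+ * by a QMI_OPT_FLAG element for its *_valid flag followed by the
+ * element carrying the value, both using the same tlv_type.
+ */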
+static struct qmi_elem_info qmi_wlanfw_host_cap_req_msg_v01_ei[] = {
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x10,
+ .offset = offsetof(struct qmi_wlanfw_host_cap_req_msg_v01,
+ num_clients_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u32),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x10,
+ .offset = offsetof(struct qmi_wlanfw_host_cap_req_msg_v01,
+ num_clients),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x11,
+ .offset = offsetof(struct qmi_wlanfw_host_cap_req_msg_v01,
+ wake_msi_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u32),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x11,
+ .offset = offsetof(struct qmi_wlanfw_host_cap_req_msg_v01,
+ wake_msi),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x12,
+ .offset = offsetof(struct qmi_wlanfw_host_cap_req_msg_v01,
+ gpios_valid),
+ },
+ {
+ .data_type = QMI_DATA_LEN,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x12,
+ .offset = offsetof(struct qmi_wlanfw_host_cap_req_msg_v01,
+ gpios_len),
+ },
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = QMI_WLFW_MAX_NUM_GPIO_V01,
+ .elem_size = sizeof(u32),
+ .array_type = VAR_LEN_ARRAY,
+ .tlv_type = 0x12,
+ .offset = offsetof(struct qmi_wlanfw_host_cap_req_msg_v01,
+ gpios),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x13,
+ .offset = offsetof(struct qmi_wlanfw_host_cap_req_msg_v01,
+ nm_modem_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_1_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x13,
+ .offset = offsetof(struct qmi_wlanfw_host_cap_req_msg_v01,
+ nm_modem),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x14,
+ .offset = offsetof(struct qmi_wlanfw_host_cap_req_msg_v01,
+ bdf_support_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_1_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x14,
+ .offset = offsetof(struct qmi_wlanfw_host_cap_req_msg_v01,
+ bdf_support),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x15,
+ .offset = offsetof(struct qmi_wlanfw_host_cap_req_msg_v01,
+ bdf_cache_support_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_1_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x15,
+ .offset = offsetof(struct qmi_wlanfw_host_cap_req_msg_v01,
+ bdf_cache_support),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x16,
+ .offset = offsetof(struct qmi_wlanfw_host_cap_req_msg_v01,
+ m3_support_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_1_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x16,
+ .offset = offsetof(struct qmi_wlanfw_host_cap_req_msg_v01,
+ m3_support),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x17,
+ .offset = offsetof(struct qmi_wlanfw_host_cap_req_msg_v01,
+ m3_cache_support_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_1_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x17,
+ .offset = offsetof(struct qmi_wlanfw_host_cap_req_msg_v01,
+ m3_cache_support),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x18,
+ .offset = offsetof(struct qmi_wlanfw_host_cap_req_msg_v01,
+ cal_filesys_support_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_1_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x18,
+ .offset = offsetof(struct qmi_wlanfw_host_cap_req_msg_v01,
+ cal_filesys_support),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x19,
+ .offset = offsetof(struct qmi_wlanfw_host_cap_req_msg_v01,
+ cal_cache_support_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_1_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x19,
+ .offset = offsetof(struct qmi_wlanfw_host_cap_req_msg_v01,
+ cal_cache_support),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x1A,
+ .offset = offsetof(struct qmi_wlanfw_host_cap_req_msg_v01,
+ cal_done_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_1_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x1A,
+ .offset = offsetof(struct qmi_wlanfw_host_cap_req_msg_v01,
+ cal_done),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x1B,
+ .offset = offsetof(struct qmi_wlanfw_host_cap_req_msg_v01,
+ mem_bucket_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u32),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x1B,
+ .offset = offsetof(struct qmi_wlanfw_host_cap_req_msg_v01,
+ mem_bucket),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x1C,
+ .offset = offsetof(struct qmi_wlanfw_host_cap_req_msg_v01,
+ mem_cfg_mode_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_1_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x1C,
+ .offset = offsetof(struct qmi_wlanfw_host_cap_req_msg_v01,
+ mem_cfg_mode),
+ },
+ {
+ .data_type = QMI_EOTI,
+ .array_type = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ },
+};
+
+static struct qmi_elem_info qmi_wlanfw_host_cap_resp_msg_v01_ei[] = {
+ {
+ .data_type = QMI_STRUCT,
+ .elem_len = 1,
+ .elem_size = sizeof(struct qmi_response_type_v01),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x02,
+ .offset = offsetof(struct qmi_wlanfw_host_cap_resp_msg_v01, resp),
+ .ei_array = qmi_response_type_v01_ei,
+ },
+ {
+ .data_type = QMI_EOTI,
+ .array_type = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ },
+};
+
+static struct qmi_elem_info qmi_wlanfw_ind_register_req_msg_v01_ei[] = {
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x10,
+ .offset = offsetof(struct qmi_wlanfw_ind_register_req_msg_v01,
+ fw_ready_enable_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_1_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x10,
+ .offset = offsetof(struct qmi_wlanfw_ind_register_req_msg_v01,
+ fw_ready_enable),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x11,
+ .offset = offsetof(struct qmi_wlanfw_ind_register_req_msg_v01,
+ initiate_cal_download_enable_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_1_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x11,
+ .offset = offsetof(struct qmi_wlanfw_ind_register_req_msg_v01,
+ initiate_cal_download_enable),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x12,
+ .offset = offsetof(struct qmi_wlanfw_ind_register_req_msg_v01,
+ initiate_cal_update_enable_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_1_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x12,
+ .offset = offsetof(struct qmi_wlanfw_ind_register_req_msg_v01,
+ initiate_cal_update_enable),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x13,
+ .offset = offsetof(struct qmi_wlanfw_ind_register_req_msg_v01,
+ msa_ready_enable_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_1_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x13,
+ .offset = offsetof(struct qmi_wlanfw_ind_register_req_msg_v01,
+ msa_ready_enable),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x14,
+ .offset = offsetof(struct qmi_wlanfw_ind_register_req_msg_v01,
+ pin_connect_result_enable_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_1_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x14,
+ .offset = offsetof(struct qmi_wlanfw_ind_register_req_msg_v01,
+ pin_connect_result_enable),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x15,
+ .offset = offsetof(struct qmi_wlanfw_ind_register_req_msg_v01,
+ client_id_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u32),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x15,
+ .offset = offsetof(struct qmi_wlanfw_ind_register_req_msg_v01,
+ client_id),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x16,
+ .offset = offsetof(struct qmi_wlanfw_ind_register_req_msg_v01,
+ request_mem_enable_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_1_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x16,
+ .offset = offsetof(struct qmi_wlanfw_ind_register_req_msg_v01,
+ request_mem_enable),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x17,
+ .offset = offsetof(struct qmi_wlanfw_ind_register_req_msg_v01,
+ fw_mem_ready_enable_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_1_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x17,
+ .offset = offsetof(struct qmi_wlanfw_ind_register_req_msg_v01,
+ fw_mem_ready_enable),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x18,
+ .offset = offsetof(struct qmi_wlanfw_ind_register_req_msg_v01,
+ fw_init_done_enable_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_1_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x18,
+ .offset = offsetof(struct qmi_wlanfw_ind_register_req_msg_v01,
+ fw_init_done_enable),
+ },
+
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x19,
+ .offset = offsetof(struct qmi_wlanfw_ind_register_req_msg_v01,
+ rejuvenate_enable_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_1_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x19,
+ .offset = offsetof(struct qmi_wlanfw_ind_register_req_msg_v01,
+ rejuvenate_enable),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x1A,
+ .offset = offsetof(struct qmi_wlanfw_ind_register_req_msg_v01,
+ xo_cal_enable_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_1_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x1A,
+ .offset = offsetof(struct qmi_wlanfw_ind_register_req_msg_v01,
+ xo_cal_enable),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x1B,
+ .offset = offsetof(struct qmi_wlanfw_ind_register_req_msg_v01,
+ cal_done_enable_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_1_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x1B,
+ .offset = offsetof(struct qmi_wlanfw_ind_register_req_msg_v01,
+ cal_done_enable),
+ },
+ {
+ .data_type = QMI_EOTI,
+ .array_type = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ },
+};
+
+static struct qmi_elem_info qmi_wlanfw_ind_register_resp_msg_v01_ei[] = {
+ {
+ .data_type = QMI_STRUCT,
+ .elem_len = 1,
+ .elem_size = sizeof(struct qmi_response_type_v01),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x02,
+ .offset = offsetof(struct qmi_wlanfw_ind_register_resp_msg_v01,
+ resp),
+ .ei_array = qmi_response_type_v01_ei,
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x10,
+ .offset = offsetof(struct qmi_wlanfw_ind_register_resp_msg_v01,
+ fw_status_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_8_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u64),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x10,
+ .offset = offsetof(struct qmi_wlanfw_ind_register_resp_msg_v01,
+ fw_status),
+ },
+ {
+ .data_type = QMI_EOTI,
+ .array_type = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ },
+};
+
+static struct qmi_elem_info qmi_wlanfw_mem_cfg_s_v01_ei[] = {
+ {
+ .data_type = QMI_UNSIGNED_8_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u64),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0,
+ .offset = offsetof(struct qmi_wlanfw_mem_cfg_s_v01, offset),
+ },
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u32),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0,
+ .offset = offsetof(struct qmi_wlanfw_mem_cfg_s_v01, size),
+ },
+ {
+ .data_type = QMI_UNSIGNED_1_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0,
+ .offset = offsetof(struct qmi_wlanfw_mem_cfg_s_v01, secure_flag),
+ },
+ {
+ .data_type = QMI_EOTI,
+ .array_type = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ },
+};
+
+static struct qmi_elem_info qmi_wlanfw_mem_seg_s_v01_ei[] = {
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u32),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0,
+ .offset = offsetof(struct qmi_wlanfw_mem_seg_s_v01,
+ size),
+ },
+ {
+ .data_type = QMI_SIGNED_4_BYTE_ENUM,
+ .elem_len = 1,
+ .elem_size = sizeof(enum qmi_wlanfw_mem_type_enum_v01),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0,
+ .offset = offsetof(struct qmi_wlanfw_mem_seg_s_v01, type),
+ },
+ {
+ .data_type = QMI_DATA_LEN,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0,
+ .offset = offsetof(struct qmi_wlanfw_mem_seg_s_v01, mem_cfg_len),
+ },
+ {
+ .data_type = QMI_STRUCT,
+ .elem_len = QMI_WLANFW_MAX_NUM_MEM_CFG_V01,
+ .elem_size = sizeof(struct qmi_wlanfw_mem_cfg_s_v01),
+ .array_type = VAR_LEN_ARRAY,
+ .tlv_type = 0,
+ .offset = offsetof(struct qmi_wlanfw_mem_seg_s_v01, mem_cfg),
+ .ei_array = qmi_wlanfw_mem_cfg_s_v01_ei,
+ },
+ {
+ .data_type = QMI_EOTI,
+ .array_type = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ },
+};
+
+static struct qmi_elem_info qmi_wlanfw_request_mem_ind_msg_v01_ei[] = {
+ {
+ .data_type = QMI_DATA_LEN,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x01,
+ .offset = offsetof(struct qmi_wlanfw_request_mem_ind_msg_v01,
+ mem_seg_len),
+ },
+ {
+ .data_type = QMI_STRUCT,
+ .elem_len = ATH11K_QMI_WLANFW_MAX_NUM_MEM_SEG_V01,
+ .elem_size = sizeof(struct qmi_wlanfw_mem_seg_s_v01),
+ .array_type = VAR_LEN_ARRAY,
+ .tlv_type = 0x01,
+ .offset = offsetof(struct qmi_wlanfw_request_mem_ind_msg_v01,
+ mem_seg),
+ .ei_array = qmi_wlanfw_mem_seg_s_v01_ei,
+ },
+ {
+ .data_type = QMI_EOTI,
+ .array_type = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ },
+};
+
+static struct qmi_elem_info qmi_wlanfw_mem_seg_resp_s_v01_ei[] = {
+ {
+ .data_type = QMI_UNSIGNED_8_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u64),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0,
+ .offset = offsetof(struct qmi_wlanfw_mem_seg_resp_s_v01, addr),
+ },
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u32),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0,
+ .offset = offsetof(struct qmi_wlanfw_mem_seg_resp_s_v01, size),
+ },
+ {
+ .data_type = QMI_SIGNED_4_BYTE_ENUM,
+ .elem_len = 1,
+ .elem_size = sizeof(enum qmi_wlanfw_mem_type_enum_v01),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0,
+ .offset = offsetof(struct qmi_wlanfw_mem_seg_resp_s_v01, type),
+ },
+ {
+ .data_type = QMI_UNSIGNED_1_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0,
+ .offset = offsetof(struct qmi_wlanfw_mem_seg_resp_s_v01, restore),
+ },
+ {
+ .data_type = QMI_EOTI,
+ .array_type = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ },
+};
+
+static struct qmi_elem_info qmi_wlanfw_respond_mem_req_msg_v01_ei[] = {
+ {
+ .data_type = QMI_DATA_LEN,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x01,
+ .offset = offsetof(struct qmi_wlanfw_respond_mem_req_msg_v01,
+ mem_seg_len),
+ },
+ {
+ .data_type = QMI_STRUCT,
+ .elem_len = ATH11K_QMI_WLANFW_MAX_NUM_MEM_SEG_V01,
+ .elem_size = sizeof(struct qmi_wlanfw_mem_seg_resp_s_v01),
+ .array_type = VAR_LEN_ARRAY,
+ .tlv_type = 0x01,
+ .offset = offsetof(struct qmi_wlanfw_respond_mem_req_msg_v01,
+ mem_seg),
+ .ei_array = qmi_wlanfw_mem_seg_resp_s_v01_ei,
+ },
+ {
+ .data_type = QMI_EOTI,
+ .array_type = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ },
+};
+
+static struct qmi_elem_info qmi_wlanfw_respond_mem_resp_msg_v01_ei[] = {
+ {
+ .data_type = QMI_STRUCT,
+ .elem_len = 1,
+ .elem_size = sizeof(struct qmi_response_type_v01),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x02,
+ .offset = offsetof(struct qmi_wlanfw_respond_mem_resp_msg_v01,
+ resp),
+ .ei_array = qmi_response_type_v01_ei,
+ },
+ {
+ .data_type = QMI_EOTI,
+ .array_type = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ },
+};
+
+static struct qmi_elem_info qmi_wlanfw_cap_req_msg_v01_ei[] = {
+ {
+ .data_type = QMI_EOTI,
+ .array_type = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ },
+};
+
+static struct qmi_elem_info qmi_wlanfw_rf_chip_info_s_v01_ei[] = {
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u32),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0,
+ .offset = offsetof(struct qmi_wlanfw_rf_chip_info_s_v01,
+ chip_id),
+ },
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u32),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0,
+ .offset = offsetof(struct qmi_wlanfw_rf_chip_info_s_v01,
+ chip_family),
+ },
+ {
+ .data_type = QMI_EOTI,
+ .array_type = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ },
+};
+
+static struct qmi_elem_info qmi_wlanfw_rf_board_info_s_v01_ei[] = {
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u32),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0,
+ .offset = offsetof(struct qmi_wlanfw_rf_board_info_s_v01,
+ board_id),
+ },
+ {
+ .data_type = QMI_EOTI,
+ .array_type = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ },
+};
+
+static struct qmi_elem_info qmi_wlanfw_soc_info_s_v01_ei[] = {
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u32),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0,
+ .offset = offsetof(struct qmi_wlanfw_soc_info_s_v01, soc_id),
+ },
+ {
+ .data_type = QMI_EOTI,
+ .array_type = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ },
+};
+
+static struct qmi_elem_info qmi_wlanfw_fw_version_info_s_v01_ei[] = {
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u32),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0,
+ .offset = offsetof(struct qmi_wlanfw_fw_version_info_s_v01,
+ fw_version),
+ },
+ {
+ .data_type = QMI_STRING,
+ .elem_len = ATH11K_QMI_WLANFW_MAX_TIMESTAMP_LEN_V01 + 1,
+ .elem_size = sizeof(char),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0,
+ .offset = offsetof(struct qmi_wlanfw_fw_version_info_s_v01,
+ fw_build_timestamp),
+ },
+ {
+ .data_type = QMI_EOTI,
+ .array_type = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ },
+};
+
+static struct qmi_elem_info qmi_wlanfw_cap_resp_msg_v01_ei[] = {
+ {
+ .data_type = QMI_STRUCT,
+ .elem_len = 1,
+ .elem_size = sizeof(struct qmi_response_type_v01),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x02,
+ .offset = offsetof(struct qmi_wlanfw_cap_resp_msg_v01, resp),
+ .ei_array = qmi_response_type_v01_ei,
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x10,
+ .offset = offsetof(struct qmi_wlanfw_cap_resp_msg_v01,
+ chip_info_valid),
+ },
+ {
+ .data_type = QMI_STRUCT,
+ .elem_len = 1,
+ .elem_size = sizeof(struct qmi_wlanfw_rf_chip_info_s_v01),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x10,
+ .offset = offsetof(struct qmi_wlanfw_cap_resp_msg_v01,
+ chip_info),
+ .ei_array = qmi_wlanfw_rf_chip_info_s_v01_ei,
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x11,
+ .offset = offsetof(struct qmi_wlanfw_cap_resp_msg_v01,
+ board_info_valid),
+ },
+ {
+ .data_type = QMI_STRUCT,
+ .elem_len = 1,
+ .elem_size = sizeof(struct qmi_wlanfw_rf_board_info_s_v01),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x11,
+ .offset = offsetof(struct qmi_wlanfw_cap_resp_msg_v01,
+ board_info),
+ .ei_array = qmi_wlanfw_rf_board_info_s_v01_ei,
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x12,
+ .offset = offsetof(struct qmi_wlanfw_cap_resp_msg_v01,
+ soc_info_valid),
+ },
+ {
+ .data_type = QMI_STRUCT,
+ .elem_len = 1,
+ .elem_size = sizeof(struct qmi_wlanfw_soc_info_s_v01),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x12,
+ .offset = offsetof(struct qmi_wlanfw_cap_resp_msg_v01,
+ soc_info),
+ .ei_array = qmi_wlanfw_soc_info_s_v01_ei,
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x13,
+ .offset = offsetof(struct qmi_wlanfw_cap_resp_msg_v01,
+ fw_version_info_valid),
+ },
+ {
+ .data_type = QMI_STRUCT,
+ .elem_len = 1,
+ .elem_size = sizeof(struct qmi_wlanfw_fw_version_info_s_v01),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x13,
+ .offset = offsetof(struct qmi_wlanfw_cap_resp_msg_v01,
+ fw_version_info),
+ .ei_array = qmi_wlanfw_fw_version_info_s_v01_ei,
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x14,
+ .offset = offsetof(struct qmi_wlanfw_cap_resp_msg_v01,
+ fw_build_id_valid),
+ },
+ {
+ .data_type = QMI_STRING,
+ .elem_len = ATH11K_QMI_WLANFW_MAX_BUILD_ID_LEN_V01 + 1,
+ .elem_size = sizeof(char),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x14,
+ .offset = offsetof(struct qmi_wlanfw_cap_resp_msg_v01,
+ fw_build_id),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x15,
+ .offset = offsetof(struct qmi_wlanfw_cap_resp_msg_v01,
+ num_macs_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_1_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x15,
+ .offset = offsetof(struct qmi_wlanfw_cap_resp_msg_v01,
+ num_macs),
+ },
+ {
+ .data_type = QMI_EOTI,
+ .array_type = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ },
+};
+
+static struct qmi_elem_info qmi_wlanfw_bdf_download_req_msg_v01_ei[] = {
+ {
+ .data_type = QMI_UNSIGNED_1_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x01,
+ .offset = offsetof(struct qmi_wlanfw_bdf_download_req_msg_v01,
+ valid),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x10,
+ .offset = offsetof(struct qmi_wlanfw_bdf_download_req_msg_v01,
+ file_id_valid),
+ },
+ {
+ .data_type = QMI_SIGNED_4_BYTE_ENUM,
+ .elem_len = 1,
+ .elem_size = sizeof(enum qmi_wlanfw_cal_temp_id_enum_v01),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x10,
+ .offset = offsetof(struct qmi_wlanfw_bdf_download_req_msg_v01,
+ file_id),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x11,
+ .offset = offsetof(struct qmi_wlanfw_bdf_download_req_msg_v01,
+ total_size_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u32),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x11,
+ .offset = offsetof(struct qmi_wlanfw_bdf_download_req_msg_v01,
+ total_size),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x12,
+ .offset = offsetof(struct qmi_wlanfw_bdf_download_req_msg_v01,
+ seg_id_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u32),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x12,
+ .offset = offsetof(struct qmi_wlanfw_bdf_download_req_msg_v01,
+ seg_id),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x13,
+ .offset = offsetof(struct qmi_wlanfw_bdf_download_req_msg_v01,
+ data_valid),
+ },
+ {
+ .data_type = QMI_DATA_LEN,
+ .elem_len = 1,
+ .elem_size = sizeof(u16),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x13,
+ .offset = offsetof(struct qmi_wlanfw_bdf_download_req_msg_v01,
+ data_len),
+ },
+ {
+ .data_type = QMI_UNSIGNED_1_BYTE,
+ .elem_len = QMI_WLANFW_MAX_DATA_SIZE_V01,
+ .elem_size = sizeof(u8),
+ .array_type = VAR_LEN_ARRAY,
+ .tlv_type = 0x13,
+ .offset = offsetof(struct qmi_wlanfw_bdf_download_req_msg_v01,
+ data),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x14,
+ .offset = offsetof(struct qmi_wlanfw_bdf_download_req_msg_v01,
+ end_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_1_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x14,
+ .offset = offsetof(struct qmi_wlanfw_bdf_download_req_msg_v01,
+ end),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x15,
+ .offset = offsetof(struct qmi_wlanfw_bdf_download_req_msg_v01,
+ bdf_type_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_1_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x15,
+ .offset = offsetof(struct qmi_wlanfw_bdf_download_req_msg_v01,
+ bdf_type),
+ },
+ {
+ .data_type = QMI_EOTI,
+ .array_type = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ },
+};
+
+static struct qmi_elem_info qmi_wlanfw_bdf_download_resp_msg_v01_ei[] = {
+ {
+ .data_type = QMI_STRUCT,
+ .elem_len = 1,
+ .elem_size = sizeof(struct qmi_response_type_v01),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x02,
+ .offset = offsetof(struct qmi_wlanfw_bdf_download_resp_msg_v01,
+ resp),
+ .ei_array = qmi_response_type_v01_ei,
+ },
+ {
+ .data_type = QMI_EOTI,
+ .array_type = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ },
+};
+
+static struct qmi_elem_info qmi_wlanfw_m3_info_req_msg_v01_ei[] = {
+ {
+ .data_type = QMI_UNSIGNED_8_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u64),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x01,
+ .offset = offsetof(struct qmi_wlanfw_m3_info_req_msg_v01, addr),
+ },
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u32),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x02,
+ .offset = offsetof(struct qmi_wlanfw_m3_info_req_msg_v01, size),
+ },
+ {
+ .data_type = QMI_EOTI,
+ .array_type = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ },
+};
+
+static struct qmi_elem_info qmi_wlanfw_m3_info_resp_msg_v01_ei[] = {
+ {
+ .data_type = QMI_STRUCT,
+ .elem_len = 1,
+ .elem_size = sizeof(struct qmi_response_type_v01),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x02,
+ .offset = offsetof(struct qmi_wlanfw_m3_info_resp_msg_v01, resp),
+ .ei_array = qmi_response_type_v01_ei,
+ },
+ {
+ .data_type = QMI_EOTI,
+ .array_type = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ },
+};
+
+static struct qmi_elem_info qmi_wlanfw_ce_tgt_pipe_cfg_s_v01_ei[] = {
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u32),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0,
+ .offset = offsetof(struct qmi_wlanfw_ce_tgt_pipe_cfg_s_v01,
+ pipe_num),
+ },
+ {
+ .data_type = QMI_SIGNED_4_BYTE_ENUM,
+ .elem_len = 1,
+ .elem_size = sizeof(enum qmi_wlanfw_pipedir_enum_v01),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0,
+ .offset = offsetof(struct qmi_wlanfw_ce_tgt_pipe_cfg_s_v01,
+ pipe_dir),
+ },
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u32),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0,
+ .offset = offsetof(struct qmi_wlanfw_ce_tgt_pipe_cfg_s_v01,
+ nentries),
+ },
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u32),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0,
+ .offset = offsetof(struct qmi_wlanfw_ce_tgt_pipe_cfg_s_v01,
+ nbytes_max),
+ },
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u32),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0,
+ .offset = offsetof(struct qmi_wlanfw_ce_tgt_pipe_cfg_s_v01,
+ flags),
+ },
+ {
+ .data_type = QMI_EOTI,
+ .array_type = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ },
+};
+
+static struct qmi_elem_info qmi_wlanfw_ce_svc_pipe_cfg_s_v01_ei[] = {
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u32),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0,
+ .offset = offsetof(struct qmi_wlanfw_ce_svc_pipe_cfg_s_v01,
+ service_id),
+ },
+ {
+ .data_type = QMI_SIGNED_4_BYTE_ENUM,
+ .elem_len = 1,
+ .elem_size = sizeof(enum qmi_wlanfw_pipedir_enum_v01),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0,
+ .offset = offsetof(struct qmi_wlanfw_ce_svc_pipe_cfg_s_v01,
+ pipe_dir),
+ },
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u32),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0,
+ .offset = offsetof(struct qmi_wlanfw_ce_svc_pipe_cfg_s_v01,
+ pipe_num),
+ },
+ {
+ .data_type = QMI_EOTI,
+ .array_type = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ },
+};
+
+static struct qmi_elem_info qmi_wlanfw_shadow_reg_cfg_s_v01_ei[] = {
+ {
+ .data_type = QMI_UNSIGNED_2_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u16),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0,
+ .offset = offsetof(struct qmi_wlanfw_shadow_reg_cfg_s_v01, id),
+ },
+ {
+ .data_type = QMI_UNSIGNED_2_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u16),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0,
+ .offset = offsetof(struct qmi_wlanfw_shadow_reg_cfg_s_v01,
+ offset),
+ },
+ {
+ .data_type = QMI_EOTI,
+ .array_type = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ },
+};
+
+static struct qmi_elem_info qmi_wlanfw_shadow_reg_v2_cfg_s_v01_ei[] = {
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u32),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0,
+ .offset = offsetof(struct qmi_wlanfw_shadow_reg_v2_cfg_s_v01,
+ addr),
+ },
+ {
+ .data_type = QMI_EOTI,
+ .array_type = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ },
+};
+
+static struct qmi_elem_info qmi_wlanfw_wlan_mode_req_msg_v01_ei[] = {
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u32),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x01,
+ .offset = offsetof(struct qmi_wlanfw_wlan_mode_req_msg_v01,
+ mode),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x10,
+ .offset = offsetof(struct qmi_wlanfw_wlan_mode_req_msg_v01,
+ hw_debug_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_1_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x10,
+ .offset = offsetof(struct qmi_wlanfw_wlan_mode_req_msg_v01,
+ hw_debug),
+ },
+ {
+ .data_type = QMI_EOTI,
+ .array_type = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ },
+};
+
+static struct qmi_elem_info qmi_wlanfw_wlan_mode_resp_msg_v01_ei[] = {
+ {
+ .data_type = QMI_STRUCT,
+ .elem_len = 1,
+ .elem_size = sizeof(struct qmi_response_type_v01),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x02,
+ .offset = offsetof(struct qmi_wlanfw_wlan_mode_resp_msg_v01,
+ resp),
+ .ei_array = qmi_response_type_v01_ei,
+ },
+ {
+ .data_type = QMI_EOTI,
+ .array_type = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ },
+};
+
+static struct qmi_elem_info qmi_wlanfw_wlan_cfg_req_msg_v01_ei[] = {
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x10,
+ .offset = offsetof(struct qmi_wlanfw_wlan_cfg_req_msg_v01,
+ host_version_valid),
+ },
+ {
+ .data_type = QMI_STRING,
+ .elem_len = QMI_WLANFW_MAX_STR_LEN_V01 + 1,
+ .elem_size = sizeof(char),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x10,
+ .offset = offsetof(struct qmi_wlanfw_wlan_cfg_req_msg_v01,
+ host_version),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x11,
+ .offset = offsetof(struct qmi_wlanfw_wlan_cfg_req_msg_v01,
+ tgt_cfg_valid),
+ },
+ {
+ .data_type = QMI_DATA_LEN,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x11,
+ .offset = offsetof(struct qmi_wlanfw_wlan_cfg_req_msg_v01,
+ tgt_cfg_len),
+ },
+ {
+ .data_type = QMI_STRUCT,
+ .elem_len = QMI_WLANFW_MAX_NUM_CE_V01,
+ .elem_size = sizeof(
+ struct qmi_wlanfw_ce_tgt_pipe_cfg_s_v01),
+ .array_type = VAR_LEN_ARRAY,
+ .tlv_type = 0x11,
+ .offset = offsetof(struct qmi_wlanfw_wlan_cfg_req_msg_v01,
+ tgt_cfg),
+ .ei_array = qmi_wlanfw_ce_tgt_pipe_cfg_s_v01_ei,
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x12,
+ .offset = offsetof(struct qmi_wlanfw_wlan_cfg_req_msg_v01,
+ svc_cfg_valid),
+ },
+ {
+ .data_type = QMI_DATA_LEN,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x12,
+ .offset = offsetof(struct qmi_wlanfw_wlan_cfg_req_msg_v01,
+ svc_cfg_len),
+ },
+ {
+ .data_type = QMI_STRUCT,
+ .elem_len = QMI_WLANFW_MAX_NUM_SVC_V01,
+ .elem_size = sizeof(struct qmi_wlanfw_ce_svc_pipe_cfg_s_v01),
+ .array_type = VAR_LEN_ARRAY,
+ .tlv_type = 0x12,
+ .offset = offsetof(struct qmi_wlanfw_wlan_cfg_req_msg_v01,
+ svc_cfg),
+ .ei_array = qmi_wlanfw_ce_svc_pipe_cfg_s_v01_ei,
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x13,
+ .offset = offsetof(struct qmi_wlanfw_wlan_cfg_req_msg_v01,
+ shadow_reg_valid),
+ },
+ {
+ .data_type = QMI_DATA_LEN,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x13,
+ .offset = offsetof(struct qmi_wlanfw_wlan_cfg_req_msg_v01,
+ shadow_reg_len),
+ },
+ {
+ .data_type = QMI_STRUCT,
+ .elem_len = QMI_WLANFW_MAX_NUM_SHADOW_REG_V01,
+ .elem_size = sizeof(struct qmi_wlanfw_shadow_reg_cfg_s_v01),
+ .array_type = VAR_LEN_ARRAY,
+ .tlv_type = 0x13,
+ .offset = offsetof(struct qmi_wlanfw_wlan_cfg_req_msg_v01,
+ shadow_reg),
+ .ei_array = qmi_wlanfw_shadow_reg_cfg_s_v01_ei,
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x14,
+ .offset = offsetof(struct qmi_wlanfw_wlan_cfg_req_msg_v01,
+ shadow_reg_v2_valid),
+ },
+ {
+ .data_type = QMI_DATA_LEN,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x14,
+ .offset = offsetof(struct qmi_wlanfw_wlan_cfg_req_msg_v01,
+ shadow_reg_v2_len),
+ },
+ {
+ .data_type = QMI_STRUCT,
+ .elem_len = QMI_WLANFW_MAX_NUM_SHADOW_REG_V2_V01,
+ .elem_size = sizeof(struct qmi_wlanfw_shadow_reg_v2_cfg_s_v01),
+ .array_type = VAR_LEN_ARRAY,
+ .tlv_type = 0x14,
+ .offset = offsetof(struct qmi_wlanfw_wlan_cfg_req_msg_v01,
+ shadow_reg_v2),
+ .ei_array = qmi_wlanfw_shadow_reg_v2_cfg_s_v01_ei,
+ },
+ {
+ .data_type = QMI_EOTI,
+ .array_type = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ },
+};
+
+static struct qmi_elem_info qmi_wlanfw_wlan_cfg_resp_msg_v01_ei[] = {
+ {
+ .data_type = QMI_STRUCT,
+ .elem_len = 1,
+ .elem_size = sizeof(struct qmi_response_type_v01),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x02,
+ .offset = offsetof(struct qmi_wlanfw_wlan_cfg_resp_msg_v01, resp),
+ .ei_array = qmi_response_type_v01_ei,
+ },
+ {
+ .data_type = QMI_EOTI,
+ .array_type = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ },
+};
+
+static struct qmi_elem_info qmi_wlanfw_mem_ready_ind_msg_v01_ei[] = {
+ {
+ .data_type = QMI_EOTI,
+ .array_type = NO_ARRAY,
+ },
+};
+
+static struct qmi_elem_info qmi_wlanfw_fw_ready_ind_msg_v01_ei[] = {
+ {
+ .data_type = QMI_EOTI,
+ .array_type = NO_ARRAY,
+ },
+};
+
+static struct qmi_elem_info qmi_wlanfw_cold_boot_cal_done_ind_msg_v01_ei[] = {
+ {
+ .data_type = QMI_EOTI,
+ .array_type = NO_ARRAY,
+ },
+};
+
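+/* Send the host capability request once the WLFW service is up. It
+ * advertises what this host supports (number of clients, memory
+ * configuration mode, BDF download, calibration state) so that
+ * firmware can shape its subsequent memory and BDF requests.
+ */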
+static int ath11k_qmi_host_cap_send(struct ath11k_base *ab)
+{
+ struct qmi_wlanfw_host_cap_req_msg_v01 req;
+ struct qmi_wlanfw_host_cap_resp_msg_v01 resp;
+ struct qmi_txn txn = {};
+ int ret = 0;
+
+ memset(&req, 0, sizeof(req));
+ memset(&resp, 0, sizeof(resp));
+
+ req.num_clients_valid = 1;
+ req.num_clients = 1;
+ req.mem_cfg_mode = ab->qmi.target_mem_mode;
+ req.mem_cfg_mode_valid = 1;
+ req.bdf_support_valid = 1;
+ req.bdf_support = 1;
+
+ req.m3_support_valid = 0;
+ req.m3_support = 0;
+
+ req.m3_cache_support_valid = 0;
+ req.m3_cache_support = 0;
+
+ req.cal_done_valid = 1;
+ req.cal_done = ab->qmi.cal_done;
+
+ ret = qmi_txn_init(&ab->qmi.handle, &txn,
+ qmi_wlanfw_host_cap_resp_msg_v01_ei, &resp);
+ if (ret < 0)
+ goto out;
+
+ ret = qmi_send_request(&ab->qmi.handle, NULL, &txn,
+ QMI_WLANFW_HOST_CAP_REQ_V01,
+ QMI_WLANFW_HOST_CAP_REQ_MSG_V01_MAX_LEN,
+ qmi_wlanfw_host_cap_req_msg_v01_ei, &req);
+ if (ret < 0) {
+ ath11k_warn(ab, "Failed to send host capability request, err = %d\n", ret);
+ goto out;
+ }
+
+ ret = qmi_txn_wait(&txn, msecs_to_jiffies(ATH11K_QMI_WLANFW_TIMEOUT_MS));
+ if (ret < 0)
+ goto out;
+
+ if (resp.resp.result != QMI_RESULT_SUCCESS_V01) {
+ ath11k_warn(ab, "Host capability request failed, result: %d, err: %d\n",
+ resp.resp.result, resp.resp.error);
+ ret = -EINVAL;
+ goto out;
+ }
+
+out:
+ return ret;
+}
+
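+/* Register for the firmware indications (fw ready, request mem, fw mem
+ * ready, cal done, fw init done) that drive the QMI event machinery
+ * below. The request and response are heap-allocated since these
+ * messages are comparatively large.
+ */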
+static int ath11k_qmi_fw_ind_register_send(struct ath11k_base *ab)
+{
+ struct qmi_wlanfw_ind_register_req_msg_v01 *req;
+ struct qmi_wlanfw_ind_register_resp_msg_v01 *resp;
+ struct qmi_handle *handle = &ab->qmi.handle;
+ struct qmi_txn txn;
+ int ret = 0;
+
+ req = kzalloc(sizeof(*req), GFP_KERNEL);
+ if (!req)
+ return -ENOMEM;
+
+ resp = kzalloc(sizeof(*resp), GFP_KERNEL);
+ if (!resp) {
+ ret = -ENOMEM;
+ goto resp_out;
+ }
+
+ req->client_id_valid = 1;
+ req->client_id = QMI_WLANFW_CLIENT_ID;
+ req->fw_ready_enable_valid = 1;
+ req->fw_ready_enable = 1;
+ req->request_mem_enable_valid = 1;
+ req->request_mem_enable = 1;
+ req->fw_mem_ready_enable_valid = 1;
+ req->fw_mem_ready_enable = 1;
+ req->cal_done_enable_valid = 1;
+ req->cal_done_enable = 1;
+ req->fw_init_done_enable_valid = 1;
+ req->fw_init_done_enable = 1;
+
+ req->pin_connect_result_enable_valid = 0;
+ req->pin_connect_result_enable = 0;
+
+ ret = qmi_txn_init(handle, &txn,
+ qmi_wlanfw_ind_register_resp_msg_v01_ei, resp);
+ if (ret < 0)
+ goto out;
+
+ ret = qmi_send_request(&ab->qmi.handle, NULL, &txn,
+ QMI_WLANFW_IND_REGISTER_REQ_V01,
+ QMI_WLANFW_IND_REGISTER_REQ_MSG_V01_MAX_LEN,
+ qmi_wlanfw_ind_register_req_msg_v01_ei, req);
+ if (ret < 0) {
+ ath11k_warn(ab, "Failed to send indication register request, err = %d\n",
+ ret);
+ goto out;
+ }
+
+ ret = qmi_txn_wait(&txn, msecs_to_jiffies(ATH11K_QMI_WLANFW_TIMEOUT_MS));
+ if (ret < 0) {
+ ath11k_warn(ab, "failed to register fw indication %d\n", ret);
+ goto out;
+ }
+
+ if (resp->resp.result != QMI_RESULT_SUCCESS_V01) {
+ ath11k_warn(ab, "FW Ind register request failed, result: %d, err: %d\n",
+ resp->resp.result, resp->resp.error);
+ ret = -EINVAL;
+ goto out;
+ }
+
+out:
+ kfree(resp);
+resp_out:
+ kfree(req);
+ return ret;
+}
+
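+/* Answer the firmware's REQUEST_MEM indication with the address, size
+ * and type of every segment prepared in target_mem[].
+ */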
+static int ath11k_qmi_respond_fw_mem_request(struct ath11k_base *ab)
+{
+ struct qmi_wlanfw_respond_mem_req_msg_v01 *req;
+ struct qmi_wlanfw_respond_mem_resp_msg_v01 resp;
+ struct qmi_txn txn = {};
+ int ret = 0, i;
+
+ req = kzalloc(sizeof(*req), GFP_KERNEL);
+ if (!req)
+ return -ENOMEM;
+
+ memset(&resp, 0, sizeof(resp));
+
+ req->mem_seg_len = ab->qmi.mem_seg_count;
+
+ ret = qmi_txn_init(&ab->qmi.handle, &txn,
+ qmi_wlanfw_respond_mem_resp_msg_v01_ei, &resp);
+ if (ret < 0)
+ goto out;
+
+ for (i = 0; i < req->mem_seg_len; i++) {
+ req->mem_seg[i].addr = ab->qmi.target_mem[i].paddr;
+ req->mem_seg[i].size = ab->qmi.target_mem[i].size;
+ req->mem_seg[i].type = ab->qmi.target_mem[i].type;
+ }
+
+ ret = qmi_send_request(&ab->qmi.handle, NULL, &txn,
+ QMI_WLANFW_RESPOND_MEM_REQ_V01,
+ QMI_WLANFW_RESPOND_MEM_REQ_MSG_V01_MAX_LEN,
+ qmi_wlanfw_respond_mem_req_msg_v01_ei, req);
+ if (ret < 0) {
+ ath11k_warn(ab, "qmi failed to respond memory request, err = %d\n",
+ ret);
+ goto out;
+ }
+
+ ret = qmi_txn_wait(&txn, msecs_to_jiffies(ATH11K_QMI_WLANFW_TIMEOUT_MS));
+ if (ret < 0) {
+ ath11k_warn(ab, "qmi failed memory request, err = %d\n", ret);
+ goto out;
+ }
+
+ if (resp.resp.result != QMI_RESULT_SUCCESS_V01) {
+ ath11k_warn(ab, "Respond mem req failed, result: %d, err: %d\n",
+ resp.resp.result, resp.resp.error);
+ ret = -EINVAL;
+ goto out;
+ }
+out:
+ kfree(req);
+ return ret;
+}
+
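+/* Walk the firmware's memory segment requests. The BDF region is
+ * served from the fixed platform address ATH11K_QMI_BDF_ADDRESS; the
+ * CALDB region is reported with a zero address since cold boot
+ * calibration is not supported yet. Unknown types are dropped, so the
+ * segment count is recomputed at the end.
+ */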
+static int ath11k_qmi_alloc_target_mem_chunk(struct ath11k_base *ab)
+{
+ int i, idx;
+
+ for (i = 0, idx = 0; i < ab->qmi.mem_seg_count; i++) {
+ switch (ab->qmi.target_mem[i].type) {
+ case BDF_MEM_REGION_TYPE:
+ ab->qmi.target_mem[idx].paddr = ATH11K_QMI_BDF_ADDRESS;
+ ab->qmi.target_mem[idx].vaddr = ATH11K_QMI_BDF_ADDRESS;
+ ab->qmi.target_mem[idx].size = ab->qmi.target_mem[i].size;
+ ab->qmi.target_mem[idx].type = ab->qmi.target_mem[i].type;
+ idx++;
+ break;
+ case CALDB_MEM_REGION_TYPE:
+ if (ab->qmi.target_mem[i].size > ATH11K_QMI_CALDB_SIZE) {
+ ath11k_warn(ab, "qmi mem size too small to load caldata\n");
+ return -EINVAL;
+ }
+ /* TODO ath11k does not support cold boot calibration */
+ ab->qmi.target_mem[idx].paddr = 0;
+ ab->qmi.target_mem[idx].vaddr = 0;
+ ab->qmi.target_mem[idx].size = ab->qmi.target_mem[i].size;
+ ab->qmi.target_mem[idx].type = ab->qmi.target_mem[i].type;
+ idx++;
+ break;
+ default:
+ ath11k_warn(ab, "qmi ignore invalid mem req type %d\n",
+ ab->qmi.target_mem[i].type);
+ break;
+ }
+ }
+ ab->qmi.mem_seg_count = idx;
+
+ return 0;
+}
+
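+/* Query chip, board, SoC and firmware version info from firmware. The
+ * board id retrieved here is later used as the file id for the BDF
+ * download.
+ */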
+static int ath11k_qmi_request_target_cap(struct ath11k_base *ab)
+{
+ struct qmi_wlanfw_cap_req_msg_v01 req;
+ struct qmi_wlanfw_cap_resp_msg_v01 resp;
+ struct qmi_txn txn = {};
+ int ret = 0;
+
+ memset(&req, 0, sizeof(req));
+ memset(&resp, 0, sizeof(resp));
+
+ ret = qmi_txn_init(&ab->qmi.handle, &txn,
+ qmi_wlanfw_cap_resp_msg_v01_ei, &resp);
+ if (ret < 0)
+ goto out;
+
+ ret = qmi_send_request(&ab->qmi.handle, NULL, &txn,
+ QMI_WLANFW_CAP_REQ_V01,
+ QMI_WLANFW_CAP_REQ_MSG_V01_MAX_LEN,
+ qmi_wlanfw_cap_req_msg_v01_ei, &req);
+ if (ret < 0) {
+ ath11k_warn(ab, "qmi failed to send target cap request, err = %d\n",
+ ret);
+ goto out;
+ }
+
+ ret = qmi_txn_wait(&txn, msecs_to_jiffies(ATH11K_QMI_WLANFW_TIMEOUT_MS));
+ if (ret < 0) {
+ ath11k_warn(ab, "qmi failed target cap request %d\n", ret);
+ goto out;
+ }
+
+ if (resp.resp.result != QMI_RESULT_SUCCESS_V01) {
+ ath11k_warn(ab, "qmi targetcap req failed, result: %d, err: %d\n",
+ resp.resp.result, resp.resp.error);
+ ret = -EINVAL;
+ goto out;
+ }
+
+ if (resp.chip_info_valid) {
+ ab->qmi.target.chip_id = resp.chip_info.chip_id;
+ ab->qmi.target.chip_family = resp.chip_info.chip_family;
+ }
+
+ if (resp.board_info_valid)
+ ab->qmi.target.board_id = resp.board_info.board_id;
+ else
+ ab->qmi.target.board_id = 0xFF;
+
+ if (resp.soc_info_valid)
+ ab->qmi.target.soc_id = resp.soc_info.soc_id;
+
+ if (resp.fw_version_info_valid) {
+ ab->qmi.target.fw_version = resp.fw_version_info.fw_version;
+ strlcpy(ab->qmi.target.fw_build_timestamp,
+ resp.fw_version_info.fw_build_timestamp,
+ sizeof(ab->qmi.target.fw_build_timestamp));
+ }
+
+ if (resp.fw_build_id_valid)
+ strlcpy(ab->qmi.target.fw_build_id, resp.fw_build_id,
+ sizeof(ab->qmi.target.fw_build_id));
+
+ ath11k_info(ab, "qmi target: chip_id: 0x%x, chip_family: 0x%x, board_id: 0x%x, soc_id: 0x%x\n",
+ ab->qmi.target.chip_id, ab->qmi.target.chip_family,
+ ab->qmi.target.board_id, ab->qmi.target.soc_id);
+
+ ath11k_info(ab, "qmi fw_version: 0x%x fw_build_timestamp: %s fw_build_id: %s\n",
+ ab->qmi.target.fw_version,
+ ab->qmi.target.fw_build_timestamp,
+ ab->qmi.target.fw_build_id);
+
+out:
+ return ret;
+}
+
+static int
+ath11k_qmi_prepare_bdf_download(struct ath11k_base *ab, int type,
+ struct qmi_wlanfw_bdf_download_req_msg_v01 *req,
+ void __iomem *bdf_addr)
+{
+ struct device *dev = ab->dev;
+ char filename[ATH11K_QMI_MAX_BDF_FILE_NAME_SIZE];
+ const struct firmware *fw_entry;
+ struct ath11k_board_data bd;
+ u32 fw_size;
+ int ret = 0;
+
+ memset(&bd, 0, sizeof(bd));
+
+ switch (type) {
+ case ATH11K_QMI_FILE_TYPE_BDF_GOLDEN:
+ ret = ath11k_core_fetch_bdf(ab, &bd);
+ if (ret) {
+ ath11k_warn(ab, "qmi failed to load BDF\n");
+ goto out;
+ }
+
+ fw_size = min_t(u32, ab->hw_params.fw.board_size, bd.len);
+ memcpy_toio(bdf_addr, bd.data, fw_size);
+ ath11k_core_free_bdf(ab, &bd);
+ break;
+ case ATH11K_QMI_FILE_TYPE_CALDATA:
+ snprintf(filename, sizeof(filename),
+ "%s/%s", ab->hw_params.fw.dir, ATH11K_QMI_DEFAULT_CAL_FILE_NAME);
+ ret = request_firmware(&fw_entry, filename, dev);
+ if (ret) {
+ ath11k_warn(ab, "qmi failed to load CAL: %s\n", filename);
+ goto out;
+ }
+
+ fw_size = min_t(u32, ab->hw_params.fw.board_size,
+ fw_entry->size);
+
+ memcpy_toio(bdf_addr + ATH11K_QMI_CALDATA_OFFSET,
+ fw_entry->data, fw_size);
+ ath11k_info(ab, "qmi downloading BDF: %s, size: %zu\n",
+ filename, fw_entry->size);
+
+ release_firmware(fw_entry);
+ break;
+ default:
+ ret = -EINVAL;
+ goto out;
+ }
+
+ req->total_size = fw_size;
+
+out:
+ return ret;
+}
+
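+/* Map the fixed BDF region and push the board data file followed by
+ * the calibration data, one loop iteration per file type. Firmware
+ * presumably reads both from that shared region, so only metadata
+ * travels in the QMI request itself (data_valid stays 0).
+ */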
+static int ath11k_qmi_load_bdf(struct ath11k_base *ab)
+{
+ struct qmi_wlanfw_bdf_download_req_msg_v01 *req;
+ struct qmi_wlanfw_bdf_download_resp_msg_v01 resp;
+ struct qmi_txn txn = {};
+ void __iomem *bdf_addr = NULL;
+ int type, ret;
+
+ req = kzalloc(sizeof(*req), GFP_KERNEL);
+ if (!req)
+ return -ENOMEM;
+ memset(&resp, 0, sizeof(resp));
+
+ bdf_addr = ioremap(ATH11K_QMI_BDF_ADDRESS, ATH11K_QMI_BDF_MAX_SIZE);
+ if (!bdf_addr) {
+ ath11k_warn(ab, "qmi ioremap error for BDF\n");
+ ret = -EIO;
+ goto out;
+ }
+
+ for (type = 0; type < ATH11K_QMI_MAX_FILE_TYPE; type++) {
+ req->valid = 1;
+ req->file_id_valid = 1;
+ req->file_id = ab->qmi.target.board_id;
+ req->total_size_valid = 1;
+ req->seg_id_valid = 1;
+ req->seg_id = type;
+ req->data_valid = 0;
+ req->data_len = ATH11K_QMI_MAX_BDF_FILE_NAME_SIZE;
+ req->bdf_type = 0;
+ req->bdf_type_valid = 0;
+ req->end_valid = 1;
+ req->end = 1;
+
+ ret = ath11k_qmi_prepare_bdf_download(ab, type, req, bdf_addr);
+ if (ret < 0)
+ goto out_qmi_bdf;
+
+ ret = qmi_txn_init(&ab->qmi.handle, &txn,
+ qmi_wlanfw_bdf_download_resp_msg_v01_ei,
+ &resp);
+ if (ret < 0)
+ goto out_qmi_bdf;
+
+ ret = qmi_send_request(&ab->qmi.handle, NULL, &txn,
+ QMI_WLANFW_BDF_DOWNLOAD_REQ_V01,
+ QMI_WLANFW_BDF_DOWNLOAD_REQ_MSG_V01_MAX_LEN,
+ qmi_wlanfw_bdf_download_req_msg_v01_ei, req);
+ if (ret < 0) {
+ qmi_txn_cancel(&txn);
+ goto out_qmi_bdf;
+ }
+
+ ret = qmi_txn_wait(&txn, msecs_to_jiffies(ATH11K_QMI_WLANFW_TIMEOUT_MS));
+ if (ret < 0)
+ goto out_qmi_bdf;
+
+ if (resp.resp.result != QMI_RESULT_SUCCESS_V01) {
+ ath11k_warn(ab, "qmi BDF download failed, result: %d, err: %d\n",
+ resp.resp.result, resp.resp.error);
+ ret = -EINVAL;
+ goto out_qmi_bdf;
+ }
+ }
+ ath11k_info(ab, "qmi BDF downloaded\n");
+
+out_qmi_bdf:
+ iounmap(bdf_addr);
+out:
+ kfree(req);
+ return ret;
+}
+
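+/* Tell firmware that no M3 image is provided: address and size are
+ * zero, matching the m3_support capability left unset in the host
+ * capability request above.
+ */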
+static int ath11k_qmi_wlanfw_m3_info_send(struct ath11k_base *ab)
+{
+ struct qmi_wlanfw_m3_info_req_msg_v01 req;
+ struct qmi_wlanfw_m3_info_resp_msg_v01 resp;
+ struct qmi_txn txn = {};
+ int ret = 0;
+
+ memset(&req, 0, sizeof(req));
+ memset(&resp, 0, sizeof(resp));
+ req.addr = 0;
+ req.size = 0;
+
+ ret = qmi_txn_init(&ab->qmi.handle, &txn,
+ qmi_wlanfw_m3_info_resp_msg_v01_ei, &resp);
+ if (ret < 0)
+ goto out;
+
+ ret = qmi_send_request(&ab->qmi.handle, NULL, &txn,
+ QMI_WLANFW_M3_INFO_REQ_V01,
+ QMI_WLANFW_M3_INFO_REQ_MSG_V01_MAX_MSG_LEN,
+ qmi_wlanfw_m3_info_req_msg_v01_ei, &req);
+ if (ret < 0) {
+ ath11k_warn(ab, "qmi failed to send M3 information request, err = %d\n",
+ ret);
+ goto out;
+ }
+
+ ret = qmi_txn_wait(&txn, msecs_to_jiffies(ATH11K_QMI_WLANFW_TIMEOUT_MS));
+ if (ret < 0) {
+ ath11k_warn(ab, "qmi failed M3 information request %d\n", ret);
+ goto out;
+ }
+
+ if (resp.resp.result != QMI_RESULT_SUCCESS_V01) {
+ ath11k_warn(ab, "qmi M3 info request failed, result: %d, err: %d\n",
+ resp.resp.result, resp.resp.error);
+ ret = -EINVAL;
+ goto out;
+ }
+out:
+ return ret;
+}
+
+static int ath11k_qmi_wlanfw_mode_send(struct ath11k_base *ab,
+ u32 mode)
+{
+ struct qmi_wlanfw_wlan_mode_req_msg_v01 req;
+ struct qmi_wlanfw_wlan_mode_resp_msg_v01 resp;
+ struct qmi_txn txn = {};
+ int ret = 0;
+
+ memset(&req, 0, sizeof(req));
+ memset(&resp, 0, sizeof(resp));
+
+ req.mode = mode;
+ req.hw_debug_valid = 1;
+ req.hw_debug = 0;
+
+ ret = qmi_txn_init(&ab->qmi.handle, &txn,
+ qmi_wlanfw_wlan_mode_resp_msg_v01_ei, &resp);
+ if (ret < 0)
+ goto out;
+
+ ret = qmi_send_request(&ab->qmi.handle, NULL, &txn,
+ QMI_WLANFW_WLAN_MODE_REQ_V01,
+ QMI_WLANFW_WLAN_MODE_REQ_MSG_V01_MAX_LEN,
+ qmi_wlanfw_wlan_mode_req_msg_v01_ei, &req);
+ if (ret < 0) {
+ ath11k_warn(ab, "qmi failed to send mode request, mode: %d, err = %d\n",
+ mode, ret);
+ goto out;
+ }
+
+ ret = qmi_txn_wait(&txn, msecs_to_jiffies(ATH11K_QMI_WLANFW_TIMEOUT_MS));
+ if (ret < 0) {
+ if (mode == ATH11K_FIRMWARE_MODE_OFF && ret == -ENETRESET) {
+ ath11k_warn(ab, "WLFW service is disconnected\n");
+ return 0;
+ }
+ ath11k_warn(ab, "qmi failed set mode request, mode: %d, err = %d\n",
+ mode, ret);
+ goto out;
+ }
+
+ if (resp.resp.result != QMI_RESULT_SUCCESS_V01) {
+ ath11k_warn(ab, "Mode request failed, mode: %d, result: %d err: %d\n",
+ mode, resp.resp.result, resp.resp.error);
+ ret = -EINVAL;
+ goto out;
+ }
+
+out:
+ return ret;
+}
+
+static int ath11k_qmi_wlanfw_wlan_cfg_send(struct ath11k_base *ab)
+{
+ struct qmi_wlanfw_wlan_cfg_req_msg_v01 *req;
+ struct qmi_wlanfw_wlan_cfg_resp_msg_v01 resp;
+ struct ce_pipe_config *ce_cfg;
+ struct service_to_pipe *svc_cfg;
+ struct qmi_txn txn = {};
+ int ret = 0, pipe_num;
+
+ ce_cfg = (struct ce_pipe_config *)ab->qmi.ce_cfg.tgt_ce;
+ svc_cfg = (struct service_to_pipe *)ab->qmi.ce_cfg.svc_to_ce_map;
+
+ req = kzalloc(sizeof(*req), GFP_KERNEL);
+ if (!req)
+ return -ENOMEM;
+
+ memset(&resp, 0, sizeof(resp));
+
+ req->host_version_valid = 1;
+ strlcpy(req->host_version, ATH11K_HOST_VERSION_STRING,
+ sizeof(req->host_version));
+
+ req->tgt_cfg_valid = 1;
+ /* This is number of CE configs */
+ req->tgt_cfg_len = ab->qmi.ce_cfg.tgt_ce_len;
+ for (pipe_num = 0; pipe_num < req->tgt_cfg_len; pipe_num++) {
+ req->tgt_cfg[pipe_num].pipe_num = ce_cfg[pipe_num].pipenum;
+ req->tgt_cfg[pipe_num].pipe_dir = ce_cfg[pipe_num].pipedir;
+ req->tgt_cfg[pipe_num].nentries = ce_cfg[pipe_num].nentries;
+ req->tgt_cfg[pipe_num].nbytes_max = ce_cfg[pipe_num].nbytes_max;
+ req->tgt_cfg[pipe_num].flags = ce_cfg[pipe_num].flags;
+ }
+
+ req->svc_cfg_valid = 1;
+ /* This is number of Service/CE configs */
+ req->svc_cfg_len = ab->qmi.ce_cfg.svc_to_ce_map_len;
+ for (pipe_num = 0; pipe_num < req->svc_cfg_len; pipe_num++) {
+ req->svc_cfg[pipe_num].service_id = svc_cfg[pipe_num].service_id;
+ req->svc_cfg[pipe_num].pipe_dir = svc_cfg[pipe_num].pipedir;
+ req->svc_cfg[pipe_num].pipe_num = svc_cfg[pipe_num].pipenum;
+ }
+ req->shadow_reg_valid = 0;
+ req->shadow_reg_v2_valid = 0;
+
+ ret = qmi_txn_init(&ab->qmi.handle, &txn,
+ qmi_wlanfw_wlan_cfg_resp_msg_v01_ei, &resp);
+ if (ret < 0)
+ goto out;
+
+ ret = qmi_send_request(&ab->qmi.handle, NULL, &txn,
+ QMI_WLANFW_WLAN_CFG_REQ_V01,
+ QMI_WLANFW_WLAN_CFG_REQ_MSG_V01_MAX_LEN,
+ qmi_wlanfw_wlan_cfg_req_msg_v01_ei, req);
+ if (ret < 0) {
+ ath11k_warn(ab, "qmi failed to send wlan config request, err = %d\n",
+ ret);
+ goto out;
+ }
+
+ ret = qmi_txn_wait(&txn, msecs_to_jiffies(ATH11K_QMI_WLANFW_TIMEOUT_MS));
+ if (ret < 0) {
+ ath11k_warn(ab, "qmi failed wlan config request, err = %d\n", ret);
+ goto out;
+ }
+
+ if (resp.resp.result != QMI_RESULT_SUCCESS_V01) {
+ ath11k_warn(ab, "qmi wlan config request failed, result: %d, err: %d\n",
+ resp.resp.result, resp.resp.error);
+ ret = -EINVAL;
+ goto out;
+ }
+
+out:
+ kfree(req);
+ return ret;
+}
+
+void ath11k_qmi_firmware_stop(struct ath11k_base *ab)
+{
+ int ret;
+
+ ret = ath11k_qmi_wlanfw_mode_send(ab, ATH11K_FIRMWARE_MODE_OFF);
+ if (ret < 0) {
+ ath11k_warn(ab, "qmi failed to send wlan mode off\n");
+ return;
+ }
+}
+
+int ath11k_qmi_firmware_start(struct ath11k_base *ab,
+ u32 mode)
+{
+ int ret;
+
+ ret = ath11k_qmi_wlanfw_wlan_cfg_send(ab);
+ if (ret < 0) {
+ ath11k_warn(ab, "qmi failed to send wlan cfg:%d\n", ret);
+ return ret;
+ }
+
+ ret = ath11k_qmi_wlanfw_mode_send(ab, mode);
+ if (ret < 0) {
+ ath11k_warn(ab, "qmi failed to send wlan fw mode:%d\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
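+/* QMI callbacks run in the QMI handler context, so events are queued
+ * on a list and handled from a workqueue where the blocking QMI
+ * transactions above can be issued safely.
+ */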
+static int
+ath11k_qmi_driver_event_post(struct ath11k_qmi *qmi,
+ enum ath11k_qmi_event_type type,
+ void *data)
+{
+ struct ath11k_qmi_driver_event *event;
+
+ event = kzalloc(sizeof(*event), GFP_ATOMIC);
+ if (!event)
+ return -ENOMEM;
+
+ event->type = type;
+ event->data = data;
+
+ spin_lock(&qmi->event_lock);
+ list_add_tail(&event->list, &qmi->event_list);
+ spin_unlock(&qmi->event_lock);
+
+ queue_work(qmi->event_wq, &qmi->event_work);
+
+ return 0;
+}
+
+static void ath11k_qmi_event_server_arrive(struct ath11k_qmi *qmi)
+{
+ struct ath11k_base *ab = qmi->ab;
+ int ret;
+
+ ret = ath11k_qmi_fw_ind_register_send(ab);
+ if (ret < 0) {
+ ath11k_warn(ab, "qmi failed to send FW indication request: %d\n", ret);
+ return;
+ }
+
+ ret = ath11k_qmi_host_cap_send(ab);
+ if (ret < 0) {
+ ath11k_warn(ab, "qmi failed to send host cap request: %d\n", ret);
+ return;
+ }
+}
+
+static void ath11k_qmi_event_mem_request(struct ath11k_qmi *qmi)
+{
+ struct ath11k_base *ab = qmi->ab;
+ int ret;
+
+ ret = ath11k_qmi_respond_fw_mem_request(ab);
+ if (ret < 0) {
+ ath11k_warn(ab, "qmi failed to respond fw mem req:%d\n", ret);
+ return;
+ }
+}
+
+static void ath11k_qmi_event_load_bdf(struct ath11k_qmi *qmi)
+{
+ struct ath11k_base *ab = qmi->ab;
+ int ret;
+
+ ret = ath11k_qmi_request_target_cap(ab);
+ if (ret < 0) {
+ ath11k_warn(ab, "qmi failed to req target capabilities:%d\n", ret);
+ return;
+ }
+
+ ret = ath11k_qmi_load_bdf(ab);
+ if (ret < 0) {
+ ath11k_warn(ab, "qmi failed to load board data file:%d\n", ret);
+ return;
+ }
+
+ ret = ath11k_qmi_wlanfw_m3_info_send(ab);
+ if (ret < 0) {
+ ath11k_warn(ab, "qmi failed to send m3 info req:%d\n", ret);
+ return;
+ }
+}
+
+static void ath11k_qmi_msg_mem_request_cb(struct qmi_handle *qmi_hdl,
+ struct sockaddr_qrtr *sq,
+ struct qmi_txn *txn,
+ const void *data)
+{
+ struct ath11k_qmi *qmi = container_of(qmi_hdl, struct ath11k_qmi, handle);
+ struct ath11k_base *ab = qmi->ab;
+ const struct qmi_wlanfw_request_mem_ind_msg_v01 *msg = data;
+ int i, ret;
+
+ ath11k_dbg(ab, ATH11K_DBG_QMI, "qmi firmware memory request received\n");
+
+ if (msg->mem_seg_len == 0 ||
+ msg->mem_seg_len > ATH11K_QMI_WLANFW_MAX_NUM_MEM_SEG_V01) {
+ ath11k_warn(ab, "Invalid memory segment length: %u\n",
+ msg->mem_seg_len);
+ return;
+ }
+
+ ab->qmi.mem_seg_count = msg->mem_seg_len;
+
+ for (i = 0; i < qmi->mem_seg_count; i++) {
+ ab->qmi.target_mem[i].type = msg->mem_seg[i].type;
+ ab->qmi.target_mem[i].size = msg->mem_seg[i].size;
+ ath11k_dbg(ab, ATH11K_DBG_QMI, "qmi mem seg type %d size %d\n",
+ msg->mem_seg[i].type, msg->mem_seg[i].size);
+ }
+
+ ret = ath11k_qmi_alloc_target_mem_chunk(ab);
+ if (ret < 0) {
+ ath11k_warn(ab, "qmi failed to alloc target memory:%d\n", ret);
+ return;
+ }
+
+ ath11k_qmi_driver_event_post(qmi, ATH11K_QMI_EVENT_REQUEST_MEM, NULL);
+}
+
+static void ath11k_qmi_msg_mem_ready_cb(struct qmi_handle *qmi_hdl,
+ struct sockaddr_qrtr *sq,
+ struct qmi_txn *txn,
+ const void *decoded)
+{
+ struct ath11k_qmi *qmi = container_of(qmi_hdl, struct ath11k_qmi, handle);
+ struct ath11k_base *ab = qmi->ab;
+
+ ath11k_dbg(ab, ATH11K_DBG_QMI, "qmi firmware memory ready indication\n");
+ ath11k_qmi_driver_event_post(qmi, ATH11K_QMI_EVENT_FW_MEM_READY, NULL);
+}
+
+static void ath11k_qmi_msg_fw_ready_cb(struct qmi_handle *qmi_hdl,
+ struct sockaddr_qrtr *sq,
+ struct qmi_txn *txn,
+ const void *decoded)
+{
+ struct ath11k_qmi *qmi = container_of(qmi_hdl, struct ath11k_qmi, handle);
+ struct ath11k_base *ab = qmi->ab;
+
+ ath11k_dbg(ab, ATH11K_DBG_QMI, "qmi firmware ready\n");
+ ath11k_qmi_driver_event_post(qmi, ATH11K_QMI_EVENT_FW_READY, NULL);
+}
+
+static void ath11k_qmi_msg_cold_boot_cal_done_cb(struct qmi_handle *qmi,
+ struct sockaddr_qrtr *sq,
+ struct qmi_txn *txn,
+ const void *decoded)
+{
+}
+
+static const struct qmi_msg_handler ath11k_qmi_msg_handlers[] = {
+ {
+ .type = QMI_INDICATION,
+ .msg_id = QMI_WLFW_REQUEST_MEM_IND_V01,
+ .ei = qmi_wlanfw_request_mem_ind_msg_v01_ei,
+ .decoded_size = sizeof(struct qmi_wlanfw_request_mem_ind_msg_v01),
+ .fn = ath11k_qmi_msg_mem_request_cb,
+ },
+ {
+ .type = QMI_INDICATION,
+ .msg_id = QMI_WLFW_FW_MEM_READY_IND_V01,
+ .ei = qmi_wlanfw_mem_ready_ind_msg_v01_ei,
+ .decoded_size = sizeof(struct qmi_wlanfw_fw_mem_ready_ind_msg_v01),
+ .fn = ath11k_qmi_msg_mem_ready_cb,
+ },
+ {
+ .type = QMI_INDICATION,
+ .msg_id = QMI_WLFW_FW_READY_IND_V01,
+ .ei = qmi_wlanfw_fw_ready_ind_msg_v01_ei,
+ .decoded_size = sizeof(qmi_wlanfw_fw_ready_ind_msg_v01_ei),
+ .fn = ath11k_qmi_msg_fw_ready_cb,
+ },
+ {
+ .type = QMI_INDICATION,
+ .msg_id = QMI_WLFW_COLD_BOOT_CAL_DONE_IND_V01,
+ .ei = qmi_wlanfw_cold_boot_cal_done_ind_msg_v01_ei,
+ .decoded_size =
+ sizeof(qmi_wlanfw_cold_boot_cal_done_ind_msg_v01_ei),
+ .fn = ath11k_qmi_msg_cold_boot_cal_done_cb,
+ },
+};
+
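+/* Called when the WLFW service shows up on QRTR: connect the QMI
+ * socket to the advertised node/port and schedule the SERVER_ARRIVE
+ * event.
+ */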
+static int ath11k_qmi_ops_new_server(struct qmi_handle *qmi_hdl,
+ struct qmi_service *service)
+{
+ struct ath11k_qmi *qmi = container_of(qmi_hdl, struct ath11k_qmi, handle);
+ struct ath11k_base *ab = qmi->ab;
+ struct sockaddr_qrtr *sq = &qmi->sq;
+ int ret;
+
+ sq->sq_family = AF_QIPCRTR;
+ sq->sq_node = service->node;
+ sq->sq_port = service->port;
+
+ ret = kernel_connect(qmi_hdl->sock, (struct sockaddr *)sq,
+ sizeof(*sq), 0);
+ if (ret) {
+ ath11k_warn(ab, "qmi failed to connect to remote service %d\n", ret);
+ return ret;
+ }
+
+ ath11k_dbg(ab, ATH11K_DBG_QMI, "qmi wifi fw qmi service connected\n");
+ ath11k_qmi_driver_event_post(qmi, ATH11K_QMI_EVENT_SERVER_ARRIVE, NULL);
+
+ return 0;
+}
+
+static void ath11k_qmi_ops_del_server(struct qmi_handle *qmi_hdl,
+ struct qmi_service *service)
+{
+ struct ath11k_qmi *qmi = container_of(qmi_hdl, struct ath11k_qmi, handle);
+ struct ath11k_base *ab = qmi->ab;
+
+ ath11k_dbg(ab, ATH11K_DBG_QMI, "qmi wifi fw del server\n");
+ ath11k_qmi_driver_event_post(qmi, ATH11K_QMI_EVENT_SERVER_EXIT, NULL);
+}
+
+static const struct qmi_ops ath11k_qmi_ops = {
+ .new_server = ath11k_qmi_ops_new_server,
+ .del_server = ath11k_qmi_ops_del_server,
+};
+
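+/* Drain the event list. The lock is dropped while an event is being
+ * handled so that handlers may post follow-up events without
+ * deadlocking.
+ */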
+static void ath11k_qmi_driver_event_work(struct work_struct *work)
+{
+ struct ath11k_qmi *qmi = container_of(work, struct ath11k_qmi,
+ event_work);
+ struct ath11k_qmi_driver_event *event;
+ struct ath11k_base *ab = qmi->ab;
+
+ spin_lock(&qmi->event_lock);
+ while (!list_empty(&qmi->event_list)) {
+ event = list_first_entry(&qmi->event_list,
+ struct ath11k_qmi_driver_event, list);
+ list_del(&event->list);
+ spin_unlock(&qmi->event_lock);
+
+ if (test_bit(ATH11K_FLAG_UNREGISTERING, &ab->dev_flags))
+ goto skip;
+
+ switch (event->type) {
+ case ATH11K_QMI_EVENT_SERVER_ARRIVE:
+ ath11k_qmi_event_server_arrive(qmi);
+ break;
+ case ATH11K_QMI_EVENT_SERVER_EXIT:
+ set_bit(ATH11K_FLAG_CRASH_FLUSH, &ab->dev_flags);
+ set_bit(ATH11K_FLAG_RECOVERY, &ab->dev_flags);
+ break;
+ case ATH11K_QMI_EVENT_REQUEST_MEM:
+ ath11k_qmi_event_mem_request(qmi);
+ break;
+ case ATH11K_QMI_EVENT_FW_MEM_READY:
+ ath11k_qmi_event_load_bdf(qmi);
+ break;
+ case ATH11K_QMI_EVENT_FW_READY:
+ if (test_bit(ATH11K_FLAG_REGISTERED, &ab->dev_flags)) {
+ queue_work(ab->workqueue, &ab->restart_work);
+ break;
+ }
+
+ ath11k_core_qmi_firmware_ready(ab);
+ ab->qmi.cal_done = 1;
+ set_bit(ATH11K_FLAG_REGISTERED, &ab->dev_flags);
+
+ break;
+ case ATH11K_QMI_EVENT_COLD_BOOT_CAL_DONE:
+ break;
+ default:
+ ath11k_warn(ab, "invalid event type: %d", event->type);
+ break;
+ }
+skip:
+ kfree(event);
+ spin_lock(&qmi->event_lock);
+ }
+ spin_unlock(&qmi->event_lock);
+}
+
+int ath11k_qmi_init_service(struct ath11k_base *ab)
+{
+ int ret;
+
+ memset(&ab->qmi.target, 0, sizeof(ab->qmi.target));
+ memset(&ab->qmi.target_mem, 0, sizeof(ab->qmi.target_mem));
+ ab->qmi.ab = ab;
+
+ ab->qmi.target_mem_mode = ATH11K_QMI_TARGET_MEM_MODE_DEFAULT;
+ ret = qmi_handle_init(&ab->qmi.handle, ATH11K_QMI_RESP_LEN_MAX,
+ &ath11k_qmi_ops, ath11k_qmi_msg_handlers);
+ if (ret < 0) {
+ ath11k_warn(ab, "failed to initialize qmi handle\n");
+ return ret;
+ }
+
+ ab->qmi.event_wq = alloc_workqueue("ath11k_qmi_driver_event",
+ WQ_UNBOUND, 1);
+ if (!ab->qmi.event_wq) {
+ ath11k_err(ab, "failed to allocate workqueue\n");
+ return -ENOMEM;
+ }
+
+ INIT_LIST_HEAD(&ab->qmi.event_list);
+ spin_lock_init(&ab->qmi.event_lock);
+ INIT_WORK(&ab->qmi.event_work, ath11k_qmi_driver_event_work);
+
+ ret = qmi_add_lookup(&ab->qmi.handle, ATH11K_QMI_WLFW_SERVICE_ID_V01,
+ ATH11K_QMI_WLFW_SERVICE_VERS_V01,
+ ATH11K_QMI_WLFW_SERVICE_INS_ID_V01);
+ if (ret < 0) {
+ ath11k_warn(ab, "failed to add qmi lookup\n");
+ return ret;
+ }
+
+ return ret;
+}
+
+void ath11k_qmi_deinit_service(struct ath11k_base *ab)
+{
+ qmi_handle_release(&ab->qmi.handle);
+ cancel_work_sync(&ab->qmi.event_work);
+ destroy_workqueue(ab->qmi.event_wq);
+}
+
diff --git a/drivers/net/wireless/ath/ath11k/qmi.h b/drivers/net/wireless/ath/ath11k/qmi.h
new file mode 100644
index 000000000000..3f7db642d869
--- /dev/null
+++ b/drivers/net/wireless/ath/ath11k/qmi.h
@@ -0,0 +1,445 @@
+/* SPDX-License-Identifier: BSD-3-Clause-Clear */
+/*
+ * Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
+ */
+
+#ifndef ATH11K_QMI_H
+#define ATH11K_QMI_H
+
+#include <linux/mutex.h>
+#include <linux/soc/qcom/qmi.h>
+
+#define ATH11K_HOST_VERSION_STRING "WIN"
+#define ATH11K_QMI_WLANFW_TIMEOUT_MS 5000
+#define ATH11K_QMI_MAX_BDF_FILE_NAME_SIZE 64
+#define ATH11K_QMI_BDF_ADDRESS 0x4B0C0000
+#define ATH11K_QMI_BDF_MAX_SIZE (256 * 1024)
+#define ATH11K_QMI_CALDATA_OFFSET (128 * 1024)
+#define ATH11K_QMI_WLANFW_MAX_BUILD_ID_LEN_V01 128
+#define ATH11K_QMI_WLFW_SERVICE_ID_V01 0x45
+#define ATH11K_QMI_WLFW_SERVICE_VERS_V01 0x01
+#define ATH11K_QMI_WLFW_SERVICE_INS_ID_V01 0x02
+#define ATH11K_QMI_WLANFW_MAX_TIMESTAMP_LEN_V01 32
+#define ATH11K_QMI_RESP_LEN_MAX 8192
+#define ATH11K_QMI_WLANFW_MAX_NUM_MEM_SEG_V01 32
+#define ATH11K_QMI_CALDB_SIZE 0x480000
+#define ATH11K_QMI_DEFAULT_CAL_FILE_NAME "caldata.bin"
+
+#define QMI_WLFW_REQUEST_MEM_IND_V01 0x0035
+#define QMI_WLFW_FW_MEM_READY_IND_V01 0x0037
+#define QMI_WLFW_COLD_BOOT_CAL_DONE_IND_V01 0x0021
+#define QMI_WLFW_FW_READY_IND_V01 0x0038
+
+#define QMI_WLANFW_MAX_DATA_SIZE_V01 6144
+#define ATH11K_FIRMWARE_MODE_OFF 4
+#define ATH11K_QMI_TARGET_MEM_MODE_DEFAULT 0
+
+struct ath11k_base;
+
+enum ath11k_qmi_file_type {
+ ATH11K_QMI_FILE_TYPE_BDF_GOLDEN,
+ ATH11K_QMI_FILE_TYPE_CALDATA,
+ ATH11K_QMI_MAX_FILE_TYPE,
+};
+
+enum ath11k_qmi_event_type {
+ ATH11K_QMI_EVENT_SERVER_ARRIVE,
+ ATH11K_QMI_EVENT_SERVER_EXIT,
+ ATH11K_QMI_EVENT_REQUEST_MEM,
+ ATH11K_QMI_EVENT_FW_MEM_READY,
+ ATH11K_QMI_EVENT_FW_READY,
+ ATH11K_QMI_EVENT_COLD_BOOT_CAL_START,
+ ATH11K_QMI_EVENT_COLD_BOOT_CAL_DONE,
+ ATH11K_QMI_EVENT_REGISTER_DRIVER,
+ ATH11K_QMI_EVENT_UNREGISTER_DRIVER,
+ ATH11K_QMI_EVENT_RECOVERY,
+ ATH11K_QMI_EVENT_FORCE_FW_ASSERT,
+ ATH11K_QMI_EVENT_POWER_UP,
+ ATH11K_QMI_EVENT_POWER_DOWN,
+ ATH11K_QMI_EVENT_MAX,
+};
+
+struct ath11k_qmi_driver_event {
+ struct list_head list;
+ enum ath11k_qmi_event_type type;
+ void *data;
+};
+
+struct ath11k_qmi_ce_cfg {
+ const struct ce_pipe_config *tgt_ce;
+ int tgt_ce_len;
+ const struct service_to_pipe *svc_to_ce_map;
+ int svc_to_ce_map_len;
+ const u8 *shadow_reg;
+ int shadow_reg_len;
+ u8 *shadow_reg_v2;
+ int shadow_reg_v2_len;
+};
+
+struct ath11k_qmi_event_msg {
+ struct list_head list;
+ enum ath11k_qmi_event_type type;
+};
+
+struct target_mem_chunk {
+ u32 size;
+ u32 type;
+ dma_addr_t paddr;
+ u32 vaddr;
+};
+
+struct target_info {
+ u32 chip_id;
+ u32 chip_family;
+ u32 board_id;
+ u32 soc_id;
+ u32 fw_version;
+ char fw_build_timestamp[ATH11K_QMI_WLANFW_MAX_TIMESTAMP_LEN_V01 + 1];
+ char fw_build_id[ATH11K_QMI_WLANFW_MAX_BUILD_ID_LEN_V01 + 1];
+};
+
+struct ath11k_qmi {
+ struct ath11k_base *ab;
+ struct qmi_handle handle;
+ struct sockaddr_qrtr sq;
+ struct work_struct event_work;
+ struct workqueue_struct *event_wq;
+ struct list_head event_list;
+ spinlock_t event_lock; /* spinlock for qmi event list */
+ struct ath11k_qmi_ce_cfg ce_cfg;
+ struct target_mem_chunk target_mem[ATH11K_QMI_WLANFW_MAX_NUM_MEM_SEG_V01];
+ u32 mem_seg_count;
+ u32 target_mem_mode;
+ u8 cal_done;
+ struct target_info target;
+};
+
+#define QMI_WLANFW_HOST_CAP_REQ_MSG_V01_MAX_LEN 189
+#define QMI_WLANFW_HOST_CAP_REQ_V01 0x0034
+#define QMI_WLANFW_HOST_CAP_RESP_MSG_V01_MAX_LEN 7
+#define QMI_WLFW_HOST_CAP_RESP_V01 0x0034
+#define QMI_WLFW_MAX_NUM_GPIO_V01 32
+#define QMI_IPQ8074_FW_MEM_MODE 0xFF
+#define HOST_DDR_REGION_TYPE 0x1
+#define BDF_MEM_REGION_TYPE 0x2
+#define CALDB_MEM_REGION_TYPE 0x4
+
+struct qmi_wlanfw_host_cap_req_msg_v01 {
+ u8 num_clients_valid;
+ u32 num_clients;
+ u8 wake_msi_valid;
+ u32 wake_msi;
+ u8 gpios_valid;
+ u32 gpios_len;
+ u32 gpios[QMI_WLFW_MAX_NUM_GPIO_V01];
+ u8 nm_modem_valid;
+ u8 nm_modem;
+ u8 bdf_support_valid;
+ u8 bdf_support;
+ u8 bdf_cache_support_valid;
+ u8 bdf_cache_support;
+ u8 m3_support_valid;
+ u8 m3_support;
+ u8 m3_cache_support_valid;
+ u8 m3_cache_support;
+ u8 cal_filesys_support_valid;
+ u8 cal_filesys_support;
+ u8 cal_cache_support_valid;
+ u8 cal_cache_support;
+ u8 cal_done_valid;
+ u8 cal_done;
+ u8 mem_bucket_valid;
+ u32 mem_bucket;
+ u8 mem_cfg_mode_valid;
+ u8 mem_cfg_mode;
+};
+
+struct qmi_wlanfw_host_cap_resp_msg_v01 {
+ struct qmi_response_type_v01 resp;
+};
+
+#define QMI_WLANFW_IND_REGISTER_REQ_MSG_V01_MAX_LEN 54
+#define QMI_WLANFW_IND_REGISTER_REQ_V01 0x0020
+#define QMI_WLANFW_IND_REGISTER_RESP_MSG_V01_MAX_LEN 18
+#define QMI_WLANFW_IND_REGISTER_RESP_V01 0x0020
+#define QMI_WLANFW_CLIENT_ID 0x4b4e454c
+
+struct qmi_wlanfw_ind_register_req_msg_v01 {
+ u8 fw_ready_enable_valid;
+ u8 fw_ready_enable;
+ u8 initiate_cal_download_enable_valid;
+ u8 initiate_cal_download_enable;
+ u8 initiate_cal_update_enable_valid;
+ u8 initiate_cal_update_enable;
+ u8 msa_ready_enable_valid;
+ u8 msa_ready_enable;
+ u8 pin_connect_result_enable_valid;
+ u8 pin_connect_result_enable;
+ u8 client_id_valid;
+ u32 client_id;
+ u8 request_mem_enable_valid;
+ u8 request_mem_enable;
+ u8 fw_mem_ready_enable_valid;
+ u8 fw_mem_ready_enable;
+ u8 fw_init_done_enable_valid;
+ u8 fw_init_done_enable;
+ u8 rejuvenate_enable_valid;
+ u32 rejuvenate_enable;
+ u8 xo_cal_enable_valid;
+ u8 xo_cal_enable;
+ u8 cal_done_enable_valid;
+ u8 cal_done_enable;
+};
+
+struct qmi_wlanfw_ind_register_resp_msg_v01 {
+ struct qmi_response_type_v01 resp;
+ u8 fw_status_valid;
+ u64 fw_status;
+};
+
+#define QMI_WLANFW_REQUEST_MEM_IND_MSG_V01_MAX_LEN 1124
+#define QMI_WLANFW_RESPOND_MEM_REQ_MSG_V01_MAX_LEN 548
+#define QMI_WLANFW_RESPOND_MEM_RESP_MSG_V01_MAX_LEN 7
+#define QMI_WLANFW_REQUEST_MEM_IND_V01 0x0035
+#define QMI_WLANFW_RESPOND_MEM_REQ_V01 0x0036
+#define QMI_WLANFW_RESPOND_MEM_RESP_V01 0x0036
+#define QMI_WLANFW_MAX_NUM_MEM_CFG_V01 2
+
+struct qmi_wlanfw_mem_cfg_s_v01 {
+ u64 offset;
+ u32 size;
+ u8 secure_flag;
+};
+
+enum qmi_wlanfw_mem_type_enum_v01 {
+ WLANFW_MEM_TYPE_ENUM_MIN_VAL_V01 = INT_MIN,
+ QMI_WLANFW_MEM_TYPE_MSA_V01 = 0,
+ QMI_WLANFW_MEM_TYPE_DDR_V01 = 1,
+ QMI_WLANFW_MEM_BDF_V01 = 2,
+ QMI_WLANFW_MEM_M3_V01 = 3,
+ QMI_WLANFW_MEM_CAL_V01 = 4,
+ QMI_WLANFW_MEM_DPD_V01 = 5,
+ WLANFW_MEM_TYPE_ENUM_MAX_VAL_V01 = INT_MAX,
+};
+
+struct qmi_wlanfw_mem_seg_s_v01 {
+ u32 size;
+ enum qmi_wlanfw_mem_type_enum_v01 type;
+ u32 mem_cfg_len;
+ struct qmi_wlanfw_mem_cfg_s_v01 mem_cfg[QMI_WLANFW_MAX_NUM_MEM_CFG_V01];
+};
+
+struct qmi_wlanfw_request_mem_ind_msg_v01 {
+ u32 mem_seg_len;
+ struct qmi_wlanfw_mem_seg_s_v01 mem_seg[ATH11K_QMI_WLANFW_MAX_NUM_MEM_SEG_V01];
+};
+
+struct qmi_wlanfw_mem_seg_resp_s_v01 {
+ u64 addr;
+ u32 size;
+ enum qmi_wlanfw_mem_type_enum_v01 type;
+ u8 restore;
+};
+
+struct qmi_wlanfw_respond_mem_req_msg_v01 {
+ u32 mem_seg_len;
+ struct qmi_wlanfw_mem_seg_resp_s_v01 mem_seg[ATH11K_QMI_WLANFW_MAX_NUM_MEM_SEG_V01];
+};
+
+struct qmi_wlanfw_respond_mem_resp_msg_v01 {
+ struct qmi_response_type_v01 resp;
+};
+
+struct qmi_wlanfw_fw_mem_ready_ind_msg_v01 {
+ char placeholder;
+};
+
+#define QMI_WLANFW_CAP_REQ_MSG_V01_MAX_LEN 0
+#define QMI_WLANFW_CAP_RESP_MSG_V01_MAX_LEN 207
+#define QMI_WLANFW_CAP_REQ_V01 0x0024
+#define QMI_WLANFW_CAP_RESP_V01 0x0024
+
+enum qmi_wlanfw_pipedir_enum_v01 {
+ QMI_WLFW_PIPEDIR_NONE_V01 = 0,
+ QMI_WLFW_PIPEDIR_IN_V01 = 1,
+ QMI_WLFW_PIPEDIR_OUT_V01 = 2,
+ QMI_WLFW_PIPEDIR_INOUT_V01 = 3,
+};
+
+struct qmi_wlanfw_ce_tgt_pipe_cfg_s_v01 {
+ __le32 pipe_num;
+ __le32 pipe_dir;
+ __le32 nentries;
+ __le32 nbytes_max;
+ __le32 flags;
+};
+
+struct qmi_wlanfw_ce_svc_pipe_cfg_s_v01 {
+ __le32 service_id;
+ __le32 pipe_dir;
+ __le32 pipe_num;
+};
+
+struct qmi_wlanfw_shadow_reg_cfg_s_v01 {
+ u16 id;
+ u16 offset;
+};
+
+struct qmi_wlanfw_shadow_reg_v2_cfg_s_v01 {
+ u32 addr;
+};
+
+struct qmi_wlanfw_memory_region_info_s_v01 {
+ u64 region_addr;
+ u32 size;
+ u8 secure_flag;
+};
+
+struct qmi_wlanfw_rf_chip_info_s_v01 {
+ u32 chip_id;
+ u32 chip_family;
+};
+
+struct qmi_wlanfw_rf_board_info_s_v01 {
+ u32 board_id;
+};
+
+struct qmi_wlanfw_soc_info_s_v01 {
+ u32 soc_id;
+};
+
+struct qmi_wlanfw_fw_version_info_s_v01 {
+ u32 fw_version;
+ char fw_build_timestamp[ATH11K_QMI_WLANFW_MAX_TIMESTAMP_LEN_V01 + 1];
+};
+
+enum qmi_wlanfw_cal_temp_id_enum_v01 {
+ QMI_WLANFW_CAL_TEMP_IDX_0_V01 = 0,
+ QMI_WLANFW_CAL_TEMP_IDX_1_V01 = 1,
+ QMI_WLANFW_CAL_TEMP_IDX_2_V01 = 2,
+ QMI_WLANFW_CAL_TEMP_IDX_3_V01 = 3,
+ QMI_WLANFW_CAL_TEMP_IDX_4_V01 = 4,
+ QMI_WLANFW_CAL_TEMP_ID_MAX_V01 = 0xFF,
+};
+
+struct qmi_wlanfw_cap_resp_msg_v01 {
+ struct qmi_response_type_v01 resp;
+ u8 chip_info_valid;
+ struct qmi_wlanfw_rf_chip_info_s_v01 chip_info;
+ u8 board_info_valid;
+ struct qmi_wlanfw_rf_board_info_s_v01 board_info;
+ u8 soc_info_valid;
+ struct qmi_wlanfw_soc_info_s_v01 soc_info;
+ u8 fw_version_info_valid;
+ struct qmi_wlanfw_fw_version_info_s_v01 fw_version_info;
+ u8 fw_build_id_valid;
+ char fw_build_id[ATH11K_QMI_WLANFW_MAX_BUILD_ID_LEN_V01 + 1];
+ u8 num_macs_valid;
+ u8 num_macs;
+};
+
+struct qmi_wlanfw_cap_req_msg_v01 {
+ char placeholder;
+};
+
+#define QMI_WLANFW_BDF_DOWNLOAD_REQ_MSG_V01_MAX_LEN 6182
+#define QMI_WLANFW_BDF_DOWNLOAD_RESP_MSG_V01_MAX_LEN 7
+#define QMI_WLANFW_BDF_DOWNLOAD_RESP_V01 0x0025
+#define QMI_WLANFW_BDF_DOWNLOAD_REQ_V01 0x0025
+/* TODO: Need to check with MCL and FW team that data can be pointer and
+ * can be last element in structure
+ */
+struct qmi_wlanfw_bdf_download_req_msg_v01 {
+ u8 valid;
+ u8 file_id_valid;
+ enum qmi_wlanfw_cal_temp_id_enum_v01 file_id;
+ u8 total_size_valid;
+ u32 total_size;
+ u8 seg_id_valid;
+ u32 seg_id;
+ u8 data_valid;
+ u32 data_len;
+ u8 data[QMI_WLANFW_MAX_DATA_SIZE_V01];
+ u8 end_valid;
+ u8 end;
+ u8 bdf_type_valid;
+ u8 bdf_type;
+};
+
+struct qmi_wlanfw_bdf_download_resp_msg_v01 {
+ struct qmi_response_type_v01 resp;
+};
+
+#define QMI_WLANFW_M3_INFO_REQ_MSG_V01_MAX_MSG_LEN 18
+#define QMI_WLANFW_M3_INFO_RESP_MSG_V01_MAX_MSG_LEN 7
+#define QMI_WLANFW_M3_INFO_RESP_V01 0x003C
+#define QMI_WLANFW_M3_INFO_REQ_V01 0x003C
+
+struct qmi_wlanfw_m3_info_req_msg_v01 {
+ u64 addr;
+ u32 size;
+};
+
+struct qmi_wlanfw_m3_info_resp_msg_v01 {
+ struct qmi_response_type_v01 resp;
+};
+
+#define QMI_WLANFW_WLAN_MODE_REQ_MSG_V01_MAX_LEN 11
+#define QMI_WLANFW_WLAN_MODE_RESP_MSG_V01_MAX_LEN 7
+#define QMI_WLANFW_WLAN_CFG_REQ_MSG_V01_MAX_LEN 803
+#define QMI_WLANFW_WLAN_CFG_RESP_MSG_V01_MAX_LEN 7
+#define QMI_WLANFW_WLAN_MODE_REQ_V01 0x0022
+#define QMI_WLANFW_WLAN_MODE_RESP_V01 0x0022
+#define QMI_WLANFW_WLAN_CFG_REQ_V01 0x0023
+#define QMI_WLANFW_WLAN_CFG_RESP_V01 0x0023
+#define QMI_WLANFW_MAX_STR_LEN_V01 16
+#define QMI_WLANFW_MAX_NUM_CE_V01 12
+#define QMI_WLANFW_MAX_NUM_SVC_V01 24
+#define QMI_WLANFW_MAX_NUM_SHADOW_REG_V01 24
+#define QMI_WLANFW_MAX_NUM_SHADOW_REG_V2_V01 36
+
+struct qmi_wlanfw_wlan_mode_req_msg_v01 {
+ u32 mode;
+ u8 hw_debug_valid;
+ u8 hw_debug;
+};
+
+struct qmi_wlanfw_wlan_mode_resp_msg_v01 {
+ struct qmi_response_type_v01 resp;
+};
+
+struct qmi_wlanfw_wlan_cfg_req_msg_v01 {
+ u8 host_version_valid;
+ char host_version[QMI_WLANFW_MAX_STR_LEN_V01 + 1];
+ u8 tgt_cfg_valid;
+ u32 tgt_cfg_len;
+ struct qmi_wlanfw_ce_tgt_pipe_cfg_s_v01
+ tgt_cfg[QMI_WLANFW_MAX_NUM_CE_V01];
+ u8 svc_cfg_valid;
+ u32 svc_cfg_len;
+ struct qmi_wlanfw_ce_svc_pipe_cfg_s_v01
+ svc_cfg[QMI_WLANFW_MAX_NUM_SVC_V01];
+ u8 shadow_reg_valid;
+ u32 shadow_reg_len;
+ struct qmi_wlanfw_shadow_reg_cfg_s_v01
+ shadow_reg[QMI_WLANFW_MAX_NUM_SHADOW_REG_V01];
+ u8 shadow_reg_v2_valid;
+ u32 shadow_reg_v2_len;
+ struct qmi_wlanfw_shadow_reg_v2_cfg_s_v01
+ shadow_reg_v2[QMI_WLANFW_MAX_NUM_SHADOW_REG_V2_V01];
+};
+
+struct qmi_wlanfw_wlan_cfg_resp_msg_v01 {
+ struct qmi_response_type_v01 resp;
+};
+
+int ath11k_qmi_firmware_start(struct ath11k_base *ab,
+ u32 mode);
+void ath11k_qmi_firmware_stop(struct ath11k_base *ab);
+void ath11k_qmi_event_work(struct work_struct *work);
+void ath11k_qmi_msg_recv_work(struct work_struct *work);
+void ath11k_qmi_deinit_service(struct ath11k_base *ab);
+int ath11k_qmi_init_service(struct ath11k_base *ab);
+
+#endif
diff --git a/drivers/net/wireless/ath/ath11k/reg.c b/drivers/net/wireless/ath/ath11k/reg.c
new file mode 100644
index 000000000000..453aa9c06969
--- /dev/null
+++ b/drivers/net/wireless/ath/ath11k/reg.c
@@ -0,0 +1,702 @@
+// SPDX-License-Identifier: BSD-3-Clause-Clear
+/*
+ * Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
+ */
+#include "core.h"
+#include "debug.h"
+
+/* World regdom to be used in case default regd from fw is unavailable */
+#define ATH11K_2GHZ_CH01_11 REG_RULE(2412 - 10, 2462 + 10, 40, 0, 20, 0)
+#define ATH11K_5GHZ_5150_5350 REG_RULE(5150 - 10, 5350 + 10, 80, 0, 30,\
+ NL80211_RRF_NO_IR)
+#define ATH11K_5GHZ_5725_5850 REG_RULE(5725 - 10, 5850 + 10, 80, 0, 30,\
+ NL80211_RRF_NO_IR)
+
+#define ETSI_WEATHER_RADAR_BAND_LOW 5590
+#define ETSI_WEATHER_RADAR_BAND_HIGH 5650
+#define ETSI_WEATHER_RADAR_BAND_CAC_TIMEOUT 600000
+
+static const struct ieee80211_regdomain ath11k_world_regd = {
+ .n_reg_rules = 3,
+ .alpha2 = "00",
+ .reg_rules = {
+ ATH11K_2GHZ_CH01_11,
+ ATH11K_5GHZ_5150_5350,
+ ATH11K_5GHZ_5725_5850,
+ }
+};
+
+static bool ath11k_regdom_changes(struct ath11k *ar, char *alpha2)
+{
+ const struct ieee80211_regdomain *regd;
+
+ regd = rcu_dereference_rtnl(ar->hw->wiphy->regd);
+ /* This can happen during wiphy registration where the previous
+ * user request is received before we update the regd received
+ * from firmware.
+ */
+ if (!regd)
+ return true;
+
+ return memcmp(regd->alpha2, alpha2, 2) != 0;
+}
+
+static void
+ath11k_reg_notifier(struct wiphy *wiphy, struct regulatory_request *request)
+{
+ struct ieee80211_hw *hw = wiphy_to_ieee80211_hw(wiphy);
+ struct wmi_init_country_params init_country_param;
+ struct ath11k *ar = hw->priv;
+ int ret;
+
+ ath11k_dbg(ar->ab, ATH11K_DBG_REG,
+ "Regulatory Notification received for %s\n", wiphy_name(wiphy));
+
+ /* Currently supporting only General User Hints. Cell-based user
+ * hints are to be handled later.
+ * Hints from other sources like Core and Beacons are not expected for
+ * self-managed wiphys.
+ */
+ if (!(request->initiator == NL80211_REGDOM_SET_BY_USER &&
+ request->user_reg_hint_type == NL80211_USER_REG_HINT_USER)) {
+ ath11k_warn(ar->ab, "Unexpected Regulatory event for this wiphy\n");
+ return;
+ }
+
+ if (!IS_ENABLED(CONFIG_ATH_REG_DYNAMIC_USER_REG_HINTS)) {
+ ath11k_dbg(ar->ab, ATH11K_DBG_REG,
+ "Country Setting is not allowed\n");
+ return;
+ }
+
+ if (!ath11k_regdom_changes(ar, request->alpha2)) {
+ ath11k_dbg(ar->ab, ATH11K_DBG_REG, "Country is already set\n");
+ return;
+ }
+
+ /* Set the country code to the firmware and wait for
+ * the WMI_REG_CHAN_LIST_CC EVENT for updating the
+ * reg info
+ */
+ init_country_param.flags = ALPHA_IS_SET;
+ memcpy(&init_country_param.cc_info.alpha2, request->alpha2, 2);
+
+ ret = ath11k_wmi_send_init_country_cmd(ar, init_country_param);
+ if (ret)
+ ath11k_warn(ar->ab,
+ "INIT Country code set to fw failed: %d\n", ret);
+}
+
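+/* Build a WMI scan channel list from the channels mac80211 currently
+ * has enabled: count them first to size the allocation, then fill in
+ * per-channel power, DFS and phymode parameters.
+ */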
+int ath11k_reg_update_chan_list(struct ath11k *ar)
+{
+ struct ieee80211_supported_band **bands;
+ struct scan_chan_list_params *params;
+ struct ieee80211_channel *channel;
+ struct ieee80211_hw *hw = ar->hw;
+ struct channel_param *ch;
+ enum nl80211_band band;
+ int num_channels = 0;
+ int params_len;
+ int i, ret;
+
+ bands = hw->wiphy->bands;
+ for (band = 0; band < NUM_NL80211_BANDS; band++) {
+ if (!bands[band])
+ continue;
+
+ for (i = 0; i < bands[band]->n_channels; i++) {
+ if (bands[band]->channels[i].flags &
+ IEEE80211_CHAN_DISABLED)
+ continue;
+
+ num_channels++;
+ }
+ }
+
+ if (WARN_ON(!num_channels))
+ return -EINVAL;
+
+ params_len = sizeof(struct scan_chan_list_params) +
+ num_channels * sizeof(struct channel_param);
+ params = kzalloc(params_len, GFP_KERNEL);
+
+ if (!params)
+ return -ENOMEM;
+
+ params->pdev_id = ar->pdev->pdev_id;
+ params->nallchans = num_channels;
+
+ ch = params->ch_param;
+
+ for (band = 0; band < NUM_NL80211_BANDS; band++) {
+ if (!bands[band])
+ continue;
+
+ for (i = 0; i < bands[band]->n_channels; i++) {
+ channel = &bands[band]->channels[i];
+
+ if (channel->flags & IEEE80211_CHAN_DISABLED)
+ continue;
+
+ /* TODO: Set to true/false based on some condition? */
+ ch->allow_ht = true;
+ ch->allow_vht = true;
+ ch->allow_he = true;
+
+ ch->dfs_set =
+ !!(channel->flags & IEEE80211_CHAN_RADAR);
+ ch->is_chan_passive = !!(channel->flags &
+ IEEE80211_CHAN_NO_IR);
+ ch->is_chan_passive |= ch->dfs_set;
+ ch->mhz = channel->center_freq;
+ ch->cfreq1 = channel->center_freq;
+ ch->minpower = 0;
+ ch->maxpower = channel->max_power * 2;
+ ch->maxregpower = channel->max_reg_power * 2;
+ ch->antennamax = channel->max_antenna_gain * 2;
+
+ /* TODO: Use appropriate phymodes */
+ if (channel->band == NL80211_BAND_2GHZ)
+ ch->phy_mode = MODE_11G;
+ else
+ ch->phy_mode = MODE_11A;
+
+ ath11k_dbg(ar->ab, ATH11K_DBG_WMI,
+ "mac channel [%d/%d] freq %d maxpower %d regpower %d antenna %d mode %d\n",
+ i, params->nallchans,
+ ch->mhz, ch->maxpower, ch->maxregpower,
+ ch->antennamax, ch->phy_mode);
+
+ ch++;
+ /* TODO: use quarter/half rate, cfreq12, dfs_cfreq2
+ * set_agile, reg_class_idx
+ */
+ }
+ }
+
+ ret = ath11k_wmi_send_scan_chan_list_cmd(ar, params);
+ kfree(params);
+
+ return ret;
+}
+
+static void ath11k_copy_regd(struct ieee80211_regdomain *regd_orig,
+ struct ieee80211_regdomain *regd_copy)
+{
+ u8 i;
+
+ /* The caller should have checked error conditions */
+ memcpy(regd_copy, regd_orig, sizeof(*regd_orig));
+
+ for (i = 0; i < regd_orig->n_reg_rules; i++)
+ memcpy(&regd_copy->reg_rules[i], &regd_orig->reg_rules[i],
+ sizeof(struct ieee80211_reg_rule));
+}
+
+int ath11k_regd_update(struct ath11k *ar, bool init)
+{
+ struct ieee80211_regdomain *regd, *regd_copy = NULL;
+ int ret, regd_len, pdev_id;
+ struct ath11k_base *ab;
+
+ ab = ar->ab;
+ pdev_id = ar->pdev_idx;
+
+ spin_lock(&ab->base_lock);
+
+ if (init) {
+ /* Apply the regd received during init through
+ * WMI_REG_CHAN_LIST_CC event. In case of failure to
+ * receive the regd, initialize with a default world
+ * regulatory.
+ */
+ if (ab->default_regd[pdev_id]) {
+ regd = ab->default_regd[pdev_id];
+ } else {
+ ath11k_warn(ab,
+ "failed to receive default regd during init\n");
+ regd = (struct ieee80211_regdomain *)&ath11k_world_regd;
+ }
+ } else {
+ regd = ab->new_regd[pdev_id];
+ }
+
+ if (!regd) {
+ ret = -EINVAL;
+ spin_unlock(&ab->base_lock);
+ goto err;
+ }
+
+ regd_len = sizeof(*regd) + (regd->n_reg_rules *
+ sizeof(struct ieee80211_reg_rule));
+
+ regd_copy = kzalloc(regd_len, GFP_ATOMIC);
+ if (regd_copy)
+ ath11k_copy_regd(regd, regd_copy);
+
+ spin_unlock(&ab->base_lock);
+
+ if (!regd_copy) {
+ ret = -ENOMEM;
+ goto err;
+ }
+
+ rtnl_lock();
+ ret = regulatory_set_wiphy_regd_sync_rtnl(ar->hw->wiphy, regd_copy);
+ rtnl_unlock();
+
+ kfree(regd_copy);
+
+ if (ret)
+ goto err;
+
+ if (ar->state == ATH11K_STATE_ON) {
+ ret = ath11k_reg_update_chan_list(ar);
+ if (ret)
+ goto err;
+ }
+
+ return 0;
+err:
+ ath11k_warn(ab, "failed to perform regd update: %d\n", ret);
+ return ret;
+}
+
+static enum nl80211_dfs_regions
+ath11k_map_fw_dfs_region(enum ath11k_dfs_region dfs_region)
+{
+ switch (dfs_region) {
+ case ATH11K_DFS_REG_FCC:
+ case ATH11K_DFS_REG_CN:
+ return NL80211_DFS_FCC;
+ case ATH11K_DFS_REG_ETSI:
+ case ATH11K_DFS_REG_KR:
+ return NL80211_DFS_ETSI;
+ case ATH11K_DFS_REG_MKK:
+ return NL80211_DFS_JP;
+ default:
+ return NL80211_DFS_UNSET;
+ }
+}
+
+static u32 ath11k_map_fw_reg_flags(u16 reg_flags)
+{
+ u32 flags = 0;
+
+ if (reg_flags & REGULATORY_CHAN_NO_IR)
+ flags = NL80211_RRF_NO_IR;
+
+ if (reg_flags & REGULATORY_CHAN_RADAR)
+ flags |= NL80211_RRF_DFS;
+
+ if (reg_flags & REGULATORY_CHAN_NO_OFDM)
+ flags |= NL80211_RRF_NO_OFDM;
+
+ if (reg_flags & REGULATORY_CHAN_INDOOR_ONLY)
+ flags |= NL80211_RRF_NO_OUTDOOR;
+
+ if (reg_flags & REGULATORY_CHAN_NO_HT40)
+ flags |= NL80211_RRF_NO_HT40;
+
+ if (reg_flags & REGULATORY_CHAN_NO_80MHZ)
+ flags |= NL80211_RRF_NO_80MHZ;
+
+ if (reg_flags & REGULATORY_CHAN_NO_160MHZ)
+ flags |= NL80211_RRF_NO_160MHZ;
+
+ return flags;
+}
+
+static bool
+ath11k_reg_can_intersect(struct ieee80211_reg_rule *rule1,
+ struct ieee80211_reg_rule *rule2)
+{
+ u32 start_freq1, end_freq1;
+ u32 start_freq2, end_freq2;
+
+ start_freq1 = rule1->freq_range.start_freq_khz;
+ start_freq2 = rule2->freq_range.start_freq_khz;
+
+ end_freq1 = rule1->freq_range.end_freq_khz;
+ end_freq2 = rule2->freq_range.end_freq_khz;
+
+ if ((start_freq1 >= start_freq2 &&
+ start_freq1 < end_freq2) ||
+ (start_freq2 > start_freq1 &&
+ start_freq2 < end_freq1))
+ return true;
+
+ /* TODO: Should we restrict intersection feasibility
+ * based on min bandwidth of the intersected region also,
+ * say the intersected rule should have a min bandwidth
+ * of 20MHz?
+ */
+
+ return false;
+}
+
+static void ath11k_reg_intersect_rules(struct ieee80211_reg_rule *rule1,
+ struct ieee80211_reg_rule *rule2,
+ struct ieee80211_reg_rule *new_rule)
+{
+ u32 start_freq1, end_freq1;
+ u32 start_freq2, end_freq2;
+ u32 freq_diff, max_bw;
+
+ start_freq1 = rule1->freq_range.start_freq_khz;
+ start_freq2 = rule2->freq_range.start_freq_khz;
+
+ end_freq1 = rule1->freq_range.end_freq_khz;
+ end_freq2 = rule2->freq_range.end_freq_khz;
+
+ new_rule->freq_range.start_freq_khz = max_t(u32, start_freq1,
+ start_freq2);
+ new_rule->freq_range.end_freq_khz = min_t(u32, end_freq1, end_freq2);
+
+ freq_diff = new_rule->freq_range.end_freq_khz -
+ new_rule->freq_range.start_freq_khz;
+ max_bw = min_t(u32, rule1->freq_range.max_bandwidth_khz,
+ rule2->freq_range.max_bandwidth_khz);
+ new_rule->freq_range.max_bandwidth_khz = min_t(u32, max_bw, freq_diff);
+
+ new_rule->power_rule.max_antenna_gain =
+ min_t(u32, rule1->power_rule.max_antenna_gain,
+ rule2->power_rule.max_antenna_gain);
+
+ new_rule->power_rule.max_eirp = min_t(u32, rule1->power_rule.max_eirp,
+ rule2->power_rule.max_eirp);
+
+ /* Use the flags of both the rules */
+ new_rule->flags = rule1->flags | rule2->flags;
+
+ /* To be safe, let's use the max cac timeout of both rules */
+ new_rule->dfs_cac_ms = max_t(u32, rule1->dfs_cac_ms,
+ rule2->dfs_cac_ms);
+}
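+
+/* Illustrative example (numbers hypothetical): a default rule of
+ * 5170000-5250000 kHz intersected with a current rule of
+ * 5210000-5330000 kHz yields 5210000-5250000 kHz, with the max
+ * bandwidth clamped to the 40000 kHz overlap, max_eirp and antenna
+ * gain taken as the minimum of the two rules, and the flags of both
+ * rules OR'ed together.
+ */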
+
+static struct ieee80211_regdomain *
+ath11k_regd_intersect(struct ieee80211_regdomain *default_regd,
+ struct ieee80211_regdomain *curr_regd)
+{
+ u8 num_old_regd_rules, num_curr_regd_rules, num_new_regd_rules;
+ struct ieee80211_reg_rule *old_rule, *curr_rule, *new_rule;
+ struct ieee80211_regdomain *new_regd = NULL;
+ u8 i, j, k;
+
+ num_old_regd_rules = default_regd->n_reg_rules;
+ num_curr_regd_rules = curr_regd->n_reg_rules;
+ num_new_regd_rules = 0;
+
+ /* Find the number of intersecting rules to allocate new regd memory */
+ for (i = 0; i < num_old_regd_rules; i++) {
+ old_rule = default_regd->reg_rules + i;
+ for (j = 0; j < num_curr_regd_rules; j++) {
+ curr_rule = curr_regd->reg_rules + j;
+
+ if (ath11k_reg_can_intersect(old_rule, curr_rule))
+ num_new_regd_rules++;
+ }
+ }
+
+ if (!num_new_regd_rules)
+ return NULL;
+
+ new_regd = kzalloc(sizeof(*new_regd) + (num_new_regd_rules *
+ sizeof(struct ieee80211_reg_rule)),
+ GFP_ATOMIC);
+
+ if (!new_regd)
+ return NULL;
+
+ /* We set the new country and dfs region directly and only trim
+ * the freq, power, antenna gain by intersecting with the
+ * default regdomain. Also MAX of the dfs cac timeout is selected.
+ */
+ new_regd->n_reg_rules = num_new_regd_rules;
+ memcpy(new_regd->alpha2, curr_regd->alpha2, sizeof(new_regd->alpha2));
+ new_regd->dfs_region = curr_regd->dfs_region;
+ new_rule = new_regd->reg_rules;
+
+ for (i = 0, k = 0; i < num_old_regd_rules; i++) {
+ old_rule = default_regd->reg_rules + i;
+ for (j = 0; j < num_curr_regd_rules; j++) {
+ curr_rule = curr_regd->reg_rules + j;
+
+ if (ath11k_reg_can_intersect(old_rule, curr_rule))
+ ath11k_reg_intersect_rules(old_rule, curr_rule,
+ (new_rule + k++));
+ }
+ }
+ return new_regd;
+}
+
+static const char *
+ath11k_reg_get_regdom_str(enum nl80211_dfs_regions dfs_region)
+{
+ switch (dfs_region) {
+ case NL80211_DFS_FCC:
+ return "FCC";
+ case NL80211_DFS_ETSI:
+ return "ETSI";
+ case NL80211_DFS_JP:
+ return "JP";
+ default:
+ return "UNSET";
+ }
+}
+
+static u16
+ath11k_reg_adjust_bw(u16 start_freq, u16 end_freq, u16 max_bw)
+{
+ u16 bw;
+
+ bw = end_freq - start_freq;
+ bw = min_t(u16, bw, max_bw);
+
+ if (bw >= 80 && bw < 160)
+ bw = 80;
+ else if (bw >= 40 && bw < 80)
+ bw = 40;
+ else if (bw < 40)
+ bw = 20;
+
+ return bw;
+}
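+
+/* For example (values hypothetical), a 100 MHz wide rule with max_bw
+ * 160 comes out as 80, a 60 MHz wide rule as 40, and anything under
+ * 40 MHz as 20, i.e. the result is always a standard 20/40/80/160 MHz
+ * channel width that fits within the rule.
+ */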
+
+static void
+ath11k_reg_update_rule(struct ieee80211_reg_rule *reg_rule, u32 start_freq,
+ u32 end_freq, u32 bw, u32 ant_gain, u32 reg_pwr,
+ u32 reg_flags)
+{
+ reg_rule->freq_range.start_freq_khz = MHZ_TO_KHZ(start_freq);
+ reg_rule->freq_range.end_freq_khz = MHZ_TO_KHZ(end_freq);
+ reg_rule->freq_range.max_bandwidth_khz = MHZ_TO_KHZ(bw);
+ reg_rule->power_rule.max_antenna_gain = DBI_TO_MBI(ant_gain);
+ reg_rule->power_rule.max_eirp = DBM_TO_MBM(reg_pwr);
+ reg_rule->flags = reg_flags;
+}
+
+static void
+ath11k_reg_update_weather_radar_band(struct ath11k_base *ab,
+ struct ieee80211_regdomain *regd,
+ struct cur_reg_rule *reg_rule,
+ u8 *rule_idx, u32 flags, u16 max_bw)
+{
+ u32 end_freq;
+ u16 bw;
+ u8 i;
+
+ i = *rule_idx;
+
+ bw = ath11k_reg_adjust_bw(reg_rule->start_freq,
+ ETSI_WEATHER_RADAR_BAND_LOW, max_bw);
+
+ ath11k_reg_update_rule(regd->reg_rules + i, reg_rule->start_freq,
+ ETSI_WEATHER_RADAR_BAND_LOW, bw,
+ reg_rule->ant_gain, reg_rule->reg_power,
+ flags);
+
+ ath11k_dbg(ab, ATH11K_DBG_REG,
+ "\t%d. (%d - %d @ %d) (%d, %d) (%d ms) (FLAGS %d)\n",
+ i + 1, reg_rule->start_freq, ETSI_WEATHER_RADAR_BAND_LOW,
+ bw, reg_rule->ant_gain, reg_rule->reg_power,
+ regd->reg_rules[i].dfs_cac_ms,
+ flags);
+
+ if (reg_rule->end_freq > ETSI_WEATHER_RADAR_BAND_HIGH)
+ end_freq = ETSI_WEATHER_RADAR_BAND_HIGH;
+ else
+ end_freq = reg_rule->end_freq;
+
+ bw = ath11k_reg_adjust_bw(ETSI_WEATHER_RADAR_BAND_LOW, end_freq,
+ max_bw);
+
+ i++;
+
+ ath11k_reg_update_rule(regd->reg_rules + i,
+ ETSI_WEATHER_RADAR_BAND_LOW, end_freq, bw,
+ reg_rule->ant_gain, reg_rule->reg_power,
+ flags);
+
+ regd->reg_rules[i].dfs_cac_ms = ETSI_WEATHER_RADAR_BAND_CAC_TIMEOUT;
+
+ ath11k_dbg(ab, ATH11K_DBG_REG,
+ "\t%d. (%d - %d @ %d) (%d, %d) (%d ms) (FLAGS %d)\n",
+ i + 1, ETSI_WEATHER_RADAR_BAND_LOW, end_freq,
+ bw, reg_rule->ant_gain, reg_rule->reg_power,
+ regd->reg_rules[i].dfs_cac_ms,
+ flags);
+
+ if (end_freq == reg_rule->end_freq) {
+ regd->n_reg_rules--;
+ *rule_idx = i;
+ return;
+ }
+
+ bw = ath11k_reg_adjust_bw(ETSI_WEATHER_RADAR_BAND_HIGH,
+ reg_rule->end_freq, max_bw);
+
+ i++;
+
+ ath11k_reg_update_rule(regd->reg_rules + i, ETSI_WEATHER_RADAR_BAND_HIGH,
+ reg_rule->end_freq, bw,
+ reg_rule->ant_gain, reg_rule->reg_power,
+ flags);
+
+ ath11k_dbg(ab, ATH11K_DBG_REG,
+ "\t%d. (%d - %d @ %d) (%d, %d) (%d ms) (FLAGS %d)\n",
+ i + 1, ETSI_WEATHER_RADAR_BAND_HIGH, reg_rule->end_freq,
+ bw, reg_rule->ant_gain, reg_rule->reg_power,
+ regd->reg_rules[i].dfs_cac_ms,
+ flags);
+
+ *rule_idx = i;
+}
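+
+/* Worked example (frequencies hypothetical): an ETSI DFS rule spanning
+ * 5490-5710 MHz is emitted as three rules:
+ *   1. 5490 MHz - ETSI_WEATHER_RADAR_BAND_LOW, default CAC timeout
+ *   2. ETSI_WEATHER_RADAR_BAND_LOW - ETSI_WEATHER_RADAR_BAND_HIGH,
+ *      with ETSI_WEATHER_RADAR_BAND_CAC_TIMEOUT
+ *   3. ETSI_WEATHER_RADAR_BAND_HIGH - 5710 MHz, default CAC timeout
+ * When the incoming rule ends inside the weather radar band, rule 3 is
+ * not emitted and n_reg_rules is decremented to account for it.
+ */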
+
+struct ieee80211_regdomain *
+ath11k_reg_build_regd(struct ath11k_base *ab,
+ struct cur_regulatory_info *reg_info, bool intersect)
+{
+ struct ieee80211_regdomain *tmp_regd, *default_regd, *new_regd = NULL;
+ struct cur_reg_rule *reg_rule;
+ u8 i = 0, j = 0;
+ u8 num_rules;
+ u16 max_bw;
+ u32 flags;
+ char alpha2[3];
+
+ num_rules = reg_info->num_5g_reg_rules + reg_info->num_2g_reg_rules;
+
+ if (!num_rules)
+ goto ret;
+
+ /* Add max additional rules to accommodate weather radar band */
+ if (reg_info->dfs_region == ATH11K_DFS_REG_ETSI)
+ num_rules += 2;
+
+ tmp_regd = kzalloc(sizeof(*tmp_regd) +
+ (num_rules * sizeof(struct ieee80211_reg_rule)),
+ GFP_ATOMIC);
+ if (!tmp_regd)
+ goto ret;
+
+ tmp_regd->n_reg_rules = num_rules;
+ memcpy(tmp_regd->alpha2, reg_info->alpha2, REG_ALPHA2_LEN + 1);
+ memcpy(alpha2, reg_info->alpha2, REG_ALPHA2_LEN + 1);
+ alpha2[2] = '\0';
+ tmp_regd->dfs_region = ath11k_map_fw_dfs_region(reg_info->dfs_region);
+
+ ath11k_dbg(ab, ATH11K_DBG_REG,
+ "\r\nCountry %s, CFG Regdomain %s FW Regdomain %d, num_reg_rules %d\n",
+ alpha2, ath11k_reg_get_regdom_str(tmp_regd->dfs_region),
+ reg_info->dfs_region, num_rules);
+ /* Update reg_rules[] below. Firmware is expected to
+ * send these rules in order (2G rules first and then 5G).
+ */
+ for (; i < tmp_regd->n_reg_rules; i++) {
+ if (reg_info->num_2g_reg_rules &&
+ (i < reg_info->num_2g_reg_rules)) {
+ reg_rule = reg_info->reg_rules_2g_ptr + i;
+ max_bw = min_t(u16, reg_rule->max_bw,
+ reg_info->max_bw_2g);
+ flags = 0;
+ } else if (reg_info->num_5g_reg_rules &&
+ (j < reg_info->num_5g_reg_rules)) {
+ reg_rule = reg_info->reg_rules_5g_ptr + j++;
+ max_bw = min_t(u16, reg_rule->max_bw,
+ reg_info->max_bw_5g);
+
+ /* FW doesn't pass NL80211_RRF_AUTO_BW flag for
+ * BW Auto correction, we can enable this by default
+ * for all 5G rules here. The regulatory core performs
+ * BW correction if required and applies flags as
+ * per other BW rule flags we pass from here
+ */
+ flags = NL80211_RRF_AUTO_BW;
+ } else {
+ break;
+ }
+
+ flags |= ath11k_map_fw_reg_flags(reg_rule->flags);
+
+ ath11k_reg_update_rule(tmp_regd->reg_rules + i,
+ reg_rule->start_freq,
+ reg_rule->end_freq, max_bw,
+ reg_rule->ant_gain, reg_rule->reg_power,
+ flags);
+
+ /* Update dfs cac timeout if the dfs domain is ETSI and the
+ * new rule covers weather radar band.
+ * Default value of '0' corresponds to 60s timeout, so no
+ * need to update that for other rules.
+ */
+ if (flags & NL80211_RRF_DFS &&
+ reg_info->dfs_region == ATH11K_DFS_REG_ETSI &&
+ (reg_rule->end_freq > ETSI_WEATHER_RADAR_BAND_LOW &&
+ reg_rule->start_freq < ETSI_WEATHER_RADAR_BAND_HIGH)) {
+ ath11k_reg_update_weather_radar_band(ab, tmp_regd,
+ reg_rule, &i,
+ flags, max_bw);
+ continue;
+ }
+
+ ath11k_dbg(ab, ATH11K_DBG_REG,
+ "\t%d. (%d - %d @ %d) (%d, %d) (%d ms) (FLAGS %d)\n",
+ i + 1, reg_rule->start_freq, reg_rule->end_freq,
+ max_bw, reg_rule->ant_gain, reg_rule->reg_power,
+ tmp_regd->reg_rules[i].dfs_cac_ms,
+ flags);
+ }
+
+ if (intersect) {
+ default_regd = ab->default_regd[reg_info->phy_id];
+
+ /* Get a new regd by intersecting the received regd with
+ * our default regd.
+ */
+ new_regd = ath11k_regd_intersect(default_regd, tmp_regd);
+ kfree(tmp_regd);
+ if (!new_regd) {
+ ath11k_warn(ab, "Unable to create intersected regdomain\n");
+ goto ret;
+ }
+ } else {
+ new_regd = tmp_regd;
+ }
+
+ret:
+ return new_regd;
+}
+
+void ath11k_regd_update_work(struct work_struct *work)
+{
+ struct ath11k *ar = container_of(work, struct ath11k,
+ regd_update_work);
+ int ret;
+
+ ret = ath11k_regd_update(ar, false);
+ if (ret) {
+ /* Firmware has already moved to the new regd. We need
+ * to maintain channel consistency across FW, Host driver
+ * and userspace. Hence as a fallback mechanism we can set
+ * the prev or default country code to the firmware.
+ */
+ /* TODO: Implement Fallback Mechanism */
+ }
+}
+
+void ath11k_reg_init(struct ath11k *ar)
+{
+ ar->hw->wiphy->regulatory_flags = REGULATORY_WIPHY_SELF_MANAGED;
+ ar->hw->wiphy->reg_notifier = ath11k_reg_notifier;
+}
+
+void ath11k_reg_free(struct ath11k_base *ab)
+{
+ int i;
+
+ for (i = 0; i < MAX_RADIOS; i++) {
+ kfree(ab->default_regd[i]);
+ kfree(ab->new_regd[i]);
+ }
+}
diff --git a/drivers/net/wireless/ath/ath11k/reg.h b/drivers/net/wireless/ath/ath11k/reg.h
new file mode 100644
index 000000000000..39b7fc943541
--- /dev/null
+++ b/drivers/net/wireless/ath/ath11k/reg.h
@@ -0,0 +1,35 @@
+/* SPDX-License-Identifier: BSD-3-Clause-Clear */
+/*
+ * Copyright (c) 2019 The Linux Foundation. All rights reserved.
+ */
+
+#ifndef ATH11K_REG_H
+#define ATH11K_REG_H
+
+#include <linux/kernel.h>
+#include <net/regulatory.h>
+
+struct ath11k_base;
+struct ath11k;
+
+/* DFS regdomains supported by Firmware */
+enum ath11k_dfs_region {
+ ATH11K_DFS_REG_UNSET,
+ ATH11K_DFS_REG_FCC,
+ ATH11K_DFS_REG_ETSI,
+ ATH11K_DFS_REG_MKK,
+ ATH11K_DFS_REG_CN,
+ ATH11K_DFS_REG_KR,
+ ATH11K_DFS_REG_UNDEF,
+};
+
+/* ATH11K Regulatory API's */
+void ath11k_reg_init(struct ath11k *ar);
+void ath11k_reg_free(struct ath11k_base *ab);
+void ath11k_regd_update_work(struct work_struct *work);
+struct ieee80211_regdomain *
+ath11k_reg_build_regd(struct ath11k_base *ab,
+ struct cur_regulatory_info *reg_info, bool intersect);
+int ath11k_regd_update(struct ath11k *ar, bool init);
+int ath11k_reg_update_chan_list(struct ath11k *ar);
+#endif
diff --git a/drivers/net/wireless/ath/ath11k/rx_desc.h b/drivers/net/wireless/ath/ath11k/rx_desc.h
new file mode 100644
index 000000000000..a5aff801f17f
--- /dev/null
+++ b/drivers/net/wireless/ath/ath11k/rx_desc.h
@@ -0,0 +1,1212 @@
+/* SPDX-License-Identifier: BSD-3-Clause-Clear */
+/*
+ * Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
+ */
+#ifndef ATH11K_RX_DESC_H
+#define ATH11K_RX_DESC_H
+
+enum rx_desc_rxpcu_filter {
+ RX_DESC_RXPCU_FILTER_PASS,
+ RX_DESC_RXPCU_FILTER_MONITOR_CLIENT,
+ RX_DESC_RXPCU_FILTER_MONITOR_OTHER,
+};
+
+/* rxpcu_filter_pass
+ * This MPDU passed the normal frame filter programming of rxpcu.
+ *
+ * rxpcu_filter_monitor_client
+ * This MPDU did not pass the regular frame filter and would
+ * have been dropped, were it not for the frame fitting into the
+ * 'monitor_client' category.
+ *
+ * rxpcu_filter_monitor_other
+ * This MPDU did not pass the regular frame filter and also did
+ * not pass the rxpcu_monitor_client filter. It would have been
+ * dropped except that it did pass the 'monitor_other' category.
+ */
+
+#define RX_DESC_INFO0_RXPCU_MPDU_FITLER GENMASK(1, 0)
+#define RX_DESC_INFO0_SW_FRAME_GRP_ID GENMASK(8, 2)
+
+enum rx_desc_sw_frame_grp_id {
+ RX_DESC_SW_FRAME_GRP_ID_NDP_FRAME,
+ RX_DESC_SW_FRAME_GRP_ID_MCAST_DATA,
+ RX_DESC_SW_FRAME_GRP_ID_UCAST_DATA,
+ RX_DESC_SW_FRAME_GRP_ID_NULL_DATA,
+ RX_DESC_SW_FRAME_GRP_ID_MGMT_0000,
+ RX_DESC_SW_FRAME_GRP_ID_MGMT_0001,
+ RX_DESC_SW_FRAME_GRP_ID_MGMT_0010,
+ RX_DESC_SW_FRAME_GRP_ID_MGMT_0011,
+ RX_DESC_SW_FRAME_GRP_ID_MGMT_0100,
+ RX_DESC_SW_FRAME_GRP_ID_MGMT_0101,
+ RX_DESC_SW_FRAME_GRP_ID_MGMT_0110,
+ RX_DESC_SW_FRAME_GRP_ID_MGMT_0111,
+ RX_DESC_SW_FRAME_GRP_ID_MGMT_1000,
+ RX_DESC_SW_FRAME_GRP_ID_MGMT_1001,
+ RX_DESC_SW_FRAME_GRP_ID_MGMT_1010,
+ RX_DESC_SW_FRAME_GRP_ID_MGMT_1011,
+ RX_DESC_SW_FRAME_GRP_ID_MGMT_1100,
+ RX_DESC_SW_FRAME_GRP_ID_MGMT_1101,
+ RX_DESC_SW_FRAME_GRP_ID_MGMT_1110,
+ RX_DESC_SW_FRAME_GRP_ID_MGMT_1111,
+ RX_DESC_SW_FRAME_GRP_ID_CTRL_0000,
+ RX_DESC_SW_FRAME_GRP_ID_CTRL_0001,
+ RX_DESC_SW_FRAME_GRP_ID_CTRL_0010,
+ RX_DESC_SW_FRAME_GRP_ID_CTRL_0011,
+ RX_DESC_SW_FRAME_GRP_ID_CTRL_0100,
+ RX_DESC_SW_FRAME_GRP_ID_CTRL_0101,
+ RX_DESC_SW_FRAME_GRP_ID_CTRL_0110,
+ RX_DESC_SW_FRAME_GRP_ID_CTRL_0111,
+ RX_DESC_SW_FRAME_GRP_ID_CTRL_1000,
+ RX_DESC_SW_FRAME_GRP_ID_CTRL_1001,
+ RX_DESC_SW_FRAME_GRP_ID_CTRL_1010,
+ RX_DESC_SW_FRAME_GRP_ID_CTRL_1011,
+ RX_DESC_SW_FRAME_GRP_ID_CTRL_1100,
+ RX_DESC_SW_FRAME_GRP_ID_CTRL_1101,
+ RX_DESC_SW_FRAME_GRP_ID_CTRL_1110,
+ RX_DESC_SW_FRAME_GRP_ID_CTRL_1111,
+ RX_DESC_SW_FRAME_GRP_ID_UNSUPPORTED,
+ RX_DESC_SW_FRAME_GRP_ID_PHY_ERR,
+};
+
+enum rx_desc_decap_type {
+ RX_DESC_DECAP_TYPE_RAW,
+ RX_DESC_DECAP_TYPE_NATIVE_WIFI,
+ RX_DESC_DECAP_TYPE_ETHERNET2_DIX,
+ RX_DESC_DECAP_TYPE_8023,
+};
+
+enum rx_desc_decrypt_status_code {
+ RX_DESC_DECRYPT_STATUS_CODE_OK,
+ RX_DESC_DECRYPT_STATUS_CODE_UNPROTECTED_FRAME,
+ RX_DESC_DECRYPT_STATUS_CODE_DATA_ERR,
+ RX_DESC_DECRYPT_STATUS_CODE_KEY_INVALID,
+ RX_DESC_DECRYPT_STATUS_CODE_PEER_ENTRY_INVALID,
+ RX_DESC_DECRYPT_STATUS_CODE_OTHER,
+};
+
+#define RX_ATTENTION_INFO1_FIRST_MPDU BIT(0)
+#define RX_ATTENTION_INFO1_RSVD_1A BIT(1)
+#define RX_ATTENTION_INFO1_MCAST_BCAST BIT(2)
+#define RX_ATTENTION_INFO1_AST_IDX_NOT_FOUND BIT(3)
+#define RX_ATTENTION_INFO1_AST_IDX_TIMEDOUT BIT(4)
+#define RX_ATTENTION_INFO1_POWER_MGMT BIT(5)
+#define RX_ATTENTION_INFO1_NON_QOS BIT(6)
+#define RX_ATTENTION_INFO1_NULL_DATA BIT(7)
+#define RX_ATTENTION_INFO1_MGMT_TYPE BIT(8)
+#define RX_ATTENTION_INFO1_CTRL_TYPE BIT(9)
+#define RX_ATTENTION_INFO1_MORE_DATA BIT(10)
+#define RX_ATTENTION_INFO1_EOSP BIT(11)
+#define RX_ATTENTION_INFO1_A_MSDU_ERROR BIT(12)
+#define RX_ATTENTION_INFO1_FRAGMENT BIT(13)
+#define RX_ATTENTION_INFO1_ORDER BIT(14)
+#define RX_ATTENTION_INFO1_CCE_MATCH BIT(15)
+#define RX_ATTENTION_INFO1_OVERFLOW_ERR BIT(16)
+#define RX_ATTENTION_INFO1_MSDU_LEN_ERR BIT(17)
+#define RX_ATTENTION_INFO1_TCP_UDP_CKSUM_FAIL BIT(18)
+#define RX_ATTENTION_INFO1_IP_CKSUM_FAIL BIT(19)
+#define RX_ATTENTION_INFO1_SA_IDX_INVALID BIT(20)
+#define RX_ATTENTION_INFO1_DA_IDX_INVALID BIT(21)
+#define RX_ATTENTION_INFO1_RSVD_1B BIT(22)
+#define RX_ATTENTION_INFO1_RX_IN_TX_DECRYPT_BYP BIT(23)
+#define RX_ATTENTION_INFO1_ENCRYPT_REQUIRED BIT(24)
+#define RX_ATTENTION_INFO1_DIRECTED BIT(25)
+#define RX_ATTENTION_INFO1_BUFFER_FRAGMENT BIT(26)
+#define RX_ATTENTION_INFO1_MPDU_LEN_ERR BIT(27)
+#define RX_ATTENTION_INFO1_TKIP_MIC_ERR BIT(28)
+#define RX_ATTENTION_INFO1_DECRYPT_ERR BIT(29)
+#define RX_ATTENTION_INFO1_UNDECRYPT_FRAME_ERR BIT(30)
+#define RX_ATTENTION_INFO1_FCS_ERR BIT(31)
+
+#define RX_ATTENTION_INFO2_FLOW_IDX_TIMEOUT BIT(0)
+#define RX_ATTENTION_INFO2_FLOW_IDX_INVALID BIT(1)
+#define RX_ATTENTION_INFO2_WIFI_PARSER_ERR BIT(2)
+#define RX_ATTENTION_INFO2_AMSDU_PARSER_ERR BIT(3)
+#define RX_ATTENTION_INFO2_SA_IDX_TIMEOUT BIT(4)
+#define RX_ATTENTION_INFO2_DA_IDX_TIMEOUT BIT(5)
+#define RX_ATTENTION_INFO2_MSDU_LIMIT_ERR BIT(6)
+#define RX_ATTENTION_INFO2_DA_IS_VALID BIT(7)
+#define RX_ATTENTION_INFO2_DA_IS_MCBC BIT(8)
+#define RX_ATTENTION_INFO2_SA_IS_VALID BIT(9)
+#define RX_ATTENTION_INFO2_DCRYPT_STATUS_CODE GENMASK(12, 10)
+#define RX_ATTENTION_INFO2_RX_BITMAP_NOT_UPDED BIT(13)
+#define RX_ATTENTION_INFO2_MSDU_DONE BIT(31)
+
+struct rx_attention {
+ __le16 info0;
+ __le16 phy_ppdu_id;
+ __le32 info1;
+ __le32 info2;
+} __packed;
+
+/* rx_attention
+ *
+ * rxpcu_mpdu_filter_in_category
+ * Field indicates what the reason was that this mpdu frame
+ * was allowed to come into the receive path by rxpcu. Values
+ * are defined in enum %RX_DESC_RXPCU_FILTER_*.
+ *
+ * sw_frame_group_id
+ * SW processes frames based on certain classifications. Values
+ * are defined in enum %RX_DESC_SW_FRAME_GRP_ID_*.
+ *
+ * phy_ppdu_id
+ * A ppdu counter value that PHY increments for every PPDU
+ * received. The counter value wraps around.
+ *
+ * first_mpdu
+ * Indicates the first MPDU of the PPDU. If both first_mpdu
+ * and last_mpdu are set in the MSDU then this is not an
+ * A-MPDU frame but a stand alone MPDU. Interior MPDU in an
+ * A-MPDU shall have both first_mpdu and last_mpdu bits set to
+ * 0. The PPDU start status will only be valid when this bit
+ * is set.
+ *
+ * mcast_bcast
+ * Multicast / broadcast indicator. Only set when the MAC
+ * address 1 bit 0 is set indicating mcast/bcast and the BSSID
+ * matches one of the 4 BSSID registers. Only set when
+ * first_msdu is set.
+ *
+ * ast_index_not_found
+ * Only valid when first_msdu is set. Indicates no AST matching
+ * entries within the max search count.
+ *
+ * ast_index_timeout
+ * Only valid when first_msdu is set. Indicates an unsuccessful
+ * search in the address search table due to timeout.
+ *
+ * power_mgmt
+ * Power management bit set in the 802.11 header. Only set
+ * when first_msdu is set.
+ *
+ * non_qos
+ * Set if packet is not a QoS data frame. Only set when
+ * first_msdu is set.
+ *
+ * null_data
+ * Set if frame type indicates either null data or QoS null
+ * data format. Only set when first_msdu is set.
+ *
+ * mgmt_type
+ * Set if packet is a management packet. Only set when
+ * first_msdu is set.
+ *
+ * ctrl_type
+ * Set if packet is a control packet. Only set when first_msdu
+ * is set.
+ *
+ * more_data
+ * Set if more bit in frame control is set. Only set when
+ * first_msdu is set.
+ *
+ * eosp
+ * Set if the EOSP (end of service period) bit in the QoS
+ * control field is set. Only set when first_msdu is set.
+ *
+ * a_msdu_error
+ * Set if number of MSDUs in A-MSDU is above a threshold or if the
+ * size of the MSDU is invalid. This receive buffer will contain
+ * all of the remainder of MSDUs in this MPDU w/o decapsulation.
+ *
+ * fragment
+ * Indicates that this is an 802.11 fragment frame. This is
+ * set when either the more_frag bit is set in the frame
+ * control or the fragment number is not zero. Only set when
+ * first_msdu is set.
+ *
+ * order
+ * Set if the order bit in the frame control is set. Only set
+ * when first_msdu is set.
+ *
+ * cce_match
+ * Indicates that this status has a corresponding MSDU that
+ * requires FW processing. The OLE will have classification
+ * ring mask registers which will indicate the ring(s) for
+ * packets and descriptors which need FW attention.
+ *
+ * overflow_err
+ * PCU Receive FIFO does not have enough space to store the
+ * full receive packet. Enough space is reserved in the
+ * receive FIFO for the status to be written. This MPDU and the
+ * remaining packets in the PPDU will be filtered and no Ack response
+ * will be transmitted.
+ *
+ * msdu_length_err
+ * Indicates that the MSDU length from the 802.3 encapsulated
+ * length field extends beyond the MPDU boundary.
+ *
+ * tcp_udp_chksum_fail
+ * Indicates that the computed checksum (tcp_udp_chksum) did
+ * not match the checksum in the TCP/UDP header.
+ *
+ * ip_chksum_fail
+ * Indicates that the computed checksum did not match the
+ * checksum in the IP header.
+ *
+ * sa_idx_invalid
+ * Indicates no matching entry was found in the address search
+ * table for the source MAC address.
+ *
+ * da_idx_invalid
+ * Indicates no matching entry was found in the address search
+ * table for the destination MAC address.
+ *
+ * rx_in_tx_decrypt_byp
+ * Indicates that RX packet is not decrypted as Crypto is busy
+ * with TX packet processing.
+ *
+ * encrypt_required
+ * Indicates that this data type frame is not encrypted even if
+ * the policy for this MPDU requires encryption as indicated in
+ * the peer table key type.
+ *
+ * directed
+ * MPDU is a directed packet which means that the RA matched
+ * our STA addresses. In proxySTA it means that the TA matched
+ * an entry in our address search table with the corresponding
+ * 'no_ack' bit in the address search entry cleared.
+ *
+ * buffer_fragment
+ * Indicates that at least one of the rx buffers has been
+ * fragmented. If set the FW should look at the rx_frag_info
+ * descriptor described below.
+ *
+ * mpdu_length_err
+ * Indicates that the MPDU was prematurely terminated
+ * resulting in a truncated MPDU. Don't trust the MPDU length
+ * field.
+ *
+ * tkip_mic_err
+ * Indicates that the MPDU Michael integrity check failed
+ *
+ * decrypt_err
+ * Indicates that the MPDU decrypt integrity check failed
+ *
+ * fcs_err
+ * Indicates that the MPDU FCS check failed
+ *
+ * flow_idx_timeout
+ * Indicates an unsuccessful flow search due to the expiring of
+ * the search timer.
+ *
+ * flow_idx_invalid
+ * flow id is not valid.
+ *
+ * amsdu_parser_error
+ * A-MSDU could not be properly de-aggregated.
+ *
+ * sa_idx_timeout
+ * Indicates an unsuccessful search for the source MAC address
+ * due to the expiring of the search timer.
+ *
+ * da_idx_timeout
+ * Indicates an unsuccessful search for the destination MAC
+ * address due to the expiring of the search timer.
+ *
+ * msdu_limit_error
+ * Indicates that the MSDU threshold was exceeded and thus
+ * all the rest of the MSDUs will not be scattered and will not
+ * be decapsulated but will be DMA'ed in RAW format as a single
+ * MSDU buffer.
+ *
+ * da_is_valid
+ * Indicates that OLE found a valid DA entry.
+ *
+ * da_is_mcbc
+ * Field Only valid if da_is_valid is set. Indicates the DA address
+ * was a Multicast or Broadcast address.
+ *
+ * sa_is_valid
+ * Indicates that OLE found a valid SA entry.
+ *
+ * decrypt_status_code
+ * Field provides insight into the decryption performed. Values are
+ * defined in enum %RX_DESC_DECRYPT_STATUS_CODE*.
+ *
+ * rx_bitmap_not_updated
+ * Frame is received, but RXPCU could not update the receive bitmap
+ * due to (temporary) fifo constraints.
+ *
+ * msdu_done
+ * If set indicates that the RX packet data, RX header data, RX
+ * PPDU start descriptor, RX MPDU start/end descriptor, RX MSDU
+ * start/end descriptors and RX Attention descriptor are all
+ * valid. This bit must be in the last octet of the
+ * descriptor.
+ */
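+
+/* Usage sketch (illustrative, not part of this patch): a consumer
+ * would typically check msdu_done before trusting any other field,
+ * e.g. with FIELD_GET() from <linux/bitfield.h>:
+ *
+ *	u32 info2 = __le32_to_cpu(attn->info2);
+ *
+ *	if (info2 & RX_ATTENTION_INFO2_MSDU_DONE &&
+ *	    FIELD_GET(RX_ATTENTION_INFO2_DCRYPT_STATUS_CODE, info2) ==
+ *	    RX_DESC_DECRYPT_STATUS_CODE_OK)
+ *		... frame decrypted fine, continue processing ...
+ */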
+
+#define RX_MPDU_START_INFO0_NDP_FRAME BIT(9)
+#define RX_MPDU_START_INFO0_PHY_ERR BIT(10)
+#define RX_MPDU_START_INFO0_PHY_ERR_MPDU_HDR BIT(11)
+#define RX_MPDU_START_INFO0_PROTO_VER_ERR BIT(12)
+#define RX_MPDU_START_INFO0_AST_LOOKUP_VALID BIT(13)
+
+#define RX_MPDU_START_INFO1_MPDU_CTRL_VALID BIT(0)
+#define RX_MPDU_START_INFO1_MPDU_DUR_VALID BIT(1)
+#define RX_MPDU_START_INFO1_MAC_ADDR1_VALID BIT(2)
+#define RX_MPDU_START_INFO1_MAC_ADDR2_VALID BIT(3)
+#define RX_MPDU_START_INFO1_MAC_ADDR3_VALID BIT(4)
+#define RX_MPDU_START_INFO1_MAC_ADDR4_VALID BIT(5)
+#define RX_MPDU_START_INFO1_MPDU_SEQ_CTRL_VALID BIT(6)
+#define RX_MPDU_START_INFO1_MPDU_QOS_CTRL_VALID BIT(7)
+#define RX_MPDU_START_INFO1_MPDU_HT_CTRL_VALID BIT(8)
+#define RX_MPDU_START_INFO1_ENCRYPT_INFO_VALID BIT(9)
+#define RX_MPDU_START_INFO1_MPDU_FRAG_NUMBER GENMASK(13, 10)
+#define RX_MPDU_START_INFO1_MORE_FRAG_FLAG BIT(14)
+#define RX_MPDU_START_INFO1_FROM_DS BIT(16)
+#define RX_MPDU_START_INFO1_TO_DS BIT(17)
+#define RX_MPDU_START_INFO1_ENCRYPTED BIT(18)
+#define RX_MPDU_START_INFO1_MPDU_RETRY BIT(19)
+#define RX_MPDU_START_INFO1_MPDU_SEQ_NUM GENMASK(31, 20)
+
+#define RX_MPDU_START_INFO2_EPD_EN BIT(0)
+#define RX_MPDU_START_INFO2_ALL_FRAME_ENCPD BIT(1)
+#define RX_MPDU_START_INFO2_ENC_TYPE GENMASK(5, 2)
+#define RX_MPDU_START_INFO2_VAR_WEP_KEY_WIDTH GENMASK(7, 6)
+#define RX_MPDU_START_INFO2_MESH_STA BIT(8)
+#define RX_MPDU_START_INFO2_BSSID_HIT BIT(9)
+#define RX_MPDU_START_INFO2_BSSID_NUM GENMASK(13, 10)
+#define RX_MPDU_START_INFO2_TID GENMASK(17, 14)
+
+#define RX_MPDU_START_INFO3_REO_DEST_IND GENMASK(4, 0)
+#define RX_MPDU_START_INFO3_FLOW_ID_TOEPLITZ BIT(7)
+#define RX_MPDU_START_INFO3_PKT_SEL_FP_UCAST_DATA BIT(8)
+#define RX_MPDU_START_INFO3_PKT_SEL_FP_MCAST_DATA BIT(9)
+#define RX_MPDU_START_INFO3_PKT_SEL_FP_CTRL_BAR BIT(10)
+#define RX_MPDU_START_INFO3_RXDMA0_SRC_RING_SEL GENMASK(12, 11)
+#define RX_MPDU_START_INFO3_RXDMA0_DST_RING_SEL GENMASK(14, 13)
+
+#define RX_MPDU_START_INFO4_REO_QUEUE_DESC_HI GENMASK(7, 0)
+#define RX_MPDU_START_INFO4_RECV_QUEUE_NUM GENMASK(23, 8)
+#define RX_MPDU_START_INFO4_PRE_DELIM_ERR_WARN BIT(24)
+#define RX_MPDU_START_INFO4_FIRST_DELIM_ERR BIT(25)
+
+#define RX_MPDU_START_INFO5_KEY_ID GENMASK(7, 0)
+#define RX_MPDU_START_INFO5_NEW_PEER_ENTRY BIT(8)
+#define RX_MPDU_START_INFO5_DECRYPT_NEEDED BIT(9)
+#define RX_MPDU_START_INFO5_DECAP_TYPE GENMASK(11, 10)
+#define RX_MPDU_START_INFO5_VLAN_TAG_C_PADDING BIT(12)
+#define RX_MPDU_START_INFO5_VLAN_TAG_S_PADDING BIT(13)
+#define RX_MPDU_START_INFO5_STRIP_VLAN_TAG_C BIT(14)
+#define RX_MPDU_START_INFO5_STRIP_VLAN_TAG_S BIT(15)
+#define RX_MPDU_START_INFO5_PRE_DELIM_COUNT GENMASK(27, 16)
+#define RX_MPDU_START_INFO5_AMPDU_FLAG BIT(28)
+#define RX_MPDU_START_INFO5_BAR_FRAME BIT(29)
+
+#define RX_MPDU_START_INFO6_MPDU_LEN GENMASK(13, 0)
+#define RX_MPDU_START_INFO6_FIRST_MPDU BIT(14)
+#define RX_MPDU_START_INFO6_MCAST_BCAST BIT(15)
+#define RX_MPDU_START_INFO6_AST_IDX_NOT_FOUND BIT(16)
+#define RX_MPDU_START_INFO6_AST_IDX_TIMEOUT BIT(17)
+#define RX_MPDU_START_INFO6_POWER_MGMT BIT(18)
+#define RX_MPDU_START_INFO6_NON_QOS BIT(19)
+#define RX_MPDU_START_INFO6_NULL_DATA BIT(20)
+#define RX_MPDU_START_INFO6_MGMT_TYPE BIT(21)
+#define RX_MPDU_START_INFO6_CTRL_TYPE BIT(22)
+#define RX_MPDU_START_INFO6_MORE_DATA BIT(23)
+#define RX_MPDU_START_INFO6_EOSP BIT(24)
+#define RX_MPDU_START_INFO6_FRAGMENT BIT(25)
+#define RX_MPDU_START_INFO6_ORDER BIT(26)
+#define RX_MPDU_START_INFO6_UAPSD_TRIGGER BIT(27)
+#define RX_MPDU_START_INFO6_ENCRYPT_REQUIRED BIT(28)
+#define RX_MPDU_START_INFO6_DIRECTED BIT(29)
+
+#define RX_MPDU_START_RAW_MPDU BIT(0)
+
+struct rx_mpdu_start {
+ __le16 info0;
+ __le16 phy_ppdu_id;
+ __le16 ast_index;
+ __le16 sw_peer_id;
+ __le32 info1;
+ __le32 info2;
+ __le32 pn[4];
+ __le32 peer_meta_data;
+ __le32 info3;
+ __le32 reo_queue_desc_lo;
+ __le32 info4;
+ __le32 info5;
+ __le32 info6;
+ __le16 frame_ctrl;
+ __le16 duration;
+ u8 addr1[ETH_ALEN];
+ u8 addr2[ETH_ALEN];
+ u8 addr3[ETH_ALEN];
+ __le16 seq_ctrl;
+ u8 addr4[ETH_ALEN];
+ __le16 qos_ctrl;
+ __le32 ht_ctrl;
+ __le32 raw;
+} __packed;
+
+/* rx_mpdu_start
+ *
+ * rxpcu_mpdu_filter_in_category
+ * Field indicates what the reason was that this mpdu frame
+ * was allowed to come into the receive path by rxpcu. Values
+ * are defined in enum %RX_DESC_RXPCU_FILTER_*.
+ * Note: for ndp frame, if it was expected because the preceding
+ * NDPA was filter_pass, the setting rxpcu_filter_pass will be
+ * used. This setting will also be used for every ndp frame in
+ * case Promiscuous mode is enabled.
+ *
+ * sw_frame_group_id
+ * SW processes frames based on certain classifications. Values
+ * are defined in enum %RX_DESC_SW_FRAME_GRP_ID_*.
+ *
+ * ndp_frame
+ * Indicates that the received frame was an NDP frame.
+ *
+ * phy_err
+ * Indicates that PHY error was received before MAC received data.
+ *
+ * phy_err_during_mpdu_header
+ * PHY error was received before MAC received the complete MPDU
+ * header which was needed for proper decoding.
+ *
+ * protocol_version_err
+ * RXPCU detected a version error in the frame control field.
+ *
+ * ast_based_lookup_valid
+ * AST based lookup for this frame has found a valid result.
+ *
+ * phy_ppdu_id
+ * A ppdu counter value that PHY increments for every PPDU
+ * received. The counter value wraps around.
+ *
+ * ast_index
+ * This field indicates the index of the AST entry corresponding
+ * to this MPDU. It is provided by the GSE module instantiated in
+ * RXPCU. A value of 0xFFFF indicates an invalid AST index.
+ *
+ * sw_peer_id
+ * This field indicates a unique peer identifier. It is set equal
+ * to field 'sw_peer_id' from the AST entry.
+ *
+ * mpdu_frame_control_valid, mpdu_duration_valid, mpdu_qos_control_valid,
+ * mpdu_ht_control_valid, frame_encryption_info_valid
+ * Indicate that the corresponding fields contain valid entries.
+ *
+ * mac_addr_adx_valid
+ * Corresponding mac_addr_adx_{lo/hi} has valid entries.
+ *
+ * from_ds, to_ds
+ * Valid only when mpdu_frame_control_valid is set. Indicate
+ * whether the frame was received from and/or sent to the DS.
+ *
+ * encrypted
+ * Protected bit from the frame control.
+ *
+ * mpdu_retry
+ * Retry bit from frame control. Only valid when first_msdu is set.
+ *
+ * mpdu_sequence_number
+ * The sequence number from the 802.11 header.
+ *
+ * epd_en
+ * If set, use EPD instead of LPD.
+ *
+ * all_frames_shall_be_encrypted
+ * If set, all frames (data only?) shall be encrypted. If not,
+ * RX CRYPTO shall set an error flag.
+ *
+ * encrypt_type
+ * Values are defined in enum %HAL_ENCRYPT_TYPE_.
+ *
+ * mesh_sta
+ * Indicates a Mesh (11s) STA.
+ *
+ * bssid_hit
+ * BSSID of the incoming frame matched one of the 8 BSSID
+ * register values.
+ *
+ * bssid_number
+ * This number indicates which one out of the 8 BSSID register
+ * values matched the incoming frame.
+ *
+ * tid
+ * TID field in the QoS control field
+ *
+ * pn
+ * The PN number.
+ *
+ * peer_meta_data
+ * Meta data that SW has programmed in the Peer table entry
+ * of the transmitting STA.
+ *
+ * rx_reo_queue_desc_addr_lo
+ * Address (lower 32 bits) of the REO queue descriptor.
+ *
+ * rx_reo_queue_desc_addr_hi
+ * Address (upper 8 bits) of the REO queue descriptor.
+ *
+ * receive_queue_number
+ * Indicates the MPDU queue ID to which this MPDU link
+ * descriptor belongs.
+ *
+ * pre_delim_err_warning
+ * Indicates that a delimiter FCS error was found in between the
+ * previous MPDU and this MPDU. Note that this is just a warning,
+ * and does not mean that this MPDU is corrupted in any way. If
+ * it is, there will be other errors indicated such as FCS or
+ * decrypt errors.
+ *
+ * first_delim_err
+ * Indicates that the first delimiter had a FCS failure.
+ *
+ * key_id
+ * The key ID octet from the IV.
+ *
+ * new_peer_entry
+ * Set if new RX_PEER_ENTRY TLV follows. If clear, RX_PEER_ENTRY
+ * doesn't follow, so the RX DECRYPTION module either uses the old
+ * peer entry or does not decrypt.
+ *
+ * decrypt_needed
+ * When RXPCU sets 'ast_index_not_found' or 'ast_index_timeout',
+ * RXPCU will also ensure that this bit is NOT set. CRYPTO for that
+ * reason only needs to evaluate this bit and none of the other ones.
+ *
+ * decap_type
+ * Used by the OLE during decapsulation. Values are defined in
+ * enum %MPDU_START_DECAP_TYPE_*.
+ *
+ * rx_insert_vlan_c_tag_padding
+ * rx_insert_vlan_s_tag_padding
+ * Insert 4 bytes of all zeros as VLAN tag or double VLAN tag if
+ * the rx payload does not have VLAN.
+ *
+ * strip_vlan_c_tag_decap
+ * strip_vlan_s_tag_decap
+ * Strip VLAN or double VLAN during decapsulation.
+ *
+ * pre_delim_count
+ * The number of delimiters before this MPDU. Note that this
+ * number is cleared at PPDU start. If this MPDU is the first
+ * received MPDU in the PPDU and this MPDU gets filtered-in,
+ * this field will indicate the number of delimiters located
+ * after the last MPDU in the previous PPDU.
+ *
+ * If this MPDU is located after the first received MPDU in
+ * a PPDU, this field will indicate the number of delimiters
+ * located between the previous MPDU and this MPDU.
+ *
+ * ampdu_flag
+ * Received frame was part of an A-MPDU.
+ *
+ * bar_frame
+ * Received frame is a BAR frame
+ *
+ * mpdu_length
+ * MPDU length before decapsulation.
+ *
+ * first_mpdu..directed
+ * See definition in RX attention descriptor
+ *
+ */
+
+enum rx_msdu_start_pkt_type {
+ RX_MSDU_START_PKT_TYPE_11A,
+ RX_MSDU_START_PKT_TYPE_11B,
+ RX_MSDU_START_PKT_TYPE_11N,
+ RX_MSDU_START_PKT_TYPE_11AC,
+ RX_MSDU_START_PKT_TYPE_11AX,
+};
+
+enum rx_msdu_start_sgi {
+ RX_MSDU_START_SGI_0_8_US,
+ RX_MSDU_START_SGI_0_4_US,
+ RX_MSDU_START_SGI_1_6_US,
+ RX_MSDU_START_SGI_3_2_US,
+};
+
+enum rx_msdu_start_recv_bw {
+ RX_MSDU_START_RECV_BW_20MHZ,
+ RX_MSDU_START_RECV_BW_40MHZ,
+ RX_MSDU_START_RECV_BW_80MHZ,
+ RX_MSDU_START_RECV_BW_160MHZ,
+};
+
+enum rx_msdu_start_reception_type {
+ RX_MSDU_START_RECEPTION_TYPE_SU,
+ RX_MSDU_START_RECEPTION_TYPE_DL_MU_MIMO,
+ RX_MSDU_START_RECEPTION_TYPE_DL_MU_OFDMA,
+ RX_MSDU_START_RECEPTION_TYPE_DL_MU_OFDMA_MIMO,
+ RX_MSDU_START_RECEPTION_TYPE_UL_MU_MIMO,
+ RX_MSDU_START_RECEPTION_TYPE_UL_MU_OFDMA,
+ RX_MSDU_START_RECEPTION_TYPE_UL_MU_OFDMA_MIMO,
+};
+
+#define RX_MSDU_START_INFO1_MSDU_LENGTH GENMASK(13, 0)
+#define RX_MSDU_START_INFO1_RSVD_1A BIT(14)
+#define RX_MSDU_START_INFO1_IPSEC_ESP BIT(15)
+#define RX_MSDU_START_INFO1_L3_OFFSET GENMASK(22, 16)
+#define RX_MSDU_START_INFO1_IPSEC_AH BIT(23)
+#define RX_MSDU_START_INFO1_L4_OFFSET GENMASK(31, 24)
+
+#define RX_MSDU_START_INFO2_MSDU_NUMBER GENMASK(7, 0)
+#define RX_MSDU_START_INFO2_DECAP_TYPE GENMASK(9, 8)
+#define RX_MSDU_START_INFO2_IPV4 BIT(10)
+#define RX_MSDU_START_INFO2_IPV6 BIT(11)
+#define RX_MSDU_START_INFO2_TCP BIT(12)
+#define RX_MSDU_START_INFO2_UDP BIT(13)
+#define RX_MSDU_START_INFO2_IP_FRAG BIT(14)
+#define RX_MSDU_START_INFO2_TCP_ONLY_ACK BIT(15)
+#define RX_MSDU_START_INFO2_DA_IS_BCAST_MCAST BIT(16)
+#define RX_MSDU_START_INFO2_SELECTED_TOEPLITZ_HASH GENMASK(18, 17)
+#define RX_MSDU_START_INFO2_IP_FIXED_HDR_VALID BIT(19)
+#define RX_MSDU_START_INFO2_IP_EXTN_HDR_VALID BIT(20)
+#define RX_MSDU_START_INFO2_IP_TCP_UDP_HDR_VALID BIT(21)
+#define RX_MSDU_START_INFO2_MESH_CTRL_PRESENT BIT(22)
+#define RX_MSDU_START_INFO2_LDPC BIT(23)
+#define RX_MSDU_START_INFO2_IP4_IP6_NXT_HDR GENMASK(31, 24)
+#define RX_MSDU_START_INFO2_DECAP_FORMAT GENMASK(9, 8)
+
+#define RX_MSDU_START_INFO3_USER_RSSI GENMASK(7, 0)
+#define RX_MSDU_START_INFO3_PKT_TYPE GENMASK(11, 8)
+#define RX_MSDU_START_INFO3_STBC BIT(12)
+#define RX_MSDU_START_INFO3_SGI GENMASK(14, 13)
+#define RX_MSDU_START_INFO3_RATE_MCS GENMASK(18, 15)
+#define RX_MSDU_START_INFO3_RECV_BW GENMASK(20, 19)
+#define RX_MSDU_START_INFO3_RECEPTION_TYPE GENMASK(23, 21)
+#define RX_MSDU_START_INFO3_MIMO_SS_BITMAP GENMASK(31, 24)
+
+struct rx_msdu_start {
+ __le16 info0;
+ __le16 phy_ppdu_id;
+ __le32 info1;
+ __le32 info2;
+ __le32 toeplitz_hash;
+ __le32 flow_id_toeplitz;
+ __le32 info3;
+ __le32 ppdu_start_timestamp;
+ __le32 phy_meta_data;
+} __packed;
+
+/* rx_msdu_start
+ *
+ * rxpcu_mpdu_filter_in_category
+ * Field indicates what the reason was that this mpdu frame
+ * was allowed to come into the receive path by rxpcu. Values
+ * are defined in enum %RX_DESC_RXPCU_FILTER_*.
+ *
+ * sw_frame_group_id
+ * SW processes frames based on certain classifications. Values
+ * are defined in enum %RX_DESC_SW_FRAME_GRP_ID_*.
+ *
+ * phy_ppdu_id
+ * A ppdu counter value that PHY increments for every PPDU
+ * received. The counter value wraps around.
+ *
+ * msdu_length
+ * MSDU length in bytes after decapsulation.
+ *
+ * ipsec_esp
+ * Set if IPv4/v6 packet is using IPsec ESP.
+ *
+ * l3_offset
+ * Depending upon mode bit, this field either indicates the
+ * L3 offset in bytes from the start of the RX_HEADER or the IP
+ * offset in bytes from the start of the packet after
+ * decapsulation. The latter is only valid if ipv4_proto or
+ * ipv6_proto is set.
+ *
+ * ipsec_ah
+ * Set if IPv4/v6 packet is using IPsec AH
+ *
+ * l4_offset
+ * Depending upon mode bit, this field either indicates the
+ * L4 offset in bytes from the start of RX_HEADER (only valid
+ * if either ipv4_proto or ipv6_proto is set to 1) or indicates
+ * the offset in bytes to the start of TCP or UDP header from
+ * the start of the IP header after decapsulation (Only valid if
+ * tcp_proto or udp_proto is set). The value 0 indicates that
+ * the offset is longer than 127 bytes.
+ *
+ * msdu_number
+ * Indicates the MSDU number within an MPDU. This value is
+ * reset to zero at the start of each MPDU. If the number of
+ * MSDUs exceeds 255 this number will wrap using modulo 256.
+ *
+ * decap_type
+ * Indicates the format after decapsulation. Values are defined in
+ * enum %MPDU_START_DECAP_TYPE_*.
+ *
+ * ipv4_proto
+ * Set if L2 layer indicates IPv4 protocol.
+ *
+ * ipv6_proto
+ * Set if L2 layer indicates IPv6 protocol.
+ *
+ * tcp_proto
+ * Set if the ipv4_proto or ipv6_proto are set and the IP protocol
+ * indicates TCP.
+ *
+ * udp_proto
+ * Set if the ipv4_proto or ipv6_proto are set and the IP protocol
+ * indicates UDP.
+ *
+ * ip_frag
+ * Indicates that either the IP More frag bit is set or IP frag
+ * number is non-zero. If set, this is a fragmented
+ * IP packet.
+ *
+ * tcp_only_ack
+ * Set if only the TCP Ack bit is set in the TCP flags and if
+ * the TCP payload is 0.
+ *
+ * da_is_bcast_mcast
+ * The destination address is broadcast or multicast.
+ *
+ * toeplitz_hash
+ * Actual chosen Hash.
+ * 0 - Toeplitz hash of 2-tuple (IP source address, IP
+ * destination address)
+ * 1 - Toeplitz hash of 4-tuple (IP source address,
+ * IP destination address, L4 (TCP/UDP) source port,
+ * L4 (TCP/UDP) destination port)
+ * 2 - Toeplitz of flow_id
+ * 3 - Zero is used
+ *
+ * ip_fixed_header_valid
+ * Fixed 20-byte IPv4 header or 40-byte IPv6 header parsed
+ * fully within first 256 bytes of the packet
+ *
+ * ip_extn_header_valid
+ * IPv4/IPv6 header, including IPv4 options and
+ * recognizable extension headers parsed fully within first 256
+ * bytes of the packet
+ *
+ * tcp_udp_header_valid
+ * Fixed 20-byte TCP (excluding TCP options) or 8-byte UDP
+ * header parsed fully within first 256 bytes of the packet
+ *
+ * mesh_control_present
+ * When set, this MSDU includes the 'Mesh Control' field
+ *
+ * ldpc
+ *
+ * ip4_protocol_ip6_next_header
+ * For IPv4, this is the 8 bit protocol field. For IPv6 this
+ * is the 8 bit next_header field.
+ *
+ * toeplitz_hash_2_or_4
+ * Controlled by RxOLE register - If register bit set to 0,
+ * Toeplitz hash is computed over 2-tuple IPv4 or IPv6 src/dest
+ * addresses; otherwise, toeplitz hash is computed over 4-tuple
+ * IPv4 or IPv6 src/dest addresses and src/dest ports.
+ *
+ * flow_id_toeplitz
+ * Toeplitz hash of 5-tuple
+ * {IP source address, IP destination address, IP source port, IP
+ * destination port, L4 protocol} in case of non-IPSec.
+ *
+ * In case of IPSec - Toeplitz hash of 4-tuple
+ * {IP source address, IP destination address, SPI, L4 protocol}
+ *
+ * The relevant Toeplitz key registers are provided in RxOLE's
+ * instance of common parser module. These registers are separate
+ * from the Toeplitz keys used by ASE/FSE modules inside RxOLE.
+ * The actual value will be passed on from common parser module
+ * to RxOLE in one of the WHO_* TLVs.
+ *
+ * user_rssi
+ * RSSI for this user
+ *
+ * pkt_type
+ * Values are defined in enum %RX_MSDU_START_PKT_TYPE_*.
+ *
+ * stbc
+ * When set, use STBC transmission rates.
+ *
+ * sgi
+ * Field only valid when pkt type is HT, VHT or HE. Values are
+ * defined in enum %RX_MSDU_START_SGI_*.
+ *
+ * rate_mcs
+ * MCS Rate used.
+ *
+ * receive_bandwidth
+ * Full receive Bandwidth. Values are defined in enum
+ * %RX_MSDU_START_RECV_*.
+ *
+ * reception_type
+ * Indicates what type of reception this is and defined in enum
+ * %RX_MSDU_START_RECEPTION_TYPE_*.
+ *
+ * mimo_ss_bitmap
+ * Field only valid when
+ * Reception_type is RX_MSDU_START_RECEPTION_TYPE_DL_MU_MIMO or
+ * RX_MSDU_START_RECEPTION_TYPE_DL_MU_OFDMA_MIMO.
+ *
+ * Bitmap, with each bit indicating if the related spatial
+ * stream is used for this STA
+ *
+ * LSB related to SS 0
+ *
+ * 0 - spatial stream not used for this reception
+ * 1 - spatial stream used for this reception
+ *
+ * ppdu_start_timestamp
+ * Timestamp that indicates when the PPDU that contained this MPDU
+ * started on the medium.
+ *
+ * phy_meta_data
+ * SW programmed Meta data provided by the PHY. Can be used for SW
+ * to indicate the channel the device is on.
+ */
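+
+/* Usage sketch (illustrative only): the PHY rate information is pulled
+ * out of info3 with the masks above, e.g.
+ *
+ *	u32 info3 = __le32_to_cpu(msdu_start->info3);
+ *	u8 mcs = FIELD_GET(RX_MSDU_START_INFO3_RATE_MCS, info3);
+ *	u8 bw = FIELD_GET(RX_MSDU_START_INFO3_RECV_BW, info3);
+ *	u8 sgi = FIELD_GET(RX_MSDU_START_INFO3_SGI, info3);
+ *
+ * where bw and sgi map onto enum rx_msdu_start_recv_bw and
+ * enum rx_msdu_start_sgi respectively.
+ */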
+
+#define RX_MSDU_END_INFO0_RXPCU_MPDU_FITLER GENMASK(1, 0)
+#define RX_MSDU_END_INFO0_SW_FRAME_GRP_ID GENMASK(8, 2)
+
+#define RX_MSDU_END_INFO1_KEY_ID GENMASK(7, 0)
+#define RX_MSDU_END_INFO1_CCE_SUPER_RULE GENMASK(13, 8)
+#define RX_MSDU_END_INFO1_CCND_TRUNCATE BIT(14)
+#define RX_MSDU_END_INFO1_CCND_CCE_DIS BIT(15)
+#define RX_MSDU_END_INFO1_EXT_WAPI_PN GENMASK(31, 16)
+
+#define RX_MSDU_END_INFO2_REPORTED_MPDU_LEN GENMASK(13, 0)
+#define RX_MSDU_END_INFO2_FIRST_MSDU BIT(14)
+#define RX_MSDU_END_INFO2_LAST_MSDU BIT(15)
+#define RX_MSDU_END_INFO2_SA_IDX_TIMEOUT BIT(16)
+#define RX_MSDU_END_INFO2_DA_IDX_TIMEOUT BIT(17)
+#define RX_MSDU_END_INFO2_MSDU_LIMIT_ERR BIT(18)
+#define RX_MSDU_END_INFO2_FLOW_IDX_TIMEOUT BIT(19)
+#define RX_MSDU_END_INFO2_FLOW_IDX_INVALID BIT(20)
+#define RX_MSDU_END_INFO2_WIFI_PARSER_ERR BIT(21)
+#define RX_MSDU_END_INFO2_AMSDU_PARSET_ERR BIT(22)
+#define RX_MSDU_END_INFO2_SA_IS_VALID BIT(23)
+#define RX_MSDU_END_INFO2_DA_IS_VALID BIT(24)
+#define RX_MSDU_END_INFO2_DA_IS_MCBC BIT(25)
+#define RX_MSDU_END_INFO2_L3_HDR_PADDING GENMASK(27, 26)
+
+#define RX_MSDU_END_INFO3_TCP_FLAG GENMASK(8, 0)
+#define RX_MSDU_END_INFO3_LRO_ELIGIBLE BIT(9)
+
+#define RX_MSDU_END_INFO4_DA_OFFSET GENMASK(5, 0)
+#define RX_MSDU_END_INFO4_SA_OFFSET GENMASK(11, 6)
+#define RX_MSDU_END_INFO4_DA_OFFSET_VALID BIT(12)
+#define RX_MSDU_END_INFO4_SA_OFFSET_VALID BIT(13)
+#define RX_MSDU_END_INFO4_L3_TYPE GENMASK(31, 16)
+
+#define RX_MSDU_END_INFO5_MSDU_DROP BIT(0)
+#define RX_MSDU_END_INFO5_REO_DEST_IND GENMASK(5, 1)
+#define RX_MSDU_END_INFO5_FLOW_IDX GENMASK(25, 6)
+
+struct rx_msdu_end {
+ __le16 info0;
+ __le16 phy_ppdu_id;
+ __le16 ip_hdr_cksum;
+ __le16 tcp_udp_cksum;
+ __le32 info1;
+ __le32 ext_wapi_pn[2];
+ __le32 info2;
+ __le32 ipv6_options_crc;
+ __le32 tcp_seq_num;
+ __le32 tcp_ack_num;
+ __le16 info3;
+ __le16 window_size;
+ __le32 info4;
+ __le32 rule_indication[2];
+ __le16 sa_idx;
+ __le16 da_idx;
+ __le32 info5;
+ __le32 fse_metadata;
+ __le16 cce_metadata;
+ __le16 sa_sw_peer_id;
+} __packed;
+
+/* rx_msdu_end
+ *
+ * rxpcu_mpdu_filter_in_category
+ * Field indicates what the reason was that this mpdu frame
+ * was allowed to come into the receive path by rxpcu. Values
+ * are defined in enum %RX_DESC_RXPCU_FILTER_*.
+ *
+ * sw_frame_group_id
+ * SW processes frames based on certain classifications. Values
+ * are defined in enum %RX_DESC_SW_FRAME_GRP_ID_*.
+ *
+ * phy_ppdu_id
+ * A ppdu counter value that PHY increments for every PPDU
+ * received. The counter value wraps around.
+ *
+ * ip_hdr_cksum
+ * This can include the IP header checksum or the pseudo
+ * header checksum used by TCP/UDP checksum.
+ *
+ * tcp_udp_chksum
+ * The value of the computed TCP/UDP checksum. A mode bit
+ * selects whether this checksum is the full checksum or the
+ * partial checksum which does not include the pseudo header.
+ *
+ * key_id
+ * The key ID octet from the IV. Only valid when first_msdu is set.
+ *
+ * cce_super_rule
+ * Indicates the super filter rule.
+ *
+ * cce_classify_not_done_truncate
+ * Classification failed due to truncated frame.
+ *
+ * cce_classify_not_done_cce_dis
+ * Classification failed due to CCE global disable
+ *
+ * ext_wapi_pn*
+ * Extension PN (packet number) which is only used by WAPI.
+ *
+ * reported_mpdu_length
+ * MPDU length before decapsulation. Only valid when first_msdu is
+ * set. This field is taken directly from the length field of the
+ * A-MPDU delimiter or the preamble length field for non-A-MPDU
+ * frames.
+ *
+ * first_msdu
+ * Indicates the first MSDU of an A-MSDU. If both first_msdu and
+ * last_msdu are set in the MSDU then this is a non-aggregated MSDU
+ * frame: normal MPDU. Interior MSDU in an A-MSDU shall have both
+ * first_msdu and last_msdu bits set to 0.
+ *
+ * last_msdu
+ * Indicates the last MSDU of the A-MSDU. MPDU end status is only
+ * valid when last_msdu is set.
+ *
+ * sa_idx_timeout
+ * Indicates an unsuccessful MAC source address search due to the
+ * expiring of the search timer.
+ *
+ * da_idx_timeout
+ * Indicates an unsuccessful MAC destination address search due to
+ * the expiring of the search timer.
+ *
+ * msdu_limit_error
+ * Indicates that the MSDU threshold was exceeded and thus all the
+ * rest of the MSDUs will not be scattered and will not be
+ * decapsulated but will be DMA'ed in RAW format as a single MSDU.
+ *
+ * flow_idx_timeout
+ * Indicates an unsuccessful flow search due to the expiring of
+ * the search timer.
+ *
+ * flow_idx_invalid
+ * flow id is not valid.
+ *
+ * amsdu_parser_error
+ * A-MSDU could not be properly de-aggregated.
+ *
+ * sa_is_valid
+ * Indicates that OLE found a valid SA entry.
+ *
+ * da_is_valid
+ * Indicates that OLE found a valid DA entry.
+ *
+ * da_is_mcbc
+ * Field Only valid if da_is_valid is set. Indicates the DA address
+ * was a Multicast or Broadcast address.
+ *
+ * l3_header_padding
+ * Number of bytes padded to make sure that the L3 header will
+ * always start on a Dword boundary.
+ *
+ * ipv6_options_crc
+ * 32 bit CRC computed out of IP v6 extension headers.
+ *
+ * tcp_seq_number
+ * TCP sequence number.
+ *
+ * tcp_ack_number
+ * TCP acknowledge number.
+ *
+ * tcp_flag
+ * TCP flags {NS, CWR, ECE, URG, ACK, PSH, RST, SYN, FIN}.
+ *
+ * lro_eligible
+ * Computed out of TCP and IP fields to indicate that this
+ * MSDU is eligible for LRO.
+ *
+ * window_size
+ * TCP receive window size.
+ *
+ * da_offset
+ * Offset into MSDU buffer for DA.
+ *
+ * sa_offset
+ * Offset into MSDU buffer for SA.
+ *
+ * da_offset_valid
+ * da_offset field is valid. This will be set to 0 in case
+ * of a dynamic A-MSDU when DA is compressed.
+ *
+ * sa_offset_valid
+ * sa_offset field is valid. This will be set to 0 in case
+ * of a dynamic A-MSDU when SA is compressed.
+ *
+ * l3_type
+ * The 16-bit type value indicating the type of L3 layer
+ * extracted from LLC/SNAP, set to zero if SNAP is not
+ * available.
+ *
+ * rule_indication
+ * Bitmap indicating which of rules have matched.
+ *
+ * sa_idx
+ * The offset in the address table which matches MAC source address
+ *
+ * da_idx
+ * The offset in the address table which matches MAC destination
+ * address.
+ *
+ * msdu_drop
+ * REO shall drop this MSDU and not forward it to any other ring.
+ *
+ * reo_destination_indication
+ * The id of the reo exit ring to which the msdu frame shall be
+ * pushed after (MPDU level) reordering has finished. Values are defined
+ * in enum %HAL_RX_MSDU_DESC_REO_DEST_IND_.
+ *
+ * flow_idx
+ * Flow table index.
+ *
+ * fse_metadata
+ * FSE related meta data.
+ *
+ * cce_metadata
+ * CCE related meta data.
+ *
+ * sa_sw_peer_id
+ * sw_peer_id from the address search entry corresponding to the
+ * source address of the MSDU.
+ */
+
+enum rx_mpdu_end_rxdma_dest_ring {
+ RX_MPDU_END_RXDMA_DEST_RING_RELEASE,
+ RX_MPDU_END_RXDMA_DEST_RING_FW,
+ RX_MPDU_END_RXDMA_DEST_RING_SW,
+ RX_MPDU_END_RXDMA_DEST_RING_REO,
+};
+
+#define RX_MPDU_END_INFO1_UNSUP_KTYPE_SHORT_FRAME BIT(11)
+#define RX_MPDU_END_INFO1_RX_IN_TX_DECRYPT_BYT BIT(12)
+#define RX_MPDU_END_INFO1_OVERFLOW_ERR BIT(13)
+#define RX_MPDU_END_INFO1_MPDU_LEN_ERR BIT(14)
+#define RX_MPDU_END_INFO1_TKIP_MIC_ERR BIT(15)
+#define RX_MPDU_END_INFO1_DECRYPT_ERR BIT(16)
+#define RX_MPDU_END_INFO1_UNENCRYPTED_FRAME_ERR BIT(17)
+#define RX_MPDU_END_INFO1_PN_FIELDS_VALID BIT(18)
+#define RX_MPDU_END_INFO1_FCS_ERR BIT(19)
+#define RX_MPDU_END_INFO1_MSDU_LEN_ERR BIT(20)
+#define RX_MPDU_END_INFO1_RXDMA0_DEST_RING GENMASK(22, 21)
+#define RX_MPDU_END_INFO1_RXDMA1_DEST_RING GENMASK(24, 23)
+#define RX_MPDU_END_INFO1_DECRYPT_STATUS_CODE GENMASK(27, 25)
+#define RX_MPDU_END_INFO1_RX_BITMAP_NOT_UPD BIT(28)
+
+struct rx_mpdu_end {
+ __le16 info0;
+ __le16 phy_ppdu_id;
+ __le32 info1;
+} __packed;
+
+/* rx_mpdu_end
+ *
+ * rxpcu_mpdu_filter_in_category
+ * Field indicates what the reason was that this mpdu frame
+ * was allowed to come into the receive path by rxpcu. Values
+ * are defined in enum %RX_DESC_RXPCU_FILTER_*.
+ *
+ * sw_frame_group_id
+ * SW processes frames based on certain classifications. Values
+ * are defined in enum %RX_DESC_SW_FRAME_GRP_ID_*.
+ *
+ * phy_ppdu_id
+ * A ppdu counter value that PHY increments for every PPDU
+ * received. The counter value wraps around.
+ *
+ * unsup_ktype_short_frame
+ * This bit will be '1' when WEP or TKIP or WAPI key type is
+ * received for an 11ah short frame. Crypto will bypass the received
+ * packet without decryption to RxOLE after setting this bit.
+ *
+ * rx_in_tx_decrypt_byp
+ * Indicates that RX packet is not decrypted as Crypto is
+ * busy with TX packet processing.
+ *
+ * overflow_err
+ * RXPCU Receive FIFO ran out of space to receive the full MPDU.
+ * Therefore this MPDU is terminated early and is thus corrupted.
+ *
+ * This MPDU will not be ACKed.
+ *
+ * RXPCU might still be able to correctly receive the following
+ * MPDUs in the PPDU if enough fifo space became available in time.
+ *
+ * mpdu_length_err
+ * Set by RXPCU if the expected MPDU length does not correspond
+ * with the actually received number of bytes in the MPDU.
+ *
+ * tkip_mic_err
+ * Set by Rx crypto when crypto detected a TKIP MIC error for
+ * this MPDU.
+ *
+ * decrypt_err
+ * Set by RX CRYPTO when CRYPTO detected a decrypt error for this
+ * MPDU or CRYPTO received an encrypted frame, but did not get a
+ * valid corresponding key id in the peer entry.
+ *
+ * unencrypted_frame_err
+ * Set by RX CRYPTO when CRYPTO detected an unencrypted frame while
+ * in the peer entry field 'All_frames_shall_be_encrypted' is set.
+ *
+ * pn_fields_contain_valid_info
+ * Set by RX CRYPTO to indicate that there is a valid PN field
+ * present in this MPDU.
+ *
+ * fcs_err
+ * Set by RXPCU when there is an FCS error detected for this MPDU.
+ *
+ * msdu_length_err
+ * Set by RXOLE when there is an msdu length error detected
+ * in at least 1 of the MSDUs embedded within the MPDU.
+ *
+ * rxdma0_destination_ring
+ * rxdma1_destination_ring
+ * The ring to which RXDMA0/1 shall push the frame, assuming
+ * no MPDU level errors are detected. In case of MPDU level
+ * errors, RXDMA0/1 might change the RXDMA0/1 destination. Values
+ * are defined in %enum RX_MPDU_END_RXDMA_DEST_RING_*.
+ *
+ * decrypt_status_code
+ * Field provides insight into the decryption performed. Values
+ * are defined in enum %RX_DESC_DECRYPT_STATUS_CODE_*.
+ *
+ * rx_bitmap_not_updated
+ * Frame is received, but RXPCU could not update the receive bitmap
+ * due to (temporary) fifo constraints.
+ */
+
+/* Padding bytes to avoid TLVs spanning across a 128 byte boundary */
+#define HAL_RX_DESC_PADDING0_BYTES 4
+#define HAL_RX_DESC_PADDING1_BYTES 16
+
+#define HAL_RX_DESC_HDR_STATUS_LEN 120
+
+struct hal_rx_desc {
+ __le32 msdu_end_tag;
+ struct rx_msdu_end msdu_end;
+ __le32 rx_attn_tag;
+ struct rx_attention attention;
+ __le32 msdu_start_tag;
+ struct rx_msdu_start msdu_start;
+ u8 rx_padding0[HAL_RX_DESC_PADDING0_BYTES];
+ __le32 mpdu_start_tag;
+ struct rx_mpdu_start mpdu_start;
+ __le32 mpdu_end_tag;
+ struct rx_mpdu_end mpdu_end;
+ u8 rx_padding1[HAL_RX_DESC_PADDING1_BYTES];
+ __le32 hdr_status_tag;
+ __le32 phy_ppdu_id;
+ u8 hdr_status[HAL_RX_DESC_HDR_STATUS_LEN];
+ u8 msdu_payload[0];
+} __packed;
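+
+/* Layout note (sketch, not part of this patch): the tag/padding layout
+ * above assumes no TLV crosses a 128 byte boundary. If that assumption
+ * ever needs a compile-time check, something along the lines of
+ *
+ *	BUILD_BUG_ON(offsetof(struct hal_rx_desc, msdu_payload) % 128);
+ *
+ * could be added, though the exact offsets worth asserting on are
+ * hardware specific and hypothetical here.
+ */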
+
+#endif /* ATH11K_RX_DESC_H */
diff --git a/drivers/net/wireless/ath/ath11k/testmode.c b/drivers/net/wireless/ath/ath11k/testmode.c
new file mode 100644
index 000000000000..d2dc9db01491
--- /dev/null
+++ b/drivers/net/wireless/ath/ath11k/testmode.c
@@ -0,0 +1,199 @@
+// SPDX-License-Identifier: BSD-3-Clause-Clear
+/*
+ * Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
+ */
+
+#include "testmode.h"
+#include <net/netlink.h>
+#include "debug.h"
+#include "wmi.h"
+#include "hw.h"
+#include "core.h"
+#include "testmode_i.h"
+
+static const struct nla_policy ath11k_tm_policy[ATH11K_TM_ATTR_MAX + 1] = {
+ [ATH11K_TM_ATTR_CMD] = { .type = NLA_U32 },
+ [ATH11K_TM_ATTR_DATA] = { .type = NLA_BINARY,
+ .len = ATH11K_TM_DATA_MAX_LEN },
+ [ATH11K_TM_ATTR_WMI_CMDID] = { .type = NLA_U32 },
+ [ATH11K_TM_ATTR_VERSION_MAJOR] = { .type = NLA_U32 },
+ [ATH11K_TM_ATTR_VERSION_MINOR] = { .type = NLA_U32 },
+};
+
+/* Returns true if callee consumes the skb and the skb should be discarded.
+ * Returns false if skb is not used. Does not sleep.
+ */
+bool ath11k_tm_event_wmi(struct ath11k *ar, u32 cmd_id, struct sk_buff *skb)
+{
+ struct sk_buff *nl_skb;
+ bool consumed;
+ int ret;
+
+ ath11k_dbg(ar->ab, ATH11K_DBG_TESTMODE,
+ "testmode event wmi cmd_id %d skb %pK skb->len %d\n",
+ cmd_id, skb, skb->len);
+
+ ath11k_dbg_dump(ar->ab, ATH11K_DBG_TESTMODE, NULL, "", skb->data, skb->len);
+
+ spin_lock_bh(&ar->data_lock);
+
+ consumed = true;
+
+ nl_skb = cfg80211_testmode_alloc_event_skb(ar->hw->wiphy,
+ 2 * sizeof(u32) + skb->len,
+ GFP_ATOMIC);
+ if (!nl_skb) {
+ ath11k_warn(ar->ab,
+ "failed to allocate skb for testmode wmi event\n");
+ goto out;
+ }
+
+ ret = nla_put_u32(nl_skb, ATH11K_TM_ATTR_CMD, ATH11K_TM_CMD_WMI);
+ if (ret) {
+ ath11k_warn(ar->ab,
+ "failed to to put testmode wmi event cmd attribute: %d\n",
+ ret);
+ kfree_skb(nl_skb);
+ goto out;
+ }
+
+ ret = nla_put_u32(nl_skb, ATH11K_TM_ATTR_WMI_CMDID, cmd_id);
+ if (ret) {
+ ath11k_warn(ar->ab,
+ "failed to to put testmode wmi even cmd_id: %d\n",
+ ret);
+ kfree_skb(nl_skb);
+ goto out;
+ }
+
+ ret = nla_put(nl_skb, ATH11K_TM_ATTR_DATA, skb->len, skb->data);
+ if (ret) {
+ ath11k_warn(ar->ab,
+ "failed to copy skb to testmode wmi event: %d\n",
+ ret);
+ kfree_skb(nl_skb);
+ goto out;
+ }
+
+ cfg80211_testmode_event(nl_skb, GFP_ATOMIC);
+
+out:
+ spin_unlock_bh(&ar->data_lock);
+
+ return consumed;
+}
+
+static int ath11k_tm_cmd_get_version(struct ath11k *ar, struct nlattr *tb[])
+{
+ struct sk_buff *skb;
+ int ret;
+
+ ath11k_dbg(ar->ab, ATH11K_DBG_TESTMODE,
+ "testmode cmd get version_major %d version_minor %d\n",
+ ATH11K_TESTMODE_VERSION_MAJOR,
+ ATH11K_TESTMODE_VERSION_MINOR);
+
+ skb = cfg80211_testmode_alloc_reply_skb(ar->hw->wiphy,
+ nla_total_size(sizeof(u32)));
+ if (!skb)
+ return -ENOMEM;
+
+ ret = nla_put_u32(skb, ATH11K_TM_ATTR_VERSION_MAJOR,
+ ATH11K_TESTMODE_VERSION_MAJOR);
+ if (ret) {
+ kfree_skb(skb);
+ return ret;
+ }
+
+ ret = nla_put_u32(skb, ATH11K_TM_ATTR_VERSION_MINOR,
+ ATH11K_TESTMODE_VERSION_MINOR);
+ if (ret) {
+ kfree_skb(skb);
+ return ret;
+ }
+
+ return cfg80211_testmode_reply(skb);
+}
+
+static int ath11k_tm_cmd_wmi(struct ath11k *ar, struct nlattr *tb[])
+{
+ struct ath11k_pdev_wmi *wmi = ar->wmi;
+ struct sk_buff *skb;
+ u32 cmd_id, buf_len;
+ int ret;
+ void *buf;
+
+ mutex_lock(&ar->conf_mutex);
+
+ if (ar->state != ATH11K_STATE_ON) {
+ ret = -ENETDOWN;
+ goto out;
+ }
+
+ if (!tb[ATH11K_TM_ATTR_DATA]) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ if (!tb[ATH11K_TM_ATTR_WMI_CMDID]) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ buf = nla_data(tb[ATH11K_TM_ATTR_DATA]);
+ buf_len = nla_len(tb[ATH11K_TM_ATTR_DATA]);
+ cmd_id = nla_get_u32(tb[ATH11K_TM_ATTR_WMI_CMDID]);
+
+ ath11k_dbg(ar->ab, ATH11K_DBG_TESTMODE,
+ "testmode cmd wmi cmd_id %d buf %pK buf_len %d\n",
+ cmd_id, buf, buf_len);
+
+ ath11k_dbg_dump(ar->ab, ATH11K_DBG_TESTMODE, NULL, "", buf, buf_len);
+
+ skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, buf_len);
+ if (!skb) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ memcpy(skb->data, buf, buf_len);
+
+ ret = ath11k_wmi_cmd_send(wmi, skb, cmd_id);
+ if (ret) {
+ dev_kfree_skb(skb);
+ ath11k_warn(ar->ab, "failed to transmit wmi command (testmode): %d\n",
+ ret);
+ goto out;
+ }
+
+ ret = 0;
+
+out:
+ mutex_unlock(&ar->conf_mutex);
+ return ret;
+}
+
+int ath11k_tm_cmd(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ void *data, int len)
+{
+ struct ath11k *ar = hw->priv;
+ struct nlattr *tb[ATH11K_TM_ATTR_MAX + 1];
+ int ret;
+
+ ret = nla_parse(tb, ATH11K_TM_ATTR_MAX, data, len, ath11k_tm_policy,
+ NULL);
+ if (ret)
+ return ret;
+
+ if (!tb[ATH11K_TM_ATTR_CMD])
+ return -EINVAL;
+
+ switch (nla_get_u32(tb[ATH11K_TM_ATTR_CMD])) {
+ case ATH11K_TM_CMD_GET_VERSION:
+ return ath11k_tm_cmd_get_version(ar, tb);
+ case ATH11K_TM_CMD_WMI:
+ return ath11k_tm_cmd_wmi(ar, tb);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
diff --git a/drivers/net/wireless/ath/ath11k/testmode.h b/drivers/net/wireless/ath/ath11k/testmode.h
new file mode 100644
index 000000000000..aaa122ed9069
--- /dev/null
+++ b/drivers/net/wireless/ath/ath11k/testmode.h
@@ -0,0 +1,29 @@
+/* SPDX-License-Identifier: BSD-3-Clause-Clear */
+/*
+ * Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
+ */
+
+#include "core.h"
+
+#ifdef CONFIG_NL80211_TESTMODE
+
+bool ath11k_tm_event_wmi(struct ath11k *ar, u32 cmd_id, struct sk_buff *skb);
+int ath11k_tm_cmd(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ void *data, int len);
+
+#else
+
+static inline bool ath11k_tm_event_wmi(struct ath11k *ar, u32 cmd_id,
+ struct sk_buff *skb)
+{
+ return false;
+}
+
+static inline int ath11k_tm_cmd(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ void *data, int len)
+{
+ return 0;
+}
+
+#endif
diff --git a/drivers/net/wireless/ath/ath11k/testmode_i.h b/drivers/net/wireless/ath/ath11k/testmode_i.h
new file mode 100644
index 000000000000..4bae2a9eeea4
--- /dev/null
+++ b/drivers/net/wireless/ath/ath11k/testmode_i.h
@@ -0,0 +1,50 @@
+/* SPDX-License-Identifier: BSD-3-Clause-Clear */
+/*
+ * Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
+ */
+
+/* "API" level of the ath11k testmode interface. Bump it after every
+ * incompatible interface change.
+ */
+#define ATH11K_TESTMODE_VERSION_MAJOR 1
+
+/* Bump this after every _compatible_ interface change, for example
+ * addition of a new command or an attribute.
+ */
+#define ATH11K_TESTMODE_VERSION_MINOR 0
+
+#define ATH11K_TM_DATA_MAX_LEN 5000
+
+enum ath11k_tm_attr {
+ __ATH11K_TM_ATTR_INVALID = 0,
+ ATH11K_TM_ATTR_CMD = 1,
+ ATH11K_TM_ATTR_DATA = 2,
+ ATH11K_TM_ATTR_WMI_CMDID = 3,
+ ATH11K_TM_ATTR_VERSION_MAJOR = 4,
+ ATH11K_TM_ATTR_VERSION_MINOR = 5,
+ ATH11K_TM_ATTR_WMI_OP_VERSION = 6,
+
+ /* keep last */
+ __ATH11K_TM_ATTR_AFTER_LAST,
+ ATH11K_TM_ATTR_MAX = __ATH11K_TM_ATTR_AFTER_LAST - 1,
+};
+
+/* All ath11k testmode interface commands specified in
+ * ATH11K_TM_ATTR_CMD
+ */
+enum ath11k_tm_cmd {
+ /* Returns the supported ath11k testmode interface version in
+ * ATH11K_TM_ATTR_VERSION_MAJOR and ATH11K_TM_ATTR_VERSION_MINOR.
+ * Always guaranteed to work. User space uses this to verify it's
+ * using the correct version of the testmode interface.
+ */
+ ATH11K_TM_CMD_GET_VERSION = 0,
+
+ /* The command used to transmit a WMI command to the firmware and
+ * the event to receive WMI events from the firmware. The payload is
+ * carried without the struct wmi_cmd_hdr header, i.e. only the WMI
+ * payload. The command id is provided with ATH11K_TM_ATTR_WMI_CMDID
+ * and the payload in ATH11K_TM_ATTR_DATA.
+ */
+ ATH11K_TM_CMD_WMI = 1,
+};
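+
+/* Illustrative user space sketch (not part of this patch): sending
+ * ATH11K_TM_CMD_GET_VERSION through nl80211 with libnl-3. The testmode
+ * attributes above are nested inside NL80211_ATTR_TESTDATA; "wlan0" and
+ * the missing error handling are placeholders.
+ *
+ *	struct nl_sock *sk = nl_socket_alloc();
+ *	struct nl_msg *msg = nlmsg_alloc();
+ *	struct nlattr *td;
+ *	int family;
+ *
+ *	genl_connect(sk);
+ *	family = genl_ctrl_resolve(sk, "nl80211");
+ *	genlmsg_put(msg, NL_AUTO_PORT, NL_AUTO_SEQ, family, 0, 0,
+ *		    NL80211_CMD_TESTMODE, 0);
+ *	nla_put_u32(msg, NL80211_ATTR_IFINDEX, if_nametoindex("wlan0"));
+ *	td = nla_nest_start(msg, NL80211_ATTR_TESTDATA);
+ *	nla_put_u32(msg, ATH11K_TM_ATTR_CMD, ATH11K_TM_CMD_GET_VERSION);
+ *	nla_nest_end(msg, td);
+ *	nl_send_auto(sk, msg);
+ */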
diff --git a/drivers/net/wireless/ath/ath11k/trace.c b/drivers/net/wireless/ath/ath11k/trace.c
new file mode 100644
index 000000000000..f0cc49ba0387
--- /dev/null
+++ b/drivers/net/wireless/ath/ath11k/trace.c
@@ -0,0 +1,9 @@
+// SPDX-License-Identifier: BSD-3-Clause-Clear
+/*
+ * Copyright (c) 2019 The Linux Foundation. All rights reserved.
+ */
+
+#include <linux/module.h>
+
+#define CREATE_TRACE_POINTS
+#include "trace.h"
diff --git a/drivers/net/wireless/ath/ath11k/trace.h b/drivers/net/wireless/ath/ath11k/trace.h
new file mode 100644
index 000000000000..8700a622be7b
--- /dev/null
+++ b/drivers/net/wireless/ath/ath11k/trace.h
@@ -0,0 +1,113 @@
+/* SPDX-License-Identifier: BSD-3-Clause-Clear */
+/*
+ * Copyright (c) 2019 The Linux Foundation. All rights reserved.
+ */
+
+#if !defined(_TRACE_H_) || defined(TRACE_HEADER_MULTI_READ)
+
+#include <linux/tracepoint.h>
+#include "core.h"
+
+#define _TRACE_H_
+
+/* create empty functions when tracing is disabled */
+#if !defined(CONFIG_ATH11K_TRACING)
+#undef TRACE_EVENT
+#define TRACE_EVENT(name, proto, ...) \
+static inline void trace_ ## name(proto) {}
+#endif /* !CONFIG_ATH11K_TRACING */
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM ath11k
+
+TRACE_EVENT(ath11k_htt_pktlog,
+ TP_PROTO(struct ath11k *ar, const void *buf, u16 buf_len),
+
+ TP_ARGS(ar, buf, buf_len),
+
+ TP_STRUCT__entry(
+ __string(device, dev_name(ar->ab->dev))
+ __string(driver, dev_driver_string(ar->ab->dev))
+ __field(u16, buf_len)
+ __dynamic_array(u8, pktlog, buf_len)
+ ),
+
+ TP_fast_assign(
+ __assign_str(device, dev_name(ar->ab->dev));
+ __assign_str(driver, dev_driver_string(ar->ab->dev));
+ __entry->buf_len = buf_len;
+ memcpy(__get_dynamic_array(pktlog), buf, buf_len);
+ ),
+
+ TP_printk(
+ "%s %s size %hu",
+ __get_str(driver),
+ __get_str(device),
+ __entry->buf_len
+ )
+);
+
+TRACE_EVENT(ath11k_htt_ppdu_stats,
+ TP_PROTO(struct ath11k *ar, const void *data, size_t len),
+
+ TP_ARGS(ar, data, len),
+
+ TP_STRUCT__entry(
+ __string(device, dev_name(ar->ab->dev))
+ __string(driver, dev_driver_string(ar->ab->dev))
+ __field(u16, len)
+ __dynamic_array(u8, ppdu, len)
+ ),
+
+ TP_fast_assign(
+ __assign_str(device, dev_name(ar->ab->dev));
+ __assign_str(driver, dev_driver_string(ar->ab->dev));
+ __entry->len = len;
+ memcpy(__get_dynamic_array(ppdu), data, len);
+ ),
+
+ TP_printk(
+ "%s %s ppdu len %d",
+ __get_str(driver),
+ __get_str(device),
+ __entry->len
+ )
+);
+
+TRACE_EVENT(ath11k_htt_rxdesc,
+ TP_PROTO(struct ath11k *ar, const void *data, size_t len),
+
+ TP_ARGS(ar, data, len),
+
+ TP_STRUCT__entry(
+ __string(device, dev_name(ar->ab->dev))
+ __string(driver, dev_driver_string(ar->ab->dev))
+ __field(u16, len)
+ __dynamic_array(u8, rxdesc, len)
+ ),
+
+ TP_fast_assign(
+ __assign_str(device, dev_name(ar->ab->dev));
+ __assign_str(driver, dev_driver_string(ar->ab->dev));
+ __entry->len = len;
+ memcpy(__get_dynamic_array(rxdesc), data, len);
+ ),
+
+ TP_printk(
+ "%s %s rxdesc len %d",
+ __get_str(driver),
+ __get_str(device),
+ __entry->len
+ )
+);
+
+#endif /* _TRACE_H_ || TRACE_HEADER_MULTI_READ */
+
+/* we don't want to use include/trace/events */
+#undef TRACE_INCLUDE_PATH
+#define TRACE_INCLUDE_PATH .
+#undef TRACE_INCLUDE_FILE
+#define TRACE_INCLUDE_FILE trace
+
+/* This part must be outside protection */
+#include <trace/define_trace.h>
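+
+/* Usage note (illustrative, not part of this patch): with
+ * CONFIG_ATH11K_TRACING enabled these tracepoints appear under
+ * /sys/kernel/debug/tracing/events/ath11k/ and can be captured with,
+ * for example, trace-cmd record -e ath11k.
+ */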
diff --git a/drivers/net/wireless/ath/ath11k/wmi.c b/drivers/net/wireless/ath/ath11k/wmi.c
new file mode 100644
index 000000000000..a9b301ceb24b
--- /dev/null
+++ b/drivers/net/wireless/ath/ath11k/wmi.c
@@ -0,0 +1,5810 @@
+// SPDX-License-Identifier: BSD-3-Clause-Clear
+/*
+ * Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
+ */
+#include <linux/skbuff.h>
+#include <linux/ctype.h>
+#include <net/mac80211.h>
+#include <net/cfg80211.h>
+#include <linux/completion.h>
+#include <linux/if_ether.h>
+#include <linux/types.h>
+#include <linux/pci.h>
+#include <linux/uuid.h>
+#include <linux/time.h>
+#include <linux/of.h>
+#include "core.h"
+#include "debug.h"
+#include "mac.h"
+#include "hw.h"
+#include "peer.h"
+
+struct wmi_tlv_policy {
+ size_t min_len;
+};
+
+struct wmi_tlv_svc_ready_parse {
+ bool wmi_svc_bitmap_done;
+};
+
+struct wmi_tlv_svc_rdy_ext_parse {
+ struct ath11k_service_ext_param param;
+ struct wmi_soc_mac_phy_hw_mode_caps *hw_caps;
+ struct wmi_hw_mode_capabilities *hw_mode_caps;
+ u32 n_hw_mode_caps;
+ u32 tot_phy_id;
+ struct wmi_hw_mode_capabilities pref_hw_mode_caps;
+ struct wmi_mac_phy_capabilities *mac_phy_caps;
+ u32 n_mac_phy_caps;
+ struct wmi_soc_hal_reg_capabilities *soc_hal_reg_caps;
+ struct wmi_hal_reg_capabilities_ext *ext_hal_reg_caps;
+ u32 n_ext_hal_reg_caps;
+ bool hw_mode_done;
+ bool mac_phy_done;
+ bool ext_hal_reg_done;
+};
+
+struct wmi_tlv_rdy_parse {
+ u32 num_extra_mac_addr;
+};
+
+static const struct wmi_tlv_policy wmi_tlv_policies[] = {
+ [WMI_TAG_ARRAY_BYTE]
+ = { .min_len = 0 },
+ [WMI_TAG_ARRAY_UINT32]
+ = { .min_len = 0 },
+ [WMI_TAG_SERVICE_READY_EVENT]
+ = { .min_len = sizeof(struct wmi_service_ready_event) },
+ [WMI_TAG_SERVICE_READY_EXT_EVENT]
+ = { .min_len = sizeof(struct wmi_service_ready_ext_event) },
+ [WMI_TAG_SOC_MAC_PHY_HW_MODE_CAPS]
+ = { .min_len = sizeof(struct wmi_soc_mac_phy_hw_mode_caps) },
+ [WMI_TAG_SOC_HAL_REG_CAPABILITIES]
+ = { .min_len = sizeof(struct wmi_soc_hal_reg_capabilities) },
+ [WMI_TAG_VDEV_START_RESPONSE_EVENT]
+ = { .min_len = sizeof(struct wmi_vdev_start_resp_event) },
+ [WMI_TAG_PEER_DELETE_RESP_EVENT]
+ = { .min_len = sizeof(struct wmi_peer_delete_resp_event) },
+ [WMI_TAG_OFFLOAD_BCN_TX_STATUS_EVENT]
+ = { .min_len = sizeof(struct wmi_bcn_tx_status_event) },
+ [WMI_TAG_VDEV_STOPPED_EVENT]
+ = { .min_len = sizeof(struct wmi_vdev_stopped_event) },
+ [WMI_TAG_REG_CHAN_LIST_CC_EVENT]
+ = { .min_len = sizeof(struct wmi_reg_chan_list_cc_event) },
+ [WMI_TAG_MGMT_RX_HDR]
+ = { .min_len = sizeof(struct wmi_mgmt_rx_hdr) },
+ [WMI_TAG_MGMT_TX_COMPL_EVENT]
+ = { .min_len = sizeof(struct wmi_mgmt_tx_compl_event) },
+ [WMI_TAG_SCAN_EVENT]
+ = { .min_len = sizeof(struct wmi_scan_event) },
+ [WMI_TAG_PEER_STA_KICKOUT_EVENT]
+ = { .min_len = sizeof(struct wmi_peer_sta_kickout_event) },
+ [WMI_TAG_ROAM_EVENT]
+ = { .min_len = sizeof(struct wmi_roam_event) },
+ [WMI_TAG_CHAN_INFO_EVENT]
+ = { .min_len = sizeof(struct wmi_chan_info_event) },
+ [WMI_TAG_PDEV_BSS_CHAN_INFO_EVENT]
+ = { .min_len = sizeof(struct wmi_pdev_bss_chan_info_event) },
+ [WMI_TAG_VDEV_INSTALL_KEY_COMPLETE_EVENT]
+ = { .min_len = sizeof(struct wmi_vdev_install_key_compl_event) },
+ [WMI_TAG_READY_EVENT]
+ = { .min_len = sizeof(struct wmi_ready_event) },
+ [WMI_TAG_SERVICE_AVAILABLE_EVENT]
+ = { .min_len = sizeof(struct wmi_service_available_event) },
+ [WMI_TAG_PEER_ASSOC_CONF_EVENT]
+ = { .min_len = sizeof(struct wmi_peer_assoc_conf_event) },
+ [WMI_TAG_STATS_EVENT]
+ = { .min_len = sizeof(struct wmi_stats_event) },
+ [WMI_TAG_PDEV_CTL_FAILSAFE_CHECK_EVENT]
+ = { .min_len = sizeof(struct wmi_pdev_ctl_failsafe_chk_event) },
+};
+
+#define PRIMAP(_hw_mode_) \
+ [_hw_mode_] = _hw_mode_##_PRI
+
+static const int ath11k_hw_mode_pri_map[] = {
+ PRIMAP(WMI_HOST_HW_MODE_SINGLE),
+ PRIMAP(WMI_HOST_HW_MODE_DBS),
+ PRIMAP(WMI_HOST_HW_MODE_SBS_PASSIVE),
+ PRIMAP(WMI_HOST_HW_MODE_SBS),
+ PRIMAP(WMI_HOST_HW_MODE_DBS_SBS),
+ PRIMAP(WMI_HOST_HW_MODE_DBS_OR_SBS),
+ /* keep last */
+ PRIMAP(WMI_HOST_HW_MODE_MAX),
+};
+
+static int
+ath11k_wmi_tlv_iter(struct ath11k_base *ab, const void *ptr, size_t len,
+ int (*iter)(struct ath11k_base *ab, u16 tag, u16 len,
+ const void *ptr, void *data),
+ void *data)
+{
+ const void *begin = ptr;
+ const struct wmi_tlv *tlv;
+ u16 tlv_tag, tlv_len;
+ int ret;
+
+ while (len > 0) {
+ if (len < sizeof(*tlv)) {
+ ath11k_err(ab, "wmi tlv parse failure at byte %zd (%zu bytes left, %zu expected)\n",
+ ptr - begin, len, sizeof(*tlv));
+ return -EINVAL;
+ }
+
+ tlv = ptr;
+ tlv_tag = FIELD_GET(WMI_TLV_TAG, tlv->header);
+ tlv_len = FIELD_GET(WMI_TLV_LEN, tlv->header);
+ ptr += sizeof(*tlv);
+ len -= sizeof(*tlv);
+
+ if (tlv_len > len) {
+ ath11k_err(ab, "wmi tlv parse failure of tag %hhu at byte %zd (%zu bytes left, %hhu expected)\n",
+ tlv_tag, ptr - begin, len, tlv_len);
+ return -EINVAL;
+ }
+
+ if (tlv_tag < ARRAY_SIZE(wmi_tlv_policies) &&
+ wmi_tlv_policies[tlv_tag].min_len &&
+ wmi_tlv_policies[tlv_tag].min_len > tlv_len) {
+ ath11k_err(ab, "wmi tlv parse failure of tag %hhu at byte %zd (%hhu bytes is less than min length %zu)\n",
+ tlv_tag, ptr - begin, tlv_len,
+ wmi_tlv_policies[tlv_tag].min_len);
+ return -EINVAL;
+ }
+
+ ret = iter(ab, tlv_tag, tlv_len, ptr, data);
+ if (ret)
+ return ret;
+
+ ptr += tlv_len;
+ len -= tlv_len;
+ }
+
+ return 0;
+}
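+
+/* Illustrative sketch (not part of this patch): the wire format the
+ * iterator above consumes. Each TLV is one 32-bit header word carrying
+ * the tag and value length, immediately followed by the value bytes;
+ * building one looks like the TX paths later in this file:
+ *
+ *	struct wmi_tlv *tlv = ptr;
+ *
+ *	tlv->header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_ARRAY_BYTE) |
+ *		      FIELD_PREP(WMI_TLV_LEN, payload_len);
+ *	memcpy(tlv->value, payload, payload_len);
+ */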
+
+static int ath11k_wmi_tlv_iter_parse(struct ath11k_base *ab, u16 tag, u16 len,
+ const void *ptr, void *data)
+{
+ const void **tb = data;
+
+ if (tag < WMI_TAG_MAX)
+ tb[tag] = ptr;
+
+ return 0;
+}
+
+static int ath11k_wmi_tlv_parse(struct ath11k_base *ar, const void **tb,
+ const void *ptr, size_t len)
+{
+ return ath11k_wmi_tlv_iter(ar, ptr, len, ath11k_wmi_tlv_iter_parse,
+ (void *)tb);
+}
+
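+/* Parse a TLV stream into a table indexed by tag. On success the caller
+ * owns the returned table and must kfree() it; on failure an ERR_PTR()
+ * is returned and there is nothing to free.
+ */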
+static const void **
+ath11k_wmi_tlv_parse_alloc(struct ath11k_base *ab, const void *ptr,
+ size_t len, gfp_t gfp)
+{
+ const void **tb;
+ int ret;
+
+ tb = kcalloc(WMI_TAG_MAX, sizeof(*tb), gfp);
+ if (!tb)
+ return ERR_PTR(-ENOMEM);
+
+ ret = ath11k_wmi_tlv_parse(ab, tb, ptr, len);
+ if (ret) {
+ kfree(tb);
+ return ERR_PTR(ret);
+ }
+
+ return tb;
+}
+
+static int ath11k_wmi_cmd_send_nowait(struct ath11k_pdev_wmi *wmi, struct sk_buff *skb,
+ u32 cmd_id)
+{
+ struct ath11k_skb_cb *skb_cb = ATH11K_SKB_CB(skb);
+ struct ath11k_base *ab = wmi->wmi_ab->ab;
+ struct wmi_cmd_hdr *cmd_hdr;
+ int ret;
+ u32 cmd = 0;
+
+ if (skb_push(skb, sizeof(struct wmi_cmd_hdr)) == NULL)
+ return -ENOMEM;
+
+ cmd |= FIELD_PREP(WMI_CMD_HDR_CMD_ID, cmd_id);
+
+ cmd_hdr = (struct wmi_cmd_hdr *)skb->data;
+ cmd_hdr->cmd_id = cmd;
+
+ memset(skb_cb, 0, sizeof(*skb_cb));
+ ret = ath11k_htc_send(&ab->htc, wmi->eid, skb);
+
+ if (ret)
+ goto err_pull;
+
+ return 0;
+
+err_pull:
+ skb_pull(skb, sizeof(struct wmi_cmd_hdr));
+ return ret;
+}
+
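+/* Send a WMI command and sleep until HTC tx credits are available or
+ * WMI_SEND_TIMEOUT_HZ expires. -EAGAIN from the nowait variant above
+ * means we are out of credits; the HTC credit-report path is expected
+ * to wake tx_credits_wq so the send can be retried.
+ */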
+int ath11k_wmi_cmd_send(struct ath11k_pdev_wmi *wmi, struct sk_buff *skb,
+ u32 cmd_id)
+{
+ struct ath11k_wmi_base *wmi_sc = wmi->wmi_ab;
+ int ret = -EOPNOTSUPP;
+
+ might_sleep();
+
+ wait_event_timeout(wmi_sc->tx_credits_wq, ({
+ ret = ath11k_wmi_cmd_send_nowait(wmi, skb, cmd_id);
+
+ if (ret && test_bit(ATH11K_FLAG_CRASH_FLUSH, &wmi_sc->ab->dev_flags))
+ ret = -ESHUTDOWN;
+
+ (ret != -EAGAIN);
+ }), WMI_SEND_TIMEOUT_HZ);
+
+ if (ret == -EAGAIN)
+ ath11k_warn(wmi_sc->ab, "wmi command %d timeout\n", cmd_id);
+
+ return ret;
+}
+
+static int ath11k_pull_svc_ready_ext(struct ath11k_pdev_wmi *wmi_handle,
+ const void *ptr,
+ struct ath11k_service_ext_param *param)
+{
+ const struct wmi_service_ready_ext_event *ev = ptr;
+
+ if (!ev)
+ return -EINVAL;
+
+ /* Move this to host based bitmap */
+ param->default_conc_scan_config_bits = ev->default_conc_scan_config_bits;
+ param->default_fw_config_bits = ev->default_fw_config_bits;
+ param->he_cap_info = ev->he_cap_info;
+ param->mpdu_density = ev->mpdu_density;
+ param->max_bssid_rx_filters = ev->max_bssid_rx_filters;
+ memcpy(&param->ppet, &ev->ppet, sizeof(param->ppet));
+
+ return 0;
+}
+
+static int
+ath11k_pull_mac_phy_cap_svc_ready_ext(struct ath11k_pdev_wmi *wmi_handle,
+ struct wmi_soc_mac_phy_hw_mode_caps *hw_caps,
+ struct wmi_hw_mode_capabilities *wmi_hw_mode_caps,
+ struct wmi_soc_hal_reg_capabilities *hal_reg_caps,
+ struct wmi_mac_phy_capabilities *wmi_mac_phy_caps,
+ u8 hw_mode_id, u8 phy_id,
+ struct ath11k_pdev *pdev)
+{
+ struct wmi_mac_phy_capabilities *mac_phy_caps;
+ struct ath11k_band_cap *cap_band;
+ struct ath11k_pdev_cap *pdev_cap = &pdev->cap;
+ u32 phy_map;
+ u32 hw_idx, phy_idx = 0;
+
+ if (!hw_caps || !wmi_hw_mode_caps || !hal_reg_caps)
+ return -EINVAL;
+
+ for (hw_idx = 0; hw_idx < hw_caps->num_hw_modes; hw_idx++) {
+ if (hw_mode_id == wmi_hw_mode_caps[hw_idx].hw_mode_id)
+ break;
+
+ phy_map = wmi_hw_mode_caps[hw_idx].phy_id_map;
+ while (phy_map) {
+ phy_map >>= 1;
+ phy_idx++;
+ }
+ }
+
+ if (hw_idx == hw_caps->num_hw_modes)
+ return -EINVAL;
+
+ phy_idx += phy_id;
+ if (phy_idx >= hal_reg_caps->num_phy)
+ return -EINVAL;
+
+ mac_phy_caps = wmi_mac_phy_caps + phy_idx;
+
+ pdev->pdev_id = mac_phy_caps->pdev_id;
+ pdev_cap->supported_bands = mac_phy_caps->supported_bands;
+ pdev_cap->ampdu_density = mac_phy_caps->ampdu_density;
+
+ /* Take non-zero tx/rx chainmask. If tx/rx chainmask differs from
+ * band to band for a single radio, need to see how this should be
+ * handled.
+ */
+ if (mac_phy_caps->supported_bands & WMI_HOST_WLAN_2G_CAP) {
+ pdev_cap->vht_cap = mac_phy_caps->vht_cap_info_2g;
+ pdev_cap->vht_mcs = mac_phy_caps->vht_supp_mcs_2g;
+ pdev_cap->he_mcs = mac_phy_caps->he_supp_mcs_2g;
+ pdev_cap->tx_chain_mask = mac_phy_caps->tx_chain_mask_2g;
+ pdev_cap->rx_chain_mask = mac_phy_caps->rx_chain_mask_2g;
+ } else if (mac_phy_caps->supported_bands & WMI_HOST_WLAN_5G_CAP) {
+ pdev_cap->vht_cap = mac_phy_caps->vht_cap_info_5g;
+ pdev_cap->vht_mcs = mac_phy_caps->vht_supp_mcs_5g;
+ pdev_cap->he_mcs = mac_phy_caps->he_supp_mcs_5g;
+ pdev_cap->tx_chain_mask = mac_phy_caps->tx_chain_mask_5g;
+ pdev_cap->rx_chain_mask = mac_phy_caps->rx_chain_mask_5g;
+ } else {
+ return -EINVAL;
+ }
+
+ /* The tx/rx chainmask reported from firmware depends on the actual
+ * hw chains used. For example, for 4x4 capable macphys, the first 4
+ * chains can be used for the first mac and the remaining 4 chains
+ * for the second mac, or vice versa. In this case tx/rx chainmask
+ * 0xf is advertised for the first mac and 0xf0 for the second.
+ * Compute the shift value for the tx/rx chainmask which is used to
+ * advertise supported ht/vht rates to mac80211, e.g. find_first_bit()
+ * on 0xf0 yields a shift of 4.
+ */
+ pdev_cap->tx_chain_mask_shift =
+ find_first_bit((unsigned long *)&pdev_cap->tx_chain_mask, 32);
+ pdev_cap->rx_chain_mask_shift =
+ find_first_bit((unsigned long *)&pdev_cap->rx_chain_mask, 32);
+
+ cap_band = &pdev_cap->band[NL80211_BAND_2GHZ];
+ cap_band->max_bw_supported = mac_phy_caps->max_bw_supported_2g;
+ cap_band->ht_cap_info = mac_phy_caps->ht_cap_info_2g;
+ cap_band->he_cap_info[0] = mac_phy_caps->he_cap_info_2g;
+ cap_band->he_cap_info[1] = mac_phy_caps->he_cap_info_2g_ext;
+ cap_band->he_mcs = mac_phy_caps->he_supp_mcs_2g;
+ memcpy(cap_band->he_cap_phy_info, &mac_phy_caps->he_cap_phy_info_2g,
+ sizeof(u32) * PSOC_HOST_MAX_PHY_SIZE);
+ memcpy(&cap_band->he_ppet, &mac_phy_caps->he_ppet2g,
+ sizeof(struct ath11k_ppe_threshold));
+
+ cap_band = &pdev_cap->band[NL80211_BAND_5GHZ];
+ cap_band->max_bw_supported = mac_phy_caps->max_bw_supported_5g;
+ cap_band->ht_cap_info = mac_phy_caps->ht_cap_info_5g;
+ cap_band->he_cap_info[0] = mac_phy_caps->he_cap_info_5g;
+ cap_band->he_cap_info[1] = mac_phy_caps->he_cap_info_5g_ext;
+ cap_band->he_mcs = mac_phy_caps->he_supp_mcs_5g;
+ memcpy(cap_band->he_cap_phy_info, &mac_phy_caps->he_cap_phy_info_5g,
+ sizeof(u32) * PSOC_HOST_MAX_PHY_SIZE);
+ memcpy(&cap_band->he_ppet, &mac_phy_caps->he_ppet5g,
+ sizeof(struct ath11k_ppe_threshold));
+
+ return 0;
+}
+
+static int
+ath11k_pull_reg_cap_svc_rdy_ext(struct ath11k_pdev_wmi *wmi_handle,
+ struct wmi_soc_hal_reg_capabilities *reg_caps,
+ struct wmi_hal_reg_capabilities_ext *wmi_ext_reg_cap,
+ u8 phy_idx,
+ struct ath11k_hal_reg_capabilities_ext *param)
+{
+ struct wmi_hal_reg_capabilities_ext *ext_reg_cap;
+
+ if (!reg_caps || !wmi_ext_reg_cap)
+ return -EINVAL;
+
+ if (phy_idx >= reg_caps->num_phy)
+ return -EINVAL;
+
+ ext_reg_cap = &wmi_ext_reg_cap[phy_idx];
+
+ param->phy_id = ext_reg_cap->phy_id;
+ param->eeprom_reg_domain = ext_reg_cap->eeprom_reg_domain;
+ param->eeprom_reg_domain_ext =
+ ext_reg_cap->eeprom_reg_domain_ext;
+ param->regcap1 = ext_reg_cap->regcap1;
+ param->regcap2 = ext_reg_cap->regcap2;
+ /* check if param->wireless_mode is needed */
+ param->low_2ghz_chan = ext_reg_cap->low_2ghz_chan;
+ param->high_2ghz_chan = ext_reg_cap->high_2ghz_chan;
+ param->low_5ghz_chan = ext_reg_cap->low_5ghz_chan;
+ param->high_5ghz_chan = ext_reg_cap->high_5ghz_chan;
+
+ return 0;
+}
+
+static int ath11k_pull_service_ready_tlv(struct ath11k_base *ab,
+ const void *evt_buf,
+ struct ath11k_targ_cap *cap)
+{
+ const struct wmi_service_ready_event *ev = evt_buf;
+
+ if (!ev) {
+ ath11k_err(ab, "%s: failed by NULL param\n",
+ __func__);
+ return -EINVAL;
+ }
+
+ cap->phy_capability = ev->phy_capability;
+ cap->max_frag_entry = ev->max_frag_entry;
+ cap->num_rf_chains = ev->num_rf_chains;
+ cap->ht_cap_info = ev->ht_cap_info;
+ cap->vht_cap_info = ev->vht_cap_info;
+ cap->vht_supp_mcs = ev->vht_supp_mcs;
+ cap->hw_min_tx_power = ev->hw_min_tx_power;
+ cap->hw_max_tx_power = ev->hw_max_tx_power;
+ cap->sys_cap_info = ev->sys_cap_info;
+ cap->min_pkt_size_enable = ev->min_pkt_size_enable;
+ cap->max_bcn_ie_size = ev->max_bcn_ie_size;
+ cap->max_num_scan_channels = ev->max_num_scan_channels;
+ cap->max_supported_macs = ev->max_supported_macs;
+ cap->wmi_fw_sub_feat_caps = ev->wmi_fw_sub_feat_caps;
+ cap->txrx_chainmask = ev->txrx_chainmask;
+ cap->default_dbs_hw_mode_index = ev->default_dbs_hw_mode_index;
+ cap->num_msdu_desc = ev->num_msdu_desc;
+
+ return 0;
+}
+
+/* Save the wmi_service_bitmap into a linear bitmap. The wmi_services in
+ * wmi_service ready event are advertised in b0-b3 (LSB 4-bits) of each
+ * 4-byte word.
+ */
+static void ath11k_wmi_service_bitmap_copy(struct ath11k_pdev_wmi *wmi,
+ const u32 *wmi_svc_bm)
+{
+ int i, j;
+
+ for (i = 0, j = 0; i < WMI_SERVICE_BM_SIZE && j < WMI_MAX_SERVICE; i++) {
+ do {
+ if (wmi_svc_bm[i] & BIT(j % WMI_SERVICE_BITS_IN_SIZE32))
+ set_bit(j, wmi->wmi_ab->svc_map);
+ } while (++j % WMI_SERVICE_BITS_IN_SIZE32);
+ }
+}
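+
+/* Example (illustrative): with the LSB-4 layout described above, service
+ * id 6 lives in bit 2 of wmi_svc_bm[1]; in general service j maps to
+ * word j / WMI_SERVICE_BITS_IN_SIZE32, bit j % WMI_SERVICE_BITS_IN_SIZE32.
+ */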
+
+static int ath11k_wmi_tlv_svc_rdy_parse(struct ath11k_base *ab, u16 tag, u16 len,
+ const void *ptr, void *data)
+{
+ struct wmi_tlv_svc_ready_parse *svc_ready = data;
+ struct ath11k_pdev_wmi *wmi_handle = &ab->wmi_ab.wmi[0];
+ u16 expect_len;
+
+ switch (tag) {
+ case WMI_TAG_SERVICE_READY_EVENT:
+ if (ath11k_pull_service_ready_tlv(ab, ptr, &ab->target_caps))
+ return -EINVAL;
+ break;
+
+ case WMI_TAG_ARRAY_UINT32:
+ if (!svc_ready->wmi_svc_bitmap_done) {
+ expect_len = WMI_SERVICE_BM_SIZE * sizeof(u32);
+ if (len < expect_len) {
+ ath11k_warn(ab, "invalid len %d for the tag 0x%x\n",
+ len, tag);
+ return -EINVAL;
+ }
+
+ ath11k_wmi_service_bitmap_copy(wmi_handle, ptr);
+
+ svc_ready->wmi_svc_bitmap_done = true;
+ }
+ break;
+ default:
+ break;
+ }
+
+ return 0;
+}
+
+static int ath11k_service_ready_event(struct ath11k_base *ab, struct sk_buff *skb)
+{
+ struct wmi_tlv_svc_ready_parse svc_ready = { };
+ int ret;
+
+ ret = ath11k_wmi_tlv_iter(ab, skb->data, skb->len,
+ ath11k_wmi_tlv_svc_rdy_parse,
+ &svc_ready);
+ if (ret) {
+ ath11k_warn(ab, "failed to parse tlv %d\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
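+/* Allocate a zeroed WMI skb with the length rounded up to a multiple of
+ * 4 and WMI_SKB_HEADROOM reserved, so the WMI command header (and the
+ * HTC header below it) can be pushed without reallocating.
+ */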
+struct sk_buff *ath11k_wmi_alloc_skb(struct ath11k_wmi_base *wmi_sc, u32 len)
+{
+ struct sk_buff *skb;
+ struct ath11k_base *ab = wmi_sc->ab;
+ u32 round_len = roundup(len, 4);
+
+ skb = ath11k_htc_alloc_skb(ab, WMI_SKB_HEADROOM + round_len);
+ if (!skb)
+ return NULL;
+
+ skb_reserve(skb, WMI_SKB_HEADROOM);
+ if (!IS_ALIGNED((unsigned long)skb->data, 4))
+ ath11k_warn(ab, "unaligned WMI skb data\n");
+
+ skb_put(skb, round_len);
+ memset(skb->data, 0, round_len);
+
+ return skb;
+}
+
+int ath11k_wmi_mgmt_send(struct ath11k *ar, u32 vdev_id, u32 buf_id,
+ struct sk_buff *frame)
+{
+ struct ath11k_pdev_wmi *wmi = ar->wmi;
+ struct wmi_mgmt_send_cmd *cmd;
+ struct wmi_tlv *frame_tlv;
+ struct sk_buff *skb;
+ u32 buf_len;
+ int ret, len;
+
+ buf_len = frame->len < WMI_MGMT_SEND_DOWNLD_LEN ?
+ frame->len : WMI_MGMT_SEND_DOWNLD_LEN;
+
+ len = sizeof(*cmd) + sizeof(*frame_tlv) + roundup(buf_len, 4);
+
+ skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, len);
+ if (!skb)
+ return -ENOMEM;
+
+ cmd = (struct wmi_mgmt_send_cmd *)skb->data;
+ cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_MGMT_TX_SEND_CMD) |
+ FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);
+ cmd->vdev_id = vdev_id;
+ cmd->desc_id = buf_id;
+ cmd->chanfreq = 0;
+ cmd->paddr_lo = lower_32_bits(ATH11K_SKB_CB(frame)->paddr);
+ cmd->paddr_hi = upper_32_bits(ATH11K_SKB_CB(frame)->paddr);
+ cmd->frame_len = frame->len;
+ cmd->buf_len = buf_len;
+ cmd->tx_params_valid = 0;
+
+ frame_tlv = (struct wmi_tlv *)(skb->data + sizeof(*cmd));
+ frame_tlv->header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_ARRAY_BYTE) |
+ FIELD_PREP(WMI_TLV_LEN, buf_len);
+
+ memcpy(frame_tlv->value, frame->data, buf_len);
+
+ ath11k_ce_byte_swap(frame_tlv->value, buf_len);
+
+ ret = ath11k_wmi_cmd_send(wmi, skb, WMI_MGMT_TX_SEND_CMDID);
+ if (ret) {
+ ath11k_warn(ar->ab,
+ "failed to submit WMI_MGMT_TX_SEND_CMDID cmd\n");
+ dev_kfree_skb(skb);
+ }
+
+ return ret;
+}
+
+int ath11k_wmi_vdev_create(struct ath11k *ar, u8 *macaddr,
+ struct vdev_create_params *param)
+{
+ struct ath11k_pdev_wmi *wmi = ar->wmi;
+ struct wmi_vdev_create_cmd *cmd;
+ struct sk_buff *skb;
+ struct wmi_vdev_txrx_streams *txrx_streams;
+ struct wmi_tlv *tlv;
+ int ret, len;
+ void *ptr;
+
+ /* This can be optimized by sending the tx/rx chain configuration
+ * only for the supported bands instead of always sending it for
+ * both bands.
+ */
+ len = sizeof(*cmd) + TLV_HDR_SIZE +
+ (WMI_NUM_SUPPORTED_BAND_MAX * sizeof(*txrx_streams));
+
+ skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, len);
+ if (!skb)
+ return -ENOMEM;
+
+ cmd = (struct wmi_vdev_create_cmd *)skb->data;
+ cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_VDEV_CREATE_CMD) |
+ FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);
+
+ cmd->vdev_id = param->if_id;
+ cmd->vdev_type = param->type;
+ cmd->vdev_subtype = param->subtype;
+ cmd->num_cfg_txrx_streams = WMI_NUM_SUPPORTED_BAND_MAX;
+ cmd->pdev_id = param->pdev_id;
+ ether_addr_copy(cmd->vdev_macaddr.addr, macaddr);
+
+ ptr = skb->data + sizeof(*cmd);
+ len = WMI_NUM_SUPPORTED_BAND_MAX * sizeof(*txrx_streams);
+
+ tlv = ptr;
+ tlv->header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_ARRAY_STRUCT) |
+ FIELD_PREP(WMI_TLV_LEN, len);
+
+ ptr += TLV_HDR_SIZE;
+ txrx_streams = ptr;
+ len = sizeof(*txrx_streams);
+ txrx_streams->tlv_header =
+ FIELD_PREP(WMI_TLV_TAG, WMI_TAG_VDEV_TXRX_STREAMS) |
+ FIELD_PREP(WMI_TLV_LEN, len - TLV_HDR_SIZE);
+ txrx_streams->band = WMI_TPC_CHAINMASK_CONFIG_BAND_2G;
+ txrx_streams->supported_tx_streams =
+ param->chains[NL80211_BAND_2GHZ].tx;
+ txrx_streams->supported_rx_streams =
+ param->chains[NL80211_BAND_2GHZ].rx;
+
+ txrx_streams++;
+ txrx_streams->tlv_header =
+ FIELD_PREP(WMI_TLV_TAG, WMI_TAG_VDEV_TXRX_STREAMS) |
+ FIELD_PREP(WMI_TLV_LEN, len - TLV_HDR_SIZE);
+ txrx_streams->band = WMI_TPC_CHAINMASK_CONFIG_BAND_5G;
+ txrx_streams->supported_tx_streams =
+ param->chains[NL80211_BAND_5GHZ].tx;
+ txrx_streams->supported_rx_streams =
+ param->chains[NL80211_BAND_5GHZ].rx;
+
+ ret = ath11k_wmi_cmd_send(wmi, skb, WMI_VDEV_CREATE_CMDID);
+ if (ret) {
+ ath11k_warn(ar->ab,
+ "failed to submit WMI_VDEV_CREATE_CMDID\n");
+ dev_kfree_skb(skb);
+ }
+
+ ath11k_dbg(ar->ab, ATH11K_DBG_WMI,
+ "WMI vdev create: id %d type %d subtype %d macaddr %pM pdevid %d\n",
+ param->if_id, param->type, param->subtype,
+ macaddr, param->pdev_id);
+
+ return ret;
+}
+
+int ath11k_wmi_vdev_delete(struct ath11k *ar, u8 vdev_id)
+{
+ struct ath11k_pdev_wmi *wmi = ar->wmi;
+ struct wmi_vdev_delete_cmd *cmd;
+ struct sk_buff *skb;
+ int ret;
+
+ skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
+ if (!skb)
+ return -ENOMEM;
+
+ cmd = (struct wmi_vdev_delete_cmd *)skb->data;
+ cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_VDEV_DELETE_CMD) |
+ FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);
+ cmd->vdev_id = vdev_id;
+
+ ret = ath11k_wmi_cmd_send(wmi, skb, WMI_VDEV_DELETE_CMDID);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to submit WMI_VDEV_DELETE_CMDID\n");
+ dev_kfree_skb(skb);
+ }
+
+ ath11k_dbg(ar->ab, ATH11K_DBG_WMI, "WMI vdev delete id %d\n", vdev_id);
+
+ return ret;
+}
+
+int ath11k_wmi_vdev_stop(struct ath11k *ar, u8 vdev_id)
+{
+ struct ath11k_pdev_wmi *wmi = ar->wmi;
+ struct wmi_vdev_stop_cmd *cmd;
+ struct sk_buff *skb;
+ int ret;
+
+ skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
+ if (!skb)
+ return -ENOMEM;
+
+ cmd = (struct wmi_vdev_stop_cmd *)skb->data;
+
+ cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_VDEV_STOP_CMD) |
+ FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);
+ cmd->vdev_id = vdev_id;
+
+ ret = ath11k_wmi_cmd_send(wmi, skb, WMI_VDEV_STOP_CMDID);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to submit WMI_VDEV_STOP cmd\n");
+ dev_kfree_skb(skb);
+ }
+
+ ath11k_dbg(ar->ab, ATH11K_DBG_WMI, "WMI vdev stop id 0x%x\n", vdev_id);
+
+ return ret;
+}
+
+int ath11k_wmi_vdev_down(struct ath11k *ar, u8 vdev_id)
+{
+ struct ath11k_pdev_wmi *wmi = ar->wmi;
+ struct wmi_vdev_down_cmd *cmd;
+ struct sk_buff *skb;
+ int ret;
+
+ skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
+ if (!skb)
+ return -ENOMEM;
+
+ cmd = (struct wmi_vdev_down_cmd *)skb->data;
+
+ cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_VDEV_DOWN_CMD) |
+ FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);
+ cmd->vdev_id = vdev_id;
+
+ ret = ath11k_wmi_cmd_send(wmi, skb, WMI_VDEV_DOWN_CMDID);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to submit WMI_VDEV_DOWN cmd\n");
+ dev_kfree_skb(skb);
+ }
+
+ ath11k_dbg(ar->ab, ATH11K_DBG_WMI, "WMI vdev down id 0x%x\n", vdev_id);
+
+ return ret;
+}
+
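+/* Translate the driver's channel arg into a WMI channel TLV: the phy
+ * mode and channel flags are packed into chan->info and the regulatory
+ * power limits into the two reg_info words.
+ */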
+static void ath11k_wmi_put_wmi_channel(struct wmi_channel *chan,
+ struct wmi_vdev_start_req_arg *arg)
+{
+ memset(chan, 0, sizeof(*chan));
+
+ chan->mhz = arg->channel.freq;
+ chan->band_center_freq1 = arg->channel.band_center_freq1;
+ if (arg->channel.mode == MODE_11AC_VHT80_80)
+ chan->band_center_freq2 = arg->channel.band_center_freq2;
+ else
+ chan->band_center_freq2 = 0;
+
+ chan->info |= FIELD_PREP(WMI_CHAN_INFO_MODE, arg->channel.mode);
+ if (arg->channel.passive)
+ chan->info |= WMI_CHAN_INFO_PASSIVE;
+ if (arg->channel.allow_ibss)
+ chan->info |= WMI_CHAN_INFO_ADHOC_ALLOWED;
+ if (arg->channel.allow_ht)
+ chan->info |= WMI_CHAN_INFO_ALLOW_HT;
+ if (arg->channel.allow_vht)
+ chan->info |= WMI_CHAN_INFO_ALLOW_VHT;
+ if (arg->channel.allow_he)
+ chan->info |= WMI_CHAN_INFO_ALLOW_HE;
+ if (arg->channel.ht40plus)
+ chan->info |= WMI_CHAN_INFO_HT40_PLUS;
+ if (arg->channel.chan_radar)
+ chan->info |= WMI_CHAN_INFO_DFS;
+ if (arg->channel.freq2_radar)
+ chan->info |= WMI_CHAN_INFO_DFS_FREQ2;
+
+ chan->reg_info_1 = FIELD_PREP(WMI_CHAN_REG_INFO1_MAX_PWR,
+ arg->channel.max_power) |
+ FIELD_PREP(WMI_CHAN_REG_INFO1_MAX_REG_PWR,
+ arg->channel.max_reg_power);
+
+ chan->reg_info_2 = FIELD_PREP(WMI_CHAN_REG_INFO2_ANT_MAX,
+ arg->channel.max_antenna_gain) |
+ FIELD_PREP(WMI_CHAN_REG_INFO2_MAX_TX_PWR,
+ arg->channel.max_power);
+}
+
+int ath11k_wmi_vdev_start(struct ath11k *ar, struct wmi_vdev_start_req_arg *arg,
+ bool restart)
+{
+ struct ath11k_pdev_wmi *wmi = ar->wmi;
+ struct wmi_vdev_start_request_cmd *cmd;
+ struct sk_buff *skb;
+ struct wmi_channel *chan;
+ struct wmi_tlv *tlv;
+ void *ptr;
+ int ret, len;
+
+ if (WARN_ON(arg->ssid_len > sizeof(cmd->ssid.ssid)))
+ return -EINVAL;
+
+ len = sizeof(*cmd) + sizeof(*chan) + TLV_HDR_SIZE;
+
+ skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, len);
+ if (!skb)
+ return -ENOMEM;
+
+ cmd = (struct wmi_vdev_start_request_cmd *)skb->data;
+ cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG,
+ WMI_TAG_VDEV_START_REQUEST_CMD) |
+ FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);
+ cmd->vdev_id = arg->vdev_id;
+ cmd->beacon_interval = arg->bcn_intval;
+ cmd->bcn_tx_rate = arg->bcn_tx_rate;
+ cmd->dtim_period = arg->dtim_period;
+ cmd->num_noa_descriptors = arg->num_noa_descriptors;
+ cmd->preferred_rx_streams = arg->pref_rx_streams;
+ cmd->preferred_tx_streams = arg->pref_tx_streams;
+ cmd->cac_duration_ms = arg->cac_duration_ms;
+ cmd->regdomain = arg->regdomain;
+ cmd->he_ops = arg->he_ops;
+
+ if (!restart) {
+ if (arg->ssid) {
+ cmd->ssid.ssid_len = arg->ssid_len;
+ memcpy(cmd->ssid.ssid, arg->ssid, arg->ssid_len);
+ }
+ if (arg->hidden_ssid)
+ cmd->flags |= WMI_VDEV_START_HIDDEN_SSID;
+ if (arg->pmf_enabled)
+ cmd->flags |= WMI_VDEV_START_PMF_ENABLED;
+ }
+
+ cmd->flags |= WMI_VDEV_START_LDPC_RX_ENABLED;
+
+ ptr = skb->data + sizeof(*cmd);
+ chan = ptr;
+
+ ath11k_wmi_put_wmi_channel(chan, arg);
+
+ chan->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_CHANNEL) |
+ FIELD_PREP(WMI_TLV_LEN,
+ sizeof(*chan) - TLV_HDR_SIZE);
+ ptr += sizeof(*chan);
+
+ tlv = ptr;
+ tlv->header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_ARRAY_STRUCT) |
+ FIELD_PREP(WMI_TLV_LEN, 0);
+
+ /* Note: This is a nested TLV containing:
+ * [wmi_tlv][wmi_p2p_noa_descriptor][wmi_tlv]..
+ */
+
+ ptr += sizeof(*tlv);
+
+ if (restart)
+ ret = ath11k_wmi_cmd_send(wmi, skb,
+ WMI_VDEV_RESTART_REQUEST_CMDID);
+ else
+ ret = ath11k_wmi_cmd_send(wmi, skb,
+ WMI_VDEV_START_REQUEST_CMDID);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to submit vdev_%s cmd\n",
+ restart ? "restart" : "start");
+ dev_kfree_skb(skb);
+ }
+
+ ath11k_dbg(ar->ab, ATH11K_DBG_WMI, "vdev %s id 0x%x freq 0x%x mode 0x%x\n",
+ restart ? "restart" : "start", arg->vdev_id,
+ arg->channel.freq, arg->channel.mode);
+
+ return ret;
+}
+
+int ath11k_wmi_vdev_up(struct ath11k *ar, u32 vdev_id, u32 aid, const u8 *bssid)
+{
+ struct ath11k_pdev_wmi *wmi = ar->wmi;
+ struct wmi_vdev_up_cmd *cmd;
+ struct sk_buff *skb;
+ int ret;
+
+ skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
+ if (!skb)
+ return -ENOMEM;
+
+ cmd = (struct wmi_vdev_up_cmd *)skb->data;
+
+ cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_VDEV_UP_CMD) |
+ FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);
+ cmd->vdev_id = vdev_id;
+ cmd->vdev_assoc_id = aid;
+
+ ether_addr_copy(cmd->vdev_bssid.addr, bssid);
+
+ ret = ath11k_wmi_cmd_send(wmi, skb, WMI_VDEV_UP_CMDID);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to submit WMI_VDEV_UP cmd\n");
+ dev_kfree_skb(skb);
+ }
+
+ ath11k_dbg(ar->ab, ATH11K_DBG_WMI,
+ "WMI mgmt vdev up id 0x%x assoc id %d bssid %pM\n",
+ vdev_id, aid, bssid);
+
+ return ret;
+}
+
+int ath11k_wmi_send_peer_create_cmd(struct ath11k *ar,
+ struct peer_create_params *param)
+{
+ struct ath11k_pdev_wmi *wmi = ar->wmi;
+ struct wmi_peer_create_cmd *cmd;
+ struct sk_buff *skb;
+ int ret;
+
+ skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
+ if (!skb)
+ return -ENOMEM;
+
+ cmd = (struct wmi_peer_create_cmd *)skb->data;
+ cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_PEER_CREATE_CMD) |
+ FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);
+
+ ether_addr_copy(cmd->peer_macaddr.addr, param->peer_addr);
+ cmd->peer_type = param->peer_type;
+ cmd->vdev_id = param->vdev_id;
+
+ ret = ath11k_wmi_cmd_send(wmi, skb, WMI_PEER_CREATE_CMDID);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to submit WMI_PEER_CREATE cmd\n");
+ dev_kfree_skb(skb);
+ }
+
+ ath11k_dbg(ar->ab, ATH11K_DBG_WMI,
+ "WMI peer create vdev_id %d peer_addr %pM\n",
+ param->vdev_id, param->peer_addr);
+
+ return ret;
+}
+
+int ath11k_wmi_send_peer_delete_cmd(struct ath11k *ar,
+ const u8 *peer_addr, u8 vdev_id)
+{
+ struct ath11k_pdev_wmi *wmi = ar->wmi;
+ struct wmi_peer_delete_cmd *cmd;
+ struct sk_buff *skb;
+ int ret;
+
+ skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
+ if (!skb)
+ return -ENOMEM;
+
+ cmd = (struct wmi_peer_delete_cmd *)skb->data;
+ cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_PEER_DELETE_CMD) |
+ FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);
+
+ ether_addr_copy(cmd->peer_macaddr.addr, peer_addr);
+ cmd->vdev_id = vdev_id;
+
+ ath11k_dbg(ar->ab, ATH11K_DBG_WMI,
+ "WMI peer delete vdev_id %d peer_addr %pM\n",
+ vdev_id, peer_addr);
+
+ ret = ath11k_wmi_cmd_send(wmi, skb, WMI_PEER_DELETE_CMDID);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to send WMI_PEER_DELETE cmd\n");
+ dev_kfree_skb(skb);
+ }
+
+ return ret;
+}
+
+int ath11k_wmi_send_pdev_set_regdomain(struct ath11k *ar,
+ struct pdev_set_regdomain_params *param)
+{
+ struct ath11k_pdev_wmi *wmi = ar->wmi;
+ struct wmi_pdev_set_regdomain_cmd *cmd;
+ struct sk_buff *skb;
+ int ret;
+
+ skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
+ if (!skb)
+ return -ENOMEM;
+
+ cmd = (struct wmi_pdev_set_regdomain_cmd *)skb->data;
+ cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG,
+ WMI_TAG_PDEV_SET_REGDOMAIN_CMD) |
+ FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);
+
+ cmd->reg_domain = param->current_rd_in_use;
+ cmd->reg_domain_2g = param->current_rd_2g;
+ cmd->reg_domain_5g = param->current_rd_5g;
+ cmd->conformance_test_limit_2g = param->ctl_2g;
+ cmd->conformance_test_limit_5g = param->ctl_5g;
+ cmd->dfs_domain = param->dfs_domain;
+ cmd->pdev_id = param->pdev_id;
+
+ ath11k_dbg(ar->ab, ATH11K_DBG_WMI,
+ "WMI pdev regd rd %d rd2g %d rd5g %d domain %d pdev id %d\n",
+ param->current_rd_in_use, param->current_rd_2g,
+ param->current_rd_5g, param->dfs_domain, param->pdev_id);
+
+ ret = ath11k_wmi_cmd_send(wmi, skb, WMI_PDEV_SET_REGDOMAIN_CMDID);
+ if (ret) {
+ ath11k_warn(ar->ab,
+ "failed to send WMI_PDEV_SET_REGDOMAIN cmd\n");
+ dev_kfree_skb(skb);
+ }
+
+ return ret;
+}
+
+int ath11k_wmi_set_peer_param(struct ath11k *ar, const u8 *peer_addr,
+ u32 vdev_id, u32 param_id, u32 param_val)
+{
+ struct ath11k_pdev_wmi *wmi = ar->wmi;
+ struct wmi_peer_set_param_cmd *cmd;
+ struct sk_buff *skb;
+ int ret;
+
+ skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
+ if (!skb)
+ return -ENOMEM;
+
+ cmd = (struct wmi_peer_set_param_cmd *)skb->data;
+ cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_PEER_SET_PARAM_CMD) |
+ FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);
+ ether_addr_copy(cmd->peer_macaddr.addr, peer_addr);
+ cmd->vdev_id = vdev_id;
+ cmd->param_id = param_id;
+ cmd->param_value = param_val;
+
+ ret = ath11k_wmi_cmd_send(wmi, skb, WMI_PEER_SET_PARAM_CMDID);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to send WMI_PEER_SET_PARAM cmd\n");
+ dev_kfree_skb(skb);
+ }
+
+ ath11k_dbg(ar->ab, ATH11K_DBG_WMI,
+ "WMI vdev %d peer 0x%pM set param %d value %d\n",
+ vdev_id, peer_addr, param_id, param_val);
+
+ return ret;
+}
+
+int ath11k_wmi_send_peer_flush_tids_cmd(struct ath11k *ar,
+ u8 peer_addr[ETH_ALEN],
+ struct peer_flush_params *param)
+{
+ struct ath11k_pdev_wmi *wmi = ar->wmi;
+ struct wmi_peer_flush_tids_cmd *cmd;
+ struct sk_buff *skb;
+ int ret;
+
+ skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
+ if (!skb)
+ return -ENOMEM;
+
+ cmd = (struct wmi_peer_flush_tids_cmd *)skb->data;
+ cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_PEER_FLUSH_TIDS_CMD) |
+ FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);
+
+ ether_addr_copy(cmd->peer_macaddr.addr, peer_addr);
+ cmd->peer_tid_bitmap = param->peer_tid_bitmap;
+ cmd->vdev_id = param->vdev_id;
+
+ ret = ath11k_wmi_cmd_send(wmi, skb, WMI_PEER_FLUSH_TIDS_CMDID);
+ if (ret) {
+ ath11k_warn(ar->ab,
+ "failed to send WMI_PEER_FLUSH_TIDS cmd\n");
+ dev_kfree_skb(skb);
+ }
+
+ ath11k_dbg(ar->ab, ATH11K_DBG_WMI,
+ "WMI peer flush vdev_id %d peer_addr %pM tids %08x\n",
+ param->vdev_id, peer_addr, param->peer_tid_bitmap);
+
+ return ret;
+}
+
+int ath11k_wmi_peer_rx_reorder_queue_setup(struct ath11k *ar,
+ int vdev_id, const u8 *addr,
+ dma_addr_t paddr, u8 tid,
+ u8 ba_window_size_valid,
+ u32 ba_window_size)
+{
+ struct wmi_peer_reorder_queue_setup_cmd *cmd;
+ struct sk_buff *skb;
+ int ret;
+
+ skb = ath11k_wmi_alloc_skb(ar->wmi->wmi_ab, sizeof(*cmd));
+ if (!skb)
+ return -ENOMEM;
+
+ cmd = (struct wmi_peer_reorder_queue_setup_cmd *)skb->data;
+ cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG,
+ WMI_TAG_REORDER_QUEUE_SETUP_CMD) |
+ FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);
+
+ ether_addr_copy(cmd->peer_macaddr.addr, addr);
+ cmd->vdev_id = vdev_id;
+ cmd->tid = tid;
+ cmd->queue_ptr_lo = lower_32_bits(paddr);
+ cmd->queue_ptr_hi = upper_32_bits(paddr);
+ cmd->queue_no = tid;
+ cmd->ba_window_size_valid = ba_window_size_valid;
+ cmd->ba_window_size = ba_window_size;
+
+ ret = ath11k_wmi_cmd_send(ar->wmi, skb,
+ WMI_PEER_REORDER_QUEUE_SETUP_CMDID);
+ if (ret) {
+ ath11k_warn(ar->ab,
+ "failed to send WMI_PEER_REORDER_QUEUE_SETUP\n");
+ dev_kfree_skb(skb);
+ }
+
+ ath11k_dbg(ar->ab, ATH11K_DBG_WMI,
+ "wmi rx reorder queue setup addr %pM vdev_id %d tid %d\n",
+ addr, vdev_id, tid);
+
+ return ret;
+}
+
+int
+ath11k_wmi_rx_reord_queue_remove(struct ath11k *ar,
+ struct rx_reorder_queue_remove_params *param)
+{
+ struct ath11k_pdev_wmi *wmi = ar->wmi;
+ struct wmi_peer_reorder_queue_remove_cmd *cmd;
+ struct sk_buff *skb;
+ int ret;
+
+ skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
+ if (!skb)
+ return -ENOMEM;
+
+ cmd = (struct wmi_peer_reorder_queue_remove_cmd *)skb->data;
+ cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG,
+ WMI_TAG_REORDER_QUEUE_REMOVE_CMD) |
+ FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);
+
+ ether_addr_copy(cmd->peer_macaddr.addr, param->peer_macaddr);
+ cmd->vdev_id = param->vdev_id;
+ cmd->tid_mask = param->peer_tid_bitmap;
+
+ ath11k_dbg(ar->ab, ATH11K_DBG_WMI,
+ "%s: peer_macaddr %pM vdev_id %d, tid_map %d", __func__,
+ param->peer_macaddr, param->vdev_id, param->peer_tid_bitmap);
+
+ ret = ath11k_wmi_cmd_send(wmi, skb,
+ WMI_PEER_REORDER_QUEUE_REMOVE_CMDID);
+ if (ret) {
+ ath11k_warn(ar->ab,
+ "failed to send WMI_PEER_REORDER_QUEUE_REMOVE_CMDID");
+ dev_kfree_skb(skb);
+ }
+
+ return ret;
+}
+
+int ath11k_wmi_pdev_set_param(struct ath11k *ar, u32 param_id,
+ u32 param_value, u8 pdev_id)
+{
+ struct ath11k_pdev_wmi *wmi = ar->wmi;
+ struct wmi_pdev_set_param_cmd *cmd;
+ struct sk_buff *skb;
+ int ret;
+
+ skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
+ if (!skb)
+ return -ENOMEM;
+
+ cmd = (struct wmi_pdev_set_param_cmd *)skb->data;
+ cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_PDEV_SET_PARAM_CMD) |
+ FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);
+ cmd->pdev_id = pdev_id;
+ cmd->param_id = param_id;
+ cmd->param_value = param_value;
+
+ ret = ath11k_wmi_cmd_send(wmi, skb, WMI_PDEV_SET_PARAM_CMDID);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to send WMI_PDEV_SET_PARAM cmd\n");
+ dev_kfree_skb(skb);
+ }
+
+ ath11k_dbg(ar->ab, ATH11K_DBG_WMI,
+ "WMI pdev set param %d pdev id %d value %d\n",
+ param_id, pdev_id, param_value);
+
+ return ret;
+}
+
+int ath11k_wmi_pdev_set_ps_mode(struct ath11k *ar, int vdev_id, u32 enable)
+{
+ struct ath11k_pdev_wmi *wmi = ar->wmi;
+ struct wmi_pdev_set_ps_mode_cmd *cmd;
+ struct sk_buff *skb;
+ int ret;
+
+ skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
+ if (!skb)
+ return -ENOMEM;
+
+ cmd = (struct wmi_pdev_set_ps_mode_cmd *)skb->data;
+ cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_STA_POWERSAVE_MODE_CMD) |
+ FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);
+ cmd->vdev_id = vdev_id;
+ cmd->sta_ps_mode = enable;
+
+ ret = ath11k_wmi_cmd_send(wmi, skb, WMI_STA_POWERSAVE_MODE_CMDID);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to send WMI_PDEV_SET_PARAM cmd\n");
+ dev_kfree_skb(skb);
+ }
+
+ ath11k_dbg(ar->ab, ATH11K_DBG_WMI,
+ "WMI vdev set psmode %d vdev id %d\n",
+ enable, vdev_id);
+
+ return ret;
+}
+
+int ath11k_wmi_pdev_suspend(struct ath11k *ar, u32 suspend_opt,
+ u32 pdev_id)
+{
+ struct ath11k_pdev_wmi *wmi = ar->wmi;
+ struct wmi_pdev_suspend_cmd *cmd;
+ struct sk_buff *skb;
+ int ret;
+
+ skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
+ if (!skb)
+ return -ENOMEM;
+
+ cmd = (struct wmi_pdev_suspend_cmd *)skb->data;
+
+ cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_PDEV_SUSPEND_CMD) |
+ FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);
+
+ cmd->suspend_opt = suspend_opt;
+ cmd->pdev_id = pdev_id;
+
+ ret = ath11k_wmi_cmd_send(wmi, skb, WMI_PDEV_SUSPEND_CMDID);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to send WMI_PDEV_SUSPEND cmd\n");
+ dev_kfree_skb(skb);
+ }
+
+ ath11k_dbg(ar->ab, ATH11K_DBG_WMI,
+ "WMI pdev suspend pdev_id %d\n", pdev_id);
+
+ return ret;
+}
+
+int ath11k_wmi_pdev_resume(struct ath11k *ar, u32 pdev_id)
+{
+ struct ath11k_pdev_wmi *wmi = ar->wmi;
+ struct wmi_pdev_resume_cmd *cmd;
+ struct sk_buff *skb;
+ int ret;
+
+ skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
+ if (!skb)
+ return -ENOMEM;
+
+ cmd = (struct wmi_pdev_resume_cmd *)skb->data;
+
+ cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_PDEV_RESUME_CMD) |
+ FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);
+ cmd->pdev_id = pdev_id;
+
+ ath11k_dbg(ar->ab, ATH11K_DBG_WMI,
+ "WMI pdev resume pdev id %d\n", pdev_id);
+
+ ret = ath11k_wmi_cmd_send(wmi, skb, WMI_PDEV_RESUME_CMDID);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to send WMI_PDEV_RESUME cmd\n");
+ dev_kfree_skb(skb);
+ }
+
+ return ret;
+}
+
+/* TODO FW Support for the cmd is not available yet.
+ * Can be tested once the command and corresponding
+ * event is implemented in FW
+ */
+int ath11k_wmi_pdev_bss_chan_info_request(struct ath11k *ar,
+ enum wmi_bss_chan_info_req_type type)
+{
+ struct ath11k_pdev_wmi *wmi = ar->wmi;
+ struct wmi_pdev_bss_chan_info_req_cmd *cmd;
+ struct sk_buff *skb;
+ int ret;
+
+ skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
+ if (!skb)
+ return -ENOMEM;
+
+ cmd = (struct wmi_pdev_bss_chan_info_req_cmd *)skb->data;
+
+ cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG,
+ WMI_TAG_PDEV_BSS_CHAN_INFO_REQUEST) |
+ FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);
+ cmd->req_type = type;
+
+ ath11k_dbg(ar->ab, ATH11K_DBG_WMI,
+ "WMI bss chan info req type %d\n", type);
+
+ ret = ath11k_wmi_cmd_send(wmi, skb,
+ WMI_PDEV_BSS_CHAN_INFO_REQUEST_CMDID);
+ if (ret) {
+ ath11k_warn(ar->ab,
+ "failed to send WMI_PDEV_BSS_CHAN_INFO_REQUEST cmd\n");
+ dev_kfree_skb(skb);
+ }
+
+ return ret;
+}
+
+int ath11k_wmi_send_set_ap_ps_param_cmd(struct ath11k *ar, u8 *peer_addr,
+ struct ap_ps_params *param)
+{
+ struct ath11k_pdev_wmi *wmi = ar->wmi;
+ struct wmi_ap_ps_peer_cmd *cmd;
+ struct sk_buff *skb;
+ int ret;
+
+ skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
+ if (!skb)
+ return -ENOMEM;
+
+ cmd = (struct wmi_ap_ps_peer_cmd *)skb->data;
+ cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_AP_PS_PEER_CMD) |
+ FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);
+
+ cmd->vdev_id = param->vdev_id;
+ ether_addr_copy(cmd->peer_macaddr.addr, peer_addr);
+ cmd->param = param->param;
+ cmd->value = param->value;
+
+ ret = ath11k_wmi_cmd_send(wmi, skb, WMI_AP_PS_PEER_PARAM_CMDID);
+ if (ret) {
+ ath11k_warn(ar->ab,
+ "failed to send WMI_AP_PS_PEER_PARAM_CMDID\n");
+ dev_kfree_skb(skb);
+ }
+
+ ath11k_dbg(ar->ab, ATH11K_DBG_WMI,
+ "WMI set ap ps vdev id %d peer %pM param %d value %d\n",
+ param->vdev_id, peer_addr, param->param, param->value);
+
+ return ret;
+}
+
+int ath11k_wmi_set_sta_ps_param(struct ath11k *ar, u32 vdev_id,
+ u32 param, u32 param_value)
+{
+ struct ath11k_pdev_wmi *wmi = ar->wmi;
+ struct wmi_sta_powersave_param_cmd *cmd;
+ struct sk_buff *skb;
+ int ret;
+
+ skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
+ if (!skb)
+ return -ENOMEM;
+
+ cmd = (struct wmi_sta_powersave_param_cmd *)skb->data;
+ cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG,
+ WMI_TAG_STA_POWERSAVE_PARAM_CMD) |
+ FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);
+
+ cmd->vdev_id = vdev_id;
+ cmd->param = param;
+ cmd->value = param_value;
+
+ ath11k_dbg(ar->ab, ATH11K_DBG_WMI,
+ "WMI set sta ps vdev_id %d param %d value %d\n",
+ vdev_id, param, param_value);
+
+ ret = ath11k_wmi_cmd_send(wmi, skb, WMI_STA_POWERSAVE_PARAM_CMDID);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to send WMI_STA_POWERSAVE_PARAM_CMDID");
+ dev_kfree_skb(skb);
+ }
+
+ return ret;
+}
+
+int ath11k_wmi_force_fw_hang_cmd(struct ath11k *ar, u32 type, u32 delay_time_ms)
+{
+ struct ath11k_pdev_wmi *wmi = ar->wmi;
+ struct wmi_force_fw_hang_cmd *cmd;
+ struct sk_buff *skb;
+ int ret, len;
+
+ len = sizeof(*cmd);
+
+ skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, len);
+ if (!skb)
+ return -ENOMEM;
+
+ cmd = (struct wmi_force_fw_hang_cmd *)skb->data;
+ cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_FORCE_FW_HANG_CMD) |
+ FIELD_PREP(WMI_TLV_LEN, len - TLV_HDR_SIZE);
+
+ cmd->type = type;
+ cmd->delay_time_ms = delay_time_ms;
+
+ ret = ath11k_wmi_cmd_send(wmi, skb, WMI_FORCE_FW_HANG_CMDID);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to send WMI_FORCE_FW_HANG_CMDID\n");
+ dev_kfree_skb(skb);
+ }
+
+ return ret;
+}
+
+int ath11k_wmi_vdev_set_param_cmd(struct ath11k *ar, u32 vdev_id,
+ u32 param_id, u32 param_value)
+{
+ struct ath11k_pdev_wmi *wmi = ar->wmi;
+ struct wmi_vdev_set_param_cmd *cmd;
+ struct sk_buff *skb;
+ int ret;
+
+ skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
+ if (!skb)
+ return -ENOMEM;
+
+ cmd = (struct wmi_vdev_set_param_cmd *)skb->data;
+ cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_VDEV_SET_PARAM_CMD) |
+ FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);
+
+ cmd->vdev_id = vdev_id;
+ cmd->param_id = param_id;
+ cmd->param_value = param_value;
+
+ ret = ath11k_wmi_cmd_send(wmi, skb, WMI_VDEV_SET_PARAM_CMDID);
+ if (ret) {
+ ath11k_warn(ar->ab,
+ "failed to send WMI_VDEV_SET_PARAM_CMDID\n");
+ dev_kfree_skb(skb);
+ }
+
+ ath11k_dbg(ar->ab, ATH11K_DBG_WMI,
+ "WMI vdev id 0x%x set param %d value %d\n",
+ vdev_id, param_id, param_value);
+
+ return ret;
+}
+
+int ath11k_wmi_send_stats_request_cmd(struct ath11k *ar,
+ struct stats_request_params *param)
+{
+ struct ath11k_pdev_wmi *wmi = ar->wmi;
+ struct wmi_request_stats_cmd *cmd;
+ struct sk_buff *skb;
+ int ret;
+
+ skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
+ if (!skb)
+ return -ENOMEM;
+
+ cmd = (struct wmi_request_stats_cmd *)skb->data;
+ cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_REQUEST_STATS_CMD) |
+ FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);
+
+ cmd->stats_id = param->stats_id;
+ cmd->vdev_id = param->vdev_id;
+ cmd->pdev_id = param->pdev_id;
+
+ ret = ath11k_wmi_cmd_send(wmi, skb, WMI_REQUEST_STATS_CMDID);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to send WMI_REQUEST_STATS cmd\n");
+ dev_kfree_skb(skb);
+ }
+
+ ath11k_dbg(ar->ab, ATH11K_DBG_WMI,
+ "WMI request stats 0x%x vdev id %d pdev id %d\n",
+ param->stats_id, param->vdev_id, param->pdev_id);
+
+ return ret;
+}
+
+int ath11k_wmi_send_bcn_offload_control_cmd(struct ath11k *ar,
+ u32 vdev_id, u32 bcn_ctrl_op)
+{
+ struct ath11k_pdev_wmi *wmi = ar->wmi;
+ struct wmi_bcn_offload_ctrl_cmd *cmd;
+ struct sk_buff *skb;
+ int ret;
+
+ skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
+ if (!skb)
+ return -ENOMEM;
+
+ cmd = (struct wmi_bcn_offload_ctrl_cmd *)skb->data;
+ cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG,
+ WMI_TAG_BCN_OFFLOAD_CTRL_CMD) |
+ FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);
+
+ cmd->vdev_id = vdev_id;
+ cmd->bcn_ctrl_op = bcn_ctrl_op;
+
+ ath11k_dbg(ar->ab, ATH11K_DBG_WMI,
+ "WMI bcn ctrl offload vdev id %d ctrl_op %d\n",
+ vdev_id, bcn_ctrl_op);
+
+ ret = ath11k_wmi_cmd_send(wmi, skb, WMI_BCN_OFFLOAD_CTRL_CMDID);
+ if (ret) {
+ ath11k_warn(ar->ab,
+ "failed to send WMI_BCN_OFFLOAD_CTRL_CMDID\n");
+ dev_kfree_skb(skb);
+ }
+
+ return ret;
+}
+
+int ath11k_wmi_bcn_tmpl(struct ath11k *ar, u32 vdev_id,
+ struct ieee80211_mutable_offsets *offs,
+ struct sk_buff *bcn)
+{
+ struct ath11k_pdev_wmi *wmi = ar->wmi;
+ struct wmi_bcn_tmpl_cmd *cmd;
+ struct wmi_bcn_prb_info *bcn_prb_info;
+ struct wmi_tlv *tlv;
+ struct sk_buff *skb;
+ void *ptr;
+ int ret, len;
+ size_t aligned_len = roundup(bcn->len, 4);
+
+ len = sizeof(*cmd) + sizeof(*bcn_prb_info) + TLV_HDR_SIZE + aligned_len;
+
+ skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, len);
+ if (!skb)
+ return -ENOMEM;
+
+ cmd = (struct wmi_bcn_tmpl_cmd *)skb->data;
+ cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_BCN_TMPL_CMD) |
+ FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);
+ cmd->vdev_id = vdev_id;
+ cmd->tim_ie_offset = offs->tim_offset;
+ cmd->csa_switch_count_offset = offs->csa_counter_offs[0];
+ cmd->ext_csa_switch_count_offset = offs->csa_counter_offs[1];
+ cmd->buf_len = bcn->len;
+
+ ptr = skb->data + sizeof(*cmd);
+
+ bcn_prb_info = ptr;
+ len = sizeof(*bcn_prb_info);
+ bcn_prb_info->tlv_header = FIELD_PREP(WMI_TLV_TAG,
+ WMI_TAG_BCN_PRB_INFO) |
+ FIELD_PREP(WMI_TLV_LEN, len - TLV_HDR_SIZE);
+ bcn_prb_info->caps = 0;
+ bcn_prb_info->erp = 0;
+
+ ptr += sizeof(*bcn_prb_info);
+
+ tlv = ptr;
+ tlv->header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_ARRAY_BYTE) |
+ FIELD_PREP(WMI_TLV_LEN, aligned_len);
+ memcpy(tlv->value, bcn->data, bcn->len);
+
+ ret = ath11k_wmi_cmd_send(wmi, skb, WMI_BCN_TMPL_CMDID);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to send WMI_BCN_TMPL_CMDID\n");
+ dev_kfree_skb(skb);
+ }
+
+ return ret;
+}
+
+int ath11k_wmi_vdev_install_key(struct ath11k *ar,
+ struct wmi_vdev_install_key_arg *arg)
+{
+ struct ath11k_pdev_wmi *wmi = ar->wmi;
+ struct wmi_vdev_install_key_cmd *cmd;
+ struct wmi_tlv *tlv;
+ struct sk_buff *skb;
+ int ret, len;
+ int key_len_aligned = roundup(arg->key_len, sizeof(uint32_t));
+
+ len = sizeof(*cmd) + TLV_HDR_SIZE + key_len_aligned;
+
+ skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, len);
+ if (!skb)
+ return -ENOMEM;
+
+ cmd = (struct wmi_vdev_install_key_cmd *)skb->data;
+ cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_VDEV_INSTALL_KEY_CMD) |
+ FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);
+ cmd->vdev_id = arg->vdev_id;
+ ether_addr_copy(cmd->peer_macaddr.addr, arg->macaddr);
+ cmd->key_idx = arg->key_idx;
+ cmd->key_flags = arg->key_flags;
+ cmd->key_cipher = arg->key_cipher;
+ cmd->key_len = arg->key_len;
+ cmd->key_txmic_len = arg->key_txmic_len;
+ cmd->key_rxmic_len = arg->key_rxmic_len;
+
+ if (arg->key_rsc_counter)
+ memcpy(&cmd->key_rsc_counter, &arg->key_rsc_counter,
+ sizeof(struct wmi_key_seq_counter));
+
+ tlv = (struct wmi_tlv *)(skb->data + sizeof(*cmd));
+ tlv->header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_ARRAY_BYTE) |
+ FIELD_PREP(WMI_TLV_LEN, key_len_aligned);
+ memcpy(tlv->value, (u8 *)arg->key_data, key_len_aligned);
+
+ ret = ath11k_wmi_cmd_send(wmi, skb, WMI_VDEV_INSTALL_KEY_CMDID);
+ if (ret) {
+ ath11k_warn(ar->ab,
+ "failed to send WMI_VDEV_INSTALL_KEY cmd\n");
+ dev_kfree_skb(skb);
+ }
+
+ ath11k_dbg(ar->ab, ATH11K_DBG_WMI,
+ "WMI vdev install key idx %d cipher %d len %d\n",
+ arg->key_idx, arg->key_cipher, arg->key_len);
+
+ return ret;
+}
+
+static inline void
+ath11k_wmi_copy_peer_flags(struct wmi_peer_assoc_complete_cmd *cmd,
+ struct peer_assoc_params *param)
+{
+ cmd->peer_flags = 0;
+
+ if (param->is_wme_set) {
+ if (param->qos_flag)
+ cmd->peer_flags |= WMI_PEER_QOS;
+ if (param->apsd_flag)
+ cmd->peer_flags |= WMI_PEER_APSD;
+ if (param->ht_flag)
+ cmd->peer_flags |= WMI_PEER_HT;
+ if (param->bw_40)
+ cmd->peer_flags |= WMI_PEER_40MHZ;
+ if (param->bw_80)
+ cmd->peer_flags |= WMI_PEER_80MHZ;
+ if (param->bw_160)
+ cmd->peer_flags |= WMI_PEER_160MHZ;
+
+ /* Typically if STBC is enabled for VHT it should be enabled
+ * for HT as well.
+ */
+ if (param->stbc_flag)
+ cmd->peer_flags |= WMI_PEER_STBC;
+
+ /* Typically if LDPC is enabled for VHT it should be enabled
+ * for HT as well.
+ */
+ if (param->ldpc_flag)
+ cmd->peer_flags |= WMI_PEER_LDPC;
+
+ if (param->static_mimops_flag)
+ cmd->peer_flags |= WMI_PEER_STATIC_MIMOPS;
+ if (param->dynamic_mimops_flag)
+ cmd->peer_flags |= WMI_PEER_DYN_MIMOPS;
+ if (param->spatial_mux_flag)
+ cmd->peer_flags |= WMI_PEER_SPATIAL_MUX;
+ if (param->vht_flag)
+ cmd->peer_flags |= WMI_PEER_VHT;
+ if (param->he_flag)
+ cmd->peer_flags |= WMI_PEER_HE;
+ if (param->twt_requester)
+ cmd->peer_flags |= WMI_PEER_TWT_REQ;
+ if (param->twt_responder)
+ cmd->peer_flags |= WMI_PEER_TWT_RESP;
+ }
+
+ /* Suppress authorization for all AUTH modes that need 4-way handshake
+ * (during re-association).
+ * Authorization will be done for these modes on key installation.
+ */
+ if (param->auth_flag)
+ cmd->peer_flags |= WMI_PEER_AUTH;
+ if (param->need_ptk_4_way)
+ cmd->peer_flags |= WMI_PEER_NEED_PTK_4_WAY;
+ else
+ cmd->peer_flags &= ~WMI_PEER_NEED_PTK_4_WAY;
+ if (param->need_gtk_2_way)
+ cmd->peer_flags |= WMI_PEER_NEED_GTK_2_WAY;
+ /* safe mode bypass the 4-way handshake */
+ if (param->safe_mode_enabled)
+ cmd->peer_flags &= ~(WMI_PEER_NEED_PTK_4_WAY |
+ WMI_PEER_NEED_GTK_2_WAY);
+
+ if (param->is_pmf_enabled)
+ cmd->peer_flags |= WMI_PEER_PMF;
+
+ /* Disable AMSDU for station transmit, if user configures it.
+ * Disable AMSDU for AP transmit to 11n stations, if user configures it.
+ * if (param->amsdu_disable) Add after FW support
+ */
+
+ /* Target asserts if node is marked HT and all MCS is set to 0.
+ * Mark the node as non-HT if all the mcs rates are disabled through
+ * iwpriv.
+ */
+ if (param->peer_ht_rates.num_rates == 0)
+ cmd->peer_flags &= ~WMI_PEER_HT;
+}
+
+int ath11k_wmi_send_peer_assoc_cmd(struct ath11k *ar,
+ struct peer_assoc_params *param)
+{
+ struct ath11k_pdev_wmi *wmi = ar->wmi;
+ struct wmi_peer_assoc_complete_cmd *cmd;
+ struct wmi_vht_rate_set *mcs;
+ struct wmi_he_rate_set *he_mcs;
+ struct sk_buff *skb;
+ struct wmi_tlv *tlv;
+ void *ptr;
+ u32 peer_legacy_rates_align;
+ u32 peer_ht_rates_align;
+ int i, ret, len;
+
+ peer_legacy_rates_align = roundup(param->peer_legacy_rates.num_rates,
+ sizeof(u32));
+ peer_ht_rates_align = roundup(param->peer_ht_rates.num_rates,
+ sizeof(u32));
+
+ len = sizeof(*cmd) +
+ TLV_HDR_SIZE + (peer_legacy_rates_align * sizeof(u8)) +
+ TLV_HDR_SIZE + (peer_ht_rates_align * sizeof(u8)) +
+ sizeof(*mcs) + TLV_HDR_SIZE +
+ (sizeof(*he_mcs) * param->peer_he_mcs_count);
+
+ skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, len);
+ if (!skb)
+ return -ENOMEM;
+
+ ptr = skb->data;
+
+ cmd = ptr;
+ cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG,
+ WMI_TAG_PEER_ASSOC_COMPLETE_CMD) |
+ FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);
+
+ cmd->vdev_id = param->vdev_id;
+
+ cmd->peer_new_assoc = param->peer_new_assoc;
+ cmd->peer_associd = param->peer_associd;
+
+ ath11k_wmi_copy_peer_flags(cmd, param);
+
+ ether_addr_copy(cmd->peer_macaddr.addr, param->peer_mac);
+
+ cmd->peer_rate_caps = param->peer_rate_caps;
+ cmd->peer_caps = param->peer_caps;
+ cmd->peer_listen_intval = param->peer_listen_intval;
+ cmd->peer_ht_caps = param->peer_ht_caps;
+ cmd->peer_max_mpdu = param->peer_max_mpdu;
+ cmd->peer_mpdu_density = param->peer_mpdu_density;
+ cmd->peer_vht_caps = param->peer_vht_caps;
+ cmd->peer_phymode = param->peer_phymode;
+
+ /* Update 11ax capabilities */
+ cmd->peer_he_cap_info = param->peer_he_cap_macinfo[0];
+ cmd->peer_he_cap_info_ext = param->peer_he_cap_macinfo[1];
+ cmd->peer_he_cap_info_internal = param->peer_he_cap_macinfo_internal;
+ cmd->peer_he_ops = param->peer_he_ops;
+ memcpy(&cmd->peer_he_cap_phy, &param->peer_he_cap_phyinfo,
+ sizeof(param->peer_he_cap_phyinfo));
+ memcpy(&cmd->peer_ppet, &param->peer_ppet,
+ sizeof(param->peer_ppet));
+
+ /* Update peer legacy rate information */
+ ptr += sizeof(*cmd);
+
+ tlv = ptr;
+ tlv->header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_ARRAY_BYTE) |
+ FIELD_PREP(WMI_TLV_LEN, peer_legacy_rates_align);
+
+ ptr += TLV_HDR_SIZE;
+
+ cmd->num_peer_legacy_rates = param->peer_legacy_rates.num_rates;
+ memcpy(ptr, param->peer_legacy_rates.rates,
+ param->peer_legacy_rates.num_rates);
+
+ /* Update peer HT rate information */
+ ptr += peer_legacy_rates_align;
+
+ tlv = ptr;
+ tlv->header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_ARRAY_BYTE) |
+ FIELD_PREP(WMI_TLV_LEN, peer_ht_rates_align);
+ ptr += TLV_HDR_SIZE;
+ cmd->num_peer_ht_rates = param->peer_ht_rates.num_rates;
+ memcpy(ptr, param->peer_ht_rates.rates,
+ param->peer_ht_rates.num_rates);
+
+ /* VHT Rates */
+ ptr += peer_ht_rates_align;
+
+ mcs = ptr;
+
+ mcs->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_VHT_RATE_SET) |
+ FIELD_PREP(WMI_TLV_LEN, sizeof(*mcs) - TLV_HDR_SIZE);
+
+ cmd->peer_nss = param->peer_nss;
+
+ /* Update bandwidth-NSS mapping */
+ cmd->peer_bw_rxnss_override = 0;
+ cmd->peer_bw_rxnss_override |= param->peer_bw_rxnss_override;
+
+ if (param->vht_capable) {
+ mcs->rx_max_rate = param->rx_max_rate;
+ mcs->rx_mcs_set = param->rx_mcs_set;
+ mcs->tx_max_rate = param->tx_max_rate;
+ mcs->tx_mcs_set = param->tx_mcs_set;
+ }
+
+ /* HE Rates */
+ cmd->peer_he_mcs = param->peer_he_mcs_count;
+
+ ptr += sizeof(*mcs);
+
+ len = param->peer_he_mcs_count * sizeof(*he_mcs);
+
+ tlv = ptr;
+ tlv->header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_ARRAY_STRUCT) |
+ FIELD_PREP(WMI_TLV_LEN, len);
+ ptr += TLV_HDR_SIZE;
+
+ /* Loop through the HE rate set */
+ for (i = 0; i < param->peer_he_mcs_count; i++) {
+ he_mcs = ptr;
+ he_mcs->tlv_header = FIELD_PREP(WMI_TLV_TAG,
+ WMI_TAG_HE_RATE_SET) |
+ FIELD_PREP(WMI_TLV_LEN,
+ sizeof(*he_mcs) - TLV_HDR_SIZE);
+
+ he_mcs->rx_mcs_set = param->peer_he_rx_mcs_set[i];
+ he_mcs->tx_mcs_set = param->peer_he_tx_mcs_set[i];
+ ptr += sizeof(*he_mcs);
+ }
+
+ /* Log before handing the skb to the transport: ath11k_wmi_cmd_send
+ * consumes the skb on success, so cmd must not be touched afterwards.
+ */
+ ath11k_dbg(ar->ab, ATH11K_DBG_WMI,
+ "wmi peer assoc vdev id %d assoc id %d peer mac %pM peer_flags %x rate_caps %x peer_caps %x listen_intval %d ht_caps %x max_mpdu %d nss %d phymode %d peer_mpdu_density %d vht_caps %x he cap_info %x he ops %x he cap_info_ext %x he phy %x %x %x peer_bw_rxnss_override %x\n",
+ cmd->vdev_id, cmd->peer_associd, param->peer_mac,
+ cmd->peer_flags, cmd->peer_rate_caps, cmd->peer_caps,
+ cmd->peer_listen_intval, cmd->peer_ht_caps,
+ cmd->peer_max_mpdu, cmd->peer_nss, cmd->peer_phymode,
+ cmd->peer_mpdu_density,
+ cmd->peer_vht_caps, cmd->peer_he_cap_info,
+ cmd->peer_he_ops, cmd->peer_he_cap_info_ext,
+ cmd->peer_he_cap_phy[0], cmd->peer_he_cap_phy[1],
+ cmd->peer_he_cap_phy[2],
+ cmd->peer_bw_rxnss_override);
+
+ ret = ath11k_wmi_cmd_send(wmi, skb, WMI_PEER_ASSOC_CMDID);
+ if (ret) {
+ ath11k_warn(ar->ab,
+ "failed to send WMI_PEER_ASSOC_CMDID\n");
+ dev_kfree_skb(skb);
+ }
+
+ return ret;
+}
+
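+/* Fill scan defaults shared by all requests; callers override vdev_id,
+ * scan_id and the channel/SSID lists before sending. A minimal sketch of
+ * the expected call sequence (arvif and ATH11K_SCAN_ID are illustrative
+ * names from the mac layer, not defined in this file):
+ *
+ * struct scan_req_params arg = {};
+ *
+ * ath11k_wmi_start_scan_init(ar, &arg);
+ * arg.vdev_id = arvif->vdev_id;
+ * arg.scan_id = ATH11K_SCAN_ID;
+ * ret = ath11k_wmi_send_scan_start_cmd(ar, &arg);
+ */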
+void ath11k_wmi_start_scan_init(struct ath11k *ar,
+ struct scan_req_params *arg)
+{
+ /* setup commonly used values */
+ arg->scan_req_id = 1;
+ arg->scan_priority = WMI_SCAN_PRIORITY_LOW;
+ arg->dwell_time_active = 50;
+ arg->dwell_time_active_2g = 0;
+ arg->dwell_time_passive = 150;
+ arg->min_rest_time = 50;
+ arg->max_rest_time = 500;
+ arg->repeat_probe_time = 0;
+ arg->probe_spacing_time = 0;
+ arg->idle_time = 0;
+ arg->max_scan_time = 20000;
+ arg->probe_delay = 5;
+ arg->notify_scan_events = WMI_SCAN_EVENT_STARTED |
+ WMI_SCAN_EVENT_COMPLETED |
+ WMI_SCAN_EVENT_BSS_CHANNEL |
+ WMI_SCAN_EVENT_FOREIGN_CHAN |
+ WMI_SCAN_EVENT_DEQUEUED;
+ arg->scan_flags |= WMI_SCAN_CHAN_STAT_EVENT;
+ arg->num_bssid = 1;
+}
+
+static inline void
+ath11k_wmi_copy_scan_event_cntrl_flags(struct wmi_start_scan_cmd *cmd,
+ struct scan_req_params *param)
+{
+ /* Scan events subscription */
+ if (param->scan_ev_started)
+ cmd->notify_scan_events |= WMI_SCAN_EVENT_STARTED;
+ if (param->scan_ev_completed)
+ cmd->notify_scan_events |= WMI_SCAN_EVENT_COMPLETED;
+ if (param->scan_ev_bss_chan)
+ cmd->notify_scan_events |= WMI_SCAN_EVENT_BSS_CHANNEL;
+ if (param->scan_ev_foreign_chan)
+ cmd->notify_scan_events |= WMI_SCAN_EVENT_FOREIGN_CHAN;
+ if (param->scan_ev_dequeued)
+ cmd->notify_scan_events |= WMI_SCAN_EVENT_DEQUEUED;
+ if (param->scan_ev_preempted)
+ cmd->notify_scan_events |= WMI_SCAN_EVENT_PREEMPTED;
+ if (param->scan_ev_start_failed)
+ cmd->notify_scan_events |= WMI_SCAN_EVENT_START_FAILED;
+ if (param->scan_ev_restarted)
+ cmd->notify_scan_events |= WMI_SCAN_EVENT_RESTARTED;
+ if (param->scan_ev_foreign_chn_exit)
+ cmd->notify_scan_events |= WMI_SCAN_EVENT_FOREIGN_CHAN_EXIT;
+ if (param->scan_ev_suspended)
+ cmd->notify_scan_events |= WMI_SCAN_EVENT_SUSPENDED;
+ if (param->scan_ev_resumed)
+ cmd->notify_scan_events |= WMI_SCAN_EVENT_RESUMED;
+
+ /* Set scan control flags */
+ cmd->scan_ctrl_flags = 0;
+ if (param->scan_f_passive)
+ cmd->scan_ctrl_flags |= WMI_SCAN_FLAG_PASSIVE;
+ if (param->scan_f_strict_passive_pch)
+ cmd->scan_ctrl_flags |= WMI_SCAN_FLAG_STRICT_PASSIVE_ON_PCHN;
+ if (param->scan_f_promisc_mode)
+ cmd->scan_ctrl_flags |= WMI_SCAN_FILTER_PROMISCUOS;
+ if (param->scan_f_capture_phy_err)
+ cmd->scan_ctrl_flags |= WMI_SCAN_CAPTURE_PHY_ERROR;
+ if (param->scan_f_half_rate)
+ cmd->scan_ctrl_flags |= WMI_SCAN_FLAG_HALF_RATE_SUPPORT;
+ if (param->scan_f_quarter_rate)
+ cmd->scan_ctrl_flags |= WMI_SCAN_FLAG_QUARTER_RATE_SUPPORT;
+ if (param->scan_f_cck_rates)
+ cmd->scan_ctrl_flags |= WMI_SCAN_ADD_CCK_RATES;
+ if (param->scan_f_ofdm_rates)
+ cmd->scan_ctrl_flags |= WMI_SCAN_ADD_OFDM_RATES;
+ if (param->scan_f_chan_stat_evnt)
+ cmd->scan_ctrl_flags |= WMI_SCAN_CHAN_STAT_EVENT;
+ if (param->scan_f_filter_prb_req)
+ cmd->scan_ctrl_flags |= WMI_SCAN_FILTER_PROBE_REQ;
+ if (param->scan_f_bcast_probe)
+ cmd->scan_ctrl_flags |= WMI_SCAN_ADD_BCAST_PROBE_REQ;
+ if (param->scan_f_offchan_mgmt_tx)
+ cmd->scan_ctrl_flags |= WMI_SCAN_OFFCHAN_MGMT_TX;
+ if (param->scan_f_offchan_data_tx)
+ cmd->scan_ctrl_flags |= WMI_SCAN_OFFCHAN_DATA_TX;
+ if (param->scan_f_force_active_dfs_chn)
+ cmd->scan_ctrl_flags |= WMI_SCAN_FLAG_FORCE_ACTIVE_ON_DFS;
+ if (param->scan_f_add_tpc_ie_in_probe)
+ cmd->scan_ctrl_flags |= WMI_SCAN_ADD_TPC_IE_IN_PROBE_REQ;
+ if (param->scan_f_add_ds_ie_in_probe)
+ cmd->scan_ctrl_flags |= WMI_SCAN_ADD_DS_IE_IN_PROBE_REQ;
+ if (param->scan_f_add_spoofed_mac_in_probe)
+ cmd->scan_ctrl_flags |= WMI_SCAN_ADD_SPOOF_MAC_IN_PROBE_REQ;
+ if (param->scan_f_add_rand_seq_in_probe)
+ cmd->scan_ctrl_flags |= WMI_SCAN_RANDOM_SEQ_NO_IN_PROBE_REQ;
+ if (param->scan_f_en_ie_whitelist_in_probe)
+ cmd->scan_ctrl_flags |=
+ WMI_SCAN_ENABLE_IE_WHTELIST_IN_PROBE_REQ;
+
+ /* for adaptive scan mode using 3 bits (21 - 23 bits) */
+ WMI_SCAN_SET_DWELL_MODE(cmd->scan_ctrl_flags,
+ param->adaptive_dwell_time_mode);
+}
+
+int ath11k_wmi_send_scan_start_cmd(struct ath11k *ar,
+ struct scan_req_params *params)
+{
+ struct ath11k_pdev_wmi *wmi = ar->wmi;
+ struct wmi_start_scan_cmd *cmd;
+ struct wmi_ssid *ssid = NULL;
+ struct wmi_mac_addr *bssid;
+ struct sk_buff *skb;
+ struct wmi_tlv *tlv;
+ void *ptr;
+ int i, ret, len;
+ u32 *tmp_ptr;
+ u32 extraie_len_with_pad = 0;
+
+ len = sizeof(*cmd);
+
+ len += TLV_HDR_SIZE;
+ if (params->num_chan)
+ len += params->num_chan * sizeof(u32);
+
+ len += TLV_HDR_SIZE;
+ if (params->num_ssids)
+ len += params->num_ssids * sizeof(*ssid);
+
+ len += TLV_HDR_SIZE;
+ if (params->num_bssid)
+ len += sizeof(*bssid) * params->num_bssid;
+
+ len += TLV_HDR_SIZE;
+ if (params->extraie.len)
+ extraie_len_with_pad =
+ roundup(params->extraie.len, sizeof(u32));
+ len += extraie_len_with_pad;
+
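+ /* A sketch of the TLV layout sized above: the fixed start_scan_cmd is
+ * followed by four variable-length arrays, each behind its own TLV
+ * header - channel list (u32), SSID list, BSSID list and extra IEs
+ * (bytes, padded to a u32 boundary). Empty arrays still carry a
+ * header with zero length.
+ */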
+ skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, len);
+ if (!skb)
+ return -ENOMEM;
+
+ ptr = skb->data;
+
+ cmd = ptr;
+ cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_START_SCAN_CMD) |
+ FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);
+
+ cmd->scan_id = params->scan_id;
+ cmd->scan_req_id = params->scan_req_id;
+ cmd->vdev_id = params->vdev_id;
+ cmd->scan_priority = params->scan_priority;
+ cmd->notify_scan_events = params->notify_scan_events;
+
+ ath11k_wmi_copy_scan_event_cntrl_flags(cmd, params);
+
+ cmd->dwell_time_active = params->dwell_time_active;
+ cmd->dwell_time_active_2g = params->dwell_time_active_2g;
+ cmd->dwell_time_passive = params->dwell_time_passive;
+ cmd->min_rest_time = params->min_rest_time;
+ cmd->max_rest_time = params->max_rest_time;
+ cmd->repeat_probe_time = params->repeat_probe_time;
+ cmd->probe_spacing_time = params->probe_spacing_time;
+ cmd->idle_time = params->idle_time;
+ cmd->max_scan_time = params->max_scan_time;
+ cmd->probe_delay = params->probe_delay;
+ cmd->burst_duration = params->burst_duration;
+ cmd->num_chan = params->num_chan;
+ cmd->num_bssid = params->num_bssid;
+ cmd->num_ssids = params->num_ssids;
+ cmd->ie_len = params->extraie.len;
+ cmd->n_probes = params->n_probes;
+
+ ptr += sizeof(*cmd);
+
+ len = params->num_chan * sizeof(u32);
+
+ tlv = ptr;
+ tlv->header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_ARRAY_UINT32) |
+ FIELD_PREP(WMI_TLV_LEN, len);
+ ptr += TLV_HDR_SIZE;
+ tmp_ptr = (u32 *)ptr;
+
+ for (i = 0; i < params->num_chan; ++i)
+ tmp_ptr[i] = params->chan_list[i];
+
+ ptr += len;
+
+ len = params->num_ssids * sizeof(*ssid);
+ tlv = ptr;
+ tlv->header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_ARRAY_FIXED_STRUCT) |
+ FIELD_PREP(WMI_TLV_LEN, len);
+
+ ptr += TLV_HDR_SIZE;
+
+ if (params->num_ssids) {
+ ssid = ptr;
+ for (i = 0; i < params->num_ssids; ++i) {
+ ssid->ssid_len = params->ssid[i].length;
+ memcpy(ssid->ssid, params->ssid[i].ssid,
+ params->ssid[i].length);
+ ssid++;
+ }
+ }
+
+ ptr += (params->num_ssids * sizeof(*ssid));
+ len = params->num_bssid * sizeof(*bssid);
+ tlv = ptr;
+ tlv->header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_ARRAY_FIXED_STRUCT) |
+ FIELD_PREP(WMI_TLV_LEN, len);
+
+ ptr += TLV_HDR_SIZE;
+ bssid = ptr;
+
+ if (params->num_bssid) {
+ for (i = 0; i < params->num_bssid; ++i) {
+ ether_addr_copy(bssid->addr,
+ params->bssid_list[i].addr);
+ bssid++;
+ }
+ }
+
+ ptr += params->num_bssid * sizeof(*bssid);
+
+ len = extraie_len_with_pad;
+ tlv = ptr;
+ tlv->header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_ARRAY_BYTE) |
+ FIELD_PREP(WMI_TLV_LEN, len);
+ ptr += TLV_HDR_SIZE;
+
+ if (params->extraie.len)
+ memcpy(ptr, params->extraie.ptr,
+ params->extraie.len);
+
+ ptr += extraie_len_with_pad;
+
+ ret = ath11k_wmi_cmd_send(wmi, skb,
+ WMI_START_SCAN_CMDID);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to send WMI_START_SCAN_CMDID\n");
+ dev_kfree_skb(skb);
+ }
+
+ return ret;
+}
+
+int ath11k_wmi_send_scan_stop_cmd(struct ath11k *ar,
+ struct scan_cancel_param *param)
+{
+ struct ath11k_pdev_wmi *wmi = ar->wmi;
+ struct wmi_stop_scan_cmd *cmd;
+ struct sk_buff *skb;
+ int ret;
+
+ skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
+ if (!skb)
+ return -ENOMEM;
+
+ cmd = (struct wmi_stop_scan_cmd *)skb->data;
+
+ cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_STOP_SCAN_CMD) |
+ FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);
+
+ cmd->vdev_id = param->vdev_id;
+ cmd->requestor = param->requester;
+ cmd->scan_id = param->scan_id;
+ cmd->pdev_id = param->pdev_id;
+ /* Map the host cancel request onto the firmware stop-scan type */
+ if (param->req_type == WLAN_SCAN_CANCEL_PDEV_ALL) {
+ /* Cancelling all scans */
+ cmd->req_type = WMI_SCAN_STOP_ALL;
+ } else if (param->req_type == WLAN_SCAN_CANCEL_VDEV_ALL) {
+ /* Cancelling VAP scans */
+ cmd->req_type = WMI_SCN_STOP_VAP_ALL;
+ } else if (param->req_type == WLAN_SCAN_CANCEL_SINGLE) {
+ /* Cancelling specific scan */
+ cmd->req_type = WMI_SCAN_STOP_ONE;
+ } else {
+ ath11k_warn(ar->ab, "invalid scan cancel param %d",
+ param->req_type);
+ dev_kfree_skb(skb);
+ return -EINVAL;
+ }
+
+ ret = ath11k_wmi_cmd_send(wmi, skb,
+ WMI_STOP_SCAN_CMDID);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to send WMI_STOP_SCAN_CMDID\n");
+ dev_kfree_skb(skb);
+ }
+
+ return ret;
+}
+
+int ath11k_wmi_send_scan_chan_list_cmd(struct ath11k *ar,
+ struct scan_chan_list_params *chan_list)
+{
+ struct ath11k_pdev_wmi *wmi = ar->wmi;
+ struct wmi_scan_chan_list_cmd *cmd;
+ struct sk_buff *skb;
+ struct wmi_channel *chan_info;
+ struct channel_param *tchan_info;
+ struct wmi_tlv *tlv;
+ void *ptr;
+ int i, ret, len;
+ u32 *reg1, *reg2;
+
+ len = sizeof(*cmd) + TLV_HDR_SIZE +
+ sizeof(*chan_info) * chan_list->nallchans;
+
+ skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, len);
+ if (!skb)
+ return -ENOMEM;
+
+ cmd = (struct wmi_scan_chan_list_cmd *)skb->data;
+ cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_SCAN_CHAN_LIST_CMD) |
+ FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);
+
+ ath11k_dbg(ar->ab, ATH11K_DBG_WMI,
+ "WMI no.of chan = %d len = %d\n", chan_list->nallchans, len);
+ cmd->pdev_id = chan_list->pdev_id;
+ cmd->num_scan_chans = chan_list->nallchans;
+
+ ptr = skb->data + sizeof(*cmd);
+
+ len = sizeof(*chan_info) * chan_list->nallchans;
+ tlv = ptr;
+ tlv->header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_ARRAY_STRUCT) |
+ FIELD_PREP(WMI_TLV_LEN, len - TLV_HDR_SIZE);
+ ptr += TLV_HDR_SIZE;
+
+ tchan_info = &chan_list->ch_param[0];
+
+ for (i = 0; i < chan_list->nallchans; ++i) {
+ chan_info = ptr;
+ memset(chan_info, 0, sizeof(*chan_info));
+ len = sizeof(*chan_info);
+ chan_info->tlv_header = FIELD_PREP(WMI_TLV_TAG,
+ WMI_TAG_CHANNEL) |
+ FIELD_PREP(WMI_TLV_LEN,
+ len - TLV_HDR_SIZE);
+
+ reg1 = &chan_info->reg_info_1;
+ reg2 = &chan_info->reg_info_2;
+ chan_info->mhz = tchan_info->mhz;
+ chan_info->band_center_freq1 = tchan_info->cfreq1;
+ chan_info->band_center_freq2 = tchan_info->cfreq2;
+
+ if (tchan_info->is_chan_passive)
+ chan_info->info |= WMI_CHAN_INFO_PASSIVE;
+ if (tchan_info->allow_he)
+ chan_info->info |= WMI_CHAN_INFO_ALLOW_HE;
+ else if (tchan_info->allow_vht)
+ chan_info->info |= WMI_CHAN_INFO_ALLOW_VHT;
+ else if (tchan_info->allow_ht)
+ chan_info->info |= WMI_CHAN_INFO_ALLOW_HT;
+ if (tchan_info->half_rate)
+ chan_info->info |= WMI_CHAN_INFO_HALF_RATE;
+ if (tchan_info->quarter_rate)
+ chan_info->info |= WMI_CHAN_INFO_QUARTER_RATE;
+
+ chan_info->info |= FIELD_PREP(WMI_CHAN_INFO_MODE,
+ tchan_info->phy_mode);
+ *reg1 |= FIELD_PREP(WMI_CHAN_REG_INFO1_MIN_PWR,
+ tchan_info->minpower);
+ *reg1 |= FIELD_PREP(WMI_CHAN_REG_INFO1_MAX_PWR,
+ tchan_info->maxpower);
+ *reg1 |= FIELD_PREP(WMI_CHAN_REG_INFO1_MAX_REG_PWR,
+ tchan_info->maxregpower);
+ *reg1 |= FIELD_PREP(WMI_CHAN_REG_INFO1_REG_CLS,
+ tchan_info->reg_class_id);
+ *reg2 |= FIELD_PREP(WMI_CHAN_REG_INFO2_ANT_MAX,
+ tchan_info->antennamax);
+
+ ath11k_dbg(ar->ab, ATH11K_DBG_WMI,
+ "WMI chan scan list chan[%d] = %u\n",
+ i, chan_info->mhz);
+
+ ptr += sizeof(*chan_info);
+
+ tchan_info++;
+ }
+
+ ret = ath11k_wmi_cmd_send(wmi, skb, WMI_SCAN_CHAN_LIST_CMDID);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to send WMI_SCAN_CHAN_LIST cmd\n");
+ dev_kfree_skb(skb);
+ }
+
+ return ret;
+}
+
+int ath11k_wmi_send_wmm_update_cmd_tlv(struct ath11k *ar, u32 vdev_id,
+ struct wmi_wmm_params_all_arg *param)
+{
+ struct ath11k_pdev_wmi *wmi = ar->wmi;
+ struct wmi_vdev_set_wmm_params_cmd *cmd;
+ struct wmi_wmm_params *wmm_param;
+ struct wmi_wmm_params_arg *wmi_wmm_arg;
+ struct sk_buff *skb;
+ int ret, ac;
+
+ skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
+ if (!skb)
+ return -ENOMEM;
+
+ cmd = (struct wmi_vdev_set_wmm_params_cmd *)skb->data;
+ cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG,
+ WMI_TAG_VDEV_SET_WMM_PARAMS_CMD) |
+ FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);
+
+ cmd->vdev_id = vdev_id;
+ cmd->wmm_param_type = 0;
+
+ for (ac = 0; ac < WME_NUM_AC; ac++) {
+ switch (ac) {
+ case WME_AC_BE:
+ wmi_wmm_arg = &param->ac_be;
+ break;
+ case WME_AC_BK:
+ wmi_wmm_arg = &param->ac_bk;
+ break;
+ case WME_AC_VI:
+ wmi_wmm_arg = &param->ac_vi;
+ break;
+ case WME_AC_VO:
+ wmi_wmm_arg = &param->ac_vo;
+ break;
+ }
+
+ wmm_param = (struct wmi_wmm_params *)&cmd->wmm_params[ac];
+ wmm_param->tlv_header =
+ FIELD_PREP(WMI_TLV_TAG,
+ WMI_TAG_VDEV_SET_WMM_PARAMS_CMD) |
+ FIELD_PREP(WMI_TLV_LEN,
+ sizeof(*wmm_param) - TLV_HDR_SIZE);
+
+ wmm_param->aifs = wmi_wmm_arg->aifs;
+ wmm_param->cwmin = wmi_wmm_arg->cwmin;
+ wmm_param->cwmax = wmi_wmm_arg->cwmax;
+ wmm_param->txoplimit = wmi_wmm_arg->txop;
+ wmm_param->acm = wmi_wmm_arg->acm;
+ wmm_param->no_ack = wmi_wmm_arg->no_ack;
+
+ ath11k_dbg(ar->ab, ATH11K_DBG_WMI,
+ "wmi wmm set ac %d aifs %d cwmin %d cwmax %d txop %d acm %d no_ack %d\n",
+ ac, wmm_param->aifs, wmm_param->cwmin,
+ wmm_param->cwmax, wmm_param->txoplimit,
+ wmm_param->acm, wmm_param->no_ack);
+ }
+ ret = ath11k_wmi_cmd_send(wmi, skb,
+ WMI_VDEV_SET_WMM_PARAMS_CMDID);
+ if (ret) {
+ ath11k_warn(ar->ab,
+ "failed to send WMI_VDEV_SET_WMM_PARAMS_CMDID");
+ dev_kfree_skb(skb);
+ }
+
+ return ret;
+}
+
+int ath11k_wmi_send_dfs_phyerr_offload_enable_cmd(struct ath11k *ar,
+ u32 pdev_id)
+{
+ struct ath11k_pdev_wmi *wmi = ar->wmi;
+ struct wmi_dfs_phyerr_offload_cmd *cmd;
+ struct sk_buff *skb;
+ int ret;
+
+ skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
+ if (!skb)
+ return -ENOMEM;
+
+ cmd = (struct wmi_dfs_phyerr_offload_cmd *)skb->data;
+ cmd->tlv_header =
+ FIELD_PREP(WMI_TLV_TAG,
+ WMI_TAG_PDEV_DFS_PHYERR_OFFLOAD_ENABLE_CMD) |
+ FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);
+
+ cmd->pdev_id = pdev_id;
+
+ ath11k_dbg(ar->ab, ATH11K_DBG_WMI,
+ "WMI dfs phy err offload enable pdev id %d\n", pdev_id);
+
+ ret = ath11k_wmi_cmd_send(wmi, skb,
+ WMI_PDEV_DFS_PHYERR_OFFLOAD_ENABLE_CMDID);
+ if (ret) {
+ ath11k_warn(ar->ab,
+ "failed to send WMI_PDEV_DFS_PHYERR_OFFLOAD_ENABLE cmd\n");
+ dev_kfree_skb(skb);
+ }
+
+ return ret;
+}
+
+int ath11k_wmi_pdev_peer_pktlog_filter(struct ath11k *ar, u8 *addr, u8 enable)
+{
+ struct ath11k_pdev_wmi *wmi = ar->wmi;
+ struct wmi_pdev_pktlog_filter_cmd *cmd;
+ struct wmi_pdev_pktlog_filter_info *info;
+ struct sk_buff *skb;
+ struct wmi_tlv *tlv;
+ void *ptr;
+ int ret, len;
+
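+ /* Layout: the filter cmd followed by a one-element TLV array carrying
+ * the peer MAC to filter on; num_mac is fixed at 1 below.
+ */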
+ len = sizeof(*cmd) + sizeof(*info) + TLV_HDR_SIZE;
+ skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, len);
+ if (!skb)
+ return -ENOMEM;
+
+ cmd = (struct wmi_pdev_pktlog_filter_cmd *)skb->data;
+
+ cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_PDEV_PEER_PKTLOG_FILTER_CMD) |
+ FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);
+
+ cmd->pdev_id = DP_HW2SW_MACID(ar->pdev->pdev_id);
+ cmd->num_mac = 1;
+ cmd->enable = enable;
+
+ ptr = skb->data + sizeof(*cmd);
+
+ tlv = ptr;
+ tlv->header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_ARRAY_STRUCT) |
+ FIELD_PREP(WMI_TLV_LEN, sizeof(*info));
+
+ ptr += TLV_HDR_SIZE;
+ info = ptr;
+
+ ether_addr_copy(info->peer_macaddr.addr, addr);
+ info->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_PDEV_PEER_PKTLOG_FILTER_INFO) |
+ FIELD_PREP(WMI_TLV_LEN,
+ sizeof(*info) - TLV_HDR_SIZE);
+
+ ret = ath11k_wmi_cmd_send(wmi, skb,
+ WMI_PDEV_PKTLOG_FILTER_CMDID);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to send WMI_PDEV_PKTLOG_ENABLE_CMDID\n");
+ dev_kfree_skb(skb);
+ }
+
+ return ret;
+}
+
+int
+ath11k_wmi_send_init_country_cmd(struct ath11k *ar,
+ struct wmi_init_country_params init_cc_params)
+{
+ struct ath11k_pdev_wmi *wmi = ar->wmi;
+ struct wmi_init_country_cmd *cmd;
+ struct sk_buff *skb;
+ int ret;
+
+ skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
+ if (!skb)
+ return -ENOMEM;
+
+ cmd = (struct wmi_init_country_cmd *)skb->data;
+ cmd->tlv_header =
+ FIELD_PREP(WMI_TLV_TAG,
+ WMI_TAG_SET_INIT_COUNTRY_CMD) |
+ FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);
+
+ cmd->pdev_id = ar->pdev->pdev_id;
+
+ switch (init_cc_params.flags) {
+ case ALPHA_IS_SET:
+ cmd->init_cc_type = WMI_COUNTRY_INFO_TYPE_ALPHA;
+ memcpy((u8 *)&cmd->cc_info.alpha2,
+ init_cc_params.cc_info.alpha2, 3);
+ break;
+ case CC_IS_SET:
+ cmd->init_cc_type = WMI_COUNTRY_INFO_TYPE_COUNTRY_CODE;
+ cmd->cc_info.country_code = init_cc_params.cc_info.country_code;
+ break;
+ case REGDMN_IS_SET:
+ cmd->init_cc_type = WMI_COUNTRY_INFO_TYPE_REGDOMAIN;
+ cmd->cc_info.regdom_id = init_cc_params.cc_info.regdom_id;
+ break;
+ default:
+ ret = -EINVAL;
+ goto out;
+ }
+
+ ret = ath11k_wmi_cmd_send(wmi, skb,
+ WMI_SET_INIT_COUNTRY_CMDID);
+
+out:
+ if (ret) {
+ ath11k_warn(ar->ab,
+ "failed to send WMI_SET_INIT_COUNTRY CMD :%d\n",
+ ret);
+ dev_kfree_skb(skb);
+ }
+
+ return ret;
+}
+
+int ath11k_wmi_pdev_pktlog_enable(struct ath11k *ar, u32 pktlog_filter)
+{
+ struct ath11k_pdev_wmi *wmi = ar->wmi;
+ struct wmi_pktlog_enable_cmd *cmd;
+ struct sk_buff *skb;
+ int ret;
+
+ skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
+ if (!skb)
+ return -ENOMEM;
+
+ cmd = (struct wmi_pktlog_enable_cmd *)skb->data;
+
+ cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_PDEV_PKTLOG_ENABLE_CMD) |
+ FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);
+
+ cmd->pdev_id = DP_HW2SW_MACID(ar->pdev->pdev_id);
+ cmd->evlist = pktlog_filter;
+ cmd->enable = ATH11K_WMI_PKTLOG_ENABLE_FORCE;
+
+ ret = ath11k_wmi_cmd_send(wmi, skb,
+ WMI_PDEV_PKTLOG_ENABLE_CMDID);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to send WMI_PDEV_PKTLOG_ENABLE_CMDID\n");
+ dev_kfree_skb(skb);
+ }
+
+ return ret;
+}
+
+int ath11k_wmi_pdev_pktlog_disable(struct ath11k *ar)
+{
+ struct ath11k_pdev_wmi *wmi = ar->wmi;
+ struct wmi_pktlog_disable_cmd *cmd;
+ struct sk_buff *skb;
+ int ret;
+
+ skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
+ if (!skb)
+ return -ENOMEM;
+
+ cmd = (struct wmi_pktlog_disable_cmd *)skb->data;
+
+ cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_PDEV_PKTLOG_DISABLE_CMD) |
+ FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);
+
+ cmd->pdev_id = DP_HW2SW_MACID(ar->pdev->pdev_id);
+
+ ret = ath11k_wmi_cmd_send(wmi, skb,
+ WMI_PDEV_PKTLOG_DISABLE_CMDID);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to send WMI_PDEV_PKTLOG_ENABLE_CMDID\n");
+ dev_kfree_skb(skb);
+ }
+
+ return ret;
+}
+
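+/* Enable TWT (target wake time) on a pdev. The ATH11K_TWT_DEF_* values
+ * below are driver-chosen defaults for the firmware's TWT scheduler
+ * tunables.
+ */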
+int
+ath11k_wmi_send_twt_enable_cmd(struct ath11k *ar, u32 pdev_id)
+{
+ struct ath11k_pdev_wmi *wmi = ar->wmi;
+ struct ath11k_base *ab = wmi->wmi_ab->ab;
+ struct wmi_twt_enable_params_cmd *cmd;
+ struct sk_buff *skb;
+ int ret, len;
+
+ len = sizeof(*cmd);
+
+ skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, len);
+ if (!skb)
+ return -ENOMEM;
+
+ cmd = (struct wmi_twt_enable_params_cmd *)skb->data;
+ cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_TWT_ENABLE_CMD) |
+ FIELD_PREP(WMI_TLV_LEN, len - TLV_HDR_SIZE);
+ cmd->pdev_id = pdev_id;
+ cmd->sta_cong_timer_ms = ATH11K_TWT_DEF_STA_CONG_TIMER_MS;
+ cmd->default_slot_size = ATH11K_TWT_DEF_DEFAULT_SLOT_SIZE;
+ cmd->congestion_thresh_setup = ATH11K_TWT_DEF_CONGESTION_THRESH_SETUP;
+ cmd->congestion_thresh_teardown =
+ ATH11K_TWT_DEF_CONGESTION_THRESH_TEARDOWN;
+ cmd->congestion_thresh_critical =
+ ATH11K_TWT_DEF_CONGESTION_THRESH_CRITICAL;
+ cmd->interference_thresh_teardown =
+ ATH11K_TWT_DEF_INTERFERENCE_THRESH_TEARDOWN;
+ cmd->interference_thresh_setup =
+ ATH11K_TWT_DEF_INTERFERENCE_THRESH_SETUP;
+ cmd->min_no_sta_setup = ATH11K_TWT_DEF_MIN_NO_STA_SETUP;
+ cmd->min_no_sta_teardown = ATH11K_TWT_DEF_MIN_NO_STA_TEARDOWN;
+ cmd->no_of_bcast_mcast_slots = ATH11K_TWT_DEF_NO_OF_BCAST_MCAST_SLOTS;
+ cmd->min_no_twt_slots = ATH11K_TWT_DEF_MIN_NO_TWT_SLOTS;
+ cmd->max_no_sta_twt = ATH11K_TWT_DEF_MAX_NO_STA_TWT;
+ cmd->mode_check_interval = ATH11K_TWT_DEF_MODE_CHECK_INTERVAL;
+ cmd->add_sta_slot_interval = ATH11K_TWT_DEF_ADD_STA_SLOT_INTERVAL;
+ cmd->remove_sta_slot_interval =
+ ATH11K_TWT_DEF_REMOVE_STA_SLOT_INTERVAL;
+ /* TODO add MBSSID support */
+ cmd->mbss_support = 0;
+
+ ret = ath11k_wmi_cmd_send(wmi, skb,
+ WMI_TWT_ENABLE_CMDID);
+ if (ret) {
+ ath11k_warn(ab, "Failed to send WMI_TWT_ENABLE_CMDID");
+ dev_kfree_skb(skb);
+ }
+ return ret;
+}
+
+int
+ath11k_wmi_send_twt_disable_cmd(struct ath11k *ar, u32 pdev_id)
+{
+ struct ath11k_pdev_wmi *wmi = ar->wmi;
+ struct ath11k_base *ab = wmi->wmi_ab->ab;
+ struct wmi_twt_disable_params_cmd *cmd;
+ struct sk_buff *skb;
+ int ret, len;
+
+ len = sizeof(*cmd);
+
+ skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, len);
+ if (!skb)
+ return -ENOMEM;
+
+ cmd = (struct wmi_twt_disable_params_cmd *)skb->data;
+ cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_TWT_DISABLE_CMD) |
+ FIELD_PREP(WMI_TLV_LEN, len - TLV_HDR_SIZE);
+ cmd->pdev_id = pdev_id;
+
+ ret = ath11k_wmi_cmd_send(wmi, skb,
+ WMI_TWT_DISABLE_CMDID);
+ if (ret) {
+ ath11k_warn(ab, "Failed to send WMI_TWT_DISABLE_CMDID");
+ dev_kfree_skb(skb);
+ }
+ return ret;
+}
+
+int
+ath11k_wmi_send_obss_spr_cmd(struct ath11k *ar, u32 vdev_id,
+ struct ieee80211_he_obss_pd *he_obss_pd)
+{
+ struct ath11k_pdev_wmi *wmi = ar->wmi;
+ struct ath11k_base *ab = wmi->wmi_ab->ab;
+ struct wmi_obss_spatial_reuse_params_cmd *cmd;
+ struct sk_buff *skb;
+ int ret, len;
+
+ len = sizeof(*cmd);
+
+ skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, len);
+ if (!skb)
+ return -ENOMEM;
+
+ cmd = (struct wmi_obss_spatial_reuse_params_cmd *)skb->data;
+ cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG,
+ WMI_TAG_OBSS_SPATIAL_REUSE_SET_CMD) |
+ FIELD_PREP(WMI_TLV_LEN, len - TLV_HDR_SIZE);
+ cmd->vdev_id = vdev_id;
+ cmd->enable = he_obss_pd->enable;
+ cmd->obss_min = he_obss_pd->min_offset;
+ cmd->obss_max = he_obss_pd->max_offset;
+
+ ret = ath11k_wmi_cmd_send(wmi, skb,
+ WMI_PDEV_OBSS_PD_SPATIAL_REUSE_CMDID);
+ if (ret) {
+ ath11k_warn(ab,
+ "failed to send WMI_PDEV_OBSS_PD_SPATIAL_REUSE_CMDID\n");
+ dev_kfree_skb(skb);
+ }
+ return ret;
+}
+
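+/* Build the per-pdev band-to-MAC table from the HAL regulatory caps: each
+ * radio advertises the frequency span it serves, so a dual-band (DBS)
+ * target typically ends up with one pdev covering 2 GHz and another
+ * covering 5 GHz.
+ */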
+static void
+ath11k_fill_band_to_mac_param(struct ath11k_base *soc,
+ struct wmi_host_pdev_band_to_mac *band_to_mac)
+{
+ u8 i;
+ struct ath11k_hal_reg_capabilities_ext *hal_reg_cap;
+ struct ath11k_pdev *pdev;
+
+ for (i = 0; i < soc->num_radios; i++) {
+ pdev = &soc->pdevs[i];
+ hal_reg_cap = &soc->hal_reg_cap[i];
+ band_to_mac[i].pdev_id = pdev->pdev_id;
+
+ switch (pdev->cap.supported_bands) {
+ case WMI_HOST_WLAN_2G_5G_CAP:
+ band_to_mac[i].start_freq = hal_reg_cap->low_2ghz_chan;
+ band_to_mac[i].end_freq = hal_reg_cap->high_5ghz_chan;
+ break;
+ case WMI_HOST_WLAN_2G_CAP:
+ band_to_mac[i].start_freq = hal_reg_cap->low_2ghz_chan;
+ band_to_mac[i].end_freq = hal_reg_cap->high_2ghz_chan;
+ break;
+ case WMI_HOST_WLAN_5G_CAP:
+ band_to_mac[i].start_freq = hal_reg_cap->low_5ghz_chan;
+ band_to_mac[i].end_freq = hal_reg_cap->high_5ghz_chan;
+ break;
+ default:
+ break;
+ }
+ }
+}
+
+static void
+ath11k_wmi_copy_resource_config(struct wmi_resource_config *wmi_cfg,
+ struct target_resource_config *tg_cfg)
+{
+ wmi_cfg->num_vdevs = tg_cfg->num_vdevs;
+ wmi_cfg->num_peers = tg_cfg->num_peers;
+ wmi_cfg->num_offload_peers = tg_cfg->num_offload_peers;
+ wmi_cfg->num_offload_reorder_buffs = tg_cfg->num_offload_reorder_buffs;
+ wmi_cfg->num_peer_keys = tg_cfg->num_peer_keys;
+ wmi_cfg->num_tids = tg_cfg->num_tids;
+ wmi_cfg->ast_skid_limit = tg_cfg->ast_skid_limit;
+ wmi_cfg->tx_chain_mask = tg_cfg->tx_chain_mask;
+ wmi_cfg->rx_chain_mask = tg_cfg->rx_chain_mask;
+ wmi_cfg->rx_timeout_pri[0] = tg_cfg->rx_timeout_pri[0];
+ wmi_cfg->rx_timeout_pri[1] = tg_cfg->rx_timeout_pri[1];
+ wmi_cfg->rx_timeout_pri[2] = tg_cfg->rx_timeout_pri[2];
+ wmi_cfg->rx_timeout_pri[3] = tg_cfg->rx_timeout_pri[3];
+ wmi_cfg->rx_decap_mode = tg_cfg->rx_decap_mode;
+ wmi_cfg->scan_max_pending_req = tg_cfg->scan_max_pending_req;
+ wmi_cfg->bmiss_offload_max_vdev = tg_cfg->bmiss_offload_max_vdev;
+ wmi_cfg->roam_offload_max_vdev = tg_cfg->roam_offload_max_vdev;
+ wmi_cfg->roam_offload_max_ap_profiles =
+ tg_cfg->roam_offload_max_ap_profiles;
+ wmi_cfg->num_mcast_groups = tg_cfg->num_mcast_groups;
+ wmi_cfg->num_mcast_table_elems = tg_cfg->num_mcast_table_elems;
+ wmi_cfg->mcast2ucast_mode = tg_cfg->mcast2ucast_mode;
+ wmi_cfg->tx_dbg_log_size = tg_cfg->tx_dbg_log_size;
+ wmi_cfg->num_wds_entries = tg_cfg->num_wds_entries;
+ wmi_cfg->dma_burst_size = tg_cfg->dma_burst_size;
+ wmi_cfg->mac_aggr_delim = tg_cfg->mac_aggr_delim;
+ wmi_cfg->rx_skip_defrag_timeout_dup_detection_check =
+ tg_cfg->rx_skip_defrag_timeout_dup_detection_check;
+ wmi_cfg->vow_config = tg_cfg->vow_config;
+ wmi_cfg->gtk_offload_max_vdev = tg_cfg->gtk_offload_max_vdev;
+ wmi_cfg->num_msdu_desc = tg_cfg->num_msdu_desc;
+ wmi_cfg->max_frag_entries = tg_cfg->max_frag_entries;
+ wmi_cfg->num_tdls_vdevs = tg_cfg->num_tdls_vdevs;
+ wmi_cfg->num_tdls_conn_table_entries =
+ tg_cfg->num_tdls_conn_table_entries;
+ wmi_cfg->beacon_tx_offload_max_vdev =
+ tg_cfg->beacon_tx_offload_max_vdev;
+ wmi_cfg->num_multicast_filter_entries =
+ tg_cfg->num_multicast_filter_entries;
+ wmi_cfg->num_wow_filters = tg_cfg->num_wow_filters;
+ wmi_cfg->num_keep_alive_pattern = tg_cfg->num_keep_alive_pattern;
+ wmi_cfg->keep_alive_pattern_size = tg_cfg->keep_alive_pattern_size;
+ wmi_cfg->max_tdls_concurrent_sleep_sta =
+ tg_cfg->max_tdls_concurrent_sleep_sta;
+ wmi_cfg->max_tdls_concurrent_buffer_sta =
+ tg_cfg->max_tdls_concurrent_buffer_sta;
+ wmi_cfg->wmi_send_separate = tg_cfg->wmi_send_separate;
+ wmi_cfg->num_ocb_vdevs = tg_cfg->num_ocb_vdevs;
+ wmi_cfg->num_ocb_channels = tg_cfg->num_ocb_channels;
+ wmi_cfg->num_ocb_schedules = tg_cfg->num_ocb_schedules;
+ wmi_cfg->bpf_instruction_size = tg_cfg->bpf_instruction_size;
+ wmi_cfg->max_bssid_rx_filters = tg_cfg->max_bssid_rx_filters;
+ wmi_cfg->use_pdev_id = tg_cfg->use_pdev_id;
+ wmi_cfg->flag1 = tg_cfg->atf_config;
+ wmi_cfg->peer_map_unmap_v2_support = tg_cfg->peer_map_unmap_v2_support;
+ wmi_cfg->sched_params = tg_cfg->sched_params;
+ wmi_cfg->twt_ap_pdev_count = tg_cfg->twt_ap_pdev_count;
+ wmi_cfg->twt_ap_sta_count = tg_cfg->twt_ap_sta_count;
+}
+
+static int ath11k_init_cmd_send(struct ath11k_pdev_wmi *wmi,
+ struct wmi_init_cmd_param *param)
+{
+ struct ath11k_base *ab = wmi->wmi_ab->ab;
+ struct sk_buff *skb;
+ struct wmi_init_cmd *cmd;
+ struct wmi_resource_config *cfg;
+ struct wmi_pdev_set_hw_mode_cmd_param *hw_mode;
+ struct wmi_pdev_band_to_mac *band_to_mac;
+ struct wlan_host_mem_chunk *host_mem_chunks;
+ struct wmi_tlv *tlv;
+ int ret;
+ size_t len;
+ void *ptr;
+ u32 hw_mode_len = 0;
+ u16 idx;
+
+ if (param->hw_mode_id != WMI_HOST_HW_MODE_MAX)
+ hw_mode_len = sizeof(*hw_mode) + TLV_HDR_SIZE +
+ (param->num_band_to_mac * sizeof(*band_to_mac));
+
+ len = sizeof(*cmd) + TLV_HDR_SIZE + sizeof(*cfg) + hw_mode_len +
+ (sizeof(*host_mem_chunks) * WMI_MAX_MEM_REQS);
+
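+ /* A sketch of the INIT cmd TLV layout sized above: the fixed
+ * wmi_init_cmd, the resource config, a (possibly empty) host memory
+ * chunk array - space is reserved for WMI_MAX_MEM_REQS entries - and,
+ * when a hw mode is preferred, the set_hw_mode params plus a
+ * band_to_mac array.
+ */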
+ skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, len);
+ if (!skb)
+ return -ENOMEM;
+
+ cmd = (struct wmi_init_cmd *)skb->data;
+
+ cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_INIT_CMD) |
+ FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);
+
+ ptr = skb->data + sizeof(*cmd);
+ cfg = ptr;
+
+ ath11k_wmi_copy_resource_config(cfg, param->res_cfg);
+
+ cfg->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_RESOURCE_CONFIG) |
+ FIELD_PREP(WMI_TLV_LEN, sizeof(*cfg) - TLV_HDR_SIZE);
+
+ ptr += sizeof(*cfg);
+ host_mem_chunks = ptr + TLV_HDR_SIZE;
+ len = sizeof(struct wlan_host_mem_chunk);
+
+ for (idx = 0; idx < param->num_mem_chunks; ++idx) {
+ host_mem_chunks[idx].tlv_header =
+ FIELD_PREP(WMI_TLV_TAG,
+ WMI_TAG_WLAN_HOST_MEMORY_CHUNK) |
+ FIELD_PREP(WMI_TLV_LEN, len);
+
+ host_mem_chunks[idx].ptr = param->mem_chunks[idx].paddr;
+ host_mem_chunks[idx].size = param->mem_chunks[idx].len;
+ host_mem_chunks[idx].req_id = param->mem_chunks[idx].req_id;
+
+ ath11k_dbg(ab, ATH11K_DBG_WMI,
+ "WMI host mem chunk req_id %d paddr 0x%llx len %d\n",
+ param->mem_chunks[idx].req_id,
+ (u64)param->mem_chunks[idx].paddr,
+ param->mem_chunks[idx].len);
+ }
+ cmd->num_host_mem_chunks = param->num_mem_chunks;
+ len = sizeof(struct wlan_host_mem_chunk) * param->num_mem_chunks;
+
+ /* TLV header for the host memory chunk array; len is zero when
+ * there are no chunks
+ */
+ tlv = ptr;
+ tlv->header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_ARRAY_STRUCT) |
+ FIELD_PREP(WMI_TLV_LEN, len);
+ ptr += TLV_HDR_SIZE + len;
+
+ if (param->hw_mode_id != WMI_HOST_HW_MODE_MAX) {
+ hw_mode = (struct wmi_pdev_set_hw_mode_cmd_param *)ptr;
+ hw_mode->tlv_header = FIELD_PREP(WMI_TLV_TAG,
+ WMI_TAG_PDEV_SET_HW_MODE_CMD) |
+ FIELD_PREP(WMI_TLV_LEN,
+ sizeof(*hw_mode) - TLV_HDR_SIZE);
+
+ hw_mode->hw_mode_index = param->hw_mode_id;
+ hw_mode->num_band_to_mac = param->num_band_to_mac;
+
+ ptr += sizeof(*hw_mode);
+
+ len = param->num_band_to_mac * sizeof(*band_to_mac);
+ tlv = ptr;
+ tlv->header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_ARRAY_STRUCT) |
+ FIELD_PREP(WMI_TLV_LEN, len);
+
+ ptr += TLV_HDR_SIZE;
+ len = sizeof(*band_to_mac);
+
+ for (idx = 0; idx < param->num_band_to_mac; idx++) {
+ band_to_mac = (void *)ptr;
+
+ band_to_mac->tlv_header = FIELD_PREP(WMI_TLV_TAG,
+ WMI_TAG_PDEV_BAND_TO_MAC) |
+ FIELD_PREP(WMI_TLV_LEN,
+ len - TLV_HDR_SIZE);
+ band_to_mac->pdev_id = param->band_to_mac[idx].pdev_id;
+ band_to_mac->start_freq =
+ param->band_to_mac[idx].start_freq;
+ band_to_mac->end_freq =
+ param->band_to_mac[idx].end_freq;
+ ptr += sizeof(*band_to_mac);
+ }
+ }
+
+ ret = ath11k_wmi_cmd_send(wmi, skb, WMI_INIT_CMDID);
+ if (ret) {
+ ath11k_warn(ab, "failed to send WMI_INIT_CMDID\n");
+ dev_kfree_skb(skb);
+ }
+
+ return ret;
+}
+
+int ath11k_wmi_wait_for_service_ready(struct ath11k_base *ab)
+{
+ unsigned long time_left;
+
+ time_left = wait_for_completion_timeout(&ab->wmi_ab.service_ready,
+ WMI_SERVICE_READY_TIMEOUT_HZ);
+ if (!time_left)
+ return -ETIMEDOUT;
+
+ return 0;
+}
+
+int ath11k_wmi_wait_for_unified_ready(struct ath11k_base *ab)
+{
+ unsigned long time_left;
+
+ time_left = wait_for_completion_timeout(&ab->wmi_ab.unified_ready,
+ WMI_SERVICE_READY_TIMEOUT_HZ);
+ if (!time_left)
+ return -ETIMEDOUT;
+
+ return 0;
+}
+
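+/* Build the default resource configuration and issue WMI_INIT_CMDID. The
+ * peer and TID pools scale with the radio count; e.g. for a 2-radio DBS
+ * target:
+ *
+ * num_vdevs = 2 * TARGET_NUM_VDEVS
+ * num_peers = TARGET_NUM_PEERS(DBS)
+ * num_tids = TARGET_NUM_TIDS(DBS)
+ */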
+int ath11k_wmi_cmd_init(struct ath11k_base *ab)
+{
+ struct ath11k_wmi_base *wmi_sc = &ab->wmi_ab;
+ struct wmi_init_cmd_param init_param;
+ struct target_resource_config config;
+
+ memset(&init_param, 0, sizeof(init_param));
+ memset(&config, 0, sizeof(config));
+
+ config.num_vdevs = ab->num_radios * TARGET_NUM_VDEVS;
+
+ if (ab->num_radios == 2) {
+ config.num_peers = TARGET_NUM_PEERS(DBS);
+ config.num_tids = TARGET_NUM_TIDS(DBS);
+ } else if (ab->num_radios == 3) {
+ config.num_peers = TARGET_NUM_PEERS(DBS_SBS);
+ config.num_tids = TARGET_NUM_TIDS(DBS_SBS);
+ } else {
+ /* Control should not reach here */
+ config.num_peers = TARGET_NUM_PEERS(SINGLE);
+ config.num_tids = TARGET_NUM_TIDS(SINGLE);
+ }
+ config.num_offload_peers = TARGET_NUM_OFFLD_PEERS;
+ config.num_offload_reorder_buffs = TARGET_NUM_OFFLD_REORDER_BUFFS;
+ config.num_peer_keys = TARGET_NUM_PEER_KEYS;
+ config.ast_skid_limit = TARGET_AST_SKID_LIMIT;
+ config.tx_chain_mask = (1 << ab->target_caps.num_rf_chains) - 1;
+ config.rx_chain_mask = (1 << ab->target_caps.num_rf_chains) - 1;
+ config.rx_timeout_pri[0] = TARGET_RX_TIMEOUT_LO_PRI;
+ config.rx_timeout_pri[1] = TARGET_RX_TIMEOUT_LO_PRI;
+ config.rx_timeout_pri[2] = TARGET_RX_TIMEOUT_LO_PRI;
+ config.rx_timeout_pri[3] = TARGET_RX_TIMEOUT_HI_PRI;
+ config.rx_decap_mode = TARGET_DECAP_MODE_NATIVE_WIFI;
+ config.scan_max_pending_req = TARGET_SCAN_MAX_PENDING_REQS;
+ config.bmiss_offload_max_vdev = TARGET_BMISS_OFFLOAD_MAX_VDEV;
+ config.roam_offload_max_vdev = TARGET_ROAM_OFFLOAD_MAX_VDEV;
+ config.roam_offload_max_ap_profiles = TARGET_ROAM_OFFLOAD_MAX_AP_PROFILES;
+ config.num_mcast_groups = TARGET_NUM_MCAST_GROUPS;
+ config.num_mcast_table_elems = TARGET_NUM_MCAST_TABLE_ELEMS;
+ config.mcast2ucast_mode = TARGET_MCAST2UCAST_MODE;
+ config.tx_dbg_log_size = TARGET_TX_DBG_LOG_SIZE;
+ config.num_wds_entries = TARGET_NUM_WDS_ENTRIES;
+ config.dma_burst_size = TARGET_DMA_BURST_SIZE;
+ config.rx_skip_defrag_timeout_dup_detection_check =
+ TARGET_RX_SKIP_DEFRAG_TIMEOUT_DUP_DETECTION_CHECK;
+ config.vow_config = TARGET_VOW_CONFIG;
+ config.gtk_offload_max_vdev = TARGET_GTK_OFFLOAD_MAX_VDEV;
+ config.num_msdu_desc = TARGET_NUM_MSDU_DESC;
+ config.beacon_tx_offload_max_vdev = ab->num_radios * TARGET_MAX_BCN_OFFLD;
+ config.rx_batchmode = TARGET_RX_BATCHMODE;
+ config.peer_map_unmap_v2_support = 1;
+ config.twt_ap_pdev_count = 2;
+ config.twt_ap_sta_count = 1000;
+
+ memcpy(&wmi_sc->wlan_resource_config, &config, sizeof(config));
+
+ init_param.res_cfg = &wmi_sc->wlan_resource_config;
+ init_param.num_mem_chunks = wmi_sc->num_mem_chunks;
+ init_param.hw_mode_id = wmi_sc->preferred_hw_mode;
+ init_param.mem_chunks = wmi_sc->mem_chunks;
+
+ if (wmi_sc->preferred_hw_mode == WMI_HOST_HW_MODE_SINGLE)
+ init_param.hw_mode_id = WMI_HOST_HW_MODE_MAX;
+
+ init_param.num_band_to_mac = ab->num_radios;
+
+ ath11k_fill_band_to_mac_param(ab, init_param.band_to_mac);
+
+ return ath11k_init_cmd_send(&wmi_sc->wmi[0], &init_param);
+}
+
+static int ath11k_wmi_tlv_hw_mode_caps_parse(struct ath11k_base *soc,
+ u16 tag, u16 len,
+ const void *ptr, void *data)
+{
+ struct wmi_tlv_svc_rdy_ext_parse *svc_rdy_ext = data;
+ struct wmi_hw_mode_capabilities *hw_mode_cap;
+ u32 phy_map = 0;
+
+ if (tag != WMI_TAG_HW_MODE_CAPABILITIES)
+ return -EPROTO;
+
+ if (svc_rdy_ext->n_hw_mode_caps >= svc_rdy_ext->param.num_hw_modes)
+ return -ENOBUFS;
+
+ hw_mode_cap = container_of(ptr, struct wmi_hw_mode_capabilities,
+ hw_mode_id);
+ svc_rdy_ext->n_hw_mode_caps++;
+
+ phy_map = hw_mode_cap->phy_id_map;
+ while (phy_map) {
+ svc_rdy_ext->tot_phy_id++;
+ phy_map = phy_map >> 1;
+ }
+
+ return 0;
+}
+
+static int ath11k_wmi_tlv_hw_mode_caps(struct ath11k_base *soc,
+ u16 len, const void *ptr, void *data)
+{
+ struct wmi_tlv_svc_rdy_ext_parse *svc_rdy_ext = data;
+ struct wmi_hw_mode_capabilities *hw_mode_caps;
+ enum wmi_host_hw_mode_config_type mode, pref;
+ u32 i;
+ int ret;
+
+ svc_rdy_ext->n_hw_mode_caps = 0;
+ svc_rdy_ext->hw_mode_caps = (struct wmi_hw_mode_capabilities *)ptr;
+
+ ret = ath11k_wmi_tlv_iter(soc, ptr, len,
+ ath11k_wmi_tlv_hw_mode_caps_parse,
+ svc_rdy_ext);
+ if (ret) {
+ ath11k_warn(soc, "failed to parse tlv %d\n", ret);
+ return ret;
+ }
+
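+ /* Keep the advertised mode with the best (numerically lowest)
+ * priority in ath11k_hw_mode_pri_map; e.g. a target offering both
+ * SINGLE and DBS keeps whichever mode the map ranks ahead.
+ */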
+ i = 0;
+ while (i < svc_rdy_ext->n_hw_mode_caps) {
+ hw_mode_caps = &svc_rdy_ext->hw_mode_caps[i];
+ mode = hw_mode_caps->hw_mode_id;
+ pref = soc->wmi_ab.preferred_hw_mode;
+
+ if (ath11k_hw_mode_pri_map[mode] < ath11k_hw_mode_pri_map[pref]) {
+ svc_rdy_ext->pref_hw_mode_caps = *hw_mode_caps;
+ soc->wmi_ab.preferred_hw_mode = mode;
+ }
+ i++;
+ }
+
+ if (soc->wmi_ab.preferred_hw_mode == WMI_HOST_HW_MODE_MAX)
+ return -EINVAL;
+
+ return 0;
+}
+
+static int ath11k_wmi_tlv_mac_phy_caps_parse(struct ath11k_base *soc,
+ u16 tag, u16 len,
+ const void *ptr, void *data)
+{
+ struct wmi_tlv_svc_rdy_ext_parse *svc_rdy_ext = data;
+
+ if (tag != WMI_TAG_MAC_PHY_CAPABILITIES)
+ return -EPROTO;
+
+ if (svc_rdy_ext->n_mac_phy_caps >= svc_rdy_ext->tot_phy_id)
+ return -ENOBUFS;
+
+ len = min_t(u16, len, sizeof(struct wmi_mac_phy_capabilities));
+ if (!svc_rdy_ext->n_mac_phy_caps) {
+ /* size by the full struct: entries are stored at struct stride */
+ svc_rdy_ext->mac_phy_caps =
+ kcalloc(svc_rdy_ext->tot_phy_id,
+ sizeof(struct wmi_mac_phy_capabilities),
+ GFP_ATOMIC);
+ if (!svc_rdy_ext->mac_phy_caps)
+ return -ENOMEM;
+ }
+
+ memcpy(svc_rdy_ext->mac_phy_caps + svc_rdy_ext->n_mac_phy_caps, ptr, len);
+ svc_rdy_ext->n_mac_phy_caps++;
+ return 0;
+}
+
+static int ath11k_wmi_tlv_ext_hal_reg_caps_parse(struct ath11k_base *soc,
+ u16 tag, u16 len,
+ const void *ptr, void *data)
+{
+ struct wmi_tlv_svc_rdy_ext_parse *svc_rdy_ext = data;
+
+ if (tag != WMI_TAG_HAL_REG_CAPABILITIES_EXT)
+ return -EPROTO;
+
+ if (svc_rdy_ext->n_ext_hal_reg_caps >= svc_rdy_ext->param.num_phy)
+ return -ENOBUFS;
+
+ svc_rdy_ext->n_ext_hal_reg_caps++;
+ return 0;
+}
+
+static int ath11k_wmi_tlv_ext_hal_reg_caps(struct ath11k_base *soc,
+ u16 len, const void *ptr, void *data)
+{
+ struct ath11k_pdev_wmi *wmi_handle = &soc->wmi_ab.wmi[0];
+ struct wmi_tlv_svc_rdy_ext_parse *svc_rdy_ext = data;
+ struct ath11k_hal_reg_capabilities_ext reg_cap;
+ int ret;
+ u32 i;
+
+ svc_rdy_ext->n_ext_hal_reg_caps = 0;
+ svc_rdy_ext->ext_hal_reg_caps = (struct wmi_hal_reg_capabilities_ext *)ptr;
+ ret = ath11k_wmi_tlv_iter(soc, ptr, len,
+ ath11k_wmi_tlv_ext_hal_reg_caps_parse,
+ svc_rdy_ext);
+ if (ret) {
+ ath11k_warn(soc, "failed to parse tlv %d\n", ret);
+ return ret;
+ }
+
+ for (i = 0; i < svc_rdy_ext->param.num_phy; i++) {
+ ret = ath11k_pull_reg_cap_svc_rdy_ext(wmi_handle,
+ svc_rdy_ext->soc_hal_reg_caps,
+ svc_rdy_ext->ext_hal_reg_caps, i,
+ &reg_cap);
+ if (ret) {
+ ath11k_warn(soc, "failed to extract reg cap %d\n", i);
+ return ret;
+ }
+
+ memcpy(&soc->hal_reg_cap[reg_cap.phy_id],
+ &reg_cap, sizeof(reg_cap));
+ }
+ return 0;
+}
+
+static int ath11k_wmi_tlv_ext_soc_hal_reg_caps_parse(struct ath11k_base *soc,
+ u16 len, const void *ptr,
+ void *data)
+{
+ struct ath11k_pdev_wmi *wmi_handle = &soc->wmi_ab.wmi[0];
+ struct wmi_tlv_svc_rdy_ext_parse *svc_rdy_ext = data;
+ u8 hw_mode_id = svc_rdy_ext->pref_hw_mode_caps.hw_mode_id;
+ u32 phy_id_map;
+ int ret;
+
+ svc_rdy_ext->soc_hal_reg_caps = (struct wmi_soc_hal_reg_capabilities *)ptr;
+ svc_rdy_ext->param.num_phy = svc_rdy_ext->soc_hal_reg_caps->num_phy;
+
+ soc->num_radios = 0;
+ phy_id_map = svc_rdy_ext->pref_hw_mode_caps.phy_id_map;
+
+ while (phy_id_map && soc->num_radios < MAX_RADIOS) {
+ ret = ath11k_pull_mac_phy_cap_svc_ready_ext(wmi_handle,
+ svc_rdy_ext->hw_caps,
+ svc_rdy_ext->hw_mode_caps,
+ svc_rdy_ext->soc_hal_reg_caps,
+ svc_rdy_ext->mac_phy_caps,
+ hw_mode_id, soc->num_radios,
+ &soc->pdevs[soc->num_radios]);
+ if (ret) {
+ ath11k_warn(soc, "failed to extract mac caps, idx :%d\n",
+ soc->num_radios);
+ return ret;
+ }
+
+ soc->num_radios++;
+
+ /* TODO: mac_phy_cap prints */
+ phy_id_map >>= 1;
+ }
+ return 0;
+}
+
+static int ath11k_wmi_tlv_svc_rdy_ext_parse(struct ath11k_base *ab,
+ u16 tag, u16 len,
+ const void *ptr, void *data)
+{
+ struct ath11k_pdev_wmi *wmi_handle = &ab->wmi_ab.wmi[0];
+ struct wmi_tlv_svc_rdy_ext_parse *svc_rdy_ext = data;
+ int ret;
+
+ switch (tag) {
+ case WMI_TAG_SERVICE_READY_EXT_EVENT:
+ ret = ath11k_pull_svc_ready_ext(wmi_handle, ptr,
+ &svc_rdy_ext->param);
+ if (ret) {
+ ath11k_warn(ab, "unable to extract ext params\n");
+ return ret;
+ }
+ break;
+
+ case WMI_TAG_SOC_MAC_PHY_HW_MODE_CAPS:
+ svc_rdy_ext->hw_caps = (struct wmi_soc_mac_phy_hw_mode_caps *)ptr;
+ svc_rdy_ext->param.num_hw_modes = svc_rdy_ext->hw_caps->num_hw_modes;
+ break;
+
+ case WMI_TAG_SOC_HAL_REG_CAPABILITIES:
+ ret = ath11k_wmi_tlv_ext_soc_hal_reg_caps_parse(ab, len, ptr,
+ svc_rdy_ext);
+ if (ret)
+ return ret;
+ break;
+
+ case WMI_TAG_ARRAY_STRUCT:
+ if (!svc_rdy_ext->hw_mode_done) {
+ ret = ath11k_wmi_tlv_hw_mode_caps(ab, len, ptr,
+ svc_rdy_ext);
+ if (ret)
+ return ret;
+
+ svc_rdy_ext->hw_mode_done = true;
+ } else if (!svc_rdy_ext->mac_phy_done) {
+ svc_rdy_ext->n_mac_phy_caps = 0;
+ ret = ath11k_wmi_tlv_iter(ab, ptr, len,
+ ath11k_wmi_tlv_mac_phy_caps_parse,
+ svc_rdy_ext);
+ if (ret) {
+ ath11k_warn(ab, "failed to parse tlv %d\n", ret);
+ return ret;
+ }
+
+ svc_rdy_ext->mac_phy_done = true;
+ } else if (!svc_rdy_ext->ext_hal_reg_done) {
+ ret = ath11k_wmi_tlv_ext_hal_reg_caps(ab, len, ptr,
+ svc_rdy_ext);
+ if (ret)
+ return ret;
+
+ svc_rdy_ext->ext_hal_reg_done = true;
+ complete(&ab->wmi_ab.service_ready);
+ }
+ break;
+
+ default:
+ break;
+ }
+ return 0;
+}
+
+static int ath11k_service_ready_ext_event(struct ath11k_base *ab,
+ struct sk_buff *skb)
+{
+ struct wmi_tlv_svc_rdy_ext_parse svc_rdy_ext = { };
+ int ret;
+
+ ret = ath11k_wmi_tlv_iter(ab, skb->data, skb->len,
+ ath11k_wmi_tlv_svc_rdy_ext_parse,
+ &svc_rdy_ext);
+ if (ret) {
+ ath11k_warn(ab, "failed to parse tlv %d\n", ret);
+ kfree(svc_rdy_ext.mac_phy_caps);
+ return ret;
+ }
+
+ kfree(svc_rdy_ext.mac_phy_caps);
+ return 0;
+}
+
+static int ath11k_pull_vdev_start_resp_tlv(struct ath11k_base *ab, struct sk_buff *skb,
+ struct wmi_vdev_start_resp_event *vdev_rsp)
+{
+ const void **tb;
+ const struct wmi_vdev_start_resp_event *ev;
+ int ret;
+
+ tb = ath11k_wmi_tlv_parse_alloc(ab, skb->data, skb->len, GFP_ATOMIC);
+ if (IS_ERR(tb)) {
+ ret = PTR_ERR(tb);
+ ath11k_warn(ab, "failed to parse tlv: %d\n", ret);
+ return ret;
+ }
+
+ ev = tb[WMI_TAG_VDEV_START_RESPONSE_EVENT];
+ if (!ev) {
+ ath11k_warn(ab, "failed to fetch vdev start resp ev");
+ kfree(tb);
+ return -EPROTO;
+ }
+
+ memset(vdev_rsp, 0, sizeof(*vdev_rsp));
+
+ vdev_rsp->vdev_id = ev->vdev_id;
+ vdev_rsp->requestor_id = ev->requestor_id;
+ vdev_rsp->resp_type = ev->resp_type;
+ vdev_rsp->status = ev->status;
+ vdev_rsp->chain_mask = ev->chain_mask;
+ vdev_rsp->smps_mode = ev->smps_mode;
+ vdev_rsp->mac_id = ev->mac_id;
+ vdev_rsp->cfgd_tx_streams = ev->cfgd_tx_streams;
+ vdev_rsp->cfgd_rx_streams = ev->cfgd_rx_streams;
+
+ kfree(tb);
+ return 0;
+}
+
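+/* Each WMI regulatory rule packs its parameters into bitfields of three
+ * u32 words (freq_info, bw_pwr_info, flag_info) that are unpacked with
+ * FIELD_GET below. As a hypothetical example, with the start/end frequency
+ * in the low/high halves of freq_info, a value of 0x09710962 would decode
+ * to a 2402..2417 MHz rule (0x0962 = 2402, 0x0971 = 2417).
+ */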
+static struct cur_reg_rule
+*create_reg_rules_from_wmi(u32 num_reg_rules,
+ struct wmi_regulatory_rule_struct *wmi_reg_rule)
+{
+ struct cur_reg_rule *reg_rule_ptr;
+ u32 count;
+
+ reg_rule_ptr = kzalloc((num_reg_rules * sizeof(*reg_rule_ptr)),
+ GFP_ATOMIC);
+
+ if (!reg_rule_ptr)
+ return NULL;
+
+ for (count = 0; count < num_reg_rules; count++) {
+ reg_rule_ptr[count].start_freq =
+ FIELD_GET(REG_RULE_START_FREQ,
+ wmi_reg_rule[count].freq_info);
+ reg_rule_ptr[count].end_freq =
+ FIELD_GET(REG_RULE_END_FREQ,
+ wmi_reg_rule[count].freq_info);
+ reg_rule_ptr[count].max_bw =
+ FIELD_GET(REG_RULE_MAX_BW,
+ wmi_reg_rule[count].bw_pwr_info);
+ reg_rule_ptr[count].reg_power =
+ FIELD_GET(REG_RULE_REG_PWR,
+ wmi_reg_rule[count].bw_pwr_info);
+ reg_rule_ptr[count].ant_gain =
+ FIELD_GET(REG_RULE_ANT_GAIN,
+ wmi_reg_rule[count].bw_pwr_info);
+ reg_rule_ptr[count].flags =
+ FIELD_GET(REG_RULE_FLAGS,
+ wmi_reg_rule[count].flag_info);
+ }
+
+ return reg_rule_ptr;
+}
+
+static int ath11k_pull_reg_chan_list_update_ev(struct ath11k_base *ab,
+ struct sk_buff *skb,
+ struct cur_regulatory_info *reg_info)
+{
+ const void **tb;
+ const struct wmi_reg_chan_list_cc_event *chan_list_event_hdr;
+ struct wmi_regulatory_rule_struct *wmi_reg_rule;
+ u32 num_2g_reg_rules, num_5g_reg_rules;
+ int ret;
+
+ ath11k_dbg(ab, ATH11K_DBG_WMI, "processing regulatory channel list\n");
+
+ tb = ath11k_wmi_tlv_parse_alloc(ab, skb->data, skb->len, GFP_ATOMIC);
+ if (IS_ERR(tb)) {
+ ret = PTR_ERR(tb);
+ ath11k_warn(ab, "failed to parse tlv: %d\n", ret);
+ return ret;
+ }
+
+ chan_list_event_hdr = tb[WMI_TAG_REG_CHAN_LIST_CC_EVENT];
+ if (!chan_list_event_hdr) {
+ ath11k_warn(ab, "failed to fetch reg chan list update ev\n");
+ kfree(tb);
+ return -EPROTO;
+ }
+
+ reg_info->num_2g_reg_rules = chan_list_event_hdr->num_2g_reg_rules;
+ reg_info->num_5g_reg_rules = chan_list_event_hdr->num_5g_reg_rules;
+
+ if (!(reg_info->num_2g_reg_rules + reg_info->num_5g_reg_rules)) {
+ ath11k_warn(ab, "No regulatory rules available in the event info\n");
+ kfree(tb);
+ return -EINVAL;
+ }
+
+ memcpy(reg_info->alpha2, &chan_list_event_hdr->alpha2,
+ REG_ALPHA2_LEN);
+ reg_info->dfs_region = chan_list_event_hdr->dfs_region;
+ reg_info->phybitmap = chan_list_event_hdr->phybitmap;
+ reg_info->num_phy = chan_list_event_hdr->num_phy;
+ reg_info->phy_id = chan_list_event_hdr->phy_id;
+ reg_info->ctry_code = chan_list_event_hdr->country_id;
+ reg_info->reg_dmn_pair = chan_list_event_hdr->domain_code;
+ if (chan_list_event_hdr->status_code == WMI_REG_SET_CC_STATUS_PASS)
+ reg_info->status_code = REG_SET_CC_STATUS_PASS;
+ else if (chan_list_event_hdr->status_code == WMI_REG_CURRENT_ALPHA2_NOT_FOUND)
+ reg_info->status_code = REG_CURRENT_ALPHA2_NOT_FOUND;
+ else if (chan_list_event_hdr->status_code == WMI_REG_INIT_ALPHA2_NOT_FOUND)
+ reg_info->status_code = REG_INIT_ALPHA2_NOT_FOUND;
+ else if (chan_list_event_hdr->status_code == WMI_REG_SET_CC_CHANGE_NOT_ALLOWED)
+ reg_info->status_code = REG_SET_CC_CHANGE_NOT_ALLOWED;
+ else if (chan_list_event_hdr->status_code == WMI_REG_SET_CC_STATUS_NO_MEMORY)
+ reg_info->status_code = REG_SET_CC_STATUS_NO_MEMORY;
+ else if (chan_list_event_hdr->status_code == WMI_REG_SET_CC_STATUS_FAIL)
+ reg_info->status_code = REG_SET_CC_STATUS_FAIL;
+
+ reg_info->min_bw_2g = chan_list_event_hdr->min_bw_2g;
+ reg_info->max_bw_2g = chan_list_event_hdr->max_bw_2g;
+ reg_info->min_bw_5g = chan_list_event_hdr->min_bw_5g;
+ reg_info->max_bw_5g = chan_list_event_hdr->max_bw_5g;
+
+ num_2g_reg_rules = reg_info->num_2g_reg_rules;
+ num_5g_reg_rules = reg_info->num_5g_reg_rules;
+
+ ath11k_dbg(ab, ATH11K_DBG_WMI,
+ "%s:cc %s dsf %d BW: min_2g %d max_2g %d min_5g %d max_5g %d",
+ __func__, reg_info->alpha2, reg_info->dfs_region,
+ reg_info->min_bw_2g, reg_info->max_bw_2g,
+ reg_info->min_bw_5g, reg_info->max_bw_5g);
+
+ ath11k_dbg(ab, ATH11K_DBG_WMI,
+ "%s: num_2g_reg_rules %d num_5g_reg_rules %d", __func__,
+ num_2g_reg_rules, num_5g_reg_rules);
+
+ wmi_reg_rule =
+ (struct wmi_regulatory_rule_struct *)((u8 *)chan_list_event_hdr
+ + sizeof(*chan_list_event_hdr)
+ + sizeof(struct wmi_tlv));
+
+ if (num_2g_reg_rules) {
+ reg_info->reg_rules_2g_ptr = create_reg_rules_from_wmi(num_2g_reg_rules,
+ wmi_reg_rule);
+ if (!reg_info->reg_rules_2g_ptr) {
+ kfree(tb);
+ ath11k_warn(ab, "Unable to Allocate memory for 2g rules\n");
+ return -ENOMEM;
+ }
+ }
+
+ if (num_5g_reg_rules) {
+ wmi_reg_rule += num_2g_reg_rules;
+ reg_info->reg_rules_5g_ptr = create_reg_rules_from_wmi(num_5g_reg_rules,
+ wmi_reg_rule);
+ if (!reg_info->reg_rules_5g_ptr) {
+ kfree(tb);
+ ath11k_warn(ab, "Unable to Allocate memory for 5g rules\n");
+ return -ENOMEM;
+ }
+ }
+
+ ath11k_dbg(ab, ATH11K_DBG_WMI, "processed regulatory channel list\n");
+
+ kfree(tb);
+ return 0;
+}
+
+static int ath11k_pull_peer_del_resp_ev(struct ath11k_base *ab, struct sk_buff *skb,
+ struct wmi_peer_delete_resp_event *peer_del_resp)
+{
+ const void **tb;
+ const struct wmi_peer_delete_resp_event *ev;
+ int ret;
+
+ tb = ath11k_wmi_tlv_parse_alloc(ab, skb->data, skb->len, GFP_ATOMIC);
+ if (IS_ERR(tb)) {
+ ret = PTR_ERR(tb);
+ ath11k_warn(ab, "failed to parse tlv: %d\n", ret);
+ return ret;
+ }
+
+ ev = tb[WMI_TAG_PEER_DELETE_RESP_EVENT];
+ if (!ev) {
+ ath11k_warn(ab, "failed to fetch peer delete resp ev");
+ kfree(tb);
+ return -EPROTO;
+ }
+
+ memset(peer_del_resp, 0, sizeof(*peer_del_resp));
+
+ peer_del_resp->vdev_id = ev->vdev_id;
+ ether_addr_copy(peer_del_resp->peer_macaddr.addr,
+ ev->peer_macaddr.addr);
+
+ kfree(tb);
+ return 0;
+}
+
+static int ath11k_pull_bcn_tx_status_ev(struct ath11k_base *ab, void *evt_buf,
+ u32 len, u32 *vdev_id,
+ u32 *tx_status)
+{
+ const void **tb;
+ const struct wmi_bcn_tx_status_event *ev;
+ int ret;
+
+ tb = ath11k_wmi_tlv_parse_alloc(ab, evt_buf, len, GFP_ATOMIC);
+ if (IS_ERR(tb)) {
+ ret = PTR_ERR(tb);
+ ath11k_warn(ab, "failed to parse tlv: %d\n", ret);
+ return ret;
+ }
+
+ ev = tb[WMI_TAG_OFFLOAD_BCN_TX_STATUS_EVENT];
+ if (!ev) {
+ ath11k_warn(ab, "failed to fetch bcn tx status ev");
+ kfree(tb);
+ return -EPROTO;
+ }
+
+ *vdev_id = ev->vdev_id;
+ *tx_status = ev->tx_status;
+
+ kfree(tb);
+ return 0;
+}
+
+static int ath11k_pull_vdev_stopped_param_tlv(struct ath11k_base *ab, struct sk_buff *skb,
+ u32 *vdev_id)
+{
+ const void **tb;
+ const struct wmi_vdev_stopped_event *ev;
+ int ret;
+
+ tb = ath11k_wmi_tlv_parse_alloc(ab, skb->data, skb->len, GFP_ATOMIC);
+ if (IS_ERR(tb)) {
+ ret = PTR_ERR(tb);
+ ath11k_warn(ab, "failed to parse tlv: %d\n", ret);
+ return ret;
+ }
+
+ ev = tb[WMI_TAG_VDEV_STOPPED_EVENT];
+ if (!ev) {
+ ath11k_warn(ab, "failed to fetch vdev stop ev");
+ kfree(tb);
+ return -EPROTO;
+ }
+
+ *vdev_id = ev->vdev_id;
+
+ kfree(tb);
+ return 0;
+}
+
+static int ath11k_pull_mgmt_rx_params_tlv(struct ath11k_base *ab,
+ struct sk_buff *skb,
+ struct mgmt_rx_event_params *hdr)
+{
+ const void **tb;
+ const struct wmi_mgmt_rx_hdr *ev;
+ const u8 *frame;
+ int ret;
+
+ tb = ath11k_wmi_tlv_parse_alloc(ab, skb->data, skb->len, GFP_ATOMIC);
+ if (IS_ERR(tb)) {
+ ret = PTR_ERR(tb);
+ ath11k_warn(ab, "failed to parse tlv: %d\n", ret);
+ return ret;
+ }
+
+ ev = tb[WMI_TAG_MGMT_RX_HDR];
+ frame = tb[WMI_TAG_ARRAY_BYTE];
+
+ if (!ev || !frame) {
+ ath11k_warn(ab, "failed to fetch mgmt rx hdr");
+ kfree(tb);
+ return -EPROTO;
+ }
+
+ hdr->pdev_id = ev->pdev_id;
+ hdr->channel = ev->channel;
+ hdr->snr = ev->snr;
+ hdr->rate = ev->rate;
+ hdr->phy_mode = ev->phy_mode;
+ hdr->buf_len = ev->buf_len;
+ hdr->status = ev->status;
+ hdr->flags = ev->flags;
+ hdr->rssi = ev->rssi;
+ hdr->tsf_delta = ev->tsf_delta;
+ memcpy(hdr->rssi_ctl, ev->rssi_ctl, sizeof(hdr->rssi_ctl));
+
+ if (skb->len < (frame - skb->data) + hdr->buf_len) {
+ ath11k_warn(ab, "invalid length in mgmt rx hdr ev");
+ kfree(tb);
+ return -EPROTO;
+ }
+
+ /* Shift the sk_buff so its data starts at `frame` and spans buf_len
+ * bytes: trim to empty, extend to the frame offset, pull that offset
+ * off the front, then extend by the frame length.
+ */
+ skb_trim(skb, 0);
+ skb_put(skb, frame - skb->data);
+ skb_pull(skb, frame - skb->data);
+ skb_put(skb, hdr->buf_len);
+
+ ath11k_ce_byte_swap(skb->data, hdr->buf_len);
+
+ kfree(tb);
+ return 0;
+}
+
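+/* Complete an offloaded mgmt-frame transmission: look up the frame by its
+ * descriptor id in the per-radio IDR, unmap its DMA buffer and hand the
+ * status to mac80211; a zero status on a frame expecting an ACK is
+ * reported as acked.
+ */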
+static int wmi_process_mgmt_tx_comp(struct ath11k *ar, u32 desc_id,
+ u32 status)
+{
+ struct sk_buff *msdu;
+ struct ieee80211_tx_info *info;
+ struct ath11k_skb_cb *skb_cb;
+
+ spin_lock_bh(&ar->txmgmt_idr_lock);
+ msdu = idr_find(&ar->txmgmt_idr, desc_id);
+
+ if (!msdu) {
+ ath11k_warn(ar->ab, "received mgmt tx compl for invalid msdu_id: %d\n",
+ desc_id);
+ spin_unlock_bh(&ar->txmgmt_idr_lock);
+ return -ENOENT;
+ }
+
+ idr_remove(&ar->txmgmt_idr, desc_id);
+ spin_unlock_bh(&ar->txmgmt_idr_lock);
+
+ skb_cb = ATH11K_SKB_CB(msdu);
+ dma_unmap_single(ar->ab->dev, skb_cb->paddr, msdu->len, DMA_TO_DEVICE);
+
+ info = IEEE80211_SKB_CB(msdu);
+ if ((!(info->flags & IEEE80211_TX_CTL_NO_ACK)) && !status)
+ info->flags |= IEEE80211_TX_STAT_ACK;
+
+ ieee80211_tx_status_irqsafe(ar->hw, msdu);
+
+ WARN_ON_ONCE(atomic_read(&ar->num_pending_mgmt_tx) == 0);
+ atomic_dec(&ar->num_pending_mgmt_tx);
+
+ return 0;
+}
+
+static int ath11k_pull_mgmt_tx_compl_param_tlv(struct ath11k_base *ab,
+ struct sk_buff *skb,
+ struct wmi_mgmt_tx_compl_event *param)
+{
+ const void **tb;
+ const struct wmi_mgmt_tx_compl_event *ev;
+ int ret;
+
+ tb = ath11k_wmi_tlv_parse_alloc(ab, skb->data, skb->len, GFP_ATOMIC);
+ if (IS_ERR(tb)) {
+ ret = PTR_ERR(tb);
+ ath11k_warn(ab, "failed to parse tlv: %d\n", ret);
+ return ret;
+ }
+
+ ev = tb[WMI_TAG_MGMT_TX_COMPL_EVENT];
+ if (!ev) {
+ ath11k_warn(ab, "failed to fetch mgmt tx compl ev");
+ kfree(tb);
+ return -EPROTO;
+ }
+
+ param->pdev_id = ev->pdev_id;
+ param->desc_id = ev->desc_id;
+ param->status = ev->status;
+
+ kfree(tb);
+ return 0;
+}
+
+static void ath11k_wmi_event_scan_started(struct ath11k *ar)
+{
+ lockdep_assert_held(&ar->data_lock);
+
+ switch (ar->scan.state) {
+ case ATH11K_SCAN_IDLE:
+ case ATH11K_SCAN_RUNNING:
+ case ATH11K_SCAN_ABORTING:
+ ath11k_warn(ar->ab, "received scan started event in an invalid scan state: %s (%d)\n",
+ ath11k_scan_state_str(ar->scan.state),
+ ar->scan.state);
+ break;
+ case ATH11K_SCAN_STARTING:
+ ar->scan.state = ATH11K_SCAN_RUNNING;
+ complete(&ar->scan.started);
+ break;
+ }
+}
+
+static void ath11k_wmi_event_scan_start_failed(struct ath11k *ar)
+{
+ lockdep_assert_held(&ar->data_lock);
+
+ switch (ar->scan.state) {
+ case ATH11K_SCAN_IDLE:
+ case ATH11K_SCAN_RUNNING:
+ case ATH11K_SCAN_ABORTING:
+ ath11k_warn(ar->ab, "received scan start failed event in an invalid scan state: %s (%d)\n",
+ ath11k_scan_state_str(ar->scan.state),
+ ar->scan.state);
+ break;
+ case ATH11K_SCAN_STARTING:
+ complete(&ar->scan.started);
+ __ath11k_mac_scan_finish(ar);
+ break;
+ }
+}
+
+static void ath11k_wmi_event_scan_completed(struct ath11k *ar)
+{
+ lockdep_assert_held(&ar->data_lock);
+
+ switch (ar->scan.state) {
+ case ATH11K_SCAN_IDLE:
+ case ATH11K_SCAN_STARTING:
+ /* One suspected reason scan can be completed while starting is
+ * if firmware fails to deliver all scan events to the host,
+ * e.g. when transport pipe is full. This has been observed
+ * with spectral scan phyerr events starving wmi transport
+ * pipe. In such case the "scan completed" event should be (and
+ * is) ignored by the host as it may be just firmware's scan
+ * state machine recovering.
+ */
+ ath11k_warn(ar->ab, "received scan completed event in an invalid scan state: %s (%d)\n",
+ ath11k_scan_state_str(ar->scan.state),
+ ar->scan.state);
+ break;
+ case ATH11K_SCAN_RUNNING:
+ case ATH11K_SCAN_ABORTING:
+ __ath11k_mac_scan_finish(ar);
+ break;
+ }
+}
+
+static void ath11k_wmi_event_scan_bss_chan(struct ath11k *ar)
+{
+ lockdep_assert_held(&ar->data_lock);
+
+ switch (ar->scan.state) {
+ case ATH11K_SCAN_IDLE:
+ case ATH11K_SCAN_STARTING:
+ ath11k_warn(ar->ab, "received scan bss chan event in an invalid scan state: %s (%d)\n",
+ ath11k_scan_state_str(ar->scan.state),
+ ar->scan.state);
+ break;
+ case ATH11K_SCAN_RUNNING:
+ case ATH11K_SCAN_ABORTING:
+ ar->scan_channel = NULL;
+ break;
+ }
+}
+
+static void ath11k_wmi_event_scan_foreign_chan(struct ath11k *ar, u32 freq)
+{
+ lockdep_assert_held(&ar->data_lock);
+
+ switch (ar->scan.state) {
+ case ATH11K_SCAN_IDLE:
+ case ATH11K_SCAN_STARTING:
+ ath11k_warn(ar->ab, "received scan foreign chan event in an invalid scan state: %s (%d)\n",
+ ath11k_scan_state_str(ar->scan.state),
+ ar->scan.state);
+ break;
+ case ATH11K_SCAN_RUNNING:
+ case ATH11K_SCAN_ABORTING:
+ ar->scan_channel = ieee80211_get_channel(ar->hw->wiphy, freq);
+ break;
+ }
+}
+
+static const char *
+ath11k_wmi_event_scan_type_str(enum wmi_scan_event_type type,
+ enum wmi_scan_completion_reason reason)
+{
+ switch (type) {
+ case WMI_SCAN_EVENT_STARTED:
+ return "started";
+ case WMI_SCAN_EVENT_COMPLETED:
+ switch (reason) {
+ case WMI_SCAN_REASON_COMPLETED:
+ return "completed";
+ case WMI_SCAN_REASON_CANCELLED:
+ return "completed [cancelled]";
+ case WMI_SCAN_REASON_PREEMPTED:
+ return "completed [preempted]";
+ case WMI_SCAN_REASON_TIMEDOUT:
+ return "completed [timedout]";
+ case WMI_SCAN_REASON_INTERNAL_FAILURE:
+ return "completed [internal err]";
+ case WMI_SCAN_REASON_MAX:
+ break;
+ }
+ return "completed [unknown]";
+ case WMI_SCAN_EVENT_BSS_CHANNEL:
+ return "bss channel";
+ case WMI_SCAN_EVENT_FOREIGN_CHAN:
+ return "foreign channel";
+ case WMI_SCAN_EVENT_DEQUEUED:
+ return "dequeued";
+ case WMI_SCAN_EVENT_PREEMPTED:
+ return "preempted";
+ case WMI_SCAN_EVENT_START_FAILED:
+ return "start failed";
+ case WMI_SCAN_EVENT_RESTARTED:
+ return "restarted";
+ case WMI_SCAN_EVENT_FOREIGN_CHAN_EXIT:
+ return "foreign channel exit";
+ default:
+ return "unknown";
+ }
+}
+
+static int ath11k_pull_scan_ev(struct ath11k_base *ab, struct sk_buff *skb,
+ struct wmi_scan_event *scan_evt_param)
+{
+ const void **tb;
+ const struct wmi_scan_event *ev;
+ int ret;
+
+ tb = ath11k_wmi_tlv_parse_alloc(ab, skb->data, skb->len, GFP_ATOMIC);
+ if (IS_ERR(tb)) {
+ ret = PTR_ERR(tb);
+ ath11k_warn(ab, "failed to parse tlv: %d\n", ret);
+ return ret;
+ }
+
+ ev = tb[WMI_TAG_SCAN_EVENT];
+ if (!ev) {
+ ath11k_warn(ab, "failed to fetch scan ev");
+ kfree(tb);
+ return -EPROTO;
+ }
+
+ scan_evt_param->event_type = ev->event_type;
+ scan_evt_param->reason = ev->reason;
+ scan_evt_param->channel_freq = ev->channel_freq;
+ scan_evt_param->scan_req_id = ev->scan_req_id;
+ scan_evt_param->scan_id = ev->scan_id;
+ scan_evt_param->vdev_id = ev->vdev_id;
+ scan_evt_param->tsf_timestamp = ev->tsf_timestamp;
+
+ kfree(tb);
+ return 0;
+}
+
+static int ath11k_pull_peer_sta_kickout_ev(struct ath11k_base *ab, struct sk_buff *skb,
+ struct wmi_peer_sta_kickout_arg *arg)
+{
+ const void **tb;
+ const struct wmi_peer_sta_kickout_event *ev;
+ int ret;
+
+ tb = ath11k_wmi_tlv_parse_alloc(ab, skb->data, skb->len, GFP_ATOMIC);
+ if (IS_ERR(tb)) {
+ ret = PTR_ERR(tb);
+ ath11k_warn(ab, "failed to parse tlv: %d\n", ret);
+ return ret;
+ }
+
+ ev = tb[WMI_TAG_PEER_STA_KICKOUT_EVENT];
+ if (!ev) {
+ ath11k_warn(ab, "failed to fetch peer sta kickout ev");
+ kfree(tb);
+ return -EPROTO;
+ }
+
+ arg->mac_addr = ev->peer_macaddr.addr;
+
+ kfree(tb);
+ return 0;
+}
+
+static int ath11k_pull_roam_ev(struct ath11k_base *ab, struct sk_buff *skb,
+ struct wmi_roam_event *roam_ev)
+{
+ const void **tb;
+ const struct wmi_roam_event *ev;
+ int ret;
+
+ tb = ath11k_wmi_tlv_parse_alloc(ab, skb->data, skb->len, GFP_ATOMIC);
+ if (IS_ERR(tb)) {
+ ret = PTR_ERR(tb);
+ ath11k_warn(ab, "failed to parse tlv: %d\n", ret);
+ return ret;
+ }
+
+ ev = tb[WMI_TAG_ROAM_EVENT];
+ if (!ev) {
+ ath11k_warn(ab, "failed to fetch roam ev");
+ kfree(tb);
+ return -EPROTO;
+ }
+
+ roam_ev->vdev_id = ev->vdev_id;
+ roam_ev->reason = ev->reason;
+ roam_ev->rssi = ev->rssi;
+
+ kfree(tb);
+ return 0;
+}
+
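+/* Map a channel center frequency to a flat index across all bands
+ * advertised to mac80211; used to index ar->survey[]. If the frequency
+ * is not found, the returned index equals the total channel count, so
+ * callers must bounds-check it (see the ARRAY_SIZE() checks below).
+ */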
+static int freq_to_idx(struct ath11k *ar, int freq)
+{
+ struct ieee80211_supported_band *sband;
+ int band, ch, idx = 0;
+
+ for (band = NL80211_BAND_2GHZ; band < NUM_NL80211_BANDS; band++) {
+ sband = ar->hw->wiphy->bands[band];
+ if (!sband)
+ continue;
+
+ for (ch = 0; ch < sband->n_channels; ch++, idx++)
+ if (sband->channels[ch].center_freq == freq)
+ goto exit;
+ }
+
+exit:
+ return idx;
+}
+
+static int ath11k_pull_chan_info_ev(struct ath11k_base *ab, u8 *evt_buf,
+ u32 len, struct wmi_chan_info_event *ch_info_ev)
+{
+ const void **tb;
+ const struct wmi_chan_info_event *ev;
+ int ret;
+
+ tb = ath11k_wmi_tlv_parse_alloc(ab, evt_buf, len, GFP_ATOMIC);
+ if (IS_ERR(tb)) {
+ ret = PTR_ERR(tb);
+ ath11k_warn(ab, "failed to parse tlv: %d\n", ret);
+ return ret;
+ }
+
+ ev = tb[WMI_TAG_CHAN_INFO_EVENT];
+ if (!ev) {
+ ath11k_warn(ab, "failed to fetch chan info ev");
+ kfree(tb);
+ return -EPROTO;
+ }
+
+ ch_info_ev->err_code = ev->err_code;
+ ch_info_ev->freq = ev->freq;
+ ch_info_ev->cmd_flags = ev->cmd_flags;
+ ch_info_ev->noise_floor = ev->noise_floor;
+ ch_info_ev->rx_clear_count = ev->rx_clear_count;
+ ch_info_ev->cycle_count = ev->cycle_count;
+ ch_info_ev->chan_tx_pwr_range = ev->chan_tx_pwr_range;
+ ch_info_ev->chan_tx_pwr_tp = ev->chan_tx_pwr_tp;
+ ch_info_ev->rx_frame_count = ev->rx_frame_count;
+ ch_info_ev->tx_frame_cnt = ev->tx_frame_cnt;
+ ch_info_ev->mac_clk_mhz = ev->mac_clk_mhz;
+ ch_info_ev->vdev_id = ev->vdev_id;
+
+ kfree(tb);
+ return 0;
+}
+
+static int
+ath11k_pull_pdev_bss_chan_info_ev(struct ath11k_base *ab, struct sk_buff *skb,
+ struct wmi_pdev_bss_chan_info_event *bss_ch_info_ev)
+{
+ const void **tb;
+ const struct wmi_pdev_bss_chan_info_event *ev;
+ int ret;
+
+ tb = ath11k_wmi_tlv_parse_alloc(ab, skb->data, skb->len, GFP_ATOMIC);
+ if (IS_ERR(tb)) {
+ ret = PTR_ERR(tb);
+ ath11k_warn(ab, "failed to parse tlv: %d\n", ret);
+ return ret;
+ }
+
+ ev = tb[WMI_TAG_PDEV_BSS_CHAN_INFO_EVENT];
+ if (!ev) {
+ ath11k_warn(ab, "failed to fetch pdev bss chan info ev");
+ kfree(tb);
+ return -EPROTO;
+ }
+
+ bss_ch_info_ev->pdev_id = ev->pdev_id;
+ bss_ch_info_ev->freq = ev->freq;
+ bss_ch_info_ev->noise_floor = ev->noise_floor;
+ bss_ch_info_ev->rx_clear_count_low = ev->rx_clear_count_low;
+ bss_ch_info_ev->rx_clear_count_high = ev->rx_clear_count_high;
+ bss_ch_info_ev->cycle_count_low = ev->cycle_count_low;
+ bss_ch_info_ev->cycle_count_high = ev->cycle_count_high;
+ bss_ch_info_ev->tx_cycle_count_low = ev->tx_cycle_count_low;
+ bss_ch_info_ev->tx_cycle_count_high = ev->tx_cycle_count_high;
+ bss_ch_info_ev->rx_cycle_count_low = ev->rx_cycle_count_low;
+ bss_ch_info_ev->rx_cycle_count_high = ev->rx_cycle_count_high;
+ bss_ch_info_ev->rx_bss_cycle_count_low = ev->rx_bss_cycle_count_low;
+ bss_ch_info_ev->rx_bss_cycle_count_high = ev->rx_bss_cycle_count_high;
+
+ kfree(tb);
+ return 0;
+}
+
+static int
+ath11k_pull_vdev_install_key_compl_ev(struct ath11k_base *ab, struct sk_buff *skb,
+ struct wmi_vdev_install_key_complete_arg *arg)
+{
+ const void **tb;
+ const struct wmi_vdev_install_key_compl_event *ev;
+ int ret;
+
+ tb = ath11k_wmi_tlv_parse_alloc(ab, skb->data, skb->len, GFP_ATOMIC);
+ if (IS_ERR(tb)) {
+ ret = PTR_ERR(tb);
+ ath11k_warn(ab, "failed to parse tlv: %d\n", ret);
+ return ret;
+ }
+
+ ev = tb[WMI_TAG_VDEV_INSTALL_KEY_COMPLETE_EVENT];
+ if (!ev) {
+ ath11k_warn(ab, "failed to fetch vdev install key compl ev");
+ kfree(tb);
+ return -EPROTO;
+ }
+
+ arg->vdev_id = ev->vdev_id;
+ arg->macaddr = ev->peer_macaddr.addr;
+ arg->key_idx = ev->key_idx;
+ arg->key_flags = ev->key_flags;
+ arg->status = ev->status;
+
+ kfree(tb);
+ return 0;
+}
+
+static int ath11k_pull_peer_assoc_conf_ev(struct ath11k_base *ab, struct sk_buff *skb,
+ struct wmi_peer_assoc_conf_arg *peer_assoc_conf)
+{
+ const void **tb;
+ const struct wmi_peer_assoc_conf_event *ev;
+ int ret;
+
+ tb = ath11k_wmi_tlv_parse_alloc(ab, skb->data, skb->len, GFP_ATOMIC);
+ if (IS_ERR(tb)) {
+ ret = PTR_ERR(tb);
+ ath11k_warn(ab, "failed to parse tlv: %d\n", ret);
+ return ret;
+ }
+
+ ev = tb[WMI_TAG_PEER_ASSOC_CONF_EVENT];
+ if (!ev) {
+ ath11k_warn(ab, "failed to fetch peer assoc conf ev");
+ kfree(tb);
+ return -EPROTO;
+ }
+
+ peer_assoc_conf->vdev_id = ev->vdev_id;
+ peer_assoc_conf->macaddr = ev->peer_macaddr.addr;
+
+ kfree(tb);
+ return 0;
+}
+
+static void ath11k_wmi_pull_pdev_stats_base(const struct wmi_pdev_stats_base *src,
+ struct ath11k_fw_stats_pdev *dst)
+{
+ dst->ch_noise_floor = src->chan_nf;
+ dst->tx_frame_count = src->tx_frame_count;
+ dst->rx_frame_count = src->rx_frame_count;
+ dst->rx_clear_count = src->rx_clear_count;
+ dst->cycle_count = src->cycle_count;
+ dst->phy_err_count = src->phy_err_count;
+ dst->chan_tx_power = src->chan_tx_pwr;
+}
+
+static void
+ath11k_wmi_pull_pdev_stats_tx(const struct wmi_pdev_stats_tx *src,
+ struct ath11k_fw_stats_pdev *dst)
+{
+ dst->comp_queued = src->comp_queued;
+ dst->comp_delivered = src->comp_delivered;
+ dst->msdu_enqued = src->msdu_enqued;
+ dst->mpdu_enqued = src->mpdu_enqued;
+ dst->wmm_drop = src->wmm_drop;
+ dst->local_enqued = src->local_enqued;
+ dst->local_freed = src->local_freed;
+ dst->hw_queued = src->hw_queued;
+ dst->hw_reaped = src->hw_reaped;
+ dst->underrun = src->underrun;
+ dst->tx_abort = src->tx_abort;
+ dst->mpdus_requed = src->mpdus_requed;
+ dst->tx_ko = src->tx_ko;
+ dst->data_rc = src->data_rc;
+ dst->self_triggers = src->self_triggers;
+ dst->sw_retry_failure = src->sw_retry_failure;
+ dst->illgl_rate_phy_err = src->illgl_rate_phy_err;
+ dst->pdev_cont_xretry = src->pdev_cont_xretry;
+ dst->pdev_tx_timeout = src->pdev_tx_timeout;
+ dst->pdev_resets = src->pdev_resets;
+ dst->stateless_tid_alloc_failure = src->stateless_tid_alloc_failure;
+ dst->phy_underrun = src->phy_underrun;
+ dst->txop_ovf = src->txop_ovf;
+}
+
+static void ath11k_wmi_pull_pdev_stats_rx(const struct wmi_pdev_stats_rx *src,
+ struct ath11k_fw_stats_pdev *dst)
+{
+ dst->mid_ppdu_route_change = src->mid_ppdu_route_change;
+ dst->status_rcvd = src->status_rcvd;
+ dst->r0_frags = src->r0_frags;
+ dst->r1_frags = src->r1_frags;
+ dst->r2_frags = src->r2_frags;
+ dst->r3_frags = src->r3_frags;
+ dst->htt_msdus = src->htt_msdus;
+ dst->htt_mpdus = src->htt_mpdus;
+ dst->loc_msdus = src->loc_msdus;
+ dst->loc_mpdus = src->loc_mpdus;
+ dst->oversize_amsdu = src->oversize_amsdu;
+ dst->phy_errs = src->phy_errs;
+ dst->phy_err_drop = src->phy_err_drop;
+ dst->mpdu_errs = src->mpdu_errs;
+}
+
+static void
+ath11k_wmi_pull_vdev_stats(const struct wmi_vdev_stats *src,
+ struct ath11k_fw_stats_vdev *dst)
+{
+ int i;
+
+ dst->vdev_id = src->vdev_id;
+ dst->beacon_snr = src->beacon_snr;
+ dst->data_snr = src->data_snr;
+ dst->num_rx_frames = src->num_rx_frames;
+ dst->num_rts_fail = src->num_rts_fail;
+ dst->num_rts_success = src->num_rts_success;
+ dst->num_rx_err = src->num_rx_err;
+ dst->num_rx_discard = src->num_rx_discard;
+ dst->num_tx_not_acked = src->num_tx_not_acked;
+
+ for (i = 0; i < ARRAY_SIZE(src->num_tx_frames); i++)
+ dst->num_tx_frames[i] = src->num_tx_frames[i];
+
+ for (i = 0; i < ARRAY_SIZE(src->num_tx_frames_retries); i++)
+ dst->num_tx_frames_retries[i] = src->num_tx_frames_retries[i];
+
+ for (i = 0; i < ARRAY_SIZE(src->num_tx_frames_failures); i++)
+ dst->num_tx_frames_failures[i] = src->num_tx_frames_failures[i];
+
+ for (i = 0; i < ARRAY_SIZE(src->tx_rate_history); i++)
+ dst->tx_rate_history[i] = src->tx_rate_history[i];
+
+ for (i = 0; i < ARRAY_SIZE(src->beacon_rssi_history); i++)
+ dst->beacon_rssi_history[i] = src->beacon_rssi_history[i];
+}
+
+static void
+ath11k_wmi_pull_bcn_stats(const struct wmi_bcn_stats *src,
+ struct ath11k_fw_stats_bcn *dst)
+{
+ dst->vdev_id = src->vdev_id;
+ dst->tx_bcn_succ_cnt = src->tx_bcn_succ_cnt;
+ dst->tx_bcn_outage_cnt = src->tx_bcn_outage_cnt;
+}
+
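+/* The stats event carries a fixed wmi_stats_event header followed by a
+ * byte array holding num_pdev_stats/num_vdev_stats/num_bcn_stats
+ * structures back to back; consume them sequentially, bounds-checking
+ * the remaining length before each copy.
+ */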
+int ath11k_wmi_pull_fw_stats(struct ath11k_base *ab, struct sk_buff *skb,
+ struct ath11k_fw_stats *stats)
+{
+ const void **tb;
+ const struct wmi_stats_event *ev;
+ const void *data;
+ int i, ret;
+ u32 len = skb->len;
+
+ tb = ath11k_wmi_tlv_parse_alloc(ab, skb->data, len, GFP_ATOMIC);
+ if (IS_ERR(tb)) {
+ ret = PTR_ERR(tb);
+ ath11k_warn(ab, "failed to parse tlv: %d\n", ret);
+ return ret;
+ }
+
+ ev = tb[WMI_TAG_STATS_EVENT];
+ data = tb[WMI_TAG_ARRAY_BYTE];
+ if (!ev || !data) {
+ ath11k_warn(ab, "failed to fetch update stats ev");
+ kfree(tb);
+ return -EPROTO;
+ }
+
+ ath11k_dbg(ab, ATH11K_DBG_WMI,
+ "wmi stats update ev pdev_id %d pdev %i vdev %i bcn %i\n",
+ ev->pdev_id,
+ ev->num_pdev_stats, ev->num_vdev_stats,
+ ev->num_bcn_stats);
+
+ stats->pdev_id = ev->pdev_id;
+ stats->stats_id = 0;
+
+ for (i = 0; i < ev->num_pdev_stats; i++) {
+ const struct wmi_pdev_stats *src;
+ struct ath11k_fw_stats_pdev *dst;
+
+ src = data;
+ if (len < sizeof(*src)) {
+ kfree(tb);
+ return -EPROTO;
+ }
+
+ stats->stats_id = WMI_REQUEST_PDEV_STAT;
+
+ data += sizeof(*src);
+ len -= sizeof(*src);
+
+ dst = kzalloc(sizeof(*dst), GFP_ATOMIC);
+ if (!dst)
+ continue;
+
+ ath11k_wmi_pull_pdev_stats_base(&src->base, dst);
+ ath11k_wmi_pull_pdev_stats_tx(&src->tx, dst);
+ ath11k_wmi_pull_pdev_stats_rx(&src->rx, dst);
+ list_add_tail(&dst->list, &stats->pdevs);
+ }
+
+ for (i = 0; i < ev->num_vdev_stats; i++) {
+ const struct wmi_vdev_stats *src;
+ struct ath11k_fw_stats_vdev *dst;
+
+ src = data;
+ if (len < sizeof(*src)) {
+ kfree(tb);
+ return -EPROTO;
+ }
+
+ stats->stats_id = WMI_REQUEST_VDEV_STAT;
+
+ data += sizeof(*src);
+ len -= sizeof(*src);
+
+ dst = kzalloc(sizeof(*dst), GFP_ATOMIC);
+ if (!dst)
+ continue;
+
+ ath11k_wmi_pull_vdev_stats(src, dst);
+ list_add_tail(&dst->list, &stats->vdevs);
+ }
+
+ for (i = 0; i < ev->num_bcn_stats; i++) {
+ const struct wmi_bcn_stats *src;
+ struct ath11k_fw_stats_bcn *dst;
+
+ src = data;
+ if (len < sizeof(*src)) {
+ kfree(tb);
+ return -EPROTO;
+ }
+
+ stats->stats_id = WMI_REQUEST_BCN_STAT;
+
+ data += sizeof(*src);
+ len -= sizeof(*src);
+
+ dst = kzalloc(sizeof(*dst), GFP_ATOMIC);
+ if (!dst)
+ continue;
+
+ ath11k_wmi_pull_bcn_stats(src, dst);
+ list_add_tail(&dst->list, &stats->bcn);
+ }
+
+ kfree(tb);
+ return 0;
+}
+
+size_t ath11k_wmi_fw_stats_num_vdevs(struct list_head *head)
+{
+ struct ath11k_fw_stats_vdev *i;
+ size_t num = 0;
+
+ list_for_each_entry(i, head, list)
+ ++num;
+
+ return num;
+}
+
+static size_t ath11k_wmi_fw_stats_num_bcn(struct list_head *head)
+{
+ struct ath11k_fw_stats_bcn *i;
+ size_t num = 0;
+
+ list_for_each_entry(i, head, list)
+ ++num;
+
+ return num;
+}
+
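+/* The *_stats_fill() helpers below append human-readable lines to a
+ * caller-provided buffer of ATH11K_FW_STATS_BUF_SIZE bytes, advancing
+ * *length as they go; scnprintf() ensures they never overrun it.
+ */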
+static void
+ath11k_wmi_fw_pdev_base_stats_fill(const struct ath11k_fw_stats_pdev *pdev,
+ char *buf, u32 *length)
+{
+ u32 len = *length;
+ u32 buf_len = ATH11K_FW_STATS_BUF_SIZE;
+
+ len += scnprintf(buf + len, buf_len - len, "\n");
+ len += scnprintf(buf + len, buf_len - len, "%30s\n",
+ "ath11k PDEV stats");
+ len += scnprintf(buf + len, buf_len - len, "%30s\n\n",
+ "=================");
+
+ len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
+ "Channel noise floor", pdev->ch_noise_floor);
+ len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
+ "Channel TX power", pdev->chan_tx_power);
+ len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
+ "TX frame count", pdev->tx_frame_count);
+ len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
+ "RX frame count", pdev->rx_frame_count);
+ len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
+ "RX clear count", pdev->rx_clear_count);
+ len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
+ "Cycle count", pdev->cycle_count);
+ len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
+ "PHY error count", pdev->phy_err_count);
+
+ *length = len;
+}
+
+static void
+ath11k_wmi_fw_pdev_tx_stats_fill(const struct ath11k_fw_stats_pdev *pdev,
+ char *buf, u32 *length)
+{
+ u32 len = *length;
+ u32 buf_len = ATH11K_FW_STATS_BUF_SIZE;
+
+ len += scnprintf(buf + len, buf_len - len, "\n%30s\n",
+ "ath11k PDEV TX stats");
+ len += scnprintf(buf + len, buf_len - len, "%30s\n\n",
+ "====================");
+
+ len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
+ "HTT cookies queued", pdev->comp_queued);
+ len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
+ "HTT cookies disp.", pdev->comp_delivered);
+ len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
+ "MSDU queued", pdev->msdu_enqued);
+ len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
+ "MPDU queued", pdev->mpdu_enqued);
+ len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
+ "MSDUs dropped", pdev->wmm_drop);
+ len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
+ "Local enqued", pdev->local_enqued);
+ len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
+ "Local freed", pdev->local_freed);
+ len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
+ "HW queued", pdev->hw_queued);
+ len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
+ "PPDUs reaped", pdev->hw_reaped);
+ len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
+ "Num underruns", pdev->underrun);
+ len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
+ "PPDUs cleaned", pdev->tx_abort);
+ len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
+ "MPDUs requed", pdev->mpdus_requed);
+ len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
+ "Excessive retries", pdev->tx_ko);
+ len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
+ "HW rate", pdev->data_rc);
+ len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
+ "Sched self triggers", pdev->self_triggers);
+ len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
+ "Dropped due to SW retries",
+ pdev->sw_retry_failure);
+ len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
+ "Illegal rate phy errors",
+ pdev->illgl_rate_phy_err);
+ len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
+ "PDEV continuous xretry", pdev->pdev_cont_xretry);
+ len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
+ "TX timeout", pdev->pdev_tx_timeout);
+ len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
+ "PDEV resets", pdev->pdev_resets);
+ len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
+ "Stateless TIDs alloc failures",
+ pdev->stateless_tid_alloc_failure);
+ len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
+ "PHY underrun", pdev->phy_underrun);
+ len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
+ "MPDU is more than txop limit", pdev->txop_ovf);
+ *length = len;
+}
+
+static void
+ath11k_wmi_fw_pdev_rx_stats_fill(const struct ath11k_fw_stats_pdev *pdev,
+ char *buf, u32 *length)
+{
+ u32 len = *length;
+ u32 buf_len = ATH11K_FW_STATS_BUF_SIZE;
+
+ len += scnprintf(buf + len, buf_len - len, "\n%30s\n",
+ "ath11k PDEV RX stats");
+ len += scnprintf(buf + len, buf_len - len, "%30s\n\n",
+ "====================");
+
+ len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
+ "Mid PPDU route change",
+ pdev->mid_ppdu_route_change);
+ len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
+ "Tot. number of statuses", pdev->status_rcvd);
+ len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
+ "Extra frags on rings 0", pdev->r0_frags);
+ len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
+ "Extra frags on rings 1", pdev->r1_frags);
+ len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
+ "Extra frags on rings 2", pdev->r2_frags);
+ len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
+ "Extra frags on rings 3", pdev->r3_frags);
+ len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
+ "MSDUs delivered to HTT", pdev->htt_msdus);
+ len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
+ "MPDUs delivered to HTT", pdev->htt_mpdus);
+ len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
+ "MSDUs delivered to stack", pdev->loc_msdus);
+ len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
+ "MPDUs delivered to stack", pdev->loc_mpdus);
+ len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
+ "Oversized AMSUs", pdev->oversize_amsdu);
+ len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
+ "PHY errors", pdev->phy_errs);
+ len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
+ "PHY errors drops", pdev->phy_err_drop);
+ len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
+ "MPDU errors (FCS, MIC, ENC)", pdev->mpdu_errs);
+ *length = len;
+}
+
+static void
+ath11k_wmi_fw_vdev_stats_fill(struct ath11k *ar,
+ const struct ath11k_fw_stats_vdev *vdev,
+ char *buf, u32 *length)
+{
+ u32 len = *length;
+ u32 buf_len = ATH11K_FW_STATS_BUF_SIZE;
+ struct ath11k_vif *arvif = ath11k_mac_get_arvif(ar, vdev->vdev_id);
+ u8 *vif_macaddr;
+ int i;
+
+ /* VDEV stats include all the active VDEVs of other PDEVs as
+ * well; ignore those not part of the requested PDEV.
+ */
+ if (!arvif)
+ return;
+
+ vif_macaddr = arvif->vif->addr;
+
+ len += scnprintf(buf + len, buf_len - len, "%30s %u\n",
+ "VDEV ID", vdev->vdev_id);
+ len += scnprintf(buf + len, buf_len - len, "%30s %pM\n",
+ "VDEV MAC address", vif_macaddr);
+ len += scnprintf(buf + len, buf_len - len, "%30s %u\n",
+ "beacon snr", vdev->beacon_snr);
+ len += scnprintf(buf + len, buf_len - len, "%30s %u\n",
+ "data snr", vdev->data_snr);
+ len += scnprintf(buf + len, buf_len - len, "%30s %u\n",
+ "num rx frames", vdev->num_rx_frames);
+ len += scnprintf(buf + len, buf_len - len, "%30s %u\n",
+ "num rts fail", vdev->num_rts_fail);
+ len += scnprintf(buf + len, buf_len - len, "%30s %u\n",
+ "num rts success", vdev->num_rts_success);
+ len += scnprintf(buf + len, buf_len - len, "%30s %u\n",
+ "num rx err", vdev->num_rx_err);
+ len += scnprintf(buf + len, buf_len - len, "%30s %u\n",
+ "num rx discard", vdev->num_rx_discard);
+ len += scnprintf(buf + len, buf_len - len, "%30s %u\n",
+ "num tx not acked", vdev->num_tx_not_acked);
+
+ for (i = 0 ; i < ARRAY_SIZE(vdev->num_tx_frames); i++)
+ len += scnprintf(buf + len, buf_len - len,
+ "%25s [%02d] %u\n",
+ "num tx frames", i,
+ vdev->num_tx_frames[i]);
+
+ for (i = 0 ; i < ARRAY_SIZE(vdev->num_tx_frames_retries); i++)
+ len += scnprintf(buf + len, buf_len - len,
+ "%25s [%02d] %u\n",
+ "num tx frames retries", i,
+ vdev->num_tx_frames_retries[i]);
+
+ for (i = 0 ; i < ARRAY_SIZE(vdev->num_tx_frames_failures); i++)
+ len += scnprintf(buf + len, buf_len - len,
+ "%25s [%02d] %u\n",
+ "num tx frames failures", i,
+ vdev->num_tx_frames_failures[i]);
+
+ for (i = 0 ; i < ARRAY_SIZE(vdev->tx_rate_history); i++)
+ len += scnprintf(buf + len, buf_len - len,
+ "%25s [%02d] 0x%08x\n",
+ "tx rate history", i,
+ vdev->tx_rate_history[i]);
+
+ for (i = 0 ; i < ARRAY_SIZE(vdev->beacon_rssi_history); i++)
+ len += scnprintf(buf + len, buf_len - len,
+ "%25s [%02d] %u\n",
+ "beacon rssi history", i,
+ vdev->beacon_rssi_history[i]);
+
+ len += scnprintf(buf + len, buf_len - len, "\n");
+ *length = len;
+}
+
+static void
+ath11k_wmi_fw_bcn_stats_fill(struct ath11k *ar,
+ const struct ath11k_fw_stats_bcn *bcn,
+ char *buf, u32 *length)
+{
+ u32 len = *length;
+ u32 buf_len = ATH11K_FW_STATS_BUF_SIZE;
+ struct ath11k_vif *arvif = ath11k_mac_get_arvif(ar, bcn->vdev_id);
+ u8 *vdev_macaddr;
+
+ if (!arvif) {
+ ath11k_warn(ar->ab, "invalid vdev id %d in bcn stats",
+ bcn->vdev_id);
+ return;
+ }
+
+ vdev_macaddr = arvif->vif->addr;
+
+ len += scnprintf(buf + len, buf_len - len, "%30s %u\n",
+ "VDEV ID", bcn->vdev_id);
+ len += scnprintf(buf + len, buf_len - len, "%30s %pM\n",
+ "VDEV MAC address", vdev_macaddr);
+ len += scnprintf(buf + len, buf_len - len, "%30s\n\n",
+ "================");
+ len += scnprintf(buf + len, buf_len - len, "%30s %u\n",
+ "Num of beacon tx success", bcn->tx_bcn_succ_cnt);
+ len += scnprintf(buf + len, buf_len - len, "%30s %u\n",
+ "Num of beacon tx failures", bcn->tx_bcn_outage_cnt);
+
+ len += scnprintf(buf + len, buf_len - len, "\n");
+ *length = len;
+}
+
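+/* Render the requested stats group into buf under ar->data_lock and
+ * guarantee NUL termination even if the output was truncated.
+ */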
+void ath11k_wmi_fw_stats_fill(struct ath11k *ar,
+ struct ath11k_fw_stats *fw_stats,
+ u32 stats_id, char *buf)
+{
+ u32 len = 0;
+ u32 buf_len = ATH11K_FW_STATS_BUF_SIZE;
+ const struct ath11k_fw_stats_pdev *pdev;
+ const struct ath11k_fw_stats_vdev *vdev;
+ const struct ath11k_fw_stats_bcn *bcn;
+ size_t num_bcn;
+
+ spin_lock_bh(&ar->data_lock);
+
+ if (stats_id == WMI_REQUEST_PDEV_STAT) {
+ pdev = list_first_entry_or_null(&fw_stats->pdevs,
+ struct ath11k_fw_stats_pdev, list);
+ if (!pdev) {
+ ath11k_warn(ar->ab, "failed to get pdev stats\n");
+ goto unlock;
+ }
+
+ ath11k_wmi_fw_pdev_base_stats_fill(pdev, buf, &len);
+ ath11k_wmi_fw_pdev_tx_stats_fill(pdev, buf, &len);
+ ath11k_wmi_fw_pdev_rx_stats_fill(pdev, buf, &len);
+ }
+
+ if (stats_id == WMI_REQUEST_VDEV_STAT) {
+ len += scnprintf(buf + len, buf_len - len, "\n");
+ len += scnprintf(buf + len, buf_len - len, "%30s\n",
+ "ath11k VDEV stats");
+ len += scnprintf(buf + len, buf_len - len, "%30s\n\n",
+ "=================");
+
+ list_for_each_entry(vdev, &fw_stats->vdevs, list)
+ ath11k_wmi_fw_vdev_stats_fill(ar, vdev, buf, &len);
+ }
+
+ if (stats_id == WMI_REQUEST_BCN_STAT) {
+ num_bcn = ath11k_wmi_fw_stats_num_bcn(&fw_stats->bcn);
+
+ len += scnprintf(buf + len, buf_len - len, "\n");
+ len += scnprintf(buf + len, buf_len - len, "%30s (%zu)\n",
+ "ath11k Beacon stats", num_bcn);
+ len += scnprintf(buf + len, buf_len - len, "%30s\n\n",
+ "===================");
+
+ list_for_each_entry(bcn, &fw_stats->bcn, list)
+ ath11k_wmi_fw_bcn_stats_fill(ar, bcn, buf, &len);
+ }
+
+unlock:
+ spin_unlock_bh(&ar->data_lock);
+
+ if (len >= buf_len)
+ buf[len - 1] = 0;
+ else
+ buf[len] = 0;
+}
+
+static void ath11k_wmi_op_ep_tx_credits(struct ath11k_base *ab)
+{
+ /* Try to send pending beacons first; they take priority. */
+ wake_up(&ab->wmi_ab.tx_credits_wq);
+}
+
+static void ath11k_wmi_htc_tx_complete(struct ath11k_base *ab,
+ struct sk_buff *skb)
+{
+ dev_kfree_skb(skb);
+}
+
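+/* An alpha2 of "00" denotes the world regulatory domain rather than a
+ * specific country code.
+ */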
+static bool ath11k_reg_is_world_alpha(char *alpha)
+{
+ return alpha[0] == '0' && alpha[1] == '0';
+}
+
+static int ath11k_reg_chan_list_event(struct ath11k_base *ab, struct sk_buff *skb)
+{
+ struct cur_regulatory_info *reg_info = NULL;
+ struct ieee80211_regdomain *regd = NULL;
+ bool intersect = false;
+ int ret = 0, pdev_idx;
+ struct ath11k *ar;
+
+ reg_info = kzalloc(sizeof(*reg_info), GFP_ATOMIC);
+ if (!reg_info) {
+ ret = -ENOMEM;
+ goto fallback;
+ }
+
+ ret = ath11k_pull_reg_chan_list_update_ev(ab, skb, reg_info);
+ if (ret) {
+ ath11k_warn(ab, "failed to extract regulatory info from received event\n");
+ goto fallback;
+ }
+
+ if (reg_info->status_code != REG_SET_CC_STATUS_PASS) {
+ /* If setting the requested country fails, fw retains the
+ * current regd. Print a failure message and return from here.
+ */
+ ath11k_warn(ab, "failed to set the requested country regulatory setting\n");
+ goto mem_free;
+ }
+
+ pdev_idx = reg_info->phy_id;
+
+ if (pdev_idx >= ab->num_radios)
+ goto fallback;
+
+ /* Avoid multiple overwrites to the default regd during core
+ * stop-start after mac registration.
+ */
+ if (ab->default_regd[pdev_idx] && !ab->new_regd[pdev_idx] &&
+ !memcmp((char *)ab->default_regd[pdev_idx]->alpha2,
+ (char *)reg_info->alpha2, 2))
+ goto mem_free;
+
+ /* Intersect new rules with default regd if a new country setting was
+ * requested, i.e a default regd was already set during initialization
+ * and the regd coming from this event has a valid country info.
+ */
+ if (ab->default_regd[pdev_idx] &&
+ !ath11k_reg_is_world_alpha((char *)
+ ab->default_regd[pdev_idx]->alpha2) &&
+ !ath11k_reg_is_world_alpha((char *)reg_info->alpha2))
+ intersect = true;
+
+ regd = ath11k_reg_build_regd(ab, reg_info, intersect);
+ if (!regd) {
+ ath11k_warn(ab, "failed to build regd from reg_info\n");
+ goto fallback;
+ }
+
+ spin_lock(&ab->base_lock);
+ if (test_bit(ATH11K_FLAG_REGISTERED, &ab->dev_flags)) {
+ /* Once mac is registered, ar is valid and all CC events from
+ * fw are currently considered to be received due to user
+ * requests.
+ * Free the previously built regd before assigning the newly
+ * generated regd to ar. NULL pointer handling will be
+ * taken care of by kfree itself.
+ */
+ ar = ab->pdevs[pdev_idx].ar;
+ kfree(ab->new_regd[pdev_idx]);
+ ab->new_regd[pdev_idx] = regd;
+ ieee80211_queue_work(ar->hw, &ar->regd_update_work);
+ } else {
+ /* Multiple events for the same *ar are not expected. But we
+ * can still clear any previously stored default_regd if we
+ * are receiving this event for the same radio by mistake.
+ * NULL pointer handling will be taken care of by kfree itself.
+ */
+ kfree(ab->default_regd[pdev_idx]);
+ /* This regd would be applied during mac registration */
+ ab->default_regd[pdev_idx] = regd;
+ }
+ ab->dfs_region = reg_info->dfs_region;
+ spin_unlock(&ab->base_lock);
+
+ goto mem_free;
+
+fallback:
+ /* Fall back to the older regd (by sending the previous country
+ * setting again) if fw has succeeded and we failed to process here.
+ * The regdomain should be uniform across driver and fw. Since the
+ * FW has processed the command and sent a success status, we expect
+ * this function to succeed as well. If it doesn't, CTRY needs to be
+ * reverted at the fw and the old SCAN_CHAN_LIST cmd needs to be sent.
+ */
+ /* TODO: This is rare, but still should also be handled */
+ WARN_ON(1);
+mem_free:
+ if (reg_info) {
+ kfree(reg_info->reg_rules_2g_ptr);
+ kfree(reg_info->reg_rules_5g_ptr);
+ kfree(reg_info);
+ }
+ return ret;
+}
+
+static int ath11k_wmi_tlv_rdy_parse(struct ath11k_base *ab, u16 tag, u16 len,
+ const void *ptr, void *data)
+{
+ struct wmi_tlv_rdy_parse *rdy_parse = data;
+ struct wmi_ready_event *fixed_param;
+ struct wmi_mac_addr *addr_list;
+ struct ath11k_pdev *pdev;
+ u32 num_mac_addr;
+ int i;
+
+ switch (tag) {
+ case WMI_TAG_READY_EVENT:
+ fixed_param = (struct wmi_ready_event *)ptr;
+ ab->wlan_init_status = fixed_param->status;
+ rdy_parse->num_extra_mac_addr = fixed_param->num_extra_mac_addr;
+
+ ether_addr_copy(ab->mac_addr, fixed_param->mac_addr.addr);
+ ab->wmi_ready = true;
+ break;
+ case WMI_TAG_ARRAY_FIXED_STRUCT:
+ addr_list = (struct wmi_mac_addr *)ptr;
+ num_mac_addr = rdy_parse->num_extra_mac_addr;
+
+ if (!(ab->num_radios > 1 && num_mac_addr >= ab->num_radios))
+ break;
+
+ for (i = 0; i < ab->num_radios; i++) {
+ pdev = &ab->pdevs[i];
+ ether_addr_copy(pdev->mac_addr, addr_list[i].addr);
+ }
+ ab->pdevs_macaddr_valid = true;
+ break;
+ default:
+ break;
+ }
+
+ return 0;
+}
+
+static int ath11k_ready_event(struct ath11k_base *ab, struct sk_buff *skb)
+{
+ struct wmi_tlv_rdy_parse rdy_parse = { };
+ int ret;
+
+ ret = ath11k_wmi_tlv_iter(ab, skb->data, skb->len,
+ ath11k_wmi_tlv_rdy_parse, &rdy_parse);
+ if (ret) {
+ ath11k_warn(ab, "failed to parse tlv %d\n", ret);
+ return ret;
+ }
+
+ complete(&ab->wmi_ab.unified_ready);
+ return 0;
+}
+
+static void ath11k_peer_delete_resp_event(struct ath11k_base *ab, struct sk_buff *skb)
+{
+ struct wmi_peer_delete_resp_event peer_del_resp;
+
+ if (ath11k_pull_peer_del_resp_ev(ab, skb, &peer_del_resp) != 0) {
+ ath11k_warn(ab, "failed to extract peer delete resp");
+ return;
+ }
+
+ /* TODO: Do we need to validate whether ath11k_peer_find() returns
+ * NULL, and why is this needed when there is an HTT event for peer
+ * delete?
+ */
+}
+
+static inline const char *ath11k_wmi_vdev_resp_print(u32 vdev_resp_status)
+{
+ switch (vdev_resp_status) {
+ case WMI_VDEV_START_RESPONSE_INVALID_VDEVID:
+ return "invalid vdev id";
+ case WMI_VDEV_START_RESPONSE_NOT_SUPPORTED:
+ return "not supported";
+ case WMI_VDEV_START_RESPONSE_DFS_VIOLATION:
+ return "dfs violation";
+ case WMI_VDEV_START_RESPONSE_INVALID_REGDOMAIN:
+ return "invalid regdomain";
+ default:
+ return "unknown";
+ }
+}
+
+static void ath11k_vdev_start_resp_event(struct ath11k_base *ab, struct sk_buff *skb)
+{
+ struct wmi_vdev_start_resp_event vdev_start_resp;
+ struct ath11k *ar;
+ u32 status;
+
+ if (ath11k_pull_vdev_start_resp_tlv(ab, skb, &vdev_start_resp) != 0) {
+ ath11k_warn(ab, "failed to extract vdev start resp");
+ return;
+ }
+
+ rcu_read_lock();
+ ar = ath11k_mac_get_ar_by_vdev_id(ab, vdev_start_resp.vdev_id);
+ if (!ar) {
+ ath11k_warn(ab, "invalid vdev id in vdev start resp ev %d",
+ vdev_start_resp.vdev_id);
+ rcu_read_unlock();
+ return;
+ }
+
+ ar->last_wmi_vdev_start_status = 0;
+
+ status = vdev_start_resp.status;
+
+ if (WARN_ON_ONCE(status)) {
+ ath11k_warn(ab, "vdev start resp error status %d (%s)\n",
+ status, ath11k_wmi_vdev_resp_print(status));
+ ar->last_wmi_vdev_start_status = status;
+ }
+
+ complete(&ar->vdev_setup_done);
+
+ rcu_read_unlock();
+
+ ath11k_dbg(ab, ATH11K_DBG_WMI, "vdev start resp for vdev id %d",
+ vdev_start_resp.vdev_id);
+}
+
+static void ath11k_bcn_tx_status_event(struct ath11k_base *ab, struct sk_buff *skb)
+{
+ u32 vdev_id, tx_status;
+
+ if (ath11k_pull_bcn_tx_status_ev(ab, skb->data, skb->len,
+ &vdev_id, &tx_status) != 0) {
+ ath11k_warn(ab, "failed to extract bcn tx status");
+ return;
+ }
+}
+
+static void ath11k_vdev_stopped_event(struct ath11k_base *ab, struct sk_buff *skb)
+{
+ struct ath11k *ar;
+ u32 vdev_id = 0;
+
+ if (ath11k_pull_vdev_stopped_param_tlv(ab, skb, &vdev_id) != 0) {
+ ath11k_warn(ab, "failed to extract vdev stopped event");
+ return;
+ }
+
+ rcu_read_lock();
+ ar = ath11k_mac_get_ar_vdev_stop_status(ab, vdev_id);
+ if (!ar) {
+ ath11k_warn(ab, "invalid vdev id in vdev stopped ev %d",
+ vdev_id);
+ rcu_read_unlock();
+ return;
+ }
+
+ complete(&ar->vdev_setup_done);
+
+ rcu_read_unlock();
+
+ ath11k_dbg(ab, ATH11K_DBG_WMI, "vdev stopped for vdev id %d", vdev_id);
+}
+
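+/* This handler takes ownership of the skb: it is either freed on error
+ * or handed to mac80211 via ieee80211_rx_ni(), so the dispatcher must
+ * not free it again (see ath11k_wmi_tlv_op_rx()).
+ */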
+static void ath11k_mgmt_rx_event(struct ath11k_base *ab, struct sk_buff *skb)
+{
+ struct mgmt_rx_event_params rx_ev = {0};
+ struct ath11k *ar;
+ struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
+ struct ieee80211_hdr *hdr;
+ u16 fc;
+ struct ieee80211_supported_band *sband;
+
+ if (ath11k_pull_mgmt_rx_params_tlv(ab, skb, &rx_ev) != 0) {
+ ath11k_warn(ab, "failed to extract mgmt rx event");
+ dev_kfree_skb(skb);
+ return;
+ }
+
+ memset(status, 0, sizeof(*status));
+
+ ath11k_dbg(ab, ATH11K_DBG_MGMT, "mgmt rx event status %08x\n",
+ rx_ev.status);
+
+ rcu_read_lock();
+ ar = ath11k_mac_get_ar_by_pdev_id(ab, rx_ev.pdev_id);
+
+ if (!ar) {
+ ath11k_warn(ab, "invalid pdev_id %d in mgmt_rx_event\n",
+ rx_ev.pdev_id);
+ dev_kfree_skb(skb);
+ goto exit;
+ }
+
+ if ((test_bit(ATH11K_CAC_RUNNING, &ar->dev_flags)) ||
+ (rx_ev.status & (WMI_RX_STATUS_ERR_DECRYPT |
+ WMI_RX_STATUS_ERR_KEY_CACHE_MISS | WMI_RX_STATUS_ERR_CRC))) {
+ dev_kfree_skb(skb);
+ goto exit;
+ }
+
+ if (rx_ev.status & WMI_RX_STATUS_ERR_MIC)
+ status->flag |= RX_FLAG_MMIC_ERROR;
+
+ if (rx_ev.channel >= 1 && rx_ev.channel <= 14) {
+ status->band = NL80211_BAND_2GHZ;
+ } else if (rx_ev.channel >= 36 && rx_ev.channel <= ATH11K_MAX_5G_CHAN) {
+ status->band = NL80211_BAND_5GHZ;
+ } else {
+ /* Shouldn't happen unless list of advertised channels to
+ * mac80211 has been changed.
+ */
+ WARN_ON_ONCE(1);
+ dev_kfree_skb(skb);
+ goto exit;
+ }
+
+ if (rx_ev.phy_mode == MODE_11B && status->band == NL80211_BAND_5GHZ)
+ ath11k_dbg(ab, ATH11K_DBG_WMI,
+ "wmi mgmt rx 11b (CCK) on 5GHz\n");
+
+ sband = &ar->mac.sbands[status->band];
+
+ status->freq = ieee80211_channel_to_frequency(rx_ev.channel,
+ status->band);
+ status->signal = rx_ev.snr + ATH11K_DEFAULT_NOISE_FLOOR;
+ status->rate_idx = ath11k_mac_bitrate_to_idx(sband, rx_ev.rate / 100);
+
+ hdr = (struct ieee80211_hdr *)skb->data;
+ fc = le16_to_cpu(hdr->frame_control);
+
+ /* Firmware is guaranteed to report all essential management frames via
+ * WMI while it can deliver some extra via HTT. Since there can be
+ * duplicates, split the reporting wrt monitor/sniffing.
+ */
+ status->flag |= RX_FLAG_SKIP_MONITOR;
+
+ /* In case of PMF, FW delivers decrypted frames with Protected Bit set.
+ * Don't clear that. Also, FW delivers broadcast management frames
+ * (ex: group privacy action frames in mesh) as encrypted payload.
+ */
+ if (ieee80211_has_protected(hdr->frame_control) &&
+ !is_multicast_ether_addr(ieee80211_get_DA(hdr))) {
+ status->flag |= RX_FLAG_DECRYPTED;
+
+ if (!ieee80211_is_robust_mgmt_frame(skb)) {
+ status->flag |= RX_FLAG_IV_STRIPPED |
+ RX_FLAG_MMIC_STRIPPED;
+ hdr->frame_control = __cpu_to_le16(fc &
+ ~IEEE80211_FCTL_PROTECTED);
+ }
+ }
+
+ /* TODO: Pending handle beacon implementation
+ *if (ieee80211_is_beacon(hdr->frame_control))
+ * ath11k_mac_handle_beacon(ar, skb);
+ */
+
+ ath11k_dbg(ab, ATH11K_DBG_MGMT,
+ "event mgmt rx skb %pK len %d ftype %02x stype %02x\n",
+ skb, skb->len,
+ fc & IEEE80211_FCTL_FTYPE, fc & IEEE80211_FCTL_STYPE);
+
+ ath11k_dbg(ab, ATH11K_DBG_MGMT,
+ "event mgmt rx freq %d band %d snr %d, rate_idx %d\n",
+ status->freq, status->band, status->signal,
+ status->rate_idx);
+
+ ieee80211_rx_ni(ar->hw, skb);
+
+exit:
+ rcu_read_unlock();
+}
+
+static void ath11k_mgmt_tx_compl_event(struct ath11k_base *ab, struct sk_buff *skb)
+{
+ struct wmi_mgmt_tx_compl_event tx_compl_param = {0};
+ struct ath11k *ar;
+
+ if (ath11k_pull_mgmt_tx_compl_param_tlv(ab, skb, &tx_compl_param) != 0) {
+ ath11k_warn(ab, "failed to extract mgmt tx compl event");
+ return;
+ }
+
+ rcu_read_lock();
+ ar = ath11k_mac_get_ar_by_pdev_id(ab, tx_compl_param.pdev_id);
+ if (!ar) {
+ ath11k_warn(ab, "invalid pdev id %d in mgmt_tx_compl_event\n",
+ tx_compl_param.pdev_id);
+ goto exit;
+ }
+
+ wmi_process_mgmt_tx_comp(ar, tx_compl_param.desc_id,
+ tx_compl_param.status);
+
+ ath11k_dbg(ab, ATH11K_DBG_MGMT,
+ "mgmt tx compl ev pdev_id %d, desc_id %d, status %d",
+ tx_compl_param.pdev_id, tx_compl_param.desc_id,
+ tx_compl_param.status);
+
+exit:
+ rcu_read_unlock();
+}
+
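+/* Find the radio whose scan is being aborted on the given vdev. Used
+ * when a cancelled scan completes after the vdev can no longer be found
+ * among the active interfaces.
+ */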
+static struct ath11k *ath11k_get_ar_on_scan_abort(struct ath11k_base *ab,
+ u32 vdev_id)
+{
+ int i;
+ struct ath11k_pdev *pdev;
+ struct ath11k *ar;
+
+ for (i = 0; i < ab->num_radios; i++) {
+ pdev = rcu_dereference(ab->pdevs_active[i]);
+ if (pdev && pdev->ar) {
+ ar = pdev->ar;
+
+ spin_lock_bh(&ar->data_lock);
+ if (ar->scan.state == ATH11K_SCAN_ABORTING &&
+ ar->scan.vdev_id == vdev_id) {
+ spin_unlock_bh(&ar->data_lock);
+ return ar;
+ }
+ spin_unlock_bh(&ar->data_lock);
+ }
+ }
+ return NULL;
+}
+
+static void ath11k_scan_event(struct ath11k_base *ab, struct sk_buff *skb)
+{
+ struct ath11k *ar;
+ struct wmi_scan_event scan_ev = {0};
+
+ if (ath11k_pull_scan_ev(ab, skb, &scan_ev) != 0) {
+ ath11k_warn(ab, "failed to extract scan event");
+ return;
+ }
+
+ rcu_read_lock();
+
+ /* In case the scan was cancelled, e.g. during interface teardown,
+ * the interface will not be found in active interfaces.
+ * Rather, in such scenarios, iterate over the active pdev's to
+ * search 'ar' if the corresponding 'ar' scan is ABORTING and the
+ * aborting scan's vdev id matches this event info.
+ */
+ if (scan_ev.event_type == WMI_SCAN_EVENT_COMPLETED &&
+ scan_ev.reason == WMI_SCAN_REASON_CANCELLED)
+ ar = ath11k_get_ar_on_scan_abort(ab, scan_ev.vdev_id);
+ else
+ ar = ath11k_mac_get_ar_by_vdev_id(ab, scan_ev.vdev_id);
+
+ if (!ar) {
+ ath11k_warn(ab, "Received scan event for unknown vdev");
+ rcu_read_unlock();
+ return;
+ }
+
+ spin_lock_bh(&ar->data_lock);
+
+ ath11k_dbg(ab, ATH11K_DBG_WMI,
+ "scan event %s type %d reason %d freq %d req_id %d scan_id %d vdev_id %d state %s (%d)\n",
+ ath11k_wmi_event_scan_type_str(scan_ev.event_type, scan_ev.reason),
+ scan_ev.event_type, scan_ev.reason, scan_ev.channel_freq,
+ scan_ev.scan_req_id, scan_ev.scan_id, scan_ev.vdev_id,
+ ath11k_scan_state_str(ar->scan.state), ar->scan.state);
+
+ switch (scan_ev.event_type) {
+ case WMI_SCAN_EVENT_STARTED:
+ ath11k_wmi_event_scan_started(ar);
+ break;
+ case WMI_SCAN_EVENT_COMPLETED:
+ ath11k_wmi_event_scan_completed(ar);
+ break;
+ case WMI_SCAN_EVENT_BSS_CHANNEL:
+ ath11k_wmi_event_scan_bss_chan(ar);
+ break;
+ case WMI_SCAN_EVENT_FOREIGN_CHAN:
+ ath11k_wmi_event_scan_foreign_chan(ar, scan_ev.channel_freq);
+ break;
+ case WMI_SCAN_EVENT_START_FAILED:
+ ath11k_warn(ab, "received scan start failure event\n");
+ ath11k_wmi_event_scan_start_failed(ar);
+ break;
+ case WMI_SCAN_EVENT_DEQUEUED:
+ case WMI_SCAN_EVENT_PREEMPTED:
+ case WMI_SCAN_EVENT_RESTARTED:
+ case WMI_SCAN_EVENT_FOREIGN_CHAN_EXIT:
+ default:
+ break;
+ }
+
+ spin_unlock_bh(&ar->data_lock);
+
+ rcu_read_unlock();
+}
+
+static void ath11k_peer_sta_kickout_event(struct ath11k_base *ab, struct sk_buff *skb)
+{
+ struct wmi_peer_sta_kickout_arg arg = {};
+ struct ieee80211_sta *sta;
+ struct ath11k_peer *peer;
+ struct ath11k *ar;
+
+ if (ath11k_pull_peer_sta_kickout_ev(ab, skb, &arg) != 0) {
+ ath11k_warn(ab, "failed to extract peer sta kickout event");
+ return;
+ }
+
+ rcu_read_lock();
+
+ spin_lock_bh(&ab->base_lock);
+
+ peer = ath11k_peer_find_by_addr(ab, arg.mac_addr);
+
+ if (!peer) {
+ ath11k_warn(ab, "peer not found %pM\n",
+ arg.mac_addr);
+ goto exit;
+ }
+
+ ar = ath11k_mac_get_ar_by_vdev_id(ab, peer->vdev_id);
+ if (!ar) {
+ ath11k_warn(ab, "invalid vdev id in peer sta kickout ev %d",
+ peer->vdev_id);
+ goto exit;
+ }
+
+ sta = ieee80211_find_sta_by_ifaddr(ar->hw,
+ arg.mac_addr, NULL);
+ if (!sta) {
+ ath11k_warn(ab, "Spurious quick kickout for STA %pM\n",
+ arg.mac_addr);
+ goto exit;
+ }
+
+ ath11k_dbg(ab, ATH11K_DBG_WMI, "peer sta kickout event %pM",
+ arg.mac_addr);
+
+ ieee80211_report_low_ack(sta, 10);
+
+exit:
+ spin_unlock_bh(&ab->base_lock);
+ rcu_read_unlock();
+}
+
+static void ath11k_roam_event(struct ath11k_base *ab, struct sk_buff *skb)
+{
+ struct wmi_roam_event roam_ev = {};
+ struct ath11k *ar;
+
+ if (ath11k_pull_roam_ev(ab, skb, &roam_ev) != 0) {
+ ath11k_warn(ab, "failed to extract roam event");
+ return;
+ }
+
+ ath11k_dbg(ab, ATH11K_DBG_WMI,
+ "wmi roam event vdev %u reason 0x%08x rssi %d\n",
+ roam_ev.vdev_id, roam_ev.reason, roam_ev.rssi);
+
+ rcu_read_lock();
+ ar = ath11k_mac_get_ar_by_vdev_id(ab, roam_ev.vdev_id);
+ if (!ar) {
+ ath11k_warn(ab, "invalid vdev id in roam ev %d",
+ roam_ev.vdev_id);
+ rcu_read_unlock();
+ return;
+ }
+
+ if (roam_ev.reason >= WMI_ROAM_REASON_MAX)
+ ath11k_warn(ab, "ignoring unknown roam event reason %d on vdev %i\n",
+ roam_ev.reason, roam_ev.vdev_id);
+
+ switch (roam_ev.reason) {
+ case WMI_ROAM_REASON_BEACON_MISS:
+ /* TODO: Pending beacon miss and connection_loss_work
+ * implementation
+ * ath11k_mac_handle_beacon_miss(ar, vdev_id);
+ */
+ break;
+ case WMI_ROAM_REASON_BETTER_AP:
+ case WMI_ROAM_REASON_LOW_RSSI:
+ case WMI_ROAM_REASON_SUITABLE_AP_FOUND:
+ case WMI_ROAM_REASON_HO_FAILED:
+ ath11k_warn(ab, "ignoring not implemented roam event reason %d on vdev %i\n",
+ roam_ev.reason, roam_ev.vdev_id);
+ break;
+ }
+
+ rcu_read_unlock();
+}
+
+static void ath11k_chan_info_event(struct ath11k_base *ab, struct sk_buff *skb)
+{
+ struct wmi_chan_info_event ch_info_ev = {0};
+ struct ath11k *ar;
+ struct survey_info *survey;
+ int idx;
+ /* HW channel counters frequency value in hertz */
+ u32 cc_freq_hz = ab->cc_freq_hz;
+
+ if (ath11k_pull_chan_info_ev(ab, skb->data, skb->len, &ch_info_ev) != 0) {
+ ath11k_warn(ab, "failed to extract chan info event");
+ return;
+ }
+
+ ath11k_dbg(ab, ATH11K_DBG_WMI,
+ "chan info vdev_id %d err_code %d freq %d cmd_flags %d noise_floor %d rx_clear_count %d cycle_count %d mac_clk_mhz %d\n",
+ ch_info_ev.vdev_id, ch_info_ev.err_code, ch_info_ev.freq,
+ ch_info_ev.cmd_flags, ch_info_ev.noise_floor,
+ ch_info_ev.rx_clear_count, ch_info_ev.cycle_count,
+ ch_info_ev.mac_clk_mhz);
+
+ if (ch_info_ev.cmd_flags == WMI_CHAN_INFO_END_RESP) {
+ ath11k_dbg(ab, ATH11K_DBG_WMI, "chan info report completed\n");
+ return;
+ }
+
+ rcu_read_lock();
+ ar = ath11k_mac_get_ar_by_vdev_id(ab, ch_info_ev.vdev_id);
+ if (!ar) {
+ ath11k_warn(ab, "invalid vdev id in chan info ev %d",
+ ch_info_ev.vdev_id);
+ rcu_read_unlock();
+ return;
+ }
+ spin_lock_bh(&ar->data_lock);
+
+ switch (ar->scan.state) {
+ case ATH11K_SCAN_IDLE:
+ case ATH11K_SCAN_STARTING:
+ ath11k_warn(ab, "received chan info event without a scan request, ignoring\n");
+ goto exit;
+ case ATH11K_SCAN_RUNNING:
+ case ATH11K_SCAN_ABORTING:
+ break;
+ }
+
+ idx = freq_to_idx(ar, ch_info_ev.freq);
+ if (idx >= ARRAY_SIZE(ar->survey)) {
+ ath11k_warn(ab, "chan info: invalid frequency %d (idx %d out of bounds)\n",
+ ch_info_ev.freq, idx);
+ goto exit;
+ }
+
+ /* If FW provides the MAC clock frequency in MHz, override the
+ * initialized HW channel counters frequency value.
+ */
+ if (ch_info_ev.mac_clk_mhz)
+ cc_freq_hz = (ch_info_ev.mac_clk_mhz * 1000);
+
+ if (ch_info_ev.cmd_flags == WMI_CHAN_INFO_START_RESP) {
+ survey = &ar->survey[idx];
+ memset(survey, 0, sizeof(*survey));
+ survey->noise = ch_info_ev.noise_floor;
+ survey->filled = SURVEY_INFO_NOISE_DBM | SURVEY_INFO_TIME |
+ SURVEY_INFO_TIME_BUSY;
+ survey->time = div_u64(ch_info_ev.cycle_count, cc_freq_hz);
+ survey->time_busy = div_u64(ch_info_ev.rx_clear_count, cc_freq_hz);
+ }
+exit:
+ spin_unlock_bh(&ar->data_lock);
+ rcu_read_unlock();
+}
+
+static void
+ath11k_pdev_bss_chan_info_event(struct ath11k_base *ab, struct sk_buff *skb)
+{
+ struct wmi_pdev_bss_chan_info_event bss_ch_info_ev = {};
+ struct survey_info *survey;
+ struct ath11k *ar;
+ u32 cc_freq_hz = ab->cc_freq_hz;
+ u64 busy, total, tx, rx, rx_bss;
+ int idx;
+
+ if (ath11k_pull_pdev_bss_chan_info_ev(ab, skb, &bss_ch_info_ev) != 0) {
+ ath11k_warn(ab, "failed to extract pdev bss chan info event");
+ return;
+ }
+
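+ /* Firmware reports each cycle counter as two 32-bit halves;
+ * reassemble them into 64-bit values before use.
+ */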
+ busy = (u64)(bss_ch_info_ev.rx_clear_count_high) << 32 |
+ bss_ch_info_ev.rx_clear_count_low;
+
+ total = (u64)(bss_ch_info_ev.cycle_count_high) << 32 |
+ bss_ch_info_ev.cycle_count_low;
+
+ tx = (u64)(bss_ch_info_ev.tx_cycle_count_high) << 32 |
+ bss_ch_info_ev.tx_cycle_count_low;
+
+ rx = (u64)(bss_ch_info_ev.rx_cycle_count_high) << 32 |
+ bss_ch_info_ev.rx_cycle_count_low;
+
+ rx_bss = (u64)(bss_ch_info_ev.rx_bss_cycle_count_high) << 32 |
+ bss_ch_info_ev.rx_bss_cycle_count_low;
+
+ ath11k_dbg(ab, ATH11K_DBG_WMI,
+ "pdev bss chan info:\n pdev_id: %d freq: %d noise: %d cycle: busy %llu total %llu tx %llu rx %llu rx_bss %llu\n",
+ bss_ch_info_ev.pdev_id, bss_ch_info_ev.freq,
+ bss_ch_info_ev.noise_floor, busy, total,
+ tx, rx, rx_bss);
+
+ rcu_read_lock();
+ ar = ath11k_mac_get_ar_by_pdev_id(ab, bss_ch_info_ev.pdev_id);
+
+ if (!ar) {
+ ath11k_warn(ab, "invalid pdev id %d in bss_chan_info event\n",
+ bss_ch_info_ev.pdev_id);
+ rcu_read_unlock();
+ return;
+ }
+
+ spin_lock_bh(&ar->data_lock);
+ idx = freq_to_idx(ar, bss_ch_info_ev.freq);
+ if (idx >= ARRAY_SIZE(ar->survey)) {
+ ath11k_warn(ab, "bss chan info: invalid frequency %d (idx %d out of bounds)\n",
+ bss_ch_info_ev.freq, idx);
+ goto exit;
+ }
+
+ survey = &ar->survey[idx];
+
+ survey->noise = bss_ch_info_ev.noise_floor;
+ survey->time = div_u64(total, cc_freq_hz);
+ survey->time_busy = div_u64(busy, cc_freq_hz);
+ survey->time_rx = div_u64(rx_bss, cc_freq_hz);
+ survey->time_tx = div_u64(tx, cc_freq_hz);
+ survey->filled |= (SURVEY_INFO_NOISE_DBM |
+ SURVEY_INFO_TIME |
+ SURVEY_INFO_TIME_BUSY |
+ SURVEY_INFO_TIME_RX |
+ SURVEY_INFO_TIME_TX);
+exit:
+ spin_unlock_bh(&ar->data_lock);
+ complete(&ar->bss_survey_done);
+
+ rcu_read_unlock();
+}
+
+static void ath11k_vdev_install_key_compl_event(struct ath11k_base *ab,
+ struct sk_buff *skb)
+{
+ struct wmi_vdev_install_key_complete_arg install_key_compl = {0};
+ struct ath11k *ar;
+
+ if (ath11k_pull_vdev_install_key_compl_ev(ab, skb, &install_key_compl) != 0) {
+ ath11k_warn(ab, "failed to extract install key compl event");
+ return;
+ }
+
+ ath11k_dbg(ab, ATH11K_DBG_WMI,
+ "vdev install key ev idx %d flags %08x macaddr %pM status %d\n",
+ install_key_compl.key_idx, install_key_compl.key_flags,
+ install_key_compl.macaddr, install_key_compl.status);
+
+ rcu_read_lock();
+ ar = ath11k_mac_get_ar_by_vdev_id(ab, install_key_compl.vdev_id);
+ if (!ar) {
+ ath11k_warn(ab, "invalid vdev id in install key compl ev %d",
+ install_key_compl.vdev_id);
+ rcu_read_unlock();
+ return;
+ }
+
+ ar->install_key_status = 0;
+
+ if (install_key_compl.status != WMI_VDEV_INSTALL_KEY_COMPL_STATUS_SUCCESS) {
+ ath11k_warn(ab, "install key failed for %pM status %d\n",
+ install_key_compl.macaddr, install_key_compl.status);
+ ar->install_key_status = install_key_compl.status;
+ }
+
+ complete(&ar->install_key_done);
+ rcu_read_unlock();
+}
+
+static void ath11k_service_available_event(struct ath11k_base *ab, struct sk_buff *skb)
+{
+ const void **tb;
+ const struct wmi_service_available_event *ev;
+ int ret;
+ int i, j;
+
+ tb = ath11k_wmi_tlv_parse_alloc(ab, skb->data, skb->len, GFP_ATOMIC);
+ if (IS_ERR(tb)) {
+ ret = PTR_ERR(tb);
+ ath11k_warn(ab, "failed to parse tlv: %d\n", ret);
+ return;
+ }
+
+ ev = tb[WMI_TAG_SERVICE_AVAILABLE_EVENT];
+ if (!ev) {
+ ath11k_warn(ab, "failed to fetch svc available ev");
+ kfree(tb);
+ return;
+ }
+
+ /* TODO: Use wmi_service_segment_offset information to get the service
+ * especially when more services are advertised in multiple service
+ * available events.
+ */
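+ /* Each 32-bit word of the segment bitmap advertises another 32
+ * extended services, numbered upwards from WMI_MAX_SERVICE; set
+ * the matching bits in the svc_map.
+ */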
+ for (i = 0, j = WMI_MAX_SERVICE;
+ i < WMI_SERVICE_SEGMENT_BM_SIZE32 && j < WMI_MAX_EXT_SERVICE;
+ i++) {
+ do {
+ if (ev->wmi_service_segment_bitmap[i] &
+ BIT(j % WMI_AVAIL_SERVICE_BITS_IN_SIZE32))
+ set_bit(j, ab->wmi_ab.svc_map);
+ } while (++j % WMI_AVAIL_SERVICE_BITS_IN_SIZE32);
+ }
+
+ ath11k_dbg(ab, ATH11K_DBG_WMI,
+ "wmi_ext_service_bitmap 0:0x%x, 1:0x%x, 2:0x%x, 3:0x%x",
+ ev->wmi_service_segment_bitmap[0], ev->wmi_service_segment_bitmap[1],
+ ev->wmi_service_segment_bitmap[2], ev->wmi_service_segment_bitmap[3]);
+
+ kfree(tb);
+}
+
+static void ath11k_peer_assoc_conf_event(struct ath11k_base *ab, struct sk_buff *skb)
+{
+ struct wmi_peer_assoc_conf_arg peer_assoc_conf = {0};
+ struct ath11k *ar;
+
+ if (ath11k_pull_peer_assoc_conf_ev(ab, skb, &peer_assoc_conf) != 0) {
+ ath11k_warn(ab, "failed to extract peer assoc conf event");
+ return;
+ }
+
+ ath11k_dbg(ab, ATH11K_DBG_WMI,
+ "peer assoc conf ev vdev id %d macaddr %pM\n",
+ peer_assoc_conf.vdev_id, peer_assoc_conf.macaddr);
+
+ ar = ath11k_mac_get_ar_by_vdev_id(ab, peer_assoc_conf.vdev_id);
+
+ if (!ar) {
+ ath11k_warn(ab, "invalid vdev id in peer assoc conf ev %d",
+ peer_assoc_conf.vdev_id);
+ return;
+ }
+
+ complete(&ar->peer_assoc_done);
+}
+
+static void ath11k_update_stats_event(struct ath11k_base *ab, struct sk_buff *skb)
+{
+ ath11k_debug_fw_stats_process(ab, skb);
+}
+
+/* PDEV_CTL_FAILSAFE_CHECK_EVENT is received from FW when the frequency scanned
+ * is not part of BDF CTL(Conformance test limits) table entries.
+ */
+static void ath11k_pdev_ctl_failsafe_check_event(struct ath11k_base *ab,
+ struct sk_buff *skb)
+{
+ const void **tb;
+ const struct wmi_pdev_ctl_failsafe_chk_event *ev;
+ int ret;
+
+ tb = ath11k_wmi_tlv_parse_alloc(ab, skb->data, skb->len, GFP_ATOMIC);
+ if (IS_ERR(tb)) {
+ ret = PTR_ERR(tb);
+ ath11k_warn(ab, "failed to parse tlv: %d\n", ret);
+ return;
+ }
+
+ ev = tb[WMI_TAG_PDEV_CTL_FAILSAFE_CHECK_EVENT];
+ if (!ev) {
+ ath11k_warn(ab, "failed to fetch pdev ctl failsafe check ev");
+ kfree(tb);
+ return;
+ }
+
+ ath11k_dbg(ab, ATH11K_DBG_WMI,
+ "pdev ctl failsafe check ev status %d\n",
+ ev->ctl_failsafe_status);
+
+ /* If ctl_failsafe_status is set to 1 FW will max out the Transmit power
+ * to 10 dBm else the CTL power entry in the BDF would be picked up.
+ */
+ if (ev->ctl_failsafe_status != 0)
+ ath11k_warn(ab, "pdev ctl failsafe failure status %d",
+ ev->ctl_failsafe_status);
+
+ kfree(tb);
+}
+
+static void
+ath11k_wmi_process_csa_switch_count_event(struct ath11k_base *ab,
+ const struct wmi_pdev_csa_switch_ev *ev,
+ const u32 *vdev_ids)
+{
+ int i;
+ struct ath11k_vif *arvif;
+
+ /* Finish CSA once the switch count reaches zero */
+ if (ev->current_switch_count)
+ return;
+
+ rcu_read_lock();
+ for (i = 0; i < ev->num_vdevs; i++) {
+ arvif = ath11k_mac_get_arvif_by_vdev_id(ab, vdev_ids[i]);
+
+ if (!arvif) {
+ ath11k_warn(ab, "Recvd csa status for unknown vdev %d",
+ vdev_ids[i]);
+ continue;
+ }
+
+ if (arvif->is_up && arvif->vif->csa_active)
+ ieee80211_csa_finish(arvif->vif);
+ }
+ rcu_read_unlock();
+}
+
+static void
+ath11k_wmi_pdev_csa_switch_count_status_event(struct ath11k_base *ab,
+ struct sk_buff *skb)
+{
+ const void **tb;
+ const struct wmi_pdev_csa_switch_ev *ev;
+ const u32 *vdev_ids;
+ int ret;
+
+ tb = ath11k_wmi_tlv_parse_alloc(ab, skb->data, skb->len, GFP_ATOMIC);
+ if (IS_ERR(tb)) {
+ ret = PTR_ERR(tb);
+ ath11k_warn(ab, "failed to parse tlv: %d\n", ret);
+ return;
+ }
+
+ ev = tb[WMI_TAG_PDEV_CSA_SWITCH_COUNT_STATUS_EVENT];
+ vdev_ids = tb[WMI_TAG_ARRAY_UINT32];
+
+ if (!ev || !vdev_ids) {
+ ath11k_warn(ab, "failed to fetch pdev csa switch count ev");
+ kfree(tb);
+ return;
+ }
+
+ ath11k_dbg(ab, ATH11K_DBG_WMI,
+ "pdev csa switch count %d for pdev %d, num_vdevs %d",
+ ev->current_switch_count, ev->pdev_id,
+ ev->num_vdevs);
+
+ ath11k_wmi_process_csa_switch_count_event(ab, ev, vdev_ids);
+
+ kfree(tb);
+}
+
+static void
+ath11k_wmi_pdev_dfs_radar_detected_event(struct ath11k_base *ab, struct sk_buff *skb)
+{
+ const void **tb;
+ const struct wmi_pdev_radar_ev *ev;
+ struct ath11k *ar;
+ int ret;
+
+ tb = ath11k_wmi_tlv_parse_alloc(ab, skb->data, skb->len, GFP_ATOMIC);
+ if (IS_ERR(tb)) {
+ ret = PTR_ERR(tb);
+ ath11k_warn(ab, "failed to parse tlv: %d\n", ret);
+ return;
+ }
+
+ ev = tb[WMI_TAG_PDEV_DFS_RADAR_DETECTION_EVENT];
+
+ if (!ev) {
+ ath11k_warn(ab, "failed to fetch pdev dfs radar detected ev");
+ kfree(tb);
+ return;
+ }
+
+ ath11k_dbg(ab, ATH11K_DBG_WMI,
+ "pdev dfs radar detected on pdev %d, detection mode %d, chan freq %d, chan_width %d, detector id %d, seg id %d, timestamp %d, chirp %d, freq offset %d, sidx %d",
+ ev->pdev_id, ev->detection_mode, ev->chan_freq, ev->chan_width,
+ ev->detector_id, ev->segment_id, ev->timestamp, ev->is_chirp,
+ ev->freq_offset, ev->sidx);
+
+ ar = ath11k_mac_get_ar_by_pdev_id(ab, ev->pdev_id);
+
+ if (!ar) {
+ ath11k_warn(ab, "radar detected in invalid pdev %d\n",
+ ev->pdev_id);
+ goto exit;
+ }
+
+ ath11k_dbg(ar->ab, ATH11K_DBG_REG, "DFS Radar Detected in pdev %d\n",
+ ev->pdev_id);
+
+ if (ar->dfs_block_radar_events)
+ ath11k_info(ab, "DFS Radar detected, but ignored as requested\n");
+ else
+ ieee80211_radar_detected(ar->hw);
+
+exit:
+ kfree(tb);
+}
+
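+/* Central WMI event dispatcher: strip the wmi_cmd_hdr, route the event
+ * to its handler by id and free the skb afterwards (except for mgmt rx,
+ * whose handler keeps the skb).
+ */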
+static void ath11k_wmi_tlv_op_rx(struct ath11k_base *ab, struct sk_buff *skb)
+{
+ struct wmi_cmd_hdr *cmd_hdr;
+ enum wmi_tlv_event_id id;
+
+ cmd_hdr = (struct wmi_cmd_hdr *)skb->data;
+ id = FIELD_GET(WMI_CMD_HDR_CMD_ID, (cmd_hdr->cmd_id));
+
+ if (skb_pull(skb, sizeof(struct wmi_cmd_hdr)) == NULL)
+ goto out;
+
+ switch (id) {
+ /* Process all the WMI events here */
+ case WMI_SERVICE_READY_EVENTID:
+ ath11k_service_ready_event(ab, skb);
+ break;
+ case WMI_SERVICE_READY_EXT_EVENTID:
+ ath11k_service_ready_ext_event(ab, skb);
+ break;
+ case WMI_REG_CHAN_LIST_CC_EVENTID:
+ ath11k_reg_chan_list_event(ab, skb);
+ break;
+ case WMI_READY_EVENTID:
+ ath11k_ready_event(ab, skb);
+ break;
+ case WMI_PEER_DELETE_RESP_EVENTID:
+ ath11k_peer_delete_resp_event(ab, skb);
+ break;
+ case WMI_VDEV_START_RESP_EVENTID:
+ ath11k_vdev_start_resp_event(ab, skb);
+ break;
+ case WMI_OFFLOAD_BCN_TX_STATUS_EVENTID:
+ ath11k_bcn_tx_status_event(ab, skb);
+ break;
+ case WMI_VDEV_STOPPED_EVENTID:
+ ath11k_vdev_stopped_event(ab, skb);
+ break;
+ case WMI_MGMT_RX_EVENTID:
+ ath11k_mgmt_rx_event(ab, skb);
+ /* mgmt_rx_event() owns the skb now! */
+ return;
+ case WMI_MGMT_TX_COMPLETION_EVENTID:
+ ath11k_mgmt_tx_compl_event(ab, skb);
+ break;
+ case WMI_SCAN_EVENTID:
+ ath11k_scan_event(ab, skb);
+ break;
+ case WMI_PEER_STA_KICKOUT_EVENTID:
+ ath11k_peer_sta_kickout_event(ab, skb);
+ break;
+ case WMI_ROAM_EVENTID:
+ ath11k_roam_event(ab, skb);
+ break;
+ case WMI_CHAN_INFO_EVENTID:
+ ath11k_chan_info_event(ab, skb);
+ break;
+ case WMI_PDEV_BSS_CHAN_INFO_EVENTID:
+ ath11k_pdev_bss_chan_info_event(ab, skb);
+ break;
+ case WMI_VDEV_INSTALL_KEY_COMPLETE_EVENTID:
+ ath11k_vdev_install_key_compl_event(ab, skb);
+ break;
+ case WMI_SERVICE_AVAILABLE_EVENTID:
+ ath11k_service_available_event(ab, skb);
+ break;
+ case WMI_PEER_ASSOC_CONF_EVENTID:
+ ath11k_peer_assoc_conf_event(ab, skb);
+ break;
+ case WMI_UPDATE_STATS_EVENTID:
+ ath11k_update_stats_event(ab, skb);
+ break;
+ case WMI_PDEV_CTL_FAILSAFE_CHECK_EVENTID:
+ ath11k_pdev_ctl_failsafe_check_event(ab, skb);
+ break;
+ case WMI_PDEV_CSA_SWITCH_COUNT_STATUS_EVENTID:
+ ath11k_wmi_pdev_csa_switch_count_status_event(ab, skb);
+ break;
+ /* add Unsupported events here */
+ case WMI_TBTTOFFSET_EXT_UPDATE_EVENTID:
+ case WMI_VDEV_DELETE_RESP_EVENTID:
+ case WMI_PEER_OPER_MODE_CHANGE_EVENTID:
+ case WMI_TWT_ENABLE_EVENTID:
+ case WMI_TWT_DISABLE_EVENTID:
+ ath11k_dbg(ab, ATH11K_DBG_WMI,
+ "ignoring unsupported event 0x%x\n", id);
+ break;
+ case WMI_PDEV_DFS_RADAR_DETECTION_EVENTID:
+ ath11k_wmi_pdev_dfs_radar_detected_event(ab, skb);
+ break;
+ /* TODO: Add remaining events */
+ default:
+ ath11k_warn(ab, "Unknown eventid: 0x%x\n", id);
+ break;
+ }
+
+out:
+ dev_kfree_skb(skb);
+}
+
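+/* Each pdev gets its own HTC control service (MAC0/MAC1/MAC2) so that
+ * per-radio WMI traffic travels over a separate endpoint.
+ */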
+static int ath11k_connect_pdev_htc_service(struct ath11k_base *ab,
+ u32 pdev_idx)
+{
+ int status;
+ u32 svc_id[] = { ATH11K_HTC_SVC_ID_WMI_CONTROL,
+ ATH11K_HTC_SVC_ID_WMI_CONTROL_MAC1,
+ ATH11K_HTC_SVC_ID_WMI_CONTROL_MAC2 };
+
+ struct ath11k_htc_svc_conn_req conn_req;
+ struct ath11k_htc_svc_conn_resp conn_resp;
+
+ memset(&conn_req, 0, sizeof(conn_req));
+ memset(&conn_resp, 0, sizeof(conn_resp));
+
+ /* these fields are the same for all service endpoints */
+ conn_req.ep_ops.ep_tx_complete = ath11k_wmi_htc_tx_complete;
+ conn_req.ep_ops.ep_rx_complete = ath11k_wmi_tlv_op_rx;
+ conn_req.ep_ops.ep_tx_credits = ath11k_wmi_op_ep_tx_credits;
+
+ /* connect to control service */
+ conn_req.service_id = svc_id[pdev_idx];
+
+ status = ath11k_htc_connect_service(&ab->htc, &conn_req, &conn_resp);
+ if (status) {
+ ath11k_warn(ab, "failed to connect to WMI CONTROL service status: %d\n",
+ status);
+ return status;
+ }
+
+ ab->wmi_ab.wmi_endpoint_id[pdev_idx] = conn_resp.eid;
+ ab->wmi_ab.wmi[pdev_idx].eid = conn_resp.eid;
+ ab->wmi_ab.max_msg_len[pdev_idx] = conn_resp.max_msg_len;
+
+ return 0;
+}
+
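+/* WMI TLV commands are laid out as a fixed-size command structure
+ * followed by variable-length TLVs; here the unit test arguments ride
+ * in a WMI_TAG_ARRAY_UINT32 TLV appended after wmi_unit_test_cmd.
+ */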
+static int
+ath11k_wmi_send_unit_test_cmd(struct ath11k *ar,
+ struct wmi_unit_test_cmd ut_cmd,
+ u32 *test_args)
+{
+ struct ath11k_pdev_wmi *wmi = ar->wmi;
+ struct wmi_unit_test_cmd *cmd;
+ struct sk_buff *skb;
+ struct wmi_tlv *tlv;
+ void *ptr;
+ u32 *ut_cmd_args;
+ int buf_len, arg_len;
+ int ret;
+ int i;
+
+ arg_len = sizeof(u32) * ut_cmd.num_args;
+ buf_len = sizeof(ut_cmd) + arg_len + TLV_HDR_SIZE;
+
+ skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, buf_len);
+ if (!skb)
+ return -ENOMEM;
+
+ cmd = (struct wmi_unit_test_cmd *)skb->data;
+ cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_UNIT_TEST_CMD) |
+ FIELD_PREP(WMI_TLV_LEN, sizeof(ut_cmd) - TLV_HDR_SIZE);
+
+ cmd->vdev_id = ut_cmd.vdev_id;
+ cmd->module_id = ut_cmd.module_id;
+ cmd->num_args = ut_cmd.num_args;
+ cmd->diag_token = ut_cmd.diag_token;
+
+ ptr = skb->data + sizeof(ut_cmd);
+
+ tlv = ptr;
+ tlv->header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_ARRAY_UINT32) |
+ FIELD_PREP(WMI_TLV_LEN, arg_len);
+
+ ptr += TLV_HDR_SIZE;
+
+ ut_cmd_args = ptr;
+ for (i = 0; i < ut_cmd.num_args; i++)
+ ut_cmd_args[i] = test_args[i];
+
+	ath11k_dbg(ar->ab, ATH11K_DBG_WMI,
+		   "WMI unit test: module %d vdev %d n_args %d token %d\n",
+		   cmd->module_id, cmd->vdev_id, cmd->num_args,
+		   cmd->diag_token);
+
+	ret = ath11k_wmi_cmd_send(wmi, skb, WMI_UNIT_TEST_CMDID);
+	if (ret) {
+		ath11k_warn(ar->ab, "failed to send WMI_UNIT_TEST CMD: %d\n",
+			    ret);
+		dev_kfree_skb(skb);
+	}
+
+	return ret;
+}
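+
+/*
+ * The unit test command built above has the following buffer layout
+ * (a sketch):
+ *
+ *   | wmi_unit_test_cmd            |  TLV tag WMI_TAG_UNIT_TEST_CMD
+ *   | wmi_tlv header               |  TLV tag WMI_TAG_ARRAY_UINT32,
+ *   |                              |  len = num_args * sizeof(u32)
+ *   | test_args[0 .. num_args - 1] |
+ */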
+
+int ath11k_wmi_simulate_radar(struct ath11k *ar)
+{
+ struct ath11k_vif *arvif;
+ u32 dfs_args[DFS_MAX_TEST_ARGS];
+ struct wmi_unit_test_cmd wmi_ut;
+ bool arvif_found = false;
+
+ list_for_each_entry(arvif, &ar->arvifs, list) {
+ if (arvif->is_started && arvif->vdev_type == WMI_VDEV_TYPE_AP) {
+ arvif_found = true;
+ break;
+ }
+ }
+
+ if (!arvif_found)
+ return -EINVAL;
+
+ dfs_args[DFS_TEST_CMDID] = 0;
+ dfs_args[DFS_TEST_PDEV_ID] = ar->pdev->pdev_id;
+	/* Currently we could pass segment_id (b0 - b1), chirp (b2) and
+	 * freq offset (b3 - b10) to the unit test. For simulation
+	 * purposes these can be set to 0, which is valid.
+	 */
+ dfs_args[DFS_TEST_RADAR_PARAM] = 0;
+
+ wmi_ut.vdev_id = arvif->vdev_id;
+ wmi_ut.module_id = DFS_UNIT_TEST_MODULE;
+ wmi_ut.num_args = DFS_MAX_TEST_ARGS;
+ wmi_ut.diag_token = DFS_UNIT_TEST_TOKEN;
+
+ ath11k_dbg(ar->ab, ATH11K_DBG_REG, "Triggering Radar Simulation\n");
+
+ return ath11k_wmi_send_unit_test_cmd(ar, wmi_ut, dfs_args);
+}
+
+int ath11k_wmi_connect(struct ath11k_base *ab)
+{
+	u32 i;
+	u8 wmi_ep_count;
+	int status;
+
+	wmi_ep_count = ab->htc.wmi_ep_count;
+	if (wmi_ep_count > MAX_RADIOS)
+		return -EINVAL;
+
+	for (i = 0; i < wmi_ep_count; i++) {
+		status = ath11k_connect_pdev_htc_service(ab, i);
+		if (status)
+			return status;
+	}
+
+ return 0;
+}
+
+static void ath11k_wmi_pdev_detach(struct ath11k_base *ab, u8 pdev_id)
+{
+ if (WARN_ON(pdev_id >= MAX_RADIOS))
+ return;
+
+ /* TODO: Deinit any pdev specific wmi resource */
+}
+
+int ath11k_wmi_pdev_attach(struct ath11k_base *ab,
+ u8 pdev_id)
+{
+ struct ath11k_pdev_wmi *wmi_handle;
+
+ if (pdev_id >= MAX_RADIOS)
+ return -EINVAL;
+
+ wmi_handle = &ab->wmi_ab.wmi[pdev_id];
+
+ wmi_handle->wmi_ab = &ab->wmi_ab;
+
+ ab->wmi_ab.ab = ab;
+ /* TODO: Init remaining resource specific to pdev */
+
+ return 0;
+}
+
+int ath11k_wmi_attach(struct ath11k_base *ab)
+{
+ int ret;
+
+ ret = ath11k_wmi_pdev_attach(ab, 0);
+ if (ret)
+ return ret;
+
+ ab->wmi_ab.ab = ab;
+ ab->wmi_ab.preferred_hw_mode = WMI_HOST_HW_MODE_MAX;
+
+ /* TODO: Init remaining wmi soc resources required */
+ init_completion(&ab->wmi_ab.service_ready);
+ init_completion(&ab->wmi_ab.unified_ready);
+
+ return 0;
+}
+
+void ath11k_wmi_detach(struct ath11k_base *ab)
+{
+ int i;
+
+ /* TODO: Deinit wmi resource specific to SOC as required */
+
+ for (i = 0; i < ab->htc.wmi_ep_count; i++)
+ ath11k_wmi_pdev_detach(ab, i);
+}
diff --git a/drivers/net/wireless/ath/ath11k/wmi.h b/drivers/net/wireless/ath/ath11k/wmi.h
new file mode 100644
index 000000000000..1fde15c762ad
--- /dev/null
+++ b/drivers/net/wireless/ath/ath11k/wmi.h
@@ -0,0 +1,4764 @@
+/* SPDX-License-Identifier: BSD-3-Clause-Clear */
+/*
+ * Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
+ */
+
+#ifndef ATH11K_WMI_H
+#define ATH11K_WMI_H
+
+#include <net/mac80211.h>
+#include "htc.h"
+
+struct ath11k_base;
+struct ath11k;
+struct ath11k_fw_stats;
+
+#define PSOC_HOST_MAX_NUM_SS (8)
+
+/* defines to set packet extension values, which can be 0, 8 or 16 usec */
+#define MAX_HE_NSS 8
+#define MAX_HE_MODULATION 8
+#define MAX_HE_RU 4
+#define HE_MODULATION_NONE 7
+#define HE_PET_0_USEC 0
+#define HE_PET_8_USEC 1
+#define HE_PET_16_USEC 2
+
+#define WMI_MAX_NUM_SS MAX_HE_NSS
+#define WMI_MAX_NUM_RU MAX_HE_RU
+
+#define WMI_TLV_CMD(grp_id) (((grp_id) << 12) | 0x1)
+#define WMI_TLV_EV(grp_id) (((grp_id) << 12) | 0x1)
+#define WMI_TLV_CMD_UNSUPPORTED 0
+#define WMI_TLV_PDEV_PARAM_UNSUPPORTED 0
+#define WMI_TLV_VDEV_PARAM_UNSUPPORTED 0
+
+struct wmi_cmd_hdr {
+ u32 cmd_id;
+} __packed;
+
+struct wmi_tlv {
+ u32 header;
+ u8 value[0];
+} __packed;
+
+#define WMI_TLV_LEN GENMASK(15, 0)
+#define WMI_TLV_TAG GENMASK(31, 16)
+#define TLV_HDR_SIZE sizeof_field(struct wmi_tlv, header)
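+
+/*
+ * A TLV header packs the tag into bits 31:16 and the payload length in
+ * bytes (excluding the header itself) into bits 15:0. For example
+ * (illustrative), a WMI_TAG_ARRAY_UINT32 TLV carrying three u32 values
+ * is built as:
+ *
+ *   tlv->header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_ARRAY_UINT32) |
+ *                 FIELD_PREP(WMI_TLV_LEN, 3 * sizeof(u32));
+ */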
+
+#define WMI_CMD_HDR_CMD_ID GENMASK(23, 0)
+#define WMI_MAX_MEM_REQS 32
+#define ATH11K_MAX_HW_LISTEN_INTERVAL 5
+
+#define WLAN_SCAN_PARAMS_MAX_SSID 16
+#define WLAN_SCAN_PARAMS_MAX_BSSID 4
+#define WLAN_SCAN_PARAMS_MAX_IE_LEN 256
+
+#define WMI_BA_MODE_BUFFER_SIZE_256 3
+/*
+ * HW mode config type replicated from FW header
+ * @WMI_HOST_HW_MODE_SINGLE: Only one PHY is active.
+ * @WMI_HOST_HW_MODE_DBS: Both PHYs are active in different bands,
+ * one in 2G and another in 5G.
+ * @WMI_HOST_HW_MODE_SBS_PASSIVE: Both PHYs are in passive mode (only rx) in
+ * same band; no tx allowed.
+ * @WMI_HOST_HW_MODE_SBS: Both PHYs are active in the same band.
+ * Support for both PHYs within one band is planned
+ * for 5G only (as indicated in WMI_MAC_PHY_CAPABILITIES),
+ * but could be extended to other bands in the future.
+ * The separation of the band between the two PHYs needs
+ * to be communicated separately.
+ * @WMI_HOST_HW_MODE_DBS_SBS: 3 PHYs, with 2 on the same band doing SBS
+ * as in WMI_HW_MODE_SBS, and the 3rd on the other band
+ * @WMI_HOST_HW_MODE_DBS_OR_SBS: Two PHYs, with one PHY capable of both 2G
+ * and 5G. It can support SBS (5G + 5G) or DBS (5G + 2G).
+ * @WMI_HOST_HW_MODE_MAX: Max hw_mode_id. Used to indicate invalid mode.
+ */
+enum wmi_host_hw_mode_config_type {
+ WMI_HOST_HW_MODE_SINGLE = 0,
+ WMI_HOST_HW_MODE_DBS = 1,
+ WMI_HOST_HW_MODE_SBS_PASSIVE = 2,
+ WMI_HOST_HW_MODE_SBS = 3,
+ WMI_HOST_HW_MODE_DBS_SBS = 4,
+ WMI_HOST_HW_MODE_DBS_OR_SBS = 5,
+
+ /* keep last */
+ WMI_HOST_HW_MODE_MAX
+};
+
+/* HW mode priority values used to detect the preferred HW mode
+ * on the available modes.
+ */
+enum wmi_host_hw_mode_priority {
+ WMI_HOST_HW_MODE_DBS_SBS_PRI,
+ WMI_HOST_HW_MODE_DBS_PRI,
+ WMI_HOST_HW_MODE_DBS_OR_SBS_PRI,
+ WMI_HOST_HW_MODE_SBS_PRI,
+ WMI_HOST_HW_MODE_SBS_PASSIVE_PRI,
+ WMI_HOST_HW_MODE_SINGLE_PRI,
+
+ /* keep last the lowest priority */
+ WMI_HOST_HW_MODE_MAX_PRI
+};
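+
+/*
+ * Illustrative use of the priority table (a sketch, not the exact driver
+ * code; mode_pri_map is a hypothetical lookup table indexed by config
+ * type): when firmware advertises several HW modes, map each mode to its
+ * priority and keep the numerically lowest value, since the enum lists
+ * the most preferred mode (DBS_SBS) first:
+ *
+ *   pri = mode_pri_map[hw_mode];
+ *   if (pri < best_pri)
+ *           best_pri = pri;
+ */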
+
+enum {
+ WMI_HOST_WLAN_2G_CAP = 0x1,
+ WMI_HOST_WLAN_5G_CAP = 0x2,
+ WMI_HOST_WLAN_2G_5G_CAP = 0x3,
+};
+
+/*
+ * wmi command groups.
+ */
+enum wmi_cmd_group {
+ /* 0 to 2 are reserved */
+ WMI_GRP_START = 0x3,
+ WMI_GRP_SCAN = WMI_GRP_START,
+ WMI_GRP_PDEV = 0x4,
+ WMI_GRP_VDEV = 0x5,
+ WMI_GRP_PEER = 0x6,
+ WMI_GRP_MGMT = 0x7,
+ WMI_GRP_BA_NEG = 0x8,
+ WMI_GRP_STA_PS = 0x9,
+ WMI_GRP_DFS = 0xa,
+ WMI_GRP_ROAM = 0xb,
+ WMI_GRP_OFL_SCAN = 0xc,
+ WMI_GRP_P2P = 0xd,
+ WMI_GRP_AP_PS = 0xe,
+ WMI_GRP_RATE_CTRL = 0xf,
+ WMI_GRP_PROFILE = 0x10,
+ WMI_GRP_SUSPEND = 0x11,
+ WMI_GRP_BCN_FILTER = 0x12,
+ WMI_GRP_WOW = 0x13,
+ WMI_GRP_RTT = 0x14,
+ WMI_GRP_SPECTRAL = 0x15,
+ WMI_GRP_STATS = 0x16,
+ WMI_GRP_ARP_NS_OFL = 0x17,
+ WMI_GRP_NLO_OFL = 0x18,
+ WMI_GRP_GTK_OFL = 0x19,
+ WMI_GRP_CSA_OFL = 0x1a,
+ WMI_GRP_CHATTER = 0x1b,
+ WMI_GRP_TID_ADDBA = 0x1c,
+ WMI_GRP_MISC = 0x1d,
+ WMI_GRP_GPIO = 0x1e,
+ WMI_GRP_FWTEST = 0x1f,
+ WMI_GRP_TDLS = 0x20,
+ WMI_GRP_RESMGR = 0x21,
+ WMI_GRP_STA_SMPS = 0x22,
+ WMI_GRP_WLAN_HB = 0x23,
+ WMI_GRP_RMC = 0x24,
+ WMI_GRP_MHF_OFL = 0x25,
+ WMI_GRP_LOCATION_SCAN = 0x26,
+ WMI_GRP_OEM = 0x27,
+ WMI_GRP_NAN = 0x28,
+ WMI_GRP_COEX = 0x29,
+ WMI_GRP_OBSS_OFL = 0x2a,
+ WMI_GRP_LPI = 0x2b,
+ WMI_GRP_EXTSCAN = 0x2c,
+ WMI_GRP_DHCP_OFL = 0x2d,
+ WMI_GRP_IPA = 0x2e,
+ WMI_GRP_MDNS_OFL = 0x2f,
+ WMI_GRP_SAP_OFL = 0x30,
+ WMI_GRP_OCB = 0x31,
+ WMI_GRP_SOC = 0x32,
+ WMI_GRP_PKT_FILTER = 0x33,
+ WMI_GRP_MAWC = 0x34,
+ WMI_GRP_PMF_OFFLOAD = 0x35,
+ WMI_GRP_BPF_OFFLOAD = 0x36,
+ WMI_GRP_NAN_DATA = 0x37,
+ WMI_GRP_PROTOTYPE = 0x38,
+ WMI_GRP_MONITOR = 0x39,
+ WMI_GRP_REGULATORY = 0x3a,
+ WMI_GRP_HW_DATA_FILTER = 0x3b,
+ WMI_GRP_WLM = 0x3c,
+ WMI_GRP_11K_OFFLOAD = 0x3d,
+ WMI_GRP_TWT = 0x3e,
+ WMI_GRP_MOTION_DET = 0x3f,
+ WMI_GRP_SPATIAL_REUSE = 0x40,
+};
+
+#define WMI_CMD_GRP(grp_id) (((grp_id) << 12) | 0x1)
+#define WMI_EVT_GRP_START_ID(grp_id) (((grp_id) << 12) | 0x1)
+
+#define WMI_CMD_UNSUPPORTED 0
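+
+/*
+ * Worked example of the group encoding: WMI_TLV_CMD(WMI_GRP_SCAN)
+ * expands to (0x3 << 12) | 0x1 = 0x3001, so WMI_START_SCAN_CMDID is
+ * 0x3001 and the enum assigns the following scan commands 0x3002,
+ * 0x3003, and so on; each group gets its own 4096-entry ID space.
+ */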
+
+enum wmi_tlv_cmd_id {
+ WMI_INIT_CMDID = 0x1,
+ WMI_START_SCAN_CMDID = WMI_TLV_CMD(WMI_GRP_SCAN),
+ WMI_STOP_SCAN_CMDID,
+ WMI_SCAN_CHAN_LIST_CMDID,
+ WMI_SCAN_SCH_PRIO_TBL_CMDID,
+ WMI_SCAN_UPDATE_REQUEST_CMDID,
+ WMI_SCAN_PROB_REQ_OUI_CMDID,
+ WMI_SCAN_ADAPTIVE_DWELL_CONFIG_CMDID,
+ WMI_PDEV_SET_REGDOMAIN_CMDID = WMI_TLV_CMD(WMI_GRP_PDEV),
+ WMI_PDEV_SET_CHANNEL_CMDID,
+ WMI_PDEV_SET_PARAM_CMDID,
+ WMI_PDEV_PKTLOG_ENABLE_CMDID,
+ WMI_PDEV_PKTLOG_DISABLE_CMDID,
+ WMI_PDEV_SET_WMM_PARAMS_CMDID,
+ WMI_PDEV_SET_HT_CAP_IE_CMDID,
+ WMI_PDEV_SET_VHT_CAP_IE_CMDID,
+ WMI_PDEV_SET_DSCP_TID_MAP_CMDID,
+ WMI_PDEV_SET_QUIET_MODE_CMDID,
+ WMI_PDEV_GREEN_AP_PS_ENABLE_CMDID,
+ WMI_PDEV_GET_TPC_CONFIG_CMDID,
+ WMI_PDEV_SET_BASE_MACADDR_CMDID,
+ WMI_PDEV_DUMP_CMDID,
+ WMI_PDEV_SET_LED_CONFIG_CMDID,
+ WMI_PDEV_GET_TEMPERATURE_CMDID,
+ WMI_PDEV_SET_LED_FLASHING_CMDID,
+ WMI_PDEV_SMART_ANT_ENABLE_CMDID,
+ WMI_PDEV_SMART_ANT_SET_RX_ANTENNA_CMDID,
+ WMI_PDEV_SET_ANTENNA_SWITCH_TABLE_CMDID,
+ WMI_PDEV_SET_CTL_TABLE_CMDID,
+ WMI_PDEV_SET_MIMOGAIN_TABLE_CMDID,
+ WMI_PDEV_FIPS_CMDID,
+ WMI_PDEV_GET_ANI_CCK_CONFIG_CMDID,
+ WMI_PDEV_GET_ANI_OFDM_CONFIG_CMDID,
+ WMI_PDEV_GET_NFCAL_POWER_CMDID,
+ WMI_PDEV_GET_TPC_CMDID,
+ WMI_MIB_STATS_ENABLE_CMDID,
+ WMI_PDEV_SET_PCL_CMDID,
+ WMI_PDEV_SET_HW_MODE_CMDID,
+ WMI_PDEV_SET_MAC_CONFIG_CMDID,
+ WMI_PDEV_SET_ANTENNA_MODE_CMDID,
+ WMI_SET_PERIODIC_CHANNEL_STATS_CONFIG_CMDID,
+ WMI_PDEV_WAL_POWER_DEBUG_CMDID,
+ WMI_PDEV_SET_REORDER_TIMEOUT_VAL_CMDID,
+ WMI_PDEV_SET_WAKEUP_CONFIG_CMDID,
+ WMI_PDEV_GET_ANTDIV_STATUS_CMDID,
+ WMI_PDEV_GET_CHIP_POWER_STATS_CMDID,
+ WMI_PDEV_SET_STATS_THRESHOLD_CMDID,
+ WMI_PDEV_MULTIPLE_VDEV_RESTART_REQUEST_CMDID,
+ WMI_PDEV_UPDATE_PKT_ROUTING_CMDID,
+ WMI_PDEV_CHECK_CAL_VERSION_CMDID,
+ WMI_PDEV_SET_DIVERSITY_GAIN_CMDID,
+ WMI_PDEV_DIV_GET_RSSI_ANTID_CMDID,
+ WMI_PDEV_BSS_CHAN_INFO_REQUEST_CMDID,
+ WMI_PDEV_UPDATE_PMK_CACHE_CMDID,
+ WMI_PDEV_UPDATE_FILS_HLP_PKT_CMDID,
+ WMI_PDEV_UPDATE_CTLTABLE_REQUEST_CMDID,
+ WMI_PDEV_CONFIG_VENDOR_OUI_ACTION_CMDID,
+ WMI_PDEV_SET_AC_TX_QUEUE_OPTIMIZED_CMDID,
+ WMI_PDEV_SET_RX_FILTER_PROMISCUOUS_CMDID,
+ WMI_PDEV_DMA_RING_CFG_REQ_CMDID,
+ WMI_PDEV_HE_TB_ACTION_FRM_CMDID,
+ WMI_PDEV_PKTLOG_FILTER_CMDID,
+ WMI_VDEV_CREATE_CMDID = WMI_TLV_CMD(WMI_GRP_VDEV),
+ WMI_VDEV_DELETE_CMDID,
+ WMI_VDEV_START_REQUEST_CMDID,
+ WMI_VDEV_RESTART_REQUEST_CMDID,
+ WMI_VDEV_UP_CMDID,
+ WMI_VDEV_STOP_CMDID,
+ WMI_VDEV_DOWN_CMDID,
+ WMI_VDEV_SET_PARAM_CMDID,
+ WMI_VDEV_INSTALL_KEY_CMDID,
+ WMI_VDEV_WNM_SLEEPMODE_CMDID,
+ WMI_VDEV_WMM_ADDTS_CMDID,
+ WMI_VDEV_WMM_DELTS_CMDID,
+ WMI_VDEV_SET_WMM_PARAMS_CMDID,
+ WMI_VDEV_SET_GTX_PARAMS_CMDID,
+ WMI_VDEV_IPSEC_NATKEEPALIVE_FILTER_CMDID,
+ WMI_VDEV_PLMREQ_START_CMDID,
+ WMI_VDEV_PLMREQ_STOP_CMDID,
+ WMI_VDEV_TSF_TSTAMP_ACTION_CMDID,
+ WMI_VDEV_SET_IE_CMDID,
+ WMI_VDEV_RATEMASK_CMDID,
+ WMI_VDEV_ATF_REQUEST_CMDID,
+ WMI_VDEV_SET_DSCP_TID_MAP_CMDID,
+ WMI_VDEV_FILTER_NEIGHBOR_RX_PACKETS_CMDID,
+ WMI_VDEV_SET_QUIET_MODE_CMDID,
+ WMI_VDEV_SET_CUSTOM_AGGR_SIZE_CMDID,
+ WMI_VDEV_ENCRYPT_DECRYPT_DATA_REQ_CMDID,
+ WMI_VDEV_ADD_MAC_ADDR_TO_RX_FILTER_CMDID,
+ WMI_PEER_CREATE_CMDID = WMI_TLV_CMD(WMI_GRP_PEER),
+ WMI_PEER_DELETE_CMDID,
+ WMI_PEER_FLUSH_TIDS_CMDID,
+ WMI_PEER_SET_PARAM_CMDID,
+ WMI_PEER_ASSOC_CMDID,
+ WMI_PEER_ADD_WDS_ENTRY_CMDID,
+ WMI_PEER_REMOVE_WDS_ENTRY_CMDID,
+ WMI_PEER_MCAST_GROUP_CMDID,
+ WMI_PEER_INFO_REQ_CMDID,
+ WMI_PEER_GET_ESTIMATED_LINKSPEED_CMDID,
+ WMI_PEER_SET_RATE_REPORT_CONDITION_CMDID,
+ WMI_PEER_UPDATE_WDS_ENTRY_CMDID,
+ WMI_PEER_ADD_PROXY_STA_ENTRY_CMDID,
+ WMI_PEER_SMART_ANT_SET_TX_ANTENNA_CMDID,
+ WMI_PEER_SMART_ANT_SET_TRAIN_INFO_CMDID,
+ WMI_PEER_SMART_ANT_SET_NODE_CONFIG_OPS_CMDID,
+ WMI_PEER_ATF_REQUEST_CMDID,
+ WMI_PEER_BWF_REQUEST_CMDID,
+ WMI_PEER_REORDER_QUEUE_SETUP_CMDID,
+ WMI_PEER_REORDER_QUEUE_REMOVE_CMDID,
+ WMI_PEER_SET_RX_BLOCKSIZE_CMDID,
+ WMI_PEER_ANTDIV_INFO_REQ_CMDID,
+ WMI_BCN_TX_CMDID = WMI_TLV_CMD(WMI_GRP_MGMT),
+ WMI_PDEV_SEND_BCN_CMDID,
+ WMI_BCN_TMPL_CMDID,
+ WMI_BCN_FILTER_RX_CMDID,
+ WMI_PRB_REQ_FILTER_RX_CMDID,
+ WMI_MGMT_TX_CMDID,
+ WMI_PRB_TMPL_CMDID,
+ WMI_MGMT_TX_SEND_CMDID,
+ WMI_OFFCHAN_DATA_TX_SEND_CMDID,
+ WMI_PDEV_SEND_FD_CMDID,
+ WMI_BCN_OFFLOAD_CTRL_CMDID,
+ WMI_BSS_COLOR_CHANGE_ENABLE_CMDID,
+ WMI_VDEV_BCN_OFFLOAD_QUIET_CONFIG_CMDID,
+ WMI_ADDBA_CLEAR_RESP_CMDID = WMI_TLV_CMD(WMI_GRP_BA_NEG),
+ WMI_ADDBA_SEND_CMDID,
+ WMI_ADDBA_STATUS_CMDID,
+ WMI_DELBA_SEND_CMDID,
+ WMI_ADDBA_SET_RESP_CMDID,
+ WMI_SEND_SINGLEAMSDU_CMDID,
+ WMI_STA_POWERSAVE_MODE_CMDID = WMI_TLV_CMD(WMI_GRP_STA_PS),
+ WMI_STA_POWERSAVE_PARAM_CMDID,
+ WMI_STA_MIMO_PS_MODE_CMDID,
+ WMI_PDEV_DFS_ENABLE_CMDID = WMI_TLV_CMD(WMI_GRP_DFS),
+ WMI_PDEV_DFS_DISABLE_CMDID,
+ WMI_DFS_PHYERR_FILTER_ENA_CMDID,
+ WMI_DFS_PHYERR_FILTER_DIS_CMDID,
+ WMI_PDEV_DFS_PHYERR_OFFLOAD_ENABLE_CMDID,
+ WMI_PDEV_DFS_PHYERR_OFFLOAD_DISABLE_CMDID,
+ WMI_VDEV_ADFS_CH_CFG_CMDID,
+ WMI_VDEV_ADFS_OCAC_ABORT_CMDID,
+ WMI_ROAM_SCAN_MODE = WMI_TLV_CMD(WMI_GRP_ROAM),
+ WMI_ROAM_SCAN_RSSI_THRESHOLD,
+ WMI_ROAM_SCAN_PERIOD,
+ WMI_ROAM_SCAN_RSSI_CHANGE_THRESHOLD,
+ WMI_ROAM_AP_PROFILE,
+ WMI_ROAM_CHAN_LIST,
+ WMI_ROAM_SCAN_CMD,
+ WMI_ROAM_SYNCH_COMPLETE,
+ WMI_ROAM_SET_RIC_REQUEST_CMDID,
+ WMI_ROAM_INVOKE_CMDID,
+ WMI_ROAM_FILTER_CMDID,
+ WMI_ROAM_SUBNET_CHANGE_CONFIG_CMDID,
+ WMI_ROAM_CONFIGURE_MAWC_CMDID,
+ WMI_ROAM_SET_MBO_PARAM_CMDID,
+ WMI_ROAM_PER_CONFIG_CMDID,
+ WMI_OFL_SCAN_ADD_AP_PROFILE = WMI_TLV_CMD(WMI_GRP_OFL_SCAN),
+ WMI_OFL_SCAN_REMOVE_AP_PROFILE,
+ WMI_OFL_SCAN_PERIOD,
+ WMI_P2P_DEV_SET_DEVICE_INFO = WMI_TLV_CMD(WMI_GRP_P2P),
+ WMI_P2P_DEV_SET_DISCOVERABILITY,
+ WMI_P2P_GO_SET_BEACON_IE,
+ WMI_P2P_GO_SET_PROBE_RESP_IE,
+ WMI_P2P_SET_VENDOR_IE_DATA_CMDID,
+ WMI_P2P_DISC_OFFLOAD_CONFIG_CMDID,
+ WMI_P2P_DISC_OFFLOAD_APPIE_CMDID,
+ WMI_P2P_DISC_OFFLOAD_PATTERN_CMDID,
+ WMI_P2P_SET_OPPPS_PARAM_CMDID,
+ WMI_P2P_LISTEN_OFFLOAD_START_CMDID,
+ WMI_P2P_LISTEN_OFFLOAD_STOP_CMDID,
+ WMI_AP_PS_PEER_PARAM_CMDID = WMI_TLV_CMD(WMI_GRP_AP_PS),
+ WMI_AP_PS_PEER_UAPSD_COEX_CMDID,
+ WMI_AP_PS_EGAP_PARAM_CMDID,
+ WMI_PEER_RATE_RETRY_SCHED_CMDID = WMI_TLV_CMD(WMI_GRP_RATE_CTRL),
+ WMI_WLAN_PROFILE_TRIGGER_CMDID = WMI_TLV_CMD(WMI_GRP_PROFILE),
+ WMI_WLAN_PROFILE_SET_HIST_INTVL_CMDID,
+ WMI_WLAN_PROFILE_GET_PROFILE_DATA_CMDID,
+ WMI_WLAN_PROFILE_ENABLE_PROFILE_ID_CMDID,
+ WMI_WLAN_PROFILE_LIST_PROFILE_ID_CMDID,
+ WMI_PDEV_SUSPEND_CMDID = WMI_TLV_CMD(WMI_GRP_SUSPEND),
+ WMI_PDEV_RESUME_CMDID,
+ WMI_ADD_BCN_FILTER_CMDID = WMI_TLV_CMD(WMI_GRP_BCN_FILTER),
+ WMI_RMV_BCN_FILTER_CMDID,
+ WMI_WOW_ADD_WAKE_PATTERN_CMDID = WMI_TLV_CMD(WMI_GRP_WOW),
+ WMI_WOW_DEL_WAKE_PATTERN_CMDID,
+ WMI_WOW_ENABLE_DISABLE_WAKE_EVENT_CMDID,
+ WMI_WOW_ENABLE_CMDID,
+ WMI_WOW_HOSTWAKEUP_FROM_SLEEP_CMDID,
+ WMI_WOW_IOAC_ADD_KEEPALIVE_CMDID,
+ WMI_WOW_IOAC_DEL_KEEPALIVE_CMDID,
+ WMI_WOW_IOAC_ADD_WAKE_PATTERN_CMDID,
+ WMI_WOW_IOAC_DEL_WAKE_PATTERN_CMDID,
+ WMI_D0_WOW_ENABLE_DISABLE_CMDID,
+ WMI_EXTWOW_ENABLE_CMDID,
+ WMI_EXTWOW_SET_APP_TYPE1_PARAMS_CMDID,
+ WMI_EXTWOW_SET_APP_TYPE2_PARAMS_CMDID,
+ WMI_WOW_ENABLE_ICMPV6_NA_FLT_CMDID,
+ WMI_WOW_UDP_SVC_OFLD_CMDID,
+ WMI_WOW_HOSTWAKEUP_GPIO_PIN_PATTERN_CONFIG_CMDID,
+ WMI_WOW_SET_ACTION_WAKE_UP_CMDID,
+ WMI_RTT_MEASREQ_CMDID = WMI_TLV_CMD(WMI_GRP_RTT),
+ WMI_RTT_TSF_CMDID,
+ WMI_VDEV_SPECTRAL_SCAN_CONFIGURE_CMDID = WMI_TLV_CMD(WMI_GRP_SPECTRAL),
+ WMI_VDEV_SPECTRAL_SCAN_ENABLE_CMDID,
+ WMI_REQUEST_STATS_CMDID = WMI_TLV_CMD(WMI_GRP_STATS),
+ WMI_MCC_SCHED_TRAFFIC_STATS_CMDID,
+ WMI_REQUEST_STATS_EXT_CMDID,
+ WMI_REQUEST_LINK_STATS_CMDID,
+ WMI_START_LINK_STATS_CMDID,
+ WMI_CLEAR_LINK_STATS_CMDID,
+ WMI_GET_FW_MEM_DUMP_CMDID,
+ WMI_DEBUG_MESG_FLUSH_CMDID,
+ WMI_DIAG_EVENT_LOG_CONFIG_CMDID,
+ WMI_REQUEST_WLAN_STATS_CMDID,
+ WMI_REQUEST_RCPI_CMDID,
+ WMI_REQUEST_PEER_STATS_INFO_CMDID,
+ WMI_REQUEST_RADIO_CHAN_STATS_CMDID,
+ WMI_SET_ARP_NS_OFFLOAD_CMDID = WMI_TLV_CMD(WMI_GRP_ARP_NS_OFL),
+ WMI_ADD_PROACTIVE_ARP_RSP_PATTERN_CMDID,
+ WMI_DEL_PROACTIVE_ARP_RSP_PATTERN_CMDID,
+ WMI_NETWORK_LIST_OFFLOAD_CONFIG_CMDID = WMI_TLV_CMD(WMI_GRP_NLO_OFL),
+ WMI_APFIND_CMDID,
+ WMI_PASSPOINT_LIST_CONFIG_CMDID,
+ WMI_NLO_CONFIGURE_MAWC_CMDID,
+ WMI_GTK_OFFLOAD_CMDID = WMI_TLV_CMD(WMI_GRP_GTK_OFL),
+ WMI_CSA_OFFLOAD_ENABLE_CMDID = WMI_TLV_CMD(WMI_GRP_CSA_OFL),
+ WMI_CSA_OFFLOAD_CHANSWITCH_CMDID,
+ WMI_CHATTER_SET_MODE_CMDID = WMI_TLV_CMD(WMI_GRP_CHATTER),
+ WMI_CHATTER_ADD_COALESCING_FILTER_CMDID,
+ WMI_CHATTER_DELETE_COALESCING_FILTER_CMDID,
+ WMI_CHATTER_COALESCING_QUERY_CMDID,
+ WMI_PEER_TID_ADDBA_CMDID = WMI_TLV_CMD(WMI_GRP_TID_ADDBA),
+ WMI_PEER_TID_DELBA_CMDID,
+ WMI_STA_DTIM_PS_METHOD_CMDID,
+ WMI_STA_UAPSD_AUTO_TRIG_CMDID,
+ WMI_STA_KEEPALIVE_CMDID,
+ WMI_BA_REQ_SSN_CMDID,
+ WMI_ECHO_CMDID = WMI_TLV_CMD(WMI_GRP_MISC),
+ WMI_PDEV_UTF_CMDID,
+ WMI_DBGLOG_CFG_CMDID,
+ WMI_PDEV_QVIT_CMDID,
+ WMI_PDEV_FTM_INTG_CMDID,
+ WMI_VDEV_SET_KEEPALIVE_CMDID,
+ WMI_VDEV_GET_KEEPALIVE_CMDID,
+ WMI_FORCE_FW_HANG_CMDID,
+ WMI_SET_MCASTBCAST_FILTER_CMDID,
+ WMI_THERMAL_MGMT_CMDID,
+ WMI_HOST_AUTO_SHUTDOWN_CFG_CMDID,
+ WMI_TPC_CHAINMASK_CONFIG_CMDID,
+ WMI_SET_ANTENNA_DIVERSITY_CMDID,
+ WMI_OCB_SET_SCHED_CMDID,
+ WMI_RSSI_BREACH_MONITOR_CONFIG_CMDID,
+ WMI_LRO_CONFIG_CMDID,
+ WMI_TRANSFER_DATA_TO_FLASH_CMDID,
+ WMI_CONFIG_ENHANCED_MCAST_FILTER_CMDID,
+ WMI_VDEV_WISA_CMDID,
+ WMI_DBGLOG_TIME_STAMP_SYNC_CMDID,
+ WMI_SET_MULTIPLE_MCAST_FILTER_CMDID,
+ WMI_READ_DATA_FROM_FLASH_CMDID,
+ WMI_GPIO_CONFIG_CMDID = WMI_TLV_CMD(WMI_GRP_GPIO),
+ WMI_GPIO_OUTPUT_CMDID,
+ WMI_TXBF_CMDID,
+ WMI_FWTEST_VDEV_MCC_SET_TBTT_MODE_CMDID = WMI_TLV_CMD(WMI_GRP_FWTEST),
+ WMI_FWTEST_P2P_SET_NOA_PARAM_CMDID,
+ WMI_UNIT_TEST_CMDID,
+ WMI_FWTEST_CMDID,
+ WMI_QBOOST_CFG_CMDID,
+ WMI_TDLS_SET_STATE_CMDID = WMI_TLV_CMD(WMI_GRP_TDLS),
+ WMI_TDLS_PEER_UPDATE_CMDID,
+ WMI_TDLS_SET_OFFCHAN_MODE_CMDID,
+ WMI_RESMGR_ADAPTIVE_OCS_EN_DIS_CMDID = WMI_TLV_CMD(WMI_GRP_RESMGR),
+ WMI_RESMGR_SET_CHAN_TIME_QUOTA_CMDID,
+ WMI_RESMGR_SET_CHAN_LATENCY_CMDID,
+ WMI_STA_SMPS_FORCE_MODE_CMDID = WMI_TLV_CMD(WMI_GRP_STA_SMPS),
+ WMI_STA_SMPS_PARAM_CMDID,
+ WMI_HB_SET_ENABLE_CMDID = WMI_TLV_CMD(WMI_GRP_WLAN_HB),
+ WMI_HB_SET_TCP_PARAMS_CMDID,
+ WMI_HB_SET_TCP_PKT_FILTER_CMDID,
+ WMI_HB_SET_UDP_PARAMS_CMDID,
+ WMI_HB_SET_UDP_PKT_FILTER_CMDID,
+ WMI_RMC_SET_MODE_CMDID = WMI_TLV_CMD(WMI_GRP_RMC),
+ WMI_RMC_SET_ACTION_PERIOD_CMDID,
+ WMI_RMC_CONFIG_CMDID,
+ WMI_RMC_SET_MANUAL_LEADER_CMDID,
+ WMI_MHF_OFFLOAD_SET_MODE_CMDID = WMI_TLV_CMD(WMI_GRP_MHF_OFL),
+ WMI_MHF_OFFLOAD_PLUMB_ROUTING_TBL_CMDID,
+ WMI_BATCH_SCAN_ENABLE_CMDID = WMI_TLV_CMD(WMI_GRP_LOCATION_SCAN),
+ WMI_BATCH_SCAN_DISABLE_CMDID,
+ WMI_BATCH_SCAN_TRIGGER_RESULT_CMDID,
+ WMI_OEM_REQ_CMDID = WMI_TLV_CMD(WMI_GRP_OEM),
+ WMI_OEM_REQUEST_CMDID,
+ WMI_LPI_OEM_REQ_CMDID,
+ WMI_NAN_CMDID = WMI_TLV_CMD(WMI_GRP_NAN),
+ WMI_MODEM_POWER_STATE_CMDID = WMI_TLV_CMD(WMI_GRP_COEX),
+ WMI_CHAN_AVOID_UPDATE_CMDID,
+ WMI_COEX_CONFIG_CMDID,
+ WMI_CHAN_AVOID_RPT_ALLOW_CMDID,
+ WMI_COEX_GET_ANTENNA_ISOLATION_CMDID,
+ WMI_SAR_LIMITS_CMDID,
+ WMI_OBSS_SCAN_ENABLE_CMDID = WMI_TLV_CMD(WMI_GRP_OBSS_OFL),
+ WMI_OBSS_SCAN_DISABLE_CMDID,
+ WMI_LPI_MGMT_SNOOPING_CONFIG_CMDID = WMI_TLV_CMD(WMI_GRP_LPI),
+ WMI_LPI_START_SCAN_CMDID,
+ WMI_LPI_STOP_SCAN_CMDID,
+ WMI_EXTSCAN_START_CMDID = WMI_TLV_CMD(WMI_GRP_EXTSCAN),
+ WMI_EXTSCAN_STOP_CMDID,
+ WMI_EXTSCAN_CONFIGURE_WLAN_CHANGE_MONITOR_CMDID,
+ WMI_EXTSCAN_CONFIGURE_HOTLIST_MONITOR_CMDID,
+ WMI_EXTSCAN_GET_CACHED_RESULTS_CMDID,
+ WMI_EXTSCAN_GET_WLAN_CHANGE_RESULTS_CMDID,
+ WMI_EXTSCAN_SET_CAPABILITIES_CMDID,
+ WMI_EXTSCAN_GET_CAPABILITIES_CMDID,
+ WMI_EXTSCAN_CONFIGURE_HOTLIST_SSID_MONITOR_CMDID,
+ WMI_EXTSCAN_CONFIGURE_MAWC_CMDID,
+ WMI_SET_DHCP_SERVER_OFFLOAD_CMDID = WMI_TLV_CMD(WMI_GRP_DHCP_OFL),
+ WMI_IPA_OFFLOAD_ENABLE_DISABLE_CMDID = WMI_TLV_CMD(WMI_GRP_IPA),
+ WMI_MDNS_OFFLOAD_ENABLE_CMDID = WMI_TLV_CMD(WMI_GRP_MDNS_OFL),
+ WMI_MDNS_SET_FQDN_CMDID,
+ WMI_MDNS_SET_RESPONSE_CMDID,
+ WMI_MDNS_GET_STATS_CMDID,
+ WMI_SAP_OFL_ENABLE_CMDID = WMI_TLV_CMD(WMI_GRP_SAP_OFL),
+ WMI_SAP_SET_BLACKLIST_PARAM_CMDID,
+ WMI_OCB_SET_CONFIG_CMDID = WMI_TLV_CMD(WMI_GRP_OCB),
+ WMI_OCB_SET_UTC_TIME_CMDID,
+ WMI_OCB_START_TIMING_ADVERT_CMDID,
+ WMI_OCB_STOP_TIMING_ADVERT_CMDID,
+ WMI_OCB_GET_TSF_TIMER_CMDID,
+ WMI_DCC_GET_STATS_CMDID,
+ WMI_DCC_CLEAR_STATS_CMDID,
+ WMI_DCC_UPDATE_NDL_CMDID,
+ WMI_SOC_SET_PCL_CMDID = WMI_TLV_CMD(WMI_GRP_SOC),
+ WMI_SOC_SET_HW_MODE_CMDID,
+ WMI_SOC_SET_DUAL_MAC_CONFIG_CMDID,
+ WMI_SOC_SET_ANTENNA_MODE_CMDID,
+ WMI_PACKET_FILTER_CONFIG_CMDID = WMI_TLV_CMD(WMI_GRP_PKT_FILTER),
+ WMI_PACKET_FILTER_ENABLE_CMDID,
+ WMI_MAWC_SENSOR_REPORT_IND_CMDID = WMI_TLV_CMD(WMI_GRP_MAWC),
+ WMI_PMF_OFFLOAD_SET_SA_QUERY_CMDID = WMI_TLV_CMD(WMI_GRP_PMF_OFFLOAD),
+ WMI_BPF_GET_CAPABILITY_CMDID = WMI_TLV_CMD(WMI_GRP_BPF_OFFLOAD),
+ WMI_BPF_GET_VDEV_STATS_CMDID,
+ WMI_BPF_SET_VDEV_INSTRUCTIONS_CMDID,
+ WMI_BPF_DEL_VDEV_INSTRUCTIONS_CMDID,
+ WMI_BPF_SET_VDEV_ACTIVE_MODE_CMDID,
+ WMI_MNT_FILTER_CMDID = WMI_TLV_CMD(WMI_GRP_MONITOR),
+ WMI_SET_CURRENT_COUNTRY_CMDID = WMI_TLV_CMD(WMI_GRP_REGULATORY),
+ WMI_11D_SCAN_START_CMDID,
+ WMI_11D_SCAN_STOP_CMDID,
+ WMI_SET_INIT_COUNTRY_CMDID,
+ WMI_NDI_GET_CAP_REQ_CMDID = WMI_TLV_CMD(WMI_GRP_PROTOTYPE),
+ WMI_NDP_INITIATOR_REQ_CMDID,
+ WMI_NDP_RESPONDER_REQ_CMDID,
+ WMI_NDP_END_REQ_CMDID,
+ WMI_HW_DATA_FILTER_CMDID = WMI_TLV_CMD(WMI_GRP_HW_DATA_FILTER),
+ WMI_TWT_ENABLE_CMDID = WMI_TLV_CMD(WMI_GRP_TWT),
+ WMI_TWT_DISABLE_CMDID,
+ WMI_TWT_ADD_DIALOG_CMDID,
+ WMI_TWT_DEL_DIALOG_CMDID,
+ WMI_TWT_PAUSE_DIALOG_CMDID,
+ WMI_TWT_RESUME_DIALOG_CMDID,
+ WMI_PDEV_OBSS_PD_SPATIAL_REUSE_CMDID =
+ WMI_TLV_CMD(WMI_GRP_SPATIAL_REUSE),
+ WMI_PDEV_OBSS_PD_SPATIAL_REUSE_SET_DEF_OBSS_THRESH_CMDID,
+};
+
+enum wmi_tlv_event_id {
+ WMI_SERVICE_READY_EVENTID = 0x1,
+ WMI_READY_EVENTID,
+ WMI_SERVICE_AVAILABLE_EVENTID,
+ WMI_SCAN_EVENTID = WMI_EVT_GRP_START_ID(WMI_GRP_SCAN),
+ WMI_PDEV_TPC_CONFIG_EVENTID = WMI_TLV_CMD(WMI_GRP_PDEV),
+ WMI_CHAN_INFO_EVENTID,
+ WMI_PHYERR_EVENTID,
+ WMI_PDEV_DUMP_EVENTID,
+ WMI_TX_PAUSE_EVENTID,
+ WMI_DFS_RADAR_EVENTID,
+ WMI_PDEV_L1SS_TRACK_EVENTID,
+ WMI_PDEV_TEMPERATURE_EVENTID,
+ WMI_SERVICE_READY_EXT_EVENTID,
+ WMI_PDEV_FIPS_EVENTID,
+ WMI_PDEV_CHANNEL_HOPPING_EVENTID,
+ WMI_PDEV_ANI_CCK_LEVEL_EVENTID,
+ WMI_PDEV_ANI_OFDM_LEVEL_EVENTID,
+ WMI_PDEV_TPC_EVENTID,
+ WMI_PDEV_NFCAL_POWER_ALL_CHANNELS_EVENTID,
+ WMI_PDEV_SET_HW_MODE_RESP_EVENTID,
+ WMI_PDEV_HW_MODE_TRANSITION_EVENTID,
+ WMI_PDEV_SET_MAC_CONFIG_RESP_EVENTID,
+ WMI_PDEV_ANTDIV_STATUS_EVENTID,
+ WMI_PDEV_CHIP_POWER_STATS_EVENTID,
+ WMI_PDEV_CHIP_POWER_SAVE_FAILURE_DETECTED_EVENTID,
+ WMI_PDEV_CSA_SWITCH_COUNT_STATUS_EVENTID,
+ WMI_PDEV_CHECK_CAL_VERSION_EVENTID,
+ WMI_PDEV_DIV_RSSI_ANTID_EVENTID,
+ WMI_PDEV_BSS_CHAN_INFO_EVENTID,
+ WMI_PDEV_UPDATE_CTLTABLE_EVENTID,
+ WMI_PDEV_DMA_RING_CFG_RSP_EVENTID,
+ WMI_PDEV_DMA_RING_BUF_RELEASE_EVENTID,
+ WMI_PDEV_CTL_FAILSAFE_CHECK_EVENTID,
+ WMI_VDEV_START_RESP_EVENTID = WMI_TLV_CMD(WMI_GRP_VDEV),
+ WMI_VDEV_STOPPED_EVENTID,
+ WMI_VDEV_INSTALL_KEY_COMPLETE_EVENTID,
+ WMI_VDEV_MCC_BCN_INTERVAL_CHANGE_REQ_EVENTID,
+ WMI_VDEV_TSF_REPORT_EVENTID,
+ WMI_VDEV_DELETE_RESP_EVENTID,
+ WMI_VDEV_ENCRYPT_DECRYPT_DATA_RESP_EVENTID,
+ WMI_VDEV_ADD_MAC_ADDR_TO_RX_FILTER_STATUS_EVENTID,
+ WMI_PEER_STA_KICKOUT_EVENTID = WMI_TLV_CMD(WMI_GRP_PEER),
+ WMI_PEER_INFO_EVENTID,
+ WMI_PEER_TX_FAIL_CNT_THR_EVENTID,
+ WMI_PEER_ESTIMATED_LINKSPEED_EVENTID,
+ WMI_PEER_STATE_EVENTID,
+ WMI_PEER_ASSOC_CONF_EVENTID,
+ WMI_PEER_DELETE_RESP_EVENTID,
+ WMI_PEER_RATECODE_LIST_EVENTID,
+ WMI_WDS_PEER_EVENTID,
+ WMI_PEER_STA_PS_STATECHG_EVENTID,
+ WMI_PEER_ANTDIV_INFO_EVENTID,
+ WMI_PEER_RESERVED0_EVENTID,
+ WMI_PEER_RESERVED1_EVENTID,
+ WMI_PEER_RESERVED2_EVENTID,
+ WMI_PEER_RESERVED3_EVENTID,
+ WMI_PEER_RESERVED4_EVENTID,
+ WMI_PEER_RESERVED5_EVENTID,
+ WMI_PEER_RESERVED6_EVENTID,
+ WMI_PEER_RESERVED7_EVENTID,
+ WMI_PEER_RESERVED8_EVENTID,
+ WMI_PEER_RESERVED9_EVENTID,
+ WMI_PEER_RESERVED10_EVENTID,
+ WMI_PEER_OPER_MODE_CHANGE_EVENTID,
+ WMI_MGMT_RX_EVENTID = WMI_TLV_CMD(WMI_GRP_MGMT),
+ WMI_HOST_SWBA_EVENTID,
+ WMI_TBTTOFFSET_UPDATE_EVENTID,
+ WMI_OFFLOAD_BCN_TX_STATUS_EVENTID,
+ WMI_OFFLOAD_PROB_RESP_TX_STATUS_EVENTID,
+ WMI_MGMT_TX_COMPLETION_EVENTID,
+ WMI_MGMT_TX_BUNDLE_COMPLETION_EVENTID,
+ WMI_TBTTOFFSET_EXT_UPDATE_EVENTID,
+ WMI_TX_DELBA_COMPLETE_EVENTID = WMI_TLV_CMD(WMI_GRP_BA_NEG),
+ WMI_TX_ADDBA_COMPLETE_EVENTID,
+ WMI_BA_RSP_SSN_EVENTID,
+ WMI_AGGR_STATE_TRIG_EVENTID,
+ WMI_ROAM_EVENTID = WMI_TLV_CMD(WMI_GRP_ROAM),
+ WMI_PROFILE_MATCH,
+ WMI_ROAM_SYNCH_EVENTID,
+ WMI_P2P_DISC_EVENTID = WMI_TLV_CMD(WMI_GRP_P2P),
+ WMI_P2P_NOA_EVENTID,
+ WMI_P2P_LISTEN_OFFLOAD_STOPPED_EVENTID,
+ WMI_AP_PS_EGAP_INFO_EVENTID = WMI_TLV_CMD(WMI_GRP_AP_PS),
+ WMI_PDEV_RESUME_EVENTID = WMI_TLV_CMD(WMI_GRP_SUSPEND),
+ WMI_WOW_WAKEUP_HOST_EVENTID = WMI_TLV_CMD(WMI_GRP_WOW),
+ WMI_D0_WOW_DISABLE_ACK_EVENTID,
+ WMI_WOW_INITIAL_WAKEUP_EVENTID,
+ WMI_RTT_MEASUREMENT_REPORT_EVENTID = WMI_TLV_CMD(WMI_GRP_RTT),
+ WMI_TSF_MEASUREMENT_REPORT_EVENTID,
+ WMI_RTT_ERROR_REPORT_EVENTID,
+ WMI_STATS_EXT_EVENTID = WMI_TLV_CMD(WMI_GRP_STATS),
+ WMI_IFACE_LINK_STATS_EVENTID,
+ WMI_PEER_LINK_STATS_EVENTID,
+ WMI_RADIO_LINK_STATS_EVENTID,
+ WMI_UPDATE_FW_MEM_DUMP_EVENTID,
+ WMI_DIAG_EVENT_LOG_SUPPORTED_EVENTID,
+ WMI_INST_RSSI_STATS_EVENTID,
+ WMI_RADIO_TX_POWER_LEVEL_STATS_EVENTID,
+ WMI_REPORT_STATS_EVENTID,
+ WMI_UPDATE_RCPI_EVENTID,
+ WMI_PEER_STATS_INFO_EVENTID,
+ WMI_RADIO_CHAN_STATS_EVENTID,
+ WMI_NLO_MATCH_EVENTID = WMI_TLV_CMD(WMI_GRP_NLO_OFL),
+ WMI_NLO_SCAN_COMPLETE_EVENTID,
+ WMI_APFIND_EVENTID,
+ WMI_PASSPOINT_MATCH_EVENTID,
+ WMI_GTK_OFFLOAD_STATUS_EVENTID = WMI_TLV_CMD(WMI_GRP_GTK_OFL),
+ WMI_GTK_REKEY_FAIL_EVENTID,
+ WMI_CSA_HANDLING_EVENTID = WMI_TLV_CMD(WMI_GRP_CSA_OFL),
+ WMI_CHATTER_PC_QUERY_EVENTID = WMI_TLV_CMD(WMI_GRP_CHATTER),
+ WMI_PDEV_DFS_RADAR_DETECTION_EVENTID = WMI_TLV_CMD(WMI_GRP_DFS),
+ WMI_VDEV_DFS_CAC_COMPLETE_EVENTID,
+ WMI_VDEV_ADFS_OCAC_COMPLETE_EVENTID,
+ WMI_ECHO_EVENTID = WMI_TLV_CMD(WMI_GRP_MISC),
+ WMI_PDEV_UTF_EVENTID,
+ WMI_DEBUG_MESG_EVENTID,
+ WMI_UPDATE_STATS_EVENTID,
+ WMI_DEBUG_PRINT_EVENTID,
+ WMI_DCS_INTERFERENCE_EVENTID,
+ WMI_PDEV_QVIT_EVENTID,
+ WMI_WLAN_PROFILE_DATA_EVENTID,
+ WMI_PDEV_FTM_INTG_EVENTID,
+ WMI_WLAN_FREQ_AVOID_EVENTID,
+ WMI_VDEV_GET_KEEPALIVE_EVENTID,
+ WMI_THERMAL_MGMT_EVENTID,
+ WMI_DIAG_DATA_CONTAINER_EVENTID,
+ WMI_HOST_AUTO_SHUTDOWN_EVENTID,
+ WMI_UPDATE_WHAL_MIB_STATS_EVENTID,
+ WMI_UPDATE_VDEV_RATE_STATS_EVENTID,
+ WMI_DIAG_EVENTID,
+ WMI_OCB_SET_SCHED_EVENTID,
+ WMI_DEBUG_MESG_FLUSH_COMPLETE_EVENTID,
+ WMI_RSSI_BREACH_EVENTID,
+ WMI_TRANSFER_DATA_TO_FLASH_COMPLETE_EVENTID,
+ WMI_PDEV_UTF_SCPC_EVENTID,
+ WMI_READ_DATA_FROM_FLASH_EVENTID,
+ WMI_REPORT_RX_AGGR_FAILURE_EVENTID,
+ WMI_PKGID_EVENTID,
+ WMI_GPIO_INPUT_EVENTID = WMI_TLV_CMD(WMI_GRP_GPIO),
+ WMI_UPLOADH_EVENTID,
+ WMI_CAPTUREH_EVENTID,
+ WMI_RFKILL_STATE_CHANGE_EVENTID,
+ WMI_TDLS_PEER_EVENTID = WMI_TLV_CMD(WMI_GRP_TDLS),
+ WMI_STA_SMPS_FORCE_MODE_COMPL_EVENTID = WMI_TLV_CMD(WMI_GRP_STA_SMPS),
+ WMI_BATCH_SCAN_ENABLED_EVENTID = WMI_TLV_CMD(WMI_GRP_LOCATION_SCAN),
+ WMI_BATCH_SCAN_RESULT_EVENTID,
+ WMI_OEM_CAPABILITY_EVENTID = WMI_TLV_CMD(WMI_GRP_OEM),
+ WMI_OEM_MEASUREMENT_REPORT_EVENTID,
+ WMI_OEM_ERROR_REPORT_EVENTID,
+ WMI_OEM_RESPONSE_EVENTID,
+ WMI_NAN_EVENTID = WMI_TLV_CMD(WMI_GRP_NAN),
+ WMI_NAN_DISC_IFACE_CREATED_EVENTID,
+ WMI_NAN_DISC_IFACE_DELETED_EVENTID,
+ WMI_NAN_STARTED_CLUSTER_EVENTID,
+ WMI_NAN_JOINED_CLUSTER_EVENTID,
+ WMI_COEX_REPORT_ANTENNA_ISOLATION_EVENTID = WMI_TLV_CMD(WMI_GRP_COEX),
+ WMI_LPI_RESULT_EVENTID = WMI_TLV_CMD(WMI_GRP_LPI),
+ WMI_LPI_STATUS_EVENTID,
+ WMI_LPI_HANDOFF_EVENTID,
+ WMI_EXTSCAN_START_STOP_EVENTID = WMI_TLV_CMD(WMI_GRP_EXTSCAN),
+ WMI_EXTSCAN_OPERATION_EVENTID,
+ WMI_EXTSCAN_TABLE_USAGE_EVENTID,
+ WMI_EXTSCAN_CACHED_RESULTS_EVENTID,
+ WMI_EXTSCAN_WLAN_CHANGE_RESULTS_EVENTID,
+ WMI_EXTSCAN_HOTLIST_MATCH_EVENTID,
+ WMI_EXTSCAN_CAPABILITIES_EVENTID,
+ WMI_EXTSCAN_HOTLIST_SSID_MATCH_EVENTID,
+ WMI_MDNS_STATS_EVENTID = WMI_TLV_CMD(WMI_GRP_MDNS_OFL),
+ WMI_SAP_OFL_ADD_STA_EVENTID = WMI_TLV_CMD(WMI_GRP_SAP_OFL),
+ WMI_SAP_OFL_DEL_STA_EVENTID,
+ WMI_OCB_SET_CONFIG_RESP_EVENTID = WMI_TLV_CMD(WMI_GRP_OCB),
+ WMI_OCB_GET_TSF_TIMER_RESP_EVENTID,
+ WMI_DCC_GET_STATS_RESP_EVENTID,
+ WMI_DCC_UPDATE_NDL_RESP_EVENTID,
+ WMI_DCC_STATS_EVENTID,
+ WMI_SOC_SET_HW_MODE_RESP_EVENTID = WMI_TLV_CMD(WMI_GRP_SOC),
+ WMI_SOC_HW_MODE_TRANSITION_EVENTID,
+ WMI_SOC_SET_DUAL_MAC_CONFIG_RESP_EVENTID,
+ WMI_MAWC_ENABLE_SENSOR_EVENTID = WMI_TLV_CMD(WMI_GRP_MAWC),
+ WMI_BPF_CAPABILIY_INFO_EVENTID = WMI_TLV_CMD(WMI_GRP_BPF_OFFLOAD),
+ WMI_BPF_VDEV_STATS_INFO_EVENTID,
+ WMI_RMC_NEW_LEADER_EVENTID = WMI_TLV_CMD(WMI_GRP_RMC),
+ WMI_REG_CHAN_LIST_CC_EVENTID = WMI_TLV_CMD(WMI_GRP_REGULATORY),
+ WMI_11D_NEW_COUNTRY_EVENTID,
+ WMI_NDI_CAP_RSP_EVENTID = WMI_TLV_CMD(WMI_GRP_PROTOTYPE),
+ WMI_NDP_INITIATOR_RSP_EVENTID,
+ WMI_NDP_RESPONDER_RSP_EVENTID,
+ WMI_NDP_END_RSP_EVENTID,
+ WMI_NDP_INDICATION_EVENTID,
+ WMI_NDP_CONFIRM_EVENTID,
+ WMI_NDP_END_INDICATION_EVENTID,
+
+ WMI_TWT_ENABLE_EVENTID = WMI_TLV_CMD(WMI_GRP_TWT),
+ WMI_TWT_DISABLE_EVENTID,
+ WMI_TWT_ADD_DIALOG_EVENTID,
+ WMI_TWT_DEL_DIALOG_EVENTID,
+ WMI_TWT_PAUSE_DIALOG_EVENTID,
+ WMI_TWT_RESUME_DIALOG_EVENTID,
+};
+
+enum wmi_tlv_pdev_param {
+ WMI_PDEV_PARAM_TX_CHAIN_MASK = 0x1,
+ WMI_PDEV_PARAM_RX_CHAIN_MASK,
+ WMI_PDEV_PARAM_TXPOWER_LIMIT2G,
+ WMI_PDEV_PARAM_TXPOWER_LIMIT5G,
+ WMI_PDEV_PARAM_TXPOWER_SCALE,
+ WMI_PDEV_PARAM_BEACON_GEN_MODE,
+ WMI_PDEV_PARAM_BEACON_TX_MODE,
+ WMI_PDEV_PARAM_RESMGR_OFFCHAN_MODE,
+ WMI_PDEV_PARAM_PROTECTION_MODE,
+ WMI_PDEV_PARAM_DYNAMIC_BW,
+ WMI_PDEV_PARAM_NON_AGG_SW_RETRY_TH,
+ WMI_PDEV_PARAM_AGG_SW_RETRY_TH,
+ WMI_PDEV_PARAM_STA_KICKOUT_TH,
+ WMI_PDEV_PARAM_AC_AGGRSIZE_SCALING,
+ WMI_PDEV_PARAM_LTR_ENABLE,
+ WMI_PDEV_PARAM_LTR_AC_LATENCY_BE,
+ WMI_PDEV_PARAM_LTR_AC_LATENCY_BK,
+ WMI_PDEV_PARAM_LTR_AC_LATENCY_VI,
+ WMI_PDEV_PARAM_LTR_AC_LATENCY_VO,
+ WMI_PDEV_PARAM_LTR_AC_LATENCY_TIMEOUT,
+ WMI_PDEV_PARAM_LTR_SLEEP_OVERRIDE,
+ WMI_PDEV_PARAM_LTR_RX_OVERRIDE,
+ WMI_PDEV_PARAM_LTR_TX_ACTIVITY_TIMEOUT,
+ WMI_PDEV_PARAM_L1SS_ENABLE,
+ WMI_PDEV_PARAM_DSLEEP_ENABLE,
+ WMI_PDEV_PARAM_PCIELP_TXBUF_FLUSH,
+ WMI_PDEV_PARAM_PCIELP_TXBUF_WATERMARK,
+ WMI_PDEV_PARAM_PCIELP_TXBUF_TMO_EN,
+ WMI_PDEV_PARAM_PCIELP_TXBUF_TMO_VALUE,
+ WMI_PDEV_PARAM_PDEV_STATS_UPDATE_PERIOD,
+ WMI_PDEV_PARAM_VDEV_STATS_UPDATE_PERIOD,
+ WMI_PDEV_PARAM_PEER_STATS_UPDATE_PERIOD,
+ WMI_PDEV_PARAM_BCNFLT_STATS_UPDATE_PERIOD,
+ WMI_PDEV_PARAM_PMF_QOS,
+ WMI_PDEV_PARAM_ARP_AC_OVERRIDE,
+ WMI_PDEV_PARAM_DCS,
+ WMI_PDEV_PARAM_ANI_ENABLE,
+ WMI_PDEV_PARAM_ANI_POLL_PERIOD,
+ WMI_PDEV_PARAM_ANI_LISTEN_PERIOD,
+ WMI_PDEV_PARAM_ANI_OFDM_LEVEL,
+ WMI_PDEV_PARAM_ANI_CCK_LEVEL,
+ WMI_PDEV_PARAM_DYNTXCHAIN,
+ WMI_PDEV_PARAM_PROXY_STA,
+ WMI_PDEV_PARAM_IDLE_PS_CONFIG,
+ WMI_PDEV_PARAM_POWER_GATING_SLEEP,
+ WMI_PDEV_PARAM_RFKILL_ENABLE,
+ WMI_PDEV_PARAM_BURST_DUR,
+ WMI_PDEV_PARAM_BURST_ENABLE,
+ WMI_PDEV_PARAM_HW_RFKILL_CONFIG,
+ WMI_PDEV_PARAM_LOW_POWER_RF_ENABLE,
+ WMI_PDEV_PARAM_L1SS_TRACK,
+ WMI_PDEV_PARAM_HYST_EN,
+ WMI_PDEV_PARAM_POWER_COLLAPSE_ENABLE,
+ WMI_PDEV_PARAM_LED_SYS_STATE,
+ WMI_PDEV_PARAM_LED_ENABLE,
+ WMI_PDEV_PARAM_AUDIO_OVER_WLAN_LATENCY,
+ WMI_PDEV_PARAM_AUDIO_OVER_WLAN_ENABLE,
+ WMI_PDEV_PARAM_WHAL_MIB_STATS_UPDATE_ENABLE,
+ WMI_PDEV_PARAM_VDEV_RATE_STATS_UPDATE_PERIOD,
+ WMI_PDEV_PARAM_CTS_CBW,
+ WMI_PDEV_PARAM_WNTS_CONFIG,
+ WMI_PDEV_PARAM_ADAPTIVE_EARLY_RX_ENABLE,
+ WMI_PDEV_PARAM_ADAPTIVE_EARLY_RX_MIN_SLEEP_SLOP,
+ WMI_PDEV_PARAM_ADAPTIVE_EARLY_RX_INC_DEC_STEP,
+ WMI_PDEV_PARAM_EARLY_RX_FIX_SLEEP_SLOP,
+ WMI_PDEV_PARAM_BMISS_BASED_ADAPTIVE_BTO_ENABLE,
+ WMI_PDEV_PARAM_BMISS_BTO_MIN_BCN_TIMEOUT,
+ WMI_PDEV_PARAM_BMISS_BTO_INC_DEC_STEP,
+ WMI_PDEV_PARAM_BTO_FIX_BCN_TIMEOUT,
+ WMI_PDEV_PARAM_CE_BASED_ADAPTIVE_BTO_ENABLE,
+ WMI_PDEV_PARAM_CE_BTO_COMBO_CE_VALUE,
+ WMI_PDEV_PARAM_TX_CHAIN_MASK_2G,
+ WMI_PDEV_PARAM_RX_CHAIN_MASK_2G,
+ WMI_PDEV_PARAM_TX_CHAIN_MASK_5G,
+ WMI_PDEV_PARAM_RX_CHAIN_MASK_5G,
+ WMI_PDEV_PARAM_TX_CHAIN_MASK_CCK,
+ WMI_PDEV_PARAM_TX_CHAIN_MASK_1SS,
+ WMI_PDEV_PARAM_CTS2SELF_FOR_P2P_GO_CONFIG,
+ WMI_PDEV_PARAM_TXPOWER_DECR_DB,
+ WMI_PDEV_PARAM_AGGR_BURST,
+ WMI_PDEV_PARAM_RX_DECAP_MODE,
+ WMI_PDEV_PARAM_FAST_CHANNEL_RESET,
+ WMI_PDEV_PARAM_SMART_ANTENNA_DEFAULT_ANTENNA,
+ WMI_PDEV_PARAM_ANTENNA_GAIN,
+ WMI_PDEV_PARAM_RX_FILTER,
+ WMI_PDEV_SET_MCAST_TO_UCAST_TID,
+ WMI_PDEV_PARAM_PROXY_STA_MODE,
+ WMI_PDEV_PARAM_SET_MCAST2UCAST_MODE,
+ WMI_PDEV_PARAM_SET_MCAST2UCAST_BUFFER,
+ WMI_PDEV_PARAM_REMOVE_MCAST2UCAST_BUFFER,
+ WMI_PDEV_PEER_STA_PS_STATECHG_ENABLE,
+ WMI_PDEV_PARAM_IGMPMLD_AC_OVERRIDE,
+ WMI_PDEV_PARAM_BLOCK_INTERBSS,
+ WMI_PDEV_PARAM_SET_DISABLE_RESET_CMDID,
+ WMI_PDEV_PARAM_SET_MSDU_TTL_CMDID,
+ WMI_PDEV_PARAM_SET_PPDU_DURATION_CMDID,
+ WMI_PDEV_PARAM_TXBF_SOUND_PERIOD_CMDID,
+ WMI_PDEV_PARAM_SET_PROMISC_MODE_CMDID,
+ WMI_PDEV_PARAM_SET_BURST_MODE_CMDID,
+ WMI_PDEV_PARAM_EN_STATS,
+ WMI_PDEV_PARAM_MU_GROUP_POLICY,
+ WMI_PDEV_PARAM_NOISE_DETECTION,
+ WMI_PDEV_PARAM_NOISE_THRESHOLD,
+ WMI_PDEV_PARAM_DPD_ENABLE,
+ WMI_PDEV_PARAM_SET_MCAST_BCAST_ECHO,
+ WMI_PDEV_PARAM_ATF_STRICT_SCH,
+ WMI_PDEV_PARAM_ATF_SCHED_DURATION,
+ WMI_PDEV_PARAM_ANT_PLZN,
+ WMI_PDEV_PARAM_MGMT_RETRY_LIMIT,
+ WMI_PDEV_PARAM_SENSITIVITY_LEVEL,
+ WMI_PDEV_PARAM_SIGNED_TXPOWER_2G,
+ WMI_PDEV_PARAM_SIGNED_TXPOWER_5G,
+ WMI_PDEV_PARAM_ENABLE_PER_TID_AMSDU,
+ WMI_PDEV_PARAM_ENABLE_PER_TID_AMPDU,
+ WMI_PDEV_PARAM_CCA_THRESHOLD,
+ WMI_PDEV_PARAM_RTS_FIXED_RATE,
+ WMI_PDEV_PARAM_PDEV_RESET,
+ WMI_PDEV_PARAM_WAPI_MBSSID_OFFSET,
+ WMI_PDEV_PARAM_ARP_DBG_SRCADDR,
+ WMI_PDEV_PARAM_ARP_DBG_DSTADDR,
+ WMI_PDEV_PARAM_ATF_OBSS_NOISE_SCH,
+ WMI_PDEV_PARAM_ATF_OBSS_NOISE_SCALING_FACTOR,
+ WMI_PDEV_PARAM_CUST_TXPOWER_SCALE,
+ WMI_PDEV_PARAM_ATF_DYNAMIC_ENABLE,
+ WMI_PDEV_PARAM_CTRL_RETRY_LIMIT,
+ WMI_PDEV_PARAM_PROPAGATION_DELAY,
+ WMI_PDEV_PARAM_ENA_ANT_DIV,
+ WMI_PDEV_PARAM_FORCE_CHAIN_ANT,
+ WMI_PDEV_PARAM_ANT_DIV_SELFTEST,
+ WMI_PDEV_PARAM_ANT_DIV_SELFTEST_INTVL,
+ WMI_PDEV_PARAM_STATS_OBSERVATION_PERIOD,
+ WMI_PDEV_PARAM_TX_PPDU_DELAY_BIN_SIZE_MS,
+ WMI_PDEV_PARAM_TX_PPDU_DELAY_ARRAY_LEN,
+ WMI_PDEV_PARAM_TX_MPDU_AGGR_ARRAY_LEN,
+ WMI_PDEV_PARAM_RX_MPDU_AGGR_ARRAY_LEN,
+ WMI_PDEV_PARAM_TX_SCH_DELAY,
+ WMI_PDEV_PARAM_ENABLE_RTS_SIFS_BURSTING,
+ WMI_PDEV_PARAM_MAX_MPDUS_IN_AMPDU,
+ WMI_PDEV_PARAM_PEER_STATS_INFO_ENABLE,
+ WMI_PDEV_PARAM_FAST_PWR_TRANSITION,
+ WMI_PDEV_PARAM_RADIO_CHAN_STATS_ENABLE,
+ WMI_PDEV_PARAM_RADIO_DIAGNOSIS_ENABLE,
+ WMI_PDEV_PARAM_MESH_MCAST_ENABLE,
+};
+
+enum wmi_tlv_vdev_param {
+ WMI_VDEV_PARAM_RTS_THRESHOLD = 0x1,
+ WMI_VDEV_PARAM_FRAGMENTATION_THRESHOLD,
+ WMI_VDEV_PARAM_BEACON_INTERVAL,
+ WMI_VDEV_PARAM_LISTEN_INTERVAL,
+ WMI_VDEV_PARAM_MULTICAST_RATE,
+ WMI_VDEV_PARAM_MGMT_TX_RATE,
+ WMI_VDEV_PARAM_SLOT_TIME,
+ WMI_VDEV_PARAM_PREAMBLE,
+ WMI_VDEV_PARAM_SWBA_TIME,
+ WMI_VDEV_STATS_UPDATE_PERIOD,
+ WMI_VDEV_PWRSAVE_AGEOUT_TIME,
+ WMI_VDEV_HOST_SWBA_INTERVAL,
+ WMI_VDEV_PARAM_DTIM_PERIOD,
+ WMI_VDEV_OC_SCHEDULER_AIR_TIME_LIMIT,
+ WMI_VDEV_PARAM_WDS,
+ WMI_VDEV_PARAM_ATIM_WINDOW,
+ WMI_VDEV_PARAM_BMISS_COUNT_MAX,
+ WMI_VDEV_PARAM_BMISS_FIRST_BCNT,
+ WMI_VDEV_PARAM_BMISS_FINAL_BCNT,
+ WMI_VDEV_PARAM_FEATURE_WMM,
+ WMI_VDEV_PARAM_CHWIDTH,
+ WMI_VDEV_PARAM_CHEXTOFFSET,
+ WMI_VDEV_PARAM_DISABLE_HTPROTECTION,
+ WMI_VDEV_PARAM_STA_QUICKKICKOUT,
+ WMI_VDEV_PARAM_MGMT_RATE,
+ WMI_VDEV_PARAM_PROTECTION_MODE,
+ WMI_VDEV_PARAM_FIXED_RATE,
+ WMI_VDEV_PARAM_SGI,
+ WMI_VDEV_PARAM_LDPC,
+ WMI_VDEV_PARAM_TX_STBC,
+ WMI_VDEV_PARAM_RX_STBC,
+ WMI_VDEV_PARAM_INTRA_BSS_FWD,
+ WMI_VDEV_PARAM_DEF_KEYID,
+ WMI_VDEV_PARAM_NSS,
+ WMI_VDEV_PARAM_BCAST_DATA_RATE,
+ WMI_VDEV_PARAM_MCAST_DATA_RATE,
+ WMI_VDEV_PARAM_MCAST_INDICATE,
+ WMI_VDEV_PARAM_DHCP_INDICATE,
+ WMI_VDEV_PARAM_UNKNOWN_DEST_INDICATE,
+ WMI_VDEV_PARAM_AP_KEEPALIVE_MIN_IDLE_INACTIVE_TIME_SECS,
+ WMI_VDEV_PARAM_AP_KEEPALIVE_MAX_IDLE_INACTIVE_TIME_SECS,
+ WMI_VDEV_PARAM_AP_KEEPALIVE_MAX_UNRESPONSIVE_TIME_SECS,
+ WMI_VDEV_PARAM_AP_ENABLE_NAWDS,
+ WMI_VDEV_PARAM_ENABLE_RTSCTS,
+ WMI_VDEV_PARAM_TXBF,
+ WMI_VDEV_PARAM_PACKET_POWERSAVE,
+ WMI_VDEV_PARAM_DROP_UNENCRY,
+ WMI_VDEV_PARAM_TX_ENCAP_TYPE,
+ WMI_VDEV_PARAM_AP_DETECT_OUT_OF_SYNC_SLEEPING_STA_TIME_SECS,
+ WMI_VDEV_PARAM_EARLY_RX_ADJUST_ENABLE,
+ WMI_VDEV_PARAM_EARLY_RX_TGT_BMISS_NUM,
+ WMI_VDEV_PARAM_EARLY_RX_BMISS_SAMPLE_CYCLE,
+ WMI_VDEV_PARAM_EARLY_RX_SLOP_STEP,
+ WMI_VDEV_PARAM_EARLY_RX_INIT_SLOP,
+ WMI_VDEV_PARAM_EARLY_RX_ADJUST_PAUSE,
+ WMI_VDEV_PARAM_TX_PWRLIMIT,
+ WMI_VDEV_PARAM_SNR_NUM_FOR_CAL,
+ WMI_VDEV_PARAM_ROAM_FW_OFFLOAD,
+ WMI_VDEV_PARAM_ENABLE_RMC,
+ WMI_VDEV_PARAM_IBSS_MAX_BCN_LOST_MS,
+ WMI_VDEV_PARAM_MAX_RATE,
+ WMI_VDEV_PARAM_EARLY_RX_DRIFT_SAMPLE,
+ WMI_VDEV_PARAM_SET_IBSS_TX_FAIL_CNT_THR,
+ WMI_VDEV_PARAM_EBT_RESYNC_TIMEOUT,
+ WMI_VDEV_PARAM_AGGR_TRIG_EVENT_ENABLE,
+ WMI_VDEV_PARAM_IS_IBSS_POWER_SAVE_ALLOWED,
+ WMI_VDEV_PARAM_IS_POWER_COLLAPSE_ALLOWED,
+ WMI_VDEV_PARAM_IS_AWAKE_ON_TXRX_ENABLED,
+ WMI_VDEV_PARAM_INACTIVITY_CNT,
+ WMI_VDEV_PARAM_TXSP_END_INACTIVITY_TIME_MS,
+ WMI_VDEV_PARAM_DTIM_POLICY,
+ WMI_VDEV_PARAM_IBSS_PS_WARMUP_TIME_SECS,
+ WMI_VDEV_PARAM_IBSS_PS_1RX_CHAIN_IN_ATIM_WINDOW_ENABLE,
+ WMI_VDEV_PARAM_RX_LEAK_WINDOW,
+ WMI_VDEV_PARAM_STATS_AVG_FACTOR,
+ WMI_VDEV_PARAM_DISCONNECT_TH,
+ WMI_VDEV_PARAM_RTSCTS_RATE,
+ WMI_VDEV_PARAM_MCC_RTSCTS_PROTECTION_ENABLE,
+ WMI_VDEV_PARAM_MCC_BROADCAST_PROBE_ENABLE,
+ WMI_VDEV_PARAM_TXPOWER_SCALE,
+ WMI_VDEV_PARAM_TXPOWER_SCALE_DECR_DB,
+ WMI_VDEV_PARAM_MCAST2UCAST_SET,
+ WMI_VDEV_PARAM_RC_NUM_RETRIES,
+ WMI_VDEV_PARAM_CABQ_MAXDUR,
+ WMI_VDEV_PARAM_MFPTEST_SET,
+ WMI_VDEV_PARAM_RTS_FIXED_RATE,
+ WMI_VDEV_PARAM_VHT_SGIMASK,
+ WMI_VDEV_PARAM_VHT80_RATEMASK,
+ WMI_VDEV_PARAM_PROXY_STA,
+ WMI_VDEV_PARAM_VIRTUAL_CELL_MODE,
+ WMI_VDEV_PARAM_RX_DECAP_TYPE,
+ WMI_VDEV_PARAM_BW_NSS_RATEMASK,
+ WMI_VDEV_PARAM_SENSOR_AP,
+ WMI_VDEV_PARAM_BEACON_RATE,
+ WMI_VDEV_PARAM_DTIM_ENABLE_CTS,
+ WMI_VDEV_PARAM_STA_KICKOUT,
+ WMI_VDEV_PARAM_CAPABILITIES,
+ WMI_VDEV_PARAM_TSF_INCREMENT,
+ WMI_VDEV_PARAM_AMPDU_PER_AC,
+ WMI_VDEV_PARAM_RX_FILTER,
+ WMI_VDEV_PARAM_MGMT_TX_POWER,
+ WMI_VDEV_PARAM_NON_AGG_SW_RETRY_TH,
+ WMI_VDEV_PARAM_AGG_SW_RETRY_TH,
+ WMI_VDEV_PARAM_DISABLE_DYN_BW_RTS,
+ WMI_VDEV_PARAM_ATF_SSID_SCHED_POLICY,
+ WMI_VDEV_PARAM_HE_DCM,
+ WMI_VDEV_PARAM_HE_RANGE_EXT,
+ WMI_VDEV_PARAM_ENABLE_BCAST_PROBE_RESPONSE,
+ WMI_VDEV_PARAM_FILS_MAX_CHANNEL_GUARD_TIME,
+ WMI_VDEV_PARAM_BA_MODE = 0x7e,
+ WMI_VDEV_PARAM_SET_HE_SOUNDING_MODE = 0x87,
+ WMI_VDEV_PARAM_PROTOTYPE = 0x8000,
+ WMI_VDEV_PARAM_BSS_COLOR,
+ WMI_VDEV_PARAM_SET_HEMU_MODE,
+ WMI_VDEV_PARAM_TX_OFDMA_CPLEN,
+};
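+
+/*
+ * Note: the explicit initializers above (0x7e, 0x87, 0x8000) intentionally
+ * skip firmware parameter IDs this driver does not define, and
+ * WMI_VDEV_PARAM_PROTOTYPE = 0x8000 opens a separate prototype parameter
+ * range in the firmware interface.
+ */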
+
+enum wmi_tlv_peer_flags {
+ WMI_TLV_PEER_AUTH = 0x00000001,
+ WMI_TLV_PEER_QOS = 0x00000002,
+ WMI_TLV_PEER_NEED_PTK_4_WAY = 0x00000004,
+ WMI_TLV_PEER_NEED_GTK_2_WAY = 0x00000010,
+ WMI_TLV_PEER_APSD = 0x00000800,
+ WMI_TLV_PEER_HT = 0x00001000,
+ WMI_TLV_PEER_40MHZ = 0x00002000,
+ WMI_TLV_PEER_STBC = 0x00008000,
+ WMI_TLV_PEER_LDPC = 0x00010000,
+ WMI_TLV_PEER_DYN_MIMOPS = 0x00020000,
+ WMI_TLV_PEER_STATIC_MIMOPS = 0x00040000,
+ WMI_TLV_PEER_SPATIAL_MUX = 0x00200000,
+ WMI_TLV_PEER_VHT = 0x02000000,
+ WMI_TLV_PEER_80MHZ = 0x04000000,
+ WMI_TLV_PEER_PMF = 0x08000000,
+ WMI_PEER_IS_P2P_CAPABLE = 0x20000000,
+ WMI_PEER_160MHZ = 0x40000000,
+ WMI_PEER_SAFEMODE_EN = 0x80000000,
+};
+
+/* enum of TLV tags for each parameter structure type */
+enum wmi_tlv_tag {
+ WMI_TAG_LAST_RESERVED = 15,
+ WMI_TAG_FIRST_ARRAY_ENUM,
+ WMI_TAG_ARRAY_UINT32 = WMI_TAG_FIRST_ARRAY_ENUM,
+ WMI_TAG_ARRAY_BYTE,
+ WMI_TAG_ARRAY_STRUCT,
+ WMI_TAG_ARRAY_FIXED_STRUCT,
+ WMI_TAG_LAST_ARRAY_ENUM = 31,
+ WMI_TAG_SERVICE_READY_EVENT,
+ WMI_TAG_HAL_REG_CAPABILITIES,
+ WMI_TAG_WLAN_HOST_MEM_REQ,
+ WMI_TAG_READY_EVENT,
+ WMI_TAG_SCAN_EVENT,
+ WMI_TAG_PDEV_TPC_CONFIG_EVENT,
+ WMI_TAG_CHAN_INFO_EVENT,
+ WMI_TAG_COMB_PHYERR_RX_HDR,
+ WMI_TAG_VDEV_START_RESPONSE_EVENT,
+ WMI_TAG_VDEV_STOPPED_EVENT,
+ WMI_TAG_VDEV_INSTALL_KEY_COMPLETE_EVENT,
+ WMI_TAG_PEER_STA_KICKOUT_EVENT,
+ WMI_TAG_MGMT_RX_HDR,
+ WMI_TAG_TBTT_OFFSET_EVENT,
+ WMI_TAG_TX_DELBA_COMPLETE_EVENT,
+ WMI_TAG_TX_ADDBA_COMPLETE_EVENT,
+ WMI_TAG_ROAM_EVENT,
+ WMI_TAG_WOW_EVENT_INFO,
+ WMI_TAG_WOW_EVENT_INFO_SECTION_BITMAP,
+ WMI_TAG_RTT_EVENT_HEADER,
+ WMI_TAG_RTT_ERROR_REPORT_EVENT,
+ WMI_TAG_RTT_MEAS_EVENT,
+ WMI_TAG_ECHO_EVENT,
+ WMI_TAG_FTM_INTG_EVENT,
+ WMI_TAG_VDEV_GET_KEEPALIVE_EVENT,
+ WMI_TAG_GPIO_INPUT_EVENT,
+ WMI_TAG_CSA_EVENT,
+ WMI_TAG_GTK_OFFLOAD_STATUS_EVENT,
+ WMI_TAG_IGTK_INFO,
+ WMI_TAG_DCS_INTERFERENCE_EVENT,
+ WMI_TAG_ATH_DCS_CW_INT,
+ WMI_TAG_WLAN_DCS_CW_INT = /* ALIAS */
+ WMI_TAG_ATH_DCS_CW_INT,
+ WMI_TAG_ATH_DCS_WLAN_INT_STAT,
+ WMI_TAG_WLAN_DCS_IM_TGT_STATS_T = /* ALIAS */
+ WMI_TAG_ATH_DCS_WLAN_INT_STAT,
+ WMI_TAG_WLAN_PROFILE_CTX_T,
+ WMI_TAG_WLAN_PROFILE_T,
+ WMI_TAG_PDEV_QVIT_EVENT,
+ WMI_TAG_HOST_SWBA_EVENT,
+ WMI_TAG_TIM_INFO,
+ WMI_TAG_P2P_NOA_INFO,
+ WMI_TAG_STATS_EVENT,
+ WMI_TAG_AVOID_FREQ_RANGES_EVENT,
+ WMI_TAG_AVOID_FREQ_RANGE_DESC,
+ WMI_TAG_GTK_REKEY_FAIL_EVENT,
+ WMI_TAG_INIT_CMD,
+ WMI_TAG_RESOURCE_CONFIG,
+ WMI_TAG_WLAN_HOST_MEMORY_CHUNK,
+ WMI_TAG_START_SCAN_CMD,
+ WMI_TAG_STOP_SCAN_CMD,
+ WMI_TAG_SCAN_CHAN_LIST_CMD,
+ WMI_TAG_CHANNEL,
+ WMI_TAG_PDEV_SET_REGDOMAIN_CMD,
+ WMI_TAG_PDEV_SET_PARAM_CMD,
+ WMI_TAG_PDEV_SET_WMM_PARAMS_CMD,
+ WMI_TAG_WMM_PARAMS,
+ WMI_TAG_PDEV_SET_QUIET_CMD,
+ WMI_TAG_VDEV_CREATE_CMD,
+ WMI_TAG_VDEV_DELETE_CMD,
+ WMI_TAG_VDEV_START_REQUEST_CMD,
+ WMI_TAG_P2P_NOA_DESCRIPTOR,
+ WMI_TAG_P2P_GO_SET_BEACON_IE,
+ WMI_TAG_GTK_OFFLOAD_CMD,
+ WMI_TAG_VDEV_UP_CMD,
+ WMI_TAG_VDEV_STOP_CMD,
+ WMI_TAG_VDEV_DOWN_CMD,
+ WMI_TAG_VDEV_SET_PARAM_CMD,
+ WMI_TAG_VDEV_INSTALL_KEY_CMD,
+ WMI_TAG_PEER_CREATE_CMD,
+ WMI_TAG_PEER_DELETE_CMD,
+ WMI_TAG_PEER_FLUSH_TIDS_CMD,
+ WMI_TAG_PEER_SET_PARAM_CMD,
+ WMI_TAG_PEER_ASSOC_COMPLETE_CMD,
+ WMI_TAG_VHT_RATE_SET,
+ WMI_TAG_BCN_TMPL_CMD,
+ WMI_TAG_PRB_TMPL_CMD,
+ WMI_TAG_BCN_PRB_INFO,
+ WMI_TAG_PEER_TID_ADDBA_CMD,
+ WMI_TAG_PEER_TID_DELBA_CMD,
+ WMI_TAG_STA_POWERSAVE_MODE_CMD,
+ WMI_TAG_STA_POWERSAVE_PARAM_CMD,
+ WMI_TAG_STA_DTIM_PS_METHOD_CMD,
+ WMI_TAG_ROAM_SCAN_MODE,
+ WMI_TAG_ROAM_SCAN_RSSI_THRESHOLD,
+ WMI_TAG_ROAM_SCAN_PERIOD,
+ WMI_TAG_ROAM_SCAN_RSSI_CHANGE_THRESHOLD,
+ WMI_TAG_PDEV_SUSPEND_CMD,
+ WMI_TAG_PDEV_RESUME_CMD,
+ WMI_TAG_ADD_BCN_FILTER_CMD,
+ WMI_TAG_RMV_BCN_FILTER_CMD,
+ WMI_TAG_WOW_ENABLE_CMD,
+ WMI_TAG_WOW_HOSTWAKEUP_FROM_SLEEP_CMD,
+ WMI_TAG_STA_UAPSD_AUTO_TRIG_CMD,
+ WMI_TAG_STA_UAPSD_AUTO_TRIG_PARAM,
+ WMI_TAG_SET_ARP_NS_OFFLOAD_CMD,
+ WMI_TAG_ARP_OFFLOAD_TUPLE,
+ WMI_TAG_NS_OFFLOAD_TUPLE,
+ WMI_TAG_FTM_INTG_CMD,
+ WMI_TAG_STA_KEEPALIVE_CMD,
+ WMI_TAG_STA_KEEPALVE_ARP_RESPONSE,
+ WMI_TAG_P2P_SET_VENDOR_IE_DATA_CMD,
+ WMI_TAG_AP_PS_PEER_CMD,
+ WMI_TAG_PEER_RATE_RETRY_SCHED_CMD,
+ WMI_TAG_WLAN_PROFILE_TRIGGER_CMD,
+ WMI_TAG_WLAN_PROFILE_SET_HIST_INTVL_CMD,
+ WMI_TAG_WLAN_PROFILE_GET_PROF_DATA_CMD,
+ WMI_TAG_WLAN_PROFILE_ENABLE_PROFILE_ID_CMD,
+ WMI_TAG_WOW_DEL_PATTERN_CMD,
+ WMI_TAG_WOW_ADD_DEL_EVT_CMD,
+ WMI_TAG_RTT_MEASREQ_HEAD,
+ WMI_TAG_RTT_MEASREQ_BODY,
+ WMI_TAG_RTT_TSF_CMD,
+ WMI_TAG_VDEV_SPECTRAL_CONFIGURE_CMD,
+ WMI_TAG_VDEV_SPECTRAL_ENABLE_CMD,
+ WMI_TAG_REQUEST_STATS_CMD,
+ WMI_TAG_NLO_CONFIG_CMD,
+ WMI_TAG_NLO_CONFIGURED_PARAMETERS,
+ WMI_TAG_CSA_OFFLOAD_ENABLE_CMD,
+ WMI_TAG_CSA_OFFLOAD_CHANSWITCH_CMD,
+ WMI_TAG_CHATTER_SET_MODE_CMD,
+ WMI_TAG_ECHO_CMD,
+ WMI_TAG_VDEV_SET_KEEPALIVE_CMD,
+ WMI_TAG_VDEV_GET_KEEPALIVE_CMD,
+ WMI_TAG_FORCE_FW_HANG_CMD,
+ WMI_TAG_GPIO_CONFIG_CMD,
+ WMI_TAG_GPIO_OUTPUT_CMD,
+ WMI_TAG_PEER_ADD_WDS_ENTRY_CMD,
+ WMI_TAG_PEER_REMOVE_WDS_ENTRY_CMD,
+ WMI_TAG_BCN_TX_HDR,
+ WMI_TAG_BCN_SEND_FROM_HOST_CMD,
+ WMI_TAG_MGMT_TX_HDR,
+ WMI_TAG_ADDBA_CLEAR_RESP_CMD,
+ WMI_TAG_ADDBA_SEND_CMD,
+ WMI_TAG_DELBA_SEND_CMD,
+ WMI_TAG_ADDBA_SETRESPONSE_CMD,
+ WMI_TAG_SEND_SINGLEAMSDU_CMD,
+ WMI_TAG_PDEV_PKTLOG_ENABLE_CMD,
+ WMI_TAG_PDEV_PKTLOG_DISABLE_CMD,
+ WMI_TAG_PDEV_SET_HT_IE_CMD,
+ WMI_TAG_PDEV_SET_VHT_IE_CMD,
+ WMI_TAG_PDEV_SET_DSCP_TID_MAP_CMD,
+ WMI_TAG_PDEV_GREEN_AP_PS_ENABLE_CMD,
+ WMI_TAG_PDEV_GET_TPC_CONFIG_CMD,
+ WMI_TAG_PDEV_SET_BASE_MACADDR_CMD,
+ WMI_TAG_PEER_MCAST_GROUP_CMD,
+ WMI_TAG_ROAM_AP_PROFILE,
+ WMI_TAG_AP_PROFILE,
+ WMI_TAG_SCAN_SCH_PRIORITY_TABLE_CMD,
+ WMI_TAG_PDEV_DFS_ENABLE_CMD,
+ WMI_TAG_PDEV_DFS_DISABLE_CMD,
+ WMI_TAG_WOW_ADD_PATTERN_CMD,
+ WMI_TAG_WOW_BITMAP_PATTERN_T,
+ WMI_TAG_WOW_IPV4_SYNC_PATTERN_T,
+ WMI_TAG_WOW_IPV6_SYNC_PATTERN_T,
+ WMI_TAG_WOW_MAGIC_PATTERN_CMD,
+ WMI_TAG_SCAN_UPDATE_REQUEST_CMD,
+ WMI_TAG_CHATTER_PKT_COALESCING_FILTER,
+ WMI_TAG_CHATTER_COALESCING_ADD_FILTER_CMD,
+ WMI_TAG_CHATTER_COALESCING_DELETE_FILTER_CMD,
+ WMI_TAG_CHATTER_COALESCING_QUERY_CMD,
+ WMI_TAG_TXBF_CMD,
+ WMI_TAG_DEBUG_LOG_CONFIG_CMD,
+ WMI_TAG_NLO_EVENT,
+ WMI_TAG_CHATTER_QUERY_REPLY_EVENT,
+ WMI_TAG_UPLOAD_H_HDR,
+ WMI_TAG_CAPTURE_H_EVENT_HDR,
+ WMI_TAG_VDEV_WNM_SLEEPMODE_CMD,
+ WMI_TAG_VDEV_IPSEC_NATKEEPALIVE_FILTER_CMD,
+ WMI_TAG_VDEV_WMM_ADDTS_CMD,
+ WMI_TAG_VDEV_WMM_DELTS_CMD,
+ WMI_TAG_VDEV_SET_WMM_PARAMS_CMD,
+ WMI_TAG_TDLS_SET_STATE_CMD,
+ WMI_TAG_TDLS_PEER_UPDATE_CMD,
+ WMI_TAG_TDLS_PEER_EVENT,
+ WMI_TAG_TDLS_PEER_CAPABILITIES,
+ WMI_TAG_VDEV_MCC_SET_TBTT_MODE_CMD,
+ WMI_TAG_ROAM_CHAN_LIST,
+ WMI_TAG_VDEV_MCC_BCN_INTVL_CHANGE_EVENT,
+ WMI_TAG_RESMGR_ADAPTIVE_OCS_ENABLE_DISABLE_CMD,
+ WMI_TAG_RESMGR_SET_CHAN_TIME_QUOTA_CMD,
+ WMI_TAG_RESMGR_SET_CHAN_LATENCY_CMD,
+ WMI_TAG_BA_REQ_SSN_CMD,
+ WMI_TAG_BA_RSP_SSN_EVENT,
+ WMI_TAG_STA_SMPS_FORCE_MODE_CMD,
+ WMI_TAG_SET_MCASTBCAST_FILTER_CMD,
+ WMI_TAG_P2P_SET_OPPPS_CMD,
+ WMI_TAG_P2P_SET_NOA_CMD,
+ WMI_TAG_BA_REQ_SSN_CMD_SUB_STRUCT_PARAM,
+ WMI_TAG_BA_REQ_SSN_EVENT_SUB_STRUCT_PARAM,
+ WMI_TAG_STA_SMPS_PARAM_CMD,
+ WMI_TAG_VDEV_SET_GTX_PARAMS_CMD,
+ WMI_TAG_MCC_SCHED_TRAFFIC_STATS_CMD,
+ WMI_TAG_MCC_SCHED_STA_TRAFFIC_STATS,
+ WMI_TAG_OFFLOAD_BCN_TX_STATUS_EVENT,
+ WMI_TAG_P2P_NOA_EVENT,
+ WMI_TAG_HB_SET_ENABLE_CMD,
+ WMI_TAG_HB_SET_TCP_PARAMS_CMD,
+ WMI_TAG_HB_SET_TCP_PKT_FILTER_CMD,
+ WMI_TAG_HB_SET_UDP_PARAMS_CMD,
+ WMI_TAG_HB_SET_UDP_PKT_FILTER_CMD,
+ WMI_TAG_HB_IND_EVENT,
+ WMI_TAG_TX_PAUSE_EVENT,
+ WMI_TAG_RFKILL_EVENT,
+ WMI_TAG_DFS_RADAR_EVENT,
+ WMI_TAG_DFS_PHYERR_FILTER_ENA_CMD,
+ WMI_TAG_DFS_PHYERR_FILTER_DIS_CMD,
+ WMI_TAG_BATCH_SCAN_RESULT_SCAN_LIST,
+ WMI_TAG_BATCH_SCAN_RESULT_NETWORK_INFO,
+ WMI_TAG_BATCH_SCAN_ENABLE_CMD,
+ WMI_TAG_BATCH_SCAN_DISABLE_CMD,
+ WMI_TAG_BATCH_SCAN_TRIGGER_RESULT_CMD,
+ WMI_TAG_BATCH_SCAN_ENABLED_EVENT,
+ WMI_TAG_BATCH_SCAN_RESULT_EVENT,
+ WMI_TAG_VDEV_PLMREQ_START_CMD,
+ WMI_TAG_VDEV_PLMREQ_STOP_CMD,
+ WMI_TAG_THERMAL_MGMT_CMD,
+ WMI_TAG_THERMAL_MGMT_EVENT,
+ WMI_TAG_PEER_INFO_REQ_CMD,
+ WMI_TAG_PEER_INFO_EVENT,
+ WMI_TAG_PEER_INFO,
+ WMI_TAG_PEER_TX_FAIL_CNT_THR_EVENT,
+ WMI_TAG_RMC_SET_MODE_CMD,
+ WMI_TAG_RMC_SET_ACTION_PERIOD_CMD,
+ WMI_TAG_RMC_CONFIG_CMD,
+ WMI_TAG_MHF_OFFLOAD_SET_MODE_CMD,
+ WMI_TAG_MHF_OFFLOAD_PLUMB_ROUTING_TABLE_CMD,
+ WMI_TAG_ADD_PROACTIVE_ARP_RSP_PATTERN_CMD,
+ WMI_TAG_DEL_PROACTIVE_ARP_RSP_PATTERN_CMD,
+ WMI_TAG_NAN_CMD_PARAM,
+ WMI_TAG_NAN_EVENT_HDR,
+ WMI_TAG_PDEV_L1SS_TRACK_EVENT,
+ WMI_TAG_DIAG_DATA_CONTAINER_EVENT,
+ WMI_TAG_MODEM_POWER_STATE_CMD_PARAM,
+ WMI_TAG_PEER_GET_ESTIMATED_LINKSPEED_CMD,
+ WMI_TAG_PEER_ESTIMATED_LINKSPEED_EVENT,
+ WMI_TAG_AGGR_STATE_TRIG_EVENT,
+ WMI_TAG_MHF_OFFLOAD_ROUTING_TABLE_ENTRY,
+ WMI_TAG_ROAM_SCAN_CMD,
+ WMI_TAG_REQ_STATS_EXT_CMD,
+ WMI_TAG_STATS_EXT_EVENT,
+ WMI_TAG_OBSS_SCAN_ENABLE_CMD,
+ WMI_TAG_OBSS_SCAN_DISABLE_CMD,
+ WMI_TAG_OFFLOAD_PRB_RSP_TX_STATUS_EVENT,
+ WMI_TAG_PDEV_SET_LED_CONFIG_CMD,
+ WMI_TAG_HOST_AUTO_SHUTDOWN_CFG_CMD,
+ WMI_TAG_HOST_AUTO_SHUTDOWN_EVENT,
+ WMI_TAG_UPDATE_WHAL_MIB_STATS_EVENT,
+ WMI_TAG_CHAN_AVOID_UPDATE_CMD_PARAM,
+ WMI_TAG_WOW_IOAC_PKT_PATTERN_T,
+ WMI_TAG_WOW_IOAC_TMR_PATTERN_T,
+ WMI_TAG_WOW_IOAC_ADD_KEEPALIVE_CMD,
+ WMI_TAG_WOW_IOAC_DEL_KEEPALIVE_CMD,
+ WMI_TAG_WOW_IOAC_KEEPALIVE_T,
+ WMI_TAG_WOW_IOAC_ADD_PATTERN_CMD,
+ WMI_TAG_WOW_IOAC_DEL_PATTERN_CMD,
+ WMI_TAG_START_LINK_STATS_CMD,
+ WMI_TAG_CLEAR_LINK_STATS_CMD,
+ WMI_TAG_REQUEST_LINK_STATS_CMD,
+ WMI_TAG_IFACE_LINK_STATS_EVENT,
+ WMI_TAG_RADIO_LINK_STATS_EVENT,
+ WMI_TAG_PEER_STATS_EVENT,
+ WMI_TAG_CHANNEL_STATS,
+ WMI_TAG_RADIO_LINK_STATS,
+ WMI_TAG_RATE_STATS,
+ WMI_TAG_PEER_LINK_STATS,
+ WMI_TAG_WMM_AC_STATS,
+ WMI_TAG_IFACE_LINK_STATS,
+ WMI_TAG_LPI_MGMT_SNOOPING_CONFIG_CMD,
+ WMI_TAG_LPI_START_SCAN_CMD,
+ WMI_TAG_LPI_STOP_SCAN_CMD,
+ WMI_TAG_LPI_RESULT_EVENT,
+ WMI_TAG_PEER_STATE_EVENT,
+ WMI_TAG_EXTSCAN_BUCKET_CMD,
+ WMI_TAG_EXTSCAN_BUCKET_CHANNEL_EVENT,
+ WMI_TAG_EXTSCAN_START_CMD,
+ WMI_TAG_EXTSCAN_STOP_CMD,
+ WMI_TAG_EXTSCAN_CONFIGURE_WLAN_CHANGE_MONITOR_CMD,
+ WMI_TAG_EXTSCAN_WLAN_CHANGE_BSSID_PARAM_CMD,
+ WMI_TAG_EXTSCAN_CONFIGURE_HOTLIST_MONITOR_CMD,
+ WMI_TAG_EXTSCAN_GET_CACHED_RESULTS_CMD,
+ WMI_TAG_EXTSCAN_GET_WLAN_CHANGE_RESULTS_CMD,
+ WMI_TAG_EXTSCAN_SET_CAPABILITIES_CMD,
+ WMI_TAG_EXTSCAN_GET_CAPABILITIES_CMD,
+ WMI_TAG_EXTSCAN_OPERATION_EVENT,
+ WMI_TAG_EXTSCAN_START_STOP_EVENT,
+ WMI_TAG_EXTSCAN_TABLE_USAGE_EVENT,
+ WMI_TAG_EXTSCAN_WLAN_DESCRIPTOR_EVENT,
+ WMI_TAG_EXTSCAN_RSSI_INFO_EVENT,
+ WMI_TAG_EXTSCAN_CACHED_RESULTS_EVENT,
+ WMI_TAG_EXTSCAN_WLAN_CHANGE_RESULTS_EVENT,
+ WMI_TAG_EXTSCAN_WLAN_CHANGE_RESULT_BSSID_EVENT,
+ WMI_TAG_EXTSCAN_HOTLIST_MATCH_EVENT,
+ WMI_TAG_EXTSCAN_CAPABILITIES_EVENT,
+ WMI_TAG_EXTSCAN_CACHE_CAPABILITIES_EVENT,
+ WMI_TAG_EXTSCAN_WLAN_CHANGE_MONITOR_CAPABILITIES_EVENT,
+ WMI_TAG_EXTSCAN_HOTLIST_MONITOR_CAPABILITIES_EVENT,
+ WMI_TAG_D0_WOW_ENABLE_DISABLE_CMD,
+ WMI_TAG_D0_WOW_DISABLE_ACK_EVENT,
+ WMI_TAG_UNIT_TEST_CMD,
+ WMI_TAG_ROAM_OFFLOAD_TLV_PARAM,
+ WMI_TAG_ROAM_11I_OFFLOAD_TLV_PARAM,
+ WMI_TAG_ROAM_11R_OFFLOAD_TLV_PARAM,
+ WMI_TAG_ROAM_ESE_OFFLOAD_TLV_PARAM,
+ WMI_TAG_ROAM_SYNCH_EVENT,
+ WMI_TAG_ROAM_SYNCH_COMPLETE,
+ WMI_TAG_EXTWOW_ENABLE_CMD,
+ WMI_TAG_EXTWOW_SET_APP_TYPE1_PARAMS_CMD,
+ WMI_TAG_EXTWOW_SET_APP_TYPE2_PARAMS_CMD,
+ WMI_TAG_LPI_STATUS_EVENT,
+ WMI_TAG_LPI_HANDOFF_EVENT,
+ WMI_TAG_VDEV_RATE_STATS_EVENT,
+ WMI_TAG_VDEV_RATE_HT_INFO,
+ WMI_TAG_RIC_REQUEST,
+ WMI_TAG_PDEV_GET_TEMPERATURE_CMD,
+ WMI_TAG_PDEV_TEMPERATURE_EVENT,
+ WMI_TAG_SET_DHCP_SERVER_OFFLOAD_CMD,
+ WMI_TAG_TPC_CHAINMASK_CONFIG_CMD,
+ WMI_TAG_RIC_TSPEC,
+ WMI_TAG_TPC_CHAINMASK_CONFIG,
+ WMI_TAG_IPA_OFFLOAD_ENABLE_DISABLE_CMD,
+ WMI_TAG_SCAN_PROB_REQ_OUI_CMD,
+ WMI_TAG_KEY_MATERIAL,
+ WMI_TAG_TDLS_SET_OFFCHAN_MODE_CMD,
+ WMI_TAG_SET_LED_FLASHING_CMD,
+ WMI_TAG_MDNS_OFFLOAD_CMD,
+ WMI_TAG_MDNS_SET_FQDN_CMD,
+ WMI_TAG_MDNS_SET_RESP_CMD,
+ WMI_TAG_MDNS_GET_STATS_CMD,
+ WMI_TAG_MDNS_STATS_EVENT,
+ WMI_TAG_ROAM_INVOKE_CMD,
+ WMI_TAG_PDEV_RESUME_EVENT,
+ WMI_TAG_PDEV_SET_ANTENNA_DIVERSITY_CMD,
+ WMI_TAG_SAP_OFL_ENABLE_CMD,
+ WMI_TAG_SAP_OFL_ADD_STA_EVENT,
+ WMI_TAG_SAP_OFL_DEL_STA_EVENT,
+ WMI_TAG_APFIND_CMD_PARAM,
+ WMI_TAG_APFIND_EVENT_HDR,
+ WMI_TAG_OCB_SET_SCHED_CMD,
+ WMI_TAG_OCB_SET_SCHED_EVENT,
+ WMI_TAG_OCB_SET_CONFIG_CMD,
+ WMI_TAG_OCB_SET_CONFIG_RESP_EVENT,
+ WMI_TAG_OCB_SET_UTC_TIME_CMD,
+ WMI_TAG_OCB_START_TIMING_ADVERT_CMD,
+ WMI_TAG_OCB_STOP_TIMING_ADVERT_CMD,
+ WMI_TAG_OCB_GET_TSF_TIMER_CMD,
+ WMI_TAG_OCB_GET_TSF_TIMER_RESP_EVENT,
+ WMI_TAG_DCC_GET_STATS_CMD,
+ WMI_TAG_DCC_CHANNEL_STATS_REQUEST,
+ WMI_TAG_DCC_GET_STATS_RESP_EVENT,
+ WMI_TAG_DCC_CLEAR_STATS_CMD,
+ WMI_TAG_DCC_UPDATE_NDL_CMD,
+ WMI_TAG_DCC_UPDATE_NDL_RESP_EVENT,
+ WMI_TAG_DCC_STATS_EVENT,
+ WMI_TAG_OCB_CHANNEL,
+ WMI_TAG_OCB_SCHEDULE_ELEMENT,
+ WMI_TAG_DCC_NDL_STATS_PER_CHANNEL,
+ WMI_TAG_DCC_NDL_CHAN,
+ WMI_TAG_QOS_PARAMETER,
+ WMI_TAG_DCC_NDL_ACTIVE_STATE_CONFIG,
+ WMI_TAG_ROAM_SCAN_EXTENDED_THRESHOLD_PARAM,
+ WMI_TAG_ROAM_FILTER,
+ WMI_TAG_PASSPOINT_CONFIG_CMD,
+ WMI_TAG_PASSPOINT_EVENT_HDR,
+ WMI_TAG_EXTSCAN_CONFIGURE_HOTLIST_SSID_MONITOR_CMD,
+ WMI_TAG_EXTSCAN_HOTLIST_SSID_MATCH_EVENT,
+ WMI_TAG_VDEV_TSF_TSTAMP_ACTION_CMD,
+ WMI_TAG_VDEV_TSF_REPORT_EVENT,
+ WMI_TAG_GET_FW_MEM_DUMP,
+ WMI_TAG_UPDATE_FW_MEM_DUMP,
+ WMI_TAG_FW_MEM_DUMP_PARAMS,
+ WMI_TAG_DEBUG_MESG_FLUSH,
+ WMI_TAG_DEBUG_MESG_FLUSH_COMPLETE,
+ WMI_TAG_PEER_SET_RATE_REPORT_CONDITION,
+ WMI_TAG_ROAM_SUBNET_CHANGE_CONFIG,
+ WMI_TAG_VDEV_SET_IE_CMD,
+ WMI_TAG_RSSI_BREACH_MONITOR_CONFIG,
+ WMI_TAG_RSSI_BREACH_EVENT,
+ WMI_TAG_WOW_EVENT_INITIAL_WAKEUP,
+ WMI_TAG_SOC_SET_PCL_CMD,
+ WMI_TAG_SOC_SET_HW_MODE_CMD,
+ WMI_TAG_SOC_SET_HW_MODE_RESPONSE_EVENT,
+ WMI_TAG_SOC_HW_MODE_TRANSITION_EVENT,
+ WMI_TAG_VDEV_TXRX_STREAMS,
+ WMI_TAG_SOC_SET_HW_MODE_RESPONSE_VDEV_MAC_ENTRY,
+ WMI_TAG_SOC_SET_DUAL_MAC_CONFIG_CMD,
+ WMI_TAG_SOC_SET_DUAL_MAC_CONFIG_RESPONSE_EVENT,
+ WMI_TAG_WOW_IOAC_SOCK_PATTERN_T,
+ WMI_TAG_WOW_ENABLE_ICMPV6_NA_FLT_CMD,
+ WMI_TAG_DIAG_EVENT_LOG_CONFIG,
+ WMI_TAG_DIAG_EVENT_LOG_SUPPORTED_EVENT_FIXED_PARAMS,
+ WMI_TAG_PACKET_FILTER_CONFIG,
+ WMI_TAG_PACKET_FILTER_ENABLE,
+ WMI_TAG_SAP_SET_BLACKLIST_PARAM_CMD,
+ WMI_TAG_MGMT_TX_SEND_CMD,
+ WMI_TAG_MGMT_TX_COMPL_EVENT,
+ WMI_TAG_SOC_SET_ANTENNA_MODE_CMD,
+ WMI_TAG_WOW_UDP_SVC_OFLD_CMD,
+ WMI_TAG_LRO_INFO_CMD,
+ WMI_TAG_ROAM_EARLYSTOP_RSSI_THRES_PARAM,
+ WMI_TAG_SERVICE_READY_EXT_EVENT,
+ WMI_TAG_MAWC_SENSOR_REPORT_IND_CMD,
+ WMI_TAG_MAWC_ENABLE_SENSOR_EVENT,
+ WMI_TAG_ROAM_CONFIGURE_MAWC_CMD,
+ WMI_TAG_NLO_CONFIGURE_MAWC_CMD,
+ WMI_TAG_EXTSCAN_CONFIGURE_MAWC_CMD,
+ WMI_TAG_PEER_ASSOC_CONF_EVENT,
+ WMI_TAG_WOW_HOSTWAKEUP_GPIO_PIN_PATTERN_CONFIG_CMD,
+ WMI_TAG_AP_PS_EGAP_PARAM_CMD,
+ WMI_TAG_AP_PS_EGAP_INFO_EVENT,
+ WMI_TAG_PMF_OFFLOAD_SET_SA_QUERY_CMD,
+ WMI_TAG_TRANSFER_DATA_TO_FLASH_CMD,
+ WMI_TAG_TRANSFER_DATA_TO_FLASH_COMPLETE_EVENT,
+ WMI_TAG_SCPC_EVENT,
+ WMI_TAG_AP_PS_EGAP_INFO_CHAINMASK_LIST,
+ WMI_TAG_STA_SMPS_FORCE_MODE_COMPLETE_EVENT,
+ WMI_TAG_BPF_GET_CAPABILITY_CMD,
+ WMI_TAG_BPF_CAPABILITY_INFO_EVT,
+ WMI_TAG_BPF_GET_VDEV_STATS_CMD,
+ WMI_TAG_BPF_VDEV_STATS_INFO_EVT,
+ WMI_TAG_BPF_SET_VDEV_INSTRUCTIONS_CMD,
+ WMI_TAG_BPF_DEL_VDEV_INSTRUCTIONS_CMD,
+ WMI_TAG_VDEV_DELETE_RESP_EVENT,
+ WMI_TAG_PEER_DELETE_RESP_EVENT,
+ WMI_TAG_ROAM_DENSE_THRES_PARAM,
+ WMI_TAG_ENLO_CANDIDATE_SCORE_PARAM,
+ WMI_TAG_PEER_UPDATE_WDS_ENTRY_CMD,
+ WMI_TAG_VDEV_CONFIG_RATEMASK,
+ WMI_TAG_PDEV_FIPS_CMD,
+ WMI_TAG_PDEV_SMART_ANT_ENABLE_CMD,
+ WMI_TAG_PDEV_SMART_ANT_SET_RX_ANTENNA_CMD,
+ WMI_TAG_PEER_SMART_ANT_SET_TX_ANTENNA_CMD,
+ WMI_TAG_PEER_SMART_ANT_SET_TRAIN_ANTENNA_CMD,
+ WMI_TAG_PEER_SMART_ANT_SET_NODE_CONFIG_OPS_CMD,
+ WMI_TAG_PDEV_SET_ANT_SWITCH_TBL_CMD,
+ WMI_TAG_PDEV_SET_CTL_TABLE_CMD,
+ WMI_TAG_PDEV_SET_MIMOGAIN_TABLE_CMD,
+ WMI_TAG_FWTEST_SET_PARAM_CMD,
+ WMI_TAG_PEER_ATF_REQUEST,
+ WMI_TAG_VDEV_ATF_REQUEST,
+ WMI_TAG_PDEV_GET_ANI_CCK_CONFIG_CMD,
+ WMI_TAG_PDEV_GET_ANI_OFDM_CONFIG_CMD,
+ WMI_TAG_INST_RSSI_STATS_RESP,
+ WMI_TAG_MED_UTIL_REPORT_EVENT,
+ WMI_TAG_PEER_STA_PS_STATECHANGE_EVENT,
+ WMI_TAG_WDS_ADDR_EVENT,
+ WMI_TAG_PEER_RATECODE_LIST_EVENT,
+ WMI_TAG_PDEV_NFCAL_POWER_ALL_CHANNELS_EVENT,
+ WMI_TAG_PDEV_TPC_EVENT,
+ WMI_TAG_ANI_OFDM_EVENT,
+ WMI_TAG_ANI_CCK_EVENT,
+ WMI_TAG_PDEV_CHANNEL_HOPPING_EVENT,
+ WMI_TAG_PDEV_FIPS_EVENT,
+ WMI_TAG_ATF_PEER_INFO,
+ WMI_TAG_PDEV_GET_TPC_CMD,
+ WMI_TAG_VDEV_FILTER_NRP_CONFIG_CMD,
+ WMI_TAG_QBOOST_CFG_CMD,
+ WMI_TAG_PDEV_SMART_ANT_GPIO_HANDLE,
+ WMI_TAG_PEER_SMART_ANT_SET_TX_ANTENNA_SERIES,
+ WMI_TAG_PEER_SMART_ANT_SET_TRAIN_ANTENNA_PARAM,
+ WMI_TAG_PDEV_SET_ANT_CTRL_CHAIN,
+ WMI_TAG_PEER_CCK_OFDM_RATE_INFO,
+ WMI_TAG_PEER_MCS_RATE_INFO,
+ WMI_TAG_PDEV_NFCAL_POWER_ALL_CHANNELS_NFDBR,
+ WMI_TAG_PDEV_NFCAL_POWER_ALL_CHANNELS_NFDBM,
+ WMI_TAG_PDEV_NFCAL_POWER_ALL_CHANNELS_FREQNUM,
+ WMI_TAG_MU_REPORT_TOTAL_MU,
+ WMI_TAG_VDEV_SET_DSCP_TID_MAP_CMD,
+ WMI_TAG_ROAM_SET_MBO,
+ WMI_TAG_MIB_STATS_ENABLE_CMD,
+ WMI_TAG_NAN_DISC_IFACE_CREATED_EVENT,
+ WMI_TAG_NAN_DISC_IFACE_DELETED_EVENT,
+ WMI_TAG_NAN_STARTED_CLUSTER_EVENT,
+ WMI_TAG_NAN_JOINED_CLUSTER_EVENT,
+ WMI_TAG_NDI_GET_CAP_REQ,
+ WMI_TAG_NDP_INITIATOR_REQ,
+ WMI_TAG_NDP_RESPONDER_REQ,
+ WMI_TAG_NDP_END_REQ,
+ WMI_TAG_NDI_CAP_RSP_EVENT,
+ WMI_TAG_NDP_INITIATOR_RSP_EVENT,
+ WMI_TAG_NDP_RESPONDER_RSP_EVENT,
+ WMI_TAG_NDP_END_RSP_EVENT,
+ WMI_TAG_NDP_INDICATION_EVENT,
+ WMI_TAG_NDP_CONFIRM_EVENT,
+ WMI_TAG_NDP_END_INDICATION_EVENT,
+ WMI_TAG_VDEV_SET_QUIET_CMD,
+ WMI_TAG_PDEV_SET_PCL_CMD,
+ WMI_TAG_PDEV_SET_HW_MODE_CMD,
+ WMI_TAG_PDEV_SET_MAC_CONFIG_CMD,
+ WMI_TAG_PDEV_SET_ANTENNA_MODE_CMD,
+ WMI_TAG_PDEV_SET_HW_MODE_RESPONSE_EVENT,
+ WMI_TAG_PDEV_HW_MODE_TRANSITION_EVENT,
+ WMI_TAG_PDEV_SET_HW_MODE_RESPONSE_VDEV_MAC_ENTRY,
+ WMI_TAG_PDEV_SET_MAC_CONFIG_RESPONSE_EVENT,
+ WMI_TAG_COEX_CONFIG_CMD,
+ WMI_TAG_CONFIG_ENHANCED_MCAST_FILTER,
+ WMI_TAG_CHAN_AVOID_RPT_ALLOW_CMD,
+ WMI_TAG_SET_PERIODIC_CHANNEL_STATS_CONFIG,
+ WMI_TAG_VDEV_SET_CUSTOM_AGGR_SIZE_CMD,
+ WMI_TAG_PDEV_WAL_POWER_DEBUG_CMD,
+ WMI_TAG_MAC_PHY_CAPABILITIES,
+ WMI_TAG_HW_MODE_CAPABILITIES,
+ WMI_TAG_SOC_MAC_PHY_HW_MODE_CAPS,
+ WMI_TAG_HAL_REG_CAPABILITIES_EXT,
+ WMI_TAG_SOC_HAL_REG_CAPABILITIES,
+ WMI_TAG_VDEV_WISA_CMD,
+ WMI_TAG_TX_POWER_LEVEL_STATS_EVT,
+ WMI_TAG_SCAN_ADAPTIVE_DWELL_PARAMETERS_TLV,
+ WMI_TAG_SCAN_ADAPTIVE_DWELL_CONFIG,
+ WMI_TAG_WOW_SET_ACTION_WAKE_UP_CMD,
+ WMI_TAG_NDP_END_RSP_PER_NDI,
+ WMI_TAG_PEER_BWF_REQUEST,
+ WMI_TAG_BWF_PEER_INFO,
+ WMI_TAG_DBGLOG_TIME_STAMP_SYNC_CMD,
+ WMI_TAG_RMC_SET_LEADER_CMD,
+ WMI_TAG_RMC_MANUAL_LEADER_EVENT,
+ WMI_TAG_PER_CHAIN_RSSI_STATS,
+ WMI_TAG_RSSI_STATS,
+ WMI_TAG_P2P_LO_START_CMD,
+ WMI_TAG_P2P_LO_STOP_CMD,
+ WMI_TAG_P2P_LO_STOPPED_EVENT,
+ WMI_TAG_REORDER_QUEUE_SETUP_CMD,
+ WMI_TAG_REORDER_QUEUE_REMOVE_CMD,
+ WMI_TAG_SET_MULTIPLE_MCAST_FILTER_CMD,
+ WMI_TAG_MGMT_TX_COMPL_BUNDLE_EVENT,
+ WMI_TAG_READ_DATA_FROM_FLASH_CMD,
+ WMI_TAG_READ_DATA_FROM_FLASH_EVENT,
+ WMI_TAG_PDEV_SET_REORDER_TIMEOUT_VAL_CMD,
+ WMI_TAG_PEER_SET_RX_BLOCKSIZE_CMD,
+ WMI_TAG_PDEV_SET_WAKEUP_CONFIG_CMDID,
+ WMI_TAG_TLV_BUF_LEN_PARAM,
+ WMI_TAG_SERVICE_AVAILABLE_EVENT,
+ WMI_TAG_PEER_ANTDIV_INFO_REQ_CMD,
+ WMI_TAG_PEER_ANTDIV_INFO_EVENT,
+ WMI_TAG_PEER_ANTDIV_INFO,
+ WMI_TAG_PDEV_GET_ANTDIV_STATUS_CMD,
+ WMI_TAG_PDEV_ANTDIV_STATUS_EVENT,
+ WMI_TAG_MNT_FILTER_CMD,
+ WMI_TAG_GET_CHIP_POWER_STATS_CMD,
+ WMI_TAG_PDEV_CHIP_POWER_STATS_EVENT,
+ WMI_TAG_COEX_GET_ANTENNA_ISOLATION_CMD,
+ WMI_TAG_COEX_REPORT_ISOLATION_EVENT,
+ WMI_TAG_CHAN_CCA_STATS,
+ WMI_TAG_PEER_SIGNAL_STATS,
+ WMI_TAG_TX_STATS,
+ WMI_TAG_PEER_AC_TX_STATS,
+ WMI_TAG_RX_STATS,
+ WMI_TAG_PEER_AC_RX_STATS,
+ WMI_TAG_REPORT_STATS_EVENT,
+ WMI_TAG_CHAN_CCA_STATS_THRESH,
+ WMI_TAG_PEER_SIGNAL_STATS_THRESH,
+ WMI_TAG_TX_STATS_THRESH,
+ WMI_TAG_RX_STATS_THRESH,
+ WMI_TAG_PDEV_SET_STATS_THRESHOLD_CMD,
+ WMI_TAG_REQUEST_WLAN_STATS_CMD,
+ WMI_TAG_RX_AGGR_FAILURE_EVENT,
+ WMI_TAG_RX_AGGR_FAILURE_INFO,
+ WMI_TAG_VDEV_ENCRYPT_DECRYPT_DATA_REQ_CMD,
+ WMI_TAG_VDEV_ENCRYPT_DECRYPT_DATA_RESP_EVENT,
+ WMI_TAG_PDEV_BAND_TO_MAC,
+ WMI_TAG_TBTT_OFFSET_INFO,
+ WMI_TAG_TBTT_OFFSET_EXT_EVENT,
+ WMI_TAG_SAR_LIMITS_CMD,
+ WMI_TAG_SAR_LIMIT_CMD_ROW,
+ WMI_TAG_PDEV_DFS_PHYERR_OFFLOAD_ENABLE_CMD,
+ WMI_TAG_PDEV_DFS_PHYERR_OFFLOAD_DISABLE_CMD,
+ WMI_TAG_VDEV_ADFS_CH_CFG_CMD,
+ WMI_TAG_VDEV_ADFS_OCAC_ABORT_CMD,
+ WMI_TAG_PDEV_DFS_RADAR_DETECTION_EVENT,
+ WMI_TAG_VDEV_ADFS_OCAC_COMPLETE_EVENT,
+ WMI_TAG_VDEV_DFS_CAC_COMPLETE_EVENT,
+ WMI_TAG_VENDOR_OUI,
+ WMI_TAG_REQUEST_RCPI_CMD,
+ WMI_TAG_UPDATE_RCPI_EVENT,
+ WMI_TAG_REQUEST_PEER_STATS_INFO_CMD,
+ WMI_TAG_PEER_STATS_INFO,
+ WMI_TAG_PEER_STATS_INFO_EVENT,
+ WMI_TAG_PKGID_EVENT,
+ WMI_TAG_CONNECTED_NLO_RSSI_PARAMS,
+ WMI_TAG_SET_CURRENT_COUNTRY_CMD,
+ WMI_TAG_REGULATORY_RULE_STRUCT,
+ WMI_TAG_REG_CHAN_LIST_CC_EVENT,
+ WMI_TAG_11D_SCAN_START_CMD,
+ WMI_TAG_11D_SCAN_STOP_CMD,
+ WMI_TAG_11D_NEW_COUNTRY_EVENT,
+ WMI_TAG_REQUEST_RADIO_CHAN_STATS_CMD,
+ WMI_TAG_RADIO_CHAN_STATS,
+ WMI_TAG_RADIO_CHAN_STATS_EVENT,
+ WMI_TAG_ROAM_PER_CONFIG,
+ WMI_TAG_VDEV_ADD_MAC_ADDR_TO_RX_FILTER_CMD,
+ WMI_TAG_VDEV_ADD_MAC_ADDR_TO_RX_FILTER_STATUS_EVENT,
+ WMI_TAG_BPF_SET_VDEV_ACTIVE_MODE_CMD,
+ WMI_TAG_HW_DATA_FILTER_CMD,
+ WMI_TAG_CONNECTED_NLO_BSS_BAND_RSSI_PREF,
+ WMI_TAG_PEER_OPER_MODE_CHANGE_EVENT,
+ WMI_TAG_CHIP_POWER_SAVE_FAILURE_DETECTED,
+ WMI_TAG_PDEV_MULTIPLE_VDEV_RESTART_REQUEST_CMD,
+ WMI_TAG_PDEV_CSA_SWITCH_COUNT_STATUS_EVENT,
+ WMI_TAG_PDEV_UPDATE_PKT_ROUTING_CMD,
+ WMI_TAG_PDEV_CHECK_CAL_VERSION_CMD,
+ WMI_TAG_PDEV_CHECK_CAL_VERSION_EVENT,
+ WMI_TAG_PDEV_SET_DIVERSITY_GAIN_CMD,
+ WMI_TAG_MAC_PHY_CHAINMASK_COMBO,
+ WMI_TAG_MAC_PHY_CHAINMASK_CAPABILITY,
+ WMI_TAG_VDEV_SET_ARP_STATS_CMD,
+ WMI_TAG_VDEV_GET_ARP_STATS_CMD,
+ WMI_TAG_VDEV_GET_ARP_STATS_EVENT,
+ WMI_TAG_IFACE_OFFLOAD_STATS,
+ WMI_TAG_REQUEST_STATS_CMD_SUB_STRUCT_PARAM,
+ WMI_TAG_RSSI_CTL_EXT,
+ WMI_TAG_SINGLE_PHYERR_EXT_RX_HDR,
+ WMI_TAG_COEX_BT_ACTIVITY_EVENT,
+ WMI_TAG_VDEV_GET_TX_POWER_CMD,
+ WMI_TAG_VDEV_TX_POWER_EVENT,
+ WMI_TAG_OFFCHAN_DATA_TX_COMPL_EVENT,
+ WMI_TAG_OFFCHAN_DATA_TX_SEND_CMD,
+ WMI_TAG_TX_SEND_PARAMS,
+ WMI_TAG_HE_RATE_SET,
+ WMI_TAG_CONGESTION_STATS,
+ WMI_TAG_SET_INIT_COUNTRY_CMD,
+ WMI_TAG_SCAN_DBS_DUTY_CYCLE,
+ WMI_TAG_SCAN_DBS_DUTY_CYCLE_PARAM_TLV,
+ WMI_TAG_PDEV_DIV_GET_RSSI_ANTID,
+ WMI_TAG_THERM_THROT_CONFIG_REQUEST,
+ WMI_TAG_THERM_THROT_LEVEL_CONFIG_INFO,
+ WMI_TAG_THERM_THROT_STATS_EVENT,
+ WMI_TAG_THERM_THROT_LEVEL_STATS_INFO,
+ WMI_TAG_PDEV_DIV_RSSI_ANTID_EVENT,
+ WMI_TAG_OEM_DMA_RING_CAPABILITIES,
+ WMI_TAG_OEM_DMA_RING_CFG_REQ,
+ WMI_TAG_OEM_DMA_RING_CFG_RSP,
+ WMI_TAG_OEM_INDIRECT_DATA,
+ WMI_TAG_OEM_DMA_BUF_RELEASE,
+ WMI_TAG_OEM_DMA_BUF_RELEASE_ENTRY,
+ WMI_TAG_PDEV_BSS_CHAN_INFO_REQUEST,
+ WMI_TAG_PDEV_BSS_CHAN_INFO_EVENT,
+ WMI_TAG_ROAM_LCA_DISALLOW_CONFIG,
+ WMI_TAG_VDEV_LIMIT_OFFCHAN_CMD,
+ WMI_TAG_ROAM_RSSI_REJECTION_OCE_CONFIG,
+ WMI_TAG_UNIT_TEST_EVENT,
+ WMI_TAG_ROAM_FILS_OFFLOAD,
+ WMI_TAG_PDEV_UPDATE_PMK_CACHE_CMD,
+ WMI_TAG_PMK_CACHE,
+ WMI_TAG_PDEV_UPDATE_FILS_HLP_PKT_CMD,
+ WMI_TAG_ROAM_FILS_SYNCH,
+ WMI_TAG_GTK_OFFLOAD_EXTENDED,
+ WMI_TAG_ROAM_BG_SCAN_ROAMING,
+ WMI_TAG_OIC_PING_OFFLOAD_PARAMS_CMD,
+ WMI_TAG_OIC_PING_OFFLOAD_SET_ENABLE_CMD,
+ WMI_TAG_OIC_PING_HANDOFF_EVENT,
+ WMI_TAG_DHCP_LEASE_RENEW_OFFLOAD_CMD,
+ WMI_TAG_DHCP_LEASE_RENEW_EVENT,
+ WMI_TAG_BTM_CONFIG,
+ WMI_TAG_DEBUG_MESG_FW_DATA_STALL,
+ WMI_TAG_WLM_CONFIG_CMD,
+ WMI_TAG_PDEV_UPDATE_CTLTABLE_REQUEST,
+ WMI_TAG_PDEV_UPDATE_CTLTABLE_EVENT,
+ WMI_TAG_ROAM_CND_SCORING_PARAM,
+ WMI_TAG_PDEV_CONFIG_VENDOR_OUI_ACTION,
+ WMI_TAG_VENDOR_OUI_EXT,
+ WMI_TAG_ROAM_SYNCH_FRAME_EVENT,
+ WMI_TAG_FD_SEND_FROM_HOST_CMD,
+ WMI_TAG_ENABLE_FILS_CMD,
+ WMI_TAG_HOST_SWFDA_EVENT,
+ WMI_TAG_BCN_OFFLOAD_CTRL_CMD,
+ WMI_TAG_PDEV_SET_AC_TX_QUEUE_OPTIMIZED_CMD,
+ WMI_TAG_STATS_PERIOD,
+ WMI_TAG_NDL_SCHEDULE_UPDATE,
+ WMI_TAG_PEER_TID_MSDUQ_QDEPTH_THRESH_UPDATE_CMD,
+ WMI_TAG_MSDUQ_QDEPTH_THRESH_UPDATE,
+ WMI_TAG_PDEV_SET_RX_FILTER_PROMISCUOUS_CMD,
+ WMI_TAG_SAR2_RESULT_EVENT,
+ WMI_TAG_SAR_CAPABILITIES,
+ WMI_TAG_SAP_OBSS_DETECTION_CFG_CMD,
+ WMI_TAG_SAP_OBSS_DETECTION_INFO_EVT,
+ WMI_TAG_DMA_RING_CAPABILITIES,
+ WMI_TAG_DMA_RING_CFG_REQ,
+ WMI_TAG_DMA_RING_CFG_RSP,
+ WMI_TAG_DMA_BUF_RELEASE,
+ WMI_TAG_DMA_BUF_RELEASE_ENTRY,
+ WMI_TAG_SAR_GET_LIMITS_CMD,
+ WMI_TAG_SAR_GET_LIMITS_EVENT,
+ WMI_TAG_SAR_GET_LIMITS_EVENT_ROW,
+ WMI_TAG_OFFLOAD_11K_REPORT,
+ WMI_TAG_INVOKE_NEIGHBOR_REPORT,
+ WMI_TAG_NEIGHBOR_REPORT_OFFLOAD,
+ WMI_TAG_VDEV_SET_CONNECTIVITY_CHECK_STATS,
+ WMI_TAG_VDEV_GET_CONNECTIVITY_CHECK_STATS,
+ WMI_TAG_BPF_SET_VDEV_ENABLE_CMD,
+ WMI_TAG_BPF_SET_VDEV_WORK_MEMORY_CMD,
+ WMI_TAG_BPF_GET_VDEV_WORK_MEMORY_CMD,
+ WMI_TAG_BPF_GET_VDEV_WORK_MEMORY_RESP_EVT,
+ WMI_TAG_PDEV_GET_NFCAL_POWER,
+ WMI_TAG_BSS_COLOR_CHANGE_ENABLE,
+ WMI_TAG_OBSS_COLOR_COLLISION_DET_CONFIG,
+ WMI_TAG_OBSS_COLOR_COLLISION_EVT,
+ WMI_TAG_RUNTIME_DPD_RECAL_CMD,
+ WMI_TAG_TWT_ENABLE_CMD,
+ WMI_TAG_TWT_DISABLE_CMD,
+ WMI_TAG_TWT_ADD_DIALOG_CMD,
+ WMI_TAG_TWT_DEL_DIALOG_CMD,
+ WMI_TAG_TWT_PAUSE_DIALOG_CMD,
+ WMI_TAG_TWT_RESUME_DIALOG_CMD,
+ WMI_TAG_TWT_ENABLE_COMPLETE_EVENT,
+ WMI_TAG_TWT_DISABLE_COMPLETE_EVENT,
+ WMI_TAG_TWT_ADD_DIALOG_COMPLETE_EVENT,
+ WMI_TAG_TWT_DEL_DIALOG_COMPLETE_EVENT,
+ WMI_TAG_TWT_PAUSE_DIALOG_COMPLETE_EVENT,
+ WMI_TAG_TWT_RESUME_DIALOG_COMPLETE_EVENT,
+ WMI_TAG_REQUEST_ROAM_SCAN_STATS_CMD,
+ WMI_TAG_ROAM_SCAN_STATS_EVENT,
+ WMI_TAG_PEER_TID_CONFIGURATIONS_CMD,
+ WMI_TAG_VDEV_SET_CUSTOM_SW_RETRY_TH_CMD,
+ WMI_TAG_GET_TPC_POWER_CMD,
+ WMI_TAG_GET_TPC_POWER_EVENT,
+ WMI_TAG_DMA_BUF_RELEASE_SPECTRAL_META_DATA,
+ WMI_TAG_MOTION_DET_CONFIG_PARAMS_CMD,
+ WMI_TAG_MOTION_DET_BASE_LINE_CONFIG_PARAMS_CMD,
+ WMI_TAG_MOTION_DET_START_STOP_CMD,
+ WMI_TAG_MOTION_DET_BASE_LINE_START_STOP_CMD,
+ WMI_TAG_MOTION_DET_EVENT,
+ WMI_TAG_MOTION_DET_BASE_LINE_EVENT,
+ WMI_TAG_NDP_TRANSPORT_IP,
+ WMI_TAG_OBSS_SPATIAL_REUSE_SET_CMD,
+ WMI_TAG_ESP_ESTIMATE_EVENT,
+ WMI_TAG_NAN_HOST_CONFIG,
+ WMI_TAG_SPECTRAL_BIN_SCALING_PARAMS,
+ WMI_TAG_PEER_CFR_CAPTURE_CMD,
+ WMI_TAG_PEER_CHAN_WIDTH_SWITCH_CMD,
+ WMI_TAG_CHAN_WIDTH_PEER_LIST,
+ WMI_TAG_OBSS_SPATIAL_REUSE_SET_DEF_OBSS_THRESH_CMD,
+ WMI_TAG_PDEV_HE_TB_ACTION_FRM_CMD,
+ WMI_TAG_PEER_EXTD2_STATS,
+ WMI_TAG_HPCS_PULSE_START_CMD,
+ WMI_TAG_PDEV_CTL_FAILSAFE_CHECK_EVENT,
+ WMI_TAG_VDEV_CHAINMASK_CONFIG_CMD,
+ WMI_TAG_VDEV_BCN_OFFLOAD_QUIET_CONFIG_CMD,
+ WMI_TAG_NAN_EVENT_INFO,
+ WMI_TAG_NDP_CHANNEL_INFO,
+ WMI_TAG_NDP_CMD,
+ WMI_TAG_NDP_EVENT,
+ /* TODO add all the missing cmds */
+ WMI_TAG_PDEV_PEER_PKTLOG_FILTER_CMD = 0x301,
+ WMI_TAG_PDEV_PEER_PKTLOG_FILTER_INFO,
+ WMI_TAG_MAX
+};
+
+enum wmi_tlv_service {
+ WMI_TLV_SERVICE_BEACON_OFFLOAD = 0,
+ WMI_TLV_SERVICE_SCAN_OFFLOAD = 1,
+ WMI_TLV_SERVICE_ROAM_SCAN_OFFLOAD = 2,
+ WMI_TLV_SERVICE_BCN_MISS_OFFLOAD = 3,
+ WMI_TLV_SERVICE_STA_PWRSAVE = 4,
+ WMI_TLV_SERVICE_STA_ADVANCED_PWRSAVE = 5,
+ WMI_TLV_SERVICE_AP_UAPSD = 6,
+ WMI_TLV_SERVICE_AP_DFS = 7,
+ WMI_TLV_SERVICE_11AC = 8,
+ WMI_TLV_SERVICE_BLOCKACK = 9,
+ WMI_TLV_SERVICE_PHYERR = 10,
+ WMI_TLV_SERVICE_BCN_FILTER = 11,
+ WMI_TLV_SERVICE_RTT = 12,
+ WMI_TLV_SERVICE_WOW = 13,
+ WMI_TLV_SERVICE_RATECTRL_CACHE = 14,
+ WMI_TLV_SERVICE_IRAM_TIDS = 15,
+ WMI_TLV_SERVICE_ARPNS_OFFLOAD = 16,
+ WMI_TLV_SERVICE_NLO = 17,
+ WMI_TLV_SERVICE_GTK_OFFLOAD = 18,
+ WMI_TLV_SERVICE_SCAN_SCH = 19,
+ WMI_TLV_SERVICE_CSA_OFFLOAD = 20,
+ WMI_TLV_SERVICE_CHATTER = 21,
+ WMI_TLV_SERVICE_COEX_FREQAVOID = 22,
+ WMI_TLV_SERVICE_PACKET_POWER_SAVE = 23,
+ WMI_TLV_SERVICE_FORCE_FW_HANG = 24,
+ WMI_TLV_SERVICE_GPIO = 25,
+ WMI_TLV_SERVICE_STA_DTIM_PS_MODULATED_DTIM = 26,
+ WMI_STA_UAPSD_BASIC_AUTO_TRIG = 27,
+ WMI_STA_UAPSD_VAR_AUTO_TRIG = 28,
+ WMI_TLV_SERVICE_STA_KEEP_ALIVE = 29,
+ WMI_TLV_SERVICE_TX_ENCAP = 30,
+ WMI_TLV_SERVICE_AP_PS_DETECT_OUT_OF_SYNC = 31,
+ WMI_TLV_SERVICE_EARLY_RX = 32,
+ WMI_TLV_SERVICE_STA_SMPS = 33,
+ WMI_TLV_SERVICE_FWTEST = 34,
+ WMI_TLV_SERVICE_STA_WMMAC = 35,
+ WMI_TLV_SERVICE_TDLS = 36,
+ WMI_TLV_SERVICE_BURST = 37,
+ WMI_TLV_SERVICE_MCC_BCN_INTERVAL_CHANGE = 38,
+ WMI_TLV_SERVICE_ADAPTIVE_OCS = 39,
+ WMI_TLV_SERVICE_BA_SSN_SUPPORT = 40,
+ WMI_TLV_SERVICE_FILTER_IPSEC_NATKEEPALIVE = 41,
+ WMI_TLV_SERVICE_WLAN_HB = 42,
+ WMI_TLV_SERVICE_LTE_ANT_SHARE_SUPPORT = 43,
+ WMI_TLV_SERVICE_BATCH_SCAN = 44,
+ WMI_TLV_SERVICE_QPOWER = 45,
+ WMI_TLV_SERVICE_PLMREQ = 46,
+ WMI_TLV_SERVICE_THERMAL_MGMT = 47,
+ WMI_TLV_SERVICE_RMC = 48,
+ WMI_TLV_SERVICE_MHF_OFFLOAD = 49,
+ WMI_TLV_SERVICE_COEX_SAR = 50,
+ WMI_TLV_SERVICE_BCN_TXRATE_OVERRIDE = 51,
+ WMI_TLV_SERVICE_NAN = 52,
+ WMI_TLV_SERVICE_L1SS_STAT = 53,
+ WMI_TLV_SERVICE_ESTIMATE_LINKSPEED = 54,
+ WMI_TLV_SERVICE_OBSS_SCAN = 55,
+ WMI_TLV_SERVICE_TDLS_OFFCHAN = 56,
+ WMI_TLV_SERVICE_TDLS_UAPSD_BUFFER_STA = 57,
+ WMI_TLV_SERVICE_TDLS_UAPSD_SLEEP_STA = 58,
+ WMI_TLV_SERVICE_IBSS_PWRSAVE = 59,
+ WMI_TLV_SERVICE_LPASS = 60,
+ WMI_TLV_SERVICE_EXTSCAN = 61,
+ WMI_TLV_SERVICE_D0WOW = 62,
+ WMI_TLV_SERVICE_HSOFFLOAD = 63,
+ WMI_TLV_SERVICE_ROAM_HO_OFFLOAD = 64,
+ WMI_TLV_SERVICE_RX_FULL_REORDER = 65,
+ WMI_TLV_SERVICE_DHCP_OFFLOAD = 66,
+ WMI_TLV_SERVICE_STA_RX_IPA_OFFLOAD_SUPPORT = 67,
+ WMI_TLV_SERVICE_MDNS_OFFLOAD = 68,
+ WMI_TLV_SERVICE_SAP_AUTH_OFFLOAD = 69,
+ WMI_TLV_SERVICE_DUAL_BAND_SIMULTANEOUS_SUPPORT = 70,
+ WMI_TLV_SERVICE_OCB = 71,
+ WMI_TLV_SERVICE_AP_ARPNS_OFFLOAD = 72,
+ WMI_TLV_SERVICE_PER_BAND_CHAINMASK_SUPPORT = 73,
+ WMI_TLV_SERVICE_PACKET_FILTER_OFFLOAD = 74,
+ WMI_TLV_SERVICE_MGMT_TX_HTT = 75,
+ WMI_TLV_SERVICE_MGMT_TX_WMI = 76,
+ WMI_TLV_SERVICE_EXT_MSG = 77,
+ WMI_TLV_SERVICE_MAWC = 78,
+ WMI_TLV_SERVICE_PEER_ASSOC_CONF = 79,
+ WMI_TLV_SERVICE_EGAP = 80,
+ WMI_TLV_SERVICE_STA_PMF_OFFLOAD = 81,
+ WMI_TLV_SERVICE_UNIFIED_WOW_CAPABILITY = 82,
+ WMI_TLV_SERVICE_ENHANCED_PROXY_STA = 83,
+ WMI_TLV_SERVICE_ATF = 84,
+ WMI_TLV_SERVICE_COEX_GPIO = 85,
+ WMI_TLV_SERVICE_AUX_SPECTRAL_INTF = 86,
+ WMI_TLV_SERVICE_AUX_CHAN_LOAD_INTF = 87,
+ WMI_TLV_SERVICE_BSS_CHANNEL_INFO_64 = 88,
+ WMI_TLV_SERVICE_ENTERPRISE_MESH = 89,
+ WMI_TLV_SERVICE_RESTRT_CHNL_SUPPORT = 90,
+ WMI_TLV_SERVICE_BPF_OFFLOAD = 91,
+ WMI_TLV_SERVICE_SYNC_DELETE_CMDS = 92,
+ WMI_TLV_SERVICE_SMART_ANTENNA_SW_SUPPORT = 93,
+ WMI_TLV_SERVICE_SMART_ANTENNA_HW_SUPPORT = 94,
+ WMI_TLV_SERVICE_RATECTRL_LIMIT_MAX_MIN_RATES = 95,
+ WMI_TLV_SERVICE_NAN_DATA = 96,
+ WMI_TLV_SERVICE_NAN_RTT = 97,
+ WMI_TLV_SERVICE_11AX = 98,
+ WMI_TLV_SERVICE_DEPRECATED_REPLACE = 99,
+ WMI_TLV_SERVICE_TDLS_CONN_TRACKER_IN_HOST_MODE = 100,
+ WMI_TLV_SERVICE_ENHANCED_MCAST_FILTER = 101,
+ WMI_TLV_SERVICE_PERIODIC_CHAN_STAT_SUPPORT = 102,
+ WMI_TLV_SERVICE_MESH_11S = 103,
+ WMI_TLV_SERVICE_HALF_RATE_QUARTER_RATE_SUPPORT = 104,
+ WMI_TLV_SERVICE_VDEV_RX_FILTER = 105,
+ WMI_TLV_SERVICE_P2P_LISTEN_OFFLOAD_SUPPORT = 106,
+ WMI_TLV_SERVICE_MARK_FIRST_WAKEUP_PACKET = 107,
+ WMI_TLV_SERVICE_MULTIPLE_MCAST_FILTER_SET = 108,
+ WMI_TLV_SERVICE_HOST_MANAGED_RX_REORDER = 109,
+ WMI_TLV_SERVICE_FLASH_RDWR_SUPPORT = 110,
+ WMI_TLV_SERVICE_WLAN_STATS_REPORT = 111,
+ WMI_TLV_SERVICE_TX_MSDU_ID_NEW_PARTITION_SUPPORT = 112,
+ WMI_TLV_SERVICE_DFS_PHYERR_OFFLOAD = 113,
+ WMI_TLV_SERVICE_RCPI_SUPPORT = 114,
+ WMI_TLV_SERVICE_FW_MEM_DUMP_SUPPORT = 115,
+ WMI_TLV_SERVICE_PEER_STATS_INFO = 116,
+ WMI_TLV_SERVICE_REGULATORY_DB = 117,
+ WMI_TLV_SERVICE_11D_OFFLOAD = 118,
+ WMI_TLV_SERVICE_HW_DATA_FILTERING = 119,
+ WMI_TLV_SERVICE_MULTIPLE_VDEV_RESTART = 120,
+ WMI_TLV_SERVICE_PKT_ROUTING = 121,
+ WMI_TLV_SERVICE_CHECK_CAL_VERSION = 122,
+ WMI_TLV_SERVICE_OFFCHAN_TX_WMI = 123,
+ WMI_TLV_SERVICE_8SS_TX_BFEE = 124,
+ WMI_TLV_SERVICE_EXTENDED_NSS_SUPPORT = 125,
+ WMI_TLV_SERVICE_ACK_TIMEOUT = 126,
+ WMI_TLV_SERVICE_PDEV_BSS_CHANNEL_INFO_64 = 127,
+
+ WMI_MAX_SERVICE = 128,
+
+ WMI_TLV_SERVICE_CHAN_LOAD_INFO = 128,
+ WMI_TLV_SERVICE_TX_PPDU_INFO_STATS_SUPPORT = 129,
+ WMI_TLV_SERVICE_VDEV_LIMIT_OFFCHAN_SUPPORT = 130,
+ WMI_TLV_SERVICE_FILS_SUPPORT = 131,
+ WMI_TLV_SERVICE_WLAN_OIC_PING_OFFLOAD = 132,
+ WMI_TLV_SERVICE_WLAN_DHCP_RENEW = 133,
+ WMI_TLV_SERVICE_MAWC_SUPPORT = 134,
+ WMI_TLV_SERVICE_VDEV_LATENCY_CONFIG = 135,
+ WMI_TLV_SERVICE_PDEV_UPDATE_CTLTABLE_SUPPORT = 136,
+ WMI_TLV_SERVICE_PKTLOG_SUPPORT_OVER_HTT = 137,
+ WMI_TLV_SERVICE_VDEV_MULTI_GROUP_KEY_SUPPORT = 138,
+ WMI_TLV_SERVICE_SCAN_PHYMODE_SUPPORT = 139,
+ WMI_TLV_SERVICE_THERM_THROT = 140,
+ WMI_TLV_SERVICE_BCN_OFFLOAD_START_STOP_SUPPORT = 141,
+ WMI_TLV_SERVICE_WOW_WAKEUP_BY_TIMER_PATTERN = 142,
+ WMI_TLV_SERVICE_PEER_MAP_UNMAP_V2_SUPPORT = 143,
+ WMI_TLV_SERVICE_OFFCHAN_DATA_TID_SUPPORT = 144,
+ WMI_TLV_SERVICE_RX_PROMISC_ENABLE_SUPPORT = 145,
+ WMI_TLV_SERVICE_SUPPORT_DIRECT_DMA = 146,
+ WMI_TLV_SERVICE_AP_OBSS_DETECTION_OFFLOAD = 147,
+ WMI_TLV_SERVICE_11K_NEIGHBOUR_REPORT_SUPPORT = 148,
+ WMI_TLV_SERVICE_LISTEN_INTERVAL_OFFLOAD_SUPPORT = 149,
+ WMI_TLV_SERVICE_BSS_COLOR_OFFLOAD = 150,
+ WMI_TLV_SERVICE_RUNTIME_DPD_RECAL = 151,
+ WMI_TLV_SERVICE_STA_TWT = 152,
+ WMI_TLV_SERVICE_AP_TWT = 153,
+ WMI_TLV_SERVICE_GMAC_OFFLOAD_SUPPORT = 154,
+ WMI_TLV_SERVICE_SPOOF_MAC_SUPPORT = 155,
+ WMI_TLV_SERVICE_PEER_TID_CONFIGS_SUPPORT = 156,
+ WMI_TLV_SERVICE_VDEV_SWRETRY_PER_AC_CONFIG_SUPPORT = 157,
+ WMI_TLV_SERVICE_DUAL_BEACON_ON_SINGLE_MAC_SCC_SUPPORT = 158,
+ WMI_TLV_SERVICE_DUAL_BEACON_ON_SINGLE_MAC_MCC_SUPPORT = 159,
+ WMI_TLV_SERVICE_MOTION_DET = 160,
+ WMI_TLV_SERVICE_INFRA_MBSSID = 161,
+ WMI_TLV_SERVICE_OBSS_SPATIAL_REUSE = 162,
+ WMI_TLV_SERVICE_VDEV_DIFFERENT_BEACON_INTERVAL_SUPPORT = 163,
+ WMI_TLV_SERVICE_NAN_DBS_SUPPORT = 164,
+ WMI_TLV_SERVICE_NDI_DBS_SUPPORT = 165,
+ WMI_TLV_SERVICE_NAN_SAP_SUPPORT = 166,
+ WMI_TLV_SERVICE_NDI_SAP_SUPPORT = 167,
+ WMI_TLV_SERVICE_CFR_CAPTURE_SUPPORT = 168,
+ WMI_TLV_SERVICE_CFR_CAPTURE_IND_MSG_TYPE_1 = 169,
+ WMI_TLV_SERVICE_ESP_SUPPORT = 170,
+ WMI_TLV_SERVICE_PEER_CHWIDTH_CHANGE = 171,
+ WMI_TLV_SERVICE_WLAN_HPCS_PULSE = 172,
+ WMI_TLV_SERVICE_PER_VDEV_CHAINMASK_CONFIG_SUPPORT = 173,
+ WMI_TLV_SERVICE_TX_DATA_MGMT_ACK_RSSI = 174,
+ WMI_TLV_SERVICE_NAN_DISABLE_SUPPORT = 175,
+ WMI_TLV_SERVICE_HTT_H2T_NO_HTC_HDR_LEN_IN_MSG_LEN = 176,
+
+	WMI_MAX_EXT_SERVICE
+};
+
+enum {
+ WMI_SMPS_FORCED_MODE_NONE = 0,
+ WMI_SMPS_FORCED_MODE_DISABLED,
+ WMI_SMPS_FORCED_MODE_STATIC,
+ WMI_SMPS_FORCED_MODE_DYNAMIC
+};
+
+#define WMI_TPC_CHAINMASK_CONFIG_BAND_2G 0
+#define WMI_TPC_CHAINMASK_CONFIG_BAND_5G 1
+#define WMI_NUM_SUPPORTED_BAND_MAX 2
+
+#define WMI_PEER_MIMO_PS_STATE 0x1
+#define WMI_PEER_AMPDU 0x2
+#define WMI_PEER_AUTHORIZE 0x3
+#define WMI_PEER_CHWIDTH 0x4
+#define WMI_PEER_NSS 0x5
+#define WMI_PEER_USE_4ADDR 0x6
+#define WMI_PEER_MEMBERSHIP 0x7
+#define WMI_PEER_USERPOS 0x8
+#define WMI_PEER_CRIT_PROTO_HINT_ENABLED 0x9
+#define WMI_PEER_TX_FAIL_CNT_THR 0xA
+#define WMI_PEER_SET_HW_RETRY_CTS2S 0xB
+#define WMI_PEER_IBSS_ATIM_WINDOW_LENGTH 0xC
+#define WMI_PEER_PHYMODE 0xD
+#define WMI_PEER_USE_FIXED_PWR 0xE
+#define WMI_PEER_PARAM_FIXED_RATE 0xF
+#define WMI_PEER_SET_MU_WHITELIST 0x10
+#define WMI_PEER_SET_MAX_TX_RATE 0x11
+#define WMI_PEER_SET_MIN_TX_RATE 0x12
+#define WMI_PEER_SET_DEFAULT_ROUTING 0x13
+
+/* slot time long */
+#define WMI_VDEV_SLOT_TIME_LONG 0x1
+/* slot time short */
+#define WMI_VDEV_SLOT_TIME_SHORT 0x2
+/* preamble long */
+#define WMI_VDEV_PREAMBLE_LONG 0x1
+/* preamble short */
+#define WMI_VDEV_PREAMBLE_SHORT 0x2
+
+enum wmi_peer_smps_state {
+ WMI_PEER_SMPS_PS_NONE = 0x0,
+ WMI_PEER_SMPS_STATIC = 0x1,
+ WMI_PEER_SMPS_DYNAMIC = 0x2
+};
+
+enum wmi_peer_chwidth {
+ WMI_PEER_CHWIDTH_20MHZ = 0,
+ WMI_PEER_CHWIDTH_40MHZ = 1,
+ WMI_PEER_CHWIDTH_80MHZ = 2,
+ WMI_PEER_CHWIDTH_160MHZ = 3,
+};
+
+enum wmi_beacon_gen_mode {
+ WMI_BEACON_STAGGERED_MODE = 0,
+ WMI_BEACON_BURST_MODE = 1
+};
+
+struct wmi_host_pdev_band_to_mac {
+ u32 pdev_id;
+ u32 start_freq;
+ u32 end_freq;
+};
+
+struct ath11k_ppe_threshold {
+ u32 numss_m1;
+ u32 ru_bit_mask;
+ u32 ppet16_ppet8_ru3_ru0[PSOC_HOST_MAX_NUM_SS];
+};
+
+struct ath11k_service_ext_param {
+ u32 default_conc_scan_config_bits;
+ u32 default_fw_config_bits;
+ struct ath11k_ppe_threshold ppet;
+ u32 he_cap_info;
+ u32 mpdu_density;
+ u32 max_bssid_rx_filters;
+ u32 num_hw_modes;
+ u32 num_phy;
+};
+
+struct ath11k_hw_mode_caps {
+ u32 hw_mode_id;
+ u32 phy_id_map;
+ u32 hw_mode_config_type;
+};
+
+#define PSOC_HOST_MAX_PHY_SIZE (3)
+#define ATH11K_11B_SUPPORT BIT(0)
+#define ATH11K_11G_SUPPORT BIT(1)
+#define ATH11K_11A_SUPPORT BIT(2)
+#define ATH11K_11N_SUPPORT BIT(3)
+#define ATH11K_11AC_SUPPORT BIT(4)
+#define ATH11K_11AX_SUPPORT BIT(5)
+
+struct ath11k_hal_reg_capabilities_ext {
+ u32 phy_id;
+ u32 eeprom_reg_domain;
+ u32 eeprom_reg_domain_ext;
+ u32 regcap1;
+ u32 regcap2;
+ u32 wireless_modes;
+ u32 low_2ghz_chan;
+ u32 high_2ghz_chan;
+ u32 low_5ghz_chan;
+ u32 high_5ghz_chan;
+};
+
+#define WMI_HOST_MAX_PDEV 3
+
+struct wlan_host_mem_chunk {
+ u32 tlv_header;
+ u32 req_id;
+ u32 ptr;
+ u32 size;
+} __packed;
+
+struct wmi_host_mem_chunk {
+ void *vaddr;
+ dma_addr_t paddr;
+ u32 len;
+ u32 req_id;
+};
+
+struct wmi_init_cmd_param {
+ u32 tlv_header;
+ struct target_resource_config *res_cfg;
+ u8 num_mem_chunks;
+ struct wmi_host_mem_chunk *mem_chunks;
+ u32 hw_mode_id;
+ u32 num_band_to_mac;
+ struct wmi_host_pdev_band_to_mac band_to_mac[WMI_HOST_MAX_PDEV];
+};
+
+struct wmi_pdev_band_to_mac {
+ u32 tlv_header;
+ u32 pdev_id;
+ u32 start_freq;
+ u32 end_freq;
+} __packed;
+
+struct wmi_pdev_set_hw_mode_cmd_param {
+ u32 tlv_header;
+ u32 pdev_id;
+ u32 hw_mode_index;
+ u32 num_band_to_mac;
+} __packed;
+
+struct wmi_ppe_threshold {
+	u32 numss_m1; /* NSS - 1 */
+ union {
+ u32 ru_count;
+ u32 ru_mask;
+ } __packed;
+ u32 ppet16_ppet8_ru3_ru0[WMI_MAX_NUM_SS];
+} __packed;
+
+#define HW_BD_INFO_SIZE 5
+
+struct wmi_abi_version {
+ u32 abi_version_0;
+ u32 abi_version_1;
+ u32 abi_version_ns_0;
+ u32 abi_version_ns_1;
+ u32 abi_version_ns_2;
+ u32 abi_version_ns_3;
+} __packed;
+
+struct wmi_init_cmd {
+ u32 tlv_header;
+ struct wmi_abi_version host_abi_vers;
+ u32 num_host_mem_chunks;
+} __packed;
+
+struct wmi_resource_config {
+ u32 tlv_header;
+ u32 num_vdevs;
+ u32 num_peers;
+ u32 num_offload_peers;
+ u32 num_offload_reorder_buffs;
+ u32 num_peer_keys;
+ u32 num_tids;
+ u32 ast_skid_limit;
+ u32 tx_chain_mask;
+ u32 rx_chain_mask;
+ u32 rx_timeout_pri[4];
+ u32 rx_decap_mode;
+ u32 scan_max_pending_req;
+ u32 bmiss_offload_max_vdev;
+ u32 roam_offload_max_vdev;
+ u32 roam_offload_max_ap_profiles;
+ u32 num_mcast_groups;
+ u32 num_mcast_table_elems;
+ u32 mcast2ucast_mode;
+ u32 tx_dbg_log_size;
+ u32 num_wds_entries;
+ u32 dma_burst_size;
+ u32 mac_aggr_delim;
+ u32 rx_skip_defrag_timeout_dup_detection_check;
+ u32 vow_config;
+ u32 gtk_offload_max_vdev;
+ u32 num_msdu_desc;
+ u32 max_frag_entries;
+ u32 num_tdls_vdevs;
+ u32 num_tdls_conn_table_entries;
+ u32 beacon_tx_offload_max_vdev;
+ u32 num_multicast_filter_entries;
+ u32 num_wow_filters;
+ u32 num_keep_alive_pattern;
+ u32 keep_alive_pattern_size;
+ u32 max_tdls_concurrent_sleep_sta;
+ u32 max_tdls_concurrent_buffer_sta;
+ u32 wmi_send_separate;
+ u32 num_ocb_vdevs;
+ u32 num_ocb_channels;
+ u32 num_ocb_schedules;
+ u32 flag1;
+ u32 smart_ant_cap;
+ u32 bk_minfree;
+ u32 be_minfree;
+ u32 vi_minfree;
+ u32 vo_minfree;
+ u32 alloc_frag_desc_for_data_pkt;
+ u32 num_ns_ext_tuples_cfg;
+ u32 bpf_instruction_size;
+ u32 max_bssid_rx_filters;
+ u32 use_pdev_id;
+ u32 max_num_dbs_scan_duty_cycle;
+ u32 max_num_group_keys;
+ u32 peer_map_unmap_v2_support;
+ u32 sched_params;
+ u32 twt_ap_pdev_count;
+ u32 twt_ap_sta_count;
+} __packed;
+
+struct wmi_service_ready_event {
+ u32 fw_build_vers;
+ struct wmi_abi_version fw_abi_vers;
+ u32 phy_capability;
+ u32 max_frag_entry;
+ u32 num_rf_chains;
+ u32 ht_cap_info;
+ u32 vht_cap_info;
+ u32 vht_supp_mcs;
+ u32 hw_min_tx_power;
+ u32 hw_max_tx_power;
+ u32 sys_cap_info;
+ u32 min_pkt_size_enable;
+ u32 max_bcn_ie_size;
+ u32 num_mem_reqs;
+ u32 max_num_scan_channels;
+ u32 hw_bd_id;
+ u32 hw_bd_info[HW_BD_INFO_SIZE];
+ u32 max_supported_macs;
+ u32 wmi_fw_sub_feat_caps;
+ u32 num_dbs_hw_modes;
+ /* txrx_chainmask
+ * [7:0] - 2G band tx chain mask
+ * [15:8] - 2G band rx chain mask
+ * [23:16] - 5G band tx chain mask
+ * [31:24] - 5G band rx chain mask
+ */
+ u32 txrx_chainmask;
+ u32 default_dbs_hw_mode_index;
+ u32 num_msdu_desc;
+} __packed;
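+
+/* Example (illustrative): decoding the txrx_chainmask layout documented
+ * above with the kernel's GENMASK()/FIELD_GET() helpers; the EXAMPLE_*
+ * macro and function names are hypothetical and used only for this sketch.
+ */
+#define EXAMPLE_TXRX_CHAINMASK_2G_TX GENMASK(7, 0)
+#define EXAMPLE_TXRX_CHAINMASK_2G_RX GENMASK(15, 8)
+#define EXAMPLE_TXRX_CHAINMASK_5G_TX GENMASK(23, 16)
+#define EXAMPLE_TXRX_CHAINMASK_5G_RX GENMASK(31, 24)
+
+static inline u8 example_5g_tx_chainmask(u32 txrx_chainmask)
+{
+	/* FIELD_GET() masks the field and shifts it down to bit 0 */
+	return FIELD_GET(EXAMPLE_TXRX_CHAINMASK_5G_TX, txrx_chainmask);
+}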
+
+#define WMI_SERVICE_BM_SIZE ((WMI_MAX_SERVICE + sizeof(u32) - 1) / sizeof(u32))
+
+#define WMI_SERVICE_SEGMENT_BM_SIZE32 4 /* 4x u32 = 128 bits */
+#define WMI_SERVICE_EXT_BM_SIZE (WMI_SERVICE_SEGMENT_BM_SIZE32 * sizeof(u32))
+#define WMI_AVAIL_SERVICE_BITS_IN_SIZE32 32
+#define WMI_SERVICE_BITS_IN_SIZE32 4
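+
+/* Example (illustrative): a minimal sketch of testing one bit of the
+ * extended service bitmap, assuming the usual packing of 32 service
+ * bits per u32 word and ignoring the segment offset carried in
+ * struct wmi_service_available_event below; the helper name is
+ * hypothetical.
+ */
+static inline bool example_ext_service_enabled(const u32 *bitmap,
+					       u32 service_id)
+{
+	u32 word = service_id / WMI_AVAIL_SERVICE_BITS_IN_SIZE32;
+	u32 bit = service_id % WMI_AVAIL_SERVICE_BITS_IN_SIZE32;
+
+	if (word >= WMI_SERVICE_SEGMENT_BM_SIZE32)
+		return false;
+
+	return bitmap[word] & BIT(bit);
+}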
+
+struct wmi_service_ready_ext_event {
+ u32 default_conc_scan_config_bits;
+ u32 default_fw_config_bits;
+ struct wmi_ppe_threshold ppet;
+ u32 he_cap_info;
+ u32 mpdu_density;
+ u32 max_bssid_rx_filters;
+ u32 fw_build_vers_ext;
+ u32 max_nlo_ssids;
+ u32 max_bssid_indicator;
+ u32 he_cap_info_ext;
+} __packed;
+
+struct wmi_soc_mac_phy_hw_mode_caps {
+ u32 num_hw_modes;
+ u32 num_chainmask_tables;
+} __packed;
+
+struct wmi_hw_mode_capabilities {
+ u32 tlv_header;
+ u32 hw_mode_id;
+ u32 phy_id_map;
+ u32 hw_mode_config_type;
+} __packed;
+
+#define WMI_MAX_HECAP_PHY_SIZE (3)
+
+struct wmi_mac_phy_capabilities {
+ u32 hw_mode_id;
+ u32 pdev_id;
+ u32 phy_id;
+ u32 supported_flags;
+ u32 supported_bands;
+ u32 ampdu_density;
+ u32 max_bw_supported_2g;
+ u32 ht_cap_info_2g;
+ u32 vht_cap_info_2g;
+ u32 vht_supp_mcs_2g;
+ u32 he_cap_info_2g;
+ u32 he_supp_mcs_2g;
+ u32 tx_chain_mask_2g;
+ u32 rx_chain_mask_2g;
+ u32 max_bw_supported_5g;
+ u32 ht_cap_info_5g;
+ u32 vht_cap_info_5g;
+ u32 vht_supp_mcs_5g;
+ u32 he_cap_info_5g;
+ u32 he_supp_mcs_5g;
+ u32 tx_chain_mask_5g;
+ u32 rx_chain_mask_5g;
+ u32 he_cap_phy_info_2g[WMI_MAX_HECAP_PHY_SIZE];
+ u32 he_cap_phy_info_5g[WMI_MAX_HECAP_PHY_SIZE];
+ struct wmi_ppe_threshold he_ppet2g;
+ struct wmi_ppe_threshold he_ppet5g;
+ u32 chainmask_table_id;
+ u32 lmac_id;
+ u32 he_cap_info_2g_ext;
+ u32 he_cap_info_5g_ext;
+ u32 he_cap_info_internal;
+} __packed;
+
+struct wmi_hal_reg_capabilities_ext {
+ u32 tlv_header;
+ u32 phy_id;
+ u32 eeprom_reg_domain;
+ u32 eeprom_reg_domain_ext;
+ u32 regcap1;
+ u32 regcap2;
+ u32 wireless_modes;
+ u32 low_2ghz_chan;
+ u32 high_2ghz_chan;
+ u32 low_5ghz_chan;
+ u32 high_5ghz_chan;
+} __packed;
+
+struct wmi_soc_hal_reg_capabilities {
+ u32 num_phy;
+} __packed;
+
+/* Two-word representation of a MAC address */
+struct wmi_mac_addr {
+ union {
+ u8 addr[6];
+ struct {
+ u32 word0;
+ u32 word1;
+ } __packed;
+ } __packed;
+} __packed;
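+
+/* Example (illustrative): filling the two-word union above from a plain
+ * six-byte MAC address; a minimal sketch assuming ETH_ALEN (6) byte
+ * addresses, with a hypothetical helper name.
+ */
+static inline void example_set_mac_addr(struct wmi_mac_addr *dst,
+					const u8 *mac)
+{
+	memset(dst, 0, sizeof(*dst));
+	/* addr[] overlays word0/word1, so a byte copy fills both views */
+	memcpy(dst->addr, mac, ETH_ALEN);
+}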
+
+struct wmi_ready_event {
+ struct wmi_abi_version fw_abi_vers;
+ struct wmi_mac_addr mac_addr;
+ u32 status;
+ u32 num_dscp_table;
+ u32 num_extra_mac_addr;
+ u32 num_total_peers;
+ u32 num_extra_peers;
+} __packed;
+
+struct wmi_service_available_event {
+ u32 wmi_service_segment_offset;
+ u32 wmi_service_segment_bitmap[WMI_SERVICE_SEGMENT_BM_SIZE32];
+} __packed;
+
+struct ath11k_pdev_wmi {
+ struct ath11k_wmi_base *wmi_ab;
+ enum ath11k_htc_ep_id eid;
+ const struct wmi_peer_flags_map *peer_flags;
+ u32 rx_decap_mode;
+};
+
+struct vdev_create_params {
+ u8 if_id;
+ u32 type;
+ u32 subtype;
+ struct {
+ u8 tx;
+ u8 rx;
+ } chains[NUM_NL80211_BANDS];
+ u32 pdev_id;
+};
+
+struct wmi_vdev_create_cmd {
+ u32 tlv_header;
+ u32 vdev_id;
+ u32 vdev_type;
+ u32 vdev_subtype;
+ struct wmi_mac_addr vdev_macaddr;
+ u32 num_cfg_txrx_streams;
+ u32 pdev_id;
+} __packed;
+
+struct wmi_vdev_txrx_streams {
+ u32 tlv_header;
+ u32 band;
+ u32 supported_tx_streams;
+ u32 supported_rx_streams;
+} __packed;
+
+struct wmi_vdev_delete_cmd {
+ u32 tlv_header;
+ u32 vdev_id;
+} __packed;
+
+struct wmi_vdev_up_cmd {
+ u32 tlv_header;
+ u32 vdev_id;
+ u32 vdev_assoc_id;
+ struct wmi_mac_addr vdev_bssid;
+ struct wmi_mac_addr trans_bssid;
+ u32 profile_idx;
+ u32 profile_num;
+} __packed;
+
+struct wmi_vdev_stop_cmd {
+ u32 tlv_header;
+ u32 vdev_id;
+} __packed;
+
+struct wmi_vdev_down_cmd {
+ u32 tlv_header;
+ u32 vdev_id;
+} __packed;
+
+#define WMI_VDEV_START_HIDDEN_SSID BIT(0)
+#define WMI_VDEV_START_PMF_ENABLED BIT(1)
+#define WMI_VDEV_START_LDPC_RX_ENABLED BIT(3)
+
+struct wmi_ssid {
+ u32 ssid_len;
+ u32 ssid[8];
+} __packed;
+
+#define ATH11K_VDEV_SETUP_TIMEOUT_HZ (1 * HZ)
+
+struct wmi_vdev_start_request_cmd {
+ u32 tlv_header;
+ u32 vdev_id;
+ u32 requestor_id;
+ u32 beacon_interval;
+ u32 dtim_period;
+ u32 flags;
+ struct wmi_ssid ssid;
+ u32 bcn_tx_rate;
+ u32 bcn_txpower;
+ u32 num_noa_descriptors;
+ u32 disable_hw_ack;
+ u32 preferred_tx_streams;
+ u32 preferred_rx_streams;
+ u32 he_ops;
+ u32 cac_duration_ms;
+ u32 regdomain;
+} __packed;
+
+#define MGMT_TX_DL_FRM_LEN 64
+#define WMI_MAC_MAX_SSID_LENGTH 32
+struct mac_ssid {
+ u8 length;
+ u8 mac_ssid[WMI_MAC_MAX_SSID_LENGTH];
+} __packed;
+
+struct wmi_p2p_noa_descriptor {
+ u32 type_count;
+ u32 duration;
+ u32 interval;
+ u32 start_time;
+};
+
+struct channel_param {
+ u8 chan_id;
+ u8 pwr;
+ u32 mhz;
+ u32 half_rate:1,
+ quarter_rate:1,
+ dfs_set:1,
+ dfs_set_cfreq2:1,
+ is_chan_passive:1,
+ allow_ht:1,
+ allow_vht:1,
+ allow_he:1,
+ set_agile:1;
+ u32 phy_mode;
+ u32 cfreq1;
+ u32 cfreq2;
+ char maxpower;
+ char minpower;
+ char maxregpower;
+ u8 antennamax;
+ u8 reg_class_id;
+} __packed;
+
+enum wmi_phy_mode {
+ MODE_11A = 0,
+ MODE_11G = 1, /* 11b/g Mode */
+ MODE_11B = 2, /* 11b Mode */
+ MODE_11GONLY = 3, /* 11g only Mode */
+ MODE_11NA_HT20 = 4,
+ MODE_11NG_HT20 = 5,
+ MODE_11NA_HT40 = 6,
+ MODE_11NG_HT40 = 7,
+ MODE_11AC_VHT20 = 8,
+ MODE_11AC_VHT40 = 9,
+ MODE_11AC_VHT80 = 10,
+ MODE_11AC_VHT20_2G = 11,
+ MODE_11AC_VHT40_2G = 12,
+ MODE_11AC_VHT80_2G = 13,
+ MODE_11AC_VHT80_80 = 14,
+ MODE_11AC_VHT160 = 15,
+ MODE_11AX_HE20 = 16,
+ MODE_11AX_HE40 = 17,
+ MODE_11AX_HE80 = 18,
+ MODE_11AX_HE80_80 = 19,
+ MODE_11AX_HE160 = 20,
+ MODE_11AX_HE20_2G = 21,
+ MODE_11AX_HE40_2G = 22,
+ MODE_11AX_HE80_2G = 23,
+ MODE_UNKNOWN = 24,
+ MODE_MAX = 24
+};
+
+static inline const char *ath11k_wmi_phymode_str(enum wmi_phy_mode mode)
+{
+ switch (mode) {
+ case MODE_11A:
+ return "11a";
+ case MODE_11G:
+ return "11g";
+ case MODE_11B:
+ return "11b";
+ case MODE_11GONLY:
+ return "11gonly";
+ case MODE_11NA_HT20:
+ return "11na-ht20";
+ case MODE_11NG_HT20:
+ return "11ng-ht20";
+ case MODE_11NA_HT40:
+ return "11na-ht40";
+ case MODE_11NG_HT40:
+ return "11ng-ht40";
+ case MODE_11AC_VHT20:
+ return "11ac-vht20";
+ case MODE_11AC_VHT40:
+ return "11ac-vht40";
+ case MODE_11AC_VHT80:
+ return "11ac-vht80";
+ case MODE_11AC_VHT160:
+ return "11ac-vht160";
+ case MODE_11AC_VHT80_80:
+ return "11ac-vht80+80";
+ case MODE_11AC_VHT20_2G:
+ return "11ac-vht20-2g";
+ case MODE_11AC_VHT40_2G:
+ return "11ac-vht40-2g";
+ case MODE_11AC_VHT80_2G:
+ return "11ac-vht80-2g";
+ case MODE_11AX_HE20:
+ return "11ax-he20";
+ case MODE_11AX_HE40:
+ return "11ax-he40";
+ case MODE_11AX_HE80:
+ return "11ax-he80";
+ case MODE_11AX_HE80_80:
+ return "11ax-he80+80";
+ case MODE_11AX_HE160:
+ return "11ax-he160";
+ case MODE_11AX_HE20_2G:
+ return "11ax-he20-2g";
+ case MODE_11AX_HE40_2G:
+ return "11ax-he40-2g";
+ case MODE_11AX_HE80_2G:
+ return "11ax-he80-2g";
+ case MODE_UNKNOWN:
+ /* skip */
+ break;
+
+ /* no default handler to allow compiler to check that the
+ * enum is fully handled
+ */
+ }
+
+ return "<unknown>";
+}
+
+struct wmi_channel_arg {
+ u32 freq;
+ u32 band_center_freq1;
+ u32 band_center_freq2;
+ bool passive;
+ bool allow_ibss;
+ bool allow_ht;
+ bool allow_vht;
+ bool ht40plus;
+ bool chan_radar;
+ bool freq2_radar;
+ bool allow_he;
+ u32 min_power;
+ u32 max_power;
+ u32 max_reg_power;
+ u32 max_antenna_gain;
+ enum wmi_phy_mode mode;
+};
+
+struct wmi_vdev_start_req_arg {
+ u32 vdev_id;
+ struct wmi_channel_arg channel;
+ u32 bcn_intval;
+ u32 dtim_period;
+ u8 *ssid;
+ u32 ssid_len;
+ u32 bcn_tx_rate;
+ u32 bcn_tx_power;
+ bool disable_hw_ack;
+ bool hidden_ssid;
+ bool pmf_enabled;
+ u32 he_ops;
+ u32 cac_duration_ms;
+ u32 regdomain;
+ u32 pref_rx_streams;
+ u32 pref_tx_streams;
+ u32 num_noa_descriptors;
+};
+
+struct peer_create_params {
+ const u8 *peer_addr;
+ u32 peer_type;
+ u32 vdev_id;
+};
+
+struct peer_delete_params {
+ u8 vdev_id;
+};
+
+struct peer_flush_params {
+ u32 peer_tid_bitmap;
+ u8 vdev_id;
+};
+
+struct pdev_set_regdomain_params {
+ u16 current_rd_in_use;
+ u16 current_rd_2g;
+ u16 current_rd_5g;
+ u32 ctl_2g;
+ u32 ctl_5g;
+ u8 dfs_domain;
+ u32 pdev_id;
+};
+
+struct rx_reorder_queue_remove_params {
+ u8 *peer_macaddr;
+ u16 vdev_id;
+ u32 peer_tid_bitmap;
+};
+
+#define WMI_HOST_PDEV_ID_SOC 0xFF
+#define WMI_HOST_PDEV_ID_0 0
+#define WMI_HOST_PDEV_ID_1 1
+#define WMI_HOST_PDEV_ID_2 2
+
+#define WMI_PDEV_ID_SOC 0
+#define WMI_PDEV_ID_1ST 1
+#define WMI_PDEV_ID_2ND 2
+#define WMI_PDEV_ID_3RD 3
+
+/* Freq units in MHz */
+#define REG_RULE_START_FREQ 0x0000ffff
+#define REG_RULE_END_FREQ 0xffff0000
+#define REG_RULE_FLAGS 0x0000ffff
+#define REG_RULE_MAX_BW 0x0000ffff
+#define REG_RULE_REG_PWR 0x00ff0000
+#define REG_RULE_ANT_GAIN 0xff000000
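+
+/* Example (illustrative): the masks above select fields inside the packed
+ * words of struct wmi_regulatory_rule_struct (defined further below); a
+ * minimal sketch of testing whether a frequency falls inside one rule,
+ * with a hypothetical function name.
+ */
+static inline bool example_freq_in_reg_rule(u32 freq_info, u32 freq_mhz)
+{
+	u32 start = FIELD_GET(REG_RULE_START_FREQ, freq_info);
+	u32 end = FIELD_GET(REG_RULE_END_FREQ, freq_info);
+
+	return freq_mhz >= start && freq_mhz <= end;
+}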
+
+#define WMI_VDEV_PARAM_TXBF_SU_TX_BFEE BIT(0)
+#define WMI_VDEV_PARAM_TXBF_MU_TX_BFEE BIT(1)
+#define WMI_VDEV_PARAM_TXBF_SU_TX_BFER BIT(2)
+#define WMI_VDEV_PARAM_TXBF_MU_TX_BFER BIT(3)
+
+#define HECAP_PHYDWORD_0 0
+#define HECAP_PHYDWORD_1 1
+#define HECAP_PHYDWORD_2 2
+
+#define HECAP_PHY_SU_BFER BIT(31)
+#define HECAP_PHY_SU_BFEE BIT(0)
+#define HECAP_PHY_MU_BFER BIT(1)
+#define HECAP_PHY_UL_MUMIMO BIT(22)
+#define HECAP_PHY_UL_MUOFDMA BIT(23)
+
+#define HECAP_PHY_SUBFMR_GET(hecap_phy) \
+ FIELD_GET(HECAP_PHY_SU_BFER, hecap_phy[HECAP_PHYDWORD_0])
+
+#define HECAP_PHY_SUBFME_GET(hecap_phy) \
+ FIELD_GET(HECAP_PHY_SU_BFEE, hecap_phy[HECAP_PHYDWORD_1])
+
+#define HECAP_PHY_MUBFMR_GET(hecap_phy) \
+ FIELD_GET(HECAP_PHY_MU_BFER, hecap_phy[HECAP_PHYDWORD_1])
+
+#define HECAP_PHY_ULMUMIMO_GET(hecap_phy) \
+ FIELD_GET(HECAP_PHY_UL_MUMIMO, hecap_phy[HECAP_PHYDWORD_0])
+
+#define HECAP_PHY_ULOFDMA_GET(hecap_phy) \
+ FIELD_GET(HECAP_PHY_UL_MUOFDMA, hecap_phy[HECAP_PHYDWORD_0])
+
+#define HE_MODE_SU_TX_BFEE BIT(0)
+#define HE_MODE_SU_TX_BFER BIT(1)
+#define HE_MODE_MU_TX_BFEE BIT(2)
+#define HE_MODE_MU_TX_BFER BIT(3)
+#define HE_MODE_DL_OFDMA BIT(4)
+#define HE_MODE_UL_OFDMA BIT(5)
+#define HE_MODE_UL_MUMIMO BIT(6)
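+
+/* Example (illustrative): deriving the HE_MODE_* flags above from an HE
+ * PHY capability array using the HECAP_PHY_*_GET() accessors; a minimal
+ * sketch with a hypothetical helper name.
+ */
+static inline u32 example_he_mode_from_phy_caps(const u32 *hecap_phy)
+{
+	u32 he_mode = 0;
+
+	if (HECAP_PHY_SUBFMR_GET(hecap_phy))
+		he_mode |= HE_MODE_SU_TX_BFER;
+	if (HECAP_PHY_SUBFME_GET(hecap_phy))
+		he_mode |= HE_MODE_SU_TX_BFEE;
+	if (HECAP_PHY_MUBFMR_GET(hecap_phy))
+		he_mode |= HE_MODE_MU_TX_BFER;
+	if (HECAP_PHY_ULMUMIMO_GET(hecap_phy))
+		he_mode |= HE_MODE_UL_MUMIMO;
+	if (HECAP_PHY_ULOFDMA_GET(hecap_phy))
+		he_mode |= HE_MODE_UL_OFDMA;
+
+	return he_mode;
+}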
+
+#define HE_DL_MUOFDMA_ENABLE 1
+#define HE_UL_MUOFDMA_ENABLE 1
+#define HE_DL_MUMIMO_ENABLE 1
+#define HE_MU_BFEE_ENABLE 1
+#define HE_SU_BFEE_ENABLE 1
+
+#define HE_VHT_SOUNDING_MODE_ENABLE 1
+#define HE_SU_MU_SOUNDING_MODE_ENABLE 1
+#define HE_TRIG_NONTRIG_SOUNDING_MODE_ENABLE 1
+
+/* HE or VHT Sounding */
+#define HE_VHT_SOUNDING_MODE BIT(0)
+/* SU or MU Sounding */
+#define HE_SU_MU_SOUNDING_MODE BIT(2)
+/* Trig or Non-Trig Sounding */
+#define HE_TRIG_NONTRIG_SOUNDING_MODE BIT(3)
+
+#define WMI_TXBF_STS_CAP_OFFSET_LSB 4
+#define WMI_TXBF_STS_CAP_OFFSET_MASK 0x70
+#define WMI_BF_SOUND_DIM_OFFSET_LSB 8
+#define WMI_BF_SOUND_DIM_OFFSET_MASK 0x700
+
+struct pdev_params {
+ u32 param_id;
+ u32 param_value;
+};
+
+enum wmi_peer_type {
+ WMI_PEER_TYPE_DEFAULT = 0,
+ WMI_PEER_TYPE_BSS = 1,
+ WMI_PEER_TYPE_TDLS = 2,
+};
+
+struct wmi_peer_create_cmd {
+ u32 tlv_header;
+ u32 vdev_id;
+ struct wmi_mac_addr peer_macaddr;
+ u32 peer_type;
+} __packed;
+
+struct wmi_peer_delete_cmd {
+ u32 tlv_header;
+ u32 vdev_id;
+ struct wmi_mac_addr peer_macaddr;
+} __packed;
+
+struct wmi_peer_reorder_queue_setup_cmd {
+ u32 tlv_header;
+ u32 vdev_id;
+ struct wmi_mac_addr peer_macaddr;
+ u32 tid;
+ u32 queue_ptr_lo;
+ u32 queue_ptr_hi;
+ u32 queue_no;
+ u32 ba_window_size_valid;
+ u32 ba_window_size;
+} __packed;
+
+struct wmi_peer_reorder_queue_remove_cmd {
+ u32 tlv_header;
+ u32 vdev_id;
+ struct wmi_mac_addr peer_macaddr;
+ u32 tid_mask;
+} __packed;
+
+struct gpio_config_params {
+ u32 gpio_num;
+ u32 input;
+ u32 pull_type;
+ u32 intr_mode;
+};
+
+enum wmi_gpio_type {
+ WMI_GPIO_PULL_NONE,
+ WMI_GPIO_PULL_UP,
+ WMI_GPIO_PULL_DOWN
+};
+
+enum wmi_gpio_intr_type {
+ WMI_GPIO_INTTYPE_DISABLE,
+ WMI_GPIO_INTTYPE_RISING_EDGE,
+ WMI_GPIO_INTTYPE_FALLING_EDGE,
+ WMI_GPIO_INTTYPE_BOTH_EDGE,
+ WMI_GPIO_INTTYPE_LEVEL_LOW,
+ WMI_GPIO_INTTYPE_LEVEL_HIGH
+};
+
+enum wmi_bss_chan_info_req_type {
+ WMI_BSS_SURVEY_REQ_TYPE_READ = 1,
+ WMI_BSS_SURVEY_REQ_TYPE_READ_CLEAR,
+};
+
+struct wmi_gpio_config_cmd_param {
+ u32 tlv_header;
+ u32 gpio_num;
+ u32 input;
+ u32 pull_type;
+ u32 intr_mode;
+};
+
+struct gpio_output_params {
+ u32 gpio_num;
+ u32 set;
+};
+
+struct wmi_gpio_output_cmd_param {
+ u32 tlv_header;
+ u32 gpio_num;
+ u32 set;
+};
+
+struct set_fwtest_params {
+ u32 arg;
+ u32 value;
+};
+
+struct wmi_fwtest_set_param_cmd_param {
+ u32 tlv_header;
+ u32 param_id;
+ u32 param_value;
+};
+
+struct wmi_pdev_set_param_cmd {
+ u32 tlv_header;
+ u32 pdev_id;
+ u32 param_id;
+ u32 param_value;
+} __packed;
+
+struct wmi_pdev_set_ps_mode_cmd {
+ u32 tlv_header;
+ u32 vdev_id;
+ u32 sta_ps_mode;
+} __packed;
+
+struct wmi_pdev_suspend_cmd {
+ u32 tlv_header;
+ u32 pdev_id;
+ u32 suspend_opt;
+} __packed;
+
+struct wmi_pdev_resume_cmd {
+ u32 tlv_header;
+ u32 pdev_id;
+} __packed;
+
+struct wmi_pdev_bss_chan_info_req_cmd {
+ u32 tlv_header;
+	/* see enum wmi_bss_chan_info_req_type */
+ u32 req_type;
+} __packed;
+
+struct wmi_ap_ps_peer_cmd {
+ u32 tlv_header;
+ u32 vdev_id;
+ struct wmi_mac_addr peer_macaddr;
+ u32 param;
+ u32 value;
+} __packed;
+
+struct wmi_sta_powersave_param_cmd {
+ u32 tlv_header;
+ u32 vdev_id;
+ u32 param;
+ u32 value;
+} __packed;
+
+struct wmi_pdev_set_regdomain_cmd {
+ u32 tlv_header;
+ u32 pdev_id;
+ u32 reg_domain;
+ u32 reg_domain_2g;
+ u32 reg_domain_5g;
+ u32 conformance_test_limit_2g;
+ u32 conformance_test_limit_5g;
+ u32 dfs_domain;
+} __packed;
+
+struct wmi_peer_set_param_cmd {
+ u32 tlv_header;
+ u32 vdev_id;
+ struct wmi_mac_addr peer_macaddr;
+ u32 param_id;
+ u32 param_value;
+} __packed;
+
+struct wmi_peer_flush_tids_cmd {
+ u32 tlv_header;
+ u32 vdev_id;
+ struct wmi_mac_addr peer_macaddr;
+ u32 peer_tid_bitmap;
+} __packed;
+
+struct wmi_dfs_phyerr_offload_cmd {
+ u32 tlv_header;
+ u32 pdev_id;
+} __packed;
+
+struct wmi_bcn_offload_ctrl_cmd {
+ u32 tlv_header;
+ u32 vdev_id;
+ u32 bcn_ctrl_op;
+} __packed;
+
+enum scan_dwelltime_adaptive_mode {
+ SCAN_DWELL_MODE_DEFAULT = 0,
+ SCAN_DWELL_MODE_CONSERVATIVE = 1,
+ SCAN_DWELL_MODE_MODERATE = 2,
+ SCAN_DWELL_MODE_AGGRESSIVE = 3,
+ SCAN_DWELL_MODE_STATIC = 4
+};
+
+#define WLAN_SCAN_MAX_NUM_SSID 10
+#define WLAN_SCAN_MAX_NUM_BSSID 10
+#define WLAN_SCAN_MAX_NUM_CHANNELS 40
+
+#define WLAN_SSID_MAX_LEN 32
+
+struct element_info {
+ u32 len;
+ u8 *ptr;
+};
+
+struct wlan_ssid {
+ u8 length;
+ u8 ssid[WLAN_SSID_MAX_LEN];
+};
+
+#define WMI_IE_BITMAP_SIZE 8
+
+#define WMI_SCAN_MAX_NUM_SSID 0x0A
+/* prefix used by scan requestor ids on the host */
+#define WMI_HOST_SCAN_REQUESTOR_ID_PREFIX 0xA000
+
+/* prefix used by scan request ids generated on the host */
+/* host cycles through the lower 12 bits to generate ids */
+#define WMI_HOST_SCAN_REQ_ID_PREFIX 0xA000
+
+#define WLAN_SCAN_PARAMS_MAX_SSID 16
+#define WLAN_SCAN_PARAMS_MAX_BSSID 4
+#define WLAN_SCAN_PARAMS_MAX_IE_LEN 256
+
+/* Values lower than this may be refused by some firmware revisions, which
+ * then report scan completion with a timed-out reason.
+ */
+#define WMI_SCAN_CHAN_MIN_TIME_MSEC 40
+
+/* Scan priority numbers must be sequential, starting with 0 */
+enum wmi_scan_priority {
+ WMI_SCAN_PRIORITY_VERY_LOW = 0,
+ WMI_SCAN_PRIORITY_LOW,
+ WMI_SCAN_PRIORITY_MEDIUM,
+ WMI_SCAN_PRIORITY_HIGH,
+ WMI_SCAN_PRIORITY_VERY_HIGH,
+ WMI_SCAN_PRIORITY_COUNT /* number of priorities supported */
+};
+
+enum wmi_scan_event_type {
+ WMI_SCAN_EVENT_STARTED = BIT(0),
+ WMI_SCAN_EVENT_COMPLETED = BIT(1),
+ WMI_SCAN_EVENT_BSS_CHANNEL = BIT(2),
+ WMI_SCAN_EVENT_FOREIGN_CHAN = BIT(3),
+ WMI_SCAN_EVENT_DEQUEUED = BIT(4),
+ /* possibly by high-prio scan */
+ WMI_SCAN_EVENT_PREEMPTED = BIT(5),
+ WMI_SCAN_EVENT_START_FAILED = BIT(6),
+ WMI_SCAN_EVENT_RESTARTED = BIT(7),
+ WMI_SCAN_EVENT_FOREIGN_CHAN_EXIT = BIT(8),
+ WMI_SCAN_EVENT_SUSPENDED = BIT(9),
+ WMI_SCAN_EVENT_RESUMED = BIT(10),
+ WMI_SCAN_EVENT_MAX = BIT(15),
+};
+
+enum wmi_scan_completion_reason {
+ WMI_SCAN_REASON_COMPLETED,
+ WMI_SCAN_REASON_CANCELLED,
+ WMI_SCAN_REASON_PREEMPTED,
+ WMI_SCAN_REASON_TIMEDOUT,
+ WMI_SCAN_REASON_INTERNAL_FAILURE,
+ WMI_SCAN_REASON_MAX,
+};
+
+struct wmi_start_scan_cmd {
+ u32 tlv_header;
+ u32 scan_id;
+ u32 scan_req_id;
+ u32 vdev_id;
+ u32 scan_priority;
+ u32 notify_scan_events;
+ u32 dwell_time_active;
+ u32 dwell_time_passive;
+ u32 min_rest_time;
+ u32 max_rest_time;
+ u32 repeat_probe_time;
+ u32 probe_spacing_time;
+ u32 idle_time;
+ u32 max_scan_time;
+ u32 probe_delay;
+ u32 scan_ctrl_flags;
+ u32 burst_duration;
+ u32 num_chan;
+ u32 num_bssid;
+ u32 num_ssids;
+ u32 ie_len;
+ u32 n_probes;
+ struct wmi_mac_addr mac_addr;
+ struct wmi_mac_addr mac_mask;
+ u32 ie_bitmap[WMI_IE_BITMAP_SIZE];
+ u32 num_vendor_oui;
+ u32 scan_ctrl_flags_ext;
+ u32 dwell_time_active_2g;
+} __packed;
+
+#define WMI_SCAN_FLAG_PASSIVE 0x1
+#define WMI_SCAN_ADD_BCAST_PROBE_REQ 0x2
+#define WMI_SCAN_ADD_CCK_RATES 0x4
+#define WMI_SCAN_ADD_OFDM_RATES 0x8
+#define WMI_SCAN_CHAN_STAT_EVENT 0x10
+#define WMI_SCAN_FILTER_PROBE_REQ 0x20
+#define WMI_SCAN_BYPASS_DFS_CHN 0x40
+#define WMI_SCAN_CONTINUE_ON_ERROR 0x80
+#define WMI_SCAN_FILTER_PROMISCUOS 0x100
+#define WMI_SCAN_FLAG_FORCE_ACTIVE_ON_DFS 0x200
+#define WMI_SCAN_ADD_TPC_IE_IN_PROBE_REQ 0x400
+#define WMI_SCAN_ADD_DS_IE_IN_PROBE_REQ 0x800
+#define WMI_SCAN_ADD_SPOOF_MAC_IN_PROBE_REQ 0x1000
+#define WMI_SCAN_OFFCHAN_MGMT_TX 0x2000
+#define WMI_SCAN_OFFCHAN_DATA_TX 0x4000
+#define WMI_SCAN_CAPTURE_PHY_ERROR 0x8000
+#define WMI_SCAN_FLAG_STRICT_PASSIVE_ON_PCHN 0x10000
+#define WMI_SCAN_FLAG_HALF_RATE_SUPPORT 0x20000
+#define WMI_SCAN_FLAG_QUARTER_RATE_SUPPORT 0x40000
+#define WMI_SCAN_RANDOM_SEQ_NO_IN_PROBE_REQ 0x80000
+#define WMI_SCAN_ENABLE_IE_WHTELIST_IN_PROBE_REQ 0x100000
+
+#define WMI_SCAN_DWELL_MODE_MASK 0x00E00000
+#define WMI_SCAN_DWELL_MODE_SHIFT 21
+
+enum {
+ WMI_SCAN_DWELL_MODE_DEFAULT = 0,
+ WMI_SCAN_DWELL_MODE_CONSERVATIVE = 1,
+ WMI_SCAN_DWELL_MODE_MODERATE = 2,
+ WMI_SCAN_DWELL_MODE_AGGRESSIVE = 3,
+ WMI_SCAN_DWELL_MODE_STATIC = 4,
+};
+
+#define WMI_SCAN_SET_DWELL_MODE(flag, mode) \
+ ((flag) |= (((mode) << WMI_SCAN_DWELL_MODE_SHIFT) & \
+ WMI_SCAN_DWELL_MODE_MASK))
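+
+/* Example (illustrative): composing scan_ctrl_flags for a passive scan
+ * with conservative adaptive dwell, using the flag defines and the
+ * WMI_SCAN_SET_DWELL_MODE() macro above; the helper name is hypothetical.
+ */
+static inline u32 example_passive_scan_ctrl_flags(void)
+{
+	u32 scan_ctrl_flags = WMI_SCAN_FLAG_PASSIVE |
+			      WMI_SCAN_CHAN_STAT_EVENT;
+
+	WMI_SCAN_SET_DWELL_MODE(scan_ctrl_flags,
+				WMI_SCAN_DWELL_MODE_CONSERVATIVE);
+
+	return scan_ctrl_flags;
+}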
+
+struct scan_req_params {
+ u32 scan_id;
+ u32 scan_req_id;
+ u32 vdev_id;
+ u32 pdev_id;
+ enum wmi_scan_priority scan_priority;
+ union {
+ struct {
+ u32 scan_ev_started:1,
+ scan_ev_completed:1,
+ scan_ev_bss_chan:1,
+ scan_ev_foreign_chan:1,
+ scan_ev_dequeued:1,
+ scan_ev_preempted:1,
+ scan_ev_start_failed:1,
+ scan_ev_restarted:1,
+ scan_ev_foreign_chn_exit:1,
+ scan_ev_invalid:1,
+ scan_ev_gpio_timeout:1,
+ scan_ev_suspended:1,
+ scan_ev_resumed:1;
+ };
+ u32 scan_events;
+ };
+ u32 dwell_time_active;
+ u32 dwell_time_active_2g;
+ u32 dwell_time_passive;
+ u32 min_rest_time;
+ u32 max_rest_time;
+ u32 repeat_probe_time;
+ u32 probe_spacing_time;
+ u32 idle_time;
+ u32 max_scan_time;
+ u32 probe_delay;
+ union {
+ struct {
+ u32 scan_f_passive:1,
+ scan_f_bcast_probe:1,
+ scan_f_cck_rates:1,
+ scan_f_ofdm_rates:1,
+ scan_f_chan_stat_evnt:1,
+ scan_f_filter_prb_req:1,
+ scan_f_bypass_dfs_chn:1,
+ scan_f_continue_on_err:1,
+ scan_f_offchan_mgmt_tx:1,
+ scan_f_offchan_data_tx:1,
+ scan_f_promisc_mode:1,
+ scan_f_capture_phy_err:1,
+ scan_f_strict_passive_pch:1,
+ scan_f_half_rate:1,
+ scan_f_quarter_rate:1,
+ scan_f_force_active_dfs_chn:1,
+ scan_f_add_tpc_ie_in_probe:1,
+ scan_f_add_ds_ie_in_probe:1,
+ scan_f_add_spoofed_mac_in_probe:1,
+ scan_f_add_rand_seq_in_probe:1,
+ scan_f_en_ie_whitelist_in_probe:1,
+ scan_f_forced:1,
+ scan_f_2ghz:1,
+ scan_f_5ghz:1,
+ scan_f_80mhz:1;
+ };
+ u32 scan_flags;
+ };
+ enum scan_dwelltime_adaptive_mode adaptive_dwell_time_mode;
+ u32 burst_duration;
+ u32 num_chan;
+ u32 num_bssid;
+ u32 num_ssids;
+ u32 n_probes;
+ u32 chan_list[WLAN_SCAN_MAX_NUM_CHANNELS];
+ u32 notify_scan_events;
+ struct wlan_ssid ssid[WLAN_SCAN_MAX_NUM_SSID];
+ struct wmi_mac_addr bssid_list[WLAN_SCAN_MAX_NUM_BSSID];
+ struct element_info extraie;
+ struct element_info htcap;
+ struct element_info vhtcap;
+};
+
+struct wmi_ssid_arg {
+ int len;
+ const u8 *ssid;
+};
+
+struct wmi_bssid_arg {
+ const u8 *bssid;
+};
+
+struct wmi_start_scan_arg {
+ u32 scan_id;
+ u32 scan_req_id;
+ u32 vdev_id;
+ u32 scan_priority;
+ u32 notify_scan_events;
+ u32 dwell_time_active;
+ u32 dwell_time_passive;
+ u32 min_rest_time;
+ u32 max_rest_time;
+ u32 repeat_probe_time;
+ u32 probe_spacing_time;
+ u32 idle_time;
+ u32 max_scan_time;
+ u32 probe_delay;
+ u32 scan_ctrl_flags;
+
+ u32 ie_len;
+ u32 n_channels;
+ u32 n_ssids;
+ u32 n_bssids;
+
+ u8 ie[WLAN_SCAN_PARAMS_MAX_IE_LEN];
+ u32 channels[64];
+ struct wmi_ssid_arg ssids[WLAN_SCAN_PARAMS_MAX_SSID];
+ struct wmi_bssid_arg bssids[WLAN_SCAN_PARAMS_MAX_BSSID];
+};
+
+#define WMI_SCAN_STOP_ONE 0x00000000
+#define WMI_SCN_STOP_VAP_ALL 0x01000000
+#define WMI_SCAN_STOP_ALL 0x04000000
+
+/* Prefix 0xA000 indicates that the scan request
+ * is triggered by the host
+ */
+#define ATH11K_SCAN_ID 0xA000
+
+enum scan_cancel_req_type {
+ WLAN_SCAN_CANCEL_SINGLE = 1,
+ WLAN_SCAN_CANCEL_VDEV_ALL,
+ WLAN_SCAN_CANCEL_PDEV_ALL,
+};
+
+struct scan_cancel_param {
+ u32 requester;
+ u32 scan_id;
+ enum scan_cancel_req_type req_type;
+ u32 vdev_id;
+ u32 pdev_id;
+};
+
+struct wmi_bcn_send_from_host_cmd {
+ u32 tlv_header;
+ u32 vdev_id;
+ u32 data_len;
+ union {
+ u32 frag_ptr;
+ u32 frag_ptr_lo;
+ };
+ u32 frame_ctrl;
+ u32 dtim_flag;
+ u32 bcn_antenna;
+ u32 frag_ptr_hi;
+};
+
+#define WMI_CHAN_INFO_MODE GENMASK(5, 0)
+#define WMI_CHAN_INFO_HT40_PLUS BIT(6)
+#define WMI_CHAN_INFO_PASSIVE BIT(7)
+#define WMI_CHAN_INFO_ADHOC_ALLOWED BIT(8)
+#define WMI_CHAN_INFO_AP_DISABLED BIT(9)
+#define WMI_CHAN_INFO_DFS BIT(10)
+#define WMI_CHAN_INFO_ALLOW_HT BIT(11)
+#define WMI_CHAN_INFO_ALLOW_VHT BIT(12)
+#define WMI_CHAN_INFO_CHAN_CHANGE_CAUSE_CSA BIT(13)
+#define WMI_CHAN_INFO_HALF_RATE BIT(14)
+#define WMI_CHAN_INFO_QUARTER_RATE BIT(15)
+#define WMI_CHAN_INFO_DFS_FREQ2 BIT(16)
+#define WMI_CHAN_INFO_ALLOW_HE BIT(17)
+
+#define WMI_CHAN_REG_INFO1_MIN_PWR GENMASK(7, 0)
+#define WMI_CHAN_REG_INFO1_MAX_PWR GENMASK(15, 8)
+#define WMI_CHAN_REG_INFO1_MAX_REG_PWR GENMASK(23, 16)
+#define WMI_CHAN_REG_INFO1_REG_CLS GENMASK(31, 24)
+
+#define WMI_CHAN_REG_INFO2_ANT_MAX GENMASK(7, 0)
+#define WMI_CHAN_REG_INFO2_MAX_TX_PWR GENMASK(15, 8)
+
+struct wmi_channel {
+ u32 tlv_header;
+ u32 mhz;
+ u32 band_center_freq1;
+ u32 band_center_freq2;
+ u32 info;
+ u32 reg_info_1;
+ u32 reg_info_2;
+} __packed;
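+
+/* Example (illustrative): filling the packed info word of struct
+ * wmi_channel from the host-side struct wmi_channel_arg defined earlier;
+ * a minimal sketch with a hypothetical helper name.
+ */
+static inline void example_fill_chan_info(struct wmi_channel *chan,
+					  const struct wmi_channel_arg *arg)
+{
+	u32 info = FIELD_PREP(WMI_CHAN_INFO_MODE, arg->mode);
+
+	if (arg->passive)
+		info |= WMI_CHAN_INFO_PASSIVE;
+	if (arg->allow_ht)
+		info |= WMI_CHAN_INFO_ALLOW_HT;
+	if (arg->allow_vht)
+		info |= WMI_CHAN_INFO_ALLOW_VHT;
+	if (arg->allow_he)
+		info |= WMI_CHAN_INFO_ALLOW_HE;
+	if (arg->chan_radar)
+		info |= WMI_CHAN_INFO_DFS;
+
+	chan->info = info;
+}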
+
+struct wmi_mgmt_params {
+ void *tx_frame;
+ u16 frm_len;
+ u8 vdev_id;
+ u16 chanfreq;
+ void *pdata;
+ u16 desc_id;
+ u8 *macaddr;
+ void *qdf_ctx;
+};
+
+enum wmi_sta_ps_mode {
+ WMI_STA_PS_MODE_DISABLED = 0,
+ WMI_STA_PS_MODE_ENABLED = 1,
+};
+
+#define WMI_SMPS_MASK_LOWER_16BITS 0xFF
+#define WMI_SMPS_MASK_UPPER_3BITS 0x7
+#define WMI_SMPS_PARAM_VALUE_SHIFT 29
+
+#define ATH11K_WMI_FW_HANG_ASSERT_TYPE 1
+#define ATH11K_WMI_FW_HANG_DELAY 0
+
+/* type: 0 = unused, 1 = assert, 2 = do not respond to the detect command
+ * delay_time_ms: delay before the simulated hang takes effect
+ */
+
+struct wmi_force_fw_hang_cmd {
+ u32 tlv_header;
+ u32 type;
+ u32 delay_time_ms;
+};
+
+struct wmi_vdev_set_param_cmd {
+ u32 tlv_header;
+ u32 vdev_id;
+ u32 param_id;
+ u32 param_value;
+} __packed;
+
+enum wmi_stats_id {
+ WMI_REQUEST_PEER_STAT = BIT(0),
+ WMI_REQUEST_AP_STAT = BIT(1),
+ WMI_REQUEST_PDEV_STAT = BIT(2),
+ WMI_REQUEST_VDEV_STAT = BIT(3),
+ WMI_REQUEST_BCNFLT_STAT = BIT(4),
+ WMI_REQUEST_VDEV_RATE_STAT = BIT(5),
+ WMI_REQUEST_INST_STAT = BIT(6),
+ WMI_REQUEST_MIB_STAT = BIT(7),
+ WMI_REQUEST_RSSI_PER_CHAIN_STAT = BIT(8),
+ WMI_REQUEST_CONGESTION_STAT = BIT(9),
+ WMI_REQUEST_PEER_EXTD_STAT = BIT(10),
+ WMI_REQUEST_BCN_STAT = BIT(11),
+ WMI_REQUEST_BCN_STAT_RESET = BIT(12),
+ WMI_REQUEST_PEER_EXTD2_STAT = BIT(13),
+};
+
+struct wmi_request_stats_cmd {
+ u32 tlv_header;
+ enum wmi_stats_id stats_id;
+ u32 vdev_id;
+ struct wmi_mac_addr peer_macaddr;
+ u32 pdev_id;
+} __packed;
+
+#define WMI_BEACON_TX_BUFFER_SIZE 512
+
+struct wmi_bcn_tmpl_cmd {
+ u32 tlv_header;
+ u32 vdev_id;
+ u32 tim_ie_offset;
+ u32 buf_len;
+ u32 csa_switch_count_offset;
+ u32 ext_csa_switch_count_offset;
+ u32 csa_event_bitmap;
+ u32 mbssid_ie_offset;
+ u32 esp_ie_offset;
+} __packed;
+
+struct wmi_key_seq_counter {
+ u32 key_seq_counter_l;
+ u32 key_seq_counter_h;
+} __packed;
+
+struct wmi_vdev_install_key_cmd {
+ u32 tlv_header;
+ u32 vdev_id;
+ struct wmi_mac_addr peer_macaddr;
+ u32 key_idx;
+ u32 key_flags;
+ u32 key_cipher;
+ struct wmi_key_seq_counter key_rsc_counter;
+ struct wmi_key_seq_counter key_global_rsc_counter;
+ struct wmi_key_seq_counter key_tsc_counter;
+ u8 wpi_key_rsc_counter[16];
+ u8 wpi_key_tsc_counter[16];
+ u32 key_len;
+ u32 key_txmic_len;
+ u32 key_rxmic_len;
+ u32 is_group_key_id_valid;
+ u32 group_key_id;
+
+	/* Followed by key_data: the key itself, then the tx mic,
+	 * then the rx mic
+	 */
+} __packed;
+
+struct wmi_vdev_install_key_arg {
+ u32 vdev_id;
+ const u8 *macaddr;
+ u32 key_idx;
+ u32 key_flags;
+ u32 key_cipher;
+ u32 key_len;
+ u32 key_txmic_len;
+ u32 key_rxmic_len;
+ u64 key_rsc_counter;
+ const void *key_data;
+};
+
+#define WMI_MAX_SUPPORTED_RATES 128
+#define WMI_HOST_MAX_HECAP_PHY_SIZE 3
+#define WMI_HOST_MAX_HE_RATE_SET 3
+#define WMI_HECAP_TXRX_MCS_NSS_IDX_80 0
+#define WMI_HECAP_TXRX_MCS_NSS_IDX_160 1
+#define WMI_HECAP_TXRX_MCS_NSS_IDX_80_80 2
+
+struct wmi_rate_set_arg {
+ u32 num_rates;
+ u8 rates[WMI_MAX_SUPPORTED_RATES];
+};
+
+struct peer_assoc_params {
+ struct wmi_mac_addr peer_macaddr;
+ u32 vdev_id;
+ u32 peer_new_assoc;
+ u32 peer_associd;
+ u32 peer_flags;
+ u32 peer_caps;
+ u32 peer_listen_intval;
+ u32 peer_ht_caps;
+ u32 peer_max_mpdu;
+ u32 peer_mpdu_density;
+ u32 peer_rate_caps;
+ u32 peer_nss;
+ u32 peer_vht_caps;
+ u32 peer_phymode;
+ u32 peer_ht_info[2];
+ struct wmi_rate_set_arg peer_legacy_rates;
+ struct wmi_rate_set_arg peer_ht_rates;
+ u32 rx_max_rate;
+ u32 rx_mcs_set;
+ u32 tx_max_rate;
+ u32 tx_mcs_set;
+ u8 vht_capable;
+ u32 tx_max_mcs_nss;
+ u32 peer_bw_rxnss_override;
+ bool is_pmf_enabled;
+ bool is_wme_set;
+ bool qos_flag;
+ bool apsd_flag;
+ bool ht_flag;
+ bool bw_40;
+ bool bw_80;
+ bool bw_160;
+ bool stbc_flag;
+ bool ldpc_flag;
+ bool static_mimops_flag;
+ bool dynamic_mimops_flag;
+ bool spatial_mux_flag;
+ bool vht_flag;
+ bool vht_ng_flag;
+ bool need_ptk_4_way;
+ bool need_gtk_2_way;
+ bool auth_flag;
+ bool safe_mode_enabled;
+ bool amsdu_disable;
+ /* Use common structure */
+ u8 peer_mac[ETH_ALEN];
+
+ bool he_flag;
+ u32 peer_he_cap_macinfo[2];
+ u32 peer_he_cap_macinfo_internal;
+ u32 peer_he_ops;
+ u32 peer_he_cap_phyinfo[WMI_HOST_MAX_HECAP_PHY_SIZE];
+ u32 peer_he_mcs_count;
+ u32 peer_he_rx_mcs_set[WMI_HOST_MAX_HE_RATE_SET];
+ u32 peer_he_tx_mcs_set[WMI_HOST_MAX_HE_RATE_SET];
+ bool twt_responder;
+ bool twt_requester;
+ struct ath11k_ppe_threshold peer_ppet;
+};
+
+struct wmi_peer_assoc_complete_cmd {
+ u32 tlv_header;
+ struct wmi_mac_addr peer_macaddr;
+ u32 vdev_id;
+ u32 peer_new_assoc;
+ u32 peer_associd;
+ u32 peer_flags;
+ u32 peer_caps;
+ u32 peer_listen_intval;
+ u32 peer_ht_caps;
+ u32 peer_max_mpdu;
+ u32 peer_mpdu_density;
+ u32 peer_rate_caps;
+ u32 peer_nss;
+ u32 peer_vht_caps;
+ u32 peer_phymode;
+ u32 peer_ht_info[2];
+ u32 num_peer_legacy_rates;
+ u32 num_peer_ht_rates;
+ u32 peer_bw_rxnss_override;
+ struct wmi_ppe_threshold peer_ppet;
+ u32 peer_he_cap_info;
+ u32 peer_he_ops;
+ u32 peer_he_cap_phy[WMI_MAX_HECAP_PHY_SIZE];
+ u32 peer_he_mcs;
+ u32 peer_he_cap_info_ext;
+ u32 peer_he_cap_info_internal;
+} __packed;
+
+struct wmi_stop_scan_cmd {
+ u32 tlv_header;
+ u32 requestor;
+ u32 scan_id;
+ u32 req_type;
+ u32 vdev_id;
+ u32 pdev_id;
+};
+
+struct scan_chan_list_params {
+ u32 pdev_id;
+ u16 nallchans;
+ struct channel_param ch_param[1];
+};
+
+struct wmi_scan_chan_list_cmd {
+ u32 tlv_header;
+ u32 num_scan_chans;
+ u32 flags;
+ u32 pdev_id;
+} __packed;
+
+#define WMI_MGMT_SEND_DOWNLD_LEN 64
+
+#define WMI_TX_PARAMS_DWORD0_POWER GENMASK(7, 0)
+#define WMI_TX_PARAMS_DWORD0_MCS_MASK GENMASK(19, 8)
+#define WMI_TX_PARAMS_DWORD0_NSS_MASK GENMASK(27, 20)
+#define WMI_TX_PARAMS_DWORD0_RETRY_LIMIT GENMASK(31, 28)
+
+#define WMI_TX_PARAMS_DWORD1_CHAIN_MASK GENMASK(7, 0)
+#define WMI_TX_PARAMS_DWORD1_BW_MASK GENMASK(14, 8)
+#define WMI_TX_PARAMS_DWORD1_PREAMBLE_TYPE GENMASK(19, 15)
+#define WMI_TX_PARAMS_DWORD1_FRAME_TYPE BIT(20)
+#define WMI_TX_PARAMS_DWORD1_RSVD GENMASK(31, 21)
+
+struct wmi_mgmt_send_params {
+ u32 tlv_header;
+ u32 tx_params_dword0;
+ u32 tx_params_dword1;
+};
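+
+/* Example (illustrative): packing the two parameter dwords above with
+ * FIELD_PREP(); the specific values (tx power of 20, retry limit of 4,
+ * chain mask 0x3) and the helper name are hypothetical.
+ */
+static inline void example_fill_tx_params(struct wmi_mgmt_send_params *params)
+{
+	params->tx_params_dword0 =
+		FIELD_PREP(WMI_TX_PARAMS_DWORD0_POWER, 20) |
+		FIELD_PREP(WMI_TX_PARAMS_DWORD0_RETRY_LIMIT, 4);
+	params->tx_params_dword1 =
+		FIELD_PREP(WMI_TX_PARAMS_DWORD1_CHAIN_MASK, 0x3);
+}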
+
+struct wmi_mgmt_send_cmd {
+ u32 tlv_header;
+ u32 vdev_id;
+ u32 desc_id;
+ u32 chanfreq;
+ u32 paddr_lo;
+ u32 paddr_hi;
+ u32 frame_len;
+ u32 buf_len;
+ u32 tx_params_valid;
+
+ /* This TLV is followed by struct wmi_mgmt_frame */
+
+ /* Followed by struct wmi_mgmt_send_params */
+} __packed;
+
+struct wmi_sta_powersave_mode_cmd {
+ u32 tlv_header;
+ u32 vdev_id;
+ u32 sta_ps_mode;
+};
+
+struct wmi_sta_smps_force_mode_cmd {
+ u32 tlv_header;
+ u32 vdev_id;
+ u32 forced_mode;
+};
+
+struct wmi_sta_smps_param_cmd {
+ u32 tlv_header;
+ u32 vdev_id;
+ u32 param;
+ u32 value;
+};
+
+struct wmi_bcn_prb_info {
+ u32 tlv_header;
+ u32 caps;
+ u32 erp;
+} __packed;
+
+enum {
+ WMI_PDEV_SUSPEND,
+ WMI_PDEV_SUSPEND_AND_DISABLE_INTR,
+};
+
+struct green_ap_ps_params {
+ u32 value;
+};
+
+struct wmi_pdev_green_ap_ps_enable_cmd_param {
+ u32 tlv_header;
+ u32 pdev_id;
+ u32 enable;
+};
+
+struct ap_ps_params {
+ u32 vdev_id;
+ u32 param;
+ u32 value;
+};
+
+struct vdev_set_params {
+ u32 if_id;
+ u32 param_id;
+ u32 param_value;
+};
+
+struct stats_request_params {
+ u32 stats_id;
+ u32 vdev_id;
+ u32 pdev_id;
+};
+
+enum set_init_cc_type {
+ WMI_COUNTRY_INFO_TYPE_ALPHA,
+ WMI_COUNTRY_INFO_TYPE_COUNTRY_CODE,
+ WMI_COUNTRY_INFO_TYPE_REGDOMAIN,
+};
+
+enum set_init_cc_flags {
+ INVALID_CC,
+ CC_IS_SET,
+ REGDMN_IS_SET,
+ ALPHA_IS_SET,
+};
+
+struct wmi_init_country_params {
+ union {
+ u16 country_code;
+ u16 regdom_id;
+ u8 alpha2[3];
+ } cc_info;
+ enum set_init_cc_flags flags;
+};
+
+struct wmi_init_country_cmd {
+ u32 tlv_header;
+ u32 pdev_id;
+ u32 init_cc_type;
+ union {
+ u32 country_code;
+ u32 regdom_id;
+ u32 alpha2;
+ } cc_info;
+} __packed;
+
+struct wmi_pdev_pktlog_filter_info {
+ u32 tlv_header;
+ struct wmi_mac_addr peer_macaddr;
+} __packed;
+
+struct wmi_pdev_pktlog_filter_cmd {
+ u32 tlv_header;
+ u32 pdev_id;
+ u32 enable;
+ u32 filter_type;
+ u32 num_mac;
+} __packed;
+
+enum ath11k_wmi_pktlog_enable {
+ ATH11K_WMI_PKTLOG_ENABLE_AUTO = 0,
+ ATH11K_WMI_PKTLOG_ENABLE_FORCE = 1,
+};
+
+struct wmi_pktlog_enable_cmd {
+ u32 tlv_header;
+ u32 pdev_id;
+ u32 evlist; /* WMI_PKTLOG_EVENT */
+ u32 enable;
+} __packed;
+
+struct wmi_pktlog_disable_cmd {
+ u32 tlv_header;
+ u32 pdev_id;
+} __packed;
+
+#define DFS_PHYERR_UNIT_TEST_CMD 0
+#define DFS_UNIT_TEST_MODULE 0x2b
+#define DFS_UNIT_TEST_TOKEN 0xAA
+
+enum dfs_test_args_idx {
+ DFS_TEST_CMDID = 0,
+ DFS_TEST_PDEV_ID,
+ DFS_TEST_RADAR_PARAM,
+ DFS_MAX_TEST_ARGS,
+};
+
+struct wmi_dfs_unit_test_arg {
+ u32 cmd_id;
+ u32 pdev_id;
+ u32 radar_param;
+};
+
+struct wmi_unit_test_cmd {
+ u32 tlv_header;
+ u32 vdev_id;
+ u32 module_id;
+ u32 num_args;
+ u32 diag_token;
+	/* Followed by test args */
+} __packed;
+
+#define MAX_SUPPORTED_RATES 128
+
+#define WMI_PEER_AUTH 0x00000001
+#define WMI_PEER_QOS 0x00000002
+#define WMI_PEER_NEED_PTK_4_WAY 0x00000004
+#define WMI_PEER_NEED_GTK_2_WAY 0x00000010
+#define WMI_PEER_HE 0x00000400
+#define WMI_PEER_APSD 0x00000800
+#define WMI_PEER_HT 0x00001000
+#define WMI_PEER_40MHZ 0x00002000
+#define WMI_PEER_STBC 0x00008000
+#define WMI_PEER_LDPC 0x00010000
+#define WMI_PEER_DYN_MIMOPS 0x00020000
+#define WMI_PEER_STATIC_MIMOPS 0x00040000
+#define WMI_PEER_SPATIAL_MUX 0x00200000
+#define WMI_PEER_TWT_REQ 0x00400000
+#define WMI_PEER_TWT_RESP 0x00800000
+#define WMI_PEER_VHT 0x02000000
+#define WMI_PEER_80MHZ 0x04000000
+#define WMI_PEER_PMF 0x08000000
+/* TODO: Placeholder for WLAN_PEER_F_PS_PRESEND_REQUIRED = 0x10000000.
+ * Needs to be cleaned up.
+ */
+#define WMI_PEER_IS_P2P_CAPABLE 0x20000000
+#define WMI_PEER_160MHZ 0x40000000
+#define WMI_PEER_SAFEMODE_EN 0x80000000
+
+struct beacon_tmpl_params {
+ u8 vdev_id;
+ u32 tim_ie_offset;
+ u32 tmpl_len;
+ u32 tmpl_len_aligned;
+ u32 csa_switch_count_offset;
+ u32 ext_csa_switch_count_offset;
+ u8 *frm;
+};
+
+struct wmi_rate_set {
+ u32 num_rates;
+ u32 rates[(MAX_SUPPORTED_RATES / 4) + 1];
+};
+
+struct wmi_vht_rate_set {
+ u32 tlv_header;
+ u32 rx_max_rate;
+ u32 rx_mcs_set;
+ u32 tx_max_rate;
+ u32 tx_mcs_set;
+ u32 tx_max_mcs_nss;
+} __packed;
+
+struct wmi_he_rate_set {
+ u32 tlv_header;
+ u32 rx_mcs_set;
+ u32 tx_mcs_set;
+} __packed;
+
+#define MAX_REG_RULES 10
+#define REG_ALPHA2_LEN 2
+
+enum wmi_start_event_param {
+ WMI_VDEV_START_RESP_EVENT = 0,
+ WMI_VDEV_RESTART_RESP_EVENT,
+};
+
+struct wmi_vdev_start_resp_event {
+ u32 vdev_id;
+ u32 requestor_id;
+ enum wmi_start_event_param resp_type;
+ u32 status;
+ u32 chain_mask;
+ u32 smps_mode;
+ union {
+ u32 mac_id;
+ u32 pdev_id;
+ };
+ u32 cfgd_tx_streams;
+ u32 cfgd_rx_streams;
+} __packed;
+
+/* VDEV start response status codes */
+enum wmi_vdev_start_resp_status_code {
+ WMI_VDEV_START_RESPONSE_STATUS_SUCCESS = 0,
+ WMI_VDEV_START_RESPONSE_INVALID_VDEVID = 1,
+ WMI_VDEV_START_RESPONSE_NOT_SUPPORTED = 2,
+ WMI_VDEV_START_RESPONSE_DFS_VIOLATION = 3,
+ WMI_VDEV_START_RESPONSE_INVALID_REGDOMAIN = 4,
+};
+
+enum cc_setting_code {
+ REG_SET_CC_STATUS_PASS = 0,
+ REG_CURRENT_ALPHA2_NOT_FOUND = 1,
+ REG_INIT_ALPHA2_NOT_FOUND = 2,
+ REG_SET_CC_CHANGE_NOT_ALLOWED = 3,
+ REG_SET_CC_STATUS_NO_MEMORY = 4,
+ REG_SET_CC_STATUS_FAIL = 5,
+};
+
+/* Regulatory rule flags passed by the firmware */
+#define REGULATORY_CHAN_DISABLED BIT(0)
+#define REGULATORY_CHAN_NO_IR BIT(1)
+#define REGULATORY_CHAN_RADAR BIT(3)
+#define REGULATORY_CHAN_NO_OFDM BIT(6)
+#define REGULATORY_CHAN_INDOOR_ONLY BIT(9)
+
+#define REGULATORY_CHAN_NO_HT40 BIT(4)
+#define REGULATORY_CHAN_NO_80MHZ BIT(7)
+#define REGULATORY_CHAN_NO_160MHZ BIT(8)
+#define REGULATORY_CHAN_NO_20MHZ BIT(11)
+#define REGULATORY_CHAN_NO_10MHZ BIT(12)
+
+enum {
+ WMI_REG_SET_CC_STATUS_PASS = 0,
+ WMI_REG_CURRENT_ALPHA2_NOT_FOUND = 1,
+ WMI_REG_INIT_ALPHA2_NOT_FOUND = 2,
+ WMI_REG_SET_CC_CHANGE_NOT_ALLOWED = 3,
+ WMI_REG_SET_CC_STATUS_NO_MEMORY = 4,
+ WMI_REG_SET_CC_STATUS_FAIL = 5,
+};
+
+struct cur_reg_rule {
+ u16 start_freq;
+ u16 end_freq;
+ u16 max_bw;
+ u8 reg_power;
+ u8 ant_gain;
+ u16 flags;
+};
+
+struct cur_regulatory_info {
+ enum cc_setting_code status_code;
+ u8 num_phy;
+ u8 phy_id;
+ u16 reg_dmn_pair;
+ u16 ctry_code;
+ u8 alpha2[REG_ALPHA2_LEN + 1];
+ u32 dfs_region;
+ u32 phybitmap;
+ u32 min_bw_2g;
+ u32 max_bw_2g;
+ u32 min_bw_5g;
+ u32 max_bw_5g;
+ u32 num_2g_reg_rules;
+ u32 num_5g_reg_rules;
+ struct cur_reg_rule *reg_rules_2g_ptr;
+ struct cur_reg_rule *reg_rules_5g_ptr;
+};
+
+struct wmi_reg_chan_list_cc_event {
+ u32 status_code;
+ u32 phy_id;
+ u32 alpha2;
+ u32 num_phy;
+ u32 country_id;
+ u32 domain_code;
+ u32 dfs_region;
+ u32 phybitmap;
+ u32 min_bw_2g;
+ u32 max_bw_2g;
+ u32 min_bw_5g;
+ u32 max_bw_5g;
+ u32 num_2g_reg_rules;
+ u32 num_5g_reg_rules;
+} __packed;
+
+struct wmi_regulatory_rule_struct {
+ u32 tlv_header;
+ u32 freq_info;
+ u32 bw_pwr_info;
+ u32 flag_info;
+};
+
+struct wmi_peer_delete_resp_event {
+ u32 vdev_id;
+ struct wmi_mac_addr peer_macaddr;
+} __packed;
+
+struct wmi_bcn_tx_status_event {
+ u32 vdev_id;
+ u32 tx_status;
+} __packed;
+
+struct wmi_vdev_stopped_event {
+ u32 vdev_id;
+} __packed;
+
+struct wmi_pdev_bss_chan_info_event {
+ u32 pdev_id;
+ u32 freq; /* units in MHz */
+ u32 noise_floor; /* units in dBm */
+ /* rx clear - how often the channel was unused */
+ u32 rx_clear_count_low;
+ u32 rx_clear_count_high;
+ /* cycle count - elapsed time during measured period, in clock ticks */
+ u32 cycle_count_low;
+ u32 cycle_count_high;
+ /* tx cycle count - elapsed time spent in tx, in clock ticks */
+ u32 tx_cycle_count_low;
+ u32 tx_cycle_count_high;
+ /* rx cycle count - elapsed time spent in rx, in clock ticks */
+ u32 rx_cycle_count_low;
+ u32 rx_cycle_count_high;
+ /*rx_cycle cnt for my bss in 64bits format */
+ u32 rx_bss_cycle_count_low;
+ u32 rx_bss_cycle_count_high;
+} __packed;
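Each *_low/*_high pair above is the 32-bit split of a 64-bit firmware counter; a reader-side sketch (helper name is ours) to recombine them:

	static inline u64 wmi_bss_chan_info_read64(u32 lo, u32 hi)
	{
		return ((u64)hi << 32) | lo;
	}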
+
+#define WMI_VDEV_INSTALL_KEY_COMPL_STATUS_SUCCESS 0
+
+struct wmi_vdev_install_key_compl_event {
+ u32 vdev_id;
+ struct wmi_mac_addr peer_macaddr;
+ u32 key_idx;
+ u32 key_flags;
+ u32 status;
+} __packed;
+
+struct wmi_vdev_install_key_complete_arg {
+ u32 vdev_id;
+ const u8 *macaddr;
+ u32 key_idx;
+ u32 key_flags;
+ u32 status;
+};
+
+struct wmi_peer_assoc_conf_event {
+ u32 vdev_id;
+ struct wmi_mac_addr peer_macaddr;
+} __packed;
+
+struct wmi_peer_assoc_conf_arg {
+ u32 vdev_id;
+ const u8 *macaddr;
+};
+
+/*
+ * PDEV statistics
+ */
+struct wmi_pdev_stats_base {
+ s32 chan_nf;
+ u32 tx_frame_count; /* Cycles spent transmitting frames */
+ u32 rx_frame_count; /* Cycles spent receiving frames */
+ u32 rx_clear_count; /* Total channel busy time, evidently */
+ u32 cycle_count; /* Total on-channel time */
+ u32 phy_err_count;
+ u32 chan_tx_pwr;
+} __packed;
+
+struct wmi_pdev_stats_extra {
+ u32 ack_rx_bad;
+ u32 rts_bad;
+ u32 rts_good;
+ u32 fcs_bad;
+ u32 no_beacons;
+ u32 mib_int_count;
+} __packed;
+
+struct wmi_pdev_stats_tx {
+ /* Num HTT cookies queued to dispatch list */
+ s32 comp_queued;
+
+ /* Num HTT cookies dispatched */
+ s32 comp_delivered;
+
+ /* Num MSDUs queued to WAL */
+ s32 msdu_enqued;
+
+ /* Num MPDUs queued to WAL */
+ s32 mpdu_enqued;
+
+ /* Num MSDUs dropped by WMM limit */
+ s32 wmm_drop;
+
+ /* Num Local frames queued */
+ s32 local_enqued;
+
+ /* Num Local frames done */
+ s32 local_freed;
+
+ /* Num queued to HW */
+ s32 hw_queued;
+
+ /* Num PPDU reaped from HW */
+ s32 hw_reaped;
+
+ /* Num underruns */
+ s32 underrun;
+
+ /* Num PPDUs cleaned up in TX abort */
+ s32 tx_abort;
+
+ /* Num MPDUs requeued by SW */
+ s32 mpdus_requed;
+
+ /* excessive retries */
+ u32 tx_ko;
+
+ /* data hw rate code */
+ u32 data_rc;
+
+ /* Scheduler self triggers */
+ u32 self_triggers;
+
+ /* frames dropped due to excessive sw retries */
+ u32 sw_retry_failure;
+
+ /* illegal rate phy errors */
+ u32 illgl_rate_phy_err;
+
+ /* wal pdev continuous xretry */
+ u32 pdev_cont_xretry;
+
+ /* wal pdev tx timeouts */
+ u32 pdev_tx_timeout;
+
+ /* wal pdev resets */
+ u32 pdev_resets;
+
+ /* frames dropped due to non-availability of stateless TIDs */
+ u32 stateless_tid_alloc_failure;
+
+ /* PHY/BB underrun */
+ u32 phy_underrun;
+
+ /* MPDU is more than txop limit */
+ u32 txop_ovf;
+} __packed;
+
+struct wmi_pdev_stats_rx {
+ /* Counts any change in ring routing mid-PPDU */
+ s32 mid_ppdu_route_change;
+
+ /* Total number of statuses processed */
+ s32 status_rcvd;
+
+ /* Extra frags on rings 0-3 */
+ s32 r0_frags;
+ s32 r1_frags;
+ s32 r2_frags;
+ s32 r3_frags;
+
+ /* MSDUs / MPDUs delivered to HTT */
+ s32 htt_msdus;
+ s32 htt_mpdus;
+
+ /* MSDUs / MPDUs delivered to local stack */
+ s32 loc_msdus;
+ s32 loc_mpdus;
+
+ /* AMSDUs that have more MSDUs than the status ring size */
+ s32 oversize_amsdu;
+
+ /* Number of PHY errors */
+ s32 phy_errs;
+
+ /* Number of PHY errors drops */
+ s32 phy_err_drop;
+
+ /* Number of MPDU errors - FCS, MIC, ENC, etc. */
+ s32 mpdu_errs;
+} __packed;
+
+struct wmi_pdev_stats {
+ struct wmi_pdev_stats_base base;
+ struct wmi_pdev_stats_tx tx;
+ struct wmi_pdev_stats_rx rx;
+} __packed;
+
+#define WLAN_MAX_AC 4
+#define MAX_TX_RATE_VALUES 10
+
+struct wmi_vdev_stats {
+ u32 vdev_id;
+ u32 beacon_snr;
+ u32 data_snr;
+ u32 num_tx_frames[WLAN_MAX_AC];
+ u32 num_rx_frames;
+ u32 num_tx_frames_retries[WLAN_MAX_AC];
+ u32 num_tx_frames_failures[WLAN_MAX_AC];
+ u32 num_rts_fail;
+ u32 num_rts_success;
+ u32 num_rx_err;
+ u32 num_rx_discard;
+ u32 num_tx_not_acked;
+ u32 tx_rate_history[MAX_TX_RATE_VALUES];
+ u32 beacon_rssi_history[MAX_TX_RATE_VALUES];
+} __packed;
+
+struct wmi_bcn_stats {
+ u32 vdev_id;
+ u32 tx_bcn_succ_cnt;
+ u32 tx_bcn_outage_cnt;
+} __packed;
+
+struct wmi_stats_event {
+ u32 stats_id;
+ u32 num_pdev_stats;
+ u32 num_vdev_stats;
+ u32 num_peer_stats;
+ u32 num_bcnflt_stats;
+ u32 num_chan_stats;
+ u32 num_mib_stats;
+ u32 pdev_id;
+ u32 num_bcn_stats;
+ u32 num_peer_extd_stats;
+ u32 num_peer_extd2_stats;
+} __packed;
+
+struct wmi_pdev_ctl_failsafe_chk_event {
+ u32 pdev_id;
+ u32 ctl_failsafe_status;
+} __packed;
+
+struct wmi_pdev_csa_switch_ev {
+ u32 pdev_id;
+ u32 current_switch_count;
+ u32 num_vdevs;
+} __packed;
+
+struct wmi_pdev_radar_ev {
+ u32 pdev_id;
+ u32 detection_mode;
+ u32 chan_freq;
+ u32 chan_width;
+ u32 detector_id;
+ u32 segment_id;
+ u32 timestamp;
+ u32 is_chirp;
+ s32 freq_offset;
+ s32 sidx;
+} __packed;
+
+#define WMI_RX_STATUS_OK 0x00
+#define WMI_RX_STATUS_ERR_CRC 0x01
+#define WMI_RX_STATUS_ERR_DECRYPT 0x08
+#define WMI_RX_STATUS_ERR_MIC 0x10
+#define WMI_RX_STATUS_ERR_KEY_CACHE_MISS 0x20
+
+#define WLAN_MGMT_TXRX_HOST_MAX_ANTENNA 4
+
+struct mgmt_rx_event_params {
+ u32 channel;
+ u32 snr;
+ u8 rssi_ctl[WLAN_MGMT_TXRX_HOST_MAX_ANTENNA];
+ u32 rate;
+ enum wmi_phy_mode phy_mode;
+ u32 buf_len;
+ int status;
+ u32 flags;
+ int rssi;
+ u32 tsf_delta;
+ u8 pdev_id;
+};
+
+#define ATH_MAX_ANTENNA 4
+
+struct wmi_mgmt_rx_hdr {
+ u32 channel;
+ u32 snr;
+ u32 rate;
+ u32 phy_mode;
+ u32 buf_len;
+ u32 status;
+ u32 rssi_ctl[ATH_MAX_ANTENNA];
+ u32 flags;
+ int rssi;
+ u32 tsf_delta;
+ u32 rx_tsf_l32;
+ u32 rx_tsf_u32;
+ u32 pdev_id;
+} __packed;
+
+#define MAX_ANTENNA_EIGHT 8
+
+struct wmi_rssi_ctl_ext {
+ u32 tlv_header;
+ u32 rssi_ctl_ext[MAX_ANTENNA_EIGHT - ATH_MAX_ANTENNA];
+};
+
+struct wmi_mgmt_tx_compl_event {
+ u32 desc_id;
+ u32 status;
+ u32 pdev_id;
+} __packed;
+
+struct wmi_scan_event {
+ u32 event_type; /* %WMI_SCAN_EVENT_ */
+ u32 reason; /* %WMI_SCAN_REASON_ */
+ u32 channel_freq; /* only valid for WMI_SCAN_EVENT_FOREIGN_CHANNEL */
+ u32 scan_req_id;
+ u32 scan_id;
+ u32 vdev_id;
+ /* TSF Timestamp when the scan event (%WMI_SCAN_EVENT_) is completed
+ * In case of AP it is TSF of the AP vdev
+ * In case of STA connected state, this is the TSF of the AP
+ * In case of STA not connected, it will be the free running HW timer
+ */
+ u32 tsf_timestamp;
+} __packed;
+
+struct wmi_peer_sta_kickout_arg {
+ const u8 *mac_addr;
+};
+
+struct wmi_peer_sta_kickout_event {
+ struct wmi_mac_addr peer_macaddr;
+} __packed;
+
+enum wmi_roam_reason {
+ WMI_ROAM_REASON_BETTER_AP = 1,
+ WMI_ROAM_REASON_BEACON_MISS = 2,
+ WMI_ROAM_REASON_LOW_RSSI = 3,
+ WMI_ROAM_REASON_SUITABLE_AP_FOUND = 4,
+ WMI_ROAM_REASON_HO_FAILED = 5,
+
+ /* keep last */
+ WMI_ROAM_REASON_MAX,
+};
+
+struct wmi_roam_event {
+ u32 vdev_id;
+ u32 reason;
+ u32 rssi;
+} __packed;
+
+#define WMI_CHAN_INFO_START_RESP 0
+#define WMI_CHAN_INFO_END_RESP 1
+
+struct wmi_chan_info_event {
+ u32 err_code;
+ u32 freq;
+ u32 cmd_flags;
+ u32 noise_floor;
+ u32 rx_clear_count;
+ u32 cycle_count;
+ u32 chan_tx_pwr_range;
+ u32 chan_tx_pwr_tp;
+ u32 rx_frame_count;
+ u32 my_bss_rx_cycle_count;
+ u32 rx_11b_mode_data_duration;
+ u32 tx_frame_cnt;
+ u32 mac_clk_mhz;
+ u32 vdev_id;
+} __packed;
+
+struct ath11k_targ_cap {
+ u32 phy_capability;
+ u32 max_frag_entry;
+ u32 num_rf_chains;
+ u32 ht_cap_info;
+ u32 vht_cap_info;
+ u32 vht_supp_mcs;
+ u32 hw_min_tx_power;
+ u32 hw_max_tx_power;
+ u32 sys_cap_info;
+ u32 min_pkt_size_enable;
+ u32 max_bcn_ie_size;
+ u32 max_num_scan_channels;
+ u32 max_supported_macs;
+ u32 wmi_fw_sub_feat_caps;
+ u32 txrx_chainmask;
+ u32 default_dbs_hw_mode_index;
+ u32 num_msdu_desc;
+};
+
+enum wmi_vdev_type {
+ WMI_VDEV_TYPE_AP = 1,
+ WMI_VDEV_TYPE_STA = 2,
+ WMI_VDEV_TYPE_IBSS = 3,
+ WMI_VDEV_TYPE_MONITOR = 4,
+};
+
+enum wmi_vdev_subtype {
+ WMI_VDEV_SUBTYPE_NONE,
+ WMI_VDEV_SUBTYPE_P2P_DEVICE,
+ WMI_VDEV_SUBTYPE_P2P_CLIENT,
+ WMI_VDEV_SUBTYPE_P2P_GO,
+ WMI_VDEV_SUBTYPE_PROXY_STA,
+ WMI_VDEV_SUBTYPE_MESH_NON_11S,
+ WMI_VDEV_SUBTYPE_MESH_11S,
+};
+
+enum wmi_sta_powersave_param {
+ WMI_STA_PS_PARAM_RX_WAKE_POLICY = 0,
+ WMI_STA_PS_PARAM_TX_WAKE_THRESHOLD = 1,
+ WMI_STA_PS_PARAM_PSPOLL_COUNT = 2,
+ WMI_STA_PS_PARAM_INACTIVITY_TIME = 3,
+ WMI_STA_PS_PARAM_UAPSD = 4,
+};
+
+#define WMI_UAPSD_AC_TYPE_DELI 0
+#define WMI_UAPSD_AC_TYPE_TRIG 1
+
+#define WMI_UAPSD_AC_BIT_MASK(ac, type) \
+ ((type == WMI_UAPSD_AC_TYPE_DELI) ? \
+ (1 << (ac << 1)) : (1 << ((ac << 1) + 1)))
+
+enum wmi_sta_ps_param_uapsd {
+ WMI_STA_PS_UAPSD_AC0_DELIVERY_EN = (1 << 0),
+ WMI_STA_PS_UAPSD_AC0_TRIGGER_EN = (1 << 1),
+ WMI_STA_PS_UAPSD_AC1_DELIVERY_EN = (1 << 2),
+ WMI_STA_PS_UAPSD_AC1_TRIGGER_EN = (1 << 3),
+ WMI_STA_PS_UAPSD_AC2_DELIVERY_EN = (1 << 4),
+ WMI_STA_PS_UAPSD_AC2_TRIGGER_EN = (1 << 5),
+ WMI_STA_PS_UAPSD_AC3_DELIVERY_EN = (1 << 6),
+ WMI_STA_PS_UAPSD_AC3_TRIGGER_EN = (1 << 7),
+};
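The WMI_UAPSD_AC_BIT_MASK() macro above expands to exactly these enumerator values (delivery in even bits, trigger in odd bits); for example, enabling U-APSD delivery and trigger on VI (AC 2) and VO (AC 3):

	u32 uapsd = WMI_UAPSD_AC_BIT_MASK(2, WMI_UAPSD_AC_TYPE_DELI) |
		    WMI_UAPSD_AC_BIT_MASK(2, WMI_UAPSD_AC_TYPE_TRIG) |
		    WMI_UAPSD_AC_BIT_MASK(3, WMI_UAPSD_AC_TYPE_DELI) |
		    WMI_UAPSD_AC_BIT_MASK(3, WMI_UAPSD_AC_TYPE_TRIG);
	/* uapsd == 0xf0: same bits as the
	 * WMI_STA_PS_UAPSD_AC{2,3}_{DELIVERY,TRIGGER}_EN enumerators above
	 */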
+
+#define WMI_STA_UAPSD_MAX_INTERVAL_MSEC UINT_MAX
+
+struct wmi_sta_uapsd_auto_trig_param {
+ u32 wmm_ac;
+ u32 user_priority;
+ u32 service_interval;
+ u32 suspend_interval;
+ u32 delay_interval;
+};
+
+struct wmi_sta_uapsd_auto_trig_cmd_fixed_param {
+ u32 vdev_id;
+ struct wmi_mac_addr peer_macaddr;
+ u32 num_ac;
+};
+
+struct wmi_sta_uapsd_auto_trig_arg {
+ u32 wmm_ac;
+ u32 user_priority;
+ u32 service_interval;
+ u32 suspend_interval;
+ u32 delay_interval;
+};
+
+enum wmi_sta_ps_param_tx_wake_threshold {
+ WMI_STA_PS_TX_WAKE_THRESHOLD_NEVER = 0,
+ WMI_STA_PS_TX_WAKE_THRESHOLD_ALWAYS = 1,
+
+ /* Values greater than one indicate the number of TX attempts
+ * allowed per beacon interval before the STA wakes up
+ */
+};
+
+/* The maximum number of PS-Poll frames the FW will send in response to
+ * traffic advertised in TIM before waking up (by sending a null frame with PS
+ * = 0). Value 0 has a special meaning: there is no maximum count and the FW
+ * will send as many PS-Polls as are necessary to retrieve buffered BUs. This
+ * parameter is used when the RX wake policy is
+ * WMI_STA_PS_RX_WAKE_POLICY_POLL_UAPSD and ignored when the RX wake
+ * policy is WMI_STA_PS_RX_WAKE_POLICY_WAKE.
+ */
+enum wmi_sta_ps_param_pspoll_count {
+ WMI_STA_PS_PSPOLL_COUNT_NO_MAX = 0,
+ /* Values greater than 0 indicate the maximum number of PS-Poll frames
+ * FW will send before waking up.
+ */
+};
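As the comment above explains, the PS-Poll count only matters under the poll-UAPSD wake policy; a hedged sketch of how the two parameters pair up, using ath11k_wmi_set_sta_ps_param() as declared later in this header (values are illustrative):

	int ret;

	ret = ath11k_wmi_set_sta_ps_param(ar, vdev_id,
					  WMI_STA_PS_PARAM_RX_WAKE_POLICY,
					  WMI_STA_PS_RX_WAKE_POLICY_POLL_UAPSD);
	if (!ret)
		ret = ath11k_wmi_set_sta_ps_param(ar, vdev_id,
						  WMI_STA_PS_PARAM_PSPOLL_COUNT,
						  WMI_STA_PS_PSPOLL_COUNT_NO_MAX);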
+
+/* U-APSD configuration of peer station from (re)assoc request and TSPECs */
+enum wmi_ap_ps_param_uapsd {
+ WMI_AP_PS_UAPSD_AC0_DELIVERY_EN = (1 << 0),
+ WMI_AP_PS_UAPSD_AC0_TRIGGER_EN = (1 << 1),
+ WMI_AP_PS_UAPSD_AC1_DELIVERY_EN = (1 << 2),
+ WMI_AP_PS_UAPSD_AC1_TRIGGER_EN = (1 << 3),
+ WMI_AP_PS_UAPSD_AC2_DELIVERY_EN = (1 << 4),
+ WMI_AP_PS_UAPSD_AC2_TRIGGER_EN = (1 << 5),
+ WMI_AP_PS_UAPSD_AC3_DELIVERY_EN = (1 << 6),
+ WMI_AP_PS_UAPSD_AC3_TRIGGER_EN = (1 << 7),
+};
+
+/* U-APSD maximum service period of peer station */
+enum wmi_ap_ps_peer_param_max_sp {
+ WMI_AP_PS_PEER_PARAM_MAX_SP_UNLIMITED = 0,
+ WMI_AP_PS_PEER_PARAM_MAX_SP_2 = 1,
+ WMI_AP_PS_PEER_PARAM_MAX_SP_4 = 2,
+ WMI_AP_PS_PEER_PARAM_MAX_SP_6 = 3,
+ MAX_WMI_AP_PS_PEER_PARAM_MAX_SP,
+};
+
+enum wmi_ap_ps_peer_param {
+ /** Set uapsd configuration for a given peer.
+ *
+ * This includes the delivery and trigger enabled state for each AC.
+ * The host MLME needs to set this based on the AP capability and the
+ * station's request, as set in the association request received from
+ * the station.
+ *
+ * Lower 8 bits of the value specify the UAPSD configuration.
+ *
+ * (see enum wmi_ap_ps_param_uapsd)
+ * The default value is 0.
+ */
+ WMI_AP_PS_PEER_PARAM_UAPSD = 0,
+
+ /**
+ * Set the service period for a UAPSD capable station
+ *
+ * The service period from wme ie in the (re)assoc request frame.
+ *
+ * (see enum wmi_ap_ps_peer_param_max_sp)
+ */
+ WMI_AP_PS_PEER_PARAM_MAX_SP = 1,
+
+ /** Time in seconds for aging out buffered frames
+ * for STA in power save
+ */
+ WMI_AP_PS_PEER_PARAM_AGEOUT_TIME = 2,
+
+ /** Specify frame types that are considered SIFS
+ * RESP trigger frames
+ */
+ WMI_AP_PS_PEER_PARAM_SIFS_RESP_FRMTYPE = 3,
+
+ /** Specifies the trigger state of TID.
+ * Valid only for UAPSD frame type
+ */
+ WMI_AP_PS_PEER_PARAM_SIFS_RESP_UAPSD = 4,
+
+ /* Specifies the WNM sleep state of a STA */
+ WMI_AP_PS_PEER_PARAM_WNM_SLEEP = 5,
+};
+
+#define DISABLE_SIFS_RESPONSE_TRIGGER 0
+
+#define WMI_MAX_KEY_INDEX 3
+#define WMI_MAX_KEY_LEN 32
+
+#define WMI_KEY_PAIRWISE 0x00
+#define WMI_KEY_GROUP 0x01
+
+#define WMI_CIPHER_NONE 0x0 /* clear key */
+#define WMI_CIPHER_WEP 0x1
+#define WMI_CIPHER_TKIP 0x2
+#define WMI_CIPHER_AES_OCB 0x3
+#define WMI_CIPHER_AES_CCM 0x4
+#define WMI_CIPHER_WAPI 0x5
+#define WMI_CIPHER_CKIP 0x6
+#define WMI_CIPHER_AES_CMAC 0x7
+#define WMI_CIPHER_ANY 0x8
+#define WMI_CIPHER_AES_GCM 0x9
+#define WMI_CIPHER_AES_GMAC 0xa
+
+/* Value to disable fixed rate setting */
+#define WMI_FIXED_RATE_NONE (0xffff)
+
+#define ATH11K_RC_VERSION_OFFSET 28
+#define ATH11K_RC_PREAMBLE_OFFSET 8
+#define ATH11K_RC_NSS_OFFSET 5
+
+#define ATH11K_HW_RATE_CODE(rate, nss, preamble) \
+ ((1 << ATH11K_RC_VERSION_OFFSET) | \
+ ((nss) << ATH11K_RC_NSS_OFFSET) | \
+ ((preamble) << ATH11K_RC_PREAMBLE_OFFSET) | \
+ (rate))
+
+/* Preamble types to be used with VDEV fixed rate configuration */
+enum wmi_rate_preamble {
+ WMI_RATE_PREAMBLE_OFDM,
+ WMI_RATE_PREAMBLE_CCK,
+ WMI_RATE_PREAMBLE_HT,
+ WMI_RATE_PREAMBLE_VHT,
+ WMI_RATE_PREAMBLE_HE,
+};
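Putting the pieces together, ATH11K_HW_RATE_CODE() composes the fixed-rate word from the offsets above; whether the NSS field takes nss or nss - 1 is firmware-defined, so treat the second argument in this sketch as an assumption to verify against actual driver usage:

	/* VHT MCS 7; NSS field value 1 (see caveat above) */
	u32 rate_code = ATH11K_HW_RATE_CODE(7, 1, WMI_RATE_PREAMBLE_VHT);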
+
+/**
+ * enum wmi_rtscts_prot_mode - Enable/Disable RTS/CTS and CTS2Self Protection.
+ * @WMI_RTS_CTS_DISABLED: RTS/CTS protection is disabled.
+ * @WMI_USE_RTS_CTS: RTS/CTS enabled.
+ * @WMI_USE_CTS2SELF: CTS-to-self protection enabled.
+ */
+enum wmi_rtscts_prot_mode {
+ WMI_RTS_CTS_DISABLED = 0,
+ WMI_USE_RTS_CTS = 1,
+ WMI_USE_CTS2SELF = 2,
+};
+
+/**
+ * enum wmi_rtscts_profile - Selection of RTS CTS profile along with enabling
+ * protection mode.
+ * @WMI_RTSCTS_FOR_NO_RATESERIES: Neither rate series should use RTS-CTS
+ * @WMI_RTSCTS_FOR_SECOND_RATESERIES: Only the second rate series will use RTS-CTS
+ * @WMI_RTSCTS_ACROSS_SW_RETRIES: Only the second rate series will use RTS-CTS,
+ * but if there is a sw retry, both rate
+ * series will use RTS-CTS.
+ * @WMI_RTSCTS_ERP: RTS/CTS used for ERP protection for every PPDU.
+ * @WMI_RTSCTS_FOR_ALL_RATESERIES: Enable RTS-CTS for all rate series.
+ */
+enum wmi_rtscts_profile {
+ WMI_RTSCTS_FOR_NO_RATESERIES = 0,
+ WMI_RTSCTS_FOR_SECOND_RATESERIES = 1,
+ WMI_RTSCTS_ACROSS_SW_RETRIES = 2,
+ WMI_RTSCTS_ERP = 3,
+ WMI_RTSCTS_FOR_ALL_RATESERIES = 4,
+};
+
+struct ath11k_hal_reg_cap {
+ u32 eeprom_rd;
+ u32 eeprom_rd_ext;
+ u32 regcap1;
+ u32 regcap2;
+ u32 wireless_modes;
+ u32 low_2ghz_chan;
+ u32 high_2ghz_chan;
+ u32 low_5ghz_chan;
+ u32 high_5ghz_chan;
+};
+
+struct ath11k_mem_chunk {
+ void *vaddr;
+ dma_addr_t paddr;
+ u32 len;
+ u32 req_id;
+};
+
+#define WMI_SKB_HEADROOM sizeof(struct wmi_cmd_hdr)
+
+enum wmi_sta_ps_param_rx_wake_policy {
+ WMI_STA_PS_RX_WAKE_POLICY_WAKE = 0,
+ WMI_STA_PS_RX_WAKE_POLICY_POLL_UAPSD = 1,
+};
+
+enum ath11k_hw_txrx_mode {
+ ATH11K_HW_TXRX_RAW = 0,
+ ATH11K_HW_TXRX_NATIVE_WIFI = 1,
+ ATH11K_HW_TXRX_ETHERNET = 2,
+};
+
+struct wmi_wmm_params {
+ u32 tlv_header;
+ u32 cwmin;
+ u32 cwmax;
+ u32 aifs;
+ u32 txoplimit;
+ u32 acm;
+ u32 no_ack;
+} __packed;
+
+struct wmi_wmm_params_arg {
+ u8 acm;
+ u8 aifs;
+ u16 cwmin;
+ u16 cwmax;
+ u16 txop;
+ u8 no_ack;
+};
+
+struct wmi_vdev_set_wmm_params_cmd {
+ u32 tlv_header;
+ u32 vdev_id;
+ struct wmi_wmm_params wmm_params[4];
+ u32 wmm_param_type;
+} __packed;
+
+struct wmi_wmm_params_all_arg {
+ struct wmi_wmm_params_arg ac_be;
+ struct wmi_wmm_params_arg ac_bk;
+ struct wmi_wmm_params_arg ac_vi;
+ struct wmi_wmm_params_arg ac_vo;
+};
+
+#define ATH11K_TWT_DEF_STA_CONG_TIMER_MS 5000
+#define ATH11K_TWT_DEF_DEFAULT_SLOT_SIZE 10
+#define ATH11K_TWT_DEF_CONGESTION_THRESH_SETUP 50
+#define ATH11K_TWT_DEF_CONGESTION_THRESH_TEARDOWN 20
+#define ATH11K_TWT_DEF_CONGESTION_THRESH_CRITICAL 100
+#define ATH11K_TWT_DEF_INTERFERENCE_THRESH_TEARDOWN 80
+#define ATH11K_TWT_DEF_INTERFERENCE_THRESH_SETUP 50
+#define ATH11K_TWT_DEF_MIN_NO_STA_SETUP 10
+#define ATH11K_TWT_DEF_MIN_NO_STA_TEARDOWN 2
+#define ATH11K_TWT_DEF_NO_OF_BCAST_MCAST_SLOTS 2
+#define ATH11K_TWT_DEF_MIN_NO_TWT_SLOTS 2
+#define ATH11K_TWT_DEF_MAX_NO_STA_TWT 500
+#define ATH11K_TWT_DEF_MODE_CHECK_INTERVAL 10000
+#define ATH11K_TWT_DEF_ADD_STA_SLOT_INTERVAL 1000
+#define ATH11K_TWT_DEF_REMOVE_STA_SLOT_INTERVAL 5000
+
+struct wmi_twt_enable_params_cmd {
+ u32 tlv_header;
+ u32 pdev_id;
+ u32 sta_cong_timer_ms;
+ u32 mbss_support;
+ u32 default_slot_size;
+ u32 congestion_thresh_setup;
+ u32 congestion_thresh_teardown;
+ u32 congestion_thresh_critical;
+ u32 interference_thresh_teardown;
+ u32 interference_thresh_setup;
+ u32 min_no_sta_setup;
+ u32 min_no_sta_teardown;
+ u32 no_of_bcast_mcast_slots;
+ u32 min_no_twt_slots;
+ u32 max_no_sta_twt;
+ u32 mode_check_interval;
+ u32 add_sta_slot_interval;
+ u32 remove_sta_slot_interval;
+} __packed;
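The ATH11K_TWT_DEF_* values above map one-to-one onto this command; a sketch of seeding it from the defaults (only a subset shown; tlv_header and pdev_id are filled by the real send path):

	struct wmi_twt_enable_params_cmd cmd = {
		.sta_cong_timer_ms = ATH11K_TWT_DEF_STA_CONG_TIMER_MS,
		.default_slot_size = ATH11K_TWT_DEF_DEFAULT_SLOT_SIZE,
		.congestion_thresh_setup = ATH11K_TWT_DEF_CONGESTION_THRESH_SETUP,
		.max_no_sta_twt = ATH11K_TWT_DEF_MAX_NO_STA_TWT,
	};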
+
+struct wmi_twt_disable_params_cmd {
+ u32 tlv_header;
+ u32 pdev_id;
+} __packed;
+
+struct wmi_obss_spatial_reuse_params_cmd {
+ u32 tlv_header;
+ u32 pdev_id;
+ u32 enable;
+ s32 obss_min;
+ s32 obss_max;
+ u32 vdev_id;
+} __packed;
+
+struct target_resource_config {
+ u32 num_vdevs;
+ u32 num_peers;
+ u32 num_active_peers;
+ u32 num_offload_peers;
+ u32 num_offload_reorder_buffs;
+ u32 num_peer_keys;
+ u32 num_tids;
+ u32 ast_skid_limit;
+ u32 tx_chain_mask;
+ u32 rx_chain_mask;
+ u32 rx_timeout_pri[4];
+ u32 rx_decap_mode;
+ u32 scan_max_pending_req;
+ u32 bmiss_offload_max_vdev;
+ u32 roam_offload_max_vdev;
+ u32 roam_offload_max_ap_profiles;
+ u32 num_mcast_groups;
+ u32 num_mcast_table_elems;
+ u32 mcast2ucast_mode;
+ u32 tx_dbg_log_size;
+ u32 num_wds_entries;
+ u32 dma_burst_size;
+ u32 mac_aggr_delim;
+ u32 rx_skip_defrag_timeout_dup_detection_check;
+ u32 vow_config;
+ u32 gtk_offload_max_vdev;
+ u32 num_msdu_desc;
+ u32 max_frag_entries;
+ u32 max_peer_ext_stats;
+ u32 smart_ant_cap;
+ u32 bk_minfree;
+ u32 be_minfree;
+ u32 vi_minfree;
+ u32 vo_minfree;
+ u32 rx_batchmode;
+ u32 tt_support;
+ u32 atf_config;
+ u32 iphdr_pad_config;
+ u32 qwrap_config:16,
+ alloc_frag_desc_for_data_pkt:16;
+ u32 num_tdls_vdevs;
+ u32 num_tdls_conn_table_entries;
+ u32 beacon_tx_offload_max_vdev;
+ u32 num_multicast_filter_entries;
+ u32 num_wow_filters;
+ u32 num_keep_alive_pattern;
+ u32 keep_alive_pattern_size;
+ u32 max_tdls_concurrent_sleep_sta;
+ u32 max_tdls_concurrent_buffer_sta;
+ u32 wmi_send_separate;
+ u32 num_ocb_vdevs;
+ u32 num_ocb_channels;
+ u32 num_ocb_schedules;
+ u32 num_ns_ext_tuples_cfg;
+ u32 bpf_instruction_size;
+ u32 max_bssid_rx_filters;
+ u32 use_pdev_id;
+ u32 peer_map_unmap_v2_support;
+ u32 sched_params;
+ u32 twt_ap_pdev_count;
+ u32 twt_ap_sta_count;
+};
+
+#define WMI_MAX_MEM_REQS 32
+
+#define MAX_RADIOS 3
+
+#define WMI_SERVICE_READY_TIMEOUT_HZ (5 * HZ)
+#define WMI_SEND_TIMEOUT_HZ (3 * HZ)
+
+struct ath11k_wmi_base {
+ struct ath11k_base *ab;
+ struct ath11k_pdev_wmi wmi[MAX_RADIOS];
+ enum ath11k_htc_ep_id wmi_endpoint_id[MAX_RADIOS];
+ u32 max_msg_len[MAX_RADIOS];
+
+ struct completion service_ready;
+ struct completion unified_ready;
+ DECLARE_BITMAP(svc_map, WMI_MAX_EXT_SERVICE);
+ wait_queue_head_t tx_credits_wq;
+ const struct wmi_peer_flags_map *peer_flags;
+ u32 num_mem_chunks;
+ u32 rx_decap_mode;
+ struct wmi_host_mem_chunk mem_chunks[WMI_MAX_MEM_REQS];
+
+ enum wmi_host_hw_mode_config_type preferred_hw_mode;
+ struct target_resource_config wlan_resource_config;
+
+ struct ath11k_targ_cap *targ_cap;
+};
+
+int ath11k_wmi_cmd_send(struct ath11k_pdev_wmi *wmi, struct sk_buff *skb,
+ u32 cmd_id);
+struct sk_buff *ath11k_wmi_alloc_skb(struct ath11k_wmi_base *wmi_sc, u32 len);
+int ath11k_wmi_mgmt_send(struct ath11k *ar, u32 vdev_id, u32 buf_id,
+ struct sk_buff *frame);
+int ath11k_wmi_bcn_tmpl(struct ath11k *ar, u32 vdev_id,
+ struct ieee80211_mutable_offsets *offs,
+ struct sk_buff *bcn);
+int ath11k_wmi_vdev_down(struct ath11k *ar, u8 vdev_id);
+int ath11k_wmi_vdev_up(struct ath11k *ar, u32 vdev_id, u32 aid,
+ const u8 *bssid);
+int ath11k_wmi_vdev_stop(struct ath11k *ar, u8 vdev_id);
+int ath11k_wmi_vdev_start(struct ath11k *ar, struct wmi_vdev_start_req_arg *arg,
+ bool restart);
+int ath11k_wmi_set_peer_param(struct ath11k *ar, const u8 *peer_addr,
+ u32 vdev_id, u32 param_id, u32 param_val);
+int ath11k_wmi_pdev_set_param(struct ath11k *ar, u32 param_id,
+ u32 param_value, u8 pdev_id);
+int ath11k_wmi_pdev_set_ps_mode(struct ath11k *ar, int vdev_id, u32 enable);
+int ath11k_wmi_wait_for_unified_ready(struct ath11k_base *ab);
+int ath11k_wmi_cmd_init(struct ath11k_base *ab);
+int ath11k_wmi_wait_for_service_ready(struct ath11k_base *ab);
+int ath11k_wmi_connect(struct ath11k_base *ab);
+int ath11k_wmi_pdev_attach(struct ath11k_base *ab,
+ u8 pdev_id);
+int ath11k_wmi_attach(struct ath11k_base *ab);
+void ath11k_wmi_detach(struct ath11k_base *ab);
+int ath11k_wmi_vdev_create(struct ath11k *ar, u8 *macaddr,
+ struct vdev_create_params *param);
+int ath11k_wmi_peer_rx_reorder_queue_setup(struct ath11k *ar, int vdev_id,
+ const u8 *addr, dma_addr_t paddr,
+ u8 tid, u8 ba_window_size_valid,
+ u32 ba_window_size);
+int ath11k_wmi_send_peer_create_cmd(struct ath11k *ar,
+ struct peer_create_params *param);
+int ath11k_wmi_vdev_set_param_cmd(struct ath11k *ar, u32 vdev_id,
+ u32 param_id, u32 param_value);
+
+int ath11k_wmi_set_sta_ps_param(struct ath11k *ar, u32 vdev_id,
+ u32 param, u32 param_value);
+int ath11k_wmi_force_fw_hang_cmd(struct ath11k *ar, u32 type, u32 delay_time_ms);
+int ath11k_wmi_send_peer_delete_cmd(struct ath11k *ar,
+ const u8 *peer_addr, u8 vdev_id);
+int ath11k_wmi_vdev_delete(struct ath11k *ar, u8 vdev_id);
+void ath11k_wmi_start_scan_init(struct ath11k *ar, struct scan_req_params *arg);
+int ath11k_wmi_send_scan_start_cmd(struct ath11k *ar,
+ struct scan_req_params *params);
+int ath11k_wmi_send_scan_stop_cmd(struct ath11k *ar,
+ struct scan_cancel_param *param);
+int ath11k_wmi_send_wmm_update_cmd_tlv(struct ath11k *ar, u32 vdev_id,
+ struct wmi_wmm_params_all_arg *param);
+int ath11k_wmi_pdev_suspend(struct ath11k *ar, u32 suspend_opt,
+ u32 pdev_id);
+int ath11k_wmi_pdev_resume(struct ath11k *ar, u32 pdev_id);
+
+int ath11k_wmi_send_peer_assoc_cmd(struct ath11k *ar,
+ struct peer_assoc_params *param);
+int ath11k_wmi_vdev_install_key(struct ath11k *ar,
+ struct wmi_vdev_install_key_arg *arg);
+int ath11k_wmi_pdev_bss_chan_info_request(struct ath11k *ar,
+ enum wmi_bss_chan_info_req_type type);
+int ath11k_wmi_send_stats_request_cmd(struct ath11k *ar,
+ struct stats_request_params *param);
+int ath11k_wmi_send_peer_flush_tids_cmd(struct ath11k *ar,
+ u8 peer_addr[ETH_ALEN],
+ struct peer_flush_params *param);
+int ath11k_wmi_send_set_ap_ps_param_cmd(struct ath11k *ar, u8 *peer_addr,
+ struct ap_ps_params *param);
+int ath11k_wmi_send_scan_chan_list_cmd(struct ath11k *ar,
+ struct scan_chan_list_params *chan_list);
+int ath11k_wmi_send_dfs_phyerr_offload_enable_cmd(struct ath11k *ar,
+ u32 pdev_id);
+int ath11k_wmi_send_bcn_offload_control_cmd(struct ath11k *ar,
+ u32 vdev_id, u32 bcn_ctrl_op);
+int
+ath11k_wmi_send_init_country_cmd(struct ath11k *ar,
+ struct wmi_init_country_params init_cc_param);
+int ath11k_wmi_pdev_pktlog_enable(struct ath11k *ar, u32 pktlog_filter);
+int ath11k_wmi_pdev_pktlog_disable(struct ath11k *ar);
+int ath11k_wmi_pdev_peer_pktlog_filter(struct ath11k *ar, u8 *addr, u8 enable);
+int
+ath11k_wmi_rx_reord_queue_remove(struct ath11k *ar,
+ struct rx_reorder_queue_remove_params *param);
+int ath11k_wmi_send_pdev_set_regdomain(struct ath11k *ar,
+ struct pdev_set_regdomain_params *param);
+int ath11k_wmi_pull_fw_stats(struct ath11k_base *ab, struct sk_buff *skb,
+ struct ath11k_fw_stats *stats);
+size_t ath11k_wmi_fw_stats_num_peers(struct list_head *head);
+size_t ath11k_wmi_fw_stats_num_peers_extd(struct list_head *head);
+size_t ath11k_wmi_fw_stats_num_vdevs(struct list_head *head);
+void ath11k_wmi_fw_stats_fill(struct ath11k *ar,
+ struct ath11k_fw_stats *fw_stats, u32 stats_id,
+ char *buf);
+int ath11k_wmi_simulate_radar(struct ath11k *ar);
+int ath11k_wmi_send_twt_enable_cmd(struct ath11k *ar, u32 pdev_id);
+int ath11k_wmi_send_twt_disable_cmd(struct ath11k *ar, u32 pdev_id);
+int ath11k_wmi_send_obss_spr_cmd(struct ath11k *ar, u32 vdev_id,
+ struct ieee80211_he_obss_pd *he_obss_pd);
+#endif
diff --git a/drivers/net/wireless/ath/ath5k/ahb.c b/drivers/net/wireless/ath/ath5k/ahb.c
index c0794f5988b3..2c9cec8b53d9 100644
--- a/drivers/net/wireless/ath/ath5k/ahb.c
+++ b/drivers/net/wireless/ath/ath5k/ahb.c
@@ -106,7 +106,7 @@ static int ath_ahb_probe(struct platform_device *pdev)
goto err_out;
}
- mem = ioremap_nocache(res->start, resource_size(res));
+ mem = ioremap(res->start, resource_size(res));
if (mem == NULL) {
dev_err(&pdev->dev, "ioremap failed\n");
ret = -ENOMEM;
diff --git a/drivers/net/wireless/ath/ath9k/ahb.c b/drivers/net/wireless/ath/ath9k/ahb.c
index 63019c3de034..cdefb8e2daf1 100644
--- a/drivers/net/wireless/ath/ath9k/ahb.c
+++ b/drivers/net/wireless/ath/ath9k/ahb.c
@@ -92,7 +92,7 @@ static int ath_ahb_probe(struct platform_device *pdev)
return -ENXIO;
}
- mem = devm_ioremap_nocache(&pdev->dev, res->start, resource_size(res));
+ mem = devm_ioremap(&pdev->dev, res->start, resource_size(res));
if (mem == NULL) {
dev_err(&pdev->dev, "ioremap failed\n");
return -ENOMEM;
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_aic.c b/drivers/net/wireless/ath/ath9k/ar9003_aic.c
index 547cd46da260..d0f1e8bcc846 100644
--- a/drivers/net/wireless/ath/ath9k/ar9003_aic.c
+++ b/drivers/net/wireless/ath/ath9k/ar9003_aic.c
@@ -406,7 +406,7 @@ static bool ar9003_aic_cal_post_process(struct ath_hw *ah)
sram.com_att_6db =
ar9003_aic_find_index(1, fixed_com_att_db);
- sram.valid = 1;
+ sram.valid = true;
sram.rot_dir_att_db =
min(max(rot_dir_path_att_db,
diff --git a/drivers/net/wireless/ath/ath9k/hif_usb.c b/drivers/net/wireless/ath/ath9k/hif_usb.c
index fb649d85b8fc..dd0c32379375 100644
--- a/drivers/net/wireless/ath/ath9k/hif_usb.c
+++ b/drivers/net/wireless/ath/ath9k/hif_usb.c
@@ -1216,7 +1216,7 @@ err_fw:
static int send_eject_command(struct usb_interface *interface)
{
struct usb_device *udev = interface_to_usbdev(interface);
- struct usb_host_interface *iface_desc = &interface->altsetting[0];
+ struct usb_host_interface *iface_desc = interface->cur_altsetting;
struct usb_endpoint_descriptor *endpoint;
unsigned char *cmd;
u8 bulk_out_ep;
diff --git a/drivers/net/wireless/ath/regd.c b/drivers/net/wireless/ath/regd.c
index 20f4f8ea9f89..bee9110b91f3 100644
--- a/drivers/net/wireless/ath/regd.c
+++ b/drivers/net/wireless/ath/regd.c
@@ -666,14 +666,14 @@ ath_regd_init_wiphy(struct ath_regulatory *reg,
/*
* Some users have reported their EEPROM programmed with
- * 0x8000 set, this is not a supported regulatory domain
- * but since we have more than one user with it we need
- * a solution for them. We default to 0x64, which is the
- * default Atheros world regulatory domain.
+ * 0x8000 or 0x0 set; this is not a supported regulatory
+ * domain but since we have more than one user with it we
+ * need a solution for them. We default to 0x64, which is
+ * the default Atheros world regulatory domain.
*/
static void ath_regd_sanitize(struct ath_regulatory *reg)
{
- if (reg->current_rd != COUNTRY_ERD_FLAG)
+ if (reg->current_rd != COUNTRY_ERD_FLAG && reg->current_rd != 0)
return;
printk(KERN_DEBUG "ath: EEPROM regdomain sanitized\n");
reg->current_rd = 0x64;
diff --git a/drivers/net/wireless/ath/wcn36xx/main.c b/drivers/net/wireless/ath/wcn36xx/main.c
index c30fdd0cbf1e..e49c306e0eef 100644
--- a/drivers/net/wireless/ath/wcn36xx/main.c
+++ b/drivers/net/wireless/ath/wcn36xx/main.c
@@ -1169,7 +1169,6 @@ static int wcn36xx_init_ieee80211(struct wcn36xx *wcn)
ieee80211_hw_set(wcn->hw, TIMING_BEACON_ONLY);
ieee80211_hw_set(wcn->hw, AMPDU_AGGREGATION);
- ieee80211_hw_set(wcn->hw, CONNECTION_MONITOR);
ieee80211_hw_set(wcn->hw, SUPPORTS_PS);
ieee80211_hw_set(wcn->hw, SIGNAL_DBM);
ieee80211_hw_set(wcn->hw, HAS_RATE_CONTROL);
diff --git a/drivers/net/wireless/ath/wcn36xx/smd.c b/drivers/net/wireless/ath/wcn36xx/smd.c
index 523550f94a3f..77269ac7f352 100644
--- a/drivers/net/wireless/ath/wcn36xx/smd.c
+++ b/drivers/net/wireless/ath/wcn36xx/smd.c
@@ -1620,7 +1620,7 @@ int wcn36xx_smd_send_beacon(struct wcn36xx *wcn, struct ieee80211_vif *vif,
msg_body.beacon_length6 = msg_body.beacon_length + 6;
if (msg_body.beacon_length > BEACON_TEMPLATE_SIZE) {
- wcn36xx_err("Beacon is to big: beacon size=%d\n",
+ wcn36xx_err("Beacon is too big: beacon size=%d\n",
msg_body.beacon_length);
ret = -ENOMEM;
goto out;
diff --git a/drivers/net/wireless/ath/wil6210/cfg80211.c b/drivers/net/wireless/ath/wil6210/cfg80211.c
index 7d6f14420855..0851d2bede89 100644
--- a/drivers/net/wireless/ath/wil6210/cfg80211.c
+++ b/drivers/net/wireless/ath/wil6210/cfg80211.c
@@ -2579,6 +2579,38 @@ wil_cfg80211_update_ft_ies(struct wiphy *wiphy, struct net_device *dev,
return rc;
}
+static int wil_cfg80211_set_multicast_to_unicast(struct wiphy *wiphy,
+ struct net_device *dev,
+ const bool enabled)
+{
+ struct wil6210_priv *wil = wiphy_to_wil(wiphy);
+
+ if (wil->multicast_to_unicast == enabled)
+ return 0;
+
+ wil_info(wil, "set multicast to unicast, enabled=%d\n", enabled);
+ wil->multicast_to_unicast = enabled;
+
+ return 0;
+}
+
+static int wil_cfg80211_set_cqm_rssi_config(struct wiphy *wiphy,
+ struct net_device *dev,
+ s32 rssi_thold, u32 rssi_hyst)
+{
+ struct wil6210_priv *wil = wiphy_to_wil(wiphy);
+ int rc;
+
+ wil->cqm_rssi_thold = rssi_thold;
+
+ rc = wmi_set_cqm_rssi_config(wil, rssi_thold, rssi_hyst);
+ if (rc)
+ /* reset stored value upon failure */
+ wil->cqm_rssi_thold = 0;
+
+ return rc;
+}
+
static const struct cfg80211_ops wil_cfg80211_ops = {
.add_virtual_intf = wil_cfg80211_add_iface,
.del_virtual_intf = wil_cfg80211_del_iface,
@@ -2610,11 +2642,13 @@ static const struct cfg80211_ops wil_cfg80211_ops = {
.start_p2p_device = wil_cfg80211_start_p2p_device,
.stop_p2p_device = wil_cfg80211_stop_p2p_device,
.set_power_mgmt = wil_cfg80211_set_power_mgmt,
+ .set_cqm_rssi_config = wil_cfg80211_set_cqm_rssi_config,
.suspend = wil_cfg80211_suspend,
.resume = wil_cfg80211_resume,
.sched_scan_start = wil_cfg80211_sched_scan_start,
.sched_scan_stop = wil_cfg80211_sched_scan_stop,
.update_ft_ies = wil_cfg80211_update_ft_ies,
+ .set_multicast_to_unicast = wil_cfg80211_set_multicast_to_unicast,
};
static void wil_wiphy_init(struct wiphy *wiphy)
diff --git a/drivers/net/wireless/ath/wil6210/ethtool.c b/drivers/net/wireless/ath/wil6210/ethtool.c
index 912c4eaf017b..fef10886ca4a 100644
--- a/drivers/net/wireless/ath/wil6210/ethtool.c
+++ b/drivers/net/wireless/ath/wil6210/ethtool.c
@@ -11,26 +11,6 @@
#include "wil6210.h"
-static int wil_ethtoolops_begin(struct net_device *ndev)
-{
- struct wil6210_priv *wil = ndev_to_wil(ndev);
-
- mutex_lock(&wil->mutex);
-
- wil_dbg_misc(wil, "ethtoolops_begin\n");
-
- return 0;
-}
-
-static void wil_ethtoolops_complete(struct net_device *ndev)
-{
- struct wil6210_priv *wil = ndev_to_wil(ndev);
-
- wil_dbg_misc(wil, "ethtoolops_complete\n");
-
- mutex_unlock(&wil->mutex);
-}
-
static int wil_ethtoolops_get_coalesce(struct net_device *ndev,
struct ethtool_coalesce *cp)
{
@@ -39,11 +19,12 @@ static int wil_ethtoolops_get_coalesce(struct net_device *ndev,
u32 rx_itr_en, rx_itr_val = 0;
int ret;
+ mutex_lock(&wil->mutex);
wil_dbg_misc(wil, "ethtoolops_get_coalesce\n");
ret = wil_pm_runtime_get(wil);
if (ret < 0)
- return ret;
+ goto out;
tx_itr_en = wil_r(wil, RGF_DMA_ITR_TX_CNT_CTL);
if (tx_itr_en & BIT_DMA_ITR_TX_CNT_CTL_EN)
@@ -57,7 +38,11 @@ static int wil_ethtoolops_get_coalesce(struct net_device *ndev,
cp->tx_coalesce_usecs = tx_itr_val;
cp->rx_coalesce_usecs = rx_itr_val;
- return 0;
+ ret = 0;
+
+out:
+ mutex_unlock(&wil->mutex);
+ return ret;
}
static int wil_ethtoolops_set_coalesce(struct net_device *ndev,
@@ -67,12 +52,14 @@ static int wil_ethtoolops_set_coalesce(struct net_device *ndev,
struct wireless_dev *wdev = ndev->ieee80211_ptr;
int ret;
+ mutex_lock(&wil->mutex);
wil_dbg_misc(wil, "ethtoolops_set_coalesce: rx %d usec, tx %d usec\n",
cp->rx_coalesce_usecs, cp->tx_coalesce_usecs);
if (wdev->iftype == NL80211_IFTYPE_MONITOR) {
wil_dbg_misc(wil, "No IRQ coalescing in monitor mode\n");
- return -EINVAL;
+ ret = -EINVAL;
+ goto out;
}
/* only @rx_coalesce_usecs and @tx_coalesce_usecs supported,
@@ -88,24 +75,26 @@ static int wil_ethtoolops_set_coalesce(struct net_device *ndev,
ret = wil_pm_runtime_get(wil);
if (ret < 0)
- return ret;
+ goto out;
wil->txrx_ops.configure_interrupt_moderation(wil);
wil_pm_runtime_put(wil);
+ ret = 0;
- return 0;
+out:
+ mutex_unlock(&wil->mutex);
+ return ret;
out_bad:
wil_dbg_misc(wil, "Unsupported coalescing params. Raw command:\n");
print_hex_dump_debug("DBG[MISC] coal ", DUMP_PREFIX_OFFSET, 16, 4,
cp, sizeof(*cp), false);
+ mutex_unlock(&wil->mutex);
return -EINVAL;
}
static const struct ethtool_ops wil_ethtool_ops = {
- .begin = wil_ethtoolops_begin,
- .complete = wil_ethtoolops_complete,
.get_drvinfo = cfg80211_get_drvinfo,
.get_coalesce = wil_ethtoolops_get_coalesce,
.set_coalesce = wil_ethtoolops_set_coalesce,
diff --git a/drivers/net/wireless/ath/wil6210/main.c b/drivers/net/wireless/ath/wil6210/main.c
index 06091d8a9e23..3ba5b2550a8c 100644
--- a/drivers/net/wireless/ath/wil6210/main.c
+++ b/drivers/net/wireless/ath/wil6210/main.c
@@ -762,7 +762,7 @@ int wil_priv_init(struct wil6210_priv *wil)
*/
wil->rx_buff_id_count = WIL_RX_BUFF_ARR_SIZE_DEFAULT;
- wil->amsdu_en = 1;
+ wil->amsdu_en = true;
return 0;
@@ -1654,6 +1654,8 @@ int wil_reset(struct wil6210_priv *wil, bool load_fw)
/* Disable device led before reset*/
wmi_led_cfg(wil, false);
+ down_write(&wil->mem_lock);
+
/* prevent NAPI from being scheduled and prevent wmi commands */
mutex_lock(&wil->wmi_mutex);
if (test_bit(wil_status_suspending, wil->status))
@@ -1702,6 +1704,7 @@ int wil_reset(struct wil6210_priv *wil, bool load_fw)
if (wil->secured_boot) {
wil_err(wil, "secured boot is not supported\n");
+ up_write(&wil->mem_lock);
return -ENOTSUPP;
}
@@ -1737,6 +1740,8 @@ int wil_reset(struct wil6210_priv *wil, bool load_fw)
clear_bit(wil_status_resetting, wil->status);
+ up_write(&wil->mem_lock);
+
if (load_fw) {
wil_unmask_irq(wil);
@@ -1786,6 +1791,7 @@ int wil_reset(struct wil6210_priv *wil, bool load_fw)
return rc;
out:
+ up_write(&wil->mem_lock);
clear_bit(wil_status_resetting, wil->status);
return rc;
}
@@ -1811,9 +1817,7 @@ int __wil_up(struct wil6210_priv *wil)
WARN_ON(!mutex_is_locked(&wil->mutex));
- down_write(&wil->mem_lock);
rc = wil_reset(wil, true);
- up_write(&wil->mem_lock);
if (rc)
return rc;
@@ -1905,9 +1909,7 @@ int __wil_down(struct wil6210_priv *wil)
wil_abort_scan_all_vifs(wil, false);
mutex_unlock(&wil->vif_mutex);
- down_write(&wil->mem_lock);
rc = wil_reset(wil, false);
- up_write(&wil->mem_lock);
return rc;
}
diff --git a/drivers/net/wireless/ath/wil6210/txrx.c b/drivers/net/wireless/ath/wil6210/txrx.c
index 8ebc6d59aa74..bc8c15fb609d 100644
--- a/drivers/net/wireless/ath/wil6210/txrx.c
+++ b/drivers/net/wireless/ath/wil6210/txrx.c
@@ -10,6 +10,7 @@
#include <linux/moduleparam.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
+#include <linux/if_vlan.h>
#include <net/ipv6.h>
#include <linux/prefetch.h>
@@ -1139,7 +1140,7 @@ static int wil_tx_desc_map(union wil_tx_desc *desc, dma_addr_t pa,
void wil_tx_data_init(struct wil_ring_tx_data *txdata)
{
spin_lock_bh(&txdata->lock);
- txdata->dot1x_open = 0;
+ txdata->dot1x_open = false;
txdata->enabled = 0;
txdata->idle = 0;
txdata->last_idle = 0;
@@ -1529,6 +1530,35 @@ static struct wil_ring *wil_find_tx_bcast_1(struct wil6210_priv *wil,
return v;
}
+/* apply multicast to unicast only for ARP and IP packets
+ * (see NL80211_CMD_SET_MULTICAST_TO_UNICAST for more info)
+ */
+static bool wil_check_multicast_to_unicast(struct wil6210_priv *wil,
+ struct sk_buff *skb)
+{
+ const struct ethhdr *eth = (void *)skb->data;
+ const struct vlan_ethhdr *ethvlan = (void *)skb->data;
+ __be16 ethertype;
+
+ if (!wil->multicast_to_unicast)
+ return false;
+
+ /* multicast to unicast conversion only for some payload */
+ ethertype = eth->h_proto;
+ if (ethertype == htons(ETH_P_8021Q) && skb->len >= VLAN_ETH_HLEN)
+ ethertype = ethvlan->h_vlan_encapsulated_proto;
+ switch (ethertype) {
+ case htons(ETH_P_ARP):
+ case htons(ETH_P_IP):
+ case htons(ETH_P_IPV6):
+ break;
+ default:
+ return false;
+ }
+
+ return true;
+}
+
static void wil_set_da_for_vring(struct wil6210_priv *wil,
struct sk_buff *skb, int vring_index)
{
@@ -2336,7 +2366,7 @@ netdev_tx_t wil_start_xmit(struct sk_buff *skb, struct net_device *ndev)
/* in STA mode (ESS), all to same VRING (to AP) */
ring = wil_find_tx_ring_sta(wil, vif, skb);
} else if (bcast) {
- if (vif->pbss)
+ if (vif->pbss || wil_check_multicast_to_unicast(wil, skb))
/* in pbss, no bcast VRING - duplicate skb in
* all stations VRINGs
*/
diff --git a/drivers/net/wireless/ath/wil6210/txrx_edma.c b/drivers/net/wireless/ath/wil6210/txrx_edma.c
index 778b63be6a9a..7bfe867c7509 100644
--- a/drivers/net/wireless/ath/wil6210/txrx_edma.c
+++ b/drivers/net/wireless/ath/wil6210/txrx_edma.c
@@ -869,6 +869,7 @@ static struct sk_buff *wil_sring_reap_rx_edma(struct wil6210_priv *wil,
u8 data_offset;
struct wil_rx_status_extended *s;
u16 sring_idx = sring - wil->srings;
+ int invalid_buff_id_retry;
BUILD_BUG_ON(sizeof(struct wil_rx_status_extended) > sizeof(skb->cb));
@@ -882,9 +883,9 @@ again:
/* Extract the buffer ID from the status message */
buff_id = le16_to_cpu(wil_rx_status_get_buff_id(msg));
+ invalid_buff_id_retry = 0;
while (!buff_id) {
struct wil_rx_status_extended *s;
- int invalid_buff_id_retry = 0;
wil_dbg_txrx(wil,
"buff_id is not updated yet by HW, (swhead 0x%x)\n",
@@ -902,6 +903,11 @@ again:
if (unlikely(!wil_val_in_range(buff_id, 1, wil->rx_buff_mgmt.size))) {
wil_err(wil, "Corrupt buff_id=%d, sring->swhead=%d\n",
buff_id, sring->swhead);
+ print_hex_dump(KERN_ERR, "RxS ", DUMP_PREFIX_OFFSET, 16, 1,
+ msg, wil->use_compressed_rx_status ?
+ sizeof(struct wil_rx_status_compressed) :
+ sizeof(struct wil_rx_status_extended), false);
+
wil_rx_status_reset_buff_id(sring);
wil_sring_advance_swhead(sring);
sring->invalid_buff_id_cnt++;
@@ -962,6 +968,11 @@ again:
if (unlikely(dmalen > sz)) {
wil_err(wil, "Rx size too large: %d bytes!\n", dmalen);
+ print_hex_dump(KERN_ERR, "RxS ", DUMP_PREFIX_OFFSET, 16, 1,
+ msg, wil->use_compressed_rx_status ?
+ sizeof(struct wil_rx_status_compressed) :
+ sizeof(struct wil_rx_status_extended), false);
+
stats->rx_large_frame++;
rxdata->skipping = true;
}
diff --git a/drivers/net/wireless/ath/wil6210/txrx_edma.h b/drivers/net/wireless/ath/wil6210/txrx_edma.h
index c744c65225da..c736f7413a35 100644
--- a/drivers/net/wireless/ath/wil6210/txrx_edma.h
+++ b/drivers/net/wireless/ath/wil6210/txrx_edma.h
@@ -46,7 +46,7 @@
#define WIL_RX_EDMA_DLPF_LU_MISS_TID_POS 5
-#define WIL_RX_EDMA_MID_VALID_BIT BIT(22)
+#define WIL_RX_EDMA_MID_VALID_BIT BIT(20)
#define WIL_EDMA_DESC_TX_MAC_CFG_0_QID_POS 16
#define WIL_EDMA_DESC_TX_MAC_CFG_0_QID_LEN 6
@@ -244,8 +244,8 @@ struct wil_ring_tx_status {
* calculated, Bit1- L4Err - TCP/UDP Checksum Error
* bit 7 : Reserved:1
* bit 8..19 : Flow ID:12 - MSDU flow ID
- * bit 20..21 : MID:2 - The MAC ID
- * bit 22 : MID_V:1 - The MAC ID field is valid
+ * bit 20 : MID_V:1 - The MAC ID field is valid
+ * bit 21..22 : MID:2 - The MAC ID
* bit 23 : L3T:1 - IP types: 0-IPv6, 1-IPv4
* bit 24 : L4T:1 - Layer 4 Type: 0-UDP, 1-TCP
* bit 25 : BC:1 - The received MPDU is broadcast
@@ -479,7 +479,7 @@ static inline int wil_rx_status_get_mid(void *msg)
return 0; /* use the default MID */
return WIL_GET_BITS(((struct wil_rx_status_compressed *)msg)->d0,
- 20, 21);
+ 21, 22);
}
static inline int wil_rx_status_get_error(void *msg)
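With the corrected layout documented in the hunk above (flow ID in bits 8..19, MID_V at bit 20, MID at bits 21..22, L3T at bit 23), decoding a status word d0 with the driver's WIL_GET_BITS() helper looks like this sketch (variable names are ours):

	u32 flow_id = WIL_GET_BITS(d0, 8, 19);
	bool mid_valid = WIL_GET_BITS(d0, 20, 20);
	u32 mid = mid_valid ? WIL_GET_BITS(d0, 21, 22) : 0;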
diff --git a/drivers/net/wireless/ath/wil6210/wil6210.h b/drivers/net/wireless/ath/wil6210/wil6210.h
index 97626bfd4dac..5dc881d3c057 100644
--- a/drivers/net/wireless/ath/wil6210/wil6210.h
+++ b/drivers/net/wireless/ath/wil6210/wil6210.h
@@ -1059,6 +1059,8 @@ struct wil6210_priv {
u32 max_agg_wsize;
u32 max_ampdu_size;
+ u8 multicast_to_unicast;
+ s32 cqm_rssi_thold;
};
#define wil_to_wiphy(i) (i->wiphy)
@@ -1148,7 +1150,7 @@ static inline void wil_c(struct wil6210_priv *wil, u32 reg, u32 val)
*/
static inline bool wil_cid_valid(struct wil6210_priv *wil, int cid)
{
- return (cid >= 0 && cid < wil->max_assoc_sta);
+ return (cid >= 0 && cid < wil->max_assoc_sta && cid < WIL6210_MAX_CID);
}
void wil_get_board_file(struct wil6210_priv *wil, char *buf, size_t len);
@@ -1440,4 +1442,6 @@ int wmi_addba_rx_resp_edma(struct wil6210_priv *wil, u8 mid, u8 cid,
void update_supported_bands(struct wil6210_priv *wil);
void wil_clear_fw_log_addr(struct wil6210_priv *wil);
+int wmi_set_cqm_rssi_config(struct wil6210_priv *wil,
+ s32 rssi_thold, u32 rssi_hyst);
#endif /* __WIL6210_H__ */
diff --git a/drivers/net/wireless/ath/wil6210/wil_crash_dump.c b/drivers/net/wireless/ath/wil6210/wil_crash_dump.c
index 1332eb8c831f..89c12cb2aaab 100644
--- a/drivers/net/wireless/ath/wil6210/wil_crash_dump.c
+++ b/drivers/net/wireless/ath/wil6210/wil_crash_dump.c
@@ -46,7 +46,7 @@ static int wil_fw_get_crash_dump_bounds(struct wil6210_priv *wil,
int wil_fw_copy_crash_dump(struct wil6210_priv *wil, void *dest, u32 size)
{
- int i, rc;
+ int i;
const struct fw_map *map;
void *data;
u32 host_min, dump_size, offset, len;
@@ -62,9 +62,15 @@ int wil_fw_copy_crash_dump(struct wil6210_priv *wil, void *dest, u32 size)
return -EINVAL;
}
- rc = wil_mem_access_lock(wil);
- if (rc)
- return rc;
+ down_write(&wil->mem_lock);
+
+ if (test_bit(wil_status_suspending, wil->status) ||
+ test_bit(wil_status_suspended, wil->status)) {
+ wil_err(wil,
+ "suspend/resume in progress. cannot copy crash dump\n");
+ up_write(&wil->mem_lock);
+ return -EBUSY;
+ }
/* copy to crash dump area */
for (i = 0; i < ARRAY_SIZE(fw_mapping); i++) {
@@ -84,7 +90,8 @@ int wil_fw_copy_crash_dump(struct wil6210_priv *wil, void *dest, u32 size)
wil_memcpy_fromio_32((void * __force)(dest + offset),
(const void __iomem * __force)data, len);
}
- wil_mem_access_unlock(wil);
+
+ up_write(&wil->mem_lock);
return 0;
}
diff --git a/drivers/net/wireless/ath/wil6210/wmi.c b/drivers/net/wireless/ath/wil6210/wmi.c
index 7a0d934eb271..23e1ed6a9d6d 100644
--- a/drivers/net/wireless/ath/wil6210/wmi.c
+++ b/drivers/net/wireless/ath/wil6210/wmi.c
@@ -196,8 +196,8 @@ const struct fw_map talyn_mb_fw_mapping[] = {
{0x8c0000, 0x8c0210, 0x8c0000, "dum_user_rgf", true, true},
/* DMA OFU 296b */
{0x8c2000, 0x8c2128, 0x8c2000, "dma_ofu", true, true},
- /* ucode debug 4k */
- {0x8c3000, 0x8c4000, 0x8c3000, "ucode_debug", true, true},
+ /* ucode debug 256b */
+ {0x8c3000, 0x8c3100, 0x8c3000, "ucode_debug", true, true},
/* upper area 1536k */
{0x900000, 0xa80000, 0x900000, "upper", true, true},
/* UCODE areas - accessible by debugfs blobs but not by
@@ -476,6 +476,8 @@ static const char *cmdid2name(u16 cmdid)
return "WMI_RBUFCAP_CFG_CMD";
case WMI_TEMP_SENSE_ALL_CMDID:
return "WMI_TEMP_SENSE_ALL_CMDID";
+ case WMI_SET_LINK_MONITOR_CMDID:
+ return "WMI_SET_LINK_MONITOR_CMD";
default:
return "Untracked CMD";
}
@@ -624,6 +626,10 @@ static const char *eventid2name(u16 eventid)
return "WMI_RBUFCAP_CFG_EVENT";
case WMI_TEMP_SENSE_ALL_DONE_EVENTID:
return "WMI_TEMP_SENSE_ALL_DONE_EVENTID";
+ case WMI_SET_LINK_MONITOR_EVENTID:
+ return "WMI_SET_LINK_MONITOR_EVENT";
+ case WMI_LINK_MONITOR_EVENTID:
+ return "WMI_LINK_MONITOR_EVENT";
default:
return "Untracked EVENT";
}
@@ -1507,14 +1513,14 @@ static void wmi_link_stats_parse(struct wil6210_vif *vif, u64 tsf,
if (vif->fw_stats_ready) {
/* clean old statistics */
vif->fw_stats_tsf = 0;
- vif->fw_stats_ready = 0;
+ vif->fw_stats_ready = false;
}
wil_link_stats_store_basic(vif, payload + hdr_size);
if (!has_next) {
vif->fw_stats_tsf = tsf;
- vif->fw_stats_ready = 1;
+ vif->fw_stats_ready = true;
}
break;
@@ -1529,14 +1535,14 @@ static void wmi_link_stats_parse(struct wil6210_vif *vif, u64 tsf,
if (wil->fw_stats_global.ready) {
/* clean old statistics */
wil->fw_stats_global.tsf = 0;
- wil->fw_stats_global.ready = 0;
+ wil->fw_stats_global.ready = false;
}
wil_link_stats_store_global(vif, payload + hdr_size);
if (!has_next) {
wil->fw_stats_global.tsf = tsf;
- wil->fw_stats_global.ready = 1;
+ wil->fw_stats_global.ready = true;
}
break;
@@ -1836,6 +1842,32 @@ fail:
wil6210_disconnect(vif, NULL, WLAN_REASON_PREV_AUTH_NOT_VALID);
}
+static void
+wmi_evt_link_monitor(struct wil6210_vif *vif, int id, void *d, int len)
+{
+ struct wil6210_priv *wil = vif_to_wil(vif);
+ struct net_device *ndev = vif_to_ndev(vif);
+ struct wmi_link_monitor_event *evt = d;
+ enum nl80211_cqm_rssi_threshold_event event_type;
+
+ if (len < sizeof(*evt)) {
+ wil_err(wil, "link monitor event too short %d\n", len);
+ return;
+ }
+
+ wil_dbg_wmi(wil, "link monitor event, type %d rssi %d (stored %d)\n",
+ evt->type, evt->rssi_level, wil->cqm_rssi_thold);
+
+ if (evt->type != WMI_LINK_MONITOR_NOTIF_RSSI_THRESHOLD_EVT)
+ /* ignore */
+ return;
+
+ event_type = (evt->rssi_level > wil->cqm_rssi_thold ?
+ NL80211_CQM_RSSI_THRESHOLD_EVENT_HIGH :
+ NL80211_CQM_RSSI_THRESHOLD_EVENT_LOW);
+ cfg80211_cqm_rssi_notify(ndev, event_type, evt->rssi_level, GFP_KERNEL);
+}
+
/**
* Some events are ignored for purpose; and need not be interpreted as
* "unhandled events"
@@ -1869,6 +1901,7 @@ static const struct {
{WMI_LINK_STATS_EVENTID, wmi_evt_link_stats},
{WMI_FT_AUTH_STATUS_EVENTID, wmi_evt_auth_status},
{WMI_FT_REASSOC_STATUS_EVENTID, wmi_evt_reassoc_status},
+ {WMI_LINK_MONITOR_EVENTID, wmi_evt_link_monitor},
};
/*
@@ -3981,3 +4014,46 @@ int wmi_link_stats_cfg(struct wil6210_vif *vif, u32 type, u8 cid, u32 interval)
return 0;
}
+
+int wmi_set_cqm_rssi_config(struct wil6210_priv *wil,
+ s32 rssi_thold, u32 rssi_hyst)
+{
+ struct net_device *ndev = wil->main_ndev;
+ struct wil6210_vif *vif = ndev_to_vif(ndev);
+ int rc;
+ struct {
+ struct wmi_set_link_monitor_cmd cmd;
+ s8 rssi_thold;
+ } __packed cmd = {
+ .cmd = {
+ .rssi_hyst = rssi_hyst,
+ .rssi_thresholds_list_size = 1,
+ },
+ .rssi_thold = rssi_thold,
+ };
+ struct {
+ struct wmi_cmd_hdr hdr;
+ struct wmi_set_link_monitor_event evt;
+ } __packed reply = {
+ .evt = {.status = WMI_FW_STATUS_FAILURE},
+ };
+
+ if (rssi_thold > S8_MAX || rssi_thold < S8_MIN || rssi_hyst > U8_MAX)
+ return -EINVAL;
+
+ rc = wmi_call(wil, WMI_SET_LINK_MONITOR_CMDID, vif->mid, &cmd,
+ sizeof(cmd), WMI_SET_LINK_MONITOR_EVENTID,
+ &reply, sizeof(reply), WIL_WMI_CALL_GENERAL_TO_MS);
+ if (rc) {
+ wil_err(wil, "WMI_SET_LINK_MONITOR_CMDID failed, rc %d\n", rc);
+ return rc;
+ }
+
+ if (reply.evt.status != WMI_FW_STATUS_SUCCESS) {
+ wil_err(wil, "WMI_SET_LINK_MONITOR_CMDID failed, status %d\n",
+ reply.evt.status);
+ return -EINVAL;
+ }
+
+ return 0;
+}
diff --git a/drivers/net/wireless/ath/wil6210/wmi.h b/drivers/net/wireless/ath/wil6210/wmi.h
index 6bd4ccee28ab..e3558136e0c4 100644
--- a/drivers/net/wireless/ath/wil6210/wmi.h
+++ b/drivers/net/wireless/ath/wil6210/wmi.h
@@ -192,6 +192,7 @@ enum wmi_command_id {
WMI_RCP_ADDBA_RESP_EDMA_CMDID = 0x83B,
WMI_LINK_MAINTAIN_CFG_WRITE_CMDID = 0x842,
WMI_LINK_MAINTAIN_CFG_READ_CMDID = 0x843,
+ WMI_SET_LINK_MONITOR_CMDID = 0x845,
WMI_SET_SECTORS_CMDID = 0x849,
WMI_MAINTAIN_PAUSE_CMDID = 0x850,
WMI_MAINTAIN_RESUME_CMDID = 0x851,
@@ -1973,6 +1974,7 @@ enum wmi_event_id {
WMI_REPORT_STATISTICS_EVENTID = 0x100B,
WMI_FT_AUTH_STATUS_EVENTID = 0x100C,
WMI_FT_REASSOC_STATUS_EVENTID = 0x100D,
+ WMI_LINK_MONITOR_EVENTID = 0x100E,
WMI_RADAR_GENERAL_CONFIG_EVENTID = 0x1100,
WMI_RADAR_CONFIG_SELECT_EVENTID = 0x1101,
WMI_RADAR_PARAMS_CONFIG_EVENTID = 0x1102,
@@ -2024,6 +2026,7 @@ enum wmi_event_id {
WMI_TX_MGMT_PACKET_EVENTID = 0x1841,
WMI_LINK_MAINTAIN_CFG_WRITE_DONE_EVENTID = 0x1842,
WMI_LINK_MAINTAIN_CFG_READ_DONE_EVENTID = 0x1843,
+ WMI_SET_LINK_MONITOR_EVENTID = 0x1845,
WMI_RF_XPM_READ_RESULT_EVENTID = 0x1856,
WMI_RF_XPM_WRITE_RESULT_EVENTID = 0x1857,
WMI_LED_CFG_DONE_EVENTID = 0x1858,
@@ -3312,6 +3315,36 @@ struct wmi_link_maintain_cfg_read_cmd {
__le32 cid;
} __packed;
+/* WMI_SET_LINK_MONITOR_CMDID */
+struct wmi_set_link_monitor_cmd {
+ u8 rssi_hyst;
+ u8 reserved[12];
+ u8 rssi_thresholds_list_size;
+ s8 rssi_thresholds_list[0];
+} __packed;
+
+/* wmi_link_monitor_event_type */
+enum wmi_link_monitor_event_type {
+ WMI_LINK_MONITOR_NOTIF_RSSI_THRESHOLD_EVT = 0x00,
+ WMI_LINK_MONITOR_NOTIF_TX_ERR_EVT = 0x01,
+ WMI_LINK_MONITOR_NOTIF_THERMAL_EVT = 0x02,
+};
+
+/* WMI_SET_LINK_MONITOR_EVENTID */
+struct wmi_set_link_monitor_event {
+ /* wmi_fw_status */
+ u8 status;
+ u8 reserved[3];
+} __packed;
+
+/* WMI_LINK_MONITOR_EVENTID */
+struct wmi_link_monitor_event {
+ /* link_monitor_event_type */
+ u8 type;
+ s8 rssi_level;
+ u8 reserved[2];
+} __packed;
+
/* WMI_LINK_MAINTAIN_CFG_WRITE_DONE_EVENTID */
struct wmi_link_maintain_cfg_write_done_event {
/* requested connection ID */
diff --git a/drivers/net/wireless/atmel/at76c50x-usb.c b/drivers/net/wireless/atmel/at76c50x-usb.c
index db2c3b8d491e..3b2680772f03 100644
--- a/drivers/net/wireless/atmel/at76c50x-usb.c
+++ b/drivers/net/wireless/atmel/at76c50x-usb.c
@@ -2236,7 +2236,7 @@ static int at76_alloc_urbs(struct at76_priv *priv,
at76_dbg(DBG_PROC_ENTRY, "%s: ENTER", __func__);
at76_dbg(DBG_URB, "%s: NumEndpoints %d ", __func__,
- interface->altsetting[0].desc.bNumEndpoints);
+ interface->cur_altsetting->desc.bNumEndpoints);
ep_in = NULL;
ep_out = NULL;
diff --git a/drivers/net/wireless/broadcom/b43legacy/main.c b/drivers/net/wireless/broadcom/b43legacy/main.c
index 4325e91736eb..8b6b657c4b85 100644
--- a/drivers/net/wireless/broadcom/b43legacy/main.c
+++ b/drivers/net/wireless/broadcom/b43legacy/main.c
@@ -1275,8 +1275,9 @@ static void handle_irq_ucode_debug(struct b43legacy_wldev *dev)
}
/* Interrupt handler bottom-half */
-static void b43legacy_interrupt_tasklet(struct b43legacy_wldev *dev)
+static void b43legacy_interrupt_tasklet(unsigned long data)
{
+ struct b43legacy_wldev *dev = (struct b43legacy_wldev *)data;
u32 reason;
u32 dma_reason[ARRAY_SIZE(dev->dma_reason)];
u32 merged_dma_reason = 0;
@@ -3741,7 +3742,7 @@ static int b43legacy_one_core_attach(struct ssb_device *dev,
b43legacy_set_status(wldev, B43legacy_STAT_UNINIT);
wldev->bad_frames_preempt = modparam_bad_frames_preempt;
tasklet_init(&wldev->isr_tasklet,
- (void (*)(unsigned long))b43legacy_interrupt_tasklet,
+ b43legacy_interrupt_tasklet,
(unsigned long)wldev);
if (modparam_pio)
wldev->__using_pio = true;
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcmsdh.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcmsdh.c
index 96fd8e2bf773..b684a5b6d904 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcmsdh.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcmsdh.c
@@ -43,6 +43,7 @@
#define SDIO_FUNC1_BLOCKSIZE 64
#define SDIO_FUNC2_BLOCKSIZE 512
+#define SDIO_4359_FUNC2_BLOCKSIZE 256
/* Maximum milliseconds to wait for F2 to come up */
#define SDIO_WAIT_F2RDY 3000
@@ -119,7 +120,7 @@ int brcmf_sdiod_intr_register(struct brcmf_sdio_dev *sdiodev)
brcmf_err("enable_irq_wake failed %d\n", ret);
return ret;
}
- sdiodev->irq_wake = true;
+ disable_irq_wake(pdata->oob_irq_nr);
sdio_claim_host(sdiodev->func1);
@@ -178,10 +179,6 @@ void brcmf_sdiod_intr_unregister(struct brcmf_sdio_dev *sdiodev)
sdio_release_host(sdiodev->func1);
sdiodev->oob_irq_requested = false;
- if (sdiodev->irq_wake) {
- disable_irq_wake(pdata->oob_irq_nr);
- sdiodev->irq_wake = false;
- }
free_irq(pdata->oob_irq_nr, &sdiodev->func1->dev);
sdiodev->irq_en = false;
sdiodev->oob_irq_requested = false;
@@ -903,6 +900,7 @@ static void brcmf_sdiod_host_fixup(struct mmc_host *host)
static int brcmf_sdiod_probe(struct brcmf_sdio_dev *sdiodev)
{
int ret = 0;
+ unsigned int f2_blksz = SDIO_FUNC2_BLOCKSIZE;
sdio_claim_host(sdiodev->func1);
@@ -912,7 +910,9 @@ static int brcmf_sdiod_probe(struct brcmf_sdio_dev *sdiodev)
sdio_release_host(sdiodev->func1);
goto out;
}
- ret = sdio_set_block_size(sdiodev->func2, SDIO_FUNC2_BLOCKSIZE);
+ if (sdiodev->func2->device == SDIO_DEVICE_ID_BROADCOM_4359)
+ f2_blksz = SDIO_4359_FUNC2_BLOCKSIZE;
+ ret = sdio_set_block_size(sdiodev->func2, f2_blksz);
if (ret) {
brcmf_err("Failed to set F2 blocksize\n");
sdio_release_host(sdiodev->func1);
@@ -969,8 +969,10 @@ static const struct sdio_device_id brcmf_sdmmc_ids[] = {
BRCMF_SDIO_DEVICE(SDIO_DEVICE_ID_BROADCOM_43455),
BRCMF_SDIO_DEVICE(SDIO_DEVICE_ID_BROADCOM_4354),
BRCMF_SDIO_DEVICE(SDIO_DEVICE_ID_BROADCOM_4356),
+ BRCMF_SDIO_DEVICE(SDIO_DEVICE_ID_BROADCOM_4359),
BRCMF_SDIO_DEVICE(SDIO_DEVICE_ID_CYPRESS_4373),
BRCMF_SDIO_DEVICE(SDIO_DEVICE_ID_CYPRESS_43012),
+ BRCMF_SDIO_DEVICE(SDIO_DEVICE_ID_CYPRESS_89359),
{ /* end: all zeroes */ }
};
MODULE_DEVICE_TABLE(sdio, brcmf_sdmmc_ids);
@@ -1167,6 +1169,10 @@ static int brcmf_ops_sdio_resume(struct device *dev)
if (ret)
brcmf_err("Failed to probe device on resume\n");
} else {
+ if (sdiodev->wowl_enabled &&
+ sdiodev->settings->bus.sdio.oob_irq_supported)
+ disable_irq_wake(sdiodev->settings->bus.sdio.oob_irq_nr);
+
brcmf_sdiod_freezer_off(sdiodev);
}
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c
index 5598bbd09b62..a2328d3eee03 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c
@@ -11,6 +11,7 @@
#include <linux/vmalloc.h>
#include <net/cfg80211.h>
#include <net/netlink.h>
+#include <uapi/linux/if_arp.h>
#include <brcmu_utils.h>
#include <defs.h>
@@ -619,6 +620,82 @@ static bool brcmf_is_ibssmode(struct brcmf_cfg80211_vif *vif)
return vif->wdev.iftype == NL80211_IFTYPE_ADHOC;
}
+/**
+ * brcmf_mon_add_vif() - create monitor mode virtual interface
+ *
+ * @wiphy: wiphy device of new interface.
+ * @name: name of the new interface.
+ */
+static struct wireless_dev *brcmf_mon_add_vif(struct wiphy *wiphy,
+ const char *name)
+{
+ struct brcmf_cfg80211_info *cfg = wiphy_to_cfg(wiphy);
+ struct brcmf_cfg80211_vif *vif;
+ struct net_device *ndev;
+ struct brcmf_if *ifp;
+ int err;
+
+ if (cfg->pub->mon_if) {
+ err = -EEXIST;
+ goto err_out;
+ }
+
+ vif = brcmf_alloc_vif(cfg, NL80211_IFTYPE_MONITOR);
+ if (IS_ERR(vif)) {
+ err = PTR_ERR(vif);
+ goto err_out;
+ }
+
+ ndev = alloc_netdev(sizeof(*ifp), name, NET_NAME_UNKNOWN, ether_setup);
+ if (!ndev) {
+ err = -ENOMEM;
+ goto err_free_vif;
+ }
+ ndev->type = ARPHRD_IEEE80211_RADIOTAP;
+ ndev->ieee80211_ptr = &vif->wdev;
+ ndev->needs_free_netdev = true;
+ ndev->priv_destructor = brcmf_cfg80211_free_netdev;
+ SET_NETDEV_DEV(ndev, wiphy_dev(cfg->wiphy));
+
+ ifp = netdev_priv(ndev);
+ ifp->vif = vif;
+ ifp->ndev = ndev;
+ ifp->drvr = cfg->pub;
+
+ vif->ifp = ifp;
+ vif->wdev.netdev = ndev;
+
+ err = brcmf_net_mon_attach(ifp);
+ if (err) {
+ brcmf_err("Failed to attach %s device\n", ndev->name);
+ free_netdev(ndev);
+ goto err_free_vif;
+ }
+
+ cfg->pub->mon_if = ifp;
+
+ return &vif->wdev;
+
+err_free_vif:
+ brcmf_free_vif(vif);
+err_out:
+ return ERR_PTR(err);
+}
+
+static int brcmf_mon_del_vif(struct wiphy *wiphy, struct wireless_dev *wdev)
+{
+ struct brcmf_cfg80211_info *cfg = wiphy_to_cfg(wiphy);
+ struct net_device *ndev = wdev->netdev;
+
+ ndev->netdev_ops->ndo_stop(ndev);
+
+ brcmf_net_detach(ndev, true);
+
+ cfg->pub->mon_if = NULL;
+
+ return 0;
+}
+
static struct wireless_dev *brcmf_cfg80211_add_iface(struct wiphy *wiphy,
const char *name,
unsigned char name_assign_type,
@@ -641,9 +718,10 @@ static struct wireless_dev *brcmf_cfg80211_add_iface(struct wiphy *wiphy,
case NL80211_IFTYPE_STATION:
case NL80211_IFTYPE_AP_VLAN:
case NL80211_IFTYPE_WDS:
- case NL80211_IFTYPE_MONITOR:
case NL80211_IFTYPE_MESH_POINT:
return ERR_PTR(-EOPNOTSUPP);
+ case NL80211_IFTYPE_MONITOR:
+ return brcmf_mon_add_vif(wiphy, name);
case NL80211_IFTYPE_AP:
wdev = brcmf_ap_add_vif(wiphy, name, params);
break;
@@ -826,9 +904,10 @@ int brcmf_cfg80211_del_iface(struct wiphy *wiphy, struct wireless_dev *wdev)
case NL80211_IFTYPE_STATION:
case NL80211_IFTYPE_AP_VLAN:
case NL80211_IFTYPE_WDS:
- case NL80211_IFTYPE_MONITOR:
case NL80211_IFTYPE_MESH_POINT:
return -EOPNOTSUPP;
+ case NL80211_IFTYPE_MONITOR:
+ return brcmf_mon_del_vif(wiphy, wdev);
case NL80211_IFTYPE_AP:
return brcmf_cfg80211_del_ap_iface(wiphy, wdev);
case NL80211_IFTYPE_P2P_CLIENT:
@@ -5363,6 +5442,7 @@ struct brcmf_cfg80211_vif *brcmf_alloc_vif(struct brcmf_cfg80211_info *cfg,
struct brcmf_cfg80211_vif *vif_walk;
struct brcmf_cfg80211_vif *vif;
bool mbss;
+ struct brcmf_if *ifp = brcmf_get_ifp(cfg->pub, 0);
brcmf_dbg(TRACE, "allocating virtual interface (size=%zu)\n",
sizeof(*vif));
@@ -5375,7 +5455,8 @@ struct brcmf_cfg80211_vif *brcmf_alloc_vif(struct brcmf_cfg80211_info *cfg,
brcmf_init_prof(&vif->profile);
- if (type == NL80211_IFTYPE_AP) {
+ if (type == NL80211_IFTYPE_AP &&
+ brcmf_feat_is_enabled(ifp, BRCMF_FEAT_MBSS)) {
mbss = false;
list_for_each_entry(vif_walk, &cfg->vif_list, list) {
if (vif_walk->wdev.iftype == NL80211_IFTYPE_AP) {
@@ -6012,19 +6093,17 @@ static s32 brcmf_dongle_roam(struct brcmf_if *ifp)
roamtrigger[1] = cpu_to_le32(BRCM_BAND_ALL);
err = brcmf_fil_cmd_data_set(ifp, BRCMF_C_SET_ROAM_TRIGGER,
(void *)roamtrigger, sizeof(roamtrigger));
- if (err) {
+ if (err)
bphy_err(drvr, "WLC_SET_ROAM_TRIGGER error (%d)\n", err);
- goto roam_setup_done;
- }
roam_delta[0] = cpu_to_le32(WL_ROAM_DELTA);
roam_delta[1] = cpu_to_le32(BRCM_BAND_ALL);
err = brcmf_fil_cmd_data_set(ifp, BRCMF_C_SET_ROAM_DELTA,
(void *)roam_delta, sizeof(roam_delta));
- if (err) {
+ if (err)
bphy_err(drvr, "WLC_SET_ROAM_DELTA error (%d)\n", err);
- goto roam_setup_done;
- }
+
+ return 0;
roam_setup_done:
return err;
@@ -6522,6 +6601,9 @@ brcmf_txrx_stypes[NUM_NL80211_IFTYPES] = {
* #STA <= 1, #AP <= 1, channels = 1, 2 total
* #AP <= 4, matching BI, channels = 1, 4 total
*
+ * rsdb and no p2p:
+ * #STA <= 2, #AP <= 2, channels = 2, 4 total
+ *
* p2p, no mchan, and mbss:
*
* #STA <= 1, #P2P-DEV <= 1, #{P2P-CL, P2P-GO} <= 1, channels = 1, 3 total
@@ -6533,6 +6615,10 @@ brcmf_txrx_stypes[NUM_NL80211_IFTYPES] = {
* #STA <= 1, #P2P-DEV <= 1, #{P2P-CL, P2P-GO} <= 1, channels = 2, 3 total
* #STA <= 1, #P2P-DEV <= 1, #AP <= 1, #P2P-CL <= 1, channels = 1, 4 total
* #AP <= 4, matching BI, channels = 1, 4 total
+ *
+ * p2p, rsdb, and no mbss:
+ * #STA <= 2, #P2P-DEV <= 1, #{P2P-CL, P2P-GO} <= 2, AP <= 2,
+ * channels = 2, 4 total
*/
static int brcmf_setup_ifmodes(struct wiphy *wiphy, struct brcmf_if *ifp)
{
@@ -6540,13 +6626,16 @@ static int brcmf_setup_ifmodes(struct wiphy *wiphy, struct brcmf_if *ifp)
struct ieee80211_iface_limit *c0_limits = NULL;
struct ieee80211_iface_limit *p2p_limits = NULL;
struct ieee80211_iface_limit *mbss_limits = NULL;
- bool mbss, p2p;
- int i, c, n_combos;
+ bool mon_flag, mbss, p2p, rsdb, mchan;
+ int i, c, n_combos, n_limits;
+ mon_flag = brcmf_feat_is_enabled(ifp, BRCMF_FEAT_MONITOR_FLAG);
mbss = brcmf_feat_is_enabled(ifp, BRCMF_FEAT_MBSS);
p2p = brcmf_feat_is_enabled(ifp, BRCMF_FEAT_P2P);
+ rsdb = brcmf_feat_is_enabled(ifp, BRCMF_FEAT_RSDB);
+ mchan = brcmf_feat_is_enabled(ifp, BRCMF_FEAT_MCHAN);
- n_combos = 1 + !!p2p + !!mbss;
+ n_combos = 1 + !!(p2p && !rsdb) + !!mbss;
combo = kcalloc(n_combos, sizeof(*combo), GFP_KERNEL);
if (!combo)
goto err;
@@ -6554,37 +6643,53 @@ static int brcmf_setup_ifmodes(struct wiphy *wiphy, struct brcmf_if *ifp)
wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION) |
BIT(NL80211_IFTYPE_ADHOC) |
BIT(NL80211_IFTYPE_AP);
+ if (mon_flag)
+ wiphy->interface_modes |= BIT(NL80211_IFTYPE_MONITOR);
+ if (p2p)
+ wiphy->interface_modes |= BIT(NL80211_IFTYPE_P2P_CLIENT) |
+ BIT(NL80211_IFTYPE_P2P_GO) |
+ BIT(NL80211_IFTYPE_P2P_DEVICE);
c = 0;
i = 0;
- c0_limits = kcalloc(p2p ? 3 : 2, sizeof(*c0_limits), GFP_KERNEL);
+ n_limits = 1 + mon_flag + (p2p ? 2 : 0) + (rsdb || !p2p);
+ c0_limits = kcalloc(n_limits, sizeof(*c0_limits), GFP_KERNEL);
if (!c0_limits)
goto err;
- c0_limits[i].max = 1;
+
+ combo[c].num_different_channels = 1 + (rsdb || (p2p && mchan));
+ c0_limits[i].max = 1 + rsdb;
c0_limits[i++].types = BIT(NL80211_IFTYPE_STATION);
+ if (mon_flag) {
+ c0_limits[i].max = 1;
+ c0_limits[i++].types = BIT(NL80211_IFTYPE_MONITOR);
+ }
if (p2p) {
- if (brcmf_feat_is_enabled(ifp, BRCMF_FEAT_MCHAN))
- combo[c].num_different_channels = 2;
- else
- combo[c].num_different_channels = 1;
- wiphy->interface_modes |= BIT(NL80211_IFTYPE_P2P_CLIENT) |
- BIT(NL80211_IFTYPE_P2P_GO) |
- BIT(NL80211_IFTYPE_P2P_DEVICE);
c0_limits[i].max = 1;
c0_limits[i++].types = BIT(NL80211_IFTYPE_P2P_DEVICE);
- c0_limits[i].max = 1;
+ c0_limits[i].max = 1 + rsdb;
c0_limits[i++].types = BIT(NL80211_IFTYPE_P2P_CLIENT) |
BIT(NL80211_IFTYPE_P2P_GO);
+ }
+ if (p2p && rsdb) {
+ c0_limits[i].max = 2;
+ c0_limits[i++].types = BIT(NL80211_IFTYPE_AP);
+ combo[c].max_interfaces = 5;
+ } else if (p2p) {
+ combo[c].max_interfaces = i;
+ } else if (rsdb) {
+ c0_limits[i].max = 2;
+ c0_limits[i++].types = BIT(NL80211_IFTYPE_AP);
+ combo[c].max_interfaces = 3;
} else {
- combo[c].num_different_channels = 1;
c0_limits[i].max = 1;
c0_limits[i++].types = BIT(NL80211_IFTYPE_AP);
+ combo[c].max_interfaces = i;
}
- combo[c].max_interfaces = i;
combo[c].n_limits = i;
combo[c].limits = c0_limits;
- if (p2p) {
+ if (p2p && !rsdb) {
c++;
i = 0;
p2p_limits = kcalloc(4, sizeof(*p2p_limits), GFP_KERNEL);
@@ -6607,14 +6712,20 @@ static int brcmf_setup_ifmodes(struct wiphy *wiphy, struct brcmf_if *ifp)
if (mbss) {
c++;
i = 0;
- mbss_limits = kcalloc(1, sizeof(*mbss_limits), GFP_KERNEL);
+ n_limits = 1 + mon_flag;
+ mbss_limits = kcalloc(n_limits, sizeof(*mbss_limits),
+ GFP_KERNEL);
if (!mbss_limits)
goto err;
mbss_limits[i].max = 4;
mbss_limits[i++].types = BIT(NL80211_IFTYPE_AP);
+ if (mon_flag) {
+ mbss_limits[i].max = 1;
+ mbss_limits[i++].types = BIT(NL80211_IFTYPE_MONITOR);
+ }
combo[c].beacon_int_infra_match = true;
combo[c].num_different_channels = 1;
- combo[c].max_interfaces = 4;
+ combo[c].max_interfaces = 4 + mon_flag;
combo[c].n_limits = i;
combo[c].limits = mbss_limits;
}
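The incremental c0_limits construction above is easier to audit written out flat. Under p2p && rsdb (monitor flag absent), it amounts to the following static table; the struct and field names are the real cfg80211 ones, the variable names are illustrative. Note that the enforced cap is max_interfaces = 5, one more than the "4 total" tally in the comment block.

#include <net/cfg80211.h>

static const struct ieee80211_iface_limit example_rsdb_limits[] = {
        { .max = 2, .types = BIT(NL80211_IFTYPE_STATION) },
        { .max = 1, .types = BIT(NL80211_IFTYPE_P2P_DEVICE) },
        { .max = 2, .types = BIT(NL80211_IFTYPE_P2P_CLIENT) |
                             BIT(NL80211_IFTYPE_P2P_GO) },
        { .max = 2, .types = BIT(NL80211_IFTYPE_AP) },
};

static const struct ieee80211_iface_combination example_rsdb_combo = {
        .max_interfaces = 5,
        .num_different_channels = 2,    /* 1 + rsdb */
        .n_limits = ARRAY_SIZE(example_rsdb_limits),
        .limits = example_rsdb_limits,
};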
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/chip.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/chip.c
index a795d781b4c5..282d0bc14e8e 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/chip.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/chip.c
@@ -433,11 +433,25 @@ static void brcmf_chip_ai_resetcore(struct brcmf_core_priv *core, u32 prereset,
{
struct brcmf_chip_priv *ci;
int count;
+ struct brcmf_core *d11core2 = NULL;
+ struct brcmf_core_priv *d11priv2 = NULL;
ci = core->chip;
+ /* special handling: reset both D11 cores together */
+ if (core->pub.id == BCMA_CORE_80211) {
+ d11core2 = brcmf_chip_get_d11core(&ci->pub, 1);
+ if (d11core2) {
+ brcmf_dbg(INFO, "found two d11 cores, reset both\n");
+ d11priv2 = container_of(d11core2,
+ struct brcmf_core_priv, pub);
+ }
+ }
+
/* must disable first to work for arbitrary current core state */
brcmf_chip_ai_coredisable(core, prereset, reset);
+ if (d11priv2)
+ brcmf_chip_ai_coredisable(d11priv2, prereset, reset);
count = 0;
while (ci->ops->read32(ci->ctx, core->wrapbase + BCMA_RESET_CTL) &
@@ -449,9 +463,30 @@ static void brcmf_chip_ai_resetcore(struct brcmf_core_priv *core, u32 prereset,
usleep_range(40, 60);
}
+ if (d11priv2) {
+ count = 0;
+ while (ci->ops->read32(ci->ctx,
+ d11priv2->wrapbase + BCMA_RESET_CTL) &
+ BCMA_RESET_CTL_RESET) {
+ ci->ops->write32(ci->ctx,
+ d11priv2->wrapbase + BCMA_RESET_CTL,
+ 0);
+ count++;
+ if (count > 50)
+ break;
+ usleep_range(40, 60);
+ }
+ }
+
ci->ops->write32(ci->ctx, core->wrapbase + BCMA_IOCTL,
postreset | BCMA_IOCTL_CLK);
ci->ops->read32(ci->ctx, core->wrapbase + BCMA_IOCTL);
+
+ if (d11priv2) {
+ ci->ops->write32(ci->ctx, d11priv2->wrapbase + BCMA_IOCTL,
+ postreset | BCMA_IOCTL_CLK);
+ ci->ops->read32(ci->ctx, d11priv2->wrapbase + BCMA_IOCTL);
+ }
}
char *brcmf_chip_name(u32 id, u32 rev, char *buf, uint len)
@@ -677,7 +712,6 @@ static u32 brcmf_chip_tcm_rambase(struct brcmf_chip_priv *ci)
case BRCM_CC_43569_CHIP_ID:
case BRCM_CC_43570_CHIP_ID:
case BRCM_CC_4358_CHIP_ID:
- case BRCM_CC_4359_CHIP_ID:
case BRCM_CC_43602_CHIP_ID:
case BRCM_CC_4371_CHIP_ID:
return 0x180000;
@@ -687,6 +721,8 @@ static u32 brcmf_chip_tcm_rambase(struct brcmf_chip_priv *ci)
case BRCM_CC_4366_CHIP_ID:
case BRCM_CC_43664_CHIP_ID:
return 0x200000;
+ case BRCM_CC_4359_CHIP_ID:
+ return (ci->pub.chiprev < 9) ? 0x180000 : 0x160000;
case CY_CC_4373_CHIP_ID:
return 0x160000;
default:
@@ -1109,6 +1145,21 @@ void brcmf_chip_detach(struct brcmf_chip *pub)
kfree(chip);
}
+struct brcmf_core *brcmf_chip_get_d11core(struct brcmf_chip *pub, u8 unit)
+{
+ struct brcmf_chip_priv *chip;
+ struct brcmf_core_priv *core;
+
+ chip = container_of(pub, struct brcmf_chip_priv, pub);
+ list_for_each_entry(core, &chip->cores, list) {
+ if (core->pub.id == BCMA_CORE_80211) {
+ if (unit-- == 0)
+ return &core->pub;
+ }
+ }
+ return NULL;
+}
+
struct brcmf_core *brcmf_chip_get_core(struct brcmf_chip *pub, u16 coreid)
{
struct brcmf_chip_priv *chip;
@@ -1357,6 +1408,7 @@ bool brcmf_chip_sr_capable(struct brcmf_chip *pub)
addr = CORE_CC_REG(base, sr_control0);
reg = chip->ops->read32(chip->ctx, addr);
return (reg & CC_SR_CTL0_ENABLE_MASK) != 0;
+ case BRCM_CC_4359_CHIP_ID:
case CY_CC_43012_CHIP_ID:
addr = CORE_CC_REG(pmu->base, retention_ctl);
reg = chip->ops->read32(chip->ctx, addr);
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/chip.h b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/chip.h
index 7b00f6a59e89..8fa38658e727 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/chip.h
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/chip.h
@@ -74,6 +74,7 @@ struct brcmf_chip *brcmf_chip_attach(void *ctx,
const struct brcmf_buscore_ops *ops);
void brcmf_chip_detach(struct brcmf_chip *chip);
struct brcmf_core *brcmf_chip_get_core(struct brcmf_chip *chip, u16 coreid);
+struct brcmf_core *brcmf_chip_get_d11core(struct brcmf_chip *pub, u8 unit);
struct brcmf_core *brcmf_chip_get_chipcommon(struct brcmf_chip *chip);
struct brcmf_core *brcmf_chip_get_pmu(struct brcmf_chip *pub);
bool brcmf_chip_iscoreup(struct brcmf_core *core);
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c
index 85cf96461dde..23627c953a5e 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c
@@ -661,6 +661,8 @@ int brcmf_net_attach(struct brcmf_if *ifp, bool rtnl_locked)
goto fail;
}
+ netif_carrier_off(ndev);
+
ndev->priv_destructor = brcmf_cfg80211_free_netdev;
brcmf_dbg(INFO, "%s: Broadcom Dongle Host Driver\n", ndev->name);
return 0;
@@ -671,7 +673,7 @@ fail:
return -EBADE;
}
-static void brcmf_net_detach(struct net_device *ndev, bool rtnl_locked)
+void brcmf_net_detach(struct net_device *ndev, bool rtnl_locked)
{
if (ndev->reg_state == NETREG_REGISTERED) {
if (rtnl_locked)
@@ -684,6 +686,72 @@ static void brcmf_net_detach(struct net_device *ndev, bool rtnl_locked)
}
}
+static int brcmf_net_mon_open(struct net_device *ndev)
+{
+ struct brcmf_if *ifp = netdev_priv(ndev);
+ struct brcmf_pub *drvr = ifp->drvr;
+ u32 monitor;
+ int err;
+
+ brcmf_dbg(TRACE, "Enter\n");
+
+ err = brcmf_fil_cmd_int_get(ifp, BRCMF_C_GET_MONITOR, &monitor);
+ if (err) {
+ bphy_err(drvr, "BRCMF_C_GET_MONITOR error (%d)\n", err);
+ return err;
+ } else if (monitor) {
+ bphy_err(drvr, "Monitor mode is already enabled\n");
+ return -EEXIST;
+ }
+
+ monitor = 3;
+ err = brcmf_fil_cmd_int_set(ifp, BRCMF_C_SET_MONITOR, monitor);
+ if (err)
+ bphy_err(drvr, "BRCMF_C_SET_MONITOR error (%d)\n", err);
+
+ return err;
+}
+
+static int brcmf_net_mon_stop(struct net_device *ndev)
+{
+ struct brcmf_if *ifp = netdev_priv(ndev);
+ struct brcmf_pub *drvr = ifp->drvr;
+ u32 monitor;
+ int err;
+
+ brcmf_dbg(TRACE, "Enter\n");
+
+ monitor = 0;
+ err = brcmf_fil_cmd_int_set(ifp, BRCMF_C_SET_MONITOR, monitor);
+ if (err)
+ bphy_err(drvr, "BRCMF_C_SET_MONITOR error (%d)\n", err);
+
+ return err;
+}
+
+static const struct net_device_ops brcmf_netdev_ops_mon = {
+ .ndo_open = brcmf_net_mon_open,
+ .ndo_stop = brcmf_net_mon_stop,
+};
+
+int brcmf_net_mon_attach(struct brcmf_if *ifp)
+{
+ struct brcmf_pub *drvr = ifp->drvr;
+ struct net_device *ndev;
+ int err;
+
+ brcmf_dbg(TRACE, "Enter\n");
+
+ ndev = ifp->ndev;
+ ndev->netdev_ops = &brcmf_netdev_ops_mon;
+
+ err = register_netdevice(ndev);
+ if (err)
+ bphy_err(drvr, "Failed to register %s device\n", ndev->name);
+
+ return err;
+}
+
void brcmf_net_setcarrier(struct brcmf_if *ifp, bool on)
{
struct net_device *ndev;
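A note on brcmf_net_mon_attach() above: it uses register_netdevice() rather than register_netdev(), so the caller must already hold the RTNL. That holds on the cfg80211 ->add_virtual_intf() path that creates the monitor interface; a sketch making the assumption explicit:

#include <linux/rtnetlink.h>

static int example_mon_register(struct net_device *ndev)
{
        ASSERT_RTNL();  /* ->add_virtual_intf() runs under the RTNL */
        return register_netdevice(ndev);
}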
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.h b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.h
index 6699637d3bf8..33b2ab3b54b0 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.h
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.h
@@ -210,6 +210,8 @@ void brcmf_txflowblock_if(struct brcmf_if *ifp,
void brcmf_txfinalize(struct brcmf_if *ifp, struct sk_buff *txp, bool success);
void brcmf_netif_rx(struct brcmf_if *ifp, struct sk_buff *skb);
void brcmf_netif_mon_rx(struct brcmf_if *ifp, struct sk_buff *skb);
+void brcmf_net_detach(struct net_device *ndev, bool rtnl_locked);
+int brcmf_net_mon_attach(struct brcmf_if *ifp);
void brcmf_net_setcarrier(struct brcmf_if *ifp, bool on);
int __init brcmf_core_init(void);
void __exit brcmf_core_exit(void);
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/feature.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/feature.c
index 1c9c74cc958e..5da0dda0d899 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/feature.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/feature.c
@@ -38,6 +38,7 @@ static const struct brcmf_feat_fwcap brcmf_fwcap_map[] = {
{ BRCMF_FEAT_MCHAN, "mchan" },
{ BRCMF_FEAT_P2P, "p2p" },
{ BRCMF_FEAT_MONITOR, "monitor" },
+ { BRCMF_FEAT_MONITOR_FLAG, "rtap" },
{ BRCMF_FEAT_MONITOR_FMT_RADIOTAP, "rtap" },
{ BRCMF_FEAT_DOT11H, "802.11h" },
{ BRCMF_FEAT_SAE, "sae" },
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/feature.h b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/feature.h
index 280a1f6412d4..cda3fc1bab7f 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/feature.h
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/feature.h
@@ -23,6 +23,7 @@
* GSCAN: enhanced scan offload feature.
* FWSUP: Firmware supplicant.
* MONITOR: firmware can pass monitor packets to host.
+ * MONITOR_FLAG: firmware flags monitor packets.
* MONITOR_FMT_RADIOTAP: firmware provides monitor packets with radiotap header
* MONITOR_FMT_HW_RX_HDR: firmware provides monitor packets with hw/ucode header
* DOT11H: firmware supports 802.11h
@@ -44,6 +45,7 @@
BRCMF_FEAT_DEF(GSCAN) \
BRCMF_FEAT_DEF(FWSUP) \
BRCMF_FEAT_DEF(MONITOR) \
+ BRCMF_FEAT_DEF(MONITOR_FLAG) \
BRCMF_FEAT_DEF(MONITOR_FMT_RADIOTAP) \
BRCMF_FEAT_DEF(MONITOR_FMT_HW_RX_HDR) \
BRCMF_FEAT_DEF(DOT11H) \
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwil.h b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwil.h
index 0ff6f5212a94..ae4cf4372908 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwil.h
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwil.h
@@ -49,6 +49,8 @@
#define BRCMF_C_GET_PM 85
#define BRCMF_C_SET_PM 86
#define BRCMF_C_GET_REVINFO 98
+#define BRCMF_C_GET_MONITOR 107
+#define BRCMF_C_SET_MONITOR 108
#define BRCMF_C_GET_CURR_RATESET 114
#define BRCMF_C_GET_AP 117
#define BRCMF_C_SET_AP 118
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.c
index 2bd892df83cc..5e1a11c07551 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.c
@@ -908,7 +908,7 @@ static u8 brcmf_fws_hdrpush(struct brcmf_fws_info *fws, struct sk_buff *skb)
wlh += wlh[1] + 2;
if (entry->send_tim_signal) {
- entry->send_tim_signal = 0;
+ entry->send_tim_signal = false;
wlh[0] = BRCMF_FWS_TYPE_PENDING_TRAFFIC_BMP;
wlh[1] = BRCMF_FWS_TYPE_PENDING_TRAFFIC_BMP_LEN;
wlh[2] = entry->mac_handle;
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/msgbuf.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/msgbuf.c
index e3dd8623be4e..8bb4f1fa790e 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/msgbuf.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/msgbuf.c
@@ -365,7 +365,7 @@ brcmf_msgbuf_get_pktid(struct device *dev, struct brcmf_msgbuf_pktids *pktids,
struct brcmf_msgbuf_pktid *pktid;
struct sk_buff *skb;
- if (idx < 0 || idx >= pktids->array_size) {
+ if (idx >= pktids->array_size) {
brcmf_err("Invalid packet id %d (max %d)\n", idx,
pktids->array_size);
return NULL;
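The removed idx < 0 arm could never be true: idx is unsigned, so a "negative" value wraps to a huge positive number and the upper-bound test already rejects it (the brcmsmac chanspec check later in this patch is the same cleanup for a u16 channel). Sketch of the idiom:

static inline bool example_pktid_in_range(u32 idx, u32 array_size)
{
        /* one unsigned comparison covers both ends of the range */
        return idx < array_size;
}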
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/p2p.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/p2p.c
index 7ba9f6a68645..1f5deea5a288 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/p2p.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/p2p.c
@@ -2092,7 +2092,8 @@ static struct wireless_dev *brcmf_p2p_create_p2pdev(struct brcmf_p2p_info *p2p,
/* firmware requires unique mac address for p2pdev interface */
if (addr && ether_addr_equal(addr, pri_ifp->mac_addr)) {
bphy_err(drvr, "discovery vif must be different from primary interface\n");
- return ERR_PTR(-EINVAL);
+ err = -EINVAL;
+ goto fail;
}
brcmf_p2p_generate_bss_mac(p2p, addr);
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c
index f64ce5074a55..5105f62767fb 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c
@@ -78,7 +78,7 @@ static const struct brcmf_firmware_mapping brcmf_pcie_fwnames[] = {
BRCMF_FW_ENTRY(BRCM_CC_4371_CHIP_ID, 0xFFFFFFFF, 4371),
};
-#define BRCMF_PCIE_FW_UP_TIMEOUT 2000 /* msec */
+#define BRCMF_PCIE_FW_UP_TIMEOUT 5000 /* msec */
#define BRCMF_PCIE_REG_MAP_SIZE (32 * 1024)
@@ -1643,8 +1643,8 @@ static int brcmf_pcie_get_resource(struct brcmf_pciedev_info *devinfo)
return -EINVAL;
}
- devinfo->regs = ioremap_nocache(bar0_addr, BRCMF_PCIE_REG_MAP_SIZE);
- devinfo->tcm = ioremap_nocache(bar1_addr, bar1_size);
+ devinfo->regs = ioremap(bar0_addr, BRCMF_PCIE_REG_MAP_SIZE);
+ devinfo->tcm = ioremap(bar1_addr, bar1_size);
if (!devinfo->regs || !devinfo->tcm) {
brcmf_err(bus, "ioremap() failed (%p,%p)\n", devinfo->regs,
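ioremap_nocache() was removed in this kernel cycle; it had long been a plain alias of ioremap(), which maps I/O memory uncached by default, so the rename is mechanical and no behavior change is intended. Shape of the replacement call (sketch):

#include <linux/io.h>

static void __iomem *example_map_regs(phys_addr_t bar0_addr)
{
        /* uncached MMIO mapping, exactly what ioremap_nocache() gave */
        return ioremap(bar0_addr, BRCMF_PCIE_REG_MAP_SIZE);
}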
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c
index 264ad63232f8..f9047db6a11d 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c
@@ -42,6 +42,8 @@
#define DEFAULT_F2_WATERMARK 0x8
#define CY_4373_F2_WATERMARK 0x40
#define CY_43012_F2_WATERMARK 0x60
+#define CY_4359_F2_WATERMARK 0x40
+#define CY_4359_F1_MESBUSYCTRL (CY_4359_F2_WATERMARK | SBSDIO_MESBUSYCTRL_ENAB)
#ifdef DEBUG
@@ -614,6 +616,7 @@ BRCMF_FW_DEF(43455, "brcmfmac43455-sdio");
BRCMF_FW_DEF(43456, "brcmfmac43456-sdio");
BRCMF_FW_DEF(4354, "brcmfmac4354-sdio");
BRCMF_FW_DEF(4356, "brcmfmac4356-sdio");
+BRCMF_FW_DEF(4359, "brcmfmac4359-sdio");
BRCMF_FW_DEF(4373, "brcmfmac4373-sdio");
BRCMF_FW_DEF(43012, "brcmfmac43012-sdio");
@@ -636,6 +639,7 @@ static const struct brcmf_firmware_mapping brcmf_sdio_fwnames[] = {
BRCMF_FW_ENTRY(BRCM_CC_4345_CHIP_ID, 0xFFFFFDC0, 43455),
BRCMF_FW_ENTRY(BRCM_CC_4354_CHIP_ID, 0xFFFFFFFF, 4354),
BRCMF_FW_ENTRY(BRCM_CC_4356_CHIP_ID, 0xFFFFFFFF, 4356),
+ BRCMF_FW_ENTRY(BRCM_CC_4359_CHIP_ID, 0xFFFFFFFF, 4359),
BRCMF_FW_ENTRY(CY_CC_4373_CHIP_ID, 0xFFFFFFFF, 4373),
BRCMF_FW_ENTRY(CY_CC_43012_CHIP_ID, 0xFFFFFFFF, 43012)
};
@@ -1935,6 +1939,7 @@ static uint brcmf_sdio_readframes(struct brcmf_sdio *bus, uint maxframes)
BRCMF_SDIO_FT_NORMAL)) {
rd->len = 0;
brcmu_pkt_buf_free_skb(pkt);
+ continue;
}
bus->sdcnt.rx_readahead_cnt++;
if (rd->len != roundup(rd_new.len, 16)) {
@@ -4205,6 +4210,19 @@ static void brcmf_sdio_firmware_callback(struct device *dev, int err,
brcmf_sdiod_writeb(sdiod, SBSDIO_DEVICE_CTL, devctl,
&err);
break;
+ case SDIO_DEVICE_ID_BROADCOM_4359:
+ brcmf_dbg(INFO, "set F2 watermark to 0x%x*4 bytes\n",
+ CY_4359_F2_WATERMARK);
+ brcmf_sdiod_writeb(sdiod, SBSDIO_WATERMARK,
+ CY_4359_F2_WATERMARK, &err);
+ devctl = brcmf_sdiod_readb(sdiod, SBSDIO_DEVICE_CTL,
+ &err);
+ devctl |= SBSDIO_DEVCTL_F2WM_ENAB;
+ brcmf_sdiod_writeb(sdiod, SBSDIO_DEVICE_CTL, devctl,
+ &err);
+ brcmf_sdiod_writeb(sdiod, SBSDIO_FUNC1_MESBUSYCTRL,
+ CY_4359_F1_MESBUSYCTRL, &err);
+ break;
default:
brcmf_sdiod_writeb(sdiod, SBSDIO_WATERMARK,
DEFAULT_F2_WATERMARK, &err);
@@ -4225,6 +4243,12 @@ static void brcmf_sdio_firmware_callback(struct device *dev, int err,
}
if (err == 0) {
+ /* Assign bus interface callbacks */
+ sdiod->bus_if->dev = sdiod->dev;
+ sdiod->bus_if->ops = &brcmf_sdio_bus_ops;
+ sdiod->bus_if->chip = bus->ci->chip;
+ sdiod->bus_if->chiprev = bus->ci->chiprev;
+
/* Allow full data communication using DPC from now on. */
brcmf_sdiod_change_state(bus->sdiodev, BRCMF_SDIOD_DATA);
@@ -4241,12 +4265,6 @@ static void brcmf_sdio_firmware_callback(struct device *dev, int err,
sdio_release_host(sdiod->func1);
- /* Assign bus interface call back */
- sdiod->bus_if->dev = sdiod->dev;
- sdiod->bus_if->ops = &brcmf_sdio_bus_ops;
- sdiod->bus_if->chip = bus->ci->chip;
- sdiod->bus_if->chiprev = bus->ci->chiprev;
-
err = brcmf_alloc(sdiod->dev, sdiod->settings);
if (err) {
brcmf_err("brcmf_alloc failed\n");
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.h b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.h
index 0bd47c119dae..163fd664780a 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.h
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.h
@@ -178,7 +178,6 @@ struct brcmf_sdio_dev {
bool sd_irq_requested;
bool irq_en; /* irq enable flags */
spinlock_t irq_en_lock;
- bool irq_wake; /* irq wake enable flags */
bool sg_support;
uint max_request_size;
ushort max_segment_count;
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/usb.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/usb.c
index 06f3c01f10b3..575ed19e9195 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/usb.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/usb.c
@@ -430,6 +430,7 @@ fail:
usb_free_urb(req->urb);
list_del(q->next);
}
+ kfree(reqs);
return NULL;
}
@@ -1348,7 +1349,7 @@ brcmf_usb_probe(struct usb_interface *intf, const struct usb_device_id *id)
goto fail;
}
- desc = &intf->altsetting[0].desc;
+ desc = &intf->cur_altsetting->desc;
if ((desc->bInterfaceClass != USB_CLASS_VENDOR_SPEC) ||
(desc->bInterfaceSubClass != 2) ||
(desc->bInterfaceProtocol != 0xff)) {
@@ -1361,7 +1362,7 @@ brcmf_usb_probe(struct usb_interface *intf, const struct usb_device_id *id)
num_of_eps = desc->bNumEndpoints;
for (ep = 0; ep < num_of_eps; ep++) {
- endpoint = &intf->altsetting[0].endpoint[ep].desc;
+ endpoint = &intf->cur_altsetting->endpoint[ep].desc;
endpoint_num = usb_endpoint_num(endpoint);
if (!usb_endpoint_xfer_bulk(endpoint))
continue;
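intf->altsetting[0] is only the first entry in the descriptor array, not necessarily the setting the interface is actually using; a malicious or misdescribed device can make the two disagree, so descriptor sanity checks and endpoint walks should go through cur_altsetting. Sketch of the pattern (function name illustrative):

#include <linux/usb.h>

static int example_count_bulk_eps(struct usb_interface *intf)
{
        struct usb_host_interface *alt = intf->cur_altsetting; /* active */
        int ep, n = 0;

        for (ep = 0; ep < alt->desc.bNumEndpoints; ep++)
                if (usb_endpoint_xfer_bulk(&alt->endpoint[ep].desc))
                        n++;

        return n;
}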
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/main.c b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/main.c
index 3f09d89ba922..7f2c15c799d2 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/main.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/main.c
@@ -5408,7 +5408,7 @@ int brcms_c_set_channel(struct brcms_c_info *wlc, u16 channel)
{
u16 chspec = ch20mhz_chspec(channel);
- if (channel < 0 || channel > MAXCHANNEL)
+ if (channel > MAXCHANNEL)
return -EINVAL;
if (!brcms_c_valid_chanspec_db(wlc->cmi, chspec))
diff --git a/drivers/net/wireless/cisco/airo.c b/drivers/net/wireless/cisco/airo.c
index f43c06569ea1..c4c8f1b62e1e 100644
--- a/drivers/net/wireless/cisco/airo.c
+++ b/drivers/net/wireless/cisco/airo.c
@@ -7790,16 +7790,8 @@ static int readrids(struct net_device *dev, aironet_ioctl *comp) {
case AIROGVLIST: ridcode = RID_APLIST; break;
case AIROGDRVNAM: ridcode = RID_DRVNAME; break;
case AIROGEHTENC: ridcode = RID_ETHERENCAP; break;
- case AIROGWEPKTMP: ridcode = RID_WEP_TEMP;
- /* Only super-user can read WEP keys */
- if (!capable(CAP_NET_ADMIN))
- return -EPERM;
- break;
- case AIROGWEPKNV: ridcode = RID_WEP_PERM;
- /* Only super-user can read WEP keys */
- if (!capable(CAP_NET_ADMIN))
- return -EPERM;
- break;
+ case AIROGWEPKTMP: ridcode = RID_WEP_TEMP; break;
+ case AIROGWEPKNV: ridcode = RID_WEP_PERM; break;
case AIROGSTAT: ridcode = RID_STATUS; break;
case AIROGSTATSD32: ridcode = RID_STATSDELTA; break;
case AIROGSTATSC32: ridcode = RID_STATS; break;
@@ -7813,7 +7805,13 @@ static int readrids(struct net_device *dev, aironet_ioctl *comp) {
return -EINVAL;
}
- if ((iobuf = kmalloc(RIDSIZE, GFP_KERNEL)) == NULL)
+ if (ridcode == RID_WEP_TEMP || ridcode == RID_WEP_PERM) {
+ /* Only super-user can read WEP keys */
+ if (!capable(CAP_NET_ADMIN))
+ return -EPERM;
+ }
+
+ if ((iobuf = kzalloc(RIDSIZE, GFP_KERNEL)) == NULL)
return -ENOMEM;
PC4500_readrid(ai,ridcode,iobuf,RIDSIZE, 1);
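Two hardenings in this airo hunk: the CAP_NET_ADMIN check is made once, keyed on the resolved RID, instead of being duplicated per ioctl case; and the buffer switches to kzalloc(). The latter matters because PC4500_readrid() can fill fewer than RIDSIZE bytes while the whole buffer is later copied to user space, so an unzeroed tail would leak stale heap data. Sketch (names from the surrounding function):

#include <linux/slab.h>

static void *example_alloc_rid_buf(void)
{
        /* zeroed, so a short RID response cannot leak old heap contents
         * through the later copy_to_user() of the full buffer */
        return kzalloc(RIDSIZE, GFP_KERNEL);
}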
diff --git a/drivers/net/wireless/intel/ipw2x00/ipw2100.c b/drivers/net/wireless/intel/ipw2x00/ipw2100.c
index c4c83ab60cbc..536cd729c086 100644
--- a/drivers/net/wireless/intel/ipw2x00/ipw2100.c
+++ b/drivers/net/wireless/intel/ipw2x00/ipw2100.c
@@ -3206,8 +3206,9 @@ static void ipw2100_tx_send_data(struct ipw2100_priv *priv)
}
}
-static void ipw2100_irq_tasklet(struct ipw2100_priv *priv)
+static void ipw2100_irq_tasklet(unsigned long data)
{
+ struct ipw2100_priv *priv = (struct ipw2100_priv *)data;
struct net_device *dev = priv->net_dev;
unsigned long flags;
u32 inta, tmp;
@@ -5833,7 +5834,7 @@ static int ipw2100_close(struct net_device *dev)
/*
* TODO: Fix this function... its just wrong
*/
-static void ipw2100_tx_timeout(struct net_device *dev)
+static void ipw2100_tx_timeout(struct net_device *dev, unsigned int txqueue)
{
struct ipw2100_priv *priv = libipw_priv(dev);
@@ -6006,7 +6007,7 @@ static void ipw2100_rf_kill(struct work_struct *work)
spin_unlock_irqrestore(&priv->low_lock, flags);
}
-static void ipw2100_irq_tasklet(struct ipw2100_priv *priv);
+static void ipw2100_irq_tasklet(unsigned long data);
static const struct net_device_ops ipw2100_netdev_ops = {
.ndo_open = ipw2100_open,
@@ -6136,7 +6137,7 @@ static struct net_device *ipw2100_alloc_device(struct pci_dev *pci_dev,
INIT_DELAYED_WORK(&priv->rf_kill, ipw2100_rf_kill);
INIT_DELAYED_WORK(&priv->scan_event, ipw2100_scan_event);
- tasklet_init(&priv->irq_tasklet, (void (*)(unsigned long))
+ tasklet_init(&priv->irq_tasklet,
ipw2100_irq_tasklet, (unsigned long)priv);
/* NOTE: We do not start the deferred work for status checks yet */
@@ -6167,7 +6168,7 @@ static int ipw2100_pci_init_one(struct pci_dev *pci_dev,
ioaddr = pci_iomap(pci_dev, 0, 0);
if (!ioaddr) {
printk(KERN_WARNING DRV_NAME
- "Error calling ioremap_nocache.\n");
+ "Error calling ioremap.\n");
err = -EIO;
goto fail;
}
diff --git a/drivers/net/wireless/intel/ipw2x00/ipw2200.c b/drivers/net/wireless/intel/ipw2x00/ipw2200.c
index 31e43fc1d12b..5ef6f87a48ac 100644
--- a/drivers/net/wireless/intel/ipw2x00/ipw2200.c
+++ b/drivers/net/wireless/intel/ipw2x00/ipw2200.c
@@ -1945,8 +1945,9 @@ static void notify_wx_assoc_event(struct ipw_priv *priv)
wireless_send_event(priv->net_dev, SIOCGIWAP, &wrqu, NULL);
}
-static void ipw_irq_tasklet(struct ipw_priv *priv)
+static void ipw_irq_tasklet(unsigned long data)
{
+ struct ipw_priv *priv = (struct ipw_priv *)data;
u32 inta, inta_mask, handled = 0;
unsigned long flags;
int rc = 0;
@@ -10677,7 +10678,7 @@ static int ipw_setup_deferred_work(struct ipw_priv *priv)
INIT_WORK(&priv->qos_activate, ipw_bg_qos_activate);
#endif /* CONFIG_IPW2200_QOS */
- tasklet_init(&priv->irq_tasklet, (void (*)(unsigned long))
+ tasklet_init(&priv->irq_tasklet,
ipw_irq_tasklet, (unsigned long)priv);
return ret;
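The ipw2100/ipw2200 (and, below, iwlegacy) tasklets were registered by casting the handler to void (*)(unsigned long). An indirect call through a mismatched function-pointer type is undefined behavior and trips Control-Flow Integrity checking, so the handlers now take the canonical pre-5.9 tasklet signature and recover their private pointer from the data cookie. General shape (sketch):

#include <linux/interrupt.h>

static void example_irq_tasklet(unsigned long data)
{
        struct ipw_priv *priv = (struct ipw_priv *)data;

        /* ... acknowledge and service interrupt causes via priv ... */
}

static void example_setup(struct ipw_priv *priv)
{
        /* no function-pointer cast needed any more */
        tasklet_init(&priv->irq_tasklet, example_irq_tasklet,
                     (unsigned long)priv);
}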
diff --git a/drivers/net/wireless/intel/iwlegacy/3945-mac.c b/drivers/net/wireless/intel/iwlegacy/3945-mac.c
index 1168055da182..206b43b9dff8 100644
--- a/drivers/net/wireless/intel/iwlegacy/3945-mac.c
+++ b/drivers/net/wireless/intel/iwlegacy/3945-mac.c
@@ -1376,8 +1376,9 @@ il3945_dump_nic_error_log(struct il_priv *il)
}
static void
-il3945_irq_tasklet(struct il_priv *il)
+il3945_irq_tasklet(unsigned long data)
{
+ struct il_priv *il = (struct il_priv *)data;
u32 inta, handled = 0;
u32 inta_fh;
unsigned long flags;
@@ -3401,7 +3402,7 @@ il3945_setup_deferred_work(struct il_priv *il)
timer_setup(&il->watchdog, il_bg_watchdog, 0);
tasklet_init(&il->irq_tasklet,
- (void (*)(unsigned long))il3945_irq_tasklet,
+ il3945_irq_tasklet,
(unsigned long)il);
}
diff --git a/drivers/net/wireless/intel/iwlegacy/4965-mac.c b/drivers/net/wireless/intel/iwlegacy/4965-mac.c
index 3664f56f8cbd..da6d4202611c 100644
--- a/drivers/net/wireless/intel/iwlegacy/4965-mac.c
+++ b/drivers/net/wireless/intel/iwlegacy/4965-mac.c
@@ -27,6 +27,7 @@
#include <linux/firmware.h>
#include <linux/etherdevice.h>
#include <linux/if_arp.h>
+#include <linux/units.h>
#include <net/mac80211.h>
@@ -4343,8 +4344,9 @@ il4965_synchronize_irq(struct il_priv *il)
}
static void
-il4965_irq_tasklet(struct il_priv *il)
+il4965_irq_tasklet(unsigned long data)
{
+ struct il_priv *il = (struct il_priv *)data;
u32 inta, handled = 0;
u32 inta_fh;
unsigned long flags;
@@ -6237,7 +6239,7 @@ il4965_setup_deferred_work(struct il_priv *il)
timer_setup(&il->watchdog, il_bg_watchdog, 0);
tasklet_init(&il->irq_tasklet,
- (void (*)(unsigned long))il4965_irq_tasklet,
+ il4965_irq_tasklet,
(unsigned long)il);
}
@@ -6467,7 +6469,7 @@ il4965_set_hw_params(struct il_priv *il)
il->hw_params.valid_rx_ant = il->cfg->valid_rx_ant;
il->hw_params.ct_kill_threshold =
- CELSIUS_TO_KELVIN(CT_KILL_THRESHOLD_LEGACY);
+ celsius_to_kelvin(CT_KILL_THRESHOLD_LEGACY);
il->hw_params.sens = &il4965_sensitivity;
il->hw_params.beacon_time_tsf_bits = IL4965_EXT_BEACON_TIME_POS;
diff --git a/drivers/net/wireless/intel/iwlegacy/4965.c b/drivers/net/wireless/intel/iwlegacy/4965.c
index 32699b6a68c2..34d0579132ce 100644
--- a/drivers/net/wireless/intel/iwlegacy/4965.c
+++ b/drivers/net/wireless/intel/iwlegacy/4965.c
@@ -17,6 +17,7 @@
#include <linux/sched.h>
#include <linux/skbuff.h>
#include <linux/netdevice.h>
+#include <linux/units.h>
#include <net/mac80211.h>
#include <linux/etherdevice.h>
#include <asm/unaligned.h>
@@ -1104,7 +1105,7 @@ il4965_fill_txpower_tbl(struct il_priv *il, u8 band, u16 channel, u8 is_ht40,
/* get current temperature (Celsius) */
current_temp = max(il->temperature, IL_TX_POWER_TEMPERATURE_MIN);
current_temp = min(il->temperature, IL_TX_POWER_TEMPERATURE_MAX);
- current_temp = KELVIN_TO_CELSIUS(current_temp);
+ current_temp = kelvin_to_celsius(current_temp);
/* select thermal txpower adjustment params, based on channel group
* (same frequency group used for mimo txatten adjustment) */
@@ -1610,8 +1611,8 @@ il4965_hw_get_temperature(struct il_priv *il)
temperature =
(temperature * 97) / 100 + TEMPERATURE_CALIB_KELVIN_OFFSET;
- D_TEMP("Calibrated temperature: %dK, %dC\n", temperature,
- KELVIN_TO_CELSIUS(temperature));
+ D_TEMP("Calibrated temperature: %dK, %ldC\n", temperature,
+ kelvin_to_celsius(temperature));
return temperature;
}
@@ -1670,12 +1671,12 @@ il4965_temperature_calib(struct il_priv *il)
if (il->temperature != temp) {
if (il->temperature)
- D_TEMP("Temperature changed " "from %dC to %dC\n",
- KELVIN_TO_CELSIUS(il->temperature),
- KELVIN_TO_CELSIUS(temp));
+ D_TEMP("Temperature changed " "from %ldC to %ldC\n",
+ kelvin_to_celsius(il->temperature),
+ kelvin_to_celsius(temp));
else
- D_TEMP("Temperature " "initialized to %dC\n",
- KELVIN_TO_CELSIUS(temp));
+ D_TEMP("Temperature " "initialized to %ldC\n",
+ kelvin_to_celsius(temp));
}
il->temperature = temp;
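These hunks replace the driver-local KELVIN_TO_CELSIUS()/CELSIUS_TO_KELVIN() macros with the shared helpers from <linux/units.h>. The helpers take and return long, which is why the D_TEMP format specifiers change from %d to %ld above. Usage sketch:

#include <linux/units.h>

static long example_report_temp(struct il_priv *il, s32 temperature)
{
        long temp_c = kelvin_to_celsius(temperature);   /* returns long */

        D_TEMP("Calibrated temperature: %dK, %ldC\n", temperature, temp_c);
        return temp_c;
}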
diff --git a/drivers/net/wireless/intel/iwlegacy/common.c b/drivers/net/wireless/intel/iwlegacy/common.c
index d966b29b45ee..348c17ce72f5 100644
--- a/drivers/net/wireless/intel/iwlegacy/common.c
+++ b/drivers/net/wireless/intel/iwlegacy/common.c
@@ -699,7 +699,7 @@ il_eeprom_init(struct il_priv *il)
u32 gp = _il_rd(il, CSR_EEPROM_GP);
int sz;
int ret;
- u16 addr;
+ int addr;
/* allocate eeprom */
sz = il->cfg->eeprom_size;
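addr is stepped through the EEPROM in a for (addr = 0; addr < sz; addr += sizeof(u16)) loop against an int-sized sz. Widening it from u16 to int avoids the signed/unsigned comparison and removes a wrap hazard: a 16-bit counter would roll over to 0 before ever reaching a 64 KiB bound. Sketch of the loop shape (example_read_word() is a hypothetical placeholder):

static void example_read_eeprom(u16 *eeprom, int sz)
{
        int addr;       /* a u16 here could wrap before reaching sz */

        for (addr = 0; addr < sz; addr += sizeof(u16))
                eeprom[addr / 2] = example_read_word(addr);
}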
diff --git a/drivers/net/wireless/intel/iwlegacy/common.h b/drivers/net/wireless/intel/iwlegacy/common.h
index e7fb8e6bb9e7..bc9cd7e5ccb8 100644
--- a/drivers/net/wireless/intel/iwlegacy/common.h
+++ b/drivers/net/wireless/intel/iwlegacy/common.h
@@ -779,9 +779,6 @@ struct il_sensitivity_ranges {
u16 nrg_th_cca;
};
-#define KELVIN_TO_CELSIUS(x) ((x)-273)
-#define CELSIUS_TO_KELVIN(x) ((x)+273)
-
/**
* struct il_hw_params
* @bcast_id: f/w broadcast station ID
diff --git a/drivers/net/wireless/intel/iwlwifi/cfg/1000.c b/drivers/net/wireless/intel/iwlwifi/cfg/1000.c
index b92255b91b72..8a4579bb10d3 100644
--- a/drivers/net/wireless/intel/iwlwifi/cfg/1000.c
+++ b/drivers/net/wireless/intel/iwlwifi/cfg/1000.c
@@ -77,8 +77,7 @@ static const struct iwl_eeprom_params iwl1000_eeprom_params = {
.trans.base_params = &iwl1000_base_params, \
.eeprom_params = &iwl1000_eeprom_params, \
.led_mode = IWL_LED_BLINK, \
- .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K, \
- .trans.csr = &iwl_csr_v1
+ .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K
const struct iwl_cfg iwl1000_bgn_cfg = {
.name = "Intel(R) Centrino(R) Wireless-N 1000 BGN",
@@ -104,8 +103,7 @@ const struct iwl_cfg iwl1000_bg_cfg = {
.eeprom_params = &iwl1000_eeprom_params, \
.led_mode = IWL_LED_RF_STATE, \
.rx_with_siso_diversity = true, \
- .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K, \
- .trans.csr = &iwl_csr_v1
+ .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K
const struct iwl_cfg iwl100_bgn_cfg = {
.name = "Intel(R) Centrino(R) Wireless-N 100 BGN",
diff --git a/drivers/net/wireless/intel/iwlwifi/cfg/2000.c b/drivers/net/wireless/intel/iwlwifi/cfg/2000.c
index 2b1ae0cecc83..7140a5f3ea8b 100644
--- a/drivers/net/wireless/intel/iwlwifi/cfg/2000.c
+++ b/drivers/net/wireless/intel/iwlwifi/cfg/2000.c
@@ -103,8 +103,7 @@ static const struct iwl_eeprom_params iwl20x0_eeprom_params = {
.trans.base_params = &iwl2000_base_params, \
.eeprom_params = &iwl20x0_eeprom_params, \
.led_mode = IWL_LED_RF_STATE, \
- .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K, \
- .trans.csr = &iwl_csr_v1
+ .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K
const struct iwl_cfg iwl2000_2bgn_cfg = {
@@ -131,8 +130,7 @@ const struct iwl_cfg iwl2000_2bgn_d_cfg = {
.trans.base_params = &iwl2030_base_params, \
.eeprom_params = &iwl20x0_eeprom_params, \
.led_mode = IWL_LED_RF_STATE, \
- .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K, \
- .trans.csr = &iwl_csr_v1
+ .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K
const struct iwl_cfg iwl2030_2bgn_cfg = {
.name = "Intel(R) Centrino(R) Wireless-N 2230 BGN",
@@ -153,8 +151,7 @@ const struct iwl_cfg iwl2030_2bgn_cfg = {
.eeprom_params = &iwl20x0_eeprom_params, \
.led_mode = IWL_LED_RF_STATE, \
.rx_with_siso_diversity = true, \
- .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K, \
- .trans.csr = &iwl_csr_v1
+ .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K
const struct iwl_cfg iwl105_bgn_cfg = {
.name = "Intel(R) Centrino(R) Wireless-N 105 BGN",
@@ -181,8 +178,7 @@ const struct iwl_cfg iwl105_bgn_d_cfg = {
.eeprom_params = &iwl20x0_eeprom_params, \
.led_mode = IWL_LED_RF_STATE, \
.rx_with_siso_diversity = true, \
- .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K, \
- .trans.csr = &iwl_csr_v1
+ .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K
const struct iwl_cfg iwl135_bgn_cfg = {
.name = "Intel(R) Centrino(R) Wireless-N 135 BGN",
diff --git a/drivers/net/wireless/intel/iwlwifi/cfg/22000.c b/drivers/net/wireless/intel/iwlwifi/cfg/22000.c
index 56dc335a788c..a22a830019c0 100644
--- a/drivers/net/wireless/intel/iwlwifi/cfg/22000.c
+++ b/drivers/net/wireless/intel/iwlwifi/cfg/22000.c
@@ -92,6 +92,7 @@
#define IWL_22000_SO_A_GF_A_FW_PRE "iwlwifi-so-a0-gf-a0-"
#define IWL_22000_TY_A_GF_A_FW_PRE "iwlwifi-ty-a0-gf-a0-"
#define IWL_22000_SO_A_GF4_A_FW_PRE "iwlwifi-so-a0-gf4-a0-"
+#define IWL_22000_SOSNJ_A_GF4_A_FW_PRE "iwlwifi-SoSnj-a0-gf4-a0-"
#define IWL_22000_HR_MODULE_FIRMWARE(api) \
IWL_22000_HR_FW_PRE __stringify(api) ".ucode"
@@ -199,7 +200,6 @@ static const struct iwl_ht_params iwl_22000_ht_params = {
IWL_DEVICE_22000_COMMON, \
.trans.device_family = IWL_DEVICE_FAMILY_22000, \
.trans.base_params = &iwl_22000_base_params, \
- .trans.csr = &iwl_csr_v1, \
.gp2_reg_addr = 0xa02c68, \
.mon_dram_regs = { \
.write_ptr = { \
@@ -217,7 +217,6 @@ static const struct iwl_ht_params iwl_22000_ht_params = {
.trans.umac_prph_offset = 0x300000, \
.trans.device_family = IWL_DEVICE_FAMILY_AX210, \
.trans.base_params = &iwl_ax210_base_params, \
- .trans.csr = &iwl_csr_v1, \
.min_txq_size = 128, \
.gp2_reg_addr = 0xd02c68, \
.min_256_ba_txq_size = 512, \
@@ -236,24 +235,16 @@ static const struct iwl_ht_params iwl_22000_ht_params = {
}, \
}
-const struct iwl_cfg iwl22000_2ac_cfg_hr = {
- .name = "Intel(R) Dual Band Wireless AC 22000",
- .fw_name_pre = IWL_22000_HR_FW_PRE,
- IWL_DEVICE_22500,
-};
-
-const struct iwl_cfg iwl22000_2ac_cfg_hr_cdb = {
- .name = "Intel(R) Dual Band Wireless AC 22000",
- .fw_name_pre = IWL_22000_HR_CDB_FW_PRE,
- IWL_DEVICE_22500,
- .cdb = true,
-};
-
-const struct iwl_cfg iwl22000_2ac_cfg_jf = {
- .name = "Intel(R) Dual Band Wireless AC 22000",
- .fw_name_pre = IWL_22000_JF_FW_PRE,
- IWL_DEVICE_22500,
-};
+/*
+ * If the device doesn't support HE, no need to have that many buffers.
+ * 22000 devices can split multiple frames into a single RB, so fewer are
+ * needed; AX210 cannot (but use smaller RBs by default) - these sizes
+ * were picked according to 8 MSDUs inside 256 A-MSDUs in an A-MPDU, with
+ * additional overhead to account for processing time.
+ */
+#define IWL_NUM_RBDS_NON_HE 512
+#define IWL_NUM_RBDS_22000_HE 2048
+#define IWL_NUM_RBDS_AX210_HE 4096
const struct iwl_cfg iwl_ax101_cfg_qu_hr = {
.name = "Intel(R) Wi-Fi 6 AX101",
@@ -266,6 +257,7 @@ const struct iwl_cfg iwl_ax101_cfg_qu_hr = {
*/
.max_tx_agg_size = IEEE80211_MAX_AMPDU_BUF_HT,
.tx_with_siso_diversity = true,
+ .num_rbds = IWL_NUM_RBDS_22000_HE,
};
const struct iwl_cfg iwl_ax201_cfg_qu_hr = {
@@ -278,6 +270,7 @@ const struct iwl_cfg iwl_ax201_cfg_qu_hr = {
* HT size; mac80211 would otherwise pick the HE max (256) by default.
*/
.max_tx_agg_size = IEEE80211_MAX_AMPDU_BUF_HT,
+ .num_rbds = IWL_NUM_RBDS_22000_HE,
};
const struct iwl_cfg iwl_ax101_cfg_qu_c0_hr_b0 = {
@@ -290,6 +283,7 @@ const struct iwl_cfg iwl_ax101_cfg_qu_c0_hr_b0 = {
* HT size; mac80211 would otherwise pick the HE max (256) by default.
*/
.max_tx_agg_size = IEEE80211_MAX_AMPDU_BUF_HT,
+ .num_rbds = IWL_NUM_RBDS_22000_HE,
};
const struct iwl_cfg iwl_ax201_cfg_qu_c0_hr_b0 = {
@@ -302,6 +296,7 @@ const struct iwl_cfg iwl_ax201_cfg_qu_c0_hr_b0 = {
* HT size; mac80211 would otherwise pick the HE max (256) by default.
*/
.max_tx_agg_size = IEEE80211_MAX_AMPDU_BUF_HT,
+ .num_rbds = IWL_NUM_RBDS_22000_HE,
};
const struct iwl_cfg iwl_ax101_cfg_quz_hr = {
@@ -314,6 +309,7 @@ const struct iwl_cfg iwl_ax101_cfg_quz_hr = {
* HT size; mac80211 would otherwise pick the HE max (256) by default.
*/
.max_tx_agg_size = IEEE80211_MAX_AMPDU_BUF_HT,
+ .num_rbds = IWL_NUM_RBDS_22000_HE,
};
const struct iwl_cfg iwl_ax201_cfg_quz_hr = {
@@ -326,6 +322,7 @@ const struct iwl_cfg iwl_ax201_cfg_quz_hr = {
* HT size; mac80211 would otherwise pick the HE max (256) by default.
*/
.max_tx_agg_size = IEEE80211_MAX_AMPDU_BUF_HT,
+ .num_rbds = IWL_NUM_RBDS_22000_HE,
};
const struct iwl_cfg iwl_ax1650s_cfg_quz_hr = {
@@ -338,6 +335,7 @@ const struct iwl_cfg iwl_ax1650s_cfg_quz_hr = {
* HT size; mac80211 would otherwise pick the HE max (256) by default.
*/
.max_tx_agg_size = IEEE80211_MAX_AMPDU_BUF_HT,
+ .num_rbds = IWL_NUM_RBDS_22000_HE,
};
const struct iwl_cfg iwl_ax1650i_cfg_quz_hr = {
@@ -350,6 +348,7 @@ const struct iwl_cfg iwl_ax1650i_cfg_quz_hr = {
* HT size; mac80211 would otherwise pick the HE max (256) by default.
*/
.max_tx_agg_size = IEEE80211_MAX_AMPDU_BUF_HT,
+ .num_rbds = IWL_NUM_RBDS_22000_HE,
};
const struct iwl_cfg iwl_ax200_cfg_cc = {
@@ -363,6 +362,7 @@ const struct iwl_cfg iwl_ax200_cfg_cc = {
*/
.max_tx_agg_size = IEEE80211_MAX_AMPDU_BUF_HT,
.trans.bisr_workaround = 1,
+ .num_rbds = IWL_NUM_RBDS_22000_HE,
};
const struct iwl_cfg killer1650x_2ax_cfg = {
@@ -376,6 +376,7 @@ const struct iwl_cfg killer1650x_2ax_cfg = {
*/
.max_tx_agg_size = IEEE80211_MAX_AMPDU_BUF_HT,
.trans.bisr_workaround = 1,
+ .num_rbds = IWL_NUM_RBDS_22000_HE,
};
const struct iwl_cfg killer1650w_2ax_cfg = {
@@ -389,6 +390,7 @@ const struct iwl_cfg killer1650w_2ax_cfg = {
*/
.max_tx_agg_size = IEEE80211_MAX_AMPDU_BUF_HT,
.trans.bisr_workaround = 1,
+ .num_rbds = IWL_NUM_RBDS_22000_HE,
};
/*
@@ -400,48 +402,56 @@ const struct iwl_cfg iwl9461_2ac_cfg_qu_b0_jf_b0 = {
.name = "Intel(R) Wireless-AC 9461",
.fw_name_pre = IWL_QU_B_JF_B_FW_PRE,
IWL_DEVICE_22500,
+ .num_rbds = IWL_NUM_RBDS_NON_HE,
};
const struct iwl_cfg iwl9462_2ac_cfg_qu_b0_jf_b0 = {
.name = "Intel(R) Wireless-AC 9462",
.fw_name_pre = IWL_QU_B_JF_B_FW_PRE,
IWL_DEVICE_22500,
+ .num_rbds = IWL_NUM_RBDS_NON_HE,
};
const struct iwl_cfg iwl9560_2ac_cfg_qu_b0_jf_b0 = {
.name = "Intel(R) Wireless-AC 9560",
.fw_name_pre = IWL_QU_B_JF_B_FW_PRE,
IWL_DEVICE_22500,
+ .num_rbds = IWL_NUM_RBDS_NON_HE,
};
const struct iwl_cfg iwl9560_2ac_160_cfg_qu_b0_jf_b0 = {
.name = "Intel(R) Wireless-AC 9560 160MHz",
.fw_name_pre = IWL_QU_B_JF_B_FW_PRE,
IWL_DEVICE_22500,
+ .num_rbds = IWL_NUM_RBDS_NON_HE,
};
const struct iwl_cfg iwl9461_2ac_cfg_qu_c0_jf_b0 = {
.name = "Intel(R) Wireless-AC 9461",
.fw_name_pre = IWL_QU_C_JF_B_FW_PRE,
IWL_DEVICE_22500,
+ .num_rbds = IWL_NUM_RBDS_NON_HE,
};
const struct iwl_cfg iwl9462_2ac_cfg_qu_c0_jf_b0 = {
.name = "Intel(R) Wireless-AC 9462",
.fw_name_pre = IWL_QU_C_JF_B_FW_PRE,
IWL_DEVICE_22500,
+ .num_rbds = IWL_NUM_RBDS_NON_HE,
};
const struct iwl_cfg iwl9560_2ac_cfg_qu_c0_jf_b0 = {
.name = "Intel(R) Wireless-AC 9560",
.fw_name_pre = IWL_QU_C_JF_B_FW_PRE,
IWL_DEVICE_22500,
+ .num_rbds = IWL_NUM_RBDS_NON_HE,
};
const struct iwl_cfg iwl9560_2ac_160_cfg_qu_c0_jf_b0 = {
.name = "Intel(R) Wireless-AC 9560 160MHz",
.fw_name_pre = IWL_QU_C_JF_B_FW_PRE,
IWL_DEVICE_22500,
+ .num_rbds = IWL_NUM_RBDS_NON_HE,
};
const struct iwl_cfg iwl9560_2ac_cfg_qnj_jf_b0 = {
@@ -454,6 +464,7 @@ const struct iwl_cfg iwl9560_2ac_cfg_qnj_jf_b0 = {
* HT size; mac80211 would otherwise pick the HE max (256) by default.
*/
.max_tx_agg_size = IEEE80211_MAX_AMPDU_BUF_HT,
+ .num_rbds = IWL_NUM_RBDS_NON_HE,
};
const struct iwl_cfg iwl9560_2ac_cfg_quz_a0_jf_b0_soc = {
@@ -468,6 +479,7 @@ const struct iwl_cfg iwl9560_2ac_cfg_quz_a0_jf_b0_soc = {
.max_tx_agg_size = IEEE80211_MAX_AMPDU_BUF_HT,
.integrated = true,
.soc_latency = 5000,
+ .num_rbds = IWL_NUM_RBDS_NON_HE,
};
const struct iwl_cfg iwl9560_2ac_160_cfg_quz_a0_jf_b0_soc = {
@@ -482,6 +494,7 @@ const struct iwl_cfg iwl9560_2ac_160_cfg_quz_a0_jf_b0_soc = {
.max_tx_agg_size = IEEE80211_MAX_AMPDU_BUF_HT,
.integrated = true,
.soc_latency = 5000,
+ .num_rbds = IWL_NUM_RBDS_NON_HE,
};
const struct iwl_cfg iwl9461_2ac_cfg_quz_a0_jf_b0_soc = {
@@ -496,6 +509,7 @@ const struct iwl_cfg iwl9461_2ac_cfg_quz_a0_jf_b0_soc = {
.max_tx_agg_size = IEEE80211_MAX_AMPDU_BUF_HT,
.integrated = true,
.soc_latency = 5000,
+ .num_rbds = IWL_NUM_RBDS_NON_HE,
};
const struct iwl_cfg iwl9462_2ac_cfg_quz_a0_jf_b0_soc = {
@@ -510,6 +524,7 @@ const struct iwl_cfg iwl9462_2ac_cfg_quz_a0_jf_b0_soc = {
.max_tx_agg_size = IEEE80211_MAX_AMPDU_BUF_HT,
.integrated = true,
.soc_latency = 5000,
+ .num_rbds = IWL_NUM_RBDS_NON_HE,
};
const struct iwl_cfg iwl9560_killer_s_2ac_cfg_quz_a0_jf_b0_soc = {
@@ -524,6 +539,7 @@ const struct iwl_cfg iwl9560_killer_s_2ac_cfg_quz_a0_jf_b0_soc = {
.max_tx_agg_size = IEEE80211_MAX_AMPDU_BUF_HT,
.integrated = true,
.soc_latency = 5000,
+ .num_rbds = IWL_NUM_RBDS_NON_HE,
};
const struct iwl_cfg iwl9560_killer_i_2ac_cfg_quz_a0_jf_b0_soc = {
@@ -538,18 +554,21 @@ const struct iwl_cfg iwl9560_killer_i_2ac_cfg_quz_a0_jf_b0_soc = {
.max_tx_agg_size = IEEE80211_MAX_AMPDU_BUF_HT,
.integrated = true,
.soc_latency = 5000,
+ .num_rbds = IWL_NUM_RBDS_NON_HE,
};
const struct iwl_cfg killer1550i_2ac_cfg_qu_b0_jf_b0 = {
.name = "Killer (R) Wireless-AC 1550i Wireless Network Adapter (9560NGW)",
.fw_name_pre = IWL_QU_B_JF_B_FW_PRE,
IWL_DEVICE_22500,
+ .num_rbds = IWL_NUM_RBDS_NON_HE,
};
const struct iwl_cfg killer1550s_2ac_cfg_qu_b0_jf_b0 = {
.name = "Killer (R) Wireless-AC 1550s Wireless Network Adapter (9560NGW)",
.fw_name_pre = IWL_QU_B_JF_B_FW_PRE,
IWL_DEVICE_22500,
+ .num_rbds = IWL_NUM_RBDS_NON_HE,
};
const struct iwl_cfg killer1650s_2ax_cfg_qu_b0_hr_b0 = {
@@ -562,6 +581,7 @@ const struct iwl_cfg killer1650s_2ax_cfg_qu_b0_hr_b0 = {
* HT size; mac80211 would otherwise pick the HE max (256) by default.
*/
.max_tx_agg_size = IEEE80211_MAX_AMPDU_BUF_HT,
+ .num_rbds = IWL_NUM_RBDS_22000_HE,
};
const struct iwl_cfg killer1650i_2ax_cfg_qu_b0_hr_b0 = {
@@ -574,6 +594,7 @@ const struct iwl_cfg killer1650i_2ax_cfg_qu_b0_hr_b0 = {
* HT size; mac80211 would otherwise pick the HE max (256) by default.
*/
.max_tx_agg_size = IEEE80211_MAX_AMPDU_BUF_HT,
+ .num_rbds = IWL_NUM_RBDS_22000_HE,
};
const struct iwl_cfg killer1650s_2ax_cfg_qu_c0_hr_b0 = {
@@ -586,6 +607,7 @@ const struct iwl_cfg killer1650s_2ax_cfg_qu_c0_hr_b0 = {
* HT size; mac80211 would otherwise pick the HE max (256) by default.
*/
.max_tx_agg_size = IEEE80211_MAX_AMPDU_BUF_HT,
+ .num_rbds = IWL_NUM_RBDS_22000_HE,
};
const struct iwl_cfg killer1650i_2ax_cfg_qu_c0_hr_b0 = {
@@ -598,6 +620,7 @@ const struct iwl_cfg killer1650i_2ax_cfg_qu_c0_hr_b0 = {
* HT size; mac80211 would otherwise pick the HE max (256) by default.
*/
.max_tx_agg_size = IEEE80211_MAX_AMPDU_BUF_HT,
+ .num_rbds = IWL_NUM_RBDS_22000_HE,
};
const struct iwl_cfg iwl22000_2ax_cfg_jf = {
@@ -610,6 +633,7 @@ const struct iwl_cfg iwl22000_2ax_cfg_jf = {
* HT size; mac80211 would otherwise pick the HE max (256) by default.
*/
.max_tx_agg_size = IEEE80211_MAX_AMPDU_BUF_HT,
+ .num_rbds = IWL_NUM_RBDS_22000_HE,
};
const struct iwl_cfg iwl22000_2ax_cfg_qnj_hr_a0_f0 = {
@@ -622,6 +646,7 @@ const struct iwl_cfg iwl22000_2ax_cfg_qnj_hr_a0_f0 = {
* HT size; mac80211 would otherwise pick the HE max (256) by default.
*/
.max_tx_agg_size = IEEE80211_MAX_AMPDU_BUF_HT,
+ .num_rbds = IWL_NUM_RBDS_22000_HE,
};
const struct iwl_cfg iwl22000_2ax_cfg_qnj_hr_b0 = {
@@ -634,6 +659,7 @@ const struct iwl_cfg iwl22000_2ax_cfg_qnj_hr_b0 = {
* HT size; mac80211 would otherwise pick the HE max (256) by default.
*/
.max_tx_agg_size = IEEE80211_MAX_AMPDU_BUF_HT,
+ .num_rbds = IWL_NUM_RBDS_22000_HE,
};
const struct iwl_cfg iwl22000_2ax_cfg_qnj_hr_a0 = {
@@ -646,18 +672,21 @@ const struct iwl_cfg iwl22000_2ax_cfg_qnj_hr_a0 = {
* HT size; mac80211 would otherwise pick the HE max (256) by default.
*/
.max_tx_agg_size = IEEE80211_MAX_AMPDU_BUF_HT,
+ .num_rbds = IWL_NUM_RBDS_22000_HE,
};
const struct iwl_cfg iwlax210_2ax_cfg_so_jf_a0 = {
.name = "Intel(R) Wireless-AC 9560 160MHz",
.fw_name_pre = IWL_22000_SO_A_JF_B_FW_PRE,
IWL_DEVICE_AX210,
+ .num_rbds = IWL_NUM_RBDS_NON_HE,
};
const struct iwl_cfg iwlax210_2ax_cfg_so_hr_a0 = {
.name = "Intel(R) Wi-Fi 7 AX210 160MHz",
.fw_name_pre = IWL_22000_SO_A_HR_B_FW_PRE,
IWL_DEVICE_AX210,
+ .num_rbds = IWL_NUM_RBDS_AX210_HE,
};
const struct iwl_cfg iwlax211_2ax_cfg_so_gf_a0 = {
@@ -665,6 +694,7 @@ const struct iwl_cfg iwlax211_2ax_cfg_so_gf_a0 = {
.fw_name_pre = IWL_22000_SO_A_GF_A_FW_PRE,
.uhb_supported = true,
IWL_DEVICE_AX210,
+ .num_rbds = IWL_NUM_RBDS_AX210_HE,
};
const struct iwl_cfg iwlax210_2ax_cfg_ty_gf_a0 = {
@@ -672,12 +702,23 @@ const struct iwl_cfg iwlax210_2ax_cfg_ty_gf_a0 = {
.fw_name_pre = IWL_22000_TY_A_GF_A_FW_PRE,
.uhb_supported = true,
IWL_DEVICE_AX210,
+ .num_rbds = IWL_NUM_RBDS_AX210_HE,
};
const struct iwl_cfg iwlax411_2ax_cfg_so_gf4_a0 = {
.name = "Intel(R) Wi-Fi 7 AX411 160MHz",
.fw_name_pre = IWL_22000_SO_A_GF4_A_FW_PRE,
+ .uhb_supported = true,
+ IWL_DEVICE_AX210,
+ .num_rbds = IWL_NUM_RBDS_AX210_HE,
+};
+
+const struct iwl_cfg iwlax411_2ax_cfg_sosnj_gf4_a0 = {
+ .name = "Intel(R) Wi-Fi 7 AX411 160MHz",
+ .fw_name_pre = IWL_22000_SOSNJ_A_GF4_A_FW_PRE,
+ .uhb_supported = true,
IWL_DEVICE_AX210,
+ .num_rbds = IWL_NUM_RBDS_AX210_HE,
};
MODULE_FIRMWARE(IWL_22000_HR_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX));
diff --git a/drivers/net/wireless/intel/iwlwifi/cfg/5000.c b/drivers/net/wireless/intel/iwlwifi/cfg/5000.c
index aab4495c6085..3591336dc644 100644
--- a/drivers/net/wireless/intel/iwlwifi/cfg/5000.c
+++ b/drivers/net/wireless/intel/iwlwifi/cfg/5000.c
@@ -75,8 +75,7 @@ static const struct iwl_eeprom_params iwl5000_eeprom_params = {
.trans.base_params = &iwl5000_base_params, \
.eeprom_params = &iwl5000_eeprom_params, \
.led_mode = IWL_LED_BLINK, \
- .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K, \
- .trans.csr = &iwl_csr_v1
+ .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K
const struct iwl_cfg iwl5300_agn_cfg = {
.name = "Intel(R) Ultimate N WiFi Link 5300 AGN",
@@ -125,7 +124,6 @@ const struct iwl_cfg iwl5350_agn_cfg = {
.ht_params = &iwl5000_ht_params,
.led_mode = IWL_LED_BLINK,
.internal_wimax_coex = true,
- .trans.csr = &iwl_csr_v1,
};
#define IWL_DEVICE_5150 \
@@ -141,8 +139,7 @@ const struct iwl_cfg iwl5350_agn_cfg = {
.eeprom_params = &iwl5000_eeprom_params, \
.led_mode = IWL_LED_BLINK, \
.internal_wimax_coex = true, \
- .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K, \
- .trans.csr = &iwl_csr_v1
+ .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K
const struct iwl_cfg iwl5150_agn_cfg = {
.name = "Intel(R) WiMAX/WiFi Link 5150 AGN",
diff --git a/drivers/net/wireless/intel/iwlwifi/cfg/6000.c b/drivers/net/wireless/intel/iwlwifi/cfg/6000.c
index 39ea81903dbe..b4a8a6804c39 100644
--- a/drivers/net/wireless/intel/iwlwifi/cfg/6000.c
+++ b/drivers/net/wireless/intel/iwlwifi/cfg/6000.c
@@ -124,8 +124,7 @@ static const struct iwl_eeprom_params iwl6000_eeprom_params = {
.trans.base_params = &iwl6000_g2_base_params, \
.eeprom_params = &iwl6000_eeprom_params, \
.led_mode = IWL_LED_RF_STATE, \
- .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K, \
- .trans.csr = &iwl_csr_v1
+ .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K
const struct iwl_cfg iwl6005_2agn_cfg = {
.name = "Intel(R) Centrino(R) Advanced-N 6205 AGN",
@@ -179,8 +178,7 @@ const struct iwl_cfg iwl6005_2agn_mow2_cfg = {
.trans.base_params = &iwl6000_g2_base_params, \
.eeprom_params = &iwl6000_eeprom_params, \
.led_mode = IWL_LED_RF_STATE, \
- .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K, \
- .trans.csr = &iwl_csr_v1
+ .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K
const struct iwl_cfg iwl6030_2agn_cfg = {
.name = "Intel(R) Centrino(R) Advanced-N 6230 AGN",
@@ -216,8 +214,7 @@ const struct iwl_cfg iwl6030_2bg_cfg = {
.trans.base_params = &iwl6000_g2_base_params, \
.eeprom_params = &iwl6000_eeprom_params, \
.led_mode = IWL_LED_RF_STATE, \
- .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K, \
- .trans.csr = &iwl_csr_v1
+ .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K
const struct iwl_cfg iwl6035_2agn_cfg = {
.name = "Intel(R) Centrino(R) Advanced-N 6235 AGN",
@@ -272,8 +269,7 @@ const struct iwl_cfg iwl130_bg_cfg = {
.trans.base_params = &iwl6000_base_params, \
.eeprom_params = &iwl6000_eeprom_params, \
.led_mode = IWL_LED_BLINK, \
- .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K, \
- .trans.csr = &iwl_csr_v1
+ .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K
const struct iwl_cfg iwl6000i_2agn_cfg = {
.name = "Intel(R) Centrino(R) Advanced-N 6200 AGN",
@@ -306,8 +302,7 @@ const struct iwl_cfg iwl6000i_2bg_cfg = {
.eeprom_params = &iwl6000_eeprom_params, \
.led_mode = IWL_LED_BLINK, \
.internal_wimax_coex = true, \
- .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K, \
- .trans.csr = &iwl_csr_v1
+ .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K
const struct iwl_cfg iwl6050_2agn_cfg = {
.name = "Intel(R) Centrino(R) Advanced-N + WiMAX 6250 AGN",
@@ -333,8 +328,7 @@ const struct iwl_cfg iwl6050_2abg_cfg = {
.eeprom_params = &iwl6000_eeprom_params, \
.led_mode = IWL_LED_BLINK, \
.internal_wimax_coex = true, \
- .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K, \
- .trans.csr = &iwl_csr_v1
+ .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K
const struct iwl_cfg iwl6150_bgn_cfg = {
.name = "Intel(R) Centrino(R) Wireless-N + WiMAX 6150 BGN",
@@ -361,7 +355,6 @@ const struct iwl_cfg iwl6000_3agn_cfg = {
.eeprom_params = &iwl6000_eeprom_params,
.ht_params = &iwl6000_ht_params,
.led_mode = IWL_LED_BLINK,
- .trans.csr = &iwl_csr_v1,
};
MODULE_FIRMWARE(IWL6000_MODULE_FIRMWARE(IWL6000_UCODE_API_MAX));
diff --git a/drivers/net/wireless/intel/iwlwifi/cfg/7000.c b/drivers/net/wireless/intel/iwlwifi/cfg/7000.c
index deb520aeb3f8..b72993e07fc2 100644
--- a/drivers/net/wireless/intel/iwlwifi/cfg/7000.c
+++ b/drivers/net/wireless/intel/iwlwifi/cfg/7000.c
@@ -154,8 +154,7 @@ static const struct iwl_ht_params iwl7000_ht_params = {
.nvm_hw_section_num = 0, \
.non_shared_ant = ANT_A, \
.max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K, \
- .dccm_offset = IWL7000_DCCM_OFFSET, \
- .trans.csr = &iwl_csr_v1
+ .dccm_offset = IWL7000_DCCM_OFFSET
#define IWL_DEVICE_7000 \
IWL_DEVICE_7000_COMMON, \
diff --git a/drivers/net/wireless/intel/iwlwifi/cfg/8000.c b/drivers/net/wireless/intel/iwlwifi/cfg/8000.c
index b3cc477140c0..280d84fa5cb1 100644
--- a/drivers/net/wireless/intel/iwlwifi/cfg/8000.c
+++ b/drivers/net/wireless/intel/iwlwifi/cfg/8000.c
@@ -151,8 +151,7 @@ static const struct iwl_tt_params iwl8000_tt_params = {
.apmg_not_supported = true, \
.nvm_type = IWL_NVM_EXT, \
.dbgc_supported = true, \
- .min_umac_error_event_table = 0x800000, \
- .trans.csr = &iwl_csr_v1
+ .min_umac_error_event_table = 0x800000
#define IWL_DEVICE_8000 \
IWL_DEVICE_8000_COMMON, \
diff --git a/drivers/net/wireless/intel/iwlwifi/cfg/9000.c b/drivers/net/wireless/intel/iwlwifi/cfg/9000.c
index e9155b9b5ee4..379ea788e424 100644
--- a/drivers/net/wireless/intel/iwlwifi/cfg/9000.c
+++ b/drivers/net/wireless/intel/iwlwifi/cfg/9000.c
@@ -138,13 +138,13 @@ static const struct iwl_tt_params iwl9000_tt_params = {
.thermal_params = &iwl9000_tt_params, \
.apmg_not_supported = true, \
.trans.mq_rx_supported = true, \
+ .num_rbds = 512, \
.vht_mu_mimo_supported = true, \
.mac_addr_from_csr = true, \
.trans.rf_id = true, \
.nvm_type = IWL_NVM_EXT, \
.dbgc_supported = true, \
.min_umac_error_event_table = 0x800000, \
- .trans.csr = &iwl_csr_v1, \
.d3_debug_data_base_addr = 0x401000, \
.d3_debug_data_length = 92 * 1024, \
.ht_params = &iwl9000_ht_params, \
@@ -171,6 +171,12 @@ static const struct iwl_tt_params iwl9000_tt_params = {
}, \
}
+const struct iwl_cfg_trans_params iwl9000_trans_cfg = {
+ .device_family = IWL_DEVICE_FAMILY_9000,
+ .base_params = &iwl9000_base_params,
+ .mq_rx_supported = true,
+ .rf_id = true,
+};
const struct iwl_cfg iwl9160_2ac_cfg = {
.name = "Intel(R) Dual Band Wireless AC 9160",
@@ -184,8 +190,10 @@ const struct iwl_cfg iwl9260_2ac_cfg = {
IWL_DEVICE_9000,
};
+const char iwl9260_160_name[] = "Intel(R) Wireless-AC 9260 160MHz";
+const char iwl9560_160_name[] = "Intel(R) Wireless-AC 9560 160MHz";
+
const struct iwl_cfg iwl9260_2ac_160_cfg = {
- .name = "Intel(R) Wireless-AC 9260 160MHz",
.fw_name_pre = IWL9260_FW_PRE,
IWL_DEVICE_9000,
};
diff --git a/drivers/net/wireless/intel/iwlwifi/dvm/dev.h b/drivers/net/wireless/intel/iwlwifi/dvm/dev.h
index be5ef4c3e9d0..8d8380026180 100644
--- a/drivers/net/wireless/intel/iwlwifi/dvm/dev.h
+++ b/drivers/net/wireless/intel/iwlwifi/dvm/dev.h
@@ -237,11 +237,6 @@ struct iwl_sensitivity_ranges {
u16 nrg_th_cca;
};
-
-#define KELVIN_TO_CELSIUS(x) ((x)-273)
-#define CELSIUS_TO_KELVIN(x) ((x)+273)
-
-
/******************************************************************************
*
* Functions implemented in core module which are forward declared here
diff --git a/drivers/net/wireless/intel/iwlwifi/dvm/devices.c b/drivers/net/wireless/intel/iwlwifi/dvm/devices.c
index dc3f197f94d9..d42bc46fe566 100644
--- a/drivers/net/wireless/intel/iwlwifi/dvm/devices.c
+++ b/drivers/net/wireless/intel/iwlwifi/dvm/devices.c
@@ -10,6 +10,8 @@
*
*****************************************************************************/
+#include <linux/units.h>
+
/*
* DVM device-specific data & functions
*/
@@ -345,7 +347,7 @@ static s32 iwl_temp_calib_to_offset(struct iwl_priv *priv)
static void iwl5150_set_ct_threshold(struct iwl_priv *priv)
{
const s32 volt2temp_coef = IWL_5150_VOLTAGE_TO_TEMPERATURE_COEFF;
- s32 threshold = (s32)CELSIUS_TO_KELVIN(CT_KILL_THRESHOLD_LEGACY) -
+ s32 threshold = (s32)celsius_to_kelvin(CT_KILL_THRESHOLD_LEGACY) -
iwl_temp_calib_to_offset(priv);
priv->hw_params.ct_kill_threshold = threshold * volt2temp_coef;
@@ -381,7 +383,7 @@ static void iwl5150_temperature(struct iwl_priv *priv)
vt = le32_to_cpu(priv->statistics.common.temperature);
vt = vt / IWL_5150_VOLTAGE_TO_TEMPERATURE_COEFF + offset;
/* now vt hold the temperature in Kelvin */
- priv->temperature = KELVIN_TO_CELSIUS(vt);
+ priv->temperature = kelvin_to_celsius(vt);
iwl_tt_handler(priv);
}
diff --git a/drivers/net/wireless/intel/iwlwifi/dvm/main.c b/drivers/net/wireless/intel/iwlwifi/dvm/main.c
index 4f2789bb3b5b..598ee7315558 100644
--- a/drivers/net/wireless/intel/iwlwifi/dvm/main.c
+++ b/drivers/net/wireless/intel/iwlwifi/dvm/main.c
@@ -1255,7 +1255,7 @@ static struct iwl_op_mode *iwl_op_mode_dvm_start(struct iwl_trans *trans,
************************/
hw = iwl_alloc_all();
if (!hw) {
- pr_err("%s: Cannot allocate network device\n", cfg->name);
+ pr_err("%s: Cannot allocate network device\n", trans->name);
goto out;
}
@@ -1390,7 +1390,7 @@ static struct iwl_op_mode *iwl_op_mode_dvm_start(struct iwl_trans *trans,
* 2. Read REV register
***********************/
IWL_INFO(priv, "Detected %s, REV=0x%X\n",
- priv->cfg->name, priv->trans->hw_rev);
+ priv->trans->name, priv->trans->hw_rev);
if (iwl_trans_start_hw(priv->trans))
goto out_free_hw;
diff --git a/drivers/net/wireless/intel/iwlwifi/dvm/tx.c b/drivers/net/wireless/intel/iwlwifi/dvm/tx.c
index cd73fc5cfcbb..fd454836adbe 100644
--- a/drivers/net/wireless/intel/iwlwifi/dvm/tx.c
+++ b/drivers/net/wireless/intel/iwlwifi/dvm/tx.c
@@ -267,7 +267,7 @@ int iwlagn_tx_skb(struct iwl_priv *priv,
struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
struct iwl_station_priv *sta_priv = NULL;
struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
- struct iwl_device_cmd *dev_cmd;
+ struct iwl_device_tx_cmd *dev_cmd;
struct iwl_tx_cmd *tx_cmd;
__le16 fc;
u8 hdr_len;
@@ -348,7 +348,6 @@ int iwlagn_tx_skb(struct iwl_priv *priv,
if (unlikely(!dev_cmd))
goto drop_unlock_priv;
- memset(dev_cmd, 0, sizeof(*dev_cmd));
dev_cmd->hdr.cmd = REPLY_TX;
tx_cmd = (struct iwl_tx_cmd *) dev_cmd->payload;
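The explicit memset() that used to follow the allocation can go because iwl_trans_alloc_tx_cmd() now zeroes the command itself (see the iwl-trans.h hunk further down):

	/* from iwl-trans.h in this same patch */
	return kmem_cache_zalloc(trans->dev_cmd_pool, GFP_ATOMIC);

so every struct iwl_device_tx_cmd already arrives cleared.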
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/acpi.c b/drivers/net/wireless/intel/iwlwifi/fw/acpi.c
index 40fe2d667622..48d375a86d86 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/acpi.c
+++ b/drivers/net/wireless/intel/iwlwifi/fw/acpi.c
@@ -357,8 +357,8 @@ int iwl_sar_get_ewrd_table(struct iwl_fw_runtime *fwrt)
{
union acpi_object *wifi_pkg, *data;
bool enabled;
- int i, n_profiles, tbl_rev;
- int ret = 0;
+ int i, n_profiles, tbl_rev, pos;
+ int ret = 0;
data = iwl_acpi_get_object(fwrt->dev, ACPI_EWRD_METHOD);
if (IS_ERR(data))
@@ -390,10 +390,10 @@ int iwl_sar_get_ewrd_table(struct iwl_fw_runtime *fwrt)
goto out_free;
}
- for (i = 0; i < n_profiles; i++) {
- /* the tables start at element 3 */
- int pos = 3;
+ /* the tables start at element 3 */
+ pos = 3;
+ for (i = 0; i < n_profiles; i++) {
/* The EWRD profiles officially go from 2 to 4, but we
* save them in sar_profiles[1-3] (because we don't
* have profile 0). So in the array we start from 1.
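The hoisted initializer is the substance of this fix: pos now advances monotonically across the profile loop, so profile i+1 parses the table elements that follow profile i's data instead of every iteration rewinding to element 3 and re-reading the first profile's table.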
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/location.h b/drivers/net/wireless/intel/iwlwifi/fw/api/location.h
index 7a0fe5adefa5..a0d6802c2715 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/api/location.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/api/location.h
@@ -240,7 +240,7 @@ enum iwl_tof_responder_cfg_flags {
};
/**
- * struct iwl_tof_responder_config_cmd - ToF AP mode (for debug)
+ * struct iwl_tof_responder_config_cmd_v6 - ToF AP mode (for debug)
* @cmd_valid_fields: &iwl_tof_responder_cmd_valid_field
* @responder_cfg_flags: &iwl_tof_responder_cfg_flags
* @bandwidth: current AP Bandwidth: &enum iwl_tof_bandwidth
@@ -258,7 +258,7 @@ enum iwl_tof_responder_cfg_flags {
* @bssid: Current AP BSSID
* @reserved2: reserved
*/
-struct iwl_tof_responder_config_cmd {
+struct iwl_tof_responder_config_cmd_v6 {
__le32 cmd_valid_fields;
__le32 responder_cfg_flags;
u8 bandwidth;
@@ -274,6 +274,42 @@ struct iwl_tof_responder_config_cmd {
__le16 reserved2;
} __packed; /* TOF_RESPONDER_CONFIG_CMD_API_S_VER_6 */
+/**
+ * struct iwl_tof_responder_config_cmd - ToF AP mode (for debug)
+ * @cmd_valid_fields: &iwl_tof_responder_cmd_valid_field
+ * @responder_cfg_flags: &iwl_tof_responder_cfg_flags
+ * @format_bw: bits 0 - 3: &enum iwl_location_frame_format.
+ * bits 4 - 7: &enum iwl_location_bw.
+ * @rate: current AP rate
+ * @channel_num: current AP Channel
+ * @ctrl_ch_position: coding of the control channel position relative to
+ * the center frequency, see iwl_mvm_get_ctrl_pos()
+ * @sta_id: index of the AP STA when in AP mode
+ * @reserved1: reserved
+ * @toa_offset: Artificial addition [pSec] for the ToA - to be used for debug
+ * purposes, simulating station movement by adding various values
+ * to this field
+ * @common_calib: XVT: common calibration value
+ * @specific_calib: XVT: specific calibration value
+ * @bssid: Current AP BSSID
+ * @reserved2: reserved
+ */
+struct iwl_tof_responder_config_cmd {
+ __le32 cmd_valid_fields;
+ __le32 responder_cfg_flags;
+ u8 format_bw;
+ u8 rate;
+ u8 channel_num;
+ u8 ctrl_ch_position;
+ u8 sta_id;
+ u8 reserved1;
+ __le16 toa_offset;
+ __le16 common_calib;
+ __le16 specific_calib;
+ u8 bssid[ETH_ALEN];
+ __le16 reserved2;
+} __packed; /* TOF_RESPONDER_CONFIG_CMD_API_S_VER_6 */
+
#define IWL_LCI_CIVIC_IE_MAX_SIZE 400
/**
@@ -403,7 +439,7 @@ enum iwl_initiator_ap_flags {
};
/**
- * struct iwl_tof_range_req_ap_entry - AP configuration parameters
+ * struct iwl_tof_range_req_ap_entry_v3 - AP configuration parameters
* @initiator_ap_flags: see &enum iwl_initiator_ap_flags.
* @channel_num: AP Channel number
* @bandwidth: AP bandwidth. One of iwl_tof_bandwidth.
@@ -420,7 +456,7 @@ enum iwl_initiator_ap_flags {
* @reserved: For alignment and future use
* @tsf_delta: not in use
*/
-struct iwl_tof_range_req_ap_entry {
+struct iwl_tof_range_req_ap_entry_v3 {
__le32 initiator_ap_flags;
u8 channel_num;
u8 bandwidth;
@@ -435,6 +471,72 @@ struct iwl_tof_range_req_ap_entry {
} __packed; /* LOCATION_RANGE_REQ_AP_ENTRY_CMD_API_S_VER_3 */
/**
+ * enum iwl_location_frame_format - location frame formats
+ * @IWL_LOCATION_FRAME_FORMAT_LEGACY: legacy
+ * @IWL_LOCATION_FRAME_FORMAT_HT: HT
+ * @IWL_LOCATION_FRAME_FORMAT_VHT: VHT
+ * @IWL_LOCATION_FRAME_FORMAT_HE: HE
+ */
+enum iwl_location_frame_format {
+ IWL_LOCATION_FRAME_FORMAT_LEGACY,
+ IWL_LOCATION_FRAME_FORMAT_HT,
+ IWL_LOCATION_FRAME_FORMAT_VHT,
+ IWL_LOCATION_FRAME_FORMAT_HE,
+};
+
+/**
+ * enum iwl_location_bw - location bandwidth selection
+ * @IWL_LOCATION_BW_20MHZ: 20MHz
+ * @IWL_LOCATION_BW_40MHZ: 40MHz
+ * @IWL_LOCATION_BW_80MHZ: 80MHz
+ */
+enum iwl_location_bw {
+ IWL_LOCATION_BW_20MHZ,
+ IWL_LOCATION_BW_40MHZ,
+ IWL_LOCATION_BW_80MHZ,
+};
+
+#define HLTK_11AZ_LEN 32
+#define TK_11AZ_LEN 32
+
+#define LOCATION_BW_POS 4
+
+/**
+ * struct iwl_tof_range_req_ap_entry - AP configuration parameters
+ * @initiator_ap_flags: see &enum iwl_initiator_ap_flags.
+ * @channel_num: AP Channel number
+ * @format_bw: bits 0 - 3: &enum iwl_location_frame_format.
+ * bits 4 - 7: &enum iwl_location_bw.
+ * @ctrl_ch_position: Coding of the control channel position relative to the
+ * center frequency, see iwl_mvm_get_ctrl_pos().
+ * @ftmr_max_retries: Max number of retries to send the FTMR in case of no
+ * reply from the AP.
+ * @bssid: AP's BSSID
+ * @burst_period: Recommended value to be sent to the AP. Measurement
+ *	periodicity in units of 100ms. Ignored if num_of_bursts is 0
+ * @samples_per_burst: the number of FTM pairs in a single burst (1-31)
+ * @num_of_bursts: Recommended value to be sent to the AP. Base-2 exponent
+ *	of the number of measurement iterations (min 2^0 = 1, max 2^14)
+ * @reserved: For alignment and future use
+ * @hltk: HLTK to be used for secured 11az measurement
+ * @tk: TK to be used for secured 11az measurement
+ */
+struct iwl_tof_range_req_ap_entry {
+ __le32 initiator_ap_flags;
+ u8 channel_num;
+ u8 format_bw;
+ u8 ctrl_ch_position;
+ u8 ftmr_max_retries;
+ u8 bssid[ETH_ALEN];
+ __le16 burst_period;
+ u8 samples_per_burst;
+ u8 num_of_bursts;
+ __le16 reserved;
+ u8 hltk[HLTK_11AZ_LEN];
+ u8 tk[TK_11AZ_LEN];
+} __packed; /* LOCATION_RANGE_REQ_AP_ENTRY_CMD_API_S_VER_4 */
+
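A minimal illustration of the new @format_bw encoding; it mirrors what iwl_mvm_ftm_target_chandef_v2() does further down in this patch (the chosen values are just an example):

	/* 80 MHz VHT: frame format in the low nibble, bandwidth above it */
	u8 format_bw = IWL_LOCATION_FRAME_FORMAT_VHT |
		       (IWL_LOCATION_BW_80MHZ << LOCATION_BW_POS);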
+/**
* enum iwl_tof_response_mode
* @IWL_MVM_TOF_RESPONSE_ASAP: report each AP measurement separately as soon as
* possible (not supported for this release)
@@ -536,6 +638,38 @@ struct iwl_tof_range_req_cmd_v5 {
/* LOCATION_RANGE_REQ_CMD_API_S_VER_5 */
/**
+ * struct iwl_tof_range_req_cmd_v7 - start measurement cmd
+ * @initiator_flags: see flags @ iwl_tof_initiator_flags
+ * @request_id: A Token incremented per request. The same Token will be
+ * sent back in the range response
+ * @num_of_ap: Number of APs to measure (error if > IWL_MVM_TOF_MAX_APS)
+ * @range_req_bssid: ranging request BSSID
+ * @macaddr_mask: Bits set to 0 shall be copied from the MAC address template.
+ * Bits set to 1 shall be randomized by the UMAC
+ * @macaddr_template: MAC address template to use for non-randomized bits
+ * @req_timeout_ms: Requested timeout of the response in units of milliseconds.
+ * This is the session time for completing the measurement.
+ * @tsf_mac_id: report the measurement start time for each ap in terms of the
+ * TSF of this mac id. 0xff to disable TSF reporting.
+ * @common_calib: The common calib value to inject to this measurement calc
+ * @specific_calib: The specific calib value to inject to this measurement calc
+ * @ap: per-AP request data, see &struct iwl_tof_range_req_ap_entry_v3.
+ */
+struct iwl_tof_range_req_cmd_v7 {
+ __le32 initiator_flags;
+ u8 request_id;
+ u8 num_of_ap;
+ u8 range_req_bssid[ETH_ALEN];
+ u8 macaddr_mask[ETH_ALEN];
+ u8 macaddr_template[ETH_ALEN];
+ __le32 req_timeout_ms;
+ __le32 tsf_mac_id;
+ __le16 common_calib;
+ __le16 specific_calib;
+ struct iwl_tof_range_req_ap_entry_v3 ap[IWL_MVM_TOF_MAX_APS];
+} __packed; /* LOCATION_RANGE_REQ_CMD_API_S_VER_7 */
+
+/**
* struct iwl_tof_range_req_cmd - start measurement cmd
* @initiator_flags: see flags @ iwl_tof_initiator_flags
* @request_id: A Token incremented per request. The same Token will be
@@ -565,7 +699,7 @@ struct iwl_tof_range_req_cmd {
__le16 common_calib;
__le16 specific_calib;
struct iwl_tof_range_req_ap_entry ap[IWL_MVM_TOF_MAX_APS];
-} __packed; /* LOCATION_RANGE_REQ_CMD_API_S_VER_7 */
+} __packed; /* LOCATION_RANGE_REQ_CMD_API_S_VER_8 */
/*
* enum iwl_tof_range_request_status - status of the sent request
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/scan.h b/drivers/net/wireless/intel/iwlwifi/fw/api/scan.h
index 408798f351c6..1b2b5fa56e19 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/api/scan.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/api/scan.h
@@ -922,21 +922,6 @@ struct iwl_scan_probe_params_v4 {
#define SCAN_MAX_NUM_CHANS_V3 67
/**
- * struct iwl_scan_channel_params_v3
- * @flags: channel flags &enum iwl_scan_channel_flags
- * @count: num of channels in scan request
- * @reserved: for future use and alignment
- * @channel_config: array of explicit channel configurations
- * for 2.4Ghz and 5.2Ghz bands
- */
-struct iwl_scan_channel_params_v3 {
- u8 flags;
- u8 count;
- __le16 reserved;
- struct iwl_scan_channel_cfg_umac channel_config[SCAN_MAX_NUM_CHANS_V3];
-} __packed; /* SCAN_CHANNEL_PARAMS_API_S_VER_3 */
-
-/**
* struct iwl_scan_channel_params_v4
* @flags: channel flags &enum iwl_scan_channel_flags
* @count: num of channels in scan request
@@ -1011,20 +996,6 @@ struct iwl_scan_periodic_parms_v1 {
} __packed; /* SCAN_PERIODIC_PARAMS_API_S_VER_1 */
/**
- * struct iwl_scan_req_params_v11
- * @general_params: &struct iwl_scan_general_params_v10
- * @channel_params: &struct iwl_scan_channel_params_v3
- * @periodic_params: &struct iwl_scan_periodic_parms_v1
- * @probe_params: &struct iwl_scan_probe_params_v3
- */
-struct iwl_scan_req_params_v11 {
- struct iwl_scan_general_params_v10 general_params;
- struct iwl_scan_channel_params_v3 channel_params;
- struct iwl_scan_periodic_parms_v1 periodic_params;
- struct iwl_scan_probe_params_v3 probe_params;
-} __packed; /* SCAN_REQUEST_PARAMS_API_S_VER_11 */
-
-/**
* struct iwl_scan_req_params_v12
* @general_params: &struct iwl_scan_general_params_v10
* @channel_params: &struct iwl_scan_channel_params_v4
@@ -1053,18 +1024,6 @@ struct iwl_scan_req_params_v13 {
} __packed; /* SCAN_REQUEST_PARAMS_API_S_VER_13 */
/**
- * struct iwl_scan_req_umac_v11
- * @uid: scan id, &enum iwl_umac_scan_uid_offsets
- * @ooc_priority: out of channel priority - &enum iwl_scan_priority
- * @scan_params: scan parameters
- */
-struct iwl_scan_req_umac_v11 {
- __le32 uid;
- __le32 ooc_priority;
- struct iwl_scan_req_params_v11 scan_params;
-} __packed; /* SCAN_REQUEST_CMD_UMAC_API_S_VER_11 */
-
-/**
* struct iwl_scan_req_umac_v12
* @uid: scan id, &enum iwl_umac_scan_uid_offsets
* @ooc_priority: out of channel priority - &enum iwl_scan_priority
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/tx.h b/drivers/net/wireless/intel/iwlwifi/fw/api/tx.h
index f89a9e16a8c0..f1d1fe96fecc 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/api/tx.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/api/tx.h
@@ -813,6 +813,7 @@ enum iwl_mac_beacon_flags {
IWL_MAC_BEACON_ANT_A = BIT(9),
IWL_MAC_BEACON_ANT_B = BIT(10),
IWL_MAC_BEACON_ANT_C = BIT(11),
+ IWL_MAC_BEACON_FILS = BIT(12),
};
/**
@@ -820,6 +821,7 @@ enum iwl_mac_beacon_flags {
* @byte_cnt: byte count of the beacon frame.
* @flags: least significant byte for rate code. The most significant byte
* is &enum iwl_mac_beacon_flags.
+ * @short_ssid: Short SSID
* @reserved: reserved
* @template_id: currently equal to the mac context id of the corresponding mac.
* @tim_idx: the offset of the tim IE in the beacon
@@ -831,14 +833,15 @@ enum iwl_mac_beacon_flags {
struct iwl_mac_beacon_cmd {
__le16 byte_cnt;
__le16 flags;
- __le64 reserved;
+ __le32 short_ssid;
+ __le32 reserved;
__le32 template_id;
__le32 tim_idx;
__le32 tim_size;
__le32 ecsa_offset;
__le32 csa_offset;
struct ieee80211_hdr frame[0];
-} __packed; /* BEACON_TEMPLATE_CMD_API_S_VER_9 */
+} __packed; /* BEACON_TEMPLATE_CMD_API_S_VER_10 */
struct iwl_beacon_notif {
struct iwl_mvm_tx_resp beacon_notify_hdr;
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/dbg.c b/drivers/net/wireless/intel/iwlwifi/fw/dbg.c
index ed90dd104366..91df1ee25dd0 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/dbg.c
+++ b/drivers/net/wireless/intel/iwlwifi/fw/dbg.c
@@ -929,7 +929,7 @@ iwl_fw_error_dump_file(struct iwl_fw_runtime *fwrt,
cpu_to_le32(CSR_HW_REV_STEP(fwrt->trans->hw_rev));
memcpy(dump_info->fw_human_readable, fwrt->fw->human_readable,
sizeof(dump_info->fw_human_readable));
- strncpy(dump_info->dev_human_readable, fwrt->trans->cfg->name,
+ strncpy(dump_info->dev_human_readable, fwrt->trans->name,
sizeof(dump_info->dev_human_readable) - 1);
strncpy(dump_info->bus_human_readable, fwrt->dev->bus->name,
sizeof(dump_info->bus_human_readable) - 1);
@@ -1230,13 +1230,15 @@ static bool iwl_ini_txf_iter(struct iwl_fw_runtime *fwrt,
iter->lmac = 0;
}
- if (!iter->internal_txf)
+ if (!iter->internal_txf) {
for (iter->fifo++; iter->fifo < txf_num; iter->fifo++) {
iter->fifo_size =
cfg->lmac[iter->lmac].txfifo_size[iter->fifo];
if (iter->fifo_size && (lmac_bitmap & BIT(iter->fifo)))
return true;
}
+ iter->fifo--;
+ }
iter->internal_txf = 1;
@@ -2351,9 +2353,6 @@ int iwl_fw_dbg_ini_collect(struct iwl_fw_runtime *fwrt,
u32 occur, delay;
unsigned long idx;
- if (test_bit(STATUS_GEN_ACTIVE_TRIGS, &fwrt->status))
- return -EBUSY;
-
if (!iwl_fw_ini_trigger_on(fwrt, trig)) {
IWL_WARN(fwrt, "WRT: Trigger %d is not active, aborting dump\n",
tp_id);
@@ -2669,12 +2668,7 @@ int iwl_fw_dbg_stop_restart_recording(struct iwl_fw_runtime *fwrt,
{
int ret = 0;
- /* if the FW crashed or not debug monitor cfg was given, there is
- * no point in changing the recording state
- */
- if (test_bit(STATUS_FW_ERROR, &fwrt->trans->status) ||
- (!fwrt->trans->dbg.dest_tlv &&
- fwrt->trans->dbg.ini_dest == IWL_FW_INI_LOCATION_INVALID))
+ if (test_bit(STATUS_FW_ERROR, &fwrt->trans->status))
return 0;
if (fw_has_capa(&fwrt->fw->ucode_capa,
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/debugfs.c b/drivers/net/wireless/intel/iwlwifi/fw/debugfs.c
index ca3b1a461dea..89f74116569d 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/debugfs.c
+++ b/drivers/net/wireless/intel/iwlwifi/fw/debugfs.c
@@ -320,31 +320,6 @@ out:
FWRT_DEBUGFS_WRITE_FILE_OPS(send_hcmd, 512);
-static ssize_t iwl_dbgfs_fw_dbg_domain_write(struct iwl_fw_runtime *fwrt,
- char *buf, size_t count)
-{
- u32 new_domain;
- int ret;
-
- if (!iwl_trans_fw_running(fwrt->trans))
- return -EIO;
-
- ret = kstrtou32(buf, 0, &new_domain);
- if (ret)
- return ret;
-
- if (new_domain != fwrt->trans->dbg.domains_bitmap) {
- ret = iwl_dbg_tlv_gen_active_trigs(fwrt, new_domain);
- if (ret)
- return ret;
-
- iwl_dbg_tlv_time_point(fwrt, IWL_FW_INI_TIME_POINT_PERIODIC,
- NULL);
- }
-
- return count;
-}
-
static ssize_t iwl_dbgfs_fw_dbg_domain_read(struct iwl_fw_runtime *fwrt,
size_t size, char *buf)
{
@@ -352,7 +327,7 @@ static ssize_t iwl_dbgfs_fw_dbg_domain_read(struct iwl_fw_runtime *fwrt,
fwrt->trans->dbg.domains_bitmap);
}
-FWRT_DEBUGFS_READ_WRITE_FILE_OPS(fw_dbg_domain, 20);
+FWRT_DEBUGFS_READ_FILE_OPS(fw_dbg_domain, 20);
void iwl_fwrt_dbgfs_register(struct iwl_fw_runtime *fwrt,
struct dentry *dbgfs_dir)
@@ -360,5 +335,5 @@ void iwl_fwrt_dbgfs_register(struct iwl_fw_runtime *fwrt,
INIT_DELAYED_WORK(&fwrt->timestamp.wk, iwl_fw_timestamp_marker_wk);
FWRT_DEBUGFS_ADD_FILE(timestamp_marker, dbgfs_dir, 0200);
FWRT_DEBUGFS_ADD_FILE(send_hcmd, dbgfs_dir, 0200);
- FWRT_DEBUGFS_ADD_FILE(fw_dbg_domain, dbgfs_dir, 0600);
+ FWRT_DEBUGFS_ADD_FILE(fw_dbg_domain, dbgfs_dir, 0400);
}
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/img.h b/drivers/net/wireless/intel/iwlwifi/fw/img.h
index 994880a83652..90ca5f929cf9 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/img.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/img.h
@@ -251,7 +251,7 @@ struct iwl_fw_dbg {
struct iwl_fw {
u32 ucode_ver;
- char fw_version[ETHTOOL_FWVERS_LEN];
+ char fw_version[64];
/* ucode images */
struct fw_img img[IWL_UCODE_TYPE_MAX];
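ETHTOOL_FWVERS_LEN is only 32 bytes; once the reduced firmware file name is appended to the version string (see the iwl-drv.c hunks below), a string such as "46.<build>.0 <fw-file>.ucode" (illustrative) no longer fits, hence the larger fixed-size buffer.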
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/runtime.h b/drivers/net/wireless/intel/iwlwifi/fw/runtime.h
index c24575ff0e54..f8c6ed823bc5 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/runtime.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/runtime.h
@@ -69,7 +69,7 @@
#include "iwl-eeprom-parse.h"
#include "fw/acpi.h"
-#define IWL_FW_DBG_DOMAIN IWL_FW_INI_DOMAIN_ALWAYS_ON
+#define IWL_FW_DBG_DOMAIN IWL_TRANS_FW_DBG_DOMAIN(fwrt->trans)
struct iwl_fw_runtime_ops {
int (*dump_start)(void *ctx);
@@ -130,14 +130,6 @@ struct iwl_txf_iter_data {
};
/**
- * enum iwl_fw_runtime_status - fw runtime status flags
- * @STATUS_GEN_ACTIVE_TRIGS: generating active trigger list
- */
-enum iwl_fw_runtime_status {
- STATUS_GEN_ACTIVE_TRIGS,
-};
-
-/**
* struct iwl_fw_runtime - runtime data for firmware
* @fw: firmware image
* @cfg: NIC configuration
@@ -150,7 +142,6 @@ enum iwl_fw_runtime_status {
* @smem_cfg: saved firmware SMEM configuration
* @cur_fw_img: current firmware image, must be maintained by
* the driver by calling &iwl_fw_set_current_image()
- * @status: &enum iwl_fw_runtime_status
* @dump: debug dump data
*/
struct iwl_fw_runtime {
@@ -171,8 +162,6 @@ struct iwl_fw_runtime {
/* memory configuration */
struct iwl_fwrt_shared_mem_cfg smem_cfg;
- unsigned long status;
-
/* debug */
struct {
const struct iwl_fw_dump_desc *desc;
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-config.h b/drivers/net/wireless/intel/iwlwifi/iwl-config.h
index 317eac066082..be6a2bf9ce74 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-config.h
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-config.h
@@ -285,52 +285,6 @@ struct iwl_pwr_tx_backoff {
};
/**
- * struct iwl_csr_params
- *
- * @flag_sw_reset: reset the device
- * @flag_mac_clock_ready:
- * Indicates MAC (ucode processor, etc.) is powered up and can run.
- * Internal resources are accessible.
- * NOTE: This does not indicate that the processor is actually running.
- * NOTE: This does not indicate that device has completed
- * init or post-power-down restore of internal SRAM memory.
- * Use CSR_UCODE_DRV_GP1_BIT_MAC_SLEEP as indication that
- * SRAM is restored and uCode is in normal operation mode.
- * This note is relevant only for pre 5xxx devices.
- * NOTE: After device reset, this bit remains "0" until host sets
- * INIT_DONE
- * @flag_init_done: Host sets this to put device into fully operational
- * D0 power mode. Host resets this after SW_RESET to put device into
- * low power mode.
- * @flag_mac_access_req: Host sets this to request and maintain MAC wakeup,
- * to allow host access to device-internal resources. Host must wait for
- * mac_clock_ready (and !GOING_TO_SLEEP) before accessing non-CSR device
- * registers.
- * @flag_val_mac_access_en: mac access is enabled
- * @flag_master_dis: disable master
- * @flag_stop_master: stop master
- * @addr_sw_reset: address for resetting the device
- * @mac_addr0_otp: first part of MAC address from OTP
- * @mac_addr1_otp: second part of MAC address from OTP
- * @mac_addr0_strap: first part of MAC address from strap
- * @mac_addr1_strap: second part of MAC address from strap
- */
-struct iwl_csr_params {
- u8 flag_sw_reset;
- u8 flag_mac_clock_ready;
- u8 flag_init_done;
- u8 flag_mac_access_req;
- u8 flag_val_mac_access_en;
- u8 flag_master_dis;
- u8 flag_stop_master;
- u8 addr_sw_reset;
- u32 mac_addr0_otp;
- u32 mac_addr1_otp;
- u32 mac_addr0_strap;
- u32 mac_addr1_strap;
-};
-
-/**
* struct iwl_cfg_trans - information needed to start the trans
*
* These values cannot be changed when multiple configs are used for a
@@ -348,7 +302,6 @@ struct iwl_csr_params {
*/
struct iwl_cfg_trans_params {
const struct iwl_base_params *base_params;
- const struct iwl_csr_params *csr;
enum iwl_device_family device_family;
u32 umac_prph_offset;
u32 rf_id:1,
@@ -431,6 +384,8 @@ struct iwl_fw_mon_regs {
* @uhb_supported: ultra high band channels supported
* @min_256_ba_txq_size: minimum number of slots required in a TX queue which
* supports 256 BA aggregation
+ * @num_rbds: number of receive buffer descriptors to use
+ * (only used for multi-queue capable devices)
*
* We enable the driver to be backward compatible wrt. hardware features.
* API differences in uCode shouldn't be handled here but through TLVs
@@ -485,6 +440,7 @@ struct iwl_cfg {
u8 max_vht_ampdu_exponent;
u8 ucode_api_max;
u8 ucode_api_min;
+ u16 num_rbds;
u32 min_umac_error_event_table;
u32 extra_phy_cfg_flags;
u32 d3_debug_data_base_addr;
@@ -496,12 +452,22 @@ struct iwl_cfg {
const struct iwl_fw_mon_regs mon_smem_regs;
};
-extern const struct iwl_csr_params iwl_csr_v1;
-extern const struct iwl_csr_params iwl_csr_v2;
+#define IWL_CFG_ANY (~0)
+
+struct iwl_dev_info {
+ u16 device;
+ u16 subdevice;
+ const struct iwl_cfg *cfg;
+ const char *name;
+};
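A sketch of how a probe path might consume this table, treating IWL_CFG_ANY as a wildcard (the table contents and the helper name here are illustrative, not part of this patch):

	static const struct iwl_dev_info iwl_dev_info_table[] = {
		{ .device = 0x2526, .subdevice = IWL_CFG_ANY,
		  .cfg = &iwl9260_2ac_160_cfg, .name = iwl9260_160_name },
	};

	static const struct iwl_dev_info *
	iwl_find_dev_info(u16 device, u16 subdevice)
	{
		int i;

		for (i = 0; i < ARRAY_SIZE(iwl_dev_info_table); i++) {
			const struct iwl_dev_info *info = &iwl_dev_info_table[i];

			if ((info->device == (u16)IWL_CFG_ANY ||
			     info->device == device) &&
			    (info->subdevice == (u16)IWL_CFG_ANY ||
			     info->subdevice == subdevice))
				return info;
		}

		return NULL;
	}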
/*
* This list declares the config structures for all devices.
*/
+extern const struct iwl_cfg_trans_params iwl9000_trans_cfg;
+extern const char iwl9260_160_name[];
+extern const char iwl9560_160_name[];
+
#if IS_ENABLED(CONFIG_IWLDVM)
extern const struct iwl_cfg iwl5300_agn_cfg;
extern const struct iwl_cfg iwl5100_agn_cfg;
@@ -595,9 +561,6 @@ extern const struct iwl_cfg iwl9560_2ac_cfg_shared_clk;
extern const struct iwl_cfg iwl9560_2ac_160_cfg_shared_clk;
extern const struct iwl_cfg iwl9560_killer_2ac_cfg_shared_clk;
extern const struct iwl_cfg iwl9560_killer_s_2ac_cfg_shared_clk;
-extern const struct iwl_cfg iwl22000_2ac_cfg_hr;
-extern const struct iwl_cfg iwl22000_2ac_cfg_hr_cdb;
-extern const struct iwl_cfg iwl22000_2ac_cfg_jf;
extern const struct iwl_cfg iwl_ax101_cfg_qu_hr;
extern const struct iwl_cfg iwl_ax101_cfg_qu_c0_hr_b0;
extern const struct iwl_cfg iwl_ax101_cfg_quz_hr;
@@ -636,6 +599,7 @@ extern const struct iwl_cfg iwlax210_2ax_cfg_so_hr_a0;
extern const struct iwl_cfg iwlax211_2ax_cfg_so_gf_a0;
extern const struct iwl_cfg iwlax210_2ax_cfg_ty_gf_a0;
extern const struct iwl_cfg iwlax411_2ax_cfg_so_gf4_a0;
+extern const struct iwl_cfg iwlax411_2ax_cfg_sosnj_gf4_a0;
#endif /* CPTCFG_IWLMVM || CPTCFG_IWLFMAC */
#endif /* __IWL_CONFIG_H__ */
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-context-info.h b/drivers/net/wireless/intel/iwlwifi/iwl-context-info.h
index 5ed07e37e3ee..eeaa8cbdddce 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-context-info.h
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-context-info.h
@@ -6,7 +6,7 @@
* GPL LICENSE SUMMARY
*
* Copyright(c) 2017 Intel Deutschland GmbH
- * Copyright(c) 2018 Intel Corporation
+ * Copyright(c) 2018 - 2019 Intel Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -20,7 +20,7 @@
* BSD LICENSE
*
* Copyright(c) 2017 Intel Deutschland GmbH
- * Copyright(c) 2018 Intel Corporation
+ * Copyright(c) 2018 - 2019 Intel Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -64,12 +64,12 @@
* the init done for driver command that configures several system modes
* @IWL_CTXT_INFO_EARLY_DEBUG: enable early debug
* @IWL_CTXT_INFO_ENABLE_CDMP: enable core dump
- * @IWL_CTXT_INFO_RB_CB_SIZE_POS: position of the RBD Cyclic Buffer Size
+ * @IWL_CTXT_INFO_RB_CB_SIZE: mask of the RBD Cyclic Buffer Size
* exponent, the actual size is 2**value, valid sizes are 8-2048.
* The value is four bits long. Maximum valid exponent is 12
* @IWL_CTXT_INFO_TFD_FORMAT_LONG: use long TFD Format (the
* default is short format - not supported by the driver)
- * @IWL_CTXT_INFO_RB_SIZE_POS: RB size position
+ * @IWL_CTXT_INFO_RB_SIZE: RB size mask
* (values are IWL_CTXT_INFO_RB_SIZE_*K)
* @IWL_CTXT_INFO_RB_SIZE_1K: Value for 1K RB size
* @IWL_CTXT_INFO_RB_SIZE_2K: Value for 2K RB size
@@ -83,12 +83,12 @@
* @IWL_CTXT_INFO_RB_SIZE_32K: Value for 32K RB size
*/
enum iwl_context_info_flags {
- IWL_CTXT_INFO_AUTO_FUNC_INIT = BIT(0),
- IWL_CTXT_INFO_EARLY_DEBUG = BIT(1),
- IWL_CTXT_INFO_ENABLE_CDMP = BIT(2),
- IWL_CTXT_INFO_RB_CB_SIZE_POS = 4,
- IWL_CTXT_INFO_TFD_FORMAT_LONG = BIT(8),
- IWL_CTXT_INFO_RB_SIZE_POS = 9,
+ IWL_CTXT_INFO_AUTO_FUNC_INIT = 0x0001,
+ IWL_CTXT_INFO_EARLY_DEBUG = 0x0002,
+ IWL_CTXT_INFO_ENABLE_CDMP = 0x0004,
+ IWL_CTXT_INFO_RB_CB_SIZE = 0x00f0,
+ IWL_CTXT_INFO_TFD_FORMAT_LONG = 0x0100,
+ IWL_CTXT_INFO_RB_SIZE = 0x1e00,
IWL_CTXT_INFO_RB_SIZE_1K = 0x1,
IWL_CTXT_INFO_RB_SIZE_2K = 0x2,
IWL_CTXT_INFO_RB_SIZE_4K = 0x4,
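With mask-style definitions the field values can be placed with the generic bitfield helpers instead of open-coded shifts against the old *_POS constants; a sketch (the variable names are illustrative):

	#include <linux/bitfield.h>

	u32 control_flags = IWL_CTXT_INFO_TFD_FORMAT_LONG |
			    u32_encode_bits(IWL_CTXT_INFO_RB_SIZE_4K,
					    IWL_CTXT_INFO_RB_SIZE) |
			    u32_encode_bits(rb_cb_size_exp,
					    IWL_CTXT_INFO_RB_CB_SIZE);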
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-csr.h b/drivers/net/wireless/intel/iwlwifi/iwl-csr.h
index 92d9898ab7c2..cb9e8e189a1a 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-csr.h
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-csr.h
@@ -256,6 +256,7 @@
/* RESET */
#define CSR_RESET_REG_FLAG_NEVO_RESET (0x00000001)
#define CSR_RESET_REG_FLAG_FORCE_NMI (0x00000002)
+#define CSR_RESET_REG_FLAG_SW_RESET (0x00000080)
#define CSR_RESET_REG_FLAG_MASTER_DISABLED (0x00000100)
#define CSR_RESET_REG_FLAG_STOP_MASTER (0x00000200)
#define CSR_RESET_LINK_PWR_MGMT_DISABLED (0x80000000)
@@ -278,11 +279,35 @@
* 4: GOING_TO_SLEEP
* Indicates MAC is entering a power-saving sleep power-down.
* Not a good time to access device-internal resources.
+ * 3: MAC_ACCESS_REQ
+ * Host sets this to request and maintain MAC wakeup, to allow host
+ * access to device-internal resources. Host must wait for
+ * MAC_CLOCK_READY (and !GOING_TO_SLEEP) before accessing non-CSR
+ * device registers.
+ * 2: INIT_DONE
+ * Host sets this to put device into fully operational D0 power mode.
+ * Host resets this after SW_RESET to put device into low power mode.
+ * 0: MAC_CLOCK_READY
+ * Indicates MAC (ucode processor, etc.) is powered up and can run.
+ * Internal resources are accessible.
+ * NOTE: This does not indicate that the processor is actually running.
+ * NOTE: This does not indicate that device has completed
+ * init or post-power-down restore of internal SRAM memory.
+ * Use CSR_UCODE_DRV_GP1_BIT_MAC_SLEEP as indication that
+ * SRAM is restored and uCode is in normal operation mode.
+ * Later devices (5xxx/6xxx/1xxx) use non-volatile SRAM, and
+ * do not need to save/restore it.
+ * NOTE: After device reset, this bit remains "0" until host sets
+ * INIT_DONE
*/
+#define CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY (0x00000001)
#define CSR_GP_CNTRL_REG_FLAG_INIT_DONE (0x00000004)
-#define CSR_GP_CNTRL_REG_FLAG_GOING_TO_SLEEP (0x00000010)
+#define CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ (0x00000008)
+#define CSR_GP_CNTRL_REG_FLAG_GOING_TO_SLEEP (0x00000010)
#define CSR_GP_CNTRL_REG_FLAG_XTAL_ON (0x00000400)
+#define CSR_GP_CNTRL_REG_VAL_MAC_ACCESS_EN (0x00000001)
+
#define CSR_GP_CNTRL_REG_MSK_POWER_SAVE_TYPE (0x07000000)
#define CSR_GP_CNTRL_REG_FLAG_RFKILL_WAKE_L1A_EN (0x04000000)
#define CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW (0x08000000)
@@ -379,7 +404,7 @@ enum {
/* CSR GIO */
-#define CSR_GIO_REG_VAL_L0S_ENABLED (0x00000002)
+#define CSR_GIO_REG_VAL_L0S_DISABLED (0x00000002)
/*
* UCODE-DRIVER GP (general purpose) mailbox register 1
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-dbg-tlv.c b/drivers/net/wireless/intel/iwlwifi/iwl-dbg-tlv.c
index f266647dc08c..4208e720f6e6 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-dbg-tlv.c
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-dbg-tlv.c
@@ -290,10 +290,19 @@ void iwl_dbg_tlv_alloc(struct iwl_trans *trans, struct iwl_ucode_tlv *tlv,
struct iwl_fw_ini_header *hdr = (void *)&tlv->data[0];
u32 type = le32_to_cpu(tlv->type);
u32 tlv_idx = type - IWL_UCODE_TLV_DEBUG_BASE;
+ u32 domain = le32_to_cpu(hdr->domain);
enum iwl_ini_cfg_state *cfg_state = ext ?
&trans->dbg.external_ini_cfg : &trans->dbg.internal_ini_cfg;
int ret;
+ if (domain != IWL_FW_INI_DOMAIN_ALWAYS_ON &&
+ !(domain & trans->dbg.domains_bitmap)) {
+ IWL_DEBUG_FW(trans,
+ "WRT: Skipping TLV with disabled domain 0x%0x (0x%0x)\n",
+ domain, trans->dbg.domains_bitmap);
+ return;
+ }
+
if (tlv_idx >= ARRAY_SIZE(dbg_tlv_alloc) || !dbg_tlv_alloc[tlv_idx]) {
IWL_ERR(trans, "WRT: Unsupported TLV type 0x%x\n", type);
goto out_err;
@@ -480,7 +489,14 @@ static int iwl_dbg_tlv_alloc_fragment(struct iwl_fw_runtime *fwrt,
if (!frag || frag->size || !pages)
return -EIO;
- while (pages) {
+ /*
+ * We try to allocate as many pages as we can, starting with
+ * the requested amount and going down until we can allocate
+ * something. Because of DIV_ROUND_UP(), pages will never go
+ * down to 0 and stop the loop, so stop when pages reaches 1,
+ * which is too small anyway.
+ */
+ while (pages > 1) {
block = dma_alloc_coherent(fwrt->dev, pages * PAGE_SIZE,
&physical,
GFP_KERNEL | __GFP_NOWARN);
@@ -660,7 +676,6 @@ static void iwl_dbg_tlv_send_hcmds(struct iwl_fw_runtime *fwrt,
list_for_each_entry(node, hcmd_list, list) {
struct iwl_fw_ini_hcmd_tlv *hcmd = (void *)node->tlv.data;
struct iwl_fw_ini_hcmd *hcmd_data = &hcmd->hcmd;
- u32 domain = le32_to_cpu(hcmd->hdr.domain);
u16 hcmd_len = le32_to_cpu(node->tlv.length) - sizeof(*hcmd);
struct iwl_host_cmd cmd = {
.id = WIDE_ID(hcmd_data->group, hcmd_data->id),
@@ -668,10 +683,6 @@ static void iwl_dbg_tlv_send_hcmds(struct iwl_fw_runtime *fwrt,
.data = { hcmd_data->data, },
};
- if (domain != IWL_FW_INI_DOMAIN_ALWAYS_ON &&
- !(domain & fwrt->trans->dbg.domains_bitmap))
- continue;
-
iwl_trans_send_cmd(fwrt->trans, &cmd);
}
}
@@ -891,55 +902,17 @@ static void
iwl_dbg_tlv_gen_active_trig_list(struct iwl_fw_runtime *fwrt,
struct iwl_dbg_tlv_time_point_data *tp)
{
- struct iwl_dbg_tlv_node *node, *tmp;
+ struct iwl_dbg_tlv_node *node;
struct list_head *trig_list = &tp->trig_list;
struct list_head *active_trig_list = &tp->active_trig_list;
- list_for_each_entry_safe(node, tmp, active_trig_list, list) {
- list_del(&node->list);
- kfree(node);
- }
-
list_for_each_entry(node, trig_list, list) {
struct iwl_ucode_tlv *tlv = &node->tlv;
- struct iwl_fw_ini_trigger_tlv *trig = (void *)tlv->data;
- u32 domain = le32_to_cpu(trig->hdr.domain);
-
- if (domain != IWL_FW_INI_DOMAIN_ALWAYS_ON &&
- !(domain & fwrt->trans->dbg.domains_bitmap))
- continue;
iwl_dbg_tlv_add_active_trigger(fwrt, active_trig_list, tlv);
}
}
-int iwl_dbg_tlv_gen_active_trigs(struct iwl_fw_runtime *fwrt, u32 new_domain)
-{
- int i;
-
- if (test_and_set_bit(STATUS_GEN_ACTIVE_TRIGS, &fwrt->status))
- return -EBUSY;
-
- iwl_fw_flush_dumps(fwrt);
-
- fwrt->trans->dbg.domains_bitmap = new_domain;
-
- IWL_DEBUG_FW(fwrt,
- "WRT: Generating active triggers list, domain 0x%x\n",
- fwrt->trans->dbg.domains_bitmap);
-
- for (i = 0; i < ARRAY_SIZE(fwrt->trans->dbg.time_point); i++) {
- struct iwl_dbg_tlv_time_point_data *tp =
- &fwrt->trans->dbg.time_point[i];
-
- iwl_dbg_tlv_gen_active_trig_list(fwrt, tp);
- }
-
- clear_bit(STATUS_GEN_ACTIVE_TRIGS, &fwrt->status);
-
- return 0;
-}
-
static bool iwl_dbg_tlv_check_fw_pkt(struct iwl_fw_runtime *fwrt,
struct iwl_fwrt_dump_data *dump_data,
union iwl_dbg_tlv_tp_data *tp_data,
@@ -1013,7 +986,16 @@ static void iwl_dbg_tlv_init_cfg(struct iwl_fw_runtime *fwrt)
enum iwl_fw_ini_buffer_location *ini_dest = &fwrt->trans->dbg.ini_dest;
int ret, i;
- iwl_dbg_tlv_gen_active_trigs(fwrt, IWL_FW_DBG_DOMAIN);
+ IWL_DEBUG_FW(fwrt,
+ "WRT: Generating active triggers list, domain 0x%x\n",
+ fwrt->trans->dbg.domains_bitmap);
+
+ for (i = 0; i < ARRAY_SIZE(fwrt->trans->dbg.time_point); i++) {
+ struct iwl_dbg_tlv_time_point_data *tp =
+ &fwrt->trans->dbg.time_point[i];
+
+ iwl_dbg_tlv_gen_active_trig_list(fwrt, tp);
+ }
*ini_dest = IWL_FW_INI_LOCATION_INVALID;
for (i = 0; i < IWL_FW_INI_ALLOCATION_NUM; i++) {
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-dbg-tlv.h b/drivers/net/wireless/intel/iwlwifi/iwl-dbg-tlv.h
index f18946872569..1360676b3b21 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-dbg-tlv.h
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-dbg-tlv.h
@@ -105,7 +105,6 @@ void iwl_dbg_tlv_init(struct iwl_trans *trans);
void iwl_dbg_tlv_time_point(struct iwl_fw_runtime *fwrt,
enum iwl_fw_ini_time_point tp_id,
union iwl_dbg_tlv_tp_data *tp_data);
-int iwl_dbg_tlv_gen_active_trigs(struct iwl_fw_runtime *fwrt, u32 new_domain);
void iwl_dbg_tlv_del_timers(struct iwl_trans *trans);
#endif /* __iwl_dbg_tlv_h__*/
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-drv.c b/drivers/net/wireless/intel/iwlwifi/iwl-drv.c
index 4096ccf58b07..2d1cb4647c3b 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-drv.c
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-drv.c
@@ -493,6 +493,16 @@ static void iwl_set_ucode_capabilities(struct iwl_drv *drv, const u8 *data,
}
}
+static const char *iwl_reduced_fw_name(struct iwl_drv *drv)
+{
+ const char *name = drv->firmware_name;
+
+ if (strncmp(name, "iwlwifi-", 8) == 0)
+ name += 8;
+
+ return name;
+}
+
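Illustrative effect (the file name is made up): for drv->firmware_name == "iwlwifi-9260-th-b0-jf-b0-46.ucode" the helper returns "9260-th-b0-jf-b0-46.ucode", so the reported version becomes e.g. "46.<build>.0 9260-th-b0-jf-b0-46.ucode" and log lines identify the exact firmware file.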
static int iwl_parse_v1_v2_firmware(struct iwl_drv *drv,
const struct firmware *ucode_raw,
struct iwl_firmware_pieces *pieces)
@@ -551,12 +561,12 @@ static int iwl_parse_v1_v2_firmware(struct iwl_drv *drv,
snprintf(drv->fw.fw_version,
sizeof(drv->fw.fw_version),
- "%u.%u.%u.%u%s",
+ "%u.%u.%u.%u%s %s",
IWL_UCODE_MAJOR(drv->fw.ucode_ver),
IWL_UCODE_MINOR(drv->fw.ucode_ver),
IWL_UCODE_API(drv->fw.ucode_ver),
IWL_UCODE_SERIAL(drv->fw.ucode_ver),
- buildstr);
+ buildstr, iwl_reduced_fw_name(drv));
/* Verify size of file vs. image size info in file's header */
@@ -636,12 +646,12 @@ static int iwl_parse_tlv_firmware(struct iwl_drv *drv,
snprintf(drv->fw.fw_version,
sizeof(drv->fw.fw_version),
- "%u.%u.%u.%u%s",
+ "%u.%u.%u.%u%s %s",
IWL_UCODE_MAJOR(drv->fw.ucode_ver),
IWL_UCODE_MINOR(drv->fw.ucode_ver),
IWL_UCODE_API(drv->fw.ucode_ver),
IWL_UCODE_SERIAL(drv->fw.ucode_ver),
- buildstr);
+ buildstr, iwl_reduced_fw_name(drv));
data = ucode->data;
@@ -895,11 +905,13 @@ static int iwl_parse_tlv_firmware(struct iwl_drv *drv,
if (major >= 35)
snprintf(drv->fw.fw_version,
sizeof(drv->fw.fw_version),
- "%u.%08x.%u", major, minor, local_comp);
+ "%u.%08x.%u %s", major, minor,
+ local_comp, iwl_reduced_fw_name(drv));
else
snprintf(drv->fw.fw_version,
sizeof(drv->fw.fw_version),
- "%u.%u.%u", major, minor, local_comp);
+ "%u.%u.%u %s", major, minor,
+ local_comp, iwl_reduced_fw_name(drv));
break;
}
case IWL_UCODE_TLV_FW_DBG_DEST: {
@@ -1647,6 +1659,8 @@ struct iwl_drv *iwl_drv_start(struct iwl_trans *trans)
drv->trans->dbgfs_dir = debugfs_create_dir("trans", drv->dbgfs_drv);
#endif
+ drv->trans->dbg.domains_bitmap = IWL_TRANS_FW_DBG_DOMAIN(drv->trans);
+
ret = iwl_request_firmware(drv, true);
if (ret) {
IWL_ERR(trans, "Couldn't request the fw\n");
@@ -1817,9 +1831,6 @@ MODULE_PARM_DESC(antenna_coupling,
module_param_named(nvm_file, iwlwifi_mod_params.nvm_file, charp, 0444);
MODULE_PARM_DESC(nvm_file, "NVM file name");
-module_param_named(lar_disable, iwlwifi_mod_params.lar_disable, bool, 0444);
-MODULE_PARM_DESC(lar_disable, "disable LAR functionality (default: N)");
-
module_param_named(uapsd_disable, iwlwifi_mod_params.uapsd_disable, uint, 0644);
MODULE_PARM_DESC(uapsd_disable,
"disable U-APSD functionality bitmap 1: BSS 2: P2P Client (default: 3)");
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-fh.h b/drivers/net/wireless/intel/iwlwifi/iwl-fh.h
index 8836f85afe85..bf673ce5f183 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-fh.h
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-fh.h
@@ -611,10 +611,7 @@ static inline unsigned int FH_MEM_CBBC_QUEUE(struct iwl_trans *trans,
*/
#define FH_TX_CHICKEN_BITS_SCD_AUTO_RETRY_EN (0x00000002)
-#define MQ_RX_TABLE_SIZE 512
-#define MQ_RX_TABLE_MASK (MQ_RX_TABLE_SIZE - 1)
-#define MQ_RX_NUM_RBDS (MQ_RX_TABLE_SIZE - 1)
-#define RX_POOL_SIZE (MQ_RX_NUM_RBDS + \
+#define RX_POOL_SIZE(rbds) ((rbds) - 1 + \
IWL_MAX_RX_HW_QUEUES * \
(RX_CLAIM_REQ_ALLOC - RX_POST_REQ_ALLOC))
/* cb size is the exponent */
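Sketch of the intended call-site usage (an assumption based on the new @num_rbds field added to struct iwl_cfg above; the allocator change itself is outside this diff):

	int pool_size = RX_POOL_SIZE(trans->cfg->num_rbds);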
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-io.c b/drivers/net/wireless/intel/iwlwifi/iwl-io.c
index 1b7414bf7bef..2139f0b8f2bb 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-io.c
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-io.c
@@ -70,36 +70,6 @@
#include "iwl-prph.h"
#include "iwl-fh.h"
-const struct iwl_csr_params iwl_csr_v1 = {
- .flag_mac_clock_ready = 0,
- .flag_val_mac_access_en = 0,
- .flag_init_done = 2,
- .flag_mac_access_req = 3,
- .flag_sw_reset = 7,
- .flag_master_dis = 8,
- .flag_stop_master = 9,
- .addr_sw_reset = CSR_BASE + 0x020,
- .mac_addr0_otp = 0x380,
- .mac_addr1_otp = 0x384,
- .mac_addr0_strap = 0x388,
- .mac_addr1_strap = 0x38C
-};
-
-const struct iwl_csr_params iwl_csr_v2 = {
- .flag_init_done = 6,
- .flag_mac_clock_ready = 20,
- .flag_val_mac_access_en = 20,
- .flag_mac_access_req = 21,
- .flag_master_dis = 28,
- .flag_stop_master = 29,
- .flag_sw_reset = 31,
- .addr_sw_reset = CSR_BASE + 0x024,
- .mac_addr0_otp = 0x30,
- .mac_addr1_otp = 0x34,
- .mac_addr0_strap = 0x38,
- .mac_addr1_strap = 0x3C
-};
-
void iwl_write8(struct iwl_trans *trans, u32 ofs, u8 val)
{
trace_iwlwifi_dev_iowrite8(trans->dev, ofs, val);
@@ -506,8 +476,7 @@ int iwl_finish_nic_init(struct iwl_trans *trans,
* Set "initialization complete" bit to move adapter from
* D0U* --> D0A* (powered-up active) state.
*/
- iwl_set_bit(trans, CSR_GP_CNTRL,
- BIT(cfg_trans->csr->flag_init_done));
+ iwl_set_bit(trans, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
if (cfg_trans->device_family == IWL_DEVICE_FAMILY_8000)
udelay(2);
@@ -518,8 +487,8 @@ int iwl_finish_nic_init(struct iwl_trans *trans,
* and accesses to uCode SRAM.
*/
err = iwl_poll_bit(trans, CSR_GP_CNTRL,
- BIT(cfg_trans->csr->flag_mac_clock_ready),
- BIT(cfg_trans->csr->flag_mac_clock_ready),
+ CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
+ CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
25000);
if (err < 0)
IWL_DEBUG_INFO(trans, "Failed to wake NIC\n");
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-modparams.h b/drivers/net/wireless/intel/iwlwifi/iwl-modparams.h
index ebea3f308b5d..82e5cac23d8d 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-modparams.h
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-modparams.h
@@ -115,7 +115,6 @@ enum iwl_uapsd_disable {
* @nvm_file: specifies an external NVM file
* @uapsd_disable: disable U-APSD, see &enum iwl_uapsd_disable, default =
* IWL_DISABLE_UAPSD_BSS | IWL_DISABLE_UAPSD_P2P_CLIENT
- * @lar_disable: disable LAR (regulatory), default = 0
* @fw_monitor: allow use of the firmware monitor
* @disable_11ac: disable VHT capabilities, default = false.
* @remove_when_gone: remove an inaccessible device from the PCIe bus.
@@ -136,7 +135,6 @@ struct iwl_mod_params {
int antenna_coupling;
char *nvm_file;
u32 uapsd_disable;
- bool lar_disable;
bool fw_monitor;
bool disable_11ac;
/**
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c b/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c
index 1e240a2a8329..bab0999f002c 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c
@@ -224,6 +224,34 @@ enum iwl_nvm_channel_flags {
NVM_CHANNEL_DC_HIGH = BIT(12),
};
+/**
+ * enum iwl_reg_capa_flags - global flags applied for the whole regulatory
+ * domain.
+ * @REG_CAPA_BF_CCD_LOW_BAND: Beam-forming or Cyclic Delay Diversity in the
+ *	2.4GHz band is allowed.
+ * @REG_CAPA_BF_CCD_HIGH_BAND: Beam-forming or Cyclic Delay Diversity in the
+ *	5GHz band is allowed.
+ * @REG_CAPA_160MHZ_ALLOWED: 11ac channel with a width of 160MHz is allowed
+ *	for this regulatory domain (valid only in 5GHz).
+ * @REG_CAPA_80MHZ_ALLOWED: 11ac channel with a width of 80MHz is allowed
+ *	for this regulatory domain (valid only in 5GHz).
+ * @REG_CAPA_MCS_8_ALLOWED: 11ac with MCS 8 is allowed.
+ * @REG_CAPA_MCS_9_ALLOWED: 11ac with MCS 9 is allowed.
+ * @REG_CAPA_40MHZ_FORBIDDEN: 11n channel with a width of 40MHz is forbidden
+ *	for this regulatory domain (valid only in 5GHz).
+ * @REG_CAPA_DC_HIGH_ENABLED: DC HIGH allowed.
+ */
+enum iwl_reg_capa_flags {
+ REG_CAPA_BF_CCD_LOW_BAND = BIT(0),
+ REG_CAPA_BF_CCD_HIGH_BAND = BIT(1),
+ REG_CAPA_160MHZ_ALLOWED = BIT(2),
+ REG_CAPA_80MHZ_ALLOWED = BIT(3),
+ REG_CAPA_MCS_8_ALLOWED = BIT(4),
+ REG_CAPA_MCS_9_ALLOWED = BIT(5),
+ REG_CAPA_40MHZ_FORBIDDEN = BIT(7),
+ REG_CAPA_DC_HIGH_ENABLED = BIT(9),
+};
+
static inline void iwl_nvm_print_channel_flags(struct device *dev, u32 level,
int chan, u32 flags)
{
@@ -801,12 +829,8 @@ static void iwl_flip_hw_address(__le32 mac_addr0, __le32 mac_addr1, u8 *dest)
static void iwl_set_hw_address_from_csr(struct iwl_trans *trans,
struct iwl_nvm_data *data)
{
- __le32 mac_addr0 =
- cpu_to_le32(iwl_read32(trans,
- trans->trans_cfg->csr->mac_addr0_strap));
- __le32 mac_addr1 =
- cpu_to_le32(iwl_read32(trans,
- trans->trans_cfg->csr->mac_addr1_strap));
+ __le32 mac_addr0 = cpu_to_le32(iwl_read32(trans, CSR_MAC_ADDR0_STRAP));
+ __le32 mac_addr1 = cpu_to_le32(iwl_read32(trans, CSR_MAC_ADDR1_STRAP));
iwl_flip_hw_address(mac_addr0, mac_addr1, data->hw_addr);
/*
@@ -816,10 +840,8 @@ static void iwl_set_hw_address_from_csr(struct iwl_trans *trans,
if (is_valid_ether_addr(data->hw_addr))
return;
- mac_addr0 = cpu_to_le32(iwl_read32(trans,
- trans->trans_cfg->csr->mac_addr0_otp));
- mac_addr1 = cpu_to_le32(iwl_read32(trans,
- trans->trans_cfg->csr->mac_addr1_otp));
+ mac_addr0 = cpu_to_le32(iwl_read32(trans, CSR_MAC_ADDR0_OTP));
+ mac_addr1 = cpu_to_le32(iwl_read32(trans, CSR_MAC_ADDR1_OTP));
iwl_flip_hw_address(mac_addr0, mac_addr1, data->hw_addr);
}
@@ -939,10 +961,11 @@ iwl_nvm_no_wide_in_5ghz(struct iwl_trans *trans, const struct iwl_cfg *cfg,
struct iwl_nvm_data *
iwl_parse_nvm_data(struct iwl_trans *trans, const struct iwl_cfg *cfg,
+ const struct iwl_fw *fw,
const __be16 *nvm_hw, const __le16 *nvm_sw,
const __le16 *nvm_calib, const __le16 *regulatory,
const __le16 *mac_override, const __le16 *phy_sku,
- u8 tx_chains, u8 rx_chains, bool lar_fw_supported)
+ u8 tx_chains, u8 rx_chains)
{
struct iwl_nvm_data *data;
bool lar_enabled;
@@ -1022,7 +1045,8 @@ iwl_parse_nvm_data(struct iwl_trans *trans, const struct iwl_cfg *cfg,
return NULL;
}
- if (lar_fw_supported && lar_enabled)
+ if (lar_enabled &&
+ fw_has_capa(&fw->ucode_capa, IWL_UCODE_TLV_CAPA_LAR_SUPPORT))
sbands_flags |= IWL_NVM_SBANDS_FLAGS_LAR;
if (iwl_nvm_no_wide_in_5ghz(trans, cfg, nvm_hw))
@@ -1038,6 +1062,7 @@ IWL_EXPORT_SYMBOL(iwl_parse_nvm_data);
static u32 iwl_nvm_get_regdom_bw_flags(const u16 *nvm_chan,
int ch_idx, u16 nvm_flags,
+ u16 cap_flags,
const struct iwl_cfg *cfg)
{
u32 flags = NL80211_RRF_NO_HT40;
@@ -1076,13 +1101,27 @@ static u32 iwl_nvm_get_regdom_bw_flags(const u16 *nvm_chan,
(flags & NL80211_RRF_NO_IR))
flags |= NL80211_RRF_GO_CONCURRENT;
+ /*
+ * cap_flags is per regulatory domain, so apply it to every channel
+ */
+ if (ch_idx >= NUM_2GHZ_CHANNELS) {
+ if (cap_flags & REG_CAPA_40MHZ_FORBIDDEN)
+ flags |= NL80211_RRF_NO_HT40;
+
+ if (!(cap_flags & REG_CAPA_80MHZ_ALLOWED))
+ flags |= NL80211_RRF_NO_80MHZ;
+
+ if (!(cap_flags & REG_CAPA_160MHZ_ALLOWED))
+ flags |= NL80211_RRF_NO_160MHZ;
+ }
+
return flags;
}
struct ieee80211_regdomain *
iwl_parse_nvm_mcc_info(struct device *dev, const struct iwl_cfg *cfg,
int num_of_ch, __le32 *channels, u16 fw_mcc,
- u16 geo_info)
+ u16 geo_info, u16 cap)
{
int ch_idx;
u16 ch_flags;
@@ -1140,7 +1179,8 @@ iwl_parse_nvm_mcc_info(struct device *dev, const struct iwl_cfg *cfg,
}
reg_rule_flags = iwl_nvm_get_regdom_bw_flags(nvm_chan, ch_idx,
- ch_flags, cfg);
+ ch_flags, cap,
+ cfg);
/* we can't continue the same rule */
if (ch_idx == 0 || prev_reg_rule_flags != reg_rule_flags ||
@@ -1405,9 +1445,6 @@ struct iwl_nvm_data *iwl_get_nvm(struct iwl_trans *trans,
.id = WIDE_ID(REGULATORY_AND_NVM_GROUP, NVM_GET_INFO)
};
int ret;
- bool lar_fw_supported = !iwlwifi_mod_params.lar_disable &&
- fw_has_capa(&fw->ucode_capa,
- IWL_UCODE_TLV_CAPA_LAR_SUPPORT);
bool empty_otp;
u32 mac_flags;
u32 sbands_flags = 0;
@@ -1485,7 +1522,9 @@ struct iwl_nvm_data *iwl_get_nvm(struct iwl_trans *trans,
nvm->valid_tx_ant = (u8)le32_to_cpu(rsp->phy_sku.tx_chains);
nvm->valid_rx_ant = (u8)le32_to_cpu(rsp->phy_sku.rx_chains);
- if (le32_to_cpu(rsp->regulatory.lar_enabled) && lar_fw_supported) {
+ if (le32_to_cpu(rsp->regulatory.lar_enabled) &&
+ fw_has_capa(&fw->ucode_capa,
+ IWL_UCODE_TLV_CAPA_LAR_SUPPORT)) {
nvm->lar_enabled = true;
sbands_flags |= IWL_NVM_SBANDS_FLAGS_LAR;
}
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.h b/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.h
index b7e1ddf8f177..fb0b385d10fd 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.h
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.h
@@ -7,7 +7,7 @@
*
* Copyright(c) 2008 - 2015 Intel Corporation. All rights reserved.
* Copyright(c) 2016 - 2017 Intel Deutschland GmbH
- * Copyright(c) 2018 Intel Corporation
+ * Copyright(c) 2018 - 2019 Intel Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -29,7 +29,7 @@
*
* Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2016 - 2017 Intel Deutschland GmbH
- * Copyright(c) 2018 Intel Corporation
+ * Copyright(c) 2018 - 2019 Intel Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -85,10 +85,11 @@ enum iwl_nvm_sbands_flags {
*/
struct iwl_nvm_data *
iwl_parse_nvm_data(struct iwl_trans *trans, const struct iwl_cfg *cfg,
+ const struct iwl_fw *fw,
const __be16 *nvm_hw, const __le16 *nvm_sw,
const __le16 *nvm_calib, const __le16 *regulatory,
const __le16 *mac_override, const __le16 *phy_sku,
- u8 tx_chains, u8 rx_chains, bool lar_fw_supported);
+ u8 tx_chains, u8 rx_chains);
/**
* iwl_parse_mcc_info - parse MCC (mobile country code) info coming from FW
@@ -103,7 +104,7 @@ iwl_parse_nvm_data(struct iwl_trans *trans, const struct iwl_cfg *cfg,
struct ieee80211_regdomain *
iwl_parse_nvm_mcc_info(struct device *dev, const struct iwl_cfg *cfg,
int num_of_ch, __le32 *channels, u16 fw_mcc,
- u16 geo_info);
+ u16 geo_info, u16 cap);
/**
* struct iwl_nvm_section - describes an NVM section in memory.
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-prph.h b/drivers/net/wireless/intel/iwlwifi/iwl-prph.h
index 14c8ba23f3b9..1136d9784f9d 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-prph.h
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-prph.h
@@ -411,13 +411,6 @@ enum {
HW_STEP_LOCATION_BITS = 24,
};
-#define AUX_MISC_MASTER1_EN 0xA20818
-enum aux_misc_master1_en {
- AUX_MISC_MASTER1_EN_SBE_MSK = 0x1,
-};
-
-#define AUX_MISC_MASTER1_SMPHR_STATUS 0xA20800
-#define RSA_ENABLE 0xA24B08
#define PREG_AUX_BUS_WPROT_0 0xA04CC0
/* device family 9000 WPROT register */
@@ -430,6 +423,9 @@ enum aux_misc_master1_en {
#define UMAG_SB_CPU_1_STATUS 0xA038C0
#define UMAG_SB_CPU_2_STATUS 0xA038C4
#define UMAG_GEN_HW_STATUS 0xA038C8
+#define UREG_UMAC_CURRENT_PC 0xa05c18
+#define UREG_LMAC1_CURRENT_PC 0xa05c1c
+#define UREG_LMAC2_CURRENT_PC 0xa05c20
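A plausible consumer, shown for illustration only (the dump code is not part of this hunk): the error path can now report the firmware program counters, e.g.

	IWL_ERR(trans, "UMAC PC: 0x%x\n",
		iwl_read_umac_prph(trans, UREG_UMAC_CURRENT_PC));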
/* For UMAG_GEN_HW_STATUS reg check */
enum {
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-trans.c b/drivers/net/wireless/intel/iwlwifi/iwl-trans.c
index 28bdc9a9617e..f91197e4ae40 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-trans.c
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-trans.c
@@ -66,7 +66,9 @@
struct iwl_trans *iwl_trans_alloc(unsigned int priv_size,
struct device *dev,
- const struct iwl_trans_ops *ops)
+ const struct iwl_trans_ops *ops,
+ unsigned int cmd_pool_size,
+ unsigned int cmd_pool_align)
{
struct iwl_trans *trans;
#ifdef CONFIG_LOCKDEP
@@ -90,10 +92,8 @@ struct iwl_trans *iwl_trans_alloc(unsigned int priv_size,
"iwl_cmd_pool:%s", dev_name(trans->dev));
trans->dev_cmd_pool =
kmem_cache_create(trans->dev_cmd_pool_name,
- sizeof(struct iwl_device_cmd),
- sizeof(void *),
- SLAB_HWCACHE_ALIGN,
- NULL);
+ cmd_pool_size, cmd_pool_align,
+ SLAB_HWCACHE_ALIGN, NULL);
if (!trans->dev_cmd_pool)
return NULL;
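A hypothetical caller sketch (the txcmd_* names are assumed; the PCIe-side change is not shown here): each transport now dictates its own TX command cache geometry instead of the old hard-coded sizeof(struct iwl_device_cmd):

	trans = iwl_trans_alloc(sizeof(struct iwl_trans_pcie), &pdev->dev,
				ops, txcmd_size, txcmd_align);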
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-trans.h b/drivers/net/wireless/intel/iwlwifi/iwl-trans.h
index 8cadad7364ac..7b3b1f4c99b4 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-trans.h
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-trans.h
@@ -112,6 +112,8 @@
* 6) Eventually, the free function will be called.
*/
+#define IWL_TRANS_FW_DBG_DOMAIN(trans) IWL_FW_INI_DOMAIN_ALWAYS_ON
+
#define FH_RSCSR_FRAME_SIZE_MSK 0x00003FFF /* bits 0-13 */
#define FH_RSCSR_FRAME_INVALID 0x55550000
#define FH_RSCSR_FRAME_ALIGN 0x40
@@ -193,6 +195,18 @@ struct iwl_device_cmd {
};
} __packed;
+/**
+ * struct iwl_device_tx_cmd - buffer for TX command
+ * @hdr: the header
+ * @payload: the payload placeholder
+ *
+ * The actual structure is sized dynamically according to need.
+ */
+struct iwl_device_tx_cmd {
+ struct iwl_cmd_header hdr;
+ u8 payload[];
+} __packed;
+
#define TFD_MAX_PAYLOAD_SIZE (sizeof(struct iwl_device_cmd))
/*
@@ -358,6 +372,24 @@ iwl_trans_get_rb_size_order(enum iwl_amsdu_size rb_size)
}
}
+static inline int
+iwl_trans_get_rb_size(enum iwl_amsdu_size rb_size)
+{
+ switch (rb_size) {
+ case IWL_AMSDU_2K:
+ return 2 * 1024;
+ case IWL_AMSDU_4K:
+ return 4 * 1024;
+ case IWL_AMSDU_8K:
+ return 8 * 1024;
+ case IWL_AMSDU_12K:
+ return 12 * 1024;
+ default:
+ WARN_ON(1);
+ return 0;
+ }
+}
+
struct iwl_hcmd_names {
u8 cmd_id;
const char *const cmd_name;
@@ -544,7 +576,7 @@ struct iwl_trans_ops {
int (*send_cmd)(struct iwl_trans *trans, struct iwl_host_cmd *cmd);
int (*tx)(struct iwl_trans *trans, struct sk_buff *skb,
- struct iwl_device_cmd *dev_cmd, int queue);
+ struct iwl_device_tx_cmd *dev_cmd, int queue);
void (*reclaim)(struct iwl_trans *trans, int queue, int ssn,
struct sk_buff_head *skbs);
@@ -839,6 +871,8 @@ struct iwl_trans {
enum iwl_plat_pm_mode system_pm_mode;
+ const char *name;
+
/* pointer to trans specific struct */
/*Ensure that this pointer will always be aligned to sizeof pointer */
char trans_specific[0] __aligned(sizeof(void *));
@@ -948,22 +982,22 @@ iwl_trans_dump_data(struct iwl_trans *trans, u32 dump_mask)
return trans->ops->dump_data(trans, dump_mask);
}
-static inline struct iwl_device_cmd *
+static inline struct iwl_device_tx_cmd *
iwl_trans_alloc_tx_cmd(struct iwl_trans *trans)
{
- return kmem_cache_alloc(trans->dev_cmd_pool, GFP_ATOMIC);
+ return kmem_cache_zalloc(trans->dev_cmd_pool, GFP_ATOMIC);
}
int iwl_trans_send_cmd(struct iwl_trans *trans, struct iwl_host_cmd *cmd);
static inline void iwl_trans_free_tx_cmd(struct iwl_trans *trans,
- struct iwl_device_cmd *dev_cmd)
+ struct iwl_device_tx_cmd *dev_cmd)
{
kmem_cache_free(trans->dev_cmd_pool, dev_cmd);
}
static inline int iwl_trans_tx(struct iwl_trans *trans, struct sk_buff *skb,
- struct iwl_device_cmd *dev_cmd, int queue)
+ struct iwl_device_tx_cmd *dev_cmd, int queue)
{
if (unlikely(test_bit(STATUS_FW_ERROR, &trans->status)))
return -EIO;
@@ -1271,7 +1305,9 @@ static inline bool iwl_trans_dbg_ini_valid(struct iwl_trans *trans)
*****************************************************/
struct iwl_trans *iwl_trans_alloc(unsigned int priv_size,
struct device *dev,
- const struct iwl_trans_ops *ops);
+ const struct iwl_trans_ops *ops,
+ unsigned int cmd_pool_size,
+ unsigned int cmd_pool_align);
void iwl_trans_free(struct iwl_trans *trans);
/*****************************************************
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/constants.h b/drivers/net/wireless/intel/iwlwifi/mvm/constants.h
index 60aff2ecec12..58df25e2fb32 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/constants.h
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/constants.h
@@ -154,5 +154,6 @@
#define IWL_MVM_D3_DEBUG false
#define IWL_MVM_USE_TWT false
#define IWL_MVM_AMPDU_CONSEC_DROPS_DELBA 10
+#define IWL_MVM_USE_NSSN_SYNC 0
#endif /* __MVM_CONSTANTS_H */
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/d3.c b/drivers/net/wireless/intel/iwlwifi/mvm/d3.c
index 43ebb2149b63..8878409d2f07 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/d3.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/d3.c
@@ -989,6 +989,8 @@ static int __iwl_mvm_suspend(struct ieee80211_hw *hw,
mutex_lock(&mvm->mutex);
+ set_bit(IWL_MVM_STATUS_IN_D3, &mvm->status);
+
vif = iwl_mvm_get_bss_vif(mvm);
if (IS_ERR_OR_NULL(vif)) {
ret = 1;
@@ -1083,6 +1085,8 @@ static int __iwl_mvm_suspend(struct ieee80211_hw *hw,
ieee80211_restart_hw(mvm->hw);
}
}
+
+ clear_bit(IWL_MVM_STATUS_IN_D3, &mvm->status);
}
out_noreset:
mutex_unlock(&mvm->mutex);
@@ -1929,6 +1933,8 @@ static int __iwl_mvm_resume(struct iwl_mvm *mvm, bool test)
mutex_lock(&mvm->mutex);
+ clear_bit(IWL_MVM_STATUS_IN_D3, &mvm->status);
+
/* get the BSS vif pointer again */
vif = iwl_mvm_get_bss_vif(mvm);
if (IS_ERR_OR_NULL(vif))
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c b/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c
index aa659162a7c2..190cf15b825c 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c
@@ -752,7 +752,7 @@ static ssize_t iwl_dbgfs_fw_ver_read(struct file *file, char __user *user_buf,
pos += scnprintf(pos, endpos - pos, "FW: %s\n",
mvm->fwrt.fw->human_readable);
pos += scnprintf(pos, endpos - pos, "Device: %s\n",
- mvm->fwrt.trans->cfg->name);
+ mvm->fwrt.trans->name);
pos += scnprintf(pos, endpos - pos, "Bus: %s\n",
mvm->fwrt.dev->bus->name);
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/ftm-initiator.c b/drivers/net/wireless/intel/iwlwifi/mvm/ftm-initiator.c
index 9f4b117db9d7..f783d6d53b6f 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/ftm-initiator.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/ftm-initiator.c
@@ -208,10 +208,11 @@ static void iwl_mvm_ftm_cmd(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
cmd->tsf_mac_id = cpu_to_le32(0xff);
}
-static int iwl_mvm_ftm_target_chandef(struct iwl_mvm *mvm,
- struct cfg80211_pmsr_request_peer *peer,
- u8 *channel, u8 *bandwidth,
- u8 *ctrl_ch_position)
+static int
+iwl_mvm_ftm_target_chandef_v1(struct iwl_mvm *mvm,
+ struct cfg80211_pmsr_request_peer *peer,
+ u8 *channel, u8 *bandwidth,
+ u8 *ctrl_ch_position)
{
u32 freq = peer->chandef.chan->center_freq;
@@ -243,15 +244,54 @@ static int iwl_mvm_ftm_target_chandef(struct iwl_mvm *mvm,
}
static int
+iwl_mvm_ftm_target_chandef_v2(struct iwl_mvm *mvm,
+ struct cfg80211_pmsr_request_peer *peer,
+ u8 *channel, u8 *format_bw,
+ u8 *ctrl_ch_position)
+{
+ u32 freq = peer->chandef.chan->center_freq;
+
+ *channel = ieee80211_frequency_to_channel(freq);
+
+ switch (peer->chandef.width) {
+ case NL80211_CHAN_WIDTH_20_NOHT:
+ *format_bw = IWL_LOCATION_FRAME_FORMAT_LEGACY;
+ *format_bw |= IWL_LOCATION_BW_20MHZ << LOCATION_BW_POS;
+ break;
+ case NL80211_CHAN_WIDTH_20:
+ *format_bw = IWL_LOCATION_FRAME_FORMAT_HT;
+ *format_bw |= IWL_LOCATION_BW_20MHZ << LOCATION_BW_POS;
+ break;
+ case NL80211_CHAN_WIDTH_40:
+ *format_bw = IWL_LOCATION_FRAME_FORMAT_HT;
+ *format_bw |= IWL_LOCATION_BW_40MHZ << LOCATION_BW_POS;
+ break;
+ case NL80211_CHAN_WIDTH_80:
+ *format_bw = IWL_LOCATION_FRAME_FORMAT_VHT;
+ *format_bw |= IWL_LOCATION_BW_80MHZ << LOCATION_BW_POS;
+ break;
+ default:
+ IWL_ERR(mvm, "Unsupported BW in FTM request (%d)\n",
+ peer->chandef.width);
+ return -EINVAL;
+ }
+
+ *ctrl_ch_position = (peer->chandef.width > NL80211_CHAN_WIDTH_20) ?
+ iwl_mvm_get_ctrl_pos(&peer->chandef) : 0;
+
+ return 0;
+}
+
+static int
iwl_mvm_ftm_put_target_v2(struct iwl_mvm *mvm,
struct cfg80211_pmsr_request_peer *peer,
struct iwl_tof_range_req_ap_entry_v2 *target)
{
int ret;
- ret = iwl_mvm_ftm_target_chandef(mvm, peer, &target->channel_num,
- &target->bandwidth,
- &target->ctrl_ch_position);
+ ret = iwl_mvm_ftm_target_chandef_v1(mvm, peer, &target->channel_num,
+ &target->bandwidth,
+ &target->ctrl_ch_position);
if (ret)
return ret;
@@ -278,18 +318,11 @@ iwl_mvm_ftm_put_target_v2(struct iwl_mvm *mvm,
#define FTM_PUT_FLAG(flag) (target->initiator_ap_flags |= \
cpu_to_le32(IWL_INITIATOR_AP_FLAGS_##flag))
-static int iwl_mvm_ftm_put_target(struct iwl_mvm *mvm,
- struct cfg80211_pmsr_request_peer *peer,
- struct iwl_tof_range_req_ap_entry *target)
+static void
+iwl_mvm_ftm_put_target_common(struct iwl_mvm *mvm,
+ struct cfg80211_pmsr_request_peer *peer,
+ struct iwl_tof_range_req_ap_entry *target)
{
- int ret;
-
- ret = iwl_mvm_ftm_target_chandef(mvm, peer, &target->channel_num,
- &target->bandwidth,
- &target->ctrl_ch_position);
- if (ret)
- return ret;
-
memcpy(target->bssid, peer->addr, ETH_ALEN);
target->burst_period =
cpu_to_le16(peer->ftm.burst_period);
@@ -314,60 +347,166 @@ static int iwl_mvm_ftm_put_target(struct iwl_mvm *mvm,
FTM_PUT_FLAG(ALGO_LR);
else if (IWL_MVM_FTM_INITIATOR_ALGO == IWL_TOF_ALGO_TYPE_FFT)
FTM_PUT_FLAG(ALGO_FFT);
+}
+
+static int
+iwl_mvm_ftm_put_target_v3(struct iwl_mvm *mvm,
+ struct cfg80211_pmsr_request_peer *peer,
+ struct iwl_tof_range_req_ap_entry_v3 *target)
+{
+ int ret;
+
+ ret = iwl_mvm_ftm_target_chandef_v1(mvm, peer, &target->channel_num,
+ &target->bandwidth,
+ &target->ctrl_ch_position);
+ if (ret)
+ return ret;
+
+ /*
+ * Versions 3 and 4 have some common fields, so
+ * iwl_mvm_ftm_put_target_common() can be used for both versions.
+ */
+ iwl_mvm_ftm_put_target_common(mvm, peer, (void *)target);
return 0;
}
-int iwl_mvm_ftm_start(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
- struct cfg80211_pmsr_request *req)
+static int iwl_mvm_ftm_put_target_v4(struct iwl_mvm *mvm,
+ struct cfg80211_pmsr_request_peer *peer,
+ struct iwl_tof_range_req_ap_entry *target)
+{
+ int ret;
+
+ ret = iwl_mvm_ftm_target_chandef_v2(mvm, peer, &target->channel_num,
+ &target->format_bw,
+ &target->ctrl_ch_position);
+ if (ret)
+ return ret;
+
+ iwl_mvm_ftm_put_target_common(mvm, peer, target);
+
+ return 0;
+}
+
+static int iwl_mvm_ftm_send_cmd(struct iwl_mvm *mvm, struct iwl_host_cmd *hcmd)
+{
+ u32 status;
+ int err = iwl_mvm_send_cmd_status(mvm, hcmd, &status);
+
+ if (!err && status) {
+ IWL_ERR(mvm, "FTM range request command failure, status: %u\n",
+ status);
+ err = iwl_ftm_range_request_status_to_err(status);
+ }
+
+ return err;
+}
+
+static int iwl_mvm_ftm_start_v5(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
+ struct cfg80211_pmsr_request *req)
{
struct iwl_tof_range_req_cmd_v5 cmd_v5;
- struct iwl_tof_range_req_cmd cmd;
- bool new_api = fw_has_api(&mvm->fw->ucode_capa,
- IWL_UCODE_TLV_API_FTM_NEW_RANGE_REQ);
- u8 num_of_ap;
struct iwl_host_cmd hcmd = {
.id = iwl_cmd_id(TOF_RANGE_REQ_CMD, LOCATION_GROUP, 0),
.dataflags[0] = IWL_HCMD_DFL_DUP,
+ .data[0] = &cmd_v5,
+ .len[0] = sizeof(cmd_v5),
};
- u32 status = 0;
- int err, i;
+ u8 i;
+ int err;
- lockdep_assert_held(&mvm->mutex);
+ iwl_mvm_ftm_cmd_v5(mvm, vif, &cmd_v5, req);
- if (mvm->ftm_initiator.req)
- return -EBUSY;
+ for (i = 0; i < cmd_v5.num_of_ap; i++) {
+ struct cfg80211_pmsr_request_peer *peer = &req->peers[i];
- if (new_api) {
- iwl_mvm_ftm_cmd(mvm, vif, &cmd, req);
- hcmd.data[0] = &cmd;
- hcmd.len[0] = sizeof(cmd);
- num_of_ap = cmd.num_of_ap;
- } else {
- iwl_mvm_ftm_cmd_v5(mvm, vif, &cmd_v5, req);
- hcmd.data[0] = &cmd_v5;
- hcmd.len[0] = sizeof(cmd_v5);
- num_of_ap = cmd_v5.num_of_ap;
+ err = iwl_mvm_ftm_put_target_v2(mvm, peer, &cmd_v5.ap[i]);
+ if (err)
+ return err;
}
- for (i = 0; i < num_of_ap; i++) {
+ return iwl_mvm_ftm_send_cmd(mvm, &hcmd);
+}
+
+static int iwl_mvm_ftm_start_v7(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
+ struct cfg80211_pmsr_request *req)
+{
+ struct iwl_tof_range_req_cmd_v7 cmd_v7;
+ struct iwl_host_cmd hcmd = {
+ .id = iwl_cmd_id(TOF_RANGE_REQ_CMD, LOCATION_GROUP, 0),
+ .dataflags[0] = IWL_HCMD_DFL_DUP,
+ .data[0] = &cmd_v7,
+ .len[0] = sizeof(cmd_v7),
+ };
+ u8 i;
+ int err;
+
+ /*
+ * Versions 7 and 8 have the same structure except for the responders
+ * list, so iwl_mvm_ftm_cmd() can be used for version 7 too.
+ */
+ iwl_mvm_ftm_cmd(mvm, vif, (void *)&cmd_v7, req);
+
+ for (i = 0; i < cmd_v7.num_of_ap; i++) {
struct cfg80211_pmsr_request_peer *peer = &req->peers[i];
- if (new_api)
- err = iwl_mvm_ftm_put_target(mvm, peer, &cmd.ap[i]);
- else
- err = iwl_mvm_ftm_put_target_v2(mvm, peer,
- &cmd_v5.ap[i]);
+ err = iwl_mvm_ftm_put_target_v3(mvm, peer, &cmd_v7.ap[i]);
+ if (err)
+ return err;
+ }
+
+ return iwl_mvm_ftm_send_cmd(mvm, &hcmd);
+}
+
+static int iwl_mvm_ftm_start_v8(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
+ struct cfg80211_pmsr_request *req)
+{
+ struct iwl_tof_range_req_cmd cmd;
+ struct iwl_host_cmd hcmd = {
+ .id = iwl_cmd_id(TOF_RANGE_REQ_CMD, LOCATION_GROUP, 0),
+ .dataflags[0] = IWL_HCMD_DFL_DUP,
+ .data[0] = &cmd,
+ .len[0] = sizeof(cmd),
+ };
+ u8 i;
+ int err;
+
+ iwl_mvm_ftm_cmd(mvm, vif, &cmd, req);
+
+ for (i = 0; i < cmd.num_of_ap; i++) {
+ struct cfg80211_pmsr_request_peer *peer = &req->peers[i];
+
+ err = iwl_mvm_ftm_put_target_v4(mvm, peer, &cmd.ap[i]);
if (err)
return err;
}
- err = iwl_mvm_send_cmd_status(mvm, &hcmd, &status);
- if (!err && status) {
- IWL_ERR(mvm, "FTM range request command failure, status: %u\n",
- status);
- err = iwl_ftm_range_request_status_to_err(status);
+ return iwl_mvm_ftm_send_cmd(mvm, &hcmd);
+}
+
+int iwl_mvm_ftm_start(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
+ struct cfg80211_pmsr_request *req)
+{
+ bool new_api = fw_has_api(&mvm->fw->ucode_capa,
+ IWL_UCODE_TLV_API_FTM_NEW_RANGE_REQ);
+ int err;
+
+ lockdep_assert_held(&mvm->mutex);
+
+ if (mvm->ftm_initiator.req)
+ return -EBUSY;
+
+ if (new_api) {
+ u8 cmd_ver = iwl_mvm_lookup_cmd_ver(mvm->fw, LOCATION_GROUP,
+ TOF_RANGE_REQ_CMD);
+
+ if (cmd_ver == 8)
+ err = iwl_mvm_ftm_start_v8(mvm, vif, req);
+ else
+ err = iwl_mvm_ftm_start_v7(mvm, vif, req);
+
+ } else {
+ err = iwl_mvm_ftm_start_v5(mvm, vif, req);
}
if (!err) {
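The rework above replaces the single new_api branch with one start function per supported command version; each builds its own command struct, points hcmd.data/len at it up front, and funnels firmware status handling through iwl_mvm_ftm_send_cmd(). The dispatch itself reduces to the following shape, where iwl_mvm_lookup_cmd_ver() returns the version the firmware advertises for the command:

	u8 cmd_ver = iwl_mvm_lookup_cmd_ver(mvm->fw, LOCATION_GROUP,
					    TOF_RANGE_REQ_CMD);

	/* version 8 uses the v4 target entries; everything else on the
	 * new API falls back to the v7 layout with v3 entries
	 */
	err = (cmd_ver == 8) ? iwl_mvm_ftm_start_v8(mvm, vif, req)
			     : iwl_mvm_ftm_start_v7(mvm, vif, req);
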
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/ftm-responder.c b/drivers/net/wireless/intel/iwlwifi/mvm/ftm-responder.c
index 1513b8b4062f..834564198409 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/ftm-responder.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/ftm-responder.c
@@ -6,7 +6,7 @@
* GPL LICENSE SUMMARY
*
* Copyright(c) 2015 - 2017 Intel Deutschland GmbH
- * Copyright (C) 2018 Intel Corporation
+ * Copyright (C) 2018 - 2019 Intel Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -27,7 +27,7 @@
* BSD LICENSE
*
* Copyright(c) 2015 - 2017 Intel Deutschland GmbH
- * Copyright (C) 2018 Intel Corporation
+ * Copyright (C) 2018 - 2019 Intel Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -62,12 +62,72 @@
#include "mvm.h"
#include "constants.h"
+static int iwl_mvm_ftm_responder_set_bw_v1(struct cfg80211_chan_def *chandef,
+ u8 *bw, u8 *ctrl_ch_position)
+{
+ switch (chandef->width) {
+ case NL80211_CHAN_WIDTH_20_NOHT:
+ *bw = IWL_TOF_BW_20_LEGACY;
+ break;
+ case NL80211_CHAN_WIDTH_20:
+ *bw = IWL_TOF_BW_20_HT;
+ break;
+ case NL80211_CHAN_WIDTH_40:
+ *bw = IWL_TOF_BW_40;
+ *ctrl_ch_position = iwl_mvm_get_ctrl_pos(chandef);
+ break;
+ case NL80211_CHAN_WIDTH_80:
+ *bw = IWL_TOF_BW_80;
+ *ctrl_ch_position = iwl_mvm_get_ctrl_pos(chandef);
+ break;
+ default:
+ return -ENOTSUPP;
+ }
+
+ return 0;
+}
+
+static int iwl_mvm_ftm_responder_set_bw_v2(struct cfg80211_chan_def *chandef,
+ u8 *format_bw,
+ u8 *ctrl_ch_position)
+{
+ switch (chandef->width) {
+ case NL80211_CHAN_WIDTH_20_NOHT:
+ *format_bw = IWL_LOCATION_FRAME_FORMAT_LEGACY;
+ *format_bw |= IWL_LOCATION_BW_20MHZ << LOCATION_BW_POS;
+ break;
+ case NL80211_CHAN_WIDTH_20:
+ *format_bw = IWL_LOCATION_FRAME_FORMAT_HT;
+ *format_bw |= IWL_LOCATION_BW_20MHZ << LOCATION_BW_POS;
+ break;
+ case NL80211_CHAN_WIDTH_40:
+ *format_bw = IWL_LOCATION_FRAME_FORMAT_HT;
+ *format_bw |= IWL_LOCATION_BW_40MHZ << LOCATION_BW_POS;
+ *ctrl_ch_position = iwl_mvm_get_ctrl_pos(chandef);
+ break;
+ case NL80211_CHAN_WIDTH_80:
+ *format_bw = IWL_LOCATION_FRAME_FORMAT_VHT;
+ *format_bw |= IWL_LOCATION_BW_80MHZ << LOCATION_BW_POS;
+ *ctrl_ch_position = iwl_mvm_get_ctrl_pos(chandef);
+ break;
+ default:
+ return -ENOTSUPP;
+ }
+
+ return 0;
+}
+
static int
iwl_mvm_ftm_responder_cmd(struct iwl_mvm *mvm,
struct ieee80211_vif *vif,
struct cfg80211_chan_def *chandef)
{
struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+ /*
+ * The command structure is the same for versions 6 and 7 (only the
+ * field interpretation is different), so the same struct can be used
+ * for both cases.
+ */
struct iwl_tof_responder_config_cmd cmd = {
.channel_num = chandef->chan->hw_value,
.cmd_valid_fields =
@@ -76,27 +136,22 @@ iwl_mvm_ftm_responder_cmd(struct iwl_mvm *mvm,
IWL_TOF_RESPONDER_CMD_VALID_STA_ID),
.sta_id = mvmvif->bcast_sta.sta_id,
};
+ u8 cmd_ver = iwl_mvm_lookup_cmd_ver(mvm->fw, LOCATION_GROUP,
+ TOF_RESPONDER_CONFIG_CMD);
+ int err;
lockdep_assert_held(&mvm->mutex);
- switch (chandef->width) {
- case NL80211_CHAN_WIDTH_20_NOHT:
- cmd.bandwidth = IWL_TOF_BW_20_LEGACY;
- break;
- case NL80211_CHAN_WIDTH_20:
- cmd.bandwidth = IWL_TOF_BW_20_HT;
- break;
- case NL80211_CHAN_WIDTH_40:
- cmd.bandwidth = IWL_TOF_BW_40;
- cmd.ctrl_ch_position = iwl_mvm_get_ctrl_pos(chandef);
- break;
- case NL80211_CHAN_WIDTH_80:
- cmd.bandwidth = IWL_TOF_BW_80;
- cmd.ctrl_ch_position = iwl_mvm_get_ctrl_pos(chandef);
- break;
- default:
- WARN_ON(1);
- return -EINVAL;
+ if (cmd_ver == 7)
+ err = iwl_mvm_ftm_responder_set_bw_v2(chandef, &cmd.format_bw,
+ &cmd.ctrl_ch_position);
+ else
+ err = iwl_mvm_ftm_responder_set_bw_v1(chandef, &cmd.format_bw,
+ &cmd.ctrl_ch_position);
+
+ if (err) {
+ IWL_ERR(mvm, "Failed to set responder bandwidth\n");
+ return err;
}
memcpy(cmd.bssid, vif->addr, ETH_ALEN);
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/fw.c b/drivers/net/wireless/intel/iwlwifi/mvm/fw.c
index dd685f7eb410..54c094e88474 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/fw.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/fw.c
@@ -353,22 +353,35 @@ static int iwl_mvm_load_ucode_wait_alive(struct iwl_mvm *mvm,
if (ret) {
struct iwl_trans *trans = mvm->trans;
- if (ret == -ETIMEDOUT)
- iwl_fw_dbg_error_collect(&mvm->fwrt,
- FW_DBG_TRIGGER_ALIVE_TIMEOUT);
-
- if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_22000)
+ if (trans->trans_cfg->device_family >=
+ IWL_DEVICE_FAMILY_22000) {
IWL_ERR(mvm,
"SecBoot CPU1 Status: 0x%x, CPU2 Status: 0x%x\n",
iwl_read_umac_prph(trans, UMAG_SB_CPU_1_STATUS),
iwl_read_umac_prph(trans,
UMAG_SB_CPU_2_STATUS));
- else if (trans->trans_cfg->device_family >=
- IWL_DEVICE_FAMILY_8000)
+ IWL_ERR(mvm, "UMAC PC: 0x%x\n",
+ iwl_read_umac_prph(trans,
+ UREG_UMAC_CURRENT_PC));
+ IWL_ERR(mvm, "LMAC PC: 0x%x\n",
+ iwl_read_umac_prph(trans,
+ UREG_LMAC1_CURRENT_PC));
+ if (iwl_mvm_is_cdb_supported(mvm))
+ IWL_ERR(mvm, "LMAC2 PC: 0x%x\n",
+ iwl_read_umac_prph(trans,
+ UREG_LMAC2_CURRENT_PC));
+ } else if (trans->trans_cfg->device_family >=
+ IWL_DEVICE_FAMILY_8000) {
IWL_ERR(mvm,
"SecBoot CPU1 Status: 0x%x, CPU2 Status: 0x%x\n",
iwl_read_prph(trans, SB_CPU_1_STATUS),
iwl_read_prph(trans, SB_CPU_2_STATUS));
+ }
+
+ if (ret == -ETIMEDOUT)
+ iwl_fw_dbg_error_collect(&mvm->fwrt,
+ FW_DBG_TRIGGER_ALIVE_TIMEOUT);
+
iwl_fw_set_current_image(&mvm->fwrt, old_type);
return ret;
}
@@ -841,9 +854,13 @@ int iwl_mvm_ppag_send_cmd(struct iwl_mvm *mvm)
return 0;
}
+ if (!mvm->fwrt.ppag_table.enabled) {
+ IWL_DEBUG_RADIO(mvm,
+ "PPAG not enabled, command not sent.\n");
+ return 0;
+ }
+
IWL_DEBUG_RADIO(mvm, "Sending PER_PLATFORM_ANT_GAIN_CMD\n");
- IWL_DEBUG_RADIO(mvm, "PPAG is %s\n",
- mvm->fwrt.ppag_table.enabled ? "enabled" : "disabled");
for (i = 0; i < ACPI_PPAG_NUM_CHAINS; i++) {
for (j = 0; j < ACPI_PPAG_NUM_SUB_BANDS; j++) {
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
index 32dc9d6f0fb6..6717f25c46b1 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
@@ -256,7 +256,8 @@ struct ieee80211_regdomain *iwl_mvm_get_regdomain(struct wiphy *wiphy,
__le32_to_cpu(resp->n_channels),
resp->channels,
__le16_to_cpu(resp->mcc),
- __le16_to_cpu(resp->geo_info));
+ __le16_to_cpu(resp->geo_info),
+ __le16_to_cpu(resp->cap));
/* Store the return source id */
src_id = resp->source_id;
kfree(resp);
@@ -754,6 +755,20 @@ int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm)
return ret;
}
+static void iwl_mvm_tx_skb(struct iwl_mvm *mvm, struct sk_buff *skb,
+ struct ieee80211_sta *sta)
+{
+ if (likely(sta)) {
+ if (likely(iwl_mvm_tx_skb_sta(mvm, skb, sta) == 0))
+ return;
+ } else {
+ if (likely(iwl_mvm_tx_skb_non_sta(mvm, skb) == 0))
+ return;
+ }
+
+ ieee80211_free_txskb(mvm->hw, skb);
+}
+
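The new wrapper takes ownership of the skb: on any Tx failure it frees the buffer itself, which is why the two call sites below lose their drop paths and shrink to a single unconditional call:

	/* both the mac Tx path and the iTXQ path become: */
	iwl_mvm_tx_skb(mvm, skb, sta);	/* frees the skb internally on failure */
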
static void iwl_mvm_mac_tx(struct ieee80211_hw *hw,
struct ieee80211_tx_control *control,
struct sk_buff *skb)
@@ -797,14 +812,7 @@ static void iwl_mvm_mac_tx(struct ieee80211_hw *hw,
}
}
- if (sta) {
- if (iwl_mvm_tx_skb(mvm, skb, sta))
- goto drop;
- return;
- }
-
- if (iwl_mvm_tx_skb_non_sta(mvm, skb))
- goto drop;
+ iwl_mvm_tx_skb(mvm, skb, sta);
return;
drop:
ieee80211_free_txskb(hw, skb);
@@ -854,10 +862,7 @@ void iwl_mvm_mac_itxq_xmit(struct ieee80211_hw *hw, struct ieee80211_txq *txq)
break;
}
- if (!txq->sta)
- iwl_mvm_tx_skb_non_sta(mvm, skb);
- else
- iwl_mvm_tx_skb(mvm, skb, txq->sta);
+ iwl_mvm_tx_skb(mvm, skb, txq->sta);
}
} while (atomic_dec_return(&mvmtxq->tx_request));
rcu_read_unlock();
@@ -4771,6 +4776,125 @@ static int iwl_mvm_mac_get_survey(struct ieee80211_hw *hw, int idx,
return ret;
}
+static void iwl_mvm_set_sta_rate(u32 rate_n_flags, struct rate_info *rinfo)
+{
+ switch (rate_n_flags & RATE_MCS_CHAN_WIDTH_MSK) {
+ case RATE_MCS_CHAN_WIDTH_20:
+ rinfo->bw = RATE_INFO_BW_20;
+ break;
+ case RATE_MCS_CHAN_WIDTH_40:
+ rinfo->bw = RATE_INFO_BW_40;
+ break;
+ case RATE_MCS_CHAN_WIDTH_80:
+ rinfo->bw = RATE_INFO_BW_80;
+ break;
+ case RATE_MCS_CHAN_WIDTH_160:
+ rinfo->bw = RATE_INFO_BW_160;
+ break;
+ }
+
+ if (rate_n_flags & RATE_MCS_HT_MSK) {
+ rinfo->flags |= RATE_INFO_FLAGS_MCS;
+ rinfo->mcs = u32_get_bits(rate_n_flags, RATE_HT_MCS_INDEX_MSK);
+ rinfo->nss = u32_get_bits(rate_n_flags,
+ RATE_HT_MCS_NSS_MSK) + 1;
+ if (rate_n_flags & RATE_MCS_SGI_MSK)
+ rinfo->flags |= RATE_INFO_FLAGS_SHORT_GI;
+ } else if (rate_n_flags & RATE_MCS_VHT_MSK) {
+ rinfo->flags |= RATE_INFO_FLAGS_VHT_MCS;
+ rinfo->mcs = u32_get_bits(rate_n_flags,
+ RATE_VHT_MCS_RATE_CODE_MSK);
+ rinfo->nss = u32_get_bits(rate_n_flags,
+ RATE_VHT_MCS_NSS_MSK) + 1;
+ if (rate_n_flags & RATE_MCS_SGI_MSK)
+ rinfo->flags |= RATE_INFO_FLAGS_SHORT_GI;
+ } else if (rate_n_flags & RATE_MCS_HE_MSK) {
+ u32 gi_ltf = u32_get_bits(rate_n_flags,
+ RATE_MCS_HE_GI_LTF_MSK);
+
+ rinfo->flags |= RATE_INFO_FLAGS_HE_MCS;
+ rinfo->mcs = u32_get_bits(rate_n_flags,
+ RATE_VHT_MCS_RATE_CODE_MSK);
+ rinfo->nss = u32_get_bits(rate_n_flags,
+ RATE_VHT_MCS_NSS_MSK) + 1;
+
+ if (rate_n_flags & RATE_MCS_HE_106T_MSK) {
+ rinfo->bw = RATE_INFO_BW_HE_RU;
+ rinfo->he_ru_alloc = NL80211_RATE_INFO_HE_RU_ALLOC_106;
+ }
+
+ switch (rate_n_flags & RATE_MCS_HE_TYPE_MSK) {
+ case RATE_MCS_HE_TYPE_SU:
+ case RATE_MCS_HE_TYPE_EXT_SU:
+ if (gi_ltf == 0 || gi_ltf == 1)
+ rinfo->he_gi = NL80211_RATE_INFO_HE_GI_0_8;
+ else if (gi_ltf == 2)
+ rinfo->he_gi = NL80211_RATE_INFO_HE_GI_1_6;
+ else if (rate_n_flags & RATE_MCS_SGI_MSK)
+ rinfo->he_gi = NL80211_RATE_INFO_HE_GI_0_8;
+ else
+ rinfo->he_gi = NL80211_RATE_INFO_HE_GI_3_2;
+ break;
+ case RATE_MCS_HE_TYPE_MU:
+ if (gi_ltf == 0 || gi_ltf == 1)
+ rinfo->he_gi = NL80211_RATE_INFO_HE_GI_0_8;
+ else if (gi_ltf == 2)
+ rinfo->he_gi = NL80211_RATE_INFO_HE_GI_1_6;
+ else
+ rinfo->he_gi = NL80211_RATE_INFO_HE_GI_3_2;
+ break;
+ case RATE_MCS_HE_TYPE_TRIG:
+ if (gi_ltf == 0 || gi_ltf == 1)
+ rinfo->he_gi = NL80211_RATE_INFO_HE_GI_1_6;
+ else
+ rinfo->he_gi = NL80211_RATE_INFO_HE_GI_3_2;
+ break;
+ }
+
+ if (rate_n_flags & RATE_HE_DUAL_CARRIER_MODE_MSK)
+ rinfo->he_dcm = 1;
+ } else {
+ switch (u32_get_bits(rate_n_flags, RATE_LEGACY_RATE_MSK)) {
+ case IWL_RATE_1M_PLCP:
+ rinfo->legacy = 10;
+ break;
+ case IWL_RATE_2M_PLCP:
+ rinfo->legacy = 20;
+ break;
+ case IWL_RATE_5M_PLCP:
+ rinfo->legacy = 55;
+ break;
+ case IWL_RATE_11M_PLCP:
+ rinfo->legacy = 110;
+ break;
+ case IWL_RATE_6M_PLCP:
+ rinfo->legacy = 60;
+ break;
+ case IWL_RATE_9M_PLCP:
+ rinfo->legacy = 90;
+ break;
+ case IWL_RATE_12M_PLCP:
+ rinfo->legacy = 120;
+ break;
+ case IWL_RATE_18M_PLCP:
+ rinfo->legacy = 180;
+ break;
+ case IWL_RATE_24M_PLCP:
+ rinfo->legacy = 240;
+ break;
+ case IWL_RATE_36M_PLCP:
+ rinfo->legacy = 360;
+ break;
+ case IWL_RATE_48M_PLCP:
+ rinfo->legacy = 480;
+ break;
+ case IWL_RATE_54M_PLCP:
+ rinfo->legacy = 540;
+ break;
+ }
+ }
+}
+
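The legacy values in the table above follow the cfg80211 convention that rate_info::legacy is expressed in units of 100 kbit/s, so IWL_RATE_1M_PLCP maps to 10 and IWL_RATE_54M_PLCP to 540. Converting back for display, sketch only:

	u32 whole_mbps = rinfo->legacy / 10;	/* 540 -> 54  */
	u32 tenths     = rinfo->legacy % 10;	/* 55  -> 5.5 Mbit/s (CCK) */
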
static void iwl_mvm_mac_sta_statistics(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
struct ieee80211_sta *sta,
@@ -4785,6 +4909,13 @@ static void iwl_mvm_mac_sta_statistics(struct ieee80211_hw *hw,
sinfo->filled |= BIT_ULL(NL80211_STA_INFO_SIGNAL_AVG);
}
+ if (iwl_mvm_has_tlc_offload(mvm)) {
+ struct iwl_lq_sta_rs_fw *lq_sta = &mvmsta->lq_sta.rs_fw;
+
+ iwl_mvm_set_sta_rate(lq_sta->last_rate_n_flags, &sinfo->txrate);
+ sinfo->filled |= BIT_ULL(NL80211_STA_INFO_TX_BITRATE);
+ }
+
/* if beacon filtering isn't on mac80211 does it anyway */
if (!(vif->driver_flags & IEEE80211_VIF_BEACON_FILTER))
return;
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h b/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h
index 3ec8de00f3aa..d6ecc2d2bf21 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h
@@ -1160,6 +1160,7 @@ struct iwl_mvm {
* @IWL_MVM_STATUS_ROC_AUX_RUNNING: AUX remain-on-channel is running
* @IWL_MVM_STATUS_FIRMWARE_RUNNING: firmware is running
* @IWL_MVM_STATUS_NEED_FLUSH_P2P: need to flush P2P bcast STA
+ * @IWL_MVM_STATUS_IN_D3: in D3 (or at least about to go into it)
*/
enum iwl_mvm_status {
IWL_MVM_STATUS_HW_RFKILL,
@@ -1170,6 +1171,7 @@ enum iwl_mvm_status {
IWL_MVM_STATUS_ROC_AUX_RUNNING,
IWL_MVM_STATUS_FIRMWARE_RUNNING,
IWL_MVM_STATUS_NEED_FLUSH_P2P,
+ IWL_MVM_STATUS_IN_D3,
};
/* Keep track of completed init configuration */
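The new status bit gives the power code a direct answer to "are we in (or entering) D3?" instead of inferring it from mvm->fwrt.cur_fw_img == IWL_UCODE_WOWLAN, an inference that stops working once the same firmware image serves both runtime and wowlan. The lifecycle, as far as this excerpt shows it (the set_bit() side lives in the suspend path in d3.c and is assumed here):

	set_bit(IWL_MVM_STATUS_IN_D3, &mvm->status);	/* suspend entry (assumed) */

	if (test_bit(IWL_MVM_STATUS_IN_D3, &mvm->status))	/* consumers, see power.c */
		cmd.ba_escape_timer = cpu_to_le32(IWL_BA_ESCAPE_TIMER_D3);

	clear_bit(IWL_MVM_STATUS_IN_D3, &mvm->status);	/* resume, or failed suspend */
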
@@ -1298,9 +1300,6 @@ static inline bool iwl_mvm_is_lar_supported(struct iwl_mvm *mvm)
bool tlv_lar = fw_has_capa(&mvm->fw->ucode_capa,
IWL_UCODE_TLV_CAPA_LAR_SUPPORT);
- if (iwlwifi_mod_params.lar_disable)
- return false;
-
/*
* Enable LAR only if it is supported by the FW (TLV) &&
* enabled in the NVM
@@ -1508,8 +1507,8 @@ int __must_check iwl_mvm_send_cmd_status(struct iwl_mvm *mvm,
int __must_check iwl_mvm_send_cmd_pdu_status(struct iwl_mvm *mvm, u32 id,
u16 len, const void *data,
u32 *status);
-int iwl_mvm_tx_skb(struct iwl_mvm *mvm, struct sk_buff *skb,
- struct ieee80211_sta *sta);
+int iwl_mvm_tx_skb_sta(struct iwl_mvm *mvm, struct sk_buff *skb,
+ struct ieee80211_sta *sta);
int iwl_mvm_tx_skb_non_sta(struct iwl_mvm *mvm, struct sk_buff *skb);
void iwl_mvm_set_tx_cmd(struct iwl_mvm *mvm, struct sk_buff *skb,
struct iwl_tx_cmd *tx_cmd,
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/nvm.c b/drivers/net/wireless/intel/iwlwifi/mvm/nvm.c
index 945c1ea5cda8..70b29bf16bb9 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/nvm.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/nvm.c
@@ -178,7 +178,7 @@ static int iwl_nvm_read_chunk(struct iwl_mvm *mvm, u16 section,
} else {
IWL_DEBUG_EEPROM(mvm->trans->dev,
"NVM access command failed with status %d (device: %s)\n",
- ret, mvm->cfg->name);
+ ret, mvm->trans->name);
ret = -ENODATA;
}
goto exit;
@@ -277,11 +277,10 @@ iwl_parse_nvm_sections(struct iwl_mvm *mvm)
struct iwl_nvm_section *sections = mvm->nvm_sections;
const __be16 *hw;
const __le16 *sw, *calib, *regulatory, *mac_override, *phy_sku;
- bool lar_enabled;
int regulatory_type;
/* Checking for required sections */
- if (mvm->trans->cfg->nvm_type != IWL_NVM_EXT) {
+ if (mvm->trans->cfg->nvm_type == IWL_NVM) {
if (!mvm->nvm_sections[NVM_SECTION_TYPE_SW].data ||
!mvm->nvm_sections[mvm->cfg->nvm_hw_section_num].data) {
IWL_ERR(mvm, "Can't parse empty OTP/NVM sections\n");
@@ -327,14 +326,9 @@ iwl_parse_nvm_sections(struct iwl_mvm *mvm)
(const __le16 *)sections[NVM_SECTION_TYPE_REGULATORY_SDP].data :
(const __le16 *)sections[NVM_SECTION_TYPE_REGULATORY].data;
- lar_enabled = !iwlwifi_mod_params.lar_disable &&
- fw_has_capa(&mvm->fw->ucode_capa,
- IWL_UCODE_TLV_CAPA_LAR_SUPPORT);
-
- return iwl_parse_nvm_data(mvm->trans, mvm->cfg, hw, sw, calib,
+ return iwl_parse_nvm_data(mvm->trans, mvm->cfg, mvm->fw, hw, sw, calib,
regulatory, mac_override, phy_sku,
- mvm->fw->valid_tx_ant, mvm->fw->valid_rx_ant,
- lar_enabled);
+ mvm->fw->valid_tx_ant, mvm->fw->valid_rx_ant);
}
/* Loads the NVM data stored in mvm->nvm_sections into the NIC */
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/ops.c b/drivers/net/wireless/intel/iwlwifi/mvm/ops.c
index 1b07a8e8f069..dfe02440d474 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/ops.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/ops.c
@@ -830,7 +830,7 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg,
}
IWL_INFO(mvm, "Detected %s, REV=0x%X\n",
- mvm->cfg->name, mvm->trans->hw_rev);
+ mvm->trans->name, mvm->trans->hw_rev);
if (iwlwifi_mod_params.nvm_file)
mvm->nvm_file_name = iwlwifi_mod_params.nvm_file;
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/power.c b/drivers/net/wireless/intel/iwlwifi/mvm/power.c
index 25d7faea1c62..c146303ec73b 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/power.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/power.c
@@ -198,7 +198,7 @@ static void iwl_mvm_power_configure_uapsd(struct iwl_mvm *mvm,
if (!mvmvif->queue_params[ac].uapsd)
continue;
- if (mvm->fwrt.cur_fw_img != IWL_UCODE_WOWLAN)
+ if (!test_bit(IWL_MVM_STATUS_IN_D3, &mvm->status))
cmd->flags |=
cpu_to_le16(POWER_FLAGS_ADVANCE_PM_ENA_MSK);
@@ -233,15 +233,15 @@ static void iwl_mvm_power_configure_uapsd(struct iwl_mvm *mvm,
cmd->flags |= cpu_to_le16(POWER_FLAGS_SNOOZE_ENA_MSK);
cmd->snooze_interval = cpu_to_le16(IWL_MVM_PS_SNOOZE_INTERVAL);
cmd->snooze_window =
- (mvm->fwrt.cur_fw_img == IWL_UCODE_WOWLAN) ?
+ test_bit(IWL_MVM_STATUS_IN_D3, &mvm->status) ?
cpu_to_le16(IWL_MVM_WOWLAN_PS_SNOOZE_WINDOW) :
cpu_to_le16(IWL_MVM_PS_SNOOZE_WINDOW);
}
cmd->uapsd_max_sp = mvm->hw->uapsd_max_sp_len;
- if (mvm->fwrt.cur_fw_img == IWL_UCODE_WOWLAN || cmd->flags &
- cpu_to_le16(POWER_FLAGS_SNOOZE_ENA_MSK)) {
+ if (test_bit(IWL_MVM_STATUS_IN_D3, &mvm->status) ||
+ cmd->flags & cpu_to_le16(POWER_FLAGS_SNOOZE_ENA_MSK)) {
cmd->rx_data_timeout_uapsd =
cpu_to_le32(IWL_MVM_WOWLAN_PS_RX_DATA_TIMEOUT);
cmd->tx_data_timeout_uapsd =
@@ -354,8 +354,7 @@ static bool iwl_mvm_power_is_radar(struct ieee80211_vif *vif)
static void iwl_mvm_power_config_skip_dtim(struct iwl_mvm *mvm,
struct ieee80211_vif *vif,
- struct iwl_mac_power_cmd *cmd,
- bool host_awake)
+ struct iwl_mac_power_cmd *cmd)
{
int dtimper = vif->bss_conf.dtim_period ?: 1;
int skip;
@@ -370,7 +369,7 @@ static void iwl_mvm_power_config_skip_dtim(struct iwl_mvm *mvm,
if (dtimper >= 10)
return;
- if (host_awake) {
+ if (!test_bit(IWL_MVM_STATUS_IN_D3, &mvm->status)) {
if (iwlmvm_mod_params.power_scheme != IWL_POWER_SCHEME_LP)
return;
skip = 2;
@@ -390,8 +389,7 @@ static void iwl_mvm_power_config_skip_dtim(struct iwl_mvm *mvm,
static void iwl_mvm_power_build_cmd(struct iwl_mvm *mvm,
struct ieee80211_vif *vif,
- struct iwl_mac_power_cmd *cmd,
- bool host_awake)
+ struct iwl_mac_power_cmd *cmd)
{
int dtimper, bi;
int keep_alive;
@@ -437,9 +435,9 @@ static void iwl_mvm_power_build_cmd(struct iwl_mvm *mvm,
cmd->lprx_rssi_threshold = POWER_LPRX_RSSI_THRESHOLD;
}
- iwl_mvm_power_config_skip_dtim(mvm, vif, cmd, host_awake);
+ iwl_mvm_power_config_skip_dtim(mvm, vif, cmd);
- if (!host_awake) {
+ if (test_bit(IWL_MVM_STATUS_IN_D3, &mvm->status)) {
cmd->rx_data_timeout =
cpu_to_le32(IWL_MVM_WOWLAN_PS_RX_DATA_TIMEOUT);
cmd->tx_data_timeout =
@@ -512,8 +510,7 @@ static int iwl_mvm_power_send_cmd(struct iwl_mvm *mvm,
{
struct iwl_mac_power_cmd cmd = {};
- iwl_mvm_power_build_cmd(mvm, vif, &cmd,
- mvm->fwrt.cur_fw_img != IWL_UCODE_WOWLAN);
+ iwl_mvm_power_build_cmd(mvm, vif, &cmd);
iwl_mvm_power_log(mvm, &cmd);
#ifdef CONFIG_IWLWIFI_DEBUGFS
memcpy(&iwl_mvm_vif_from_mac80211(vif)->mac_pwr_cmd, &cmd, sizeof(cmd));
@@ -536,7 +533,7 @@ int iwl_mvm_power_update_device(struct iwl_mvm *mvm)
cmd.flags |= cpu_to_le16(DEVICE_POWER_FLAGS_POWER_SAVE_ENA_MSK);
#ifdef CONFIG_IWLWIFI_DEBUGFS
- if ((mvm->fwrt.cur_fw_img == IWL_UCODE_WOWLAN) ?
+ if (test_bit(IWL_MVM_STATUS_IN_D3, &mvm->status) ?
mvm->disable_power_off_d3 : mvm->disable_power_off)
cmd.flags &=
cpu_to_le16(~DEVICE_POWER_FLAGS_POWER_SAVE_ENA_MSK);
@@ -943,7 +940,7 @@ static int iwl_mvm_power_set_ba(struct iwl_mvm *mvm,
if (!mvmvif->bf_data.bf_enabled)
return 0;
- if (mvm->fwrt.cur_fw_img == IWL_UCODE_WOWLAN)
+ if (test_bit(IWL_MVM_STATUS_IN_D3, &mvm->status))
cmd.ba_escape_timer = cpu_to_le32(IWL_BA_ESCAPE_TIMER_D3);
mvmvif->bf_data.ba_enabled = !(!mvmvif->pm_enabled ||
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c b/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c
index ef99c49247b7..c15f7dbc9516 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c
@@ -514,14 +514,17 @@ static bool iwl_mvm_is_sn_less(u16 sn1, u16 sn2, u16 buffer_size)
static void iwl_mvm_sync_nssn(struct iwl_mvm *mvm, u8 baid, u16 nssn)
{
- struct iwl_mvm_rss_sync_notif notif = {
- .metadata.type = IWL_MVM_RXQ_NSSN_SYNC,
- .metadata.sync = 0,
- .nssn_sync.baid = baid,
- .nssn_sync.nssn = nssn,
- };
-
- iwl_mvm_sync_rx_queues_internal(mvm, (void *)&notif, sizeof(notif));
+ if (IWL_MVM_USE_NSSN_SYNC) {
+ struct iwl_mvm_rss_sync_notif notif = {
+ .metadata.type = IWL_MVM_RXQ_NSSN_SYNC,
+ .metadata.sync = 0,
+ .nssn_sync.baid = baid,
+ .nssn_sync.nssn = nssn,
+ };
+
+ iwl_mvm_sync_rx_queues_internal(mvm, (void *)&notif,
+ sizeof(notif));
+ }
}
#define RX_REORDER_BUF_TIMEOUT_MQ (HZ / 10)
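IWL_MVM_USE_NSSN_SYNC is one of the driver's compile-time tunables; gating the notification on it compiles the NSSN sync out while keeping the code path in place. Presumably it is defined alongside the other IWL_MVM_* constants, along the lines of:

	/* assumed definition, following the constants.h pattern: */
	#define IWL_MVM_USE_NSSN_SYNC	0
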
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/scan.c b/drivers/net/wireless/intel/iwlwifi/mvm/scan.c
index a046ac9fa852..3b263c81bcae 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/scan.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/scan.c
@@ -1213,7 +1213,7 @@ static int iwl_mvm_legacy_config_scan(struct iwl_mvm *mvm)
cmd_size = sizeof(struct iwl_scan_config_v2);
else
cmd_size = sizeof(struct iwl_scan_config_v1);
- cmd_size += num_channels;
+ cmd_size += mvm->fw->ucode_capa.n_scan_channels;
cfg = kzalloc(cmd_size, GFP_KERNEL);
if (!cfg)
@@ -1907,20 +1907,6 @@ iwl_mvm_scan_umac_fill_probe_p_v4(struct iwl_mvm_scan_params *params,
}
static void
-iwl_mvm_scan_umac_fill_ch_p_v3(struct iwl_mvm *mvm,
- struct iwl_mvm_scan_params *params,
- struct ieee80211_vif *vif,
- struct iwl_scan_channel_params_v3 *cp)
-{
- cp->flags = iwl_mvm_scan_umac_chan_flags_v2(mvm, params, vif);
- cp->count = params->n_channels;
-
- iwl_mvm_umac_scan_cfg_channels(mvm, params->channels,
- params->n_channels, 0,
- cp->channel_config);
-}
-
-static void
iwl_mvm_scan_umac_fill_ch_p_v4(struct iwl_mvm *mvm,
struct iwl_mvm_scan_params *params,
struct ieee80211_vif *vif,
@@ -1937,37 +1923,6 @@ iwl_mvm_scan_umac_fill_ch_p_v4(struct iwl_mvm *mvm,
vif->type);
}
-static int iwl_mvm_scan_umac_v11(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
- struct iwl_mvm_scan_params *params, int type,
- int uid)
-{
- struct iwl_scan_req_umac_v11 *cmd = mvm->scan_cmd;
- struct iwl_scan_req_params_v11 *scan_p = &cmd->scan_params;
- int ret;
- u16 gen_flags;
-
- mvm->scan_uid_status[uid] = type;
-
- cmd->ooc_priority = cpu_to_le32(iwl_mvm_scan_umac_ooc_priority(params));
- cmd->uid = cpu_to_le32(uid);
-
- gen_flags = iwl_mvm_scan_umac_flags_v2(mvm, params, vif, type);
- iwl_mvm_scan_umac_fill_general_p_v10(mvm, params, vif,
- &scan_p->general_params,
- gen_flags);
-
- ret = iwl_mvm_fill_scan_sched_params(params,
- scan_p->periodic_params.schedule,
- &scan_p->periodic_params.delay);
- if (ret)
- return ret;
-
- iwl_mvm_scan_umac_fill_probe_p_v3(params, &scan_p->probe_params);
- iwl_mvm_scan_umac_fill_ch_p_v3(mvm, params, vif,
- &scan_p->channel_params);
-
- return 0;
-}
static int iwl_mvm_scan_umac_v12(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
struct iwl_mvm_scan_params *params, int type,
@@ -2152,7 +2107,6 @@ static const struct iwl_scan_umac_handler iwl_scan_umac_handlers[] = {
/* set the newest version first to shorten the list traverse time */
IWL_SCAN_UMAC_HANDLER(13),
IWL_SCAN_UMAC_HANDLER(12),
- IWL_SCAN_UMAC_HANDLER(11),
};
static int iwl_mvm_build_scan_cmd(struct iwl_mvm *mvm,
@@ -2511,7 +2465,6 @@ static int iwl_scan_req_umac_get_size(u8 scan_ver)
switch (scan_ver) {
IWL_SCAN_REQ_UMAC_HANDLE_SIZE(13);
IWL_SCAN_REQ_UMAC_HANDLE_SIZE(12);
- IWL_SCAN_REQ_UMAC_HANDLE_SIZE(11);
}
return 0;
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/tx.c b/drivers/net/wireless/intel/iwlwifi/mvm/tx.c
index dc5c02fbc65a..a8d0d17f79fd 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/tx.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/tx.c
@@ -490,13 +490,13 @@ static void iwl_mvm_set_tx_cmd_crypto(struct iwl_mvm *mvm,
/*
* Allocates and sets the Tx cmd the driver data pointers in the skb
*/
-static struct iwl_device_cmd *
+static struct iwl_device_tx_cmd *
iwl_mvm_set_tx_params(struct iwl_mvm *mvm, struct sk_buff *skb,
struct ieee80211_tx_info *info, int hdrlen,
struct ieee80211_sta *sta, u8 sta_id)
{
struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
- struct iwl_device_cmd *dev_cmd;
+ struct iwl_device_tx_cmd *dev_cmd;
struct iwl_tx_cmd *tx_cmd;
dev_cmd = iwl_trans_alloc_tx_cmd(mvm->trans);
@@ -504,11 +504,6 @@ iwl_mvm_set_tx_params(struct iwl_mvm *mvm, struct sk_buff *skb,
if (unlikely(!dev_cmd))
return NULL;
- /* Make sure we zero enough of dev_cmd */
- BUILD_BUG_ON(sizeof(struct iwl_tx_cmd_gen2) > sizeof(*tx_cmd));
- BUILD_BUG_ON(sizeof(struct iwl_tx_cmd_gen3) > sizeof(*tx_cmd));
-
- memset(dev_cmd, 0, sizeof(dev_cmd->hdr) + sizeof(*tx_cmd));
dev_cmd->hdr.cmd = TX_CMD;
if (iwl_mvm_has_new_tx_api(mvm)) {
@@ -597,7 +592,7 @@ out:
}
static void iwl_mvm_skb_prepare_status(struct sk_buff *skb,
- struct iwl_device_cmd *cmd)
+ struct iwl_device_tx_cmd *cmd)
{
struct ieee80211_tx_info *skb_info = IEEE80211_SKB_CB(skb);
@@ -716,7 +711,7 @@ int iwl_mvm_tx_skb_non_sta(struct iwl_mvm *mvm, struct sk_buff *skb)
{
struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
struct ieee80211_tx_info info;
- struct iwl_device_cmd *dev_cmd;
+ struct iwl_device_tx_cmd *dev_cmd;
u8 sta_id;
int hdrlen = ieee80211_hdrlen(hdr->frame_control);
__le16 fc = hdr->frame_control;
@@ -847,10 +842,7 @@ iwl_mvm_tx_tso_segment(struct sk_buff *skb, unsigned int num_subframes,
else if (next)
consume_skb(skb);
- while (next) {
- tmp = next;
- next = tmp->next;
-
+ skb_list_walk_safe(next, tmp, next) {
memcpy(tmp->cb, cb, sizeof(tmp->cb));
/*
* Compute the length of all the data added for the A-MSDU.
@@ -880,9 +872,7 @@ iwl_mvm_tx_tso_segment(struct sk_buff *skb, unsigned int num_subframes,
skb_shinfo(tmp)->gso_size = 0;
}
- tmp->prev = NULL;
- tmp->next = NULL;
-
+ skb_mark_not_on_list(tmp);
__skb_queue_tail(mpdus_skb, tmp);
i++;
}
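skb_list_walk_safe() and skb_mark_not_on_list() are the generic skb-list helpers from <linux/skbuff.h>, so the conversion above is behavior-preserving. Roughly, the helper form expands to the loop it replaces:

	/* rough expansion of the helpers used above: */
	for (tmp = next; tmp; tmp = next) {
		next = tmp->next;	/* saved up front, so the body may unlink tmp */
		/* ... per-segment work ... */
		tmp->next = NULL;	/* skb_mark_not_on_list(tmp) */
	}
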
@@ -1078,7 +1068,7 @@ static int iwl_mvm_tx_mpdu(struct iwl_mvm *mvm, struct sk_buff *skb,
{
struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
struct iwl_mvm_sta *mvmsta;
- struct iwl_device_cmd *dev_cmd;
+ struct iwl_device_tx_cmd *dev_cmd;
__le16 fc;
u16 seq_number = 0;
u8 tid = IWL_MAX_TID_COUNT;
@@ -1154,7 +1144,7 @@ static int iwl_mvm_tx_mpdu(struct iwl_mvm *mvm, struct sk_buff *skb,
if (WARN_ONCE(txq_id == IWL_MVM_INVALID_QUEUE, "Invalid TXQ id")) {
iwl_trans_free_tx_cmd(mvm->trans, dev_cmd);
spin_unlock(&mvmsta->lock);
- return 0;
+ return -1;
}
if (!iwl_mvm_has_new_tx_api(mvm)) {
@@ -1206,8 +1196,8 @@ drop:
return -1;
}
-int iwl_mvm_tx_skb(struct iwl_mvm *mvm, struct sk_buff *skb,
- struct ieee80211_sta *sta)
+int iwl_mvm_tx_skb_sta(struct iwl_mvm *mvm, struct sk_buff *skb,
+ struct ieee80211_sta *sta)
{
struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
struct ieee80211_tx_info info;
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info-gen3.c b/drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info-gen3.c
index a4e09a5b1816..01f248ba8fec 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info-gen3.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info-gen3.c
@@ -206,7 +206,7 @@ int iwl_pcie_ctxt_info_gen3_init(struct iwl_trans *trans,
ctxt_info_gen3->mtr_size =
cpu_to_le16(TFD_QUEUE_CB_SIZE(cmdq_size));
ctxt_info_gen3->mcr_size =
- cpu_to_le16(RX_QUEUE_CB_SIZE(MQ_RX_TABLE_SIZE));
+ cpu_to_le16(RX_QUEUE_CB_SIZE(trans->cfg->num_rbds));
trans_pcie->ctxt_info_gen3 = ctxt_info_gen3;
trans_pcie->prph_info = prph_info;
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info.c b/drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info.c
index d38cefbb779e..acd01d86f101 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info.c
@@ -57,6 +57,42 @@
#include "internal.h"
#include "iwl-prph.h"
+static void *_iwl_pcie_ctxt_info_dma_alloc_coherent(struct iwl_trans *trans,
+ size_t size,
+ dma_addr_t *phys,
+ int depth)
+{
+ void *result;
+
+ if (WARN(depth > 2,
+ "failed to allocate DMA memory not crossing 2^32 boundary"))
+ return NULL;
+
+ result = dma_alloc_coherent(trans->dev, size, phys, GFP_KERNEL);
+
+ if (!result)
+ return NULL;
+
+ if (unlikely(iwl_pcie_crosses_4g_boundary(*phys, size))) {
+ void *old = result;
+ dma_addr_t oldphys = *phys;
+
+ result = _iwl_pcie_ctxt_info_dma_alloc_coherent(trans, size,
+ phys,
+ depth + 1);
+ dma_free_coherent(trans->dev, size, old, oldphys);
+ }
+
+ return result;
+}
+
+static void *iwl_pcie_ctxt_info_dma_alloc_coherent(struct iwl_trans *trans,
+ size_t size,
+ dma_addr_t *phys)
+{
+ return _iwl_pcie_ctxt_info_dma_alloc_coherent(trans, size, phys, 0);
+}
+
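The retry logic above leans on allocation ordering: if the buffer straddles a 4 GiB boundary, a replacement is allocated before the offending one is freed, so the allocator cannot simply hand back the same crossing region; the WARN() bounds this at two retries. A worked instance of the predicate (defined as iwl_pcie_crosses_4g_boundary() in pcie/internal.h, further down in this patch):

	u64 phys = 0xfffff000ULL;	/* last 4 KiB below the 4 GiB line */
	u16 len  = 0x2000;		/* 8 KiB buffer */

	/* the last byte sits at 0x1_00000fff, so the upper 32 bits of
	 * start and end differ and the allocation must be redone
	 */
	bool crosses = upper_32_bits(phys) != upper_32_bits(phys + len);	/* true */
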
void iwl_pcie_ctxt_info_free_paging(struct iwl_trans *trans)
{
struct iwl_self_init_dram *dram = &trans->init_dram;
@@ -161,14 +197,17 @@ int iwl_pcie_ctxt_info_init(struct iwl_trans *trans,
struct iwl_context_info *ctxt_info;
struct iwl_context_info_rbd_cfg *rx_cfg;
u32 control_flags = 0, rb_size;
+ dma_addr_t phys;
int ret;
- ctxt_info = dma_alloc_coherent(trans->dev, sizeof(*ctxt_info),
- &trans_pcie->ctxt_info_dma_addr,
- GFP_KERNEL);
+ ctxt_info = iwl_pcie_ctxt_info_dma_alloc_coherent(trans,
+ sizeof(*ctxt_info),
+ &phys);
if (!ctxt_info)
return -ENOMEM;
+ trans_pcie->ctxt_info_dma_addr = phys;
+
ctxt_info->version.version = 0;
ctxt_info->version.mac_id =
cpu_to_le16((u16)iwl_read32(trans, CSR_HW_REV));
@@ -193,11 +232,12 @@ int iwl_pcie_ctxt_info_init(struct iwl_trans *trans,
rb_size = IWL_CTXT_INFO_RB_SIZE_4K;
}
- BUILD_BUG_ON(RX_QUEUE_CB_SIZE(MQ_RX_TABLE_SIZE) > 0xF);
- control_flags = IWL_CTXT_INFO_TFD_FORMAT_LONG |
- (RX_QUEUE_CB_SIZE(MQ_RX_TABLE_SIZE) <<
- IWL_CTXT_INFO_RB_CB_SIZE_POS) |
- (rb_size << IWL_CTXT_INFO_RB_SIZE_POS);
+ WARN_ON(RX_QUEUE_CB_SIZE(trans->cfg->num_rbds) > 12);
+ control_flags = IWL_CTXT_INFO_TFD_FORMAT_LONG;
+ control_flags |=
+ u32_encode_bits(RX_QUEUE_CB_SIZE(trans->cfg->num_rbds),
+ IWL_CTXT_INFO_RB_CB_SIZE);
+ control_flags |= u32_encode_bits(rb_size, IWL_CTXT_INFO_RB_SIZE);
ctxt_info->control.control_flags = cpu_to_le32(control_flags);
/* initialize RX default queue */
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/drv.c b/drivers/net/wireless/intel/iwlwifi/pcie/drv.c
index b0b7eca1754e..97f227f3cbc3 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/drv.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/drv.c
@@ -565,14 +565,8 @@ static const struct pci_device_id iwl_hw_card_ids[] = {
{IWL_PCI_DEVICE(0x06F0, 0x40A4, iwl9462_2ac_cfg_quz_a0_jf_b0_soc)},
{IWL_PCI_DEVICE(0x06F0, 0x4234, iwl9560_2ac_cfg_quz_a0_jf_b0_soc)},
{IWL_PCI_DEVICE(0x06F0, 0x42A4, iwl9462_2ac_cfg_quz_a0_jf_b0_soc)},
- {IWL_PCI_DEVICE(0x2526, 0x0010, iwl9260_2ac_160_cfg)},
- {IWL_PCI_DEVICE(0x2526, 0x0014, iwl9260_2ac_160_cfg)},
- {IWL_PCI_DEVICE(0x2526, 0x0018, iwl9260_2ac_160_cfg)},
- {IWL_PCI_DEVICE(0x2526, 0x001C, iwl9260_2ac_160_cfg)},
- {IWL_PCI_DEVICE(0x2526, 0x0030, iwl9560_2ac_160_cfg)},
+
{IWL_PCI_DEVICE(0x2526, 0x0034, iwl9560_2ac_cfg)},
- {IWL_PCI_DEVICE(0x2526, 0x0038, iwl9560_2ac_160_cfg)},
- {IWL_PCI_DEVICE(0x2526, 0x003C, iwl9560_2ac_160_cfg)},
{IWL_PCI_DEVICE(0x2526, 0x0060, iwl9461_2ac_cfg_soc)},
{IWL_PCI_DEVICE(0x2526, 0x0064, iwl9461_2ac_cfg_soc)},
{IWL_PCI_DEVICE(0x2526, 0x00A0, iwl9462_2ac_cfg_soc)},
@@ -598,21 +592,12 @@ static const struct pci_device_id iwl_hw_card_ids[] = {
{IWL_PCI_DEVICE(0x2526, 0x1610, iwl9270_2ac_cfg)},
{IWL_PCI_DEVICE(0x2526, 0x2030, iwl9560_2ac_160_cfg_soc)},
{IWL_PCI_DEVICE(0x2526, 0x2034, iwl9560_2ac_160_cfg_soc)},
- {IWL_PCI_DEVICE(0x2526, 0x4010, iwl9260_2ac_160_cfg)},
- {IWL_PCI_DEVICE(0x2526, 0x4018, iwl9260_2ac_160_cfg)},
- {IWL_PCI_DEVICE(0x2526, 0x401C, iwl9260_2ac_160_cfg)},
- {IWL_PCI_DEVICE(0x2526, 0x4030, iwl9560_2ac_160_cfg)},
{IWL_PCI_DEVICE(0x2526, 0x4034, iwl9560_2ac_160_cfg_soc)},
{IWL_PCI_DEVICE(0x2526, 0x40A4, iwl9462_2ac_cfg_soc)},
{IWL_PCI_DEVICE(0x2526, 0x4234, iwl9560_2ac_cfg_soc)},
{IWL_PCI_DEVICE(0x2526, 0x42A4, iwl9462_2ac_cfg_soc)},
- {IWL_PCI_DEVICE(0x2526, 0x6010, iwl9260_2ac_160_cfg)},
- {IWL_PCI_DEVICE(0x2526, 0x6014, iwl9260_2ac_160_cfg)},
- {IWL_PCI_DEVICE(0x2526, 0x8014, iwl9260_2ac_160_cfg)},
- {IWL_PCI_DEVICE(0x2526, 0x8010, iwl9260_2ac_160_cfg)},
- {IWL_PCI_DEVICE(0x2526, 0xA014, iwl9260_2ac_160_cfg)},
- {IWL_PCI_DEVICE(0x2526, 0xE010, iwl9260_2ac_160_cfg)},
- {IWL_PCI_DEVICE(0x2526, 0xE014, iwl9260_2ac_160_cfg)},
+ {IWL_PCI_DEVICE(0x2526, PCI_ANY_ID, iwl9000_trans_cfg)},
+
{IWL_PCI_DEVICE(0x271B, 0x0010, iwl9160_2ac_cfg)},
{IWL_PCI_DEVICE(0x271B, 0x0014, iwl9160_2ac_cfg)},
{IWL_PCI_DEVICE(0x271B, 0x0210, iwl9160_2ac_cfg)},
@@ -910,7 +895,6 @@ static const struct pci_device_id iwl_hw_card_ids[] = {
{IWL_PCI_DEVICE(0x2720, 0x0074, iwl_ax201_cfg_qu_hr)},
{IWL_PCI_DEVICE(0x2720, 0x0078, iwl_ax201_cfg_qu_hr)},
{IWL_PCI_DEVICE(0x2720, 0x007C, iwl_ax201_cfg_qu_hr)},
- {IWL_PCI_DEVICE(0x2720, 0x0090, iwl22000_2ac_cfg_hr_cdb)},
{IWL_PCI_DEVICE(0x2720, 0x0244, iwl_ax101_cfg_qu_hr)},
{IWL_PCI_DEVICE(0x2720, 0x0310, iwl_ax201_cfg_qu_hr)},
{IWL_PCI_DEVICE(0x2720, 0x0A10, iwl_ax201_cfg_qu_hr)},
@@ -987,28 +971,74 @@ static const struct pci_device_id iwl_hw_card_ids[] = {
};
MODULE_DEVICE_TABLE(pci, iwl_hw_card_ids);
+#define IWL_DEV_INFO(_device, _subdevice, _cfg, _name) \
+ { .device = (_device), .subdevice = (_subdevice), .cfg = &(_cfg), \
+ .name = _name }
+
+static const struct iwl_dev_info iwl_dev_info_table[] = {
+#if IS_ENABLED(CONFIG_IWLMVM)
+ IWL_DEV_INFO(0x2526, 0x0010, iwl9260_2ac_160_cfg, iwl9260_160_name),
+ IWL_DEV_INFO(0x2526, 0x0014, iwl9260_2ac_160_cfg, iwl9260_160_name),
+ IWL_DEV_INFO(0x2526, 0x0018, iwl9260_2ac_160_cfg, iwl9260_160_name),
+ IWL_DEV_INFO(0x2526, 0x001C, iwl9260_2ac_160_cfg, iwl9260_160_name),
+ IWL_DEV_INFO(0x2526, 0x6010, iwl9260_2ac_160_cfg, iwl9260_160_name),
+ IWL_DEV_INFO(0x2526, 0x6014, iwl9260_2ac_160_cfg, iwl9260_160_name),
+ IWL_DEV_INFO(0x2526, 0x8014, iwl9260_2ac_160_cfg, iwl9260_160_name),
+ IWL_DEV_INFO(0x2526, 0x8010, iwl9260_2ac_160_cfg, iwl9260_160_name),
+ IWL_DEV_INFO(0x2526, 0xA014, iwl9260_2ac_160_cfg, iwl9260_160_name),
+ IWL_DEV_INFO(0x2526, 0xE010, iwl9260_2ac_160_cfg, iwl9260_160_name),
+ IWL_DEV_INFO(0x2526, 0xE014, iwl9260_2ac_160_cfg, iwl9260_160_name),
+
+ IWL_DEV_INFO(0x2526, 0x0030, iwl9560_2ac_160_cfg, iwl9560_160_name),
+ IWL_DEV_INFO(0x2526, 0x0038, iwl9560_2ac_160_cfg, iwl9560_160_name),
+ IWL_DEV_INFO(0x2526, 0x003C, iwl9560_2ac_160_cfg, iwl9560_160_name),
+ IWL_DEV_INFO(0x2526, 0x4030, iwl9560_2ac_160_cfg, iwl9560_160_name),
+#endif /* CONFIG_IWLMVM */
+};
+
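Entries in this table are matched in order against the probing device, with IWL_CFG_ANY acting as a wildcard; the first hit supplies both the cfg and the human-readable name. That is what lets the PCI table above collapse the 0x2526 subdevice list into the single {IWL_PCI_DEVICE(0x2526, PCI_ANY_ID, iwl9000_trans_cfg)} entry. The match rule, which the probe loop below open-codes:

	static bool iwl_dev_info_matches(const struct iwl_dev_info *di,
					 u16 device, u16 subdevice)
	{
		return (di->device == IWL_CFG_ANY || di->device == device) &&
		       (di->subdevice == IWL_CFG_ANY || di->subdevice == subdevice);
	}
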
/* PCI registers */
#define PCI_CFG_RETRY_TIMEOUT 0x041
static int iwl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
- const struct iwl_cfg *cfg = (struct iwl_cfg *)(ent->driver_data);
+ const struct iwl_cfg_trans_params *trans =
+ (struct iwl_cfg_trans_params *)(ent->driver_data);
const struct iwl_cfg *cfg_7265d __maybe_unused = NULL;
struct iwl_trans *iwl_trans;
+ struct iwl_trans_pcie *trans_pcie;
unsigned long flags;
- int ret;
+ int i, ret;
+ /*
+ * This is needed for backwards compatibility with the old
+ * tables, so we don't need to change all the config structs
+ * at the same time. The cfg is used to compare with the old
+ * full cfg structs.
+ */
+ const struct iwl_cfg *cfg = (struct iwl_cfg *)(ent->driver_data);
- iwl_trans = iwl_trans_pcie_alloc(pdev, ent, &cfg->trans);
+ /* make sure trans is the first element in iwl_cfg */
+ BUILD_BUG_ON(offsetof(struct iwl_cfg, trans));
+
+ iwl_trans = iwl_trans_pcie_alloc(pdev, ent, trans);
if (IS_ERR(iwl_trans))
return PTR_ERR(iwl_trans);
- /* the trans_cfg should never change, so set it now */
- iwl_trans->trans_cfg = &cfg->trans;
+ trans_pcie = IWL_TRANS_GET_PCIE_TRANS(iwl_trans);
- if (WARN_ONCE(!iwl_trans->trans_cfg->csr,
- "CSR addresses aren't configured\n")) {
- ret = -EINVAL;
- goto out_free_trans;
+ /* the trans_cfg should never change, so set it now */
+ iwl_trans->trans_cfg = trans;
+
+ for (i = 0; i < ARRAY_SIZE(iwl_dev_info_table); i++) {
+ const struct iwl_dev_info *dev_info = &iwl_dev_info_table[i];
+
+ if ((dev_info->device == IWL_CFG_ANY ||
+ dev_info->device == pdev->device) &&
+ (dev_info->subdevice == IWL_CFG_ANY ||
+ dev_info->subdevice == pdev->subsystem_device)) {
+ iwl_trans->cfg = dev_info->cfg;
+ iwl_trans->name = dev_info->name;
+ goto found;
+ }
}
#if IS_ENABLED(CONFIG_IWLMVM)
@@ -1027,22 +1057,22 @@ static int iwl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
cfg_7265d = &iwl7265d_n_cfg;
if (cfg_7265d &&
(iwl_trans->hw_rev & CSR_HW_REV_TYPE_MSK) == CSR_HW_REV_TYPE_7265D)
- cfg = cfg_7265d;
+ iwl_trans->cfg = cfg_7265d;
iwl_trans->hw_rf_id = iwl_read32(iwl_trans, CSR_HW_RF_ID);
if (cfg == &iwlax210_2ax_cfg_so_hr_a0) {
if (iwl_trans->hw_rev == CSR_HW_REV_TYPE_TY) {
- cfg = &iwlax210_2ax_cfg_ty_gf_a0;
+ iwl_trans->cfg = &iwlax210_2ax_cfg_ty_gf_a0;
} else if (CSR_HW_RF_ID_TYPE_CHIP_ID(iwl_trans->hw_rf_id) ==
CSR_HW_RF_ID_TYPE_CHIP_ID(CSR_HW_RF_ID_TYPE_JF)) {
- cfg = &iwlax210_2ax_cfg_so_jf_a0;
+ iwl_trans->cfg = &iwlax210_2ax_cfg_so_jf_a0;
} else if (CSR_HW_RF_ID_TYPE_CHIP_ID(iwl_trans->hw_rf_id) ==
CSR_HW_RF_ID_TYPE_CHIP_ID(CSR_HW_RF_ID_TYPE_GF)) {
- cfg = &iwlax211_2ax_cfg_so_gf_a0;
+ iwl_trans->cfg = &iwlax211_2ax_cfg_so_gf_a0;
} else if (CSR_HW_RF_ID_TYPE_CHIP_ID(iwl_trans->hw_rf_id) ==
CSR_HW_RF_ID_TYPE_CHIP_ID(CSR_HW_RF_ID_TYPE_GF4)) {
- cfg = &iwlax411_2ax_cfg_so_gf4_a0;
+ iwl_trans->cfg = &iwlax411_2ax_cfg_so_gf4_a0;
}
} else if (cfg == &iwl_ax101_cfg_qu_hr) {
if ((CSR_HW_RF_ID_TYPE_CHIP_ID(iwl_trans->hw_rf_id) ==
@@ -1050,13 +1080,17 @@ static int iwl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
iwl_trans->hw_rev == CSR_HW_REV_TYPE_QNJ_B0) ||
(CSR_HW_RF_ID_TYPE_CHIP_ID(iwl_trans->hw_rf_id) ==
CSR_HW_RF_ID_TYPE_CHIP_ID(CSR_HW_RF_ID_TYPE_HR1))) {
- cfg = &iwl22000_2ax_cfg_qnj_hr_b0;
+ iwl_trans->cfg = &iwl22000_2ax_cfg_qnj_hr_b0;
+ } else if (CSR_HW_RF_ID_TYPE_CHIP_ID(iwl_trans->hw_rf_id) ==
+ CSR_HW_RF_ID_TYPE_CHIP_ID(CSR_HW_RF_ID_TYPE_HR) &&
+ iwl_trans->hw_rev == CSR_HW_REV_TYPE_QUZ) {
+ iwl_trans->cfg = &iwl_ax101_cfg_quz_hr;
} else if (CSR_HW_RF_ID_TYPE_CHIP_ID(iwl_trans->hw_rf_id) ==
CSR_HW_RF_ID_TYPE_CHIP_ID(CSR_HW_RF_ID_TYPE_HR)) {
- cfg = &iwl_ax101_cfg_qu_hr;
+ iwl_trans->cfg = &iwl_ax101_cfg_qu_hr;
} else if (CSR_HW_RF_ID_TYPE_CHIP_ID(iwl_trans->hw_rf_id) ==
CSR_HW_RF_ID_TYPE_CHIP_ID(CSR_HW_RF_ID_TYPE_JF)) {
- cfg = &iwl22000_2ax_cfg_jf;
+ iwl_trans->cfg = &iwl22000_2ax_cfg_jf;
} else if (CSR_HW_RF_ID_TYPE_CHIP_ID(iwl_trans->hw_rf_id) ==
CSR_HW_RF_ID_TYPE_CHIP_ID(CSR_HW_RF_ID_TYPE_HRCDB)) {
IWL_ERR(iwl_trans, "RF ID HRCDB is not supported\n");
@@ -1073,19 +1107,11 @@ static int iwl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
hw_status = iwl_read_prph(iwl_trans, UMAG_GEN_HW_STATUS);
if (CSR_HW_RF_STEP(iwl_trans->hw_rf_id) == SILICON_B_STEP)
- /*
- * b step fw is the same for physical card and fpga
- */
- cfg = &iwl22000_2ax_cfg_qnj_hr_b0;
+ iwl_trans->cfg = &iwl22000_2ax_cfg_qnj_hr_b0;
else if ((hw_status & UMAG_GEN_HW_IS_FPGA) &&
- CSR_HW_RF_STEP(iwl_trans->hw_rf_id) == SILICON_A_STEP) {
- cfg = &iwl22000_2ax_cfg_qnj_hr_a0_f0;
- } else {
- /*
- * a step no FPGA
- */
- cfg = &iwl22000_2ac_cfg_hr;
- }
+ CSR_HW_RF_STEP(iwl_trans->hw_rf_id) ==
+ SILICON_A_STEP)
+ iwl_trans->cfg = &iwl22000_2ax_cfg_qnj_hr_a0_f0;
}
/*
@@ -1096,17 +1122,21 @@ static int iwl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
*/
if (iwl_trans->hw_rev == CSR_HW_REV_TYPE_QU_C0) {
if (cfg == &iwl_ax101_cfg_qu_hr)
- cfg = &iwl_ax101_cfg_qu_c0_hr_b0;
+ iwl_trans->cfg = &iwl_ax101_cfg_qu_c0_hr_b0;
else if (cfg == &iwl_ax201_cfg_qu_hr)
- cfg = &iwl_ax201_cfg_qu_c0_hr_b0;
+ iwl_trans->cfg = &iwl_ax201_cfg_qu_c0_hr_b0;
else if (cfg == &iwl9461_2ac_cfg_qu_b0_jf_b0)
- cfg = &iwl9461_2ac_cfg_qu_c0_jf_b0;
+ iwl_trans->cfg = &iwl9461_2ac_cfg_qu_c0_jf_b0;
else if (cfg == &iwl9462_2ac_cfg_qu_b0_jf_b0)
- cfg = &iwl9462_2ac_cfg_qu_c0_jf_b0;
+ iwl_trans->cfg = &iwl9462_2ac_cfg_qu_c0_jf_b0;
else if (cfg == &iwl9560_2ac_cfg_qu_b0_jf_b0)
- cfg = &iwl9560_2ac_cfg_qu_c0_jf_b0;
+ iwl_trans->cfg = &iwl9560_2ac_cfg_qu_c0_jf_b0;
else if (cfg == &iwl9560_2ac_160_cfg_qu_b0_jf_b0)
- cfg = &iwl9560_2ac_160_cfg_qu_c0_jf_b0;
+ iwl_trans->cfg = &iwl9560_2ac_160_cfg_qu_c0_jf_b0;
+ else if (cfg == &killer1650s_2ax_cfg_qu_b0_hr_b0)
+ iwl_trans->cfg = &killer1650s_2ax_cfg_qu_c0_hr_b0;
+ else if (cfg == &killer1650i_2ax_cfg_qu_b0_hr_b0)
+ iwl_trans->cfg = &killer1650i_2ax_cfg_qu_c0_hr_b0;
}
/* same thing for QuZ... */
@@ -1126,8 +1156,27 @@ static int iwl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
}
#endif
- /* now set the real cfg we decided to use */
- iwl_trans->cfg = cfg;
+ /*
+ * If we didn't set the cfg yet, assume the trans is actually
+ * a full cfg from the old tables.
+ */
+ if (!iwl_trans->cfg)
+ iwl_trans->cfg = cfg;
+
+found:
+ /* if we don't have a name yet, copy name from the old cfg */
+ if (!iwl_trans->name)
+ iwl_trans->name = iwl_trans->cfg->name;
+
+ if (iwl_trans->trans_cfg->mq_rx_supported) {
+ if (WARN_ON(!iwl_trans->cfg->num_rbds)) {
+ ret = -EINVAL;
+ goto out_free_trans;
+ }
+ trans_pcie->num_rx_bufs = iwl_trans->cfg->num_rbds;
+ } else {
+ trans_pcie->num_rx_bufs = RX_QUEUE_SIZE;
+ }
if (iwl_trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_8000 &&
iwl_trans_grab_nic_access(iwl_trans, &flags)) {
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/internal.h b/drivers/net/wireless/intel/iwlwifi/pcie/internal.h
index a091690f6c79..72f144c3a46e 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/internal.h
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/internal.h
@@ -106,6 +106,8 @@ struct iwl_host_cmd;
* @page: driver's pointer to the rxb page
* @invalid: rxb is in driver ownership - not owned by HW
* @vid: index of this rxb in the global table
+ * @offset: byte offset into the page at which this buffer
+ *	starts (used when multiple RBs share one page)
*/
struct iwl_rx_mem_buffer {
dma_addr_t page_dma;
@@ -113,6 +115,7 @@ struct iwl_rx_mem_buffer {
u16 vid;
bool invalid;
struct list_head list;
+ u32 offset;
};
/**
@@ -305,7 +308,7 @@ struct iwl_cmd_meta {
#define IWL_FIRST_TB_SIZE_ALIGN ALIGN(IWL_FIRST_TB_SIZE, 64)
struct iwl_pcie_txq_entry {
- struct iwl_device_cmd *cmd;
+ void *cmd;
struct sk_buff *skb;
/* buffer to free after command completes */
const void *free_buf;
@@ -491,6 +494,7 @@ struct cont_rec {
* @sw_csum_tx: if true, then the transport will compute the csum of the TXed
* frame.
* @rx_page_order: page order for receive buffer size
+ * @rx_buf_bytes: RX buffer (RB) size in bytes
* @reg_lock: protect hw register access
* @mutex: to protect stop_device / start_fw / start_hw
* @cmd_in_flight: true when we have a host command in flight
@@ -510,11 +514,16 @@ struct cont_rec {
* @in_rescan: true if we have triggered a device rescan
* @base_rb_stts: base virtual address of receive buffer status for all queues
* @base_rb_stts_dma: base physical address of receive buffer status
+ * @supported_dma_mask: DMA mask to validate the actual address against,
+ * will be DMA_BIT_MASK(11) or DMA_BIT_MASK(12) depending on the device
+ * @alloc_page_lock: spinlock for the page allocator
+ * @alloc_page: allocated page to still use parts of
+ * @alloc_page_used: how much of the allocated page was already used (bytes)
*/
struct iwl_trans_pcie {
struct iwl_rxq *rxq;
- struct iwl_rx_mem_buffer rx_pool[RX_POOL_SIZE];
- struct iwl_rx_mem_buffer *global_table[RX_POOL_SIZE];
+ struct iwl_rx_mem_buffer *rx_pool;
+ struct iwl_rx_mem_buffer **global_table;
struct iwl_rb_allocator rba;
union {
struct iwl_context_info *ctxt_info;
@@ -573,6 +582,7 @@ struct iwl_trans_pcie {
u8 no_reclaim_cmds[MAX_NO_RECLAIM_CMDS];
u8 max_tbs;
u16 tfd_size;
+ u16 num_rx_bufs;
enum iwl_amsdu_size rx_buf_size;
bool bc_table_dword;
@@ -580,6 +590,13 @@ struct iwl_trans_pcie {
bool sw_csum_tx;
bool pcie_dbg_dumped_once;
u32 rx_page_order;
+ u32 rx_buf_bytes;
+ u32 supported_dma_mask;
+
+ /* allocator lock for the two values below */
+ spinlock_t alloc_page_lock;
+ struct page *alloc_page;
+ u32 alloc_page_used;
/*protect hw register */
spinlock_t reg_lock;
@@ -672,6 +689,16 @@ void iwl_pcie_disable_ict(struct iwl_trans *trans);
/*****************************************************
* TX / HCMD
******************************************************/
+/*
+ * We need this inline in case dma_addr_t is only 32 bits - since the
+ * hardware addressing is always 64-bit, the issue can still occur in
+ * that case, so use u64 for 'phys' here to force a 64-bit addition.
+ */
+static inline bool iwl_pcie_crosses_4g_boundary(u64 phys, u16 len)
+{
+ return upper_32_bits(phys) != upper_32_bits(phys + len);
+}
+
int iwl_pcie_tx_init(struct iwl_trans *trans);
int iwl_pcie_gen2_tx_init(struct iwl_trans *trans, int txq_id,
int queue_size);
@@ -688,7 +715,7 @@ void iwl_trans_pcie_txq_set_shared_mode(struct iwl_trans *trans, u32 txq_id,
void iwl_trans_pcie_log_scd_error(struct iwl_trans *trans,
struct iwl_txq *txq);
int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb,
- struct iwl_device_cmd *dev_cmd, int txq_id);
+ struct iwl_device_tx_cmd *dev_cmd, int txq_id);
void iwl_pcie_txq_check_wrptrs(struct iwl_trans *trans);
int iwl_trans_pcie_send_hcmd(struct iwl_trans *trans, struct iwl_host_cmd *cmd);
void iwl_pcie_cmdq_reclaim(struct iwl_trans *trans, int txq_id, int idx);
@@ -1082,7 +1109,8 @@ void iwl_pcie_apply_destination(struct iwl_trans *trans);
void iwl_pcie_free_tso_page(struct iwl_trans_pcie *trans_pcie,
struct sk_buff *skb);
#ifdef CONFIG_INET
-struct iwl_tso_hdr_page *get_page_hdr(struct iwl_trans *trans, size_t len);
+struct iwl_tso_hdr_page *get_page_hdr(struct iwl_trans *trans, size_t len,
+ struct sk_buff *skb);
#endif
/* common functions that are used by gen3 transport */
@@ -1106,7 +1134,7 @@ int iwl_trans_pcie_dyn_txq_alloc(struct iwl_trans *trans,
unsigned int timeout);
void iwl_trans_pcie_dyn_txq_free(struct iwl_trans *trans, int queue);
int iwl_trans_pcie_gen2_tx(struct iwl_trans *trans, struct sk_buff *skb,
- struct iwl_device_cmd *dev_cmd, int txq_id);
+ struct iwl_device_tx_cmd *dev_cmd, int txq_id);
int iwl_trans_pcie_gen2_send_hcmd(struct iwl_trans *trans,
struct iwl_host_cmd *cmd);
void iwl_trans_pcie_gen2_stop_device(struct iwl_trans *trans);
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/rx.c b/drivers/net/wireless/intel/iwlwifi/pcie/rx.c
index 452da44a21e0..427fcea5cb2d 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/rx.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/rx.c
@@ -240,7 +240,7 @@ static void iwl_pcie_rxq_inc_wr_ptr(struct iwl_trans *trans,
IWL_DEBUG_INFO(trans, "Rx queue requesting wakeup, GP1 = 0x%x\n",
reg);
iwl_set_bit(trans, CSR_GP_CNTRL,
- BIT(trans->trans_cfg->csr->flag_mac_access_req));
+ CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
rxq->need_update = true;
return;
}
@@ -298,6 +298,7 @@ static void iwl_pcie_restock_bd(struct iwl_trans *trans,
static void iwl_pcie_rxmq_restock(struct iwl_trans *trans,
struct iwl_rxq *rxq)
{
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
struct iwl_rx_mem_buffer *rxb;
/*
@@ -318,8 +319,8 @@ static void iwl_pcie_rxmq_restock(struct iwl_trans *trans,
list);
list_del(&rxb->list);
rxb->invalid = false;
- /* 12 first bits are expected to be empty */
- WARN_ON(rxb->page_dma & DMA_BIT_MASK(12));
+ /* some low bits are expected to be unset (depending on hw) */
+ WARN_ON(rxb->page_dma & trans_pcie->supported_dma_mask);
/* Point to Rx buffer via next RBD in circular buffer */
iwl_pcie_restock_bd(trans, rxq, rxb);
rxq->write = (rxq->write + 1) & (rxq->queue_size - 1);
@@ -412,15 +413,34 @@ void iwl_pcie_rxq_restock(struct iwl_trans *trans, struct iwl_rxq *rxq)
*
*/
static struct page *iwl_pcie_rx_alloc_page(struct iwl_trans *trans,
- gfp_t priority)
+ u32 *offset, gfp_t priority)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+ unsigned int rbsize = iwl_trans_get_rb_size(trans_pcie->rx_buf_size);
+ unsigned int allocsize = PAGE_SIZE << trans_pcie->rx_page_order;
struct page *page;
gfp_t gfp_mask = priority;
if (trans_pcie->rx_page_order > 0)
gfp_mask |= __GFP_COMP;
+ if (trans_pcie->alloc_page) {
+ spin_lock_bh(&trans_pcie->alloc_page_lock);
+ /* recheck */
+ if (trans_pcie->alloc_page) {
+ *offset = trans_pcie->alloc_page_used;
+ page = trans_pcie->alloc_page;
+ trans_pcie->alloc_page_used += rbsize;
+ if (trans_pcie->alloc_page_used >= allocsize)
+ trans_pcie->alloc_page = NULL;
+ else
+ get_page(page);
+ spin_unlock_bh(&trans_pcie->alloc_page_lock);
+ return page;
+ }
+ spin_unlock_bh(&trans_pcie->alloc_page_lock);
+ }
+
/* Alloc a new receive buffer */
page = alloc_pages(gfp_mask, trans_pcie->rx_page_order);
if (!page) {
@@ -436,6 +456,18 @@ static struct page *iwl_pcie_rx_alloc_page(struct iwl_trans *trans,
"Failed to alloc_pages\n");
return NULL;
}
+
+ if (2 * rbsize <= allocsize) {
+ spin_lock_bh(&trans_pcie->alloc_page_lock);
+ if (!trans_pcie->alloc_page) {
+ get_page(page);
+ trans_pcie->alloc_page = page;
+ trans_pcie->alloc_page_used = rbsize;
+ }
+ spin_unlock_bh(&trans_pcie->alloc_page_lock);
+ }
+
+ *offset = 0;
return page;
}
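When at least two RBs fit into one allocation (the 2 * rbsize <= allocsize check), the page is parked in trans_pcie->alloc_page and carved into rbsize slices on later calls; the page refcount does the bookkeeping, since every handed-out slice and the cache itself each hold one reference. Roughly:

	/* refcount sketch for a shared page: */
	page = alloc_pages(gfp_mask, order);	/* ref 1: first slice handed out */
	get_page(page);				/* ref 2: parked in alloc_page */
	get_page(page);				/* ref 3: second slice handed out, ... */
	__free_pages(page, order);		/* each holder drops its own ref; the
						 * memory returns at refcount zero */
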
@@ -456,6 +488,8 @@ void iwl_pcie_rxq_alloc_rbs(struct iwl_trans *trans, gfp_t priority,
struct page *page;
while (1) {
+ unsigned int offset;
+
spin_lock(&rxq->lock);
if (list_empty(&rxq->rx_used)) {
spin_unlock(&rxq->lock);
@@ -463,8 +497,7 @@ void iwl_pcie_rxq_alloc_rbs(struct iwl_trans *trans, gfp_t priority,
}
spin_unlock(&rxq->lock);
- /* Alloc a new receive buffer */
- page = iwl_pcie_rx_alloc_page(trans, priority);
+ page = iwl_pcie_rx_alloc_page(trans, &offset, priority);
if (!page)
return;
@@ -482,10 +515,11 @@ void iwl_pcie_rxq_alloc_rbs(struct iwl_trans *trans, gfp_t priority,
BUG_ON(rxb->page);
rxb->page = page;
+ rxb->offset = offset;
/* Get physical address of the RB */
rxb->page_dma =
- dma_map_page(trans->dev, page, 0,
- PAGE_SIZE << trans_pcie->rx_page_order,
+ dma_map_page(trans->dev, page, rxb->offset,
+ trans_pcie->rx_buf_bytes,
DMA_FROM_DEVICE);
if (dma_mapping_error(trans->dev, rxb->page_dma)) {
rxb->page = NULL;
@@ -510,12 +544,11 @@ void iwl_pcie_free_rbs_pool(struct iwl_trans *trans)
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
int i;
- for (i = 0; i < RX_POOL_SIZE; i++) {
+ for (i = 0; i < RX_POOL_SIZE(trans_pcie->num_rx_bufs); i++) {
if (!trans_pcie->rx_pool[i].page)
continue;
dma_unmap_page(trans->dev, trans_pcie->rx_pool[i].page_dma,
- PAGE_SIZE << trans_pcie->rx_page_order,
- DMA_FROM_DEVICE);
+ trans_pcie->rx_buf_bytes, DMA_FROM_DEVICE);
__free_pages(trans_pcie->rx_pool[i].page,
trans_pcie->rx_page_order);
trans_pcie->rx_pool[i].page = NULL;
@@ -568,15 +601,17 @@ static void iwl_pcie_rx_allocator(struct iwl_trans *trans)
BUG_ON(rxb->page);
/* Alloc a new receive buffer */
- page = iwl_pcie_rx_alloc_page(trans, gfp_mask);
+ page = iwl_pcie_rx_alloc_page(trans, &rxb->offset,
+ gfp_mask);
if (!page)
continue;
rxb->page = page;
/* Get physical address of the RB */
- rxb->page_dma = dma_map_page(trans->dev, page, 0,
- PAGE_SIZE << trans_pcie->rx_page_order,
- DMA_FROM_DEVICE);
+ rxb->page_dma = dma_map_page(trans->dev, page,
+ rxb->offset,
+ trans_pcie->rx_buf_bytes,
+ DMA_FROM_DEVICE);
if (dma_mapping_error(trans->dev, rxb->page_dma)) {
rxb->page = NULL;
__free_pages(page, trans_pcie->rx_page_order);
@@ -738,7 +773,7 @@ static int iwl_pcie_alloc_rxq_dma(struct iwl_trans *trans,
spin_lock_init(&rxq->lock);
if (trans->trans_cfg->mq_rx_supported)
- rxq->queue_size = MQ_RX_TABLE_SIZE;
+ rxq->queue_size = trans->cfg->num_rbds;
else
rxq->queue_size = RX_QUEUE_SIZE;
@@ -807,8 +842,18 @@ static int iwl_pcie_rx_alloc(struct iwl_trans *trans)
trans_pcie->rxq = kcalloc(trans->num_rx_queues, sizeof(struct iwl_rxq),
GFP_KERNEL);
- if (!trans_pcie->rxq)
- return -ENOMEM;
+ trans_pcie->rx_pool = kcalloc(RX_POOL_SIZE(trans_pcie->num_rx_bufs),
+ sizeof(trans_pcie->rx_pool[0]),
+ GFP_KERNEL);
+ trans_pcie->global_table =
+ kcalloc(RX_POOL_SIZE(trans_pcie->num_rx_bufs),
+ sizeof(trans_pcie->global_table[0]),
+ GFP_KERNEL);
+ if (!trans_pcie->rxq || !trans_pcie->rx_pool ||
+ !trans_pcie->global_table) {
+ ret = -ENOMEM;
+ goto err;
+ }
spin_lock_init(&rba->lock);
@@ -845,6 +890,8 @@ err:
trans_pcie->base_rb_stts = NULL;
trans_pcie->base_rb_stts_dma = 0;
}
+ kfree(trans_pcie->rx_pool);
+ kfree(trans_pcie->global_table);
kfree(trans_pcie->rxq);
return ret;
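
The allocation above follows the usual kernel shape: allocate every table, test them together, and rely on kfree(NULL) being a no-op so the unwind path needs no per-allocation bookkeeping. A hedged sketch with a made-up context structure (struct demo_ctx):

#include <linux/errno.h>
#include <linux/slab.h>

struct demo_ctx {
        int *a;
        long *b;
};

static int demo_alloc_tables(struct demo_ctx *ctx, unsigned int n)
{
        ctx->a = kcalloc(n, sizeof(*ctx->a), GFP_KERNEL);
        ctx->b = kcalloc(n, sizeof(*ctx->b), GFP_KERNEL);
        if (!ctx->a || !ctx->b) {
                /* kfree(NULL) is safe, so no need to track which one failed */
                kfree(ctx->a);
                kfree(ctx->b);
                return -ENOMEM;
        }
        return 0;
}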
@@ -1081,12 +1128,11 @@ static int _iwl_pcie_rx_init(struct iwl_trans *trans)
/* move the pool to the default queue and allocator ownerships */
queue_size = trans->trans_cfg->mq_rx_supported ?
- MQ_RX_NUM_RBDS : RX_QUEUE_SIZE;
+ trans_pcie->num_rx_bufs - 1 : RX_QUEUE_SIZE;
allocator_pool_size = trans->num_rx_queues *
(RX_CLAIM_REQ_ALLOC - RX_POST_REQ_ALLOC);
num_alloc = queue_size + allocator_pool_size;
- BUILD_BUG_ON(ARRAY_SIZE(trans_pcie->global_table) !=
- ARRAY_SIZE(trans_pcie->rx_pool));
+
for (i = 0; i < num_alloc; i++) {
struct iwl_rx_mem_buffer *rxb = &trans_pcie->rx_pool[i];
@@ -1177,7 +1223,12 @@ void iwl_pcie_rx_free(struct iwl_trans *trans)
if (rxq->napi.poll)
netif_napi_del(&rxq->napi);
}
+ kfree(trans_pcie->rx_pool);
+ kfree(trans_pcie->global_table);
kfree(trans_pcie->rxq);
+
+ if (trans_pcie->alloc_page)
+ __free_pages(trans_pcie->alloc_page, trans_pcie->rx_page_order);
}
static void iwl_pcie_rx_move_to_allocator(struct iwl_rxq *rxq,
@@ -1235,7 +1286,7 @@ static void iwl_pcie_rx_handle_rb(struct iwl_trans *trans,
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
struct iwl_txq *txq = trans_pcie->txq[trans_pcie->cmd_queue];
bool page_stolen = false;
- int max_len = PAGE_SIZE << trans_pcie->rx_page_order;
+ int max_len = trans_pcie->rx_buf_bytes;
u32 offset = 0;
if (WARN_ON(!rxb))
@@ -1249,7 +1300,7 @@ static void iwl_pcie_rx_handle_rb(struct iwl_trans *trans,
bool reclaim;
int index, cmd_index, len;
struct iwl_rx_cmd_buffer rxcb = {
- ._offset = offset,
+ ._offset = rxb->offset + offset,
._rx_page_order = trans_pcie->rx_page_order,
._page = rxb->page,
._page_stolen = false,
@@ -1355,8 +1406,8 @@ static void iwl_pcie_rx_handle_rb(struct iwl_trans *trans,
* rx_free list for reuse later. */
if (rxb->page != NULL) {
rxb->page_dma =
- dma_map_page(trans->dev, rxb->page, 0,
- PAGE_SIZE << trans_pcie->rx_page_order,
+ dma_map_page(trans->dev, rxb->page, rxb->offset,
+ trans_pcie->rx_buf_bytes,
DMA_FROM_DEVICE);
if (dma_mapping_error(trans->dev, rxb->page_dma)) {
/*
@@ -1390,13 +1441,12 @@ static struct iwl_rx_mem_buffer *iwl_pcie_get_rxb(struct iwl_trans *trans,
return rxb;
}
- /* used_bd is a 32/16 bit but only 12 are used to retrieve the vid */
if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210)
- vid = le16_to_cpu(rxq->cd[i].rbid) & 0x0FFF;
+ vid = le16_to_cpu(rxq->cd[i].rbid);
else
- vid = le32_to_cpu(rxq->bd_32[i]) & 0x0FFF;
+ vid = le32_to_cpu(rxq->bd_32[i]) & 0x0FFF; /* 12-bit VID */
- if (!vid || vid > ARRAY_SIZE(trans_pcie->global_table))
+ if (!vid || vid > RX_POOL_SIZE(trans_pcie->num_rx_bufs))
goto out_err;
rxb = trans_pcie->global_table[vid - 1];
@@ -1529,13 +1579,13 @@ out:
napi = &rxq->napi;
if (napi->poll) {
+ napi_gro_flush(napi, false);
+
if (napi->rx_count) {
netif_receive_skb_list(&napi->rx_list);
INIT_LIST_HEAD(&napi->rx_list);
napi->rx_count = 0;
}
-
- napi_gro_flush(napi, false);
}
iwl_pcie_rxq_restock(trans, rxq);
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/trans-gen2.c b/drivers/net/wireless/intel/iwlwifi/pcie/trans-gen2.c
index 0d8b2a8ffa5d..19a2c72081ab 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/trans-gen2.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/trans-gen2.c
@@ -132,8 +132,7 @@ static void iwl_pcie_gen2_apm_stop(struct iwl_trans *trans, bool op_mode_leave)
* Clear "initialization complete" bit to move adapter from
* D0A* (powered-up Active) --> D0U* (Uninitialized) state.
*/
- iwl_clear_bit(trans, CSR_GP_CNTRL,
- BIT(trans->trans_cfg->csr->flag_init_done));
+ iwl_clear_bit(trans, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
}
void _iwl_trans_pcie_gen2_stop_device(struct iwl_trans *trans)
@@ -175,7 +174,7 @@ void _iwl_trans_pcie_gen2_stop_device(struct iwl_trans *trans)
/* Make sure (redundant) we've released our request to stay awake */
iwl_clear_bit(trans, CSR_GP_CNTRL,
- BIT(trans->trans_cfg->csr->flag_mac_access_req));
+ CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
/* Stop the device, and put it in low power state */
iwl_pcie_gen2_apm_stop(trans, false);
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/trans.c b/drivers/net/wireless/intel/iwlwifi/pcie/trans.c
index a0677131634d..38d8fe21690a 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/trans.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/trans.c
@@ -79,6 +79,7 @@
#include "iwl-agn-hw.h"
#include "fw/error-dump.h"
#include "fw/dbg.h"
+#include "fw/api/tx.h"
#include "internal.h"
#include "iwl-fh.h"
@@ -183,8 +184,7 @@ out:
static void iwl_trans_pcie_sw_reset(struct iwl_trans *trans)
{
/* Reset entire device - do controller reset (results in SHRD_HW_RST) */
- iwl_set_bit(trans, trans->trans_cfg->csr->addr_sw_reset,
- BIT(trans->trans_cfg->csr->flag_sw_reset));
+ iwl_set_bit(trans, CSR_RESET, CSR_RESET_REG_FLAG_SW_RESET);
usleep_range(5000, 6000);
}
@@ -301,18 +301,13 @@ void iwl_pcie_apm_config(struct iwl_trans *trans)
u16 cap;
/*
- * HW bug W/A for instability in PCIe bus L0S->L1 transition.
- * Check if BIOS (or OS) enabled L1-ASPM on this device.
- * If so (likely), disable L0S, so device moves directly L0->L1;
- * costs negligible amount of power savings.
- * If not (unlikely), enable L0S, so there is at least some
- * power savings, even without L1.
+ * L0S states have been found to be unstable with our devices
+ * and in newer hardware they are not officially supported at
+ * all, so we must always set the L0S_DISABLED bit.
*/
+ iwl_set_bit(trans, CSR_GIO_REG, CSR_GIO_REG_VAL_L0S_DISABLED);
+
pcie_capability_read_word(trans_pcie->pci_dev, PCI_EXP_LNKCTL, &lctl);
- if (lctl & PCI_EXP_LNKCTL_ASPM_L1)
- iwl_set_bit(trans, CSR_GIO_REG, CSR_GIO_REG_VAL_L0S_ENABLED);
- else
- iwl_clear_bit(trans, CSR_GIO_REG, CSR_GIO_REG_VAL_L0S_ENABLED);
trans->pm_support = !(lctl & PCI_EXP_LNKCTL_ASPM_L0S);
pcie_capability_read_word(trans_pcie->pci_dev, PCI_EXP_DEVCTL2, &cap);
@@ -487,8 +482,7 @@ static void iwl_pcie_apm_lp_xtal_enable(struct iwl_trans *trans)
* Clear "initialization complete" bit to move adapter from
* D0A* (powered-up Active) --> D0U* (Uninitialized) state.
*/
- iwl_clear_bit(trans, CSR_GP_CNTRL,
- BIT(trans->trans_cfg->csr->flag_init_done));
+ iwl_clear_bit(trans, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
/* Activates XTAL resources monitor */
__iwl_trans_pcie_set_bit(trans, CSR_MONITOR_CFG_REG,
@@ -510,12 +504,11 @@ void iwl_pcie_apm_stop_master(struct iwl_trans *trans)
int ret;
/* stop device's busmaster DMA activity */
- iwl_set_bit(trans, trans->trans_cfg->csr->addr_sw_reset,
- BIT(trans->trans_cfg->csr->flag_stop_master));
+ iwl_set_bit(trans, CSR_RESET, CSR_RESET_REG_FLAG_STOP_MASTER);
- ret = iwl_poll_bit(trans, trans->trans_cfg->csr->addr_sw_reset,
- BIT(trans->trans_cfg->csr->flag_master_dis),
- BIT(trans->trans_cfg->csr->flag_master_dis), 100);
+ ret = iwl_poll_bit(trans, CSR_RESET,
+ CSR_RESET_REG_FLAG_MASTER_DISABLED,
+ CSR_RESET_REG_FLAG_MASTER_DISABLED, 100);
if (ret < 0)
IWL_WARN(trans, "Master Disable Timed Out, 100 usec\n");
@@ -564,8 +557,7 @@ static void iwl_pcie_apm_stop(struct iwl_trans *trans, bool op_mode_leave)
* Clear "initialization complete" bit to move adapter from
* D0A* (powered-up Active) --> D0U* (Uninitialized) state.
*/
- iwl_clear_bit(trans, CSR_GP_CNTRL,
- BIT(trans->trans_cfg->csr->flag_init_done));
+ iwl_clear_bit(trans, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
}
static int iwl_pcie_nic_init(struct iwl_trans *trans)
@@ -1270,7 +1262,7 @@ static void _iwl_trans_pcie_stop_device(struct iwl_trans *trans)
/* Make sure (redundant) we've released our request to stay awake */
iwl_clear_bit(trans, CSR_GP_CNTRL,
- BIT(trans->trans_cfg->csr->flag_mac_access_req));
+ CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
/* Stop the device, and put it in low power state */
iwl_pcie_apm_stop(trans, false);
@@ -1494,9 +1486,8 @@ void iwl_pcie_d3_complete_suspend(struct iwl_trans *trans,
iwl_pcie_synchronize_irqs(trans);
iwl_clear_bit(trans, CSR_GP_CNTRL,
- BIT(trans->trans_cfg->csr->flag_mac_access_req));
- iwl_clear_bit(trans, CSR_GP_CNTRL,
- BIT(trans->trans_cfg->csr->flag_init_done));
+ CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
+ iwl_clear_bit(trans, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
if (reset) {
/*
@@ -1561,7 +1552,7 @@ static int iwl_trans_pcie_d3_resume(struct iwl_trans *trans,
}
iwl_set_bit(trans, CSR_GP_CNTRL,
- BIT(trans->trans_cfg->csr->flag_mac_access_req));
+ CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
ret = iwl_finish_nic_init(trans, trans->trans_cfg);
if (ret)
@@ -1583,7 +1574,7 @@ static int iwl_trans_pcie_d3_resume(struct iwl_trans *trans,
if (!reset) {
iwl_clear_bit(trans, CSR_GP_CNTRL,
- BIT(trans->trans_cfg->csr->flag_mac_access_req));
+ CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
} else {
iwl_trans_pcie_tx_reset(trans);
@@ -1945,6 +1936,11 @@ static void iwl_trans_pcie_configure(struct iwl_trans *trans,
trans_pcie->rx_buf_size = trans_cfg->rx_buf_size;
trans_pcie->rx_page_order =
iwl_trans_get_rb_size_order(trans_pcie->rx_buf_size);
+ trans_pcie->rx_buf_bytes =
+ iwl_trans_get_rb_size(trans_pcie->rx_buf_size);
+ trans_pcie->supported_dma_mask = DMA_BIT_MASK(12);
+ if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210)
+ trans_pcie->supported_dma_mask = DMA_BIT_MASK(11);
trans_pcie->bc_table_dword = trans_cfg->bc_table_dword;
trans_pcie->scd_set_active = trans_cfg->scd_set_active;
@@ -2054,7 +2050,7 @@ static bool iwl_trans_pcie_grab_nic_access(struct iwl_trans *trans,
/* this bit wakes up the NIC */
__iwl_trans_pcie_set_bit(trans, CSR_GP_CNTRL,
- BIT(trans->trans_cfg->csr->flag_mac_access_req));
+ CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_8000)
udelay(2);
@@ -2079,8 +2075,8 @@ static bool iwl_trans_pcie_grab_nic_access(struct iwl_trans *trans,
* and do not save/restore SRAM when power cycling.
*/
ret = iwl_poll_bit(trans, CSR_GP_CNTRL,
- BIT(trans->trans_cfg->csr->flag_val_mac_access_en),
- (BIT(trans->trans_cfg->csr->flag_mac_clock_ready) |
+ CSR_GP_CNTRL_REG_VAL_MAC_ACCESS_EN,
+ (CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY |
CSR_GP_CNTRL_REG_FLAG_GOING_TO_SLEEP), 15000);
if (unlikely(ret < 0)) {
u32 cntrl = iwl_read32(trans, CSR_GP_CNTRL);
@@ -2162,7 +2158,7 @@ static void iwl_trans_pcie_release_nic_access(struct iwl_trans *trans,
goto out;
__iwl_trans_pcie_clear_bit(trans, CSR_GP_CNTRL,
- BIT(trans->trans_cfg->csr->flag_mac_access_req));
+ CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
/*
* Above we read the CSR_GP_CNTRL register, which will flush
* any previous writes, but we need the write that clears the
@@ -2963,7 +2959,7 @@ static u32 iwl_trans_pcie_dump_rbs(struct iwl_trans *trans,
int allocated_rb_nums)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
- int max_len = PAGE_SIZE << trans_pcie->rx_page_order;
+ int max_len = trans_pcie->rx_buf_bytes;
/* Dump RBs is supported only for pre-9000 devices (1 queue) */
struct iwl_rxq *rxq = &trans_pcie->rxq[0];
u32 i, r, j, rb_len = 0;
@@ -2989,9 +2985,9 @@ static u32 iwl_trans_pcie_dump_rbs(struct iwl_trans *trans,
rb->index = cpu_to_le32(i);
memcpy(rb->data, page_address(rxb->page), max_len);
/* remap the page for the free benefit */
- rxb->page_dma = dma_map_page(trans->dev, rxb->page, 0,
- max_len,
- DMA_FROM_DEVICE);
+ rxb->page_dma = dma_map_page(trans->dev, rxb->page,
+ rxb->offset, max_len,
+ DMA_FROM_DEVICE);
*data = iwl_fw_error_next_data(*data);
}
@@ -3460,19 +3456,34 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,
{
struct iwl_trans_pcie *trans_pcie;
struct iwl_trans *trans;
- int ret, addr_size;
+ int ret, addr_size, txcmd_size, txcmd_align;
+ const struct iwl_trans_ops *ops = &trans_ops_pcie_gen2;
+
+ if (!cfg_trans->gen2) {
+ ops = &trans_ops_pcie;
+ txcmd_size = sizeof(struct iwl_tx_cmd);
+ txcmd_align = sizeof(void *);
+ } else if (cfg_trans->device_family < IWL_DEVICE_FAMILY_AX210) {
+ txcmd_size = sizeof(struct iwl_tx_cmd_gen2);
+ txcmd_align = 64;
+ } else {
+ txcmd_size = sizeof(struct iwl_tx_cmd_gen3);
+ txcmd_align = 128;
+ }
+
+ txcmd_size += sizeof(struct iwl_cmd_header);
+ txcmd_size += 36; /* biggest possible 802.11 header */
+
+ /* Ensure device TX cmd cannot reach/cross a page boundary in gen2 */
+ if (WARN_ON(cfg_trans->gen2 && txcmd_size >= txcmd_align))
+ return ERR_PTR(-EINVAL);
ret = pcim_enable_device(pdev);
if (ret)
return ERR_PTR(ret);
- if (cfg_trans->gen2)
- trans = iwl_trans_alloc(sizeof(struct iwl_trans_pcie),
- &pdev->dev, &trans_ops_pcie_gen2);
- else
- trans = iwl_trans_alloc(sizeof(struct iwl_trans_pcie),
- &pdev->dev, &trans_ops_pcie);
-
+ trans = iwl_trans_alloc(sizeof(struct iwl_trans_pcie), &pdev->dev, ops,
+ txcmd_size, txcmd_align);
if (!trans)
return ERR_PTR(-ENOMEM);
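
The WARN_ON above encodes a simple invariant: if commands are placed at offsets that are multiples of txcmd_align, txcmd_align divides PAGE_SIZE, and txcmd_size stays below txcmd_align, then no command can end at or cross a page boundary. A standalone illustration (userspace C; the concrete values are assumptions matching the pre-AX210 gen2 case):

#include <assert.h>

int main(void)
{
        const unsigned int page_size = 4096;
        const unsigned int txcmd_align = 64;    /* assumed gen2 alignment */
        const unsigned int txcmd_size = 60;     /* must stay < txcmd_align */
        unsigned int off;

        for (off = 0; off + txcmd_align <= page_size; off += txcmd_align)
                assert(off + txcmd_size <= page_size);  /* never crosses */
        return 0;
}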
@@ -3482,6 +3493,7 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,
trans_pcie->opmode_down = true;
spin_lock_init(&trans_pcie->irq_lock);
spin_lock_init(&trans_pcie->reg_lock);
+ spin_lock_init(&trans_pcie->alloc_page_lock);
mutex_init(&trans_pcie->mutex);
init_waitqueue_head(&trans_pcie->ucode_write_waitq);
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c b/drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c
index 8ca0250de99e..86fc00167817 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c
@@ -221,6 +221,17 @@ static int iwl_pcie_gen2_set_tb(struct iwl_trans *trans,
int idx = iwl_pcie_gen2_get_num_tbs(trans, tfd);
struct iwl_tfh_tb *tb;
+ /*
+ * Only WARN here so we learn about the issue; we can't fail
+ * cleanly because not every caller currently checks the errors
+ * returned from this function. It can only return an error when
+ * there is no more TB space, so callers that know there is
+ * enough space don't always check the return value.
+ */
+ WARN(iwl_pcie_crosses_4g_boundary(addr, len),
+ "possible DMA problem with iova:0x%llx, len:%d\n",
+ (unsigned long long)addr, len);
+
if (WARN_ON(idx >= IWL_TFH_NUM_TBS))
return -EINVAL;
tb = &tfd->tbs[idx];
@@ -240,13 +251,114 @@ static int iwl_pcie_gen2_set_tb(struct iwl_trans *trans,
return idx;
}
+static struct page *get_workaround_page(struct iwl_trans *trans,
+ struct sk_buff *skb)
+{
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+ struct page **page_ptr;
+ struct page *ret;
+
+ page_ptr = (void *)((u8 *)skb->cb + trans_pcie->page_offs);
+
+ ret = alloc_page(GFP_ATOMIC);
+ if (!ret)
+ return NULL;
+
+ /* set the chaining pointer to the previous page, if there is one */
+ *(void **)(page_address(ret) + PAGE_SIZE - sizeof(void *)) = *page_ptr;
+ *page_ptr = ret;
+
+ return ret;
+}
+
+/*
+ * Add a TB and if needed apply the FH HW bug workaround;
+ * meta != NULL indicates that it's a page mapping and we
+ * need to dma_unmap_page() and set the meta->tbs bit in
+ * this case.
+ */
+static int iwl_pcie_gen2_set_tb_with_wa(struct iwl_trans *trans,
+ struct sk_buff *skb,
+ struct iwl_tfh_tfd *tfd,
+ dma_addr_t phys, void *virt,
+ u16 len, struct iwl_cmd_meta *meta)
+{
+ dma_addr_t oldphys = phys;
+ struct page *page;
+ int ret;
+
+ if (unlikely(dma_mapping_error(trans->dev, phys)))
+ return -ENOMEM;
+
+ if (likely(!iwl_pcie_crosses_4g_boundary(phys, len))) {
+ ret = iwl_pcie_gen2_set_tb(trans, tfd, phys, len);
+
+ if (ret < 0)
+ goto unmap;
+
+ if (meta)
+ meta->tbs |= BIT(ret);
+
+ ret = 0;
+ goto trace;
+ }
+
+ /*
+ * Work around a hardware bug. If (as expressed in the
+ * condition above) the TB crosses a 2^32 address boundary,
+ * the device may access the buffer with a wrong
+ * address.
+ * To work around it, copy the data elsewhere and make
+ * a new mapping for it so the device will not fail.
+ */
+
+ if (WARN_ON(len > PAGE_SIZE - sizeof(void *))) {
+ ret = -ENOBUFS;
+ goto unmap;
+ }
+
+ page = get_workaround_page(trans, skb);
+ if (!page) {
+ ret = -ENOMEM;
+ goto unmap;
+ }
+
+ memcpy(page_address(page), virt, len);
+
+ phys = dma_map_single(trans->dev, page_address(page), len,
+ DMA_TO_DEVICE);
+ if (unlikely(dma_mapping_error(trans->dev, phys)))
+ return -ENOMEM;
+ ret = iwl_pcie_gen2_set_tb(trans, tfd, phys, len);
+ if (ret < 0) {
+ /* unmap the new allocation as single */
+ oldphys = phys;
+ meta = NULL;
+ goto unmap;
+ }
+ IWL_WARN(trans,
+ "TB bug workaround: copied %d bytes from 0x%llx to 0x%llx\n",
+ len, (unsigned long long)oldphys, (unsigned long long)phys);
+
+ ret = 0;
+unmap:
+ if (meta)
+ dma_unmap_page(trans->dev, oldphys, len, DMA_TO_DEVICE);
+ else
+ dma_unmap_single(trans->dev, oldphys, len, DMA_TO_DEVICE);
+trace:
+ trace_iwlwifi_dev_tx_tb(trans->dev, skb, virt, phys, len);
+
+ return ret;
+}
+
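
The workaround above hinges on detecting whether a DMA buffer straddles a 2^32 address boundary; iwl_pcie_crosses_4g_boundary() itself is not part of this hunk, but the test it has to perform reduces to comparing the high bits of the addresses of the first and last byte. A hedged sketch:

#include <stdbool.h>
#include <stdint.h>

/* True when [addr, addr + len) spans a 2^32 boundary; assumes len > 0. */
static bool crosses_4g_boundary(uint64_t addr, uint16_t len)
{
        return (addr >> 32) != ((addr + len - 1) >> 32);
}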
static int iwl_pcie_gen2_build_amsdu(struct iwl_trans *trans,
struct sk_buff *skb,
struct iwl_tfh_tfd *tfd, int start_len,
- u8 hdr_len, struct iwl_device_cmd *dev_cmd)
+ u8 hdr_len,
+ struct iwl_device_tx_cmd *dev_cmd)
{
#ifdef CONFIG_INET
- struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
struct iwl_tx_cmd_gen2 *tx_cmd = (void *)dev_cmd->payload;
struct ieee80211_hdr *hdr = (void *)skb->data;
unsigned int snap_ip_tcp_hdrlen, ip_hdrlen, total_len, hdr_room;
@@ -254,7 +366,6 @@ static int iwl_pcie_gen2_build_amsdu(struct iwl_trans *trans,
u16 length, amsdu_pad;
u8 *start_hdr;
struct iwl_tso_hdr_page *hdr_page;
- struct page **page_ptr;
struct tso_t tso;
trace_iwlwifi_dev_tx(trans->dev, skb, tfd, sizeof(*tfd),
@@ -270,14 +381,11 @@ static int iwl_pcie_gen2_build_amsdu(struct iwl_trans *trans,
(3 + snap_ip_tcp_hdrlen + sizeof(struct ethhdr));
/* Our device supports 9 segments at most, it will fit in 1 page */
- hdr_page = get_page_hdr(trans, hdr_room);
+ hdr_page = get_page_hdr(trans, hdr_room, skb);
if (!hdr_page)
return -ENOMEM;
- get_page(hdr_page->page);
start_hdr = hdr_page->pos;
- page_ptr = (void *)((u8 *)skb->cb + trans_pcie->page_offs);
- *page_ptr = hdr_page->page;
/*
* Pull the ieee80211 header to be able to use TSO core,
@@ -332,6 +440,11 @@ static int iwl_pcie_gen2_build_amsdu(struct iwl_trans *trans,
dev_kfree_skb(csum_skb);
goto out_err;
}
+ /*
+ * No need for _with_wa, this is from the TSO page and
+ * we leave some space at the end of it so can't hit
+ * the buggy scenario.
+ */
iwl_pcie_gen2_set_tb(trans, tfd, tb_phys, tb_len);
trace_iwlwifi_dev_tx_tb(trans->dev, skb, start_hdr,
tb_phys, tb_len);
@@ -343,16 +456,18 @@ static int iwl_pcie_gen2_build_amsdu(struct iwl_trans *trans,
/* put the payload */
while (data_left) {
+ int ret;
+
tb_len = min_t(unsigned int, tso.size, data_left);
tb_phys = dma_map_single(trans->dev, tso.data,
tb_len, DMA_TO_DEVICE);
- if (unlikely(dma_mapping_error(trans->dev, tb_phys))) {
+ ret = iwl_pcie_gen2_set_tb_with_wa(trans, skb, tfd,
+ tb_phys, tso.data,
+ tb_len, NULL);
+ if (ret) {
dev_kfree_skb(csum_skb);
goto out_err;
}
- iwl_pcie_gen2_set_tb(trans, tfd, tb_phys, tb_len);
- trace_iwlwifi_dev_tx_tb(trans->dev, skb, tso.data,
- tb_phys, tb_len);
data_left -= tb_len;
tso_build_data(skb, &tso, tb_len);
@@ -372,7 +487,7 @@ out_err:
static struct
iwl_tfh_tfd *iwl_pcie_gen2_build_tx_amsdu(struct iwl_trans *trans,
struct iwl_txq *txq,
- struct iwl_device_cmd *dev_cmd,
+ struct iwl_device_tx_cmd *dev_cmd,
struct sk_buff *skb,
struct iwl_cmd_meta *out_meta,
int hdr_len,
@@ -386,6 +501,11 @@ iwl_tfh_tfd *iwl_pcie_gen2_build_tx_amsdu(struct iwl_trans *trans,
tb_phys = iwl_pcie_get_first_tb_dma(txq, idx);
+ /*
+ * No need for _with_wa, the first TB allocation is aligned up
+ * to a 64-byte boundary and thus can't be at the end or cross
+ * a page boundary (much less a 2^32 boundary).
+ */
iwl_pcie_gen2_set_tb(trans, tfd, tb_phys, IWL_FIRST_TB_SIZE);
/*
@@ -404,6 +524,10 @@ iwl_tfh_tfd *iwl_pcie_gen2_build_tx_amsdu(struct iwl_trans *trans,
tb_phys = dma_map_single(trans->dev, tb1_addr, len, DMA_TO_DEVICE);
if (unlikely(dma_mapping_error(trans->dev, tb_phys)))
goto out_err;
+ /*
+ * No need for _with_wa(), we ensure (via alignment) that the data
+ * here can never cross or end at a page boundary.
+ */
iwl_pcie_gen2_set_tb(trans, tfd, tb_phys, len);
if (iwl_pcie_gen2_build_amsdu(trans, skb, tfd,
@@ -430,24 +554,19 @@ static int iwl_pcie_gen2_tx_add_frags(struct iwl_trans *trans,
for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
dma_addr_t tb_phys;
- int tb_idx;
+ unsigned int fragsz = skb_frag_size(frag);
+ int ret;
- if (!skb_frag_size(frag))
+ if (!fragsz)
continue;
tb_phys = skb_frag_dma_map(trans->dev, frag, 0,
- skb_frag_size(frag), DMA_TO_DEVICE);
-
- if (unlikely(dma_mapping_error(trans->dev, tb_phys)))
- return -ENOMEM;
- tb_idx = iwl_pcie_gen2_set_tb(trans, tfd, tb_phys,
- skb_frag_size(frag));
- trace_iwlwifi_dev_tx_tb(trans->dev, skb, skb_frag_address(frag),
- tb_phys, skb_frag_size(frag));
- if (tb_idx < 0)
- return tb_idx;
-
- out_meta->tbs |= BIT(tb_idx);
+ fragsz, DMA_TO_DEVICE);
+ ret = iwl_pcie_gen2_set_tb_with_wa(trans, skb, tfd, tb_phys,
+ skb_frag_address(frag),
+ fragsz, out_meta);
+ if (ret)
+ return ret;
}
return 0;
@@ -456,7 +575,7 @@ static int iwl_pcie_gen2_tx_add_frags(struct iwl_trans *trans,
static struct
iwl_tfh_tfd *iwl_pcie_gen2_build_tx(struct iwl_trans *trans,
struct iwl_txq *txq,
- struct iwl_device_cmd *dev_cmd,
+ struct iwl_device_tx_cmd *dev_cmd,
struct sk_buff *skb,
struct iwl_cmd_meta *out_meta,
int hdr_len,
@@ -475,6 +594,11 @@ iwl_tfh_tfd *iwl_pcie_gen2_build_tx(struct iwl_trans *trans,
/* The first TB points to bi-directional DMA data */
memcpy(&txq->first_tb_bufs[idx], dev_cmd, IWL_FIRST_TB_SIZE);
+ /*
+ * No need for _with_wa, the first TB allocation is aligned up
+ * to a 64-byte boundary and thus can't be at the end or cross
+ * a page boundary (much less a 2^32 boundary).
+ */
iwl_pcie_gen2_set_tb(trans, tfd, tb_phys, IWL_FIRST_TB_SIZE);
/*
@@ -496,6 +620,10 @@ iwl_tfh_tfd *iwl_pcie_gen2_build_tx(struct iwl_trans *trans,
tb_phys = dma_map_single(trans->dev, tb1_addr, tb1_len, DMA_TO_DEVICE);
if (unlikely(dma_mapping_error(trans->dev, tb_phys)))
goto out_err;
+ /*
+ * No need for _with_wa(), we ensure (via alignment) that the data
+ * here can never cross or end at a page boundary.
+ */
iwl_pcie_gen2_set_tb(trans, tfd, tb_phys, tb1_len);
trace_iwlwifi_dev_tx(trans->dev, skb, tfd, sizeof(*tfd), &dev_cmd->hdr,
IWL_FIRST_TB_SIZE + tb1_len, hdr_len);
@@ -504,26 +632,30 @@ iwl_tfh_tfd *iwl_pcie_gen2_build_tx(struct iwl_trans *trans,
tb2_len = skb_headlen(skb) - hdr_len;
if (tb2_len > 0) {
+ int ret;
+
tb_phys = dma_map_single(trans->dev, skb->data + hdr_len,
tb2_len, DMA_TO_DEVICE);
- if (unlikely(dma_mapping_error(trans->dev, tb_phys)))
+ ret = iwl_pcie_gen2_set_tb_with_wa(trans, skb, tfd, tb_phys,
+ skb->data + hdr_len, tb2_len,
+ NULL);
+ if (ret)
goto out_err;
- iwl_pcie_gen2_set_tb(trans, tfd, tb_phys, tb2_len);
- trace_iwlwifi_dev_tx_tb(trans->dev, skb, skb->data + hdr_len,
- tb_phys, tb2_len);
}
if (iwl_pcie_gen2_tx_add_frags(trans, skb, tfd, out_meta))
goto out_err;
skb_walk_frags(skb, frag) {
+ int ret;
+
tb_phys = dma_map_single(trans->dev, frag->data,
skb_headlen(frag), DMA_TO_DEVICE);
- if (unlikely(dma_mapping_error(trans->dev, tb_phys)))
+ ret = iwl_pcie_gen2_set_tb_with_wa(trans, skb, tfd, tb_phys,
+ frag->data,
+ skb_headlen(frag), NULL);
+ if (ret)
goto out_err;
- iwl_pcie_gen2_set_tb(trans, tfd, tb_phys, skb_headlen(frag));
- trace_iwlwifi_dev_tx_tb(trans->dev, skb, frag->data,
- tb_phys, skb_headlen(frag));
if (iwl_pcie_gen2_tx_add_frags(trans, frag, tfd, out_meta))
goto out_err;
}
@@ -538,7 +670,7 @@ out_err:
static
struct iwl_tfh_tfd *iwl_pcie_gen2_build_tfd(struct iwl_trans *trans,
struct iwl_txq *txq,
- struct iwl_device_cmd *dev_cmd,
+ struct iwl_device_tx_cmd *dev_cmd,
struct sk_buff *skb,
struct iwl_cmd_meta *out_meta)
{
@@ -578,7 +710,7 @@ struct iwl_tfh_tfd *iwl_pcie_gen2_build_tfd(struct iwl_trans *trans,
}
int iwl_trans_pcie_gen2_tx(struct iwl_trans *trans, struct sk_buff *skb,
- struct iwl_device_cmd *dev_cmd, int txq_id)
+ struct iwl_device_tx_cmd *dev_cmd, int txq_id)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
struct iwl_cmd_meta *out_meta;
@@ -587,6 +719,10 @@ int iwl_trans_pcie_gen2_tx(struct iwl_trans *trans, struct sk_buff *skb,
int idx;
void *tfd;
+ if (WARN_ONCE(txq_id >= IWL_MAX_TVQM_QUEUES,
+ "queue %d out of range", txq_id))
+ return -EINVAL;
+
if (WARN_ONCE(!test_bit(txq_id, trans_pcie->queue_used),
"TX on unused queue %d\n", txq_id))
return -EINVAL;
@@ -603,7 +739,7 @@ int iwl_trans_pcie_gen2_tx(struct iwl_trans *trans, struct sk_buff *skb,
/* don't put the packet on the ring, if there is no room */
if (unlikely(iwl_queue_space(trans, txq) < 3)) {
- struct iwl_device_cmd **dev_cmd_ptr;
+ struct iwl_device_tx_cmd **dev_cmd_ptr;
dev_cmd_ptr = (void *)((u8 *)skb->cb +
trans_pcie->dev_cmd_offs);
@@ -1101,9 +1237,15 @@ void iwl_pcie_gen2_txq_free_memory(struct iwl_trans *trans,
static void iwl_pcie_gen2_txq_free(struct iwl_trans *trans, int txq_id)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
- struct iwl_txq *txq = trans_pcie->txq[txq_id];
+ struct iwl_txq *txq;
int i;
+ if (WARN_ONCE(txq_id >= IWL_MAX_TVQM_QUEUES,
+ "queue %d out of range", txq_id))
+ return;
+
+ txq = trans_pcie->txq[txq_id];
+
if (WARN_ON(!txq))
return;
@@ -1258,6 +1400,10 @@ void iwl_trans_pcie_dyn_txq_free(struct iwl_trans *trans, int queue)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+ if (WARN(queue >= IWL_MAX_TVQM_QUEUES,
+ "queue %d out of range", queue))
+ return;
+
/*
* Upon HW Rfkill - we stop the device, and then stop the queues
* in the op_mode. Just for the sake of the simplicity of the op_mode,
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/tx.c b/drivers/net/wireless/intel/iwlwifi/pcie/tx.c
index f21f16ab2a97..2f69a6469fe7 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/tx.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/tx.c
@@ -213,8 +213,8 @@ static void iwl_pcie_txq_update_byte_cnt_tbl(struct iwl_trans *trans,
u8 sec_ctl = 0;
u16 len = byte_cnt + IWL_TX_CRC_SIZE + IWL_TX_DELIMITER_SIZE;
__le16 bc_ent;
- struct iwl_tx_cmd *tx_cmd =
- (void *)txq->entries[txq->write_ptr].cmd->payload;
+ struct iwl_device_tx_cmd *dev_cmd = txq->entries[txq->write_ptr].cmd;
+ struct iwl_tx_cmd *tx_cmd = (void *)dev_cmd->payload;
u8 sta_id = tx_cmd->sta_id;
scd_bc_tbl = trans_pcie->scd_bc_tbls.addr;
@@ -257,8 +257,8 @@ static void iwl_pcie_txq_inval_byte_cnt_tbl(struct iwl_trans *trans,
int read_ptr = txq->read_ptr;
u8 sta_id = 0;
__le16 bc_ent;
- struct iwl_tx_cmd *tx_cmd =
- (void *)txq->entries[read_ptr].cmd->payload;
+ struct iwl_device_tx_cmd *dev_cmd = txq->entries[read_ptr].cmd;
+ struct iwl_tx_cmd *tx_cmd = (void *)dev_cmd->payload;
WARN_ON(read_ptr >= TFD_QUEUE_SIZE_MAX);
@@ -306,7 +306,7 @@ static void iwl_pcie_txq_inc_wr_ptr(struct iwl_trans *trans,
IWL_DEBUG_INFO(trans, "Tx queue %d requesting wakeup, GP1 = 0x%x\n",
txq_id, reg);
iwl_set_bit(trans, CSR_GP_CNTRL,
- BIT(trans->trans_cfg->csr->flag_mac_access_req));
+ CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
txq->need_update = true;
return;
}
@@ -624,12 +624,18 @@ void iwl_pcie_free_tso_page(struct iwl_trans_pcie *trans_pcie,
struct sk_buff *skb)
{
struct page **page_ptr;
+ struct page *next;
page_ptr = (void *)((u8 *)skb->cb + trans_pcie->page_offs);
+ next = *page_ptr;
+ *page_ptr = NULL;
- if (*page_ptr) {
- __free_page(*page_ptr);
- *page_ptr = NULL;
+ while (next) {
+ struct page *tmp = next;
+
+ next = *(void **)(page_address(next) + PAGE_SIZE -
+ sizeof(void *));
+ __free_page(tmp);
}
}
@@ -646,7 +652,7 @@ static void iwl_pcie_clear_cmd_in_flight(struct iwl_trans *trans)
trans_pcie->cmd_hold_nic_awake = false;
__iwl_trans_pcie_clear_bit(trans, CSR_GP_CNTRL,
- BIT(trans->trans_cfg->csr->flag_mac_access_req));
+ CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
}
/*
@@ -1196,7 +1202,7 @@ void iwl_trans_pcie_reclaim(struct iwl_trans *trans, int txq_id, int ssn,
while (!skb_queue_empty(&overflow_skbs)) {
struct sk_buff *skb = __skb_dequeue(&overflow_skbs);
- struct iwl_device_cmd *dev_cmd_ptr;
+ struct iwl_device_tx_cmd *dev_cmd_ptr;
dev_cmd_ptr = *(void **)((u8 *)skb->cb +
trans_pcie->dev_cmd_offs);
@@ -1255,16 +1261,16 @@ static int iwl_pcie_set_cmd_in_flight(struct iwl_trans *trans,
if (trans->trans_cfg->base_params->apmg_wake_up_wa &&
!trans_pcie->cmd_hold_nic_awake) {
__iwl_trans_pcie_set_bit(trans, CSR_GP_CNTRL,
- BIT(trans->trans_cfg->csr->flag_mac_access_req));
+ CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
ret = iwl_poll_bit(trans, CSR_GP_CNTRL,
- BIT(trans->trans_cfg->csr->flag_val_mac_access_en),
- (BIT(trans->trans_cfg->csr->flag_mac_clock_ready) |
+ CSR_GP_CNTRL_REG_VAL_MAC_ACCESS_EN,
+ (CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY |
CSR_GP_CNTRL_REG_FLAG_GOING_TO_SLEEP),
15000);
if (ret < 0) {
__iwl_trans_pcie_clear_bit(trans, CSR_GP_CNTRL,
- BIT(trans->trans_cfg->csr->flag_mac_access_req));
+ CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
IWL_ERR(trans, "Failed to wake NIC for hcmd\n");
return -EIO;
}
@@ -2052,17 +2058,34 @@ static int iwl_fill_data_tbs(struct iwl_trans *trans, struct sk_buff *skb,
}
#ifdef CONFIG_INET
-struct iwl_tso_hdr_page *get_page_hdr(struct iwl_trans *trans, size_t len)
+struct iwl_tso_hdr_page *get_page_hdr(struct iwl_trans *trans, size_t len,
+ struct sk_buff *skb)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
struct iwl_tso_hdr_page *p = this_cpu_ptr(trans_pcie->tso_hdr_page);
+ struct page **page_ptr;
+
+ page_ptr = (void *)((u8 *)skb->cb + trans_pcie->page_offs);
+
+ if (WARN_ON(*page_ptr))
+ return NULL;
if (!p->page)
goto alloc;
- /* enough room on this page */
- if (p->pos + len < (u8 *)page_address(p->page) + PAGE_SIZE)
- return p;
+ /*
+ * Check if there's enough room on this page
+ *
+ * Note that we put a page chaining pointer *last* in the
+ * page - we need it somewhere, and if it's there then we
+ * avoid DMA mapping the last bits of the page which may
+ * trigger the 32-bit boundary hardware bug.
+ *
+ * (see also get_workaround_page() in tx-gen2.c)
+ */
+ if (p->pos + len < (u8 *)page_address(p->page) + PAGE_SIZE -
+ sizeof(void *))
+ goto out;
/* We don't have enough room on this page, get a new one. */
__free_page(p->page);
@@ -2072,6 +2095,11 @@ alloc:
if (!p->page)
return NULL;
p->pos = page_address(p->page);
+ /* set the chaining pointer to NULL */
+ *(void **)(page_address(p->page) + PAGE_SIZE - sizeof(void *)) = NULL;
+out:
+ *page_ptr = p->page;
+ get_page(p->page);
return p;
}
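
The reserved tail pointer described in the comment above gives every TSO/workaround page a link to the page allocated before it, so the single page pointer in skb->cb can anchor a whole chain that iwl_pcie_free_tso_page() later walks. Sketch of the convention, with illustrative helper names:

#include <linux/mm.h>

static inline void page_chain_link(struct page *page, struct page *prev)
{
        /* the last sizeof(void *) bytes of the page hold the chain pointer */
        *(void **)(page_address(page) + PAGE_SIZE - sizeof(void *)) = prev;
}

static inline struct page *page_chain_next(struct page *page)
{
        return *(void **)(page_address(page) + PAGE_SIZE - sizeof(void *));
}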
@@ -2097,7 +2125,8 @@ static void iwl_compute_pseudo_hdr_csum(void *iph, struct tcphdr *tcph,
static int iwl_fill_data_tbs_amsdu(struct iwl_trans *trans, struct sk_buff *skb,
struct iwl_txq *txq, u8 hdr_len,
struct iwl_cmd_meta *out_meta,
- struct iwl_device_cmd *dev_cmd, u16 tb1_len)
+ struct iwl_device_tx_cmd *dev_cmd,
+ u16 tb1_len)
{
struct iwl_tx_cmd *tx_cmd = (void *)dev_cmd->payload;
struct iwl_trans_pcie *trans_pcie = txq->trans_pcie;
@@ -2107,7 +2136,6 @@ static int iwl_fill_data_tbs_amsdu(struct iwl_trans *trans, struct sk_buff *skb,
u16 length, iv_len, amsdu_pad;
u8 *start_hdr;
struct iwl_tso_hdr_page *hdr_page;
- struct page **page_ptr;
struct tso_t tso;
/* if the packet is protected, then it must be CCMP or GCMP */
@@ -2130,14 +2158,11 @@ static int iwl_fill_data_tbs_amsdu(struct iwl_trans *trans, struct sk_buff *skb,
(3 + snap_ip_tcp_hdrlen + sizeof(struct ethhdr)) + iv_len;
/* Our device supports 9 segments at most, it will fit in 1 page */
- hdr_page = get_page_hdr(trans, hdr_room);
+ hdr_page = get_page_hdr(trans, hdr_room, skb);
if (!hdr_page)
return -ENOMEM;
- get_page(hdr_page->page);
start_hdr = hdr_page->pos;
- page_ptr = (void *)((u8 *)skb->cb + trans_pcie->page_offs);
- *page_ptr = hdr_page->page;
memcpy(hdr_page->pos, skb->data + hdr_len, iv_len);
hdr_page->pos += iv_len;
@@ -2279,7 +2304,8 @@ static int iwl_fill_data_tbs_amsdu(struct iwl_trans *trans, struct sk_buff *skb,
static int iwl_fill_data_tbs_amsdu(struct iwl_trans *trans, struct sk_buff *skb,
struct iwl_txq *txq, u8 hdr_len,
struct iwl_cmd_meta *out_meta,
- struct iwl_device_cmd *dev_cmd, u16 tb1_len)
+ struct iwl_device_tx_cmd *dev_cmd,
+ u16 tb1_len)
{
/* No A-MSDU without CONFIG_INET */
WARN_ON(1);
@@ -2289,7 +2315,7 @@ static int iwl_fill_data_tbs_amsdu(struct iwl_trans *trans, struct sk_buff *skb,
#endif /* CONFIG_INET */
int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb,
- struct iwl_device_cmd *dev_cmd, int txq_id)
+ struct iwl_device_tx_cmd *dev_cmd, int txq_id)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
struct ieee80211_hdr *hdr;
@@ -2346,7 +2372,7 @@ int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb,
/* don't put the packet on the ring, if there is no room */
if (unlikely(iwl_queue_space(trans, txq) < 3)) {
- struct iwl_device_cmd **dev_cmd_ptr;
+ struct iwl_device_tx_cmd **dev_cmd_ptr;
dev_cmd_ptr = (void *)((u8 *)skb->cb +
trans_pcie->dev_cmd_offs);
diff --git a/drivers/net/wireless/intersil/hostap/hostap_ap.c b/drivers/net/wireless/intersil/hostap/hostap_ap.c
index 0094b1d2b577..3ec46f48cfde 100644
--- a/drivers/net/wireless/intersil/hostap/hostap_ap.c
+++ b/drivers/net/wireless/intersil/hostap/hostap_ap.c
@@ -2508,7 +2508,7 @@ static int prism2_hostapd_add_sta(struct ap_data *ap,
sta->supported_rates[0] = 2;
if (sta->tx_supp_rates & WLAN_RATE_2M)
sta->supported_rates[1] = 4;
- if (sta->tx_supp_rates & WLAN_RATE_5M5)
+ if (sta->tx_supp_rates & WLAN_RATE_5M5)
sta->supported_rates[2] = 11;
if (sta->tx_supp_rates & WLAN_RATE_11M)
sta->supported_rates[3] = 22;
diff --git a/drivers/net/wireless/intersil/hostap/hostap_main.c b/drivers/net/wireless/intersil/hostap/hostap_main.c
index 05466281afb6..de97b3304115 100644
--- a/drivers/net/wireless/intersil/hostap/hostap_main.c
+++ b/drivers/net/wireless/intersil/hostap/hostap_main.c
@@ -761,7 +761,7 @@ static void hostap_set_multicast_list(struct net_device *dev)
}
-static void prism2_tx_timeout(struct net_device *dev)
+static void prism2_tx_timeout(struct net_device *dev, unsigned int txqueue)
{
struct hostap_interface *iface;
local_info_t *local;
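
This and the analogous hunks below track a netdev core change: .ndo_tx_timeout now receives the index of the stalled TX queue. Minimal illustration of the updated callback shape (the demo_ names are placeholders, not driver symbols):

#include <linux/netdevice.h>

static void demo_tx_timeout(struct net_device *dev, unsigned int txqueue)
{
        netdev_warn(dev, "TX timeout on queue %u\n", txqueue);
        /* a real driver would typically schedule its reset work here */
}

static const struct net_device_ops demo_netdev_ops = {
        .ndo_tx_timeout = demo_tx_timeout,
};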
diff --git a/drivers/net/wireless/intersil/orinoco/main.c b/drivers/net/wireless/intersil/orinoco/main.c
index 28dac36d7c4c..00264a14e52c 100644
--- a/drivers/net/wireless/intersil/orinoco/main.c
+++ b/drivers/net/wireless/intersil/orinoco/main.c
@@ -647,7 +647,7 @@ static void __orinoco_ev_txexc(struct net_device *dev, struct hermes *hw)
netif_wake_queue(dev);
}
-void orinoco_tx_timeout(struct net_device *dev)
+void orinoco_tx_timeout(struct net_device *dev, unsigned int txqueue)
{
struct orinoco_private *priv = ndev_priv(dev);
struct net_device_stats *stats = &dev->stats;
diff --git a/drivers/net/wireless/intersil/orinoco/orinoco.h b/drivers/net/wireless/intersil/orinoco/orinoco.h
index 430862a6a24b..cdd026af100b 100644
--- a/drivers/net/wireless/intersil/orinoco/orinoco.h
+++ b/drivers/net/wireless/intersil/orinoco/orinoco.h
@@ -207,7 +207,7 @@ int orinoco_open(struct net_device *dev);
int orinoco_stop(struct net_device *dev);
void orinoco_set_multicast_list(struct net_device *dev);
int orinoco_change_mtu(struct net_device *dev, int new_mtu);
-void orinoco_tx_timeout(struct net_device *dev);
+void orinoco_tx_timeout(struct net_device *dev, unsigned int txqueue);
/********************************************************************/
/* Locking and synchronization functions */
diff --git a/drivers/net/wireless/intersil/orinoco/orinoco_usb.c b/drivers/net/wireless/intersil/orinoco/orinoco_usb.c
index 40a8b941ad5c..e753f43e0162 100644
--- a/drivers/net/wireless/intersil/orinoco/orinoco_usb.c
+++ b/drivers/net/wireless/intersil/orinoco/orinoco_usb.c
@@ -1361,7 +1361,8 @@ static int ezusb_init(struct hermes *hw)
int retval;
BUG_ON(in_interrupt());
- BUG_ON(!upriv);
+ if (!upriv)
+ return -EINVAL;
upriv->reply_count = 0;
/* Write the MAGIC number on the simulated registers to keep
@@ -1608,9 +1609,9 @@ static int ezusb_probe(struct usb_interface *interface,
/* set up the endpoint information */
/* check out the endpoints */
- iface_desc = &interface->altsetting[0].desc;
+ iface_desc = &interface->cur_altsetting->desc;
for (i = 0; i < iface_desc->bNumEndpoints; ++i) {
- ep = &interface->altsetting[0].endpoint[i].desc;
+ ep = &interface->cur_altsetting->endpoint[i].desc;
if (usb_endpoint_is_bulk_in(ep)) {
/* we found a bulk in endpoint */
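
The fix above switches endpoint discovery from altsetting[0], which is merely the first descriptor parsed, to cur_altsetting, the setting actually in use. A sketch of the safer lookup (find_bulk_in is a made-up helper):

#include <linux/errno.h>
#include <linux/usb.h>

static int find_bulk_in(struct usb_interface *intf)
{
        struct usb_host_interface *alt = intf->cur_altsetting;
        int i;

        for (i = 0; i < alt->desc.bNumEndpoints; i++) {
                struct usb_endpoint_descriptor *ep = &alt->endpoint[i].desc;

                if (usb_endpoint_is_bulk_in(ep))
                        return ep->bEndpointAddress;
        }
        return -ENODEV;
}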
diff --git a/drivers/net/wireless/intersil/prism54/islpci_eth.c b/drivers/net/wireless/intersil/prism54/islpci_eth.c
index 2b8fb07d07e7..8d680250a281 100644
--- a/drivers/net/wireless/intersil/prism54/islpci_eth.c
+++ b/drivers/net/wireless/intersil/prism54/islpci_eth.c
@@ -473,7 +473,7 @@ islpci_do_reset_and_wake(struct work_struct *work)
}
void
-islpci_eth_tx_timeout(struct net_device *ndev)
+islpci_eth_tx_timeout(struct net_device *ndev, unsigned int txqueue)
{
islpci_private *priv = netdev_priv(ndev);
diff --git a/drivers/net/wireless/intersil/prism54/islpci_eth.h b/drivers/net/wireless/intersil/prism54/islpci_eth.h
index 61f4b43c6054..e433ccdc526b 100644
--- a/drivers/net/wireless/intersil/prism54/islpci_eth.h
+++ b/drivers/net/wireless/intersil/prism54/islpci_eth.h
@@ -53,7 +53,7 @@ struct avs_80211_1_header {
void islpci_eth_cleanup_transmit(islpci_private *, isl38xx_control_block *);
netdev_tx_t islpci_eth_transmit(struct sk_buff *, struct net_device *);
int islpci_eth_receive(islpci_private *);
-void islpci_eth_tx_timeout(struct net_device *);
+void islpci_eth_tx_timeout(struct net_device *, unsigned int txqueue);
void islpci_do_reset_and_wake(struct work_struct *);
#endif /* _ISL_GEN_H */
diff --git a/drivers/net/wireless/marvell/libertas/cfg.c b/drivers/net/wireless/marvell/libertas/cfg.c
index 57edfada0665..c9401c121a14 100644
--- a/drivers/net/wireless/marvell/libertas/cfg.c
+++ b/drivers/net/wireless/marvell/libertas/cfg.c
@@ -273,6 +273,10 @@ add_ie_rates(u8 *tlv, const u8 *ie, int *nrates)
int hw, ap, ap_max = ie[1];
u8 hw_rate;
+ if (ap_max > MAX_RATES) {
+ lbs_deb_assoc("invalid rates\n");
+ return tlv;
+ }
/* Advance past IE header */
ie += 2;
@@ -1717,6 +1721,9 @@ static int lbs_ibss_join_existing(struct lbs_private *priv,
struct cmd_ds_802_11_ad_hoc_join cmd;
u8 preamble = RADIO_PREAMBLE_SHORT;
int ret = 0;
+ int hw, i;
+ u8 rates_max;
+ u8 *rates;
/* TODO: set preamble based on scan result */
ret = lbs_set_radio(priv, preamble, 1);
@@ -1775,9 +1782,12 @@ static int lbs_ibss_join_existing(struct lbs_private *priv,
if (!rates_eid) {
lbs_add_rates(cmd.bss.rates);
} else {
- int hw, i;
- u8 rates_max = rates_eid[1];
- u8 *rates = cmd.bss.rates;
+ rates_max = rates_eid[1];
+ if (rates_max > MAX_RATES) {
+ lbs_deb_join("invalid rates\n");
+ goto out;
+ }
+ rates = cmd.bss.rates;
for (hw = 0; hw < ARRAY_SIZE(lbs_rates); hw++) {
u8 hw_rate = lbs_rates[hw].bitrate / 5;
for (i = 0; i < rates_max; i++) {
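
Both checks added above guard a fixed-size destination against a length byte taken straight from over-the-air IEs. Reduced to its essentials (the MAX_RATES value is assumed; copy_rates is illustrative):

#include <stdint.h>
#include <string.h>

#define MAX_RATES 14    /* assumed capacity of the destination array */

static int copy_rates(uint8_t *dst, const uint8_t *rates_eid)
{
        uint8_t rates_max = rates_eid[1];       /* attacker-controlled */

        if (rates_max > MAX_RATES)
                return -1;      /* reject rather than overflow dst */

        memcpy(dst, rates_eid + 2, rates_max);
        return rates_max;
}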
diff --git a/drivers/net/wireless/marvell/mwifiex/main.c b/drivers/net/wireless/marvell/mwifiex/main.c
index d14e55e3c9da..7d94695e7961 100644
--- a/drivers/net/wireless/marvell/mwifiex/main.c
+++ b/drivers/net/wireless/marvell/mwifiex/main.c
@@ -1020,7 +1020,7 @@ static void mwifiex_set_multicast_list(struct net_device *dev)
* CFG802.11 network device handler for transmission timeout.
*/
static void
-mwifiex_tx_timeout(struct net_device *dev)
+mwifiex_tx_timeout(struct net_device *dev, unsigned int txqueue)
{
struct mwifiex_private *priv = mwifiex_netdev_get_priv(dev);
diff --git a/drivers/net/wireless/marvell/mwifiex/main.h b/drivers/net/wireless/marvell/mwifiex/main.h
index 547ff3c578ee..fa5634af40f7 100644
--- a/drivers/net/wireless/marvell/mwifiex/main.h
+++ b/drivers/net/wireless/marvell/mwifiex/main.h
@@ -1295,19 +1295,6 @@ mwifiex_copy_rates(u8 *dest, u32 pos, u8 *src, int len)
return pos;
}
-/* This function return interface number with the same bss_type.
- */
-static inline u8
-mwifiex_get_intf_num(struct mwifiex_adapter *adapter, u8 bss_type)
-{
- u8 i, num = 0;
-
- for (i = 0; i < adapter->priv_num; i++)
- if (adapter->priv[i] && adapter->priv[i]->bss_type == bss_type)
- num++;
- return num;
-}
-
/*
* This function returns the correct private structure pointer based
* upon the BSS type and BSS number.
diff --git a/drivers/net/wireless/marvell/mwifiex/tdls.c b/drivers/net/wireless/marvell/mwifiex/tdls.c
index 7caf1d26124a..f8f282ce39bd 100644
--- a/drivers/net/wireless/marvell/mwifiex/tdls.c
+++ b/drivers/net/wireless/marvell/mwifiex/tdls.c
@@ -894,7 +894,7 @@ void mwifiex_process_tdls_action_frame(struct mwifiex_private *priv,
u8 *peer, *pos, *end;
u8 i, action, basic;
u16 cap = 0;
- int ie_len = 0;
+ int ies_len = 0;
if (len < (sizeof(struct ethhdr) + 3))
return;
@@ -916,7 +916,7 @@ void mwifiex_process_tdls_action_frame(struct mwifiex_private *priv,
pos = buf + sizeof(struct ethhdr) + 4;
/* payload 1+ category 1 + action 1 + dialog 1 */
cap = get_unaligned_le16(pos);
- ie_len = len - sizeof(struct ethhdr) - TDLS_REQ_FIX_LEN;
+ ies_len = len - sizeof(struct ethhdr) - TDLS_REQ_FIX_LEN;
pos += 2;
break;
@@ -926,7 +926,7 @@ void mwifiex_process_tdls_action_frame(struct mwifiex_private *priv,
/* payload 1+ category 1 + action 1 + dialog 1 + status code 2*/
pos = buf + sizeof(struct ethhdr) + 6;
cap = get_unaligned_le16(pos);
- ie_len = len - sizeof(struct ethhdr) - TDLS_RESP_FIX_LEN;
+ ies_len = len - sizeof(struct ethhdr) - TDLS_RESP_FIX_LEN;
pos += 2;
break;
@@ -934,7 +934,7 @@ void mwifiex_process_tdls_action_frame(struct mwifiex_private *priv,
if (len < (sizeof(struct ethhdr) + TDLS_CONFIRM_FIX_LEN))
return;
pos = buf + sizeof(struct ethhdr) + TDLS_CONFIRM_FIX_LEN;
- ie_len = len - sizeof(struct ethhdr) - TDLS_CONFIRM_FIX_LEN;
+ ies_len = len - sizeof(struct ethhdr) - TDLS_CONFIRM_FIX_LEN;
break;
default:
mwifiex_dbg(priv->adapter, ERROR, "Unknown TDLS frame type.\n");
@@ -947,33 +947,33 @@ void mwifiex_process_tdls_action_frame(struct mwifiex_private *priv,
sta_ptr->tdls_cap.capab = cpu_to_le16(cap);
- for (end = pos + ie_len; pos + 1 < end; pos += 2 + pos[1]) {
- if (pos + 2 + pos[1] > end)
+ for (end = pos + ies_len; pos + 1 < end; pos += 2 + pos[1]) {
+ u8 ie_len = pos[1];
+
+ if (pos + 2 + ie_len > end)
break;
switch (*pos) {
case WLAN_EID_SUPP_RATES:
- if (pos[1] > 32)
+ if (ie_len > sizeof(sta_ptr->tdls_cap.rates))
return;
- sta_ptr->tdls_cap.rates_len = pos[1];
- for (i = 0; i < pos[1]; i++)
+ sta_ptr->tdls_cap.rates_len = ie_len;
+ for (i = 0; i < ie_len; i++)
sta_ptr->tdls_cap.rates[i] = pos[i + 2];
break;
case WLAN_EID_EXT_SUPP_RATES:
- if (pos[1] > 32)
+ if (ie_len > sizeof(sta_ptr->tdls_cap.rates))
return;
basic = sta_ptr->tdls_cap.rates_len;
- if (pos[1] > 32 - basic)
+ if (ie_len > sizeof(sta_ptr->tdls_cap.rates) - basic)
return;
- for (i = 0; i < pos[1]; i++)
+ for (i = 0; i < ie_len; i++)
sta_ptr->tdls_cap.rates[basic + i] = pos[i + 2];
- sta_ptr->tdls_cap.rates_len += pos[1];
+ sta_ptr->tdls_cap.rates_len += ie_len;
break;
case WLAN_EID_HT_CAPABILITY:
- if (pos > end - sizeof(struct ieee80211_ht_cap) - 2)
- return;
- if (pos[1] != sizeof(struct ieee80211_ht_cap))
+ if (ie_len != sizeof(struct ieee80211_ht_cap))
return;
/* copy the ie's value into ht_capb*/
memcpy((u8 *)&sta_ptr->tdls_cap.ht_capb, pos + 2,
@@ -981,59 +981,45 @@ void mwifiex_process_tdls_action_frame(struct mwifiex_private *priv,
sta_ptr->is_11n_enabled = 1;
break;
case WLAN_EID_HT_OPERATION:
- if (pos > end -
- sizeof(struct ieee80211_ht_operation) - 2)
- return;
- if (pos[1] != sizeof(struct ieee80211_ht_operation))
+ if (ie_len != sizeof(struct ieee80211_ht_operation))
return;
/* copy the ie's value into ht_oper*/
memcpy(&sta_ptr->tdls_cap.ht_oper, pos + 2,
sizeof(struct ieee80211_ht_operation));
break;
case WLAN_EID_BSS_COEX_2040:
- if (pos > end - 3)
- return;
- if (pos[1] != 1)
+ if (ie_len != sizeof(pos[2]))
return;
sta_ptr->tdls_cap.coex_2040 = pos[2];
break;
case WLAN_EID_EXT_CAPABILITY:
- if (pos > end - sizeof(struct ieee_types_header))
- return;
- if (pos[1] < sizeof(struct ieee_types_header))
+ if (ie_len < sizeof(struct ieee_types_header))
return;
- if (pos[1] > 8)
+ if (ie_len > 8)
return;
memcpy((u8 *)&sta_ptr->tdls_cap.extcap, pos,
sizeof(struct ieee_types_header) +
- min_t(u8, pos[1], 8));
+ min_t(u8, ie_len, 8));
break;
case WLAN_EID_RSN:
- if (pos > end - sizeof(struct ieee_types_header))
+ if (ie_len < sizeof(struct ieee_types_header))
return;
- if (pos[1] < sizeof(struct ieee_types_header))
- return;
- if (pos[1] > IEEE_MAX_IE_SIZE -
+ if (ie_len > IEEE_MAX_IE_SIZE -
sizeof(struct ieee_types_header))
return;
memcpy((u8 *)&sta_ptr->tdls_cap.rsn_ie, pos,
sizeof(struct ieee_types_header) +
- min_t(u8, pos[1], IEEE_MAX_IE_SIZE -
+ min_t(u8, ie_len, IEEE_MAX_IE_SIZE -
sizeof(struct ieee_types_header)));
break;
case WLAN_EID_QOS_CAPA:
- if (pos > end - 3)
- return;
- if (pos[1] != 1)
+ if (ie_len != sizeof(pos[2]))
return;
sta_ptr->tdls_cap.qos_info = pos[2];
break;
case WLAN_EID_VHT_OPERATION:
if (priv->adapter->is_hw_11ac_capable) {
- if (pos > end -
- sizeof(struct ieee80211_vht_operation) - 2)
- return;
- if (pos[1] !=
+ if (ie_len !=
sizeof(struct ieee80211_vht_operation))
return;
/* copy the ie's value into vhtoper*/
@@ -1043,10 +1029,7 @@ void mwifiex_process_tdls_action_frame(struct mwifiex_private *priv,
break;
case WLAN_EID_VHT_CAPABILITY:
if (priv->adapter->is_hw_11ac_capable) {
- if (pos > end -
- sizeof(struct ieee80211_vht_cap) - 2)
- return;
- if (pos[1] != sizeof(struct ieee80211_vht_cap))
+ if (ie_len != sizeof(struct ieee80211_vht_cap))
return;
/* copy the ie's value into vhtcap*/
memcpy((u8 *)&sta_ptr->tdls_cap.vhtcap, pos + 2,
@@ -1056,9 +1039,7 @@ void mwifiex_process_tdls_action_frame(struct mwifiex_private *priv,
break;
case WLAN_EID_AID:
if (priv->adapter->is_hw_11ac_capable) {
- if (pos > end - 4)
- return;
- if (pos[1] != 2)
+ if (ie_len != sizeof(u16))
return;
sta_ptr->tdls_cap.aid =
get_unaligned_le16((pos + 2));
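
The rewrite above converts the ad-hoc "pos > end - N" checks into one pattern: read the IE length once, bound the whole IE against the buffer end before touching it, and compare ie_len (not the raw pos[1]) against the destination size. Its generic form, as a hedged sketch:

#include <stddef.h>
#include <stdint.h>

static void parse_ies(const uint8_t *pos, size_t ies_len)
{
        const uint8_t *end = pos + ies_len;

        while (pos + 2 <= end) {
                uint8_t ie_len = pos[1];

                if (pos + 2 + ie_len > end)
                        break;  /* truncated IE: stop, don't read past end */

                /* dispatch on pos[0], copying at most ie_len bytes */

                pos += 2 + ie_len;
        }
}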
diff --git a/drivers/net/wireless/mediatek/mt76/agg-rx.c b/drivers/net/wireless/mediatek/mt76/agg-rx.c
index 53b5a4b2dcc5..59c187898132 100644
--- a/drivers/net/wireless/mediatek/mt76/agg-rx.c
+++ b/drivers/net/wireless/mediatek/mt76/agg-rx.c
@@ -281,8 +281,8 @@ void mt76_rx_aggr_stop(struct mt76_dev *dev, struct mt76_wcid *wcid, u8 tidno)
{
struct mt76_rx_tid *tid = NULL;
- rcu_swap_protected(wcid->aggr[tidno], tid,
- lockdep_is_held(&dev->mutex));
+ tid = rcu_replace_pointer(wcid->aggr[tidno], tid,
+ lockdep_is_held(&dev->mutex));
if (tid) {
mt76_rx_aggr_shutdown(dev, tid);
kfree_rcu(tid, rcu_head);
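
rcu_replace_pointer() publishes a new value and hands back the old one under the given lockdep condition, replacing the older rcu_swap_protected() macro used before this hunk. Sketch of the idiom (struct item and struct holder are illustrative types):

#include <linux/mutex.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>

struct item {
        struct rcu_head rcu_head;
};

struct holder {
        struct mutex lock;
        struct item __rcu *cur;
};

/* Caller holds h->lock; the old entry is retired after a grace period. */
static void holder_replace(struct holder *h, struct item *new_item)
{
        struct item *old;

        old = rcu_replace_pointer(h->cur, new_item,
                                  lockdep_is_held(&h->lock));
        if (old)
                kfree_rcu(old, rcu_head);
}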
diff --git a/drivers/net/wireless/mediatek/mt76/airtime.c b/drivers/net/wireless/mediatek/mt76/airtime.c
index 55116f395f9a..a4a785467748 100644
--- a/drivers/net/wireless/mediatek/mt76/airtime.c
+++ b/drivers/net/wireless/mediatek/mt76/airtime.c
@@ -242,7 +242,7 @@ u32 mt76_calc_rx_airtime(struct mt76_dev *dev, struct mt76_rx_status *status,
return 0;
sband = dev->hw->wiphy->bands[status->band];
- if (!sband || status->rate_idx > sband->n_bitrates)
+ if (!sband || status->rate_idx >= sband->n_bitrates)
return 0;
rate = &sband->bitrates[status->rate_idx];
diff --git a/drivers/net/wireless/mediatek/mt76/mac80211.c b/drivers/net/wireless/mediatek/mt76/mac80211.c
index b9f2a401041a..96018fd65779 100644
--- a/drivers/net/wireless/mediatek/mt76/mac80211.c
+++ b/drivers/net/wireless/mediatek/mt76/mac80211.c
@@ -378,7 +378,8 @@ void mt76_unregister_device(struct mt76_dev *dev)
{
struct ieee80211_hw *hw = dev->hw;
- mt76_led_cleanup(dev);
+ if (IS_ENABLED(CONFIG_MT76_LEDS))
+ mt76_led_cleanup(dev);
mt76_tx_status_check(dev, NULL, true);
ieee80211_unregister_hw(hw);
}
diff --git a/drivers/net/wireless/quantenna/qtnfmac/cfg80211.c b/drivers/net/wireless/quantenna/qtnfmac/cfg80211.c
index 59d089e092f9..8849faa5bc10 100644
--- a/drivers/net/wireless/quantenna/qtnfmac/cfg80211.c
+++ b/drivers/net/wireless/quantenna/qtnfmac/cfg80211.c
@@ -1056,7 +1056,8 @@ static void qtnf_cfg80211_reg_notifier(struct wiphy *wiphy,
pr_debug("MAC%u: initiator=%d alpha=%c%c\n", mac->macid, req->initiator,
req->alpha2[0], req->alpha2[1]);
- ret = qtnf_cmd_reg_notify(mac, req, qtnf_mac_slave_radar_get(wiphy));
+ ret = qtnf_cmd_reg_notify(mac, req, qtnf_slave_radar_get(),
+ qtnf_dfs_offload_get());
if (ret) {
pr_err("MAC%u: failed to update region to %c%c: %d\n",
mac->macid, req->alpha2[0], req->alpha2[1], ret);
@@ -1078,7 +1079,8 @@ struct wiphy *qtnf_wiphy_allocate(struct qtnf_bus *bus)
{
struct wiphy *wiphy;
- if (bus->hw_info.hw_capab & QLINK_HW_CAPAB_DFS_OFFLOAD)
+ if (qtnf_dfs_offload_get() &&
+ (bus->hw_info.hw_capab & QLINK_HW_CAPAB_DFS_OFFLOAD))
qtn_cfg80211_ops.start_radar_detection = NULL;
if (!(bus->hw_info.hw_capab & QLINK_HW_CAPAB_PWR_MGMT))
@@ -1163,7 +1165,8 @@ int qtnf_wiphy_register(struct qtnf_hw_info *hw_info, struct qtnf_wmac *mac)
WIPHY_FLAG_NETNS_OK;
wiphy->flags &= ~WIPHY_FLAG_PS_ON_BY_DEFAULT;
- if (hw_info->hw_capab & QLINK_HW_CAPAB_DFS_OFFLOAD)
+ if (qtnf_dfs_offload_get() &&
+ (hw_info->hw_capab & QLINK_HW_CAPAB_DFS_OFFLOAD))
wiphy_ext_feature_set(wiphy, NL80211_EXT_FEATURE_DFS_OFFLOAD);
if (hw_info->hw_capab & QLINK_HW_CAPAB_SCAN_DWELL)
diff --git a/drivers/net/wireless/quantenna/qtnfmac/commands.c b/drivers/net/wireless/quantenna/qtnfmac/commands.c
index 548f6ff6d0f2..d0d7ec8794c4 100644
--- a/drivers/net/wireless/quantenna/qtnfmac/commands.c
+++ b/drivers/net/wireless/quantenna/qtnfmac/commands.c
@@ -257,6 +257,14 @@ int qtnf_cmd_send_start_ap(struct qtnf_vif *vif,
cmd->pbss = s->pbss;
cmd->ht_required = s->ht_required;
cmd->vht_required = s->vht_required;
+ cmd->twt_responder = s->twt_responder;
+ if (s->he_obss_pd.enable) {
+ cmd->sr_params.sr_control |= QLINK_SR_SRG_INFORMATION_PRESENT;
+ cmd->sr_params.srg_obss_pd_min_offset =
+ s->he_obss_pd.min_offset;
+ cmd->sr_params.srg_obss_pd_max_offset =
+ s->he_obss_pd.max_offset;
+ }
aen = &cmd->aen;
aen->auth_type = s->auth_type;
@@ -510,6 +518,8 @@ qtnf_sta_info_parse_rate(struct rate_info *rate_dst,
rate_dst->flags |= RATE_INFO_FLAGS_MCS;
else if (rate_src->flags & QLINK_STA_INFO_RATE_FLAG_VHT_MCS)
rate_dst->flags |= RATE_INFO_FLAGS_VHT_MCS;
+ else if (rate_src->flags & QLINK_STA_INFO_RATE_FLAG_HE_MCS)
+ rate_dst->flags |= RATE_INFO_FLAGS_HE_MCS;
if (rate_src->flags & QLINK_STA_INFO_RATE_FLAG_SHORT_GI)
rate_dst->flags |= RATE_INFO_FLAGS_SHORT_GI;
@@ -2454,7 +2464,7 @@ out:
}
int qtnf_cmd_reg_notify(struct qtnf_wmac *mac, struct regulatory_request *req,
- bool slave_radar)
+ bool slave_radar, bool dfs_offload)
{
struct wiphy *wiphy = priv_to_wiphy(mac);
struct qtnf_bus *bus = mac->bus;
@@ -2517,6 +2527,7 @@ int qtnf_cmd_reg_notify(struct qtnf_wmac *mac, struct regulatory_request *req,
}
cmd->slave_radar = slave_radar;
+ cmd->dfs_offload = dfs_offload;
cmd->num_channels = 0;
for (band = 0; band < NUM_NL80211_BANDS; band++) {
diff --git a/drivers/net/wireless/quantenna/qtnfmac/commands.h b/drivers/net/wireless/quantenna/qtnfmac/commands.h
index 761755bf9ede..ab273257b078 100644
--- a/drivers/net/wireless/quantenna/qtnfmac/commands.h
+++ b/drivers/net/wireless/quantenna/qtnfmac/commands.h
@@ -58,7 +58,7 @@ int qtnf_cmd_send_disconnect(struct qtnf_vif *vif,
int qtnf_cmd_send_updown_intf(struct qtnf_vif *vif,
bool up);
int qtnf_cmd_reg_notify(struct qtnf_wmac *mac, struct regulatory_request *req,
- bool slave_radar);
+ bool slave_radar, bool dfs_offload);
int qtnf_cmd_get_chan_stats(struct qtnf_wmac *mac, u16 channel,
struct qtnf_chan_stats *stats);
int qtnf_cmd_send_chan_switch(struct qtnf_vif *vif,
diff --git a/drivers/net/wireless/quantenna/qtnfmac/core.c b/drivers/net/wireless/quantenna/qtnfmac/core.c
index 5fb598389487..4320180f8c07 100644
--- a/drivers/net/wireless/quantenna/qtnfmac/core.c
+++ b/drivers/net/wireless/quantenna/qtnfmac/core.c
@@ -21,8 +21,22 @@ static bool slave_radar = true;
module_param(slave_radar, bool, 0644);
MODULE_PARM_DESC(slave_radar, "set 0 to disable radar detection in slave mode");
+static bool dfs_offload;
+module_param(dfs_offload, bool, 0644);
+MODULE_PARM_DESC(dfs_offload, "set 1 to enable DFS offload to firmware");
+
static struct dentry *qtnf_debugfs_dir;
+bool qtnf_slave_radar_get(void)
+{
+ return slave_radar;
+}
+
+bool qtnf_dfs_offload_get(void)
+{
+ return dfs_offload;
+}
+
struct qtnf_wmac *qtnf_core_get_mac(const struct qtnf_bus *bus, u8 macid)
{
struct qtnf_wmac *mac = NULL;
@@ -156,7 +170,7 @@ static void qtnf_netdev_get_stats64(struct net_device *ndev,
/* Netdev handler for transmission timeout.
*/
-static void qtnf_netdev_tx_timeout(struct net_device *ndev)
+static void qtnf_netdev_tx_timeout(struct net_device *ndev, unsigned int txqueue)
{
struct qtnf_vif *vif = qtnf_netdev_get_priv(ndev);
struct qtnf_wmac *mac;
@@ -211,9 +225,6 @@ static int qtnf_netdev_port_parent_id(struct net_device *ndev,
const struct qtnf_vif *vif = qtnf_netdev_get_priv(ndev);
const struct qtnf_bus *bus = vif->mac->bus;
- if (!(bus->hw_info.hw_capab & QLINK_HW_CAPAB_HW_BRIDGE))
- return -EOPNOTSUPP;
-
ppid->id_len = sizeof(bus->hw_id);
memcpy(&ppid->id, bus->hw_id, ppid->id_len);
@@ -456,11 +467,6 @@ static struct qtnf_wmac *qtnf_core_mac_alloc(struct qtnf_bus *bus,
return mac;
}
-bool qtnf_mac_slave_radar_get(struct wiphy *wiphy)
-{
- return slave_radar;
-}
-
static const struct ethtool_ops qtnf_ethtool_ops = {
.get_drvinfo = cfg80211_get_drvinfo,
};
@@ -656,12 +662,24 @@ bool qtnf_netdev_is_qtn(const struct net_device *ndev)
return ndev->netdev_ops == &qtnf_netdev_ops;
}
+static int qtnf_check_br_ports(struct net_device *dev, void *data)
+{
+ struct net_device *ndev = data;
+
+ if (dev != ndev && netdev_port_same_parent_id(dev, ndev))
+ return -ENOTSUPP;
+
+ return 0;
+}
+
static int qtnf_core_netdevice_event(struct notifier_block *nb,
unsigned long event, void *ptr)
{
struct net_device *ndev = netdev_notifier_info_to_dev(ptr);
const struct netdev_notifier_changeupper_info *info;
+ struct net_device *brdev;
struct qtnf_vif *vif;
+ struct qtnf_bus *bus;
int br_domain;
int ret = 0;
@@ -672,25 +690,34 @@ static int qtnf_core_netdevice_event(struct notifier_block *nb,
return NOTIFY_OK;
vif = qtnf_netdev_get_priv(ndev);
+ bus = vif->mac->bus;
switch (event) {
case NETDEV_CHANGEUPPER:
info = ptr;
+ brdev = info->upper_dev;
- if (!netif_is_bridge_master(info->upper_dev))
+ if (!netif_is_bridge_master(brdev))
break;
pr_debug("[VIF%u.%u] change bridge: %s %s\n",
- vif->mac->macid, vif->vifid,
- netdev_name(info->upper_dev),
+ vif->mac->macid, vif->vifid, netdev_name(brdev),
info->linking ? "add" : "del");
- if (info->linking)
- br_domain = info->upper_dev->ifindex;
- else
- br_domain = ndev->ifindex;
+ if (IS_ENABLED(CONFIG_NET_SWITCHDEV) &&
+ (bus->hw_info.hw_capab & QLINK_HW_CAPAB_HW_BRIDGE)) {
+ if (info->linking)
+ br_domain = brdev->ifindex;
+ else
+ br_domain = ndev->ifindex;
+
+ ret = qtnf_cmd_netdev_changeupper(vif, br_domain);
+ } else {
+ ret = netdev_walk_all_lower_dev(brdev,
+ qtnf_check_br_ports,
+ ndev);
+ }
- ret = qtnf_cmd_netdev_changeupper(vif, br_domain);
break;
default:
break;
@@ -763,13 +790,11 @@ int qtnf_core_attach(struct qtnf_bus *bus)
}
}
- if (bus->hw_info.hw_capab & QLINK_HW_CAPAB_HW_BRIDGE) {
- bus->netdev_nb.notifier_call = qtnf_core_netdevice_event;
- ret = register_netdevice_notifier(&bus->netdev_nb);
- if (ret) {
- pr_err("failed to register netdev notifier: %d\n", ret);
- goto error;
- }
+ bus->netdev_nb.notifier_call = qtnf_core_netdevice_event;
+ ret = register_netdevice_notifier(&bus->netdev_nb);
+ if (ret) {
+ pr_err("failed to register netdev notifier: %d\n", ret);
+ goto error;
}
bus->fw_state = QTNF_FW_STATE_RUNNING;
diff --git a/drivers/net/wireless/quantenna/qtnfmac/core.h b/drivers/net/wireless/quantenna/qtnfmac/core.h
index 116ec16aa15b..d715e1cd0006 100644
--- a/drivers/net/wireless/quantenna/qtnfmac/core.h
+++ b/drivers/net/wireless/quantenna/qtnfmac/core.h
@@ -133,7 +133,8 @@ struct qtnf_vif *qtnf_mac_get_free_vif(struct qtnf_wmac *mac);
struct qtnf_vif *qtnf_mac_get_base_vif(struct qtnf_wmac *mac);
void qtnf_mac_iface_comb_free(struct qtnf_wmac *mac);
void qtnf_mac_ext_caps_free(struct qtnf_wmac *mac);
-bool qtnf_mac_slave_radar_get(struct wiphy *wiphy);
+bool qtnf_slave_radar_get(void);
+bool qtnf_dfs_offload_get(void);
struct wiphy *qtnf_wiphy_allocate(struct qtnf_bus *bus);
int qtnf_core_net_attach(struct qtnf_wmac *mac, struct qtnf_vif *priv,
const char *name, unsigned char name_assign_type);
diff --git a/drivers/net/wireless/quantenna/qtnfmac/qlink.h b/drivers/net/wireless/quantenna/qtnfmac/qlink.h
index 75527f1bb306..b2edb03819d1 100644
--- a/drivers/net/wireless/quantenna/qtnfmac/qlink.h
+++ b/drivers/net/wireless/quantenna/qtnfmac/qlink.h
@@ -6,7 +6,7 @@
#include <linux/ieee80211.h>
-#define QLINK_PROTO_VER 15
+#define QLINK_PROTO_VER 16
#define QLINK_MACID_RSVD 0xFF
#define QLINK_VIFID_RSVD 0xFF
@@ -196,6 +196,45 @@ struct qlink_sta_info_state {
__le32 value;
} __packed;
+/**
+ * enum qlink_sr_ctrl_flags - control flags for spatial reuse parameter set
+ *
+ * @QLINK_SR_PSR_DISALLOWED: indicates whether or not PSR-based spatial reuse
+ * transmissions are allowed for STAs associated with the AP
+ * @QLINK_SR_NON_SRG_OBSS_PD_SR_DISALLOWED: indicates whether or not
+ * Non-SRG OBSS PD spatial reuse transmissions are allowed for STAs associated
+ * with the AP
+ * @QLINK_SR_NON_SRG_OFFSET_PRESENT: indicates whether or not Non-SRG OBSS PD
+ * Max offset field is valid in the element
+ * @QLINK_SR_SRG_INFORMATION_PRESENT: indicates whether or not SRG OBSS PD
+ * Min/Max offset fields are valid in the element
+ */
+enum qlink_sr_ctrl_flags {
+ QLINK_SR_PSR_DISALLOWED = BIT(0),
+ QLINK_SR_NON_SRG_OBSS_PD_SR_DISALLOWED = BIT(1),
+ QLINK_SR_NON_SRG_OFFSET_PRESENT = BIT(2),
+ QLINK_SR_SRG_INFORMATION_PRESENT = BIT(3),
+};
+
+/**
+ * struct qlink_sr_params - spatial reuse parameters
+ *
+ * @sr_control: spatial reuse control field; flags contained in this field are
+ * defined in &enum qlink_sr_ctrl_flags
+ * @non_srg_obss_pd_max: added to -82 dBm to generate the value of the
+ * Non-SRG OBSS PD Max parameter
+ * @srg_obss_pd_min_offset: added to -82 dBm to generate the value of the
+ * SRG OBSS PD Min parameter
+ * @srg_obss_pd_max_offset: added to -82 dBm to generate the value of the
+ * SRG OBSS PD Max parameter
+ */
+struct qlink_sr_params {
+ u8 sr_control;
+ u8 non_srg_obss_pd_max;
+ u8 srg_obss_pd_min_offset;
+ u8 srg_obss_pd_max_offset;
+} __packed;
+
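Worked numbers for the offsets documented above: each field is an unsigned offset added to the -82 dBm reference, so srg_obss_pd_min_offset = 10 and srg_obss_pd_max_offset = 20 describe an SRG OBSS PD window from -72 dBm to -62 dBm. A small sketch of the arithmetic, with illustrative field values:

#include <stdio.h>

#define OBSS_PD_REFERENCE_DBM (-82)	/* reference level from the kernel-doc */

int main(void)
{
	unsigned char srg_obss_pd_min_offset = 10;	/* illustrative values */
	unsigned char srg_obss_pd_max_offset = 20;

	printf("SRG OBSS PD window: %d dBm .. %d dBm\n",
	       OBSS_PD_REFERENCE_DBM + srg_obss_pd_min_offset,
	       OBSS_PD_REFERENCE_DBM + srg_obss_pd_max_offset);	/* -72 .. -62 */
	return 0;
}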
/* QLINK Command messages related definitions
*/
@@ -596,8 +635,9 @@ enum qlink_user_reg_hint_type {
* of &enum qlink_user_reg_hint_type.
* @num_channels: number of &struct qlink_tlv_channel in a variable portion of a
* payload.
- * @slave_radar: whether slave device should enable radar detection.
* @dfs_region: one of &enum qlink_dfs_regions.
+ * @slave_radar: whether slave device should enable radar detection.
+ * @dfs_offload: enable or disable DFS offload to firmware.
* @info: variable portion of regulatory notifier callback.
*/
struct qlink_cmd_reg_notify {
@@ -608,7 +648,7 @@ struct qlink_cmd_reg_notify {
u8 num_channels;
u8 dfs_region;
u8 slave_radar;
- u8 rsvd[1];
+ u8 dfs_offload;
u8 info[0];
} __packed;
@@ -650,6 +690,8 @@ enum qlink_hidden_ssid {
* @ht_required: stations must support HT
* @vht_required: stations must support VHT
* @aen: encryption info
+ * @sr_params: spatial reuse parameters
+ * @twt_responder: enable Target Wake Time responder support
* @info: variable configurations
*/
struct qlink_cmd_start_ap {
@@ -665,6 +707,9 @@ struct qlink_cmd_start_ap {
u8 ht_required;
u8 vht_required;
struct qlink_auth_encr aen;
+ struct qlink_sr_params sr_params;
+ u8 twt_responder;
+ u8 rsvd[3];
u8 info[0];
} __packed;
@@ -948,6 +993,7 @@ enum qlink_sta_info_rate_flags {
QLINK_STA_INFO_RATE_FLAG_VHT_MCS = BIT(1),
QLINK_STA_INFO_RATE_FLAG_SHORT_GI = BIT(2),
QLINK_STA_INFO_RATE_FLAG_60G = BIT(3),
+ QLINK_STA_INFO_RATE_FLAG_HE_MCS = BIT(4),
};
/**
diff --git a/drivers/net/wireless/ralink/rt2x00/rt2800lib.c b/drivers/net/wireless/ralink/rt2x00/rt2800lib.c
index a36c3fea7495..6beac1f74e7c 100644
--- a/drivers/net/wireless/ralink/rt2x00/rt2800lib.c
+++ b/drivers/net/wireless/ralink/rt2x00/rt2800lib.c
@@ -1652,20 +1652,17 @@ static void rt2800_config_wcid_attr_cipher(struct rt2x00_dev *rt2x00dev,
rt2800_register_write(rt2x00dev, offset, reg);
}
- offset = MAC_IVEIV_ENTRY(key->hw_key_idx);
+ if (test_bit(DEVICE_STATE_RESET, &rt2x00dev->flags))
+ return;
- if (crypto->cmd == SET_KEY) {
- rt2800_register_multiread(rt2x00dev, offset,
- &iveiv_entry, sizeof(iveiv_entry));
- if ((crypto->cipher == CIPHER_TKIP) ||
- (crypto->cipher == CIPHER_TKIP_NO_MIC) ||
- (crypto->cipher == CIPHER_AES))
- iveiv_entry.iv[3] |= 0x20;
- iveiv_entry.iv[3] |= key->keyidx << 6;
- } else {
- memset(&iveiv_entry, 0, sizeof(iveiv_entry));
- }
+ offset = MAC_IVEIV_ENTRY(key->hw_key_idx);
+ memset(&iveiv_entry, 0, sizeof(iveiv_entry));
+ if ((crypto->cipher == CIPHER_TKIP) ||
+ (crypto->cipher == CIPHER_TKIP_NO_MIC) ||
+ (crypto->cipher == CIPHER_AES))
+ iveiv_entry.iv[3] |= 0x20;
+ iveiv_entry.iv[3] |= key->keyidx << 6;
rt2800_register_multiwrite(rt2x00dev, offset,
&iveiv_entry, sizeof(iveiv_entry));
}
diff --git a/drivers/net/wireless/ralink/rt2x00/rt2800pci.c b/drivers/net/wireless/ralink/rt2x00/rt2800pci.c
index a23c26574002..3868c07672bd 100644
--- a/drivers/net/wireless/ralink/rt2x00/rt2800pci.c
+++ b/drivers/net/wireless/ralink/rt2x00/rt2800pci.c
@@ -311,6 +311,7 @@ static const struct ieee80211_ops rt2800pci_mac80211_ops = {
.get_survey = rt2800_get_survey,
.get_ringparam = rt2x00mac_get_ringparam,
.tx_frames_pending = rt2x00mac_tx_frames_pending,
+ .reconfig_complete = rt2x00mac_reconfig_complete,
};
static const struct rt2800_ops rt2800pci_rt2800_ops = {
diff --git a/drivers/net/wireless/ralink/rt2x00/rt2800soc.c b/drivers/net/wireless/ralink/rt2x00/rt2800soc.c
index 7b931bb96a9e..bbfe1425c0ee 100644
--- a/drivers/net/wireless/ralink/rt2x00/rt2800soc.c
+++ b/drivers/net/wireless/ralink/rt2x00/rt2800soc.c
@@ -156,6 +156,7 @@ static const struct ieee80211_ops rt2800soc_mac80211_ops = {
.get_survey = rt2800_get_survey,
.get_ringparam = rt2x00mac_get_ringparam,
.tx_frames_pending = rt2x00mac_tx_frames_pending,
+ .reconfig_complete = rt2x00mac_reconfig_complete,
};
static const struct rt2800_ops rt2800soc_rt2800_ops = {
diff --git a/drivers/net/wireless/ralink/rt2x00/rt2800usb.c b/drivers/net/wireless/ralink/rt2x00/rt2800usb.c
index 0dfb55c69b73..4cc64fe481a7 100644
--- a/drivers/net/wireless/ralink/rt2x00/rt2800usb.c
+++ b/drivers/net/wireless/ralink/rt2x00/rt2800usb.c
@@ -654,6 +654,7 @@ static const struct ieee80211_ops rt2800usb_mac80211_ops = {
.get_survey = rt2800_get_survey,
.get_ringparam = rt2x00mac_get_ringparam,
.tx_frames_pending = rt2x00mac_tx_frames_pending,
+ .reconfig_complete = rt2x00mac_reconfig_complete,
};
static const struct rt2800_ops rt2800usb_rt2800_ops = {
diff --git a/drivers/net/wireless/ralink/rt2x00/rt2x00.h b/drivers/net/wireless/ralink/rt2x00/rt2x00.h
index a90a518b40d3..ea8a34ecae14 100644
--- a/drivers/net/wireless/ralink/rt2x00/rt2x00.h
+++ b/drivers/net/wireless/ralink/rt2x00/rt2x00.h
@@ -1439,6 +1439,8 @@ void rt2x00mac_tx(struct ieee80211_hw *hw,
struct sk_buff *skb);
int rt2x00mac_start(struct ieee80211_hw *hw);
void rt2x00mac_stop(struct ieee80211_hw *hw);
+void rt2x00mac_reconfig_complete(struct ieee80211_hw *hw,
+ enum ieee80211_reconfig_type reconfig_type);
int rt2x00mac_add_interface(struct ieee80211_hw *hw,
struct ieee80211_vif *vif);
void rt2x00mac_remove_interface(struct ieee80211_hw *hw,
diff --git a/drivers/net/wireless/ralink/rt2x00/rt2x00dev.c b/drivers/net/wireless/ralink/rt2x00/rt2x00dev.c
index c3eab767bc21..7f9e43a4f805 100644
--- a/drivers/net/wireless/ralink/rt2x00/rt2x00dev.c
+++ b/drivers/net/wireless/ralink/rt2x00/rt2x00dev.c
@@ -1255,16 +1255,6 @@ int rt2x00lib_start(struct rt2x00_dev *rt2x00dev)
{
int retval = 0;
- if (test_bit(DEVICE_STATE_STARTED, &rt2x00dev->flags)) {
- /*
- * This is special case for ieee80211_restart_hw(), otherwise
- * mac80211 never call start() two times in row without stop();
- */
- set_bit(DEVICE_STATE_RESET, &rt2x00dev->flags);
- rt2x00dev->ops->lib->pre_reset_hw(rt2x00dev);
- rt2x00lib_stop(rt2x00dev);
- }
-
/*
* If this is the first interface which is added,
* we should load the firmware now.
@@ -1292,7 +1282,6 @@ int rt2x00lib_start(struct rt2x00_dev *rt2x00dev)
set_bit(DEVICE_STATE_STARTED, &rt2x00dev->flags);
out:
- clear_bit(DEVICE_STATE_RESET, &rt2x00dev->flags);
return retval;
}
diff --git a/drivers/net/wireless/ralink/rt2x00/rt2x00mac.c b/drivers/net/wireless/ralink/rt2x00/rt2x00mac.c
index beb20c5faf5f..32efbc8e9f92 100644
--- a/drivers/net/wireless/ralink/rt2x00/rt2x00mac.c
+++ b/drivers/net/wireless/ralink/rt2x00/rt2x00mac.c
@@ -165,6 +165,15 @@ int rt2x00mac_start(struct ieee80211_hw *hw)
if (!test_bit(DEVICE_STATE_PRESENT, &rt2x00dev->flags))
return 0;
+ if (test_bit(DEVICE_STATE_STARTED, &rt2x00dev->flags)) {
+ /*
+ * This is a special case for ieee80211_restart_hw(); otherwise
+ * mac80211 never calls start() twice in a row without stop().
+ */
+ set_bit(DEVICE_STATE_RESET, &rt2x00dev->flags);
+ rt2x00dev->ops->lib->pre_reset_hw(rt2x00dev);
+ rt2x00lib_stop(rt2x00dev);
+ }
return rt2x00lib_start(rt2x00dev);
}
EXPORT_SYMBOL_GPL(rt2x00mac_start);
@@ -180,6 +189,17 @@ void rt2x00mac_stop(struct ieee80211_hw *hw)
}
EXPORT_SYMBOL_GPL(rt2x00mac_stop);
+void
+rt2x00mac_reconfig_complete(struct ieee80211_hw *hw,
+ enum ieee80211_reconfig_type reconfig_type)
+{
+ struct rt2x00_dev *rt2x00dev = hw->priv;
+
+ if (reconfig_type == IEEE80211_RECONFIG_TYPE_RESTART)
+ clear_bit(DEVICE_STATE_RESET, &rt2x00dev->flags);
+}
+EXPORT_SYMBOL_GPL(rt2x00mac_reconfig_complete);
+
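The restart handshake introduced here, summarized: during ieee80211_restart_hw() mac80211 calls start() again without an intervening stop(), so the driver flags DEVICE_STATE_RESET, reruns its stop/start path, and drops the flag only when reconfig_complete(IEEE80211_RECONFIG_TYPE_RESTART) reports that the stack has finished reprogramming state; that is what lets paths such as the rt2800 IV/EIV write above bail out while the flag is set. A compressed sketch of the flag lifecycle, with stand-in types:

#include <stdbool.h>
#include <stdio.h>

struct dev_sketch { bool started, in_reset; };

static void mac_start(struct dev_sketch *d)
{
	if (d->started) {
		/* restart path: mark the reset so other hooks can skip
		 * work that would clobber state mac80211 is restoring */
		d->in_reset = true;
	}
	d->started = true;
}

static void mac_reconfig_complete(struct dev_sketch *d, bool restart)
{
	if (restart)
		d->in_reset = false;	/* restore finished, back to normal */
}

int main(void)
{
	struct dev_sketch d = { 0 };

	mac_start(&d);			/* normal start */
	mac_start(&d);			/* ieee80211_restart_hw(): no stop() first */
	printf("in_reset=%d\n", d.in_reset);	/* 1 */
	mac_reconfig_complete(&d, true);
	printf("in_reset=%d\n", d.in_reset);	/* 0 */
	return 0;
}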
int rt2x00mac_add_interface(struct ieee80211_hw *hw,
struct ieee80211_vif *vif)
{
diff --git a/drivers/net/wireless/ralink/rt2x00/rt2x00usb.c b/drivers/net/wireless/ralink/rt2x00/rt2x00usb.c
index bc2dfef0de22..92e9e023c349 100644
--- a/drivers/net/wireless/ralink/rt2x00/rt2x00usb.c
+++ b/drivers/net/wireless/ralink/rt2x00/rt2x00usb.c
@@ -522,7 +522,7 @@ EXPORT_SYMBOL_GPL(rt2x00usb_flush_queue);
static void rt2x00usb_watchdog_tx_dma(struct data_queue *queue)
{
- rt2x00_warn(queue->rt2x00dev, "TX queue %d DMA timed out, invoke forced forced reset\n",
+ rt2x00_warn(queue->rt2x00dev, "TX queue %d DMA timed out, invoke forced reset\n",
queue->qid);
rt2x00queue_stop_queue(queue);
diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c
index aa2bb2ae9809..54a1a4ea107b 100644
--- a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c
+++ b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c
@@ -6384,7 +6384,7 @@ static int rtl8xxxu_parse_usb(struct rtl8xxxu_priv *priv,
u8 dir, xtype, num;
int ret = 0;
- host_interface = &interface->altsetting[0];
+ host_interface = interface->cur_altsetting;
interface_desc = &host_interface->desc;
endpoints = interface_desc->bNumEndpoints;
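For the one-line rtl8xxxu fix above: altsetting[] is the array of an interface's alternate settings in descriptor order, while cur_altsetting points at the setting actually in use, so parsing endpoints out of altsetting[0] mis-reads any device whose active setting differs. A toy illustration with stand-in structs:

#include <stdio.h>

struct usb_host_interface_sketch { int bNumEndpoints; };
struct usb_interface_sketch {
	struct usb_host_interface_sketch altsetting[2];
	struct usb_host_interface_sketch *cur_altsetting;
};

int main(void)
{
	struct usb_interface_sketch intf = {
		.altsetting = { { .bNumEndpoints = 1 }, { .bNumEndpoints = 4 } },
	};

	/* the device has been switched to alternate setting 1 */
	intf.cur_altsetting = &intf.altsetting[1];

	printf("altsetting[0]: %d endpoints, cur_altsetting: %d endpoints\n",
	       intf.altsetting[0].bNumEndpoints,
	       intf.cur_altsetting->bNumEndpoints);	/* 1 vs 4 */
	return 0;
}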
diff --git a/drivers/net/wireless/realtek/rtlwifi/base.h b/drivers/net/wireless/realtek/rtlwifi/base.h
index e4a7e074ae3f..fa92e29fffda 100644
--- a/drivers/net/wireless/realtek/rtlwifi/base.h
+++ b/drivers/net/wireless/realtek/rtlwifi/base.h
@@ -61,9 +61,9 @@ enum ap_peer {
CP_MACADDR((u8 *)(_hdr)+FRAME_OFFSET_ADDRESS3, (u8 *)(_val))
#define SET_TX_DESC_SPE_RPT(__pdesc, __val) \
- SET_BITS_TO_LE_4BYTE((__pdesc) + 8, 19, 1, __val)
+ le32p_replace_bits((__le32 *)(__pdesc + 8), __val, BIT(19))
#define SET_TX_DESC_SW_DEFINE(__pdesc, __val) \
- SET_BITS_TO_LE_4BYTE((__pdesc) + 24, 0, 12, __val)
+ le32p_replace_bits((__le32 *)(__pdesc + 24), __val, GENMASK(11, 0))
int rtl_init_core(struct ieee80211_hw *hw);
void rtl_deinit_core(struct ieee80211_hw *hw);
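le32p_replace_bits() comes from linux/bitfield.h: a single GENMASK()/BIT() mask argument both positions and sizes the field, replacing the open-coded byte-offset/bit-offset/length triples of SET_BITS_TO_LE_4BYTE. A userspace sketch of the write-side semantics; the helper below is a simplified stand-in that, unlike the kernel's, assumes a little-endian host:

#include <stdint.h>
#include <stdio.h>

#define GENMASK(h, l) ((~0u << (l)) & (~0u >> (31 - (h))))

/* simplified stand-in for le32p_replace_bits(); the real helper also
 * performs the little-endian conversion, which this sketch skips */
static void le32p_replace_bits_sketch(uint32_t *p, uint32_t val, uint32_t mask)
{
	unsigned int shift = __builtin_ctz(mask);	/* lowest set bit of the mask */

	*p = (*p & ~mask) | ((val << shift) & mask);
}

int main(void)
{
	uint32_t desc = 0xffffffff;

	/* SET_TX_DESC_SW_DEFINE(): 12-bit field at bits 11..0 of the
	 * descriptor word at byte offset 24 (word 6) */
	le32p_replace_bits_sketch(&desc, 0x123, GENMASK(11, 0));
	printf("%08x\n", (unsigned)desc);	/* fffff123 */
	return 0;
}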
diff --git a/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8192e2ant.c b/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8192e2ant.c
index 3c96c320236c..658ff425c256 100644
--- a/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8192e2ant.c
+++ b/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8192e2ant.c
@@ -862,7 +862,7 @@ static void btc8192e2ant_set_sw_rf_rx_lpf_corner(struct btc_coexist *btcoexist,
/* Resume RF Rx LPF corner
* After initialized, we can use coex_dm->btRf0x1eBackup
*/
- if (btcoexist->initilized) {
+ if (btcoexist->initialized) {
RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
"[BTCoex], Resume RF Rx LPF corner!!\n");
btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A, 0x1e,
diff --git a/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtcoutsrc.c b/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtcoutsrc.c
index 191dafd03189..a4940a3842de 100644
--- a/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtcoutsrc.c
+++ b/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtcoutsrc.c
@@ -1461,7 +1461,7 @@ void exhalbtc_init_coex_dm(struct btc_coexist *btcoexist)
ex_btc8192e2ant_init_coex_dm(btcoexist);
}
- btcoexist->initilized = true;
+ btcoexist->initialized = true;
}
void exhalbtc_ips_notify(struct btc_coexist *btcoexist, u8 type)
diff --git a/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtcoutsrc.h b/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtcoutsrc.h
index 8c0a7fdbf200..a96a995dd850 100644
--- a/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtcoutsrc.h
+++ b/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtcoutsrc.h
@@ -679,7 +679,7 @@ struct btc_coexist {
bool auto_report_2ant;
bool dbg_mode_1ant;
bool dbg_mode_2ant;
- bool initilized;
+ bool initialized;
bool stop_coex_dm;
bool manual_control;
struct btc_statistics statistics;
diff --git a/drivers/net/wireless/realtek/rtlwifi/pci.c b/drivers/net/wireless/realtek/rtlwifi/pci.c
index f88d26535978..25335bd2873b 100644
--- a/drivers/net/wireless/realtek/rtlwifi/pci.c
+++ b/drivers/net/wireless/realtek/rtlwifi/pci.c
@@ -1061,13 +1061,15 @@ done:
return ret;
}
-static void _rtl_pci_irq_tasklet(struct ieee80211_hw *hw)
+static void _rtl_pci_irq_tasklet(unsigned long data)
{
+ struct ieee80211_hw *hw = (struct ieee80211_hw *)data;
_rtl_pci_tx_chk_waitq(hw);
}
-static void _rtl_pci_prepare_bcn_tasklet(struct ieee80211_hw *hw)
+static void _rtl_pci_prepare_bcn_tasklet(unsigned long data)
{
+ struct ieee80211_hw *hw = (struct ieee80211_hw *)data;
struct rtl_priv *rtlpriv = rtl_priv(hw);
struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
@@ -1193,10 +1195,10 @@ static void _rtl_pci_init_struct(struct ieee80211_hw *hw,
/*task */
tasklet_init(&rtlpriv->works.irq_tasklet,
- (void (*)(unsigned long))_rtl_pci_irq_tasklet,
+ _rtl_pci_irq_tasklet,
(unsigned long)hw);
tasklet_init(&rtlpriv->works.irq_prepare_bcn_tasklet,
- (void (*)(unsigned long))_rtl_pci_prepare_bcn_tasklet,
+ _rtl_pci_prepare_bcn_tasklet,
(unsigned long)hw);
INIT_WORK(&rtlpriv->works.lps_change_work,
rtl_lps_change_work_callback);
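The tasklet hunks change the callback prototype rather than any behaviour: tasklet_init() of this kernel era expects void (*)(unsigned long), and calling a function through a cast to an incompatible function-pointer type is undefined behaviour, so the handlers now take the unsigned long themselves and cast the context back inside. A sketch of the pattern, with stand-in types:

#include <stdio.h>

struct hw_sketch { const char *name; };

/* matches the tasklet_init() prototype of this kernel era: the context
 * travels as unsigned long and is cast back inside the handler, instead
 * of casting the function pointer itself */
static void irq_tasklet_sketch(unsigned long data)
{
	struct hw_sketch *hw = (struct hw_sketch *)data;

	printf("tasklet ran for %s\n", hw->name);
}

int main(void)
{
	struct hw_sketch hw = { .name = "demo" };

	/* stands in for: tasklet_init(&t, irq_tasklet_sketch, (unsigned long)hw); */
	irq_tasklet_sketch((unsigned long)&hw);
	return 0;
}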
diff --git a/drivers/net/wireless/realtek/rtlwifi/ps.c b/drivers/net/wireless/realtek/rtlwifi/ps.c
index e5e1ec5a41dc..bc0ac96ee615 100644
--- a/drivers/net/wireless/realtek/rtlwifi/ps.c
+++ b/drivers/net/wireless/realtek/rtlwifi/ps.c
@@ -737,7 +737,7 @@ static void rtl_p2p_noa_ie(struct ieee80211_hw *hw, void *data,
find_p2p_ie = true;
/*to find noa ie*/
while (ie + 1 < end) {
- noa_len = READEF2BYTE((__le16 *)&ie[1]);
+ noa_len = le16_to_cpu(*((__le16 *)&ie[1]));
if (ie + 3 + ie[1] > end)
return;
@@ -766,16 +766,16 @@ static void rtl_p2p_noa_ie(struct ieee80211_hw *hw, void *data,
index = 5;
for (i = 0; i < noa_num; i++) {
p2pinfo->noa_count_type[i] =
- READEF1BYTE(ie+index);
+ *(u8 *)(ie + index);
index += 1;
p2pinfo->noa_duration[i] =
- READEF4BYTE((__le32 *)ie+index);
+ le32_to_cpu(*(__le32 *)(ie + index));
index += 4;
p2pinfo->noa_interval[i] =
- READEF4BYTE((__le32 *)ie+index);
+ le32_to_cpu(*(__le32 *)(ie + index));
index += 4;
p2pinfo->noa_start_time[i] =
- READEF4BYTE((__le32 *)ie+index);
+ le32_to_cpu(*(__le32 *)(ie + index));
index += 4;
}
@@ -832,7 +832,7 @@ static void rtl_p2p_action_ie(struct ieee80211_hw *hw, void *data,
RT_TRACE(rtlpriv, COMP_FW, DBG_LOUD, "action frame find P2P IE.\n");
/*to find noa ie*/
while (ie + 1 < end) {
- noa_len = READEF2BYTE((__le16 *)&ie[1]);
+ noa_len = le16_to_cpu(*(__le16 *)&ie[1]);
if (ie + 3 + ie[1] > end)
return;
@@ -861,16 +861,16 @@ static void rtl_p2p_action_ie(struct ieee80211_hw *hw, void *data,
index = 5;
for (i = 0; i < noa_num; i++) {
p2pinfo->noa_count_type[i] =
- READEF1BYTE(ie+index);
+ *(u8 *)(ie + index);
index += 1;
p2pinfo->noa_duration[i] =
- READEF4BYTE((__le32 *)ie+index);
+ le32_to_cpu(*(__le32 *)(ie + index));
index += 4;
p2pinfo->noa_interval[i] =
- READEF4BYTE((__le32 *)ie+index);
+ le32_to_cpu(*(__le32 *)(ie + index));
index += 4;
p2pinfo->noa_start_time[i] =
- READEF4BYTE((__le32 *)ie+index);
+ le32_to_cpu(*(__le32 *)(ie + index));
index += 4;
}
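Parenthesization matters in the reads above: *(__le32 *)(ie + index) advances the byte pointer before the load, whereas *(__le32 *)ie + index adds index to the loaded value, and (__le32 *)ie + index scales the offset by four. IE payloads also carry no alignment guarantee, so a byte-wise read in the style of the kernel's get_unaligned_le32() is the robust form; a portable sketch:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* stand-in for the kernel's get_unaligned_le32(); the memcpy avoids
 * unaligned loads, and the final conversion assumes a little-endian
 * host for brevity */
static uint32_t get_unaligned_le32_sketch(const uint8_t *p)
{
	uint32_t v;

	memcpy(&v, p, sizeof(v));
	return v;
}

int main(void)
{
	const uint8_t ie[] = { 0xaa, 0x78, 0x56, 0x34, 0x12 };
	size_t index = 1;

	/* equivalent of le32_to_cpu(*(__le32 *)(ie + index)) */
	printf("0x%08x\n", (unsigned)get_unaligned_le32_sketch(ie + index));
	/* prints 0x12345678 */
	return 0;
}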
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/fw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/fw.c
index e2e0bfbc24fe..fc7b9ad7e5d0 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/fw.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/fw.c
@@ -372,20 +372,20 @@ void rtl88e_set_fw_pwrmode_cmd(struct ieee80211_hw *hw, u8 mode)
u8 rlbm, power_state = 0;
RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD, "FW LPS mode = %d\n", mode);
- SET_H2CCMD_PWRMODE_PARM_MODE(u1_h2c_set_pwrmode, ((mode) ? 1 : 0));
+ set_h2ccmd_pwrmode_parm_mode(u1_h2c_set_pwrmode, ((mode) ? 1 : 0));
rlbm = 0;/*YJ, temp, 120316. FW now not support RLBM=2.*/
- SET_H2CCMD_PWRMODE_PARM_RLBM(u1_h2c_set_pwrmode, rlbm);
- SET_H2CCMD_PWRMODE_PARM_SMART_PS(u1_h2c_set_pwrmode,
+ set_h2ccmd_pwrmode_parm_rlbm(u1_h2c_set_pwrmode, rlbm);
+ set_h2ccmd_pwrmode_parm_smart_ps(u1_h2c_set_pwrmode,
(rtlpriv->mac80211.p2p) ? ppsc->smart_ps : 1);
- SET_H2CCMD_PWRMODE_PARM_AWAKE_INTERVAL(u1_h2c_set_pwrmode,
+ set_h2ccmd_pwrmode_parm_awake_interval(u1_h2c_set_pwrmode,
ppsc->reg_max_lps_awakeintvl);
- SET_H2CCMD_PWRMODE_PARM_ALL_QUEUE_UAPSD(u1_h2c_set_pwrmode, 0);
+ set_h2ccmd_pwrmode_parm_all_queue_uapsd(u1_h2c_set_pwrmode, 0);
if (mode == FW_PS_ACTIVE_MODE)
power_state |= FW_PWR_STATE_ACTIVE;
else
power_state |= FW_PWR_STATE_RF_OFF;
- SET_H2CCMD_PWRMODE_PARM_PWR_STATE(u1_h2c_set_pwrmode, power_state);
+ set_h2ccmd_pwrmode_parm_pwr_state(u1_h2c_set_pwrmode, power_state);
RT_PRINT_DATA(rtlpriv, COMP_CMD, DBG_DMESG,
"rtl92c_set_fw_pwrmode(): u1_h2c_set_pwrmode\n",
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/fw.h b/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/fw.h
index 39ddb7afea9d..79f095e47d71 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/fw.h
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/fw.h
@@ -169,82 +169,55 @@ enum rtl8188e_h2c_cmd {
SET_BITS_TO_LE_1BYTE((__cmd)+2, 0, 8, __value)
-#define SET_H2CCMD_PWRMODE_PARM_MODE(__ph2ccmd, __val) \
- SET_BITS_TO_LE_1BYTE(__ph2ccmd, 0, 8, __val)
-#define SET_H2CCMD_PWRMODE_PARM_RLBM(__cmd, __value) \
- SET_BITS_TO_LE_1BYTE((__cmd)+1, 0, 4, __value)
-#define SET_H2CCMD_PWRMODE_PARM_SMART_PS(__cmd, __value) \
- SET_BITS_TO_LE_1BYTE((__cmd)+1, 4, 4, __value)
-#define SET_H2CCMD_PWRMODE_PARM_AWAKE_INTERVAL(__cmd, __value) \
- SET_BITS_TO_LE_1BYTE((__cmd)+2, 0, 8, __value)
-#define SET_H2CCMD_PWRMODE_PARM_ALL_QUEUE_UAPSD(__cmd, __value) \
- SET_BITS_TO_LE_1BYTE((__cmd)+3, 0, 8, __value)
-#define SET_H2CCMD_PWRMODE_PARM_PWR_STATE(__cmd, __value) \
- SET_BITS_TO_LE_1BYTE((__cmd)+4, 0, 8, __value)
-#define GET_88E_H2CCMD_PWRMODE_PARM_MODE(__cmd) \
- LE_BITS_TO_1BYTE(__cmd, 0, 8)
+static inline void set_h2ccmd_pwrmode_parm_mode(u8 *__ph2ccmd, u8 __val)
+{
+ *(u8 *)(__ph2ccmd) = __val;
+}
+
+static inline void set_h2ccmd_pwrmode_parm_rlbm(u8 *__cmd, u8 __value)
+{
+ u8p_replace_bits(__cmd + 1, __value, GENMASK(3, 0));
+}
+
+static inline void set_h2ccmd_pwrmode_parm_smart_ps(u8 *__cmd, u8 __value)
+{
+ u8p_replace_bits(__cmd + 1, __value, GENMASK(7, 4));
+}
+
+static inline void set_h2ccmd_pwrmode_parm_awake_interval(u8 *__cmd, u8 __value)
+{
+ *(u8 *)(__cmd + 2) = __value;
+}
+
+static inline void set_h2ccmd_pwrmode_parm_all_queue_uapsd(u8 *__cmd,
+ u8 __value)
+{
+ *(u8 *)(__cmd + 3) = __value;
+}
+
+static inline void set_h2ccmd_pwrmode_parm_pwr_state(u8 *__cmd, u8 __value)
+{
+ *(u8 *)(__cmd + 4) = __value;
+}
#define SET_H2CCMD_JOINBSSRPT_PARM_OPMODE(__ph2ccmd, __val) \
- SET_BITS_TO_LE_1BYTE(__ph2ccmd, 0, 8, __val)
+ *(u8 *)(__ph2ccmd) = __val
#define SET_H2CCMD_RSVDPAGE_LOC_PROBE_RSP(__ph2ccmd, __val) \
- SET_BITS_TO_LE_1BYTE(__ph2ccmd, 0, 8, __val)
+ *(u8 *)(__ph2ccmd) = __val
#define SET_H2CCMD_RSVDPAGE_LOC_PSPOLL(__ph2ccmd, __val) \
- SET_BITS_TO_LE_1BYTE((__ph2ccmd)+1, 0, 8, __val)
+ *(u8 *)(__ph2ccmd + 1) = __val
#define SET_H2CCMD_RSVDPAGE_LOC_NULL_DATA(__ph2ccmd, __val) \
- SET_BITS_TO_LE_1BYTE((__ph2ccmd)+2, 0, 8, __val)
+ *(u8 *)(__ph2ccmd + 2) = __val
/* AP_OFFLOAD */
#define SET_H2CCMD_AP_OFFLOAD_ON(__cmd, __value) \
- SET_BITS_TO_LE_1BYTE(__cmd, 0, 8, __value)
+ *(u8 *)__cmd = __value
#define SET_H2CCMD_AP_OFFLOAD_HIDDEN(__cmd, __value) \
- SET_BITS_TO_LE_1BYTE((__cmd)+1, 0, 8, __value)
+ *(u8 *)(__cmd + 1) = __value
#define SET_H2CCMD_AP_OFFLOAD_DENYANY(__cmd, __value) \
- SET_BITS_TO_LE_1BYTE((__cmd)+2, 0, 8, __value)
+ *(u8 *)(__cmd + 2) = __value
#define SET_H2CCMD_AP_OFFLOAD_WAKEUP_EVT_RPT(__cmd, __value) \
- SET_BITS_TO_LE_1BYTE((__cmd)+3, 0, 8, __value)
-
-/* Keep Alive Control*/
-#define SET_88E_H2CCMD_KEEP_ALIVE_ENABLE(__cmd, __value) \
- SET_BITS_TO_LE_1BYTE(__cmd, 0, 1, __value)
-#define SET_88E_H2CCMD_KEEP_ALIVE_ACCPEPT_USER_DEFINED(__cmd, __value) \
- SET_BITS_TO_LE_1BYTE(__cmd, 1, 1, __value)
-#define SET_88E_H2CCMD_KEEP_ALIVE_PERIOD(__cmd, __value) \
- SET_BITS_TO_LE_1BYTE((__cmd)+1, 0, 8, __value)
-
-/*REMOTE_WAKE_CTRL */
-#define SET_88E_H2CCMD_REMOTE_WAKE_CTRL_EN(__cmd, __value) \
- SET_BITS_TO_LE_1BYTE(__cmd, 0, 1, __value)
-#if (USE_OLD_WOWLAN_DEBUG_FW == 0)
-#define SET_88E_H2CCMD_REMOTE_WAKE_CTRL_ARP_OFFLOAD_EN(__cmd, __value) \
- SET_BITS_TO_LE_1BYTE(__cmd, 1, 1, __value)
-#define SET_88E_H2CCMD_REMOTE_WAKE_CTRL_NDP_OFFLOAD_EN(__cmd, __value) \
- SET_BITS_TO_LE_1BYTE(__cmd, 2, 1, __value)
-#define SET_88E_H2CCMD_REMOTE_WAKE_CTRL_GTK_OFFLOAD_EN(__cmd, __value) \
- SET_BITS_TO_LE_1BYTE(__cmd, 3, 1, __value)
-#else
-#define SET_88E_H2_REM_WAKE_ENC_ALG(__cmd, __value) \
- SET_BITS_TO_LE_1BYTE((__cmd)+1, 0, 8, __value)
-#define SET_88E_H2CCMD_REMOTE_WAKE_CTRL_GROUP_ENC_ALG(__cmd, __value) \
- SET_BITS_TO_LE_1BYTE((__cmd)+2, 0, 8, __value)
-#endif
-
-/* GTK_OFFLOAD */
-#define SET_88E_H2CCMD_AOAC_GLOBAL_INFO_PAIRWISE_ENC_ALG(__cmd, __value) \
- SET_BITS_TO_LE_1BYTE(__cmd, 0, 8, __value)
-#define SET_88E_H2CCMD_AOAC_GLOBAL_INFO_GROUP_ENC_ALG(__cmd, __value) \
- SET_BITS_TO_LE_1BYTE((__cmd)+1, 0, 8, __value)
-
-/* AOAC_RSVDPAGE_LOC */
-#define SET_88E_H2CCMD_AOAC_RSVD_LOC_REM_WAKE_CTRL_INFO(__cmd, __value) \
- SET_BITS_TO_LE_1BYTE((__cmd), 0, 8, __value)
-#define SET_88E_H2CCMD_AOAC_RSVDPAGE_LOC_ARP_RSP(__cmd, __value) \
- SET_BITS_TO_LE_1BYTE((__cmd)+1, 0, 8, __value)
-#define SET_88E_H2CCMD_AOAC_RSVDPAGE_LOC_NEIGHBOR_ADV(__cmd, __value) \
- SET_BITS_TO_LE_1BYTE((__cmd)+2, 0, 8, __value)
-#define SET_88E_H2CCMD_AOAC_RSVDPAGE_LOC_GTK_RSP(__cmd, __value) \
- SET_BITS_TO_LE_1BYTE((__cmd)+3, 0, 8, __value)
-#define SET_88E_H2CCMD_AOAC_RSVDPAGE_LOC_GTK_INFO(__cmd, __value) \
- SET_BITS_TO_LE_1BYTE((__cmd)+4, 0, 8, __value)
+ *(u8 *)(__cmd + 3) = __value
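The two u8p_replace_bits() calls above pack RLBM into bits 3..0 and SMART_PS into bits 7..4 of byte 1 of the H2C buffer. A sketch of the equivalent masking; note the kernel helper derives the shift from the mask itself, while this stand-in takes it explicitly:

#include <stdint.h>
#include <stdio.h>

/* simplified equivalent of u8p_replace_bits() for the two nibble fields;
 * mask positions follow the GENMASK() calls above */
static void u8p_replace_bits_sketch(uint8_t *p, uint8_t val,
				    uint8_t mask, unsigned int shift)
{
	*p = (*p & ~mask) | ((val << shift) & mask);
}

int main(void)
{
	uint8_t h2c[5] = { 0 };

	u8p_replace_bits_sketch(&h2c[1], 0x0, 0x0f, 0);	/* RLBM, bits 3..0 */
	u8p_replace_bits_sketch(&h2c[1], 0x1, 0xf0, 4);	/* SMART_PS, bits 7..4 */
	printf("byte1 = 0x%02x\n", h2c[1]);	/* 0x10 */
	return 0;
}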
int rtl88e_download_fw(struct ieee80211_hw *hw,
bool buse_wake_on_wlan_fw);
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/hw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/hw.c
index f92e95f5494f..70716631de85 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/hw.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/hw.c
@@ -2486,13 +2486,12 @@ void rtl8188ee_bt_hw_init(struct ieee80211_hw *hw)
if (rtlpriv->btcoexist.bt_ant_isolation)
rtl_write_byte(rtlpriv, REG_GPIO_MUXCFG, 0xa0);
- u1_tmp = rtl_read_byte(rtlpriv, 0x4fd) &
- BIT_OFFSET_LEN_MASK_32(0, 1);
+ u1_tmp = rtl_read_byte(rtlpriv, 0x4fd) & BIT(0);
u1_tmp = u1_tmp |
((rtlpriv->btcoexist.bt_ant_isolation == 1) ?
- 0 : BIT_OFFSET_LEN_MASK_32(1, 1)) |
+ 0 : BIT(1)) |
((rtlpriv->btcoexist.bt_service == BT_SCO) ?
- 0 : BIT_OFFSET_LEN_MASK_32(2, 1));
+ 0 : BIT(2));
rtl_write_byte(rtlpriv, 0x4fd, u1_tmp);
rtl_write_dword(rtlpriv, REG_BT_COEX_TABLE+4, 0xaaaa9aaa);
@@ -2502,11 +2501,11 @@ void rtl8188ee_bt_hw_init(struct ieee80211_hw *hw)
/* Config to 1T1R. */
if (rtlphy->rf_type == RF_1T1R) {
u1_tmp = rtl_read_byte(rtlpriv, ROFDM0_TRXPATHENABLE);
- u1_tmp &= ~(BIT_OFFSET_LEN_MASK_32(1, 1));
+ u1_tmp &= ~(BIT(1));
rtl_write_byte(rtlpriv, ROFDM0_TRXPATHENABLE, u1_tmp);
u1_tmp = rtl_read_byte(rtlpriv, ROFDM1_TRXPATHENABLE);
- u1_tmp &= ~(BIT_OFFSET_LEN_MASK_32(1, 1));
+ u1_tmp &= ~(BIT(1));
rtl_write_byte(rtlpriv, ROFDM1_TRXPATHENABLE, u1_tmp);
}
}
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/phy.c b/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/phy.c
index 5ca900f97d66..d13983ec09ad 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/phy.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/phy.c
@@ -264,7 +264,7 @@ static bool _rtl88e_check_condition(struct ieee80211_hw *hw,
u32 _board = rtlefuse->board_type; /*need efuse define*/
u32 _interface = rtlhal->interface;
u32 _platform = 0x08;/*SupportPlatform */
- u32 cond = condition;
+ u32 cond;
if (condition == 0xCDCDCDCD)
return true;
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/sw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/sw.c
index a0eda51e833c..4865639ac9ea 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/sw.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/sw.c
@@ -9,7 +9,6 @@
#include "phy.h"
#include "dm.h"
#include "hw.h"
-#include "sw.h"
#include "trx.h"
#include "led.h"
#include "table.h"
@@ -59,7 +58,7 @@ static void rtl88e_init_aspm_vars(struct ieee80211_hw *hw)
rtlpci->const_support_pciaspm = rtlpriv->cfg->mod_params->aspm_support;
}
-int rtl88e_init_sw_vars(struct ieee80211_hw *hw)
+static int rtl88e_init_sw_vars(struct ieee80211_hw *hw)
{
int err = 0;
struct rtl_priv *rtlpriv = rtl_priv(hw);
@@ -173,7 +172,7 @@ int rtl88e_init_sw_vars(struct ieee80211_hw *hw)
return err;
}
-void rtl88e_deinit_sw_vars(struct ieee80211_hw *hw)
+static void rtl88e_deinit_sw_vars(struct ieee80211_hw *hw)
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
@@ -189,7 +188,7 @@ void rtl88e_deinit_sw_vars(struct ieee80211_hw *hw)
}
/* get bt coexist status */
-bool rtl88e_get_btc_status(void)
+static bool rtl88e_get_btc_status(void)
{
return false;
}
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/sw.h b/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/sw.h
deleted file mode 100644
index 1407151503f8..000000000000
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/sw.h
+++ /dev/null
@@ -1,12 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/* Copyright(c) 2009-2013 Realtek Corporation.*/
-
-#ifndef __RTL92CE_SW_H__
-#define __RTL92CE_SW_H__
-
-int rtl88e_init_sw_vars(struct ieee80211_hw *hw);
-void rtl88e_deinit_sw_vars(struct ieee80211_hw *hw);
-bool rtl88e_get_btc_status(void);
-
-
-#endif
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192c/dm_common.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192c/dm_common.c
index 4bef237f488d..06fc9b5cdd8f 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192c/dm_common.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192c/dm_common.c
@@ -8,11 +8,11 @@
#include "../base.h"
#include "../core.h"
-#define BT_RSSI_STATE_NORMAL_POWER BIT_OFFSET_LEN_MASK_32(0, 1)
-#define BT_RSSI_STATE_AMDPU_OFF BIT_OFFSET_LEN_MASK_32(1, 1)
-#define BT_RSSI_STATE_SPECIAL_LOW BIT_OFFSET_LEN_MASK_32(2, 1)
-#define BT_RSSI_STATE_BG_EDCA_LOW BIT_OFFSET_LEN_MASK_32(3, 1)
-#define BT_RSSI_STATE_TXPOWER_LOW BIT_OFFSET_LEN_MASK_32(4, 1)
+#define BT_RSSI_STATE_NORMAL_POWER BIT(0)
+#define BT_RSSI_STATE_AMDPU_OFF BIT(1)
+#define BT_RSSI_STATE_SPECIAL_LOW BIT(2)
+#define BT_RSSI_STATE_BG_EDCA_LOW BIT(3)
+#define BT_RSSI_STATE_TXPOWER_LOW BIT(4)
#define BT_MASK 0x00ffffff
#define RTLPRIV (struct rtl_priv *)
@@ -1515,7 +1515,7 @@ static bool rtl92c_bt_state_change(struct ieee80211_hw *hw)
polling == 0xffffffff && bt_state == 0xff)
return false;
- bt_state &= BIT_OFFSET_LEN_MASK_32(0, 1);
+ bt_state &= BIT(0);
if (bt_state != rtlpriv->btcoexist.bt_cur_state) {
rtlpriv->btcoexist.bt_cur_state = bt_state;
@@ -1524,8 +1524,7 @@ static bool rtl92c_bt_state_change(struct ieee80211_hw *hw)
bt_state = bt_state |
((rtlpriv->btcoexist.bt_ant_isolation == 1) ?
- 0 : BIT_OFFSET_LEN_MASK_32(1, 1)) |
- BIT_OFFSET_LEN_MASK_32(2, 1);
+ 0 : BIT(1)) | BIT(2);
rtl_write_byte(rtlpriv, 0x4fd, bt_state);
}
return true;
@@ -1555,9 +1554,9 @@ static bool rtl92c_bt_state_change(struct ieee80211_hw *hw)
rtlpriv->btcoexist.bt_service = cur_service_type;
bt_state = bt_state |
((rtlpriv->btcoexist.bt_ant_isolation == 1) ?
- 0 : BIT_OFFSET_LEN_MASK_32(1, 1)) |
+ 0 : BIT(1)) |
((rtlpriv->btcoexist.bt_service != BT_IDLE) ?
- 0 : BIT_OFFSET_LEN_MASK_32(2, 1));
+ 0 : BIT(2));
/* Add interrupt migration when bt is not ini
* idle state (no traffic). */
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192c/fw_common.h b/drivers/net/wireless/realtek/rtlwifi/rtl8192c/fw_common.h
index 888d9fc94bc2..706fc753dfe6 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192c/fw_common.h
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192c/fw_common.h
@@ -46,19 +46,19 @@
#define pagenum_128(_len) (u32)(((_len)>>7) + ((_len)&0x7F ? 1 : 0))
#define SET_H2CCMD_PWRMODE_PARM_MODE(__ph2ccmd, __val) \
- SET_BITS_TO_LE_1BYTE(__ph2ccmd, 0, 8, __val)
+ *(u8 *)(__ph2ccmd) = __val
#define SET_H2CCMD_PWRMODE_PARM_SMART_PS(__ph2ccmd, __val) \
- SET_BITS_TO_LE_1BYTE((__ph2ccmd)+1, 0, 8, __val)
+ *(u8 *)(__ph2ccmd + 1) = __val
#define SET_H2CCMD_PWRMODE_PARM_BCN_PASS_TIME(__ph2ccmd, __val) \
- SET_BITS_TO_LE_1BYTE((__ph2ccmd)+2, 0, 8, __val)
+ *(u8 *)(__ph2ccmd + 2) = __val
#define SET_H2CCMD_JOINBSSRPT_PARM_OPMODE(__ph2ccmd, __val) \
- SET_BITS_TO_LE_1BYTE(__ph2ccmd, 0, 8, __val)
+ *(u8 *)(__ph2ccmd) = __val
#define SET_H2CCMD_RSVDPAGE_LOC_PROBE_RSP(__ph2ccmd, __val) \
- SET_BITS_TO_LE_1BYTE(__ph2ccmd, 0, 8, __val)
+ *(u8 *)(__ph2ccmd) = __val
#define SET_H2CCMD_RSVDPAGE_LOC_PSPOLL(__ph2ccmd, __val) \
- SET_BITS_TO_LE_1BYTE((__ph2ccmd)+1, 0, 8, __val)
+ *(u8 *)(__ph2ccmd + 1) = __val
#define SET_H2CCMD_RSVDPAGE_LOC_NULL_DATA(__ph2ccmd, __val) \
- SET_BITS_TO_LE_1BYTE((__ph2ccmd)+2, 0, 8, __val)
+ *(u8 *)(__ph2ccmd + 2) = __val
int rtl92c_download_fw(struct ieee80211_hw *hw);
void rtl92c_fill_h2c_cmd(struct ieee80211_hw *hw, u8 element_id,
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/hw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/hw.c
index a52dd64d528d..6402a9e09be7 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/hw.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/hw.c
@@ -2299,13 +2299,12 @@ void rtl8192ce_bt_hw_init(struct ieee80211_hw *hw)
if (rtlpriv->btcoexist.bt_ant_isolation)
rtl_write_byte(rtlpriv, REG_GPIO_MUXCFG, 0xa0);
- u1_tmp = rtl_read_byte(rtlpriv, 0x4fd) &
- BIT_OFFSET_LEN_MASK_32(0, 1);
+ u1_tmp = rtl_read_byte(rtlpriv, 0x4fd) & BIT(0);
u1_tmp = u1_tmp |
((rtlpriv->btcoexist.bt_ant_isolation == 1) ?
- 0 : BIT_OFFSET_LEN_MASK_32(1, 1)) |
+ 0 : BIT(1)) |
((rtlpriv->btcoexist.bt_service == BT_SCO) ?
- 0 : BIT_OFFSET_LEN_MASK_32(2, 1));
+ 0 : BIT(2));
rtl_write_byte(rtlpriv, 0x4fd, u1_tmp);
rtl_write_dword(rtlpriv, REG_BT_COEX_TABLE+4, 0xaaaa9aaa);
@@ -2315,11 +2314,11 @@ void rtl8192ce_bt_hw_init(struct ieee80211_hw *hw)
/* Config to 1T1R. */
if (rtlphy->rf_type == RF_1T1R) {
u1_tmp = rtl_read_byte(rtlpriv, ROFDM0_TRXPATHENABLE);
- u1_tmp &= ~(BIT_OFFSET_LEN_MASK_32(1, 1));
+ u1_tmp &= ~(BIT(1));
rtl_write_byte(rtlpriv, ROFDM0_TRXPATHENABLE, u1_tmp);
u1_tmp = rtl_read_byte(rtlpriv, ROFDM1_TRXPATHENABLE);
- u1_tmp &= ~(BIT_OFFSET_LEN_MASK_32(1, 1));
+ u1_tmp &= ~(BIT(1));
rtl_write_byte(rtlpriv, ROFDM1_TRXPATHENABLE, u1_tmp);
}
}
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/sw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/sw.c
index 900788e4018c..ed68c850f9a2 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/sw.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/sw.c
@@ -14,7 +14,6 @@
#include "../rtl8192c/phy_common.h"
#include "hw.h"
#include "rf.h"
-#include "sw.h"
#include "trx.h"
#include "led.h"
@@ -65,7 +64,7 @@ static void rtl92c_init_aspm_vars(struct ieee80211_hw *hw)
rtlpci->const_support_pciaspm = rtlpriv->cfg->mod_params->aspm_support;
}
-int rtl92c_init_sw_vars(struct ieee80211_hw *hw)
+static int rtl92c_init_sw_vars(struct ieee80211_hw *hw)
{
int err;
struct rtl_priv *rtlpriv = rtl_priv(hw);
@@ -161,7 +160,7 @@ int rtl92c_init_sw_vars(struct ieee80211_hw *hw)
return 0;
}
-void rtl92c_deinit_sw_vars(struct ieee80211_hw *hw)
+static void rtl92c_deinit_sw_vars(struct ieee80211_hw *hw)
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/sw.h b/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/sw.h
deleted file mode 100644
index f2d121a60159..000000000000
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/sw.h
+++ /dev/null
@@ -1,15 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/* Copyright(c) 2009-2012 Realtek Corporation.*/
-
-#ifndef __RTL92CE_SW_H__
-#define __RTL92CE_SW_H__
-
-int rtl92c_init_sw_vars(struct ieee80211_hw *hw);
-void rtl92c_deinit_sw_vars(struct ieee80211_hw *hw);
-void rtl92c_init_var_map(struct ieee80211_hw *hw);
-bool _rtl92ce_phy_config_bb_with_headerfile(struct ieee80211_hw *hw,
- u8 configtype);
-bool _rtl92ce_phy_config_bb_with_pgheaderfile(struct ieee80211_hw *hw,
- u8 configtype);
-
-#endif
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/trx.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/trx.c
index fc9a3aae047f..8fc3cb824066 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/trx.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/trx.c
@@ -23,45 +23,6 @@ static u8 _rtl92ce_map_hwqueue_to_fwqueue(struct sk_buff *skb, u8 hw_queue)
return skb->priority;
}
-static u8 _rtl92c_query_rxpwrpercentage(s8 antpower)
-{
- if ((antpower <= -100) || (antpower >= 20))
- return 0;
- else if (antpower >= 0)
- return 100;
- else
- return 100 + antpower;
-}
-
-static long _rtl92ce_signal_scale_mapping(struct ieee80211_hw *hw,
- long currsig)
-{
- long retsig;
-
- if (currsig >= 61 && currsig <= 100)
- retsig = 90 + ((currsig - 60) / 4);
- else if (currsig >= 41 && currsig <= 60)
- retsig = 78 + ((currsig - 40) / 2);
- else if (currsig >= 31 && currsig <= 40)
- retsig = 66 + (currsig - 30);
- else if (currsig >= 21 && currsig <= 30)
- retsig = 54 + (currsig - 20);
- else if (currsig >= 5 && currsig <= 20)
- retsig = 42 + (((currsig - 5) * 2) / 3);
- else if (currsig == 4)
- retsig = 36;
- else if (currsig == 3)
- retsig = 27;
- else if (currsig == 2)
- retsig = 18;
- else if (currsig == 1)
- retsig = 9;
- else
- retsig = currsig;
-
- return retsig;
-}
-
static void _rtl92ce_query_rxphystatus(struct ieee80211_hw *hw,
struct rtl_stats *pstats,
struct rx_desc_92c *pdesc,
@@ -194,7 +155,7 @@ static void _rtl92ce_query_rxphystatus(struct ieee80211_hw *hw,
rx_pwr[i] =
((p_drvinfo->gain_trsw[i] & 0x3f) * 2) - 110;
/* Translate DBM to percentage. */
- rssi = _rtl92c_query_rxpwrpercentage(rx_pwr[i]);
+ rssi = rtl_query_rxpwrpercentage(rx_pwr[i]);
total_rssi += rssi;
/* Get Rx snr value in DB */
rtlpriv->stats.rx_snr_db[i] =
@@ -209,7 +170,7 @@ static void _rtl92ce_query_rxphystatus(struct ieee80211_hw *hw,
* hardware (for rate adaptive)
*/
rx_pwr_all = ((p_drvinfo->pwdb_all >> 1) & 0x7f) - 110;
- pwdb_all = _rtl92c_query_rxpwrpercentage(rx_pwr_all);
+ pwdb_all = rtl_query_rxpwrpercentage(rx_pwr_all);
pstats->rx_pwdb_all = pwdb_all;
pstats->rxpower = rx_pwr_all;
pstats->recvsignalpower = rx_pwr_all;
@@ -241,11 +202,10 @@ static void _rtl92ce_query_rxphystatus(struct ieee80211_hw *hw,
*/
if (is_cck_rate)
pstats->signalstrength =
- (u8)(_rtl92ce_signal_scale_mapping(hw, pwdb_all));
+ (u8)(rtl_signal_scale_mapping(hw, pwdb_all));
else if (rf_rx_num != 0)
pstats->signalstrength =
- (u8)(_rtl92ce_signal_scale_mapping
- (hw, total_rssi /= rf_rx_num));
+ (u8)(rtl_signal_scale_mapping(hw, total_rssi /= rf_rx_num));
}
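The helpers deleted here are not lost: identical copies existed in rtl8192ce, rtl8192cu and rtl8192de, and they now live once in rtlwifi's base code as rtl_query_rxpwrpercentage() and rtl_signal_scale_mapping(). The dBm-to-percentage mapping, reproduced from the removed body with worked values:

#include <stdio.h>

/* same mapping as the deleted per-driver copies, now shared driver-wide */
static unsigned char query_rxpwrpercentage(signed char antpower)
{
	if (antpower <= -100 || antpower >= 20)
		return 0;
	else if (antpower >= 0)
		return 100;
	else
		return 100 + antpower;
}

int main(void)
{
	printf("%u %u %u\n",
	       query_rxpwrpercentage(-60),	/* 40 */
	       query_rxpwrpercentage(-5),	/* 95 */
	       query_rxpwrpercentage(5));	/* 100 */
	return 0;
}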
static void _rtl92ce_translate_rx_signal_stuff(struct ieee80211_hw *hw,
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/mac.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/mac.c
index cec19b32c7e2..b4b67341dc83 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/mac.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/mac.c
@@ -567,44 +567,6 @@ void rtl92c_set_min_space(struct ieee80211_hw *hw, bool is2T)
/*==============================================================*/
-static u8 _rtl92c_query_rxpwrpercentage(s8 antpower)
-{
- if ((antpower <= -100) || (antpower >= 20))
- return 0;
- else if (antpower >= 0)
- return 100;
- else
- return 100 + antpower;
-}
-
-static long _rtl92c_signal_scale_mapping(struct ieee80211_hw *hw,
- long currsig)
-{
- long retsig;
-
- if (currsig >= 61 && currsig <= 100)
- retsig = 90 + ((currsig - 60) / 4);
- else if (currsig >= 41 && currsig <= 60)
- retsig = 78 + ((currsig - 40) / 2);
- else if (currsig >= 31 && currsig <= 40)
- retsig = 66 + (currsig - 30);
- else if (currsig >= 21 && currsig <= 30)
- retsig = 54 + (currsig - 20);
- else if (currsig >= 5 && currsig <= 20)
- retsig = 42 + (((currsig - 5) * 2) / 3);
- else if (currsig == 4)
- retsig = 36;
- else if (currsig == 3)
- retsig = 27;
- else if (currsig == 2)
- retsig = 18;
- else if (currsig == 1)
- retsig = 9;
- else
- retsig = currsig;
- return retsig;
-}
-
static void _rtl92c_query_rxphystatus(struct ieee80211_hw *hw,
struct rtl_stats *pstats,
struct rx_desc_92c *p_desc,
@@ -678,7 +640,7 @@ static void _rtl92c_query_rxphystatus(struct ieee80211_hw *hw,
break;
}
}
- pwdb_all = _rtl92c_query_rxpwrpercentage(rx_pwr_all);
+ pwdb_all = rtl_query_rxpwrpercentage(rx_pwr_all);
pstats->rx_pwdb_all = pwdb_all;
pstats->recvsignalpower = rx_pwr_all;
if (packet_match_bssid) {
@@ -707,7 +669,7 @@ static void _rtl92c_query_rxphystatus(struct ieee80211_hw *hw,
rf_rx_num++;
rx_pwr[i] =
((p_drvinfo->gain_trsw[i] & 0x3f) * 2) - 110;
- rssi = _rtl92c_query_rxpwrpercentage(rx_pwr[i]);
+ rssi = rtl_query_rxpwrpercentage(rx_pwr[i]);
total_rssi += rssi;
rtlpriv->stats.rx_snr_db[i] =
(long)(p_drvinfo->rxsnr[i] / 2);
@@ -716,7 +678,7 @@ static void _rtl92c_query_rxphystatus(struct ieee80211_hw *hw,
pstats->rx_mimo_signalstrength[i] = (u8) rssi;
}
rx_pwr_all = ((p_drvinfo->pwdb_all >> 1) & 0x7f) - 110;
- pwdb_all = _rtl92c_query_rxpwrpercentage(rx_pwr_all);
+ pwdb_all = rtl_query_rxpwrpercentage(rx_pwr_all);
pstats->rx_pwdb_all = pwdb_all;
pstats->rxpower = rx_pwr_all;
pstats->recvsignalpower = rx_pwr_all;
@@ -739,11 +701,10 @@ static void _rtl92c_query_rxphystatus(struct ieee80211_hw *hw,
}
if (is_cck_rate)
pstats->signalstrength =
- (u8) (_rtl92c_signal_scale_mapping(hw, pwdb_all));
+ (u8)(rtl_signal_scale_mapping(hw, pwdb_all));
else if (rf_rx_num != 0)
pstats->signalstrength =
- (u8) (_rtl92c_signal_scale_mapping
- (hw, total_rssi /= rf_rx_num));
+ (u8)(rtl_signal_scale_mapping(hw, total_rssi /= rf_rx_num));
}
void rtl92c_translate_rx_signal_stuff(struct ieee80211_hw *hw,
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/sw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/sw.c
index ab3e4aebad39..b53daf1b29f7 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/sw.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/sw.c
@@ -12,7 +12,6 @@
#include "mac.h"
#include "dm.h"
#include "rf.h"
-#include "sw.h"
#include "trx.h"
#include "led.h"
#include "hw.h"
@@ -252,45 +251,45 @@ static struct rtl_hal_cfg rtl92cu_hal_cfg = {
.maps[RTL_RC_HT_RATEMCS15] = DESC_RATEMCS15,
};
-#define USB_VENDER_ID_REALTEK 0x0bda
+#define USB_VENDOR_ID_REALTEK 0x0bda
/* 2010-10-19 DID_USB_V3.4 */
static const struct usb_device_id rtl8192c_usb_ids[] = {
/*=== Realtek demoboard ===*/
/* Default ID */
- {RTL_USB_DEVICE(USB_VENDER_ID_REALTEK, 0x8191, rtl92cu_hal_cfg)},
+ {RTL_USB_DEVICE(USB_VENDOR_ID_REALTEK, 0x8191, rtl92cu_hal_cfg)},
/****** 8188CU ********/
/* RTL8188CTV */
- {RTL_USB_DEVICE(USB_VENDER_ID_REALTEK, 0x018a, rtl92cu_hal_cfg)},
+ {RTL_USB_DEVICE(USB_VENDOR_ID_REALTEK, 0x018a, rtl92cu_hal_cfg)},
/* 8188CE-VAU USB minCard */
- {RTL_USB_DEVICE(USB_VENDER_ID_REALTEK, 0x8170, rtl92cu_hal_cfg)},
+ {RTL_USB_DEVICE(USB_VENDOR_ID_REALTEK, 0x8170, rtl92cu_hal_cfg)},
/* 8188cu 1*1 dongle */
- {RTL_USB_DEVICE(USB_VENDER_ID_REALTEK, 0x8176, rtl92cu_hal_cfg)},
+ {RTL_USB_DEVICE(USB_VENDOR_ID_REALTEK, 0x8176, rtl92cu_hal_cfg)},
/* 8188cu 1*1 dongle, (b/g mode only) */
- {RTL_USB_DEVICE(USB_VENDER_ID_REALTEK, 0x8177, rtl92cu_hal_cfg)},
+ {RTL_USB_DEVICE(USB_VENDOR_ID_REALTEK, 0x8177, rtl92cu_hal_cfg)},
/* 8188cu Slim Solo */
- {RTL_USB_DEVICE(USB_VENDER_ID_REALTEK, 0x817a, rtl92cu_hal_cfg)},
+ {RTL_USB_DEVICE(USB_VENDOR_ID_REALTEK, 0x817a, rtl92cu_hal_cfg)},
/* 8188cu Slim Combo */
- {RTL_USB_DEVICE(USB_VENDER_ID_REALTEK, 0x817b, rtl92cu_hal_cfg)},
+ {RTL_USB_DEVICE(USB_VENDOR_ID_REALTEK, 0x817b, rtl92cu_hal_cfg)},
/* 8188RU High-power USB Dongle */
- {RTL_USB_DEVICE(USB_VENDER_ID_REALTEK, 0x817d, rtl92cu_hal_cfg)},
+ {RTL_USB_DEVICE(USB_VENDOR_ID_REALTEK, 0x817d, rtl92cu_hal_cfg)},
/* 8188CE-VAU USB minCard (b/g mode only) */
- {RTL_USB_DEVICE(USB_VENDER_ID_REALTEK, 0x817e, rtl92cu_hal_cfg)},
+ {RTL_USB_DEVICE(USB_VENDOR_ID_REALTEK, 0x817e, rtl92cu_hal_cfg)},
/* 8188RU in Alfa AWUS036NHR */
- {RTL_USB_DEVICE(USB_VENDER_ID_REALTEK, 0x817f, rtl92cu_hal_cfg)},
+ {RTL_USB_DEVICE(USB_VENDOR_ID_REALTEK, 0x817f, rtl92cu_hal_cfg)},
/* RTL8188CUS-VL */
- {RTL_USB_DEVICE(USB_VENDER_ID_REALTEK, 0x818a, rtl92cu_hal_cfg)},
- {RTL_USB_DEVICE(USB_VENDER_ID_REALTEK, 0x819a, rtl92cu_hal_cfg)},
+ {RTL_USB_DEVICE(USB_VENDOR_ID_REALTEK, 0x818a, rtl92cu_hal_cfg)},
+ {RTL_USB_DEVICE(USB_VENDOR_ID_REALTEK, 0x819a, rtl92cu_hal_cfg)},
/* 8188 Combo for BC4 */
- {RTL_USB_DEVICE(USB_VENDER_ID_REALTEK, 0x8754, rtl92cu_hal_cfg)},
+ {RTL_USB_DEVICE(USB_VENDOR_ID_REALTEK, 0x8754, rtl92cu_hal_cfg)},
/****** 8192CU ********/
/* 8192cu 2*2 */
- {RTL_USB_DEVICE(USB_VENDER_ID_REALTEK, 0x8178, rtl92cu_hal_cfg)},
+ {RTL_USB_DEVICE(USB_VENDOR_ID_REALTEK, 0x8178, rtl92cu_hal_cfg)},
/* 8192CE-VAU USB minCard */
- {RTL_USB_DEVICE(USB_VENDER_ID_REALTEK, 0x817c, rtl92cu_hal_cfg)},
+ {RTL_USB_DEVICE(USB_VENDOR_ID_REALTEK, 0x817c, rtl92cu_hal_cfg)},
/*=== Customer ID ===*/
/****** 8188CU ********/
@@ -329,7 +328,7 @@ static const struct usb_device_id rtl8192c_usb_ids[] = {
/****** 8188 RU ********/
/* Netcore */
- {RTL_USB_DEVICE(USB_VENDER_ID_REALTEK, 0x317f, rtl92cu_hal_cfg)},
+ {RTL_USB_DEVICE(USB_VENDOR_ID_REALTEK, 0x317f, rtl92cu_hal_cfg)},
/****** 8188CUS Slim Solo********/
{RTL_USB_DEVICE(0x04f2, 0xaff7, rtl92cu_hal_cfg)}, /*Xavi*/
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/sw.h b/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/sw.h
deleted file mode 100644
index 662440c4cdc9..000000000000
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/sw.h
+++ /dev/null
@@ -1,27 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/* Copyright(c) 2009-2012 Realtek Corporation.*/
-
-#ifndef __RTL92CU_SW_H__
-#define __RTL92CU_SW_H__
-
-#define EFUSE_MAX_SECTION 16
-
-void rtl92cu_phy_rf6052_set_cck_txpower(struct ieee80211_hw *hw,
- u8 *powerlevel);
-void rtl92cu_phy_rf6052_set_ofdm_txpower(struct ieee80211_hw *hw,
- u8 *ppowerlevel, u8 channel);
-bool _rtl92cu_phy_config_bb_with_headerfile(struct ieee80211_hw *hw,
- u8 configtype);
-bool _rtl92cu_phy_config_bb_with_pgheaderfile(struct ieee80211_hw *hw,
- u8 configtype);
-void _rtl92cu_phy_lc_calibrate(struct ieee80211_hw *hw, bool is2t);
-void rtl92cu_phy_set_rf_reg(struct ieee80211_hw *hw,
- enum radio_path rfpath,
- u32 regaddr, u32 bitmask, u32 data);
-bool rtl92cu_phy_set_rf_power_state(struct ieee80211_hw *hw,
- enum rf_pwrstate rfpwr_state);
-u32 rtl92cu_phy_query_rf_reg(struct ieee80211_hw *hw,
- enum radio_path rfpath, u32 regaddr, u32 bitmask);
-void rtl92cu_phy_set_bw_mode_callback(struct ieee80211_hw *hw);
-
-#endif
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192de/fw.h b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/fw.h
index bd8ea6d66dff..7f0a17c1a9ea 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192de/fw.h
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/fw.h
@@ -16,73 +16,26 @@
(GET_FIRMWARE_HDR_SIGNATURE(_pfwhdr) & 0xFFFF) == 0x92D2 || \
(GET_FIRMWARE_HDR_SIGNATURE(_pfwhdr) & 0xFFFF) == 0x92D3)
-/* Define a macro that takes an le32 word, converts it to host ordering,
- * right shifts by a specified count, creates a mask of the specified
- * bit count, and extracts that number of bits.
- */
-
-#define SHIFT_AND_MASK_LE(__pdesc, __shift, __mask) \
- ((le32_to_cpu(*(((__le32 *)(__pdesc)))) >> (__shift)) & \
- BIT_LEN_MASK_32(__mask))
-
/* Firmware Header(8-byte alinment required) */
/* --- LONG WORD 0 ---- */
#define GET_FIRMWARE_HDR_SIGNATURE(__fwhdr) \
- SHIFT_AND_MASK_LE(__fwhdr, 0, 16)
-#define GET_FIRMWARE_HDR_CATEGORY(__fwhdr) \
- SHIFT_AND_MASK_LE(__fwhdr, 16, 8)
-#define GET_FIRMWARE_HDR_FUNCTION(__fwhdr) \
- SHIFT_AND_MASK_LE(__fwhdr, 24, 8)
+ le32_get_bits(*(__le32 *)__fwhdr, GENMASK(15, 0))
#define GET_FIRMWARE_HDR_VERSION(__fwhdr) \
- SHIFT_AND_MASK_LE(__fwhdr + 4, 0, 16)
+ le32_get_bits(*(__le32 *)(__fwhdr + 4), GENMASK(15, 0))
#define GET_FIRMWARE_HDR_SUB_VER(__fwhdr) \
- SHIFT_AND_MASK_LE(__fwhdr + 4, 16, 8)
-#define GET_FIRMWARE_HDR_RSVD1(__fwhdr) \
- SHIFT_AND_MASK_LE(__fwhdr + 4, 24, 8)
-
-/* --- LONG WORD 1 ---- */
-#define GET_FIRMWARE_HDR_MONTH(__fwhdr) \
- SHIFT_AND_MASK_LE(__fwhdr + 8, 0, 8)
-#define GET_FIRMWARE_HDR_DATE(__fwhdr) \
- SHIFT_AND_MASK_LE(__fwhdr + 8, 8, 8)
-#define GET_FIRMWARE_HDR_HOUR(__fwhdr) \
- SHIFT_AND_MASK_LE(__fwhdr + 8, 16, 8)
-#define GET_FIRMWARE_HDR_MINUTE(__fwhdr) \
- SHIFT_AND_MASK_LE(__fwhdr + 8, 24, 8)
-#define GET_FIRMWARE_HDR_ROMCODE_SIZE(__fwhdr) \
- SHIFT_AND_MASK_LE(__fwhdr + 12, 0, 16)
-#define GET_FIRMWARE_HDR_RSVD2(__fwhdr) \
- SHIFT_AND_MASK_LE(__fwhdr + 12, 16, 16)
-
-/* --- LONG WORD 2 ---- */
-#define GET_FIRMWARE_HDR_SVN_IDX(__fwhdr) \
- SHIFT_AND_MASK_LE(__fwhdr + 16, 0, 32)
-#define GET_FIRMWARE_HDR_RSVD3(__fwhdr) \
- SHIFT_AND_MASK_LE(__fwhdr + 20, 0, 32)
-
-/* --- LONG WORD 3 ---- */
-#define GET_FIRMWARE_HDR_RSVD4(__fwhdr) \
- SHIFT_AND_MASK_LE(__fwhdr + 24, 0, 32)
-#define GET_FIRMWARE_HDR_RSVD5(__fwhdr) \
- SHIFT_AND_MASK_LE(__fwhdr + 28, 0, 32)
+ le32_get_bits(*(__le32 *)(__fwhdr + 4), GENMASK(23, 16))
#define pagenum_128(_len) \
(u32)(((_len) >> 7) + ((_len) & 0x7F ? 1 : 0))
-#define SET_H2CCMD_PWRMODE_PARM_MODE(__ph2ccmd, __val) \
- SET_BITS_TO_LE_1BYTE(__ph2ccmd, 0, 8, __val)
-#define SET_H2CCMD_PWRMODE_PARM_SMART_PS(__ph2ccmd, __val) \
- SET_BITS_TO_LE_1BYTE((__ph2ccmd) + 1, 0, 8, __val)
-#define SET_H2CCMD_PWRMODE_PARM_BCN_PASS_TIME(__ph2ccmd, __val) \
- SET_BITS_TO_LE_1BYTE((__ph2ccmd) + 2, 0, 8, __val)
#define SET_H2CCMD_JOINBSSRPT_PARM_OPMODE(__ph2ccmd, __val) \
- SET_BITS_TO_LE_1BYTE(__ph2ccmd, 0, 8, __val)
+ *(u8 *)__ph2ccmd = __val
#define SET_H2CCMD_RSVDPAGE_LOC_PROBE_RSP(__ph2ccmd, __val) \
- SET_BITS_TO_LE_1BYTE(__ph2ccmd, 0, 8, __val)
+ *(u8 *)__ph2ccmd = __val
#define SET_H2CCMD_RSVDPAGE_LOC_PSPOLL(__ph2ccmd, __val) \
- SET_BITS_TO_LE_1BYTE((__ph2ccmd) + 1, 0, 8, __val)
+ *(u8 *)(__ph2ccmd + 1) = __val
#define SET_H2CCMD_RSVDPAGE_LOC_NULL_DATA(__ph2ccmd, __val) \
- SET_BITS_TO_LE_1BYTE((__ph2ccmd) + 2, 0, 8, __val)
+ *(u8 *)(__ph2ccmd + 2) = __val
int rtl92d_download_fw(struct ieee80211_hw *hw);
void rtl92d_fill_h2c_cmd(struct ieee80211_hw *hw, u8 element_id,
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192de/trx.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/trx.c
index 92c9fb45f800..ab5b05ef168e 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192de/trx.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/trx.c
@@ -23,16 +23,6 @@ static u8 _rtl92de_map_hwqueue_to_fwqueue(struct sk_buff *skb, u8 hw_queue)
return skb->priority;
}
-static u8 _rtl92d_query_rxpwrpercentage(s8 antpower)
-{
- if ((antpower <= -100) || (antpower >= 20))
- return 0;
- else if (antpower >= 0)
- return 100;
- else
- return 100 + antpower;
-}
-
static long _rtl92de_translate_todbm(struct ieee80211_hw *hw,
u8 signal_strength_index)
{
@@ -43,33 +33,6 @@ static long _rtl92de_translate_todbm(struct ieee80211_hw *hw,
return signal_power;
}
-static long _rtl92de_signal_scale_mapping(struct ieee80211_hw *hw, long currsig)
-{
- long retsig;
-
- if (currsig >= 61 && currsig <= 100)
- retsig = 90 + ((currsig - 60) / 4);
- else if (currsig >= 41 && currsig <= 60)
- retsig = 78 + ((currsig - 40) / 2);
- else if (currsig >= 31 && currsig <= 40)
- retsig = 66 + (currsig - 30);
- else if (currsig >= 21 && currsig <= 30)
- retsig = 54 + (currsig - 20);
- else if (currsig >= 5 && currsig <= 20)
- retsig = 42 + (((currsig - 5) * 2) / 3);
- else if (currsig == 4)
- retsig = 36;
- else if (currsig == 3)
- retsig = 27;
- else if (currsig == 2)
- retsig = 18;
- else if (currsig == 1)
- retsig = 9;
- else
- retsig = currsig;
- return retsig;
-}
-
static void _rtl92de_query_rxphystatus(struct ieee80211_hw *hw,
struct rtl_stats *pstats,
struct rx_desc_92d *pdesc,
@@ -141,7 +104,7 @@ static void _rtl92de_query_rxphystatus(struct ieee80211_hw *hw,
break;
}
}
- pwdb_all = _rtl92d_query_rxpwrpercentage(rx_pwr_all);
+ pwdb_all = rtl_query_rxpwrpercentage(rx_pwr_all);
/* CCK gain is smaller than OFDM/MCS gain, */
/* so we add gain diff by experiences, the val is 6 */
pwdb_all += 6;
@@ -183,7 +146,7 @@ static void _rtl92de_query_rxphystatus(struct ieee80211_hw *hw,
rf_rx_num++;
rx_pwr[i] = ((p_drvinfo->gain_trsw[i] & 0x3f) * 2)
- 110;
- rssi = _rtl92d_query_rxpwrpercentage(rx_pwr[i]);
+ rssi = rtl_query_rxpwrpercentage(rx_pwr[i]);
total_rssi += rssi;
rtlpriv->stats.rx_snr_db[i] =
(long)(p_drvinfo->rxsnr[i] / 2);
@@ -191,7 +154,7 @@ static void _rtl92de_query_rxphystatus(struct ieee80211_hw *hw,
pstats->rx_mimo_signalstrength[i] = (u8) rssi;
}
rx_pwr_all = ((p_drvinfo->pwdb_all >> 1) & 0x7f) - 106;
- pwdb_all = _rtl92d_query_rxpwrpercentage(rx_pwr_all);
+ pwdb_all = rtl_query_rxpwrpercentage(rx_pwr_all);
pstats->rx_pwdb_all = pwdb_all;
pstats->rxpower = rx_pwr_all;
pstats->recvsignalpower = rx_pwr_all;
@@ -212,10 +175,10 @@ static void _rtl92de_query_rxphystatus(struct ieee80211_hw *hw,
}
}
if (is_cck_rate)
- pstats->signalstrength = (u8)(_rtl92de_signal_scale_mapping(hw,
+ pstats->signalstrength = (u8)(rtl_signal_scale_mapping(hw,
pwdb_all));
else if (rf_rx_num != 0)
- pstats->signalstrength = (u8)(_rtl92de_signal_scale_mapping(hw,
+ pstats->signalstrength = (u8)(rtl_signal_scale_mapping(hw,
total_rssi /= rf_rx_num));
}
@@ -438,49 +401,50 @@ static void _rtl92de_translate_rx_signal_stuff(struct ieee80211_hw *hw,
bool rtl92de_rx_query_desc(struct ieee80211_hw *hw, struct rtl_stats *stats,
struct ieee80211_rx_status *rx_status,
- u8 *p_desc, struct sk_buff *skb)
+ u8 *pdesc8, struct sk_buff *skb)
{
+ __le32 *pdesc = (__le32 *)pdesc8;
struct rx_fwinfo_92d *p_drvinfo;
- struct rx_desc_92d *pdesc = (struct rx_desc_92d *)p_desc;
- u32 phystatus = GET_RX_DESC_PHYST(pdesc);
+ u32 phystatus = get_rx_desc_physt(pdesc);
- stats->length = (u16) GET_RX_DESC_PKT_LEN(pdesc);
- stats->rx_drvinfo_size = (u8) GET_RX_DESC_DRV_INFO_SIZE(pdesc) *
+ stats->length = (u16)get_rx_desc_pkt_len(pdesc);
+ stats->rx_drvinfo_size = (u8)get_rx_desc_drv_info_size(pdesc) *
RX_DRV_INFO_SIZE_UNIT;
- stats->rx_bufshift = (u8) (GET_RX_DESC_SHIFT(pdesc) & 0x03);
- stats->icv = (u16) GET_RX_DESC_ICV(pdesc);
- stats->crc = (u16) GET_RX_DESC_CRC32(pdesc);
+ stats->rx_bufshift = (u8)(get_rx_desc_shift(pdesc) & 0x03);
+ stats->icv = (u16)get_rx_desc_icv(pdesc);
+ stats->crc = (u16)get_rx_desc_crc32(pdesc);
stats->hwerror = (stats->crc | stats->icv);
- stats->decrypted = !GET_RX_DESC_SWDEC(pdesc);
- stats->rate = (u8) GET_RX_DESC_RXMCS(pdesc);
- stats->shortpreamble = (u16) GET_RX_DESC_SPLCP(pdesc);
- stats->isampdu = (bool) (GET_RX_DESC_PAGGR(pdesc) == 1);
- stats->isfirst_ampdu = (bool) ((GET_RX_DESC_PAGGR(pdesc) == 1)
- && (GET_RX_DESC_FAGGR(pdesc) == 1));
- stats->timestamp_low = GET_RX_DESC_TSFL(pdesc);
- stats->rx_is40mhzpacket = (bool)GET_RX_DESC_BW(pdesc);
- stats->is_ht = (bool)GET_RX_DESC_RXHT(pdesc);
+ stats->decrypted = !get_rx_desc_swdec(pdesc);
+ stats->rate = (u8)get_rx_desc_rxmcs(pdesc);
+ stats->shortpreamble = (u16)get_rx_desc_splcp(pdesc);
+ stats->isampdu = (bool)(get_rx_desc_paggr(pdesc) == 1);
+ stats->isfirst_ampdu = (bool)((get_rx_desc_paggr(pdesc) == 1) &&
+ (get_rx_desc_faggr(pdesc) == 1));
+ stats->timestamp_low = get_rx_desc_tsfl(pdesc);
+ stats->rx_is40mhzpacket = (bool)get_rx_desc_bw(pdesc);
+ stats->is_ht = (bool)get_rx_desc_rxht(pdesc);
rx_status->freq = hw->conf.chandef.chan->center_freq;
rx_status->band = hw->conf.chandef.chan->band;
- if (GET_RX_DESC_CRC32(pdesc))
+ if (get_rx_desc_crc32(pdesc))
rx_status->flag |= RX_FLAG_FAILED_FCS_CRC;
- if (!GET_RX_DESC_SWDEC(pdesc))
+ if (!get_rx_desc_swdec(pdesc))
rx_status->flag |= RX_FLAG_DECRYPTED;
- if (GET_RX_DESC_BW(pdesc))
+ if (get_rx_desc_bw(pdesc))
rx_status->bw = RATE_INFO_BW_40;
- if (GET_RX_DESC_RXHT(pdesc))
+ if (get_rx_desc_rxht(pdesc))
rx_status->encoding = RX_ENC_HT;
rx_status->flag |= RX_FLAG_MACTIME_START;
if (stats->decrypted)
rx_status->flag |= RX_FLAG_DECRYPTED;
rx_status->rate_idx = rtlwifi_rate_mapping(hw, stats->is_ht,
false, stats->rate);
- rx_status->mactime = GET_RX_DESC_TSFL(pdesc);
+ rx_status->mactime = get_rx_desc_tsfl(pdesc);
if (phystatus) {
p_drvinfo = (struct rx_fwinfo_92d *)(skb->data +
stats->rx_bufshift);
_rtl92de_translate_rx_signal_stuff(hw,
- skb, stats, pdesc,
+ skb, stats,
+ (struct rx_desc_92d *)pdesc,
p_drvinfo);
}
/*rx_status->qual = stats->signal; */
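The lower-case accessors in this hunk (get_rx_desc_pkt_len() and friends) are typed inline helpers over __le32 descriptor words, built on le32_get_bits() in place of the old SHIFT_AND_MASK_LE-style macros. A read-side sketch to pair with the earlier base.h one; the field placement is illustrative (the real layouts live in rtl8192de/trx.h) and the stand-in again assumes a little-endian host:

#include <stdint.h>
#include <stdio.h>

#define GENMASK(h, l) ((~0u << (l)) & (~0u >> (31 - (h))))

/* simplified le32_get_bits(): extract the field GENMASK() describes */
static uint32_t le32_get_bits_sketch(uint32_t word, uint32_t mask)
{
	return (word & mask) >> __builtin_ctz(mask);
}

/* illustrative accessor: packet length in bits 13..0 of descriptor word 0 */
static uint32_t get_rx_desc_pkt_len_sketch(const uint32_t *pdesc)
{
	return le32_get_bits_sketch(pdesc[0], GENMASK(13, 0));
}

int main(void)
{
	uint32_t rxdesc[8] = { 0x8000004e };	/* fake descriptor, word 0 only */

	printf("pkt_len = %u\n",
	       (unsigned)get_rx_desc_pkt_len_sketch(rxdesc));	/* 78 */
	return 0;
}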
@@ -489,21 +453,23 @@ bool rtl92de_rx_query_desc(struct ieee80211_hw *hw, struct rtl_stats *stats,
}
static void _rtl92de_insert_emcontent(struct rtl_tcb_desc *ptcb_desc,
- u8 *virtualaddress)
+ u8 *virtualaddress8)
{
+ __le32 *virtualaddress = (__le32 *)virtualaddress8;
+
memset(virtualaddress, 0, 8);
- SET_EARLYMODE_PKTNUM(virtualaddress, ptcb_desc->empkt_num);
- SET_EARLYMODE_LEN0(virtualaddress, ptcb_desc->empkt_len[0]);
- SET_EARLYMODE_LEN1(virtualaddress, ptcb_desc->empkt_len[1]);
- SET_EARLYMODE_LEN2_1(virtualaddress, ptcb_desc->empkt_len[2] & 0xF);
- SET_EARLYMODE_LEN2_2(virtualaddress, ptcb_desc->empkt_len[2] >> 4);
- SET_EARLYMODE_LEN3(virtualaddress, ptcb_desc->empkt_len[3]);
- SET_EARLYMODE_LEN4(virtualaddress, ptcb_desc->empkt_len[4]);
+ set_earlymode_pktnum(virtualaddress, ptcb_desc->empkt_num);
+ set_earlymode_len0(virtualaddress, ptcb_desc->empkt_len[0]);
+ set_earlymode_len1(virtualaddress, ptcb_desc->empkt_len[1]);
+ set_earlymode_len2_1(virtualaddress, ptcb_desc->empkt_len[2] & 0xF);
+ set_earlymode_len2_2(virtualaddress, ptcb_desc->empkt_len[2] >> 4);
+ set_earlymode_len3(virtualaddress, ptcb_desc->empkt_len[3]);
+ set_earlymode_len4(virtualaddress, ptcb_desc->empkt_len[4]);
}
void rtl92de_tx_fill_desc(struct ieee80211_hw *hw,
- struct ieee80211_hdr *hdr, u8 *pdesc_tx,
+ struct ieee80211_hdr *hdr, u8 *pdesc8,
u8 *pbd_desc_tx, struct ieee80211_tx_info *info,
struct ieee80211_sta *sta,
struct sk_buff *skb,
@@ -514,7 +480,7 @@ void rtl92de_tx_fill_desc(struct ieee80211_hw *hw,
struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
struct rtl_hal *rtlhal = rtl_hal(rtlpriv);
struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
- u8 *pdesc = pdesc_tx;
+ __le32 *pdesc = (__le32 *)pdesc8;
u16 seq_number;
__le16 fc = hdr->frame_control;
unsigned int buf_len = 0;
@@ -549,15 +515,15 @@ void rtl92de_tx_fill_desc(struct ieee80211_hw *hw,
"DMA mapping error\n");
return;
}
- CLEAR_PCI_TX_DESC_CONTENT(pdesc, sizeof(struct tx_desc_92d));
+ clear_pci_tx_desc_content(pdesc, sizeof(struct tx_desc_92d));
if (ieee80211_is_nullfunc(fc) || ieee80211_is_ctl(fc)) {
firstseg = true;
lastseg = true;
}
if (firstseg) {
if (rtlhal->earlymode_enable) {
- SET_TX_DESC_PKT_OFFSET(pdesc, 1);
- SET_TX_DESC_OFFSET(pdesc, USB_HWDESC_HEADER_LEN +
+ set_tx_desc_pkt_offset(pdesc, 1);
+ set_tx_desc_offset(pdesc, USB_HWDESC_HEADER_LEN +
EM_HDR_LEN);
if (ptcb_desc->empkt_num) {
RT_TRACE(rtlpriv, COMP_SEND, DBG_LOUD,
@@ -567,60 +533,61 @@ void rtl92de_tx_fill_desc(struct ieee80211_hw *hw,
(u8 *)(skb->data));
}
} else {
- SET_TX_DESC_OFFSET(pdesc, USB_HWDESC_HEADER_LEN);
+ set_tx_desc_offset(pdesc, USB_HWDESC_HEADER_LEN);
}
/* 5G have no CCK rate */
if (rtlhal->current_bandtype == BAND_ON_5G)
if (ptcb_desc->hw_rate < DESC_RATE6M)
ptcb_desc->hw_rate = DESC_RATE6M;
- SET_TX_DESC_TX_RATE(pdesc, ptcb_desc->hw_rate);
+ set_tx_desc_tx_rate(pdesc, ptcb_desc->hw_rate);
if (ptcb_desc->use_shortgi || ptcb_desc->use_shortpreamble)
- SET_TX_DESC_DATA_SHORTGI(pdesc, 1);
+ set_tx_desc_data_shortgi(pdesc, 1);
if (rtlhal->macphymode == DUALMAC_DUALPHY &&
ptcb_desc->hw_rate == DESC_RATEMCS7)
- SET_TX_DESC_DATA_SHORTGI(pdesc, 1);
+ set_tx_desc_data_shortgi(pdesc, 1);
if (info->flags & IEEE80211_TX_CTL_AMPDU) {
- SET_TX_DESC_AGG_ENABLE(pdesc, 1);
- SET_TX_DESC_MAX_AGG_NUM(pdesc, 0x14);
+ set_tx_desc_agg_enable(pdesc, 1);
+ set_tx_desc_max_agg_num(pdesc, 0x14);
}
- SET_TX_DESC_SEQ(pdesc, seq_number);
- SET_TX_DESC_RTS_ENABLE(pdesc, ((ptcb_desc->rts_enable &&
- !ptcb_desc->cts_enable) ? 1 : 0));
- SET_TX_DESC_HW_RTS_ENABLE(pdesc, ((ptcb_desc->rts_enable
+ set_tx_desc_seq(pdesc, seq_number);
+ set_tx_desc_rts_enable(pdesc,
+ ((ptcb_desc->rts_enable &&
+ !ptcb_desc->cts_enable) ? 1 : 0));
+ set_tx_desc_hw_rts_enable(pdesc, ((ptcb_desc->rts_enable
|| ptcb_desc->cts_enable) ? 1 : 0));
- SET_TX_DESC_CTS2SELF(pdesc, ((ptcb_desc->cts_enable) ? 1 : 0));
- SET_TX_DESC_RTS_STBC(pdesc, ((ptcb_desc->rts_stbc) ? 1 : 0));
+ set_tx_desc_cts2self(pdesc, ((ptcb_desc->cts_enable) ? 1 : 0));
+ set_tx_desc_rts_stbc(pdesc, ((ptcb_desc->rts_stbc) ? 1 : 0));
/* 5G have no CCK rate */
if (rtlhal->current_bandtype == BAND_ON_5G)
if (ptcb_desc->rts_rate < DESC_RATE6M)
ptcb_desc->rts_rate = DESC_RATE6M;
- SET_TX_DESC_RTS_RATE(pdesc, ptcb_desc->rts_rate);
- SET_TX_DESC_RTS_BW(pdesc, 0);
- SET_TX_DESC_RTS_SC(pdesc, ptcb_desc->rts_sc);
- SET_TX_DESC_RTS_SHORT(pdesc, ((ptcb_desc->rts_rate <=
+ set_tx_desc_rts_rate(pdesc, ptcb_desc->rts_rate);
+ set_tx_desc_rts_bw(pdesc, 0);
+ set_tx_desc_rts_sc(pdesc, ptcb_desc->rts_sc);
+ set_tx_desc_rts_short(pdesc, ((ptcb_desc->rts_rate <=
DESC_RATE54M) ?
(ptcb_desc->rts_use_shortpreamble ? 1 : 0) :
(ptcb_desc->rts_use_shortgi ? 1 : 0)));
if (bw_40) {
if (ptcb_desc->packet_bw) {
- SET_TX_DESC_DATA_BW(pdesc, 1);
- SET_TX_DESC_TX_SUB_CARRIER(pdesc, 3);
+ set_tx_desc_data_bw(pdesc, 1);
+ set_tx_desc_tx_sub_carrier(pdesc, 3);
} else {
- SET_TX_DESC_DATA_BW(pdesc, 0);
- SET_TX_DESC_TX_SUB_CARRIER(pdesc,
+ set_tx_desc_data_bw(pdesc, 0);
+ set_tx_desc_tx_sub_carrier(pdesc,
mac->cur_40_prime_sc);
}
} else {
- SET_TX_DESC_DATA_BW(pdesc, 0);
- SET_TX_DESC_TX_SUB_CARRIER(pdesc, 0);
+ set_tx_desc_data_bw(pdesc, 0);
+ set_tx_desc_tx_sub_carrier(pdesc, 0);
}
- SET_TX_DESC_LINIP(pdesc, 0);
- SET_TX_DESC_PKT_SIZE(pdesc, (u16) skb_len);
+ set_tx_desc_linip(pdesc, 0);
+ set_tx_desc_pkt_size(pdesc, (u16)skb_len);
if (sta) {
u8 ampdu_density = sta->ht_cap.ampdu_density;
- SET_TX_DESC_AMPDU_DENSITY(pdesc, ampdu_density);
+ set_tx_desc_ampdu_density(pdesc, ampdu_density);
}
if (info->control.hw_key) {
struct ieee80211_key_conf *keyconf;
@@ -630,66 +597,66 @@ void rtl92de_tx_fill_desc(struct ieee80211_hw *hw,
case WLAN_CIPHER_SUITE_WEP40:
case WLAN_CIPHER_SUITE_WEP104:
case WLAN_CIPHER_SUITE_TKIP:
- SET_TX_DESC_SEC_TYPE(pdesc, 0x1);
+ set_tx_desc_sec_type(pdesc, 0x1);
break;
case WLAN_CIPHER_SUITE_CCMP:
- SET_TX_DESC_SEC_TYPE(pdesc, 0x3);
+ set_tx_desc_sec_type(pdesc, 0x3);
break;
default:
- SET_TX_DESC_SEC_TYPE(pdesc, 0x0);
+ set_tx_desc_sec_type(pdesc, 0x0);
break;
}
}
- SET_TX_DESC_PKT_ID(pdesc, 0);
- SET_TX_DESC_QUEUE_SEL(pdesc, fw_qsel);
- SET_TX_DESC_DATA_RATE_FB_LIMIT(pdesc, 0x1F);
- SET_TX_DESC_RTS_RATE_FB_LIMIT(pdesc, 0xF);
- SET_TX_DESC_DISABLE_FB(pdesc, ptcb_desc->disable_ratefallback ?
+ set_tx_desc_pkt_id(pdesc, 0);
+ set_tx_desc_queue_sel(pdesc, fw_qsel);
+ set_tx_desc_data_rate_fb_limit(pdesc, 0x1F);
+ set_tx_desc_rts_rate_fb_limit(pdesc, 0xF);
+ set_tx_desc_disable_fb(pdesc, ptcb_desc->disable_ratefallback ?
1 : 0);
- SET_TX_DESC_USE_RATE(pdesc, ptcb_desc->use_driver_rate ? 1 : 0);
+ set_tx_desc_use_rate(pdesc, ptcb_desc->use_driver_rate ? 1 : 0);
/* Set TxRate and RTSRate in TxDesc */
/* This prevent Tx initial rate of new-coming packets */
/* from being overwritten by retried packet rate.*/
if (!ptcb_desc->use_driver_rate) {
- SET_TX_DESC_RTS_RATE(pdesc, 0x08);
- /* SET_TX_DESC_TX_RATE(pdesc, 0x0b); */
+ set_tx_desc_rts_rate(pdesc, 0x08);
+ /* set_tx_desc_tx_rate(pdesc, 0x0b); */
}
if (ieee80211_is_data_qos(fc)) {
if (mac->rdg_en) {
RT_TRACE(rtlpriv, COMP_SEND, DBG_TRACE,
"Enable RDG function\n");
- SET_TX_DESC_RDG_ENABLE(pdesc, 1);
- SET_TX_DESC_HTC(pdesc, 1);
+ set_tx_desc_rdg_enable(pdesc, 1);
+ set_tx_desc_htc(pdesc, 1);
}
}
}
- SET_TX_DESC_FIRST_SEG(pdesc, (firstseg ? 1 : 0));
- SET_TX_DESC_LAST_SEG(pdesc, (lastseg ? 1 : 0));
- SET_TX_DESC_TX_BUFFER_SIZE(pdesc, (u16) buf_len);
- SET_TX_DESC_TX_BUFFER_ADDRESS(pdesc, mapping);
+ set_tx_desc_first_seg(pdesc, (firstseg ? 1 : 0));
+ set_tx_desc_last_seg(pdesc, (lastseg ? 1 : 0));
+ set_tx_desc_tx_buffer_size(pdesc, (u16)buf_len);
+ set_tx_desc_tx_buffer_address(pdesc, mapping);
if (rtlpriv->dm.useramask) {
- SET_TX_DESC_RATE_ID(pdesc, ptcb_desc->ratr_index);
- SET_TX_DESC_MACID(pdesc, ptcb_desc->mac_id);
+ set_tx_desc_rate_id(pdesc, ptcb_desc->ratr_index);
+ set_tx_desc_macid(pdesc, ptcb_desc->mac_id);
} else {
- SET_TX_DESC_RATE_ID(pdesc, 0xC + ptcb_desc->ratr_index);
- SET_TX_DESC_MACID(pdesc, ptcb_desc->ratr_index);
+ set_tx_desc_rate_id(pdesc, 0xC + ptcb_desc->ratr_index);
+ set_tx_desc_macid(pdesc, ptcb_desc->ratr_index);
}
if (ieee80211_is_data_qos(fc))
- SET_TX_DESC_QOS(pdesc, 1);
+ set_tx_desc_qos(pdesc, 1);
if ((!ieee80211_is_data_qos(fc)) && ppsc->fwctrl_lps) {
- SET_TX_DESC_HWSEQ_EN(pdesc, 1);
- SET_TX_DESC_PKT_ID(pdesc, 8);
+ set_tx_desc_hwseq_en(pdesc, 1);
+ set_tx_desc_pkt_id(pdesc, 8);
}
- SET_TX_DESC_MORE_FRAG(pdesc, (lastseg ? 0 : 1));
+ set_tx_desc_more_frag(pdesc, (lastseg ? 0 : 1));
RT_TRACE(rtlpriv, COMP_SEND, DBG_TRACE, "\n");
}
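The u8 * parameter survives only because the shared rtlwifi hal-ops prototypes pass raw descriptor buffers; the function recasts it once to __le32 * so every field access below goes through an endian-typed helper that sparse can verify. The boundary pattern in isolation (a minimal sketch; fill_one_desc() is a hypothetical name, not a driver function):

static void fill_one_desc(u8 *pdesc8)
{
	__le32 *pdesc = (__le32 *)pdesc8;	/* one cast at the entry point */

	set_tx_desc_own(pdesc, 1);	/* helpers take __le32 * from here on */
}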
void rtl92de_tx_fill_cmddesc(struct ieee80211_hw *hw,
- u8 *pdesc, bool firstseg,
+ u8 *pdesc8, bool firstseg,
bool lastseg, struct sk_buff *skb)
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
@@ -701,61 +668,64 @@ void rtl92de_tx_fill_cmddesc(struct ieee80211_hw *hw,
skb->data, skb->len, PCI_DMA_TODEVICE);
struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)(skb->data);
__le16 fc = hdr->frame_control;
+ __le32 *pdesc = (__le32 *)pdesc8;
if (pci_dma_mapping_error(rtlpci->pdev, mapping)) {
RT_TRACE(rtlpriv, COMP_SEND, DBG_TRACE,
"DMA mapping error\n");
return;
}
- CLEAR_PCI_TX_DESC_CONTENT(pdesc, TX_DESC_SIZE);
+ clear_pci_tx_desc_content(pdesc, TX_DESC_SIZE);
if (firstseg)
- SET_TX_DESC_OFFSET(pdesc, USB_HWDESC_HEADER_LEN);
+ set_tx_desc_offset(pdesc, USB_HWDESC_HEADER_LEN);
/* 5G have no CCK rate
* Caution: The macros below are multi-line expansions.
* The braces are needed no matter what checkpatch says
*/
if (rtlhal->current_bandtype == BAND_ON_5G) {
- SET_TX_DESC_TX_RATE(pdesc, DESC_RATE6M);
+ set_tx_desc_tx_rate(pdesc, DESC_RATE6M);
} else {
- SET_TX_DESC_TX_RATE(pdesc, DESC_RATE1M);
+ set_tx_desc_tx_rate(pdesc, DESC_RATE1M);
}
- SET_TX_DESC_SEQ(pdesc, 0);
- SET_TX_DESC_LINIP(pdesc, 0);
- SET_TX_DESC_QUEUE_SEL(pdesc, fw_queue);
- SET_TX_DESC_FIRST_SEG(pdesc, 1);
- SET_TX_DESC_LAST_SEG(pdesc, 1);
- SET_TX_DESC_TX_BUFFER_SIZE(pdesc, (u16)skb->len);
- SET_TX_DESC_TX_BUFFER_ADDRESS(pdesc, mapping);
- SET_TX_DESC_RATE_ID(pdesc, 7);
- SET_TX_DESC_MACID(pdesc, 0);
- SET_TX_DESC_PKT_SIZE(pdesc, (u16) (skb->len));
- SET_TX_DESC_FIRST_SEG(pdesc, 1);
- SET_TX_DESC_LAST_SEG(pdesc, 1);
- SET_TX_DESC_OFFSET(pdesc, 0x20);
- SET_TX_DESC_USE_RATE(pdesc, 1);
+ set_tx_desc_seq(pdesc, 0);
+ set_tx_desc_linip(pdesc, 0);
+ set_tx_desc_queue_sel(pdesc, fw_queue);
+ set_tx_desc_first_seg(pdesc, 1);
+ set_tx_desc_last_seg(pdesc, 1);
+ set_tx_desc_tx_buffer_size(pdesc, (u16)skb->len);
+ set_tx_desc_tx_buffer_address(pdesc, mapping);
+ set_tx_desc_rate_id(pdesc, 7);
+ set_tx_desc_macid(pdesc, 0);
+ set_tx_desc_pkt_size(pdesc, (u16)(skb->len));
+ set_tx_desc_first_seg(pdesc, 1);
+ set_tx_desc_last_seg(pdesc, 1);
+ set_tx_desc_offset(pdesc, 0x20);
+ set_tx_desc_use_rate(pdesc, 1);
if (!ieee80211_is_data_qos(fc) && ppsc->fwctrl_lps) {
- SET_TX_DESC_HWSEQ_EN(pdesc, 1);
- SET_TX_DESC_PKT_ID(pdesc, 8);
+ set_tx_desc_hwseq_en(pdesc, 1);
+ set_tx_desc_pkt_id(pdesc, 8);
}
RT_PRINT_DATA(rtlpriv, COMP_CMD, DBG_LOUD,
"H2C Tx Cmd Content", pdesc, TX_DESC_SIZE);
wmb();
- SET_TX_DESC_OWN(pdesc, 1);
+ set_tx_desc_own(pdesc, 1);
}
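The wmb() above is the load-bearing part of this function: every descriptor field must be visible in memory before the OWN bit hands the descriptor to the hardware, so the barrier orders the field stores ahead of the final set_tx_desc_own() write. The handoff pattern in isolation (a sketch built from the helpers this patch introduces; publish_desc() is a hypothetical wrapper):

/* Fill the DMA fields, then publish the descriptor to the NIC. */
static void publish_desc(__le32 *pdesc, u32 dma_addr, u16 len)
{
	set_tx_desc_tx_buffer_address(pdesc, dma_addr);
	set_tx_desc_tx_buffer_size(pdesc, len);
	wmb();				/* field writes before ownership flip */
	set_tx_desc_own(pdesc, 1);	/* hardware may consume it from here */
}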
-void rtl92de_set_desc(struct ieee80211_hw *hw, u8 *pdesc, bool istx,
+void rtl92de_set_desc(struct ieee80211_hw *hw, u8 *pdesc8, bool istx,
u8 desc_name, u8 *val)
{
+ __le32 *pdesc = (__le32 *)pdesc8;
+
if (istx) {
switch (desc_name) {
case HW_DESC_OWN:
wmb();
- SET_TX_DESC_OWN(pdesc, 1);
+ set_tx_desc_own(pdesc, 1);
break;
case HW_DESC_TX_NEXTDESC_ADDR:
- SET_TX_DESC_NEXT_DESC_ADDRESS(pdesc, *(u32 *) val);
+ set_tx_desc_next_desc_address(pdesc, *(u32 *)val);
break;
default:
WARN_ONCE(true, "rtl8192de: ERR txdesc :%d not processed\n",
@@ -766,16 +736,16 @@ void rtl92de_set_desc(struct ieee80211_hw *hw, u8 *pdesc, bool istx,
switch (desc_name) {
case HW_DESC_RXOWN:
wmb();
- SET_RX_DESC_OWN(pdesc, 1);
+ set_rx_desc_own(pdesc, 1);
break;
case HW_DESC_RXBUFF_ADDR:
- SET_RX_DESC_BUFF_ADDR(pdesc, *(u32 *) val);
+ set_rx_desc_buff_addr(pdesc, *(u32 *)val);
break;
case HW_DESC_RXPKT_LEN:
- SET_RX_DESC_PKT_LEN(pdesc, *(u32 *) val);
+ set_rx_desc_pkt_len(pdesc, *(u32 *)val);
break;
case HW_DESC_RXERO:
- SET_RX_DESC_EOR(pdesc, 1);
+ set_rx_desc_eor(pdesc, 1);
break;
default:
WARN_ONCE(true, "rtl8192de: ERR rxdesc :%d not processed\n",
@@ -786,17 +756,18 @@ void rtl92de_set_desc(struct ieee80211_hw *hw, u8 *pdesc, bool istx,
}
u64 rtl92de_get_desc(struct ieee80211_hw *hw,
- u8 *p_desc, bool istx, u8 desc_name)
+ u8 *p_desc8, bool istx, u8 desc_name)
{
+ __le32 *p_desc = (__le32 *)p_desc8;
u32 ret = 0;
if (istx) {
switch (desc_name) {
case HW_DESC_OWN:
- ret = GET_TX_DESC_OWN(p_desc);
+ ret = get_tx_desc_own(p_desc);
break;
case HW_DESC_TXBUFF_ADDR:
- ret = GET_TX_DESC_TX_BUFFER_ADDRESS(p_desc);
+ ret = get_tx_desc_tx_buffer_address(p_desc);
break;
default:
WARN_ONCE(true, "rtl8192de: ERR txdesc :%d not processed\n",
@@ -806,13 +777,13 @@ u64 rtl92de_get_desc(struct ieee80211_hw *hw,
} else {
switch (desc_name) {
case HW_DESC_OWN:
- ret = GET_RX_DESC_OWN(p_desc);
+ ret = get_rx_desc_own(p_desc);
break;
case HW_DESC_RXPKT_LEN:
- ret = GET_RX_DESC_PKT_LEN(p_desc);
- break;
+ ret = get_rx_desc_pkt_len(p_desc);
+ break;
case HW_DESC_RXBUFF_ADDR:
- ret = GET_RX_DESC_BUFF_ADDR(p_desc);
+ ret = get_rx_desc_buff_addr(p_desc);
break;
default:
WARN_ONCE(true, "rtl8192de: ERR rxdesc :%d not processed\n",
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192de/trx.h b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/trx.h
index 635989e15282..d01578875cd5 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192de/trx.h
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/trx.h
@@ -14,514 +14,359 @@
#define USB_HWDESC_HEADER_LEN 32
#define CRCLENGTH 4
-/* Define a macro that takes a le32 word, converts it to host ordering,
- * right shifts by a specified count, creates a mask of the specified
- * bit count, and extracts that number of bits.
- */
-
-#define SHIFT_AND_MASK_LE(__pdesc, __shift, __mask) \
- ((le32_to_cpu(*(((__le32 *)(__pdesc)))) >> (__shift)) & \
- BIT_LEN_MASK_32(__mask))
-
-/* Define a macro that clears a bit field in an le32 word and
- * sets the specified value into that bit field. The resulting
- * value remains in le32 ordering; however, it is properly converted
- * to host ordering for the clear and set operations before conversion
- * back to le32.
- */
-
-#define SET_BITS_OFFSET_LE(__pdesc, __shift, __len, __val) \
- (*(__le32 *)(__pdesc) = \
- (cpu_to_le32((le32_to_cpu(*((__le32 *)(__pdesc))) & \
- (~(BIT_OFFSET_LEN_MASK_32((__shift), __len)))) | \
- (((u32)(__val) & BIT_LEN_MASK_32(__len)) << (__shift)))));
-
/* macros to read/write various fields in RX or TX descriptors */
-#define SET_TX_DESC_PKT_SIZE(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc, 0, 16, __val)
-#define SET_TX_DESC_OFFSET(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc, 16, 8, __val)
-#define SET_TX_DESC_BMC(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc, 24, 1, __val)
-#define SET_TX_DESC_HTC(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc, 25, 1, __val)
-#define SET_TX_DESC_LAST_SEG(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc, 26, 1, __val)
-#define SET_TX_DESC_FIRST_SEG(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc, 27, 1, __val)
-#define SET_TX_DESC_LINIP(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc, 28, 1, __val)
-#define SET_TX_DESC_NO_ACM(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc, 29, 1, __val)
-#define SET_TX_DESC_GF(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc, 30, 1, __val)
-#define SET_TX_DESC_OWN(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc, 31, 1, __val)
-
-#define GET_TX_DESC_PKT_SIZE(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc, 0, 16)
-#define GET_TX_DESC_OFFSET(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc, 16, 8)
-#define GET_TX_DESC_BMC(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc, 24, 1)
-#define GET_TX_DESC_HTC(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc, 25, 1)
-#define GET_TX_DESC_LAST_SEG(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc, 26, 1)
-#define GET_TX_DESC_FIRST_SEG(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc, 27, 1)
-#define GET_TX_DESC_LINIP(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc, 28, 1)
-#define GET_TX_DESC_NO_ACM(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc, 29, 1)
-#define GET_TX_DESC_GF(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc, 30, 1)
-#define GET_TX_DESC_OWN(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc, 31, 1)
-
-#define SET_TX_DESC_MACID(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc+4, 0, 5, __val)
-#define SET_TX_DESC_AGG_ENABLE(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc+4, 5, 1, __val)
-#define SET_TX_DESC_BK(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc+4, 6, 1, __val)
-#define SET_TX_DESC_RDG_ENABLE(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc+4, 7, 1, __val)
-#define SET_TX_DESC_QUEUE_SEL(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc+4, 8, 5, __val)
-#define SET_TX_DESC_RDG_NAV_EXT(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc+4, 13, 1, __val)
-#define SET_TX_DESC_LSIG_TXOP_EN(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc+4, 14, 1, __val)
-#define SET_TX_DESC_PIFS(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc+4, 15, 1, __val)
-#define SET_TX_DESC_RATE_ID(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc+4, 16, 4, __val)
-#define SET_TX_DESC_NAV_USE_HDR(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc+4, 20, 1, __val)
-#define SET_TX_DESC_EN_DESC_ID(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc+4, 21, 1, __val)
-#define SET_TX_DESC_SEC_TYPE(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc+4, 22, 2, __val)
-#define SET_TX_DESC_PKT_OFFSET(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc+4, 26, 8, __val)
-
-#define GET_TX_DESC_MACID(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc+4, 0, 5)
-#define GET_TX_DESC_AGG_ENABLE(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc+4, 5, 1)
-#define GET_TX_DESC_AGG_BREAK(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc+4, 6, 1)
-#define GET_TX_DESC_RDG_ENABLE(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc+4, 7, 1)
-#define GET_TX_DESC_QUEUE_SEL(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc+4, 8, 5)
-#define GET_TX_DESC_RDG_NAV_EXT(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc+4, 13, 1)
-#define GET_TX_DESC_LSIG_TXOP_EN(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc+4, 14, 1)
-#define GET_TX_DESC_PIFS(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc+4, 15, 1)
-#define GET_TX_DESC_RATE_ID(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc+4, 16, 4)
-#define GET_TX_DESC_NAV_USE_HDR(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc+4, 20, 1)
-#define GET_TX_DESC_EN_DESC_ID(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc+4, 21, 1)
-#define GET_TX_DESC_SEC_TYPE(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc+4, 22, 2)
-#define GET_TX_DESC_PKT_OFFSET(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc+4, 24, 8)
-
-#define SET_TX_DESC_RTS_RC(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc+8, 0, 6, __val)
-#define SET_TX_DESC_DATA_RC(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc+8, 6, 6, __val)
-#define SET_TX_DESC_BAR_RTY_TH(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc+8, 14, 2, __val)
-#define SET_TX_DESC_MORE_FRAG(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc+8, 17, 1, __val)
-#define SET_TX_DESC_RAW(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc+8, 18, 1, __val)
-#define SET_TX_DESC_CCX(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc+8, 19, 1, __val)
-#define SET_TX_DESC_AMPDU_DENSITY(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc+8, 20, 3, __val)
-#define SET_TX_DESC_ANTSEL_A(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc+8, 24, 1, __val)
-#define SET_TX_DESC_ANTSEL_B(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc+8, 25, 1, __val)
-#define SET_TX_DESC_TX_ANT_CCK(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc+8, 26, 2, __val)
-#define SET_TX_DESC_TX_ANTL(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc+8, 28, 2, __val)
-#define SET_TX_DESC_TX_ANT_HT(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc+8, 30, 2, __val)
-
-#define GET_TX_DESC_RTS_RC(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc+8, 0, 6)
-#define GET_TX_DESC_DATA_RC(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc+8, 6, 6)
-#define GET_TX_DESC_BAR_RTY_TH(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc+8, 14, 2)
-#define GET_TX_DESC_MORE_FRAG(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc+8, 17, 1)
-#define GET_TX_DESC_RAW(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc+8, 18, 1)
-#define GET_TX_DESC_CCX(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc+8, 19, 1)
-#define GET_TX_DESC_AMPDU_DENSITY(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc+8, 20, 3)
-#define GET_TX_DESC_ANTSEL_A(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc+8, 24, 1)
-#define GET_TX_DESC_ANTSEL_B(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc+8, 25, 1)
-#define GET_TX_DESC_TX_ANT_CCK(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc+8, 26, 2)
-#define GET_TX_DESC_TX_ANTL(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc+8, 28, 2)
-#define GET_TX_DESC_TX_ANT_HT(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc+8, 30, 2)
-
-#define SET_TX_DESC_NEXT_HEAP_PAGE(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc+12, 0, 8, __val)
-#define SET_TX_DESC_TAIL_PAGE(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc+12, 8, 8, __val)
-#define SET_TX_DESC_SEQ(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc+12, 16, 12, __val)
-#define SET_TX_DESC_PKT_ID(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc+12, 28, 4, __val)
-
-#define GET_TX_DESC_NEXT_HEAP_PAGE(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc+12, 0, 8)
-#define GET_TX_DESC_TAIL_PAGE(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc+12, 8, 8)
-#define GET_TX_DESC_SEQ(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc+12, 16, 12)
-#define GET_TX_DESC_PKT_ID(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc+12, 28, 4)
-
-#define SET_TX_DESC_RTS_RATE(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc+16, 0, 5, __val)
-#define SET_TX_DESC_AP_DCFE(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc+16, 5, 1, __val)
-#define SET_TX_DESC_QOS(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc+16, 6, 1, __val)
-#define SET_TX_DESC_HWSEQ_EN(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc+16, 7, 1, __val)
-#define SET_TX_DESC_USE_RATE(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc+16, 8, 1, __val)
-#define SET_TX_DESC_DISABLE_RTS_FB(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc+16, 9, 1, __val)
-#define SET_TX_DESC_DISABLE_FB(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc+16, 10, 1, __val)
-#define SET_TX_DESC_CTS2SELF(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc+16, 11, 1, __val)
-#define SET_TX_DESC_RTS_ENABLE(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc+16, 12, 1, __val)
-#define SET_TX_DESC_HW_RTS_ENABLE(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc+16, 13, 1, __val)
-#define SET_TX_DESC_PORT_ID(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc+16, 14, 1, __val)
-#define SET_TX_DESC_WAIT_DCTS(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc+16, 18, 1, __val)
-#define SET_TX_DESC_CTS2AP_EN(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc+16, 19, 1, __val)
-#define SET_TX_DESC_TX_SUB_CARRIER(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc+16, 20, 2, __val)
-#define SET_TX_DESC_TX_STBC(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc+16, 22, 2, __val)
-#define SET_TX_DESC_DATA_SHORT(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc+16, 24, 1, __val)
-#define SET_TX_DESC_DATA_BW(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc+16, 25, 1, __val)
-#define SET_TX_DESC_RTS_SHORT(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc+16, 26, 1, __val)
-#define SET_TX_DESC_RTS_BW(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc+16, 27, 1, __val)
-#define SET_TX_DESC_RTS_SC(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc+16, 28, 2, __val)
-#define SET_TX_DESC_RTS_STBC(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc+16, 30, 2, __val)
-
-#define GET_TX_DESC_RTS_RATE(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc+16, 0, 5)
-#define GET_TX_DESC_AP_DCFE(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc+16, 5, 1)
-#define GET_TX_DESC_QOS(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc+16, 6, 1)
-#define GET_TX_DESC_HWSEQ_EN(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc+16, 7, 1)
-#define GET_TX_DESC_USE_RATE(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc+16, 8, 1)
-#define GET_TX_DESC_DISABLE_RTS_FB(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc+16, 9, 1)
-#define GET_TX_DESC_DISABLE_FB(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc+16, 10, 1)
-#define GET_TX_DESC_CTS2SELF(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc+16, 11, 1)
-#define GET_TX_DESC_RTS_ENABLE(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc+16, 12, 1)
-#define GET_TX_DESC_HW_RTS_ENABLE(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc+16, 13, 1)
-#define GET_TX_DESC_PORT_ID(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc+16, 14, 1)
-#define GET_TX_DESC_WAIT_DCTS(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc+16, 18, 1)
-#define GET_TX_DESC_CTS2AP_EN(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc+16, 19, 1)
-#define GET_TX_DESC_TX_SUB_CARRIER(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc+16, 20, 2)
-#define GET_TX_DESC_TX_STBC(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc+16, 22, 2)
-#define GET_TX_DESC_DATA_SHORT(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc+16, 24, 1)
-#define GET_TX_DESC_DATA_BW(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc+16, 25, 1)
-#define GET_TX_DESC_RTS_SHORT(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc+16, 26, 1)
-#define GET_TX_DESC_RTS_BW(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc+16, 27, 1)
-#define GET_TX_DESC_RTS_SC(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc+16, 28, 2)
-#define GET_TX_DESC_RTS_STBC(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc+16, 30, 2)
-
-#define SET_TX_DESC_TX_RATE(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc+20, 0, 6, __val)
-#define SET_TX_DESC_DATA_SHORTGI(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc+20, 6, 1, __val)
-#define SET_TX_DESC_CCX_TAG(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc+20, 7, 1, __val)
-#define SET_TX_DESC_DATA_RATE_FB_LIMIT(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc+20, 8, 5, __val)
-#define SET_TX_DESC_RTS_RATE_FB_LIMIT(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc+20, 13, 4, __val)
-#define SET_TX_DESC_RETRY_LIMIT_ENABLE(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc+20, 17, 1, __val)
-#define SET_TX_DESC_DATA_RETRY_LIMIT(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc+20, 18, 6, __val)
-#define SET_TX_DESC_USB_TXAGG_NUM(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc+20, 24, 8, __val)
-
-#define GET_TX_DESC_TX_RATE(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc+20, 0, 6)
-#define GET_TX_DESC_DATA_SHORTGI(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc+20, 6, 1)
-#define GET_TX_DESC_CCX_TAG(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc+20, 7, 1)
-#define GET_TX_DESC_DATA_RATE_FB_LIMIT(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc+20, 8, 5)
-#define GET_TX_DESC_RTS_RATE_FB_LIMIT(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc+20, 13, 4)
-#define GET_TX_DESC_RETRY_LIMIT_ENABLE(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc+20, 17, 1)
-#define GET_TX_DESC_DATA_RETRY_LIMIT(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc+20, 18, 6)
-#define GET_TX_DESC_USB_TXAGG_NUM(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc+20, 24, 8)
-
-#define SET_TX_DESC_TXAGC_A(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc+24, 0, 5, __val)
-#define SET_TX_DESC_TXAGC_B(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc+24, 5, 5, __val)
-#define SET_TX_DESC_USE_MAX_LEN(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc+24, 10, 1, __val)
-#define SET_TX_DESC_MAX_AGG_NUM(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc+24, 11, 5, __val)
-#define SET_TX_DESC_MCSG1_MAX_LEN(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc+24, 16, 4, __val)
-#define SET_TX_DESC_MCSG2_MAX_LEN(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc+24, 20, 4, __val)
-#define SET_TX_DESC_MCSG3_MAX_LEN(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc+24, 24, 4, __val)
-#define SET_TX_DESC_MCS7_SGI_MAX_LEN(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc+24, 28, 4, __val)
-
-#define GET_TX_DESC_TXAGC_A(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc+24, 0, 5)
-#define GET_TX_DESC_TXAGC_B(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc+24, 5, 5)
-#define GET_TX_DESC_USE_MAX_LEN(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc+24, 10, 1)
-#define GET_TX_DESC_MAX_AGG_NUM(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc+24, 11, 5)
-#define GET_TX_DESC_MCSG1_MAX_LEN(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc+24, 16, 4)
-#define GET_TX_DESC_MCSG2_MAX_LEN(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc+24, 20, 4)
-#define GET_TX_DESC_MCSG3_MAX_LEN(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc+24, 24, 4)
-#define GET_TX_DESC_MCS7_SGI_MAX_LEN(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc+24, 28, 4)
-
-#define SET_TX_DESC_TX_BUFFER_SIZE(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc+28, 0, 16, __val)
-#define SET_TX_DESC_MCSG4_MAX_LEN(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc+28, 16, 4, __val)
-#define SET_TX_DESC_MCSG5_MAX_LEN(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc+28, 20, 4, __val)
-#define SET_TX_DESC_MCSG6_MAX_LEN(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc+28, 24, 4, __val)
-#define SET_TX_DESC_MCS15_SGI_MAX_LEN(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc+28, 28, 4, __val)
-
-#define GET_TX_DESC_TX_BUFFER_SIZE(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc+28, 0, 16)
-#define GET_TX_DESC_MCSG4_MAX_LEN(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc+28, 16, 4)
-#define GET_TX_DESC_MCSG5_MAX_LEN(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc+28, 20, 4)
-#define GET_TX_DESC_MCSG6_MAX_LEN(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc+28, 24, 4)
-#define GET_TX_DESC_MCS15_SGI_MAX_LEN(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc+28, 28, 4)
-
-#define SET_TX_DESC_TX_BUFFER_ADDRESS(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc+32, 0, 32, __val)
-#define SET_TX_DESC_TX_BUFFER_ADDRESS64(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc+36, 0, 32, __val)
-
-#define GET_TX_DESC_TX_BUFFER_ADDRESS(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc+32, 0, 32)
-#define GET_TX_DESC_TX_BUFFER_ADDRESS64(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc+36, 0, 32)
-
-#define SET_TX_DESC_NEXT_DESC_ADDRESS(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc+40, 0, 32, __val)
-#define SET_TX_DESC_NEXT_DESC_ADDRESS64(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc+44, 0, 32, __val)
-
-#define GET_TX_DESC_NEXT_DESC_ADDRESS(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc+40, 0, 32)
-#define GET_TX_DESC_NEXT_DESC_ADDRESS64(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc+44, 0, 32)
-
-#define GET_RX_DESC_PKT_LEN(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc, 0, 14)
-#define GET_RX_DESC_CRC32(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc, 14, 1)
-#define GET_RX_DESC_ICV(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc, 15, 1)
-#define GET_RX_DESC_DRV_INFO_SIZE(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc, 16, 4)
-#define GET_RX_DESC_SECURITY(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc, 20, 3)
-#define GET_RX_DESC_QOS(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc, 23, 1)
-#define GET_RX_DESC_SHIFT(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc, 24, 2)
-#define GET_RX_DESC_PHYST(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc, 26, 1)
-#define GET_RX_DESC_SWDEC(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc, 27, 1)
-#define GET_RX_DESC_LS(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc, 28, 1)
-#define GET_RX_DESC_FS(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc, 29, 1)
-#define GET_RX_DESC_EOR(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc, 30, 1)
-#define GET_RX_DESC_OWN(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc, 31, 1)
-
-#define SET_RX_DESC_PKT_LEN(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc, 0, 14, __val)
-#define SET_RX_DESC_EOR(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc, 30, 1, __val)
-#define SET_RX_DESC_OWN(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc, 31, 1, __val)
-
-#define GET_RX_DESC_MACID(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc+4, 0, 5)
-#define GET_RX_DESC_TID(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc+4, 5, 4)
-#define GET_RX_DESC_HWRSVD(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc+4, 9, 5)
-#define GET_RX_DESC_PAGGR(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc+4, 14, 1)
-#define GET_RX_DESC_FAGGR(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc+4, 15, 1)
-#define GET_RX_DESC_A1_FIT(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc+4, 16, 4)
-#define GET_RX_DESC_A2_FIT(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc+4, 20, 4)
-#define GET_RX_DESC_PAM(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc+4, 24, 1)
-#define GET_RX_DESC_PWR(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc+4, 25, 1)
-#define GET_RX_DESC_MD(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc+4, 26, 1)
-#define GET_RX_DESC_MF(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc+4, 27, 1)
-#define GET_RX_DESC_TYPE(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc+4, 28, 2)
-#define GET_RX_DESC_MC(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc+4, 30, 1)
-#define GET_RX_DESC_BC(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc+4, 31, 1)
-#define GET_RX_DESC_SEQ(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc+8, 0, 12)
-#define GET_RX_DESC_FRAG(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc+8, 12, 4)
-#define GET_RX_DESC_NEXT_PKT_LEN(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc+8, 16, 14)
-#define GET_RX_DESC_NEXT_IND(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc+8, 30, 1)
-#define GET_RX_DESC_RSVD(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc+8, 31, 1)
-
-#define GET_RX_DESC_RXMCS(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc+12, 0, 6)
-#define GET_RX_DESC_RXHT(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc+12, 6, 1)
-#define GET_RX_DESC_SPLCP(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc+12, 8, 1)
-#define GET_RX_DESC_BW(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc+12, 9, 1)
-#define GET_RX_DESC_HTC(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc+12, 10, 1)
-#define GET_RX_DESC_HWPC_ERR(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc+12, 14, 1)
-#define GET_RX_DESC_HWPC_IND(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc+12, 15, 1)
-#define GET_RX_DESC_IV0(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc+12, 16, 16)
-
-#define GET_RX_DESC_IV1(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc+16, 0, 32)
-#define GET_RX_DESC_TSFL(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc+20, 0, 32)
-
-#define GET_RX_DESC_BUFF_ADDR(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc+24, 0, 32)
-#define GET_RX_DESC_BUFF_ADDR64(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc+28, 0, 32)
-
-#define SET_RX_DESC_BUFF_ADDR(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc+24, 0, 32, __val)
-#define SET_RX_DESC_BUFF_ADDR64(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc+28, 0, 32, __val)
-
-#define CLEAR_PCI_TX_DESC_CONTENT(__pdesc, _size) \
- memset((void *)__pdesc, 0, \
- min_t(size_t, _size, TX_DESC_NEXT_DESC_OFFSET))
+static inline void set_tx_desc_pkt_size(__le32 *__pdesc, u32 __val)
+{
+ le32p_replace_bits(__pdesc, __val, GENMASK(15, 0));
+}
+
+static inline void set_tx_desc_offset(__le32 *__pdesc, u32 __val)
+{
+ le32p_replace_bits(__pdesc, __val, GENMASK(23, 16));
+}
+
+static inline void set_tx_desc_htc(__le32 *__pdesc, u32 __val)
+{
+ le32p_replace_bits(__pdesc, __val, BIT(25));
+}
+
+static inline void set_tx_desc_last_seg(__le32 *__pdesc, u32 __val)
+{
+ le32p_replace_bits(__pdesc, __val, BIT(26));
+}
+
+static inline void set_tx_desc_first_seg(__le32 *__pdesc, u32 __val)
+{
+ le32p_replace_bits(__pdesc, __val, BIT(27));
+}
+
+static inline void set_tx_desc_linip(__le32 *__pdesc, u32 __val)
+{
+ le32p_replace_bits(__pdesc, __val, BIT(28));
+}
+
+static inline void set_tx_desc_own(__le32 *__pdesc, u32 __val)
+{
+ le32p_replace_bits(__pdesc, __val, BIT(31));
+}
+
+static inline u32 get_tx_desc_own(__le32 *__pdesc)
+{
+ return le32_get_bits(*__pdesc, BIT(31));
+}
+
+static inline void set_tx_desc_macid(__le32 *__pdesc, u32 __val)
+{
+ le32p_replace_bits((__pdesc + 1), __val, GENMASK(4, 0));
+}
+
+static inline void set_tx_desc_agg_enable(__le32 *__pdesc, u32 __val)
+{
+ le32p_replace_bits((__pdesc + 1), __val, BIT(5));
+}
+
+static inline void set_tx_desc_rdg_enable(__le32 *__pdesc, u32 __val)
+{
+ le32p_replace_bits((__pdesc + 1), __val, BIT(7));
+}
+
+static inline void set_tx_desc_queue_sel(__le32 *__pdesc, u32 __val)
+{
+ le32p_replace_bits((__pdesc + 1), __val, GENMASK(12, 8));
+}
+
+static inline void set_tx_desc_rate_id(__le32 *__pdesc, u32 __val)
+{
+ le32p_replace_bits((__pdesc + 1), __val, GENMASK(19, 16));
+}
+
+static inline void set_tx_desc_sec_type(__le32 *__pdesc, u32 __val)
+{
+ le32p_replace_bits((__pdesc + 1), __val, GENMASK(23, 22));
+}
+
+static inline void set_tx_desc_pkt_offset(__le32 *__pdesc, u32 __val)
+{
+ le32p_replace_bits((__pdesc + 1), __val, GENMASK(30, 26));
+}
+
+static inline void set_tx_desc_more_frag(__le32 *__pdesc, u32 __val)
+{
+ le32p_replace_bits((__pdesc + 2), __val, BIT(17));
+}
+
+static inline void set_tx_desc_ampdu_density(__le32 *__pdesc, u32 __val)
+{
+ le32p_replace_bits((__pdesc + 2), __val, GENMASK(22, 20));
+}
+
+static inline void set_tx_desc_seq(__le32 *__pdesc, u32 __val)
+{
+ le32p_replace_bits((__pdesc + 3), __val, GENMASK(27, 16));
+}
+
+static inline void set_tx_desc_pkt_id(__le32 *__pdesc, u32 __val)
+{
+ le32p_replace_bits((__pdesc + 3), __val, GENMASK(31, 28));
+}
+
+static inline void set_tx_desc_rts_rate(__le32 *__pdesc, u32 __val)
+{
+ le32p_replace_bits((__pdesc + 4), __val, GENMASK(4, 0));
+}
+
+static inline void set_tx_desc_qos(__le32 *__pdesc, u32 __val)
+{
+ le32p_replace_bits((__pdesc + 4), __val, BIT(6));
+}
+
+static inline void set_tx_desc_hwseq_en(__le32 *__pdesc, u32 __val)
+{
+ le32p_replace_bits((__pdesc + 4), __val, BIT(7));
+}
+
+static inline void set_tx_desc_use_rate(__le32 *__pdesc, u32 __val)
+{
+ le32p_replace_bits((__pdesc + 4), __val, BIT(8));
+}
+
+static inline void set_tx_desc_disable_fb(__le32 *__pdesc, u32 __val)
+{
+ le32p_replace_bits((__pdesc + 4), __val, BIT(10));
+}
+
+static inline void set_tx_desc_cts2self(__le32 *__pdesc, u32 __val)
+{
+ le32p_replace_bits((__pdesc + 4), __val, BIT(11));
+}
+
+static inline void set_tx_desc_rts_enable(__le32 *__pdesc, u32 __val)
+{
+ le32p_replace_bits((__pdesc + 4), __val, BIT(12));
+}
+
+static inline void set_tx_desc_hw_rts_enable(__le32 *__pdesc, u32 __val)
+{
+ le32p_replace_bits((__pdesc + 4), __val, BIT(13));
+}
+
+static inline void set_tx_desc_tx_sub_carrier(__le32 *__pdesc, u32 __val)
+{
+ le32p_replace_bits((__pdesc + 4), __val, GENMASK(21, 20));
+}
+
+static inline void set_tx_desc_data_bw(__le32 *__pdesc, u32 __val)
+{
+ le32p_replace_bits((__pdesc + 4), __val, BIT(25));
+}
+
+static inline void set_tx_desc_rts_short(__le32 *__pdesc, u32 __val)
+{
+ le32p_replace_bits((__pdesc + 4), __val, BIT(26));
+}
+
+static inline void set_tx_desc_rts_bw(__le32 *__pdesc, u32 __val)
+{
+ le32p_replace_bits((__pdesc + 4), __val, BIT(27));
+}
+
+static inline void set_tx_desc_rts_sc(__le32 *__pdesc, u32 __val)
+{
+ le32p_replace_bits((__pdesc + 4), __val, GENMASK(29, 28));
+}
+
+static inline void set_tx_desc_rts_stbc(__le32 *__pdesc, u32 __val)
+{
+ le32p_replace_bits((__pdesc + 4), __val, GENMASK(31, 30));
+}
+
+static inline void set_tx_desc_tx_rate(__le32 *__pdesc, u32 __val)
+{
+ le32p_replace_bits((__pdesc + 5), __val, GENMASK(5, 0));
+}
+
+static inline void set_tx_desc_data_shortgi(__le32 *__pdesc, u32 __val)
+{
+ le32p_replace_bits((__pdesc + 5), __val, BIT(6));
+}
+
+static inline void set_tx_desc_data_rate_fb_limit(__le32 *__pdesc, u32 __val)
+{
+ le32p_replace_bits((__pdesc + 5), __val, GENMASK(12, 8));
+}
+
+static inline void set_tx_desc_rts_rate_fb_limit(__le32 *__pdesc, u32 __val)
+{
+ le32p_replace_bits((__pdesc + 5), __val, GENMASK(16, 13));
+}
+
+static inline void set_tx_desc_max_agg_num(__le32 *__pdesc, u32 __val)
+{
+ le32p_replace_bits((__pdesc + 6), __val, GENMASK(15, 11));
+}
+
+static inline void set_tx_desc_tx_buffer_size(__le32 *__pdesc, u32 __val)
+{
+ le32p_replace_bits((__pdesc + 7), __val, GENMASK(15, 0));
+}
+
+static inline void set_tx_desc_tx_buffer_address(__le32 *__pdesc, u32 __val)
+{
+ *(__pdesc + 8) = cpu_to_le32(__val);
+}
+
+static inline u32 get_tx_desc_tx_buffer_address(__le32 *__pdesc)
+{
+ return le32_to_cpu(*(__pdesc + 8));
+}
+
+static inline void set_tx_desc_next_desc_address(__le32 *__pdesc, u32 __val)
+{
+ *(__pdesc + 10) = cpu_to_le32(__val);
+}
+
+static inline u32 get_rx_desc_pkt_len(__le32 *__pdesc)
+{
+ return le32_get_bits(*__pdesc, GENMASK(13, 0));
+}
+
+static inline u32 get_rx_desc_crc32(__le32 *__pdesc)
+{
+ return le32_get_bits(*__pdesc, BIT(14));
+}
+
+static inline u32 get_rx_desc_icv(__le32 *__pdesc)
+{
+ return le32_get_bits(*__pdesc, BIT(15));
+}
+
+static inline u32 get_rx_desc_drv_info_size(__le32 *__pdesc)
+{
+ return le32_get_bits(*__pdesc, GENMASK(19, 16));
+}
+
+static inline u32 get_rx_desc_shift(__le32 *__pdesc)
+{
+ return le32_get_bits(*__pdesc, GENMASK(25, 24));
+}
+
+static inline u32 get_rx_desc_physt(__le32 *__pdesc)
+{
+ return le32_get_bits(*__pdesc, BIT(26));
+}
+
+static inline u32 get_rx_desc_swdec(__le32 *__pdesc)
+{
+ return le32_get_bits(*__pdesc, BIT(27));
+}
+
+static inline u32 get_rx_desc_own(__le32 *__pdesc)
+{
+ return le32_get_bits(*__pdesc, BIT(31));
+}
+
+static inline void set_rx_desc_pkt_len(__le32 *__pdesc, u32 __val)
+{
+ le32p_replace_bits(__pdesc, __val, GENMASK(13, 0));
+}
+
+static inline void set_rx_desc_eor(__le32 *__pdesc, u32 __val)
+{
+ le32p_replace_bits(__pdesc, __val, BIT(30));
+}
+
+static inline void set_rx_desc_own(__le32 *__pdesc, u32 __val)
+{
+ le32p_replace_bits(__pdesc, __val, BIT(31));
+}
+
+static inline u32 get_rx_desc_paggr(__le32 *__pdesc)
+{
+ return le32_get_bits(*(__pdesc + 1), BIT(14));
+}
+
+static inline u32 get_rx_desc_faggr(__le32 *__pdesc)
+{
+ return le32_get_bits(*(__pdesc + 1), BIT(15));
+}
+
+static inline u32 get_rx_desc_rxmcs(__le32 *__pdesc)
+{
+ return le32_get_bits(*(__pdesc + 3), GENMASK(5, 0));
+}
+
+static inline u32 get_rx_desc_rxht(__le32 *__pdesc)
+{
+ return le32_get_bits(*(__pdesc + 3), BIT(6));
+}
+
+static inline u32 get_rx_desc_splcp(__le32 *__pdesc)
+{
+ return le32_get_bits(*(__pdesc + 3), BIT(8));
+}
+
+static inline u32 get_rx_desc_bw(__le32 *__pdesc)
+{
+ return le32_get_bits(*(__pdesc + 3), BIT(9));
+}
+
+static inline u32 get_rx_desc_tsfl(__le32 *__pdesc)
+{
+ return le32_to_cpu(*(__pdesc + 5));
+}
+
+static inline u32 get_rx_desc_buff_addr(__le32 *__pdesc)
+{
+ return le32_to_cpu(*(__pdesc + 6));
+}
+
+static inline void set_rx_desc_buff_addr(__le32 *__pdesc, u32 __val)
+{
+ *(__pdesc + 6) = cpu_to_le32(__val);
+}
+
+static inline void clear_pci_tx_desc_content(__le32 *__pdesc, u32 _size)
+{
+ memset((void *)__pdesc, 0,
+ min_t(size_t, _size, TX_DESC_NEXT_DESC_OFFSET));
+}
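Every conversion in this header follows one mechanical rule: SET_BITS_OFFSET_LE(__pdesc + 4*n, shift, len, val) becomes le32p_replace_bits(__pdesc + n, val, GENMASK(shift + len - 1, shift)); both clear the field and insert the value while leaving the word little-endian. A standalone userspace model of one field, QUEUE_SEL (replace_bits() below is an illustrative stand-in for the kernel helper, not kernel code):

#include <assert.h>
#include <stdint.h>

/* Model of one 32-bit field update: clear bits hi:lo, then insert val. */
static uint32_t replace_bits(uint32_t word, uint32_t val, unsigned int lo,
			     unsigned int hi)
{
	uint32_t mask = (uint32_t)(((1ull << (hi - lo + 1)) - 1) << lo);

	return (word & ~mask) | ((val << lo) & mask);
}

int main(void)
{
	/* QUEUE_SEL: old form (+4, shift 8, len 5) == new (word 1, GENMASK(12, 8)) */
	assert(replace_bits(0xffffffff, 0, 8, 12) == 0xffffe0ff);
	return 0;
}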
/* For 92D early mode */
-#define SET_EARLYMODE_PKTNUM(__paddr, __value) \
- SET_BITS_OFFSET_LE(__paddr, 0, 3, __value)
-#define SET_EARLYMODE_LEN0(__paddr, __value) \
- SET_BITS_OFFSET_LE(__paddr, 4, 12, __value)
-#define SET_EARLYMODE_LEN1(__paddr, __value) \
- SET_BITS_OFFSET_LE(__paddr, 16, 12, __value)
-#define SET_EARLYMODE_LEN2_1(__paddr, __value) \
- SET_BITS_OFFSET_LE(__paddr, 28, 4, __value)
-#define SET_EARLYMODE_LEN2_2(__paddr, __value) \
- SET_BITS_OFFSET_LE(__paddr+4, 0, 8, __value)
-#define SET_EARLYMODE_LEN3(__paddr, __value) \
- SET_BITS_OFFSET_LE(__paddr+4, 8, 12, __value)
-#define SET_EARLYMODE_LEN4(__paddr, __value) \
- SET_BITS_OFFSET_LE(__paddr+4, 20, 12, __value)
+static inline void set_earlymode_pktnum(__le32 *__paddr, u32 __value)
+{
+ le32p_replace_bits(__paddr, __value, GENMASK(2, 0));
+}
+
+static inline void set_earlymode_len0(__le32 *__paddr, u32 __value)
+{
+ le32p_replace_bits(__paddr, __value, GENMASK(15, 4));
+}
+
+static inline void set_earlymode_len1(__le32 *__paddr, u32 __value)
+{
+ le32p_replace_bits(__paddr, __value, GENMASK(27, 16));
+}
+
+static inline void set_earlymode_len2_1(__le32 *__paddr, u32 __value)
+{
+ le32p_replace_bits(__paddr, __value, GENMASK(31, 28));
+}
+
+static inline void set_earlymode_len2_2(__le32 *__paddr, u32 __value)
+{
+ le32p_replace_bits((__paddr + 1), __value, GENMASK(7, 0));
+}
+
+static inline void set_earlymode_len3(__le32 *__paddr, u32 __value)
+{
+ le32p_replace_bits((__paddr + 1), __value, GENMASK(19, 8));
+}
+
+static inline void set_earlymode_len4(__le32 *__paddr, u32 __value)
+{
+ le32p_replace_bits((__paddr + 1), __value, GENMASK(31, 20));
+}
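set_earlymode_len2_1() and set_earlymode_len2_2() exist because the third early-mode length straddles a dword boundary: its low nibble lands in bits 31:28 of word 0 and its high byte in bits 7:0 of word 1, which is exactly the empkt_len[2] & 0xF / empkt_len[2] >> 4 split done by the caller in trx.c. A quick userspace model of the round trip (sketch only; variable names are illustrative):

#include <assert.h>
#include <stdint.h>

int main(void)
{
	uint16_t len2 = 0xabc;				/* a 12-bit early-mode length */
	uint32_t w0 = (uint32_t)(len2 & 0xf) << 28;	/* len2_1, bits 31:28 */
	uint32_t w1 = (len2 >> 4) & 0xff;		/* len2_2, bits 7:0 */

	/* Reassemble: high byte from word 1, low nibble from word 0. */
	assert((((w1 & 0xff) << 4) | (w0 >> 28)) == len2);
	return 0;
}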
struct rx_fwinfo_92d {
u8 gain_trsw[4];
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/dm.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/dm.c
index 648f9108ed4b..551aa86825ed 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/dm.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/dm.c
@@ -12,124 +12,6 @@
#include "fw.h"
#include "trx.h"
-static const u32 ofdmswing_table[OFDM_TABLE_SIZE] = {
- 0x7f8001fe, /* 0, +6.0dB */
- 0x788001e2, /* 1, +5.5dB */
- 0x71c001c7, /* 2, +5.0dB */
- 0x6b8001ae, /* 3, +4.5dB */
- 0x65400195, /* 4, +4.0dB */
- 0x5fc0017f, /* 5, +3.5dB */
- 0x5a400169, /* 6, +3.0dB */
- 0x55400155, /* 7, +2.5dB */
- 0x50800142, /* 8, +2.0dB */
- 0x4c000130, /* 9, +1.5dB */
- 0x47c0011f, /* 10, +1.0dB */
- 0x43c0010f, /* 11, +0.5dB */
- 0x40000100, /* 12, +0dB */
- 0x3c8000f2, /* 13, -0.5dB */
- 0x390000e4, /* 14, -1.0dB */
- 0x35c000d7, /* 15, -1.5dB */
- 0x32c000cb, /* 16, -2.0dB */
- 0x300000c0, /* 17, -2.5dB */
- 0x2d4000b5, /* 18, -3.0dB */
- 0x2ac000ab, /* 19, -3.5dB */
- 0x288000a2, /* 20, -4.0dB */
- 0x26000098, /* 21, -4.5dB */
- 0x24000090, /* 22, -5.0dB */
- 0x22000088, /* 23, -5.5dB */
- 0x20000080, /* 24, -6.0dB */
- 0x1e400079, /* 25, -6.5dB */
- 0x1c800072, /* 26, -7.0dB */
- 0x1b00006c, /* 27. -7.5dB */
- 0x19800066, /* 28, -8.0dB */
- 0x18000060, /* 29, -8.5dB */
- 0x16c0005b, /* 30, -9.0dB */
- 0x15800056, /* 31, -9.5dB */
- 0x14400051, /* 32, -10.0dB */
- 0x1300004c, /* 33, -10.5dB */
- 0x12000048, /* 34, -11.0dB */
- 0x11000044, /* 35, -11.5dB */
- 0x10000040, /* 36, -12.0dB */
- 0x0f00003c, /* 37, -12.5dB */
- 0x0e400039, /* 38, -13.0dB */
- 0x0d800036, /* 39, -13.5dB */
- 0x0cc00033, /* 40, -14.0dB */
- 0x0c000030, /* 41, -14.5dB */
- 0x0b40002d, /* 42, -15.0dB */
-};
-
-static const u8 cckswing_table_ch1ch13[CCK_TABLE_SIZE][8] = {
- {0x36, 0x35, 0x2e, 0x25, 0x1c, 0x12, 0x09, 0x04}, /* 0, +0dB */
- {0x33, 0x32, 0x2b, 0x23, 0x1a, 0x11, 0x08, 0x04}, /* 1, -0.5dB */
- {0x30, 0x2f, 0x29, 0x21, 0x19, 0x10, 0x08, 0x03}, /* 2, -1.0dB */
- {0x2d, 0x2d, 0x27, 0x1f, 0x18, 0x0f, 0x08, 0x03}, /* 3, -1.5dB */
- {0x2b, 0x2a, 0x25, 0x1e, 0x16, 0x0e, 0x07, 0x03}, /* 4, -2.0dB */
- {0x28, 0x28, 0x22, 0x1c, 0x15, 0x0d, 0x07, 0x03}, /* 5, -2.5dB */
- {0x26, 0x25, 0x21, 0x1b, 0x14, 0x0d, 0x06, 0x03}, /* 6, -3.0dB */
- {0x24, 0x23, 0x1f, 0x19, 0x13, 0x0c, 0x06, 0x03}, /* 7, -3.5dB */
- {0x22, 0x21, 0x1d, 0x18, 0x11, 0x0b, 0x06, 0x02}, /* 8, -4.0dB */
- {0x20, 0x20, 0x1b, 0x16, 0x11, 0x08, 0x05, 0x02}, /* 9, -4.5dB */
- {0x1f, 0x1e, 0x1a, 0x15, 0x10, 0x0a, 0x05, 0x02}, /* 10, -5.0dB */
- {0x1d, 0x1c, 0x18, 0x14, 0x0f, 0x0a, 0x05, 0x02}, /* 11, -5.5dB */
- {0x1b, 0x1a, 0x17, 0x13, 0x0e, 0x09, 0x04, 0x02}, /* 12, -6.0dB */
- {0x1a, 0x19, 0x16, 0x12, 0x0d, 0x09, 0x04, 0x02}, /* 13, -6.5dB */
- {0x18, 0x17, 0x15, 0x11, 0x0c, 0x08, 0x04, 0x02}, /* 14, -7.0dB */
- {0x17, 0x16, 0x13, 0x10, 0x0c, 0x08, 0x04, 0x02}, /* 15, -7.5dB */
- {0x16, 0x15, 0x12, 0x0f, 0x0b, 0x07, 0x04, 0x01}, /* 16, -8.0dB */
- {0x14, 0x14, 0x11, 0x0e, 0x0b, 0x07, 0x03, 0x02}, /* 17, -8.5dB */
- {0x13, 0x13, 0x10, 0x0d, 0x0a, 0x06, 0x03, 0x01}, /* 18, -9.0dB */
- {0x12, 0x12, 0x0f, 0x0c, 0x09, 0x06, 0x03, 0x01}, /* 19, -9.5dB */
- {0x11, 0x11, 0x0f, 0x0c, 0x09, 0x06, 0x03, 0x01}, /* 20, -10.0dB */
- {0x10, 0x10, 0x0e, 0x0b, 0x08, 0x05, 0x03, 0x01}, /* 21, -10.5dB */
- {0x0f, 0x0f, 0x0d, 0x0b, 0x08, 0x05, 0x03, 0x01}, /* 22, -11.0dB */
- {0x0e, 0x0e, 0x0c, 0x0a, 0x08, 0x05, 0x02, 0x01}, /* 23, -11.5dB */
- {0x0d, 0x0d, 0x0c, 0x0a, 0x07, 0x05, 0x02, 0x01}, /* 24, -12.0dB */
- {0x0d, 0x0c, 0x0b, 0x09, 0x07, 0x04, 0x02, 0x01}, /* 25, -12.5dB */
- {0x0c, 0x0c, 0x0a, 0x09, 0x06, 0x04, 0x02, 0x01}, /* 26, -13.0dB */
- {0x0b, 0x0b, 0x0a, 0x08, 0x06, 0x04, 0x02, 0x01}, /* 27, -13.5dB */
- {0x0b, 0x0a, 0x09, 0x08, 0x06, 0x04, 0x02, 0x01}, /* 28, -14.0dB */
- {0x0a, 0x0a, 0x09, 0x07, 0x05, 0x03, 0x02, 0x01}, /* 29, -14.5dB */
- {0x0a, 0x09, 0x08, 0x07, 0x05, 0x03, 0x02, 0x01}, /* 30, -15.0dB */
- {0x09, 0x09, 0x08, 0x06, 0x05, 0x03, 0x01, 0x01}, /* 31, -15.5dB */
- {0x09, 0x08, 0x07, 0x06, 0x04, 0x03, 0x01, 0x01} /* 32, -16.0dB */
-};
-
-static const u8 cckswing_table_ch14[CCK_TABLE_SIZE][8] = {
- {0x36, 0x35, 0x2e, 0x1b, 0x00, 0x00, 0x00, 0x00}, /* 0, +0dB */
- {0x33, 0x32, 0x2b, 0x19, 0x00, 0x00, 0x00, 0x00}, /* 1, -0.5dB */
- {0x30, 0x2f, 0x29, 0x18, 0x00, 0x00, 0x00, 0x00}, /* 2, -1.0dB */
- {0x2d, 0x2d, 0x17, 0x17, 0x00, 0x00, 0x00, 0x00}, /* 3, -1.5dB */
- {0x2b, 0x2a, 0x25, 0x15, 0x00, 0x00, 0x00, 0x00}, /* 4, -2.0dB */
- {0x28, 0x28, 0x24, 0x14, 0x00, 0x00, 0x00, 0x00}, /* 5, -2.5dB */
- {0x26, 0x25, 0x21, 0x13, 0x00, 0x00, 0x00, 0x00}, /* 6, -3.0dB */
- {0x24, 0x23, 0x1f, 0x12, 0x00, 0x00, 0x00, 0x00}, /* 7, -3.5dB */
- {0x22, 0x21, 0x1d, 0x11, 0x00, 0x00, 0x00, 0x00}, /* 8, -4.0dB */
- {0x20, 0x20, 0x1b, 0x10, 0x00, 0x00, 0x00, 0x00}, /* 9, -4.5dB */
- {0x1f, 0x1e, 0x1a, 0x0f, 0x00, 0x00, 0x00, 0x00}, /* 10, -5.0dB */
- {0x1d, 0x1c, 0x18, 0x0e, 0x00, 0x00, 0x00, 0x00}, /* 11, -5.5dB */
- {0x1b, 0x1a, 0x17, 0x0e, 0x00, 0x00, 0x00, 0x00}, /* 12, -6.0dB */
- {0x1a, 0x19, 0x16, 0x0d, 0x00, 0x00, 0x00, 0x00}, /* 13, -6.5dB */
- {0x18, 0x17, 0x15, 0x0c, 0x00, 0x00, 0x00, 0x00}, /* 14, -7.0dB */
- {0x17, 0x16, 0x13, 0x0b, 0x00, 0x00, 0x00, 0x00}, /* 15, -7.5dB */
- {0x16, 0x15, 0x12, 0x0b, 0x00, 0x00, 0x00, 0x00}, /* 16, -8.0dB */
- {0x14, 0x14, 0x11, 0x0a, 0x00, 0x00, 0x00, 0x00}, /* 17, -8.5dB */
- {0x13, 0x13, 0x10, 0x0a, 0x00, 0x00, 0x00, 0x00}, /* 18, -9.0dB */
- {0x12, 0x12, 0x0f, 0x09, 0x00, 0x00, 0x00, 0x00}, /* 19, -9.5dB */
- {0x11, 0x11, 0x0f, 0x09, 0x00, 0x00, 0x00, 0x00}, /* 20, -10.0dB */
- {0x10, 0x10, 0x0e, 0x08, 0x00, 0x00, 0x00, 0x00}, /* 21, -10.5dB */
- {0x0f, 0x0f, 0x0d, 0x08, 0x00, 0x00, 0x00, 0x00}, /* 22, -11.0dB */
- {0x0e, 0x0e, 0x0c, 0x07, 0x00, 0x00, 0x00, 0x00}, /* 23, -11.5dB */
- {0x0d, 0x0d, 0x0c, 0x07, 0x00, 0x00, 0x00, 0x00}, /* 24, -12.0dB */
- {0x0d, 0x0c, 0x0b, 0x06, 0x00, 0x00, 0x00, 0x00}, /* 25, -12.5dB */
- {0x0c, 0x0c, 0x0a, 0x06, 0x00, 0x00, 0x00, 0x00}, /* 26, -13.0dB */
- {0x0b, 0x0b, 0x0a, 0x06, 0x00, 0x00, 0x00, 0x00}, /* 27, -13.5dB */
- {0x0b, 0x0a, 0x09, 0x05, 0x00, 0x00, 0x00, 0x00}, /* 28, -14.0dB */
- {0x0a, 0x0a, 0x09, 0x05, 0x00, 0x00, 0x00, 0x00}, /* 29, -14.5dB */
- {0x0a, 0x09, 0x08, 0x05, 0x00, 0x00, 0x00, 0x00}, /* 30, -15.0dB */
- {0x09, 0x09, 0x08, 0x05, 0x00, 0x00, 0x00, 0x00}, /* 31, -15.5dB */
- {0x09, 0x08, 0x07, 0x04, 0x00, 0x00, 0x00, 0x00} /* 32, -16.0dB */
-};
-
static void rtl92ee_dm_false_alarm_counter_statistics(struct ieee80211_hw *hw)
{
u32 ret_value;
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/fw.h b/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/fw.h
index cbfecea232a3..27ac4f8b9a9b 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/fw.h
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/fw.h
@@ -111,44 +111,40 @@ enum rtl8192e_h2c_cmd {
(u32)(((_len) >> 7) + ((_len) & 0x7F ? 1 : 0))
#define SET_H2CCMD_PWRMODE_PARM_MODE(__ph2ccmd, __val) \
- SET_BITS_TO_LE_1BYTE(__ph2ccmd, 0, 8, __val)
+	*(u8 *)__ph2ccmd = __val
#define SET_H2CCMD_PWRMODE_PARM_RLBM(__cmd, __val) \
- SET_BITS_TO_LE_1BYTE((__cmd)+1, 0, 4, __val)
+ u8p_replace_bits(__cmd + 1, __val, GENMASK(3, 0))
#define SET_H2CCMD_PWRMODE_PARM_SMART_PS(__cmd, __val) \
- SET_BITS_TO_LE_1BYTE((__cmd)+1, 4, 4, __val)
+ u8p_replace_bits(__cmd + 1, __val, GENMASK(7, 4))
#define SET_H2CCMD_PWRMODE_PARM_AWAKE_INTERVAL(__cmd, __val) \
- SET_BITS_TO_LE_1BYTE((__cmd)+2, 0, 8, __val)
+	*(u8 *)(__cmd + 2) = __val
#define SET_H2CCMD_PWRMODE_PARM_ALL_QUEUE_UAPSD(__cmd, __val) \
- SET_BITS_TO_LE_1BYTE((__cmd)+3, 0, 8, __val)
+	*(u8 *)(__cmd + 3) = __val
#define SET_H2CCMD_PWRMODE_PARM_PWR_STATE(__cmd, __val) \
- SET_BITS_TO_LE_1BYTE((__cmd)+4, 0, 8, __val)
+	*(u8 *)(__cmd + 4) = __val
#define SET_H2CCMD_PWRMODE_PARM_BYTE5(__cmd, __val) \
- SET_BITS_TO_LE_1BYTE((__cmd) + 5, 0, 8, __val)
-#define GET_92E_H2CCMD_PWRMODE_PARM_MODE(__cmd) \
- LE_BITS_TO_1BYTE(__cmd, 0, 8)
+	*(u8 *)(__cmd + 5) = __val
-#define SET_H2CCMD_JOINBSSRPT_PARM_OPMODE(__ph2ccmd, __val) \
- SET_BITS_TO_LE_1BYTE(__ph2ccmd, 0, 8, __val)
#define SET_H2CCMD_RSVDPAGE_LOC_PROBE_RSP(__ph2ccmd, __val) \
- SET_BITS_TO_LE_1BYTE(__ph2ccmd, 0, 8, __val)
+	*(u8 *)__ph2ccmd = __val
#define SET_H2CCMD_RSVDPAGE_LOC_PSPOLL(__ph2ccmd, __val) \
- SET_BITS_TO_LE_1BYTE((__ph2ccmd)+1, 0, 8, __val)
+	*(u8 *)(__ph2ccmd + 1) = __val
#define SET_H2CCMD_RSVDPAGE_LOC_NULL_DATA(__ph2ccmd, __val) \
- SET_BITS_TO_LE_1BYTE((__ph2ccmd)+2, 0, 8, __val)
+	*(u8 *)(__ph2ccmd + 2) = __val
#define SET_H2CCMD_RSVDPAGE_LOC_QOS_NULL_DATA(__ph2ccmd, __val) \
- SET_BITS_TO_LE_1BYTE((__ph2ccmd) + 3, 0, 8, __val)
+	*(u8 *)(__ph2ccmd + 3) = __val
#define SET_H2CCMD_RSVDPAGE_LOC_BT_QOS_NULL_DATA(__ph2ccmd, __val) \
- SET_BITS_TO_LE_1BYTE((__ph2ccmd) + 4, 0, 8, __val)
+	*(u8 *)(__ph2ccmd + 4) = __val
/* _MEDIA_STATUS_RPT_PARM_CMD1 */
#define SET_H2CCMD_MSRRPT_PARM_OPMODE(__cmd, __val) \
- SET_BITS_TO_LE_1BYTE(__cmd, 0, 1, __val)
+ u8p_replace_bits(__cmd, __val, BIT(0))
#define SET_H2CCMD_MSRRPT_PARM_MACID_IND(__cmd, __val) \
- SET_BITS_TO_LE_1BYTE(__cmd, 1, 1, __val)
+ u8p_replace_bits(__cmd, __val, BIT(1))
#define SET_H2CCMD_MSRRPT_PARM_MACID(__cmd, __val) \
- SET_BITS_TO_LE_1BYTE(__cmd+1, 0, 8, __val)
+	*(u8 *)(__cmd + 1) = __val
#define SET_H2CCMD_MSRRPT_PARM_MACID_END(__cmd, __val) \
- SET_BITS_TO_LE_1BYTE(__cmd+2, 0, 8, __val)
+	*(u8 *)(__cmd + 2) = __val
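For the H2C parameter buffers the same cleanup splits into two cases: fields that occupy a whole byte become plain *(u8 *) stores (the two macros above were also fixed to use their own __cmd parameter rather than the stray __ph2ccmd), while the sub-byte fields (RLBM in bits 3:0 and SMART_PS in bits 7:4 of the second command byte) still need read-modify-write via u8p_replace_bits(). A userspace model of that second byte (replace_bits8() is an illustrative stand-in for u8p_replace_bits(), not the kernel helper):

#include <assert.h>
#include <stdint.h>

/* Clear the masked field in *p, then insert val at the given shift. */
static void replace_bits8(uint8_t *p, uint8_t val, uint8_t mask,
			  unsigned int shift)
{
	*p = (*p & ~mask) | ((uint8_t)(val << shift) & mask);
}

int main(void)
{
	uint8_t cmd[2] = { 0 };

	cmd[0] = 2;				/* PARM_MODE: full byte, plain store */
	replace_bits8(&cmd[1], 1, 0x0f, 0);	/* RLBM,     GENMASK(3, 0) */
	replace_bits8(&cmd[1], 2, 0xf0, 4);	/* SMART_PS, GENMASK(7, 4) */

	assert(cmd[1] == 0x21);
	return 0;
}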
int rtl92ee_download_fw(struct ieee80211_hw *hw, bool buse_wake_on_wlan_fw);
void rtl92ee_fill_h2c_cmd(struct ieee80211_hw *hw, u8 element_id,
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/sw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/sw.c
index b6ee7dae5943..b337d599b6f4 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/sw.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/sw.c
@@ -9,7 +9,6 @@
#include "phy.h"
#include "dm.h"
#include "hw.h"
-#include "sw.h"
#include "fw.h"
#include "trx.h"
#include "led.h"
@@ -65,7 +64,7 @@ static void rtl92ee_init_aspm_vars(struct ieee80211_hw *hw)
rtlpci->const_support_pciaspm = rtlpriv->cfg->mod_params->aspm_support;
}
-int rtl92ee_init_sw_vars(struct ieee80211_hw *hw)
+static int rtl92ee_init_sw_vars(struct ieee80211_hw *hw)
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
@@ -164,7 +163,7 @@ int rtl92ee_init_sw_vars(struct ieee80211_hw *hw)
return 0;
}
-void rtl92ee_deinit_sw_vars(struct ieee80211_hw *hw)
+static void rtl92ee_deinit_sw_vars(struct ieee80211_hw *hw)
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
@@ -175,7 +174,7 @@ void rtl92ee_deinit_sw_vars(struct ieee80211_hw *hw)
}
/* get bt coexist status */
-bool rtl92ee_get_btc_status(void)
+static bool rtl92ee_get_btc_status(void)
{
return true;
}
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/sw.h b/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/sw.h
deleted file mode 100644
index 36e29a2da0fd..000000000000
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/sw.h
+++ /dev/null
@@ -1,11 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/* Copyright(c) 2009-2014 Realtek Corporation.*/
-
-#ifndef __RTL92E_SW_H__
-#define __RTL92E_SW_H__
-
-int rtl92ee_init_sw_vars(struct ieee80211_hw *hw);
-void rtl92ee_deinit_sw_vars(struct ieee80211_hw *hw);
-bool rtl92ee_get_btc_status(void);
-
-#endif
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192se/sw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192se/sw.c
index 1c7ee569f4bf..7a54497b7df2 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192se/sw.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192se/sw.c
@@ -11,7 +11,6 @@
#include "dm.h"
#include "fw.h"
#include "hw.h"
-#include "sw.h"
#include "trx.h"
#include "led.h"
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192se/sw.h b/drivers/net/wireless/realtek/rtlwifi/rtl8192se/sw.h
deleted file mode 100644
index a31efba0e6b5..000000000000
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192se/sw.h
+++ /dev/null
@@ -1,13 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/* Copyright(c) 2009-2012 Realtek Corporation.*/
-
-#ifndef __REALTEK_PCI92SE_SW_H__
-#define __REALTEK_PCI92SE_SW_H__
-
-#define EFUSE_MAX_SECTION 16
-
-int rtl92se_init_sw(struct ieee80211_hw *hw);
-void rtl92se_deinit_sw(struct ieee80211_hw *hw);
-void rtl92se_init_var_map(struct ieee80211_hw *hw);
-
-#endif
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/dm.c b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/dm.c
index d8260c7afe09..c61a92df9d73 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/dm.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/dm.c
@@ -13,118 +13,6 @@
#include "fw.h"
#include "hal_btc.h"
-static const u32 ofdmswing_table[OFDM_TABLE_SIZE] = {
- 0x7f8001fe,
- 0x788001e2,
- 0x71c001c7,
- 0x6b8001ae,
- 0x65400195,
- 0x5fc0017f,
- 0x5a400169,
- 0x55400155,
- 0x50800142,
- 0x4c000130,
- 0x47c0011f,
- 0x43c0010f,
- 0x40000100,
- 0x3c8000f2,
- 0x390000e4,
- 0x35c000d7,
- 0x32c000cb,
- 0x300000c0,
- 0x2d4000b5,
- 0x2ac000ab,
- 0x288000a2,
- 0x26000098,
- 0x24000090,
- 0x22000088,
- 0x20000080,
- 0x1e400079,
- 0x1c800072,
- 0x1b00006c,
- 0x19800066,
- 0x18000060,
- 0x16c0005b,
- 0x15800056,
- 0x14400051,
- 0x1300004c,
- 0x12000048,
- 0x11000044,
- 0x10000040,
-};
-
-static const u8 cckswing_table_ch1ch13[CCK_TABLE_SIZE][8] = {
- {0x36, 0x35, 0x2e, 0x25, 0x1c, 0x12, 0x09, 0x04},
- {0x33, 0x32, 0x2b, 0x23, 0x1a, 0x11, 0x08, 0x04},
- {0x30, 0x2f, 0x29, 0x21, 0x19, 0x10, 0x08, 0x03},
- {0x2d, 0x2d, 0x27, 0x1f, 0x18, 0x0f, 0x08, 0x03},
- {0x2b, 0x2a, 0x25, 0x1e, 0x16, 0x0e, 0x07, 0x03},
- {0x28, 0x28, 0x22, 0x1c, 0x15, 0x0d, 0x07, 0x03},
- {0x26, 0x25, 0x21, 0x1b, 0x14, 0x0d, 0x06, 0x03},
- {0x24, 0x23, 0x1f, 0x19, 0x13, 0x0c, 0x06, 0x03},
- {0x22, 0x21, 0x1d, 0x18, 0x11, 0x0b, 0x06, 0x02},
- {0x20, 0x20, 0x1b, 0x16, 0x11, 0x08, 0x05, 0x02},
- {0x1f, 0x1e, 0x1a, 0x15, 0x10, 0x0a, 0x05, 0x02},
- {0x1d, 0x1c, 0x18, 0x14, 0x0f, 0x0a, 0x05, 0x02},
- {0x1b, 0x1a, 0x17, 0x13, 0x0e, 0x09, 0x04, 0x02},
- {0x1a, 0x19, 0x16, 0x12, 0x0d, 0x09, 0x04, 0x02},
- {0x18, 0x17, 0x15, 0x11, 0x0c, 0x08, 0x04, 0x02},
- {0x17, 0x16, 0x13, 0x10, 0x0c, 0x08, 0x04, 0x02},
- {0x16, 0x15, 0x12, 0x0f, 0x0b, 0x07, 0x04, 0x01},
- {0x14, 0x14, 0x11, 0x0e, 0x0b, 0x07, 0x03, 0x02},
- {0x13, 0x13, 0x10, 0x0d, 0x0a, 0x06, 0x03, 0x01},
- {0x12, 0x12, 0x0f, 0x0c, 0x09, 0x06, 0x03, 0x01},
- {0x11, 0x11, 0x0f, 0x0c, 0x09, 0x06, 0x03, 0x01},
- {0x10, 0x10, 0x0e, 0x0b, 0x08, 0x05, 0x03, 0x01},
- {0x0f, 0x0f, 0x0d, 0x0b, 0x08, 0x05, 0x03, 0x01},
- {0x0e, 0x0e, 0x0c, 0x0a, 0x08, 0x05, 0x02, 0x01},
- {0x0d, 0x0d, 0x0c, 0x0a, 0x07, 0x05, 0x02, 0x01},
- {0x0d, 0x0c, 0x0b, 0x09, 0x07, 0x04, 0x02, 0x01},
- {0x0c, 0x0c, 0x0a, 0x09, 0x06, 0x04, 0x02, 0x01},
- {0x0b, 0x0b, 0x0a, 0x08, 0x06, 0x04, 0x02, 0x01},
- {0x0b, 0x0a, 0x09, 0x08, 0x06, 0x04, 0x02, 0x01},
- {0x0a, 0x0a, 0x09, 0x07, 0x05, 0x03, 0x02, 0x01},
- {0x0a, 0x09, 0x08, 0x07, 0x05, 0x03, 0x02, 0x01},
- {0x09, 0x09, 0x08, 0x06, 0x05, 0x03, 0x01, 0x01},
- {0x09, 0x08, 0x07, 0x06, 0x04, 0x03, 0x01, 0x01}
-};
-
-static const u8 cckswing_table_ch14[CCK_TABLE_SIZE][8] = {
- {0x36, 0x35, 0x2e, 0x1b, 0x00, 0x00, 0x00, 0x00},
- {0x33, 0x32, 0x2b, 0x19, 0x00, 0x00, 0x00, 0x00},
- {0x30, 0x2f, 0x29, 0x18, 0x00, 0x00, 0x00, 0x00},
- {0x2d, 0x2d, 0x17, 0x17, 0x00, 0x00, 0x00, 0x00},
- {0x2b, 0x2a, 0x25, 0x15, 0x00, 0x00, 0x00, 0x00},
- {0x28, 0x28, 0x24, 0x14, 0x00, 0x00, 0x00, 0x00},
- {0x26, 0x25, 0x21, 0x13, 0x00, 0x00, 0x00, 0x00},
- {0x24, 0x23, 0x1f, 0x12, 0x00, 0x00, 0x00, 0x00},
- {0x22, 0x21, 0x1d, 0x11, 0x00, 0x00, 0x00, 0x00},
- {0x20, 0x20, 0x1b, 0x10, 0x00, 0x00, 0x00, 0x00},
- {0x1f, 0x1e, 0x1a, 0x0f, 0x00, 0x00, 0x00, 0x00},
- {0x1d, 0x1c, 0x18, 0x0e, 0x00, 0x00, 0x00, 0x00},
- {0x1b, 0x1a, 0x17, 0x0e, 0x00, 0x00, 0x00, 0x00},
- {0x1a, 0x19, 0x16, 0x0d, 0x00, 0x00, 0x00, 0x00},
- {0x18, 0x17, 0x15, 0x0c, 0x00, 0x00, 0x00, 0x00},
- {0x17, 0x16, 0x13, 0x0b, 0x00, 0x00, 0x00, 0x00},
- {0x16, 0x15, 0x12, 0x0b, 0x00, 0x00, 0x00, 0x00},
- {0x14, 0x14, 0x11, 0x0a, 0x00, 0x00, 0x00, 0x00},
- {0x13, 0x13, 0x10, 0x0a, 0x00, 0x00, 0x00, 0x00},
- {0x12, 0x12, 0x0f, 0x09, 0x00, 0x00, 0x00, 0x00},
- {0x11, 0x11, 0x0f, 0x09, 0x00, 0x00, 0x00, 0x00},
- {0x10, 0x10, 0x0e, 0x08, 0x00, 0x00, 0x00, 0x00},
- {0x0f, 0x0f, 0x0d, 0x08, 0x00, 0x00, 0x00, 0x00},
- {0x0e, 0x0e, 0x0c, 0x07, 0x00, 0x00, 0x00, 0x00},
- {0x0d, 0x0d, 0x0c, 0x07, 0x00, 0x00, 0x00, 0x00},
- {0x0d, 0x0c, 0x0b, 0x06, 0x00, 0x00, 0x00, 0x00},
- {0x0c, 0x0c, 0x0a, 0x06, 0x00, 0x00, 0x00, 0x00},
- {0x0b, 0x0b, 0x0a, 0x06, 0x00, 0x00, 0x00, 0x00},
- {0x0b, 0x0a, 0x09, 0x05, 0x00, 0x00, 0x00, 0x00},
- {0x0a, 0x0a, 0x09, 0x05, 0x00, 0x00, 0x00, 0x00},
- {0x0a, 0x09, 0x08, 0x05, 0x00, 0x00, 0x00, 0x00},
- {0x09, 0x09, 0x08, 0x05, 0x00, 0x00, 0x00, 0x00},
- {0x09, 0x08, 0x07, 0x04, 0x00, 0x00, 0x00, 0x00}
-};
-
static u8 rtl8723e_dm_initial_gain_min_pwdb(struct ieee80211_hw *hw)
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/fw.h b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/fw.h
index 5c843736de8d..3f9ed9b4428e 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/fw.h
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/fw.h
@@ -18,19 +18,19 @@
#define pagenum_128(_len) (u32)(((_len)>>7) + ((_len)&0x7F ? 1 : 0))
#define SET_H2CCMD_PWRMODE_PARM_MODE(__ph2ccmd, __val) \
- SET_BITS_TO_LE_1BYTE(__ph2ccmd, 0, 8, __val)
+ *(u8 *)__ph2ccmd = __val
#define SET_H2CCMD_PWRMODE_PARM_SMART_PS(__ph2ccmd, __val) \
- SET_BITS_TO_LE_1BYTE((__ph2ccmd)+1, 0, 8, __val)
+ *(u8 *)(__ph2ccmd + 1) = __val
#define SET_H2CCMD_PWRMODE_PARM_BCN_PASS_TIME(__ph2ccmd, __val) \
- SET_BITS_TO_LE_1BYTE((__ph2ccmd)+2, 0, 8, __val)
+ *(u8 *)(__ph2ccmd + 2) = __val
#define SET_H2CCMD_JOINBSSRPT_PARM_OPMODE(__ph2ccmd, __val) \
- SET_BITS_TO_LE_1BYTE(__ph2ccmd, 0, 8, __val)
+ *(u8 *)__ph2ccmd = __val
#define SET_H2CCMD_RSVDPAGE_LOC_PROBE_RSP(__ph2ccmd, __val) \
- SET_BITS_TO_LE_1BYTE(__ph2ccmd, 0, 8, __val)
+ *(u8 *)__ph2ccmd = __val
#define SET_H2CCMD_RSVDPAGE_LOC_PSPOLL(__ph2ccmd, __val) \
- SET_BITS_TO_LE_1BYTE((__ph2ccmd)+1, 0, 8, __val)
+ *(u8 *)(__ph2ccmd + 1) = __val
#define SET_H2CCMD_RSVDPAGE_LOC_NULL_DATA(__ph2ccmd, __val) \
- SET_BITS_TO_LE_1BYTE((__ph2ccmd)+2, 0, 8, __val)
+ *(u8 *)(__ph2ccmd + 2) = __val
void rtl8723e_fill_h2c_cmd(struct ieee80211_hw *hw, u8 element_id,
u32 cmd_len, u8 *p_cmdbuffer);
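Note on the conversion above: a byte-wide SET_BITS_TO_LE_1BYTE(ptr, 0, 8, val) is exactly a plain byte store (the 8-bit mask covers the whole byte, and endianness is moot for a single byte), which is why the new macro bodies are bare assignments. A minimal user-space sketch of the equivalence (the helper re-implements the deleted macro for illustration; names and values are hypothetical):

    #include <assert.h>
    #include <stdint.h>

    typedef uint8_t u8;

    /* Old idiom: read-modify-write through an offset/length mask. */
    static void set_bits_to_le_1byte(u8 *p, int off, int len, u8 val)
    {
            u8 mask = (0xFF >> (8 - len)) << off;

            *p = (*p & ~mask) | ((val << off) & mask);
    }

    int main(void)
    {
            u8 a = 0x5a, b = 0x5a;

            set_bits_to_le_1byte(&a, 0, 8, 0x3c);   /* old: full-byte field */
            b = 0x3c;                               /* new: plain store */
            assert(a == b);
            return 0;
    }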
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/sw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/sw.c
index 5702ac6deebf..ea86d5bf33d2 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/sw.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/sw.c
@@ -11,7 +11,6 @@
#include "fw.h"
#include "../rtl8723com/fw_common.h"
#include "hw.h"
-#include "sw.h"
#include "trx.h"
#include "led.h"
#include "table.h"
@@ -67,7 +66,7 @@ static void rtl8723e_init_aspm_vars(struct ieee80211_hw *hw)
rtlpci->const_support_pciaspm = rtlpriv->cfg->mod_params->aspm_support;
}
-int rtl8723e_init_sw_vars(struct ieee80211_hw *hw)
+static int rtl8723e_init_sw_vars(struct ieee80211_hw *hw)
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
@@ -166,7 +165,7 @@ int rtl8723e_init_sw_vars(struct ieee80211_hw *hw)
return 0;
}
-void rtl8723e_deinit_sw_vars(struct ieee80211_hw *hw)
+static void rtl8723e_deinit_sw_vars(struct ieee80211_hw *hw)
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
@@ -177,7 +176,7 @@ void rtl8723e_deinit_sw_vars(struct ieee80211_hw *hw)
}
/* get bt coexist status */
-bool rtl8723e_get_btc_status(void)
+static bool rtl8723e_get_btc_status(void)
{
return true;
}
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/sw.h b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/sw.h
deleted file mode 100644
index 200ce39a0dd7..000000000000
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/sw.h
+++ /dev/null
@@ -1,13 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/* Copyright(c) 2009-2012 Realtek Corporation.*/
-
-#ifndef __RTL8723E_SW_H__
-#define __RTL8723E_SW_H__
-
-int rtl8723e_init_sw_vars(struct ieee80211_hw *hw);
-void rtl8723e_deinit_sw_vars(struct ieee80211_hw *hw);
-void rtl8723e_init_var_map(struct ieee80211_hw *hw);
-bool rtl8723e_get_btc_status(void);
-
-
-#endif
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8723be/fw.h b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/fw.h
index 97ea77464139..7c5e5e916274 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8723be/fw.h
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/fw.h
@@ -83,37 +83,35 @@ enum rtl8723b_h2c_cmd {
#define SET_H2CCMD_PWRMODE_PARM_MODE(__ph2ccmd, __val) \
- SET_BITS_TO_LE_1BYTE(__ph2ccmd, 0, 8, __val)
+ *(u8 *)__ph2ccmd = __val
#define SET_H2CCMD_PWRMODE_PARM_RLBM(__ph2ccmd, __val) \
- SET_BITS_TO_LE_1BYTE((__ph2ccmd)+1, 0, 4, __val)
+ u8p_replace_bits(__ph2ccmd + 1, __val, GENMASK(3, 0))
#define SET_H2CCMD_PWRMODE_PARM_SMART_PS(__ph2ccmd, __val) \
- SET_BITS_TO_LE_1BYTE((__ph2ccmd)+1, 4, 4, __val)
+ u8p_replace_bits(__ph2ccmd + 1, __val, GENMASK(7, 4))
#define SET_H2CCMD_PWRMODE_PARM_AWAKE_INTERVAL(__ph2ccmd, __val) \
- SET_BITS_TO_LE_1BYTE((__ph2ccmd)+2, 0, 8, __val)
+ *(u8 *)(__ph2ccmd + 2) = __val
#define SET_H2CCMD_PWRMODE_PARM_ALL_QUEUE_UAPSD(__ph2ccmd, __val) \
- SET_BITS_TO_LE_1BYTE((__ph2ccmd)+3, 0, 8, __val)
+ *(u8 *)(__ph2ccmd + 3) = __val
#define SET_H2CCMD_PWRMODE_PARM_PWR_STATE(__ph2ccmd, __val) \
- SET_BITS_TO_LE_1BYTE((__ph2ccmd)+4, 0, 8, __val)
+ *(u8 *)(__ph2ccmd + 4) = __val
#define SET_H2CCMD_PWRMODE_PARM_BYTE5(__ph2ccmd, __val) \
- SET_BITS_TO_LE_1BYTE((__ph2ccmd) + 5, 0, 8, __val)
-#define GET_88E_H2CCMD_PWRMODE_PARM_MODE(__ph2ccmd) \
- LE_BITS_TO_1BYTE(__ph2ccmd, 0, 8)
+ *(u8 *)(__ph2ccmd + 5) = __val
#define SET_H2CCMD_MSRRPT_PARM_OPMODE(__ph2ccmd, __val) \
- SET_BITS_TO_LE_1BYTE(__ph2ccmd, 0, 1, __val)
+ u8p_replace_bits(__ph2ccmd, __val, BIT(0))
#define SET_H2CCMD_MSRRPT_PARM_MACID_IND(__ph2ccmd, __val) \
- SET_BITS_TO_LE_1BYTE(__ph2ccmd, 1, 1, __val)
+ u8p_replace_bits(__ph2ccmd, __val, BIT(1))
#define SET_H2CCMD_RSVDPAGE_LOC_PROBE_RSP(__ph2ccmd, __val) \
- SET_BITS_TO_LE_1BYTE(__ph2ccmd, 0, 8, __val)
+ *(u8 *)__ph2ccmd = __val
#define SET_H2CCMD_RSVDPAGE_LOC_PSPOLL(__ph2ccmd, __val) \
- SET_BITS_TO_LE_1BYTE((__ph2ccmd)+1, 0, 8, __val)
+ *(u8 *)(__ph2ccmd + 1) = __val
#define SET_H2CCMD_RSVDPAGE_LOC_NULL_DATA(__ph2ccmd, __val) \
- SET_BITS_TO_LE_1BYTE((__ph2ccmd)+2, 0, 8, __val)
+ *(u8 *)(__ph2ccmd + 2) = __val
#define SET_H2CCMD_RSVDPAGE_LOC_QOS_NULL_DATA(__ph2ccmd, __val) \
- SET_BITS_TO_LE_1BYTE((__ph2ccmd) + 3, 0, 8, __val)
+ *(u8 *)(__ph2ccmd + 3) = __val
#define SET_H2CCMD_RSVDPAGE_LOC_BT_QOS_NULL_DATA(__ph2ccmd, __val) \
- SET_BITS_TO_LE_1BYTE((__ph2ccmd) + 4, 0, 8, __val)
+ *(u8 *)(__ph2ccmd + 4) = __val
void rtl8723be_fill_h2c_cmd(struct ieee80211_hw *hw, u8 element_id,
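Sub-byte fields, by contrast, move to u8p_replace_bits() from <linux/bitfield.h>, which masks and shifts the value into the field for the caller. A hedged sketch of how the two nibbles of PWRMODE parameter byte 1 get packed (the wrapper function is hypothetical; only the two helper calls mirror the macros above):

    #include <linux/bitfield.h>
    #include <linux/bits.h>

    static void pack_pwrmode_byte1(u8 *h2c, u8 rlbm, u8 smart_ps)
    {
            /* RLBM occupies bits 3:0 of byte 1, SMART_PS bits 7:4. */
            u8p_replace_bits(h2c + 1, rlbm, GENMASK(3, 0));
            u8p_replace_bits(h2c + 1, smart_ps, GENMASK(7, 4));
    }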
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8723be/sw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/sw.c
index 3c8528f0ecb3..36209ac5b208 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8723be/sw.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/sw.c
@@ -13,7 +13,6 @@
#include "hw.h"
#include "fw.h"
#include "../rtl8723com/fw_common.h"
-#include "sw.h"
#include "trx.h"
#include "led.h"
#include "table.h"
@@ -64,7 +63,7 @@ static void rtl8723be_init_aspm_vars(struct ieee80211_hw *hw)
rtlpci->const_support_pciaspm = rtlpriv->cfg->mod_params->aspm_support;
}
-int rtl8723be_init_sw_vars(struct ieee80211_hw *hw)
+static int rtl8723be_init_sw_vars(struct ieee80211_hw *hw)
{
int err = 0;
struct rtl_priv *rtlpriv = rtl_priv(hw);
@@ -170,7 +169,7 @@ int rtl8723be_init_sw_vars(struct ieee80211_hw *hw)
return 0;
}
-void rtl8723be_deinit_sw_vars(struct ieee80211_hw *hw)
+static void rtl8723be_deinit_sw_vars(struct ieee80211_hw *hw)
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
@@ -181,7 +180,7 @@ void rtl8723be_deinit_sw_vars(struct ieee80211_hw *hw)
}
/* get bt coexist status */
-bool rtl8723be_get_btc_status(void)
+static bool rtl8723be_get_btc_status(void)
{
return true;
}
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8723be/sw.h b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/sw.h
deleted file mode 100644
index 6ecacf9fbfd7..000000000000
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8723be/sw.h
+++ /dev/null
@@ -1,13 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/* Copyright(c) 2009-2014 Realtek Corporation.*/
-
-#ifndef __RTL8723BE_SW_H__
-#define __RTL8723BE_SW_H__
-
-int rtl8723be_init_sw_vars(struct ieee80211_hw *hw);
-void rtl8723be_deinit_sw_vars(struct ieee80211_hw *hw);
-void rtl8723be_init_var_map(struct ieee80211_hw *hw);
-bool rtl8723be_get_btc_status(void);
-
-
-#endif
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/dm.c b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/dm.c
index b54230433a6b..f57e8794f0ec 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/dm.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/dm.c
@@ -93,124 +93,6 @@ static const u32 rtl8821ae_txscaling_table[TXSCALE_TABLE_SIZE] = {
0x3FE /* 36, +6.0dB */
};
-static const u32 ofdmswing_table[] = {
- 0x0b40002d, /* 0, -15.0dB */
- 0x0c000030, /* 1, -14.5dB */
- 0x0cc00033, /* 2, -14.0dB */
- 0x0d800036, /* 3, -13.5dB */
- 0x0e400039, /* 4, -13.0dB */
- 0x0f00003c, /* 5, -12.5dB */
- 0x10000040, /* 6, -12.0dB */
- 0x11000044, /* 7, -11.5dB */
- 0x12000048, /* 8, -11.0dB */
- 0x1300004c, /* 9, -10.5dB */
- 0x14400051, /* 10, -10.0dB */
- 0x15800056, /* 11, -9.5dB */
- 0x16c0005b, /* 12, -9.0dB */
- 0x18000060, /* 13, -8.5dB */
- 0x19800066, /* 14, -8.0dB */
- 0x1b00006c, /* 15, -7.5dB */
- 0x1c800072, /* 16, -7.0dB */
- 0x1e400079, /* 17, -6.5dB */
- 0x20000080, /* 18, -6.0dB */
- 0x22000088, /* 19, -5.5dB */
- 0x24000090, /* 20, -5.0dB */
- 0x26000098, /* 21, -4.5dB */
- 0x288000a2, /* 22, -4.0dB */
- 0x2ac000ab, /* 23, -3.5dB */
- 0x2d4000b5, /* 24, -3.0dB */
- 0x300000c0, /* 25, -2.5dB */
- 0x32c000cb, /* 26, -2.0dB */
- 0x35c000d7, /* 27, -1.5dB */
- 0x390000e4, /* 28, -1.0dB */
- 0x3c8000f2, /* 29, -0.5dB */
- 0x40000100, /* 30, +0dB */
- 0x43c0010f, /* 31, +0.5dB */
- 0x47c0011f, /* 32, +1.0dB */
- 0x4c000130, /* 33, +1.5dB */
- 0x50800142, /* 34, +2.0dB */
- 0x55400155, /* 35, +2.5dB */
- 0x5a400169, /* 36, +3.0dB */
- 0x5fc0017f, /* 37, +3.5dB */
- 0x65400195, /* 38, +4.0dB */
- 0x6b8001ae, /* 39, +4.5dB */
- 0x71c001c7, /* 40, +5.0dB */
- 0x788001e2, /* 41, +5.5dB */
- 0x7f8001fe /* 42, +6.0dB */
-};
-
-static const u8 cckswing_table_ch1ch13[CCK_TABLE_SIZE][8] = {
- {0x09, 0x08, 0x07, 0x06, 0x04, 0x03, 0x01, 0x01}, /* 0, -16.0dB */
- {0x09, 0x09, 0x08, 0x06, 0x05, 0x03, 0x01, 0x01}, /* 1, -15.5dB */
- {0x0a, 0x09, 0x08, 0x07, 0x05, 0x03, 0x02, 0x01}, /* 2, -15.0dB */
- {0x0a, 0x0a, 0x09, 0x07, 0x05, 0x03, 0x02, 0x01}, /* 3, -14.5dB */
- {0x0b, 0x0a, 0x09, 0x08, 0x06, 0x04, 0x02, 0x01}, /* 4, -14.0dB */
- {0x0b, 0x0b, 0x0a, 0x08, 0x06, 0x04, 0x02, 0x01}, /* 5, -13.5dB */
- {0x0c, 0x0c, 0x0a, 0x09, 0x06, 0x04, 0x02, 0x01}, /* 6, -13.0dB */
- {0x0d, 0x0c, 0x0b, 0x09, 0x07, 0x04, 0x02, 0x01}, /* 7, -12.5dB */
- {0x0d, 0x0d, 0x0c, 0x0a, 0x07, 0x05, 0x02, 0x01}, /* 8, -12.0dB */
- {0x0e, 0x0e, 0x0c, 0x0a, 0x08, 0x05, 0x02, 0x01}, /* 9, -11.5dB */
- {0x0f, 0x0f, 0x0d, 0x0b, 0x08, 0x05, 0x03, 0x01}, /* 10, -11.0dB */
- {0x10, 0x10, 0x0e, 0x0b, 0x08, 0x05, 0x03, 0x01}, /* 11, -10.5dB */
- {0x11, 0x11, 0x0f, 0x0c, 0x09, 0x06, 0x03, 0x01}, /* 12, -10.0dB */
- {0x12, 0x12, 0x0f, 0x0c, 0x09, 0x06, 0x03, 0x01}, /* 13, -9.5dB */
- {0x13, 0x13, 0x10, 0x0d, 0x0a, 0x06, 0x03, 0x01}, /* 14, -9.0dB */
- {0x14, 0x14, 0x11, 0x0e, 0x0b, 0x07, 0x03, 0x02}, /* 15, -8.5dB */
- {0x16, 0x15, 0x12, 0x0f, 0x0b, 0x07, 0x04, 0x01}, /* 16, -8.0dB */
- {0x17, 0x16, 0x13, 0x10, 0x0c, 0x08, 0x04, 0x02}, /* 17, -7.5dB */
- {0x18, 0x17, 0x15, 0x11, 0x0c, 0x08, 0x04, 0x02}, /* 18, -7.0dB */
- {0x1a, 0x19, 0x16, 0x12, 0x0d, 0x09, 0x04, 0x02}, /* 19, -6.5dB */
- {0x1b, 0x1a, 0x17, 0x13, 0x0e, 0x09, 0x04, 0x02}, /* 20, -6.0dB */
- {0x1d, 0x1c, 0x18, 0x14, 0x0f, 0x0a, 0x05, 0x02}, /* 21, -5.5dB */
- {0x1f, 0x1e, 0x1a, 0x15, 0x10, 0x0a, 0x05, 0x02}, /* 22, -5.0dB */
- {0x20, 0x20, 0x1b, 0x16, 0x11, 0x08, 0x05, 0x02}, /* 23, -4.5dB */
- {0x22, 0x21, 0x1d, 0x18, 0x11, 0x0b, 0x06, 0x02}, /* 24, -4.0dB */
- {0x24, 0x23, 0x1f, 0x19, 0x13, 0x0c, 0x06, 0x03}, /* 25, -3.5dB */
- {0x26, 0x25, 0x21, 0x1b, 0x14, 0x0d, 0x06, 0x03}, /* 26, -3.0dB */
- {0x28, 0x28, 0x22, 0x1c, 0x15, 0x0d, 0x07, 0x03}, /* 27, -2.5dB */
- {0x2b, 0x2a, 0x25, 0x1e, 0x16, 0x0e, 0x07, 0x03}, /* 28, -2.0dB */
- {0x2d, 0x2d, 0x27, 0x1f, 0x18, 0x0f, 0x08, 0x03}, /* 29, -1.5dB */
- {0x30, 0x2f, 0x29, 0x21, 0x19, 0x10, 0x08, 0x03}, /* 30, -1.0dB */
- {0x33, 0x32, 0x2b, 0x23, 0x1a, 0x11, 0x08, 0x04}, /* 31, -0.5dB */
- {0x36, 0x35, 0x2e, 0x25, 0x1c, 0x12, 0x09, 0x04} /* 32, +0dB */
-};
-
-static const u8 cckswing_table_ch14[CCK_TABLE_SIZE][8] = {
- {0x09, 0x08, 0x07, 0x04, 0x00, 0x00, 0x00, 0x00}, /* 0, -16.0dB */
- {0x09, 0x09, 0x08, 0x05, 0x00, 0x00, 0x00, 0x00}, /* 1, -15.5dB */
- {0x0a, 0x09, 0x08, 0x05, 0x00, 0x00, 0x00, 0x00}, /* 2, -15.0dB */
- {0x0a, 0x0a, 0x09, 0x05, 0x00, 0x00, 0x00, 0x00}, /* 3, -14.5dB */
- {0x0b, 0x0a, 0x09, 0x05, 0x00, 0x00, 0x00, 0x00}, /* 4, -14.0dB */
- {0x0b, 0x0b, 0x0a, 0x06, 0x00, 0x00, 0x00, 0x00}, /* 5, -13.5dB */
- {0x0c, 0x0c, 0x0a, 0x06, 0x00, 0x00, 0x00, 0x00}, /* 6, -13.0dB */
- {0x0d, 0x0c, 0x0b, 0x06, 0x00, 0x00, 0x00, 0x00}, /* 7, -12.5dB */
- {0x0d, 0x0d, 0x0c, 0x07, 0x00, 0x00, 0x00, 0x00}, /* 8, -12.0dB */
- {0x0e, 0x0e, 0x0c, 0x07, 0x00, 0x00, 0x00, 0x00}, /* 9, -11.5dB */
- {0x0f, 0x0f, 0x0d, 0x08, 0x00, 0x00, 0x00, 0x00}, /* 10, -11.0dB */
- {0x10, 0x10, 0x0e, 0x08, 0x00, 0x00, 0x00, 0x00}, /* 11, -10.5dB */
- {0x11, 0x11, 0x0f, 0x09, 0x00, 0x00, 0x00, 0x00}, /* 12, -10.0dB */
- {0x12, 0x12, 0x0f, 0x09, 0x00, 0x00, 0x00, 0x00}, /* 13, -9.5dB */
- {0x13, 0x13, 0x10, 0x0a, 0x00, 0x00, 0x00, 0x00}, /* 14, -9.0dB */
- {0x14, 0x14, 0x11, 0x0a, 0x00, 0x00, 0x00, 0x00}, /* 15, -8.5dB */
- {0x16, 0x15, 0x12, 0x0b, 0x00, 0x00, 0x00, 0x00}, /* 16, -8.0dB */
- {0x17, 0x16, 0x13, 0x0b, 0x00, 0x00, 0x00, 0x00}, /* 17, -7.5dB */
- {0x18, 0x17, 0x15, 0x0c, 0x00, 0x00, 0x00, 0x00}, /* 18, -7.0dB */
- {0x1a, 0x19, 0x16, 0x0d, 0x00, 0x00, 0x00, 0x00}, /* 19, -6.5dB */
- {0x1b, 0x1a, 0x17, 0x0e, 0x00, 0x00, 0x00, 0x00}, /* 20, -6.0dB */
- {0x1d, 0x1c, 0x18, 0x0e, 0x00, 0x00, 0x00, 0x00}, /* 21, -5.5dB */
- {0x1f, 0x1e, 0x1a, 0x0f, 0x00, 0x00, 0x00, 0x00}, /* 22, -5.0dB */
- {0x20, 0x20, 0x1b, 0x10, 0x00, 0x00, 0x00, 0x00}, /* 23, -4.5dB */
- {0x22, 0x21, 0x1d, 0x11, 0x00, 0x00, 0x00, 0x00}, /* 24, -4.0dB */
- {0x24, 0x23, 0x1f, 0x12, 0x00, 0x00, 0x00, 0x00}, /* 25, -3.5dB */
- {0x26, 0x25, 0x21, 0x13, 0x00, 0x00, 0x00, 0x00}, /* 26, -3.0dB */
- {0x28, 0x28, 0x24, 0x14, 0x00, 0x00, 0x00, 0x00}, /* 27, -2.5dB */
- {0x2b, 0x2a, 0x25, 0x15, 0x00, 0x00, 0x00, 0x00}, /* 28, -2.0dB */
- {0x2d, 0x2d, 0x17, 0x17, 0x00, 0x00, 0x00, 0x00}, /* 29, -1.5dB */
- {0x30, 0x2f, 0x29, 0x18, 0x00, 0x00, 0x00, 0x00}, /* 30, -1.0dB */
- {0x33, 0x32, 0x2b, 0x19, 0x00, 0x00, 0x00, 0x00}, /* 31, -0.5dB */
- {0x36, 0x35, 0x2e, 0x1b, 0x00, 0x00, 0x00, 0x00} /* 32, +0dB */
-};
-
static const u32 edca_setting_dl[PEER_MAX] = {
0xa44f, /* 0 UNKNOWN */
0x5ea44f, /* 1 REALTEK_90 */
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/fw.h b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/fw.h
index e11e496b7277..c269942b3f4a 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/fw.h
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/fw.h
@@ -151,123 +151,115 @@ enum rtl8821a_h2c_cmd {
#define pagenum_128(_len) (u32)(((_len)>>7) + ((_len)&0x7F ? 1 : 0))
#define SET_8812_H2CCMD_WOWLAN_FUNC_ENABLE(__cmd, __value) \
- SET_BITS_TO_LE_1BYTE(__cmd, 0, 1, __value)
+ u8p_replace_bits(__cmd, __value, BIT(0))
#define SET_8812_H2CCMD_WOWLAN_PATTERN_MATCH_ENABLE(__cmd, __value) \
- SET_BITS_TO_LE_1BYTE(__cmd, 1, 1, __value)
+ u8p_replace_bits(__cmd, __value, BIT(1))
#define SET_8812_H2CCMD_WOWLAN_MAGIC_PKT_ENABLE(__cmd, __value) \
- SET_BITS_TO_LE_1BYTE(__cmd, 2, 1, __value)
+ u8p_replace_bits(__cmd, __value, BIT(2))
#define SET_8812_H2CCMD_WOWLAN_UNICAST_PKT_ENABLE(__cmd, __value) \
- SET_BITS_TO_LE_1BYTE(__cmd, 3, 1, __value)
+ u8p_replace_bits(__cmd, __value, BIT(3))
#define SET_8812_H2CCMD_WOWLAN_ALL_PKT_DROP(__cmd, __value) \
- SET_BITS_TO_LE_1BYTE(__cmd, 4, 1, __value)
+ u8p_replace_bits(__cmd, __value, BIT(4))
#define SET_8812_H2CCMD_WOWLAN_GPIO_ACTIVE(__cmd, __value) \
- SET_BITS_TO_LE_1BYTE(__cmd, 5, 1, __value)
+ u8p_replace_bits(__cmd, __value, BIT(5))
#define SET_8812_H2CCMD_WOWLAN_REKEY_WAKE_UP(__cmd, __value) \
- SET_BITS_TO_LE_1BYTE(__cmd, 6, 1, __value)
+ u8p_replace_bits(__cmd, __value, BIT(6))
#define SET_8812_H2CCMD_WOWLAN_DISCONNECT_WAKE_UP(__cmd, __value) \
- SET_BITS_TO_LE_1BYTE(__cmd, 7, 1, __value)
+ u8p_replace_bits(__cmd, __value, BIT(7))
#define SET_8812_H2CCMD_WOWLAN_GPIONUM(__cmd, __value) \
- SET_BITS_TO_LE_1BYTE((__cmd) + 1, 0, 8, __value)
+ *(u8 *)(__cmd + 1) = __value
#define SET_8812_H2CCMD_WOWLAN_GPIO_DURATION(__cmd, __value) \
- SET_BITS_TO_LE_1BYTE((__cmd) + 2, 0, 8, __value)
+ *(u8 *)(__cmd + 2) = __value
#define SET_H2CCMD_PWRMODE_PARM_MODE(__ph2ccmd, __val) \
- SET_BITS_TO_LE_1BYTE(__ph2ccmd, 0, 8, __val)
+ *(u8 *)__ph2ccmd = __val
#define SET_H2CCMD_PWRMODE_PARM_RLBM(__cmd, __value) \
- SET_BITS_TO_LE_1BYTE((__cmd)+1, 0, 4, __value)
+ u8p_replace_bits(__cmd + 1, __value, GENMASK(3, 0))
#define SET_H2CCMD_PWRMODE_PARM_SMART_PS(__cmd, __value) \
- SET_BITS_TO_LE_1BYTE((__cmd)+1, 4, 4, __value)
+ u8p_replace_bits(__cmd + 1, __value, GENMASK(7, 4))
#define SET_H2CCMD_PWRMODE_PARM_AWAKE_INTERVAL(__cmd, __value) \
- SET_BITS_TO_LE_1BYTE((__cmd)+2, 0, 8, __value)
+ *(u8 *)(__cmd + 2) = __value
#define SET_H2CCMD_PWRMODE_PARM_ALL_QUEUE_UAPSD(__cmd, __value) \
- SET_BITS_TO_LE_1BYTE((__cmd)+3, 0, 8, __value)
+ *(u8 *)(__cmd + 3) = __value
#define SET_H2CCMD_PWRMODE_PARM_PWR_STATE(__cmd, __value) \
- SET_BITS_TO_LE_1BYTE((__cmd)+4, 0, 8, __value)
+ *(u8 *)(__cmd + 4) = __value
#define SET_H2CCMD_PWRMODE_PARM_BYTE5(__cmd, __value) \
- SET_BITS_TO_LE_1BYTE((__cmd) + 5, 0, 8, __value)
-#define GET_8821AE_H2CCMD_PWRMODE_PARM_MODE(__cmd) \
- LE_BITS_TO_1BYTE(__cmd, 0, 8)
+ *(u8 *)(__cmd + 5) = __value
-#define SET_H2CCMD_JOINBSSRPT_PARM_OPMODE(__ph2ccmd, __val) \
- SET_BITS_TO_LE_1BYTE(__ph2ccmd, 0, 8, __val)
#define SET_H2CCMD_RSVDPAGE_LOC_PSPOLL(__ph2ccmd, __val) \
- SET_BITS_TO_LE_1BYTE((__ph2ccmd)+1, 0, 8, __val)
+ *(u8 *)(__ph2ccmd + 1) = __val
#define SET_H2CCMD_RSVDPAGE_LOC_NULL_DATA(__ph2ccmd, __val) \
- SET_BITS_TO_LE_1BYTE((__ph2ccmd)+2, 0, 8, __val)
+ *(u8 *)(__ph2ccmd + 2) = __val
#define SET_H2CCMD_RSVDPAGE_LOC_QOS_NULL_DATA(__ph2ccmd, __val) \
- SET_BITS_TO_LE_1BYTE((__ph2ccmd)+3, 0, 8, __val)
+ *(u8 *)(__ph2ccmd + 3) = __val
#define SET_H2CCMD_RSVDPAGE_LOC_BT_QOS_NULL_DATA(__ph2ccmd, __val) \
- SET_BITS_TO_LE_1BYTE((__ph2ccmd) + 4, 0, 8, __val)
+ *(u8 *)(__ph2ccmd + 4) = __val
/* _MEDIA_STATUS_RPT_PARM_CMD1 */
#define SET_H2CCMD_MSRRPT_PARM_OPMODE(__cmd, __value) \
- SET_BITS_TO_LE_1BYTE(__cmd, 0, 1, __value)
+ u8p_replace_bits(__cmd, __value, BIT(0))
#define SET_H2CCMD_MSRRPT_PARM_MACID_IND(__cmd, __value) \
- SET_BITS_TO_LE_1BYTE(__cmd, 1, 1, __value)
-#define SET_H2CCMD_MSRRPT_PARM_MACID(__cmd, __value) \
- SET_BITS_TO_LE_1BYTE(__cmd+1, 0, 8, __value)
-#define SET_H2CCMD_MSRRPT_PARM_MACID_END(__cmd, __value) \
- SET_BITS_TO_LE_1BYTE(__cmd+2, 0, 8, __value)
+ u8p_replace_bits(__cmd, __value, BIT(1))
/* AP_OFFLOAD */
#define SET_H2CCMD_AP_OFFLOAD_ON(__cmd, __value) \
- SET_BITS_TO_LE_1BYTE(__cmd, 0, 8, __value)
+ *(u8 *)__cmd = __value
#define SET_H2CCMD_AP_OFFLOAD_HIDDEN(__cmd, __value) \
- SET_BITS_TO_LE_1BYTE((__cmd)+1, 0, 8, __value)
+ *(u8 *)(__cmd + 1) = __value
#define SET_H2CCMD_AP_OFFLOAD_DENYANY(__cmd, __value) \
- SET_BITS_TO_LE_1BYTE((__cmd)+2, 0, 8, __value)
+ *(u8 *)(__cmd + 2) = __value
#define SET_H2CCMD_AP_OFFLOAD_WAKEUP_EVT_RPT(__cmd, __value) \
- SET_BITS_TO_LE_1BYTE((__cmd)+3, 0, 8, __value)
+ *(u8 *)(__cmd + 3) = __value
/* Keep Alive Control*/
#define SET_8812_H2CCMD_KEEP_ALIVE_ENABLE(__cmd, __value) \
- SET_BITS_TO_LE_1BYTE(__cmd, 0, 1, __value)
+ u8p_replace_bits(__cmd, __value, BIT(0))
#define SET_8812_H2CCMD_KEEP_ALIVE_ACCPEPT_USER_DEFINED(__cmd, __value) \
- SET_BITS_TO_LE_1BYTE(__cmd, 1, 1, __value)
+ u8p_replace_bits(__cmd, __value, BIT(1))
#define SET_8812_H2CCMD_KEEP_ALIVE_PERIOD(__cmd, __value) \
- SET_BITS_TO_LE_1BYTE((__cmd)+1, 0, 8, __value)
+ *(u8 *)(__cmd + 1) = __value
/*REMOTE_WAKE_CTRL */
#define SET_8812_H2CCMD_REMOTE_WAKECTRL_ENABLE(__cmd, __value) \
- SET_BITS_TO_LE_1BYTE(__cmd, 0, 1, __value)
+ u8p_replace_bits(__cmd, __value, BIT(0))
#define SET_8812_H2CCMD_REMOTE_WAKE_CTRL_ARP_OFFLOAD_EN(__cmd, __value)\
- SET_BITS_TO_LE_1BYTE(__cmd, 1, 1, __value)
+ u8p_replace_bits(__cmd, __value, BIT(1))
#define SET_8812_H2CCMD_REMOTE_WAKE_CTRL_NDP_OFFLOAD_EN(__cmd, __value)\
- SET_BITS_TO_LE_1BYTE(__cmd, 2, 1, __value)
+ u8p_replace_bits(__cmd, __value, BIT(2))
#define SET_8812_H2CCMD_REMOTE_WAKE_CTRL_GTK_OFFLOAD_EN(__cmd, __value)\
- SET_BITS_TO_LE_1BYTE(__cmd, 3, 1, __value)
+ u8p_replace_bits(__cmd, __value, BIT(3))
#define SET_8812_H2CCMD_REMOTE_WAKE_CTRL_REALWOWV2_EN(__cmd, __value)\
- SET_BITS_TO_LE_1BYTE(__cmd, 6, 1, __value)
+ u8p_replace_bits(__cmd, __value, BIT(6))
/* GTK_OFFLOAD */
#define SET_8812_H2CCMD_AOAC_GLOBAL_INFO_PAIRWISE_ENC_ALG(__cmd, __value)\
- SET_BITS_TO_LE_1BYTE(__cmd, 0, 8, __value)
+ *(u8 *)__cmd = __value
#define SET_8812_H2CCMD_AOAC_GLOBAL_INFO_GROUP_ENC_ALG(__cmd, __value) \
- SET_BITS_TO_LE_1BYTE((__cmd)+1, 0, 8, __value)
+ *(u8 *)(__cmd + 1) = __value
/* AOAC_RSVDPAGE_LOC */
#define SET_8821AE_H2CCMD_AOAC_RSVDPAGE_LOC_REMOTE_WAKE_CTRL_INFO(__cmd, __value) \
- SET_BITS_TO_LE_1BYTE((__cmd), 0, 8, __value)
+ *(u8 *)__cmd = __value
#define SET_8821AE_H2CCMD_AOAC_RSVDPAGE_LOC_ARP_RSP(__cmd, __value) \
- SET_BITS_TO_LE_1BYTE((__cmd)+1, 0, 8, __value)
+ *(u8 *)(__cmd + 1) = __value
#define SET_8821AE_H2CCMD_AOAC_RSVDPAGE_LOC_NEIGHBOR_ADV(__cmd, __value)\
- SET_BITS_TO_LE_1BYTE((__cmd)+2, 0, 8, __value)
+ *(u8 *)(__cmd + 2) = __value
#define SET_8821AE_H2CCMD_AOAC_RSVDPAGE_LOC_GTK_RSP(__cmd, __value) \
- SET_BITS_TO_LE_1BYTE((__cmd)+3, 0, 8, __value)
+ *(u8 *)(__cmd + 3) = __value
#define SET_8821AE_H2CCMD_AOAC_RSVDPAGE_LOC_GTK_INFO(__cmd, __value) \
- SET_BITS_TO_LE_1BYTE((__cmd)+4, 0, 8, __value)
+ *(u8 *)(__cmd + 4) = __value
#define SET_8821AE_H2CCMD_AOAC_RSVDPAGE_LOC_GTK_EXT_MEM(__cmd, __value) \
- SET_BITS_TO_LE_1BYTE((__cmd)+5, 0, 8, __value)
+ *(u8 *)(__cmd + 5) = __value
/* Disconnect_Decision_Control */
#define SET_8812_H2CCMD_DISCONNECT_DECISION_CTRL_ENABLE(__cmd, __value) \
- SET_BITS_TO_LE_1BYTE(__cmd, 0, 1, __value)
+ u8p_replace_bits(__cmd, __value, BIT(0))
#define SET_8812_H2CCMD_DISCONNECT_DECISION_CTRL_USER_SETTING(__cmd, __value)\
- SET_BITS_TO_LE_1BYTE(__cmd, 1, 1, __value)
+ u8p_replace_bits(__cmd, __value, BIT(1))
#define SET_8812_H2CCMD_DISCONNECT_DECISION_CTRL_CHECK_PERIOD(__cmd, __value)\
- SET_BITS_TO_LE_1BYTE((__cmd)+1, 0, 8, __value) /* unit: beacon period */
+ *(u8 *)(__cmd + 1) = __value /* unit: beacon period */
#define SET_8812_H2CCMD_DISCONNECT_DECISION_CTRL_TRYPKT_NUM(__cmd, __value)\
- SET_BITS_TO_LE_1BYTE((__cmd)+2, 0, 8, __value)
+ *(u8 *)(__cmd + 2) = __value
int rtl8821ae_download_fw(struct ieee80211_hw *hw, bool buse_wake_on_wlan_fw);
#if (USE_SPECIFIC_FW_TO_SUPPORT_WOWLAN == 1)
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/sw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/sw.c
index 3def6a2b3450..d8df816753cb 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/sw.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/sw.c
@@ -10,7 +10,6 @@
#include "dm.h"
#include "hw.h"
#include "fw.h"
-#include "sw.h"
#include "trx.h"
#include "led.h"
#include "table.h"
@@ -65,7 +64,7 @@ static void rtl8821ae_init_aspm_vars(struct ieee80211_hw *hw)
}
/*InitializeVariables8812E*/
-int rtl8821ae_init_sw_vars(struct ieee80211_hw *hw)
+static int rtl8821ae_init_sw_vars(struct ieee80211_hw *hw)
{
int err = 0;
struct rtl_priv *rtlpriv = rtl_priv(hw);
@@ -211,7 +210,7 @@ int rtl8821ae_init_sw_vars(struct ieee80211_hw *hw)
return 0;
}
-void rtl8821ae_deinit_sw_vars(struct ieee80211_hw *hw)
+static void rtl8821ae_deinit_sw_vars(struct ieee80211_hw *hw)
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
@@ -228,7 +227,7 @@ void rtl8821ae_deinit_sw_vars(struct ieee80211_hw *hw)
}
/* get bt coexist status */
-bool rtl8821ae_get_btc_status(void)
+static bool rtl8821ae_get_btc_status(void)
{
return true;
}
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/sw.h b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/sw.h
deleted file mode 100644
index 9d7610f84b20..000000000000
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/sw.h
+++ /dev/null
@@ -1,12 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/* Copyright(c) 2009-2010 Realtek Corporation.*/
-
-#ifndef __RTL8821AE_SW_H__
-#define __RTL8821AE_SW_H__
-
-int rtl8821ae_init_sw_vars(struct ieee80211_hw *hw);
-void rtl8821ae_deinit_sw_vars(struct ieee80211_hw *hw);
-void rtl8821ae_init_var_map(struct ieee80211_hw *hw);
-bool rtl8821ae_get_btc_status(void);
-
-#endif
diff --git a/drivers/net/wireless/realtek/rtlwifi/wifi.h b/drivers/net/wireless/realtek/rtlwifi/wifi.h
index 3bdda1c98339..1cff9f07c9e9 100644
--- a/drivers/net/wireless/realtek/rtlwifi/wifi.h
+++ b/drivers/net/wireless/realtek/rtlwifi/wifi.h
@@ -2898,121 +2898,6 @@ enum bt_radio_shared {
* 2. Before write integer to IO.
* 3. After read integer from IO.
****************************************/
-/* Convert little data endian to host ordering */
-#define EF1BYTE(_val) \
- ((u8)(_val))
-#define EF2BYTE(_val) \
- (le16_to_cpu(_val))
-#define EF4BYTE(_val) \
- (le32_to_cpu(_val))
-
-/* Read data from memory */
-#define READEF1BYTE(_ptr) \
- EF1BYTE(*((u8 *)(_ptr)))
-/* Read le16 data from memory and convert to host ordering */
-#define READEF2BYTE(_ptr) \
- EF2BYTE(*(_ptr))
-#define READEF4BYTE(_ptr) \
- EF4BYTE(*(_ptr))
-
-/* Create a bit mask
- * Examples:
- * BIT_LEN_MASK_32(0) => 0x00000000
- * BIT_LEN_MASK_32(1) => 0x00000001
- * BIT_LEN_MASK_32(2) => 0x00000003
- * BIT_LEN_MASK_32(32) => 0xFFFFFFFF
- */
-#define BIT_LEN_MASK_32(__bitlen) \
- (0xFFFFFFFF >> (32 - (__bitlen)))
-#define BIT_LEN_MASK_16(__bitlen) \
- (0xFFFF >> (16 - (__bitlen)))
-#define BIT_LEN_MASK_8(__bitlen) \
- (0xFF >> (8 - (__bitlen)))
-
-/* Create an offset bit mask
- * Examples:
- * BIT_OFFSET_LEN_MASK_32(0, 2) => 0x00000003
- * BIT_OFFSET_LEN_MASK_32(16, 2) => 0x00030000
- */
-#define BIT_OFFSET_LEN_MASK_32(__bitoffset, __bitlen) \
- (BIT_LEN_MASK_32(__bitlen) << (__bitoffset))
-#define BIT_OFFSET_LEN_MASK_16(__bitoffset, __bitlen) \
- (BIT_LEN_MASK_16(__bitlen) << (__bitoffset))
-#define BIT_OFFSET_LEN_MASK_8(__bitoffset, __bitlen) \
- (BIT_LEN_MASK_8(__bitlen) << (__bitoffset))
-
-/*Description:
- * Return 4-byte value in host byte ordering from
- * 4-byte pointer in little-endian system.
- */
-#define LE_P4BYTE_TO_HOST_4BYTE(__pstart) \
- (EF4BYTE(*((__le32 *)(__pstart))))
-#define LE_P2BYTE_TO_HOST_2BYTE(__pstart) \
- (EF2BYTE(*((__le16 *)(__pstart))))
-#define LE_P1BYTE_TO_HOST_1BYTE(__pstart) \
- (EF1BYTE(*((u8 *)(__pstart))))
-
-/*Description:
- * Translate subfield (continuous bits in little-endian) of 4-byte
- * value to host byte ordering.
- */
-#define LE_BITS_TO_4BYTE(__pstart, __bitoffset, __bitlen) \
- ( \
- (LE_P4BYTE_TO_HOST_4BYTE(__pstart) >> (__bitoffset)) & \
- BIT_LEN_MASK_32(__bitlen) \
- )
-#define LE_BITS_TO_2BYTE(__pstart, __bitoffset, __bitlen) \
- ( \
- (LE_P2BYTE_TO_HOST_2BYTE(__pstart) >> (__bitoffset)) & \
- BIT_LEN_MASK_16(__bitlen) \
- )
-#define LE_BITS_TO_1BYTE(__pstart, __bitoffset, __bitlen) \
- ( \
- (LE_P1BYTE_TO_HOST_1BYTE(__pstart) >> (__bitoffset)) & \
- BIT_LEN_MASK_8(__bitlen) \
- )
-
-/* Description:
- * Mask subfield (continuous bits in little-endian) of 4-byte value
- * and return the result in 4-byte value in host byte ordering.
- */
-#define LE_BITS_CLEARED_TO_4BYTE(__pstart, __bitoffset, __bitlen) \
- ( \
- LE_P4BYTE_TO_HOST_4BYTE(__pstart) & \
- (~BIT_OFFSET_LEN_MASK_32(__bitoffset, __bitlen)) \
- )
-#define LE_BITS_CLEARED_TO_2BYTE(__pstart, __bitoffset, __bitlen) \
- ( \
- LE_P2BYTE_TO_HOST_2BYTE(__pstart) & \
- (~BIT_OFFSET_LEN_MASK_16(__bitoffset, __bitlen)) \
- )
-#define LE_BITS_CLEARED_TO_1BYTE(__pstart, __bitoffset, __bitlen) \
- ( \
- LE_P1BYTE_TO_HOST_1BYTE(__pstart) & \
- (~BIT_OFFSET_LEN_MASK_8(__bitoffset, __bitlen)) \
- )
-
-/* Description:
- * Set subfield of little-endian 4-byte value to specified value.
- */
-#define SET_BITS_TO_LE_4BYTE(__pstart, __bitoffset, __bitlen, __val) \
- *((__le32 *)(__pstart)) = \
- cpu_to_le32( \
- LE_BITS_CLEARED_TO_4BYTE(__pstart, __bitoffset, __bitlen) | \
- ((((u32)__val) & BIT_LEN_MASK_32(__bitlen)) << (__bitoffset)) \
- )
-#define SET_BITS_TO_LE_2BYTE(__pstart, __bitoffset, __bitlen, __val) \
- *((__le16 *)(__pstart)) = \
- cpu_to_le16( \
- LE_BITS_CLEARED_TO_2BYTE(__pstart, __bitoffset, __bitlen) | \
- ((((u16)__val) & BIT_LEN_MASK_16(__bitlen)) << (__bitoffset)) \
- )
-#define SET_BITS_TO_LE_1BYTE(__pstart, __bitoffset, __bitlen, __val) \
- *((u8 *)(__pstart)) = EF1BYTE \
- ( \
- LE_BITS_CLEARED_TO_1BYTE(__pstart, __bitoffset, __bitlen) | \
- ((((u8)__val) & BIT_LEN_MASK_8(__bitlen)) << (__bitoffset)) \
- )
#define N_BYTE_ALIGMENT(__value, __aligment) ((__aligment == 1) ? \
(__value) : (((__value + __aligment - 1) / __aligment) * __aligment))
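The hand-rolled little-endian bit-field helpers deleted above have direct equivalents in <linux/bitfield.h>, which is what the per-driver conversions earlier in this diff rely on. A minimal sketch of the mapping for a 4-bit field at bit offset 8 of a little-endian dword (the wrapper names are illustrative, not kernel API):

    #include <linux/bitfield.h>
    #include <linux/bits.h>

    /* SET_BITS_TO_LE_4BYTE(p, 8, 4, v) becomes: */
    static void set_field(__le32 *p, u32 v)
    {
            le32p_replace_bits(p, v, GENMASK(11, 8));
    }

    /* LE_BITS_TO_4BYTE(p, 8, 4) becomes: */
    static u32 get_field(const __le32 *p)
    {
            return le32_get_bits(*p, GENMASK(11, 8));
    }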
diff --git a/drivers/net/wireless/realtek/rtw88/Makefile b/drivers/net/wireless/realtek/rtw88/Makefile
index 15e12155a04c..cac148d13cf1 100644
--- a/drivers/net/wireless/realtek/rtw88/Makefile
+++ b/drivers/net/wireless/realtek/rtw88/Makefile
@@ -15,6 +15,7 @@ rtw88-y += main.o \
ps.o \
sec.o \
bf.o \
+ wow.o \
regd.o
rtw88-$(CONFIG_RTW88_8822BE) += rtw8822b.o rtw8822b_table.o
diff --git a/drivers/net/wireless/realtek/rtw88/debug.h b/drivers/net/wireless/realtek/rtw88/debug.h
index cd28f675e9cb..a0f36f29b4a6 100644
--- a/drivers/net/wireless/realtek/rtw88/debug.h
+++ b/drivers/net/wireless/realtek/rtw88/debug.h
@@ -18,6 +18,7 @@ enum rtw_debug_mask {
RTW_DBG_DEBUGFS = 0x00000200,
RTW_DBG_PS = 0x00000400,
RTW_DBG_BF = 0x00000800,
+ RTW_DBG_WOW = 0x00001000,
RTW_DBG_ALL = 0xffffffff
};
diff --git a/drivers/net/wireless/realtek/rtw88/fw.c b/drivers/net/wireless/realtek/rtw88/fw.c
index b8c581161f61..243441453ead 100644
--- a/drivers/net/wireless/realtek/rtw88/fw.c
+++ b/drivers/net/wireless/realtek/rtw88/fw.c
@@ -10,6 +10,7 @@
#include "sec.h"
#include "debug.h"
#include "util.h"
+#include "wow.h"
static void rtw_fw_c2h_cmd_handle_ext(struct rtw_dev *rtwdev,
struct sk_buff *skb)
@@ -482,6 +483,96 @@ void rtw_fw_set_pwr_mode(struct rtw_dev *rtwdev)
rtw_fw_send_h2c_command(rtwdev, h2c_pkt);
}
+void rtw_fw_set_keep_alive_cmd(struct rtw_dev *rtwdev, bool enable)
+{
+ u8 h2c_pkt[H2C_PKT_SIZE] = {0};
+ struct rtw_fw_wow_keep_alive_para mode = {
+ .adopt = true,
+ .pkt_type = KEEP_ALIVE_NULL_PKT,
+ .period = 5,
+ };
+
+ SET_H2C_CMD_ID_CLASS(h2c_pkt, H2C_CMD_KEEP_ALIVE);
+ SET_KEEP_ALIVE_ENABLE(h2c_pkt, enable);
+ SET_KEEP_ALIVE_ADOPT(h2c_pkt, mode.adopt);
+ SET_KEEP_ALIVE_PKT_TYPE(h2c_pkt, mode.pkt_type);
+ SET_KEEP_ALIVE_CHECK_PERIOD(h2c_pkt, mode.period);
+
+ rtw_fw_send_h2c_command(rtwdev, h2c_pkt);
+}
+
+void rtw_fw_set_disconnect_decision_cmd(struct rtw_dev *rtwdev, bool enable)
+{
+ struct rtw_wow_param *rtw_wow = &rtwdev->wow;
+ u8 h2c_pkt[H2C_PKT_SIZE] = {0};
+ struct rtw_fw_wow_disconnect_para mode = {
+ .adopt = true,
+ .period = 30,
+ .retry_count = 5,
+ };
+
+ SET_H2C_CMD_ID_CLASS(h2c_pkt, H2C_CMD_DISCONNECT_DECISION);
+
+ if (test_bit(RTW_WOW_FLAG_EN_DISCONNECT, rtw_wow->flags)) {
+ SET_DISCONNECT_DECISION_ENABLE(h2c_pkt, enable);
+ SET_DISCONNECT_DECISION_ADOPT(h2c_pkt, mode.adopt);
+ SET_DISCONNECT_DECISION_CHECK_PERIOD(h2c_pkt, mode.period);
+ SET_DISCONNECT_DECISION_TRY_PKT_NUM(h2c_pkt, mode.retry_count);
+ }
+
+ rtw_fw_send_h2c_command(rtwdev, h2c_pkt);
+}
+
+void rtw_fw_set_wowlan_ctrl_cmd(struct rtw_dev *rtwdev, bool enable)
+{
+ struct rtw_wow_param *rtw_wow = &rtwdev->wow;
+ u8 h2c_pkt[H2C_PKT_SIZE] = {0};
+
+ SET_H2C_CMD_ID_CLASS(h2c_pkt, H2C_CMD_WOWLAN);
+
+ SET_WOWLAN_FUNC_ENABLE(h2c_pkt, enable);
+ if (rtw_wow_mgd_linked(rtwdev)) {
+ if (test_bit(RTW_WOW_FLAG_EN_MAGIC_PKT, rtw_wow->flags))
+ SET_WOWLAN_MAGIC_PKT_ENABLE(h2c_pkt, enable);
+ if (test_bit(RTW_WOW_FLAG_EN_DISCONNECT, rtw_wow->flags))
+ SET_WOWLAN_DEAUTH_WAKEUP_ENABLE(h2c_pkt, enable);
+ if (test_bit(RTW_WOW_FLAG_EN_REKEY_PKT, rtw_wow->flags))
+ SET_WOWLAN_REKEY_WAKEUP_ENABLE(h2c_pkt, enable);
+ if (rtw_wow->pattern_cnt)
+ SET_WOWLAN_PATTERN_MATCH_ENABLE(h2c_pkt, enable);
+ }
+
+ rtw_fw_send_h2c_command(rtwdev, h2c_pkt);
+}
+
+void rtw_fw_set_aoac_global_info_cmd(struct rtw_dev *rtwdev,
+ u8 pairwise_key_enc,
+ u8 group_key_enc)
+{
+ u8 h2c_pkt[H2C_PKT_SIZE] = {0};
+
+ SET_H2C_CMD_ID_CLASS(h2c_pkt, H2C_CMD_AOAC_GLOBAL_INFO);
+
+ SET_AOAC_GLOBAL_INFO_PAIRWISE_ENC_ALG(h2c_pkt, pairwise_key_enc);
+ SET_AOAC_GLOBAL_INFO_GROUP_ENC_ALG(h2c_pkt, group_key_enc);
+
+ rtw_fw_send_h2c_command(rtwdev, h2c_pkt);
+}
+
+void rtw_fw_set_remote_wake_ctrl_cmd(struct rtw_dev *rtwdev, bool enable)
+{
+ u8 h2c_pkt[H2C_PKT_SIZE] = {0};
+
+ SET_H2C_CMD_ID_CLASS(h2c_pkt, H2C_CMD_REMOTE_WAKE_CTRL);
+
+ SET_REMOTE_WAKECTRL_ENABLE(h2c_pkt, enable);
+
+ if (rtw_wow_no_link(rtwdev))
+ SET_REMOTE_WAKE_CTRL_NLO_OFFLOAD_EN(h2c_pkt, enable);
+
+ rtw_fw_send_h2c_command(rtwdev, h2c_pkt);
+}
+
static u8 rtw_get_rsvd_page_location(struct rtw_dev *rtwdev,
enum rtw_rsvd_packet_type type)
{
@@ -496,6 +587,26 @@ static u8 rtw_get_rsvd_page_location(struct rtw_dev *rtwdev,
return location;
}
+void rtw_fw_set_nlo_info(struct rtw_dev *rtwdev, bool enable)
+{
+ u8 h2c_pkt[H2C_PKT_SIZE] = {0};
+ u8 loc_nlo;
+
+ loc_nlo = rtw_get_rsvd_page_location(rtwdev, RSVD_NLO_INFO);
+
+ SET_H2C_CMD_ID_CLASS(h2c_pkt, H2C_CMD_NLO_INFO);
+
+ SET_NLO_FUN_EN(h2c_pkt, enable);
+ if (enable) {
+ if (rtw_fw_lps_deep_mode)
+ SET_NLO_PS_32K(h2c_pkt, enable);
+ SET_NLO_IGNORE_SECURITY(h2c_pkt, enable);
+ SET_NLO_LOC_NLO_INFO(h2c_pkt, loc_nlo);
+ }
+
+ rtw_fw_send_h2c_command(rtwdev, h2c_pkt);
+}
+
void rtw_fw_set_pg_info(struct rtw_dev *rtwdev)
{
struct rtw_lps_conf *conf = &rtwdev->lps_conf;
@@ -510,10 +621,45 @@ void rtw_fw_set_pg_info(struct rtw_dev *rtwdev)
LPS_PG_INFO_LOC(h2c_pkt, loc_pg);
LPS_PG_DPK_LOC(h2c_pkt, loc_dpk);
LPS_PG_SEC_CAM_EN(h2c_pkt, conf->sec_cam_backup);
+ LPS_PG_PATTERN_CAM_EN(h2c_pkt, conf->pattern_cam_backup);
rtw_fw_send_h2c_command(rtwdev, h2c_pkt);
}
+u8 rtw_get_rsvd_page_probe_req_location(struct rtw_dev *rtwdev,
+ struct cfg80211_ssid *ssid)
+{
+ struct rtw_rsvd_page *rsvd_pkt;
+ u8 location = 0;
+
+ list_for_each_entry(rsvd_pkt, &rtwdev->rsvd_page_list, list) {
+ if (rsvd_pkt->type != RSVD_PROBE_REQ)
+ continue;
+ if ((!ssid && !rsvd_pkt->ssid) ||
+ rtw_ssid_equal(rsvd_pkt->ssid, ssid))
+ location = rsvd_pkt->page;
+ }
+
+ return location;
+}
+
+u16 rtw_get_rsvd_page_probe_req_size(struct rtw_dev *rtwdev,
+ struct cfg80211_ssid *ssid)
+{
+ struct rtw_rsvd_page *rsvd_pkt;
+ u16 size = 0;
+
+ list_for_each_entry(rsvd_pkt, &rtwdev->rsvd_page_list, list) {
+ if (rsvd_pkt->type != RSVD_PROBE_REQ)
+ continue;
+ if ((!ssid && !rsvd_pkt->ssid) ||
+ rtw_ssid_equal(rsvd_pkt->ssid, ssid))
+ size = rsvd_pkt->skb->len;
+ }
+
+ return size;
+}
+
void rtw_send_rsvd_page_h2c(struct rtw_dev *rtwdev)
{
u8 h2c_pkt[H2C_PKT_SIZE] = {0};
@@ -559,6 +705,95 @@ rtw_beacon_get(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
return skb_new;
}
+static struct sk_buff *rtw_nlo_info_get(struct ieee80211_hw *hw)
+{
+ struct rtw_dev *rtwdev = hw->priv;
+ struct rtw_chip_info *chip = rtwdev->chip;
+ struct rtw_pno_request *pno_req = &rtwdev->wow.pno_req;
+ struct rtw_nlo_info_hdr *nlo_hdr;
+ struct cfg80211_ssid *ssid;
+ struct sk_buff *skb;
+ u8 *pos, loc;
+ u32 size;
+ int i;
+
+ if (!pno_req->inited || !pno_req->match_set_cnt)
+ return NULL;
+
+ size = sizeof(struct rtw_nlo_info_hdr) + pno_req->match_set_cnt *
+ IEEE80211_MAX_SSID_LEN + chip->tx_pkt_desc_sz;
+
+ skb = alloc_skb(size, GFP_KERNEL);
+ if (!skb)
+ return NULL;
+
+ skb_reserve(skb, chip->tx_pkt_desc_sz);
+
+ nlo_hdr = skb_put_zero(skb, sizeof(struct rtw_nlo_info_hdr));
+
+ nlo_hdr->nlo_count = pno_req->match_set_cnt;
+ nlo_hdr->hidden_ap_count = pno_req->match_set_cnt;
+
+ /* pattern check for firmware */
+ memset(nlo_hdr->pattern_check, 0xA5, FW_NLO_INFO_CHECK_SIZE);
+
+ for (i = 0; i < pno_req->match_set_cnt; i++)
+ nlo_hdr->ssid_len[i] = pno_req->match_sets[i].ssid.ssid_len;
+
+ for (i = 0; i < pno_req->match_set_cnt; i++) {
+ ssid = &pno_req->match_sets[i].ssid;
+ loc = rtw_get_rsvd_page_probe_req_location(rtwdev, ssid);
+ if (!loc) {
+ rtw_err(rtwdev, "failed to get probe req rsvd loc\n");
+ kfree(skb);
+ return NULL;
+ }
+ nlo_hdr->location[i] = loc;
+ }
+
+ for (i = 0; i < pno_req->match_set_cnt; i++) {
+ pos = skb_put_zero(skb, IEEE80211_MAX_SSID_LEN);
+ memcpy(pos, pno_req->match_sets[i].ssid.ssid,
+ pno_req->match_sets[i].ssid.ssid_len);
+ }
+
+ return skb;
+}
+
+static struct sk_buff *rtw_cs_channel_info_get(struct ieee80211_hw *hw)
+{
+ struct rtw_dev *rtwdev = hw->priv;
+ struct rtw_chip_info *chip = rtwdev->chip;
+ struct rtw_pno_request *pno_req = &rtwdev->wow.pno_req;
+ struct ieee80211_channel *channels = pno_req->channels;
+ struct sk_buff *skb;
+ int count = pno_req->channel_cnt;
+ u8 *pos;
+ int i = 0;
+
+ skb = alloc_skb(4 * count + chip->tx_pkt_desc_sz, GFP_KERNEL);
+ if (!skb)
+ return NULL;
+
+ skb_reserve(skb, chip->tx_pkt_desc_sz);
+
+ for (i = 0; i < count; i++) {
+ pos = skb_put_zero(skb, 4);
+
+ CHSW_INFO_SET_CH(pos, channels[i].hw_value);
+
+ if (channels[i].flags & IEEE80211_CHAN_RADAR)
+ CHSW_INFO_SET_ACTION_ID(pos, 0);
+ else
+ CHSW_INFO_SET_ACTION_ID(pos, 1);
+ CHSW_INFO_SET_TIMEOUT(pos, 1);
+ CHSW_INFO_SET_PRI_CH_IDX(pos, 1);
+ CHSW_INFO_SET_BW(pos, 0);
+ }
+
+ return skb;
+}
+
static struct sk_buff *rtw_lps_pg_dpk_get(struct ieee80211_hw *hw)
{
struct rtw_dev *rtwdev = hw->priv;
@@ -591,6 +826,7 @@ static struct sk_buff *rtw_lps_pg_info_get(struct ieee80211_hw *hw,
struct rtw_chip_info *chip = rtwdev->chip;
struct rtw_lps_conf *conf = &rtwdev->lps_conf;
struct rtw_lps_pg_info_hdr *pg_info_hdr;
+ struct rtw_wow_param *rtw_wow = &rtwdev->wow;
struct sk_buff *skb;
u32 size;
@@ -605,19 +841,22 @@ static struct sk_buff *rtw_lps_pg_info_get(struct ieee80211_hw *hw,
pg_info_hdr->macid = find_first_bit(rtwdev->mac_id_map, RTW_MAX_MAC_ID_NUM);
pg_info_hdr->sec_cam_count =
rtw_sec_cam_pg_backup(rtwdev, pg_info_hdr->sec_cam);
+ pg_info_hdr->pattern_count = rtw_wow->pattern_cnt;
conf->sec_cam_backup = pg_info_hdr->sec_cam_count != 0;
+ conf->pattern_cam_backup = rtw_wow->pattern_cnt != 0;
return skb;
}
static struct sk_buff *rtw_get_rsvd_page_skb(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
- enum rtw_rsvd_packet_type type)
+ struct rtw_rsvd_page *rsvd_pkt)
{
struct sk_buff *skb_new;
+ struct cfg80211_ssid *ssid;
- switch (type) {
+ switch (rsvd_pkt->type) {
case RSVD_BEACON:
skb_new = rtw_beacon_get(hw, vif);
break;
@@ -639,6 +878,21 @@ static struct sk_buff *rtw_get_rsvd_page_skb(struct ieee80211_hw *hw,
case RSVD_LPS_PG_INFO:
skb_new = rtw_lps_pg_info_get(hw, vif);
break;
+ case RSVD_PROBE_REQ:
+ ssid = rsvd_pkt->ssid;
+ if (ssid)
+ skb_new = ieee80211_probereq_get(hw, vif->addr,
+ ssid->ssid,
+ ssid->ssid_len, 0);
+ else
+ skb_new = ieee80211_probereq_get(hw, vif->addr, NULL, 0, 0);
+ break;
+ case RSVD_NLO_INFO:
+ skb_new = rtw_nlo_info_get(hw);
+ break;
+ case RSVD_CH_INFO:
+ skb_new = rtw_cs_channel_info_get(hw);
+ break;
default:
return NULL;
}
@@ -680,25 +934,53 @@ static void rtw_rsvd_page_list_to_buf(struct rtw_dev *rtwdev, u8 page_size,
memcpy(buf, skb->data, skb->len);
}
+static struct rtw_rsvd_page *rtw_alloc_rsvd_page(struct rtw_dev *rtwdev,
+ enum rtw_rsvd_packet_type type,
+ bool txdesc)
+{
+ struct rtw_rsvd_page *rsvd_pkt;
+
+ rsvd_pkt = kzalloc(sizeof(*rsvd_pkt), GFP_KERNEL);
+ if (!rsvd_pkt)
+ return NULL;
+
+ rsvd_pkt->type = type;
+ rsvd_pkt->add_txdesc = txdesc;
+
+ return rsvd_pkt;
+}
+
+static void rtw_insert_rsvd_page(struct rtw_dev *rtwdev,
+ struct rtw_rsvd_page *rsvd_pkt)
+{
+ lockdep_assert_held(&rtwdev->mutex);
+ list_add_tail(&rsvd_pkt->list, &rtwdev->rsvd_page_list);
+}
+
void rtw_add_rsvd_page(struct rtw_dev *rtwdev, enum rtw_rsvd_packet_type type,
bool txdesc)
{
struct rtw_rsvd_page *rsvd_pkt;
- lockdep_assert_held(&rtwdev->mutex);
+ rsvd_pkt = rtw_alloc_rsvd_page(rtwdev, type, txdesc);
+ if (!rsvd_pkt)
+ return;
- list_for_each_entry(rsvd_pkt, &rtwdev->rsvd_page_list, list) {
- if (rsvd_pkt->type == type)
- return;
- }
+ rtw_insert_rsvd_page(rtwdev, rsvd_pkt);
+}
- rsvd_pkt = kmalloc(sizeof(*rsvd_pkt), GFP_KERNEL);
+void rtw_add_rsvd_page_probe_req(struct rtw_dev *rtwdev,
+ struct cfg80211_ssid *ssid)
+{
+ struct rtw_rsvd_page *rsvd_pkt;
+
+ rsvd_pkt = rtw_alloc_rsvd_page(rtwdev, RSVD_PROBE_REQ, true);
if (!rsvd_pkt)
return;
- rsvd_pkt->type = type;
- rsvd_pkt->add_txdesc = txdesc;
- list_add_tail(&rsvd_pkt->list, &rtwdev->rsvd_page_list);
+ rsvd_pkt->ssid = ssid;
+ rtw_insert_rsvd_page(rtwdev, rsvd_pkt);
}
void rtw_reset_rsvd_page(struct rtw_dev *rtwdev)
@@ -795,7 +1077,7 @@ static u8 *rtw_build_rsvd_page(struct rtw_dev *rtwdev,
page_margin = page_size - tx_desc_sz;
list_for_each_entry(rsvd_pkt, &rtwdev->rsvd_page_list, list) {
- iter = rtw_get_rsvd_page_skb(hw, vif, rsvd_pkt->type);
+ iter = rtw_get_rsvd_page_skb(hw, vif, rsvd_pkt);
if (!iter) {
rtw_err(rtwdev, "failed to build rsvd packet\n");
goto release_skb;
@@ -857,13 +1139,16 @@ static u8 *rtw_build_rsvd_page(struct rtw_dev *rtwdev,
page += rtw_len_to_page(rsvd_pkt->skb->len, page_size);
kfree_skb(rsvd_pkt->skb);
+ rsvd_pkt->skb = NULL;
}
return buf;
release_skb:
- list_for_each_entry(rsvd_pkt, &rtwdev->rsvd_page_list, list)
+ list_for_each_entry(rsvd_pkt, &rtwdev->rsvd_page_list, list) {
kfree_skb(rsvd_pkt->skb);
+ rsvd_pkt->skb = NULL;
+ }
return NULL;
}
@@ -973,3 +1258,81 @@ out:
rtw_write8(rtwdev, REG_RCR + 2, rcr);
return 0;
}
+
+static void __rtw_fw_update_pkt(struct rtw_dev *rtwdev, u8 pkt_id, u16 size,
+ u8 location)
+{
+ u8 h2c_pkt[H2C_PKT_SIZE] = {0};
+ u16 total_size = H2C_PKT_HDR_SIZE + H2C_PKT_UPDATE_PKT_LEN;
+
+ rtw_h2c_pkt_set_header(h2c_pkt, H2C_PKT_UPDATE_PKT);
+
+ SET_PKT_H2C_TOTAL_LEN(h2c_pkt, total_size);
+ UPDATE_PKT_SET_PKT_ID(h2c_pkt, pkt_id);
+ UPDATE_PKT_SET_LOCATION(h2c_pkt, location);
+
+ /* include txdesc size */
+ UPDATE_PKT_SET_SIZE(h2c_pkt, size);
+
+ rtw_fw_send_h2c_packet(rtwdev, h2c_pkt);
+}
+
+void rtw_fw_update_pkt_probe_req(struct rtw_dev *rtwdev,
+ struct cfg80211_ssid *ssid)
+{
+ u8 loc;
+ u32 size;
+
+ loc = rtw_get_rsvd_page_probe_req_location(rtwdev, ssid);
+ if (!loc) {
+ rtw_err(rtwdev, "failed to get probe_req rsvd loc\n");
+ return;
+ }
+
+ size = rtw_get_rsvd_page_probe_req_size(rtwdev, ssid);
+ if (!size) {
+ rtw_err(rtwdev, "failed to get probe_req rsvd size\n");
+ return;
+ }
+
+ __rtw_fw_update_pkt(rtwdev, RTW_PACKET_PROBE_REQ, size, loc);
+}
+
+void rtw_fw_channel_switch(struct rtw_dev *rtwdev, bool enable)
+{
+ struct rtw_pno_request *rtw_pno_req = &rtwdev->wow.pno_req;
+ u8 h2c_pkt[H2C_PKT_SIZE] = {0};
+ u16 total_size = H2C_PKT_HDR_SIZE + H2C_PKT_CH_SWITCH_LEN;
+ u8 loc_ch_info;
+ const struct rtw_ch_switch_option cs_option = {
+ .dest_ch_en = 1,
+ .dest_ch = 1,
+ .periodic_option = 2,
+ .normal_period = 5,
+ .normal_period_sel = 0,
+ .normal_cycle = 10,
+ .slow_period = 1,
+ .slow_period_sel = 1,
+ };
+
+ rtw_h2c_pkt_set_header(h2c_pkt, H2C_PKT_CH_SWITCH);
+ SET_PKT_H2C_TOTAL_LEN(h2c_pkt, total_size);
+
+ CH_SWITCH_SET_START(h2c_pkt, enable);
+ CH_SWITCH_SET_DEST_CH_EN(h2c_pkt, cs_option.dest_ch_en);
+ CH_SWITCH_SET_DEST_CH(h2c_pkt, cs_option.dest_ch);
+ CH_SWITCH_SET_NORMAL_PERIOD(h2c_pkt, cs_option.normal_period);
+ CH_SWITCH_SET_NORMAL_PERIOD_SEL(h2c_pkt, cs_option.normal_period_sel);
+ CH_SWITCH_SET_SLOW_PERIOD(h2c_pkt, cs_option.slow_period);
+ CH_SWITCH_SET_SLOW_PERIOD_SEL(h2c_pkt, cs_option.slow_period_sel);
+ CH_SWITCH_SET_NORMAL_CYCLE(h2c_pkt, cs_option.normal_cycle);
+ CH_SWITCH_SET_PERIODIC_OPT(h2c_pkt, cs_option.periodic_option);
+
+ CH_SWITCH_SET_CH_NUM(h2c_pkt, rtw_pno_req->channel_cnt);
+ CH_SWITCH_SET_INFO_SIZE(h2c_pkt, rtw_pno_req->channel_cnt * 4);
+
+ loc_ch_info = rtw_get_rsvd_page_location(rtwdev, RSVD_CH_INFO);
+ CH_SWITCH_SET_INFO_LOC(h2c_pkt, loc_ch_info);
+
+ rtw_fw_send_h2c_packet(rtwdev, h2c_pkt);
+}
diff --git a/drivers/net/wireless/realtek/rtw88/fw.h b/drivers/net/wireless/realtek/rtw88/fw.h
index 73d1b9ca8efc..ccd27bd45775 100644
--- a/drivers/net/wireless/realtek/rtw88/fw.h
+++ b/drivers/net/wireless/realtek/rtw88/fw.h
@@ -12,6 +12,8 @@
#define FW_HDR_SIZE 64
#define FW_HDR_CHKSUM_SIZE 8
+#define FW_NLO_INFO_CHECK_SIZE 4
+
#define FIFO_PAGE_SIZE_SHIFT 12
#define FIFO_PAGE_SIZE 4096
#define RSVD_PAGE_START_ADDR 0x780
@@ -45,6 +47,9 @@ enum rtw_rsvd_packet_type {
RSVD_QOS_NULL,
RSVD_LPS_PG_DPK,
RSVD_LPS_PG_INFO,
+ RSVD_PROBE_REQ,
+ RSVD_NLO_INFO,
+ RSVD_CH_INFO,
};
enum rtw_fw_rf_type {
@@ -98,6 +103,57 @@ struct rtw_rsvd_page {
enum rtw_rsvd_packet_type type;
u8 page;
bool add_txdesc;
+ struct cfg80211_ssid *ssid;
+};
+
+enum rtw_keep_alive_pkt_type {
+ KEEP_ALIVE_NULL_PKT = 0,
+ KEEP_ALIVE_ARP_RSP = 1,
+};
+
+struct rtw_nlo_info_hdr {
+ u8 nlo_count;
+ u8 hidden_ap_count;
+ u8 rsvd1[2];
+ u8 pattern_check[FW_NLO_INFO_CHECK_SIZE];
+ u8 rsvd2[8];
+ u8 ssid_len[16];
+ u8 chiper[16];
+ u8 rsvd3[16];
+ u8 location[8];
+} __packed;
+
+enum rtw_packet_type {
+ RTW_PACKET_PROBE_REQ = 0x00,
+
+ RTW_PACKET_UNDEFINE = 0x7FFFFFFF,
+};
+
+struct rtw_fw_wow_keep_alive_para {
+ bool adopt;
+ u8 pkt_type;
+ u8 period; /* unit: sec */
+};
+
+struct rtw_fw_wow_disconnect_para {
+ bool adopt;
+ u8 period; /* unit: sec */
+ u8 retry_count;
+};
+
+struct rtw_ch_switch_option {
+ u8 periodic_option;
+ u32 tsf_high;
+ u32 tsf_low;
+ u8 dest_ch_en;
+ u8 absolute_time_en;
+ u8 dest_ch;
+ u8 normal_period;
+ u8 normal_period_sel;
+ u8 normal_cycle;
+ u8 slow_period;
+ u8 slow_period_sel;
+ u8 nlo_en;
};
struct rtw_fw_hdr {
@@ -146,6 +202,12 @@ struct rtw_fw_hdr {
#define H2C_PKT_PHYDM_INFO 0x11
#define H2C_PKT_IQK 0x0E
+#define H2C_PKT_CH_SWITCH 0x02
+#define H2C_PKT_UPDATE_PKT 0x0C
+
+#define H2C_PKT_CH_SWITCH_LEN 0x20
+#define H2C_PKT_UPDATE_PKT_LEN 0x4
+
#define SET_PKT_H2C_CATEGORY(h2c_pkt, value) \
le32p_replace_bits((__le32 *)(h2c_pkt) + 0x00, value, GENMASK(6, 0))
#define SET_PKT_H2C_CMD_ID(h2c_pkt, value) \
@@ -182,6 +244,57 @@ static inline void rtw_h2c_pkt_set_header(u8 *h2c_pkt, u8 sub_id)
#define IQK_SET_SEGMENT_IQK(h2c_pkt, value) \
le32p_replace_bits((__le32 *)(h2c_pkt) + 0x02, value, BIT(1))
+#define CHSW_INFO_SET_CH(pkt, value) \
+ le32p_replace_bits((__le32 *)(pkt) + 0x00, value, GENMASK(7, 0))
+#define CHSW_INFO_SET_PRI_CH_IDX(pkt, value) \
+ le32p_replace_bits((__le32 *)(pkt) + 0x00, value, GENMASK(11, 8))
+#define CHSW_INFO_SET_BW(pkt, value) \
+ le32p_replace_bits((__le32 *)(pkt) + 0x00, value, GENMASK(15, 12))
+#define CHSW_INFO_SET_TIMEOUT(pkt, value) \
+ le32p_replace_bits((__le32 *)(pkt) + 0x00, value, GENMASK(23, 16))
+#define CHSW_INFO_SET_ACTION_ID(pkt, value) \
+ le32p_replace_bits((__le32 *)(pkt) + 0x00, value, GENMASK(30, 24))
+
+#define UPDATE_PKT_SET_SIZE(h2c_pkt, value) \
+ le32p_replace_bits((__le32 *)(h2c_pkt) + 0x02, value, GENMASK(15, 0))
+#define UPDATE_PKT_SET_PKT_ID(h2c_pkt, value) \
+ le32p_replace_bits((__le32 *)(h2c_pkt) + 0x02, value, GENMASK(23, 16))
+#define UPDATE_PKT_SET_LOCATION(h2c_pkt, value) \
+ le32p_replace_bits((__le32 *)(h2c_pkt) + 0x02, value, GENMASK(31, 24))
+
+#define CH_SWITCH_SET_START(h2c_pkt, value) \
+ le32p_replace_bits((__le32 *)(h2c_pkt) + 0x02, value, BIT(0))
+#define CH_SWITCH_SET_DEST_CH_EN(h2c_pkt, value) \
+ le32p_replace_bits((__le32 *)(h2c_pkt) + 0x02, value, BIT(1))
+#define CH_SWITCH_SET_ABSOLUTE_TIME(h2c_pkt, value) \
+ le32p_replace_bits((__le32 *)(h2c_pkt) + 0x02, value, BIT(2))
+#define CH_SWITCH_SET_PERIODIC_OPT(h2c_pkt, value) \
+ le32p_replace_bits((__le32 *)(h2c_pkt) + 0x02, value, GENMASK(4, 3))
+#define CH_SWITCH_SET_INFO_LOC(h2c_pkt, value) \
+ le32p_replace_bits((__le32 *)(h2c_pkt) + 0x02, value, GENMASK(15, 8))
+#define CH_SWITCH_SET_CH_NUM(h2c_pkt, value) \
+ le32p_replace_bits((__le32 *)(h2c_pkt) + 0x02, value, GENMASK(23, 16))
+#define CH_SWITCH_SET_PRI_CH_IDX(h2c_pkt, value) \
+ le32p_replace_bits((__le32 *)(h2c_pkt) + 0x02, value, GENMASK(27, 24))
+#define CH_SWITCH_SET_DEST_CH(h2c_pkt, value) \
+ le32p_replace_bits((__le32 *)(h2c_pkt) + 0x03, value, GENMASK(7, 0))
+#define CH_SWITCH_SET_NORMAL_PERIOD(h2c_pkt, value) \
+ le32p_replace_bits((__le32 *)(h2c_pkt) + 0x03, value, GENMASK(13, 8))
+#define CH_SWITCH_SET_NORMAL_PERIOD_SEL(h2c_pkt, value) \
+ le32p_replace_bits((__le32 *)(h2c_pkt) + 0x03, value, GENMASK(15, 14))
+#define CH_SWITCH_SET_SLOW_PERIOD(h2c_pkt, value) \
+ le32p_replace_bits((__le32 *)(h2c_pkt) + 0x03, value, GENMASK(21, 16))
+#define CH_SWITCH_SET_SLOW_PERIOD_SEL(h2c_pkt, value) \
+ le32p_replace_bits((__le32 *)(h2c_pkt) + 0x03, value, GENMASK(23, 22))
+#define CH_SWITCH_SET_NORMAL_CYCLE(h2c_pkt, value) \
+ le32p_replace_bits((__le32 *)(h2c_pkt) + 0x03, value, GENMASK(31, 24))
+#define CH_SWITCH_SET_TSF_HIGH(h2c_pkt, value) \
+ le32p_replace_bits((__le32 *)(h2c_pkt) + 0x04, value, GENMASK(31, 0))
+#define CH_SWITCH_SET_TSF_LOW(h2c_pkt, value) \
+ le32p_replace_bits((__le32 *)(h2c_pkt) + 0x05, value, GENMASK(31, 0))
+#define CH_SWITCH_SET_INFO_SIZE(h2c_pkt, value) \
+ le32p_replace_bits((__le32 *)(h2c_pkt) + 0x06, value, GENMASK(15, 0))
+
/* Command H2C */
#define H2C_CMD_RSVD_PAGE 0x0
#define H2C_CMD_MEDIA_STATUS_RPT 0x01
@@ -198,6 +311,13 @@ static inline void rtw_h2c_pkt_set_header(u8 *h2c_pkt, u8 sub_id)
#define H2C_CMD_QUERY_BT_MP_INFO 0x67
#define H2C_CMD_BT_WIFI_CONTROL 0x69
+#define H2C_CMD_KEEP_ALIVE 0x03
+#define H2C_CMD_DISCONNECT_DECISION 0x04
+#define H2C_CMD_WOWLAN 0x80
+#define H2C_CMD_REMOTE_WAKE_CTRL 0x81
+#define H2C_CMD_AOAC_GLOBAL_INFO 0x82
+#define H2C_CMD_NLO_INFO 0x8C
+
#define SET_H2C_CMD_ID_CLASS(h2c_pkt, value) \
le32p_replace_bits((__le32 *)(h2c_pkt) + 0x00, value, GENMASK(7, 0))
@@ -224,6 +344,8 @@ static inline void rtw_h2c_pkt_set_header(u8 *h2c_pkt, u8 sub_id)
le32p_replace_bits((__le32 *)(h2c_pkt) + 0x00, value, GENMASK(31, 24))
#define LPS_PG_SEC_CAM_EN(h2c_pkt, value) \
le32p_replace_bits((__le32 *)(h2c_pkt) + 0x00, value, BIT(8))
+#define LPS_PG_PATTERN_CAM_EN(h2c_pkt, value) \
+ le32p_replace_bits((__le32 *)(h2c_pkt) + 0x00, value, BIT(10))
#define SET_RSSI_INFO_MACID(h2c_pkt, value) \
le32p_replace_bits((__le32 *)(h2c_pkt) + 0x00, value, GENMASK(15, 8))
#define SET_RSSI_INFO_RSSI(h2c_pkt, value) \
@@ -301,6 +423,56 @@ static inline void rtw_h2c_pkt_set_header(u8 *h2c_pkt, u8 sub_id)
#define SET_BT_WIFI_CONTROL_DATA5(h2c_pkt, value) \
le32p_replace_bits((__le32 *)(h2c_pkt) + 0x01, value, GENMASK(23, 16))
+#define SET_KEEP_ALIVE_ENABLE(h2c_pkt, value) \
+ le32p_replace_bits((__le32 *)(h2c_pkt) + 0x00, value, BIT(8))
+#define SET_KEEP_ALIVE_ADOPT(h2c_pkt, value) \
+ le32p_replace_bits((__le32 *)(h2c_pkt) + 0x00, value, BIT(9))
+#define SET_KEEP_ALIVE_PKT_TYPE(h2c_pkt, value) \
+ le32p_replace_bits((__le32 *)(h2c_pkt) + 0x00, value, BIT(10))
+#define SET_KEEP_ALIVE_CHECK_PERIOD(h2c_pkt, value) \
+ le32p_replace_bits((__le32 *)(h2c_pkt) + 0x00, value, GENMASK(23, 16))
+
+#define SET_DISCONNECT_DECISION_ENABLE(h2c_pkt, value) \
+ le32p_replace_bits((__le32 *)(h2c_pkt) + 0x00, value, BIT(8))
+#define SET_DISCONNECT_DECISION_ADOPT(h2c_pkt, value) \
+ le32p_replace_bits((__le32 *)(h2c_pkt) + 0x00, value, BIT(9))
+#define SET_DISCONNECT_DECISION_CHECK_PERIOD(h2c_pkt, value) \
+ le32p_replace_bits((__le32 *)(h2c_pkt) + 0x00, value, GENMASK(23, 16))
+#define SET_DISCONNECT_DECISION_TRY_PKT_NUM(h2c_pkt, value) \
+ le32p_replace_bits((__le32 *)(h2c_pkt) + 0x00, value, GENMASK(31, 24))
+
+#define SET_WOWLAN_FUNC_ENABLE(h2c_pkt, value) \
+ le32p_replace_bits((__le32 *)(h2c_pkt) + 0x00, value, BIT(8))
+#define SET_WOWLAN_PATTERN_MATCH_ENABLE(h2c_pkt, value) \
+ le32p_replace_bits((__le32 *)(h2c_pkt) + 0x00, value, BIT(9))
+#define SET_WOWLAN_MAGIC_PKT_ENABLE(h2c_pkt, value) \
+ le32p_replace_bits((__le32 *)(h2c_pkt) + 0x00, value, BIT(10))
+#define SET_WOWLAN_UNICAST_PKT_ENABLE(h2c_pkt, value) \
+ le32p_replace_bits((__le32 *)(h2c_pkt) + 0x00, value, BIT(11))
+#define SET_WOWLAN_REKEY_WAKEUP_ENABLE(h2c_pkt, value) \
+ le32p_replace_bits((__le32 *)(h2c_pkt) + 0x00, value, BIT(14))
+#define SET_WOWLAN_DEAUTH_WAKEUP_ENABLE(h2c_pkt, value) \
+ le32p_replace_bits((__le32 *)(h2c_pkt) + 0x00, value, BIT(15))
+
+#define SET_REMOTE_WAKECTRL_ENABLE(h2c_pkt, value) \
+ le32p_replace_bits((__le32 *)(h2c_pkt) + 0x00, value, BIT(8))
+#define SET_REMOTE_WAKE_CTRL_NLO_OFFLOAD_EN(h2c_pkt, value) \
+ le32p_replace_bits((__le32 *)(h2c_pkt) + 0x00, value, BIT(12))
+
+#define SET_AOAC_GLOBAL_INFO_PAIRWISE_ENC_ALG(h2c_pkt, value) \
+ le32p_replace_bits((__le32 *)(h2c_pkt) + 0x00, value, GENMASK(15, 8))
+#define SET_AOAC_GLOBAL_INFO_GROUP_ENC_ALG(h2c_pkt, value) \
+ le32p_replace_bits((__le32 *)(h2c_pkt) + 0x00, value, GENMASK(23, 16))
+
+#define SET_NLO_FUN_EN(h2c_pkt, value) \
+ le32p_replace_bits((__le32 *)(h2c_pkt) + 0x00, value, BIT(8))
+#define SET_NLO_PS_32K(h2c_pkt, value) \
+ le32p_replace_bits((__le32 *)(h2c_pkt) + 0x00, value, BIT(9))
+#define SET_NLO_IGNORE_SECURITY(h2c_pkt, value) \
+ le32p_replace_bits((__le32 *)(h2c_pkt) + 0x00, value, BIT(10))
+#define SET_NLO_LOC_NLO_INFO(h2c_pkt, value) \
+ le32p_replace_bits((__le32 *)(h2c_pkt) + 0x00, value, GENMASK(23, 16))
+
static inline struct rtw_c2h_cmd *get_c2h_from_skb(struct sk_buff *skb)
{
u32 pkt_offset;
@@ -332,6 +504,8 @@ void rtw_fw_send_ra_info(struct rtw_dev *rtwdev, struct rtw_sta_info *si);
void rtw_fw_media_status_report(struct rtw_dev *rtwdev, u8 mac_id, bool conn);
void rtw_add_rsvd_page(struct rtw_dev *rtwdev, enum rtw_rsvd_packet_type type,
bool txdesc);
+void rtw_add_rsvd_page_probe_req(struct rtw_dev *rtwdev,
+ struct cfg80211_ssid *ssid);
int rtw_fw_write_data_rsvd_page(struct rtw_dev *rtwdev, u16 pg_addr,
u8 *buf, u32 size);
void rtw_reset_rsvd_page(struct rtw_dev *rtwdev);
@@ -340,4 +514,16 @@ int rtw_fw_download_rsvd_page(struct rtw_dev *rtwdev,
void rtw_send_rsvd_page_h2c(struct rtw_dev *rtwdev);
int rtw_dump_drv_rsvd_page(struct rtw_dev *rtwdev,
u32 offset, u32 size, u32 *buf);
+void rtw_fw_set_remote_wake_ctrl_cmd(struct rtw_dev *rtwdev, bool enable);
+void rtw_fw_set_wowlan_ctrl_cmd(struct rtw_dev *rtwdev, bool enable);
+void rtw_fw_set_keep_alive_cmd(struct rtw_dev *rtwdev, bool enable);
+void rtw_fw_set_disconnect_decision_cmd(struct rtw_dev *rtwdev, bool enable);
+void rtw_fw_set_aoac_global_info_cmd(struct rtw_dev *rtwdev,
+ u8 pairwise_key_enc,
+ u8 group_key_enc);
+
+void rtw_fw_set_nlo_info(struct rtw_dev *rtwdev, bool enable);
+void rtw_fw_update_pkt_probe_req(struct rtw_dev *rtwdev,
+ struct cfg80211_ssid *ssid);
+void rtw_fw_channel_switch(struct rtw_dev *rtwdev, bool enable);
#endif
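As a worked example of how these field setters compose: per the masks above, rtw_fw_set_keep_alive_cmd() in fw.c with enable == true yields a first dword of 0x00050303 (my arithmetic, shown as a hypothetical demonstration function, not patch code):

    static u32 keep_alive_dword0_example(void)
    {
            u32 dword0 = 0;

            dword0 |= 0x03;      /* H2C_CMD_KEEP_ALIVE in GENMASK(7, 0) */
            dword0 |= 1 << 8;    /* SET_KEEP_ALIVE_ENABLE, BIT(8) */
            dword0 |= 1 << 9;    /* SET_KEEP_ALIVE_ADOPT, BIT(9) */
                                 /* KEEP_ALIVE_NULL_PKT == 0 leaves BIT(10) clear */
            dword0 |= 5 << 16;   /* 5 second check period in GENMASK(23, 16) */

            return dword0;       /* stored little-endian into h2c_pkt */
    }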
diff --git a/drivers/net/wireless/realtek/rtw88/hci.h b/drivers/net/wireless/realtek/rtw88/hci.h
index 3d91aea942c3..85a81a578fd5 100644
--- a/drivers/net/wireless/realtek/rtw88/hci.h
+++ b/drivers/net/wireless/realtek/rtw88/hci.h
@@ -15,6 +15,7 @@ struct rtw_hci_ops {
void (*stop)(struct rtw_dev *rtwdev);
void (*deep_ps)(struct rtw_dev *rtwdev, bool enter);
void (*link_ps)(struct rtw_dev *rtwdev, bool enter);
+ void (*interface_cfg)(struct rtw_dev *rtwdev);
int (*write_data_rsvd_page)(struct rtw_dev *rtwdev, u8 *buf, u32 size);
int (*write_data_h2c)(struct rtw_dev *rtwdev, u8 *buf, u32 size);
@@ -59,6 +60,11 @@ static inline void rtw_hci_link_ps(struct rtw_dev *rtwdev, bool enter)
rtwdev->hci.ops->link_ps(rtwdev, enter);
}
+static inline void rtw_hci_interface_cfg(struct rtw_dev *rtwdev)
+{
+ rtwdev->hci.ops->interface_cfg(rtwdev);
+}
+
static inline int
rtw_hci_write_data_rsvd_page(struct rtw_dev *rtwdev, u8 *buf, u32 size)
{
diff --git a/drivers/net/wireless/realtek/rtw88/mac.c b/drivers/net/wireless/realtek/rtw88/mac.c
index 507970387b2a..cadf0abbe16b 100644
--- a/drivers/net/wireless/realtek/rtw88/mac.c
+++ b/drivers/net/wireless/realtek/rtw88/mac.c
@@ -16,10 +16,12 @@ void rtw_set_channel_mac(struct rtw_dev *rtwdev, u8 channel, u8 bw,
u8 value8;
txsc20 = primary_ch_idx;
- if (txsc20 == 1 || txsc20 == 3)
- txsc40 = 9;
- else
- txsc40 = 10;
+ if (bw == RTW_CHANNEL_WIDTH_80) {
+ if (txsc20 == 1 || txsc20 == 3)
+ txsc40 = 9;
+ else
+ txsc40 = 10;
+ }
rtw_write8(rtwdev, REG_DATA_SC,
BIT_TXSC_20M(txsc20) | BIT_TXSC_40M(txsc40));
@@ -1034,5 +1036,7 @@ int rtw_mac_init(struct rtw_dev *rtwdev)
if (ret)
return ret;
+ rtw_hci_interface_cfg(rtwdev);
+
return 0;
}
diff --git a/drivers/net/wireless/realtek/rtw88/mac80211.c b/drivers/net/wireless/realtek/rtw88/mac80211.c
index 34a1c3b53cd4..6fc33e11d08c 100644
--- a/drivers/net/wireless/realtek/rtw88/mac80211.c
+++ b/drivers/net/wireless/realtek/rtw88/mac80211.c
@@ -12,6 +12,7 @@
#include "reg.h"
#include "bf.h"
#include "debug.h"
+#include "wow.h"
static void rtw_ops_tx(struct ieee80211_hw *hw,
struct ieee80211_tx_control *control,
@@ -152,12 +153,10 @@ static int rtw_ops_add_interface(struct ieee80211_hw *hw,
u8 bcn_ctrl = 0;
rtwvif->port = port;
- rtwvif->vif = vif;
rtwvif->stats.tx_unicast = 0;
rtwvif->stats.rx_unicast = 0;
rtwvif->stats.tx_cnt = 0;
rtwvif->stats.rx_cnt = 0;
- rtwvif->in_lps = false;
memset(&rtwvif->bfee, 0, sizeof(struct rtw_bfee));
rtwvif->conf = &rtw_vif_port[port];
rtw_txq_init(rtwdev, vif->txq);
@@ -735,6 +734,44 @@ static int rtw_ops_set_bitrate_mask(struct ieee80211_hw *hw,
return 0;
}
+#ifdef CONFIG_PM
+static int rtw_ops_suspend(struct ieee80211_hw *hw,
+ struct cfg80211_wowlan *wowlan)
+{
+ struct rtw_dev *rtwdev = hw->priv;
+ int ret;
+
+ mutex_lock(&rtwdev->mutex);
+ ret = rtw_wow_suspend(rtwdev, wowlan);
+ if (ret)
+ rtw_err(rtwdev, "failed to suspend for wow %d\n", ret);
+ mutex_unlock(&rtwdev->mutex);
+
+ return ret ? 1 : 0;
+}
+
+static int rtw_ops_resume(struct ieee80211_hw *hw)
+{
+ struct rtw_dev *rtwdev = hw->priv;
+ int ret;
+
+ mutex_lock(&rtwdev->mutex);
+ ret = rtw_wow_resume(rtwdev);
+ if (ret)
+ rtw_err(rtwdev, "failed to resume for wow %d\n", ret);
+ mutex_unlock(&rtwdev->mutex);
+
+ return ret ? 1 : 0;
+}
+
+static void rtw_ops_set_wakeup(struct ieee80211_hw *hw, bool enabled)
+{
+ struct rtw_dev *rtwdev = hw->priv;
+
+ device_set_wakeup_enable(rtwdev->dev, enabled);
+}
+#endif
+
const struct ieee80211_ops rtw_ops = {
.tx = rtw_ops_tx,
.wake_tx_queue = rtw_ops_wake_tx_queue,
@@ -757,5 +794,10 @@ const struct ieee80211_ops rtw_ops = {
.sta_statistics = rtw_ops_sta_statistics,
.flush = rtw_ops_flush,
.set_bitrate_mask = rtw_ops_set_bitrate_mask,
+#ifdef CONFIG_PM
+ .suspend = rtw_ops_suspend,
+ .resume = rtw_ops_resume,
+ .set_wakeup = rtw_ops_set_wakeup,
+#endif
};
EXPORT_SYMBOL(rtw_ops);
diff --git a/drivers/net/wireless/realtek/rtw88/main.c b/drivers/net/wireless/realtek/rtw88/main.c
index ae61415e1665..2845d2838f7b 100644
--- a/drivers/net/wireless/realtek/rtw88/main.c
+++ b/drivers/net/wireless/realtek/rtw88/main.c
@@ -706,8 +706,8 @@ void rtw_update_sta_info(struct rtw_dev *rtwdev, struct rtw_sta_info *si)
if (sta->vht_cap.cap & IEEE80211_VHT_CAP_SHORT_GI_80)
is_support_sgi = true;
} else if (sta->ht_cap.ht_supported) {
- ra_mask |= (sta->ht_cap.mcs.rx_mask[NL80211_BAND_5GHZ] << 20) |
- (sta->ht_cap.mcs.rx_mask[NL80211_BAND_2GHZ] << 12);
+ ra_mask |= (sta->ht_cap.mcs.rx_mask[1] << 20) |
+ (sta->ht_cap.mcs.rx_mask[0] << 12);
if (sta->ht_cap.cap & IEEE80211_HT_CAP_RX_STBC)
stbc_en = HT_STBC_EN;
if (sta->ht_cap.cap & IEEE80211_HT_CAP_LDPC_CODING)
@@ -717,6 +717,9 @@ void rtw_update_sta_info(struct rtw_dev *rtwdev, struct rtw_sta_info *si)
is_support_sgi = true;
}
+ if (efuse->hw_cap.nss == 1)
+ ra_mask &= RA_MASK_VHT_RATES_1SS | RA_MASK_HT_RATES_1SS;
+
if (hal->current_band_type == RTW_BAND_5G) {
ra_mask |= (u64)sta->supp_rates[NL80211_BAND_5GHZ] << 4;
if (sta->vht_cap.vht_supported) {
@@ -750,11 +753,6 @@ void rtw_update_sta_info(struct rtw_dev *rtwdev, struct rtw_sta_info *si)
wireless_set = 0;
}
- if (efuse->hw_cap.nss == 1) {
- ra_mask &= RA_MASK_VHT_RATES_1SS;
- ra_mask &= RA_MASK_HT_RATES_1SS;
- }
-
switch (sta->bandwidth) {
case IEEE80211_STA_RX_BW_80:
bw_mode = RTW_CHANNEL_WIDTH_80;
@@ -793,6 +791,26 @@ void rtw_update_sta_info(struct rtw_dev *rtwdev, struct rtw_sta_info *si)
rtw_fw_send_ra_info(rtwdev, si);
}
+static int rtw_wait_firmware_completion(struct rtw_dev *rtwdev)
+{
+ struct rtw_chip_info *chip = rtwdev->chip;
+ struct rtw_fw_state *fw;
+
+ fw = &rtwdev->fw;
+ wait_for_completion(&fw->completion);
+ if (!fw->firmware)
+ return -EINVAL;
+
+ if (chip->wow_fw_name) {
+ fw = &rtwdev->wow_fw;
+ wait_for_completion(&fw->completion);
+ if (!fw->firmware)
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
static int rtw_power_on(struct rtw_dev *rtwdev)
{
struct rtw_chip_info *chip = rtwdev->chip;
@@ -813,11 +831,10 @@ static int rtw_power_on(struct rtw_dev *rtwdev)
goto err;
}
- wait_for_completion(&fw->completion);
- if (!fw->firmware) {
- ret = -EINVAL;
- rtw_err(rtwdev, "failed to load firmware\n");
- goto err;
+ ret = rtw_wait_firmware_completion(rtwdev);
+ if (ret) {
+ rtw_err(rtwdev, "failed to wait firmware completion\n");
+ goto err_off;
}
ret = rtw_download_firmware(rtwdev, fw);
@@ -881,7 +898,7 @@ int rtw_core_start(struct rtw_dev *rtwdev)
static void rtw_power_off(struct rtw_dev *rtwdev)
{
- rtwdev->hci.ops->stop(rtwdev);
+ rtw_hci_stop(rtwdev);
rtw_mac_power_off(rtwdev);
}
@@ -1020,8 +1037,8 @@ static void rtw_unset_supported_band(struct ieee80211_hw *hw,
static void rtw_load_firmware_cb(const struct firmware *firmware, void *context)
{
- struct rtw_dev *rtwdev = context;
- struct rtw_fw_state *fw = &rtwdev->fw;
+ struct rtw_fw_state *fw = context;
+ struct rtw_dev *rtwdev = fw->rtwdev;
const struct rtw_fw_hdr *fw_hdr;
if (!firmware || !firmware->data) {
@@ -1043,17 +1060,35 @@ static void rtw_load_firmware_cb(const struct firmware *firmware, void *context)
fw->version, fw->sub_version, fw->sub_index, fw->h2c_version);
}
-static int rtw_load_firmware(struct rtw_dev *rtwdev, const char *fw_name)
+static int rtw_load_firmware(struct rtw_dev *rtwdev, enum rtw_fw_type type)
{
- struct rtw_fw_state *fw = &rtwdev->fw;
+ const char *fw_name;
+ struct rtw_fw_state *fw;
int ret;
+ switch (type) {
+ case RTW_WOWLAN_FW:
+ fw = &rtwdev->wow_fw;
+ fw_name = rtwdev->chip->wow_fw_name;
+ break;
+
+ case RTW_NORMAL_FW:
+ fw = &rtwdev->fw;
+ fw_name = rtwdev->chip->fw_name;
+ break;
+
+ default:
+ rtw_warn(rtwdev, "unsupported firmware type\n");
+ return -ENOENT;
+ }
+
+ fw->rtwdev = rtwdev;
init_completion(&fw->completion);
ret = request_firmware_nowait(THIS_MODULE, true, fw_name, rtwdev->dev,
- GFP_KERNEL, rtwdev, rtw_load_firmware_cb);
+ GFP_KERNEL, fw, rtw_load_firmware_cb);
if (ret) {
- rtw_err(rtwdev, "async firmware request failed\n");
+ rtw_err(rtwdev, "failed to async firmware request\n");
return ret;
}
@@ -1341,7 +1376,6 @@ int rtw_core_init(struct rtw_dev *rtwdev)
skb_queue_head_init(&rtwdev->coex.queue);
skb_queue_head_init(&rtwdev->tx_report.queue);
- spin_lock_init(&rtwdev->dm_lock);
spin_lock_init(&rtwdev->rf_lock);
spin_lock_init(&rtwdev->h2c.lock);
spin_lock_init(&rtwdev->txq_lock);
@@ -1372,12 +1406,19 @@ int rtw_core_init(struct rtw_dev *rtwdev)
BIT_HTC_LOC_CTRL | BIT_APP_PHYSTS |
BIT_AB | BIT_AM | BIT_APM;
- ret = rtw_load_firmware(rtwdev, rtwdev->chip->fw_name);
+ ret = rtw_load_firmware(rtwdev, RTW_NORMAL_FW);
if (ret) {
rtw_warn(rtwdev, "no firmware loaded\n");
return ret;
}
+ if (chip->wow_fw_name) {
+ ret = rtw_load_firmware(rtwdev, RTW_WOWLAN_FW);
+ if (ret) {
+ rtw_warn(rtwdev, "no wow firmware loaded\n");
+ return ret;
+ }
+ }
return 0;
}
EXPORT_SYMBOL(rtw_core_init);
@@ -1385,12 +1426,16 @@ EXPORT_SYMBOL(rtw_core_init);
void rtw_core_deinit(struct rtw_dev *rtwdev)
{
struct rtw_fw_state *fw = &rtwdev->fw;
+ struct rtw_fw_state *wow_fw = &rtwdev->wow_fw;
struct rtw_rsvd_page *rsvd_pkt, *tmp;
unsigned long flags;
if (fw->firmware)
release_firmware(fw->firmware);
+ if (wow_fw->firmware)
+ release_firmware(wow_fw->firmware);
+
tasklet_kill(&rtwdev->tx_tasklet);
spin_lock_irqsave(&rtwdev->tx_report.q_lock, flags);
skb_queue_purge(&rtwdev->tx_report.queue);
@@ -1445,6 +1490,10 @@ int rtw_register_hw(struct rtw_dev *rtwdev, struct ieee80211_hw *hw)
wiphy_ext_feature_set(hw->wiphy, NL80211_EXT_FEATURE_CAN_REPLACE_PTK0);
+#ifdef CONFIG_PM
+ hw->wiphy->wowlan = rtwdev->chip->wowlan_stub;
+ hw->wiphy->max_sched_scan_ssids = rtwdev->chip->max_sched_scan_ssids;
+#endif
rtw_set_supported_band(hw, rtwdev->chip);
SET_IEEE80211_PERM_ADDR(hw, rtwdev->efuse.addr);
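A note on the firmware-loading rework above: once two images (normal and
WoWLAN) can be in flight at the same time, a shared completion callback can
no longer assume that its context pointer is the device. The patch therefore
passes the per-image rtw_fw_state as the context and adds a back-pointer to
the device inside it. A minimal sketch of the pattern, using invented names
(my_dev, fw_slot) rather than the driver's own:

	#include <linux/completion.h>
	#include <linux/firmware.h>
	#include <linux/module.h>

	struct my_dev;

	struct fw_slot {
		const struct firmware *blob;	/* NULL until loaded */
		struct completion done;
		struct my_dev *dev;	/* back-pointer for the callback */
	};

	static void fw_ready_cb(const struct firmware *blob, void *context)
	{
		struct fw_slot *slot = context;	/* the slot, not the device */

		slot->blob = blob;	/* stays NULL if the load failed */
		complete(&slot->done);
	}

	static int fw_request(struct my_dev *dev, struct device *kdev,
			      struct fw_slot *slot, const char *name)
	{
		slot->dev = dev;
		init_completion(&slot->done);
		return request_firmware_nowait(THIS_MODULE, true, name, kdev,
					       GFP_KERNEL, slot, fw_ready_cb);
	}

Waiters then block on slot->done and test slot->blob, which is exactly what
rtw_wait_firmware_completion() does for both images before powering on.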
diff --git a/drivers/net/wireless/realtek/rtw88/main.h b/drivers/net/wireless/realtek/rtw88/main.h
index d012eefcd0da..f334d201bfb5 100644
--- a/drivers/net/wireless/realtek/rtw88/main.h
+++ b/drivers/net/wireless/realtek/rtw88/main.h
@@ -19,6 +19,10 @@
#define RTW_MAX_SEC_CAM_NUM 32
#define MAX_PG_CAM_BACKUP_NUM 8
+#define RTW_MAX_PATTERN_NUM 12
+#define RTW_MAX_PATTERN_MASK_SIZE 16
+#define RTW_MAX_PATTERN_SIZE 128
+
#define RTW_WATCH_DOG_DELAY_TIME round_jiffies_relative(HZ * 2)
#define RFREG_MASK 0xfffff
@@ -193,6 +197,11 @@ enum rtw_rx_queue_type {
RTK_MAX_RX_QUEUE_NUM
};
+enum rtw_fw_type {
+ RTW_NORMAL_FW = 0x0,
+ RTW_WOWLAN_FW = 0x1,
+};
+
enum rtw_rate_index {
RTW_RATEID_BGN_40M_2SS = 0,
RTW_RATEID_BGN_40M_1SS = 1,
@@ -337,6 +346,7 @@ enum rtw_flags {
RTW_FLAG_LEISURE_PS_DEEP,
RTW_FLAG_DIG_DISABLE,
RTW_FLAG_BUSY_TRAFFIC,
+ RTW_FLAG_WOWLAN,
NUM_OF_RTW_FLAGS,
};
@@ -367,6 +377,15 @@ enum rtw_snr {
RTW_SNR_NUM
};
+enum rtw_wow_flags {
+ RTW_WOW_FLAG_EN_MAGIC_PKT,
+ RTW_WOW_FLAG_EN_REKEY_PKT,
+ RTW_WOW_FLAG_EN_DISCONNECT,
+
+ /* keep it last */
+ RTW_WOW_FLAG_MAX,
+};
+
/* the power index is represented by differences, which cck-1s & ht40-1s are
* the base values, so for 1s's differences, there are only ht20 & ofdm
*/
@@ -608,6 +627,7 @@ struct rtw_lps_conf {
u8 smart_ps;
u8 port_id;
bool sec_cam_backup;
+ bool pattern_cam_backup;
};
enum rtw_hw_key_type {
@@ -716,7 +736,6 @@ struct rtw_bf_info {
};
struct rtw_vif {
- struct ieee80211_vif *vif;
enum rtw_net_type net_type;
u16 aid;
u8 mac_addr[ETH_ALEN];
@@ -727,7 +746,6 @@ struct rtw_vif {
const struct rtw_vif_port *conf;
struct rtw_traffic_stats stats;
- bool in_lps;
struct rtw_bfee bfee;
};
@@ -902,6 +920,33 @@ struct rtw_intf_phy_para {
u16 platform;
};
+struct rtw_wow_pattern {
+ u16 crc;
+ u8 type;
+ u8 valid;
+ u8 mask[RTW_MAX_PATTERN_MASK_SIZE];
+};
+
+struct rtw_pno_request {
+ bool inited;
+ u32 match_set_cnt;
+ struct cfg80211_match_set *match_sets;
+ u8 channel_cnt;
+ struct ieee80211_channel *channels;
+ struct cfg80211_sched_scan_plan scan_plan;
+};
+
+struct rtw_wow_param {
+ struct ieee80211_vif *wow_vif;
+ DECLARE_BITMAP(flags, RTW_WOW_FLAG_MAX);
+ u8 txpause;
+ u8 pattern_cnt;
+ struct rtw_wow_pattern patterns[RTW_MAX_PATTERN_NUM];
+
+ bool ips_enabled;
+ struct rtw_pno_request pno_req;
+};
+
struct rtw_intf_phy_para_table {
struct rtw_intf_phy_para *usb2_para;
struct rtw_intf_phy_para *usb3_para;
@@ -1030,6 +1075,10 @@ struct rtw_chip_info {
u8 bfer_su_max_num;
u8 bfer_mu_max_num;
+ const char *wow_fw_name;
+ const struct wiphy_wowlan_support *wowlan_stub;
+ const u8 max_sched_scan_ssids;
+
/* coex paras */
u32 coex_para_ver;
u8 bt_desired_ver;
@@ -1456,6 +1505,7 @@ struct rtw_fifo_conf {
struct rtw_fw_state {
const struct firmware *firmware;
+ struct rtw_dev *rtwdev;
struct completion completion;
u16 version;
u8 sub_version;
@@ -1534,9 +1584,6 @@ struct rtw_dev {
/* ensures exclusive access from mac80211 callbacks */
struct mutex mutex;
- /* lock for dm to use */
- spinlock_t dm_lock;
-
/* read/write rf register */
spinlock_t rf_lock;
@@ -1580,6 +1627,9 @@ struct rtw_dev {
u8 mp_mode;
+ struct rtw_fw_state wow_fw;
+ struct rtw_wow_param wow;
+
/* hci related data, must be last */
u8 priv[0] __aligned(sizeof(void *));
};
@@ -1605,6 +1655,18 @@ static inline struct ieee80211_vif *rtwvif_to_vif(struct rtw_vif *rtwvif)
return container_of(p, struct ieee80211_vif, drv_priv);
}
+static inline bool rtw_ssid_equal(struct cfg80211_ssid *a,
+ struct cfg80211_ssid *b)
+{
+ if (!a || !b || a->ssid_len != b->ssid_len)
+ return false;
+
+ if (memcmp(a->ssid, b->ssid, a->ssid_len))
+ return false;
+
+ return true;
+}
+
void rtw_get_channel_params(struct cfg80211_chan_def *chandef,
struct rtw_channel_params *ch_param);
bool check_hw_ready(struct rtw_dev *rtwdev, u32 addr, u32 mask, u32 target);
diff --git a/drivers/net/wireless/realtek/rtw88/pci.c b/drivers/net/wireless/realtek/rtw88/pci.c
index a58e8276a41a..1fbc14c149ec 100644
--- a/drivers/net/wireless/realtek/rtw88/pci.c
+++ b/drivers/net/wireless/realtek/rtw88/pci.c
@@ -6,6 +6,7 @@
#include <linux/pci.h>
#include "main.h"
#include "pci.h"
+#include "reg.h"
#include "tx.h"
#include "rx.h"
#include "fw.h"
@@ -486,13 +487,6 @@ static void rtw_pci_disable_interrupt(struct rtw_dev *rtwdev,
rtwpci->irq_enabled = false;
}
-static int rtw_pci_setup(struct rtw_dev *rtwdev)
-{
- rtw_pci_reset_trx_ring(rtwdev);
-
- return 0;
-}
-
static void rtw_pci_dma_reset(struct rtw_dev *rtwdev, struct rtw_pci *rtwpci)
{
/* reset dma and rx tag */
@@ -501,11 +495,22 @@ static void rtw_pci_dma_reset(struct rtw_dev *rtwdev, struct rtw_pci *rtwpci)
rtwpci->rx_tag = 0;
}
+static int rtw_pci_setup(struct rtw_dev *rtwdev)
+{
+ struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;
+
+ rtw_pci_reset_trx_ring(rtwdev);
+ rtw_pci_dma_reset(rtwdev, rtwpci);
+
+ return 0;
+}
+
static void rtw_pci_dma_release(struct rtw_dev *rtwdev, struct rtw_pci *rtwpci)
{
struct rtw_pci_tx_ring *tx_ring;
u8 queue;
+ rtw_pci_reset_trx_ring(rtwdev);
for (queue = 0; queue < RTK_MAX_TX_QUEUE_NUM; queue++) {
tx_ring = &rtwpci->tx_rings[queue];
rtw_pci_free_tx_ring_skbs(rtwdev, tx_ring);
@@ -517,8 +522,6 @@ static int rtw_pci_start(struct rtw_dev *rtwdev)
struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;
unsigned long flags;
- rtw_pci_dma_reset(rtwdev, rtwpci);
-
spin_lock_irqsave(&rtwpci->irq_lock, flags);
rtw_pci_enable_interrupt(rtwdev, rtwpci);
spin_unlock_irqrestore(&rtwpci->irq_lock, flags);
@@ -832,6 +835,11 @@ static void rtw_pci_tx_isr(struct rtw_dev *rtwdev, struct rtw_pci *rtwpci,
while (count--) {
skb = skb_dequeue(&ring->queue);
+ if (!skb) {
+ rtw_err(rtwdev, "failed to dequeue %d skb TX queue %d, BD=0x%08x, rp %d -> %d\n",
+ count, hw_queue, bd_idx, ring->r.rp, cur_rp);
+ break;
+ }
tx_data = rtw_pci_get_tx_data(skb);
pci_unmap_single(rtwpci->pdev, tx_data->dma, skb->len,
PCI_DMA_TODEVICE);
@@ -1222,6 +1230,21 @@ static void rtw_pci_link_cfg(struct rtw_dev *rtwdev)
rtwpci->link_ctrl = link_ctrl;
}
+static void rtw_pci_interface_cfg(struct rtw_dev *rtwdev)
+{
+ struct rtw_chip_info *chip = rtwdev->chip;
+
+ switch (chip->id) {
+ case RTW_CHIP_TYPE_8822C:
+ if (rtwdev->hal.cut_version >= RTW_CHIP_VER_CUT_D)
+ rtw_write32_mask(rtwdev, REG_HCI_MIX_CFG,
+ BIT_PCIE_EMAC_PDN_AUX_TO_FAST_CLK, 1);
+ break;
+ default:
+ break;
+ }
+}
+
static void rtw_pci_phy_cfg(struct rtw_dev *rtwdev)
{
struct rtw_chip_info *chip = rtwdev->chip;
@@ -1264,6 +1287,23 @@ static void rtw_pci_phy_cfg(struct rtw_dev *rtwdev)
rtw_pci_link_cfg(rtwdev);
}
+#ifdef CONFIG_PM
+static int rtw_pci_suspend(struct device *dev)
+{
+ return 0;
+}
+
+static int rtw_pci_resume(struct device *dev)
+{
+ return 0;
+}
+
+static SIMPLE_DEV_PM_OPS(rtw_pm_ops, rtw_pci_suspend, rtw_pci_resume);
+#define RTW_PM_OPS (&rtw_pm_ops)
+#else
+#define RTW_PM_OPS NULL
+#endif
+
static int rtw_pci_claim(struct rtw_dev *rtwdev, struct pci_dev *pdev)
{
int ret;
@@ -1330,6 +1370,7 @@ static struct rtw_hci_ops rtw_pci_ops = {
.stop = rtw_pci_stop,
.deep_ps = rtw_pci_deep_ps,
.link_ps = rtw_pci_link_ps,
+ .interface_cfg = rtw_pci_interface_cfg,
.read8 = rtw_pci_read8,
.read16 = rtw_pci_read16,
@@ -1489,6 +1530,7 @@ static struct pci_driver rtw_pci_driver = {
.id_table = rtw_pci_id_table,
.probe = rtw_pci_probe,
.remove = rtw_pci_remove,
+ .driver.pm = RTW_PM_OPS,
};
module_pci_driver(rtw_pci_driver);
diff --git a/drivers/net/wireless/realtek/rtw88/pci.h b/drivers/net/wireless/realtek/rtw88/pci.h
index 49bf29a92152..1580cfc57361 100644
--- a/drivers/net/wireless/realtek/rtw88/pci.h
+++ b/drivers/net/wireless/realtek/rtw88/pci.h
@@ -210,7 +210,7 @@ struct rtw_pci {
void __iomem *mmap;
};
-static u32 max_num_of_tx_queue(u8 queue)
+static inline u32 max_num_of_tx_queue(u8 queue)
{
u32 max_num;
diff --git a/drivers/net/wireless/realtek/rtw88/phy.c b/drivers/net/wireless/realtek/rtw88/phy.c
index a3e1e9578b65..eea9d888fbf1 100644
--- a/drivers/net/wireless/realtek/rtw88/phy.c
+++ b/drivers/net/wireless/realtek/rtw88/phy.c
@@ -1434,7 +1434,7 @@ static void rtw_load_rfk_table(struct rtw_dev *rtwdev)
rtw_load_table(rtwdev, chip->rfk_init_tbl);
- dpk_info->is_dpk_pwr_on = 1;
+ dpk_info->is_dpk_pwr_on = true;
}
void rtw_phy_load_tables(struct rtw_dev *rtwdev)
diff --git a/drivers/net/wireless/realtek/rtw88/ps.c b/drivers/net/wireless/realtek/rtw88/ps.c
index 913e6f47130f..7a189a9926fe 100644
--- a/drivers/net/wireless/realtek/rtw88/ps.c
+++ b/drivers/net/wireless/realtek/rtw88/ps.c
@@ -91,11 +91,11 @@ void rtw_power_mode_change(struct rtw_dev *rtwdev, bool enter)
return;
/* check confirm power mode has left power save state */
- for (polling_cnt = 0; polling_cnt < 3; polling_cnt++) {
+ for (polling_cnt = 0; polling_cnt < 50; polling_cnt++) {
polling = rtw_read8(rtwdev, rtwdev->hci.cpwm_addr);
if ((polling ^ confirm) & BIT_RPWM_TOGGLE)
return;
- mdelay(20);
+ udelay(100);
}
/* in case of fw/hw missed the request, retry */
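A quick budget check on the polling change above: the old loop busy-waited
up to 3 x 20 ms = 60 ms in mdelay(), while the new one spins for at most
50 x 100 us = 5 ms in udelay(). Besides the smaller worst case, the 100 us
granularity lets the common case, where the firmware toggles the RPWM bit
almost immediately, return after one or two reads instead of paying a full
20 ms per retry.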
diff --git a/drivers/net/wireless/realtek/rtw88/reg.h b/drivers/net/wireless/realtek/rtw88/reg.h
index 7e817bc997eb..9d94534c9674 100644
--- a/drivers/net/wireless/realtek/rtw88/reg.h
+++ b/drivers/net/wireless/realtek/rtw88/reg.h
@@ -9,6 +9,8 @@
#define BIT_FEN_CPUEN BIT(2)
#define BIT_FEN_BB_GLB_RST BIT(1)
#define BIT_FEN_BB_RSTB BIT(0)
+#define BIT_R_DIS_PRST BIT(6)
+#define BIT_WLOCK_1C_B6 BIT(5)
#define REG_SYS_PW_CTRL 0x0004
#define REG_SYS_CLK_CTRL 0x0008
#define BIT_CPU_CLK_EN BIT(14)
@@ -160,8 +162,12 @@
#define REG_CR 0x0100
#define REG_TRXFF_BNDY 0x0114
#define REG_RXFF_BNDY 0x011C
+#define REG_FE1IMR 0x0120
+#define BIT_FS_RXDONE BIT(16)
#define REG_PKTBUF_DBG_CTRL 0x0140
#define REG_C2HEVT 0x01A0
+#define REG_MCUTST_II 0x01C4
+#define REG_WOWLAN_WAKE_REASON 0x01C7
#define REG_HMETFR 0x01CC
#define REG_HMEBOX0 0x01D0
#define REG_HMEBOX1 0x01D4
@@ -192,9 +198,18 @@
#define REG_H2C_TAIL 0x0248
#define REG_H2C_READ_ADDR 0x024C
#define REG_H2C_INFO 0x0254
+#define REG_RXPKT_NUM 0x0284
+#define BIT_RXDMA_REQ BIT(19)
+#define BIT_RW_RELEASE BIT(18)
+#define BIT_RXDMA_IDLE BIT(17)
+#define REG_RXPKTNUM 0x02B0
#define REG_INT_MIG 0x0304
+#define REG_HCI_MIX_CFG 0x03FC
+#define BIT_PCIE_EMAC_PDN_AUX_TO_FAST_CLK BIT(26)
+#define REG_BCNQ_INFO 0x0418
+#define BIT_MGQ_CPU_EMPTY BIT(24)
#define REG_FWHW_TXQ_CTRL 0x0420
#define BIT_EN_BCNQ_DL BIT(22)
#define BIT_EN_WR_FREE_TAIL BIT(20)
@@ -323,6 +338,20 @@
#define BIT_RFMOD_80M BIT(8)
#define BIT_RFMOD_40M BIT(7)
#define REG_WMAC_TRXPTCL_CTL_H 0x066C
+#define REG_WKFMCAM_CMD 0x0698
+#define BIT_WKFCAM_POLLING_V1 BIT(31)
+#define BIT_WKFCAM_CLR_V1 BIT(30)
+#define BIT_WKFCAM_WE BIT(16)
+#define BIT_SHIFT_WKFCAM_ADDR_V2 8
+#define BIT_MASK_WKFCAM_ADDR_V2 0xff
+#define BIT_WKFCAM_ADDR_V2(x) \
+ (((x) & BIT_MASK_WKFCAM_ADDR_V2) << BIT_SHIFT_WKFCAM_ADDR_V2)
+#define REG_WKFMCAM_RWD 0x069C
+#define BIT_WKFMCAM_VALID BIT(31)
+#define BIT_WKFMCAM_BC BIT(26)
+#define BIT_WKFMCAM_MC BIT(25)
+#define BIT_WKFMCAM_UC BIT(24)
+
#define REG_RXFLTMAP0 0x06A0
#define REG_RXFLTMAP1 0x06A2
#define REG_RXFLTMAP2 0x06A4
diff --git a/drivers/net/wireless/realtek/rtw88/rtw8822c.c b/drivers/net/wireless/realtek/rtw88/rtw8822c.c
index 174029836833..3865097696d4 100644
--- a/drivers/net/wireless/realtek/rtw88/rtw8822c.c
+++ b/drivers/net/wireless/realtek/rtw88/rtw8822c.c
@@ -3487,12 +3487,12 @@ static struct rtw_pwr_seq_cmd trans_cardemu_to_act_8822c[] = {
RTW_PWR_CUT_ALL_MSK,
RTW_PWR_INTF_ALL_MSK,
RTW_PWR_ADDR_MAC,
- RTW_PWR_CMD_WRITE, BIT(7), 0},
- {0x0005,
+ RTW_PWR_CMD_WRITE, (BIT(4) | BIT(3)), 0},
+ {0x1018,
RTW_PWR_CUT_ALL_MSK,
RTW_PWR_INTF_ALL_MSK,
RTW_PWR_ADDR_MAC,
- RTW_PWR_CMD_WRITE, (BIT(4) | BIT(3)), 0},
+ RTW_PWR_CMD_WRITE, BIT(2), BIT(2)},
{0x0005,
RTW_PWR_CUT_ALL_MSK,
RTW_PWR_INTF_ALL_MSK,
@@ -4060,6 +4060,18 @@ static const struct rtw_pwr_track_tbl rtw8822c_rtw_pwr_track_tbl = {
.pwrtrk_2g_ccka_p = rtw8822c_pwrtrk_2g_cck_a_p,
};
+#ifdef CONFIG_PM
+static const struct wiphy_wowlan_support rtw_wowlan_stub_8822c = {
+ .flags = WIPHY_WOWLAN_MAGIC_PKT | WIPHY_WOWLAN_GTK_REKEY_FAILURE |
+ WIPHY_WOWLAN_DISCONNECT | WIPHY_WOWLAN_SUPPORTS_GTK_REKEY |
+ WIPHY_WOWLAN_NET_DETECT,
+ .n_patterns = RTW_MAX_PATTERN_NUM,
+ .pattern_max_len = RTW_MAX_PATTERN_SIZE,
+ .pattern_min_len = 1,
+ .max_nd_match_sets = 4,
+};
+#endif
+
struct rtw_chip_info rtw8822c_hw_spec = {
.ops = &rtw8822c_ops,
.id = RTW_CHIP_TYPE_8822C,
@@ -4106,6 +4118,11 @@ struct rtw_chip_info rtw8822c_hw_spec = {
.bfer_su_max_num = 2,
.bfer_mu_max_num = 1,
+#ifdef CONFIG_PM
+ .wow_fw_name = "rtw88/rtw8822c_wow_fw.bin",
+ .wowlan_stub = &rtw_wowlan_stub_8822c,
+ .max_sched_scan_ssids = 4,
+#endif
.coex_para_ver = 0x19062706,
.bt_desired_ver = 0x6,
.scbd_support = true,
@@ -4135,3 +4152,4 @@ struct rtw_chip_info rtw8822c_hw_spec = {
EXPORT_SYMBOL(rtw8822c_hw_spec);
MODULE_FIRMWARE("rtw88/rtw8822c_fw.bin");
+MODULE_FIRMWARE("rtw88/rtw8822c_wow_fw.bin");
diff --git a/drivers/net/wireless/realtek/rtw88/util.h b/drivers/net/wireless/realtek/rtw88/util.h
index 7bd2843b0bce..41c10e7144df 100644
--- a/drivers/net/wireless/realtek/rtw88/util.h
+++ b/drivers/net/wireless/realtek/rtw88/util.h
@@ -15,6 +15,8 @@ struct rtw_dev;
IEEE80211_IFACE_ITER_NORMAL, iterator, data)
#define rtw_iterate_stas_atomic(rtwdev, iterator, data) \
ieee80211_iterate_stations_atomic(rtwdev->hw, iterator, data)
+#define rtw_iterate_keys(rtwdev, vif, iterator, data) \
+ ieee80211_iter_keys(rtwdev->hw, vif, iterator, data)
static inline u8 *get_hdr_bssid(struct ieee80211_hdr *hdr)
{
diff --git a/drivers/net/wireless/realtek/rtw88/wow.c b/drivers/net/wireless/realtek/rtw88/wow.c
new file mode 100644
index 000000000000..af5c27e1bb07
--- /dev/null
+++ b/drivers/net/wireless/realtek/rtw88/wow.c
@@ -0,0 +1,890 @@
+// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
+/* Copyright(c) 2018-2019 Realtek Corporation
+ */
+
+#include "main.h"
+#include "fw.h"
+#include "wow.h"
+#include "reg.h"
+#include "debug.h"
+#include "mac.h"
+#include "ps.h"
+
+static void rtw_wow_show_wakeup_reason(struct rtw_dev *rtwdev)
+{
+ u8 reason;
+
+ reason = rtw_read8(rtwdev, REG_WOWLAN_WAKE_REASON);
+
+ if (reason == RTW_WOW_RSN_RX_DEAUTH)
+ rtw_dbg(rtwdev, RTW_DBG_WOW, "WOW: Rx deauth\n");
+ else if (reason == RTW_WOW_RSN_DISCONNECT)
+ rtw_dbg(rtwdev, RTW_DBG_WOW, "WOW: AP is off\n");
+ else if (reason == RTW_WOW_RSN_RX_MAGIC_PKT)
+ rtw_dbg(rtwdev, RTW_DBG_WOW, "WOW: Rx magic packet\n");
+ else if (reason == RTW_WOW_RSN_RX_GTK_REKEY)
+ rtw_dbg(rtwdev, RTW_DBG_WOW, "WOW: Rx gtk rekey\n");
+ else if (reason == RTW_WOW_RSN_RX_PTK_REKEY)
+ rtw_dbg(rtwdev, RTW_DBG_WOW, "WOW: Rx ptk rekey\n");
+ else if (reason == RTW_WOW_RSN_RX_PATTERN_MATCH)
+ rtw_dbg(rtwdev, RTW_DBG_WOW, "WOW: Rx pattern match packet\n");
+ else if (reason == RTW_WOW_RSN_RX_NLO)
+ rtw_dbg(rtwdev, RTW_DBG_WOW, "Rx NLO\n");
+ else
+ rtw_warn(rtwdev, "Unknown wakeup reason %x\n", reason);
+}
+
+static void rtw_wow_pattern_write_cam(struct rtw_dev *rtwdev, u8 addr,
+ u32 wdata)
+{
+ rtw_write32(rtwdev, REG_WKFMCAM_RWD, wdata);
+ rtw_write32(rtwdev, REG_WKFMCAM_CMD, BIT_WKFCAM_POLLING_V1 |
+ BIT_WKFCAM_WE | BIT_WKFCAM_ADDR_V2(addr));
+
+ if (!check_hw_ready(rtwdev, REG_WKFMCAM_CMD, BIT_WKFCAM_POLLING_V1, 0))
+ rtw_err(rtwdev, "failed to write pattern cam\n");
+}
+
+static void rtw_wow_pattern_write_cam_ent(struct rtw_dev *rtwdev, u8 id,
+ struct rtw_wow_pattern *rtw_pattern)
+{
+ int i;
+ u8 addr;
+ u32 wdata;
+
+ for (i = 0; i < RTW_MAX_PATTERN_MASK_SIZE / 4; i++) {
+ addr = (id << 3) + i;
+ wdata = rtw_pattern->mask[i * 4];
+ wdata |= rtw_pattern->mask[i * 4 + 1] << 8;
+ wdata |= rtw_pattern->mask[i * 4 + 2] << 16;
+ wdata |= rtw_pattern->mask[i * 4 + 3] << 24;
+ rtw_wow_pattern_write_cam(rtwdev, addr, wdata);
+ }
+
+ wdata = rtw_pattern->crc;
+ addr = (id << 3) + RTW_MAX_PATTERN_MASK_SIZE / 4;
+
+ switch (rtw_pattern->type) {
+ case RTW_PATTERN_BROADCAST:
+ wdata |= BIT_WKFMCAM_BC | BIT_WKFMCAM_VALID;
+ break;
+ case RTW_PATTERN_MULTICAST:
+ wdata |= BIT_WKFMCAM_MC | BIT_WKFMCAM_VALID;
+ break;
+ case RTW_PATTERN_UNICAST:
+ wdata |= BIT_WKFMCAM_UC | BIT_WKFMCAM_VALID;
+ break;
+ default:
+ break;
+ }
+ rtw_wow_pattern_write_cam(rtwdev, addr, wdata);
+}
+
+/* RTK internal CRC16 for Pattern Cam */
+static u16 __rtw_cal_crc16(u8 data, u16 crc)
+{
+ u8 shift_in, data_bit;
+ u8 crc_bit4, crc_bit11, crc_bit15;
+ u16 crc_result;
+ int index;
+
+ for (index = 0; index < 8; index++) {
+ crc_bit15 = ((crc & BIT(15)) ? 1 : 0);
+ data_bit = (data & (BIT(0) << index) ? 1 : 0);
+ shift_in = crc_bit15 ^ data_bit;
+
+ crc_result = crc << 1;
+
+ if (shift_in == 0)
+ crc_result &= (~BIT(0));
+ else
+ crc_result |= BIT(0);
+
+ crc_bit11 = ((crc & BIT(11)) ? 1 : 0) ^ shift_in;
+
+ if (crc_bit11 == 0)
+ crc_result &= (~BIT(12));
+ else
+ crc_result |= BIT(12);
+
+ crc_bit4 = ((crc & BIT(4)) ? 1 : 0) ^ shift_in;
+
+ if (crc_bit4 == 0)
+ crc_result &= (~BIT(5));
+ else
+ crc_result |= BIT(5);
+
+ crc = crc_result;
+ }
+ return crc;
+}
+
+static u16 rtw_calc_crc(u8 *pdata, int length)
+{
+ u16 crc = 0xffff;
+ int i;
+
+ for (i = 0; i < length; i++)
+ crc = __rtw_cal_crc16(pdata[i], crc);
+
+ /* get the 1's complement */
+ return ~crc;
+}
+
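+/* Note: the bit-twiddling above implements the CCITT polynomial
+ * x^16 + x^12 + x^5 + 1 (0x1021), seeded with 0xffff, consuming each
+ * byte LSB-first and returning the one's complement; presumably it is
+ * open-coded because this exact bit ordering is what the wakeup
+ * pattern CAM expects, rather than any of the kernel's stock CRC
+ * helpers.
+ */
+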
+static void rtw_wow_pattern_generate(struct rtw_dev *rtwdev,
+ struct rtw_vif *rtwvif,
+ const struct cfg80211_pkt_pattern *pkt_pattern,
+ struct rtw_wow_pattern *rtw_pattern)
+{
+ const u8 *mask;
+ const u8 *pattern;
+ u8 mask_hw[RTW_MAX_PATTERN_MASK_SIZE] = {0};
+ u8 content[RTW_MAX_PATTERN_SIZE] = {0};
+ u8 mac_addr[ETH_ALEN] = {0};
+ u8 mask_len;
+ u16 count;
+ int len;
+ int i;
+
+ pattern = pkt_pattern->pattern;
+ len = pkt_pattern->pattern_len;
+ mask = pkt_pattern->mask;
+
+ ether_addr_copy(mac_addr, rtwvif->mac_addr);
+ memset(rtw_pattern, 0, sizeof(*rtw_pattern));
+
+ mask_len = DIV_ROUND_UP(len, 8);
+
+ if (is_broadcast_ether_addr(pattern))
+ rtw_pattern->type = RTW_PATTERN_BROADCAST;
+ else if (is_multicast_ether_addr(pattern))
+ rtw_pattern->type = RTW_PATTERN_MULTICAST;
+ else if (ether_addr_equal(pattern, mac_addr))
+ rtw_pattern->type = RTW_PATTERN_UNICAST;
+ else
+ rtw_pattern->type = RTW_PATTERN_INVALID;
+
+ /* Translate the mask from the OS into a mask for the HW.
+ * The pattern from the OS describes an 'ethernet frame', like this:
+ *
+ * |    6   |    6   |  2   |    20     |  Variable  |  4  |
+ * |--------+--------+------+-----------+------------+-----|
+ * |    802.3 MAC Header    | IP Header | TCP Packet | FCS |
+ * |   DA   |   SA   | Type |
+ *
+ * BUT the packet caught by our HW is an '802.11 frame' that begins
+ * at the LLC:
+ *
+ * |     24 or 30      |   6    |  2   |    20     |  Variable  |  4  |
+ * |-------------------+--------+------+-----------+------------+-----|
+ * | 802.11 MAC Header |      LLC      | IP Header | TCP Packet | FCS |
+ * |      Others       |     Type      |
+ *
+ * Therefore we need to translate mask_from_OS into mask_to_hw: shift
+ * the mask down by 6 bit positions, so that hw bit j describes OS
+ * pattern byte j + 6, and clear bit[0~5] of the result, because those
+ * bits would still describe 'SA' bytes, while the HW packet starts at
+ * the LLC, where the first 6 bytes simply don't match.
+ */
+
+ /* Shift 6 bits */
+ for (i = 0; i < mask_len - 1; i++) {
+ mask_hw[i] = u8_get_bits(mask[i], GENMASK(7, 6));
+ mask_hw[i] |= u8_get_bits(mask[i + 1], GENMASK(5, 0)) << 2;
+ }
+ mask_hw[i] = u8_get_bits(mask[i], GENMASK(7, 6));
+
+ /* Set bit 0-5 to zero */
+ mask_hw[0] &= (~GENMASK(5, 0));
+
+ memcpy(rtw_pattern->mask, mask_hw, RTW_MAX_PATTERN_MASK_SIZE);
+
+ /* To get the wake up pattern from the mask.
+ * We do not count first 12 bits which means
+ * DA[6] and SA[6] in the pattern to match HW design.
+ */
+ count = 0;
+ for (i = 12; i < len; i++) {
+ if ((mask[i / 8] >> (i % 8)) & 0x01) {
+ content[count] = pattern[i];
+ count++;
+ }
+ }
+
+ rtw_pattern->crc = rtw_calc_crc(content, count);
+}
+
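+/* Worked example of the mask translation above (mask values invented):
+ * with mask[0] = 0xb7 and mask[1] = 0xd0 from the OS,
+ *   mask_hw[0] = (0xb7 >> 6) | ((0xd0 & 0x3f) << 2) = 0x42,
+ * and clearing bit[0~5] leaves 0x40. Bit j of mask_hw covers OS
+ * pattern byte j + 6, so the cleared bits are exactly the 'SA' bytes
+ * (OS bytes 6~11), and the surviving bit 6 is OS byte 12, the Type
+ * field.
+ */
+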
+static void rtw_wow_pattern_clear_cam(struct rtw_dev *rtwdev)
+{
+ bool ret;
+
+ rtw_write32(rtwdev, REG_WKFMCAM_CMD, BIT_WKFCAM_POLLING_V1 |
+ BIT_WKFCAM_CLR_V1);
+
+ ret = check_hw_ready(rtwdev, REG_WKFMCAM_CMD, BIT_WKFCAM_POLLING_V1, 0);
+ if (!ret)
+ rtw_err(rtwdev, "failed to clean pattern cam\n");
+}
+
+static void rtw_wow_pattern_write(struct rtw_dev *rtwdev)
+{
+ struct rtw_wow_param *rtw_wow = &rtwdev->wow;
+ struct rtw_wow_pattern *rtw_pattern = rtw_wow->patterns;
+ int i = 0;
+
+ for (i = 0; i < rtw_wow->pattern_cnt; i++)
+ rtw_wow_pattern_write_cam_ent(rtwdev, i, rtw_pattern + i);
+}
+
+static void rtw_wow_pattern_clear(struct rtw_dev *rtwdev)
+{
+ struct rtw_wow_param *rtw_wow = &rtwdev->wow;
+
+ rtw_wow_pattern_clear_cam(rtwdev);
+
+ rtw_wow->pattern_cnt = 0;
+ memset(rtw_wow->patterns, 0, sizeof(rtw_wow->patterns));
+}
+
+static void rtw_wow_bb_stop(struct rtw_dev *rtwdev)
+{
+ struct rtw_wow_param *rtw_wow = &rtwdev->wow;
+
+ /* wait 100ms for firmware to finish TX */
+ msleep(100);
+
+ if (!rtw_read32_mask(rtwdev, REG_BCNQ_INFO, BIT_MGQ_CPU_EMPTY))
+ rtw_warn(rtwdev, "Wrong status of MGQ_CPU empty!\n");
+
+ rtw_wow->txpause = rtw_read8(rtwdev, REG_TXPAUSE);
+ rtw_write8(rtwdev, REG_TXPAUSE, 0xff);
+ rtw_write8_clr(rtwdev, REG_SYS_FUNC_EN, BIT_FEN_BB_RSTB);
+}
+
+static void rtw_wow_bb_start(struct rtw_dev *rtwdev)
+{
+ struct rtw_wow_param *rtw_wow = &rtwdev->wow;
+
+ rtw_write8_set(rtwdev, REG_SYS_FUNC_EN, BIT_FEN_BB_RSTB);
+ rtw_write8(rtwdev, REG_TXPAUSE, rtw_wow->txpause);
+}
+
+static void rtw_wow_rx_dma_stop(struct rtw_dev *rtwdev)
+{
+ /* wait 100ms for HW to finish rx dma */
+ msleep(100);
+
+ rtw_write32_set(rtwdev, REG_RXPKT_NUM, BIT_RW_RELEASE);
+
+ if (!check_hw_ready(rtwdev, REG_RXPKT_NUM, BIT_RXDMA_IDLE, 1))
+ rtw_err(rtwdev, "failed to stop rx dma\n");
+}
+
+static void rtw_wow_rx_dma_start(struct rtw_dev *rtwdev)
+{
+ rtw_write32_clr(rtwdev, REG_RXPKT_NUM, BIT_RW_RELEASE);
+}
+
+static bool rtw_wow_check_fw_status(struct rtw_dev *rtwdev, bool wow_enable)
+{
+ bool ret = true; /* assume failure until the checks below pass */
+
+ /* wait 100ms for wow firmware to finish work */
+ msleep(100);
+
+ if (wow_enable) {
+ if (!rtw_read8(rtwdev, REG_WOWLAN_WAKE_REASON))
+ ret = false;
+ } else {
+ if (rtw_read32_mask(rtwdev, REG_FE1IMR, BIT_FS_RXDONE) == 0 &&
+ rtw_read32_mask(rtwdev, REG_RXPKT_NUM, BIT_RW_RELEASE) == 0)
+ ret = false;
+ }
+
+ if (ret)
+ rtw_err(rtwdev, "failed to check wow status %s\n",
+ wow_enable ? "enabled" : "disabled");
+
+ return ret;
+}
+
+static void rtw_wow_fw_security_type_iter(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta,
+ struct ieee80211_key_conf *key,
+ void *data)
+{
+ struct rtw_fw_key_type_iter_data *iter_data = data;
+ struct rtw_dev *rtwdev = hw->priv;
+ u8 hw_key_type;
+
+ if (vif != rtwdev->wow.wow_vif)
+ return;
+
+ switch (key->cipher) {
+ case WLAN_CIPHER_SUITE_WEP40:
+ hw_key_type = RTW_CAM_WEP40;
+ break;
+ case WLAN_CIPHER_SUITE_WEP104:
+ hw_key_type = RTW_CAM_WEP104;
+ break;
+ case WLAN_CIPHER_SUITE_TKIP:
+ hw_key_type = RTW_CAM_TKIP;
+ key->flags |= IEEE80211_KEY_FLAG_GENERATE_MMIC;
+ break;
+ case WLAN_CIPHER_SUITE_CCMP:
+ hw_key_type = RTW_CAM_AES;
+ key->flags |= IEEE80211_KEY_FLAG_SW_MGMT_TX;
+ break;
+ default:
+ rtw_err(rtwdev, "Unsupported key type for wowlan mode\n");
+ hw_key_type = 0;
+ break;
+ }
+
+ if (sta)
+ iter_data->pairwise_key_type = hw_key_type;
+ else
+ iter_data->group_key_type = hw_key_type;
+}
+
+static void rtw_wow_fw_security_type(struct rtw_dev *rtwdev)
+{
+ struct rtw_fw_key_type_iter_data data = {};
+ struct ieee80211_vif *wow_vif = rtwdev->wow.wow_vif;
+
+ data.rtwdev = rtwdev;
+ rtw_iterate_keys(rtwdev, wow_vif,
+ rtw_wow_fw_security_type_iter, &data);
+ rtw_fw_set_aoac_global_info_cmd(rtwdev, data.pairwise_key_type,
+ data.group_key_type);
+}
+
+static int rtw_wow_fw_start(struct rtw_dev *rtwdev)
+{
+ if (rtw_wow_mgd_linked(rtwdev)) {
+ rtw_send_rsvd_page_h2c(rtwdev);
+ rtw_wow_pattern_write(rtwdev);
+ rtw_wow_fw_security_type(rtwdev);
+ rtw_fw_set_disconnect_decision_cmd(rtwdev, true);
+ rtw_fw_set_keep_alive_cmd(rtwdev, true);
+ } else if (rtw_wow_no_link(rtwdev)) {
+ rtw_fw_set_nlo_info(rtwdev, true);
+ rtw_fw_update_pkt_probe_req(rtwdev, NULL);
+ rtw_fw_channel_switch(rtwdev, true);
+ }
+
+ rtw_fw_set_wowlan_ctrl_cmd(rtwdev, true);
+ rtw_fw_set_remote_wake_ctrl_cmd(rtwdev, true);
+
+ return rtw_wow_check_fw_status(rtwdev, true);
+}
+
+static int rtw_wow_fw_stop(struct rtw_dev *rtwdev)
+{
+ if (rtw_wow_mgd_linked(rtwdev)) {
+ rtw_fw_set_disconnect_decision_cmd(rtwdev, false);
+ rtw_fw_set_keep_alive_cmd(rtwdev, false);
+ rtw_wow_pattern_clear(rtwdev);
+ } else if (rtw_wow_no_link(rtwdev)) {
+ rtw_fw_channel_switch(rtwdev, false);
+ rtw_fw_set_nlo_info(rtwdev, false);
+ }
+
+ rtw_fw_set_wowlan_ctrl_cmd(rtwdev, false);
+ rtw_fw_set_remote_wake_ctrl_cmd(rtwdev, false);
+
+ return rtw_wow_check_fw_status(rtwdev, false);
+}
+
+static void rtw_wow_avoid_reset_mac(struct rtw_dev *rtwdev)
+{
+ /* When resuming from wowlan mode, some hosts issue a reset signal
+ * (PCIE: PERST#, USB: SE0RST) to the device, which resets the MAC
+ * core. If that happens, the connection to the AP is lost. Setting
+ * the REG_RSV_CTRL register avoids this.
+ */
+ switch (rtw_hci_type(rtwdev)) {
+ case RTW_HCI_TYPE_PCIE:
+ case RTW_HCI_TYPE_USB:
+ rtw_write8(rtwdev, REG_RSV_CTRL, BIT_WLOCK_1C_B6);
+ rtw_write8(rtwdev, REG_RSV_CTRL,
+ BIT_WLOCK_1C_B6 | BIT_R_DIS_PRST);
+ break;
+ default:
+ rtw_warn(rtwdev, "Unsupported hci type to disable reset MAC\n");
+ break;
+ }
+}
+
+static void rtw_wow_fw_media_status_iter(void *data, struct ieee80211_sta *sta)
+{
+ struct rtw_sta_info *si = (struct rtw_sta_info *)sta->drv_priv;
+ struct rtw_fw_media_status_iter_data *iter_data = data;
+ struct rtw_dev *rtwdev = iter_data->rtwdev;
+
+ rtw_fw_media_status_report(rtwdev, si->mac_id, iter_data->connect);
+}
+
+static void rtw_wow_fw_media_status(struct rtw_dev *rtwdev, bool connect)
+{
+ struct rtw_fw_media_status_iter_data data;
+
+ data.rtwdev = rtwdev;
+ data.connect = connect;
+
+ rtw_iterate_stas_atomic(rtwdev, rtw_wow_fw_media_status_iter, &data);
+}
+
+static void rtw_wow_config_pno_rsvd_page(struct rtw_dev *rtwdev)
+{
+ struct rtw_wow_param *rtw_wow = &rtwdev->wow;
+ struct rtw_pno_request *rtw_pno_req = &rtw_wow->pno_req;
+ struct cfg80211_ssid *ssid;
+ int i;
+
+ for (i = 0; i < rtw_pno_req->match_set_cnt; i++) {
+ ssid = &rtw_pno_req->match_sets[i].ssid;
+ rtw_add_rsvd_page_probe_req(rtwdev, ssid);
+ }
+ rtw_add_rsvd_page_probe_req(rtwdev, NULL);
+ rtw_add_rsvd_page(rtwdev, RSVD_NLO_INFO, false);
+ rtw_add_rsvd_page(rtwdev, RSVD_CH_INFO, true);
+}
+
+static void rtw_wow_config_linked_rsvd_page(struct rtw_dev *rtwdev)
+{
+ rtw_add_rsvd_page(rtwdev, RSVD_PS_POLL, true);
+ rtw_add_rsvd_page(rtwdev, RSVD_QOS_NULL, true);
+ rtw_add_rsvd_page(rtwdev, RSVD_NULL, true);
+ rtw_add_rsvd_page(rtwdev, RSVD_LPS_PG_DPK, true);
+ rtw_add_rsvd_page(rtwdev, RSVD_LPS_PG_INFO, true);
+}
+
+static void rtw_wow_config_rsvd_page(struct rtw_dev *rtwdev)
+{
+ rtw_reset_rsvd_page(rtwdev);
+
+ if (rtw_wow_mgd_linked(rtwdev)) {
+ rtw_wow_config_linked_rsvd_page(rtwdev);
+ } else if (test_bit(RTW_FLAG_WOWLAN, rtwdev->flags) &&
+ rtw_wow_no_link(rtwdev)) {
+ rtw_wow_config_pno_rsvd_page(rtwdev);
+ }
+}
+
+static int rtw_wow_dl_fw_rsvd_page(struct rtw_dev *rtwdev)
+{
+ struct ieee80211_vif *wow_vif = rtwdev->wow.wow_vif;
+
+ rtw_wow_config_rsvd_page(rtwdev);
+
+ return rtw_fw_download_rsvd_page(rtwdev, wow_vif);
+}
+
+static int rtw_wow_swap_fw(struct rtw_dev *rtwdev, enum rtw_fw_type type)
+{
+ struct rtw_fw_state *fw;
+ int ret;
+
+ switch (type) {
+ case RTW_WOWLAN_FW:
+ fw = &rtwdev->wow_fw;
+ break;
+
+ case RTW_NORMAL_FW:
+ fw = &rtwdev->fw;
+ break;
+
+ default:
+ rtw_warn(rtwdev, "unsupported firmware type to swap\n");
+ return -ENOENT;
+ }
+
+ ret = rtw_download_firmware(rtwdev, fw);
+ if (ret)
+ goto out;
+
+ rtw_fw_send_general_info(rtwdev);
+ rtw_fw_send_phydm_info(rtwdev);
+ rtw_wow_fw_media_status(rtwdev, true);
+
+out:
+ return ret;
+}
+
+static void rtw_wow_check_pno(struct rtw_dev *rtwdev,
+ struct cfg80211_sched_scan_request *nd_config)
+{
+ struct rtw_wow_param *rtw_wow = &rtwdev->wow;
+ struct rtw_pno_request *pno_req = &rtw_wow->pno_req;
+ struct ieee80211_channel *channel;
+ int i, size;
+
+ if (!nd_config->n_match_sets || !nd_config->n_channels)
+ goto err;
+
+ pno_req->match_set_cnt = nd_config->n_match_sets;
+ size = sizeof(*pno_req->match_sets) * pno_req->match_set_cnt;
+ pno_req->match_sets = kmemdup(nd_config->match_sets, size, GFP_KERNEL);
+ if (!pno_req->match_sets)
+ goto err;
+
+ pno_req->channel_cnt = nd_config->n_channels;
+ size = sizeof(*nd_config->channels[0]) * nd_config->n_channels;
+ pno_req->channels = kmalloc(size, GFP_KERNEL);
+ if (!pno_req->channels)
+ goto channel_err;
+
+ for (i = 0; i < pno_req->channel_cnt; i++) {
+ channel = pno_req->channels + i;
+ memcpy(channel, nd_config->channels[i], sizeof(*channel));
+ }
+
+ pno_req->scan_plan = *nd_config->scan_plans;
+ pno_req->inited = true;
+
+ rtw_dbg(rtwdev, RTW_DBG_WOW, "WOW: net-detect is enabled\n");
+
+ return;
+
+channel_err:
+ kfree(pno_req->match_sets);
+
+err:
+ rtw_dbg(rtwdev, RTW_DBG_WOW, "WOW: net-detect is disabled\n");
+}
+
+static int rtw_wow_leave_linked_ps(struct rtw_dev *rtwdev)
+{
+ if (!test_bit(RTW_FLAG_WOWLAN, rtwdev->flags))
+ cancel_delayed_work_sync(&rtwdev->watch_dog_work);
+
+ rtw_leave_lps(rtwdev);
+
+ return 0;
+}
+
+static int rtw_wow_leave_no_link_ps(struct rtw_dev *rtwdev)
+{
+ struct rtw_wow_param *rtw_wow = &rtwdev->wow;
+ int ret = 0;
+
+ if (test_bit(RTW_FLAG_WOWLAN, rtwdev->flags)) {
+ if (rtw_fw_lps_deep_mode)
+ rtw_leave_lps_deep(rtwdev);
+ } else {
+ if (test_bit(RTW_FLAG_INACTIVE_PS, rtwdev->flags)) {
+ rtw_wow->ips_enabled = true;
+ ret = rtw_leave_ips(rtwdev);
+ if (ret)
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+static int rtw_wow_leave_ps(struct rtw_dev *rtwdev)
+{
+ int ret = 0;
+
+ if (rtw_wow_mgd_linked(rtwdev))
+ ret = rtw_wow_leave_linked_ps(rtwdev);
+ else if (rtw_wow_no_link(rtwdev))
+ ret = rtw_wow_leave_no_link_ps(rtwdev);
+
+ return ret;
+}
+
+static int rtw_wow_restore_ps(struct rtw_dev *rtwdev)
+{
+ int ret = 0;
+
+ if (rtw_wow_no_link(rtwdev) && rtwdev->wow.ips_enabled)
+ ret = rtw_enter_ips(rtwdev);
+
+ return ret;
+}
+
+static int rtw_wow_enter_linked_ps(struct rtw_dev *rtwdev)
+{
+ struct rtw_wow_param *rtw_wow = &rtwdev->wow;
+ struct ieee80211_vif *wow_vif = rtw_wow->wow_vif;
+ struct rtw_vif *rtwvif = (struct rtw_vif *)wow_vif->drv_priv;
+
+ rtw_enter_lps(rtwdev, rtwvif->port);
+
+ return 0;
+}
+
+static int rtw_wow_enter_no_link_ps(struct rtw_dev *rtwdev)
+{
+ /* firmware enters deep ps by itself if supported */
+ set_bit(RTW_FLAG_LEISURE_PS_DEEP, rtwdev->flags);
+
+ return 0;
+}
+
+static int rtw_wow_enter_ps(struct rtw_dev *rtwdev)
+{
+ int ret = 0;
+
+ if (rtw_wow_mgd_linked(rtwdev))
+ ret = rtw_wow_enter_linked_ps(rtwdev);
+ else if (rtw_wow_no_link(rtwdev) && rtw_fw_lps_deep_mode)
+ ret = rtw_wow_enter_no_link_ps(rtwdev);
+
+ return ret;
+}
+
+static void rtw_wow_stop_trx(struct rtw_dev *rtwdev)
+{
+ rtw_wow_bb_stop(rtwdev);
+ rtw_wow_rx_dma_stop(rtwdev);
+}
+
+static int rtw_wow_start(struct rtw_dev *rtwdev)
+{
+ int ret;
+
+ ret = rtw_wow_fw_start(rtwdev);
+ if (ret)
+ goto out;
+
+ rtw_hci_stop(rtwdev);
+ rtw_wow_bb_start(rtwdev);
+ rtw_wow_avoid_reset_mac(rtwdev);
+
+out:
+ return ret;
+}
+
+static int rtw_wow_enable(struct rtw_dev *rtwdev)
+{
+ int ret = 0;
+
+ rtw_wow_stop_trx(rtwdev);
+
+ ret = rtw_wow_swap_fw(rtwdev, RTW_WOWLAN_FW);
+ if (ret) {
+ rtw_err(rtwdev, "failed to swap wow fw\n");
+ goto error;
+ }
+
+ set_bit(RTW_FLAG_WOWLAN, rtwdev->flags);
+
+ ret = rtw_wow_dl_fw_rsvd_page(rtwdev);
+ if (ret) {
+ rtw_err(rtwdev, "failed to download wowlan rsvd page\n");
+ goto error;
+ }
+
+ ret = rtw_wow_start(rtwdev);
+ if (ret) {
+ rtw_err(rtwdev, "failed to start wow\n");
+ goto error;
+ }
+
+ return ret;
+
+error:
+ clear_bit(RTW_FLAG_WOWLAN, rtwdev->flags);
+ return ret;
+}
+
+static int rtw_wow_stop(struct rtw_dev *rtwdev)
+{
+ int ret;
+
+ /* some HCI related registers will be reset after resume,
+ * so they need to be set up again.
+ */
+ ret = rtw_hci_setup(rtwdev);
+ if (ret) {
+ rtw_err(rtwdev, "failed to setup hci\n");
+ return ret;
+ }
+
+ ret = rtw_hci_start(rtwdev);
+ if (ret) {
+ rtw_err(rtwdev, "failed to start hci\n");
+ return ret;
+ }
+
+ ret = rtw_wow_fw_stop(rtwdev);
+ if (ret)
+ rtw_err(rtwdev, "failed to stop wowlan fw\n");
+
+ rtw_wow_bb_stop(rtwdev);
+
+ return ret;
+}
+
+static void rtw_wow_resume_trx(struct rtw_dev *rtwdev)
+{
+ rtw_wow_rx_dma_start(rtwdev);
+ rtw_wow_bb_start(rtwdev);
+ ieee80211_queue_delayed_work(rtwdev->hw, &rtwdev->watch_dog_work,
+ RTW_WATCH_DOG_DELAY_TIME);
+}
+
+static int rtw_wow_disable(struct rtw_dev *rtwdev)
+{
+ int ret;
+
+ clear_bit(RTW_FLAG_WOWLAN, rtwdev->flags);
+
+ ret = rtw_wow_stop(rtwdev);
+ if (ret) {
+ rtw_err(rtwdev, "failed to stop wow\n");
+ goto out;
+ }
+
+ ret = rtw_wow_swap_fw(rtwdev, RTW_NORMAL_FW);
+ if (ret) {
+ rtw_err(rtwdev, "failed to swap normal fw\n");
+ goto out;
+ }
+
+ ret = rtw_wow_dl_fw_rsvd_page(rtwdev);
+ if (ret)
+ rtw_err(rtwdev, "failed to download normal rsvd page\n");
+
+out:
+ rtw_wow_resume_trx(rtwdev);
+ return ret;
+}
+
+static void rtw_wow_vif_iter(void *data, u8 *mac, struct ieee80211_vif *vif)
+{
+ struct rtw_dev *rtwdev = data;
+ struct rtw_vif *rtwvif = (struct rtw_vif *)vif->drv_priv;
+ struct rtw_wow_param *rtw_wow = &rtwdev->wow;
+
+ /* The current wowlan code supports only one STATION vif, so stop
+ * the iteration once a suitable vif has been found.
+ */
+ if (rtw_wow->wow_vif || vif->type != NL80211_IFTYPE_STATION)
+ return;
+
+ switch (rtwvif->net_type) {
+ case RTW_NET_MGD_LINKED:
+ rtw_wow->wow_vif = vif;
+ break;
+ case RTW_NET_NO_LINK:
+ if (rtw_wow->pno_req.inited)
+ rtwdev->wow.wow_vif = vif;
+ break;
+ default:
+ break;
+ }
+}
+
+static int rtw_wow_set_wakeups(struct rtw_dev *rtwdev,
+ struct cfg80211_wowlan *wowlan)
+{
+ struct rtw_wow_param *rtw_wow = &rtwdev->wow;
+ struct rtw_wow_pattern *rtw_patterns = rtw_wow->patterns;
+ struct rtw_vif *rtwvif;
+ int i;
+
+ if (wowlan->disconnect)
+ set_bit(RTW_WOW_FLAG_EN_DISCONNECT, rtw_wow->flags);
+ if (wowlan->magic_pkt)
+ set_bit(RTW_WOW_FLAG_EN_MAGIC_PKT, rtw_wow->flags);
+ if (wowlan->gtk_rekey_failure)
+ set_bit(RTW_WOW_FLAG_EN_REKEY_PKT, rtw_wow->flags);
+
+ if (wowlan->nd_config)
+ rtw_wow_check_pno(rtwdev, wowlan->nd_config);
+
+ rtw_iterate_vifs_atomic(rtwdev, rtw_wow_vif_iter, rtwdev);
+ if (!rtw_wow->wow_vif)
+ return -EPERM;
+
+ rtwvif = (struct rtw_vif *)rtw_wow->wow_vif->drv_priv;
+ if (wowlan->n_patterns && wowlan->patterns) {
+ rtw_wow->pattern_cnt = wowlan->n_patterns;
+ for (i = 0; i < wowlan->n_patterns; i++)
+ rtw_wow_pattern_generate(rtwdev, rtwvif,
+ wowlan->patterns + i,
+ rtw_patterns + i);
+ }
+
+ return 0;
+}
+
+static void rtw_wow_clear_wakeups(struct rtw_dev *rtwdev)
+{
+ struct rtw_wow_param *rtw_wow = &rtwdev->wow;
+ struct rtw_pno_request *pno_req = &rtw_wow->pno_req;
+
+ if (pno_req->inited) {
+ kfree(pno_req->channels);
+ kfree(pno_req->match_sets);
+ }
+
+ memset(rtw_wow, 0, sizeof(rtwdev->wow));
+}
+
+int rtw_wow_suspend(struct rtw_dev *rtwdev, struct cfg80211_wowlan *wowlan)
+{
+ int ret = 0;
+
+ ret = rtw_wow_set_wakeups(rtwdev, wowlan);
+ if (ret) {
+ rtw_err(rtwdev, "failed to set wakeup event\n");
+ goto out;
+ }
+
+ ret = rtw_wow_leave_ps(rtwdev);
+ if (ret) {
+ rtw_err(rtwdev, "failed to leave ps from normal mode\n");
+ goto out;
+ }
+
+ ret = rtw_wow_enable(rtwdev);
+ if (ret) {
+ rtw_err(rtwdev, "failed to enable wow\n");
+ rtw_wow_restore_ps(rtwdev);
+ goto out;
+ }
+
+ ret = rtw_wow_enter_ps(rtwdev);
+ if (ret)
+ rtw_err(rtwdev, "failed to enter ps for wow\n");
+
+out:
+ return ret;
+}
+
+int rtw_wow_resume(struct rtw_dev *rtwdev)
+{
+ int ret;
+
+ /* If wowlan mode was never enabled, there is nothing to resume */
+ if (!test_bit(RTW_FLAG_WOWLAN, rtwdev->flags)) {
+ rtw_err(rtwdev, "wow is not enabled\n");
+ ret = -EPERM;
+ goto out;
+ }
+
+ ret = rtw_wow_leave_ps(rtwdev);
+ if (ret) {
+ rtw_err(rtwdev, "failed to leave ps from wowlan mode\n");
+ goto out;
+ }
+
+ rtw_wow_show_wakeup_reason(rtwdev);
+
+ ret = rtw_wow_disable(rtwdev);
+ if (ret) {
+ rtw_err(rtwdev, "failed to disable wow\n");
+ goto out;
+ }
+
+ ret = rtw_wow_restore_ps(rtwdev);
+ if (ret)
+ rtw_err(rtwdev, "failed to restore ps to normal mode\n");
+
+out:
+ rtw_wow_clear_wakeups(rtwdev);
+ return ret;
+}
diff --git a/drivers/net/wireless/realtek/rtw88/wow.h b/drivers/net/wireless/realtek/rtw88/wow.h
new file mode 100644
index 000000000000..289368a2cba4
--- /dev/null
+++ b/drivers/net/wireless/realtek/rtw88/wow.h
@@ -0,0 +1,58 @@
+/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
+/* Copyright(c) 2018-2019 Realtek Corporation
+ */
+
+#ifndef __RTW_WOW_H__
+#define __RTW_WOW_H__
+
+#define PNO_CHECK_BYTE 4
+
+enum rtw_wow_pattern_type {
+ RTW_PATTERN_BROADCAST = 0,
+ RTW_PATTERN_MULTICAST,
+ RTW_PATTERN_UNICAST,
+ RTW_PATTERN_VALID,
+ RTW_PATTERN_INVALID,
+};
+
+enum rtw_wake_reason {
+ RTW_WOW_RSN_RX_PTK_REKEY = 0x1,
+ RTW_WOW_RSN_RX_GTK_REKEY = 0x2,
+ RTW_WOW_RSN_RX_DEAUTH = 0x8,
+ RTW_WOW_RSN_DISCONNECT = 0x10,
+ RTW_WOW_RSN_RX_MAGIC_PKT = 0x21,
+ RTW_WOW_RSN_RX_PATTERN_MATCH = 0x23,
+ RTW_WOW_RSN_RX_NLO = 0x55,
+};
+
+struct rtw_fw_media_status_iter_data {
+ struct rtw_dev *rtwdev;
+ u8 connect;
+};
+
+struct rtw_fw_key_type_iter_data {
+ struct rtw_dev *rtwdev;
+ u8 group_key_type;
+ u8 pairwise_key_type;
+};
+
+static inline bool rtw_wow_mgd_linked(struct rtw_dev *rtwdev)
+{
+ struct ieee80211_vif *wow_vif = rtwdev->wow.wow_vif;
+ struct rtw_vif *rtwvif = (struct rtw_vif *)wow_vif->drv_priv;
+
+ return (rtwvif->net_type == RTW_NET_MGD_LINKED);
+}
+
+static inline bool rtw_wow_no_link(struct rtw_dev *rtwdev)
+{
+ struct ieee80211_vif *wow_vif = rtwdev->wow.wow_vif;
+ struct rtw_vif *rtwvif = (struct rtw_vif *)wow_vif->drv_priv;
+
+ return (rtwvif->net_type == RTW_NET_NO_LINK);
+}
+
+int rtw_wow_suspend(struct rtw_dev *rtwdev, struct cfg80211_wowlan *wowlan);
+int rtw_wow_resume(struct rtw_dev *rtwdev);
+
+#endif
diff --git a/drivers/net/wireless/rsi/rsi_91x_hal.c b/drivers/net/wireless/rsi/rsi_91x_hal.c
index f84250bdb8cf..6f8d5f9a9f7e 100644
--- a/drivers/net/wireless/rsi/rsi_91x_hal.c
+++ b/drivers/net/wireless/rsi/rsi_91x_hal.c
@@ -622,6 +622,7 @@ static int bl_cmd(struct rsi_hw *adapter, u8 cmd, u8 exp_resp, char *str)
bl_start_cmd_timer(adapter, timeout);
status = bl_write_cmd(adapter, cmd, exp_resp, &regout_val);
if (status < 0) {
+ bl_stop_cmd_timer(adapter);
rsi_dbg(ERR_ZONE,
"%s: Command %s (%0x) writing failed..\n",
__func__, str, cmd);
@@ -737,10 +738,9 @@ static int ping_pong_write(struct rsi_hw *adapter, u8 cmd, u8 *addr, u32 size)
}
status = bl_cmd(adapter, cmd_req, cmd_resp, str);
- if (status) {
- bl_stop_cmd_timer(adapter);
+ if (status)
return status;
- }
+
return 0;
}
@@ -828,10 +828,9 @@ static int auto_fw_upgrade(struct rsi_hw *adapter, u8 *flash_content,
status = bl_cmd(adapter, EOF_REACHED, FW_LOADING_SUCCESSFUL,
"EOF_REACHED");
- if (status) {
- bl_stop_cmd_timer(adapter);
+ if (status)
return status;
- }
+
rsi_dbg(INFO_ZONE, "FW loading is done and FW is running..\n");
return 0;
}
@@ -849,6 +848,7 @@ static int rsi_hal_prepare_fwload(struct rsi_hw *adapter)
&regout_val,
RSI_COMMON_REG_SIZE);
if (status < 0) {
+ bl_stop_cmd_timer(adapter);
rsi_dbg(ERR_ZONE,
"%s: REGOUT read failed\n", __func__);
return status;
diff --git a/drivers/net/wireless/rsi/rsi_91x_usb.c b/drivers/net/wireless/rsi/rsi_91x_usb.c
index 53f41fc2cadf..a62d41c0ccbc 100644
--- a/drivers/net/wireless/rsi/rsi_91x_usb.c
+++ b/drivers/net/wireless/rsi/rsi_91x_usb.c
@@ -16,6 +16,7 @@
*/
#include <linux/module.h>
+#include <linux/types.h>
#include <net/rsi_91x.h>
#include "rsi_usb.h"
#include "rsi_hal.h"
@@ -29,7 +30,7 @@ MODULE_PARM_DESC(dev_oper_mode,
"9[Wi-Fi STA + BT LE], 13[Wi-Fi STA + BT classic + BT LE]\n"
"6[AP + BT classic], 14[AP + BT classic + BT LE]");
-static int rsi_rx_urb_submit(struct rsi_hw *adapter, u8 ep_num);
+static int rsi_rx_urb_submit(struct rsi_hw *adapter, u8 ep_num, gfp_t flags);
/**
* rsi_usb_card_write() - This function writes to the USB Card.
@@ -117,7 +118,7 @@ static int rsi_find_bulk_in_and_out_endpoints(struct usb_interface *interface,
__le16 buffer_size;
int ii, bin_found = 0, bout_found = 0;
- iface_desc = &(interface->altsetting[0]);
+ iface_desc = interface->cur_altsetting;
for (ii = 0; ii < iface_desc->desc.bNumEndpoints; ++ii) {
endpoint = &(iface_desc->endpoint[ii].desc);
@@ -148,9 +149,17 @@ static int rsi_find_bulk_in_and_out_endpoints(struct usb_interface *interface,
break;
}
- if (!(dev->bulkin_endpoint_addr[0]) &&
- dev->bulkout_endpoint_addr[0])
+ if (!(dev->bulkin_endpoint_addr[0] && dev->bulkout_endpoint_addr[0])) {
+ dev_err(&interface->dev, "missing wlan bulk endpoints\n");
return -EINVAL;
+ }
+
+ if (adapter->priv->coex_mode > 1) {
+ if (!dev->bulkin_endpoint_addr[1]) {
+ dev_err(&interface->dev, "missing bt bulk-in endpoint\n");
+ return -EINVAL;
+ }
+ }
return 0;
}
@@ -285,20 +294,29 @@ static void rsi_rx_done_handler(struct urb *urb)
status = 0;
out:
- if (rsi_rx_urb_submit(dev->priv, rx_cb->ep_num))
+ if (rsi_rx_urb_submit(dev->priv, rx_cb->ep_num, GFP_ATOMIC))
rsi_dbg(ERR_ZONE, "%s: Failed in urb submission", __func__);
if (status)
dev_kfree_skb(rx_cb->rx_skb);
}
+static void rsi_rx_urb_kill(struct rsi_hw *adapter, u8 ep_num)
+{
+ struct rsi_91x_usbdev *dev = (struct rsi_91x_usbdev *)adapter->rsi_dev;
+ struct rx_usb_ctrl_block *rx_cb = &dev->rx_cb[ep_num - 1];
+ struct urb *urb = rx_cb->rx_urb;
+
+ usb_kill_urb(urb);
+}
+
/**
* rsi_rx_urb_submit() - This function submits the given URB to the USB stack.
* @adapter: Pointer to the adapter structure.
*
* Return: 0 on success, a negative error code on failure.
*/
-static int rsi_rx_urb_submit(struct rsi_hw *adapter, u8 ep_num)
+static int rsi_rx_urb_submit(struct rsi_hw *adapter, u8 ep_num, gfp_t mem_flags)
{
struct rsi_91x_usbdev *dev = (struct rsi_91x_usbdev *)adapter->rsi_dev;
struct rx_usb_ctrl_block *rx_cb = &dev->rx_cb[ep_num - 1];
@@ -328,9 +346,11 @@ static int rsi_rx_urb_submit(struct rsi_hw *adapter, u8 ep_num)
rsi_rx_done_handler,
rx_cb);
- status = usb_submit_urb(urb, GFP_KERNEL);
- if (status)
+ status = usb_submit_urb(urb, mem_flags);
+ if (status) {
rsi_dbg(ERR_ZONE, "%s: Failed in urb submission\n", __func__);
+ dev_kfree_skb(skb);
+ }
return status;
}
@@ -816,17 +836,20 @@ static int rsi_probe(struct usb_interface *pfunction,
rsi_dbg(INIT_ZONE, "%s: Device Init Done\n", __func__);
}
- status = rsi_rx_urb_submit(adapter, WLAN_EP);
+ status = rsi_rx_urb_submit(adapter, WLAN_EP, GFP_KERNEL);
if (status)
goto err1;
if (adapter->priv->coex_mode > 1) {
- status = rsi_rx_urb_submit(adapter, BT_EP);
+ status = rsi_rx_urb_submit(adapter, BT_EP, GFP_KERNEL);
if (status)
- goto err1;
+ goto err_kill_wlan_urb;
}
return 0;
+
+err_kill_wlan_urb:
+ rsi_rx_urb_kill(adapter, WLAN_EP);
err1:
rsi_deinit_usb_interface(adapter);
err:
@@ -857,6 +880,10 @@ static void rsi_disconnect(struct usb_interface *pfunction)
adapter->priv->bt_adapter = NULL;
}
+ if (adapter->priv->coex_mode > 1)
+ rsi_rx_urb_kill(adapter, BT_EP);
+ rsi_rx_urb_kill(adapter, WLAN_EP);
+
rsi_reset_card(adapter);
rsi_deinit_usb_interface(adapter);
rsi_91x_deinit(adapter);
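The gfp_t plumbing above follows the usual USB rule: rsi_rx_done_handler()
runs in URB-completion (atomic) context, so the resubmit path must allocate
with GFP_ATOMIC, while the initial submissions from rsi_probe() run in
process context and may sleep with GFP_KERNEL. Passing the flags as a
parameter lets one submit function serve both callers; the added
dev_kfree_skb() on submission failure also closes a leak, since the receive
skb allocated for the URB would otherwise be orphaned.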
diff --git a/drivers/net/wireless/st/cw1200/txrx.c b/drivers/net/wireless/st/cw1200/txrx.c
index 2dfcdb145944..400dd585916b 100644
--- a/drivers/net/wireless/st/cw1200/txrx.c
+++ b/drivers/net/wireless/st/cw1200/txrx.c
@@ -715,7 +715,7 @@ void cw1200_tx(struct ieee80211_hw *dev,
};
struct ieee80211_sta *sta;
struct wsm_tx *wsm;
- bool tid_update = 0;
+ bool tid_update = false;
u8 flags = 0;
int ret;
diff --git a/drivers/net/wireless/ti/wlcore/cmd.c b/drivers/net/wireless/ti/wlcore/cmd.c
index 2a48fc6ad56c..6ef8fc9ae627 100644
--- a/drivers/net/wireless/ti/wlcore/cmd.c
+++ b/drivers/net/wireless/ti/wlcore/cmd.c
@@ -1429,7 +1429,7 @@ out:
int wl1271_cmd_set_ap_key(struct wl1271 *wl, struct wl12xx_vif *wlvif,
u16 action, u8 id, u8 key_type,
u8 key_size, const u8 *key, u8 hlid, u32 tx_seq_32,
- u16 tx_seq_16)
+ u16 tx_seq_16, bool is_pairwise)
{
struct wl1271_cmd_set_keys *cmd;
int ret = 0;
@@ -1444,8 +1444,10 @@ int wl1271_cmd_set_ap_key(struct wl1271 *wl, struct wl12xx_vif *wlvif,
lid_type = WEP_DEFAULT_LID_TYPE;
else
lid_type = BROADCAST_LID_TYPE;
- } else {
+ } else if (is_pairwise) {
lid_type = UNICAST_LID_TYPE;
+ } else {
+ lid_type = BROADCAST_LID_TYPE;
}
wl1271_debug(DEBUG_CRYPT, "ap key action: %d id: %d lid: %d type: %d"
diff --git a/drivers/net/wireless/ti/wlcore/cmd.h b/drivers/net/wireless/ti/wlcore/cmd.h
index 084375ba4abf..bfad7b5a1ac6 100644
--- a/drivers/net/wireless/ti/wlcore/cmd.h
+++ b/drivers/net/wireless/ti/wlcore/cmd.h
@@ -65,7 +65,7 @@ int wl1271_cmd_set_sta_key(struct wl1271 *wl, struct wl12xx_vif *wlvif,
int wl1271_cmd_set_ap_key(struct wl1271 *wl, struct wl12xx_vif *wlvif,
u16 action, u8 id, u8 key_type,
u8 key_size, const u8 *key, u8 hlid, u32 tx_seq_32,
- u16 tx_seq_16);
+ u16 tx_seq_16, bool is_pairwise);
int wl12xx_cmd_set_peer_state(struct wl1271 *wl, struct wl12xx_vif *wlvif,
u8 hlid);
int wl12xx_roc(struct wl1271 *wl, struct wl12xx_vif *wlvif, u8 role_id,
diff --git a/drivers/net/wireless/ti/wlcore/main.c b/drivers/net/wireless/ti/wlcore/main.c
index e994995d79ab..ed049c9f7e29 100644
--- a/drivers/net/wireless/ti/wlcore/main.c
+++ b/drivers/net/wireless/ti/wlcore/main.c
@@ -3273,7 +3273,7 @@ out:
static int wl1271_record_ap_key(struct wl1271 *wl, struct wl12xx_vif *wlvif,
u8 id, u8 key_type, u8 key_size,
const u8 *key, u8 hlid, u32 tx_seq_32,
- u16 tx_seq_16)
+ u16 tx_seq_16, bool is_pairwise)
{
struct wl1271_ap_key *ap_key;
int i;
@@ -3311,6 +3311,7 @@ static int wl1271_record_ap_key(struct wl1271 *wl, struct wl12xx_vif *wlvif,
ap_key->hlid = hlid;
ap_key->tx_seq_32 = tx_seq_32;
ap_key->tx_seq_16 = tx_seq_16;
+ ap_key->is_pairwise = is_pairwise;
wlvif->ap.recorded_keys[i] = ap_key;
return 0;
@@ -3346,7 +3347,7 @@ static int wl1271_ap_init_hwenc(struct wl1271 *wl, struct wl12xx_vif *wlvif)
key->id, key->key_type,
key->key_size, key->key,
hlid, key->tx_seq_32,
- key->tx_seq_16);
+ key->tx_seq_16, key->is_pairwise);
if (ret < 0)
goto out;
@@ -3369,7 +3370,8 @@ out:
static int wl1271_set_key(struct wl1271 *wl, struct wl12xx_vif *wlvif,
u16 action, u8 id, u8 key_type,
u8 key_size, const u8 *key, u32 tx_seq_32,
- u16 tx_seq_16, struct ieee80211_sta *sta)
+ u16 tx_seq_16, struct ieee80211_sta *sta,
+ bool is_pairwise)
{
int ret;
bool is_ap = (wlvif->bss_type == BSS_TYPE_AP_BSS);
@@ -3396,12 +3398,12 @@ static int wl1271_set_key(struct wl1271 *wl, struct wl12xx_vif *wlvif,
ret = wl1271_record_ap_key(wl, wlvif, id,
key_type, key_size,
key, hlid, tx_seq_32,
- tx_seq_16);
+ tx_seq_16, is_pairwise);
} else {
ret = wl1271_cmd_set_ap_key(wl, wlvif, action,
id, key_type, key_size,
key, hlid, tx_seq_32,
- tx_seq_16);
+ tx_seq_16, is_pairwise);
}
if (ret < 0)
@@ -3501,6 +3503,7 @@ int wlcore_set_key(struct wl1271 *wl, enum set_key_cmd cmd,
u16 tx_seq_16 = 0;
u8 key_type;
u8 hlid;
+ bool is_pairwise;
wl1271_debug(DEBUG_MAC80211, "mac80211 set key");
@@ -3550,12 +3553,14 @@ int wlcore_set_key(struct wl1271 *wl, enum set_key_cmd cmd,
return -EOPNOTSUPP;
}
+ is_pairwise = key_conf->flags & IEEE80211_KEY_FLAG_PAIRWISE;
+
switch (cmd) {
case SET_KEY:
ret = wl1271_set_key(wl, wlvif, KEY_ADD_OR_REPLACE,
key_conf->keyidx, key_type,
key_conf->keylen, key_conf->key,
- tx_seq_32, tx_seq_16, sta);
+ tx_seq_32, tx_seq_16, sta, is_pairwise);
if (ret < 0) {
wl1271_error("Could not add or replace key");
return ret;
@@ -3581,7 +3586,7 @@ int wlcore_set_key(struct wl1271 *wl, enum set_key_cmd cmd,
ret = wl1271_set_key(wl, wlvif, KEY_REMOVE,
key_conf->keyidx, key_type,
key_conf->keylen, key_conf->key,
- 0, 0, sta);
+ 0, 0, sta, is_pairwise);
if (ret < 0) {
wl1271_error("Could not remove key");
return ret;
@@ -6223,6 +6228,7 @@ static int wl1271_init_ieee80211(struct wl1271 *wl)
ieee80211_hw_set(wl->hw, SUPPORT_FAST_XMIT);
ieee80211_hw_set(wl->hw, CHANCTX_STA_CSA);
+ ieee80211_hw_set(wl->hw, SUPPORTS_PER_STA_GTK);
ieee80211_hw_set(wl->hw, QUEUE_CONTROL);
ieee80211_hw_set(wl->hw, TX_AMPDU_SETUP_IN_HW);
ieee80211_hw_set(wl->hw, AMPDU_AGGREGATION);
@@ -6267,7 +6273,8 @@ static int wl1271_init_ieee80211(struct wl1271 *wl)
wl->hw->wiphy->flags |= WIPHY_FLAG_AP_UAPSD |
WIPHY_FLAG_HAS_REMAIN_ON_CHANNEL |
- WIPHY_FLAG_HAS_CHANNEL_SWITCH;
+ WIPHY_FLAG_HAS_CHANNEL_SWITCH |
+ WIPHY_FLAG_IBSS_RSN;
wl->hw->wiphy->features |= NL80211_FEATURE_AP_SCAN;
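The is_pairwise plumbing ties into the two capability flags added just
above: with SUPPORTS_PER_STA_GTK and WIPHY_FLAG_IBSS_RSN set, mac80211 may
install a group key that carries a station pointer (a per-station GTK in an
IBSS), so a non-NULL sta no longer implies a unicast key. The LID type must
therefore be derived from IEEE80211_KEY_FLAG_PAIRWISE, which wlcore_set_key()
now forwards through every path, including keys recorded before the AP role
is started.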
diff --git a/drivers/net/wireless/ti/wlcore/wlcore_i.h b/drivers/net/wireless/ti/wlcore/wlcore_i.h
index 6fab60b0e367..eefae3f867b9 100644
--- a/drivers/net/wireless/ti/wlcore/wlcore_i.h
+++ b/drivers/net/wireless/ti/wlcore/wlcore_i.h
@@ -212,6 +212,7 @@ struct wl1271_ap_key {
u8 hlid;
u32 tx_seq_32;
u16 tx_seq_16;
+ bool is_pairwise;
};
enum wl12xx_flags {
diff --git a/drivers/net/wireless/wl3501_cs.c b/drivers/net/wireless/wl3501_cs.c
index 007bf6803293..686161db8706 100644
--- a/drivers/net/wireless/wl3501_cs.c
+++ b/drivers/net/wireless/wl3501_cs.c
@@ -1285,7 +1285,7 @@ out:
return rc;
}
-static void wl3501_tx_timeout(struct net_device *dev)
+static void wl3501_tx_timeout(struct net_device *dev, unsigned int txqueue)
{
struct net_device_stats *stats = &dev->stats;
int rc;
diff --git a/drivers/net/wireless/zydas/zd1201.c b/drivers/net/wireless/zydas/zd1201.c
index 0db7362bedb4..41641fc2be74 100644
--- a/drivers/net/wireless/zydas/zd1201.c
+++ b/drivers/net/wireless/zydas/zd1201.c
@@ -830,7 +830,7 @@ static netdev_tx_t zd1201_hard_start_xmit(struct sk_buff *skb,
return NETDEV_TX_OK;
}
-static void zd1201_tx_timeout(struct net_device *dev)
+static void zd1201_tx_timeout(struct net_device *dev, unsigned int txqueue)
{
struct zd1201 *zd = netdev_priv(dev);
diff --git a/drivers/net/wireless/zydas/zd1211rw/zd_usb.c b/drivers/net/wireless/zydas/zd1211rw/zd_usb.c
index 7b5c2fe5bd4d..8ff0374126e4 100644
--- a/drivers/net/wireless/zydas/zd1211rw/zd_usb.c
+++ b/drivers/net/wireless/zydas/zd1211rw/zd_usb.c
@@ -1263,7 +1263,7 @@ static void print_id(struct usb_device *udev)
static int eject_installer(struct usb_interface *intf)
{
struct usb_device *udev = interface_to_usbdev(intf);
- struct usb_host_interface *iface_desc = &intf->altsetting[0];
+ struct usb_host_interface *iface_desc = intf->cur_altsetting;
struct usb_endpoint_descriptor *endpoint;
unsigned char *cmd;
u8 bulk_out_ep;
diff --git a/drivers/net/xen-netback/hash.c b/drivers/net/xen-netback/hash.c
index 10d580c3dea3..6b7532f7c936 100644
--- a/drivers/net/xen-netback/hash.c
+++ b/drivers/net/xen-netback/hash.c
@@ -51,7 +51,8 @@ static void xenvif_add_hash(struct xenvif *vif, const u8 *tag,
found = false;
oldest = NULL;
- list_for_each_entry_rcu(entry, &vif->hash.cache.list, link) {
+ list_for_each_entry_rcu(entry, &vif->hash.cache.list, link,
+ lockdep_is_held(&vif->hash.cache.lock)) {
/* Make sure we don't add duplicate entries */
if (entry->len == len &&
memcmp(entry->tag, tag, len) == 0)
@@ -102,7 +103,8 @@ static void xenvif_flush_hash(struct xenvif *vif)
spin_lock_irqsave(&vif->hash.cache.lock, flags);
- list_for_each_entry_rcu(entry, &vif->hash.cache.list, link) {
+ list_for_each_entry_rcu(entry, &vif->hash.cache.list, link,
+ lockdep_is_held(&vif->hash.cache.lock)) {
list_del_rcu(&entry->link);
vif->hash.cache.count--;
kfree_rcu(entry, rcu);
diff --git a/drivers/net/xen-netback/interface.c b/drivers/net/xen-netback/interface.c
index f15ba3de6195..0c8a02a1ead7 100644
--- a/drivers/net/xen-netback/interface.c
+++ b/drivers/net/xen-netback/interface.c
@@ -585,6 +585,7 @@ int xenvif_connect_ctrl(struct xenvif *vif, grant_ref_t ring_ref,
struct net_device *dev = vif->dev;
void *addr;
struct xen_netif_ctrl_sring *shared;
+ RING_IDX rsp_prod, req_prod;
int err;
err = xenbus_map_ring_valloc(xenvif_to_xenbus_device(vif),
@@ -593,7 +594,14 @@ int xenvif_connect_ctrl(struct xenvif *vif, grant_ref_t ring_ref,
goto err;
shared = (struct xen_netif_ctrl_sring *)addr;
- BACK_RING_INIT(&vif->ctrl, shared, XEN_PAGE_SIZE);
+ rsp_prod = READ_ONCE(shared->rsp_prod);
+ req_prod = READ_ONCE(shared->req_prod);
+
+ BACK_RING_ATTACH(&vif->ctrl, shared, rsp_prod, XEN_PAGE_SIZE);
+
+ err = -EIO;
+ if (req_prod - rsp_prod > RING_SIZE(&vif->ctrl))
+ goto err_unmap;
err = bind_interdomain_evtchn_to_irq(vif->domid, evtchn);
if (err < 0)
diff --git a/drivers/net/xen-netback/netback.c b/drivers/net/xen-netback/netback.c
index 0020b2e8c279..315dfc6ea297 100644
--- a/drivers/net/xen-netback/netback.c
+++ b/drivers/net/xen-netback/netback.c
@@ -1453,7 +1453,7 @@ int xenvif_map_frontend_data_rings(struct xenvif_queue *queue,
void *addr;
struct xen_netif_tx_sring *txs;
struct xen_netif_rx_sring *rxs;
-
+ RING_IDX rsp_prod, req_prod;
int err = -ENOMEM;
err = xenbus_map_ring_valloc(xenvif_to_xenbus_device(queue->vif),
@@ -1462,7 +1462,14 @@ int xenvif_map_frontend_data_rings(struct xenvif_queue *queue,
goto err;
txs = (struct xen_netif_tx_sring *)addr;
- BACK_RING_INIT(&queue->tx, txs, XEN_PAGE_SIZE);
+ rsp_prod = READ_ONCE(txs->rsp_prod);
+ req_prod = READ_ONCE(txs->req_prod);
+
+ BACK_RING_ATTACH(&queue->tx, txs, rsp_prod, XEN_PAGE_SIZE);
+
+ err = -EIO;
+ if (req_prod - rsp_prod > RING_SIZE(&queue->tx))
+ goto err;
err = xenbus_map_ring_valloc(xenvif_to_xenbus_device(queue->vif),
&rx_ring_ref, 1, &addr);
@@ -1470,7 +1477,14 @@ int xenvif_map_frontend_data_rings(struct xenvif_queue *queue,
goto err;
rxs = (struct xen_netif_rx_sring *)addr;
- BACK_RING_INIT(&queue->rx, rxs, XEN_PAGE_SIZE);
+ rsp_prod = READ_ONCE(rxs->rsp_prod);
+ req_prod = READ_ONCE(rxs->req_prod);
+
+ BACK_RING_ATTACH(&queue->rx, rxs, rsp_prod, XEN_PAGE_SIZE);
+
+ err = -EIO;
+ if (req_prod - rsp_prod > RING_SIZE(&queue->rx))
+ goto err;
return 0;
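All three BACK_RING_ATTACH conversions above follow the same hardening recipe: snapshot both producer indices from the frontend-writable shared page exactly once with READ_ONCE(), attach at the response producer, and reject the ring with -EIO if the snapshot implies more in-flight requests than the ring can hold. A hedged userspace sketch of just the index check; unsigned subtraction keeps it correct across wrap-around:

#include <stdbool.h>
#include <stdint.h>

typedef uint32_t RING_IDX;

/* ring_size stands in for the RING_SIZE() of the attached ring */
static bool ring_indices_sane(RING_IDX req_prod, RING_IDX rsp_prod,
			      RING_IDX ring_size)
{
	/* req_prod may have wrapped past rsp_prod; the gap between
	 * them must still fit in the number of ring slots */
	return (RING_IDX)(req_prod - rsp_prod) <= ring_size;
}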
diff --git a/drivers/net/xen-netback/xenbus.c b/drivers/net/xen-netback/xenbus.c
index f533b7372d59..286054b60d47 100644
--- a/drivers/net/xen-netback/xenbus.c
+++ b/drivers/net/xen-netback/xenbus.c
@@ -195,185 +195,6 @@ static void xenvif_debugfs_delif(struct xenvif *vif)
}
#endif /* CONFIG_DEBUG_FS */
-static int netback_remove(struct xenbus_device *dev)
-{
- struct backend_info *be = dev_get_drvdata(&dev->dev);
-
- set_backend_state(be, XenbusStateClosed);
-
- unregister_hotplug_status_watch(be);
- if (be->vif) {
- kobject_uevent(&dev->dev.kobj, KOBJ_OFFLINE);
- xen_unregister_watchers(be->vif);
- xenbus_rm(XBT_NIL, dev->nodename, "hotplug-status");
- xenvif_free(be->vif);
- be->vif = NULL;
- }
- kfree(be->hotplug_script);
- kfree(be);
- dev_set_drvdata(&dev->dev, NULL);
- return 0;
-}
-
-
-/**
- * Entry point to this code when a new device is created. Allocate the basic
- * structures and switch to InitWait.
- */
-static int netback_probe(struct xenbus_device *dev,
- const struct xenbus_device_id *id)
-{
- const char *message;
- struct xenbus_transaction xbt;
- int err;
- int sg;
- const char *script;
- struct backend_info *be = kzalloc(sizeof(struct backend_info),
- GFP_KERNEL);
- if (!be) {
- xenbus_dev_fatal(dev, -ENOMEM,
- "allocating backend structure");
- return -ENOMEM;
- }
-
- be->dev = dev;
- dev_set_drvdata(&dev->dev, be);
-
- be->state = XenbusStateInitialising;
- err = xenbus_switch_state(dev, XenbusStateInitialising);
- if (err)
- goto fail;
-
- sg = 1;
-
- do {
- err = xenbus_transaction_start(&xbt);
- if (err) {
- xenbus_dev_fatal(dev, err, "starting transaction");
- goto fail;
- }
-
- err = xenbus_printf(xbt, dev->nodename, "feature-sg", "%d", sg);
- if (err) {
- message = "writing feature-sg";
- goto abort_transaction;
- }
-
- err = xenbus_printf(xbt, dev->nodename, "feature-gso-tcpv4",
- "%d", sg);
- if (err) {
- message = "writing feature-gso-tcpv4";
- goto abort_transaction;
- }
-
- err = xenbus_printf(xbt, dev->nodename, "feature-gso-tcpv6",
- "%d", sg);
- if (err) {
- message = "writing feature-gso-tcpv6";
- goto abort_transaction;
- }
-
- /* We support partial checksum setup for IPv6 packets */
- err = xenbus_printf(xbt, dev->nodename,
- "feature-ipv6-csum-offload",
- "%d", 1);
- if (err) {
- message = "writing feature-ipv6-csum-offload";
- goto abort_transaction;
- }
-
- /* We support rx-copy path. */
- err = xenbus_printf(xbt, dev->nodename,
- "feature-rx-copy", "%d", 1);
- if (err) {
- message = "writing feature-rx-copy";
- goto abort_transaction;
- }
-
- /*
- * We don't support rx-flip path (except old guests who don't
- * grok this feature flag).
- */
- err = xenbus_printf(xbt, dev->nodename,
- "feature-rx-flip", "%d", 0);
- if (err) {
- message = "writing feature-rx-flip";
- goto abort_transaction;
- }
-
- /* We support dynamic multicast-control. */
- err = xenbus_printf(xbt, dev->nodename,
- "feature-multicast-control", "%d", 1);
- if (err) {
- message = "writing feature-multicast-control";
- goto abort_transaction;
- }
-
- err = xenbus_printf(xbt, dev->nodename,
- "feature-dynamic-multicast-control",
- "%d", 1);
- if (err) {
- message = "writing feature-dynamic-multicast-control";
- goto abort_transaction;
- }
-
- err = xenbus_transaction_end(xbt, 0);
- } while (err == -EAGAIN);
-
- if (err) {
- xenbus_dev_fatal(dev, err, "completing transaction");
- goto fail;
- }
-
- /*
- * Split event channels support, this is optional so it is not
- * put inside the above loop.
- */
- err = xenbus_printf(XBT_NIL, dev->nodename,
- "feature-split-event-channels",
- "%u", separate_tx_rx_irq);
- if (err)
- pr_debug("Error writing feature-split-event-channels\n");
-
- /* Multi-queue support: This is an optional feature. */
- err = xenbus_printf(XBT_NIL, dev->nodename,
- "multi-queue-max-queues", "%u", xenvif_max_queues);
- if (err)
- pr_debug("Error writing multi-queue-max-queues\n");
-
- err = xenbus_printf(XBT_NIL, dev->nodename,
- "feature-ctrl-ring",
- "%u", true);
- if (err)
- pr_debug("Error writing feature-ctrl-ring\n");
-
- script = xenbus_read(XBT_NIL, dev->nodename, "script", NULL);
- if (IS_ERR(script)) {
- err = PTR_ERR(script);
- xenbus_dev_fatal(dev, err, "reading script");
- goto fail;
- }
-
- be->hotplug_script = script;
-
-
- /* This kicks hotplug scripts, so do it immediately. */
- err = backend_create_xenvif(be);
- if (err)
- goto fail;
-
- return 0;
-
-abort_transaction:
- xenbus_transaction_end(xbt, 1);
- xenbus_dev_fatal(dev, err, "%s", message);
-fail:
- pr_debug("failed\n");
- netback_remove(dev);
- return err;
-}
-
-
/*
* Handle the creation of the hotplug script environment. We add the script
* and vif variables to the environment, for the benefit of the vif-* hotplug
@@ -827,6 +648,7 @@ static void hotplug_status_changed(struct xenbus_watch *watch,
/* Not interested in this watch anymore. */
unregister_hotplug_status_watch(be);
+ xenbus_rm(XBT_NIL, be->dev->nodename, "hotplug-status");
}
kfree(str);
}
@@ -1128,6 +950,174 @@ static int read_xenbus_vif_flags(struct backend_info *be)
return 0;
}
+static int netback_remove(struct xenbus_device *dev)
+{
+ struct backend_info *be = dev_get_drvdata(&dev->dev);
+
+ unregister_hotplug_status_watch(be);
+ if (be->vif) {
+ kobject_uevent(&dev->dev.kobj, KOBJ_OFFLINE);
+ backend_disconnect(be);
+ xenvif_free(be->vif);
+ be->vif = NULL;
+ }
+ kfree(be->hotplug_script);
+ kfree(be);
+ dev_set_drvdata(&dev->dev, NULL);
+ return 0;
+}
+
+/**
+ * Entry point to this code when a new device is created. Allocate the basic
+ * structures and switch to InitWait.
+ */
+static int netback_probe(struct xenbus_device *dev,
+ const struct xenbus_device_id *id)
+{
+ const char *message;
+ struct xenbus_transaction xbt;
+ int err;
+ int sg;
+ const char *script;
+ struct backend_info *be = kzalloc(sizeof(*be), GFP_KERNEL);
+
+ if (!be) {
+ xenbus_dev_fatal(dev, -ENOMEM,
+ "allocating backend structure");
+ return -ENOMEM;
+ }
+
+ be->dev = dev;
+ dev_set_drvdata(&dev->dev, be);
+
+ sg = 1;
+
+ do {
+ err = xenbus_transaction_start(&xbt);
+ if (err) {
+ xenbus_dev_fatal(dev, err, "starting transaction");
+ goto fail;
+ }
+
+ err = xenbus_printf(xbt, dev->nodename, "feature-sg", "%d", sg);
+ if (err) {
+ message = "writing feature-sg";
+ goto abort_transaction;
+ }
+
+ err = xenbus_printf(xbt, dev->nodename, "feature-gso-tcpv4",
+ "%d", sg);
+ if (err) {
+ message = "writing feature-gso-tcpv4";
+ goto abort_transaction;
+ }
+
+ err = xenbus_printf(xbt, dev->nodename, "feature-gso-tcpv6",
+ "%d", sg);
+ if (err) {
+ message = "writing feature-gso-tcpv6";
+ goto abort_transaction;
+ }
+
+ /* We support partial checksum setup for IPv6 packets */
+ err = xenbus_printf(xbt, dev->nodename,
+ "feature-ipv6-csum-offload",
+ "%d", 1);
+ if (err) {
+ message = "writing feature-ipv6-csum-offload";
+ goto abort_transaction;
+ }
+
+ /* We support rx-copy path. */
+ err = xenbus_printf(xbt, dev->nodename,
+ "feature-rx-copy", "%d", 1);
+ if (err) {
+ message = "writing feature-rx-copy";
+ goto abort_transaction;
+ }
+
+ /* We don't support rx-flip path (except old guests who
+ * don't grok this feature flag).
+ */
+ err = xenbus_printf(xbt, dev->nodename,
+ "feature-rx-flip", "%d", 0);
+ if (err) {
+ message = "writing feature-rx-flip";
+ goto abort_transaction;
+ }
+
+ /* We support dynamic multicast-control. */
+ err = xenbus_printf(xbt, dev->nodename,
+ "feature-multicast-control", "%d", 1);
+ if (err) {
+ message = "writing feature-multicast-control";
+ goto abort_transaction;
+ }
+
+ err = xenbus_printf(xbt, dev->nodename,
+ "feature-dynamic-multicast-control",
+ "%d", 1);
+ if (err) {
+ message = "writing feature-dynamic-multicast-control";
+ goto abort_transaction;
+ }
+
+ err = xenbus_transaction_end(xbt, 0);
+ } while (err == -EAGAIN);
+
+ if (err) {
+ xenbus_dev_fatal(dev, err, "completing transaction");
+ goto fail;
+ }
+
+ /* Split event channels support, this is optional so it is not
+ * put inside the above loop.
+ */
+ err = xenbus_printf(XBT_NIL, dev->nodename,
+ "feature-split-event-channels",
+ "%u", separate_tx_rx_irq);
+ if (err)
+ pr_debug("Error writing feature-split-event-channels\n");
+
+ /* Multi-queue support: This is an optional feature. */
+ err = xenbus_printf(XBT_NIL, dev->nodename,
+ "multi-queue-max-queues", "%u", xenvif_max_queues);
+ if (err)
+ pr_debug("Error writing multi-queue-max-queues\n");
+
+ err = xenbus_printf(XBT_NIL, dev->nodename,
+ "feature-ctrl-ring",
+ "%u", true);
+ if (err)
+ pr_debug("Error writing feature-ctrl-ring\n");
+
+ backend_switch_state(be, XenbusStateInitWait);
+
+ script = xenbus_read(XBT_NIL, dev->nodename, "script", NULL);
+ if (IS_ERR(script)) {
+ err = PTR_ERR(script);
+ xenbus_dev_fatal(dev, err, "reading script");
+ goto fail;
+ }
+
+ be->hotplug_script = script;
+
+ /* This kicks hotplug scripts, so do it immediately. */
+ err = backend_create_xenvif(be);
+ if (err)
+ goto fail;
+
+ return 0;
+
+abort_transaction:
+ xenbus_transaction_end(xbt, 1);
+ xenbus_dev_fatal(dev, err, "%s", message);
+fail:
+ pr_debug("failed\n");
+ netback_remove(dev);
+ return err;
+}
+
static const struct xenbus_device_id netback_ids[] = {
{ "vif" },
{ "" }
@@ -1139,6 +1129,7 @@ static struct xenbus_driver netback_driver = {
.remove = netback_remove,
.uevent = netback_uevent,
.otherend_changed = frontend_changed,
+ .allow_rebind = true,
};
int xenvif_xenbus_init(void)
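The .allow_rebind flag is the point of the reshuffle above: it tells the xenbus core to let this backend bind to a device whose state is not Closed, which is exactly what an unbind/bind cycle on a live guest produces. Moving the feature writes and the InitWait transition into netback_probe() means a rebound backend republishes everything the frontend needs. A hedged sketch of a backend opting in (all example_* names are hypothetical):

#include <xen/xenbus.h>

static const struct xenbus_device_id example_ids[] = {
	{ "vif" },
	{ "" }
};

static int example_probe(struct xenbus_device *dev,
			 const struct xenbus_device_id *id)
{
	/* republish features and switch to InitWait here */
	return 0;
}

static int example_remove(struct xenbus_device *dev)
{
	return 0;
}

static struct xenbus_driver example_driver = {
	.ids = example_ids,
	.probe = example_probe,
	.remove = example_remove,
	.allow_rebind = true,	/* permit rebind while not Closed */
};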
diff --git a/drivers/nfc/pn533/i2c.c b/drivers/nfc/pn533/i2c.c
index 7507176cca0a..0207e66cee21 100644
--- a/drivers/nfc/pn533/i2c.c
+++ b/drivers/nfc/pn533/i2c.c
@@ -274,7 +274,6 @@ MODULE_DEVICE_TABLE(i2c, pn533_i2c_id_table);
static struct i2c_driver pn533_i2c_driver = {
.driver = {
.name = PN533_I2C_DRIVER_NAME,
- .owner = THIS_MODULE,
.of_match_table = of_match_ptr(of_pn533_i2c_match),
},
.probe = pn533_i2c_probe,
diff --git a/drivers/nfc/pn533/usb.c b/drivers/nfc/pn533/usb.c
index 4590fbf82dc2..f5bb7ace2ff5 100644
--- a/drivers/nfc/pn533/usb.c
+++ b/drivers/nfc/pn533/usb.c
@@ -391,7 +391,7 @@ static int pn533_acr122_poweron_rdr(struct pn533_usb_phy *phy)
cmd, sizeof(cmd), false);
rc = usb_bulk_msg(phy->udev, phy->out_urb->pipe, buffer, sizeof(cmd),
- &transferred, 0);
+ &transferred, 5000);
kfree(buffer);
if (rc || (transferred != sizeof(cmd))) {
nfc_err(&phy->udev->dev,
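The timeout change above is a hang fix: a timeout of 0 makes usb_bulk_msg() wait forever, so a device that never completes the transfer wedges the caller uninterruptibly, while a bounded timeout turns the same failure into a -ETIMEDOUT the driver can report. A minimal sketch of the pattern (send_cmd_bounded is a hypothetical helper):

#include <linux/usb.h>

static int send_cmd_bounded(struct usb_device *udev, unsigned int pipe,
			    void *buf, int len)
{
	int transferred;

	/* 0 would mean "no timeout"; 5000 ms bounds the wait */
	return usb_bulk_msg(udev, pipe, buf, len, &transferred, 5000);
}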
diff --git a/drivers/nfc/pn544/pn544.c b/drivers/nfc/pn544/pn544.c
index cda996f6954e..2b83156efe3f 100644
--- a/drivers/nfc/pn544/pn544.c
+++ b/drivers/nfc/pn544/pn544.c
@@ -693,7 +693,7 @@ static int pn544_hci_check_presence(struct nfc_hci_dev *hdev,
target->nfcid1_len != 10)
return -EOPNOTSUPP;
- return nfc_hci_send_cmd(hdev, NFC_HCI_RF_READER_A_GATE,
+ return nfc_hci_send_cmd(hdev, NFC_HCI_RF_READER_A_GATE,
PN544_RF_READER_CMD_ACTIVATE_NEXT,
target->nfcid1, target->nfcid1_len, NULL);
} else if (target->supported_protocols & (NFC_PROTO_JEWEL_MASK |
diff --git a/drivers/nfc/port100.c b/drivers/nfc/port100.c
index 604dba4f18af..8e4d355dc3ae 100644
--- a/drivers/nfc/port100.c
+++ b/drivers/nfc/port100.c
@@ -565,7 +565,7 @@ static void port100_tx_update_payload_len(void *_frame, int len)
{
struct port100_frame *frame = _frame;
- frame->datalen = cpu_to_le16(le16_to_cpu(frame->datalen) + len);
+ le16_add_cpu(&frame->datalen, len);
}
static bool port100_rx_frame_is_valid(void *_frame)
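le16_add_cpu() is just a tidier spelling of the byte-order round trip being removed above. A minimal sketch showing the two equivalent forms:

#include <linux/byteorder/generic.h>
#include <linux/types.h>

static void add_len_open_coded(__le16 *datalen, u16 len)
{
	*datalen = cpu_to_le16(le16_to_cpu(*datalen) + len);
}

static void add_len_helper(__le16 *datalen, u16 len)
{
	le16_add_cpu(datalen, len);	/* identical result, less noise */
}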
diff --git a/drivers/nvdimm/pmem.c b/drivers/nvdimm/pmem.c
index ad8e4df1282b..4eae441f86c9 100644
--- a/drivers/nvdimm/pmem.c
+++ b/drivers/nvdimm/pmem.c
@@ -337,13 +337,7 @@ static void pmem_release_disk(void *__pmem)
put_disk(pmem->disk);
}
-static void pmem_pagemap_page_free(struct page *page)
-{
- wake_up_var(&page->_refcount);
-}
-
static const struct dev_pagemap_ops fsdax_pagemap_ops = {
- .page_free = pmem_pagemap_page_free,
.kill = pmem_pagemap_kill,
.cleanup = pmem_pagemap_cleanup,
};
diff --git a/drivers/nvme/host/Kconfig b/drivers/nvme/host/Kconfig
index c6439638a419..b9358db83e96 100644
--- a/drivers/nvme/host/Kconfig
+++ b/drivers/nvme/host/Kconfig
@@ -1,6 +1,7 @@
# SPDX-License-Identifier: GPL-2.0-only
config NVME_CORE
tristate
+ select BLK_DEV_INTEGRITY_T10 if BLK_DEV_INTEGRITY
config BLK_DEV_NVME
tristate "NVM Express block device"
diff --git a/drivers/nvme/host/hwmon.c b/drivers/nvme/host/hwmon.c
index a5af21f5d370..2e6477ed420f 100644
--- a/drivers/nvme/host/hwmon.c
+++ b/drivers/nvme/host/hwmon.c
@@ -5,14 +5,11 @@
*/
#include <linux/hwmon.h>
+#include <linux/units.h>
#include <asm/unaligned.h>
#include "nvme.h"
-/* These macros should be moved to linux/temperature.h */
-#define MILLICELSIUS_TO_KELVIN(t) DIV_ROUND_CLOSEST((t) + 273150, 1000)
-#define KELVIN_TO_MILLICELSIUS(t) ((t) * 1000L - 273150)
-
struct nvme_hwmon_data {
struct nvme_ctrl *ctrl;
struct nvme_smart_log log;
@@ -35,7 +32,7 @@ static int nvme_get_temp_thresh(struct nvme_ctrl *ctrl, int sensor, bool under,
return -EIO;
if (ret < 0)
return ret;
- *temp = KELVIN_TO_MILLICELSIUS(status & NVME_TEMP_THRESH_MASK);
+ *temp = kelvin_to_millicelsius(status & NVME_TEMP_THRESH_MASK);
return 0;
}
@@ -46,7 +43,7 @@ static int nvme_set_temp_thresh(struct nvme_ctrl *ctrl, int sensor, bool under,
unsigned int threshold = sensor << NVME_TEMP_THRESH_SELECT_SHIFT;
int ret;
- temp = MILLICELSIUS_TO_KELVIN(temp);
+ temp = millicelsius_to_kelvin(temp);
threshold |= clamp_val(temp, 0, NVME_TEMP_THRESH_MASK);
if (under)
@@ -88,7 +85,7 @@ static int nvme_hwmon_read(struct device *dev, enum hwmon_sensor_types type,
case hwmon_temp_min:
return nvme_get_temp_thresh(data->ctrl, channel, true, val);
case hwmon_temp_crit:
- *val = KELVIN_TO_MILLICELSIUS(data->ctrl->cctemp);
+ *val = kelvin_to_millicelsius(data->ctrl->cctemp);
return 0;
default:
break;
@@ -105,7 +102,7 @@ static int nvme_hwmon_read(struct device *dev, enum hwmon_sensor_types type,
temp = get_unaligned_le16(log->temperature);
else
temp = le16_to_cpu(log->temp_sensor[channel - 1]);
- *val = KELVIN_TO_MILLICELSIUS(temp);
+ *val = kelvin_to_millicelsius(temp);
break;
case hwmon_temp_alarm:
*val = !!(log->critical_warning & NVME_SMART_CRIT_TEMPERATURE);
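The helpers replacing the driver-local macros live in <linux/units.h> and encode the same arithmetic: NVMe reports temperatures in kelvin, hwmon expects millidegrees Celsius, and the two scales differ by 273150 mdeg. A standalone sketch of the conversions; the rounding mirrors DIV_ROUND_CLOSEST for the positive values seen here:

#include <stdio.h>

static long kelvin_to_mcelsius(long t) { return t * 1000L - 273150; }
static long mcelsius_to_kelvin(long t) { return (t + 273150 + 500) / 1000; }

int main(void)
{
	printf("%ld\n", kelvin_to_mcelsius(358));   /* 84850 mdeg C */
	printf("%ld\n", mcelsius_to_kelvin(84850)); /* back to 358 K */
	return 0;
}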
diff --git a/drivers/nvmem/Kconfig b/drivers/nvmem/Kconfig
index 73567e922491..35efab1ba8d9 100644
--- a/drivers/nvmem/Kconfig
+++ b/drivers/nvmem/Kconfig
@@ -109,6 +109,14 @@ config QCOM_QFPROM
This driver can also be built as a module. If so, the module
will be called nvmem_qfprom.
+config NVMEM_SPMI_SDAM
+ tristate "SPMI SDAM Support"
+ depends on SPMI
+ help
+ This driver supports the Shared Direct Access Memory Module on
+ Qualcomm Technologies, Inc. PMICs. It provides the clients
+ an interface to read/write to the SDAM module's shared memory.
+
config ROCKCHIP_EFUSE
tristate "Rockchip eFuse Support"
depends on ARCH_ROCKCHIP || COMPILE_TEST
diff --git a/drivers/nvmem/Makefile b/drivers/nvmem/Makefile
index 9e667823edb3..6b466cd1427b 100644
--- a/drivers/nvmem/Makefile
+++ b/drivers/nvmem/Makefile
@@ -28,6 +28,8 @@ obj-$(CONFIG_MTK_EFUSE) += nvmem_mtk-efuse.o
nvmem_mtk-efuse-y := mtk-efuse.o
obj-$(CONFIG_QCOM_QFPROM) += nvmem_qfprom.o
nvmem_qfprom-y := qfprom.o
+obj-$(CONFIG_NVMEM_SPMI_SDAM) += nvmem_qcom-spmi-sdam.o
+nvmem_qcom-spmi-sdam-y += qcom-spmi-sdam.o
obj-$(CONFIG_ROCKCHIP_EFUSE) += nvmem_rockchip_efuse.o
nvmem_rockchip_efuse-y := rockchip-efuse.o
obj-$(CONFIG_ROCKCHIP_OTP) += nvmem-rockchip-otp.o
diff --git a/drivers/nvmem/core.c b/drivers/nvmem/core.c
index 9f1ee9c766ec..1e4a798dce6e 100644
--- a/drivers/nvmem/core.c
+++ b/drivers/nvmem/core.c
@@ -83,7 +83,7 @@ static void nvmem_cell_drop(struct nvmem_cell *cell)
list_del(&cell->node);
mutex_unlock(&nvmem_mutex);
of_node_put(cell->np);
- kfree(cell->name);
+ kfree_const(cell->name);
kfree(cell);
}
@@ -110,7 +110,9 @@ static int nvmem_cell_info_to_nvmem_cell(struct nvmem_device *nvmem,
cell->nvmem = nvmem;
cell->offset = info->offset;
cell->bytes = info->bytes;
- cell->name = info->name;
+ cell->name = kstrdup_const(info->name, GFP_KERNEL);
+ if (!cell->name)
+ return -ENOMEM;
cell->bit_offset = info->bit_offset;
cell->nbits = info->nbits;
@@ -300,7 +302,7 @@ static int nvmem_add_cells_from_of(struct nvmem_device *nvmem)
dev_err(dev, "cell %s unaligned to nvmem stride %d\n",
cell->name, nvmem->stride);
/* Cells already added will be freed later. */
- kfree(cell->name);
+ kfree_const(cell->name);
kfree(cell);
return -EINVAL;
}
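The kstrdup_const()/kfree_const() pairing introduced above does two jobs at once: it gives the cell its own copy of a caller-owned name (the old code freed a pointer it never allocated), and it skips the copy entirely when the source string lives in .rodata. A minimal sketch of the contract (take_name/drop_name are hypothetical):

#include <linux/slab.h>
#include <linux/string.h>

static const char *take_name(const char *src)
{
	/* returns src itself if src is in .rodata, else a kmalloc copy */
	return kstrdup_const(src, GFP_KERNEL);
}

static void drop_name(const char *name)
{
	kfree_const(name);	/* frees only if a copy was really made */
}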
diff --git a/drivers/nvmem/imx-ocotp-scu.c b/drivers/nvmem/imx-ocotp-scu.c
index 03f1ab23ad51..399e1eb8b4c1 100644
--- a/drivers/nvmem/imx-ocotp-scu.c
+++ b/drivers/nvmem/imx-ocotp-scu.c
@@ -15,8 +15,7 @@
#include <linux/platform_device.h>
#include <linux/slab.h>
-#define IMX_SIP_OTP 0xC200000A
-#define IMX_SIP_OTP_WRITE 0x2
+#define IMX_SIP_OTP_WRITE 0xc200000B
enum ocotp_devtype {
IMX8QXP,
@@ -139,8 +138,8 @@ static int imx_scu_ocotp_read(void *context, unsigned int offset,
void *p;
int i, ret;
- index = offset >> 2;
- num_bytes = round_up((offset % 4) + bytes, 4);
+ index = offset;
+ num_bytes = round_up(bytes, 4);
count = num_bytes >> 2;
if (count > (priv->data->nregs - index))
@@ -169,7 +168,7 @@ static int imx_scu_ocotp_read(void *context, unsigned int offset,
buf++;
}
- memcpy(val, (u8 *)p + offset % 4, bytes);
+ memcpy(val, (u8 *)p, bytes);
mutex_unlock(&scu_ocotp_mutex);
@@ -189,10 +188,10 @@ static int imx_scu_ocotp_write(void *context, unsigned int offset,
int ret;
/* allow only writing one complete OTP word at a time */
- if ((bytes != 4) || (offset % 4))
+ if (bytes != 4)
return -EINVAL;
- index = offset >> 2;
+ index = offset;
if (in_hole(context, index))
return -EINVAL;
@@ -212,8 +211,7 @@ static int imx_scu_ocotp_write(void *context, unsigned int offset,
mutex_lock(&scu_ocotp_mutex);
- arm_smccc_smc(IMX_SIP_OTP, IMX_SIP_OTP_WRITE, index, *buf,
- 0, 0, 0, 0, &res);
+ arm_smccc_smc(IMX_SIP_OTP_WRITE, index, *buf, 0, 0, 0, 0, 0, &res);
mutex_unlock(&scu_ocotp_mutex);
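The SMC rework folds the OTP-write sub-function into a dedicated SiP function ID, shifting every argument up one slot; in the SMCCC convention a0 always carries the function ID and the result comes back in res.a0. A hedged sketch of the new call shape (the error mapping at the end is an assumption, not taken from the driver):

#include <linux/arm-smccc.h>

static int otp_write_word(u32 index, u32 val)
{
	struct arm_smccc_res res;

	/* function ID first, then the word index and fuse value */
	arm_smccc_smc(0xc200000B /* IMX_SIP_OTP_WRITE */, index, val,
		      0, 0, 0, 0, 0, &res);
	return res.a0 ? -EIO : 0;	/* assumed: nonzero a0 is an error */
}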
diff --git a/drivers/nvmem/imx-ocotp.c b/drivers/nvmem/imx-ocotp.c
index fc40555ca4cd..4ba9cc8f76df 100644
--- a/drivers/nvmem/imx-ocotp.c
+++ b/drivers/nvmem/imx-ocotp.c
@@ -44,6 +44,14 @@
#define IMX_OCOTP_BM_CTRL_ERROR 0x00000200
#define IMX_OCOTP_BM_CTRL_REL_SHADOWS 0x00000400
+#define IMX_OCOTP_BM_CTRL_DEFAULT \
+ { \
+ .bm_addr = IMX_OCOTP_BM_CTRL_ADDR, \
+ .bm_busy = IMX_OCOTP_BM_CTRL_BUSY, \
+ .bm_error = IMX_OCOTP_BM_CTRL_ERROR, \
+ .bm_rel_shadows = IMX_OCOTP_BM_CTRL_REL_SHADOWS,\
+ }
+
#define TIMING_STROBE_PROG_US 10 /* Min time to blow a fuse */
#define TIMING_STROBE_READ_NS 37 /* Min time before read */
#define TIMING_RELAX_NS 17
@@ -62,18 +70,31 @@ struct ocotp_priv {
struct nvmem_config *config;
};
+struct ocotp_ctrl_reg {
+ u32 bm_addr;
+ u32 bm_busy;
+ u32 bm_error;
+ u32 bm_rel_shadows;
+};
+
struct ocotp_params {
unsigned int nregs;
unsigned int bank_address_words;
void (*set_timing)(struct ocotp_priv *priv);
+ struct ocotp_ctrl_reg ctrl;
};
-static int imx_ocotp_wait_for_busy(void __iomem *base, u32 flags)
+static int imx_ocotp_wait_for_busy(struct ocotp_priv *priv, u32 flags)
{
int count;
u32 c, mask;
+ u32 bm_ctrl_busy, bm_ctrl_error;
+ void __iomem *base = priv->base;
- mask = IMX_OCOTP_BM_CTRL_BUSY | IMX_OCOTP_BM_CTRL_ERROR | flags;
+ bm_ctrl_busy = priv->params->ctrl.bm_busy;
+ bm_ctrl_error = priv->params->ctrl.bm_error;
+
+ mask = bm_ctrl_busy | bm_ctrl_error | flags;
for (count = 10000; count >= 0; count--) {
c = readl(base + IMX_OCOTP_ADDR_CTRL);
@@ -97,7 +118,7 @@ static int imx_ocotp_wait_for_busy(void __iomem *base, u32 flags)
* - A read is performed to from a fuse word which has been read
* locked.
*/
- if (c & IMX_OCOTP_BM_CTRL_ERROR)
+ if (c & bm_ctrl_error)
return -EPERM;
return -ETIMEDOUT;
}
@@ -105,15 +126,18 @@ static int imx_ocotp_wait_for_busy(void __iomem *base, u32 flags)
return 0;
}
-static void imx_ocotp_clr_err_if_set(void __iomem *base)
+static void imx_ocotp_clr_err_if_set(struct ocotp_priv *priv)
{
- u32 c;
+ u32 c, bm_ctrl_error;
+ void __iomem *base = priv->base;
+
+ bm_ctrl_error = priv->params->ctrl.bm_error;
c = readl(base + IMX_OCOTP_ADDR_CTRL);
- if (!(c & IMX_OCOTP_BM_CTRL_ERROR))
+ if (!(c & bm_ctrl_error))
return;
- writel(IMX_OCOTP_BM_CTRL_ERROR, base + IMX_OCOTP_ADDR_CTRL_CLR);
+ writel(bm_ctrl_error, base + IMX_OCOTP_ADDR_CTRL_CLR);
}
static int imx_ocotp_read(void *context, unsigned int offset,
@@ -140,7 +164,7 @@ static int imx_ocotp_read(void *context, unsigned int offset,
return ret;
}
- ret = imx_ocotp_wait_for_busy(priv->base, 0);
+ ret = imx_ocotp_wait_for_busy(priv, 0);
if (ret < 0) {
dev_err(priv->dev, "timeout during read setup\n");
goto read_end;
@@ -157,7 +181,7 @@ static int imx_ocotp_read(void *context, unsigned int offset,
* issued
*/
if (*(buf - 1) == IMX_OCOTP_READ_LOCKED_VAL)
- imx_ocotp_clr_err_if_set(priv->base);
+ imx_ocotp_clr_err_if_set(priv);
}
ret = 0;
@@ -274,7 +298,7 @@ static int imx_ocotp_write(void *context, unsigned int offset, void *val,
* write or reload must be completed before a write access can be
* requested.
*/
- ret = imx_ocotp_wait_for_busy(priv->base, 0);
+ ret = imx_ocotp_wait_for_busy(priv, 0);
if (ret < 0) {
dev_err(priv->dev, "timeout during timing setup\n");
goto write_end;
@@ -306,8 +330,8 @@ static int imx_ocotp_write(void *context, unsigned int offset, void *val,
}
ctrl = readl(priv->base + IMX_OCOTP_ADDR_CTRL);
- ctrl &= ~IMX_OCOTP_BM_CTRL_ADDR;
- ctrl |= waddr & IMX_OCOTP_BM_CTRL_ADDR;
+ ctrl &= ~priv->params->ctrl.bm_addr;
+ ctrl |= waddr & priv->params->ctrl.bm_addr;
ctrl |= IMX_OCOTP_WR_UNLOCK;
writel(ctrl, priv->base + IMX_OCOTP_ADDR_CTRL);
@@ -374,11 +398,11 @@ static int imx_ocotp_write(void *context, unsigned int offset, void *val,
* be set. It must be cleared by software before any new write access
* can be issued.
*/
- ret = imx_ocotp_wait_for_busy(priv->base, 0);
+ ret = imx_ocotp_wait_for_busy(priv, 0);
if (ret < 0) {
if (ret == -EPERM) {
dev_err(priv->dev, "failed write to locked region");
- imx_ocotp_clr_err_if_set(priv->base);
+ imx_ocotp_clr_err_if_set(priv);
} else {
dev_err(priv->dev, "timeout during data write\n");
}
@@ -394,10 +418,10 @@ static int imx_ocotp_write(void *context, unsigned int offset, void *val,
udelay(2);
/* reload all shadow registers */
- writel(IMX_OCOTP_BM_CTRL_REL_SHADOWS,
+ writel(priv->params->ctrl.bm_rel_shadows,
priv->base + IMX_OCOTP_ADDR_CTRL_SET);
- ret = imx_ocotp_wait_for_busy(priv->base,
- IMX_OCOTP_BM_CTRL_REL_SHADOWS);
+ ret = imx_ocotp_wait_for_busy(priv,
+ priv->params->ctrl.bm_rel_shadows);
if (ret < 0) {
dev_err(priv->dev, "timeout during shadow register reload\n");
goto write_end;
@@ -424,65 +448,76 @@ static const struct ocotp_params imx6q_params = {
.nregs = 128,
.bank_address_words = 0,
.set_timing = imx_ocotp_set_imx6_timing,
+ .ctrl = IMX_OCOTP_BM_CTRL_DEFAULT,
};
static const struct ocotp_params imx6sl_params = {
.nregs = 64,
.bank_address_words = 0,
.set_timing = imx_ocotp_set_imx6_timing,
+ .ctrl = IMX_OCOTP_BM_CTRL_DEFAULT,
};
static const struct ocotp_params imx6sll_params = {
.nregs = 128,
.bank_address_words = 0,
.set_timing = imx_ocotp_set_imx6_timing,
+ .ctrl = IMX_OCOTP_BM_CTRL_DEFAULT,
};
static const struct ocotp_params imx6sx_params = {
.nregs = 128,
.bank_address_words = 0,
.set_timing = imx_ocotp_set_imx6_timing,
+ .ctrl = IMX_OCOTP_BM_CTRL_DEFAULT,
};
static const struct ocotp_params imx6ul_params = {
.nregs = 128,
.bank_address_words = 0,
.set_timing = imx_ocotp_set_imx6_timing,
+ .ctrl = IMX_OCOTP_BM_CTRL_DEFAULT,
};
static const struct ocotp_params imx6ull_params = {
.nregs = 64,
.bank_address_words = 0,
.set_timing = imx_ocotp_set_imx6_timing,
+ .ctrl = IMX_OCOTP_BM_CTRL_DEFAULT,
};
static const struct ocotp_params imx7d_params = {
.nregs = 64,
.bank_address_words = 4,
.set_timing = imx_ocotp_set_imx7_timing,
+ .ctrl = IMX_OCOTP_BM_CTRL_DEFAULT,
};
static const struct ocotp_params imx7ulp_params = {
.nregs = 256,
.bank_address_words = 0,
+ .ctrl = IMX_OCOTP_BM_CTRL_DEFAULT,
};
static const struct ocotp_params imx8mq_params = {
.nregs = 256,
.bank_address_words = 0,
.set_timing = imx_ocotp_set_imx6_timing,
+ .ctrl = IMX_OCOTP_BM_CTRL_DEFAULT,
};
static const struct ocotp_params imx8mm_params = {
.nregs = 256,
.bank_address_words = 0,
.set_timing = imx_ocotp_set_imx6_timing,
+ .ctrl = IMX_OCOTP_BM_CTRL_DEFAULT,
};
static const struct ocotp_params imx8mn_params = {
.nregs = 256,
.bank_address_words = 0,
.set_timing = imx_ocotp_set_imx6_timing,
+ .ctrl = IMX_OCOTP_BM_CTRL_DEFAULT,
};
static const struct of_device_id imx_ocotp_dt_ids[] = {
@@ -521,17 +556,17 @@ static int imx_ocotp_probe(struct platform_device *pdev)
if (IS_ERR(priv->clk))
return PTR_ERR(priv->clk);
- clk_prepare_enable(priv->clk);
- imx_ocotp_clr_err_if_set(priv->base);
- clk_disable_unprepare(priv->clk);
-
priv->params = of_device_get_match_data(&pdev->dev);
imx_ocotp_nvmem_config.size = 4 * priv->params->nregs;
imx_ocotp_nvmem_config.dev = dev;
imx_ocotp_nvmem_config.priv = priv;
priv->config = &imx_ocotp_nvmem_config;
- nvmem = devm_nvmem_register(dev, &imx_ocotp_nvmem_config);
+ clk_prepare_enable(priv->clk);
+ imx_ocotp_clr_err_if_set(priv);
+ clk_disable_unprepare(priv->clk);
+
+ nvmem = devm_nvmem_register(dev, &imx_ocotp_nvmem_config);
return PTR_ERR_OR_ZERO(nvmem);
}
diff --git a/drivers/nvmem/qcom-spmi-sdam.c b/drivers/nvmem/qcom-spmi-sdam.c
new file mode 100644
index 000000000000..8682cda448d6
--- /dev/null
+++ b/drivers/nvmem/qcom-spmi-sdam.c
@@ -0,0 +1,192 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2017 The Linux Foundation. All rights reserved.
+ */
+
+#include <linux/device.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_platform.h>
+#include <linux/nvmem-provider.h>
+#include <linux/regmap.h>
+
+#define SDAM_MEM_START 0x40
+#define REGISTER_MAP_ID 0x40
+#define REGISTER_MAP_VERSION 0x41
+#define SDAM_SIZE 0x44
+#define SDAM_PBS_TRIG_SET 0xE5
+#define SDAM_PBS_TRIG_CLR 0xE6
+
+struct sdam_chip {
+ struct platform_device *pdev;
+ struct regmap *regmap;
+ struct nvmem_config sdam_config;
+ unsigned int base;
+ unsigned int size;
+};
+
+/* read only register offsets */
+static const u8 sdam_ro_map[] = {
+ REGISTER_MAP_ID,
+ REGISTER_MAP_VERSION,
+ SDAM_SIZE
+};
+
+static bool sdam_is_valid(struct sdam_chip *sdam, unsigned int offset,
+ size_t len)
+{
+ unsigned int sdam_mem_end = SDAM_MEM_START + sdam->size - 1;
+
+ if (!len)
+ return false;
+
+ if (offset >= SDAM_MEM_START && offset <= sdam_mem_end
+ && (offset + len - 1) <= sdam_mem_end)
+ return true;
+ else if ((offset == SDAM_PBS_TRIG_SET || offset == SDAM_PBS_TRIG_CLR)
+ && (len == 1))
+ return true;
+
+ return false;
+}
+
+static bool sdam_is_ro(unsigned int offset, size_t len)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(sdam_ro_map); i++)
+ if (offset <= sdam_ro_map[i] && (offset + len) > sdam_ro_map[i])
+ return true;
+
+ return false;
+}
+
+static int sdam_read(void *priv, unsigned int offset, void *val,
+ size_t bytes)
+{
+ struct sdam_chip *sdam = priv;
+ struct device *dev = &sdam->pdev->dev;
+ int rc;
+
+ if (!sdam_is_valid(sdam, offset, bytes)) {
+ dev_err(dev, "Invalid SDAM offset %#x len=%zd\n",
+ offset, bytes);
+ return -EINVAL;
+ }
+
+ rc = regmap_bulk_read(sdam->regmap, sdam->base + offset, val, bytes);
+ if (rc < 0)
+ dev_err(dev, "Failed to read SDAM offset %#x len=%zd, rc=%d\n",
+ offset, bytes, rc);
+
+ return rc;
+}
+
+static int sdam_write(void *priv, unsigned int offset, void *val,
+ size_t bytes)
+{
+ struct sdam_chip *sdam = priv;
+ struct device *dev = &sdam->pdev->dev;
+ int rc;
+
+ if (!sdam_is_valid(sdam, offset, bytes)) {
+ dev_err(dev, "Invalid SDAM offset %#x len=%zd\n",
+ offset, bytes);
+ return -EINVAL;
+ }
+
+ if (sdam_is_ro(offset, bytes)) {
+ dev_err(dev, "Invalid write offset %#x len=%zd\n",
+ offset, bytes);
+ return -EINVAL;
+ }
+
+ rc = regmap_bulk_write(sdam->regmap, sdam->base + offset, val, bytes);
+ if (rc < 0)
+ dev_err(dev, "Failed to write SDAM offset %#x len=%zd, rc=%d\n",
+ offset, bytes, rc);
+
+ return rc;
+}
+
+static int sdam_probe(struct platform_device *pdev)
+{
+ struct sdam_chip *sdam;
+ struct nvmem_device *nvmem;
+ unsigned int val;
+ int rc;
+
+ sdam = devm_kzalloc(&pdev->dev, sizeof(*sdam), GFP_KERNEL);
+ if (!sdam)
+ return -ENOMEM;
+
+ sdam->regmap = dev_get_regmap(pdev->dev.parent, NULL);
+ if (!sdam->regmap) {
+ dev_err(&pdev->dev, "Failed to get regmap handle\n");
+ return -ENXIO;
+ }
+
+ rc = of_property_read_u32(pdev->dev.of_node, "reg", &sdam->base);
+ if (rc < 0) {
+ dev_err(&pdev->dev, "Failed to get SDAM base, rc=%d\n", rc);
+ return -EINVAL;
+ }
+
+ rc = regmap_read(sdam->regmap, sdam->base + SDAM_SIZE, &val);
+ if (rc < 0) {
+ dev_err(&pdev->dev, "Failed to read SDAM_SIZE rc=%d\n", rc);
+ return -EINVAL;
+ }
+ sdam->size = val * 32;
+
+ sdam->sdam_config.dev = &pdev->dev;
+ sdam->sdam_config.name = "spmi_sdam";
+ sdam->sdam_config.id = pdev->id;
+ sdam->sdam_config.owner = THIS_MODULE;
+ sdam->sdam_config.stride = 1;
+ sdam->sdam_config.word_size = 1;
+ sdam->sdam_config.reg_read = sdam_read;
+ sdam->sdam_config.reg_write = sdam_write;
+ sdam->sdam_config.priv = sdam;
+
+ nvmem = devm_nvmem_register(&pdev->dev, &sdam->sdam_config);
+ if (IS_ERR(nvmem)) {
+ dev_err(&pdev->dev,
+ "Failed to register SDAM nvmem device rc=%ld\n",
+ PTR_ERR(nvmem));
+ return -ENXIO;
+ }
+ dev_dbg(&pdev->dev,
+ "SDAM base=%#x size=%u registered successfully\n",
+ sdam->base, sdam->size);
+
+ return 0;
+}
+
+static const struct of_device_id sdam_match_table[] = {
+ { .compatible = "qcom,spmi-sdam" },
+ {},
+};
+
+static struct platform_driver sdam_driver = {
+ .driver = {
+ .name = "qcom,spmi-sdam",
+ .of_match_table = sdam_match_table,
+ },
+ .probe = sdam_probe,
+};
+
+static int __init sdam_init(void)
+{
+ return platform_driver_register(&sdam_driver);
+}
+subsys_initcall(sdam_init);
+
+static void __exit sdam_exit(void)
+{
+ return platform_driver_unregister(&sdam_driver);
+}
+module_exit(sdam_exit);
+
+MODULE_DESCRIPTION("QCOM SPMI SDAM driver");
+MODULE_LICENSE("GPL v2");
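Once the provider above is registered, clients reach the SDAM shared memory through the generic nvmem consumer API; sdam_is_valid() then polices every offset on their behalf. A hedged consumer-side sketch (the device lookup name and offset are illustrative, not mandated by the driver):

#include <linux/nvmem-consumer.h>

static int read_sdam_bytes(struct device *dev)
{
	struct nvmem_device *nv;
	u8 buf[4];
	int rc;

	nv = devm_nvmem_device_get(dev, "spmi_sdam");
	if (IS_ERR(nv))
		return PTR_ERR(nv);

	/* offsets are checked against SDAM_MEM_START..end by the provider */
	rc = nvmem_device_read(nv, 0x40, sizeof(buf), buf);
	return rc < 0 ? rc : 0;
}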
diff --git a/drivers/of/Kconfig b/drivers/of/Kconfig
index 37c2ccbefecd..d91618641be6 100644
--- a/drivers/of/Kconfig
+++ b/drivers/of/Kconfig
@@ -103,4 +103,8 @@ config OF_OVERLAY
config OF_NUMA
bool
+config OF_DMA_DEFAULT_COHERENT
+ # arches should select this if DMA is coherent by default for OF devices
+ bool
+
endif # OF
diff --git a/drivers/of/address.c b/drivers/of/address.c
index 99c1b8058559..e8a39c3ec4d4 100644
--- a/drivers/of/address.c
+++ b/drivers/of/address.c
@@ -995,12 +995,16 @@ out:
* @np: device node
*
* It returns true if "dma-coherent" property was found
- * for this device in DT.
+ * for this device in the DT, or if DMA is coherent by
+ * default for OF devices on the current platform.
*/
bool of_dma_is_coherent(struct device_node *np)
{
struct device_node *node = of_node_get(np);
+ if (IS_ENABLED(CONFIG_OF_DMA_DEFAULT_COHERENT))
+ return true;
+
while (node) {
if (of_property_read_bool(node, "dma-coherent")) {
of_node_put(node);
diff --git a/drivers/of/base.c b/drivers/of/base.c
index db7fbc0c0893..8d173fb3552a 100644
--- a/drivers/of/base.c
+++ b/drivers/of/base.c
@@ -135,115 +135,38 @@ int __weak of_node_to_nid(struct device_node *np)
}
#endif
-/*
- * Assumptions behind phandle_cache implementation:
- * - phandle property values are in a contiguous range of 1..n
- *
- * If the assumptions do not hold, then
- * - the phandle lookup overhead reduction provided by the cache
- * will likely be less
- */
+#define OF_PHANDLE_CACHE_BITS 7
+#define OF_PHANDLE_CACHE_SZ BIT(OF_PHANDLE_CACHE_BITS)
-static struct device_node **phandle_cache;
-static u32 phandle_cache_mask;
+static struct device_node *phandle_cache[OF_PHANDLE_CACHE_SZ];
-/*
- * Caller must hold devtree_lock.
- */
-static void __of_free_phandle_cache(void)
+static u32 of_phandle_cache_hash(phandle handle)
{
- u32 cache_entries = phandle_cache_mask + 1;
- u32 k;
-
- if (!phandle_cache)
- return;
-
- for (k = 0; k < cache_entries; k++)
- of_node_put(phandle_cache[k]);
-
- kfree(phandle_cache);
- phandle_cache = NULL;
+ return hash_32(handle, OF_PHANDLE_CACHE_BITS);
}
-int of_free_phandle_cache(void)
-{
- unsigned long flags;
-
- raw_spin_lock_irqsave(&devtree_lock, flags);
-
- __of_free_phandle_cache();
-
- raw_spin_unlock_irqrestore(&devtree_lock, flags);
-
- return 0;
-}
-#if !defined(CONFIG_MODULES)
-late_initcall_sync(of_free_phandle_cache);
-#endif
-
/*
* Caller must hold devtree_lock.
*/
-void __of_free_phandle_cache_entry(phandle handle)
+void __of_phandle_cache_inv_entry(phandle handle)
{
- phandle masked_handle;
+ u32 handle_hash;
struct device_node *np;
if (!handle)
return;
- masked_handle = handle & phandle_cache_mask;
-
- if (phandle_cache) {
- np = phandle_cache[masked_handle];
- if (np && handle == np->phandle) {
- of_node_put(np);
- phandle_cache[masked_handle] = NULL;
- }
- }
-}
-
-void of_populate_phandle_cache(void)
-{
- unsigned long flags;
- u32 cache_entries;
- struct device_node *np;
- u32 phandles = 0;
-
- raw_spin_lock_irqsave(&devtree_lock, flags);
-
- __of_free_phandle_cache();
-
- for_each_of_allnodes(np)
- if (np->phandle && np->phandle != OF_PHANDLE_ILLEGAL)
- phandles++;
-
- if (!phandles)
- goto out;
+ handle_hash = of_phandle_cache_hash(handle);
- cache_entries = roundup_pow_of_two(phandles);
- phandle_cache_mask = cache_entries - 1;
-
- phandle_cache = kcalloc(cache_entries, sizeof(*phandle_cache),
- GFP_ATOMIC);
- if (!phandle_cache)
- goto out;
-
- for_each_of_allnodes(np)
- if (np->phandle && np->phandle != OF_PHANDLE_ILLEGAL) {
- of_node_get(np);
- phandle_cache[np->phandle & phandle_cache_mask] = np;
- }
-
-out:
- raw_spin_unlock_irqrestore(&devtree_lock, flags);
+ np = phandle_cache[handle_hash];
+ if (np && handle == np->phandle)
+ phandle_cache[handle_hash] = NULL;
}
void __init of_core_init(void)
{
struct device_node *np;
- of_populate_phandle_cache();
/* Create the kset, and register existing nodes */
mutex_lock(&of_mutex);
@@ -253,8 +176,11 @@ void __init of_core_init(void)
pr_err("failed to register existing nodes\n");
return;
}
- for_each_of_allnodes(np)
+ for_each_of_allnodes(np) {
__of_attach_node_sysfs(np);
+ if (np->phandle && !phandle_cache[of_phandle_cache_hash(np->phandle)])
+ phandle_cache[of_phandle_cache_hash(np->phandle)] = np;
+ }
mutex_unlock(&of_mutex);
/* Symlink in /proc as required by userspace ABI */
@@ -1235,36 +1161,24 @@ struct device_node *of_find_node_by_phandle(phandle handle)
{
struct device_node *np = NULL;
unsigned long flags;
- phandle masked_handle;
+ u32 handle_hash;
if (!handle)
return NULL;
+ handle_hash = of_phandle_cache_hash(handle);
+
raw_spin_lock_irqsave(&devtree_lock, flags);
- masked_handle = handle & phandle_cache_mask;
-
- if (phandle_cache) {
- if (phandle_cache[masked_handle] &&
- handle == phandle_cache[masked_handle]->phandle)
- np = phandle_cache[masked_handle];
- if (np && of_node_check_flag(np, OF_DETACHED)) {
- WARN_ON(1); /* did not uncache np on node removal */
- of_node_put(np);
- phandle_cache[masked_handle] = NULL;
- np = NULL;
- }
- }
+ if (phandle_cache[handle_hash] &&
+ handle == phandle_cache[handle_hash]->phandle)
+ np = phandle_cache[handle_hash];
if (!np) {
for_each_of_allnodes(np)
if (np->phandle == handle &&
!of_node_check_flag(np, OF_DETACHED)) {
- if (phandle_cache) {
- /* will put when removed from cache */
- of_node_get(np);
- phandle_cache[masked_handle] = np;
- }
+ phandle_cache[handle_hash] = np;
break;
}
}
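The rewritten cache trades the dynamically sized, refcounted array for a fixed 128-slot array indexed by hash_32() of the phandle. Entries are pure hints: no node reference is taken, a detached node is handled by simply nulling its slot, and a miss falls through to the full tree walk. A condensed sketch of the lookup side:

#include <linux/hash.h>
#include <linux/of.h>

#define CACHE_BITS	7
static struct device_node *cache[1 << CACHE_BITS];

static struct device_node *cache_lookup(phandle handle)
{
	struct device_node *np = cache[hash_32(handle, CACHE_BITS)];

	/* hash collisions are possible, so always verify the phandle */
	return (np && np->phandle == handle) ? np : NULL;
}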
diff --git a/drivers/of/dynamic.c b/drivers/of/dynamic.c
index 49b16f76d78e..08fd823edac9 100644
--- a/drivers/of/dynamic.c
+++ b/drivers/of/dynamic.c
@@ -276,7 +276,7 @@ void __of_detach_node(struct device_node *np)
of_node_set_flag(np, OF_DETACHED);
/* race with of_find_node_by_phandle() prevented by devtree_lock */
- __of_free_phandle_cache_entry(np->phandle);
+ __of_phandle_cache_inv_entry(np->phandle);
}
/**
diff --git a/drivers/of/of_mdio.c b/drivers/of/of_mdio.c
index fc757ef6eadc..f5c2a5487761 100644
--- a/drivers/of/of_mdio.c
+++ b/drivers/of/of_mdio.c
@@ -42,14 +42,37 @@ static int of_get_phy_id(struct device_node *device, u32 *phy_id)
return -EINVAL;
}
+static struct mii_timestamper *of_find_mii_timestamper(struct device_node *node)
+{
+ struct of_phandle_args arg;
+ int err;
+
+ err = of_parse_phandle_with_fixed_args(node, "timestamper", 1, 0, &arg);
+
+ if (err == -ENOENT)
+ return NULL;
+ else if (err)
+ return ERR_PTR(err);
+
+ if (arg.args_count != 1)
+ return ERR_PTR(-EINVAL);
+
+ return register_mii_timestamper(arg.np, arg.args[0]);
+}
+
static int of_mdiobus_register_phy(struct mii_bus *mdio,
struct device_node *child, u32 addr)
{
+ struct mii_timestamper *mii_ts;
struct phy_device *phy;
bool is_c45;
int rc;
u32 phy_id;
+ mii_ts = of_find_mii_timestamper(child);
+ if (IS_ERR(mii_ts))
+ return PTR_ERR(mii_ts);
+
is_c45 = of_device_is_compatible(child,
"ethernet-phy-ieee802.3-c45");
@@ -57,11 +80,14 @@ static int of_mdiobus_register_phy(struct mii_bus *mdio,
phy = phy_device_create(mdio, addr, phy_id, 0, NULL);
else
phy = get_phy_device(mdio, addr, is_c45);
- if (IS_ERR(phy))
+ if (IS_ERR(phy)) {
+ unregister_mii_timestamper(mii_ts);
return PTR_ERR(phy);
+ }
rc = of_irq_get(child, 0);
if (rc == -EPROBE_DEFER) {
+ unregister_mii_timestamper(mii_ts);
phy_device_free(phy);
return rc;
}
@@ -90,10 +116,12 @@ static int of_mdiobus_register_phy(struct mii_bus *mdio,
* register it */
rc = phy_device_register(phy);
if (rc) {
+ unregister_mii_timestamper(mii_ts);
phy_device_free(phy);
of_node_put(child);
return rc;
}
+ phy->mii_ts = mii_ts;
dev_dbg(&mdio->dev, "registered phy %pOFn at address %i\n",
child, addr);
diff --git a/drivers/of/of_private.h b/drivers/of/of_private.h
index 66294d29942a..207863c151a5 100644
--- a/drivers/of/of_private.h
+++ b/drivers/of/of_private.h
@@ -84,15 +84,11 @@ static inline void __of_detach_node_sysfs(struct device_node *np) {}
int of_resolve_phandles(struct device_node *tree);
#endif
-#if defined(CONFIG_OF_DYNAMIC)
-void __of_free_phandle_cache_entry(phandle handle);
-#endif
+void __of_phandle_cache_inv_entry(phandle handle);
#if defined(CONFIG_OF_OVERLAY)
void of_overlay_mutex_lock(void);
void of_overlay_mutex_unlock(void);
-int of_free_phandle_cache(void);
-void of_populate_phandle_cache(void);
#else
static inline void of_overlay_mutex_lock(void) {};
static inline void of_overlay_mutex_unlock(void) {};
diff --git a/drivers/of/overlay.c b/drivers/of/overlay.c
index 9617b7df7c4d..c9219fddf44b 100644
--- a/drivers/of/overlay.c
+++ b/drivers/of/overlay.c
@@ -974,8 +974,6 @@ static int of_overlay_apply(const void *fdt, struct device_node *tree,
goto err_free_overlay_changeset;
}
- of_populate_phandle_cache();
-
ret = __of_changeset_apply_notify(&ovcs->cset);
if (ret)
pr_err("overlay apply changeset entry notify error %d\n", ret);
@@ -1218,17 +1216,8 @@ int of_overlay_remove(int *ovcs_id)
list_del(&ovcs->ovcs_list);
- /*
- * Disable phandle cache. Avoids race condition that would arise
- * from removing cache entry when the associated node is deleted.
- */
- of_free_phandle_cache();
-
ret_apply = 0;
ret = __of_changeset_revert_entries(&ovcs->cset, &ret_apply);
-
- of_populate_phandle_cache();
-
if (ret) {
if (ret_apply)
devicetree_state_flags |= DTSF_REVERT_FAIL;
diff --git a/drivers/opp/core.c b/drivers/opp/core.c
index be7a7d332332..ba43e6a3dc0a 100644
--- a/drivers/opp/core.c
+++ b/drivers/opp/core.c
@@ -988,7 +988,6 @@ static struct opp_table *_allocate_opp_table(struct device *dev, int index)
BLOCKING_INIT_NOTIFIER_HEAD(&opp_table->head);
INIT_LIST_HEAD(&opp_table->opp_list);
kref_init(&opp_table->kref);
- kref_init(&opp_table->list_kref);
/* Secure the device table modification */
list_add(&opp_table->node, &opp_tables);
@@ -1072,33 +1071,6 @@ static void _opp_table_kref_release(struct kref *kref)
mutex_unlock(&opp_table_lock);
}
-void _opp_remove_all_static(struct opp_table *opp_table)
-{
- struct dev_pm_opp *opp, *tmp;
-
- list_for_each_entry_safe(opp, tmp, &opp_table->opp_list, node) {
- if (!opp->dynamic)
- dev_pm_opp_put(opp);
- }
-
- opp_table->parsed_static_opps = false;
-}
-
-static void _opp_table_list_kref_release(struct kref *kref)
-{
- struct opp_table *opp_table = container_of(kref, struct opp_table,
- list_kref);
-
- _opp_remove_all_static(opp_table);
- mutex_unlock(&opp_table_lock);
-}
-
-void _put_opp_list_kref(struct opp_table *opp_table)
-{
- kref_put_mutex(&opp_table->list_kref, _opp_table_list_kref_release,
- &opp_table_lock);
-}
-
void dev_pm_opp_put_opp_table(struct opp_table *opp_table)
{
kref_put_mutex(&opp_table->kref, _opp_table_kref_release,
@@ -1202,6 +1174,24 @@ void dev_pm_opp_remove(struct device *dev, unsigned long freq)
}
EXPORT_SYMBOL_GPL(dev_pm_opp_remove);
+void _opp_remove_all_static(struct opp_table *opp_table)
+{
+ struct dev_pm_opp *opp, *tmp;
+
+ mutex_lock(&opp_table->lock);
+
+ if (!opp_table->parsed_static_opps || --opp_table->parsed_static_opps)
+ goto unlock;
+
+ list_for_each_entry_safe(opp, tmp, &opp_table->opp_list, node) {
+ if (!opp->dynamic)
+ dev_pm_opp_put_unlocked(opp);
+ }
+
+unlock:
+ mutex_unlock(&opp_table->lock);
+}
+
/**
* dev_pm_opp_remove_all_dynamic() - Remove all dynamically created OPPs
* @dev: device for which we do this operation
@@ -2276,7 +2266,7 @@ void _dev_pm_opp_find_and_remove_table(struct device *dev)
return;
}
- _put_opp_list_kref(opp_table);
+ _opp_remove_all_static(opp_table);
/* Drop reference taken by _find_opp_table() */
dev_pm_opp_put_opp_table(opp_table);
diff --git a/drivers/opp/of.c b/drivers/opp/of.c
index 1cbb58240b80..9cd8f0adacae 100644
--- a/drivers/opp/of.c
+++ b/drivers/opp/of.c
@@ -658,17 +658,15 @@ static int _of_add_opp_table_v2(struct device *dev, struct opp_table *opp_table)
struct dev_pm_opp *opp;
/* OPP table is already initialized for the device */
+ mutex_lock(&opp_table->lock);
if (opp_table->parsed_static_opps) {
- kref_get(&opp_table->list_kref);
+ opp_table->parsed_static_opps++;
+ mutex_unlock(&opp_table->lock);
return 0;
}
- /*
- * Re-initialize list_kref every time we add static OPPs to the OPP
- * table as the reference count may be 0 after the last tie static OPPs
- * were removed.
- */
- kref_init(&opp_table->list_kref);
+ opp_table->parsed_static_opps = 1;
+ mutex_unlock(&opp_table->lock);
/* We have opp-table node now, iterate over it and add OPPs */
for_each_available_child_of_node(opp_table->np, np) {
@@ -678,15 +676,17 @@ static int _of_add_opp_table_v2(struct device *dev, struct opp_table *opp_table)
dev_err(dev, "%s: Failed to add OPP, %d\n", __func__,
ret);
of_node_put(np);
- return ret;
+ goto remove_static_opp;
} else if (opp) {
count++;
}
}
/* There should be one of more OPP defined */
- if (WARN_ON(!count))
- return -ENOENT;
+ if (WARN_ON(!count)) {
+ ret = -ENOENT;
+ goto remove_static_opp;
+ }
list_for_each_entry(opp, &opp_table->opp_list, node)
pstate_count += !!opp->pstate;
@@ -695,15 +695,19 @@ static int _of_add_opp_table_v2(struct device *dev, struct opp_table *opp_table)
if (pstate_count && pstate_count != count) {
dev_err(dev, "Not all nodes have performance state set (%d: %d)\n",
count, pstate_count);
- return -ENOENT;
+ ret = -ENOENT;
+ goto remove_static_opp;
}
if (pstate_count)
opp_table->genpd_performance_state = true;
- opp_table->parsed_static_opps = true;
-
return 0;
+
+remove_static_opp:
+ _opp_remove_all_static(opp_table);
+
+ return ret;
}
/* Initializes OPP tables based on old-deprecated bindings */
@@ -738,6 +742,7 @@ static int _of_add_opp_table_v1(struct device *dev, struct opp_table *opp_table)
if (ret) {
dev_err(dev, "%s: Failed to add OPP %ld (%d)\n",
__func__, freq, ret);
+ _opp_remove_all_static(opp_table);
return ret;
}
nr -= 2;
diff --git a/drivers/opp/opp.h b/drivers/opp/opp.h
index 01a500e2c40a..d14e27102730 100644
--- a/drivers/opp/opp.h
+++ b/drivers/opp/opp.h
@@ -127,11 +127,10 @@ enum opp_table_access {
* @dev_list: list of devices that share these OPPs
* @opp_list: table of opps
* @kref: for reference count of the table.
- * @list_kref: for reference count of the OPP list.
* @lock: mutex protecting the opp_list and dev_list.
* @np: struct device_node pointer for opp's DT node.
* @clock_latency_ns_max: Max clock latency in nanoseconds.
- * @parsed_static_opps: True if OPPs are initialized from DT.
+ * @parsed_static_opps: Count of devices for which OPPs are initialized from DT.
* @shared_opp: OPP is shared between multiple devices.
* @suspend_opp: Pointer to OPP to be used during device suspend.
* @genpd_virt_dev_lock: Mutex protecting the genpd virtual device pointers.
@@ -167,7 +166,6 @@ struct opp_table {
struct list_head dev_list;
struct list_head opp_list;
struct kref kref;
- struct kref list_kref;
struct mutex lock;
struct device_node *np;
@@ -176,7 +174,7 @@ struct opp_table {
/* For backward compatibility with v1 bindings */
unsigned int voltage_tolerance_v1;
- bool parsed_static_opps;
+ unsigned int parsed_static_opps;
enum opp_table_access shared_opp;
struct dev_pm_opp *suspend_opp;
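Taken together, the core.c and of.c hunks replace list_kref with parsed_static_opps acting as a plain usage count under opp_table->lock: each successful static parse for a sharing device increments it, and _opp_remove_all_static() only tears the static OPPs down once a matching decrement brings it back to zero. A condensed sketch of the add-side rule, where parse_static_opps_from_dt() is a hypothetical stand-in for the parsing loop:

static int get_static_opps(struct opp_table *t)
{
	mutex_lock(&t->lock);
	if (t->parsed_static_opps++) {
		/* already parsed for a sharing device: just count */
		mutex_unlock(&t->lock);
		return 0;
	}
	mutex_unlock(&t->lock);

	return parse_static_opps_from_dt(t);	/* hypothetical */
}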
diff --git a/drivers/opp/ti-opp-supply.c b/drivers/opp/ti-opp-supply.c
index 1c69c404df11..e3357e91decb 100644
--- a/drivers/opp/ti-opp-supply.c
+++ b/drivers/opp/ti-opp-supply.c
@@ -90,7 +90,7 @@ static int _store_optimized_voltages(struct device *dev,
goto out_map;
}
- base = ioremap_nocache(res->start, resource_size(res));
+ base = ioremap(res->start, resource_size(res));
if (!base) {
dev_err(dev, "Unable to map Efuse registers\n");
ret = -ENOMEM;
diff --git a/drivers/parisc/ccio-dma.c b/drivers/parisc/ccio-dma.c
index ad290f79983b..a5507f75b524 100644
--- a/drivers/parisc/ccio-dma.c
+++ b/drivers/parisc/ccio-dma.c
@@ -1534,7 +1534,7 @@ static int __init ccio_probe(struct parisc_device *dev)
*ioc_p = ioc;
ioc->hw_path = dev->hw_path;
- ioc->ioc_regs = ioremap_nocache(dev->hpa.start, 4096);
+ ioc->ioc_regs = ioremap(dev->hpa.start, 4096);
if (!ioc->ioc_regs) {
kfree(ioc);
return -ENOMEM;
diff --git a/drivers/parisc/dino.c b/drivers/parisc/dino.c
index 2f1cac89ddf5..889d7ce282eb 100644
--- a/drivers/parisc/dino.c
+++ b/drivers/parisc/dino.c
@@ -974,7 +974,7 @@ static int __init dino_probe(struct parisc_device *dev)
}
dino_dev->hba.dev = dev;
- dino_dev->hba.base_addr = ioremap_nocache(hpa, 4096);
+ dino_dev->hba.base_addr = ioremap(hpa, 4096);
dino_dev->hba.lmmio_space_offset = PCI_F_EXTEND;
spin_lock_init(&dino_dev->dinosaur_pen);
dino_dev->hba.iommu = ccio_get_iommu(dev);
diff --git a/drivers/parisc/eisa.c b/drivers/parisc/eisa.c
index 37a2c5db761d..9d00a24277aa 100644
--- a/drivers/parisc/eisa.c
+++ b/drivers/parisc/eisa.c
@@ -354,10 +354,10 @@ static int __init eisa_probe(struct parisc_device *dev)
eisa_dev.eeprom_addr = MIRAGE_EEPROM_BASE_ADDR;
}
}
- eisa_eeprom_addr = ioremap_nocache(eisa_dev.eeprom_addr, HPEE_MAX_LENGTH);
+ eisa_eeprom_addr = ioremap(eisa_dev.eeprom_addr, HPEE_MAX_LENGTH);
if (!eisa_eeprom_addr) {
result = -ENOMEM;
- printk(KERN_ERR "EISA: ioremap_nocache failed!\n");
+ printk(KERN_ERR "EISA: ioremap failed!\n");
goto error_free_irq;
}
result = eisa_enumerator(eisa_dev.eeprom_addr, &eisa_dev.hba.io_space,
diff --git a/drivers/parisc/iosapic.c b/drivers/parisc/iosapic.c
index 32f506f00c89..8a3b0c3a1e92 100644
--- a/drivers/parisc/iosapic.c
+++ b/drivers/parisc/iosapic.c
@@ -927,7 +927,7 @@ void *iosapic_register(unsigned long hpa)
return NULL;
}
- isi->addr = ioremap_nocache(hpa, 4096);
+ isi->addr = ioremap(hpa, 4096);
isi->isi_hpa = hpa;
isi->isi_version = iosapic_rd_version(isi);
isi->isi_num_vectors = IOSAPIC_IRDT_MAX_ENTRY(isi->isi_version) + 1;
diff --git a/drivers/parisc/lba_pci.c b/drivers/parisc/lba_pci.c
index a99e385c68bd..732b516c7bf8 100644
--- a/drivers/parisc/lba_pci.c
+++ b/drivers/parisc/lba_pci.c
@@ -1134,7 +1134,7 @@ lba_pat_resources(struct parisc_device *pa_dev, struct lba_device *lba_dev)
** Postable I/O port space is per PCI host adapter.
** base of 64MB PIOP region
*/
- lba_dev->iop_base = ioremap_nocache(p->start, 64 * 1024 * 1024);
+ lba_dev->iop_base = ioremap(p->start, 64 * 1024 * 1024);
sprintf(lba_dev->hba.io_name, "PCI%02x Ports",
(int)lba_dev->hba.bus_num.start);
@@ -1476,7 +1476,7 @@ lba_driver_probe(struct parisc_device *dev)
u32 func_class;
void *tmp_obj;
char *version;
- void __iomem *addr = ioremap_nocache(dev->hpa.start, 4096);
+ void __iomem *addr = ioremap(dev->hpa.start, 4096);
int max;
/* Read HW Rev First */
@@ -1575,7 +1575,7 @@ lba_driver_probe(struct parisc_device *dev)
} else {
if (!astro_iop_base) {
/* Sprockets PDC uses NPIOP region */
- astro_iop_base = ioremap_nocache(LBA_PORT_BASE, 64 * 1024);
+ astro_iop_base = ioremap(LBA_PORT_BASE, 64 * 1024);
pci_port = &lba_astro_port_ops;
}
@@ -1693,7 +1693,7 @@ void __init lba_init(void)
*/
void lba_set_iregs(struct parisc_device *lba, u32 ibase, u32 imask)
{
- void __iomem * base_addr = ioremap_nocache(lba->hpa.start, 4096);
+ void __iomem * base_addr = ioremap(lba->hpa.start, 4096);
imask <<= 2; /* adjust for hints - 2 more bits */
diff --git a/drivers/parisc/sba_iommu.c b/drivers/parisc/sba_iommu.c
index de8e4e347249..7e112829d250 100644
--- a/drivers/parisc/sba_iommu.c
+++ b/drivers/parisc/sba_iommu.c
@@ -1513,7 +1513,7 @@ sba_ioc_init(struct parisc_device *sba, struct ioc *ioc, int ioc_num)
static void __iomem *ioc_remap(struct sba_device *sba_dev, unsigned int offset)
{
- return ioremap_nocache(sba_dev->dev->hpa.start + offset, SBA_FUNC_SIZE);
+ return ioremap(sba_dev->dev->hpa.start + offset, SBA_FUNC_SIZE);
}
static void sba_hw_init(struct sba_device *sba_dev)
@@ -1883,7 +1883,7 @@ static int __init sba_driver_callback(struct parisc_device *dev)
u32 func_class;
int i;
char *version;
- void __iomem *sba_addr = ioremap_nocache(dev->hpa.start, SBA_FUNC_SIZE);
+ void __iomem *sba_addr = ioremap(dev->hpa.start, SBA_FUNC_SIZE);
#ifdef CONFIG_PROC_FS
struct proc_dir_entry *root;
#endif
diff --git a/drivers/pci/controller/dwc/pci-dra7xx.c b/drivers/pci/controller/dwc/pci-dra7xx.c
index b20651cea09f..9bf7fa99b103 100644
--- a/drivers/pci/controller/dwc/pci-dra7xx.c
+++ b/drivers/pci/controller/dwc/pci-dra7xx.c
@@ -719,7 +719,7 @@ static int __init dra7xx_pcie_probe(struct platform_device *pdev)
}
res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "ti_conf");
- base = devm_ioremap_nocache(dev, res->start, resource_size(res));
+ base = devm_ioremap(dev, res->start, resource_size(res));
if (!base)
return -ENOMEM;
diff --git a/drivers/pci/controller/dwc/pcie-designware-ep.c b/drivers/pci/controller/dwc/pcie-designware-ep.c
index 3dd2e2697294..cfeccd7e9fff 100644
--- a/drivers/pci/controller/dwc/pcie-designware-ep.c
+++ b/drivers/pci/controller/dwc/pcie-designware-ep.c
@@ -434,7 +434,7 @@ int dw_pcie_ep_raise_msix_irq(struct dw_pcie_ep *ep, u8 func_no,
tbl_addr += (tbl_offset + ((interrupt_num - 1) * PCI_MSIX_ENTRY_SIZE));
tbl_addr &= PCI_BASE_ADDRESS_MEM_MASK;
- msix_tbl = ioremap_nocache(ep->phys_base + tbl_addr,
+ msix_tbl = ioremap(ep->phys_base + tbl_addr,
PCI_MSIX_ENTRY_SIZE);
if (!msix_tbl)
return -EINVAL;
diff --git a/drivers/pci/msi.c b/drivers/pci/msi.c
index c7709e49f0e4..6b43a5455c7a 100644
--- a/drivers/pci/msi.c
+++ b/drivers/pci/msi.c
@@ -688,7 +688,7 @@ static void __iomem *msix_map_region(struct pci_dev *dev, unsigned nr_entries)
table_offset &= PCI_MSIX_TABLE_OFFSET;
phys_addr = pci_resource_start(dev, bir) + table_offset;
- return ioremap_nocache(phys_addr, nr_entries * PCI_MSIX_ENTRY_SIZE);
+ return ioremap(phys_addr, nr_entries * PCI_MSIX_ENTRY_SIZE);
}
static int msix_setup_entries(struct pci_dev *dev, void __iomem *base,
diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c
index e87196cc1a7f..df21e3227b57 100644
--- a/drivers/pci/pci.c
+++ b/drivers/pci/pci.c
@@ -184,7 +184,7 @@ void __iomem *pci_ioremap_bar(struct pci_dev *pdev, int bar)
pci_warn(pdev, "can't ioremap BAR %d: %pR\n", bar, res);
return NULL;
}
- return ioremap_nocache(res->start, resource_size(res));
+ return ioremap(res->start, resource_size(res));
}
EXPORT_SYMBOL_GPL(pci_ioremap_bar);
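The long run of ioremap_nocache() to ioremap() conversions through the parisc and PCI hunks is mechanical: ioremap_nocache() had long been defined as a plain alias, since ioremap() already returns an uncached mapping suitable for MMIO, and this series retires the alias tree-wide. The surviving idiom, for reference:

#include <linux/io.h>

static void __iomem *map_regs(phys_addr_t start, size_t len)
{
	return ioremap(start, len);	/* uncached MMIO mapping */
}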
diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c
index 4937a088d7d8..a3a1a0ea64f4 100644
--- a/drivers/pci/quirks.c
+++ b/drivers/pci/quirks.c
@@ -1571,7 +1571,7 @@ static void asus_hides_smbus_lpc_ich6_suspend(struct pci_dev *dev)
pci_read_config_dword(dev, 0xF0, &rcba);
/* use bits 31:14, 16 kB aligned */
- asus_rcba_base = ioremap_nocache(rcba & 0xFFFFC000, 0x4000);
+ asus_rcba_base = ioremap(rcba & 0xFFFFC000, 0x4000);
if (asus_rcba_base == NULL)
return;
}
@@ -4784,7 +4784,7 @@ static int pci_quirk_enable_intel_lpc_acs(struct pci_dev *dev)
if (!(rcba & INTEL_LPC_RCBA_ENABLE))
return -EINVAL;
- rcba_mem = ioremap_nocache(rcba & INTEL_LPC_RCBA_MASK,
+ rcba_mem = ioremap(rcba & INTEL_LPC_RCBA_MASK,
PAGE_ALIGN(INTEL_UPDCR_REG));
if (!rcba_mem)
return -ENOMEM;
@@ -5074,18 +5074,25 @@ DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_SERVERWORKS, 0x0422, quirk_no_ext_tags);
#ifdef CONFIG_PCI_ATS
/*
- * Some devices have a broken ATS implementation causing IOMMU stalls.
- * Don't use ATS for those devices.
+ * Some devices require additional driver setup to enable ATS. Don't use
+ * ATS for those devices as ATS will be enabled before the driver has had a
+ * chance to load and configure the device.
*/
-static void quirk_no_ats(struct pci_dev *pdev)
+static void quirk_amd_harvest_no_ats(struct pci_dev *pdev)
{
- pci_info(pdev, "disabling ATS (broken on this device)\n");
+ if (pdev->device == 0x7340 && pdev->revision != 0xc5)
+ return;
+
+ pci_info(pdev, "disabling ATS\n");
pdev->ats_cap = 0;
}
/* AMD Stoney platform GPU */
-DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0x98e4, quirk_no_ats);
-DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0x6900, quirk_no_ats);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0x98e4, quirk_amd_harvest_no_ats);
+/* AMD Iceland dGPU */
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0x6900, quirk_amd_harvest_no_ats);
+/* AMD Navi14 dGPU */
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0x7340, quirk_amd_harvest_no_ats);
#endif /* CONFIG_PCI_ATS */
/* Freescale PCIe doesn't support MSI in RC mode */
diff --git a/drivers/perf/fsl_imx8_ddr_perf.c b/drivers/perf/fsl_imx8_ddr_perf.c
index 55083c67b2bb..95dca2cb5265 100644
--- a/drivers/perf/fsl_imx8_ddr_perf.c
+++ b/drivers/perf/fsl_imx8_ddr_perf.c
@@ -633,13 +633,17 @@ static int ddr_perf_probe(struct platform_device *pdev)
if (ret < 0) {
dev_err(&pdev->dev, "cpuhp_setup_state_multi failed\n");
- goto ddr_perf_err;
+ goto cpuhp_state_err;
}
pmu->cpuhp_state = ret;
/* Register the pmu instance for cpu hotplug */
- cpuhp_state_add_instance_nocalls(pmu->cpuhp_state, &pmu->node);
+ ret = cpuhp_state_add_instance_nocalls(pmu->cpuhp_state, &pmu->node);
+ if (ret) {
+ dev_err(&pdev->dev, "Error %d registering hotplug\n", ret);
+ goto cpuhp_instance_err;
+ }
/* Request irq */
irq = of_irq_get(np, 0);
@@ -673,9 +677,10 @@ static int ddr_perf_probe(struct platform_device *pdev)
return 0;
ddr_perf_err:
- if (pmu->cpuhp_state)
- cpuhp_state_remove_instance_nocalls(pmu->cpuhp_state, &pmu->node);
-
+ cpuhp_state_remove_instance_nocalls(pmu->cpuhp_state, &pmu->node);
+cpuhp_instance_err:
+ cpuhp_remove_multi_state(pmu->cpuhp_state);
+cpuhp_state_err:
ida_simple_remove(&ddr_ida, pmu->id);
dev_warn(&pdev->dev, "i.MX8 DDR Perf PMU failed (%d), disabled\n", ret);
return ret;
@@ -686,6 +691,7 @@ static int ddr_perf_remove(struct platform_device *pdev)
struct ddr_pmu *pmu = platform_get_drvdata(pdev);
cpuhp_state_remove_instance_nocalls(pmu->cpuhp_state, &pmu->node);
+ cpuhp_remove_multi_state(pmu->cpuhp_state);
irq_set_affinity_hint(pmu->irq, NULL);
perf_pmu_unregister(&pmu->pmu);
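The fix above converts the probe error path to the kernel's usual unwind-label idiom: each label undoes exactly the steps that succeeded before the failure, in reverse order. A generic sketch; setup_a/b/c and their teardown counterparts are illustrative stand-ins, not real APIs:

    #include <linux/platform_device.h>

    static int setup_a(struct platform_device *p);
    static int setup_b(struct platform_device *p);
    static int setup_c(struct platform_device *p);
    static void teardown_a(struct platform_device *p);
    static void teardown_b(struct platform_device *p);

    static int demo_probe(struct platform_device *pdev)
    {
            int ret;

            ret = setup_a(pdev);
            if (ret)
                    return ret;     /* nothing to unwind yet */

            ret = setup_b(pdev);
            if (ret)
                    goto err_a;     /* undo A only */

            ret = setup_c(pdev);
            if (ret)
                    goto err_b;     /* undo B, then fall through to A */

            return 0;

    err_b:
            teardown_b(pdev);
    err_a:
            teardown_a(pdev);
            return ret;
    }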
diff --git a/drivers/perf/hisilicon/hisi_uncore_pmu.c b/drivers/perf/hisilicon/hisi_uncore_pmu.c
index 96183e31b96a..584de8f807cc 100644
--- a/drivers/perf/hisilicon/hisi_uncore_pmu.c
+++ b/drivers/perf/hisilicon/hisi_uncore_pmu.c
@@ -337,38 +337,44 @@ void hisi_uncore_pmu_disable(struct pmu *pmu)
hisi_pmu->ops->stop_counters(hisi_pmu);
}
+
/*
- * Read Super CPU cluster and CPU cluster ID from MPIDR_EL1.
- * If multi-threading is supported, On Huawei Kunpeng 920 SoC whose cpu
- * core is tsv110, CCL_ID is the low 3-bits in MPIDR[Aff2] and SCCL_ID
- * is the upper 5-bits of Aff2 field; while for other cpu types, SCCL_ID
- * is in MPIDR[Aff3] and CCL_ID is in MPIDR[Aff2], if not, SCCL_ID
- * is in MPIDR[Aff2] and CCL_ID is in MPIDR[Aff1].
+ * The Super CPU Cluster (SCCL) and CPU Cluster (CCL) IDs can be
+ * determined from the MPIDR_EL1, but the encoding varies by CPU:
+ *
+ * - For MT variants of TSV110:
+ * SCCL is Aff2[7:3], CCL is Aff2[2:0]
+ *
+ * - For other MT parts:
+ * SCCL is Aff3[7:0], CCL is Aff2[7:0]
+ *
+ * - For non-MT parts:
+ * SCCL is Aff2[7:0], CCL is Aff1[7:0]
*/
-static void hisi_read_sccl_and_ccl_id(int *sccl_id, int *ccl_id)
+static void hisi_read_sccl_and_ccl_id(int *scclp, int *cclp)
{
u64 mpidr = read_cpuid_mpidr();
-
- if (mpidr & MPIDR_MT_BITMASK) {
- if (read_cpuid_part_number() == HISI_CPU_PART_TSV110) {
- int aff2 = MPIDR_AFFINITY_LEVEL(mpidr, 2);
-
- if (sccl_id)
- *sccl_id = aff2 >> 3;
- if (ccl_id)
- *ccl_id = aff2 & 0x7;
- } else {
- if (sccl_id)
- *sccl_id = MPIDR_AFFINITY_LEVEL(mpidr, 3);
- if (ccl_id)
- *ccl_id = MPIDR_AFFINITY_LEVEL(mpidr, 2);
- }
+ int aff3 = MPIDR_AFFINITY_LEVEL(mpidr, 3);
+ int aff2 = MPIDR_AFFINITY_LEVEL(mpidr, 2);
+ int aff1 = MPIDR_AFFINITY_LEVEL(mpidr, 1);
+ bool mt = mpidr & MPIDR_MT_BITMASK;
+ int sccl, ccl;
+
+ if (mt && read_cpuid_part_number() == HISI_CPU_PART_TSV110) {
+ sccl = aff2 >> 3;
+ ccl = aff2 & 0x7;
+ } else if (mt) {
+ sccl = aff3;
+ ccl = aff2;
} else {
- if (sccl_id)
- *sccl_id = MPIDR_AFFINITY_LEVEL(mpidr, 2);
- if (ccl_id)
- *ccl_id = MPIDR_AFFINITY_LEVEL(mpidr, 1);
+ sccl = aff2;
+ ccl = aff1;
}
+
+ if (scclp)
+ *scclp = sccl;
+ if (cclp)
+ *cclp = ccl;
}
/*
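The rewrite decodes all three affinity fields up front and then selects among them, which reads more easily than nesting the checks around the output pointers. As a worked example of the TSV110 split: Aff2 = 0x2b gives SCCL = 0x2b >> 3 = 5 and CCL = 0x2b & 0x7 = 3. A standalone sketch with a made-up MPIDR value:

    #include <stdint.h>
    #include <stdbool.h>
    #include <stdio.h>

    /* Aff0..Aff2 live in MPIDR bits [23:0]; Aff3 lives in bits [39:32] */
    static int aff(uint64_t mpidr, int level)
    {
            return (mpidr >> (level == 3 ? 32 : 8 * level)) & 0xff;
    }

    int main(void)
    {
            uint64_t mpidr = (1ULL << 24) | (0x2bULL << 16); /* MT bit + Aff2 */
            bool mt = mpidr & (1ULL << 24);
            int sccl, ccl;

            if (mt) {                       /* TSV110-style split of Aff2 */
                    sccl = aff(mpidr, 2) >> 3;
                    ccl  = aff(mpidr, 2) & 0x7;
            } else {
                    sccl = aff(mpidr, 2);
                    ccl  = aff(mpidr, 1);
            }
            printf("sccl=%d ccl=%d\n", sccl, ccl);  /* prints sccl=5 ccl=3 */
            return 0;
    }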
diff --git a/drivers/phy/Kconfig b/drivers/phy/Kconfig
index 0263db2ac874..b3ed94b98d9b 100644
--- a/drivers/phy/Kconfig
+++ b/drivers/phy/Kconfig
@@ -69,5 +69,6 @@ source "drivers/phy/socionext/Kconfig"
source "drivers/phy/st/Kconfig"
source "drivers/phy/tegra/Kconfig"
source "drivers/phy/ti/Kconfig"
+source "drivers/phy/intel/Kconfig"
endmenu
diff --git a/drivers/phy/Makefile b/drivers/phy/Makefile
index c96a1afc95bd..310c149a9df5 100644
--- a/drivers/phy/Makefile
+++ b/drivers/phy/Makefile
@@ -18,6 +18,7 @@ obj-y += broadcom/ \
cadence/ \
freescale/ \
hisilicon/ \
+ intel/ \
lantiq/ \
marvell/ \
motorola/ \
diff --git a/drivers/phy/allwinner/Kconfig b/drivers/phy/allwinner/Kconfig
index 3dab79e9d52b..e760d89d3976 100644
--- a/drivers/phy/allwinner/Kconfig
+++ b/drivers/phy/allwinner/Kconfig
@@ -48,7 +48,8 @@ config PHY_SUN9I_USB
config PHY_SUN50I_USB3
tristate "Allwinner H6 SoC USB3 PHY driver"
- depends on ARCH_SUNXI && HAS_IOMEM && OF
+ depends on ARCH_SUNXI || COMPILE_TEST
+ depends on HAS_IOMEM && OF
depends on RESET_CONTROLLER
select GENERIC_PHY
help
diff --git a/drivers/phy/broadcom/Kconfig b/drivers/phy/broadcom/Kconfig
index d3d983c128ea..b29f11c19155 100644
--- a/drivers/phy/broadcom/Kconfig
+++ b/drivers/phy/broadcom/Kconfig
@@ -50,7 +50,7 @@ config PHY_BCM_NS_USB3
config PHY_NS2_PCIE
tristate "Broadcom Northstar2 PCIe PHY driver"
- depends on OF && MDIO_BUS_MUX_BCM_IPROC
+ depends on (OF && MDIO_BUS_MUX_BCM_IPROC) || (COMPILE_TEST && MDIO_BUS)
select GENERIC_PHY
default ARCH_BCM_IPROC
help
@@ -83,7 +83,7 @@ config PHY_BRCM_SATA
config PHY_BRCM_USB
tristate "Broadcom STB USB PHY driver"
- depends on ARCH_BRCMSTB
+ depends on ARCH_BRCMSTB || COMPILE_TEST
depends on OF
select GENERIC_PHY
select SOC_BRCMSTB
diff --git a/drivers/phy/broadcom/Makefile b/drivers/phy/broadcom/Makefile
index f453c7d3ffff..c78de546135c 100644
--- a/drivers/phy/broadcom/Makefile
+++ b/drivers/phy/broadcom/Makefile
@@ -8,7 +8,7 @@ obj-$(CONFIG_PHY_NS2_USB_DRD) += phy-bcm-ns2-usbdrd.o
obj-$(CONFIG_PHY_BRCM_SATA) += phy-brcm-sata.o
obj-$(CONFIG_PHY_BRCM_USB) += phy-brcm-usb-dvr.o
-phy-brcm-usb-dvr-objs := phy-brcm-usb.o phy-brcm-usb-init.o
+phy-brcm-usb-dvr-objs := phy-brcm-usb.o phy-brcm-usb-init.o phy-brcm-usb-init-synopsys.o
obj-$(CONFIG_PHY_BCM_SR_PCIE) += phy-bcm-sr-pcie.o
obj-$(CONFIG_PHY_BCM_SR_USB) += phy-bcm-sr-usb.o
diff --git a/drivers/phy/broadcom/phy-brcm-sata.c b/drivers/phy/broadcom/phy-brcm-sata.c
index 50ac75bbb0c9..4710cfcc3037 100644
--- a/drivers/phy/broadcom/phy-brcm-sata.c
+++ b/drivers/phy/broadcom/phy-brcm-sata.c
@@ -33,6 +33,7 @@
#define SATA_PHY_CTRL_REG_28NM_SPACE_SIZE 0x8
enum brcm_sata_phy_version {
+ BRCM_SATA_PHY_STB_16NM,
BRCM_SATA_PHY_STB_28NM,
BRCM_SATA_PHY_STB_40NM,
BRCM_SATA_PHY_IPROC_NS2,
@@ -104,10 +105,13 @@ enum sata_phy_regs {
PLL1_ACTRL5 = 0x85,
PLL1_ACTRL6 = 0x86,
PLL1_ACTRL7 = 0x87,
+ PLL1_ACTRL8 = 0x88,
TX_REG_BANK = 0x070,
TX_ACTRL0 = 0x80,
TX_ACTRL0_TXPOL_FLIP = BIT(6),
+ TX_ACTRL5 = 0x85,
+ TX_ACTRL5_SSC_EN = BIT(11),
AEQRX_REG_BANK_0 = 0xd0,
AEQ_CONTROL1 = 0x81,
@@ -116,6 +120,7 @@ enum sata_phy_regs {
AEQ_FRC_EQ = 0x83,
AEQ_FRC_EQ_FORCE = BIT(0),
AEQ_FRC_EQ_FORCE_VAL = BIT(1),
+ AEQ_RFZ_FRC_VAL = BIT(8),
AEQRX_REG_BANK_1 = 0xe0,
AEQRX_SLCAL0_CTRL0 = 0x82,
AEQRX_SLCAL1_CTRL0 = 0x86,
@@ -152,7 +157,28 @@ enum sata_phy_regs {
TXPMD_TX_FREQ_CTRL_CONTROL3_FMAX_MASK = 0x3ff,
RXPMD_REG_BANK = 0x1c0,
+ RXPMD_RX_CDR_CONTROL1 = 0x81,
+ RXPMD_RX_PPM_VAL_MASK = 0x1ff,
+ RXPMD_RXPMD_EN_FRC = BIT(12),
+ RXPMD_RXPMD_EN_FRC_VAL = BIT(13),
+ RXPMD_RX_CDR_CDR_PROP_BW = 0x82,
+ RXPMD_G_CDR_PROP_BW_MASK = 0x7,
+ RXPMD_G1_CDR_PROP_BW_SHIFT = 0,
+ RXPMD_G2_CDR_PROP_BW_SHIFT = 3,
+ RXPMD_G3_CDR_PROB_BW_SHIFT = 6,
+ RXPMD_RX_CDR_CDR_ACQ_INTEG_BW = 0x83,
+ RXPMD_G_CDR_ACQ_INT_BW_MASK = 0x7,
+ RXPMD_G1_CDR_ACQ_INT_BW_SHIFT = 0,
+ RXPMD_G2_CDR_ACQ_INT_BW_SHIFT = 3,
+ RXPMD_G3_CDR_ACQ_INT_BW_SHIFT = 6,
+ RXPMD_RX_CDR_CDR_LOCK_INTEG_BW = 0x84,
+ RXPMD_G_CDR_LOCK_INT_BW_MASK = 0x7,
+ RXPMD_G1_CDR_LOCK_INT_BW_SHIFT = 0,
+ RXPMD_G2_CDR_LOCK_INT_BW_SHIFT = 3,
+ RXPMD_G3_CDR_LOCK_INT_BW_SHIFT = 6,
RXPMD_RX_FREQ_MON_CONTROL1 = 0x87,
+ RXPMD_MON_CORRECT_EN = BIT(8),
+ RXPMD_MON_MARGIN_VAL_MASK = 0xff,
};
enum sata_phy_ctrl_regs {
@@ -166,6 +192,7 @@ static inline void __iomem *brcm_sata_pcb_base(struct brcm_sata_port *port)
u32 size = 0;
switch (priv->version) {
+ case BRCM_SATA_PHY_STB_16NM:
case BRCM_SATA_PHY_STB_28NM:
case BRCM_SATA_PHY_IPROC_NS2:
case BRCM_SATA_PHY_DSL_28NM:
@@ -287,6 +314,94 @@ static int brcm_stb_sata_init(struct brcm_sata_port *port)
return brcm_stb_sata_rxaeq_init(port);
}
+static int brcm_stb_sata_16nm_ssc_init(struct brcm_sata_port *port)
+{
+ void __iomem *base = brcm_sata_pcb_base(port);
+ u32 tmp, value;
+
+ /* Reduce CP tail current to 1/16th of its default value */
+ brcm_sata_phy_wr(base, PLL1_REG_BANK, PLL1_ACTRL6, 0, 0x141);
+
+ /* Turn off CP tail current boost */
+ brcm_sata_phy_wr(base, PLL1_REG_BANK, PLL1_ACTRL8, 0, 0xc006);
+
+ /* Set a specific AEQ equalizer value */
+ tmp = AEQ_FRC_EQ_FORCE_VAL | AEQ_FRC_EQ_FORCE;
+ brcm_sata_phy_wr(base, AEQRX_REG_BANK_0, AEQ_FRC_EQ,
+ ~(tmp | AEQ_RFZ_FRC_VAL |
+ AEQ_FRC_EQ_VAL_MASK << AEQ_FRC_EQ_VAL_SHIFT),
+ tmp | 32 << AEQ_FRC_EQ_VAL_SHIFT);
+
+ /* Set RX PPM val center frequency */
+ if (port->ssc_en)
+ value = 0x52;
+ else
+ value = 0;
+ brcm_sata_phy_wr(base, RXPMD_REG_BANK, RXPMD_RX_CDR_CONTROL1,
+ ~RXPMD_RX_PPM_VAL_MASK, value);
+
+ /* Set proportional loop bandwidth for Gen1/2/3 */
+ tmp = RXPMD_G_CDR_PROP_BW_MASK << RXPMD_G1_CDR_PROP_BW_SHIFT |
+ RXPMD_G_CDR_PROP_BW_MASK << RXPMD_G2_CDR_PROP_BW_SHIFT |
+ RXPMD_G_CDR_PROP_BW_MASK << RXPMD_G3_CDR_PROB_BW_SHIFT;
+ if (port->ssc_en)
+ value = 2 << RXPMD_G1_CDR_PROP_BW_SHIFT |
+ 2 << RXPMD_G2_CDR_PROP_BW_SHIFT |
+ 2 << RXPMD_G3_CDR_PROB_BW_SHIFT;
+ else
+ value = 1 << RXPMD_G1_CDR_PROP_BW_SHIFT |
+ 1 << RXPMD_G2_CDR_PROP_BW_SHIFT |
+ 1 << RXPMD_G3_CDR_PROB_BW_SHIFT;
+ brcm_sata_phy_wr(base, RXPMD_REG_BANK, RXPMD_RX_CDR_CDR_PROP_BW, ~tmp,
+ value);
+
+ /* Set CDR integral loop acquisition bandwidth for Gen1/2/3 */
+ tmp = RXPMD_G_CDR_ACQ_INT_BW_MASK << RXPMD_G1_CDR_ACQ_INT_BW_SHIFT |
+ RXPMD_G_CDR_ACQ_INT_BW_MASK << RXPMD_G2_CDR_ACQ_INT_BW_SHIFT |
+ RXPMD_G_CDR_ACQ_INT_BW_MASK << RXPMD_G3_CDR_ACQ_INT_BW_SHIFT;
+ if (port->ssc_en)
+ value = 1 << RXPMD_G1_CDR_ACQ_INT_BW_SHIFT |
+ 1 << RXPMD_G2_CDR_ACQ_INT_BW_SHIFT |
+ 1 << RXPMD_G3_CDR_ACQ_INT_BW_SHIFT;
+ else
+ value = 0;
+ brcm_sata_phy_wr(base, RXPMD_REG_BANK, RXPMD_RX_CDR_CDR_ACQ_INTEG_BW,
+ ~tmp, value);
+
+ /* Set CDR integral loop locking bandwidth to 1 for Gen 1/2/3 */
+ tmp = RXPMD_G_CDR_LOCK_INT_BW_MASK << RXPMD_G1_CDR_LOCK_INT_BW_SHIFT |
+ RXPMD_G_CDR_LOCK_INT_BW_MASK << RXPMD_G2_CDR_LOCK_INT_BW_SHIFT |
+ RXPMD_G_CDR_LOCK_INT_BW_MASK << RXPMD_G3_CDR_LOCK_INT_BW_SHIFT;
+ if (port->ssc_en)
+ value = 1 << RXPMD_G1_CDR_LOCK_INT_BW_SHIFT |
+ 1 << RXPMD_G2_CDR_LOCK_INT_BW_SHIFT |
+ 1 << RXPMD_G3_CDR_LOCK_INT_BW_SHIFT;
+ else
+ value = 0;
+ brcm_sata_phy_wr(base, RXPMD_REG_BANK, RXPMD_RX_CDR_CDR_LOCK_INTEG_BW,
+ ~tmp, value);
+
+ /* Set no guard band and clamp CDR */
+ tmp = RXPMD_MON_CORRECT_EN | RXPMD_MON_MARGIN_VAL_MASK;
+ if (port->ssc_en)
+ value = 0x51;
+ else
+ value = 0;
+ brcm_sata_phy_wr(base, RXPMD_REG_BANK, RXPMD_RX_FREQ_MON_CONTROL1,
+ ~tmp, RXPMD_MON_CORRECT_EN | value);
+
+ /* Turn on/off SSC */
+ brcm_sata_phy_wr(base, TX_REG_BANK, TX_ACTRL5, ~TX_ACTRL5_SSC_EN,
+ port->ssc_en ? TX_ACTRL5_SSC_EN : 0);
+
+ return 0;
+}
+
+static int brcm_stb_sata_16nm_init(struct brcm_sata_port *port)
+{
+ return brcm_stb_sata_16nm_ssc_init(port);
+}
+
/* NS2 SATA PLL1 defaults were characterized by H/W group */
#define NS2_PLL1_ACTRL2_MAGIC 0x1df8
#define NS2_PLL1_ACTRL3_MAGIC 0x2b00
@@ -544,6 +659,9 @@ static int brcm_sata_phy_init(struct phy *phy)
struct brcm_sata_port *port = phy_get_drvdata(phy);
switch (port->phy_priv->version) {
+ case BRCM_SATA_PHY_STB_16NM:
+ rc = brcm_stb_sata_16nm_init(port);
+ break;
case BRCM_SATA_PHY_STB_28NM:
case BRCM_SATA_PHY_STB_40NM:
rc = brcm_stb_sata_init(port);
@@ -601,6 +719,8 @@ static const struct phy_ops phy_ops = {
};
static const struct of_device_id brcm_sata_phy_of_match[] = {
+ { .compatible = "brcm,bcm7216-sata-phy",
+ .data = (void *)BRCM_SATA_PHY_STB_16NM },
{ .compatible = "brcm,bcm7445-sata-phy",
.data = (void *)BRCM_SATA_PHY_STB_28NM },
{ .compatible = "brcm,bcm7425-sata-phy",
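Every write in the new 16nm path goes through brcm_sata_phy_wr() with a mask and a value; the call sites pass the complement of the field mask so bits outside the field survive. The helper body is outside this hunk, so the following is an assumed reconstruction of its semantics from those call sites (bank selection omitted):

    #include <linux/io.h>
    #include <linux/types.h>

    /* Assumed semantics: keep the bits selected by msk, then OR in value */
    static void demo_rmw(void __iomem *reg, u32 msk, u32 value)
    {
            u32 tmp = readl(reg);

            writel((tmp & msk) | value, reg);
    }

    /*
     * Matching the call sites above: clear the 9-bit PPM field and
     * install 0x52:
     *   demo_rmw(reg, ~RXPMD_RX_PPM_VAL_MASK, 0x52);
     */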
diff --git a/drivers/phy/broadcom/phy-brcm-usb-init-synopsys.c b/drivers/phy/broadcom/phy-brcm-usb-init-synopsys.c
new file mode 100644
index 000000000000..456dc4a100c2
--- /dev/null
+++ b/drivers/phy/broadcom/phy-brcm-usb-init-synopsys.c
@@ -0,0 +1,414 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2018, Broadcom */
+
+/*
+ * This module contains USB PHY initialization for power up and S3 resume
+ * for newer Synopsys based USB hardware first used on the bcm7216.
+ */
+
+#include <linux/delay.h>
+#include <linux/io.h>
+
+#include <linux/soc/brcmstb/brcmstb.h>
+#include "phy-brcm-usb-init.h"
+
+#define PHY_LOCK_TIMEOUT_MS 200
+
+/* Register definitions for syscon piarbctl registers */
+#define PIARBCTL_CAM 0x00
+#define PIARBCTL_SPLITTER 0x04
+#define PIARBCTL_MISC 0x08
+#define PIARBCTL_MISC_SECURE_MASK 0x80000000
+#define PIARBCTL_MISC_USB_SELECT_MASK 0x40000000
+#define PIARBCTL_MISC_USB_4G_SDRAM_MASK 0x20000000
+#define PIARBCTL_MISC_USB_PRIORITY_MASK 0x000f0000
+#define PIARBCTL_MISC_USB_MEM_PAGE_MASK 0x0000f000
+#define PIARBCTL_MISC_CAM1_MEM_PAGE_MASK 0x00000f00
+#define PIARBCTL_MISC_CAM0_MEM_PAGE_MASK 0x000000f0
+#define PIARBCTL_MISC_SATA_PRIORITY_MASK 0x0000000f
+
+#define PIARBCTL_MISC_USB_ONLY_MASK \
+ (PIARBCTL_MISC_USB_SELECT_MASK | \
+ PIARBCTL_MISC_USB_4G_SDRAM_MASK | \
+ PIARBCTL_MISC_USB_PRIORITY_MASK | \
+ PIARBCTL_MISC_USB_MEM_PAGE_MASK)
+
+/* Register definitions for the USB CTRL block */
+#define USB_CTRL_SETUP 0x00
+#define USB_CTRL_SETUP_STRAP_IPP_SEL_MASK 0x02000000
+#define USB_CTRL_SETUP_SCB2_EN_MASK 0x00008000
+#define USB_CTRL_SETUP_tca_drv_sel_MASK 0x01000000
+#define USB_CTRL_SETUP_SCB1_EN_MASK 0x00004000
+#define USB_CTRL_SETUP_SOFT_SHUTDOWN_MASK 0x00000200
+#define USB_CTRL_SETUP_IPP_MASK 0x00000020
+#define USB_CTRL_SETUP_IOC_MASK 0x00000010
+#define USB_CTRL_USB_PM 0x04
+#define USB_CTRL_USB_PM_USB_PWRDN_MASK 0x80000000
+#define USB_CTRL_USB_PM_SOFT_RESET_MASK 0x40000000
+#define USB_CTRL_USB_PM_BDC_SOFT_RESETB_MASK 0x00800000
+#define USB_CTRL_USB_PM_XHC_SOFT_RESETB_MASK 0x00400000
+#define USB_CTRL_USB_PM_STATUS 0x08
+#define USB_CTRL_USB_DEVICE_CTL1 0x10
+#define USB_CTRL_USB_DEVICE_CTL1_PORT_MODE_MASK 0x00000003
+#define USB_CTRL_TEST_PORT_CTL 0x30
+#define USB_CTRL_TEST_PORT_CTL_TPOUT_SEL_MASK 0x000000ff
+#define USB_CTRL_TEST_PORT_CTL_TPOUT_SEL_PME_GEN_MASK 0x0000002e
+#define USB_CTRL_TP_DIAG1 0x34
+#define USB_CTLR_TP_DIAG1_wake_MASK 0x00000002
+#define USB_CTRL_CTLR_CSHCR 0x50
+#define USB_CTRL_CTLR_CSHCR_ctl_pme_en_MASK 0x00040000
+
+/* Register definitions for the USB_PHY block in 7211b0 */
+#define USB_PHY_PLL_CTL 0x00
+#define USB_PHY_PLL_CTL_PLL_RESETB_MASK 0x40000000
+#define USB_PHY_PLL_LDO_CTL 0x08
+#define USB_PHY_PLL_LDO_CTL_AFE_CORERDY_MASK 0x00000004
+#define USB_PHY_PLL_LDO_CTL_AFE_LDO_PWRDWNB_MASK 0x00000002
+#define USB_PHY_PLL_LDO_CTL_AFE_BG_PWRDWNB_MASK 0x00000001
+#define USB_PHY_UTMI_CTL_1 0x04
+#define USB_PHY_UTMI_CTL_1_POWER_UP_FSM_EN_MASK 0x00000800
+#define USB_PHY_UTMI_CTL_1_PHY_MODE_MASK 0x0000000c
+#define USB_PHY_UTMI_CTL_1_PHY_MODE_SHIFT 2
+#define USB_PHY_IDDQ 0x1c
+#define USB_PHY_IDDQ_phy_iddq_MASK 0x00000001
+#define USB_PHY_STATUS 0x20
+#define USB_PHY_STATUS_pll_lock_MASK 0x00000001
+
+/* Register definitions for the MDIO registers in the DWC2 block of
+ * the 7211b0.
+ * NOTE: The PHY's MDIO registers are only accessible through the
+ * legacy DesignWare USB controller even though it's not being used.
+ */
+#define USB_GMDIOCSR 0
+#define USB_GMDIOGEN 4
+
+/* Register definitions for the BDC EC block in 7211b0 */
+#define BDC_EC_AXIRDA 0x0c
+#define BDC_EC_AXIRDA_RTS_MASK 0xf0000000
+#define BDC_EC_AXIRDA_RTS_SHIFT 28
+
+
+static void usb_mdio_write_7211b0(struct brcm_usb_init_params *params,
+ uint8_t addr, uint16_t data)
+{
+ void __iomem *usb_mdio = params->regs[BRCM_REGS_USB_MDIO];
+
+ addr &= 0x1f; /* 5-bit address */
+ brcm_usb_writel(0xffffffff, usb_mdio + USB_GMDIOGEN);
+ while (brcm_usb_readl(usb_mdio + USB_GMDIOCSR) & (1<<31))
+ ;
+ brcm_usb_writel(0x59020000 | (addr << 18) | data,
+ usb_mdio + USB_GMDIOGEN);
+ while (brcm_usb_readl(usb_mdio + USB_GMDIOCSR) & (1<<31))
+ ;
+ brcm_usb_writel(0x00000000, usb_mdio + USB_GMDIOGEN);
+ while (brcm_usb_readl(usb_mdio + USB_GMDIOCSR) & (1<<31))
+ ;
+}
+
+static uint16_t __maybe_unused usb_mdio_read_7211b0(
+ struct brcm_usb_init_params *params, uint8_t addr)
+{
+ void __iomem *usb_mdio = params->regs[BRCM_REGS_USB_MDIO];
+
+ addr &= 0x1f; /* 5-bit address */
+ brcm_usb_writel(0xffffffff, usb_mdio + USB_GMDIOGEN);
+ while (brcm_usb_readl(usb_mdio + USB_GMDIOCSR) & (1<<31))
+ ;
+ brcm_usb_writel(0x69020000 | (addr << 18), usb_mdio + USB_GMDIOGEN);
+ while (brcm_usb_readl(usb_mdio + USB_GMDIOCSR) & (1<<31))
+ ;
+ brcm_usb_writel(0x00000000, usb_mdio + USB_GMDIOGEN);
+ while (brcm_usb_readl(usb_mdio + USB_GMDIOCSR) & (1<<31))
+ ;
+ return brcm_usb_readl(usb_mdio + USB_GMDIOCSR) & 0xffff;
+}
+
+static void usb2_eye_fix_7211b0(struct brcm_usb_init_params *params)
+{
+ /* select bank */
+ usb_mdio_write_7211b0(params, 0x1f, 0x80a0);
+
+ /* Set the eye */
+ usb_mdio_write_7211b0(params, 0x0a, 0xc6a0);
+}
+
+static void xhci_soft_reset(struct brcm_usb_init_params *params,
+ int on_off)
+{
+ void __iomem *ctrl = params->regs[BRCM_REGS_CTRL];
+
+ /* Assert reset */
+ if (on_off)
+ USB_CTRL_UNSET(ctrl, USB_PM, XHC_SOFT_RESETB);
+ /* De-assert reset */
+ else
+ USB_CTRL_SET(ctrl, USB_PM, XHC_SOFT_RESETB);
+}
+
+static void usb_init_ipp(struct brcm_usb_init_params *params)
+{
+ void __iomem *ctrl = params->regs[BRCM_REGS_CTRL];
+ u32 reg;
+ u32 orig_reg;
+
+ pr_debug("%s\n", __func__);
+
+ orig_reg = reg = brcm_usb_readl(USB_CTRL_REG(ctrl, SETUP));
+ if (params->ipp != 2)
+ /* override ipp strap pin (if it exists) */
+ reg &= ~(USB_CTRL_MASK(SETUP, STRAP_IPP_SEL));
+
+ /* Override the default OC and PP polarity */
+ reg &= ~(USB_CTRL_MASK(SETUP, IPP) | USB_CTRL_MASK(SETUP, IOC));
+ if (params->ioc)
+ reg |= USB_CTRL_MASK(SETUP, IOC);
+ if (params->ipp == 1)
+ reg |= USB_CTRL_MASK(SETUP, IPP);
+ brcm_usb_writel(reg, USB_CTRL_REG(ctrl, SETUP));
+
+ /*
+ * If we're changing IPP, make sure power is off long enough
+ * to turn off any connected devices.
+ */
+ if ((reg ^ orig_reg) & USB_CTRL_MASK(SETUP, IPP))
+ msleep(50);
+}
+
+static void syscon_piarbctl_init(struct regmap *rmap)
+{
+ /* Switch from legacy USB OTG controller to new STB USB controller */
+ regmap_update_bits(rmap, PIARBCTL_MISC, PIARBCTL_MISC_USB_ONLY_MASK,
+ PIARBCTL_MISC_USB_SELECT_MASK |
+ PIARBCTL_MISC_USB_4G_SDRAM_MASK);
+}
+
+static void usb_init_common(struct brcm_usb_init_params *params)
+{
+ u32 reg;
+ void __iomem *ctrl = params->regs[BRCM_REGS_CTRL];
+
+ pr_debug("%s\n", __func__);
+
+ USB_CTRL_UNSET(ctrl, USB_PM, USB_PWRDN);
+ /* 1 millisecond - for USB clocks to settle down */
+ usleep_range(1000, 2000);
+
+ if (USB_CTRL_MASK(USB_DEVICE_CTL1, PORT_MODE)) {
+ reg = brcm_usb_readl(USB_CTRL_REG(ctrl, USB_DEVICE_CTL1));
+ reg &= ~USB_CTRL_MASK(USB_DEVICE_CTL1, PORT_MODE);
+ reg |= params->mode;
+ brcm_usb_writel(reg, USB_CTRL_REG(ctrl, USB_DEVICE_CTL1));
+ }
+ switch (params->mode) {
+ case USB_CTLR_MODE_HOST:
+ USB_CTRL_UNSET(ctrl, USB_PM, BDC_SOFT_RESETB);
+ break;
+ default:
+ USB_CTRL_UNSET(ctrl, USB_PM, BDC_SOFT_RESETB);
+ USB_CTRL_SET(ctrl, USB_PM, BDC_SOFT_RESETB);
+ break;
+ }
+}
+
+static void usb_wake_enable_7211b0(struct brcm_usb_init_params *params,
+ bool enable)
+{
+ void __iomem *ctrl = params->regs[BRCM_REGS_CTRL];
+
+ if (enable)
+ USB_CTRL_SET(ctrl, CTLR_CSHCR, ctl_pme_en);
+ else
+ USB_CTRL_UNSET(ctrl, CTLR_CSHCR, ctl_pme_en);
+}
+
+static void usb_init_common_7211b0(struct brcm_usb_init_params *params)
+{
+ void __iomem *ctrl = params->regs[BRCM_REGS_CTRL];
+ void __iomem *usb_phy = params->regs[BRCM_REGS_USB_PHY];
+ void __iomem *bdc_ec = params->regs[BRCM_REGS_BDC_EC];
+ int timeout_ms = PHY_LOCK_TIMEOUT_MS;
+ u32 reg;
+
+ if (params->syscon_piarbctl)
+ syscon_piarbctl_init(params->syscon_piarbctl);
+
+ USB_CTRL_UNSET(ctrl, USB_PM, USB_PWRDN);
+
+ usb_wake_enable_7211b0(params, false);
+ if (!params->wake_enabled) {
+
+ /* undo possible suspend settings */
+ brcm_usb_writel(0, usb_phy + USB_PHY_IDDQ);
+ reg = brcm_usb_readl(usb_phy + USB_PHY_PLL_CTL);
+ reg |= USB_PHY_PLL_CTL_PLL_RESETB_MASK;
+ brcm_usb_writel(reg, usb_phy + USB_PHY_PLL_CTL);
+
+ /* temporarily enable FSM so PHY comes up properly */
+ reg = brcm_usb_readl(usb_phy + USB_PHY_UTMI_CTL_1);
+ reg |= USB_PHY_UTMI_CTL_1_POWER_UP_FSM_EN_MASK;
+ brcm_usb_writel(reg, usb_phy + USB_PHY_UTMI_CTL_1);
+ }
+
+ /* Init the PHY */
+ reg = USB_PHY_PLL_LDO_CTL_AFE_CORERDY_MASK |
+ USB_PHY_PLL_LDO_CTL_AFE_LDO_PWRDWNB_MASK |
+ USB_PHY_PLL_LDO_CTL_AFE_BG_PWRDWNB_MASK;
+ brcm_usb_writel(reg, usb_phy + USB_PHY_PLL_LDO_CTL);
+
+ /* wait for lock */
+ while (timeout_ms-- > 0) {
+ reg = brcm_usb_readl(usb_phy + USB_PHY_STATUS);
+ if (reg & USB_PHY_STATUS_pll_lock_MASK)
+ break;
+ usleep_range(1000, 2000);
+ }
+
+ /* Set the PHY_MODE */
+ reg = brcm_usb_readl(usb_phy + USB_PHY_UTMI_CTL_1);
+ reg &= ~USB_PHY_UTMI_CTL_1_PHY_MODE_MASK;
+ reg |= params->mode << USB_PHY_UTMI_CTL_1_PHY_MODE_SHIFT;
+ brcm_usb_writel(reg, usb_phy + USB_PHY_UTMI_CTL_1);
+
+ /* Fix the incorrect default */
+ reg = brcm_usb_readl(ctrl + USB_CTRL_SETUP);
+ reg &= ~USB_CTRL_SETUP_tca_drv_sel_MASK;
+ brcm_usb_writel(reg, ctrl + USB_CTRL_SETUP);
+
+ usb_init_common(params);
+
+ /*
+ * The BDC controller will get occasional failures with
+ * the default "Read Transaction Size" of 6 (1024 bytes).
+ * Set it to 4 (256 bytes).
+ */
+ if ((params->mode != USB_CTLR_MODE_HOST) && bdc_ec) {
+ reg = brcm_usb_readl(bdc_ec + BDC_EC_AXIRDA);
+ reg &= ~BDC_EC_AXIRDA_RTS_MASK;
+ reg |= (0x4 << BDC_EC_AXIRDA_RTS_SHIFT);
+ brcm_usb_writel(reg, bdc_ec + BDC_EC_AXIRDA);
+ }
+
+ /*
+ * Disable FSM, otherwise the PHY will auto suspend when no
+ * device is connected and will be reset on resume.
+ */
+ reg = brcm_usb_readl(usb_phy + USB_PHY_UTMI_CTL_1);
+ reg &= ~USB_PHY_UTMI_CTL_1_POWER_UP_FSM_EN_MASK;
+ brcm_usb_writel(reg, usb_phy + USB_PHY_UTMI_CTL_1);
+
+ usb2_eye_fix_7211b0(params);
+}
+
+static void usb_init_xhci(struct brcm_usb_init_params *params)
+{
+ pr_debug("%s\n", __func__);
+
+ xhci_soft_reset(params, 0);
+}
+
+static void usb_uninit_common(struct brcm_usb_init_params *params)
+{
+ void __iomem *ctrl = params->regs[BRCM_REGS_CTRL];
+
+ pr_debug("%s\n", __func__);
+
+ USB_CTRL_SET(ctrl, USB_PM, USB_PWRDN);
+
+}
+
+static void usb_uninit_common_7211b0(struct brcm_usb_init_params *params)
+{
+ void __iomem *ctrl = params->regs[BRCM_REGS_CTRL];
+ void __iomem *usb_phy = params->regs[BRCM_REGS_USB_PHY];
+ u32 reg;
+
+ pr_debug("%s\n", __func__);
+
+ if (params->wake_enabled) {
+ USB_CTRL_SET(ctrl, TEST_PORT_CTL, TPOUT_SEL_PME_GEN);
+ usb_wake_enable_7211b0(params, true);
+ } else {
+ USB_CTRL_SET(ctrl, USB_PM, USB_PWRDN);
+ brcm_usb_writel(0, usb_phy + USB_PHY_PLL_LDO_CTL);
+ reg = brcm_usb_readl(usb_phy + USB_PHY_PLL_CTL);
+ reg &= ~USB_PHY_PLL_CTL_PLL_RESETB_MASK;
+ brcm_usb_writel(reg, usb_phy + USB_PHY_PLL_CTL);
+ brcm_usb_writel(USB_PHY_IDDQ_phy_iddq_MASK,
+ usb_phy + USB_PHY_IDDQ);
+ }
+
+}
+
+static void usb_uninit_xhci(struct brcm_usb_init_params *params)
+{
+
+ pr_debug("%s\n", __func__);
+
+ if (!params->wake_enabled)
+ xhci_soft_reset(params, 1);
+}
+
+static int usb_get_dual_select(struct brcm_usb_init_params *params)
+{
+ void __iomem *ctrl = params->regs[BRCM_REGS_CTRL];
+ u32 reg = 0;
+
+ pr_debug("%s\n", __func__);
+
+ reg = brcm_usb_readl(USB_CTRL_REG(ctrl, USB_DEVICE_CTL1));
+ reg &= USB_CTRL_MASK(USB_DEVICE_CTL1, PORT_MODE);
+ return reg;
+}
+
+static void usb_set_dual_select(struct brcm_usb_init_params *params, int mode)
+{
+ void __iomem *ctrl = params->regs[BRCM_REGS_CTRL];
+ u32 reg;
+
+ pr_debug("%s\n", __func__);
+
+ reg = brcm_usb_readl(USB_CTRL_REG(ctrl, USB_DEVICE_CTL1));
+ reg &= ~USB_CTRL_MASK(USB_DEVICE_CTL1, PORT_MODE);
+ reg |= mode;
+ brcm_usb_writel(reg, USB_CTRL_REG(ctrl, USB_DEVICE_CTL1));
+}
+
+static const struct brcm_usb_init_ops bcm7216_ops = {
+ .init_ipp = usb_init_ipp,
+ .init_common = usb_init_common,
+ .init_xhci = usb_init_xhci,
+ .uninit_common = usb_uninit_common,
+ .uninit_xhci = usb_uninit_xhci,
+ .get_dual_select = usb_get_dual_select,
+ .set_dual_select = usb_set_dual_select,
+};
+
+static const struct brcm_usb_init_ops bcm7211b0_ops = {
+ .init_ipp = usb_init_ipp,
+ .init_common = usb_init_common_7211b0,
+ .init_xhci = usb_init_xhci,
+ .uninit_common = usb_uninit_common_7211b0,
+ .uninit_xhci = usb_uninit_xhci,
+ .get_dual_select = usb_get_dual_select,
+ .set_dual_select = usb_set_dual_select,
+};
+
+void brcm_usb_dvr_init_7216(struct brcm_usb_init_params *params)
+{
+
+ pr_debug("%s\n", __func__);
+
+ params->family_name = "7216";
+ params->ops = &bcm7216_ops;
+}
+
+void brcm_usb_dvr_init_7211b0(struct brcm_usb_init_params *params)
+{
+
+ pr_debug("%s\n", __func__);
+
+ params->family_name = "7211";
+ params->ops = &bcm7211b0_ops;
+ params->suspend_with_clocks = true;
+}
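The MDIO helpers in this new file spin on bit 31 of USB_GMDIOCSR with no upper bound. If a bound were wanted, the usual kernel pattern is readl_poll_timeout(); a hedged sketch, not part of the patch (bit 31 as a busy flag is inferred from the loops above, and the endian-aware brcm_usb_readl() accessor is not used here):

    #include <linux/bits.h>
    #include <linux/iopoll.h>

    static int demo_mdio_wait_idle(void __iomem *usb_mdio)
    {
            u32 csr;

            /* Poll every 10 us, give up after 10 ms */
            return readl_poll_timeout(usb_mdio + USB_GMDIOCSR, csr,
                                      !(csr & BIT(31)), 10, 10000);
    }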
diff --git a/drivers/phy/broadcom/phy-brcm-usb-init.c b/drivers/phy/broadcom/phy-brcm-usb-init.c
index 91b5b09589d6..9391ab42a12b 100644
--- a/drivers/phy/broadcom/phy-brcm-usb-init.c
+++ b/drivers/phy/broadcom/phy-brcm-usb-init.c
@@ -42,6 +42,7 @@
#define USB_CTRL_PLL_CTL_PLL_IDDQ_PWRDN_MASK 0x80000000 /* option */
#define USB_CTRL_EBRIDGE 0x0c
#define USB_CTRL_EBRIDGE_ESTOP_SCB_REQ_MASK 0x00020000 /* option */
+#define USB_CTRL_EBRIDGE_EBR_SCB_SIZE_MASK 0x00000f80 /* option */
#define USB_CTRL_OBRIDGE 0x10
#define USB_CTRL_OBRIDGE_LS_KEEP_ALIVE_MASK 0x08000000
#define USB_CTRL_MDIO 0x14
@@ -57,6 +58,8 @@
#define USB_CTRL_USB_PM_SOFT_RESET_MASK 0x40000000 /* option */
#define USB_CTRL_USB_PM_USB20_HC_RESETB_MASK 0x30000000 /* option */
#define USB_CTRL_USB_PM_USB20_HC_RESETB_VAR_MASK 0x00300000 /* option */
+#define USB_CTRL_USB_PM_RMTWKUP_EN_MASK 0x00000001
+#define USB_CTRL_USB_PM_STATUS 0x38
#define USB_CTRL_USB30_CTL1 0x60
#define USB_CTRL_USB30_CTL1_PHY3_PLL_SEQ_START_MASK 0x00000010
#define USB_CTRL_USB30_CTL1_PHY3_RESETB_MASK 0x00010000
@@ -126,10 +129,6 @@ enum {
USB_CTRL_SELECTOR_COUNT,
};
-#define USB_CTRL_REG(base, reg) ((void __iomem *)base + USB_CTRL_##reg)
-#define USB_XHCI_EC_REG(base, reg) ((void __iomem *)base + USB_XHCI_EC_##reg)
-#define USB_CTRL_MASK(reg, field) \
- USB_CTRL_##reg##_##field##_MASK
#define USB_CTRL_MASK_FAMILY(params, reg, field) \
(params->usb_reg_bits_map[USB_CTRL_##reg##_##field##_SELECTOR])
@@ -140,13 +139,6 @@ enum {
usb_ctrl_unset_family(params, USB_CTRL_##reg, \
USB_CTRL_##reg##_##field##_SELECTOR)
-#define USB_CTRL_SET(base, reg, field) \
- usb_ctrl_set(USB_CTRL_REG(base, reg), \
- USB_CTRL_##reg##_##field##_MASK)
-#define USB_CTRL_UNSET(base, reg, field) \
- usb_ctrl_unset(USB_CTRL_REG(base, reg), \
- USB_CTRL_##reg##_##field##_MASK)
-
#define MDIO_USB2 0
#define MDIO_USB3 BIT(31)
@@ -176,6 +168,7 @@ static const struct id_to_type id_to_type_table[] = {
{ 0x33900000, BRCM_FAMILY_3390A0 },
{ 0x72500010, BRCM_FAMILY_7250B0 },
{ 0x72600000, BRCM_FAMILY_7260A0 },
+ { 0x72550000, BRCM_FAMILY_7260A0 },
{ 0x72680000, BRCM_FAMILY_7271A0 },
{ 0x72710000, BRCM_FAMILY_7271A0 },
{ 0x73640000, BRCM_FAMILY_7364A0 },
@@ -401,26 +394,14 @@ usb_reg_bits_map_table[BRCM_FAMILY_COUNT][USB_CTRL_SELECTOR_COUNT] = {
},
};
-static inline u32 brcmusb_readl(void __iomem *addr)
-{
- return readl(addr);
-}
-
-static inline void brcmusb_writel(u32 val, void __iomem *addr)
-{
- writel(val, addr);
-}
-
static inline
void usb_ctrl_unset_family(struct brcm_usb_init_params *params,
u32 reg_offset, u32 field)
{
u32 mask;
- void __iomem *reg;
mask = params->usb_reg_bits_map[field];
- reg = params->ctrl_regs + reg_offset;
- brcmusb_writel(brcmusb_readl(reg) & ~mask, reg);
+ brcm_usb_ctrl_unset(params->regs[BRCM_REGS_CTRL] + reg_offset, mask);
};
static inline
@@ -428,45 +409,27 @@ void usb_ctrl_set_family(struct brcm_usb_init_params *params,
u32 reg_offset, u32 field)
{
u32 mask;
- void __iomem *reg;
mask = params->usb_reg_bits_map[field];
- reg = params->ctrl_regs + reg_offset;
- brcmusb_writel(brcmusb_readl(reg) | mask, reg);
+ brcm_usb_ctrl_set(params->regs[BRCM_REGS_CTRL] + reg_offset, mask);
};
-static inline void usb_ctrl_set(void __iomem *reg, u32 field)
-{
- u32 value;
-
- value = brcmusb_readl(reg);
- brcmusb_writel(value | field, reg);
-}
-
-static inline void usb_ctrl_unset(void __iomem *reg, u32 field)
-{
- u32 value;
-
- value = brcmusb_readl(reg);
- brcmusb_writel(value & ~field, reg);
-}
-
static u32 brcmusb_usb_mdio_read(void __iomem *ctrl_base, u32 reg, int mode)
{
u32 data;
data = (reg << 16) | mode;
- brcmusb_writel(data, USB_CTRL_REG(ctrl_base, MDIO));
+ brcm_usb_writel(data, USB_CTRL_REG(ctrl_base, MDIO));
data |= (1 << 24);
- brcmusb_writel(data, USB_CTRL_REG(ctrl_base, MDIO));
+ brcm_usb_writel(data, USB_CTRL_REG(ctrl_base, MDIO));
data &= ~(1 << 24);
/* wait for the 60MHz parallel to serial shifter */
usleep_range(10, 20);
- brcmusb_writel(data, USB_CTRL_REG(ctrl_base, MDIO));
+ brcm_usb_writel(data, USB_CTRL_REG(ctrl_base, MDIO));
/* wait for the 60MHz parallel to serial shifter */
usleep_range(10, 20);
- return brcmusb_readl(USB_CTRL_REG(ctrl_base, MDIO2)) & 0xffff;
+ return brcm_usb_readl(USB_CTRL_REG(ctrl_base, MDIO2)) & 0xffff;
}
static void brcmusb_usb_mdio_write(void __iomem *ctrl_base, u32 reg,
@@ -475,14 +438,14 @@ static void brcmusb_usb_mdio_write(void __iomem *ctrl_base, u32 reg,
u32 data;
data = (reg << 16) | val | mode;
- brcmusb_writel(data, USB_CTRL_REG(ctrl_base, MDIO));
+ brcm_usb_writel(data, USB_CTRL_REG(ctrl_base, MDIO));
data |= (1 << 25);
- brcmusb_writel(data, USB_CTRL_REG(ctrl_base, MDIO));
+ brcm_usb_writel(data, USB_CTRL_REG(ctrl_base, MDIO));
data &= ~(1 << 25);
/* wait for the 60MHz parallel to serial shifter */
usleep_range(10, 20);
- brcmusb_writel(data, USB_CTRL_REG(ctrl_base, MDIO));
+ brcm_usb_writel(data, USB_CTRL_REG(ctrl_base, MDIO));
/* wait for the 60MHz parallel to serial shifter */
usleep_range(10, 20);
}
@@ -581,7 +544,7 @@ static void brcmusb_usb3_pll_54mhz(struct brcm_usb_init_params *params)
{
u32 ofs;
int ii;
- void __iomem *ctrl_base = params->ctrl_regs;
+ void __iomem *ctrl_base = params->regs[BRCM_REGS_CTRL];
/*
* On newer B53-based SoCs, the reference clock for the
@@ -662,7 +625,7 @@ static void brcmusb_usb3_ssc_enable(void __iomem *ctrl_base)
static void brcmusb_usb3_phy_workarounds(struct brcm_usb_init_params *params)
{
- void __iomem *ctrl_base = params->ctrl_regs;
+ void __iomem *ctrl_base = params->regs[BRCM_REGS_CTRL];
brcmusb_usb3_pll_fix(ctrl_base);
brcmusb_usb3_pll_54mhz(params);
@@ -704,21 +667,21 @@ static void brcmusb_memc_fix(struct brcm_usb_init_params *params)
static void brcmusb_usb3_otp_fix(struct brcm_usb_init_params *params)
{
- void __iomem *xhci_ec_base = params->xhci_ec_regs;
+ void __iomem *xhci_ec_base = params->regs[BRCM_REGS_XHCI_EC];
u32 val;
if (params->family_id != 0x74371000 || !xhci_ec_base)
return;
- brcmusb_writel(0xa20c, USB_XHCI_EC_REG(xhci_ec_base, IRAADR));
- val = brcmusb_readl(USB_XHCI_EC_REG(xhci_ec_base, IRADAT));
+ brcm_usb_writel(0xa20c, USB_XHCI_EC_REG(xhci_ec_base, IRAADR));
+ val = brcm_usb_readl(USB_XHCI_EC_REG(xhci_ec_base, IRADAT));
/* set cfg_pick_ss_lock */
val |= (1 << 27);
- brcmusb_writel(val, USB_XHCI_EC_REG(xhci_ec_base, IRADAT));
+ brcm_usb_writel(val, USB_XHCI_EC_REG(xhci_ec_base, IRADAT));
/* Reset USB 3.0 PHY for workaround to take effect */
- USB_CTRL_UNSET(params->ctrl_regs, USB30_CTL1, PHY3_RESETB);
- USB_CTRL_SET(params->ctrl_regs, USB30_CTL1, PHY3_RESETB);
+ USB_CTRL_UNSET(params->regs[BRCM_REGS_CTRL], USB30_CTL1, PHY3_RESETB);
+ USB_CTRL_SET(params->regs[BRCM_REGS_CTRL], USB30_CTL1, PHY3_RESETB);
}
static void brcmusb_xhci_soft_reset(struct brcm_usb_init_params *params,
@@ -747,7 +710,7 @@ static void brcmusb_xhci_soft_reset(struct brcm_usb_init_params *params,
* - default chip/rev.
* NOTE: The minor rev is always ignored.
*/
-static enum brcm_family_type brcmusb_get_family_type(
+static enum brcm_family_type get_family_type(
struct brcm_usb_init_params *params)
{
int last_type = -1;
@@ -775,9 +738,9 @@ static enum brcm_family_type brcmusb_get_family_type(
return last_type;
}
-void brcm_usb_init_ipp(struct brcm_usb_init_params *params)
+static void usb_init_ipp(struct brcm_usb_init_params *params)
{
- void __iomem *ctrl = params->ctrl_regs;
+ void __iomem *ctrl = params->regs[BRCM_REGS_CTRL];
u32 reg;
u32 orig_reg;
@@ -791,7 +754,7 @@ void brcm_usb_init_ipp(struct brcm_usb_init_params *params)
USB_CTRL_SET_FAMILY(params, USB30_CTL1, USB3_IPP);
}
- reg = brcmusb_readl(USB_CTRL_REG(ctrl, SETUP));
+ reg = brcm_usb_readl(USB_CTRL_REG(ctrl, SETUP));
orig_reg = reg;
if (USB_CTRL_MASK_FAMILY(params, SETUP, STRAP_CC_DRD_MODE_ENABLE_SEL))
/* Never use the strap, it's going away. */
@@ -799,8 +762,8 @@ void brcm_usb_init_ipp(struct brcm_usb_init_params *params)
SETUP,
STRAP_CC_DRD_MODE_ENABLE_SEL));
if (USB_CTRL_MASK_FAMILY(params, SETUP, STRAP_IPP_SEL))
+ /* override ipp strap pin (if it exists) */
if (params->ipp != 2)
- /* override ipp strap pin (if it exits) */
reg &= ~(USB_CTRL_MASK_FAMILY(params, SETUP,
STRAP_IPP_SEL));
@@ -808,50 +771,38 @@ void brcm_usb_init_ipp(struct brcm_usb_init_params *params)
reg &= ~(USB_CTRL_MASK(SETUP, IPP) | USB_CTRL_MASK(SETUP, IOC));
if (params->ioc)
reg |= USB_CTRL_MASK(SETUP, IOC);
- if (params->ipp == 1 && ((reg & USB_CTRL_MASK(SETUP, IPP)) == 0))
+ if (params->ipp == 1)
reg |= USB_CTRL_MASK(SETUP, IPP);
- brcmusb_writel(reg, USB_CTRL_REG(ctrl, SETUP));
+ brcm_usb_writel(reg, USB_CTRL_REG(ctrl, SETUP));
/*
* If we're changing IPP, make sure power is off long enough
* to turn off any connected devices.
*/
- if (reg != orig_reg)
+ if ((reg ^ orig_reg) & USB_CTRL_MASK(SETUP, IPP))
msleep(50);
}
-int brcm_usb_init_get_dual_select(struct brcm_usb_init_params *params)
+static void usb_wake_enable(struct brcm_usb_init_params *params,
+ bool enable)
{
- void __iomem *ctrl = params->ctrl_regs;
- u32 reg = 0;
+ void __iomem *ctrl = params->regs[BRCM_REGS_CTRL];
- if (USB_CTRL_MASK_FAMILY(params, USB_DEVICE_CTL1, PORT_MODE)) {
- reg = brcmusb_readl(USB_CTRL_REG(ctrl, USB_DEVICE_CTL1));
- reg &= USB_CTRL_MASK_FAMILY(params, USB_DEVICE_CTL1,
- PORT_MODE);
- }
- return reg;
+ if (enable)
+ USB_CTRL_SET(ctrl, USB_PM, RMTWKUP_EN);
+ else
+ USB_CTRL_UNSET(ctrl, USB_PM, RMTWKUP_EN);
}
-void brcm_usb_init_set_dual_select(struct brcm_usb_init_params *params,
- int mode)
+static void usb_init_common(struct brcm_usb_init_params *params)
{
- void __iomem *ctrl = params->ctrl_regs;
u32 reg;
+ void __iomem *ctrl = params->regs[BRCM_REGS_CTRL];
- if (USB_CTRL_MASK_FAMILY(params, USB_DEVICE_CTL1, PORT_MODE)) {
- reg = brcmusb_readl(USB_CTRL_REG(ctrl, USB_DEVICE_CTL1));
- reg &= ~USB_CTRL_MASK_FAMILY(params, USB_DEVICE_CTL1,
- PORT_MODE);
- reg |= mode;
- brcmusb_writel(reg, USB_CTRL_REG(ctrl, USB_DEVICE_CTL1));
- }
-}
-
-void brcm_usb_init_common(struct brcm_usb_init_params *params)
-{
- u32 reg;
- void __iomem *ctrl = params->ctrl_regs;
+ /* Clear any pending wake conditions */
+ usb_wake_enable(params, false);
+ reg = brcm_usb_readl(USB_CTRL_REG(ctrl, USB_PM_STATUS));
+ brcm_usb_writel(reg, USB_CTRL_REG(ctrl, USB_PM_STATUS));
/* Take USB out of power down */
if (USB_CTRL_MASK_FAMILY(params, PLL_CTL, PLL_IDDQ_PWRDN)) {
@@ -877,7 +828,7 @@ void brcm_usb_init_common(struct brcm_usb_init_params *params)
/* Block auto PLL suspend by USB2 PHY (Sasi) */
USB_CTRL_SET(ctrl, PLL_CTL, PLL_SUSPEND_EN);
- reg = brcmusb_readl(USB_CTRL_REG(ctrl, SETUP));
+ reg = brcm_usb_readl(USB_CTRL_REG(ctrl, SETUP));
if (params->selected_family == BRCM_FAMILY_7364A0)
/* Suppress overcurrent indication from USB30 ports for A0 */
reg |= USB_CTRL_MASK_FAMILY(params, SETUP, OC3_DISABLE);
@@ -893,16 +844,16 @@ void brcm_usb_init_common(struct brcm_usb_init_params *params)
reg |= USB_CTRL_MASK_FAMILY(params, SETUP, SCB1_EN);
if (USB_CTRL_MASK_FAMILY(params, SETUP, SCB2_EN))
reg |= USB_CTRL_MASK_FAMILY(params, SETUP, SCB2_EN);
- brcmusb_writel(reg, USB_CTRL_REG(ctrl, SETUP));
+ brcm_usb_writel(reg, USB_CTRL_REG(ctrl, SETUP));
brcmusb_memc_fix(params);
if (USB_CTRL_MASK_FAMILY(params, USB_DEVICE_CTL1, PORT_MODE)) {
- reg = brcmusb_readl(USB_CTRL_REG(ctrl, USB_DEVICE_CTL1));
+ reg = brcm_usb_readl(USB_CTRL_REG(ctrl, USB_DEVICE_CTL1));
reg &= ~USB_CTRL_MASK_FAMILY(params, USB_DEVICE_CTL1,
PORT_MODE);
reg |= params->mode;
- brcmusb_writel(reg, USB_CTRL_REG(ctrl, USB_DEVICE_CTL1));
+ brcm_usb_writel(reg, USB_CTRL_REG(ctrl, USB_DEVICE_CTL1));
}
if (USB_CTRL_MASK_FAMILY(params, USB_PM, BDC_SOFT_RESETB)) {
switch (params->mode) {
@@ -924,10 +875,10 @@ void brcm_usb_init_common(struct brcm_usb_init_params *params)
}
}
-void brcm_usb_init_eohci(struct brcm_usb_init_params *params)
+static void usb_init_eohci(struct brcm_usb_init_params *params)
{
u32 reg;
- void __iomem *ctrl = params->ctrl_regs;
+ void __iomem *ctrl = params->regs[BRCM_REGS_CTRL];
if (USB_CTRL_MASK_FAMILY(params, USB_PM, USB20_HC_RESETB))
USB_CTRL_SET_FAMILY(params, USB_PM, USB20_HC_RESETB);
@@ -940,19 +891,30 @@ void brcm_usb_init_eohci(struct brcm_usb_init_params *params)
USB_CTRL_SET(ctrl, EBRIDGE, ESTOP_SCB_REQ);
/* Setup the endian bits */
- reg = brcmusb_readl(USB_CTRL_REG(ctrl, SETUP));
+ reg = brcm_usb_readl(USB_CTRL_REG(ctrl, SETUP));
reg &= ~USB_CTRL_SETUP_ENDIAN_BITS;
reg |= USB_CTRL_MASK_FAMILY(params, SETUP, ENDIAN);
- brcmusb_writel(reg, USB_CTRL_REG(ctrl, SETUP));
+ brcm_usb_writel(reg, USB_CTRL_REG(ctrl, SETUP));
if (params->selected_family == BRCM_FAMILY_7271A0)
/* Enable LS keep alive fix for certain keyboards */
USB_CTRL_SET(ctrl, OBRIDGE, LS_KEEP_ALIVE);
+
+ if (params->family_id == 0x72550000) {
+ /*
+ * Make the burst size 512 bytes to fix a hardware bug
+ * on the 7255a0. See HW7255-24.
+ */
+ reg = brcm_usb_readl(USB_CTRL_REG(ctrl, EBRIDGE));
+ reg &= ~USB_CTRL_MASK(EBRIDGE, EBR_SCB_SIZE);
+ reg |= 0x800;
+ brcm_usb_writel(reg, USB_CTRL_REG(ctrl, EBRIDGE));
+ }
}
-void brcm_usb_init_xhci(struct brcm_usb_init_params *params)
+static void usb_init_xhci(struct brcm_usb_init_params *params)
{
- void __iomem *ctrl = params->ctrl_regs;
+ void __iomem *ctrl = params->regs[BRCM_REGS_CTRL];
USB_CTRL_UNSET(ctrl, USB30_PCTL, PHY3_IDDQ_OVERRIDE);
/* 1 millisecond - for USB clocks to settle down */
@@ -978,34 +940,80 @@ void brcm_usb_init_xhci(struct brcm_usb_init_params *params)
brcmusb_usb3_otp_fix(params);
}
-void brcm_usb_uninit_common(struct brcm_usb_init_params *params)
+static void usb_uninit_common(struct brcm_usb_init_params *params)
{
if (USB_CTRL_MASK_FAMILY(params, USB_PM, USB_PWRDN))
USB_CTRL_SET_FAMILY(params, USB_PM, USB_PWRDN);
if (USB_CTRL_MASK_FAMILY(params, PLL_CTL, PLL_IDDQ_PWRDN))
USB_CTRL_SET_FAMILY(params, PLL_CTL, PLL_IDDQ_PWRDN);
+ if (params->wake_enabled)
+ usb_wake_enable(params, true);
}
-void brcm_usb_uninit_eohci(struct brcm_usb_init_params *params)
+static void usb_uninit_eohci(struct brcm_usb_init_params *params)
{
- if (USB_CTRL_MASK_FAMILY(params, USB_PM, USB20_HC_RESETB))
- USB_CTRL_UNSET_FAMILY(params, USB_PM, USB20_HC_RESETB);
}
-void brcm_usb_uninit_xhci(struct brcm_usb_init_params *params)
+static void usb_uninit_xhci(struct brcm_usb_init_params *params)
{
brcmusb_xhci_soft_reset(params, 1);
- USB_CTRL_SET(params->ctrl_regs, USB30_PCTL, PHY3_IDDQ_OVERRIDE);
+ USB_CTRL_SET(params->regs[BRCM_REGS_CTRL], USB30_PCTL,
+ PHY3_IDDQ_OVERRIDE);
+}
+
+static int usb_get_dual_select(struct brcm_usb_init_params *params)
+{
+ void __iomem *ctrl = params->regs[BRCM_REGS_CTRL];
+ u32 reg = 0;
+
+ pr_debug("%s\n", __func__);
+ if (USB_CTRL_MASK_FAMILY(params, USB_DEVICE_CTL1, PORT_MODE)) {
+ reg = brcm_usb_readl(USB_CTRL_REG(ctrl, USB_DEVICE_CTL1));
+ reg &= USB_CTRL_MASK_FAMILY(params, USB_DEVICE_CTL1,
+ PORT_MODE);
+ }
+ return reg;
}
-void brcm_usb_set_family_map(struct brcm_usb_init_params *params)
+static void usb_set_dual_select(struct brcm_usb_init_params *params, int mode)
+{
+ void __iomem *ctrl = params->regs[BRCM_REGS_CTRL];
+ u32 reg;
+
+ pr_debug("%s\n", __func__);
+
+ if (USB_CTRL_MASK_FAMILY(params, USB_DEVICE_CTL1, PORT_MODE)) {
+ reg = brcm_usb_readl(USB_CTRL_REG(ctrl, USB_DEVICE_CTL1));
+ reg &= ~USB_CTRL_MASK_FAMILY(params, USB_DEVICE_CTL1,
+ PORT_MODE);
+ reg |= mode;
+ brcm_usb_writel(reg, USB_CTRL_REG(ctrl, USB_DEVICE_CTL1));
+ }
+}
+
+static const struct brcm_usb_init_ops bcm7445_ops = {
+ .init_ipp = usb_init_ipp,
+ .init_common = usb_init_common,
+ .init_eohci = usb_init_eohci,
+ .init_xhci = usb_init_xhci,
+ .uninit_common = usb_uninit_common,
+ .uninit_eohci = usb_uninit_eohci,
+ .uninit_xhci = usb_uninit_xhci,
+ .get_dual_select = usb_get_dual_select,
+ .set_dual_select = usb_set_dual_select,
+};
+
+void brcm_usb_dvr_init_7445(struct brcm_usb_init_params *params)
{
int fam;
- fam = brcmusb_get_family_type(params);
+ pr_debug("%s\n", __func__);
+
+ fam = get_family_type(params);
params->selected_family = fam;
params->usb_reg_bits_map =
&usb_reg_bits_map_table[fam][0];
params->family_name = family_names[fam];
+ params->ops = &bcm7445_ops;
}
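Both usb_init_ipp() implementations now gate the 50 ms settle delay on (reg ^ orig_reg) & USB_CTRL_MASK(SETUP, IPP): the XOR yields the set of changed bits and the mask keeps only the IPP bit, so the delay fires only when the power polarity actually flipped, not when any bit in SETUP changed. A tiny worked example (the 0x20 mask value is a stand-in):

    #include <linux/delay.h>
    #include <linux/types.h>

    static void demo_ipp_delay(u32 orig, u32 reg)
    {
            const u32 ipp = 0x20;   /* stand-in for USB_CTRL_MASK(SETUP, IPP) */

            /* orig = 0x31, reg = 0x11: 0x31 ^ 0x11 = 0x20, masked != 0 -> delay.
             * orig = 0x31, reg = 0x30: XOR = 0x01, masked == 0 -> no delay. */
            if ((reg ^ orig) & ipp)
                    msleep(50);
    }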
diff --git a/drivers/phy/broadcom/phy-brcm-usb-init.h b/drivers/phy/broadcom/phy-brcm-usb-init.h
index f4f4f6d5d258..899b9eb43fad 100644
--- a/drivers/phy/broadcom/phy-brcm-usb-init.h
+++ b/drivers/phy/broadcom/phy-brcm-usb-init.h
@@ -6,16 +6,50 @@
#ifndef _USB_BRCM_COMMON_INIT_H
#define _USB_BRCM_COMMON_INIT_H
+#include <linux/regmap.h>
+
#define USB_CTLR_MODE_HOST 0
#define USB_CTLR_MODE_DEVICE 1
#define USB_CTLR_MODE_DRD 2
#define USB_CTLR_MODE_TYPEC_PD 3
+enum brcmusb_reg_sel {
+ BRCM_REGS_CTRL = 0,
+ BRCM_REGS_XHCI_EC,
+ BRCM_REGS_XHCI_GBL,
+ BRCM_REGS_USB_PHY,
+ BRCM_REGS_USB_MDIO,
+ BRCM_REGS_BDC_EC,
+ BRCM_REGS_MAX
+};
+
+#define USB_CTRL_REG(base, reg) ((void __iomem *)base + USB_CTRL_##reg)
+#define USB_XHCI_EC_REG(base, reg) ((void __iomem *)base + USB_XHCI_EC_##reg)
+#define USB_CTRL_MASK(reg, field) \
+ USB_CTRL_##reg##_##field##_MASK
+#define USB_CTRL_SET(base, reg, field) \
+ brcm_usb_ctrl_set(USB_CTRL_REG(base, reg), \
+ USB_CTRL_##reg##_##field##_MASK)
+#define USB_CTRL_UNSET(base, reg, field) \
+ brcm_usb_ctrl_unset(USB_CTRL_REG(base, reg), \
+ USB_CTRL_##reg##_##field##_MASK)
+
struct brcm_usb_init_params;
+struct brcm_usb_init_ops {
+ void (*init_ipp)(struct brcm_usb_init_params *params);
+ void (*init_common)(struct brcm_usb_init_params *params);
+ void (*init_eohci)(struct brcm_usb_init_params *params);
+ void (*init_xhci)(struct brcm_usb_init_params *params);
+ void (*uninit_common)(struct brcm_usb_init_params *params);
+ void (*uninit_eohci)(struct brcm_usb_init_params *params);
+ void (*uninit_xhci)(struct brcm_usb_init_params *params);
+ int (*get_dual_select)(struct brcm_usb_init_params *params);
+ void (*set_dual_select)(struct brcm_usb_init_params *params, int mode);
+};
+
struct brcm_usb_init_params {
- void __iomem *ctrl_regs;
- void __iomem *xhci_ec_regs;
+ void __iomem *regs[BRCM_REGS_MAX];
int ioc;
int ipp;
int mode;
@@ -24,19 +58,105 @@ struct brcm_usb_init_params {
int selected_family;
const char *family_name;
const u32 *usb_reg_bits_map;
+ const struct brcm_usb_init_ops *ops;
+ struct regmap *syscon_piarbctl;
+ bool wake_enabled;
+ bool suspend_with_clocks;
+};
+
+void brcm_usb_dvr_init_7445(struct brcm_usb_init_params *params);
+void brcm_usb_dvr_init_7216(struct brcm_usb_init_params *params);
+void brcm_usb_dvr_init_7211b0(struct brcm_usb_init_params *params);
+
+static inline u32 brcm_usb_readl(void __iomem *addr)
+{
+ /*
+ * MIPS endianness is configured by boot strap, which also reverses all
+ * bus endianness (i.e., big-endian CPU + big endian bus ==> native
+ * endian I/O).
+ *
+ * Other architectures (e.g., ARM) either do not support big endian, or
+ * else leave I/O in little endian mode.
+ */
+ if (IS_ENABLED(CONFIG_MIPS) && IS_ENABLED(CONFIG_CPU_BIG_ENDIAN))
+ return __raw_readl(addr);
+ else
+ return readl_relaxed(addr);
+}
+
+static inline void brcm_usb_writel(u32 val, void __iomem *addr)
+{
+ /* See brcm_usb_readl() comments */
+ if (IS_ENABLED(CONFIG_MIPS) && IS_ENABLED(CONFIG_CPU_BIG_ENDIAN))
+ __raw_writel(val, addr);
+ else
+ writel_relaxed(val, addr);
+}
+
+static inline void brcm_usb_ctrl_unset(void __iomem *reg, u32 mask)
+{
+ brcm_usb_writel(brcm_usb_readl(reg) & ~(mask), reg);
};
-void brcm_usb_set_family_map(struct brcm_usb_init_params *params);
-int brcm_usb_init_get_dual_select(struct brcm_usb_init_params *params);
-void brcm_usb_init_set_dual_select(struct brcm_usb_init_params *params,
- int mode);
-
-void brcm_usb_init_ipp(struct brcm_usb_init_params *ini);
-void brcm_usb_init_common(struct brcm_usb_init_params *ini);
-void brcm_usb_init_eohci(struct brcm_usb_init_params *ini);
-void brcm_usb_init_xhci(struct brcm_usb_init_params *ini);
-void brcm_usb_uninit_common(struct brcm_usb_init_params *ini);
-void brcm_usb_uninit_eohci(struct brcm_usb_init_params *ini);
-void brcm_usb_uninit_xhci(struct brcm_usb_init_params *ini);
+static inline void brcm_usb_ctrl_set(void __iomem *reg, u32 mask)
+{
+ brcm_usb_writel(brcm_usb_readl(reg) | (mask), reg);
+}
+
+static inline void brcm_usb_init_ipp(struct brcm_usb_init_params *ini)
+{
+ if (ini->ops->init_ipp)
+ ini->ops->init_ipp(ini);
+}
+
+static inline void brcm_usb_init_common(struct brcm_usb_init_params *ini)
+{
+ if (ini->ops->init_common)
+ ini->ops->init_common(ini);
+}
+
+static inline void brcm_usb_init_eohci(struct brcm_usb_init_params *ini)
+{
+ if (ini->ops->init_eohci)
+ ini->ops->init_eohci(ini);
+}
+
+static inline void brcm_usb_init_xhci(struct brcm_usb_init_params *ini)
+{
+ if (ini->ops->init_xhci)
+ ini->ops->init_xhci(ini);
+}
+
+static inline void brcm_usb_uninit_common(struct brcm_usb_init_params *ini)
+{
+ if (ini->ops->uninit_common)
+ ini->ops->uninit_common(ini);
+}
+
+static inline void brcm_usb_uninit_eohci(struct brcm_usb_init_params *ini)
+{
+ if (ini->ops->uninit_eohci)
+ ini->ops->uninit_eohci(ini);
+}
+
+static inline void brcm_usb_uninit_xhci(struct brcm_usb_init_params *ini)
+{
+ if (ini->ops->uninit_xhci)
+ ini->ops->uninit_xhci(ini);
+}
+
+static inline int brcm_usb_get_dual_select(struct brcm_usb_init_params *ini)
+{
+ if (ini->ops->get_dual_select)
+ return ini->ops->get_dual_select(ini);
+ return 0;
+}
+
+static inline void brcm_usb_set_dual_select(struct brcm_usb_init_params *ini,
+ int mode)
+{
+ if (ini->ops->set_dual_select)
+ ini->ops->set_dual_select(ini, mode);
+}
#endif /* _USB_BRCM_COMMON_INIT_H */
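With the ops table in place, callers stay chip-agnostic and missing callbacks are simply skipped by the NULL-tolerant inline wrappers. A usage sketch, illustrative only (register mappings and the rest of the setup are omitted, so this is not meant to be executed as-is):

    #include "phy-brcm-usb-init.h"

    static void demo_bringup(void)
    {
            struct brcm_usb_init_params p = { .mode = USB_CTLR_MODE_HOST };

            brcm_usb_dvr_init_7216(&p);     /* installs bcm7216_ops */
            brcm_usb_init_ipp(&p);          /* dispatches to ops->init_ipp */
            brcm_usb_init_common(&p);       /* dispatches to ops->init_common */
            brcm_usb_init_eohci(&p);        /* 7216 sets no init_eohci: no-op */
    }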
diff --git a/drivers/phy/broadcom/phy-brcm-usb.c b/drivers/phy/broadcom/phy-brcm-usb.c
index f5c1f2983a1d..491bbd46c5b3 100644
--- a/drivers/phy/broadcom/phy-brcm-usb.c
+++ b/drivers/phy/broadcom/phy-brcm-usb.c
@@ -16,6 +16,7 @@
#include <linux/interrupt.h>
#include <linux/soc/brcmstb/brcmstb.h>
#include <dt-bindings/phy/phy.h>
+#include <linux/mfd/syscon.h>
#include "phy-brcm-usb-init.h"
@@ -32,6 +33,12 @@ struct value_to_name_map {
const char *name;
};
+struct match_chip_info {
+ void *init_func;
+ u8 required_regs[BRCM_REGS_MAX + 1];
+ u8 optional_reg;
+};
+
static struct value_to_name_map brcm_dr_mode_to_name[] = {
{ USB_CTLR_MODE_HOST, "host" },
{ USB_CTLR_MODE_DEVICE, "peripheral" },
@@ -57,11 +64,26 @@ struct brcm_usb_phy_data {
bool has_xhci;
struct clk *usb_20_clk;
struct clk *usb_30_clk;
+ struct clk *suspend_clk;
struct mutex mutex; /* serialize phy init */
int init_count;
+ int wake_irq;
struct brcm_usb_phy phys[BRCM_USB_PHY_ID_MAX];
};
+static s8 *node_reg_names[BRCM_REGS_MAX] = {
+ "crtl", "xhci_ec", "xhci_gbl", "usb_phy", "usb_mdio", "bdc_ec"
+};
+
+static irqreturn_t brcm_usb_phy_wake_isr(int irq, void *dev_id)
+{
+ struct phy *gphy = dev_id;
+
+ pm_wakeup_event(&gphy->dev, 0);
+
+ return IRQ_HANDLED;
+}
+
static int brcm_usb_phy_init(struct phy *gphy)
{
struct brcm_usb_phy *phy = phy_get_drvdata(gphy);
@@ -74,8 +96,9 @@ static int brcm_usb_phy_init(struct phy *gphy)
*/
mutex_lock(&priv->mutex);
if (priv->init_count++ == 0) {
- clk_enable(priv->usb_20_clk);
- clk_enable(priv->usb_30_clk);
+ clk_prepare_enable(priv->usb_20_clk);
+ clk_prepare_enable(priv->usb_30_clk);
+ clk_prepare_enable(priv->suspend_clk);
brcm_usb_init_common(&priv->ini);
}
mutex_unlock(&priv->mutex);
@@ -106,8 +129,9 @@ static int brcm_usb_phy_exit(struct phy *gphy)
mutex_lock(&priv->mutex);
if (--priv->init_count == 0) {
brcm_usb_uninit_common(&priv->ini);
- clk_disable(priv->usb_20_clk);
- clk_disable(priv->usb_30_clk);
+ clk_disable_unprepare(priv->usb_20_clk);
+ clk_disable_unprepare(priv->usb_30_clk);
+ clk_disable_unprepare(priv->suspend_clk);
}
mutex_unlock(&priv->mutex);
phy->inited = false;
@@ -194,7 +218,7 @@ static ssize_t dual_select_store(struct device *dev,
res = name_to_value(&brcm_dual_mode_to_name[0],
ARRAY_SIZE(brcm_dual_mode_to_name), buf, &value);
if (!res) {
- brcm_usb_init_set_dual_select(&priv->ini, value);
+ brcm_usb_set_dual_select(&priv->ini, value);
res = len;
}
mutex_unlock(&sysfs_lock);
@@ -209,7 +233,7 @@ static ssize_t dual_select_show(struct device *dev,
int value;
mutex_lock(&sysfs_lock);
- value = brcm_usb_init_get_dual_select(&priv->ini);
+ value = brcm_usb_get_dual_select(&priv->ini);
mutex_unlock(&sysfs_lock);
return sprintf(buf, "%s\n",
value_to_name(&brcm_dual_mode_to_name[0],
@@ -228,15 +252,106 @@ static const struct attribute_group brcm_usb_phy_group = {
.attrs = brcm_usb_phy_attrs,
};
-static int brcm_usb_phy_dvr_init(struct device *dev,
+static struct match_chip_info chip_info_7216 = {
+ .init_func = &brcm_usb_dvr_init_7216,
+ .required_regs = {
+ BRCM_REGS_CTRL,
+ BRCM_REGS_XHCI_EC,
+ BRCM_REGS_XHCI_GBL,
+ -1,
+ },
+};
+
+static struct match_chip_info chip_info_7211b0 = {
+ .init_func = &brcm_usb_dvr_init_7211b0,
+ .required_regs = {
+ BRCM_REGS_CTRL,
+ BRCM_REGS_XHCI_EC,
+ BRCM_REGS_XHCI_GBL,
+ BRCM_REGS_USB_PHY,
+ BRCM_REGS_USB_MDIO,
+ -1,
+ },
+ .optional_reg = BRCM_REGS_BDC_EC,
+};
+
+static struct match_chip_info chip_info_7445 = {
+ .init_func = &brcm_usb_dvr_init_7445,
+ .required_regs = {
+ BRCM_REGS_CTRL,
+ BRCM_REGS_XHCI_EC,
+ -1,
+ },
+};
+
+static const struct of_device_id brcm_usb_dt_ids[] = {
+ {
+ .compatible = "brcm,bcm7216-usb-phy",
+ .data = &chip_info_7216,
+ },
+ {
+ .compatible = "brcm,bcm7211-usb-phy",
+ .data = &chip_info_7211b0,
+ },
+ {
+ .compatible = "brcm,brcmstb-usb-phy",
+ .data = &chip_info_7445,
+ },
+ { /* sentinel */ }
+};
+
+static int brcm_usb_get_regs(struct platform_device *pdev,
+ enum brcmusb_reg_sel regs,
+ struct brcm_usb_init_params *ini,
+ bool optional)
+{
+ struct resource *res;
+
+ /* Older DT nodes have ctrl and optional xhci_ec by index only */
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
+ node_reg_names[regs]);
+ if (res == NULL) {
+ if (regs == BRCM_REGS_CTRL) {
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ } else if (regs == BRCM_REGS_XHCI_EC) {
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
+ /* XHCI_EC registers are optional */
+ if (res == NULL)
+ return 0;
+ }
+ if (res == NULL) {
+ if (optional) {
+ dev_dbg(&pdev->dev,
+ "Optional reg %s not found\n",
+ node_reg_names[regs]);
+ return 0;
+ }
+ dev_err(&pdev->dev, "can't get %s base addr\n",
+ node_reg_names[regs]);
+ return 1;
+ }
+ }
+ ini->regs[regs] = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(ini->regs[regs])) {
+ dev_err(&pdev->dev, "can't map %s register space\n",
+ node_reg_names[regs]);
+ return 1;
+ }
+ return 0;
+}
+
+static int brcm_usb_phy_dvr_init(struct platform_device *pdev,
struct brcm_usb_phy_data *priv,
struct device_node *dn)
{
- struct phy *gphy;
+ struct device *dev = &pdev->dev;
+ struct phy *gphy = NULL;
int err;
priv->usb_20_clk = of_clk_get_by_name(dn, "sw_usb");
if (IS_ERR(priv->usb_20_clk)) {
+ if (PTR_ERR(priv->usb_20_clk) == -EPROBE_DEFER)
+ return -EPROBE_DEFER;
dev_info(dev, "Clock not found in Device Tree\n");
priv->usb_20_clk = NULL;
}
@@ -267,6 +382,8 @@ static int brcm_usb_phy_dvr_init(struct device *dev,
priv->usb_30_clk = of_clk_get_by_name(dn, "sw_usb3");
if (IS_ERR(priv->usb_30_clk)) {
+ if (PTR_ERR(priv->usb_30_clk) == -EPROBE_DEFER)
+ return -EPROBE_DEFER;
dev_info(dev,
"USB3.0 clock not found in Device Tree\n");
priv->usb_30_clk = NULL;
@@ -275,18 +392,46 @@ static int brcm_usb_phy_dvr_init(struct device *dev,
if (err)
return err;
}
+
+ priv->suspend_clk = clk_get(dev, "usb0_freerun");
+ if (IS_ERR(priv->suspend_clk)) {
+ if (PTR_ERR(priv->suspend_clk) == -EPROBE_DEFER)
+ return -EPROBE_DEFER;
+ dev_err(dev, "Suspend Clock not found in Device Tree\n");
+ priv->suspend_clk = NULL;
+ }
+
+ priv->wake_irq = platform_get_irq_byname(pdev, "wake");
+ if (priv->wake_irq < 0)
+ priv->wake_irq = platform_get_irq_byname(pdev, "wakeup");
+ if (priv->wake_irq >= 0) {
+ err = devm_request_irq(dev, priv->wake_irq,
+ brcm_usb_phy_wake_isr, 0,
+ dev_name(dev), gphy);
+ if (err < 0)
+ return err;
+ device_set_wakeup_capable(dev, 1);
+ } else {
+ dev_info(dev,
+ "Wake interrupt missing, system wake not supported\n");
+ }
+
return 0;
}
static int brcm_usb_phy_probe(struct platform_device *pdev)
{
- struct resource *res;
struct device *dev = &pdev->dev;
struct brcm_usb_phy_data *priv;
struct phy_provider *phy_provider;
struct device_node *dn = pdev->dev.of_node;
int err;
const char *mode;
+ const struct of_device_id *match;
+ void (*dvr_init)(struct brcm_usb_init_params *params);
+ const struct match_chip_info *info;
+ struct regmap *rmap;
+ int x;
priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
if (!priv)
@@ -295,30 +440,14 @@ static int brcm_usb_phy_probe(struct platform_device *pdev)
priv->ini.family_id = brcmstb_get_family_id();
priv->ini.product_id = brcmstb_get_product_id();
- brcm_usb_set_family_map(&priv->ini);
+
+ match = of_match_node(brcm_usb_dt_ids, dev->of_node);
+ info = match->data;
+ dvr_init = info->init_func;
+ (*dvr_init)(&priv->ini);
+
dev_dbg(dev, "Best mapping table is for %s\n",
priv->ini.family_name);
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!res) {
- dev_err(dev, "can't get USB_CTRL base address\n");
- return -EINVAL;
- }
- priv->ini.ctrl_regs = devm_ioremap_resource(dev, res);
- if (IS_ERR(priv->ini.ctrl_regs)) {
- dev_err(dev, "can't map CTRL register space\n");
- return -EINVAL;
- }
-
- /* The XHCI EC registers are optional */
- res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
- if (res) {
- priv->ini.xhci_ec_regs =
- devm_ioremap_resource(dev, res);
- if (IS_ERR(priv->ini.xhci_ec_regs)) {
- dev_err(dev, "can't map XHCI EC register space\n");
- return -EINVAL;
- }
- }
of_property_read_u32(dn, "brcm,ipp", &priv->ini.ipp);
of_property_read_u32(dn, "brcm,ioc", &priv->ini.ioc);
@@ -335,7 +464,23 @@ static int brcm_usb_phy_probe(struct platform_device *pdev)
if (of_property_read_bool(dn, "brcm,has-eohci"))
priv->has_eohci = true;
- err = brcm_usb_phy_dvr_init(dev, priv, dn);
+ for (x = 0; x < BRCM_REGS_MAX; x++) {
+ if (info->required_regs[x] >= BRCM_REGS_MAX)
+ break;
+
+ err = brcm_usb_get_regs(pdev, info->required_regs[x],
+ &priv->ini, false);
+ if (err)
+ return -EINVAL;
+ }
+ if (info->optional_reg) {
+ err = brcm_usb_get_regs(pdev, info->optional_reg,
+ &priv->ini, true);
+ if (err)
+ return -EINVAL;
+ }
+
+ err = brcm_usb_phy_dvr_init(pdev, priv, dn);
if (err)
return err;
@@ -354,14 +499,23 @@ static int brcm_usb_phy_probe(struct platform_device *pdev)
if (err)
dev_warn(dev, "Error creating sysfs attributes\n");
+ /* Get piarbctl syscon if it exists */
+ rmap = syscon_regmap_lookup_by_phandle(dev->of_node,
+ "syscon-piarbctl");
+ if (IS_ERR(rmap))
+ rmap = syscon_regmap_lookup_by_phandle(dev->of_node,
+ "brcm,syscon-piarbctl");
+ if (!IS_ERR(rmap))
+ priv->ini.syscon_piarbctl = rmap;
+
/* start with everything off */
if (priv->has_xhci)
brcm_usb_uninit_xhci(&priv->ini);
if (priv->has_eohci)
brcm_usb_uninit_eohci(&priv->ini);
brcm_usb_uninit_common(&priv->ini);
- clk_disable(priv->usb_20_clk);
- clk_disable(priv->usb_30_clk);
+ clk_disable_unprepare(priv->usb_20_clk);
+ clk_disable_unprepare(priv->usb_30_clk);
phy_provider = devm_of_phy_provider_register(dev, brcm_usb_phy_xlate);
@@ -381,8 +535,28 @@ static int brcm_usb_phy_suspend(struct device *dev)
struct brcm_usb_phy_data *priv = dev_get_drvdata(dev);
if (priv->init_count) {
- clk_disable(priv->usb_20_clk);
- clk_disable(priv->usb_30_clk);
+ priv->ini.wake_enabled = device_may_wakeup(dev);
+ if (priv->phys[BRCM_USB_PHY_3_0].inited)
+ brcm_usb_uninit_xhci(&priv->ini);
+ if (priv->phys[BRCM_USB_PHY_2_0].inited)
+ brcm_usb_uninit_eohci(&priv->ini);
+ brcm_usb_uninit_common(&priv->ini);
+
+ /*
+ * Handle the clocks unless they are needed for wake. This has
+ * to work both for older chips, where the XHCI uses the 3.0
+ * clocks and the EOHCI the 2.0 clocks, and for newer chips,
+ * where the XHCI uses the 2.0 and 3.0 clocks.
+ */
+
+ if (!priv->ini.suspend_with_clocks) {
+ if (priv->phys[BRCM_USB_PHY_3_0].inited)
+ clk_disable_unprepare(priv->usb_30_clk);
+ if (priv->phys[BRCM_USB_PHY_2_0].inited ||
+ !priv->has_eohci)
+ clk_disable_unprepare(priv->usb_20_clk);
+ }
+ if (priv->wake_irq >= 0)
+ enable_irq_wake(priv->wake_irq);
}
return 0;
}
@@ -391,8 +565,8 @@ static int brcm_usb_phy_resume(struct device *dev)
{
struct brcm_usb_phy_data *priv = dev_get_drvdata(dev);
- clk_enable(priv->usb_20_clk);
- clk_enable(priv->usb_30_clk);
+ clk_prepare_enable(priv->usb_20_clk);
+ clk_prepare_enable(priv->usb_30_clk);
brcm_usb_init_ipp(&priv->ini);
/*
@@ -400,18 +574,22 @@ static int brcm_usb_phy_resume(struct device *dev)
* Uninitialize anything that wasn't previously initialized.
*/
if (priv->init_count) {
+ if (priv->wake_irq >= 0)
+ disable_irq_wake(priv->wake_irq);
brcm_usb_init_common(&priv->ini);
if (priv->phys[BRCM_USB_PHY_2_0].inited) {
brcm_usb_init_eohci(&priv->ini);
} else if (priv->has_eohci) {
brcm_usb_uninit_eohci(&priv->ini);
- clk_disable(priv->usb_20_clk);
+ clk_disable_unprepare(priv->usb_20_clk);
}
if (priv->phys[BRCM_USB_PHY_3_0].inited) {
brcm_usb_init_xhci(&priv->ini);
} else if (priv->has_xhci) {
brcm_usb_uninit_xhci(&priv->ini);
- clk_disable(priv->usb_30_clk);
+ clk_disable_unprepare(priv->usb_30_clk);
+ if (!priv->has_eohci)
+ clk_disable_unprepare(priv->usb_20_clk);
}
} else {
if (priv->has_xhci)
@@ -419,10 +597,10 @@ static int brcm_usb_phy_resume(struct device *dev)
if (priv->has_eohci)
brcm_usb_uninit_eohci(&priv->ini);
brcm_usb_uninit_common(&priv->ini);
- clk_disable(priv->usb_20_clk);
- clk_disable(priv->usb_30_clk);
+ clk_disable_unprepare(priv->usb_20_clk);
+ clk_disable_unprepare(priv->usb_30_clk);
}
-
+ priv->ini.wake_enabled = false;
return 0;
}
#endif /* CONFIG_PM_SLEEP */
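The clk_disable()/clk_enable() calls replaced throughout this file pair up with
clk_prepare_enable()/clk_disable_unprepare() because the common clock framework
splits gating into a sleepable prepare step and an atomic enable step, and both
counts must be balanced before a clock can actually stop. A minimal sketch of
the pairing (the lookup name "usb_20" is hypothetical):

	struct clk *clk = devm_clk_get(dev, "usb_20");	/* hypothetical name */
	int ret;

	if (IS_ERR(clk))
		return PTR_ERR(clk);

	ret = clk_prepare_enable(clk);	/* prepare (may sleep), then enable */
	if (ret)
		return ret;

	/* ... hardware in use ... */

	clk_disable_unprepare(clk);	/* drop both the enable and prepare counts */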
@@ -431,11 +609,6 @@ static const struct dev_pm_ops brcm_usb_phy_pm_ops = {
SET_LATE_SYSTEM_SLEEP_PM_OPS(brcm_usb_phy_suspend, brcm_usb_phy_resume)
};
-static const struct of_device_id brcm_usb_dt_ids[] = {
- { .compatible = "brcm,brcmstb-usb-phy" },
- { /* sentinel */ }
-};
-
MODULE_DEVICE_TABLE(of, brcm_usb_dt_ids);
static struct platform_driver brcm_usb_driver = {
diff --git a/drivers/phy/cadence/phy-cadence-sierra.c b/drivers/phy/cadence/phy-cadence-sierra.c
index de10402f2931..a5c08e5bd2bf 100644
--- a/drivers/phy/cadence/phy-cadence-sierra.c
+++ b/drivers/phy/cadence/phy-cadence-sierra.c
@@ -22,48 +22,134 @@
#include <dt-bindings/phy/phy.h>
/* PHY register offsets */
-#define SIERRA_PHY_PLL_CFG (0xc00e << 2)
-#define SIERRA_DET_STANDEC_A (0x4000 << 2)
-#define SIERRA_DET_STANDEC_B (0x4001 << 2)
-#define SIERRA_DET_STANDEC_C (0x4002 << 2)
-#define SIERRA_DET_STANDEC_D (0x4003 << 2)
-#define SIERRA_DET_STANDEC_E (0x4004 << 2)
-#define SIERRA_PSM_LANECAL (0x4008 << 2)
-#define SIERRA_PSM_DIAG (0x4015 << 2)
-#define SIERRA_PSC_TX_A0 (0x4028 << 2)
-#define SIERRA_PSC_TX_A1 (0x4029 << 2)
-#define SIERRA_PSC_TX_A2 (0x402A << 2)
-#define SIERRA_PSC_TX_A3 (0x402B << 2)
-#define SIERRA_PSC_RX_A0 (0x4030 << 2)
-#define SIERRA_PSC_RX_A1 (0x4031 << 2)
-#define SIERRA_PSC_RX_A2 (0x4032 << 2)
-#define SIERRA_PSC_RX_A3 (0x4033 << 2)
-#define SIERRA_PLLCTRL_SUBRATE (0x403A << 2)
-#define SIERRA_PLLCTRL_GEN_D (0x403E << 2)
-#define SIERRA_DRVCTRL_ATTEN (0x406A << 2)
-#define SIERRA_CLKPATHCTRL_TMR (0x4081 << 2)
-#define SIERRA_RX_CREQ_FLTR_A_MODE1 (0x4087 << 2)
-#define SIERRA_RX_CREQ_FLTR_A_MODE0 (0x4088 << 2)
-#define SIERRA_CREQ_CCLKDET_MODE01 (0x408E << 2)
-#define SIERRA_RX_CTLE_MAINTENANCE (0x4091 << 2)
-#define SIERRA_CREQ_FSMCLK_SEL (0x4092 << 2)
-#define SIERRA_CTLELUT_CTRL (0x4098 << 2)
-#define SIERRA_DFE_ECMP_RATESEL (0x40C0 << 2)
-#define SIERRA_DFE_SMP_RATESEL (0x40C1 << 2)
-#define SIERRA_DEQ_VGATUNE_CTRL (0x40E1 << 2)
-#define SIERRA_TMRVAL_MODE3 (0x416E << 2)
-#define SIERRA_TMRVAL_MODE2 (0x416F << 2)
-#define SIERRA_TMRVAL_MODE1 (0x4170 << 2)
-#define SIERRA_TMRVAL_MODE0 (0x4171 << 2)
-#define SIERRA_PICNT_MODE1 (0x4174 << 2)
-#define SIERRA_CPI_OUTBUF_RATESEL (0x417C << 2)
-#define SIERRA_LFPSFILT_NS (0x418A << 2)
-#define SIERRA_LFPSFILT_RD (0x418B << 2)
-#define SIERRA_LFPSFILT_MP (0x418C << 2)
-#define SIERRA_SDFILT_H2L_A (0x4191 << 2)
-
-#define SIERRA_MACRO_ID 0x00007364
-#define SIERRA_MAX_LANES 4
+#define SIERRA_COMMON_CDB_OFFSET 0x0
+#define SIERRA_MACRO_ID_REG 0x0
+#define SIERRA_CMN_PLLLC_MODE_PREG 0x48
+#define SIERRA_CMN_PLLLC_LF_COEFF_MODE1_PREG 0x49
+#define SIERRA_CMN_PLLLC_LF_COEFF_MODE0_PREG 0x4A
+#define SIERRA_CMN_PLLLC_LOCK_CNTSTART_PREG 0x4B
+#define SIERRA_CMN_PLLLC_BWCAL_MODE1_PREG 0x4F
+#define SIERRA_CMN_PLLLC_BWCAL_MODE0_PREG 0x50
+#define SIERRA_CMN_PLLLC_SS_TIME_STEPSIZE_MODE_PREG 0x62
+
+#define SIERRA_LANE_CDB_OFFSET(ln, block_offset, reg_offset) \
+ ((0x4000 << (block_offset)) + \
+ (((ln) << 9) << (reg_offset)))
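Plugging in the two parameterizations used by the match data later in this patch
(block_offset_shift/reg_offset_shift of 0x2/0x2 for the stock Cadence integration
and 0x0/0x1 for the TI wrapper) shows where each lane's CDB lands; a worked check
of the macro:

	/*
	 * Cadence (2, 2): lane 0 at (0x4000 << 2) + ((0 << 9) << 2) = 0x10000
	 *                 lane 1 at (0x4000 << 2) + ((1 << 9) << 2) = 0x10800
	 * TI      (0, 1): lane 0 at (0x4000 << 0) + ((0 << 9) << 1) = 0x4000
	 *                 lane 1 at (0x4000 << 0) + ((1 << 9) << 1) = 0x4400
	 */
	u32 off = SIERRA_LANE_CDB_OFFSET(1, 0x2, 0x2);	/* 0x10800 */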
+
+#define SIERRA_DET_STANDEC_A_PREG 0x000
+#define SIERRA_DET_STANDEC_B_PREG 0x001
+#define SIERRA_DET_STANDEC_C_PREG 0x002
+#define SIERRA_DET_STANDEC_D_PREG 0x003
+#define SIERRA_DET_STANDEC_E_PREG 0x004
+#define SIERRA_PSM_LANECAL_DLY_A1_RESETS_PREG 0x008
+#define SIERRA_PSM_A0IN_TMR_PREG 0x009
+#define SIERRA_PSM_DIAG_PREG 0x015
+#define SIERRA_PSC_TX_A0_PREG 0x028
+#define SIERRA_PSC_TX_A1_PREG 0x029
+#define SIERRA_PSC_TX_A2_PREG 0x02A
+#define SIERRA_PSC_TX_A3_PREG 0x02B
+#define SIERRA_PSC_RX_A0_PREG 0x030
+#define SIERRA_PSC_RX_A1_PREG 0x031
+#define SIERRA_PSC_RX_A2_PREG 0x032
+#define SIERRA_PSC_RX_A3_PREG 0x033
+#define SIERRA_PLLCTRL_SUBRATE_PREG 0x03A
+#define SIERRA_PLLCTRL_GEN_D_PREG 0x03E
+#define SIERRA_PLLCTRL_CPGAIN_MODE_PREG 0x03F
+#define SIERRA_PLLCTRL_STATUS_PREG 0x044
+#define SIERRA_CLKPATH_BIASTRIM_PREG 0x04B
+#define SIERRA_DFE_BIASTRIM_PREG 0x04C
+#define SIERRA_DRVCTRL_ATTEN_PREG 0x06A
+#define SIERRA_CLKPATHCTRL_TMR_PREG 0x081
+#define SIERRA_RX_CREQ_FLTR_A_MODE3_PREG 0x085
+#define SIERRA_RX_CREQ_FLTR_A_MODE2_PREG 0x086
+#define SIERRA_RX_CREQ_FLTR_A_MODE1_PREG 0x087
+#define SIERRA_RX_CREQ_FLTR_A_MODE0_PREG 0x088
+#define SIERRA_CREQ_CCLKDET_MODE01_PREG 0x08E
+#define SIERRA_RX_CTLE_MAINTENANCE_PREG 0x091
+#define SIERRA_CREQ_FSMCLK_SEL_PREG 0x092
+#define SIERRA_CREQ_EQ_CTRL_PREG 0x093
+#define SIERRA_CREQ_SPARE_PREG 0x096
+#define SIERRA_CREQ_EQ_OPEN_EYE_THRESH_PREG 0x097
+#define SIERRA_CTLELUT_CTRL_PREG 0x098
+#define SIERRA_DFE_ECMP_RATESEL_PREG 0x0C0
+#define SIERRA_DFE_SMP_RATESEL_PREG 0x0C1
+#define SIERRA_DEQ_PHALIGN_CTRL 0x0C4
+#define SIERRA_DEQ_CONCUR_CTRL1_PREG 0x0C8
+#define SIERRA_DEQ_CONCUR_CTRL2_PREG 0x0C9
+#define SIERRA_DEQ_EPIPWR_CTRL2_PREG 0x0CD
+#define SIERRA_DEQ_FAST_MAINT_CYCLES_PREG 0x0CE
+#define SIERRA_DEQ_ERRCMP_CTRL_PREG 0x0D0
+#define SIERRA_DEQ_OFFSET_CTRL_PREG 0x0D8
+#define SIERRA_DEQ_GAIN_CTRL_PREG 0x0E0
+#define SIERRA_DEQ_VGATUNE_CTRL_PREG 0x0E1
+#define SIERRA_DEQ_GLUT0 0x0E8
+#define SIERRA_DEQ_GLUT1 0x0E9
+#define SIERRA_DEQ_GLUT2 0x0EA
+#define SIERRA_DEQ_GLUT3 0x0EB
+#define SIERRA_DEQ_GLUT4 0x0EC
+#define SIERRA_DEQ_GLUT5 0x0ED
+#define SIERRA_DEQ_GLUT6 0x0EE
+#define SIERRA_DEQ_GLUT7 0x0EF
+#define SIERRA_DEQ_GLUT8 0x0F0
+#define SIERRA_DEQ_GLUT9 0x0F1
+#define SIERRA_DEQ_GLUT10 0x0F2
+#define SIERRA_DEQ_GLUT11 0x0F3
+#define SIERRA_DEQ_GLUT12 0x0F4
+#define SIERRA_DEQ_GLUT13 0x0F5
+#define SIERRA_DEQ_GLUT14 0x0F6
+#define SIERRA_DEQ_GLUT15 0x0F7
+#define SIERRA_DEQ_GLUT16 0x0F8
+#define SIERRA_DEQ_ALUT0 0x108
+#define SIERRA_DEQ_ALUT1 0x109
+#define SIERRA_DEQ_ALUT2 0x10A
+#define SIERRA_DEQ_ALUT3 0x10B
+#define SIERRA_DEQ_ALUT4 0x10C
+#define SIERRA_DEQ_ALUT5 0x10D
+#define SIERRA_DEQ_ALUT6 0x10E
+#define SIERRA_DEQ_ALUT7 0x10F
+#define SIERRA_DEQ_ALUT8 0x110
+#define SIERRA_DEQ_ALUT9 0x111
+#define SIERRA_DEQ_ALUT10 0x112
+#define SIERRA_DEQ_ALUT11 0x113
+#define SIERRA_DEQ_ALUT12 0x114
+#define SIERRA_DEQ_ALUT13 0x115
+#define SIERRA_DEQ_DFETAP_CTRL_PREG 0x128
+#define SIERRA_DFE_EN_1010_IGNORE_PREG 0x134
+#define SIERRA_DEQ_TAU_CTRL1_FAST_MAINT_PREG		0x14F
+#define SIERRA_DEQ_TAU_CTRL1_SLOW_MAINT_PREG		0x150
+#define SIERRA_DEQ_TAU_CTRL2_PREG 0x151
+#define SIERRA_DEQ_PICTRL_PREG 0x161
+#define SIERRA_CPICAL_TMRVAL_MODE1_PREG 0x170
+#define SIERRA_CPICAL_TMRVAL_MODE0_PREG 0x171
+#define SIERRA_CPICAL_PICNT_MODE1_PREG 0x174
+#define SIERRA_CPI_OUTBUF_RATESEL_PREG 0x17C
+#define SIERRA_CPICAL_RES_STARTCODE_MODE23_PREG 0x183
+#define SIERRA_LFPSDET_SUPPORT_PREG 0x188
+#define SIERRA_LFPSFILT_NS_PREG 0x18A
+#define SIERRA_LFPSFILT_RD_PREG 0x18B
+#define SIERRA_LFPSFILT_MP_PREG 0x18C
+#define SIERRA_SIGDET_SUPPORT_PREG 0x190
+#define SIERRA_SDFILT_H2L_A_PREG 0x191
+#define SIERRA_SDFILT_L2H_PREG 0x193
+#define SIERRA_RXBUFFER_CTLECTRL_PREG 0x19E
+#define SIERRA_RXBUFFER_RCDFECTRL_PREG 0x19F
+#define SIERRA_RXBUFFER_DFECTRL_PREG 0x1A0
+
+#define SIERRA_PHY_CONFIG_CTRL_OFFSET(block_offset) \
+ (0xc000 << (block_offset))
+#define SIERRA_PHY_PLL_CFG 0xe
+
+#define SIERRA_MACRO_ID 0x00007364
+#define SIERRA_MAX_LANES 16
+#define PLL_LOCK_TIME 100000
+
+static const struct reg_field macro_id_type =
+ REG_FIELD(SIERRA_MACRO_ID_REG, 0, 15);
+static const struct reg_field phy_pll_cfg_1 =
+ REG_FIELD(SIERRA_PHY_PLL_CFG, 1, 1);
+static const struct reg_field pllctrl_lock =
+ REG_FIELD(SIERRA_PLLCTRL_STATUS_PREG, 0, 0);
struct cdns_sierra_inst {
struct phy *phy;
@@ -80,53 +166,172 @@ struct cdns_reg_pairs {
struct cdns_sierra_data {
u32 id_value;
- u32 pcie_regs;
- u32 usb_regs;
- struct cdns_reg_pairs *pcie_vals;
- struct cdns_reg_pairs *usb_vals;
+ u8 block_offset_shift;
+ u8 reg_offset_shift;
+ u32 pcie_cmn_regs;
+ u32 pcie_ln_regs;
+ u32 usb_cmn_regs;
+ u32 usb_ln_regs;
+ struct cdns_reg_pairs *pcie_cmn_vals;
+ struct cdns_reg_pairs *pcie_ln_vals;
+ struct cdns_reg_pairs *usb_cmn_vals;
+ struct cdns_reg_pairs *usb_ln_vals;
};
-struct cdns_sierra_phy {
+struct cdns_regmap_cdb_context {
struct device *dev;
void __iomem *base;
+ u8 reg_offset_shift;
+};
+
+struct cdns_sierra_phy {
+ struct device *dev;
+ struct regmap *regmap;
struct cdns_sierra_data *init_data;
struct cdns_sierra_inst phys[SIERRA_MAX_LANES];
struct reset_control *phy_rst;
struct reset_control *apb_rst;
+ struct regmap *regmap_lane_cdb[SIERRA_MAX_LANES];
+ struct regmap *regmap_phy_config_ctrl;
+ struct regmap *regmap_common_cdb;
+ struct regmap_field *macro_id_type;
+ struct regmap_field *phy_pll_cfg_1;
+ struct regmap_field *pllctrl_lock[SIERRA_MAX_LANES];
struct clk *clk;
+ struct clk *cmn_refclk_dig_div;
+ struct clk *cmn_refclk1_dig_div;
int nsubnodes;
+ u32 num_lanes;
bool autoconf;
};
-static void cdns_sierra_phy_init(struct phy *gphy)
+static int cdns_regmap_write(void *context, unsigned int reg, unsigned int val)
+{
+ struct cdns_regmap_cdb_context *ctx = context;
+ u32 offset = reg << ctx->reg_offset_shift;
+
+ writew(val, ctx->base + offset);
+
+ return 0;
+}
+
+static int cdns_regmap_read(void *context, unsigned int reg, unsigned int *val)
+{
+ struct cdns_regmap_cdb_context *ctx = context;
+ u32 offset = reg << ctx->reg_offset_shift;
+
+ *val = readw(ctx->base + offset);
+ return 0;
+}
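These two callbacks give the regmap core a 16-bit register bus whose index is
scaled by reg_offset_shift, so every table write below collapses into one
writew() at the scaled byte offset; a worked example, using the Cadence shift
of 2 from the match data:

	/*
	 * regmap_write(regmap, SIERRA_CMN_PLLLC_MODE_PREG, val)
	 * with reg_offset_shift = 2 resolves to:
	 *	writew(val, ctx->base + (0x48 << 2));	i.e. base + 0x120
	 */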
+
+#define SIERRA_LANE_CDB_REGMAP_CONF(n) \
+{ \
+ .name = "sierra_lane" n "_cdb", \
+ .reg_stride = 1, \
+ .fast_io = true, \
+ .reg_write = cdns_regmap_write, \
+ .reg_read = cdns_regmap_read, \
+}
+
+static struct regmap_config cdns_sierra_lane_cdb_config[] = {
+ SIERRA_LANE_CDB_REGMAP_CONF("0"),
+ SIERRA_LANE_CDB_REGMAP_CONF("1"),
+ SIERRA_LANE_CDB_REGMAP_CONF("2"),
+ SIERRA_LANE_CDB_REGMAP_CONF("3"),
+ SIERRA_LANE_CDB_REGMAP_CONF("4"),
+ SIERRA_LANE_CDB_REGMAP_CONF("5"),
+ SIERRA_LANE_CDB_REGMAP_CONF("6"),
+ SIERRA_LANE_CDB_REGMAP_CONF("7"),
+ SIERRA_LANE_CDB_REGMAP_CONF("8"),
+ SIERRA_LANE_CDB_REGMAP_CONF("9"),
+ SIERRA_LANE_CDB_REGMAP_CONF("10"),
+ SIERRA_LANE_CDB_REGMAP_CONF("11"),
+ SIERRA_LANE_CDB_REGMAP_CONF("12"),
+ SIERRA_LANE_CDB_REGMAP_CONF("13"),
+ SIERRA_LANE_CDB_REGMAP_CONF("14"),
+ SIERRA_LANE_CDB_REGMAP_CONF("15"),
+};
+
+static struct regmap_config cdns_sierra_common_cdb_config = {
+ .name = "sierra_common_cdb",
+ .reg_stride = 1,
+ .fast_io = true,
+ .reg_write = cdns_regmap_write,
+ .reg_read = cdns_regmap_read,
+};
+
+static struct regmap_config cdns_sierra_phy_config_ctrl_config = {
+ .name = "sierra_phy_config_ctrl",
+ .reg_stride = 1,
+ .fast_io = true,
+ .reg_write = cdns_regmap_write,
+ .reg_read = cdns_regmap_read,
+};
+
+static int cdns_sierra_phy_init(struct phy *gphy)
{
struct cdns_sierra_inst *ins = phy_get_drvdata(gphy);
struct cdns_sierra_phy *phy = dev_get_drvdata(gphy->dev.parent);
+ struct regmap *regmap;
int i, j;
- struct cdns_reg_pairs *vals;
- u32 num_regs;
+ struct cdns_reg_pairs *cmn_vals, *ln_vals;
+ u32 num_cmn_regs, num_ln_regs;
+
+ /* Initialise the PHY registers, unless auto configured */
+ if (phy->autoconf)
+ return 0;
+ clk_set_rate(phy->cmn_refclk_dig_div, 25000000);
+ clk_set_rate(phy->cmn_refclk1_dig_div, 25000000);
if (ins->phy_type == PHY_TYPE_PCIE) {
- num_regs = phy->init_data->pcie_regs;
- vals = phy->init_data->pcie_vals;
+ num_cmn_regs = phy->init_data->pcie_cmn_regs;
+ num_ln_regs = phy->init_data->pcie_ln_regs;
+ cmn_vals = phy->init_data->pcie_cmn_vals;
+ ln_vals = phy->init_data->pcie_ln_vals;
} else if (ins->phy_type == PHY_TYPE_USB3) {
- num_regs = phy->init_data->usb_regs;
- vals = phy->init_data->usb_vals;
+ num_cmn_regs = phy->init_data->usb_cmn_regs;
+ num_ln_regs = phy->init_data->usb_ln_regs;
+ cmn_vals = phy->init_data->usb_cmn_vals;
+ ln_vals = phy->init_data->usb_ln_vals;
} else {
- return;
+ return -EINVAL;
}
- for (i = 0; i < ins->num_lanes; i++)
- for (j = 0; j < num_regs ; j++)
- writel(vals[j].val, phy->base +
- vals[j].off + (i + ins->mlane) * 0x800);
+
+ regmap = phy->regmap_common_cdb;
+ for (j = 0; j < num_cmn_regs ; j++)
+ regmap_write(regmap, cmn_vals[j].off, cmn_vals[j].val);
+
+ for (i = 0; i < ins->num_lanes; i++) {
+ for (j = 0; j < num_ln_regs ; j++) {
+ regmap = phy->regmap_lane_cdb[i + ins->mlane];
+ regmap_write(regmap, ln_vals[j].off, ln_vals[j].val);
+ }
+ }
+
+ return 0;
}
static int cdns_sierra_phy_on(struct phy *gphy)
{
+ struct cdns_sierra_phy *sp = dev_get_drvdata(gphy->dev.parent);
struct cdns_sierra_inst *ins = phy_get_drvdata(gphy);
+ struct device *dev = sp->dev;
+ u32 val;
+ int ret;
/* Take the PHY lane group out of reset */
- return reset_control_deassert(ins->lnk_rst);
+ ret = reset_control_deassert(ins->lnk_rst);
+ if (ret) {
+ dev_err(dev, "Failed to take the PHY lane out of reset\n");
+ return ret;
+ }
+
+ ret = regmap_field_read_poll_timeout(sp->pllctrl_lock[ins->mlane],
+ val, val, 1000, PLL_LOCK_TIME);
+ if (ret < 0)
+ dev_err(dev, "PLL lock of lane failed\n");
+
+ return ret;
}
static int cdns_sierra_phy_off(struct phy *gphy)
@@ -136,9 +341,20 @@ static int cdns_sierra_phy_off(struct phy *gphy)
return reset_control_assert(ins->lnk_rst);
}
+static int cdns_sierra_phy_reset(struct phy *gphy)
+{
+ struct cdns_sierra_phy *sp = dev_get_drvdata(gphy->dev.parent);
+
+ reset_control_assert(sp->phy_rst);
+ reset_control_deassert(sp->phy_rst);
+ return 0;
+}
+
static const struct phy_ops ops = {
+ .init = cdns_sierra_phy_init,
.power_on = cdns_sierra_phy_on,
.power_off = cdns_sierra_phy_off,
+ .reset = cdns_sierra_phy_reset,
.owner = THIS_MODULE,
};
@@ -159,41 +375,152 @@ static int cdns_sierra_get_optional(struct cdns_sierra_inst *inst,
static const struct of_device_id cdns_sierra_id_table[];
+static struct regmap *cdns_regmap_init(struct device *dev, void __iomem *base,
+ u32 block_offset, u8 reg_offset_shift,
+ const struct regmap_config *config)
+{
+ struct cdns_regmap_cdb_context *ctx;
+
+ ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL);
+ if (!ctx)
+ return ERR_PTR(-ENOMEM);
+
+ ctx->dev = dev;
+ ctx->base = base + block_offset;
+ ctx->reg_offset_shift = reg_offset_shift;
+
+ return devm_regmap_init(dev, NULL, ctx, config);
+}
+
+static int cdns_regfield_init(struct cdns_sierra_phy *sp)
+{
+ struct device *dev = sp->dev;
+ struct regmap_field *field;
+ struct regmap *regmap;
+ int i;
+
+ regmap = sp->regmap_common_cdb;
+ field = devm_regmap_field_alloc(dev, regmap, macro_id_type);
+ if (IS_ERR(field)) {
+ dev_err(dev, "MACRO_ID_TYPE reg field init failed\n");
+ return PTR_ERR(field);
+ }
+ sp->macro_id_type = field;
+
+ regmap = sp->regmap_phy_config_ctrl;
+ field = devm_regmap_field_alloc(dev, regmap, phy_pll_cfg_1);
+ if (IS_ERR(field)) {
+ dev_err(dev, "PHY_PLL_CFG_1 reg field init failed\n");
+ return PTR_ERR(field);
+ }
+ sp->phy_pll_cfg_1 = field;
+
+ for (i = 0; i < SIERRA_MAX_LANES; i++) {
+ regmap = sp->regmap_lane_cdb[i];
+ field = devm_regmap_field_alloc(dev, regmap, pllctrl_lock);
+ if (IS_ERR(field)) {
+ dev_err(dev, "P%d_ENABLE reg field init failed\n", i);
+ return PTR_ERR(field);
+ }
+ sp->pllctrl_lock[i] = field;
+ }
+
+ return 0;
+}
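Each regmap_field allocated here carries its own register, mask and shift, so a
later write only touches the named bits; a sketch of the equivalence (the
open-coded regmap_update_bits() form is shown for illustration only):

	regmap_field_write(sp->phy_pll_cfg_1, 0x1);	/* RMW of bit 1 of SIERRA_PHY_PLL_CFG */

	/* roughly equivalent open-coded form: */
	regmap_update_bits(sp->regmap_phy_config_ctrl, SIERRA_PHY_PLL_CFG,
			   BIT(1), BIT(1));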
+
+static int cdns_regmap_init_blocks(struct cdns_sierra_phy *sp,
+ void __iomem *base, u8 block_offset_shift,
+ u8 reg_offset_shift)
+{
+ struct device *dev = sp->dev;
+ struct regmap *regmap;
+ u32 block_offset;
+ int i;
+
+ for (i = 0; i < SIERRA_MAX_LANES; i++) {
+ block_offset = SIERRA_LANE_CDB_OFFSET(i, block_offset_shift,
+ reg_offset_shift);
+ regmap = cdns_regmap_init(dev, base, block_offset,
+ reg_offset_shift,
+ &cdns_sierra_lane_cdb_config[i]);
+ if (IS_ERR(regmap)) {
+ dev_err(dev, "Failed to init lane CDB regmap\n");
+ return PTR_ERR(regmap);
+ }
+ sp->regmap_lane_cdb[i] = regmap;
+ }
+
+ regmap = cdns_regmap_init(dev, base, SIERRA_COMMON_CDB_OFFSET,
+ reg_offset_shift,
+ &cdns_sierra_common_cdb_config);
+ if (IS_ERR(regmap)) {
+ dev_err(dev, "Failed to init common CDB regmap\n");
+ return PTR_ERR(regmap);
+ }
+ sp->regmap_common_cdb = regmap;
+
+ block_offset = SIERRA_PHY_CONFIG_CTRL_OFFSET(block_offset_shift);
+ regmap = cdns_regmap_init(dev, base, block_offset, reg_offset_shift,
+ &cdns_sierra_phy_config_ctrl_config);
+ if (IS_ERR(regmap)) {
+ dev_err(dev, "Failed to init PHY config and control regmap\n");
+ return PTR_ERR(regmap);
+ }
+ sp->regmap_phy_config_ctrl = regmap;
+
+ return 0;
+}
+
static int cdns_sierra_phy_probe(struct platform_device *pdev)
{
struct cdns_sierra_phy *sp;
struct phy_provider *phy_provider;
struct device *dev = &pdev->dev;
const struct of_device_id *match;
+ struct cdns_sierra_data *data;
+ unsigned int id_value;
struct resource *res;
int i, ret, node = 0;
+ void __iomem *base;
+ struct clk *clk;
struct device_node *dn = dev->of_node, *child;
if (of_get_child_count(dn) == 0)
return -ENODEV;
+ /* Get init data for this PHY */
+ match = of_match_device(cdns_sierra_id_table, dev);
+ if (!match)
+ return -EINVAL;
+
+ data = (struct cdns_sierra_data *)match->data;
+
sp = devm_kzalloc(dev, sizeof(*sp), GFP_KERNEL);
if (!sp)
return -ENOMEM;
dev_set_drvdata(dev, sp);
sp->dev = dev;
+ sp->init_data = data;
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- sp->base = devm_ioremap_resource(dev, res);
- if (IS_ERR(sp->base)) {
+ base = devm_ioremap_resource(dev, res);
+ if (IS_ERR(base)) {
dev_err(dev, "missing \"reg\"\n");
- return PTR_ERR(sp->base);
+ return PTR_ERR(base);
}
- /* Get init data for this PHY */
- match = of_match_device(cdns_sierra_id_table, dev);
- if (!match)
- return -EINVAL;
- sp->init_data = (struct cdns_sierra_data *)match->data;
+ ret = cdns_regmap_init_blocks(sp, base, data->block_offset_shift,
+ data->reg_offset_shift);
+ if (ret)
+ return ret;
+
+ ret = cdns_regfield_init(sp);
+ if (ret)
+ return ret;
platform_set_drvdata(pdev, sp);
- sp->clk = devm_clk_get(dev, "phy_clk");
+ sp->clk = devm_clk_get_optional(dev, "phy_clk");
if (IS_ERR(sp->clk)) {
dev_err(dev, "failed to get clock phy_clk\n");
return PTR_ERR(sp->clk);
@@ -205,12 +532,28 @@ static int cdns_sierra_phy_probe(struct platform_device *pdev)
return PTR_ERR(sp->phy_rst);
}
- sp->apb_rst = devm_reset_control_get(dev, "sierra_apb");
+ sp->apb_rst = devm_reset_control_get_optional(dev, "sierra_apb");
if (IS_ERR(sp->apb_rst)) {
dev_err(dev, "failed to get apb reset\n");
return PTR_ERR(sp->apb_rst);
}
+ clk = devm_clk_get_optional(dev, "cmn_refclk_dig_div");
+ if (IS_ERR(clk)) {
+ dev_err(dev, "cmn_refclk_dig_div clock not found\n");
+ ret = PTR_ERR(clk);
+ return ret;
+ }
+ sp->cmn_refclk_dig_div = clk;
+
+ clk = devm_clk_get_optional(dev, "cmn_refclk1_dig_div");
+ if (IS_ERR(clk)) {
+ dev_err(dev, "cmn_refclk1_dig_div clock not found\n");
+ ret = PTR_ERR(clk);
+ return ret;
+ }
+ sp->cmn_refclk1_dig_div = clk;
+
ret = clk_prepare_enable(sp->clk);
if (ret)
return ret;
@@ -219,7 +562,8 @@ static int cdns_sierra_phy_probe(struct platform_device *pdev)
reset_control_deassert(sp->apb_rst);
/* Check that PHY is present */
- if (sp->init_data->id_value != readl(sp->base)) {
+ regmap_field_read(sp->macro_id_type, &id_value);
+ if (sp->init_data->id_value != id_value) {
ret = -EINVAL;
goto clk_disable;
}
@@ -230,7 +574,7 @@ static int cdns_sierra_phy_probe(struct platform_device *pdev)
struct phy *gphy;
sp->phys[node].lnk_rst =
- of_reset_control_get_exclusive_by_index(child, 0);
+ of_reset_control_array_get_exclusive(child);
if (IS_ERR(sp->phys[node].lnk_rst)) {
dev_err(dev, "failed to get reset %s\n",
@@ -248,6 +592,8 @@ static int cdns_sierra_phy_probe(struct platform_device *pdev)
}
}
+ sp->num_lanes += sp->phys[node].num_lanes;
+
gphy = devm_phy_create(dev, child, &ops);
if (IS_ERR(gphy)) {
@@ -257,17 +603,18 @@ static int cdns_sierra_phy_probe(struct platform_device *pdev)
sp->phys[node].phy = gphy;
phy_set_drvdata(gphy, &sp->phys[node]);
- /* Initialise the PHY registers, unless auto configured */
- if (!sp->autoconf)
- cdns_sierra_phy_init(gphy);
-
node++;
}
sp->nsubnodes = node;
+	if (sp->num_lanes > SIERRA_MAX_LANES) {
+		ret = -EINVAL;
+		dev_err(dev, "Invalid lane configuration\n");
+		goto put_child2;
+	}
+
/* If more than one subnode, configure the PHY as multilink */
if (!sp->autoconf && sp->nsubnodes > 1)
- writel(2, sp->base + SIERRA_PHY_PLL_CFG);
+ regmap_field_write(sp->phy_pll_cfg_1, 0x1);
pm_runtime_enable(dev);
phy_provider = devm_of_phy_provider_register(dev, of_phy_simple_xlate);
@@ -288,7 +635,7 @@ clk_disable:
static int cdns_sierra_phy_remove(struct platform_device *pdev)
{
- struct cdns_sierra_phy *phy = dev_get_drvdata(pdev->dev.parent);
+ struct cdns_sierra_phy *phy = platform_get_drvdata(pdev);
int i;
reset_control_assert(phy->phy_rst);
@@ -306,68 +653,158 @@ static int cdns_sierra_phy_remove(struct platform_device *pdev)
return 0;
}
-static struct cdns_reg_pairs cdns_usb_regs[] = {
- /*
- * Write USB configuration parameters to the PHY.
- * These values are specific to this specific hardware
- * configuration.
- */
- {0xFE0A, SIERRA_DET_STANDEC_A},
- {0x000F, SIERRA_DET_STANDEC_B},
- {0x55A5, SIERRA_DET_STANDEC_C},
- {0x69AD, SIERRA_DET_STANDEC_D},
- {0x0241, SIERRA_DET_STANDEC_E},
- {0x0110, SIERRA_PSM_LANECAL},
- {0xCF00, SIERRA_PSM_DIAG},
- {0x001F, SIERRA_PSC_TX_A0},
- {0x0007, SIERRA_PSC_TX_A1},
- {0x0003, SIERRA_PSC_TX_A2},
- {0x0003, SIERRA_PSC_TX_A3},
- {0x0FFF, SIERRA_PSC_RX_A0},
- {0x0003, SIERRA_PSC_RX_A1},
- {0x0003, SIERRA_PSC_RX_A2},
- {0x0001, SIERRA_PSC_RX_A3},
- {0x0001, SIERRA_PLLCTRL_SUBRATE},
- {0x0406, SIERRA_PLLCTRL_GEN_D},
- {0x0000, SIERRA_DRVCTRL_ATTEN},
- {0x823E, SIERRA_CLKPATHCTRL_TMR},
- {0x078F, SIERRA_RX_CREQ_FLTR_A_MODE1},
- {0x078F, SIERRA_RX_CREQ_FLTR_A_MODE0},
- {0x7B3C, SIERRA_CREQ_CCLKDET_MODE01},
- {0x023C, SIERRA_RX_CTLE_MAINTENANCE},
- {0x3232, SIERRA_CREQ_FSMCLK_SEL},
- {0x8452, SIERRA_CTLELUT_CTRL},
- {0x4121, SIERRA_DFE_ECMP_RATESEL},
- {0x4121, SIERRA_DFE_SMP_RATESEL},
- {0x9999, SIERRA_DEQ_VGATUNE_CTRL},
- {0x0330, SIERRA_TMRVAL_MODE0},
- {0x01FF, SIERRA_PICNT_MODE1},
- {0x0009, SIERRA_CPI_OUTBUF_RATESEL},
- {0x000F, SIERRA_LFPSFILT_NS},
- {0x0009, SIERRA_LFPSFILT_RD},
- {0x0001, SIERRA_LFPSFILT_MP},
- {0x8013, SIERRA_SDFILT_H2L_A},
- {0x0400, SIERRA_TMRVAL_MODE1},
+/* refclk100MHz_32b_PCIe_cmn_pll_ext_ssc */
+static struct cdns_reg_pairs cdns_pcie_cmn_regs_ext_ssc[] = {
+ {0x2106, SIERRA_CMN_PLLLC_LF_COEFF_MODE1_PREG},
+ {0x2106, SIERRA_CMN_PLLLC_LF_COEFF_MODE0_PREG},
+ {0x8A06, SIERRA_CMN_PLLLC_BWCAL_MODE1_PREG},
+ {0x8A06, SIERRA_CMN_PLLLC_BWCAL_MODE0_PREG},
+ {0x1B1B, SIERRA_CMN_PLLLC_SS_TIME_STEPSIZE_MODE_PREG}
};
-static struct cdns_reg_pairs cdns_pcie_regs[] = {
- /*
- * Write PCIe configuration parameters to the PHY.
- * These values are specific to this specific hardware
- * configuration.
- */
- {0x891f, SIERRA_DET_STANDEC_D},
- {0x0053, SIERRA_DET_STANDEC_E},
- {0x0400, SIERRA_TMRVAL_MODE2},
- {0x0200, SIERRA_TMRVAL_MODE3},
+/* refclk100MHz_32b_PCIe_ln_ext_ssc */
+static struct cdns_reg_pairs cdns_pcie_ln_regs_ext_ssc[] = {
+ {0x813E, SIERRA_CLKPATHCTRL_TMR_PREG},
+ {0x8047, SIERRA_RX_CREQ_FLTR_A_MODE3_PREG},
+ {0x808F, SIERRA_RX_CREQ_FLTR_A_MODE2_PREG},
+ {0x808F, SIERRA_RX_CREQ_FLTR_A_MODE1_PREG},
+ {0x808F, SIERRA_RX_CREQ_FLTR_A_MODE0_PREG},
+ {0x033C, SIERRA_RX_CTLE_MAINTENANCE_PREG},
+ {0x44CC, SIERRA_CREQ_EQ_OPEN_EYE_THRESH_PREG}
+};
+
+/* refclk100MHz_20b_USB_cmn_pll_ext_ssc */
+static struct cdns_reg_pairs cdns_usb_cmn_regs_ext_ssc[] = {
+ {0x2085, SIERRA_CMN_PLLLC_LF_COEFF_MODE1_PREG},
+ {0x2085, SIERRA_CMN_PLLLC_LF_COEFF_MODE0_PREG},
+ {0x0000, SIERRA_CMN_PLLLC_BWCAL_MODE0_PREG},
+ {0x0000, SIERRA_CMN_PLLLC_SS_TIME_STEPSIZE_MODE_PREG}
+};
+
+/* refclk100MHz_20b_USB_ln_ext_ssc */
+static struct cdns_reg_pairs cdns_usb_ln_regs_ext_ssc[] = {
+ {0xFE0A, SIERRA_DET_STANDEC_A_PREG},
+ {0x000F, SIERRA_DET_STANDEC_B_PREG},
+ {0x00A5, SIERRA_DET_STANDEC_C_PREG},
+	{0x69AD, SIERRA_DET_STANDEC_D_PREG},
+ {0x0241, SIERRA_DET_STANDEC_E_PREG},
+ {0x0010, SIERRA_PSM_LANECAL_DLY_A1_RESETS_PREG},
+ {0x0014, SIERRA_PSM_A0IN_TMR_PREG},
+ {0xCF00, SIERRA_PSM_DIAG_PREG},
+ {0x001F, SIERRA_PSC_TX_A0_PREG},
+ {0x0007, SIERRA_PSC_TX_A1_PREG},
+ {0x0003, SIERRA_PSC_TX_A2_PREG},
+ {0x0003, SIERRA_PSC_TX_A3_PREG},
+ {0x0FFF, SIERRA_PSC_RX_A0_PREG},
+ {0x0619, SIERRA_PSC_RX_A1_PREG},
+ {0x0003, SIERRA_PSC_RX_A2_PREG},
+ {0x0001, SIERRA_PSC_RX_A3_PREG},
+ {0x0001, SIERRA_PLLCTRL_SUBRATE_PREG},
+ {0x0406, SIERRA_PLLCTRL_GEN_D_PREG},
+ {0x5233, SIERRA_PLLCTRL_CPGAIN_MODE_PREG},
+ {0x00CA, SIERRA_CLKPATH_BIASTRIM_PREG},
+ {0x2512, SIERRA_DFE_BIASTRIM_PREG},
+ {0x0000, SIERRA_DRVCTRL_ATTEN_PREG},
+ {0x873E, SIERRA_CLKPATHCTRL_TMR_PREG},
+ {0x03CF, SIERRA_RX_CREQ_FLTR_A_MODE1_PREG},
+ {0x01CE, SIERRA_RX_CREQ_FLTR_A_MODE0_PREG},
+ {0x7B3C, SIERRA_CREQ_CCLKDET_MODE01_PREG},
+ {0x033F, SIERRA_RX_CTLE_MAINTENANCE_PREG},
+ {0x3232, SIERRA_CREQ_FSMCLK_SEL_PREG},
+ {0x0000, SIERRA_CREQ_EQ_CTRL_PREG},
+ {0x8000, SIERRA_CREQ_SPARE_PREG},
+ {0xCC44, SIERRA_CREQ_EQ_OPEN_EYE_THRESH_PREG},
+ {0x8453, SIERRA_CTLELUT_CTRL_PREG},
+ {0x4110, SIERRA_DFE_ECMP_RATESEL_PREG},
+ {0x4110, SIERRA_DFE_SMP_RATESEL_PREG},
+ {0x0002, SIERRA_DEQ_PHALIGN_CTRL},
+ {0x3200, SIERRA_DEQ_CONCUR_CTRL1_PREG},
+ {0x5064, SIERRA_DEQ_CONCUR_CTRL2_PREG},
+ {0x0030, SIERRA_DEQ_EPIPWR_CTRL2_PREG},
+ {0x0048, SIERRA_DEQ_FAST_MAINT_CYCLES_PREG},
+ {0x5A5A, SIERRA_DEQ_ERRCMP_CTRL_PREG},
+ {0x02F5, SIERRA_DEQ_OFFSET_CTRL_PREG},
+ {0x02F5, SIERRA_DEQ_GAIN_CTRL_PREG},
+ {0x9A8A, SIERRA_DEQ_VGATUNE_CTRL_PREG},
+ {0x0014, SIERRA_DEQ_GLUT0},
+ {0x0014, SIERRA_DEQ_GLUT1},
+ {0x0014, SIERRA_DEQ_GLUT2},
+ {0x0014, SIERRA_DEQ_GLUT3},
+ {0x0014, SIERRA_DEQ_GLUT4},
+ {0x0014, SIERRA_DEQ_GLUT5},
+ {0x0014, SIERRA_DEQ_GLUT6},
+ {0x0014, SIERRA_DEQ_GLUT7},
+ {0x0014, SIERRA_DEQ_GLUT8},
+ {0x0014, SIERRA_DEQ_GLUT9},
+ {0x0014, SIERRA_DEQ_GLUT10},
+ {0x0014, SIERRA_DEQ_GLUT11},
+ {0x0014, SIERRA_DEQ_GLUT12},
+ {0x0014, SIERRA_DEQ_GLUT13},
+ {0x0014, SIERRA_DEQ_GLUT14},
+ {0x0014, SIERRA_DEQ_GLUT15},
+ {0x0014, SIERRA_DEQ_GLUT16},
+ {0x0BAE, SIERRA_DEQ_ALUT0},
+ {0x0AEB, SIERRA_DEQ_ALUT1},
+ {0x0A28, SIERRA_DEQ_ALUT2},
+ {0x0965, SIERRA_DEQ_ALUT3},
+ {0x08A2, SIERRA_DEQ_ALUT4},
+ {0x07DF, SIERRA_DEQ_ALUT5},
+ {0x071C, SIERRA_DEQ_ALUT6},
+ {0x0659, SIERRA_DEQ_ALUT7},
+ {0x0596, SIERRA_DEQ_ALUT8},
+ {0x0514, SIERRA_DEQ_ALUT9},
+ {0x0492, SIERRA_DEQ_ALUT10},
+ {0x0410, SIERRA_DEQ_ALUT11},
+ {0x038E, SIERRA_DEQ_ALUT12},
+ {0x030C, SIERRA_DEQ_ALUT13},
+ {0x03F4, SIERRA_DEQ_DFETAP_CTRL_PREG},
+ {0x0001, SIERRA_DFE_EN_1010_IGNORE_PREG},
+ {0x3C01, SIERRA_DEQ_TAU_CTRL1_FAST_MAINT_PREG},
+ {0x3C40, SIERRA_DEQ_TAU_CTRL1_SLOW_MAINT_PREG},
+ {0x1C08, SIERRA_DEQ_TAU_CTRL2_PREG},
+ {0x0033, SIERRA_DEQ_PICTRL_PREG},
+ {0x0400, SIERRA_CPICAL_TMRVAL_MODE1_PREG},
+ {0x0330, SIERRA_CPICAL_TMRVAL_MODE0_PREG},
+ {0x01FF, SIERRA_CPICAL_PICNT_MODE1_PREG},
+ {0x0009, SIERRA_CPI_OUTBUF_RATESEL_PREG},
+ {0x3232, SIERRA_CPICAL_RES_STARTCODE_MODE23_PREG},
+ {0x0005, SIERRA_LFPSDET_SUPPORT_PREG},
+ {0x000F, SIERRA_LFPSFILT_NS_PREG},
+ {0x0009, SIERRA_LFPSFILT_RD_PREG},
+ {0x0001, SIERRA_LFPSFILT_MP_PREG},
+ {0x8013, SIERRA_SDFILT_H2L_A_PREG},
+ {0x8009, SIERRA_SDFILT_L2H_PREG},
+ {0x0024, SIERRA_RXBUFFER_CTLECTRL_PREG},
+ {0x0020, SIERRA_RXBUFFER_RCDFECTRL_PREG},
+ {0x4243, SIERRA_RXBUFFER_DFECTRL_PREG}
};
static const struct cdns_sierra_data cdns_map_sierra = {
SIERRA_MACRO_ID,
- ARRAY_SIZE(cdns_pcie_regs),
- ARRAY_SIZE(cdns_usb_regs),
- cdns_pcie_regs,
- cdns_usb_regs
+ 0x2,
+ 0x2,
+ ARRAY_SIZE(cdns_pcie_cmn_regs_ext_ssc),
+ ARRAY_SIZE(cdns_pcie_ln_regs_ext_ssc),
+ ARRAY_SIZE(cdns_usb_cmn_regs_ext_ssc),
+ ARRAY_SIZE(cdns_usb_ln_regs_ext_ssc),
+ cdns_pcie_cmn_regs_ext_ssc,
+ cdns_pcie_ln_regs_ext_ssc,
+ cdns_usb_cmn_regs_ext_ssc,
+ cdns_usb_ln_regs_ext_ssc,
+};
+
+static const struct cdns_sierra_data cdns_ti_map_sierra = {
+ SIERRA_MACRO_ID,
+ 0x0,
+ 0x1,
+ ARRAY_SIZE(cdns_pcie_cmn_regs_ext_ssc),
+ ARRAY_SIZE(cdns_pcie_ln_regs_ext_ssc),
+ ARRAY_SIZE(cdns_usb_cmn_regs_ext_ssc),
+ ARRAY_SIZE(cdns_usb_ln_regs_ext_ssc),
+ cdns_pcie_cmn_regs_ext_ssc,
+ cdns_pcie_ln_regs_ext_ssc,
+ cdns_usb_cmn_regs_ext_ssc,
+ cdns_usb_ln_regs_ext_ssc,
};
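Both match-data tables above rely on positional initialization, which makes the
field mapping easy to misread against struct cdns_sierra_data; the first table
rewritten with designated initializers (a readability sketch only, with field
names taken from the struct definition earlier in this patch):

	static const struct cdns_sierra_data cdns_map_sierra = {
		.id_value		= SIERRA_MACRO_ID,
		.block_offset_shift	= 0x2,
		.reg_offset_shift	= 0x2,
		.pcie_cmn_regs		= ARRAY_SIZE(cdns_pcie_cmn_regs_ext_ssc),
		.pcie_ln_regs		= ARRAY_SIZE(cdns_pcie_ln_regs_ext_ssc),
		.usb_cmn_regs		= ARRAY_SIZE(cdns_usb_cmn_regs_ext_ssc),
		.usb_ln_regs		= ARRAY_SIZE(cdns_usb_ln_regs_ext_ssc),
		.pcie_cmn_vals		= cdns_pcie_cmn_regs_ext_ssc,
		.pcie_ln_vals		= cdns_pcie_ln_regs_ext_ssc,
		.usb_cmn_vals		= cdns_usb_cmn_regs_ext_ssc,
		.usb_ln_vals		= cdns_usb_ln_regs_ext_ssc,
	};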
static const struct of_device_id cdns_sierra_id_table[] = {
@@ -375,6 +812,10 @@ static const struct of_device_id cdns_sierra_id_table[] = {
.compatible = "cdns,sierra-phy-t0",
.data = &cdns_map_sierra,
},
+ {
+ .compatible = "ti,sierra-phy-t0",
+ .data = &cdns_ti_map_sierra,
+ },
{}
};
MODULE_DEVICE_TABLE(of, cdns_sierra_id_table);
diff --git a/drivers/phy/hisilicon/Kconfig b/drivers/phy/hisilicon/Kconfig
index 534e393a09b3..1c73053bcc98 100644
--- a/drivers/phy/hisilicon/Kconfig
+++ b/drivers/phy/hisilicon/Kconfig
@@ -33,14 +33,14 @@ config PHY_HISTB_COMBPHY
If unsure, say N.
config PHY_HISI_INNO_USB2
- tristate "HiSilicon INNO USB2 PHY support"
- depends on (ARCH_HISI && ARM64) || COMPILE_TEST
- select GENERIC_PHY
- select MFD_SYSCON
- help
- Support for INNO USB2 PHY on HiSilicon SoCs. This Phy supports
- USB 1.5Mb/s, USB 12Mb/s, USB 480Mb/s speeds. It supports one
- USB host port to accept one USB device.
+ tristate "HiSilicon INNO USB2 PHY support"
+ depends on (ARCH_HISI && ARM64) || COMPILE_TEST
+ select GENERIC_PHY
+ select MFD_SYSCON
+ help
+	  Support for the INNO USB2 PHY on HiSilicon SoCs. This PHY
+	  supports USB speeds of 1.5 Mb/s, 12 Mb/s and 480 Mb/s, and
+	  provides one USB host port that accepts one USB device.
config PHY_HIX5HD2_SATA
tristate "HIX5HD2 SATA PHY Driver"
diff --git a/drivers/phy/intel/Kconfig b/drivers/phy/intel/Kconfig
new file mode 100644
index 000000000000..4ea6a8897cd7
--- /dev/null
+++ b/drivers/phy/intel/Kconfig
@@ -0,0 +1,9 @@
+# SPDX-License-Identifier: GPL-2.0
+#
+# PHY drivers for Intel Lightning Mountain (LGM) platform
+#
+config PHY_INTEL_EMMC
+ tristate "Intel EMMC PHY driver"
+ select GENERIC_PHY
+ help
+ Enable this to support the Intel EMMC PHY
diff --git a/drivers/phy/intel/Makefile b/drivers/phy/intel/Makefile
new file mode 100644
index 000000000000..6b876a75599d
--- /dev/null
+++ b/drivers/phy/intel/Makefile
@@ -0,0 +1,2 @@
+# SPDX-License-Identifier: GPL-2.0
+obj-$(CONFIG_PHY_INTEL_EMMC) += phy-intel-emmc.o
diff --git a/drivers/phy/intel/phy-intel-emmc.c b/drivers/phy/intel/phy-intel-emmc.c
new file mode 100644
index 000000000000..703aeb122541
--- /dev/null
+++ b/drivers/phy/intel/phy-intel-emmc.c
@@ -0,0 +1,284 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Intel eMMC PHY driver
+ * Copyright (C) 2019 Intel, Corp.
+ */
+
+#include <linux/bits.h>
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/mfd/syscon.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/phy/phy.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+
+/* eMMC phy register definitions */
+#define EMMC_PHYCTRL0_REG 0xa8
+#define DR_TY_MASK GENMASK(30, 28)
+#define DR_TY_SHIFT(x) (((x) << 28) & DR_TY_MASK)
+#define OTAPDLYENA BIT(14)
+#define OTAPDLYSEL_MASK GENMASK(13, 10)
+#define OTAPDLYSEL_SHIFT(x) (((x) << 10) & OTAPDLYSEL_MASK)
+
+#define EMMC_PHYCTRL1_REG 0xac
+#define PDB_MASK BIT(0)
+#define PDB_SHIFT(x) (((x) << 0) & PDB_MASK)
+#define ENDLL_MASK BIT(7)
+#define ENDLL_SHIFT(x) (((x) << 7) & ENDLL_MASK)
+
+#define EMMC_PHYCTRL2_REG 0xb0
+#define FRQSEL_25M 0
+#define FRQSEL_50M 1
+#define FRQSEL_100M 2
+#define FRQSEL_150M 3
+#define FRQSEL_MASK GENMASK(24, 22)
+#define FRQSEL_SHIFT(x) (((x) << 22) & FRQSEL_MASK)
+
+#define EMMC_PHYSTAT_REG 0xbc
+#define CALDONE_MASK BIT(9)
+#define DLLRDY_MASK BIT(8)
+#define IS_CALDONE(x) ((x) & CALDONE_MASK)
+#define IS_DLLRDY(x) ((x) & DLLRDY_MASK)
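The *_SHIFT() macros above AND the shifted value with the field mask, so an
oversized argument cannot spill into neighbouring bits; two worked values:

	/* DR_TY_SHIFT(6)      = (6 << 28) & GENMASK(30, 28) = 0x60000000 */
	/* OTAPDLYSEL_SHIFT(4) = (4 << 10) & GENMASK(13, 10) = 0x00001000 */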
+
+struct intel_emmc_phy {
+ struct regmap *syscfg;
+ struct clk *emmcclk;
+};
+
+static int intel_emmc_phy_power(struct phy *phy, bool on_off)
+{
+ struct intel_emmc_phy *priv = phy_get_drvdata(phy);
+ unsigned int caldone;
+ unsigned int dllrdy;
+ unsigned int freqsel;
+ unsigned long rate;
+ int ret, quot;
+
+ /*
+ * Keep phyctrl_pdb and phyctrl_endll low to allow
+ * initialization of CALIO state M/C DFFs
+ */
+ ret = regmap_update_bits(priv->syscfg, EMMC_PHYCTRL1_REG, PDB_MASK,
+ PDB_SHIFT(0));
+ if (ret) {
+ dev_err(&phy->dev, "CALIO power down bar failed: %d\n", ret);
+ return ret;
+ }
+
+	/* Power off was already completed above */
+ if (!on_off)
+ return 0;
+
+ rate = clk_get_rate(priv->emmcclk);
+ quot = DIV_ROUND_CLOSEST(rate, 50000000);
+ if (quot > FRQSEL_150M)
+ dev_warn(&phy->dev, "Unsupported rate: %lu\n", rate);
+ freqsel = clamp_t(int, quot, FRQSEL_25M, FRQSEL_150M);
+
+ /*
+	 * According to the user manual, a calpad calibration cycle
+	 * takes more than 2 us, with no minimal value recommended, so
+	 * we may need a little margin here.
+ */
+ udelay(5);
+
+ ret = regmap_update_bits(priv->syscfg, EMMC_PHYCTRL1_REG, PDB_MASK,
+ PDB_SHIFT(1));
+ if (ret) {
+ dev_err(&phy->dev, "CALIO power down bar failed: %d\n", ret);
+ return ret;
+ }
+
+ /*
+	 * According to the user manual, the driver should wait 5 us for
+	 * calpad busy trimming. However, this value is documented to be
+	 * PVT (process, voltage and temperature) dependent, and failure
+	 * cases have been observed, which indicates we should be more
+	 * tolerant of calpad busy trimming.
+ */
+ ret = regmap_read_poll_timeout(priv->syscfg, EMMC_PHYSTAT_REG,
+ caldone, IS_CALDONE(caldone),
+ 0, 50);
+ if (ret) {
+ dev_err(&phy->dev, "caldone failed, ret=%d\n", ret);
+ return ret;
+ }
+
+ /* Set the frequency of the DLL operation */
+ ret = regmap_update_bits(priv->syscfg, EMMC_PHYCTRL2_REG, FRQSEL_MASK,
+ FRQSEL_SHIFT(freqsel));
+ if (ret) {
+ dev_err(&phy->dev, "set the frequency of dll failed:%d\n", ret);
+ return ret;
+ }
+
+ /* Turn on the DLL */
+ ret = regmap_update_bits(priv->syscfg, EMMC_PHYCTRL1_REG, ENDLL_MASK,
+ ENDLL_SHIFT(1));
+ if (ret) {
+ dev_err(&phy->dev, "turn on the dll failed: %d\n", ret);
+ return ret;
+ }
+
+ /*
+ * After enabling analog DLL circuits docs say that we need 10.2 us if
+ * our source clock is at 50 MHz and that lock time scales linearly
+ * with clock speed. If we are powering on the PHY and the card clock
+	 * is super slow (like 100 kHz) this could take as long as 5.1 ms as
+ * per the math: 10.2 us * (50000000 Hz / 100000 Hz) => 5.1 ms
+ * Hopefully we won't be running at 100 kHz, but we should still make
+ * sure we wait long enough.
+ *
+ * NOTE: There appear to be corner cases where the DLL seems to take
+ * extra long to lock for reasons that aren't understood. In some
+	 * extreme cases we've seen it take over 10 ms (!). We'll be
+ * generous and give it 50ms.
+ */
+ ret = regmap_read_poll_timeout(priv->syscfg,
+ EMMC_PHYSTAT_REG,
+ dllrdy, IS_DLLRDY(dllrdy),
+ 0, 50 * USEC_PER_MSEC);
+ if (ret) {
+ dev_err(&phy->dev, "dllrdy failed. ret=%d\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
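To make the DLL band selection above concrete, two worked cases using only the
constants defined in this file:

	/*
	 * rate = 52 MHz:  DIV_ROUND_CLOSEST(52000000, 50000000)  = 1 -> FRQSEL_50M
	 * rate = 200 MHz: DIV_ROUND_CLOSEST(200000000, 50000000) = 4 -> above
	 *		   FRQSEL_150M, so warn and clamp to FRQSEL_150M (3)
	 */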
+
+static int intel_emmc_phy_init(struct phy *phy)
+{
+ struct intel_emmc_phy *priv = phy_get_drvdata(phy);
+
+ /*
+ * We purposely get the clock here and not in probe to avoid the
+ * circular dependency problem. We expect:
+ * - PHY driver to probe
+ * - SDHCI driver to start probe
+	 * - SDHCI driver to register its clock
+ * - SDHCI driver to get the PHY
+ * - SDHCI driver to init the PHY
+ *
+	 * The clock is optional, so on any error simply return it to the
+	 * caller like any other error.
+ */
+ priv->emmcclk = clk_get_optional(&phy->dev, "emmcclk");
+ if (IS_ERR(priv->emmcclk)) {
+ dev_err(&phy->dev, "ERROR: getting emmcclk\n");
+ return PTR_ERR(priv->emmcclk);
+ }
+
+ return 0;
+}
+
+static int intel_emmc_phy_exit(struct phy *phy)
+{
+ struct intel_emmc_phy *priv = phy_get_drvdata(phy);
+
+ clk_put(priv->emmcclk);
+
+ return 0;
+}
+
+static int intel_emmc_phy_power_on(struct phy *phy)
+{
+ struct intel_emmc_phy *priv = phy_get_drvdata(phy);
+ int ret;
+
+ /* Drive impedance: 50 Ohm */
+ ret = regmap_update_bits(priv->syscfg, EMMC_PHYCTRL0_REG, DR_TY_MASK,
+ DR_TY_SHIFT(6));
+ if (ret) {
+ dev_err(&phy->dev, "ERROR set drive-impednce-50ohm: %d\n", ret);
+ return ret;
+ }
+
+ /* Output tap delay: disable */
+ ret = regmap_update_bits(priv->syscfg, EMMC_PHYCTRL0_REG, OTAPDLYENA,
+ 0);
+ if (ret) {
+ dev_err(&phy->dev, "ERROR Set output tap delay : %d\n", ret);
+ return ret;
+ }
+
+ /* Output tap delay */
+ ret = regmap_update_bits(priv->syscfg, EMMC_PHYCTRL0_REG,
+ OTAPDLYSEL_MASK, OTAPDLYSEL_SHIFT(4));
+ if (ret) {
+ dev_err(&phy->dev, "ERROR: output tap dly select: %d\n", ret);
+ return ret;
+ }
+
+ /* Power up eMMC phy analog blocks */
+ return intel_emmc_phy_power(phy, true);
+}
+
+static int intel_emmc_phy_power_off(struct phy *phy)
+{
+ /* Power down eMMC phy analog blocks */
+ return intel_emmc_phy_power(phy, false);
+}
+
+static const struct phy_ops ops = {
+ .init = intel_emmc_phy_init,
+ .exit = intel_emmc_phy_exit,
+ .power_on = intel_emmc_phy_power_on,
+ .power_off = intel_emmc_phy_power_off,
+ .owner = THIS_MODULE,
+};
+
+static int intel_emmc_phy_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct device_node *np = dev->of_node;
+ struct intel_emmc_phy *priv;
+ struct phy *generic_phy;
+ struct phy_provider *phy_provider;
+
+ priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ /* Get eMMC phy (accessed via chiptop) regmap */
+ priv->syscfg = syscon_regmap_lookup_by_phandle(np, "intel,syscon");
+ if (IS_ERR(priv->syscfg)) {
+ dev_err(dev, "failed to find syscon\n");
+ return PTR_ERR(priv->syscfg);
+ }
+
+ generic_phy = devm_phy_create(dev, np, &ops);
+ if (IS_ERR(generic_phy)) {
+ dev_err(dev, "failed to create PHY\n");
+ return PTR_ERR(generic_phy);
+ }
+
+ phy_set_drvdata(generic_phy, priv);
+ phy_provider = devm_of_phy_provider_register(dev, of_phy_simple_xlate);
+
+ return PTR_ERR_OR_ZERO(phy_provider);
+}
+
+static const struct of_device_id intel_emmc_phy_dt_ids[] = {
+ { .compatible = "intel,lgm-emmc-phy" },
+ {}
+};
+
+MODULE_DEVICE_TABLE(of, intel_emmc_phy_dt_ids);
+
+static struct platform_driver intel_emmc_driver = {
+ .probe = intel_emmc_phy_probe,
+ .driver = {
+ .name = "intel-emmc-phy",
+ .of_match_table = intel_emmc_phy_dt_ids,
+ },
+};
+
+module_platform_driver(intel_emmc_driver);
+
+MODULE_AUTHOR("Peter Harliman Liem <peter.harliman.liem@intel.com>");
+MODULE_DESCRIPTION("Intel eMMC PHY driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/phy/lantiq/phy-lantiq-vrx200-pcie.c b/drivers/phy/lantiq/phy-lantiq-vrx200-pcie.c
index 6e457967653e..2ff9a48d833e 100644
--- a/drivers/phy/lantiq/phy-lantiq-vrx200-pcie.c
+++ b/drivers/phy/lantiq/phy-lantiq-vrx200-pcie.c
@@ -386,7 +386,7 @@ static struct phy *ltq_vrx200_pcie_phy_xlate(struct device *dev,
default:
dev_err(dev, "invalid PHY mode %u\n", mode);
return ERR_PTR(-EINVAL);
- };
+ }
return priv->phy;
}
diff --git a/drivers/phy/marvell/Kconfig b/drivers/phy/marvell/Kconfig
index 005e02dd4a91..8f6273c837ec 100644
--- a/drivers/phy/marvell/Kconfig
+++ b/drivers/phy/marvell/Kconfig
@@ -10,14 +10,16 @@ config ARMADA375_USBCLUSTER_PHY
config PHY_BERLIN_SATA
tristate "Marvell Berlin SATA PHY driver"
- depends on ARCH_BERLIN && HAS_IOMEM && OF
+ depends on ARCH_BERLIN || COMPILE_TEST
+ depends on OF && HAS_IOMEM
select GENERIC_PHY
help
Enable this to support the SATA PHY on Marvell Berlin SoCs.
config PHY_BERLIN_USB
tristate "Marvell Berlin USB PHY Driver"
- depends on ARCH_BERLIN && RESET_CONTROLLER && HAS_IOMEM && OF
+ depends on ARCH_BERLIN || COMPILE_TEST
+ depends on OF && HAS_IOMEM && RESET_CONTROLLER
select GENERIC_PHY
help
Enable this to support the USB PHY on Marvell Berlin SoCs.
@@ -95,7 +97,7 @@ config PHY_PXA_28NM_USB2
config PHY_PXA_USB
tristate "Marvell PXA USB PHY Driver"
- depends on ARCH_PXA || ARCH_MMP
+ depends on ARCH_PXA || ARCH_MMP || COMPILE_TEST
select GENERIC_PHY
help
Enable this to support Marvell PXA USB PHY driver for Marvell
diff --git a/drivers/phy/marvell/phy-mvebu-cp110-comphy.c b/drivers/phy/marvell/phy-mvebu-cp110-comphy.c
index e3b87c94aaf6..e41367f36ee1 100644
--- a/drivers/phy/marvell/phy-mvebu-cp110-comphy.c
+++ b/drivers/phy/marvell/phy-mvebu-cp110-comphy.c
@@ -221,7 +221,7 @@ static const struct mvebu_comphy_conf mvebu_comphy_cp110_modes[] = {
ETH_CONF(2, 0, PHY_INTERFACE_MODE_SGMII, 0x1, COMPHY_FW_MODE_SGMII),
ETH_CONF(2, 0, PHY_INTERFACE_MODE_2500BASEX, 0x1, COMPHY_FW_MODE_HS_SGMII),
ETH_CONF(2, 0, PHY_INTERFACE_MODE_RXAUI, 0x1, COMPHY_FW_MODE_RXAUI),
- ETH_CONF(2, 0, PHY_INTERFACE_MODE_10GKR, 0x1, COMPHY_FW_MODE_XFI),
+ ETH_CONF(2, 0, PHY_INTERFACE_MODE_10GBASER, 0x1, COMPHY_FW_MODE_XFI),
GEN_CONF(2, 0, PHY_MODE_USB_HOST_SS, COMPHY_FW_MODE_USB3H),
GEN_CONF(2, 0, PHY_MODE_SATA, COMPHY_FW_MODE_SATA),
GEN_CONF(2, 0, PHY_MODE_PCIE, COMPHY_FW_MODE_PCIE),
@@ -235,14 +235,14 @@ static const struct mvebu_comphy_conf mvebu_comphy_cp110_modes[] = {
/* lane 4 */
ETH_CONF(4, 0, PHY_INTERFACE_MODE_SGMII, 0x2, COMPHY_FW_MODE_SGMII),
ETH_CONF(4, 0, PHY_INTERFACE_MODE_2500BASEX, 0x2, COMPHY_FW_MODE_HS_SGMII),
- ETH_CONF(4, 0, PHY_INTERFACE_MODE_10GKR, 0x2, COMPHY_FW_MODE_XFI),
+ ETH_CONF(4, 0, PHY_INTERFACE_MODE_10GBASER, 0x2, COMPHY_FW_MODE_XFI),
ETH_CONF(4, 0, PHY_INTERFACE_MODE_RXAUI, 0x2, COMPHY_FW_MODE_RXAUI),
GEN_CONF(4, 0, PHY_MODE_USB_DEVICE_SS, COMPHY_FW_MODE_USB3D),
GEN_CONF(4, 1, PHY_MODE_USB_HOST_SS, COMPHY_FW_MODE_USB3H),
GEN_CONF(4, 1, PHY_MODE_PCIE, COMPHY_FW_MODE_PCIE),
ETH_CONF(4, 1, PHY_INTERFACE_MODE_SGMII, 0x1, COMPHY_FW_MODE_SGMII),
ETH_CONF(4, 1, PHY_INTERFACE_MODE_2500BASEX, -1, COMPHY_FW_MODE_HS_SGMII),
- ETH_CONF(4, 1, PHY_INTERFACE_MODE_10GKR, -1, COMPHY_FW_MODE_XFI),
+ ETH_CONF(4, 1, PHY_INTERFACE_MODE_10GBASER, -1, COMPHY_FW_MODE_XFI),
/* lane 5 */
ETH_CONF(5, 1, PHY_INTERFACE_MODE_RXAUI, 0x2, COMPHY_FW_MODE_RXAUI),
GEN_CONF(5, 1, PHY_MODE_SATA, COMPHY_FW_MODE_SATA),
@@ -342,7 +342,7 @@ static int mvebu_comphy_ethernet_init_reset(struct mvebu_comphy_lane *lane)
MVEBU_COMPHY_SERDES_CFG0_RXAUI_MODE);
switch (lane->submode) {
- case PHY_INTERFACE_MODE_10GKR:
+ case PHY_INTERFACE_MODE_10GBASER:
val |= MVEBU_COMPHY_SERDES_CFG0_GEN_RX(0xe) |
MVEBU_COMPHY_SERDES_CFG0_GEN_TX(0xe);
break;
@@ -417,7 +417,7 @@ static int mvebu_comphy_ethernet_init_reset(struct mvebu_comphy_lane *lane)
/* refclk selection */
val = readl(priv->base + MVEBU_COMPHY_MISC_CTRL0(lane->id));
val &= ~MVEBU_COMPHY_MISC_CTRL0_REFCLK_SEL;
- if (lane->submode == PHY_INTERFACE_MODE_10GKR)
+ if (lane->submode == PHY_INTERFACE_MODE_10GBASER)
val |= MVEBU_COMPHY_MISC_CTRL0_ICP_FORCE;
writel(val, priv->base + MVEBU_COMPHY_MISC_CTRL0(lane->id));
@@ -564,7 +564,7 @@ static int mvebu_comphy_set_mode_rxaui(struct phy *phy)
return mvebu_comphy_init_plls(lane);
}
-static int mvebu_comphy_set_mode_10gkr(struct phy *phy)
+static int mvebu_comphy_set_mode_10gbaser(struct phy *phy)
{
struct mvebu_comphy_lane *lane = phy_get_drvdata(phy);
struct mvebu_comphy_priv *priv = lane->priv;
@@ -735,8 +735,8 @@ static int mvebu_comphy_power_on_legacy(struct phy *phy)
case PHY_INTERFACE_MODE_RXAUI:
ret = mvebu_comphy_set_mode_rxaui(phy);
break;
- case PHY_INTERFACE_MODE_10GKR:
- ret = mvebu_comphy_set_mode_10gkr(phy);
+ case PHY_INTERFACE_MODE_10GBASER:
+ ret = mvebu_comphy_set_mode_10gbaser(phy);
break;
default:
return -ENOTSUPP;
@@ -782,8 +782,8 @@ static int mvebu_comphy_power_on(struct phy *phy)
lane->id);
fw_speed = COMPHY_FW_SPEED_3125;
break;
- case PHY_INTERFACE_MODE_10GKR:
- dev_dbg(priv->dev, "set lane %d to 10G-KR mode\n",
+ case PHY_INTERFACE_MODE_10GBASER:
+ dev_dbg(priv->dev, "set lane %d to 10GBASE-R mode\n",
lane->id);
fw_speed = COMPHY_FW_SPEED_103125;
break;
diff --git a/drivers/phy/mediatek/Kconfig b/drivers/phy/mediatek/Kconfig
index 376f5d189da0..dee757c957f2 100644
--- a/drivers/phy/mediatek/Kconfig
+++ b/drivers/phy/mediatek/Kconfig
@@ -3,12 +3,13 @@
# Phy drivers for Mediatek devices
#
config PHY_MTK_TPHY
- tristate "MediaTek T-PHY Driver"
- depends on ARCH_MEDIATEK && OF
- select GENERIC_PHY
- help
- Say 'Y' here to add support for MediaTek T-PHY driver,
- it supports multiple usb2.0, usb3.0 ports, PCIe and
+ tristate "MediaTek T-PHY Driver"
+ depends on ARCH_MEDIATEK || COMPILE_TEST
+ depends on OF
+ select GENERIC_PHY
+ help
+ Say 'Y' here to add support for MediaTek T-PHY driver,
+ it supports multiple usb2.0, usb3.0 ports, PCIe and
	  SATA, and meanwhile supports two T-PHY versions which have
	  different bank layouts; the T-PHY with banks shared between
	  multi-ports is the first version, otherwise it is the second version,
@@ -16,7 +17,8 @@ config PHY_MTK_TPHY
config PHY_MTK_UFS
tristate "MediaTek UFS M-PHY driver"
- depends on ARCH_MEDIATEK && OF
+ depends on ARCH_MEDIATEK || COMPILE_TEST
+ depends on OF
select GENERIC_PHY
help
Support for UFS M-PHY on MediaTek chipsets.
@@ -25,10 +27,11 @@ config PHY_MTK_UFS
specified M-PHYs.
config PHY_MTK_XSPHY
- tristate "MediaTek XS-PHY Driver"
- depends on ARCH_MEDIATEK && OF
- select GENERIC_PHY
- help
+ tristate "MediaTek XS-PHY Driver"
+ depends on ARCH_MEDIATEK || COMPILE_TEST
+ depends on OF
+ select GENERIC_PHY
+ help
Enable this to support the SuperSpeedPlus XS-PHY transceiver for
USB3.1 GEN2 controllers on MediaTek chips. The driver supports
multiple USB2.0, USB3.1 GEN2 ports.
diff --git a/drivers/phy/phy-core.c b/drivers/phy/phy-core.c
index b04f4fe85ac2..2eb28cc2d2dc 100644
--- a/drivers/phy/phy-core.c
+++ b/drivers/phy/phy-core.c
@@ -29,7 +29,7 @@ static void devm_phy_release(struct device *dev, void *res)
{
struct phy *phy = *(struct phy **)res;
- phy_put(phy);
+ phy_put(dev, phy);
}
static void devm_phy_provider_release(struct device *dev, void *res)
@@ -566,12 +566,12 @@ struct phy *of_phy_get(struct device_node *np, const char *con_id)
EXPORT_SYMBOL_GPL(of_phy_get);
/**
- * phy_put() - release the PHY
- * @phy: the phy returned by phy_get()
+ * of_phy_put() - release the PHY
+ * @phy: the phy returned by of_phy_get()
*
- * Releases a refcount the caller received from phy_get().
+ * Releases a refcount the caller received from of_phy_get().
*/
-void phy_put(struct phy *phy)
+void of_phy_put(struct phy *phy)
{
if (!phy || IS_ERR(phy))
return;
@@ -584,6 +584,20 @@ void phy_put(struct phy *phy)
module_put(phy->ops->owner);
put_device(&phy->dev);
}
+EXPORT_SYMBOL_GPL(of_phy_put);
+
+/**
+ * phy_put() - release the PHY
+ * @dev: device that wants to release this phy
+ * @phy: the phy returned by phy_get()
+ *
+ * Releases a refcount the caller received from phy_get().
+ */
+void phy_put(struct device *dev, struct phy *phy)
+{
+ device_link_remove(dev, &phy->dev);
+ of_phy_put(phy);
+}
EXPORT_SYMBOL_GPL(phy_put);
/**
@@ -651,6 +665,7 @@ struct phy *phy_get(struct device *dev, const char *string)
{
int index = 0;
struct phy *phy;
+ struct device_link *link;
if (string == NULL) {
dev_WARN(dev, "missing string\n");
@@ -672,6 +687,13 @@ struct phy *phy_get(struct device *dev, const char *string)
get_device(&phy->dev);
+ link = device_link_add(dev, &phy->dev, DL_FLAG_STATELESS);
+ if (!link) {
+ dev_err(dev, "failed to create device link to %s\n",
+ dev_name(phy->dev.parent));
+ return ERR_PTR(-EINVAL);
+ }
+
return phy;
}
EXPORT_SYMBOL_GPL(phy_get);
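With this change phy_get() records a stateless device link from the consumer to
the PHY provider, and phy_put() needs the same consumer device to tear that link
down again; a minimal non-managed consumer sketch (the con_id "usb-phy" is
hypothetical):

	struct phy *phy = phy_get(dev, "usb-phy");

	if (IS_ERR(phy))
		return PTR_ERR(phy);

	phy_init(phy);
	phy_power_on(phy);
	/* ... */
	phy_power_off(phy);
	phy_exit(phy);
	phy_put(dev, phy);	/* also removes the consumer -> PHY device link */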
@@ -765,6 +787,7 @@ struct phy *devm_of_phy_get(struct device *dev, struct device_node *np,
const char *con_id)
{
struct phy **ptr, *phy;
+ struct device_link *link;
ptr = devres_alloc(devm_phy_release, sizeof(*ptr), GFP_KERNEL);
if (!ptr)
@@ -776,6 +799,14 @@ struct phy *devm_of_phy_get(struct device *dev, struct device_node *np,
devres_add(dev, ptr);
} else {
devres_free(ptr);
+ return phy;
+ }
+
+ link = device_link_add(dev, &phy->dev, DL_FLAG_STATELESS);
+ if (!link) {
+ dev_err(dev, "failed to create device link to %s\n",
+ dev_name(phy->dev.parent));
+ return ERR_PTR(-EINVAL);
}
return phy;
@@ -798,6 +829,7 @@ struct phy *devm_of_phy_get_by_index(struct device *dev, struct device_node *np,
int index)
{
struct phy **ptr, *phy;
+ struct device_link *link;
ptr = devres_alloc(devm_phy_release, sizeof(*ptr), GFP_KERNEL);
if (!ptr)
@@ -819,6 +851,13 @@ struct phy *devm_of_phy_get_by_index(struct device *dev, struct device_node *np,
*ptr = phy;
devres_add(dev, ptr);
+ link = device_link_add(dev, &phy->dev, DL_FLAG_STATELESS);
+ if (!link) {
+ dev_err(dev, "failed to create device link to %s\n",
+ dev_name(phy->dev.parent));
+ return ERR_PTR(-EINVAL);
+ }
+
return phy;
}
EXPORT_SYMBOL_GPL(devm_of_phy_get_by_index);
diff --git a/drivers/phy/qualcomm/phy-qcom-apq8064-sata.c b/drivers/phy/qualcomm/phy-qcom-apq8064-sata.c
index 42bc5150dd92..febe0aef68d4 100644
--- a/drivers/phy/qualcomm/phy-qcom-apq8064-sata.c
+++ b/drivers/phy/qualcomm/phy-qcom-apq8064-sata.c
@@ -80,7 +80,7 @@ static int read_poll_timeout(void __iomem *addr, u32 mask)
if (readl_relaxed(addr) & mask)
return 0;
- usleep_range(DELAY_INTERVAL_US, DELAY_INTERVAL_US + 50);
+ usleep_range(DELAY_INTERVAL_US, DELAY_INTERVAL_US + 50);
} while (!time_after(jiffies, timeout));
return (readl_relaxed(addr) & mask) ? 0 : -ETIMEDOUT;
diff --git a/drivers/phy/qualcomm/phy-qcom-qmp.c b/drivers/phy/qualcomm/phy-qcom-qmp.c
index 66f91726b8b2..7db2a94f7a99 100644
--- a/drivers/phy/qualcomm/phy-qcom-qmp.c
+++ b/drivers/phy/qualcomm/phy-qcom-qmp.c
@@ -166,8 +166,9 @@ static const unsigned int sdm845_ufsphy_regs_layout[] = {
};
static const unsigned int sm8150_ufsphy_regs_layout[] = {
- [QPHY_START_CTRL] = 0x00,
- [QPHY_PCS_READY_STATUS] = 0x180,
+ [QPHY_START_CTRL] = QPHY_V4_PHY_START,
+ [QPHY_PCS_READY_STATUS] = QPHY_V4_PCS_READY_STATUS,
+ [QPHY_SW_RESET] = QPHY_V4_SW_RESET,
};
static const struct qmp_phy_init_tbl msm8996_pcie_serdes_tbl[] = {
@@ -885,7 +886,6 @@ static const struct qmp_phy_init_tbl msm8998_usb3_pcs_tbl[] = {
};
static const struct qmp_phy_init_tbl sm8150_ufsphy_serdes_tbl[] = {
- QMP_PHY_INIT_CFG(QPHY_POWER_DOWN_CONTROL, 0x01),
QMP_PHY_INIT_CFG(QSERDES_V4_COM_SYSCLK_EN_SEL, 0xd9),
QMP_PHY_INIT_CFG(QSERDES_V4_COM_HSCLK_SEL, 0x11),
QMP_PHY_INIT_CFG(QSERDES_V4_COM_HSCLK_HS_SWITCH_SEL, 0x00),
@@ -1390,7 +1390,6 @@ static const struct qmp_phy_cfg sm8150_ufsphy_cfg = {
.pwrdn_ctrl = SW_PWRDN,
.is_dual_lane_phy = true,
- .no_pcs_sw_reset = true,
};
static void qcom_qmp_phy_configure(void __iomem *base,
diff --git a/drivers/phy/qualcomm/phy-qcom-qmp.h b/drivers/phy/qualcomm/phy-qcom-qmp.h
index ab6ff9b45a32..90f793c2293d 100644
--- a/drivers/phy/qualcomm/phy-qcom-qmp.h
+++ b/drivers/phy/qualcomm/phy-qcom-qmp.h
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* Copyright (c) 2017, The Linux Foundation. All rights reserved.
*/
diff --git a/drivers/phy/rockchip/Kconfig b/drivers/phy/rockchip/Kconfig
index dbd2de4d28b1..0824b9dd5683 100644
--- a/drivers/phy/rockchip/Kconfig
+++ b/drivers/phy/rockchip/Kconfig
@@ -39,6 +39,7 @@ config PHY_ROCKCHIP_INNO_DSIDPHY
tristate "Rockchip Innosilicon MIPI/LVDS/TTL PHY driver"
depends on (ARCH_ROCKCHIP || COMPILE_TEST) && OF
select GENERIC_PHY
+ select GENERIC_PHY_MIPI_DPHY
help
Enable this to support the Rockchip MIPI/LVDS/TTL PHY with
Innosilicon IP block.
diff --git a/drivers/phy/rockchip/phy-rockchip-inno-dsidphy.c b/drivers/phy/rockchip/phy-rockchip-inno-dsidphy.c
index fc729ecd3fe9..a7c6c940a3a8 100644
--- a/drivers/phy/rockchip/phy-rockchip-inno-dsidphy.c
+++ b/drivers/phy/rockchip/phy-rockchip-inno-dsidphy.c
@@ -16,6 +16,7 @@
#include <linux/platform_device.h>
#include <linux/reset.h>
#include <linux/phy/phy.h>
+#include <linux/phy/phy-mipi-dphy.h>
#include <linux/pm_runtime.h>
#include <linux/mfd/syscon.h>
@@ -167,31 +168,6 @@
#define DSI_PHY_STATUS 0xb0
#define PHY_LOCK BIT(0)
-struct mipi_dphy_timing {
- unsigned int clkmiss;
- unsigned int clkpost;
- unsigned int clkpre;
- unsigned int clkprepare;
- unsigned int clksettle;
- unsigned int clktermen;
- unsigned int clktrail;
- unsigned int clkzero;
- unsigned int dtermen;
- unsigned int eot;
- unsigned int hsexit;
- unsigned int hsprepare;
- unsigned int hszero;
- unsigned int hssettle;
- unsigned int hsskip;
- unsigned int hstrail;
- unsigned int init;
- unsigned int lpx;
- unsigned int taget;
- unsigned int tago;
- unsigned int tasure;
- unsigned int wakeup;
-};
-
struct inno_dsidphy {
struct device *dev;
struct clk *ref_clk;
@@ -201,7 +177,9 @@ struct inno_dsidphy {
void __iomem *host_base;
struct reset_control *rst;
enum phy_mode mode;
+ struct phy_configure_opts_mipi_dphy dphy_cfg;
+ struct clk *pll_clk;
struct {
struct clk_hw hw;
u8 prediv;
@@ -238,37 +216,79 @@ static void phy_update_bits(struct inno_dsidphy *inno,
writel(tmp, inno->phy_base + reg);
}
-static void mipi_dphy_timing_get_default(struct mipi_dphy_timing *timing,
- unsigned long period)
+static unsigned long inno_dsidphy_pll_calc_rate(struct inno_dsidphy *inno,
+ unsigned long rate)
{
- /* Global Operation Timing Parameters */
- timing->clkmiss = 0;
- timing->clkpost = 70000 + 52 * period;
- timing->clkpre = 8 * period;
- timing->clkprepare = 65000;
- timing->clksettle = 95000;
- timing->clktermen = 0;
- timing->clktrail = 80000;
- timing->clkzero = 260000;
- timing->dtermen = 0;
- timing->eot = 0;
- timing->hsexit = 120000;
- timing->hsprepare = 65000 + 4 * period;
- timing->hszero = 145000 + 6 * period;
- timing->hssettle = 85000 + 6 * period;
- timing->hsskip = 40000;
- timing->hstrail = max(8 * period, 60000 + 4 * period);
- timing->init = 100000000;
- timing->lpx = 60000;
- timing->taget = 5 * timing->lpx;
- timing->tago = 4 * timing->lpx;
- timing->tasure = 2 * timing->lpx;
- timing->wakeup = 1000000000;
+ unsigned long prate = clk_get_rate(inno->ref_clk);
+ unsigned long best_freq = 0;
+ unsigned long fref, fout;
+ u8 min_prediv, max_prediv;
+ u8 _prediv, best_prediv = 1;
+ u16 _fbdiv, best_fbdiv = 1;
+ u32 min_delta = UINT_MAX;
+
+ /*
+ * The PLL output frequency can be calculated using a simple formula:
+ * PLL_Output_Frequency = (FREF / PREDIV * FBDIV) / 2
+	 * where PLL_Output_Frequency equals DDR-Clock-Frequency * 2.
+ */
+ fref = prate / 2;
+ if (rate > 1000000000UL)
+ fout = 1000000000UL;
+ else
+ fout = rate;
+
+	/* 5 MHz < Fref / prediv < 40 MHz */
+ min_prediv = DIV_ROUND_UP(fref, 40000000);
+ max_prediv = fref / 5000000;
+
+ for (_prediv = min_prediv; _prediv <= max_prediv; _prediv++) {
+ u64 tmp;
+ u32 delta;
+
+ tmp = (u64)fout * _prediv;
+ do_div(tmp, fref);
+ _fbdiv = tmp;
+
+ /*
+ * The possible settings of feedback divider are
+ * 12, 13, 14, 16, ~ 511
+ */
+ if (_fbdiv == 15)
+ continue;
+
+ if (_fbdiv < 12 || _fbdiv > 511)
+ continue;
+
+ tmp = (u64)_fbdiv * fref;
+ do_div(tmp, _prediv);
+
+ delta = abs(fout - tmp);
+ if (!delta) {
+ best_prediv = _prediv;
+ best_fbdiv = _fbdiv;
+ best_freq = tmp;
+ break;
+ } else if (delta < min_delta) {
+ best_prediv = _prediv;
+ best_fbdiv = _fbdiv;
+ best_freq = tmp;
+ min_delta = delta;
+ }
+ }
+
+ if (best_freq) {
+ inno->pll.prediv = best_prediv;
+ inno->pll.fbdiv = best_fbdiv;
+ inno->pll.rate = best_freq;
+ }
+
+ return best_freq;
}
static void inno_dsidphy_mipi_mode_enable(struct inno_dsidphy *inno)
{
- struct mipi_dphy_timing gotp;
+ struct phy_configure_opts_mipi_dphy *cfg = &inno->dphy_cfg;
const struct {
unsigned long rate;
u8 hs_prepare;
@@ -288,12 +308,14 @@ static void inno_dsidphy_mipi_mode_enable(struct inno_dsidphy *inno)
{ 800000000, 0x21, 0x1f, 0x09, 0x29},
{1000000000, 0x09, 0x20, 0x09, 0x27},
};
- u32 t_txbyteclkhs, t_txclkesc, ui;
+ u32 t_txbyteclkhs, t_txclkesc;
u32 txbyteclkhs, txclkesc, esc_clk_div;
u32 hs_exit, clk_post, clk_pre, wakeup, lpx, ta_go, ta_sure, ta_wait;
u32 hs_prepare, hs_trail, hs_zero, clk_lane_hs_zero, data_lane_hs_zero;
unsigned int i;
+ inno_dsidphy_pll_calc_rate(inno, cfg->hs_clk_rate);
+
/* Select MIPI mode */
phy_update_bits(inno, REGISTER_PART_LVDS, 0x03,
MODE_ENABLE_MASK, MIPI_MODE_ENABLE);
@@ -328,32 +350,27 @@ static void inno_dsidphy_mipi_mode_enable(struct inno_dsidphy *inno)
txclkesc = txbyteclkhs / esc_clk_div;
t_txclkesc = div_u64(PSEC_PER_SEC, txclkesc);
- ui = div_u64(PSEC_PER_SEC, inno->pll.rate);
-
- memset(&gotp, 0, sizeof(gotp));
- mipi_dphy_timing_get_default(&gotp, ui);
-
/*
* The value of counter for HS Ths-exit
* Ths-exit = Tpin_txbyteclkhs * value
*/
- hs_exit = DIV_ROUND_UP(gotp.hsexit, t_txbyteclkhs);
+ hs_exit = DIV_ROUND_UP(cfg->hs_exit, t_txbyteclkhs);
/*
* The value of counter for HS Tclk-post
* Tclk-post = Tpin_txbyteclkhs * value
*/
- clk_post = DIV_ROUND_UP(gotp.clkpost, t_txbyteclkhs);
+ clk_post = DIV_ROUND_UP(cfg->clk_post, t_txbyteclkhs);
/*
* The value of counter for HS Tclk-pre
* Tclk-pre = Tpin_txbyteclkhs * value
*/
- clk_pre = DIV_ROUND_UP(gotp.clkpre, t_txbyteclkhs);
+ clk_pre = DIV_ROUND_UP(cfg->clk_pre, t_txbyteclkhs);
/*
* The value of counter for HS Tlpx Time
* Tlpx = Tpin_txbyteclkhs * (2 + value)
*/
- lpx = DIV_ROUND_UP(gotp.lpx, t_txbyteclkhs);
+ lpx = DIV_ROUND_UP(cfg->lpx, t_txbyteclkhs);
if (lpx >= 2)
lpx -= 2;
@@ -362,19 +379,19 @@ static void inno_dsidphy_mipi_mode_enable(struct inno_dsidphy *inno)
* Tta-go for turnaround
* Tta-go = Ttxclkesc * value
*/
- ta_go = DIV_ROUND_UP(gotp.tago, t_txclkesc);
+ ta_go = DIV_ROUND_UP(cfg->ta_go, t_txclkesc);
/*
* The value of counter for HS Tta-sure
* Tta-sure for turnaround
* Tta-sure = Ttxclkesc * value
*/
- ta_sure = DIV_ROUND_UP(gotp.tasure, t_txclkesc);
+ ta_sure = DIV_ROUND_UP(cfg->ta_sure, t_txclkesc);
/*
* The value of counter for HS Tta-wait
* Tta-wait for turnaround
* Tta-wait = Ttxclkesc * value
*/
- ta_wait = DIV_ROUND_UP(gotp.taget, t_txclkesc);
+ ta_wait = DIV_ROUND_UP(cfg->ta_get, t_txclkesc);
for (i = 0; i < ARRAY_SIZE(timings); i++)
if (inno->pll.rate <= timings[i].rate)
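
Each counter programmed above is just the corresponding D-PHY interval from the configure() opts, given in picoseconds, quantized upward into byte-clock periods. A small illustrative conversion in plain C; the 800 MHz HS clock and 100 ns Ths-exit are assumed example numbers:

#include <stdint.h>
#include <stdio.h>

#define PSEC_PER_SEC 1000000000000ULL

/* Round-up quantization of a D-PHY interval (ps) into byte-clock ticks */
static uint32_t ticks(uint64_t interval_ps, uint64_t period_ps)
{
        return (interval_ps + period_ps - 1) / period_ps;
}

int main(void)
{
        uint64_t hs_clk_rate = 800000000;       /* assumed HS bit clock */
        uint64_t txbyteclkhs = hs_clk_rate / 8; /* one byte every 8 UI */
        uint64_t t_txbyteclkhs = PSEC_PER_SEC / txbyteclkhs;

        /* e.g. a 100 ns Ths-exit requirement -> counter value of 10 */
        printf("hs_exit = %u\n", ticks(100000, t_txbyteclkhs));
        return 0;
}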
@@ -479,6 +496,7 @@ static int inno_dsidphy_power_on(struct phy *phy)
struct inno_dsidphy *inno = phy_get_drvdata(phy);
clk_prepare_enable(inno->pclk_phy);
+ clk_prepare_enable(inno->ref_clk);
pm_runtime_get_sync(inno->dev);
/* Bandgap power on */
@@ -524,6 +542,7 @@ static int inno_dsidphy_power_off(struct phy *phy)
LVDS_PLL_POWER_OFF | LVDS_BANDGAP_POWER_DOWN);
pm_runtime_put(inno->dev);
+ clk_disable_unprepare(inno->ref_clk);
clk_disable_unprepare(inno->pclk_phy);
return 0;
@@ -546,168 +565,32 @@ static int inno_dsidphy_set_mode(struct phy *phy, enum phy_mode mode,
return 0;
}
-static const struct phy_ops inno_dsidphy_ops = {
- .set_mode = inno_dsidphy_set_mode,
- .power_on = inno_dsidphy_power_on,
- .power_off = inno_dsidphy_power_off,
- .owner = THIS_MODULE,
-};
-
-static unsigned long inno_dsidphy_pll_round_rate(struct inno_dsidphy *inno,
- unsigned long prate,
- unsigned long rate,
- u8 *prediv, u16 *fbdiv)
-{
- unsigned long best_freq = 0;
- unsigned long fref, fout;
- u8 min_prediv, max_prediv;
- u8 _prediv, best_prediv = 1;
- u16 _fbdiv, best_fbdiv = 1;
- u32 min_delta = UINT_MAX;
-
- /*
- * The PLL output frequency can be calculated using a simple formula:
- * PLL_Output_Frequency = (FREF / PREDIV * FBDIV) / 2
- * PLL_Output_Frequency: it is equal to DDR-Clock-Frequency * 2
- */
- fref = prate / 2;
- if (rate > 1000000000UL)
- fout = 1000000000UL;
- else
- fout = rate;
-
- /* 5Mhz < Fref / prediv < 40MHz */
- min_prediv = DIV_ROUND_UP(fref, 40000000);
- max_prediv = fref / 5000000;
-
- for (_prediv = min_prediv; _prediv <= max_prediv; _prediv++) {
- u64 tmp;
- u32 delta;
-
- tmp = (u64)fout * _prediv;
- do_div(tmp, fref);
- _fbdiv = tmp;
-
- /*
- * The possible settings of feedback divider are
- * 12, 13, 14, 16, ~ 511
- */
- if (_fbdiv == 15)
- continue;
-
- if (_fbdiv < 12 || _fbdiv > 511)
- continue;
-
- tmp = (u64)_fbdiv * fref;
- do_div(tmp, _prediv);
-
- delta = abs(fout - tmp);
- if (!delta) {
- best_prediv = _prediv;
- best_fbdiv = _fbdiv;
- best_freq = tmp;
- break;
- } else if (delta < min_delta) {
- best_prediv = _prediv;
- best_fbdiv = _fbdiv;
- best_freq = tmp;
- min_delta = delta;
- }
- }
-
- if (best_freq) {
- *prediv = best_prediv;
- *fbdiv = best_fbdiv;
- }
-
- return best_freq;
-}
-
-static long inno_dsidphy_pll_clk_round_rate(struct clk_hw *hw,
- unsigned long rate,
- unsigned long *prate)
+static int inno_dsidphy_configure(struct phy *phy,
+ union phy_configure_opts *opts)
{
- struct inno_dsidphy *inno = hw_to_inno(hw);
- unsigned long fout;
- u16 fbdiv = 1;
- u8 prediv = 1;
-
- fout = inno_dsidphy_pll_round_rate(inno, *prate, rate,
- &prediv, &fbdiv);
-
- return fout;
-}
-
-static int inno_dsidphy_pll_clk_set_rate(struct clk_hw *hw,
- unsigned long rate,
- unsigned long parent_rate)
-{
- struct inno_dsidphy *inno = hw_to_inno(hw);
- unsigned long fout;
- u16 fbdiv = 1;
- u8 prediv = 1;
+ struct inno_dsidphy *inno = phy_get_drvdata(phy);
+ int ret;
- fout = inno_dsidphy_pll_round_rate(inno, parent_rate, rate,
- &prediv, &fbdiv);
+ if (inno->mode != PHY_MODE_MIPI_DPHY)
+ return -EINVAL;
- dev_dbg(inno->dev, "fin=%lu, fout=%lu, prediv=%u, fbdiv=%u\n",
- parent_rate, fout, prediv, fbdiv);
+ ret = phy_mipi_dphy_config_validate(&opts->mipi_dphy);
+ if (ret)
+ return ret;
- inno->pll.prediv = prediv;
- inno->pll.fbdiv = fbdiv;
- inno->pll.rate = fout;
+ memcpy(&inno->dphy_cfg, &opts->mipi_dphy, sizeof(inno->dphy_cfg));
return 0;
}
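
For context, the consumer side of this op usually fills default D-PHY timings, hands them over with phy_configure(), and only then powers on. The sketch below shows that flow; dsi_host_enable_phy() and its parameters are hypothetical, while the phy_* calls are the generic PHY API:

#include <linux/phy/phy.h>
#include <linux/phy/phy-mipi-dphy.h>

static int dsi_host_enable_phy(struct phy *dphy, unsigned long pixel_clock,
                               unsigned int lanes)
{
        union phy_configure_opts opts = { };
        int ret;

        /* Derive spec-default timings for the link; 24 bpp assumed */
        ret = phy_mipi_dphy_get_default_config(pixel_clock, 24, lanes,
                                               &opts.mipi_dphy);
        if (ret)
                return ret;

        ret = phy_set_mode(dphy, PHY_MODE_MIPI_DPHY);
        if (ret)
                return ret;

        /* Validated and cached by inno_dsidphy_configure() */
        ret = phy_configure(dphy, &opts);
        if (ret)
                return ret;

        return phy_power_on(dphy);
}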
-static unsigned long
-inno_dsidphy_pll_clk_recalc_rate(struct clk_hw *hw, unsigned long prate)
-{
- struct inno_dsidphy *inno = hw_to_inno(hw);
-
- /* PLL_Output_Frequency = (FREF / PREDIV * FBDIV) / 2 */
- return (prate / inno->pll.prediv * inno->pll.fbdiv) / 2;
-}
-
-static const struct clk_ops inno_dsidphy_pll_clk_ops = {
- .round_rate = inno_dsidphy_pll_clk_round_rate,
- .set_rate = inno_dsidphy_pll_clk_set_rate,
- .recalc_rate = inno_dsidphy_pll_clk_recalc_rate,
+static const struct phy_ops inno_dsidphy_ops = {
+ .configure = inno_dsidphy_configure,
+ .set_mode = inno_dsidphy_set_mode,
+ .power_on = inno_dsidphy_power_on,
+ .power_off = inno_dsidphy_power_off,
+ .owner = THIS_MODULE,
};
-static int inno_dsidphy_pll_register(struct inno_dsidphy *inno)
-{
- struct device *dev = inno->dev;
- struct clk *clk;
- const char *parent_name;
- struct clk_init_data init;
- int ret;
-
- parent_name = __clk_get_name(inno->ref_clk);
-
- init.name = "mipi_dphy_pll";
- ret = of_property_read_string(dev->of_node, "clock-output-names",
- &init.name);
- if (ret < 0)
- dev_dbg(dev, "phy should set clock-output-names property\n");
-
- init.ops = &inno_dsidphy_pll_clk_ops;
- init.parent_names = &parent_name;
- init.num_parents = 1;
- init.flags = 0;
-
- inno->pll.hw.init = &init;
- clk = devm_clk_register(dev, &inno->pll.hw);
- if (IS_ERR(clk)) {
- ret = PTR_ERR(clk);
- dev_err(dev, "failed to register PLL: %d\n", ret);
- return ret;
- }
-
- return devm_of_clk_add_hw_provider(dev, of_clk_hw_simple_get,
- &inno->pll.hw);
-}
-
static int inno_dsidphy_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
@@ -764,10 +647,6 @@ static int inno_dsidphy_probe(struct platform_device *pdev)
return ret;
}
- ret = inno_dsidphy_pll_register(inno);
- if (ret)
- return ret;
-
pm_runtime_enable(dev);
return 0;
diff --git a/drivers/phy/samsung/Kconfig b/drivers/phy/samsung/Kconfig
index 290a6c70f570..9e483d1fdaf2 100644
--- a/drivers/phy/samsung/Kconfig
+++ b/drivers/phy/samsung/Kconfig
@@ -32,7 +32,7 @@ config PHY_EXYNOS_PCIE
config PHY_SAMSUNG_USB2
tristate "Samsung USB 2.0 PHY driver"
depends on HAS_IOMEM
- depends on USB_EHCI_EXYNOS || USB_OHCI_EXYNOS || USB_DWC2
+ depends on USB_EHCI_EXYNOS || USB_OHCI_EXYNOS || USB_DWC2 || COMPILE_TEST
select GENERIC_PHY
select MFD_SYSCON
default ARCH_EXYNOS
@@ -60,7 +60,7 @@ config PHY_EXYNOS5250_USB2
config PHY_S5PV210_USB2
bool "Support for S5PV210"
depends on PHY_SAMSUNG_USB2
- depends on ARCH_S5PV210
+ depends on ARCH_S5PV210 || COMPILE_TEST
help
Enable USB PHY support for S5PV210. This option requires that Samsung
USB 2.0 PHY driver is enabled and means that support for this
@@ -69,7 +69,7 @@ config PHY_S5PV210_USB2
config PHY_EXYNOS5_USBDRD
tristate "Exynos5 SoC series USB DRD PHY driver"
- depends on ARCH_EXYNOS && OF
+ depends on (ARCH_EXYNOS && OF) || COMPILE_TEST
depends on HAS_IOMEM
depends on USB_DWC3_EXYNOS
select GENERIC_PHY
diff --git a/drivers/phy/ti/Kconfig b/drivers/phy/ti/Kconfig
index 174888609779..6dbe9d0b9ff3 100644
--- a/drivers/phy/ti/Kconfig
+++ b/drivers/phy/ti/Kconfig
@@ -4,7 +4,7 @@
#
config PHY_DA8XX_USB
tristate "TI DA8xx USB PHY Driver"
- depends on ARCH_DAVINCI_DA8XX
+ depends on ARCH_DAVINCI_DA8XX || COMPILE_TEST
select GENERIC_PHY
select MFD_SYSCON
help
@@ -14,7 +14,7 @@ config PHY_DA8XX_USB
config PHY_DM816X_USB
tristate "TI dm816x USB PHY driver"
- depends on ARCH_OMAP2PLUS
+ depends on ARCH_OMAP2PLUS || COMPILE_TEST
depends on USB_SUPPORT
select GENERIC_PHY
select USB_PHY
@@ -33,6 +33,22 @@ config PHY_AM654_SERDES
This option enables support for TI AM654 SerDes PHY used for
PCIe.
+config PHY_J721E_WIZ
+ tristate "TI J721E WIZ (SERDES Wrapper) support"
+ depends on (OF && ARCH_K3) || COMPILE_TEST
+ depends on HAS_IOMEM && OF_ADDRESS
+ depends on COMMON_CLK
+ select GENERIC_PHY
+ select MULTIPLEXER
+ select REGMAP_MMIO
+ select MUX_MMIO
+ help
+ This option enables support for the WIZ module present in TI's J721E
+ SoC. WIZ is a SERDES wrapper used to configure some of the input
+ signals to the SERDES (Sierra/Torrent). This driver configures the
+ three clock selects (pll0, pll1, dig) and the resets for each of
+ the lanes.
+
config OMAP_CONTROL_PHY
tristate "OMAP CONTROL PHY Driver"
depends on ARCH_OMAP2PLUS || COMPILE_TEST
diff --git a/drivers/phy/ti/Makefile b/drivers/phy/ti/Makefile
index bff901eb0ecc..dcba2571c9bd 100644
--- a/drivers/phy/ti/Makefile
+++ b/drivers/phy/ti/Makefile
@@ -8,3 +8,4 @@ obj-$(CONFIG_PHY_TUSB1210) += phy-tusb1210.o
obj-$(CONFIG_TWL4030_USB) += phy-twl4030-usb.o
obj-$(CONFIG_PHY_AM654_SERDES) += phy-am654-serdes.o
obj-$(CONFIG_PHY_TI_GMII_SEL) += phy-gmii-sel.o
+obj-$(CONFIG_PHY_J721E_WIZ) += phy-j721e-wiz.o
diff --git a/drivers/phy/ti/phy-j721e-wiz.c b/drivers/phy/ti/phy-j721e-wiz.c
new file mode 100644
index 000000000000..7b51045df783
--- /dev/null
+++ b/drivers/phy/ti/phy-j721e-wiz.c
@@ -0,0 +1,959 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Wrapper driver for SERDES used in J721E
+ *
+ * Copyright (C) 2019 Texas Instruments Incorporated - http://www.ti.com/
+ * Author: Kishon Vijay Abraham I <kishon@ti.com>
+ */
+
+#include <dt-bindings/phy/phy.h>
+#include <linux/clk.h>
+#include <linux/clk-provider.h>
+#include <linux/gpio.h>
+#include <linux/gpio/consumer.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/mux/consumer.h>
+#include <linux/of_address.h>
+#include <linux/of_platform.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+#include <linux/regmap.h>
+#include <linux/reset-controller.h>
+
+#define WIZ_SERDES_CTRL 0x404
+#define WIZ_SERDES_TOP_CTRL 0x408
+#define WIZ_SERDES_RST 0x40c
+#define WIZ_SERDES_TYPEC 0x410
+#define WIZ_LANECTL(n) (0x480 + (0x40 * (n)))
+
+#define WIZ_MAX_LANES 4
+#define WIZ_MUX_NUM_CLOCKS 3
+#define WIZ_DIV_NUM_CLOCKS_16G 2
+#define WIZ_DIV_NUM_CLOCKS_10G 1
+
+#define WIZ_SERDES_TYPEC_LN10_SWAP BIT(30)
+
+enum wiz_lane_standard_mode {
+ LANE_MODE_GEN1,
+ LANE_MODE_GEN2,
+ LANE_MODE_GEN3,
+ LANE_MODE_GEN4,
+};
+
+enum wiz_refclk_mux_sel {
+ PLL0_REFCLK,
+ PLL1_REFCLK,
+ REFCLK_DIG,
+};
+
+enum wiz_refclk_div_sel {
+ CMN_REFCLK_DIG_DIV,
+ CMN_REFCLK1_DIG_DIV,
+};
+
+static const struct reg_field por_en = REG_FIELD(WIZ_SERDES_CTRL, 31, 31);
+static const struct reg_field phy_reset_n = REG_FIELD(WIZ_SERDES_RST, 31, 31);
+static const struct reg_field pll1_refclk_mux_sel =
+ REG_FIELD(WIZ_SERDES_RST, 29, 29);
+static const struct reg_field pll0_refclk_mux_sel =
+ REG_FIELD(WIZ_SERDES_RST, 28, 28);
+static const struct reg_field refclk_dig_sel_16g =
+ REG_FIELD(WIZ_SERDES_RST, 24, 25);
+static const struct reg_field refclk_dig_sel_10g =
+ REG_FIELD(WIZ_SERDES_RST, 24, 24);
+static const struct reg_field pma_cmn_refclk_int_mode =
+ REG_FIELD(WIZ_SERDES_TOP_CTRL, 28, 29);
+static const struct reg_field pma_cmn_refclk_mode =
+ REG_FIELD(WIZ_SERDES_TOP_CTRL, 30, 31);
+static const struct reg_field pma_cmn_refclk_dig_div =
+ REG_FIELD(WIZ_SERDES_TOP_CTRL, 26, 27);
+static const struct reg_field pma_cmn_refclk1_dig_div =
+ REG_FIELD(WIZ_SERDES_TOP_CTRL, 24, 25);
+
+static const struct reg_field p_enable[WIZ_MAX_LANES] = {
+ REG_FIELD(WIZ_LANECTL(0), 30, 31),
+ REG_FIELD(WIZ_LANECTL(1), 30, 31),
+ REG_FIELD(WIZ_LANECTL(2), 30, 31),
+ REG_FIELD(WIZ_LANECTL(3), 30, 31),
+};
+
+static const struct reg_field p_align[WIZ_MAX_LANES] = {
+ REG_FIELD(WIZ_LANECTL(0), 29, 29),
+ REG_FIELD(WIZ_LANECTL(1), 29, 29),
+ REG_FIELD(WIZ_LANECTL(2), 29, 29),
+ REG_FIELD(WIZ_LANECTL(3), 29, 29),
+};
+
+static const struct reg_field p_raw_auto_start[WIZ_MAX_LANES] = {
+ REG_FIELD(WIZ_LANECTL(0), 28, 28),
+ REG_FIELD(WIZ_LANECTL(1), 28, 28),
+ REG_FIELD(WIZ_LANECTL(2), 28, 28),
+ REG_FIELD(WIZ_LANECTL(3), 28, 28),
+};
+
+static const struct reg_field p_standard_mode[WIZ_MAX_LANES] = {
+ REG_FIELD(WIZ_LANECTL(0), 24, 25),
+ REG_FIELD(WIZ_LANECTL(1), 24, 25),
+ REG_FIELD(WIZ_LANECTL(2), 24, 25),
+ REG_FIELD(WIZ_LANECTL(3), 24, 25),
+};
+
+static const struct reg_field typec_ln10_swap =
+ REG_FIELD(WIZ_SERDES_TYPEC, 30, 30);
+
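
As a mental model for the REG_FIELD() tables above: each entry names a contiguous bit range in a 32-bit register, and every access is a masked shift. The following is illustrative plain C, not the regmap implementation, using the pll1 mux-select bit (WIZ_SERDES_RST[29]) as its example:

#include <stdint.h>
#include <stdio.h>

/* Conceptual stand-in for REG_FIELD(reg, lsb, msb) */
struct field { uint32_t reg; unsigned int lsb, msb; };

static uint32_t field_read(uint32_t regval, struct field f)
{
        uint32_t mask = ((1ULL << (f.msb - f.lsb + 1)) - 1) << f.lsb;

        return (regval & mask) >> f.lsb;
}

static uint32_t field_write(uint32_t regval, struct field f, uint32_t val)
{
        uint32_t mask = ((1ULL << (f.msb - f.lsb + 1)) - 1) << f.lsb;

        return (regval & ~mask) | ((val << f.lsb) & mask);
}

int main(void)
{
        struct field pll1_sel = { 0x40c, 29, 29 }; /* WIZ_SERDES_RST[29] */
        uint32_t reg = 0;

        reg = field_write(reg, pll1_sel, 1);
        printf("reg=0x%08x field=%u\n", reg, field_read(reg, pll1_sel));
        return 0;
}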
+struct wiz_clk_mux {
+ struct clk_hw hw;
+ struct regmap_field *field;
+ u32 *table;
+ struct clk_init_data clk_data;
+};
+
+#define to_wiz_clk_mux(_hw) container_of(_hw, struct wiz_clk_mux, hw)
+
+struct wiz_clk_divider {
+ struct clk_hw hw;
+ struct regmap_field *field;
+ struct clk_div_table *table;
+ struct clk_init_data clk_data;
+};
+
+#define to_wiz_clk_div(_hw) container_of(_hw, struct wiz_clk_divider, hw)
+
+struct wiz_clk_mux_sel {
+ struct regmap_field *field;
+ u32 table[4];
+ const char *node_name;
+};
+
+struct wiz_clk_div_sel {
+ struct regmap_field *field;
+ struct clk_div_table *table;
+ const char *node_name;
+};
+
+static struct wiz_clk_mux_sel clk_mux_sel_16g[] = {
+ {
+ /*
+ * Mux value to program for each of the input clocks, in the
+ * order they are listed in the device tree
+ */
+ .table = { 1, 0 },
+ .node_name = "pll0-refclk",
+ },
+ {
+ .table = { 1, 0 },
+ .node_name = "pll1-refclk",
+ },
+ {
+ .table = { 1, 3, 0, 2 },
+ .node_name = "refclk-dig",
+ },
+};
+
+static struct wiz_clk_mux_sel clk_mux_sel_10g[] = {
+ {
+ /*
+ * Mux value to program for each of the input clocks, in the
+ * order they are listed in the device tree
+ */
+ .table = { 1, 0 },
+ .node_name = "pll0-refclk",
+ },
+ {
+ .table = { 1, 0 },
+ .node_name = "pll1-refclk",
+ },
+ {
+ .table = { 1, 0 },
+ .node_name = "refclk-dig",
+ },
+};
+
+static struct clk_div_table clk_div_table[] = {
+ { .val = 0, .div = 1, },
+ { .val = 1, .div = 2, },
+ { .val = 2, .div = 4, },
+ { .val = 3, .div = 8, },
+};
+
+static struct wiz_clk_div_sel clk_div_sel[] = {
+ {
+ .table = clk_div_table,
+ .node_name = "cmn-refclk-dig-div",
+ },
+ {
+ .table = clk_div_table,
+ .node_name = "cmn-refclk1-dig-div",
+ },
+};
+
+enum wiz_type {
+ J721E_WIZ_16G,
+ J721E_WIZ_10G,
+};
+
+#define WIZ_TYPEC_DIR_DEBOUNCE_MIN 100 /* ms */
+#define WIZ_TYPEC_DIR_DEBOUNCE_MAX 1000 /* ms */
+
+struct wiz {
+ struct regmap *regmap;
+ enum wiz_type type;
+ struct wiz_clk_mux_sel *clk_mux_sel;
+ struct wiz_clk_div_sel *clk_div_sel;
+ unsigned int clk_div_sel_num;
+ struct regmap_field *por_en;
+ struct regmap_field *phy_reset_n;
+ struct regmap_field *p_enable[WIZ_MAX_LANES];
+ struct regmap_field *p_align[WIZ_MAX_LANES];
+ struct regmap_field *p_raw_auto_start[WIZ_MAX_LANES];
+ struct regmap_field *p_standard_mode[WIZ_MAX_LANES];
+ struct regmap_field *pma_cmn_refclk_int_mode;
+ struct regmap_field *pma_cmn_refclk_mode;
+ struct regmap_field *pma_cmn_refclk_dig_div;
+ struct regmap_field *pma_cmn_refclk1_dig_div;
+ struct regmap_field *typec_ln10_swap;
+
+ struct device *dev;
+ u32 num_lanes;
+ struct platform_device *serdes_pdev;
+ struct reset_controller_dev wiz_phy_reset_dev;
+ struct gpio_desc *gpio_typec_dir;
+ int typec_dir_delay;
+};
+
+static int wiz_reset(struct wiz *wiz)
+{
+ int ret;
+
+ ret = regmap_field_write(wiz->por_en, 0x1);
+ if (ret)
+ return ret;
+
+ mdelay(1);
+
+ ret = regmap_field_write(wiz->por_en, 0x0);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static int wiz_mode_select(struct wiz *wiz)
+{
+ u32 num_lanes = wiz->num_lanes;
+ int ret;
+ int i;
+
+ for (i = 0; i < num_lanes; i++) {
+ ret = regmap_field_write(wiz->p_standard_mode[i],
+ LANE_MODE_GEN4);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+static int wiz_init_raw_interface(struct wiz *wiz, bool enable)
+{
+ u32 num_lanes = wiz->num_lanes;
+ int i;
+ int ret;
+
+ for (i = 0; i < num_lanes; i++) {
+ ret = regmap_field_write(wiz->p_align[i], enable);
+ if (ret)
+ return ret;
+
+ ret = regmap_field_write(wiz->p_raw_auto_start[i], enable);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+static int wiz_init(struct wiz *wiz)
+{
+ struct device *dev = wiz->dev;
+ int ret;
+
+ ret = wiz_reset(wiz);
+ if (ret) {
+ dev_err(dev, "WIZ reset failed\n");
+ return ret;
+ }
+
+ ret = wiz_mode_select(wiz);
+ if (ret) {
+ dev_err(dev, "WIZ mode select failed\n");
+ return ret;
+ }
+
+ ret = wiz_init_raw_interface(wiz, true);
+ if (ret) {
+ dev_err(dev, "WIZ interface initialization failed\n");
+ return ret;
+ }
+
+ return 0;
+}
+
+static int wiz_regfield_init(struct wiz *wiz)
+{
+ struct wiz_clk_mux_sel *clk_mux_sel;
+ struct wiz_clk_div_sel *clk_div_sel;
+ struct regmap *regmap = wiz->regmap;
+ int num_lanes = wiz->num_lanes;
+ struct device *dev = wiz->dev;
+ int i;
+
+ wiz->por_en = devm_regmap_field_alloc(dev, regmap, por_en);
+ if (IS_ERR(wiz->por_en)) {
+ dev_err(dev, "POR_EN reg field init failed\n");
+ return PTR_ERR(wiz->por_en);
+ }
+
+ wiz->phy_reset_n = devm_regmap_field_alloc(dev, regmap,
+ phy_reset_n);
+ if (IS_ERR(wiz->phy_reset_n)) {
+ dev_err(dev, "PHY_RESET_N reg field init failed\n");
+ return PTR_ERR(wiz->phy_reset_n);
+ }
+
+ wiz->pma_cmn_refclk_int_mode =
+ devm_regmap_field_alloc(dev, regmap, pma_cmn_refclk_int_mode);
+ if (IS_ERR(wiz->pma_cmn_refclk_int_mode)) {
+ dev_err(dev, "PMA_CMN_REFCLK_INT_MODE reg field init failed\n");
+ return PTR_ERR(wiz->pma_cmn_refclk_int_mode);
+ }
+
+ wiz->pma_cmn_refclk_mode =
+ devm_regmap_field_alloc(dev, regmap, pma_cmn_refclk_mode);
+ if (IS_ERR(wiz->pma_cmn_refclk_mode)) {
+ dev_err(dev, "PMA_CMN_REFCLK_MODE reg field init failed\n");
+ return PTR_ERR(wiz->pma_cmn_refclk_mode);
+ }
+
+ clk_div_sel = &wiz->clk_div_sel[CMN_REFCLK_DIG_DIV];
+ clk_div_sel->field = devm_regmap_field_alloc(dev, regmap,
+ pma_cmn_refclk_dig_div);
+ if (IS_ERR(clk_div_sel->field)) {
+ dev_err(dev, "PMA_CMN_REFCLK_DIG_DIV reg field init failed\n");
+ return PTR_ERR(clk_div_sel->field);
+ }
+
+ if (wiz->type == J721E_WIZ_16G) {
+ clk_div_sel = &wiz->clk_div_sel[CMN_REFCLK1_DIG_DIV];
+ clk_div_sel->field =
+ devm_regmap_field_alloc(dev, regmap,
+ pma_cmn_refclk1_dig_div);
+ if (IS_ERR(clk_div_sel->field)) {
+ dev_err(dev, "PMA_CMN_REFCLK1_DIG_DIV reg field init failed\n");
+ return PTR_ERR(clk_div_sel->field);
+ }
+ }
+
+ clk_mux_sel = &wiz->clk_mux_sel[PLL0_REFCLK];
+ clk_mux_sel->field = devm_regmap_field_alloc(dev, regmap,
+ pll0_refclk_mux_sel);
+ if (IS_ERR(clk_mux_sel->field)) {
+ dev_err(dev, "PLL0_REFCLK_SEL reg field init failed\n");
+ return PTR_ERR(clk_mux_sel->field);
+ }
+
+ clk_mux_sel = &wiz->clk_mux_sel[PLL1_REFCLK];
+ clk_mux_sel->field = devm_regmap_field_alloc(dev, regmap,
+ pll1_refclk_mux_sel);
+ if (IS_ERR(clk_mux_sel->field)) {
+ dev_err(dev, "PLL1_REFCLK_SEL reg field init failed\n");
+ return PTR_ERR(clk_mux_sel->field);
+ }
+
+ clk_mux_sel = &wiz->clk_mux_sel[REFCLK_DIG];
+ if (wiz->type == J721E_WIZ_10G)
+ clk_mux_sel->field =
+ devm_regmap_field_alloc(dev, regmap,
+ refclk_dig_sel_10g);
+ else
+ clk_mux_sel->field =
+ devm_regmap_field_alloc(dev, regmap,
+ refclk_dig_sel_16g);
+
+ if (IS_ERR(clk_mux_sel->field)) {
+ dev_err(dev, "REFCLK_DIG_SEL reg field init failed\n");
+ return PTR_ERR(clk_mux_sel->field);
+ }
+
+ for (i = 0; i < num_lanes; i++) {
+ wiz->p_enable[i] = devm_regmap_field_alloc(dev, regmap,
+ p_enable[i]);
+ if (IS_ERR(wiz->p_enable[i])) {
+ dev_err(dev, "P%d_ENABLE reg field init failed\n", i);
+ return PTR_ERR(wiz->p_enable[i]);
+ }
+
+ wiz->p_align[i] = devm_regmap_field_alloc(dev, regmap,
+ p_align[i]);
+ if (IS_ERR(wiz->p_align[i])) {
+ dev_err(dev, "P%d_ALIGN reg field init failed\n", i);
+ return PTR_ERR(wiz->p_align[i]);
+ }
+
+ wiz->p_raw_auto_start[i] =
+ devm_regmap_field_alloc(dev, regmap, p_raw_auto_start[i]);
+ if (IS_ERR(wiz->p_raw_auto_start[i])) {
+ dev_err(dev, "P%d_RAW_AUTO_START reg field init fail\n",
+ i);
+ return PTR_ERR(wiz->p_raw_auto_start[i]);
+ }
+
+ wiz->p_standard_mode[i] =
+ devm_regmap_field_alloc(dev, regmap, p_standard_mode[i]);
+ if (IS_ERR(wiz->p_standard_mode[i])) {
+ dev_err(dev, "P%d_STANDARD_MODE reg field init fail\n",
+ i);
+ return PTR_ERR(wiz->p_standard_mode[i]);
+ }
+ }
+
+ wiz->typec_ln10_swap = devm_regmap_field_alloc(dev, regmap,
+ typec_ln10_swap);
+ if (IS_ERR(wiz->typec_ln10_swap)) {
+ dev_err(dev, "LN10_SWAP reg field init failed\n");
+ return PTR_ERR(wiz->typec_ln10_swap);
+ }
+
+ return 0;
+}
+
+static u8 wiz_clk_mux_get_parent(struct clk_hw *hw)
+{
+ struct wiz_clk_mux *mux = to_wiz_clk_mux(hw);
+ struct regmap_field *field = mux->field;
+ unsigned int val;
+
+ regmap_field_read(field, &val);
+ return clk_mux_val_to_index(hw, mux->table, 0, val);
+}
+
+static int wiz_clk_mux_set_parent(struct clk_hw *hw, u8 index)
+{
+ struct wiz_clk_mux *mux = to_wiz_clk_mux(hw);
+ struct regmap_field *field = mux->field;
+ int val;
+
+ val = mux->table[index];
+ return regmap_field_write(field, val);
+}
+
+static const struct clk_ops wiz_clk_mux_ops = {
+ .set_parent = wiz_clk_mux_set_parent,
+ .get_parent = wiz_clk_mux_get_parent,
+};
+
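
The two callbacks above translate between the clock framework's parent index and the hardware register value through the per-clock table; clk_mux_val_to_index() does the reverse walk. A standalone round trip using the 16G refclk-dig table { 1, 3, 0, 2 } from above (val_to_index() is a stand-in for the framework helper):

#include <stdio.h>

/* refclk-dig mux table (16G): parent index -> register value */
static const unsigned int table[] = { 1, 3, 0, 2 };

/* Stand-in for clk_mux_val_to_index(): register value -> parent index */
static int val_to_index(unsigned int val)
{
        for (unsigned int i = 0; i < sizeof(table) / sizeof(table[0]); i++)
                if (table[i] == val)
                        return i;
        return -1;
}

int main(void)
{
        unsigned int index = 2;          /* third parent listed in the DT */
        unsigned int val = table[index]; /* what set_parent() writes */

        printf("index %u -> val %u -> index %d\n", index, val,
               val_to_index(val));
        return 0;
}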
+static int wiz_mux_clk_register(struct wiz *wiz, struct device_node *node,
+ struct regmap_field *field, u32 *table)
+{
+ struct device *dev = wiz->dev;
+ struct clk_init_data *init;
+ const char **parent_names;
+ unsigned int num_parents;
+ struct wiz_clk_mux *mux;
+ char clk_name[100];
+ struct clk *clk;
+ int ret;
+
+ mux = devm_kzalloc(dev, sizeof(*mux), GFP_KERNEL);
+ if (!mux)
+ return -ENOMEM;
+
+ num_parents = of_clk_get_parent_count(node);
+ if (num_parents < 2) {
+ dev_err(dev, "SERDES clock must have parents\n");
+ return -EINVAL;
+ }
+
+ parent_names = devm_kzalloc(dev, (sizeof(char *) * num_parents),
+ GFP_KERNEL);
+ if (!parent_names)
+ return -ENOMEM;
+
+ of_clk_parent_fill(node, parent_names, num_parents);
+
+ snprintf(clk_name, sizeof(clk_name), "%s_%s", dev_name(dev),
+ node->name);
+
+ init = &mux->clk_data;
+
+ init->ops = &wiz_clk_mux_ops;
+ init->flags = CLK_SET_RATE_NO_REPARENT;
+ init->parent_names = parent_names;
+ init->num_parents = num_parents;
+ init->name = clk_name;
+
+ mux->field = field;
+ mux->table = table;
+ mux->hw.init = init;
+
+ clk = devm_clk_register(dev, &mux->hw);
+ if (IS_ERR(clk))
+ return PTR_ERR(clk);
+
+ ret = of_clk_add_provider(node, of_clk_src_simple_get, clk);
+ if (ret)
+ dev_err(dev, "Failed to add clock provider: %s\n", clk_name);
+
+ return ret;
+}
+
+static unsigned long wiz_clk_div_recalc_rate(struct clk_hw *hw,
+ unsigned long parent_rate)
+{
+ struct wiz_clk_divider *div = to_wiz_clk_div(hw);
+ struct regmap_field *field = div->field;
+ unsigned int val;
+
+ regmap_field_read(field, &val);
+
+ return divider_recalc_rate(hw, parent_rate, val, div->table, 0x0, 2);
+}
+
+static long wiz_clk_div_round_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long *prate)
+{
+ struct wiz_clk_divider *div = to_wiz_clk_div(hw);
+
+ return divider_round_rate(hw, rate, prate, div->table, 2, 0x0);
+}
+
+static int wiz_clk_div_set_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long parent_rate)
+{
+ struct wiz_clk_divider *div = to_wiz_clk_div(hw);
+ struct regmap_field *field = div->field;
+ int val;
+
+ val = divider_get_val(rate, parent_rate, div->table, 2, 0x0);
+ if (val < 0)
+ return val;
+
+ return regmap_field_write(field, val);
+}
+
+static const struct clk_ops wiz_clk_div_ops = {
+ .recalc_rate = wiz_clk_div_recalc_rate,
+ .round_rate = wiz_clk_div_round_rate,
+ .set_rate = wiz_clk_div_set_rate,
+};
+
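
The divider callbacks above defer the heavy lifting to the common clk divider helpers; conceptually, recalc is just a lookup of the programmed value in clk_div_table followed by a division. An illustrative reduction of that step (not the clk framework code):

#include <stdio.h>

struct div_entry { unsigned int val, div; };

static const struct div_entry div_table[] = {
        { 0, 1 }, { 1, 2 }, { 2, 4 }, { 3, 8 },
};

static unsigned long recalc(unsigned long parent_rate, unsigned int val)
{
        for (unsigned int i = 0; i < sizeof(div_table) / sizeof(div_table[0]); i++)
                if (div_table[i].val == val)
                        return parent_rate / div_table[i].div;
        return 0; /* programmed value not in the table */
}

int main(void)
{
        /* val 2 selects divide-by-4: 100 MHz parent -> 25 MHz */
        printf("%lu\n", recalc(100000000UL, 2));
        return 0;
}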
+static int wiz_div_clk_register(struct wiz *wiz, struct device_node *node,
+ struct regmap_field *field,
+ struct clk_div_table *table)
+{
+ struct device *dev = wiz->dev;
+ struct wiz_clk_divider *div;
+ struct clk_init_data *init;
+ const char **parent_names;
+ char clk_name[100];
+ struct clk *clk;
+ int ret;
+
+ div = devm_kzalloc(dev, sizeof(*div), GFP_KERNEL);
+ if (!div)
+ return -ENOMEM;
+
+ snprintf(clk_name, sizeof(clk_name), "%s_%s", dev_name(dev),
+ node->name);
+
+ parent_names = devm_kzalloc(dev, sizeof(char *), GFP_KERNEL);
+ if (!parent_names)
+ return -ENOMEM;
+
+ of_clk_parent_fill(node, parent_names, 1);
+
+ init = &div->clk_data;
+
+ init->ops = &wiz_clk_div_ops;
+ init->flags = 0;
+ init->parent_names = parent_names;
+ init->num_parents = 1;
+ init->name = clk_name;
+
+ div->field = field;
+ div->table = table;
+ div->hw.init = init;
+
+ clk = devm_clk_register(dev, &div->hw);
+ if (IS_ERR(clk))
+ return PTR_ERR(clk);
+
+ ret = of_clk_add_provider(node, of_clk_src_simple_get, clk);
+ if (ret)
+ dev_err(dev, "Failed to add clock provider: %s\n", clk_name);
+
+ return ret;
+}
+
+static void wiz_clock_cleanup(struct wiz *wiz, struct device_node *node)
+{
+ struct wiz_clk_mux_sel *clk_mux_sel = wiz->clk_mux_sel;
+ struct device_node *clk_node;
+ int i;
+
+ for (i = 0; i < WIZ_MUX_NUM_CLOCKS; i++) {
+ clk_node = of_get_child_by_name(node, clk_mux_sel[i].node_name);
+ of_clk_del_provider(clk_node);
+ of_node_put(clk_node);
+ }
+}
+
+static int wiz_clock_init(struct wiz *wiz, struct device_node *node)
+{
+ struct wiz_clk_mux_sel *clk_mux_sel = wiz->clk_mux_sel;
+ struct device *dev = wiz->dev;
+ struct device_node *clk_node;
+ const char *node_name;
+ unsigned long rate;
+ struct clk *clk;
+ int ret;
+ int i;
+
+ clk = devm_clk_get(dev, "core_ref_clk");
+ if (IS_ERR(clk)) {
+ dev_err(dev, "core_ref_clk clock not found\n");
+ ret = PTR_ERR(clk);
+ return ret;
+ }
+
+ rate = clk_get_rate(clk);
+ if (rate >= 100000000)
+ regmap_field_write(wiz->pma_cmn_refclk_int_mode, 0x1);
+ else
+ regmap_field_write(wiz->pma_cmn_refclk_int_mode, 0x3);
+
+ clk = devm_clk_get(dev, "ext_ref_clk");
+ if (IS_ERR(clk)) {
+ dev_err(dev, "ext_ref_clk clock not found\n");
+ ret = PTR_ERR(clk);
+ return ret;
+ }
+
+ rate = clk_get_rate(clk);
+ if (rate >= 100000000)
+ regmap_field_write(wiz->pma_cmn_refclk_mode, 0x0);
+ else
+ regmap_field_write(wiz->pma_cmn_refclk_mode, 0x2);
+
+ for (i = 0; i < WIZ_MUX_NUM_CLOCKS; i++) {
+ node_name = clk_mux_sel[i].node_name;
+ clk_node = of_get_child_by_name(node, node_name);
+ if (!clk_node) {
+ dev_err(dev, "Unable to get %s node\n", node_name);
+ ret = -EINVAL;
+ goto err;
+ }
+
+ ret = wiz_mux_clk_register(wiz, clk_node, clk_mux_sel[i].field,
+ clk_mux_sel[i].table);
+ if (ret) {
+ dev_err(dev, "Failed to register %s clock\n",
+ node_name);
+ of_node_put(clk_node);
+ goto err;
+ }
+
+ of_node_put(clk_node);
+ }
+
+ for (i = 0; i < wiz->clk_div_sel_num; i++) {
+ node_name = clk_div_sel[i].node_name;
+ clk_node = of_get_child_by_name(node, node_name);
+ if (!clk_node) {
+ dev_err(dev, "Unable to get %s node\n", node_name);
+ ret = -EINVAL;
+ goto err;
+ }
+
+ ret = wiz_div_clk_register(wiz, clk_node, clk_div_sel[i].field,
+ clk_div_sel[i].table);
+ if (ret) {
+ dev_err(dev, "Failed to register %s clock\n",
+ node_name);
+ of_node_put(clk_node);
+ goto err;
+ }
+
+ of_node_put(clk_node);
+ }
+
+ return 0;
+err:
+ wiz_clock_cleanup(wiz, node);
+
+ return ret;
+}
+
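
The refclk-mode writes in wiz_clock_init() above derive register values from nothing but the measured clock rate. Stated as pure functions for clarity; the encodings are copied from the writes, while their exact hardware meaning is an assumption here:

#include <stdio.h>

/* Encodings as written by wiz_clock_init(); >= 100 MHz picks one mode */
static unsigned int refclk_int_mode(unsigned long rate)
{
        return rate >= 100000000UL ? 0x1 : 0x3;
}

static unsigned int refclk_mode(unsigned long rate)
{
        return rate >= 100000000UL ? 0x0 : 0x2;
}

int main(void)
{
        printf("%u %u\n", refclk_int_mode(156250000UL),
               refclk_mode(19200000UL));
        return 0;
}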
+static int wiz_phy_reset_assert(struct reset_controller_dev *rcdev,
+ unsigned long id)
+{
+ struct device *dev = rcdev->dev;
+ struct wiz *wiz = dev_get_drvdata(dev);
+ int ret = 0;
+
+ if (id == 0) {
+ ret = regmap_field_write(wiz->phy_reset_n, false);
+ return ret;
+ }
+
+ ret = regmap_field_write(wiz->p_enable[id - 1], false);
+ return ret;
+}
+
+static int wiz_phy_reset_deassert(struct reset_controller_dev *rcdev,
+ unsigned long id)
+{
+ struct device *dev = rcdev->dev;
+ struct wiz *wiz = dev_get_drvdata(dev);
+ int ret;
+
+ /* If a typec-dir GPIO was specified, set the LN10 SWAP bit from it */
+ if (id == 0 && wiz->gpio_typec_dir) {
+ if (wiz->typec_dir_delay)
+ msleep_interruptible(wiz->typec_dir_delay);
+
+ if (gpiod_get_value_cansleep(wiz->gpio_typec_dir))
+ regmap_field_write(wiz->typec_ln10_swap, 1);
+ else
+ regmap_field_write(wiz->typec_ln10_swap, 0);
+ }
+
+ if (id == 0) {
+ ret = regmap_field_write(wiz->phy_reset_n, true);
+ return ret;
+ }
+
+ ret = regmap_field_write(wiz->p_enable[id - 1], true);
+ return ret;
+}
+
+static const struct reset_control_ops wiz_phy_reset_ops = {
+ .assert = wiz_phy_reset_assert,
+ .deassert = wiz_phy_reset_deassert,
+};
+
+static struct regmap_config wiz_regmap_config = {
+ .reg_bits = 32,
+ .val_bits = 32,
+ .reg_stride = 4,
+ .fast_io = true,
+};
+
+static const struct of_device_id wiz_id_table[] = {
+ {
+ .compatible = "ti,j721e-wiz-16g", .data = (void *)J721E_WIZ_16G
+ },
+ {
+ .compatible = "ti,j721e-wiz-10g", .data = (void *)J721E_WIZ_10G
+ },
+ {}
+};
+MODULE_DEVICE_TABLE(of, wiz_id_table);
+
+static int wiz_probe(struct platform_device *pdev)
+{
+ struct reset_controller_dev *phy_reset_dev;
+ struct device *dev = &pdev->dev;
+ struct device_node *node = dev->of_node;
+ struct platform_device *serdes_pdev;
+ struct device_node *child_node;
+ struct regmap *regmap;
+ struct resource res;
+ void __iomem *base;
+ struct wiz *wiz;
+ u32 num_lanes;
+ int ret;
+
+ wiz = devm_kzalloc(dev, sizeof(*wiz), GFP_KERNEL);
+ if (!wiz)
+ return -ENOMEM;
+
+ wiz->type = (enum wiz_type)of_device_get_match_data(dev);
+
+ child_node = of_get_child_by_name(node, "serdes");
+ if (!child_node) {
+ dev_err(dev, "Failed to get SERDES child DT node\n");
+ return -ENODEV;
+ }
+
+ ret = of_address_to_resource(child_node, 0, &res);
+ if (ret) {
+ dev_err(dev, "Failed to get memory resource\n");
+ goto err_addr_to_resource;
+ }
+
+ base = devm_ioremap(dev, res.start, resource_size(&res));
+ if (!base) {
+ ret = -ENOMEM;
+ goto err_addr_to_resource;
+ }
+
+ regmap = devm_regmap_init_mmio(dev, base, &wiz_regmap_config);
+ if (IS_ERR(regmap)) {
+ dev_err(dev, "Failed to initialize regmap\n");
+ ret = PTR_ERR(regmap);
+ goto err_addr_to_resource;
+ }
+
+ ret = of_property_read_u32(node, "num-lanes", &num_lanes);
+ if (ret) {
+ dev_err(dev, "Failed to read num-lanes property\n");
+ goto err_addr_to_resource;
+ }
+
+ if (num_lanes > WIZ_MAX_LANES) {
+ dev_err(dev, "Cannot support %u lanes\n", num_lanes);
+ ret = -EINVAL;
+ goto err_addr_to_resource;
+ }
+
+ wiz->gpio_typec_dir = devm_gpiod_get_optional(dev, "typec-dir",
+ GPIOD_IN);
+ if (IS_ERR(wiz->gpio_typec_dir)) {
+ ret = PTR_ERR(wiz->gpio_typec_dir);
+ if (ret != -EPROBE_DEFER)
+ dev_err(dev, "Failed to request typec-dir gpio: %d\n",
+ ret);
+ goto err_addr_to_resource;
+ }
+
+ if (wiz->gpio_typec_dir) {
+ ret = of_property_read_u32(node, "typec-dir-debounce-ms",
+ &wiz->typec_dir_delay);
+ if (ret && ret != -EINVAL) {
+ dev_err(dev, "Invalid typec-dir-debounce property\n");
+ goto err_addr_to_resource;
+ }
+
+ /* use min. debounce from Type-C spec if not provided in DT */
+ if (ret == -EINVAL)
+ wiz->typec_dir_delay = WIZ_TYPEC_DIR_DEBOUNCE_MIN;
+
+ if (wiz->typec_dir_delay < WIZ_TYPEC_DIR_DEBOUNCE_MIN ||
+ wiz->typec_dir_delay > WIZ_TYPEC_DIR_DEBOUNCE_MAX) {
+ dev_err(dev, "Invalid typec-dir-debounce property\n");
+ goto err_addr_to_resource;
+ }
+ }
+
+ wiz->dev = dev;
+ wiz->regmap = regmap;
+ wiz->num_lanes = num_lanes;
+ if (wiz->type == J721E_WIZ_10G)
+ wiz->clk_mux_sel = clk_mux_sel_10g;
+ else
+ wiz->clk_mux_sel = clk_mux_sel_16g;
+
+ wiz->clk_div_sel = clk_div_sel;
+
+ if (wiz->type == J721E_WIZ_10G)
+ wiz->clk_div_sel_num = WIZ_DIV_NUM_CLOCKS_10G;
+ else
+ wiz->clk_div_sel_num = WIZ_DIV_NUM_CLOCKS_16G;
+
+ platform_set_drvdata(pdev, wiz);
+
+ ret = wiz_regfield_init(wiz);
+ if (ret) {
+ dev_err(dev, "Failed to initialize regfields\n");
+ goto err_addr_to_resource;
+ }
+
+ phy_reset_dev = &wiz->wiz_phy_reset_dev;
+ phy_reset_dev->dev = dev;
+ phy_reset_dev->ops = &wiz_phy_reset_ops;
+ phy_reset_dev->owner = THIS_MODULE;
+ phy_reset_dev->of_node = node;
+ /* One reset per lane plus one for the entire SERDES */
+ phy_reset_dev->nr_resets = num_lanes + 1;
+
+ ret = devm_reset_controller_register(dev, phy_reset_dev);
+ if (ret < 0) {
+ dev_warn(dev, "Failed to register reset controller\n");
+ goto err_addr_to_resource;
+ }
+
+ pm_runtime_enable(dev);
+ ret = pm_runtime_get_sync(dev);
+ if (ret < 0) {
+ dev_err(dev, "pm_runtime_get_sync failed\n");
+ goto err_get_sync;
+ }
+
+ ret = wiz_clock_init(wiz, node);
+ if (ret < 0) {
+ dev_warn(dev, "Failed to initialize clocks\n");
+ goto err_get_sync;
+ }
+
+ serdes_pdev = of_platform_device_create(child_node, NULL, dev);
+ if (!serdes_pdev) {
+ dev_WARN(dev, "Unable to create SERDES platform device\n");
+ ret = -ENOMEM;
+ goto err_pdev_create;
+ }
+ wiz->serdes_pdev = serdes_pdev;
+
+ ret = wiz_init(wiz);
+ if (ret) {
+ dev_err(dev, "WIZ initialization failed\n");
+ goto err_wiz_init;
+ }
+
+ of_node_put(child_node);
+ return 0;
+
+err_wiz_init:
+ of_platform_device_destroy(&serdes_pdev->dev, NULL);
+
+err_pdev_create:
+ wiz_clock_cleanup(wiz, node);
+
+err_get_sync:
+ pm_runtime_put(dev);
+ pm_runtime_disable(dev);
+
+err_addr_to_resource:
+ of_node_put(child_node);
+
+ return ret;
+}
+
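
The typec-dir-debounce-ms handling in wiz_probe() above boils down to clamp-with-fallback. A standalone restatement of the policy (the bounds mirror WIZ_TYPEC_DIR_DEBOUNCE_MIN/MAX; the function name and return convention are illustrative):

#include <stdio.h>

#define DEBOUNCE_MIN 100  /* ms, Type-C spec minimum */
#define DEBOUNCE_MAX 1000 /* ms */

/* Returns the delay to use, or -1 for an out-of-range DT value */
static int pick_debounce(int dt_value, int dt_present)
{
        if (!dt_present)
                return DEBOUNCE_MIN; /* spec minimum when DT omits it */
        if (dt_value < DEBOUNCE_MIN || dt_value > DEBOUNCE_MAX)
                return -1;
        return dt_value;
}

int main(void)
{
        printf("%d %d %d\n", pick_debounce(0, 0), pick_debounce(500, 1),
               pick_debounce(5, 1));
        return 0;
}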
+static int wiz_remove(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct device_node *node = dev->of_node;
+ struct platform_device *serdes_pdev;
+ struct wiz *wiz;
+
+ wiz = dev_get_drvdata(dev);
+ serdes_pdev = wiz->serdes_pdev;
+
+ of_platform_device_destroy(&serdes_pdev->dev, NULL);
+ wiz_clock_cleanup(wiz, node);
+ pm_runtime_put(dev);
+ pm_runtime_disable(dev);
+
+ return 0;
+}
+
+static struct platform_driver wiz_driver = {
+ .probe = wiz_probe,
+ .remove = wiz_remove,
+ .driver = {
+ .name = "wiz",
+ .of_match_table = wiz_id_table,
+ },
+};
+module_platform_driver(wiz_driver);
+
+MODULE_AUTHOR("Texas Instruments Inc.");
+MODULE_DESCRIPTION("TI J721E WIZ driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/phy/ti/phy-ti-pipe3.c b/drivers/phy/ti/phy-ti-pipe3.c
index edd6859afba8..a87946589eb7 100644
--- a/drivers/phy/ti/phy-ti-pipe3.c
+++ b/drivers/phy/ti/phy-ti-pipe3.c
@@ -850,6 +850,12 @@ static int ti_pipe3_probe(struct platform_device *pdev)
static int ti_pipe3_remove(struct platform_device *pdev)
{
+ struct ti_pipe3 *phy = platform_get_drvdata(pdev);
+
+ if (phy->mode == PIPE3_MODE_SATA) {
+ clk_disable_unprepare(phy->refclk);
+ phy->sata_refclk_enabled = false;
+ }
pm_runtime_disable(&pdev->dev);
return 0;
@@ -900,18 +906,8 @@ static void ti_pipe3_disable_clocks(struct ti_pipe3 *phy)
{
if (!IS_ERR(phy->wkupclk))
clk_disable_unprepare(phy->wkupclk);
- if (!IS_ERR(phy->refclk)) {
+ if (!IS_ERR(phy->refclk))
clk_disable_unprepare(phy->refclk);
- /*
- * SATA refclk needs an additional disable as we left it
- * on in probe to avoid Errata i783
- */
- if (phy->sata_refclk_enabled) {
- clk_disable_unprepare(phy->refclk);
- phy->sata_refclk_enabled = false;
- }
- }
-
if (!IS_ERR(phy->div_clk))
clk_disable_unprepare(phy->div_clk);
}
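
The point of this hunk is enable/disable balance: probe leaves the SATA refclk enabled once to work around Errata i783, so exactly one extra disable belongs in remove(), not in every ti_pipe3_disable_clocks() call. A toy refcount model of the intended pairing (illustrative only, not the clk framework):

#include <assert.h>
#include <stdio.h>

static int refclk_count;

static void clk_enable(void)  { refclk_count++; }
static void clk_disable(void) { assert(refclk_count > 0); refclk_count--; }

int main(void)
{
        clk_enable();  /* probe: left on for SATA (Errata i783) */
        clk_enable();  /* runtime: enable_clocks() */
        clk_disable(); /* runtime: disable_clocks(), one disable only */
        clk_disable(); /* remove(): drop the probe-time reference */
        printf("balanced, count=%d\n", refclk_count);
        return 0;
}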
diff --git a/drivers/pinctrl/actions/pinctrl-s700.c b/drivers/pinctrl/actions/pinctrl-s700.c
index 8b8121e35edb..771d6fd50b45 100644
--- a/drivers/pinctrl/actions/pinctrl-s700.c
+++ b/drivers/pinctrl/actions/pinctrl-s700.c
@@ -1583,7 +1583,6 @@ static const struct owl_pinmux_func s700_functions[] = {
[S700_MUX_USB30] = FUNCTION(usb30),
[S700_MUX_CLKO_25M] = FUNCTION(clko_25m),
[S700_MUX_MIPI_CSI] = FUNCTION(mipi_csi),
- [S700_MUX_DSI] = FUNCTION(dsi),
[S700_MUX_NAND] = FUNCTION(nand),
[S700_MUX_SPDIF] = FUNCTION(spdif),
[S700_MUX_SIRQ0] = FUNCTION(sirq0),
diff --git a/drivers/pinctrl/aspeed/pinctrl-aspeed-g4.c b/drivers/pinctrl/aspeed/pinctrl-aspeed-g4.c
index 95ea593fa29d..bfed0e274643 100644
--- a/drivers/pinctrl/aspeed/pinctrl-aspeed-g4.c
+++ b/drivers/pinctrl/aspeed/pinctrl-aspeed-g4.c
@@ -2439,88 +2439,88 @@ static const struct aspeed_pin_function aspeed_g4_functions[] = {
static const struct aspeed_pin_config aspeed_g4_configs[] = {
/* GPIO banks ranges [A, B], [D, J], [M, R] */
- { PIN_CONFIG_BIAS_PULL_DOWN, { D6, D5 }, SCU8C, 16 },
- { PIN_CONFIG_BIAS_DISABLE, { D6, D5 }, SCU8C, 16 },
- { PIN_CONFIG_BIAS_PULL_DOWN, { J21, E18 }, SCU8C, 17 },
- { PIN_CONFIG_BIAS_DISABLE, { J21, E18 }, SCU8C, 17 },
- { PIN_CONFIG_BIAS_PULL_DOWN, { A18, E15 }, SCU8C, 19 },
- { PIN_CONFIG_BIAS_DISABLE, { A18, E15 }, SCU8C, 19 },
- { PIN_CONFIG_BIAS_PULL_DOWN, { D15, B14 }, SCU8C, 20 },
- { PIN_CONFIG_BIAS_DISABLE, { D15, B14 }, SCU8C, 20 },
- { PIN_CONFIG_BIAS_PULL_DOWN, { D18, C17 }, SCU8C, 21 },
- { PIN_CONFIG_BIAS_DISABLE, { D18, C17 }, SCU8C, 21 },
- { PIN_CONFIG_BIAS_PULL_DOWN, { A14, U18 }, SCU8C, 22 },
- { PIN_CONFIG_BIAS_DISABLE, { A14, U18 }, SCU8C, 22 },
- { PIN_CONFIG_BIAS_PULL_DOWN, { A8, E7 }, SCU8C, 23 },
- { PIN_CONFIG_BIAS_DISABLE, { A8, E7 }, SCU8C, 23 },
- { PIN_CONFIG_BIAS_PULL_DOWN, { C22, E20 }, SCU8C, 24 },
- { PIN_CONFIG_BIAS_DISABLE, { C22, E20 }, SCU8C, 24 },
- { PIN_CONFIG_BIAS_PULL_DOWN, { J5, T1 }, SCU8C, 25 },
- { PIN_CONFIG_BIAS_DISABLE, { J5, T1 }, SCU8C, 25 },
- { PIN_CONFIG_BIAS_PULL_DOWN, { U1, U5 }, SCU8C, 26 },
- { PIN_CONFIG_BIAS_DISABLE, { U1, U5 }, SCU8C, 26 },
- { PIN_CONFIG_BIAS_PULL_DOWN, { V3, V5 }, SCU8C, 27 },
- { PIN_CONFIG_BIAS_DISABLE, { V3, V5 }, SCU8C, 27 },
- { PIN_CONFIG_BIAS_PULL_DOWN, { W4, AB2 }, SCU8C, 28 },
- { PIN_CONFIG_BIAS_DISABLE, { W4, AB2 }, SCU8C, 28 },
- { PIN_CONFIG_BIAS_PULL_DOWN, { V6, V7 }, SCU8C, 29 },
- { PIN_CONFIG_BIAS_DISABLE, { V6, V7 }, SCU8C, 29 },
- { PIN_CONFIG_BIAS_PULL_DOWN, { Y6, AB7 }, SCU8C, 30 },
- { PIN_CONFIG_BIAS_DISABLE, { Y6, AB7 }, SCU8C, 30 },
- { PIN_CONFIG_BIAS_PULL_DOWN, { V20, A5 }, SCU8C, 31 },
- { PIN_CONFIG_BIAS_DISABLE, { V20, A5 }, SCU8C, 31 },
+ ASPEED_SB_PINCONF(PIN_CONFIG_BIAS_PULL_DOWN, D6, D5, SCU8C, 16),
+ ASPEED_SB_PINCONF(PIN_CONFIG_BIAS_DISABLE, D6, D5, SCU8C, 16),
+ ASPEED_SB_PINCONF(PIN_CONFIG_BIAS_PULL_DOWN, J21, E18, SCU8C, 17),
+ ASPEED_SB_PINCONF(PIN_CONFIG_BIAS_DISABLE, J21, E18, SCU8C, 17),
+ ASPEED_SB_PINCONF(PIN_CONFIG_BIAS_PULL_DOWN, A18, E15, SCU8C, 19),
+ ASPEED_SB_PINCONF(PIN_CONFIG_BIAS_DISABLE, A18, E15, SCU8C, 19),
+ ASPEED_SB_PINCONF(PIN_CONFIG_BIAS_PULL_DOWN, D15, B14, SCU8C, 20),
+ ASPEED_SB_PINCONF(PIN_CONFIG_BIAS_DISABLE, D15, B14, SCU8C, 20),
+ ASPEED_SB_PINCONF(PIN_CONFIG_BIAS_PULL_DOWN, D18, C17, SCU8C, 21),
+ ASPEED_SB_PINCONF(PIN_CONFIG_BIAS_DISABLE, D18, C17, SCU8C, 21),
+ ASPEED_SB_PINCONF(PIN_CONFIG_BIAS_PULL_DOWN, A14, U18, SCU8C, 22),
+ ASPEED_SB_PINCONF(PIN_CONFIG_BIAS_DISABLE, A14, U18, SCU8C, 22),
+ ASPEED_SB_PINCONF(PIN_CONFIG_BIAS_PULL_DOWN, A8, E7, SCU8C, 23),
+ ASPEED_SB_PINCONF(PIN_CONFIG_BIAS_DISABLE, A8, E7, SCU8C, 23),
+ ASPEED_SB_PINCONF(PIN_CONFIG_BIAS_PULL_DOWN, C22, E20, SCU8C, 24),
+ ASPEED_SB_PINCONF(PIN_CONFIG_BIAS_DISABLE, C22, E20, SCU8C, 24),
+ ASPEED_SB_PINCONF(PIN_CONFIG_BIAS_PULL_DOWN, J5, T1, SCU8C, 25),
+ ASPEED_SB_PINCONF(PIN_CONFIG_BIAS_DISABLE, J5, T1, SCU8C, 25),
+ ASPEED_SB_PINCONF(PIN_CONFIG_BIAS_PULL_DOWN, U1, U5, SCU8C, 26),
+ ASPEED_SB_PINCONF(PIN_CONFIG_BIAS_DISABLE, U1, U5, SCU8C, 26),
+ ASPEED_SB_PINCONF(PIN_CONFIG_BIAS_PULL_DOWN, V3, V5, SCU8C, 27),
+ ASPEED_SB_PINCONF(PIN_CONFIG_BIAS_DISABLE, V3, V5, SCU8C, 27),
+ ASPEED_SB_PINCONF(PIN_CONFIG_BIAS_PULL_DOWN, W4, AB2, SCU8C, 28),
+ ASPEED_SB_PINCONF(PIN_CONFIG_BIAS_DISABLE, W4, AB2, SCU8C, 28),
+ ASPEED_SB_PINCONF(PIN_CONFIG_BIAS_PULL_DOWN, V6, V7, SCU8C, 29),
+ ASPEED_SB_PINCONF(PIN_CONFIG_BIAS_DISABLE, V6, V7, SCU8C, 29),
+ ASPEED_SB_PINCONF(PIN_CONFIG_BIAS_PULL_DOWN, Y6, AB7, SCU8C, 30),
+ ASPEED_SB_PINCONF(PIN_CONFIG_BIAS_DISABLE, Y6, AB7, SCU8C, 30),
+ ASPEED_SB_PINCONF(PIN_CONFIG_BIAS_PULL_DOWN, V20, A5, SCU8C, 31),
+ ASPEED_SB_PINCONF(PIN_CONFIG_BIAS_DISABLE, V20, A5, SCU8C, 31),
/* GPIOs T[0-5] (RGMII1 Tx pins) */
- { PIN_CONFIG_DRIVE_STRENGTH, { A12, A13 }, SCU90, 9 },
- { PIN_CONFIG_BIAS_PULL_DOWN, { A12, A13 }, SCU90, 12 },
- { PIN_CONFIG_BIAS_DISABLE, { A12, A13 }, SCU90, 12 },
+ ASPEED_SB_PINCONF(PIN_CONFIG_DRIVE_STRENGTH, A12, A13, SCU90, 9),
+ ASPEED_SB_PINCONF(PIN_CONFIG_BIAS_PULL_DOWN, A12, A13, SCU90, 12),
+ ASPEED_SB_PINCONF(PIN_CONFIG_BIAS_DISABLE, A12, A13, SCU90, 12),
/* GPIOs T[6-7], U[0-3] (RGMII2 TX pins) */
- { PIN_CONFIG_DRIVE_STRENGTH, { D9, D10 }, SCU90, 11 },
- { PIN_CONFIG_BIAS_PULL_DOWN, { D9, D10 }, SCU90, 14 },
- { PIN_CONFIG_BIAS_DISABLE, { D9, D10 }, SCU90, 14 },
+ ASPEED_SB_PINCONF(PIN_CONFIG_DRIVE_STRENGTH, D9, D10, SCU90, 11),
+ ASPEED_SB_PINCONF(PIN_CONFIG_BIAS_PULL_DOWN, D9, D10, SCU90, 14),
+ ASPEED_SB_PINCONF(PIN_CONFIG_BIAS_DISABLE, D9, D10, SCU90, 14),
/* GPIOs U[4-7], V[0-1] (RGMII1 Rx pins) */
- { PIN_CONFIG_BIAS_PULL_DOWN, { E11, E10 }, SCU90, 13 },
- { PIN_CONFIG_BIAS_DISABLE, { E11, E10 }, SCU90, 13 },
+ ASPEED_SB_PINCONF(PIN_CONFIG_BIAS_PULL_DOWN, E11, E10, SCU90, 13),
+ ASPEED_SB_PINCONF(PIN_CONFIG_BIAS_DISABLE, E11, E10, SCU90, 13),
/* GPIOs V[2-7] (RGMII2 Rx pins) */
- { PIN_CONFIG_BIAS_PULL_DOWN, { C9, C8 }, SCU90, 15 },
- { PIN_CONFIG_BIAS_DISABLE, { C9, C8 }, SCU90, 15 },
+ ASPEED_SB_PINCONF(PIN_CONFIG_BIAS_PULL_DOWN, C9, C8, SCU90, 15),
+ ASPEED_SB_PINCONF(PIN_CONFIG_BIAS_DISABLE, C9, C8, SCU90, 15),
/* ADC pull-downs (SCUA8[19:4]) */
- { PIN_CONFIG_BIAS_PULL_DOWN, { L5, L5 }, SCUA8, 4 },
- { PIN_CONFIG_BIAS_DISABLE, { L5, L5 }, SCUA8, 4 },
- { PIN_CONFIG_BIAS_PULL_DOWN, { L4, L4 }, SCUA8, 5 },
- { PIN_CONFIG_BIAS_DISABLE, { L4, L4 }, SCUA8, 5 },
- { PIN_CONFIG_BIAS_PULL_DOWN, { L3, L3 }, SCUA8, 6 },
- { PIN_CONFIG_BIAS_DISABLE, { L3, L3 }, SCUA8, 6 },
- { PIN_CONFIG_BIAS_PULL_DOWN, { L2, L2 }, SCUA8, 7 },
- { PIN_CONFIG_BIAS_DISABLE, { L2, L2 }, SCUA8, 7 },
- { PIN_CONFIG_BIAS_PULL_DOWN, { L1, L1 }, SCUA8, 8 },
- { PIN_CONFIG_BIAS_DISABLE, { L1, L1 }, SCUA8, 8 },
- { PIN_CONFIG_BIAS_PULL_DOWN, { M5, M5 }, SCUA8, 9 },
- { PIN_CONFIG_BIAS_DISABLE, { M5, M5 }, SCUA8, 9 },
- { PIN_CONFIG_BIAS_PULL_DOWN, { M4, M4 }, SCUA8, 10 },
- { PIN_CONFIG_BIAS_DISABLE, { M4, M4 }, SCUA8, 10 },
- { PIN_CONFIG_BIAS_PULL_DOWN, { M3, M3 }, SCUA8, 11 },
- { PIN_CONFIG_BIAS_DISABLE, { M3, M3 }, SCUA8, 11 },
- { PIN_CONFIG_BIAS_PULL_DOWN, { M2, M2 }, SCUA8, 12 },
- { PIN_CONFIG_BIAS_DISABLE, { M2, M2 }, SCUA8, 12 },
- { PIN_CONFIG_BIAS_PULL_DOWN, { M1, M1 }, SCUA8, 13 },
- { PIN_CONFIG_BIAS_DISABLE, { M1, M1 }, SCUA8, 13 },
- { PIN_CONFIG_BIAS_PULL_DOWN, { N5, N5 }, SCUA8, 14 },
- { PIN_CONFIG_BIAS_DISABLE, { N5, N5 }, SCUA8, 14 },
- { PIN_CONFIG_BIAS_PULL_DOWN, { N4, N4 }, SCUA8, 15 },
- { PIN_CONFIG_BIAS_DISABLE, { N4, N4 }, SCUA8, 15 },
- { PIN_CONFIG_BIAS_PULL_DOWN, { N3, N3 }, SCUA8, 16 },
- { PIN_CONFIG_BIAS_DISABLE, { N3, N3 }, SCUA8, 16 },
- { PIN_CONFIG_BIAS_PULL_DOWN, { N2, N2 }, SCUA8, 17 },
- { PIN_CONFIG_BIAS_DISABLE, { N2, N2 }, SCUA8, 17 },
- { PIN_CONFIG_BIAS_PULL_DOWN, { N1, N1 }, SCUA8, 18 },
- { PIN_CONFIG_BIAS_DISABLE, { N1, N1 }, SCUA8, 18 },
- { PIN_CONFIG_BIAS_PULL_DOWN, { P5, P5 }, SCUA8, 19 },
- { PIN_CONFIG_BIAS_DISABLE, { P5, P5 }, SCUA8, 19 },
+ ASPEED_SB_PINCONF(PIN_CONFIG_BIAS_PULL_DOWN, L5, L5, SCUA8, 4),
+ ASPEED_SB_PINCONF(PIN_CONFIG_BIAS_DISABLE, L5, L5, SCUA8, 4),
+ ASPEED_SB_PINCONF(PIN_CONFIG_BIAS_PULL_DOWN, L4, L4, SCUA8, 5),
+ ASPEED_SB_PINCONF(PIN_CONFIG_BIAS_DISABLE, L4, L4, SCUA8, 5),
+ ASPEED_SB_PINCONF(PIN_CONFIG_BIAS_PULL_DOWN, L3, L3, SCUA8, 6),
+ ASPEED_SB_PINCONF(PIN_CONFIG_BIAS_DISABLE, L3, L3, SCUA8, 6),
+ ASPEED_SB_PINCONF(PIN_CONFIG_BIAS_PULL_DOWN, L2, L2, SCUA8, 7),
+ ASPEED_SB_PINCONF(PIN_CONFIG_BIAS_DISABLE, L2, L2, SCUA8, 7),
+ ASPEED_SB_PINCONF(PIN_CONFIG_BIAS_PULL_DOWN, L1, L1, SCUA8, 8),
+ ASPEED_SB_PINCONF(PIN_CONFIG_BIAS_DISABLE, L1, L1, SCUA8, 8),
+ ASPEED_SB_PINCONF(PIN_CONFIG_BIAS_PULL_DOWN, M5, M5, SCUA8, 9),
+ ASPEED_SB_PINCONF(PIN_CONFIG_BIAS_DISABLE, M5, M5, SCUA8, 9),
+ ASPEED_SB_PINCONF(PIN_CONFIG_BIAS_PULL_DOWN, M4, M4, SCUA8, 10),
+ ASPEED_SB_PINCONF(PIN_CONFIG_BIAS_DISABLE, M4, M4, SCUA8, 10),
+ ASPEED_SB_PINCONF(PIN_CONFIG_BIAS_PULL_DOWN, M3, M3, SCUA8, 11),
+ ASPEED_SB_PINCONF(PIN_CONFIG_BIAS_DISABLE, M3, M3, SCUA8, 11),
+ ASPEED_SB_PINCONF(PIN_CONFIG_BIAS_PULL_DOWN, M2, M2, SCUA8, 12),
+ ASPEED_SB_PINCONF(PIN_CONFIG_BIAS_DISABLE, M2, M2, SCUA8, 12),
+ ASPEED_SB_PINCONF(PIN_CONFIG_BIAS_PULL_DOWN, M1, M1, SCUA8, 13),
+ ASPEED_SB_PINCONF(PIN_CONFIG_BIAS_DISABLE, M1, M1, SCUA8, 13),
+ ASPEED_SB_PINCONF(PIN_CONFIG_BIAS_PULL_DOWN, N5, N5, SCUA8, 14),
+ ASPEED_SB_PINCONF(PIN_CONFIG_BIAS_DISABLE, N5, N5, SCUA8, 14),
+ ASPEED_SB_PINCONF(PIN_CONFIG_BIAS_PULL_DOWN, N4, N4, SCUA8, 15),
+ ASPEED_SB_PINCONF(PIN_CONFIG_BIAS_DISABLE, N4, N4, SCUA8, 15),
+ ASPEED_SB_PINCONF(PIN_CONFIG_BIAS_PULL_DOWN, N3, N3, SCUA8, 16),
+ ASPEED_SB_PINCONF(PIN_CONFIG_BIAS_DISABLE, N3, N3, SCUA8, 16),
+ ASPEED_SB_PINCONF(PIN_CONFIG_BIAS_PULL_DOWN, N2, N2, SCUA8, 17),
+ ASPEED_SB_PINCONF(PIN_CONFIG_BIAS_DISABLE, N2, N2, SCUA8, 17),
+ ASPEED_SB_PINCONF(PIN_CONFIG_BIAS_PULL_DOWN, N1, N1, SCUA8, 18),
+ ASPEED_SB_PINCONF(PIN_CONFIG_BIAS_DISABLE, N1, N1, SCUA8, 18),
+ ASPEED_SB_PINCONF(PIN_CONFIG_BIAS_PULL_DOWN, P5, P5, SCUA8, 19),
+ ASPEED_SB_PINCONF(PIN_CONFIG_BIAS_DISABLE, P5, P5, SCUA8, 19),
/*
* Debounce settings for GPIOs D and E passthrough mode are in
@@ -2531,14 +2531,14 @@ static const struct aspeed_pin_config aspeed_g4_configs[] = {
* controller. Due to this tangle between GPIO and pinctrl we don't yet
* fully support pass-through debounce.
*/
- { PIN_CONFIG_INPUT_DEBOUNCE, { A18, D16 }, SCUA8, 20 },
- { PIN_CONFIG_INPUT_DEBOUNCE, { B17, A17 }, SCUA8, 21 },
- { PIN_CONFIG_INPUT_DEBOUNCE, { C16, B16 }, SCUA8, 22 },
- { PIN_CONFIG_INPUT_DEBOUNCE, { A16, E15 }, SCUA8, 23 },
- { PIN_CONFIG_INPUT_DEBOUNCE, { D15, C15 }, SCUA8, 24 },
- { PIN_CONFIG_INPUT_DEBOUNCE, { B15, A15 }, SCUA8, 25 },
- { PIN_CONFIG_INPUT_DEBOUNCE, { E14, D14 }, SCUA8, 26 },
- { PIN_CONFIG_INPUT_DEBOUNCE, { C14, B14 }, SCUA8, 27 },
+ ASPEED_SB_PINCONF(PIN_CONFIG_INPUT_DEBOUNCE, A18, D16, SCUA8, 20),
+ ASPEED_SB_PINCONF(PIN_CONFIG_INPUT_DEBOUNCE, B17, A17, SCUA8, 21),
+ ASPEED_SB_PINCONF(PIN_CONFIG_INPUT_DEBOUNCE, C16, B16, SCUA8, 22),
+ ASPEED_SB_PINCONF(PIN_CONFIG_INPUT_DEBOUNCE, A16, E15, SCUA8, 23),
+ ASPEED_SB_PINCONF(PIN_CONFIG_INPUT_DEBOUNCE, D15, C15, SCUA8, 24),
+ ASPEED_SB_PINCONF(PIN_CONFIG_INPUT_DEBOUNCE, B15, A15, SCUA8, 25),
+ ASPEED_SB_PINCONF(PIN_CONFIG_INPUT_DEBOUNCE, E14, D14, SCUA8, 26),
+ ASPEED_SB_PINCONF(PIN_CONFIG_INPUT_DEBOUNCE, C14, B14, SCUA8, 27),
};
static int aspeed_g4_sig_expr_set(struct aspeed_pinmux_data *ctx,
@@ -2594,6 +2594,14 @@ static int aspeed_g4_sig_expr_set(struct aspeed_pinmux_data *ctx,
return 0;
}
+static const struct aspeed_pin_config_map aspeed_g4_pin_config_map[] = {
+ { PIN_CONFIG_BIAS_PULL_DOWN, 0, 1, BIT_MASK(0)},
+ { PIN_CONFIG_BIAS_PULL_DOWN, -1, 0, BIT_MASK(0)},
+ { PIN_CONFIG_BIAS_DISABLE, -1, 1, BIT_MASK(0)},
+ { PIN_CONFIG_DRIVE_STRENGTH, 8, 0, BIT_MASK(0)},
+ { PIN_CONFIG_DRIVE_STRENGTH, 16, 1, BIT_MASK(0)},
+};
+
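
The new confmaps table lets the generic pinconf path translate a (parameter, argument) request into a register value and mask. Assuming the columns above are (param, arg, value, mask) with -1 acting as a wildcard argument, the lookup reduces to the sketch below (illustrative, not the Aspeed core code):

#include <stdio.h>

enum param { BIAS_PULL_DOWN, BIAS_DISABLE, DRIVE_STRENGTH };

struct confmap { enum param param; int arg; unsigned int val, mask; };

/* Mirrors aspeed_g4_pin_config_map above; semantics are assumed */
static const struct confmap map[] = {
        { BIAS_PULL_DOWN,  0, 1, 0x1 },
        { BIAS_PULL_DOWN, -1, 0, 0x1 },
        { BIAS_DISABLE,   -1, 1, 0x1 },
        { DRIVE_STRENGTH,  8, 0, 0x1 },
        { DRIVE_STRENGTH, 16, 1, 0x1 },
};

static const struct confmap *lookup(enum param p, int arg)
{
        for (unsigned int i = 0; i < sizeof(map) / sizeof(map[0]); i++)
                if (map[i].param == p &&
                    (map[i].arg == -1 || map[i].arg == arg))
                        return &map[i];
        return NULL;
}

int main(void)
{
        const struct confmap *m = lookup(DRIVE_STRENGTH, 16);

        if (m)
                printf("val=%u mask=0x%x\n", m->val, m->mask);
        return 0;
}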
static const struct aspeed_pinmux_ops aspeed_g4_ops = {
.set = aspeed_g4_sig_expr_set,
};
@@ -2610,6 +2618,8 @@ static struct aspeed_pinctrl_data aspeed_g4_pinctrl_data = {
},
.configs = aspeed_g4_configs,
.nconfigs = ARRAY_SIZE(aspeed_g4_configs),
+ .confmaps = aspeed_g4_pin_config_map,
+ .nconfmaps = ARRAY_SIZE(aspeed_g4_pin_config_map),
};
static const struct pinmux_ops aspeed_g4_pinmux_ops = {
diff --git a/drivers/pinctrl/aspeed/pinctrl-aspeed-g5.c b/drivers/pinctrl/aspeed/pinctrl-aspeed-g5.c
index d8a804b9f958..0cab4c2576e2 100644
--- a/drivers/pinctrl/aspeed/pinctrl-aspeed-g5.c
+++ b/drivers/pinctrl/aspeed/pinctrl-aspeed-g5.c
@@ -2476,124 +2476,124 @@ static const struct aspeed_pin_function aspeed_g5_functions[] = {
static struct aspeed_pin_config aspeed_g5_configs[] = {
/* GPIOA, GPIOQ */
- { PIN_CONFIG_BIAS_PULL_DOWN, { B14, B13 }, SCU8C, 16 },
- { PIN_CONFIG_BIAS_DISABLE, { B14, B13 }, SCU8C, 16 },
- { PIN_CONFIG_BIAS_PULL_DOWN, { A11, N20 }, SCU8C, 16 },
- { PIN_CONFIG_BIAS_DISABLE, { A11, N20 }, SCU8C, 16 },
+ ASPEED_SB_PINCONF(PIN_CONFIG_BIAS_PULL_DOWN, B14, B13, SCU8C, 16),
+ ASPEED_SB_PINCONF(PIN_CONFIG_BIAS_DISABLE, B14, B13, SCU8C, 16),
+ ASPEED_SB_PINCONF(PIN_CONFIG_BIAS_PULL_DOWN, A11, N20, SCU8C, 16),
+ ASPEED_SB_PINCONF(PIN_CONFIG_BIAS_DISABLE, A11, N20, SCU8C, 16),
/* GPIOB, GPIOR */
- { PIN_CONFIG_BIAS_PULL_DOWN, { K19, H20 }, SCU8C, 17 },
- { PIN_CONFIG_BIAS_DISABLE, { K19, H20 }, SCU8C, 17 },
- { PIN_CONFIG_BIAS_PULL_DOWN, { AA19, E10 }, SCU8C, 17 },
- { PIN_CONFIG_BIAS_DISABLE, { AA19, E10 }, SCU8C, 17 },
+ ASPEED_SB_PINCONF(PIN_CONFIG_BIAS_PULL_DOWN, K19, H20, SCU8C, 17),
+ ASPEED_SB_PINCONF(PIN_CONFIG_BIAS_DISABLE, K19, H20, SCU8C, 17),
+ ASPEED_SB_PINCONF(PIN_CONFIG_BIAS_PULL_DOWN, AA19, E10, SCU8C, 17),
+ ASPEED_SB_PINCONF(PIN_CONFIG_BIAS_DISABLE, AA19, E10, SCU8C, 17),
/* GPIOC, GPIOS*/
- { PIN_CONFIG_BIAS_PULL_DOWN, { C12, B11 }, SCU8C, 18 },
- { PIN_CONFIG_BIAS_DISABLE, { C12, B11 }, SCU8C, 18 },
- { PIN_CONFIG_BIAS_PULL_DOWN, { V20, AA20 }, SCU8C, 18 },
- { PIN_CONFIG_BIAS_DISABLE, { V20, AA20 }, SCU8C, 18 },
+ ASPEED_SB_PINCONF(PIN_CONFIG_BIAS_PULL_DOWN, C12, B11, SCU8C, 18),
+ ASPEED_SB_PINCONF(PIN_CONFIG_BIAS_DISABLE, C12, B11, SCU8C, 18),
+ ASPEED_SB_PINCONF(PIN_CONFIG_BIAS_PULL_DOWN, V20, AA20, SCU8C, 18),
+ ASPEED_SB_PINCONF(PIN_CONFIG_BIAS_DISABLE, V20, AA20, SCU8C, 18),
/* GPIOD, GPIOY */
- { PIN_CONFIG_BIAS_PULL_DOWN, { F19, C21 }, SCU8C, 19 },
- { PIN_CONFIG_BIAS_DISABLE, { F19, C21 }, SCU8C, 19 },
- { PIN_CONFIG_BIAS_PULL_DOWN, { R22, P20 }, SCU8C, 19 },
- { PIN_CONFIG_BIAS_DISABLE, { R22, P20 }, SCU8C, 19 },
+ ASPEED_SB_PINCONF(PIN_CONFIG_BIAS_PULL_DOWN, F19, C21, SCU8C, 19),
+ ASPEED_SB_PINCONF(PIN_CONFIG_BIAS_DISABLE, F19, C21, SCU8C, 19),
+ ASPEED_SB_PINCONF(PIN_CONFIG_BIAS_PULL_DOWN, R22, P20, SCU8C, 19),
+ ASPEED_SB_PINCONF(PIN_CONFIG_BIAS_DISABLE, R22, P20, SCU8C, 19),
/* GPIOE, GPIOZ */
- { PIN_CONFIG_BIAS_PULL_DOWN, { B20, B19 }, SCU8C, 20 },
- { PIN_CONFIG_BIAS_DISABLE, { B20, B19 }, SCU8C, 20 },
- { PIN_CONFIG_BIAS_PULL_DOWN, { Y20, W21 }, SCU8C, 20 },
- { PIN_CONFIG_BIAS_DISABLE, { Y20, W21 }, SCU8C, 20 },
+ ASPEED_SB_PINCONF(PIN_CONFIG_BIAS_PULL_DOWN, B20, B19, SCU8C, 20),
+ ASPEED_SB_PINCONF(PIN_CONFIG_BIAS_DISABLE, B20, B19, SCU8C, 20),
+ ASPEED_SB_PINCONF(PIN_CONFIG_BIAS_PULL_DOWN, Y20, W21, SCU8C, 20),
+ ASPEED_SB_PINCONF(PIN_CONFIG_BIAS_DISABLE, Y20, W21, SCU8C, 20),
/* GPIOF, GPIOAA */
- { PIN_CONFIG_BIAS_PULL_DOWN, { J19, H18 }, SCU8C, 21 },
- { PIN_CONFIG_BIAS_DISABLE, { J19, H18 }, SCU8C, 21 },
- { PIN_CONFIG_BIAS_PULL_DOWN, { Y21, P19 }, SCU8C, 21 },
- { PIN_CONFIG_BIAS_DISABLE, { Y21, P19 }, SCU8C, 21 },
+ ASPEED_SB_PINCONF(PIN_CONFIG_BIAS_PULL_DOWN, J19, H18, SCU8C, 21),
+ ASPEED_SB_PINCONF(PIN_CONFIG_BIAS_DISABLE, J19, H18, SCU8C, 21),
+ ASPEED_SB_PINCONF(PIN_CONFIG_BIAS_PULL_DOWN, Y21, P19, SCU8C, 21),
+ ASPEED_SB_PINCONF(PIN_CONFIG_BIAS_DISABLE, Y21, P19, SCU8C, 21),
- /* GPIOG, GPIOAB */
- { PIN_CONFIG_BIAS_PULL_DOWN, { A19, E14 }, SCU8C, 22 },
- { PIN_CONFIG_BIAS_DISABLE, { A19, E14 }, SCU8C, 22 },
- { PIN_CONFIG_BIAS_PULL_DOWN, { N19, R20 }, SCU8C, 22 },
- { PIN_CONFIG_BIAS_DISABLE, { N19, R20 }, SCU8C, 22 },
+ /* GPIOG, GPIOAB */
+ ASPEED_SB_PINCONF(PIN_CONFIG_BIAS_PULL_DOWN, A19, E14, SCU8C, 22),
+ ASPEED_SB_PINCONF(PIN_CONFIG_BIAS_DISABLE, A19, E14, SCU8C, 22),
+ ASPEED_SB_PINCONF(PIN_CONFIG_BIAS_PULL_DOWN, N19, R20, SCU8C, 22),
+ ASPEED_SB_PINCONF(PIN_CONFIG_BIAS_DISABLE, N19, R20, SCU8C, 22),
/* GPIOH, GPIOAC */
- { PIN_CONFIG_BIAS_PULL_DOWN, { A18, D18 }, SCU8C, 23 },
- { PIN_CONFIG_BIAS_DISABLE, { A18, D18 }, SCU8C, 23 },
- { PIN_CONFIG_BIAS_PULL_DOWN, { G21, G22 }, SCU8C, 23 },
- { PIN_CONFIG_BIAS_DISABLE, { G21, G22 }, SCU8C, 23 },
+ ASPEED_SB_PINCONF(PIN_CONFIG_BIAS_PULL_DOWN, A18, D18, SCU8C, 23),
+ ASPEED_SB_PINCONF(PIN_CONFIG_BIAS_DISABLE, A18, D18, SCU8C, 23),
+ ASPEED_SB_PINCONF(PIN_CONFIG_BIAS_PULL_DOWN, G21, G22, SCU8C, 23),
+ ASPEED_SB_PINCONF(PIN_CONFIG_BIAS_DISABLE, G21, G22, SCU8C, 23),
/* GPIOs [I, P] */
- { PIN_CONFIG_BIAS_PULL_DOWN, { C18, A15 }, SCU8C, 24 },
- { PIN_CONFIG_BIAS_DISABLE, { C18, A15 }, SCU8C, 24 },
- { PIN_CONFIG_BIAS_PULL_DOWN, { R2, T3 }, SCU8C, 25 },
- { PIN_CONFIG_BIAS_DISABLE, { R2, T3 }, SCU8C, 25 },
- { PIN_CONFIG_BIAS_PULL_DOWN, { L3, R1 }, SCU8C, 26 },
- { PIN_CONFIG_BIAS_DISABLE, { L3, R1 }, SCU8C, 26 },
- { PIN_CONFIG_BIAS_PULL_DOWN, { T2, W1 }, SCU8C, 27 },
- { PIN_CONFIG_BIAS_DISABLE, { T2, W1 }, SCU8C, 27 },
- { PIN_CONFIG_BIAS_PULL_DOWN, { Y1, T5 }, SCU8C, 28 },
- { PIN_CONFIG_BIAS_DISABLE, { Y1, T5 }, SCU8C, 28 },
- { PIN_CONFIG_BIAS_PULL_DOWN, { V2, T4 }, SCU8C, 29 },
- { PIN_CONFIG_BIAS_DISABLE, { V2, T4 }, SCU8C, 29 },
- { PIN_CONFIG_BIAS_PULL_DOWN, { U5, W4 }, SCU8C, 30 },
- { PIN_CONFIG_BIAS_DISABLE, { U5, W4 }, SCU8C, 30 },
- { PIN_CONFIG_BIAS_PULL_DOWN, { V4, V6 }, SCU8C, 31 },
- { PIN_CONFIG_BIAS_DISABLE, { V4, V6 }, SCU8C, 31 },
+ ASPEED_SB_PINCONF(PIN_CONFIG_BIAS_PULL_DOWN, C18, A15, SCU8C, 24),
+ ASPEED_SB_PINCONF(PIN_CONFIG_BIAS_DISABLE, C18, A15, SCU8C, 24),
+ ASPEED_SB_PINCONF(PIN_CONFIG_BIAS_PULL_DOWN, R2, T3, SCU8C, 25),
+ ASPEED_SB_PINCONF(PIN_CONFIG_BIAS_DISABLE, R2, T3, SCU8C, 25),
+ ASPEED_SB_PINCONF(PIN_CONFIG_BIAS_PULL_DOWN, L3, R1, SCU8C, 26),
+ ASPEED_SB_PINCONF(PIN_CONFIG_BIAS_DISABLE, L3, R1, SCU8C, 26),
+ ASPEED_SB_PINCONF(PIN_CONFIG_BIAS_PULL_DOWN, T2, W1, SCU8C, 27),
+ ASPEED_SB_PINCONF(PIN_CONFIG_BIAS_DISABLE, T2, W1, SCU8C, 27),
+ ASPEED_SB_PINCONF(PIN_CONFIG_BIAS_PULL_DOWN, Y1, T5, SCU8C, 28),
+ ASPEED_SB_PINCONF(PIN_CONFIG_BIAS_DISABLE, Y1, T5, SCU8C, 28),
+ ASPEED_SB_PINCONF(PIN_CONFIG_BIAS_PULL_DOWN, V2, T4, SCU8C, 29),
+ ASPEED_SB_PINCONF(PIN_CONFIG_BIAS_DISABLE, V2, T4, SCU8C, 29),
+ ASPEED_SB_PINCONF(PIN_CONFIG_BIAS_PULL_DOWN, U5, W4, SCU8C, 30),
+ ASPEED_SB_PINCONF(PIN_CONFIG_BIAS_DISABLE, U5, W4, SCU8C, 30),
+ ASPEED_SB_PINCONF(PIN_CONFIG_BIAS_PULL_DOWN, V4, V6, SCU8C, 31),
+ ASPEED_SB_PINCONF(PIN_CONFIG_BIAS_DISABLE, V4, V6, SCU8C, 31),
/* GPIOs T[0-5] (RGMII1 Tx pins) */
- { PIN_CONFIG_DRIVE_STRENGTH, { B5, B5 }, SCU90, 8 },
- { PIN_CONFIG_DRIVE_STRENGTH, { E9, A5 }, SCU90, 9 },
- { PIN_CONFIG_BIAS_PULL_DOWN, { B5, D7 }, SCU90, 12 },
- { PIN_CONFIG_BIAS_DISABLE, { B5, D7 }, SCU90, 12 },
+ ASPEED_SB_PINCONF(PIN_CONFIG_DRIVE_STRENGTH, B5, B5, SCU90, 8),
+ ASPEED_SB_PINCONF(PIN_CONFIG_DRIVE_STRENGTH, E9, A5, SCU90, 9),
+ ASPEED_SB_PINCONF(PIN_CONFIG_BIAS_PULL_DOWN, B5, D7, SCU90, 12),
+ ASPEED_SB_PINCONF(PIN_CONFIG_BIAS_DISABLE, B5, D7, SCU90, 12),
/* GPIOs T[6-7], U[0-3] (RGMII2 TX pins) */
- { PIN_CONFIG_DRIVE_STRENGTH, { B2, B2 }, SCU90, 10 },
- { PIN_CONFIG_DRIVE_STRENGTH, { B1, B3 }, SCU90, 11 },
- { PIN_CONFIG_BIAS_PULL_DOWN, { B2, D4 }, SCU90, 14 },
- { PIN_CONFIG_BIAS_DISABLE, { B2, D4 }, SCU90, 14 },
+ ASPEED_SB_PINCONF(PIN_CONFIG_DRIVE_STRENGTH, B2, B2, SCU90, 10),
+ ASPEED_SB_PINCONF(PIN_CONFIG_DRIVE_STRENGTH, B1, B3, SCU90, 11),
+ ASPEED_SB_PINCONF(PIN_CONFIG_BIAS_PULL_DOWN, B2, D4, SCU90, 14),
+ ASPEED_SB_PINCONF(PIN_CONFIG_BIAS_DISABLE, B2, D4, SCU90, 14),
/* GPIOs U[4-7], V[0-1] (RGMII1 Rx pins) */
- { PIN_CONFIG_BIAS_PULL_DOWN, { B4, C4 }, SCU90, 13 },
- { PIN_CONFIG_BIAS_DISABLE, { B4, C4 }, SCU90, 13 },
+ ASPEED_SB_PINCONF(PIN_CONFIG_BIAS_PULL_DOWN, B4, C4, SCU90, 13),
+ ASPEED_SB_PINCONF(PIN_CONFIG_BIAS_DISABLE, B4, C4, SCU90, 13),
/* GPIOs V[2-7] (RGMII2 Rx pins) */
- { PIN_CONFIG_BIAS_PULL_DOWN, { C2, E6 }, SCU90, 15 },
- { PIN_CONFIG_BIAS_DISABLE, { C2, E6 }, SCU90, 15 },
+ ASPEED_SB_PINCONF(PIN_CONFIG_BIAS_PULL_DOWN, C2, E6, SCU90, 15),
+ ASPEED_SB_PINCONF(PIN_CONFIG_BIAS_DISABLE, C2, E6, SCU90, 15),
/* ADC pull-downs (SCUA8[19:4]) */
- { PIN_CONFIG_BIAS_PULL_DOWN, { F4, F4 }, SCUA8, 4 },
- { PIN_CONFIG_BIAS_DISABLE, { F4, F4 }, SCUA8, 4 },
- { PIN_CONFIG_BIAS_PULL_DOWN, { F5, F5 }, SCUA8, 5 },
- { PIN_CONFIG_BIAS_DISABLE, { F5, F5 }, SCUA8, 5 },
- { PIN_CONFIG_BIAS_PULL_DOWN, { E2, E2 }, SCUA8, 6 },
- { PIN_CONFIG_BIAS_DISABLE, { E2, E2 }, SCUA8, 6 },
- { PIN_CONFIG_BIAS_PULL_DOWN, { E1, E1 }, SCUA8, 7 },
- { PIN_CONFIG_BIAS_DISABLE, { E1, E1 }, SCUA8, 7 },
- { PIN_CONFIG_BIAS_PULL_DOWN, { F3, F3 }, SCUA8, 8 },
- { PIN_CONFIG_BIAS_DISABLE, { F3, F3 }, SCUA8, 8 },
- { PIN_CONFIG_BIAS_PULL_DOWN, { E3, E3 }, SCUA8, 9 },
- { PIN_CONFIG_BIAS_DISABLE, { E3, E3 }, SCUA8, 9 },
- { PIN_CONFIG_BIAS_PULL_DOWN, { G5, G5 }, SCUA8, 10 },
- { PIN_CONFIG_BIAS_DISABLE, { G5, G5 }, SCUA8, 10 },
- { PIN_CONFIG_BIAS_PULL_DOWN, { G4, G4 }, SCUA8, 11 },
- { PIN_CONFIG_BIAS_DISABLE, { G4, G4 }, SCUA8, 11 },
- { PIN_CONFIG_BIAS_PULL_DOWN, { F2, F2 }, SCUA8, 12 },
- { PIN_CONFIG_BIAS_DISABLE, { F2, F2 }, SCUA8, 12 },
- { PIN_CONFIG_BIAS_PULL_DOWN, { G3, G3 }, SCUA8, 13 },
- { PIN_CONFIG_BIAS_DISABLE, { G3, G3 }, SCUA8, 13 },
- { PIN_CONFIG_BIAS_PULL_DOWN, { G2, G2 }, SCUA8, 14 },
- { PIN_CONFIG_BIAS_DISABLE, { G2, G2 }, SCUA8, 14 },
- { PIN_CONFIG_BIAS_PULL_DOWN, { F1, F1 }, SCUA8, 15 },
- { PIN_CONFIG_BIAS_DISABLE, { F1, F1 }, SCUA8, 15 },
- { PIN_CONFIG_BIAS_PULL_DOWN, { H5, H5 }, SCUA8, 16 },
- { PIN_CONFIG_BIAS_DISABLE, { H5, H5 }, SCUA8, 16 },
- { PIN_CONFIG_BIAS_PULL_DOWN, { G1, G1 }, SCUA8, 17 },
- { PIN_CONFIG_BIAS_DISABLE, { G1, G1 }, SCUA8, 17 },
- { PIN_CONFIG_BIAS_PULL_DOWN, { H3, H3 }, SCUA8, 18 },
- { PIN_CONFIG_BIAS_DISABLE, { H3, H3 }, SCUA8, 18 },
- { PIN_CONFIG_BIAS_PULL_DOWN, { H4, H4 }, SCUA8, 19 },
- { PIN_CONFIG_BIAS_DISABLE, { H4, H4 }, SCUA8, 19 },
+ ASPEED_SB_PINCONF(PIN_CONFIG_BIAS_PULL_DOWN, F4, F4, SCUA8, 4),
+ ASPEED_SB_PINCONF(PIN_CONFIG_BIAS_DISABLE, F4, F4, SCUA8, 4),
+ ASPEED_SB_PINCONF(PIN_CONFIG_BIAS_PULL_DOWN, F5, F5, SCUA8, 5),
+ ASPEED_SB_PINCONF(PIN_CONFIG_BIAS_DISABLE, F5, F5, SCUA8, 5),
+ ASPEED_SB_PINCONF(PIN_CONFIG_BIAS_PULL_DOWN, E2, E2, SCUA8, 6),
+ ASPEED_SB_PINCONF(PIN_CONFIG_BIAS_DISABLE, E2, E2, SCUA8, 6),
+ ASPEED_SB_PINCONF(PIN_CONFIG_BIAS_PULL_DOWN, E1, E1, SCUA8, 7),
+ ASPEED_SB_PINCONF(PIN_CONFIG_BIAS_DISABLE, E1, E1, SCUA8, 7),
+ ASPEED_SB_PINCONF(PIN_CONFIG_BIAS_PULL_DOWN, F3, F3, SCUA8, 8),
+ ASPEED_SB_PINCONF(PIN_CONFIG_BIAS_DISABLE, F3, F3, SCUA8, 8),
+ ASPEED_SB_PINCONF(PIN_CONFIG_BIAS_PULL_DOWN, E3, E3, SCUA8, 9),
+ ASPEED_SB_PINCONF(PIN_CONFIG_BIAS_DISABLE, E3, E3, SCUA8, 9),
+ ASPEED_SB_PINCONF(PIN_CONFIG_BIAS_PULL_DOWN, G5, G5, SCUA8, 10),
+ ASPEED_SB_PINCONF(PIN_CONFIG_BIAS_DISABLE, G5, G5, SCUA8, 10),
+ ASPEED_SB_PINCONF(PIN_CONFIG_BIAS_PULL_DOWN, G4, G4, SCUA8, 11),
+ ASPEED_SB_PINCONF(PIN_CONFIG_BIAS_DISABLE, G4, G4, SCUA8, 11),
+ ASPEED_SB_PINCONF(PIN_CONFIG_BIAS_PULL_DOWN, F2, F2, SCUA8, 12),
+ ASPEED_SB_PINCONF(PIN_CONFIG_BIAS_DISABLE, F2, F2, SCUA8, 12),
+ ASPEED_SB_PINCONF(PIN_CONFIG_BIAS_PULL_DOWN, G3, G3, SCUA8, 13),
+ ASPEED_SB_PINCONF(PIN_CONFIG_BIAS_DISABLE, G3, G3, SCUA8, 13),
+ ASPEED_SB_PINCONF(PIN_CONFIG_BIAS_PULL_DOWN, G2, G2, SCUA8, 14),
+ ASPEED_SB_PINCONF(PIN_CONFIG_BIAS_DISABLE, G2, G2, SCUA8, 14),
+ ASPEED_SB_PINCONF(PIN_CONFIG_BIAS_PULL_DOWN, F1, F1, SCUA8, 15),
+ ASPEED_SB_PINCONF(PIN_CONFIG_BIAS_DISABLE, F1, F1, SCUA8, 15),
+ ASPEED_SB_PINCONF(PIN_CONFIG_BIAS_PULL_DOWN, H5, H5, SCUA8, 16),
+ ASPEED_SB_PINCONF(PIN_CONFIG_BIAS_DISABLE, H5, H5, SCUA8, 16),
+ ASPEED_SB_PINCONF(PIN_CONFIG_BIAS_PULL_DOWN, G1, G1, SCUA8, 17),
+ ASPEED_SB_PINCONF(PIN_CONFIG_BIAS_DISABLE, G1, G1, SCUA8, 17),
+ ASPEED_SB_PINCONF(PIN_CONFIG_BIAS_PULL_DOWN, H3, H3, SCUA8, 18),
+ ASPEED_SB_PINCONF(PIN_CONFIG_BIAS_DISABLE, H3, H3, SCUA8, 18),
+ ASPEED_SB_PINCONF(PIN_CONFIG_BIAS_PULL_DOWN, H4, H4, SCUA8, 19),
+ ASPEED_SB_PINCONF(PIN_CONFIG_BIAS_DISABLE, H4, H4, SCUA8, 19),
/*
* Debounce settings for GPIOs D and E passthrough mode are in
@@ -2604,14 +2604,14 @@ static struct aspeed_pin_config aspeed_g5_configs[] = {
* controller. Due to this tangle between GPIO and pinctrl we don't yet
* fully support pass-through debounce.
*/
- { PIN_CONFIG_INPUT_DEBOUNCE, { F19, E21 }, SCUA8, 20 },
- { PIN_CONFIG_INPUT_DEBOUNCE, { F20, D20 }, SCUA8, 21 },
- { PIN_CONFIG_INPUT_DEBOUNCE, { D21, E20 }, SCUA8, 22 },
- { PIN_CONFIG_INPUT_DEBOUNCE, { G18, C21 }, SCUA8, 23 },
- { PIN_CONFIG_INPUT_DEBOUNCE, { B20, C20 }, SCUA8, 24 },
- { PIN_CONFIG_INPUT_DEBOUNCE, { F18, F17 }, SCUA8, 25 },
- { PIN_CONFIG_INPUT_DEBOUNCE, { E18, D19 }, SCUA8, 26 },
- { PIN_CONFIG_INPUT_DEBOUNCE, { A20, B19 }, SCUA8, 27 },
+ ASPEED_SB_PINCONF(PIN_CONFIG_INPUT_DEBOUNCE, F19, E21, SCUA8, 20),
+ ASPEED_SB_PINCONF(PIN_CONFIG_INPUT_DEBOUNCE, F20, D20, SCUA8, 21),
+ ASPEED_SB_PINCONF(PIN_CONFIG_INPUT_DEBOUNCE, D21, E20, SCUA8, 22),
+ ASPEED_SB_PINCONF(PIN_CONFIG_INPUT_DEBOUNCE, G18, C21, SCUA8, 23),
+ ASPEED_SB_PINCONF(PIN_CONFIG_INPUT_DEBOUNCE, B20, C20, SCUA8, 24),
+ ASPEED_SB_PINCONF(PIN_CONFIG_INPUT_DEBOUNCE, F18, F17, SCUA8, 25),
+ ASPEED_SB_PINCONF(PIN_CONFIG_INPUT_DEBOUNCE, E18, D19, SCUA8, 26),
+ ASPEED_SB_PINCONF(PIN_CONFIG_INPUT_DEBOUNCE, A20, B19, SCUA8, 27),
};
static struct regmap *aspeed_g5_acquire_regmap(struct aspeed_pinmux_data *ctx,
@@ -2780,6 +2780,14 @@ static int aspeed_g5_sig_expr_set(struct aspeed_pinmux_data *ctx,
return 0;
}
+static const struct aspeed_pin_config_map aspeed_g5_pin_config_map[] = {
+ { PIN_CONFIG_BIAS_PULL_DOWN, 0, 1, BIT_MASK(0)},
+ { PIN_CONFIG_BIAS_PULL_DOWN, -1, 0, BIT_MASK(0)},
+ { PIN_CONFIG_BIAS_DISABLE, -1, 1, BIT_MASK(0)},
+ { PIN_CONFIG_DRIVE_STRENGTH, 8, 0, BIT_MASK(0)},
+ { PIN_CONFIG_DRIVE_STRENGTH, 16, 1, BIT_MASK(0)},
+};
+
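Reading the map: per the struct documentation added to pinctrl-aspeed.h later in this diff, arg is the generic pinconf argument (-1 matching any value) and val is the field value written behind mask. An editorial gloss on the bias rows, not part of the patch:

/*
 * { PIN_CONFIG_BIAS_PULL_DOWN,  0, 1, BIT_MASK(0) }: bias argument 0
 *     requests no pull, so the "disable pull-down" bit is set (val 1).
 * { PIN_CONFIG_BIAS_PULL_DOWN, -1, 0, BIT_MASK(0) }: any other argument
 *     clears the bit, enabling the internal pull-down.
 * { PIN_CONFIG_BIAS_DISABLE,   -1, 1, BIT_MASK(0) }: bias-disable also
 *     sets the disable bit.
 */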
static const struct aspeed_pinmux_ops aspeed_g5_ops = {
.eval = aspeed_g5_sig_expr_eval,
.set = aspeed_g5_sig_expr_set,
@@ -2797,6 +2805,8 @@ static struct aspeed_pinctrl_data aspeed_g5_pinctrl_data = {
},
.configs = aspeed_g5_configs,
.nconfigs = ARRAY_SIZE(aspeed_g5_configs),
+ .confmaps = aspeed_g5_pin_config_map,
+ .nconfmaps = ARRAY_SIZE(aspeed_g5_pin_config_map),
};
static const struct pinmux_ops aspeed_g5_pinmux_ops = {
diff --git a/drivers/pinctrl/aspeed/pinctrl-aspeed-g6.c b/drivers/pinctrl/aspeed/pinctrl-aspeed-g6.c
index bb07024d22ed..fa32c3e9c9d1 100644
--- a/drivers/pinctrl/aspeed/pinctrl-aspeed-g6.c
+++ b/drivers/pinctrl/aspeed/pinctrl-aspeed-g6.c
@@ -26,7 +26,10 @@
#define SCU430 0x430 /* Multi-function Pin Control #8 */
#define SCU434 0x434 /* Multi-function Pin Control #9 */
#define SCU438 0x438 /* Multi-function Pin Control #10 */
+#define SCU440 0x440 /* USB Multi-function Pin Control #12 */
#define SCU450 0x450 /* Multi-function Pin Control #14 */
+#define SCU454 0x454 /* Multi-function Pin Control #15 */
+#define SCU458 0x458 /* Multi-function Pin Control #16 */
#define SCU4B0 0x4B0 /* Multi-function Pin Control #17 */
#define SCU4B4 0x4B4 /* Multi-function Pin Control #18 */
#define SCU4B8 0x4B8 /* Multi-function Pin Control #19 */
@@ -35,9 +38,17 @@
#define SCU4D8 0x4D8 /* Multi-function Pin Control #23 */
#define SCU500 0x500 /* Hardware Strap 1 */
#define SCU510 0x510 /* Hardware Strap 2 */
+#define SCU610 0x610 /* Disable GPIO Internal Pull-Down #0 */
+#define SCU614 0x614 /* Disable GPIO Internal Pull-Down #1 */
+#define SCU618 0x618 /* Disable GPIO Internal Pull-Down #2 */
+#define SCU61C 0x61c /* Disable GPIO Internal Pull-Down #3 */
+#define SCU620 0x620 /* Disable GPIO Internal Pull-Down #4 */
+#define SCU634 0x634 /* Disable GPIO Internal Pull-Down #5 */
+#define SCU638 0x638 /* Disable GPIO Internal Pull-Down #6 */
#define SCU694 0x694 /* Multi-function Pin Control #25 */
+#define SCUC20 0xC20 /* PCIE configuration Setting Control */
-#define ASPEED_G6_NR_PINS 248
+#define ASPEED_G6_NR_PINS 256
#define M24 0
SIG_EXPR_LIST_DECL_SESG(M24, MDC3, MDIO3, SIG_DESC_SET(SCU410, 0));
@@ -1534,6 +1545,78 @@ GROUP_DECL(I3C4, AE25, AF24);
FUNC_DECL_2(I3C4, HVI3C4, I3C4);
FUNC_GROUP_DECL(FSI2, AE25, AF24);
+#define AF23 248
+SIG_EXPR_LIST_DECL_SESG(AF23, I3C1SCL, I3C1, SIG_DESC_SET(SCU438, 16));
+PIN_DECL_(AF23, SIG_EXPR_LIST_PTR(AF23, I3C1SCL));
+
+#define AE24 249
+SIG_EXPR_LIST_DECL_SESG(AE24, I3C1SDA, I3C1, SIG_DESC_SET(SCU438, 17));
+PIN_DECL_(AE24, SIG_EXPR_LIST_PTR(AE24, I3C1SDA));
+
+FUNC_GROUP_DECL(I3C1, AF23, AE24);
+
+#define AF22 250
+SIG_EXPR_LIST_DECL_SESG(AF22, I3C2SCL, I3C2, SIG_DESC_SET(SCU438, 18));
+PIN_DECL_(AF22, SIG_EXPR_LIST_PTR(AF22, I3C2SCL));
+
+#define AE22 251
+SIG_EXPR_LIST_DECL_SESG(AE22, I3C2SDA, I3C2, SIG_DESC_SET(SCU438, 19));
+PIN_DECL_(AE22, SIG_EXPR_LIST_PTR(AE22, I3C2SDA));
+
+FUNC_GROUP_DECL(I3C2, AF22, AE22);
+
+#define USB2ADP_DESC { ASPEED_IP_SCU, SCU440, GENMASK(25, 24), 0, 0 }
+#define USB2AD_DESC { ASPEED_IP_SCU, SCU440, GENMASK(25, 24), 1, 0 }
+#define USB2AH_DESC { ASPEED_IP_SCU, SCU440, GENMASK(25, 24), 2, 0 }
+#define USB2AHP_DESC { ASPEED_IP_SCU, SCU440, GENMASK(25, 24), 3, 0 }
+#define USB11BHID_DESC { ASPEED_IP_SCU, SCU440, GENMASK(29, 28), 0, 0 }
+#define USB2BD_DESC { ASPEED_IP_SCU, SCU440, GENMASK(29, 28), 1, 0 }
+#define USB2BH_DESC { ASPEED_IP_SCU, SCU440, GENMASK(29, 28), 2, 0 }
+
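For orientation, the descriptor field order below is assumed from struct aspeed_sig_desc in pinmux-aspeed.h, which this series does not appear to change:

/*
 * Assumed layout: { ip, reg, mask, enable-value, disable-value }, so
 * USB2AH_DESC asserts when SCU440[25:24] reads 2, and the USB2ADP
 * expression on A4 below additionally requires SCUC20[16].
 */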
+#define A4 252
+SIG_EXPR_LIST_DECL_SEMG(A4, USB2ADPDP, USBA, USB2ADP, USB2ADP_DESC,
+ SIG_DESC_SET(SCUC20, 16));
+SIG_EXPR_LIST_DECL_SEMG(A4, USB2ADDP, USBA, USB2AD, USB2AD_DESC);
+SIG_EXPR_LIST_DECL_SEMG(A4, USB2AHDP, USBA, USB2AH, USB2AH_DESC);
+SIG_EXPR_LIST_DECL_SEMG(A4, USB2AHPDP, USBA, USB2AHP, USB2AHP_DESC);
+PIN_DECL_(A4, SIG_EXPR_LIST_PTR(A4, USB2ADPDP), SIG_EXPR_LIST_PTR(A4, USB2ADDP),
+ SIG_EXPR_LIST_PTR(A4, USB2AHDP));
+
+#define B4 253
+SIG_EXPR_LIST_DECL_SEMG(B4, USB2ADPDN, USBA, USB2ADP, USB2ADP_DESC);
+SIG_EXPR_LIST_DECL_SEMG(B4, USB2ADDN, USBA, USB2AD, USB2AD_DESC);
+SIG_EXPR_LIST_DECL_SEMG(B4, USB2AHDN, USBA, USB2AH, USB2AH_DESC);
+SIG_EXPR_LIST_DECL_SEMG(B4, USB2AHPDN, USBA, USB2AHP, USB2AHP_DESC);
+PIN_DECL_(B4, SIG_EXPR_LIST_PTR(B4, USB2ADPDN), SIG_EXPR_LIST_PTR(B4, USB2ADDN),
+ SIG_EXPR_LIST_PTR(B4, USB2AHDN));
+
+GROUP_DECL(USBA, A4, B4);
+
+FUNC_DECL_1(USB2ADP, USBA);
+FUNC_DECL_1(USB2AD, USBA);
+FUNC_DECL_1(USB2AH, USBA);
+FUNC_DECL_1(USB2AHP, USBA);
+
+#define A6 254
+SIG_EXPR_LIST_DECL_SEMG(A6, USB11BDP, USBB, USB11BHID, USB11BHID_DESC);
+SIG_EXPR_LIST_DECL_SEMG(A6, USB2BDDP, USBB, USB2BD, USB2BD_DESC);
+SIG_EXPR_LIST_DECL_SEMG(A6, USB2BHDP, USBB, USB2BH, USB2BH_DESC);
+PIN_DECL_(A6, SIG_EXPR_LIST_PTR(A6, USB11BDP), SIG_EXPR_LIST_PTR(A6, USB2BDDP),
+ SIG_EXPR_LIST_PTR(A6, USB2BHDP));
+
+#define B6 255
+SIG_EXPR_LIST_DECL_SEMG(B6, USB11BDN, USBB, USB11BHID, USB11BHID_DESC);
+SIG_EXPR_LIST_DECL_SEMG(B6, USB2BDDN, USBB, USB2BD, USB2BD_DESC);
+SIG_EXPR_LIST_DECL_SEMG(B6, USB2BHDN, USBB, USB2BH, USB2BH_DESC);
+PIN_DECL_(B6, SIG_EXPR_LIST_PTR(B6, USB11BDN), SIG_EXPR_LIST_PTR(B6, USB2BDDN),
+ SIG_EXPR_LIST_PTR(B6, USB2BHDN));
+
+GROUP_DECL(USBB, A6, B6);
+
+FUNC_DECL_1(USB11BHID, USBB);
+FUNC_DECL_1(USB2BD, USBB);
+FUNC_DECL_1(USB2BH, USBB);
+
/* Pins, groups and functions are sort(1):ed alphabetically for sanity */
static struct pinctrl_pin_desc aspeed_g6_pins[ASPEED_G6_NR_PINS] = {
@@ -1554,6 +1637,8 @@ static struct pinctrl_pin_desc aspeed_g6_pins[ASPEED_G6_NR_PINS] = {
ASPEED_PINCTRL_PIN(A24),
ASPEED_PINCTRL_PIN(A25),
ASPEED_PINCTRL_PIN(A3),
+ ASPEED_PINCTRL_PIN(A4),
+ ASPEED_PINCTRL_PIN(A6),
ASPEED_PINCTRL_PIN(AA11),
ASPEED_PINCTRL_PIN(AA12),
ASPEED_PINCTRL_PIN(AA16),
@@ -1625,6 +1710,8 @@ static struct pinctrl_pin_desc aspeed_g6_pins[ASPEED_G6_NR_PINS] = {
ASPEED_PINCTRL_PIN(AE16),
ASPEED_PINCTRL_PIN(AE18),
ASPEED_PINCTRL_PIN(AE19),
+ ASPEED_PINCTRL_PIN(AE22),
+ ASPEED_PINCTRL_PIN(AE24),
ASPEED_PINCTRL_PIN(AE25),
ASPEED_PINCTRL_PIN(AE26),
ASPEED_PINCTRL_PIN(AE7),
@@ -1634,6 +1721,8 @@ static struct pinctrl_pin_desc aspeed_g6_pins[ASPEED_G6_NR_PINS] = {
ASPEED_PINCTRL_PIN(AF12),
ASPEED_PINCTRL_PIN(AF14),
ASPEED_PINCTRL_PIN(AF15),
+ ASPEED_PINCTRL_PIN(AF22),
+ ASPEED_PINCTRL_PIN(AF23),
ASPEED_PINCTRL_PIN(AF24),
ASPEED_PINCTRL_PIN(AF25),
ASPEED_PINCTRL_PIN(AF7),
@@ -1654,6 +1743,8 @@ static struct pinctrl_pin_desc aspeed_g6_pins[ASPEED_G6_NR_PINS] = {
ASPEED_PINCTRL_PIN(B25),
ASPEED_PINCTRL_PIN(B26),
ASPEED_PINCTRL_PIN(B3),
+ ASPEED_PINCTRL_PIN(B4),
+ ASPEED_PINCTRL_PIN(B6),
ASPEED_PINCTRL_PIN(C1),
ASPEED_PINCTRL_PIN(C11),
ASPEED_PINCTRL_PIN(C12),
@@ -1847,6 +1938,8 @@ static const struct aspeed_pin_group aspeed_g6_groups[] = {
ASPEED_PINCTRL_GROUP(I2C7),
ASPEED_PINCTRL_GROUP(I2C8),
ASPEED_PINCTRL_GROUP(I2C9),
+ ASPEED_PINCTRL_GROUP(I3C1),
+ ASPEED_PINCTRL_GROUP(I3C2),
ASPEED_PINCTRL_GROUP(I3C3),
ASPEED_PINCTRL_GROUP(I3C4),
ASPEED_PINCTRL_GROUP(I3C5),
@@ -2012,6 +2105,8 @@ static const struct aspeed_pin_group aspeed_g6_groups[] = {
ASPEED_PINCTRL_GROUP(UART7),
ASPEED_PINCTRL_GROUP(UART8),
ASPEED_PINCTRL_GROUP(UART9),
+ ASPEED_PINCTRL_GROUP(USBA),
+ ASPEED_PINCTRL_GROUP(USBB),
ASPEED_PINCTRL_GROUP(VB),
ASPEED_PINCTRL_GROUP(VGAHS),
ASPEED_PINCTRL_GROUP(VGAVS),
@@ -2079,6 +2174,8 @@ static const struct aspeed_pin_function aspeed_g6_functions[] = {
ASPEED_PINCTRL_FUNC(I2C7),
ASPEED_PINCTRL_FUNC(I2C8),
ASPEED_PINCTRL_FUNC(I2C9),
+ ASPEED_PINCTRL_FUNC(I3C1),
+ ASPEED_PINCTRL_FUNC(I3C2),
ASPEED_PINCTRL_FUNC(I3C3),
ASPEED_PINCTRL_FUNC(I3C4),
ASPEED_PINCTRL_FUNC(I3C5),
@@ -2221,6 +2318,13 @@ static const struct aspeed_pin_function aspeed_g6_functions[] = {
ASPEED_PINCTRL_FUNC(UART7),
ASPEED_PINCTRL_FUNC(UART8),
ASPEED_PINCTRL_FUNC(UART9),
+ ASPEED_PINCTRL_FUNC(USB11BHID),
+ ASPEED_PINCTRL_FUNC(USB2AD),
+ ASPEED_PINCTRL_FUNC(USB2ADP),
+ ASPEED_PINCTRL_FUNC(USB2AH),
+ ASPEED_PINCTRL_FUNC(USB2AHP),
+ ASPEED_PINCTRL_FUNC(USB2BD),
+ ASPEED_PINCTRL_FUNC(USB2BH),
ASPEED_PINCTRL_FUNC(VB),
ASPEED_PINCTRL_FUNC(VGAHS),
ASPEED_PINCTRL_FUNC(VGAVS),
@@ -2230,6 +2334,260 @@ static const struct aspeed_pin_function aspeed_g6_functions[] = {
ASPEED_PINCTRL_FUNC(WDTRST4),
};
+static struct aspeed_pin_config aspeed_g6_configs[] = {
+ /* GPIOB7 */
+ ASPEED_PULL_DOWN_PINCONF(J24, SCU610, 15),
+ /* GPIOB6 */
+ ASPEED_PULL_DOWN_PINCONF(H25, SCU610, 14),
+ /* GPIOB5 */
+ ASPEED_PULL_DOWN_PINCONF(G26, SCU610, 13),
+ /* GPIOB4 */
+ ASPEED_PULL_DOWN_PINCONF(J23, SCU610, 12),
+ /* GPIOB3 */
+ ASPEED_PULL_DOWN_PINCONF(J25, SCU610, 11),
+ /* GPIOB2 */
+ ASPEED_PULL_DOWN_PINCONF(H26, SCU610, 10),
+ /* GPIOB1 */
+ ASPEED_PULL_DOWN_PINCONF(K23, SCU610, 9),
+ /* GPIOB0 */
+ ASPEED_PULL_DOWN_PINCONF(J26, SCU610, 8),
+
+ /* GPIOH3 */
+ ASPEED_PULL_DOWN_PINCONF(A17, SCU614, 27),
+ /* GPIOH2 */
+ ASPEED_PULL_DOWN_PINCONF(C18, SCU614, 26),
+ /* GPIOH1 */
+ ASPEED_PULL_DOWN_PINCONF(B18, SCU614, 25),
+ /* GPIOH0 */
+ ASPEED_PULL_DOWN_PINCONF(A18, SCU614, 24),
+
+ /* GPIOL7 */
+ ASPEED_PULL_DOWN_PINCONF(C14, SCU618, 31),
+ /* GPIOL6 */
+ ASPEED_PULL_DOWN_PINCONF(B14, SCU618, 30),
+ /* GPIOL5 */
+ ASPEED_PULL_DOWN_PINCONF(F15, SCU618, 29),
+ /* GPIOL4 */
+ ASPEED_PULL_DOWN_PINCONF(C15, SCU618, 28),
+
+ /* GPIOJ7 */
+ ASPEED_PULL_UP_PINCONF(D19, SCU618, 15),
+ /* GPIOJ6 */
+ ASPEED_PULL_UP_PINCONF(C20, SCU618, 14),
+ /* GPIOJ5 */
+ ASPEED_PULL_UP_PINCONF(A19, SCU618, 13),
+ /* GPIOJ4 */
+ ASPEED_PULL_UP_PINCONF(C19, SCU618, 12),
+ /* GPIOJ3 */
+ ASPEED_PULL_UP_PINCONF(D20, SCU618, 11),
+ /* GPIOJ2 */
+ ASPEED_PULL_UP_PINCONF(E19, SCU618, 10),
+ /* GPIOJ1 */
+ ASPEED_PULL_UP_PINCONF(A20, SCU618, 9),
+ /* GPIOJ0 */
+ ASPEED_PULL_UP_PINCONF(B20, SCU618, 8),
+
+ /* GPIOI7 */
+ ASPEED_PULL_DOWN_PINCONF(A15, SCU618, 7),
+ /* GPIOI6 */
+ ASPEED_PULL_DOWN_PINCONF(B16, SCU618, 6),
+ /* GPIOI5 */
+ ASPEED_PULL_DOWN_PINCONF(E16, SCU618, 5),
+ /* GPIOI4 */
+ ASPEED_PULL_DOWN_PINCONF(C16, SCU618, 4),
+ /* GPIOI3 */
+ ASPEED_PULL_DOWN_PINCONF(D16, SCU618, 3),
+ /* GPIOI2 */
+ ASPEED_PULL_DOWN_PINCONF(E17, SCU618, 2),
+ /* GPIOI1 */
+ ASPEED_PULL_DOWN_PINCONF(A16, SCU618, 1),
+ /* GPIOI0 */
+ ASPEED_PULL_DOWN_PINCONF(D17, SCU618, 0),
+
+ /* GPIOP7 */
+ ASPEED_PULL_DOWN_PINCONF(Y23, SCU61C, 31),
+ /* GPIOP6 */
+ ASPEED_PULL_DOWN_PINCONF(AB24, SCU61C, 30),
+ /* GPIOP5 */
+ ASPEED_PULL_DOWN_PINCONF(AB23, SCU61C, 29),
+ /* GPIOP4 */
+ ASPEED_PULL_DOWN_PINCONF(W23, SCU61C, 28),
+ /* GPIOP3 */
+ ASPEED_PULL_DOWN_PINCONF(AA24, SCU61C, 27),
+ /* GPIOP2 */
+ ASPEED_PULL_DOWN_PINCONF(AA23, SCU61C, 26),
+ /* GPIOP1 */
+ ASPEED_PULL_DOWN_PINCONF(W24, SCU61C, 25),
+ /* GPIOP0 */
+ ASPEED_PULL_DOWN_PINCONF(AB22, SCU61C, 24),
+
+ /* GPIOO7 */
+ ASPEED_PULL_DOWN_PINCONF(AC23, SCU61C, 23),
+ /* GPIOO6 */
+ ASPEED_PULL_DOWN_PINCONF(AC24, SCU61C, 22),
+ /* GPIOO5 */
+ ASPEED_PULL_DOWN_PINCONF(AC22, SCU61C, 21),
+ /* GPIOO4 */
+ ASPEED_PULL_DOWN_PINCONF(AD25, SCU61C, 20),
+ /* GPIOO3 */
+ ASPEED_PULL_DOWN_PINCONF(AD24, SCU61C, 19),
+ /* GPIOO2 */
+ ASPEED_PULL_DOWN_PINCONF(AD23, SCU61C, 18),
+ /* GPIOO1 */
+ ASPEED_PULL_DOWN_PINCONF(AD22, SCU61C, 17),
+ /* GPIOO0 */
+ ASPEED_PULL_DOWN_PINCONF(AD26, SCU61C, 16),
+
+ /* GPION7 */
+ ASPEED_PULL_DOWN_PINCONF(M26, SCU61C, 15),
+ /* GPION6 */
+ ASPEED_PULL_DOWN_PINCONF(N26, SCU61C, 14),
+ /* GPION5 */
+ ASPEED_PULL_DOWN_PINCONF(M23, SCU61C, 13),
+ /* GPION4 */
+ ASPEED_PULL_DOWN_PINCONF(P26, SCU61C, 12),
+ /* GPION3 */
+ ASPEED_PULL_DOWN_PINCONF(N24, SCU61C, 11),
+ /* GPION2 */
+ ASPEED_PULL_DOWN_PINCONF(N25, SCU61C, 10),
+ /* GPION1 */
+ ASPEED_PULL_DOWN_PINCONF(N23, SCU61C, 9),
+ /* GPION0 */
+ ASPEED_PULL_DOWN_PINCONF(P25, SCU61C, 8),
+
+ /* GPIOM7 */
+ ASPEED_PULL_DOWN_PINCONF(D13, SCU61C, 7),
+ /* GPIOM6 */
+ ASPEED_PULL_DOWN_PINCONF(C13, SCU61C, 6),
+ /* GPIOM5 */
+ ASPEED_PULL_DOWN_PINCONF(C12, SCU61C, 5),
+ /* GPIOM4 */
+ ASPEED_PULL_DOWN_PINCONF(B12, SCU61C, 4),
+ /* GPIOM3 */
+ ASPEED_PULL_DOWN_PINCONF(E14, SCU61C, 3),
+ /* GPIOM2 */
+ ASPEED_PULL_DOWN_PINCONF(A12, SCU61C, 2),
+ /* GPIOM1 */
+ ASPEED_PULL_DOWN_PINCONF(B13, SCU61C, 1),
+ /* GPIOM0 */
+ ASPEED_PULL_DOWN_PINCONF(D14, SCU61C, 0),
+
+ /* GPIOS7 */
+ ASPEED_PULL_DOWN_PINCONF(T24, SCU620, 23),
+ /* GPIOS6 */
+ ASPEED_PULL_DOWN_PINCONF(P23, SCU620, 22),
+ /* GPIOS5 */
+ ASPEED_PULL_DOWN_PINCONF(P24, SCU620, 21),
+ /* GPIOS4 */
+ ASPEED_PULL_DOWN_PINCONF(R26, SCU620, 20),
+ /* GPIOS3 */
+ ASPEED_PULL_DOWN_PINCONF(R24, SCU620, 19),
+ /* GPIOS2 */
+ ASPEED_PULL_DOWN_PINCONF(T26, SCU620, 18),
+ /* GPIOS1 */
+ ASPEED_PULL_DOWN_PINCONF(T25, SCU620, 17),
+ /* GPIOS0 */
+ ASPEED_PULL_DOWN_PINCONF(R23, SCU620, 16),
+
+ /* GPIOR7 */
+ ASPEED_PULL_DOWN_PINCONF(U26, SCU620, 15),
+ /* GPIOR6 */
+ ASPEED_PULL_DOWN_PINCONF(W26, SCU620, 14),
+ /* GPIOR5 */
+ ASPEED_PULL_DOWN_PINCONF(T23, SCU620, 13),
+ /* GPIOR4 */
+ ASPEED_PULL_DOWN_PINCONF(U25, SCU620, 12),
+ /* GPIOR3 */
+ ASPEED_PULL_DOWN_PINCONF(V26, SCU620, 11),
+ /* GPIOR2 */
+ ASPEED_PULL_DOWN_PINCONF(V24, SCU620, 10),
+ /* GPIOR1 */
+ ASPEED_PULL_DOWN_PINCONF(U24, SCU620, 9),
+ /* GPIOR0 */
+ ASPEED_PULL_DOWN_PINCONF(V25, SCU620, 8),
+
+ /* GPIOX7 */
+ ASPEED_PULL_DOWN_PINCONF(AB10, SCU634, 31),
+ /* GPIOX6 */
+ ASPEED_PULL_DOWN_PINCONF(AF9, SCU634, 30),
+ /* GPIOX5 */
+ ASPEED_PULL_DOWN_PINCONF(AD9, SCU634, 29),
+ /* GPIOX4 */
+ ASPEED_PULL_DOWN_PINCONF(AB9, SCU634, 28),
+ /* GPIOX3 */
+ ASPEED_PULL_DOWN_PINCONF(AF8, SCU634, 27),
+ /* GPIOX2 */
+ ASPEED_PULL_DOWN_PINCONF(AC9, SCU634, 26),
+ /* GPIOX1 */
+ ASPEED_PULL_DOWN_PINCONF(AA9, SCU634, 25),
+ /* GPIOX0 */
+ ASPEED_PULL_DOWN_PINCONF(AE8, SCU634, 24),
+
+ /* GPIOV7 */
+ ASPEED_PULL_DOWN_PINCONF(AF15, SCU634, 15),
+ /* GPIOV6 */
+ ASPEED_PULL_DOWN_PINCONF(AD15, SCU634, 14),
+ /* GPIOV5 */
+ ASPEED_PULL_DOWN_PINCONF(AE14, SCU634, 13),
+ /* GPIOV4 */
+ ASPEED_PULL_DOWN_PINCONF(AE15, SCU634, 12),
+ /* GPIOV3 */
+ ASPEED_PULL_DOWN_PINCONF(AC15, SCU634, 11),
+ /* GPIOV2 */
+ ASPEED_PULL_DOWN_PINCONF(AD14, SCU634, 10),
+ /* GPIOV1 */
+ ASPEED_PULL_DOWN_PINCONF(AF14, SCU634, 9),
+ /* GPIOV0 */
+ ASPEED_PULL_DOWN_PINCONF(AB15, SCU634, 8),
+
+ /* GPIOZ7 */
+ ASPEED_PULL_DOWN_PINCONF(AF10, SCU638, 15),
+ /* GPIOZ6 */
+ ASPEED_PULL_DOWN_PINCONF(AD11, SCU638, 14),
+ /* GPIOZ5 */
+ ASPEED_PULL_DOWN_PINCONF(AA11, SCU638, 13),
+ /* GPIOZ4 */
+ ASPEED_PULL_DOWN_PINCONF(AC11, SCU638, 12),
+ /* GPIOZ3 */
+ ASPEED_PULL_DOWN_PINCONF(AB11, SCU638, 11),
+
+ /* GPIOZ1 */
+ ASPEED_PULL_DOWN_PINCONF(AD10, SCU638, 9),
+ /* GPIOZ0 */
+ ASPEED_PULL_DOWN_PINCONF(AC10, SCU638, 8),
+
+ /* GPIOY6 */
+ ASPEED_PULL_DOWN_PINCONF(AC12, SCU638, 6),
+ /* GPIOY5 */
+ ASPEED_PULL_DOWN_PINCONF(AF12, SCU638, 5),
+ /* GPIOY4 */
+ ASPEED_PULL_DOWN_PINCONF(AE12, SCU638, 4),
+ /* GPIOY3 */
+ ASPEED_PULL_DOWN_PINCONF(AA12, SCU638, 3),
+ /* GPIOY2 */
+ ASPEED_PULL_DOWN_PINCONF(AE11, SCU638, 2),
+ /* GPIOY1 */
+ ASPEED_PULL_DOWN_PINCONF(AD12, SCU638, 1),
+ /* GPIOY0 */
+ ASPEED_PULL_DOWN_PINCONF(AF11, SCU638, 0),
+
+ /* LAD3 */
+ { PIN_CONFIG_DRIVE_STRENGTH, { AC7, AC7 }, SCU454, GENMASK(31, 30)},
+ /* LAD2 */
+ { PIN_CONFIG_DRIVE_STRENGTH, { AC8, AC8 }, SCU454, GENMASK(29, 28)},
+ /* LAD1 */
+ { PIN_CONFIG_DRIVE_STRENGTH, { AB8, AB8 }, SCU454, GENMASK(27, 26)},
+ /* LAD0 */
+ { PIN_CONFIG_DRIVE_STRENGTH, { AB7, AB7 }, SCU454, GENMASK(25, 24)},
+
+ /* MAC3 */
+ { PIN_CONFIG_POWER_SOURCE, { H24, E26 }, SCU458, BIT_MASK(4)},
+ { PIN_CONFIG_DRIVE_STRENGTH, { H24, E26 }, SCU458, GENMASK(1, 0)},
+ /* MAC4 */
+ { PIN_CONFIG_POWER_SOURCE, { F24, B24 }, SCU458, BIT_MASK(5)},
+ { PIN_CONFIG_DRIVE_STRENGTH, { F24, B24 }, SCU458, GENMASK(3, 2)},
+};
+
/**
* Configure a pin's signal by applying an expression's descriptor state for
* all descriptors in the expression.
@@ -2297,6 +2655,20 @@ static int aspeed_g6_sig_expr_set(struct aspeed_pinmux_data *ctx,
return 0;
}
+static const struct aspeed_pin_config_map aspeed_g6_pin_config_map[] = {
+ { PIN_CONFIG_BIAS_PULL_DOWN, 0, 1, BIT_MASK(0)},
+ { PIN_CONFIG_BIAS_PULL_DOWN, -1, 0, BIT_MASK(0)},
+ { PIN_CONFIG_BIAS_PULL_UP, 0, 1, BIT_MASK(0)},
+ { PIN_CONFIG_BIAS_PULL_UP, -1, 0, BIT_MASK(0)},
+ { PIN_CONFIG_BIAS_DISABLE, -1, 1, BIT_MASK(0)},
+ { PIN_CONFIG_DRIVE_STRENGTH, 4, 0, GENMASK(1, 0)},
+ { PIN_CONFIG_DRIVE_STRENGTH, 8, 1, GENMASK(1, 0)},
+ { PIN_CONFIG_DRIVE_STRENGTH, 12, 2, GENMASK(1, 0)},
+ { PIN_CONFIG_DRIVE_STRENGTH, 16, 3, GENMASK(1, 0)},
+ { PIN_CONFIG_POWER_SOURCE, 3300, 0, BIT_MASK(0)},
+ { PIN_CONFIG_POWER_SOURCE, 1800, 1, BIT_MASK(0)},
+};
+
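Each row above ties a generic pinconf (param, arg) pair to the register value val behind mask, with -1 as a wildcard argument. A minimal sketch of the argument-side lookup, assuming simplified naming (find_field_val() is illustrative only; the driver's real lookup is find_pinconf_map() in pinctrl-aspeed.c, reworked later in this diff):

static int find_field_val(const struct aspeed_pin_config_map *maps,
			  unsigned int nmaps, enum pin_config_param param,
			  s32 arg, u32 *val, u32 *mask)
{
	unsigned int i;

	for (i = 0; i < nmaps; i++) {
		if (maps[i].param != param)
			continue;
		/* arg == -1 rows match any requested argument */
		if (maps[i].arg == -1 || maps[i].arg == arg) {
			*val = maps[i].val;
			*mask = maps[i].mask;
			return 0;
		}
	}

	return -EINVAL;
}

For PIN_CONFIG_DRIVE_STRENGTH with arg 12 this yields val 2 in a two-bit field; the driver then shifts val by __ffs() of the per-pin mask and applies it with regmap_update_bits(), as the pinctrl-aspeed.c hunks below show.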
static const struct aspeed_pinmux_ops aspeed_g5_ops = {
.set = aspeed_g6_sig_expr_set,
};
@@ -2311,6 +2683,10 @@ static struct aspeed_pinctrl_data aspeed_g6_pinctrl_data = {
.functions = aspeed_g6_functions,
.nfunctions = ARRAY_SIZE(aspeed_g6_functions),
},
+ .configs = aspeed_g6_configs,
+ .nconfigs = ARRAY_SIZE(aspeed_g6_configs),
+ .confmaps = aspeed_g6_pin_config_map,
+ .nconfmaps = ARRAY_SIZE(aspeed_g6_pin_config_map),
};
static const struct pinmux_ops aspeed_g6_pinmux_ops = {
@@ -2331,12 +2707,21 @@ static const struct pinctrl_ops aspeed_g6_pinctrl_ops = {
.dt_free_map = pinctrl_utils_free_map,
};
+static const struct pinconf_ops aspeed_g6_conf_ops = {
+ .is_generic = true,
+ .pin_config_get = aspeed_pin_config_get,
+ .pin_config_set = aspeed_pin_config_set,
+ .pin_config_group_get = aspeed_pin_config_group_get,
+ .pin_config_group_set = aspeed_pin_config_group_set,
+};
+
static struct pinctrl_desc aspeed_g6_pinctrl_desc = {
.name = "aspeed-g6-pinctrl",
.pins = aspeed_g6_pins,
.npins = ARRAY_SIZE(aspeed_g6_pins),
.pctlops = &aspeed_g6_pinctrl_ops,
.pmxops = &aspeed_g6_pinmux_ops,
+ .confops = &aspeed_g6_conf_ops,
};
static int aspeed_g6_pinctrl_probe(struct platform_device *pdev)
diff --git a/drivers/pinctrl/aspeed/pinctrl-aspeed.c b/drivers/pinctrl/aspeed/pinctrl-aspeed.c
index 54933665b5f8..b625a657171e 100644
--- a/drivers/pinctrl/aspeed/pinctrl-aspeed.c
+++ b/drivers/pinctrl/aspeed/pinctrl-aspeed.c
@@ -411,49 +411,21 @@ static inline const struct aspeed_pin_config *find_pinconf_config(
return NULL;
}
-/*
- * Aspeed pin configuration description.
- *
- * @param: pinconf configuration parameter
- * @arg: The supported argument for @param, or -1 if any value is supported
- * @val: The register value to write to configure @arg for @param
- *
- * The map is to be used in conjunction with the configuration array supplied
- * by the driver implementation.
- */
-struct aspeed_pin_config_map {
- enum pin_config_param param;
- s32 arg;
- u32 val;
-};
-
enum aspeed_pin_config_map_type { MAP_TYPE_ARG, MAP_TYPE_VAL };
-/* Aspeed consistently both:
- *
- * 1. Defines "disable bits" for internal pull-downs
- * 2. Uses 8mA or 16mA drive strengths
- */
-static const struct aspeed_pin_config_map pin_config_map[] = {
- { PIN_CONFIG_BIAS_PULL_DOWN, 0, 1 },
- { PIN_CONFIG_BIAS_PULL_DOWN, -1, 0 },
- { PIN_CONFIG_BIAS_DISABLE, -1, 1 },
- { PIN_CONFIG_DRIVE_STRENGTH, 8, 0 },
- { PIN_CONFIG_DRIVE_STRENGTH, 16, 1 },
-};
-
static const struct aspeed_pin_config_map *find_pinconf_map(
+ const struct aspeed_pinctrl_data *pdata,
enum pin_config_param param,
enum aspeed_pin_config_map_type type,
s64 value)
{
int i;
- for (i = 0; i < ARRAY_SIZE(pin_config_map); i++) {
+ for (i = 0; i < pdata->nconfmaps; i++) {
const struct aspeed_pin_config_map *elem;
bool match;
- elem = &pin_config_map[i];
+ elem = &pdata->confmaps[i];
switch (type) {
case MAP_TYPE_ARG:
@@ -491,8 +463,8 @@ int aspeed_pin_config_get(struct pinctrl_dev *pctldev, unsigned int offset,
if (rc < 0)
return rc;
- pmap = find_pinconf_map(param, MAP_TYPE_VAL,
- (val & BIT(pconf->bit)) >> pconf->bit);
+ pmap = find_pinconf_map(pdata, param, MAP_TYPE_VAL,
+ (val & pconf->mask) >> __ffs(pconf->mask));
if (!pmap)
return -EINVAL;
@@ -535,22 +507,22 @@ int aspeed_pin_config_set(struct pinctrl_dev *pctldev, unsigned int offset,
if (!pconf)
return -ENOTSUPP;
- pmap = find_pinconf_map(param, MAP_TYPE_ARG, arg);
+ pmap = find_pinconf_map(pdata, param, MAP_TYPE_ARG, arg);
if (WARN_ON(!pmap))
return -EINVAL;
- val = pmap->val << pconf->bit;
+ val = pmap->val << __ffs(pconf->mask);
rc = regmap_update_bits(pdata->scu, pconf->reg,
- BIT(pconf->bit), val);
+ pmap->mask, val);
if (rc < 0)
return rc;
- pr_debug("%s: Set SCU%02X[%d]=%d for param %d(=%d) on pin %d\n",
- __func__, pconf->reg, pconf->bit, pmap->val,
- param, arg, offset);
+ pr_debug("%s: Set SCU%02X[%lu]=%d for param %d(=%d) on pin %d\n",
+ __func__, pconf->reg, __ffs(pconf->mask),
+ pmap->val, param, arg, offset);
}
return 0;
diff --git a/drivers/pinctrl/aspeed/pinctrl-aspeed.h b/drivers/pinctrl/aspeed/pinctrl-aspeed.h
index a5d83986f32e..4dcde3bc29c8 100644
--- a/drivers/pinctrl/aspeed/pinctrl-aspeed.h
+++ b/drivers/pinctrl/aspeed/pinctrl-aspeed.h
@@ -24,8 +24,7 @@ struct aspeed_pin_config {
enum pin_config_param param;
unsigned int pins[2];
unsigned int reg;
- u8 bit;
- u8 value;
+ u32 mask;
};
#define ASPEED_PINCTRL_PIN(name_) \
@@ -35,6 +34,38 @@ struct aspeed_pin_config {
.drv_data = (void *) &(PIN_SYM(name_)) \
}
+#define ASPEED_SB_PINCONF(param_, pin0_, pin1_, reg_, bit_) { \
+ .param = param_, \
+ .pins = {pin0_, pin1_}, \
+ .reg = reg_, \
+ .mask = BIT_MASK(bit_) \
+}
+
+#define ASPEED_PULL_DOWN_PINCONF(pin_, reg_, bit_) \
+ ASPEED_SB_PINCONF(PIN_CONFIG_BIAS_PULL_DOWN, pin_, pin_, reg_, bit_), \
+ ASPEED_SB_PINCONF(PIN_CONFIG_BIAS_DISABLE, pin_, pin_, reg_, bit_)
+
+#define ASPEED_PULL_UP_PINCONF(pin_, reg_, bit_) \
+ ASPEED_SB_PINCONF(PIN_CONFIG_BIAS_PULL_UP, pin_, pin_, reg_, bit_), \
+ ASPEED_SB_PINCONF(PIN_CONFIG_BIAS_DISABLE, pin_, pin_, reg_, bit_)
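For reference, expanding one wrapper by hand (an illustration, not patch content; J24 and SCU610 come from the g6 driver above):

/* ASPEED_PULL_DOWN_PINCONF(J24, SCU610, 15) expands to the pair: */
static const struct aspeed_pin_config example_expansion[] = {
	{ .param = PIN_CONFIG_BIAS_PULL_DOWN, .pins = { J24, J24 },
	  .reg = SCU610, .mask = BIT_MASK(15) },
	{ .param = PIN_CONFIG_BIAS_DISABLE, .pins = { J24, J24 },
	  .reg = SCU610, .mask = BIT_MASK(15) },
};

That is, a single "disable internal pull-down" bit backs both the bias-pull-down and bias-disable parameters.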
+/*
+ * Aspeed pin configuration description.
+ *
+ * @param: pinconf configuration parameter
+ * @arg: The supported argument for @param, or -1 if any value is supported
+ * @val: The register value to write to configure @arg for @param
+ * @mask: The bitfield mask for @val
+ *
+ * The map is to be used in conjunction with the configuration array supplied
+ * by the driver implementation.
+ */
+struct aspeed_pin_config_map {
+ enum pin_config_param param;
+ s32 arg;
+ u32 val;
+ u32 mask;
+};
+
struct aspeed_pinctrl_data {
struct regmap *scu;
@@ -45,6 +76,9 @@ struct aspeed_pinctrl_data {
const unsigned int nconfigs;
struct aspeed_pinmux_data pinmux;
+
+ const struct aspeed_pin_config_map *confmaps;
+ const unsigned int nconfmaps;
};
/* Aspeed pinctrl helpers */
diff --git a/drivers/pinctrl/aspeed/pinmux-aspeed.h b/drivers/pinctrl/aspeed/pinmux-aspeed.h
index 140c5ce9fbc1..f86739e800c3 100644
--- a/drivers/pinctrl/aspeed/pinmux-aspeed.h
+++ b/drivers/pinctrl/aspeed/pinmux-aspeed.h
@@ -737,6 +737,7 @@ struct aspeed_pin_desc {
#define FUNC_DECL_(func, ...) \
static const char *FUNC_SYM(func)[] = { __VA_ARGS__ }
+#define FUNC_DECL_1(func, group) FUNC_DECL_(func, #group)
#define FUNC_DECL_2(func, one, two) FUNC_DECL_(func, #one, #two)
#define FUNC_DECL_3(func, one, two, three) FUNC_DECL_(func, #one, #two, #three)
diff --git a/drivers/pinctrl/bcm/pinctrl-iproc-gpio.c b/drivers/pinctrl/bcm/pinctrl-iproc-gpio.c
index 831a9318c384..25166217c3e0 100644
--- a/drivers/pinctrl/bcm/pinctrl-iproc-gpio.c
+++ b/drivers/pinctrl/bcm/pinctrl-iproc-gpio.c
@@ -286,6 +286,12 @@ static int iproc_gpio_irq_set_type(struct irq_data *d, unsigned int type)
iproc_set_bit(chip, IPROC_GPIO_INT_DE_OFFSET, gpio, dual_edge);
iproc_set_bit(chip, IPROC_GPIO_INT_EDGE_OFFSET, gpio,
rising_or_high);
+
+ if (type & IRQ_TYPE_EDGE_BOTH)
+ irq_set_handler_locked(d, handle_edge_irq);
+ else
+ irq_set_handler_locked(d, handle_level_irq);
+
raw_spin_unlock_irqrestore(&chip->lock, flags);
dev_dbg(chip->dev,
@@ -843,7 +849,7 @@ static int iproc_gpio_probe(struct platform_device *pdev)
"gpio-ranges");
/* optional GPIO interrupt support */
- irq = platform_get_irq(pdev, 0);
+ irq = platform_get_irq_optional(pdev, 0);
if (irq > 0) {
struct irq_chip *irqc;
struct gpio_irq_chip *girq;
@@ -868,7 +874,7 @@ static int iproc_gpio_probe(struct platform_device *pdev)
return -ENOMEM;
girq->parents[0] = irq;
girq->default_type = IRQ_TYPE_NONE;
- girq->handler = handle_simple_irq;
+ girq->handler = handle_bad_irq;
}
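An editorial note on how the two hunks in this file cooperate:

/*
 * girq->handler starts out as handle_bad_irq so that an interrupt
 * arriving before its trigger type is configured is flagged rather
 * than silently mishandled; iproc_gpio_irq_set_type() (changed above)
 * installs handle_edge_irq or handle_level_irq once the type is known.
 */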
ret = gpiochip_add_data(gc, chip);
diff --git a/drivers/pinctrl/bcm/pinctrl-ns2-mux.c b/drivers/pinctrl/bcm/pinctrl-ns2-mux.c
index 32f268f173d1..57044ab376d3 100644
--- a/drivers/pinctrl/bcm/pinctrl-ns2-mux.c
+++ b/drivers/pinctrl/bcm/pinctrl-ns2-mux.c
@@ -1049,7 +1049,7 @@ static int ns2_pinmux_probe(struct platform_device *pdev)
res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
if (!res)
return -EINVAL;
- pinctrl->base1 = devm_ioremap_nocache(&pdev->dev, res->start,
+ pinctrl->base1 = devm_ioremap(&pdev->dev, res->start,
resource_size(res));
if (!pinctrl->base1) {
dev_err(&pdev->dev, "unable to map I/O space\n");
diff --git a/drivers/pinctrl/bcm/pinctrl-nsp-mux.c b/drivers/pinctrl/bcm/pinctrl-nsp-mux.c
index 3756fc9d5826..f1d60a708815 100644
--- a/drivers/pinctrl/bcm/pinctrl-nsp-mux.c
+++ b/drivers/pinctrl/bcm/pinctrl-nsp-mux.c
@@ -578,7 +578,7 @@ static int nsp_pinmux_probe(struct platform_device *pdev)
res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
if (!res)
return -EINVAL;
- pinctrl->base1 = devm_ioremap_nocache(&pdev->dev, res->start,
+ pinctrl->base1 = devm_ioremap(&pdev->dev, res->start,
resource_size(res));
if (!pinctrl->base1) {
dev_err(&pdev->dev, "unable to map I/O space\n");
diff --git a/drivers/pinctrl/cirrus/pinctrl-madera-core.c b/drivers/pinctrl/cirrus/pinctrl-madera-core.c
index e2f72dcce4c9..7b6409ef553c 100644
--- a/drivers/pinctrl/cirrus/pinctrl-madera-core.c
+++ b/drivers/pinctrl/cirrus/pinctrl-madera-core.c
@@ -560,7 +560,6 @@ static void __maybe_unused madera_pin_dbg_show(struct pinctrl_dev *pctldev,
seq_puts(s, " SCHMITT");
}
-
static const struct pinctrl_ops madera_pin_group_ops = {
.get_groups_count = madera_get_groups_count,
.get_group_name = madera_get_group_name,
diff --git a/drivers/pinctrl/core.c b/drivers/pinctrl/core.c
index 2bbd8ee93507..446d84fe0e31 100644
--- a/drivers/pinctrl/core.c
+++ b/drivers/pinctrl/core.c
@@ -1376,8 +1376,15 @@ void devm_pinctrl_put(struct pinctrl *p)
}
EXPORT_SYMBOL_GPL(devm_pinctrl_put);
-int pinctrl_register_map(const struct pinctrl_map *maps, unsigned num_maps,
- bool dup)
+/**
+ * pinctrl_register_mappings() - register a set of pin controller mappings
+ * @maps: the pincontrol mappings table to register. Note the pinctrl-core
+ * keeps a reference to the passed in maps, so they should _not_ be
+ * marked with __initdata.
+ * @num_maps: the number of maps in the mapping table
+ */
+int pinctrl_register_mappings(const struct pinctrl_map *maps,
+ unsigned num_maps)
{
int i, ret;
struct pinctrl_maps *maps_node;
@@ -1430,17 +1437,8 @@ int pinctrl_register_map(const struct pinctrl_map *maps, unsigned num_maps,
if (!maps_node)
return -ENOMEM;
+ maps_node->maps = maps;
maps_node->num_maps = num_maps;
- if (dup) {
- maps_node->maps = kmemdup(maps, sizeof(*maps) * num_maps,
- GFP_KERNEL);
- if (!maps_node->maps) {
- kfree(maps_node);
- return -ENOMEM;
- }
- } else {
- maps_node->maps = maps;
- }
mutex_lock(&pinctrl_maps_mutex);
list_add_tail(&maps_node->node, &pinctrl_maps);
@@ -1448,22 +1446,14 @@ int pinctrl_register_map(const struct pinctrl_map *maps, unsigned num_maps,
return 0;
}
+EXPORT_SYMBOL_GPL(pinctrl_register_mappings);
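A minimal usage sketch under the new contract (device, controller, and group names here are invented): because the core now keeps a reference instead of copying, the table is file-scope static rather than __initdata.

#include <linux/kernel.h>
#include <linux/pinctrl/machine.h>

static const struct pinctrl_map board_maps[] = {
	PIN_MAP_MUX_GROUP("ff000000.serial", PINCTRL_STATE_DEFAULT,
			  "pinctrl-foo", "uart0_grp", "uart0"),
};

static int __init board_pinctrl_init(void)
{
	return pinctrl_register_mappings(board_maps, ARRAY_SIZE(board_maps));
}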
/**
- * pinctrl_register_mappings() - register a set of pin controller mappings
- * @maps: the pincontrol mappings table to register. This should probably be
- * marked with __initdata so it can be discarded after boot. This
- * function will perform a shallow copy for the mapping entries.
- * @num_maps: the number of maps in the mapping table
+ * pinctrl_unregister_mappings() - unregister a set of pin controller mappings
+ * @maps: the pincontrol mappings table passed to pinctrl_register_mappings()
+ * when registering the mappings.
*/
-int pinctrl_register_mappings(const struct pinctrl_map *maps,
- unsigned num_maps)
-{
- return pinctrl_register_map(maps, num_maps, true);
-}
-EXPORT_SYMBOL_GPL(pinctrl_register_mappings);
-
-void pinctrl_unregister_map(const struct pinctrl_map *map)
+void pinctrl_unregister_mappings(const struct pinctrl_map *map)
{
struct pinctrl_maps *maps_node;
@@ -1478,6 +1468,7 @@ void pinctrl_unregister_map(const struct pinctrl_map *map)
}
mutex_unlock(&pinctrl_maps_mutex);
}
+EXPORT_SYMBOL_GPL(pinctrl_unregister_mappings);
/**
* pinctrl_force_sleep() - turn a given controller device into sleep state
@@ -1535,15 +1526,8 @@ int pinctrl_init_done(struct device *dev)
return ret;
}
-#ifdef CONFIG_PM
-
-/**
- * pinctrl_pm_select_state() - select pinctrl state for PM
- * @dev: device to select default state for
- * @state: state to set
- */
-static int pinctrl_pm_select_state(struct device *dev,
- struct pinctrl_state *state)
+static int pinctrl_select_bound_state(struct device *dev,
+ struct pinctrl_state *state)
{
struct dev_pin_info *pins = dev->pins;
int ret;
@@ -1558,15 +1542,27 @@ static int pinctrl_pm_select_state(struct device *dev,
}
/**
- * pinctrl_pm_select_default_state() - select default pinctrl state for PM
+ * pinctrl_select_default_state() - select default pinctrl state
* @dev: device to select default state for
*/
-int pinctrl_pm_select_default_state(struct device *dev)
+int pinctrl_select_default_state(struct device *dev)
{
if (!dev->pins)
return 0;
- return pinctrl_pm_select_state(dev, dev->pins->default_state);
+ return pinctrl_select_bound_state(dev, dev->pins->default_state);
+}
+EXPORT_SYMBOL_GPL(pinctrl_select_default_state);
+
+#ifdef CONFIG_PM
+
+/**
+ * pinctrl_pm_select_default_state() - select default pinctrl state for PM
+ * @dev: device to select default state for
+ */
+int pinctrl_pm_select_default_state(struct device *dev)
+{
+ return pinctrl_select_default_state(dev);
}
EXPORT_SYMBOL_GPL(pinctrl_pm_select_default_state);
@@ -1579,7 +1575,7 @@ int pinctrl_pm_select_sleep_state(struct device *dev)
if (!dev->pins)
return 0;
- return pinctrl_pm_select_state(dev, dev->pins->sleep_state);
+ return pinctrl_select_bound_state(dev, dev->pins->sleep_state);
}
EXPORT_SYMBOL_GPL(pinctrl_pm_select_sleep_state);
@@ -1592,7 +1588,7 @@ int pinctrl_pm_select_idle_state(struct device *dev)
if (!dev->pins)
return 0;
- return pinctrl_pm_select_state(dev, dev->pins->idle_state);
+ return pinctrl_select_bound_state(dev, dev->pins->idle_state);
}
EXPORT_SYMBOL_GPL(pinctrl_pm_select_idle_state);
#endif
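With the export above, restoring the default pin state no longer has to go through a PM wrapper; a hypothetical caller:

#include <linux/pinctrl/consumer.h>

static int example_apply_default_pins(struct device *dev)
{
	/* Harmless no-op when no pinctrl states are bound to the device */
	return pinctrl_select_default_state(dev);
}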
diff --git a/drivers/pinctrl/core.h b/drivers/pinctrl/core.h
index 7f34167a0405..840103c40c14 100644
--- a/drivers/pinctrl/core.h
+++ b/drivers/pinctrl/core.h
@@ -236,10 +236,6 @@ extern struct pinctrl_gpio_range *
pinctrl_find_gpio_range_from_pin_nolock(struct pinctrl_dev *pctldev,
unsigned int pin);
-int pinctrl_register_map(const struct pinctrl_map *maps, unsigned num_maps,
- bool dup);
-void pinctrl_unregister_map(const struct pinctrl_map *map);
-
extern int pinctrl_force_sleep(struct pinctrl_dev *pctldev);
extern int pinctrl_force_default(struct pinctrl_dev *pctldev);
diff --git a/drivers/pinctrl/devicetree.c b/drivers/pinctrl/devicetree.c
index 674920daac26..9357f7c46cf3 100644
--- a/drivers/pinctrl/devicetree.c
+++ b/drivers/pinctrl/devicetree.c
@@ -51,7 +51,7 @@ void pinctrl_dt_free_maps(struct pinctrl *p)
struct pinctrl_dt_map *dt_map, *n1;
list_for_each_entry_safe(dt_map, n1, &p->dt_maps, node) {
- pinctrl_unregister_map(dt_map->map);
+ pinctrl_unregister_mappings(dt_map->map);
list_del(&dt_map->node);
dt_free_map(dt_map->pctldev, dt_map->map,
dt_map->num_maps);
@@ -92,7 +92,7 @@ static int dt_remember_or_free_map(struct pinctrl *p, const char *statename,
dt_map->num_maps = num_maps;
list_add_tail(&dt_map->node, &p->dt_maps);
- return pinctrl_register_map(map, num_maps, false);
+ return pinctrl_register_mappings(map, num_maps);
err_free_map:
dt_free_map(pctldev, map, num_maps);
diff --git a/drivers/pinctrl/freescale/Kconfig b/drivers/pinctrl/freescale/Kconfig
index 3ea9ce3e0cd9..de775a85a51e 100644
--- a/drivers/pinctrl/freescale/Kconfig
+++ b/drivers/pinctrl/freescale/Kconfig
@@ -137,6 +137,13 @@ config PINCTRL_IMX8MN
help
Say Y here to enable the imx8mn pinctrl driver
+config PINCTRL_IMX8MP
+ bool "IMX8MP pinctrl driver"
+ depends on ARCH_MXC && ARM64
+ select PINCTRL_IMX
+ help
+ Say Y here to enable the imx8mp pinctrl driver
+
config PINCTRL_IMX8MQ
bool "IMX8MQ pinctrl driver"
depends on ARCH_MXC && ARM64
diff --git a/drivers/pinctrl/freescale/Makefile b/drivers/pinctrl/freescale/Makefile
index 78e9140c13e3..0ebd3af21e4d 100644
--- a/drivers/pinctrl/freescale/Makefile
+++ b/drivers/pinctrl/freescale/Makefile
@@ -20,6 +20,7 @@ obj-$(CONFIG_PINCTRL_IMX7D) += pinctrl-imx7d.o
obj-$(CONFIG_PINCTRL_IMX7ULP) += pinctrl-imx7ulp.o
obj-$(CONFIG_PINCTRL_IMX8MM) += pinctrl-imx8mm.o
obj-$(CONFIG_PINCTRL_IMX8MN) += pinctrl-imx8mn.o
+obj-$(CONFIG_PINCTRL_IMX8MP) += pinctrl-imx8mp.o
obj-$(CONFIG_PINCTRL_IMX8MQ) += pinctrl-imx8mq.o
obj-$(CONFIG_PINCTRL_IMX8QM) += pinctrl-imx8qm.o
obj-$(CONFIG_PINCTRL_IMX8QXP) += pinctrl-imx8qxp.o
diff --git a/drivers/pinctrl/freescale/pinctrl-imx1-core.c b/drivers/pinctrl/freescale/pinctrl-imx1-core.c
index 7e29e3fecdb2..c00d0022d311 100644
--- a/drivers/pinctrl/freescale/pinctrl-imx1-core.c
+++ b/drivers/pinctrl/freescale/pinctrl-imx1-core.c
@@ -611,7 +611,7 @@ int imx1_pinctrl_core_probe(struct platform_device *pdev,
if (!res)
return -ENOENT;
- ipctl->base = devm_ioremap_nocache(&pdev->dev, res->start,
+ ipctl->base = devm_ioremap(&pdev->dev, res->start,
resource_size(res));
if (!ipctl->base)
return -ENOMEM;
diff --git a/drivers/pinctrl/freescale/pinctrl-imx8mp.c b/drivers/pinctrl/freescale/pinctrl-imx8mp.c
new file mode 100644
index 000000000000..e3f644c2ec13
--- /dev/null
+++ b/drivers/pinctrl/freescale/pinctrl-imx8mp.c
@@ -0,0 +1,345 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright 2019 NXP
+ */
+
+#include <linux/err.h>
+#include <linux/init.h>
+#include <linux/of.h>
+#include <linux/pinctrl/pinctrl.h>
+#include <linux/platform_device.h>
+
+#include "pinctrl-imx.h"
+
+enum imx8mp_pads {
+ MX8MP_IOMUXC_RESERVE0 = 0,
+ MX8MP_IOMUXC_RESERVE1 = 1,
+ MX8MP_IOMUXC_RESERVE2 = 2,
+ MX8MP_IOMUXC_RESERVE3 = 3,
+ MX8MP_IOMUXC_RESERVE4 = 4,
+ MX8MP_IOMUXC_GPIO1_IO00 = 5,
+ MX8MP_IOMUXC_GPIO1_IO01 = 6,
+ MX8MP_IOMUXC_GPIO1_IO02 = 7,
+ MX8MP_IOMUXC_GPIO1_IO03 = 8,
+ MX8MP_IOMUXC_GPIO1_IO04 = 9,
+ MX8MP_IOMUXC_GPIO1_IO05 = 10,
+ MX8MP_IOMUXC_GPIO1_IO06 = 11,
+ MX8MP_IOMUXC_GPIO1_IO07 = 12,
+ MX8MP_IOMUXC_GPIO1_IO08 = 13,
+ MX8MP_IOMUXC_GPIO1_IO09 = 14,
+ MX8MP_IOMUXC_GPIO1_IO10 = 15,
+ MX8MP_IOMUXC_GPIO1_IO11 = 16,
+ MX8MP_IOMUXC_GPIO1_IO12 = 17,
+ MX8MP_IOMUXC_GPIO1_IO13 = 18,
+ MX8MP_IOMUXC_GPIO1_IO14 = 19,
+ MX8MP_IOMUXC_GPIO1_IO15 = 20,
+ MX8MP_IOMUXC_ENET_MDC = 21,
+ MX8MP_IOMUXC_ENET_MDIO = 22,
+ MX8MP_IOMUXC_ENET_TD3 = 23,
+ MX8MP_IOMUXC_ENET_TD2 = 24,
+ MX8MP_IOMUXC_ENET_TD1 = 25,
+ MX8MP_IOMUXC_ENET_TD0 = 26,
+ MX8MP_IOMUXC_ENET_TX_CTL = 27,
+ MX8MP_IOMUXC_ENET_TXC = 28,
+ MX8MP_IOMUXC_ENET_RX_CTL = 29,
+ MX8MP_IOMUXC_ENET_RXC = 30,
+ MX8MP_IOMUXC_ENET_RD0 = 31,
+ MX8MP_IOMUXC_ENET_RD1 = 32,
+ MX8MP_IOMUXC_ENET_RD2 = 33,
+ MX8MP_IOMUXC_ENET_RD3 = 34,
+ MX8MP_IOMUXC_SD1_CLK = 35,
+ MX8MP_IOMUXC_SD1_CMD = 36,
+ MX8MP_IOMUXC_SD1_DATA0 = 37,
+ MX8MP_IOMUXC_SD1_DATA1 = 38,
+ MX8MP_IOMUXC_SD1_DATA2 = 39,
+ MX8MP_IOMUXC_SD1_DATA3 = 40,
+ MX8MP_IOMUXC_SD1_DATA4 = 41,
+ MX8MP_IOMUXC_SD1_DATA5 = 42,
+ MX8MP_IOMUXC_SD1_DATA6 = 43,
+ MX8MP_IOMUXC_SD1_DATA7 = 44,
+ MX8MP_IOMUXC_SD1_RESET_B = 45,
+ MX8MP_IOMUXC_SD1_STROBE = 46,
+ MX8MP_IOMUXC_SD2_CD_B = 47,
+ MX8MP_IOMUXC_SD2_CLK = 48,
+ MX8MP_IOMUXC_SD2_CMD = 49,
+ MX8MP_IOMUXC_SD2_DATA0 = 50,
+ MX8MP_IOMUXC_SD2_DATA1 = 51,
+ MX8MP_IOMUXC_SD2_DATA2 = 52,
+ MX8MP_IOMUXC_SD2_DATA3 = 53,
+ MX8MP_IOMUXC_SD2_RESET_B = 54,
+ MX8MP_IOMUXC_SD2_WP = 55,
+ MX8MP_IOMUXC_NAND_ALE = 56,
+ MX8MP_IOMUXC_NAND_CE0_B = 57,
+ MX8MP_IOMUXC_NAND_CE1_B = 58,
+ MX8MP_IOMUXC_NAND_CE2_B = 59,
+ MX8MP_IOMUXC_NAND_CE3_B = 60,
+ MX8MP_IOMUXC_NAND_CLE = 61,
+ MX8MP_IOMUXC_NAND_DATA00 = 62,
+ MX8MP_IOMUXC_NAND_DATA01 = 63,
+ MX8MP_IOMUXC_NAND_DATA02 = 64,
+ MX8MP_IOMUXC_NAND_DATA03 = 65,
+ MX8MP_IOMUXC_NAND_DATA04 = 66,
+ MX8MP_IOMUXC_NAND_DATA05 = 67,
+ MX8MP_IOMUXC_NAND_DATA06 = 68,
+ MX8MP_IOMUXC_NAND_DATA07 = 69,
+ MX8MP_IOMUXC_NAND_DQS = 70,
+ MX8MP_IOMUXC_NAND_RE_B = 71,
+ MX8MP_IOMUXC_NAND_READY_B = 72,
+ MX8MP_IOMUXC_NAND_WE_B = 73,
+ MX8MP_IOMUXC_NAND_WP_B = 74,
+ MX8MP_IOMUXC_SAI5_RXFS = 75,
+ MX8MP_IOMUXC_SAI5_RXC = 76,
+ MX8MP_IOMUXC_SAI5_RXD0 = 77,
+ MX8MP_IOMUXC_SAI5_RXD1 = 78,
+ MX8MP_IOMUXC_SAI5_RXD2 = 79,
+ MX8MP_IOMUXC_SAI5_RXD3 = 80,
+ MX8MP_IOMUXC_SAI5_MCLK = 81,
+ MX8MP_IOMUXC_SAI1_RXFS = 82,
+ MX8MP_IOMUXC_SAI1_RXC = 83,
+ MX8MP_IOMUXC_SAI1_RXD0 = 84,
+ MX8MP_IOMUXC_SAI1_RXD1 = 85,
+ MX8MP_IOMUXC_SAI1_RXD2 = 86,
+ MX8MP_IOMUXC_SAI1_RXD3 = 87,
+ MX8MP_IOMUXC_SAI1_RXD4 = 88,
+ MX8MP_IOMUXC_SAI1_RXD5 = 89,
+ MX8MP_IOMUXC_SAI1_RXD6 = 90,
+ MX8MP_IOMUXC_SAI1_RXD7 = 91,
+ MX8MP_IOMUXC_SAI1_TXFS = 92,
+ MX8MP_IOMUXC_SAI1_TXC = 93,
+ MX8MP_IOMUXC_SAI1_TXD0 = 94,
+ MX8MP_IOMUXC_SAI1_TXD1 = 95,
+ MX8MP_IOMUXC_SAI1_TXD2 = 96,
+ MX8MP_IOMUXC_SAI1_TXD3 = 97,
+ MX8MP_IOMUXC_SAI1_TXD4 = 98,
+ MX8MP_IOMUXC_SAI1_TXD5 = 99,
+ MX8MP_IOMUXC_SAI1_TXD6 = 100,
+ MX8MP_IOMUXC_SAI1_TXD7 = 101,
+ MX8MP_IOMUXC_SAI1_MCLK = 102,
+ MX8MP_IOMUXC_SAI2_RXFS = 103,
+ MX8MP_IOMUXC_SAI2_RXC = 104,
+ MX8MP_IOMUXC_SAI2_RXD0 = 105,
+ MX8MP_IOMUXC_SAI2_TXFS = 106,
+ MX8MP_IOMUXC_SAI2_TXC = 107,
+ MX8MP_IOMUXC_SAI2_TXD0 = 108,
+ MX8MP_IOMUXC_SAI2_MCLK = 109,
+ MX8MP_IOMUXC_SAI3_RXFS = 110,
+ MX8MP_IOMUXC_SAI3_RXC = 111,
+ MX8MP_IOMUXC_SAI3_RXD = 112,
+ MX8MP_IOMUXC_SAI3_TXFS = 113,
+ MX8MP_IOMUXC_SAI3_TXC = 114,
+ MX8MP_IOMUXC_SAI3_TXD = 115,
+ MX8MP_IOMUXC_SAI3_MCLK = 116,
+ MX8MP_IOMUXC_SPDIF_TX = 117,
+ MX8MP_IOMUXC_SPDIF_RX = 118,
+ MX8MP_IOMUXC_SPDIF_EXT_CLK = 119,
+ MX8MP_IOMUXC_ECSPI1_SCLK = 120,
+ MX8MP_IOMUXC_ECSPI1_MOSI = 121,
+ MX8MP_IOMUXC_ECSPI1_MISO = 122,
+ MX8MP_IOMUXC_ECSPI1_SS0 = 123,
+ MX8MP_IOMUXC_ECSPI2_SCLK = 124,
+ MX8MP_IOMUXC_ECSPI2_MOSI = 125,
+ MX8MP_IOMUXC_ECSPI2_MISO = 126,
+ MX8MP_IOMUXC_ECSPI2_SS0 = 127,
+ MX8MP_IOMUXC_I2C1_SCL = 128,
+ MX8MP_IOMUXC_I2C1_SDA = 129,
+ MX8MP_IOMUXC_I2C2_SCL = 130,
+ MX8MP_IOMUXC_I2C2_SDA = 131,
+ MX8MP_IOMUXC_I2C3_SCL = 132,
+ MX8MP_IOMUXC_I2C3_SDA = 133,
+ MX8MP_IOMUXC_I2C4_SCL = 134,
+ MX8MP_IOMUXC_I2C4_SDA = 135,
+ MX8MP_IOMUXC_UART1_RXD = 136,
+ MX8MP_IOMUXC_UART1_TXD = 137,
+ MX8MP_IOMUXC_UART2_RXD = 138,
+ MX8MP_IOMUXC_UART2_TXD = 139,
+ MX8MP_IOMUXC_UART3_RXD = 140,
+ MX8MP_IOMUXC_UART3_TXD = 141,
+ MX8MP_IOMUXC_UART4_RXD = 142,
+ MX8MP_IOMUXC_UART4_TXD = 143,
+ MX8MP_IOMUXC_HDMI_DDC_SCL = 144,
+ MX8MP_IOMUXC_HDMI_DDC_SDA = 145,
+ MX8MP_IOMUXC_HDMI_CEC = 146,
+ MX8MP_IOMUXC_HDMI_HPD = 147,
+};
+
+/* Pad names for the pinmux subsystem */
+static const struct pinctrl_pin_desc imx8mp_pinctrl_pads[] = {
+ IMX_PINCTRL_PIN(MX8MP_IOMUXC_RESERVE0),
+ IMX_PINCTRL_PIN(MX8MP_IOMUXC_RESERVE1),
+ IMX_PINCTRL_PIN(MX8MP_IOMUXC_RESERVE2),
+ IMX_PINCTRL_PIN(MX8MP_IOMUXC_RESERVE3),
+ IMX_PINCTRL_PIN(MX8MP_IOMUXC_RESERVE4),
+ IMX_PINCTRL_PIN(MX8MP_IOMUXC_GPIO1_IO00),
+ IMX_PINCTRL_PIN(MX8MP_IOMUXC_GPIO1_IO01),
+ IMX_PINCTRL_PIN(MX8MP_IOMUXC_GPIO1_IO02),
+ IMX_PINCTRL_PIN(MX8MP_IOMUXC_GPIO1_IO03),
+ IMX_PINCTRL_PIN(MX8MP_IOMUXC_GPIO1_IO04),
+ IMX_PINCTRL_PIN(MX8MP_IOMUXC_GPIO1_IO05),
+ IMX_PINCTRL_PIN(MX8MP_IOMUXC_GPIO1_IO06),
+ IMX_PINCTRL_PIN(MX8MP_IOMUXC_GPIO1_IO07),
+ IMX_PINCTRL_PIN(MX8MP_IOMUXC_GPIO1_IO08),
+ IMX_PINCTRL_PIN(MX8MP_IOMUXC_GPIO1_IO09),
+ IMX_PINCTRL_PIN(MX8MP_IOMUXC_GPIO1_IO10),
+ IMX_PINCTRL_PIN(MX8MP_IOMUXC_GPIO1_IO11),
+ IMX_PINCTRL_PIN(MX8MP_IOMUXC_GPIO1_IO12),
+ IMX_PINCTRL_PIN(MX8MP_IOMUXC_GPIO1_IO13),
+ IMX_PINCTRL_PIN(MX8MP_IOMUXC_GPIO1_IO14),
+ IMX_PINCTRL_PIN(MX8MP_IOMUXC_GPIO1_IO15),
+ IMX_PINCTRL_PIN(MX8MP_IOMUXC_ENET_MDC),
+ IMX_PINCTRL_PIN(MX8MP_IOMUXC_ENET_MDIO),
+ IMX_PINCTRL_PIN(MX8MP_IOMUXC_ENET_TD3),
+ IMX_PINCTRL_PIN(MX8MP_IOMUXC_ENET_TD2),
+ IMX_PINCTRL_PIN(MX8MP_IOMUXC_ENET_TD1),
+ IMX_PINCTRL_PIN(MX8MP_IOMUXC_ENET_TD0),
+ IMX_PINCTRL_PIN(MX8MP_IOMUXC_ENET_TX_CTL),
+ IMX_PINCTRL_PIN(MX8MP_IOMUXC_ENET_TXC),
+ IMX_PINCTRL_PIN(MX8MP_IOMUXC_ENET_RX_CTL),
+ IMX_PINCTRL_PIN(MX8MP_IOMUXC_ENET_RXC),
+ IMX_PINCTRL_PIN(MX8MP_IOMUXC_ENET_RD0),
+ IMX_PINCTRL_PIN(MX8MP_IOMUXC_ENET_RD1),
+ IMX_PINCTRL_PIN(MX8MP_IOMUXC_ENET_RD2),
+ IMX_PINCTRL_PIN(MX8MP_IOMUXC_ENET_RD3),
+ IMX_PINCTRL_PIN(MX8MP_IOMUXC_SD1_CLK),
+ IMX_PINCTRL_PIN(MX8MP_IOMUXC_SD1_CMD),
+ IMX_PINCTRL_PIN(MX8MP_IOMUXC_SD1_DATA0),
+ IMX_PINCTRL_PIN(MX8MP_IOMUXC_SD1_DATA1),
+ IMX_PINCTRL_PIN(MX8MP_IOMUXC_SD1_DATA2),
+ IMX_PINCTRL_PIN(MX8MP_IOMUXC_SD1_DATA3),
+ IMX_PINCTRL_PIN(MX8MP_IOMUXC_SD1_DATA4),
+ IMX_PINCTRL_PIN(MX8MP_IOMUXC_SD1_DATA5),
+ IMX_PINCTRL_PIN(MX8MP_IOMUXC_SD1_DATA6),
+ IMX_PINCTRL_PIN(MX8MP_IOMUXC_SD1_DATA7),
+ IMX_PINCTRL_PIN(MX8MP_IOMUXC_SD1_RESET_B),
+ IMX_PINCTRL_PIN(MX8MP_IOMUXC_SD1_STROBE),
+ IMX_PINCTRL_PIN(MX8MP_IOMUXC_SD2_CD_B),
+ IMX_PINCTRL_PIN(MX8MP_IOMUXC_SD2_CLK),
+ IMX_PINCTRL_PIN(MX8MP_IOMUXC_SD2_CMD),
+ IMX_PINCTRL_PIN(MX8MP_IOMUXC_SD2_DATA0),
+ IMX_PINCTRL_PIN(MX8MP_IOMUXC_SD2_DATA1),
+ IMX_PINCTRL_PIN(MX8MP_IOMUXC_SD2_DATA2),
+ IMX_PINCTRL_PIN(MX8MP_IOMUXC_SD2_DATA3),
+ IMX_PINCTRL_PIN(MX8MP_IOMUXC_SD2_RESET_B),
+ IMX_PINCTRL_PIN(MX8MP_IOMUXC_SD2_WP),
+ IMX_PINCTRL_PIN(MX8MP_IOMUXC_NAND_ALE),
+ IMX_PINCTRL_PIN(MX8MP_IOMUXC_NAND_CE0_B),
+ IMX_PINCTRL_PIN(MX8MP_IOMUXC_NAND_CE1_B),
+ IMX_PINCTRL_PIN(MX8MP_IOMUXC_NAND_CE2_B),
+ IMX_PINCTRL_PIN(MX8MP_IOMUXC_NAND_CE3_B),
+ IMX_PINCTRL_PIN(MX8MP_IOMUXC_NAND_CLE),
+ IMX_PINCTRL_PIN(MX8MP_IOMUXC_NAND_DATA00),
+ IMX_PINCTRL_PIN(MX8MP_IOMUXC_NAND_DATA01),
+ IMX_PINCTRL_PIN(MX8MP_IOMUXC_NAND_DATA02),
+ IMX_PINCTRL_PIN(MX8MP_IOMUXC_NAND_DATA03),
+ IMX_PINCTRL_PIN(MX8MP_IOMUXC_NAND_DATA04),
+ IMX_PINCTRL_PIN(MX8MP_IOMUXC_NAND_DATA05),
+ IMX_PINCTRL_PIN(MX8MP_IOMUXC_NAND_DATA06),
+ IMX_PINCTRL_PIN(MX8MP_IOMUXC_NAND_DATA07),
+ IMX_PINCTRL_PIN(MX8MP_IOMUXC_NAND_DQS),
+ IMX_PINCTRL_PIN(MX8MP_IOMUXC_NAND_RE_B),
+ IMX_PINCTRL_PIN(MX8MP_IOMUXC_NAND_READY_B),
+ IMX_PINCTRL_PIN(MX8MP_IOMUXC_NAND_WE_B),
+ IMX_PINCTRL_PIN(MX8MP_IOMUXC_NAND_WP_B),
+ IMX_PINCTRL_PIN(MX8MP_IOMUXC_SAI5_RXFS),
+ IMX_PINCTRL_PIN(MX8MP_IOMUXC_SAI5_RXC),
+ IMX_PINCTRL_PIN(MX8MP_IOMUXC_SAI5_RXD0),
+ IMX_PINCTRL_PIN(MX8MP_IOMUXC_SAI5_RXD1),
+ IMX_PINCTRL_PIN(MX8MP_IOMUXC_SAI5_RXD2),
+ IMX_PINCTRL_PIN(MX8MP_IOMUXC_SAI5_RXD3),
+ IMX_PINCTRL_PIN(MX8MP_IOMUXC_SAI5_MCLK),
+ IMX_PINCTRL_PIN(MX8MP_IOMUXC_SAI1_RXFS),
+ IMX_PINCTRL_PIN(MX8MP_IOMUXC_SAI1_RXC),
+ IMX_PINCTRL_PIN(MX8MP_IOMUXC_SAI1_RXD0),
+ IMX_PINCTRL_PIN(MX8MP_IOMUXC_SAI1_RXD1),
+ IMX_PINCTRL_PIN(MX8MP_IOMUXC_SAI1_RXD2),
+ IMX_PINCTRL_PIN(MX8MP_IOMUXC_SAI1_RXD3),
+ IMX_PINCTRL_PIN(MX8MP_IOMUXC_SAI1_RXD4),
+ IMX_PINCTRL_PIN(MX8MP_IOMUXC_SAI1_RXD5),
+ IMX_PINCTRL_PIN(MX8MP_IOMUXC_SAI1_RXD6),
+ IMX_PINCTRL_PIN(MX8MP_IOMUXC_SAI1_RXD7),
+ IMX_PINCTRL_PIN(MX8MP_IOMUXC_SAI1_TXFS),
+ IMX_PINCTRL_PIN(MX8MP_IOMUXC_SAI1_TXC),
+ IMX_PINCTRL_PIN(MX8MP_IOMUXC_SAI1_TXD0),
+ IMX_PINCTRL_PIN(MX8MP_IOMUXC_SAI1_TXD1),
+ IMX_PINCTRL_PIN(MX8MP_IOMUXC_SAI1_TXD2),
+ IMX_PINCTRL_PIN(MX8MP_IOMUXC_SAI1_TXD3),
+ IMX_PINCTRL_PIN(MX8MP_IOMUXC_SAI1_TXD4),
+ IMX_PINCTRL_PIN(MX8MP_IOMUXC_SAI1_TXD5),
+ IMX_PINCTRL_PIN(MX8MP_IOMUXC_SAI1_TXD6),
+ IMX_PINCTRL_PIN(MX8MP_IOMUXC_SAI1_TXD7),
+ IMX_PINCTRL_PIN(MX8MP_IOMUXC_SAI1_MCLK),
+ IMX_PINCTRL_PIN(MX8MP_IOMUXC_SAI2_RXFS),
+ IMX_PINCTRL_PIN(MX8MP_IOMUXC_SAI2_RXC),
+ IMX_PINCTRL_PIN(MX8MP_IOMUXC_SAI2_RXD0),
+ IMX_PINCTRL_PIN(MX8MP_IOMUXC_SAI2_TXFS),
+ IMX_PINCTRL_PIN(MX8MP_IOMUXC_SAI2_TXC),
+ IMX_PINCTRL_PIN(MX8MP_IOMUXC_SAI2_TXD0),
+ IMX_PINCTRL_PIN(MX8MP_IOMUXC_SAI2_MCLK),
+ IMX_PINCTRL_PIN(MX8MP_IOMUXC_SAI3_RXFS),
+ IMX_PINCTRL_PIN(MX8MP_IOMUXC_SAI3_RXC),
+ IMX_PINCTRL_PIN(MX8MP_IOMUXC_SAI3_RXD),
+ IMX_PINCTRL_PIN(MX8MP_IOMUXC_SAI3_TXFS),
+ IMX_PINCTRL_PIN(MX8MP_IOMUXC_SAI3_TXC),
+ IMX_PINCTRL_PIN(MX8MP_IOMUXC_SAI3_TXD),
+ IMX_PINCTRL_PIN(MX8MP_IOMUXC_SAI3_MCLK),
+ IMX_PINCTRL_PIN(MX8MP_IOMUXC_SPDIF_TX),
+ IMX_PINCTRL_PIN(MX8MP_IOMUXC_SPDIF_RX),
+ IMX_PINCTRL_PIN(MX8MP_IOMUXC_SPDIF_EXT_CLK),
+ IMX_PINCTRL_PIN(MX8MP_IOMUXC_ECSPI1_SCLK),
+ IMX_PINCTRL_PIN(MX8MP_IOMUXC_ECSPI1_MOSI),
+ IMX_PINCTRL_PIN(MX8MP_IOMUXC_ECSPI1_MISO),
+ IMX_PINCTRL_PIN(MX8MP_IOMUXC_ECSPI1_SS0),
+ IMX_PINCTRL_PIN(MX8MP_IOMUXC_ECSPI2_SCLK),
+ IMX_PINCTRL_PIN(MX8MP_IOMUXC_ECSPI2_MOSI),
+ IMX_PINCTRL_PIN(MX8MP_IOMUXC_ECSPI2_MISO),
+ IMX_PINCTRL_PIN(MX8MP_IOMUXC_ECSPI2_SS0),
+ IMX_PINCTRL_PIN(MX8MP_IOMUXC_I2C1_SCL),
+ IMX_PINCTRL_PIN(MX8MP_IOMUXC_I2C1_SDA),
+ IMX_PINCTRL_PIN(MX8MP_IOMUXC_I2C2_SCL),
+ IMX_PINCTRL_PIN(MX8MP_IOMUXC_I2C2_SDA),
+ IMX_PINCTRL_PIN(MX8MP_IOMUXC_I2C3_SCL),
+ IMX_PINCTRL_PIN(MX8MP_IOMUXC_I2C3_SDA),
+ IMX_PINCTRL_PIN(MX8MP_IOMUXC_I2C4_SCL),
+ IMX_PINCTRL_PIN(MX8MP_IOMUXC_I2C4_SDA),
+ IMX_PINCTRL_PIN(MX8MP_IOMUXC_UART1_RXD),
+ IMX_PINCTRL_PIN(MX8MP_IOMUXC_UART1_TXD),
+ IMX_PINCTRL_PIN(MX8MP_IOMUXC_UART2_RXD),
+ IMX_PINCTRL_PIN(MX8MP_IOMUXC_UART2_TXD),
+ IMX_PINCTRL_PIN(MX8MP_IOMUXC_UART3_RXD),
+ IMX_PINCTRL_PIN(MX8MP_IOMUXC_UART3_TXD),
+ IMX_PINCTRL_PIN(MX8MP_IOMUXC_UART4_RXD),
+ IMX_PINCTRL_PIN(MX8MP_IOMUXC_UART4_TXD),
+ IMX_PINCTRL_PIN(MX8MP_IOMUXC_HDMI_DDC_SCL),
+ IMX_PINCTRL_PIN(MX8MP_IOMUXC_HDMI_DDC_SDA),
+ IMX_PINCTRL_PIN(MX8MP_IOMUXC_HDMI_CEC),
+ IMX_PINCTRL_PIN(MX8MP_IOMUXC_HDMI_HPD),
+};
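For reference, assuming the existing IMX_PINCTRL_PIN() helper in pinctrl-imx.h is unchanged by this patch:

/*
 * #define IMX_PINCTRL_PIN(pin) PINCTRL_PIN(pin, #pin)
 *
 * Each entry above therefore registers the enum value as the pin
 * number together with its stringified name, e.g. pin 5 becomes
 * "MX8MP_IOMUXC_GPIO1_IO00".
 */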
+
+static const struct imx_pinctrl_soc_info imx8mp_pinctrl_info = {
+ .pins = imx8mp_pinctrl_pads,
+ .npins = ARRAY_SIZE(imx8mp_pinctrl_pads),
+ .gpr_compatible = "fsl,imx8mp-iomuxc-gpr",
+};
+
+static const struct of_device_id imx8mp_pinctrl_of_match[] = {
+ { .compatible = "fsl,imx8mp-iomuxc", .data = &imx8mp_pinctrl_info, },
+ { /* sentinel */ }
+};
+
+static int imx8mp_pinctrl_probe(struct platform_device *pdev)
+{
+ return imx_pinctrl_probe(pdev, &imx8mp_pinctrl_info);
+}
+
+static struct platform_driver imx8mp_pinctrl_driver = {
+ .driver = {
+ .name = "imx8mp-pinctrl",
+ .of_match_table = of_match_ptr(imx8mp_pinctrl_of_match),
+ },
+ .probe = imx8mp_pinctrl_probe,
+};
+
+static int __init imx8mp_pinctrl_init(void)
+{
+ return platform_driver_register(&imx8mp_pinctrl_driver);
+}
+arch_initcall(imx8mp_pinctrl_init);
diff --git a/drivers/pinctrl/intel/Kconfig b/drivers/pinctrl/intel/Kconfig
index 6091947a8f51..ee440ec4c94c 100644
--- a/drivers/pinctrl/intel/Kconfig
+++ b/drivers/pinctrl/intel/Kconfig
@@ -31,6 +31,19 @@ config PINCTRL_CHERRYVIEW
Cherryview/Braswell pinctrl driver provides an interface that
allows configuring of SoC pins and using them as GPIOs.
+config PINCTRL_LYNXPOINT
+ tristate "Intel Lynxpoint pinctrl and GPIO driver"
+ depends on ACPI
+ select PINMUX
+ select PINCONF
+ select GENERIC_PINCONF
+ select GPIOLIB
+ select GPIOLIB_IRQCHIP
+ help
+ Lynxpoint is the PCH of Intel Haswell. This pinctrl driver
+ provides an interface that allows configuring of PCH pins and
+ using them as GPIOs.
+
config PINCTRL_MERRIFIELD
tristate "Intel Merrifield pinctrl driver"
depends on X86_INTEL_MID
diff --git a/drivers/pinctrl/intel/Makefile b/drivers/pinctrl/intel/Makefile
index 7e620b471ef6..f60f99cfa7aa 100644
--- a/drivers/pinctrl/intel/Makefile
+++ b/drivers/pinctrl/intel/Makefile
@@ -3,6 +3,7 @@
obj-$(CONFIG_PINCTRL_BAYTRAIL) += pinctrl-baytrail.o
obj-$(CONFIG_PINCTRL_CHERRYVIEW) += pinctrl-cherryview.o
+obj-$(CONFIG_PINCTRL_LYNXPOINT) += pinctrl-lynxpoint.o
obj-$(CONFIG_PINCTRL_MERRIFIELD) += pinctrl-merrifield.o
obj-$(CONFIG_PINCTRL_INTEL) += pinctrl-intel.o
obj-$(CONFIG_PINCTRL_BROXTON) += pinctrl-broxton.o
diff --git a/drivers/pinctrl/intel/pinctrl-baytrail.c b/drivers/pinctrl/intel/pinctrl-baytrail.c
index 55141d5de29e..b409642f168d 100644
--- a/drivers/pinctrl/intel/pinctrl-baytrail.c
+++ b/drivers/pinctrl/intel/pinctrl-baytrail.c
@@ -93,7 +93,7 @@
#define BYT_DEFAULT_GPIO_MUX 0
#define BYT_ALTER_GPIO_MUX 1
-struct byt_gpio_pin_context {
+struct intel_pad_context {
u32 conf0;
u32 val;
};
@@ -105,16 +105,6 @@ struct byt_gpio_pin_context {
.pad_map = (map),\
}
-struct byt_gpio {
- struct gpio_chip chip;
- struct platform_device *pdev;
- struct pinctrl_dev *pctl_dev;
- struct pinctrl_desc pctl_desc;
- const struct intel_pinctrl_soc_data *soc_data;
- struct intel_community *communities_copy;
- struct byt_gpio_pin_context *saved_context;
-};
-
/* SCORE pins, aka GPIOC_<pin_no> or GPIO_S0_SC[<pin_no>] */
static const struct pinctrl_pin_desc byt_score_pins[] = {
PINCTRL_PIN(0, "SATA_GP0"),
@@ -550,14 +540,14 @@ static const struct intel_pinctrl_soc_data *byt_soc_data[] = {
static DEFINE_RAW_SPINLOCK(byt_lock);
-static struct intel_community *byt_get_community(struct byt_gpio *vg,
+static struct intel_community *byt_get_community(struct intel_pinctrl *vg,
unsigned int pin)
{
struct intel_community *comm;
int i;
- for (i = 0; i < vg->soc_data->ncommunities; i++) {
- comm = vg->communities_copy + i;
+ for (i = 0; i < vg->ncommunities; i++) {
+ comm = vg->communities + i;
if (pin < comm->pin_base + comm->npins && pin >= comm->pin_base)
return comm;
}
@@ -565,7 +555,7 @@ static struct intel_community *byt_get_community(struct byt_gpio *vg,
return NULL;
}
-static void __iomem *byt_gpio_reg(struct byt_gpio *vg, unsigned int offset,
+static void __iomem *byt_gpio_reg(struct intel_pinctrl *vg, unsigned int offset,
int reg)
{
struct intel_community *comm = byt_get_community(vg, offset);
@@ -592,17 +582,17 @@ static void __iomem *byt_gpio_reg(struct byt_gpio *vg, unsigned int offset,
static int byt_get_groups_count(struct pinctrl_dev *pctldev)
{
- struct byt_gpio *vg = pinctrl_dev_get_drvdata(pctldev);
+ struct intel_pinctrl *vg = pinctrl_dev_get_drvdata(pctldev);
- return vg->soc_data->ngroups;
+ return vg->soc->ngroups;
}
static const char *byt_get_group_name(struct pinctrl_dev *pctldev,
unsigned int selector)
{
- struct byt_gpio *vg = pinctrl_dev_get_drvdata(pctldev);
+ struct intel_pinctrl *vg = pinctrl_dev_get_drvdata(pctldev);
- return vg->soc_data->groups[selector].name;
+ return vg->soc->groups[selector].name;
}
static int byt_get_group_pins(struct pinctrl_dev *pctldev,
@@ -610,10 +600,10 @@ static int byt_get_group_pins(struct pinctrl_dev *pctldev,
const unsigned int **pins,
unsigned int *num_pins)
{
- struct byt_gpio *vg = pinctrl_dev_get_drvdata(pctldev);
+ struct intel_pinctrl *vg = pinctrl_dev_get_drvdata(pctldev);
- *pins = vg->soc_data->groups[selector].pins;
- *num_pins = vg->soc_data->groups[selector].npins;
+ *pins = vg->soc->groups[selector].pins;
+ *num_pins = vg->soc->groups[selector].npins;
return 0;
}
@@ -626,17 +616,17 @@ static const struct pinctrl_ops byt_pinctrl_ops = {
static int byt_get_functions_count(struct pinctrl_dev *pctldev)
{
- struct byt_gpio *vg = pinctrl_dev_get_drvdata(pctldev);
+ struct intel_pinctrl *vg = pinctrl_dev_get_drvdata(pctldev);
- return vg->soc_data->nfunctions;
+ return vg->soc->nfunctions;
}
static const char *byt_get_function_name(struct pinctrl_dev *pctldev,
unsigned int selector)
{
- struct byt_gpio *vg = pinctrl_dev_get_drvdata(pctldev);
+ struct intel_pinctrl *vg = pinctrl_dev_get_drvdata(pctldev);
- return vg->soc_data->functions[selector].name;
+ return vg->soc->functions[selector].name;
}
static int byt_get_function_groups(struct pinctrl_dev *pctldev,
@@ -644,15 +634,15 @@ static int byt_get_function_groups(struct pinctrl_dev *pctldev,
const char * const **groups,
unsigned int *num_groups)
{
- struct byt_gpio *vg = pinctrl_dev_get_drvdata(pctldev);
+ struct intel_pinctrl *vg = pinctrl_dev_get_drvdata(pctldev);
- *groups = vg->soc_data->functions[selector].groups;
- *num_groups = vg->soc_data->functions[selector].ngroups;
+ *groups = vg->soc->functions[selector].groups;
+ *num_groups = vg->soc->functions[selector].ngroups;
return 0;
}
-static void byt_set_group_simple_mux(struct byt_gpio *vg,
+static void byt_set_group_simple_mux(struct intel_pinctrl *vg,
const struct intel_pingroup group,
unsigned int func)
{
@@ -667,7 +657,7 @@ static void byt_set_group_simple_mux(struct byt_gpio *vg,
padcfg0 = byt_gpio_reg(vg, group.pins[i], BYT_CONF0_REG);
if (!padcfg0) {
- dev_warn(&vg->pdev->dev,
+ dev_warn(vg->dev,
"Group %s, pin %i not muxed (no padcfg0)\n",
group.name, i);
continue;
@@ -682,7 +672,7 @@ static void byt_set_group_simple_mux(struct byt_gpio *vg,
raw_spin_unlock_irqrestore(&byt_lock, flags);
}
-static void byt_set_group_mixed_mux(struct byt_gpio *vg,
+static void byt_set_group_mixed_mux(struct intel_pinctrl *vg,
const struct intel_pingroup group,
const unsigned int *func)
{
@@ -697,7 +687,7 @@ static void byt_set_group_mixed_mux(struct byt_gpio *vg,
padcfg0 = byt_gpio_reg(vg, group.pins[i], BYT_CONF0_REG);
if (!padcfg0) {
- dev_warn(&vg->pdev->dev,
+ dev_warn(vg->dev,
"Group %s, pin %i not muxed (no padcfg0)\n",
group.name, i);
continue;
@@ -715,9 +705,9 @@ static void byt_set_group_mixed_mux(struct byt_gpio *vg,
static int byt_set_mux(struct pinctrl_dev *pctldev, unsigned int func_selector,
unsigned int group_selector)
{
- struct byt_gpio *vg = pinctrl_dev_get_drvdata(pctldev);
- const struct intel_function func = vg->soc_data->functions[func_selector];
- const struct intel_pingroup group = vg->soc_data->groups[group_selector];
+ struct intel_pinctrl *vg = pinctrl_dev_get_drvdata(pctldev);
+ const struct intel_function func = vg->soc->functions[func_selector];
+ const struct intel_pingroup group = vg->soc->groups[group_selector];
if (group.modes)
byt_set_group_mixed_mux(vg, group, group.modes);
@@ -729,22 +719,22 @@ static int byt_set_mux(struct pinctrl_dev *pctldev, unsigned int func_selector,
return 0;
}
-static u32 byt_get_gpio_mux(struct byt_gpio *vg, unsigned int offset)
+static u32 byt_get_gpio_mux(struct intel_pinctrl *vg, unsigned int offset)
{
/* SCORE pin 92-93 */
- if (!strcmp(vg->soc_data->uid, BYT_SCORE_ACPI_UID) &&
+ if (!strcmp(vg->soc->uid, BYT_SCORE_ACPI_UID) &&
offset >= 92 && offset <= 93)
return BYT_ALTER_GPIO_MUX;
/* SUS pin 11-21 */
- if (!strcmp(vg->soc_data->uid, BYT_SUS_ACPI_UID) &&
+ if (!strcmp(vg->soc->uid, BYT_SUS_ACPI_UID) &&
offset >= 11 && offset <= 21)
return BYT_ALTER_GPIO_MUX;
return BYT_DEFAULT_GPIO_MUX;
}
-static void byt_gpio_clear_triggering(struct byt_gpio *vg, unsigned int offset)
+static void byt_gpio_clear_triggering(struct intel_pinctrl *vg, unsigned int offset)
{
void __iomem *reg = byt_gpio_reg(vg, offset, BYT_CONF0_REG);
unsigned long flags;
@@ -752,7 +742,13 @@ static void byt_gpio_clear_triggering(struct byt_gpio *vg, unsigned int offset)
raw_spin_lock_irqsave(&byt_lock, flags);
value = readl(reg);
- value &= ~(BYT_TRIG_POS | BYT_TRIG_NEG | BYT_TRIG_LVL);
+
+ /* Do not clear direct-irq enabled IRQs (from gpio_disable_free) */
+ if (value & BYT_DIRECT_IRQ_EN)
+ /* nothing to do */ ;
+ else
+ value &= ~(BYT_TRIG_POS | BYT_TRIG_NEG | BYT_TRIG_LVL);
+
writel(value, reg);
raw_spin_unlock_irqrestore(&byt_lock, flags);
}
@@ -761,7 +757,7 @@ static int byt_gpio_request_enable(struct pinctrl_dev *pctl_dev,
struct pinctrl_gpio_range *range,
unsigned int offset)
{
- struct byt_gpio *vg = pinctrl_dev_get_drvdata(pctl_dev);
+ struct intel_pinctrl *vg = pinctrl_dev_get_drvdata(pctl_dev);
void __iomem *reg = byt_gpio_reg(vg, offset, BYT_CONF0_REG);
u32 value, gpio_mux;
unsigned long flags;
@@ -784,13 +780,12 @@ static int byt_gpio_request_enable(struct pinctrl_dev *pctl_dev,
value |= gpio_mux;
writel(value, reg);
- dev_warn(&vg->pdev->dev, FW_BUG
- "pin %u forcibly re-configured as GPIO\n", offset);
+ dev_warn(vg->dev, FW_BUG "pin %u forcibly re-configured as GPIO\n", offset);
}
raw_spin_unlock_irqrestore(&byt_lock, flags);
- pm_runtime_get(&vg->pdev->dev);
+ pm_runtime_get(vg->dev);
return 0;
}
@@ -799,10 +794,10 @@ static void byt_gpio_disable_free(struct pinctrl_dev *pctl_dev,
struct pinctrl_gpio_range *range,
unsigned int offset)
{
- struct byt_gpio *vg = pinctrl_dev_get_drvdata(pctl_dev);
+ struct intel_pinctrl *vg = pinctrl_dev_get_drvdata(pctl_dev);
byt_gpio_clear_triggering(vg, offset);
- pm_runtime_put(&vg->pdev->dev);
+ pm_runtime_put(vg->dev);
}
static int byt_gpio_set_direction(struct pinctrl_dev *pctl_dev,
@@ -810,7 +805,7 @@ static int byt_gpio_set_direction(struct pinctrl_dev *pctl_dev,
unsigned int offset,
bool input)
{
- struct byt_gpio *vg = pinctrl_dev_get_drvdata(pctl_dev);
+ struct intel_pinctrl *vg = pinctrl_dev_get_drvdata(pctl_dev);
void __iomem *val_reg = byt_gpio_reg(vg, offset, BYT_VAL_REG);
void __iomem *conf_reg = byt_gpio_reg(vg, offset, BYT_CONF0_REG);
unsigned long flags;
@@ -822,15 +817,15 @@ static int byt_gpio_set_direction(struct pinctrl_dev *pctl_dev,
value &= ~BYT_DIR_MASK;
if (input)
value |= BYT_OUTPUT_EN;
- else
+ else if (readl(conf_reg) & BYT_DIRECT_IRQ_EN)
/*
* Before making any direction modifications, do a check if gpio
* is set for direct IRQ. On baytrail, setting GPIO to output
- * does not make sense, so let's at least warn the caller before
+ * does not make sense, so let's at least inform the caller before
* they shoot themselves in the foot.
*/
- WARN(readl(conf_reg) & BYT_DIRECT_IRQ_EN,
- "Potential Error: Setting GPIO with direct_irq_en to output");
+ dev_info_once(vg->dev, "Potential Error: Setting GPIO with direct_irq_en to output");
+
writel(value, val_reg);
raw_spin_unlock_irqrestore(&byt_lock, flags);
@@ -893,7 +888,7 @@ static int byt_set_pull_strength(u32 *reg, u16 strength)
static int byt_pin_config_get(struct pinctrl_dev *pctl_dev, unsigned int offset,
unsigned long *config)
{
- struct byt_gpio *vg = pinctrl_dev_get_drvdata(pctl_dev);
+ struct intel_pinctrl *vg = pinctrl_dev_get_drvdata(pctl_dev);
enum pin_config_param param = pinconf_to_config_param(*config);
void __iomem *conf_reg = byt_gpio_reg(vg, offset, BYT_CONF0_REG);
void __iomem *val_reg = byt_gpio_reg(vg, offset, BYT_VAL_REG);
@@ -978,7 +973,7 @@ static int byt_pin_config_set(struct pinctrl_dev *pctl_dev,
unsigned long *configs,
unsigned int num_configs)
{
- struct byt_gpio *vg = pinctrl_dev_get_drvdata(pctl_dev);
+ struct intel_pinctrl *vg = pinctrl_dev_get_drvdata(pctl_dev);
unsigned int param, arg;
void __iomem *conf_reg = byt_gpio_reg(vg, offset, BYT_CONF0_REG);
void __iomem *val_reg = byt_gpio_reg(vg, offset, BYT_VAL_REG);
@@ -1012,7 +1007,7 @@ static int byt_pin_config_set(struct pinctrl_dev *pctl_dev,
if (val & BYT_INPUT_EN) {
val &= ~BYT_INPUT_EN;
writel(val, val_reg);
- dev_warn(&vg->pdev->dev,
+ dev_warn(vg->dev,
"pin %u forcibly set to input mode\n",
offset);
}
@@ -1034,7 +1029,7 @@ static int byt_pin_config_set(struct pinctrl_dev *pctl_dev,
if (val & BYT_INPUT_EN) {
val &= ~BYT_INPUT_EN;
writel(val, val_reg);
- dev_warn(&vg->pdev->dev,
+ dev_warn(vg->dev,
"pin %u forcibly set to input mode\n",
offset);
}
@@ -1115,7 +1110,7 @@ static const struct pinctrl_desc byt_pinctrl_desc = {
static int byt_gpio_get(struct gpio_chip *chip, unsigned int offset)
{
- struct byt_gpio *vg = gpiochip_get_data(chip);
+ struct intel_pinctrl *vg = gpiochip_get_data(chip);
void __iomem *reg = byt_gpio_reg(vg, offset, BYT_VAL_REG);
unsigned long flags;
u32 val;
@@ -1129,7 +1124,7 @@ static int byt_gpio_get(struct gpio_chip *chip, unsigned int offset)
static void byt_gpio_set(struct gpio_chip *chip, unsigned int offset, int value)
{
- struct byt_gpio *vg = gpiochip_get_data(chip);
+ struct intel_pinctrl *vg = gpiochip_get_data(chip);
void __iomem *reg = byt_gpio_reg(vg, offset, BYT_VAL_REG);
unsigned long flags;
u32 old_val;
@@ -1148,7 +1143,7 @@ static void byt_gpio_set(struct gpio_chip *chip, unsigned int offset, int value)
static int byt_gpio_get_direction(struct gpio_chip *chip, unsigned int offset)
{
- struct byt_gpio *vg = gpiochip_get_data(chip);
+ struct intel_pinctrl *vg = gpiochip_get_data(chip);
void __iomem *reg = byt_gpio_reg(vg, offset, BYT_VAL_REG);
unsigned long flags;
u32 value;
@@ -1161,9 +1156,9 @@ static int byt_gpio_get_direction(struct gpio_chip *chip, unsigned int offset)
raw_spin_unlock_irqrestore(&byt_lock, flags);
if (!(value & BYT_OUTPUT_EN))
- return 0;
+ return GPIO_LINE_DIRECTION_OUT;
if (!(value & BYT_INPUT_EN))
- return 1;
+ return GPIO_LINE_DIRECTION_IN;
return -EINVAL;
}
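
For context: GPIO_LINE_DIRECTION_IN and GPIO_LINE_DIRECTION_OUT are defined as 1 and 0 in <linux/gpio/driver.h>, so the numeric results callers see through gpiod_get_direction() are unchanged; the named constants only make callbacks like this self-documenting. A tiny consumer-side sketch (hypothetical helper name):

#include <linux/gpio/consumer.h>
#include <linux/gpio/driver.h>

/* Hypothetical: true if the line is configured as an input. Negative
 * returns from gpiod_get_direction() are errors (for the Baytrail
 * callback above, -EINVAL when the pad reports neither direction). */
static bool example_line_is_input(struct gpio_desc *desc)
{
	return gpiod_get_direction(desc) == GPIO_LINE_DIRECTION_IN;
}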
@@ -1188,11 +1183,11 @@ static int byt_gpio_direction_output(struct gpio_chip *chip,
static void byt_gpio_dbg_show(struct seq_file *s, struct gpio_chip *chip)
{
- struct byt_gpio *vg = gpiochip_get_data(chip);
+ struct intel_pinctrl *vg = gpiochip_get_data(chip);
int i;
u32 conf0, val;
- for (i = 0; i < vg->soc_data->npins; i++) {
+ for (i = 0; i < vg->soc->npins; i++) {
const struct intel_community *comm;
const char *pull_str = NULL;
const char *pull = NULL;
@@ -1202,7 +1197,7 @@ static void byt_gpio_dbg_show(struct seq_file *s, struct gpio_chip *chip)
unsigned int pin;
raw_spin_lock_irqsave(&byt_lock, flags);
- pin = vg->soc_data->pins[i].number;
+ pin = vg->soc->pins[i].number;
reg = byt_gpio_reg(vg, pin, BYT_CONF0_REG);
if (!reg) {
seq_printf(s,
@@ -1297,7 +1292,7 @@ static const struct gpio_chip byt_gpio_chip = {
static void byt_irq_ack(struct irq_data *d)
{
struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
- struct byt_gpio *vg = gpiochip_get_data(gc);
+ struct intel_pinctrl *vg = gpiochip_get_data(gc);
unsigned int offset = irqd_to_hwirq(d);
void __iomem *reg;
@@ -1313,7 +1308,7 @@ static void byt_irq_ack(struct irq_data *d)
static void byt_irq_mask(struct irq_data *d)
{
struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
- struct byt_gpio *vg = gpiochip_get_data(gc);
+ struct intel_pinctrl *vg = gpiochip_get_data(gc);
byt_gpio_clear_triggering(vg, irqd_to_hwirq(d));
}
@@ -1321,7 +1316,7 @@ static void byt_irq_mask(struct irq_data *d)
static void byt_irq_unmask(struct irq_data *d)
{
struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
- struct byt_gpio *vg = gpiochip_get_data(gc);
+ struct intel_pinctrl *vg = gpiochip_get_data(gc);
unsigned int offset = irqd_to_hwirq(d);
unsigned long flags;
void __iomem *reg;
@@ -1359,7 +1354,7 @@ static void byt_irq_unmask(struct irq_data *d)
static int byt_irq_type(struct irq_data *d, unsigned int type)
{
- struct byt_gpio *vg = gpiochip_get_data(irq_data_get_irq_chip_data(d));
+ struct intel_pinctrl *vg = gpiochip_get_data(irq_data_get_irq_chip_data(d));
u32 offset = irqd_to_hwirq(d);
u32 value;
unsigned long flags;
@@ -1395,20 +1390,10 @@ static int byt_irq_type(struct irq_data *d, unsigned int type)
return 0;
}
-static struct irq_chip byt_irqchip = {
- .name = "BYT-GPIO",
- .irq_ack = byt_irq_ack,
- .irq_mask = byt_irq_mask,
- .irq_unmask = byt_irq_unmask,
- .irq_set_type = byt_irq_type,
- .flags = IRQCHIP_SKIP_SET_WAKE,
-};
-
static void byt_gpio_irq_handler(struct irq_desc *desc)
{
struct irq_data *data = irq_desc_get_irq_data(desc);
- struct byt_gpio *vg = gpiochip_get_data(
- irq_desc_get_handler_data(desc));
+ struct intel_pinctrl *vg = gpiochip_get_data(irq_desc_get_handler_data(desc));
struct irq_chip *chip = irq_data_get_irq_chip(data);
u32 base, pin;
void __iomem *reg;
@@ -1420,7 +1405,7 @@ static void byt_gpio_irq_handler(struct irq_desc *desc)
reg = byt_gpio_reg(vg, base, BYT_INT_STAT_REG);
if (!reg) {
- dev_warn(&vg->pdev->dev,
+ dev_warn(vg->dev,
"Pin %i: could not retrieve interrupt status register\n",
base);
continue;
@@ -1441,22 +1426,9 @@ static void byt_init_irq_valid_mask(struct gpio_chip *chip,
unsigned long *valid_mask,
unsigned int ngpios)
{
- /*
- * FIXME: currently the valid_mask is filled in as part of
- * initializing the irq_chip below in byt_gpio_irq_init_hw().
- * when converting this driver to the new way of passing the
- * gpio_irq_chip along when adding the gpio_chip, move the
- * mask initialization into this callback instead. Right now
- * this callback is here to make sure the mask gets allocated.
- */
-}
-
-static int byt_gpio_irq_init_hw(struct gpio_chip *chip)
-{
- struct byt_gpio *vg = gpiochip_get_data(chip);
- struct device *dev = &vg->pdev->dev;
+ struct intel_pinctrl *vg = gpiochip_get_data(chip);
void __iomem *reg;
- u32 base, value;
+ u32 value;
int i;
/*
@@ -1464,12 +1436,12 @@ static int byt_gpio_irq_init_hw(struct gpio_chip *chip)
* do not use direct IRQ mode. This will prevent spurious
* interrupts from misconfigured pins.
*/
- for (i = 0; i < vg->soc_data->npins; i++) {
- unsigned int pin = vg->soc_data->pins[i].number;
+ for (i = 0; i < vg->soc->npins; i++) {
+ unsigned int pin = vg->soc->pins[i].number;
reg = byt_gpio_reg(vg, pin, BYT_CONF0_REG);
if (!reg) {
- dev_warn(&vg->pdev->dev,
+ dev_warn(vg->dev,
"Pin %i: could not retrieve conf0 register\n",
i);
continue;
@@ -1477,20 +1449,27 @@ static int byt_gpio_irq_init_hw(struct gpio_chip *chip)
value = readl(reg);
if (value & BYT_DIRECT_IRQ_EN) {
- clear_bit(i, chip->irq.valid_mask);
- dev_dbg(dev, "excluding GPIO %d from IRQ domain\n", i);
+ clear_bit(i, valid_mask);
+ dev_dbg(vg->dev, "excluding GPIO %d from IRQ domain\n", i);
} else if ((value & BYT_PIN_MUX) == byt_get_gpio_mux(vg, i)) {
byt_gpio_clear_triggering(vg, i);
- dev_dbg(dev, "disabling GPIO %d\n", i);
+ dev_dbg(vg->dev, "disabling GPIO %d\n", i);
}
}
+}
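
A short note on the valid-mask handling above: bits cleared here remove pins from the gpiochip's IRQ domain entirely, which is the mechanism that keeps direct-IRQ pads out of Linux interrupt handling:

/*
 * Lines cleared in valid_mask never get a virq from the gpiochip IRQ
 * domain, so gpiod_to_irq() fails for them instead of letting software
 * disturb the firmware's direct-IRQ routing.
 */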
+
+static int byt_gpio_irq_init_hw(struct gpio_chip *chip)
+{
+ struct intel_pinctrl *vg = gpiochip_get_data(chip);
+ void __iomem *reg;
+ u32 base, value;
/* clear interrupt status trigger registers */
- for (base = 0; base < vg->soc_data->npins; base += 32) {
+ for (base = 0; base < vg->soc->npins; base += 32) {
reg = byt_gpio_reg(vg, base, BYT_INT_STAT_REG);
if (!reg) {
- dev_warn(&vg->pdev->dev,
+ dev_warn(vg->dev,
"Pin %i: could not retrieve irq status reg\n",
base);
continue;
@@ -1501,7 +1480,7 @@ static int byt_gpio_irq_init_hw(struct gpio_chip *chip)
might be misconfigured in bios */
value = readl(reg);
if (value)
- dev_err(&vg->pdev->dev,
+ dev_err(vg->dev,
"GPIO interrupt error, pins misconfigured. INT_STAT%u: 0x%08x\n",
base / 32, value);
}
@@ -1511,19 +1490,20 @@ static int byt_gpio_irq_init_hw(struct gpio_chip *chip)
static int byt_gpio_add_pin_ranges(struct gpio_chip *chip)
{
- struct byt_gpio *vg = gpiochip_get_data(chip);
- struct device *dev = &vg->pdev->dev;
+ struct intel_pinctrl *vg = gpiochip_get_data(chip);
+ struct device *dev = vg->dev;
int ret;
- ret = gpiochip_add_pin_range(chip, dev_name(dev), 0, 0, vg->soc_data->npins);
+ ret = gpiochip_add_pin_range(chip, dev_name(dev), 0, 0, vg->soc->npins);
if (ret)
dev_err(dev, "failed to add GPIO pin range\n");
return ret;
}
-static int byt_gpio_probe(struct byt_gpio *vg)
+static int byt_gpio_probe(struct intel_pinctrl *vg)
{
+ struct platform_device *pdev = to_platform_device(vg->dev);
struct gpio_chip *gc;
struct resource *irq_rc;
int ret;
@@ -1531,32 +1511,39 @@ static int byt_gpio_probe(struct byt_gpio *vg)
/* Set up gpio chip */
vg->chip = byt_gpio_chip;
gc = &vg->chip;
- gc->label = dev_name(&vg->pdev->dev);
+ gc->label = dev_name(vg->dev);
gc->base = -1;
gc->can_sleep = false;
gc->add_pin_ranges = byt_gpio_add_pin_ranges;
- gc->parent = &vg->pdev->dev;
- gc->ngpio = vg->soc_data->npins;
- gc->irq.init_valid_mask = byt_init_irq_valid_mask;
+ gc->parent = vg->dev;
+ gc->ngpio = vg->soc->npins;
#ifdef CONFIG_PM_SLEEP
- vg->saved_context = devm_kcalloc(&vg->pdev->dev, gc->ngpio,
- sizeof(*vg->saved_context), GFP_KERNEL);
- if (!vg->saved_context)
+ vg->context.pads = devm_kcalloc(vg->dev, gc->ngpio, sizeof(*vg->context.pads),
+ GFP_KERNEL);
+ if (!vg->context.pads)
return -ENOMEM;
#endif
/* set up interrupts */
- irq_rc = platform_get_resource(vg->pdev, IORESOURCE_IRQ, 0);
+ irq_rc = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
if (irq_rc && irq_rc->start) {
struct gpio_irq_chip *girq;
+ vg->irqchip.name = "BYT-GPIO";
+ vg->irqchip.irq_ack = byt_irq_ack;
+ vg->irqchip.irq_mask = byt_irq_mask;
+ vg->irqchip.irq_unmask = byt_irq_unmask;
+ vg->irqchip.irq_set_type = byt_irq_type;
+ vg->irqchip.flags = IRQCHIP_SKIP_SET_WAKE;
+
girq = &gc->irq;
- girq->chip = &byt_irqchip;
+ girq->chip = &vg->irqchip;
girq->init_hw = byt_gpio_irq_init_hw;
+ girq->init_valid_mask = byt_init_irq_valid_mask;
girq->parent_handler = byt_gpio_irq_handler;
girq->num_parents = 1;
- girq->parents = devm_kcalloc(&vg->pdev->dev, girq->num_parents,
+ girq->parents = devm_kcalloc(vg->dev, girq->num_parents,
sizeof(*girq->parents), GFP_KERNEL);
if (!girq->parents)
return -ENOMEM;
@@ -1565,34 +1552,35 @@ static int byt_gpio_probe(struct byt_gpio *vg)
girq->handler = handle_bad_irq;
}
- ret = devm_gpiochip_add_data(&vg->pdev->dev, gc, vg);
+ ret = devm_gpiochip_add_data(vg->dev, gc, vg);
if (ret) {
- dev_err(&vg->pdev->dev, "failed adding byt-gpio chip\n");
+ dev_err(vg->dev, "failed adding byt-gpio chip\n");
return ret;
}
return ret;
}
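
The probe above now embeds the irq_chip in the driver structure (replacing the removed file-scope byt_irqchip) and describes the interrupt setup through struct gpio_irq_chip before registering the chip. A condensed sketch of that pattern, with hypothetical names and stub callbacks; chip setup and most error handling are elided:

#include <linux/device.h>
#include <linux/gpio/driver.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/slab.h>

/* Hypothetical driver state for the sketch */
struct example_gpio {
	struct gpio_chip chip;
	struct irq_chip irqchip;	/* per-instance, not a shared static */
};

static void example_irq_noop(struct irq_data *d)
{
	/* stub; a real driver would ack/mask hardware here */
}

static void example_irq_handler(struct irq_desc *desc)
{
	/* stub; a real chained handler demultiplexes pending pins here */
}

static int example_setup_irqchip(struct device *dev, struct example_gpio *eg,
				 unsigned int parent_irq)
{
	struct gpio_irq_chip *girq = &eg->chip.irq;

	eg->irqchip.name = "example";
	eg->irqchip.irq_ack = example_irq_noop;
	eg->irqchip.irq_mask = example_irq_noop;
	eg->irqchip.irq_unmask = example_irq_noop;

	girq->chip = &eg->irqchip;
	girq->parent_handler = example_irq_handler;
	girq->num_parents = 1;
	girq->parents = devm_kcalloc(dev, girq->num_parents,
				     sizeof(*girq->parents), GFP_KERNEL);
	if (!girq->parents)
		return -ENOMEM;
	girq->parents[0] = parent_irq;
	girq->default_type = IRQ_TYPE_NONE;
	girq->handler = handle_bad_irq;

	/* gpiolib wires the irqchip up while registering the chip */
	return devm_gpiochip_add_data(dev, &eg->chip, eg);
}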
-static int byt_set_soc_data(struct byt_gpio *vg,
- const struct intel_pinctrl_soc_data *soc_data)
+static int byt_set_soc_data(struct intel_pinctrl *vg,
+ const struct intel_pinctrl_soc_data *soc)
{
+ struct platform_device *pdev = to_platform_device(vg->dev);
int i;
- vg->soc_data = soc_data;
- vg->communities_copy = devm_kcalloc(&vg->pdev->dev,
- soc_data->ncommunities,
- sizeof(*vg->communities_copy),
- GFP_KERNEL);
- if (!vg->communities_copy)
+ vg->soc = soc;
+
+ vg->ncommunities = vg->soc->ncommunities;
+ vg->communities = devm_kcalloc(vg->dev, vg->ncommunities,
+ sizeof(*vg->communities), GFP_KERNEL);
+ if (!vg->communities)
return -ENOMEM;
- for (i = 0; i < soc_data->ncommunities; i++) {
- struct intel_community *comm = vg->communities_copy + i;
+ for (i = 0; i < vg->soc->ncommunities; i++) {
+ struct intel_community *comm = vg->communities + i;
- *comm = vg->soc_data->communities[i];
+ *comm = vg->soc->communities[i];
- comm->pad_regs = devm_platform_ioremap_resource(vg->pdev, 0);
+ comm->pad_regs = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(comm->pad_regs))
return PTR_ERR(comm->pad_regs);
}
@@ -1610,15 +1598,16 @@ static int byt_pinctrl_probe(struct platform_device *pdev)
{
const struct intel_pinctrl_soc_data *soc_data = NULL;
const struct intel_pinctrl_soc_data **soc_table;
+ struct device *dev = &pdev->dev;
struct acpi_device *acpi_dev;
- struct byt_gpio *vg;
+ struct intel_pinctrl *vg;
int i, ret;
- acpi_dev = ACPI_COMPANION(&pdev->dev);
+ acpi_dev = ACPI_COMPANION(dev);
if (!acpi_dev)
return -ENODEV;
- soc_table = (const struct intel_pinctrl_soc_data **)device_get_match_data(&pdev->dev);
+ soc_table = (const struct intel_pinctrl_soc_data **)device_get_match_data(dev);
for (i = 0; soc_table[i]; i++) {
if (!strcmp(acpi_dev->pnp.unique_id, soc_table[i]->uid)) {
@@ -1630,26 +1619,26 @@ static int byt_pinctrl_probe(struct platform_device *pdev)
if (!soc_data)
return -ENODEV;
- vg = devm_kzalloc(&pdev->dev, sizeof(*vg), GFP_KERNEL);
+ vg = devm_kzalloc(dev, sizeof(*vg), GFP_KERNEL);
if (!vg)
return -ENOMEM;
- vg->pdev = pdev;
+ vg->dev = dev;
ret = byt_set_soc_data(vg, soc_data);
if (ret) {
- dev_err(&pdev->dev, "failed to set soc data\n");
+ dev_err(dev, "failed to set soc data\n");
return ret;
}
- vg->pctl_desc = byt_pinctrl_desc;
- vg->pctl_desc.name = dev_name(&pdev->dev);
- vg->pctl_desc.pins = vg->soc_data->pins;
- vg->pctl_desc.npins = vg->soc_data->npins;
+ vg->pctldesc = byt_pinctrl_desc;
+ vg->pctldesc.name = dev_name(dev);
+ vg->pctldesc.pins = vg->soc->pins;
+ vg->pctldesc.npins = vg->soc->npins;
- vg->pctl_dev = devm_pinctrl_register(&pdev->dev, &vg->pctl_desc, vg);
- if (IS_ERR(vg->pctl_dev)) {
- dev_err(&pdev->dev, "failed to register pinctrl driver\n");
- return PTR_ERR(vg->pctl_dev);
+ vg->pctldev = devm_pinctrl_register(dev, &vg->pctldesc, vg);
+ if (IS_ERR(vg->pctldev)) {
+ dev_err(dev, "failed to register pinctrl driver\n");
+ return PTR_ERR(vg->pctldev);
}
ret = byt_gpio_probe(vg);
@@ -1657,7 +1646,7 @@ static int byt_pinctrl_probe(struct platform_device *pdev)
return ret;
platform_set_drvdata(pdev, vg);
- pm_runtime_enable(&pdev->dev);
+ pm_runtime_enable(dev);
return 0;
}
@@ -1665,30 +1654,30 @@ static int byt_pinctrl_probe(struct platform_device *pdev)
#ifdef CONFIG_PM_SLEEP
static int byt_gpio_suspend(struct device *dev)
{
- struct byt_gpio *vg = dev_get_drvdata(dev);
+ struct intel_pinctrl *vg = dev_get_drvdata(dev);
unsigned long flags;
int i;
raw_spin_lock_irqsave(&byt_lock, flags);
- for (i = 0; i < vg->soc_data->npins; i++) {
+ for (i = 0; i < vg->soc->npins; i++) {
void __iomem *reg;
u32 value;
- unsigned int pin = vg->soc_data->pins[i].number;
+ unsigned int pin = vg->soc->pins[i].number;
reg = byt_gpio_reg(vg, pin, BYT_CONF0_REG);
if (!reg) {
- dev_warn(&vg->pdev->dev,
+ dev_warn(vg->dev,
"Pin %i: could not retrieve conf0 register\n",
i);
continue;
}
value = readl(reg) & BYT_CONF0_RESTORE_MASK;
- vg->saved_context[i].conf0 = value;
+ vg->context.pads[i].conf0 = value;
reg = byt_gpio_reg(vg, pin, BYT_VAL_REG);
value = readl(reg) & BYT_VAL_RESTORE_MASK;
- vg->saved_context[i].val = value;
+ vg->context.pads[i].val = value;
}
raw_spin_unlock_irqrestore(&byt_lock, flags);
@@ -1697,29 +1686,29 @@ static int byt_gpio_suspend(struct device *dev)
static int byt_gpio_resume(struct device *dev)
{
- struct byt_gpio *vg = dev_get_drvdata(dev);
+ struct intel_pinctrl *vg = dev_get_drvdata(dev);
unsigned long flags;
int i;
raw_spin_lock_irqsave(&byt_lock, flags);
- for (i = 0; i < vg->soc_data->npins; i++) {
+ for (i = 0; i < vg->soc->npins; i++) {
void __iomem *reg;
u32 value;
- unsigned int pin = vg->soc_data->pins[i].number;
+ unsigned int pin = vg->soc->pins[i].number;
reg = byt_gpio_reg(vg, pin, BYT_CONF0_REG);
if (!reg) {
- dev_warn(&vg->pdev->dev,
+ dev_warn(vg->dev,
"Pin %i: could not retrieve conf0 register\n",
i);
continue;
}
value = readl(reg);
if ((value & BYT_CONF0_RESTORE_MASK) !=
- vg->saved_context[i].conf0) {
+ vg->context.pads[i].conf0) {
value &= ~BYT_CONF0_RESTORE_MASK;
- value |= vg->saved_context[i].conf0;
+ value |= vg->context.pads[i].conf0;
writel(value, reg);
dev_info(dev, "restored pin %d conf0 %#08x", i, value);
}
@@ -1727,11 +1716,11 @@ static int byt_gpio_resume(struct device *dev)
reg = byt_gpio_reg(vg, pin, BYT_VAL_REG);
value = readl(reg);
if ((value & BYT_VAL_RESTORE_MASK) !=
- vg->saved_context[i].val) {
+ vg->context.pads[i].val) {
u32 v;
v = value & ~BYT_VAL_RESTORE_MASK;
- v |= vg->saved_context[i].val;
+ v |= vg->context.pads[i].val;
if (v != value) {
writel(v, reg);
dev_dbg(dev, "restored pin %d val %#08x\n",
diff --git a/drivers/pinctrl/intel/pinctrl-cherryview.c b/drivers/pinctrl/intel/pinctrl-cherryview.c
index 60527b93a711..4c74fdde576d 100644
--- a/drivers/pinctrl/intel/pinctrl-cherryview.c
+++ b/drivers/pinctrl/intel/pinctrl-cherryview.c
@@ -1289,7 +1289,10 @@ static int chv_gpio_get_direction(struct gpio_chip *chip, unsigned int offset)
direction = ctrl0 & CHV_PADCTRL0_GPIOCFG_MASK;
direction >>= CHV_PADCTRL0_GPIOCFG_SHIFT;
- return direction != CHV_PADCTRL0_GPIOCFG_GPO;
+ if (direction == CHV_PADCTRL0_GPIOCFG_GPO)
+ return GPIO_LINE_DIRECTION_OUT;
+
+ return GPIO_LINE_DIRECTION_IN;
}
static int chv_gpio_direction_input(struct gpio_chip *chip, unsigned int offset)
diff --git a/drivers/pinctrl/intel/pinctrl-intel.c b/drivers/pinctrl/intel/pinctrl-intel.c
index 4860bc9a4e48..74fdfd2b9ff5 100644
--- a/drivers/pinctrl/intel/pinctrl-intel.c
+++ b/drivers/pinctrl/intel/pinctrl-intel.c
@@ -8,8 +8,8 @@
*/
#include <linux/acpi.h>
-#include <linux/interrupt.h>
#include <linux/gpio/driver.h>
+#include <linux/interrupt.h>
#include <linux/log2.h>
#include <linux/module.h>
#include <linux/platform_device.h>
@@ -85,39 +85,6 @@ struct intel_community_context {
u32 *hostown;
};
-struct intel_pinctrl_context {
- struct intel_pad_context *pads;
- struct intel_community_context *communities;
-};
-
-/**
- * struct intel_pinctrl - Intel pinctrl private structure
- * @dev: Pointer to the device structure
- * @lock: Lock to serialize register access
- * @pctldesc: Pin controller description
- * @pctldev: Pointer to the pin controller device
- * @chip: GPIO chip in this pin controller
- * @irqchip: IRQ chip in this pin controller
- * @soc: SoC/PCH specific pin configuration data
- * @communities: All communities in this pin controller
- * @ncommunities: Number of communities in this pin controller
- * @context: Configuration saved over system sleep
- * @irq: pinctrl/GPIO chip irq number
- */
-struct intel_pinctrl {
- struct device *dev;
- raw_spinlock_t lock;
- struct pinctrl_desc pctldesc;
- struct pinctrl_dev *pctldev;
- struct gpio_chip chip;
- struct irq_chip irqchip;
- const struct intel_pinctrl_soc_data *soc;
- struct intel_community *communities;
- size_t ncommunities;
- struct intel_pinctrl_context context;
- int irq;
-};
-
#define pin_to_padno(c, p) ((p) - (c)->pin_base)
#define padgroup_offset(g, p) ((p) - (g)->base)
@@ -944,7 +911,10 @@ static int intel_gpio_get_direction(struct gpio_chip *chip, unsigned int offset)
if (padcfg0 & PADCFG0_PMODE_MASK)
return -EINVAL;
- return !!(padcfg0 & PADCFG0_GPIOTXDIS);
+ if (padcfg0 & PADCFG0_GPIOTXDIS)
+ return GPIO_LINE_DIRECTION_IN;
+
+ return GPIO_LINE_DIRECTION_OUT;
}
static int intel_gpio_direction_input(struct gpio_chip *chip, unsigned int offset)
@@ -1160,8 +1130,8 @@ static irqreturn_t intel_gpio_irq(int irq, void *data)
return ret;
}
-static int intel_gpio_add_pin_ranges(struct intel_pinctrl *pctrl,
- const struct intel_community *community)
+static int intel_gpio_add_community_ranges(struct intel_pinctrl *pctrl,
+ const struct intel_community *community)
{
int ret = 0, i;
@@ -1181,6 +1151,24 @@ static int intel_gpio_add_pin_ranges(struct intel_pinctrl *pctrl,
return ret;
}
+static int intel_gpio_add_pin_ranges(struct gpio_chip *gc)
+{
+ struct intel_pinctrl *pctrl = gpiochip_get_data(gc);
+ int ret, i;
+
+ for (i = 0; i < pctrl->ncommunities; i++) {
+ struct intel_community *community = &pctrl->communities[i];
+
+ ret = intel_gpio_add_community_ranges(pctrl, community);
+ if (ret) {
+ dev_err(pctrl->dev, "failed to add GPIO pin range\n");
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
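
The new add_pin_ranges callback means gpiolib establishes the GPIO-to-pin mapping at the proper point inside gpiochip_add_data(), rather than the driver doing it after registration. A minimal single-range sketch of the same hook (hypothetical name, assuming a 1:1 mapping starting at pin 0):

#include <linux/device.h>
#include <linux/gpio/driver.h>

/* Hypothetical: map GPIOs 0..ngpio-1 onto pins 0..ngpio-1 of the
 * pin controller registered under the parent device's name. */
static int example_add_pin_ranges(struct gpio_chip *gc)
{
	return gpiochip_add_pin_range(gc, dev_name(gc->parent), 0, 0, gc->ngpio);
}

A driver opts in by setting gc->add_pin_ranges = example_add_pin_ranges; before calling devm_gpiochip_add_data().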
static unsigned int intel_gpio_ngpio(const struct intel_pinctrl *pctrl)
{
const struct intel_community *community;
@@ -1205,7 +1193,8 @@ static unsigned int intel_gpio_ngpio(const struct intel_pinctrl *pctrl)
static int intel_gpio_probe(struct intel_pinctrl *pctrl, int irq)
{
- int ret, i;
+ int ret;
+ struct gpio_irq_chip *girq;
pctrl->chip = intel_gpio_chip;
@@ -1214,6 +1203,7 @@ static int intel_gpio_probe(struct intel_pinctrl *pctrl, int irq)
pctrl->chip.label = dev_name(pctrl->dev);
pctrl->chip.parent = pctrl->dev;
pctrl->chip.base = -1;
+ pctrl->chip.add_pin_ranges = intel_gpio_add_pin_ranges;
pctrl->irq = irq;
/* Setup IRQ chip */
@@ -1225,26 +1215,9 @@ static int intel_gpio_probe(struct intel_pinctrl *pctrl, int irq)
pctrl->irqchip.irq_set_wake = intel_gpio_irq_wake;
pctrl->irqchip.flags = IRQCHIP_MASK_ON_SUSPEND;
- ret = devm_gpiochip_add_data(pctrl->dev, &pctrl->chip, pctrl);
- if (ret) {
- dev_err(pctrl->dev, "failed to register gpiochip\n");
- return ret;
- }
-
- for (i = 0; i < pctrl->ncommunities; i++) {
- struct intel_community *community = &pctrl->communities[i];
-
- ret = intel_gpio_add_pin_ranges(pctrl, community);
- if (ret) {
- dev_err(pctrl->dev, "failed to add GPIO pin range\n");
- return ret;
- }
- }
-
/*
- * We need to request the interrupt here (instead of providing chip
- * to the irq directly) because on some platforms several GPIO
- * controllers share the same interrupt line.
+ * On some platforms several GPIO controllers share the same interrupt
+ * line.
*/
ret = devm_request_irq(pctrl->dev, irq, intel_gpio_irq,
IRQF_SHARED | IRQF_NO_THREAD,
@@ -1254,14 +1227,20 @@ static int intel_gpio_probe(struct intel_pinctrl *pctrl, int irq)
return ret;
}
- ret = gpiochip_irqchip_add(&pctrl->chip, &pctrl->irqchip, 0,
- handle_bad_irq, IRQ_TYPE_NONE);
+ girq = &pctrl->chip.irq;
+ girq->chip = &pctrl->irqchip;
+ /* This will let us handle the IRQ in the driver */
+ girq->parent_handler = NULL;
+ girq->num_parents = 0;
+ girq->default_type = IRQ_TYPE_NONE;
+ girq->handler = handle_bad_irq;
+
+ ret = devm_gpiochip_add_data(pctrl->dev, &pctrl->chip, pctrl);
if (ret) {
- dev_err(pctrl->dev, "failed to add irqchip\n");
+ dev_err(pctrl->dev, "failed to register gpiochip\n");
return ret;
}
- gpiochip_set_chained_irqchip(&pctrl->chip, &pctrl->irqchip, irq, NULL);
return 0;
}
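
A chained parent handler (as in the Baytrail sketch) takes exclusive ownership of its interrupt line, so a controller sitting on a shared line, as the comment above notes, must request the IRQ itself and leave the gpio_irq_chip parentless. A compact sketch of that variant (hypothetical names; the handler body is a stub):

#include <linux/device.h>
#include <linux/gpio/driver.h>
#include <linux/interrupt.h>
#include <linux/irq.h>

/* Hypothetical: a real handler scans the controller's status registers
 * and returns IRQ_NONE when another device sharing the line fired. */
static irqreturn_t example_shared_irq(int irq, void *data)
{
	return IRQ_NONE;
}

static int example_setup_shared_irq(struct device *dev, struct gpio_chip *gc,
				    struct irq_chip *ic, int irq)
{
	struct gpio_irq_chip *girq = &gc->irq;
	int ret;

	ret = devm_request_irq(dev, irq, example_shared_irq,
			       IRQF_SHARED | IRQF_NO_THREAD,
			       dev_name(dev), gc);
	if (ret)
		return ret;

	girq->chip = ic;
	girq->parent_handler = NULL;	/* driver owns the parent IRQ itself */
	girq->num_parents = 0;
	girq->default_type = IRQ_TYPE_NONE;
	girq->handler = handle_bad_irq;

	/* registration follows via devm_gpiochip_add_data() */
	return 0;
}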
diff --git a/drivers/pinctrl/intel/pinctrl-intel.h b/drivers/pinctrl/intel/pinctrl-intel.h
index 34b38a321760..c6f066f6d3fb 100644
--- a/drivers/pinctrl/intel/pinctrl-intel.h
+++ b/drivers/pinctrl/intel/pinctrl-intel.h
@@ -10,7 +10,10 @@
#ifndef PINCTRL_INTEL_H
#define PINCTRL_INTEL_H
+#include <linux/gpio/driver.h>
+#include <linux/irq.h>
#include <linux/pm.h>
+#include <linux/spinlock_types.h>
struct pinctrl_pin_desc;
struct platform_device;
@@ -174,6 +177,47 @@ struct intel_pinctrl_soc_data {
size_t ncommunities;
};
+struct intel_pad_context;
+struct intel_community_context;
+
+/**
+ * struct intel_pinctrl_context - context to be saved during suspend-resume
+ * @pads: Opaque context per pad (driver dependent)
+ * @communities: Opaque context per community (driver dependent)
+ */
+struct intel_pinctrl_context {
+ struct intel_pad_context *pads;
+ struct intel_community_context *communities;
+};
+
+/**
+ * struct intel_pinctrl - Intel pinctrl private structure
+ * @dev: Pointer to the device structure
+ * @lock: Lock to serialize register access
+ * @pctldesc: Pin controller description
+ * @pctldev: Pointer to the pin controller device
+ * @chip: GPIO chip in this pin controller
+ * @irqchip: IRQ chip in this pin controller
+ * @soc: SoC/PCH specific pin configuration data
+ * @communities: All communities in this pin controller
+ * @ncommunities: Number of communities in this pin controller
+ * @context: Configuration saved over system sleep
+ * @irq: pinctrl/GPIO chip irq number
+ */
+struct intel_pinctrl {
+ struct device *dev;
+ raw_spinlock_t lock;
+ struct pinctrl_desc pctldesc;
+ struct pinctrl_dev *pctldev;
+ struct gpio_chip chip;
+ struct irq_chip irqchip;
+ const struct intel_pinctrl_soc_data *soc;
+ struct intel_community *communities;
+ size_t ncommunities;
+ struct intel_pinctrl_context context;
+ int irq;
+};
+
int intel_pinctrl_probe_by_hid(struct platform_device *pdev);
int intel_pinctrl_probe_by_uid(struct platform_device *pdev);
diff --git a/drivers/pinctrl/intel/pinctrl-lynxpoint.c b/drivers/pinctrl/intel/pinctrl-lynxpoint.c
new file mode 100644
index 000000000000..e928742c7181
--- /dev/null
+++ b/drivers/pinctrl/intel/pinctrl-lynxpoint.c
@@ -0,0 +1,975 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Intel Lynxpoint PCH pinctrl/GPIO driver
+ *
+ * Copyright (c) 2012, 2019, Intel Corporation
+ * Authors: Mathias Nyman <mathias.nyman@linux.intel.com>
+ * Andy Shevchenko <andriy.shevchenko@linux.intel.com>
+ */
+
+#include <linux/acpi.h>
+#include <linux/bitops.h>
+#include <linux/gpio/driver.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+#include <linux/slab.h>
+#include <linux/types.h>
+
+#include <linux/pinctrl/pinctrl.h>
+#include <linux/pinctrl/pinmux.h>
+#include <linux/pinctrl/pinconf.h>
+#include <linux/pinctrl/pinconf-generic.h>
+
+#include "pinctrl-intel.h"
+
+#define COMMUNITY(p, n) \
+ { \
+ .pin_base = (p), \
+ .npins = (n), \
+ }
+
+static const struct pinctrl_pin_desc lptlp_pins[] = {
+ PINCTRL_PIN(0, "GP0_UART1_RXD"),
+ PINCTRL_PIN(1, "GP1_UART1_TXD"),
+ PINCTRL_PIN(2, "GP2_UART1_RTSB"),
+ PINCTRL_PIN(3, "GP3_UART1_CTSB"),
+ PINCTRL_PIN(4, "GP4_I2C0_SDA"),
+ PINCTRL_PIN(5, "GP5_I2C0_SCL"),
+ PINCTRL_PIN(6, "GP6_I2C1_SDA"),
+ PINCTRL_PIN(7, "GP7_I2C1_SCL"),
+ PINCTRL_PIN(8, "GP8"),
+ PINCTRL_PIN(9, "GP9"),
+ PINCTRL_PIN(10, "GP10"),
+ PINCTRL_PIN(11, "GP11_SMBALERTB"),
+ PINCTRL_PIN(12, "GP12_LANPHYPC"),
+ PINCTRL_PIN(13, "GP13"),
+ PINCTRL_PIN(14, "GP14"),
+ PINCTRL_PIN(15, "GP15"),
+ PINCTRL_PIN(16, "GP16_MGPIO9"),
+ PINCTRL_PIN(17, "GP17_MGPIO10"),
+ PINCTRL_PIN(18, "GP18_SRC0CLKRQB"),
+ PINCTRL_PIN(19, "GP19_SRC1CLKRQB"),
+ PINCTRL_PIN(20, "GP20_SRC2CLKRQB"),
+ PINCTRL_PIN(21, "GP21_SRC3CLKRQB"),
+ PINCTRL_PIN(22, "GP22_SRC4CLKRQB_TRST2"),
+ PINCTRL_PIN(23, "GP23_SRC5CLKRQB_TDI2"),
+ PINCTRL_PIN(24, "GP24_MGPIO0"),
+ PINCTRL_PIN(25, "GP25_USBWAKEOUTB"),
+ PINCTRL_PIN(26, "GP26_MGPIO5"),
+ PINCTRL_PIN(27, "GP27_MGPIO6"),
+ PINCTRL_PIN(28, "GP28_MGPIO7"),
+ PINCTRL_PIN(29, "GP29_SLP_WLANB_MGPIO3"),
+ PINCTRL_PIN(30, "GP30_SUSWARNB_SUSPWRDNACK_MGPIO1"),
+ PINCTRL_PIN(31, "GP31_ACPRESENT_MGPIO2"),
+ PINCTRL_PIN(32, "GP32_CLKRUNB"),
+ PINCTRL_PIN(33, "GP33_DEVSLP0"),
+ PINCTRL_PIN(34, "GP34_SATA0XPCIE6L3B_SATA0GP"),
+ PINCTRL_PIN(35, "GP35_SATA1XPCIE6L2B_SATA1GP"),
+ PINCTRL_PIN(36, "GP36_SATA2XPCIE6L1B_SATA2GP"),
+ PINCTRL_PIN(37, "GP37_SATA3XPCIE6L0B_SATA3GP"),
+ PINCTRL_PIN(38, "GP38_DEVSLP1"),
+ PINCTRL_PIN(39, "GP39_DEVSLP2"),
+ PINCTRL_PIN(40, "GP40_OC0B"),
+ PINCTRL_PIN(41, "GP41_OC1B"),
+ PINCTRL_PIN(42, "GP42_OC2B"),
+ PINCTRL_PIN(43, "GP43_OC3B"),
+ PINCTRL_PIN(44, "GP44"),
+ PINCTRL_PIN(45, "GP45_TMS2"),
+ PINCTRL_PIN(46, "GP46_TDO2"),
+ PINCTRL_PIN(47, "GP47"),
+ PINCTRL_PIN(48, "GP48"),
+ PINCTRL_PIN(49, "GP49"),
+ PINCTRL_PIN(50, "GP50"),
+ PINCTRL_PIN(51, "GP51_GSXDOUT"),
+ PINCTRL_PIN(52, "GP52_GSXSLOAD"),
+ PINCTRL_PIN(53, "GP53_GSXDIN"),
+ PINCTRL_PIN(54, "GP54_GSXSRESETB"),
+ PINCTRL_PIN(55, "GP55_GSXCLK"),
+ PINCTRL_PIN(56, "GP56"),
+ PINCTRL_PIN(57, "GP57"),
+ PINCTRL_PIN(58, "GP58"),
+ PINCTRL_PIN(59, "GP59"),
+ PINCTRL_PIN(60, "GP60_SML0ALERTB_MGPIO4"),
+ PINCTRL_PIN(61, "GP61_SUS_STATB"),
+ PINCTRL_PIN(62, "GP62_SUSCLK"),
+ PINCTRL_PIN(63, "GP63_SLP_S5B"),
+ PINCTRL_PIN(64, "GP64_SDIO_CLK"),
+ PINCTRL_PIN(65, "GP65_SDIO_CMD"),
+ PINCTRL_PIN(66, "GP66_SDIO_D0"),
+ PINCTRL_PIN(67, "GP67_SDIO_D1"),
+ PINCTRL_PIN(68, "GP68_SDIO_D2"),
+ PINCTRL_PIN(69, "GP69_SDIO_D3"),
+ PINCTRL_PIN(70, "GP70_SDIO_POWER_EN"),
+ PINCTRL_PIN(71, "GP71_MPHYPC"),
+ PINCTRL_PIN(72, "GP72_BATLOWB"),
+ PINCTRL_PIN(73, "GP73_SML1ALERTB_PCHHOTB_MGPIO8"),
+ PINCTRL_PIN(74, "GP74_SML1DATA_MGPIO12"),
+ PINCTRL_PIN(75, "GP75_SML1CLK_MGPIO11"),
+ PINCTRL_PIN(76, "GP76_BMBUSYB"),
+ PINCTRL_PIN(77, "GP77_PIRQAB"),
+ PINCTRL_PIN(78, "GP78_PIRQBB"),
+ PINCTRL_PIN(79, "GP79_PIRQCB"),
+ PINCTRL_PIN(80, "GP80_PIRQDB"),
+ PINCTRL_PIN(81, "GP81_SPKR"),
+ PINCTRL_PIN(82, "GP82_RCINB"),
+ PINCTRL_PIN(83, "GP83_GSPI0_CSB"),
+ PINCTRL_PIN(84, "GP84_GSPI0_CLK"),
+ PINCTRL_PIN(85, "GP85_GSPI0_MISO"),
+ PINCTRL_PIN(86, "GP86_GSPI0_MOSI"),
+ PINCTRL_PIN(87, "GP87_GSPI1_CSB"),
+ PINCTRL_PIN(88, "GP88_GSPI1_CLK"),
+ PINCTRL_PIN(89, "GP89_GSPI1_MISO"),
+ PINCTRL_PIN(90, "GP90_GSPI1_MOSI"),
+ PINCTRL_PIN(91, "GP91_UART0_RXD"),
+ PINCTRL_PIN(92, "GP92_UART0_TXD"),
+ PINCTRL_PIN(93, "GP93_UART0_RTSB"),
+ PINCTRL_PIN(94, "GP94_UART0_CTSB"),
+};
+
+static const struct intel_community lptlp_communities[] = {
+ COMMUNITY(0, 95),
+};
+
+static const struct intel_pinctrl_soc_data lptlp_soc_data = {
+ .pins = lptlp_pins,
+ .npins = ARRAY_SIZE(lptlp_pins),
+ .communities = lptlp_communities,
+ .ncommunities = ARRAY_SIZE(lptlp_communities),
+};
+
+/* LynxPoint chipset has support for 95 GPIO pins */
+
+#define LP_NUM_GPIO 95
+
+/* Bitmapped register offsets */
+#define LP_ACPI_OWNED 0x00 /* Bitmap, set by bios, 0: pin reserved for ACPI */
+#define LP_IRQ2IOXAPIC 0x10 /* Bitmap, set by bios, 1: pin routed to IOxAPIC */
+#define LP_GC 0x7C /* set APIC IRQ to IRQ14 or IRQ15 for all pins */
+#define LP_INT_STAT 0x80
+#define LP_INT_ENABLE 0x90
+
+/* Each pin has two 32 bit config registers, starting at 0x100 */
+#define LP_CONFIG1 0x100
+#define LP_CONFIG2 0x104
+
+/* LP_CONFIG1 reg bits */
+#define OUT_LVL_BIT BIT(31)
+#define IN_LVL_BIT BIT(30)
+#define TRIG_SEL_BIT BIT(4) /* 0: Edge, 1: Level */
+#define INT_INV_BIT BIT(3) /* Invert interrupt triggering */
+#define DIR_BIT BIT(2) /* 0: Output, 1: Input */
+#define USE_SEL_MASK GENMASK(1, 0) /* 0: Native, 1: GPIO, ... */
+#define USE_SEL_NATIVE (0 << 0)
+#define USE_SEL_GPIO (1 << 0)
+
+/* LP_CONFIG2 reg bits */
+#define GPINDIS_BIT BIT(2) /* disable input sensing */
+#define GPIWP_MASK GENMASK(1, 0) /* weak pull options */
+#define GPIWP_NONE 0 /* none */
+#define GPIWP_DOWN 1 /* weak pull down */
+#define GPIWP_UP 2 /* weak pull up */
+
+/*
+ * Lynxpoint GPIOs are controlled through both bitmapped registers and
+ * per-GPIO specific registers. The bitmapped registers are in chunks of
+ * 3 x 32-bit registers to cover all 95 GPIOs.
+ *
+ * The per-GPIO specific registers consist of two 32-bit registers per
+ * GPIO (LP_CONFIG1 and LP_CONFIG2); with 95 GPIOs there is a total of
+ * 190 config registers.
+ *
+ * A simplified view of the register layout looks like this:
+ *
+ * LP_ACPI_OWNED[31:0] gpio ownerships for gpios 0-31 (bitmapped registers)
+ * LP_ACPI_OWNED[63:32] gpio ownerships for gpios 32-63
+ * LP_ACPI_OWNED[94:64] gpio ownerships for gpios 64-94
+ * ...
+ * LP_INT_ENABLE[31:0] ...
+ * LP_INT_ENABLE[63:32] ...
+ * LP_INT_ENABLE[94:64] ...
+ * LP0_CONFIG1 (gpio 0) config1 reg for gpio 0 (per gpio registers)
+ * LP0_CONFIG2 (gpio 0) config2 reg for gpio 0
+ * LP1_CONFIG1 (gpio 1) config1 reg for gpio 1
+ * LP1_CONFIG2 (gpio 1) config2 reg for gpio 1
+ * LP2_CONFIG1 (gpio 2) ...
+ * LP2_CONFIG2 (gpio 2) ...
+ * ...
+ * LP94_CONFIG1 (gpio 94) ...
+ * LP94_CONFIG2 (gpio 94) ...
+ *
+ * IOxAPIC redirection map applies only to gpios 8-10, 13-14, 45-55.
+ */
+
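
To make this concrete, here is the offset arithmetic that lp_gpio_reg() below performs, worked for one arbitrarily chosen pin (pin 40, in a community whose pin_base is 0):

/*
 * Worked example for pin 40 (derived from lp_gpio_reg() below):
 *
 *   bitmapped registers:  reg_offset = (40 / 32) * 4 = 0x04
 *     LP_INT_STAT for pin 40 -> regs + 0x80 + 0x04 = regs + 0x84,
 *     with the pin's bit at position 40 % 32 = 8
 *
 *   per-pin registers:    reg_offset = 40 * 8 = 0x140
 *     LP_CONFIG1 for pin 40  -> regs + 0x140 + 0x100 = regs + 0x240
 */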
+static struct intel_community *lp_get_community(struct intel_pinctrl *lg,
+ unsigned int pin)
+{
+ struct intel_community *comm;
+ int i;
+
+ for (i = 0; i < lg->ncommunities; i++) {
+ comm = &lg->communities[i];
+ if (pin < comm->pin_base + comm->npins && pin >= comm->pin_base)
+ return comm;
+ }
+
+ return NULL;
+}
+
+static void __iomem *lp_gpio_reg(struct gpio_chip *chip, unsigned int offset,
+ int reg)
+{
+ struct intel_pinctrl *lg = gpiochip_get_data(chip);
+ struct intel_community *comm;
+ int reg_offset;
+
+ comm = lp_get_community(lg, offset);
+ if (!comm)
+ return NULL;
+
+ offset -= comm->pin_base;
+
+ if (reg == LP_CONFIG1 || reg == LP_CONFIG2)
+ /* per gpio specific config registers */
+ reg_offset = offset * 8;
+ else
+ /* bitmapped registers */
+ reg_offset = (offset / 32) * 4;
+
+ return comm->regs + reg_offset + reg;
+}
+
+static bool lp_gpio_acpi_use(struct intel_pinctrl *lg, unsigned int pin)
+{
+ void __iomem *acpi_use;
+
+ acpi_use = lp_gpio_reg(&lg->chip, pin, LP_ACPI_OWNED);
+ if (!acpi_use)
+ return true;
+
+ return !(ioread32(acpi_use) & BIT(pin % 32));
+}
+
+static bool lp_gpio_ioxapic_use(struct gpio_chip *chip, unsigned int offset)
+{
+ void __iomem *ioxapic_use = lp_gpio_reg(chip, offset, LP_IRQ2IOXAPIC);
+ u32 value;
+
+ value = ioread32(ioxapic_use);
+
+ if (offset >= 8 && offset <= 10)
+ return !!(value & BIT(offset - 8 + 0));
+ if (offset >= 13 && offset <= 14)
+ return !!(value & BIT(offset - 13 + 3));
+ if (offset >= 45 && offset <= 55)
+ return !!(value & BIT(offset - 45 + 5));
+
+ return false;
+}
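
Spelled out, the three shifts above map the routable pin ranges onto contiguous bits of LP_IRQ2IOXAPIC:

/*
 * pins  8..10  ->  LP_IRQ2IOXAPIC bits  0..2
 * pins 13..14  ->  LP_IRQ2IOXAPIC bits  3..4
 * pins 45..55  ->  LP_IRQ2IOXAPIC bits  5..15
 */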
+
+static int lp_get_groups_count(struct pinctrl_dev *pctldev)
+{
+ struct intel_pinctrl *lg = pinctrl_dev_get_drvdata(pctldev);
+
+ return lg->soc->ngroups;
+}
+
+static const char *lp_get_group_name(struct pinctrl_dev *pctldev,
+ unsigned int selector)
+{
+ struct intel_pinctrl *lg = pinctrl_dev_get_drvdata(pctldev);
+
+ return lg->soc->groups[selector].name;
+}
+
+static int lp_get_group_pins(struct pinctrl_dev *pctldev,
+ unsigned int selector,
+ const unsigned int **pins,
+ unsigned int *num_pins)
+{
+ struct intel_pinctrl *lg = pinctrl_dev_get_drvdata(pctldev);
+
+ *pins = lg->soc->groups[selector].pins;
+ *num_pins = lg->soc->groups[selector].npins;
+
+ return 0;
+}
+
+static void lp_pin_dbg_show(struct pinctrl_dev *pctldev, struct seq_file *s,
+ unsigned int pin)
+{
+ struct intel_pinctrl *lg = pinctrl_dev_get_drvdata(pctldev);
+ void __iomem *reg = lp_gpio_reg(&lg->chip, pin, LP_CONFIG1);
+ void __iomem *conf2 = lp_gpio_reg(&lg->chip, pin, LP_CONFIG2);
+ u32 value, mode;
+
+ value = ioread32(reg);
+
+ mode = value & USE_SEL_MASK;
+ if (mode == USE_SEL_GPIO)
+ seq_puts(s, "GPIO ");
+ else
+ seq_printf(s, "mode %d ", mode);
+
+ seq_printf(s, "0x%08x 0x%08x", value, ioread32(conf2));
+
+ if (lp_gpio_acpi_use(lg, pin))
+ seq_puts(s, " [ACPI]");
+}
+
+static const struct pinctrl_ops lptlp_pinctrl_ops = {
+ .get_groups_count = lp_get_groups_count,
+ .get_group_name = lp_get_group_name,
+ .get_group_pins = lp_get_group_pins,
+ .pin_dbg_show = lp_pin_dbg_show,
+};
+
+static int lp_get_functions_count(struct pinctrl_dev *pctldev)
+{
+ struct intel_pinctrl *lg = pinctrl_dev_get_drvdata(pctldev);
+
+ return lg->soc->nfunctions;
+}
+
+static const char *lp_get_function_name(struct pinctrl_dev *pctldev,
+ unsigned int selector)
+{
+ struct intel_pinctrl *lg = pinctrl_dev_get_drvdata(pctldev);
+
+ return lg->soc->functions[selector].name;
+}
+
+static int lp_get_function_groups(struct pinctrl_dev *pctldev,
+ unsigned int selector,
+ const char * const **groups,
+ unsigned int *num_groups)
+{
+ struct intel_pinctrl *lg = pinctrl_dev_get_drvdata(pctldev);
+
+ *groups = lg->soc->functions[selector].groups;
+ *num_groups = lg->soc->functions[selector].ngroups;
+
+ return 0;
+}
+
+static int lp_pinmux_set_mux(struct pinctrl_dev *pctldev,
+ unsigned int function, unsigned int group)
+{
+ struct intel_pinctrl *lg = pinctrl_dev_get_drvdata(pctldev);
+ const struct intel_pingroup *grp = &lg->soc->groups[group];
+ unsigned long flags;
+ int i;
+
+ raw_spin_lock_irqsave(&lg->lock, flags);
+
+ /* Now enable the mux setting for each pin in the group */
+ for (i = 0; i < grp->npins; i++) {
+ void __iomem *reg = lp_gpio_reg(&lg->chip, grp->pins[i], LP_CONFIG1);
+ u32 value;
+
+ value = ioread32(reg);
+
+ value &= ~USE_SEL_MASK;
+ if (grp->modes)
+ value |= grp->modes[i];
+ else
+ value |= grp->mode;
+
+ iowrite32(value, reg);
+ }
+
+ raw_spin_unlock_irqrestore(&lg->lock, flags);
+
+ return 0;
+}
+
+static int lp_gpio_request_enable(struct pinctrl_dev *pctldev,
+ struct pinctrl_gpio_range *range,
+ unsigned int pin)
+{
+ struct intel_pinctrl *lg = pinctrl_dev_get_drvdata(pctldev);
+ void __iomem *reg = lp_gpio_reg(&lg->chip, pin, LP_CONFIG1);
+ void __iomem *conf2 = lp_gpio_reg(&lg->chip, pin, LP_CONFIG2);
+ unsigned long flags;
+ u32 value;
+
+ pm_runtime_get(lg->dev);
+
+ raw_spin_lock_irqsave(&lg->lock, flags);
+
+ /*
+ * Reconfigure pin to GPIO mode if needed and issue a warning,
+ * since we expect firmware to configure it properly.
+ */
+ value = ioread32(reg);
+ if ((value & USE_SEL_MASK) != USE_SEL_GPIO) {
+ iowrite32((value & ~USE_SEL_MASK) | USE_SEL_GPIO, reg);
+ dev_warn(lg->dev, FW_BUG "pin %u forcibly reconfigured as GPIO\n", pin);
+ }
+
+ /* Enable input sensing */
+ iowrite32(ioread32(conf2) & ~GPINDIS_BIT, conf2);
+
+ raw_spin_unlock_irqrestore(&lg->lock, flags);
+
+ return 0;
+}
+
+static void lp_gpio_disable_free(struct pinctrl_dev *pctldev,
+ struct pinctrl_gpio_range *range,
+ unsigned int pin)
+{
+ struct intel_pinctrl *lg = pinctrl_dev_get_drvdata(pctldev);
+ void __iomem *conf2 = lp_gpio_reg(&lg->chip, pin, LP_CONFIG2);
+ unsigned long flags;
+
+ raw_spin_lock_irqsave(&lg->lock, flags);
+
+ /* Disable input sensing */
+ iowrite32(ioread32(conf2) | GPINDIS_BIT, conf2);
+
+ raw_spin_unlock_irqrestore(&lg->lock, flags);
+
+ pm_runtime_put(lg->dev);
+}
+
+static int lp_gpio_set_direction(struct pinctrl_dev *pctldev,
+ struct pinctrl_gpio_range *range,
+ unsigned int pin, bool input)
+{
+ struct intel_pinctrl *lg = pinctrl_dev_get_drvdata(pctldev);
+ void __iomem *reg = lp_gpio_reg(&lg->chip, pin, LP_CONFIG1);
+ unsigned long flags;
+ u32 value;
+
+ raw_spin_lock_irqsave(&lg->lock, flags);
+
+ value = ioread32(reg);
+ value &= ~DIR_BIT;
+ if (input) {
+ value |= DIR_BIT;
+ } else {
+ /*
+ * Before making any direction modifications, do a check if GPIO
+ * is set for direct IRQ. On Lynxpoint, setting GPIO to output
+ * does not make sense, so let's at least warn the caller before
+ * they shoot themselves in the foot.
+ */
+ WARN(lp_gpio_ioxapic_use(&lg->chip, pin),
+ "Potential Error: Setting GPIO to output with IOxAPIC redirection");
+ }
+ iowrite32(value, reg);
+
+ raw_spin_unlock_irqrestore(&lg->lock, flags);
+
+ return 0;
+}
+
+static const struct pinmux_ops lptlp_pinmux_ops = {
+ .get_functions_count = lp_get_functions_count,
+ .get_function_name = lp_get_function_name,
+ .get_function_groups = lp_get_function_groups,
+ .set_mux = lp_pinmux_set_mux,
+ .gpio_request_enable = lp_gpio_request_enable,
+ .gpio_disable_free = lp_gpio_disable_free,
+ .gpio_set_direction = lp_gpio_set_direction,
+};
+
+static int lp_pin_config_get(struct pinctrl_dev *pctldev, unsigned int pin,
+ unsigned long *config)
+{
+ struct intel_pinctrl *lg = pinctrl_dev_get_drvdata(pctldev);
+ void __iomem *conf2 = lp_gpio_reg(&lg->chip, pin, LP_CONFIG2);
+ enum pin_config_param param = pinconf_to_config_param(*config);
+ unsigned long flags;
+ u32 value, pull;
+ u16 arg = 0;
+
+ raw_spin_lock_irqsave(&lg->lock, flags);
+ value = ioread32(conf2);
+ raw_spin_unlock_irqrestore(&lg->lock, flags);
+
+ pull = value & GPIWP_MASK;
+
+ switch (param) {
+ case PIN_CONFIG_BIAS_DISABLE:
+ if (pull)
+ return -EINVAL;
+ break;
+ case PIN_CONFIG_BIAS_PULL_DOWN:
+ if (pull != GPIWP_DOWN)
+ return -EINVAL;
+
+ arg = 1;
+ break;
+ case PIN_CONFIG_BIAS_PULL_UP:
+ if (pull != GPIWP_UP)
+ return -EINVAL;
+
+ arg = 1;
+ break;
+ default:
+ return -ENOTSUPP;
+ }
+
+ *config = pinconf_to_config_packed(param, arg);
+
+ return 0;
+}
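
For reference, the packed value handled here follows the generic pinconf encoding: the parameter sits in the low bits of the unsigned long and the argument in the high bits. A tiny round-trip sketch (hypothetical function):

#include <linux/kernel.h>
#include <linux/pinctrl/pinconf-generic.h>

static void example_pack_roundtrip(void)
{
	/* pack PIN_CONFIG_BIAS_PULL_UP with argument 1 ... */
	unsigned long cfg = pinconf_to_config_packed(PIN_CONFIG_BIAS_PULL_UP, 1);

	/* ... and recover both halves */
	WARN_ON(pinconf_to_config_param(cfg) != PIN_CONFIG_BIAS_PULL_UP);
	WARN_ON(pinconf_to_config_argument(cfg) != 1);
}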
+
+static int lp_pin_config_set(struct pinctrl_dev *pctldev, unsigned int pin,
+ unsigned long *configs, unsigned int num_configs)
+{
+ struct intel_pinctrl *lg = pinctrl_dev_get_drvdata(pctldev);
+ void __iomem *conf2 = lp_gpio_reg(&lg->chip, pin, LP_CONFIG2);
+ enum pin_config_param param;
+ unsigned long flags;
+ int i, ret = 0;
+ u32 value;
+
+ raw_spin_lock_irqsave(&lg->lock, flags);
+
+ value = ioread32(conf2);
+
+ for (i = 0; i < num_configs; i++) {
+ param = pinconf_to_config_param(configs[i]);
+
+ switch (param) {
+ case PIN_CONFIG_BIAS_DISABLE:
+ value &= ~GPIWP_MASK;
+ break;
+ case PIN_CONFIG_BIAS_PULL_DOWN:
+ value &= ~GPIWP_MASK;
+ value |= GPIWP_DOWN;
+ break;
+ case PIN_CONFIG_BIAS_PULL_UP:
+ value &= ~GPIWP_MASK;
+ value |= GPIWP_UP;
+ break;
+ default:
+ ret = -ENOTSUPP;
+ }
+
+ if (ret)
+ break;
+ }
+
+ if (!ret)
+ iowrite32(value, conf2);
+
+ raw_spin_unlock_irqrestore(&lg->lock, flags);
+
+ return ret;
+}
+
+static const struct pinconf_ops lptlp_pinconf_ops = {
+ .is_generic = true,
+ .pin_config_get = lp_pin_config_get,
+ .pin_config_set = lp_pin_config_set,
+};
+
+static const struct pinctrl_desc lptlp_pinctrl_desc = {
+ .pctlops = &lptlp_pinctrl_ops,
+ .pmxops = &lptlp_pinmux_ops,
+ .confops = &lptlp_pinconf_ops,
+ .owner = THIS_MODULE,
+};
+
+static int lp_gpio_get(struct gpio_chip *chip, unsigned int offset)
+{
+ void __iomem *reg = lp_gpio_reg(chip, offset, LP_CONFIG1);
+
+ return !!(ioread32(reg) & IN_LVL_BIT);
+}
+
+static void lp_gpio_set(struct gpio_chip *chip, unsigned int offset, int value)
+{
+ struct intel_pinctrl *lg = gpiochip_get_data(chip);
+ void __iomem *reg = lp_gpio_reg(chip, offset, LP_CONFIG1);
+ unsigned long flags;
+
+ raw_spin_lock_irqsave(&lg->lock, flags);
+
+ if (value)
+ iowrite32(ioread32(reg) | OUT_LVL_BIT, reg);
+ else
+ iowrite32(ioread32(reg) & ~OUT_LVL_BIT, reg);
+
+ raw_spin_unlock_irqrestore(&lg->lock, flags);
+}
+
+static int lp_gpio_direction_input(struct gpio_chip *chip, unsigned int offset)
+{
+ return pinctrl_gpio_direction_input(chip->base + offset);
+}
+
+static int lp_gpio_direction_output(struct gpio_chip *chip, unsigned int offset,
+ int value)
+{
+ lp_gpio_set(chip, offset, value);
+
+ return pinctrl_gpio_direction_output(chip->base + offset);
+}
+
+static int lp_gpio_get_direction(struct gpio_chip *chip, unsigned int offset)
+{
+ void __iomem *reg = lp_gpio_reg(chip, offset, LP_CONFIG1);
+
+ if (ioread32(reg) & DIR_BIT)
+ return GPIO_LINE_DIRECTION_IN;
+
+ return GPIO_LINE_DIRECTION_OUT;
+}
+
+static void lp_gpio_irq_handler(struct irq_desc *desc)
+{
+ struct irq_data *data = irq_desc_get_irq_data(desc);
+ struct gpio_chip *gc = irq_desc_get_handler_data(desc);
+ struct intel_pinctrl *lg = gpiochip_get_data(gc);
+ struct irq_chip *chip = irq_data_get_irq_chip(data);
+ void __iomem *reg, *ena;
+ unsigned long pending;
+ u32 base, pin;
+
+ /* check from GPIO controller which pin triggered the interrupt */
+ for (base = 0; base < lg->chip.ngpio; base += 32) {
+ reg = lp_gpio_reg(&lg->chip, base, LP_INT_STAT);
+ ena = lp_gpio_reg(&lg->chip, base, LP_INT_ENABLE);
+
+ /* Only interrupts that are enabled */
+ pending = ioread32(reg) & ioread32(ena);
+
+ for_each_set_bit(pin, &pending, 32) {
+ unsigned int irq;
+
+ irq = irq_find_mapping(lg->chip.irq.domain, base + pin);
+ generic_handle_irq(irq);
+ }
+ }
+ chip->irq_eoi(data);
+}
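
One step in the demultiplexer above is worth spelling out:

/*
 * irq_find_mapping() translates the hardware pin number (hwirq) into the
 * Linux virq allocated by the gpiochip's IRQ domain; generic_handle_irq()
 * then runs the flow handler that lp_irq_set_type() installed
 * (handle_edge_irq or handle_level_irq).
 */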
+
+static void lp_irq_ack(struct irq_data *d)
+{
+ struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
+ struct intel_pinctrl *lg = gpiochip_get_data(gc);
+ u32 hwirq = irqd_to_hwirq(d);
+ void __iomem *reg = lp_gpio_reg(&lg->chip, hwirq, LP_INT_STAT);
+ unsigned long flags;
+
+ raw_spin_lock_irqsave(&lg->lock, flags);
+ iowrite32(BIT(hwirq % 32), reg);
+ raw_spin_unlock_irqrestore(&lg->lock, flags);
+}
+
+static void lp_irq_unmask(struct irq_data *d)
+{
+}
+
+static void lp_irq_mask(struct irq_data *d)
+{
+}
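
A note on the two empty callbacks above:

/*
 * lp_irq_mask()/lp_irq_unmask() only satisfy the struct irq_chip
 * interface; the LP_INT_ENABLE bit itself is toggled in
 * lp_irq_enable()/lp_irq_disable() below.
 */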
+
+static void lp_irq_enable(struct irq_data *d)
+{
+ struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
+ struct intel_pinctrl *lg = gpiochip_get_data(gc);
+ u32 hwirq = irqd_to_hwirq(d);
+ void __iomem *reg = lp_gpio_reg(&lg->chip, hwirq, LP_INT_ENABLE);
+ unsigned long flags;
+
+ raw_spin_lock_irqsave(&lg->lock, flags);
+ iowrite32(ioread32(reg) | BIT(hwirq % 32), reg);
+ raw_spin_unlock_irqrestore(&lg->lock, flags);
+}
+
+static void lp_irq_disable(struct irq_data *d)
+{
+ struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
+ struct intel_pinctrl *lg = gpiochip_get_data(gc);
+ u32 hwirq = irqd_to_hwirq(d);
+ void __iomem *reg = lp_gpio_reg(&lg->chip, hwirq, LP_INT_ENABLE);
+ unsigned long flags;
+
+ raw_spin_lock_irqsave(&lg->lock, flags);
+ iowrite32(ioread32(reg) & ~BIT(hwirq % 32), reg);
+ raw_spin_unlock_irqrestore(&lg->lock, flags);
+}
+
+static int lp_irq_set_type(struct irq_data *d, unsigned int type)
+{
+ struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
+ struct intel_pinctrl *lg = gpiochip_get_data(gc);
+ u32 hwirq = irqd_to_hwirq(d);
+ void __iomem *reg = lp_gpio_reg(&lg->chip, hwirq, LP_CONFIG1);
+ unsigned long flags;
+ u32 value;
+
+ if (hwirq >= lg->chip.ngpio)
+ return -EINVAL;
+
+ /* Fail if BIOS reserved pin for ACPI use */
+ if (lp_gpio_acpi_use(lg, hwirq)) {
+ dev_err(lg->dev, "pin %u can't be used as IRQ\n", hwirq);
+ return -EBUSY;
+ }
+
+ raw_spin_lock_irqsave(&lg->lock, flags);
+ value = ioread32(reg);
+
+ /* set both TRIG_SEL and INV bits to 0 for rising edge */
+ if (type & IRQ_TYPE_EDGE_RISING)
+ value &= ~(TRIG_SEL_BIT | INT_INV_BIT);
+
+ /* TRIG_SEL bit 0, INV bit 1 for falling edge */
+ if (type & IRQ_TYPE_EDGE_FALLING)
+ value = (value | INT_INV_BIT) & ~TRIG_SEL_BIT;
+
+ /* TRIG_SEL bit 1, INV bit 0 for level low */
+ if (type & IRQ_TYPE_LEVEL_LOW)
+ value = (value | TRIG_SEL_BIT) & ~INT_INV_BIT;
+
+ /* TRIG_SEL bit 1, INV bit 1 for level high */
+ if (type & IRQ_TYPE_LEVEL_HIGH)
+ value |= TRIG_SEL_BIT | INT_INV_BIT;
+
+ iowrite32(value, reg);
+
+ if (type & IRQ_TYPE_EDGE_BOTH)
+ irq_set_handler_locked(d, handle_edge_irq);
+ else if (type & IRQ_TYPE_LEVEL_MASK)
+ irq_set_handler_locked(d, handle_level_irq);
+
+ raw_spin_unlock_irqrestore(&lg->lock, flags);
+
+ return 0;
+}
+
+static struct irq_chip lp_irqchip = {
+ .name = "LP-GPIO",
+ .irq_ack = lp_irq_ack,
+ .irq_mask = lp_irq_mask,
+ .irq_unmask = lp_irq_unmask,
+ .irq_enable = lp_irq_enable,
+ .irq_disable = lp_irq_disable,
+ .irq_set_type = lp_irq_set_type,
+ .flags = IRQCHIP_SKIP_SET_WAKE,
+};
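
For readers unfamiliar with the flag used above:

/*
 * IRQCHIP_SKIP_SET_WAKE lets irq_set_irq_wake() succeed without an
 * irq_set_wake() callback, so wake requests on these lines are accepted
 * even though the chip provides no wake hook.
 */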
+
+static int lp_gpio_irq_init_hw(struct gpio_chip *chip)
+{
+ struct intel_pinctrl *lg = gpiochip_get_data(chip);
+ void __iomem *reg;
+ unsigned int base;
+
+ for (base = 0; base < lg->chip.ngpio; base += 32) {
+ /* disable gpio pin interrupts */
+ reg = lp_gpio_reg(&lg->chip, base, LP_INT_ENABLE);
+ iowrite32(0, reg);
+ /* Clear interrupt status register */
+ reg = lp_gpio_reg(&lg->chip, base, LP_INT_STAT);
+ iowrite32(0xffffffff, reg);
+ }
+
+ return 0;
+}
+
+static int lp_gpio_add_pin_ranges(struct gpio_chip *chip)
+{
+ struct intel_pinctrl *lg = gpiochip_get_data(chip);
+ struct device *dev = lg->dev;
+ int ret;
+
+ ret = gpiochip_add_pin_range(chip, dev_name(dev), 0, 0, lg->soc->npins);
+ if (ret)
+ dev_err(dev, "failed to add GPIO pin range\n");
+
+ return ret;
+}
+
+static int lp_gpio_probe(struct platform_device *pdev)
+{
+ const struct intel_pinctrl_soc_data *soc;
+ struct intel_pinctrl *lg;
+ struct gpio_chip *gc;
+ struct resource *io_rc, *irq_rc;
+ struct device *dev = &pdev->dev;
+ void __iomem *regs;
+ unsigned int i;
+ int ret;
+
+ soc = (const struct intel_pinctrl_soc_data *)device_get_match_data(dev);
+ if (!soc)
+ return -ENODEV;
+
+ lg = devm_kzalloc(dev, sizeof(*lg), GFP_KERNEL);
+ if (!lg)
+ return -ENOMEM;
+
+ lg->dev = dev;
+ lg->soc = soc;
+
+ lg->ncommunities = lg->soc->ncommunities;
+ lg->communities = devm_kcalloc(dev, lg->ncommunities,
+ sizeof(*lg->communities), GFP_KERNEL);
+ if (!lg->communities)
+ return -ENOMEM;
+
+ lg->pctldesc = lptlp_pinctrl_desc;
+ lg->pctldesc.name = dev_name(dev);
+ lg->pctldesc.pins = lg->soc->pins;
+ lg->pctldesc.npins = lg->soc->npins;
+
+ lg->pctldev = devm_pinctrl_register(dev, &lg->pctldesc, lg);
+ if (IS_ERR(lg->pctldev)) {
+ dev_err(dev, "failed to register pinctrl driver\n");
+ return PTR_ERR(lg->pctldev);
+ }
+
+ platform_set_drvdata(pdev, lg);
+
+ io_rc = platform_get_resource(pdev, IORESOURCE_IO, 0);
+ if (!io_rc) {
+ dev_err(dev, "missing IO resources\n");
+ return -EINVAL;
+ }
+
+ regs = devm_ioport_map(dev, io_rc->start, resource_size(io_rc));
+ if (!regs) {
+ dev_err(dev, "failed mapping IO region %pR\n", &io_rc);
+ return -EBUSY;
+ }
+
+ for (i = 0; i < lg->soc->ncommunities; i++) {
+ struct intel_community *comm = &lg->communities[i];
+
+ *comm = lg->soc->communities[i];
+
+ comm->regs = regs;
+ comm->pad_regs = regs + 0x100;
+ }
+
+ raw_spin_lock_init(&lg->lock);
+
+ gc = &lg->chip;
+ gc->label = dev_name(dev);
+ gc->owner = THIS_MODULE;
+ gc->request = gpiochip_generic_request;
+ gc->free = gpiochip_generic_free;
+ gc->direction_input = lp_gpio_direction_input;
+ gc->direction_output = lp_gpio_direction_output;
+ gc->get = lp_gpio_get;
+ gc->set = lp_gpio_set;
+ gc->get_direction = lp_gpio_get_direction;
+ gc->base = -1;
+ gc->ngpio = LP_NUM_GPIO;
+ gc->can_sleep = false;
+ gc->add_pin_ranges = lp_gpio_add_pin_ranges;
+ gc->parent = dev;
+
+ /* set up interrupts */
+ irq_rc = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
+ if (irq_rc && irq_rc->start) {
+ struct gpio_irq_chip *girq;
+
+ girq = &gc->irq;
+ girq->chip = &lp_irqchip;
+ girq->init_hw = lp_gpio_irq_init_hw;
+ girq->parent_handler = lp_gpio_irq_handler;
+ girq->num_parents = 1;
+ girq->parents = devm_kcalloc(dev, girq->num_parents,
+ sizeof(*girq->parents),
+ GFP_KERNEL);
+ if (!girq->parents)
+ return -ENOMEM;
+ girq->parents[0] = (unsigned int)irq_rc->start;
+ girq->default_type = IRQ_TYPE_NONE;
+ girq->handler = handle_bad_irq;
+ }
+
+ ret = devm_gpiochip_add_data(dev, gc, lg);
+ if (ret) {
+ dev_err(dev, "failed adding lp-gpio chip\n");
+ return ret;
+ }
+
+ pm_runtime_enable(dev);
+
+ return 0;
+}
+
+static int lp_gpio_remove(struct platform_device *pdev)
+{
+ pm_runtime_disable(&pdev->dev);
+ return 0;
+}
+
+static int lp_gpio_runtime_suspend(struct device *dev)
+{
+ return 0;
+}
+
+static int lp_gpio_runtime_resume(struct device *dev)
+{
+ return 0;
+}
+
+static int lp_gpio_resume(struct device *dev)
+{
+ struct intel_pinctrl *lg = dev_get_drvdata(dev);
+ void __iomem *reg;
+ int i;
+
+ /* on some hardware suspend clears input sensing, re-enable it here */
+ for (i = 0; i < lg->chip.ngpio; i++) {
+ if (gpiochip_is_requested(&lg->chip, i) != NULL) {
+ reg = lp_gpio_reg(&lg->chip, i, LP_CONFIG2);
+ iowrite32(ioread32(reg) & ~GPINDIS_BIT, reg);
+ }
+ }
+ return 0;
+}
+
+static const struct dev_pm_ops lp_gpio_pm_ops = {
+ .runtime_suspend = lp_gpio_runtime_suspend,
+ .runtime_resume = lp_gpio_runtime_resume,
+ .resume = lp_gpio_resume,
+};
+
+static const struct acpi_device_id lynxpoint_gpio_acpi_match[] = {
+ { "INT33C7", (kernel_ulong_t)&lptlp_soc_data },
+ { "INT3437", (kernel_ulong_t)&lptlp_soc_data },
+ { }
+};
+MODULE_DEVICE_TABLE(acpi, lynxpoint_gpio_acpi_match);
+
+static struct platform_driver lp_gpio_driver = {
+ .probe = lp_gpio_probe,
+ .remove = lp_gpio_remove,
+ .driver = {
+ .name = "lp_gpio",
+ .pm = &lp_gpio_pm_ops,
+ .acpi_match_table = ACPI_PTR(lynxpoint_gpio_acpi_match),
+ },
+};
+
+static int __init lp_gpio_init(void)
+{
+ return platform_driver_register(&lp_gpio_driver);
+}
+
+static void __exit lp_gpio_exit(void)
+{
+ platform_driver_unregister(&lp_gpio_driver);
+}
+
+subsys_initcall(lp_gpio_init);
+module_exit(lp_gpio_exit);
+
+MODULE_AUTHOR("Mathias Nyman (Intel)");
+MODULE_AUTHOR("Andy Shevchenko (Intel)");
+MODULE_DESCRIPTION("Intel Lynxpoint pinctrl driver");
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("platform:lp_gpio");
diff --git a/drivers/pinctrl/intel/pinctrl-sunrisepoint.c b/drivers/pinctrl/intel/pinctrl-sunrisepoint.c
index 44d7f50bbc82..330c8f077b73 100644
--- a/drivers/pinctrl/intel/pinctrl-sunrisepoint.c
+++ b/drivers/pinctrl/intel/pinctrl-sunrisepoint.c
@@ -49,6 +49,7 @@
.padown_offset = SPT_PAD_OWN, \
.padcfglock_offset = SPT_PADCFGLOCK, \
.hostown_offset = SPT_HOSTSW_OWN, \
+ .is_offset = SPT_GPI_IS, \
.ie_offset = SPT_GPI_IE, \
.pin_base = (s), \
.npins = ((e) - (s) + 1), \
@@ -588,6 +589,7 @@ static const struct intel_pinctrl_soc_data spth_soc_data = {
static const struct acpi_device_id spt_pinctrl_acpi_match[] = {
{ "INT344B", (kernel_ulong_t)&sptlp_soc_data },
+ { "INT3451", (kernel_ulong_t)&spth_soc_data },
{ "INT345D", (kernel_ulong_t)&spth_soc_data },
{ }
};
diff --git a/drivers/pinctrl/intel/pinctrl-tigerlake.c b/drivers/pinctrl/intel/pinctrl-tigerlake.c
index 58572b15b3ce..08a86f6fdea6 100644
--- a/drivers/pinctrl/intel/pinctrl-tigerlake.c
+++ b/drivers/pinctrl/intel/pinctrl-tigerlake.c
@@ -2,7 +2,7 @@
/*
* Intel Tiger Lake PCH pinctrl/GPIO driver
*
- * Copyright (C) 2019, Intel Corporation
+ * Copyright (C) 2019 - 2020, Intel Corporation
* Authors: Andy Shevchenko <andriy.shevchenko@linux.intel.com>
* Mika Westerberg <mika.westerberg@linux.intel.com>
*/
@@ -21,15 +21,19 @@
#define TGL_GPI_IS 0x100
#define TGL_GPI_IE 0x120
-#define TGL_GPP(r, s, e) \
+#define TGL_NO_GPIO -1
+
+#define TGL_GPP(r, s, e, g) \
{ \
.reg_num = (r), \
.base = (s), \
.size = ((e) - (s) + 1), \
+ .gpio_base = (g), \
}
-#define TGL_COMMUNITY(s, e, g) \
+#define TGL_COMMUNITY(b, s, e, g) \
{ \
+ .barno = (b), \
.padown_offset = TGL_PAD_OWN, \
.padcfglock_offset = TGL_PADCFGLOCK, \
.hostown_offset = TGL_HOSTSW_OWN, \
@@ -42,7 +46,7 @@
}
/* Tiger Lake-LP */
-static const struct pinctrl_pin_desc tgllp_community0_pins[] = {
+static const struct pinctrl_pin_desc tgllp_pins[] = {
/* GPP_B */
PINCTRL_PIN(0, "CORE_VID_0"),
PINCTRL_PIN(1, "CORE_VID_1"),
@@ -113,324 +117,273 @@ static const struct pinctrl_pin_desc tgllp_community0_pins[] = {
PINCTRL_PIN(64, "GPPC_A_22"),
PINCTRL_PIN(65, "I2S1_SCLK"),
PINCTRL_PIN(66, "ESPI_CLK_LOOPBK"),
-};
-
-static const struct intel_padgroup tgllp_community0_gpps[] = {
- TGL_GPP(0, 0, 25), /* GPP_B */
- TGL_GPP(1, 26, 41), /* GPP_T */
- TGL_GPP(2, 42, 66), /* GPP_A */
-};
-
-static const struct intel_community tgllp_community0[] = {
- TGL_COMMUNITY(0, 66, tgllp_community0_gpps),
-};
-
-static const struct intel_pinctrl_soc_data tgllp_community0_soc_data = {
- .uid = "0",
- .pins = tgllp_community0_pins,
- .npins = ARRAY_SIZE(tgllp_community0_pins),
- .communities = tgllp_community0,
- .ncommunities = ARRAY_SIZE(tgllp_community0),
-};
-
-static const struct pinctrl_pin_desc tgllp_community1_pins[] = {
/* GPP_S */
- PINCTRL_PIN(0, "SNDW0_CLK"),
- PINCTRL_PIN(1, "SNDW0_DATA"),
- PINCTRL_PIN(2, "SNDW1_CLK"),
- PINCTRL_PIN(3, "SNDW1_DATA"),
- PINCTRL_PIN(4, "SNDW2_CLK"),
- PINCTRL_PIN(5, "SNDW2_DATA"),
- PINCTRL_PIN(6, "SNDW3_CLK"),
- PINCTRL_PIN(7, "SNDW3_DATA"),
+ PINCTRL_PIN(67, "SNDW0_CLK"),
+ PINCTRL_PIN(68, "SNDW0_DATA"),
+ PINCTRL_PIN(69, "SNDW1_CLK"),
+ PINCTRL_PIN(70, "SNDW1_DATA"),
+ PINCTRL_PIN(71, "SNDW2_CLK"),
+ PINCTRL_PIN(72, "SNDW2_DATA"),
+ PINCTRL_PIN(73, "SNDW3_CLK"),
+ PINCTRL_PIN(74, "SNDW3_DATA"),
/* GPP_H */
- PINCTRL_PIN(8, "GPPC_H_0"),
- PINCTRL_PIN(9, "GPPC_H_1"),
- PINCTRL_PIN(10, "GPPC_H_2"),
- PINCTRL_PIN(11, "SX_EXIT_HOLDOFFB"),
- PINCTRL_PIN(12, "I2C2_SDA"),
- PINCTRL_PIN(13, "I2C2_SCL"),
- PINCTRL_PIN(14, "I2C3_SDA"),
- PINCTRL_PIN(15, "I2C3_SCL"),
- PINCTRL_PIN(16, "I2C4_SDA"),
- PINCTRL_PIN(17, "I2C4_SCL"),
- PINCTRL_PIN(18, "SRCCLKREQB_4"),
- PINCTRL_PIN(19, "SRCCLKREQB_5"),
- PINCTRL_PIN(20, "M2_SKT2_CFG_0"),
- PINCTRL_PIN(21, "M2_SKT2_CFG_1"),
- PINCTRL_PIN(22, "M2_SKT2_CFG_2"),
- PINCTRL_PIN(23, "M2_SKT2_CFG_3"),
- PINCTRL_PIN(24, "DDPB_CTRLCLK"),
- PINCTRL_PIN(25, "DDPB_CTRLDATA"),
- PINCTRL_PIN(26, "CPU_C10_GATEB"),
- PINCTRL_PIN(27, "TIME_SYNC_0"),
- PINCTRL_PIN(28, "IMGCLKOUT_1"),
- PINCTRL_PIN(29, "IMGCLKOUT_2"),
- PINCTRL_PIN(30, "IMGCLKOUT_3"),
- PINCTRL_PIN(31, "IMGCLKOUT_4"),
+ PINCTRL_PIN(75, "GPPC_H_0"),
+ PINCTRL_PIN(76, "GPPC_H_1"),
+ PINCTRL_PIN(77, "GPPC_H_2"),
+ PINCTRL_PIN(78, "SX_EXIT_HOLDOFFB"),
+ PINCTRL_PIN(79, "I2C2_SDA"),
+ PINCTRL_PIN(80, "I2C2_SCL"),
+ PINCTRL_PIN(81, "I2C3_SDA"),
+ PINCTRL_PIN(82, "I2C3_SCL"),
+ PINCTRL_PIN(83, "I2C4_SDA"),
+ PINCTRL_PIN(84, "I2C4_SCL"),
+ PINCTRL_PIN(85, "SRCCLKREQB_4"),
+ PINCTRL_PIN(86, "SRCCLKREQB_5"),
+ PINCTRL_PIN(87, "M2_SKT2_CFG_0"),
+ PINCTRL_PIN(88, "M2_SKT2_CFG_1"),
+ PINCTRL_PIN(89, "M2_SKT2_CFG_2"),
+ PINCTRL_PIN(90, "M2_SKT2_CFG_3"),
+ PINCTRL_PIN(91, "DDPB_CTRLCLK"),
+ PINCTRL_PIN(92, "DDPB_CTRLDATA"),
+ PINCTRL_PIN(93, "CPU_C10_GATEB"),
+ PINCTRL_PIN(94, "TIME_SYNC_0"),
+ PINCTRL_PIN(95, "IMGCLKOUT_1"),
+ PINCTRL_PIN(96, "IMGCLKOUT_2"),
+ PINCTRL_PIN(97, "IMGCLKOUT_3"),
+ PINCTRL_PIN(98, "IMGCLKOUT_4"),
/* GPP_D */
- PINCTRL_PIN(32, "ISH_GP_0"),
- PINCTRL_PIN(33, "ISH_GP_1"),
- PINCTRL_PIN(34, "ISH_GP_2"),
- PINCTRL_PIN(35, "ISH_GP_3"),
- PINCTRL_PIN(36, "IMGCLKOUT_0"),
- PINCTRL_PIN(37, "SRCCLKREQB_0"),
- PINCTRL_PIN(38, "SRCCLKREQB_1"),
- PINCTRL_PIN(39, "SRCCLKREQB_2"),
- PINCTRL_PIN(40, "SRCCLKREQB_3"),
- PINCTRL_PIN(41, "ISH_SPI_CSB"),
- PINCTRL_PIN(42, "ISH_SPI_CLK"),
- PINCTRL_PIN(43, "ISH_SPI_MISO"),
- PINCTRL_PIN(44, "ISH_SPI_MOSI"),
- PINCTRL_PIN(45, "ISH_UART0_RXD"),
- PINCTRL_PIN(46, "ISH_UART0_TXD"),
- PINCTRL_PIN(47, "ISH_UART0_RTSB"),
- PINCTRL_PIN(48, "ISH_UART0_CTSB"),
- PINCTRL_PIN(49, "ISH_GP_4"),
- PINCTRL_PIN(50, "ISH_GP_5"),
- PINCTRL_PIN(51, "I2S_MCLK1_OUT"),
- PINCTRL_PIN(52, "GSPI2_CLK_LOOPBK"),
+ PINCTRL_PIN(99, "ISH_GP_0"),
+ PINCTRL_PIN(100, "ISH_GP_1"),
+ PINCTRL_PIN(101, "ISH_GP_2"),
+ PINCTRL_PIN(102, "ISH_GP_3"),
+ PINCTRL_PIN(103, "IMGCLKOUT_0"),
+ PINCTRL_PIN(104, "SRCCLKREQB_0"),
+ PINCTRL_PIN(105, "SRCCLKREQB_1"),
+ PINCTRL_PIN(106, "SRCCLKREQB_2"),
+ PINCTRL_PIN(107, "SRCCLKREQB_3"),
+ PINCTRL_PIN(108, "ISH_SPI_CSB"),
+ PINCTRL_PIN(109, "ISH_SPI_CLK"),
+ PINCTRL_PIN(110, "ISH_SPI_MISO"),
+ PINCTRL_PIN(111, "ISH_SPI_MOSI"),
+ PINCTRL_PIN(112, "ISH_UART0_RXD"),
+ PINCTRL_PIN(113, "ISH_UART0_TXD"),
+ PINCTRL_PIN(114, "ISH_UART0_RTSB"),
+ PINCTRL_PIN(115, "ISH_UART0_CTSB"),
+ PINCTRL_PIN(116, "ISH_GP_4"),
+ PINCTRL_PIN(117, "ISH_GP_5"),
+ PINCTRL_PIN(118, "I2S_MCLK1_OUT"),
+ PINCTRL_PIN(119, "GSPI2_CLK_LOOPBK"),
/* GPP_U */
- PINCTRL_PIN(53, "UART3_RXD"),
- PINCTRL_PIN(54, "UART3_TXD"),
- PINCTRL_PIN(55, "UART3_RTSB"),
- PINCTRL_PIN(56, "UART3_CTSB"),
- PINCTRL_PIN(57, "GSPI3_CS0B"),
- PINCTRL_PIN(58, "GSPI3_CLK"),
- PINCTRL_PIN(59, "GSPI3_MISO"),
- PINCTRL_PIN(60, "GSPI3_MOSI"),
- PINCTRL_PIN(61, "GSPI4_CS0B"),
- PINCTRL_PIN(62, "GSPI4_CLK"),
- PINCTRL_PIN(63, "GSPI4_MISO"),
- PINCTRL_PIN(64, "GSPI4_MOSI"),
- PINCTRL_PIN(65, "GSPI5_CS0B"),
- PINCTRL_PIN(66, "GSPI5_CLK"),
- PINCTRL_PIN(67, "GSPI5_MISO"),
- PINCTRL_PIN(68, "GSPI5_MOSI"),
- PINCTRL_PIN(69, "GSPI6_CS0B"),
- PINCTRL_PIN(70, "GSPI6_CLK"),
- PINCTRL_PIN(71, "GSPI6_MISO"),
- PINCTRL_PIN(72, "GSPI6_MOSI"),
- PINCTRL_PIN(73, "GSPI3_CLK_LOOPBK"),
- PINCTRL_PIN(74, "GSPI4_CLK_LOOPBK"),
- PINCTRL_PIN(75, "GSPI5_CLK_LOOPBK"),
- PINCTRL_PIN(76, "GSPI6_CLK_LOOPBK"),
+ PINCTRL_PIN(120, "UART3_RXD"),
+ PINCTRL_PIN(121, "UART3_TXD"),
+ PINCTRL_PIN(122, "UART3_RTSB"),
+ PINCTRL_PIN(123, "UART3_CTSB"),
+ PINCTRL_PIN(124, "GSPI3_CS0B"),
+ PINCTRL_PIN(125, "GSPI3_CLK"),
+ PINCTRL_PIN(126, "GSPI3_MISO"),
+ PINCTRL_PIN(127, "GSPI3_MOSI"),
+ PINCTRL_PIN(128, "GSPI4_CS0B"),
+ PINCTRL_PIN(129, "GSPI4_CLK"),
+ PINCTRL_PIN(130, "GSPI4_MISO"),
+ PINCTRL_PIN(131, "GSPI4_MOSI"),
+ PINCTRL_PIN(132, "GSPI5_CS0B"),
+ PINCTRL_PIN(133, "GSPI5_CLK"),
+ PINCTRL_PIN(134, "GSPI5_MISO"),
+ PINCTRL_PIN(135, "GSPI5_MOSI"),
+ PINCTRL_PIN(136, "GSPI6_CS0B"),
+ PINCTRL_PIN(137, "GSPI6_CLK"),
+ PINCTRL_PIN(138, "GSPI6_MISO"),
+ PINCTRL_PIN(139, "GSPI6_MOSI"),
+ PINCTRL_PIN(140, "GSPI3_CLK_LOOPBK"),
+ PINCTRL_PIN(141, "GSPI4_CLK_LOOPBK"),
+ PINCTRL_PIN(142, "GSPI5_CLK_LOOPBK"),
+ PINCTRL_PIN(143, "GSPI6_CLK_LOOPBK"),
/* vGPIO */
- PINCTRL_PIN(77, "CNV_BTEN"),
- PINCTRL_PIN(78, "CNV_BT_HOST_WAKEB"),
- PINCTRL_PIN(79, "CNV_BT_IF_SELECT"),
- PINCTRL_PIN(80, "vCNV_BT_UART_TXD"),
- PINCTRL_PIN(81, "vCNV_BT_UART_RXD"),
- PINCTRL_PIN(82, "vCNV_BT_UART_CTS_B"),
- PINCTRL_PIN(83, "vCNV_BT_UART_RTS_B"),
- PINCTRL_PIN(84, "vCNV_MFUART1_TXD"),
- PINCTRL_PIN(85, "vCNV_MFUART1_RXD"),
- PINCTRL_PIN(86, "vCNV_MFUART1_CTS_B"),
- PINCTRL_PIN(87, "vCNV_MFUART1_RTS_B"),
- PINCTRL_PIN(88, "vUART0_TXD"),
- PINCTRL_PIN(89, "vUART0_RXD"),
- PINCTRL_PIN(90, "vUART0_CTS_B"),
- PINCTRL_PIN(91, "vUART0_RTS_B"),
- PINCTRL_PIN(92, "vISH_UART0_TXD"),
- PINCTRL_PIN(93, "vISH_UART0_RXD"),
- PINCTRL_PIN(94, "vISH_UART0_CTS_B"),
- PINCTRL_PIN(95, "vISH_UART0_RTS_B"),
- PINCTRL_PIN(96, "vCNV_BT_I2S_BCLK"),
- PINCTRL_PIN(97, "vCNV_BT_I2S_WS_SYNC"),
- PINCTRL_PIN(98, "vCNV_BT_I2S_SDO"),
- PINCTRL_PIN(99, "vCNV_BT_I2S_SDI"),
- PINCTRL_PIN(100, "vI2S2_SCLK"),
- PINCTRL_PIN(101, "vI2S2_SFRM"),
- PINCTRL_PIN(102, "vI2S2_TXD"),
- PINCTRL_PIN(103, "vI2S2_RXD"),
-};
-
-static const struct intel_padgroup tgllp_community1_gpps[] = {
- TGL_GPP(0, 0, 7), /* GPP_S */
- TGL_GPP(1, 8, 31), /* GPP_H */
- TGL_GPP(2, 32, 52), /* GPP_D */
- TGL_GPP(3, 53, 76), /* GPP_U */
- TGL_GPP(4, 77, 103), /* vGPIO */
-};
-
-static const struct intel_community tgllp_community1[] = {
- TGL_COMMUNITY(0, 103, tgllp_community1_gpps),
-};
-
-static const struct intel_pinctrl_soc_data tgllp_community1_soc_data = {
- .uid = "1",
- .pins = tgllp_community1_pins,
- .npins = ARRAY_SIZE(tgllp_community1_pins),
- .communities = tgllp_community1,
- .ncommunities = ARRAY_SIZE(tgllp_community1),
-};
-
-static const struct pinctrl_pin_desc tgllp_community4_pins[] = {
+ PINCTRL_PIN(144, "CNV_BTEN"),
+ PINCTRL_PIN(145, "CNV_BT_HOST_WAKEB"),
+ PINCTRL_PIN(146, "CNV_BT_IF_SELECT"),
+ PINCTRL_PIN(147, "vCNV_BT_UART_TXD"),
+ PINCTRL_PIN(148, "vCNV_BT_UART_RXD"),
+ PINCTRL_PIN(149, "vCNV_BT_UART_CTS_B"),
+ PINCTRL_PIN(150, "vCNV_BT_UART_RTS_B"),
+ PINCTRL_PIN(151, "vCNV_MFUART1_TXD"),
+ PINCTRL_PIN(152, "vCNV_MFUART1_RXD"),
+ PINCTRL_PIN(153, "vCNV_MFUART1_CTS_B"),
+ PINCTRL_PIN(154, "vCNV_MFUART1_RTS_B"),
+ PINCTRL_PIN(155, "vUART0_TXD"),
+ PINCTRL_PIN(156, "vUART0_RXD"),
+ PINCTRL_PIN(157, "vUART0_CTS_B"),
+ PINCTRL_PIN(158, "vUART0_RTS_B"),
+ PINCTRL_PIN(159, "vISH_UART0_TXD"),
+ PINCTRL_PIN(160, "vISH_UART0_RXD"),
+ PINCTRL_PIN(161, "vISH_UART0_CTS_B"),
+ PINCTRL_PIN(162, "vISH_UART0_RTS_B"),
+ PINCTRL_PIN(163, "vCNV_BT_I2S_BCLK"),
+ PINCTRL_PIN(164, "vCNV_BT_I2S_WS_SYNC"),
+ PINCTRL_PIN(165, "vCNV_BT_I2S_SDO"),
+ PINCTRL_PIN(166, "vCNV_BT_I2S_SDI"),
+ PINCTRL_PIN(167, "vI2S2_SCLK"),
+ PINCTRL_PIN(168, "vI2S2_SFRM"),
+ PINCTRL_PIN(169, "vI2S2_TXD"),
+ PINCTRL_PIN(170, "vI2S2_RXD"),
/* GPP_C */
- PINCTRL_PIN(0, "SMBCLK"),
- PINCTRL_PIN(1, "SMBDATA"),
- PINCTRL_PIN(2, "SMBALERTB"),
- PINCTRL_PIN(3, "SML0CLK"),
- PINCTRL_PIN(4, "SML0DATA"),
- PINCTRL_PIN(5, "SML0ALERTB"),
- PINCTRL_PIN(6, "SML1CLK"),
- PINCTRL_PIN(7, "SML1DATA"),
- PINCTRL_PIN(8, "UART0_RXD"),
- PINCTRL_PIN(9, "UART0_TXD"),
- PINCTRL_PIN(10, "UART0_RTSB"),
- PINCTRL_PIN(11, "UART0_CTSB"),
- PINCTRL_PIN(12, "UART1_RXD"),
- PINCTRL_PIN(13, "UART1_TXD"),
- PINCTRL_PIN(14, "UART1_RTSB"),
- PINCTRL_PIN(15, "UART1_CTSB"),
- PINCTRL_PIN(16, "I2C0_SDA"),
- PINCTRL_PIN(17, "I2C0_SCL"),
- PINCTRL_PIN(18, "I2C1_SDA"),
- PINCTRL_PIN(19, "I2C1_SCL"),
- PINCTRL_PIN(20, "UART2_RXD"),
- PINCTRL_PIN(21, "UART2_TXD"),
- PINCTRL_PIN(22, "UART2_RTSB"),
- PINCTRL_PIN(23, "UART2_CTSB"),
+ PINCTRL_PIN(171, "SMBCLK"),
+ PINCTRL_PIN(172, "SMBDATA"),
+ PINCTRL_PIN(173, "SMBALERTB"),
+ PINCTRL_PIN(174, "SML0CLK"),
+ PINCTRL_PIN(175, "SML0DATA"),
+ PINCTRL_PIN(176, "SML0ALERTB"),
+ PINCTRL_PIN(177, "SML1CLK"),
+ PINCTRL_PIN(178, "SML1DATA"),
+ PINCTRL_PIN(179, "UART0_RXD"),
+ PINCTRL_PIN(180, "UART0_TXD"),
+ PINCTRL_PIN(181, "UART0_RTSB"),
+ PINCTRL_PIN(182, "UART0_CTSB"),
+ PINCTRL_PIN(183, "UART1_RXD"),
+ PINCTRL_PIN(184, "UART1_TXD"),
+ PINCTRL_PIN(185, "UART1_RTSB"),
+ PINCTRL_PIN(186, "UART1_CTSB"),
+ PINCTRL_PIN(187, "I2C0_SDA"),
+ PINCTRL_PIN(188, "I2C0_SCL"),
+ PINCTRL_PIN(189, "I2C1_SDA"),
+ PINCTRL_PIN(190, "I2C1_SCL"),
+ PINCTRL_PIN(191, "UART2_RXD"),
+ PINCTRL_PIN(192, "UART2_TXD"),
+ PINCTRL_PIN(193, "UART2_RTSB"),
+ PINCTRL_PIN(194, "UART2_CTSB"),
/* GPP_F */
- PINCTRL_PIN(24, "CNV_BRI_DT"),
- PINCTRL_PIN(25, "CNV_BRI_RSP"),
- PINCTRL_PIN(26, "CNV_RGI_DT"),
- PINCTRL_PIN(27, "CNV_RGI_RSP"),
- PINCTRL_PIN(28, "CNV_RF_RESET_B"),
- PINCTRL_PIN(29, "GPPC_F_5"),
- PINCTRL_PIN(30, "CNV_PA_BLANKING"),
- PINCTRL_PIN(31, "GPPC_F_7"),
- PINCTRL_PIN(32, "I2S_MCLK2_INOUT"),
- PINCTRL_PIN(33, "BOOTMPC"),
- PINCTRL_PIN(34, "GPPC_F_10"),
- PINCTRL_PIN(35, "GPPC_F_11"),
- PINCTRL_PIN(36, "GSXDOUT"),
- PINCTRL_PIN(37, "GSXSLOAD"),
- PINCTRL_PIN(38, "GSXDIN"),
- PINCTRL_PIN(39, "GSXSRESETB"),
- PINCTRL_PIN(40, "GSXCLK"),
- PINCTRL_PIN(41, "GMII_MDC"),
- PINCTRL_PIN(42, "GMII_MDIO"),
- PINCTRL_PIN(43, "SRCCLKREQB_6"),
- PINCTRL_PIN(44, "EXT_PWR_GATEB"),
- PINCTRL_PIN(45, "EXT_PWR_GATE2B"),
- PINCTRL_PIN(46, "VNN_CTRL"),
- PINCTRL_PIN(47, "V1P05_CTRL"),
- PINCTRL_PIN(48, "GPPF_CLK_LOOPBACK"),
+ PINCTRL_PIN(195, "CNV_BRI_DT"),
+ PINCTRL_PIN(196, "CNV_BRI_RSP"),
+ PINCTRL_PIN(197, "CNV_RGI_DT"),
+ PINCTRL_PIN(198, "CNV_RGI_RSP"),
+ PINCTRL_PIN(199, "CNV_RF_RESET_B"),
+ PINCTRL_PIN(200, "GPPC_F_5"),
+ PINCTRL_PIN(201, "CNV_PA_BLANKING"),
+ PINCTRL_PIN(202, "GPPC_F_7"),
+ PINCTRL_PIN(203, "I2S_MCLK2_INOUT"),
+ PINCTRL_PIN(204, "BOOTMPC"),
+ PINCTRL_PIN(205, "GPPC_F_10"),
+ PINCTRL_PIN(206, "GPPC_F_11"),
+ PINCTRL_PIN(207, "GSXDOUT"),
+ PINCTRL_PIN(208, "GSXSLOAD"),
+ PINCTRL_PIN(209, "GSXDIN"),
+ PINCTRL_PIN(210, "GSXSRESETB"),
+ PINCTRL_PIN(211, "GSXCLK"),
+ PINCTRL_PIN(212, "GMII_MDC"),
+ PINCTRL_PIN(213, "GMII_MDIO"),
+ PINCTRL_PIN(214, "SRCCLKREQB_6"),
+ PINCTRL_PIN(215, "EXT_PWR_GATEB"),
+ PINCTRL_PIN(216, "EXT_PWR_GATE2B"),
+ PINCTRL_PIN(217, "VNN_CTRL"),
+ PINCTRL_PIN(218, "V1P05_CTRL"),
+ PINCTRL_PIN(219, "GPPF_CLK_LOOPBACK"),
/* HVCMOS */
- PINCTRL_PIN(49, "L_BKLTEN"),
- PINCTRL_PIN(50, "L_BKLTCTL"),
- PINCTRL_PIN(51, "L_VDDEN"),
- PINCTRL_PIN(52, "SYS_PWROK"),
- PINCTRL_PIN(53, "SYS_RESETB"),
- PINCTRL_PIN(54, "MLK_RSTB"),
+ PINCTRL_PIN(220, "L_BKLTEN"),
+ PINCTRL_PIN(221, "L_BKLTCTL"),
+ PINCTRL_PIN(222, "L_VDDEN"),
+ PINCTRL_PIN(223, "SYS_PWROK"),
+ PINCTRL_PIN(224, "SYS_RESETB"),
+ PINCTRL_PIN(225, "MLK_RSTB"),
/* GPP_E */
- PINCTRL_PIN(55, "SATAXPCIE_0"),
- PINCTRL_PIN(56, "SPI1_IO_2"),
- PINCTRL_PIN(57, "SPI1_IO_3"),
- PINCTRL_PIN(58, "CPU_GP_0"),
- PINCTRL_PIN(59, "SATA_DEVSLP_0"),
- PINCTRL_PIN(60, "SATA_DEVSLP_1"),
- PINCTRL_PIN(61, "GPPC_E_6"),
- PINCTRL_PIN(62, "CPU_GP_1"),
- PINCTRL_PIN(63, "SPI1_CS1B"),
- PINCTRL_PIN(64, "USB2_OCB_0"),
- PINCTRL_PIN(65, "SPI1_CSB"),
- PINCTRL_PIN(66, "SPI1_CLK"),
- PINCTRL_PIN(67, "SPI1_MISO_IO_1"),
- PINCTRL_PIN(68, "SPI1_MOSI_IO_0"),
- PINCTRL_PIN(69, "DDSP_HPD_A"),
- PINCTRL_PIN(70, "ISH_GP_6"),
- PINCTRL_PIN(71, "ISH_GP_7"),
- PINCTRL_PIN(72, "GPPC_E_17"),
- PINCTRL_PIN(73, "DDP1_CTRLCLK"),
- PINCTRL_PIN(74, "DDP1_CTRLDATA"),
- PINCTRL_PIN(75, "DDP2_CTRLCLK"),
- PINCTRL_PIN(76, "DDP2_CTRLDATA"),
- PINCTRL_PIN(77, "DDPA_CTRLCLK"),
- PINCTRL_PIN(78, "DDPA_CTRLDATA"),
- PINCTRL_PIN(79, "SPI1_CLK_LOOPBK"),
+ PINCTRL_PIN(226, "SATAXPCIE_0"),
+ PINCTRL_PIN(227, "SPI1_IO_2"),
+ PINCTRL_PIN(228, "SPI1_IO_3"),
+ PINCTRL_PIN(229, "CPU_GP_0"),
+ PINCTRL_PIN(230, "SATA_DEVSLP_0"),
+ PINCTRL_PIN(231, "SATA_DEVSLP_1"),
+ PINCTRL_PIN(232, "GPPC_E_6"),
+ PINCTRL_PIN(233, "CPU_GP_1"),
+ PINCTRL_PIN(234, "SPI1_CS1B"),
+ PINCTRL_PIN(235, "USB2_OCB_0"),
+ PINCTRL_PIN(236, "SPI1_CSB"),
+ PINCTRL_PIN(237, "SPI1_CLK"),
+ PINCTRL_PIN(238, "SPI1_MISO_IO_1"),
+ PINCTRL_PIN(239, "SPI1_MOSI_IO_0"),
+ PINCTRL_PIN(240, "DDSP_HPD_A"),
+ PINCTRL_PIN(241, "ISH_GP_6"),
+ PINCTRL_PIN(242, "ISH_GP_7"),
+ PINCTRL_PIN(243, "GPPC_E_17"),
+ PINCTRL_PIN(244, "DDP1_CTRLCLK"),
+ PINCTRL_PIN(245, "DDP1_CTRLDATA"),
+ PINCTRL_PIN(246, "DDP2_CTRLCLK"),
+ PINCTRL_PIN(247, "DDP2_CTRLDATA"),
+ PINCTRL_PIN(248, "DDPA_CTRLCLK"),
+ PINCTRL_PIN(249, "DDPA_CTRLDATA"),
+ PINCTRL_PIN(250, "SPI1_CLK_LOOPBK"),
/* JTAG */
- PINCTRL_PIN(80, "JTAG_TDO"),
- PINCTRL_PIN(81, "JTAGX"),
- PINCTRL_PIN(82, "PRDYB"),
- PINCTRL_PIN(83, "PREQB"),
- PINCTRL_PIN(84, "CPU_TRSTB"),
- PINCTRL_PIN(85, "JTAG_TDI"),
- PINCTRL_PIN(86, "JTAG_TMS"),
- PINCTRL_PIN(87, "JTAG_TCK"),
- PINCTRL_PIN(88, "DBG_PMODE"),
-};
-
-static const struct intel_padgroup tgllp_community4_gpps[] = {
- TGL_GPP(0, 0, 23), /* GPP_C */
- TGL_GPP(1, 24, 48), /* GPP_F */
- TGL_GPP(2, 49, 54), /* HVCMOS */
- TGL_GPP(3, 55, 79), /* GPP_E */
- TGL_GPP(4, 80, 88), /* JTAG */
+ PINCTRL_PIN(251, "JTAG_TDO"),
+ PINCTRL_PIN(252, "JTAGX"),
+ PINCTRL_PIN(253, "PRDYB"),
+ PINCTRL_PIN(254, "PREQB"),
+ PINCTRL_PIN(255, "CPU_TRSTB"),
+ PINCTRL_PIN(256, "JTAG_TDI"),
+ PINCTRL_PIN(257, "JTAG_TMS"),
+ PINCTRL_PIN(258, "JTAG_TCK"),
+ PINCTRL_PIN(259, "DBG_PMODE"),
+ /* GPP_R */
+ PINCTRL_PIN(260, "HDA_BCLK"),
+ PINCTRL_PIN(261, "HDA_SYNC"),
+ PINCTRL_PIN(262, "HDA_SDO"),
+ PINCTRL_PIN(263, "HDA_SDI_0"),
+ PINCTRL_PIN(264, "HDA_RSTB"),
+ PINCTRL_PIN(265, "HDA_SDI_1"),
+ PINCTRL_PIN(266, "GPP_R_6"),
+ PINCTRL_PIN(267, "GPP_R_7"),
+ /* SPI */
+ PINCTRL_PIN(268, "SPI0_IO_2"),
+ PINCTRL_PIN(269, "SPI0_IO_3"),
+ PINCTRL_PIN(270, "SPI0_MOSI_IO_0"),
+ PINCTRL_PIN(271, "SPI0_MISO_IO_1"),
+ PINCTRL_PIN(272, "SPI0_TPM_CSB"),
+ PINCTRL_PIN(273, "SPI0_FLASH_0_CSB"),
+ PINCTRL_PIN(274, "SPI0_FLASH_1_CSB"),
+ PINCTRL_PIN(275, "SPI0_CLK"),
+ PINCTRL_PIN(276, "SPI0_CLK_LOOPBK"),
};
-static const struct intel_community tgllp_community4[] = {
- TGL_COMMUNITY(0, 88, tgllp_community4_gpps),
+static const struct intel_padgroup tgllp_community0_gpps[] = {
+ TGL_GPP(0, 0, 25, 0), /* GPP_B */
+ TGL_GPP(1, 26, 41, 32), /* GPP_T */
+ TGL_GPP(2, 42, 66, 64), /* GPP_A */
};
-static const struct intel_pinctrl_soc_data tgllp_community4_soc_data = {
- .uid = "4",
- .pins = tgllp_community4_pins,
- .npins = ARRAY_SIZE(tgllp_community4_pins),
- .communities = tgllp_community4,
- .ncommunities = ARRAY_SIZE(tgllp_community4),
+static const struct intel_padgroup tgllp_community1_gpps[] = {
+ TGL_GPP(0, 67, 74, 96), /* GPP_S */
+ TGL_GPP(1, 75, 98, 128), /* GPP_H */
+ TGL_GPP(2, 99, 119, 160), /* GPP_D */
+ TGL_GPP(3, 120, 143, 192), /* GPP_U */
+ TGL_GPP(4, 144, 170, 224), /* vGPIO */
};
-static const struct pinctrl_pin_desc tgllp_community5_pins[] = {
- /* GPP_R */
- PINCTRL_PIN(0, "HDA_BCLK"),
- PINCTRL_PIN(1, "HDA_SYNC"),
- PINCTRL_PIN(2, "HDA_SDO"),
- PINCTRL_PIN(3, "HDA_SDI_0"),
- PINCTRL_PIN(4, "HDA_RSTB"),
- PINCTRL_PIN(5, "HDA_SDI_1"),
- PINCTRL_PIN(6, "GPP_R_6"),
- PINCTRL_PIN(7, "GPP_R_7"),
- /* SPI */
- PINCTRL_PIN(8, "SPI0_IO_2"),
- PINCTRL_PIN(9, "SPI0_IO_3"),
- PINCTRL_PIN(10, "SPI0_MOSI_IO_0"),
- PINCTRL_PIN(11, "SPI0_MISO_IO_1"),
- PINCTRL_PIN(12, "SPI0_TPM_CSB"),
- PINCTRL_PIN(13, "SPI0_FLASH_0_CSB"),
- PINCTRL_PIN(14, "SPI0_FLASH_1_CSB"),
- PINCTRL_PIN(15, "SPI0_CLK"),
- PINCTRL_PIN(16, "SPI0_CLK_LOOPBK"),
+static const struct intel_padgroup tgllp_community4_gpps[] = {
+ TGL_GPP(0, 171, 194, 256), /* GPP_C */
+ TGL_GPP(1, 195, 219, 288), /* GPP_F */
+ TGL_GPP(2, 220, 225, TGL_NO_GPIO), /* HVCMOS */
+ TGL_GPP(3, 226, 250, 320), /* GPP_E */
+ TGL_GPP(4, 251, 259, TGL_NO_GPIO), /* JTAG */
};
static const struct intel_padgroup tgllp_community5_gpps[] = {
- TGL_GPP(0, 0, 7), /* GPP_R */
- TGL_GPP(1, 8, 16), /* SPI */
-};
-
-static const struct intel_community tgllp_community5[] = {
- TGL_COMMUNITY(0, 16, tgllp_community5_gpps),
+ TGL_GPP(0, 260, 267, 352), /* GPP_R */
+ TGL_GPP(1, 268, 276, TGL_NO_GPIO), /* SPI */
};
-static const struct intel_pinctrl_soc_data tgllp_community5_soc_data = {
- .uid = "5",
- .pins = tgllp_community5_pins,
- .npins = ARRAY_SIZE(tgllp_community5_pins),
- .communities = tgllp_community5,
- .ncommunities = ARRAY_SIZE(tgllp_community5),
+static const struct intel_community tgllp_communities[] = {
+ TGL_COMMUNITY(0, 0, 66, tgllp_community0_gpps),
+ TGL_COMMUNITY(1, 67, 170, tgllp_community1_gpps),
+ TGL_COMMUNITY(2, 171, 259, tgllp_community4_gpps),
+ TGL_COMMUNITY(3, 260, 276, tgllp_community5_gpps),
};
-static const struct intel_pinctrl_soc_data *tgllp_soc_data_array[] = {
- &tgllp_community0_soc_data,
- &tgllp_community1_soc_data,
- &tgllp_community4_soc_data,
- &tgllp_community5_soc_data,
- NULL
+static const struct intel_pinctrl_soc_data tgllp_soc_data = {
+ .pins = tgllp_pins,
+ .npins = ARRAY_SIZE(tgllp_pins),
+ .communities = tgllp_communities,
+ .ncommunities = ARRAY_SIZE(tgllp_communities),
};
static const struct acpi_device_id tgl_pinctrl_acpi_match[] = {
- { "INT34C5", (kernel_ulong_t)tgllp_soc_data_array },
+ { "INT34C5", (kernel_ulong_t)&tgllp_soc_data },
{ }
};
MODULE_DEVICE_TABLE(acpi, tgl_pinctrl_acpi_match);
@@ -438,7 +391,7 @@ MODULE_DEVICE_TABLE(acpi, tgl_pinctrl_acpi_match);
static INTEL_PINCTRL_PM_OPS(tgl_pinctrl_pm_ops);
static struct platform_driver tgl_pinctrl_driver = {
- .probe = intel_pinctrl_probe_by_uid,
+ .probe = intel_pinctrl_probe_by_hid,
.driver = {
.name = "tigerlake-pinctrl",
.acpi_match_table = tgl_pinctrl_acpi_match,
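
The Tiger Lake conversion above folds four per-community devices into a single tgllp_soc_data matched by HID, and gives every pad group an explicit gpio_base (the new fourth TGL_GPP() argument) anchoring it in GPIO number space on 32-pin boundaries, with TGL_NO_GPIO marking groups not exposed as GPIOs. A worked sketch of the resulting pad-to-GPIO translation using the tables above; the helper name is illustrative, not part of the driver:

	/* Pad 75 ("GPPC_H_0") lies in GPP_H (pads 75..98, gpio_base 128),
	 * so it becomes Linux GPIO 128 + (75 - 75) = 128. */
	static int tgllp_pad_to_gpio_sketch(const struct intel_padgroup *gpp,
					    unsigned int pad)
	{
		if (gpp->gpio_base == TGL_NO_GPIO)
			return -1;	/* HVCMOS, JTAG and SPI groups */
		return gpp->gpio_base + (pad - gpp->base);
	}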
diff --git a/drivers/pinctrl/mediatek/pinctrl-mtk-mt2712.h b/drivers/pinctrl/mediatek/pinctrl-mtk-mt2712.h
index ba2356a8ab89..845c408b5fdb 100644
--- a/drivers/pinctrl/mediatek/pinctrl-mtk-mt2712.h
+++ b/drivers/pinctrl/mediatek/pinctrl-mtk-mt2712.h
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* Copyright (C) 2018 MediaTek Inc.
* Author: Zhiyong Tao <zhiyong.tao@mediatek.com>
diff --git a/drivers/pinctrl/meson/pinctrl-meson-axg-pmx.h b/drivers/pinctrl/meson/pinctrl-meson-axg-pmx.h
index 8ff88bf2e849..aa79d7ecee00 100644
--- a/drivers/pinctrl/meson/pinctrl-meson-axg-pmx.h
+++ b/drivers/pinctrl/meson/pinctrl-meson-axg-pmx.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: (GPL-2.0+ or MIT) */
/*
* Copyright (c) 2017 Baylibre SAS.
* Author: Jerome Brunet <jbrunet@baylibre.com>
@@ -5,7 +6,6 @@
* Copyright (c) 2017 Amlogic, Inc. All rights reserved.
* Author: Xingyu Chen <xingyu.chen@amlogic.com>
*
- * SPDX-License-Identifier: (GPL-2.0+ or MIT)
*/
struct meson_pmx_bank {
diff --git a/drivers/pinctrl/meson/pinctrl-meson8b.c b/drivers/pinctrl/meson/pinctrl-meson8b.c
index 2d5339edd0b7..6cd4b3ec1b40 100644
--- a/drivers/pinctrl/meson/pinctrl-meson8b.c
+++ b/drivers/pinctrl/meson/pinctrl-meson8b.c
@@ -233,6 +233,8 @@ static const unsigned int hdmi_scl_pins[] = { GPIOH_2 };
static const unsigned int hdmi_cec_0_pins[] = { GPIOH_3 };
static const unsigned int eth_txd1_0_pins[] = { GPIOH_5 };
static const unsigned int eth_txd0_0_pins[] = { GPIOH_6 };
+static const unsigned int eth_rxd3_h_pins[] = { GPIOH_5 };
+static const unsigned int eth_rxd2_h_pins[] = { GPIOH_6 };
static const unsigned int clk_24m_out_pins[] = { GPIOH_9 };
static const unsigned int spi_ss1_pins[] = { GPIOH_0 };
@@ -535,6 +537,8 @@ static struct meson_pmx_group meson8b_cbus_groups[] = {
GROUP(spi_miso_1, 9, 12),
GROUP(spi_mosi_1, 9, 11),
GROUP(spi_sclk_1, 9, 10),
+ GROUP(eth_rxd3_h, 6, 15),
+ GROUP(eth_rxd2_h, 6, 14),
GROUP(eth_txd3, 6, 13),
GROUP(eth_txd2, 6, 12),
GROUP(eth_tx_clk, 6, 11),
@@ -746,7 +750,8 @@ static const char * const ethernet_groups[] = {
"eth_tx_clk", "eth_tx_en", "eth_txd1_0", "eth_txd1_1",
"eth_txd0_0", "eth_txd0_1", "eth_rx_clk", "eth_rx_dv",
"eth_rxd1", "eth_rxd0", "eth_mdio_en", "eth_mdc", "eth_ref_clk",
- "eth_txd2", "eth_txd3", "eth_rxd3", "eth_rxd2"
+ "eth_txd2", "eth_txd3", "eth_rxd3", "eth_rxd2",
+ "eth_rxd3_h", "eth_rxd2_h"
};
static const char * const i2c_a_groups[] = {
diff --git a/drivers/pinctrl/mvebu/pinctrl-armada-37xx.c b/drivers/pinctrl/mvebu/pinctrl-armada-37xx.c
index aa9dcde0f069..243fba254175 100644
--- a/drivers/pinctrl/mvebu/pinctrl-armada-37xx.c
+++ b/drivers/pinctrl/mvebu/pinctrl-armada-37xx.c
@@ -15,7 +15,6 @@
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_device.h>
-#include <linux/of_irq.h>
#include <linux/pinctrl/pinconf-generic.h>
#include <linux/pinctrl/pinconf.h>
#include <linux/pinctrl/pinctrl.h>
@@ -733,13 +732,20 @@ static int armada_37xx_irqchip_register(struct platform_device *pdev,
ret = 0;
break;
}
- };
+ }
if (ret) {
dev_err(dev, "no gpio-controller child node\n");
return ret;
}
- nr_irq_parent = of_irq_count(np);
+ nr_irq_parent = platform_irq_count(pdev);
+ if (nr_irq_parent < 0) {
+ if (nr_irq_parent != -EPROBE_DEFER)
+ dev_err(dev, "Couldn't determine irq count: %pe\n",
+ ERR_PTR(nr_irq_parent));
+ return nr_irq_parent;
+ }
+
spin_lock_init(&info->irq_lock);
if (!nr_irq_parent) {
@@ -776,7 +782,7 @@ static int armada_37xx_irqchip_register(struct platform_device *pdev,
if (!girq->parents)
return -ENOMEM;
for (i = 0; i < nr_irq_parent; i++) {
- int irq = irq_of_parse_and_map(np, i);
+ int irq = platform_get_irq(pdev, i);
if (irq < 0)
continue;
@@ -800,7 +806,7 @@ static int armada_37xx_gpiochip_register(struct platform_device *pdev,
ret = 0;
break;
}
- };
+ }
if (ret)
return ret;
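
The armada-37xx change is more than a cleanup: of_irq_count() cannot report -EPROBE_DEFER, so a not-yet-probed parent interrupt controller used to look like "zero IRQs", while platform_irq_count() can, and the new code propagates it. The same pattern as a minimal standalone sketch for any platform driver:

	static int count_parent_irqs(struct platform_device *pdev)
	{
		int nr_irq = platform_irq_count(pdev);

		if (nr_irq < 0) {
			/* -EPROBE_DEFER is expected churn; log real errors only. */
			if (nr_irq != -EPROBE_DEFER)
				dev_err(&pdev->dev, "irq count failed: %d\n", nr_irq);
			return nr_irq;
		}

		return nr_irq;	/* 0 is valid: no parent IRQs wired up */
	}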
diff --git a/drivers/pinctrl/nuvoton/pinctrl-npcm7xx.c b/drivers/pinctrl/nuvoton/pinctrl-npcm7xx.c
index 22077cbe6880..a935065cdac4 100644
--- a/drivers/pinctrl/nuvoton/pinctrl-npcm7xx.c
+++ b/drivers/pinctrl/nuvoton/pinctrl-npcm7xx.c
@@ -331,7 +331,7 @@ static unsigned int npcmgpio_irq_startup(struct irq_data *d)
return 0;
}
-static struct irq_chip npcmgpio_irqchip = {
+static const struct irq_chip npcmgpio_irqchip = {
.name = "NPCM7XX-GPIO-IRQ",
.irq_ack = npcmgpio_irq_ack,
.irq_unmask = npcmgpio_irq_unmask,
diff --git a/drivers/pinctrl/pinctrl-amd.c b/drivers/pinctrl/pinctrl-amd.c
index eab078244a4c..73aff6591de2 100644
--- a/drivers/pinctrl/pinctrl-amd.c
+++ b/drivers/pinctrl/pinctrl-amd.c
@@ -866,7 +866,7 @@ static int amd_gpio_probe(struct platform_device *pdev)
return -EINVAL;
}
- gpio_dev->base = devm_ioremap_nocache(&pdev->dev, res->start,
+ gpio_dev->base = devm_ioremap(&pdev->dev, res->start,
resource_size(res));
if (!gpio_dev->base)
return -ENOMEM;
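
devm_ioremap_nocache() was by this point an alias for devm_ioremap() (ioremap defaults to an uncached mapping), so the substitution above is mechanical, ahead of the alias being removed. For new code a shorter equivalent exists that fetches and maps the MEM resource in one step; a sketch, noting it returns an ERR_PTR() rather than NULL on failure:

	gpio_dev->base = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(gpio_dev->base))
		return PTR_ERR(gpio_dev->base);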
diff --git a/drivers/pinctrl/pinctrl-artpec6.c b/drivers/pinctrl/pinctrl-artpec6.c
index 986e04ac6b5b..d6c9f9dcff97 100644
--- a/drivers/pinctrl/pinctrl-artpec6.c
+++ b/drivers/pinctrl/pinctrl-artpec6.c
@@ -798,7 +798,7 @@ static int artpec6_pconf_set(struct pinctrl_dev *pctldev, unsigned int pin,
enum pin_config_param param;
unsigned int arg;
unsigned int regval;
- unsigned int *reg;
+ void __iomem *reg;
int i;
/* Check for valid pin */
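
The artpec6 fix is a sparse-correctness change: an MMIO address is a cookie of type void __iomem *, which must go through the I/O accessors rather than being dereferenced like a normal pointer. The idiom, as a tiny sketch with hypothetical names:

	void __iomem *reg = base + offset;	/* cookie, not a plain pointer */
	u32 regval;

	regval = readl(reg);	/* dereferencing *reg would trip sparse */
	regval |= BIT(3);
	writel(regval, reg);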
diff --git a/drivers/pinctrl/pinctrl-ingenic.c b/drivers/pinctrl/pinctrl-ingenic.c
index 369e04350e3d..96f04d121ebd 100644
--- a/drivers/pinctrl/pinctrl-ingenic.c
+++ b/drivers/pinctrl/pinctrl-ingenic.c
@@ -3,7 +3,7 @@
* Ingenic SoCs pinctrl driver
*
* Copyright (c) 2017 Paul Cercueil <paul@crapouillou.net>
- * Copyright (c) 2019 Zhou Yanjie <zhouyanjie@zoho.com>
+ * Copyright (c) 2019 周琰杰 (Zhou Yanjie) <zhouyanjie@wanyeetech.com>
*/
#include <linux/compiler.h>
@@ -42,28 +42,36 @@
#define JZ4760_GPIO_FLAG 0x50
#define JZ4760_GPIO_PEN 0x70
-#define X1000_GPIO_PZ_BASE 0x700
-#define X1000_GPIO_PZ_GID2LD 0x7f0
+#define X1830_GPIO_PEL 0x110
+#define X1830_GPIO_PEH 0x120
#define REG_SET(x) ((x) + 0x4)
#define REG_CLEAR(x) ((x) + 0x8)
+#define REG_PZ_BASE(x) ((x) * 7)
+#define REG_PZ_GID2LD(x) ((x) * 7 + 0xf0)
+
+#define GPIO_PULL_DIS 0
+#define GPIO_PULL_UP 1
+#define GPIO_PULL_DOWN 2
+
#define PINS_PER_GPIO_CHIP 32
enum jz_version {
ID_JZ4740,
ID_JZ4725B,
ID_JZ4760,
- ID_JZ4760B,
ID_JZ4770,
ID_JZ4780,
ID_X1000,
- ID_X1000E,
ID_X1500,
+ ID_X1830,
};
struct ingenic_chip_info {
unsigned int num_chips;
+ unsigned int reg_offset;
+ enum jz_version version;
const struct group_desc *groups;
unsigned int num_groups;
@@ -79,7 +87,6 @@ struct ingenic_pinctrl {
struct regmap *map;
struct pinctrl_dev *pctl;
struct pinctrl_pin_desc *pdesc;
- enum jz_version version;
const struct ingenic_chip_info *info;
};
@@ -216,6 +223,8 @@ static const struct function_desc jz4740_functions[] = {
static const struct ingenic_chip_info jz4740_chip_info = {
.num_chips = 4,
+ .reg_offset = 0x100,
+ .version = ID_JZ4740,
.groups = jz4740_groups,
.num_groups = ARRAY_SIZE(jz4740_groups),
.functions = jz4740_functions,
@@ -339,6 +348,8 @@ static const struct function_desc jz4725b_functions[] = {
static const struct ingenic_chip_info jz4725b_chip_info = {
.num_chips = 4,
+ .reg_offset = 0x100,
+ .version = ID_JZ4725B,
.groups = jz4725b_groups,
.num_groups = ARRAY_SIZE(jz4725b_groups),
.functions = jz4725b_functions,
@@ -592,16 +603,8 @@ static const struct function_desc jz4760_functions[] = {
static const struct ingenic_chip_info jz4760_chip_info = {
.num_chips = 6,
- .groups = jz4760_groups,
- .num_groups = ARRAY_SIZE(jz4760_groups),
- .functions = jz4760_functions,
- .num_functions = ARRAY_SIZE(jz4760_functions),
- .pull_ups = jz4760_pull_ups,
- .pull_downs = jz4760_pull_downs,
-};
-
-static const struct ingenic_chip_info jz4760b_chip_info = {
- .num_chips = 6,
+ .reg_offset = 0x100,
+ .version = ID_JZ4760,
.groups = jz4760_groups,
.num_groups = ARRAY_SIZE(jz4760_groups),
.functions = jz4760_functions,
@@ -880,6 +883,8 @@ static const struct function_desc jz4770_functions[] = {
static const struct ingenic_chip_info jz4770_chip_info = {
.num_chips = 6,
+ .reg_offset = 0x100,
+ .version = ID_JZ4770,
.groups = jz4770_groups,
.num_groups = ARRAY_SIZE(jz4770_groups),
.functions = jz4770_functions,
@@ -1013,6 +1018,8 @@ static const struct function_desc jz4780_functions[] = {
static const struct ingenic_chip_info jz4780_chip_info = {
.num_chips = 6,
+ .reg_offset = 0x100,
+ .version = ID_JZ4780,
.groups = jz4780_groups,
.num_groups = ARRAY_SIZE(jz4780_groups),
.functions = jz4780_functions,
@@ -1022,7 +1029,7 @@ static const struct ingenic_chip_info jz4780_chip_info = {
};
static const u32 x1000_pull_ups[4] = {
- 0xffffffff, 0x8dffffff, 0x7d3fffff, 0xffffffff,
+ 0xffffffff, 0xfdffffff, 0x0dffffff, 0x0000003f,
};
static const u32 x1000_pull_downs[4] = {
@@ -1033,28 +1040,45 @@ static int x1000_uart0_data_pins[] = { 0x4a, 0x4b, };
static int x1000_uart0_hwflow_pins[] = { 0x4c, 0x4d, };
static int x1000_uart1_data_a_pins[] = { 0x04, 0x05, };
static int x1000_uart1_data_d_pins[] = { 0x62, 0x63, };
-static int x1000_uart1_hwflow_d_pins[] = { 0x64, 0x65, };
+static int x1000_uart1_hwflow_pins[] = { 0x64, 0x65, };
static int x1000_uart2_data_a_pins[] = { 0x02, 0x03, };
static int x1000_uart2_data_d_pins[] = { 0x65, 0x64, };
+static int x1000_sfc_pins[] = { 0x1d, 0x1c, 0x1e, 0x1f, 0x1a, 0x1b, };
+static int x1000_ssi_dt_a_22_pins[] = { 0x16, };
+static int x1000_ssi_dt_a_29_pins[] = { 0x1d, };
+static int x1000_ssi_dt_d_pins[] = { 0x62, };
+static int x1000_ssi_dr_a_23_pins[] = { 0x17, };
+static int x1000_ssi_dr_a_28_pins[] = { 0x1c, };
+static int x1000_ssi_dr_d_pins[] = { 0x63, };
+static int x1000_ssi_clk_a_24_pins[] = { 0x18, };
+static int x1000_ssi_clk_a_26_pins[] = { 0x1a, };
+static int x1000_ssi_clk_d_pins[] = { 0x60, };
+static int x1000_ssi_gpc_a_20_pins[] = { 0x14, };
+static int x1000_ssi_gpc_a_31_pins[] = { 0x1f, };
+static int x1000_ssi_ce0_a_25_pins[] = { 0x19, };
+static int x1000_ssi_ce0_a_27_pins[] = { 0x1b, };
+static int x1000_ssi_ce0_d_pins[] = { 0x61, };
+static int x1000_ssi_ce1_a_21_pins[] = { 0x15, };
+static int x1000_ssi_ce1_a_30_pins[] = { 0x1e, };
static int x1000_mmc0_1bit_pins[] = { 0x18, 0x19, 0x17, };
static int x1000_mmc0_4bit_pins[] = { 0x16, 0x15, 0x14, };
static int x1000_mmc0_8bit_pins[] = { 0x13, 0x12, 0x11, 0x10, };
static int x1000_mmc1_1bit_pins[] = { 0x40, 0x41, 0x42, };
static int x1000_mmc1_4bit_pins[] = { 0x43, 0x44, 0x45, };
-static int x1000_nemc_8bit_data_pins[] = {
+static int x1000_emc_8bit_data_pins[] = {
0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
};
-static int x1000_nemc_16bit_data_pins[] = {
+static int x1000_emc_16bit_data_pins[] = {
0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f,
};
-static int x1000_nemc_addr_pins[] = {
+static int x1000_emc_addr_pins[] = {
0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27,
0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f,
};
-static int x1000_nemc_rd_we_pins[] = { 0x30, 0x31, };
-static int x1000_nemc_wait_pins[] = { 0x34, };
-static int x1000_nemc_cs1_pins[] = { 0x32, };
-static int x1000_nemc_cs2_pins[] = { 0x33, };
+static int x1000_emc_rd_we_pins[] = { 0x30, 0x31, };
+static int x1000_emc_wait_pins[] = { 0x34, };
+static int x1000_emc_cs1_pins[] = { 0x32, };
+static int x1000_emc_cs2_pins[] = { 0x33, };
static int x1000_i2c0_pins[] = { 0x38, 0x37, };
static int x1000_i2c1_a_pins[] = { 0x01, 0x00, };
static int x1000_i2c1_c_pins[] = { 0x5b, 0x5a, };
@@ -1083,23 +1107,40 @@ static int x1000_uart0_data_funcs[] = { 0, 0, };
static int x1000_uart0_hwflow_funcs[] = { 0, 0, };
static int x1000_uart1_data_a_funcs[] = { 2, 2, };
static int x1000_uart1_data_d_funcs[] = { 1, 1, };
-static int x1000_uart1_hwflow_d_funcs[] = { 1, 1, };
+static int x1000_uart1_hwflow_funcs[] = { 1, 1, };
static int x1000_uart2_data_a_funcs[] = { 2, 2, };
static int x1000_uart2_data_d_funcs[] = { 0, 0, };
+static int x1000_sfc_funcs[] = { 1, 1, 1, 1, 1, 1, };
+static int x1000_ssi_dt_a_22_funcs[] = { 2, };
+static int x1000_ssi_dt_a_29_funcs[] = { 2, };
+static int x1000_ssi_dt_d_funcs[] = { 0, };
+static int x1000_ssi_dr_a_23_funcs[] = { 2, };
+static int x1000_ssi_dr_a_28_funcs[] = { 2, };
+static int x1000_ssi_dr_d_funcs[] = { 0, };
+static int x1000_ssi_clk_a_24_funcs[] = { 2, };
+static int x1000_ssi_clk_a_26_funcs[] = { 2, };
+static int x1000_ssi_clk_d_funcs[] = { 0, };
+static int x1000_ssi_gpc_a_20_funcs[] = { 2, };
+static int x1000_ssi_gpc_a_31_funcs[] = { 2, };
+static int x1000_ssi_ce0_a_25_funcs[] = { 2, };
+static int x1000_ssi_ce0_a_27_funcs[] = { 2, };
+static int x1000_ssi_ce0_d_funcs[] = { 0, };
+static int x1000_ssi_ce1_a_21_funcs[] = { 2, };
+static int x1000_ssi_ce1_a_30_funcs[] = { 2, };
static int x1000_mmc0_1bit_funcs[] = { 1, 1, 1, };
static int x1000_mmc0_4bit_funcs[] = { 1, 1, 1, };
static int x1000_mmc0_8bit_funcs[] = { 1, 1, 1, 1, 1, };
static int x1000_mmc1_1bit_funcs[] = { 0, 0, 0, };
static int x1000_mmc1_4bit_funcs[] = { 0, 0, 0, };
-static int x1000_nemc_8bit_data_funcs[] = { 0, 0, 0, 0, 0, 0, 0, 0, };
-static int x1000_nemc_16bit_data_funcs[] = { 0, 0, 0, 0, 0, 0, 0, 0, };
-static int x1000_nemc_addr_funcs[] = {
+static int x1000_emc_8bit_data_funcs[] = { 0, 0, 0, 0, 0, 0, 0, 0, };
+static int x1000_emc_16bit_data_funcs[] = { 0, 0, 0, 0, 0, 0, 0, 0, };
+static int x1000_emc_addr_funcs[] = {
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
};
-static int x1000_nemc_rd_we_funcs[] = { 0, 0, };
-static int x1000_nemc_wait_funcs[] = { 0, };
-static int x1000_nemc_cs1_funcs[] = { 0, };
-static int x1000_nemc_cs2_funcs[] = { 0, };
+static int x1000_emc_rd_we_funcs[] = { 0, 0, };
+static int x1000_emc_wait_funcs[] = { 0, };
+static int x1000_emc_cs1_funcs[] = { 0, };
+static int x1000_emc_cs2_funcs[] = { 0, };
static int x1000_i2c0_funcs[] = { 0, 0, };
static int x1000_i2c1_a_funcs[] = { 2, 2, };
static int x1000_i2c1_c_funcs[] = { 0, 0, };
@@ -1121,21 +1162,38 @@ static const struct group_desc x1000_groups[] = {
INGENIC_PIN_GROUP("uart0-hwflow", x1000_uart0_hwflow),
INGENIC_PIN_GROUP("uart1-data-a", x1000_uart1_data_a),
INGENIC_PIN_GROUP("uart1-data-d", x1000_uart1_data_d),
- INGENIC_PIN_GROUP("uart1-hwflow-d", x1000_uart1_hwflow_d),
+ INGENIC_PIN_GROUP("uart1-hwflow", x1000_uart1_hwflow),
INGENIC_PIN_GROUP("uart2-data-a", x1000_uart2_data_a),
INGENIC_PIN_GROUP("uart2-data-d", x1000_uart2_data_d),
+ INGENIC_PIN_GROUP("sfc", x1000_sfc),
+ INGENIC_PIN_GROUP("ssi-dt-a-22", x1000_ssi_dt_a_22),
+ INGENIC_PIN_GROUP("ssi-dt-a-29", x1000_ssi_dt_a_29),
+ INGENIC_PIN_GROUP("ssi-dt-d", x1000_ssi_dt_d),
+ INGENIC_PIN_GROUP("ssi-dr-a-23", x1000_ssi_dr_a_23),
+ INGENIC_PIN_GROUP("ssi-dr-a-28", x1000_ssi_dr_a_28),
+ INGENIC_PIN_GROUP("ssi-dr-d", x1000_ssi_dr_d),
+ INGENIC_PIN_GROUP("ssi-clk-a-24", x1000_ssi_clk_a_24),
+ INGENIC_PIN_GROUP("ssi-clk-a-26", x1000_ssi_clk_a_26),
+ INGENIC_PIN_GROUP("ssi-clk-d", x1000_ssi_clk_d),
+ INGENIC_PIN_GROUP("ssi-gpc-a-20", x1000_ssi_gpc_a_20),
+ INGENIC_PIN_GROUP("ssi-gpc-a-31", x1000_ssi_gpc_a_31),
+ INGENIC_PIN_GROUP("ssi-ce0-a-25", x1000_ssi_ce0_a_25),
+ INGENIC_PIN_GROUP("ssi-ce0-a-27", x1000_ssi_ce0_a_27),
+ INGENIC_PIN_GROUP("ssi-ce0-d", x1000_ssi_ce0_d),
+ INGENIC_PIN_GROUP("ssi-ce1-a-21", x1000_ssi_ce1_a_21),
+ INGENIC_PIN_GROUP("ssi-ce1-a-30", x1000_ssi_ce1_a_30),
INGENIC_PIN_GROUP("mmc0-1bit", x1000_mmc0_1bit),
INGENIC_PIN_GROUP("mmc0-4bit", x1000_mmc0_4bit),
INGENIC_PIN_GROUP("mmc0-8bit", x1000_mmc0_8bit),
INGENIC_PIN_GROUP("mmc1-1bit", x1000_mmc1_1bit),
INGENIC_PIN_GROUP("mmc1-4bit", x1000_mmc1_4bit),
- INGENIC_PIN_GROUP("nemc-8bit-data", x1000_nemc_8bit_data),
- INGENIC_PIN_GROUP("nemc-16bit-data", x1000_nemc_16bit_data),
- INGENIC_PIN_GROUP("nemc-addr", x1000_nemc_addr),
- INGENIC_PIN_GROUP("nemc-rd-we", x1000_nemc_rd_we),
- INGENIC_PIN_GROUP("nemc-wait", x1000_nemc_wait),
- INGENIC_PIN_GROUP("nemc-cs1", x1000_nemc_cs1),
- INGENIC_PIN_GROUP("nemc-cs2", x1000_nemc_cs2),
+ INGENIC_PIN_GROUP("emc-8bit-data", x1000_emc_8bit_data),
+ INGENIC_PIN_GROUP("emc-16bit-data", x1000_emc_16bit_data),
+ INGENIC_PIN_GROUP("emc-addr", x1000_emc_addr),
+ INGENIC_PIN_GROUP("emc-rd-we", x1000_emc_rd_we),
+ INGENIC_PIN_GROUP("emc-wait", x1000_emc_wait),
+ INGENIC_PIN_GROUP("emc-cs1", x1000_emc_cs1),
+ INGENIC_PIN_GROUP("emc-cs2", x1000_emc_cs2),
INGENIC_PIN_GROUP("i2c0-data", x1000_i2c0),
INGENIC_PIN_GROUP("i2c1-data-a", x1000_i2c1_a),
INGENIC_PIN_GROUP("i2c1-data-c", x1000_i2c1_c),
@@ -1154,21 +1212,30 @@ static const struct group_desc x1000_groups[] = {
static const char *x1000_uart0_groups[] = { "uart0-data", "uart0-hwflow", };
static const char *x1000_uart1_groups[] = {
- "uart1-data-a", "uart1-data-d", "uart1-hwflow-d",
+ "uart1-data-a", "uart1-data-d", "uart1-hwflow",
};
static const char *x1000_uart2_groups[] = { "uart2-data-a", "uart2-data-d", };
+static const char *x1000_sfc_groups[] = { "sfc", };
+static const char *x1000_ssi_groups[] = {
+ "ssi-dt-a-22", "ssi-dt-a-29", "ssi-dt-d",
+ "ssi-dr-a-23", "ssi-dr-a-28", "ssi-dr-d",
+ "ssi-clk-a-24", "ssi-clk-a-26", "ssi-clk-d",
+ "ssi-gpc-a-20", "ssi-gpc-a-31",
+ "ssi-ce0-a-25", "ssi-ce0-a-27", "ssi-ce0-d",
+ "ssi-ce1-a-21", "ssi-ce1-a-30",
+};
static const char *x1000_mmc0_groups[] = {
"mmc0-1bit", "mmc0-4bit", "mmc0-8bit",
};
static const char *x1000_mmc1_groups[] = {
- "mmc1-1bit-e", "mmc1-4bit-e",
+ "mmc1-1bit", "mmc1-4bit",
};
-static const char *x1000_nemc_groups[] = {
- "nemc-8bit-data", "nemc-16bit-data",
- "nemc-addr", "nemc-rd-we", "nemc-wait",
+static const char *x1000_emc_groups[] = {
+ "emc-8bit-data", "emc-16bit-data",
+ "emc-addr", "emc-rd-we", "emc-wait",
};
-static const char *x1000_cs1_groups[] = { "nemc-cs1", };
-static const char *x1000_cs2_groups[] = { "nemc-cs2", };
+static const char *x1000_cs1_groups[] = { "emc-cs1", };
+static const char *x1000_cs2_groups[] = { "emc-cs2", };
static const char *x1000_i2c0_groups[] = { "i2c0-data", };
static const char *x1000_i2c1_groups[] = { "i2c1-data-a", "i2c1-data-c", };
static const char *x1000_i2c2_groups[] = { "i2c2-data", };
@@ -1187,11 +1254,13 @@ static const struct function_desc x1000_functions[] = {
{ "uart0", x1000_uart0_groups, ARRAY_SIZE(x1000_uart0_groups), },
{ "uart1", x1000_uart1_groups, ARRAY_SIZE(x1000_uart1_groups), },
{ "uart2", x1000_uart2_groups, ARRAY_SIZE(x1000_uart2_groups), },
+ { "sfc", x1000_sfc_groups, ARRAY_SIZE(x1000_sfc_groups), },
+ { "ssi", x1000_ssi_groups, ARRAY_SIZE(x1000_ssi_groups), },
{ "mmc0", x1000_mmc0_groups, ARRAY_SIZE(x1000_mmc0_groups), },
{ "mmc1", x1000_mmc1_groups, ARRAY_SIZE(x1000_mmc1_groups), },
- { "nemc", x1000_nemc_groups, ARRAY_SIZE(x1000_nemc_groups), },
- { "nemc-cs1", x1000_cs1_groups, ARRAY_SIZE(x1000_cs1_groups), },
- { "nemc-cs2", x1000_cs2_groups, ARRAY_SIZE(x1000_cs2_groups), },
+ { "emc", x1000_emc_groups, ARRAY_SIZE(x1000_emc_groups), },
+ { "emc-cs1", x1000_cs1_groups, ARRAY_SIZE(x1000_cs1_groups), },
+ { "emc-cs2", x1000_cs2_groups, ARRAY_SIZE(x1000_cs2_groups), },
{ "i2c0", x1000_i2c0_groups, ARRAY_SIZE(x1000_i2c0_groups), },
{ "i2c1", x1000_i2c1_groups, ARRAY_SIZE(x1000_i2c1_groups), },
{ "i2c2", x1000_i2c2_groups, ARRAY_SIZE(x1000_i2c2_groups), },
@@ -1207,16 +1276,8 @@ static const struct function_desc x1000_functions[] = {
static const struct ingenic_chip_info x1000_chip_info = {
.num_chips = 4,
- .groups = x1000_groups,
- .num_groups = ARRAY_SIZE(x1000_groups),
- .functions = x1000_functions,
- .num_functions = ARRAY_SIZE(x1000_functions),
- .pull_ups = x1000_pull_ups,
- .pull_downs = x1000_pull_downs,
-};
-
-static const struct ingenic_chip_info x1000e_chip_info = {
- .num_chips = 4,
+ .reg_offset = 0x100,
+ .version = ID_X1000,
.groups = x1000_groups,
.num_groups = ARRAY_SIZE(x1000_groups),
.functions = x1000_functions,
@@ -1229,11 +1290,11 @@ static int x1500_uart0_data_pins[] = { 0x4a, 0x4b, };
static int x1500_uart0_hwflow_pins[] = { 0x4c, 0x4d, };
static int x1500_uart1_data_a_pins[] = { 0x04, 0x05, };
static int x1500_uart1_data_d_pins[] = { 0x62, 0x63, };
-static int x1500_uart1_hwflow_d_pins[] = { 0x64, 0x65, };
+static int x1500_uart1_hwflow_pins[] = { 0x64, 0x65, };
static int x1500_uart2_data_a_pins[] = { 0x02, 0x03, };
static int x1500_uart2_data_d_pins[] = { 0x65, 0x64, };
-static int x1500_mmc0_1bit_pins[] = { 0x18, 0x19, 0x17, };
-static int x1500_mmc0_4bit_pins[] = { 0x16, 0x15, 0x14, };
+static int x1500_mmc_1bit_pins[] = { 0x18, 0x19, 0x17, };
+static int x1500_mmc_4bit_pins[] = { 0x16, 0x15, 0x14, };
static int x1500_i2c0_pins[] = { 0x38, 0x37, };
static int x1500_i2c1_a_pins[] = { 0x01, 0x00, };
static int x1500_i2c1_c_pins[] = { 0x5b, 0x5a, };
@@ -1252,11 +1313,11 @@ static int x1500_uart0_data_funcs[] = { 0, 0, };
static int x1500_uart0_hwflow_funcs[] = { 0, 0, };
static int x1500_uart1_data_a_funcs[] = { 2, 2, };
static int x1500_uart1_data_d_funcs[] = { 1, 1, };
-static int x1500_uart1_hwflow_d_funcs[] = { 1, 1, };
+static int x1500_uart1_hwflow_funcs[] = { 1, 1, };
static int x1500_uart2_data_a_funcs[] = { 2, 2, };
static int x1500_uart2_data_d_funcs[] = { 0, 0, };
-static int x1500_mmc0_1bit_funcs[] = { 1, 1, 1, };
-static int x1500_mmc0_4bit_funcs[] = { 1, 1, 1, };
+static int x1500_mmc_1bit_funcs[] = { 1, 1, 1, };
+static int x1500_mmc_4bit_funcs[] = { 1, 1, 1, };
static int x1500_i2c0_funcs[] = { 0, 0, };
static int x1500_i2c1_a_funcs[] = { 2, 2, };
static int x1500_i2c1_c_funcs[] = { 0, 0, };
@@ -1273,11 +1334,12 @@ static const struct group_desc x1500_groups[] = {
INGENIC_PIN_GROUP("uart0-hwflow", x1500_uart0_hwflow),
INGENIC_PIN_GROUP("uart1-data-a", x1500_uart1_data_a),
INGENIC_PIN_GROUP("uart1-data-d", x1500_uart1_data_d),
- INGENIC_PIN_GROUP("uart1-hwflow-d", x1500_uart1_hwflow_d),
+ INGENIC_PIN_GROUP("uart1-hwflow", x1500_uart1_hwflow),
INGENIC_PIN_GROUP("uart2-data-a", x1500_uart2_data_a),
INGENIC_PIN_GROUP("uart2-data-d", x1500_uart2_data_d),
- INGENIC_PIN_GROUP("mmc0-1bit", x1500_mmc0_1bit),
- INGENIC_PIN_GROUP("mmc0-4bit", x1500_mmc0_4bit),
+ INGENIC_PIN_GROUP("sfc", x1000_sfc),
+ INGENIC_PIN_GROUP("mmc-1bit", x1500_mmc_1bit),
+ INGENIC_PIN_GROUP("mmc-4bit", x1500_mmc_4bit),
INGENIC_PIN_GROUP("i2c0-data", x1500_i2c0),
INGENIC_PIN_GROUP("i2c1-data-a", x1500_i2c1_a),
INGENIC_PIN_GROUP("i2c1-data-c", x1500_i2c1_c),
@@ -1293,10 +1355,10 @@ static const struct group_desc x1500_groups[] = {
static const char *x1500_uart0_groups[] = { "uart0-data", "uart0-hwflow", };
static const char *x1500_uart1_groups[] = {
- "uart1-data-a", "uart1-data-d", "uart1-hwflow-d",
+ "uart1-data-a", "uart1-data-d", "uart1-hwflow",
};
static const char *x1500_uart2_groups[] = { "uart2-data-a", "uart2-data-d", };
-static const char *x1500_mmc0_groups[] = { "mmc0-1bit", "mmc0-4bit", };
+static const char *x1500_mmc_groups[] = { "mmc-1bit", "mmc-4bit", };
static const char *x1500_i2c0_groups[] = { "i2c0-data", };
static const char *x1500_i2c1_groups[] = { "i2c1-data-a", "i2c1-data-c", };
static const char *x1500_i2c2_groups[] = { "i2c2-data", };
@@ -1312,7 +1374,8 @@ static const struct function_desc x1500_functions[] = {
{ "uart0", x1500_uart0_groups, ARRAY_SIZE(x1500_uart0_groups), },
{ "uart1", x1500_uart1_groups, ARRAY_SIZE(x1500_uart1_groups), },
{ "uart2", x1500_uart2_groups, ARRAY_SIZE(x1500_uart2_groups), },
- { "mmc0", x1500_mmc0_groups, ARRAY_SIZE(x1500_mmc0_groups), },
+ { "sfc", x1000_sfc_groups, ARRAY_SIZE(x1000_sfc_groups), },
+ { "mmc", x1500_mmc_groups, ARRAY_SIZE(x1500_mmc_groups), },
{ "i2c0", x1500_i2c0_groups, ARRAY_SIZE(x1500_i2c0_groups), },
{ "i2c1", x1500_i2c1_groups, ARRAY_SIZE(x1500_i2c1_groups), },
{ "i2c2", x1500_i2c2_groups, ARRAY_SIZE(x1500_i2c2_groups), },
@@ -1327,6 +1390,8 @@ static const struct function_desc x1500_functions[] = {
static const struct ingenic_chip_info x1500_chip_info = {
.num_chips = 4,
+ .reg_offset = 0x100,
+ .version = ID_X1500,
.groups = x1500_groups,
.num_groups = ARRAY_SIZE(x1500_groups),
.functions = x1500_functions,
@@ -1335,6 +1400,222 @@ static const struct ingenic_chip_info x1500_chip_info = {
.pull_downs = x1000_pull_downs,
};
+static const u32 x1830_pull_ups[4] = {
+ 0x5fdfffc0, 0xffffefff, 0x1ffffbff, 0x0fcff3fc,
+};
+
+static const u32 x1830_pull_downs[4] = {
+ 0x5fdfffc0, 0xffffefff, 0x1ffffbff, 0x0fcff3fc,
+};
+
+static int x1830_uart0_data_pins[] = { 0x33, 0x36, };
+static int x1830_uart0_hwflow_pins[] = { 0x34, 0x35, };
+static int x1830_uart1_data_pins[] = { 0x38, 0x37, };
+static int x1830_sfc_pins[] = { 0x17, 0x18, 0x1a, 0x19, 0x1b, 0x1c, };
+static int x1830_ssi0_dt_pins[] = { 0x4c, };
+static int x1830_ssi0_dr_pins[] = { 0x4b, };
+static int x1830_ssi0_clk_pins[] = { 0x4f, };
+static int x1830_ssi0_gpc_pins[] = { 0x4d, };
+static int x1830_ssi0_ce0_pins[] = { 0x50, };
+static int x1830_ssi0_ce1_pins[] = { 0x4e, };
+static int x1830_ssi1_dt_c_pins[] = { 0x53, };
+static int x1830_ssi1_dr_c_pins[] = { 0x54, };
+static int x1830_ssi1_clk_c_pins[] = { 0x57, };
+static int x1830_ssi1_gpc_c_pins[] = { 0x55, };
+static int x1830_ssi1_ce0_c_pins[] = { 0x58, };
+static int x1830_ssi1_ce1_c_pins[] = { 0x56, };
+static int x1830_ssi1_dt_d_pins[] = { 0x62, };
+static int x1830_ssi1_dr_d_pins[] = { 0x63, };
+static int x1830_ssi1_clk_d_pins[] = { 0x66, };
+static int x1830_ssi1_gpc_d_pins[] = { 0x64, };
+static int x1830_ssi1_ce0_d_pins[] = { 0x67, };
+static int x1830_ssi1_ce1_d_pins[] = { 0x65, };
+static int x1830_mmc0_1bit_pins[] = { 0x24, 0x25, 0x20, };
+static int x1830_mmc0_4bit_pins[] = { 0x21, 0x22, 0x23, };
+static int x1830_mmc1_1bit_pins[] = { 0x42, 0x43, 0x44, };
+static int x1830_mmc1_4bit_pins[] = { 0x45, 0x46, 0x47, };
+static int x1830_i2c0_pins[] = { 0x0c, 0x0d, };
+static int x1830_i2c1_pins[] = { 0x39, 0x3a, };
+static int x1830_i2c2_pins[] = { 0x5b, 0x5c, };
+static int x1830_pwm_pwm0_b_pins[] = { 0x31, };
+static int x1830_pwm_pwm0_c_pins[] = { 0x4b, };
+static int x1830_pwm_pwm1_b_pins[] = { 0x32, };
+static int x1830_pwm_pwm1_c_pins[] = { 0x4c, };
+static int x1830_pwm_pwm2_c_8_pins[] = { 0x48, };
+static int x1830_pwm_pwm2_c_13_pins[] = { 0x4d, };
+static int x1830_pwm_pwm3_c_9_pins[] = { 0x49, };
+static int x1830_pwm_pwm3_c_14_pins[] = { 0x4e, };
+static int x1830_pwm_pwm4_c_15_pins[] = { 0x4f, };
+static int x1830_pwm_pwm4_c_25_pins[] = { 0x59, };
+static int x1830_pwm_pwm5_c_16_pins[] = { 0x50, };
+static int x1830_pwm_pwm5_c_26_pins[] = { 0x5a, };
+static int x1830_pwm_pwm6_c_17_pins[] = { 0x51, };
+static int x1830_pwm_pwm6_c_27_pins[] = { 0x5b, };
+static int x1830_pwm_pwm7_c_18_pins[] = { 0x52, };
+static int x1830_pwm_pwm7_c_28_pins[] = { 0x5c, };
+static int x1830_mac_pins[] = {
+ 0x29, 0x30, 0x2f, 0x28, 0x2e, 0x2d, 0x2a, 0x2b, 0x26, 0x27,
+};
+
+static int x1830_uart0_data_funcs[] = { 0, 0, };
+static int x1830_uart0_hwflow_funcs[] = { 0, 0, };
+static int x1830_uart1_data_funcs[] = { 0, 0, };
+static int x1830_sfc_funcs[] = { 1, 1, 1, 1, 1, 1, };
+static int x1830_ssi0_dt_funcs[] = { 0, };
+static int x1830_ssi0_dr_funcs[] = { 0, };
+static int x1830_ssi0_clk_funcs[] = { 0, };
+static int x1830_ssi0_gpc_funcs[] = { 0, };
+static int x1830_ssi0_ce0_funcs[] = { 0, };
+static int x1830_ssi0_ce1_funcs[] = { 0, };
+static int x1830_ssi1_dt_c_funcs[] = { 1, };
+static int x1830_ssi1_dr_c_funcs[] = { 1, };
+static int x1830_ssi1_clk_c_funcs[] = { 1, };
+static int x1830_ssi1_gpc_c_funcs[] = { 1, };
+static int x1830_ssi1_ce0_c_funcs[] = { 1, };
+static int x1830_ssi1_ce1_c_funcs[] = { 1, };
+static int x1830_ssi1_dt_d_funcs[] = { 2, };
+static int x1830_ssi1_dr_d_funcs[] = { 2, };
+static int x1830_ssi1_clk_d_funcs[] = { 2, };
+static int x1830_ssi1_gpc_d_funcs[] = { 2, };
+static int x1830_ssi1_ce0_d_funcs[] = { 2, };
+static int x1830_ssi1_ce1_d_funcs[] = { 2, };
+static int x1830_mmc0_1bit_funcs[] = { 0, 0, 0, };
+static int x1830_mmc0_4bit_funcs[] = { 0, 0, 0, };
+static int x1830_mmc1_1bit_funcs[] = { 0, 0, 0, };
+static int x1830_mmc1_4bit_funcs[] = { 0, 0, 0, };
+static int x1830_i2c0_funcs[] = { 1, 1, };
+static int x1830_i2c1_funcs[] = { 0, 0, };
+static int x1830_i2c2_funcs[] = { 1, 1, };
+static int x1830_pwm_pwm0_b_funcs[] = { 0, };
+static int x1830_pwm_pwm0_c_funcs[] = { 1, };
+static int x1830_pwm_pwm1_b_funcs[] = { 0, };
+static int x1830_pwm_pwm1_c_funcs[] = { 1, };
+static int x1830_pwm_pwm2_c_8_funcs[] = { 0, };
+static int x1830_pwm_pwm2_c_13_funcs[] = { 1, };
+static int x1830_pwm_pwm3_c_9_funcs[] = { 0, };
+static int x1830_pwm_pwm3_c_14_funcs[] = { 1, };
+static int x1830_pwm_pwm4_c_15_funcs[] = { 1, };
+static int x1830_pwm_pwm4_c_25_funcs[] = { 0, };
+static int x1830_pwm_pwm5_c_16_funcs[] = { 1, };
+static int x1830_pwm_pwm5_c_26_funcs[] = { 0, };
+static int x1830_pwm_pwm6_c_17_funcs[] = { 1, };
+static int x1830_pwm_pwm6_c_27_funcs[] = { 0, };
+static int x1830_pwm_pwm7_c_18_funcs[] = { 1, };
+static int x1830_pwm_pwm7_c_28_funcs[] = { 0, };
+static int x1830_mac_funcs[] = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, };
+
+static const struct group_desc x1830_groups[] = {
+ INGENIC_PIN_GROUP("uart0-data", x1830_uart0_data),
+ INGENIC_PIN_GROUP("uart0-hwflow", x1830_uart0_hwflow),
+ INGENIC_PIN_GROUP("uart1-data", x1830_uart1_data),
+ INGENIC_PIN_GROUP("sfc", x1830_sfc),
+ INGENIC_PIN_GROUP("ssi0-dt", x1830_ssi0_dt),
+ INGENIC_PIN_GROUP("ssi0-dr", x1830_ssi0_dr),
+ INGENIC_PIN_GROUP("ssi0-clk", x1830_ssi0_clk),
+ INGENIC_PIN_GROUP("ssi0-gpc", x1830_ssi0_gpc),
+ INGENIC_PIN_GROUP("ssi0-ce0", x1830_ssi0_ce0),
+ INGENIC_PIN_GROUP("ssi0-ce1", x1830_ssi0_ce1),
+ INGENIC_PIN_GROUP("ssi1-dt-c", x1830_ssi1_dt_c),
+ INGENIC_PIN_GROUP("ssi1-dr-c", x1830_ssi1_dr_c),
+ INGENIC_PIN_GROUP("ssi1-clk-c", x1830_ssi1_clk_c),
+ INGENIC_PIN_GROUP("ssi1-gpc-c", x1830_ssi1_gpc_c),
+ INGENIC_PIN_GROUP("ssi1-ce0-c", x1830_ssi1_ce0_c),
+ INGENIC_PIN_GROUP("ssi1-ce1-c", x1830_ssi1_ce1_c),
+ INGENIC_PIN_GROUP("ssi1-dt-d", x1830_ssi1_dt_d),
+ INGENIC_PIN_GROUP("ssi1-dr-d", x1830_ssi1_dr_d),
+ INGENIC_PIN_GROUP("ssi1-clk-d", x1830_ssi1_clk_d),
+ INGENIC_PIN_GROUP("ssi1-gpc-d", x1830_ssi1_gpc_d),
+ INGENIC_PIN_GROUP("ssi1-ce0-d", x1830_ssi1_ce0_d),
+ INGENIC_PIN_GROUP("ssi1-ce1-d", x1830_ssi1_ce1_d),
+ INGENIC_PIN_GROUP("mmc0-1bit", x1830_mmc0_1bit),
+ INGENIC_PIN_GROUP("mmc0-4bit", x1830_mmc0_4bit),
+ INGENIC_PIN_GROUP("mmc1-1bit", x1830_mmc1_1bit),
+ INGENIC_PIN_GROUP("mmc1-4bit", x1830_mmc1_4bit),
+ INGENIC_PIN_GROUP("i2c0-data", x1830_i2c0),
+ INGENIC_PIN_GROUP("i2c1-data", x1830_i2c1),
+ INGENIC_PIN_GROUP("i2c2-data", x1830_i2c2),
+ INGENIC_PIN_GROUP("pwm0-b", x1830_pwm_pwm0_b),
+ INGENIC_PIN_GROUP("pwm0-c", x1830_pwm_pwm0_c),
+ INGENIC_PIN_GROUP("pwm1-b", x1830_pwm_pwm1_b),
+ INGENIC_PIN_GROUP("pwm1-c", x1830_pwm_pwm1_c),
+ INGENIC_PIN_GROUP("pwm2-c-8", x1830_pwm_pwm2_c_8),
+ INGENIC_PIN_GROUP("pwm2-c-13", x1830_pwm_pwm2_c_13),
+ INGENIC_PIN_GROUP("pwm3-c-9", x1830_pwm_pwm3_c_9),
+ INGENIC_PIN_GROUP("pwm3-c-14", x1830_pwm_pwm3_c_14),
+ INGENIC_PIN_GROUP("pwm4-c-15", x1830_pwm_pwm4_c_15),
+ INGENIC_PIN_GROUP("pwm4-c-25", x1830_pwm_pwm4_c_25),
+ INGENIC_PIN_GROUP("pwm5-c-16", x1830_pwm_pwm5_c_16),
+ INGENIC_PIN_GROUP("pwm5-c-26", x1830_pwm_pwm5_c_26),
+ INGENIC_PIN_GROUP("pwm6-c-17", x1830_pwm_pwm6_c_17),
+ INGENIC_PIN_GROUP("pwm6-c-27", x1830_pwm_pwm6_c_27),
+ INGENIC_PIN_GROUP("pwm7-c-18", x1830_pwm_pwm7_c_18),
+ INGENIC_PIN_GROUP("pwm7-c-28", x1830_pwm_pwm7_c_28),
+ INGENIC_PIN_GROUP("mac", x1830_mac),
+};
+
+static const char *x1830_uart0_groups[] = { "uart0-data", "uart0-hwflow", };
+static const char *x1830_uart1_groups[] = { "uart1-data", };
+static const char *x1830_sfc_groups[] = { "sfc", };
+static const char *x1830_ssi0_groups[] = {
+ "ssi0-dt", "ssi0-dr", "ssi0-clk", "ssi0-gpc", "ssi0-ce0", "ssi0-ce1",
+};
+static const char *x1830_ssi1_groups[] = {
+ "ssi1-dt-c", "ssi1-dt-d",
+ "ssi1-dr-c", "ssi1-dr-d",
+ "ssi1-clk-c", "ssi1-clk-d",
+ "ssi1-gpc-c", "ssi1-gpc-d",
+ "ssi1-ce0-c", "ssi1-ce0-d",
+ "ssi1-ce1-c", "ssi1-ce1-d",
+};
+static const char *x1830_mmc0_groups[] = { "mmc0-1bit", "mmc0-4bit", };
+static const char *x1830_mmc1_groups[] = { "mmc1-1bit", "mmc1-4bit", };
+static const char *x1830_i2c0_groups[] = { "i2c0-data", };
+static const char *x1830_i2c1_groups[] = { "i2c1-data", };
+static const char *x1830_i2c2_groups[] = { "i2c2-data", };
+static const char *x1830_pwm0_groups[] = { "pwm0-b", "pwm0-c", };
+static const char *x1830_pwm1_groups[] = { "pwm1-b", "pwm1-c", };
+static const char *x1830_pwm2_groups[] = { "pwm2-c-8", "pwm2-c-13", };
+static const char *x1830_pwm3_groups[] = { "pwm3-c-9", "pwm3-c-14", };
+static const char *x1830_pwm4_groups[] = { "pwm4-c-15", "pwm4-c-25", };
+static const char *x1830_pwm5_groups[] = { "pwm5-c-16", "pwm5-c-26", };
+static const char *x1830_pwm6_groups[] = { "pwm6-c-17", "pwm6-c-27", };
+static const char *x1830_pwm7_groups[] = { "pwm7-c-18", "pwm7-c-28", };
+static const char *x1830_mac_groups[] = { "mac", };
+
+static const struct function_desc x1830_functions[] = {
+ { "uart0", x1830_uart0_groups, ARRAY_SIZE(x1830_uart0_groups), },
+ { "uart1", x1830_uart1_groups, ARRAY_SIZE(x1830_uart1_groups), },
+ { "sfc", x1830_sfc_groups, ARRAY_SIZE(x1830_sfc_groups), },
+ { "ssi0", x1830_ssi0_groups, ARRAY_SIZE(x1830_ssi0_groups), },
+ { "ssi1", x1830_ssi1_groups, ARRAY_SIZE(x1830_ssi1_groups), },
+ { "mmc0", x1830_mmc0_groups, ARRAY_SIZE(x1830_mmc0_groups), },
+ { "mmc1", x1830_mmc1_groups, ARRAY_SIZE(x1830_mmc1_groups), },
+ { "i2c0", x1830_i2c0_groups, ARRAY_SIZE(x1830_i2c0_groups), },
+ { "i2c1", x1830_i2c1_groups, ARRAY_SIZE(x1830_i2c1_groups), },
+ { "i2c2", x1830_i2c2_groups, ARRAY_SIZE(x1830_i2c2_groups), },
+ { "pwm0", x1830_pwm0_groups, ARRAY_SIZE(x1830_pwm0_groups), },
+ { "pwm1", x1830_pwm1_groups, ARRAY_SIZE(x1830_pwm1_groups), },
+ { "pwm2", x1830_pwm2_groups, ARRAY_SIZE(x1830_pwm2_groups), },
+ { "pwm3", x1830_pwm3_groups, ARRAY_SIZE(x1830_pwm3_groups), },
+ { "pwm4", x1830_pwm4_groups, ARRAY_SIZE(x1830_pwm4_groups), },
+ { "pwm5", x1830_pwm5_groups, ARRAY_SIZE(x1830_pwm4_groups), },
+ { "pwm6", x1830_pwm6_groups, ARRAY_SIZE(x1830_pwm4_groups), },
+ { "pwm7", x1830_pwm7_groups, ARRAY_SIZE(x1830_pwm4_groups), },
+ { "mac", x1830_mac_groups, ARRAY_SIZE(x1830_mac_groups), },
+};
+
+static const struct ingenic_chip_info x1830_chip_info = {
+ .num_chips = 4,
+ .reg_offset = 0x1000,
+ .version = ID_X1830,
+ .groups = x1830_groups,
+ .num_groups = ARRAY_SIZE(x1830_groups),
+ .functions = x1830_functions,
+ .num_functions = ARRAY_SIZE(x1830_functions),
+ .pull_ups = x1830_pull_ups,
+ .pull_downs = x1830_pull_downs,
+};
+
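
The new per-SoC reg_offset replaces both the hard-coded 0x100 port stride and the X1000-only shadow-group constants, with REG_PZ_BASE()/REG_PZ_GID2LD() now deriving the latter from it. A worked check against the old defines, assuming the macros introduced above:

	/* JZ4740..X1500: reg_offset = 0x100
	 *   REG_PZ_BASE(0x100)   = 0x100 * 7        = 0x700 (old X1000_GPIO_PZ_BASE)
	 *   REG_PZ_GID2LD(0x100) = 0x100 * 7 + 0xf0 = 0x7f0 (old X1000_GPIO_PZ_GID2LD)
	 * X1830: reg_offset = 0x1000
	 *   REG_PZ_BASE(0x1000)   = 0x7000
	 *   REG_PZ_GID2LD(0x1000) = 0x70f0
	 * Port N's register block starts at N * reg_offset. */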
static u32 ingenic_gpio_read_reg(struct ingenic_gpio_chip *jzgc, u8 reg)
{
unsigned int val;
@@ -1363,12 +1644,14 @@ static void ingenic_gpio_shadow_set_bit(struct ingenic_gpio_chip *jzgc,
else
reg = REG_CLEAR(reg);
- regmap_write(jzgc->jzpc->map, X1000_GPIO_PZ_BASE + reg, BIT(offset));
+ regmap_write(jzgc->jzpc->map, REG_PZ_BASE(
+ jzgc->jzpc->info->reg_offset) + reg, BIT(offset));
}
static void ingenic_gpio_shadow_set_bit_load(struct ingenic_gpio_chip *jzgc)
{
- regmap_write(jzgc->jzpc->map, X1000_GPIO_PZ_GID2LD,
+ regmap_write(jzgc->jzpc->map, REG_PZ_GID2LD(
+ jzgc->jzpc->info->reg_offset),
jzgc->gc.base / PINS_PER_GPIO_CHIP);
}
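
On X1000 and later the two helpers above implement an atomic update: bit changes are staged in the shadow "PZ" group, then latched into the target port by writing that port's index to GID2LD. A hedged sketch of the sequence a caller performs, using only functions from this diff:

	/* Stage both pattern bits, then commit them to the pin's port. */
	ingenic_gpio_shadow_set_bit(jzgc, JZ4760_GPIO_PAT0, offset, val1);
	ingenic_gpio_shadow_set_bit(jzgc, JZ4760_GPIO_PAT1, offset, val2);
	ingenic_gpio_shadow_set_bit_load(jzgc);	/* write port index to GID2LD */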
@@ -1383,7 +1666,7 @@ static inline bool ingenic_gpio_get_value(struct ingenic_gpio_chip *jzgc,
static void ingenic_gpio_set_value(struct ingenic_gpio_chip *jzgc,
u8 offset, int value)
{
- if (jzgc->jzpc->version >= ID_JZ4760)
+ if (jzgc->jzpc->info->version >= ID_JZ4760)
ingenic_gpio_set_bit(jzgc, JZ4760_GPIO_PAT0, offset, !!value);
else
ingenic_gpio_set_bit(jzgc, JZ4740_GPIO_DATA, offset, !!value);
@@ -1393,58 +1676,42 @@ static void irq_set_type(struct ingenic_gpio_chip *jzgc,
u8 offset, unsigned int type)
{
u8 reg1, reg2;
-
- if (jzgc->jzpc->version >= ID_JZ4760) {
- reg1 = JZ4760_GPIO_PAT1;
- reg2 = JZ4760_GPIO_PAT0;
- } else {
- reg1 = JZ4740_GPIO_TRIG;
- reg2 = JZ4740_GPIO_DIR;
- }
+ bool val1, val2;
switch (type) {
case IRQ_TYPE_EDGE_RISING:
- if (jzgc->jzpc->version >= ID_X1000) {
- ingenic_gpio_shadow_set_bit(jzgc, reg2, offset, true);
- ingenic_gpio_shadow_set_bit(jzgc, reg1, offset, true);
- ingenic_gpio_shadow_set_bit_load(jzgc);
- } else {
- ingenic_gpio_set_bit(jzgc, reg2, offset, true);
- ingenic_gpio_set_bit(jzgc, reg1, offset, true);
- }
+ val1 = val2 = true;
break;
case IRQ_TYPE_EDGE_FALLING:
- if (jzgc->jzpc->version >= ID_X1000) {
- ingenic_gpio_shadow_set_bit(jzgc, reg2, offset, false);
- ingenic_gpio_shadow_set_bit(jzgc, reg1, offset, true);
- ingenic_gpio_shadow_set_bit_load(jzgc);
- } else {
- ingenic_gpio_set_bit(jzgc, reg2, offset, false);
- ingenic_gpio_set_bit(jzgc, reg1, offset, true);
- }
+ val1 = false;
+ val2 = true;
break;
case IRQ_TYPE_LEVEL_HIGH:
- if (jzgc->jzpc->version >= ID_X1000) {
- ingenic_gpio_shadow_set_bit(jzgc, reg2, offset, true);
- ingenic_gpio_shadow_set_bit(jzgc, reg1, offset, false);
- ingenic_gpio_shadow_set_bit_load(jzgc);
- } else {
- ingenic_gpio_set_bit(jzgc, reg2, offset, true);
- ingenic_gpio_set_bit(jzgc, reg1, offset, false);
- }
+ val1 = true;
+ val2 = false;
break;
case IRQ_TYPE_LEVEL_LOW:
default:
- if (jzgc->jzpc->version >= ID_X1000) {
- ingenic_gpio_shadow_set_bit(jzgc, reg2, offset, false);
- ingenic_gpio_shadow_set_bit(jzgc, reg1, offset, false);
- ingenic_gpio_shadow_set_bit_load(jzgc);
- } else {
- ingenic_gpio_set_bit(jzgc, reg2, offset, false);
- ingenic_gpio_set_bit(jzgc, reg1, offset, false);
- }
+ val1 = val2 = false;
break;
}
+
+ if (jzgc->jzpc->info->version >= ID_JZ4760) {
+ reg1 = JZ4760_GPIO_PAT1;
+ reg2 = JZ4760_GPIO_PAT0;
+ } else {
+ reg1 = JZ4740_GPIO_TRIG;
+ reg2 = JZ4740_GPIO_DIR;
+ }
+
+ if (jzgc->jzpc->info->version >= ID_X1000) {
+ ingenic_gpio_shadow_set_bit(jzgc, reg2, offset, val1);
+ ingenic_gpio_shadow_set_bit(jzgc, reg1, offset, val2);
+ ingenic_gpio_shadow_set_bit_load(jzgc);
+ } else {
+ ingenic_gpio_set_bit(jzgc, reg2, offset, val1);
+ ingenic_gpio_set_bit(jzgc, reg1, offset, val2);
+ }
}
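
The rewritten irq_set_type() first reduces the switch to two booleans and only then performs a single shadow-or-direct write sequence, instead of repeating that branch in every case. The encoding the cases above imply, for JZ4760+ (PAT1/PAT0; older SoCs use TRIG/DIR the same way):

	/* type           PAT0 (val1)   PAT1 (val2)
	 * EDGE_RISING        1             1
	 * EDGE_FALLING       0             1
	 * LEVEL_HIGH         1             0
	 * LEVEL_LOW          0             0
	 * => PAT1 picks edge (1) vs. level (0); PAT0 picks the active polarity. */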
static void ingenic_gpio_irq_mask(struct irq_data *irqd)
@@ -1469,7 +1736,7 @@ static void ingenic_gpio_irq_enable(struct irq_data *irqd)
struct ingenic_gpio_chip *jzgc = gpiochip_get_data(gc);
int irq = irqd->hwirq;
- if (jzgc->jzpc->version >= ID_JZ4760)
+ if (jzgc->jzpc->info->version >= ID_JZ4760)
ingenic_gpio_set_bit(jzgc, JZ4760_GPIO_INT, irq, true);
else
ingenic_gpio_set_bit(jzgc, JZ4740_GPIO_SELECT, irq, true);
@@ -1485,7 +1752,7 @@ static void ingenic_gpio_irq_disable(struct irq_data *irqd)
ingenic_gpio_irq_mask(irqd);
- if (jzgc->jzpc->version >= ID_JZ4760)
+ if (jzgc->jzpc->info->version >= ID_JZ4760)
ingenic_gpio_set_bit(jzgc, JZ4760_GPIO_INT, irq, false);
else
ingenic_gpio_set_bit(jzgc, JZ4740_GPIO_SELECT, irq, false);
@@ -1510,7 +1777,7 @@ static void ingenic_gpio_irq_ack(struct irq_data *irqd)
irq_set_type(jzgc, irq, IRQ_TYPE_EDGE_RISING);
}
- if (jzgc->jzpc->version >= ID_JZ4760)
+ if (jzgc->jzpc->info->version >= ID_JZ4760)
ingenic_gpio_set_bit(jzgc, JZ4760_GPIO_FLAG, irq, false);
else
ingenic_gpio_set_bit(jzgc, JZ4740_GPIO_DATA, irq, true);
@@ -1567,7 +1834,7 @@ static void ingenic_gpio_irq_handler(struct irq_desc *desc)
chained_irq_enter(irq_chip, desc);
- if (jzgc->jzpc->version >= ID_JZ4760)
+ if (jzgc->jzpc->info->version >= ID_JZ4760)
flag = ingenic_gpio_read_reg(jzgc, JZ4760_GPIO_FLAG);
else
flag = ingenic_gpio_read_reg(jzgc, JZ4740_GPIO_FLAG);
@@ -1611,7 +1878,7 @@ static inline void ingenic_config_pin(struct ingenic_pinctrl *jzpc,
unsigned int idx = pin % PINS_PER_GPIO_CHIP;
unsigned int offt = pin / PINS_PER_GPIO_CHIP;
- regmap_write(jzpc->map, offt * 0x100 +
+ regmap_write(jzpc->map, offt * jzpc->info->reg_offset +
(set ? REG_SET(reg) : REG_CLEAR(reg)), BIT(idx));
}
@@ -1620,14 +1887,15 @@ static inline void ingenic_shadow_config_pin(struct ingenic_pinctrl *jzpc,
{
unsigned int idx = pin % PINS_PER_GPIO_CHIP;
- regmap_write(jzpc->map, X1000_GPIO_PZ_BASE +
+ regmap_write(jzpc->map, REG_PZ_BASE(jzpc->info->reg_offset) +
(set ? REG_SET(reg) : REG_CLEAR(reg)), BIT(idx));
}
static inline void ingenic_shadow_config_pin_load(struct ingenic_pinctrl *jzpc,
unsigned int pin)
{
- regmap_write(jzpc->map, X1000_GPIO_PZ_GID2LD, pin / PINS_PER_GPIO_CHIP);
+ regmap_write(jzpc->map, REG_PZ_GID2LD(jzpc->info->reg_offset),
+ pin / PINS_PER_GPIO_CHIP);
}
static inline bool ingenic_get_pin_config(struct ingenic_pinctrl *jzpc,
@@ -1637,7 +1905,7 @@ static inline bool ingenic_get_pin_config(struct ingenic_pinctrl *jzpc,
unsigned int offt = pin / PINS_PER_GPIO_CHIP;
unsigned int val;
- regmap_read(jzpc->map, offt * 0x100 + reg, &val);
+ regmap_read(jzpc->map, offt * jzpc->info->reg_offset + reg, &val);
return val & BIT(idx);
}
@@ -1648,7 +1916,7 @@ static int ingenic_gpio_get_direction(struct gpio_chip *gc, unsigned int offset)
struct ingenic_pinctrl *jzpc = jzgc->jzpc;
unsigned int pin = gc->base + offset;
- if (jzpc->version >= ID_JZ4760)
+ if (jzpc->info->version >= ID_JZ4760)
return ingenic_get_pin_config(jzpc, pin, JZ4760_GPIO_PAT1);
if (ingenic_get_pin_config(jzpc, pin, JZ4740_GPIO_SELECT))
@@ -1674,13 +1942,13 @@ static int ingenic_pinmux_set_pin_fn(struct ingenic_pinctrl *jzpc,
dev_dbg(jzpc->dev, "set pin P%c%u to function %u\n",
'A' + offt, idx, func);
- if (jzpc->version >= ID_X1000) {
+ if (jzpc->info->version >= ID_X1000) {
ingenic_shadow_config_pin(jzpc, pin, JZ4760_GPIO_INT, false);
ingenic_shadow_config_pin(jzpc, pin, GPIO_MSK, false);
ingenic_shadow_config_pin(jzpc, pin, JZ4760_GPIO_PAT1, func & 0x2);
ingenic_shadow_config_pin(jzpc, pin, JZ4760_GPIO_PAT0, func & 0x1);
ingenic_shadow_config_pin_load(jzpc, pin);
- } else if (jzpc->version >= ID_JZ4760) {
+ } else if (jzpc->info->version >= ID_JZ4760) {
ingenic_config_pin(jzpc, pin, JZ4760_GPIO_INT, false);
ingenic_config_pin(jzpc, pin, GPIO_MSK, false);
ingenic_config_pin(jzpc, pin, JZ4760_GPIO_PAT1, func & 0x2);
@@ -1733,12 +2001,12 @@ static int ingenic_pinmux_gpio_set_direction(struct pinctrl_dev *pctldev,
dev_dbg(pctldev->dev, "set pin P%c%u to %sput\n",
'A' + offt, idx, input ? "in" : "out");
- if (jzpc->version >= ID_X1000) {
+ if (jzpc->info->version >= ID_X1000) {
ingenic_shadow_config_pin(jzpc, pin, JZ4760_GPIO_INT, false);
ingenic_shadow_config_pin(jzpc, pin, GPIO_MSK, true);
ingenic_shadow_config_pin(jzpc, pin, JZ4760_GPIO_PAT1, input);
ingenic_shadow_config_pin_load(jzpc, pin);
- } else if (jzpc->version >= ID_JZ4760) {
+ } else if (jzpc->info->version >= ID_JZ4760) {
ingenic_config_pin(jzpc, pin, JZ4760_GPIO_INT, false);
ingenic_config_pin(jzpc, pin, GPIO_MSK, true);
ingenic_config_pin(jzpc, pin, JZ4760_GPIO_PAT1, input);
@@ -1768,7 +2036,7 @@ static int ingenic_pinconf_get(struct pinctrl_dev *pctldev,
unsigned int offt = pin / PINS_PER_GPIO_CHIP;
bool pull;
- if (jzpc->version >= ID_JZ4760)
+ if (jzpc->info->version >= ID_JZ4760)
pull = !ingenic_get_pin_config(jzpc, pin, JZ4760_GPIO_PEN);
else
pull = !ingenic_get_pin_config(jzpc, pin, JZ4740_GPIO_PULL_DIS);
@@ -1798,18 +2066,37 @@ static int ingenic_pinconf_get(struct pinctrl_dev *pctldev,
}
static void ingenic_set_bias(struct ingenic_pinctrl *jzpc,
- unsigned int pin, bool enabled)
+ unsigned int pin, unsigned int bias)
{
- if (jzpc->version >= ID_JZ4760)
- ingenic_config_pin(jzpc, pin, JZ4760_GPIO_PEN, !enabled);
- else
- ingenic_config_pin(jzpc, pin, JZ4740_GPIO_PULL_DIS, !enabled);
+ if (jzpc->info->version >= ID_X1830) {
+ unsigned int idx = pin % PINS_PER_GPIO_CHIP;
+ unsigned int half = PINS_PER_GPIO_CHIP / 2;
+ unsigned int idxh = pin % half * 2;
+ unsigned int offt = pin / PINS_PER_GPIO_CHIP;
+
+ if (idx < half) {
+ regmap_write(jzpc->map, offt * jzpc->info->reg_offset +
+ REG_CLEAR(X1830_GPIO_PEL), 3 << idxh);
+ regmap_write(jzpc->map, offt * jzpc->info->reg_offset +
+ REG_SET(X1830_GPIO_PEL), bias << idxh);
+ } else {
+ regmap_write(jzpc->map, offt * jzpc->info->reg_offset +
+ REG_CLEAR(X1830_GPIO_PEH), 3 << idxh);
+ regmap_write(jzpc->map, offt * jzpc->info->reg_offset +
+ REG_SET(X1830_GPIO_PEH), bias << idxh);
+ }
+
+ } else if (jzpc->info->version >= ID_JZ4760) {
+ ingenic_config_pin(jzpc, pin, JZ4760_GPIO_PEN, !bias);
+ } else {
+ ingenic_config_pin(jzpc, pin, JZ4740_GPIO_PULL_DIS, !bias);
+ }
}
static void ingenic_set_output_level(struct ingenic_pinctrl *jzpc,
unsigned int pin, bool high)
{
- if (jzpc->version >= ID_JZ4760)
+ if (jzpc->info->version >= ID_JZ4760)
ingenic_config_pin(jzpc, pin, JZ4760_GPIO_PAT0, high);
else
ingenic_config_pin(jzpc, pin, JZ4740_GPIO_DATA, high);
@@ -1843,7 +2130,7 @@ static int ingenic_pinconf_set(struct pinctrl_dev *pctldev, unsigned int pin,
case PIN_CONFIG_BIAS_DISABLE:
dev_dbg(jzpc->dev, "disable pull-over for pin P%c%u\n",
'A' + offt, idx);
- ingenic_set_bias(jzpc, pin, false);
+ ingenic_set_bias(jzpc, pin, GPIO_PULL_DIS);
break;
case PIN_CONFIG_BIAS_PULL_UP:
@@ -1851,7 +2138,7 @@ static int ingenic_pinconf_set(struct pinctrl_dev *pctldev, unsigned int pin,
return -EINVAL;
dev_dbg(jzpc->dev, "set pull-up for pin P%c%u\n",
'A' + offt, idx);
- ingenic_set_bias(jzpc, pin, true);
+ ingenic_set_bias(jzpc, pin, GPIO_PULL_UP);
break;
case PIN_CONFIG_BIAS_PULL_DOWN:
@@ -1859,7 +2146,7 @@ static int ingenic_pinconf_set(struct pinctrl_dev *pctldev, unsigned int pin,
return -EINVAL;
dev_dbg(jzpc->dev, "set pull-down for pin P%c%u\n",
'A' + offt, idx);
- ingenic_set_bias(jzpc, pin, true);
+ ingenic_set_bias(jzpc, pin, GPIO_PULL_DOWN);
break;
case PIN_CONFIG_OUTPUT:
@@ -1939,25 +2226,13 @@ static const struct regmap_config ingenic_pinctrl_regmap_config = {
.reg_stride = 4,
};
-static const struct of_device_id ingenic_pinctrl_of_match[] = {
- { .compatible = "ingenic,jz4740-pinctrl", .data = (void *) ID_JZ4740 },
- { .compatible = "ingenic,jz4725b-pinctrl", .data = (void *)ID_JZ4725B },
- { .compatible = "ingenic,jz4760-pinctrl", .data = (void *) ID_JZ4760 },
- { .compatible = "ingenic,jz4760b-pinctrl", .data = (void *) ID_JZ4760B },
- { .compatible = "ingenic,jz4770-pinctrl", .data = (void *) ID_JZ4770 },
- { .compatible = "ingenic,jz4780-pinctrl", .data = (void *) ID_JZ4780 },
- { .compatible = "ingenic,x1000-pinctrl", .data = (void *) ID_X1000 },
- { .compatible = "ingenic,x1000e-pinctrl", .data = (void *) ID_X1000E },
- { .compatible = "ingenic,x1500-pinctrl", .data = (void *) ID_X1500 },
- {},
-};
-
static const struct of_device_id ingenic_gpio_of_match[] __initconst = {
{ .compatible = "ingenic,jz4740-gpio", },
{ .compatible = "ingenic,jz4760-gpio", },
{ .compatible = "ingenic,jz4770-gpio", },
{ .compatible = "ingenic,jz4780-gpio", },
{ .compatible = "ingenic,x1000-gpio", },
+ { .compatible = "ingenic,x1830-gpio", },
{},
};
@@ -1981,7 +2256,7 @@ static int __init ingenic_gpio_probe(struct ingenic_pinctrl *jzpc,
return -ENOMEM;
jzgc->jzpc = jzpc;
- jzgc->reg_base = bank * 0x100;
+ jzgc->reg_base = bank * jzpc->info->reg_offset;
jzgc->gc.label = devm_kasprintf(dev, GFP_KERNEL, "GPIO%c", 'A' + bank);
if (!jzgc->gc.label)
@@ -2048,9 +2323,6 @@ static int __init ingenic_pinctrl_probe(struct platform_device *pdev)
struct ingenic_pinctrl *jzpc;
struct pinctrl_desc *pctl_desc;
void __iomem *base;
- const struct platform_device_id *id = platform_get_device_id(pdev);
- const struct of_device_id *of_id = of_match_device(
- ingenic_pinctrl_of_match, dev);
const struct ingenic_chip_info *chip_info;
struct device_node *node;
unsigned int i;
@@ -2060,8 +2332,7 @@ static int __init ingenic_pinctrl_probe(struct platform_device *pdev)
if (!jzpc)
return -ENOMEM;
- base = devm_ioremap_resource(dev,
- platform_get_resource(pdev, IORESOURCE_MEM, 0));
+ base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(base))
return PTR_ERR(base);
@@ -2073,31 +2344,7 @@ static int __init ingenic_pinctrl_probe(struct platform_device *pdev)
}
jzpc->dev = dev;
-
- if (of_id)
- jzpc->version = (enum jz_version)of_id->data;
- else
- jzpc->version = (enum jz_version)id->driver_data;
-
- if (jzpc->version >= ID_X1500)
- chip_info = &x1500_chip_info;
- else if (jzpc->version >= ID_X1000E)
- chip_info = &x1000e_chip_info;
- else if (jzpc->version >= ID_X1000)
- chip_info = &x1000_chip_info;
- else if (jzpc->version >= ID_JZ4780)
- chip_info = &jz4780_chip_info;
- else if (jzpc->version >= ID_JZ4770)
- chip_info = &jz4770_chip_info;
- else if (jzpc->version >= ID_JZ4760B)
- chip_info = &jz4760b_chip_info;
- else if (jzpc->version >= ID_JZ4760)
- chip_info = &jz4760_chip_info;
- else if (jzpc->version >= ID_JZ4725B)
- chip_info = &jz4725b_chip_info;
- else
- chip_info = &jz4740_chip_info;
- jzpc->info = chip_info;
+ jzpc->info = chip_info = of_device_get_match_data(dev);
pctl_desc = devm_kzalloc(&pdev->dev, sizeof(*pctl_desc), GFP_KERNEL);
if (!pctl_desc)
@@ -2166,25 +2413,25 @@ static int __init ingenic_pinctrl_probe(struct platform_device *pdev)
return 0;
}
-static const struct platform_device_id ingenic_pinctrl_ids[] = {
- { "jz4740-pinctrl", ID_JZ4740 },
- { "jz4725b-pinctrl", ID_JZ4725B },
- { "jz4760-pinctrl", ID_JZ4760 },
- { "jz4760b-pinctrl", ID_JZ4760B },
- { "jz4770-pinctrl", ID_JZ4770 },
- { "jz4780-pinctrl", ID_JZ4780 },
- { "x1000-pinctrl", ID_X1000 },
- { "x1000e-pinctrl", ID_X1000E },
- { "x1500-pinctrl", ID_X1500 },
+static const struct of_device_id ingenic_pinctrl_of_match[] = {
+ { .compatible = "ingenic,jz4740-pinctrl", .data = &jz4740_chip_info },
+ { .compatible = "ingenic,jz4725b-pinctrl", .data = &jz4725b_chip_info },
+ { .compatible = "ingenic,jz4760-pinctrl", .data = &jz4760_chip_info },
+ { .compatible = "ingenic,jz4760b-pinctrl", .data = &jz4760_chip_info },
+ { .compatible = "ingenic,jz4770-pinctrl", .data = &jz4770_chip_info },
+ { .compatible = "ingenic,jz4780-pinctrl", .data = &jz4780_chip_info },
+ { .compatible = "ingenic,x1000-pinctrl", .data = &x1000_chip_info },
+ { .compatible = "ingenic,x1000e-pinctrl", .data = &x1000_chip_info },
+ { .compatible = "ingenic,x1500-pinctrl", .data = &x1500_chip_info },
+ { .compatible = "ingenic,x1830-pinctrl", .data = &x1830_chip_info },
{},
};
static struct platform_driver ingenic_pinctrl_driver = {
.driver = {
.name = "pinctrl-ingenic",
- .of_match_table = of_match_ptr(ingenic_pinctrl_of_match),
+ .of_match_table = ingenic_pinctrl_of_match,
},
- .id_table = ingenic_pinctrl_ids,
};
static int __init ingenic_pinctrl_drv_register(void)
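Note on the pinctrl-ingenic.c hunks above: irq_set_type() now computes the PAT0/PAT1 (or TRIG/DIR) bit values per trigger type first and performs the register writes once at the end, and all per-SoC knowledge (version, per-bank register stride, the X1830 PEL/PEH bias registers) moves into chip_info structures carried as OF match data, so probe shrinks to a single of_device_get_match_data() call. This is also why the platform_device_id table and the of_match_ptr() wrapper are dropped: the driver now matches through OF exclusively. A minimal sketch of the match-data pattern, with hypothetical names (my_chip_info, "vendor,example-pinctrl"):

	#include <linux/of_device.h>
	#include <linux/platform_device.h>

	struct my_chip_info {
		unsigned int version;
		unsigned int reg_offset;	/* per-bank register stride */
	};

	static const struct my_chip_info example_info = {
		.version	= 1,
		.reg_offset	= 0x100,
	};

	static const struct of_device_id my_of_match[] = {
		{ .compatible = "vendor,example-pinctrl", .data = &example_info },
		{ /* sentinel */ }
	};

	static int my_probe(struct platform_device *pdev)
	{
		/* Returns the .data of the matching of_device_id entry,
		 * or NULL if the device was not probed via OF. */
		const struct my_chip_info *info =
			of_device_get_match_data(&pdev->dev);

		if (!info)
			return -ENODEV;
		return 0;
	}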
diff --git a/drivers/pinctrl/pinctrl-rza1.c b/drivers/pinctrl/pinctrl-rza1.c
index 215db220d795..617585be6a7d 100644
--- a/drivers/pinctrl/pinctrl-rza1.c
+++ b/drivers/pinctrl/pinctrl-rza1.c
@@ -1229,8 +1229,8 @@ static int rza1_parse_gpiochip(struct rza1_pinctrl *rza1_pctl,
pinctrl_add_gpio_range(rza1_pctl->pctl, range);
- dev_info(rza1_pctl->dev, "Parsed gpiochip %s with %d pins\n",
- chip->label, chip->ngpio);
+ dev_dbg(rza1_pctl->dev, "Parsed gpiochip %s with %d pins\n",
+ chip->label, chip->ngpio);
return 0;
}
diff --git a/drivers/pinctrl/qcom/pinctrl-msm.c b/drivers/pinctrl/qcom/pinctrl-msm.c
index 5d6f9f61ce02..9a8daa256a32 100644
--- a/drivers/pinctrl/qcom/pinctrl-msm.c
+++ b/drivers/pinctrl/qcom/pinctrl-msm.c
@@ -960,7 +960,6 @@ static int msm_gpio_irq_set_wake(struct irq_data *d, unsigned int on)
{
struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
struct msm_pinctrl *pctrl = gpiochip_get_data(gc);
- unsigned long flags;
/*
* While they may not wake up when the TLMM is powered off,
@@ -971,12 +970,8 @@ static int msm_gpio_irq_set_wake(struct irq_data *d, unsigned int on)
if (d->parent_data)
irq_chip_set_wake_parent(d, on);
- raw_spin_lock_irqsave(&pctrl->lock, flags);
-
irq_set_irq_wake(pctrl->irq, on);
- raw_spin_unlock_irqrestore(&pctrl->lock, flags);
-
return 0;
}
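The msm_gpio_irq_set_wake() hunk removes the driver's raw spinlock around irq_set_irq_wake(): the irq core serializes that call with its own descriptor locking, and the bus lock taken inside it may sleep on slow-bus irqchips, so holding a raw spinlock across it was both redundant and potentially invalid. The surviving shape of the callback, as a sketch with hypothetical names (example_ctrl, summary_irq):

	#include <linux/irq.h>

	struct example_ctrl {
		int summary_irq;	/* upstream summary interrupt */
	};

	static int example_irq_set_wake(struct irq_data *d, unsigned int on)
	{
		struct example_ctrl *ctrl = irq_data_get_irq_chip_data(d);

		if (d->parent_data)
			irq_chip_set_wake_parent(d, on);

		/* No driver lock: the irq core serializes this itself. */
		irq_set_irq_wake(ctrl->summary_irq, on);

		return 0;
	}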
diff --git a/drivers/pinctrl/qcom/pinctrl-msm8976.c b/drivers/pinctrl/qcom/pinctrl-msm8976.c
index e1259ce27396..183f0b2d9f8e 100644
--- a/drivers/pinctrl/qcom/pinctrl-msm8976.c
+++ b/drivers/pinctrl/qcom/pinctrl-msm8976.c
@@ -589,7 +589,7 @@ static const char * const blsp_uart5_groups[] = {
static const char * const qdss_traceclk_a_groups[] = {
"gpio46",
};
-const char * const m_voc_groups[] = {
+static const char * const m_voc_groups[] = {
"gpio123", "gpio124",
};
static const char * const blsp_i2c5_groups[] = {
diff --git a/drivers/pinctrl/qcom/pinctrl-sc7180.c b/drivers/pinctrl/qcom/pinctrl-sc7180.c
index d6cfad7417b1..1b6465a882f2 100644
--- a/drivers/pinctrl/qcom/pinctrl-sc7180.c
+++ b/drivers/pinctrl/qcom/pinctrl-sc7180.c
@@ -456,14 +456,18 @@ enum sc7180_functions {
msm_mux_qspi_data,
msm_mux_qup00,
msm_mux_qup01,
- msm_mux_qup02,
+ msm_mux_qup02_i2c,
+ msm_mux_qup02_uart,
msm_mux_qup03,
- msm_mux_qup04,
+ msm_mux_qup04_i2c,
+ msm_mux_qup04_uart,
msm_mux_qup05,
msm_mux_qup10,
- msm_mux_qup11,
+ msm_mux_qup11_i2c,
+ msm_mux_qup11_uart,
msm_mux_qup12,
- msm_mux_qup13,
+ msm_mux_qup13_i2c,
+ msm_mux_qup13_uart,
msm_mux_qup14,
msm_mux_qup15,
msm_mux_sdc1_tb,
@@ -543,7 +547,10 @@ static const char * const sdc1_tb_groups[] = {
static const char * const sdc2_tb_groups[] = {
"gpio5",
};
-static const char * const qup11_groups[] = {
+static const char * const qup11_i2c_groups[] = {
+ "gpio6", "gpio7",
+};
+static const char * const qup11_uart_groups[] = {
"gpio6", "gpio7",
};
static const char * const ddr_bist_groups[] = {
@@ -593,7 +600,10 @@ static const char * const qdss_groups[] = {
static const char * const pll_reset_groups[] = {
"gpio14",
};
-static const char * const qup02_groups[] = {
+static const char * const qup02_i2c_groups[] = {
+ "gpio15", "gpio16",
+};
+static const char * const qup02_uart_groups[] = {
"gpio15", "gpio16",
};
static const char * const cci_i2c_groups[] = {
@@ -698,7 +708,10 @@ static const char * const wlan1_adc1_groups[] = {
static const char * const atest_usb13_groups[] = {
"gpio44",
};
-static const char * const qup13_groups[] = {
+static const char * const qup13_i2c_groups[] = {
+ "gpio46", "gpio47",
+};
+static const char * const qup13_uart_groups[] = {
"gpio46", "gpio47",
};
static const char * const gcc_gp1_groups[] = {
@@ -848,7 +861,10 @@ static const char * const usb_phy_groups[] = {
static const char * const mss_lte_groups[] = {
"gpio108", "gpio109",
};
-static const char * const qup04_groups[] = {
+static const char * const qup04_i2c_groups[] = {
+ "gpio115", "gpio116",
+};
+static const char * const qup04_uart_groups[] = {
"gpio115", "gpio116",
};
@@ -929,14 +945,18 @@ static const struct msm_function sc7180_functions[] = {
FUNCTION(qspi_data),
FUNCTION(qup00),
FUNCTION(qup01),
- FUNCTION(qup02),
+ FUNCTION(qup02_i2c),
+ FUNCTION(qup02_uart),
FUNCTION(qup03),
- FUNCTION(qup04),
+ FUNCTION(qup04_i2c),
+ FUNCTION(qup04_uart),
FUNCTION(qup05),
FUNCTION(qup10),
- FUNCTION(qup11),
+ FUNCTION(qup11_i2c),
+ FUNCTION(qup11_uart),
FUNCTION(qup12),
- FUNCTION(qup13),
+ FUNCTION(qup13_i2c),
+ FUNCTION(qup13_uart),
FUNCTION(qup14),
FUNCTION(qup15),
FUNCTION(sdc1_tb),
@@ -976,8 +996,8 @@ static const struct msm_pingroup sc7180_groups[] = {
[3] = PINGROUP(3, SOUTH, qup01, sp_cmu, dbg_out, qdss_cti, _, _, _, _, _),
[4] = PINGROUP(4, NORTH, sdc1_tb, _, qdss_cti, _, _, _, _, _, _),
[5] = PINGROUP(5, NORTH, sdc2_tb, _, _, _, _, _, _, _, _),
- [6] = PINGROUP(6, NORTH, qup11, qup11, _, _, _, _, _, _, _),
- [7] = PINGROUP(7, NORTH, qup11, qup11, ddr_bist, _, _, _, _, _, _),
+ [6] = PINGROUP(6, NORTH, qup11_i2c, qup11_uart, _, _, _, _, _, _, _),
+ [7] = PINGROUP(7, NORTH, qup11_i2c, qup11_uart, ddr_bist, _, _, _, _, _, _),
[8] = PINGROUP(8, NORTH, gp_pdm1, ddr_bist, _, phase_flag, qdss_cti, _, _, _, _),
[9] = PINGROUP(9, NORTH, ddr_bist, _, phase_flag, qdss_cti, _, _, _, _, _),
[10] = PINGROUP(10, NORTH, mdp_vsync, ddr_bist, _, _, _, _, _, _, _),
@@ -985,8 +1005,8 @@ static const struct msm_pingroup sc7180_groups[] = {
[12] = PINGROUP(12, SOUTH, mdp_vsync, m_voc, qup01, _, phase_flag, wlan2_adc0, atest_usb10, ddr_pxi3, _),
[13] = PINGROUP(13, SOUTH, cam_mclk, pll_bypassnl, qdss, _, _, _, _, _, _),
[14] = PINGROUP(14, SOUTH, cam_mclk, pll_reset, qdss, _, _, _, _, _, _),
- [15] = PINGROUP(15, SOUTH, cam_mclk, qup02, qup02, qdss, _, _, _, _, _),
- [16] = PINGROUP(16, SOUTH, cam_mclk, qup02, qup02, qdss, _, _, _, _, _),
+ [15] = PINGROUP(15, SOUTH, cam_mclk, qup02_i2c, qup02_uart, qdss, _, _, _, _, _),
+ [16] = PINGROUP(16, SOUTH, cam_mclk, qup02_i2c, qup02_uart, qdss, _, _, _, _, _),
[17] = PINGROUP(17, SOUTH, cci_i2c, _, phase_flag, qdss, _, wlan1_adc0, atest_usb12, ddr_pxi1, atest_char),
[18] = PINGROUP(18, SOUTH, cci_i2c, agera_pll, _, phase_flag, qdss, vsense_trigger, ddr_pxi0, atest_char3, _),
[19] = PINGROUP(19, SOUTH, cci_i2c, _, phase_flag, qdss, atest_char2, _, _, _, _),
@@ -1016,8 +1036,8 @@ static const struct msm_pingroup sc7180_groups[] = {
[43] = PINGROUP(43, NORTH, qup12, _, _, _, _, _, _, _, _),
[44] = PINGROUP(44, NORTH, qup12, _, phase_flag, qdss_cti, wlan1_adc1, atest_usb13, ddr_pxi1, _, _),
[45] = PINGROUP(45, NORTH, qup12, qdss_cti, _, _, _, _, _, _, _),
- [46] = PINGROUP(46, NORTH, qup13, qup13, _, _, _, _, _, _, _),
- [47] = PINGROUP(47, NORTH, qup13, qup13, _, _, _, _, _, _, _),
+ [46] = PINGROUP(46, NORTH, qup13_i2c, qup13_uart, _, _, _, _, _, _, _),
+ [47] = PINGROUP(47, NORTH, qup13_i2c, qup13_uart, _, _, _, _, _, _, _),
[48] = PINGROUP(48, NORTH, gcc_gp1, _, _, _, _, _, _, _, _),
[49] = PINGROUP(49, WEST, mi2s_1, btfm_slimbus, _, _, _, _, _, _, _),
[50] = PINGROUP(50, WEST, mi2s_1, btfm_slimbus, gp_pdm1, _, _, _, _, _, _),
@@ -1085,8 +1105,8 @@ static const struct msm_pingroup sc7180_groups[] = {
[112] = PINGROUP(112, NORTH, _, _, _, _, _, _, _, _, _),
[113] = PINGROUP(113, NORTH, _, _, _, _, _, _, _, _, _),
[114] = PINGROUP(114, NORTH, _, _, _, _, _, _, _, _, _),
- [115] = PINGROUP(115, WEST, qup04, qup04, _, _, _, _, _, _, _),
- [116] = PINGROUP(116, WEST, qup04, qup04, _, _, _, _, _, _, _),
+ [115] = PINGROUP(115, WEST, qup04_i2c, qup04_uart, _, _, _, _, _, _, _),
+ [116] = PINGROUP(116, WEST, qup04_i2c, qup04_uart, _, _, _, _, _, _, _),
[117] = PINGROUP(117, WEST, dp_hot, _, _, _, _, _, _, _, _),
[118] = PINGROUP(118, WEST, _, _, _, _, _, _, _, _, _),
[119] = UFS_RESET(ufs_reset, 0x7f000),
@@ -1099,6 +1119,22 @@ static const struct msm_pingroup sc7180_groups[] = {
[126] = SDC_QDSD_PINGROUP(sdc2_data, 0x7b000, 9, 0),
};
+static const struct msm_gpio_wakeirq_map sc7180_pdc_map[] = {
+ {0, 40}, {3, 50}, {4, 42}, {5, 70}, {6, 41}, {9, 35},
+ {10, 80}, {11, 51}, {16, 20}, {21, 55}, {22, 90}, {23, 21},
+ {24, 61}, {26, 52}, {28, 36}, {30, 100}, {31, 33}, {32, 81},
+ {33, 62}, {34, 43}, {36, 91}, {37, 53}, {38, 63}, {39, 72},
+ {41, 101}, {42, 7}, {43, 34}, {45, 73}, {47, 82}, {49, 17},
+ {52, 109}, {53, 102}, {55, 92}, {56, 56}, {57, 57}, {58, 83},
+ {59, 37}, {62, 110}, {63, 111}, {64, 74}, {65, 44}, {66, 93},
+ {67, 58}, {68, 112}, {69, 32}, {70, 54}, {72, 59}, {73, 64},
+ {74, 71}, {78, 31}, {82, 30}, {85, 103}, {86, 38}, {87, 39},
+ {88, 45}, {89, 46}, {90, 47}, {91, 48}, {92, 60}, {93, 49},
+ {94, 84}, {95, 94}, {98, 65}, {101, 66}, {104, 67}, {109, 104},
+ {110, 68}, {113, 69}, {114, 113}, {115, 108}, {116, 121},
+ {117, 114}, {118, 119},
+};
+
static const struct msm_pinctrl_soc_data sc7180_pinctrl = {
.pins = sc7180_pins,
.npins = ARRAY_SIZE(sc7180_pins),
@@ -1109,6 +1145,8 @@ static const struct msm_pinctrl_soc_data sc7180_pinctrl = {
.ngpios = 120,
.tiles = sc7180_tiles,
.ntiles = ARRAY_SIZE(sc7180_tiles),
+ .wakeirq_map = sc7180_pdc_map,
+ .nwakeirq_map = ARRAY_SIZE(sc7180_pdc_map),
};
static int sc7180_pinctrl_probe(struct platform_device *pdev)
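The new sc7180_pdc_map table pairs TLMM GPIO numbers with PDC (Power Domain Controller) wake lines, and the two new msm_pinctrl_soc_data fields publish it to the common qcom pinctrl code so those GPIOs can wake the system while the TLMM itself is powered down. A hedged sketch of how such a map can be consulted (the qcom core's actual lookup may differ):

	#include <linux/errno.h>

	struct gpio_wakeirq_map {
		unsigned int gpio;
		unsigned int wakeirq;
	};

	/* Linear scan: return the PDC wake line for @gpio, or -ENOENT. */
	static int find_wakeirq(const struct gpio_wakeirq_map *map,
				unsigned int nmaps, unsigned int gpio)
	{
		unsigned int i;

		for (i = 0; i < nmaps; i++)
			if (map[i].gpio == gpio)
				return map[i].wakeirq;

		return -ENOENT;
	}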
diff --git a/drivers/pinctrl/qcom/pinctrl-spmi-gpio.c b/drivers/pinctrl/qcom/pinctrl-spmi-gpio.c
index 653d1095bfea..fe0be8a6ebb7 100644
--- a/drivers/pinctrl/qcom/pinctrl-spmi-gpio.c
+++ b/drivers/pinctrl/qcom/pinctrl-spmi-gpio.c
@@ -1060,7 +1060,7 @@ static int pmic_gpio_probe(struct platform_device *pdev)
girq->fwnode = of_node_to_fwnode(state->dev->of_node);
girq->parent_domain = parent_domain;
girq->child_to_parent_hwirq = pmic_gpio_child_to_parent_hwirq;
- girq->populate_parent_fwspec = gpiochip_populate_parent_fwspec_fourcell;
+ girq->populate_parent_alloc_arg = gpiochip_populate_parent_fwspec_fourcell;
girq->child_offset_to_irq = pmic_gpio_child_offset_to_irq;
girq->child_irq_domain_ops.translate = pmic_gpio_domain_translate;
diff --git a/drivers/pinctrl/qcom/pinctrl-ssbi-gpio.c b/drivers/pinctrl/qcom/pinctrl-ssbi-gpio.c
index dca86886b1f9..fba1d41d20ec 100644
--- a/drivers/pinctrl/qcom/pinctrl-ssbi-gpio.c
+++ b/drivers/pinctrl/qcom/pinctrl-ssbi-gpio.c
@@ -439,7 +439,7 @@ static const struct pinconf_ops pm8xxx_pinconf_ops = {
.pin_config_group_set = pm8xxx_pin_config_set,
};
-static struct pinctrl_desc pm8xxx_pinctrl_desc = {
+static const struct pinctrl_desc pm8xxx_pinctrl_desc = {
.name = "pm8xxx_gpio",
.pctlops = &pm8xxx_pinctrl_ops,
.pmxops = &pm8xxx_pinmux_ops,
@@ -794,7 +794,7 @@ static int pm8xxx_gpio_probe(struct platform_device *pdev)
girq->fwnode = of_node_to_fwnode(pctrl->dev->of_node);
girq->parent_domain = parent_domain;
girq->child_to_parent_hwirq = pm8xxx_child_to_parent_hwirq;
- girq->populate_parent_fwspec = gpiochip_populate_parent_fwspec_fourcell;
+ girq->populate_parent_alloc_arg = gpiochip_populate_parent_fwspec_fourcell;
girq->child_offset_to_irq = pm8xxx_child_offset_to_irq;
girq->child_irq_domain_ops.translate = pm8xxx_domain_translate;
diff --git a/drivers/pinctrl/qcom/pinctrl-ssbi-mpp.c b/drivers/pinctrl/qcom/pinctrl-ssbi-mpp.c
index 3d8b1d74fa2f..681d8dcf37e3 100644
--- a/drivers/pinctrl/qcom/pinctrl-ssbi-mpp.c
+++ b/drivers/pinctrl/qcom/pinctrl-ssbi-mpp.c
@@ -430,7 +430,7 @@ static const struct pinconf_ops pm8xxx_pinconf_ops = {
.pin_config_group_set = pm8xxx_pin_config_set,
};
-static struct pinctrl_desc pm8xxx_pinctrl_desc = {
+static const struct pinctrl_desc pm8xxx_pinctrl_desc = {
.name = "pm8xxx_mpp",
.pctlops = &pm8xxx_pinctrl_ops,
.pmxops = &pm8xxx_pinmux_ops,
diff --git a/drivers/pinctrl/samsung/Kconfig b/drivers/pinctrl/samsung/Kconfig
index 425fadd6c346..dfd805e76862 100644
--- a/drivers/pinctrl/samsung/Kconfig
+++ b/drivers/pinctrl/samsung/Kconfig
@@ -4,30 +4,34 @@
#
config PINCTRL_SAMSUNG
bool
+ depends on OF_GPIO
select PINMUX
select PINCONF
config PINCTRL_EXYNOS
- bool "Pinctrl driver data for Samsung EXYNOS SoCs"
- depends on OF && GPIOLIB && (ARCH_EXYNOS || ARCH_S5PV210)
+ bool "Pinctrl common driver part for Samsung Exynos SoCs"
+ depends on OF_GPIO
+ depends on ARCH_EXYNOS || ARCH_S5PV210 || COMPILE_TEST
select PINCTRL_SAMSUNG
select PINCTRL_EXYNOS_ARM if ARM && (ARCH_EXYNOS || ARCH_S5PV210)
select PINCTRL_EXYNOS_ARM64 if ARM64 && ARCH_EXYNOS
config PINCTRL_EXYNOS_ARM
- bool "ARMv7-specific pinctrl driver data for Exynos" if COMPILE_TEST
+ bool "ARMv7-specific pinctrl driver for Samsung Exynos SoCs" if COMPILE_TEST
depends on PINCTRL_EXYNOS
config PINCTRL_EXYNOS_ARM64
- bool "ARMv8-specific pinctrl driver data for Exynos" if COMPILE_TEST
+ bool "ARMv8-specific pinctrl driver for Samsung Exynos SoCs" if COMPILE_TEST
depends on PINCTRL_EXYNOS
config PINCTRL_S3C24XX
bool "Samsung S3C24XX SoC pinctrl driver"
- depends on ARCH_S3C24XX && OF
+ depends on OF_GPIO
+ depends on ARCH_S3C24XX || COMPILE_TEST
select PINCTRL_SAMSUNG
config PINCTRL_S3C64XX
bool "Samsung S3C64XX SoC pinctrl driver"
- depends on ARCH_S3C64XX
+ depends on OF_GPIO
+ depends on ARCH_S3C64XX || COMPILE_TEST
select PINCTRL_SAMSUNG
diff --git a/drivers/pinctrl/sh-pfc/Kconfig b/drivers/pinctrl/sh-pfc/Kconfig
index 28d66e7cb098..cf0e0dc42b84 100644
--- a/drivers/pinctrl/sh-pfc/Kconfig
+++ b/drivers/pinctrl/sh-pfc/Kconfig
@@ -26,8 +26,9 @@ config PINCTRL_SH_PFC
select PINCTRL_PFC_R8A7792 if ARCH_R8A7792
select PINCTRL_PFC_R8A7793 if ARCH_R8A7793
select PINCTRL_PFC_R8A7794 if ARCH_R8A7794
- select PINCTRL_PFC_R8A7795 if ARCH_R8A7795
- select PINCTRL_PFC_R8A77960 if ARCH_R8A77960 || ARCH_R8A7796
+ select PINCTRL_PFC_R8A77950 if ARCH_R8A77950 || ARCH_R8A7795
+ select PINCTRL_PFC_R8A77951 if ARCH_R8A77951 || ARCH_R8A7795
+ select PINCTRL_PFC_R8A77960 if ARCH_R8A77960
select PINCTRL_PFC_R8A77961 if ARCH_R8A77961
select PINCTRL_PFC_R8A77965 if ARCH_R8A77965
select PINCTRL_PFC_R8A77970 if ARCH_R8A77970
@@ -115,8 +116,11 @@ config PINCTRL_PFC_R8A7793
config PINCTRL_PFC_R8A7794
bool "R-Car E2 pin control support" if COMPILE_TEST
-config PINCTRL_PFC_R8A7795
- bool "R-Car H3 pin control support" if COMPILE_TEST
+config PINCTRL_PFC_R8A77950
+ bool "R-Car H3 ES1.x pin control support" if COMPILE_TEST
+
+config PINCTRL_PFC_R8A77951
+ bool "R-Car H3 ES2.0+ pin control support" if COMPILE_TEST
config PINCTRL_PFC_R8A77960
bool "R-Car M3-W pin control support" if COMPILE_TEST
diff --git a/drivers/pinctrl/sh-pfc/Makefile b/drivers/pinctrl/sh-pfc/Makefile
index 3bc05666e1a6..9ebe321d24c4 100644
--- a/drivers/pinctrl/sh-pfc/Makefile
+++ b/drivers/pinctrl/sh-pfc/Makefile
@@ -18,8 +18,8 @@ obj-$(CONFIG_PINCTRL_PFC_R8A7791) += pfc-r8a7791.o
obj-$(CONFIG_PINCTRL_PFC_R8A7792) += pfc-r8a7792.o
obj-$(CONFIG_PINCTRL_PFC_R8A7793) += pfc-r8a7791.o
obj-$(CONFIG_PINCTRL_PFC_R8A7794) += pfc-r8a7794.o
-obj-$(CONFIG_PINCTRL_PFC_R8A7795) += pfc-r8a7795.o
-obj-$(CONFIG_PINCTRL_PFC_R8A7795) += pfc-r8a7795-es1.o
+obj-$(CONFIG_PINCTRL_PFC_R8A77950) += pfc-r8a77950.o
+obj-$(CONFIG_PINCTRL_PFC_R8A77951) += pfc-r8a77951.o
obj-$(CONFIG_PINCTRL_PFC_R8A77960) += pfc-r8a7796.o
obj-$(CONFIG_PINCTRL_PFC_R8A77961) += pfc-r8a7796.o
obj-$(CONFIG_PINCTRL_PFC_R8A77965) += pfc-r8a77965.o
diff --git a/drivers/pinctrl/sh-pfc/core.c b/drivers/pinctrl/sh-pfc/core.c
index 65e52688f091..82209116955b 100644
--- a/drivers/pinctrl/sh-pfc/core.c
+++ b/drivers/pinctrl/sh-pfc/core.c
@@ -23,6 +23,7 @@
#include <linux/platform_device.h>
#include <linux/psci.h>
#include <linux/slab.h>
+#include <linux/sys_soc.h>
#include "core.h"
@@ -568,18 +569,18 @@ static const struct of_device_id sh_pfc_of_table[] = {
.data = &r8a7794_pinmux_info,
},
#endif
-#ifdef CONFIG_PINCTRL_PFC_R8A7795
+/* Both r8a7795 entries must be present to make sanity checks work */
+#ifdef CONFIG_PINCTRL_PFC_R8A77950
{
.compatible = "renesas,pfc-r8a7795",
- .data = &r8a7795_pinmux_info,
+ .data = &r8a77950_pinmux_info,
},
-#ifdef DEBUG
+#endif
+#ifdef CONFIG_PINCTRL_PFC_R8A77951
{
- /* For sanity checks only (nothing matches against this) */
- .compatible = "renesas,pfc-r8a77950", /* R-Car H3 ES1.0 */
- .data = &r8a7795es1_pinmux_info,
+ .compatible = "renesas,pfc-r8a7795",
+ .data = &r8a77951_pinmux_info,
},
-#endif /* DEBUG */
#endif
#ifdef CONFIG_PINCTRL_PFC_R8A77960
{
@@ -886,19 +887,49 @@ static void __init sh_pfc_check_driver(const struct platform_driver *pdrv)
static inline void sh_pfc_check_driver(struct platform_driver *pdrv) {}
#endif /* !DEBUG */
+#ifdef CONFIG_OF
+static const void *sh_pfc_quirk_match(void)
+{
+#if defined(CONFIG_PINCTRL_PFC_R8A77950) || \
+ defined(CONFIG_PINCTRL_PFC_R8A77951)
+ const struct soc_device_attribute *match;
+ static const struct soc_device_attribute quirks[] = {
+ {
+ .soc_id = "r8a7795", .revision = "ES1.*",
+ .data = &r8a77950_pinmux_info,
+ },
+ {
+ .soc_id = "r8a7795",
+ .data = &r8a77951_pinmux_info,
+ },
+
+ { /* sentinel */ }
+ };
+
+ match = soc_device_match(quirks);
+ if (match)
+ return match->data ?: ERR_PTR(-ENODEV);
+#endif /* CONFIG_PINCTRL_PFC_R8A77950 || CONFIG_PINCTRL_PFC_R8A77951 */
+
+ return NULL;
+}
+#endif /* CONFIG_OF */
+
static int sh_pfc_probe(struct platform_device *pdev)
{
-#ifdef CONFIG_OF
- struct device_node *np = pdev->dev.of_node;
-#endif
const struct sh_pfc_soc_info *info;
struct sh_pfc *pfc;
int ret;
#ifdef CONFIG_OF
- if (np)
- info = of_device_get_match_data(&pdev->dev);
- else
+ if (pdev->dev.of_node) {
+ info = sh_pfc_quirk_match();
+ if (IS_ERR(info))
+ return PTR_ERR(info);
+
+ if (!info)
+ info = of_device_get_match_data(&pdev->dev);
+ } else
#endif
info = (const void *)platform_get_device_id(pdev)->driver_data;
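sh_pfc_quirk_match() resolves the shared "renesas,pfc-r8a7795" compatible to the correct silicon variant at runtime through soc_device_match(), replacing the old r8a7795_pinmux_init() hook that swapped pfc->info after the fact. The `match->data ?: ERR_PTR(-ENODEV)` return works together with the __weak declarations added in sh_pfc.h: when a variant is compiled out, its weak info symbol resolves to NULL, so probing on that silicon fails cleanly with -ENODEV instead of binding the wrong pin tables. A condensed sketch of the flow, with hypothetical names (my_info, es1_info):

	#include <linux/err.h>
	#include <linux/sys_soc.h>

	struct my_info;

	/* Resolves to NULL if the defining object file is compiled out. */
	extern const struct my_info es1_info __weak;

	static const struct soc_device_attribute es1_quirk[] = {
		{ .soc_id = "r8a7795", .revision = "ES1.*", .data = &es1_info },
		{ /* sentinel */ }
	};

	static const void *match_quirk(void)
	{
		const struct soc_device_attribute *m = soc_device_match(es1_quirk);

		if (m)
			return m->data ?: ERR_PTR(-ENODEV);

		return NULL;	/* no quirk: fall back to the OF match table */
	}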
diff --git a/drivers/pinctrl/sh-pfc/gpio.c b/drivers/pinctrl/sh-pfc/gpio.c
index 5a55b8da7919..8213e118aa40 100644
--- a/drivers/pinctrl/sh-pfc/gpio.c
+++ b/drivers/pinctrl/sh-pfc/gpio.c
@@ -386,12 +386,11 @@ int sh_pfc_register_gpiochip(struct sh_pfc *pfc)
}
/* Register the function GPIOs chip. */
- if (pfc->info->nr_func_gpios == 0)
- return 0;
-
- chip = sh_pfc_add_gpiochip(pfc, gpio_function_setup, NULL);
- if (IS_ERR(chip))
- return PTR_ERR(chip);
+ if (pfc->info->nr_func_gpios) {
+ chip = sh_pfc_add_gpiochip(pfc, gpio_function_setup, NULL);
+ if (IS_ERR(chip))
+ return PTR_ERR(chip);
+ }
#endif /* CONFIG_PINCTRL_SH_FUNC_GPIO */
return 0;
diff --git a/drivers/pinctrl/sh-pfc/pfc-r8a7778.c b/drivers/pinctrl/sh-pfc/pfc-r8a7778.c
index 24866a5958ae..a9875038ed9b 100644
--- a/drivers/pinctrl/sh-pfc/pfc-r8a7778.c
+++ b/drivers/pinctrl/sh-pfc/pfc-r8a7778.c
@@ -2305,7 +2305,7 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
FN_ATAG0_A, 0, FN_REMOCON_B, 0,
/* IP0_11_8 [4] */
FN_SD1_DAT2_A, FN_MMC_D2, 0, FN_BS,
- FN_ATADIR0_A, 0, FN_SDSELF_B, 0,
+ FN_ATADIR0_A, 0, FN_SDSELF_A, 0,
FN_PWM4_B, 0, 0, 0,
0, 0, 0, 0,
/* IP0_7_5 [3] */
@@ -2349,7 +2349,7 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
FN_TS_SDAT0_A, 0, 0, 0,
0, 0, 0, 0,
/* IP1_10_8 [3] */
- FN_SD1_CLK_B, FN_MMC_D6, 0, FN_A24,
+ FN_SD1_CD_A, FN_MMC_D6, 0, FN_A24,
FN_DREQ1_A, 0, FN_HRX0_B, FN_TS_SPSYNC0_A,
/* IP1_7_5 [3] */
FN_A23, FN_HTX0_B, FN_TX2_B, FN_DACK2_A,
diff --git a/drivers/pinctrl/sh-pfc/pfc-r8a7795-es1.c b/drivers/pinctrl/sh-pfc/pfc-r8a77950.c
index ad05da8f6516..04812e62f3a4 100644
--- a/drivers/pinctrl/sh-pfc/pfc-r8a7795-es1.c
+++ b/drivers/pinctrl/sh-pfc/pfc-r8a77950.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0
/*
- * R8A7795 ES1.x processor support - PFC hardware block.
+ * R8A77950 processor support - PFC hardware block.
*
* Copyright (C) 2015-2017 Renesas Electronics Corporation
*/
@@ -5562,8 +5562,8 @@ static const struct pinmux_ioctrl_reg pinmux_ioctrl_regs[] = {
{ /* sentinel */ },
};
-static int r8a7795es1_pin_to_pocctrl(struct sh_pfc *pfc, unsigned int pin,
- u32 *pocctrl)
+static int r8a77950_pin_to_pocctrl(struct sh_pfc *pfc, unsigned int pin,
+ u32 *pocctrl)
{
int bit = -EINVAL;
@@ -5820,8 +5820,8 @@ static const struct pinmux_bias_reg pinmux_bias_regs[] = {
{ /* sentinel */ },
};
-static unsigned int r8a7795es1_pinmux_get_bias(struct sh_pfc *pfc,
- unsigned int pin)
+static unsigned int r8a77950_pinmux_get_bias(struct sh_pfc *pfc,
+ unsigned int pin)
{
const struct pinmux_bias_reg *reg;
unsigned int bit;
@@ -5838,8 +5838,8 @@ static unsigned int r8a7795es1_pinmux_get_bias(struct sh_pfc *pfc,
return PIN_CONFIG_BIAS_PULL_DOWN;
}
-static void r8a7795es1_pinmux_set_bias(struct sh_pfc *pfc, unsigned int pin,
- unsigned int bias)
+static void r8a77950_pinmux_set_bias(struct sh_pfc *pfc, unsigned int pin,
+ unsigned int bias)
{
const struct pinmux_bias_reg *reg;
u32 enable, updown;
@@ -5861,15 +5861,15 @@ static void r8a7795es1_pinmux_set_bias(struct sh_pfc *pfc, unsigned int pin,
sh_pfc_write(pfc, reg->puen, enable);
}
-static const struct sh_pfc_soc_operations r8a7795es1_pinmux_ops = {
- .pin_to_pocctrl = r8a7795es1_pin_to_pocctrl,
- .get_bias = r8a7795es1_pinmux_get_bias,
- .set_bias = r8a7795es1_pinmux_set_bias,
+static const struct sh_pfc_soc_operations r8a77950_pinmux_ops = {
+ .pin_to_pocctrl = r8a77950_pin_to_pocctrl,
+ .get_bias = r8a77950_pinmux_get_bias,
+ .set_bias = r8a77950_pinmux_set_bias,
};
-const struct sh_pfc_soc_info r8a7795es1_pinmux_info = {
+const struct sh_pfc_soc_info r8a77950_pinmux_info = {
.name = "r8a77950_pfc",
- .ops = &r8a7795es1_pinmux_ops,
+ .ops = &r8a77950_pinmux_ops,
.unlock_reg = 0xe6060000, /* PMMR */
.function = { PINMUX_FUNCTION_BEGIN, PINMUX_FUNCTION_END },
diff --git a/drivers/pinctrl/sh-pfc/pfc-r8a7795.c b/drivers/pinctrl/sh-pfc/pfc-r8a77951.c
index d3145aa135d0..256fab4b03d3 100644
--- a/drivers/pinctrl/sh-pfc/pfc-r8a7795.c
+++ b/drivers/pinctrl/sh-pfc/pfc-r8a77951.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0
/*
- * R8A7795 ES2.0+ processor support - PFC hardware block.
+ * R8A77951 processor support - PFC hardware block.
*
* Copyright (C) 2015-2019 Renesas Electronics Corporation
*/
@@ -5915,7 +5915,8 @@ static const struct pinmux_ioctrl_reg pinmux_ioctrl_regs[] = {
{ /* sentinel */ },
};
-static int r8a7795_pin_to_pocctrl(struct sh_pfc *pfc, unsigned int pin, u32 *pocctrl)
+static int r8a77951_pin_to_pocctrl(struct sh_pfc *pfc,
+ unsigned int pin, u32 *pocctrl)
{
int bit = -EINVAL;
@@ -6172,8 +6173,8 @@ static const struct pinmux_bias_reg pinmux_bias_regs[] = {
{ /* sentinel */ },
};
-static unsigned int r8a7795_pinmux_get_bias(struct sh_pfc *pfc,
- unsigned int pin)
+static unsigned int r8a77951_pinmux_get_bias(struct sh_pfc *pfc,
+ unsigned int pin)
{
const struct pinmux_bias_reg *reg;
unsigned int bit;
@@ -6190,8 +6191,8 @@ static unsigned int r8a7795_pinmux_get_bias(struct sh_pfc *pfc,
return PIN_CONFIG_BIAS_PULL_DOWN;
}
-static void r8a7795_pinmux_set_bias(struct sh_pfc *pfc, unsigned int pin,
- unsigned int bias)
+static void r8a77951_pinmux_set_bias(struct sh_pfc *pfc, unsigned int pin,
+ unsigned int bias)
{
const struct pinmux_bias_reg *reg;
u32 enable, updown;
@@ -6213,29 +6214,15 @@ static void r8a7795_pinmux_set_bias(struct sh_pfc *pfc, unsigned int pin,
sh_pfc_write(pfc, reg->puen, enable);
}
-static const struct soc_device_attribute r8a7795es1[] = {
- { .soc_id = "r8a7795", .revision = "ES1.*" },
- { /* sentinel */ }
+static const struct sh_pfc_soc_operations r8a77951_pinmux_ops = {
+ .pin_to_pocctrl = r8a77951_pin_to_pocctrl,
+ .get_bias = r8a77951_pinmux_get_bias,
+ .set_bias = r8a77951_pinmux_set_bias,
};
-static int r8a7795_pinmux_init(struct sh_pfc *pfc)
-{
- if (soc_device_match(r8a7795es1))
- pfc->info = &r8a7795es1_pinmux_info;
-
- return 0;
-}
-
-static const struct sh_pfc_soc_operations r8a7795_pinmux_ops = {
- .init = r8a7795_pinmux_init,
- .pin_to_pocctrl = r8a7795_pin_to_pocctrl,
- .get_bias = r8a7795_pinmux_get_bias,
- .set_bias = r8a7795_pinmux_set_bias,
-};
-
-const struct sh_pfc_soc_info r8a7795_pinmux_info = {
+const struct sh_pfc_soc_info r8a77951_pinmux_info = {
.name = "r8a77951_pfc",
- .ops = &r8a7795_pinmux_ops,
+ .ops = &r8a77951_pinmux_ops,
.unlock_reg = 0xe6060000, /* PMMR */
.function = { PINMUX_FUNCTION_BEGIN, PINMUX_FUNCTION_END },
diff --git a/drivers/pinctrl/sh-pfc/pfc-r8a77965.c b/drivers/pinctrl/sh-pfc/pfc-r8a77965.c
index 8bdf33c807f6..6616f5210b9d 100644
--- a/drivers/pinctrl/sh-pfc/pfc-r8a77965.c
+++ b/drivers/pinctrl/sh-pfc/pfc-r8a77965.c
@@ -5998,7 +5998,7 @@ static const struct pinmux_drive_reg pinmux_drive_regs[] = {
{ PIN_DU_DOTCLKIN1, 0, 2 }, /* DU_DOTCLKIN1 */
} },
{ PINMUX_DRIVE_REG("DRVCTRL12", 0xe6060330) {
- { PIN_DU_DOTCLKIN3, 28, 2 }, /* DU_DOTCLKIN3 */
+ { PIN_DU_DOTCLKIN3, 24, 2 }, /* DU_DOTCLKIN3 */
{ PIN_FSCLKST, 20, 2 }, /* FSCLKST */
{ PIN_TMS, 4, 2 }, /* TMS */
} },
@@ -6254,8 +6254,8 @@ static const struct pinmux_bias_reg pinmux_bias_regs[] = {
[31] = PIN_DU_DOTCLKIN1, /* DU_DOTCLKIN1 */
} },
{ PINMUX_BIAS_REG("PUEN3", 0xe606040c, "PUD3", 0xe606044c) {
- [ 0] = PIN_DU_DOTCLKIN3, /* DU_DOTCLKIN3 */
- [ 1] = SH_PFC_PIN_NONE,
+ [ 0] = SH_PFC_PIN_NONE,
+ [ 1] = PIN_DU_DOTCLKIN3, /* DU_DOTCLKIN3 */
[ 2] = PIN_FSCLKST, /* FSCLKST */
[ 3] = PIN_EXTALR, /* EXTALR */
[ 4] = PIN_TRST_N, /* TRST# */
diff --git a/drivers/pinctrl/sh-pfc/pfc-sh7264.c b/drivers/pinctrl/sh-pfc/pfc-sh7264.c
index 4a95867deb8a..908837ea487b 100644
--- a/drivers/pinctrl/sh-pfc/pfc-sh7264.c
+++ b/drivers/pinctrl/sh-pfc/pfc-sh7264.c
@@ -497,17 +497,15 @@ enum {
SD_WP_MARK, SD_CLK_MARK, SD_CMD_MARK,
CRX0_MARK, CRX1_MARK,
CTX0_MARK, CTX1_MARK,
+ CRX0_CRX1_MARK, CTX0_CTX1_MARK,
PWM1A_MARK, PWM1B_MARK, PWM1C_MARK, PWM1D_MARK,
PWM1E_MARK, PWM1F_MARK, PWM1G_MARK, PWM1H_MARK,
PWM2A_MARK, PWM2B_MARK, PWM2C_MARK, PWM2D_MARK,
PWM2E_MARK, PWM2F_MARK, PWM2G_MARK, PWM2H_MARK,
IERXD_MARK, IETXD_MARK,
- CRX0_CRX1_MARK,
WDTOVF_MARK,
- CRX0X1_MARK,
-
/* DMAC */
TEND0_MARK, DACK0_MARK, DREQ0_MARK,
TEND1_MARK, DACK1_MARK, DREQ1_MARK,
@@ -995,12 +993,12 @@ static const u16 pinmux_data[] = {
PINMUX_DATA(PJ3_DATA, PJ3MD_00),
PINMUX_DATA(CRX1_MARK, PJ3MD_01),
- PINMUX_DATA(CRX0X1_MARK, PJ3MD_10),
+ PINMUX_DATA(CRX0_CRX1_MARK, PJ3MD_10),
PINMUX_DATA(IRQ1_PJ_MARK, PJ3MD_11),
PINMUX_DATA(PJ2_DATA, PJ2MD_000),
PINMUX_DATA(CTX1_MARK, PJ2MD_001),
- PINMUX_DATA(CRX0_CRX1_MARK, PJ2MD_010),
+ PINMUX_DATA(CTX0_CTX1_MARK, PJ2MD_010),
PINMUX_DATA(CS2_MARK, PJ2MD_011),
PINMUX_DATA(SCK0_MARK, PJ2MD_100),
PINMUX_DATA(LCD_M_DISP_MARK, PJ2MD_101),
@@ -1245,6 +1243,7 @@ static const struct pinmux_func pinmux_func_gpios[] = {
GPIO_FN(CTX1),
GPIO_FN(CRX1),
GPIO_FN(CTX0),
+ GPIO_FN(CTX0_CTX1),
GPIO_FN(CRX0),
GPIO_FN(CRX0_CRX1),
@@ -2020,18 +2019,18 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
{ PINMUX_CFG_REG("PKIOR0", 0xfffe3932, 16, 1, GROUP(
0, 0, 0, 0, 0, 0, 0, 0,
- PJ11_IN, PJ11_OUT,
- PJ10_IN, PJ10_OUT,
- PJ9_IN, PJ9_OUT,
- PJ8_IN, PJ8_OUT,
- PJ7_IN, PJ7_OUT,
- PJ6_IN, PJ6_OUT,
- PJ5_IN, PJ5_OUT,
- PJ4_IN, PJ4_OUT,
- PJ3_IN, PJ3_OUT,
- PJ2_IN, PJ2_OUT,
- PJ1_IN, PJ1_OUT,
- PJ0_IN, PJ0_OUT ))
+ PK11_IN, PK11_OUT,
+ PK10_IN, PK10_OUT,
+ PK9_IN, PK9_OUT,
+ PK8_IN, PK8_OUT,
+ PK7_IN, PK7_OUT,
+ PK6_IN, PK6_OUT,
+ PK5_IN, PK5_OUT,
+ PK4_IN, PK4_OUT,
+ PK3_IN, PK3_OUT,
+ PK2_IN, PK2_OUT,
+ PK1_IN, PK1_OUT,
+ PK0_IN, PK0_OUT ))
},
{}
};
diff --git a/drivers/pinctrl/sh-pfc/pfc-sh7269.c b/drivers/pinctrl/sh-pfc/pfc-sh7269.c
index 6cbb18ef77dc..d20974a55d93 100644
--- a/drivers/pinctrl/sh-pfc/pfc-sh7269.c
+++ b/drivers/pinctrl/sh-pfc/pfc-sh7269.c
@@ -737,13 +737,12 @@ enum {
CRX0_MARK, CTX0_MARK,
CRX1_MARK, CTX1_MARK,
CRX2_MARK, CTX2_MARK,
- CRX0_CRX1_MARK,
- CRX0_CRX1_CRX2_MARK,
- CTX0CTX1CTX2_MARK,
+ CRX0_CRX1_MARK, CTX0_CTX1_MARK,
+ CRX0_CRX1_CRX2_MARK, CTX0_CTX1_CTX2_MARK,
CRX1_PJ22_MARK, CTX1_PJ23_MARK,
CRX2_PJ20_MARK, CTX2_PJ21_MARK,
- CRX0CRX1_PJ22_MARK,
- CRX0CRX1CRX2_PJ20_MARK,
+ CRX0_CRX1_PJ22_MARK, CTX0_CTX1_PJ23_MARK,
+ CRX0_CRX1_CRX2_PJ20_MARK, CTX0_CTX1_CTX2_PJ21_MARK,
/* VDC */
DV_CLK_MARK,
@@ -821,6 +820,7 @@ static const u16 pinmux_data[] = {
PINMUX_DATA(CS3_MARK, PC8MD_001),
PINMUX_DATA(TXD7_MARK, PC8MD_010),
PINMUX_DATA(CTX1_MARK, PC8MD_011),
+ PINMUX_DATA(CTX0_CTX1_MARK, PC8MD_100),
PINMUX_DATA(PC7_DATA, PC7MD_000),
PINMUX_DATA(CKE_MARK, PC7MD_001),
@@ -833,11 +833,12 @@ static const u16 pinmux_data[] = {
PINMUX_DATA(CAS_MARK, PC6MD_001),
PINMUX_DATA(SCK7_MARK, PC6MD_010),
PINMUX_DATA(CTX0_MARK, PC6MD_011),
+ PINMUX_DATA(CTX0_CTX1_CTX2_MARK, PC6MD_100),
PINMUX_DATA(PC5_DATA, PC5MD_000),
PINMUX_DATA(RAS_MARK, PC5MD_001),
PINMUX_DATA(CRX0_MARK, PC5MD_011),
- PINMUX_DATA(CTX0CTX1CTX2_MARK, PC5MD_100),
+ PINMUX_DATA(CTX0_CTX1_CTX2_MARK, PC5MD_100),
PINMUX_DATA(IRQ0_PC_MARK, PC5MD_101),
PINMUX_DATA(PC4_DATA, PC4MD_00),
@@ -1289,30 +1290,32 @@ static const u16 pinmux_data[] = {
PINMUX_DATA(LCD_DATA23_PJ23_MARK, PJ23MD_010),
PINMUX_DATA(LCD_TCON6_MARK, PJ23MD_011),
PINMUX_DATA(IRQ3_PJ_MARK, PJ23MD_100),
- PINMUX_DATA(CTX1_MARK, PJ23MD_101),
+ PINMUX_DATA(CTX1_PJ23_MARK, PJ23MD_101),
+ PINMUX_DATA(CTX0_CTX1_PJ23_MARK, PJ23MD_110),
PINMUX_DATA(PJ22_DATA, PJ22MD_000),
PINMUX_DATA(DV_DATA22_MARK, PJ22MD_001),
PINMUX_DATA(LCD_DATA22_PJ22_MARK, PJ22MD_010),
PINMUX_DATA(LCD_TCON5_MARK, PJ22MD_011),
PINMUX_DATA(IRQ2_PJ_MARK, PJ22MD_100),
- PINMUX_DATA(CRX1_MARK, PJ22MD_101),
- PINMUX_DATA(CRX0_CRX1_MARK, PJ22MD_110),
+ PINMUX_DATA(CRX1_PJ22_MARK, PJ22MD_101),
+ PINMUX_DATA(CRX0_CRX1_PJ22_MARK, PJ22MD_110),
PINMUX_DATA(PJ21_DATA, PJ21MD_000),
PINMUX_DATA(DV_DATA21_MARK, PJ21MD_001),
PINMUX_DATA(LCD_DATA21_PJ21_MARK, PJ21MD_010),
PINMUX_DATA(LCD_TCON4_MARK, PJ21MD_011),
PINMUX_DATA(IRQ1_PJ_MARK, PJ21MD_100),
- PINMUX_DATA(CTX2_MARK, PJ21MD_101),
+ PINMUX_DATA(CTX2_PJ21_MARK, PJ21MD_101),
+ PINMUX_DATA(CTX0_CTX1_CTX2_PJ21_MARK, PJ21MD_110),
PINMUX_DATA(PJ20_DATA, PJ20MD_000),
PINMUX_DATA(DV_DATA20_MARK, PJ20MD_001),
PINMUX_DATA(LCD_DATA20_PJ20_MARK, PJ20MD_010),
PINMUX_DATA(LCD_TCON3_MARK, PJ20MD_011),
PINMUX_DATA(IRQ0_PJ_MARK, PJ20MD_100),
- PINMUX_DATA(CRX2_MARK, PJ20MD_101),
- PINMUX_DATA(CRX0CRX1CRX2_PJ20_MARK, PJ20MD_110),
+ PINMUX_DATA(CRX2_PJ20_MARK, PJ20MD_101),
+ PINMUX_DATA(CRX0_CRX1_CRX2_PJ20_MARK, PJ20MD_110),
PINMUX_DATA(PJ19_DATA, PJ19MD_000),
PINMUX_DATA(DV_DATA19_MARK, PJ19MD_001),
@@ -1663,12 +1666,24 @@ static const struct pinmux_func pinmux_func_gpios[] = {
GPIO_FN(WDTOVF),
/* CAN */
+ GPIO_FN(CTX2),
+ GPIO_FN(CRX2),
GPIO_FN(CTX1),
GPIO_FN(CRX1),
GPIO_FN(CTX0),
GPIO_FN(CRX0),
+ GPIO_FN(CTX0_CTX1),
GPIO_FN(CRX0_CRX1),
+ GPIO_FN(CTX0_CTX1_CTX2),
GPIO_FN(CRX0_CRX1_CRX2),
+ GPIO_FN(CTX2_PJ21),
+ GPIO_FN(CRX2_PJ20),
+ GPIO_FN(CTX1_PJ23),
+ GPIO_FN(CRX1_PJ22),
+ GPIO_FN(CTX0_CTX1_PJ23),
+ GPIO_FN(CRX0_CRX1_PJ22),
+ GPIO_FN(CTX0_CTX1_CTX2_PJ21),
+ GPIO_FN(CRX0_CRX1_CRX2_PJ20),
/* DMAC */
GPIO_FN(TEND0),
diff --git a/drivers/pinctrl/sh-pfc/sh_pfc.h b/drivers/pinctrl/sh-pfc/sh_pfc.h
index 640d2a4cb838..d57e633e99c0 100644
--- a/drivers/pinctrl/sh-pfc/sh_pfc.h
+++ b/drivers/pinctrl/sh-pfc/sh_pfc.h
@@ -318,8 +318,8 @@ extern const struct sh_pfc_soc_info r8a7791_pinmux_info;
extern const struct sh_pfc_soc_info r8a7792_pinmux_info;
extern const struct sh_pfc_soc_info r8a7793_pinmux_info;
extern const struct sh_pfc_soc_info r8a7794_pinmux_info;
-extern const struct sh_pfc_soc_info r8a7795_pinmux_info;
-extern const struct sh_pfc_soc_info r8a7795es1_pinmux_info;
+extern const struct sh_pfc_soc_info r8a77950_pinmux_info __weak;
+extern const struct sh_pfc_soc_info r8a77951_pinmux_info __weak;
extern const struct sh_pfc_soc_info r8a77960_pinmux_info;
extern const struct sh_pfc_soc_info r8a77961_pinmux_info;
extern const struct sh_pfc_soc_info r8a77965_pinmux_info;
diff --git a/drivers/pinctrl/stm32/pinctrl-stm32.h b/drivers/pinctrl/stm32/pinctrl-stm32.h
index ec0d34c33903..b0882d120765 100644
--- a/drivers/pinctrl/stm32/pinctrl-stm32.h
+++ b/drivers/pinctrl/stm32/pinctrl-stm32.h
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* Copyright (C) Maxime Coquelin 2015
* Copyright (C) STMicroelectronics 2017
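The pinctrl-stm32.h change is a license-tag style fix: per the kernel's Documentation/process/license-rules.rst, C header files must carry their SPDX tag in a /* */ comment (the // form is for .c sources), because headers can be pulled into contexts such as assembler files or linker scripts where // comments are not valid. The first line of a header therefore reads:

	/* SPDX-License-Identifier: GPL-2.0 */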
diff --git a/drivers/pinctrl/sunxi/pinctrl-sun50i-h5.c b/drivers/pinctrl/sunxi/pinctrl-sun50i-h5.c
index a78d7b922ef4..31d62bbb7f43 100644
--- a/drivers/pinctrl/sunxi/pinctrl-sun50i-h5.c
+++ b/drivers/pinctrl/sunxi/pinctrl-sun50i-h5.c
@@ -19,7 +19,6 @@
#include <linux/platform_device.h>
#include <linux/of.h>
#include <linux/of_device.h>
-#include <linux/of_irq.h>
#include <linux/pinctrl/pinctrl.h>
#include "pinctrl-sunxi.h"
@@ -549,7 +548,17 @@ static const struct sunxi_pinctrl_desc sun50i_h5_pinctrl_data = {
static int sun50i_h5_pinctrl_probe(struct platform_device *pdev)
{
- switch (of_irq_count(pdev->dev.of_node)) {
+ int ret;
+
+ ret = platform_irq_count(pdev);
+ if (ret < 0) {
+ if (ret != -EPROBE_DEFER)
+ dev_err(&pdev->dev, "Couldn't determine irq count: %pe\n",
+ ERR_PTR(ret));
+ return ret;
+ }
+
+ switch (ret) {
case 2:
dev_warn(&pdev->dev,
"Your device tree's pinctrl node is broken, which has no IRQ of PG bank routed.\n");
diff --git a/drivers/pinctrl/tegra/pinctrl-tegra.c b/drivers/pinctrl/tegra/pinctrl-tegra.c
index 692d8b3e2a20..cefbbb8d1a68 100644
--- a/drivers/pinctrl/tegra/pinctrl-tegra.c
+++ b/drivers/pinctrl/tegra/pinctrl-tegra.c
@@ -648,7 +648,7 @@ static int tegra_pinctrl_suspend(struct device *dev)
{
struct tegra_pmx *pmx = dev_get_drvdata(dev);
u32 *backup_regs = pmx->backup_regs;
- u32 *regs;
+ u32 __iomem *regs;
size_t bank_size;
unsigned int i, k;
@@ -666,7 +666,7 @@ static int tegra_pinctrl_resume(struct device *dev)
{
struct tegra_pmx *pmx = dev_get_drvdata(dev);
u32 *backup_regs = pmx->backup_regs;
- u32 *regs;
+ u32 __iomem *regs;
size_t bank_size;
unsigned int i, k;
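The tegra change is a sparse annotation fix: the pointers alias ioremap()ed registers, so they must be declared u32 __iomem * and only dereferenced through the MMIO accessors. A sketch of what a register-backup loop looks like under that rule, with hypothetical names:

	#include <linux/io.h>

	/* Save @nregs MMIO words; @regs must come from ioremap(). */
	static void save_bank(u32 *backup, const u32 __iomem *regs,
			      size_t nregs)
	{
		size_t i;

		for (i = 0; i < nregs; i++)
			backup[i] = readl_relaxed(regs + i); /* no plain loads */
	}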
diff --git a/drivers/platform/chrome/wilco_ec/keyboard_leds.c b/drivers/platform/chrome/wilco_ec/keyboard_leds.c
index bb0edf51dfda..5731d1b60e28 100644
--- a/drivers/platform/chrome/wilco_ec/keyboard_leds.c
+++ b/drivers/platform/chrome/wilco_ec/keyboard_leds.c
@@ -73,13 +73,6 @@ static int send_kbbl_msg(struct wilco_ec_device *ec,
return ret;
}
- if (response->status) {
- dev_err(ec->dev,
- "EC reported failure sending keyboard LEDs command: %d",
- response->status);
- return -EIO;
- }
-
return 0;
}
@@ -87,6 +80,7 @@ static int set_kbbl(struct wilco_ec_device *ec, enum led_brightness brightness)
{
struct wilco_keyboard_leds_msg request;
struct wilco_keyboard_leds_msg response;
+ int ret;
memset(&request, 0, sizeof(request));
request.command = WILCO_EC_COMMAND_KBBL;
@@ -94,7 +88,18 @@ static int set_kbbl(struct wilco_ec_device *ec, enum led_brightness brightness)
request.mode = WILCO_KBBL_MODE_FLAG_PWM;
request.percent = brightness;
- return send_kbbl_msg(ec, &request, &response);
+ ret = send_kbbl_msg(ec, &request, &response);
+ if (ret < 0)
+ return ret;
+
+ if (response.status) {
+ dev_err(ec->dev,
+ "EC reported failure sending keyboard LEDs command: %d",
+ response.status);
+ return -EIO;
+ }
+
+ return 0;
}
static int kbbl_exist(struct wilco_ec_device *ec, bool *exists)
@@ -140,6 +145,13 @@ static int kbbl_init(struct wilco_ec_device *ec)
if (ret < 0)
return ret;
+ if (response.status) {
+ dev_err(ec->dev,
+ "EC reported failure sending keyboard LEDs command: %d",
+ response.status);
+ return -EIO;
+ }
+
if (response.mode & WILCO_KBBL_MODE_FLAG_PWM)
return response.percent;
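The wilco_ec rework moves the EC status check out of send_kbbl_msg() and into the callers, presumably so a caller like kbbl_exist() can interpret a nonzero EC status as "no keyboard backlight" rather than a hard -EIO. Since set_kbbl() and kbbl_init() now repeat the same check, a shared helper would be a natural follow-up; a hedged sketch (check_kbbl_response is hypothetical, the types are the driver's own):

	static int check_kbbl_response(struct wilco_ec_device *ec,
				       struct wilco_keyboard_leds_msg *response)
	{
		if (response->status) {
			dev_err(ec->dev,
				"EC reported failure sending keyboard LEDs command: %d",
				response->status);
			return -EIO;
		}

		return 0;
	}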
diff --git a/drivers/platform/goldfish/goldfish_pipe.c b/drivers/platform/goldfish/goldfish_pipe.c
index cef0133aa47a..1ab207ec9c94 100644
--- a/drivers/platform/goldfish/goldfish_pipe.c
+++ b/drivers/platform/goldfish/goldfish_pipe.c
@@ -257,12 +257,12 @@ static int goldfish_pipe_error_convert(int status)
}
}
-static int pin_user_pages(unsigned long first_page,
- unsigned long last_page,
- unsigned int last_page_size,
- int is_write,
- struct page *pages[MAX_BUFFERS_PER_COMMAND],
- unsigned int *iter_last_page_size)
+static int goldfish_pin_pages(unsigned long first_page,
+ unsigned long last_page,
+ unsigned int last_page_size,
+ int is_write,
+ struct page *pages[MAX_BUFFERS_PER_COMMAND],
+ unsigned int *iter_last_page_size)
{
int ret;
int requested_pages = ((last_page - first_page) >> PAGE_SHIFT) + 1;
@@ -274,7 +274,7 @@ static int pin_user_pages(unsigned long first_page,
*iter_last_page_size = last_page_size;
}
- ret = get_user_pages_fast(first_page, requested_pages,
+ ret = pin_user_pages_fast(first_page, requested_pages,
!is_write ? FOLL_WRITE : 0,
pages);
if (ret <= 0)
@@ -285,18 +285,6 @@ static int pin_user_pages(unsigned long first_page,
return ret;
}
-static void release_user_pages(struct page **pages, int pages_count,
- int is_write, s32 consumed_size)
-{
- int i;
-
- for (i = 0; i < pages_count; i++) {
- if (!is_write && consumed_size > 0)
- set_page_dirty(pages[i]);
- put_page(pages[i]);
- }
-}
-
/* Populate the call parameters, merging adjacent pages together */
static void populate_rw_params(struct page **pages,
int pages_count,
@@ -354,9 +342,9 @@ static int transfer_max_buffers(struct goldfish_pipe *pipe,
if (mutex_lock_interruptible(&pipe->lock))
return -ERESTARTSYS;
- pages_count = pin_user_pages(first_page, last_page,
- last_page_size, is_write,
- pipe->pages, &iter_last_page_size);
+ pages_count = goldfish_pin_pages(first_page, last_page,
+ last_page_size, is_write,
+ pipe->pages, &iter_last_page_size);
if (pages_count < 0) {
mutex_unlock(&pipe->lock);
return pages_count;
@@ -372,7 +360,8 @@ static int transfer_max_buffers(struct goldfish_pipe *pipe,
*consumed_size = pipe->command_buffer->rw_params.consumed_size;
- release_user_pages(pipe->pages, pages_count, is_write, *consumed_size);
+ unpin_user_pages_dirty_lock(pipe->pages, pages_count,
+ !is_write && *consumed_size > 0);
mutex_unlock(&pipe->lock);
return 0;
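The goldfish_pipe hunks convert the driver to the FOLL_PIN API: the locally defined pin_user_pages()/release_user_pages() pair (the former clashing with the new core symbol of the same name) becomes goldfish_pin_pages() plus the core's unpin_user_pages_dirty_lock(), and get_user_pages_fast() becomes pin_user_pages_fast(), so pages targeted by device I/O are tracked as pinned rather than merely referenced. A minimal sketch of the pairing, assuming the device writes into the pages:

	#include <linux/mm.h>

	static int demo_transfer(unsigned long uaddr, struct page **pages,
				 int nr_pages)
	{
		/* FOLL_WRITE: the pages must be writable for device output. */
		int got = pin_user_pages_fast(uaddr, nr_pages, FOLL_WRITE, pages);

		if (got <= 0)
			return got ? got : -EFAULT;

		/* ... hardware fills the pinned pages here ... */

		/* Unpin and mark dirty in one call. */
		unpin_user_pages_dirty_lock(pages, got, true);

		return 0;
	}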
diff --git a/drivers/platform/mellanox/mlxbf-tmfifo.c b/drivers/platform/mellanox/mlxbf-tmfifo.c
index 9a5c9fd2dbc6..5739a9669b29 100644
--- a/drivers/platform/mellanox/mlxbf-tmfifo.c
+++ b/drivers/platform/mellanox/mlxbf-tmfifo.c
@@ -149,7 +149,7 @@ struct mlxbf_tmfifo_irq_info {
* @work: work struct for deferred process
* @timer: background timer
* @vring: Tx/Rx ring
- * @spin_lock: spin lock
+ * @spin_lock: Tx/Rx spin locks
* @is_ready: ready flag
*/
struct mlxbf_tmfifo {
@@ -164,7 +164,7 @@ struct mlxbf_tmfifo {
struct work_struct work;
struct timer_list timer;
struct mlxbf_tmfifo_vring *vring[2];
- spinlock_t spin_lock; /* spin lock */
+ spinlock_t spin_lock[2]; /* Tx/Rx spin locks */
bool is_ready;
};
@@ -525,7 +525,7 @@ static void mlxbf_tmfifo_console_tx(struct mlxbf_tmfifo *fifo, int avail)
writeq(*(u64 *)&hdr, fifo->tx_base + MLXBF_TMFIFO_TX_DATA);
/* Use spin-lock to protect the 'cons->tx_buf'. */
- spin_lock_irqsave(&fifo->spin_lock, flags);
+ spin_lock_irqsave(&fifo->spin_lock[0], flags);
while (size > 0) {
addr = cons->tx_buf.buf + cons->tx_buf.tail;
@@ -552,7 +552,7 @@ static void mlxbf_tmfifo_console_tx(struct mlxbf_tmfifo *fifo, int avail)
}
}
- spin_unlock_irqrestore(&fifo->spin_lock, flags);
+ spin_unlock_irqrestore(&fifo->spin_lock[0], flags);
}
/* Rx/Tx one word in the descriptor buffer. */
@@ -731,9 +731,9 @@ static bool mlxbf_tmfifo_rxtx_one_desc(struct mlxbf_tmfifo_vring *vring,
fifo->vring[is_rx] = NULL;
/* Notify upper layer that packet is done. */
- spin_lock_irqsave(&fifo->spin_lock, flags);
+ spin_lock_irqsave(&fifo->spin_lock[is_rx], flags);
vring_interrupt(0, vring->vq);
- spin_unlock_irqrestore(&fifo->spin_lock, flags);
+ spin_unlock_irqrestore(&fifo->spin_lock[is_rx], flags);
}
mlxbf_tmfifo_desc_done:
@@ -852,10 +852,10 @@ static bool mlxbf_tmfifo_virtio_notify(struct virtqueue *vq)
* worker handler.
*/
if (vring->vdev_id == VIRTIO_ID_CONSOLE) {
- spin_lock_irqsave(&fifo->spin_lock, flags);
+ spin_lock_irqsave(&fifo->spin_lock[0], flags);
tm_vdev = fifo->vdev[VIRTIO_ID_CONSOLE];
mlxbf_tmfifo_console_output(tm_vdev, vring);
- spin_unlock_irqrestore(&fifo->spin_lock, flags);
+ spin_unlock_irqrestore(&fifo->spin_lock[0], flags);
} else if (test_and_set_bit(MLXBF_TM_TX_LWM_IRQ,
&fifo->pend_events)) {
return true;
@@ -1189,7 +1189,8 @@ static int mlxbf_tmfifo_probe(struct platform_device *pdev)
if (!fifo)
return -ENOMEM;
- spin_lock_init(&fifo->spin_lock);
+ spin_lock_init(&fifo->spin_lock[0]);
+ spin_lock_init(&fifo->spin_lock[1]);
INIT_WORK(&fifo->work, mlxbf_tmfifo_work_handler);
mutex_init(&fifo->lock);
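The mlxbf-tmfifo change splits the single FIFO lock into a two-element array indexed by direction (0 = Tx, including console output; 1 = Rx), so console Tx buffering no longer contends with Rx completion delivered through vring_interrupt(). A sketch of the indexed-lock idiom, with hypothetical names (demo_fifo, demo_complete):

	#include <linux/spinlock.h>
	#include <linux/virtio.h>
	#include <linux/virtio_ring.h>

	struct demo_fifo {
		spinlock_t lock[2];	/* [0] = Tx, [1] = Rx */
	};

	static void demo_complete(struct demo_fifo *fifo, int is_rx,
				  struct virtqueue *vq)
	{
		unsigned long flags;

		spin_lock_irqsave(&fifo->lock[is_rx], flags);
		vring_interrupt(0, vq);	/* notify the upper layer */
		spin_unlock_irqrestore(&fifo->lock[is_rx], flags);
	}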
diff --git a/drivers/platform/mellanox/mlxreg-hotplug.c b/drivers/platform/mellanox/mlxreg-hotplug.c
index 706207d192ae..77be37a1fbcf 100644
--- a/drivers/platform/mellanox/mlxreg-hotplug.c
+++ b/drivers/platform/mellanox/mlxreg-hotplug.c
@@ -504,6 +504,20 @@ static int mlxreg_hotplug_set_irq(struct mlxreg_hotplug_priv_data *priv)
item = pdata->items;
for (i = 0; i < pdata->counter; i++, item++) {
+ if (item->capability) {
+ /*
+ * Read group capability register to get actual number
+ * of interrupt capable components and set group mask
+ * accordingly.
+ */
+ ret = regmap_read(priv->regmap, item->capability,
+ &regval);
+ if (ret)
+ goto out;
+
+ item->mask = GENMASK((regval & item->mask) - 1, 0);
+ }
+
/* Clear group presence event. */
ret = regmap_write(priv->regmap, item->reg +
MLXREG_HOTPLUG_EVENT_OFF, 0);
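The mlxreg-hotplug addition sizes each group's event mask from a capability register instead of assuming a fully populated system: the low bits of the capability value give the count of interrupt-capable components, and GENMASK((regval & item->mask) - 1, 0) turns that count into a contiguous mask. For example, a count of 4 yields GENMASK(3, 0) == 0x0f, enabling events 0..3 only. As a tiny sketch (demo_group_mask is hypothetical; a real caller should also guard against a zero count):

	#include <linux/bits.h>

	static u32 demo_group_mask(u32 regval, u32 cap_mask)
	{
		u32 n = regval & cap_mask;	/* populated components */

		return GENMASK(n - 1, 0);	/* n == 4 -> 0x0f */
	}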
diff --git a/drivers/platform/x86/Kconfig b/drivers/platform/x86/Kconfig
index 27d5b40fb717..587403c44598 100644
--- a/drivers/platform/x86/Kconfig
+++ b/drivers/platform/x86/Kconfig
@@ -997,7 +997,6 @@ config INTEL_SCU_IPC
config INTEL_SCU_IPC_UTIL
tristate "Intel SCU IPC utility driver"
depends on INTEL_SCU_IPC
- default y
---help---
The IPC Util driver provides an interface with the SCU enabling
low level access for debug work and updating the firmware. Say
@@ -1299,9 +1298,9 @@ config INTEL_ATOMISP2_PM
depends on PCI && IOSF_MBI && PM
help
Power-management driver for Intel's Image Signal Processor found on
- Bay and Cherry Trail devices. This dummy driver's sole purpose is to
- turn the ISP off (put it in D3) to save power and to allow entering
- of S0ix modes.
+ Bay Trail and Cherry Trail devices. This dummy driver's sole purpose
+ is to turn the ISP off (put it in D3) to save power and to allow
+ entering of S0ix modes.
To compile this driver as a module, choose M here: the module
will be called intel_atomisp2_pm.
@@ -1337,6 +1336,17 @@ config PCENGINES_APU2
To compile this driver as a module, choose M here: the module
will be called pcengines-apuv2.
+config INTEL_UNCORE_FREQ_CONTROL
+ tristate "Intel Uncore frequency control driver"
+ depends on X86_64
+ help
+ This driver allows control of uncore frequency limits on
+ supported server platforms.
+ Uncore frequency controls RING/LLC (last-level cache) clocks.
+
+ To compile this driver as a module, choose M here: the module
+ will be called intel-uncore-frequency.
+
source "drivers/platform/x86/intel_speed_select_if/Kconfig"
config SYSTEM76_ACPI
diff --git a/drivers/platform/x86/Makefile b/drivers/platform/x86/Makefile
index 42d85a00be4e..3747b1f07cf1 100644
--- a/drivers/platform/x86/Makefile
+++ b/drivers/platform/x86/Makefile
@@ -105,3 +105,4 @@ obj-$(CONFIG_INTEL_ATOMISP2_PM) += intel_atomisp2_pm.o
obj-$(CONFIG_PCENGINES_APU2) += pcengines-apuv2.o
obj-$(CONFIG_INTEL_SPEED_SELECT_INTERFACE) += intel_speed_select_if/
obj-$(CONFIG_SYSTEM76_ACPI) += system76_acpi.o
+obj-$(CONFIG_INTEL_UNCORE_FREQ_CONTROL) += intel-uncore-frequency.o
diff --git a/drivers/platform/x86/asus-nb-wmi.c b/drivers/platform/x86/asus-nb-wmi.c
index b361c73636a4..6f12747a359a 100644
--- a/drivers/platform/x86/asus-nb-wmi.c
+++ b/drivers/platform/x86/asus-nb-wmi.c
@@ -471,6 +471,7 @@ static const struct key_entry asus_nb_wmi_keymap[] = {
{ KE_KEY, 0x67, { KEY_SWITCHVIDEOMODE } }, /* SDSP LCD + CRT + TV */
{ KE_KEY, 0x6B, { KEY_TOUCHPAD_TOGGLE } },
{ KE_IGNORE, 0x6E, }, /* Low Battery notification */
+ { KE_KEY, 0x71, { KEY_F13 } }, /* General-purpose button */
{ KE_KEY, 0x7a, { KEY_ALS_TOGGLE } }, /* Ambient Light Sensor Toggle */
{ KE_KEY, 0x7c, { KEY_MICMUTE } },
{ KE_KEY, 0x7D, { KEY_BLUETOOTH } }, /* Bluetooth Enable */
diff --git a/drivers/platform/x86/asus-wmi.c b/drivers/platform/x86/asus-wmi.c
index 821b08e01635..612ef5526226 100644
--- a/drivers/platform/x86/asus-wmi.c
+++ b/drivers/platform/x86/asus-wmi.c
@@ -33,9 +33,9 @@
#include <linux/seq_file.h>
#include <linux/platform_data/x86/asus-wmi.h>
#include <linux/platform_device.h>
-#include <linux/thermal.h>
#include <linux/acpi.h>
#include <linux/dmi.h>
+#include <linux/units.h>
#include <acpi/battery.h>
#include <acpi/video.h>
@@ -61,6 +61,7 @@ MODULE_LICENSE("GPL");
#define NOTIFY_KBD_BRTDWN 0xc5
#define NOTIFY_KBD_BRTTOGGLE 0xc7
#define NOTIFY_KBD_FBM 0x99
+#define NOTIFY_KBD_TTP 0xae
#define ASUS_WMI_FNLOCK_BIOS_DISABLED BIT(0)
@@ -81,6 +82,10 @@ MODULE_LICENSE("GPL");
#define ASUS_FAN_BOOST_MODE_SILENT_MASK 0x02
#define ASUS_FAN_BOOST_MODES_MASK 0x03
+#define ASUS_THROTTLE_THERMAL_POLICY_DEFAULT 0
+#define ASUS_THROTTLE_THERMAL_POLICY_OVERBOOST 1
+#define ASUS_THROTTLE_THERMAL_POLICY_SILENT 2
+
#define USB_INTEL_XUSB2PR 0xD0
#define PCI_DEVICE_ID_INTEL_LYNXPOINT_LP_XHCI 0x9c31
@@ -198,6 +203,9 @@ struct asus_wmi {
u8 fan_boost_mode_mask;
u8 fan_boost_mode;
+ bool throttle_thermal_policy_available;
+ u8 throttle_thermal_policy_mode;
+
// The RSOC controls the maximum charging percentage.
bool battery_rsoc_available;
@@ -512,13 +520,7 @@ static void kbd_led_update(struct asus_wmi *asus)
{
int ctrl_param = 0;
- /*
- * bits 0-2: level
- * bit 7: light on/off
- */
- if (asus->kbd_led_wk > 0)
- ctrl_param = 0x80 | (asus->kbd_led_wk & 0x7F);
-
+ ctrl_param = 0x80 | (asus->kbd_led_wk & 0x7F);
asus_wmi_set_devstate(ASUS_WMI_DEVID_KBD_BACKLIGHT, ctrl_param, NULL);
}
@@ -1512,9 +1514,8 @@ static ssize_t asus_hwmon_temp1(struct device *dev,
if (err < 0)
return err;
- value = DECI_KELVIN_TO_CELSIUS((value & 0xFFFF)) * 1000;
-
- return sprintf(buf, "%d\n", value);
+ return sprintf(buf, "%ld\n",
+ deci_kelvin_to_millicelsius(value & 0xFFFF));
}
/* Fan1 */
@@ -1724,6 +1725,107 @@ static ssize_t fan_boost_mode_store(struct device *dev,
// Fan boost mode: 0 - normal, 1 - overboost, 2 - silent
static DEVICE_ATTR_RW(fan_boost_mode);
+/* Throttle thermal policy ****************************************************/
+
+static int throttle_thermal_policy_check_present(struct asus_wmi *asus)
+{
+ u32 result;
+ int err;
+
+ asus->throttle_thermal_policy_available = false;
+
+ err = asus_wmi_get_devstate(asus,
+ ASUS_WMI_DEVID_THROTTLE_THERMAL_POLICY,
+ &result);
+ if (err) {
+ if (err == -ENODEV)
+ return 0;
+ return err;
+ }
+
+ if (result & ASUS_WMI_DSTS_PRESENCE_BIT)
+ asus->throttle_thermal_policy_available = true;
+
+ return 0;
+}
+
+static int throttle_thermal_policy_write(struct asus_wmi *asus)
+{
+ int err;
+ u8 value;
+ u32 retval;
+
+ value = asus->throttle_thermal_policy_mode;
+
+ err = asus_wmi_set_devstate(ASUS_WMI_DEVID_THROTTLE_THERMAL_POLICY,
+ value, &retval);
+ if (err) {
+ pr_warn("Failed to set throttle thermal policy: %d\n", err);
+ return err;
+ }
+
+ if (retval != 1) {
+ pr_warn("Failed to set throttle thermal policy (retval): 0x%x\n",
+ retval);
+ return -EIO;
+ }
+
+ return 0;
+}
+
+static int throttle_thermal_policy_set_default(struct asus_wmi *asus)
+{
+ if (!asus->throttle_thermal_policy_available)
+ return 0;
+
+ asus->throttle_thermal_policy_mode = ASUS_THROTTLE_THERMAL_POLICY_DEFAULT;
+ return throttle_thermal_policy_write(asus);
+}
+
+static int throttle_thermal_policy_switch_next(struct asus_wmi *asus)
+{
+ u8 new_mode = asus->throttle_thermal_policy_mode + 1;
+
+ if (new_mode > ASUS_THROTTLE_THERMAL_POLICY_SILENT)
+ new_mode = ASUS_THROTTLE_THERMAL_POLICY_DEFAULT;
+
+ asus->throttle_thermal_policy_mode = new_mode;
+ return throttle_thermal_policy_write(asus);
+}
+
+static ssize_t throttle_thermal_policy_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct asus_wmi *asus = dev_get_drvdata(dev);
+ u8 mode = asus->throttle_thermal_policy_mode;
+
+ return scnprintf(buf, PAGE_SIZE, "%d\n", mode);
+}
+
+static ssize_t throttle_thermal_policy_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ int result;
+ u8 new_mode;
+ struct asus_wmi *asus = dev_get_drvdata(dev);
+
+ result = kstrtou8(buf, 10, &new_mode);
+ if (result < 0)
+ return result;
+
+ if (new_mode > ASUS_THROTTLE_THERMAL_POLICY_SILENT)
+ return -EINVAL;
+
+ asus->throttle_thermal_policy_mode = new_mode;
+ result = throttle_thermal_policy_write(asus);
+ if (result)
+ return result;
+
+ return count;
+}
+
+// Throttle thermal policy: 0 - default, 1 - overboost, 2 - silent
+static DEVICE_ATTR_RW(throttle_thermal_policy);
+
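A hypothetical userspace sketch of driving the new attribute; the sysfs
path depends on the platform device name (the asus-nb-wmi location below
is an assumption, not something guaranteed by the patch):

	#include <fcntl.h>
	#include <stdio.h>
	#include <unistd.h>

	int main(void)
	{
		/* 0 - default, 1 - overboost, 2 - silent */
		int fd = open("/sys/devices/platform/asus-nb-wmi/throttle_thermal_policy",
			      O_WRONLY);

		if (fd < 0) {
			perror("open");
			return 1;
		}
		if (write(fd, "1", 1) != 1)
			perror("write");
		close(fd);
		return 0;
	}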
/* Backlight ******************************************************************/
static int read_backlight_power(struct asus_wmi *asus)
@@ -2005,6 +2107,11 @@ static void asus_wmi_handle_event_code(int code, struct asus_wmi *asus)
return;
}
+ if (asus->throttle_thermal_policy_available && code == NOTIFY_KBD_TTP) {
+ throttle_thermal_policy_switch_next(asus);
+ return;
+ }
+
if (is_display_toggle(code) && asus->driver->quirks->no_display_toggle)
return;
@@ -2155,6 +2262,7 @@ static struct attribute *platform_attributes[] = {
&dev_attr_lid_resume.attr,
&dev_attr_als_enable.attr,
&dev_attr_fan_boost_mode.attr,
+ &dev_attr_throttle_thermal_policy.attr,
NULL
};
@@ -2178,6 +2286,8 @@ static umode_t asus_sysfs_is_visible(struct kobject *kobj,
devid = ASUS_WMI_DEVID_ALS_ENABLE;
else if (attr == &dev_attr_fan_boost_mode.attr)
ok = asus->fan_boost_mode_available;
+ else if (attr == &dev_attr_throttle_thermal_policy.attr)
+ ok = asus->throttle_thermal_policy_available;
if (devid != -1)
ok = !(asus_wmi_get_devstate_simple(asus, devid) < 0);
@@ -2437,6 +2547,12 @@ static int asus_wmi_add(struct platform_device *pdev)
if (err)
goto fail_fan_boost_mode;
+ err = throttle_thermal_policy_check_present(asus);
+ if (err)
+ goto fail_throttle_thermal_policy;
+
+ throttle_thermal_policy_set_default(asus);
+
err = asus_wmi_sysfs_init(asus->platform_device);
if (err)
goto fail_sysfs;
@@ -2521,6 +2637,7 @@ fail_hwmon:
fail_input:
asus_wmi_sysfs_exit(asus->platform_device);
fail_sysfs:
+fail_throttle_thermal_policy:
fail_fan_boost_mode:
fail_platform:
kfree(asus);
diff --git a/drivers/platform/x86/gpd-pocket-fan.c b/drivers/platform/x86/gpd-pocket-fan.c
index be85ed966bf3..b471b86c28fe 100644
--- a/drivers/platform/x86/gpd-pocket-fan.c
+++ b/drivers/platform/x86/gpd-pocket-fan.c
@@ -16,17 +16,27 @@
#define MAX_SPEED 3
-static int temp_limits[3] = { 55000, 60000, 65000 };
+#define TEMP_LIMIT0_DEFAULT 55000
+#define TEMP_LIMIT1_DEFAULT 60000
+#define TEMP_LIMIT2_DEFAULT 65000
+
+#define HYSTERESIS_DEFAULT 3000
+
+#define SPEED_ON_AC_DEFAULT 2
+
+static int temp_limits[3] = {
+ TEMP_LIMIT0_DEFAULT, TEMP_LIMIT1_DEFAULT, TEMP_LIMIT2_DEFAULT,
+};
module_param_array(temp_limits, int, NULL, 0444);
MODULE_PARM_DESC(temp_limits,
"Millicelsius values above which the fan speed increases");
-static int hysteresis = 3000;
+static int hysteresis = HYSTERESIS_DEFAULT;
module_param(hysteresis, int, 0444);
MODULE_PARM_DESC(hysteresis,
"Hysteresis in millicelsius before lowering the fan speed");
-static int speed_on_ac = 2;
+static int speed_on_ac = SPEED_ON_AC_DEFAULT;
module_param(speed_on_ac, int, 0444);
MODULE_PARM_DESC(speed_on_ac,
"minimum fan speed to allow when system is powered by AC");
@@ -117,21 +127,24 @@ static int gpd_pocket_fan_probe(struct platform_device *pdev)
int i;
for (i = 0; i < ARRAY_SIZE(temp_limits); i++) {
- if (temp_limits[i] < 40000 || temp_limits[i] > 70000) {
+ if (temp_limits[i] < 20000 || temp_limits[i] > 90000) {
dev_err(&pdev->dev, "Invalid temp-limit %d (must be between 40000 and 70000)\n",
temp_limits[i]);
- return -EINVAL;
+ temp_limits[0] = TEMP_LIMIT0_DEFAULT;
+ temp_limits[1] = TEMP_LIMIT1_DEFAULT;
+ temp_limits[2] = TEMP_LIMIT2_DEFAULT;
+ break;
}
}
if (hysteresis < 1000 || hysteresis > 10000) {
dev_err(&pdev->dev, "Invalid hysteresis %d (must be between 1000 and 10000)\n",
hysteresis);
- return -EINVAL;
+ hysteresis = HYSTERESIS_DEFAULT;
}
if (speed_on_ac < 0 || speed_on_ac > MAX_SPEED) {
dev_err(&pdev->dev, "Invalid speed_on_ac %d (must be between 0 and 3)\n",
speed_on_ac);
- return -EINVAL;
+ speed_on_ac = SPEED_ON_AC_DEFAULT;
}
fan = devm_kzalloc(&pdev->dev, sizeof(*fan), GFP_KERNEL);
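The probe now degrades gracefully instead of failing: the modparams are
read-only (0444), so a bad value could never be corrected at runtime, and
falling back to the documented defaults keeps the fan managed. A minimal
standalone sketch of this validate-or-default pattern (the helper name is
illustrative, not part of the driver):

	#include <stdio.h>

	/* Return val if it lies within [lo, hi], otherwise def */
	static int validated_or_default(int val, int lo, int hi, int def)
	{
		return (val < lo || val > hi) ? def : val;
	}

	int main(void)
	{
		/* an out-of-range hysteresis of 20000 falls back to 3000 */
		printf("%d\n", validated_or_default(20000, 1000, 10000, 3000));
		return 0;
	}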
diff --git a/drivers/platform/x86/intel-hid.c b/drivers/platform/x86/intel-hid.c
index ef6d4bd77b1a..43d590250228 100644
--- a/drivers/platform/x86/intel-hid.c
+++ b/drivers/platform/x86/intel-hid.c
@@ -19,6 +19,7 @@ MODULE_LICENSE("GPL");
MODULE_AUTHOR("Alex Hung");
static const struct acpi_device_id intel_hid_ids[] = {
+ {"INT1051", 0},
{"INT33D5", 0},
{"", 0},
};
diff --git a/drivers/platform/x86/intel-uncore-frequency.c b/drivers/platform/x86/intel-uncore-frequency.c
new file mode 100644
index 000000000000..2b1a0734c3f8
--- /dev/null
+++ b/drivers/platform/x86/intel-uncore-frequency.c
@@ -0,0 +1,437 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Intel Uncore Frequency Setting
+ * Copyright (c) 2019, Intel Corporation.
+ * All rights reserved.
+ *
+ * Provide an interface to set MSR 620 with per-die granularity. On CPU
+ * online, one control CPU per die is identified to read/write the limit.
+ * This control CPU is changed if it goes offline. When the last CPU in a
+ * die goes offline, the sysfs object for that die is removed.
+ * The majority of actual code is related to sysfs create and read/write
+ * attributes.
+ *
+ * Author: Srinivas Pandruvada <srinivas.pandruvada@linux.intel.com>
+ */
+
+#include <linux/cpu.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/suspend.h>
+#include <asm/cpu_device_id.h>
+#include <asm/intel-family.h>
+
+#define MSR_UNCORE_RATIO_LIMIT 0x620
+#define UNCORE_FREQ_KHZ_MULTIPLIER 100000
+
+/**
+ * struct uncore_data - Encapsulate all uncore data
+ * @stored_uncore_data: Last user changed MSR 620 value, which will be restored
+ * on system resume.
+ * @initial_min_freq_khz: Sampled minimum uncore frequency at driver init
+ * @initial_max_freq_khz: Sampled maximum uncore frequency at driver init
+ * @control_cpu: Designated CPU for a die to read/write
+ * @valid: Mark the data valid/invalid
+ *
+ * This structure is used to encapsulate all data related to uncore sysfs
+ * settings for a die/package.
+ */
+struct uncore_data {
+ struct kobject kobj;
+ u64 stored_uncore_data;
+ u32 initial_min_freq_khz;
+ u32 initial_max_freq_khz;
+ int control_cpu;
+ bool valid;
+};
+
+#define to_uncore_data(a) container_of(a, struct uncore_data, kobj)
+
+/* Max instances for uncore data, one for each die */
+static int uncore_max_entries __read_mostly;
+/* Storage for uncore data for all instances */
+static struct uncore_data *uncore_instances;
+/* Root of all uncore sysfs kobjs */
+struct kobject uncore_root_kobj;
+/* Stores the CPU mask of the target CPUs to use during uncore read/write */
+static cpumask_t uncore_cpu_mask;
+/* CPU online callback register instance */
+static enum cpuhp_state uncore_hp_state __read_mostly;
+/* Mutex to control all mutual exclusions */
+static DEFINE_MUTEX(uncore_lock);
+
+struct uncore_attr {
+ struct attribute attr;
+ ssize_t (*show)(struct kobject *kobj,
+ struct attribute *attr, char *buf);
+ ssize_t (*store)(struct kobject *kobj,
+ struct attribute *attr, const char *c, ssize_t count);
+};
+
+#define define_one_uncore_ro(_name) \
+static struct uncore_attr _name = \
+__ATTR(_name, 0444, show_##_name, NULL)
+
+#define define_one_uncore_rw(_name) \
+static struct uncore_attr _name = \
+__ATTR(_name, 0644, show_##_name, store_##_name)
+
+#define show_uncore_data(member_name) \
+ static ssize_t show_##member_name(struct kobject *kobj, \
+ struct attribute *attr, \
+ char *buf) \
+ { \
+ struct uncore_data *data = to_uncore_data(kobj); \
+ return scnprintf(buf, PAGE_SIZE, "%u\n", \
+ data->member_name); \
+ } \
+ define_one_uncore_ro(member_name)
+
+show_uncore_data(initial_min_freq_khz);
+show_uncore_data(initial_max_freq_khz);
+
+/* Common function to read MSR 0x620 and read min/max */
+static int uncore_read_ratio(struct uncore_data *data, unsigned int *min,
+ unsigned int *max)
+{
+ u64 cap;
+ int ret;
+
+ ret = rdmsrl_on_cpu(data->control_cpu, MSR_UNCORE_RATIO_LIMIT, &cap);
+ if (ret)
+ return ret;
+
+ *max = (cap & 0x7F) * UNCORE_FREQ_KHZ_MULTIPLIER;
+ *min = ((cap & GENMASK(14, 8)) >> 8) * UNCORE_FREQ_KHZ_MULTIPLIER;
+
+ return 0;
+}
+
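A standalone sketch of the MSR 0x620 decoding used above: bits 6:0 carry
the max ratio, bits 14:8 the min ratio, and one ratio unit corresponds to
100000 kHz (the raw value below is made up for illustration):

	#include <stdio.h>

	int main(void)
	{
		unsigned long long cap = 0x0818;	/* example MSR 0x620 value */
		unsigned int max_khz = (cap & 0x7f) * 100000;	     /* 0x18 -> 2400000 */
		unsigned int min_khz = ((cap >> 8) & 0x7f) * 100000; /* 0x08 -> 800000 */

		printf("min %u kHz, max %u kHz\n", min_khz, max_khz);
		return 0;
	}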
+/* Common function to set min/max ratios to be used by sysfs callbacks */
+static int uncore_write_ratio(struct uncore_data *data, unsigned int input,
+ int set_max)
+{
+ int ret;
+ u64 cap;
+
+ mutex_lock(&uncore_lock);
+
+ input /= UNCORE_FREQ_KHZ_MULTIPLIER;
+ if (!input || input > 0x7F) {
+ ret = -EINVAL;
+ goto finish_write;
+ }
+
+ ret = rdmsrl_on_cpu(data->control_cpu, MSR_UNCORE_RATIO_LIMIT, &cap);
+ if (ret)
+ goto finish_write;
+
+ if (set_max) {
+ cap &= ~0x7F;
+ cap |= input;
+ } else {
+ cap &= ~GENMASK(14, 8);
+ cap |= (input << 8);
+ }
+
+ ret = wrmsrl_on_cpu(data->control_cpu, MSR_UNCORE_RATIO_LIMIT, cap);
+ if (ret)
+ goto finish_write;
+
+ data->stored_uncore_data = cap;
+
+finish_write:
+ mutex_unlock(&uncore_lock);
+
+ return ret;
+}
+
+static ssize_t store_min_max_freq_khz(struct kobject *kobj,
+ struct attribute *attr,
+ const char *buf, ssize_t count,
+ int min_max)
+{
+ struct uncore_data *data = to_uncore_data(kobj);
+ unsigned int input;
+ int ret;
+
+ if (kstrtouint(buf, 10, &input))
+ return -EINVAL;
+
+ ret = uncore_write_ratio(data, input, min_max);
+ if (ret)
+ return ret;
+
+ return count;
+}
+
+static ssize_t show_min_max_freq_khz(struct kobject *kobj,
+ struct attribute *attr,
+ char *buf, int min_max)
+{
+ struct uncore_data *data = to_uncore_data(kobj);
+ unsigned int min, max;
+ int ret;
+
+ mutex_lock(&uncore_lock);
+ ret = uncore_read_ratio(data, &min, &max);
+ mutex_unlock(&uncore_lock);
+ if (ret)
+ return ret;
+
+ if (min_max)
+ return sprintf(buf, "%u\n", max);
+
+ return sprintf(buf, "%u\n", min);
+}
+
+#define store_uncore_min_max(name, min_max) \
+ static ssize_t store_##name(struct kobject *kobj, \
+ struct attribute *attr, \
+ const char *buf, ssize_t count) \
+ { \
+ \
+ return store_min_max_freq_khz(kobj, attr, buf, count, \
+ min_max); \
+ }
+
+#define show_uncore_min_max(name, min_max) \
+ static ssize_t show_##name(struct kobject *kobj, \
+ struct attribute *attr, char *buf) \
+ { \
+ \
+ return show_min_max_freq_khz(kobj, attr, buf, min_max); \
+ }
+
+store_uncore_min_max(min_freq_khz, 0);
+store_uncore_min_max(max_freq_khz, 1);
+
+show_uncore_min_max(min_freq_khz, 0);
+show_uncore_min_max(max_freq_khz, 1);
+
+define_one_uncore_rw(min_freq_khz);
+define_one_uncore_rw(max_freq_khz);
+
+static struct attribute *uncore_attrs[] = {
+ &initial_min_freq_khz.attr,
+ &initial_max_freq_khz.attr,
+ &max_freq_khz.attr,
+ &min_freq_khz.attr,
+ NULL
+};
+
+static struct kobj_type uncore_ktype = {
+ .sysfs_ops = &kobj_sysfs_ops,
+ .default_attrs = uncore_attrs,
+};
+
+static struct kobj_type uncore_root_ktype = {
+ .sysfs_ops = &kobj_sysfs_ops,
+};
+
+/* Caller provides protection */
+static struct uncore_data *uncore_get_instance(unsigned int cpu)
+{
+ int id = topology_logical_die_id(cpu);
+
+ if (id >= 0 && id < uncore_max_entries)
+ return &uncore_instances[id];
+
+ return NULL;
+}
+
+static void uncore_add_die_entry(int cpu)
+{
+ struct uncore_data *data;
+
+ mutex_lock(&uncore_lock);
+ data = uncore_get_instance(cpu);
+ if (!data) {
+ mutex_unlock(&uncore_lock);
+ return;
+ }
+
+ if (data->valid) {
+ /* control cpu changed */
+ data->control_cpu = cpu;
+ } else {
+ char str[64];
+ int ret;
+
+ memset(data, 0, sizeof(*data));
+ sprintf(str, "package_%02d_die_%02d",
+ topology_physical_package_id(cpu),
+ topology_die_id(cpu));
+
+ uncore_read_ratio(data, &data->initial_min_freq_khz,
+ &data->initial_max_freq_khz);
+
+ ret = kobject_init_and_add(&data->kobj, &uncore_ktype,
+ &uncore_root_kobj, str);
+ if (!ret) {
+ data->control_cpu = cpu;
+ data->valid = true;
+ }
+ }
+ mutex_unlock(&uncore_lock);
+}
+
+/* Last CPU in this die is offline, so remove sysfs entries */
+static void uncore_remove_die_entry(int cpu)
+{
+ struct uncore_data *data;
+
+ mutex_lock(&uncore_lock);
+ data = uncore_get_instance(cpu);
+ if (data) {
+ kobject_put(&data->kobj);
+ data->control_cpu = -1;
+ data->valid = false;
+ }
+ mutex_unlock(&uncore_lock);
+}
+
+static int uncore_event_cpu_online(unsigned int cpu)
+{
+ int target;
+
+ /* Check if there is an online cpu in the die for uncore MSR access */
+ target = cpumask_any_and(&uncore_cpu_mask, topology_die_cpumask(cpu));
+ if (target < nr_cpu_ids)
+ return 0;
+
+ /* Use this CPU on this die as a control CPU */
+ cpumask_set_cpu(cpu, &uncore_cpu_mask);
+ uncore_add_die_entry(cpu);
+
+ return 0;
+}
+
+static int uncore_event_cpu_offline(unsigned int cpu)
+{
+ int target;
+
+ /* Check if existing cpu is used for uncore MSRs */
+ if (!cpumask_test_and_clear_cpu(cpu, &uncore_cpu_mask))
+ return 0;
+
+ /* Find a new cpu to set uncore MSR */
+ target = cpumask_any_but(topology_die_cpumask(cpu), cpu);
+
+ if (target < nr_cpu_ids) {
+ cpumask_set_cpu(target, &uncore_cpu_mask);
+ uncore_add_die_entry(target);
+ } else {
+ uncore_remove_die_entry(cpu);
+ }
+
+ return 0;
+}
+
+static int uncore_pm_notify(struct notifier_block *nb, unsigned long mode,
+ void *_unused)
+{
+ int cpu;
+
+ switch (mode) {
+ case PM_POST_HIBERNATION:
+ case PM_POST_RESTORE:
+ case PM_POST_SUSPEND:
+ for_each_cpu(cpu, &uncore_cpu_mask) {
+ struct uncore_data *data;
+ int ret;
+
+ data = uncore_get_instance(cpu);
+ if (!data || !data->valid || !data->stored_uncore_data)
+ continue;
+
+ ret = wrmsrl_on_cpu(cpu, MSR_UNCORE_RATIO_LIMIT,
+ data->stored_uncore_data);
+ if (ret)
+ return ret;
+ }
+ break;
+ default:
+ break;
+ }
+ return 0;
+}
+
+static struct notifier_block uncore_pm_nb = {
+ .notifier_call = uncore_pm_notify,
+};
+
+#define ICPU(model) { X86_VENDOR_INTEL, 6, model, X86_FEATURE_ANY, }
+
+static const struct x86_cpu_id intel_uncore_cpu_ids[] = {
+ ICPU(INTEL_FAM6_BROADWELL_G),
+ ICPU(INTEL_FAM6_BROADWELL_X),
+ ICPU(INTEL_FAM6_BROADWELL_D),
+ ICPU(INTEL_FAM6_SKYLAKE_X),
+ ICPU(INTEL_FAM6_ICELAKE_X),
+ ICPU(INTEL_FAM6_ICELAKE_D),
+ {}
+};
+
+static int __init intel_uncore_init(void)
+{
+ const struct x86_cpu_id *id;
+ int ret;
+
+ id = x86_match_cpu(intel_uncore_cpu_ids);
+ if (!id)
+ return -ENODEV;
+
+ uncore_max_entries = topology_max_packages() *
+ topology_max_die_per_package();
+ uncore_instances = kcalloc(uncore_max_entries,
+ sizeof(*uncore_instances), GFP_KERNEL);
+ if (!uncore_instances)
+ return -ENOMEM;
+
+ ret = kobject_init_and_add(&uncore_root_kobj, &uncore_root_ktype,
+ &cpu_subsys.dev_root->kobj,
+ "intel_uncore_frequency");
+ if (ret)
+ goto err_free;
+
+ ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN,
+ "platform/x86/uncore-freq:online",
+ uncore_event_cpu_online,
+ uncore_event_cpu_offline);
+ if (ret < 0)
+ goto err_rem_kobj;
+
+ uncore_hp_state = ret;
+
+ ret = register_pm_notifier(&uncore_pm_nb);
+ if (ret)
+ goto err_rem_state;
+
+ return 0;
+
+err_rem_state:
+ cpuhp_remove_state(uncore_hp_state);
+err_rem_kobj:
+ kobject_put(&uncore_root_kobj);
+err_free:
+ kfree(uncore_instances);
+
+ return ret;
+}
+module_init(intel_uncore_init)
+
+static void __exit intel_uncore_exit(void)
+{
+ int i;
+
+ unregister_pm_notifier(&uncore_pm_nb);
+ cpuhp_remove_state(uncore_hp_state);
+ for (i = 0; i < uncore_max_entries; ++i) {
+ if (uncore_instances[i].valid)
+ kobject_put(&uncore_instances[i].kobj);
+ }
+ kobject_put(&uncore_root_kobj);
+ kfree(uncore_instances);
+}
+module_exit(intel_uncore_exit)
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("Intel Uncore Frequency Limits Driver");
diff --git a/drivers/platform/x86/intel_atomisp2_pm.c b/drivers/platform/x86/intel_atomisp2_pm.c
index b0f421fea2a5..805fc0d8515c 100644
--- a/drivers/platform/x86/intel_atomisp2_pm.c
+++ b/drivers/platform/x86/intel_atomisp2_pm.c
@@ -1,8 +1,8 @@
// SPDX-License-Identifier: GPL-2.0
/*
- * Dummy driver for Intel's Image Signal Processor found on Bay and Cherry
- * Trail devices. The sole purpose of this driver is to allow the ISP to
- * be put in D3.
+ * Dummy driver for Intel's Image Signal Processor found on Bay Trail
+ * and Cherry Trail devices. The sole purpose of this driver is to allow
+ * the ISP to be put in D3.
*
* Copyright (C) 2018 Hans de Goede <hdegoede@redhat.com>
*
@@ -36,8 +36,7 @@
static int isp_set_power(struct pci_dev *dev, bool enable)
{
unsigned long timeout;
- u32 val = enable ? ISPSSPM0_IUNIT_POWER_ON :
- ISPSSPM0_IUNIT_POWER_OFF;
+ u32 val = enable ? ISPSSPM0_IUNIT_POWER_ON : ISPSSPM0_IUNIT_POWER_OFF;
/* Write to ISPSSPM0 bit[1:0] to power on/off the IUNIT */
iosf_mbi_modify(BT_MBI_UNIT_PMC, MBI_REG_READ, ISPSSPM0,
@@ -45,29 +44,25 @@ static int isp_set_power(struct pci_dev *dev, bool enable)
/*
* There should be no IUNIT access while power-down is
- * in progress HW sighting: 4567865
+ * in progress. HW sighting: 4567865.
* Wait up to 50 ms for the IUNIT to shut down.
* And we do the same for power on.
*/
timeout = jiffies + msecs_to_jiffies(50);
- while (1) {
+ do {
u32 tmp;
/* Wait until ISPSSPM0 bit[25:24] shows the right value */
iosf_mbi_read(BT_MBI_UNIT_PMC, MBI_REG_READ, ISPSSPM0, &tmp);
tmp = (tmp & ISPSSPM0_ISPSSS_MASK) >> ISPSSPM0_ISPSSS_OFFSET;
if (tmp == val)
- break;
+ return 0;
- if (time_after(jiffies, timeout)) {
- dev_err(&dev->dev, "IUNIT power-%s timeout.\n",
- enable ? "on" : "off");
- return -EBUSY;
- }
usleep_range(1000, 2000);
- }
+ } while (time_before(jiffies, timeout));
- return 0;
+ dev_err(&dev->dev, "IUNIT power-%s timeout.\n", enable ? "on" : "off");
+ return -EBUSY;
}
static int isp_probe(struct pci_dev *dev, const struct pci_device_id *id)
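The rewritten wait loop is the usual jiffies-bounded polling idiom: test,
sleep briefly, re-test until the deadline passes. A rough userspace
analogue assuming only POSIX (hw_ready() is a stand-in for the ISPSSPM0
status read):

	#include <stdbool.h>
	#include <stdio.h>
	#include <time.h>
	#include <unistd.h>

	static bool hw_ready(void)
	{
		static int polls;
		return ++polls > 3;	/* pretend the IUNIT settles after 3 polls */
	}

	int main(void)
	{
		struct timespec start, now;

		clock_gettime(CLOCK_MONOTONIC, &start);
		do {
			if (hw_ready()) {
				puts("powered");
				return 0;
			}
			usleep(1000);	/* driver uses usleep_range(1000, 2000) */
			clock_gettime(CLOCK_MONOTONIC, &now);
		} while ((now.tv_sec - start.tv_sec) * 1000000000L +
			 (now.tv_nsec - start.tv_nsec) < 50 * 1000000L);	/* 50 ms */

		fputs("timeout\n", stderr);
		return 1;
	}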
diff --git a/drivers/platform/x86/intel_cht_int33fe_typec.c b/drivers/platform/x86/intel_cht_int33fe_typec.c
index 2d097fc2dd46..04138215956b 100644
--- a/drivers/platform/x86/intel_cht_int33fe_typec.c
+++ b/drivers/platform/x86/intel_cht_int33fe_typec.c
@@ -36,30 +36,6 @@ enum {
INT33FE_NODE_MAX,
};
-static const struct software_node nodes[];
-
-static const struct software_node_ref_args pi3usb30532_ref = {
- &nodes[INT33FE_NODE_PI3USB30532]
-};
-
-static const struct software_node_ref_args dp_ref = {
- &nodes[INT33FE_NODE_DISPLAYPORT]
-};
-
-static struct software_node_ref_args mux_ref;
-
-static const struct software_node_reference usb_connector_refs[] = {
- { "orientation-switch", 1, &pi3usb30532_ref},
- { "mode-switch", 1, &pi3usb30532_ref},
- { "displayport", 1, &dp_ref},
- { }
-};
-
-static const struct software_node_reference fusb302_refs[] = {
- { "usb-role-switch", 1, &mux_ref},
- { }
-};
-
/*
* Grrr I severely dislike buggy BIOS-es. At least one BIOS enumerates
* the max17047 both through the INT33FE ACPI device (it is right there
@@ -95,8 +71,18 @@ static const struct property_entry max17047_props[] = {
{ }
};
+/*
+ * We are not using an inline property here because inline properties are
+ * constant, and we need to adjust this one at runtime to point to the
+ * real software node.
+ */
+static struct software_node_ref_args fusb302_mux_refs[] = {
+ { .node = NULL },
+};
+
static const struct property_entry fusb302_props[] = {
PROPERTY_ENTRY_STRING("linux,extcon-name", "cht_wcove_pwrsrc"),
+ PROPERTY_ENTRY_REF_ARRAY("usb-role-switch", fusb302_mux_refs),
{ }
};
@@ -112,6 +98,8 @@ static const u32 snk_pdo[] = {
PDO_VAR(5000, 12000, 3000),
};
+static const struct software_node nodes[];
+
static const struct property_entry usb_connector_props[] = {
PROPERTY_ENTRY_STRING("data-role", "dual"),
PROPERTY_ENTRY_STRING("power-role", "dual"),
@@ -119,15 +107,21 @@ static const struct property_entry usb_connector_props[] = {
PROPERTY_ENTRY_U32_ARRAY("source-pdos", src_pdo),
PROPERTY_ENTRY_U32_ARRAY("sink-pdos", snk_pdo),
PROPERTY_ENTRY_U32("op-sink-microwatt", 2500000),
+ PROPERTY_ENTRY_REF("orientation-switch",
+ &nodes[INT33FE_NODE_PI3USB30532]),
+ PROPERTY_ENTRY_REF("mode-switch",
+ &nodes[INT33FE_NODE_PI3USB30532]),
+ PROPERTY_ENTRY_REF("displayport",
+ &nodes[INT33FE_NODE_DISPLAYPORT]),
{ }
};
static const struct software_node nodes[] = {
- { "fusb302", NULL, fusb302_props, fusb302_refs },
+ { "fusb302", NULL, fusb302_props },
{ "max17047", NULL, max17047_props },
{ "pi3usb30532" },
{ "displayport" },
- { "connector", &nodes[0], usb_connector_props, usb_connector_refs },
+ { "connector", &nodes[0], usb_connector_props },
{ }
};
@@ -163,9 +157,10 @@ static void cht_int33fe_remove_nodes(struct cht_int33fe_data *data)
{
software_node_unregister_nodes(nodes);
- if (mux_ref.node) {
- fwnode_handle_put(software_node_fwnode(mux_ref.node));
- mux_ref.node = NULL;
+ if (fusb302_mux_refs[0].node) {
+ fwnode_handle_put(
+ software_node_fwnode(fusb302_mux_refs[0].node));
+ fusb302_mux_refs[0].node = NULL;
}
if (data->dp) {
@@ -177,25 +172,31 @@ static void cht_int33fe_remove_nodes(struct cht_int33fe_data *data)
static int cht_int33fe_add_nodes(struct cht_int33fe_data *data)
{
+ const struct software_node *mux_ref_node;
int ret;
- ret = software_node_register_nodes(nodes);
- if (ret)
- return ret;
-
- /* The devices that are not created in this driver need extra steps. */
-
/*
* There is no ACPI device node for the USB role mux, so we need to wait
* until the mux driver has created the software node for the mux device.
* This means we depend on the mux driver. This function will return
* -EPROBE_DEFER until the mux device is registered.
*/
- mux_ref.node = software_node_find_by_name(NULL, "intel-xhci-usb-sw");
- if (!mux_ref.node) {
- ret = -EPROBE_DEFER;
- goto err_remove_nodes;
- }
+ mux_ref_node = software_node_find_by_name(NULL, "intel-xhci-usb-sw");
+ if (!mux_ref_node)
+ return -EPROBE_DEFER;
+
+ /*
+ * Update node used in "usb-role-switch" property. Note that we
+ * rely on software_node_register_nodes() to use the original
+ * instance of properties instead of copying them.
+ */
+ fusb302_mux_refs[0].node = mux_ref_node;
+
+ ret = software_node_register_nodes(nodes);
+ if (ret)
+ return ret;
+
+ /* The devices that are not created in this driver need extra steps. */
/*
* The DP connector does have ACPI device node. In this case we can just
diff --git a/drivers/platform/x86/intel_ips.h b/drivers/platform/x86/intel_ips.h
index 512ad234ad0d..35ed9711c7b9 100644
--- a/drivers/platform/x86/intel_ips.h
+++ b/drivers/platform/x86/intel_ips.h
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* Copyright (c) 2010 Intel Corporation
*/
diff --git a/drivers/platform/x86/intel_menlow.c b/drivers/platform/x86/intel_menlow.c
index b102f6dd5693..101d7e791a13 100644
--- a/drivers/platform/x86/intel_menlow.c
+++ b/drivers/platform/x86/intel_menlow.c
@@ -22,6 +22,7 @@
#include <linux/slab.h>
#include <linux/thermal.h>
#include <linux/types.h>
+#include <linux/units.h>
MODULE_AUTHOR("Thomas Sujith");
MODULE_AUTHOR("Zhang Rui");
@@ -302,8 +303,10 @@ static ssize_t aux_show(struct device *dev, struct device_attribute *dev_attr,
int result;
result = sensor_get_auxtrip(attr->handle, idx, &value);
+ if (result)
+ return result;
- return result ? result : sprintf(buf, "%lu", DECI_KELVIN_TO_CELSIUS(value));
+ return sprintf(buf, "%lu", deci_kelvin_to_celsius(value));
}
static ssize_t aux0_show(struct device *dev,
@@ -332,8 +335,8 @@ static ssize_t aux_store(struct device *dev, struct device_attribute *dev_attr,
if (value < 0)
return -EINVAL;
- result = sensor_set_auxtrip(attr->handle, idx,
- CELSIUS_TO_DECI_KELVIN(value));
+ result = sensor_set_auxtrip(attr->handle, idx,
+ celsius_to_deci_kelvin(value));
return result ? result : count;
}
diff --git a/drivers/platform/x86/intel_mid_powerbtn.c b/drivers/platform/x86/intel_mid_powerbtn.c
index 292bace83f1e..6f436836fe50 100644
--- a/drivers/platform/x86/intel_mid_powerbtn.c
+++ b/drivers/platform/x86/intel_mid_powerbtn.c
@@ -146,9 +146,10 @@ static int mid_pb_probe(struct platform_device *pdev)
input_set_capability(input, EV_KEY, KEY_POWER);
- ddata = (struct mid_pb_ddata *)id->driver_data;
+ ddata = devm_kmemdup(&pdev->dev, (void *)id->driver_data,
+ sizeof(*ddata), GFP_KERNEL);
if (!ddata)
- return -ENODATA;
+ return -ENOMEM;
ddata->dev = &pdev->dev;
ddata->irq = irq;
diff --git a/drivers/platform/x86/intel_pmc_core.c b/drivers/platform/x86/intel_pmc_core.c
index 571b4754477c..144faa8bad3d 100644
--- a/drivers/platform/x86/intel_pmc_core.c
+++ b/drivers/platform/x86/intel_pmc_core.c
@@ -49,7 +49,7 @@ static const struct pmc_bit_map spt_pll_map[] = {
{"GEN2 USB2PCIE2 PLL", SPT_PMC_BIT_MPHY_CMN_LANE1},
{"DMIPCIE3 PLL", SPT_PMC_BIT_MPHY_CMN_LANE2},
{"SATA PLL", SPT_PMC_BIT_MPHY_CMN_LANE3},
- {},
+ {}
};
static const struct pmc_bit_map spt_mphy_map[] = {
@@ -69,7 +69,7 @@ static const struct pmc_bit_map spt_mphy_map[] = {
{"MPHY CORE LANE 13", SPT_PMC_BIT_MPHY_LANE13},
{"MPHY CORE LANE 14", SPT_PMC_BIT_MPHY_LANE14},
{"MPHY CORE LANE 15", SPT_PMC_BIT_MPHY_LANE15},
- {},
+ {}
};
static const struct pmc_bit_map spt_pfear_map[] = {
@@ -113,7 +113,12 @@ static const struct pmc_bit_map spt_pfear_map[] = {
{"CSME_SMS1", SPT_PMC_BIT_CSME_SMS1},
{"CSME_RTC", SPT_PMC_BIT_CSME_RTC},
{"CSME_PSF", SPT_PMC_BIT_CSME_PSF},
- {},
+ {}
+};
+
+static const struct pmc_bit_map *ext_spt_pfear_map[] = {
+ spt_pfear_map,
+ NULL
};
static const struct pmc_bit_map spt_ltr_show_map[] = {
@@ -142,7 +147,7 @@ static const struct pmc_bit_map spt_ltr_show_map[] = {
};
static const struct pmc_reg_map spt_reg_map = {
- .pfear_sts = spt_pfear_map,
+ .pfear_sts = ext_spt_pfear_map,
.mphy_sts = spt_mphy_map,
.pll_sts = spt_pll_map,
.ltr_show_sts = spt_ltr_show_map,
@@ -186,7 +191,10 @@ static const struct pmc_bit_map cnp_pfear_map[] = {
{"SDX", BIT(4)},
{"SPE", BIT(5)},
{"Fuse", BIT(6)},
- /* Reserved for Cannon Lake but valid for Ice Lake and Comet Lake */
+ /*
+ * Reserved for Cannon Lake but valid for Ice Lake, Comet Lake,
+ * Tiger Lake and Elkhart Lake.
+ */
{"SBR8", BIT(7)},
{"CSME_FSC", BIT(0)},
@@ -230,11 +238,22 @@ static const struct pmc_bit_map cnp_pfear_map[] = {
{"HDA_PGD4", BIT(2)},
{"HDA_PGD5", BIT(3)},
{"HDA_PGD6", BIT(4)},
- /* Reserved for Cannon Lake but valid for Ice Lake and Comet Lake */
+ /*
+ * Reserved for Cannon Lake but valid for Ice Lake, Comet Lake,
+ * Tiger Lake and Elkhart Lake.
+ */
{"PSF6", BIT(5)},
{"PSF7", BIT(6)},
{"PSF8", BIT(7)},
+ {}
+};
+
+static const struct pmc_bit_map *ext_cnp_pfear_map[] = {
+ cnp_pfear_map,
+ NULL
+};
+static const struct pmc_bit_map icl_pfear_map[] = {
/* Ice Lake generation onwards only */
{"RES_65", BIT(0)},
{"RES_66", BIT(1)},
@@ -247,6 +266,30 @@ static const struct pmc_bit_map cnp_pfear_map[] = {
{}
};
+static const struct pmc_bit_map *ext_icl_pfear_map[] = {
+ cnp_pfear_map,
+ icl_pfear_map,
+ NULL
+};
+
+static const struct pmc_bit_map tgl_pfear_map[] = {
+ /* Tiger Lake and Elkhart Lake generation onwards only */
+ {"PSF9", BIT(0)},
+ {"RES_66", BIT(1)},
+ {"RES_67", BIT(2)},
+ {"RES_68", BIT(3)},
+ {"RES_69", BIT(4)},
+ {"RES_70", BIT(5)},
+ {"TBTLSX", BIT(6)},
+ {}
+};
+
+static const struct pmc_bit_map *ext_tgl_pfear_map[] = {
+ cnp_pfear_map,
+ tgl_pfear_map,
+ NULL
+};
+
static const struct pmc_bit_map cnp_slps0_dbg0_map[] = {
{"AUDIO_D3", BIT(0)},
{"OTG_D3", BIT(1)},
@@ -300,7 +343,7 @@ static const struct pmc_bit_map *cnp_slps0_dbg_maps[] = {
cnp_slps0_dbg0_map,
cnp_slps0_dbg1_map,
cnp_slps0_dbg2_map,
- NULL,
+ NULL
};
static const struct pmc_bit_map cnp_ltr_show_map[] = {
@@ -334,7 +377,7 @@ static const struct pmc_bit_map cnp_ltr_show_map[] = {
};
static const struct pmc_reg_map cnp_reg_map = {
- .pfear_sts = cnp_pfear_map,
+ .pfear_sts = ext_cnp_pfear_map,
.slp_s0_offset = CNP_PMC_SLP_S0_RES_COUNTER_OFFSET,
.slps0_dbg_maps = cnp_slps0_dbg_maps,
.ltr_show_sts = cnp_ltr_show_map,
@@ -350,7 +393,7 @@ static const struct pmc_reg_map cnp_reg_map = {
};
static const struct pmc_reg_map icl_reg_map = {
- .pfear_sts = cnp_pfear_map,
+ .pfear_sts = ext_icl_pfear_map,
.slp_s0_offset = CNP_PMC_SLP_S0_RES_COUNTER_OFFSET,
.slps0_dbg_maps = cnp_slps0_dbg_maps,
.ltr_show_sts = cnp_ltr_show_map,
@@ -365,18 +408,29 @@ static const struct pmc_reg_map icl_reg_map = {
.ltr_ignore_max = ICL_NUM_IP_IGN_ALLOWED,
};
-static inline u8 pmc_core_reg_read_byte(struct pmc_dev *pmcdev, int offset)
-{
- return readb(pmcdev->regbase + offset);
-}
+static const struct pmc_reg_map tgl_reg_map = {
+ .pfear_sts = ext_tgl_pfear_map,
+ .slp_s0_offset = CNP_PMC_SLP_S0_RES_COUNTER_OFFSET,
+ .slps0_dbg_maps = cnp_slps0_dbg_maps,
+ .ltr_show_sts = cnp_ltr_show_map,
+ .msr_sts = msr_map,
+ .slps0_dbg_offset = CNP_PMC_SLPS0_DBG_OFFSET,
+ .ltr_ignore_offset = CNP_PMC_LTR_IGNORE_OFFSET,
+ .regmap_length = CNP_PMC_MMIO_REG_LEN,
+ .ppfear0_offset = CNP_PMC_HOST_PPFEAR0A,
+ .ppfear_buckets = ICL_PPFEAR_NUM_ENTRIES,
+ .pm_cfg_offset = CNP_PMC_PM_CFG_OFFSET,
+ .pm_read_disable_bit = CNP_PMC_READ_DISABLE_BIT,
+ .ltr_ignore_max = TGL_NUM_IP_IGN_ALLOWED,
+};
static inline u32 pmc_core_reg_read(struct pmc_dev *pmcdev, int reg_offset)
{
return readl(pmcdev->regbase + reg_offset);
}
-static inline void pmc_core_reg_write(struct pmc_dev *pmcdev, int
- reg_offset, u32 val)
+static inline void pmc_core_reg_write(struct pmc_dev *pmcdev, int reg_offset,
+ u32 val)
{
writel(val, pmcdev->regbase + reg_offset);
}
@@ -412,20 +466,25 @@ static int pmc_core_check_read_lock_bit(void)
#if IS_ENABLED(CONFIG_DEBUG_FS)
static bool slps0_dbg_latch;
-static void pmc_core_display_map(struct seq_file *s, int index,
- u8 pf_reg, const struct pmc_bit_map *pf_map)
+static inline u8 pmc_core_reg_read_byte(struct pmc_dev *pmcdev, int offset)
+{
+ return readb(pmcdev->regbase + offset);
+}
+
+static void pmc_core_display_map(struct seq_file *s, int index, int idx, int ip,
+ u8 pf_reg, const struct pmc_bit_map **pf_map)
{
seq_printf(s, "PCH IP: %-2d - %-32s\tState: %s\n",
- index, pf_map[index].name,
- pf_map[index].bit_mask & pf_reg ? "Off" : "On");
+ ip, pf_map[idx][index].name,
+ pf_map[idx][index].bit_mask & pf_reg ? "Off" : "On");
}
static int pmc_core_ppfear_show(struct seq_file *s, void *unused)
{
struct pmc_dev *pmcdev = s->private;
- const struct pmc_bit_map *map = pmcdev->map->pfear_sts;
+ const struct pmc_bit_map **maps = pmcdev->map->pfear_sts;
u8 pf_regs[PPFEAR_MAX_NUM_ENTRIES];
- int index, iter;
+ int index, iter, idx, ip = 0;
iter = pmcdev->map->ppfear0_offset;
@@ -433,9 +492,12 @@ static int pmc_core_ppfear_show(struct seq_file *s, void *unused)
index < PPFEAR_MAX_NUM_ENTRIES; index++, iter++)
pf_regs[index] = pmc_core_reg_read_byte(pmcdev, iter);
- for (index = 0; map[index].name &&
- index < pmcdev->map->ppfear_buckets * 8; index++)
- pmc_core_display_map(s, index, pf_regs[index / 8], map);
+ for (idx = 0; maps[idx]; idx++) {
+ for (index = 0; maps[idx][index].name &&
+ index < pmcdev->map->ppfear_buckets * 8; ip++, index++)
+ pmc_core_display_map(s, index, idx, ip,
+ pf_regs[index / 8], maps);
+ }
return 0;
}
@@ -561,21 +623,22 @@ out_unlock:
}
DEFINE_SHOW_ATTRIBUTE(pmc_core_pll);
-static ssize_t pmc_core_ltr_ignore_write(struct file *file, const char __user
-*userbuf, size_t count, loff_t *ppos)
+static ssize_t pmc_core_ltr_ignore_write(struct file *file,
+ const char __user *userbuf,
+ size_t count, loff_t *ppos)
{
struct pmc_dev *pmcdev = &pmc;
const struct pmc_reg_map *map = pmcdev->map;
u32 val, buf_size, fd;
- int err = 0;
+ int err;
buf_size = count < 64 ? count : 64;
- mutex_lock(&pmcdev->lock);
- if (kstrtou32_from_user(userbuf, buf_size, 10, &val)) {
- err = -EFAULT;
- goto out_unlock;
- }
+ err = kstrtou32_from_user(userbuf, buf_size, 10, &val);
+ if (err)
+ return err;
+
+ mutex_lock(&pmcdev->lock);
if (val > map->ltr_ignore_max) {
err = -EINVAL;
@@ -767,8 +830,9 @@ static void pmc_core_dbgfs_register(struct pmc_dev *pmcdev)
debugfs_create_file("slp_s0_residency_usec", 0444, dir, pmcdev,
&pmc_core_dev_state);
- debugfs_create_file("pch_ip_power_gating_status", 0444, dir, pmcdev,
- &pmc_core_ppfear_fops);
+ if (pmcdev->map->pfear_sts)
+ debugfs_create_file("pch_ip_power_gating_status", 0444, dir,
+ pmcdev, &pmc_core_ppfear_fops);
debugfs_create_file("ltr_ignore", 0644, dir, pmcdev,
&pmc_core_ltr_ignore_ops);
@@ -816,19 +880,22 @@ static const struct x86_cpu_id intel_pmc_core_ids[] = {
INTEL_CPU_FAM6(ICELAKE_NNPI, icl_reg_map),
INTEL_CPU_FAM6(COMETLAKE, cnp_reg_map),
INTEL_CPU_FAM6(COMETLAKE_L, cnp_reg_map),
+ INTEL_CPU_FAM6(TIGERLAKE_L, tgl_reg_map),
+ INTEL_CPU_FAM6(TIGERLAKE, tgl_reg_map),
+ INTEL_CPU_FAM6(ATOM_TREMONT, tgl_reg_map),
{}
};
MODULE_DEVICE_TABLE(x86cpu, intel_pmc_core_ids);
static const struct pci_device_id pmc_pci_ids[] = {
- { PCI_VDEVICE(INTEL, SPT_PMC_PCI_DEVICE_ID), 0},
- { 0, },
+ { PCI_VDEVICE(INTEL, SPT_PMC_PCI_DEVICE_ID) },
+ { }
};
/*
* This quirk can be used on those platforms where
- * the platform BIOS enforces 24Mhx Crystal to shutdown
+ * the platform BIOS enforces the 24 MHz crystal to shut down
* before PMC can assert SLP_S0#.
*/
static int quirk_xtal_ignore(const struct dmi_system_id *id)
diff --git a/drivers/platform/x86/intel_pmc_core.h b/drivers/platform/x86/intel_pmc_core.h
index fdee5772e532..f1a0792b3f91 100644
--- a/drivers/platform/x86/intel_pmc_core.h
+++ b/drivers/platform/x86/intel_pmc_core.h
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* Intel Core SoC Power Management Controller Header File
*
@@ -186,6 +186,8 @@ enum ppfear_regs {
#define ICL_NUM_IP_IGN_ALLOWED 20
#define ICL_PMC_LTR_WIGIG 0x1BFC
+#define TGL_NUM_IP_IGN_ALLOWED 22
+
struct pmc_bit_map {
const char *name;
u32 bit_mask;
@@ -213,7 +215,7 @@ struct pmc_bit_map {
* captures them to have a common implementation.
*/
struct pmc_reg_map {
- const struct pmc_bit_map *pfear_sts;
+ const struct pmc_bit_map **pfear_sts;
const struct pmc_bit_map *mphy_sts;
const struct pmc_bit_map *pll_sts;
const struct pmc_bit_map **slps0_dbg_maps;
diff --git a/drivers/platform/x86/intel_pmc_core_pltdrv.c b/drivers/platform/x86/intel_pmc_core_pltdrv.c
index 6fe829f30997..e1266f5c6359 100644
--- a/drivers/platform/x86/intel_pmc_core_pltdrv.c
+++ b/drivers/platform/x86/intel_pmc_core_pltdrv.c
@@ -44,6 +44,8 @@ static const struct x86_cpu_id intel_pmc_core_platform_ids[] = {
INTEL_CPU_FAM6(KABYLAKE, pmc_core_device),
INTEL_CPU_FAM6(CANNONLAKE_L, pmc_core_device),
INTEL_CPU_FAM6(ICELAKE_L, pmc_core_device),
+ INTEL_CPU_FAM6(COMETLAKE, pmc_core_device),
+ INTEL_CPU_FAM6(COMETLAKE_L, pmc_core_device),
{}
};
MODULE_DEVICE_TABLE(x86cpu, intel_pmc_core_platform_ids);
diff --git a/drivers/platform/x86/intel_pmc_ipc.c b/drivers/platform/x86/intel_pmc_ipc.c
index 5c1da2bb1435..2433bf73f1ed 100644
--- a/drivers/platform/x86/intel_pmc_ipc.c
+++ b/drivers/platform/x86/intel_pmc_ipc.c
@@ -12,23 +12,13 @@
*/
#include <linux/acpi.h>
-#include <linux/atomic.h>
-#include <linux/bitops.h>
#include <linux/delay.h>
-#include <linux/device.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/io-64-nonatomic-lo-hi.h>
-#include <linux/kernel.h>
#include <linux/module.h>
-#include <linux/notifier.h>
#include <linux/pci.h>
#include <linux/platform_device.h>
-#include <linux/pm.h>
-#include <linux/pm_qos.h>
-#include <linux/sched.h>
-#include <linux/spinlock.h>
-#include <linux/suspend.h>
#include <asm/intel_pmc_ipc.h>
@@ -184,11 +174,6 @@ static inline void ipc_data_writel(u32 data, u32 offset)
writel(data, ipcdev.ipc_base + IPC_WRITE_BUFFER + offset);
}
-static inline u8 __maybe_unused ipc_data_readb(u32 offset)
-{
- return readb(ipcdev.ipc_base + IPC_READ_BUFFER + offset);
-}
-
static inline u32 ipc_data_readl(u32 offset)
{
return readl(ipcdev.ipc_base + IPC_READ_BUFFER + offset);
@@ -211,35 +196,6 @@ static inline int is_gcr_valid(u32 offset)
}
/**
- * intel_pmc_gcr_read() - Read a 32-bit PMC GCR register
- * @offset: offset of GCR register from GCR address base
- * @data: data pointer for storing the register output
- *
- * Reads the 32-bit PMC GCR register at given offset.
- *
- * Return: negative value on error or 0 on success.
- */
-int intel_pmc_gcr_read(u32 offset, u32 *data)
-{
- int ret;
-
- spin_lock(&ipcdev.gcr_lock);
-
- ret = is_gcr_valid(offset);
- if (ret < 0) {
- spin_unlock(&ipcdev.gcr_lock);
- return ret;
- }
-
- *data = readl(ipcdev.gcr_mem_base + offset);
-
- spin_unlock(&ipcdev.gcr_lock);
-
- return 0;
-}
-EXPORT_SYMBOL_GPL(intel_pmc_gcr_read);
-
-/**
* intel_pmc_gcr_read64() - Read a 64-bit PMC GCR register
* @offset: offset of GCR register from GCR address base
* @data: data pointer for storing the register output
@@ -269,36 +225,6 @@ int intel_pmc_gcr_read64(u32 offset, u64 *data)
EXPORT_SYMBOL_GPL(intel_pmc_gcr_read64);
/**
- * intel_pmc_gcr_write() - Write PMC GCR register
- * @offset: offset of GCR register from GCR address base
- * @data: register update value
- *
- * Writes the PMC GCR register of given offset with given
- * value.
- *
- * Return: negative value on error or 0 on success.
- */
-int intel_pmc_gcr_write(u32 offset, u32 data)
-{
- int ret;
-
- spin_lock(&ipcdev.gcr_lock);
-
- ret = is_gcr_valid(offset);
- if (ret < 0) {
- spin_unlock(&ipcdev.gcr_lock);
- return ret;
- }
-
- writel(data, ipcdev.gcr_mem_base + offset);
-
- spin_unlock(&ipcdev.gcr_lock);
-
- return 0;
-}
-EXPORT_SYMBOL_GPL(intel_pmc_gcr_write);
-
-/**
* intel_pmc_gcr_update() - Update PMC GCR register bits
* @offset: offset of GCR register from GCR address base
* @mask: bit mask for update operation
@@ -309,7 +235,7 @@ EXPORT_SYMBOL_GPL(intel_pmc_gcr_write);
*
* Return: negative value on error or 0 on success.
*/
-int intel_pmc_gcr_update(u32 offset, u32 mask, u32 val)
+static int intel_pmc_gcr_update(u32 offset, u32 mask, u32 val)
{
u32 new_val;
int ret = 0;
@@ -339,7 +265,6 @@ gcr_ipc_unlock:
spin_unlock(&ipcdev.gcr_lock);
return ret;
}
-EXPORT_SYMBOL_GPL(intel_pmc_gcr_update);
static int update_no_reboot_bit(void *priv, bool set)
{
@@ -405,7 +330,7 @@ static int intel_pmc_ipc_check_status(void)
*
* Return: an IPC error code or 0 on success.
*/
-int intel_pmc_ipc_simple_command(int cmd, int sub)
+static int intel_pmc_ipc_simple_command(int cmd, int sub)
{
int ret;
@@ -420,7 +345,6 @@ int intel_pmc_ipc_simple_command(int cmd, int sub)
return ret;
}
-EXPORT_SYMBOL_GPL(intel_pmc_ipc_simple_command);
/**
* intel_pmc_ipc_raw_cmd() - IPC command with data and pointers
@@ -437,8 +361,8 @@ EXPORT_SYMBOL_GPL(intel_pmc_ipc_simple_command);
*
* Return: an IPC error code or 0 on success.
*/
-int intel_pmc_ipc_raw_cmd(u32 cmd, u32 sub, u8 *in, u32 inlen, u32 *out,
- u32 outlen, u32 dptr, u32 sptr)
+static int intel_pmc_ipc_raw_cmd(u32 cmd, u32 sub, u8 *in, u32 inlen, u32 *out,
+ u32 outlen, u32 dptr, u32 sptr)
{
u32 wbuf[4] = { 0 };
int ret;
@@ -470,7 +394,6 @@ int intel_pmc_ipc_raw_cmd(u32 cmd, u32 sub, u8 *in, u32 inlen, u32 *out,
return ret;
}
-EXPORT_SYMBOL_GPL(intel_pmc_ipc_raw_cmd);
/**
* intel_pmc_ipc_command() - IPC command with input/output data
@@ -579,6 +502,7 @@ static ssize_t intel_pmc_ipc_simple_cmd_store(struct device *dev,
}
return (ssize_t)count;
}
+static DEVICE_ATTR(simplecmd, 0200, NULL, intel_pmc_ipc_simple_cmd_store);
static ssize_t intel_pmc_ipc_northpeak_store(struct device *dev,
struct device_attribute *attr,
@@ -588,8 +512,9 @@ static ssize_t intel_pmc_ipc_northpeak_store(struct device *dev,
int subcmd;
int ret;
- if (kstrtoul(buf, 0, &val))
- return -EINVAL;
+ ret = kstrtoul(buf, 0, &val);
+ if (ret)
+ return ret;
if (val)
subcmd = 1;
@@ -602,11 +527,7 @@ static ssize_t intel_pmc_ipc_northpeak_store(struct device *dev,
}
return (ssize_t)count;
}
-
-static DEVICE_ATTR(simplecmd, S_IWUSR,
- NULL, intel_pmc_ipc_simple_cmd_store);
-static DEVICE_ATTR(northpeak, S_IWUSR,
- NULL, intel_pmc_ipc_northpeak_store);
+static DEVICE_ATTR(northpeak, 0200, NULL, intel_pmc_ipc_northpeak_store);
static struct attribute *intel_ipc_attrs[] = {
&dev_attr_northpeak.attr,
@@ -618,6 +539,11 @@ static const struct attribute_group intel_ipc_group = {
.attrs = intel_ipc_attrs,
};
+static const struct attribute_group *intel_ipc_groups[] = {
+ &intel_ipc_group,
+ NULL
+};
+
static struct resource punit_res_array[] = {
/* Punit BIOS */
{
@@ -958,18 +884,10 @@ static int ipc_plat_probe(struct platform_device *pdev)
goto err_irq;
}
- ret = sysfs_create_group(&pdev->dev.kobj, &intel_ipc_group);
- if (ret) {
- dev_err(&pdev->dev, "Failed to create sysfs group %d\n",
- ret);
- goto err_sys;
- }
-
ipcdev.has_gcr_regs = true;
return 0;
-err_sys:
- devm_free_irq(&pdev->dev, ipcdev.irq, &ipcdev);
+
err_irq:
platform_device_unregister(ipcdev.tco_dev);
platform_device_unregister(ipcdev.punit_dev);
@@ -980,7 +898,6 @@ err_irq:
static int ipc_plat_remove(struct platform_device *pdev)
{
- sysfs_remove_group(&pdev->dev.kobj, &intel_ipc_group);
devm_free_irq(&pdev->dev, ipcdev.irq, &ipcdev);
platform_device_unregister(ipcdev.tco_dev);
platform_device_unregister(ipcdev.punit_dev);
@@ -995,6 +912,7 @@ static struct platform_driver ipc_plat_driver = {
.driver = {
.name = "pmc-ipc-plat",
.acpi_match_table = ACPI_PTR(ipc_acpi_ids),
+ .dev_groups = intel_ipc_groups,
},
};
diff --git a/drivers/platform/x86/intel_scu_ipc.c b/drivers/platform/x86/intel_scu_ipc.c
index cdab916fbf92..3d7da5266136 100644
--- a/drivers/platform/x86/intel_scu_ipc.c
+++ b/drivers/platform/x86/intel_scu_ipc.c
@@ -26,11 +26,7 @@
#include <asm/intel_scu_ipc.h>
/* IPC defines the following message types */
-#define IPCMSG_WATCHDOG_TIMER 0xF8 /* Set Kernel Watchdog Threshold */
-#define IPCMSG_BATTERY 0xEF /* Coulomb Counter Accumulator */
-#define IPCMSG_FW_UPDATE 0xFE /* Firmware update */
-#define IPCMSG_PCNTRL 0xFF /* Power controller unit read/write */
-#define IPCMSG_FW_REVISION 0xF4 /* Get firmware revision */
+#define IPCMSG_PCNTRL 0xff /* Power controller unit read/write */
/* Command id associated with message IPCMSG_PCNTRL */
#define IPC_CMD_PCNTRL_W 0 /* Register write */
@@ -58,56 +54,29 @@
#define IPC_RWBUF_SIZE 20 /* IPC Read buffer Size */
#define IPC_IOC 0x100 /* IPC command register IOC bit */
-#define PCI_DEVICE_ID_LINCROFT 0x082a
-#define PCI_DEVICE_ID_PENWELL 0x080e
-#define PCI_DEVICE_ID_CLOVERVIEW 0x08ea
-#define PCI_DEVICE_ID_TANGIER 0x11a0
-
-/* intel scu ipc driver data */
-struct intel_scu_ipc_pdata_t {
- u32 i2c_base;
- u32 i2c_len;
- u8 irq_mode;
-};
-
-static const struct intel_scu_ipc_pdata_t intel_scu_ipc_lincroft_pdata = {
- .i2c_base = 0xff12b000,
- .i2c_len = 0x10,
- .irq_mode = 0,
-};
-
-/* Penwell and Cloverview */
-static const struct intel_scu_ipc_pdata_t intel_scu_ipc_penwell_pdata = {
- .i2c_base = 0xff12b000,
- .i2c_len = 0x10,
- .irq_mode = 1,
-};
-
-static const struct intel_scu_ipc_pdata_t intel_scu_ipc_tangier_pdata = {
- .i2c_base = 0xff00d000,
- .i2c_len = 0x10,
- .irq_mode = 0,
-};
-
struct intel_scu_ipc_dev {
struct device *dev;
void __iomem *ipc_base;
- void __iomem *i2c_base;
struct completion cmd_complete;
u8 irq_mode;
};
static struct intel_scu_ipc_dev ipcdev; /* Only one for now */
+#define IPC_STATUS 0x04
+#define IPC_STATUS_IRQ BIT(2)
+#define IPC_STATUS_ERR BIT(1)
+#define IPC_STATUS_BUSY BIT(0)
+
/*
- * IPC Read Buffer (Read Only):
- * 16 byte buffer for receiving data from SCU, if IPC command
- * processing results in response data
+ * IPC Write/Read Buffers:
+ * 16 byte buffer for sending and receiving data to and from SCU.
*/
+#define IPC_WRITE_BUFFER 0x80
#define IPC_READ_BUFFER 0x90
-#define IPC_I2C_CNTRL_ADDR 0
-#define I2C_DATA_ADDR 0x04
+/* Timeout in jiffies */
+#define IPC_TIMEOUT (3 * HZ)
static DEFINE_MUTEX(ipclock); /* lock used to prevent multiple call to SCU */
@@ -120,11 +89,8 @@ static DEFINE_MUTEX(ipclock); /* lock used to prevent multiple call to SCU */
*/
static inline void ipc_command(struct intel_scu_ipc_dev *scu, u32 cmd)
{
- if (scu->irq_mode) {
- reinit_completion(&scu->cmd_complete);
- writel(cmd | IPC_IOC, scu->ipc_base);
- }
- writel(cmd, scu->ipc_base);
+ reinit_completion(&scu->cmd_complete);
+ writel(cmd | IPC_IOC, scu->ipc_base);
}
/*
@@ -135,7 +101,7 @@ static inline void ipc_command(struct intel_scu_ipc_dev *scu, u32 cmd)
*/
static inline void ipc_data_writel(struct intel_scu_ipc_dev *scu, u32 data, u32 offset)
{
- writel(data, scu->ipc_base + 0x80 + offset);
+ writel(data, scu->ipc_base + IPC_WRITE_BUFFER + offset);
}
/*
@@ -147,7 +113,7 @@ static inline void ipc_data_writel(struct intel_scu_ipc_dev *scu, u32 data, u32
*/
static inline u8 ipc_read_status(struct intel_scu_ipc_dev *scu)
{
- return __raw_readl(scu->ipc_base + 0x04);
+ return __raw_readl(scu->ipc_base + IPC_STATUS);
}
/* Read ipc byte data */
@@ -165,24 +131,20 @@ static inline u32 ipc_data_readl(struct intel_scu_ipc_dev *scu, u32 offset)
/* Wait until the SCU status is no longer busy */
static inline int busy_loop(struct intel_scu_ipc_dev *scu)
{
- u32 status = ipc_read_status(scu);
- u32 loop_count = 100000;
+ unsigned long end = jiffies + msecs_to_jiffies(IPC_TIMEOUT);
- /* break if scu doesn't reset busy bit after huge retry */
- while ((status & BIT(0)) && --loop_count) {
- udelay(1); /* scu processing time is in few u secods */
- status = ipc_read_status(scu);
- }
+ do {
+ u32 status;
- if (status & BIT(0)) {
- dev_err(scu->dev, "IPC timed out");
- return -ETIMEDOUT;
- }
+ status = ipc_read_status(scu);
+ if (!(status & IPC_STATUS_BUSY))
+ return (status & IPC_STATUS_ERR) ? -EIO : 0;
- if (status & BIT(1))
- return -EIO;
+ usleep_range(50, 100);
+ } while (time_before(jiffies, end));
- return 0;
+ dev_err(scu->dev, "IPC timed out");
+ return -ETIMEDOUT;
}
/* Wait till ipc ioc interrupt is received or timeout in 3 HZ */
@@ -190,13 +152,13 @@ static inline int ipc_wait_for_interrupt(struct intel_scu_ipc_dev *scu)
{
int status;
- if (!wait_for_completion_timeout(&scu->cmd_complete, 3 * HZ)) {
+ if (!wait_for_completion_timeout(&scu->cmd_complete, IPC_TIMEOUT)) {
dev_err(scu->dev, "IPC timed out\n");
return -ETIMEDOUT;
}
status = ipc_read_status(scu);
- if (status & BIT(1))
+ if (status & IPC_STATUS_ERR)
return -EIO;
return 0;
@@ -260,14 +222,14 @@ static int pwr_reg_rdwr(u16 *addr, u8 *data, u32 count, u32 op, u32 id)
}
/**
- * intel_scu_ipc_ioread8 - read a word via the SCU
- * @addr: register on SCU
- * @data: return pointer for read byte
+ * intel_scu_ipc_ioread8 - read a byte via the SCU
+ * @addr: Register on SCU
+ * @data: Return pointer for read byte
*
- * Read a single register. Returns 0 on success or an error code. All
- * locking between SCU accesses is handled for the caller.
+ * Read a single register. Returns %0 on success or an error code. All
+ * locking between SCU accesses is handled for the caller.
*
- * This function may sleep.
+ * This function may sleep.
*/
int intel_scu_ipc_ioread8(u16 addr, u8 *data)
{
@@ -276,48 +238,14 @@ int intel_scu_ipc_ioread8(u16 addr, u8 *data)
EXPORT_SYMBOL(intel_scu_ipc_ioread8);
/**
- * intel_scu_ipc_ioread16 - read a word via the SCU
- * @addr: register on SCU
- * @data: return pointer for read word
- *
- * Read a register pair. Returns 0 on success or an error code. All
- * locking between SCU accesses is handled for the caller.
- *
- * This function may sleep.
- */
-int intel_scu_ipc_ioread16(u16 addr, u16 *data)
-{
- u16 x[2] = {addr, addr + 1};
- return pwr_reg_rdwr(x, (u8 *)data, 2, IPCMSG_PCNTRL, IPC_CMD_PCNTRL_R);
-}
-EXPORT_SYMBOL(intel_scu_ipc_ioread16);
-
-/**
- * intel_scu_ipc_ioread32 - read a dword via the SCU
- * @addr: register on SCU
- * @data: return pointer for read dword
- *
- * Read four registers. Returns 0 on success or an error code. All
- * locking between SCU accesses is handled for the caller.
- *
- * This function may sleep.
- */
-int intel_scu_ipc_ioread32(u16 addr, u32 *data)
-{
- u16 x[4] = {addr, addr + 1, addr + 2, addr + 3};
- return pwr_reg_rdwr(x, (u8 *)data, 4, IPCMSG_PCNTRL, IPC_CMD_PCNTRL_R);
-}
-EXPORT_SYMBOL(intel_scu_ipc_ioread32);
-
-/**
- * intel_scu_ipc_iowrite8 - write a byte via the SCU
- * @addr: register on SCU
- * @data: byte to write
+ * intel_scu_ipc_iowrite8 - write a byte via the SCU
+ * @addr: Register on SCU
+ * @data: Byte to write
*
- * Write a single register. Returns 0 on success or an error code. All
- * locking between SCU accesses is handled for the caller.
+ * Write a single register. Returns %0 on success or an error code. All
+ * locking between SCU accesses is handled for the caller.
*
- * This function may sleep.
+ * This function may sleep.
*/
int intel_scu_ipc_iowrite8(u16 addr, u8 data)
{
@@ -326,51 +254,17 @@ int intel_scu_ipc_iowrite8(u16 addr, u8 data)
EXPORT_SYMBOL(intel_scu_ipc_iowrite8);
/**
- * intel_scu_ipc_iowrite16 - write a word via the SCU
- * @addr: register on SCU
- * @data: word to write
- *
- * Write two registers. Returns 0 on success or an error code. All
- * locking between SCU accesses is handled for the caller.
+ * intel_scu_ipc_readv - read a set of registers
+ * @addr: Register list
+ * @data: Bytes to return
+ * @len: Length of array
*
- * This function may sleep.
- */
-int intel_scu_ipc_iowrite16(u16 addr, u16 data)
-{
- u16 x[2] = {addr, addr + 1};
- return pwr_reg_rdwr(x, (u8 *)&data, 2, IPCMSG_PCNTRL, IPC_CMD_PCNTRL_W);
-}
-EXPORT_SYMBOL(intel_scu_ipc_iowrite16);
-
-/**
- * intel_scu_ipc_iowrite32 - write a dword via the SCU
- * @addr: register on SCU
- * @data: dword to write
+ * Read registers. Returns %0 on success or an error code. All locking
+ * between SCU accesses is handled for the caller.
*
- * Write four registers. Returns 0 on success or an error code. All
- * locking between SCU accesses is handled for the caller.
+ * The largest array length permitted by the hardware is 5 items.
*
- * This function may sleep.
- */
-int intel_scu_ipc_iowrite32(u16 addr, u32 data)
-{
- u16 x[4] = {addr, addr + 1, addr + 2, addr + 3};
- return pwr_reg_rdwr(x, (u8 *)&data, 4, IPCMSG_PCNTRL, IPC_CMD_PCNTRL_W);
-}
-EXPORT_SYMBOL(intel_scu_ipc_iowrite32);
-
-/**
- * intel_scu_ipc_readvv - read a set of registers
- * @addr: register list
- * @data: bytes to return
- * @len: length of array
- *
- * Read registers. Returns 0 on success or an error code. All
- * locking between SCU accesses is handled for the caller.
- *
- * The largest array length permitted by the hardware is 5 items.
- *
- * This function may sleep.
+ * This function may sleep.
*/
int intel_scu_ipc_readv(u16 *addr, u8 *data, int len)
{
@@ -379,18 +273,17 @@ int intel_scu_ipc_readv(u16 *addr, u8 *data, int len)
EXPORT_SYMBOL(intel_scu_ipc_readv);
/**
- * intel_scu_ipc_writev - write a set of registers
- * @addr: register list
- * @data: bytes to write
- * @len: length of array
- *
- * Write registers. Returns 0 on success or an error code. All
- * locking between SCU accesses is handled for the caller.
+ * intel_scu_ipc_writev - write a set of registers
+ * @addr: Register list
+ * @data: Bytes to write
+ * @len: Length of array
*
- * The largest array length permitted by the hardware is 5 items.
+ * Write registers. Returns %0 on success or an error code. All locking
+ * between SCU accesses is handled for the caller.
*
- * This function may sleep.
+ * The largest array length permitted by the hardware is 5 items.
*
+ * This function may sleep.
*/
int intel_scu_ipc_writev(u16 *addr, u8 *data, int len)
{
@@ -399,19 +292,18 @@ int intel_scu_ipc_writev(u16 *addr, u8 *data, int len)
EXPORT_SYMBOL(intel_scu_ipc_writev);
/**
- * intel_scu_ipc_update_register - r/m/w a register
- * @addr: register address
- * @bits: bits to update
- * @mask: mask of bits to update
- *
- * Read-modify-write power control unit register. The first data argument
- * must be register value and second is mask value
- * mask is a bitmap that indicates which bits to update.
- * 0 = masked. Don't modify this bit, 1 = modify this bit.
- * returns 0 on success or an error code.
- *
- * This function may sleep. Locking between SCU accesses is handled
- * for the caller.
+ * intel_scu_ipc_update_register - r/m/w a register
+ * @addr: Register address
+ * @bits: Bits to update
+ * @mask: Mask of bits to update
+ *
+ * Read-modify-write a power control unit register. The first data argument
+ * must be the register value and the second is the mask value. The mask is
+ * a bitmap that indicates which bits to update: %0 = masked (don't modify
+ * this bit), %1 = modify this bit. Returns %0 on success or an error code.
+ *
+ * This function may sleep. Locking between SCU accesses is handled
+ * for the caller.
*/
int intel_scu_ipc_update_register(u16 addr, u8 bits, u8 mask)
{
@@ -421,16 +313,16 @@ int intel_scu_ipc_update_register(u16 addr, u8 bits, u8 mask)
EXPORT_SYMBOL(intel_scu_ipc_update_register);
/**
- * intel_scu_ipc_simple_command - send a simple command
- * @cmd: command
- * @sub: sub type
+ * intel_scu_ipc_simple_command - send a simple command
+ * @cmd: Command
+ * @sub: Sub type
*
- * Issue a simple command to the SCU. Do not use this interface if
- * you must then access data as any data values may be overwritten
- * by another SCU access by the time this function returns.
+ * Issue a simple command to the SCU. Do not use this interface if you must
+ * then access data as any data values may be overwritten by another SCU
+ * access by the time this function returns.
*
- * This function may sleep. Locking for SCU accesses is handled for
- * the caller.
+ * This function may sleep. Locking for SCU accesses is handled for the
+ * caller.
*/
int intel_scu_ipc_simple_command(int cmd, int sub)
{
@@ -450,16 +342,16 @@ int intel_scu_ipc_simple_command(int cmd, int sub)
EXPORT_SYMBOL(intel_scu_ipc_simple_command);
/**
- * intel_scu_ipc_command - command with data
- * @cmd: command
- * @sub: sub type
- * @in: input data
- * @inlen: input length in dwords
- * @out: output data
- * @outlein: output length in dwords
- *
- * Issue a command to the SCU which involves data transfers. Do the
- * data copies under the lock but leave it for the caller to interpret
+ * intel_scu_ipc_command - command with data
+ * @cmd: Command
+ * @sub: Sub type
+ * @in: Input data
+ * @inlen: Input length in dwords
+ * @out: Output data
+ * @outlen: Output length in dwords
+ *
+ * Issue a command to the SCU which involves data transfers. Do the
+ * data copies under the lock but leave it for the caller to interpret.
*/
int intel_scu_ipc_command(int cmd, int sub, u32 *in, int inlen,
u32 *out, int outlen)
@@ -489,117 +381,6 @@ int intel_scu_ipc_command(int cmd, int sub, u32 *in, int inlen,
}
EXPORT_SYMBOL(intel_scu_ipc_command);
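For context, a hedged in-kernel sketch of how a consumer might use the
surviving SCU register API (the 0x1f offset and function name are purely
illustrative; the signatures match the declarations shown above from
asm/intel_scu_ipc.h):

	#include <asm/intel_scu_ipc.h>

	static int example_set_pmic_bit(void)
	{
		u8 val;
		int ret;

		ret = intel_scu_ipc_ioread8(0x1f, &val);	/* read current value */
		if (ret)
			return ret;
		if (val & 0x01)		/* already set, nothing to do */
			return 0;

		/* set bit 0, leaving the remaining bits untouched */
		return intel_scu_ipc_update_register(0x1f, 0x01, 0x01);
	}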
-#define IPC_SPTR 0x08
-#define IPC_DPTR 0x0C
-
-/**
- * intel_scu_ipc_raw_command() - IPC command with data and pointers
- * @cmd: IPC command code.
- * @sub: IPC command sub type.
- * @in: input data of this IPC command.
- * @inlen: input data length in dwords.
- * @out: output data of this IPC command.
- * @outlen: output data length in dwords.
- * @sptr: data writing to SPTR register.
- * @dptr: data writing to DPTR register.
- *
- * Send an IPC command to SCU with input/output data and source/dest pointers.
- *
- * Return: an IPC error code or 0 on success.
- */
-int intel_scu_ipc_raw_command(int cmd, int sub, u8 *in, int inlen,
- u32 *out, int outlen, u32 dptr, u32 sptr)
-{
- struct intel_scu_ipc_dev *scu = &ipcdev;
- int inbuflen = DIV_ROUND_UP(inlen, 4);
- u32 inbuf[4];
- int i, err;
-
- /* Up to 16 bytes */
- if (inbuflen > 4)
- return -EINVAL;
-
- mutex_lock(&ipclock);
- if (scu->dev == NULL) {
- mutex_unlock(&ipclock);
- return -ENODEV;
- }
-
- writel(dptr, scu->ipc_base + IPC_DPTR);
- writel(sptr, scu->ipc_base + IPC_SPTR);
-
- /*
- * SRAM controller doesn't support 8-bit writes, it only
- * supports 32-bit writes, so we have to copy input data into
- * the temporary buffer, and SCU FW will use the inlen to
- * determine the actual input data length in the temporary
- * buffer.
- */
- memcpy(inbuf, in, inlen);
-
- for (i = 0; i < inbuflen; i++)
- ipc_data_writel(scu, inbuf[i], 4 * i);
-
- ipc_command(scu, (inlen << 16) | (sub << 12) | cmd);
- err = intel_scu_ipc_check_status(scu);
- if (!err) {
- for (i = 0; i < outlen; i++)
- *out++ = ipc_data_readl(scu, 4 * i);
- }
-
- mutex_unlock(&ipclock);
- return err;
-}
-EXPORT_SYMBOL_GPL(intel_scu_ipc_raw_command);
-
-/* I2C commands */
-#define IPC_I2C_WRITE 1 /* I2C Write command */
-#define IPC_I2C_READ 2 /* I2C Read command */
-
-/**
- * intel_scu_ipc_i2c_cntrl - I2C read/write operations
- * @addr: I2C address + command bits
- * @data: data to read/write
- *
- * Perform an an I2C read/write operation via the SCU. All locking is
- * handled for the caller. This function may sleep.
- *
- * Returns an error code or 0 on success.
- *
- * This has to be in the IPC driver for the locking.
- */
-int intel_scu_ipc_i2c_cntrl(u32 addr, u32 *data)
-{
- struct intel_scu_ipc_dev *scu = &ipcdev;
- u32 cmd = 0;
-
- mutex_lock(&ipclock);
- if (scu->dev == NULL) {
- mutex_unlock(&ipclock);
- return -ENODEV;
- }
- cmd = (addr >> 24) & 0xFF;
- if (cmd == IPC_I2C_READ) {
- writel(addr, scu->i2c_base + IPC_I2C_CNTRL_ADDR);
- /* Write not getting updated without delay */
- usleep_range(1000, 2000);
- *data = readl(scu->i2c_base + I2C_DATA_ADDR);
- } else if (cmd == IPC_I2C_WRITE) {
- writel(*data, scu->i2c_base + I2C_DATA_ADDR);
- usleep_range(1000, 2000);
- writel(addr, scu->i2c_base + IPC_I2C_CNTRL_ADDR);
- } else {
- dev_err(scu->dev,
- "intel_scu_ipc: I2C INVALID_CMD = 0x%x\n", cmd);
-
- mutex_unlock(&ipclock);
- return -EIO;
- }
- mutex_unlock(&ipclock);
- return 0;
-}
-EXPORT_SYMBOL(intel_scu_ipc_i2c_cntrl);
-
/*
* Interrupt handler gets called when ioc bit of IPC_COMMAND_REG set to 1
* When ioc bit is set to 1, caller api must wait for interrupt handler called
@@ -610,9 +391,10 @@ EXPORT_SYMBOL(intel_scu_ipc_i2c_cntrl);
static irqreturn_t ioc(int irq, void *dev_id)
{
struct intel_scu_ipc_dev *scu = dev_id;
+ int status = ipc_read_status(scu);
- if (scu->irq_mode)
- complete(&scu->cmd_complete);
+ writel(status | IPC_STATUS_IRQ, scu->ipc_base + IPC_STATUS);
+ complete(&scu->cmd_complete);
return IRQ_HANDLED;
}
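For context on the rewritten handler: it now unconditionally acks the interrupt by writing IPC_STATUS_IRQ back to the status register and signals the completion. The submitting side it pairs with looks roughly like the sketch below; the helper name and the 3 * HZ timeout are illustrative, not taken from this patch:

	static int example_wait_for_scu(struct intel_scu_ipc_dev *scu)
	{
		/* Re-arm before issuing a command with the ioc bit set. */
		reinit_completion(&scu->cmd_complete);

		/* ... write IPC_COMMAND_REG with ioc = 1 here ... */

		if (!wait_for_completion_timeout(&scu->cmd_complete, 3 * HZ))
			return -ETIMEDOUT;
		return 0;
	}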
@@ -629,17 +411,10 @@ static int ipc_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
int err;
struct intel_scu_ipc_dev *scu = &ipcdev;
- struct intel_scu_ipc_pdata_t *pdata;
if (scu->dev) /* We support only one SCU */
return -EBUSY;
- pdata = (struct intel_scu_ipc_pdata_t *)id->driver_data;
- if (!pdata)
- return -ENODEV;
-
- scu->irq_mode = pdata->irq_mode;
-
err = pcim_enable_device(pdev);
if (err)
return err;
@@ -652,10 +427,6 @@ static int ipc_probe(struct pci_dev *pdev, const struct pci_device_id *id)
scu->ipc_base = pcim_iomap_table(pdev)[0];
- scu->i2c_base = ioremap_nocache(pdata->i2c_base, pdata->i2c_len);
- if (!scu->i2c_base)
- return -ENOMEM;
-
err = devm_request_irq(&pdev->dev, pdev->irq, ioc, 0, "intel_scu_ipc",
scu);
if (err)
@@ -670,13 +441,10 @@ static int ipc_probe(struct pci_dev *pdev, const struct pci_device_id *id)
return 0;
}
-#define SCU_DEVICE(id, pdata) {PCI_VDEVICE(INTEL, id), (kernel_ulong_t)&pdata}
-
static const struct pci_device_id pci_ids[] = {
- SCU_DEVICE(PCI_DEVICE_ID_LINCROFT, intel_scu_ipc_lincroft_pdata),
- SCU_DEVICE(PCI_DEVICE_ID_PENWELL, intel_scu_ipc_penwell_pdata),
- SCU_DEVICE(PCI_DEVICE_ID_CLOVERVIEW, intel_scu_ipc_penwell_pdata),
- SCU_DEVICE(PCI_DEVICE_ID_TANGIER, intel_scu_ipc_tangier_pdata),
+ { PCI_VDEVICE(INTEL, 0x080e) },
+ { PCI_VDEVICE(INTEL, 0x08ea) },
+ { PCI_VDEVICE(INTEL, 0x11a0) },
{}
};
diff --git a/drivers/platform/x86/intel_speed_select_if/isst_if_common.c b/drivers/platform/x86/intel_speed_select_if/isst_if_common.c
index 3de5a3c66529..0c2aa22c7a12 100644
--- a/drivers/platform/x86/intel_speed_select_if/isst_if_common.c
+++ b/drivers/platform/x86/intel_speed_select_if/isst_if_common.c
@@ -50,6 +50,8 @@ static const struct isst_valid_cmd_ranges isst_valid_cmds[] = {
{0x7F, 0x00, 0x0B},
{0x7F, 0x10, 0x12},
{0x7F, 0x20, 0x23},
+ {0x94, 0x03, 0x03},
+ {0x95, 0x03, 0x03},
};
static const struct isst_cmd_set_req_type isst_cmd_set_reqs[] = {
@@ -59,6 +61,7 @@ static const struct isst_cmd_set_req_type isst_cmd_set_reqs[] = {
{0xD0, 0x03, 0x08},
{0x7F, 0x02, 0x00},
{0x7F, 0x08, 0x00},
+ {0x95, 0x03, 0x03},
};
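Reading the new entries: each triple is {command, first valid sub-command, last valid sub-command}, so both additions whitelist a single sub-command, and {0x95, 0x03, 0x03} is also listed in isst_cmd_set_reqs[], tagging it as a set (write) request. Reduced to a sketch (the range-field semantics are inferred from the struct names, not spelled out in this hunk):

	static bool example_cmd_allowed(u16 cmd, u16 sub)
	{
		/* {0x94, 0x03, 0x03} and {0x95, 0x03, 0x03} each accept
		 * exactly one sub-command. */
		return (cmd == 0x94 || cmd == 0x95) && sub == 0x03;
	}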
struct isst_cmd {
diff --git a/drivers/platform/x86/intel_telemetry_debugfs.c b/drivers/platform/x86/intel_telemetry_debugfs.c
index e84d3e983e0c..8e3fb55ac1ae 100644
--- a/drivers/platform/x86/intel_telemetry_debugfs.c
+++ b/drivers/platform/x86/intel_telemetry_debugfs.c
@@ -686,13 +686,14 @@ static ssize_t telem_pss_trc_verb_write(struct file *file,
u32 verbosity;
int err;
- if (kstrtou32_from_user(userbuf, count, 0, &verbosity))
- return -EFAULT;
+ err = kstrtou32_from_user(userbuf, count, 0, &verbosity);
+ if (err)
+ return err;
err = telemetry_set_trace_verbosity(TELEM_PSS, verbosity);
if (err) {
pr_err("Changing PSS Trace Verbosity Failed. Error %d\n", err);
- count = err;
+ return err;
}
return count;
@@ -733,13 +734,14 @@ static ssize_t telem_ioss_trc_verb_write(struct file *file,
u32 verbosity;
int err;
- if (kstrtou32_from_user(userbuf, count, 0, &verbosity))
- return -EFAULT;
+ err = kstrtou32_from_user(userbuf, count, 0, &verbosity);
+ if (err)
+ return err;
err = telemetry_set_trace_verbosity(TELEM_IOSS, verbosity);
if (err) {
pr_err("Changing IOSS Trace Verbosity Failed. Error %d\n", err);
- count = err;
+ return err;
}
return count;
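Both telemetry hunks make the same two fixes, shown here as a generic sketch (the handler and consumer names are placeholders): the parse error from kstrtou32_from_user() (-EINVAL, -ERANGE or -EFAULT) is propagated instead of being flattened to -EFAULT, and a downstream failure is returned directly instead of being smuggled out through the unsigned 'count':

	static ssize_t example_u32_write(struct file *file,
					 const char __user *ubuf,
					 size_t count, loff_t *ppos)
	{
		u32 val;
		int err;

		err = kstrtou32_from_user(ubuf, count, 0, &val);
		if (err)
			return err;

		err = example_apply(val);	/* hypothetical consumer */
		if (err)
			return err;

		return count;			/* whole write consumed */
	}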
diff --git a/drivers/platform/x86/intel_telemetry_pltdrv.c b/drivers/platform/x86/intel_telemetry_pltdrv.c
index df8565bad595..c4c742bb23cf 100644
--- a/drivers/platform/x86/intel_telemetry_pltdrv.c
+++ b/drivers/platform/x86/intel_telemetry_pltdrv.c
@@ -1117,9 +1117,9 @@ static const struct telemetry_core_ops telm_pltops = {
static int telemetry_pltdrv_probe(struct platform_device *pdev)
{
- struct resource *res0 = NULL, *res1 = NULL;
const struct x86_cpu_id *id;
- int size, ret = -ENOMEM;
+ void __iomem *mem;
+ int ret;
id = x86_match_cpu(telemetry_cpu_ids);
if (!id)
@@ -1127,50 +1127,17 @@ static int telemetry_pltdrv_probe(struct platform_device *pdev)
telm_conf = (struct telemetry_plt_config *)id->driver_data;
- res0 = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!res0) {
- ret = -EINVAL;
- goto out;
- }
- size = resource_size(res0);
- if (!devm_request_mem_region(&pdev->dev, res0->start, size,
- pdev->name)) {
- ret = -EBUSY;
- goto out;
- }
- telm_conf->pss_config.ssram_base_addr = res0->start;
- telm_conf->pss_config.ssram_size = size;
+ mem = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(mem))
+ return PTR_ERR(mem);
- res1 = platform_get_resource(pdev, IORESOURCE_MEM, 1);
- if (!res1) {
- ret = -EINVAL;
- goto out;
- }
- size = resource_size(res1);
- if (!devm_request_mem_region(&pdev->dev, res1->start, size,
- pdev->name)) {
- ret = -EBUSY;
- goto out;
- }
+ telm_conf->pss_config.regmap = mem;
- telm_conf->ioss_config.ssram_base_addr = res1->start;
- telm_conf->ioss_config.ssram_size = size;
+ mem = devm_platform_ioremap_resource(pdev, 1);
+ if (IS_ERR(mem))
+ return PTR_ERR(mem);
- telm_conf->pss_config.regmap = ioremap_nocache(
- telm_conf->pss_config.ssram_base_addr,
- telm_conf->pss_config.ssram_size);
- if (!telm_conf->pss_config.regmap) {
- ret = -ENOMEM;
- goto out;
- }
-
- telm_conf->ioss_config.regmap = ioremap_nocache(
- telm_conf->ioss_config.ssram_base_addr,
- telm_conf->ioss_config.ssram_size);
- if (!telm_conf->ioss_config.regmap) {
- ret = -ENOMEM;
- goto out;
- }
+ telm_conf->ioss_config.regmap = mem;
mutex_init(&telm_conf->telem_lock);
mutex_init(&telm_conf->telem_trace_lock);
@@ -1188,14 +1155,6 @@ static int telemetry_pltdrv_probe(struct platform_device *pdev)
return 0;
out:
- if (res0)
- release_mem_region(res0->start, resource_size(res0));
- if (res1)
- release_mem_region(res1->start, resource_size(res1));
- if (telm_conf->pss_config.regmap)
- iounmap(telm_conf->pss_config.regmap);
- if (telm_conf->ioss_config.regmap)
- iounmap(telm_conf->ioss_config.regmap);
dev_err(&pdev->dev, "TELEMETRY Setup Failed.\n");
return ret;
@@ -1204,9 +1163,6 @@ out:
static int telemetry_pltdrv_remove(struct platform_device *pdev)
{
telemetry_clear_pltdata();
- iounmap(telm_conf->pss_config.regmap);
- iounmap(telm_conf->ioss_config.regmap);
-
return 0;
}
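The conversion above collapses four failure-prone steps per region (look up the resource, request the region, map it, unwind by hand on error) into one managed call. The resulting probe pattern, as a minimal sketch:

	static int example_probe(struct platform_device *pdev)
	{
		void __iomem *base;

		/* Requests and maps MEM resource 0; the devm core unmaps
		 * and releases it automatically on driver detach. */
		base = devm_platform_ioremap_resource(pdev, 0);
		if (IS_ERR(base))
			return PTR_ERR(base);

		/* ... MMIO through 'base'; no iounmap() needed ... */
		return 0;
	}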
diff --git a/drivers/platform/x86/mlx-platform.c b/drivers/platform/x86/mlx-platform.c
index 8fe51e43f1bc..c27548fd386a 100644
--- a/drivers/platform/x86/mlx-platform.c
+++ b/drivers/platform/x86/mlx-platform.c
@@ -35,6 +35,8 @@
#define MLXPLAT_CPLD_LPC_REG_LED4_OFFSET 0x23
#define MLXPLAT_CPLD_LPC_REG_LED5_OFFSET 0x24
#define MLXPLAT_CPLD_LPC_REG_FAN_DIRECTION 0x2a
+#define MLXPLAT_CPLD_LPC_REG_GP0_RO_OFFSET 0x2b
+#define MLXPLAT_CPLD_LPC_REG_GP0_OFFSET 0x2e
#define MLXPLAT_CPLD_LPC_REG_GP1_OFFSET 0x30
#define MLXPLAT_CPLD_LPC_REG_WP1_OFFSET 0x31
#define MLXPLAT_CPLD_LPC_REG_GP2_OFFSET 0x32
@@ -46,6 +48,8 @@
#define MLXPLAT_CPLD_LPC_REG_AGGRLO_MASK_OFFSET 0x41
#define MLXPLAT_CPLD_LPC_REG_AGGRCO_OFFSET 0x42
#define MLXPLAT_CPLD_LPC_REG_AGGRCO_MASK_OFFSET 0x43
+#define MLXPLAT_CPLD_LPC_REG_AGGRCX_OFFSET 0x44
+#define MLXPLAT_CPLD_LPC_REG_AGGRCX_MASK_OFFSET 0x45
#define MLXPLAT_CPLD_LPC_REG_ASIC_HEALTH_OFFSET 0x50
#define MLXPLAT_CPLD_LPC_REG_ASIC_EVENT_OFFSET 0x51
#define MLXPLAT_CPLD_LPC_REG_ASIC_MASK_OFFSET 0x52
@@ -68,6 +72,7 @@
#define MLXPLAT_CPLD_LPC_REG_WD3_TMR_OFFSET 0xd1
#define MLXPLAT_CPLD_LPC_REG_WD3_TLEFT_OFFSET 0xd2
#define MLXPLAT_CPLD_LPC_REG_WD3_ACT_OFFSET 0xd3
+#define MLXPLAT_CPLD_LPC_REG_UFM_VERSION_OFFSET 0xe2
#define MLXPLAT_CPLD_LPC_REG_PWM1_OFFSET 0xe3
#define MLXPLAT_CPLD_LPC_REG_TACHO1_OFFSET 0xe4
#define MLXPLAT_CPLD_LPC_REG_TACHO2_OFFSET 0xe5
@@ -85,9 +90,13 @@
#define MLXPLAT_CPLD_LPC_REG_FAN_CAP2_OFFSET 0xf6
#define MLXPLAT_CPLD_LPC_REG_FAN_DRW_CAP_OFFSET 0xf7
#define MLXPLAT_CPLD_LPC_REG_TACHO_SPEED_OFFSET 0xf8
+#define MLXPLAT_CPLD_LPC_REG_PSU_I2C_CAP_OFFSET 0xf9
+#define MLXPLAT_CPLD_LPC_REG_CONFIG1_OFFSET 0xfb
+#define MLXPLAT_CPLD_LPC_REG_CONFIG2_OFFSET 0xfc
#define MLXPLAT_CPLD_LPC_IO_RANGE 0x100
#define MLXPLAT_CPLD_LPC_I2C_CH1_OFF 0xdb
#define MLXPLAT_CPLD_LPC_I2C_CH2_OFF 0xda
+#define MLXPLAT_CPLD_LPC_I2C_CH3_OFF 0xdc
#define MLXPLAT_CPLD_LPC_PIO_OFFSET 0x10000UL
#define MLXPLAT_CPLD_LPC_REG1 ((MLXPLAT_CPLD_LPC_REG_BASE_ADRR + \
@@ -96,6 +105,9 @@
#define MLXPLAT_CPLD_LPC_REG2 ((MLXPLAT_CPLD_LPC_REG_BASE_ADRR + \
MLXPLAT_CPLD_LPC_I2C_CH2_OFF) | \
MLXPLAT_CPLD_LPC_PIO_OFFSET)
+#define MLXPLAT_CPLD_LPC_REG3 ((MLXPLAT_CPLD_LPC_REG_BASE_ADRR + \
+ MLXPLAT_CPLD_LPC_I2C_CH3_OFF) | \
+ MLXPLAT_CPLD_LPC_PIO_OFFSET)
/* Masks for aggregation, psu, pwr and fan event in CPLD related registers. */
#define MLXPLAT_CPLD_AGGR_ASIC_MASK_DEF 0x04
@@ -112,17 +124,29 @@
#define MLXPLAT_CPLD_LOW_AGGR_MASK_I2C BIT(6)
#define MLXPLAT_CPLD_PSU_MASK GENMASK(1, 0)
#define MLXPLAT_CPLD_PWR_MASK GENMASK(1, 0)
+#define MLXPLAT_CPLD_PSU_EXT_MASK GENMASK(3, 0)
+#define MLXPLAT_CPLD_PWR_EXT_MASK GENMASK(3, 0)
#define MLXPLAT_CPLD_FAN_MASK GENMASK(3, 0)
#define MLXPLAT_CPLD_ASIC_MASK GENMASK(1, 0)
#define MLXPLAT_CPLD_FAN_NG_MASK GENMASK(5, 0)
#define MLXPLAT_CPLD_LED_LO_NIBBLE_MASK GENMASK(7, 4)
#define MLXPLAT_CPLD_LED_HI_NIBBLE_MASK GENMASK(3, 0)
+#define MLXPLAT_CPLD_VOLTREG_UPD_MASK GENMASK(5, 4)
+#define MLXPLAT_CPLD_I2C_CAP_BIT 0x04
+#define MLXPLAT_CPLD_I2C_CAP_MASK GENMASK(5, MLXPLAT_CPLD_I2C_CAP_BIT)
+
+/* Masks for aggregation for comex carriers */
+#define MLXPLAT_CPLD_AGGR_MASK_CARRIER BIT(1)
+#define MLXPLAT_CPLD_AGGR_MASK_CARR_DEF (MLXPLAT_CPLD_AGGR_ASIC_MASK_DEF | \
+ MLXPLAT_CPLD_AGGR_MASK_CARRIER)
+#define MLXPLAT_CPLD_LOW_AGGRCX_MASK 0xc1
/* Default I2C parent bus number */
#define MLXPLAT_CPLD_PHYS_ADAPTER_DEF_NR 1
/* Maximum number of possible physical buses equipped on system */
#define MLXPLAT_CPLD_MAX_PHYS_ADAPTER_NUM 16
+#define MLXPLAT_CPLD_MAX_PHYS_EXT_ADAPTER_NUM 24
/* Number of channels in group */
#define MLXPLAT_CPLD_GRP_CHNL_NUM 8
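One note on the new capability mask, since a hex bit index inside GENMASK() is easy to misread: MLXPLAT_CPLD_I2C_CAP_BIT is 0x04, i.e. bit 4, so the mask spans bits 5:4 of the capability register. A compile-time check makes the arithmetic concrete:

	/* GENMASK(h, l) covers bits l..h inclusive: GENMASK(5, 4) == 0x30. */
	BUILD_BUG_ON(MLXPLAT_CPLD_I2C_CAP_MASK != 0x30);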
@@ -130,14 +154,16 @@
/* Start channel numbers */
#define MLXPLAT_CPLD_CH1 2
#define MLXPLAT_CPLD_CH2 10
+#define MLXPLAT_CPLD_CH3 18
/* Number of LPC attached MUX platform devices */
-#define MLXPLAT_CPLD_LPC_MUX_DEVS 2
+#define MLXPLAT_CPLD_LPC_MUX_DEVS 3
/* Hotplug devices adapter numbers */
#define MLXPLAT_CPLD_NR_NONE -1
#define MLXPLAT_CPLD_PSU_DEFAULT_NR 10
#define MLXPLAT_CPLD_PSU_MSNXXXX_NR 4
+#define MLXPLAT_CPLD_PSU_MSNXXXX_NR2 3
#define MLXPLAT_CPLD_FAN1_DEFAULT_NR 11
#define MLXPLAT_CPLD_FAN2_DEFAULT_NR 12
#define MLXPLAT_CPLD_FAN3_DEFAULT_NR 13
@@ -187,8 +213,24 @@ static const struct resource mlxplat_lpc_resources[] = {
IORESOURCE_IO),
};
+/* Platform i2c next generation systems data */
+static struct mlxreg_core_data mlxplat_mlxcpld_i2c_ng_items_data[] = {
+ {
+ .reg = MLXPLAT_CPLD_LPC_REG_PSU_I2C_CAP_OFFSET,
+ .mask = MLXPLAT_CPLD_I2C_CAP_MASK,
+ .bit = MLXPLAT_CPLD_I2C_CAP_BIT,
+ },
+};
+
+static struct mlxreg_core_item mlxplat_mlxcpld_i2c_ng_items[] = {
+ {
+ .data = mlxplat_mlxcpld_i2c_ng_items_data,
+ },
+};
+
/* Platform next generation systems i2c data */
static struct mlxreg_core_hotplug_platform_data mlxplat_mlxcpld_i2c_ng_data = {
+ .items = mlxplat_mlxcpld_i2c_ng_items,
.cell = MLXPLAT_CPLD_LPC_REG_AGGR_OFFSET,
.mask = MLXPLAT_CPLD_AGGR_MASK_COMEX,
.cell_low = MLXPLAT_CPLD_LPC_REG_AGGRCO_OFFSET,
@@ -213,7 +255,7 @@ static const int mlxplat_default_channels[][MLXPLAT_CPLD_GRP_CHNL_NUM] = {
static const int mlxplat_msn21xx_channels[] = { 1, 2, 3, 4, 5, 6, 7, 8 };
/* Platform mux data */
-static struct i2c_mux_reg_platform_data mlxplat_mux_data[] = {
+static struct i2c_mux_reg_platform_data mlxplat_default_mux_data[] = {
{
.parent = 1,
.base_nr = MLXPLAT_CPLD_CH1,
@@ -233,6 +275,40 @@ static struct i2c_mux_reg_platform_data mlxplat_mux_data[] = {
};
+/* Platform mux configuration variables */
+static int mlxplat_max_adap_num;
+static int mlxplat_mux_num;
+static struct i2c_mux_reg_platform_data *mlxplat_mux_data;
+
+/* Platform extended mux data */
+static struct i2c_mux_reg_platform_data mlxplat_extended_mux_data[] = {
+ {
+ .parent = 1,
+ .base_nr = MLXPLAT_CPLD_CH1,
+ .write_only = 1,
+ .reg = (void __iomem *)MLXPLAT_CPLD_LPC_REG1,
+ .reg_size = 1,
+ .idle_in_use = 1,
+ },
+ {
+ .parent = 1,
+ .base_nr = MLXPLAT_CPLD_CH2,
+ .write_only = 1,
+ .reg = (void __iomem *)MLXPLAT_CPLD_LPC_REG3,
+ .reg_size = 1,
+ .idle_in_use = 1,
+ },
+ {
+ .parent = 1,
+ .base_nr = MLXPLAT_CPLD_CH3,
+ .write_only = 1,
+ .reg = (void __iomem *)MLXPLAT_CPLD_LPC_REG2,
+ .reg_size = 1,
+ .idle_in_use = 1,
+ },
+
+};
+
/* Platform hotplug devices */
static struct i2c_board_info mlxplat_mlxcpld_psu[] = {
{
@@ -276,6 +352,22 @@ static struct i2c_board_info mlxplat_mlxcpld_fan[] = {
},
};
+/* Platform hotplug comex carrier system family data */
+static struct mlxreg_core_data mlxplat_mlxcpld_comex_psu_items_data[] = {
+ {
+ .label = "psu1",
+ .reg = MLXPLAT_CPLD_LPC_REG_PSU_OFFSET,
+ .mask = BIT(0),
+ .hpdev.nr = MLXPLAT_CPLD_NR_NONE,
+ },
+ {
+ .label = "psu2",
+ .reg = MLXPLAT_CPLD_LPC_REG_PSU_OFFSET,
+ .mask = BIT(1),
+ .hpdev.nr = MLXPLAT_CPLD_NR_NONE,
+ },
+};
+
/* Platform hotplug default data */
static struct mlxreg_core_data mlxplat_mlxcpld_default_psu_items_data[] = {
{
@@ -390,6 +482,45 @@ static struct mlxreg_core_item mlxplat_mlxcpld_default_items[] = {
},
};
+static struct mlxreg_core_item mlxplat_mlxcpld_comex_items[] = {
+ {
+ .data = mlxplat_mlxcpld_comex_psu_items_data,
+ .aggr_mask = MLXPLAT_CPLD_AGGR_MASK_CARRIER,
+ .reg = MLXPLAT_CPLD_LPC_REG_PSU_OFFSET,
+ .mask = MLXPLAT_CPLD_PSU_MASK,
+ .count = ARRAY_SIZE(mlxplat_mlxcpld_psu),
+ .inversed = 1,
+ .health = false,
+ },
+ {
+ .data = mlxplat_mlxcpld_default_pwr_items_data,
+ .aggr_mask = MLXPLAT_CPLD_AGGR_MASK_CARRIER,
+ .reg = MLXPLAT_CPLD_LPC_REG_PWR_OFFSET,
+ .mask = MLXPLAT_CPLD_PWR_MASK,
+ .count = ARRAY_SIZE(mlxplat_mlxcpld_pwr),
+ .inversed = 0,
+ .health = false,
+ },
+ {
+ .data = mlxplat_mlxcpld_default_fan_items_data,
+ .aggr_mask = MLXPLAT_CPLD_AGGR_MASK_CARRIER,
+ .reg = MLXPLAT_CPLD_LPC_REG_FAN_OFFSET,
+ .mask = MLXPLAT_CPLD_FAN_MASK,
+ .count = ARRAY_SIZE(mlxplat_mlxcpld_fan),
+ .inversed = 1,
+ .health = false,
+ },
+ {
+ .data = mlxplat_mlxcpld_default_asic_items_data,
+ .aggr_mask = MLXPLAT_CPLD_AGGR_ASIC_MASK_DEF,
+ .reg = MLXPLAT_CPLD_LPC_REG_ASIC_HEALTH_OFFSET,
+ .mask = MLXPLAT_CPLD_ASIC_MASK,
+ .count = ARRAY_SIZE(mlxplat_mlxcpld_default_asic_items_data),
+ .inversed = 0,
+ .health = true,
+ },
+};
+
static
struct mlxreg_core_hotplug_platform_data mlxplat_mlxcpld_default_data = {
.items = mlxplat_mlxcpld_default_items,
@@ -400,6 +531,16 @@ struct mlxreg_core_hotplug_platform_data mlxplat_mlxcpld_default_data = {
.mask_low = MLXPLAT_CPLD_LOW_AGGR_MASK_LOW,
};
+static
+struct mlxreg_core_hotplug_platform_data mlxplat_mlxcpld_comex_data = {
+ .items = mlxplat_mlxcpld_comex_items,
+ .counter = ARRAY_SIZE(mlxplat_mlxcpld_comex_items),
+ .cell = MLXPLAT_CPLD_LPC_REG_AGGR_OFFSET,
+ .mask = MLXPLAT_CPLD_AGGR_MASK_CARR_DEF,
+ .cell_low = MLXPLAT_CPLD_LPC_REG_AGGRCX_OFFSET,
+ .mask_low = MLXPLAT_CPLD_LOW_AGGRCX_MASK,
+};
+
static struct mlxreg_core_data mlxplat_mlxcpld_msn21xx_pwr_items_data[] = {
{
.label = "pwr1",
@@ -723,6 +864,116 @@ struct mlxreg_core_hotplug_platform_data mlxplat_mlxcpld_default_ng_data = {
.mask_low = MLXPLAT_CPLD_LOW_AGGR_MASK_LOW,
};
+/* Platform hotplug extended system family data */
+static struct mlxreg_core_data mlxplat_mlxcpld_ext_psu_items_data[] = {
+ {
+ .label = "psu1",
+ .reg = MLXPLAT_CPLD_LPC_REG_PSU_OFFSET,
+ .mask = BIT(0),
+ .hpdev.nr = MLXPLAT_CPLD_NR_NONE,
+ },
+ {
+ .label = "psu2",
+ .reg = MLXPLAT_CPLD_LPC_REG_PSU_OFFSET,
+ .mask = BIT(1),
+ .hpdev.nr = MLXPLAT_CPLD_NR_NONE,
+ },
+ {
+ .label = "psu3",
+ .reg = MLXPLAT_CPLD_LPC_REG_PSU_OFFSET,
+ .mask = BIT(2),
+ .hpdev.nr = MLXPLAT_CPLD_NR_NONE,
+ },
+ {
+ .label = "psu4",
+ .reg = MLXPLAT_CPLD_LPC_REG_PSU_OFFSET,
+ .mask = BIT(3),
+ .hpdev.nr = MLXPLAT_CPLD_NR_NONE,
+ },
+};
+
+static struct mlxreg_core_data mlxplat_mlxcpld_ext_pwr_items_data[] = {
+ {
+ .label = "pwr1",
+ .reg = MLXPLAT_CPLD_LPC_REG_PWR_OFFSET,
+ .mask = BIT(0),
+ .hpdev.brdinfo = &mlxplat_mlxcpld_pwr[0],
+ .hpdev.nr = MLXPLAT_CPLD_PSU_MSNXXXX_NR,
+ },
+ {
+ .label = "pwr2",
+ .reg = MLXPLAT_CPLD_LPC_REG_PWR_OFFSET,
+ .mask = BIT(1),
+ .hpdev.brdinfo = &mlxplat_mlxcpld_pwr[1],
+ .hpdev.nr = MLXPLAT_CPLD_PSU_MSNXXXX_NR,
+ },
+ {
+ .label = "pwr3",
+ .reg = MLXPLAT_CPLD_LPC_REG_PWR_OFFSET,
+ .mask = BIT(2),
+ .hpdev.brdinfo = &mlxplat_mlxcpld_pwr[0],
+ .hpdev.nr = MLXPLAT_CPLD_PSU_MSNXXXX_NR2,
+ },
+ {
+ .label = "pwr4",
+ .reg = MLXPLAT_CPLD_LPC_REG_PWR_OFFSET,
+ .mask = BIT(3),
+ .hpdev.brdinfo = &mlxplat_mlxcpld_pwr[1],
+ .hpdev.nr = MLXPLAT_CPLD_PSU_MSNXXXX_NR2,
+ },
+};
+
+static struct mlxreg_core_item mlxplat_mlxcpld_ext_items[] = {
+ {
+ .data = mlxplat_mlxcpld_ext_psu_items_data,
+ .aggr_mask = MLXPLAT_CPLD_AGGR_MASK_NG_DEF,
+ .reg = MLXPLAT_CPLD_LPC_REG_PSU_OFFSET,
+ .mask = MLXPLAT_CPLD_PSU_EXT_MASK,
+ .capability = MLXPLAT_CPLD_LPC_REG_PSU_I2C_CAP_OFFSET,
+ .count = ARRAY_SIZE(mlxplat_mlxcpld_ext_psu_items_data),
+ .inversed = 1,
+ .health = false,
+ },
+ {
+ .data = mlxplat_mlxcpld_ext_pwr_items_data,
+ .aggr_mask = MLXPLAT_CPLD_AGGR_MASK_NG_DEF,
+ .reg = MLXPLAT_CPLD_LPC_REG_PWR_OFFSET,
+ .mask = MLXPLAT_CPLD_PWR_EXT_MASK,
+ .capability = MLXPLAT_CPLD_LPC_REG_PSU_I2C_CAP_OFFSET,
+ .count = ARRAY_SIZE(mlxplat_mlxcpld_ext_pwr_items_data),
+ .inversed = 0,
+ .health = false,
+ },
+ {
+ .data = mlxplat_mlxcpld_default_ng_fan_items_data,
+ .aggr_mask = MLXPLAT_CPLD_AGGR_MASK_NG_DEF,
+ .reg = MLXPLAT_CPLD_LPC_REG_FAN_OFFSET,
+ .mask = MLXPLAT_CPLD_FAN_NG_MASK,
+ .count = ARRAY_SIZE(mlxplat_mlxcpld_default_ng_fan_items_data),
+ .inversed = 1,
+ .health = false,
+ },
+ {
+ .data = mlxplat_mlxcpld_default_asic_items_data,
+ .aggr_mask = MLXPLAT_CPLD_AGGR_MASK_NG_DEF,
+ .reg = MLXPLAT_CPLD_LPC_REG_ASIC_HEALTH_OFFSET,
+ .mask = MLXPLAT_CPLD_ASIC_MASK,
+ .count = ARRAY_SIZE(mlxplat_mlxcpld_default_asic_items_data),
+ .inversed = 0,
+ .health = true,
+ },
+};
+
+static
+struct mlxreg_core_hotplug_platform_data mlxplat_mlxcpld_ext_data = {
+ .items = mlxplat_mlxcpld_ext_items,
+ .counter = ARRAY_SIZE(mlxplat_mlxcpld_ext_items),
+ .cell = MLXPLAT_CPLD_LPC_REG_AGGR_OFFSET,
+ .mask = MLXPLAT_CPLD_AGGR_MASK_NG_DEF | MLXPLAT_CPLD_AGGR_MASK_COMEX,
+ .cell_low = MLXPLAT_CPLD_LPC_REG_AGGRLO_OFFSET,
+ .mask_low = MLXPLAT_CPLD_LOW_AGGR_MASK_LOW,
+};
+
/* Platform led default data */
static struct mlxreg_core_data mlxplat_mlxcpld_default_led_data[] = {
{
@@ -964,6 +1215,80 @@ static struct mlxreg_core_platform_data mlxplat_default_ng_led_data = {
.counter = ARRAY_SIZE(mlxplat_mlxcpld_default_ng_led_data),
};
+/* Platform led for Comex based 100GbE systems */
+static struct mlxreg_core_data mlxplat_mlxcpld_comex_100G_led_data[] = {
+ {
+ .label = "status:green",
+ .reg = MLXPLAT_CPLD_LPC_REG_LED1_OFFSET,
+ .mask = MLXPLAT_CPLD_LED_LO_NIBBLE_MASK,
+ },
+ {
+ .label = "status:red",
+ .reg = MLXPLAT_CPLD_LPC_REG_LED1_OFFSET,
+		.mask = MLXPLAT_CPLD_LED_LO_NIBBLE_MASK,
+ },
+ {
+ .label = "psu:green",
+ .reg = MLXPLAT_CPLD_LPC_REG_LED1_OFFSET,
+ .mask = MLXPLAT_CPLD_LED_HI_NIBBLE_MASK,
+ },
+ {
+ .label = "psu:red",
+ .reg = MLXPLAT_CPLD_LPC_REG_LED1_OFFSET,
+ .mask = MLXPLAT_CPLD_LED_HI_NIBBLE_MASK,
+ },
+ {
+ .label = "fan1:green",
+ .reg = MLXPLAT_CPLD_LPC_REG_LED2_OFFSET,
+ .mask = MLXPLAT_CPLD_LED_LO_NIBBLE_MASK,
+ },
+ {
+ .label = "fan1:red",
+ .reg = MLXPLAT_CPLD_LPC_REG_LED2_OFFSET,
+ .mask = MLXPLAT_CPLD_LED_LO_NIBBLE_MASK,
+ },
+ {
+ .label = "fan2:green",
+ .reg = MLXPLAT_CPLD_LPC_REG_LED2_OFFSET,
+ .mask = MLXPLAT_CPLD_LED_HI_NIBBLE_MASK,
+ },
+ {
+ .label = "fan2:red",
+ .reg = MLXPLAT_CPLD_LPC_REG_LED2_OFFSET,
+ .mask = MLXPLAT_CPLD_LED_HI_NIBBLE_MASK,
+ },
+ {
+ .label = "fan3:green",
+ .reg = MLXPLAT_CPLD_LPC_REG_LED3_OFFSET,
+ .mask = MLXPLAT_CPLD_LED_LO_NIBBLE_MASK,
+ },
+ {
+ .label = "fan3:red",
+ .reg = MLXPLAT_CPLD_LPC_REG_LED3_OFFSET,
+ .mask = MLXPLAT_CPLD_LED_LO_NIBBLE_MASK,
+ },
+ {
+ .label = "fan4:green",
+ .reg = MLXPLAT_CPLD_LPC_REG_LED3_OFFSET,
+ .mask = MLXPLAT_CPLD_LED_HI_NIBBLE_MASK,
+ },
+ {
+ .label = "fan4:red",
+ .reg = MLXPLAT_CPLD_LPC_REG_LED3_OFFSET,
+ .mask = MLXPLAT_CPLD_LED_HI_NIBBLE_MASK,
+ },
+ {
+ .label = "uid:blue",
+ .reg = MLXPLAT_CPLD_LPC_REG_LED5_OFFSET,
+ .mask = MLXPLAT_CPLD_LED_LO_NIBBLE_MASK,
+ },
+};
+
+static struct mlxreg_core_platform_data mlxplat_comex_100G_led_data = {
+ .data = mlxplat_mlxcpld_comex_100G_led_data,
+ .counter = ARRAY_SIZE(mlxplat_mlxcpld_comex_100G_led_data),
+};
+
/* Platform register access default */
static struct mlxreg_core_data mlxplat_mlxcpld_default_regs_io_data[] = {
{
@@ -1157,6 +1482,12 @@ static struct mlxreg_core_data mlxplat_mlxcpld_msn21xx_regs_io_data[] = {
.mode = 0200,
},
{
+ .label = "select_iio",
+ .reg = MLXPLAT_CPLD_LPC_REG_GP2_OFFSET,
+ .mask = GENMASK(7, 0) & ~BIT(6),
+ .mode = 0644,
+ },
+ {
.label = "asic_health",
.reg = MLXPLAT_CPLD_LPC_REG_ASIC_HEALTH_OFFSET,
.mask = MLXPLAT_CPLD_ASIC_MASK,
@@ -1245,6 +1576,18 @@ static struct mlxreg_core_data mlxplat_mlxcpld_default_ng_regs_io_data[] = {
.mode = 0444,
},
{
+ .label = "reset_platform",
+ .reg = MLXPLAT_CPLD_LPC_REG_RST_CAUSE1_OFFSET,
+ .mask = GENMASK(7, 0) & ~BIT(4),
+ .mode = 0444,
+ },
+ {
+ .label = "reset_soc",
+ .reg = MLXPLAT_CPLD_LPC_REG_RST_CAUSE1_OFFSET,
+ .mask = GENMASK(7, 0) & ~BIT(5),
+ .mode = 0444,
+ },
+ {
.label = "reset_comex_wd",
.reg = MLXPLAT_CPLD_LPC_REG_RST_CAUSE1_OFFSET,
.mask = GENMASK(7, 0) & ~BIT(6),
@@ -1263,6 +1606,12 @@ static struct mlxreg_core_data mlxplat_mlxcpld_default_ng_regs_io_data[] = {
.mode = 0444,
},
{
+ .label = "reset_sw_pwr_off",
+ .reg = MLXPLAT_CPLD_LPC_REG_RST_CAUSE2_OFFSET,
+ .mask = GENMASK(7, 0) & ~BIT(2),
+ .mode = 0444,
+ },
+ {
.label = "reset_comex_thermal",
.reg = MLXPLAT_CPLD_LPC_REG_RST_CAUSE2_OFFSET,
.mask = GENMASK(7, 0) & ~BIT(3),
@@ -1275,6 +1624,12 @@ static struct mlxreg_core_data mlxplat_mlxcpld_default_ng_regs_io_data[] = {
.mode = 0444,
},
{
+ .label = "reset_ac_pwr_fail",
+ .reg = MLXPLAT_CPLD_LPC_REG_RST_CAUSE2_OFFSET,
+ .mask = GENMASK(7, 0) & ~BIT(6),
+ .mode = 0444,
+ },
+ {
.label = "psu1_on",
.reg = MLXPLAT_CPLD_LPC_REG_GP1_OFFSET,
.mask = GENMASK(7, 0) & ~BIT(0),
@@ -1317,6 +1672,43 @@ static struct mlxreg_core_data mlxplat_mlxcpld_default_ng_regs_io_data[] = {
.bit = GENMASK(7, 0),
.mode = 0444,
},
+ {
+ .label = "voltreg_update_status",
+ .reg = MLXPLAT_CPLD_LPC_REG_GP0_RO_OFFSET,
+ .mask = MLXPLAT_CPLD_VOLTREG_UPD_MASK,
+ .bit = 5,
+ .mode = 0444,
+ },
+ {
+ .label = "vpd_wp",
+ .reg = MLXPLAT_CPLD_LPC_REG_GP0_OFFSET,
+ .mask = GENMASK(7, 0) & ~BIT(3),
+ .mode = 0644,
+ },
+ {
+ .label = "pcie_asic_reset_dis",
+ .reg = MLXPLAT_CPLD_LPC_REG_GP0_OFFSET,
+ .mask = GENMASK(7, 0) & ~BIT(4),
+ .mode = 0644,
+ },
+ {
+ .label = "config1",
+ .reg = MLXPLAT_CPLD_LPC_REG_CONFIG1_OFFSET,
+ .bit = GENMASK(7, 0),
+ .mode = 0444,
+ },
+ {
+ .label = "config2",
+ .reg = MLXPLAT_CPLD_LPC_REG_CONFIG2_OFFSET,
+ .bit = GENMASK(7, 0),
+ .mode = 0444,
+ },
+ {
+ .label = "ufm_version",
+ .reg = MLXPLAT_CPLD_LPC_REG_UFM_VERSION_OFFSET,
+ .bit = GENMASK(7, 0),
+ .mode = 0444,
+ },
};
static struct mlxreg_core_platform_data mlxplat_default_ng_regs_io_data = {
@@ -1575,6 +1967,7 @@ static bool mlxplat_mlxcpld_writeable_reg(struct device *dev, unsigned int reg)
case MLXPLAT_CPLD_LPC_REG_LED3_OFFSET:
case MLXPLAT_CPLD_LPC_REG_LED4_OFFSET:
case MLXPLAT_CPLD_LPC_REG_LED5_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_GP0_OFFSET:
case MLXPLAT_CPLD_LPC_REG_GP1_OFFSET:
case MLXPLAT_CPLD_LPC_REG_WP1_OFFSET:
case MLXPLAT_CPLD_LPC_REG_GP2_OFFSET:
@@ -1582,6 +1975,7 @@ static bool mlxplat_mlxcpld_writeable_reg(struct device *dev, unsigned int reg)
case MLXPLAT_CPLD_LPC_REG_AGGR_MASK_OFFSET:
case MLXPLAT_CPLD_LPC_REG_AGGRLO_MASK_OFFSET:
case MLXPLAT_CPLD_LPC_REG_AGGRCO_MASK_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_AGGRCX_MASK_OFFSET:
case MLXPLAT_CPLD_LPC_REG_ASIC_EVENT_OFFSET:
case MLXPLAT_CPLD_LPC_REG_ASIC_MASK_OFFSET:
case MLXPLAT_CPLD_LPC_REG_PSU_EVENT_OFFSET:
@@ -1621,6 +2015,8 @@ static bool mlxplat_mlxcpld_readable_reg(struct device *dev, unsigned int reg)
case MLXPLAT_CPLD_LPC_REG_LED4_OFFSET:
case MLXPLAT_CPLD_LPC_REG_LED5_OFFSET:
case MLXPLAT_CPLD_LPC_REG_FAN_DIRECTION:
+ case MLXPLAT_CPLD_LPC_REG_GP0_RO_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_GP0_OFFSET:
case MLXPLAT_CPLD_LPC_REG_GP1_OFFSET:
case MLXPLAT_CPLD_LPC_REG_WP1_OFFSET:
case MLXPLAT_CPLD_LPC_REG_GP2_OFFSET:
@@ -1631,6 +2027,8 @@ static bool mlxplat_mlxcpld_readable_reg(struct device *dev, unsigned int reg)
case MLXPLAT_CPLD_LPC_REG_AGGRLO_MASK_OFFSET:
case MLXPLAT_CPLD_LPC_REG_AGGRCO_OFFSET:
case MLXPLAT_CPLD_LPC_REG_AGGRCO_MASK_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_AGGRCX_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_AGGRCX_MASK_OFFSET:
case MLXPLAT_CPLD_LPC_REG_ASIC_HEALTH_OFFSET:
case MLXPLAT_CPLD_LPC_REG_ASIC_EVENT_OFFSET:
case MLXPLAT_CPLD_LPC_REG_ASIC_MASK_OFFSET:
@@ -1671,6 +2069,10 @@ static bool mlxplat_mlxcpld_readable_reg(struct device *dev, unsigned int reg)
case MLXPLAT_CPLD_LPC_REG_FAN_CAP2_OFFSET:
case MLXPLAT_CPLD_LPC_REG_FAN_DRW_CAP_OFFSET:
case MLXPLAT_CPLD_LPC_REG_TACHO_SPEED_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_PSU_I2C_CAP_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_CONFIG1_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_CONFIG2_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_UFM_VERSION_OFFSET:
return true;
}
return false;
@@ -1692,6 +2094,8 @@ static bool mlxplat_mlxcpld_volatile_reg(struct device *dev, unsigned int reg)
case MLXPLAT_CPLD_LPC_REG_LED4_OFFSET:
case MLXPLAT_CPLD_LPC_REG_LED5_OFFSET:
case MLXPLAT_CPLD_LPC_REG_FAN_DIRECTION:
+ case MLXPLAT_CPLD_LPC_REG_GP0_RO_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_GP0_OFFSET:
case MLXPLAT_CPLD_LPC_REG_GP1_OFFSET:
case MLXPLAT_CPLD_LPC_REG_GP2_OFFSET:
case MLXPLAT_CPLD_LPC_REG_AGGR_OFFSET:
@@ -1700,6 +2104,8 @@ static bool mlxplat_mlxcpld_volatile_reg(struct device *dev, unsigned int reg)
case MLXPLAT_CPLD_LPC_REG_AGGRLO_MASK_OFFSET:
case MLXPLAT_CPLD_LPC_REG_AGGRCO_OFFSET:
case MLXPLAT_CPLD_LPC_REG_AGGRCO_MASK_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_AGGRCX_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_AGGRCX_MASK_OFFSET:
case MLXPLAT_CPLD_LPC_REG_ASIC_HEALTH_OFFSET:
case MLXPLAT_CPLD_LPC_REG_ASIC_EVENT_OFFSET:
case MLXPLAT_CPLD_LPC_REG_ASIC_MASK_OFFSET:
@@ -1734,6 +2140,10 @@ static bool mlxplat_mlxcpld_volatile_reg(struct device *dev, unsigned int reg)
case MLXPLAT_CPLD_LPC_REG_FAN_CAP2_OFFSET:
case MLXPLAT_CPLD_LPC_REG_FAN_DRW_CAP_OFFSET:
case MLXPLAT_CPLD_LPC_REG_TACHO_SPEED_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_PSU_I2C_CAP_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_CONFIG1_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_CONFIG2_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_UFM_VERSION_OFFSET:
return true;
}
return false;
@@ -1751,6 +2161,19 @@ static const struct reg_default mlxplat_mlxcpld_regmap_ng[] = {
{ MLXPLAT_CPLD_LPC_REG_WD_CLEAR_WP_OFFSET, 0x00 },
};
+static const struct reg_default mlxplat_mlxcpld_regmap_comex_default[] = {
+ { MLXPLAT_CPLD_LPC_REG_AGGRCX_MASK_OFFSET,
+ MLXPLAT_CPLD_LOW_AGGRCX_MASK },
+ { MLXPLAT_CPLD_LPC_REG_PWM_CONTROL_OFFSET, 0x00 },
+};
+
+static const struct reg_default mlxplat_mlxcpld_regmap_ng400[] = {
+ { MLXPLAT_CPLD_LPC_REG_PWM_CONTROL_OFFSET, 0x00 },
+ { MLXPLAT_CPLD_LPC_REG_WD1_ACT_OFFSET, 0x00 },
+ { MLXPLAT_CPLD_LPC_REG_WD2_ACT_OFFSET, 0x00 },
+ { MLXPLAT_CPLD_LPC_REG_WD3_ACT_OFFSET, 0x00 },
+};
+
struct mlxplat_mlxcpld_regmap_context {
void __iomem *base;
};
@@ -1803,6 +2226,34 @@ static const struct regmap_config mlxplat_mlxcpld_regmap_config_ng = {
.reg_write = mlxplat_mlxcpld_reg_write,
};
+static const struct regmap_config mlxplat_mlxcpld_regmap_config_comex = {
+ .reg_bits = 8,
+ .val_bits = 8,
+ .max_register = 255,
+ .cache_type = REGCACHE_FLAT,
+ .writeable_reg = mlxplat_mlxcpld_writeable_reg,
+ .readable_reg = mlxplat_mlxcpld_readable_reg,
+ .volatile_reg = mlxplat_mlxcpld_volatile_reg,
+ .reg_defaults = mlxplat_mlxcpld_regmap_comex_default,
+ .num_reg_defaults = ARRAY_SIZE(mlxplat_mlxcpld_regmap_comex_default),
+ .reg_read = mlxplat_mlxcpld_reg_read,
+ .reg_write = mlxplat_mlxcpld_reg_write,
+};
+
+static const struct regmap_config mlxplat_mlxcpld_regmap_config_ng400 = {
+ .reg_bits = 8,
+ .val_bits = 8,
+ .max_register = 255,
+ .cache_type = REGCACHE_FLAT,
+ .writeable_reg = mlxplat_mlxcpld_writeable_reg,
+ .readable_reg = mlxplat_mlxcpld_readable_reg,
+ .volatile_reg = mlxplat_mlxcpld_volatile_reg,
+ .reg_defaults = mlxplat_mlxcpld_regmap_ng400,
+ .num_reg_defaults = ARRAY_SIZE(mlxplat_mlxcpld_regmap_ng400),
+ .reg_read = mlxplat_mlxcpld_reg_read,
+ .reg_write = mlxplat_mlxcpld_reg_write,
+};
+
static struct resource mlxplat_mlxcpld_resources[] = {
[0] = DEFINE_RES_IRQ_NAMED(17, "mlxreg-hotplug"),
};
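Three near-identical regmap configs now coexist because the register cache only tracks what reg_defaults lists, and each system family needs different sticky defaults (the comex table seeds its aggregation mask and PWM control, ng400 additionally zeroes the three watchdog action registers). Instantiation follows the usual bus-less regmap pattern; the function and parameter names below are illustrative:

	static struct regmap *
	example_init_regmap(struct device *dev,
			    struct mlxplat_mlxcpld_regmap_context *ctx)
	{
		/* No bus: I/O goes through .reg_read/.reg_write above. */
		return devm_regmap_init(dev, NULL, ctx,
					&mlxplat_mlxcpld_regmap_config_ng400);
	}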
@@ -1821,7 +2272,10 @@ static int __init mlxplat_dmi_default_matched(const struct dmi_system_id *dmi)
{
int i;
- for (i = 0; i < ARRAY_SIZE(mlxplat_mux_data); i++) {
+ mlxplat_max_adap_num = MLXPLAT_CPLD_MAX_PHYS_ADAPTER_NUM;
+ mlxplat_mux_num = ARRAY_SIZE(mlxplat_default_mux_data);
+ mlxplat_mux_data = mlxplat_default_mux_data;
+ for (i = 0; i < mlxplat_mux_num; i++) {
mlxplat_mux_data[i].values = mlxplat_default_channels[i];
mlxplat_mux_data[i].n_values =
ARRAY_SIZE(mlxplat_default_channels[i]);
@@ -1834,13 +2288,16 @@ static int __init mlxplat_dmi_default_matched(const struct dmi_system_id *dmi)
mlxplat_wd_data[0] = &mlxplat_mlxcpld_wd_set_type1[0];
return 1;
-};
+}
static int __init mlxplat_dmi_msn21xx_matched(const struct dmi_system_id *dmi)
{
int i;
- for (i = 0; i < ARRAY_SIZE(mlxplat_mux_data); i++) {
+ mlxplat_max_adap_num = MLXPLAT_CPLD_MAX_PHYS_ADAPTER_NUM;
+ mlxplat_mux_num = ARRAY_SIZE(mlxplat_default_mux_data);
+ mlxplat_mux_data = mlxplat_default_mux_data;
+ for (i = 0; i < mlxplat_mux_num; i++) {
mlxplat_mux_data[i].values = mlxplat_msn21xx_channels;
mlxplat_mux_data[i].n_values =
ARRAY_SIZE(mlxplat_msn21xx_channels);
@@ -1853,13 +2310,16 @@ static int __init mlxplat_dmi_msn21xx_matched(const struct dmi_system_id *dmi)
mlxplat_wd_data[0] = &mlxplat_mlxcpld_wd_set_type1[0];
return 1;
-};
+}
static int __init mlxplat_dmi_msn274x_matched(const struct dmi_system_id *dmi)
{
int i;
- for (i = 0; i < ARRAY_SIZE(mlxplat_mux_data); i++) {
+ mlxplat_max_adap_num = MLXPLAT_CPLD_MAX_PHYS_ADAPTER_NUM;
+ mlxplat_mux_num = ARRAY_SIZE(mlxplat_default_mux_data);
+ mlxplat_mux_data = mlxplat_default_mux_data;
+ for (i = 0; i < mlxplat_mux_num; i++) {
mlxplat_mux_data[i].values = mlxplat_msn21xx_channels;
mlxplat_mux_data[i].n_values =
ARRAY_SIZE(mlxplat_msn21xx_channels);
@@ -1872,13 +2332,16 @@ static int __init mlxplat_dmi_msn274x_matched(const struct dmi_system_id *dmi)
mlxplat_wd_data[0] = &mlxplat_mlxcpld_wd_set_type1[0];
return 1;
-};
+}
static int __init mlxplat_dmi_msn201x_matched(const struct dmi_system_id *dmi)
{
int i;
- for (i = 0; i < ARRAY_SIZE(mlxplat_mux_data); i++) {
+ mlxplat_max_adap_num = MLXPLAT_CPLD_MAX_PHYS_ADAPTER_NUM;
+ mlxplat_mux_num = ARRAY_SIZE(mlxplat_default_mux_data);
+ mlxplat_mux_data = mlxplat_default_mux_data;
+ for (i = 0; i < mlxplat_mux_num; i++) {
mlxplat_mux_data[i].values = mlxplat_msn21xx_channels;
mlxplat_mux_data[i].n_values =
ARRAY_SIZE(mlxplat_msn21xx_channels);
@@ -1891,13 +2354,16 @@ static int __init mlxplat_dmi_msn201x_matched(const struct dmi_system_id *dmi)
mlxplat_wd_data[0] = &mlxplat_mlxcpld_wd_set_type1[0];
return 1;
-};
+}
static int __init mlxplat_dmi_qmb7xx_matched(const struct dmi_system_id *dmi)
{
int i;
- for (i = 0; i < ARRAY_SIZE(mlxplat_mux_data); i++) {
+ mlxplat_max_adap_num = MLXPLAT_CPLD_MAX_PHYS_ADAPTER_NUM;
+ mlxplat_mux_num = ARRAY_SIZE(mlxplat_default_mux_data);
+ mlxplat_mux_data = mlxplat_default_mux_data;
+ for (i = 0; i < mlxplat_mux_num; i++) {
mlxplat_mux_data[i].values = mlxplat_msn21xx_channels;
mlxplat_mux_data[i].n_values =
ARRAY_SIZE(mlxplat_msn21xx_channels);
@@ -1914,7 +2380,57 @@ static int __init mlxplat_dmi_qmb7xx_matched(const struct dmi_system_id *dmi)
mlxplat_regmap_config = &mlxplat_mlxcpld_regmap_config_ng;
return 1;
-};
+}
+
+static int __init mlxplat_dmi_comex_matched(const struct dmi_system_id *dmi)
+{
+ int i;
+
+ mlxplat_max_adap_num = MLXPLAT_CPLD_MAX_PHYS_EXT_ADAPTER_NUM;
+ mlxplat_mux_num = ARRAY_SIZE(mlxplat_extended_mux_data);
+ mlxplat_mux_data = mlxplat_extended_mux_data;
+ for (i = 0; i < mlxplat_mux_num; i++) {
+ mlxplat_mux_data[i].values = mlxplat_msn21xx_channels;
+ mlxplat_mux_data[i].n_values =
+ ARRAY_SIZE(mlxplat_msn21xx_channels);
+ }
+ mlxplat_hotplug = &mlxplat_mlxcpld_comex_data;
+ mlxplat_hotplug->deferred_nr = MLXPLAT_CPLD_MAX_PHYS_EXT_ADAPTER_NUM;
+ mlxplat_led = &mlxplat_comex_100G_led_data;
+ mlxplat_regs_io = &mlxplat_default_ng_regs_io_data;
+ mlxplat_fan = &mlxplat_default_fan_data;
+ for (i = 0; i < ARRAY_SIZE(mlxplat_mlxcpld_wd_set_type2); i++)
+ mlxplat_wd_data[i] = &mlxplat_mlxcpld_wd_set_type2[i];
+ mlxplat_regmap_config = &mlxplat_mlxcpld_regmap_config_comex;
+
+ return 1;
+}
+
+static int __init mlxplat_dmi_ng400_matched(const struct dmi_system_id *dmi)
+{
+ int i;
+
+ mlxplat_max_adap_num = MLXPLAT_CPLD_MAX_PHYS_ADAPTER_NUM;
+ mlxplat_mux_num = ARRAY_SIZE(mlxplat_default_mux_data);
+ mlxplat_mux_data = mlxplat_default_mux_data;
+ for (i = 0; i < mlxplat_mux_num; i++) {
+ mlxplat_mux_data[i].values = mlxplat_msn21xx_channels;
+ mlxplat_mux_data[i].n_values =
+ ARRAY_SIZE(mlxplat_msn21xx_channels);
+ }
+ mlxplat_hotplug = &mlxplat_mlxcpld_ext_data;
+ mlxplat_hotplug->deferred_nr =
+ mlxplat_msn21xx_channels[MLXPLAT_CPLD_GRP_CHNL_NUM - 1];
+ mlxplat_led = &mlxplat_default_ng_led_data;
+ mlxplat_regs_io = &mlxplat_default_ng_regs_io_data;
+ mlxplat_fan = &mlxplat_default_fan_data;
+ for (i = 0; i < ARRAY_SIZE(mlxplat_mlxcpld_wd_set_type2); i++)
+ mlxplat_wd_data[i] = &mlxplat_mlxcpld_wd_set_type2[i];
+ mlxplat_i2c = &mlxplat_mlxcpld_i2c_ng_data;
+ mlxplat_regmap_config = &mlxplat_mlxcpld_regmap_config_ng400;
+
+ return 1;
+}
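Unlike the comex callback just above, ng400 keeps the default mux tables and derives its hotplug deferral point from the channel map rather than from a fixed constant:

	/* mlxplat_msn21xx_channels[MLXPLAT_CPLD_GRP_CHNL_NUM - 1] is
	 * channels[7] == 8, so hotplug registration waits until the
	 * highest mux channel (i2c adapter 8) has been created. */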
static const struct dmi_system_id mlxplat_dmi_table[] __initconst = {
{
@@ -1954,6 +2470,18 @@ static const struct dmi_system_id mlxplat_dmi_table[] __initconst = {
},
},
{
+ .callback = mlxplat_dmi_comex_matched,
+ .matches = {
+ DMI_MATCH(DMI_BOARD_NAME, "VMOD0009"),
+ },
+ },
+ {
+ .callback = mlxplat_dmi_ng400_matched,
+ .matches = {
+ DMI_MATCH(DMI_BOARD_NAME, "VMOD0010"),
+ },
+ },
+ {
.callback = mlxplat_dmi_msn274x_matched,
.matches = {
DMI_MATCH(DMI_BOARD_VENDOR, "Mellanox Technologies"),
@@ -2043,7 +2571,7 @@ static int mlxplat_mlxcpld_verify_bus_topology(int *nr)
/* Scan adapters from expected id to verify it is free. */
*nr = MLXPLAT_CPLD_PHYS_ADAPTER_DEF_NR;
for (i = MLXPLAT_CPLD_PHYS_ADAPTER_DEF_NR; i <
- MLXPLAT_CPLD_MAX_PHYS_ADAPTER_NUM; i++) {
+ mlxplat_max_adap_num; i++) {
search_adap = i2c_get_adapter(i);
if (search_adap) {
i2c_put_adapter(search_adap);
@@ -2057,12 +2585,12 @@ static int mlxplat_mlxcpld_verify_bus_topology(int *nr)
}
/* Return with error if free id for adapter is not found. */
- if (i == MLXPLAT_CPLD_MAX_PHYS_ADAPTER_NUM)
+ if (i == mlxplat_max_adap_num)
return -ENODEV;
/* Shift adapter ids, since expected parent adapter is not free. */
*nr = i;
- for (i = 0; i < ARRAY_SIZE(mlxplat_mux_data); i++) {
+ for (i = 0; i < mlxplat_mux_num; i++) {
shift = *nr - mlxplat_mux_data[i].parent;
mlxplat_mux_data[i].parent = *nr;
mlxplat_mux_data[i].base_nr += shift;
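The shift arithmetic in this hunk, worked once with hypothetical numbers: if the preferred parent id 1 is busy and the first free adapter id found is 4, then shift = 4 - 1 = 3, each mux is reparented to adapter 4, and the channel bases keep their distance from the parent:

	/* *nr = 4: parent 1 -> 4; base_nr 2 -> 5; base_nr 10 -> 13 */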
@@ -2118,7 +2646,7 @@ static int __init mlxplat_init(void)
if (nr < 0)
goto fail_alloc;
- nr = (nr == MLXPLAT_CPLD_MAX_PHYS_ADAPTER_NUM) ? -1 : nr;
+ nr = (nr == mlxplat_max_adap_num) ? -1 : nr;
if (mlxplat_i2c)
mlxplat_i2c->regmap = priv->regmap;
priv->pdev_i2c = platform_device_register_resndata(
@@ -2131,7 +2659,7 @@ static int __init mlxplat_init(void)
goto fail_alloc;
}
- for (i = 0; i < ARRAY_SIZE(mlxplat_mux_data); i++) {
+ for (i = 0; i < mlxplat_mux_num; i++) {
priv->pdev_mux[i] = platform_device_register_resndata(
&priv->pdev_i2c->dev,
"i2c-mux-reg", i, NULL,
@@ -2265,7 +2793,7 @@ static void __exit mlxplat_exit(void)
platform_device_unregister(priv->pdev_led);
platform_device_unregister(priv->pdev_hotplug);
- for (i = ARRAY_SIZE(mlxplat_mux_data) - 1; i >= 0 ; i--)
+ for (i = mlxplat_mux_num - 1; i >= 0 ; i--)
platform_device_unregister(priv->pdev_mux[i]);
platform_device_unregister(priv->pdev_i2c);
diff --git a/drivers/platform/x86/pmc_atom.c b/drivers/platform/x86/pmc_atom.c
index 52ef1419b671..3e3c66dfec2e 100644
--- a/drivers/platform/x86/pmc_atom.c
+++ b/drivers/platform/x86/pmc_atom.c
@@ -489,7 +489,7 @@ static int pmc_setup_dev(struct pci_dev *pdev, const struct pci_device_id *ent)
pci_read_config_dword(pdev, PMC_BASE_ADDR_OFFSET, &pmc->base_addr);
pmc->base_addr &= PMC_BASE_ADDR_MASK;
- pmc->regmap = ioremap_nocache(pmc->base_addr, PMC_MMIO_REG_LEN);
+ pmc->regmap = ioremap(pmc->base_addr, PMC_MMIO_REG_LEN);
if (!pmc->regmap) {
dev_err(&pdev->dev, "error: ioremap failed\n");
return -ENOMEM;
diff --git a/drivers/platform/x86/samsung-laptop.c b/drivers/platform/x86/samsung-laptop.c
index 9b6a93ff41ff..23e40aa2176e 100644
--- a/drivers/platform/x86/samsung-laptop.c
+++ b/drivers/platform/x86/samsung-laptop.c
@@ -1394,7 +1394,7 @@ static int __init samsung_sabi_init(struct samsung_laptop *samsung)
int ret = 0;
int i;
- samsung->f0000_segment = ioremap_nocache(0xf0000, 0xffff);
+ samsung->f0000_segment = ioremap(0xf0000, 0xffff);
if (!samsung->f0000_segment) {
if (debug || force)
pr_err("Can't map the segment at 0xf0000\n");
@@ -1434,7 +1434,7 @@ static int __init samsung_sabi_init(struct samsung_laptop *samsung)
if (debug)
samsung_sabi_infos(samsung, loca, ifaceP);
- samsung->sabi_iface = ioremap_nocache(ifaceP, 16);
+ samsung->sabi_iface = ioremap(ifaceP, 16);
if (!samsung->sabi_iface) {
pr_err("Can't remap %x\n", ifaceP);
ret = -EINVAL;
diff --git a/drivers/platform/x86/touchscreen_dmi.c b/drivers/platform/x86/touchscreen_dmi.c
index 72205771d03d..93177e6e5ecd 100644
--- a/drivers/platform/x86/touchscreen_dmi.c
+++ b/drivers/platform/x86/touchscreen_dmi.c
@@ -219,8 +219,7 @@ static const struct property_entry digma_citi_e200_props[] = {
PROPERTY_ENTRY_U32("touchscreen-size-x", 1980),
PROPERTY_ENTRY_U32("touchscreen-size-y", 1500),
PROPERTY_ENTRY_BOOL("touchscreen-inverted-y"),
- PROPERTY_ENTRY_STRING("firmware-name",
- "gsl1686-digma_citi_e200.fw"),
+ PROPERTY_ENTRY_STRING("firmware-name", "gsl1686-digma_citi_e200.fw"),
PROPERTY_ENTRY_U32("silead,max-fingers", 10),
PROPERTY_ENTRY_BOOL("silead,home-button"),
{ }
@@ -236,8 +235,7 @@ static const struct property_entry gp_electronic_t701_props[] = {
PROPERTY_ENTRY_U32("touchscreen-size-y", 640),
PROPERTY_ENTRY_BOOL("touchscreen-inverted-x"),
PROPERTY_ENTRY_BOOL("touchscreen-inverted-y"),
- PROPERTY_ENTRY_STRING("firmware-name",
- "gsl1680-gp-electronic-t701.fw"),
+ PROPERTY_ENTRY_STRING("firmware-name", "gsl1680-gp-electronic-t701.fw"),
{ }
};
@@ -382,8 +380,7 @@ static const struct property_entry onda_v80_plus_v3_props[] = {
PROPERTY_ENTRY_U32("touchscreen-size-x", 1698),
PROPERTY_ENTRY_U32("touchscreen-size-y", 1140),
PROPERTY_ENTRY_BOOL("touchscreen-swapped-x-y"),
- PROPERTY_ENTRY_STRING("firmware-name",
- "gsl3676-onda-v80-plus-v3.fw"),
+ PROPERTY_ENTRY_STRING("firmware-name", "gsl3676-onda-v80-plus-v3.fw"),
PROPERTY_ENTRY_U32("silead,max-fingers", 10),
PROPERTY_ENTRY_BOOL("silead,home-button"),
{ }
@@ -398,8 +395,7 @@ static const struct property_entry onda_v820w_32g_props[] = {
PROPERTY_ENTRY_U32("touchscreen-size-x", 1665),
PROPERTY_ENTRY_U32("touchscreen-size-y", 1140),
PROPERTY_ENTRY_BOOL("touchscreen-swapped-x-y"),
- PROPERTY_ENTRY_STRING("firmware-name",
- "gsl1680-onda-v820w-32g.fw"),
+ PROPERTY_ENTRY_STRING("firmware-name", "gsl1680-onda-v820w-32g.fw"),
PROPERTY_ENTRY_U32("silead,max-fingers", 10),
PROPERTY_ENTRY_BOOL("silead,home-button"),
{ }
@@ -415,8 +411,7 @@ static const struct property_entry onda_v891w_v1_props[] = {
PROPERTY_ENTRY_U32("touchscreen-min-y", 8),
PROPERTY_ENTRY_U32("touchscreen-size-x", 1676),
PROPERTY_ENTRY_U32("touchscreen-size-y", 1130),
- PROPERTY_ENTRY_STRING("firmware-name",
- "gsl3680-onda-v891w-v1.fw"),
+ PROPERTY_ENTRY_STRING("firmware-name", "gsl3680-onda-v891w-v1.fw"),
PROPERTY_ENTRY_U32("silead,max-fingers", 10),
PROPERTY_ENTRY_BOOL("silead,home-button"),
{ }
@@ -433,8 +428,7 @@ static const struct property_entry onda_v891w_v3_props[] = {
PROPERTY_ENTRY_U32("touchscreen-size-x", 1625),
PROPERTY_ENTRY_U32("touchscreen-size-y", 1135),
PROPERTY_ENTRY_BOOL("touchscreen-inverted-y"),
- PROPERTY_ENTRY_STRING("firmware-name",
- "gsl3676-onda-v891w-v3.fw"),
+ PROPERTY_ENTRY_STRING("firmware-name", "gsl3676-onda-v891w-v3.fw"),
PROPERTY_ENTRY_U32("silead,max-fingers", 10),
PROPERTY_ENTRY_BOOL("silead,home-button"),
{ }
@@ -450,8 +444,7 @@ static const struct property_entry pipo_w2s_props[] = {
PROPERTY_ENTRY_U32("touchscreen-size-y", 880),
PROPERTY_ENTRY_BOOL("touchscreen-inverted-x"),
PROPERTY_ENTRY_BOOL("touchscreen-swapped-x-y"),
- PROPERTY_ENTRY_STRING("firmware-name",
- "gsl1680-pipo-w2s.fw"),
+ PROPERTY_ENTRY_STRING("firmware-name", "gsl1680-pipo-w2s.fw"),
{ }
};
@@ -460,14 +453,29 @@ static const struct ts_dmi_data pipo_w2s_data = {
.properties = pipo_w2s_props,
};
+static const struct property_entry pipo_w11_props[] = {
+ PROPERTY_ENTRY_U32("touchscreen-min-x", 1),
+ PROPERTY_ENTRY_U32("touchscreen-min-y", 15),
+ PROPERTY_ENTRY_U32("touchscreen-size-x", 1984),
+ PROPERTY_ENTRY_U32("touchscreen-size-y", 1532),
+ PROPERTY_ENTRY_STRING("firmware-name", "gsl1680-pipo-w11.fw"),
+ PROPERTY_ENTRY_U32("silead,max-fingers", 10),
+ PROPERTY_ENTRY_BOOL("silead,home-button"),
+ { }
+};
+
+static const struct ts_dmi_data pipo_w11_data = {
+ .acpi_name = "MSSL1680:00",
+ .properties = pipo_w11_props,
+};
+
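Like every other table in this file, the new Pipo W11 entry is consumed by the DMI notifier at the bottom of the driver, which attaches the properties to the matching i2c client. Roughly (assuming the device_add_properties() API this file used in the 5.x era):

	if (!strncmp(pipo_w11_data.acpi_name, client->name, I2C_NAME_SIZE))
		device_add_properties(&client->dev, pipo_w11_data.properties);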
static const struct property_entry pov_mobii_wintab_p800w_v20_props[] = {
PROPERTY_ENTRY_U32("touchscreen-min-x", 32),
PROPERTY_ENTRY_U32("touchscreen-min-y", 16),
PROPERTY_ENTRY_U32("touchscreen-size-x", 1692),
PROPERTY_ENTRY_U32("touchscreen-size-y", 1146),
PROPERTY_ENTRY_BOOL("touchscreen-swapped-x-y"),
- PROPERTY_ENTRY_STRING("firmware-name",
- "gsl3680-pov-mobii-wintab-p800w-v20.fw"),
+ PROPERTY_ENTRY_STRING("firmware-name", "gsl3680-pov-mobii-wintab-p800w-v20.fw"),
PROPERTY_ENTRY_U32("silead,max-fingers", 10),
PROPERTY_ENTRY_BOOL("silead,home-button"),
{ }
@@ -484,8 +492,7 @@ static const struct property_entry pov_mobii_wintab_p800w_v21_props[] = {
PROPERTY_ENTRY_U32("touchscreen-size-x", 1794),
PROPERTY_ENTRY_U32("touchscreen-size-y", 1148),
PROPERTY_ENTRY_BOOL("touchscreen-swapped-x-y"),
- PROPERTY_ENTRY_STRING("firmware-name",
- "gsl3692-pov-mobii-wintab-p800w.fw"),
+ PROPERTY_ENTRY_STRING("firmware-name", "gsl3692-pov-mobii-wintab-p800w.fw"),
PROPERTY_ENTRY_U32("silead,max-fingers", 10),
PROPERTY_ENTRY_BOOL("silead,home-button"),
{ }
@@ -502,8 +509,7 @@ static const struct property_entry pov_mobii_wintab_p1006w_v10_props[] = {
PROPERTY_ENTRY_U32("touchscreen-size-x", 1984),
PROPERTY_ENTRY_U32("touchscreen-size-y", 1520),
PROPERTY_ENTRY_BOOL("touchscreen-inverted-y"),
- PROPERTY_ENTRY_STRING("firmware-name",
- "gsl3692-pov-mobii-wintab-p1006w-v10.fw"),
+ PROPERTY_ENTRY_STRING("firmware-name", "gsl3692-pov-mobii-wintab-p1006w-v10.fw"),
PROPERTY_ENTRY_U32("silead,max-fingers", 10),
PROPERTY_ENTRY_BOOL("silead,home-button"),
{ }
@@ -520,8 +526,7 @@ static const struct property_entry schneider_sct101ctm_props[] = {
PROPERTY_ENTRY_BOOL("touchscreen-inverted-x"),
PROPERTY_ENTRY_BOOL("touchscreen-inverted-y"),
PROPERTY_ENTRY_BOOL("touchscreen-swapped-x-y"),
- PROPERTY_ENTRY_STRING("firmware-name",
- "gsl1680-schneider-sct101ctm.fw"),
+ PROPERTY_ENTRY_STRING("firmware-name", "gsl1680-schneider-sct101ctm.fw"),
PROPERTY_ENTRY_U32("silead,max-fingers", 10),
PROPERTY_ENTRY_BOOL("silead,home-button"),
{ }
@@ -551,8 +556,7 @@ static const struct property_entry teclast_x98plus2_props[] = {
PROPERTY_ENTRY_U32("touchscreen-size-y", 1280),
PROPERTY_ENTRY_BOOL("touchscreen-inverted-x"),
PROPERTY_ENTRY_BOOL("touchscreen-inverted-y"),
- PROPERTY_ENTRY_STRING("firmware-name",
- "gsl1686-teclast_x98plus2.fw"),
+ PROPERTY_ENTRY_STRING("firmware-name", "gsl1686-teclast_x98plus2.fw"),
PROPERTY_ENTRY_U32("silead,max-fingers", 10),
{ }
};
@@ -566,8 +570,7 @@ static const struct property_entry trekstor_primebook_c11_props[] = {
PROPERTY_ENTRY_U32("touchscreen-size-x", 1970),
PROPERTY_ENTRY_U32("touchscreen-size-y", 1530),
PROPERTY_ENTRY_BOOL("touchscreen-inverted-y"),
- PROPERTY_ENTRY_STRING("firmware-name",
- "gsl1680-trekstor-primebook-c11.fw"),
+ PROPERTY_ENTRY_STRING("firmware-name", "gsl1680-trekstor-primebook-c11.fw"),
PROPERTY_ENTRY_U32("silead,max-fingers", 10),
PROPERTY_ENTRY_BOOL("silead,home-button"),
{ }
@@ -581,8 +584,7 @@ static const struct ts_dmi_data trekstor_primebook_c11_data = {
static const struct property_entry trekstor_primebook_c13_props[] = {
PROPERTY_ENTRY_U32("touchscreen-size-x", 2624),
PROPERTY_ENTRY_U32("touchscreen-size-y", 1920),
- PROPERTY_ENTRY_STRING("firmware-name",
- "gsl1680-trekstor-primebook-c13.fw"),
+ PROPERTY_ENTRY_STRING("firmware-name", "gsl1680-trekstor-primebook-c13.fw"),
PROPERTY_ENTRY_U32("silead,max-fingers", 10),
PROPERTY_ENTRY_BOOL("silead,home-button"),
{ }
@@ -596,8 +598,7 @@ static const struct ts_dmi_data trekstor_primebook_c13_data = {
static const struct property_entry trekstor_primetab_t13b_props[] = {
PROPERTY_ENTRY_U32("touchscreen-size-x", 2500),
PROPERTY_ENTRY_U32("touchscreen-size-y", 1900),
- PROPERTY_ENTRY_STRING("firmware-name",
- "gsl1680-trekstor-primetab-t13b.fw"),
+ PROPERTY_ENTRY_STRING("firmware-name", "gsl1680-trekstor-primetab-t13b.fw"),
PROPERTY_ENTRY_U32("silead,max-fingers", 10),
PROPERTY_ENTRY_BOOL("silead,home-button"),
PROPERTY_ENTRY_BOOL("touchscreen-inverted-y"),
@@ -613,8 +614,7 @@ static const struct property_entry trekstor_surftab_twin_10_1_props[] = {
PROPERTY_ENTRY_U32("touchscreen-size-x", 1900),
PROPERTY_ENTRY_U32("touchscreen-size-y", 1280),
PROPERTY_ENTRY_U32("touchscreen-inverted-y", 1),
- PROPERTY_ENTRY_STRING("firmware-name",
- "gsl3670-surftab-twin-10-1-st10432-8.fw"),
+ PROPERTY_ENTRY_STRING("firmware-name", "gsl3670-surftab-twin-10-1-st10432-8.fw"),
PROPERTY_ENTRY_U32("silead,max-fingers", 10),
{ }
};
@@ -629,8 +629,7 @@ static const struct property_entry trekstor_surftab_wintron70_props[] = {
PROPERTY_ENTRY_U32("touchscreen-min-y", 8),
PROPERTY_ENTRY_U32("touchscreen-size-x", 884),
PROPERTY_ENTRY_U32("touchscreen-size-y", 632),
- PROPERTY_ENTRY_STRING("firmware-name",
- "gsl1686-surftab-wintron70-st70416-6.fw"),
+ PROPERTY_ENTRY_STRING("firmware-name", "gsl1686-surftab-wintron70-st70416-6.fw"),
PROPERTY_ENTRY_U32("silead,max-fingers", 10),
PROPERTY_ENTRY_BOOL("silead,home-button"),
{ }
@@ -910,6 +909,16 @@ static const struct dmi_system_id touchscreen_dmi_table[] = {
},
},
{
+ /* Pipo W11 */
+ .driver_data = (void *)&pipo_w11_data,
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "PIPO"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "To be filled by O.E.M."),
+ /* Above matches are too generic, add bios-ver match */
+ DMI_MATCH(DMI_BIOS_VERSION, "JS-BI-10.6-SF133GR300-GA55B-024-F"),
+ },
+ },
+ {
/* Ployer Momo7w (same hardware as the Trekstor ST70416-6) */
.driver_data = (void *)&trekstor_surftab_wintron70_data,
.matches = {
@@ -1032,8 +1041,7 @@ static const struct dmi_system_id touchscreen_dmi_table[] = {
.driver_data = (void *)&trekstor_surftab_wintron70_data,
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "TrekStor"),
- DMI_MATCH(DMI_PRODUCT_NAME,
- "SurfTab wintron 7.0 ST70416-6"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "SurfTab wintron 7.0 ST70416-6"),
/* Exact match, different versions need different fw */
DMI_MATCH(DMI_BIOS_VERSION, "TREK.G.WI71C.JGBMRBA05"),
},
@@ -1065,7 +1073,7 @@ static void ts_dmi_add_props(struct i2c_client *client)
}
static int ts_dmi_notifier_call(struct notifier_block *nb,
- unsigned long action, void *data)
+ unsigned long action, void *data)
{
struct device *dev = data;
struct i2c_client *client;
diff --git a/drivers/pnp/isapnp/core.c b/drivers/pnp/isapnp/core.c
index 179b737280e1..c43d8ad02529 100644
--- a/drivers/pnp/isapnp/core.c
+++ b/drivers/pnp/isapnp/core.c
@@ -747,34 +747,12 @@ __skip:
}
/*
- * Compute ISA PnP checksum for first eight bytes.
- */
-static unsigned char __init isapnp_checksum(unsigned char *data)
-{
- int i, j;
- unsigned char checksum = 0x6a, bit, b;
-
- for (i = 0; i < 8; i++) {
- b = data[i];
- for (j = 0; j < 8; j++) {
- bit = 0;
- if (b & (1 << j))
- bit = 1;
- checksum =
- ((((checksum ^ (checksum >> 1)) & 0x01) ^ bit) << 7)
- | (checksum >> 1);
- }
- }
- return checksum;
-}
-
-/*
* Build device list for all present ISA PnP devices.
*/
static int __init isapnp_build_device_list(void)
{
int csn;
- unsigned char header[9], checksum;
+ unsigned char header[9];
struct pnp_card *card;
u32 eisa_id;
char id[8];
@@ -784,7 +762,6 @@ static int __init isapnp_build_device_list(void)
for (csn = 1; csn <= isapnp_csn_count; csn++) {
isapnp_wake(csn);
isapnp_peek(header, 9);
- checksum = isapnp_checksum(header);
eisa_id = header[0] | header[1] << 8 |
header[2] << 16 | header[3] << 24;
pnp_eisa_id_to_string(eisa_id, id);
diff --git a/drivers/power/avs/Kconfig b/drivers/power/avs/Kconfig
index 089b6244b716..b8fe166cd0d9 100644
--- a/drivers/power/avs/Kconfig
+++ b/drivers/power/avs/Kconfig
@@ -12,6 +12,22 @@ menuconfig POWER_AVS
Say Y here to enable Adaptive Voltage Scaling class support.
+config QCOM_CPR
+ tristate "QCOM Core Power Reduction (CPR) support"
+ depends on POWER_AVS
+ select PM_OPP
+ select REGMAP
+ help
+ Say Y here to enable support for the CPR hardware found on Qualcomm
+ SoCs like QCS404.
+
+ This driver populates CPU OPPs tables and makes adjustments to the
+ tables based on feedback from the CPR hardware. If you want to do
+ CPUfrequency scaling say Y here.
+
+ To compile this driver as a module, choose M here: the module will
+ be called qcom-cpr
+
config ROCKCHIP_IODOMAIN
tristate "Rockchip IO domain support"
depends on POWER_AVS && ARCH_ROCKCHIP && OF
diff --git a/drivers/power/avs/Makefile b/drivers/power/avs/Makefile
index a1b8cd453f19..9007d05853e2 100644
--- a/drivers/power/avs/Makefile
+++ b/drivers/power/avs/Makefile
@@ -1,3 +1,4 @@
# SPDX-License-Identifier: GPL-2.0-only
obj-$(CONFIG_POWER_AVS_OMAP) += smartreflex.o
+obj-$(CONFIG_QCOM_CPR) += qcom-cpr.o
obj-$(CONFIG_ROCKCHIP_IODOMAIN) += rockchip-io-domain.o
diff --git a/drivers/power/avs/qcom-cpr.c b/drivers/power/avs/qcom-cpr.c
new file mode 100644
index 000000000000..9192fb747653
--- /dev/null
+++ b/drivers/power/avs/qcom-cpr.c
@@ -0,0 +1,1793 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2013-2015, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2019, Linaro Limited
+ */
+
+#include <linux/module.h>
+#include <linux/err.h>
+#include <linux/debugfs.h>
+#include <linux/string.h>
+#include <linux/kernel.h>
+#include <linux/list.h>
+#include <linux/init.h>
+#include <linux/io.h>
+#include <linux/bitops.h>
+#include <linux/slab.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/pm_domain.h>
+#include <linux/pm_opp.h>
+#include <linux/interrupt.h>
+#include <linux/regmap.h>
+#include <linux/mfd/syscon.h>
+#include <linux/regulator/consumer.h>
+#include <linux/clk.h>
+#include <linux/nvmem-consumer.h>
+
+/* Register Offsets for RB-CPR and Bit Definitions */
+
+/* RBCPR Version Register */
+#define REG_RBCPR_VERSION 0
+#define RBCPR_VER_2 0x02
+#define FLAGS_IGNORE_1ST_IRQ_STATUS BIT(0)
+
+/* RBCPR Gate Count and Target Registers */
+#define REG_RBCPR_GCNT_TARGET(n) (0x60 + 4 * (n))
+
+#define RBCPR_GCNT_TARGET_TARGET_SHIFT 0
+#define RBCPR_GCNT_TARGET_TARGET_MASK GENMASK(11, 0)
+#define RBCPR_GCNT_TARGET_GCNT_SHIFT 12
+#define RBCPR_GCNT_TARGET_GCNT_MASK GENMASK(9, 0)
+
+/* RBCPR Timer Control */
+#define REG_RBCPR_TIMER_INTERVAL 0x44
+#define REG_RBIF_TIMER_ADJUST 0x4c
+
+#define RBIF_TIMER_ADJ_CONS_UP_MASK GENMASK(3, 0)
+#define RBIF_TIMER_ADJ_CONS_UP_SHIFT 0
+#define RBIF_TIMER_ADJ_CONS_DOWN_MASK GENMASK(3, 0)
+#define RBIF_TIMER_ADJ_CONS_DOWN_SHIFT 4
+#define RBIF_TIMER_ADJ_CLAMP_INT_MASK GENMASK(7, 0)
+#define RBIF_TIMER_ADJ_CLAMP_INT_SHIFT 8
+
+/* RBCPR Config Register */
+#define REG_RBIF_LIMIT 0x48
+#define RBIF_LIMIT_CEILING_MASK GENMASK(5, 0)
+#define RBIF_LIMIT_CEILING_SHIFT 6
+#define RBIF_LIMIT_FLOOR_BITS 6
+#define RBIF_LIMIT_FLOOR_MASK GENMASK(5, 0)
+
+#define RBIF_LIMIT_CEILING_DEFAULT RBIF_LIMIT_CEILING_MASK
+#define RBIF_LIMIT_FLOOR_DEFAULT 0
+
+#define REG_RBIF_SW_VLEVEL 0x94
+#define RBIF_SW_VLEVEL_DEFAULT 0x20
+
+#define REG_RBCPR_STEP_QUOT 0x80
+#define RBCPR_STEP_QUOT_STEPQUOT_MASK GENMASK(7, 0)
+#define RBCPR_STEP_QUOT_IDLE_CLK_MASK GENMASK(3, 0)
+#define RBCPR_STEP_QUOT_IDLE_CLK_SHIFT 8
+
+/* RBCPR Control Register */
+#define REG_RBCPR_CTL 0x90
+
+#define RBCPR_CTL_LOOP_EN BIT(0)
+#define RBCPR_CTL_TIMER_EN BIT(3)
+#define RBCPR_CTL_SW_AUTO_CONT_ACK_EN BIT(5)
+#define RBCPR_CTL_SW_AUTO_CONT_NACK_DN_EN BIT(6)
+#define RBCPR_CTL_COUNT_MODE BIT(10)
+#define RBCPR_CTL_UP_THRESHOLD_MASK GENMASK(3, 0)
+#define RBCPR_CTL_UP_THRESHOLD_SHIFT 24
+#define RBCPR_CTL_DN_THRESHOLD_MASK GENMASK(3, 0)
+#define RBCPR_CTL_DN_THRESHOLD_SHIFT 28
+
+/* RBCPR Ack/Nack Response */
+#define REG_RBIF_CONT_ACK_CMD 0x98
+#define REG_RBIF_CONT_NACK_CMD 0x9c
+
+/* RBCPR Result status Register */
+#define REG_RBCPR_RESULT_0 0xa0
+
+#define RBCPR_RESULT0_BUSY_SHIFT 19
+#define RBCPR_RESULT0_BUSY_MASK BIT(RBCPR_RESULT0_BUSY_SHIFT)
+#define RBCPR_RESULT0_ERROR_LT0_SHIFT 18
+#define RBCPR_RESULT0_ERROR_SHIFT 6
+#define RBCPR_RESULT0_ERROR_MASK GENMASK(11, 0)
+#define RBCPR_RESULT0_ERROR_STEPS_SHIFT 2
+#define RBCPR_RESULT0_ERROR_STEPS_MASK GENMASK(3, 0)
+#define RBCPR_RESULT0_STEP_UP_SHIFT 1
+
+/* RBCPR Interrupt Control Register */
+#define REG_RBIF_IRQ_EN(n) (0x100 + 4 * (n))
+#define REG_RBIF_IRQ_CLEAR 0x110
+#define REG_RBIF_IRQ_STATUS 0x114
+
+#define CPR_INT_DONE BIT(0)
+#define CPR_INT_MIN BIT(1)
+#define CPR_INT_DOWN BIT(2)
+#define CPR_INT_MID BIT(3)
+#define CPR_INT_UP BIT(4)
+#define CPR_INT_MAX BIT(5)
+#define CPR_INT_CLAMP BIT(6)
+#define CPR_INT_ALL (CPR_INT_DONE | CPR_INT_MIN | CPR_INT_DOWN | \
+ CPR_INT_MID | CPR_INT_UP | CPR_INT_MAX | CPR_INT_CLAMP)
+#define CPR_INT_DEFAULT (CPR_INT_UP | CPR_INT_DOWN)
+
+#define CPR_NUM_RING_OSC 8
+
+/* CPR eFuse parameters */
+#define CPR_FUSE_TARGET_QUOT_BITS_MASK GENMASK(11, 0)
+
+#define CPR_FUSE_MIN_QUOT_DIFF 50
+
+#define FUSE_REVISION_UNKNOWN (-1)
+
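Before the driver code proper, a sketch of how the RESULT_0 fields above unpack; cpr_read() and struct cpr_drv are defined further down in this file, and the helper itself is illustrative:

	static bool example_result0_busy(struct cpr_drv *drv, int *error_steps)
	{
		u32 val = cpr_read(drv, REG_RBCPR_RESULT_0);

		*error_steps = (val >> RBCPR_RESULT0_ERROR_STEPS_SHIFT) &
			       RBCPR_RESULT0_ERROR_STEPS_MASK;

		return val & RBCPR_RESULT0_BUSY_MASK;
	}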
+enum voltage_change_dir {
+ NO_CHANGE,
+ DOWN,
+ UP,
+};
+
+struct cpr_fuse {
+ char *ring_osc;
+ char *init_voltage;
+ char *quotient;
+ char *quotient_offset;
+};
+
+struct fuse_corner_data {
+ int ref_uV;
+ int max_uV;
+ int min_uV;
+ int max_volt_scale;
+ int max_quot_scale;
+ /* fuse quot */
+ int quot_offset;
+ int quot_scale;
+ int quot_adjust;
+ /* fuse quot_offset */
+ int quot_offset_scale;
+ int quot_offset_adjust;
+};
+
+struct cpr_fuses {
+ int init_voltage_step;
+ int init_voltage_width;
+ struct fuse_corner_data *fuse_corner_data;
+};
+
+struct corner_data {
+ unsigned int fuse_corner;
+ unsigned long freq;
+};
+
+struct cpr_desc {
+ unsigned int num_fuse_corners;
+ int min_diff_quot;
+ int *step_quot;
+
+ unsigned int timer_delay_us;
+ unsigned int timer_cons_up;
+ unsigned int timer_cons_down;
+ unsigned int up_threshold;
+ unsigned int down_threshold;
+ unsigned int idle_clocks;
+ unsigned int gcnt_us;
+ unsigned int vdd_apc_step_up_limit;
+ unsigned int vdd_apc_step_down_limit;
+ unsigned int clamp_timer_interval;
+
+ struct cpr_fuses cpr_fuses;
+ bool reduce_to_fuse_uV;
+ bool reduce_to_corner_uV;
+};
+
+struct acc_desc {
+ unsigned int enable_reg;
+ u32 enable_mask;
+
+ struct reg_sequence *config;
+ struct reg_sequence *settings;
+ int num_regs_per_fuse;
+};
+
+struct cpr_acc_desc {
+ const struct cpr_desc *cpr_desc;
+ const struct acc_desc *acc_desc;
+};
+
+struct fuse_corner {
+ int min_uV;
+ int max_uV;
+ int uV;
+ int quot;
+ int step_quot;
+ const struct reg_sequence *accs;
+ int num_accs;
+ unsigned long max_freq;
+ u8 ring_osc_idx;
+};
+
+struct corner {
+ int min_uV;
+ int max_uV;
+ int uV;
+ int last_uV;
+ int quot_adjust;
+ u32 save_ctl;
+ u32 save_irq;
+ unsigned long freq;
+ struct fuse_corner *fuse_corner;
+};
+
+struct cpr_drv {
+ unsigned int num_corners;
+ unsigned int ref_clk_khz;
+
+ struct generic_pm_domain pd;
+ struct device *dev;
+ struct device *attached_cpu_dev;
+ struct mutex lock;
+ void __iomem *base;
+ struct corner *corner;
+ struct regulator *vdd_apc;
+ struct clk *cpu_clk;
+ struct regmap *tcsr;
+ bool loop_disabled;
+ u32 gcnt;
+ unsigned long flags;
+
+ struct fuse_corner *fuse_corners;
+ struct corner *corners;
+
+ const struct cpr_desc *desc;
+ const struct acc_desc *acc_desc;
+ const struct cpr_fuse *cpr_fuses;
+
+ struct dentry *debugfs;
+};
+
+static bool cpr_is_allowed(struct cpr_drv *drv)
+{
+ return !drv->loop_disabled;
+}
+
+static void cpr_write(struct cpr_drv *drv, u32 offset, u32 value)
+{
+ writel_relaxed(value, drv->base + offset);
+}
+
+static u32 cpr_read(struct cpr_drv *drv, u32 offset)
+{
+ return readl_relaxed(drv->base + offset);
+}
+
+static void
+cpr_masked_write(struct cpr_drv *drv, u32 offset, u32 mask, u32 value)
+{
+ u32 val;
+
+ val = readl_relaxed(drv->base + offset);
+ val &= ~mask;
+ val |= value & mask;
+ writel_relaxed(val, drv->base + offset);
+}
+
+static void cpr_irq_clr(struct cpr_drv *drv)
+{
+ cpr_write(drv, REG_RBIF_IRQ_CLEAR, CPR_INT_ALL);
+}
+
+static void cpr_irq_clr_nack(struct cpr_drv *drv)
+{
+ cpr_irq_clr(drv);
+ cpr_write(drv, REG_RBIF_CONT_NACK_CMD, 1);
+}
+
+static void cpr_irq_clr_ack(struct cpr_drv *drv)
+{
+ cpr_irq_clr(drv);
+ cpr_write(drv, REG_RBIF_CONT_ACK_CMD, 1);
+}
+
+static void cpr_irq_set(struct cpr_drv *drv, u32 int_bits)
+{
+ cpr_write(drv, REG_RBIF_IRQ_EN(0), int_bits);
+}
+
+static void cpr_ctl_modify(struct cpr_drv *drv, u32 mask, u32 value)
+{
+ cpr_masked_write(drv, REG_RBCPR_CTL, mask, value);
+}
+
+static void cpr_ctl_enable(struct cpr_drv *drv, struct corner *corner)
+{
+ u32 val, mask;
+ const struct cpr_desc *desc = drv->desc;
+
+ /* Program Consecutive Up & Down */
+ val = desc->timer_cons_down << RBIF_TIMER_ADJ_CONS_DOWN_SHIFT;
+ val |= desc->timer_cons_up << RBIF_TIMER_ADJ_CONS_UP_SHIFT;
+ mask = RBIF_TIMER_ADJ_CONS_UP_MASK | RBIF_TIMER_ADJ_CONS_DOWN_MASK;
+ cpr_masked_write(drv, REG_RBIF_TIMER_ADJUST, mask, val);
+ cpr_masked_write(drv, REG_RBCPR_CTL,
+ RBCPR_CTL_SW_AUTO_CONT_NACK_DN_EN |
+ RBCPR_CTL_SW_AUTO_CONT_ACK_EN,
+ corner->save_ctl);
+ cpr_irq_set(drv, corner->save_irq);
+
+ if (cpr_is_allowed(drv) && corner->max_uV > corner->min_uV)
+ val = RBCPR_CTL_LOOP_EN;
+ else
+ val = 0;
+ cpr_ctl_modify(drv, RBCPR_CTL_LOOP_EN, val);
+}
+
+static void cpr_ctl_disable(struct cpr_drv *drv)
+{
+ cpr_irq_set(drv, 0);
+ cpr_ctl_modify(drv, RBCPR_CTL_SW_AUTO_CONT_NACK_DN_EN |
+ RBCPR_CTL_SW_AUTO_CONT_ACK_EN, 0);
+ cpr_masked_write(drv, REG_RBIF_TIMER_ADJUST,
+ RBIF_TIMER_ADJ_CONS_UP_MASK |
+ RBIF_TIMER_ADJ_CONS_DOWN_MASK, 0);
+ cpr_irq_clr(drv);
+ cpr_write(drv, REG_RBIF_CONT_ACK_CMD, 1);
+ cpr_write(drv, REG_RBIF_CONT_NACK_CMD, 1);
+ cpr_ctl_modify(drv, RBCPR_CTL_LOOP_EN, 0);
+}
+
+static bool cpr_ctl_is_enabled(struct cpr_drv *drv)
+{
+ u32 reg_val;
+
+ reg_val = cpr_read(drv, REG_RBCPR_CTL);
+ return reg_val & RBCPR_CTL_LOOP_EN;
+}
+
+static bool cpr_ctl_is_busy(struct cpr_drv *drv)
+{
+ u32 reg_val;
+
+ reg_val = cpr_read(drv, REG_RBCPR_RESULT_0);
+ return reg_val & RBCPR_RESULT0_BUSY_MASK;
+}
+
+static void cpr_corner_save(struct cpr_drv *drv, struct corner *corner)
+{
+ corner->save_ctl = cpr_read(drv, REG_RBCPR_CTL);
+ corner->save_irq = cpr_read(drv, REG_RBIF_IRQ_EN(0));
+}
+
+static void cpr_corner_restore(struct cpr_drv *drv, struct corner *corner)
+{
+ u32 gcnt, ctl, irq, ro_sel, step_quot;
+ struct fuse_corner *fuse = corner->fuse_corner;
+ const struct cpr_desc *desc = drv->desc;
+ int i;
+
+ ro_sel = fuse->ring_osc_idx;
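+	/*
+	 * drv->gcnt already holds the gate count shifted into the upper
+	 * field of the GCNT_TARGET register (see cpr_config()); OR in the
+	 * (adjusted) target quotient, which occupies the low bits.
+	 */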
+ gcnt = drv->gcnt;
+ gcnt |= fuse->quot - corner->quot_adjust;
+
+ /* Program the step quotient and idle clocks */
+ step_quot = desc->idle_clocks << RBCPR_STEP_QUOT_IDLE_CLK_SHIFT;
+ step_quot |= fuse->step_quot & RBCPR_STEP_QUOT_STEPQUOT_MASK;
+ cpr_write(drv, REG_RBCPR_STEP_QUOT, step_quot);
+
+ /* Clear the target quotient value and gate count of all ROs */
+ for (i = 0; i < CPR_NUM_RING_OSC; i++)
+ cpr_write(drv, REG_RBCPR_GCNT_TARGET(i), 0);
+
+ cpr_write(drv, REG_RBCPR_GCNT_TARGET(ro_sel), gcnt);
+ ctl = corner->save_ctl;
+ cpr_write(drv, REG_RBCPR_CTL, ctl);
+ irq = corner->save_irq;
+ cpr_irq_set(drv, irq);
+ dev_dbg(drv->dev, "gcnt = %#08x, ctl = %#08x, irq = %#08x\n", gcnt,
+ ctl, irq);
+}
+
+static void cpr_set_acc(struct regmap *tcsr, struct fuse_corner *f,
+ struct fuse_corner *end)
+{
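+	/*
+	 * Apply the ACC settings of every fuse corner between the current
+	 * one (exclusive) and the target one (inclusive), walking in the
+	 * direction of the voltage change.
+	 */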
+ if (f == end)
+ return;
+
+ if (f < end) {
+ for (f += 1; f <= end; f++)
+ regmap_multi_reg_write(tcsr, f->accs, f->num_accs);
+ } else {
+ for (f -= 1; f >= end; f--)
+ regmap_multi_reg_write(tcsr, f->accs, f->num_accs);
+ }
+}
+
+static int cpr_pre_voltage(struct cpr_drv *drv,
+ struct fuse_corner *fuse_corner,
+ enum voltage_change_dir dir)
+{
+ struct fuse_corner *prev_fuse_corner = drv->corner->fuse_corner;
+
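+	/*
+	 * When lowering the supply, switch the memory accelerator (ACC)
+	 * settings down before the rail drops; the UP direction is handled
+	 * in cpr_post_voltage() once the rail is already high enough.
+	 */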
+ if (drv->tcsr && dir == DOWN)
+ cpr_set_acc(drv->tcsr, prev_fuse_corner, fuse_corner);
+
+ return 0;
+}
+
+static int cpr_post_voltage(struct cpr_drv *drv,
+ struct fuse_corner *fuse_corner,
+ enum voltage_change_dir dir)
+{
+ struct fuse_corner *prev_fuse_corner = drv->corner->fuse_corner;
+
+ if (drv->tcsr && dir == UP)
+ cpr_set_acc(drv->tcsr, prev_fuse_corner, fuse_corner);
+
+ return 0;
+}
+
+static int cpr_scale_voltage(struct cpr_drv *drv, struct corner *corner,
+ int new_uV, enum voltage_change_dir dir)
+{
+ int ret;
+ struct fuse_corner *fuse_corner = corner->fuse_corner;
+
+ ret = cpr_pre_voltage(drv, fuse_corner, dir);
+ if (ret)
+ return ret;
+
+ ret = regulator_set_voltage(drv->vdd_apc, new_uV, new_uV);
+ if (ret) {
+ dev_err_ratelimited(drv->dev, "failed to set apc voltage %d\n",
+ new_uV);
+ return ret;
+ }
+
+ ret = cpr_post_voltage(drv, fuse_corner, dir);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static unsigned int cpr_get_cur_perf_state(struct cpr_drv *drv)
+{
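+	/*
+	 * Performance states are 1-based (0 means no corner has been
+	 * selected yet), hence the +1 on the pointer difference.
+	 */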
+ return drv->corner ? drv->corner - drv->corners + 1 : 0;
+}
+
+static int cpr_scale(struct cpr_drv *drv, enum voltage_change_dir dir)
+{
+ u32 val, error_steps, reg_mask;
+ int last_uV, new_uV, step_uV, ret;
+ struct corner *corner;
+ const struct cpr_desc *desc = drv->desc;
+
+ if (dir != UP && dir != DOWN)
+ return 0;
+
+ step_uV = regulator_get_linear_step(drv->vdd_apc);
+ if (!step_uV)
+ return -EINVAL;
+
+ corner = drv->corner;
+
+ val = cpr_read(drv, REG_RBCPR_RESULT_0);
+
+ error_steps = val >> RBCPR_RESULT0_ERROR_STEPS_SHIFT;
+ error_steps &= RBCPR_RESULT0_ERROR_STEPS_MASK;
+ last_uV = corner->last_uV;
+
+ if (dir == UP) {
+ if (desc->clamp_timer_interval &&
+ error_steps < desc->up_threshold) {
+ /*
+ * Handle the case where another measurement started
+ * after the interrupt was triggered due to a core
+ * exiting from power collapse.
+ */
+ error_steps = max(desc->up_threshold,
+ desc->vdd_apc_step_up_limit);
+ }
+
+ if (last_uV >= corner->max_uV) {
+ cpr_irq_clr_nack(drv);
+
+ /* Maximize the UP threshold */
+ reg_mask = RBCPR_CTL_UP_THRESHOLD_MASK;
+ reg_mask <<= RBCPR_CTL_UP_THRESHOLD_SHIFT;
+ val = reg_mask;
+ cpr_ctl_modify(drv, reg_mask, val);
+
+ /* Disable UP interrupt */
+ cpr_irq_set(drv, CPR_INT_DEFAULT & ~CPR_INT_UP);
+
+ return 0;
+ }
+
+ if (error_steps > desc->vdd_apc_step_up_limit)
+ error_steps = desc->vdd_apc_step_up_limit;
+
+ /* Calculate new voltage */
+ new_uV = last_uV + error_steps * step_uV;
+ new_uV = min(new_uV, corner->max_uV);
+
+ dev_dbg(drv->dev,
+ "UP: -> new_uV: %d last_uV: %d perf state: %u\n",
+ new_uV, last_uV, cpr_get_cur_perf_state(drv));
+ } else if (dir == DOWN) {
+ if (desc->clamp_timer_interval &&
+ error_steps < desc->down_threshold) {
+ /*
+ * Handle the case where another measurement started
+ * after the interrupt was triggered due to a core
+ * exiting from power collapse.
+ */
+ error_steps = max(desc->down_threshold,
+ desc->vdd_apc_step_down_limit);
+ }
+
+ if (last_uV <= corner->min_uV) {
+ cpr_irq_clr_nack(drv);
+
+ /* Enable auto nack down */
+ reg_mask = RBCPR_CTL_SW_AUTO_CONT_NACK_DN_EN;
+ val = RBCPR_CTL_SW_AUTO_CONT_NACK_DN_EN;
+
+ cpr_ctl_modify(drv, reg_mask, val);
+
+ /* Disable DOWN interrupt */
+ cpr_irq_set(drv, CPR_INT_DEFAULT & ~CPR_INT_DOWN);
+
+ return 0;
+ }
+
+ if (error_steps > desc->vdd_apc_step_down_limit)
+ error_steps = desc->vdd_apc_step_down_limit;
+
+ /* Calculate new voltage */
+ new_uV = last_uV - error_steps * step_uV;
+ new_uV = max(new_uV, corner->min_uV);
+
+ dev_dbg(drv->dev,
+ "DOWN: -> new_uV: %d last_uV: %d perf state: %u\n",
+ new_uV, last_uV, cpr_get_cur_perf_state(drv));
+ }
+
+ ret = cpr_scale_voltage(drv, corner, new_uV, dir);
+ if (ret) {
+ cpr_irq_clr_nack(drv);
+ return ret;
+ }
+ drv->corner->last_uV = new_uV;
+
+ if (dir == UP) {
+ /* Disable auto nack down */
+ reg_mask = RBCPR_CTL_SW_AUTO_CONT_NACK_DN_EN;
+ val = 0;
+ } else if (dir == DOWN) {
+ /* Restore default threshold for UP */
+ reg_mask = RBCPR_CTL_UP_THRESHOLD_MASK;
+ reg_mask <<= RBCPR_CTL_UP_THRESHOLD_SHIFT;
+ val = desc->up_threshold;
+ val <<= RBCPR_CTL_UP_THRESHOLD_SHIFT;
+ }
+
+ cpr_ctl_modify(drv, reg_mask, val);
+
+ /* Re-enable default interrupts */
+ cpr_irq_set(drv, CPR_INT_DEFAULT);
+
+ /* Ack */
+ cpr_irq_clr_ack(drv);
+
+ return 0;
+}
+
+static irqreturn_t cpr_irq_handler(int irq, void *dev)
+{
+ struct cpr_drv *drv = dev;
+ const struct cpr_desc *desc = drv->desc;
+ irqreturn_t ret = IRQ_HANDLED;
+ u32 val;
+
+ mutex_lock(&drv->lock);
+
+ val = cpr_read(drv, REG_RBIF_IRQ_STATUS);
+ if (drv->flags & FLAGS_IGNORE_1ST_IRQ_STATUS)
+ val = cpr_read(drv, REG_RBIF_IRQ_STATUS);
+
+ dev_dbg(drv->dev, "IRQ_STATUS = %#02x\n", val);
+
+ if (!cpr_ctl_is_enabled(drv)) {
+ dev_dbg(drv->dev, "CPR is disabled\n");
+ ret = IRQ_NONE;
+ } else if (cpr_ctl_is_busy(drv) && !desc->clamp_timer_interval) {
+ dev_dbg(drv->dev, "CPR measurement is not ready\n");
+ } else if (!cpr_is_allowed(drv)) {
+ val = cpr_read(drv, REG_RBCPR_CTL);
+ dev_err_ratelimited(drv->dev,
+ "Interrupt broken? RBCPR_CTL = %#02x\n",
+ val);
+ ret = IRQ_NONE;
+ } else {
+ /*
+ * Following sequence of handling is as per each IRQ's
+ * priority
+ */
+ if (val & CPR_INT_UP) {
+ cpr_scale(drv, UP);
+ } else if (val & CPR_INT_DOWN) {
+ cpr_scale(drv, DOWN);
+ } else if (val & CPR_INT_MIN) {
+ cpr_irq_clr_nack(drv);
+ } else if (val & CPR_INT_MAX) {
+ cpr_irq_clr_nack(drv);
+ } else if (val & CPR_INT_MID) {
+ /* RBCPR_CTL_SW_AUTO_CONT_ACK_EN is enabled */
+ dev_dbg(drv->dev, "IRQ occurred for Mid Flag\n");
+ } else {
+ dev_dbg(drv->dev,
+ "IRQ occurred for unknown flag (%#08x)\n", val);
+ }
+
+ /* Save register values for the corner */
+ cpr_corner_save(drv, drv->corner);
+ }
+
+ mutex_unlock(&drv->lock);
+
+ return ret;
+}
+
+static int cpr_enable(struct cpr_drv *drv)
+{
+ int ret;
+
+ ret = regulator_enable(drv->vdd_apc);
+ if (ret)
+ return ret;
+
+ mutex_lock(&drv->lock);
+
+ if (cpr_is_allowed(drv) && drv->corner) {
+ cpr_irq_clr(drv);
+ cpr_corner_restore(drv, drv->corner);
+ cpr_ctl_enable(drv, drv->corner);
+ }
+
+ mutex_unlock(&drv->lock);
+
+ return 0;
+}
+
+static int cpr_disable(struct cpr_drv *drv)
+{
+ int ret;
+
+ mutex_lock(&drv->lock);
+
+ if (cpr_is_allowed(drv)) {
+ cpr_ctl_disable(drv);
+ cpr_irq_clr(drv);
+ }
+
+ mutex_unlock(&drv->lock);
+
+ ret = regulator_disable(drv->vdd_apc);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static int cpr_config(struct cpr_drv *drv)
+{
+ int i;
+ u32 val, gcnt;
+ struct corner *corner;
+ const struct cpr_desc *desc = drv->desc;
+
+ /* Disable interrupt and CPR */
+ cpr_write(drv, REG_RBIF_IRQ_EN(0), 0);
+ cpr_write(drv, REG_RBCPR_CTL, 0);
+
+ /* Program the default HW ceiling, floor and vlevel */
+ val = (RBIF_LIMIT_CEILING_DEFAULT & RBIF_LIMIT_CEILING_MASK)
+ << RBIF_LIMIT_CEILING_SHIFT;
+ val |= RBIF_LIMIT_FLOOR_DEFAULT & RBIF_LIMIT_FLOOR_MASK;
+ cpr_write(drv, REG_RBIF_LIMIT, val);
+ cpr_write(drv, REG_RBIF_SW_VLEVEL, RBIF_SW_VLEVEL_DEFAULT);
+
+ /*
+ * Clear the target quotient value and gate count of all
+ * ring oscillators
+ */
+ for (i = 0; i < CPR_NUM_RING_OSC; i++)
+ cpr_write(drv, REG_RBCPR_GCNT_TARGET(i), 0);
+
+ /* Init and save gcnt */
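+	/*
+	 * E.g. with a (typical) 19.2 MHz reference clock and gcnt_us = 1,
+	 * this yields a gate count of 19 reference cycles, placed in the
+	 * upper field of the GCNT_TARGET registers.
+	 */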
+ gcnt = (drv->ref_clk_khz * desc->gcnt_us) / 1000;
+ gcnt = gcnt & RBCPR_GCNT_TARGET_GCNT_MASK;
+ gcnt <<= RBCPR_GCNT_TARGET_GCNT_SHIFT;
+ drv->gcnt = gcnt;
+
+ /* Program the delay count for the timer */
+ val = (drv->ref_clk_khz * desc->timer_delay_us) / 1000;
+ cpr_write(drv, REG_RBCPR_TIMER_INTERVAL, val);
+ dev_dbg(drv->dev, "Timer count: %#0x (for %d us)\n", val,
+ desc->timer_delay_us);
+
+ /* Program Consecutive Up & Down */
+ val = desc->timer_cons_down << RBIF_TIMER_ADJ_CONS_DOWN_SHIFT;
+ val |= desc->timer_cons_up << RBIF_TIMER_ADJ_CONS_UP_SHIFT;
+ val |= desc->clamp_timer_interval << RBIF_TIMER_ADJ_CLAMP_INT_SHIFT;
+ cpr_write(drv, REG_RBIF_TIMER_ADJUST, val);
+
+ /* Program the control register */
+ val = desc->up_threshold << RBCPR_CTL_UP_THRESHOLD_SHIFT;
+ val |= desc->down_threshold << RBCPR_CTL_DN_THRESHOLD_SHIFT;
+ val |= RBCPR_CTL_TIMER_EN | RBCPR_CTL_COUNT_MODE;
+ val |= RBCPR_CTL_SW_AUTO_CONT_ACK_EN;
+ cpr_write(drv, REG_RBCPR_CTL, val);
+
+ for (i = 0; i < drv->num_corners; i++) {
+ corner = &drv->corners[i];
+ corner->save_ctl = val;
+ corner->save_irq = CPR_INT_DEFAULT;
+ }
+
+ cpr_irq_set(drv, CPR_INT_DEFAULT);
+
+ val = cpr_read(drv, REG_RBCPR_VERSION);
+ if (val <= RBCPR_VER_2)
+ drv->flags |= FLAGS_IGNORE_1ST_IRQ_STATUS;
+
+ return 0;
+}
+
+static int cpr_set_performance_state(struct generic_pm_domain *domain,
+ unsigned int state)
+{
+ struct cpr_drv *drv = container_of(domain, struct cpr_drv, pd);
+ struct corner *corner, *end;
+ enum voltage_change_dir dir;
+ int ret = 0, new_uV;
+
+ mutex_lock(&drv->lock);
+
+ dev_dbg(drv->dev, "%s: setting perf state: %u (prev state: %u)\n",
+ __func__, state, cpr_get_cur_perf_state(drv));
+
+ /*
+ * Determine new corner we're going to.
+ * Remove one since lowest performance state is 1.
+ */
+ corner = drv->corners + state - 1;
+ end = &drv->corners[drv->num_corners - 1];
+ if (corner > end || corner < drv->corners) {
+ ret = -EINVAL;
+ goto unlock;
+ }
+
+ /* Determine direction */
+ if (drv->corner > corner)
+ dir = DOWN;
+ else if (drv->corner < corner)
+ dir = UP;
+ else
+ dir = NO_CHANGE;
+
+ if (cpr_is_allowed(drv))
+ new_uV = corner->last_uV;
+ else
+ new_uV = corner->uV;
+
+ if (cpr_is_allowed(drv))
+ cpr_ctl_disable(drv);
+
+ ret = cpr_scale_voltage(drv, corner, new_uV, dir);
+ if (ret)
+ goto unlock;
+
+ if (cpr_is_allowed(drv)) {
+ cpr_irq_clr(drv);
+ if (drv->corner != corner)
+ cpr_corner_restore(drv, corner);
+ cpr_ctl_enable(drv, corner);
+ }
+
+ drv->corner = corner;
+
+unlock:
+ mutex_unlock(&drv->lock);
+
+ return ret;
+}
+
+static int cpr_read_efuse(struct device *dev, const char *cname, u32 *data)
+{
+ struct nvmem_cell *cell;
+ ssize_t len;
+ char *ret;
+ int i;
+
+ *data = 0;
+
+ cell = nvmem_cell_get(dev, cname);
+ if (IS_ERR(cell)) {
+ if (PTR_ERR(cell) != -EPROBE_DEFER)
+ dev_err(dev, "undefined cell %s\n", cname);
+ return PTR_ERR(cell);
+ }
+
+ ret = nvmem_cell_read(cell, &len);
+ nvmem_cell_put(cell);
+ if (IS_ERR(ret)) {
+ dev_err(dev, "can't read cell %s\n", cname);
+ return PTR_ERR(ret);
+ }
+
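+	/* Assemble the little-endian cell bytes into a single value */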
+ for (i = 0; i < len; i++)
+ *data |= ret[i] << (8 * i);
+
+ kfree(ret);
+ dev_dbg(dev, "efuse read(%s) = %x, bytes %zd\n", cname, *data, len);
+
+ return 0;
+}
+
+static int
+cpr_populate_ring_osc_idx(struct cpr_drv *drv)
+{
+ struct fuse_corner *fuse = drv->fuse_corners;
+ struct fuse_corner *end = fuse + drv->desc->num_fuse_corners;
+ const struct cpr_fuse *fuses = drv->cpr_fuses;
+ u32 data;
+ int ret;
+
+ for (; fuse < end; fuse++, fuses++) {
+ ret = cpr_read_efuse(drv->dev, fuses->ring_osc,
+ &data);
+ if (ret)
+ return ret;
+ fuse->ring_osc_idx = data;
+ }
+
+ return 0;
+}
+
+static int cpr_read_fuse_uV(const struct cpr_desc *desc,
+ const struct fuse_corner_data *fdata,
+ const char *init_v_efuse,
+ int step_volt,
+ struct cpr_drv *drv)
+{
+ int step_size_uV, steps, uV;
+ u32 bits = 0;
+ int ret;
+
+ ret = cpr_read_efuse(drv->dev, init_v_efuse, &bits);
+ if (ret)
+ return ret;
+
+ steps = bits & ~BIT(desc->cpr_fuses.init_voltage_width - 1);
+	/* Not two's complement; instead the highest bit is the sign bit */
+ if (bits & BIT(desc->cpr_fuses.init_voltage_width - 1))
+ steps = -steps;
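+	/*
+	 * Worked example: with init_voltage_width = 6, a raw value of
+	 * 0b100011 has the sign bit set and magnitude 3, so steps = -3;
+	 * with ref_uV = 1224000 and an 8000 uV fuse step (the qcs404
+	 * values below) this gives 1224000 - 24000 = 1200000 uV before
+	 * rounding up to the regulator step.
+	 */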
+
+ step_size_uV = desc->cpr_fuses.init_voltage_step;
+
+ uV = fdata->ref_uV + steps * step_size_uV;
+ return DIV_ROUND_UP(uV, step_volt) * step_volt;
+}
+
+static int cpr_fuse_corner_init(struct cpr_drv *drv)
+{
+ const struct cpr_desc *desc = drv->desc;
+ const struct cpr_fuse *fuses = drv->cpr_fuses;
+ const struct acc_desc *acc_desc = drv->acc_desc;
+ int i;
+ unsigned int step_volt;
+ struct fuse_corner_data *fdata;
+ struct fuse_corner *fuse, *end;
+ int uV;
+ const struct reg_sequence *accs;
+ int ret;
+
+ accs = acc_desc->settings;
+
+ step_volt = regulator_get_linear_step(drv->vdd_apc);
+ if (!step_volt)
+ return -EINVAL;
+
+ /* Populate fuse_corner members */
+ fuse = drv->fuse_corners;
+ end = &fuse[desc->num_fuse_corners - 1];
+ fdata = desc->cpr_fuses.fuse_corner_data;
+
+ for (i = 0; fuse <= end; fuse++, fuses++, i++, fdata++) {
+ /*
+		 * Update SoC voltages: platforms might use a different
+		 * regulator than the one used to characterize the algorithm
+		 * (i.e., init_voltage_step).
+ */
+ fdata->min_uV = roundup(fdata->min_uV, step_volt);
+ fdata->max_uV = roundup(fdata->max_uV, step_volt);
+
+ /* Populate uV */
+ uV = cpr_read_fuse_uV(desc, fdata, fuses->init_voltage,
+ step_volt, drv);
+ if (uV < 0)
+ return uV;
+
+ fuse->min_uV = fdata->min_uV;
+ fuse->max_uV = fdata->max_uV;
+ fuse->uV = clamp(uV, fuse->min_uV, fuse->max_uV);
+
+ if (fuse == end) {
+ /*
+ * Allow the highest fuse corner's PVS voltage to
+ * define the ceiling voltage for that corner in order
+			 * to support SoCs in which variable ceiling values
+ * are required.
+ */
+ end->max_uV = max(end->max_uV, end->uV);
+ }
+
+ /* Populate target quotient by scaling */
+ ret = cpr_read_efuse(drv->dev, fuses->quotient, &fuse->quot);
+ if (ret)
+ return ret;
+
+ fuse->quot *= fdata->quot_scale;
+ fuse->quot += fdata->quot_offset;
+ fuse->quot += fdata->quot_adjust;
+ fuse->step_quot = desc->step_quot[fuse->ring_osc_idx];
+
+ /* Populate acc settings */
+ fuse->accs = accs;
+ fuse->num_accs = acc_desc->num_regs_per_fuse;
+ accs += acc_desc->num_regs_per_fuse;
+ }
+
+ /*
+ * Restrict all fuse corner PVS voltages based upon per corner
+ * ceiling and floor voltages.
+ */
+ for (fuse = drv->fuse_corners, i = 0; fuse <= end; fuse++, i++) {
+ if (fuse->uV > fuse->max_uV)
+ fuse->uV = fuse->max_uV;
+ else if (fuse->uV < fuse->min_uV)
+ fuse->uV = fuse->min_uV;
+
+ ret = regulator_is_supported_voltage(drv->vdd_apc,
+ fuse->min_uV,
+ fuse->min_uV);
+ if (!ret) {
+ dev_err(drv->dev,
+ "min uV: %d (fuse corner: %d) not supported by regulator\n",
+ fuse->min_uV, i);
+ return -EINVAL;
+ }
+
+ ret = regulator_is_supported_voltage(drv->vdd_apc,
+ fuse->max_uV,
+ fuse->max_uV);
+ if (!ret) {
+ dev_err(drv->dev,
+ "max uV: %d (fuse corner: %d) not supported by regulator\n",
+ fuse->max_uV, i);
+ return -EINVAL;
+ }
+
+ dev_dbg(drv->dev,
+ "fuse corner %d: [%d %d %d] RO%hhu quot %d squot %d\n",
+ i, fuse->min_uV, fuse->uV, fuse->max_uV,
+ fuse->ring_osc_idx, fuse->quot, fuse->step_quot);
+ }
+
+ return 0;
+}
+
+static int cpr_calculate_scaling(const char *quot_offset,
+ struct cpr_drv *drv,
+ const struct fuse_corner_data *fdata,
+ const struct corner *corner)
+{
+ u32 quot_diff = 0;
+ unsigned long freq_diff;
+ int scaling;
+ const struct fuse_corner *fuse, *prev_fuse;
+ int ret;
+
+ fuse = corner->fuse_corner;
+ prev_fuse = fuse - 1;
+
+ if (quot_offset) {
+ ret = cpr_read_efuse(drv->dev, quot_offset, &quot_diff);
+ if (ret)
+ return ret;
+
+ quot_diff *= fdata->quot_offset_scale;
+ quot_diff += fdata->quot_offset_adjust;
+ } else {
+ quot_diff = fuse->quot - prev_fuse->quot;
+ }
+
+ freq_diff = fuse->max_freq - prev_fuse->max_freq;
+ freq_diff /= 1000000; /* Convert to MHz */
+ scaling = 1000 * quot_diff / freq_diff;
+ return min(scaling, fdata->max_quot_scale);
+}
+
+static int cpr_interpolate(const struct corner *corner, int step_volt,
+ const struct fuse_corner_data *fdata)
+{
+ unsigned long f_high, f_low, f_diff;
+ int uV_high, uV_low, uV;
+ u64 temp, temp_limit;
+ const struct fuse_corner *fuse, *prev_fuse;
+
+ fuse = corner->fuse_corner;
+ prev_fuse = fuse - 1;
+
+ f_high = fuse->max_freq;
+ f_low = prev_fuse->max_freq;
+ uV_high = fuse->uV;
+ uV_low = prev_fuse->uV;
+ f_diff = fuse->max_freq - corner->freq;
+
+ /*
+ * Don't interpolate in the wrong direction. This could happen
+ * if the adjusted fuse voltage overlaps with the previous fuse's
+ * adjusted voltage.
+ */
+ if (f_high <= f_low || uV_high <= uV_low || f_high <= corner->freq)
+ return corner->uV;
+
+ temp = f_diff * (uV_high - uV_low);
+ do_div(temp, f_high - f_low);
+
+ /*
+ * max_volt_scale has units of uV/MHz while freq values
+	 * have units of Hz. Divide by 1000000 to convert the
+	 * freq values to MHz.
+ */
+ temp_limit = f_diff * fdata->max_volt_scale;
+ do_div(temp_limit, 1000000);
+
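+	/*
+	 * Illustrative example: interpolating a 1.2 GHz corner between a
+	 * 1.0 GHz fuse corner at 1224000 uV and a 1.4 GHz one at
+	 * 1288000 uV gives temp = 200 MHz * 64000 uV / 400 MHz = 32000 uV,
+	 * so uV = 1288000 - 32000 = 1256000, unless capped by the
+	 * max_volt_scale limit, then rounded up to step_volt.
+	 */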
+ uV = uV_high - min(temp, temp_limit);
+ return roundup(uV, step_volt);
+}
+
+static unsigned int cpr_get_fuse_corner(struct dev_pm_opp *opp)
+{
+ struct device_node *np;
+ unsigned int fuse_corner = 0;
+
+ np = dev_pm_opp_get_of_node(opp);
+ if (of_property_read_u32(np, "qcom,opp-fuse-level", &fuse_corner))
+ pr_err("%s: missing 'qcom,opp-fuse-level' property\n",
+ __func__);
+
+ of_node_put(np);
+
+ return fuse_corner;
+}
+
+static unsigned long cpr_get_opp_hz_for_req(struct dev_pm_opp *ref,
+ struct device *cpu_dev)
+{
+ u64 rate = 0;
+ struct device_node *ref_np;
+ struct device_node *desc_np;
+ struct device_node *child_np = NULL;
+ struct device_node *child_req_np = NULL;
+
+ desc_np = dev_pm_opp_of_get_opp_desc_node(cpu_dev);
+ if (!desc_np)
+ return 0;
+
+ ref_np = dev_pm_opp_get_of_node(ref);
+ if (!ref_np)
+ goto out_ref;
+
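+	/*
+	 * Walk the CPU's OPP table and find the entry whose required-opps
+	 * phandle points at the reference (CPR) OPP, then read its opp-hz.
+	 */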
+ do {
+ of_node_put(child_req_np);
+ child_np = of_get_next_available_child(desc_np, child_np);
+ child_req_np = of_parse_phandle(child_np, "required-opps", 0);
+ } while (child_np && child_req_np != ref_np);
+
+ if (child_np && child_req_np == ref_np)
+ of_property_read_u64(child_np, "opp-hz", &rate);
+
+ of_node_put(child_req_np);
+ of_node_put(child_np);
+ of_node_put(ref_np);
+out_ref:
+ of_node_put(desc_np);
+
+ return (unsigned long) rate;
+}
+
+static int cpr_corner_init(struct cpr_drv *drv)
+{
+ const struct cpr_desc *desc = drv->desc;
+ const struct cpr_fuse *fuses = drv->cpr_fuses;
+ int i, level, scaling = 0;
+ unsigned int fnum, fc;
+ const char *quot_offset;
+ struct fuse_corner *fuse, *prev_fuse;
+ struct corner *corner, *end;
+ struct corner_data *cdata;
+ const struct fuse_corner_data *fdata;
+ bool apply_scaling;
+ unsigned long freq_diff, freq_diff_mhz;
+ unsigned long freq;
+ int step_volt = regulator_get_linear_step(drv->vdd_apc);
+ struct dev_pm_opp *opp;
+
+ if (!step_volt)
+ return -EINVAL;
+
+ corner = drv->corners;
+ end = &corner[drv->num_corners - 1];
+
+ cdata = devm_kcalloc(drv->dev, drv->num_corners,
+ sizeof(struct corner_data),
+ GFP_KERNEL);
+ if (!cdata)
+ return -ENOMEM;
+
+ /*
+ * Store maximum frequency for each fuse corner based on the frequency
+ * plan
+ */
+ for (level = 1; level <= drv->num_corners; level++) {
+ opp = dev_pm_opp_find_level_exact(&drv->pd.dev, level);
+ if (IS_ERR(opp))
+ return -EINVAL;
+ fc = cpr_get_fuse_corner(opp);
+ if (!fc) {
+ dev_pm_opp_put(opp);
+ return -EINVAL;
+ }
+ fnum = fc - 1;
+ freq = cpr_get_opp_hz_for_req(opp, drv->attached_cpu_dev);
+ if (!freq) {
+ dev_pm_opp_put(opp);
+ return -EINVAL;
+ }
+ cdata[level - 1].fuse_corner = fnum;
+ cdata[level - 1].freq = freq;
+
+ fuse = &drv->fuse_corners[fnum];
+ dev_dbg(drv->dev, "freq: %lu level: %u fuse level: %u\n",
+ freq, dev_pm_opp_get_level(opp) - 1, fnum);
+ if (freq > fuse->max_freq)
+ fuse->max_freq = freq;
+ dev_pm_opp_put(opp);
+ }
+
+ /*
+ * Get the quotient adjustment scaling factor, according to:
+ *
+ * scaling = min(1000 * (QUOT(corner_N) - QUOT(corner_N-1))
+ * / (freq(corner_N) - freq(corner_N-1)), max_factor)
+ *
+ * QUOT(corner_N): quotient read from fuse for fuse corner N
+ * QUOT(corner_N-1): quotient read from fuse for fuse corner (N - 1)
+ * freq(corner_N): max frequency in MHz supported by fuse corner N
+ * freq(corner_N-1): max frequency in MHz supported by fuse corner
+ * (N - 1)
+ *
+ * Then walk through the corners mapped to each fuse corner
+ * and calculate the quotient adjustment for each one using the
+ * following formula:
+ *
+ * quot_adjust = (freq_max - freq_corner) * scaling / 1000
+ *
+ * freq_max: max frequency in MHz supported by the fuse corner
+ * freq_corner: frequency in MHz corresponding to the corner
+ * scaling: calculated from above equation
+ *
+ *
+ * + +
+ * | v |
+ * q | f c o | f c
+ * u | c l | c
+ * o | f t | f
+ * t | c a | c
+ * | c f g | c f
+ * | e |
+ * +--------------- +----------------
+ * 0 1 2 3 4 5 6 0 1 2 3 4 5 6
+ * corner corner
+ *
+ * c = corner
+ * f = fuse corner
+ *
+ */
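+	/*
+	 * For instance (illustrative numbers), a quotient difference of
+	 * 200 across a 400 MHz fuse-corner span gives
+	 * scaling = 1000 * 200 / 400 = 500; a corner 100 MHz below its
+	 * fuse corner's max frequency then gets
+	 * quot_adjust = 500 * 100 / 1000 = 50.
+	 */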
+ for (apply_scaling = false, i = 0; corner <= end; corner++, i++) {
+ fnum = cdata[i].fuse_corner;
+ fdata = &desc->cpr_fuses.fuse_corner_data[fnum];
+ quot_offset = fuses[fnum].quotient_offset;
+ fuse = &drv->fuse_corners[fnum];
+ if (fnum)
+ prev_fuse = &drv->fuse_corners[fnum - 1];
+ else
+ prev_fuse = NULL;
+
+ corner->fuse_corner = fuse;
+ corner->freq = cdata[i].freq;
+ corner->uV = fuse->uV;
+
+ if (prev_fuse && cdata[i - 1].freq == prev_fuse->max_freq) {
+ scaling = cpr_calculate_scaling(quot_offset, drv,
+ fdata, corner);
+ if (scaling < 0)
+ return scaling;
+
+ apply_scaling = true;
+ } else if (corner->freq == fuse->max_freq) {
+ /* This is a fuse corner; don't scale anything */
+ apply_scaling = false;
+ }
+
+ if (apply_scaling) {
+ freq_diff = fuse->max_freq - corner->freq;
+ freq_diff_mhz = freq_diff / 1000000;
+ corner->quot_adjust = scaling * freq_diff_mhz / 1000;
+
+ corner->uV = cpr_interpolate(corner, step_volt, fdata);
+ }
+
+ corner->max_uV = fuse->max_uV;
+ corner->min_uV = fuse->min_uV;
+ corner->uV = clamp(corner->uV, corner->min_uV, corner->max_uV);
+ corner->last_uV = corner->uV;
+
+ /* Reduce the ceiling voltage if needed */
+ if (desc->reduce_to_corner_uV && corner->uV < corner->max_uV)
+ corner->max_uV = corner->uV;
+ else if (desc->reduce_to_fuse_uV && fuse->uV < corner->max_uV)
+ corner->max_uV = max(corner->min_uV, fuse->uV);
+
+ dev_dbg(drv->dev, "corner %d: [%d %d %d] quot %d\n", i,
+ corner->min_uV, corner->uV, corner->max_uV,
+ fuse->quot - corner->quot_adjust);
+ }
+
+ return 0;
+}
+
+static const struct cpr_fuse *cpr_get_fuses(struct cpr_drv *drv)
+{
+ const struct cpr_desc *desc = drv->desc;
+ struct cpr_fuse *fuses;
+ int i;
+
+ fuses = devm_kcalloc(drv->dev, desc->num_fuse_corners,
+ sizeof(struct cpr_fuse),
+ GFP_KERNEL);
+ if (!fuses)
+ return ERR_PTR(-ENOMEM);
+
+ for (i = 0; i < desc->num_fuse_corners; i++) {
+ char tbuf[32];
+
+ snprintf(tbuf, 32, "cpr_ring_osc%d", i + 1);
+ fuses[i].ring_osc = devm_kstrdup(drv->dev, tbuf, GFP_KERNEL);
+ if (!fuses[i].ring_osc)
+ return ERR_PTR(-ENOMEM);
+
+ snprintf(tbuf, 32, "cpr_init_voltage%d", i + 1);
+ fuses[i].init_voltage = devm_kstrdup(drv->dev, tbuf,
+ GFP_KERNEL);
+ if (!fuses[i].init_voltage)
+ return ERR_PTR(-ENOMEM);
+
+ snprintf(tbuf, 32, "cpr_quotient%d", i + 1);
+ fuses[i].quotient = devm_kstrdup(drv->dev, tbuf, GFP_KERNEL);
+ if (!fuses[i].quotient)
+ return ERR_PTR(-ENOMEM);
+
+ snprintf(tbuf, 32, "cpr_quotient_offset%d", i + 1);
+ fuses[i].quotient_offset = devm_kstrdup(drv->dev, tbuf,
+ GFP_KERNEL);
+ if (!fuses[i].quotient_offset)
+ return ERR_PTR(-ENOMEM);
+ }
+
+ return fuses;
+}
+
+static void cpr_set_loop_allowed(struct cpr_drv *drv)
+{
+ drv->loop_disabled = false;
+}
+
+static int cpr_init_parameters(struct cpr_drv *drv)
+{
+ const struct cpr_desc *desc = drv->desc;
+ struct clk *clk;
+
+ clk = clk_get(drv->dev, "ref");
+ if (IS_ERR(clk))
+ return PTR_ERR(clk);
+
+ drv->ref_clk_khz = clk_get_rate(clk) / 1000;
+ clk_put(clk);
+
+ if (desc->timer_cons_up > RBIF_TIMER_ADJ_CONS_UP_MASK ||
+ desc->timer_cons_down > RBIF_TIMER_ADJ_CONS_DOWN_MASK ||
+ desc->up_threshold > RBCPR_CTL_UP_THRESHOLD_MASK ||
+ desc->down_threshold > RBCPR_CTL_DN_THRESHOLD_MASK ||
+ desc->idle_clocks > RBCPR_STEP_QUOT_IDLE_CLK_MASK ||
+ desc->clamp_timer_interval > RBIF_TIMER_ADJ_CLAMP_INT_MASK)
+ return -EINVAL;
+
+ dev_dbg(drv->dev, "up threshold = %u, down threshold = %u\n",
+ desc->up_threshold, desc->down_threshold);
+
+ return 0;
+}
+
+static int cpr_find_initial_corner(struct cpr_drv *drv)
+{
+ unsigned long rate;
+ const struct corner *end;
+ struct corner *iter;
+ unsigned int i = 0;
+
+ if (!drv->cpu_clk) {
+ dev_err(drv->dev, "cannot get rate from NULL clk\n");
+ return -EINVAL;
+ }
+
+ end = &drv->corners[drv->num_corners - 1];
+ rate = clk_get_rate(drv->cpu_clk);
+
+ /*
+ * Some bootloaders set a CPU clock frequency that is not defined
+ * in the OPP table. When running at an unlisted frequency,
+	 * cpufreq_online() will change to the OPP which has the lowest
+	 * frequency at or above the unlisted frequency.
+ * Since cpufreq_online() always "rounds up" in the case of an
+ * unlisted frequency, this function always "rounds down" in case
+ * of an unlisted frequency. That way, when cpufreq_online()
+ * triggers the first ever call to cpr_set_performance_state(),
+ * it will correctly determine the direction as UP.
+ */
+ for (iter = drv->corners; iter <= end; iter++) {
+ if (iter->freq > rate)
+ break;
+ i++;
+ if (iter->freq == rate) {
+ drv->corner = iter;
+ break;
+ }
+ if (iter->freq < rate)
+ drv->corner = iter;
+ }
+
+ if (!drv->corner) {
+ dev_err(drv->dev, "boot up corner not found\n");
+ return -EINVAL;
+ }
+
+ dev_dbg(drv->dev, "boot up perf state: %u\n", i);
+
+ return 0;
+}
+
+static const struct cpr_desc qcs404_cpr_desc = {
+ .num_fuse_corners = 3,
+ .min_diff_quot = CPR_FUSE_MIN_QUOT_DIFF,
+ .step_quot = (int []){ 25, 25, 25, },
+ .timer_delay_us = 5000,
+ .timer_cons_up = 0,
+ .timer_cons_down = 2,
+ .up_threshold = 1,
+ .down_threshold = 3,
+ .idle_clocks = 15,
+ .gcnt_us = 1,
+ .vdd_apc_step_up_limit = 1,
+ .vdd_apc_step_down_limit = 1,
+ .cpr_fuses = {
+ .init_voltage_step = 8000,
+ .init_voltage_width = 6,
+ .fuse_corner_data = (struct fuse_corner_data[]){
+ /* fuse corner 0 */
+ {
+ .ref_uV = 1224000,
+ .max_uV = 1224000,
+ .min_uV = 1048000,
+ .max_volt_scale = 0,
+ .max_quot_scale = 0,
+ .quot_offset = 0,
+ .quot_scale = 1,
+ .quot_adjust = 0,
+ .quot_offset_scale = 5,
+ .quot_offset_adjust = 0,
+ },
+ /* fuse corner 1 */
+ {
+ .ref_uV = 1288000,
+ .max_uV = 1288000,
+ .min_uV = 1048000,
+ .max_volt_scale = 2000,
+ .max_quot_scale = 1400,
+ .quot_offset = 0,
+ .quot_scale = 1,
+ .quot_adjust = -20,
+ .quot_offset_scale = 5,
+ .quot_offset_adjust = 0,
+ },
+ /* fuse corner 2 */
+ {
+ .ref_uV = 1352000,
+ .max_uV = 1384000,
+ .min_uV = 1088000,
+ .max_volt_scale = 2000,
+ .max_quot_scale = 1400,
+ .quot_offset = 0,
+ .quot_scale = 1,
+ .quot_adjust = 0,
+ .quot_offset_scale = 5,
+ .quot_offset_adjust = 0,
+ },
+ },
+ },
+};
+
+static const struct acc_desc qcs404_acc_desc = {
+ .settings = (struct reg_sequence[]){
+ { 0xb120, 0x1041040 },
+ { 0xb124, 0x41 },
+ { 0xb120, 0x0 },
+ { 0xb124, 0x0 },
+ { 0xb120, 0x0 },
+ { 0xb124, 0x0 },
+ },
+ .config = (struct reg_sequence[]){
+ { 0xb138, 0xff },
+ { 0xb130, 0x5555 },
+ },
+ .num_regs_per_fuse = 2,
+};
+
+static const struct cpr_acc_desc qcs404_cpr_acc_desc = {
+ .cpr_desc = &qcs404_cpr_desc,
+ .acc_desc = &qcs404_acc_desc,
+};
+
+static unsigned int cpr_get_performance_state(struct generic_pm_domain *genpd,
+ struct dev_pm_opp *opp)
+{
+ return dev_pm_opp_get_level(opp);
+}
+
+static int cpr_power_off(struct generic_pm_domain *domain)
+{
+ struct cpr_drv *drv = container_of(domain, struct cpr_drv, pd);
+
+ return cpr_disable(drv);
+}
+
+static int cpr_power_on(struct generic_pm_domain *domain)
+{
+ struct cpr_drv *drv = container_of(domain, struct cpr_drv, pd);
+
+ return cpr_enable(drv);
+}
+
+static int cpr_pd_attach_dev(struct generic_pm_domain *domain,
+ struct device *dev)
+{
+ struct cpr_drv *drv = container_of(domain, struct cpr_drv, pd);
+ const struct acc_desc *acc_desc = drv->acc_desc;
+ int ret = 0;
+
+ mutex_lock(&drv->lock);
+
+ dev_dbg(drv->dev, "attach callback for: %s\n", dev_name(dev));
+
+ /*
+ * This driver only supports scaling voltage for a CPU cluster
+ * where all CPUs in the cluster share a single regulator.
+ * Therefore, save the struct device pointer only for the first
+ * CPU device that gets attached. There is no need to do any
+ * additional initialization when further CPUs get attached.
+ */
+ if (drv->attached_cpu_dev)
+ goto unlock;
+
+ /*
+	 * cpr_scale_voltage() requires the direction (whether we are changing
+ * to a higher or lower OPP). The first time
+ * cpr_set_performance_state() is called, there is no previous
+ * performance state defined. Therefore, we call
+ * cpr_find_initial_corner() that gets the CPU clock frequency
+ * set by the bootloader, so that we can determine the direction
+ * the first time cpr_set_performance_state() is called.
+ */
+ drv->cpu_clk = devm_clk_get(dev, NULL);
+ if (IS_ERR(drv->cpu_clk)) {
+ ret = PTR_ERR(drv->cpu_clk);
+ if (ret != -EPROBE_DEFER)
+ dev_err(drv->dev, "could not get cpu clk: %d\n", ret);
+ goto unlock;
+ }
+ drv->attached_cpu_dev = dev;
+
+ dev_dbg(drv->dev, "using cpu clk from: %s\n",
+ dev_name(drv->attached_cpu_dev));
+
+ /*
+ * Everything related to (virtual) corners has to be initialized
+ * here, when attaching to the power domain, since we need to know
+ * the maximum frequency for each fuse corner, and this is only
+ * available after the cpufreq driver has attached to us.
+ */
+ ret = dev_pm_opp_get_opp_count(&drv->pd.dev);
+ if (ret < 0) {
+ dev_err(drv->dev, "could not get OPP count\n");
+ goto unlock;
+ }
+ drv->num_corners = ret;
+
+ if (drv->num_corners < 2) {
+ dev_err(drv->dev, "need at least 2 OPPs to use CPR\n");
+ ret = -EINVAL;
+ goto unlock;
+ }
+
+ dev_dbg(drv->dev, "number of OPPs: %d\n", drv->num_corners);
+
+ drv->corners = devm_kcalloc(drv->dev, drv->num_corners,
+ sizeof(*drv->corners),
+ GFP_KERNEL);
+ if (!drv->corners) {
+ ret = -ENOMEM;
+ goto unlock;
+ }
+
+ ret = cpr_corner_init(drv);
+ if (ret)
+ goto unlock;
+
+ cpr_set_loop_allowed(drv);
+
+ ret = cpr_init_parameters(drv);
+ if (ret)
+ goto unlock;
+
+ /* Configure CPR HW but keep it disabled */
+ ret = cpr_config(drv);
+ if (ret)
+ goto unlock;
+
+ ret = cpr_find_initial_corner(drv);
+ if (ret)
+ goto unlock;
+
+ if (acc_desc->config)
+ regmap_multi_reg_write(drv->tcsr, acc_desc->config,
+ acc_desc->num_regs_per_fuse);
+
+ /* Enable ACC if required */
+ if (acc_desc->enable_mask)
+ regmap_update_bits(drv->tcsr, acc_desc->enable_reg,
+ acc_desc->enable_mask,
+ acc_desc->enable_mask);
+
+unlock:
+ mutex_unlock(&drv->lock);
+
+ return ret;
+}
+
+static int cpr_debug_info_show(struct seq_file *s, void *unused)
+{
+ u32 gcnt, ro_sel, ctl, irq_status, reg, error_steps;
+ u32 step_dn, step_up, error, error_lt0, busy;
+ struct cpr_drv *drv = s->private;
+ struct fuse_corner *fuse_corner;
+ struct corner *corner;
+
+ corner = drv->corner;
+ fuse_corner = corner->fuse_corner;
+
+ seq_printf(s, "corner, current_volt = %d uV\n",
+ corner->last_uV);
+
+ ro_sel = fuse_corner->ring_osc_idx;
+ gcnt = cpr_read(drv, REG_RBCPR_GCNT_TARGET(ro_sel));
+ seq_printf(s, "rbcpr_gcnt_target (%u) = %#02X\n", ro_sel, gcnt);
+
+ ctl = cpr_read(drv, REG_RBCPR_CTL);
+ seq_printf(s, "rbcpr_ctl = %#02X\n", ctl);
+
+ irq_status = cpr_read(drv, REG_RBIF_IRQ_STATUS);
+ seq_printf(s, "rbcpr_irq_status = %#02X\n", irq_status);
+
+ reg = cpr_read(drv, REG_RBCPR_RESULT_0);
+ seq_printf(s, "rbcpr_result_0 = %#02X\n", reg);
+
+ step_dn = reg & 0x01;
+ step_up = (reg >> RBCPR_RESULT0_STEP_UP_SHIFT) & 0x01;
+ seq_printf(s, " [step_dn = %u", step_dn);
+
+ seq_printf(s, ", step_up = %u", step_up);
+
+ error_steps = (reg >> RBCPR_RESULT0_ERROR_STEPS_SHIFT)
+ & RBCPR_RESULT0_ERROR_STEPS_MASK;
+ seq_printf(s, ", error_steps = %u", error_steps);
+
+ error = (reg >> RBCPR_RESULT0_ERROR_SHIFT) & RBCPR_RESULT0_ERROR_MASK;
+ seq_printf(s, ", error = %u", error);
+
+ error_lt0 = (reg >> RBCPR_RESULT0_ERROR_LT0_SHIFT) & 0x01;
+ seq_printf(s, ", error_lt_0 = %u", error_lt0);
+
+ busy = (reg >> RBCPR_RESULT0_BUSY_SHIFT) & 0x01;
+ seq_printf(s, ", busy = %u]\n", busy);
+
+ return 0;
+}
+DEFINE_SHOW_ATTRIBUTE(cpr_debug_info);
+
+static void cpr_debugfs_init(struct cpr_drv *drv)
+{
+ drv->debugfs = debugfs_create_dir("qcom_cpr", NULL);
+
+ debugfs_create_file("debug_info", 0444, drv->debugfs,
+ drv, &cpr_debug_info_fops);
+}
+
+static int cpr_probe(struct platform_device *pdev)
+{
+ struct resource *res;
+ struct device *dev = &pdev->dev;
+ struct cpr_drv *drv;
+ int irq, ret;
+ const struct cpr_acc_desc *data;
+ struct device_node *np;
+ u32 cpr_rev = FUSE_REVISION_UNKNOWN;
+
+ data = of_device_get_match_data(dev);
+ if (!data || !data->cpr_desc || !data->acc_desc)
+ return -EINVAL;
+
+ drv = devm_kzalloc(dev, sizeof(*drv), GFP_KERNEL);
+ if (!drv)
+ return -ENOMEM;
+ drv->dev = dev;
+ drv->desc = data->cpr_desc;
+ drv->acc_desc = data->acc_desc;
+
+ drv->fuse_corners = devm_kcalloc(dev, drv->desc->num_fuse_corners,
+ sizeof(*drv->fuse_corners),
+ GFP_KERNEL);
+ if (!drv->fuse_corners)
+ return -ENOMEM;
+
+ np = of_parse_phandle(dev->of_node, "acc-syscon", 0);
+ if (!np)
+ return -ENODEV;
+
+ drv->tcsr = syscon_node_to_regmap(np);
+ of_node_put(np);
+ if (IS_ERR(drv->tcsr))
+ return PTR_ERR(drv->tcsr);
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ drv->base = devm_ioremap_resource(dev, res);
+ if (IS_ERR(drv->base))
+ return PTR_ERR(drv->base);
+
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0)
+ return -EINVAL;
+
+ drv->vdd_apc = devm_regulator_get(dev, "vdd-apc");
+ if (IS_ERR(drv->vdd_apc))
+ return PTR_ERR(drv->vdd_apc);
+
+ /*
+	 * Initialize fuse corners first, since they depend only
+	 * on data in efuses.
+ * Everything related to (virtual) corners has to be
+ * initialized after attaching to the power domain,
+ * since it depends on the CPU's OPP table.
+ */
+ ret = cpr_read_efuse(dev, "cpr_fuse_revision", &cpr_rev);
+ if (ret)
+ return ret;
+
+ drv->cpr_fuses = cpr_get_fuses(drv);
+ if (IS_ERR(drv->cpr_fuses))
+ return PTR_ERR(drv->cpr_fuses);
+
+ ret = cpr_populate_ring_osc_idx(drv);
+ if (ret)
+ return ret;
+
+ ret = cpr_fuse_corner_init(drv);
+ if (ret)
+ return ret;
+
+ mutex_init(&drv->lock);
+
+ ret = devm_request_threaded_irq(dev, irq, NULL,
+ cpr_irq_handler,
+ IRQF_ONESHOT | IRQF_TRIGGER_RISING,
+ "cpr", drv);
+ if (ret)
+ return ret;
+
+ drv->pd.name = devm_kstrdup_const(dev, dev->of_node->full_name,
+ GFP_KERNEL);
+ if (!drv->pd.name)
+ return -EINVAL;
+
+ drv->pd.power_off = cpr_power_off;
+ drv->pd.power_on = cpr_power_on;
+ drv->pd.set_performance_state = cpr_set_performance_state;
+ drv->pd.opp_to_performance_state = cpr_get_performance_state;
+ drv->pd.attach_dev = cpr_pd_attach_dev;
+
+ ret = pm_genpd_init(&drv->pd, NULL, true);
+ if (ret)
+ return ret;
+
+ ret = of_genpd_add_provider_simple(dev->of_node, &drv->pd);
+ if (ret)
+ return ret;
+
+ platform_set_drvdata(pdev, drv);
+ cpr_debugfs_init(drv);
+
+ return 0;
+}
+
+static int cpr_remove(struct platform_device *pdev)
+{
+ struct cpr_drv *drv = platform_get_drvdata(pdev);
+
+ if (cpr_is_allowed(drv)) {
+ cpr_ctl_disable(drv);
+ cpr_irq_set(drv, 0);
+ }
+
+ of_genpd_del_provider(pdev->dev.of_node);
+ pm_genpd_remove(&drv->pd);
+
+ debugfs_remove_recursive(drv->debugfs);
+
+ return 0;
+}
+
+static const struct of_device_id cpr_match_table[] = {
+ { .compatible = "qcom,qcs404-cpr", .data = &qcs404_cpr_acc_desc },
+ { }
+};
+MODULE_DEVICE_TABLE(of, cpr_match_table);
+
+static struct platform_driver cpr_driver = {
+ .probe = cpr_probe,
+ .remove = cpr_remove,
+ .driver = {
+ .name = "qcom-cpr",
+ .of_match_table = cpr_match_table,
+ },
+};
+module_platform_driver(cpr_driver);
+
+MODULE_DESCRIPTION("Core Power Reduction (CPR) driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/power/reset/Kconfig b/drivers/power/reset/Kconfig
index c721939767eb..0498363203e8 100644
--- a/drivers/power/reset/Kconfig
+++ b/drivers/power/reset/Kconfig
@@ -141,14 +141,14 @@ config POWER_RESET_LTC2952
down via the LTC2952. Bindings are made in the device tree.
config POWER_RESET_MT6323
- bool "MediaTek MT6323 power-off driver"
- depends on MFD_MT6397
- help
- The power-off driver is responsible for externally shutdown down
- the power of a remote MediaTek SoC MT6323 is connected to through
- controlling a tiny circuit BBPU inside MT6323 RTC.
-
- Say Y if you have a board where MT6323 could be found.
+ bool "MediaTek MT6323 power-off driver"
+ depends on MFD_MT6397
+ help
+	  The power-off driver is responsible for externally shutting down
+	  the power of the remote MediaTek SoC the MT6323 is connected to,
+	  by controlling a tiny BBPU circuit inside the MT6323 RTC.
+
+ Say Y if you have a board where MT6323 could be found.
config POWER_RESET_QNAP
bool "QNAP power-off driver"
diff --git a/drivers/power/reset/at91-sama5d2_shdwc.c b/drivers/power/reset/at91-sama5d2_shdwc.c
index 1c18f465a245..2fe3a627cb53 100644
--- a/drivers/power/reset/at91-sama5d2_shdwc.c
+++ b/drivers/power/reset/at91-sama5d2_shdwc.c
@@ -66,7 +66,7 @@
#define SHDW_CFG_NOT_USED (32)
-struct shdwc_config {
+struct shdwc_reg_config {
u8 wkup_pin_input;
u8 mr_rtcwk_shift;
u8 mr_rttwk_shift;
@@ -74,8 +74,17 @@ struct shdwc_config {
u8 sr_rttwk_shift;
};
+struct pmc_reg_config {
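+	/* Offset of the PMC_MCKR register; differs between SoCs */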
+ u8 mckr;
+};
+
+struct reg_config {
+ struct shdwc_reg_config shdwc;
+ struct pmc_reg_config pmc;
+};
+
struct shdwc {
- const struct shdwc_config *cfg;
+ const struct reg_config *rcfg;
struct clk *sclk;
void __iomem *shdwc_base;
void __iomem *mpddrc_base;
@@ -95,6 +104,7 @@ static const unsigned long long sdwc_dbc_period[] = {
static void __init at91_wakeup_status(struct platform_device *pdev)
{
struct shdwc *shdw = platform_get_drvdata(pdev);
+ const struct reg_config *rcfg = shdw->rcfg;
u32 reg;
char *reason = "unknown";
@@ -106,11 +116,11 @@ static void __init at91_wakeup_status(struct platform_device *pdev)
if (!reg)
return;
- if (SHDW_WK_PIN(reg, shdw->cfg))
+ if (SHDW_WK_PIN(reg, &rcfg->shdwc))
reason = "WKUP pin";
- else if (SHDW_RTCWK(reg, shdw->cfg))
+ else if (SHDW_RTCWK(reg, &rcfg->shdwc))
reason = "RTC";
- else if (SHDW_RTTWK(reg, shdw->cfg))
+ else if (SHDW_RTTWK(reg, &rcfg->shdwc))
reason = "RTT";
pr_info("AT91: Wake-Up source: %s\n", reason);
@@ -131,9 +141,9 @@ static void at91_poweroff(void)
" str %1, [%0, #" __stringify(AT91_DDRSDRC_LPR) "]\n\t"
/* Switch the master clock source to slow clock. */
- "1: ldr r6, [%4, #" __stringify(AT91_PMC_MCKR) "]\n\t"
+ "1: ldr r6, [%4, %5]\n\t"
" bic r6, r6, #" __stringify(AT91_PMC_CSS) "\n\t"
- " str r6, [%4, #" __stringify(AT91_PMC_MCKR) "]\n\t"
+ " str r6, [%4, %5]\n\t"
/* Wait for clock switch. */
"2: ldr r6, [%4, #" __stringify(AT91_PMC_SR) "]\n\t"
" tst r6, #" __stringify(AT91_PMC_MCKRDY) "\n\t"
@@ -148,7 +158,8 @@ static void at91_poweroff(void)
"r" cpu_to_le32(AT91_DDRSDRC_LPDDR2_PWOFF),
"r" (at91_shdwc->shdwc_base),
"r" cpu_to_le32(AT91_SHDW_KEY | AT91_SHDW_SHDW),
- "r" (at91_shdwc->pmc_base)
+ "r" (at91_shdwc->pmc_base),
+ "r" (at91_shdwc->rcfg->pmc.mckr)
: "r6");
}
@@ -215,6 +226,7 @@ static u32 at91_shdwc_get_wakeup_input(struct platform_device *pdev,
static void at91_shdwc_dt_configure(struct platform_device *pdev)
{
struct shdwc *shdw = platform_get_drvdata(pdev);
+ const struct reg_config *rcfg = shdw->rcfg;
struct device_node *np = pdev->dev.of_node;
u32 mode = 0, tmp, input;
@@ -227,10 +239,10 @@ static void at91_shdwc_dt_configure(struct platform_device *pdev)
mode |= AT91_SHDW_WKUPDBC(at91_shdwc_debouncer_value(pdev, tmp));
if (of_property_read_bool(np, "atmel,wakeup-rtc-timer"))
- mode |= SHDW_RTCWKEN(shdw->cfg);
+ mode |= SHDW_RTCWKEN(&rcfg->shdwc);
if (of_property_read_bool(np, "atmel,wakeup-rtt-timer"))
- mode |= SHDW_RTTWKEN(shdw->cfg);
+ mode |= SHDW_RTTWKEN(&rcfg->shdwc);
dev_dbg(&pdev->dev, "%s: mode = %#x\n", __func__, mode);
writel(mode, shdw->shdwc_base + AT91_SHDW_MR);
@@ -239,30 +251,40 @@ static void at91_shdwc_dt_configure(struct platform_device *pdev)
writel(input, shdw->shdwc_base + AT91_SHDW_WUIR);
}
-static const struct shdwc_config sama5d2_shdwc_config = {
- .wkup_pin_input = 0,
- .mr_rtcwk_shift = 17,
- .mr_rttwk_shift = SHDW_CFG_NOT_USED,
- .sr_rtcwk_shift = 5,
- .sr_rttwk_shift = SHDW_CFG_NOT_USED,
+static const struct reg_config sama5d2_reg_config = {
+ .shdwc = {
+ .wkup_pin_input = 0,
+ .mr_rtcwk_shift = 17,
+ .mr_rttwk_shift = SHDW_CFG_NOT_USED,
+ .sr_rtcwk_shift = 5,
+ .sr_rttwk_shift = SHDW_CFG_NOT_USED,
+ },
+ .pmc = {
+ .mckr = 0x30,
+ },
};
-static const struct shdwc_config sam9x60_shdwc_config = {
- .wkup_pin_input = 0,
- .mr_rtcwk_shift = 17,
- .mr_rttwk_shift = 16,
- .sr_rtcwk_shift = 5,
- .sr_rttwk_shift = 4,
+static const struct reg_config sam9x60_reg_config = {
+ .shdwc = {
+ .wkup_pin_input = 0,
+ .mr_rtcwk_shift = 17,
+ .mr_rttwk_shift = 16,
+ .sr_rtcwk_shift = 5,
+ .sr_rttwk_shift = 4,
+ },
+ .pmc = {
+ .mckr = 0x28,
+ },
};
static const struct of_device_id at91_shdwc_of_match[] = {
{
.compatible = "atmel,sama5d2-shdwc",
- .data = &sama5d2_shdwc_config,
+ .data = &sama5d2_reg_config,
},
{
.compatible = "microchip,sam9x60-shdwc",
- .data = &sam9x60_shdwc_config,
+ .data = &sam9x60_reg_config,
}, {
/*sentinel*/
}
@@ -303,7 +325,7 @@ static int __init at91_shdwc_probe(struct platform_device *pdev)
}
match = of_match_node(at91_shdwc_of_match, pdev->dev.of_node);
- at91_shdwc->cfg = match->data;
+ at91_shdwc->rcfg = match->data;
at91_shdwc->sclk = devm_clk_get(&pdev->dev, NULL);
if (IS_ERR(at91_shdwc->sclk))
diff --git a/drivers/power/reset/gpio-restart.c b/drivers/power/reset/gpio-restart.c
index 308ca9d9d276..5466eeea261c 100644
--- a/drivers/power/reset/gpio-restart.c
+++ b/drivers/power/reset/gpio-restart.c
@@ -64,9 +64,11 @@ static int gpio_restart_probe(struct platform_device *pdev)
gpio_restart->reset_gpio = devm_gpiod_get(&pdev->dev, NULL,
open_source ? GPIOD_IN : GPIOD_OUT_LOW);
- if (IS_ERR(gpio_restart->reset_gpio)) {
- dev_err(&pdev->dev, "Could not get reset GPIO\n");
- return PTR_ERR(gpio_restart->reset_gpio);
+ ret = PTR_ERR_OR_ZERO(gpio_restart->reset_gpio);
+ if (ret) {
+ if (ret != -EPROBE_DEFER)
+ dev_err(&pdev->dev, "Could not get reset GPIO\n");
+ return ret;
}
gpio_restart->restart_handler.notifier_call = gpio_restart_notify;
diff --git a/drivers/power/supply/Kconfig b/drivers/power/supply/Kconfig
index 27164a1d3c7c..9a5591ab90d0 100644
--- a/drivers/power/supply/Kconfig
+++ b/drivers/power/supply/Kconfig
@@ -73,10 +73,10 @@ config WM831X_POWER
provided by Wolfson Microelectronics WM831x PMICs.
config WM8350_POWER
- tristate "WM8350 PMU support"
- depends on MFD_WM8350
- help
- Say Y here to enable support for the power management unit
+ tristate "WM8350 PMU support"
+ depends on MFD_WM8350
+ help
+ Say Y here to enable support for the power management unit
provided by the Wolfson Microelectronics WM8350 PMIC.
config TEST_POWER
@@ -209,16 +209,16 @@ config BATTERY_WM97XX
Say Y to enable support for battery measured by WM97xx aux port.
config BATTERY_SBS
- tristate "SBS Compliant gas gauge"
- depends on I2C
- help
+ tristate "SBS Compliant gas gauge"
+ depends on I2C
+ help
Say Y to include support for SBS battery driver for SBS-compliant
gas gauges.
config CHARGER_SBS
- tristate "SBS Compliant charger"
- depends on I2C
- help
+ tristate "SBS Compliant charger"
+ depends on I2C
+ help
Say Y to include support for SBS compliant battery chargers.
config MANAGER_SBS
@@ -484,11 +484,11 @@ config CHARGER_MANAGER
depends on REGULATOR
select EXTCON
help
- Say Y to enable charger-manager support, which allows multiple
- chargers attached to a battery and multiple batteries attached to a
- system. The charger-manager also can monitor charging status in
- runtime and in suspend-to-RAM by waking up the system periodically
- with help of suspend_again support.
+ Say Y to enable charger-manager support, which allows multiple
+ chargers attached to a battery and multiple batteries attached to a
+	  system. The charger-manager can also monitor charging status at
+	  runtime and in suspend-to-RAM by waking up the system periodically
+	  with the help of suspend_again support.
config CHARGER_LT3651
tristate "Analog Devices LT3651 charger"
diff --git a/drivers/power/supply/ab8500_charger.c b/drivers/power/supply/ab8500_charger.c
index 8a0f9d769690..f69550d64f09 100644
--- a/drivers/power/supply/ab8500_charger.c
+++ b/drivers/power/supply/ab8500_charger.c
@@ -789,7 +789,7 @@ static int ab8500_charger_max_usb_curr(struct ab8500_charger *di,
di->max_usb_in_curr.usb_type_max = USB_CH_IP_CUR_LVL_0P05;
ret = -ENXIO;
break;
- };
+ }
di->max_usb_in_curr.set_max = di->max_usb_in_curr.usb_type_max;
dev_dbg(di->dev, "USB Type - 0x%02x MaxCurr: %d",
@@ -1079,7 +1079,7 @@ static int ab8500_charger_get_usb_cur(struct ab8500_charger *di)
di->max_usb_in_curr.usb_type_max = USB_CH_IP_CUR_LVL_0P05;
ret = -EPERM;
break;
- };
+ }
di->max_usb_in_curr.set_max = di->max_usb_in_curr.usb_type_max;
return ret;
}
@@ -2427,7 +2427,7 @@ static void ab8500_charger_usb_state_changed_work(struct work_struct *work)
default:
break;
- };
+ }
}
/**
diff --git a/drivers/power/supply/ab8500_fg.c b/drivers/power/supply/ab8500_fg.c
index c3912ee9eb99..b96f90a82ecf 100644
--- a/drivers/power/supply/ab8500_fg.c
+++ b/drivers/power/supply/ab8500_fg.c
@@ -2221,10 +2221,10 @@ static int ab8500_fg_get_ext_psy_data(struct device *dev, void *data)
ab8500_fg_update_cap_scalers(di);
queue_work(di->fg_wq, &di->fg_work);
break;
- };
+ }
default:
break;
- };
+ }
break;
case POWER_SUPPLY_PROP_TECHNOLOGY:
switch (ext->desc->type) {
@@ -2331,7 +2331,7 @@ static int ab8500_fg_init_hw_registers(struct ab8500_fg *di)
if (ret) {
dev_err(di->dev, "%s write failed AB8505_RTC_PCUT_MAX_TIME_REG\n", __func__);
goto out;
- };
+ }
ret = abx500_set_register_interruptible(di->dev, AB8500_RTC,
AB8505_RTC_PCUT_FLAG_TIME_REG, di->bm->fg_params->pcut_flag_time);
@@ -2339,7 +2339,7 @@ static int ab8500_fg_init_hw_registers(struct ab8500_fg *di)
if (ret) {
dev_err(di->dev, "%s write failed AB8505_RTC_PCUT_FLAG_TIME_REG\n", __func__);
goto out;
- };
+ }
ret = abx500_set_register_interruptible(di->dev, AB8500_RTC,
AB8505_RTC_PCUT_RESTART_REG, di->bm->fg_params->pcut_max_restart);
@@ -2347,7 +2347,7 @@ static int ab8500_fg_init_hw_registers(struct ab8500_fg *di)
if (ret) {
dev_err(di->dev, "%s write failed AB8505_RTC_PCUT_RESTART_REG\n", __func__);
goto out;
- };
+ }
ret = abx500_set_register_interruptible(di->dev, AB8500_RTC,
AB8505_RTC_PCUT_DEBOUNCE_REG, di->bm->fg_params->pcut_debounce_time);
@@ -2355,7 +2355,7 @@ static int ab8500_fg_init_hw_registers(struct ab8500_fg *di)
if (ret) {
dev_err(di->dev, "%s write failed AB8505_RTC_PCUT_DEBOUNCE_REG\n", __func__);
goto out;
- };
+ }
ret = abx500_set_register_interruptible(di->dev, AB8500_RTC,
AB8505_RTC_PCUT_CTL_STATUS_REG, di->bm->fg_params->pcut_enable);
@@ -2363,7 +2363,7 @@ static int ab8500_fg_init_hw_registers(struct ab8500_fg *di)
if (ret) {
dev_err(di->dev, "%s write failed AB8505_RTC_PCUT_CTL_STATUS_REG\n", __func__);
goto out;
- };
+ }
}
out:
return ret;
diff --git a/drivers/power/supply/abx500_chargalg.c b/drivers/power/supply/abx500_chargalg.c
index e6e37d4f20e4..2fb33a07879a 100644
--- a/drivers/power/supply/abx500_chargalg.c
+++ b/drivers/power/supply/abx500_chargalg.c
@@ -1823,7 +1823,7 @@ static ssize_t abx500_chargalg_en_store(struct abx500_chargalg *di,
"Enter 0. Disable AC/USB Charging\n"
"1. Enable AC charging\n"
"2. Enable USB Charging\n");
- };
+ }
return strlen(buf);
}
diff --git a/drivers/power/supply/axp20x_ac_power.c b/drivers/power/supply/axp20x_ac_power.c
index 0d34a932b6d5..ac360016b08a 100644
--- a/drivers/power/supply/axp20x_ac_power.c
+++ b/drivers/power/supply/axp20x_ac_power.c
@@ -15,6 +15,7 @@
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>
+#include <linux/pm.h>
#include <linux/power_supply.h>
#include <linux/regmap.h>
#include <linux/slab.h>
@@ -23,6 +24,9 @@
#define AXP20X_PWR_STATUS_ACIN_PRESENT BIT(7)
#define AXP20X_PWR_STATUS_ACIN_AVAIL BIT(6)
+#define AXP813_ACIN_PATH_SEL BIT(7)
+#define AXP813_ACIN_PATH_SEL_TO_BIT(x) (!!(x) << 7)
+
#define AXP813_VHOLD_MASK GENMASK(5, 3)
#define AXP813_VHOLD_UV_TO_BIT(x) ((((x) / 100000) - 40) << 3)
#define AXP813_VHOLD_REG_TO_UV(x) \
@@ -40,6 +44,9 @@ struct axp20x_ac_power {
struct power_supply *supply;
struct iio_channel *acin_v;
struct iio_channel *acin_i;
+ bool has_acin_path_sel;
+ unsigned int num_irqs;
+ unsigned int irqs[];
};
static irqreturn_t axp20x_ac_power_irq(int irq, void *devid)
@@ -86,6 +93,17 @@ static int axp20x_ac_power_get_property(struct power_supply *psy,
return ret;
val->intval = !!(reg & AXP20X_PWR_STATUS_ACIN_AVAIL);
+
+ /* ACIN_PATH_SEL disables ACIN even if ACIN_AVAIL is set. */
+ if (val->intval && power->has_acin_path_sel) {
+ ret = regmap_read(power->regmap, AXP813_ACIN_PATH_CTRL,
+ &reg);
+ if (ret)
+ return ret;
+
+ val->intval = !!(reg & AXP813_ACIN_PATH_SEL);
+ }
+
return 0;
case POWER_SUPPLY_PROP_VOLTAGE_NOW:
@@ -143,6 +161,11 @@ static int axp813_ac_power_set_property(struct power_supply *psy,
struct axp20x_ac_power *power = power_supply_get_drvdata(psy);
switch (psp) {
+ case POWER_SUPPLY_PROP_ONLINE:
+ return regmap_update_bits(power->regmap, AXP813_ACIN_PATH_CTRL,
+ AXP813_ACIN_PATH_SEL,
+ AXP813_ACIN_PATH_SEL_TO_BIT(val->intval));
+
case POWER_SUPPLY_PROP_VOLTAGE_MIN:
if (val->intval < 4000000 || val->intval > 4700000)
return -EINVAL;
@@ -169,7 +192,8 @@ static int axp813_ac_power_set_property(struct power_supply *psy,
static int axp813_ac_power_prop_writeable(struct power_supply *psy,
enum power_supply_property psp)
{
- return psp == POWER_SUPPLY_PROP_VOLTAGE_MIN ||
+ return psp == POWER_SUPPLY_PROP_ONLINE ||
+ psp == POWER_SUPPLY_PROP_VOLTAGE_MIN ||
psp == POWER_SUPPLY_PROP_INPUT_CURRENT_LIMIT;
}
@@ -221,34 +245,86 @@ static const struct power_supply_desc axp813_ac_power_desc = {
.set_property = axp813_ac_power_set_property,
};
+static const char * const axp20x_irq_names[] = {
+ "ACIN_PLUGIN",
+ "ACIN_REMOVAL",
+};
+
struct axp_data {
const struct power_supply_desc *power_desc;
+ const char * const *irq_names;
+ unsigned int num_irq_names;
bool acin_adc;
+ bool acin_path_sel;
};
static const struct axp_data axp20x_data = {
- .power_desc = &axp20x_ac_power_desc,
- .acin_adc = true,
+ .power_desc = &axp20x_ac_power_desc,
+ .irq_names = axp20x_irq_names,
+ .num_irq_names = ARRAY_SIZE(axp20x_irq_names),
+ .acin_adc = true,
+ .acin_path_sel = false,
};
static const struct axp_data axp22x_data = {
- .power_desc = &axp22x_ac_power_desc,
- .acin_adc = false,
+ .power_desc = &axp22x_ac_power_desc,
+ .irq_names = axp20x_irq_names,
+ .num_irq_names = ARRAY_SIZE(axp20x_irq_names),
+ .acin_adc = false,
+ .acin_path_sel = false,
};
static const struct axp_data axp813_data = {
- .power_desc = &axp813_ac_power_desc,
- .acin_adc = false,
+ .power_desc = &axp813_ac_power_desc,
+ .irq_names = axp20x_irq_names,
+ .num_irq_names = ARRAY_SIZE(axp20x_irq_names),
+ .acin_adc = false,
+ .acin_path_sel = true,
};
+#ifdef CONFIG_PM_SLEEP
+static int axp20x_ac_power_suspend(struct device *dev)
+{
+ struct axp20x_ac_power *power = dev_get_drvdata(dev);
+ int i = 0;
+
+ /*
+ * Allow wake via ACIN_PLUGIN only.
+ *
+ * As nested threaded IRQs are not automatically disabled during
+ * suspend, we must explicitly disable the remainder of the IRQs.
+ */
+ if (device_may_wakeup(&power->supply->dev))
+ enable_irq_wake(power->irqs[i++]);
+ while (i < power->num_irqs)
+ disable_irq(power->irqs[i++]);
+
+ return 0;
+}
+
+static int axp20x_ac_power_resume(struct device *dev)
+{
+ struct axp20x_ac_power *power = dev_get_drvdata(dev);
+ int i = 0;
+
+ if (device_may_wakeup(&power->supply->dev))
+ disable_irq_wake(power->irqs[i++]);
+ while (i < power->num_irqs)
+ enable_irq(power->irqs[i++]);
+
+ return 0;
+}
+#endif
+
+static SIMPLE_DEV_PM_OPS(axp20x_ac_power_pm_ops, axp20x_ac_power_suspend,
+ axp20x_ac_power_resume);
+
static int axp20x_ac_power_probe(struct platform_device *pdev)
{
struct axp20x_dev *axp20x = dev_get_drvdata(pdev->dev.parent);
struct power_supply_config psy_cfg = {};
struct axp20x_ac_power *power;
const struct axp_data *axp_data;
- static const char * const irq_names[] = { "ACIN_PLUGIN", "ACIN_REMOVAL",
- NULL };
int i, irq, ret;
if (!of_device_is_available(pdev->dev.of_node))
@@ -259,12 +335,14 @@ static int axp20x_ac_power_probe(struct platform_device *pdev)
return -EINVAL;
}
- power = devm_kzalloc(&pdev->dev, sizeof(*power), GFP_KERNEL);
+ axp_data = of_device_get_match_data(&pdev->dev);
+
+ power = devm_kzalloc(&pdev->dev,
+ struct_size(power, irqs, axp_data->num_irq_names),
+ GFP_KERNEL);
if (!power)
return -ENOMEM;
- axp_data = of_device_get_match_data(&pdev->dev);
-
if (axp_data->acin_adc) {
power->acin_v = devm_iio_channel_get(&pdev->dev, "acin_v");
if (IS_ERR(power->acin_v)) {
@@ -282,6 +360,8 @@ static int axp20x_ac_power_probe(struct platform_device *pdev)
}
power->regmap = dev_get_regmap(pdev->dev.parent, NULL);
+ power->has_acin_path_sel = axp_data->acin_path_sel;
+ power->num_irqs = axp_data->num_irq_names;
platform_set_drvdata(pdev, power);
@@ -295,20 +375,22 @@ static int axp20x_ac_power_probe(struct platform_device *pdev)
return PTR_ERR(power->supply);
/* Request irqs after registering, as irqs may trigger immediately */
- for (i = 0; irq_names[i]; i++) {
- irq = platform_get_irq_byname(pdev, irq_names[i]);
+ for (i = 0; i < axp_data->num_irq_names; i++) {
+ irq = platform_get_irq_byname(pdev, axp_data->irq_names[i]);
if (irq < 0) {
- dev_warn(&pdev->dev, "No IRQ for %s: %d\n",
- irq_names[i], irq);
- continue;
+ dev_err(&pdev->dev, "No IRQ for %s: %d\n",
+ axp_data->irq_names[i], irq);
+ return irq;
}
- irq = regmap_irq_get_virq(axp20x->regmap_irqc, irq);
- ret = devm_request_any_context_irq(&pdev->dev, irq,
+ power->irqs[i] = regmap_irq_get_virq(axp20x->regmap_irqc, irq);
+ ret = devm_request_any_context_irq(&pdev->dev, power->irqs[i],
axp20x_ac_power_irq, 0,
DRVNAME, power);
- if (ret < 0)
- dev_warn(&pdev->dev, "Error requesting %s IRQ: %d\n",
- irq_names[i], ret);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "Error requesting %s IRQ: %d\n",
+ axp_data->irq_names[i], ret);
+ return ret;
+ }
}
return 0;
@@ -331,8 +413,9 @@ MODULE_DEVICE_TABLE(of, axp20x_ac_power_match);
static struct platform_driver axp20x_ac_power_driver = {
.probe = axp20x_ac_power_probe,
.driver = {
- .name = DRVNAME,
- .of_match_table = axp20x_ac_power_match,
+ .name = DRVNAME,
+ .of_match_table = axp20x_ac_power_match,
+ .pm = &axp20x_ac_power_pm_ops,
},
};
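
The probe path above sizes the driver state with struct_size() so the trailing irqs[] flexible array matches the per-variant IRQ count. A minimal sketch of the same allocation pattern, using hypothetical names (struct foo, foo_alloc):

    #include <linux/overflow.h>
    #include <linux/slab.h>

    struct foo {
            unsigned int num_irqs;
            unsigned int irqs[];            /* flexible array member */
    };

    static struct foo *foo_alloc(unsigned int n)
    {
            /* struct_size() computes sizeof(*f) + n * sizeof(f->irqs[0])
             * with overflow checking, unlike open-coded arithmetic.
             */
            struct foo *f = kzalloc(struct_size(f, irqs, n), GFP_KERNEL);

            if (f)
                    f->num_irqs = n;
            return f;
    }
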
diff --git a/drivers/power/supply/axp20x_usb_power.c b/drivers/power/supply/axp20x_usb_power.c
index 5f0a5722b19e..4fde24b5f35a 100644
--- a/drivers/power/supply/axp20x_usb_power.c
+++ b/drivers/power/supply/axp20x_usb_power.c
@@ -16,6 +16,7 @@
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>
+#include <linux/pm.h>
#include <linux/power_supply.h>
#include <linux/regmap.h>
#include <linux/slab.h>
@@ -29,6 +30,9 @@
#define AXP20X_USB_STATUS_VBUS_VALID BIT(2)
+#define AXP20X_VBUS_PATH_SEL BIT(7)
+#define AXP20X_VBUS_PATH_SEL_OFFSET 7
+
#define AXP20X_VBUS_VHOLD_uV(b) (4000000 + (((b) >> 3) & 7) * 100000)
#define AXP20X_VBUS_VHOLD_MASK GENMASK(5, 3)
#define AXP20X_VBUS_VHOLD_OFFSET 3
@@ -57,7 +61,6 @@
#define DEBOUNCE_TIME msecs_to_jiffies(50)
struct axp20x_usb_power {
- struct device_node *np;
struct regmap *regmap;
struct power_supply *supply;
enum axp20x_variants axp20x_id;
@@ -65,14 +68,32 @@ struct axp20x_usb_power {
struct iio_channel *vbus_i;
struct delayed_work vbus_detect;
unsigned int old_status;
+ unsigned int online;
+ unsigned int num_irqs;
+ unsigned int irqs[];
};
+static bool axp20x_usb_vbus_needs_polling(struct axp20x_usb_power *power)
+{
+ /*
+ * Polling is only necessary while VBUS is offline. While online, a
+ * present->absent transition implies an online->offline transition
+	 * and will trigger the VBUS_REMOVAL IRQ.
+ */
+ if (power->axp20x_id >= AXP221_ID && !power->online)
+ return true;
+
+ return false;
+}
+
static irqreturn_t axp20x_usb_power_irq(int irq, void *devid)
{
struct axp20x_usb_power *power = devid;
power_supply_changed(power->supply);
+ mod_delayed_work(system_wq, &power->vbus_detect, DEBOUNCE_TIME);
+
return IRQ_HANDLED;
}
@@ -92,17 +113,11 @@ static void axp20x_usb_power_poll_vbus(struct work_struct *work)
power_supply_changed(power->supply);
power->old_status = val;
+ power->online = val & AXP20X_PWR_STATUS_VBUS_USED;
out:
- mod_delayed_work(system_wq, &power->vbus_detect, DEBOUNCE_TIME);
-}
-
-static bool axp20x_usb_vbus_needs_polling(struct axp20x_usb_power *power)
-{
- if (power->axp20x_id >= AXP221_ID)
- return true;
-
- return false;
+ if (axp20x_usb_vbus_needs_polling(power))
+ mod_delayed_work(system_wq, &power->vbus_detect, DEBOUNCE_TIME);
}
static int axp20x_get_current_max(struct axp20x_usb_power *power, int *val)
@@ -264,6 +279,16 @@ static int axp20x_usb_power_get_property(struct power_supply *psy,
return 0;
}
+static int axp813_usb_power_set_online(struct axp20x_usb_power *power,
+ int intval)
+{
+ int val = !intval << AXP20X_VBUS_PATH_SEL_OFFSET;
+
+ return regmap_update_bits(power->regmap,
+ AXP20X_VBUS_IPSOUT_MGMT,
+ AXP20X_VBUS_PATH_SEL, val);
+}
+
static int axp20x_usb_power_set_voltage_min(struct axp20x_usb_power *power,
int intval)
{
@@ -345,6 +370,11 @@ static int axp20x_usb_power_set_property(struct power_supply *psy,
struct axp20x_usb_power *power = power_supply_get_drvdata(psy);
switch (psp) {
+ case POWER_SUPPLY_PROP_ONLINE:
+ if (power->axp20x_id != AXP813_ID)
+ return -EINVAL;
+ return axp813_usb_power_set_online(power, val->intval);
+
case POWER_SUPPLY_PROP_VOLTAGE_MIN:
return axp20x_usb_power_set_voltage_min(power, val->intval);
@@ -364,6 +394,18 @@ static int axp20x_usb_power_set_property(struct power_supply *psy,
static int axp20x_usb_power_prop_writeable(struct power_supply *psy,
enum power_supply_property psp)
{
+ struct axp20x_usb_power *power = power_supply_get_drvdata(psy);
+
+ /*
+	 * The VBUS path select flag works differently on AXP288 and newer:
+ * - On AXP20x and AXP22x, the flag enables VBUS (ignoring N_VBUSEN).
+ * - On AXP288 and AXP8xx, the flag disables VBUS (ignoring N_VBUSEN).
+ * We only expose the control on variants where it can be used to force
+ * the VBUS input offline.
+ */
+ if (psp == POWER_SUPPLY_PROP_ONLINE)
+ return power->axp20x_id == AXP813_ID;
+
return psp == POWER_SUPPLY_PROP_VOLTAGE_MIN ||
psp == POWER_SUPPLY_PROP_CURRENT_MAX;
}
@@ -406,6 +448,92 @@ static const struct power_supply_desc axp22x_usb_power_desc = {
.set_property = axp20x_usb_power_set_property,
};
+static const char * const axp20x_irq_names[] = {
+ "VBUS_PLUGIN",
+ "VBUS_REMOVAL",
+ "VBUS_VALID",
+ "VBUS_NOT_VALID",
+};
+
+static const char * const axp22x_irq_names[] = {
+ "VBUS_PLUGIN",
+ "VBUS_REMOVAL",
+};
+
+struct axp_data {
+ const struct power_supply_desc *power_desc;
+ const char * const *irq_names;
+ unsigned int num_irq_names;
+ enum axp20x_variants axp20x_id;
+};
+
+static const struct axp_data axp202_data = {
+ .power_desc = &axp20x_usb_power_desc,
+ .irq_names = axp20x_irq_names,
+ .num_irq_names = ARRAY_SIZE(axp20x_irq_names),
+ .axp20x_id = AXP202_ID,
+};
+
+static const struct axp_data axp221_data = {
+ .power_desc = &axp22x_usb_power_desc,
+ .irq_names = axp22x_irq_names,
+ .num_irq_names = ARRAY_SIZE(axp22x_irq_names),
+ .axp20x_id = AXP221_ID,
+};
+
+static const struct axp_data axp223_data = {
+ .power_desc = &axp22x_usb_power_desc,
+ .irq_names = axp22x_irq_names,
+ .num_irq_names = ARRAY_SIZE(axp22x_irq_names),
+ .axp20x_id = AXP223_ID,
+};
+
+static const struct axp_data axp813_data = {
+ .power_desc = &axp22x_usb_power_desc,
+ .irq_names = axp22x_irq_names,
+ .num_irq_names = ARRAY_SIZE(axp22x_irq_names),
+ .axp20x_id = AXP813_ID,
+};
+
+#ifdef CONFIG_PM_SLEEP
+static int axp20x_usb_power_suspend(struct device *dev)
+{
+ struct axp20x_usb_power *power = dev_get_drvdata(dev);
+ int i = 0;
+
+ /*
+ * Allow wake via VBUS_PLUGIN only.
+ *
+ * As nested threaded IRQs are not automatically disabled during
+ * suspend, we must explicitly disable the remainder of the IRQs.
+ */
+ if (device_may_wakeup(&power->supply->dev))
+ enable_irq_wake(power->irqs[i++]);
+ while (i < power->num_irqs)
+ disable_irq(power->irqs[i++]);
+
+ return 0;
+}
+
+static int axp20x_usb_power_resume(struct device *dev)
+{
+ struct axp20x_usb_power *power = dev_get_drvdata(dev);
+ int i = 0;
+
+ if (device_may_wakeup(&power->supply->dev))
+ disable_irq_wake(power->irqs[i++]);
+ while (i < power->num_irqs)
+ enable_irq(power->irqs[i++]);
+
+ mod_delayed_work(system_wq, &power->vbus_detect, DEBOUNCE_TIME);
+
+ return 0;
+}
+#endif
+
+static SIMPLE_DEV_PM_OPS(axp20x_usb_power_pm_ops, axp20x_usb_power_suspend,
+ axp20x_usb_power_resume);
+
static int configure_iio_channels(struct platform_device *pdev,
struct axp20x_usb_power *power)
{
@@ -441,12 +569,7 @@ static int axp20x_usb_power_probe(struct platform_device *pdev)
struct axp20x_dev *axp20x = dev_get_drvdata(pdev->dev.parent);
struct power_supply_config psy_cfg = {};
struct axp20x_usb_power *power;
- static const char * const axp20x_irq_names[] = { "VBUS_PLUGIN",
- "VBUS_REMOVAL", "VBUS_VALID", "VBUS_NOT_VALID", NULL };
- static const char * const axp22x_irq_names[] = {
- "VBUS_PLUGIN", "VBUS_REMOVAL", NULL };
- const char * const *irq_names;
- const struct power_supply_desc *usb_power_desc;
+ const struct axp_data *axp_data;
int i, irq, ret;
if (!of_device_is_available(pdev->dev.of_node))
@@ -457,16 +580,19 @@ static int axp20x_usb_power_probe(struct platform_device *pdev)
return -EINVAL;
}
- power = devm_kzalloc(&pdev->dev, sizeof(*power), GFP_KERNEL);
+ axp_data = of_device_get_match_data(&pdev->dev);
+
+ power = devm_kzalloc(&pdev->dev,
+ struct_size(power, irqs, axp_data->num_irq_names),
+ GFP_KERNEL);
if (!power)
return -ENOMEM;
platform_set_drvdata(pdev, power);
- power->axp20x_id = (enum axp20x_variants)of_device_get_match_data(
- &pdev->dev);
- power->np = pdev->dev.of_node;
+ power->axp20x_id = axp_data->axp20x_id;
power->regmap = axp20x->regmap;
+ power->num_irqs = axp_data->num_irq_names;
if (power->axp20x_id == AXP202_ID) {
/* Enable vbus valid checking */
@@ -483,18 +609,6 @@ static int axp20x_usb_power_probe(struct platform_device *pdev)
if (ret)
return ret;
-
- usb_power_desc = &axp20x_usb_power_desc;
- irq_names = axp20x_irq_names;
- } else if (power->axp20x_id == AXP221_ID ||
- power->axp20x_id == AXP223_ID ||
- power->axp20x_id == AXP813_ID) {
- usb_power_desc = &axp22x_usb_power_desc;
- irq_names = axp22x_irq_names;
- } else {
- dev_err(&pdev->dev, "Unsupported AXP variant: %ld\n",
- axp20x->variant);
- return -EINVAL;
}
if (power->axp20x_id == AXP813_ID) {
@@ -506,25 +620,29 @@ static int axp20x_usb_power_probe(struct platform_device *pdev)
psy_cfg.of_node = pdev->dev.of_node;
psy_cfg.drv_data = power;
- power->supply = devm_power_supply_register(&pdev->dev, usb_power_desc,
+ power->supply = devm_power_supply_register(&pdev->dev,
+ axp_data->power_desc,
&psy_cfg);
if (IS_ERR(power->supply))
return PTR_ERR(power->supply);
/* Request irqs after registering, as irqs may trigger immediately */
- for (i = 0; irq_names[i]; i++) {
- irq = platform_get_irq_byname(pdev, irq_names[i]);
+ for (i = 0; i < axp_data->num_irq_names; i++) {
+ irq = platform_get_irq_byname(pdev, axp_data->irq_names[i]);
if (irq < 0) {
- dev_warn(&pdev->dev, "No IRQ for %s: %d\n",
- irq_names[i], irq);
- continue;
+ dev_err(&pdev->dev, "No IRQ for %s: %d\n",
+ axp_data->irq_names[i], irq);
+ return irq;
+ }
+ power->irqs[i] = regmap_irq_get_virq(axp20x->regmap_irqc, irq);
+ ret = devm_request_any_context_irq(&pdev->dev, power->irqs[i],
+ axp20x_usb_power_irq, 0,
+ DRVNAME, power);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "Error requesting %s IRQ: %d\n",
+ axp_data->irq_names[i], ret);
+ return ret;
}
- irq = regmap_irq_get_virq(axp20x->regmap_irqc, irq);
- ret = devm_request_any_context_irq(&pdev->dev, irq,
- axp20x_usb_power_irq, 0, DRVNAME, power);
- if (ret < 0)
- dev_warn(&pdev->dev, "Error requesting %s IRQ: %d\n",
- irq_names[i], ret);
}
INIT_DELAYED_WORK(&power->vbus_detect, axp20x_usb_power_poll_vbus);
@@ -546,16 +664,16 @@ static int axp20x_usb_power_remove(struct platform_device *pdev)
static const struct of_device_id axp20x_usb_power_match[] = {
{
.compatible = "x-powers,axp202-usb-power-supply",
- .data = (void *)AXP202_ID,
+ .data = &axp202_data,
}, {
.compatible = "x-powers,axp221-usb-power-supply",
- .data = (void *)AXP221_ID,
+ .data = &axp221_data,
}, {
.compatible = "x-powers,axp223-usb-power-supply",
- .data = (void *)AXP223_ID,
+ .data = &axp223_data,
}, {
.compatible = "x-powers,axp813-usb-power-supply",
- .data = (void *)AXP813_ID,
+ .data = &axp813_data,
}, { /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, axp20x_usb_power_match);
@@ -564,8 +682,9 @@ static struct platform_driver axp20x_usb_power_driver = {
.probe = axp20x_usb_power_probe,
.remove = axp20x_usb_power_remove,
.driver = {
- .name = DRVNAME,
- .of_match_table = axp20x_usb_power_match,
+ .name = DRVNAME,
+ .of_match_table = axp20x_usb_power_match,
+ .pm = &axp20x_usb_power_pm_ops,
},
};
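
For reference, the AXP813 VBUS path-select flag is inverted with respect to ONLINE: writing ONLINE=1 must clear BIT(7) of AXP20X_VBUS_IPSOUT_MGMT, and ONLINE=0 must set it, which is what the !intval << AXP20X_VBUS_PATH_SEL_OFFSET expression in axp813_usb_power_set_online() encodes:

    intval (ONLINE)    !intval    value written under the BIT(7) mask
    1                  0          0x00  -> VBUS path enabled
    0                  1          0x80  -> VBUS path disabled
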
diff --git a/drivers/power/supply/bq25890_charger.c b/drivers/power/supply/bq25890_charger.c
index 9d1ec8d677de..aebd1253dbc9 100644
--- a/drivers/power/supply/bq25890_charger.c
+++ b/drivers/power/supply/bq25890_charger.c
@@ -25,12 +25,20 @@
#define BQ25895_ID 7
#define BQ25896_ID 0
+enum bq25890_chip_version {
+ BQ25890,
+ BQ25892,
+ BQ25895,
+ BQ25896,
+};
+
enum bq25890_fields {
F_EN_HIZ, F_EN_ILIM, F_IILIM, /* Reg00 */
F_BHOT, F_BCOLD, F_VINDPM_OFS, /* Reg01 */
F_CONV_START, F_CONV_RATE, F_BOOSTF, F_ICO_EN,
F_HVDCP_EN, F_MAXC_EN, F_FORCE_DPM, F_AUTO_DPDM_EN, /* Reg02 */
- F_BAT_LOAD_EN, F_WD_RST, F_OTG_CFG, F_CHG_CFG, F_SYSVMIN, /* Reg03 */
+ F_BAT_LOAD_EN, F_WD_RST, F_OTG_CFG, F_CHG_CFG, F_SYSVMIN,
+ F_MIN_VBAT_SEL, /* Reg03 */
F_PUMPX_EN, F_ICHG, /* Reg04 */
F_IPRECHG, F_ITERM, /* Reg05 */
F_VREG, F_BATLOWV, F_VRECHG, /* Reg06 */
@@ -39,8 +47,9 @@ enum bq25890_fields {
F_BATCMP, F_VCLAMP, F_TREG, /* Reg08 */
F_FORCE_ICO, F_TMR2X_EN, F_BATFET_DIS, F_JEITA_VSET,
F_BATFET_DLY, F_BATFET_RST_EN, F_PUMPX_UP, F_PUMPX_DN, /* Reg09 */
- F_BOOSTV, F_BOOSTI, /* Reg0A */
- F_VBUS_STAT, F_CHG_STAT, F_PG_STAT, F_SDP_STAT, F_VSYS_STAT, /* Reg0B */
+ F_BOOSTV, F_PFM_OTG_DIS, F_BOOSTI, /* Reg0A */
+ F_VBUS_STAT, F_CHG_STAT, F_PG_STAT, F_SDP_STAT, F_0B_RSVD,
+ F_VSYS_STAT, /* Reg0B */
F_WD_FAULT, F_BOOST_FAULT, F_CHG_FAULT, F_BAT_FAULT,
F_NTC_FAULT, /* Reg0C */
F_FORCE_VINDPM, F_VINDPM, /* Reg0D */
@@ -91,7 +100,7 @@ struct bq25890_device {
struct regmap *rmap;
struct regmap_field *rmap_fields[F_MAX_FIELDS];
- int chip_id;
+ enum bq25890_chip_version chip_version;
struct bq25890_init_data init_data;
struct bq25890_state state;
@@ -111,8 +120,7 @@ static const struct regmap_access_table bq25890_writeable_regs = {
static const struct regmap_range bq25890_volatile_reg_ranges[] = {
regmap_reg_range(0x00, 0x00),
regmap_reg_range(0x09, 0x09),
- regmap_reg_range(0x0b, 0x0c),
- regmap_reg_range(0x0e, 0x14),
+ regmap_reg_range(0x0b, 0x14),
};
static const struct regmap_access_table bq25890_volatile_regs = {
@@ -155,7 +163,7 @@ static const struct reg_field bq25890_reg_fields[] = {
[F_OTG_CFG] = REG_FIELD(0x03, 5, 5),
[F_CHG_CFG] = REG_FIELD(0x03, 4, 4),
[F_SYSVMIN] = REG_FIELD(0x03, 1, 3),
- /* MIN_VBAT_SEL on BQ25896 */
+ [F_MIN_VBAT_SEL] = REG_FIELD(0x03, 0, 0), // BQ25896 only
/* REG04 */
[F_PUMPX_EN] = REG_FIELD(0x04, 7, 7),
[F_ICHG] = REG_FIELD(0x04, 0, 6),
@@ -188,8 +196,8 @@ static const struct reg_field bq25890_reg_fields[] = {
[F_PUMPX_DN] = REG_FIELD(0x09, 0, 0),
/* REG0A */
[F_BOOSTV] = REG_FIELD(0x0A, 4, 7),
- /* PFM_OTG_DIS 3 on BQ25896 */
[F_BOOSTI] = REG_FIELD(0x0A, 0, 2), // reserved on BQ25895
+ [F_PFM_OTG_DIS] = REG_FIELD(0x0A, 3, 3), // BQ25896 only
/* REG0B */
[F_VBUS_STAT] = REG_FIELD(0x0B, 5, 7),
[F_CHG_STAT] = REG_FIELD(0x0B, 3, 4),
@@ -275,6 +283,7 @@ static const union {
struct bq25890_lookup lt;
} bq25890_tables[] = {
/* range tables */
+ /* TODO: BQ25896 has max ICHG 3008 mA */
[TBL_ICHG] = { .rt = {0, 5056000, 64000} }, /* uA */
[TBL_ITERM] = { .rt = {64000, 1024000, 64000} }, /* uA */
[TBL_VREG] = { .rt = {3840000, 4608000, 16000} }, /* uV */
@@ -391,11 +400,13 @@ static int bq25890_power_supply_get_property(struct power_supply *psy,
break;
case POWER_SUPPLY_PROP_MODEL_NAME:
- if (bq->chip_id == BQ25890_ID)
+ if (bq->chip_version == BQ25890)
val->strval = "BQ25890";
- else if (bq->chip_id == BQ25895_ID)
+ else if (bq->chip_version == BQ25892)
+ val->strval = "BQ25892";
+ else if (bq->chip_version == BQ25895)
val->strval = "BQ25895";
- else if (bq->chip_id == BQ25896_ID)
+ else if (bq->chip_version == BQ25896)
val->strval = "BQ25896";
else
val->strval = "UNKNOWN";
@@ -741,6 +752,56 @@ static int bq25890_usb_notifier(struct notifier_block *nb, unsigned long val,
return NOTIFY_OK;
}
+static int bq25890_get_chip_version(struct bq25890_device *bq)
+{
+ int id, rev;
+
+ id = bq25890_field_read(bq, F_PN);
+ if (id < 0) {
+ dev_err(bq->dev, "Cannot read chip ID.\n");
+ return id;
+ }
+
+ rev = bq25890_field_read(bq, F_DEV_REV);
+ if (rev < 0) {
+ dev_err(bq->dev, "Cannot read chip revision.\n");
+ return rev;
+ }
+
+ switch (id) {
+ case BQ25890_ID:
+ bq->chip_version = BQ25890;
+ break;
+
+ /* BQ25892 and BQ25896 share same ID 0 */
+ case BQ25896_ID:
+ switch (rev) {
+ case 2:
+ bq->chip_version = BQ25896;
+ break;
+ case 1:
+ bq->chip_version = BQ25892;
+ break;
+ default:
+ dev_err(bq->dev,
+ "Unknown device revision %d, assume BQ25892\n",
+ rev);
+ bq->chip_version = BQ25892;
+ }
+ break;
+
+ case BQ25895_ID:
+ bq->chip_version = BQ25895;
+ break;
+
+ default:
+ dev_err(bq->dev, "Unknown chip ID %d\n", id);
+ return -ENODEV;
+ }
+
+ return 0;
+}
+
static int bq25890_irq_probe(struct bq25890_device *bq)
{
struct gpio_desc *irq;
@@ -859,16 +920,10 @@ static int bq25890_probe(struct i2c_client *client,
i2c_set_clientdata(client, bq);
- bq->chip_id = bq25890_field_read(bq, F_PN);
- if (bq->chip_id < 0) {
- dev_err(dev, "Cannot read chip ID.\n");
- return bq->chip_id;
- }
-
- if ((bq->chip_id != BQ25890_ID) && (bq->chip_id != BQ25895_ID)
- && (bq->chip_id != BQ25896_ID)) {
- dev_err(dev, "Chip with ID=%d, not supported!\n", bq->chip_id);
- return -ENODEV;
+ ret = bq25890_get_chip_version(bq);
+ if (ret) {
+ dev_err(dev, "Cannot read chip ID or unknown chip.\n");
+ return ret;
}
if (!dev->platform_data) {
@@ -986,12 +1041,18 @@ static const struct dev_pm_ops bq25890_pm = {
static const struct i2c_device_id bq25890_i2c_ids[] = {
{ "bq25890", 0 },
+ { "bq25892", 0 },
+ { "bq25895", 0 },
+ { "bq25896", 0 },
{},
};
MODULE_DEVICE_TABLE(i2c, bq25890_i2c_ids);
static const struct of_device_id bq25890_of_match[] = {
{ .compatible = "ti,bq25890", },
+ { .compatible = "ti,bq25892", },
+ { .compatible = "ti,bq25895", },
+ { .compatible = "ti,bq25896", },
{ },
};
MODULE_DEVICE_TABLE(of, bq25890_of_match);
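
With the part number resolved into chip_version, variant-only register fields can be guarded at their call sites. A minimal sketch, assuming bq25890_field_write() keeps its existing signature (the helper name bq25890_set_min_vbat_sel is hypothetical):

    /* MIN_VBAT_SEL exists on the BQ25896 only; reject it elsewhere. */
    static int bq25890_set_min_vbat_sel(struct bq25890_device *bq, u8 val)
    {
            if (bq->chip_version != BQ25896)
                    return -EOPNOTSUPP;

            return bq25890_field_write(bq, F_MIN_VBAT_SEL, val);
    }
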
diff --git a/drivers/power/supply/cros_usbpd-charger.c b/drivers/power/supply/cros_usbpd-charger.c
index 6cc7c3910e09..ffad9ee03a68 100644
--- a/drivers/power/supply/cros_usbpd-charger.c
+++ b/drivers/power/supply/cros_usbpd-charger.c
@@ -132,11 +132,8 @@ static int cros_usbpd_charger_get_num_ports(struct charger_data *charger)
ret = cros_usbpd_charger_ec_command(charger, 0,
EC_CMD_CHARGE_PORT_COUNT,
NULL, 0, &resp, sizeof(resp));
- if (ret < 0) {
- dev_err(charger->dev,
- "Unable to get the number of ports (err:0x%x)\n", ret);
+ if (ret < 0)
return ret;
- }
return resp.port_count;
}
@@ -148,11 +145,8 @@ static int cros_usbpd_charger_get_usbpd_num_ports(struct charger_data *charger)
ret = cros_usbpd_charger_ec_command(charger, 0, EC_CMD_USB_PD_PORTS,
NULL, 0, &resp, sizeof(resp));
- if (ret < 0) {
- dev_err(charger->dev,
- "Unable to get the number or ports (err:0x%x)\n", ret);
+ if (ret < 0)
return ret;
- }
return resp.num_ports;
}
diff --git a/drivers/power/supply/ingenic-battery.c b/drivers/power/supply/ingenic-battery.c
index 35816d4b3012..2748715c4c75 100644
--- a/drivers/power/supply/ingenic-battery.c
+++ b/drivers/power/supply/ingenic-battery.c
@@ -100,10 +100,17 @@ static int ingenic_battery_set_scale(struct ingenic_battery *bat)
return -EINVAL;
}
- return iio_write_channel_attribute(bat->channel,
- scale_raw[best_idx],
- scale_raw[best_idx + 1],
- IIO_CHAN_INFO_SCALE);
+ /* Only set scale if there is more than one (fractional) entry */
+ if (scale_len > 2) {
+ ret = iio_write_channel_attribute(bat->channel,
+ scale_raw[best_idx],
+ scale_raw[best_idx + 1],
+ IIO_CHAN_INFO_SCALE);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
}
static enum power_supply_property ingenic_battery_properties[] = {
diff --git a/drivers/power/supply/ipaq_micro_battery.c b/drivers/power/supply/ipaq_micro_battery.c
index 03592ceaca88..192d9db0fb00 100644
--- a/drivers/power/supply/ipaq_micro_battery.c
+++ b/drivers/power/supply/ipaq_micro_battery.c
@@ -149,7 +149,7 @@ static int micro_batt_get_property(struct power_supply *b,
default:
val->intval = POWER_SUPPLY_TECHNOLOGY_UNKNOWN;
break;
- };
+ }
break;
case POWER_SUPPLY_PROP_STATUS:
val->intval = get_status(b);
@@ -168,7 +168,7 @@ static int micro_batt_get_property(struct power_supply *b,
break;
default:
return -EINVAL;
- };
+ }
return 0;
}
@@ -185,7 +185,7 @@ static int micro_ac_get_property(struct power_supply *b,
break;
default:
return -EINVAL;
- };
+ }
return 0;
}
diff --git a/drivers/power/supply/ltc2941-battery-gauge.c b/drivers/power/supply/ltc2941-battery-gauge.c
index da49436176cd..30a9014b2f95 100644
--- a/drivers/power/supply/ltc2941-battery-gauge.c
+++ b/drivers/power/supply/ltc2941-battery-gauge.c
@@ -449,7 +449,7 @@ static int ltc294x_i2c_remove(struct i2c_client *client)
{
struct ltc294x_info *info = i2c_get_clientdata(client);
- cancel_delayed_work(&info->work);
+ cancel_delayed_work_sync(&info->work);
power_supply_unregister(info->supply);
return 0;
}
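
cancel_delayed_work() only removes a work item that has not started yet; a handler already running on another CPU may still dereference info->supply after power_supply_unregister() returns. The _sync variant waits for a running handler to finish, so the ordering below is what closes the race:

    cancel_delayed_work_sync(&info->work);  /* no handler runs past this point */
    power_supply_unregister(info->supply);  /* safe: no concurrent user left */
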
diff --git a/drivers/power/supply/max17040_battery.c b/drivers/power/supply/max17040_battery.c
index 62499018e68b..8a1f0ee493aa 100644
--- a/drivers/power/supply/max17040_battery.c
+++ b/drivers/power/supply/max17040_battery.c
@@ -13,6 +13,7 @@
#include <linux/err.h>
#include <linux/i2c.h>
#include <linux/delay.h>
+#include <linux/interrupt.h>
#include <linux/power_supply.h>
#include <linux/max17040_battery.h>
#include <linux/slab.h>
@@ -28,6 +29,9 @@
#define MAX17040_DELAY 1000
#define MAX17040_BATTERY_FULL 95
+#define MAX17040_ATHD_MASK 0xFFC0
+#define MAX17040_ATHD_DEFAULT_POWER_UP 4
+
struct max17040_chip {
struct i2c_client *client;
struct delayed_work work;
@@ -42,6 +46,8 @@ struct max17040_chip {
int soc;
/* State Of Charge */
int status;
+ /* Low alert threshold from 32% to 1% of the State of Charge */
+ u32 low_soc_alert;
};
static int max17040_get_property(struct power_supply *psy,
@@ -98,6 +104,21 @@ static void max17040_reset(struct i2c_client *client)
max17040_write_reg(client, MAX17040_CMD, 0x0054);
}
+static int max17040_set_low_soc_alert(struct i2c_client *client, u32 level)
+{
+ int ret;
+ u16 data;
+
+ level = 32 - level;
+ data = max17040_read_reg(client, MAX17040_RCOMP);
+	/* clear the ALRT status bit and set the 5-bit ATHD threshold */
+ data &= MAX17040_ATHD_MASK;
+ data |= level;
+ ret = max17040_write_reg(client, MAX17040_RCOMP, data);
+
+ return ret;
+}
+
static void max17040_get_vcell(struct i2c_client *client)
{
struct max17040_chip *chip = i2c_get_clientdata(client);
@@ -160,21 +181,81 @@ static void max17040_get_status(struct i2c_client *client)
chip->status = POWER_SUPPLY_STATUS_FULL;
}
+static int max17040_get_of_data(struct max17040_chip *chip)
+{
+ struct device *dev = &chip->client->dev;
+
+ chip->low_soc_alert = MAX17040_ATHD_DEFAULT_POWER_UP;
+ device_property_read_u32(dev,
+ "maxim,alert-low-soc-level",
+ &chip->low_soc_alert);
+
+ if (chip->low_soc_alert <= 0 || chip->low_soc_alert >= 33)
+ return -EINVAL;
+
+ return 0;
+}
+
+static void max17040_check_changes(struct i2c_client *client)
+{
+ max17040_get_vcell(client);
+ max17040_get_soc(client);
+ max17040_get_online(client);
+ max17040_get_status(client);
+}
+
static void max17040_work(struct work_struct *work)
{
struct max17040_chip *chip;
+ int last_soc, last_status;
chip = container_of(work, struct max17040_chip, work.work);
- max17040_get_vcell(chip->client);
- max17040_get_soc(chip->client);
- max17040_get_online(chip->client);
- max17040_get_status(chip->client);
+ /* store SOC and status to check changes */
+ last_soc = chip->soc;
+ last_status = chip->status;
+ max17040_check_changes(chip->client);
+
+ /* check changes and send uevent */
+ if (last_soc != chip->soc || last_status != chip->status)
+ power_supply_changed(chip->battery);
queue_delayed_work(system_power_efficient_wq, &chip->work,
MAX17040_DELAY);
}
+static irqreturn_t max17040_thread_handler(int id, void *dev)
+{
+ struct max17040_chip *chip = dev;
+ struct i2c_client *client = chip->client;
+
+	dev_warn(&client->dev, "IRQ: Alert battery low level\n");
+ /* read registers */
+ max17040_check_changes(chip->client);
+
+ /* send uevent */
+ power_supply_changed(chip->battery);
+
+ /* reset alert bit */
+ max17040_set_low_soc_alert(client, chip->low_soc_alert);
+
+ return IRQ_HANDLED;
+}
+
+static int max17040_enable_alert_irq(struct max17040_chip *chip)
+{
+ struct i2c_client *client = chip->client;
+ unsigned int flags;
+ int ret;
+
+ flags = IRQF_TRIGGER_FALLING | IRQF_ONESHOT;
+ ret = devm_request_threaded_irq(&client->dev, client->irq, NULL,
+ max17040_thread_handler, flags,
+ chip->battery->desc->name, chip);
+
+ return ret;
+}
+
static enum power_supply_property max17040_battery_props[] = {
POWER_SUPPLY_PROP_STATUS,
POWER_SUPPLY_PROP_ONLINE,
@@ -196,6 +277,7 @@ static int max17040_probe(struct i2c_client *client,
struct i2c_adapter *adapter = client->adapter;
struct power_supply_config psy_cfg = {};
struct max17040_chip *chip;
+ int ret;
if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_BYTE))
return -EIO;
@@ -206,6 +288,12 @@ static int max17040_probe(struct i2c_client *client,
chip->client = client;
chip->pdata = client->dev.platform_data;
+ ret = max17040_get_of_data(chip);
+ if (ret) {
+ dev_err(&client->dev,
+ "failed: low SOC alert OF data out of bounds\n");
+ return ret;
+ }
i2c_set_clientdata(client, chip);
psy_cfg.drv_data = chip;
@@ -220,6 +308,24 @@ static int max17040_probe(struct i2c_client *client,
max17040_reset(client);
max17040_get_version(client);
+ /* check interrupt */
+ if (client->irq && of_device_is_compatible(client->dev.of_node,
+ "maxim,max77836-battery")) {
+ ret = max17040_set_low_soc_alert(client, chip->low_soc_alert);
+ if (ret) {
+ dev_err(&client->dev,
+ "Failed to set low SOC alert: err %d\n", ret);
+ return ret;
+ }
+
+ ret = max17040_enable_alert_irq(chip);
+ if (ret) {
+ client->irq = 0;
+ dev_warn(&client->dev,
+ "Failed to get IRQ err %d\n", ret);
+ }
+ }
+
INIT_DEFERRABLE_WORK(&chip->work, max17040_work);
queue_delayed_work(system_power_efficient_wq, &chip->work,
MAX17040_DELAY);
@@ -244,6 +350,10 @@ static int max17040_suspend(struct device *dev)
struct max17040_chip *chip = i2c_get_clientdata(client);
cancel_delayed_work(&chip->work);
+
+ if (client->irq && device_may_wakeup(dev))
+ enable_irq_wake(client->irq);
+
return 0;
}
@@ -254,6 +364,10 @@ static int max17040_resume(struct device *dev)
queue_delayed_work(system_power_efficient_wq, &chip->work,
MAX17040_DELAY);
+
+ if (client->irq && device_may_wakeup(dev))
+ disable_irq_wake(client->irq);
+
return 0;
}
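
The ATHD field in the low bits of RCOMP encodes the alert threshold as (32 - percent), with the rest of the register preserved through MAX17040_ATHD_MASK. Worked numbers for the default 4% threshold, assuming a hypothetical RCOMP read-back of 0x971F:

    level = 32 - 4 = 28 = 0x1C
    data  = (0x971F & 0xFFC0) | 0x1C = 0x9700 | 0x1C = 0x971C
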
diff --git a/drivers/power/supply/max17042_battery.c b/drivers/power/supply/max17042_battery.c
index 0dfad2cf13fe..69ec4295d55d 100644
--- a/drivers/power/supply/max17042_battery.c
+++ b/drivers/power/supply/max17042_battery.c
@@ -282,6 +282,8 @@ static int max17042_get_property(struct power_supply *psy,
case POWER_SUPPLY_PROP_VOLTAGE_MIN_DESIGN:
if (chip->chip_type == MAXIM_DEVICE_TYPE_MAX17042)
ret = regmap_read(map, MAX17042_V_empty, &data);
+ else if (chip->chip_type == MAXIM_DEVICE_TYPE_MAX17055)
+ ret = regmap_read(map, MAX17055_V_empty, &data);
else
ret = regmap_read(map, MAX17047_V_empty, &data);
if (ret < 0)
@@ -627,7 +629,8 @@ static void max17042_write_config_regs(struct max17042_chip *chip)
config->filter_cfg);
regmap_write(map, MAX17042_RelaxCFG, config->relax_cfg);
if (chip->chip_type == MAXIM_DEVICE_TYPE_MAX17047 ||
- chip->chip_type == MAXIM_DEVICE_TYPE_MAX17050)
+ chip->chip_type == MAXIM_DEVICE_TYPE_MAX17050 ||
+ chip->chip_type == MAXIM_DEVICE_TYPE_MAX17055)
regmap_write(map, MAX17047_FullSOCThr,
config->full_soc_thresh);
}
@@ -758,6 +761,8 @@ static inline void max17042_override_por_values(struct max17042_chip *chip)
if (chip->chip_type == MAXIM_DEVICE_TYPE_MAX17042)
max17042_override_por(map, MAX17042_V_empty, config->vempty);
+	else if (chip->chip_type == MAXIM_DEVICE_TYPE_MAX17055)
+ max17042_override_por(map, MAX17055_V_empty, config->vempty);
else
max17042_override_por(map, MAX17047_V_empty, config->vempty);
max17042_override_por(map, MAX17042_TempNom, config->temp_nom);
@@ -765,7 +770,10 @@ static inline void max17042_override_por_values(struct max17042_chip *chip)
max17042_override_por(map, MAX17042_FCTC, config->fctc);
max17042_override_por(map, MAX17042_RCOMP0, config->rcomp0);
max17042_override_por(map, MAX17042_TempCo, config->tcompc0);
- if (chip->chip_type) {
+ if (chip->chip_type &&
+ ((chip->chip_type == MAXIM_DEVICE_TYPE_MAX17042) ||
+ (chip->chip_type == MAXIM_DEVICE_TYPE_MAX17047) ||
+ (chip->chip_type == MAXIM_DEVICE_TYPE_MAX17050))) {
max17042_override_por(map, MAX17042_EmptyTempCo,
config->empty_tempco);
max17042_override_por(map, MAX17042_K_empty0,
@@ -929,7 +937,8 @@ max17042_get_default_pdata(struct max17042_chip *chip)
if (!pdata)
return pdata;
- if (chip->chip_type != MAXIM_DEVICE_TYPE_MAX17042) {
+ if ((chip->chip_type == MAXIM_DEVICE_TYPE_MAX17047) ||
+ (chip->chip_type == MAXIM_DEVICE_TYPE_MAX17050)) {
pdata->init_data = max17047_default_pdata_init_regs;
pdata->num_init_data =
ARRAY_SIZE(max17047_default_pdata_init_regs);
@@ -1167,6 +1176,7 @@ static const struct of_device_id max17042_dt_match[] = {
{ .compatible = "maxim,max17042" },
{ .compatible = "maxim,max17047" },
{ .compatible = "maxim,max17050" },
+ { .compatible = "maxim,max17055" },
{ },
};
MODULE_DEVICE_TABLE(of, max17042_dt_match);
@@ -1176,6 +1186,7 @@ static const struct i2c_device_id max17042_id[] = {
{ "max17042", MAXIM_DEVICE_TYPE_MAX17042 },
{ "max17047", MAXIM_DEVICE_TYPE_MAX17047 },
{ "max17050", MAXIM_DEVICE_TYPE_MAX17050 },
+ { "max17055", MAXIM_DEVICE_TYPE_MAX17055 },
{ }
};
MODULE_DEVICE_TABLE(i2c, max17042_id);
diff --git a/drivers/power/supply/max77650-charger.c b/drivers/power/supply/max77650-charger.c
index 5f9477c5cf5a..d913428bedc0 100644
--- a/drivers/power/supply/max77650-charger.c
+++ b/drivers/power/supply/max77650-charger.c
@@ -354,9 +354,16 @@ static int max77650_charger_remove(struct platform_device *pdev)
return max77650_charger_disable(chg);
}
+static const struct of_device_id max77650_charger_of_match[] = {
+ { .compatible = "maxim,max77650-charger" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, max77650_charger_of_match);
+
static struct platform_driver max77650_charger_driver = {
.driver = {
.name = "max77650-charger",
+ .of_match_table = max77650_charger_of_match,
},
.probe = max77650_charger_probe,
.remove = max77650_charger_remove,
diff --git a/drivers/power/supply/pda_power.c b/drivers/power/supply/pda_power.c
index 3ae5707d39fa..03a37fd6be27 100644
--- a/drivers/power/supply/pda_power.c
+++ b/drivers/power/supply/pda_power.c
@@ -429,6 +429,10 @@ wrongid:
static int pda_power_remove(struct platform_device *pdev)
{
+#if IS_ENABLED(CONFIG_USB_PHY)
+ if (!IS_ERR_OR_NULL(transceiver) && pdata->use_otg_notifier)
+ usb_unregister_notifier(transceiver, &otg_nb);
+#endif
if (pdata->is_usb_online && usb_irq)
free_irq(usb_irq->start, pda_psy_usb);
if (pdata->is_ac_online && ac_irq)
diff --git a/drivers/power/supply/power_supply_core.c b/drivers/power/supply/power_supply_core.c
index 5c36c430ce8b..1a9a9fae73d3 100644
--- a/drivers/power/supply/power_supply_core.c
+++ b/drivers/power/supply/power_supply_core.c
@@ -565,9 +565,11 @@ EXPORT_SYMBOL_GPL(devm_power_supply_get_by_phandle);
int power_supply_get_battery_info(struct power_supply *psy,
struct power_supply_battery_info *info)
{
+ struct power_supply_resistance_temp_table *resist_table;
struct device_node *battery_np;
const char *value;
int err, len, index;
+ const __be32 *list;
info->energy_full_design_uwh = -EINVAL;
info->charge_full_design_uah = -EINVAL;
@@ -578,6 +580,7 @@ int power_supply_get_battery_info(struct power_supply *psy,
info->constant_charge_current_max_ua = -EINVAL;
info->constant_charge_voltage_max_uv = -EINVAL;
info->factory_internal_resistance_uohm = -EINVAL;
+ info->resist_table = NULL;
for (index = 0; index < POWER_SUPPLY_OCV_TEMP_MAX; index++) {
info->ocv_table[index] = NULL;
@@ -644,7 +647,6 @@ int power_supply_get_battery_info(struct power_supply *psy,
for (index = 0; index < len; index++) {
struct power_supply_battery_ocv_table *table;
char *propname;
- const __be32 *list;
int i, tab_len, size;
propname = kasprintf(GFP_KERNEL, "ocv-capacity-table-%d", index);
@@ -677,6 +679,26 @@ int power_supply_get_battery_info(struct power_supply *psy,
}
}
+ list = of_get_property(battery_np, "resistance-temp-table", &len);
+ if (!list || !len)
+ goto out_put_node;
+
+ info->resist_table_size = len / (2 * sizeof(__be32));
+ resist_table = info->resist_table = devm_kcalloc(&psy->dev,
+ info->resist_table_size,
+ sizeof(*resist_table),
+ GFP_KERNEL);
+ if (!info->resist_table) {
+ power_supply_put_battery_info(psy, info);
+ err = -ENOMEM;
+ goto out_put_node;
+ }
+
+ for (index = 0; index < info->resist_table_size; index++) {
+ resist_table[index].temp = be32_to_cpu(*list++);
+ resist_table[index].resistance = be32_to_cpu(*list++);
+ }
+
out_put_node:
of_node_put(battery_np);
return err;
@@ -692,10 +714,53 @@ void power_supply_put_battery_info(struct power_supply *psy,
if (info->ocv_table[i])
devm_kfree(&psy->dev, info->ocv_table[i]);
}
+
+ if (info->resist_table)
+ devm_kfree(&psy->dev, info->resist_table);
}
EXPORT_SYMBOL_GPL(power_supply_put_battery_info);
/**
+ * power_supply_temp2resist_simple() - find the battery internal resistance
+ * percent
+ * @table: Pointer to battery resistance temperature table
+ * @table_len: The table length
+ * @temp: Current temperature
+ *
+ * This helper function is used to look up battery internal resistance percent
+ * according to current temperature value from the resistance temperature table,
+ * and the table must be sorted by temperature in descending order. The actual
+ * battery internal resistance = the ideal internal resistance * percent / 100.
+ *
+ * Return: the battery internal resistance percent
+ */
+int power_supply_temp2resist_simple(struct power_supply_resistance_temp_table *table,
+ int table_len, int temp)
+{
+ int i, resist;
+
+ for (i = 0; i < table_len; i++)
+ if (temp > table[i].temp)
+ break;
+
+ if (i > 0 && i < table_len) {
+ int tmp;
+
+ tmp = (table[i - 1].resistance - table[i].resistance) *
+ (temp - table[i].temp);
+ tmp /= table[i - 1].temp - table[i].temp;
+ resist = tmp + table[i].resistance;
+ } else if (i == 0) {
+ resist = table[0].resistance;
+ } else {
+ resist = table[table_len - 1].resistance;
+ }
+
+ return resist;
+}
+EXPORT_SYMBOL_GPL(power_supply_temp2resist_simple);
+
+/**
* power_supply_ocv2cap_simple() - find the battery capacity
* @table: Pointer to battery OCV lookup table
* @table_len: OCV table length
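
power_supply_temp2resist_simple() linearly interpolates between neighbouring rows. A worked example with a hypothetical two-row table {temp = 20, resistance = 100}, {temp = 0, resistance = 130} and temp = 10: the scan stops at i = 1, so

    resist = (100 - 130) * (10 - 0) / (20 - 0) + 130 = -15 + 130 = 115

i.e. the battery is modelled at 115% of its ideal internal resistance at 10 degrees.
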
diff --git a/drivers/power/supply/sbs-battery.c b/drivers/power/supply/sbs-battery.c
index f8d74e9f7931..6acd242eed48 100644
--- a/drivers/power/supply/sbs-battery.c
+++ b/drivers/power/supply/sbs-battery.c
@@ -5,6 +5,7 @@
* Copyright (c) 2010, NVIDIA Corporation.
*/
+#include <linux/bits.h>
#include <linux/delay.h>
#include <linux/err.h>
#include <linux/gpio/consumer.h>
@@ -46,10 +47,10 @@ enum {
/* Battery Mode defines */
#define BATTERY_MODE_OFFSET 0x03
-#define BATTERY_MODE_MASK 0x8000
-enum sbs_battery_mode {
- BATTERY_MODE_AMPS = 0,
- BATTERY_MODE_WATTS = 0x8000
+#define BATTERY_MODE_CAPACITY_MASK BIT(15)
+enum sbs_capacity_mode {
+ CAPACITY_MODE_AMPS = 0,
+ CAPACITY_MODE_WATTS = BATTERY_MODE_CAPACITY_MASK
};
/* manufacturer access defines */
@@ -518,8 +519,8 @@ static void sbs_unit_adjustment(struct i2c_client *client,
}
}
-static enum sbs_battery_mode sbs_set_battery_mode(struct i2c_client *client,
- enum sbs_battery_mode mode)
+static enum sbs_capacity_mode sbs_set_capacity_mode(struct i2c_client *client,
+ enum sbs_capacity_mode mode)
{
int ret, original_val;
@@ -527,13 +528,13 @@ static enum sbs_battery_mode sbs_set_battery_mode(struct i2c_client *client,
if (original_val < 0)
return original_val;
- if ((original_val & BATTERY_MODE_MASK) == mode)
+ if ((original_val & BATTERY_MODE_CAPACITY_MASK) == mode)
return mode;
- if (mode == BATTERY_MODE_AMPS)
- ret = original_val & ~BATTERY_MODE_MASK;
+ if (mode == CAPACITY_MODE_AMPS)
+ ret = original_val & ~BATTERY_MODE_CAPACITY_MASK;
else
- ret = original_val | BATTERY_MODE_MASK;
+ ret = original_val | BATTERY_MODE_CAPACITY_MASK;
ret = sbs_write_word_data(client, BATTERY_MODE_OFFSET, ret);
if (ret < 0)
@@ -541,7 +542,7 @@ static enum sbs_battery_mode sbs_set_battery_mode(struct i2c_client *client,
usleep_range(1000, 2000);
- return original_val & BATTERY_MODE_MASK;
+ return original_val & BATTERY_MODE_CAPACITY_MASK;
}
static int sbs_get_battery_capacity(struct i2c_client *client,
@@ -549,13 +550,13 @@ static int sbs_get_battery_capacity(struct i2c_client *client,
union power_supply_propval *val)
{
s32 ret;
- enum sbs_battery_mode mode = BATTERY_MODE_WATTS;
+ enum sbs_capacity_mode mode = CAPACITY_MODE_WATTS;
if (power_supply_is_amp_property(psp))
- mode = BATTERY_MODE_AMPS;
+ mode = CAPACITY_MODE_AMPS;
- mode = sbs_set_battery_mode(client, mode);
- if (mode < 0)
+ mode = sbs_set_capacity_mode(client, mode);
+ if ((int)mode < 0)
return mode;
ret = sbs_read_word_data(client, sbs_data[reg_offset].addr);
@@ -564,7 +565,7 @@ static int sbs_get_battery_capacity(struct i2c_client *client,
val->intval = ret;
- ret = sbs_set_battery_mode(client, mode);
+ ret = sbs_set_capacity_mode(client, mode);
if (ret < 0)
return ret;
@@ -1001,6 +1002,6 @@ module_i2c_driver(sbs_battery_driver);
MODULE_DESCRIPTION("SBS battery monitor driver");
MODULE_LICENSE("GPL");
-module_param(force_load, bool, S_IRUSR | S_IRGRP | S_IROTH);
+module_param(force_load, bool, 0444);
MODULE_PARM_DESC(force_load,
"Attempt to load the driver even if no battery is connected");
diff --git a/drivers/power/supply/sc27xx_fuel_gauge.c b/drivers/power/supply/sc27xx_fuel_gauge.c
index bc8f5bda5762..469c83fdaa8e 100644
--- a/drivers/power/supply/sc27xx_fuel_gauge.c
+++ b/drivers/power/supply/sc27xx_fuel_gauge.c
@@ -62,6 +62,8 @@
#define SC27XX_FGU_CUR_BASIC_ADC 8192
#define SC27XX_FGU_SAMPLE_HZ 2
+/* micro Ohms */
+#define SC27XX_FGU_IDEAL_RESISTANCE 20000
/*
* struct sc27xx_fgu_data: describe the FGU device
@@ -81,9 +83,12 @@
* @max_volt: the maximum constant input voltage in millivolt
* @min_volt: the minimum drained battery voltage in microvolt
* @table_len: the capacity table length
+ * @resist_table_len: the resistance table length
* @cur_1000ma_adc: ADC value corresponding to 1000 mA
* @vol_1000mv_adc: ADC value corresponding to 1000 mV
+ * @calib_resist: the real resistance of coulomb counter chip in uOhm
* @cap_table: capacity table with corresponding ocv
+ * @resist_table: resistance percent table with corresponding temperature
*/
struct sc27xx_fgu_data {
struct regmap *regmap;
@@ -103,15 +108,19 @@ struct sc27xx_fgu_data {
int max_volt;
int min_volt;
int table_len;
+ int resist_table_len;
int cur_1000ma_adc;
int vol_1000mv_adc;
+ int calib_resist;
struct power_supply_battery_ocv_table *cap_table;
+ struct power_supply_resistance_temp_table *resist_table;
};
static int sc27xx_fgu_cap_to_clbcnt(struct sc27xx_fgu_data *data, int capacity);
static void sc27xx_fgu_capacity_calibration(struct sc27xx_fgu_data *data,
int cap, bool int_mode);
static void sc27xx_fgu_adjust_cap(struct sc27xx_fgu_data *data, int cap);
+static int sc27xx_fgu_get_temp(struct sc27xx_fgu_data *data, int *temp);
static const char * const sc27xx_charger_supply_name[] = {
"sc2731_charger",
@@ -434,7 +443,7 @@ static int sc27xx_fgu_get_current(struct sc27xx_fgu_data *data, int *val)
static int sc27xx_fgu_get_vbat_ocv(struct sc27xx_fgu_data *data, int *val)
{
- int vol, cur, ret;
+ int vol, cur, ret, temp, resistance;
ret = sc27xx_fgu_get_vbat_vol(data, &vol);
if (ret)
@@ -444,8 +453,19 @@ static int sc27xx_fgu_get_vbat_ocv(struct sc27xx_fgu_data *data, int *val)
if (ret)
return ret;
+ resistance = data->internal_resist;
+ if (data->resist_table_len > 0) {
+ ret = sc27xx_fgu_get_temp(data, &temp);
+ if (ret)
+ return ret;
+
+ resistance = power_supply_temp2resist_simple(data->resist_table,
+ data->resist_table_len, temp);
+ resistance = data->internal_resist * resistance / 100;
+ }
+
/* Return the battery OCV in micro volts. */
- *val = vol * 1000 - cur * data->internal_resist;
+ *val = vol * 1000 - cur * resistance;
return 0;
}
@@ -884,7 +904,9 @@ static int sc27xx_fgu_calibration(struct sc27xx_fgu_data *data)
*/
cal_4200mv = (calib_data & 0x1ff) + 6963 - 4096 - 256;
data->vol_1000mv_adc = DIV_ROUND_CLOSEST(cal_4200mv * 10, 42);
- data->cur_1000ma_adc = data->vol_1000mv_adc * 4;
+ data->cur_1000ma_adc =
+ DIV_ROUND_CLOSEST(data->vol_1000mv_adc * 4 * data->calib_resist,
+ SC27XX_FGU_IDEAL_RESISTANCE);
kfree(buf);
return 0;
@@ -929,6 +951,18 @@ static int sc27xx_fgu_hw_init(struct sc27xx_fgu_data *data)
if (!data->alarm_cap)
data->alarm_cap += 1;
+ data->resist_table_len = info.resist_table_size;
+ if (data->resist_table_len > 0) {
+ data->resist_table = devm_kmemdup(data->dev, info.resist_table,
+ data->resist_table_len *
+ sizeof(struct power_supply_resistance_temp_table),
+ GFP_KERNEL);
+ if (!data->resist_table) {
+ power_supply_put_battery_info(data->battery, &info);
+ return -ENOMEM;
+ }
+ }
+
power_supply_put_battery_info(data->battery, &info);
ret = sc27xx_fgu_calibration(data);
@@ -1051,6 +1085,15 @@ static int sc27xx_fgu_probe(struct platform_device *pdev)
return ret;
}
+ ret = device_property_read_u32(&pdev->dev,
+ "sprd,calib-resistance-micro-ohms",
+ &data->calib_resist);
+ if (ret) {
+ dev_err(&pdev->dev,
+ "failed to get fgu calibration resistance\n");
+ return ret;
+ }
+
data->channel = devm_iio_channel_get(dev, "bat-temp");
if (IS_ERR(data->channel)) {
dev_err(dev, "failed to get IIO channel\n");
diff --git a/drivers/power/supply/ucs1002_power.c b/drivers/power/supply/ucs1002_power.c
index 1b80ae479e7d..cdb9a23d825f 100644
--- a/drivers/power/supply/ucs1002_power.c
+++ b/drivers/power/supply/ucs1002_power.c
@@ -100,7 +100,9 @@ struct ucs1002_info {
struct i2c_client *client;
struct regmap *regmap;
struct regulator_desc *regulator_descriptor;
+ struct regulator_dev *rdev;
bool present;
+ bool output_disable;
};
static enum power_supply_property ucs1002_props[] = {
@@ -233,6 +235,11 @@ static int ucs1002_get_max_current(struct ucs1002_info *info,
unsigned int reg;
int ret;
+ if (info->output_disable) {
+ val->intval = 0;
+ return 0;
+ }
+
ret = regmap_read(info->regmap, UCS1002_REG_ILIMIT, &reg);
if (ret)
return ret;
@@ -247,6 +254,12 @@ static int ucs1002_set_max_current(struct ucs1002_info *info, u32 val)
unsigned int reg;
int ret, idx;
+ if (val == 0) {
+ info->output_disable = true;
+ regulator_disable_regmap(info->rdev);
+ return 0;
+ }
+
for (idx = 0; idx < ARRAY_SIZE(ucs1002_current_limit_uA); idx++) {
if (val == ucs1002_current_limit_uA[idx])
break;
@@ -270,6 +283,12 @@ static int ucs1002_set_max_current(struct ucs1002_info *info, u32 val)
if (reg != idx)
return -EINVAL;
+ info->output_disable = false;
+
+ if (info->rdev && info->rdev->use_count &&
+ !regulator_is_enabled_regmap(info->rdev))
+ regulator_enable_regmap(info->rdev);
+
return 0;
}
@@ -470,9 +489,24 @@ static irqreturn_t ucs1002_alert_irq(int irq, void *data)
return IRQ_HANDLED;
}
+static int ucs1002_regulator_enable(struct regulator_dev *rdev)
+{
+ struct ucs1002_info *info = rdev_get_drvdata(rdev);
+
+ /*
+ * If the output is disabled due to 0 maximum current, just pretend the
+	 * enable did work. The regulator will be enabled as soon as we get
+ * a non-zero maximum current budget.
+ */
+ if (info->output_disable)
+ return 0;
+
+ return regulator_enable_regmap(rdev);
+}
+
static const struct regulator_ops ucs1002_regulator_ops = {
.is_enabled = regulator_is_enabled_regmap,
- .enable = regulator_enable_regmap,
+ .enable = ucs1002_regulator_enable,
.disable = regulator_disable_regmap,
};
@@ -499,7 +533,6 @@ static int ucs1002_probe(struct i2c_client *client,
};
struct regulator_config regulator_config = {};
int irq_a_det, irq_alert, ret;
- struct regulator_dev *rdev;
struct ucs1002_info *info;
unsigned int regval;
@@ -589,10 +622,11 @@ static int ucs1002_probe(struct i2c_client *client,
regulator_config.dev = dev;
regulator_config.of_node = dev->of_node;
regulator_config.regmap = info->regmap;
+ regulator_config.driver_data = info;
- rdev = devm_regulator_register(dev, info->regulator_descriptor,
+ info->rdev = devm_regulator_register(dev, info->regulator_descriptor,
&regulator_config);
- ret = PTR_ERR_OR_ZERO(rdev);
+ ret = PTR_ERR_OR_ZERO(info->rdev);
if (ret) {
dev_err(dev, "Failed to register VBUS regulator: %d\n", ret);
return ret;
diff --git a/drivers/powercap/intel_rapl_common.c b/drivers/powercap/intel_rapl_common.c
index 2e5b6a6834da..73257cf107d9 100644
--- a/drivers/powercap/intel_rapl_common.c
+++ b/drivers/powercap/intel_rapl_common.c
@@ -980,6 +980,7 @@ static const struct x86_cpu_id rapl_ids[] __initconst = {
INTEL_CPU_FAM6(ICELAKE_D, rapl_defaults_hsw_server),
INTEL_CPU_FAM6(COMETLAKE_L, rapl_defaults_core),
INTEL_CPU_FAM6(COMETLAKE, rapl_defaults_core),
+ INTEL_CPU_FAM6(TIGERLAKE_L, rapl_defaults_core),
INTEL_CPU_FAM6(ATOM_SILVERMONT, rapl_defaults_byt),
INTEL_CPU_FAM6(ATOM_AIRMONT, rapl_defaults_cht),
@@ -989,6 +990,7 @@ static const struct x86_cpu_id rapl_ids[] __initconst = {
INTEL_CPU_FAM6(ATOM_GOLDMONT_PLUS, rapl_defaults_core),
INTEL_CPU_FAM6(ATOM_GOLDMONT_D, rapl_defaults_core),
INTEL_CPU_FAM6(ATOM_TREMONT_D, rapl_defaults_core),
+ INTEL_CPU_FAM6(ATOM_TREMONT_L, rapl_defaults_core),
INTEL_CPU_FAM6(XEON_PHI_KNL, rapl_defaults_hsw_server),
INTEL_CPU_FAM6(XEON_PHI_KNM, rapl_defaults_hsw_server),
diff --git a/drivers/ptp/Kconfig b/drivers/ptp/Kconfig
index b0d1b8d264fa..475c60dccaa4 100644
--- a/drivers/ptp/Kconfig
+++ b/drivers/ptp/Kconfig
@@ -56,20 +56,6 @@ config PTP_1588_CLOCK_QORIQ
To compile this driver as a module, choose M here: the module
will be called ptp-qoriq.
-config PTP_1588_CLOCK_IXP46X
- tristate "Intel IXP46x as PTP clock"
- depends on IXP4XX_ETH
- depends on PTP_1588_CLOCK
- default y
- help
- This driver adds support for using the IXP46X as a PTP
- clock. This clock is only useful if your PTP programs are
- getting hardware time stamps on the PTP Ethernet packets
- using the SO_TIMESTAMPING API.
-
- To compile this driver as a module, choose M here: the module
- will be called ptp_ixp46x.
-
comment "Enable PHYLIB and NETWORK_PHY_TIMESTAMPING to see the additional clocks."
depends on PHYLIB=n || NETWORK_PHY_TIMESTAMPING=n
@@ -89,6 +75,16 @@ config DP83640_PHY
In order for this to work, your MAC driver must also
implement the skb_tx_timestamp() function.
+config PTP_1588_CLOCK_INES
+ tristate "ZHAW InES PTP time stamping IP core"
+ depends on NETWORK_PHY_TIMESTAMPING
+ depends on PHYLIB
+ depends on PTP_1588_CLOCK
+ help
+ This driver adds support for using the ZHAW InES 1588 IP
+ core. This clock is only useful if the MII bus of your MAC
+ is wired up to the core.
+
config PTP_1588_CLOCK_PCH
tristate "Intel PCH EG20T as PTP clock"
depends on X86_32 || COMPILE_TEST
diff --git a/drivers/ptp/Makefile b/drivers/ptp/Makefile
index 69a06f86a450..8c830336f178 100644
--- a/drivers/ptp/Makefile
+++ b/drivers/ptp/Makefile
@@ -6,10 +6,10 @@
ptp-y := ptp_clock.o ptp_chardev.o ptp_sysfs.o
obj-$(CONFIG_PTP_1588_CLOCK) += ptp.o
obj-$(CONFIG_PTP_1588_CLOCK_DTE) += ptp_dte.o
-obj-$(CONFIG_PTP_1588_CLOCK_IXP46X) += ptp_ixp46x.o
+obj-$(CONFIG_PTP_1588_CLOCK_INES) += ptp_ines.o
obj-$(CONFIG_PTP_1588_CLOCK_PCH) += ptp_pch.o
obj-$(CONFIG_PTP_1588_CLOCK_KVM) += ptp_kvm.o
obj-$(CONFIG_PTP_1588_CLOCK_QORIQ) += ptp-qoriq.o
ptp-qoriq-y += ptp_qoriq.o
ptp-qoriq-$(CONFIG_DEBUG_FS) += ptp_qoriq_debugfs.o
-obj-$(CONFIG_PTP_1588_CLOCK_IDTCM)	+= ptp_clockmatrix.o
\ No newline at end of file
+obj-$(CONFIG_PTP_1588_CLOCK_IDTCM) += ptp_clockmatrix.o
diff --git a/drivers/ptp/idt8a340_reg.h b/drivers/ptp/idt8a340_reg.h
index 9263bc33b8f4..69eedda9f731 100644
--- a/drivers/ptp/idt8a340_reg.h
+++ b/drivers/ptp/idt8a340_reg.h
@@ -77,6 +77,8 @@
#define JTAG_DEVICE_ID 0x001c
#define PRODUCT_ID 0x001e
+#define OTP_SCSR_CONFIG_SELECT 0x0022
+
#define STATUS 0xc03c
#define USER_GPIO0_TO_7_STATUS 0x008a
#define USER_GPIO8_TO_15_STATUS 0x008b
diff --git a/drivers/ptp/ptp_clock.c b/drivers/ptp/ptp_clock.c
index 61fafe0374ce..ac1f2bf9e888 100644
--- a/drivers/ptp/ptp_clock.c
+++ b/drivers/ptp/ptp_clock.c
@@ -170,6 +170,7 @@ static void ptp_clock_release(struct device *dev)
{
struct ptp_clock *ptp = container_of(dev, struct ptp_clock, dev);
+ ptp_cleanup_pin_groups(ptp);
mutex_destroy(&ptp->tsevq_mux);
mutex_destroy(&ptp->pincfg_mux);
ida_simple_remove(&ptp_clocks_map, ptp->index);
@@ -302,9 +303,8 @@ int ptp_clock_unregister(struct ptp_clock *ptp)
if (ptp->pps_source)
pps_unregister_source(ptp->pps_source);
- ptp_cleanup_pin_groups(ptp);
-
posix_clock_unregister(&ptp->clock);
+
return 0;
}
EXPORT_SYMBOL(ptp_clock_unregister);
@@ -368,6 +368,12 @@ int ptp_schedule_worker(struct ptp_clock *ptp, unsigned long delay)
}
EXPORT_SYMBOL(ptp_schedule_worker);
+void ptp_cancel_worker_sync(struct ptp_clock *ptp)
+{
+ kthread_cancel_delayed_work_sync(&ptp->aux_work);
+}
+EXPORT_SYMBOL(ptp_cancel_worker_sync);
+
/* module operations */
static void __exit ptp_exit(void)
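
ptp_cancel_worker_sync() is the teardown counterpart of ptp_schedule_worker(): a driver whose .do_aux_work callback re-arms itself must cancel the worker before unregistering the clock, so no callback runs against freed state. A minimal consumer sketch (the foo names are hypothetical):

    static long foo_do_aux_work(struct ptp_clock_info *info)
    {
            /* ... poll hardware, flush timestamps ... */
            return HZ;                      /* re-arm: run again in one second */
    }

    static void foo_remove(struct foo_priv *priv)
    {
            ptp_cancel_worker_sync(priv->ptp);  /* no aux work after this */
            ptp_clock_unregister(priv->ptp);
    }
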
diff --git a/drivers/ptp/ptp_clockmatrix.c b/drivers/ptp/ptp_clockmatrix.c
index a5110b7b4ece..032e112c3dd9 100644
--- a/drivers/ptp/ptp_clockmatrix.c
+++ b/drivers/ptp/ptp_clockmatrix.c
@@ -405,6 +405,7 @@ static int _idtcm_set_dpll_tod(struct idtcm_channel *channel,
if (wr_trig == HW_TOD_WR_TRIG_SEL_MSB) {
if (idtcm->calculate_overhead_flag) {
+			/* Assumption: I2C @ 400 kHz */
total_overhead_ns = ktime_to_ns(ktime_get_raw()
- idtcm->start_time)
+ idtcm->tod_write_overhead_ns
@@ -596,44 +597,7 @@ static int idtcm_state_machine_reset(struct idtcm *idtcm)
static int idtcm_read_hw_rev_id(struct idtcm *idtcm, u8 *hw_rev_id)
{
- return idtcm_read(idtcm,
- GENERAL_STATUS,
- HW_REV_ID,
- hw_rev_id,
- sizeof(u8));
-}
-
-static int idtcm_read_bond_id(struct idtcm *idtcm, u8 *bond_id)
-{
- return idtcm_read(idtcm,
- GENERAL_STATUS,
- BOND_ID,
- bond_id,
- sizeof(u8));
-}
-
-static int idtcm_read_hw_csr_id(struct idtcm *idtcm, u16 *hw_csr_id)
-{
- int err;
- u8 buf[2] = {0};
-
- err = idtcm_read(idtcm, GENERAL_STATUS, HW_CSR_ID, buf, sizeof(buf));
-
- *hw_csr_id = (buf[1] << 8) | buf[0];
-
- return err;
-}
-
-static int idtcm_read_hw_irq_id(struct idtcm *idtcm, u16 *hw_irq_id)
-{
- int err;
- u8 buf[2] = {0};
-
- err = idtcm_read(idtcm, GENERAL_STATUS, HW_IRQ_ID, buf, sizeof(buf));
-
- *hw_irq_id = (buf[1] << 8) | buf[0];
-
- return err;
+ return idtcm_read(idtcm, HW_REVISION, REV_ID, hw_rev_id, sizeof(u8));
}
static int idtcm_read_product_id(struct idtcm *idtcm, u16 *product_id)
@@ -674,20 +638,11 @@ static int idtcm_read_hotfix_release(struct idtcm *idtcm, u8 *hotfix)
sizeof(u8));
}
-static int idtcm_read_pipeline(struct idtcm *idtcm, u32 *pipeline)
+static int idtcm_read_otp_scsr_config_select(struct idtcm *idtcm,
+ u8 *config_select)
{
- int err;
- u8 buf[4] = {0};
-
- err = idtcm_read(idtcm,
- GENERAL_STATUS,
- PIPELINE_ID,
- &buf[0],
- sizeof(buf));
-
- *pipeline = (buf[3] << 24) | (buf[2] << 16) | (buf[1] << 8) | buf[0];
-
- return err;
+ return idtcm_read(idtcm, GENERAL_STATUS, OTP_SCSR_CONFIG_SELECT,
+ config_select, sizeof(u8));
}
static int process_pll_mask(struct idtcm *idtcm, u32 addr, u8 val, u8 *mask)
@@ -1078,31 +1033,25 @@ static void idtcm_display_version_info(struct idtcm *idtcm)
u8 major;
u8 minor;
u8 hotfix;
- u32 pipeline;
u16 product_id;
- u16 csr_id;
- u16 irq_id;
u8 hw_rev_id;
- u8 bond_id;
+ u8 config_select;
+ char *fmt = "%d.%d.%d, Id: 0x%04x HW Rev: %d OTP Config Select: %d\n";
idtcm_read_major_release(idtcm, &major);
idtcm_read_minor_release(idtcm, &minor);
idtcm_read_hotfix_release(idtcm, &hotfix);
- idtcm_read_pipeline(idtcm, &pipeline);
idtcm_read_product_id(idtcm, &product_id);
idtcm_read_hw_rev_id(idtcm, &hw_rev_id);
- idtcm_read_bond_id(idtcm, &bond_id);
- idtcm_read_hw_csr_id(idtcm, &csr_id);
- idtcm_read_hw_irq_id(idtcm, &irq_id);
-
- dev_info(&idtcm->client->dev, "Version: %d.%d.%d, Pipeline %u\t"
- "0x%04x, Rev %d, Bond %d, CSR %d, IRQ %d\n",
- major, minor, hotfix, pipeline,
- product_id, hw_rev_id, bond_id, csr_id, irq_id);
+
+ idtcm_read_otp_scsr_config_select(idtcm, &config_select);
+
+ dev_info(&idtcm->client->dev, fmt, major, minor, hotfix,
+ product_id, hw_rev_id, config_select);
}
-static struct ptp_clock_info idtcm_caps = {
+static const struct ptp_clock_info idtcm_caps = {
.owner = THIS_MODULE,
.max_adj = 244000,
.n_per_out = 1,
diff --git a/drivers/ptp/ptp_ines.c b/drivers/ptp/ptp_ines.c
new file mode 100644
index 000000000000..dfda54cbd866
--- /dev/null
+++ b/drivers/ptp/ptp_ines.c
@@ -0,0 +1,852 @@
+// SPDX-License-Identifier: GPL-2.0
+//
+// Copyright (C) 2018 MOSER-BAER AG
+//
+
+#define pr_fmt(fmt) "InES_PTP: " fmt
+
+#include <linux/ethtool.h>
+#include <linux/export.h>
+#include <linux/if_vlan.h>
+#include <linux/mii_timestamper.h>
+#include <linux/module.h>
+#include <linux/net_tstamp.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+#include <linux/phy.h>
+#include <linux/platform_device.h>
+#include <linux/ptp_classify.h>
+#include <linux/ptp_clock_kernel.h>
+#include <linux/stddef.h>
+
+MODULE_DESCRIPTION("Driver for the ZHAW InES PTP time stamping IP core");
+MODULE_AUTHOR("Richard Cochran <richardcochran@gmail.com>");
+MODULE_VERSION("1.0");
+MODULE_LICENSE("GPL");
+
+/* GLOBAL register */
+#define MCAST_MAC_SELECT_SHIFT 2
+#define MCAST_MAC_SELECT_MASK 0x3
+#define IO_RESET BIT(1)
+#define PTP_RESET BIT(0)
+
+/* VERSION register */
+#define IF_MAJOR_VER_SHIFT 12
+#define IF_MAJOR_VER_MASK 0xf
+#define IF_MINOR_VER_SHIFT 8
+#define IF_MINOR_VER_MASK 0xf
+#define FPGA_MAJOR_VER_SHIFT 4
+#define FPGA_MAJOR_VER_MASK 0xf
+#define FPGA_MINOR_VER_SHIFT 0
+#define FPGA_MINOR_VER_MASK 0xf
+
+/* INT_STAT register */
+#define RX_INTR_STATUS_3 BIT(5)
+#define RX_INTR_STATUS_2 BIT(4)
+#define RX_INTR_STATUS_1 BIT(3)
+#define TX_INTR_STATUS_3 BIT(2)
+#define TX_INTR_STATUS_2 BIT(1)
+#define TX_INTR_STATUS_1 BIT(0)
+
+/* INT_MSK register */
+#define RX_INTR_MASK_3 BIT(5)
+#define RX_INTR_MASK_2 BIT(4)
+#define RX_INTR_MASK_1 BIT(3)
+#define TX_INTR_MASK_3 BIT(2)
+#define TX_INTR_MASK_2 BIT(1)
+#define TX_INTR_MASK_1 BIT(0)
+
+/* BUF_STAT register */
+#define RX_FIFO_NE_3 BIT(5)
+#define RX_FIFO_NE_2 BIT(4)
+#define RX_FIFO_NE_1 BIT(3)
+#define TX_FIFO_NE_3 BIT(2)
+#define TX_FIFO_NE_2 BIT(1)
+#define TX_FIFO_NE_1 BIT(0)
+
+/* PORT_CONF register */
+#define CM_ONE_STEP BIT(6)
+#define PHY_SPEED_SHIFT 4
+#define PHY_SPEED_MASK 0x3
+#define P2P_DELAY_WR_POS_SHIFT 2
+#define P2P_DELAY_WR_POS_MASK 0x3
+#define PTP_MODE_SHIFT 0
+#define PTP_MODE_MASK 0x3
+
+/* TS_STAT_TX register */
+#define TS_ENABLE BIT(15)
+#define DATA_READ_POS_SHIFT 8
+#define DATA_READ_POS_MASK 0x1f
+#define DISCARDED_EVENTS_SHIFT 4
+#define DISCARDED_EVENTS_MASK 0xf
+
+#define INES_N_PORTS 3
+#define INES_REGISTER_SIZE 0x80
+#define INES_PORT_OFFSET 0x20
+#define INES_PORT_SIZE 0x20
+#define INES_FIFO_DEPTH 90
+#define INES_MAX_EVENTS 100
+
+#define BC_PTP_V1 0
+#define BC_PTP_V2 1
+#define TC_E2E_PTP_V2 2
+#define TC_P2P_PTP_V2 3
+
+#define OFF_PTP_CLOCK_ID 20
+#define OFF_PTP_PORT_NUM 28
+
+#define PHY_SPEED_10 0
+#define PHY_SPEED_100 1
+#define PHY_SPEED_1000 2
+
+#define PORT_CONF \
+ ((PHY_SPEED_1000 << PHY_SPEED_SHIFT) | (BC_PTP_V2 << PTP_MODE_SHIFT))
+
+#define ines_read32(s, r) __raw_readl((void __iomem *)&s->regs->r)
+#define ines_write32(s, v, r) __raw_writel(v, (void __iomem *)&s->regs->r)
+
+#define MESSAGE_TYPE_SYNC 1
+#define MESSAGE_TYPE_P_DELAY_REQ 2
+#define MESSAGE_TYPE_P_DELAY_RESP 3
+#define MESSAGE_TYPE_DELAY_REQ 4
+
+#define SYNC 0x0
+#define DELAY_REQ 0x1
+#define PDELAY_REQ 0x2
+#define PDELAY_RESP 0x3
+
+static LIST_HEAD(ines_clocks);
+static DEFINE_MUTEX(ines_clocks_lock);
+
+struct ines_global_regs {
+ u32 id;
+ u32 test;
+ u32 global;
+ u32 version;
+ u32 test2;
+ u32 int_stat;
+ u32 int_msk;
+ u32 buf_stat;
+};
+
+struct ines_port_registers {
+ u32 port_conf;
+ u32 p_delay;
+ u32 ts_stat_tx;
+ u32 ts_stat_rx;
+ u32 ts_tx;
+ u32 ts_rx;
+};
+
+struct ines_timestamp {
+ struct list_head list;
+ unsigned long tmo;
+ u16 tag;
+ u64 sec;
+ u64 nsec;
+ u64 clkid;
+ u16 portnum;
+ u16 seqid;
+};
+
+struct ines_port {
+ struct ines_port_registers *regs;
+ struct mii_timestamper mii_ts;
+ struct ines_clock *clock;
+ bool rxts_enabled;
+ bool txts_enabled;
+ unsigned int index;
+ struct delayed_work ts_work;
+ /* lock protects event list and tx_skb */
+ spinlock_t lock;
+ struct sk_buff *tx_skb;
+ struct list_head events;
+ struct list_head pool;
+ struct ines_timestamp pool_data[INES_MAX_EVENTS];
+};
+
+struct ines_clock {
+ struct ines_port port[INES_N_PORTS];
+ struct ines_global_regs __iomem *regs;
+ void __iomem *base;
+ struct device_node *node;
+ struct device *dev;
+ struct list_head list;
+};
+
+static bool ines_match(struct sk_buff *skb, unsigned int ptp_class,
+ struct ines_timestamp *ts, struct device *dev);
+static int ines_rxfifo_read(struct ines_port *port);
+static u64 ines_rxts64(struct ines_port *port, unsigned int words);
+static bool ines_timestamp_expired(struct ines_timestamp *ts);
+static u64 ines_txts64(struct ines_port *port, unsigned int words);
+static void ines_txtstamp_work(struct work_struct *work);
+static bool is_sync_pdelay_resp(struct sk_buff *skb, int type);
+static u8 tag_to_msgtype(u8 tag);
+
+static void ines_clock_cleanup(struct ines_clock *clock)
+{
+ struct ines_port *port;
+ int i;
+
+ for (i = 0; i < INES_N_PORTS; i++) {
+ port = &clock->port[i];
+ cancel_delayed_work_sync(&port->ts_work);
+ }
+}
+
+static int ines_clock_init(struct ines_clock *clock, struct device *device,
+ void __iomem *addr)
+{
+ struct device_node *node = device->of_node;
+ unsigned long port_addr;
+ struct ines_port *port;
+ int i, j;
+
+ INIT_LIST_HEAD(&clock->list);
+ clock->node = node;
+ clock->dev = device;
+ clock->base = addr;
+ clock->regs = clock->base;
+
+ for (i = 0; i < INES_N_PORTS; i++) {
+ port = &clock->port[i];
+ port_addr = (unsigned long) clock->base +
+ INES_PORT_OFFSET + i * INES_PORT_SIZE;
+ port->regs = (struct ines_port_registers *) port_addr;
+ port->clock = clock;
+ port->index = i;
+ INIT_DELAYED_WORK(&port->ts_work, ines_txtstamp_work);
+ spin_lock_init(&port->lock);
+ INIT_LIST_HEAD(&port->events);
+ INIT_LIST_HEAD(&port->pool);
+ for (j = 0; j < INES_MAX_EVENTS; j++)
+ list_add(&port->pool_data[j].list, &port->pool);
+ }
+
+ ines_write32(clock, 0xBEEF, test);
+ ines_write32(clock, 0xBEEF, test2);
+
+ dev_dbg(device, "ID 0x%x\n", ines_read32(clock, id));
+ dev_dbg(device, "TEST 0x%x\n", ines_read32(clock, test));
+ dev_dbg(device, "VERSION 0x%x\n", ines_read32(clock, version));
+ dev_dbg(device, "TEST2 0x%x\n", ines_read32(clock, test2));
+
+ for (i = 0; i < INES_N_PORTS; i++) {
+ port = &clock->port[i];
+ ines_write32(port, PORT_CONF, port_conf);
+ }
+
+ return 0;
+}
+
+static struct ines_port *ines_find_port(struct device_node *node, u32 index)
+{
+ struct ines_port *port = NULL;
+ struct ines_clock *clock;
+ struct list_head *this;
+
+ mutex_lock(&ines_clocks_lock);
+ list_for_each(this, &ines_clocks) {
+ clock = list_entry(this, struct ines_clock, list);
+ if (clock->node == node) {
+ port = &clock->port[index];
+ break;
+ }
+ }
+ mutex_unlock(&ines_clocks_lock);
+ return port;
+}
+
+static u64 ines_find_rxts(struct ines_port *port, struct sk_buff *skb, int type)
+{
+ struct list_head *this, *next;
+ struct ines_timestamp *ts;
+ unsigned long flags;
+ u64 ns = 0;
+
+ if (type == PTP_CLASS_NONE)
+ return 0;
+
+ spin_lock_irqsave(&port->lock, flags);
+ ines_rxfifo_read(port);
+ list_for_each_safe(this, next, &port->events) {
+ ts = list_entry(this, struct ines_timestamp, list);
+ if (ines_timestamp_expired(ts)) {
+ list_del_init(&ts->list);
+ list_add(&ts->list, &port->pool);
+ continue;
+ }
+ if (ines_match(skb, type, ts, port->clock->dev)) {
+ ns = ts->sec * 1000000000ULL + ts->nsec;
+ list_del_init(&ts->list);
+ list_add(&ts->list, &port->pool);
+ break;
+ }
+ }
+ spin_unlock_irqrestore(&port->lock, flags);
+
+ return ns;
+}
+
+static u64 ines_find_txts(struct ines_port *port, struct sk_buff *skb)
+{
+ unsigned int class = ptp_classify_raw(skb), i;
+ u32 data_rd_pos, buf_stat, mask, ts_stat_tx;
+ struct ines_timestamp ts;
+ unsigned long flags;
+ u64 ns = 0;
+
+ mask = TX_FIFO_NE_1 << port->index;
+
+ spin_lock_irqsave(&port->lock, flags);
+
+ for (i = 0; i < INES_FIFO_DEPTH; i++) {
+
+ buf_stat = ines_read32(port->clock, buf_stat);
+ if (!(buf_stat & mask)) {
+ dev_dbg(port->clock->dev,
+ "Tx timestamp FIFO unexpectedly empty\n");
+ break;
+ }
+ ts_stat_tx = ines_read32(port, ts_stat_tx);
+ data_rd_pos = (ts_stat_tx >> DATA_READ_POS_SHIFT) &
+ DATA_READ_POS_MASK;
+ if (data_rd_pos) {
+ dev_err(port->clock->dev,
+ "unexpected Tx read pos %u\n", data_rd_pos);
+ break;
+ }
+
+ ts.tag = ines_read32(port, ts_tx);
+ ts.sec = ines_txts64(port, 3);
+ ts.nsec = ines_txts64(port, 2);
+ ts.clkid = ines_txts64(port, 4);
+ ts.portnum = ines_read32(port, ts_tx);
+ ts.seqid = ines_read32(port, ts_tx);
+
+ if (ines_match(skb, class, &ts, port->clock->dev)) {
+ ns = ts.sec * 1000000000ULL + ts.nsec;
+ break;
+ }
+ }
+
+ spin_unlock_irqrestore(&port->lock, flags);
+ return ns;
+}
+
+static int ines_hwtstamp(struct mii_timestamper *mii_ts, struct ifreq *ifr)
+{
+ struct ines_port *port = container_of(mii_ts, struct ines_port, mii_ts);
+ u32 cm_one_step = 0, port_conf, ts_stat_rx, ts_stat_tx;
+ struct hwtstamp_config cfg;
+ unsigned long flags;
+
+ if (copy_from_user(&cfg, ifr->ifr_data, sizeof(cfg)))
+ return -EFAULT;
+
+ /* reserved for future extensions */
+ if (cfg.flags)
+ return -EINVAL;
+
+ switch (cfg.tx_type) {
+ case HWTSTAMP_TX_OFF:
+ ts_stat_tx = 0;
+ break;
+ case HWTSTAMP_TX_ON:
+ ts_stat_tx = TS_ENABLE;
+ break;
+ case HWTSTAMP_TX_ONESTEP_P2P:
+ ts_stat_tx = TS_ENABLE;
+ cm_one_step = CM_ONE_STEP;
+ break;
+ default:
+ return -ERANGE;
+ }
+
+ switch (cfg.rx_filter) {
+ case HWTSTAMP_FILTER_NONE:
+ ts_stat_rx = 0;
+ break;
+ case HWTSTAMP_FILTER_ALL:
+ case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
+ case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
+ case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
+ return -ERANGE;
+ case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
+ case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
+ case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
+ case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
+ case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
+ case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
+ case HWTSTAMP_FILTER_PTP_V2_EVENT:
+ case HWTSTAMP_FILTER_PTP_V2_SYNC:
+ case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
+ ts_stat_rx = TS_ENABLE;
+ cfg.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
+ break;
+ default:
+ return -ERANGE;
+ }
+
+ spin_lock_irqsave(&port->lock, flags);
+
+ port_conf = ines_read32(port, port_conf);
+ port_conf &= ~CM_ONE_STEP;
+ port_conf |= cm_one_step;
+
+ ines_write32(port, port_conf, port_conf);
+ ines_write32(port, ts_stat_rx, ts_stat_rx);
+ ines_write32(port, ts_stat_tx, ts_stat_tx);
+
+	port->rxts_enabled = ts_stat_rx == TS_ENABLE;
+	port->txts_enabled = ts_stat_tx == TS_ENABLE;
+
+ spin_unlock_irqrestore(&port->lock, flags);
+
+ return copy_to_user(ifr->ifr_data, &cfg, sizeof(cfg)) ? -EFAULT : 0;
+}
+
+static void ines_link_state(struct mii_timestamper *mii_ts,
+ struct phy_device *phydev)
+{
+ struct ines_port *port = container_of(mii_ts, struct ines_port, mii_ts);
+ u32 port_conf, speed_conf;
+ unsigned long flags;
+
+ switch (phydev->speed) {
+ case SPEED_10:
+ speed_conf = PHY_SPEED_10 << PHY_SPEED_SHIFT;
+ break;
+ case SPEED_100:
+ speed_conf = PHY_SPEED_100 << PHY_SPEED_SHIFT;
+ break;
+ case SPEED_1000:
+ speed_conf = PHY_SPEED_1000 << PHY_SPEED_SHIFT;
+ break;
+ default:
+ dev_err(port->clock->dev, "bad speed: %d\n", phydev->speed);
+ return;
+ }
+ spin_lock_irqsave(&port->lock, flags);
+
+ port_conf = ines_read32(port, port_conf);
+ port_conf &= ~(0x3 << PHY_SPEED_SHIFT);
+ port_conf |= speed_conf;
+
+ ines_write32(port, port_conf, port_conf);
+
+ spin_unlock_irqrestore(&port->lock, flags);
+}
+
+static bool ines_match(struct sk_buff *skb, unsigned int ptp_class,
+ struct ines_timestamp *ts, struct device *dev)
+{
+ u8 *msgtype, *data = skb_mac_header(skb);
+ unsigned int offset = 0;
+ __be16 *portn, *seqid;
+ __be64 *clkid;
+
+ if (unlikely(ptp_class & PTP_CLASS_V1))
+ return false;
+
+ if (ptp_class & PTP_CLASS_VLAN)
+ offset += VLAN_HLEN;
+
+ switch (ptp_class & PTP_CLASS_PMASK) {
+ case PTP_CLASS_IPV4:
+ offset += ETH_HLEN + IPV4_HLEN(data + offset) + UDP_HLEN;
+ break;
+ case PTP_CLASS_IPV6:
+ offset += ETH_HLEN + IP6_HLEN + UDP_HLEN;
+ break;
+ case PTP_CLASS_L2:
+ offset += ETH_HLEN;
+ break;
+ default:
+ return false;
+ }
+
+ if (skb->len + ETH_HLEN < offset + OFF_PTP_SEQUENCE_ID + sizeof(*seqid))
+ return false;
+
+ msgtype = data + offset;
+ clkid = (__be64 *)(data + offset + OFF_PTP_CLOCK_ID);
+ portn = (__be16 *)(data + offset + OFF_PTP_PORT_NUM);
+ seqid = (__be16 *)(data + offset + OFF_PTP_SEQUENCE_ID);
+
+ if (tag_to_msgtype(ts->tag & 0x7) != (*msgtype & 0xf)) {
+ dev_dbg(dev, "msgtype mismatch ts %hhu != skb %hhu\n",
+ tag_to_msgtype(ts->tag & 0x7), *msgtype & 0xf);
+ return false;
+ }
+ if (cpu_to_be64(ts->clkid) != *clkid) {
+ dev_dbg(dev, "clkid mismatch ts %llx != skb %llx\n",
+ cpu_to_be64(ts->clkid), *clkid);
+ return false;
+ }
+ if (ts->portnum != ntohs(*portn)) {
+ dev_dbg(dev, "portn mismatch ts %hu != skb %hu\n",
+ ts->portnum, ntohs(*portn));
+ return false;
+ }
+ if (ts->seqid != ntohs(*seqid)) {
+ dev_dbg(dev, "seqid mismatch ts %hu != skb %hu\n",
+ ts->seqid, ntohs(*seqid));
+ return false;
+ }
+
+ return true;
+}
+
+static bool ines_rxtstamp(struct mii_timestamper *mii_ts,
+ struct sk_buff *skb, int type)
+{
+ struct ines_port *port = container_of(mii_ts, struct ines_port, mii_ts);
+ struct skb_shared_hwtstamps *ssh;
+ u64 ns;
+
+ if (!port->rxts_enabled)
+ return false;
+
+ ns = ines_find_rxts(port, skb, type);
+ if (!ns)
+ return false;
+
+ ssh = skb_hwtstamps(skb);
+ ssh->hwtstamp = ns_to_ktime(ns);
+ netif_rx(skb);
+
+ return true;
+}
+
+static int ines_rxfifo_read(struct ines_port *port)
+{
+ u32 data_rd_pos, buf_stat, mask, ts_stat_rx;
+ struct ines_timestamp *ts;
+ unsigned int i;
+
+ mask = RX_FIFO_NE_1 << port->index;
+
+ for (i = 0; i < INES_FIFO_DEPTH; i++) {
+ if (list_empty(&port->pool)) {
+ dev_err(port->clock->dev, "event pool is empty\n");
+ return -1;
+ }
+ buf_stat = ines_read32(port->clock, buf_stat);
+ if (!(buf_stat & mask))
+ break;
+
+ ts_stat_rx = ines_read32(port, ts_stat_rx);
+ data_rd_pos = (ts_stat_rx >> DATA_READ_POS_SHIFT) &
+ DATA_READ_POS_MASK;
+ if (data_rd_pos) {
+ dev_err(port->clock->dev, "unexpected Rx read pos %u\n",
+ data_rd_pos);
+ break;
+ }
+
+ ts = list_first_entry(&port->pool, struct ines_timestamp, list);
+ ts->tmo = jiffies + HZ;
+ ts->tag = ines_read32(port, ts_rx);
+ ts->sec = ines_rxts64(port, 3);
+ ts->nsec = ines_rxts64(port, 2);
+ ts->clkid = ines_rxts64(port, 4);
+ ts->portnum = ines_read32(port, ts_rx);
+ ts->seqid = ines_read32(port, ts_rx);
+
+ list_del_init(&ts->list);
+ list_add_tail(&ts->list, &port->events);
+ }
+
+ return 0;
+}
+
+static u64 ines_rxts64(struct ines_port *port, unsigned int words)
+{
+ unsigned int i;
+ u64 result;
+ u16 word;
+
+ word = ines_read32(port, ts_rx);
+ result = word;
+ words--;
+ for (i = 0; i < words; i++) {
+ word = ines_read32(port, ts_rx);
+ result <<= 16;
+ result |= word;
+ }
+ return result;
+}
+
+static bool ines_timestamp_expired(struct ines_timestamp *ts)
+{
+ return time_after(jiffies, ts->tmo);
+}
+
+static int ines_ts_info(struct mii_timestamper *mii_ts,
+ struct ethtool_ts_info *info)
+{
+ info->so_timestamping =
+ SOF_TIMESTAMPING_TX_HARDWARE |
+ SOF_TIMESTAMPING_TX_SOFTWARE |
+ SOF_TIMESTAMPING_RX_HARDWARE |
+ SOF_TIMESTAMPING_RX_SOFTWARE |
+ SOF_TIMESTAMPING_SOFTWARE |
+ SOF_TIMESTAMPING_RAW_HARDWARE;
+
+ info->phc_index = -1;
+
+ info->tx_types =
+ (1 << HWTSTAMP_TX_OFF) |
+ (1 << HWTSTAMP_TX_ON) |
+ (1 << HWTSTAMP_TX_ONESTEP_P2P);
+
+ info->rx_filters =
+ (1 << HWTSTAMP_FILTER_NONE) |
+ (1 << HWTSTAMP_FILTER_PTP_V2_EVENT);
+
+ return 0;
+}
+
+static u64 ines_txts64(struct ines_port *port, unsigned int words)
+{
+ unsigned int i;
+ u64 result;
+ u16 word;
+
+ word = ines_read32(port, ts_tx);
+ result = word;
+ words--;
+ for (i = 0; i < words; i++) {
+ word = ines_read32(port, ts_tx);
+ result <<= 16;
+ result |= word;
+ }
+ return result;
+}
+
+static bool ines_txts_onestep(struct ines_port *port, struct sk_buff *skb, int type)
+{
+ unsigned long flags;
+ u32 port_conf;
+
+ spin_lock_irqsave(&port->lock, flags);
+ port_conf = ines_read32(port, port_conf);
+ spin_unlock_irqrestore(&port->lock, flags);
+
+ if (port_conf & CM_ONE_STEP)
+ return is_sync_pdelay_resp(skb, type);
+
+ return false;
+}
+
+static void ines_txtstamp(struct mii_timestamper *mii_ts,
+ struct sk_buff *skb, int type)
+{
+ struct ines_port *port = container_of(mii_ts, struct ines_port, mii_ts);
+ struct sk_buff *old_skb = NULL;
+ unsigned long flags;
+
+ if (!port->txts_enabled || ines_txts_onestep(port, skb, type)) {
+ kfree_skb(skb);
+ return;
+ }
+
+ spin_lock_irqsave(&port->lock, flags);
+
+ if (port->tx_skb)
+ old_skb = port->tx_skb;
+
+ port->tx_skb = skb;
+
+ spin_unlock_irqrestore(&port->lock, flags);
+
+ if (old_skb)
+ kfree_skb(old_skb);
+
+ schedule_delayed_work(&port->ts_work, 1);
+}
+
+static void ines_txtstamp_work(struct work_struct *work)
+{
+ struct ines_port *port =
+ container_of(work, struct ines_port, ts_work.work);
+ struct skb_shared_hwtstamps ssh;
+ struct sk_buff *skb;
+ unsigned long flags;
+ u64 ns;
+
+ spin_lock_irqsave(&port->lock, flags);
+ skb = port->tx_skb;
+ port->tx_skb = NULL;
+ spin_unlock_irqrestore(&port->lock, flags);
+
+ ns = ines_find_txts(port, skb);
+ if (!ns) {
+ kfree_skb(skb);
+ return;
+ }
+ ssh.hwtstamp = ns_to_ktime(ns);
+ skb_complete_tx_timestamp(skb, &ssh);
+}
+
+static bool is_sync_pdelay_resp(struct sk_buff *skb, int type)
+{
+ u8 *data = skb->data, *msgtype;
+ unsigned int offset = 0;
+
+ if (type & PTP_CLASS_VLAN)
+ offset += VLAN_HLEN;
+
+ switch (type & PTP_CLASS_PMASK) {
+ case PTP_CLASS_IPV4:
+ offset += ETH_HLEN + IPV4_HLEN(data + offset) + UDP_HLEN;
+ break;
+ case PTP_CLASS_IPV6:
+ offset += ETH_HLEN + IP6_HLEN + UDP_HLEN;
+ break;
+ case PTP_CLASS_L2:
+ offset += ETH_HLEN;
+ break;
+ default:
+		return false;
+ }
+
+ if (type & PTP_CLASS_V1)
+ offset += OFF_PTP_CONTROL;
+
+ if (skb->len < offset + 1)
+		return false;
+
+ msgtype = data + offset;
+
+ switch ((*msgtype & 0xf)) {
+ case SYNC:
+ case PDELAY_RESP:
+ return true;
+ default:
+ return false;
+ }
+}
+
+static u8 tag_to_msgtype(u8 tag)
+{
+ switch (tag) {
+ case MESSAGE_TYPE_SYNC:
+ return SYNC;
+ case MESSAGE_TYPE_P_DELAY_REQ:
+ return PDELAY_REQ;
+ case MESSAGE_TYPE_P_DELAY_RESP:
+ return PDELAY_RESP;
+ case MESSAGE_TYPE_DELAY_REQ:
+ return DELAY_REQ;
+ }
+ return 0xf;
+}
+
+static struct mii_timestamper *ines_ptp_probe_channel(struct device *device,
+ unsigned int index)
+{
+ struct device_node *node = device->of_node;
+ struct ines_port *port;
+
+ if (index > INES_N_PORTS - 1) {
+ dev_err(device, "bad port index %u\n", index);
+ return ERR_PTR(-EINVAL);
+ }
+ port = ines_find_port(node, index);
+ if (!port) {
+ dev_err(device, "missing port index %u\n", index);
+ return ERR_PTR(-ENODEV);
+ }
+ port->mii_ts.rxtstamp = ines_rxtstamp;
+ port->mii_ts.txtstamp = ines_txtstamp;
+ port->mii_ts.hwtstamp = ines_hwtstamp;
+ port->mii_ts.link_state = ines_link_state;
+ port->mii_ts.ts_info = ines_ts_info;
+
+ return &port->mii_ts;
+}
+
+static void ines_ptp_release_channel(struct device *device,
+ struct mii_timestamper *mii_ts)
+{
+}
+
+static struct mii_timestamping_ctrl ines_ctrl = {
+ .probe_channel = ines_ptp_probe_channel,
+ .release_channel = ines_ptp_release_channel,
+};
+
+static int ines_ptp_ctrl_probe(struct platform_device *pld)
+{
+ struct ines_clock *clock;
+ struct resource *res;
+ void __iomem *addr;
+ int err = 0;
+
+ res = platform_get_resource(pld, IORESOURCE_MEM, 0);
+ if (!res) {
+ dev_err(&pld->dev, "missing memory resource\n");
+ return -EINVAL;
+ }
+ addr = devm_ioremap_resource(&pld->dev, res);
+ if (IS_ERR(addr)) {
+ err = PTR_ERR(addr);
+ goto out;
+ }
+ clock = kzalloc(sizeof(*clock), GFP_KERNEL);
+ if (!clock) {
+ err = -ENOMEM;
+ goto out;
+ }
+ if (ines_clock_init(clock, &pld->dev, addr)) {
+ kfree(clock);
+ err = -ENOMEM;
+ goto out;
+ }
+ err = register_mii_tstamp_controller(&pld->dev, &ines_ctrl);
+ if (err) {
+ kfree(clock);
+ goto out;
+ }
+ mutex_lock(&ines_clocks_lock);
+	list_add_tail(&clock->list, &ines_clocks);
+ mutex_unlock(&ines_clocks_lock);
+
+ dev_set_drvdata(&pld->dev, clock);
+out:
+ return err;
+}
+
+static int ines_ptp_ctrl_remove(struct platform_device *pld)
+{
+ struct ines_clock *clock = dev_get_drvdata(&pld->dev);
+
+ unregister_mii_tstamp_controller(&pld->dev);
+ mutex_lock(&ines_clocks_lock);
+ list_del(&clock->list);
+ mutex_unlock(&ines_clocks_lock);
+ ines_clock_cleanup(clock);
+ kfree(clock);
+ return 0;
+}
+
+static const struct of_device_id ines_ptp_ctrl_of_match[] = {
+ { .compatible = "ines,ptp-ctrl" },
+ { }
+};
+
+MODULE_DEVICE_TABLE(of, ines_ptp_ctrl_of_match);
+
+static struct platform_driver ines_ptp_ctrl_driver = {
+ .probe = ines_ptp_ctrl_probe,
+ .remove = ines_ptp_ctrl_remove,
+ .driver = {
+ .name = "ines_ptp_ctrl",
+ .of_match_table = of_match_ptr(ines_ptp_ctrl_of_match),
+ },
+};
+module_platform_driver(ines_ptp_ctrl_driver);
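The ines_rxts64() and ines_txts64() helpers above fold a run of 16-bit FIFO words, most significant word first, into a single value. Below is a minimal, runnable userspace model of that word assembly; the array cursor stands in for the ines_read32(port, ts_rx/ts_tx) register access.

#include <stdint.h>
#include <stdio.h>

/*
 * Model of ines_rxts64()/ines_txts64(): 'words' 16-bit words arrive
 * most significant word first and are folded into one value.
 */
static uint64_t ines_ts64_model(const uint16_t **cursor, unsigned int words)
{
	uint64_t result = *(*cursor)++;
	unsigned int i;

	for (i = 1; i < words; i++) {
		result <<= 16;
		result |= *(*cursor)++;
	}
	return result;
}

int main(void)
{
	/* Seconds field as three FIFO words: 0x0001 0x2345 0x6789 */
	const uint16_t fifo[] = { 0x0001, 0x2345, 0x6789 };
	const uint16_t *pos = fifo;

	printf("sec = 0x%llx\n",
	       (unsigned long long)ines_ts64_model(&pos, 3));
	/* prints: sec = 0x123456789 */
	return 0;
}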
diff --git a/drivers/ptp/ptp_qoriq.c b/drivers/ptp/ptp_qoriq.c
index a577218d1ab7..b27c46ebfc8f 100644
--- a/drivers/ptp/ptp_qoriq.c
+++ b/drivers/ptp/ptp_qoriq.c
@@ -74,14 +74,13 @@ static void set_fipers(struct ptp_qoriq *ptp_qoriq)
ptp_qoriq->write(&regs->fiper_regs->tmr_fiper2, ptp_qoriq->tmr_fiper2);
}
-static int extts_clean_up(struct ptp_qoriq *ptp_qoriq, int index,
- bool update_event)
+int extts_clean_up(struct ptp_qoriq *ptp_qoriq, int index, bool update_event)
{
struct ptp_qoriq_registers *regs = &ptp_qoriq->regs;
struct ptp_clock_event event;
void __iomem *reg_etts_l;
void __iomem *reg_etts_h;
- u32 valid, stat, lo, hi;
+ u32 valid, lo, hi;
switch (index) {
case 0:
@@ -101,6 +100,10 @@ static int extts_clean_up(struct ptp_qoriq *ptp_qoriq, int index,
event.type = PTP_CLOCK_EXTTS;
event.index = index;
+ if (ptp_qoriq->extts_fifo_support)
+ if (!(ptp_qoriq->read(&regs->ctrl_regs->tmr_stat) & valid))
+ return 0;
+
do {
lo = ptp_qoriq->read(reg_etts_l);
hi = ptp_qoriq->read(reg_etts_h);
@@ -111,11 +114,13 @@ static int extts_clean_up(struct ptp_qoriq *ptp_qoriq, int index,
ptp_clock_event(ptp_qoriq->clock, &event);
}
- stat = ptp_qoriq->read(&regs->ctrl_regs->tmr_stat);
- } while (ptp_qoriq->extts_fifo_support && (stat & valid));
+ if (!ptp_qoriq->extts_fifo_support)
+ break;
+ } while (ptp_qoriq->read(&regs->ctrl_regs->tmr_stat) & valid);
return 0;
}
+EXPORT_SYMBOL_GPL(extts_clean_up);
/*
* Interrupt service routine
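The reworked extts_clean_up() above changes when the hardware is read: with FIFO support the valid flag in tmr_stat is checked before the first read and re-checked after each sample, and without FIFO support the single latched sample is read exactly once. A runnable model of that control flow follows; lo/hi are collapsed into one word here and every name is a stand-in, not a driver symbol.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define VALID 0x1

/* Simulated timestamp FIFO and status register. */
static const uint32_t fifo[] = { 100, 200, 300 };
static unsigned int fifo_pos;

static uint32_t read_stat(void)
{
	return fifo_pos < 3 ? VALID : 0;
}

static uint32_t read_sample(void)
{
	return fifo_pos < 3 ? fifo[fifo_pos++] : 0;
}

static void drain(bool fifo_supported)
{
	/* New early check: with a FIFO, never read while nothing is valid. */
	if (fifo_supported && !(read_stat() & VALID))
		return;

	do {
		printf("event: %u\n", (unsigned)read_sample());
		if (!fifo_supported)
			break;	/* only the single latched sample */
	} while (read_stat() & VALID);
}

int main(void)
{
	drain(true);	/* reports all three queued samples */
	fifo_pos = 0;
	drain(false);	/* reports only the first sample */
	return 0;
}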
diff --git a/drivers/regulator/Kconfig b/drivers/regulator/Kconfig
index 74eb5af7295f..97bfdd47954f 100644
--- a/drivers/regulator/Kconfig
+++ b/drivers/regulator/Kconfig
@@ -194,6 +194,18 @@ config REGULATOR_BD70528
This driver can also be built as a module. If so, the module
will be called bd70528-regulator.
+config REGULATOR_BD71828
+ tristate "ROHM BD71828 Power Regulator"
+ depends on MFD_ROHM_BD71828
+ select REGULATOR_ROHM
+ help
+	  This driver supports the voltage regulators on the ROHM BD71828 PMIC.
+ This will enable support for the software controllable buck
+ and LDO regulators.
+
+ This driver can also be built as a module. If so, the module
+ will be called bd71828-regulator.
+
config REGULATOR_BD718XX
tristate "ROHM BD71837 Power Regulator"
depends on MFD_ROHM_BD718XX
@@ -600,6 +612,27 @@ config REGULATOR_MCP16502
through the regulator interface. In addition it enables
suspend-to-ram/standby transition.
+config REGULATOR_MP8859
+ tristate "MPS MP8859 regulator driver"
+ depends on I2C
+ select REGMAP_I2C
+ help
+ Say y here to support the MP8859 voltage regulator. This driver
+ supports basic operations (get/set voltage) through the regulator
+ interface.
+ Say M here if you want to include support for the regulator as a
+ module. The module will be named "mp8859".
+
+config REGULATOR_MPQ7920
+ tristate "Monolithic MPQ7920 PMIC"
+ depends on I2C && OF
+ select REGMAP_I2C
+ help
+	  Say y here to support the MPQ7920 PMIC. This will enable support
+	  for the four software controllable buck and five LDO regulators.
+	  This driver allows the different power rails of the device to be
+	  controlled through the regulator interface.
+
config REGULATOR_MT6311
tristate "MediaTek MT6311 PMIC"
depends on I2C
@@ -1077,6 +1110,13 @@ config REGULATOR_VEXPRESS
This driver provides support for voltage regulators available
on the ARM Ltd's Versatile Express platform.
+config REGULATOR_VQMMC_IPQ4019
+ tristate "IPQ4019 VQMMC SD LDO regulator support"
+ depends on ARCH_QCOM
+ help
+	  This driver provides support for the VQMMC LDO I/O
+	  voltage regulator of the IPQ4019 SD/EMMC controller.
+
config REGULATOR_WM831X
tristate "Wolfson Microelectronics WM831x PMIC regulators"
depends on MFD_WM831X
diff --git a/drivers/regulator/Makefile b/drivers/regulator/Makefile
index 2210ba56f9bd..07bc977c52b0 100644
--- a/drivers/regulator/Makefile
+++ b/drivers/regulator/Makefile
@@ -28,6 +28,7 @@ obj-$(CONFIG_REGULATOR_AS3722) += as3722-regulator.o
obj-$(CONFIG_REGULATOR_AXP20X) += axp20x-regulator.o
obj-$(CONFIG_REGULATOR_BCM590XX) += bcm590xx-regulator.o
obj-$(CONFIG_REGULATOR_BD70528) += bd70528-regulator.o
+obj-$(CONFIG_REGULATOR_BD71828) += bd71828-regulator.o
obj-$(CONFIG_REGULATOR_BD718XX) += bd718x7-regulator.o
obj-$(CONFIG_REGULATOR_BD9571MWV) += bd9571mwv-regulator.o
obj-$(CONFIG_REGULATOR_DA903X) += da903x.o
@@ -77,6 +78,8 @@ obj-$(CONFIG_REGULATOR_MC13783) += mc13783-regulator.o
obj-$(CONFIG_REGULATOR_MC13892) += mc13892-regulator.o
obj-$(CONFIG_REGULATOR_MC13XXX_CORE) += mc13xxx-regulator-core.o
obj-$(CONFIG_REGULATOR_MCP16502) += mcp16502.o
+obj-$(CONFIG_REGULATOR_MP8859) += mp8859.o
+obj-$(CONFIG_REGULATOR_MPQ7920) += mpq7920.o
obj-$(CONFIG_REGULATOR_MT6311) += mt6311-regulator.o
obj-$(CONFIG_REGULATOR_MT6323) += mt6323-regulator.o
obj-$(CONFIG_REGULATOR_MT6358) += mt6358-regulator.o
@@ -132,6 +135,7 @@ obj-$(CONFIG_REGULATOR_TWL4030) += twl-regulator.o twl6030-regulator.o
obj-$(CONFIG_REGULATOR_UNIPHIER) += uniphier-regulator.o
obj-$(CONFIG_REGULATOR_VCTRL) += vctrl-regulator.o
obj-$(CONFIG_REGULATOR_VEXPRESS) += vexpress-regulator.o
+obj-$(CONFIG_REGULATOR_VQMMC_IPQ4019) += vqmmc-ipq4019-regulator.o
obj-$(CONFIG_REGULATOR_WM831X) += wm831x-dcdc.o
obj-$(CONFIG_REGULATOR_WM831X) += wm831x-isink.o
obj-$(CONFIG_REGULATOR_WM831X) += wm831x-ldo.o
diff --git a/drivers/regulator/bd71828-regulator.c b/drivers/regulator/bd71828-regulator.c
new file mode 100644
index 000000000000..b2fa17be4988
--- /dev/null
+++ b/drivers/regulator/bd71828-regulator.c
@@ -0,0 +1,807 @@
+// SPDX-License-Identifier: GPL-2.0-only
+// Copyright (C) 2019 ROHM Semiconductors
+// bd71828-regulator.c ROHM BD71828GW-DS1 regulator driver
+//
+
+#include <linux/delay.h>
+#include <linux/err.h>
+#include <linux/gpio.h>
+#include <linux/interrupt.h>
+#include <linux/kernel.h>
+#include <linux/mfd/rohm-bd71828.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+#include <linux/regulator/driver.h>
+#include <linux/regulator/machine.h>
+#include <linux/regulator/of_regulator.h>
+
+struct reg_init {
+ unsigned int reg;
+ unsigned int mask;
+ unsigned int val;
+};
+struct bd71828_regulator_data {
+ struct regulator_desc desc;
+ const struct rohm_dvs_config dvs;
+ const struct reg_init *reg_inits;
+ int reg_init_amnt;
+};
+
+static const struct reg_init buck1_inits[] = {
+ /*
+ * DVS Buck voltages can be changed by register values or via GPIO.
+ * Use register accesses by default.
+ */
+ {
+ .reg = BD71828_REG_PS_CTRL_1,
+ .mask = BD71828_MASK_DVS_BUCK1_CTRL,
+ .val = BD71828_DVS_BUCK1_CTRL_I2C,
+ },
+};
+
+static const struct reg_init buck2_inits[] = {
+ {
+ .reg = BD71828_REG_PS_CTRL_1,
+ .mask = BD71828_MASK_DVS_BUCK2_CTRL,
+ .val = BD71828_DVS_BUCK2_CTRL_I2C,
+ },
+};
+
+static const struct reg_init buck6_inits[] = {
+ {
+ .reg = BD71828_REG_PS_CTRL_1,
+ .mask = BD71828_MASK_DVS_BUCK6_CTRL,
+ .val = BD71828_DVS_BUCK6_CTRL_I2C,
+ },
+};
+
+static const struct reg_init buck7_inits[] = {
+ {
+ .reg = BD71828_REG_PS_CTRL_1,
+ .mask = BD71828_MASK_DVS_BUCK7_CTRL,
+ .val = BD71828_DVS_BUCK7_CTRL_I2C,
+ },
+};
+
+static const struct regulator_linear_range bd71828_buck1267_volts[] = {
+ REGULATOR_LINEAR_RANGE(500000, 0x00, 0xef, 6250),
+ REGULATOR_LINEAR_RANGE(2000000, 0xf0, 0xff, 0),
+};
+
+static const struct regulator_linear_range bd71828_buck3_volts[] = {
+ REGULATOR_LINEAR_RANGE(1200000, 0x00, 0x0f, 50000),
+ REGULATOR_LINEAR_RANGE(2000000, 0x10, 0x1f, 0),
+};
+
+static const struct regulator_linear_range bd71828_buck4_volts[] = {
+ REGULATOR_LINEAR_RANGE(1000000, 0x00, 0x1f, 25000),
+ REGULATOR_LINEAR_RANGE(1800000, 0x20, 0x3f, 0),
+};
+
+static const struct regulator_linear_range bd71828_buck5_volts[] = {
+ REGULATOR_LINEAR_RANGE(2500000, 0x00, 0x0f, 50000),
+ REGULATOR_LINEAR_RANGE(3300000, 0x10, 0x1f, 0),
+};
+
+static const struct regulator_linear_range bd71828_ldo_volts[] = {
+ REGULATOR_LINEAR_RANGE(800000, 0x00, 0x31, 50000),
+ REGULATOR_LINEAR_RANGE(3300000, 0x32, 0x3f, 0),
+};
+
+static int bd71828_set_ramp_delay(struct regulator_dev *rdev, int ramp_delay)
+{
+ unsigned int val;
+
+ switch (ramp_delay) {
+ case 1 ... 2500:
+ val = 0;
+ break;
+ case 2501 ... 5000:
+ val = 1;
+ break;
+ case 5001 ... 10000:
+ val = 2;
+ break;
+ case 10001 ... 20000:
+ val = 3;
+ break;
+ default:
+ val = 3;
+ dev_err(&rdev->dev,
+			"ramp_delay: %d not supported, setting 20 mV/us",
+ ramp_delay);
+ }
+
+ /*
+	 * On the BD71828 the ramp delay level control register is located
+	 * at offset +2 from the enable register.
+ */
+ return regmap_update_bits(rdev->regmap, rdev->desc->enable_reg + 2,
+ BD71828_MASK_RAMP_DELAY,
+ val << (ffs(BD71828_MASK_RAMP_DELAY) - 1));
+}
+
+static int buck_set_hw_dvs_levels(struct device_node *np,
+ const struct regulator_desc *desc,
+ struct regulator_config *cfg)
+{
+ struct bd71828_regulator_data *data;
+
+ data = container_of(desc, struct bd71828_regulator_data, desc);
+
+ return rohm_regulator_set_dvs_levels(&data->dvs, np, desc, cfg->regmap);
+}
+
+static int ldo6_parse_dt(struct device_node *np,
+ const struct regulator_desc *desc,
+ struct regulator_config *cfg)
+{
+ int ret, i;
+ uint32_t uv = 0;
+ unsigned int en;
+ struct regmap *regmap = cfg->regmap;
+ static const char * const props[] = { "rohm,dvs-run-voltage",
+ "rohm,dvs-idle-voltage",
+ "rohm,dvs-suspend-voltage",
+ "rohm,dvs-lpsr-voltage" };
+ unsigned int mask[] = { BD71828_MASK_RUN_EN, BD71828_MASK_IDLE_EN,
+ BD71828_MASK_SUSP_EN, BD71828_MASK_LPSR_EN };
+
+ for (i = 0; i < ARRAY_SIZE(props); i++) {
+ ret = of_property_read_u32(np, props[i], &uv);
+ if (ret) {
+ if (ret != -EINVAL)
+ return ret;
+ continue;
+ }
+ if (uv)
+ en = 0xffffffff;
+ else
+ en = 0;
+
+ ret = regmap_update_bits(regmap, desc->enable_reg, mask[i], en);
+ if (ret)
+ return ret;
+ }
+ return 0;
+}
+
+static const struct regulator_ops bd71828_buck_ops = {
+ .enable = regulator_enable_regmap,
+ .disable = regulator_disable_regmap,
+ .is_enabled = regulator_is_enabled_regmap,
+ .list_voltage = regulator_list_voltage_linear_range,
+ .set_voltage_sel = regulator_set_voltage_sel_regmap,
+ .get_voltage_sel = regulator_get_voltage_sel_regmap,
+};
+
+static const struct regulator_ops bd71828_dvs_buck_ops = {
+ .enable = regulator_enable_regmap,
+ .disable = regulator_disable_regmap,
+ .is_enabled = regulator_is_enabled_regmap,
+ .list_voltage = regulator_list_voltage_linear_range,
+ .set_voltage_sel = regulator_set_voltage_sel_regmap,
+ .get_voltage_sel = regulator_get_voltage_sel_regmap,
+ .set_voltage_time_sel = regulator_set_voltage_time_sel,
+ .set_ramp_delay = bd71828_set_ramp_delay,
+};
+
+static const struct regulator_ops bd71828_ldo_ops = {
+ .enable = regulator_enable_regmap,
+ .disable = regulator_disable_regmap,
+ .is_enabled = regulator_is_enabled_regmap,
+ .list_voltage = regulator_list_voltage_linear_range,
+ .set_voltage_sel = regulator_set_voltage_sel_regmap,
+ .get_voltage_sel = regulator_get_voltage_sel_regmap,
+};
+
+static const struct regulator_ops bd71828_ldo6_ops = {
+ .enable = regulator_enable_regmap,
+ .disable = regulator_disable_regmap,
+ .is_enabled = regulator_is_enabled_regmap,
+};
+
+static const struct bd71828_regulator_data bd71828_rdata[] = {
+ {
+ .desc = {
+ .name = "buck1",
+ .of_match = of_match_ptr("BUCK1"),
+ .regulators_node = of_match_ptr("regulators"),
+ .id = BD71828_BUCK1,
+ .ops = &bd71828_dvs_buck_ops,
+ .type = REGULATOR_VOLTAGE,
+ .linear_ranges = bd71828_buck1267_volts,
+ .n_linear_ranges = ARRAY_SIZE(bd71828_buck1267_volts),
+ .n_voltages = BD71828_BUCK1267_VOLTS,
+ .enable_reg = BD71828_REG_BUCK1_EN,
+ .enable_mask = BD71828_MASK_RUN_EN,
+ .vsel_reg = BD71828_REG_BUCK1_VOLT,
+ .vsel_mask = BD71828_MASK_BUCK1267_VOLT,
+ .owner = THIS_MODULE,
+ .of_parse_cb = buck_set_hw_dvs_levels,
+ },
+ .dvs = {
+ .level_map = ROHM_DVS_LEVEL_RUN | ROHM_DVS_LEVEL_IDLE |
+ ROHM_DVS_LEVEL_SUSPEND |
+ ROHM_DVS_LEVEL_LPSR,
+ .run_reg = BD71828_REG_BUCK1_VOLT,
+ .run_mask = BD71828_MASK_BUCK1267_VOLT,
+ .idle_reg = BD71828_REG_BUCK1_IDLE_VOLT,
+ .idle_mask = BD71828_MASK_BUCK1267_VOLT,
+ .idle_on_mask = BD71828_MASK_IDLE_EN,
+ .suspend_reg = BD71828_REG_BUCK1_SUSP_VOLT,
+ .suspend_mask = BD71828_MASK_BUCK1267_VOLT,
+ .suspend_on_mask = BD71828_MASK_SUSP_EN,
+ .lpsr_on_mask = BD71828_MASK_LPSR_EN,
+ /*
+			 * The LPSR voltage is the same as the SUSPEND voltage.
+			 * Allow setting it so that the regulator can be
+			 * enabled in the LPSR state.
+ */
+ .lpsr_reg = BD71828_REG_BUCK1_SUSP_VOLT,
+ .lpsr_mask = BD71828_MASK_BUCK1267_VOLT,
+ },
+ .reg_inits = buck1_inits,
+ .reg_init_amnt = ARRAY_SIZE(buck1_inits),
+ },
+ {
+ .desc = {
+ .name = "buck2",
+ .of_match = of_match_ptr("BUCK2"),
+ .regulators_node = of_match_ptr("regulators"),
+ .id = BD71828_BUCK2,
+ .ops = &bd71828_dvs_buck_ops,
+ .type = REGULATOR_VOLTAGE,
+ .linear_ranges = bd71828_buck1267_volts,
+ .n_linear_ranges = ARRAY_SIZE(bd71828_buck1267_volts),
+ .n_voltages = BD71828_BUCK1267_VOLTS,
+ .enable_reg = BD71828_REG_BUCK2_EN,
+ .enable_mask = BD71828_MASK_RUN_EN,
+ .vsel_reg = BD71828_REG_BUCK2_VOLT,
+ .vsel_mask = BD71828_MASK_BUCK1267_VOLT,
+ .owner = THIS_MODULE,
+ .of_parse_cb = buck_set_hw_dvs_levels,
+ },
+ .dvs = {
+ .level_map = ROHM_DVS_LEVEL_RUN | ROHM_DVS_LEVEL_IDLE |
+ ROHM_DVS_LEVEL_SUSPEND |
+ ROHM_DVS_LEVEL_LPSR,
+ .run_reg = BD71828_REG_BUCK2_VOLT,
+ .run_mask = BD71828_MASK_BUCK1267_VOLT,
+ .idle_reg = BD71828_REG_BUCK2_IDLE_VOLT,
+ .idle_mask = BD71828_MASK_BUCK1267_VOLT,
+ .idle_on_mask = BD71828_MASK_IDLE_EN,
+ .suspend_reg = BD71828_REG_BUCK2_SUSP_VOLT,
+ .suspend_mask = BD71828_MASK_BUCK1267_VOLT,
+ .suspend_on_mask = BD71828_MASK_SUSP_EN,
+ .lpsr_on_mask = BD71828_MASK_LPSR_EN,
+ .lpsr_reg = BD71828_REG_BUCK2_SUSP_VOLT,
+ .lpsr_mask = BD71828_MASK_BUCK1267_VOLT,
+ },
+ .reg_inits = buck2_inits,
+ .reg_init_amnt = ARRAY_SIZE(buck2_inits),
+ },
+ {
+ .desc = {
+ .name = "buck3",
+ .of_match = of_match_ptr("BUCK3"),
+ .regulators_node = of_match_ptr("regulators"),
+ .id = BD71828_BUCK3,
+ .ops = &bd71828_buck_ops,
+ .type = REGULATOR_VOLTAGE,
+ .linear_ranges = bd71828_buck3_volts,
+ .n_linear_ranges = ARRAY_SIZE(bd71828_buck3_volts),
+ .n_voltages = BD71828_BUCK3_VOLTS,
+ .enable_reg = BD71828_REG_BUCK3_EN,
+ .enable_mask = BD71828_MASK_RUN_EN,
+ .vsel_reg = BD71828_REG_BUCK3_VOLT,
+ .vsel_mask = BD71828_MASK_BUCK3_VOLT,
+ .owner = THIS_MODULE,
+ .of_parse_cb = buck_set_hw_dvs_levels,
+ },
+ .dvs = {
+ /*
+			 * BUCK3 supports only a single voltage for all
+			 * states, but the rail can still be enabled or
+			 * disabled individually per state, so allow setting
+			 * all states to support enabling the power rail in
+			 * different states.
+ */
+ .level_map = ROHM_DVS_LEVEL_RUN | ROHM_DVS_LEVEL_IDLE |
+ ROHM_DVS_LEVEL_SUSPEND |
+ ROHM_DVS_LEVEL_LPSR,
+ .run_reg = BD71828_REG_BUCK3_VOLT,
+ .idle_reg = BD71828_REG_BUCK3_VOLT,
+ .suspend_reg = BD71828_REG_BUCK3_VOLT,
+ .lpsr_reg = BD71828_REG_BUCK3_VOLT,
+ .run_mask = BD71828_MASK_BUCK3_VOLT,
+ .idle_mask = BD71828_MASK_BUCK3_VOLT,
+ .suspend_mask = BD71828_MASK_BUCK3_VOLT,
+ .lpsr_mask = BD71828_MASK_BUCK3_VOLT,
+ .idle_on_mask = BD71828_MASK_IDLE_EN,
+ .suspend_on_mask = BD71828_MASK_SUSP_EN,
+ .lpsr_on_mask = BD71828_MASK_LPSR_EN,
+ },
+ },
+ {
+ .desc = {
+ .name = "buck4",
+ .of_match = of_match_ptr("BUCK4"),
+ .regulators_node = of_match_ptr("regulators"),
+ .id = BD71828_BUCK4,
+ .ops = &bd71828_buck_ops,
+ .type = REGULATOR_VOLTAGE,
+ .linear_ranges = bd71828_buck4_volts,
+ .n_linear_ranges = ARRAY_SIZE(bd71828_buck4_volts),
+ .n_voltages = BD71828_BUCK4_VOLTS,
+ .enable_reg = BD71828_REG_BUCK4_EN,
+ .enable_mask = BD71828_MASK_RUN_EN,
+ .vsel_reg = BD71828_REG_BUCK4_VOLT,
+ .vsel_mask = BD71828_MASK_BUCK4_VOLT,
+ .owner = THIS_MODULE,
+ .of_parse_cb = buck_set_hw_dvs_levels,
+ },
+ .dvs = {
+ /*
+			 * BUCK4 supports only a single voltage for all
+			 * states, but the rail can still be enabled or
+			 * disabled individually per state, so allow setting
+			 * all states to support enabling the power rail in
+			 * different states.
+ */
+ .level_map = ROHM_DVS_LEVEL_RUN | ROHM_DVS_LEVEL_IDLE |
+ ROHM_DVS_LEVEL_SUSPEND |
+ ROHM_DVS_LEVEL_LPSR,
+ .run_reg = BD71828_REG_BUCK4_VOLT,
+ .idle_reg = BD71828_REG_BUCK4_VOLT,
+ .suspend_reg = BD71828_REG_BUCK4_VOLT,
+ .lpsr_reg = BD71828_REG_BUCK4_VOLT,
+ .run_mask = BD71828_MASK_BUCK4_VOLT,
+ .idle_mask = BD71828_MASK_BUCK4_VOLT,
+ .suspend_mask = BD71828_MASK_BUCK4_VOLT,
+ .lpsr_mask = BD71828_MASK_BUCK4_VOLT,
+ .idle_on_mask = BD71828_MASK_IDLE_EN,
+ .suspend_on_mask = BD71828_MASK_SUSP_EN,
+ .lpsr_on_mask = BD71828_MASK_LPSR_EN,
+ },
+ },
+ {
+ .desc = {
+ .name = "buck5",
+ .of_match = of_match_ptr("BUCK5"),
+ .regulators_node = of_match_ptr("regulators"),
+ .id = BD71828_BUCK5,
+ .ops = &bd71828_buck_ops,
+ .type = REGULATOR_VOLTAGE,
+ .linear_ranges = bd71828_buck5_volts,
+ .n_linear_ranges = ARRAY_SIZE(bd71828_buck5_volts),
+ .n_voltages = BD71828_BUCK5_VOLTS,
+ .enable_reg = BD71828_REG_BUCK5_EN,
+ .enable_mask = BD71828_MASK_RUN_EN,
+ .vsel_reg = BD71828_REG_BUCK5_VOLT,
+ .vsel_mask = BD71828_MASK_BUCK5_VOLT,
+ .owner = THIS_MODULE,
+ .of_parse_cb = buck_set_hw_dvs_levels,
+ },
+ .dvs = {
+ /*
+			 * BUCK5 supports only a single voltage for all
+			 * states, but the rail can still be enabled or
+			 * disabled individually per state, so allow setting
+			 * all states to support enabling the power rail in
+			 * different states.
+ */
+ .level_map = ROHM_DVS_LEVEL_RUN | ROHM_DVS_LEVEL_IDLE |
+ ROHM_DVS_LEVEL_SUSPEND |
+ ROHM_DVS_LEVEL_LPSR,
+ .run_reg = BD71828_REG_BUCK5_VOLT,
+ .idle_reg = BD71828_REG_BUCK5_VOLT,
+ .suspend_reg = BD71828_REG_BUCK5_VOLT,
+ .lpsr_reg = BD71828_REG_BUCK5_VOLT,
+ .run_mask = BD71828_MASK_BUCK5_VOLT,
+ .idle_mask = BD71828_MASK_BUCK5_VOLT,
+ .suspend_mask = BD71828_MASK_BUCK5_VOLT,
+ .lpsr_mask = BD71828_MASK_BUCK5_VOLT,
+ .idle_on_mask = BD71828_MASK_IDLE_EN,
+ .suspend_on_mask = BD71828_MASK_SUSP_EN,
+ .lpsr_on_mask = BD71828_MASK_LPSR_EN,
+ },
+ },
+ {
+ .desc = {
+ .name = "buck6",
+ .of_match = of_match_ptr("BUCK6"),
+ .regulators_node = of_match_ptr("regulators"),
+ .id = BD71828_BUCK6,
+ .ops = &bd71828_dvs_buck_ops,
+ .type = REGULATOR_VOLTAGE,
+ .linear_ranges = bd71828_buck1267_volts,
+ .n_linear_ranges = ARRAY_SIZE(bd71828_buck1267_volts),
+ .n_voltages = BD71828_BUCK1267_VOLTS,
+ .enable_reg = BD71828_REG_BUCK6_EN,
+ .enable_mask = BD71828_MASK_RUN_EN,
+ .vsel_reg = BD71828_REG_BUCK6_VOLT,
+ .vsel_mask = BD71828_MASK_BUCK1267_VOLT,
+ .owner = THIS_MODULE,
+ .of_parse_cb = buck_set_hw_dvs_levels,
+ },
+ .dvs = {
+ .level_map = ROHM_DVS_LEVEL_RUN | ROHM_DVS_LEVEL_IDLE |
+ ROHM_DVS_LEVEL_SUSPEND |
+ ROHM_DVS_LEVEL_LPSR,
+ .run_reg = BD71828_REG_BUCK6_VOLT,
+ .run_mask = BD71828_MASK_BUCK1267_VOLT,
+ .idle_reg = BD71828_REG_BUCK6_IDLE_VOLT,
+ .idle_mask = BD71828_MASK_BUCK1267_VOLT,
+ .idle_on_mask = BD71828_MASK_IDLE_EN,
+ .suspend_reg = BD71828_REG_BUCK6_SUSP_VOLT,
+ .suspend_mask = BD71828_MASK_BUCK1267_VOLT,
+ .suspend_on_mask = BD71828_MASK_SUSP_EN,
+ .lpsr_on_mask = BD71828_MASK_LPSR_EN,
+ .lpsr_reg = BD71828_REG_BUCK6_SUSP_VOLT,
+ .lpsr_mask = BD71828_MASK_BUCK1267_VOLT,
+ },
+ .reg_inits = buck6_inits,
+ .reg_init_amnt = ARRAY_SIZE(buck6_inits),
+ },
+ {
+ .desc = {
+ .name = "buck7",
+ .of_match = of_match_ptr("BUCK7"),
+ .regulators_node = of_match_ptr("regulators"),
+ .id = BD71828_BUCK7,
+ .ops = &bd71828_dvs_buck_ops,
+ .type = REGULATOR_VOLTAGE,
+ .linear_ranges = bd71828_buck1267_volts,
+ .n_linear_ranges = ARRAY_SIZE(bd71828_buck1267_volts),
+ .n_voltages = BD71828_BUCK1267_VOLTS,
+ .enable_reg = BD71828_REG_BUCK7_EN,
+ .enable_mask = BD71828_MASK_RUN_EN,
+ .vsel_reg = BD71828_REG_BUCK7_VOLT,
+ .vsel_mask = BD71828_MASK_BUCK1267_VOLT,
+ .owner = THIS_MODULE,
+ .of_parse_cb = buck_set_hw_dvs_levels,
+ },
+ .dvs = {
+ .level_map = ROHM_DVS_LEVEL_RUN | ROHM_DVS_LEVEL_IDLE |
+ ROHM_DVS_LEVEL_SUSPEND |
+ ROHM_DVS_LEVEL_LPSR,
+ .run_reg = BD71828_REG_BUCK7_VOLT,
+ .run_mask = BD71828_MASK_BUCK1267_VOLT,
+ .idle_reg = BD71828_REG_BUCK7_IDLE_VOLT,
+ .idle_mask = BD71828_MASK_BUCK1267_VOLT,
+ .idle_on_mask = BD71828_MASK_IDLE_EN,
+ .suspend_reg = BD71828_REG_BUCK7_SUSP_VOLT,
+ .suspend_mask = BD71828_MASK_BUCK1267_VOLT,
+ .suspend_on_mask = BD71828_MASK_SUSP_EN,
+ .lpsr_on_mask = BD71828_MASK_LPSR_EN,
+ .lpsr_reg = BD71828_REG_BUCK7_SUSP_VOLT,
+ .lpsr_mask = BD71828_MASK_BUCK1267_VOLT,
+ },
+ .reg_inits = buck7_inits,
+ .reg_init_amnt = ARRAY_SIZE(buck7_inits),
+ },
+ {
+ .desc = {
+ .name = "ldo1",
+ .of_match = of_match_ptr("LDO1"),
+ .regulators_node = of_match_ptr("regulators"),
+ .id = BD71828_LDO1,
+ .ops = &bd71828_ldo_ops,
+ .type = REGULATOR_VOLTAGE,
+ .linear_ranges = bd71828_ldo_volts,
+ .n_linear_ranges = ARRAY_SIZE(bd71828_ldo_volts),
+ .n_voltages = BD71828_LDO_VOLTS,
+ .enable_reg = BD71828_REG_LDO1_EN,
+ .enable_mask = BD71828_MASK_RUN_EN,
+ .vsel_reg = BD71828_REG_LDO1_VOLT,
+ .vsel_mask = BD71828_MASK_LDO_VOLT,
+ .owner = THIS_MODULE,
+ .of_parse_cb = buck_set_hw_dvs_levels,
+ },
+ .dvs = {
+ /*
+			 * LDO1 supports only a single voltage for all
+			 * states, but the rail can still be enabled or
+			 * disabled individually per state, so allow setting
+			 * all states to support enabling the power rail in
+			 * different states.
+ */
+ .level_map = ROHM_DVS_LEVEL_RUN | ROHM_DVS_LEVEL_IDLE |
+ ROHM_DVS_LEVEL_SUSPEND |
+ ROHM_DVS_LEVEL_LPSR,
+ .run_reg = BD71828_REG_LDO1_VOLT,
+ .idle_reg = BD71828_REG_LDO1_VOLT,
+ .suspend_reg = BD71828_REG_LDO1_VOLT,
+ .lpsr_reg = BD71828_REG_LDO1_VOLT,
+ .run_mask = BD71828_MASK_LDO_VOLT,
+ .idle_mask = BD71828_MASK_LDO_VOLT,
+ .suspend_mask = BD71828_MASK_LDO_VOLT,
+ .lpsr_mask = BD71828_MASK_LDO_VOLT,
+ .idle_on_mask = BD71828_MASK_IDLE_EN,
+ .suspend_on_mask = BD71828_MASK_SUSP_EN,
+ .lpsr_on_mask = BD71828_MASK_LPSR_EN,
+ },
+ }, {
+ .desc = {
+ .name = "ldo2",
+ .of_match = of_match_ptr("LDO2"),
+ .regulators_node = of_match_ptr("regulators"),
+ .id = BD71828_LDO2,
+ .ops = &bd71828_ldo_ops,
+ .type = REGULATOR_VOLTAGE,
+ .linear_ranges = bd71828_ldo_volts,
+ .n_linear_ranges = ARRAY_SIZE(bd71828_ldo_volts),
+ .n_voltages = BD71828_LDO_VOLTS,
+ .enable_reg = BD71828_REG_LDO2_EN,
+ .enable_mask = BD71828_MASK_RUN_EN,
+ .vsel_reg = BD71828_REG_LDO2_VOLT,
+ .vsel_mask = BD71828_MASK_LDO_VOLT,
+ .owner = THIS_MODULE,
+ .of_parse_cb = buck_set_hw_dvs_levels,
+ },
+ .dvs = {
+ /*
+			 * LDO2 supports only a single voltage for all
+			 * states, but the rail can still be enabled or
+			 * disabled individually per state, so allow setting
+			 * all states to support enabling the power rail in
+			 * different states.
+ */
+ .level_map = ROHM_DVS_LEVEL_RUN | ROHM_DVS_LEVEL_IDLE |
+ ROHM_DVS_LEVEL_SUSPEND |
+ ROHM_DVS_LEVEL_LPSR,
+ .run_reg = BD71828_REG_LDO2_VOLT,
+ .idle_reg = BD71828_REG_LDO2_VOLT,
+ .suspend_reg = BD71828_REG_LDO2_VOLT,
+ .lpsr_reg = BD71828_REG_LDO2_VOLT,
+ .run_mask = BD71828_MASK_LDO_VOLT,
+ .idle_mask = BD71828_MASK_LDO_VOLT,
+ .suspend_mask = BD71828_MASK_LDO_VOLT,
+ .lpsr_mask = BD71828_MASK_LDO_VOLT,
+ .idle_on_mask = BD71828_MASK_IDLE_EN,
+ .suspend_on_mask = BD71828_MASK_SUSP_EN,
+ .lpsr_on_mask = BD71828_MASK_LPSR_EN,
+ },
+ }, {
+ .desc = {
+ .name = "ldo3",
+ .of_match = of_match_ptr("LDO3"),
+ .regulators_node = of_match_ptr("regulators"),
+ .id = BD71828_LDO3,
+ .ops = &bd71828_ldo_ops,
+ .type = REGULATOR_VOLTAGE,
+ .linear_ranges = bd71828_ldo_volts,
+ .n_linear_ranges = ARRAY_SIZE(bd71828_ldo_volts),
+ .n_voltages = BD71828_LDO_VOLTS,
+ .enable_reg = BD71828_REG_LDO3_EN,
+ .enable_mask = BD71828_MASK_RUN_EN,
+ .vsel_reg = BD71828_REG_LDO3_VOLT,
+ .vsel_mask = BD71828_MASK_LDO_VOLT,
+ .owner = THIS_MODULE,
+ .of_parse_cb = buck_set_hw_dvs_levels,
+ },
+ .dvs = {
+ /*
+			 * LDO3 supports only a single voltage for all
+			 * states, but the rail can still be enabled or
+			 * disabled individually per state, so allow setting
+			 * all states to support enabling the power rail in
+			 * different states.
+ */
+ .level_map = ROHM_DVS_LEVEL_RUN | ROHM_DVS_LEVEL_IDLE |
+ ROHM_DVS_LEVEL_SUSPEND |
+ ROHM_DVS_LEVEL_LPSR,
+ .run_reg = BD71828_REG_LDO3_VOLT,
+ .idle_reg = BD71828_REG_LDO3_VOLT,
+ .suspend_reg = BD71828_REG_LDO3_VOLT,
+ .lpsr_reg = BD71828_REG_LDO3_VOLT,
+ .run_mask = BD71828_MASK_LDO_VOLT,
+ .idle_mask = BD71828_MASK_LDO_VOLT,
+ .suspend_mask = BD71828_MASK_LDO_VOLT,
+ .lpsr_mask = BD71828_MASK_LDO_VOLT,
+ .idle_on_mask = BD71828_MASK_IDLE_EN,
+ .suspend_on_mask = BD71828_MASK_SUSP_EN,
+ .lpsr_on_mask = BD71828_MASK_LPSR_EN,
+ },
+
+ }, {
+ .desc = {
+ .name = "ldo4",
+ .of_match = of_match_ptr("LDO4"),
+ .regulators_node = of_match_ptr("regulators"),
+ .id = BD71828_LDO4,
+ .ops = &bd71828_ldo_ops,
+ .type = REGULATOR_VOLTAGE,
+ .linear_ranges = bd71828_ldo_volts,
+ .n_linear_ranges = ARRAY_SIZE(bd71828_ldo_volts),
+ .n_voltages = BD71828_LDO_VOLTS,
+ .enable_reg = BD71828_REG_LDO4_EN,
+ .enable_mask = BD71828_MASK_RUN_EN,
+ .vsel_reg = BD71828_REG_LDO4_VOLT,
+ .vsel_mask = BD71828_MASK_LDO_VOLT,
+ .owner = THIS_MODULE,
+ .of_parse_cb = buck_set_hw_dvs_levels,
+ },
+ .dvs = {
+ /*
+			 * LDO4 supports only a single voltage for all
+			 * states, but the rail can still be enabled or
+			 * disabled individually per state, so allow setting
+			 * all states to support enabling the power rail in
+			 * different states.
+ */
+ .level_map = ROHM_DVS_LEVEL_RUN | ROHM_DVS_LEVEL_IDLE |
+ ROHM_DVS_LEVEL_SUSPEND |
+ ROHM_DVS_LEVEL_LPSR,
+ .run_reg = BD71828_REG_LDO4_VOLT,
+ .idle_reg = BD71828_REG_LDO4_VOLT,
+ .suspend_reg = BD71828_REG_LDO4_VOLT,
+ .lpsr_reg = BD71828_REG_LDO4_VOLT,
+ .run_mask = BD71828_MASK_LDO_VOLT,
+ .idle_mask = BD71828_MASK_LDO_VOLT,
+ .suspend_mask = BD71828_MASK_LDO_VOLT,
+ .lpsr_mask = BD71828_MASK_LDO_VOLT,
+ .idle_on_mask = BD71828_MASK_IDLE_EN,
+ .suspend_on_mask = BD71828_MASK_SUSP_EN,
+ .lpsr_on_mask = BD71828_MASK_LPSR_EN,
+ },
+ }, {
+ .desc = {
+ .name = "ldo5",
+ .of_match = of_match_ptr("LDO5"),
+ .regulators_node = of_match_ptr("regulators"),
+ .id = BD71828_LDO5,
+ .ops = &bd71828_ldo_ops,
+ .type = REGULATOR_VOLTAGE,
+ .linear_ranges = bd71828_ldo_volts,
+ .n_linear_ranges = ARRAY_SIZE(bd71828_ldo_volts),
+ .n_voltages = BD71828_LDO_VOLTS,
+ .enable_reg = BD71828_REG_LDO5_EN,
+ .enable_mask = BD71828_MASK_RUN_EN,
+ .vsel_reg = BD71828_REG_LDO5_VOLT,
+ .vsel_mask = BD71828_MASK_LDO_VOLT,
+ .of_parse_cb = buck_set_hw_dvs_levels,
+ .owner = THIS_MODULE,
+ },
+ /*
+		 * LDO5 is special. Its vsel setting can be taken from two
+		 * different registers, selected by GPIO.
+		 *
+		 * This driver supports only the configuration where
+		 * BD71828_REG_LDO5_VOLT_L is used.
+ */
+ .dvs = {
+ .level_map = ROHM_DVS_LEVEL_RUN | ROHM_DVS_LEVEL_IDLE |
+ ROHM_DVS_LEVEL_SUSPEND |
+ ROHM_DVS_LEVEL_LPSR,
+ .run_reg = BD71828_REG_LDO5_VOLT,
+ .idle_reg = BD71828_REG_LDO5_VOLT,
+ .suspend_reg = BD71828_REG_LDO5_VOLT,
+ .lpsr_reg = BD71828_REG_LDO5_VOLT,
+ .run_mask = BD71828_MASK_LDO_VOLT,
+ .idle_mask = BD71828_MASK_LDO_VOLT,
+ .suspend_mask = BD71828_MASK_LDO_VOLT,
+ .lpsr_mask = BD71828_MASK_LDO_VOLT,
+ .idle_on_mask = BD71828_MASK_IDLE_EN,
+ .suspend_on_mask = BD71828_MASK_SUSP_EN,
+ .lpsr_on_mask = BD71828_MASK_LPSR_EN,
+ },
+
+ }, {
+ .desc = {
+ .name = "ldo6",
+ .of_match = of_match_ptr("LDO6"),
+ .regulators_node = of_match_ptr("regulators"),
+ .id = BD71828_LDO6,
+ .ops = &bd71828_ldo6_ops,
+ .type = REGULATOR_VOLTAGE,
+ .fixed_uV = BD71828_LDO_6_VOLTAGE,
+ .n_voltages = 1,
+ .enable_reg = BD71828_REG_LDO6_EN,
+ .enable_mask = BD71828_MASK_RUN_EN,
+ .owner = THIS_MODULE,
+ /*
+ * LDO6 only supports enable/disable for all states.
+ * Voltage for LDO6 is fixed.
+ */
+ .of_parse_cb = ldo6_parse_dt,
+ },
+ }, {
+ .desc = {
+ /* SNVS LDO in data-sheet */
+ .name = "ldo7",
+ .of_match = of_match_ptr("LDO7"),
+ .regulators_node = of_match_ptr("regulators"),
+ .id = BD71828_LDO_SNVS,
+ .ops = &bd71828_ldo_ops,
+ .type = REGULATOR_VOLTAGE,
+ .linear_ranges = bd71828_ldo_volts,
+ .n_linear_ranges = ARRAY_SIZE(bd71828_ldo_volts),
+ .n_voltages = BD71828_LDO_VOLTS,
+ .enable_reg = BD71828_REG_LDO7_EN,
+ .enable_mask = BD71828_MASK_RUN_EN,
+ .vsel_reg = BD71828_REG_LDO7_VOLT,
+ .vsel_mask = BD71828_MASK_LDO_VOLT,
+ .owner = THIS_MODULE,
+ .of_parse_cb = buck_set_hw_dvs_levels,
+ },
+ .dvs = {
+ /*
+			 * LDO7 supports only a single voltage for all
+			 * states, but the rail can still be enabled or
+			 * disabled individually per state, so allow setting
+			 * all states to support enabling the power rail in
+			 * different states.
+ */
+ .level_map = ROHM_DVS_LEVEL_RUN | ROHM_DVS_LEVEL_IDLE |
+ ROHM_DVS_LEVEL_SUSPEND |
+ ROHM_DVS_LEVEL_LPSR,
+ .run_reg = BD71828_REG_LDO7_VOLT,
+ .idle_reg = BD71828_REG_LDO7_VOLT,
+ .suspend_reg = BD71828_REG_LDO7_VOLT,
+ .lpsr_reg = BD71828_REG_LDO7_VOLT,
+ .run_mask = BD71828_MASK_LDO_VOLT,
+ .idle_mask = BD71828_MASK_LDO_VOLT,
+ .suspend_mask = BD71828_MASK_LDO_VOLT,
+ .lpsr_mask = BD71828_MASK_LDO_VOLT,
+ .idle_on_mask = BD71828_MASK_IDLE_EN,
+ .suspend_on_mask = BD71828_MASK_SUSP_EN,
+ .lpsr_on_mask = BD71828_MASK_LPSR_EN,
+ },
+
+ },
+};
+
+static int bd71828_probe(struct platform_device *pdev)
+{
+ struct rohm_regmap_dev *bd71828;
+ int i, j, ret;
+ struct regulator_config config = {
+ .dev = pdev->dev.parent,
+ };
+
+ bd71828 = dev_get_drvdata(pdev->dev.parent);
+ if (!bd71828) {
+ dev_err(&pdev->dev, "No MFD driver data\n");
+ return -EINVAL;
+ }
+
+ config.regmap = bd71828->regmap;
+
+ for (i = 0; i < ARRAY_SIZE(bd71828_rdata); i++) {
+ struct regulator_dev *rdev;
+ const struct bd71828_regulator_data *rd;
+
+ rd = &bd71828_rdata[i];
+ rdev = devm_regulator_register(&pdev->dev,
+ &rd->desc, &config);
+ if (IS_ERR(rdev)) {
+ dev_err(&pdev->dev,
+ "failed to register %s regulator\n",
+ rd->desc.name);
+ return PTR_ERR(rdev);
+ }
+ for (j = 0; j < rd->reg_init_amnt; j++) {
+ ret = regmap_update_bits(bd71828->regmap,
+ rd->reg_inits[j].reg,
+ rd->reg_inits[j].mask,
+ rd->reg_inits[j].val);
+ if (ret) {
+ dev_err(&pdev->dev,
+ "regulator %s init failed\n",
+ rd->desc.name);
+ return ret;
+ }
+ }
+ }
+ return 0;
+}
+
+static struct platform_driver bd71828_regulator = {
+ .driver = {
+ .name = "bd71828-pmic"
+ },
+ .probe = bd71828_probe,
+};
+
+module_platform_driver(bd71828_regulator);
+
+MODULE_AUTHOR("Matti Vaittinen <matti.vaittinen@fi.rohmeurope.com>");
+MODULE_DESCRIPTION("BD71828 voltage regulator driver");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:bd71828-pmic");
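bd71828_set_ramp_delay() above positions the two-bit ramp level inside its register field with val << (ffs(mask) - 1), i.e. the value is shifted up to the mask's lowest set bit before regmap_update_bits() merges it. Below is a runnable model of that ffs()-based field insertion; the 0xc0 mask is an assumed placeholder, not the real BD71828_MASK_RAMP_DELAY value.

#include <stdint.h>
#include <stdio.h>
#include <strings.h>	/* ffs() */

/* Merge 'val' into the field described by 'mask', regmap_update_bits()-style. */
static uint8_t field_write(uint8_t old, uint8_t mask, uint8_t val)
{
	return (old & ~mask) | ((uint8_t)(val << (ffs(mask) - 1)) & mask);
}

int main(void)
{
	const uint8_t ramp_mask = 0xc0;	/* assumed: field in bits 7:6 */

	/* Level 3 selects the slowest ramp the driver falls back to. */
	printf("reg = 0x%02x\n", field_write(0x15, ramp_mask, 3));
	/* prints: reg = 0xd5 */
	return 0;
}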
diff --git a/drivers/regulator/bd718x7-regulator.c b/drivers/regulator/bd718x7-regulator.c
index 13a43eee2e46..8f9b2d8eaf10 100644
--- a/drivers/regulator/bd718x7-regulator.c
+++ b/drivers/regulator/bd718x7-regulator.c
@@ -1142,28 +1142,14 @@ static const struct bd718xx_regulator_data bd71837_regulators[] = {
},
};
-struct bd718xx_pmic_inits {
- const struct bd718xx_regulator_data *r_datas;
- unsigned int r_amount;
-};
-
static int bd718xx_probe(struct platform_device *pdev)
{
struct bd718xx *mfd;
struct regulator_config config = { 0 };
- struct bd718xx_pmic_inits pmic_regulators[ROHM_CHIP_TYPE_AMOUNT] = {
- [ROHM_CHIP_TYPE_BD71837] = {
- .r_datas = bd71837_regulators,
- .r_amount = ARRAY_SIZE(bd71837_regulators),
- },
- [ROHM_CHIP_TYPE_BD71847] = {
- .r_datas = bd71847_regulators,
- .r_amount = ARRAY_SIZE(bd71847_regulators),
- },
- };
-
int i, j, err;
bool use_snvs;
+ const struct bd718xx_regulator_data *reg_data;
+ unsigned int num_reg_data;
mfd = dev_get_drvdata(pdev->dev.parent);
if (!mfd) {
@@ -1172,8 +1158,16 @@ static int bd718xx_probe(struct platform_device *pdev)
goto err;
}
- if (mfd->chip.chip_type >= ROHM_CHIP_TYPE_AMOUNT ||
- !pmic_regulators[mfd->chip.chip_type].r_datas) {
+ switch (mfd->chip.chip_type) {
+ case ROHM_CHIP_TYPE_BD71837:
+ reg_data = bd71837_regulators;
+ num_reg_data = ARRAY_SIZE(bd71837_regulators);
+ break;
+ case ROHM_CHIP_TYPE_BD71847:
+ reg_data = bd71847_regulators;
+ num_reg_data = ARRAY_SIZE(bd71847_regulators);
+ break;
+ default:
dev_err(&pdev->dev, "Unsupported chip type\n");
err = -EINVAL;
goto err;
@@ -1215,13 +1209,13 @@ static int bd718xx_probe(struct platform_device *pdev)
}
}
- for (i = 0; i < pmic_regulators[mfd->chip.chip_type].r_amount; i++) {
+ for (i = 0; i < num_reg_data; i++) {
const struct regulator_desc *desc;
struct regulator_dev *rdev;
const struct bd718xx_regulator_data *r;
- r = &pmic_regulators[mfd->chip.chip_type].r_datas[i];
+ r = &reg_data[i];
desc = &r->desc;
config.dev = pdev->dev.parent;
diff --git a/drivers/regulator/core.c b/drivers/regulator/core.c
index 03d79fee2987..d015d99cb59d 100644
--- a/drivers/regulator/core.c
+++ b/drivers/regulator/core.c
@@ -3470,6 +3470,7 @@ int regulator_set_voltage_rdev(struct regulator_dev *rdev, int min_uV,
out:
return ret;
}
+EXPORT_SYMBOL_GPL(regulator_set_voltage_rdev);
static int regulator_limit_voltage_step(struct regulator_dev *rdev,
int *current_uV, int *min_uV)
@@ -4034,6 +4035,7 @@ int regulator_get_voltage_rdev(struct regulator_dev *rdev)
return ret;
return ret - rdev->constraints->uV_offset;
}
+EXPORT_SYMBOL_GPL(regulator_get_voltage_rdev);
/**
* regulator_get_voltage - get regulator output voltage
diff --git a/drivers/regulator/da9210-regulator.c b/drivers/regulator/da9210-regulator.c
index f9448ed50e05..0cdeb6186529 100644
--- a/drivers/regulator/da9210-regulator.c
+++ b/drivers/regulator/da9210-regulator.c
@@ -131,8 +131,7 @@ static const struct of_device_id da9210_dt_ids[] = {
};
MODULE_DEVICE_TABLE(of, da9210_dt_ids);
-static int da9210_i2c_probe(struct i2c_client *i2c,
- const struct i2c_device_id *id)
+static int da9210_i2c_probe(struct i2c_client *i2c)
{
struct da9210 *chip;
struct device *dev = &i2c->dev;
@@ -228,7 +227,7 @@ static struct i2c_driver da9210_regulator_driver = {
.name = "da9210",
.of_match_table = of_match_ptr(da9210_dt_ids),
},
- .probe = da9210_i2c_probe,
+ .probe_new = da9210_i2c_probe,
.id_table = da9210_i2c_id,
};
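This is the first of several identical conversions in this series (da9210, da9211, isl9305, lp3971, ltc3676): the unused struct i2c_device_id argument is dropped and the callback moves from .probe to .probe_new. A hypothetical sketch of the pattern follows; the foo_* symbols are made up, and a driver that still needed the matched table entry could typically recover it with i2c_match_id() from <linux/i2c.h>.

/* Illustrative fragment only; foo_* names are assumptions. */
static int foo_i2c_probe(struct i2c_client *i2c)	/* id argument gone */
{
	/* Only if driver_data is still wanted: look the entry up instead. */
	const struct i2c_device_id *id = i2c_match_id(foo_i2c_id, i2c);

	return foo_setup(i2c, id ? id->driver_data : 0);
}

static struct i2c_driver foo_driver = {
	.driver		= { .name = "foo" },
	.probe_new	= foo_i2c_probe,	/* was: .probe */
	.id_table	= foo_i2c_id,
};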
diff --git a/drivers/regulator/da9211-regulator.c b/drivers/regulator/da9211-regulator.c
index 523dc1b95826..2ea4362ffa5c 100644
--- a/drivers/regulator/da9211-regulator.c
+++ b/drivers/regulator/da9211-regulator.c
@@ -416,8 +416,7 @@ static int da9211_regulator_init(struct da9211 *chip)
/*
* I2C driver interface functions
*/
-static int da9211_i2c_probe(struct i2c_client *i2c,
- const struct i2c_device_id *id)
+static int da9211_i2c_probe(struct i2c_client *i2c)
{
struct da9211 *chip;
int error, ret;
@@ -526,7 +525,7 @@ static struct i2c_driver da9211_regulator_driver = {
.name = "da9211",
.of_match_table = of_match_ptr(da9211_dt_ids),
},
- .probe = da9211_i2c_probe,
+ .probe_new = da9211_i2c_probe,
.id_table = da9211_i2c_id,
};
diff --git a/drivers/regulator/helpers.c b/drivers/regulator/helpers.c
index ca3dc3f3bb29..bb16c465426e 100644
--- a/drivers/regulator/helpers.c
+++ b/drivers/regulator/helpers.c
@@ -13,6 +13,8 @@
#include <linux/regulator/driver.h>
#include <linux/module.h>
+#include "internal.h"
+
/**
* regulator_is_enabled_regmap - standard is_enabled() for regmap users
*
@@ -881,3 +883,15 @@ void regulator_bulk_set_supply_names(struct regulator_bulk_data *consumers,
consumers[i].supply = supply_names[i];
}
EXPORT_SYMBOL_GPL(regulator_bulk_set_supply_names);
+
+/**
+ * regulator_is_equal - test whether two regulators are the same
+ *
+ * @reg1: first regulator to operate on
+ * @reg2: second regulator to operate on
+ */
+bool regulator_is_equal(struct regulator *reg1, struct regulator *reg2)
+{
+ return reg1->rdev == reg2->rdev;
+}
+EXPORT_SYMBOL_GPL(regulator_is_equal);
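A hypothetical consumer fragment showing what the new regulator_is_equal() helper is for: detecting that two supply names resolve to the same physical regulator so the rail is not enabled or accounted twice. The "vdda"/"vddd" supply names and foo_enable_supplies() are made up; the calls are the standard consumer interface.

static int foo_enable_supplies(struct device *dev)
{
	struct regulator *vdda = devm_regulator_get(dev, "vdda");
	struct regulator *vddd = devm_regulator_get(dev, "vddd");
	int ret;

	if (IS_ERR(vdda))
		return PTR_ERR(vdda);
	if (IS_ERR(vddd))
		return PTR_ERR(vddd);

	ret = regulator_enable(vdda);
	if (ret)
		return ret;

	/* Both names backed by one physical rail? One enable is enough. */
	if (regulator_is_equal(vdda, vddd))
		return 0;

	return regulator_enable(vddd);
}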
diff --git a/drivers/regulator/isl9305.c b/drivers/regulator/isl9305.c
index 978f5e903cae..cfb765986d0d 100644
--- a/drivers/regulator/isl9305.c
+++ b/drivers/regulator/isl9305.c
@@ -137,8 +137,7 @@ static const struct regmap_config isl9305_regmap = {
.cache_type = REGCACHE_RBTREE,
};
-static int isl9305_i2c_probe(struct i2c_client *i2c,
- const struct i2c_device_id *id)
+static int isl9305_i2c_probe(struct i2c_client *i2c)
{
struct regulator_config config = { };
struct isl9305_pdata *pdata = i2c->dev.platform_data;
@@ -198,7 +197,7 @@ static struct i2c_driver isl9305_regulator_driver = {
.name = "isl9305",
.of_match_table = of_match_ptr(isl9305_dt_ids),
},
- .probe = isl9305_i2c_probe,
+ .probe_new = isl9305_i2c_probe,
.id_table = isl9305_i2c_id,
};
diff --git a/drivers/regulator/lp3971.c b/drivers/regulator/lp3971.c
index bc96e65ef7c0..8be252f81b09 100644
--- a/drivers/regulator/lp3971.c
+++ b/drivers/regulator/lp3971.c
@@ -400,8 +400,7 @@ static int setup_regulators(struct lp3971 *lp3971,
return 0;
}
-static int lp3971_i2c_probe(struct i2c_client *i2c,
- const struct i2c_device_id *id)
+static int lp3971_i2c_probe(struct i2c_client *i2c)
{
struct lp3971 *lp3971;
struct lp3971_platform_data *pdata = dev_get_platdata(&i2c->dev);
@@ -449,7 +448,7 @@ static struct i2c_driver lp3971_i2c_driver = {
.driver = {
.name = "LP3971",
},
- .probe = lp3971_i2c_probe,
+ .probe_new = lp3971_i2c_probe,
.id_table = lp3971_i2c_id,
};
diff --git a/drivers/regulator/ltc3676.c b/drivers/regulator/ltc3676.c
index d934540eb8c4..e12e52c69e52 100644
--- a/drivers/regulator/ltc3676.c
+++ b/drivers/regulator/ltc3676.c
@@ -301,8 +301,7 @@ static irqreturn_t ltc3676_isr(int irq, void *dev_id)
return IRQ_HANDLED;
}
-static int ltc3676_regulator_probe(struct i2c_client *client,
- const struct i2c_device_id *id)
+static int ltc3676_regulator_probe(struct i2c_client *client)
{
struct device *dev = &client->dev;
struct regulator_init_data *init_data = dev_get_platdata(dev);
@@ -380,7 +379,7 @@ static struct i2c_driver ltc3676_driver = {
.name = DRIVER_NAME,
.of_match_table = of_match_ptr(ltc3676_of_match),
},
- .probe = ltc3676_regulator_probe,
+ .probe_new = ltc3676_regulator_probe,
.id_table = ltc3676_i2c_id,
};
module_i2c_driver(ltc3676_driver);
diff --git a/drivers/regulator/mp8859.c b/drivers/regulator/mp8859.c
new file mode 100644
index 000000000000..1d26b506ee5b
--- /dev/null
+++ b/drivers/regulator/mp8859.c
@@ -0,0 +1,156 @@
+// SPDX-License-Identifier: GPL-2.0
+//
+// Copyright (c) 2019 five technologies GmbH
+// Author: Markus Reichl <m.reichl@fivetechno.de>
+
+#include <linux/module.h>
+#include <linux/i2c.h>
+#include <linux/of.h>
+#include <linux/regulator/driver.h>
+#include <linux/regmap.h>
+
+
+#define VOL_MIN_IDX 0x00
+#define VOL_MAX_IDX 0x7ff
+
+/* Register definitions */
+#define MP8859_VOUT_L_REG	0	// 3 low bits
+#define MP8859_VOUT_H_REG	1	// 8 high bits
+#define MP8859_VOUT_GO_REG 2
+#define MP8859_IOUT_LIM_REG 3
+#define MP8859_CTL1_REG 4
+#define MP8859_CTL2_REG 5
+#define MP8859_RESERVED1_REG 6
+#define MP8859_RESERVED2_REG 7
+#define MP8859_RESERVED3_REG 8
+#define MP8859_STATUS_REG 9
+#define MP8859_INTERRUPT_REG 0x0A
+#define MP8859_MASK_REG 0x0B
+#define MP8859_ID1_REG 0x0C
+#define MP8859_MFR_ID_REG 0x27
+#define MP8859_DEV_ID_REG 0x28
+#define MP8859_IC_REV_REG 0x29
+
+#define MP8859_MAX_REG 0x29
+
+#define MP8859_GO_BIT 0x01
+
+
+static int mp8859_set_voltage_sel(struct regulator_dev *rdev, unsigned int sel)
+{
+ int ret;
+
+ ret = regmap_write(rdev->regmap, MP8859_VOUT_L_REG, sel & 0x7);
+
+ if (ret)
+ return ret;
+ ret = regmap_write(rdev->regmap, MP8859_VOUT_H_REG, sel >> 3);
+
+ if (ret)
+ return ret;
+ ret = regmap_update_bits(rdev->regmap, MP8859_VOUT_GO_REG,
+ MP8859_GO_BIT, 1);
+ return ret;
+}
+
+static int mp8859_get_voltage_sel(struct regulator_dev *rdev)
+{
+ unsigned int val_tmp;
+ unsigned int val;
+ int ret;
+
+ ret = regmap_read(rdev->regmap, MP8859_VOUT_H_REG, &val_tmp);
+
+ if (ret)
+ return ret;
+ val = val_tmp << 3;
+
+ ret = regmap_read(rdev->regmap, MP8859_VOUT_L_REG, &val_tmp);
+
+ if (ret)
+ return ret;
+ val |= val_tmp & 0x07;
+ return val;
+}
+
+static const struct regulator_linear_range mp8859_dcdc_ranges[] = {
+ REGULATOR_LINEAR_RANGE(0, VOL_MIN_IDX, VOL_MAX_IDX, 10000),
+};
+
+static const struct regmap_config mp8859_regmap = {
+ .reg_bits = 8,
+ .val_bits = 8,
+ .max_register = MP8859_MAX_REG,
+ .cache_type = REGCACHE_RBTREE,
+};
+
+static const struct regulator_ops mp8859_ops = {
+ .set_voltage_sel = mp8859_set_voltage_sel,
+ .get_voltage_sel = mp8859_get_voltage_sel,
+ .list_voltage = regulator_list_voltage_linear_range,
+};
+
+static const struct regulator_desc mp8859_regulators[] = {
+ {
+ .id = 0,
+ .type = REGULATOR_VOLTAGE,
+ .name = "mp8859_dcdc",
+ .of_match = of_match_ptr("mp8859_dcdc"),
+ .n_voltages = VOL_MAX_IDX + 1,
+ .linear_ranges = mp8859_dcdc_ranges,
+ .n_linear_ranges = 1,
+ .ops = &mp8859_ops,
+ .owner = THIS_MODULE,
+ },
+};
+
+static int mp8859_i2c_probe(struct i2c_client *i2c)
+{
+ int ret;
+ struct regulator_config config = {.dev = &i2c->dev};
+ struct regmap *regmap = devm_regmap_init_i2c(i2c, &mp8859_regmap);
+ struct regulator_dev *rdev;
+
+ if (IS_ERR(regmap)) {
+ ret = PTR_ERR(regmap);
+ dev_err(&i2c->dev, "regmap init failed: %d\n", ret);
+ return ret;
+ }
+ rdev = devm_regulator_register(&i2c->dev, &mp8859_regulators[0],
+ &config);
+
+ if (IS_ERR(rdev)) {
+ ret = PTR_ERR(rdev);
+ dev_err(&i2c->dev, "failed to register %s: %d\n",
+ mp8859_regulators[0].name, ret);
+ return ret;
+ }
+ return 0;
+}
+
+static const struct of_device_id mp8859_dt_id[] = {
+ {.compatible = "mps,mp8859"},
+ {},
+};
+MODULE_DEVICE_TABLE(of, mp8859_dt_id);
+
+static const struct i2c_device_id mp8859_i2c_id[] = {
+ { "mp8859", },
+ { },
+};
+MODULE_DEVICE_TABLE(i2c, mp8859_i2c_id);
+
+static struct i2c_driver mp8859_regulator_driver = {
+ .driver = {
+ .name = "mp8859",
+ .of_match_table = of_match_ptr(mp8859_dt_id),
+ },
+ .probe_new = mp8859_i2c_probe,
+ .id_table = mp8859_i2c_id,
+};
+
+module_i2c_driver(mp8859_regulator_driver);
+
+MODULE_DESCRIPTION("Monolithic Power Systems MP8859 voltage regulator driver");
+MODULE_AUTHOR("Markus Reichl <m.reichl@fivetechno.de>");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/regulator/mpq7920.c b/drivers/regulator/mpq7920.c
new file mode 100644
index 000000000000..54c862edf571
--- /dev/null
+++ b/drivers/regulator/mpq7920.c
@@ -0,0 +1,330 @@
+// SPDX-License-Identifier: GPL-2.0+
+//
+// mpq7920.c - regulator driver for mps mpq7920
+//
+// Copyright 2019 Monolithic Power Systems, Inc
+//
+// Author: Saravanan Sekar <sravanhome@gmail.com>
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/err.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/regulator/driver.h>
+#include <linux/regulator/of_regulator.h>
+#include <linux/i2c.h>
+#include <linux/regmap.h>
+#include "mpq7920.h"
+
+#define MPQ7920_BUCK_VOLT_RANGE \
+ ((MPQ7920_VOLT_MAX - MPQ7920_BUCK_VOLT_MIN)/MPQ7920_VOLT_STEP + 1)
+#define MPQ7920_LDO_VOLT_RANGE \
+ ((MPQ7920_VOLT_MAX - MPQ7920_LDO_VOLT_MIN)/MPQ7920_VOLT_STEP + 1)
+
+#define MPQ7920BUCK(_name, _id, _ilim) \
+ [MPQ7920_BUCK ## _id] = { \
+ .id = MPQ7920_BUCK ## _id, \
+ .name = _name, \
+ .of_match = _name, \
+ .regulators_node = "regulators", \
+ .of_parse_cb = mpq7920_parse_cb, \
+ .ops = &mpq7920_buck_ops, \
+ .min_uV = MPQ7920_BUCK_VOLT_MIN, \
+ .uV_step = MPQ7920_VOLT_STEP, \
+ .n_voltages = MPQ7920_BUCK_VOLT_RANGE, \
+ .curr_table = _ilim, \
+ .n_current_limits = ARRAY_SIZE(_ilim), \
+ .csel_reg = MPQ7920_BUCK ##_id## _REG_C, \
+ .csel_mask = MPQ7920_MASK_BUCK_ILIM, \
+ .enable_reg = MPQ7920_REG_REGULATOR_EN, \
+ .enable_mask = BIT(MPQ7920_REGULATOR_EN_OFFSET - \
+ MPQ7920_BUCK ## _id), \
+ .vsel_reg = MPQ7920_BUCK ##_id## _REG_A, \
+ .vsel_mask = MPQ7920_MASK_VREF, \
+ .active_discharge_on = MPQ7920_DISCHARGE_ON, \
+ .active_discharge_reg = MPQ7920_BUCK ##_id## _REG_B, \
+ .active_discharge_mask = MPQ7920_MASK_DISCHARGE, \
+ .soft_start_reg = MPQ7920_BUCK ##_id## _REG_C, \
+ .soft_start_mask = MPQ7920_MASK_SOFTSTART, \
+ .owner = THIS_MODULE, \
+ }
+
+#define MPQ7920LDO(_name, _id, _ops, _ilim, _ilim_sz, _creg, _cmask) \
+ [MPQ7920_LDO ## _id] = { \
+ .id = MPQ7920_LDO ## _id, \
+ .name = _name, \
+ .of_match = _name, \
+ .regulators_node = "regulators", \
+ .ops = _ops, \
+ .min_uV = MPQ7920_LDO_VOLT_MIN, \
+ .uV_step = MPQ7920_VOLT_STEP, \
+ .n_voltages = MPQ7920_LDO_VOLT_RANGE, \
+ .vsel_reg = MPQ7920_LDO ##_id## _REG_A, \
+ .vsel_mask = MPQ7920_MASK_VREF, \
+ .curr_table = _ilim, \
+ .n_current_limits = _ilim_sz, \
+ .csel_reg = _creg, \
+ .csel_mask = _cmask, \
+ .enable_reg = (_id == 1) ? 0 : MPQ7920_REG_REGULATOR_EN,\
+ .enable_mask = BIT(MPQ7920_REGULATOR_EN_OFFSET - \
+ MPQ7920_LDO ##_id + 1), \
+ .active_discharge_on = MPQ7920_DISCHARGE_ON, \
+ .active_discharge_mask = MPQ7920_MASK_DISCHARGE, \
+ .active_discharge_reg = MPQ7920_LDO ##_id## _REG_B, \
+ .type = REGULATOR_VOLTAGE, \
+ .owner = THIS_MODULE, \
+ }
+
+enum mpq7920_regulators {
+ MPQ7920_BUCK1,
+ MPQ7920_BUCK2,
+ MPQ7920_BUCK3,
+ MPQ7920_BUCK4,
+ MPQ7920_LDO1, /* LDORTC */
+ MPQ7920_LDO2,
+ MPQ7920_LDO3,
+ MPQ7920_LDO4,
+ MPQ7920_LDO5,
+ MPQ7920_MAX_REGULATORS,
+};
+
+struct mpq7920_regulator_info {
+ struct regmap *regmap;
+ struct regulator_desc *rdesc;
+};
+
+static const struct regmap_config mpq7920_regmap_config = {
+ .reg_bits = 8,
+ .val_bits = 8,
+ .max_register = 0x25,
+};
+
+/*
+ * Current limits (in uA) for ILIM1 & ILIM3
+ */
+static const unsigned int mpq7920_I_limits1[] = {
+ 4600000, 6600000, 7600000, 9300000
+};
+
+/* ILIM2 & ILIM4 */
+static const unsigned int mpq7920_I_limits2[] = {
+ 2700000, 3900000, 5100000, 6100000
+};
+
+/* LDO4 & LDO5 */
+static const unsigned int mpq7920_I_limits3[] = {
+ 300000, 700000
+};
+
+static int mpq7920_set_ramp_delay(struct regulator_dev *rdev, int ramp_delay);
+static int mpq7920_parse_cb(struct device_node *np,
+ const struct regulator_desc *rdesc,
+ struct regulator_config *config);
+
+/* RTCLDO not controllable, always ON */
+static const struct regulator_ops mpq7920_ldortc_ops = {
+ .list_voltage = regulator_list_voltage_linear,
+ .map_voltage = regulator_map_voltage_linear,
+ .get_voltage_sel = regulator_get_voltage_sel_regmap,
+ .set_voltage_sel = regulator_set_voltage_sel_regmap,
+};
+
+static const struct regulator_ops mpq7920_ldo_wo_current_ops = {
+ .enable = regulator_enable_regmap,
+ .disable = regulator_disable_regmap,
+ .is_enabled = regulator_is_enabled_regmap,
+ .list_voltage = regulator_list_voltage_linear,
+ .map_voltage = regulator_map_voltage_linear,
+ .get_voltage_sel = regulator_get_voltage_sel_regmap,
+ .set_voltage_sel = regulator_set_voltage_sel_regmap,
+ .set_active_discharge = regulator_set_active_discharge_regmap,
+};
+
+static const struct regulator_ops mpq7920_ldo_ops = {
+ .enable = regulator_enable_regmap,
+ .disable = regulator_disable_regmap,
+ .is_enabled = regulator_is_enabled_regmap,
+ .list_voltage = regulator_list_voltage_linear,
+ .map_voltage = regulator_map_voltage_linear,
+ .get_voltage_sel = regulator_get_voltage_sel_regmap,
+ .set_voltage_sel = regulator_set_voltage_sel_regmap,
+ .set_active_discharge = regulator_set_active_discharge_regmap,
+ .get_current_limit = regulator_get_current_limit_regmap,
+ .set_current_limit = regulator_set_current_limit_regmap,
+};
+
+static const struct regulator_ops mpq7920_buck_ops = {
+ .enable = regulator_enable_regmap,
+ .disable = regulator_disable_regmap,
+ .is_enabled = regulator_is_enabled_regmap,
+ .list_voltage = regulator_list_voltage_linear,
+ .map_voltage = regulator_map_voltage_linear,
+ .get_voltage_sel = regulator_get_voltage_sel_regmap,
+ .set_voltage_sel = regulator_set_voltage_sel_regmap,
+ .set_active_discharge = regulator_set_active_discharge_regmap,
+ .set_soft_start = regulator_set_soft_start_regmap,
+ .set_ramp_delay = mpq7920_set_ramp_delay,
+};
+
+static struct regulator_desc mpq7920_regulators_desc[MPQ7920_MAX_REGULATORS] = {
+ MPQ7920BUCK("buck1", 1, mpq7920_I_limits1),
+ MPQ7920BUCK("buck2", 2, mpq7920_I_limits2),
+ MPQ7920BUCK("buck3", 3, mpq7920_I_limits1),
+ MPQ7920BUCK("buck4", 4, mpq7920_I_limits2),
+ MPQ7920LDO("ldortc", 1, &mpq7920_ldortc_ops, NULL, 0, 0, 0),
+ MPQ7920LDO("ldo2", 2, &mpq7920_ldo_wo_current_ops, NULL, 0, 0, 0),
+ MPQ7920LDO("ldo3", 3, &mpq7920_ldo_wo_current_ops, NULL, 0, 0, 0),
+ MPQ7920LDO("ldo4", 4, &mpq7920_ldo_ops, mpq7920_I_limits3,
+ ARRAY_SIZE(mpq7920_I_limits3), MPQ7920_LDO4_REG_B,
+ MPQ7920_MASK_LDO_ILIM),
+ MPQ7920LDO("ldo5", 5, &mpq7920_ldo_ops, mpq7920_I_limits3,
+ ARRAY_SIZE(mpq7920_I_limits3), MPQ7920_LDO5_REG_B,
+ MPQ7920_MASK_LDO_ILIM),
+};
+
+/*
+ * DVS ramp rate BUCK1 to BUCK4
+ * 00-01: Reserved
+ * 10: 8mV/us
+ * 11: 4mV/us
+ */
+static int mpq7920_set_ramp_delay(struct regulator_dev *rdev, int ramp_delay)
+{
+ unsigned int ramp_val;
+
+ if (ramp_delay > 8000 || ramp_delay < 0)
+ return -EINVAL;
+
+ if (ramp_delay <= 4000)
+ ramp_val = 3;
+ else
+ ramp_val = 2;
+
+ return regmap_update_bits(rdev->regmap, MPQ7920_REG_CTL0,
+ MPQ7920_MASK_DVS_SLEWRATE, ramp_val << 6);
+}
+
+static int mpq7920_parse_cb(struct device_node *np,
+ const struct regulator_desc *desc,
+ struct regulator_config *config)
+{
+ uint8_t val;
+ int ret;
+ struct mpq7920_regulator_info *info = config->driver_data;
+ struct regulator_desc *rdesc = &info->rdesc[desc->id];
+
+ if (of_property_read_bool(np, "mps,buck-ovp-disable")) {
+ regmap_update_bits(config->regmap,
+ MPQ7920_BUCK1_REG_B + (rdesc->id * 4),
+ MPQ7920_MASK_OVP, MPQ7920_OVP_DISABLE);
+ }
+
+ ret = of_property_read_u8(np, "mps,buck-phase-delay", &val);
+ if (!ret) {
+ regmap_update_bits(config->regmap,
+ MPQ7920_BUCK1_REG_C + (rdesc->id * 4),
+ MPQ7920_MASK_BUCK_PHASE_DELAY,
+ (val & 3) << 4);
+ }
+
+ ret = of_property_read_u8(np, "mps,buck-softstart", &val);
+ if (!ret)
+ rdesc->soft_start_val_on = (val & 3) << 2;
+
+ return 0;
+}
+
+static void mpq7920_parse_dt(struct device *dev,
+ struct mpq7920_regulator_info *info)
+{
+ int ret;
+ struct device_node *np = dev->of_node;
+ uint8_t freq;
+
+ np = of_get_child_by_name(np, "regulators");
+ if (!np) {
+ dev_err(dev, "missing 'regulators' subnode in DT\n");
+ return;
+ }
+
+ ret = of_property_read_u8(np, "mps,switch-freq", &freq);
+ if (!ret) {
+ regmap_update_bits(info->regmap, MPQ7920_REG_CTL0,
+ MPQ7920_MASK_SWITCH_FREQ,
+ (freq & 3) << 4);
+ }
+
+ of_node_put(np);
+}
+
+static int mpq7920_i2c_probe(struct i2c_client *client)
+{
+ struct device *dev = &client->dev;
+ struct mpq7920_regulator_info *info;
+ struct regulator_config config = { NULL, };
+ struct regulator_dev *rdev;
+ struct regmap *regmap;
+ int i;
+
+ info = devm_kzalloc(dev, sizeof(struct mpq7920_regulator_info),
+ GFP_KERNEL);
+ if (!info)
+ return -ENOMEM;
+
+ info->rdesc = mpq7920_regulators_desc;
+ regmap = devm_regmap_init_i2c(client, &mpq7920_regmap_config);
+ if (IS_ERR(regmap)) {
+ dev_err(dev, "Failed to allocate regmap!\n");
+ return PTR_ERR(regmap);
+ }
+
+ i2c_set_clientdata(client, info);
+ info->regmap = regmap;
+ if (client->dev.of_node)
+ mpq7920_parse_dt(&client->dev, info);
+
+ config.dev = dev;
+ config.regmap = regmap;
+ config.driver_data = info;
+
+ for (i = 0; i < MPQ7920_MAX_REGULATORS; i++) {
+ rdev = devm_regulator_register(dev,
+ &mpq7920_regulators_desc[i],
+ &config);
+ if (IS_ERR(rdev)) {
+ dev_err(dev, "Failed to register regulator!\n");
+ return PTR_ERR(rdev);
+ }
+ }
+
+ return 0;
+}
+
+static const struct of_device_id mpq7920_of_match[] = {
+ { .compatible = "mps,mpq7920"},
+ {},
+};
+MODULE_DEVICE_TABLE(of, mpq7920_of_match);
+
+static const struct i2c_device_id mpq7920_id[] = {
+ { "mpq7920", },
+ { },
+};
+MODULE_DEVICE_TABLE(i2c, mpq7920_id);
+
+static struct i2c_driver mpq7920_regulator_driver = {
+ .driver = {
+ .name = "mpq7920",
+ .of_match_table = of_match_ptr(mpq7920_of_match),
+ },
+ .probe_new = mpq7920_i2c_probe,
+ .id_table = mpq7920_id,
+};
+module_i2c_driver(mpq7920_regulator_driver);
+
+MODULE_AUTHOR("Saravanan Sekar <sravanhome@gmail.com>");
+MODULE_DESCRIPTION("MPQ7920 PMIC regulator driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/regulator/mpq7920.h b/drivers/regulator/mpq7920.h
new file mode 100644
index 000000000000..489924655a96
--- /dev/null
+++ b/drivers/regulator/mpq7920.h
@@ -0,0 +1,69 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * mpq7920.h - Regulator definitions for mpq7920
+ *
+ * Copyright 2019 Monolithic Power Systems, Inc
+ *
+ */
+
+#ifndef __MPQ7920_H__
+#define __MPQ7920_H__
+
+#define MPQ7920_REG_CTL0 0x00
+#define MPQ7920_REG_CTL1 0x01
+#define MPQ7920_REG_CTL2 0x02
+#define MPQ7920_BUCK1_REG_A 0x03
+#define MPQ7920_BUCK1_REG_B 0x04
+#define MPQ7920_BUCK1_REG_C 0x05
+#define MPQ7920_BUCK1_REG_D 0x06
+#define MPQ7920_BUCK2_REG_A 0x07
+#define MPQ7920_BUCK2_REG_B 0x08
+#define MPQ7920_BUCK2_REG_C 0x09
+#define MPQ7920_BUCK2_REG_D 0x0a
+#define MPQ7920_BUCK3_REG_A 0x0b
+#define MPQ7920_BUCK3_REG_B 0x0c
+#define MPQ7920_BUCK3_REG_C 0x0d
+#define MPQ7920_BUCK3_REG_D 0x0e
+#define MPQ7920_BUCK4_REG_A 0x0f
+#define MPQ7920_BUCK4_REG_B 0x10
+#define MPQ7920_BUCK4_REG_C 0x11
+#define MPQ7920_BUCK4_REG_D 0x12
+#define MPQ7920_LDO1_REG_A 0x13
+#define MPQ7920_LDO1_REG_B 0x0
+#define MPQ7920_LDO2_REG_A 0x14
+#define MPQ7920_LDO2_REG_B 0x15
+#define MPQ7920_LDO2_REG_C 0x16
+#define MPQ7920_LDO3_REG_A 0x17
+#define MPQ7920_LDO3_REG_B 0x18
+#define MPQ7920_LDO3_REG_C 0x19
+#define MPQ7920_LDO4_REG_A 0x1a
+#define MPQ7920_LDO4_REG_B 0x1b
+#define MPQ7920_LDO4_REG_C 0x1c
+#define MPQ7920_LDO5_REG_A 0x1d
+#define MPQ7920_LDO5_REG_B 0x1e
+#define MPQ7920_LDO5_REG_C 0x1f
+#define MPQ7920_REG_MODE 0x20
+#define MPQ7920_REG_REGULATOR_EN 0x22
+
+#define MPQ7920_MASK_VREF 0x7f
+#define MPQ7920_MASK_BUCK_ILIM 0xc0
+#define MPQ7920_MASK_LDO_ILIM BIT(6)
+#define MPQ7920_MASK_DISCHARGE BIT(5)
+#define MPQ7920_MASK_MODE 0xc0
+#define MPQ7920_MASK_SOFTSTART 0x0c
+#define MPQ7920_MASK_SWITCH_FREQ 0x30
+#define MPQ7920_MASK_BUCK_PHASE_DELAY 0x30
+#define MPQ7920_MASK_DVS_SLEWRATE 0xc0
+#define MPQ7920_MASK_OVP 0x40
+#define MPQ7920_OVP_DISABLE ~(0x40)
+#define MPQ7920_DISCHARGE_ON BIT(5)
+
+#define MPQ7920_REGULATOR_EN_OFFSET 7
+
+/* values in uV */
+#define MPQ7920_BUCK_VOLT_MIN 400000
+#define MPQ7920_LDO_VOLT_MIN 650000
+#define MPQ7920_VOLT_MAX 3587500
+#define MPQ7920_VOLT_STEP 12500
+
+#endif /* __MPQ7920_H__ */
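With the constants above (microvolts, per the corrected comment), the MPQ7920_BUCK_VOLT_RANGE and MPQ7920_LDO_VOLT_RANGE macros in mpq7920.c evaluate to 256 and 236 selectors respectively. The arithmetic, as freestanding compile-time checks (illustrative only):

	/* (3587500 - 400000) / 12500 + 1 == 256 buck selectors */
	_Static_assert((3587500 - 400000) / 12500 + 1 == 256, "buck");
	/* (3587500 - 650000) / 12500 + 1 == 236 LDO selectors */
	_Static_assert((3587500 - 650000) / 12500 + 1 == 236, "ldo");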
diff --git a/drivers/regulator/mt6311-regulator.c b/drivers/regulator/mt6311-regulator.c
index af95449d3590..69e6af3cd505 100644
--- a/drivers/regulator/mt6311-regulator.c
+++ b/drivers/regulator/mt6311-regulator.c
@@ -85,8 +85,7 @@ static const struct regulator_desc mt6311_regulators[] = {
/*
* I2C driver interface functions
*/
-static int mt6311_i2c_probe(struct i2c_client *i2c,
- const struct i2c_device_id *id)
+static int mt6311_i2c_probe(struct i2c_client *i2c)
{
struct regulator_config config = { };
struct regulator_dev *rdev;
@@ -154,7 +153,7 @@ static struct i2c_driver mt6311_regulator_driver = {
.name = "mt6311",
.of_match_table = of_match_ptr(mt6311_dt_ids),
},
- .probe = mt6311_i2c_probe,
+ .probe_new = mt6311_i2c_probe,
.id_table = mt6311_i2c_id,
};
diff --git a/drivers/regulator/pv88060-regulator.c b/drivers/regulator/pv88060-regulator.c
index 3d3415839ba2..787ced918372 100644
--- a/drivers/regulator/pv88060-regulator.c
+++ b/drivers/regulator/pv88060-regulator.c
@@ -279,8 +279,7 @@ error_i2c:
/*
* I2C driver interface functions
*/
-static int pv88060_i2c_probe(struct i2c_client *i2c,
- const struct i2c_device_id *id)
+static int pv88060_i2c_probe(struct i2c_client *i2c)
{
struct regulator_init_data *init_data = dev_get_platdata(&i2c->dev);
struct pv88060 *chip;
@@ -385,7 +384,7 @@ static struct i2c_driver pv88060_regulator_driver = {
.name = "pv88060",
.of_match_table = of_match_ptr(pv88060_dt_ids),
},
- .probe = pv88060_i2c_probe,
+ .probe_new = pv88060_i2c_probe,
.id_table = pv88060_i2c_id,
};
diff --git a/drivers/regulator/pv88090-regulator.c b/drivers/regulator/pv88090-regulator.c
index b1d0d97ae935..784729ec2182 100644
--- a/drivers/regulator/pv88090-regulator.c
+++ b/drivers/regulator/pv88090-regulator.c
@@ -272,8 +272,7 @@ error_i2c:
/*
* I2C driver interface functions
*/
-static int pv88090_i2c_probe(struct i2c_client *i2c,
- const struct i2c_device_id *id)
+static int pv88090_i2c_probe(struct i2c_client *i2c)
{
struct regulator_init_data *init_data = dev_get_platdata(&i2c->dev);
struct pv88090 *chip;
@@ -406,7 +405,7 @@ static struct i2c_driver pv88090_regulator_driver = {
.name = "pv88090",
.of_match_table = of_match_ptr(pv88090_dt_ids),
},
- .probe = pv88090_i2c_probe,
+ .probe_new = pv88090_i2c_probe,
.id_table = pv88090_i2c_id,
};
diff --git a/drivers/regulator/rk808-regulator.c b/drivers/regulator/rk808-regulator.c
index 5b4003226484..31f79fda3238 100644
--- a/drivers/regulator/rk808-regulator.c
+++ b/drivers/regulator/rk808-regulator.c
@@ -1282,7 +1282,7 @@ static int rk808_regulator_dt_parse_pdata(struct device *dev,
}
if (!pdata->dvs_gpio[i]) {
- dev_warn(dev, "there is no dvs%d gpio\n", i);
+ dev_info(dev, "there is no dvs%d gpio\n", i);
continue;
}
diff --git a/drivers/regulator/s2mpa01.c b/drivers/regulator/s2mpa01.c
index 51f7e8b74d8c..115f59530852 100644
--- a/drivers/regulator/s2mpa01.c
+++ b/drivers/regulator/s2mpa01.c
@@ -390,5 +390,5 @@ module_platform_driver(s2mpa01_pmic_driver);
/* Module information */
MODULE_AUTHOR("Sangbeom Kim <sbkim73@samsung.com>");
MODULE_AUTHOR("Sachin Kamat <sachin.kamat@samsung.com>");
-MODULE_DESCRIPTION("SAMSUNG S2MPA01 Regulator Driver");
+MODULE_DESCRIPTION("Samsung S2MPA01 Regulator Driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/regulator/s2mps11.c b/drivers/regulator/s2mps11.c
index 4f2dc5ebffdc..23d288278957 100644
--- a/drivers/regulator/s2mps11.c
+++ b/drivers/regulator/s2mps11.c
@@ -1265,5 +1265,5 @@ module_platform_driver(s2mps11_pmic_driver);
/* Module information */
MODULE_AUTHOR("Sangbeom Kim <sbkim73@samsung.com>");
-MODULE_DESCRIPTION("SAMSUNG S2MPS11/S2MPS14/S2MPS15/S2MPU02 Regulator Driver");
+MODULE_DESCRIPTION("Samsung S2MPS11/S2MPS14/S2MPS15/S2MPU02 Regulator Driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/regulator/s5m8767.c b/drivers/regulator/s5m8767.c
index 12d6b8d2e97e..4abd3ed31f60 100644
--- a/drivers/regulator/s5m8767.c
+++ b/drivers/regulator/s5m8767.c
@@ -1015,5 +1015,5 @@ module_exit(s5m8767_pmic_exit);
/* Module information */
MODULE_AUTHOR("Sangbeom Kim <sbkim73@samsung.com>");
-MODULE_DESCRIPTION("SAMSUNG S5M8767 Regulator Driver");
+MODULE_DESCRIPTION("Samsung S5M8767 Regulator Driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/regulator/slg51000-regulator.c b/drivers/regulator/slg51000-regulator.c
index bf1a3508ebc4..44e4cecbf6de 100644
--- a/drivers/regulator/slg51000-regulator.c
+++ b/drivers/regulator/slg51000-regulator.c
@@ -439,8 +439,7 @@ static void slg51000_clear_fault_log(struct slg51000 *chip)
dev_dbg(chip->dev, "Fault log: FLT_POR\n");
}
-static int slg51000_i2c_probe(struct i2c_client *client,
- const struct i2c_device_id *id)
+static int slg51000_i2c_probe(struct i2c_client *client)
{
struct device *dev = &client->dev;
struct slg51000 *chip;
@@ -509,7 +508,7 @@ static struct i2c_driver slg51000_regulator_driver = {
.driver = {
.name = "slg51000-regulator",
},
- .probe = slg51000_i2c_probe,
+ .probe_new = slg51000_i2c_probe,
.id_table = slg51000_i2c_id,
};
diff --git a/drivers/regulator/sy8106a-regulator.c b/drivers/regulator/sy8106a-regulator.c
index 42e03b2c10a0..2222e739e62b 100644
--- a/drivers/regulator/sy8106a-regulator.c
+++ b/drivers/regulator/sy8106a-regulator.c
@@ -61,8 +61,7 @@ static const struct regulator_desc sy8106a_reg = {
/*
* I2C driver interface functions
*/
-static int sy8106a_i2c_probe(struct i2c_client *i2c,
- const struct i2c_device_id *id)
+static int sy8106a_i2c_probe(struct i2c_client *i2c)
{
struct device *dev = &i2c->dev;
struct regulator_dev *rdev;
@@ -141,7 +140,7 @@ static struct i2c_driver sy8106a_regulator_driver = {
.name = "sy8106a",
.of_match_table = of_match_ptr(sy8106a_i2c_of_match),
},
- .probe = sy8106a_i2c_probe,
+ .probe_new = sy8106a_i2c_probe,
.id_table = sy8106a_i2c_id,
};
diff --git a/drivers/regulator/sy8824x.c b/drivers/regulator/sy8824x.c
index 92adb4f3ee19..62d243f3b904 100644
--- a/drivers/regulator/sy8824x.c
+++ b/drivers/regulator/sy8824x.c
@@ -112,8 +112,7 @@ static const struct regmap_config sy8824_regmap_config = {
.val_bits = 8,
};
-static int sy8824_i2c_probe(struct i2c_client *client,
- const struct i2c_device_id *id)
+static int sy8824_i2c_probe(struct i2c_client *client)
{
struct device *dev = &client->dev;
struct device_node *np = dev->of_node;
@@ -222,7 +221,7 @@ static struct i2c_driver sy8824_regulator_driver = {
.name = "sy8824-regulator",
.of_match_table = of_match_ptr(sy8824_dt_ids),
},
- .probe = sy8824_i2c_probe,
+ .probe_new = sy8824_i2c_probe,
.id_table = sy8824_id,
};
module_i2c_driver(sy8824_regulator_driver);
diff --git a/drivers/regulator/ti-abb-regulator.c b/drivers/regulator/ti-abb-regulator.c
index 89b9314d64c9..af9abcd9c166 100644
--- a/drivers/regulator/ti-abb-regulator.c
+++ b/drivers/regulator/ti-abb-regulator.c
@@ -748,7 +748,7 @@ static int ti_abb_probe(struct platform_device *pdev)
* We may have shared interrupt register offsets which are
* write-1-to-clear between domains ensuring exclusivity.
*/
- abb->int_base = devm_ioremap_nocache(dev, res->start,
+ abb->int_base = devm_ioremap(dev, res->start,
resource_size(res));
if (!abb->int_base) {
dev_err(dev, "Unable to map '%s'\n", pname);
@@ -768,7 +768,7 @@ static int ti_abb_probe(struct platform_device *pdev)
* We may have shared efuse register offsets which are read-only
* between domains
*/
- abb->efuse_base = devm_ioremap_nocache(dev, res->start,
+ abb->efuse_base = devm_ioremap(dev, res->start,
resource_size(res));
if (!abb->efuse_base) {
dev_err(dev, "Unable to map '%s'\n", pname);
diff --git a/drivers/regulator/tps65132-regulator.c b/drivers/regulator/tps65132-regulator.c
index 7b0e38f8d627..0edc83089ba2 100644
--- a/drivers/regulator/tps65132-regulator.c
+++ b/drivers/regulator/tps65132-regulator.c
@@ -220,8 +220,7 @@ static const struct regmap_config tps65132_regmap_config = {
.wr_table = &tps65132_no_reg_table,
};
-static int tps65132_probe(struct i2c_client *client,
- const struct i2c_device_id *client_id)
+static int tps65132_probe(struct i2c_client *client)
{
struct device *dev = &client->dev;
struct tps65132_regulator *tps;
@@ -272,7 +271,7 @@ static struct i2c_driver tps65132_i2c_driver = {
.driver = {
.name = "tps65132",
},
- .probe = tps65132_probe,
+ .probe_new = tps65132_probe,
.id_table = tps65132_id,
};
diff --git a/drivers/regulator/vctrl-regulator.c b/drivers/regulator/vctrl-regulator.c
index 9a9ee8188109..cbadb1c99679 100644
--- a/drivers/regulator/vctrl-regulator.c
+++ b/drivers/regulator/vctrl-regulator.c
@@ -11,10 +11,13 @@
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_device.h>
+#include <linux/regulator/coupler.h>
#include <linux/regulator/driver.h>
#include <linux/regulator/of_regulator.h>
#include <linux/sort.h>
+#include "internal.h"
+
struct vctrl_voltage_range {
int min_uV;
int max_uV;
@@ -79,7 +82,7 @@ static int vctrl_calc_output_voltage(struct vctrl_data *vctrl, int ctrl_uV)
static int vctrl_get_voltage(struct regulator_dev *rdev)
{
struct vctrl_data *vctrl = rdev_get_drvdata(rdev);
- int ctrl_uV = regulator_get_voltage(vctrl->ctrl_reg);
+ int ctrl_uV = regulator_get_voltage_rdev(vctrl->ctrl_reg->rdev);
return vctrl_calc_output_voltage(vctrl, ctrl_uV);
}
@@ -90,16 +93,16 @@ static int vctrl_set_voltage(struct regulator_dev *rdev,
{
struct vctrl_data *vctrl = rdev_get_drvdata(rdev);
struct regulator *ctrl_reg = vctrl->ctrl_reg;
- int orig_ctrl_uV = regulator_get_voltage(ctrl_reg);
+ int orig_ctrl_uV = regulator_get_voltage_rdev(ctrl_reg->rdev);
int uV = vctrl_calc_output_voltage(vctrl, orig_ctrl_uV);
int ret;
if (req_min_uV >= uV || !vctrl->ovp_threshold)
/* voltage rising or no OVP */
- return regulator_set_voltage(
- ctrl_reg,
+ return regulator_set_voltage_rdev(ctrl_reg->rdev,
vctrl_calc_ctrl_voltage(vctrl, req_min_uV),
- vctrl_calc_ctrl_voltage(vctrl, req_max_uV));
+ vctrl_calc_ctrl_voltage(vctrl, req_max_uV),
+ PM_SUSPEND_ON);
while (uV > req_min_uV) {
int max_drop_uV = (uV * vctrl->ovp_threshold) / 100;
@@ -114,9 +117,10 @@ static int vctrl_set_voltage(struct regulator_dev *rdev,
next_uV = max_t(int, req_min_uV, uV - max_drop_uV);
next_ctrl_uV = vctrl_calc_ctrl_voltage(vctrl, next_uV);
- ret = regulator_set_voltage(ctrl_reg,
+ ret = regulator_set_voltage_rdev(ctrl_reg->rdev,
+ next_ctrl_uV,
next_ctrl_uV,
- next_ctrl_uV);
+ PM_SUSPEND_ON);
if (ret)
goto err;
@@ -130,7 +134,8 @@ static int vctrl_set_voltage(struct regulator_dev *rdev,
err:
/* Try to go back to original voltage */
- regulator_set_voltage(ctrl_reg, orig_ctrl_uV, orig_ctrl_uV);
+ regulator_set_voltage_rdev(ctrl_reg->rdev, orig_ctrl_uV, orig_ctrl_uV,
+ PM_SUSPEND_ON);
return ret;
}
@@ -155,9 +160,10 @@ static int vctrl_set_voltage_sel(struct regulator_dev *rdev,
if (selector >= vctrl->sel || !vctrl->ovp_threshold) {
/* voltage rising or no OVP */
- ret = regulator_set_voltage(ctrl_reg,
+ ret = regulator_set_voltage_rdev(ctrl_reg->rdev,
+ vctrl->vtable[selector].ctrl,
vctrl->vtable[selector].ctrl,
- vctrl->vtable[selector].ctrl);
+ PM_SUSPEND_ON);
if (!ret)
vctrl->sel = selector;
@@ -173,9 +179,10 @@ static int vctrl_set_voltage_sel(struct regulator_dev *rdev,
else
next_sel = vctrl->vtable[vctrl->sel].ovp_min_sel;
- ret = regulator_set_voltage(ctrl_reg,
+ ret = regulator_set_voltage_rdev(ctrl_reg->rdev,
vctrl->vtable[next_sel].ctrl,
- vctrl->vtable[next_sel].ctrl);
+ vctrl->vtable[next_sel].ctrl,
+ PM_SUSPEND_ON);
if (ret) {
dev_err(&rdev->dev,
"failed to set control voltage to %duV\n",
@@ -195,9 +202,10 @@ static int vctrl_set_voltage_sel(struct regulator_dev *rdev,
err:
if (vctrl->sel != orig_sel) {
/* Try to go back to original voltage */
- if (!regulator_set_voltage(ctrl_reg,
+ if (!regulator_set_voltage_rdev(ctrl_reg->rdev,
+ vctrl->vtable[orig_sel].ctrl,
vctrl->vtable[orig_sel].ctrl,
- vctrl->vtable[orig_sel].ctrl))
+ PM_SUSPEND_ON))
vctrl->sel = orig_sel;
else
dev_warn(&rdev->dev,
@@ -482,7 +490,7 @@ static int vctrl_probe(struct platform_device *pdev)
if (ret)
return ret;
- ctrl_uV = regulator_get_voltage(vctrl->ctrl_reg);
+ ctrl_uV = regulator_get_voltage_rdev(vctrl->ctrl_reg->rdev);
if (ctrl_uV < 0) {
dev_err(&pdev->dev, "failed to get control voltage\n");
return ctrl_uV;
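vctrl now drives its control regulator through the rdev-level helpers from <linux/regulator/coupler.h> (plus "internal.h" for the regulator->rdev member) instead of the consumer API. The rdev variants assume the caller already holds the regulator locks, which vctrl's ops run under, so the consumer calls could recurse on the locks, and they take an explicit suspend state. Roughly, the signatures involved (as exposed in this kernel; a sketch, not a reference):

	/* consumer API: acquires the regulator locks itself */
	int regulator_get_voltage(struct regulator *regulator);
	int regulator_set_voltage(struct regulator *regulator,
				  int min_uV, int max_uV);

	/* rdev-level helpers: locks already held by the caller;
	 * PM_SUSPEND_ON selects the normal (runtime) state */
	int regulator_get_voltage_rdev(struct regulator_dev *rdev);
	int regulator_set_voltage_rdev(struct regulator_dev *rdev,
				       int min_uV, int max_uV,
				       suspend_state_t state);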
diff --git a/drivers/regulator/vqmmc-ipq4019-regulator.c b/drivers/regulator/vqmmc-ipq4019-regulator.c
new file mode 100644
index 000000000000..6d5ae25d08d1
--- /dev/null
+++ b/drivers/regulator/vqmmc-ipq4019-regulator.c
@@ -0,0 +1,101 @@
+// SPDX-License-Identifier: GPL-2.0+
+//
+// Copyright (c) 2019 Mantas Pucka <mantas@8devices.com>
+// Copyright (c) 2019 Robert Marko <robert.marko@sartura.hr>
+//
+// Driver for IPQ4019 SD/MMC controller's I/O LDO voltage regulator
+
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+#include <linux/regulator/driver.h>
+#include <linux/regulator/machine.h>
+#include <linux/regulator/of_regulator.h>
+
+static const unsigned int ipq4019_vmmc_voltages[] = {
+ 1500000, 1800000, 2500000, 3000000,
+};
+
+static const struct regulator_ops ipq4019_regulator_voltage_ops = {
+ .list_voltage = regulator_list_voltage_table,
+ .map_voltage = regulator_map_voltage_ascend,
+ .get_voltage_sel = regulator_get_voltage_sel_regmap,
+ .set_voltage_sel = regulator_set_voltage_sel_regmap,
+};
+
+static const struct regulator_desc vmmc_regulator = {
+ .name = "vmmcq",
+ .ops = &ipq4019_regulator_voltage_ops,
+ .type = REGULATOR_VOLTAGE,
+ .owner = THIS_MODULE,
+ .volt_table = ipq4019_vmmc_voltages,
+ .n_voltages = ARRAY_SIZE(ipq4019_vmmc_voltages),
+ .vsel_reg = 0,
+ .vsel_mask = 0x3,
+};
+
+static const struct regmap_config ipq4019_vmmcq_regmap_config = {
+ .reg_bits = 32,
+ .reg_stride = 4,
+ .val_bits = 32,
+};
+
+static int ipq4019_regulator_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct regulator_init_data *init_data;
+ struct regulator_config cfg = {};
+ struct regulator_dev *rdev;
+ struct resource *res;
+ struct regmap *rmap;
+ void __iomem *base;
+
+ init_data = of_get_regulator_init_data(dev, dev->of_node,
+ &vmmc_regulator);
+ if (!init_data)
+ return -EINVAL;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ base = devm_ioremap_resource(dev, res);
+ if (IS_ERR(base))
+ return PTR_ERR(base);
+
+ rmap = devm_regmap_init_mmio(dev, base, &ipq4019_vmmcq_regmap_config);
+ if (IS_ERR(rmap))
+ return PTR_ERR(rmap);
+
+ cfg.dev = dev;
+ cfg.init_data = init_data;
+ cfg.of_node = dev->of_node;
+ cfg.regmap = rmap;
+
+ rdev = devm_regulator_register(dev, &vmmc_regulator, &cfg);
+ if (IS_ERR(rdev)) {
+ dev_err(dev, "Failed to register regulator: %ld\n",
+ PTR_ERR(rdev));
+ return PTR_ERR(rdev);
+ }
+ platform_set_drvdata(pdev, rdev);
+
+ return 0;
+}
+
+static const struct of_device_id regulator_ipq4019_of_match[] = {
+ { .compatible = "qcom,vqmmc-ipq4019-regulator", },
+ {},
+};
+
+static struct platform_driver ipq4019_regulator_driver = {
+ .probe = ipq4019_regulator_probe,
+ .driver = {
+ .name = "vqmmc-ipq4019-regulator",
+ .of_match_table = of_match_ptr(regulator_ipq4019_of_match),
+ },
+};
+module_platform_driver(ipq4019_regulator_driver);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Mantas Pucka <mantas@8devices.com>");
+MODULE_DESCRIPTION("IPQ4019 VQMMC voltage regulator");
diff --git a/drivers/rtc/rtc-m48t35.c b/drivers/rtc/rtc-m48t35.c
index d3a75d447fce..e8194f1f01a8 100644
--- a/drivers/rtc/rtc-m48t35.c
+++ b/drivers/rtc/rtc-m48t35.c
@@ -20,6 +20,16 @@
struct m48t35_rtc {
u8 pad[0x7ff8]; /* starts at 0x7ff8 */
+#ifdef CONFIG_SGI_IP27
+ u8 hour;
+ u8 min;
+ u8 sec;
+ u8 control;
+ u8 year;
+ u8 month;
+ u8 date;
+ u8 day;
+#else
u8 control;
u8 sec;
u8 min;
@@ -28,6 +38,7 @@ struct m48t35_rtc {
u8 date;
u8 month;
u8 year;
+#endif
};
#define M48T35_RTC_SET 0x80
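On SGI IP27 the eight M48T35 time registers show up in the opposite order within each aligned 4-byte group, consistent with the chip sitting on swapped byte lanes of a 32-bit bus, though that rationale is inferred from the layout rather than stated in the hunk. A freestanding check of the mirrored offsets (hypothetical struct names):

	#include <stddef.h>

	struct ip27_layout {
		unsigned char pad[0x7ff8];
		unsigned char hour, min, sec, control;
		unsigned char year, month, date, day;
	};

	struct generic_layout {
		unsigned char pad[0x7ff8];
		unsigned char control, sec, min, hour;
		unsigned char day, date, month, year;
	};

	/* each 4-byte group reads in reverse between the two layouts */
	_Static_assert(offsetof(struct ip27_layout, control) == 0x7ffb, "");
	_Static_assert(offsetof(struct generic_layout, control) == 0x7ff8, "");
	_Static_assert(offsetof(struct ip27_layout, year) == 0x7ffc, "");
	_Static_assert(offsetof(struct generic_layout, year) == 0x7fff, "");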
diff --git a/drivers/rtc/rtc-sh.c b/drivers/rtc/rtc-sh.c
index 579b3ff5c644..feb1f8e52c00 100644
--- a/drivers/rtc/rtc-sh.c
+++ b/drivers/rtc/rtc-sh.c
@@ -504,7 +504,7 @@ static int __init sh_rtc_probe(struct platform_device *pdev)
if (unlikely(!rtc->res))
return -EBUSY;
- rtc->regbase = devm_ioremap_nocache(&pdev->dev, rtc->res->start,
+ rtc->regbase = devm_ioremap(&pdev->dev, rtc->res->start,
rtc->regsize);
if (unlikely(!rtc->regbase))
return -EINVAL;
diff --git a/drivers/s390/crypto/ap_bus.c b/drivers/s390/crypto/ap_bus.c
index a1915061932e..5256e3ce84e5 100644
--- a/drivers/s390/crypto/ap_bus.c
+++ b/drivers/s390/crypto/ap_bus.c
@@ -793,8 +793,6 @@ static int ap_device_probe(struct device *dev)
drvres = ap_drv->flags & AP_DRIVER_FLAG_DEFAULT;
if (!!devres != !!drvres)
return -ENODEV;
- /* (re-)init queue's state machine */
- ap_queue_reinit_state(to_ap_queue(dev));
}
/* Add queue/card to list of active queues/cards */
diff --git a/drivers/s390/crypto/ap_bus.h b/drivers/s390/crypto/ap_bus.h
index 433b7b64368d..bb35ba4a8d24 100644
--- a/drivers/s390/crypto/ap_bus.h
+++ b/drivers/s390/crypto/ap_bus.h
@@ -261,7 +261,7 @@ void ap_queue_prepare_remove(struct ap_queue *aq);
void ap_queue_remove(struct ap_queue *aq);
void ap_queue_suspend(struct ap_device *ap_dev);
void ap_queue_resume(struct ap_device *ap_dev);
-void ap_queue_reinit_state(struct ap_queue *aq);
+void ap_queue_init_state(struct ap_queue *aq);
struct ap_card *ap_card_create(int id, int queue_depth, int raw_device_type,
int comp_device_type, unsigned int functions);
diff --git a/drivers/s390/crypto/ap_queue.c b/drivers/s390/crypto/ap_queue.c
index dad2be333d82..37c3bdc3642d 100644
--- a/drivers/s390/crypto/ap_queue.c
+++ b/drivers/s390/crypto/ap_queue.c
@@ -638,7 +638,7 @@ struct ap_queue *ap_queue_create(ap_qid_t qid, int device_type)
aq->ap_dev.device.type = &ap_queue_type;
aq->ap_dev.device_type = device_type;
aq->qid = qid;
- aq->state = AP_STATE_RESET_START;
+ aq->state = AP_STATE_UNBOUND;
aq->interrupt = AP_INTR_DISABLED;
spin_lock_init(&aq->lock);
INIT_LIST_HEAD(&aq->list);
@@ -771,10 +771,11 @@ void ap_queue_remove(struct ap_queue *aq)
spin_unlock_bh(&aq->lock);
}
-void ap_queue_reinit_state(struct ap_queue *aq)
+void ap_queue_init_state(struct ap_queue *aq)
{
spin_lock_bh(&aq->lock);
aq->state = AP_STATE_RESET_START;
ap_wait(ap_sm_event(aq, AP_EVENT_POLL));
spin_unlock_bh(&aq->lock);
}
+EXPORT_SYMBOL(ap_queue_init_state);
diff --git a/drivers/s390/crypto/zcrypt_ccamisc.c b/drivers/s390/crypto/zcrypt_ccamisc.c
index c1db64a2db21..110fe9d0cb91 100644
--- a/drivers/s390/crypto/zcrypt_ccamisc.c
+++ b/drivers/s390/crypto/zcrypt_ccamisc.c
@@ -1037,8 +1037,8 @@ static int _ip_cprb_helper(u16 cardnr, u16 domain,
prepparm = (struct iprepparm *) prepcblk->rpl_parmb;
/* do some plausibility checks on the key block */
- if (prepparm->kb.len < 120 + 5 * sizeof(uint16_t) ||
- prepparm->kb.len > 136 + 5 * sizeof(uint16_t)) {
+ if (prepparm->kb.len < 120 + 3 * sizeof(uint16_t) ||
+ prepparm->kb.len > 136 + 3 * sizeof(uint16_t)) {
DEBUG_ERR("%s reply with invalid or unknown key block\n",
__func__);
rc = -EIO;
diff --git a/drivers/s390/crypto/zcrypt_cex2a.c b/drivers/s390/crypto/zcrypt_cex2a.c
index c50f3e86cc74..7cbb384ec535 100644
--- a/drivers/s390/crypto/zcrypt_cex2a.c
+++ b/drivers/s390/crypto/zcrypt_cex2a.c
@@ -175,6 +175,7 @@ static int zcrypt_cex2a_queue_probe(struct ap_device *ap_dev)
zq->queue = aq;
zq->online = 1;
atomic_set(&zq->load, 0);
+ ap_queue_init_state(aq);
ap_queue_init_reply(aq, &zq->reply);
aq->request_timeout = CEX2A_CLEANUP_TIME,
aq->private = zq;
diff --git a/drivers/s390/crypto/zcrypt_cex2c.c b/drivers/s390/crypto/zcrypt_cex2c.c
index 35c7c6672713..c78c0d119806 100644
--- a/drivers/s390/crypto/zcrypt_cex2c.c
+++ b/drivers/s390/crypto/zcrypt_cex2c.c
@@ -220,6 +220,7 @@ static int zcrypt_cex2c_queue_probe(struct ap_device *ap_dev)
zq->queue = aq;
zq->online = 1;
atomic_set(&zq->load, 0);
+ ap_rapq(aq->qid);
rc = zcrypt_cex2c_rng_supported(aq);
if (rc < 0) {
zcrypt_queue_free(zq);
@@ -231,6 +232,7 @@ static int zcrypt_cex2c_queue_probe(struct ap_device *ap_dev)
else
zq->ops = zcrypt_msgtype(MSGTYPE06_NAME,
MSGTYPE06_VARIANT_NORNG);
+ ap_queue_init_state(aq);
ap_queue_init_reply(aq, &zq->reply);
aq->request_timeout = CEX2C_CLEANUP_TIME;
aq->private = zq;
diff --git a/drivers/s390/crypto/zcrypt_cex4.c b/drivers/s390/crypto/zcrypt_cex4.c
index 442e3d6162f7..6fabc906114c 100644
--- a/drivers/s390/crypto/zcrypt_cex4.c
+++ b/drivers/s390/crypto/zcrypt_cex4.c
@@ -381,6 +381,7 @@ static int zcrypt_cex4_queue_probe(struct ap_device *ap_dev)
zq->queue = aq;
zq->online = 1;
atomic_set(&zq->load, 0);
+ ap_queue_init_state(aq);
ap_queue_init_reply(aq, &zq->reply);
aq->request_timeout = CEX4_CLEANUP_TIME,
aq->private = zq;
diff --git a/drivers/s390/net/qeth_core.h b/drivers/s390/net/qeth_core.h
index 871d44746f5c..9575a627a1e1 100644
--- a/drivers/s390/net/qeth_core.h
+++ b/drivers/s390/net/qeth_core.h
@@ -125,12 +125,6 @@ struct qeth_routing_info {
enum qeth_routing_types type;
};
-/* IPA stuff */
-struct qeth_ipa_info {
- __u32 supported_funcs;
- __u32 enabled_funcs;
-};
-
/* SETBRIDGEPORT stuff */
enum qeth_sbp_roles {
QETH_SBP_ROLE_NONE = 0,
@@ -169,41 +163,6 @@ struct qeth_vnicc_info {
bool rx_bcast_enabled;
};
-static inline int qeth_is_adp_supported(struct qeth_ipa_info *ipa,
- enum qeth_ipa_setadp_cmd func)
-{
- return (ipa->supported_funcs & func);
-}
-
-static inline int qeth_is_ipa_supported(struct qeth_ipa_info *ipa,
- enum qeth_ipa_funcs func)
-{
- return (ipa->supported_funcs & func);
-}
-
-static inline int qeth_is_ipa_enabled(struct qeth_ipa_info *ipa,
- enum qeth_ipa_funcs func)
-{
- return (ipa->supported_funcs & ipa->enabled_funcs & func);
-}
-
-#define qeth_adp_supported(c, f) \
- qeth_is_adp_supported(&c->options.adp, f)
-#define qeth_is_supported(c, f) \
- qeth_is_ipa_supported(&c->options.ipa4, f)
-#define qeth_is_enabled(c, f) \
- qeth_is_ipa_enabled(&c->options.ipa4, f)
-#define qeth_is_supported6(c, f) \
- qeth_is_ipa_supported(&c->options.ipa6, f)
-#define qeth_is_enabled6(c, f) \
- qeth_is_ipa_enabled(&c->options.ipa6, f)
-#define qeth_is_ipafunc_supported(c, prot, f) \
- ((prot == QETH_PROT_IPV6) ? \
- qeth_is_supported6(c, f) : qeth_is_supported(c, f))
-#define qeth_is_ipafunc_enabled(c, prot, f) \
- ((prot == QETH_PROT_IPV6) ? \
- qeth_is_enabled6(c, f) : qeth_is_enabled(c, f))
-
#define QETH_IDX_FUNC_LEVEL_OSD 0x0101
#define QETH_IDX_FUNC_LEVEL_IQD 0x4108
@@ -262,7 +221,6 @@ static inline int qeth_is_ipa_enabled(struct qeth_ipa_info *ipa,
/* large receive scatter gather copy break */
#define QETH_RX_SG_CB (PAGE_SIZE >> 1)
-#define QETH_RX_PULL_LEN 256
struct qeth_hdr_layer3 {
__u8 id;
@@ -600,7 +558,6 @@ enum qeth_channel_states {
*/
enum qeth_card_states {
CARD_STATE_DOWN,
- CARD_STATE_HARDSETUP,
CARD_STATE_SOFTSETUP,
};
@@ -650,6 +607,8 @@ struct qeth_cmd_buffer {
long timeout;
unsigned char *data;
void (*finalize)(struct qeth_card *card, struct qeth_cmd_buffer *iob);
+ bool (*match)(struct qeth_cmd_buffer *iob,
+ struct qeth_cmd_buffer *reply);
void (*callback)(struct qeth_card *card, struct qeth_cmd_buffer *iob,
unsigned int data_length);
int rc;
@@ -660,6 +619,14 @@ static inline void qeth_get_cmd(struct qeth_cmd_buffer *iob)
refcount_inc(&iob->ref_count);
}
+static inline struct qeth_ipa_cmd *__ipa_reply(struct qeth_cmd_buffer *iob)
+{
+ if (!IS_IPA(iob->data))
+ return NULL;
+
+ return (struct qeth_ipa_cmd *) PDU_ENCAPSULATION(iob->data);
+}
+
static inline struct qeth_ipa_cmd *__ipa_cmd(struct qeth_cmd_buffer *iob)
{
return (struct qeth_ipa_cmd *)(iob->data + IPA_PDU_HEADER_SIZE);
@@ -735,11 +702,11 @@ enum qeth_discipline_id {
};
struct qeth_card_options {
+ struct qeth_ipa_caps ipa4;
+ struct qeth_ipa_caps ipa6;
struct qeth_routing_info route4;
- struct qeth_ipa_info ipa4;
- struct qeth_ipa_info adp; /*Adapter parameters*/
struct qeth_routing_info route6;
- struct qeth_ipa_info ipa6;
+ struct qeth_ipa_caps adp; /* Adapter parameters */
struct qeth_sbp_info sbp; /* SETBRIDGEPORT options */
struct qeth_vnicc_info vnicc; /* VNICC options */
int fake_broadcast;
@@ -769,12 +736,10 @@ struct qeth_osn_info {
struct qeth_discipline {
const struct device_type *devtype;
- int (*process_rx_buffer)(struct qeth_card *card, int budget, int *done);
- int (*recover)(void *ptr);
int (*setup) (struct ccwgroup_device *);
void (*remove) (struct ccwgroup_device *);
- int (*set_online) (struct ccwgroup_device *);
- int (*set_offline) (struct ccwgroup_device *);
+ int (*set_online)(struct qeth_card *card);
+ void (*set_offline)(struct qeth_card *card);
int (*do_ioctl)(struct net_device *dev, struct ifreq *rq, int cmd);
int (*control_event_handler)(struct qeth_card *card,
struct qeth_ipa_cmd *cmd);
@@ -862,6 +827,13 @@ static inline bool qeth_card_hw_is_reachable(struct qeth_card *card)
return card->state == CARD_STATE_SOFTSETUP;
}
+static inline void qeth_unlock_channel(struct qeth_card *card,
+ struct qeth_channel *channel)
+{
+ atomic_set(&channel->irq_pending, 0);
+ wake_up(&card->wait_q);
+}
+
struct qeth_trap_id {
__u16 lparnr;
char vmname[8];
@@ -957,18 +929,6 @@ static inline struct dst_entry *qeth_dst_check_rcu(struct sk_buff *skb, int ipv)
return dst;
}
-static inline void qeth_rx_csum(struct qeth_card *card, struct sk_buff *skb,
- u8 flags)
-{
- if ((card->dev->features & NETIF_F_RXCSUM) &&
- (flags & QETH_HDR_EXT_CSUM_TRANSP_REQ)) {
- skb->ip_summed = CHECKSUM_UNNECESSARY;
- QETH_CARD_STAT_INC(card, rx_skb_csum);
- } else {
- skb->ip_summed = CHECKSUM_NONE;
- }
-}
-
static inline void qeth_tx_csum(struct sk_buff *skb, u8 *flags, int ipv)
{
*flags |= QETH_HDR_EXT_CSUM_TRANSP_REQ;
@@ -1035,14 +995,11 @@ struct net_device *qeth_clone_netdev(struct net_device *orig);
struct qeth_card *qeth_get_card_by_busid(char *bus_id);
void qeth_set_allowed_threads(struct qeth_card *, unsigned long , int);
int qeth_threads_running(struct qeth_card *, unsigned long);
-int qeth_do_run_thread(struct qeth_card *, unsigned long);
-void qeth_clear_thread_start_bit(struct qeth_card *, unsigned long);
-void qeth_clear_thread_running_bit(struct qeth_card *, unsigned long);
int qeth_core_hardsetup_card(struct qeth_card *card, bool *carrier_ok);
int qeth_stop_channel(struct qeth_channel *channel);
+int qeth_set_offline(struct qeth_card *card, bool resetting);
void qeth_print_status_message(struct qeth_card *);
-int qeth_init_qdio_queues(struct qeth_card *);
int qeth_send_ipa_cmd(struct qeth_card *, struct qeth_cmd_buffer *,
int (*reply_cb)
(struct qeth_card *, struct qeth_reply *, unsigned long),
@@ -1065,9 +1022,6 @@ struct qeth_cmd_buffer *qeth_get_diag_cmd(struct qeth_card *card,
void qeth_notify_cmd(struct qeth_cmd_buffer *iob, int reason);
void qeth_put_cmd(struct qeth_cmd_buffer *iob);
-struct sk_buff *qeth_core_get_next_skb(struct qeth_card *,
- struct qeth_qdio_buffer *, struct qdio_buffer_element **, int *,
- struct qeth_hdr **);
void qeth_schedule_recovery(struct qeth_card *);
int qeth_poll(struct napi_struct *napi, int budget);
void qeth_clear_ipacmd_list(struct qeth_card *);
@@ -1076,9 +1030,11 @@ void qeth_clear_working_pool_list(struct qeth_card *);
void qeth_drain_output_queues(struct qeth_card *card);
void qeth_setadp_promisc_mode(struct qeth_card *card, bool enable);
int qeth_setadpparms_change_macaddr(struct qeth_card *);
-void qeth_tx_timeout(struct net_device *);
+void qeth_tx_timeout(struct net_device *, unsigned int txqueue);
void qeth_prepare_ipa_cmd(struct qeth_card *card, struct qeth_cmd_buffer *iob,
- u16 cmd_length);
+ u16 cmd_length,
+ bool (*match)(struct qeth_cmd_buffer *iob,
+ struct qeth_cmd_buffer *reply));
int qeth_query_switch_attributes(struct qeth_card *card,
struct qeth_switch_info *sw_info);
int qeth_query_card_info(struct qeth_card *card,
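The new match() member on qeth_cmd_buffer replaces the hard-coded reply matching in qeth_issue_next_read_cb(): MPC commands claim any non-IPA reply (they are issued strictly in sequence), while IPA commands compare sequence numbers. Reduced to its core, the dispatch pattern looks like this (hypothetical names, mirroring the list walk in the qeth_core_main.c hunk below):

	struct cmd {
		struct list_head list;
		bool (*match)(struct cmd *pending, struct cmd *reply);
	};

	static struct cmd *claim_reply(struct list_head *waiters,
				       struct cmd *reply)
	{
		struct cmd *tmp;

		list_for_each_entry(tmp, waiters, list)
			if (tmp->match && tmp->match(tmp, reply))
				return tmp;	/* first matching waiter wins */
		return NULL;
	}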
diff --git a/drivers/s390/net/qeth_core_main.c b/drivers/s390/net/qeth_core_main.c
index 29facb913671..9639938581f5 100644
--- a/drivers/s390/net/qeth_core_main.c
+++ b/drivers/s390/net/qeth_core_main.c
@@ -247,9 +247,6 @@ int qeth_realloc_buffer_pool(struct qeth_card *card, int bufcnt)
{
QETH_CARD_TEXT(card, 2, "realcbp");
- if (card->state != CARD_STATE_DOWN)
- return -EPERM;
-
/* TODO: steal/add buffers from/to a running card's buffer pool (?) */
qeth_clear_working_pool_list(card);
qeth_free_buffer_pool(card);
@@ -520,11 +517,10 @@ static int __qeth_issue_next_read(struct qeth_card *card)
} else {
QETH_DBF_MESSAGE(2, "error %i on device %x when starting next read ccw!\n",
rc, CARD_DEVID(card));
- atomic_set(&channel->irq_pending, 0);
+ qeth_unlock_channel(card, channel);
qeth_put_cmd(iob);
card->read_or_write_problem = 1;
qeth_schedule_recovery(card);
- wake_up(&card->wait_q);
}
return rc;
}
@@ -749,8 +745,8 @@ static void qeth_issue_next_read_cb(struct qeth_card *card,
goto out;
}
- if (IS_IPA(iob->data)) {
- cmd = (struct qeth_ipa_cmd *) PDU_ENCAPSULATION(iob->data);
+ cmd = __ipa_reply(iob);
+ if (cmd) {
cmd = qeth_check_ipa_data(card, cmd);
if (!cmd)
goto out;
@@ -759,17 +755,12 @@ static void qeth_issue_next_read_cb(struct qeth_card *card,
card->osn_info.assist_cb(card->dev, cmd);
goto out;
}
- } else {
- /* non-IPA commands should only flow during initialization */
- if (card->state != CARD_STATE_DOWN)
- goto out;
}
/* match against pending cmd requests */
spin_lock_irqsave(&card->lock, flags);
list_for_each_entry(tmp, &card->cmd_waiter_list, list) {
- if (!IS_IPA(tmp->data) ||
- __ipa_cmd(tmp)->hdr.seqno == cmd->hdr.seqno) {
+ if (tmp->match && tmp->match(tmp, iob)) {
request = tmp;
/* take the object outside the lock */
qeth_get_cmd(request);
@@ -824,7 +815,8 @@ static int qeth_set_thread_start_bit(struct qeth_card *card,
return 0;
}
-void qeth_clear_thread_start_bit(struct qeth_card *card, unsigned long thread)
+static void qeth_clear_thread_start_bit(struct qeth_card *card,
+ unsigned long thread)
{
unsigned long flags;
@@ -833,9 +825,9 @@ void qeth_clear_thread_start_bit(struct qeth_card *card, unsigned long thread)
spin_unlock_irqrestore(&card->thread_mask_lock, flags);
wake_up(&card->wait_q);
}
-EXPORT_SYMBOL_GPL(qeth_clear_thread_start_bit);
-void qeth_clear_thread_running_bit(struct qeth_card *card, unsigned long thread)
+static void qeth_clear_thread_running_bit(struct qeth_card *card,
+ unsigned long thread)
{
unsigned long flags;
@@ -844,7 +836,6 @@ void qeth_clear_thread_running_bit(struct qeth_card *card, unsigned long thread)
spin_unlock_irqrestore(&card->thread_mask_lock, flags);
wake_up_all(&card->wait_q);
}
-EXPORT_SYMBOL_GPL(qeth_clear_thread_running_bit);
static int __qeth_do_run_thread(struct qeth_card *card, unsigned long thread)
{
@@ -865,7 +856,7 @@ static int __qeth_do_run_thread(struct qeth_card *card, unsigned long thread)
return rc;
}
-int qeth_do_run_thread(struct qeth_card *card, unsigned long thread)
+static int qeth_do_run_thread(struct qeth_card *card, unsigned long thread)
{
int rc = 0;
@@ -873,7 +864,6 @@ int qeth_do_run_thread(struct qeth_card *card, unsigned long thread)
(rc = __qeth_do_run_thread(card, thread)) >= 0);
return rc;
}
-EXPORT_SYMBOL_GPL(qeth_do_run_thread);
void qeth_schedule_recovery(struct qeth_card *card)
{
@@ -881,7 +871,6 @@ void qeth_schedule_recovery(struct qeth_card *card)
if (qeth_set_thread_start_bit(card, QETH_RECOVER_THREAD) == 0)
schedule_work(&card->kernel_thread_starter);
}
-EXPORT_SYMBOL_GPL(qeth_schedule_recovery);
static int qeth_get_problem(struct qeth_card *card, struct ccw_device *cdev,
struct irb *irb)
@@ -972,8 +961,6 @@ static void qeth_irq(struct ccw_device *cdev, unsigned long intparm,
/* while we hold the ccwdev lock, this stays valid: */
gdev = dev_get_drvdata(&cdev->dev);
card = dev_get_drvdata(&gdev->dev);
- if (!card)
- return;
QETH_CARD_TEXT(card, 5, "irq");
@@ -1003,24 +990,25 @@ static void qeth_irq(struct ccw_device *cdev, unsigned long intparm,
}
channel->active_cmd = NULL;
+ qeth_unlock_channel(card, channel);
rc = qeth_check_irb_error(card, cdev, irb);
if (rc) {
/* IO was terminated, free its resources. */
if (iob)
qeth_cancel_cmd(iob, rc);
- atomic_set(&channel->irq_pending, 0);
- wake_up(&card->wait_q);
return;
}
- atomic_set(&channel->irq_pending, 0);
-
- if (irb->scsw.cmd.fctl & (SCSW_FCTL_CLEAR_FUNC))
+ if (irb->scsw.cmd.fctl & SCSW_FCTL_CLEAR_FUNC) {
channel->state = CH_STATE_STOPPED;
+ wake_up(&card->wait_q);
+ }
- if (irb->scsw.cmd.fctl & (SCSW_FCTL_HALT_FUNC))
+ if (irb->scsw.cmd.fctl & SCSW_FCTL_HALT_FUNC) {
channel->state = CH_STATE_HALTED;
+ wake_up(&card->wait_q);
+ }
if (iob && (irb->scsw.cmd.fctl & (SCSW_FCTL_CLEAR_FUNC |
SCSW_FCTL_HALT_FUNC))) {
@@ -1054,7 +1042,7 @@ static void qeth_irq(struct ccw_device *cdev, unsigned long intparm,
qeth_cancel_cmd(iob, rc);
qeth_clear_ipacmd_list(card);
qeth_schedule_recovery(card);
- goto out;
+ return;
}
}
@@ -1062,16 +1050,12 @@ static void qeth_irq(struct ccw_device *cdev, unsigned long intparm,
/* sanity check: */
if (irb->scsw.cmd.count > iob->length) {
qeth_cancel_cmd(iob, -EIO);
- goto out;
+ return;
}
if (iob->callback)
iob->callback(card, iob,
iob->length - irb->scsw.cmd.count);
}
-
-out:
- wake_up(&card->wait_q);
- return;
}
static void qeth_notify_skbs(struct qeth_qdio_out_q *q,
@@ -1198,31 +1182,6 @@ static void qeth_free_buffer_pool(struct qeth_card *card)
}
}
-static void qeth_clean_channel(struct qeth_channel *channel)
-{
- struct ccw_device *cdev = channel->ccwdev;
-
- QETH_DBF_TEXT(SETUP, 2, "freech");
-
- spin_lock_irq(get_ccwdev_lock(cdev));
- cdev->handler = NULL;
- spin_unlock_irq(get_ccwdev_lock(cdev));
-}
-
-static void qeth_setup_channel(struct qeth_channel *channel)
-{
- struct ccw_device *cdev = channel->ccwdev;
-
- QETH_DBF_TEXT(SETUP, 2, "setupch");
-
- channel->state = CH_STATE_DOWN;
- atomic_set(&channel->irq_pending, 0);
-
- spin_lock_irq(get_ccwdev_lock(cdev));
- cdev->handler = qeth_irq;
- spin_unlock_irq(get_ccwdev_lock(cdev));
-}
-
static int qeth_osa_set_output_queues(struct qeth_card *card, bool single)
{
unsigned int count = single ? 1 : card->dev->num_tx_queues;
@@ -1318,6 +1277,7 @@ static int qeth_do_start_thread(struct qeth_card *card, unsigned long thread)
return rc;
}
+static int qeth_do_reset(void *data);
static void qeth_start_kernel_thread(struct work_struct *work)
{
struct task_struct *ts;
@@ -1329,8 +1289,7 @@ static void qeth_start_kernel_thread(struct work_struct *work)
card->write.state != CH_STATE_UP)
return;
if (qeth_do_start_thread(card, QETH_RECOVER_THREAD)) {
- ts = kthread_run(card->discipline->recover, (void *)card,
- "qeth_recover");
+ ts = kthread_run(qeth_do_reset, card, "qeth_recover");
if (IS_ERR(ts)) {
qeth_clear_thread_start_bit(card, QETH_RECOVER_THREAD);
qeth_clear_thread_running_bit(card,
@@ -1395,9 +1354,6 @@ static struct qeth_card *qeth_alloc_card(struct ccwgroup_device *gdev)
if (!card->read_cmd)
goto out_read_cmd;
- qeth_setup_channel(&card->read);
- qeth_setup_channel(&card->write);
- qeth_setup_channel(&card->data);
card->qeth_service_level.seq_print = qeth_core_sl_print;
register_service_level(&card->qeth_service_level);
return card;
@@ -1467,12 +1423,38 @@ int qeth_stop_channel(struct qeth_channel *channel)
channel->active_cmd);
channel->active_cmd = NULL;
}
+ cdev->handler = NULL;
spin_unlock_irq(get_ccwdev_lock(cdev));
return rc;
}
EXPORT_SYMBOL_GPL(qeth_stop_channel);
+static int qeth_start_channel(struct qeth_channel *channel)
+{
+ struct ccw_device *cdev = channel->ccwdev;
+ int rc;
+
+ channel->state = CH_STATE_DOWN;
+ atomic_set(&channel->irq_pending, 0);
+
+ spin_lock_irq(get_ccwdev_lock(cdev));
+ cdev->handler = qeth_irq;
+ spin_unlock_irq(get_ccwdev_lock(cdev));
+
+ rc = ccw_device_set_online(cdev);
+ if (rc)
+ goto err;
+
+ return 0;
+
+err:
+ spin_lock_irq(get_ccwdev_lock(cdev));
+ cdev->handler = NULL;
+ spin_unlock_irq(get_ccwdev_lock(cdev));
+ return rc;
+}
+
static int qeth_halt_channels(struct qeth_card *card)
{
int rc1 = 0, rc2 = 0, rc3 = 0;
@@ -1698,6 +1680,13 @@ static void qeth_mpc_finalize_cmd(struct qeth_card *card,
iob->callback = qeth_release_buffer_cb;
}
+static bool qeth_mpc_match_reply(struct qeth_cmd_buffer *iob,
+ struct qeth_cmd_buffer *reply)
+{
+ /* MPC cmds are issued strictly in sequence. */
+ return !IS_IPA(reply->data);
+}
+
static struct qeth_cmd_buffer *qeth_mpc_alloc_cmd(struct qeth_card *card,
void *data,
unsigned int data_length)
@@ -1712,6 +1701,7 @@ static struct qeth_cmd_buffer *qeth_mpc_alloc_cmd(struct qeth_card *card,
qeth_setup_ccw(__ccw_from_cmd(iob), CCW_CMD_WRITE, 0, data_length,
iob->data);
iob->finalize = qeth_mpc_finalize_cmd;
+ iob->match = qeth_mpc_match_reply;
return iob;
}
@@ -1784,8 +1774,7 @@ static int qeth_send_control_data(struct qeth_card *card,
QETH_CARD_TEXT_(card, 2, " err%d", rc);
qeth_dequeue_cmd(card, iob);
qeth_put_cmd(iob);
- atomic_set(&channel->irq_pending, 0);
- wake_up(&card->wait_q);
+ qeth_unlock_channel(card, channel);
goto out;
}
@@ -2632,7 +2621,8 @@ static int qeth_init_input_buffer(struct qeth_card *card,
if ((card->options.cq == QETH_CQ_ENABLED) && (!buf->rx_skb)) {
buf->rx_skb = netdev_alloc_skb(card->dev,
- QETH_RX_PULL_LEN + ETH_HLEN);
+ ETH_HLEN +
+ sizeof(struct ipv6hdr));
if (!buf->rx_skb)
return 1;
}
@@ -2673,7 +2663,7 @@ static unsigned int qeth_tx_select_bulk_max(struct qeth_card *card,
return card->ssqd.mmwc ? card->ssqd.mmwc : 1;
}
-int qeth_init_qdio_queues(struct qeth_card *card)
+static int qeth_init_qdio_queues(struct qeth_card *card)
{
unsigned int i;
int rc;
@@ -2721,7 +2711,6 @@ int qeth_init_qdio_queues(struct qeth_card *card)
}
return 0;
}
-EXPORT_SYMBOL_GPL(qeth_init_qdio_queues);
static void qeth_ipa_finalize_cmd(struct qeth_card *card,
struct qeth_cmd_buffer *iob)
@@ -2733,7 +2722,9 @@ static void qeth_ipa_finalize_cmd(struct qeth_card *card,
}
void qeth_prepare_ipa_cmd(struct qeth_card *card, struct qeth_cmd_buffer *iob,
- u16 cmd_length)
+ u16 cmd_length,
+ bool (*match)(struct qeth_cmd_buffer *iob,
+ struct qeth_cmd_buffer *reply))
{
u8 prot_type = qeth_mpc_select_prot_type(card);
u16 total_length = iob->length;
@@ -2741,6 +2732,7 @@ void qeth_prepare_ipa_cmd(struct qeth_card *card, struct qeth_cmd_buffer *iob,
qeth_setup_ccw(__ccw_from_cmd(iob), CCW_CMD_WRITE, 0, total_length,
iob->data);
iob->finalize = qeth_ipa_finalize_cmd;
+ iob->match = match;
memcpy(iob->data, IPA_PDU_HEADER, IPA_PDU_HEADER_SIZE);
memcpy(QETH_IPA_PDU_LEN_TOTAL(iob->data), &total_length, 2);
@@ -2753,6 +2745,14 @@ void qeth_prepare_ipa_cmd(struct qeth_card *card, struct qeth_cmd_buffer *iob,
}
EXPORT_SYMBOL_GPL(qeth_prepare_ipa_cmd);
+static bool qeth_ipa_match_reply(struct qeth_cmd_buffer *iob,
+ struct qeth_cmd_buffer *reply)
+{
+ struct qeth_ipa_cmd *ipa_reply = __ipa_reply(reply);
+
+ return ipa_reply && (__ipa_cmd(iob)->hdr.seqno == ipa_reply->hdr.seqno);
+}
+
struct qeth_cmd_buffer *qeth_ipa_alloc_cmd(struct qeth_card *card,
enum qeth_ipa_cmds cmd_code,
enum qeth_prot_versions prot,
@@ -2768,7 +2768,7 @@ struct qeth_cmd_buffer *qeth_ipa_alloc_cmd(struct qeth_card *card,
if (!iob)
return NULL;
- qeth_prepare_ipa_cmd(card, iob, data_length);
+ qeth_prepare_ipa_cmd(card, iob, data_length, qeth_ipa_match_reply);
hdr = &__ipa_cmd(iob)->hdr;
hdr->command = cmd_code;
@@ -2867,7 +2867,7 @@ static int qeth_query_setadapterparms_cb(struct qeth_card *card,
cmd->data.setadapterparms.data.query_cmds_supp.lan_type;
QETH_CARD_TEXT_(card, 2, "lnk %d", card->info.link_type);
}
- card->options.adp.supported_funcs =
+ card->options.adp.supported =
cmd->data.setadapterparms.data.query_cmds_supp.supported_cmds;
return 0;
}
@@ -2923,8 +2923,8 @@ static int qeth_query_ipassists_cb(struct qeth_card *card,
case IPA_RC_NOTSUPP:
case IPA_RC_L2_UNSUPPORTED_CMD:
QETH_CARD_TEXT(card, 2, "ipaunsup");
- card->options.ipa4.supported_funcs |= IPA_SETADAPTERPARMS;
- card->options.ipa6.supported_funcs |= IPA_SETADAPTERPARMS;
+ card->options.ipa4.supported |= IPA_SETADAPTERPARMS;
+ card->options.ipa6.supported |= IPA_SETADAPTERPARMS;
return -EOPNOTSUPP;
default:
QETH_DBF_MESSAGE(1, "IPA_CMD_QIPASSIST on device %x: Unhandled rc=%#x\n",
@@ -2932,13 +2932,11 @@ static int qeth_query_ipassists_cb(struct qeth_card *card,
return -EIO;
}
- if (cmd->hdr.prot_version == QETH_PROT_IPV4) {
- card->options.ipa4.supported_funcs = cmd->hdr.ipa_supported;
- card->options.ipa4.enabled_funcs = cmd->hdr.ipa_enabled;
- } else if (cmd->hdr.prot_version == QETH_PROT_IPV6) {
- card->options.ipa6.supported_funcs = cmd->hdr.ipa_supported;
- card->options.ipa6.enabled_funcs = cmd->hdr.ipa_enabled;
- } else
+ if (cmd->hdr.prot_version == QETH_PROT_IPV4)
+ card->options.ipa4 = cmd->hdr.assists;
+ else if (cmd->hdr.prot_version == QETH_PROT_IPV6)
+ card->options.ipa6 = cmd->hdr.assists;
+ else
QETH_DBF_MESSAGE(1, "IPA_CMD_QIPASSIST on device %x: Flawed LIC detected\n",
CARD_DEVID(card));
return 0;
@@ -3106,7 +3104,6 @@ int qeth_hw_trap(struct qeth_card *card, enum qeth_diags_trap_action action)
}
return qeth_send_ipa_cmd(card, iob, qeth_hw_trap_cb, NULL);
}
-EXPORT_SYMBOL_GPL(qeth_hw_trap);
static int qeth_check_qdio_errors(struct qeth_card *card,
struct qdio_buffer *buf,
@@ -3409,7 +3406,7 @@ static void qeth_qdio_start_poll(struct ccw_device *ccwdev, int queue,
struct qeth_card *card = (struct qeth_card *)card_ptr;
if (card->dev->flags & IFF_UP)
- napi_schedule(&card->napi);
+ napi_schedule_irqoff(&card->napi);
}
int qeth_configure_cq(struct qeth_card *card, enum qeth_cq cq)
@@ -4316,7 +4313,7 @@ int qeth_set_access_ctrl_online(struct qeth_card *card, int fallback)
return rc;
}
-void qeth_tx_timeout(struct net_device *dev)
+void qeth_tx_timeout(struct net_device *dev, unsigned int txqueue)
{
struct qeth_card *card;
@@ -4697,7 +4694,7 @@ static void qeth_determine_capabilities(struct qeth_card *card)
QETH_CARD_TEXT(card, 2, "detcapab");
if (!ddev->online) {
ddev_offline = 1;
- rc = ccw_device_set_online(ddev);
+ rc = qeth_start_channel(channel);
if (rc) {
QETH_CARD_TEXT_(card, 2, "3err%d", rc);
goto out;
@@ -4872,9 +4869,6 @@ out_free_nothing:
static void qeth_core_free_card(struct qeth_card *card)
{
QETH_CARD_TEXT(card, 2, "freecrd");
- qeth_clean_channel(&card->read);
- qeth_clean_channel(&card->write);
- qeth_clean_channel(&card->data);
qeth_put_cmd(card->read_cmd);
destroy_workqueue(card->event_wq);
unregister_service_level(&card->qeth_service_level);
@@ -4937,13 +4931,14 @@ retry:
qeth_stop_channel(&card->write);
qeth_stop_channel(&card->read);
qdio_free(CARD_DDEV(card));
- rc = ccw_device_set_online(CARD_RDEV(card));
+
+ rc = qeth_start_channel(&card->read);
if (rc)
goto retriable;
- rc = ccw_device_set_online(CARD_WDEV(card));
+ rc = qeth_start_channel(&card->write);
if (rc)
goto retriable;
- rc = ccw_device_set_online(CARD_DDEV(card));
+ rc = qeth_start_channel(&card->data);
if (rc)
goto retriable;
retriable:
@@ -5004,9 +4999,9 @@ retriable:
*carrier_ok = true;
}
- card->options.ipa4.supported_funcs = 0;
- card->options.ipa6.supported_funcs = 0;
- card->options.adp.supported_funcs = 0;
+ card->options.ipa4.supported = 0;
+ card->options.ipa6.supported = 0;
+ card->options.adp.supported = 0;
card->options.sbp.supported_funcs = 0;
card->info.diagass_support = 0;
rc = qeth_query_ipassists(card, QETH_PROT_IPV4);
@@ -5038,6 +5033,12 @@ retriable:
if (rc)
goto out;
+ rc = qeth_init_qdio_queues(card);
+ if (rc) {
+ QETH_CARD_TEXT_(card, 2, "9err%d", rc);
+ goto out;
+ }
+
return 0;
out:
dev_warn(&card->gdev->dev, "The qeth device driver failed to recover "
@@ -5048,6 +5049,204 @@ out:
}
EXPORT_SYMBOL_GPL(qeth_core_hardsetup_card);
+static int qeth_set_online(struct qeth_card *card)
+{
+ int rc;
+
+ mutex_lock(&card->discipline_mutex);
+ mutex_lock(&card->conf_mutex);
+ QETH_CARD_TEXT(card, 2, "setonlin");
+
+ rc = card->discipline->set_online(card);
+
+ mutex_unlock(&card->conf_mutex);
+ mutex_unlock(&card->discipline_mutex);
+
+ return rc;
+}
+
+int qeth_set_offline(struct qeth_card *card, bool resetting)
+{
+ int rc, rc2, rc3;
+
+ mutex_lock(&card->discipline_mutex);
+ mutex_lock(&card->conf_mutex);
+ QETH_CARD_TEXT(card, 3, "setoffl");
+
+ if ((!resetting && card->info.hwtrap) || card->info.hwtrap == 2) {
+ qeth_hw_trap(card, QETH_DIAGS_TRAP_DISARM);
+ card->info.hwtrap = 1;
+ }
+
+ rtnl_lock();
+ card->info.open_when_online = card->dev->flags & IFF_UP;
+ dev_close(card->dev);
+ netif_device_detach(card->dev);
+ netif_carrier_off(card->dev);
+ rtnl_unlock();
+
+ card->discipline->set_offline(card);
+
+ rc = qeth_stop_channel(&card->data);
+ rc2 = qeth_stop_channel(&card->write);
+ rc3 = qeth_stop_channel(&card->read);
+ if (!rc)
+ rc = (rc2) ? rc2 : rc3;
+ if (rc)
+ QETH_CARD_TEXT_(card, 2, "1err%d", rc);
+ qdio_free(CARD_DDEV(card));
+
+ /* let user_space know that device is offline */
+ kobject_uevent(&card->gdev->dev.kobj, KOBJ_CHANGE);
+
+ mutex_unlock(&card->conf_mutex);
+ mutex_unlock(&card->discipline_mutex);
+ return 0;
+}
+EXPORT_SYMBOL_GPL(qeth_set_offline);
+
+static int qeth_do_reset(void *data)
+{
+ struct qeth_card *card = data;
+ int rc;
+
+ QETH_CARD_TEXT(card, 2, "recover1");
+ if (!qeth_do_run_thread(card, QETH_RECOVER_THREAD))
+ return 0;
+ QETH_CARD_TEXT(card, 2, "recover2");
+ dev_warn(&card->gdev->dev,
+ "A recovery process has been started for the device\n");
+
+ qeth_set_offline(card, true);
+ rc = qeth_set_online(card);
+ if (!rc) {
+ dev_info(&card->gdev->dev,
+ "Device successfully recovered!\n");
+ } else {
+ ccwgroup_set_offline(card->gdev);
+ dev_warn(&card->gdev->dev,
+ "The qeth device driver failed to recover an error on the device\n");
+ }
+ qeth_clear_thread_start_bit(card, QETH_RECOVER_THREAD);
+ qeth_clear_thread_running_bit(card, QETH_RECOVER_THREAD);
+ return 0;
+}
+
+#if IS_ENABLED(CONFIG_QETH_L3)
+static void qeth_l3_rebuild_skb(struct qeth_card *card, struct sk_buff *skb,
+ struct qeth_hdr *hdr)
+{
+ struct af_iucv_trans_hdr *iucv = (struct af_iucv_trans_hdr *) skb->data;
+ struct qeth_hdr_layer3 *l3_hdr = &hdr->hdr.l3;
+ struct net_device *dev = skb->dev;
+
+ if (IS_IQD(card) && iucv->magic == ETH_P_AF_IUCV) {
+ dev_hard_header(skb, dev, ETH_P_AF_IUCV, dev->dev_addr,
+ "FAKELL", skb->len);
+ return;
+ }
+
+ if (!(l3_hdr->flags & QETH_HDR_PASSTHRU)) {
+ u16 prot = (l3_hdr->flags & QETH_HDR_IPV6) ? ETH_P_IPV6 :
+ ETH_P_IP;
+ unsigned char tg_addr[ETH_ALEN];
+
+ skb_reset_network_header(skb);
+ switch (l3_hdr->flags & QETH_HDR_CAST_MASK) {
+ case QETH_CAST_MULTICAST:
+ if (prot == ETH_P_IP)
+ ip_eth_mc_map(ip_hdr(skb)->daddr, tg_addr);
+ else
+ ipv6_eth_mc_map(&ipv6_hdr(skb)->daddr, tg_addr);
+ QETH_CARD_STAT_INC(card, rx_multicast);
+ break;
+ case QETH_CAST_BROADCAST:
+ ether_addr_copy(tg_addr, dev->broadcast);
+ QETH_CARD_STAT_INC(card, rx_multicast);
+ break;
+ default:
+ if (card->options.sniffer)
+ skb->pkt_type = PACKET_OTHERHOST;
+ ether_addr_copy(tg_addr, dev->dev_addr);
+ }
+
+ if (l3_hdr->ext_flags & QETH_HDR_EXT_SRC_MAC_ADDR)
+ dev_hard_header(skb, dev, prot, tg_addr,
+ &l3_hdr->next_hop.rx.src_mac, skb->len);
+ else
+ dev_hard_header(skb, dev, prot, tg_addr, "FAKELL",
+ skb->len);
+ }
+
+ /* copy VLAN tag from hdr into skb */
+ if (!card->options.sniffer &&
+ (l3_hdr->ext_flags & (QETH_HDR_EXT_VLAN_FRAME |
+ QETH_HDR_EXT_INCLUDE_VLAN_TAG))) {
+ u16 tag = (l3_hdr->ext_flags & QETH_HDR_EXT_VLAN_FRAME) ?
+ l3_hdr->vlan_id :
+ l3_hdr->next_hop.rx.vlan_id;
+
+ __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), tag);
+ }
+}
+#endif
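
A note on the "FAKELL" literal above: HiperSockets (IQD) and non-passthrough layer-3 frames arrive without a link-layer header, so the driver synthesizes one. dev_hard_header() prepends an Ethernet header built from the computed destination address and, when the adapter did not supply a source MAC, the 6-byte placeholder string "FAKELL" (exactly ETH_ALEN characters):

	/* sketch: fabricate an Ethernet header for a bare IP payload */
	dev_hard_header(skb, dev, ETH_P_IP, tg_addr, "FAKELL", skb->len);
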
+
+static void qeth_receive_skb(struct qeth_card *card, struct sk_buff *skb,
+ struct qeth_hdr *hdr, bool uses_frags)
+{
+ struct napi_struct *napi = &card->napi;
+ bool is_cso;
+
+ switch (hdr->hdr.l2.id) {
+ case QETH_HEADER_TYPE_OSN:
+ skb_push(skb, sizeof(*hdr));
+ skb_copy_to_linear_data(skb, hdr, sizeof(*hdr));
+ QETH_CARD_STAT_ADD(card, rx_bytes, skb->len);
+ QETH_CARD_STAT_INC(card, rx_packets);
+
+ card->osn_info.data_cb(skb);
+ return;
+#if IS_ENABLED(CONFIG_QETH_L3)
+ case QETH_HEADER_TYPE_LAYER3:
+ qeth_l3_rebuild_skb(card, skb, hdr);
+ is_cso = hdr->hdr.l3.ext_flags & QETH_HDR_EXT_CSUM_TRANSP_REQ;
+ break;
+#endif
+ case QETH_HEADER_TYPE_LAYER2:
+ is_cso = hdr->hdr.l2.flags[1] & QETH_HDR_EXT_CSUM_TRANSP_REQ;
+ break;
+ default:
+ /* never happens */
+ if (uses_frags)
+ napi_free_frags(napi);
+ else
+ dev_kfree_skb_any(skb);
+ return;
+ }
+
+ if (is_cso && (card->dev->features & NETIF_F_RXCSUM)) {
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
+ QETH_CARD_STAT_INC(card, rx_skb_csum);
+ } else {
+ skb->ip_summed = CHECKSUM_NONE;
+ }
+
+ QETH_CARD_STAT_ADD(card, rx_bytes, skb->len);
+ QETH_CARD_STAT_INC(card, rx_packets);
+ if (skb_is_nonlinear(skb)) {
+ QETH_CARD_STAT_INC(card, rx_sg_skbs);
+ QETH_CARD_STAT_ADD(card, rx_sg_frags,
+ skb_shinfo(skb)->nr_frags);
+ }
+
+ if (uses_frags) {
+ napi_gro_frags(napi);
+ } else {
+ skb->protocol = eth_type_trans(skb, skb->dev);
+ napi_gro_receive(napi, skb);
+ }
+}
+
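
Two different GRO entry points are used at the end of qeth_receive_skb() above: napi_gro_frags() consumes the skb previously handed out by napi_get_frags() and parses the MAC header itself, while napi_gro_receive() requires skb->protocol to be set first (hence the eth_type_trans() call). A sketch of the frags path, with page/off/len standing in for the driver's real buffer bookkeeping:

	struct sk_buff *skb = napi_get_frags(napi);	/* NAPI-owned skb */

	if (skb) {
		skb_add_rx_frag(skb, 0, page, off, len, len);
		napi_gro_frags(napi);	/* consumes the skb */
	}
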
static void qeth_create_skb_frag(struct sk_buff *skb, char *data, int data_len)
{
struct page *page = virt_to_page(data);
@@ -5064,17 +5263,20 @@ static inline int qeth_is_last_sbale(struct qdio_buffer_element *sbale)
return (sbale->eflags & SBAL_EFLAGS_LAST_ENTRY);
}
-struct sk_buff *qeth_core_get_next_skb(struct qeth_card *card,
- struct qeth_qdio_buffer *qethbuffer,
- struct qdio_buffer_element **__element, int *__offset,
- struct qeth_hdr **hdr)
+static int qeth_extract_skb(struct qeth_card *card,
+ struct qeth_qdio_buffer *qethbuffer,
+ struct qdio_buffer_element **__element,
+ int *__offset)
{
struct qdio_buffer_element *element = *__element;
struct qdio_buffer *buffer = qethbuffer->buffer;
+ struct napi_struct *napi = &card->napi;
unsigned int linear_len = 0;
+ bool uses_frags = false;
int offset = *__offset;
bool use_rx_sg = false;
unsigned int headroom;
+ struct qeth_hdr *hdr;
struct sk_buff *skb;
int skb_len = 0;
@@ -5082,42 +5284,42 @@ next_packet:
/* qeth_hdr must not cross element boundaries */
while (element->length < offset + sizeof(struct qeth_hdr)) {
if (qeth_is_last_sbale(element))
- return NULL;
+ return -ENODATA;
element++;
offset = 0;
}
- *hdr = element->addr + offset;
- offset += sizeof(struct qeth_hdr);
+ hdr = element->addr + offset;
+ offset += sizeof(*hdr);
skb = NULL;
- switch ((*hdr)->hdr.l2.id) {
+ switch (hdr->hdr.l2.id) {
case QETH_HEADER_TYPE_LAYER2:
- skb_len = (*hdr)->hdr.l2.pkt_length;
+ skb_len = hdr->hdr.l2.pkt_length;
linear_len = ETH_HLEN;
headroom = 0;
break;
case QETH_HEADER_TYPE_LAYER3:
- skb_len = (*hdr)->hdr.l3.length;
+ skb_len = hdr->hdr.l3.length;
if (!IS_LAYER3(card)) {
QETH_CARD_STAT_INC(card, rx_dropped_notsupp);
goto walk_packet;
}
- if ((*hdr)->hdr.l3.flags & QETH_HDR_PASSTHRU) {
+ if (hdr->hdr.l3.flags & QETH_HDR_PASSTHRU) {
linear_len = ETH_HLEN;
headroom = 0;
break;
}
- if ((*hdr)->hdr.l3.flags & QETH_HDR_IPV6)
+ if (hdr->hdr.l3.flags & QETH_HDR_IPV6)
linear_len = sizeof(struct ipv6hdr);
else
linear_len = sizeof(struct iphdr);
headroom = ETH_HLEN;
break;
case QETH_HEADER_TYPE_OSN:
- skb_len = (*hdr)->hdr.osn.pdu_length;
+ skb_len = hdr->hdr.osn.pdu_length;
if (!IS_OSN(card)) {
QETH_CARD_STAT_INC(card, rx_dropped_notsupp);
goto walk_packet;
@@ -5127,13 +5329,13 @@ next_packet:
headroom = sizeof(struct qeth_hdr);
break;
default:
- if ((*hdr)->hdr.l2.id & QETH_HEADER_MASK_INVAL)
+ if (hdr->hdr.l2.id & QETH_HEADER_MASK_INVAL)
QETH_CARD_STAT_INC(card, rx_frame_errors);
else
QETH_CARD_STAT_INC(card, rx_dropped_notsupp);
/* Can't determine packet length, drop the whole buffer. */
- return NULL;
+ return -EPROTONOSUPPORT;
}
if (skb_len < linear_len) {
@@ -5146,21 +5348,43 @@ next_packet:
!atomic_read(&card->force_alloc_skb) &&
!IS_OSN(card));
- if (use_rx_sg && qethbuffer->rx_skb) {
+ if (use_rx_sg) {
/* QETH_CQ_ENABLED only: */
- skb = qethbuffer->rx_skb;
- qethbuffer->rx_skb = NULL;
- } else {
- if (!use_rx_sg)
- linear_len = skb_len;
- skb = napi_alloc_skb(&card->napi, linear_len + headroom);
+ if (qethbuffer->rx_skb &&
+ skb_tailroom(qethbuffer->rx_skb) >= linear_len + headroom) {
+ skb = qethbuffer->rx_skb;
+ qethbuffer->rx_skb = NULL;
+ goto use_skb;
+ }
+
+ skb = napi_get_frags(napi);
+ if (!skb) {
+ /* -ENOMEM, no point in falling back further. */
+ QETH_CARD_STAT_INC(card, rx_dropped_nomem);
+ goto walk_packet;
+ }
+
+ if (skb_tailroom(skb) >= linear_len + headroom) {
+ uses_frags = true;
+ goto use_skb;
+ }
+
+ netdev_info_once(card->dev,
+ "Insufficient linear space in NAPI frags skb, need %u but have %u\n",
+ linear_len + headroom, skb_tailroom(skb));
+ /* Shouldn't happen. Don't optimize, fall back to linear skb. */
}
- if (!skb)
+ linear_len = skb_len;
+ skb = napi_alloc_skb(napi, linear_len + headroom);
+ if (!skb) {
QETH_CARD_STAT_INC(card, rx_dropped_nomem);
- else if (headroom)
- skb_reserve(skb, headroom);
+ goto walk_packet;
+ }
+use_skb:
+ if (headroom)
+ skb_reserve(skb, headroom);
walk_packet:
while (skb_len) {
int data_len = min(skb_len, (int)(element->length - offset));
@@ -5193,11 +5417,14 @@ walk_packet:
QETH_CARD_TEXT(card, 4, "unexeob");
QETH_CARD_HEX(card, 2, buffer, sizeof(void *));
if (skb) {
- dev_kfree_skb_any(skb);
+ if (uses_frags)
+ napi_free_frags(napi);
+ else
+ dev_kfree_skb_any(skb);
QETH_CARD_STAT_INC(card,
rx_length_errors);
}
- return NULL;
+ return -EMSGSIZE;
}
element++;
offset = 0;
@@ -5210,22 +5437,40 @@ walk_packet:
*__element = element;
*__offset = offset;
- if (use_rx_sg) {
- QETH_CARD_STAT_INC(card, rx_sg_skbs);
- QETH_CARD_STAT_ADD(card, rx_sg_frags,
- skb_shinfo(skb)->nr_frags);
+
+ qeth_receive_skb(card, skb, hdr, uses_frags);
+ return 0;
+}
+
+static int qeth_extract_skbs(struct qeth_card *card, int budget,
+ struct qeth_qdio_buffer *buf, bool *done)
+{
+ int work_done = 0;
+
+ WARN_ON_ONCE(!budget);
+ *done = false;
+
+ while (budget) {
+ if (qeth_extract_skb(card, buf, &card->rx.b_element,
+ &card->rx.e_offset)) {
+ *done = true;
+ break;
+ }
+
+ work_done++;
+ budget--;
}
- return skb;
+
+ return work_done;
}
-EXPORT_SYMBOL_GPL(qeth_core_get_next_skb);
int qeth_poll(struct napi_struct *napi, int budget)
{
struct qeth_card *card = container_of(napi, struct qeth_card, napi);
int work_done = 0;
struct qeth_qdio_buffer *buffer;
- int done;
int new_budget = budget;
+ bool done;
while (1) {
if (!card->rx.b_count) {
@@ -5248,11 +5493,10 @@ int qeth_poll(struct napi_struct *napi, int budget)
if (!(card->rx.qdio_err &&
qeth_check_qdio_errors(card, buffer->buffer,
card->rx.qdio_err, "qinerr")))
- work_done +=
- card->discipline->process_rx_buffer(
- card, new_budget, &done);
+ work_done += qeth_extract_skbs(card, new_budget,
+ buffer, &done);
else
- done = 1;
+ done = true;
if (done) {
QETH_CARD_STAT_INC(card, rx_bufs);
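
For context on the done/budget handling here: the usual NAPI contract is that poll() returns the number of packets processed and stops polling once it processes fewer than budget. A generic sketch of that driver-side pattern, not qeth's exact code (process_rx() and enable_rx_irq() are hypothetical helpers):

	static int example_poll(struct napi_struct *napi, int budget)
	{
		int work_done = process_rx(napi, budget);

		if (work_done < budget && napi_complete_done(napi, work_done))
			enable_rx_irq(napi);
		return work_done;
	}
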
@@ -5421,9 +5665,9 @@ int qeth_setassparms_cb(struct qeth_card *card,
cmd->hdr.return_code = cmd->data.setassparms.hdr.return_code;
if (cmd->hdr.prot_version == QETH_PROT_IPV4)
- card->options.ipa4.enabled_funcs = cmd->hdr.ipa_enabled;
+ card->options.ipa4.enabled = cmd->hdr.assists.enabled;
if (cmd->hdr.prot_version == QETH_PROT_IPV6)
- card->options.ipa6.enabled_funcs = cmd->hdr.ipa_enabled;
+ card->options.ipa6.enabled = cmd->hdr.assists.enabled;
return 0;
}
EXPORT_SYMBOL_GPL(qeth_setassparms_cb);
@@ -5824,7 +6068,8 @@ static int qeth_core_set_online(struct ccwgroup_device *gdev)
goto err;
}
}
- rc = card->discipline->set_online(gdev);
+
+ rc = qeth_set_online(card);
err:
return rc;
}
@@ -5832,7 +6077,8 @@ err:
static int qeth_core_set_offline(struct ccwgroup_device *gdev)
{
struct qeth_card *card = dev_get_drvdata(&gdev->dev);
- return card->discipline->set_offline(gdev);
+
+ return qeth_set_offline(card, false);
}
static void qeth_core_shutdown(struct ccwgroup_device *gdev)
@@ -5855,7 +6101,7 @@ static int qeth_suspend(struct ccwgroup_device *gdev)
if (gdev->state == CCWGROUP_OFFLINE)
return 0;
- card->discipline->set_offline(gdev);
+ qeth_set_offline(card, false);
return 0;
}
@@ -5864,7 +6110,7 @@ static int qeth_resume(struct ccwgroup_device *gdev)
struct qeth_card *card = dev_get_drvdata(&gdev->dev);
int rc;
- rc = card->discipline->set_online(gdev);
+ rc = qeth_set_online(card);
qeth_set_allowed_threads(card, 0xffffffff, 0);
if (rc)
diff --git a/drivers/s390/net/qeth_core_mpc.h b/drivers/s390/net/qeth_core_mpc.h
index 458db34239a7..3865f7258449 100644
--- a/drivers/s390/net/qeth_core_mpc.h
+++ b/drivers/s390/net/qeth_core_mpc.h
@@ -53,6 +53,16 @@ static inline bool qeth_ipa_caps_enabled(struct qeth_ipa_caps *caps, u32 mask)
return (caps->enabled & mask) == mask;
}
+#define qeth_adp_supported(c, f) \
+ qeth_ipa_caps_supported(&c->options.adp, f)
+#define qeth_is_supported(c, f) \
+ qeth_ipa_caps_supported(&c->options.ipa4, f)
+#define qeth_is_supported6(c, f) \
+ qeth_ipa_caps_supported(&c->options.ipa6, f)
+#define qeth_is_ipafunc_supported(c, prot, f) \
+ ((prot == QETH_PROT_IPV6) ? qeth_is_supported6(c, f) : \
+ qeth_is_supported(c, f))
+
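
Each of these wrappers is an all-bits-set test against the relevant caps word, mirroring the qeth_ipa_caps_enabled() helper above. For instance, qeth_is_supported(card, IPA_OUTBOUND_CHECKSUM) expands to:

	(card->options.ipa4.supported & IPA_OUTBOUND_CHECKSUM) ==
		IPA_OUTBOUND_CHECKSUM

so a multi-bit mask only passes when every one of its bits is supported.
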
enum qeth_card_types {
QETH_CARD_TYPE_OSD = 1,
QETH_CARD_TYPE_IQD = 5,
@@ -338,14 +348,14 @@ enum qeth_card_info_port_speed {
/* (SET)DELIP(M) IPA stuff ***************************************************/
struct qeth_ipacmd_setdelip4 {
- __u8 ip_addr[4];
- __u8 mask[4];
+ __be32 addr;
+ __be32 mask;
__u32 flags;
} __attribute__ ((packed));
struct qeth_ipacmd_setdelip6 {
- __u8 ip_addr[16];
- __u8 mask[16];
+ struct in6_addr addr;
+ struct in6_addr prefix;
__u32 flags;
} __attribute__ ((packed));
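
Typing these fields as __be32 and struct in6_addr (rather than opaque byte arrays) lets sparse catch endianness mix-ups and, as the qeth_l3_send_setdelip() hunk further down shows, turns the memcpy() calls into plain assignments:

	cmd->data.setdelip4.addr = addr->u.a4.addr;	/* both __be32 */
	cmd->data.setdelip6.addr = addr->u.a6.addr;	/* struct copy */
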
@@ -766,8 +776,7 @@ struct qeth_ipacmd_hdr {
__u8 prim_version_no;
__u8 param_count;
__u16 prot_version;
- __u32 ipa_supported;
- __u32 ipa_enabled;
+ struct qeth_ipa_caps assists;
} __attribute__ ((packed));
/* The IPA command itself */
diff --git a/drivers/s390/net/qeth_core_sys.c b/drivers/s390/net/qeth_core_sys.c
index 7bd86027f559..2bd9993aa60b 100644
--- a/drivers/s390/net/qeth_core_sys.c
+++ b/drivers/s390/net/qeth_core_sys.c
@@ -24,8 +24,6 @@ static ssize_t qeth_dev_state_show(struct device *dev,
switch (card->state) {
case CARD_STATE_DOWN:
return sprintf(buf, "DOWN\n");
- case CARD_STATE_HARDSETUP:
- return sprintf(buf, "HARDSETUP\n");
case CARD_STATE_SOFTSETUP:
if (card->dev->flags & IFF_UP)
return sprintf(buf, "UP (LAN %s)\n",
diff --git a/drivers/s390/net/qeth_l2.h b/drivers/s390/net/qeth_l2.h
index ddc615b431a8..adf25c9fd2b3 100644
--- a/drivers/s390/net/qeth_l2.h
+++ b/drivers/s390/net/qeth_l2.h
@@ -13,7 +13,6 @@ extern const struct attribute_group *qeth_l2_attr_groups[];
int qeth_l2_create_device_attributes(struct device *);
void qeth_l2_remove_device_attributes(struct device *);
-void qeth_l2_setup_bridgeport_attrs(struct qeth_card *card);
int qeth_bridgeport_query_ports(struct qeth_card *card,
enum qeth_sbp_roles *role,
enum qeth_sbp_states *state);
diff --git a/drivers/s390/net/qeth_l2_main.c b/drivers/s390/net/qeth_l2_main.c
index 47d37e75dda6..692bd2623401 100644
--- a/drivers/s390/net/qeth_l2_main.c
+++ b/drivers/s390/net/qeth_l2_main.c
@@ -24,7 +24,6 @@
#include "qeth_core.h"
#include "qeth_l2.h"
-static int qeth_l2_set_offline(struct ccwgroup_device *);
static void qeth_bridgeport_query_support(struct qeth_card *card);
static void qeth_bridge_state_change(struct qeth_card *card,
struct qeth_ipa_cmd *cmd);
@@ -284,59 +283,17 @@ static void qeth_l2_stop_card(struct qeth_card *card)
if (card->state == CARD_STATE_SOFTSETUP) {
qeth_clear_ipacmd_list(card);
- card->state = CARD_STATE_HARDSETUP;
- }
- if (card->state == CARD_STATE_HARDSETUP) {
qeth_drain_output_queues(card);
- qeth_clear_working_pool_list(card);
card->state = CARD_STATE_DOWN;
}
qeth_qdio_clear_card(card, 0);
+ qeth_clear_working_pool_list(card);
flush_workqueue(card->event_wq);
card->info.mac_bits &= ~QETH_LAYER2_MAC_REGISTERED;
card->info.promisc_mode = 0;
}
-static int qeth_l2_process_inbound_buffer(struct qeth_card *card,
- int budget, int *done)
-{
- int work_done = 0;
- struct sk_buff *skb;
- struct qeth_hdr *hdr;
- unsigned int len;
-
- *done = 0;
- WARN_ON_ONCE(!budget);
- while (budget) {
- skb = qeth_core_get_next_skb(card,
- &card->qdio.in_q->bufs[card->rx.b_index],
- &card->rx.b_element, &card->rx.e_offset, &hdr);
- if (!skb) {
- *done = 1;
- break;
- }
-
- if (hdr->hdr.l2.id == QETH_HEADER_TYPE_LAYER2) {
- skb->protocol = eth_type_trans(skb, skb->dev);
- qeth_rx_csum(card, skb, hdr->hdr.l2.flags[1]);
- len = skb->len;
- napi_gro_receive(&card->napi, skb);
- } else {
- skb_push(skb, sizeof(*hdr));
- skb_copy_to_linear_data(skb, hdr, sizeof(*hdr));
- len = skb->len;
- card->osn_info.data_cb(skb);
- }
-
- work_done++;
- budget--;
- QETH_CARD_STAT_INC(card, rx_packets);
- QETH_CARD_STAT_ADD(card, rx_bytes, len);
- }
- return work_done;
-}
-
static int qeth_l2_request_initial_mac(struct qeth_card *card)
{
int rc = 0;
@@ -649,7 +606,7 @@ static void qeth_l2_remove_device(struct ccwgroup_device *cgdev)
wait_event(card->wait_q, qeth_threads_running(card, 0xffffffff) == 0);
if (cgdev->state == CCWGROUP_ONLINE)
- qeth_l2_set_offline(cgdev);
+ qeth_set_offline(card, false);
cancel_work_sync(&card->close_dev_work);
if (qeth_netdev_is_registered(card->dev))
@@ -767,17 +724,31 @@ static void qeth_l2_trace_features(struct qeth_card *card)
sizeof(card->options.vnicc.sup_chars));
}
-static int qeth_l2_set_online(struct ccwgroup_device *gdev)
+static void qeth_l2_setup_bridgeport_attrs(struct qeth_card *card)
{
- struct qeth_card *card = dev_get_drvdata(&gdev->dev);
+ if (!card->options.sbp.reflect_promisc &&
+ card->options.sbp.role != QETH_SBP_ROLE_NONE) {
+ /* Conditional to avoid spurious error messages */
+ qeth_bridgeport_setrole(card, card->options.sbp.role);
+ /* Let the callback function refresh the stored role value. */
+ qeth_bridgeport_query_ports(card, &card->options.sbp.role,
+ NULL);
+ }
+ if (card->options.sbp.hostnotification) {
+ if (qeth_bridgeport_an_set(card, 1))
+ card->options.sbp.hostnotification = 0;
+ } else {
+ qeth_bridgeport_an_set(card, 0);
+ }
+}
+
+static int qeth_l2_set_online(struct qeth_card *card)
+{
+ struct ccwgroup_device *gdev = card->gdev;
struct net_device *dev = card->dev;
int rc = 0;
bool carrier_ok;
- mutex_lock(&card->discipline_mutex);
- mutex_lock(&card->conf_mutex);
- QETH_CARD_TEXT(card, 2, "setonlin");
-
rc = qeth_core_hardsetup_card(card, &carrier_ok);
if (rc) {
QETH_CARD_TEXT_(card, 2, "2err%04x", rc);
@@ -787,9 +758,11 @@ static int qeth_l2_set_online(struct ccwgroup_device *gdev)
mutex_lock(&card->sbp_lock);
qeth_bridgeport_query_support(card);
- if (card->options.sbp.supported_funcs)
+ if (card->options.sbp.supported_funcs) {
+ qeth_l2_setup_bridgeport_attrs(card);
dev_info(&card->gdev->dev,
- "The device represents a Bridge Capable Port\n");
+ "The device represents a Bridge Capable Port\n");
+ }
mutex_unlock(&card->sbp_lock);
qeth_l2_register_dev_addr(card);
@@ -800,20 +773,11 @@ static int qeth_l2_set_online(struct ccwgroup_device *gdev)
qeth_trace_features(card);
qeth_l2_trace_features(card);
- qeth_l2_setup_bridgeport_attrs(card);
-
- card->state = CARD_STATE_HARDSETUP;
qeth_print_status_message(card);
/* softsetup */
QETH_CARD_TEXT(card, 2, "softsetp");
- rc = qeth_init_qdio_queues(card);
- if (rc) {
- QETH_CARD_TEXT_(card, 2, "6err%d", rc);
- rc = -ENODEV;
- goto out_remove;
- }
card->state = CARD_STATE_SOFTSETUP;
qeth_set_allowed_threads(card, 0xffffffff, 0);
@@ -840,8 +804,6 @@ static int qeth_l2_set_online(struct ccwgroup_device *gdev)
}
/* let user_space know that device is online */
kobject_uevent(&gdev->dev.kobj, KOBJ_CHANGE);
- mutex_unlock(&card->conf_mutex);
- mutex_unlock(&card->discipline_mutex);
return 0;
out_remove:
@@ -850,81 +812,12 @@ out_remove:
qeth_stop_channel(&card->write);
qeth_stop_channel(&card->read);
qdio_free(CARD_DDEV(card));
-
- mutex_unlock(&card->conf_mutex);
- mutex_unlock(&card->discipline_mutex);
return rc;
}
-static int __qeth_l2_set_offline(struct ccwgroup_device *cgdev,
- int recovery_mode)
+static void qeth_l2_set_offline(struct qeth_card *card)
{
- struct qeth_card *card = dev_get_drvdata(&cgdev->dev);
- int rc = 0, rc2 = 0, rc3 = 0;
-
- mutex_lock(&card->discipline_mutex);
- mutex_lock(&card->conf_mutex);
- QETH_CARD_TEXT(card, 3, "setoffl");
-
- if ((!recovery_mode && card->info.hwtrap) || card->info.hwtrap == 2) {
- qeth_hw_trap(card, QETH_DIAGS_TRAP_DISARM);
- card->info.hwtrap = 1;
- }
-
- rtnl_lock();
- card->info.open_when_online = card->dev->flags & IFF_UP;
- dev_close(card->dev);
- netif_device_detach(card->dev);
- netif_carrier_off(card->dev);
- rtnl_unlock();
-
qeth_l2_stop_card(card);
- rc = qeth_stop_channel(&card->data);
- rc2 = qeth_stop_channel(&card->write);
- rc3 = qeth_stop_channel(&card->read);
- if (!rc)
- rc = (rc2) ? rc2 : rc3;
- if (rc)
- QETH_CARD_TEXT_(card, 2, "1err%d", rc);
- qdio_free(CARD_DDEV(card));
-
- /* let user_space know that device is offline */
- kobject_uevent(&cgdev->dev.kobj, KOBJ_CHANGE);
- mutex_unlock(&card->conf_mutex);
- mutex_unlock(&card->discipline_mutex);
- return 0;
-}
-
-static int qeth_l2_set_offline(struct ccwgroup_device *cgdev)
-{
- return __qeth_l2_set_offline(cgdev, 0);
-}
-
-static int qeth_l2_recover(void *ptr)
-{
- struct qeth_card *card;
- int rc = 0;
-
- card = (struct qeth_card *) ptr;
- QETH_CARD_TEXT(card, 2, "recover1");
- if (!qeth_do_run_thread(card, QETH_RECOVER_THREAD))
- return 0;
- QETH_CARD_TEXT(card, 2, "recover2");
- dev_warn(&card->gdev->dev,
- "A recovery process has been started for the device\n");
- __qeth_l2_set_offline(card->gdev, 1);
- rc = qeth_l2_set_online(card->gdev);
- if (!rc)
- dev_info(&card->gdev->dev,
- "Device successfully recovered!\n");
- else {
- ccwgroup_set_offline(card->gdev);
- dev_warn(&card->gdev->dev, "The qeth device driver "
- "failed to recover an error on the device\n");
- }
- qeth_clear_thread_start_bit(card, QETH_RECOVER_THREAD);
- qeth_clear_thread_running_bit(card, QETH_RECOVER_THREAD);
- return 0;
}
static int __init qeth_l2_init(void)
@@ -961,8 +854,6 @@ static int qeth_l2_control_event(struct qeth_card *card,
struct qeth_discipline qeth_l2_discipline = {
.devtype = &qeth_l2_devtype,
- .process_rx_buffer = qeth_l2_process_inbound_buffer,
- .recover = qeth_l2_recover,
.setup = qeth_l2_probe_device,
.remove = qeth_l2_remove_device,
.set_online = qeth_l2_set_online,
@@ -1001,7 +892,8 @@ int qeth_osn_assist(struct net_device *dev, void *data, int data_len)
if (!iob)
return -ENOMEM;
- qeth_prepare_ipa_cmd(card, iob, (u16) data_len);
+ qeth_prepare_ipa_cmd(card, iob, (u16) data_len, NULL);
+
memcpy(__ipa_cmd(iob), data, data_len);
iob->callback = qeth_osn_assist_cb;
return qeth_send_ipa_cmd(card, iob, NULL, NULL);
diff --git a/drivers/s390/net/qeth_l2_sys.c b/drivers/s390/net/qeth_l2_sys.c
index 7fa325cf6f8d..86bcae992f72 100644
--- a/drivers/s390/net/qeth_l2_sys.c
+++ b/drivers/s390/net/qeth_l2_sys.c
@@ -246,40 +246,6 @@ static struct attribute_group qeth_l2_bridgeport_attr_group = {
.attrs = qeth_l2_bridgeport_attrs,
};
-/**
- * qeth_l2_setup_bridgeport_attrs() - set/restore attrs when turning online.
- * @card: qeth_card structure pointer
- *
- * Note: this function is called with conf_mutex held by the caller
- */
-void qeth_l2_setup_bridgeport_attrs(struct qeth_card *card)
-{
- int rc;
-
- if (!card)
- return;
- if (!card->options.sbp.supported_funcs)
- return;
-
- mutex_lock(&card->sbp_lock);
- if (!card->options.sbp.reflect_promisc &&
- card->options.sbp.role != QETH_SBP_ROLE_NONE) {
- /* Conditional to avoid spurious error messages */
- qeth_bridgeport_setrole(card, card->options.sbp.role);
- /* Let the callback function refresh the stored role value. */
- qeth_bridgeport_query_ports(card,
- &card->options.sbp.role, NULL);
- }
- if (card->options.sbp.hostnotification) {
- rc = qeth_bridgeport_an_set(card, 1);
- if (rc)
- card->options.sbp.hostnotification = 0;
- } else {
- qeth_bridgeport_an_set(card, 0);
- }
- mutex_unlock(&card->sbp_lock);
-}
-
/* VNIC CHARS support */
/* convert sysfs attr name to VNIC characteristic */
diff --git a/drivers/s390/net/qeth_l3.h b/drivers/s390/net/qeth_l3.h
index 5db04fe472c0..6ccfe2121095 100644
--- a/drivers/s390/net/qeth_l3.h
+++ b/drivers/s390/net/qeth_l3.h
@@ -23,7 +23,6 @@ struct qeth_ipaddr {
struct hlist_node hnode;
enum qeth_ip_types type;
u8 is_multicast:1;
- u8 in_progress:1;
u8 disp_flag:2;
u8 ipato:1; /* ucast only */
@@ -35,7 +34,7 @@ struct qeth_ipaddr {
union {
struct {
__be32 addr;
- unsigned int mask;
+ __be32 mask;
} a4;
struct {
struct in6_addr addr;
@@ -102,7 +101,8 @@ struct qeth_ipato_entry {
extern const struct attribute_group *qeth_l3_attr_groups[];
-void qeth_l3_ipaddr_to_string(enum qeth_prot_versions, const __u8 *, char *);
+int qeth_l3_ipaddr_to_string(enum qeth_prot_versions proto, const u8 *addr,
+ char *buf);
int qeth_l3_create_device_attributes(struct device *);
void qeth_l3_remove_device_attributes(struct device *);
int qeth_l3_setrouting_v4(struct qeth_card *);
diff --git a/drivers/s390/net/qeth_l3_main.c b/drivers/s390/net/qeth_l3_main.c
index 5508ab89b518..317d56647a4a 100644
--- a/drivers/s390/net/qeth_l3_main.c
+++ b/drivers/s390/net/qeth_l3_main.c
@@ -37,30 +37,18 @@
#include "qeth_l3.h"
-
-static int qeth_l3_set_offline(struct ccwgroup_device *);
static int qeth_l3_register_addr_entry(struct qeth_card *,
struct qeth_ipaddr *);
static int qeth_l3_deregister_addr_entry(struct qeth_card *,
struct qeth_ipaddr *);
-static void qeth_l3_ipaddr4_to_string(const __u8 *addr, char *buf)
-{
- sprintf(buf, "%pI4", addr);
-}
-
-static void qeth_l3_ipaddr6_to_string(const __u8 *addr, char *buf)
-{
- sprintf(buf, "%pI6", addr);
-}
-
-void qeth_l3_ipaddr_to_string(enum qeth_prot_versions proto, const __u8 *addr,
- char *buf)
+int qeth_l3_ipaddr_to_string(enum qeth_prot_versions proto, const u8 *addr,
+ char *buf)
{
if (proto == QETH_PROT_IPV4)
- qeth_l3_ipaddr4_to_string(addr, buf);
- else if (proto == QETH_PROT_IPV6)
- qeth_l3_ipaddr6_to_string(addr, buf);
+ return sprintf(buf, "%pI4", addr);
+ else
+ return sprintf(buf, "%pI6", addr);
}
static struct qeth_ipaddr *qeth_l3_find_addr_by_ip(struct qeth_card *card,
@@ -161,8 +149,6 @@ static int qeth_l3_delete_ip(struct qeth_card *card,
addr->ref_counter--;
if (addr->type == QETH_IP_TYPE_NORMAL && addr->ref_counter > 0)
return rc;
- if (addr->in_progress)
- return -EINPROGRESS;
if (qeth_card_hw_is_reachable(card))
rc = qeth_l3_deregister_addr_entry(card, addr);
@@ -223,29 +209,10 @@ static int qeth_l3_add_ip(struct qeth_card *card, struct qeth_ipaddr *tmp_addr)
return 0;
}
- /* qeth_l3_register_addr_entry can go to sleep
- * if we add a IPV4 addr. It is caused by the reason
- * that SETIP ipa cmd starts ARP staff for IPV4 addr.
- * Thus we should unlock spinlock, and make a protection
- * using in_progress variable to indicate that there is
- * an hardware operation with this IPV4 address
- */
- if (addr->proto == QETH_PROT_IPV4) {
- addr->in_progress = 1;
- mutex_unlock(&card->ip_lock);
- rc = qeth_l3_register_addr_entry(card, addr);
- mutex_lock(&card->ip_lock);
- addr->in_progress = 0;
- } else
- rc = qeth_l3_register_addr_entry(card, addr);
+ rc = qeth_l3_register_addr_entry(card, addr);
if (!rc || rc == -EADDRINUSE || rc == -ENETDOWN) {
addr->disp_flag = QETH_DISP_ADDR_DO_NOTHING;
- if (addr->ref_counter < 1) {
- qeth_l3_deregister_addr_entry(card, addr);
- hash_del(&addr->hnode);
- kfree(addr);
- }
} else {
hash_del(&addr->hnode);
kfree(addr);
@@ -313,19 +280,10 @@ static void qeth_l3_recover_ip(struct qeth_card *card)
hash_for_each_safe(card->ip_htable, i, tmp, addr, hnode) {
if (addr->disp_flag == QETH_DISP_ADDR_ADD) {
- if (addr->proto == QETH_PROT_IPV4) {
- addr->in_progress = 1;
- mutex_unlock(&card->ip_lock);
- rc = qeth_l3_register_addr_entry(card, addr);
- mutex_lock(&card->ip_lock);
- addr->in_progress = 0;
- } else
- rc = qeth_l3_register_addr_entry(card, addr);
+ rc = qeth_l3_register_addr_entry(card, addr);
if (!rc) {
addr->disp_flag = QETH_DISP_ADDR_DO_NOTHING;
- if (addr->ref_counter < 1)
- qeth_l3_delete_ip(card, addr);
} else {
hash_del(&addr->hnode);
kfree(addr);
@@ -379,17 +337,16 @@ static int qeth_l3_send_setdelmc(struct qeth_card *card,
return qeth_send_ipa_cmd(card, iob, qeth_l3_setdelip_cb, NULL);
}
-static void qeth_l3_fill_netmask(u8 *netmask, unsigned int len)
+static void qeth_l3_set_ipv6_prefix(struct in6_addr *prefix, unsigned int len)
{
- int i, j;
- for (i = 0; i < 16; i++) {
- j = (len) - (i * 8);
- if (j >= 8)
- netmask[i] = 0xff;
- else if (j > 0)
- netmask[i] = (u8)(0xFF00 >> j);
- else
- netmask[i] = 0;
+ unsigned int i = 0;
+
+ while (len && i < 4) {
+ int mask_len = min_t(int, len, 32);
+
+ prefix->s6_addr32[i] = inet_make_mask(mask_len);
+ len -= mask_len;
+ i++;
}
}
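
A worked example of the rewritten helper, assuming the surrounding command buffer arrives zero-initialized (the loop never touches the words beyond the prefix): pfxlen = 56 produces

	prefix->s6_addr32[0] = inet_make_mask(32);	/* 0xffffffff */
	prefix->s6_addr32[1] = inet_make_mask(24);	/* 0xffffff00, big endian */
	/* s6_addr32[2] and s6_addr32[3] stay zero */
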
@@ -412,7 +369,6 @@ static int qeth_l3_send_setdelip(struct qeth_card *card,
{
struct qeth_cmd_buffer *iob;
struct qeth_ipa_cmd *cmd;
- __u8 netmask[16];
u32 flags;
QETH_CARD_TEXT(card, 4, "setdelip");
@@ -427,15 +383,13 @@ static int qeth_l3_send_setdelip(struct qeth_card *card,
QETH_CARD_TEXT_(card, 4, "flags%02X", flags);
if (addr->proto == QETH_PROT_IPV6) {
- memcpy(cmd->data.setdelip6.ip_addr, &addr->u.a6.addr,
- sizeof(struct in6_addr));
- qeth_l3_fill_netmask(netmask, addr->u.a6.pfxlen);
- memcpy(cmd->data.setdelip6.mask, netmask,
- sizeof(struct in6_addr));
+ cmd->data.setdelip6.addr = addr->u.a6.addr;
+ qeth_l3_set_ipv6_prefix(&cmd->data.setdelip6.prefix,
+ addr->u.a6.pfxlen);
cmd->data.setdelip6.flags = flags;
} else {
- memcpy(cmd->data.setdelip4.ip_addr, &addr->u.a4.addr, 4);
- memcpy(cmd->data.setdelip4.mask, &addr->u.a4.mask, 4);
+ cmd->data.setdelip4.addr = addr->u.a4.addr;
+ cmd->data.setdelip4.mask = addr->u.a4.mask;
cmd->data.setdelip4.flags = flags;
}
@@ -581,6 +535,7 @@ int qeth_l3_add_ipato_entry(struct qeth_card *card,
QETH_CARD_TEXT(card, 2, "addipato");
+ mutex_lock(&card->conf_mutex);
mutex_lock(&card->ip_lock);
list_for_each_entry(ipatoe, &card->ipato.entries, entry) {
@@ -600,6 +555,7 @@ int qeth_l3_add_ipato_entry(struct qeth_card *card,
}
mutex_unlock(&card->ip_lock);
+ mutex_unlock(&card->conf_mutex);
return rc;
}
@@ -613,6 +569,7 @@ int qeth_l3_del_ipato_entry(struct qeth_card *card,
QETH_CARD_TEXT(card, 2, "delipato");
+ mutex_lock(&card->conf_mutex);
mutex_lock(&card->ip_lock);
list_for_each_entry_safe(ipatoe, tmp, &card->ipato.entries, entry) {
@@ -629,6 +586,8 @@ int qeth_l3_del_ipato_entry(struct qeth_card *card,
}
mutex_unlock(&card->ip_lock);
+ mutex_unlock(&card->conf_mutex);
+
return rc;
}
@@ -637,6 +596,7 @@ int qeth_l3_modify_rxip_vipa(struct qeth_card *card, bool add, const u8 *ip,
enum qeth_prot_versions proto)
{
struct qeth_ipaddr addr;
+ int rc;
qeth_l3_init_ipaddr(&addr, type, proto);
if (proto == QETH_PROT_IPV4)
@@ -644,7 +604,11 @@ int qeth_l3_modify_rxip_vipa(struct qeth_card *card, bool add, const u8 *ip,
else
memcpy(&addr.u.a6.addr, ip, 16);
- return qeth_l3_modify_ip(card, &addr, add);
+ mutex_lock(&card->conf_mutex);
+ rc = qeth_l3_modify_ip(card, &addr, add);
+ mutex_unlock(&card->conf_mutex);
+
+ return rc;
}
int qeth_l3_modify_hsuid(struct qeth_card *card, bool add)
@@ -937,7 +901,7 @@ out:
return rc;
}
-static int qeth_l3_start_ipassists(struct qeth_card *card)
+static void qeth_l3_start_ipassists(struct qeth_card *card)
{
QETH_CARD_TEXT(card, 3, "strtipas");
@@ -947,7 +911,6 @@ static int qeth_l3_start_ipassists(struct qeth_card *card)
qeth_l3_start_ipa_multicast(card); /* go on*/
qeth_l3_start_ipa_ipv6(card); /* go on*/
qeth_l3_start_ipa_broadcast(card); /* go on*/
- return 0;
}
static int qeth_l3_iqd_read_initial_mac_cb(struct qeth_card *card,
@@ -1198,96 +1161,6 @@ static int qeth_l3_vlan_rx_kill_vid(struct net_device *dev,
return 0;
}
-static void qeth_l3_rebuild_skb(struct qeth_card *card, struct sk_buff *skb,
- struct qeth_hdr *hdr)
-{
- struct af_iucv_trans_hdr *iucv = (struct af_iucv_trans_hdr *) skb->data;
- struct net_device *dev = skb->dev;
-
- if (IS_IQD(card) && iucv->magic == ETH_P_AF_IUCV) {
- dev_hard_header(skb, dev, ETH_P_AF_IUCV, dev->dev_addr,
- "FAKELL", skb->len);
- return;
- }
-
- if (!(hdr->hdr.l3.flags & QETH_HDR_PASSTHRU)) {
- u16 prot = (hdr->hdr.l3.flags & QETH_HDR_IPV6) ? ETH_P_IPV6 :
- ETH_P_IP;
- unsigned char tg_addr[ETH_ALEN];
-
- skb_reset_network_header(skb);
- switch (hdr->hdr.l3.flags & QETH_HDR_CAST_MASK) {
- case QETH_CAST_MULTICAST:
- if (prot == ETH_P_IP)
- ip_eth_mc_map(ip_hdr(skb)->daddr, tg_addr);
- else
- ipv6_eth_mc_map(&ipv6_hdr(skb)->daddr, tg_addr);
- QETH_CARD_STAT_INC(card, rx_multicast);
- break;
- case QETH_CAST_BROADCAST:
- ether_addr_copy(tg_addr, card->dev->broadcast);
- QETH_CARD_STAT_INC(card, rx_multicast);
- break;
- default:
- if (card->options.sniffer)
- skb->pkt_type = PACKET_OTHERHOST;
- ether_addr_copy(tg_addr, card->dev->dev_addr);
- }
-
- if (hdr->hdr.l3.ext_flags & QETH_HDR_EXT_SRC_MAC_ADDR)
- card->dev->header_ops->create(skb, card->dev, prot,
- tg_addr, &hdr->hdr.l3.next_hop.rx.src_mac,
- skb->len);
- else
- card->dev->header_ops->create(skb, card->dev, prot,
- tg_addr, "FAKELL", skb->len);
- }
-
- /* copy VLAN tag from hdr into skb */
- if (!card->options.sniffer &&
- (hdr->hdr.l3.ext_flags & (QETH_HDR_EXT_VLAN_FRAME |
- QETH_HDR_EXT_INCLUDE_VLAN_TAG))) {
- u16 tag = (hdr->hdr.l3.ext_flags & QETH_HDR_EXT_VLAN_FRAME) ?
- hdr->hdr.l3.vlan_id :
- hdr->hdr.l3.next_hop.rx.vlan_id;
- __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), tag);
- }
-
- qeth_rx_csum(card, skb, hdr->hdr.l3.ext_flags);
-}
-
-static int qeth_l3_process_inbound_buffer(struct qeth_card *card,
- int budget, int *done)
-{
- int work_done = 0;
- struct sk_buff *skb;
- struct qeth_hdr *hdr;
-
- *done = 0;
- WARN_ON_ONCE(!budget);
- while (budget) {
- skb = qeth_core_get_next_skb(card,
- &card->qdio.in_q->bufs[card->rx.b_index],
- &card->rx.b_element, &card->rx.e_offset, &hdr);
- if (!skb) {
- *done = 1;
- break;
- }
-
- if (hdr->hdr.l3.id == QETH_HEADER_TYPE_LAYER3)
- qeth_l3_rebuild_skb(card, skb, hdr);
-
- skb->protocol = eth_type_trans(skb, skb->dev);
- QETH_CARD_STAT_INC(card, rx_packets);
- QETH_CARD_STAT_ADD(card, rx_bytes, skb->len);
-
- napi_gro_receive(&card->napi, skb);
- work_done++;
- budget--;
- }
- return work_done;
-}
-
static void qeth_l3_stop_card(struct qeth_card *card)
{
QETH_CARD_TEXT(card, 2, "stopcard");
@@ -1304,15 +1177,12 @@ static void qeth_l3_stop_card(struct qeth_card *card)
if (card->state == CARD_STATE_SOFTSETUP) {
qeth_l3_clear_ip_htable(card, 1);
qeth_clear_ipacmd_list(card);
- card->state = CARD_STATE_HARDSETUP;
- }
- if (card->state == CARD_STATE_HARDSETUP) {
qeth_drain_output_queues(card);
- qeth_clear_working_pool_list(card);
card->state = CARD_STATE_DOWN;
}
qeth_qdio_clear_card(card, 0);
+ qeth_clear_working_pool_list(card);
flush_workqueue(card->event_wq);
card->info.promisc_mode = 0;
}
@@ -2168,7 +2038,7 @@ static void qeth_l3_remove_device(struct ccwgroup_device *cgdev)
wait_event(card->wait_q, qeth_threads_running(card, 0xffffffff) == 0);
if (cgdev->state == CCWGROUP_ONLINE)
- qeth_l3_set_offline(cgdev);
+ qeth_set_offline(card, false);
cancel_work_sync(&card->close_dev_work);
if (qeth_netdev_is_registered(card->dev))
@@ -2180,17 +2050,13 @@ static void qeth_l3_remove_device(struct ccwgroup_device *cgdev)
qeth_l3_clear_ipato_list(card);
}
-static int qeth_l3_set_online(struct ccwgroup_device *gdev)
+static int qeth_l3_set_online(struct qeth_card *card)
{
- struct qeth_card *card = dev_get_drvdata(&gdev->dev);
+ struct ccwgroup_device *gdev = card->gdev;
struct net_device *dev = card->dev;
int rc = 0;
bool carrier_ok;
- mutex_lock(&card->discipline_mutex);
- mutex_lock(&card->conf_mutex);
- QETH_CARD_TEXT(card, 2, "setonlin");
-
rc = qeth_core_hardsetup_card(card, &carrier_ok);
if (rc) {
QETH_CARD_TEXT_(card, 2, "2err%04x", rc);
@@ -2198,7 +2064,6 @@ static int qeth_l3_set_online(struct ccwgroup_device *gdev)
goto out_remove;
}
- card->state = CARD_STATE_HARDSETUP;
qeth_print_status_message(card);
/* softsetup */
@@ -2208,11 +2073,8 @@ static int qeth_l3_set_online(struct ccwgroup_device *gdev)
if (rc)
QETH_CARD_TEXT_(card, 2, "2err%04x", rc);
if (!card->options.sniffer) {
- rc = qeth_l3_start_ipassists(card);
- if (rc) {
- QETH_CARD_TEXT_(card, 2, "3err%d", rc);
- goto out_remove;
- }
+ qeth_l3_start_ipassists(card);
+
rc = qeth_l3_setrouting_v4(card);
if (rc)
QETH_CARD_TEXT_(card, 2, "4err%04x", rc);
@@ -2221,12 +2083,6 @@ static int qeth_l3_set_online(struct ccwgroup_device *gdev)
QETH_CARD_TEXT_(card, 2, "5err%04x", rc);
}
- rc = qeth_init_qdio_queues(card);
- if (rc) {
- QETH_CARD_TEXT_(card, 2, "6err%d", rc);
- rc = -ENODEV;
- goto out_remove;
- }
card->state = CARD_STATE_SOFTSETUP;
qeth_set_allowed_threads(card, 0xffffffff, 0);
@@ -2255,8 +2111,6 @@ static int qeth_l3_set_online(struct ccwgroup_device *gdev)
qeth_trace_features(card);
/* let user_space know that device is online */
kobject_uevent(&gdev->dev.kobj, KOBJ_CHANGE);
- mutex_unlock(&card->conf_mutex);
- mutex_unlock(&card->discipline_mutex);
return 0;
out_remove:
qeth_l3_stop_card(card);
@@ -2264,88 +2118,12 @@ out_remove:
qeth_stop_channel(&card->write);
qeth_stop_channel(&card->read);
qdio_free(CARD_DDEV(card));
-
- mutex_unlock(&card->conf_mutex);
- mutex_unlock(&card->discipline_mutex);
return rc;
}
-static int __qeth_l3_set_offline(struct ccwgroup_device *cgdev,
- int recovery_mode)
+static void qeth_l3_set_offline(struct qeth_card *card)
{
- struct qeth_card *card = dev_get_drvdata(&cgdev->dev);
- int rc = 0, rc2 = 0, rc3 = 0;
-
- mutex_lock(&card->discipline_mutex);
- mutex_lock(&card->conf_mutex);
- QETH_CARD_TEXT(card, 3, "setoffl");
-
- if ((!recovery_mode && card->info.hwtrap) || card->info.hwtrap == 2) {
- qeth_hw_trap(card, QETH_DIAGS_TRAP_DISARM);
- card->info.hwtrap = 1;
- }
-
- rtnl_lock();
- card->info.open_when_online = card->dev->flags & IFF_UP;
- dev_close(card->dev);
- netif_device_detach(card->dev);
- netif_carrier_off(card->dev);
- rtnl_unlock();
-
qeth_l3_stop_card(card);
- if (card->options.cq == QETH_CQ_ENABLED) {
- rtnl_lock();
- call_netdevice_notifiers(NETDEV_REBOOT, card->dev);
- rtnl_unlock();
- }
-
- rc = qeth_stop_channel(&card->data);
- rc2 = qeth_stop_channel(&card->write);
- rc3 = qeth_stop_channel(&card->read);
- if (!rc)
- rc = (rc2) ? rc2 : rc3;
- if (rc)
- QETH_CARD_TEXT_(card, 2, "1err%d", rc);
- qdio_free(CARD_DDEV(card));
-
- /* let user_space know that device is offline */
- kobject_uevent(&cgdev->dev.kobj, KOBJ_CHANGE);
- mutex_unlock(&card->conf_mutex);
- mutex_unlock(&card->discipline_mutex);
- return 0;
-}
-
-static int qeth_l3_set_offline(struct ccwgroup_device *cgdev)
-{
- return __qeth_l3_set_offline(cgdev, 0);
-}
-
-static int qeth_l3_recover(void *ptr)
-{
- struct qeth_card *card;
- int rc = 0;
-
- card = (struct qeth_card *) ptr;
- QETH_CARD_TEXT(card, 2, "recover1");
- QETH_CARD_HEX(card, 2, &card, sizeof(void *));
- if (!qeth_do_run_thread(card, QETH_RECOVER_THREAD))
- return 0;
- QETH_CARD_TEXT(card, 2, "recover2");
- dev_warn(&card->gdev->dev,
- "A recovery process has been started for the device\n");
- __qeth_l3_set_offline(card->gdev, 1);
- rc = qeth_l3_set_online(card->gdev);
- if (!rc)
- dev_info(&card->gdev->dev,
- "Device successfully recovered!\n");
- else {
- ccwgroup_set_offline(card->gdev);
- dev_warn(&card->gdev->dev, "The qeth device driver "
- "failed to recover an error on the device\n");
- }
- qeth_clear_thread_start_bit(card, QETH_RECOVER_THREAD);
- qeth_clear_thread_running_bit(card, QETH_RECOVER_THREAD);
- return 0;
}
/* Returns zero if the command is successfully "consumed" */
@@ -2357,8 +2135,6 @@ static int qeth_l3_control_event(struct qeth_card *card,
struct qeth_discipline qeth_l3_discipline = {
.devtype = &qeth_l3_devtype,
- .process_rx_buffer = qeth_l3_process_inbound_buffer,
- .recover = qeth_l3_recover,
.setup = qeth_l3_probe_device,
.remove = qeth_l3_remove_device,
.set_online = qeth_l3_set_online,
@@ -2437,7 +2213,7 @@ static int qeth_l3_ip_event(struct notifier_block *this,
qeth_l3_init_ipaddr(&addr, QETH_IP_TYPE_NORMAL, QETH_PROT_IPV4);
addr.u.a4.addr = ifa->ifa_address;
- addr.u.a4.mask = be32_to_cpu(ifa->ifa_mask);
+ addr.u.a4.mask = ifa->ifa_mask;
return qeth_l3_handle_ip_event(card, &addr, event);
}
diff --git a/drivers/s390/net/qeth_l3_sys.c b/drivers/s390/net/qeth_l3_sys.c
index e8c848f72c6d..29f2517d2a31 100644
--- a/drivers/s390/net/qeth_l3_sys.c
+++ b/drivers/s390/net/qeth_l3_sys.c
@@ -402,30 +402,35 @@ static ssize_t qeth_l3_dev_ipato_add_show(char *buf, struct qeth_card *card,
enum qeth_prot_versions proto)
{
struct qeth_ipato_entry *ipatoe;
- char addr_str[40];
- int entry_len; /* length of 1 entry string, differs between v4 and v6 */
- int i = 0;
+ int str_len = 0;
- entry_len = (proto == QETH_PROT_IPV4)? 12 : 40;
- /* add strlen for "/<mask>\n" */
- entry_len += (proto == QETH_PROT_IPV4)? 5 : 6;
mutex_lock(&card->ip_lock);
list_for_each_entry(ipatoe, &card->ipato.entries, entry) {
+ char addr_str[40];
+ int entry_len;
+
if (ipatoe->proto != proto)
continue;
- /* String must not be longer than PAGE_SIZE. So we check if
- * string length gets near PAGE_SIZE. Then we can savely display
- * the next IPv6 address (worst case, compared to IPv4) */
- if ((PAGE_SIZE - i) <= entry_len)
+
+ entry_len = qeth_l3_ipaddr_to_string(proto, ipatoe->addr,
+ addr_str);
+ if (entry_len < 0)
+ continue;
+
+ /* Append /%mask to the entry: */
+ entry_len += 1 + ((proto == QETH_PROT_IPV4) ? 2 : 3);
+ /* Enough room to format %entry\n into null terminated page? */
+ if (entry_len + 1 > PAGE_SIZE - str_len - 1)
break;
- qeth_l3_ipaddr_to_string(proto, ipatoe->addr, addr_str);
- i += snprintf(buf + i, PAGE_SIZE - i,
- "%s/%i\n", addr_str, ipatoe->mask_bits);
+
+ entry_len = scnprintf(buf, PAGE_SIZE - str_len,
+ "%s/%i\n", addr_str, ipatoe->mask_bits);
+ str_len += entry_len;
+ buf += entry_len;
}
mutex_unlock(&card->ip_lock);
- i += snprintf(buf + i, PAGE_SIZE - i, "\n");
- return i;
+ return str_len ? str_len : scnprintf(buf, PAGE_SIZE, "\n");
}
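
The switch from snprintf() to scnprintf() here is not cosmetic: snprintf() returns the length the output would have had, so the old i += snprintf(...) accounting could exceed PAGE_SIZE on a long IPATO table and push buf past the page, while scnprintf() returns the bytes actually stored. A two-line illustration with a hypothetical 8-byte buffer:

	n = snprintf(buf, 8, "%s", "hello world");	/* n == 11, truncated */
	n = scnprintf(buf, 8, "%s", "hello world");	/* n == 7, bytes written */
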
static ssize_t qeth_l3_dev_ipato_add4_show(struct device *dev,
@@ -471,16 +476,14 @@ static ssize_t qeth_l3_dev_ipato_add_store(const char *buf, size_t count,
int mask_bits;
int rc = 0;
- mutex_lock(&card->conf_mutex);
rc = qeth_l3_parse_ipatoe(buf, proto, addr, &mask_bits);
if (rc)
- goto out;
+ return rc;
ipatoe = kzalloc(sizeof(struct qeth_ipato_entry), GFP_KERNEL);
- if (!ipatoe) {
- rc = -ENOMEM;
- goto out;
- }
+ if (!ipatoe)
+ return -ENOMEM;
+
ipatoe->proto = proto;
memcpy(ipatoe->addr, addr, (proto == QETH_PROT_IPV4)? 4:16);
ipatoe->mask_bits = mask_bits;
@@ -488,8 +491,7 @@ static ssize_t qeth_l3_dev_ipato_add_store(const char *buf, size_t count,
rc = qeth_l3_add_ipato_entry(card, ipatoe);
if (rc)
kfree(ipatoe);
-out:
- mutex_unlock(&card->conf_mutex);
+
return rc ? rc : count;
}
@@ -512,11 +514,9 @@ static ssize_t qeth_l3_dev_ipato_del_store(const char *buf, size_t count,
int mask_bits;
int rc = 0;
- mutex_lock(&card->conf_mutex);
rc = qeth_l3_parse_ipatoe(buf, proto, addr, &mask_bits);
if (!rc)
rc = qeth_l3_del_ipato_entry(card, proto, addr, mask_bits);
- mutex_unlock(&card->conf_mutex);
return rc ? rc : count;
}
@@ -623,31 +623,34 @@ static ssize_t qeth_l3_dev_ip_add_show(struct device *dev, char *buf,
{
struct qeth_card *card = dev_get_drvdata(dev);
struct qeth_ipaddr *ipaddr;
- char addr_str[40];
int str_len = 0;
- int entry_len; /* length of 1 entry string, differs between v4 and v6 */
int i;
- entry_len = (proto == QETH_PROT_IPV4)? 12 : 40;
- entry_len += 2; /* \n + terminator */
mutex_lock(&card->ip_lock);
hash_for_each(card->ip_htable, i, ipaddr, hnode) {
+ char addr_str[40];
+ int entry_len;
+
if (ipaddr->proto != proto || ipaddr->type != type)
continue;
- /* String must not be longer than PAGE_SIZE. So we check if
- * string length gets near PAGE_SIZE. Then we can savely display
- * the next IPv6 address (worst case, compared to IPv4) */
- if ((PAGE_SIZE - str_len) <= entry_len)
+
+ entry_len = qeth_l3_ipaddr_to_string(proto, (u8 *)&ipaddr->u,
+ addr_str);
+ if (entry_len < 0)
+ continue;
+
+ /* Enough room to format %addr\n into null terminated page? */
+ if (entry_len + 1 > PAGE_SIZE - str_len - 1)
break;
- qeth_l3_ipaddr_to_string(proto, (const u8 *)&ipaddr->u,
- addr_str);
- str_len += snprintf(buf + str_len, PAGE_SIZE - str_len, "%s\n",
- addr_str);
+
+ entry_len = scnprintf(buf, PAGE_SIZE - str_len, "%s\n",
+ addr_str);
+ str_len += entry_len;
+ buf += entry_len;
}
mutex_unlock(&card->ip_lock);
- str_len += snprintf(buf + str_len, PAGE_SIZE - str_len, "\n");
- return str_len;
+ return str_len ? str_len : scnprintf(buf, PAGE_SIZE, "\n");
}
static ssize_t qeth_l3_dev_vipa_add4_show(struct device *dev,
@@ -658,63 +661,34 @@ static ssize_t qeth_l3_dev_vipa_add4_show(struct device *dev,
QETH_IP_TYPE_VIPA);
}
-static int qeth_l3_parse_vipae(const char *buf, enum qeth_prot_versions proto,
- u8 *addr)
-{
- if (qeth_l3_string_to_ipaddr(buf, proto, addr)) {
- return -EINVAL;
- }
- return 0;
-}
-
-static ssize_t qeth_l3_dev_vipa_add_store(const char *buf, size_t count,
- struct qeth_card *card, enum qeth_prot_versions proto)
+static ssize_t qeth_l3_vipa_store(struct device *dev, const char *buf, bool add,
+ size_t count, enum qeth_prot_versions proto)
{
+ struct qeth_card *card = dev_get_drvdata(dev);
u8 addr[16] = {0, };
int rc;
- mutex_lock(&card->conf_mutex);
- rc = qeth_l3_parse_vipae(buf, proto, addr);
+ rc = qeth_l3_string_to_ipaddr(buf, proto, addr);
if (!rc)
- rc = qeth_l3_modify_rxip_vipa(card, true, addr,
+ rc = qeth_l3_modify_rxip_vipa(card, add, addr,
QETH_IP_TYPE_VIPA, proto);
- mutex_unlock(&card->conf_mutex);
return rc ? rc : count;
}
static ssize_t qeth_l3_dev_vipa_add4_store(struct device *dev,
struct device_attribute *attr, const char *buf, size_t count)
{
- struct qeth_card *card = dev_get_drvdata(dev);
-
- return qeth_l3_dev_vipa_add_store(buf, count, card, QETH_PROT_IPV4);
+ return qeth_l3_vipa_store(dev, buf, true, count, QETH_PROT_IPV4);
}
static QETH_DEVICE_ATTR(vipa_add4, add4, 0644,
qeth_l3_dev_vipa_add4_show,
qeth_l3_dev_vipa_add4_store);
-static ssize_t qeth_l3_dev_vipa_del_store(const char *buf, size_t count,
- struct qeth_card *card, enum qeth_prot_versions proto)
-{
- u8 addr[16];
- int rc;
-
- mutex_lock(&card->conf_mutex);
- rc = qeth_l3_parse_vipae(buf, proto, addr);
- if (!rc)
- rc = qeth_l3_modify_rxip_vipa(card, false, addr,
- QETH_IP_TYPE_VIPA, proto);
- mutex_unlock(&card->conf_mutex);
- return rc ? rc : count;
-}
-
static ssize_t qeth_l3_dev_vipa_del4_store(struct device *dev,
struct device_attribute *attr, const char *buf, size_t count)
{
- struct qeth_card *card = dev_get_drvdata(dev);
-
- return qeth_l3_dev_vipa_del_store(buf, count, card, QETH_PROT_IPV4);
+ return qeth_l3_vipa_store(dev, buf, true, count, QETH_PROT_IPV4);
}
static QETH_DEVICE_ATTR(vipa_del4, del4, 0200, NULL,
@@ -731,9 +705,7 @@ static ssize_t qeth_l3_dev_vipa_add6_show(struct device *dev,
static ssize_t qeth_l3_dev_vipa_add6_store(struct device *dev,
struct device_attribute *attr, const char *buf, size_t count)
{
- struct qeth_card *card = dev_get_drvdata(dev);
-
- return qeth_l3_dev_vipa_add_store(buf, count, card, QETH_PROT_IPV6);
+ return qeth_l3_vipa_store(dev, buf, true, count, QETH_PROT_IPV6);
}
static QETH_DEVICE_ATTR(vipa_add6, add6, 0644,
@@ -743,9 +715,7 @@ static QETH_DEVICE_ATTR(vipa_add6, add6, 0644,
static ssize_t qeth_l3_dev_vipa_del6_store(struct device *dev,
struct device_attribute *attr, const char *buf, size_t count)
{
- struct qeth_card *card = dev_get_drvdata(dev);
-
- return qeth_l3_dev_vipa_del_store(buf, count, card, QETH_PROT_IPV6);
+ return qeth_l3_vipa_store(dev, buf, false, count, QETH_PROT_IPV6);
}
static QETH_DEVICE_ATTR(vipa_del6, del6, 0200, NULL,
@@ -798,54 +768,34 @@ static int qeth_l3_parse_rxipe(const char *buf, enum qeth_prot_versions proto,
return 0;
}
-static ssize_t qeth_l3_dev_rxip_add_store(const char *buf, size_t count,
- struct qeth_card *card, enum qeth_prot_versions proto)
+static ssize_t qeth_l3_rxip_store(struct device *dev, const char *buf, bool add,
+ size_t count, enum qeth_prot_versions proto)
{
+ struct qeth_card *card = dev_get_drvdata(dev);
u8 addr[16] = {0, };
int rc;
- mutex_lock(&card->conf_mutex);
rc = qeth_l3_parse_rxipe(buf, proto, addr);
if (!rc)
- rc = qeth_l3_modify_rxip_vipa(card, true, addr,
+ rc = qeth_l3_modify_rxip_vipa(card, add, addr,
QETH_IP_TYPE_RXIP, proto);
- mutex_unlock(&card->conf_mutex);
return rc ? rc : count;
}
static ssize_t qeth_l3_dev_rxip_add4_store(struct device *dev,
struct device_attribute *attr, const char *buf, size_t count)
{
- struct qeth_card *card = dev_get_drvdata(dev);
-
- return qeth_l3_dev_rxip_add_store(buf, count, card, QETH_PROT_IPV4);
+ return qeth_l3_rxip_store(dev, buf, true, count, QETH_PROT_IPV4);
}
static QETH_DEVICE_ATTR(rxip_add4, add4, 0644,
qeth_l3_dev_rxip_add4_show,
qeth_l3_dev_rxip_add4_store);
-static ssize_t qeth_l3_dev_rxip_del_store(const char *buf, size_t count,
- struct qeth_card *card, enum qeth_prot_versions proto)
-{
- u8 addr[16];
- int rc;
-
- mutex_lock(&card->conf_mutex);
- rc = qeth_l3_parse_rxipe(buf, proto, addr);
- if (!rc)
- rc = qeth_l3_modify_rxip_vipa(card, false, addr,
- QETH_IP_TYPE_RXIP, proto);
- mutex_unlock(&card->conf_mutex);
- return rc ? rc : count;
-}
-
static ssize_t qeth_l3_dev_rxip_del4_store(struct device *dev,
struct device_attribute *attr, const char *buf, size_t count)
{
- struct qeth_card *card = dev_get_drvdata(dev);
-
- return qeth_l3_dev_rxip_del_store(buf, count, card, QETH_PROT_IPV4);
+ return qeth_l3_rxip_store(dev, buf, false, count, QETH_PROT_IPV4);
}
static QETH_DEVICE_ATTR(rxip_del4, del4, 0200, NULL,
@@ -862,9 +812,7 @@ static ssize_t qeth_l3_dev_rxip_add6_show(struct device *dev,
static ssize_t qeth_l3_dev_rxip_add6_store(struct device *dev,
struct device_attribute *attr, const char *buf, size_t count)
{
- struct qeth_card *card = dev_get_drvdata(dev);
-
- return qeth_l3_dev_rxip_add_store(buf, count, card, QETH_PROT_IPV6);
+ return qeth_l3_rxip_store(dev, buf, true, count, QETH_PROT_IPV6);
}
static QETH_DEVICE_ATTR(rxip_add6, add6, 0644,
@@ -874,9 +822,7 @@ static QETH_DEVICE_ATTR(rxip_add6, add6, 0644,
static ssize_t qeth_l3_dev_rxip_del6_store(struct device *dev,
struct device_attribute *attr, const char *buf, size_t count)
{
- struct qeth_card *card = dev_get_drvdata(dev);
-
- return qeth_l3_dev_rxip_del_store(buf, count, card, QETH_PROT_IPV6);
+ return qeth_l3_rxip_store(dev, buf, false, count, QETH_PROT_IPV6);
}
static QETH_DEVICE_ATTR(rxip_del6, del6, 0200, NULL,
diff --git a/drivers/scsi/BusLogic.c b/drivers/scsi/BusLogic.c
index c25e8a54e869..3170b295a5da 100644
--- a/drivers/scsi/BusLogic.c
+++ b/drivers/scsi/BusLogic.c
@@ -134,7 +134,7 @@ static char *blogic_cmd_failure_reason;
static void blogic_announce_drvr(struct blogic_adapter *adapter)
{
blogic_announce("***** BusLogic SCSI Driver Version " blogic_drvr_version " of " blogic_drvr_date " *****\n", adapter);
- blogic_announce("Copyright 1995-1998 by Leonard N. Zubkoff " "<lnz@dandelion.com>\n", adapter);
+ blogic_announce("Copyright 1995-1998 by Leonard N. Zubkoff <lnz@dandelion.com>\n", adapter);
}
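
Two mechanical cleanups run through the BusLogic hunks below: message strings that were split across adjacent literals (an old 80-column habit) are rejoined so they can be grepped in one piece, and %X becomes %lX where the argument is a long-sized address. A sketch, assuming io_addr is unsigned long as the new conversions imply:

	unsigned long io_addr = pci_resource_start(pci_device, 0);

	blogic_err("at PCI Bus %d Device %d I/O Address 0x%lX\n",
		   NULL, bus, device, io_addr);
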
@@ -440,7 +440,7 @@ static int blogic_cmd(struct blogic_adapter *adapter, enum blogic_opcode opcode,
goto done;
}
if (blogic_global_options.trace_config)
- blogic_notice("blogic_cmd(%02X) Status = %02X: " "(Modify I/O Address)\n", adapter, opcode, statusreg.all);
+ blogic_notice("blogic_cmd(%02X) Status = %02X: (Modify I/O Address)\n", adapter, opcode, statusreg.all);
result = 0;
goto done;
}
@@ -716,23 +716,23 @@ static int __init blogic_init_mm_probeinfo(struct blogic_adapter *adapter)
pci_addr = base_addr1 = pci_resource_start(pci_device, 1);
if (pci_resource_flags(pci_device, 0) & IORESOURCE_MEM) {
- blogic_err("BusLogic: Base Address0 0x%X not I/O for " "MultiMaster Host Adapter\n", NULL, base_addr0);
- blogic_err("at PCI Bus %d Device %d I/O Address 0x%X\n", NULL, bus, device, io_addr);
+ blogic_err("BusLogic: Base Address0 0x%lX not I/O for MultiMaster Host Adapter\n", NULL, base_addr0);
+ blogic_err("at PCI Bus %d Device %d I/O Address 0x%lX\n", NULL, bus, device, io_addr);
continue;
}
if (pci_resource_flags(pci_device, 1) & IORESOURCE_IO) {
- blogic_err("BusLogic: Base Address1 0x%X not Memory for " "MultiMaster Host Adapter\n", NULL, base_addr1);
- blogic_err("at PCI Bus %d Device %d PCI Address 0x%X\n", NULL, bus, device, pci_addr);
+ blogic_err("BusLogic: Base Address1 0x%lX not Memory for MultiMaster Host Adapter\n", NULL, base_addr1);
+ blogic_err("at PCI Bus %d Device %d PCI Address 0x%lX\n", NULL, bus, device, pci_addr);
continue;
}
if (irq_ch == 0) {
- blogic_err("BusLogic: IRQ Channel %d invalid for " "MultiMaster Host Adapter\n", NULL, irq_ch);
- blogic_err("at PCI Bus %d Device %d I/O Address 0x%X\n", NULL, bus, device, io_addr);
+ blogic_err("BusLogic: IRQ Channel %d invalid for MultiMaster Host Adapter\n", NULL, irq_ch);
+ blogic_err("at PCI Bus %d Device %d I/O Address 0x%lX\n", NULL, bus, device, io_addr);
continue;
}
if (blogic_global_options.trace_probe) {
- blogic_notice("BusLogic: PCI MultiMaster Host Adapter " "detected at\n", NULL);
- blogic_notice("BusLogic: PCI Bus %d Device %d I/O Address " "0x%X PCI Address 0x%X\n", NULL, bus, device, io_addr, pci_addr);
+ blogic_notice("BusLogic: PCI MultiMaster Host Adapter detected at\n", NULL);
+ blogic_notice("BusLogic: PCI Bus %d Device %d I/O Address 0x%lX PCI Address 0x%lX\n", NULL, bus, device, io_addr, pci_addr);
}
/*
Issue the Inquire PCI Host Adapter Information command to determine
@@ -818,7 +818,7 @@ static int __init blogic_init_mm_probeinfo(struct blogic_adapter *adapter)
nonpr_mmcount++;
mmcount++;
} else
- blogic_warn("BusLogic: Too many Host Adapters " "detected\n", NULL);
+ blogic_warn("BusLogic: Too many Host Adapters detected\n", NULL);
}
/*
If the AutoSCSI "Use Bus And Device # For PCI Scanning Seq."
@@ -956,23 +956,23 @@ static int __init blogic_init_fp_probeinfo(struct blogic_adapter *adapter)
pci_addr = base_addr1 = pci_resource_start(pci_device, 1);
#ifdef CONFIG_SCSI_FLASHPOINT
if (pci_resource_flags(pci_device, 0) & IORESOURCE_MEM) {
- blogic_err("BusLogic: Base Address0 0x%X not I/O for " "FlashPoint Host Adapter\n", NULL, base_addr0);
- blogic_err("at PCI Bus %d Device %d I/O Address 0x%X\n", NULL, bus, device, io_addr);
+ blogic_err("BusLogic: Base Address0 0x%lX not I/O for FlashPoint Host Adapter\n", NULL, base_addr0);
+ blogic_err("at PCI Bus %d Device %d I/O Address 0x%lX\n", NULL, bus, device, io_addr);
continue;
}
if (pci_resource_flags(pci_device, 1) & IORESOURCE_IO) {
- blogic_err("BusLogic: Base Address1 0x%X not Memory for " "FlashPoint Host Adapter\n", NULL, base_addr1);
- blogic_err("at PCI Bus %d Device %d PCI Address 0x%X\n", NULL, bus, device, pci_addr);
+ blogic_err("BusLogic: Base Address1 0x%lX not Memory for FlashPoint Host Adapter\n", NULL, base_addr1);
+ blogic_err("at PCI Bus %d Device %d PCI Address 0x%lX\n", NULL, bus, device, pci_addr);
continue;
}
if (irq_ch == 0) {
- blogic_err("BusLogic: IRQ Channel %d invalid for " "FlashPoint Host Adapter\n", NULL, irq_ch);
- blogic_err("at PCI Bus %d Device %d I/O Address 0x%X\n", NULL, bus, device, io_addr);
+ blogic_err("BusLogic: IRQ Channel %d invalid for FlashPoint Host Adapter\n", NULL, irq_ch);
+ blogic_err("at PCI Bus %d Device %d I/O Address 0x%lX\n", NULL, bus, device, io_addr);
continue;
}
if (blogic_global_options.trace_probe) {
- blogic_notice("BusLogic: FlashPoint Host Adapter " "detected at\n", NULL);
- blogic_notice("BusLogic: PCI Bus %d Device %d I/O Address " "0x%X PCI Address 0x%X\n", NULL, bus, device, io_addr, pci_addr);
+ blogic_notice("BusLogic: FlashPoint Host Adapter detected at\n", NULL);
+ blogic_notice("BusLogic: PCI Bus %d Device %d I/O Address 0x%lX PCI Address 0x%lX\n", NULL, bus, device, io_addr, pci_addr);
}
if (blogic_probeinfo_count < BLOGIC_MAX_ADAPTERS) {
struct blogic_probeinfo *probeinfo =
@@ -987,11 +987,11 @@ static int __init blogic_init_fp_probeinfo(struct blogic_adapter *adapter)
probeinfo->pci_device = pci_dev_get(pci_device);
fpcount++;
} else
- blogic_warn("BusLogic: Too many Host Adapters " "detected\n", NULL);
+ blogic_warn("BusLogic: Too many Host Adapters detected\n", NULL);
#else
- blogic_err("BusLogic: FlashPoint Host Adapter detected at " "PCI Bus %d Device %d\n", NULL, bus, device);
- blogic_err("BusLogic: I/O Address 0x%X PCI Address 0x%X, irq %d, " "but FlashPoint\n", NULL, io_addr, pci_addr, irq_ch);
- blogic_err("BusLogic: support was omitted in this kernel " "configuration.\n", NULL);
+ blogic_err("BusLogic: FlashPoint Host Adapter detected at PCI Bus %d Device %d\n", NULL, bus, device);
+ blogic_err("BusLogic: I/O Address 0x%lX PCI Address 0x%lX, irq %d, but FlashPoint\n", NULL, io_addr, pci_addr, irq_ch);
+ blogic_err("BusLogic: support was omitted in this kernel configuration.\n", NULL);
#endif
}
/*
@@ -1099,9 +1099,9 @@ static bool blogic_failure(struct blogic_adapter *adapter, char *msg)
if (adapter->adapter_bus_type == BLOGIC_PCI_BUS) {
blogic_err("While configuring BusLogic PCI Host Adapter at\n",
adapter);
- blogic_err("Bus %d Device %d I/O Address 0x%X PCI Address 0x%X:\n", adapter, adapter->bus, adapter->dev, adapter->io_addr, adapter->pci_addr);
+ blogic_err("Bus %d Device %d I/O Address 0x%lX PCI Address 0x%lX:\n", adapter, adapter->bus, adapter->dev, adapter->io_addr, adapter->pci_addr);
} else
- blogic_err("While configuring BusLogic Host Adapter at " "I/O Address 0x%X:\n", adapter, adapter->io_addr);
+ blogic_err("While configuring BusLogic Host Adapter at I/O Address 0x%lX:\n", adapter, adapter->io_addr);
blogic_err("%s FAILED - DETACHING\n", adapter, msg);
if (blogic_cmd_failure_reason != NULL)
blogic_err("ADDITIONAL FAILURE INFO - %s\n", adapter,
@@ -1129,13 +1129,13 @@ static bool __init blogic_probe(struct blogic_adapter *adapter)
fpinfo->present = false;
if (!(FlashPoint_ProbeHostAdapter(fpinfo) == 0 &&
fpinfo->present)) {
- blogic_err("BusLogic: FlashPoint Host Adapter detected at " "PCI Bus %d Device %d\n", adapter, adapter->bus, adapter->dev);
- blogic_err("BusLogic: I/O Address 0x%X PCI Address 0x%X, " "but FlashPoint\n", adapter, adapter->io_addr, adapter->pci_addr);
+ blogic_err("BusLogic: FlashPoint Host Adapter detected at PCI Bus %d Device %d\n", adapter, adapter->bus, adapter->dev);
+ blogic_err("BusLogic: I/O Address 0x%lX PCI Address 0x%lX, but FlashPoint\n", adapter, adapter->io_addr, adapter->pci_addr);
blogic_err("BusLogic: Probe Function failed to validate it.\n", adapter);
return false;
}
if (blogic_global_options.trace_probe)
- blogic_notice("BusLogic_Probe(0x%X): FlashPoint Found\n", adapter, adapter->io_addr);
+ blogic_notice("BusLogic_Probe(0x%lX): FlashPoint Found\n", adapter, adapter->io_addr);
/*
Indicate the Host Adapter Probe completed successfully.
*/
@@ -1152,7 +1152,7 @@ static bool __init blogic_probe(struct blogic_adapter *adapter)
intreg.all = blogic_rdint(adapter);
georeg.all = blogic_rdgeom(adapter);
if (blogic_global_options.trace_probe)
- blogic_notice("BusLogic_Probe(0x%X): Status 0x%02X, Interrupt 0x%02X, " "Geometry 0x%02X\n", adapter, adapter->io_addr, statusreg.all, intreg.all, georeg.all);
+ blogic_notice("BusLogic_Probe(0x%lX): Status 0x%02X, Interrupt 0x%02X, Geometry 0x%02X\n", adapter, adapter->io_addr, statusreg.all, intreg.all, georeg.all);
if (statusreg.all == 0 || statusreg.sr.diag_active ||
statusreg.sr.cmd_param_busy || statusreg.sr.rsvd ||
statusreg.sr.cmd_invalid || intreg.ir.rsvd != 0)
@@ -1231,7 +1231,7 @@ static bool blogic_hwreset(struct blogic_adapter *adapter, bool hard_reset)
udelay(100);
}
if (blogic_global_options.trace_hw_reset)
- blogic_notice("BusLogic_HardwareReset(0x%X): Diagnostic Active, " "Status 0x%02X\n", adapter, adapter->io_addr, statusreg.all);
+ blogic_notice("BusLogic_HardwareReset(0x%lX): Diagnostic Active, Status 0x%02X\n", adapter, adapter->io_addr, statusreg.all);
if (timeout < 0)
return false;
/*
@@ -1251,7 +1251,7 @@ static bool blogic_hwreset(struct blogic_adapter *adapter, bool hard_reset)
udelay(100);
}
if (blogic_global_options.trace_hw_reset)
- blogic_notice("BusLogic_HardwareReset(0x%X): Diagnostic Completed, " "Status 0x%02X\n", adapter, adapter->io_addr, statusreg.all);
+ blogic_notice("BusLogic_HardwareReset(0x%lX): Diagnostic Completed, Status 0x%02X\n", adapter, adapter->io_addr, statusreg.all);
if (timeout < 0)
return false;
/*
@@ -1267,7 +1267,7 @@ static bool blogic_hwreset(struct blogic_adapter *adapter, bool hard_reset)
udelay(100);
}
if (blogic_global_options.trace_hw_reset)
- blogic_notice("BusLogic_HardwareReset(0x%X): Host Adapter Ready, " "Status 0x%02X\n", adapter, adapter->io_addr, statusreg.all);
+ blogic_notice("BusLogic_HardwareReset(0x%lX): Host Adapter Ready, Status 0x%02X\n", adapter, adapter->io_addr, statusreg.all);
if (timeout < 0)
return false;
/*
@@ -1323,7 +1323,7 @@ static bool __init blogic_checkadapter(struct blogic_adapter *adapter)
Provide tracing information if requested and return.
*/
if (blogic_global_options.trace_probe)
- blogic_notice("BusLogic_Check(0x%X): MultiMaster %s\n", adapter,
+ blogic_notice("BusLogic_Check(0x%lX): MultiMaster %s\n", adapter,
adapter->io_addr,
(result ? "Found" : "Not Found"));
return result;
@@ -1836,7 +1836,7 @@ static bool __init blogic_reportconfig(struct blogic_adapter *adapter)
int tgt_id;
blogic_info("Configuring BusLogic Model %s %s%s%s%s SCSI Host Adapter\n", adapter, adapter->model, blogic_adapter_busnames[adapter->adapter_bus_type], (adapter->wide ? " Wide" : ""), (adapter->differential ? " Differential" : ""), (adapter->ultra ? " Ultra" : ""));
- blogic_info(" Firmware Version: %s, I/O Address: 0x%X, " "IRQ Channel: %d/%s\n", adapter, adapter->fw_ver, adapter->io_addr, adapter->irq_ch, (adapter->level_int ? "Level" : "Edge"));
+ blogic_info(" Firmware Version: %s, I/O Address: 0x%lX, IRQ Channel: %d/%s\n", adapter, adapter->fw_ver, adapter->io_addr, adapter->irq_ch, (adapter->level_int ? "Level" : "Edge"));
if (adapter->adapter_bus_type != BLOGIC_PCI_BUS) {
blogic_info(" DMA Channel: ", adapter);
if (adapter->dma_ch > 0)
@@ -1844,7 +1844,7 @@ static bool __init blogic_reportconfig(struct blogic_adapter *adapter)
else
blogic_info("None, ", adapter);
if (adapter->bios_addr > 0)
- blogic_info("BIOS Address: 0x%X, ", adapter,
+ blogic_info("BIOS Address: 0x%lX, ", adapter,
adapter->bios_addr);
else
blogic_info("BIOS Address: None, ", adapter);
@@ -1852,7 +1852,7 @@ static bool __init blogic_reportconfig(struct blogic_adapter *adapter)
blogic_info(" PCI Bus: %d, Device: %d, Address: ", adapter,
adapter->bus, adapter->dev);
if (adapter->pci_addr > 0)
- blogic_info("0x%X, ", adapter, adapter->pci_addr);
+ blogic_info("0x%lX, ", adapter, adapter->pci_addr);
else
blogic_info("Unassigned, ", adapter);
}
@@ -1932,10 +1932,10 @@ static bool __init blogic_reportconfig(struct blogic_adapter *adapter)
blogic_info(" Disconnect/Reconnect: %s, Tagged Queuing: %s\n", adapter,
discon_msg, tagq_msg);
if (blogic_multimaster_type(adapter)) {
- blogic_info(" Scatter/Gather Limit: %d of %d segments, " "Mailboxes: %d\n", adapter, adapter->drvr_sglimit, adapter->adapter_sglimit, adapter->mbox_count);
- blogic_info(" Driver Queue Depth: %d, " "Host Adapter Queue Depth: %d\n", adapter, adapter->drvr_qdepth, adapter->adapter_qdepth);
+ blogic_info(" Scatter/Gather Limit: %d of %d segments, Mailboxes: %d\n", adapter, adapter->drvr_sglimit, adapter->adapter_sglimit, adapter->mbox_count);
+ blogic_info(" Driver Queue Depth: %d, Host Adapter Queue Depth: %d\n", adapter, adapter->drvr_qdepth, adapter->adapter_qdepth);
} else
- blogic_info(" Driver Queue Depth: %d, " "Scatter/Gather Limit: %d segments\n", adapter, adapter->drvr_qdepth, adapter->drvr_sglimit);
+ blogic_info(" Driver Queue Depth: %d, Scatter/Gather Limit: %d segments\n", adapter, adapter->drvr_qdepth, adapter->drvr_sglimit);
blogic_info(" Tagged Queue Depth: ", adapter);
common_tagq_depth = true;
for (tgt_id = 1; tgt_id < adapter->maxdev; tgt_id++)
@@ -2717,7 +2717,7 @@ static void blogic_scan_inbox(struct blogic_adapter *adapter)
then there is most likely a bug in
the Host Adapter firmware.
*/
- blogic_warn("Illegal CCB #%ld status %d in " "Incoming Mailbox\n", adapter, ccb->serial, ccb->status);
+ blogic_warn("Illegal CCB #%ld status %d in Incoming Mailbox\n", adapter, ccb->serial, ccb->status);
}
}
next_inbox->comp_code = BLOGIC_INBOX_FREE;
@@ -2752,7 +2752,7 @@ static void blogic_process_ccbs(struct blogic_adapter *adapter)
if (ccb->opcode == BLOGIC_BDR) {
int tgt_id = ccb->tgt_id;
- blogic_warn("Bus Device Reset CCB #%ld to Target " "%d Completed\n", adapter, ccb->serial, tgt_id);
+ blogic_warn("Bus Device Reset CCB #%ld to Target %d Completed\n", adapter, ccb->serial, tgt_id);
blogic_inc_count(&adapter->tgt_stats[tgt_id].bdr_done);
adapter->tgt_flags[tgt_id].tagq_active = false;
adapter->cmds_since_rst[tgt_id] = 0;
@@ -2829,7 +2829,7 @@ static void blogic_process_ccbs(struct blogic_adapter *adapter)
if (blogic_global_options.trace_err) {
int i;
blogic_notice("CCB #%ld Target %d: Result %X Host "
- "Adapter Status %02X " "Target Status %02X\n", adapter, ccb->serial, ccb->tgt_id, command->result, ccb->adapter_status, ccb->tgt_status);
+ "Adapter Status %02X Target Status %02X\n", adapter, ccb->serial, ccb->tgt_id, command->result, ccb->adapter_status, ccb->tgt_status);
blogic_notice("CDB ", adapter);
for (i = 0; i < ccb->cdblen; i++)
blogic_notice(" %02X", adapter, ccb->cdb[i]);
@@ -3203,12 +3203,12 @@ static int blogic_qcmd_lck(struct scsi_cmnd *command,
*/
if (!blogic_write_outbox(adapter, BLOGIC_MBOX_START, ccb)) {
spin_unlock_irq(adapter->scsi_host->host_lock);
- blogic_warn("Unable to write Outgoing Mailbox - " "Pausing for 1 second\n", adapter);
+ blogic_warn("Unable to write Outgoing Mailbox - Pausing for 1 second\n", adapter);
blogic_delay(1);
spin_lock_irq(adapter->scsi_host->host_lock);
if (!blogic_write_outbox(adapter, BLOGIC_MBOX_START,
ccb)) {
- blogic_warn("Still unable to write Outgoing Mailbox - " "Host Adapter Dead?\n", adapter);
+ blogic_warn("Still unable to write Outgoing Mailbox - Host Adapter Dead?\n", adapter);
blogic_dealloc_ccb(ccb, 1);
command->result = DID_ERROR << 16;
command->scsi_done(command);
@@ -3443,8 +3443,8 @@ static int blogic_diskparam(struct scsi_device *sdev, struct block_device *dev,
if (diskparam->cylinders != saved_cyl)
blogic_warn("Adopting Geometry %d/%d from Partition Table\n", adapter, diskparam->heads, diskparam->sectors);
} else if (part_end_head > 0 || part_end_sector > 0) {
- blogic_warn("Warning: Partition Table appears to " "have Geometry %d/%d which is\n", adapter, part_end_head + 1, part_end_sector);
- blogic_warn("not compatible with current BusLogic " "Host Adapter Geometry %d/%d\n", adapter, diskparam->heads, diskparam->sectors);
+ blogic_warn("Warning: Partition Table appears to have Geometry %d/%d which is\n", adapter, part_end_head + 1, part_end_sector);
+ blogic_warn("not compatible with current BusLogic Host Adapter Geometry %d/%d\n", adapter, diskparam->heads, diskparam->sectors);
}
}
kfree(buf);
@@ -3689,7 +3689,7 @@ static int __init blogic_parseopts(char *options)
blogic_probe_options.probe134 = true;
break;
default:
- blogic_err("BusLogic: Invalid Driver Options " "(invalid I/O Address 0x%X)\n", NULL, io_addr);
+ blogic_err("BusLogic: Invalid Driver Options (invalid I/O Address 0x%lX)\n", NULL, io_addr);
return 0;
}
} else if (blogic_parse(&options, "NoProbeISA"))
@@ -3710,7 +3710,7 @@ static int __init blogic_parseopts(char *options)
for (tgt_id = 0; tgt_id < BLOGIC_MAXDEV; tgt_id++) {
unsigned short qdepth = simple_strtoul(options, &options, 0);
if (qdepth > BLOGIC_MAX_TAG_DEPTH) {
- blogic_err("BusLogic: Invalid Driver Options " "(invalid Queue Depth %d)\n", NULL, qdepth);
+ blogic_err("BusLogic: Invalid Driver Options (invalid Queue Depth %d)\n", NULL, qdepth);
return 0;
}
drvr_opts->qdepth[tgt_id] = qdepth;
@@ -3719,12 +3719,12 @@ static int __init blogic_parseopts(char *options)
else if (*options == ']')
break;
else {
- blogic_err("BusLogic: Invalid Driver Options " "(',' or ']' expected at '%s')\n", NULL, options);
+ blogic_err("BusLogic: Invalid Driver Options (',' or ']' expected at '%s')\n", NULL, options);
return 0;
}
}
if (*options != ']') {
- blogic_err("BusLogic: Invalid Driver Options " "(']' expected at '%s')\n", NULL, options);
+ blogic_err("BusLogic: Invalid Driver Options (']' expected at '%s')\n", NULL, options);
return 0;
} else
options++;
@@ -3732,7 +3732,7 @@ static int __init blogic_parseopts(char *options)
unsigned short qdepth = simple_strtoul(options, &options, 0);
if (qdepth == 0 ||
qdepth > BLOGIC_MAX_TAG_DEPTH) {
- blogic_err("BusLogic: Invalid Driver Options " "(invalid Queue Depth %d)\n", NULL, qdepth);
+ blogic_err("BusLogic: Invalid Driver Options (invalid Queue Depth %d)\n", NULL, qdepth);
return 0;
}
drvr_opts->common_qdepth = qdepth;
@@ -3778,7 +3778,7 @@ static int __init blogic_parseopts(char *options)
unsigned short bus_settle_time =
simple_strtoul(options, &options, 0);
if (bus_settle_time > 5 * 60) {
- blogic_err("BusLogic: Invalid Driver Options " "(invalid Bus Settle Time %d)\n", NULL, bus_settle_time);
+ blogic_err("BusLogic: Invalid Driver Options (invalid Bus Settle Time %d)\n", NULL, bus_settle_time);
return 0;
}
drvr_opts->bus_settle_time = bus_settle_time;
@@ -3803,14 +3803,14 @@ static int __init blogic_parseopts(char *options)
if (*options == ',')
options++;
else if (*options != ';' && *options != '\0') {
- blogic_err("BusLogic: Unexpected Driver Option '%s' " "ignored\n", NULL, options);
+ blogic_err("BusLogic: Unexpected Driver Option '%s' ignored\n", NULL, options);
*options = '\0';
}
}
if (!(blogic_drvr_options_count == 0 ||
blogic_probeinfo_count == 0 ||
blogic_drvr_options_count == blogic_probeinfo_count)) {
- blogic_err("BusLogic: Invalid Driver Options " "(all or no I/O Addresses must be specified)\n", NULL);
+ blogic_err("BusLogic: Invalid Driver Options (all or no I/O Addresses must be specified)\n", NULL);
return 0;
}
/*
@@ -3864,7 +3864,7 @@ static int __init blogic_setup(char *str)
(void) get_options(str, ARRAY_SIZE(ints), ints);
if (ints[0] != 0) {
- blogic_err("BusLogic: Obsolete Command Line Entry " "Format Ignored\n", NULL);
+ blogic_err("BusLogic: Obsolete Command Line Entry Format Ignored\n", NULL);
return 0;
}
if (str == NULL || *str == '\0')
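The BusLogic hunks above make two mechanical changes: adjacent string literals are folded into single literals, and %X becomes %lX wherever the argument is an unsigned long (io_addr, pci_addr, bios_addr). A minimal sketch of why the length modifier matters, using hypothetical values rather than driver code:

	unsigned long io_addr = 0x1c0000000UL;	/* wider than 32 bits on 64-bit hosts */
	printk("0x%X\n", io_addr);	/* mismatched: %X consumes only an unsigned int */
	printk("0x%lX\n", io_addr);	/* correct: %lX matches unsigned long */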
diff --git a/drivers/scsi/Kconfig b/drivers/scsi/Kconfig
index 90cf4691b8c3..a7881f8eb05e 100644
--- a/drivers/scsi/Kconfig
+++ b/drivers/scsi/Kconfig
@@ -68,6 +68,7 @@ comment "SCSI support type (disk, tape, CD-ROM)"
config BLK_DEV_SD
tristate "SCSI disk support"
depends on SCSI
+ select BLK_DEV_INTEGRITY_T10 if BLK_DEV_INTEGRITY
---help---
If you want to use SCSI hard disks, Fibre Channel disks,
Serial ATA (SATA) or Parallel ATA (PATA) hard disks,
diff --git a/drivers/scsi/aic7xxx/aic79xx_osm_pci.c b/drivers/scsi/aic7xxx/aic79xx_osm_pci.c
index 8466aa784ec1..8b891a05d9e7 100644
--- a/drivers/scsi/aic7xxx/aic79xx_osm_pci.c
+++ b/drivers/scsi/aic7xxx/aic79xx_osm_pci.c
@@ -293,7 +293,7 @@ ahd_linux_pci_reserve_mem_region(struct ahd_softc *ahd,
if (!request_mem_region(start, 0x1000, "aic79xx"))
error = ENOMEM;
if (!error) {
- *maddr = ioremap_nocache(base_page, base_offset + 512);
+ *maddr = ioremap(base_page, base_offset + 512);
if (*maddr == NULL) {
error = ENOMEM;
release_mem_region(start, 0x1000);
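ioremap_nocache() had long been a plain alias for ioremap(), which returns an uncached MMIO mapping by default, so this and the matching conversions below (aic7xxx, arcmsr, be2iscsi, bnx2fc, bnx2i, csiostor, hpsa, lasi700) are one-for-one. The resulting pattern, sketched with hypothetical names:

	void __iomem *regs;

	regs = ioremap(pci_resource_start(pdev, 0), pci_resource_len(pdev, 0));
	if (!regs)
		return -ENOMEM;
	/* ... access registers via readl(regs + off) / writel(val, regs + off) ... */
	iounmap(regs);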
diff --git a/drivers/scsi/aic7xxx/aic7xxx_core.c b/drivers/scsi/aic7xxx/aic7xxx_core.c
index a9d40d3b90ef..4190a025381a 100644
--- a/drivers/scsi/aic7xxx/aic7xxx_core.c
+++ b/drivers/scsi/aic7xxx/aic7xxx_core.c
@@ -2314,7 +2314,7 @@ ahc_find_syncrate(struct ahc_softc *ahc, u_int *period,
* At some speeds, we only support
* ST transfers.
*/
- if ((syncrate->sxfr_u2 & ST_SXFR) != 0)
+ if ((syncrate->sxfr_u2 & ST_SXFR) != 0)
*ppr_options &= ~MSG_EXT_PPR_DT_REQ;
break;
}
diff --git a/drivers/scsi/aic7xxx/aic7xxx_osm_pci.c b/drivers/scsi/aic7xxx/aic7xxx_osm_pci.c
index 717d8d1082ce..9b293b1f0b71 100644
--- a/drivers/scsi/aic7xxx/aic7xxx_osm_pci.c
+++ b/drivers/scsi/aic7xxx/aic7xxx_osm_pci.c
@@ -372,7 +372,7 @@ ahc_linux_pci_reserve_mem_region(struct ahc_softc *ahc,
if (!request_mem_region(start, 0x1000, "aic7xxx"))
error = ENOMEM;
if (error == 0) {
- *maddr = ioremap_nocache(start, 256);
+ *maddr = ioremap(start, 256);
if (*maddr == NULL) {
error = ENOMEM;
release_mem_region(start, 0x1000);
diff --git a/drivers/scsi/aic94xx/aic94xx_init.c b/drivers/scsi/aic94xx/aic94xx_init.c
index f5781e31f57c..d022407e5645 100644
--- a/drivers/scsi/aic94xx/aic94xx_init.c
+++ b/drivers/scsi/aic94xx/aic94xx_init.c
@@ -54,6 +54,9 @@ static struct scsi_host_template aic94xx_sht = {
.eh_target_reset_handler = sas_eh_target_reset_handler,
.target_destroy = sas_target_destroy,
.ioctl = sas_ioctl,
+#ifdef CONFIG_COMPAT
+ .compat_ioctl = sas_ioctl,
+#endif
.track_queue_depth = 1,
};
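.compat_ioctl is what 32-bit userspace reaches on a 64-bit kernel; aliasing it to the native handler is only safe when every ioctl in the interface is compat-clean (no structure whose layout differs between ABIs), which appears to be the assumption for sas_ioctl here and in the matching hunks below (hisi_sas v1/v2/v3, ipr, isci). The pattern, as a sketch with a hypothetical template name:

static struct scsi_host_template example_sht = {
	/* ... */
	.ioctl		= sas_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl	= sas_ioctl,	/* same handler: ioctl ABI is compat-clean */
#endif
};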
diff --git a/drivers/scsi/arcmsr/arcmsr_hba.c b/drivers/scsi/arcmsr/arcmsr_hba.c
index db687ef8a99e..40dc8eac0e3a 100644
--- a/drivers/scsi/arcmsr/arcmsr_hba.c
+++ b/drivers/scsi/arcmsr/arcmsr_hba.c
@@ -270,7 +270,7 @@ static bool arcmsr_remap_pciregion(struct AdapterControlBlock *acb)
break;
}
case ACB_ADAPTER_TYPE_C:{
- acb->pmuC = ioremap_nocache(pci_resource_start(pdev, 1), pci_resource_len(pdev, 1));
+ acb->pmuC = ioremap(pci_resource_start(pdev, 1), pci_resource_len(pdev, 1));
if (!acb->pmuC) {
printk(KERN_NOTICE "arcmsr%d: memory mapping region fail \n", acb->host->host_no);
return false;
diff --git a/drivers/scsi/be2iscsi/be_main.c b/drivers/scsi/be2iscsi/be_main.c
index 0760d0bd8a10..9b81cfbbc5c5 100644
--- a/drivers/scsi/be2iscsi/be_main.c
+++ b/drivers/scsi/be2iscsi/be_main.c
@@ -453,14 +453,14 @@ static int beiscsi_map_pci_bars(struct beiscsi_hba *phba,
u8 __iomem *addr;
int pcicfg_reg;
- addr = ioremap_nocache(pci_resource_start(pcidev, 2),
+ addr = ioremap(pci_resource_start(pcidev, 2),
pci_resource_len(pcidev, 2));
if (addr == NULL)
return -ENOMEM;
phba->ctrl.csr = addr;
phba->csr_va = addr;
- addr = ioremap_nocache(pci_resource_start(pcidev, 4), 128 * 1024);
+ addr = ioremap(pci_resource_start(pcidev, 4), 128 * 1024);
if (addr == NULL)
goto pci_map_err;
phba->ctrl.db = addr;
@@ -471,7 +471,7 @@ static int beiscsi_map_pci_bars(struct beiscsi_hba *phba,
else
pcicfg_reg = 0;
- addr = ioremap_nocache(pci_resource_start(pcidev, pcicfg_reg),
+ addr = ioremap(pci_resource_start(pcidev, pcicfg_reg),
pci_resource_len(pcidev, pcicfg_reg));
if (addr == NULL)
diff --git a/drivers/scsi/bnx2fc/bnx2fc_hwi.c b/drivers/scsi/bnx2fc/bnx2fc_hwi.c
index f069e09beb10..6f8335ddb1f2 100644
--- a/drivers/scsi/bnx2fc/bnx2fc_hwi.c
+++ b/drivers/scsi/bnx2fc/bnx2fc_hwi.c
@@ -1414,7 +1414,7 @@ int bnx2fc_map_doorbell(struct bnx2fc_rport *tgt)
reg_base = pci_resource_start(hba->pcidev,
BNX2X_DOORBELL_PCI_BAR);
reg_off = (1 << BNX2X_DB_SHIFT) * (context_id & 0x1FFFF);
- tgt->ctx_base = ioremap_nocache(reg_base + reg_off, 4);
+ tgt->ctx_base = ioremap(reg_base + reg_off, 4);
if (!tgt->ctx_base)
return -ENOMEM;
return 0;
diff --git a/drivers/scsi/bnx2i/bnx2i_hwi.c b/drivers/scsi/bnx2i/bnx2i_hwi.c
index 12666313b937..e53ebc5eff85 100644
--- a/drivers/scsi/bnx2i/bnx2i_hwi.c
+++ b/drivers/scsi/bnx2i/bnx2i_hwi.c
@@ -2715,7 +2715,7 @@ int bnx2i_map_ep_dbell_regs(struct bnx2i_endpoint *ep)
reg_base = pci_resource_start(ep->hba->pcidev,
BNX2X_DOORBELL_PCI_BAR);
reg_off = (1 << BNX2X_DB_SHIFT) * (cid_num & 0x1FFFF);
- ep->qp.ctx_base = ioremap_nocache(reg_base + reg_off, 4);
+ ep->qp.ctx_base = ioremap(reg_base + reg_off, 4);
if (!ep->qp.ctx_base)
return -ENOMEM;
goto arm_cq;
@@ -2736,7 +2736,7 @@ int bnx2i_map_ep_dbell_regs(struct bnx2i_endpoint *ep)
/* 5709 device in normal node and 5706/5708 devices */
reg_off = CTX_OFFSET + (MB_KERNEL_CTX_SIZE * cid_num);
- ep->qp.ctx_base = ioremap_nocache(ep->hba->reg_base + reg_off,
+ ep->qp.ctx_base = ioremap(ep->hba->reg_base + reg_off,
MB_KERNEL_CTX_SIZE);
if (!ep->qp.ctx_base)
return -ENOMEM;
diff --git a/drivers/scsi/ch.c b/drivers/scsi/ch.c
index 76751d6c7f0d..ed5f4a6ae270 100644
--- a/drivers/scsi/ch.c
+++ b/drivers/scsi/ch.c
@@ -872,6 +872,10 @@ static long ch_ioctl_compat(struct file * file,
unsigned int cmd, unsigned long arg)
{
scsi_changer *ch = file->private_data;
+ int retval = scsi_ioctl_block_when_processing_errors(ch->device, cmd,
+ file->f_flags & O_NDELAY);
+ if (retval)
+ return retval;
switch (cmd) {
case CHIOGPARAMS:
@@ -883,7 +887,7 @@ static long ch_ioctl_compat(struct file * file,
case CHIOINITELEM:
case CHIOSVOLTAG:
/* compatible */
- return ch_ioctl(file, cmd, arg);
+ return ch_ioctl(file, cmd, (unsigned long)compat_ptr(arg));
case CHIOGSTATUS32:
{
struct changer_element_status32 ces32;
@@ -898,8 +902,7 @@ static long ch_ioctl_compat(struct file * file,
return ch_gstatus(ch, ces32.ces_type, data);
}
default:
- // return scsi_ioctl_compat(ch->device, cmd, (void*)arg);
- return -ENOIOCTLCMD;
+ return scsi_compat_ioctl(ch->device, cmd, compat_ptr(arg));
}
}
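Two distinct fixes land in this ch.c hunk: compat user pointers are now passed through compat_ptr() before reuse (required on s390, where the high bit of a 31-bit address must be cleared), and unrecognized commands fall through to scsi_compat_ioctl() instead of returning -ENOIOCTLCMD. The conversion idiom, with a hypothetical handler name:

static long example_compat_ioctl(struct file *file, unsigned int cmd,
				 unsigned long arg)
{
	/* compat_ptr() turns the 32-bit user value into a valid
	 * 64-bit user pointer before the native handler sees it */
	return example_ioctl(file, cmd, (unsigned long)compat_ptr(arg));
}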
diff --git a/drivers/scsi/csiostor/csio_init.c b/drivers/scsi/csiostor/csio_init.c
index 2e8a3ac575cb..8dea7d53788a 100644
--- a/drivers/scsi/csiostor/csio_init.c
+++ b/drivers/scsi/csiostor/csio_init.c
@@ -529,7 +529,7 @@ static struct csio_hw *csio_hw_alloc(struct pci_dev *pdev)
goto err_free_hw;
/* Get the start address of registers from BAR 0 */
- hw->regstart = ioremap_nocache(pci_resource_start(pdev, 0),
+ hw->regstart = ioremap(pci_resource_start(pdev, 0),
pci_resource_len(pdev, 0));
if (!hw->regstart) {
csio_err(hw, "Could not map BAR 0, regstart = %p\n",
diff --git a/drivers/scsi/csiostor/csio_scsi.c b/drivers/scsi/csiostor/csio_scsi.c
index 469d0bc9f5fe..00cf33573136 100644
--- a/drivers/scsi/csiostor/csio_scsi.c
+++ b/drivers/scsi/csiostor/csio_scsi.c
@@ -1383,7 +1383,7 @@ csio_device_reset(struct device *dev,
return -EINVAL;
/* Delete NPIV lnodes */
- csio_lnodes_exit(hw, 1);
+ csio_lnodes_exit(hw, 1);
/* Block upper IOs */
csio_lnodes_block_request(hw);
diff --git a/drivers/scsi/esp_scsi.c b/drivers/scsi/esp_scsi.c
index bb88995a12c7..89afa31e33cb 100644
--- a/drivers/scsi/esp_scsi.c
+++ b/drivers/scsi/esp_scsi.c
@@ -243,8 +243,6 @@ static void esp_set_all_config3(struct esp *esp, u8 val)
/* Reset the ESP chip, _not_ the SCSI bus. */
static void esp_reset_esp(struct esp *esp)
{
- u8 family_code, version;
-
/* Now reset the ESP chip */
scsi_esp_cmd(esp, ESP_CMD_RC);
scsi_esp_cmd(esp, ESP_CMD_NULL | ESP_CMD_DMA);
@@ -257,14 +255,19 @@ static void esp_reset_esp(struct esp *esp)
*/
esp->max_period = ((35 * esp->ccycle) / 1000);
if (esp->rev == FAST) {
- version = esp_read8(ESP_UID);
- family_code = (version & 0xf8) >> 3;
- if (family_code == 0x02)
+ u8 family_code = ESP_FAMILY(esp_read8(ESP_UID));
+
+ if (family_code == ESP_UID_F236) {
esp->rev = FAS236;
- else if (family_code == 0x0a)
+ } else if (family_code == ESP_UID_HME) {
esp->rev = FASHME; /* Version is usually '5'. */
- else
+ } else if (family_code == ESP_UID_FSC) {
+ esp->rev = FSC;
+ /* Enable Active Negation */
+ esp_write8(ESP_CONFIG4_RADE, ESP_CFG4);
+ } else {
esp->rev = FAS100A;
+ }
esp->min_period = ((4 * esp->ccycle) / 1000);
} else {
esp->min_period = ((5 * esp->ccycle) / 1000);
@@ -308,7 +311,7 @@ static void esp_reset_esp(struct esp *esp)
case FAS236:
case PCSCSI:
- /* Fast 236, AM53c974 or HME */
+ case FSC:
esp_write8(esp->config2, ESP_CFG2);
if (esp->rev == FASHME) {
u8 cfg3 = esp->target[0].esp_config3;
@@ -2373,10 +2376,11 @@ static const char *esp_chip_names[] = {
"ESP100A",
"ESP236",
"FAS236",
+ "AM53C974",
+ "53CF9x-2",
"FAS100A",
"FAST",
"FASHME",
- "AM53C974",
};
static struct scsi_transport_template *esp_transport_template;
diff --git a/drivers/scsi/esp_scsi.h b/drivers/scsi/esp_scsi.h
index 91b32f2a1a1b..446a3d18c022 100644
--- a/drivers/scsi/esp_scsi.h
+++ b/drivers/scsi/esp_scsi.h
@@ -78,12 +78,14 @@
#define ESP_CONFIG3_IMS 0x80 /* ID msg chk'ng (esp/fas236) */
#define ESP_CONFIG3_OBPUSH 0x80 /* Push odd-byte to dma (hme) */
-/* ESP config register 4 read-write, found only on am53c974 chips */
-#define ESP_CONFIG4_RADE 0x04 /* Active negation */
-#define ESP_CONFIG4_RAE 0x08 /* Active negation on REQ and ACK */
-#define ESP_CONFIG4_PWD 0x20 /* Reduced power feature */
-#define ESP_CONFIG4_GE0 0x40 /* Glitch eater bit 0 */
-#define ESP_CONFIG4_GE1 0x80 /* Glitch eater bit 1 */
+/* ESP config register 4 read-write */
+#define ESP_CONFIG4_BBTE 0x01 /* Back-to-back transfers (fsc) */
+#define ESP_CONFIG4_TEST 0x02 /* Transfer counter test mode (fsc) */
+#define ESP_CONFIG4_RADE 0x04 /* Active negation (am53c974/fsc) */
+#define ESP_CONFIG4_RAE 0x08 /* Act. negation REQ/ACK (am53c974) */
+#define ESP_CONFIG4_PWD 0x20 /* Reduced power feature (am53c974) */
+#define ESP_CONFIG4_GE0 0x40 /* Glitch eater bit 0 (am53c974) */
+#define ESP_CONFIG4_GE1 0x80 /* Glitch eater bit 1 (am53c974) */
#define ESP_CONFIG_GE_12NS (0)
#define ESP_CONFIG_GE_25NS (ESP_CONFIG_GE1)
@@ -209,10 +211,15 @@
#define ESP_TEST_TS 0x04 /* Tristate test mode */
/* ESP unique ID register read-only, found on fas236+fas100a only */
+#define ESP_UID_FAM 0xf8 /* ESP family bitmask */
+
+#define ESP_FAMILY(uid) (((uid) & ESP_UID_FAM) >> 3)
+
+/* Values for the ESP family bits */
#define ESP_UID_F100A 0x00 /* ESP FAS100A */
#define ESP_UID_F236 0x02 /* ESP FAS236 */
-#define ESP_UID_REV 0x07 /* ESP revision */
-#define ESP_UID_FAM 0xf8 /* ESP family */
+#define ESP_UID_HME 0x0a /* FAS HME */
+#define ESP_UID_FSC 0x14 /* NCR/Symbios Logic 53CF9x-2 */
/* ESP fifo flags register read-only */
/* Note that the following implies a 16 byte FIFO on the ESP. */
@@ -257,15 +264,17 @@ struct esp_cmd_priv {
};
#define ESP_CMD_PRIV(CMD) ((struct esp_cmd_priv *)(&(CMD)->SCp))
+/* NOTE: this enum is ordered based on chip features! */
enum esp_rev {
- ESP100 = 0x00, /* NCR53C90 - very broken */
- ESP100A = 0x01, /* NCR53C90A */
- ESP236 = 0x02,
- FAS236 = 0x03,
- FAS100A = 0x04,
- FAST = 0x05,
- FASHME = 0x06,
- PCSCSI = 0x07, /* AM53c974 */
+ ESP100, /* NCR53C90 - very broken */
+ ESP100A, /* NCR53C90A */
+ ESP236,
+ FAS236,
+ PCSCSI, /* AM53c974 */
+ FSC, /* NCR/Symbios Logic 53CF9x-2 */
+ FAS100A,
+ FAST,
+ FASHME,
};
struct esp_cmd_entry {
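Dropping the explicit enumerator values and inserting PCSCSI/FSC mid-list only works because, per the NOTE, esp_rev is ordered by feature set; the driver can then gate features with range comparisons instead of enumerating chips. A hypothetical check in that style:

	/* everything at or after FAS100A in the enum is a "fast" part */
	if (esp->rev >= FAS100A)
		esp->min_period = (4 * esp->ccycle) / 1000;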
diff --git a/drivers/scsi/fnic/fnic_scsi.c b/drivers/scsi/fnic/fnic_scsi.c
index 8ef150dfb6f7..b60795893994 100644
--- a/drivers/scsi/fnic/fnic_scsi.c
+++ b/drivers/scsi/fnic/fnic_scsi.c
@@ -439,6 +439,9 @@ static int fnic_queuecommand_lck(struct scsi_cmnd *sc, void (*done)(struct scsi_
if (unlikely(fnic_chk_state_flags_locked(fnic, FNIC_FLAGS_IO_BLOCKED)))
return SCSI_MLQUEUE_HOST_BUSY;
+ if (unlikely(fnic_chk_state_flags_locked(fnic, FNIC_FLAGS_FWRESET)))
+ return SCSI_MLQUEUE_HOST_BUSY;
+
rport = starget_to_rport(scsi_target(sc->device));
if (!rport) {
FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
diff --git a/drivers/scsi/fnic/vnic_dev.c b/drivers/scsi/fnic/vnic_dev.c
index 1f55b9e4e74a..1b88a3b53eee 100644
--- a/drivers/scsi/fnic/vnic_dev.c
+++ b/drivers/scsi/fnic/vnic_dev.c
@@ -688,26 +688,26 @@ int vnic_dev_soft_reset_done(struct vnic_dev *vdev, int *done)
int vnic_dev_hang_notify(struct vnic_dev *vdev)
{
- u64 a0, a1;
+ u64 a0 = 0, a1 = 0;
int wait = 1000;
return vnic_dev_cmd(vdev, CMD_HANG_NOTIFY, &a0, &a1, wait);
}
int vnic_dev_mac_addr(struct vnic_dev *vdev, u8 *mac_addr)
{
- u64 a0, a1;
+ u64 a[2] = {};
int wait = 1000;
int err, i;
for (i = 0; i < ETH_ALEN; i++)
mac_addr[i] = 0;
- err = vnic_dev_cmd(vdev, CMD_MAC_ADDR, &a0, &a1, wait);
+ err = vnic_dev_cmd(vdev, CMD_MAC_ADDR, &a[0], &a[1], wait);
if (err)
return err;
for (i = 0; i < ETH_ALEN; i++)
- mac_addr[i] = ((u8 *)&a0)[i];
+ mac_addr[i] = ((u8 *)&a)[i];
return 0;
}
@@ -732,30 +732,30 @@ void vnic_dev_packet_filter(struct vnic_dev *vdev, int directed, int multicast,
void vnic_dev_add_addr(struct vnic_dev *vdev, u8 *addr)
{
- u64 a0 = 0, a1 = 0;
+ u64 a[2] = {};
int wait = 1000;
int err;
int i;
for (i = 0; i < ETH_ALEN; i++)
- ((u8 *)&a0)[i] = addr[i];
+ ((u8 *)&a)[i] = addr[i];
- err = vnic_dev_cmd(vdev, CMD_ADDR_ADD, &a0, &a1, wait);
+ err = vnic_dev_cmd(vdev, CMD_ADDR_ADD, &a[0], &a[1], wait);
if (err)
pr_err("Can't add addr [%pM], %d\n", addr, err);
}
void vnic_dev_del_addr(struct vnic_dev *vdev, u8 *addr)
{
- u64 a0 = 0, a1 = 0;
+ u64 a[2] = {};
int wait = 1000;
int err;
int i;
for (i = 0; i < ETH_ALEN; i++)
- ((u8 *)&a0)[i] = addr[i];
+ ((u8 *)&a)[i] = addr[i];
- err = vnic_dev_cmd(vdev, CMD_ADDR_DEL, &a0, &a1, wait);
+ err = vnic_dev_cmd(vdev, CMD_ADDR_DEL, &a[0], &a[1], wait);
if (err)
pr_err("Can't del addr [%pM], %d\n", addr, err);
}
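Replacing the paired a0/a1 locals with u64 a[2] (and zero-initializing them in vnic_dev_hang_notify()) guarantees the firmware command arguments form one contiguous, fully initialized block: the byte-wise MAC loops then index a single array object instead of casting through one variable and relying on stack layout. The shape of the fix:

	u64 a[2] = {};		/* contiguous, zeroed argument block */
	int i;

	for (i = 0; i < ETH_ALEN; i++)
		((u8 *)a)[i] = addr[i];	/* stays within one object */
	err = vnic_dev_cmd(vdev, CMD_ADDR_ADD, &a[0], &a[1], wait);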
diff --git a/drivers/scsi/hisi_sas/hisi_sas.h b/drivers/scsi/hisi_sas/hisi_sas.h
index 233c73e01246..2bdd64648ef0 100644
--- a/drivers/scsi/hisi_sas/hisi_sas.h
+++ b/drivers/scsi/hisi_sas/hisi_sas.h
@@ -180,10 +180,10 @@ struct hisi_sas_port {
struct hisi_sas_cq {
struct hisi_hba *hisi_hba;
- const struct cpumask *pci_irq_mask;
- struct tasklet_struct tasklet;
+ const struct cpumask *irq_mask;
int rd_point;
int id;
+ int irq_no;
};
struct hisi_sas_dq {
@@ -627,7 +627,7 @@ extern void hisi_sas_slot_task_free(struct hisi_hba *hisi_hba,
extern void hisi_sas_init_mem(struct hisi_hba *hisi_hba);
extern void hisi_sas_rst_work_handler(struct work_struct *work);
extern void hisi_sas_sync_rst_work_handler(struct work_struct *work);
-extern void hisi_sas_kill_tasklets(struct hisi_hba *hisi_hba);
+extern void hisi_sas_sync_irqs(struct hisi_hba *hisi_hba);
extern void hisi_sas_phy_oob_ready(struct hisi_hba *hisi_hba, int phy_no);
extern bool hisi_sas_notify_phy_event(struct hisi_sas_phy *phy,
enum hisi_sas_phy_event event);
diff --git a/drivers/scsi/hisi_sas/hisi_sas_main.c b/drivers/scsi/hisi_sas/hisi_sas_main.c
index 03588ec3c394..9a6deb21fe4d 100644
--- a/drivers/scsi/hisi_sas/hisi_sas_main.c
+++ b/drivers/scsi/hisi_sas/hisi_sas_main.c
@@ -163,13 +163,11 @@ static void hisi_sas_slot_index_clear(struct hisi_hba *hisi_hba, int slot_idx)
static void hisi_sas_slot_index_free(struct hisi_hba *hisi_hba, int slot_idx)
{
- unsigned long flags;
-
if (hisi_hba->hw->slot_index_alloc ||
slot_idx >= HISI_SAS_UNRESERVED_IPTT) {
- spin_lock_irqsave(&hisi_hba->lock, flags);
+ spin_lock(&hisi_hba->lock);
hisi_sas_slot_index_clear(hisi_hba, slot_idx);
- spin_unlock_irqrestore(&hisi_hba->lock, flags);
+ spin_unlock(&hisi_hba->lock);
}
}
@@ -185,12 +183,11 @@ static int hisi_sas_slot_index_alloc(struct hisi_hba *hisi_hba,
{
int index;
void *bitmap = hisi_hba->slot_index_tags;
- unsigned long flags;
if (scsi_cmnd)
return scsi_cmnd->request->tag;
- spin_lock_irqsave(&hisi_hba->lock, flags);
+ spin_lock(&hisi_hba->lock);
index = find_next_zero_bit(bitmap, hisi_hba->slot_index_count,
hisi_hba->last_slot_index + 1);
if (index >= hisi_hba->slot_index_count) {
@@ -198,13 +195,13 @@ static int hisi_sas_slot_index_alloc(struct hisi_hba *hisi_hba,
hisi_hba->slot_index_count,
HISI_SAS_UNRESERVED_IPTT);
if (index >= hisi_hba->slot_index_count) {
- spin_unlock_irqrestore(&hisi_hba->lock, flags);
+ spin_unlock(&hisi_hba->lock);
return -SAS_QUEUE_FULL;
}
}
hisi_sas_slot_index_set(hisi_hba, index);
hisi_hba->last_slot_index = index;
- spin_unlock_irqrestore(&hisi_hba->lock, flags);
+ spin_unlock(&hisi_hba->lock);
return index;
}
@@ -220,7 +217,6 @@ static void hisi_sas_slot_index_init(struct hisi_hba *hisi_hba)
void hisi_sas_slot_task_free(struct hisi_hba *hisi_hba, struct sas_task *task,
struct hisi_sas_slot *slot)
{
- unsigned long flags;
int device_id = slot->device_id;
struct hisi_sas_device *sas_dev = &hisi_hba->devices[device_id];
@@ -247,9 +243,9 @@ void hisi_sas_slot_task_free(struct hisi_hba *hisi_hba, struct sas_task *task,
}
}
- spin_lock_irqsave(&sas_dev->lock, flags);
+ spin_lock(&sas_dev->lock);
list_del_init(&slot->entry);
- spin_unlock_irqrestore(&sas_dev->lock, flags);
+ spin_unlock(&sas_dev->lock);
memset(slot, 0, offsetof(struct hisi_sas_slot, buf));
@@ -489,14 +485,14 @@ static int hisi_sas_task_prep(struct sas_task *task,
slot_idx = rc;
slot = &hisi_hba->slot_info[slot_idx];
- spin_lock_irqsave(&dq->lock, flags);
+ spin_lock(&dq->lock);
wr_q_index = dq->wr_point;
dq->wr_point = (dq->wr_point + 1) % HISI_SAS_QUEUE_SLOTS;
list_add_tail(&slot->delivery, &dq->list);
- spin_unlock_irqrestore(&dq->lock, flags);
- spin_lock_irqsave(&sas_dev->lock, flags);
+ spin_unlock(&dq->lock);
+ spin_lock(&sas_dev->lock);
list_add_tail(&slot->entry, &sas_dev->list);
- spin_unlock_irqrestore(&sas_dev->lock, flags);
+ spin_unlock(&sas_dev->lock);
dlvry_queue = dq->id;
dlvry_queue_slot = wr_q_index;
@@ -562,7 +558,6 @@ static int hisi_sas_task_exec(struct sas_task *task, gfp_t gfp_flags,
{
u32 rc;
u32 pass = 0;
- unsigned long flags;
struct hisi_hba *hisi_hba;
struct device *dev;
struct domain_device *device = task->dev;
@@ -606,9 +601,9 @@ static int hisi_sas_task_exec(struct sas_task *task, gfp_t gfp_flags,
dev_err(dev, "task exec: failed[%d]!\n", rc);
if (likely(pass)) {
- spin_lock_irqsave(&dq->lock, flags);
+ spin_lock(&dq->lock);
hisi_hba->hw->start_delivery(dq);
- spin_unlock_irqrestore(&dq->lock, flags);
+ spin_unlock(&dq->lock);
}
return rc;
@@ -659,12 +654,11 @@ static struct hisi_sas_device *hisi_sas_alloc_dev(struct domain_device *device)
{
struct hisi_hba *hisi_hba = dev_to_hisi_hba(device);
struct hisi_sas_device *sas_dev = NULL;
- unsigned long flags;
int last = hisi_hba->last_dev_id;
int first = (hisi_hba->last_dev_id + 1) % HISI_SAS_MAX_DEVICES;
int i;
- spin_lock_irqsave(&hisi_hba->lock, flags);
+ spin_lock(&hisi_hba->lock);
for (i = first; i != last; i %= HISI_SAS_MAX_DEVICES) {
if (hisi_hba->devices[i].dev_type == SAS_PHY_UNUSED) {
int queue = i % hisi_hba->queue_count;
@@ -684,7 +678,7 @@ static struct hisi_sas_device *hisi_sas_alloc_dev(struct domain_device *device)
i++;
}
hisi_hba->last_dev_id = i;
- spin_unlock_irqrestore(&hisi_hba->lock, flags);
+ spin_unlock(&hisi_hba->lock);
return sas_dev;
}
@@ -1233,10 +1227,10 @@ static int hisi_sas_exec_internal_tmf_task(struct domain_device *device,
struct hisi_sas_cq *cq =
&hisi_hba->cq[slot->dlvry_queue];
/*
- * flush tasklet to avoid free'ing task
+ * sync irq to avoid free'ing task
* before using task in IO completion
*/
- tasklet_kill(&cq->tasklet);
+ synchronize_irq(cq->irq_no);
slot->task = NULL;
}
@@ -1626,11 +1620,11 @@ static int hisi_sas_abort_task(struct sas_task *task)
if (slot) {
/*
- * flush tasklet to avoid free'ing task
+ * sync irq to avoid free'ing task
* before using task in IO completion
*/
cq = &hisi_hba->cq[slot->dlvry_queue];
- tasklet_kill(&cq->tasklet);
+ synchronize_irq(cq->irq_no);
}
spin_unlock_irqrestore(&task->task_state_lock, flags);
rc = TMF_RESP_FUNC_COMPLETE;
@@ -1694,10 +1688,10 @@ static int hisi_sas_abort_task(struct sas_task *task)
if (((rc < 0) || (rc == TMF_RESP_FUNC_FAILED)) &&
task->lldd_task) {
/*
- * flush tasklet to avoid free'ing task
+ * sync irq to avoid free'ing task
* before using task in IO completion
*/
- tasklet_kill(&cq->tasklet);
+ synchronize_irq(cq->irq_no);
slot->task = NULL;
}
}
@@ -1965,14 +1959,14 @@ hisi_sas_internal_abort_task_exec(struct hisi_hba *hisi_hba, int device_id,
slot_idx = rc;
slot = &hisi_hba->slot_info[slot_idx];
- spin_lock_irqsave(&dq->lock, flags);
+ spin_lock(&dq->lock);
wr_q_index = dq->wr_point;
dq->wr_point = (dq->wr_point + 1) % HISI_SAS_QUEUE_SLOTS;
list_add_tail(&slot->delivery, &dq->list);
- spin_unlock_irqrestore(&dq->lock, flags);
- spin_lock_irqsave(&sas_dev->lock, flags);
+ spin_unlock(&dq->lock);
+ spin_lock(&sas_dev->lock);
list_add_tail(&slot->entry, &sas_dev->list);
- spin_unlock_irqrestore(&sas_dev->lock, flags);
+ spin_unlock(&sas_dev->lock);
dlvry_queue = dq->id;
dlvry_queue_slot = wr_q_index;
@@ -2001,9 +1995,9 @@ hisi_sas_internal_abort_task_exec(struct hisi_hba *hisi_hba, int device_id,
spin_unlock_irqrestore(&task->task_state_lock, flags);
WRITE_ONCE(slot->ready, 1);
/* send abort command to the chip */
- spin_lock_irqsave(&dq->lock, flags);
+ spin_lock(&dq->lock);
hisi_hba->hw->start_delivery(dq);
- spin_unlock_irqrestore(&dq->lock, flags);
+ spin_unlock(&dq->lock);
return 0;
@@ -2076,10 +2070,10 @@ _hisi_sas_internal_task_abort(struct hisi_hba *hisi_hba,
struct hisi_sas_cq *cq =
&hisi_hba->cq[slot->dlvry_queue];
/*
- * flush tasklet to avoid free'ing task
+ * sync irq to avoid free'ing task
* before using task in IO completion
*/
- tasklet_kill(&cq->tasklet);
+ synchronize_irq(cq->irq_no);
slot->task = NULL;
}
dev_err(dev, "internal task abort: timeout and not done.\n");
@@ -2131,7 +2125,7 @@ hisi_sas_internal_task_abort(struct hisi_hba *hisi_hba,
case HISI_SAS_INT_ABT_DEV:
for (i = 0; i < hisi_hba->cq_nvecs; i++) {
struct hisi_sas_cq *cq = &hisi_hba->cq[i];
- const struct cpumask *mask = cq->pci_irq_mask;
+ const struct cpumask *mask = cq->irq_mask;
if (mask && !cpumask_intersects(cpu_online_mask, mask))
continue;
@@ -2225,17 +2219,17 @@ void hisi_sas_phy_down(struct hisi_hba *hisi_hba, int phy_no, int rdy)
}
EXPORT_SYMBOL_GPL(hisi_sas_phy_down);
-void hisi_sas_kill_tasklets(struct hisi_hba *hisi_hba)
+void hisi_sas_sync_irqs(struct hisi_hba *hisi_hba)
{
int i;
for (i = 0; i < hisi_hba->cq_nvecs; i++) {
struct hisi_sas_cq *cq = &hisi_hba->cq[i];
- tasklet_kill(&cq->tasklet);
+ synchronize_irq(cq->irq_no);
}
}
-EXPORT_SYMBOL_GPL(hisi_sas_kill_tasklets);
+EXPORT_SYMBOL_GPL(hisi_sas_sync_irqs);
int hisi_sas_host_reset(struct Scsi_Host *shost, int reset_type)
{
@@ -3936,7 +3930,7 @@ void hisi_sas_debugfs_init(struct hisi_hba *hisi_hba)
hisi_hba->debugfs_dir = debugfs_create_dir(dev_name(dev),
hisi_sas_debugfs_dir);
- debugfs_create_file("trigger_dump", 0600,
+ debugfs_create_file("trigger_dump", 0200,
hisi_hba->debugfs_dir,
hisi_hba,
&hisi_sas_debugfs_trigger_dump_fops);
diff --git a/drivers/scsi/hisi_sas/hisi_sas_v1_hw.c b/drivers/scsi/hisi_sas/hisi_sas_v1_hw.c
index 3af53cc42bd6..fa25766502a2 100644
--- a/drivers/scsi/hisi_sas/hisi_sas_v1_hw.c
+++ b/drivers/scsi/hisi_sas/hisi_sas_v1_hw.c
@@ -1772,6 +1772,9 @@ static struct scsi_host_template sht_v1_hw = {
.eh_target_reset_handler = sas_eh_target_reset_handler,
.target_destroy = sas_target_destroy,
.ioctl = sas_ioctl,
+#ifdef CONFIG_COMPAT
+ .compat_ioctl = sas_ioctl,
+#endif
.shost_attrs = host_attrs_v1_hw,
.host_reset = hisi_sas_host_reset,
};
diff --git a/drivers/scsi/hisi_sas/hisi_sas_v2_hw.c b/drivers/scsi/hisi_sas/hisi_sas_v2_hw.c
index 61b1e2693b08..e05faf315dcd 100644
--- a/drivers/scsi/hisi_sas/hisi_sas_v2_hw.c
+++ b/drivers/scsi/hisi_sas/hisi_sas_v2_hw.c
@@ -773,7 +773,6 @@ slot_index_alloc_quirk_v2_hw(struct hisi_hba *hisi_hba,
struct hisi_sas_device *sas_dev = device->lldd_dev;
int sata_idx = sas_dev->sata_idx;
int start, end;
- unsigned long flags;
if (!sata_dev) {
/*
@@ -797,12 +796,12 @@ slot_index_alloc_quirk_v2_hw(struct hisi_hba *hisi_hba,
end = 64 * (sata_idx + 2);
}
- spin_lock_irqsave(&hisi_hba->lock, flags);
+ spin_lock(&hisi_hba->lock);
while (1) {
start = find_next_zero_bit(bitmap,
hisi_hba->slot_index_count, start);
if (start >= end) {
- spin_unlock_irqrestore(&hisi_hba->lock, flags);
+ spin_unlock(&hisi_hba->lock);
return -SAS_QUEUE_FULL;
}
/*
@@ -814,7 +813,7 @@ slot_index_alloc_quirk_v2_hw(struct hisi_hba *hisi_hba,
}
set_bit(start, bitmap);
- spin_unlock_irqrestore(&hisi_hba->lock, flags);
+ spin_unlock(&hisi_hba->lock);
return start;
}
@@ -843,9 +842,8 @@ hisi_sas_device *alloc_dev_quirk_v2_hw(struct domain_device *device)
struct hisi_sas_device *sas_dev = NULL;
int i, sata_dev = dev_is_sata(device);
int sata_idx = -1;
- unsigned long flags;
- spin_lock_irqsave(&hisi_hba->lock, flags);
+ spin_lock(&hisi_hba->lock);
if (sata_dev)
if (!sata_index_alloc_v2_hw(hisi_hba, &sata_idx))
@@ -876,7 +874,7 @@ hisi_sas_device *alloc_dev_quirk_v2_hw(struct domain_device *device)
}
out:
- spin_unlock_irqrestore(&hisi_hba->lock, flags);
+ spin_unlock(&hisi_hba->lock);
return sas_dev;
}
@@ -3111,9 +3109,9 @@ static irqreturn_t fatal_axi_int_v2_hw(int irq_no, void *p)
return IRQ_HANDLED;
}
-static void cq_tasklet_v2_hw(unsigned long val)
+static irqreturn_t cq_thread_v2_hw(int irq_no, void *p)
{
- struct hisi_sas_cq *cq = (struct hisi_sas_cq *)val;
+ struct hisi_sas_cq *cq = p;
struct hisi_hba *hisi_hba = cq->hisi_hba;
struct hisi_sas_slot *slot;
struct hisi_sas_itct *itct;
@@ -3181,6 +3179,8 @@ static void cq_tasklet_v2_hw(unsigned long val)
/* update rd_point */
cq->rd_point = rd_point;
hisi_sas_write32(hisi_hba, COMPL_Q_0_RD_PTR + (0x14 * queue), rd_point);
+
+ return IRQ_HANDLED;
}
static irqreturn_t cq_interrupt_v2_hw(int irq_no, void *p)
@@ -3191,9 +3191,7 @@ static irqreturn_t cq_interrupt_v2_hw(int irq_no, void *p)
hisi_sas_write32(hisi_hba, OQ_INT_SRC, 1 << queue);
- tasklet_schedule(&cq->tasklet);
-
- return IRQ_HANDLED;
+ return IRQ_WAKE_THREAD;
}
static irqreturn_t sata_int_v2_hw(int irq_no, void *p)
@@ -3360,18 +3358,18 @@ static int interrupt_init_v2_hw(struct hisi_hba *hisi_hba)
for (queue_no = 0; queue_no < hisi_hba->queue_count; queue_no++) {
struct hisi_sas_cq *cq = &hisi_hba->cq[queue_no];
- struct tasklet_struct *t = &cq->tasklet;
- irq = irq_map[queue_no + 96];
- rc = devm_request_irq(dev, irq, cq_interrupt_v2_hw, 0,
- DRV_NAME " cq", cq);
+ cq->irq_no = irq_map[queue_no + 96];
+ rc = devm_request_threaded_irq(dev, cq->irq_no,
+ cq_interrupt_v2_hw,
+ cq_thread_v2_hw, IRQF_ONESHOT,
+ DRV_NAME " cq", cq);
if (rc) {
dev_err(dev, "irq init: could not request cq interrupt %d, rc=%d\n",
irq, rc);
rc = -ENOENT;
goto err_out;
}
- tasklet_init(t, cq_tasklet_v2_hw, (unsigned long)cq);
}
hisi_hba->cq_nvecs = hisi_hba->queue_count;
@@ -3432,7 +3430,6 @@ static int soft_reset_v2_hw(struct hisi_hba *hisi_hba)
interrupt_disable_v2_hw(hisi_hba);
hisi_sas_write32(hisi_hba, DLVRY_QUEUE_ENABLE, 0x0);
- hisi_sas_kill_tasklets(hisi_hba);
hisi_sas_stop_phys(hisi_hba);
@@ -3551,6 +3548,9 @@ static struct scsi_host_template sht_v2_hw = {
.eh_target_reset_handler = sas_eh_target_reset_handler,
.target_destroy = sas_target_destroy,
.ioctl = sas_ioctl,
+#ifdef CONFIG_COMPAT
+ .compat_ioctl = sas_ioctl,
+#endif
.shost_attrs = host_attrs_v2_hw,
.host_reset = hisi_sas_host_reset,
};
@@ -3603,11 +3603,6 @@ static int hisi_sas_v2_probe(struct platform_device *pdev)
static int hisi_sas_v2_remove(struct platform_device *pdev)
{
- struct sas_ha_struct *sha = platform_get_drvdata(pdev);
- struct hisi_hba *hisi_hba = sha->lldd_ha;
-
- hisi_sas_kill_tasklets(hisi_hba);
-
return hisi_sas_remove(pdev);
}
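The v2 (and, below, v3) completion path moves from a tasklet to a threaded interrupt: the hard handler only acks the queue and returns IRQ_WAKE_THREAD, the old tasklet body becomes the thread function, and IRQF_ONESHOT keeps the line masked until the thread finishes. synchronize_irq() then replaces tasklet_kill() as the quiesce primitive, since it waits for both halves to complete. A generic sketch with hypothetical helpers (ack_hw_irq(), drain_cq()):

static irqreturn_t example_hard_handler(int irq, void *p)
{
	ack_hw_irq(p);			/* cheap work in hard-IRQ context */
	return IRQ_WAKE_THREAD;
}

static irqreturn_t example_thread_fn(int irq, void *p)
{
	drain_cq(p);			/* former tasklet body */
	return IRQ_HANDLED;
}

	rc = devm_request_threaded_irq(dev, irq, example_hard_handler,
				       example_thread_fn, IRQF_ONESHOT,
				       "example cq", cq);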
diff --git a/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c b/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c
index bf5d5f138437..a2debe0c8185 100644
--- a/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c
+++ b/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c
@@ -495,6 +495,13 @@ struct hisi_sas_err_record_v3 {
#define BASE_VECTORS_V3_HW 16
#define MIN_AFFINE_VECTORS_V3_HW (BASE_VECTORS_V3_HW + 1)
+#define CHNL_INT_STS_MSK 0xeeeeeeee
+#define CHNL_INT_STS_PHY_MSK 0xe
+#define CHNL_INT_STS_INT0_MSK BIT(1)
+#define CHNL_INT_STS_INT1_MSK BIT(2)
+#define CHNL_INT_STS_INT2_MSK BIT(3)
+#define CHNL_WIDTH 4
+
enum {
DSM_FUNC_ERR_HANDLE_MSI = 0,
};
@@ -1819,19 +1826,19 @@ static irqreturn_t int_chnl_int_v3_hw(int irq_no, void *p)
int phy_no = 0;
irq_msk = hisi_sas_read32(hisi_hba, CHNL_INT_STATUS)
- & 0xeeeeeeee;
+ & CHNL_INT_STS_MSK;
while (irq_msk) {
- if (irq_msk & (2 << (phy_no * 4)))
+ if (irq_msk & (CHNL_INT_STS_INT0_MSK << (phy_no * CHNL_WIDTH)))
handle_chl_int0_v3_hw(hisi_hba, phy_no);
- if (irq_msk & (4 << (phy_no * 4)))
+ if (irq_msk & (CHNL_INT_STS_INT1_MSK << (phy_no * CHNL_WIDTH)))
handle_chl_int1_v3_hw(hisi_hba, phy_no);
- if (irq_msk & (8 << (phy_no * 4)))
+ if (irq_msk & (CHNL_INT_STS_INT2_MSK << (phy_no * CHNL_WIDTH)))
handle_chl_int2_v3_hw(hisi_hba, phy_no);
- irq_msk &= ~(0xe << (phy_no * 4));
+ irq_msk &= ~(CHNL_INT_STS_PHY_MSK << (phy_no * CHNL_WIDTH));
phy_no++;
}
@@ -2299,9 +2306,9 @@ out:
return sts;
}
-static void cq_tasklet_v3_hw(unsigned long val)
+static irqreturn_t cq_thread_v3_hw(int irq_no, void *p)
{
- struct hisi_sas_cq *cq = (struct hisi_sas_cq *)val;
+ struct hisi_sas_cq *cq = p;
struct hisi_hba *hisi_hba = cq->hisi_hba;
struct hisi_sas_slot *slot;
struct hisi_sas_complete_v3_hdr *complete_queue;
@@ -2338,6 +2345,8 @@ static void cq_tasklet_v3_hw(unsigned long val)
/* update rd_point */
cq->rd_point = rd_point;
hisi_sas_write32(hisi_hba, COMPL_Q_0_RD_PTR + (0x14 * queue), rd_point);
+
+ return IRQ_HANDLED;
}
static irqreturn_t cq_interrupt_v3_hw(int irq_no, void *p)
@@ -2348,9 +2357,7 @@ static irqreturn_t cq_interrupt_v3_hw(int irq_no, void *p)
hisi_sas_write32(hisi_hba, OQ_INT_SRC, 1 << queue);
- tasklet_schedule(&cq->tasklet);
-
- return IRQ_HANDLED;
+ return IRQ_WAKE_THREAD;
}
static void setup_reply_map_v3_hw(struct hisi_hba *hisi_hba, int nvecs)
@@ -2365,7 +2372,7 @@ static void setup_reply_map_v3_hw(struct hisi_hba *hisi_hba, int nvecs)
BASE_VECTORS_V3_HW);
if (!mask)
goto fallback;
- cq->pci_irq_mask = mask;
+ cq->irq_mask = mask;
for_each_cpu(cpu, mask)
hisi_hba->reply_map[cpu] = queue;
}
@@ -2389,6 +2396,8 @@ static int interrupt_init_v3_hw(struct hisi_hba *hisi_hba)
.pre_vectors = BASE_VECTORS_V3_HW,
};
+ dev_info(dev, "Enable MSI auto-affinity\n");
+
min_msi = MIN_AFFINE_VECTORS_V3_HW;
hisi_hba->reply_map = devm_kcalloc(dev, nr_cpu_ids,
@@ -2441,15 +2450,20 @@ static int interrupt_init_v3_hw(struct hisi_hba *hisi_hba)
goto free_irq_vectors;
}
- /* Init tasklets for cq only */
+ if (hisi_sas_intr_conv)
+ dev_info(dev, "Enable interrupt converge\n");
+
for (i = 0; i < hisi_hba->cq_nvecs; i++) {
struct hisi_sas_cq *cq = &hisi_hba->cq[i];
- struct tasklet_struct *t = &cq->tasklet;
int nr = hisi_sas_intr_conv ? 16 : 16 + i;
- unsigned long irqflags = hisi_sas_intr_conv ? IRQF_SHARED : 0;
-
- rc = devm_request_irq(dev, pci_irq_vector(pdev, nr),
- cq_interrupt_v3_hw, irqflags,
+ unsigned long irqflags = hisi_sas_intr_conv ? IRQF_SHARED :
+ IRQF_ONESHOT;
+
+ cq->irq_no = pci_irq_vector(pdev, nr);
+ rc = devm_request_threaded_irq(dev, cq->irq_no,
+ cq_interrupt_v3_hw,
+ cq_thread_v3_hw,
+ irqflags,
DRV_NAME " cq", cq);
if (rc) {
dev_err(dev, "could not request cq%d interrupt, rc=%d\n",
@@ -2457,8 +2471,6 @@ static int interrupt_init_v3_hw(struct hisi_hba *hisi_hba)
rc = -ENOENT;
goto free_irq_vectors;
}
-
- tasklet_init(t, cq_tasklet_v3_hw, (unsigned long)cq);
}
return 0;
@@ -2534,7 +2546,6 @@ static int disable_host_v3_hw(struct hisi_hba *hisi_hba)
interrupt_disable_v3_hw(hisi_hba);
hisi_sas_write32(hisi_hba, DLVRY_QUEUE_ENABLE, 0x0);
- hisi_sas_kill_tasklets(hisi_hba);
hisi_sas_stop_phys(hisi_hba);
@@ -2910,7 +2921,7 @@ static void debugfs_snapshot_prepare_v3_hw(struct hisi_hba *hisi_hba)
wait_cmds_complete_timeout_v3_hw(hisi_hba, 100, 5000);
- hisi_sas_kill_tasklets(hisi_hba);
+ hisi_sas_sync_irqs(hisi_hba);
}
static void debugfs_snapshot_restore_v3_hw(struct hisi_hba *hisi_hba)
@@ -3075,6 +3086,9 @@ static struct scsi_host_template sht_v3_hw = {
.eh_target_reset_handler = sas_eh_target_reset_handler,
.target_destroy = sas_target_destroy,
.ioctl = sas_ioctl,
+#ifdef CONFIG_COMPAT
+ .compat_ioctl = sas_ioctl,
+#endif
.shost_attrs = host_attrs_v3_hw,
.tag_alloc_policy = BLK_TAG_ALLOC_RR,
.host_reset = hisi_sas_host_reset,
@@ -3309,7 +3323,6 @@ static void hisi_sas_v3_remove(struct pci_dev *pdev)
sas_remove_host(sha->core.shost);
hisi_sas_v3_destroy_irqs(pdev, hisi_hba);
- hisi_sas_kill_tasklets(hisi_hba);
pci_release_regions(pdev);
pci_disable_device(pdev);
hisi_sas_free(hisi_hba);
diff --git a/drivers/scsi/hpsa.c b/drivers/scsi/hpsa.c
index 216e557f703e..1a4ddfacb458 100644
--- a/drivers/scsi/hpsa.c
+++ b/drivers/scsi/hpsa.c
@@ -6876,7 +6876,7 @@ static void __iomem *remap_pci_mem(ulong base, ulong size)
{
ulong page_base = ((ulong) base) & PAGE_MASK;
ulong page_offs = ((ulong) base) - page_base;
- void __iomem *page_remapped = ioremap_nocache(page_base,
+ void __iomem *page_remapped = ioremap(page_base,
page_offs + size);
return page_remapped ? (page_remapped + page_offs) : NULL;
diff --git a/drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c b/drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c
index 54b8c6f9daf4..d9e94e81da01 100644
--- a/drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c
+++ b/drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c
@@ -1877,7 +1877,6 @@ static void ibmvscsis_send_messages(struct scsi_info *vscsi)
*/
struct viosrp_crq *crq = (struct viosrp_crq *)&msg_hi;
struct ibmvscsis_cmd *cmd, *nxt;
- struct iu_entry *iue;
long rc = ADAPT_SUCCESS;
bool retry = false;
@@ -1931,8 +1930,6 @@ static void ibmvscsis_send_messages(struct scsi_info *vscsi)
*/
vscsi->credit += 1;
} else {
- iue = cmd->iue;
-
crq->valid = VALID_CMD_RESP_EL;
crq->format = cmd->rsp.format;
@@ -3796,7 +3793,6 @@ static int ibmvscsis_queue_data_in(struct se_cmd *se_cmd)
se_cmd);
struct iu_entry *iue = cmd->iue;
struct scsi_info *vscsi = cmd->adapter;
- char *sd;
uint len = 0;
int rc;
@@ -3804,7 +3800,6 @@ static int ibmvscsis_queue_data_in(struct se_cmd *se_cmd)
1);
if (rc) {
dev_err(&vscsi->dev, "srp_transfer_data failed: %d\n", rc);
- sd = se_cmd->sense_buffer;
se_cmd->scsi_sense_length = 18;
memset(se_cmd->sense_buffer, 0, se_cmd->scsi_sense_length);
/* Logical Unit Communication Time-out asc/ascq = 0x0801 */
diff --git a/drivers/scsi/initio.c b/drivers/scsi/initio.c
index 41fd64c9c8e9..1d39628ac947 100644
--- a/drivers/scsi/initio.c
+++ b/drivers/scsi/initio.c
@@ -1640,7 +1640,7 @@ static int initio_state_6(struct initio_host * host)
*
*/
-int initio_state_7(struct initio_host * host)
+static int initio_state_7(struct initio_host * host)
{
int cnt, i;
diff --git a/drivers/scsi/ipr.c b/drivers/scsi/ipr.c
index 079c04bc448a..ae45cbe98ae2 100644
--- a/drivers/scsi/ipr.c
+++ b/drivers/scsi/ipr.c
@@ -6727,6 +6727,9 @@ static struct scsi_host_template driver_template = {
.name = "IPR",
.info = ipr_ioa_info,
.ioctl = ipr_ioctl,
+#ifdef CONFIG_COMPAT
+ .compat_ioctl = ipr_ioctl,
+#endif
.queuecommand = ipr_queuecommand,
.eh_abort_handler = ipr_eh_abort,
.eh_device_reset_handler = ipr_eh_dev_reset,
diff --git a/drivers/scsi/isci/init.c b/drivers/scsi/isci/init.c
index 1727d0c71b12..b48aac8dfcb8 100644
--- a/drivers/scsi/isci/init.c
+++ b/drivers/scsi/isci/init.c
@@ -168,6 +168,9 @@ static struct scsi_host_template isci_sht = {
.eh_target_reset_handler = sas_eh_target_reset_handler,
.target_destroy = sas_target_destroy,
.ioctl = sas_ioctl,
+#ifdef CONFIG_COMPAT
+ .compat_ioctl = sas_ioctl,
+#endif
.shost_attrs = isci_host_attrs,
.track_queue_depth = 1,
};
diff --git a/drivers/scsi/iscsi_tcp.c b/drivers/scsi/iscsi_tcp.c
index 0bc63a7ab41c..b5dd1caae5e9 100644
--- a/drivers/scsi/iscsi_tcp.c
+++ b/drivers/scsi/iscsi_tcp.c
@@ -887,6 +887,10 @@ free_host:
static void iscsi_sw_tcp_session_destroy(struct iscsi_cls_session *cls_session)
{
struct Scsi_Host *shost = iscsi_session_to_shost(cls_session);
+ struct iscsi_session *session = cls_session->dd_data;
+
+ if (WARN_ON_ONCE(session->leadconn))
+ return;
iscsi_tcp_r2tpool_free(cls_session->dd_data);
iscsi_session_teardown(cls_session);
diff --git a/drivers/scsi/lasi700.c b/drivers/scsi/lasi700.c
index abac2f350aee..c48a73a0f517 100644
--- a/drivers/scsi/lasi700.c
+++ b/drivers/scsi/lasi700.c
@@ -98,7 +98,7 @@ lasi700_probe(struct parisc_device *dev)
hostdata->dev = &dev->dev;
dma_set_mask(&dev->dev, DMA_BIT_MASK(32));
- hostdata->base = ioremap_nocache(base, 0x100);
+ hostdata->base = ioremap(base, 0x100);
hostdata->differential = 0;
if (dev->id.sversion == LASI_700_SVERSION) {
diff --git a/drivers/scsi/libsas/sas_ata.c b/drivers/scsi/libsas/sas_ata.c
index e9e00740f7ca..c5a828a041e0 100644
--- a/drivers/scsi/libsas/sas_ata.c
+++ b/drivers/scsi/libsas/sas_ata.c
@@ -137,7 +137,7 @@ static void sas_ata_task_done(struct sas_task *task)
} else {
ac = sas_to_ata_err(stat);
if (ac) {
- pr_warn("%s: SAS error %x\n", __func__, stat->stat);
+ pr_warn("%s: SAS error 0x%x\n", __func__, stat->stat);
/* We saw a SAS error. Send a vague error. */
if (!link->sactive) {
qc->err_mask = ac;
diff --git a/drivers/scsi/libsas/sas_discover.c b/drivers/scsi/libsas/sas_discover.c
index d7302c2052f9..daf951b0b3f5 100644
--- a/drivers/scsi/libsas/sas_discover.c
+++ b/drivers/scsi/libsas/sas_discover.c
@@ -179,7 +179,7 @@ int sas_notify_lldd_dev_found(struct domain_device *dev)
res = i->dft->lldd_dev_found(dev);
if (res) {
- pr_warn("driver on host %s cannot handle device %llx, error:%d\n",
+ pr_warn("driver on host %s cannot handle device %016llx, error:%d\n",
dev_name(sas_ha->dev),
SAS_ADDR(dev->sas_addr), res);
}
diff --git a/drivers/scsi/libsas/sas_expander.c b/drivers/scsi/libsas/sas_expander.c
index 9fdb9c9fbda4..ab671cdd4cfb 100644
--- a/drivers/scsi/libsas/sas_expander.c
+++ b/drivers/scsi/libsas/sas_expander.c
@@ -500,7 +500,7 @@ static int sas_ex_general(struct domain_device *dev)
ex_assign_report_general(dev, rg_resp);
if (dev->ex_dev.configuring) {
- pr_debug("RG: ex %llx self-configuring...\n",
+ pr_debug("RG: ex %016llx self-configuring...\n",
SAS_ADDR(dev->sas_addr));
schedule_timeout_interruptible(5*HZ);
} else
@@ -881,7 +881,7 @@ static struct domain_device *sas_ex_discover_end_dev(
res = sas_discover_end_dev(child);
if (res) {
- pr_notice("sas_discover_end_dev() for device %16llx at %016llx:%02d returned 0x%x\n",
+ pr_notice("sas_discover_end_dev() for device %016llx at %016llx:%02d returned 0x%x\n",
SAS_ADDR(child->sas_addr),
SAS_ADDR(parent->sas_addr), phy_id, res);
goto out_list_del;
diff --git a/drivers/scsi/libsas/sas_internal.h b/drivers/scsi/libsas/sas_internal.h
index 01f1738ce6df..1f1d01901978 100644
--- a/drivers/scsi/libsas/sas_internal.h
+++ b/drivers/scsi/libsas/sas_internal.h
@@ -107,7 +107,7 @@ static inline void sas_smp_host_handler(struct bsg_job *job,
static inline void sas_fail_probe(struct domain_device *dev, const char *func, int err)
{
- pr_warn("%s: for %s device %16llx returned %d\n",
+ pr_warn("%s: for %s device %016llx returned %d\n",
func, dev->parent ? "exp-attached" :
"direct-attached",
SAS_ADDR(dev->sas_addr), err);
diff --git a/drivers/scsi/libsas/sas_port.c b/drivers/scsi/libsas/sas_port.c
index 7c86fd248129..19cf418928fa 100644
--- a/drivers/scsi/libsas/sas_port.c
+++ b/drivers/scsi/libsas/sas_port.c
@@ -165,7 +165,7 @@ static void sas_form_port(struct asd_sas_phy *phy)
}
sas_port_add_phy(port->port, phy->phy);
- pr_debug("%s added to %s, phy_mask:0x%x (%16llx)\n",
+ pr_debug("%s added to %s, phy_mask:0x%x (%016llx)\n",
dev_name(&phy->phy->dev), dev_name(&port->port->dev),
port->phy_mask,
SAS_ADDR(port->attached_sas_addr));
diff --git a/drivers/scsi/libsas/sas_scsi_host.c b/drivers/scsi/libsas/sas_scsi_host.c
index bec83eb8ab87..9e0975e55c27 100644
--- a/drivers/scsi/libsas/sas_scsi_host.c
+++ b/drivers/scsi/libsas/sas_scsi_host.c
@@ -330,7 +330,7 @@ static int sas_recover_lu(struct domain_device *dev, struct scsi_cmnd *cmd)
int_to_scsilun(cmd->device->lun, &lun);
- pr_notice("eh: device %llx LUN %llx has the task\n",
+ pr_notice("eh: device %016llx LUN 0x%llx has the task\n",
SAS_ADDR(dev->sas_addr),
cmd->device->lun);
@@ -615,7 +615,7 @@ static void sas_eh_handle_sas_errors(struct Scsi_Host *shost, struct list_head *
reset:
tmf_resp = sas_recover_lu(task->dev, cmd);
if (tmf_resp == TMF_RESP_FUNC_COMPLETE) {
- pr_notice("dev %016llx LU %llx is recovered\n",
+ pr_notice("dev %016llx LU 0x%llx is recovered\n",
SAS_ADDR(task->dev),
cmd->device->lun);
sas_eh_finish_cmd(cmd);
@@ -666,7 +666,7 @@ static void sas_eh_handle_sas_errors(struct Scsi_Host *shost, struct list_head *
* of effort could recover from errors. Quite
* possibly the HA just disappeared.
*/
- pr_err("error from device %llx, LUN %llx couldn't be recovered in any way\n",
+ pr_err("error from device %016llx, LUN 0x%llx couldn't be recovered in any way\n",
SAS_ADDR(task->dev->sas_addr),
cmd->device->lun);
@@ -851,7 +851,7 @@ int sas_slave_configure(struct scsi_device *scsi_dev)
if (scsi_dev->tagged_supported) {
scsi_change_queue_depth(scsi_dev, SAS_DEF_QD);
} else {
- pr_notice("device %llx, LUN %llx doesn't support TCQ\n",
+ pr_notice("device %016llx, LUN 0x%llx doesn't support TCQ\n",
SAS_ADDR(dev->sas_addr), scsi_dev->lun);
scsi_change_queue_depth(scsi_dev, 1);
}
diff --git a/drivers/scsi/libsas/sas_task.c b/drivers/scsi/libsas/sas_task.c
index 1ded7d85027e..e2d42593ce52 100644
--- a/drivers/scsi/libsas/sas_task.c
+++ b/drivers/scsi/libsas/sas_task.c
@@ -27,7 +27,7 @@ void sas_ssp_task_response(struct device *dev, struct sas_task *task,
memcpy(tstat->buf, iu->sense_data, tstat->buf_valid_size);
if (iu->status != SAM_STAT_CHECK_CONDITION)
- dev_warn(dev, "dev %llx sent sense data, but stat(%x) is not CHECK CONDITION\n",
+ dev_warn(dev, "dev %016llx sent sense data, but stat(0x%x) is not CHECK CONDITION\n",
SAS_ADDR(task->dev->sas_addr), iu->status);
}
else
diff --git a/drivers/scsi/lpfc/lpfc.h b/drivers/scsi/lpfc/lpfc.h
index 935f98804198..04d73e2be373 100644
--- a/drivers/scsi/lpfc/lpfc.h
+++ b/drivers/scsi/lpfc/lpfc.h
@@ -1223,6 +1223,8 @@ struct lpfc_hba {
#define LPFC_POLL_HB 1 /* slowpath heartbeat */
#define LPFC_POLL_FASTPATH 0 /* called from fastpath */
#define LPFC_POLL_SLOWPATH 1 /* called from slowpath */
+
+ char os_host_name[MAXHOSTNAMELEN];
};
static inline struct Scsi_Host *
diff --git a/drivers/scsi/lpfc/lpfc_attr.c b/drivers/scsi/lpfc/lpfc_attr.c
index 4ff82b36a37a..46f56f30f77e 100644
--- a/drivers/scsi/lpfc/lpfc_attr.c
+++ b/drivers/scsi/lpfc/lpfc_attr.c
@@ -4123,14 +4123,13 @@ lpfc_topology_store(struct device *dev, struct device_attribute *attr,
/*
* The 'topology' is not a configurable parameter if :
* - persistent topology enabled
- * - G7 adapters
- * - G6 with no private loop support
+ * - G7/G6 with no private loop support
*/
- if (((phba->hba_flag & HBA_PERSISTENT_TOPO) ||
+ if ((phba->hba_flag & HBA_PERSISTENT_TOPO ||
(!phba->sli4_hba.pc_sli4_params.pls &&
- phba->pcidev->device == PCI_DEVICE_ID_LANCER_G6_FC) ||
- phba->pcidev->device == PCI_DEVICE_ID_LANCER_G7_FC) &&
+ (phba->pcidev->device == PCI_DEVICE_ID_LANCER_G6_FC ||
+ phba->pcidev->device == PCI_DEVICE_ID_LANCER_G7_FC))) &&
val == 4) {
lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
"3114 Loop mode not supported\n");
diff --git a/drivers/scsi/lpfc/lpfc_crtn.h b/drivers/scsi/lpfc/lpfc_crtn.h
index ee353c84a097..25d3dd39bc05 100644
--- a/drivers/scsi/lpfc/lpfc_crtn.h
+++ b/drivers/scsi/lpfc/lpfc_crtn.h
@@ -180,7 +180,7 @@ int lpfc_issue_gidft(struct lpfc_vport *vport);
int lpfc_get_gidft_type(struct lpfc_vport *vport, struct lpfc_iocbq *iocbq);
int lpfc_ns_cmd(struct lpfc_vport *, int, uint8_t, uint32_t);
int lpfc_fdmi_cmd(struct lpfc_vport *, struct lpfc_nodelist *, int, uint32_t);
-void lpfc_fdmi_num_disc_check(struct lpfc_vport *);
+void lpfc_fdmi_change_check(struct lpfc_vport *vport);
void lpfc_delayed_disc_tmo(struct timer_list *);
void lpfc_delayed_disc_timeout_handler(struct lpfc_vport *);
diff --git a/drivers/scsi/lpfc/lpfc_ct.c b/drivers/scsi/lpfc/lpfc_ct.c
index 99c9bb249758..58b35a1442c1 100644
--- a/drivers/scsi/lpfc/lpfc_ct.c
+++ b/drivers/scsi/lpfc/lpfc_ct.c
@@ -1493,33 +1493,35 @@ int
lpfc_vport_symbolic_node_name(struct lpfc_vport *vport, char *symbol,
size_t size)
{
- char fwrev[FW_REV_STR_SIZE];
- int n;
+ char fwrev[FW_REV_STR_SIZE] = {0};
+ char tmp[MAXHOSTNAMELEN] = {0};
- lpfc_decode_firmware_rev(vport->phba, fwrev, 0);
+ memset(symbol, 0, size);
- n = scnprintf(symbol, size, "Emulex %s", vport->phba->ModelName);
- if (size < n)
- return n;
+ scnprintf(tmp, sizeof(tmp), "Emulex %s", vport->phba->ModelName);
+ if (strlcat(symbol, tmp, size) >= size)
+ goto buffer_done;
- n += scnprintf(symbol + n, size - n, " FV%s", fwrev);
- if (size < n)
- return n;
+ lpfc_decode_firmware_rev(vport->phba, fwrev, 0);
+ scnprintf(tmp, sizeof(tmp), " FV%s", fwrev);
+ if (strlcat(symbol, tmp, size) >= size)
+ goto buffer_done;
- n += scnprintf(symbol + n, size - n, " DV%s.",
- lpfc_release_version);
- if (size < n)
- return n;
+ scnprintf(tmp, sizeof(tmp), " DV%s", lpfc_release_version);
+ if (strlcat(symbol, tmp, size) >= size)
+ goto buffer_done;
- n += scnprintf(symbol + n, size - n, " HN:%s.",
- init_utsname()->nodename);
- if (size < n)
- return n;
+ scnprintf(tmp, sizeof(tmp), " HN:%s", vport->phba->os_host_name);
+ if (strlcat(symbol, tmp, size) >= size)
+ goto buffer_done;
/* Note :- OS name is "Linux" */
- n += scnprintf(symbol + n, size - n, " OS:%s",
- init_utsname()->sysname);
- return n;
+ scnprintf(tmp, sizeof(tmp), " OS:%s", init_utsname()->sysname);
+ strlcat(symbol, tmp, size);
+
+buffer_done:
+ return strnlen(symbol, size);
+
}
static uint32_t
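The rewrite above drops the hand-maintained offset n in favor of scnprintf() into a scratch buffer plus strlcat() into the caller's buffer, so truncation of any one field can never advance the write position past size. A minimal user-space sketch of the same pattern; xstrlcat mimics kernel strlcat semantics, and the field values are made-up stand-ins, not lpfc data:

#include <stdio.h>
#include <string.h>

/* Minimal strlcat clone (BSD/kernel semantics): always NUL-terminates and
 * returns the length it tried to create, so callers can detect truncation. */
static size_t xstrlcat(char *dst, const char *src, size_t size)
{
	size_t dlen = strnlen(dst, size);
	size_t slen = strlen(src);

	if (dlen == size)	/* dst already full, nothing appended */
		return size + slen;
	snprintf(dst + dlen, size - dlen, "%s", src);
	return dlen + slen;
}

int main(void)
{
	char symbol[32];	/* deliberately small to show truncation */
	char tmp[64];

	memset(symbol, 0, sizeof(symbol));

	/* Append one field at a time; stop as soon as one no longer fits. */
	snprintf(tmp, sizeof(tmp), "Emulex %s", "LPe35002");	/* example model */
	if (xstrlcat(symbol, tmp, sizeof(symbol)) >= sizeof(symbol))
		goto done;
	snprintf(tmp, sizeof(tmp), " FV%s", "12.6.0.3");
	if (xstrlcat(symbol, tmp, sizeof(symbol)) >= sizeof(symbol))
		goto done;
	snprintf(tmp, sizeof(tmp), " HN:%s", "some-very-long-hostname");
	xstrlcat(symbol, tmp, sizeof(symbol));
done:
	/* Like the driver, report what actually landed in the buffer. */
	printf("%zu: \"%s\"\n", strnlen(symbol, sizeof(symbol)), symbol);
	return 0;
}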
@@ -1998,14 +2000,16 @@ lpfc_cmpl_ct_disc_fdmi(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
/**
- * lpfc_fdmi_num_disc_check - Check how many mapped NPorts we are connected to
+ * lpfc_fdmi_change_check - Check for changed FDMI parameters
* @vport: pointer to a host virtual N_Port data structure.
*
- * Called from hbeat timeout routine to check if the number of discovered
- * ports has changed. If so, re-register thar port Attribute.
+ * Check how many mapped NPorts we are connected to.
+ * Check if our hostname changed.
+ * Called from the hbeat timeout routine to check if any FDMI parameters
+ * changed. If so, re-register the affected Attributes.
*/
void
-lpfc_fdmi_num_disc_check(struct lpfc_vport *vport)
+lpfc_fdmi_change_check(struct lpfc_vport *vport)
{
struct lpfc_hba *phba = vport->phba;
struct lpfc_nodelist *ndlp;
@@ -2018,17 +2022,41 @@ lpfc_fdmi_num_disc_check(struct lpfc_vport *vport)
if (!(vport->fc_flag & FC_FABRIC))
return;
+ ndlp = lpfc_findnode_did(vport, FDMI_DID);
+ if (!ndlp || !NLP_CHK_NODE_ACT(ndlp))
+ return;
+
+ /* Check if system hostname changed */
+ if (strcmp(phba->os_host_name, init_utsname()->nodename)) {
+ memset(phba->os_host_name, 0, sizeof(phba->os_host_name));
+ scnprintf(phba->os_host_name, sizeof(phba->os_host_name), "%s",
+ init_utsname()->nodename);
+ lpfc_ns_cmd(vport, SLI_CTNS_RSNN_NN, 0, 0);
+
+ /* Since this affects multiple HBA and PORT attributes, we need to
+ * de-register and go through the whole FDMI registration cycle.
+ * DHBA -> DPRT -> RHBA -> RPA (physical port)
+ * DPRT -> RPRT (vports)
+ */
+ if (vport->port_type == LPFC_PHYSICAL_PORT)
+ lpfc_fdmi_cmd(vport, ndlp, SLI_MGMT_DHBA, 0);
+ else
+ lpfc_fdmi_cmd(vport, ndlp, SLI_MGMT_DPRT, 0);
+
+ /* Since this code path registers all the port attributes
+ * we can just return without further checking.
+ */
+ return;
+ }
+
if (!(vport->fdmi_port_mask & LPFC_FDMI_PORT_ATTR_num_disc))
return;
+ /* Check if the number of mapped NPorts changed */
cnt = lpfc_find_map_node(vport);
if (cnt == vport->fdmi_num_disc)
return;
- ndlp = lpfc_findnode_did(vport, FDMI_DID);
- if (!ndlp || !NLP_CHK_NODE_ACT(ndlp))
- return;
-
if (vport->port_type == LPFC_PHYSICAL_PORT) {
lpfc_fdmi_cmd(vport, ndlp, SLI_MGMT_RPA,
LPFC_FDMI_PORT_ATTR_num_disc);
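The new early block caches the node name in phba->os_host_name and, on a mismatch, re-runs the full FDMI registration cycle. A user-space sketch of the compare-and-refresh step, using uname(2) as a stand-in for the kernel's init_utsname() (the MAXHOSTNAMELEN value is an assumption):

#include <stdio.h>
#include <string.h>
#include <sys/utsname.h>	/* user-space stand-in for init_utsname() */

#define MAXHOSTNAMELEN 64	/* assumed limit, matching utsname nodename */

static char cached_host_name[MAXHOSTNAMELEN];

/* Returns 1 when the hostname differs from the cached copy (and refreshes
 * the cache), mirroring the check lpfc_fdmi_change_check() performs. */
static int host_name_changed(void)
{
	struct utsname un;

	uname(&un);
	if (strcmp(cached_host_name, un.nodename) == 0)
		return 0;
	snprintf(cached_host_name, sizeof(cached_host_name), "%s",
		 un.nodename);
	return 1;
}

int main(void)
{
	if (host_name_changed())
		puts("hostname changed: re-run DHBA/DPRT -> RHBA/RPA cycle");
	else
		puts("hostname unchanged: only the disc-count attr is checked");
	return 0;
}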
@@ -2616,8 +2644,8 @@ lpfc_fdmi_port_attr_host_name(struct lpfc_vport *vport,
ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue;
memset(ae, 0, 256);
- snprintf(ae->un.AttrString, sizeof(ae->un.AttrString), "%s",
- init_utsname()->nodename);
+ scnprintf(ae->un.AttrString, sizeof(ae->un.AttrString), "%s",
+ vport->phba->os_host_name);
len = strnlen(ae->un.AttrString, sizeof(ae->un.AttrString));
len += (len & 3) ? (4 - (len & 3)) : 4;
diff --git a/drivers/scsi/lpfc/lpfc_debugfs.c b/drivers/scsi/lpfc/lpfc_debugfs.c
index a5ecbce4eda2..819335b16c2e 100644
--- a/drivers/scsi/lpfc/lpfc_debugfs.c
+++ b/drivers/scsi/lpfc/lpfc_debugfs.c
@@ -2085,6 +2085,8 @@ static int lpfc_debugfs_ras_log_data(struct lpfc_hba *phba,
int copied = 0;
struct lpfc_dmabuf *dmabuf, *next;
+ memset(buffer, 0, size);
+
spin_lock_irq(&phba->hbalock);
if (phba->ras_fwlog.state != ACTIVE) {
spin_unlock_irq(&phba->hbalock);
@@ -2094,10 +2096,15 @@ static int lpfc_debugfs_ras_log_data(struct lpfc_hba *phba,
list_for_each_entry_safe(dmabuf, next,
&phba->ras_fwlog.fwlog_buff_list, list) {
+ /* Stop before the copy would overrun size, keeping room for a '\0' */
+ if ((copied + LPFC_RAS_MAX_ENTRY_SIZE) >= (size - 1)) {
+ memcpy(buffer + copied, dmabuf->virt,
+ size - copied - 1);
+ copied += size - copied - 1;
+ break;
+ }
memcpy(buffer + copied, dmabuf->virt, LPFC_RAS_MAX_ENTRY_SIZE);
copied += LPFC_RAS_MAX_ENTRY_SIZE;
- if (size > copied)
- break;
}
return copied;
}
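The fixed loop clamps the final chunk so copied can never reach size, preserving the terminating NUL established by the memset above (the old test was both inverted and off by the NUL byte). A self-contained sketch of that bound; ENTRY_SZ is an illustrative stand-in for LPFC_RAS_MAX_ENTRY_SIZE:

#include <stdio.h>
#include <string.h>

#define ENTRY_SZ 16	/* stand-in for LPFC_RAS_MAX_ENTRY_SIZE */

/* Copy fixed-size chunks into `buffer`, clamping the final chunk so the
 * total never reaches `size` and the trailing '\0' survives — the same
 * bound the fixed lpfc_debugfs_ras_log_data() enforces. */
static int copy_log(char *buffer, size_t size,
		    const char *chunks[], int nchunks)
{
	int copied = 0;

	memset(buffer, 0, size);
	for (int i = 0; i < nchunks; i++) {
		if (copied + ENTRY_SZ >= (int)size - 1) {
			memcpy(buffer + copied, chunks[i],
			       size - copied - 1);
			copied += size - copied - 1;
			break;
		}
		memcpy(buffer + copied, chunks[i], ENTRY_SZ);
		copied += ENTRY_SZ;
	}
	return copied;
}

int main(void)
{
	const char *chunks[] = { "0123456789abcdef", "fedcba9876543210",
				 "0123456789abcdef" };
	char buf[40];

	printf("copied %d bytes: \"%s\"\n",
	       copy_log(buf, sizeof(buf), chunks, 3), buf);
	return 0;
}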
diff --git a/drivers/scsi/lpfc/lpfc_hbadisc.c b/drivers/scsi/lpfc/lpfc_hbadisc.c
index 85ada3deb47d..dcc8999c6a68 100644
--- a/drivers/scsi/lpfc/lpfc_hbadisc.c
+++ b/drivers/scsi/lpfc/lpfc_hbadisc.c
@@ -28,6 +28,7 @@
#include <linux/kthread.h>
#include <linux/interrupt.h>
#include <linux/lockdep.h>
+#include <linux/utsname.h>
#include <scsi/scsi.h>
#include <scsi/scsi_device.h>
@@ -3315,6 +3316,10 @@ lpfc_mbx_process_link_up(struct lpfc_hba *phba, struct lpfc_mbx_read_top *la)
lpfc_sli4_clear_fcf_rr_bmask(phba);
}
+ /* Prepare for LINK up registrations */
+ memset(phba->os_host_name, 0, sizeof(phba->os_host_name));
+ scnprintf(phba->os_host_name, sizeof(phba->os_host_name), "%s",
+ init_utsname()->nodename);
return;
out:
lpfc_vport_set_state(vport, FC_VPORT_FAILED);
diff --git a/drivers/scsi/lpfc/lpfc_hw4.h b/drivers/scsi/lpfc/lpfc_hw4.h
index 25cdcbc2b02f..9a064b96e570 100644
--- a/drivers/scsi/lpfc/lpfc_hw4.h
+++ b/drivers/scsi/lpfc/lpfc_hw4.h
@@ -3925,6 +3925,9 @@ struct lpfc_mbx_wr_object {
#define LPFC_CHANGE_STATUS_FW_RESET 0x02
#define LPFC_CHANGE_STATUS_PORT_MIGRATION 0x04
#define LPFC_CHANGE_STATUS_PCI_RESET 0x05
+#define lpfc_wr_object_csf_SHIFT 8
+#define lpfc_wr_object_csf_MASK 0x00000001
+#define lpfc_wr_object_csf_WORD word5
} response;
} u;
};
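The three new defines follow lpfc's usual SHIFT/MASK/WORD triplet convention, which the driver's bf_get()/bf_set() macros consume. A sketch of how such a triplet decodes the csf bit out of response word5 — the macro expansion is written out by hand here and the word layout is assumed from the defines:

#include <stdio.h>
#include <stdint.h>

#define lpfc_wr_object_csf_SHIFT 8
#define lpfc_wr_object_csf_MASK  0x00000001

struct wr_object_response { uint32_t word5; };

/* bf_get(name, ptr) conventionally expands to
 * ((ptr)-><name>_WORD >> <name>_SHIFT) & <name>_MASK. */
#define bf_get_csf(resp) \
	(((resp)->word5 >> lpfc_wr_object_csf_SHIFT) & \
	 lpfc_wr_object_csf_MASK)

int main(void)
{
	struct wr_object_response r = { .word5 = 1u << 8 };

	/* csf set => treat the firmware change status as a PCI reset */
	printf("csf=%u\n", bf_get_csf(&r));
	return 0;
}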
diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c
index 6a04fdb3fbf2..5a605773dd0a 100644
--- a/drivers/scsi/lpfc/lpfc_init.c
+++ b/drivers/scsi/lpfc/lpfc_init.c
@@ -1362,7 +1362,7 @@ lpfc_hb_timeout_handler(struct lpfc_hba *phba)
if (vports != NULL)
for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
lpfc_rcv_seq_check_edtov(vports[i]);
- lpfc_fdmi_num_disc_check(vports[i]);
+ lpfc_fdmi_change_check(vports[i]);
}
lpfc_destroy_vport_work_array(phba, vports);
@@ -8320,14 +8320,6 @@ lpfc_map_topology(struct lpfc_hba *phba, struct lpfc_mbx_read_config *rd_config)
phba->hba_flag |= HBA_PERSISTENT_TOPO;
switch (phba->pcidev->device) {
case PCI_DEVICE_ID_LANCER_G7_FC:
- if (tf || (pt == LINK_FLAGS_LOOP)) {
- /* Invalid values from FW - use driver params */
- phba->hba_flag &= ~HBA_PERSISTENT_TOPO;
- } else {
- /* Prism only supports PT2PT topology */
- phba->cfg_topology = FLAGS_TOPOLOGY_MODE_PT_PT;
- }
- break;
case PCI_DEVICE_ID_LANCER_G6_FC:
if (!tf) {
phba->cfg_topology = ((pt == LINK_FLAGS_LOOP)
@@ -10449,6 +10441,8 @@ lpfc_sli4_pci_mem_unset(struct lpfc_hba *phba)
case LPFC_SLI_INTF_IF_TYPE_6:
iounmap(phba->sli4_hba.drbl_regs_memmap_p);
iounmap(phba->sli4_hba.conf_regs_memmap_p);
+ if (phba->sli4_hba.dpp_regs_memmap_p)
+ iounmap(phba->sli4_hba.dpp_regs_memmap_p);
break;
case LPFC_SLI_INTF_IF_TYPE_1:
default:
diff --git a/drivers/scsi/lpfc/lpfc_nportdisc.c b/drivers/scsi/lpfc/lpfc_nportdisc.c
index ae4359013846..a024e5a3918f 100644
--- a/drivers/scsi/lpfc/lpfc_nportdisc.c
+++ b/drivers/scsi/lpfc/lpfc_nportdisc.c
@@ -308,7 +308,7 @@ lpfc_defer_pt2pt_acc(struct lpfc_hba *phba, LPFC_MBOXQ_t *link_mbox)
mb->mbxStatus);
mempool_free(login_mbox, phba->mbox_mem_pool);
mempool_free(link_mbox, phba->mbox_mem_pool);
- lpfc_sli_release_iocbq(phba, save_iocb);
+ kfree(save_iocb);
return;
}
@@ -325,7 +325,61 @@ lpfc_defer_pt2pt_acc(struct lpfc_hba *phba, LPFC_MBOXQ_t *link_mbox)
}
mempool_free(link_mbox, phba->mbox_mem_pool);
- lpfc_sli_release_iocbq(phba, save_iocb);
+ kfree(save_iocb);
+}
+
+/**
+ * lpfc_defer_acc_rsp - Progress SLI4 target rcv PLOGI handler
+ * @phba: Pointer to HBA context object.
+ * @pmb: Pointer to mailbox object.
+ *
+ * This function provides the unreg rpi mailbox completion handler for a tgt.
+ * The routine frees the memory resources associated with the completed
+ * mailbox command and transmits the ELS ACC.
+ *
+ * This routine is only called if we are SLI4, acting in target
+ * mode and the remote NPort issues the PLOGI after link up.
+ **/
+static void
+lpfc_defer_acc_rsp(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
+{
+ struct lpfc_vport *vport = pmb->vport;
+ struct lpfc_nodelist *ndlp = pmb->ctx_ndlp;
+ LPFC_MBOXQ_t *mbox = pmb->context3;
+ struct lpfc_iocbq *piocb = NULL;
+ int rc;
+
+ if (mbox) {
+ pmb->context3 = NULL;
+ piocb = mbox->context3;
+ mbox->context3 = NULL;
+ }
+
+ /*
+ * Complete the unreg rpi mbx request, and update flags.
+ * This will also restart any deferred events.
+ */
+ lpfc_nlp_get(ndlp);
+ lpfc_sli4_unreg_rpi_cmpl_clr(phba, pmb);
+
+ if (!piocb) {
+ lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY | LOG_ELS,
+ "4578 PLOGI ACC fail\n");
+ if (mbox)
+ mempool_free(mbox, phba->mbox_mem_pool);
+ goto out;
+ }
+
+ rc = lpfc_els_rsp_acc(vport, ELS_CMD_PLOGI, piocb, ndlp, mbox);
+ if (rc) {
+ lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY | LOG_ELS,
+ "4579 PLOGI ACC fail %x\n", rc);
+ if (mbox)
+ mempool_free(mbox, phba->mbox_mem_pool);
+ }
+ kfree(piocb);
+out:
+ lpfc_nlp_put(ndlp);
}
static int
@@ -345,6 +399,7 @@ lpfc_rcv_plogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
struct lpfc_iocbq *save_iocb;
struct ls_rjt stat;
uint32_t vid, flag;
+ u16 rpi;
int rc, defer_acc;
memset(&stat, 0, sizeof (struct ls_rjt));
@@ -488,7 +543,7 @@ lpfc_rcv_plogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
link_mbox->vport = vport;
link_mbox->ctx_ndlp = ndlp;
- save_iocb = lpfc_sli_get_iocbq(phba);
+ save_iocb = kzalloc(sizeof(*save_iocb), GFP_KERNEL);
if (!save_iocb)
goto out;
/* Save info from cmd IOCB used in rsp */
@@ -513,7 +568,36 @@ lpfc_rcv_plogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
goto out;
/* Registering an existing RPI behaves differently for SLI3 vs SLI4 */
- if (phba->sli_rev == LPFC_SLI_REV4)
+ if (phba->nvmet_support && !defer_acc) {
+ link_mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
+ if (!link_mbox)
+ goto out;
+
+ /* As unique identifiers such as iotag would be overwritten
+ * with those from the cmdiocb, allocate separate temporary
+ * storage for the copy.
+ */
+ save_iocb = kzalloc(sizeof(*save_iocb), GFP_KERNEL);
+ if (!save_iocb)
+ goto out;
+
+ /* Unreg RPI is required for SLI4. */
+ rpi = phba->sli4_hba.rpi_ids[ndlp->nlp_rpi];
+ lpfc_unreg_login(phba, vport->vpi, rpi, link_mbox);
+ link_mbox->vport = vport;
+ link_mbox->ctx_ndlp = ndlp;
+ link_mbox->mbox_cmpl = lpfc_defer_acc_rsp;
+
+ if (((ndlp->nlp_DID & Fabric_DID_MASK) != Fabric_DID_MASK) &&
+ (!(vport->fc_flag & FC_OFFLINE_MODE)))
+ ndlp->nlp_flag |= NLP_UNREG_INP;
+
+ /* Save info from cmd IOCB used in rsp */
+ memcpy(save_iocb, cmdiocb, sizeof(*save_iocb));
+
+ /* Delay sending ACC till unreg RPI completes. */
+ defer_acc = 1;
+ } else if (phba->sli_rev == LPFC_SLI_REV4)
lpfc_unreg_rpi(vport, ndlp);
rc = lpfc_reg_rpi(phba, vport->vpi, icmd->un.rcvels.remoteID,
@@ -553,6 +637,9 @@ lpfc_rcv_plogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
if ((vport->port_type == LPFC_NPIV_PORT &&
vport->cfg_restrict_login)) {
+ /* no deferred ACC */
+ kfree(save_iocb);
+
/* In order to preserve RPIs, we want to cleanup
* the default RPI the firmware created to rcv
* this ELS request. The only way to do this is
@@ -571,8 +658,12 @@ lpfc_rcv_plogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
}
if (defer_acc) {
/* So the order here should be:
- * Issue CONFIG_LINK mbox
- * CONFIG_LINK cmpl
+ * SLI3 pt2pt
+ * Issue CONFIG_LINK mbox
+ * CONFIG_LINK cmpl
+ * SLI4 tgt
+ * Issue UNREG RPI mbx
+ * UNREG RPI cmpl
* Issue PLOGI ACC
* PLOGI ACC cmpl
* Issue REG_LOGIN mbox
@@ -596,10 +687,9 @@ lpfc_rcv_plogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
out:
if (defer_acc)
lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY,
- "4577 pt2pt discovery failure: %p %p %p\n",
+ "4577 discovery failure: %p %p %p\n",
save_iocb, link_mbox, login_mbox);
- if (save_iocb)
- lpfc_sli_release_iocbq(phba, save_iocb);
+ kfree(save_iocb);
if (link_mbox)
mempool_free(link_mbox, phba->mbox_mem_pool);
if (login_mbox)
diff --git a/drivers/scsi/lpfc/lpfc_scsi.c b/drivers/scsi/lpfc/lpfc_scsi.c
index b138d9fee675..2c7e0b22db2f 100644
--- a/drivers/scsi/lpfc/lpfc_scsi.c
+++ b/drivers/scsi/lpfc/lpfc_scsi.c
@@ -481,7 +481,7 @@ lpfc_sli4_vport_delete_fcp_xri_aborted(struct lpfc_vport *vport)
spin_lock(&qp->abts_io_buf_list_lock);
list_for_each_entry_safe(psb, next_psb,
&qp->lpfc_abts_io_buf_list, list) {
- if (psb->cur_iocbq.iocb_flag == LPFC_IO_NVME)
+ if (psb->cur_iocbq.iocb_flag & LPFC_IO_NVME)
continue;
if (psb->rdata && psb->rdata->pnode &&
@@ -528,7 +528,7 @@ lpfc_sli4_io_xri_aborted(struct lpfc_hba *phba,
list_del_init(&psb->list);
psb->flags &= ~LPFC_SBUF_XBUSY;
psb->status = IOSTAT_SUCCESS;
- if (psb->cur_iocbq.iocb_flag == LPFC_IO_NVME) {
+ if (psb->cur_iocbq.iocb_flag & LPFC_IO_NVME) {
qp->abts_nvme_io_bufs--;
spin_unlock(&qp->abts_io_buf_list_lock);
spin_unlock_irqrestore(&phba->hbalock, iflag);
diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c
index 625c046ac4ef..64002b0cb02d 100644
--- a/drivers/scsi/lpfc/lpfc_sli.c
+++ b/drivers/scsi/lpfc/lpfc_sli.c
@@ -4918,8 +4918,17 @@ static int
lpfc_sli4_rb_setup(struct lpfc_hba *phba)
{
phba->hbq_in_use = 1;
- phba->hbqs[LPFC_ELS_HBQ].entry_count =
- lpfc_hbq_defs[LPFC_ELS_HBQ]->entry_count;
+ /*
+ * Specific case when MDS diagnostics is enabled and supported:
+ * the receive buffer count is halved to manage the incoming
+ * traffic.
+ */
+ if (phba->cfg_enable_mds_diags && phba->mds_diags_support)
+ phba->hbqs[LPFC_ELS_HBQ].entry_count =
+ lpfc_hbq_defs[LPFC_ELS_HBQ]->entry_count >> 1;
+ else
+ phba->hbqs[LPFC_ELS_HBQ].entry_count =
+ lpfc_hbq_defs[LPFC_ELS_HBQ]->entry_count;
phba->hbq_count = 1;
lpfc_sli_hbqbuf_init_hbqs(phba, LPFC_ELS_HBQ);
/* Initially populate or replenish the HBQs */
@@ -19449,7 +19458,7 @@ lpfc_wr_object(struct lpfc_hba *phba, struct list_head *dmabuf_list,
struct lpfc_mbx_wr_object *wr_object;
LPFC_MBOXQ_t *mbox;
int rc = 0, i = 0;
- uint32_t shdr_status, shdr_add_status, shdr_change_status;
+ uint32_t shdr_status, shdr_add_status, shdr_change_status, shdr_csf;
uint32_t mbox_tmo;
struct lpfc_dmabuf *dmabuf;
uint32_t written = 0;
@@ -19506,6 +19515,16 @@ lpfc_wr_object(struct lpfc_hba *phba, struct list_head *dmabuf_list,
if (check_change_status) {
shdr_change_status = bf_get(lpfc_wr_object_change_status,
&wr_object->u.response);
+
+ if (shdr_change_status == LPFC_CHANGE_STATUS_FW_RESET ||
+ shdr_change_status == LPFC_CHANGE_STATUS_PORT_MIGRATION) {
+ shdr_csf = bf_get(lpfc_wr_object_csf,
+ &wr_object->u.response);
+ if (shdr_csf)
+ shdr_change_status =
+ LPFC_CHANGE_STATUS_PCI_RESET;
+ }
+
switch (shdr_change_status) {
case (LPFC_CHANGE_STATUS_PHYS_DEV_RESET):
lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
diff --git a/drivers/scsi/lpfc/lpfc_version.h b/drivers/scsi/lpfc/lpfc_version.h
index 9e5ff58edaca..9563c49f36ab 100644
--- a/drivers/scsi/lpfc/lpfc_version.h
+++ b/drivers/scsi/lpfc/lpfc_version.h
@@ -20,7 +20,7 @@
* included with this package. *
*******************************************************************/
-#define LPFC_DRIVER_VERSION "12.6.0.2"
+#define LPFC_DRIVER_VERSION "12.6.0.3"
#define LPFC_DRIVER_NAME "lpfc"
/* Used for SLI 2/3 */
diff --git a/drivers/scsi/megaraid/megaraid_mbox.c b/drivers/scsi/megaraid/megaraid_mbox.c
index f6ac819e6e96..8443f2f35be2 100644
--- a/drivers/scsi/megaraid/megaraid_mbox.c
+++ b/drivers/scsi/megaraid/megaraid_mbox.c
@@ -731,7 +731,7 @@ megaraid_init_mbox(adapter_t *adapter)
goto out_free_raid_dev;
}
- raid_dev->baseaddr = ioremap_nocache(raid_dev->baseport, 128);
+ raid_dev->baseaddr = ioremap(raid_dev->baseport, 128);
if (!raid_dev->baseaddr) {
diff --git a/drivers/scsi/megaraid/megaraid_sas.h b/drivers/scsi/megaraid/megaraid_sas.h
index bd8184072bed..83d8c4cb1ad5 100644
--- a/drivers/scsi/megaraid/megaraid_sas.h
+++ b/drivers/scsi/megaraid/megaraid_sas.h
@@ -21,8 +21,8 @@
/*
* MegaRAID SAS Driver meta data
*/
-#define MEGASAS_VERSION "07.710.50.00-rc1"
-#define MEGASAS_RELDATE "June 28, 2019"
+#define MEGASAS_VERSION "07.713.01.00-rc1"
+#define MEGASAS_RELDATE "Dec 27, 2019"
#define MEGASAS_MSIX_NAME_LEN 32
@@ -2233,9 +2233,9 @@ enum MR_PD_TYPE {
/* JBOD Queue depth definitions */
#define MEGASAS_SATA_QD 32
-#define MEGASAS_SAS_QD 64
+#define MEGASAS_SAS_QD 256
#define MEGASAS_DEFAULT_PD_QD 64
-#define MEGASAS_NVME_QD 32
+#define MEGASAS_NVME_QD 64
#define MR_DEFAULT_NVME_PAGE_SIZE 4096
#define MR_DEFAULT_NVME_PAGE_SHIFT 12
@@ -2640,10 +2640,11 @@ enum MEGASAS_OCR_CAUSE {
};
enum DCMD_RETURN_STATUS {
- DCMD_SUCCESS = 0,
- DCMD_TIMEOUT = 1,
- DCMD_FAILED = 2,
- DCMD_NOT_FIRED = 3,
+ DCMD_SUCCESS = 0x00,
+ DCMD_TIMEOUT = 0x01,
+ DCMD_FAILED = 0x02,
+ DCMD_BUSY = 0x03,
+ DCMD_INIT = 0xff,
};
u8
diff --git a/drivers/scsi/megaraid/megaraid_sas_base.c b/drivers/scsi/megaraid/megaraid_sas_base.c
index a4bc81479284..acb82181f70f 100644
--- a/drivers/scsi/megaraid/megaraid_sas_base.c
+++ b/drivers/scsi/megaraid/megaraid_sas_base.c
@@ -1099,7 +1099,7 @@ megasas_issue_polled(struct megasas_instance *instance, struct megasas_cmd *cmd)
if (atomic_read(&instance->adprecovery) == MEGASAS_HW_CRITICAL_ERROR) {
dev_err(&instance->pdev->dev, "Failed from %s %d\n",
__func__, __LINE__);
- return DCMD_NOT_FIRED;
+ return DCMD_INIT;
}
instance->instancet->issue_dcmd(instance, cmd);
@@ -1123,19 +1123,19 @@ megasas_issue_blocked_cmd(struct megasas_instance *instance,
struct megasas_cmd *cmd, int timeout)
{
int ret = 0;
- cmd->cmd_status_drv = MFI_STAT_INVALID_STATUS;
+ cmd->cmd_status_drv = DCMD_INIT;
if (atomic_read(&instance->adprecovery) == MEGASAS_HW_CRITICAL_ERROR) {
dev_err(&instance->pdev->dev, "Failed from %s %d\n",
__func__, __LINE__);
- return DCMD_NOT_FIRED;
+ return DCMD_INIT;
}
instance->instancet->issue_dcmd(instance, cmd);
if (timeout) {
ret = wait_event_timeout(instance->int_cmd_wait_q,
- cmd->cmd_status_drv != MFI_STAT_INVALID_STATUS, timeout * HZ);
+ cmd->cmd_status_drv != DCMD_INIT, timeout * HZ);
if (!ret) {
dev_err(&instance->pdev->dev,
"DCMD(opcode: 0x%x) is timed out, func:%s\n",
@@ -1144,10 +1144,9 @@ megasas_issue_blocked_cmd(struct megasas_instance *instance,
}
} else
wait_event(instance->int_cmd_wait_q,
- cmd->cmd_status_drv != MFI_STAT_INVALID_STATUS);
+ cmd->cmd_status_drv != DCMD_INIT);
- return (cmd->cmd_status_drv == MFI_STAT_OK) ?
- DCMD_SUCCESS : DCMD_FAILED;
+ return cmd->cmd_status_drv;
}
/**
@@ -1190,19 +1189,19 @@ megasas_issue_blocked_abort_cmd(struct megasas_instance *instance,
cpu_to_le32(upper_32_bits(cmd_to_abort->frame_phys_addr));
cmd->sync_cmd = 1;
- cmd->cmd_status_drv = MFI_STAT_INVALID_STATUS;
+ cmd->cmd_status_drv = DCMD_INIT;
if (atomic_read(&instance->adprecovery) == MEGASAS_HW_CRITICAL_ERROR) {
dev_err(&instance->pdev->dev, "Failed from %s %d\n",
__func__, __LINE__);
- return DCMD_NOT_FIRED;
+ return DCMD_INIT;
}
instance->instancet->issue_dcmd(instance, cmd);
if (timeout) {
ret = wait_event_timeout(instance->abort_cmd_wait_q,
- cmd->cmd_status_drv != MFI_STAT_INVALID_STATUS, timeout * HZ);
+ cmd->cmd_status_drv != DCMD_INIT, timeout * HZ);
if (!ret) {
opcode = cmd_to_abort->frame->dcmd.opcode;
dev_err(&instance->pdev->dev,
@@ -1212,13 +1211,12 @@ megasas_issue_blocked_abort_cmd(struct megasas_instance *instance,
}
} else
wait_event(instance->abort_cmd_wait_q,
- cmd->cmd_status_drv != MFI_STAT_INVALID_STATUS);
+ cmd->cmd_status_drv != DCMD_INIT);
cmd->sync_cmd = 0;
megasas_return_cmd(instance, cmd);
- return (cmd->cmd_status_drv == MFI_STAT_OK) ?
- DCMD_SUCCESS : DCMD_FAILED;
+ return cmd->cmd_status_drv;
}
/**
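With DCMD_INIT as the pre-issue sentinel, the blocked issuers above can return cmd_status_drv directly instead of collapsing every outcome to SUCCESS/FAILED, which is what lets the refire path hand DCMD_BUSY back to ioctl callers. A compact single-threaded model of the handshake (the real driver completes from interrupt context):

#include <stdio.h>

/* Driver-private DCMD return codes after the patch; DCMD_INIT doubles as
 * the "not yet completed" sentinel the waiters poll against. */
enum dcmd_status {
	DCMD_SUCCESS = 0x00,
	DCMD_TIMEOUT = 0x01,
	DCMD_FAILED  = 0x02,
	DCMD_BUSY    = 0x03,
	DCMD_INIT    = 0xff,
};

struct cmd { volatile enum dcmd_status status_drv; };

/* Completion path: translate the firmware status exactly once, and only
 * if nobody (e.g. the refire path) has already filled in a verdict. */
static void complete_cmd(struct cmd *c, int fw_ok)
{
	if (c->status_drv == DCMD_INIT)
		c->status_drv = fw_ok ? DCMD_SUCCESS : DCMD_FAILED;
}

static enum dcmd_status issue_blocked(struct cmd *c, int fw_ok)
{
	c->status_drv = DCMD_INIT;	/* arm the sentinel before firing */
	complete_cmd(c, fw_ok);		/* stands in for IRQ completion */
	return c->status_drv;		/* propagate the real verdict */
}

int main(void)
{
	struct cmd c;

	printf("ok path:   0x%02x\n", issue_blocked(&c, 1));
	printf("fail path: 0x%02x\n", issue_blocked(&c, 0));
	return 0;
}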
@@ -1887,6 +1885,10 @@ void megasas_set_dynamic_target_properties(struct scsi_device *sdev,
mr_device_priv_data->is_tm_capable =
raid->capability.tmCapable;
+
+ if (!raid->flags.isEPD)
+ sdev->no_write_same = 1;
+
} else if (instance->use_seqnum_jbod_fp) {
pd_index = (sdev->channel * MEGASAS_MAX_DEV_PER_CHANNEL) +
sdev->id;
@@ -2150,6 +2152,12 @@ static void megasas_complete_outstanding_ioctls(struct megasas_instance *instanc
void megaraid_sas_kill_hba(struct megasas_instance *instance)
{
+ if (atomic_read(&instance->adprecovery) == MEGASAS_HW_CRITICAL_ERROR) {
+ dev_warn(&instance->pdev->dev,
+ "Adapter already dead, skipping kill HBA\n");
+ return;
+ }
+
/* Set critical error to block I/O & ioctls in case caller didn't */
atomic_set(&instance->adprecovery, MEGASAS_HW_CRITICAL_ERROR);
/* Wait 1 second to ensure IO or ioctls in build have posted */
@@ -2726,7 +2734,7 @@ static int megasas_wait_for_outstanding(struct megasas_instance *instance)
"reset queue\n",
reset_cmd);
- reset_cmd->cmd_status_drv = MFI_STAT_INVALID_STATUS;
+ reset_cmd->cmd_status_drv = DCMD_INIT;
instance->instancet->fire_cmd(instance,
reset_cmd->frame_phys_addr,
0, instance->reg_set);
@@ -3416,7 +3424,6 @@ static struct scsi_host_template megasas_template = {
.bios_param = megasas_bios_param,
.change_queue_depth = scsi_change_queue_depth,
.max_segment_size = 0xffffffff,
- .no_write_same = 1,
};
/**
@@ -3432,7 +3439,11 @@ static void
megasas_complete_int_cmd(struct megasas_instance *instance,
struct megasas_cmd *cmd)
{
- cmd->cmd_status_drv = cmd->frame->io.cmd_status;
+ if (cmd->cmd_status_drv == DCMD_INIT)
+ cmd->cmd_status_drv =
+ (cmd->frame->io.cmd_status == MFI_STAT_OK) ?
+ DCMD_SUCCESS : DCMD_FAILED;
+
wake_up(&instance->int_cmd_wait_q);
}
@@ -3451,7 +3462,7 @@ megasas_complete_abort(struct megasas_instance *instance,
{
if (cmd->sync_cmd) {
cmd->sync_cmd = 0;
- cmd->cmd_status_drv = 0;
+ cmd->cmd_status_drv = DCMD_SUCCESS;
wake_up(&instance->abort_cmd_wait_q);
}
}
@@ -3727,7 +3738,7 @@ megasas_issue_pending_cmds_again(struct megasas_instance *instance)
dev_notice(&instance->pdev->dev, "%p synchronous cmd"
"on the internal reset queue,"
"issue it again.\n", cmd);
- cmd->cmd_status_drv = MFI_STAT_INVALID_STATUS;
+ cmd->cmd_status_drv = DCMD_INIT;
instance->instancet->fire_cmd(instance,
cmd->frame_phys_addr,
0, instance->reg_set);
@@ -4392,7 +4403,8 @@ dcmd_timeout_ocr_possible(struct megasas_instance *instance) {
if (instance->adapter_type == MFI_SERIES)
return KILL_ADAPTER;
else if (instance->unload ||
- test_bit(MEGASAS_FUSION_IN_RESET, &instance->reset_flags))
+ test_bit(MEGASAS_FUSION_OCR_NOT_POSSIBLE,
+ &instance->reset_flags))
return IGNORE_TIMEOUT;
else
return INITIATE_OCR;
@@ -5875,7 +5887,7 @@ static int megasas_init_fw(struct megasas_instance *instance)
}
base_addr = pci_resource_start(instance->pdev, instance->bar);
- instance->reg_set = ioremap_nocache(base_addr, 8192);
+ instance->reg_set = ioremap(base_addr, 8192);
if (!instance->reg_set) {
dev_printk(KERN_DEBUG, &instance->pdev->dev, "Failed to map IO mem\n");
@@ -7593,6 +7605,7 @@ megasas_resume(struct pci_dev *pdev)
struct Scsi_Host *host;
struct megasas_instance *instance;
int irq_flags = PCI_IRQ_LEGACY;
+ u32 status_reg;
instance = pci_get_drvdata(pdev);
@@ -7620,9 +7633,35 @@ megasas_resume(struct pci_dev *pdev)
/*
* We expect the FW state to be READY
*/
- if (megasas_transition_to_ready(instance, 0))
- goto fail_ready_state;
+ if (megasas_transition_to_ready(instance, 0)) {
+ dev_info(&instance->pdev->dev,
+ "Failed to transition controller to ready from %s!\n",
+ __func__);
+ if (instance->adapter_type != MFI_SERIES) {
+ status_reg =
+ instance->instancet->read_fw_status_reg(instance);
+ if (!(status_reg & MFI_RESET_ADAPTER) ||
+ ((megasas_adp_reset_wait_for_ready
+ (instance, true, 0)) == FAILED))
+ goto fail_ready_state;
+ } else {
+ atomic_set(&instance->fw_reset_no_pci_access, 1);
+ instance->instancet->adp_reset
+ (instance, instance->reg_set);
+ atomic_set(&instance->fw_reset_no_pci_access, 0);
+
+ /* wait for about 30 seconds before retrying */
+ ssleep(30);
+
+ if (megasas_transition_to_ready(instance, 0))
+ goto fail_ready_state;
+ }
+
+ dev_info(&instance->pdev->dev,
+ "FW restarted successfully from %s!\n",
+ __func__);
+ }
if (megasas_set_dma_mask(instance))
goto fail_set_dma_mask;
@@ -8036,6 +8075,7 @@ megasas_mgmt_fw_ioctl(struct megasas_instance *instance,
dma_addr_t sense_handle;
unsigned long *sense_ptr;
u32 opcode = 0;
+ int ret = DCMD_SUCCESS;
memset(kbuff_arr, 0, sizeof(kbuff_arr));
@@ -8176,13 +8216,18 @@ megasas_mgmt_fw_ioctl(struct megasas_instance *instance,
* cmd to the SCSI mid-layer
*/
cmd->sync_cmd = 1;
- if (megasas_issue_blocked_cmd(instance, cmd, 0) == DCMD_NOT_FIRED) {
+
+ ret = megasas_issue_blocked_cmd(instance, cmd, 0);
+ switch (ret) {
+ case DCMD_INIT:
+ case DCMD_BUSY:
cmd->sync_cmd = 0;
dev_err(&instance->pdev->dev,
"return -EBUSY from %s %d cmd 0x%x opcode 0x%x cmd->cmd_status_drv 0x%x\n",
- __func__, __LINE__, cmd->frame->hdr.cmd, opcode,
- cmd->cmd_status_drv);
- return -EBUSY;
+ __func__, __LINE__, cmd->frame->hdr.cmd, opcode,
+ cmd->cmd_status_drv);
+ error = -EBUSY;
+ goto out;
}
cmd->sync_cmd = 0;
diff --git a/drivers/scsi/megaraid/megaraid_sas_fusion.c b/drivers/scsi/megaraid/megaraid_sas_fusion.c
index e301458bcbae..f3b36fd0a0eb 100644
--- a/drivers/scsi/megaraid/megaraid_sas_fusion.c
+++ b/drivers/scsi/megaraid/megaraid_sas_fusion.c
@@ -364,6 +364,35 @@ megasas_fusion_update_can_queue(struct megasas_instance *instance, int fw_boot_c
instance->max_fw_cmds = instance->max_fw_cmds-1;
}
}
+
+static inline void
+megasas_get_msix_index(struct megasas_instance *instance,
+ struct scsi_cmnd *scmd,
+ struct megasas_cmd_fusion *cmd,
+ u8 data_arms)
+{
+ int sdev_busy;
+
+ /* nr_hw_queue = 1 for MegaRAID */
+ struct blk_mq_hw_ctx *hctx =
+ scmd->device->request_queue->queue_hw_ctx[0];
+
+ sdev_busy = atomic_read(&hctx->nr_active);
+
+ if (instance->perf_mode == MR_BALANCED_PERF_MODE &&
+ sdev_busy > (data_arms * MR_DEVICE_HIGH_IOPS_DEPTH))
+ cmd->request_desc->SCSIIO.MSIxIndex =
+ mega_mod64((atomic64_add_return(1, &instance->high_iops_outstanding) /
+ MR_HIGH_IOPS_BATCH_COUNT), instance->low_latency_index_start);
+ else if (instance->msix_load_balance)
+ cmd->request_desc->SCSIIO.MSIxIndex =
+ (mega_mod64(atomic64_add_return(1, &instance->total_io_count),
+ instance->msix_vectors));
+ else
+ cmd->request_desc->SCSIIO.MSIxIndex =
+ instance->reply_map[raw_smp_processor_id()];
+}
+
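megasas_get_msix_index() centralizes the MSI-x selection that was previously duplicated in the LD and syspd build paths. A user-space model of the three policies; the counters and vector counts are made-up stand-ins for the adapter state:

#include <stdio.h>
#include <stdint.h>

#define MR_DEVICE_HIGH_IOPS_DEPTH 8
#define MR_HIGH_IOPS_BATCH_COUNT  16

/* Names mirror megasas_instance fields, values are illustrative only. */
static uint64_t high_iops_outstanding, total_io_count;
static unsigned int low_latency_index_start = 8, msix_vectors = 16;

static unsigned int get_msix_index(int balanced_mode, int load_balance,
				   int sdev_busy, int data_arms,
				   unsigned int cpu)
{
	/* Deep queue on a balanced-perf adapter: batch IOs, 16 at a time,
	 * into the vectors below low_latency_index_start. */
	if (balanced_mode &&
	    sdev_busy > data_arms * MR_DEVICE_HIGH_IOPS_DEPTH)
		return (unsigned int)((++high_iops_outstanding /
				       MR_HIGH_IOPS_BATCH_COUNT) %
				      low_latency_index_start);
	/* Adapter-wide round robin when msix_load_balance is set. */
	if (load_balance)
		return (unsigned int)(++total_io_count % msix_vectors);
	/* Default: per-CPU reply map, modelled here as cpu % vectors. */
	return cpu % msix_vectors;
}

int main(void)
{
	printf("busy device:  %u\n", get_msix_index(1, 0, 20, 2, 3));
	printf("load balance: %u\n", get_msix_index(0, 1, 1, 1, 3));
	printf("default:      %u\n", get_msix_index(0, 0, 1, 1, 3));
	return 0;
}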
/**
* megasas_free_cmds_fusion - Free all the cmds in the free cmd pool
* @instance: Adapter soft state
@@ -1312,7 +1341,9 @@ megasas_sync_pd_seq_num(struct megasas_instance *instance, bool pend) {
}
if (ret == DCMD_TIMEOUT)
- megaraid_sas_kill_hba(instance);
+ dev_warn(&instance->pdev->dev,
+ "%s DCMD timed out, continue without JBOD sequence map\n",
+ __func__);
if (ret == DCMD_SUCCESS)
instance->pd_seq_map_id++;
@@ -1394,7 +1425,9 @@ megasas_get_ld_map_info(struct megasas_instance *instance)
ret = megasas_issue_polled(instance, cmd);
if (ret == DCMD_TIMEOUT)
- megaraid_sas_kill_hba(instance);
+ dev_warn(&instance->pdev->dev,
+ "%s DCMD timed out, RAID map is disabled\n",
+ __func__);
megasas_return_cmd(instance, cmd);
@@ -2825,19 +2858,7 @@ megasas_build_ldio_fusion(struct megasas_instance *instance,
fp_possible = (io_info.fpOkForIo > 0) ? true : false;
}
- if ((instance->perf_mode == MR_BALANCED_PERF_MODE) &&
- atomic_read(&scp->device->device_busy) >
- (io_info.data_arms * MR_DEVICE_HIGH_IOPS_DEPTH))
- cmd->request_desc->SCSIIO.MSIxIndex =
- mega_mod64((atomic64_add_return(1, &instance->high_iops_outstanding) /
- MR_HIGH_IOPS_BATCH_COUNT), instance->low_latency_index_start);
- else if (instance->msix_load_balance)
- cmd->request_desc->SCSIIO.MSIxIndex =
- (mega_mod64(atomic64_add_return(1, &instance->total_io_count),
- instance->msix_vectors));
- else
- cmd->request_desc->SCSIIO.MSIxIndex =
- instance->reply_map[raw_smp_processor_id()];
+ megasas_get_msix_index(instance, scp, cmd, io_info.data_arms);
if (instance->adapter_type >= VENTURA_SERIES) {
/* FP for Optimal raid level 1.
@@ -3158,18 +3179,7 @@ megasas_build_syspd_fusion(struct megasas_instance *instance,
cmd->request_desc->SCSIIO.DevHandle = io_request->DevHandle;
- if ((instance->perf_mode == MR_BALANCED_PERF_MODE) &&
- atomic_read(&scmd->device->device_busy) > MR_DEVICE_HIGH_IOPS_DEPTH)
- cmd->request_desc->SCSIIO.MSIxIndex =
- mega_mod64((atomic64_add_return(1, &instance->high_iops_outstanding) /
- MR_HIGH_IOPS_BATCH_COUNT), instance->low_latency_index_start);
- else if (instance->msix_load_balance)
- cmd->request_desc->SCSIIO.MSIxIndex =
- (mega_mod64(atomic64_add_return(1, &instance->total_io_count),
- instance->msix_vectors));
- else
- cmd->request_desc->SCSIIO.MSIxIndex =
- instance->reply_map[raw_smp_processor_id()];
+ megasas_get_msix_index(instance, scmd, cmd, 1);
if (!fp_possible) {
/* system pd firmware path */
@@ -4219,7 +4229,8 @@ void megasas_reset_reply_desc(struct megasas_instance *instance)
* megasas_refire_mgmt_cmd : Re-fire management commands
* @instance: Controller's soft instance
*/
-static void megasas_refire_mgmt_cmd(struct megasas_instance *instance)
+void megasas_refire_mgmt_cmd(struct megasas_instance *instance,
+ bool return_ioctl)
{
int j;
struct megasas_cmd_fusion *cmd_fusion;
@@ -4283,6 +4294,16 @@ static void megasas_refire_mgmt_cmd(struct megasas_instance *instance)
break;
}
+ if (return_ioctl && cmd_mfi->sync_cmd &&
+ cmd_mfi->frame->hdr.cmd != MFI_CMD_ABORT) {
+ dev_err(&instance->pdev->dev,
+ "return -EBUSY from %s %d cmd 0x%x opcode 0x%x\n",
+ __func__, __LINE__, cmd_mfi->frame->hdr.cmd,
+ le32_to_cpu(cmd_mfi->frame->dcmd.opcode));
+ cmd_mfi->cmd_status_drv = DCMD_BUSY;
+ result = COMPLETE_CMD;
+ }
+
switch (result) {
case REFIRE_CMD:
megasas_fire_cmd_fusion(instance, req_desc);
@@ -4298,6 +4319,37 @@ static void megasas_refire_mgmt_cmd(struct megasas_instance *instance)
}
/*
+ * megasas_return_polled_cmds: Return polled mode commands back to the pool
+ * before initiating an OCR.
+ * @instance: Controller's soft instance
+ */
+static void
+megasas_return_polled_cmds(struct megasas_instance *instance)
+{
+ int i;
+ struct megasas_cmd_fusion *cmd_fusion;
+ struct fusion_context *fusion;
+ struct megasas_cmd *cmd_mfi;
+
+ fusion = instance->ctrl_context;
+
+ for (i = instance->max_scsi_cmds; i < instance->max_fw_cmds; i++) {
+ cmd_fusion = fusion->cmd_list[i];
+ cmd_mfi = instance->cmd_list[cmd_fusion->sync_cmd_idx];
+
+ if (cmd_mfi->flags & DRV_DCMD_POLLED_MODE) {
+ if (megasas_dbg_lvl & OCR_DEBUG)
+ dev_info(&instance->pdev->dev,
+ "%s %d return cmd 0x%x opcode 0x%x\n",
+ __func__, __LINE__, cmd_mfi->frame->hdr.cmd,
+ le32_to_cpu(cmd_mfi->frame->dcmd.opcode));
+ cmd_mfi->flags &= ~DRV_DCMD_POLLED_MODE;
+ megasas_return_cmd(instance, cmd_mfi);
+ }
+ }
+}
+
+/*
* megasas_track_scsiio : Track SCSI IOs outstanding to a SCSI device
* @instance: per adapter struct
* @channel: the channel assigned by the OS
@@ -4847,6 +4899,7 @@ int megasas_reset_fusion(struct Scsi_Host *shost, int reason)
if (instance->requestorId && !instance->skip_heartbeat_timer_del)
del_timer_sync(&instance->sriov_heartbeat_timer);
set_bit(MEGASAS_FUSION_IN_RESET, &instance->reset_flags);
+ set_bit(MEGASAS_FUSION_OCR_NOT_POSSIBLE, &instance->reset_flags);
atomic_set(&instance->adprecovery, MEGASAS_ADPRESET_SM_POLLING);
instance->instancet->disable_intr(instance);
megasas_sync_irqs((unsigned long)instance);
@@ -4951,7 +5004,9 @@ int megasas_reset_fusion(struct Scsi_Host *shost, int reason)
goto kill_hba;
}
- megasas_refire_mgmt_cmd(instance);
+ megasas_refire_mgmt_cmd(instance,
+ (i == (MEGASAS_FUSION_MAX_RESET_TRIES - 1)
+ ? 1 : 0));
/* Reset load balance info */
if (fusion->load_balance_info)
@@ -4959,8 +5014,16 @@ int megasas_reset_fusion(struct Scsi_Host *shost, int reason)
(sizeof(struct LD_LOAD_BALANCE_INFO) *
MAX_LOGICAL_DRIVES_EXT));
- if (!megasas_get_map_info(instance))
+ if (!megasas_get_map_info(instance)) {
megasas_sync_map_info(instance);
+ } else {
+ /*
+ * Return pending polled mode cmds before
+ * retrying OCR
+ */
+ megasas_return_polled_cmds(instance);
+ continue;
+ }
megasas_setup_jbod_map(instance);
@@ -4987,6 +5050,15 @@ int megasas_reset_fusion(struct Scsi_Host *shost, int reason)
megasas_set_dynamic_target_properties(sdev, is_target_prop);
}
+ status_reg = instance->instancet->read_fw_status_reg
+ (instance);
+ abs_state = status_reg & MFI_STATE_MASK;
+ if (abs_state != MFI_STATE_OPERATIONAL) {
+ dev_info(&instance->pdev->dev,
+ "Adapter is not OPERATIONAL, state 0x%x for scsi:%d\n",
+ abs_state, instance->host->host_no);
+ goto out;
+ }
atomic_set(&instance->adprecovery, MEGASAS_HBA_OPERATIONAL);
dev_info(&instance->pdev->dev,
@@ -5046,7 +5118,7 @@ kill_hba:
instance->skip_heartbeat_timer_del = 1;
retval = FAILED;
out:
- clear_bit(MEGASAS_FUSION_IN_RESET, &instance->reset_flags);
+ clear_bit(MEGASAS_FUSION_OCR_NOT_POSSIBLE, &instance->reset_flags);
mutex_unlock(&instance->reset_mutex);
return retval;
}
diff --git a/drivers/scsi/megaraid/megaraid_sas_fusion.h b/drivers/scsi/megaraid/megaraid_sas_fusion.h
index c013c80fe4e6..d57ecc7f88d8 100644
--- a/drivers/scsi/megaraid/megaraid_sas_fusion.h
+++ b/drivers/scsi/megaraid/megaraid_sas_fusion.h
@@ -89,6 +89,7 @@ enum MR_RAID_FLAGS_IO_SUB_TYPE {
#define MEGASAS_FP_CMD_LEN 16
#define MEGASAS_FUSION_IN_RESET 0
+#define MEGASAS_FUSION_OCR_NOT_POSSIBLE 1
#define RAID_1_PEER_CMDS 2
#define JBOD_MAPS_COUNT 2
#define MEGASAS_REDUCE_QD_COUNT 64
@@ -864,9 +865,20 @@ struct MR_LD_RAID {
u8 regTypeReqOnRead;
__le16 seqNum;
- struct {
- u32 ldSyncRequired:1;
- u32 reserved:31;
+struct {
+#ifndef MFI_BIG_ENDIAN
+ u32 ldSyncRequired:1;
+ u32 regTypeReqOnReadIsValid:1;
+ u32 isEPD:1;
+ u32 enableSLDOnAllRWIOs:1;
+ u32 reserved:28;
+#else
+ u32 reserved:28;
+ u32 enableSLDOnAllRWIOs:1;
+ u32 isEPD:1;
+ u32 regTypeReqOnReadIsValid:1;
+ u32 ldSyncRequired:1;
+#endif
} flags;
u8 LUN[8]; /* 0x24 8 byte LUN field used for SCSI IO's */
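Because C leaves bitfield allocation order to the ABI, the widened flags word spells out both orders under MFI_BIG_ENDIAN so each flag keeps its absolute bit position in the wire word. A sketch of the same trick using the compiler's byte-order macros (GCC/Clang predefines assumed):

#include <stdio.h>
#include <stdint.h>

/* On little-endian, the first-declared member occupies the lowest bits;
 * reversing the declaration order on big-endian keeps each flag at the
 * same wire bit position. */
struct ld_flags {
#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
	uint32_t ldSyncRequired:1;
	uint32_t regTypeReqOnReadIsValid:1;
	uint32_t isEPD:1;
	uint32_t enableSLDOnAllRWIOs:1;
	uint32_t reserved:28;
#else
	uint32_t reserved:28;
	uint32_t enableSLDOnAllRWIOs:1;
	uint32_t isEPD:1;
	uint32_t regTypeReqOnReadIsValid:1;
	uint32_t ldSyncRequired:1;
#endif
};

int main(void)
{
	union { struct ld_flags f; uint32_t raw; } u = { .raw = 0 };

	u.f.isEPD = 1;
	/* On both layouts isEPD should land on bit 2 of the raw word. */
	printf("raw=0x%08x (isEPD at bit 2: %s)\n", u.raw,
	       (u.raw >> 2) & 1 ? "yes" : "no");
	return 0;
}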
diff --git a/drivers/scsi/mpt3sas/mpi/mpi2.h b/drivers/scsi/mpt3sas/mpi/mpi2.h
index 18b1e31b5eb8..ed3923f8db4f 100644
--- a/drivers/scsi/mpt3sas/mpi/mpi2.h
+++ b/drivers/scsi/mpt3sas/mpi/mpi2.h
@@ -122,6 +122,9 @@
* 08-28-18 02.00.53 Bumped MPI2_HEADER_VERSION_UNIT.
* Added MPI2_IOCSTATUS_FAILURE
* 12-17-18 02.00.54 Bumped MPI2_HEADER_VERSION_UNIT
+ * 06-24-19 02.00.55 Bumped MPI2_HEADER_VERSION_UNIT
+ * 08-01-19 02.00.56 Bumped MPI2_HEADER_VERSION_UNIT
+ * 10-02-19 02.00.57 Bumped MPI2_HEADER_VERSION_UNIT
* --------------------------------------------------------------------------
*/
@@ -162,7 +165,7 @@
/* Unit and Dev versioning for this MPI header set */
-#define MPI2_HEADER_VERSION_UNIT (0x36)
+#define MPI2_HEADER_VERSION_UNIT (0x39)
#define MPI2_HEADER_VERSION_DEV (0x00)
#define MPI2_HEADER_VERSION_UNIT_MASK (0xFF00)
#define MPI2_HEADER_VERSION_UNIT_SHIFT (8)
@@ -181,6 +184,7 @@
#define MPI2_IOC_STATE_READY (0x10000000)
#define MPI2_IOC_STATE_OPERATIONAL (0x20000000)
#define MPI2_IOC_STATE_FAULT (0x40000000)
+#define MPI2_IOC_STATE_COREDUMP (0x50000000)
#define MPI2_IOC_STATE_MASK (0xF0000000)
#define MPI2_IOC_STATE_SHIFT (28)
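MPI2_IOC_STATE_COREDUMP slots into the existing top-nibble state encoding, so consumers only add one more case to their doorbell-state switches. A sketch of the decode; the doorbell data mask value is an assumption:

#include <stdio.h>
#include <stdint.h>

#define MPI2_IOC_STATE_FAULT    (0x40000000)
#define MPI2_IOC_STATE_COREDUMP (0x50000000)
#define MPI2_IOC_STATE_MASK     (0xF0000000)
#define MPI2_DOORBELL_DATA_MASK (0x0000FFFF)	/* assumed width */

int main(void)
{
	/* The doorbell packs the IOC state into the top nibble and the
	 * fault/coredump detail into the low bits. */
	uint32_t doorbell = MPI2_IOC_STATE_COREDUMP | 0x1234;

	switch (doorbell & MPI2_IOC_STATE_MASK) {
	case MPI2_IOC_STATE_FAULT:
		printf("fault_code(0x%04x)\n",
		       doorbell & MPI2_DOORBELL_DATA_MASK);
		break;
	case MPI2_IOC_STATE_COREDUMP:
		printf("coredump_state(0x%04x)\n",
		       doorbell & MPI2_DOORBELL_DATA_MASK);
		break;
	}
	return 0;
}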
diff --git a/drivers/scsi/mpt3sas/mpi/mpi2_cnfg.h b/drivers/scsi/mpt3sas/mpi/mpi2_cnfg.h
index 3a6871aecada..43a3bf8ff428 100644
--- a/drivers/scsi/mpt3sas/mpi/mpi2_cnfg.h
+++ b/drivers/scsi/mpt3sas/mpi/mpi2_cnfg.h
@@ -249,6 +249,8 @@
* 08-28-18 02.00.46 Added NVMs Write Cache flag to IOUnitPage1
* Added DMDReport Delay Time defines to PCIeIOUnitPage1
* 12-17-18 02.00.47 Swap locations of Slotx2 and Slotx4 in ManPage 7.
+ * 08-01-19 02.00.49 Add MPI26_MANPAGE7_FLAG_X2_X4_SLOT_INFO_VALID
+ * Add MPI26_IOUNITPAGE1_NVME_WRCACHE_SHIFT
*/
#ifndef MPI2_CNFG_H
@@ -891,6 +893,8 @@ typedef struct _MPI2_CONFIG_PAGE_MAN_7 {
#define MPI2_MANPAGE7_FLAG_EVENTREPLAY_SLOT_ORDER (0x00000002)
#define MPI2_MANPAGE7_FLAG_USE_SLOT_INFO (0x00000001)
+#define MPI26_MANPAGE7_FLAG_CONN_LANE_USE_PINOUT (0x00000020)
+#define MPI26_MANPAGE7_FLAG_X2_X4_SLOT_INFO_VALID (0x00000010)
/*
*Generic structure to use for product-specific manufacturing pages
@@ -962,9 +966,10 @@ typedef struct _MPI2_CONFIG_PAGE_IO_UNIT_1 {
/* IO Unit Page 1 Flags defines */
#define MPI26_IOUNITPAGE1_NVME_WRCACHE_MASK (0x00030000)
-#define MPI26_IOUNITPAGE1_NVME_WRCACHE_ENABLE (0x00000000)
-#define MPI26_IOUNITPAGE1_NVME_WRCACHE_DISABLE (0x00010000)
-#define MPI26_IOUNITPAGE1_NVME_WRCACHE_NO_CHANGE (0x00020000)
+#define MPI26_IOUNITPAGE1_NVME_WRCACHE_SHIFT (16)
+#define MPI26_IOUNITPAGE1_NVME_WRCACHE_NO_CHANGE (0x00000000)
+#define MPI26_IOUNITPAGE1_NVME_WRCACHE_ENABLE (0x00010000)
+#define MPI26_IOUNITPAGE1_NVME_WRCACHE_DISABLE (0x00020000)
#define MPI2_IOUNITPAGE1_ATA_SECURITY_FREEZE_LOCK (0x00004000)
#define MPI25_IOUNITPAGE1_NEW_DEVICE_FAST_PATH_DISABLE (0x00002000)
#define MPI25_IOUNITPAGE1_DISABLE_FAST_PATH (0x00001000)
@@ -3931,7 +3936,13 @@ typedef struct _MPI26_CONFIG_PAGE_PCIEDEV_2 {
U32 MaximumDataTransferSize; /*0x0C */
U32 Capabilities; /*0x10 */
U16 NOIOB; /* 0x14 */
- U16 Reserved2; /* 0x16 */
+ U16 ShutdownLatency; /* 0x16 */
+ U16 VendorID; /* 0x18 */
+ U16 DeviceID; /* 0x1A */
+ U16 SubsystemVendorID; /* 0x1C */
+ U16 SubsystemID; /* 0x1E */
+ U8 RevisionID; /* 0x20 */
+ U8 Reserved21[3]; /* 0x21 */
} MPI26_CONFIG_PAGE_PCIEDEV_2, *PTR_MPI26_CONFIG_PAGE_PCIEDEV_2,
Mpi26PCIeDevicePage2_t, *pMpi26PCIeDevicePage2_t;
diff --git a/drivers/scsi/mpt3sas/mpi/mpi2_image.h b/drivers/scsi/mpt3sas/mpi/mpi2_image.h
index a3f677853098..33b9c3a6fd40 100644
--- a/drivers/scsi/mpt3sas/mpi/mpi2_image.h
+++ b/drivers/scsi/mpt3sas/mpi/mpi2_image.h
@@ -19,6 +19,10 @@
* 09-07-18 02.06.03 Added MPI26_EVENT_PCIE_TOPO_PI_16_LANES
* 12-17-18 02.06.04 Addd MPI2_EXT_IMAGE_TYPE_PBLP
* Shorten some defines to be compatible with DOS
+ * 06-24-19 02.06.05 Whitespace adjustments to help with identifier
+ * checking tool.
+ * 10-02-19 02.06.06 Added MPI26_IMAGE_HEADER_SIG1_COREDUMP
+ * Added MPI2_FLASH_REGION_COREDUMP
*/
#ifndef MPI2_IMAGE_H
#define MPI2_IMAGE_H
@@ -213,6 +217,8 @@ typedef struct _MPI26_COMPONENT_IMAGE_HEADER {
#define MPI26_IMAGE_HEADER_SIG1_NVDATA (0x5444564E)
#define MPI26_IMAGE_HEADER_SIG1_GAS_GAUGE (0x20534147)
#define MPI26_IMAGE_HEADER_SIG1_PBLP (0x504C4250)
+/* little-endian "DUMP" */
+#define MPI26_IMAGE_HEADER_SIG1_COREDUMP (0x504D5544)
/**** Definitions for Signature2 field ****/
#define MPI26_IMAGE_HEADER_SIGNATURE2_VALUE (0x50584546)
@@ -359,6 +365,7 @@ typedef struct _MPI2_FLASH_LAYOUT_DATA {
#define MPI2_FLASH_REGION_MR_NVDATA (0x14)
#define MPI2_FLASH_REGION_CPLD (0x15)
#define MPI2_FLASH_REGION_PSOC (0x16)
+#define MPI2_FLASH_REGION_COREDUMP (0x17)
/*ImageRevision */
#define MPI2_FLASH_LAYOUT_IMAGE_REVISION (0x00)
diff --git a/drivers/scsi/mpt3sas/mpi/mpi2_ioc.h b/drivers/scsi/mpt3sas/mpi/mpi2_ioc.h
index 68ea408cd5c5..e83c7c529dc9 100644
--- a/drivers/scsi/mpt3sas/mpi/mpi2_ioc.h
+++ b/drivers/scsi/mpt3sas/mpi/mpi2_ioc.h
@@ -175,6 +175,10 @@
* Moved FW image definitions ionto new mpi2_image,h
* 08-14-18 02.00.36 Fixed definition of MPI2_FW_DOWNLOAD_ITYPE_PSOC (0x16)
* 09-07-18 02.00.37 Added MPI26_EVENT_PCIE_TOPO_PI_16_LANES
+ * 10-02-19 02.00.38 Added MPI26_IOCINIT_CFGFLAGS_COREDUMP_ENABLE
+ * Added MPI26_IOCFACTS_CAPABILITY_COREDUMP_ENABLED
+ * Added MPI2_FW_DOWNLOAD_ITYPE_COREDUMP
+ * Added MPI2_FW_UPLOAD_ITYPE_COREDUMP
* --------------------------------------------------------------------------
*/
@@ -248,6 +252,7 @@ typedef struct _MPI2_IOC_INIT_REQUEST {
/*ConfigurationFlags */
#define MPI26_IOCINIT_CFGFLAGS_NVME_SGL_FORMAT (0x0001)
+#define MPI26_IOCINIT_CFGFLAGS_COREDUMP_ENABLE (0x0002)
/*minimum depth for a Reply Descriptor Post Queue */
#define MPI2_RDPQ_DEPTH_MIN (16)
@@ -377,6 +382,7 @@ typedef struct _MPI2_IOC_FACTS_REPLY {
/*ProductID field uses MPI2_FW_HEADER_PID_ */
/*IOCCapabilities */
+#define MPI26_IOCFACTS_CAPABILITY_COREDUMP_ENABLED (0x00200000)
#define MPI26_IOCFACTS_CAPABILITY_PCIE_SRIOV (0x00100000)
#define MPI26_IOCFACTS_CAPABILITY_ATOMIC_REQ (0x00080000)
#define MPI2_IOCFACTS_CAPABILITY_RDPQ_ARRAY_CAPABLE (0x00040000)
@@ -1458,8 +1464,8 @@ typedef struct _MPI2_FW_DOWNLOAD_REQUEST {
/*MPI v2.6 and newer */
#define MPI2_FW_DOWNLOAD_ITYPE_CPLD (0x15)
#define MPI2_FW_DOWNLOAD_ITYPE_PSOC (0x16)
+#define MPI2_FW_DOWNLOAD_ITYPE_COREDUMP (0x17)
#define MPI2_FW_DOWNLOAD_ITYPE_MIN_PRODUCT_SPECIFIC (0xF0)
-#define MPI2_FW_DOWNLOAD_ITYPE_TERMINATE (0xFF)
/*MPI v2.0 FWDownload TransactionContext Element */
typedef struct _MPI2_FW_DOWNLOAD_TCSGE {
diff --git a/drivers/scsi/mpt3sas/mpt3sas_base.c b/drivers/scsi/mpt3sas/mpt3sas_base.c
index 45fd8dfb7c40..663782bb790d 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_base.c
+++ b/drivers/scsi/mpt3sas/mpt3sas_base.c
@@ -124,7 +124,14 @@ enum mpt3sas_perf_mode {
};
static int
+_base_wait_on_iocstate(struct MPT3SAS_ADAPTER *ioc,
+ u32 ioc_state, int timeout);
+static int
_base_get_ioc_facts(struct MPT3SAS_ADAPTER *ioc);
+static void
+_base_mask_interrupts(struct MPT3SAS_ADAPTER *ioc);
+static void
+_base_clear_outstanding_commands(struct MPT3SAS_ADAPTER *ioc);
/**
* mpt3sas_base_check_cmd_timeout - Function
@@ -609,7 +616,8 @@ _base_fault_reset_work(struct work_struct *work)
spin_lock_irqsave(&ioc->ioc_reset_in_progress_lock, flags);
- if (ioc->shost_recovery || ioc->pci_error_recovery)
+ if ((ioc->shost_recovery && (ioc->ioc_coredump_loop == 0)) ||
+ ioc->pci_error_recovery)
goto rearm_timer;
spin_unlock_irqrestore(&ioc->ioc_reset_in_progress_lock, flags);
@@ -656,20 +664,64 @@ _base_fault_reset_work(struct work_struct *work)
return; /* don't rearm timer */
}
- ioc->non_operational_loop = 0;
+ if ((doorbell & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_COREDUMP) {
+ u8 timeout = (ioc->manu_pg11.CoreDumpTOSec) ?
+ ioc->manu_pg11.CoreDumpTOSec :
+ MPT3SAS_DEFAULT_COREDUMP_TIMEOUT_SECONDS;
+
+ timeout /= (FAULT_POLLING_INTERVAL/1000);
+
+ if (ioc->ioc_coredump_loop == 0) {
+ mpt3sas_print_coredump_info(ioc,
+ doorbell & MPI2_DOORBELL_DATA_MASK);
+ /* do not accept any IOs and disable the interrupts */
+ spin_lock_irqsave(
+ &ioc->ioc_reset_in_progress_lock, flags);
+ ioc->shost_recovery = 1;
+ spin_unlock_irqrestore(
+ &ioc->ioc_reset_in_progress_lock, flags);
+ _base_mask_interrupts(ioc);
+ _base_clear_outstanding_commands(ioc);
+ }
+
+ ioc_info(ioc, "%s: CoreDump loop %d.",
+ __func__, ioc->ioc_coredump_loop);
+ /* Wait until CoreDump completes or times out */
+ if (ioc->ioc_coredump_loop++ < timeout) {
+ spin_lock_irqsave(
+ &ioc->ioc_reset_in_progress_lock, flags);
+ goto rearm_timer;
+ }
+ }
+
+ if (ioc->ioc_coredump_loop) {
+ if ((doorbell & MPI2_IOC_STATE_MASK) != MPI2_IOC_STATE_COREDUMP)
+ ioc_err(ioc, "%s: CoreDump completed. LoopCount: %d",
+ __func__, ioc->ioc_coredump_loop);
+ else
+ ioc_err(ioc, "%s: CoreDump Timed out. LoopCount: %d",
+ __func__, ioc->ioc_coredump_loop);
+ ioc->ioc_coredump_loop = MPT3SAS_COREDUMP_LOOP_DONE;
+ }
+ ioc->non_operational_loop = 0;
if ((doorbell & MPI2_IOC_STATE_MASK) != MPI2_IOC_STATE_OPERATIONAL) {
rc = mpt3sas_base_hard_reset_handler(ioc, FORCE_BIG_HAMMER);
ioc_warn(ioc, "%s: hard reset: %s\n",
__func__, rc == 0 ? "success" : "failed");
doorbell = mpt3sas_base_get_iocstate(ioc, 0);
- if ((doorbell & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_FAULT)
- mpt3sas_base_fault_info(ioc, doorbell &
+ if ((doorbell & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_FAULT) {
+ mpt3sas_print_fault_code(ioc, doorbell &
+ MPI2_DOORBELL_DATA_MASK);
+ } else if ((doorbell & MPI2_IOC_STATE_MASK) ==
+ MPI2_IOC_STATE_COREDUMP)
+ mpt3sas_print_coredump_info(ioc, doorbell &
MPI2_DOORBELL_DATA_MASK);
if (rc && (doorbell & MPI2_IOC_STATE_MASK) !=
MPI2_IOC_STATE_OPERATIONAL)
return; /* don't rearm timer */
}
+ ioc->ioc_coredump_loop = 0;
spin_lock_irqsave(&ioc->ioc_reset_in_progress_lock, flags);
rearm_timer:
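The watchdog converts the CoreDumpTOSec budget from Manufacturing Page 11 into rearm iterations of the fault-polling work. A sketch of the arithmetic — the default-seconds value below is an assumption standing in for MPT3SAS_DEFAULT_COREDUMP_TIMEOUT_SECONDS:

#include <stdio.h>

#define FAULT_POLLING_INTERVAL 1000	/* ms; watchdog period in mpt3sas */
#define DEFAULT_COREDUMP_TO_SEC 15	/* assumed driver default */

int main(void)
{
	unsigned int coredump_to_sec = 0;	/* 0 => page didn't set it */
	unsigned int timeout = coredump_to_sec ?
		coredump_to_sec : DEFAULT_COREDUMP_TO_SEC;

	/* The fault watchdog fires every FAULT_POLLING_INTERVAL ms, so a
	 * seconds budget becomes a number of rearm iterations; the work
	 * item rearms itself until ioc_coredump_loop exceeds it, then a
	 * hard reset is issued. */
	timeout /= (FAULT_POLLING_INTERVAL / 1000);

	printf("firmware gets %u watchdog polls (~%u s) to finish CoreDump\n",
	       timeout, timeout * (FAULT_POLLING_INTERVAL / 1000));
	return 0;
}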
@@ -749,6 +801,49 @@ mpt3sas_base_fault_info(struct MPT3SAS_ADAPTER *ioc , u16 fault_code)
}
/**
+ * mpt3sas_base_coredump_info - verbose translation of firmware CoreDump state
+ * @ioc: per adapter object
+ * @fault_code: fault code
+ *
+ * Return nothing.
+ */
+void
+mpt3sas_base_coredump_info(struct MPT3SAS_ADAPTER *ioc, u16 fault_code)
+{
+ ioc_err(ioc, "coredump_state(0x%04x)!\n", fault_code);
+}
+
+/**
+ * mpt3sas_base_wait_for_coredump_completion - Wait until coredump
+ * completes or times out
+ * @ioc: per adapter object
+ * @caller: name of the calling function, used in the log messages
+ *
+ * Returns 0 for success, non-zero for failure.
+ */
+int
+mpt3sas_base_wait_for_coredump_completion(struct MPT3SAS_ADAPTER *ioc,
+ const char *caller)
+{
+ u8 timeout = (ioc->manu_pg11.CoreDumpTOSec) ?
+ ioc->manu_pg11.CoreDumpTOSec :
+ MPT3SAS_DEFAULT_COREDUMP_TIMEOUT_SECONDS;
+
+ int ioc_state = _base_wait_on_iocstate(ioc, MPI2_IOC_STATE_FAULT,
+ timeout);
+
+ if (ioc_state)
+ ioc_err(ioc,
+ "%s: CoreDump timed out. (ioc_state=0x%x)\n",
+ caller, ioc_state);
+ else
+ ioc_info(ioc,
+ "%s: CoreDump completed. (ioc_state=0x%x)\n",
+ caller, ioc_state);
+
+ return ioc_state;
+}
+
+/**
* mpt3sas_halt_firmware - halt's mpt controller firmware
* @ioc: per adapter object
*
@@ -768,9 +863,14 @@ mpt3sas_halt_firmware(struct MPT3SAS_ADAPTER *ioc)
dump_stack();
doorbell = ioc->base_readl(&ioc->chip->Doorbell);
- if ((doorbell & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_FAULT)
- mpt3sas_base_fault_info(ioc , doorbell);
- else {
+ if ((doorbell & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_FAULT) {
+ mpt3sas_print_fault_code(ioc, doorbell &
+ MPI2_DOORBELL_DATA_MASK);
+ } else if ((doorbell & MPI2_IOC_STATE_MASK) ==
+ MPI2_IOC_STATE_COREDUMP) {
+ mpt3sas_print_coredump_info(ioc, doorbell &
+ MPI2_DOORBELL_DATA_MASK);
+ } else {
writel(0xC0FFEE00, &ioc->chip->Doorbell);
ioc_err(ioc, "Firmware is halted due to command timeout\n");
}
@@ -3103,6 +3203,8 @@ _base_enable_msix(struct MPT3SAS_ADAPTER *ioc)
*/
if (!ioc->combined_reply_queue &&
ioc->hba_mpi_version_belonged != MPI2_VERSION) {
+ ioc_info(ioc,
+ "combined ReplyQueue is off, Enabling msix load balance\n");
ioc->msix_load_balance = true;
}
@@ -3115,9 +3217,7 @@ _base_enable_msix(struct MPT3SAS_ADAPTER *ioc)
r = _base_alloc_irq_vectors(ioc);
if (r < 0) {
- dfailprintk(ioc,
- ioc_info(ioc, "pci_alloc_irq_vectors failed (r=%d) !!!\n",
- r));
+ ioc_info(ioc, "pci_alloc_irq_vectors failed (r=%d) !!!\n", r);
goto try_ioapic;
}
@@ -3206,9 +3306,15 @@ _base_check_for_fault_and_issue_reset(struct MPT3SAS_ADAPTER *ioc)
dhsprintk(ioc, pr_info("%s: ioc_state(0x%08x)\n", __func__, ioc_state));
if ((ioc_state & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_FAULT) {
- mpt3sas_base_fault_info(ioc, ioc_state &
+ mpt3sas_print_fault_code(ioc, ioc_state &
MPI2_DOORBELL_DATA_MASK);
rc = _base_diag_reset(ioc);
+ } else if ((ioc_state & MPI2_IOC_STATE_MASK) ==
+ MPI2_IOC_STATE_COREDUMP) {
+ mpt3sas_print_coredump_info(ioc, ioc_state &
+ MPI2_DOORBELL_DATA_MASK);
+ mpt3sas_base_wait_for_coredump_completion(ioc, __func__);
+ rc = _base_diag_reset(ioc);
}
return rc;
@@ -3279,7 +3385,8 @@ mpt3sas_base_map_resources(struct MPT3SAS_ADAPTER *ioc)
}
if (ioc->chip == NULL) {
- ioc_err(ioc, "unable to map adapter memory! or resource not found\n");
+ ioc_err(ioc,
+ "unable to map adapter memory! or resource not found\n");
r = -EINVAL;
goto out_fail;
}
@@ -3318,8 +3425,8 @@ mpt3sas_base_map_resources(struct MPT3SAS_ADAPTER *ioc)
ioc->combined_reply_index_count,
sizeof(resource_size_t *), GFP_KERNEL);
if (!ioc->replyPostRegisterIndex) {
- dfailprintk(ioc,
- ioc_warn(ioc, "allocation for reply Post Register Index failed!!!\n"));
+ ioc_err(ioc,
+ "allocation for replyPostRegisterIndex failed!\n");
r = -ENOMEM;
goto out_fail;
}
@@ -3467,6 +3574,22 @@ _base_get_msix_index(struct MPT3SAS_ADAPTER *ioc,
}
/**
+ * _base_sdev_nr_inflight_request -get number of inflight requests
+ * of a request queue.
+ * @q: request_queue object
+ *
+ * returns number of inflight request of a request queue.
+ */
+static inline unsigned long
+_base_sdev_nr_inflight_request(struct request_queue *q)
+{
+ struct blk_mq_hw_ctx *hctx = q->queue_hw_ctx[0];
+
+ return atomic_read(&hctx->nr_active);
+}
+
+
+/**
* _base_get_high_iops_msix_index - get the msix index of
* high iops queues
* @ioc: per adapter object
@@ -3485,7 +3608,7 @@ _base_get_high_iops_msix_index(struct MPT3SAS_ADAPTER *ioc,
* reply queues in terms of batch count 16 when outstanding
* IOs on the target device is >=8.
*/
- if (atomic_read(&scmd->device->device_busy) >
+ if (_base_sdev_nr_inflight_request(scmd->device->request_queue) >
MPT3SAS_DEVICE_HIGH_IOPS_DEPTH)
return base_mod64((
atomic64_add_return(1, &ioc->high_iops_outstanding) /
@@ -4264,7 +4387,8 @@ _base_display_fwpkg_version(struct MPT3SAS_ADAPTER *ioc)
fwpkg_data = dma_alloc_coherent(&ioc->pdev->dev, data_length,
&fwpkg_data_dma, GFP_KERNEL);
if (!fwpkg_data) {
- ioc_err(ioc, "failure at %s:%d/%s()!\n",
+ ioc_err(ioc,
+ "Memory allocation for fwpkg data failed at %s:%d/%s()!\n",
__FILE__, __LINE__, __func__);
return -ENOMEM;
}
@@ -4994,12 +5118,13 @@ _base_allocate_memory_pools(struct MPT3SAS_ADAPTER *ioc)
ioc->reply_free_queue_depth = ioc->hba_queue_depth + 64;
}
- dinitprintk(ioc,
- ioc_info(ioc, "scatter gather: sge_in_main_msg(%d), sge_per_chain(%d), sge_per_io(%d), chains_per_io(%d)\n",
- ioc->max_sges_in_main_message,
- ioc->max_sges_in_chain_message,
- ioc->shost->sg_tablesize,
- ioc->chains_needed_per_io));
+ ioc_info(ioc,
+ "scatter gather: sge_in_main_msg(%d), sge_per_chain(%d), "
+ "sge_per_io(%d), chains_per_io(%d)\n",
+ ioc->max_sges_in_main_message,
+ ioc->max_sges_in_chain_message,
+ ioc->shost->sg_tablesize,
+ ioc->chains_needed_per_io);
/* reply post queue, 16 byte align */
reply_post_free_sz = ioc->reply_post_queue_depth *
@@ -5109,15 +5234,13 @@ _base_allocate_memory_pools(struct MPT3SAS_ADAPTER *ioc)
ioc->internal_dma = ioc->hi_priority_dma + (ioc->hi_priority_depth *
ioc->request_sz);
- dinitprintk(ioc,
- ioc_info(ioc, "request pool(0x%p): depth(%d), frame_size(%d), pool_size(%d kB)\n",
- ioc->request, ioc->hba_queue_depth,
- ioc->request_sz,
- (ioc->hba_queue_depth * ioc->request_sz) / 1024));
+ ioc_info(ioc,
+ "request pool(0x%p) - dma(0x%llx): "
+ "depth(%d), frame_size(%d), pool_size(%d kB)\n",
+ ioc->request, (unsigned long long) ioc->request_dma,
+ ioc->hba_queue_depth, ioc->request_sz,
+ (ioc->hba_queue_depth * ioc->request_sz) / 1024);
- dinitprintk(ioc,
- ioc_info(ioc, "request pool: dma(0x%llx)\n",
- (unsigned long long)ioc->request_dma));
total_sz += sz;
dinitprintk(ioc,
@@ -5302,13 +5425,12 @@ _base_allocate_memory_pools(struct MPT3SAS_ADAPTER *ioc)
goto out;
}
}
- dinitprintk(ioc,
- ioc_info(ioc, "sense pool(0x%p): depth(%d), element_size(%d), pool_size(%d kB)\n",
- ioc->sense, ioc->scsiio_depth,
- SCSI_SENSE_BUFFERSIZE, sz / 1024));
- dinitprintk(ioc,
- ioc_info(ioc, "sense_dma(0x%llx)\n",
- (unsigned long long)ioc->sense_dma));
+ ioc_info(ioc,
+ "sense pool(0x%p)- dma(0x%llx): depth(%d),"
+ "element_size(%d), pool_size(%d kB)\n",
+ ioc->sense, (unsigned long long)ioc->sense_dma, ioc->scsiio_depth,
+ SCSI_SENSE_BUFFERSIZE, sz / 1024);
+
total_sz += sz;
/* reply pool, 4 byte align */
@@ -5386,12 +5508,10 @@ _base_allocate_memory_pools(struct MPT3SAS_ADAPTER *ioc)
ioc_err(ioc, "config page: dma_pool_alloc failed\n");
goto out;
}
- dinitprintk(ioc,
- ioc_info(ioc, "config page(0x%p): size(%d)\n",
- ioc->config_page, ioc->config_page_sz));
- dinitprintk(ioc,
- ioc_info(ioc, "config_page_dma(0x%llx)\n",
- (unsigned long long)ioc->config_page_dma));
+
+ ioc_info(ioc, "config page(0x%p) - dma(0x%llx): size(%d)\n",
+ ioc->config_page, (unsigned long long)ioc->config_page_dma,
+ ioc->config_page_sz);
total_sz += ioc->config_page_sz;
ioc_info(ioc, "Allocated physical memory: size(%d kB)\n",
@@ -5446,6 +5566,8 @@ _base_wait_on_iocstate(struct MPT3SAS_ADAPTER *ioc, u32 ioc_state, int timeout)
return 0;
if (count && current_state == MPI2_IOC_STATE_FAULT)
break;
+ if (count && current_state == MPI2_IOC_STATE_COREDUMP)
+ break;
usleep_range(1000, 1500);
count++;
@@ -5547,7 +5669,12 @@ _base_wait_for_doorbell_ack(struct MPT3SAS_ADAPTER *ioc, int timeout)
doorbell = ioc->base_readl(&ioc->chip->Doorbell);
if ((doorbell & MPI2_IOC_STATE_MASK) ==
MPI2_IOC_STATE_FAULT) {
- mpt3sas_base_fault_info(ioc , doorbell);
+ mpt3sas_print_fault_code(ioc, doorbell);
+ return -EFAULT;
+ }
+ if ((doorbell & MPI2_IOC_STATE_MASK) ==
+ MPI2_IOC_STATE_COREDUMP) {
+ mpt3sas_print_coredump_info(ioc, doorbell);
return -EFAULT;
}
} else if (int_status == 0xFFFFFFFF)
@@ -5609,6 +5736,7 @@ _base_send_ioc_reset(struct MPT3SAS_ADAPTER *ioc, u8 reset_type, int timeout)
{
u32 ioc_state;
int r = 0;
+ unsigned long flags;
if (reset_type != MPI2_FUNCTION_IOC_MESSAGE_UNIT_RESET) {
ioc_err(ioc, "%s: unknown reset_type\n", __func__);
@@ -5627,6 +5755,7 @@ _base_send_ioc_reset(struct MPT3SAS_ADAPTER *ioc, u8 reset_type, int timeout)
r = -EFAULT;
goto out;
}
+
ioc_state = _base_wait_on_iocstate(ioc, MPI2_IOC_STATE_READY, timeout);
if (ioc_state) {
ioc_err(ioc, "%s: failed going to ready state (ioc_state=0x%x)\n",
@@ -5635,6 +5764,26 @@ _base_send_ioc_reset(struct MPT3SAS_ADAPTER *ioc, u8 reset_type, int timeout)
goto out;
}
out:
+ if (r != 0) {
+ ioc_state = mpt3sas_base_get_iocstate(ioc, 0);
+ spin_lock_irqsave(&ioc->ioc_reset_in_progress_lock, flags);
+ /*
+ * Wait for IOC state CoreDump to clear only during
+ * HBA initialization & release time.
+ */
+ if ((ioc_state & MPI2_IOC_STATE_MASK) ==
+ MPI2_IOC_STATE_COREDUMP && (ioc->is_driver_loading == 1 ||
+ ioc->fault_reset_work_q == NULL)) {
+ spin_unlock_irqrestore(
+ &ioc->ioc_reset_in_progress_lock, flags);
+ mpt3sas_print_coredump_info(ioc, ioc_state);
+ mpt3sas_base_wait_for_coredump_completion(ioc,
+ __func__);
+ spin_lock_irqsave(
+ &ioc->ioc_reset_in_progress_lock, flags);
+ }
+ spin_unlock_irqrestore(&ioc->ioc_reset_in_progress_lock, flags);
+ }
ioc_info(ioc, "message unit reset: %s\n",
r == 0 ? "SUCCESS" : "FAILED");
return r;
@@ -5782,7 +5931,7 @@ _base_handshake_req_reply_wait(struct MPT3SAS_ADAPTER *ioc, int request_bytes,
mfp = (__le32 *)reply;
pr_info("\toffset:data\n");
for (i = 0; i < reply_bytes/4; i++)
- pr_info("\t[0x%02x]:%08x\n", i*4,
+ ioc_info(ioc, "\t[0x%02x]:%08x\n", i*4,
le32_to_cpu(mfp[i]));
}
return 0;
@@ -5850,10 +5999,9 @@ mpt3sas_base_sas_iounit_control(struct MPT3SAS_ADAPTER *ioc,
ioc->ioc_link_reset_in_progress)
ioc->ioc_link_reset_in_progress = 0;
if (!(ioc->base_cmds.status & MPT3_CMD_COMPLETE)) {
- issue_reset =
- mpt3sas_base_check_cmd_timeout(ioc,
- ioc->base_cmds.status, mpi_request,
- sizeof(Mpi2SasIoUnitControlRequest_t)/4);
+ mpt3sas_check_cmd_timeout(ioc, ioc->base_cmds.status,
+ mpi_request, sizeof(Mpi2SasIoUnitControlRequest_t)/4,
+ issue_reset);
goto issue_host_reset;
}
if (ioc->base_cmds.status & MPT3_CMD_REPLY_VALID)
@@ -5926,10 +6074,9 @@ mpt3sas_base_scsi_enclosure_processor(struct MPT3SAS_ADAPTER *ioc,
wait_for_completion_timeout(&ioc->base_cmds.done,
msecs_to_jiffies(10000));
if (!(ioc->base_cmds.status & MPT3_CMD_COMPLETE)) {
- issue_reset =
- mpt3sas_base_check_cmd_timeout(ioc,
- ioc->base_cmds.status, mpi_request,
- sizeof(Mpi2SepRequest_t)/4);
+ mpt3sas_check_cmd_timeout(ioc,
+ ioc->base_cmds.status, mpi_request,
+ sizeof(Mpi2SepRequest_t)/4, issue_reset);
goto issue_host_reset;
}
if (ioc->base_cmds.status & MPT3_CMD_REPLY_VALID)
@@ -6028,9 +6175,15 @@ _base_wait_for_iocstate(struct MPT3SAS_ADAPTER *ioc, int timeout)
}
if ((ioc_state & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_FAULT) {
- mpt3sas_base_fault_info(ioc, ioc_state &
+ mpt3sas_print_fault_code(ioc, ioc_state &
MPI2_DOORBELL_DATA_MASK);
goto issue_diag_reset;
+ } else if ((ioc_state & MPI2_IOC_STATE_MASK) ==
+ MPI2_IOC_STATE_COREDUMP) {
+ ioc_info(ioc,
+ "%s: Skipping the diag reset here. (ioc_state=0x%x)\n",
+ __func__, ioc_state);
+ return -EFAULT;
}
ioc_state = _base_wait_on_iocstate(ioc, MPI2_IOC_STATE_READY, timeout);
@@ -6209,6 +6362,12 @@ _base_send_ioc_init(struct MPT3SAS_ADAPTER *ioc)
cpu_to_le64((u64)ioc->reply_post[0].reply_post_free_dma);
}
+ /*
+ * Set the flag to enable CoreDump state feature in IOC firmware.
+ */
+ mpi_request.ConfigurationFlags |=
+ cpu_to_le16(MPI26_IOCINIT_CFGFLAGS_COREDUMP_ENABLE);
+
/* This time stamp specifies number of milliseconds
* since epoch ~ midnight January 1, 1970.
*/
@@ -6220,9 +6379,9 @@ _base_send_ioc_init(struct MPT3SAS_ADAPTER *ioc)
int i;
mfp = (__le32 *)&mpi_request;
- pr_info("\toffset:data\n");
+ ioc_info(ioc, "\toffset:data\n");
for (i = 0; i < sizeof(Mpi2IOCInitRequest_t)/4; i++)
- pr_info("\t[0x%02x]:%08x\n", i*4,
+ ioc_info(ioc, "\t[0x%02x]:%08x\n", i*4,
le32_to_cpu(mfp[i]));
}
@@ -6592,8 +6751,11 @@ _base_diag_reset(struct MPT3SAS_ADAPTER *ioc)
/* wait 100 msec */
msleep(100);
- if (count++ > 20)
+ if (count++ > 20) {
+ ioc_info(ioc,
+ "Stop writing magic sequence after 20 retries\n");
goto out;
+ }
host_diagnostic = ioc->base_readl(&ioc->chip->HostDiagnostic);
drsprintk(ioc,
@@ -6617,8 +6779,11 @@ _base_diag_reset(struct MPT3SAS_ADAPTER *ioc)
host_diagnostic = ioc->base_readl(&ioc->chip->HostDiagnostic);
- if (host_diagnostic == 0xFFFFFFFF)
+ if (host_diagnostic == 0xFFFFFFFF) {
+ ioc_info(ioc,
+ "Invalid host diagnostic register value\n");
goto out;
+ }
if (!(host_diagnostic & MPI2_DIAG_RESET_ADAPTER))
break;
@@ -6705,16 +6870,33 @@ _base_make_ioc_ready(struct MPT3SAS_ADAPTER *ioc, enum reset_type type)
return 0;
if (ioc_state & MPI2_DOORBELL_USED) {
- dhsprintk(ioc, ioc_info(ioc, "unexpected doorbell active!\n"));
+ ioc_info(ioc, "unexpected doorbell active!\n");
goto issue_diag_reset;
}
if ((ioc_state & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_FAULT) {
- mpt3sas_base_fault_info(ioc, ioc_state &
+ mpt3sas_print_fault_code(ioc, ioc_state &
MPI2_DOORBELL_DATA_MASK);
goto issue_diag_reset;
}
+ if ((ioc_state & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_COREDUMP) {
+ /*
+ * If a host reset is invoked while the watchdog thread is
+ * waiting for the IOC state to change to Fault, the driver has
+ * to wait here for the CoreDump state to clear; otherwise a
+ * reset will be issued to the FW, and the FW will move the IOC
+ * to the Reset state without copying the FW logs to the
+ * coredump region.
+ */
+ if (ioc->ioc_coredump_loop != MPT3SAS_COREDUMP_LOOP_DONE) {
+ mpt3sas_print_coredump_info(ioc, ioc_state &
+ MPI2_DOORBELL_DATA_MASK);
+ mpt3sas_base_wait_for_coredump_completion(ioc,
+ __func__);
+ }
+ goto issue_diag_reset;
+ }
+
if (type == FORCE_BIG_HAMMER)
goto issue_diag_reset;
@@ -6958,8 +7140,7 @@ mpt3sas_base_attach(struct MPT3SAS_ADAPTER *ioc)
ioc->cpu_msix_table = kzalloc(ioc->cpu_msix_table_sz, GFP_KERNEL);
ioc->reply_queue_count = 1;
if (!ioc->cpu_msix_table) {
- dfailprintk(ioc,
- ioc_info(ioc, "allocation for cpu_msix_table failed!!!\n"));
+ ioc_info(ioc, "Allocation for cpu_msix_table failed!!!\n");
r = -ENOMEM;
goto out_free_resources;
}
@@ -6968,8 +7149,7 @@ mpt3sas_base_attach(struct MPT3SAS_ADAPTER *ioc)
ioc->reply_post_host_index = kcalloc(ioc->cpu_msix_table_sz,
sizeof(resource_size_t *), GFP_KERNEL);
if (!ioc->reply_post_host_index) {
- dfailprintk(ioc,
- ioc_info(ioc, "allocation for reply_post_host_index failed!!!\n"));
+ ioc_info(ioc, "Allocation for reply_post_host_index failed!!!\n");
r = -ENOMEM;
goto out_free_resources;
}
@@ -7195,6 +7375,7 @@ mpt3sas_base_attach(struct MPT3SAS_ADAPTER *ioc)
sizeof(struct mpt3sas_facts));
ioc->non_operational_loop = 0;
+ ioc->ioc_coredump_loop = 0;
ioc->got_task_abort_from_ioctl = 0;
return 0;
@@ -7276,14 +7457,14 @@ static void _base_pre_reset_handler(struct MPT3SAS_ADAPTER *ioc)
}
/**
- * _base_after_reset_handler - after reset handler
+ * _base_clear_outstanding_mpt_commands - clears outstanding mpt commands
* @ioc: per adapter object
*/
-static void _base_after_reset_handler(struct MPT3SAS_ADAPTER *ioc)
+static void
+_base_clear_outstanding_mpt_commands(struct MPT3SAS_ADAPTER *ioc)
{
- mpt3sas_scsih_after_reset_handler(ioc);
- mpt3sas_ctl_after_reset_handler(ioc);
- dtmprintk(ioc, ioc_info(ioc, "%s: MPT3_IOC_AFTER_RESET\n", __func__));
+ dtmprintk(ioc,
+ ioc_info(ioc, "%s: clear outstanding mpt cmds\n", __func__));
if (ioc->transport_cmds.status & MPT3_CMD_PENDING) {
ioc->transport_cmds.status |= MPT3_CMD_RESET;
mpt3sas_base_free_smid(ioc, ioc->transport_cmds.smid);
@@ -7317,6 +7498,17 @@ static void _base_after_reset_handler(struct MPT3SAS_ADAPTER *ioc)
}
/**
+ * _base_clear_outstanding_commands - clear all outstanding commands
+ * @ioc: per adapter object
+ */
+static void _base_clear_outstanding_commands(struct MPT3SAS_ADAPTER *ioc)
+{
+ mpt3sas_scsih_clear_outstanding_scsi_tm_commands(ioc);
+ mpt3sas_ctl_clear_outstanding_ioctls(ioc);
+ _base_clear_outstanding_mpt_commands(ioc);
+}
+
+/**
* _base_reset_done_handler - reset done handler
* @ioc: per adapter object
*/
@@ -7474,7 +7666,9 @@ mpt3sas_base_hard_reset_handler(struct MPT3SAS_ADAPTER *ioc,
MPT3_DIAG_BUFFER_IS_RELEASED))) {
is_trigger = 1;
ioc_state = mpt3sas_base_get_iocstate(ioc, 0);
- if ((ioc_state & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_FAULT)
+ if ((ioc_state & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_FAULT ||
+ (ioc_state & MPI2_IOC_STATE_MASK) ==
+ MPI2_IOC_STATE_COREDUMP)
is_fault = 1;
}
_base_pre_reset_handler(ioc);
@@ -7483,7 +7677,7 @@ mpt3sas_base_hard_reset_handler(struct MPT3SAS_ADAPTER *ioc,
r = _base_make_ioc_ready(ioc, type);
if (r)
goto out;
- _base_after_reset_handler(ioc);
+ _base_clear_outstanding_commands(ioc);
/* If this hard reset is called while port enable is active, then
* there is no reason to call make_ioc_operational
@@ -7514,9 +7708,7 @@ mpt3sas_base_hard_reset_handler(struct MPT3SAS_ADAPTER *ioc,
_base_reset_done_handler(ioc);
out:
- dtmprintk(ioc,
- ioc_info(ioc, "%s: %s\n",
- __func__, r == 0 ? "SUCCESS" : "FAILED"));
+ ioc_info(ioc, "%s: %s\n", __func__, r == 0 ? "SUCCESS" : "FAILED");
spin_lock_irqsave(&ioc->ioc_reset_in_progress_lock, flags);
ioc->shost_recovery = 0;
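The Fault and CoreDump branches above all decode the same doorbell value. A minimal sketch of the recurring pattern, using only masks and helpers that appear in this patch:

	u32 doorbell = mpt3sas_base_get_iocstate(ioc, 0);

	if ((doorbell & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_FAULT)
		mpt3sas_print_fault_code(ioc, doorbell & MPI2_DOORBELL_DATA_MASK);
	else if ((doorbell & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_COREDUMP)
		mpt3sas_print_coredump_info(ioc, doorbell & MPI2_DOORBELL_DATA_MASK);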
diff --git a/drivers/scsi/mpt3sas/mpt3sas_base.h b/drivers/scsi/mpt3sas/mpt3sas_base.h
index 4ebf81ea4d2f..e7197150721f 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_base.h
+++ b/drivers/scsi/mpt3sas/mpt3sas_base.h
@@ -76,8 +76,8 @@
#define MPT3SAS_DRIVER_NAME "mpt3sas"
#define MPT3SAS_AUTHOR "Avago Technologies <MPT-FusionLinux.pdl@avagotech.com>"
#define MPT3SAS_DESCRIPTION "LSI MPT Fusion SAS 3.0 Device Driver"
-#define MPT3SAS_DRIVER_VERSION "32.100.00.00"
-#define MPT3SAS_MAJOR_VERSION 32
+#define MPT3SAS_DRIVER_VERSION "33.100.00.00"
+#define MPT3SAS_MAJOR_VERSION 33
#define MPT3SAS_MINOR_VERSION 100
#define MPT3SAS_BUILD_VERSION 0
#define MPT3SAS_RELEASE_VERSION 00
@@ -90,6 +90,10 @@
#define MPT2SAS_BUILD_VERSION 0
#define MPT2SAS_RELEASE_VERSION 00
+/* CoreDump: Default timeout */
+#define MPT3SAS_DEFAULT_COREDUMP_TIMEOUT_SECONDS (15) /* 15 seconds */
+#define MPT3SAS_COREDUMP_LOOP_DONE (0xFF)
+
/*
* Set MPT3SAS_SG_DEPTH value based on user input.
*/
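The 15-second default above is a fallback; the CoreDumpTOSec field added to Manufacturing Page 11 earlier in this patch lets the firmware advertise its own timeout. A sketch of the expected selection logic, assuming the page is cached at ioc->manu_pg11 (the helper name and the cache location are assumptions, not part of this patch):

	static u32 coredump_timeout_secs(struct MPT3SAS_ADAPTER *ioc)
	{
		/* Prefer the firmware-advertised CoreDump timeout (assumed
		 * cached from Manufacturing Page 11); fall back to 15 s.
		 */
		if (ioc->manu_pg11.CoreDumpTOSec)
			return ioc->manu_pg11.CoreDumpTOSec;
		return MPT3SAS_DEFAULT_COREDUMP_TIMEOUT_SECONDS;
	}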
@@ -140,6 +144,7 @@
#define MAX_CHAIN_ELEMT_SZ 16
#define DEFAULT_NUM_FWCHAIN_ELEMTS 8
+#define IO_UNIT_CONTROL_SHUTDOWN_TIMEOUT 6
#define FW_IMG_HDR_READ_TIMEOUT 15
#define IOC_OPERATIONAL_WAIT_COUNT 10
@@ -398,7 +403,10 @@ struct Mpi2ManufacturingPage11_t {
u8 HostTraceBufferFlags; /* 4Fh */
u16 HostTraceBufferMaxSizeKB; /* 50h */
u16 HostTraceBufferMinSizeKB; /* 52h */
- __le32 Reserved10[2]; /* 54h - 5Bh */
+ u8 CoreDumpTOSec; /* 54h */
+ u8 Reserved8; /* 55h */
+ u16 Reserved9; /* 56h */
+ __le32 Reserved10; /* 58h */
};
/**
@@ -589,6 +597,7 @@ static inline void sas_device_put(struct _sas_device *s)
* @connector_name: ASCII value of the Connector's name
* @serial_number: pointer of serial number string allocated runtime
* @access_status: Device's Access Status
+ * @shutdown_latency: NVMe device's RTD3 Entry Latency
* @refcount: reference count for deletion
*/
struct _pcie_device {
@@ -611,6 +620,7 @@ struct _pcie_device {
u8 *serial_number;
u8 reset_timeout;
u8 access_status;
+ u16 shutdown_latency;
struct kref refcount;
};
/**
@@ -1045,6 +1055,7 @@ typedef void (*MPT3SAS_FLUSH_RUNNING_CMDS)(struct MPT3SAS_ADAPTER *ioc);
* @cpu_msix_table: table for mapping cpus to msix index
* @cpu_msix_table_sz: table size
* @total_io_cnt: Gives total IO count, used to load balance the interrupts
+ * @ioc_coredump_loop: will have non-zero value when FW is in CoreDump state
* @high_iops_outstanding: used to load balance the interrupts
* within high iops reply queues
* @msix_load_balance: Enables load balancing of interrupts across
@@ -1073,6 +1084,10 @@ typedef void (*MPT3SAS_FLUSH_RUNNING_CMDS)(struct MPT3SAS_ADAPTER *ioc);
* @event_context: unique id for each logged event
* @event_log: event log pointer
* @event_masks: events that are masked
+ * @max_shutdown_latency: timeout value for the NVMe shutdown operation,
+ * equal to the highest RTD3 Entry Latency reported
+ * among the attached NVMe drives.
* @facts: static facts data
* @prev_fw_facts: previous fw facts data
* @pfacts: static port facts data
@@ -1231,6 +1246,7 @@ struct MPT3SAS_ADAPTER {
u32 ioc_reset_count;
MPT3SAS_FLUSH_RUNNING_CMDS schedule_dead_ioc_flush_running_cmds;
u32 non_operational_loop;
+ u8 ioc_coredump_loop;
atomic64_t total_io_cnt;
atomic64_t high_iops_outstanding;
bool msix_load_balance;
@@ -1283,7 +1299,7 @@ struct MPT3SAS_ADAPTER {
u8 tm_custom_handling;
u8 nvme_abort_timeout;
-
+ u16 max_shutdown_latency;
/* static config pages */
struct mpt3sas_facts facts;
@@ -1531,6 +1547,17 @@ void *mpt3sas_base_get_reply_virt_addr(struct MPT3SAS_ADAPTER *ioc,
u32 mpt3sas_base_get_iocstate(struct MPT3SAS_ADAPTER *ioc, int cooked);
void mpt3sas_base_fault_info(struct MPT3SAS_ADAPTER *ioc , u16 fault_code);
+#define mpt3sas_print_fault_code(ioc, fault_code) \
+do { pr_err("%s fault info from func: %s\n", ioc->name, __func__); \
+ mpt3sas_base_fault_info(ioc, fault_code); } while (0)
+
+void mpt3sas_base_coredump_info(struct MPT3SAS_ADAPTER *ioc, u16 fault_code);
+#define mpt3sas_print_coredump_info(ioc, fault_code) \
+do { pr_err("%s fault info from func: %s\n", ioc->name, __func__); \
+ mpt3sas_base_coredump_info(ioc, fault_code); } while (0)
+
+int mpt3sas_base_wait_for_coredump_completion(struct MPT3SAS_ADAPTER *ioc,
+ const char *caller);
int mpt3sas_base_sas_iounit_control(struct MPT3SAS_ADAPTER *ioc,
Mpi2SasIoUnitControlReply_t *mpi_reply,
Mpi2SasIoUnitControlRequest_t *mpi_request);
@@ -1552,6 +1579,11 @@ mpt3sas_wait_for_commands_to_complete(struct MPT3SAS_ADAPTER *ioc);
u8 mpt3sas_base_check_cmd_timeout(struct MPT3SAS_ADAPTER *ioc,
u8 status, void *mpi_request, int sz);
+#define mpt3sas_check_cmd_timeout(ioc, status, mpi_request, sz, issue_reset) \
+do { ioc_err(ioc, "In func: %s\n", __func__); \
+ issue_reset = mpt3sas_base_check_cmd_timeout(ioc, \
+ status, mpi_request, sz); } while (0)
+
int mpt3sas_wait_for_ioc(struct MPT3SAS_ADAPTER *ioc, int wait_count);
/* scsih shared API */
@@ -1560,7 +1592,8 @@ struct scsi_cmnd *mpt3sas_scsih_scsi_lookup_get(struct MPT3SAS_ADAPTER *ioc,
u8 mpt3sas_scsih_event_callback(struct MPT3SAS_ADAPTER *ioc, u8 msix_index,
u32 reply);
void mpt3sas_scsih_pre_reset_handler(struct MPT3SAS_ADAPTER *ioc);
-void mpt3sas_scsih_after_reset_handler(struct MPT3SAS_ADAPTER *ioc);
+void mpt3sas_scsih_clear_outstanding_scsi_tm_commands(
+ struct MPT3SAS_ADAPTER *ioc);
void mpt3sas_scsih_reset_done_handler(struct MPT3SAS_ADAPTER *ioc);
int mpt3sas_scsih_issue_tm(struct MPT3SAS_ADAPTER *ioc, u16 handle, u64 lun,
@@ -1694,7 +1727,7 @@ void mpt3sas_ctl_exit(ushort hbas_to_enumerate);
u8 mpt3sas_ctl_done(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index,
u32 reply);
void mpt3sas_ctl_pre_reset_handler(struct MPT3SAS_ADAPTER *ioc);
-void mpt3sas_ctl_after_reset_handler(struct MPT3SAS_ADAPTER *ioc);
+void mpt3sas_ctl_clear_outstanding_ioctls(struct MPT3SAS_ADAPTER *ioc);
void mpt3sas_ctl_reset_done_handler(struct MPT3SAS_ADAPTER *ioc);
u8 mpt3sas_ctl_event_callback(struct MPT3SAS_ADAPTER *ioc,
u8 msix_index, u32 reply);
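The mpt3sas_check_cmd_timeout() macro wraps mpt3sas_base_check_cmd_timeout() so every timeout also logs the calling function, and it returns the reset decision through its last argument. Callers therefore pass an lvalue and test it afterwards, as the converted call sites in this patch do:

	u8 issue_reset = 0;

	if (!(ioc->base_cmds.status & MPT3_CMD_COMPLETE)) {
		mpt3sas_check_cmd_timeout(ioc, ioc->base_cmds.status,
		    mpi_request, sizeof(*mpi_request)/4, issue_reset);
		if (issue_reset)
			mpt3sas_base_hard_reset_handler(ioc, FORCE_BIG_HAMMER);
	}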
diff --git a/drivers/scsi/mpt3sas/mpt3sas_config.c b/drivers/scsi/mpt3sas/mpt3sas_config.c
index 14a1a2793dd5..62ddf53ab3ae 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_config.c
+++ b/drivers/scsi/mpt3sas/mpt3sas_config.c
@@ -101,9 +101,6 @@ _config_display_some_debug(struct MPT3SAS_ADAPTER *ioc, u16 smid,
Mpi2ConfigRequest_t *mpi_request;
char *desc = NULL;
- if (!(ioc->logging_level & MPT_DEBUG_CONFIG))
- return;
-
mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
switch (mpi_request->Header.PageType & MPI2_CONFIG_PAGETYPE_MASK) {
case MPI2_CONFIG_PAGETYPE_IO_UNIT:
@@ -269,7 +266,8 @@ mpt3sas_config_done(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index,
mpi_reply->MsgLength*4);
}
ioc->config_cmds.status &= ~MPT3_CMD_PENDING;
- _config_display_some_debug(ioc, smid, "config_done", mpi_reply);
+ if (ioc->logging_level & MPT_DEBUG_CONFIG)
+ _config_display_some_debug(ioc, smid, "config_done", mpi_reply);
ioc->config_cmds.smid = USHRT_MAX;
complete(&ioc->config_cmds.done);
return 1;
@@ -305,6 +303,7 @@ _config_request(struct MPT3SAS_ADAPTER *ioc, Mpi2ConfigRequest_t
u8 retry_count, issue_host_reset = 0;
struct config_request mem;
u32 ioc_status = UINT_MAX;
+ u8 issue_reset = 0;
mutex_lock(&ioc->config_cmds.mutex);
if (ioc->config_cmds.status != MPT3_CMD_NOT_USED) {
@@ -378,14 +377,18 @@ _config_request(struct MPT3SAS_ADAPTER *ioc, Mpi2ConfigRequest_t
config_request = mpt3sas_base_get_msg_frame(ioc, smid);
ioc->config_cmds.smid = smid;
memcpy(config_request, mpi_request, sizeof(Mpi2ConfigRequest_t));
- _config_display_some_debug(ioc, smid, "config_request", NULL);
+ if (ioc->logging_level & MPT_DEBUG_CONFIG)
+ _config_display_some_debug(ioc, smid, "config_request", NULL);
init_completion(&ioc->config_cmds.done);
ioc->put_smid_default(ioc, smid);
wait_for_completion_timeout(&ioc->config_cmds.done, timeout*HZ);
if (!(ioc->config_cmds.status & MPT3_CMD_COMPLETE)) {
- mpt3sas_base_check_cmd_timeout(ioc,
- ioc->config_cmds.status, mpi_request,
- sizeof(Mpi2ConfigRequest_t)/4);
+ if (!(ioc->logging_level & MPT_DEBUG_CONFIG))
+ _config_display_some_debug(ioc,
+ smid, "config_request", NULL);
+ mpt3sas_check_cmd_timeout(ioc,
+ ioc->config_cmds.status, mpi_request,
+ sizeof(Mpi2ConfigRequest_t)/4, issue_reset);
retry_count++;
if (ioc->config_cmds.smid == smid)
mpt3sas_base_free_smid(ioc, smid);
@@ -404,8 +407,11 @@ _config_request(struct MPT3SAS_ADAPTER *ioc, Mpi2ConfigRequest_t
/* Reply Frame Sanity Checks to workaround FW issues */
if ((mpi_request->Header.PageType & 0xF) !=
(mpi_reply->Header.PageType & 0xF)) {
+ if (!(ioc->logging_level & MPT_DEBUG_CONFIG))
+ _config_display_some_debug(ioc,
+ smid, "config_request", NULL);
_debug_dump_mf(mpi_request, ioc->request_sz/4);
- _debug_dump_reply(mpi_reply, ioc->request_sz/4);
+ _debug_dump_reply(mpi_reply, ioc->reply_sz/4);
panic("%s: %s: Firmware BUG: mpi_reply mismatch: Requested PageType(0x%02x) Reply PageType(0x%02x)\n",
ioc->name, __func__,
mpi_request->Header.PageType & 0xF,
@@ -415,8 +421,11 @@ _config_request(struct MPT3SAS_ADAPTER *ioc, Mpi2ConfigRequest_t
if (((mpi_request->Header.PageType & 0xF) ==
MPI2_CONFIG_PAGETYPE_EXTENDED) &&
mpi_request->ExtPageType != mpi_reply->ExtPageType) {
+ if (!(ioc->logging_level & MPT_DEBUG_CONFIG))
+ _config_display_some_debug(ioc,
+ smid, "config_request", NULL);
_debug_dump_mf(mpi_request, ioc->request_sz/4);
- _debug_dump_reply(mpi_reply, ioc->request_sz/4);
+ _debug_dump_reply(mpi_reply, ioc->reply_sz/4);
panic("%s: %s: Firmware BUG: mpi_reply mismatch: Requested ExtPageType(0x%02x) Reply ExtPageType(0x%02x)\n",
ioc->name, __func__,
mpi_request->ExtPageType,
@@ -439,8 +448,11 @@ _config_request(struct MPT3SAS_ADAPTER *ioc, Mpi2ConfigRequest_t
if (p) {
if ((mpi_request->Header.PageType & 0xF) !=
(p[3] & 0xF)) {
+ if (!(ioc->logging_level & MPT_DEBUG_CONFIG))
+ _config_display_some_debug(ioc,
+ smid, "config_request", NULL);
_debug_dump_mf(mpi_request, ioc->request_sz/4);
- _debug_dump_reply(mpi_reply, ioc->request_sz/4);
+ _debug_dump_reply(mpi_reply, ioc->reply_sz/4);
_debug_dump_config(p, min_t(u16, mem.sz,
config_page_sz)/4);
panic("%s: %s: Firmware BUG: config page mismatch: Requested PageType(0x%02x) Reply PageType(0x%02x)\n",
@@ -452,8 +464,11 @@ _config_request(struct MPT3SAS_ADAPTER *ioc, Mpi2ConfigRequest_t
if (((mpi_request->Header.PageType & 0xF) ==
MPI2_CONFIG_PAGETYPE_EXTENDED) &&
(mpi_request->ExtPageType != p[6])) {
+ if (!(ioc->logging_level & MPT_DEBUG_CONFIG))
+ _config_display_some_debug(ioc,
+ smid, "config_request", NULL);
_debug_dump_mf(mpi_request, ioc->request_sz/4);
- _debug_dump_reply(mpi_reply, ioc->request_sz/4);
+ _debug_dump_reply(mpi_reply, ioc->reply_sz/4);
_debug_dump_config(p, min_t(u16, mem.sz,
config_page_sz)/4);
panic("%s: %s: Firmware BUG: config page mismatch: Requested ExtPageType(0x%02x) Reply ExtPageType(0x%02x)\n",
diff --git a/drivers/scsi/mpt3sas/mpt3sas_ctl.c b/drivers/scsi/mpt3sas/mpt3sas_ctl.c
index 6874cf017739..62e552838565 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_ctl.c
+++ b/drivers/scsi/mpt3sas/mpt3sas_ctl.c
@@ -180,6 +180,12 @@ _ctl_display_some_debug(struct MPT3SAS_ADAPTER *ioc, u16 smid,
case MPI2_FUNCTION_SMP_PASSTHROUGH:
desc = "smp_passthrough";
break;
+ case MPI2_FUNCTION_TOOLBOX:
+ desc = "toolbox";
+ break;
+ case MPI2_FUNCTION_NVME_ENCAPSULATED:
+ desc = "nvme_encapsulated";
+ break;
}
if (!desc)
@@ -478,14 +484,15 @@ void mpt3sas_ctl_pre_reset_handler(struct MPT3SAS_ADAPTER *ioc)
}
/**
- * mpt3sas_ctl_reset_handler - reset callback handler (for ctl)
+ * mpt3sas_ctl_clear_outstanding_ioctls - clears outstanding ioctl cmds.
* @ioc: per adapter object
*
* The handler for doing any required cleanup or initialization.
*/
-void mpt3sas_ctl_after_reset_handler(struct MPT3SAS_ADAPTER *ioc)
+void mpt3sas_ctl_clear_outstanding_ioctls(struct MPT3SAS_ADAPTER *ioc)
{
- dtmprintk(ioc, ioc_info(ioc, "%s: MPT3_IOC_AFTER_RESET\n", __func__));
+ dtmprintk(ioc,
+ ioc_info(ioc, "%s: clear outstanding ioctl cmd\n", __func__));
if (ioc->ctl_cmds.status & MPT3_CMD_PENDING) {
ioc->ctl_cmds.status |= MPT3_CMD_RESET;
mpt3sas_base_free_smid(ioc, ioc->ctl_cmds.smid);
@@ -1021,10 +1028,9 @@ _ctl_do_mpt_command(struct MPT3SAS_ADAPTER *ioc, struct mpt3_ioctl_command karg,
ioc->ignore_loginfos = 0;
}
if (!(ioc->ctl_cmds.status & MPT3_CMD_COMPLETE)) {
- issue_reset =
- mpt3sas_base_check_cmd_timeout(ioc,
- ioc->ctl_cmds.status, mpi_request,
- karg.data_sge_offset);
+ mpt3sas_check_cmd_timeout(ioc,
+ ioc->ctl_cmds.status, mpi_request,
+ karg.data_sge_offset, issue_reset);
goto issue_host_reset;
}
@@ -1325,7 +1331,8 @@ _ctl_do_reset(struct MPT3SAS_ADAPTER *ioc, void __user *arg)
__func__));
retval = mpt3sas_base_hard_reset_handler(ioc, FORCE_BIG_HAMMER);
- ioc_info(ioc, "host reset: %s\n", ((!retval) ? "SUCCESS" : "FAILED"));
+ ioc_info(ioc,
+ "Ioctl: host reset: %s\n", ((!retval) ? "SUCCESS" : "FAILED"));
return 0;
}
@@ -1733,10 +1740,9 @@ _ctl_diag_register_2(struct MPT3SAS_ADAPTER *ioc,
MPT3_IOCTL_DEFAULT_TIMEOUT*HZ);
if (!(ioc->ctl_cmds.status & MPT3_CMD_COMPLETE)) {
- issue_reset =
- mpt3sas_base_check_cmd_timeout(ioc,
- ioc->ctl_cmds.status, mpi_request,
- sizeof(Mpi2DiagBufferPostRequest_t)/4);
+ mpt3sas_check_cmd_timeout(ioc,
+ ioc->ctl_cmds.status, mpi_request,
+ sizeof(Mpi2DiagBufferPostRequest_t)/4, issue_reset);
goto issue_host_reset;
}
@@ -2108,6 +2114,7 @@ mpt3sas_send_diag_release(struct MPT3SAS_ADAPTER *ioc, u8 buffer_type,
u16 ioc_status;
u32 ioc_state;
int rc;
+ u8 reset_needed = 0;
dctlprintk(ioc, ioc_info(ioc, "%s\n",
__func__));
@@ -2115,6 +2122,7 @@ mpt3sas_send_diag_release(struct MPT3SAS_ADAPTER *ioc, u8 buffer_type,
rc = 0;
*issue_reset = 0;
+
ioc_state = mpt3sas_base_get_iocstate(ioc, 1);
if (ioc_state != MPI2_IOC_STATE_OPERATIONAL) {
if (ioc->diag_buffer_status[buffer_type] &
@@ -2157,9 +2165,10 @@ mpt3sas_send_diag_release(struct MPT3SAS_ADAPTER *ioc, u8 buffer_type,
MPT3_IOCTL_DEFAULT_TIMEOUT*HZ);
if (!(ioc->ctl_cmds.status & MPT3_CMD_COMPLETE)) {
- *issue_reset = mpt3sas_base_check_cmd_timeout(ioc,
- ioc->ctl_cmds.status, mpi_request,
- sizeof(Mpi2DiagReleaseRequest_t)/4);
+ mpt3sas_check_cmd_timeout(ioc,
+ ioc->ctl_cmds.status, mpi_request,
+ sizeof(Mpi2DiagReleaseRequest_t)/4, reset_needed);
+ *issue_reset = reset_needed;
rc = -EFAULT;
goto out;
}
@@ -2417,10 +2426,9 @@ _ctl_diag_read_buffer(struct MPT3SAS_ADAPTER *ioc, void __user *arg)
MPT3_IOCTL_DEFAULT_TIMEOUT*HZ);
if (!(ioc->ctl_cmds.status & MPT3_CMD_COMPLETE)) {
- issue_reset =
- mpt3sas_base_check_cmd_timeout(ioc,
- ioc->ctl_cmds.status, mpi_request,
- sizeof(Mpi2DiagBufferPostRequest_t)/4);
+ mpt3sas_check_cmd_timeout(ioc,
+ ioc->ctl_cmds.status, mpi_request,
+ sizeof(Mpi2DiagBufferPostRequest_t)/4, issue_reset);
goto issue_host_reset;
}
diff --git a/drivers/scsi/mpt3sas/mpt3sas_scsih.c b/drivers/scsi/mpt3sas/mpt3sas_scsih.c
index a038be8a0e90..c597d544eb39 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_scsih.c
+++ b/drivers/scsi/mpt3sas/mpt3sas_scsih.c
@@ -1050,6 +1050,34 @@ mpt3sas_get_pdev_by_handle(struct MPT3SAS_ADAPTER *ioc, u16 handle)
}
/**
+ * _scsih_set_nvme_max_shutdown_latency - Update max_shutdown_latency.
+ * @ioc: per adapter object
+ * Context: This function will acquire ioc->pcie_device_lock
+ *
+ * Update ioc->max_shutdown_latency to the highest RTD3 Entry Latency
+ * reported among all available NVMe drives.
+ * The minimum max_shutdown_latency is six seconds.
+ */
+static void
+_scsih_set_nvme_max_shutdown_latency(struct MPT3SAS_ADAPTER *ioc)
+{
+ struct _pcie_device *pcie_device;
+ unsigned long flags;
+ u16 shutdown_latency = IO_UNIT_CONTROL_SHUTDOWN_TIMEOUT;
+
+ spin_lock_irqsave(&ioc->pcie_device_lock, flags);
+ list_for_each_entry(pcie_device, &ioc->pcie_device_list, list) {
+ if (pcie_device->shutdown_latency) {
+ if (shutdown_latency < pcie_device->shutdown_latency)
+ shutdown_latency =
+ pcie_device->shutdown_latency;
+ }
+ }
+ ioc->max_shutdown_latency = shutdown_latency;
+ spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
+}
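For example, attached drives reporting RTD3 Entry Latencies of 4, 10 and 8 seconds yield max_shutdown_latency = 10; drives reporting only 2 and 4 seconds leave it at the 6-second IO_UNIT_CONTROL_SHUTDOWN_TIMEOUT floor.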
+
+/**
* _scsih_pcie_device_remove - remove pcie_device from list.
* @ioc: per adapter object
* @pcie_device: the pcie_device object
@@ -1063,6 +1091,7 @@ _scsih_pcie_device_remove(struct MPT3SAS_ADAPTER *ioc,
{
unsigned long flags;
int was_on_pcie_device_list = 0;
+ u8 update_latency = 0;
if (!pcie_device)
return;
@@ -1082,11 +1111,21 @@ _scsih_pcie_device_remove(struct MPT3SAS_ADAPTER *ioc,
list_del_init(&pcie_device->list);
was_on_pcie_device_list = 1;
}
+ if (pcie_device->shutdown_latency == ioc->max_shutdown_latency)
+ update_latency = 1;
spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
if (was_on_pcie_device_list) {
kfree(pcie_device->serial_number);
pcie_device_put(pcie_device);
}
+
+ /*
+ * This device's RTD3 Entry Latency matches the IOC's
+ * max_shutdown_latency; recalculate it from the remaining
+ * drives, since this drive is being removed.
+ */
+ if (update_latency)
+ _scsih_set_nvme_max_shutdown_latency(ioc);
}
@@ -1101,6 +1140,7 @@ _scsih_pcie_device_remove_by_handle(struct MPT3SAS_ADAPTER *ioc, u16 handle)
struct _pcie_device *pcie_device;
unsigned long flags;
int was_on_pcie_device_list = 0;
+ u8 update_latency = 0;
if (ioc->shost_recovery)
return;
@@ -1113,12 +1153,22 @@ _scsih_pcie_device_remove_by_handle(struct MPT3SAS_ADAPTER *ioc, u16 handle)
was_on_pcie_device_list = 1;
pcie_device_put(pcie_device);
}
+ if (pcie_device->shutdown_latency == ioc->max_shutdown_latency)
+ update_latency = 1;
}
spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
if (was_on_pcie_device_list) {
_scsih_pcie_device_remove_from_sml(ioc, pcie_device);
pcie_device_put(pcie_device);
}
+
+ /*
+ * This device's RTD3 Entry Latency matches the IOC's
+ * max_shutdown_latency; recalculate it from the remaining
+ * drives, since this drive is being removed.
+ */
+ if (update_latency)
+ _scsih_set_nvme_max_shutdown_latency(ioc);
}
/**
@@ -1554,7 +1604,12 @@ scsih_change_queue_depth(struct scsi_device *sdev, int qdepth)
max_depth = 1;
if (qdepth > max_depth)
qdepth = max_depth;
- return scsi_change_queue_depth(sdev, qdepth);
+ scsi_change_queue_depth(sdev, qdepth);
+ sdev_printk(KERN_INFO, sdev,
+ "qdepth(%d), tagged(%d), scsi_level(%d), cmd_que(%d)\n",
+ sdev->queue_depth, sdev->tagged_supported,
+ sdev->scsi_level, ((sdev->inquiry[7] & 2) >> 1));
+ return sdev->queue_depth;
}
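The cmd_que value printed here is bit 1 of byte 7 of the standard INQUIRY data, i.e. the CMDQUE flag, which is what the (sdev->inquiry[7] & 2) >> 1 expression extracts.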
/**
@@ -2673,6 +2728,7 @@ mpt3sas_scsih_issue_tm(struct MPT3SAS_ADAPTER *ioc, u16 handle, u64 lun,
u16 smid = 0;
u32 ioc_state;
int rc;
+ u8 issue_reset = 0;
lockdep_assert_held(&ioc->tm_cmds.mutex);
@@ -2695,7 +2751,13 @@ mpt3sas_scsih_issue_tm(struct MPT3SAS_ADAPTER *ioc, u16 handle, u64 lun,
}
if ((ioc_state & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_FAULT) {
- mpt3sas_base_fault_info(ioc, ioc_state &
+ mpt3sas_print_fault_code(ioc, ioc_state &
+ MPI2_DOORBELL_DATA_MASK);
+ rc = mpt3sas_base_hard_reset_handler(ioc, FORCE_BIG_HAMMER);
+ return (!rc) ? SUCCESS : FAILED;
+ } else if ((ioc_state & MPI2_IOC_STATE_MASK) ==
+ MPI2_IOC_STATE_COREDUMP) {
+ mpt3sas_print_coredump_info(ioc, ioc_state &
MPI2_DOORBELL_DATA_MASK);
rc = mpt3sas_base_hard_reset_handler(ioc, FORCE_BIG_HAMMER);
return (!rc) ? SUCCESS : FAILED;
@@ -2726,9 +2788,10 @@ mpt3sas_scsih_issue_tm(struct MPT3SAS_ADAPTER *ioc, u16 handle, u64 lun,
ioc->put_smid_hi_priority(ioc, smid, msix_task);
wait_for_completion_timeout(&ioc->tm_cmds.done, timeout*HZ);
if (!(ioc->tm_cmds.status & MPT3_CMD_COMPLETE)) {
- if (mpt3sas_base_check_cmd_timeout(ioc,
- ioc->tm_cmds.status, mpi_request,
- sizeof(Mpi2SCSITaskManagementRequest_t)/4)) {
+ mpt3sas_check_cmd_timeout(ioc,
+ ioc->tm_cmds.status, mpi_request,
+ sizeof(Mpi2SCSITaskManagementRequest_t)/4, issue_reset);
+ if (issue_reset) {
rc = mpt3sas_base_hard_reset_handler(ioc,
FORCE_BIG_HAMMER);
rc = (!rc) ? SUCCESS : FAILED;
@@ -2875,15 +2938,17 @@ scsih_abort(struct scsi_cmnd *scmd)
u8 timeout = 30;
struct _pcie_device *pcie_device = NULL;
- sdev_printk(KERN_INFO, scmd->device,
- "attempting task abort! scmd(%p)\n", scmd);
+ sdev_printk(KERN_INFO, scmd->device, "attempting task abort!"
+ "scmd(0x%p), outstanding for %u ms & timeout %u ms\n",
+ scmd, jiffies_to_msecs(jiffies - scmd->jiffies_at_alloc),
+ (scmd->request->timeout / HZ) * 1000);
_scsih_tm_display_info(ioc, scmd);
sas_device_priv_data = scmd->device->hostdata;
if (!sas_device_priv_data || !sas_device_priv_data->sas_target ||
ioc->remove_host) {
sdev_printk(KERN_INFO, scmd->device,
- "device been deleted! scmd(%p)\n", scmd);
+ "device been deleted! scmd(0x%p)\n", scmd);
scmd->result = DID_NO_CONNECT << 16;
scmd->scsi_done(scmd);
r = SUCCESS;
@@ -2892,6 +2957,8 @@ scsih_abort(struct scsi_cmnd *scmd)
/* check for completed command */
if (st == NULL || st->cb_idx == 0xFF) {
+ sdev_printk(KERN_INFO, scmd->device, "No reference found at "
+ "driver, assuming scmd(0x%p) might have completed\n", scmd);
scmd->result = DID_RESET << 16;
r = SUCCESS;
goto out;
@@ -2920,7 +2987,7 @@ scsih_abort(struct scsi_cmnd *scmd)
if (r == SUCCESS && st->cb_idx != 0xFF)
r = FAILED;
out:
- sdev_printk(KERN_INFO, scmd->device, "task abort: %s scmd(%p)\n",
+ sdev_printk(KERN_INFO, scmd->device, "task abort: %s scmd(0x%p)\n",
((r == SUCCESS) ? "SUCCESS" : "FAILED"), scmd);
if (pcie_device)
pcie_device_put(pcie_device);
@@ -2949,14 +3016,14 @@ scsih_dev_reset(struct scsi_cmnd *scmd)
struct MPT3SAS_TARGET *target_priv_data = starget->hostdata;
sdev_printk(KERN_INFO, scmd->device,
- "attempting device reset! scmd(%p)\n", scmd);
+ "attempting device reset! scmd(0x%p)\n", scmd);
_scsih_tm_display_info(ioc, scmd);
sas_device_priv_data = scmd->device->hostdata;
if (!sas_device_priv_data || !sas_device_priv_data->sas_target ||
ioc->remove_host) {
sdev_printk(KERN_INFO, scmd->device,
- "device been deleted! scmd(%p)\n", scmd);
+ "device been deleted! scmd(0x%p)\n", scmd);
scmd->result = DID_NO_CONNECT << 16;
scmd->scsi_done(scmd);
r = SUCCESS;
@@ -2996,7 +3063,7 @@ scsih_dev_reset(struct scsi_cmnd *scmd)
if (r == SUCCESS && atomic_read(&scmd->device->device_busy))
r = FAILED;
out:
- sdev_printk(KERN_INFO, scmd->device, "device reset: %s scmd(%p)\n",
+ sdev_printk(KERN_INFO, scmd->device, "device reset: %s scmd(0x%p)\n",
((r == SUCCESS) ? "SUCCESS" : "FAILED"), scmd);
if (sas_device)
@@ -3027,15 +3094,15 @@ scsih_target_reset(struct scsi_cmnd *scmd)
struct scsi_target *starget = scmd->device->sdev_target;
struct MPT3SAS_TARGET *target_priv_data = starget->hostdata;
- starget_printk(KERN_INFO, starget, "attempting target reset! scmd(%p)\n",
- scmd);
+ starget_printk(KERN_INFO, starget,
+ "attempting target reset! scmd(0x%p)\n", scmd);
_scsih_tm_display_info(ioc, scmd);
sas_device_priv_data = scmd->device->hostdata;
if (!sas_device_priv_data || !sas_device_priv_data->sas_target ||
ioc->remove_host) {
- starget_printk(KERN_INFO, starget, "target been deleted! scmd(%p)\n",
- scmd);
+ starget_printk(KERN_INFO, starget,
+ "target been deleted! scmd(0x%p)\n", scmd);
scmd->result = DID_NO_CONNECT << 16;
scmd->scsi_done(scmd);
r = SUCCESS;
@@ -3074,7 +3141,7 @@ scsih_target_reset(struct scsi_cmnd *scmd)
if (r == SUCCESS && atomic_read(&starget->target_busy))
r = FAILED;
out:
- starget_printk(KERN_INFO, starget, "target reset: %s scmd(%p)\n",
+ starget_printk(KERN_INFO, starget, "target reset: %s scmd(0x%p)\n",
((r == SUCCESS) ? "SUCCESS" : "FAILED"), scmd);
if (sas_device)
@@ -3097,7 +3164,7 @@ scsih_host_reset(struct scsi_cmnd *scmd)
struct MPT3SAS_ADAPTER *ioc = shost_priv(scmd->device->host);
int r, retval;
- ioc_info(ioc, "attempting host reset! scmd(%p)\n", scmd);
+ ioc_info(ioc, "attempting host reset! scmd(0x%p)\n", scmd);
scsi_print_command(scmd);
if (ioc->is_driver_loading || ioc->remove_host) {
@@ -3109,7 +3176,7 @@ scsih_host_reset(struct scsi_cmnd *scmd)
retval = mpt3sas_base_hard_reset_handler(ioc, FORCE_BIG_HAMMER);
r = (retval < 0) ? FAILED : SUCCESS;
out:
- ioc_info(ioc, "host reset: %s scmd(%p)\n",
+ ioc_info(ioc, "host reset: %s scmd(0x%p)\n",
r == SUCCESS ? "SUCCESS" : "FAILED", scmd);
return r;
@@ -4475,6 +4542,7 @@ static void
_scsih_temp_threshold_events(struct MPT3SAS_ADAPTER *ioc,
Mpi2EventDataTemperature_t *event_data)
{
+ u32 doorbell;
if (ioc->temp_sensors_count >= event_data->SensorNum) {
ioc_err(ioc, "Temperature Threshold flags %s%s%s%s exceeded for Sensor: %d !!!\n",
le16_to_cpu(event_data->Status) & 0x1 ? "0 " : " ",
@@ -4484,6 +4552,18 @@ _scsih_temp_threshold_events(struct MPT3SAS_ADAPTER *ioc,
event_data->SensorNum);
ioc_err(ioc, "Current Temp In Celsius: %d\n",
event_data->CurrentTemperature);
+ if (ioc->hba_mpi_version_belonged != MPI2_VERSION) {
+ doorbell = mpt3sas_base_get_iocstate(ioc, 0);
+ if ((doorbell & MPI2_IOC_STATE_MASK) ==
+ MPI2_IOC_STATE_FAULT) {
+ mpt3sas_print_fault_code(ioc,
+ doorbell & MPI2_DOORBELL_DATA_MASK);
+ } else if ((doorbell & MPI2_IOC_STATE_MASK) ==
+ MPI2_IOC_STATE_COREDUMP) {
+ mpt3sas_print_coredump_info(ioc,
+ doorbell & MPI2_DOORBELL_DATA_MASK);
+ }
+ }
}
}
@@ -6933,6 +7013,16 @@ _scsih_pcie_add_device(struct MPT3SAS_ADAPTER *ioc, u16 handle)
le32_to_cpu(pcie_device_pg0.DeviceInfo)))) {
pcie_device->nvme_mdts =
le32_to_cpu(pcie_device_pg2.MaximumDataTransferSize);
+ pcie_device->shutdown_latency =
+ le16_to_cpu(pcie_device_pg2.ShutdownLatency);
+ /*
+ * Set the IOC's max_shutdown_latency to the drive's RTD3 Entry
+ * Latency if the drive's RTD3 Entry Latency is greater than the
+ * IOC's current max_shutdown_latency.
+ */
+ if (pcie_device->shutdown_latency > ioc->max_shutdown_latency)
+ ioc->max_shutdown_latency =
+ pcie_device->shutdown_latency;
if (pcie_device_pg2.ControllerResetTO)
pcie_device->reset_timeout =
pcie_device_pg2.ControllerResetTO;
@@ -7669,10 +7759,9 @@ _scsih_ir_fastpath(struct MPT3SAS_ADAPTER *ioc, u16 handle, u8 phys_disk_num)
wait_for_completion_timeout(&ioc->scsih_cmds.done, 10*HZ);
if (!(ioc->scsih_cmds.status & MPT3_CMD_COMPLETE)) {
- issue_reset =
- mpt3sas_base_check_cmd_timeout(ioc,
- ioc->scsih_cmds.status, mpi_request,
- sizeof(Mpi2RaidActionRequest_t)/4);
+ mpt3sas_check_cmd_timeout(ioc,
+ ioc->scsih_cmds.status, mpi_request,
+ sizeof(Mpi2RaidActionRequest_t)/4, issue_reset);
rc = -EFAULT;
goto out;
}
@@ -9272,15 +9361,17 @@ void mpt3sas_scsih_pre_reset_handler(struct MPT3SAS_ADAPTER *ioc)
}
/**
- * mpt3sas_scsih_after_reset_handler - reset callback handler (for scsih)
+ * mpt3sas_scsih_clear_outstanding_scsi_tm_commands - clears outstanding
+ * scsi & tm cmds.
* @ioc: per adapter object
*
* The handler for doing any required cleanup or initialization.
*/
void
-mpt3sas_scsih_after_reset_handler(struct MPT3SAS_ADAPTER *ioc)
+mpt3sas_scsih_clear_outstanding_scsi_tm_commands(struct MPT3SAS_ADAPTER *ioc)
{
- dtmprintk(ioc, ioc_info(ioc, "%s: MPT3_IOC_AFTER_RESET\n", __func__));
+ dtmprintk(ioc,
+ ioc_info(ioc, "%s: clear outstanding scsi & tm cmds\n", __func__));
if (ioc->scsih_cmds.status & MPT3_CMD_PENDING) {
ioc->scsih_cmds.status |= MPT3_CMD_RESET;
mpt3sas_base_free_smid(ioc, ioc->scsih_cmds.smid);
@@ -9357,6 +9448,7 @@ _mpt3sas_fw_work(struct MPT3SAS_ADAPTER *ioc, struct fw_event_work *fw_event)
}
_scsih_remove_unresponding_devices(ioc);
_scsih_scan_for_devices_after_reset(ioc);
+ _scsih_set_nvme_max_shutdown_latency(ioc);
break;
case MPT3SAS_PORT_ENABLE_COMPLETE:
ioc->start_scan = 0;
@@ -9660,6 +9752,75 @@ _scsih_expander_node_remove(struct MPT3SAS_ADAPTER *ioc,
}
/**
+ * _scsih_nvme_shutdown - NVMe shutdown notification
+ * @ioc: per adapter object
+ *
+ * Send an IoUnitControl request with the shutdown operation code to alert the
+ * IOC that the host system is shutting down, so that the IOC can issue an
+ * NVMe shutdown to the NVMe drives attached to it.
+ */
+static void
+_scsih_nvme_shutdown(struct MPT3SAS_ADAPTER *ioc)
+{
+ Mpi26IoUnitControlRequest_t *mpi_request;
+ Mpi26IoUnitControlReply_t *mpi_reply;
+ u16 smid;
+
+ /* are there any NVMe devices ? */
+ if (list_empty(&ioc->pcie_device_list))
+ return;
+
+ mutex_lock(&ioc->scsih_cmds.mutex);
+
+ if (ioc->scsih_cmds.status != MPT3_CMD_NOT_USED) {
+ ioc_err(ioc, "%s: scsih_cmd in use\n", __func__);
+ goto out;
+ }
+
+ ioc->scsih_cmds.status = MPT3_CMD_PENDING;
+
+ smid = mpt3sas_base_get_smid(ioc, ioc->scsih_cb_idx);
+ if (!smid) {
+ ioc_err(ioc,
+ "%s: failed obtaining a smid\n", __func__);
+ ioc->scsih_cmds.status = MPT3_CMD_NOT_USED;
+ goto out;
+ }
+
+ mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
+ ioc->scsih_cmds.smid = smid;
+ memset(mpi_request, 0, sizeof(Mpi26IoUnitControlRequest_t));
+ mpi_request->Function = MPI2_FUNCTION_IO_UNIT_CONTROL;
+ mpi_request->Operation = MPI26_CTRL_OP_SHUTDOWN;
+
+ init_completion(&ioc->scsih_cmds.done);
+ ioc->put_smid_default(ioc, smid);
+ /* Wait for max_shutdown_latency seconds */
+ ioc_info(ioc,
+ "Io Unit Control shutdown (sending), Shutdown latency %d sec\n",
+ ioc->max_shutdown_latency);
+ wait_for_completion_timeout(&ioc->scsih_cmds.done,
+ ioc->max_shutdown_latency*HZ);
+
+ if (!(ioc->scsih_cmds.status & MPT3_CMD_COMPLETE)) {
+ ioc_err(ioc, "%s: timeout\n", __func__);
+ goto out;
+ }
+
+ if (ioc->scsih_cmds.status & MPT3_CMD_REPLY_VALID) {
+ mpi_reply = ioc->scsih_cmds.reply;
+ ioc_info(ioc, "Io Unit Control shutdown (complete):"
+ "ioc_status(0x%04x), loginfo(0x%08x)\n",
+ le16_to_cpu(mpi_reply->IOCStatus),
+ le32_to_cpu(mpi_reply->IOCLogInfo));
+ }
+ out:
+ ioc->scsih_cmds.status = MPT3_CMD_NOT_USED;
+ mutex_unlock(&ioc->scsih_cmds.mutex);
+}
+
+/**
* _scsih_ir_shutdown - IR shutdown notification
* @ioc: per adapter object
*
@@ -9851,6 +10012,7 @@ scsih_shutdown(struct pci_dev *pdev)
&ioc->ioc_pg1_copy);
_scsih_ir_shutdown(ioc);
+ _scsih_nvme_shutdown(ioc);
mpt3sas_base_detach(ioc);
}
@@ -10533,6 +10695,8 @@ _scsih_probe(struct pci_dev *pdev, const struct pci_device_id *id)
ioc->tm_sas_control_cb_idx = tm_sas_control_cb_idx;
ioc->logging_level = logging_level;
ioc->schedule_dead_ioc_flush_running_cmds = &_scsih_flush_running_cmds;
+ /* Host waits for a minimum of six seconds */
+ ioc->max_shutdown_latency = IO_UNIT_CONTROL_SHUTDOWN_TIMEOUT;
/*
* Enable MEMORY MOVE support flag.
*/
@@ -10681,6 +10845,7 @@ scsih_suspend(struct pci_dev *pdev, pm_message_t state)
mpt3sas_base_stop_watchdog(ioc);
flush_scheduled_work();
scsi_block_requests(shost);
+ _scsih_nvme_shutdown(ioc);
device_state = pci_choose_state(pdev, state);
ioc_info(ioc, "pdev=0x%p, slot=%s, entering operating state [D%d]\n",
pdev, pci_name(pdev), device_state);
@@ -10715,7 +10880,7 @@ scsih_resume(struct pci_dev *pdev)
r = mpt3sas_base_map_resources(ioc);
if (r)
return r;
-
+ ioc_info(ioc, "Issuing Hard Reset as part of OS Resume\n");
mpt3sas_base_hard_reset_handler(ioc, SOFT_RESET);
scsi_unblock_requests(shost);
mpt3sas_base_start_watchdog(ioc);
@@ -10784,6 +10949,7 @@ scsih_pci_slot_reset(struct pci_dev *pdev)
if (rc)
return PCI_ERS_RESULT_DISCONNECT;
+ ioc_info(ioc, "Issuing Hard Reset as part of PCI Slot Reset\n");
rc = mpt3sas_base_hard_reset_handler(ioc, FORCE_BIG_HAMMER);
ioc_warn(ioc, "hard reset: %s\n",
diff --git a/drivers/scsi/mpt3sas/mpt3sas_transport.c b/drivers/scsi/mpt3sas/mpt3sas_transport.c
index 5324662751bf..6ec5b7f33dfd 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_transport.c
+++ b/drivers/scsi/mpt3sas/mpt3sas_transport.c
@@ -719,11 +719,10 @@ mpt3sas_transport_port_add(struct MPT3SAS_ADAPTER *ioc, u16 handle,
sas_device_put(sas_device);
}
- if ((ioc->logging_level & MPT_DEBUG_TRANSPORT))
- dev_printk(KERN_INFO, &rphy->dev,
- "add: handle(0x%04x), sas_addr(0x%016llx)\n",
- handle, (unsigned long long)
- mpt3sas_port->remote_identify.sas_address);
+ dev_info(&rphy->dev,
+ "add: handle(0x%04x), sas_addr(0x%016llx)\n", handle,
+ (unsigned long long)mpt3sas_port->remote_identify.sas_address);
+
mpt3sas_port->rphy = rphy;
spin_lock_irqsave(&ioc->sas_node_lock, flags);
list_add_tail(&mpt3sas_port->port_list, &sas_node->sas_port_list);
@@ -813,6 +812,8 @@ mpt3sas_transport_port_remove(struct MPT3SAS_ADAPTER *ioc, u64 sas_address,
}
if (!ioc->remove_host)
sas_port_delete(mpt3sas_port->port);
+ ioc_info(ioc, "%s: removed: sas_addr(0x%016llx)\n",
+ __func__, (unsigned long long)sas_address);
kfree(mpt3sas_port);
}
diff --git a/drivers/scsi/mvsas/mv_init.c b/drivers/scsi/mvsas/mv_init.c
index da719b0694dc..7af9173c4925 100644
--- a/drivers/scsi/mvsas/mv_init.c
+++ b/drivers/scsi/mvsas/mv_init.c
@@ -47,6 +47,9 @@ static struct scsi_host_template mvs_sht = {
.eh_target_reset_handler = sas_eh_target_reset_handler,
.target_destroy = sas_target_destroy,
.ioctl = sas_ioctl,
+#ifdef CONFIG_COMPAT
+ .compat_ioctl = sas_ioctl,
+#endif
.shost_attrs = mvst_host_attrs,
.track_queue_depth = 1,
};
diff --git a/drivers/scsi/myrb.c b/drivers/scsi/myrb.c
index 539ac8ce4fcd..d4bd31a75b9d 100644
--- a/drivers/scsi/myrb.c
+++ b/drivers/scsi/myrb.c
@@ -3531,7 +3531,7 @@ static struct myrb_hba *myrb_detect(struct pci_dev *pdev,
spin_lock_init(&cb->queue_lock);
if (mmio_size < PAGE_SIZE)
mmio_size = PAGE_SIZE;
- cb->mmio_base = ioremap_nocache(cb->pci_addr & PAGE_MASK, mmio_size);
+ cb->mmio_base = ioremap(cb->pci_addr & PAGE_MASK, mmio_size);
if (cb->mmio_base == NULL) {
dev_err(&pdev->dev,
"Unable to map Controller Register Window\n");
diff --git a/drivers/scsi/myrb.h b/drivers/scsi/myrb.h
index 9289c19fcb2f..fb8eacfceee8 100644
--- a/drivers/scsi/myrb.h
+++ b/drivers/scsi/myrb.h
@@ -1,5 +1,5 @@
-/* SPDX-License-Identifier: GPL-2.0
- *
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
* Linux Driver for Mylex DAC960/AcceleRAID/eXtremeRAID PCI RAID Controllers
*
* Copyright 2017 Hannes Reinecke, SUSE Linux GmbH <hare@suse.com>
diff --git a/drivers/scsi/myrs.c b/drivers/scsi/myrs.c
index eb0dd566330a..5c5666491c2e 100644
--- a/drivers/scsi/myrs.c
+++ b/drivers/scsi/myrs.c
@@ -2311,7 +2311,7 @@ static struct myrs_hba *myrs_detect(struct pci_dev *pdev,
/* Map the Controller Register Window. */
if (mmio_size < PAGE_SIZE)
mmio_size = PAGE_SIZE;
- cs->mmio_base = ioremap_nocache(cs->pci_addr & PAGE_MASK, mmio_size);
+ cs->mmio_base = ioremap(cs->pci_addr & PAGE_MASK, mmio_size);
if (cs->mmio_base == NULL) {
dev_err(&pdev->dev,
"Unable to map Controller Register Window\n");
diff --git a/drivers/scsi/myrs.h b/drivers/scsi/myrs.h
index e6702ee85e9f..9f6696d0ddd5 100644
--- a/drivers/scsi/myrs.h
+++ b/drivers/scsi/myrs.h
@@ -1,5 +1,5 @@
-/* SPDX-License-Identifier: GPL-2.0
- *
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
* Linux Driver for Mylex DAC960/AcceleRAID/eXtremeRAID PCI RAID Controllers
*
* This driver supports the newer, SCSI-based firmware interface only.
diff --git a/drivers/scsi/pcmcia/nsp_cs.c b/drivers/scsi/pcmcia/nsp_cs.c
index 93616f9fd6d7..d79ce97a04bd 100644
--- a/drivers/scsi/pcmcia/nsp_cs.c
+++ b/drivers/scsi/pcmcia/nsp_cs.c
@@ -1560,7 +1560,7 @@ static int nsp_cs_config_check(struct pcmcia_device *p_dev, void *priv_data)
goto next_entry;
data->MmioAddress = (unsigned long)
- ioremap_nocache(p_dev->resource[2]->start,
+ ioremap(p_dev->resource[2]->start,
resource_size(p_dev->resource[2]));
data->MmioLength = resource_size(p_dev->resource[2]);
}
diff --git a/drivers/scsi/pm8001/pm8001_init.c b/drivers/scsi/pm8001/pm8001_init.c
index ff618ad80ebd..3c6076e4c6d2 100644
--- a/drivers/scsi/pm8001/pm8001_init.c
+++ b/drivers/scsi/pm8001/pm8001_init.c
@@ -101,6 +101,9 @@ static struct scsi_host_template pm8001_sht = {
.eh_target_reset_handler = sas_eh_target_reset_handler,
.target_destroy = sas_target_destroy,
.ioctl = sas_ioctl,
+#ifdef CONFIG_COMPAT
+ .compat_ioctl = sas_ioctl,
+#endif
.shost_attrs = pm8001_host_attrs,
.track_queue_depth = 1,
};
diff --git a/drivers/scsi/qla1280.c b/drivers/scsi/qla1280.c
index 832af4213046..3337cd341d21 100644
--- a/drivers/scsi/qla1280.c
+++ b/drivers/scsi/qla1280.c
@@ -1699,6 +1699,16 @@ qla1280_load_firmware_pio(struct scsi_qla_host *ha)
return err;
}
+#ifdef QLA_64BIT_PTR
+#define LOAD_CMD MBC_LOAD_RAM_A64_ROM
+#define DUMP_CMD MBC_DUMP_RAM_A64_ROM
+#define CMD_ARGS (BIT_7 | BIT_6 | BIT_4 | BIT_3 | BIT_2 | BIT_1 | BIT_0)
+#else
+#define LOAD_CMD MBC_LOAD_RAM
+#define DUMP_CMD MBC_DUMP_RAM
+#define CMD_ARGS (BIT_4 | BIT_3 | BIT_2 | BIT_1 | BIT_0)
+#endif
+
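Under QLA_64BIT_PTR, the _A64_ROM mailbox variants carry the upper 32 bits of the DMA address in mb[6]/mb[7], which is why CMD_ARGS gains BIT_7 | BIT_6 in that configuration; the mb[6]/mb[7] setup is visible in the hunks below.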
#define DUMP_IT_BACK 0 /* for debug of RISC loading */
static int
qla1280_load_firmware_dma(struct scsi_qla_host *ha)
@@ -1748,7 +1758,7 @@ qla1280_load_firmware_dma(struct scsi_qla_host *ha)
for(i = 0; i < cnt; i++)
((__le16 *)ha->request_ring)[i] = fw_data[i];
- mb[0] = MBC_LOAD_RAM;
+ mb[0] = LOAD_CMD;
mb[1] = risc_address;
mb[4] = cnt;
mb[3] = ha->request_dma & 0xffff;
@@ -1759,8 +1769,7 @@ qla1280_load_firmware_dma(struct scsi_qla_host *ha)
__func__, mb[0],
(void *)(long)ha->request_dma,
mb[6], mb[7], mb[2], mb[3]);
- err = qla1280_mailbox_command(ha, BIT_4 | BIT_3 | BIT_2 |
- BIT_1 | BIT_0, mb);
+ err = qla1280_mailbox_command(ha, CMD_ARGS, mb);
if (err) {
printk(KERN_ERR "scsi(%li): Failed to load partial "
"segment of f\n", ha->host_no);
@@ -1768,7 +1777,7 @@ qla1280_load_firmware_dma(struct scsi_qla_host *ha)
}
#if DUMP_IT_BACK
- mb[0] = MBC_DUMP_RAM;
+ mb[0] = DUMP_CMD;
mb[1] = risc_address;
mb[4] = cnt;
mb[3] = p_tbuf & 0xffff;
@@ -1776,8 +1785,7 @@ qla1280_load_firmware_dma(struct scsi_qla_host *ha)
mb[7] = upper_32_bits(p_tbuf) & 0xffff;
mb[6] = upper_32_bits(p_tbuf) >> 16;
- err = qla1280_mailbox_command(ha, BIT_4 | BIT_3 | BIT_2 |
- BIT_1 | BIT_0, mb);
+ err = qla1280_mailbox_command(ha, CMD_ARGS, mb);
if (err) {
printk(KERN_ERR
"Failed to dump partial segment of f/w\n");
diff --git a/drivers/scsi/qla1280.h b/drivers/scsi/qla1280.h
index a1a8aefc7cc3..e7820b5bca38 100644
--- a/drivers/scsi/qla1280.h
+++ b/drivers/scsi/qla1280.h
@@ -277,6 +277,8 @@ struct device_reg {
#define MBC_MAILBOX_REGISTER_TEST 6 /* Wrap incoming mailboxes */
#define MBC_VERIFY_CHECKSUM 7 /* Verify checksum */
#define MBC_ABOUT_FIRMWARE 8 /* Get firmware revision */
+#define MBC_LOAD_RAM_A64_ROM 9 /* Load RAM 64bit ROM version */
+#define MBC_DUMP_RAM_A64_ROM 0x0a /* Dump RAM 64bit ROM version */
#define MBC_INIT_REQUEST_QUEUE 0x10 /* Initialize request queue */
#define MBC_INIT_RESPONSE_QUEUE 0x11 /* Initialize response queue */
#define MBC_EXECUTE_IOCB 0x12 /* Execute IOCB command */
diff --git a/drivers/scsi/qla2xxx/qla_bsg.c b/drivers/scsi/qla2xxx/qla_bsg.c
index cbaf178fc979..d7169e43f5e1 100644
--- a/drivers/scsi/qla2xxx/qla_bsg.c
+++ b/drivers/scsi/qla2xxx/qla_bsg.c
@@ -54,7 +54,8 @@ void qla2x00_bsg_sp_free(srb_t *sp)
if (sp->type == SRB_CT_CMD ||
sp->type == SRB_FXIOCB_BCMD ||
sp->type == SRB_ELS_CMD_HST)
- kfree(sp->fcport);
+ qla2x00_free_fcport(sp->fcport);
+
qla2x00_rel_sp(sp);
}
@@ -405,7 +406,7 @@ done_unmap_sg:
done_free_fcport:
if (bsg_request->msgcode == FC_BSG_RPT_ELS)
- kfree(fcport);
+ qla2x00_free_fcport(fcport);
done:
return rval;
}
@@ -545,7 +546,7 @@ qla2x00_process_ct(struct bsg_job *bsg_job)
return rval;
done_free_fcport:
- kfree(fcport);
+ qla2x00_free_fcport(fcport);
done_unmap_sg:
dma_unmap_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,
bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
@@ -796,7 +797,7 @@ qla2x00_process_loopback(struct bsg_job *bsg_job)
if (atomic_read(&vha->loop_state) == LOOP_READY &&
(ha->current_topology == ISP_CFG_F ||
- (le32_to_cpu(*(uint32_t *)req_data) == ELS_OPCODE_BYTE &&
+ (get_unaligned_le32(req_data) == ELS_OPCODE_BYTE &&
req_data_len == MAX_ELS_FRAME_PAYLOAD)) &&
elreq.options == EXTERNAL_LOOPBACK) {
type = "FC_BSG_HST_VENDOR_ECHO_DIAG";
@@ -2049,7 +2050,7 @@ qlafx00_mgmt_cmd(struct bsg_job *bsg_job)
return rval;
done_free_fcport:
- kfree(fcport);
+ qla2x00_free_fcport(fcport);
done_unmap_rsp_sg:
if (piocb_rqst->flags & SRB_FXDISC_RESP_DMA_VALID)
diff --git a/drivers/scsi/qla2xxx/qla_dbg.c b/drivers/scsi/qla2xxx/qla_dbg.c
index 30afc59c1870..e5500bba06ca 100644
--- a/drivers/scsi/qla2xxx/qla_dbg.c
+++ b/drivers/scsi/qla2xxx/qla_dbg.c
@@ -18,7 +18,7 @@
* | Device Discovery | 0x2134 | 0x210e-0x2116 |
* | | | 0x211a |
* | | | 0x211c-0x2128 |
- * | | | 0x212a-0x2130 |
+ * | | | 0x212a-0x2134 |
* | Queue Command and IO tracing | 0x3074 | 0x300b |
* | | | 0x3027-0x3028 |
* | | | 0x303d-0x3041 |
diff --git a/drivers/scsi/qla2xxx/qla_def.h b/drivers/scsi/qla2xxx/qla_def.h
index 2edd9f7b3074..ed32e9715794 100644
--- a/drivers/scsi/qla2xxx/qla_def.h
+++ b/drivers/scsi/qla2xxx/qla_def.h
@@ -2402,6 +2402,7 @@ typedef struct fc_port {
unsigned int scan_needed:1;
unsigned int n2n_flag:1;
unsigned int explicit_logout:1;
+ unsigned int prli_pend_timer:1;
struct completion nvme_del_done;
uint32_t nvme_prli_service_param;
@@ -2428,6 +2429,7 @@ typedef struct fc_port {
struct work_struct free_work;
struct work_struct reg_work;
uint64_t jiffies_at_registration;
+ unsigned long prli_expired;
struct qlt_plogi_ack_t *plogi_link[QLT_PLOGI_LINK_MAX];
uint16_t tgt_id;
@@ -2464,6 +2466,7 @@ typedef struct fc_port {
struct qla_tgt_sess *tgt_session;
struct ct_sns_desc ct_desc;
enum discovery_state disc_state;
+ atomic_t shadow_disc_state;
enum discovery_state next_disc_state;
enum login_state fw_login_state;
unsigned long dm_login_expire;
@@ -2510,6 +2513,19 @@ struct event_arg {
extern const char *const port_state_str[5];
+static const char * const port_dstate_str[] = {
+ "DELETED",
+ "GNN_ID",
+ "GNL",
+ "LOGIN_PEND",
+ "LOGIN_FAILED",
+ "GPDB",
+ "UPD_FCPORT",
+ "LOGIN_COMPLETE",
+ "ADISC",
+ "DELETE_PEND"
+};
+
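port_dstate_str and the new shadow_disc_state field back the qla2x00_set_fcport_disc_state() helper used throughout the hunks below. The helper body is not part of this excerpt; a sketch consistent with these fields (4 bits of state history per transition, logged under the 0x2134 debug code added to qla_dbg.c above) might look like:

	static inline void
	qla2x00_set_fcport_disc_state(fc_port_t *fcport, int state)
	{
		int old = atomic_read(&fcport->shadow_disc_state);

		fcport->disc_state = state;
		atomic_set(&fcport->shadow_disc_state, (old << 4) | state);
		ql_dbg(ql_dbg_disc, fcport->vha, 0x2134,
		    "FCPort %8phC disc_state transition: %s to %s\n",
		    fcport->port_name, port_dstate_str[old & 0xf],
		    port_dstate_str[state]);
	}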
/*
* FC port flags.
*/
@@ -3263,7 +3279,6 @@ enum qla_work_type {
QLA_EVT_IDC_ACK,
QLA_EVT_ASYNC_LOGIN,
QLA_EVT_ASYNC_LOGOUT,
- QLA_EVT_ASYNC_LOGOUT_DONE,
QLA_EVT_ASYNC_ADISC,
QLA_EVT_UEVENT,
QLA_EVT_AENFX,
@@ -3953,7 +3968,7 @@ struct qla_hw_data {
void *sfp_data;
dma_addr_t sfp_data_dma;
- void *flt;
+ struct qla_flt_header *flt;
dma_addr_t flt_dma;
#define XGMAC_DATA_SIZE 4096
@@ -4845,6 +4860,9 @@ struct sff_8247_a0 {
(ha->fc4_type_priority == FC4_PRIORITY_NVME)) || \
NVME_ONLY_TARGET(fcport)) \
+#define PRLI_PHASE(_cls) \
+ ((_cls == DSC_LS_PRLI_PEND) || (_cls == DSC_LS_PRLI_COMP))
+
#include "qla_target.h"
#include "qla_gbl.h"
#include "qla_dbg.h"
diff --git a/drivers/scsi/qla2xxx/qla_fw.h b/drivers/scsi/qla2xxx/qla_fw.h
index 9dc09c117416..d641918cdd46 100644
--- a/drivers/scsi/qla2xxx/qla_fw.h
+++ b/drivers/scsi/qla2xxx/qla_fw.h
@@ -1354,12 +1354,12 @@ struct vp_rpt_id_entry_24xx {
uint8_t port_id[3];
uint8_t format;
union {
- struct {
+ struct _f0 {
/* format 0 loop */
uint8_t vp_idx_map[16];
uint8_t reserved_4[32];
} f0;
- struct {
+ struct _f1 {
/* format 1 fabric */
uint8_t vpstat1_subcode; /* vp_status=1 subcode */
uint8_t flags;
@@ -1381,21 +1381,22 @@ struct vp_rpt_id_entry_24xx {
uint16_t bbcr;
uint8_t reserved_5[6];
} f1;
- struct { /* format 2: N2N direct connect */
- uint8_t vpstat1_subcode;
- uint8_t flags;
- uint16_t rsv6;
- uint8_t rsv2[12];
-
- uint8_t ls_rjt_vendor;
- uint8_t ls_rjt_explanation;
- uint8_t ls_rjt_reason;
- uint8_t rsv3[5];
-
- uint8_t port_name[8];
- uint8_t node_name[8];
- uint8_t remote_nport_id[4];
- uint32_t reserved_5;
+ struct _f2 { /* format 2: N2N direct connect */
+ uint8_t vpstat1_subcode;
+ uint8_t flags;
+ uint16_t fip_flags;
+ uint8_t rsv2[12];
+
+ uint8_t ls_rjt_vendor;
+ uint8_t ls_rjt_explanation;
+ uint8_t ls_rjt_reason;
+ uint8_t rsv3[5];
+
+ uint8_t port_name[8];
+ uint8_t node_name[8];
+ uint16_t bbcr;
+ uint8_t reserved_5[2];
+ uint8_t remote_nport_id[4];
} f2;
} u;
};
@@ -1470,13 +1471,6 @@ struct qla_flt_location {
uint16_t checksum;
};
-struct qla_flt_header {
- uint16_t version;
- uint16_t length;
- uint16_t checksum;
- uint16_t unused;
-};
-
#define FLT_REG_FW 0x01
#define FLT_REG_BOOT_CODE 0x07
#define FLT_REG_VPD_0 0x14
@@ -1537,6 +1531,14 @@ struct qla_flt_region {
uint32_t end;
};
+struct qla_flt_header {
+ uint16_t version;
+ uint16_t length;
+ uint16_t checksum;
+ uint16_t unused;
+ struct qla_flt_region region[0];
+};
+
#define FLT_REGION_SIZE 16
#define FLT_MAX_REGIONS 0xFF
#define FLT_REGIONS_SIZE (FLT_REGION_SIZE * FLT_MAX_REGIONS)
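Moving qla_flt_header below qla_flt_region lets the header end in a flexible region[0] array, so the FLT can be walked directly from ha->flt. A sketch, assuming flt->length counts only the region entries (not the header):

	struct qla_flt_header *flt = ha->flt;
	struct qla_flt_region *region = &flt->region[0];
	uint16_t cnt = le16_to_cpu(flt->length) / sizeof(struct qla_flt_region);
	uint16_t i;

	for (i = 0; i < cnt; i++, region++) {
		/* inspect le32_to_cpu(region->start) / region->end here */
	}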
diff --git a/drivers/scsi/qla2xxx/qla_gbl.h b/drivers/scsi/qla2xxx/qla_gbl.h
index 5b163ad85c34..2a64729a2bc5 100644
--- a/drivers/scsi/qla2xxx/qla_gbl.h
+++ b/drivers/scsi/qla2xxx/qla_gbl.h
@@ -72,14 +72,13 @@ extern int qla2x00_async_adisc(struct scsi_qla_host *, fc_port_t *,
extern int qla2x00_async_tm_cmd(fc_port_t *, uint32_t, uint32_t, uint32_t);
extern void qla2x00_async_login_done(struct scsi_qla_host *, fc_port_t *,
uint16_t *);
-extern void qla2x00_async_logout_done(struct scsi_qla_host *, fc_port_t *,
- uint16_t *);
struct qla_work_evt *qla2x00_alloc_work(struct scsi_qla_host *,
enum qla_work_type);
extern int qla24xx_async_gnl(struct scsi_qla_host *, fc_port_t *);
int qla2x00_post_work(struct scsi_qla_host *vha, struct qla_work_evt *e);
extern void *qla2x00_alloc_iocbs_ready(struct qla_qpair *, srb_t *);
extern int qla24xx_update_fcport_fcp_prio(scsi_qla_host_t *, fc_port_t *);
+extern int qla24xx_async_abort_cmd(srb_t *, bool);
extern void qla2x00_set_fcport_state(fc_port_t *fcport, int state);
extern fc_port_t *
@@ -182,8 +181,6 @@ extern int qla2x00_post_async_login_work(struct scsi_qla_host *, fc_port_t *,
uint16_t *);
extern int qla2x00_post_async_logout_work(struct scsi_qla_host *, fc_port_t *,
uint16_t *);
-extern int qla2x00_post_async_logout_done_work(struct scsi_qla_host *,
- fc_port_t *, uint16_t *);
extern int qla2x00_post_async_adisc_work(struct scsi_qla_host *, fc_port_t *,
uint16_t *);
extern int qla2x00_post_async_adisc_done_work(struct scsi_qla_host *,
@@ -201,6 +198,7 @@ extern void qla2x00_free_host(struct scsi_qla_host *);
extern void qla2x00_relogin(struct scsi_qla_host *);
extern void qla2x00_do_work(struct scsi_qla_host *);
extern void qla2x00_free_fcports(struct scsi_qla_host *);
+extern void qla2x00_free_fcport(fc_port_t *);
extern void qla83xx_schedule_work(scsi_qla_host_t *, int);
extern void qla83xx_service_idc_aen(struct work_struct *);
@@ -253,8 +251,9 @@ extern scsi_qla_host_t *qla24xx_create_vhost(struct fc_vport *);
extern void qla2x00_sp_free_dma(srb_t *sp);
extern char *qla2x00_get_fw_version_str(struct scsi_qla_host *, char *);
-extern void qla2x00_mark_device_lost(scsi_qla_host_t *, fc_port_t *, int, int);
-extern void qla2x00_mark_all_devices_lost(scsi_qla_host_t *, int);
+extern void qla2x00_mark_device_lost(scsi_qla_host_t *, fc_port_t *, int);
+extern void qla2x00_mark_all_devices_lost(scsi_qla_host_t *);
extern struct fw_blob *qla2x00_request_firmware(scsi_qla_host_t *);
diff --git a/drivers/scsi/qla2xxx/qla_gs.c b/drivers/scsi/qla2xxx/qla_gs.c
index 446a9d6ba255..aaa4a5bbf2ff 100644
--- a/drivers/scsi/qla2xxx/qla_gs.c
+++ b/drivers/scsi/qla2xxx/qla_gs.c
@@ -2963,7 +2963,6 @@ int qla24xx_post_gpsc_work(struct scsi_qla_host *vha, fc_port_t *fcport)
return QLA_FUNCTION_FAILED;
e->u.fcport.fcport = fcport;
- fcport->flags |= FCF_ASYNC_ACTIVE;
return qla2x00_post_work(vha, e);
}
@@ -3097,9 +3096,7 @@ int qla24xx_async_gpsc(scsi_qla_host_t *vha, fc_port_t *fcport)
done_free_sp:
sp->free(sp);
- fcport->flags &= ~FCF_ASYNC_SENT;
done:
- fcport->flags &= ~FCF_ASYNC_ACTIVE;
return rval;
}
@@ -4290,7 +4287,7 @@ int qla24xx_async_gnnid(scsi_qla_host_t *vha, fc_port_t *fcport)
if (!vha->flags.online || (fcport->flags & FCF_ASYNC_SENT))
return rval;
- fcport->disc_state = DSC_GNN_ID;
+ qla2x00_set_fcport_disc_state(fcport, DSC_GNN_ID);
sp = qla2x00_get_sp(vha, fcport, GFP_ATOMIC);
if (!sp)
goto done;
@@ -4464,7 +4461,6 @@ int qla24xx_async_gfpnid(scsi_qla_host_t *vha, fc_port_t *fcport)
done_free_sp:
sp->free(sp);
- fcport->flags &= ~FCF_ASYNC_SENT;
done:
return rval;
}
diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c
index aa5204163bec..9e6b56527b25 100644
--- a/drivers/scsi/qla2xxx/qla_init.c
+++ b/drivers/scsi/qla2xxx/qla_init.c
@@ -49,16 +49,9 @@ qla2x00_sp_timeout(struct timer_list *t)
{
srb_t *sp = from_timer(sp, t, u.iocb_cmd.timer);
struct srb_iocb *iocb;
- struct req_que *req;
- unsigned long flags;
- struct qla_hw_data *ha = sp->vha->hw;
- WARN_ON_ONCE(irqs_disabled());
- spin_lock_irqsave(&ha->hardware_lock, flags);
- req = sp->qpair->req;
- req->outstanding_cmds[sp->handle] = NULL;
+ WARN_ON(irqs_disabled());
iocb = &sp->u.iocb_cmd;
- spin_unlock_irqrestore(&ha->hardware_lock, flags);
iocb->timeout(sp);
}
@@ -142,7 +135,7 @@ static void qla24xx_abort_sp_done(srb_t *sp, int res)
sp->free(sp);
}
-static int qla24xx_async_abort_cmd(srb_t *cmd_sp, bool wait)
+int qla24xx_async_abort_cmd(srb_t *cmd_sp, bool wait)
{
scsi_qla_host_t *vha = cmd_sp->vha;
struct srb_iocb *abt_iocb;
@@ -242,6 +235,7 @@ qla2x00_async_iocb_timeout(void *data)
case SRB_NACK_PRLI:
case SRB_NACK_LOGO:
case SRB_CTRL_VP:
+ default:
rc = qla24xx_async_abort_cmd(sp, false);
if (rc) {
spin_lock_irqsave(sp->qpair->qp_lock_ptr, flags);
@@ -258,10 +252,6 @@ qla2x00_async_iocb_timeout(void *data)
sp->done(sp, QLA_FUNCTION_TIMEOUT);
}
break;
- default:
- WARN_ON_ONCE(true);
- sp->done(sp, QLA_FUNCTION_TIMEOUT);
- break;
}
}
@@ -326,10 +316,10 @@ qla2x00_async_login(struct scsi_qla_host *vha, fc_port_t *fcport,
if (!sp)
goto done;
+ qla2x00_set_fcport_disc_state(fcport, DSC_LOGIN_PEND);
fcport->flags |= FCF_ASYNC_SENT;
fcport->logout_completed = 0;
- fcport->disc_state = DSC_LOGIN_PEND;
sp->type = SRB_LOGIN_CMD;
sp->name = "login";
sp->gen1 = fcport->rscn_gen;
@@ -425,7 +415,7 @@ qla2x00_async_prlo_done(struct scsi_qla_host *vha, fc_port_t *fcport,
fcport->flags &= ~FCF_ASYNC_ACTIVE;
/* Don't re-login in target mode */
if (!fcport->tgt_session)
- qla2x00_mark_device_lost(vha, fcport, 1, 0);
+ qla2x00_mark_device_lost(vha, fcport, 1);
qlt_logo_completion_handler(fcport, data[0]);
}
@@ -533,7 +523,7 @@ static int qla_post_els_plogi_work(struct scsi_qla_host *vha, fc_port_t *fcport)
e->u.fcport.fcport = fcport;
fcport->flags |= FCF_ASYNC_ACTIVE;
- fcport->disc_state = DSC_LOGIN_PEND;
+ qla2x00_set_fcport_disc_state(fcport, DSC_LOGIN_PEND);
return qla2x00_post_work(vha, e);
}
@@ -685,7 +675,7 @@ static void qla24xx_handle_gnl_done_event(scsi_qla_host_t *vha,
port_id_t id;
u64 wwn;
u16 data[2];
- u8 current_login_state;
+ u8 current_login_state, nvme_cls;
fcport = ea->fcport;
ql_dbg(ql_dbg_disc, vha, 0xffff,
@@ -744,10 +734,17 @@ static void qla24xx_handle_gnl_done_event(scsi_qla_host_t *vha,
loop_id = le16_to_cpu(e->nport_handle);
loop_id = (loop_id & 0x7fff);
- if (NVME_TARGET(vha->hw, fcport))
- current_login_state = e->current_login_state >> 4;
- else
- current_login_state = e->current_login_state & 0xf;
+ nvme_cls = e->current_login_state >> 4;
+ current_login_state = e->current_login_state & 0xf;
+
+ if (PRLI_PHASE(nvme_cls)) {
+ current_login_state = nvme_cls;
+ fcport->fc4_type &= ~FS_FC4TYPE_FCP;
+ fcport->fc4_type |= FS_FC4TYPE_NVME;
+ } else if (PRLI_PHASE(current_login_state)) {
+ fcport->fc4_type |= FS_FC4TYPE_FCP;
+ fcport->fc4_type &= ~FS_FC4TYPE_NVME;
+ }
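The firmware packs two login states into current_login_state: the FC-NVMe state in the high nibble and the FCP state in the low nibble. Whichever nibble is in a PRLI phase, per the new PRLI_PHASE() macro, decides the fc4_type the driver records for the port.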
ql_dbg(ql_dbg_disc, vha, 0x20e2,
"%s found %8phC CLS [%x|%x] fc4_type %d ID[%06x|%06x] lid[%d|%d]\n",
@@ -836,7 +833,8 @@ static void qla24xx_handle_gnl_done_event(scsi_qla_host_t *vha,
* with GNL. Push disc_state back to DELETED
* so GNL can go out again
*/
- fcport->disc_state = DSC_DELETED;
+ qla2x00_set_fcport_disc_state(fcport,
+ DSC_DELETED);
break;
case DSC_LS_PRLI_COMP:
if ((e->prli_svc_param_word_3[0] & BIT_4) == 0)
@@ -912,7 +910,7 @@ static void qla24xx_handle_gnl_done_event(scsi_qla_host_t *vha,
qla24xx_fcport_handle_login(vha, fcport);
break;
case ISP_CFG_N:
- fcport->disc_state = DSC_DELETED;
+ qla2x00_set_fcport_disc_state(fcport, DSC_DELETED);
if (time_after_eq(jiffies, fcport->dm_login_expire)) {
if (fcport->n2n_link_reset_cnt < 2) {
fcport->n2n_link_reset_cnt++;
@@ -992,7 +990,7 @@ static void qla24xx_async_gnl_sp_done(srb_t *sp, int res)
set_bit(loop_id, vha->hw->loop_id_map);
wwn = wwn_to_u64(e->port_name);
- ql_dbg(ql_dbg_disc + ql_dbg_verbose, vha, 0x20e8,
+ ql_dbg(ql_dbg_disc, vha, 0x20e8,
"%s %8phC %02x:%02x:%02x CLS %x/%x lid %x \n",
__func__, (void *)&wwn, e->port_id[2], e->port_id[1],
e->port_id[0], e->current_login_state, e->last_login_state,
@@ -1051,6 +1049,16 @@ static void qla24xx_async_gnl_sp_done(srb_t *sp, int res)
spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags);
vha->gnl.sent = 0;
+ if (!list_empty(&vha->gnl.fcports)) {
+ /* retrigger gnl */
+ list_for_each_entry_safe(fcport, tf, &vha->gnl.fcports,
+ gnl_entry) {
+ list_del_init(&fcport->gnl_entry);
+ fcport->flags &= ~(FCF_ASYNC_SENT | FCF_ASYNC_ACTIVE);
+ if (qla24xx_post_gnl_work(vha, fcport) == QLA_SUCCESS)
+ break;
+ }
+ }
spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
sp->free(sp);
@@ -1072,7 +1080,7 @@ int qla24xx_async_gnl(struct scsi_qla_host *vha, fc_port_t *fcport)
spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags);
fcport->flags |= FCF_ASYNC_SENT;
- fcport->disc_state = DSC_GNL;
+ qla2x00_set_fcport_disc_state(fcport, DSC_GNL);
fcport->last_rscn_gen = fcport->rscn_gen;
fcport->last_login_gen = fcport->login_gen;
@@ -1121,8 +1129,8 @@ int qla24xx_async_gnl(struct scsi_qla_host *vha, fc_port_t *fcport)
done_free_sp:
sp->free(sp);
- fcport->flags &= ~FCF_ASYNC_SENT;
done:
+ fcport->flags &= ~(FCF_ASYNC_ACTIVE | FCF_ASYNC_SENT);
return rval;
}
@@ -1216,12 +1224,19 @@ qla24xx_async_prli(struct scsi_qla_host *vha, fc_port_t *fcport)
struct srb_iocb *lio;
int rval = QLA_FUNCTION_FAILED;
- if (!vha->flags.online)
+ if (!vha->flags.online) {
+ ql_dbg(ql_dbg_disc, vha, 0xffff, "%s %d %8phC exit\n",
+ __func__, __LINE__, fcport->port_name);
return rval;
+ }
- if (fcport->fw_login_state == DSC_LS_PLOGI_PEND ||
- fcport->fw_login_state == DSC_LS_PRLI_PEND)
+ if ((fcport->fw_login_state == DSC_LS_PLOGI_PEND ||
+ fcport->fw_login_state == DSC_LS_PRLI_PEND) &&
+ qla_dual_mode_enabled(vha)) {
+ ql_dbg(ql_dbg_disc, vha, 0xffff, "%s %d %8phC exit\n",
+ __func__, __LINE__, fcport->port_name);
return rval;
+ }
sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
if (!sp)
@@ -1295,12 +1310,12 @@ int qla24xx_async_gpdb(struct scsi_qla_host *vha, fc_port_t *fcport, u8 opt)
return rval;
}
- fcport->disc_state = DSC_GPDB;
-
sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
if (!sp)
goto done;
+ qla2x00_set_fcport_disc_state(fcport, DSC_GPDB);
+
fcport->flags |= FCF_ASYNC_SENT;
sp->type = SRB_MB_IOCB;
sp->name = "gpdb";
@@ -1349,6 +1364,7 @@ done_free_sp:
sp->free(sp);
fcport->flags &= ~FCF_ASYNC_SENT;
done:
+ fcport->flags &= ~FCF_ASYNC_ACTIVE;
qla24xx_post_gpdb_work(vha, fcport, opt);
return rval;
}
@@ -1379,7 +1395,7 @@ void __qla24xx_handle_gpdb_event(scsi_qla_host_t *vha, struct event_arg *ea)
ql_dbg(ql_dbg_disc, vha, 0x20d6,
"%s %d %8phC session revalidate success\n",
__func__, __LINE__, ea->fcport->port_name);
- ea->fcport->disc_state = DSC_LOGIN_COMPLETE;
+ qla2x00_set_fcport_disc_state(ea->fcport, DSC_LOGIN_COMPLETE);
}
spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
}
@@ -1433,7 +1449,7 @@ void qla24xx_handle_gpdb_event(scsi_qla_host_t *vha, struct event_arg *ea)
/* Set discovery state back to GNL for a relogin attempt */
if (qla_dual_mode_enabled(vha) ||
qla_ini_mode_enabled(vha)) {
- fcport->disc_state = DSC_GNL;
+ qla2x00_set_fcport_disc_state(fcport, DSC_GNL);
set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
}
return;
@@ -1600,6 +1616,10 @@ int qla24xx_fcport_handle_login(struct scsi_qla_host *vha, fc_port_t *fcport)
break;
default:
if (fcport->login_pause) {
+ ql_dbg(ql_dbg_disc, vha, 0x20d8,
+ "%s %d %8phC exit\n",
+ __func__, __LINE__,
+ fcport->port_name);
fcport->last_rscn_gen = fcport->rscn_gen;
fcport->last_login_gen = fcport->login_gen;
set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
@@ -1758,9 +1778,23 @@ qla2x00_tmf_iocb_timeout(void *data)
{
srb_t *sp = data;
struct srb_iocb *tmf = &sp->u.iocb_cmd;
+ int rc, h;
+ unsigned long flags;
- tmf->u.tmf.comp_status = CS_TIMEOUT;
- complete(&tmf->u.tmf.comp);
+ rc = qla24xx_async_abort_cmd(sp, false);
+ if (rc) {
+ spin_lock_irqsave(sp->qpair->qp_lock_ptr, flags);
+ for (h = 1; h < sp->qpair->req->num_outstanding_cmds; h++) {
+ if (sp->qpair->req->outstanding_cmds[h] == sp) {
+ sp->qpair->req->outstanding_cmds[h] = NULL;
+ break;
+ }
+ }
+ spin_unlock_irqrestore(sp->qpair->qp_lock_ptr, flags);
+ tmf->u.tmf.comp_status = CS_TIMEOUT;
+ tmf->u.tmf.data = QLA_FUNCTION_FAILED;
+ complete(&tmf->u.tmf.comp);
+ }
}
static void qla2x00_tmf_sp_done(srb_t *sp, int res)
@@ -1976,7 +2010,7 @@ qla24xx_handle_plogi_done_event(struct scsi_qla_host *vha, struct event_arg *ea)
qla24xx_post_prli_work(vha, ea->fcport);
} else {
ql_dbg(ql_dbg_disc, vha, 0x20ea,
- "%s %d %8phC LoopID 0x%x in use with %06x. post gnl\n",
+ "%s %d %8phC LoopID 0x%x in use with %06x. post gpdb\n",
__func__, __LINE__, ea->fcport->port_name,
ea->fcport->loop_id, ea->fcport->d_id.b24);
@@ -1996,11 +2030,11 @@ qla24xx_handle_plogi_done_event(struct scsi_qla_host *vha, struct event_arg *ea)
__func__, __LINE__, ea->fcport->port_name, ea->data[1]);
ea->fcport->flags &= ~FCF_ASYNC_SENT;
- ea->fcport->disc_state = DSC_LOGIN_FAILED;
+ qla2x00_set_fcport_disc_state(ea->fcport, DSC_LOGIN_FAILED);
if (ea->data[1] & QLA_LOGIO_LOGIN_RETRIED)
set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
else
- qla2x00_mark_device_lost(vha, ea->fcport, 1, 0);
+ qla2x00_mark_device_lost(vha, ea->fcport, 1);
break;
case MBS_LOOP_ID_USED:
/* data[1] = IO PARAM 1 = nport ID */
@@ -2047,6 +2081,7 @@ qla24xx_handle_plogi_done_event(struct scsi_qla_host *vha, struct event_arg *ea)
set_bit(lid, vha->hw->loop_id_map);
ea->fcport->loop_id = lid;
ea->fcport->keep_nport_handle = 0;
+ ea->fcport->logout_on_delete = 1;
qlt_schedule_sess_for_deletion(ea->fcport);
}
break;
@@ -2054,16 +2089,6 @@ qla24xx_handle_plogi_done_event(struct scsi_qla_host *vha, struct event_arg *ea)
return;
}
-void
-qla2x00_async_logout_done(struct scsi_qla_host *vha, fc_port_t *fcport,
- uint16_t *data)
-{
- qlt_logo_completion_handler(fcport, data[0]);
- fcport->login_gen++;
- fcport->flags &= ~FCF_ASYNC_ACTIVE;
- return;
-}
-
/****************************************************************************/
/* QLogic ISP2x00 Hardware Support Functions. */
/****************************************************************************/
@@ -4925,12 +4950,8 @@ qla2x00_configure_loop(scsi_qla_host_t *vha)
qla2x00_get_data_rate(vha);
/* Determine what we need to do */
- if (ha->current_topology == ISP_CFG_FL &&
- (test_bit(LOCAL_LOOP_UPDATE, &flags))) {
-
- set_bit(RSCN_UPDATE, &flags);
-
- } else if (ha->current_topology == ISP_CFG_F &&
+ if ((ha->current_topology == ISP_CFG_FL ||
+ ha->current_topology == ISP_CFG_F) &&
(test_bit(LOCAL_LOOP_UPDATE, &flags))) {
set_bit(RSCN_UPDATE, &flags);
@@ -5087,7 +5108,7 @@ skip_login:
rval = qla2x00_get_id_list(vha, ha->gid_list, ha->gid_list_dma,
&entries);
if (rval != QLA_SUCCESS)
- goto cleanup_allocation;
+ goto err;
ql_dbg(ql_dbg_disc, vha, 0x2011,
"Entries in ID list (%d).\n", entries);
@@ -5117,7 +5138,7 @@ skip_login:
ql_log(ql_log_warn, vha, 0x2012,
"Memory allocation failed for fcport.\n");
rval = QLA_MEMORY_ALLOC_FAILED;
- goto cleanup_allocation;
+ goto err;
}
new_fcport->flags &= ~FCF_FABRIC_DEVICE;
@@ -5207,7 +5228,7 @@ skip_login:
ql_log(ql_log_warn, vha, 0xd031,
"Failed to allocate memory for fcport.\n");
rval = QLA_MEMORY_ALLOC_FAILED;
- goto cleanup_allocation;
+ goto err;
}
spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags);
new_fcport->flags &= ~FCF_FABRIC_DEVICE;
@@ -5230,7 +5251,7 @@ skip_login:
qla_ini_mode_enabled(vha)) &&
atomic_read(&fcport->state) == FCS_ONLINE) {
qla2x00_mark_device_lost(vha, fcport,
- ql2xplogiabsentdevice, 0);
+ ql2xplogiabsentdevice);
if (fcport->loop_id != FC_NO_LOOP_ID &&
(fcport->flags & FCF_FCP2_DEVICE) == 0 &&
fcport->port_type != FCT_INITIATOR &&
@@ -5250,15 +5271,14 @@ skip_login:
qla24xx_fcport_handle_login(vha, fcport);
}
-cleanup_allocation:
- kfree(new_fcport);
+ qla2x00_free_fcport(new_fcport);
- if (rval != QLA_SUCCESS) {
- ql_dbg(ql_dbg_disc, vha, 0x2098,
- "Configure local loop error exit: rval=%x.\n", rval);
- }
+ return rval;
- return (rval);
+err:
+ ql_dbg(ql_dbg_disc, vha, 0x2098,
+ "Configure local loop error exit: rval=%x.\n", rval);
+ return rval;
}
static void
@@ -5385,11 +5405,14 @@ qla2x00_update_fcport(scsi_qla_host_t *vha, fc_port_t *fcport)
ql_dbg(ql_dbg_disc, vha, 0x20ef, "%s %8phC\n",
__func__, fcport->port_name);
- fcport->disc_state = DSC_UPD_FCPORT;
+ qla2x00_set_fcport_disc_state(fcport, DSC_UPD_FCPORT);
fcport->login_retry = vha->hw->login_retry_count;
fcport->flags &= ~(FCF_LOGIN_NEEDED | FCF_ASYNC_SENT);
fcport->deleted = 0;
- fcport->logout_on_delete = 1;
+ if (vha->hw->current_topology == ISP_CFG_NL)
+ fcport->logout_on_delete = 0;
+ else
+ fcport->logout_on_delete = 1;
fcport->n2n_chip_reset = fcport->n2n_link_reset_cnt = 0;
switch (vha->hw->current_topology) {
@@ -5405,7 +5428,7 @@ qla2x00_update_fcport(scsi_qla_host_t *vha, fc_port_t *fcport)
if (NVME_TARGET(vha->hw, fcport)) {
qla_nvme_register_remote(vha, fcport);
- fcport->disc_state = DSC_LOGIN_COMPLETE;
+ qla2x00_set_fcport_disc_state(fcport, DSC_LOGIN_COMPLETE);
qla2x00_set_fcport_state(fcport, FCS_ONLINE);
return;
}
@@ -5450,7 +5473,7 @@ qla2x00_update_fcport(scsi_qla_host_t *vha, fc_port_t *fcport)
}
}
- fcport->disc_state = DSC_LOGIN_COMPLETE;
+ qla2x00_set_fcport_disc_state(fcport, DSC_LOGIN_COMPLETE);
}
void qla_register_fcport_fn(struct work_struct *work)
@@ -5859,7 +5882,7 @@ qla2x00_find_all_fabric_devs(scsi_qla_host_t *vha)
if (NVME_TARGET(vha->hw, fcport)) {
if (fcport->disc_state == DSC_DELETE_PEND) {
- fcport->disc_state = DSC_GNL;
+ qla2x00_set_fcport_disc_state(fcport, DSC_GNL);
vha->fcport_count--;
fcport->login_succ = 0;
}
@@ -5905,7 +5928,7 @@ qla2x00_find_all_fabric_devs(scsi_qla_host_t *vha)
qla_ini_mode_enabled(vha)) &&
atomic_read(&fcport->state) == FCS_ONLINE) {
qla2x00_mark_device_lost(vha, fcport,
- ql2xplogiabsentdevice, 0);
+ ql2xplogiabsentdevice);
if (fcport->loop_id != FC_NO_LOOP_ID &&
(fcport->flags & FCF_FCP2_DEVICE) == 0 &&
fcport->port_type != FCT_INITIATOR &&
@@ -6071,7 +6094,7 @@ qla2x00_fabric_login(scsi_qla_host_t *vha, fc_port_t *fcport,
ha->isp_ops->fabric_logout(vha, fcport->loop_id,
fcport->d_id.b.domain, fcport->d_id.b.area,
fcport->d_id.b.al_pa);
- qla2x00_mark_device_lost(vha, fcport, 1, 0);
+ qla2x00_mark_device_lost(vha, fcport, 1);
rval = 1;
break;
@@ -6585,9 +6608,9 @@ qla2x00_quiesce_io(scsi_qla_host_t *vha)
atomic_set(&ha->loop_down_timer, LOOP_DOWN_TIME);
if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
atomic_set(&vha->loop_state, LOOP_DOWN);
- qla2x00_mark_all_devices_lost(vha, 0);
+ qla2x00_mark_all_devices_lost(vha);
list_for_each_entry(vp, &ha->vp_list, list)
- qla2x00_mark_all_devices_lost(vp, 0);
+ qla2x00_mark_all_devices_lost(vp);
} else {
if (!atomic_read(&vha->loop_down_timer))
atomic_set(&vha->loop_down_timer,
@@ -6663,14 +6686,14 @@ qla2x00_abort_isp_cleanup(scsi_qla_host_t *vha)
atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME);
if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
atomic_set(&vha->loop_state, LOOP_DOWN);
- qla2x00_mark_all_devices_lost(vha, 0);
+ qla2x00_mark_all_devices_lost(vha);
spin_lock_irqsave(&ha->vport_slock, flags);
list_for_each_entry(vp, &ha->vp_list, list) {
atomic_inc(&vp->vref_count);
spin_unlock_irqrestore(&ha->vport_slock, flags);
- qla2x00_mark_all_devices_lost(vp, 0);
+ qla2x00_mark_all_devices_lost(vp);
spin_lock_irqsave(&ha->vport_slock, flags);
atomic_dec(&vp->vref_count);
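A recurring idiom in the timeout handlers of this patch (qla2x00_tmf_iocb_timeout above, and the ELS dcmd handlers in qla_iocb.c below): when the async abort itself fails, the handler reclaims the command's slot in the per-queue outstanding_cmds table before completing it. A minimal sketch of that table scan, with the qpair locking elided and placeholder types that are not the qla2xxx API:

#include <stddef.h>

#define MAX_CMDS 8

/* Illustrative stand-ins for the driver structures. */
struct sp { int id; };
struct req { struct sp *outstanding[MAX_CMDS]; };

/* Scan the handle table (slot 0 is reserved) and clear the entry that
 * still points at this command; in the driver this runs under the
 * qpair lock. */
static void reclaim_handle(struct req *req, struct sp *sp)
{
    int h;

    for (h = 1; h < MAX_CMDS; h++) {
        if (req->outstanding[h] == sp) {
            req->outstanding[h] = NULL;
            break;
        }
    }
}

int main(void)
{
    struct req req = { { NULL } };
    struct sp cmd = { 42 };

    req.outstanding[3] = &cmd;
    reclaim_handle(&req, &cmd);
    return req.outstanding[3] != NULL;  /* exits 0 once reclaimed */
}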
diff --git a/drivers/scsi/qla2xxx/qla_inline.h b/drivers/scsi/qla2xxx/qla_inline.h
index 352aba4127f7..364b3db8b2dc 100644
--- a/drivers/scsi/qla2xxx/qla_inline.h
+++ b/drivers/scsi/qla2xxx/qla_inline.h
@@ -105,6 +105,30 @@ qla2x00_clean_dsd_pool(struct qla_hw_data *ha, struct crc_context *ctx)
INIT_LIST_HEAD(&ctx->dsd_list);
}
+static inline void
+qla2x00_set_fcport_disc_state(fc_port_t *fcport, int state)
+{
+ int old_val;
+ uint8_t shiftbits, mask;
+
+ /* This will have to change when the max no. of states > 16 */
+ shiftbits = 4;
+ mask = (1 << shiftbits) - 1;
+
+ fcport->disc_state = state;
+ while (1) {
+ old_val = atomic_read(&fcport->shadow_disc_state);
+ if (old_val == atomic_cmpxchg(&fcport->shadow_disc_state,
+ old_val, (old_val << shiftbits) | state)) {
+ ql_dbg(ql_dbg_disc, fcport->vha, 0x2134,
+ "FCPort %8phC disc_state transition: %s to %s - portid=%06x.\n",
+ fcport->port_name, port_dstate_str[old_val & mask],
+ port_dstate_str[state], fcport->d_id.b24);
+ return;
+ }
+ }
+}
+
static inline int
qla2x00_hba_err_chk_enabled(srb_t *sp)
{
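The new qla2x00_set_fcport_disc_state() helper above records more than the current state: shadow_disc_state is an atomic word used as a shift register, four bits per entry, so recent transitions stay visible for debugging, and the cmpxchg loop guarantees no concurrent update is lost. A stripped-down user-space sketch of the same technique using C11 atomics (names are illustrative; the real helper logs each transition via ql_dbg instead of printing):

#include <stdatomic.h>
#include <stdio.h>

#define SHIFT 4                         /* 4 bits per recorded state */
#define MASK  ((1u << SHIFT) - 1)

static _Atomic unsigned int shadow;     /* packed history, newest in low bits */

static void push_state(unsigned int state)
{
    unsigned int old = atomic_load(&shadow);

    /* retry until our shifted-in value wins any race */
    while (!atomic_compare_exchange_weak(&shadow, &old,
                                         (old << SHIFT) | (state & MASK)))
        ;
}

int main(void)
{
    push_state(1);
    push_state(3);
    push_state(7);
    printf("cur=%u prev=%u\n", atomic_load(&shadow) & MASK,
           (atomic_load(&shadow) >> SHIFT) & MASK);  /* cur=7 prev=3 */
    return 0;
}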
diff --git a/drivers/scsi/qla2xxx/qla_iocb.c b/drivers/scsi/qla2xxx/qla_iocb.c
index 8b050f0b4333..47bf60a9490a 100644
--- a/drivers/scsi/qla2xxx/qla_iocb.c
+++ b/drivers/scsi/qla2xxx/qla_iocb.c
@@ -2537,13 +2537,32 @@ qla2x00_els_dcmd_iocb_timeout(void *data)
fc_port_t *fcport = sp->fcport;
struct scsi_qla_host *vha = sp->vha;
struct srb_iocb *lio = &sp->u.iocb_cmd;
+ unsigned long flags = 0;
+ int res, h;
ql_dbg(ql_dbg_io, vha, 0x3069,
"%s Timeout, hdl=%x, portid=%02x%02x%02x\n",
sp->name, sp->handle, fcport->d_id.b.domain, fcport->d_id.b.area,
fcport->d_id.b.al_pa);
- complete(&lio->u.els_logo.comp);
+ /* Abort the exchange */
+ res = qla24xx_async_abort_cmd(sp, false);
+ if (res) {
+ ql_dbg(ql_dbg_io, vha, 0x3070,
+ "mbx abort_command failed.\n");
+ spin_lock_irqsave(sp->qpair->qp_lock_ptr, flags);
+ for (h = 1; h < sp->qpair->req->num_outstanding_cmds; h++) {
+ if (sp->qpair->req->outstanding_cmds[h] == sp) {
+ sp->qpair->req->outstanding_cmds[h] = NULL;
+ break;
+ }
+ }
+ spin_unlock_irqrestore(sp->qpair->qp_lock_ptr, flags);
+ complete(&lio->u.els_logo.comp);
+ } else {
+ ql_dbg(ql_dbg_io, vha, 0x3071,
+ "mbx abort_command success.\n");
+ }
}
static void qla2x00_els_dcmd_sp_done(srb_t *sp, int res)
@@ -2717,23 +2736,29 @@ qla2x00_els_dcmd2_iocb_timeout(void *data)
srb_t *sp = data;
fc_port_t *fcport = sp->fcport;
struct scsi_qla_host *vha = sp->vha;
- struct qla_hw_data *ha = vha->hw;
unsigned long flags = 0;
- int res;
+ int res, h;
ql_dbg(ql_dbg_io + ql_dbg_disc, vha, 0x3069,
"%s hdl=%x ELS Timeout, %8phC portid=%06x\n",
sp->name, sp->handle, fcport->port_name, fcport->d_id.b24);
/* Abort the exchange */
- spin_lock_irqsave(&ha->hardware_lock, flags);
- res = ha->isp_ops->abort_command(sp);
+ res = qla24xx_async_abort_cmd(sp, false);
ql_dbg(ql_dbg_io, vha, 0x3070,
"mbx abort_command %s\n",
(res == QLA_SUCCESS) ? "successful" : "failed");
- spin_unlock_irqrestore(&ha->hardware_lock, flags);
-
- sp->done(sp, QLA_FUNCTION_TIMEOUT);
+ if (res) {
+ spin_lock_irqsave(sp->qpair->qp_lock_ptr, flags);
+ for (h = 1; h < sp->qpair->req->num_outstanding_cmds; h++) {
+ if (sp->qpair->req->outstanding_cmds[h] == sp) {
+ sp->qpair->req->outstanding_cmds[h] = NULL;
+ break;
+ }
+ }
+ spin_unlock_irqrestore(sp->qpair->qp_lock_ptr, flags);
+ sp->done(sp, QLA_FUNCTION_TIMEOUT);
+ }
}
void qla2x00_els_dcmd2_free(scsi_qla_host_t *vha, struct els_plogi *els_plogi)
@@ -2852,7 +2877,8 @@ static void qla2x00_els_dcmd2_sp_done(srb_t *sp, int res)
fw_status[0], fw_status[1], fw_status[2]);
fcport->flags &= ~FCF_ASYNC_SENT;
- fcport->disc_state = DSC_LOGIN_FAILED;
+ qla2x00_set_fcport_disc_state(fcport,
+ DSC_LOGIN_FAILED);
set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
break;
}
@@ -2865,7 +2891,7 @@ static void qla2x00_els_dcmd2_sp_done(srb_t *sp, int res)
fw_status[0], fw_status[1], fw_status[2]);
sp->fcport->flags &= ~FCF_ASYNC_SENT;
- sp->fcport->disc_state = DSC_LOGIN_FAILED;
+ qla2x00_set_fcport_disc_state(fcport, DSC_LOGIN_FAILED);
set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
break;
}
@@ -2898,11 +2924,12 @@ qla24xx_els_dcmd2_iocb(scsi_qla_host_t *vha, int els_opcode,
if (!sp) {
ql_log(ql_log_info, vha, 0x70e6,
"SRB allocation failed\n");
+ fcport->flags &= ~FCF_ASYNC_ACTIVE;
return -ENOMEM;
}
fcport->flags |= FCF_ASYNC_SENT;
- fcport->disc_state = DSC_LOGIN_PEND;
+ qla2x00_set_fcport_disc_state(fcport, DSC_LOGIN_PEND);
elsio = &sp->u.iocb_cmd;
ql_dbg(ql_dbg_io, vha, 0x3073,
"Enter: PLOGI portid=%06x\n", fcport->d_id.b24);
@@ -2975,7 +3002,7 @@ qla24xx_els_dcmd2_iocb(scsi_qla_host_t *vha, int els_opcode,
}
out:
- fcport->flags &= ~(FCF_ASYNC_SENT);
+ fcport->flags &= ~(FCF_ASYNC_SENT | FCF_ASYNC_ACTIVE);
qla2x00_els_dcmd2_free(vha, &elsio->u.els_plogi);
sp->free(sp);
done:
diff --git a/drivers/scsi/qla2xxx/qla_isr.c b/drivers/scsi/qla2xxx/qla_isr.c
index 7b8a6bfcf08d..e7bad0bfffda 100644
--- a/drivers/scsi/qla2xxx/qla_isr.c
+++ b/drivers/scsi/qla2xxx/qla_isr.c
@@ -788,7 +788,7 @@ skip_rio:
if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
atomic_set(&vha->loop_state, LOOP_DOWN);
atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME);
- qla2x00_mark_all_devices_lost(vha, 1);
+ qla2x00_mark_all_devices_lost(vha);
}
if (vha->vp_idx) {
@@ -861,7 +861,7 @@ skip_rio:
}
vha->device_flags |= DFLG_NO_CABLE;
- qla2x00_mark_all_devices_lost(vha, 1);
+ qla2x00_mark_all_devices_lost(vha);
}
if (vha->vp_idx) {
@@ -881,7 +881,7 @@ skip_rio:
if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
atomic_set(&vha->loop_state, LOOP_DOWN);
atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME);
- qla2x00_mark_all_devices_lost(vha, 1);
+ qla2x00_mark_all_devices_lost(vha);
}
if (vha->vp_idx) {
@@ -924,7 +924,7 @@ skip_rio:
atomic_set(&vha->loop_down_timer,
LOOP_DOWN_TIME);
if (!N2N_TOPO(ha))
- qla2x00_mark_all_devices_lost(vha, 1);
+ qla2x00_mark_all_devices_lost(vha);
}
if (vha->vp_idx) {
@@ -953,7 +953,7 @@ skip_rio:
if (!atomic_read(&vha->loop_down_timer))
atomic_set(&vha->loop_down_timer,
LOOP_DOWN_TIME);
- qla2x00_mark_all_devices_lost(vha, 1);
+ qla2x00_mark_all_devices_lost(vha);
}
if (vha->vp_idx) {
@@ -1022,7 +1022,6 @@ skip_rio:
"Marking port lost loopid=%04x portid=%06x.\n",
fcport->loop_id, fcport->d_id.b24);
if (qla_ini_mode_enabled(vha)) {
- qla2x00_mark_device_lost(fcport->vha, fcport, 1, 1);
fcport->logout_on_delete = 0;
qlt_schedule_sess_for_deletion(fcport);
}
@@ -1034,14 +1033,14 @@ global_port_update:
atomic_set(&vha->loop_down_timer,
LOOP_DOWN_TIME);
vha->device_flags |= DFLG_NO_CABLE;
- qla2x00_mark_all_devices_lost(vha, 1);
+ qla2x00_mark_all_devices_lost(vha);
}
if (vha->vp_idx) {
atomic_set(&vha->vp_state, VP_FAILED);
fc_vport_set_state(vha->fc_vport,
FC_VPORT_FAILED);
- qla2x00_mark_all_devices_lost(vha, 1);
+ qla2x00_mark_all_devices_lost(vha);
}
vha->flags.management_server_logged_in = 0;
@@ -1253,11 +1252,33 @@ global_port_update:
case MBA_DPORT_DIAGNOSTICS:
ql_dbg(ql_dbg_async, vha, 0x5052,
- "D-Port Diagnostics: %04x result=%s\n",
- mb[0],
- mb[1] == 0 ? "start" :
- mb[1] == 1 ? "done (pass)" :
- mb[1] == 2 ? "done (error)" : "other");
+ "D-Port Diagnostics: %04x %04x %04x %04x\n",
+ mb[0], mb[1], mb[2], mb[3]);
+ if (IS_QLA83XX(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha)) {
+ static char *results[] = {
+ "start", "done(pass)", "done(error)", "undefined" };
+ static char *types[] = {
+ "none", "dynamic", "static", "other" };
+ uint result = mb[1] >> 0 & 0x3;
+ uint type = mb[1] >> 6 & 0x3;
+ uint sw = mb[1] >> 15 & 0x1;
+ ql_dbg(ql_dbg_async, vha, 0x5052,
+ "D-Port Diagnostics: result=%s type=%s [sw=%u]\n",
+ results[result], types[type], sw);
+ if (result == 2) {
+ static char *reasons[] = {
+ "reserved", "unexpected reject",
+ "unexpected phase", "retry exceeded",
+ "timed out", "not supported",
+ "user stopped" };
+ uint reason = mb[2] >> 0 & 0xf;
+ uint phase = mb[2] >> 12 & 0xf;
+ ql_dbg(ql_dbg_async, vha, 0x5052,
+ "D-Port Diagnostics: reason=%s phase=%u \n",
+ reason < 7 ? reasons[reason] : "other",
+ phase >> 1);
+ }
+ }
break;
case MBA_TEMPERATURE_ALERT:
@@ -2152,12 +2173,12 @@ qla2x00_handle_dif_error(srb_t *sp, struct sts_entry_24xx *sts24)
* swab32 of the "data" field in the beginning of qla2x00_status_entry()
* would make guard field appear at offset 2
*/
- a_guard = le16_to_cpu(*(uint16_t *)(ap + 2));
- a_app_tag = le16_to_cpu(*(uint16_t *)(ap + 0));
- a_ref_tag = le32_to_cpu(*(uint32_t *)(ap + 4));
- e_guard = le16_to_cpu(*(uint16_t *)(ep + 2));
- e_app_tag = le16_to_cpu(*(uint16_t *)(ep + 0));
- e_ref_tag = le32_to_cpu(*(uint32_t *)(ep + 4));
+ a_guard = get_unaligned_le16(ap + 2);
+ a_app_tag = get_unaligned_le16(ap + 0);
+ a_ref_tag = get_unaligned_le32(ap + 4);
+ e_guard = get_unaligned_le16(ep + 2);
+ e_app_tag = get_unaligned_le16(ep + 0);
+ e_ref_tag = get_unaligned_le32(ep + 4);
ql_dbg(ql_dbg_io, vha, 0x3023,
"iocb(s) %p Returned STATUS.\n", sts24);
@@ -2745,7 +2766,6 @@ check_scsi_status:
port_state_str[FCS_ONLINE],
comp_status);
- qla2x00_mark_device_lost(fcport->vha, fcport, 1, 1);
qlt_schedule_sess_for_deletion(fcport);
}
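The expanded MBA_DPORT_DIAGNOSTICS handler decodes mb[1] as packed bit-fields: a two-bit result in bits 0-1, a two-bit type in bits 6-7 and a switch flag in bit 15, with mb[2] carrying the reason and phase on error. A small standalone decode of mb[1], field positions taken from the hunk above rather than from firmware documentation:

#include <stdio.h>

int main(void)
{
    unsigned int mb1 = 0x80c2;              /* sample mailbox value */
    unsigned int result = mb1 >> 0 & 0x3;   /* 2 -> done(error) */
    unsigned int type = mb1 >> 6 & 0x3;     /* 3 -> other */
    unsigned int sw = mb1 >> 15 & 0x1;      /* 1 */

    printf("result=%u type=%u sw=%u\n", result, type, sw);
    return 0;
}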
diff --git a/drivers/scsi/qla2xxx/qla_mbx.c b/drivers/scsi/qla2xxx/qla_mbx.c
index b7c1108c48e2..9e09964f5c0e 100644
--- a/drivers/scsi/qla2xxx/qla_mbx.c
+++ b/drivers/scsi/qla2xxx/qla_mbx.c
@@ -6152,9 +6152,8 @@ qla2x00_dump_mctp_data(scsi_qla_host_t *vha, dma_addr_t req_dma, uint32_t addr,
mcp->mb[7] = LSW(MSD(req_dma));
mcp->mb[8] = MSW(addr);
/* Setting RAM ID to valid */
- mcp->mb[10] |= BIT_7;
/* For MCTP RAM ID is 0x40 */
- mcp->mb[10] |= 0x40;
+ mcp->mb[10] = BIT_7 | 0x40;
mcp->out_mb |= MBX_10|MBX_8|MBX_7|MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|
MBX_0;
diff --git a/drivers/scsi/qla2xxx/qla_mid.c b/drivers/scsi/qla2xxx/qla_mid.c
index eabc5127174e..8ae639d089d1 100644
--- a/drivers/scsi/qla2xxx/qla_mid.c
+++ b/drivers/scsi/qla2xxx/qla_mid.c
@@ -147,7 +147,7 @@ qla2x00_mark_vp_devices_dead(scsi_qla_host_t *vha)
"Marking port dead, loop_id=0x%04x : %x.\n",
fcport->loop_id, fcport->vha->vp_idx);
- qla2x00_mark_device_lost(vha, fcport, 0, 0);
+ qla2x00_mark_device_lost(vha, fcport, 0);
qla2x00_set_fcport_state(fcport, FCS_UNCONFIGURED);
}
}
@@ -167,7 +167,7 @@ qla24xx_disable_vp(scsi_qla_host_t *vha)
list_for_each_entry(fcport, &vha->vp_fcports, list)
fcport->logout_on_delete = 0;
- qla2x00_mark_all_devices_lost(vha, 0);
+ qla2x00_mark_all_devices_lost(vha);
/* Remove port id from vp target map */
spin_lock_irqsave(&vha->hw->hardware_lock, flags);
@@ -327,7 +327,7 @@ qla2x00_vp_abort_isp(scsi_qla_host_t *vha)
*/
if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
atomic_set(&vha->loop_state, LOOP_DOWN);
- qla2x00_mark_all_devices_lost(vha, 0);
+ qla2x00_mark_all_devices_lost(vha);
} else {
if (!atomic_read(&vha->loop_down_timer))
atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME);
diff --git a/drivers/scsi/qla2xxx/qla_mr.c b/drivers/scsi/qla2xxx/qla_mr.c
index 605b59c76c90..cad1fc2a1b28 100644
--- a/drivers/scsi/qla2xxx/qla_mr.c
+++ b/drivers/scsi/qla2xxx/qla_mr.c
@@ -789,7 +789,7 @@ qlafx00_iospace_config(struct qla_hw_data *ha)
}
ha->cregbase =
- ioremap_nocache(pci_resource_start(ha->pdev, 0), BAR0_LEN_FX00);
+ ioremap(pci_resource_start(ha->pdev, 0), BAR0_LEN_FX00);
if (!ha->cregbase) {
ql_log_pci(ql_log_fatal, ha->pdev, 0x0128,
"cannot remap MMIO (%s), aborting\n", pci_name(ha->pdev));
@@ -810,7 +810,7 @@ qlafx00_iospace_config(struct qla_hw_data *ha)
}
ha->iobase =
- ioremap_nocache(pci_resource_start(ha->pdev, 2), BAR2_LEN_FX00);
+ ioremap(pci_resource_start(ha->pdev, 2), BAR2_LEN_FX00);
if (!ha->iobase) {
ql_log_pci(ql_log_fatal, ha->pdev, 0x012b,
"cannot remap MMIO (%s), aborting\n", pci_name(ha->pdev));
@@ -1210,9 +1210,9 @@ qlafx00_find_all_targets(scsi_qla_host_t *vha,
" Existing TGT-ID %x did not get "
" offline event from firmware.\n",
fcport->old_tgt_id);
- qla2x00_mark_device_lost(vha, fcport, 0, 0);
+ qla2x00_mark_device_lost(vha, fcport, 0);
set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
- kfree(new_fcport);
+ qla2x00_free_fcport(new_fcport);
return rval;
}
break;
@@ -1230,7 +1230,7 @@ qlafx00_find_all_targets(scsi_qla_host_t *vha,
return QLA_MEMORY_ALLOC_FAILED;
}
- kfree(new_fcport);
+ qla2x00_free_fcport(new_fcport);
return rval;
}
@@ -1274,7 +1274,7 @@ qlafx00_configure_all_targets(scsi_qla_host_t *vha)
if (atomic_read(&fcport->state) == FCS_DEVICE_LOST) {
if (fcport->port_type != FCT_INITIATOR)
- qla2x00_mark_device_lost(vha, fcport, 0, 0);
+ qla2x00_mark_device_lost(vha, fcport, 0);
}
}
@@ -1298,7 +1298,7 @@ qlafx00_configure_all_targets(scsi_qla_host_t *vha)
/* Free all new device structures not processed. */
list_for_each_entry_safe(fcport, rmptemp, &new_fcports, list) {
list_del(&fcport->list);
- kfree(fcport);
+ qla2x00_free_fcport(fcport);
}
return rval;
@@ -1706,7 +1706,7 @@ qlafx00_tgt_detach(struct scsi_qla_host *vha, int tgt_id)
if (!fcport)
return;
- qla2x00_mark_device_lost(vha, fcport, 0, 0);
+ qla2x00_mark_device_lost(vha, fcport, 0);
return;
}
@@ -1740,7 +1740,7 @@ qlafx00_process_aen(struct scsi_qla_host *vha, struct qla_work_evt *evt)
set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
} else if (evt->u.aenfx.mbx[2] == 2) {
vha->device_flags |= DFLG_NO_CABLE;
- qla2x00_mark_all_devices_lost(vha, 1);
+ qla2x00_mark_all_devices_lost(vha);
}
}
break;
@@ -2513,7 +2513,7 @@ check_scsi_status:
atomic_read(&fcport->state));
if (atomic_read(&fcport->state) == FCS_ONLINE)
- qla2x00_mark_device_lost(fcport->vha, fcport, 1, 1);
+ qla2x00_mark_device_lost(fcport->vha, fcport, 1);
break;
case CS_ABORTED:
diff --git a/drivers/scsi/qla2xxx/qla_nx.c b/drivers/scsi/qla2xxx/qla_nx.c
index 2b2028f2383e..185c5f34d4c1 100644
--- a/drivers/scsi/qla2xxx/qla_nx.c
+++ b/drivers/scsi/qla2xxx/qla_nx.c
@@ -1612,8 +1612,7 @@ qla82xx_get_bootld_offset(struct qla_hw_data *ha)
return (u8 *)&ha->hablob->fw->data[offset];
}
-static __le32
-qla82xx_get_fw_size(struct qla_hw_data *ha)
+static u32 qla82xx_get_fw_size(struct qla_hw_data *ha)
{
struct qla82xx_uri_data_desc *uri_desc = NULL;
@@ -1624,7 +1623,7 @@ qla82xx_get_fw_size(struct qla_hw_data *ha)
return cpu_to_le32(uri_desc->size);
}
- return cpu_to_le32(*(u32 *)&ha->hablob->fw->data[FW_SIZE_OFFSET]);
+ return get_unaligned_le32(&ha->hablob->fw->data[FW_SIZE_OFFSET]);
}
static u8 *
@@ -1816,7 +1815,7 @@ qla82xx_fw_load_from_blob(struct qla_hw_data *ha)
}
flashaddr = FLASH_ADDR_START;
- size = (__force u32)qla82xx_get_fw_size(ha) / 8;
+ size = qla82xx_get_fw_size(ha) / 8;
ptr64 = (u64 *)qla82xx_get_fw_offs(ha);
for (i = 0; i < size; i++) {
@@ -1883,7 +1882,7 @@ qla82xx_set_product_offset(struct qla_hw_data *ha)
static int
qla82xx_validate_firmware_blob(scsi_qla_host_t *vha, uint8_t fw_type)
{
- __le32 val;
+ uint32_t val;
uint32_t min_size;
struct qla_hw_data *ha = vha->hw;
const struct firmware *fw = ha->hablob->fw;
@@ -1896,8 +1895,8 @@ qla82xx_validate_firmware_blob(scsi_qla_host_t *vha, uint8_t fw_type)
min_size = QLA82XX_URI_FW_MIN_SIZE;
} else {
- val = cpu_to_le32(*(u32 *)&fw->data[QLA82XX_FW_MAGIC_OFFSET]);
- if ((__force u32)val != QLA82XX_BDINFO_MAGIC)
+ val = get_unaligned_le32(&fw->data[QLA82XX_FW_MAGIC_OFFSET]);
+ if (val != QLA82XX_BDINFO_MAGIC)
return -EINVAL;
min_size = QLA82XX_FW_MIN_SIZE;
@@ -3030,7 +3029,7 @@ qla8xxx_dev_failed_handler(scsi_qla_host_t *vha)
/* Set DEV_FAILED flag to disable timer */
vha->device_flags |= DFLG_DEV_FAILED;
qla2x00_abort_all_cmds(vha, DID_NO_CONNECT << 16);
- qla2x00_mark_all_devices_lost(vha, 0);
+ qla2x00_mark_all_devices_lost(vha);
vha->flags.online = 0;
vha->flags.init_done = 0;
}
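The qla_nx.c hunks replace casts through __le32 with get_unaligned_le32(), which is both alignment-safe for arbitrary offsets into the firmware blob and endian-correct on any host. A portable stand-in showing the semantics (the kernel provides this helper itself; the function below is only an illustration):

#include <stdio.h>
#include <stdint.h>

/* Assemble the value byte by byte so the result is the same on big-
 * and little-endian hosts and no alignment is assumed. */
static uint32_t get_unaligned_le32(const void *p)
{
    const uint8_t *b = p;

    return (uint32_t)b[0] | (uint32_t)b[1] << 8 |
           (uint32_t)b[2] << 16 | (uint32_t)b[3] << 24;
}

int main(void)
{
    uint8_t fw[] = { 0x12, 0x34, 0x56, 0x78 };

    printf("0x%08x\n", get_unaligned_le32(fw));  /* 0x78563412 */
    return 0;
}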
diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c
index 8b84bc4a6ac8..b520a980d1dc 100644
--- a/drivers/scsi/qla2xxx/qla_os.c
+++ b/drivers/scsi/qla2xxx/qla_os.c
@@ -1110,7 +1110,7 @@ qla2x00_wait_for_sess_deletion(scsi_qla_host_t *vha)
{
u8 i;
- qla2x00_mark_all_devices_lost(vha, 0);
+ qla2x00_mark_all_devices_lost(vha);
for (i = 0; i < 10; i++) {
if (wait_event_timeout(vha->fcport_waitQ,
@@ -1667,7 +1667,7 @@ qla2x00_loop_reset(scsi_qla_host_t *vha)
if (ha->flags.enable_lip_full_login && !IS_CNA_CAPABLE(ha)) {
atomic_set(&vha->loop_state, LOOP_DOWN);
atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME);
- qla2x00_mark_all_devices_lost(vha, 0);
+ qla2x00_mark_all_devices_lost(vha);
ret = qla2x00_full_login_lip(vha);
if (ret != QLA_SUCCESS) {
ql_dbg(ql_dbg_taskm, vha, 0x802d,
@@ -3854,37 +3854,21 @@ void qla2x00_free_fcports(struct scsi_qla_host *vha)
}
static inline void
-qla2x00_schedule_rport_del(struct scsi_qla_host *vha, fc_port_t *fcport,
- int defer)
+qla2x00_schedule_rport_del(struct scsi_qla_host *vha, fc_port_t *fcport)
{
- struct fc_rport *rport;
- scsi_qla_host_t *base_vha;
- unsigned long flags;
+ int now;
if (!fcport->rport)
return;
- rport = fcport->rport;
- if (defer) {
- base_vha = pci_get_drvdata(vha->hw->pdev);
- spin_lock_irqsave(vha->host->host_lock, flags);
- fcport->drport = rport;
- spin_unlock_irqrestore(vha->host->host_lock, flags);
- qlt_do_generation_tick(vha, &base_vha->total_fcport_update_gen);
- set_bit(FCPORT_UPDATE_NEEDED, &base_vha->dpc_flags);
- qla2xxx_wake_dpc(base_vha);
- } else {
- int now;
-
- if (rport) {
- ql_dbg(ql_dbg_disc, fcport->vha, 0x2109,
- "%s %8phN. rport %p roles %x\n",
- __func__, fcport->port_name, rport,
- rport->roles);
- fc_remote_port_delete(rport);
- }
- qlt_do_generation_tick(vha, &now);
+ if (fcport->rport) {
+ ql_dbg(ql_dbg_disc, fcport->vha, 0x2109,
+ "%s %8phN. rport %p roles %x\n",
+ __func__, fcport->port_name, fcport->rport,
+ fcport->rport->roles);
+ fc_remote_port_delete(fcport->rport);
}
+ qlt_do_generation_tick(vha, &now);
}
/*
@@ -3897,18 +3881,18 @@ qla2x00_schedule_rport_del(struct scsi_qla_host *vha, fc_port_t *fcport,
* Context:
*/
void qla2x00_mark_device_lost(scsi_qla_host_t *vha, fc_port_t *fcport,
- int do_login, int defer)
+ int do_login)
{
if (IS_QLAFX00(vha->hw)) {
qla2x00_set_fcport_state(fcport, FCS_DEVICE_LOST);
- qla2x00_schedule_rport_del(vha, fcport, defer);
+ qla2x00_schedule_rport_del(vha, fcport);
return;
}
if (atomic_read(&fcport->state) == FCS_ONLINE &&
vha->vp_idx == fcport->vha->vp_idx) {
qla2x00_set_fcport_state(fcport, FCS_DEVICE_LOST);
- qla2x00_schedule_rport_del(vha, fcport, defer);
+ qla2x00_schedule_rport_del(vha, fcport);
}
/*
* We may need to retry the login, so don't change the state of the
@@ -3937,7 +3921,7 @@ void qla2x00_mark_device_lost(scsi_qla_host_t *vha, fc_port_t *fcport,
* Context:
*/
void
-qla2x00_mark_all_devices_lost(scsi_qla_host_t *vha, int defer)
+qla2x00_mark_all_devices_lost(scsi_qla_host_t *vha)
{
fc_port_t *fcport;
@@ -3957,13 +3941,6 @@ qla2x00_mark_all_devices_lost(scsi_qla_host_t *vha, int defer)
*/
if (atomic_read(&fcport->state) == FCS_DEVICE_DEAD)
continue;
- if (atomic_read(&fcport->state) == FCS_ONLINE) {
- qla2x00_set_fcport_state(fcport, FCS_DEVICE_LOST);
- if (defer)
- qla2x00_schedule_rport_del(vha, fcport, defer);
- else if (vha->vp_idx == fcport->vha->vp_idx)
- qla2x00_schedule_rport_del(vha, fcport, defer);
- }
}
}
@@ -4965,7 +4942,6 @@ int qla2x00_post_async_##name##_work( \
qla2x00_post_async_work(login, QLA_EVT_ASYNC_LOGIN);
qla2x00_post_async_work(logout, QLA_EVT_ASYNC_LOGOUT);
-qla2x00_post_async_work(logout_done, QLA_EVT_ASYNC_LOGOUT_DONE);
qla2x00_post_async_work(adisc, QLA_EVT_ASYNC_ADISC);
qla2x00_post_async_work(prlo, QLA_EVT_ASYNC_PRLO);
qla2x00_post_async_work(prlo_done, QLA_EVT_ASYNC_PRLO_DONE);
@@ -5032,7 +5008,7 @@ void qla24xx_sched_upd_fcport(fc_port_t *fcport)
fcport->jiffies_at_registration = jiffies;
fcport->sec_since_registration = 0;
fcport->next_disc_state = DSC_DELETED;
- fcport->disc_state = DSC_UPD_FCPORT;
+ qla2x00_set_fcport_disc_state(fcport, DSC_UPD_FCPORT);
spin_unlock_irqrestore(&fcport->vha->work_lock, flags);
queue_work(system_unbound_wq, &fcport->reg_work);
@@ -5253,10 +5229,6 @@ qla2x00_do_work(struct scsi_qla_host *vha)
case QLA_EVT_ASYNC_LOGOUT:
rc = qla2x00_async_logout(vha, e->u.logio.fcport);
break;
- case QLA_EVT_ASYNC_LOGOUT_DONE:
- qla2x00_async_logout_done(vha, e->u.logio.fcport,
- e->u.logio.data);
- break;
case QLA_EVT_ASYNC_ADISC:
qla2x00_async_adisc(vha, e->u.logio.fcport,
e->u.logio.data);
@@ -6899,13 +6871,13 @@ static void qla_pci_error_cleanup(scsi_qla_host_t *vha)
qpair->online = 0;
mutex_unlock(&ha->mq_lock);
- qla2x00_mark_all_devices_lost(vha, 0);
+ qla2x00_mark_all_devices_lost(vha);
spin_lock_irqsave(&ha->vport_slock, flags);
list_for_each_entry(vp, &ha->vp_list, list) {
atomic_inc(&vp->vref_count);
spin_unlock_irqrestore(&ha->vport_slock, flags);
- qla2x00_mark_all_devices_lost(vp, 0);
+ qla2x00_mark_all_devices_lost(vp);
spin_lock_irqsave(&ha->vport_slock, flags);
atomic_dec(&vp->vref_count);
}
@@ -7270,6 +7242,8 @@ qla2x00_module_init(void)
BUILD_BUG_ON(sizeof(struct sns_cmd_pkt) != 2064);
BUILD_BUG_ON(sizeof(struct verify_chip_entry_84xx) != 64);
BUILD_BUG_ON(sizeof(struct vf_evfp_entry_24xx) != 56);
+ BUILD_BUG_ON(sizeof(struct qla_flt_region) != 16);
+ BUILD_BUG_ON(sizeof(struct qla_flt_header) != 8);
/* Allocate cache for SRBs. */
srb_cachep = kmem_cache_create("qla2xxx_srbs", sizeof(srb_t), 0,
diff --git a/drivers/scsi/qla2xxx/qla_sup.c b/drivers/scsi/qla2xxx/qla_sup.c
index bbe90354f49b..76a38bf86cbc 100644
--- a/drivers/scsi/qla2xxx/qla_sup.c
+++ b/drivers/scsi/qla2xxx/qla_sup.c
@@ -669,8 +669,8 @@ qla2xxx_get_flt_info(scsi_qla_host_t *vha, uint32_t flt_addr)
struct qla_hw_data *ha = vha->hw;
uint32_t def = IS_QLA81XX(ha) ? 2 : IS_QLA25XX(ha) ? 1 : 0;
- struct qla_flt_header *flt = (void *)ha->flt;
- struct qla_flt_region *region = (void *)&flt[1];
+ struct qla_flt_header *flt = ha->flt;
+ struct qla_flt_region *region = &flt->region[0];
uint16_t *wptr, cnt, chksum;
uint32_t start;
@@ -2652,18 +2652,15 @@ qla28xx_get_flash_region(struct scsi_qla_host *vha, uint32_t start,
struct qla_flt_region *region)
{
struct qla_hw_data *ha = vha->hw;
- struct qla_flt_header *flt;
- struct qla_flt_region *flt_reg;
+ struct qla_flt_header *flt = ha->flt;
+ struct qla_flt_region *flt_reg = &flt->region[0];
uint16_t cnt;
int rval = QLA_FUNCTION_FAILED;
if (!ha->flt)
return QLA_FUNCTION_FAILED;
- flt = (struct qla_flt_header *)ha->flt;
- flt_reg = (struct qla_flt_region *)&flt[1];
cnt = le16_to_cpu(flt->length) / sizeof(struct qla_flt_region);
-
for (; cnt; cnt--, flt_reg++) {
if (flt_reg->start == start) {
memcpy((uint8_t *)region, flt_reg,
diff --git a/drivers/scsi/qla2xxx/qla_target.c b/drivers/scsi/qla2xxx/qla_target.c
index 68c14143e50e..70081b395fb2 100644
--- a/drivers/scsi/qla2xxx/qla_target.c
+++ b/drivers/scsi/qla2xxx/qla_target.c
@@ -596,7 +596,8 @@ static void qla2x00_async_nack_sp_done(srb_t *sp, int res)
spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags);
} else {
sp->fcport->login_retry = 0;
- sp->fcport->disc_state = DSC_LOGIN_COMPLETE;
+ qla2x00_set_fcport_disc_state(sp->fcport,
+ DSC_LOGIN_COMPLETE);
sp->fcport->deleted = 0;
sp->fcport->logout_on_delete = 1;
}
@@ -957,7 +958,7 @@ void qlt_free_session_done(struct work_struct *work)
struct qlt_plogi_ack_t *own =
sess->plogi_link[QLT_PLOGI_LINK_SAME_WWN];
- ql_dbg(ql_dbg_tgt_mgt, vha, 0xf084,
+ ql_dbg(ql_dbg_disc, vha, 0xf084,
"%s: se_sess %p / sess %p from port %8phC loop_id %#04x"
" s_id %02x:%02x:%02x logout %d keep %d els_logo %d\n",
__func__, sess->se_sess, sess, sess->port_name, sess->loop_id,
@@ -966,7 +967,7 @@ void qlt_free_session_done(struct work_struct *work)
sess->send_els_logo);
if (!IS_SW_RESV_ADDR(sess->d_id)) {
- qla2x00_mark_device_lost(vha, sess, 0, 0);
+ qla2x00_mark_device_lost(vha, sess, 0);
if (sess->send_els_logo) {
qlt_port_logo_t logo;
@@ -1024,7 +1025,7 @@ void qlt_free_session_done(struct work_struct *work)
while (!READ_ONCE(sess->logout_completed)) {
if (!traced) {
- ql_dbg(ql_dbg_tgt_mgt, vha, 0xf086,
+ ql_dbg(ql_dbg_disc, vha, 0xf086,
"%s: waiting for sess %p logout\n",
__func__, sess);
traced = true;
@@ -1045,6 +1046,10 @@ void qlt_free_session_done(struct work_struct *work)
(struct imm_ntfy_from_isp *)sess->iocb, SRB_NACK_LOGO);
}
+ spin_lock_irqsave(&vha->work_lock, flags);
+ sess->flags &= ~FCF_ASYNC_SENT;
+ spin_unlock_irqrestore(&vha->work_lock, flags);
+
spin_lock_irqsave(&ha->tgt.sess_lock, flags);
if (sess->se_sess) {
sess->se_sess = NULL;
@@ -1052,7 +1057,7 @@ void qlt_free_session_done(struct work_struct *work)
tgt->sess_count--;
}
- sess->disc_state = DSC_DELETED;
+ qla2x00_set_fcport_disc_state(sess, DSC_DELETED);
sess->fw_login_state = DSC_LS_PORT_UNAVAIL;
sess->deleted = QLA_SESS_DELETED;
@@ -1108,7 +1113,7 @@ void qlt_free_session_done(struct work_struct *work)
spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
sess->free_pending = 0;
- ql_dbg(ql_dbg_tgt_mgt, vha, 0xf001,
+ ql_dbg(ql_dbg_disc, vha, 0xf001,
"Unregistration of sess %p %8phC finished fcp_cnt %d\n",
sess, sess->port_name, vha->fcport_count);
@@ -1151,13 +1156,18 @@ void qlt_unreg_sess(struct fc_port *sess)
return;
}
sess->free_pending = 1;
+ /*
+ * Use FCF_ASYNC_SENT flag to block other cmds used in sess
+ * management from being sent.
+ */
+ sess->flags |= FCF_ASYNC_SENT;
spin_unlock_irqrestore(&sess->vha->work_lock, flags);
if (sess->se_sess)
vha->hw->tgt.tgt_ops->clear_nacl_from_fcport_map(sess);
sess->deleted = QLA_SESS_DELETION_IN_PROGRESS;
- sess->disc_state = DSC_DELETE_PEND;
+ qla2x00_set_fcport_disc_state(sess, DSC_DELETE_PEND);
sess->last_rscn_gen = sess->rscn_gen;
sess->last_login_gen = sess->login_gen;
@@ -1257,7 +1267,8 @@ void qlt_schedule_sess_for_deletion(struct fc_port *sess)
sess->deleted = QLA_SESS_DELETION_IN_PROGRESS;
spin_unlock_irqrestore(&sess->vha->work_lock, flags);
- sess->disc_state = DSC_DELETE_PEND;
+ sess->prli_pend_timer = 0;
+ qla2x00_set_fcport_disc_state(sess, DSC_DELETE_PEND);
qla24xx_chk_fcp_state(sess);
@@ -3446,13 +3457,13 @@ qlt_handle_dif_error(struct qla_qpair *qpair, struct qla_tgt_cmd *cmd,
cmd->trc_flags |= TRC_DIF_ERR;
- cmd->a_guard = be16_to_cpu(*(uint16_t *)(ap + 0));
- cmd->a_app_tag = be16_to_cpu(*(uint16_t *)(ap + 2));
- cmd->a_ref_tag = be32_to_cpu(*(uint32_t *)(ap + 4));
+ cmd->a_guard = get_unaligned_be16(ap + 0);
+ cmd->a_app_tag = get_unaligned_be16(ap + 2);
+ cmd->a_ref_tag = get_unaligned_be32(ap + 4);
- cmd->e_guard = be16_to_cpu(*(uint16_t *)(ep + 0));
- cmd->e_app_tag = be16_to_cpu(*(uint16_t *)(ep + 2));
- cmd->e_ref_tag = be32_to_cpu(*(uint32_t *)(ep + 4));
+ cmd->e_guard = get_unaligned_be16(ep + 0);
+ cmd->e_app_tag = get_unaligned_be16(ep + 2);
+ cmd->e_ref_tag = get_unaligned_be32(ep + 4);
ql_dbg(ql_dbg_tgt_dif, vha, 0xf075,
"%s: aborted %d state %d\n", __func__, cmd->aborted, cmd->state);
@@ -4579,7 +4590,7 @@ qlt_find_sess_invalidate_other(scsi_qla_host_t *vha, uint64_t wwn,
/* find other sess with nport_id collision */
if (port_id.b24 == other_sess->d_id.b24) {
if (loop_id != other_sess->loop_id) {
- ql_dbg(ql_dbg_tgt_tmr, vha, 0x1000c,
+ ql_dbg(ql_dbg_disc, vha, 0x1000c,
"Invalidating sess %p loop_id %d wwn %llx.\n",
other_sess, other_sess->loop_id, other_wwn);
@@ -4595,7 +4606,7 @@ qlt_find_sess_invalidate_other(scsi_qla_host_t *vha, uint64_t wwn,
* Another wwn used to have our s_id/loop_id
* kill the session, but don't free the loop_id
*/
- ql_dbg(ql_dbg_tgt_tmr, vha, 0xf01b,
+ ql_dbg(ql_dbg_disc, vha, 0xf01b,
"Invalidating sess %p loop_id %d wwn %llx.\n",
other_sess, other_sess->loop_id, other_wwn);
@@ -4610,7 +4621,7 @@ qlt_find_sess_invalidate_other(scsi_qla_host_t *vha, uint64_t wwn,
/* find other sess with nport handle collision */
if ((loop_id == other_sess->loop_id) &&
(loop_id != FC_NO_LOOP_ID)) {
- ql_dbg(ql_dbg_tgt_tmr, vha, 0x1000d,
+ ql_dbg(ql_dbg_disc, vha, 0x1000d,
"Invalidating sess %p loop_id %d wwn %llx.\n",
other_sess, other_sess->loop_id, other_wwn);
@@ -6053,7 +6064,7 @@ static fc_port_t *qlt_get_port_database(struct scsi_qla_host *vha,
if (!IS_SW_RESV_ADDR(fcport->d_id))
vha->fcport_count++;
fcport->login_gen++;
- fcport->disc_state = DSC_LOGIN_COMPLETE;
+ qla2x00_set_fcport_disc_state(fcport, DSC_LOGIN_COMPLETE);
fcport->login_succ = 1;
newfcport = 1;
}
diff --git a/drivers/scsi/qla2xxx/qla_target.h b/drivers/scsi/qla2xxx/qla_target.h
index d006f0a97b8c..6539499e9e95 100644
--- a/drivers/scsi/qla2xxx/qla_target.h
+++ b/drivers/scsi/qla2xxx/qla_target.h
@@ -379,8 +379,7 @@ static inline int get_datalen_for_atio(struct atio_from_isp *atio)
{
int len = atio->u.isp24.fcp_cmnd.add_cdb_len;
- return (be32_to_cpu(get_unaligned((uint32_t *)
- &atio->u.isp24.fcp_cmnd.add_cdb[len * 4])));
+ return get_unaligned_be32(&atio->u.isp24.fcp_cmnd.add_cdb[len * 4]);
}
#define CTIO_TYPE7 0x12 /* Continue target I/O entry (for 24xx) */
diff --git a/drivers/scsi/qla2xxx/qla_version.h b/drivers/scsi/qla2xxx/qla_version.h
index 03bd3b712b77..bb03c022e023 100644
--- a/drivers/scsi/qla2xxx/qla_version.h
+++ b/drivers/scsi/qla2xxx/qla_version.h
@@ -7,7 +7,7 @@
/*
* Driver version
*/
-#define QLA2XXX_VERSION "10.01.00.21-k"
+#define QLA2XXX_VERSION "10.01.00.22-k"
#define QLA_DRIVER_MAJOR_VER 10
#define QLA_DRIVER_MINOR_VER 1
diff --git a/drivers/scsi/qla4xxx/ql4_os.c b/drivers/scsi/qla4xxx/ql4_os.c
index 2323432a0edb..5504ab11decc 100644
--- a/drivers/scsi/qla4xxx/ql4_os.c
+++ b/drivers/scsi/qla4xxx/ql4_os.c
@@ -4145,7 +4145,7 @@ static void qla4xxx_mem_free(struct scsi_qla_host *ha)
dma_free_coherent(&ha->pdev->dev, ha->queues_len, ha->queues,
ha->queues_dma);
- if (ha->fw_dump)
+ if (ha->fw_dump)
vfree(ha->fw_dump);
ha->queues_len = 0;
diff --git a/drivers/scsi/scsi_ioctl.c b/drivers/scsi/scsi_ioctl.c
index 57bcd05605bf..8f3af87b6bb0 100644
--- a/drivers/scsi/scsi_ioctl.c
+++ b/drivers/scsi/scsi_ioctl.c
@@ -189,17 +189,7 @@ static int scsi_ioctl_get_pci(struct scsi_device *sdev, void __user *arg)
}
-/**
- * scsi_ioctl - Dispatch ioctl to scsi device
- * @sdev: scsi device receiving ioctl
- * @cmd: which ioctl is it
- * @arg: data associated with ioctl
- *
- * Description: The scsi_ioctl() function differs from most ioctls in that it
- * does not take a major/minor number as the dev field. Rather, it takes
- * a pointer to a &struct scsi_device.
- */
-int scsi_ioctl(struct scsi_device *sdev, int cmd, void __user *arg)
+static int scsi_ioctl_common(struct scsi_device *sdev, int cmd, void __user *arg)
{
char scsi_cmd[MAX_COMMAND_SIZE];
struct scsi_sense_hdr sense_hdr;
@@ -266,14 +256,50 @@ int scsi_ioctl(struct scsi_device *sdev, int cmd, void __user *arg)
return scsi_ioctl_get_pci(sdev, arg);
case SG_SCSI_RESET:
return scsi_ioctl_reset(sdev, arg);
- default:
- if (sdev->host->hostt->ioctl)
- return sdev->host->hostt->ioctl(sdev, cmd, arg);
}
+ return -ENOIOCTLCMD;
+}
+
+/**
+ * scsi_ioctl - Dispatch ioctl to scsi device
+ * @sdev: scsi device receiving ioctl
+ * @cmd: which ioctl is it
+ * @arg: data associated with ioctl
+ *
+ * Description: The scsi_ioctl() function differs from most ioctls in that it
+ * does not take a major/minor number as the dev field. Rather, it takes
+ * a pointer to a &struct scsi_device.
+ */
+int scsi_ioctl(struct scsi_device *sdev, int cmd, void __user *arg)
+{
+ int ret = scsi_ioctl_common(sdev, cmd, arg);
+
+ if (ret != -ENOIOCTLCMD)
+ return ret;
+
+ if (sdev->host->hostt->ioctl)
+ return sdev->host->hostt->ioctl(sdev, cmd, arg);
+
return -EINVAL;
}
EXPORT_SYMBOL(scsi_ioctl);
+#ifdef CONFIG_COMPAT
+int scsi_compat_ioctl(struct scsi_device *sdev, int cmd, void __user *arg)
+{
+ int ret = scsi_ioctl_common(sdev, cmd, arg);
+
+ if (ret != -ENOIOCTLCMD)
+ return ret;
+
+ if (sdev->host->hostt->compat_ioctl)
+ return sdev->host->hostt->compat_ioctl(sdev, cmd, arg);
+
+ return ret;
+}
+EXPORT_SYMBOL(scsi_compat_ioctl);
+#endif
+
/*
* We can process a reset even when a device isn't fully operable.
*/
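The split into scsi_ioctl_common() plus thin native and compat wrappers follows a standard dispatch shape: the shared handler claims the generic commands and returns -ENOIOCTLCMD for everything else, and each wrapper then falls through to its own host-template hook. A self-contained sketch of the idiom, with placeholder command numbers rather than the SCSI ioctl set:

#include <errno.h>
#include <stdio.h>

enum { CMD_GENERIC = 1, CMD_DRIVER = 2 };

/* Shared handler: claims only the generic commands. */
static int common_ioctl(int cmd)
{
    switch (cmd) {
    case CMD_GENERIC:
        return 0;
    }
    return -ENOIOCTLCMD;        /* not ours: let the wrapper decide */
}

/* Host-specific hook, tried only when the common path declines. */
static int driver_ioctl(int cmd)
{
    return cmd == CMD_DRIVER ? 0 : -EINVAL;
}

static int native_ioctl(int cmd)
{
    int ret = common_ioctl(cmd);

    if (ret != -ENOIOCTLCMD)
        return ret;
    return driver_ioctl(cmd);
}

int main(void)
{
    printf("%d %d %d\n", native_ioctl(CMD_GENERIC),
           native_ioctl(CMD_DRIVER), native_ioctl(99));  /* 0 0 -EINVAL */
    return 0;
}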
diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
index 3e7a45d0daca..610ee41fa54c 100644
--- a/drivers/scsi/scsi_lib.c
+++ b/drivers/scsi/scsi_lib.c
@@ -2108,6 +2108,8 @@ scsi_mode_sense(struct scsi_device *sdev, int dbd, int modepage,
memset(data, 0, sizeof(*data));
memset(&cmd[0], 0, 12);
+
+ dbd = sdev->set_dbd_for_ms ? 8 : dbd;
cmd[1] = dbd & 0x18; /* allows DBD and LLBA bits */
cmd[2] = modepage;
diff --git a/drivers/scsi/scsi_logging.h b/drivers/scsi/scsi_logging.h
index 836185de28c4..3df877886119 100644
--- a/drivers/scsi/scsi_logging.h
+++ b/drivers/scsi/scsi_logging.h
@@ -53,7 +53,7 @@ do { \
} while (0)
#else
#define SCSI_LOG_LEVEL(SHIFT, BITS) 0
-#define SCSI_CHECK_LOGGING(SHIFT, BITS, LEVEL, CMD)
+#define SCSI_CHECK_LOGGING(SHIFT, BITS, LEVEL, CMD) do { } while (0)
#endif /* CONFIG_SCSI_LOGGING */
/*
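The scsi_logging.h change gives the stubbed-out macro a do { } while (0) body. With an empty expansion, a call such as "if (cond) SCSI_CHECK_LOGGING(...);" collapses to "if (cond);", which draws -Wempty-body warnings and differs in shape from the multi-statement CONFIG_SCSI_LOGGING=y variant; the do/while form stays a single real statement in both configurations. A tiny illustration with a hypothetical LOG_CMD macro:

#include <stdio.h>

/* The do { } while (0) body makes the macro one complete statement,
 * so it composes safely inside if/else and never leaves a bare ";"
 * as the branch body. */
#define LOG_CMD(x) do { } while (0)

int main(void)
{
    int verbose = 0;

    if (verbose)
        LOG_CMD(1);
    else
        printf("quiet\n");
    return 0;
}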
diff --git a/drivers/scsi/scsi_transport_iscsi.c b/drivers/scsi/scsi_transport_iscsi.c
index ed8d9709b9b9..dfc726fa34e3 100644
--- a/drivers/scsi/scsi_transport_iscsi.c
+++ b/drivers/scsi/scsi_transport_iscsi.c
@@ -2093,7 +2093,12 @@ int iscsi_add_session(struct iscsi_cls_session *session, unsigned int target_id)
"could not register session's dev\n");
goto release_ida;
}
- transport_register_device(&session->dev);
+ err = transport_register_device(&session->dev);
+ if (err) {
+ iscsi_cls_session_printk(KERN_ERR, session,
+ "could not register transport's dev\n");
+ goto release_dev;
+ }
spin_lock_irqsave(&sesslock, flags);
list_add(&session->sess_list, &sesslist);
@@ -2103,6 +2108,8 @@ int iscsi_add_session(struct iscsi_cls_session *session, unsigned int target_id)
ISCSI_DBG_TRANS_SESSION(session, "Completed session adding\n");
return 0;
+release_dev:
+ device_del(&session->dev);
release_ida:
if (session->ida_used)
ida_simple_remove(&iscsi_sess_ida, session->target_id);
@@ -2263,7 +2270,12 @@ iscsi_create_conn(struct iscsi_cls_session *session, int dd_size, uint32_t cid)
"register connection's dev\n");
goto release_parent_ref;
}
- transport_register_device(&conn->dev);
+ err = transport_register_device(&conn->dev);
+ if (err) {
+ iscsi_cls_session_printk(KERN_ERR, session, "could not "
+ "register transport's dev\n");
+ goto release_conn_ref;
+ }
spin_lock_irqsave(&connlock, flags);
list_add(&conn->conn_list, &connlist);
@@ -2272,6 +2284,8 @@ iscsi_create_conn(struct iscsi_cls_session *session, int dd_size, uint32_t cid)
ISCSI_DBG_TRANS_CONN(conn, "Completed conn creation\n");
return conn;
+release_conn_ref:
+ put_device(&conn->dev);
release_parent_ref:
put_device(&session->dev);
free_conn:
@@ -2947,6 +2961,24 @@ iscsi_set_path(struct iscsi_transport *transport, struct iscsi_uevent *ev)
return err;
}
+static int iscsi_session_has_conns(int sid)
+{
+ struct iscsi_cls_conn *conn;
+ unsigned long flags;
+ int found = 0;
+
+ spin_lock_irqsave(&connlock, flags);
+ list_for_each_entry(conn, &connlist, conn_list) {
+ if (iscsi_conn_get_sid(conn) == sid) {
+ found = 1;
+ break;
+ }
+ }
+ spin_unlock_irqrestore(&connlock, flags);
+
+ return found;
+}
+
static int
iscsi_set_iface_params(struct iscsi_transport *transport,
struct iscsi_uevent *ev, uint32_t len)
@@ -3524,10 +3556,12 @@ iscsi_if_recv_msg(struct sk_buff *skb, struct nlmsghdr *nlh, uint32_t *group)
break;
case ISCSI_UEVENT_DESTROY_SESSION:
session = iscsi_session_lookup(ev->u.d_session.sid);
- if (session)
- transport->destroy_session(session);
- else
+ if (!session)
err = -EINVAL;
+ else if (iscsi_session_has_conns(ev->u.d_session.sid))
+ err = -EBUSY;
+ else
+ transport->destroy_session(session);
break;
case ISCSI_UEVENT_UNBIND_SESSION:
session = iscsi_session_lookup(ev->u.d_session.sid);
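The iscsi transport fixes above use the kernel's usual reverse-order unwind: each successfully completed registration gets its own label, and a failure jumps to the label that tears down exactly what was set up so far (device_del for the session, put_device for the connection). A compact sketch of the ladder with placeholder steps, not the iscsi transport API:

#include <stdio.h>

static int register_dev(void)       { return 0; }
static int register_transport(void) { return -1; }   /* force the failure path */
static void unregister_dev(void)    { puts("unwound dev"); }

/* One label per completed step, unwinding in reverse order. */
static int add_session(void)
{
    int err;

    err = register_dev();
    if (err)
        goto out;
    err = register_transport();
    if (err)
        goto release_dev;
    return 0;

release_dev:
    unregister_dev();
out:
    return err;
}

int main(void)
{
    printf("add_session() = %d\n", add_session());
    return 0;
}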
diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
index cea625906440..8ca9299ffd36 100644
--- a/drivers/scsi/sd.c
+++ b/drivers/scsi/sd.c
@@ -1465,13 +1465,12 @@ static int sd_getgeo(struct block_device *bdev, struct hd_geometry *geo)
* Note: most ioctls are forwarded onto the block subsystem or further
* down in the scsi subsystem.
**/
-static int sd_ioctl(struct block_device *bdev, fmode_t mode,
- unsigned int cmd, unsigned long arg)
+static int sd_ioctl_common(struct block_device *bdev, fmode_t mode,
+ unsigned int cmd, void __user *p)
{
struct gendisk *disk = bdev->bd_disk;
struct scsi_disk *sdkp = scsi_disk(disk);
struct scsi_device *sdp = sdkp->device;
- void __user *p = (void __user *)arg;
int error;
SCSI_LOG_IOCTL(1, sd_printk(KERN_INFO, sdkp, "sd_ioctl: disk=%s, "
@@ -1507,9 +1506,6 @@ static int sd_ioctl(struct block_device *bdev, fmode_t mode,
break;
default:
error = scsi_cmd_blk_ioctl(bdev, mode, cmd, p);
- if (error != -ENOTTY)
- break;
- error = scsi_ioctl(sdp, cmd, p);
break;
}
out:
@@ -1691,39 +1687,31 @@ static void sd_rescan(struct device *dev)
revalidate_disk(sdkp->disk);
}
+static int sd_ioctl(struct block_device *bdev, fmode_t mode,
+ unsigned int cmd, unsigned long arg)
+{
+ void __user *p = (void __user *)arg;
+ int ret;
+
+ ret = sd_ioctl_common(bdev, mode, cmd, p);
+ if (ret != -ENOTTY)
+ return ret;
+
+ return scsi_ioctl(scsi_disk(bdev->bd_disk)->device, cmd, p);
+}
#ifdef CONFIG_COMPAT
-/*
- * This gets directly called from VFS. When the ioctl
- * is not recognized we go back to the other translation paths.
- */
static int sd_compat_ioctl(struct block_device *bdev, fmode_t mode,
unsigned int cmd, unsigned long arg)
{
- struct gendisk *disk = bdev->bd_disk;
- struct scsi_disk *sdkp = scsi_disk(disk);
- struct scsi_device *sdev = sdkp->device;
void __user *p = compat_ptr(arg);
- int error;
-
- error = scsi_verify_blk_ioctl(bdev, cmd);
- if (error < 0)
- return error;
+ int ret;
- error = scsi_ioctl_block_when_processing_errors(sdev, cmd,
- (mode & FMODE_NDELAY) != 0);
- if (error)
- return error;
+ ret = sd_ioctl_common(bdev, mode, cmd, p);
+ if (ret != -ENOTTY)
+ return ret;
- if (is_sed_ioctl(cmd))
- return sed_ioctl(sdkp->opal_dev, cmd, p);
-
- /*
- * Let the static ioctl translation table take care of it.
- */
- if (!sdev->host->hostt->compat_ioctl)
- return -ENOIOCTLCMD;
- return sdev->host->hostt->compat_ioctl(sdev, cmd, p);
+ return scsi_compat_ioctl(scsi_disk(bdev->bd_disk)->device, cmd, p);
}
#endif
@@ -2211,8 +2199,10 @@ static int sd_read_protection_type(struct scsi_disk *sdkp, unsigned char *buffer
u8 type;
int ret = 0;
- if (scsi_device_protection(sdp) == 0 || (buffer[12] & 1) == 0)
+ if (scsi_device_protection(sdp) == 0 || (buffer[12] & 1) == 0) {
+ sdkp->protection_type = 0;
return ret;
+ }
type = ((buffer[12] >> 1) & 7) + 1; /* P_TYPE 0 = Type 1 */
@@ -2956,15 +2946,16 @@ static void sd_read_block_characteristics(struct scsi_disk *sdkp)
q->limits.zoned = BLK_ZONED_HM;
} else {
sdkp->zoned = (buffer[8] >> 4) & 3;
- if (sdkp->zoned == 1)
+ if (sdkp->zoned == 1 && !disk_has_partitions(sdkp->disk)) {
/* Host-aware */
q->limits.zoned = BLK_ZONED_HA;
- else
+ } else {
/*
- * Treat drive-managed devices as
- * regular block devices.
+ * Treat drive-managed devices and host-aware devices
+ * with partitions as regular block devices.
*/
q->limits.zoned = BLK_ZONED_NONE;
+ }
}
if (blk_queue_is_zoned(q) && sdkp->first_scan)
sd_printk(KERN_NOTICE, sdkp, "Host-%s zoned block device\n",
diff --git a/drivers/scsi/sd_zbc.c b/drivers/scsi/sd_zbc.c
index e0bd4cf17230..e4282bce5834 100644
--- a/drivers/scsi/sd_zbc.c
+++ b/drivers/scsi/sd_zbc.c
@@ -325,22 +325,21 @@ static int sd_zbc_check_zoned_characteristics(struct scsi_disk *sdkp,
}
/**
- * sd_zbc_check_zones - Check the device capacity and zone sizes
+ * sd_zbc_check_capacity - Check the device capacity
* @sdkp: Target disk
+ * @buf: command buffer
+ * @zblock: zone size in number of blocks
*
- * Check that the device capacity as reported by READ CAPACITY matches the
- * max_lba value (plus one) of the report zones command reply. Also check that
- * all zones of the device have an equal size, only allowing the last zone of
- * the disk to have a smaller size (runt zone). The zone size must also be a
- * power of two.
+ * Get the device zone size and check that the device capacity as reported
+ * by READ CAPACITY matches the max_lba value (plus one) of the report zones
+ * command reply for devices with RC_BASIS == 0.
*
- * Returns the zone size in number of blocks upon success or an error code
- * upon failure.
+ * Returns 0 upon success or an error code upon failure.
*/
-static int sd_zbc_check_zones(struct scsi_disk *sdkp, unsigned char *buf,
- u32 *zblocks)
+static int sd_zbc_check_capacity(struct scsi_disk *sdkp, unsigned char *buf,
+ u32 *zblocks)
{
- u64 zone_blocks = 0;
+ u64 zone_blocks;
sector_t max_lba;
unsigned char *rec;
int ret;
@@ -363,17 +362,9 @@ static int sd_zbc_check_zones(struct scsi_disk *sdkp, unsigned char *buf,
}
}
- /* Parse REPORT ZONES header */
+ /* Get the size of the first reported zone */
rec = buf + 64;
zone_blocks = get_unaligned_be64(&rec[8]);
- if (!zone_blocks || !is_power_of_2(zone_blocks)) {
- if (sdkp->first_scan)
- sd_printk(KERN_NOTICE, sdkp,
- "Devices with non power of 2 zone "
- "size are not supported\n");
- return -ENODEV;
- }
-
if (logical_to_sectors(sdkp->device, zone_blocks) > UINT_MAX) {
if (sdkp->first_scan)
sd_printk(KERN_NOTICE, sdkp,
@@ -405,11 +396,8 @@ int sd_zbc_read_zones(struct scsi_disk *sdkp, unsigned char *buf)
if (ret)
goto err;
- /*
- * Check zone size: only devices with a constant zone size (except
- * an eventual last runt zone) that is a power of 2 are supported.
- */
- ret = sd_zbc_check_zones(sdkp, buf, &zone_blocks);
+ /* Check the device capacity reported by report zones */
+ ret = sd_zbc_check_capacity(sdkp, buf, &zone_blocks);
if (ret != 0)
goto err;
diff --git a/drivers/scsi/sg.c b/drivers/scsi/sg.c
index 160748ad9c0f..bafeaf7b9ad8 100644
--- a/drivers/scsi/sg.c
+++ b/drivers/scsi/sg.c
@@ -405,6 +405,38 @@ sg_release(struct inode *inode, struct file *filp)
return 0;
}
+static int get_sg_io_pack_id(int *pack_id, void __user *buf, size_t count)
+{
+ struct sg_header __user *old_hdr = buf;
+ int reply_len;
+
+ if (count >= SZ_SG_HEADER) {
+ /* negative reply_len means v3 format, otherwise v1/v2 */
+ if (get_user(reply_len, &old_hdr->reply_len))
+ return -EFAULT;
+
+ if (reply_len >= 0)
+ return get_user(*pack_id, &old_hdr->pack_id);
+
+ if (in_compat_syscall() &&
+ count >= sizeof(struct compat_sg_io_hdr)) {
+ struct compat_sg_io_hdr __user *hp = buf;
+
+ return get_user(*pack_id, &hp->pack_id);
+ }
+
+ if (count >= sizeof(struct sg_io_hdr)) {
+ struct sg_io_hdr __user *hp = buf;
+
+ return get_user(*pack_id, &hp->pack_id);
+ }
+ }
+
+ /* no valid header was passed, so ignore the pack_id */
+ *pack_id = -1;
+ return 0;
+}
+
static ssize_t
sg_read(struct file *filp, char __user *buf, size_t count, loff_t * ppos)
{
@@ -413,8 +445,8 @@ sg_read(struct file *filp, char __user *buf, size_t count, loff_t * ppos)
Sg_request *srp;
int req_pack_id = -1;
sg_io_hdr_t *hp;
- struct sg_header *old_hdr = NULL;
- int retval = 0;
+ struct sg_header *old_hdr;
+ int retval;
/*
* This could cause a response to be stranded. Close the associated
@@ -429,79 +461,34 @@ sg_read(struct file *filp, char __user *buf, size_t count, loff_t * ppos)
SCSI_LOG_TIMEOUT(3, sg_printk(KERN_INFO, sdp,
"sg_read: count=%d\n", (int) count));
- if (sfp->force_packid && (count >= SZ_SG_HEADER)) {
- old_hdr = memdup_user(buf, SZ_SG_HEADER);
- if (IS_ERR(old_hdr))
- return PTR_ERR(old_hdr);
- if (old_hdr->reply_len < 0) {
- if (count >= SZ_SG_IO_HDR) {
- /*
- * This is stupid.
- *
- * We're copying the whole sg_io_hdr_t from user
- * space just to get the 'pack_id' field. But the
- * field is at different offsets for the compat
- * case, so we'll use "get_sg_io_hdr()" to copy
- * the whole thing and convert it.
- *
- * We could do something like just calculating the
- * offset based of 'in_compat_syscall()', but the
- * 'compat_sg_io_hdr' definition is in the wrong
- * place for that.
- */
- sg_io_hdr_t *new_hdr;
- new_hdr = kmalloc(SZ_SG_IO_HDR, GFP_KERNEL);
- if (!new_hdr) {
- retval = -ENOMEM;
- goto free_old_hdr;
- }
- retval = get_sg_io_hdr(new_hdr, buf);
- req_pack_id = new_hdr->pack_id;
- kfree(new_hdr);
- if (retval) {
- retval = -EFAULT;
- goto free_old_hdr;
- }
- }
- } else
- req_pack_id = old_hdr->pack_id;
- }
+ if (sfp->force_packid) {
+ retval = get_sg_io_pack_id(&req_pack_id, buf, count);
+ if (retval)
+ return retval;
+ }
+
srp = sg_get_rq_mark(sfp, req_pack_id);
if (!srp) { /* now wait on packet to arrive */
- if (atomic_read(&sdp->detaching)) {
- retval = -ENODEV;
- goto free_old_hdr;
- }
- if (filp->f_flags & O_NONBLOCK) {
- retval = -EAGAIN;
- goto free_old_hdr;
- }
+ if (atomic_read(&sdp->detaching))
+ return -ENODEV;
+ if (filp->f_flags & O_NONBLOCK)
+ return -EAGAIN;
retval = wait_event_interruptible(sfp->read_wait,
(atomic_read(&sdp->detaching) ||
(srp = sg_get_rq_mark(sfp, req_pack_id))));
- if (atomic_read(&sdp->detaching)) {
- retval = -ENODEV;
- goto free_old_hdr;
- }
- if (retval) {
+ if (atomic_read(&sdp->detaching))
+ return -ENODEV;
+ if (retval)
/* -ERESTARTSYS as signal hit process */
- goto free_old_hdr;
- }
- }
- if (srp->header.interface_id != '\0') {
- retval = sg_new_read(sfp, buf, count, srp);
- goto free_old_hdr;
+ return retval;
}
+ if (srp->header.interface_id != '\0')
+ return sg_new_read(sfp, buf, count, srp);
hp = &srp->header;
- if (old_hdr == NULL) {
- old_hdr = kmalloc(SZ_SG_HEADER, GFP_KERNEL);
- if (! old_hdr) {
- retval = -ENOMEM;
- goto free_old_hdr;
- }
- }
- memset(old_hdr, 0, SZ_SG_HEADER);
+ old_hdr = kzalloc(SZ_SG_HEADER, GFP_KERNEL);
+ if (!old_hdr)
+ return -ENOMEM;
+
old_hdr->reply_len = (int) hp->timeout;
old_hdr->pack_len = old_hdr->reply_len; /* old, strange behaviour */
old_hdr->pack_id = hp->pack_id;
@@ -575,7 +562,12 @@ sg_new_read(Sg_fd * sfp, char __user *buf, size_t count, Sg_request * srp)
int err = 0, err2;
int len;
- if (count < SZ_SG_IO_HDR) {
+ if (in_compat_syscall()) {
+ if (count < sizeof(struct compat_sg_io_hdr)) {
+ err = -EINVAL;
+ goto err_out;
+ }
+ } else if (count < SZ_SG_IO_HDR) {
err = -EINVAL;
goto err_out;
}
@@ -919,19 +911,14 @@ static int put_compat_request_table(struct compat_sg_req_info __user *o,
#endif
static long
-sg_ioctl(struct file *filp, unsigned int cmd_in, unsigned long arg)
+sg_ioctl_common(struct file *filp, Sg_device *sdp, Sg_fd *sfp,
+ unsigned int cmd_in, void __user *p)
{
- void __user *p = (void __user *)arg;
int __user *ip = p;
int result, val, read_only;
- Sg_device *sdp;
- Sg_fd *sfp;
Sg_request *srp;
unsigned long iflags;
- if ((!(sfp = (Sg_fd *) filp->private_data)) || (!(sdp = sfp->parentdp)))
- return -ENXIO;
-
SCSI_LOG_TIMEOUT(3, sg_printk(KERN_INFO, sdp,
"sg_ioctl: cmd=0x%x\n", (int) cmd_in));
read_only = (O_RDWR != (filp->f_flags & O_ACCMODE));
@@ -1154,29 +1141,44 @@ sg_ioctl(struct file *filp, unsigned int cmd_in, unsigned long arg)
cmd_in, filp->f_flags & O_NDELAY);
if (result)
return result;
+
+ return -ENOIOCTLCMD;
+}
+
+static long
+sg_ioctl(struct file *filp, unsigned int cmd_in, unsigned long arg)
+{
+ void __user *p = (void __user *)arg;
+ Sg_device *sdp;
+ Sg_fd *sfp;
+ int ret;
+
+ if ((!(sfp = (Sg_fd *) filp->private_data)) || (!(sdp = sfp->parentdp)))
+ return -ENXIO;
+
+ ret = sg_ioctl_common(filp, sdp, sfp, cmd_in, p);
+ if (ret != -ENOIOCTLCMD)
+ return ret;
+
return scsi_ioctl(sdp->device, cmd_in, p);
}
#ifdef CONFIG_COMPAT
static long sg_compat_ioctl(struct file *filp, unsigned int cmd_in, unsigned long arg)
{
+ void __user *p = compat_ptr(arg);
Sg_device *sdp;
Sg_fd *sfp;
- struct scsi_device *sdev;
+ int ret;
if ((!(sfp = (Sg_fd *) filp->private_data)) || (!(sdp = sfp->parentdp)))
return -ENXIO;
- sdev = sdp->device;
- if (sdev->host->hostt->compat_ioctl) {
- int ret;
-
- ret = sdev->host->hostt->compat_ioctl(sdev, cmd_in, (void __user *)arg);
-
+ ret = sg_ioctl_common(filp, sdp, sfp, cmd_in, p);
+ if (ret != -ENOIOCTLCMD)
return ret;
- }
-
- return -ENOIOCTLCMD;
+
+ return scsi_compat_ioctl(sdp->device, cmd_in, p);
}
#endif
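The sg_ioctl() split above is the first instance of a pattern repeated in st.c and sr.c below: one common handler owns the driver-private commands and returns -ENOIOCTLCMD for everything else, so the native and compat entry points differ only in how they form the user pointer and which mid-layer fallback they call. A minimal sketch of the shape; all my_*() names are hypothetical:

/* Minimal sketch of the dispatch shape; my_*() names are hypothetical. */
static long my_ioctl_common(struct file *filp, unsigned int cmd,
			    void __user *p)
{
	switch (cmd) {
	case 0x1234:			/* hypothetical private command */
		return 0;		/* handled here */
	}
	return -ENOIOCTLCMD;		/* not ours: caller falls back */
}

static long my_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
	long ret = my_ioctl_common(filp, cmd, (void __user *)arg);

	return ret != -ENOIOCTLCMD ? ret : my_fallback(filp, cmd, arg);
}

#ifdef CONFIG_COMPAT
static long my_compat_ioctl(struct file *filp, unsigned int cmd,
			    unsigned long arg)
{
	/* compat_ptr() is the only per-path difference */
	long ret = my_ioctl_common(filp, cmd, compat_ptr(arg));

	return ret != -ENOIOCTLCMD ? ret : my_compat_fallback(filp, cmd, arg);
}
#endif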
diff --git a/drivers/scsi/smartpqi/smartpqi_init.c b/drivers/scsi/smartpqi/smartpqi_init.c
index 412ac56ecd60..b7492568e02f 100644
--- a/drivers/scsi/smartpqi/smartpqi_init.c
+++ b/drivers/scsi/smartpqi/smartpqi_init.c
@@ -7457,7 +7457,7 @@ static int pqi_pci_init(struct pqi_ctrl_info *ctrl_info)
goto disable_device;
}
- ctrl_info->iomem_base = ioremap_nocache(pci_resource_start(
+ ctrl_info->iomem_base = ioremap(pci_resource_start(
ctrl_info->pci_dev, 0),
sizeof(struct pqi_ctrl_registers));
if (!ctrl_info->iomem_base) {
diff --git a/drivers/scsi/sni_53c710.c b/drivers/scsi/sni_53c710.c
index a85d52b5dc32..f8397978f8ab 100644
--- a/drivers/scsi/sni_53c710.c
+++ b/drivers/scsi/sni_53c710.c
@@ -71,7 +71,7 @@ static int snirm710_probe(struct platform_device *dev)
hostdata->dev = &dev->dev;
dma_set_mask(&dev->dev, DMA_BIT_MASK(32));
- hostdata->base = ioremap_nocache(base, 0x100);
+ hostdata->base = ioremap(base, 0x100);
hostdata->differential = 0;
hostdata->clock = SNIRM710_CLOCK;
diff --git a/drivers/scsi/sr.c b/drivers/scsi/sr.c
index 4664fdf75c0f..0fbb8fe6e521 100644
--- a/drivers/scsi/sr.c
+++ b/drivers/scsi/sr.c
@@ -38,6 +38,7 @@
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/bio.h>
+#include <linux/compat.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/cdrom.h>
@@ -598,6 +599,51 @@ out:
return ret;
}
+#ifdef CONFIG_COMPAT
+static int sr_block_compat_ioctl(struct block_device *bdev, fmode_t mode, unsigned cmd,
+ unsigned long arg)
+{
+ struct scsi_cd *cd = scsi_cd(bdev->bd_disk);
+ struct scsi_device *sdev = cd->device;
+ void __user *argp = compat_ptr(arg);
+ int ret;
+
+ mutex_lock(&sr_mutex);
+
+ ret = scsi_ioctl_block_when_processing_errors(sdev, cmd,
+ (mode & FMODE_NDELAY) != 0);
+ if (ret)
+ goto out;
+
+ scsi_autopm_get_device(sdev);
+
+ /*
+ * Send SCSI addressing ioctls directly to mid level, send other
+ * ioctls to cdrom/block level.
+ */
+ switch (cmd) {
+ case SCSI_IOCTL_GET_IDLUN:
+ case SCSI_IOCTL_GET_BUS_NUMBER:
+ ret = scsi_compat_ioctl(sdev, cmd, argp);
+ goto put;
+ }
+
+ ret = cdrom_ioctl(&cd->cdi, bdev, mode, cmd, (unsigned long)argp);
+ if (ret != -ENOSYS)
+ goto put;
+
+ ret = scsi_compat_ioctl(sdev, cmd, argp);
+
+put:
+ scsi_autopm_put_device(sdev);
+
+out:
+ mutex_unlock(&sr_mutex);
+ return ret;
+}
+#endif
+
static unsigned int sr_block_check_events(struct gendisk *disk,
unsigned int clearing)
{
@@ -641,12 +687,11 @@ static const struct block_device_operations sr_bdops =
.open = sr_block_open,
.release = sr_block_release,
.ioctl = sr_block_ioctl,
+#ifdef CONFIG_COMPAT
+ .compat_ioctl = sr_block_compat_ioctl,
+#endif
.check_events = sr_block_check_events,
.revalidate_disk = sr_block_revalidate_disk,
- /*
- * No compat_ioctl for now because sr_block_ioctl never
- * seems to pass arbitrary ioctls down to host drivers.
- */
};
static int sr_open(struct cdrom_device_info *cdi, int purpose)
diff --git a/drivers/scsi/st.c b/drivers/scsi/st.c
index 9e3fff2de83e..393f3019ccac 100644
--- a/drivers/scsi/st.c
+++ b/drivers/scsi/st.c
@@ -3501,7 +3501,7 @@ out:
/* The ioctl command */
-static long st_ioctl(struct file *file, unsigned int cmd_in, unsigned long arg)
+static long st_ioctl_common(struct file *file, unsigned int cmd_in, void __user *p)
{
int i, cmd_nr, cmd_type, bt;
int retval = 0;
@@ -3509,7 +3509,6 @@ static long st_ioctl(struct file *file, unsigned int cmd_in, unsigned long arg)
struct scsi_tape *STp = file->private_data;
struct st_modedef *STm;
struct st_partstat *STps;
- void __user *p = (void __user *)arg;
if (mutex_lock_interruptible(&STp->lock))
return -ERESTARTSYS;
@@ -3824,9 +3823,19 @@ static long st_ioctl(struct file *file, unsigned int cmd_in, unsigned long arg)
}
mutex_unlock(&STp->lock);
switch (cmd_in) {
+ case SCSI_IOCTL_STOP_UNIT:
+ /* unload */
+ retval = scsi_ioctl(STp->device, cmd_in, p);
+ if (!retval) {
+ STp->rew_at_close = 0;
+ STp->ready = ST_NO_TAPE;
+ }
+ return retval;
+
case SCSI_IOCTL_GET_IDLUN:
case SCSI_IOCTL_GET_BUS_NUMBER:
break;
+
default:
if ((cmd_in == SG_IO ||
cmd_in == SCSI_IOCTL_SEND_COMMAND ||
@@ -3840,42 +3849,46 @@ static long st_ioctl(struct file *file, unsigned int cmd_in, unsigned long arg)
return i;
break;
}
- retval = scsi_ioctl(STp->device, cmd_in, p);
- if (!retval && cmd_in == SCSI_IOCTL_STOP_UNIT) { /* unload */
- STp->rew_at_close = 0;
- STp->ready = ST_NO_TAPE;
- }
- return retval;
+ return -ENOTTY;
out:
mutex_unlock(&STp->lock);
return retval;
}
+static long st_ioctl(struct file *file, unsigned int cmd_in, unsigned long arg)
+{
+ void __user *p = (void __user *)arg;
+ struct scsi_tape *STp = file->private_data;
+ int ret;
+
+ ret = st_ioctl_common(file, cmd_in, p);
+ if (ret != -ENOTTY)
+ return ret;
+
+ return scsi_ioctl(STp->device, cmd_in, p);
+}
+
#ifdef CONFIG_COMPAT
static long st_compat_ioctl(struct file *file, unsigned int cmd_in, unsigned long arg)
{
void __user *p = compat_ptr(arg);
struct scsi_tape *STp = file->private_data;
- struct scsi_device *sdev = STp->device;
- int ret = -ENOIOCTLCMD;
+ int ret;
/* argument conversion is handled using put_user_mtpos/put_user_mtget */
switch (cmd_in) {
- case MTIOCTOP:
- return st_ioctl(file, MTIOCTOP, (unsigned long)p);
case MTIOCPOS32:
- return st_ioctl(file, MTIOCPOS, (unsigned long)p);
+ return st_ioctl_common(file, MTIOCPOS, p);
case MTIOCGET32:
- return st_ioctl(file, MTIOCGET, (unsigned long)p);
+ return st_ioctl_common(file, MTIOCGET, p);
}
- if (sdev->host->hostt->compat_ioctl) {
+ ret = st_ioctl_common(file, cmd_in, p);
+ if (ret != -ENOTTY)
+ return ret;
- ret = sdev->host->hostt->compat_ioctl(sdev, cmd_in, (void __user *)arg);
-
- }
- return ret;
+ return scsi_compat_ioctl(STp->device, cmd_in, p);
}
#endif
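MTIOCGET32 and MTIOCPOS32 above exist because an ioctl number encodes sizeof() of its argument type, so the 32-bit layout of the same operation yields a distinct command value that the compat handler remaps before sharing the native code. A standalone illustration; the structs are hypothetical stand-ins for mtget:

/*
 * Standalone demo: the ioctl number bakes in sizeof(arg), so 32-bit
 * and native layouts of the "same" command get distinct values.
 * struct my_get/my_get32 are hypothetical stand-ins for mtget.
 */
#include <stdio.h>
#include <sys/ioctl.h>

struct my_get   { long mt_resid; long mt_fileno; };	/* 8-byte longs on LP64 */
struct my_get32 { int mt_resid; int mt_fileno; };	/* 32-bit layout */

int main(void)
{
	printf("native: 0x%lx\n", (unsigned long)_IOR('m', 2, struct my_get));
	printf("compat: 0x%lx\n", (unsigned long)_IOR('m', 2, struct my_get32));
	return 0;
}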
diff --git a/drivers/scsi/storvsc_drv.c b/drivers/scsi/storvsc_drv.c
index f8faf8b3d965..fb41636519ee 100644
--- a/drivers/scsi/storvsc_drv.c
+++ b/drivers/scsi/storvsc_drv.c
@@ -1842,9 +1842,11 @@ static int storvsc_probe(struct hv_device *device,
*/
host->sg_tablesize = (stor_device->max_transfer_bytes >> PAGE_SHIFT);
/*
+ * For non-IDE disks, the host supports multiple channels.
* Set the number of HW queues we are supporting.
*/
- host->nr_hw_queues = num_present_cpus();
+ if (!dev_is_ide)
+ host->nr_hw_queues = num_present_cpus();
/*
* Set the error handler work queue.
diff --git a/drivers/scsi/sun3x_esp.c b/drivers/scsi/sun3x_esp.c
index 440a73eae647..f37df79e37e1 100644
--- a/drivers/scsi/sun3x_esp.c
+++ b/drivers/scsi/sun3x_esp.c
@@ -190,7 +190,7 @@ static int esp_sun3x_probe(struct platform_device *dev)
if (!res || !res->start)
goto fail_unlink;
- esp->regs = ioremap_nocache(res->start, 0x20);
+ esp->regs = ioremap(res->start, 0x20);
if (!esp->regs)
goto fail_unmap_regs;
@@ -198,7 +198,7 @@ static int esp_sun3x_probe(struct platform_device *dev)
if (!res || !res->start)
goto fail_unmap_regs;
- esp->dma_regs = ioremap_nocache(res->start, 0x10);
+ esp->dma_regs = ioremap(res->start, 0x10);
esp->command_block = dma_alloc_coherent(esp->dev, 16,
&esp->command_block_dma,
diff --git a/drivers/scsi/sym53c8xx_2/sym_nvram.c b/drivers/scsi/sym53c8xx_2/sym_nvram.c
index 9dc17f1288f9..d37e2a69136a 100644
--- a/drivers/scsi/sym53c8xx_2/sym_nvram.c
+++ b/drivers/scsi/sym53c8xx_2/sym_nvram.c
@@ -227,7 +227,7 @@ static void sym_display_Tekram_nvram(struct sym_device *np, Tekram_nvram *nvram)
/*
* 24C16 EEPROM reading.
*
- * GPOI0 - data in/data out
+ * GPIO0 - data in/data out
* GPIO1 - clock
* Symbios NVRAM wiring now also used by Tekram.
*/
@@ -524,7 +524,7 @@ static int sym_read_Symbios_nvram(struct sym_device *np, Symbios_nvram *nvram)
/*
* 93C46 EEPROM reading.
*
- * GPOI0 - data in
+ * GPIO0 - data in
* GPIO1 - data out
* GPIO2 - clock
* GPIO4 - chip select
diff --git a/drivers/scsi/ufs/cdns-pltfrm.c b/drivers/scsi/ufs/cdns-pltfrm.c
index 6feeb0faf123..56a6a1ed5ec2 100644
--- a/drivers/scsi/ufs/cdns-pltfrm.c
+++ b/drivers/scsi/ufs/cdns-pltfrm.c
@@ -19,6 +19,85 @@
#define CDNS_UFS_REG_HCLKDIV 0xFC
#define CDNS_UFS_REG_PHY_XCFGD1 0x113C
+#define CDNS_UFS_MAX_L4_ATTRS 12
+
+struct cdns_ufs_host {
+ /* cdns_ufs_dme_attr_val - storage for L4 attributes */
+ u32 cdns_ufs_dme_attr_val[CDNS_UFS_MAX_L4_ATTRS];
+};
+
+/**
+ * cdns_ufs_get_l4_attr - get L4 attributes on local side
+ * @hba: per adapter instance
+ *
+ */
+static void cdns_ufs_get_l4_attr(struct ufs_hba *hba)
+{
+ struct cdns_ufs_host *host = ufshcd_get_variant(hba);
+
+ ufshcd_dme_get(hba, UIC_ARG_MIB(T_PEERDEVICEID),
+ &host->cdns_ufs_dme_attr_val[0]);
+ ufshcd_dme_get(hba, UIC_ARG_MIB(T_PEERCPORTID),
+ &host->cdns_ufs_dme_attr_val[1]);
+ ufshcd_dme_get(hba, UIC_ARG_MIB(T_TRAFFICCLASS),
+ &host->cdns_ufs_dme_attr_val[2]);
+ ufshcd_dme_get(hba, UIC_ARG_MIB(T_PROTOCOLID),
+ &host->cdns_ufs_dme_attr_val[3]);
+ ufshcd_dme_get(hba, UIC_ARG_MIB(T_CPORTFLAGS),
+ &host->cdns_ufs_dme_attr_val[4]);
+ ufshcd_dme_get(hba, UIC_ARG_MIB(T_TXTOKENVALUE),
+ &host->cdns_ufs_dme_attr_val[5]);
+ ufshcd_dme_get(hba, UIC_ARG_MIB(T_RXTOKENVALUE),
+ &host->cdns_ufs_dme_attr_val[6]);
+ ufshcd_dme_get(hba, UIC_ARG_MIB(T_LOCALBUFFERSPACE),
+ &host->cdns_ufs_dme_attr_val[7]);
+ ufshcd_dme_get(hba, UIC_ARG_MIB(T_PEERBUFFERSPACE),
+ &host->cdns_ufs_dme_attr_val[8]);
+ ufshcd_dme_get(hba, UIC_ARG_MIB(T_CREDITSTOSEND),
+ &host->cdns_ufs_dme_attr_val[9]);
+ ufshcd_dme_get(hba, UIC_ARG_MIB(T_CPORTMODE),
+ &host->cdns_ufs_dme_attr_val[10]);
+ ufshcd_dme_get(hba, UIC_ARG_MIB(T_CONNECTIONSTATE),
+ &host->cdns_ufs_dme_attr_val[11]);
+}
+
+/**
+ * cdns_ufs_set_l4_attr - set L4 attributes on local side
+ * @hba: per adapter instance
+ *
+ */
+static void cdns_ufs_set_l4_attr(struct ufs_hba *hba)
+{
+ struct cdns_ufs_host *host = ufshcd_get_variant(hba);
+
+ ufshcd_dme_set(hba, UIC_ARG_MIB(T_CONNECTIONSTATE), 0);
+ ufshcd_dme_set(hba, UIC_ARG_MIB(T_PEERDEVICEID),
+ host->cdns_ufs_dme_attr_val[0]);
+ ufshcd_dme_set(hba, UIC_ARG_MIB(T_PEERCPORTID),
+ host->cdns_ufs_dme_attr_val[1]);
+ ufshcd_dme_set(hba, UIC_ARG_MIB(T_TRAFFICCLASS),
+ host->cdns_ufs_dme_attr_val[2]);
+ ufshcd_dme_set(hba, UIC_ARG_MIB(T_PROTOCOLID),
+ host->cdns_ufs_dme_attr_val[3]);
+ ufshcd_dme_set(hba, UIC_ARG_MIB(T_CPORTFLAGS),
+ host->cdns_ufs_dme_attr_val[4]);
+ ufshcd_dme_set(hba, UIC_ARG_MIB(T_TXTOKENVALUE),
+ host->cdns_ufs_dme_attr_val[5]);
+ ufshcd_dme_set(hba, UIC_ARG_MIB(T_RXTOKENVALUE),
+ host->cdns_ufs_dme_attr_val[6]);
+ ufshcd_dme_set(hba, UIC_ARG_MIB(T_LOCALBUFFERSPACE),
+ host->cdns_ufs_dme_attr_val[7]);
+ ufshcd_dme_set(hba, UIC_ARG_MIB(T_PEERBUFFERSPACE),
+ host->cdns_ufs_dme_attr_val[8]);
+ ufshcd_dme_set(hba, UIC_ARG_MIB(T_CREDITSTOSEND),
+ host->cdns_ufs_dme_attr_val[9]);
+ ufshcd_dme_set(hba, UIC_ARG_MIB(T_CPORTMODE),
+ host->cdns_ufs_dme_attr_val[10]);
+ ufshcd_dme_set(hba, UIC_ARG_MIB(T_CONNECTIONSTATE),
+ host->cdns_ufs_dme_attr_val[11]);
+}
/**
* Sets HCLKDIV register value based on the core_clk
@@ -78,6 +157,22 @@ static int cdns_ufs_hce_enable_notify(struct ufs_hba *hba,
}
/**
+ * cdns_ufs_hibern8_notify - called around hibern8 enter/exit
+ * @hba: host controller instance
+ * @cmd: UIC Command
+ * @status: notify stage (pre, post change)
+ *
+ */
+static void cdns_ufs_hibern8_notify(struct ufs_hba *hba, enum uic_cmd_dme cmd,
+ enum ufs_notify_change_status status)
+{
+ if (status == PRE_CHANGE && cmd == UIC_CMD_DME_HIBER_ENTER)
+ cdns_ufs_get_l4_attr(hba);
+ if (status == POST_CHANGE && cmd == UIC_CMD_DME_HIBER_EXIT)
+ cdns_ufs_set_l4_attr(hba);
+}
+
+/**
* Called before and after Link startup is carried out.
* @hba: host controller instance
* @status: notify stage (pre, post change)
@@ -117,6 +212,14 @@ static int cdns_ufs_link_startup_notify(struct ufs_hba *hba,
static int cdns_ufs_init(struct ufs_hba *hba)
{
int status = 0;
+ struct cdns_ufs_host *host;
+ struct device *dev = hba->dev;
+
+ host = devm_kzalloc(dev, sizeof(*host), GFP_KERNEL);
+
+ if (!host)
+ return -ENOMEM;
+ ufshcd_set_variant(hba, host);
if (hba->vops && hba->vops->phy_initialization)
status = hba->vops->phy_initialization(hba);
@@ -144,8 +247,10 @@ static int cdns_ufs_m31_16nm_phy_initialization(struct ufs_hba *hba)
static const struct ufs_hba_variant_ops cdns_ufs_pltfm_hba_vops = {
.name = "cdns-ufs-pltfm",
+ .init = cdns_ufs_init,
.hce_enable_notify = cdns_ufs_hce_enable_notify,
.link_startup_notify = cdns_ufs_link_startup_notify,
+ .hibern8_notify = cdns_ufs_hibern8_notify,
};
static const struct ufs_hba_variant_ops cdns_ufs_m31_16nm_pltfm_hba_vops = {
@@ -154,6 +259,7 @@ static const struct ufs_hba_variant_ops cdns_ufs_m31_16nm_pltfm_hba_vops = {
.hce_enable_notify = cdns_ufs_hce_enable_notify,
.link_startup_notify = cdns_ufs_link_startup_notify,
.phy_initialization = cdns_ufs_m31_16nm_phy_initialization,
+ .hibern8_notify = cdns_ufs_hibern8_notify,
};
static const struct of_device_id cdns_ufs_of_match[] = {
@@ -219,6 +325,7 @@ static const struct dev_pm_ops cdns_ufs_dev_pm_ops = {
static struct platform_driver cdns_ufs_pltfrm_driver = {
.probe = cdns_ufs_pltfrm_probe,
.remove = cdns_ufs_pltfrm_remove,
+ .shutdown = ufshcd_pltfrm_shutdown,
.driver = {
.name = "cdns-ufshcd",
.pm = &cdns_ufs_dev_pm_ops,
diff --git a/drivers/scsi/ufs/ufs-mediatek.c b/drivers/scsi/ufs/ufs-mediatek.c
index 83e28edc3ac5..53eae5fe2ade 100644
--- a/drivers/scsi/ufs/ufs-mediatek.c
+++ b/drivers/scsi/ufs/ufs-mediatek.c
@@ -6,16 +6,30 @@
* Peter Wang <peter.wang@mediatek.com>
*/
+#include <linux/arm-smccc.h>
+#include <linux/bitfield.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/phy/phy.h>
#include <linux/platform_device.h>
+#include <linux/soc/mediatek/mtk_sip_svc.h>
#include "ufshcd.h"
#include "ufshcd-pltfrm.h"
+#include "ufs_quirks.h"
#include "unipro.h"
#include "ufs-mediatek.h"
+#define ufs_mtk_smc(cmd, val, res) \
+ arm_smccc_smc(MTK_SIP_UFS_CONTROL, \
+ cmd, val, 0, 0, 0, 0, 0, &(res))
+
+#define ufs_mtk_ref_clk_notify(on, res) \
+ ufs_mtk_smc(UFS_MTK_SIP_REF_CLK_NOTIFICATION, on, res)
+
+#define ufs_mtk_device_reset_ctrl(high, res) \
+ ufs_mtk_smc(UFS_MTK_SIP_DEVICE_RESET, high, res)
+
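For reference, here is what one of the wrappers above expands to; the trailing zeros fill the unused a3..a7 registers of the SMC calling convention, and the example function name is hypothetical:

/*
 * What ufs_mtk_device_reset_ctrl(1, res) expands to, given the macro
 * definitions above.
 */
static void ufs_mtk_example_deassert_reset(void)
{
	struct arm_smccc_res res;

	arm_smccc_smc(MTK_SIP_UFS_CONTROL,	/* a0: SiP function id */
		      UFS_MTK_SIP_DEVICE_RESET,	/* a1: sub-command */
		      1,			/* a2: drive RST_n high */
		      0, 0, 0, 0, 0, &res);
}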
static void ufs_mtk_cfg_unipro_cg(struct ufs_hba *hba, bool enable)
{
u32 tmp;
@@ -81,6 +95,49 @@ static int ufs_mtk_bind_mphy(struct ufs_hba *hba)
return err;
}
+static int ufs_mtk_setup_ref_clk(struct ufs_hba *hba, bool on)
+{
+ struct ufs_mtk_host *host = ufshcd_get_variant(hba);
+ struct arm_smccc_res res;
+ unsigned long timeout;
+ u32 value;
+
+ if (host->ref_clk_enabled == on)
+ return 0;
+
+ if (on) {
+ ufs_mtk_ref_clk_notify(on, res);
+ ufshcd_writel(hba, REFCLK_REQUEST, REG_UFS_REFCLK_CTRL);
+ } else {
+ ufshcd_writel(hba, REFCLK_RELEASE, REG_UFS_REFCLK_CTRL);
+ }
+
+ /* Wait for ack */
+ timeout = jiffies + msecs_to_jiffies(REFCLK_REQ_TIMEOUT_MS);
+ do {
+ value = ufshcd_readl(hba, REG_UFS_REFCLK_CTRL);
+
+ /* Wait until ack bit equals to req bit */
+ if (((value & REFCLK_ACK) >> 1) == (value & REFCLK_REQUEST))
+ goto out;
+
+ usleep_range(100, 200);
+ } while (time_before(jiffies, timeout));
+
+ dev_err(hba->dev, "missing ack of refclk req, reg: 0x%x\n", value);
+
+ ufs_mtk_ref_clk_notify(host->ref_clk_enabled, res);
+
+ return -ETIMEDOUT;
+
+out:
+ host->ref_clk_enabled = on;
+ if (!on)
+ ufs_mtk_ref_clk_notify(on, res);
+
+ return 0;
+}
+
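The exit test in the polling loop above compares the shifted ack bit against the request bit. A standalone check of that predicate over all four req/ack states:

/*
 * Standalone check of the ack/req predicate in ufs_mtk_setup_ref_clk():
 * ACK is bit 1, REQUEST is bit 0, and the loop exits once the ack bit
 * mirrors the request bit.
 */
#include <assert.h>
#include <stdint.h>

#define REFCLK_REQUEST (1u << 0)
#define REFCLK_ACK     (1u << 1)

static int refclk_handshake_done(uint32_t value)
{
	return ((value & REFCLK_ACK) >> 1) == (value & REFCLK_REQUEST);
}

int main(void)
{
	assert(refclk_handshake_done(0x0));			/* released */
	assert(refclk_handshake_done(REFCLK_REQUEST | REFCLK_ACK));
	assert(!refclk_handshake_done(REFCLK_REQUEST));		/* req pending */
	assert(!refclk_handshake_done(REFCLK_ACK));		/* stale ack */
	return 0;
}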
/**
* ufs_mtk_setup_clocks - enables/disable clocks
* @hba: host controller instance
@@ -105,12 +162,16 @@ static int ufs_mtk_setup_clocks(struct ufs_hba *hba, bool on,
switch (status) {
case PRE_CHANGE:
- if (!on)
+ if (!on) {
+ ufs_mtk_setup_ref_clk(hba, on);
ret = phy_power_off(host->mphy);
+ }
break;
case POST_CHANGE:
- if (on)
+ if (on) {
ret = phy_power_on(host->mphy);
+ ufs_mtk_setup_ref_clk(hba, on);
+ }
break;
}
@@ -150,6 +211,9 @@ static int ufs_mtk_init(struct ufs_hba *hba)
/* Enable runtime autosuspend */
hba->caps |= UFSHCD_CAP_RPM_AUTOSUSPEND;
+ /* Enable clock-gating */
+ hba->caps |= UFSHCD_CAP_CLK_GATING;
+
/*
* ufshcd_vops_init() is invoked after
* ufshcd_setup_clock(true) in ufshcd_hba_init() thus
@@ -238,6 +302,23 @@ static int ufs_mtk_pre_link(struct ufs_hba *hba)
return ret;
}
+static void ufs_mtk_setup_clk_gating(struct ufs_hba *hba)
+{
+ unsigned long flags;
+ u32 ah_ms;
+
+ if (ufshcd_is_clkgating_allowed(hba)) {
+ if (ufshcd_is_auto_hibern8_supported(hba) && hba->ahit)
+ ah_ms = FIELD_GET(UFSHCI_AHIBERN8_TIMER_MASK,
+ hba->ahit);
+ else
+ ah_ms = 10;
+ spin_lock_irqsave(hba->host->host_lock, flags);
+ hba->clk_gating.delay_ms = ah_ms + 5;
+ spin_unlock_irqrestore(hba->host->host_lock, flags);
+ }
+}
+
static int ufs_mtk_post_link(struct ufs_hba *hba)
{
/* disable device LCC */
@@ -246,6 +327,15 @@ static int ufs_mtk_post_link(struct ufs_hba *hba)
/* enable unipro clock gating feature */
ufs_mtk_cfg_unipro_cg(hba, true);
+ /* configure auto-hibern8 timer to 10ms */
+ if (ufshcd_is_auto_hibern8_supported(hba)) {
+ ufshcd_auto_hibern8_update(hba,
+ FIELD_PREP(UFSHCI_AHIBERN8_TIMER_MASK, 10) |
+ FIELD_PREP(UFSHCI_AHIBERN8_SCALE_MASK, 3));
+ }
+
+ ufs_mtk_setup_clk_gating(hba);
+
return 0;
}
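The AHIT value built in ufs_mtk_post_link() packs a 10-unit timer with scale selector 3, which selects 1 ms units in the UFSHCI field layout (an assumption based on the ufshci.h masks used above), giving the 10 ms named in the comment. A standalone sketch of the encoding:

/*
 * Standalone sketch of the AHIT encoding: timer value in bits 9:0,
 * scale selector in bits 12:10; shift positions assume the UFSHCI
 * mask definitions from ufshci.h.
 */
#include <stdint.h>
#include <stdio.h>

#define AHIBERN8_TIMER_SHIFT 0
#define AHIBERN8_SCALE_SHIFT 10

static uint32_t ahit_encode(uint32_t timer, uint32_t scale)
{
	return (timer << AHIBERN8_TIMER_SHIFT) | (scale << AHIBERN8_SCALE_SHIFT);
}

int main(void)
{
	/* 10 units x 1 ms/unit = the 10 ms in ufs_mtk_post_link() */
	printf("AHIT = 0x%x\n", ahit_encode(10, 3));
	return 0;
}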
@@ -269,12 +359,86 @@ static int ufs_mtk_link_startup_notify(struct ufs_hba *hba,
return ret;
}
+static void ufs_mtk_device_reset(struct ufs_hba *hba)
+{
+ struct arm_smccc_res res;
+
+ ufs_mtk_device_reset_ctrl(0, res);
+
+ /*
+ * The reset signal is active low. UFS devices must detect a
+ * positive or negative RST_n pulse at least 1 us wide.
+ *
+ * To be on the safe side, keep the reset low for at least 10 us.
+ */
+ usleep_range(10, 15);
+
+ ufs_mtk_device_reset_ctrl(1, res);
+
+ /* Some devices may need time to respond to rst_n */
+ usleep_range(10000, 15000);
+
+ dev_info(hba->dev, "device reset done\n");
+}
+
+static int ufs_mtk_link_set_hpm(struct ufs_hba *hba)
+{
+ int err;
+
+ err = ufshcd_hba_enable(hba);
+ if (err)
+ return err;
+
+ err = ufshcd_dme_set(hba,
+ UIC_ARG_MIB_SEL(VS_UNIPROPOWERDOWNCONTROL, 0),
+ 0);
+ if (err)
+ return err;
+
+ err = ufshcd_uic_hibern8_exit(hba);
+ if (err)
+ return err;
+ ufshcd_set_link_active(hba);
+
+ err = ufshcd_make_hba_operational(hba);
+ if (err)
+ return err;
+
+ return 0;
+}
+
+static int ufs_mtk_link_set_lpm(struct ufs_hba *hba)
+{
+ int err;
+
+ err = ufshcd_dme_set(hba,
+ UIC_ARG_MIB_SEL(VS_UNIPROPOWERDOWNCONTROL, 0),
+ 1);
+ if (err) {
+ /* Resume UniPro state for following error recovery */
+ ufshcd_dme_set(hba,
+ UIC_ARG_MIB_SEL(VS_UNIPROPOWERDOWNCONTROL, 0),
+ 0);
+ return err;
+ }
+
+ return 0;
+}
+
static int ufs_mtk_suspend(struct ufs_hba *hba, enum ufs_pm_op pm_op)
{
+ int err;
struct ufs_mtk_host *host = ufshcd_get_variant(hba);
- if (ufshcd_is_link_hibern8(hba))
+ if (ufshcd_is_link_hibern8(hba)) {
+ err = ufs_mtk_link_set_lpm(hba);
+ if (err)
+ return -EAGAIN;
phy_power_off(host->mphy);
+ ufs_mtk_setup_ref_clk(hba, false);
+ }
return 0;
}
@@ -282,9 +446,40 @@ static int ufs_mtk_suspend(struct ufs_hba *hba, enum ufs_pm_op pm_op)
static int ufs_mtk_resume(struct ufs_hba *hba, enum ufs_pm_op pm_op)
{
struct ufs_mtk_host *host = ufshcd_get_variant(hba);
+ int err;
- if (ufshcd_is_link_hibern8(hba))
+ if (ufshcd_is_link_hibern8(hba)) {
+ ufs_mtk_setup_ref_clk(hba, true);
phy_power_on(host->mphy);
+ err = ufs_mtk_link_set_hpm(hba);
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+
+static void ufs_mtk_dbg_register_dump(struct ufs_hba *hba)
+{
+ ufshcd_dump_regs(hba, REG_UFS_REFCLK_CTRL, 0x4, "Ref-Clk Ctrl ");
+
+ ufshcd_dump_regs(hba, REG_UFS_EXTREG, 0x4, "Ext Reg ");
+
+ ufshcd_dump_regs(hba, REG_UFS_MPHYCTRL,
+ REG_UFS_REJECT_MON - REG_UFS_MPHYCTRL + 4,
+ "MPHY Ctrl ");
+
+ /* Direct debugging information to REG_UFS_PROBE */
+ ufshcd_writel(hba, 0x20, REG_UFS_DEBUG_SEL);
+ ufshcd_dump_regs(hba, REG_UFS_PROBE, 0x4, "Debug Probe ");
+}
+
+static int ufs_mtk_apply_dev_quirks(struct ufs_hba *hba)
+{
+ struct ufs_dev_info *dev_info = &hba->dev_info;
+
+ if (dev_info->wmanufacturerid == UFS_VENDOR_SAMSUNG)
+ ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TACTIVATE), 6);
return 0;
}
@@ -301,8 +496,11 @@ static struct ufs_hba_variant_ops ufs_hba_mtk_vops = {
.setup_clocks = ufs_mtk_setup_clocks,
.link_startup_notify = ufs_mtk_link_startup_notify,
.pwr_change_notify = ufs_mtk_pwr_change_notify,
+ .apply_dev_quirks = ufs_mtk_apply_dev_quirks,
.suspend = ufs_mtk_suspend,
.resume = ufs_mtk_resume,
+ .dbg_register_dump = ufs_mtk_dbg_register_dump,
+ .device_reset = ufs_mtk_device_reset,
};
/**
diff --git a/drivers/scsi/ufs/ufs-mediatek.h b/drivers/scsi/ufs/ufs-mediatek.h
index 19f8c42fe06f..fccdd979d6fb 100644
--- a/drivers/scsi/ufs/ufs-mediatek.h
+++ b/drivers/scsi/ufs/ufs-mediatek.h
@@ -6,6 +6,30 @@
#ifndef _UFS_MEDIATEK_H
#define _UFS_MEDIATEK_H
+#include <linux/bitops.h>
+#include <linux/soc/mediatek/mtk_sip_svc.h>
+
+/*
+ * Vendor specific UFSHCI Registers
+ */
+#define REG_UFS_REFCLK_CTRL 0x144
+#define REG_UFS_EXTREG 0x2100
+#define REG_UFS_MPHYCTRL 0x2200
+#define REG_UFS_REJECT_MON 0x22AC
+#define REG_UFS_DEBUG_SEL 0x22C0
+#define REG_UFS_PROBE 0x22C8
+
+/*
+ * Ref-clk control
+ *
+ * Values for register REG_UFS_REFCLK_CTRL
+ */
+#define REFCLK_RELEASE 0x0
+#define REFCLK_REQUEST BIT(0)
+#define REFCLK_ACK BIT(1)
+
+#define REFCLK_REQ_TIMEOUT_MS 3
+
/*
* Vendor specific pre-defined parameters
*/
@@ -30,6 +54,13 @@
#define VS_UNIPROPOWERDOWNCONTROL 0xD0A8
/*
+ * SiP commands
+ */
+#define MTK_SIP_UFS_CONTROL MTK_SIP_SMC_CMD(0x276)
+#define UFS_MTK_SIP_DEVICE_RESET BIT(1)
+#define UFS_MTK_SIP_REF_CLK_NOTIFICATION BIT(3)
+
+/*
* VS_DEBUGCLOCKENABLE
*/
enum {
@@ -48,6 +79,7 @@ enum {
struct ufs_mtk_host {
struct ufs_hba *hba;
struct phy *mphy;
+ bool ref_clk_enabled;
};
#endif /* !_UFS_MEDIATEK_H */
diff --git a/drivers/scsi/ufs/ufs-sysfs.c b/drivers/scsi/ufs/ufs-sysfs.c
index ad2abc96c0f1..dbdf8b01abed 100644
--- a/drivers/scsi/ufs/ufs-sysfs.c
+++ b/drivers/scsi/ufs/ufs-sysfs.c
@@ -118,26 +118,6 @@ static ssize_t spm_target_link_state_show(struct device *dev,
ufs_pm_lvl_states[hba->spm_lvl].link_state));
}
-static void ufshcd_auto_hibern8_update(struct ufs_hba *hba, u32 ahit)
-{
- unsigned long flags;
-
- if (!ufshcd_is_auto_hibern8_supported(hba))
- return;
-
- spin_lock_irqsave(hba->host->host_lock, flags);
- if (hba->ahit != ahit)
- hba->ahit = ahit;
- spin_unlock_irqrestore(hba->host->host_lock, flags);
- if (!pm_runtime_suspended(hba->dev)) {
- pm_runtime_get_sync(hba->dev);
- ufshcd_hold(hba, false);
- ufshcd_auto_hibern8_enable(hba);
- ufshcd_release(hba);
- pm_runtime_put(hba->dev);
- }
-}
-
/* Convert Auto-Hibernate Idle Timer register value to microseconds */
static int ufshcd_ahit_to_us(u32 ahit)
{
@@ -733,7 +713,7 @@ static ssize_t _pname##_show(struct device *dev, \
struct scsi_device *sdev = to_scsi_device(dev); \
struct ufs_hba *hba = shost_priv(sdev->host); \
u8 lun = ufshcd_scsi_to_upiu_lun(sdev->lun); \
- if (!ufs_is_valid_unit_desc_lun(lun)) \
+ if (!ufs_is_valid_unit_desc_lun(&hba->dev_info, lun)) \
return -EINVAL; \
return ufs_sysfs_read_desc_param(hba, QUERY_DESC_IDN_##_duname, \
lun, _duname##_DESC_PARAM##_puname, buf, _size); \
diff --git a/drivers/scsi/ufs/ufs-sysfs.h b/drivers/scsi/ufs/ufs-sysfs.h
index e5621e59a432..0f4e750a6748 100644
--- a/drivers/scsi/ufs/ufs-sysfs.h
+++ b/drivers/scsi/ufs/ufs-sysfs.h
@@ -1,5 +1,5 @@
-/* SPDX-License-Identifier: GPL-2.0
- * Copyright (C) 2018 Western Digital Corporation
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (C) 2018 Western Digital Corporation
*/
#ifndef __UFS_SYSFS_H__
diff --git a/drivers/scsi/ufs/ufs.h b/drivers/scsi/ufs/ufs.h
index 3327981ef894..dde2eb02f76f 100644
--- a/drivers/scsi/ufs/ufs.h
+++ b/drivers/scsi/ufs/ufs.h
@@ -63,7 +63,6 @@
#define UFS_UPIU_MAX_UNIT_NUM_ID 0x7F
#define UFS_MAX_LUNS (SCSI_W_LUN_BASE + UFS_UPIU_MAX_UNIT_NUM_ID)
#define UFS_UPIU_WLUN_ID (1 << 7)
-#define UFS_UPIU_MAX_GENERAL_LUN 8
/* Well known logical unit id in LUN field of UPIU */
enum {
@@ -499,9 +498,9 @@ struct ufs_query_res {
#define UFS_VREG_VCC_MAX_UV 3600000 /* uV */
#define UFS_VREG_VCC_1P8_MIN_UV 1700000 /* uV */
#define UFS_VREG_VCC_1P8_MAX_UV 1950000 /* uV */
-#define UFS_VREG_VCCQ_MIN_UV 1100000 /* uV */
-#define UFS_VREG_VCCQ_MAX_UV 1300000 /* uV */
-#define UFS_VREG_VCCQ2_MIN_UV 1650000 /* uV */
+#define UFS_VREG_VCCQ_MIN_UV 1140000 /* uV */
+#define UFS_VREG_VCCQ_MAX_UV 1260000 /* uV */
+#define UFS_VREG_VCCQ2_MIN_UV 1700000 /* uV */
#define UFS_VREG_VCCQ2_MAX_UV 1950000 /* uV */
/*
@@ -530,28 +529,28 @@ struct ufs_dev_info {
bool f_power_on_wp_en;
/* Keeps information if any of the LU is power on write protected */
bool is_lu_power_on_wp;
-};
-
-#define MAX_MODEL_LEN 16
-/**
- * ufs_dev_desc - ufs device details from the device descriptor
- *
- * @wmanufacturerid: card details
- * @model: card model
- */
-struct ufs_dev_desc {
+ /* Maximum number of general LU supported by the UFS device */
+ u8 max_lu_supported;
u16 wmanufacturerid;
+ /* UFS device product name */
u8 *model;
};
/**
* ufs_is_valid_unit_desc_lun - checks if the given LUN has a unit descriptor
+ * @dev_info: pointer to the struct ufs_dev_info instance
* @lun: LU number to check
* @return: true if the lun has a matching unit descriptor, false otherwise
*/
-static inline bool ufs_is_valid_unit_desc_lun(u8 lun)
+static inline bool ufs_is_valid_unit_desc_lun(struct ufs_dev_info *dev_info,
+ u8 lun)
{
- return lun == UFS_UPIU_RPMB_WLUN || (lun < UFS_UPIU_MAX_GENERAL_LUN);
+ if (!dev_info || !dev_info->max_lu_supported) {
+ pr_err("Max General LU supported by UFS isn't initialized\n");
+ return false;
+ }
+
+ return lun == UFS_UPIU_RPMB_WLUN || (lun < dev_info->max_lu_supported);
}
#endif /* End of Header */
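With UFS_UPIU_MAX_GENERAL_LUN removed, unit-descriptor validity becomes a runtime property read from the geometry descriptor. A standalone sketch of the reworked predicate; the RPMB well-known LUN value used here is illustrative:

/*
 * Standalone sketch of the reworked predicate: validity now depends on
 * the runtime max_lu_supported (8 or 32) instead of a compile-time cap.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define RPMB_WLUN 0xC4		/* illustrative well-known LUN value */

struct dev_info { uint8_t max_lu_supported; };

static bool lun_has_unit_desc(const struct dev_info *di, uint8_t lun)
{
	if (!di || !di->max_lu_supported)
		return false;	/* geometry descriptor not parsed yet */
	return lun == RPMB_WLUN || lun < di->max_lu_supported;
}

int main(void)
{
	struct dev_info di = { .max_lu_supported = 8 };

	printf("lun 7: %d\n", lun_has_unit_desc(&di, 7));	/* 1 */
	printf("lun 9: %d\n", lun_has_unit_desc(&di, 9));	/* 0 */
	return 0;
}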
diff --git a/drivers/scsi/ufs/ufs_quirks.h b/drivers/scsi/ufs/ufs_quirks.h
index fe6cad9b2a0d..d0ab147f98d3 100644
--- a/drivers/scsi/ufs/ufs_quirks.h
+++ b/drivers/scsi/ufs/ufs_quirks.h
@@ -22,16 +22,17 @@
* @quirk: device quirk
*/
struct ufs_dev_fix {
- struct ufs_dev_desc card;
+ u16 wmanufacturerid;
+ u8 *model;
unsigned int quirk;
};
-#define END_FIX { { 0 }, 0 }
+#define END_FIX { }
/* add specific device quirk */
#define UFS_FIX(_vendor, _model, _quirk) { \
- .card.wmanufacturerid = (_vendor),\
- .card.model = (_model), \
+ .wmanufacturerid = (_vendor),\
+ .model = (_model), \
.quirk = (_quirk), \
}
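With the embedded struct ufs_dev_desc flattened away, a fixup entry now sets wmanufacturerid and model directly. An illustrative table in the style of the existing ufs_fixups; the exact quirk pairings shown are illustrative, not a copy of the real table:

/*
 * Illustrative use of the reworked macro; the empty END_FIX entry has
 * .quirk == 0 and terminates the walk in ufs_fixup_device_setup().
 */
static struct ufs_dev_fix ufs_fixups[] = {
	UFS_FIX(UFS_VENDOR_SAMSUNG, UFS_ANY_MODEL,
		UFS_DEVICE_QUIRK_DELAY_BEFORE_LPM),
	UFS_FIX(UFS_ANY_VENDOR, UFS_ANY_MODEL,
		UFS_DEVICE_QUIRK_RECOVERY_FROM_DL_NAC_ERRORS),
	END_FIX
};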
diff --git a/drivers/scsi/ufs/ufshcd.c b/drivers/scsi/ufs/ufshcd.c
index b5966faf3e98..abd0e6b05f79 100644
--- a/drivers/scsi/ufs/ufshcd.c
+++ b/drivers/scsi/ufs/ufshcd.c
@@ -246,11 +246,10 @@ static int ufshcd_reset_and_restore(struct ufs_hba *hba);
static int ufshcd_eh_host_reset_handler(struct scsi_cmnd *cmd);
static int ufshcd_clear_tm_cmd(struct ufs_hba *hba, int tag);
static void ufshcd_hba_exit(struct ufs_hba *hba);
-static int ufshcd_probe_hba(struct ufs_hba *hba);
+static int ufshcd_probe_hba(struct ufs_hba *hba, bool async);
static int __ufshcd_setup_clocks(struct ufs_hba *hba, bool on,
bool skip_ref_clk);
static int ufshcd_setup_clocks(struct ufs_hba *hba, bool on);
-static int ufshcd_uic_hibern8_exit(struct ufs_hba *hba);
static int ufshcd_uic_hibern8_enter(struct ufs_hba *hba);
static inline void ufshcd_add_delay_before_dme_cmd(struct ufs_hba *hba);
static int ufshcd_host_reset_and_restore(struct ufs_hba *hba);
@@ -266,26 +265,18 @@ static inline bool ufshcd_valid_tag(struct ufs_hba *hba, int tag)
return tag >= 0 && tag < hba->nutrs;
}
-static inline int ufshcd_enable_irq(struct ufs_hba *hba)
+static inline void ufshcd_enable_irq(struct ufs_hba *hba)
{
- int ret = 0;
-
if (!hba->is_irq_enabled) {
- ret = request_irq(hba->irq, ufshcd_intr, IRQF_SHARED, UFSHCD,
- hba);
- if (ret)
- dev_err(hba->dev, "%s: request_irq failed, ret=%d\n",
- __func__, ret);
+ enable_irq(hba->irq);
hba->is_irq_enabled = true;
}
-
- return ret;
}
static inline void ufshcd_disable_irq(struct ufs_hba *hba)
{
if (hba->is_irq_enabled) {
- free_irq(hba->irq, hba);
+ disable_irq(hba->irq);
hba->is_irq_enabled = false;
}
}
@@ -335,27 +326,27 @@ static void ufshcd_add_command_trace(struct ufs_hba *hba,
u8 opcode = 0;
u32 intr, doorbell;
struct ufshcd_lrb *lrbp = &hba->lrb[tag];
+ struct scsi_cmnd *cmd = lrbp->cmd;
int transfer_len = -1;
if (!trace_ufshcd_command_enabled()) {
/* trace UPIU W/O tracing command */
- if (lrbp->cmd)
+ if (cmd)
ufshcd_add_cmd_upiu_trace(hba, tag, str);
return;
}
- if (lrbp->cmd) { /* data phase exists */
+ if (cmd) { /* data phase exists */
/* trace UPIU also */
ufshcd_add_cmd_upiu_trace(hba, tag, str);
- opcode = (u8)(*lrbp->cmd->cmnd);
+ opcode = cmd->cmnd[0];
if ((opcode == READ_10) || (opcode == WRITE_10)) {
/*
* Currently we only fully trace read(10) and write(10)
* commands
*/
- if (lrbp->cmd->request && lrbp->cmd->request->bio)
- lba =
- lrbp->cmd->request->bio->bi_iter.bi_sector;
+ if (cmd->request && cmd->request->bio)
+ lba = cmd->request->bio->bi_iter.bi_sector;
transfer_len = be32_to_cpu(
lrbp->ucd_req_ptr->sc.exp_data_transfer_len);
}
@@ -393,7 +384,7 @@ static void ufshcd_print_err_hist(struct ufs_hba *hba,
for (i = 0; i < UFS_ERR_REG_HIST_LENGTH; i++) {
int p = (i + err_hist->pos) % UFS_ERR_REG_HIST_LENGTH;
- if (err_hist->reg[p] == 0)
+ if (err_hist->tstamp[p] == 0)
continue;
dev_err(hba->dev, "%s[%d] = 0x%x at %lld us\n", err_name, p,
err_hist->reg[p], ktime_to_us(err_hist->tstamp[p]));
@@ -401,7 +392,7 @@ static void ufshcd_print_err_hist(struct ufs_hba *hba,
}
if (!found)
- dev_err(hba->dev, "No record of %s errors\n", err_name);
+ dev_err(hba->dev, "No record of %s\n", err_name);
}
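The hunk above switches the skip test from reg[p] to tstamp[p], so a genuinely zero register value still gets printed and only never-written slots are skipped. A standalone sketch of the ring and the new skip rule:

/*
 * Standalone sketch of the history ring updated by
 * ufshcd_update_reg_hist(): fixed length, pos wraps, and a zero
 * timestamp marks an unused slot.
 */
#include <stdint.h>
#include <stdio.h>

#define HIST_LENGTH 8

struct err_hist {
	uint32_t reg[HIST_LENGTH];
	int64_t tstamp[HIST_LENGTH];	/* 0 == slot never written */
	int pos;
};

static void hist_update(struct err_hist *h, uint32_t reg, int64_t now)
{
	h->reg[h->pos] = reg;
	h->tstamp[h->pos] = now;
	h->pos = (h->pos + 1) % HIST_LENGTH;
}

int main(void)
{
	struct err_hist h = { 0 };
	int i;

	hist_update(&h, 0x0, 100);	/* a real event whose reg == 0 */
	hist_update(&h, 0x4, 200);
	for (i = 0; i < HIST_LENGTH; i++)
		if (h.tstamp[i])	/* the new skip rule */
			printf("[%d] = 0x%x at %lld\n", i, h.reg[i],
			       (long long)h.tstamp[i]);
	return 0;
}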
static void ufshcd_print_host_regs(struct ufs_hba *hba)
@@ -436,8 +427,7 @@ static void ufshcd_print_host_regs(struct ufs_hba *hba)
ufshcd_print_clk_freqs(hba);
- if (hba->vops && hba->vops->dbg_register_dump)
- hba->vops->dbg_register_dump(hba);
+ ufshcd_vops_dbg_register_dump(hba);
}
static
@@ -497,8 +487,8 @@ static void ufshcd_print_tmrs(struct ufs_hba *hba, unsigned long bitmap)
static void ufshcd_print_host_state(struct ufs_hba *hba)
{
dev_err(hba->dev, "UFS Host state=%d\n", hba->ufshcd_state);
- dev_err(hba->dev, "lrb in use=0x%lx, outstanding reqs=0x%lx tasks=0x%lx\n",
- hba->lrb_in_use, hba->outstanding_reqs, hba->outstanding_tasks);
+ dev_err(hba->dev, "outstanding reqs=0x%lx tasks=0x%lx\n",
+ hba->outstanding_reqs, hba->outstanding_tasks);
dev_err(hba->dev, "saved_err=0x%x, saved_uic_err=0x%x\n",
hba->saved_err, hba->saved_uic_err);
dev_err(hba->dev, "Device power mode=%d, UIC link state=%d\n",
@@ -646,40 +636,6 @@ static inline int ufshcd_get_tr_ocs(struct ufshcd_lrb *lrbp)
}
/**
- * ufshcd_get_tm_free_slot - get a free slot for task management request
- * @hba: per adapter instance
- * @free_slot: pointer to variable with available slot value
- *
- * Get a free tag and lock it until ufshcd_put_tm_slot() is called.
- * Returns 0 if free slot is not available, else return 1 with tag value
- * in @free_slot.
- */
-static bool ufshcd_get_tm_free_slot(struct ufs_hba *hba, int *free_slot)
-{
- int tag;
- bool ret = false;
-
- if (!free_slot)
- goto out;
-
- do {
- tag = find_first_zero_bit(&hba->tm_slots_in_use, hba->nutmrs);
- if (tag >= hba->nutmrs)
- goto out;
- } while (test_and_set_bit_lock(tag, &hba->tm_slots_in_use));
-
- *free_slot = tag;
- ret = true;
-out:
- return ret;
-}
-
-static inline void ufshcd_put_tm_slot(struct ufs_hba *hba, int slot)
-{
- clear_bit_unlock(slot, &hba->tm_slots_in_use);
-}
-
-/**
* ufshcd_utrl_clear - Clear a bit in UTRLCLR register
* @hba: per adapter instance
* @pos: position of the bit to be cleared
@@ -1273,6 +1229,24 @@ out:
return ret;
}
+static bool ufshcd_is_busy(struct request *req, void *priv, bool reserved)
+{
+ int *busy = priv;
+
+ WARN_ON_ONCE(reserved);
+ (*busy)++;
+ return false;
+}
+
+/* Whether or not any tag is in use by a request that is in progress. */
+static bool ufshcd_any_tag_in_use(struct ufs_hba *hba)
+{
+ struct request_queue *q = hba->cmd_queue;
+ int busy = 0;
+
+ blk_mq_tagset_busy_iter(q->tag_set, ufshcd_is_busy, &busy);
+ return busy;
+}
static int ufshcd_devfreq_get_dev_status(struct device *dev,
struct devfreq_dev_status *stat)
@@ -1490,6 +1464,8 @@ static void ufshcd_ungate_work(struct work_struct *work)
spin_unlock_irqrestore(hba->host->host_lock, flags);
ufshcd_setup_clocks(hba, true);
+ ufshcd_enable_irq(hba);
+
/* Exit from hibern8 */
if (ufshcd_can_hibern8_during_gating(hba)) {
/* Prevent gating in this path */
@@ -1619,7 +1595,7 @@ static void ufshcd_gate_work(struct work_struct *work)
if (hba->clk_gating.active_reqs
|| hba->ufshcd_state != UFSHCD_STATE_OPERATIONAL
- || hba->lrb_in_use || hba->outstanding_tasks
+ || ufshcd_any_tag_in_use(hba) || hba->outstanding_tasks
|| hba->active_uic_cmd || hba->uic_async_done)
goto rel_lock;
@@ -1636,6 +1612,8 @@ static void ufshcd_gate_work(struct work_struct *work)
ufshcd_set_link_hibern8(hba);
}
+ ufshcd_disable_irq(hba);
+
if (!ufshcd_is_link_active(hba))
ufshcd_setup_clocks(hba, false);
else
@@ -1673,7 +1651,7 @@ static void __ufshcd_release(struct ufs_hba *hba)
if (hba->clk_gating.active_reqs || hba->clk_gating.is_suspended
|| hba->ufshcd_state != UFSHCD_STATE_OPERATIONAL
- || hba->lrb_in_use || hba->outstanding_tasks
+ || ufshcd_any_tag_in_use(hba) || hba->outstanding_tasks
|| hba->active_uic_cmd || hba->uic_async_done
|| ufshcd_eh_in_progress(hba))
return;
@@ -1881,12 +1859,12 @@ void ufshcd_send_command(struct ufs_hba *hba, unsigned int task_tag)
{
hba->lrb[task_tag].issue_time_stamp = ktime_get();
hba->lrb[task_tag].compl_time_stamp = ktime_set(0, 0);
+ ufshcd_add_command_trace(hba, task_tag, "send");
ufshcd_clk_scaling_start_busy(hba);
__set_bit(task_tag, &hba->outstanding_reqs);
ufshcd_writel(hba, 1 << task_tag, REG_UTP_TRANSFER_REQ_DOOR_BELL);
/* Make sure that doorbell is committed immediately */
wmb();
- ufshcd_add_command_trace(hba, task_tag, "send");
}
/**
@@ -2239,6 +2217,7 @@ static void ufshcd_prepare_req_desc_hdr(struct ufshcd_lrb *lrbp,
static
void ufshcd_prepare_utp_scsi_cmd_upiu(struct ufshcd_lrb *lrbp, u32 upiu_flags)
{
+ struct scsi_cmnd *cmd = lrbp->cmd;
struct utp_upiu_req *ucd_req_ptr = lrbp->ucd_req_ptr;
unsigned short cdb_len;
@@ -2252,12 +2231,11 @@ void ufshcd_prepare_utp_scsi_cmd_upiu(struct ufshcd_lrb *lrbp, u32 upiu_flags)
/* Total EHS length and Data segment length will be zero */
ucd_req_ptr->header.dword_2 = 0;
- ucd_req_ptr->sc.exp_data_transfer_len =
- cpu_to_be32(lrbp->cmd->sdb.length);
+ ucd_req_ptr->sc.exp_data_transfer_len = cpu_to_be32(cmd->sdb.length);
- cdb_len = min_t(unsigned short, lrbp->cmd->cmd_len, UFS_CDB_SIZE);
+ cdb_len = min_t(unsigned short, cmd->cmd_len, UFS_CDB_SIZE);
memset(ucd_req_ptr->sc.cdb, 0, UFS_CDB_SIZE);
- memcpy(ucd_req_ptr->sc.cdb, lrbp->cmd->cmnd, cdb_len);
+ memcpy(ucd_req_ptr->sc.cdb, cmd->cmnd, cdb_len);
memset(lrbp->ucd_rsp_ptr, 0, sizeof(struct utp_upiu_rsp));
}
@@ -2443,22 +2421,9 @@ static int ufshcd_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd)
hba->req_abort_count = 0;
- /* acquire the tag to make sure device cmds don't use it */
- if (test_and_set_bit_lock(tag, &hba->lrb_in_use)) {
- /*
- * Dev manage command in progress, requeue the command.
- * Requeuing the command helps in cases where the request *may*
- * find different tag instead of waiting for dev manage command
- * completion.
- */
- err = SCSI_MLQUEUE_HOST_BUSY;
- goto out;
- }
-
err = ufshcd_hold(hba, true);
if (err) {
err = SCSI_MLQUEUE_HOST_BUSY;
- clear_bit_unlock(tag, &hba->lrb_in_use);
goto out;
}
WARN_ON(hba->clk_gating.state != CLKS_ON);
@@ -2479,7 +2444,7 @@ static int ufshcd_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd)
err = ufshcd_map_sg(hba, lrbp);
if (err) {
lrbp->cmd = NULL;
- clear_bit_unlock(tag, &hba->lrb_in_use);
+ ufshcd_release(hba);
goto out;
}
/* Make sure descriptors are ready before ringing the doorbell */
@@ -2627,44 +2592,6 @@ static int ufshcd_wait_for_dev_cmd(struct ufs_hba *hba,
}
/**
- * ufshcd_get_dev_cmd_tag - Get device management command tag
- * @hba: per-adapter instance
- * @tag_out: pointer to variable with available slot value
- *
- * Get a free slot and lock it until device management command
- * completes.
- *
- * Returns false if free slot is unavailable for locking, else
- * return true with tag value in @tag.
- */
-static bool ufshcd_get_dev_cmd_tag(struct ufs_hba *hba, int *tag_out)
-{
- int tag;
- bool ret = false;
- unsigned long tmp;
-
- if (!tag_out)
- goto out;
-
- do {
- tmp = ~hba->lrb_in_use;
- tag = find_last_bit(&tmp, hba->nutrs);
- if (tag >= hba->nutrs)
- goto out;
- } while (test_and_set_bit_lock(tag, &hba->lrb_in_use));
-
- *tag_out = tag;
- ret = true;
-out:
- return ret;
-}
-
-static inline void ufshcd_put_dev_cmd_tag(struct ufs_hba *hba, int tag)
-{
- clear_bit_unlock(tag, &hba->lrb_in_use);
-}
-
-/**
* ufshcd_exec_dev_cmd - API for sending device management requests
* @hba: UFS hba
* @cmd_type: specifies the type (NOP, Query...)
@@ -2676,6 +2603,8 @@ static inline void ufshcd_put_dev_cmd_tag(struct ufs_hba *hba, int tag)
static int ufshcd_exec_dev_cmd(struct ufs_hba *hba,
enum dev_cmd_type cmd_type, int timeout)
{
+ struct request_queue *q = hba->cmd_queue;
+ struct request *req;
struct ufshcd_lrb *lrbp;
int err;
int tag;
@@ -2689,7 +2618,13 @@ static int ufshcd_exec_dev_cmd(struct ufs_hba *hba,
* Even though we use wait_event() which sleeps indefinitely,
* the maximum wait time is bounded by SCSI request timeout.
*/
- wait_event(hba->dev_cmd.tag_wq, ufshcd_get_dev_cmd_tag(hba, &tag));
+ req = blk_get_request(q, REQ_OP_DRV_OUT, 0);
+ if (IS_ERR(req)) {
+ err = PTR_ERR(req);
+ goto out_unlock;
+ }
+ tag = req->tag;
+ WARN_ON_ONCE(!ufshcd_valid_tag(hba, tag));
init_completion(&wait);
lrbp = &hba->lrb[tag];
@@ -2714,8 +2649,8 @@ static int ufshcd_exec_dev_cmd(struct ufs_hba *hba,
err ? "query_complete_err" : "query_complete");
out_put_tag:
- ufshcd_put_dev_cmd_tag(hba, tag);
- wake_up(&hba->dev_cmd.tag_wq);
+ blk_put_request(req);
+out_unlock:
up_read(&hba->clk_scaling_lock);
return err;
}
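Device-management commands now borrow their tag from the block layer rather than the old lrb_in_use bitmap and tag_wq waitqueue: blk_get_request() reserves a tag that the SCSI path cannot use until blk_put_request() returns it. A condensed sketch of the lifecycle, with issue_dev_cmd() as a hypothetical stand-in for the descriptor setup and completion wait:

/* Condensed lifecycle of a borrowed tag, following the hunk above. */
static int example_exec_dev_cmd(struct ufs_hba *hba, struct request_queue *q)
{
	struct request *req;
	int tag, err;

	req = blk_get_request(q, REQ_OP_DRV_OUT, 0);
	if (IS_ERR(req))
		return PTR_ERR(req);

	tag = req->tag;			/* slot now invisible to scsi I/O */
	err = issue_dev_cmd(hba, tag);	/* hypothetical: uses hba->lrb[tag] */

	blk_put_request(req);		/* slot returns to the shared pool */
	return err;
}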
@@ -2918,7 +2853,7 @@ static int ufshcd_query_attr_retry(struct ufs_hba *hba,
int ret = 0;
u32 retries;
- for (retries = QUERY_REQ_RETRIES; retries > 0; retries--) {
+ for (retries = QUERY_REQ_RETRIES; retries > 0; retries--) {
ret = ufshcd_query_attr(hba, opcode, idn, index,
selector, attr_val);
if (ret)
@@ -3210,17 +3145,6 @@ static inline int ufshcd_read_desc(struct ufs_hba *hba,
return ufshcd_read_desc_param(hba, desc_id, desc_index, 0, buf, size);
}
-static inline int ufshcd_read_power_desc(struct ufs_hba *hba,
- u8 *buf,
- u32 size)
-{
- return ufshcd_read_desc(hba, QUERY_DESC_IDN_POWER, 0, buf, size);
-}
-
-static int ufshcd_read_device_desc(struct ufs_hba *hba, u8 *buf, u32 size)
-{
- return ufshcd_read_desc(hba, QUERY_DESC_IDN_DEVICE, 0, buf, size);
-}
/**
* struct uc_string_id - unicode string
@@ -3345,7 +3269,7 @@ static inline int ufshcd_read_unit_desc_param(struct ufs_hba *hba,
* Unit descriptors are only available for general purpose LUs (LUN id
* from 0 to 7) and RPMB Well known LU.
*/
- if (!ufs_is_valid_unit_desc_lun(lun))
+ if (!ufs_is_valid_unit_desc_lun(&hba->dev_info, lun))
return -EOPNOTSUPP;
return ufshcd_read_desc_param(hba, QUERY_DESC_IDN_UNIT, lun,
@@ -3929,7 +3853,7 @@ out:
return ret;
}
-static int ufshcd_uic_hibern8_exit(struct ufs_hba *hba)
+int ufshcd_uic_hibern8_exit(struct ufs_hba *hba)
{
struct uic_command uic_cmd = {0};
int ret;
@@ -3955,6 +3879,25 @@ static int ufshcd_uic_hibern8_exit(struct ufs_hba *hba)
return ret;
}
+EXPORT_SYMBOL_GPL(ufshcd_uic_hibern8_exit);
+
+void ufshcd_auto_hibern8_update(struct ufs_hba *hba, u32 ahit)
+{
+ unsigned long flags;
+
+ if (!(hba->capabilities & MASK_AUTO_HIBERN8_SUPPORT))
+ return;
+
+ spin_lock_irqsave(hba->host->host_lock, flags);
+ if (hba->ahit == ahit)
+ goto out_unlock;
+ hba->ahit = ahit;
+ if (!pm_runtime_suspended(hba->dev))
+ ufshcd_writel(hba, hba->ahit, REG_AUTO_HIBERNATE_IDLE_TIMER);
+out_unlock:
+ spin_unlock_irqrestore(hba->host->host_lock, flags);
+}
+EXPORT_SYMBOL_GPL(ufshcd_auto_hibern8_update);
void ufshcd_auto_hibern8_enable(struct ufs_hba *hba)
{
@@ -4095,6 +4038,26 @@ static int ufshcd_change_power_mode(struct ufs_hba *hba,
ufshcd_dme_set(hba, UIC_ARG_MIB(PA_HSSERIES),
pwr_mode->hs_rate);
+ ufshcd_dme_set(hba, UIC_ARG_MIB(PA_PWRMODEUSERDATA0),
+ DL_FC0ProtectionTimeOutVal_Default);
+ ufshcd_dme_set(hba, UIC_ARG_MIB(PA_PWRMODEUSERDATA1),
+ DL_TC0ReplayTimeOutVal_Default);
+ ufshcd_dme_set(hba, UIC_ARG_MIB(PA_PWRMODEUSERDATA2),
+ DL_AFC0ReqTimeOutVal_Default);
+ ufshcd_dme_set(hba, UIC_ARG_MIB(PA_PWRMODEUSERDATA3),
+ DL_FC1ProtectionTimeOutVal_Default);
+ ufshcd_dme_set(hba, UIC_ARG_MIB(PA_PWRMODEUSERDATA4),
+ DL_TC1ReplayTimeOutVal_Default);
+ ufshcd_dme_set(hba, UIC_ARG_MIB(PA_PWRMODEUSERDATA5),
+ DL_AFC1ReqTimeOutVal_Default);
+
+ ufshcd_dme_set(hba, UIC_ARG_MIB(DME_LocalFC0ProtectionTimeOutVal),
+ DL_FC0ProtectionTimeOutVal_Default);
+ ufshcd_dme_set(hba, UIC_ARG_MIB(DME_LocalTC0ReplayTimeOutVal),
+ DL_TC0ReplayTimeOutVal_Default);
+ ufshcd_dme_set(hba, UIC_ARG_MIB(DME_LocalAFC0ReqTimeOutVal),
+ DL_AFC0ReqTimeOutVal_Default);
+
ret = ufshcd_uic_change_pwr_mode(hba, pwr_mode->pwr_rx << 4
| pwr_mode->pwr_tx);
@@ -4188,7 +4151,7 @@ out:
*
* Returns 0 on success, non-zero value on failure
*/
-static int ufshcd_make_hba_operational(struct ufs_hba *hba)
+int ufshcd_make_hba_operational(struct ufs_hba *hba)
{
int err = 0;
u32 reg;
@@ -4234,6 +4197,7 @@ static int ufshcd_make_hba_operational(struct ufs_hba *hba)
out:
return err;
}
+EXPORT_SYMBOL_GPL(ufshcd_make_hba_operational);
/**
* ufshcd_hba_stop - Send controller to reset state
@@ -4311,7 +4275,7 @@ static int ufshcd_hba_execute_hce(struct ufs_hba *hba)
return 0;
}
-static int ufshcd_hba_enable(struct ufs_hba *hba)
+int ufshcd_hba_enable(struct ufs_hba *hba)
{
int ret;
@@ -4336,6 +4300,8 @@ static int ufshcd_hba_enable(struct ufs_hba *hba)
return ret;
}
+EXPORT_SYMBOL_GPL(ufshcd_hba_enable);
+
static int ufshcd_disable_tx_lcc(struct ufs_hba *hba, bool peer)
{
int tx_lanes, i, err = 0;
@@ -4372,13 +4338,14 @@ static inline int ufshcd_disable_device_tx_lcc(struct ufs_hba *hba)
return ufshcd_disable_tx_lcc(hba, true);
}
-static void ufshcd_update_reg_hist(struct ufs_err_reg_hist *reg_hist,
- u32 reg)
+void ufshcd_update_reg_hist(struct ufs_err_reg_hist *reg_hist,
+ u32 reg)
{
reg_hist->reg[reg_hist->pos] = reg;
reg_hist->tstamp[reg_hist->pos] = ktime_get();
reg_hist->pos = (reg_hist->pos + 1) % UFS_ERR_REG_HIST_LENGTH;
}
+EXPORT_SYMBOL_GPL(ufshcd_update_reg_hist);
/**
* ufshcd_link_startup - Initialize unipro link startup
@@ -4561,7 +4528,7 @@ static int ufshcd_get_lu_wp(struct ufs_hba *hba,
* protected so skip reading bLUWriteProtect parameter for
* it. For other W-LUs, UNIT DESCRIPTOR is not available.
*/
- else if (lun >= UFS_UPIU_MAX_GENERAL_LUN)
+ else if (lun >= hba->dev_info.max_lu_supported)
ret = -ENOTSUPP;
else
ret = ufshcd_read_unit_desc_param(hba,
@@ -4608,6 +4575,9 @@ static int ufshcd_slave_alloc(struct scsi_device *sdev)
/* Mode sense(6) is not supported by UFS, so use Mode sense(10) */
sdev->use_10_for_ms = 1;
+ /* DBD field should be set to 1 in mode sense(10) */
+ sdev->set_dbd_for_ms = 1;
+
/* allow SCSI layer to restart the device in case of errors */
sdev->allow_restart = 1;
@@ -4799,7 +4769,7 @@ ufshcd_transfer_rsp_status(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
break;
} /* end of switch */
- if (host_byte(result) != DID_OK)
+ if ((host_byte(result) != DID_OK) && !hba->silence_err_logs)
ufshcd_print_trs(hba, 1 << lrbp->task_tag, true);
return result;
}
@@ -4856,12 +4826,13 @@ static void __ufshcd_transfer_req_compl(struct ufs_hba *hba,
cmd->result = result;
/* Mark completed command as NULL in LRB */
lrbp->cmd = NULL;
- clear_bit_unlock(index, &hba->lrb_in_use);
+ lrbp->compl_time_stamp = ktime_get();
/* Do not touch lrbp after scsi done */
cmd->scsi_done(cmd);
__ufshcd_release(hba);
} else if (lrbp->command_type == UTP_CMD_TYPE_DEV_MANAGE ||
lrbp->command_type == UTP_CMD_TYPE_UFS_STORAGE) {
+ lrbp->compl_time_stamp = ktime_get();
if (hba->dev_cmd.complete) {
ufshcd_add_command_trace(hba, index,
"dev_complete");
@@ -4870,17 +4841,12 @@ static void __ufshcd_transfer_req_compl(struct ufs_hba *hba,
}
if (ufshcd_is_clkscaling_supported(hba))
hba->clk_scaling.active_reqs--;
-
- lrbp->compl_time_stamp = ktime_get();
}
/* clear corresponding bits of completed commands */
hba->outstanding_reqs ^= completed_reqs;
ufshcd_clk_scaling_update_busy(hba);
-
- /* we might have free'd some tags above */
- wake_up(&hba->dev_cmd.tag_wq);
}
/**
@@ -5053,6 +5019,7 @@ static int ufshcd_disable_auto_bkops(struct ufs_hba *hba)
hba->auto_bkops_enabled = false;
trace_ufshcd_auto_bkops_state(dev_name(hba->dev), "Disabled");
+ hba->is_urgent_bkops_lvl_checked = false;
out:
return err;
}
@@ -5077,6 +5044,7 @@ static void ufshcd_force_reset_auto_bkops(struct ufs_hba *hba)
hba->ee_ctrl_mask &= ~MASK_EE_URGENT_BKOPS;
ufshcd_disable_auto_bkops(hba);
}
+ hba->is_urgent_bkops_lvl_checked = false;
}
static inline int ufshcd_get_bkops_status(struct ufs_hba *hba, u32 *status)
@@ -5123,6 +5091,7 @@ static int ufshcd_bkops_ctrl(struct ufs_hba *hba,
err = ufshcd_enable_auto_bkops(hba);
else
err = ufshcd_disable_auto_bkops(hba);
+ hba->urgent_bkops_lvl = curr_status;
out:
return err;
}
@@ -5200,7 +5169,7 @@ static void ufshcd_exception_event_handler(struct work_struct *work)
hba = container_of(work, struct ufs_hba, eeh_work);
pm_runtime_get_sync(hba->dev);
- scsi_block_requests(hba->host);
+ ufshcd_scsi_block_requests(hba);
err = ufshcd_get_ee_status(hba, &status);
if (err) {
dev_err(hba->dev, "%s: failed to get exception status %d\n",
@@ -5214,7 +5183,7 @@ static void ufshcd_exception_event_handler(struct work_struct *work)
ufshcd_bkops_exception_event_handler(hba);
out:
- scsi_unblock_requests(hba->host);
+ ufshcd_scsi_unblock_requests(hba);
pm_runtime_put_sync(hba->dev);
return;
}
@@ -5348,8 +5317,8 @@ static void ufshcd_err_handler(struct work_struct *work)
/*
* if host reset is required then skip clearing the pending
- * transfers forcefully because they will automatically get
- * cleared after link startup.
+ * transfers forcefully because they will get cleared during
+ * host reset and restore
*/
if (needs_reset)
goto skip_pending_xfer_clear;
@@ -5603,6 +5572,27 @@ static irqreturn_t ufshcd_check_errors(struct ufs_hba *hba)
return retval;
}
+struct ctm_info {
+ struct ufs_hba *hba;
+ unsigned long pending;
+ unsigned int ncpl;
+};
+
+static bool ufshcd_compl_tm(struct request *req, void *priv, bool reserved)
+{
+ struct ctm_info *const ci = priv;
+ struct completion *c;
+
+ WARN_ON_ONCE(reserved);
+ if (test_bit(req->tag, &ci->pending))
+ return true;
+ ci->ncpl++;
+ c = req->end_io_data;
+ if (c)
+ complete(c);
+ return true;
+}
+
/**
* ufshcd_tmc_handler - handle task management function completion
* @hba: per adapter instance
@@ -5613,16 +5603,14 @@ static irqreturn_t ufshcd_check_errors(struct ufs_hba *hba)
*/
static irqreturn_t ufshcd_tmc_handler(struct ufs_hba *hba)
{
- u32 tm_doorbell;
+ struct request_queue *q = hba->tmf_queue;
+ struct ctm_info ci = {
+ .hba = hba,
+ .pending = ufshcd_readl(hba, REG_UTP_TASK_REQ_DOOR_BELL),
+ };
- tm_doorbell = ufshcd_readl(hba, REG_UTP_TASK_REQ_DOOR_BELL);
- hba->tm_condition = tm_doorbell ^ hba->outstanding_tasks;
- if (hba->tm_condition) {
- wake_up(&hba->tm_wq);
- return IRQ_HANDLED;
- } else {
- return IRQ_NONE;
- }
+ blk_mq_tagset_busy_iter(q->tag_set, ufshcd_compl_tm, &ci);
+ return ci.ncpl ? IRQ_HANDLED : IRQ_NONE;
}
/**
@@ -5728,7 +5716,10 @@ out:
static int __ufshcd_issue_tm_cmd(struct ufs_hba *hba,
struct utp_task_req_desc *treq, u8 tm_function)
{
+ struct request_queue *q = hba->tmf_queue;
struct Scsi_Host *host = hba->host;
+ DECLARE_COMPLETION_ONSTACK(wait);
+ struct request *req;
unsigned long flags;
int free_slot, task_tag, err;
@@ -5737,7 +5728,10 @@ static int __ufshcd_issue_tm_cmd(struct ufs_hba *hba,
* Even though we use wait_event() which sleeps indefinitely,
* the maximum wait time is bounded by %TM_CMD_TIMEOUT.
*/
- wait_event(hba->tm_tag_wq, ufshcd_get_tm_free_slot(hba, &free_slot));
+ req = blk_get_request(q, REQ_OP_DRV_OUT, BLK_MQ_REQ_RESERVED);
+ if (IS_ERR(req))
+ return PTR_ERR(req);
+
+ req->end_io_data = &wait;
+ free_slot = req->tag;
+ WARN_ON_ONCE(free_slot < 0 || free_slot >= hba->nutmrs);
ufshcd_hold(hba, false);
spin_lock_irqsave(host->host_lock, flags);
@@ -5763,10 +5757,14 @@ static int __ufshcd_issue_tm_cmd(struct ufs_hba *hba,
ufshcd_add_tm_upiu_trace(hba, task_tag, "tm_send");
/* wait until the task management command is completed */
- err = wait_event_timeout(hba->tm_wq,
- test_bit(free_slot, &hba->tm_condition),
+ err = wait_for_completion_io_timeout(&wait,
msecs_to_jiffies(TM_CMD_TIMEOUT));
if (!err) {
+ /*
+ * Make sure that ufshcd_compl_tm() does not trigger a
+ * use-after-free.
+ */
+ req->end_io_data = NULL;
ufshcd_add_tm_upiu_trace(hba, task_tag, "tm_complete_err");
dev_err(hba->dev, "%s: task management cmd 0x%.2x timed-out\n",
__func__, tm_function);
@@ -5785,9 +5783,7 @@ static int __ufshcd_issue_tm_cmd(struct ufs_hba *hba,
__clear_bit(free_slot, &hba->outstanding_tasks);
spin_unlock_irqrestore(hba->host->host_lock, flags);
- clear_bit(free_slot, &hba->tm_condition);
- ufshcd_put_tm_slot(hba, free_slot);
- wake_up(&hba->tm_tag_wq);
+ blk_put_request(req);
ufshcd_release(hba);
return err;
@@ -5863,6 +5859,8 @@ static int ufshcd_issue_devman_upiu_cmd(struct ufs_hba *hba,
enum dev_cmd_type cmd_type,
enum query_opcode desc_op)
{
+ struct request_queue *q = hba->cmd_queue;
+ struct request *req;
struct ufshcd_lrb *lrbp;
int err = 0;
int tag;
@@ -5872,7 +5870,13 @@ static int ufshcd_issue_devman_upiu_cmd(struct ufs_hba *hba,
down_read(&hba->clk_scaling_lock);
- wait_event(hba->dev_cmd.tag_wq, ufshcd_get_dev_cmd_tag(hba, &tag));
+ req = blk_get_request(q, REQ_OP_DRV_OUT, 0);
+ if (IS_ERR(req)) {
+ err = PTR_ERR(req);
+ goto out_unlock;
+ }
+ tag = req->tag;
+ WARN_ON_ONCE(!ufshcd_valid_tag(hba, tag));
init_completion(&wait);
lrbp = &hba->lrb[tag];
@@ -5948,8 +5952,8 @@ static int ufshcd_issue_devman_upiu_cmd(struct ufs_hba *hba,
}
}
- ufshcd_put_dev_cmd_tag(hba, tag);
- wake_up(&hba->dev_cmd.tag_wq);
+ blk_put_request(req);
+out_unlock:
up_read(&hba->clk_scaling_lock);
return err;
}
@@ -6244,9 +6248,6 @@ static int ufshcd_abort(struct scsi_cmnd *cmd)
hba->lrb[tag].cmd = NULL;
spin_unlock_irqrestore(host->host_lock, flags);
- clear_bit_unlock(tag, &hba->lrb_in_use);
- wake_up(&hba->dev_cmd.tag_wq);
-
out:
if (!err) {
err = SUCCESS;
@@ -6279,9 +6280,15 @@ static int ufshcd_host_reset_and_restore(struct ufs_hba *hba)
int err;
unsigned long flags;
- /* Reset the host controller */
+ /*
+ * Stop the host controller and complete the requests
+ * cleared by h/w
+ */
spin_lock_irqsave(hba->host->host_lock, flags);
ufshcd_hba_stop(hba, false);
+ hba->silence_err_logs = true;
+ ufshcd_complete_requests(hba);
+ hba->silence_err_logs = false;
spin_unlock_irqrestore(hba->host->host_lock, flags);
/* scale up clocks to max frequency before full reinitialization */
@@ -6292,7 +6299,7 @@ static int ufshcd_host_reset_and_restore(struct ufs_hba *hba)
goto out;
/* Establish the link again and restore the device */
- err = ufshcd_probe_hba(hba);
+ err = ufshcd_probe_hba(hba, false);
if (!err && (hba->ufshcd_state != UFSHCD_STATE_OPERATIONAL))
err = -EIO;
@@ -6315,7 +6322,6 @@ out:
static int ufshcd_reset_and_restore(struct ufs_hba *hba)
{
int err = 0;
- unsigned long flags;
int retries = MAX_HOST_RESET_RETRIES;
do {
@@ -6325,15 +6331,6 @@ static int ufshcd_reset_and_restore(struct ufs_hba *hba)
err = ufshcd_host_reset_and_restore(hba);
} while (err && --retries);
- /*
- * After reset the door-bell might be cleared, complete
- * outstanding requests in s/w here.
- */
- spin_lock_irqsave(hba->host->host_lock, flags);
- ufshcd_transfer_req_compl(hba);
- ufshcd_tmc_handler(hba);
- spin_unlock_irqrestore(hba->host->host_lock, flags);
-
return err;
}
@@ -6488,7 +6485,8 @@ static void ufshcd_init_icc_levels(struct ufs_hba *hba)
if (!desc_buf)
return;
- ret = ufshcd_read_power_desc(hba, desc_buf, buff_len);
+ ret = ufshcd_read_desc(hba, QUERY_DESC_IDN_POWER, 0,
+ desc_buf, buff_len);
if (ret) {
dev_err(hba->dev,
"%s: Failed reading power descriptor.len = %d ret = %d",
@@ -6578,16 +6576,13 @@ out:
return ret;
}
-static int ufs_get_device_desc(struct ufs_hba *hba,
- struct ufs_dev_desc *dev_desc)
+static int ufs_get_device_desc(struct ufs_hba *hba)
{
int err;
size_t buff_len;
u8 model_index;
u8 *desc_buf;
-
- if (!dev_desc)
- return -EINVAL;
+ struct ufs_dev_info *dev_info = &hba->dev_info;
buff_len = max_t(size_t, hba->desc_size.dev_desc,
QUERY_DESC_MAX_SIZE + 1);
@@ -6597,7 +6592,8 @@ static int ufs_get_device_desc(struct ufs_hba *hba,
goto out;
}
- err = ufshcd_read_device_desc(hba, desc_buf, hba->desc_size.dev_desc);
+ err = ufshcd_read_desc(hba, QUERY_DESC_IDN_DEVICE, 0, desc_buf,
+ hba->desc_size.dev_desc);
if (err) {
dev_err(hba->dev, "%s: Failed reading Device Desc. err = %d\n",
__func__, err);
@@ -6608,12 +6604,12 @@ static int ufs_get_device_desc(struct ufs_hba *hba,
* getting vendor (manufacturerID) and Bank Index in big endian
* format
*/
- dev_desc->wmanufacturerid = desc_buf[DEVICE_DESC_PARAM_MANF_ID] << 8 |
+ dev_info->wmanufacturerid = desc_buf[DEVICE_DESC_PARAM_MANF_ID] << 8 |
desc_buf[DEVICE_DESC_PARAM_MANF_ID + 1];
model_index = desc_buf[DEVICE_DESC_PARAM_PRDCT_NAME];
err = ufshcd_read_string_desc(hba, model_index,
- &dev_desc->model, SD_ASCII_STD);
+ &dev_info->model, SD_ASCII_STD);
if (err < 0) {
dev_err(hba->dev, "%s: Failed reading Product Name. err = %d\n",
__func__, err);
@@ -6631,23 +6627,25 @@ out:
return err;
}
-static void ufs_put_device_desc(struct ufs_dev_desc *dev_desc)
+static void ufs_put_device_desc(struct ufs_hba *hba)
{
- kfree(dev_desc->model);
- dev_desc->model = NULL;
+ struct ufs_dev_info *dev_info = &hba->dev_info;
+
+ kfree(dev_info->model);
+ dev_info->model = NULL;
}
-static void ufs_fixup_device_setup(struct ufs_hba *hba,
- struct ufs_dev_desc *dev_desc)
+static void ufs_fixup_device_setup(struct ufs_hba *hba)
{
struct ufs_dev_fix *f;
+ struct ufs_dev_info *dev_info = &hba->dev_info;
for (f = ufs_fixups; f->quirk; f++) {
- if ((f->card.wmanufacturerid == dev_desc->wmanufacturerid ||
- f->card.wmanufacturerid == UFS_ANY_VENDOR) &&
- ((dev_desc->model &&
- STR_PRFX_EQUAL(f->card.model, dev_desc->model)) ||
- !strcmp(f->card.model, UFS_ANY_MODEL)))
+ if ((f->wmanufacturerid == dev_info->wmanufacturerid ||
+ f->wmanufacturerid == UFS_ANY_VENDOR) &&
+ ((dev_info->model &&
+ STR_PRFX_EQUAL(f->model, dev_info->model)) ||
+ !strcmp(f->model, UFS_ANY_MODEL)))
hba->dev_quirks |= f->quirk;
}
}
@@ -6863,6 +6861,37 @@ static void ufshcd_init_desc_sizes(struct ufs_hba *hba)
hba->desc_size.hlth_desc = QUERY_DESC_HEALTH_DEF_SIZE;
}
+static int ufshcd_device_geo_params_init(struct ufs_hba *hba)
+{
+ int err;
+ size_t buff_len;
+ u8 *desc_buf;
+
+ buff_len = hba->desc_size.geom_desc;
+ desc_buf = kmalloc(buff_len, GFP_KERNEL);
+ if (!desc_buf) {
+ err = -ENOMEM;
+ goto out;
+ }
+
+ err = ufshcd_read_desc(hba, QUERY_DESC_IDN_GEOMETRY, 0,
+ desc_buf, buff_len);
+ if (err) {
+ dev_err(hba->dev, "%s: Failed reading Geometry Desc. err = %d\n",
+ __func__, err);
+ goto out;
+ }
+
+ if (desc_buf[GEOMETRY_DESC_PARAM_MAX_NUM_LUN] == 1)
+ hba->dev_info.max_lu_supported = 32;
+ else if (desc_buf[GEOMETRY_DESC_PARAM_MAX_NUM_LUN] == 0)
+ hba->dev_info.max_lu_supported = 8;
+
+out:
+ kfree(desc_buf);
+ return err;
+}
+
static struct ufs_ref_clk ufs_ref_clk_freqs[] = {
{19200000, REF_CLK_FREQ_19_2_MHZ},
{26000000, REF_CLK_FREQ_26_MHZ},
@@ -6931,15 +6960,92 @@ out:
return err;
}
+static int ufshcd_device_params_init(struct ufs_hba *hba)
+{
+ bool flag;
+ int ret;
+
+ /* Clear any previous UFS device information */
+ memset(&hba->dev_info, 0, sizeof(hba->dev_info));
+
+ /* Init check for device descriptor sizes */
+ ufshcd_init_desc_sizes(hba);
+
+ /* Init UFS geometry descriptor related parameters */
+ ret = ufshcd_device_geo_params_init(hba);
+ if (ret)
+ goto out;
+
+ /* Check and apply UFS device quirks */
+ ret = ufs_get_device_desc(hba);
+ if (ret) {
+ dev_err(hba->dev, "%s: Failed getting device info. err = %d\n",
+ __func__, ret);
+ goto out;
+ }
+
+ ufs_fixup_device_setup(hba);
+
+ if (!ufshcd_query_flag_retry(hba, UPIU_QUERY_OPCODE_READ_FLAG,
+ QUERY_FLAG_IDN_PWR_ON_WPE, &flag))
+ hba->dev_info.f_power_on_wp_en = flag;
+
+	/* Probe the maximum power mode supported by both the UFS host and device */
+ if (ufshcd_get_max_pwr_mode(hba))
+ dev_err(hba->dev,
+ "%s: Failed getting max supported power mode\n",
+ __func__);
+out:
+ return ret;
+}
+
+/**
+ * ufshcd_add_lus - probe and add UFS logical units
+ * @hba: per-adapter instance
+ */
+static int ufshcd_add_lus(struct ufs_hba *hba)
+{
+ int ret;
+
+ ufshcd_init_icc_levels(hba);
+
+ /* Add required well known logical units to scsi mid layer */
+ ret = ufshcd_scsi_add_wlus(hba);
+ if (ret)
+ goto out;
+
+ /* Initialize devfreq after UFS device is detected */
+ if (ufshcd_is_clkscaling_supported(hba)) {
+ memcpy(&hba->clk_scaling.saved_pwr_info.info,
+ &hba->pwr_info,
+ sizeof(struct ufs_pa_layer_attr));
+ hba->clk_scaling.saved_pwr_info.is_valid = true;
+ if (!hba->devfreq) {
+ ret = ufshcd_devfreq_init(hba);
+ if (ret)
+ goto out;
+ }
+
+ hba->clk_scaling.is_allowed = true;
+ }
+
+ ufs_bsg_probe(hba);
+ scsi_scan_host(hba->host);
+ pm_runtime_put_sync(hba->dev);
+
+out:
+ return ret;
+}
+
/**
* ufshcd_probe_hba - probe hba to detect device and initialize
* @hba: per-adapter instance
+ * @async: asynchronous execution or not
*
* Execute link-startup and verify device initialization
*/
-static int ufshcd_probe_hba(struct ufs_hba *hba)
+static int ufshcd_probe_hba(struct ufs_hba *hba, bool async)
{
- struct ufs_dev_desc card = {0};
int ret;
ktime_t start = ktime_get();
@@ -6957,27 +7063,26 @@ static int ufshcd_probe_hba(struct ufs_hba *hba)
/* UniPro link is active now */
ufshcd_set_link_active(hba);
+ /* Verify device initialization by sending NOP OUT UPIU */
ret = ufshcd_verify_dev_init(hba);
if (ret)
goto out;
+	/* Initiate UFS initialization and wait until completion */
ret = ufshcd_complete_dev_init(hba);
if (ret)
goto out;
- /* Init check for device descriptor sizes */
- ufshcd_init_desc_sizes(hba);
-
- ret = ufs_get_device_desc(hba, &card);
- if (ret) {
- dev_err(hba->dev, "%s: Failed getting device info. err = %d\n",
- __func__, ret);
- goto out;
+ /*
+	 * Initialize UFS device parameters used by the driver; these
+ * parameters are associated with UFS descriptors.
+ */
+ if (async) {
+ ret = ufshcd_device_params_init(hba);
+ if (ret)
+ goto out;
}
- ufs_fixup_device_setup(hba, &card);
- ufs_put_device_desc(&card);
-
ufshcd_tune_unipro_params(hba);
/* UFS device is also active now */
@@ -6985,11 +7090,8 @@ static int ufshcd_probe_hba(struct ufs_hba *hba)
ufshcd_force_reset_auto_bkops(hba);
hba->wlun_dev_clr_ua = true;
- if (ufshcd_get_max_pwr_mode(hba)) {
- dev_err(hba->dev,
- "%s: Failed getting max supported power mode\n",
- __func__);
- } else {
+ /* Gear up to HS gear if supported */
+ if (hba->max_pwr_info.is_valid) {
/*
* Set the right value to bRefClkFreq before attempting to
* switch to HS gears.
@@ -7010,59 +7112,7 @@ static int ufshcd_probe_hba(struct ufs_hba *hba)
/* Enable Auto-Hibernate if configured */
ufshcd_auto_hibern8_enable(hba);
- /*
- * If we are in error handling context or in power management callbacks
- * context, no need to scan the host
- */
- if (!ufshcd_eh_in_progress(hba) && !hba->pm_op_in_progress) {
- bool flag;
-
- /* clear any previous UFS device information */
- memset(&hba->dev_info, 0, sizeof(hba->dev_info));
- if (!ufshcd_query_flag_retry(hba, UPIU_QUERY_OPCODE_READ_FLAG,
- QUERY_FLAG_IDN_PWR_ON_WPE, &flag))
- hba->dev_info.f_power_on_wp_en = flag;
-
- if (!hba->is_init_prefetch)
- ufshcd_init_icc_levels(hba);
-
- /* Add required well known logical units to scsi mid layer */
- if (ufshcd_scsi_add_wlus(hba))
- goto out;
-
- /* Initialize devfreq after UFS device is detected */
- if (ufshcd_is_clkscaling_supported(hba)) {
- memcpy(&hba->clk_scaling.saved_pwr_info.info,
- &hba->pwr_info,
- sizeof(struct ufs_pa_layer_attr));
- hba->clk_scaling.saved_pwr_info.is_valid = true;
- if (!hba->devfreq) {
- ret = ufshcd_devfreq_init(hba);
- if (ret)
- goto out;
- }
- hba->clk_scaling.is_allowed = true;
- }
-
- ufs_bsg_probe(hba);
-
- scsi_scan_host(hba->host);
- pm_runtime_put_sync(hba->dev);
- }
-
- if (!hba->is_init_prefetch)
- hba->is_init_prefetch = true;
-
out:
- /*
- * If we failed to initialize the device or the device is not
- * present, turn off the power/clocks etc.
- */
- if (ret && !ufshcd_eh_in_progress(hba) && !hba->pm_op_in_progress) {
- pm_runtime_put_sync(hba->dev);
- ufshcd_exit_clk_scaling(hba);
- ufshcd_hba_exit(hba);
- }
trace_ufshcd_init(dev_name(hba->dev), ret,
ktime_to_us(ktime_sub(ktime_get(), start)),
@@ -7078,43 +7128,25 @@ out:
static void ufshcd_async_scan(void *data, async_cookie_t cookie)
{
struct ufs_hba *hba = (struct ufs_hba *)data;
+ int ret;
- ufshcd_probe_hba(hba);
-}
-
-static enum blk_eh_timer_return ufshcd_eh_timed_out(struct scsi_cmnd *scmd)
-{
- unsigned long flags;
- struct Scsi_Host *host;
- struct ufs_hba *hba;
- int index;
- bool found = false;
-
- if (!scmd || !scmd->device || !scmd->device->host)
- return BLK_EH_DONE;
-
- host = scmd->device->host;
- hba = shost_priv(host);
- if (!hba)
- return BLK_EH_DONE;
-
- spin_lock_irqsave(host->host_lock, flags);
-
- for_each_set_bit(index, &hba->outstanding_reqs, hba->nutrs) {
- if (hba->lrb[index].cmd == scmd) {
- found = true;
- break;
- }
- }
-
- spin_unlock_irqrestore(host->host_lock, flags);
+ /* Initialize hba, detect and initialize UFS device */
+ ret = ufshcd_probe_hba(hba, true);
+ if (ret)
+ goto out;
+ /* Probe and add UFS logical units */
+ ret = ufshcd_add_lus(hba);
+out:
/*
- * Bypass SCSI error handling and reset the block layer timer if this
- * SCSI command was not actually dispatched to UFS driver, otherwise
- * let SCSI layer handle the error as usual.
+ * If we failed to initialize the device or the device is not
+ * present, turn off the power/clocks etc.
*/
- return found ? BLK_EH_DONE : BLK_EH_RESET_TIMER;
+ if (ret) {
+ pm_runtime_put_sync(hba->dev);
+ ufshcd_exit_clk_scaling(hba);
+ ufshcd_hba_exit(hba);
+ }
}
static const struct attribute_group *ufshcd_driver_groups[] = {
@@ -7135,7 +7167,6 @@ static struct scsi_host_template ufshcd_driver_template = {
.eh_abort_handler = ufshcd_abort,
.eh_device_reset_handler = ufshcd_eh_device_reset_handler,
.eh_host_reset_handler = ufshcd_eh_host_reset_handler,
- .eh_timed_out = ufshcd_eh_timed_out,
.this_id = -1,
.sg_tablesize = SG_ALL,
.cmd_per_lun = UFSHCD_CMD_PER_LUN,
@@ -7574,6 +7605,7 @@ static void ufshcd_hba_exit(struct ufs_hba *hba)
ufshcd_setup_clocks(hba, false);
ufshcd_setup_hba_vreg(hba, false);
hba->is_powered = false;
+ ufs_put_device_desc(hba);
}
}
@@ -7701,8 +7733,7 @@ static int ufshcd_link_state_transition(struct ufs_hba *hba,
* turning off the link would also turn off the device.
*/
else if ((req_link_state == UIC_LINK_OFF_STATE) &&
- (!check_for_bkops || (check_for_bkops &&
- !hba->auto_bkops_enabled))) {
+ (!check_for_bkops || !hba->auto_bkops_enabled)) {
/*
* Let's make sure that link is in low power mode, we are doing
* this currently by putting the link in Hibern8. Otherway to
@@ -7908,6 +7939,11 @@ disable_clks:
ret = ufshcd_vops_suspend(hba, pm_op);
if (ret)
goto set_link_active;
+ /*
+	 * Disable the host irq as there won't be any
+ * host controller transaction expected till resume.
+ */
+ ufshcd_disable_irq(hba);
if (!ufshcd_is_link_active(hba))
ufshcd_setup_clocks(hba, false);
@@ -7917,11 +7953,7 @@ disable_clks:
hba->clk_gating.state = CLKS_OFF;
trace_ufshcd_clk_gating(dev_name(hba->dev), hba->clk_gating.state);
- /*
- * Disable the host irq as host controller as there won't be any
- * host controller transaction expected till resume.
- */
- ufshcd_disable_irq(hba);
+
/* Put the host controller in low power mode if possible */
ufshcd_hba_vreg_set_lpm(hba);
goto out;
@@ -7974,9 +8006,7 @@ static int ufshcd_resume(struct ufs_hba *hba, enum ufs_pm_op pm_op)
goto out;
/* enable the host irq as host controller would be active soon */
- ret = ufshcd_enable_irq(hba);
- if (ret)
- goto disable_irq_and_vops_clks;
+ ufshcd_enable_irq(hba);
ret = ufshcd_vreg_set_hpm(hba);
if (ret)
@@ -8250,6 +8280,9 @@ void ufshcd_remove(struct ufs_hba *hba)
{
ufs_bsg_remove(hba);
ufs_sysfs_remove_nodes(hba->dev);
+ blk_cleanup_queue(hba->tmf_queue);
+ blk_mq_free_tag_set(&hba->tmf_tag_set);
+ blk_cleanup_queue(hba->cmd_queue);
scsi_remove_host(hba->host);
/* disable interrupts */
ufshcd_disable_intr(hba, hba->intr_mask);
@@ -8328,6 +8361,18 @@ out_error:
}
EXPORT_SYMBOL(ufshcd_alloc_host);
+/* This function exists because blk_mq_alloc_tag_set() requires this. */
+static blk_status_t ufshcd_queue_tmf(struct blk_mq_hw_ctx *hctx,
+ const struct blk_mq_queue_data *qd)
+{
+ WARN_ON_ONCE(true);
+ return BLK_STS_NOTSUPP;
+}
+
+static const struct blk_mq_ops ufshcd_tmf_ops = {
+ .queue_rq = ufshcd_queue_tmf,
+};
+
/**
* ufshcd_init - Driver initialization routine
* @hba: per-adapter instance
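For context, a minimal sketch of how the new tmf_queue is meant to hand out task-management slots in place of the old tm_tag_wq wait queue: blk_get_request() blocks until one of the hba->nutmrs tags is free, and req->tag then indexes the UTMRL slot. The helper name below is made up for illustration; the driver's real TMF issue path does additional bookkeeping around the same tag.

/* Hypothetical helper, for illustration only. */
static int example_reserve_tmf_slot(struct ufs_hba *hba)
{
	struct request *req;
	int free_slot;

	/* Blocks until one of the hba->nutmrs TMF tags becomes available. */
	req = blk_get_request(hba->tmf_queue, REQ_OP_DRV_OUT, 0);
	if (IS_ERR(req))
		return PTR_ERR(req);

	free_slot = req->tag;	/* index into the UTM request list */

	/* ... fill the UTMRD at free_slot and ring the doorbell ... */

	blk_put_request(req);	/* releases the slot for other waiters */
	return 0;
}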
@@ -8397,10 +8442,6 @@ int ufshcd_init(struct ufs_hba *hba, void __iomem *mmio_base, unsigned int irq)
hba->max_pwr_info.is_valid = false;
- /* Initailize wait queue for task management */
- init_waitqueue_head(&hba->tm_wq);
- init_waitqueue_head(&hba->tm_tag_wq);
-
/* Initialize work queues */
INIT_WORK(&hba->eh_work, ufshcd_err_handler);
INIT_WORK(&hba->eeh_work, ufshcd_exception_event_handler);
@@ -8413,9 +8454,6 @@ int ufshcd_init(struct ufs_hba *hba, void __iomem *mmio_base, unsigned int irq)
init_rwsem(&hba->clk_scaling_lock);
- /* Initialize device management tag acquire wait queue */
- init_waitqueue_head(&hba->dev_cmd.tag_wq);
-
ufshcd_init_clk_gating(hba);
ufshcd_init_clk_scaling(hba);
@@ -8449,6 +8487,27 @@ int ufshcd_init(struct ufs_hba *hba, void __iomem *mmio_base, unsigned int irq)
goto exit_gating;
}
+ hba->cmd_queue = blk_mq_init_queue(&hba->host->tag_set);
+ if (IS_ERR(hba->cmd_queue)) {
+ err = PTR_ERR(hba->cmd_queue);
+ goto out_remove_scsi_host;
+ }
+
+ hba->tmf_tag_set = (struct blk_mq_tag_set) {
+ .nr_hw_queues = 1,
+ .queue_depth = hba->nutmrs,
+ .ops = &ufshcd_tmf_ops,
+ .flags = BLK_MQ_F_NO_SCHED,
+ };
+ err = blk_mq_alloc_tag_set(&hba->tmf_tag_set);
+ if (err < 0)
+ goto free_cmd_queue;
+ hba->tmf_queue = blk_mq_init_queue(&hba->tmf_tag_set);
+ if (IS_ERR(hba->tmf_queue)) {
+ err = PTR_ERR(hba->tmf_queue);
+ goto free_tmf_tag_set;
+ }
+
/* Reset the attached device */
ufshcd_vops_device_reset(hba);
@@ -8458,7 +8517,7 @@ int ufshcd_init(struct ufs_hba *hba, void __iomem *mmio_base, unsigned int irq)
dev_err(hba->dev, "Host controller enable failed\n");
ufshcd_print_host_regs(hba);
ufshcd_print_host_state(hba);
- goto out_remove_scsi_host;
+ goto free_tmf_queue;
}
/*
@@ -8495,6 +8554,12 @@ int ufshcd_init(struct ufs_hba *hba, void __iomem *mmio_base, unsigned int irq)
return 0;
+free_tmf_queue:
+ blk_cleanup_queue(hba->tmf_queue);
+free_tmf_tag_set:
+ blk_mq_free_tag_set(&hba->tmf_tag_set);
+free_cmd_queue:
+ blk_cleanup_queue(hba->cmd_queue);
out_remove_scsi_host:
scsi_remove_host(hba->host);
exit_gating:
diff --git a/drivers/scsi/ufs/ufshcd.h b/drivers/scsi/ufs/ufshcd.h
index 2740f6941ec6..2ae6c7c8528c 100644
--- a/drivers/scsi/ufs/ufshcd.h
+++ b/drivers/scsi/ufs/ufshcd.h
@@ -212,13 +212,11 @@ struct ufs_query {
* @type: device management command type - Query, NOP OUT
* @lock: lock to allow one command at a time
* @complete: internal commands completion
- * @tag_wq: wait queue until free command slot is available
*/
struct ufs_dev_cmd {
enum dev_cmd_type type;
struct mutex lock;
struct completion *complete;
- wait_queue_head_t tag_wq;
struct ufs_query query;
};
@@ -322,7 +320,7 @@ struct ufs_hba_variant_ops {
void (*setup_task_mgmt)(struct ufs_hba *, int, u8);
void (*hibern8_notify)(struct ufs_hba *, enum uic_cmd_dme,
enum ufs_notify_change_status);
- int (*apply_dev_quirks)(struct ufs_hba *);
+ int (*apply_dev_quirks)(struct ufs_hba *hba);
int (*suspend)(struct ufs_hba *, enum ufs_pm_op);
int (*resume)(struct ufs_hba *, enum ufs_pm_op);
void (*dbg_register_dump)(struct ufs_hba *hba);
@@ -483,7 +481,7 @@ struct ufs_stats {
* @host: Scsi_Host instance of the driver
* @dev: device handle
* @lrb: local reference block
- * @lrb_in_use: lrb in use
+ * @cmd_queue: Used to allocate command tags from hba->host->tag_set.
* @outstanding_tasks: Bits representing outstanding task requests
* @outstanding_reqs: Bits representing outstanding transfer requests
* @capabilities: UFS Controller Capabilities
@@ -495,17 +493,14 @@ struct ufs_stats {
* @irq: Irq number of the controller
* @active_uic_cmd: handle of active UIC command
* @uic_cmd_mutex: mutex for uic command
- * @tm_wq: wait queue for task management
- * @tm_tag_wq: wait queue for free task management slots
- * @tm_slots_in_use: bit map of task management request slots in use
+ * @tmf_tag_set: TMF tag set.
+ * @tmf_queue: Used to allocate TMF tags.
* @pwr_done: completion for power mode change
- * @tm_condition: condition variable for task management
* @ufshcd_state: UFSHCD states
* @eh_flags: Error handling flags
* @intr_mask: Interrupt Mask Bits
* @ee_ctrl_mask: Exception event control mask
* @is_powered: flag to check if HBA is powered
- * @is_init_prefetch: flag to check if data was pre-fetched in initialization
* @init_prefetch_data: data pre-fetched during initialization
* @eh_work: Worker to handle UFS errors that require s/w attention
* @eeh_work: Worker to handle exception events
@@ -513,6 +508,7 @@ struct ufs_stats {
* @uic_error: UFS interconnect layer error status
* @saved_err: sticky error mask
* @saved_uic_err: sticky UIC error mask
+ * @silence_err_logs: flag to silence error logs
* @dev_cmd: ufs device management command information
* @last_dme_cmd_tstamp: time stamp of the last completed DME command
* @auto_bkops_enabled: to track whether bkops is enabled in device
@@ -541,6 +537,7 @@ struct ufs_hba {
struct Scsi_Host *host;
struct device *dev;
+ struct request_queue *cmd_queue;
/*
* This field is to keep a reference to "scsi_device" corresponding to
* "UFS device" W-LU.
@@ -561,7 +558,6 @@ struct ufs_hba {
u32 ahit;
struct ufshcd_lrb *lrb;
- unsigned long lrb_in_use;
unsigned long outstanding_tasks;
unsigned long outstanding_reqs;
@@ -643,10 +639,8 @@ struct ufs_hba {
/* Device deviations from standard UFS device spec. */
unsigned int dev_quirks;
- wait_queue_head_t tm_wq;
- wait_queue_head_t tm_tag_wq;
- unsigned long tm_condition;
- unsigned long tm_slots_in_use;
+ struct blk_mq_tag_set tmf_tag_set;
+ struct request_queue *tmf_queue;
struct uic_command *active_uic_cmd;
struct mutex uic_cmd_mutex;
@@ -657,7 +651,6 @@ struct ufs_hba {
u32 intr_mask;
u16 ee_ctrl_mask;
bool is_powered;
- bool is_init_prefetch;
struct ufs_init_prefetch init_prefetch_data;
/* Work Queues */
@@ -670,6 +663,7 @@ struct ufs_hba {
u32 saved_err;
u32 saved_uic_err;
struct ufs_stats ufs_stats;
+ bool silence_err_logs;
/* Device management request data */
struct ufs_dev_cmd dev_cmd;
@@ -803,12 +797,17 @@ static inline void ufshcd_rmwl(struct ufs_hba *hba, u32 mask, u32 val, u32 reg)
int ufshcd_alloc_host(struct device *, struct ufs_hba **);
void ufshcd_dealloc_host(struct ufs_hba *);
+int ufshcd_hba_enable(struct ufs_hba *hba);
int ufshcd_init(struct ufs_hba * , void __iomem * , unsigned int);
+int ufshcd_make_hba_operational(struct ufs_hba *hba);
void ufshcd_remove(struct ufs_hba *);
+int ufshcd_uic_hibern8_exit(struct ufs_hba *hba);
int ufshcd_wait_for_register(struct ufs_hba *hba, u32 reg, u32 mask,
u32 val, unsigned long interval_us,
unsigned long timeout_ms, bool can_sleep);
void ufshcd_parse_dev_ref_clk_freq(struct ufs_hba *hba, struct clk *refclk);
+void ufshcd_update_reg_hist(struct ufs_err_reg_hist *reg_hist,
+ u32 reg);
static inline void check_upiu_size(void)
{
@@ -927,6 +926,7 @@ int ufshcd_query_flag(struct ufs_hba *hba, enum query_opcode opcode,
enum flag_idn idn, bool *flag_res);
void ufshcd_auto_hibern8_enable(struct ufs_hba *hba);
+void ufshcd_auto_hibern8_update(struct ufs_hba *hba, u32 ahit);
#define SD_ASCII_STD true
#define SD_RAW false
@@ -1086,8 +1086,10 @@ static inline void ufshcd_vops_dbg_register_dump(struct ufs_hba *hba)
static inline void ufshcd_vops_device_reset(struct ufs_hba *hba)
{
- if (hba->vops && hba->vops->device_reset)
+ if (hba->vops && hba->vops->device_reset) {
hba->vops->device_reset(hba);
+ ufshcd_update_reg_hist(&hba->ufs_stats.dev_reset, 0);
+ }
}
extern struct ufs_pm_lvl_states ufs_pm_lvl_states[];
diff --git a/drivers/scsi/ufs/unipro.h b/drivers/scsi/ufs/unipro.h
index f539f873f94d..3dc4d8b76509 100644
--- a/drivers/scsi/ufs/unipro.h
+++ b/drivers/scsi/ufs/unipro.h
@@ -161,6 +161,17 @@
/* PHY Adapter Protocol Constants */
#define PA_MAXDATALANES 4
+#define DL_FC0ProtectionTimeOutVal_Default 8191
+#define DL_TC0ReplayTimeOutVal_Default 65535
+#define DL_AFC0ReqTimeOutVal_Default 32767
+#define DL_FC1ProtectionTimeOutVal_Default 8191
+#define DL_TC1ReplayTimeOutVal_Default 65535
+#define DL_AFC1ReqTimeOutVal_Default 32767
+
+#define DME_LocalFC0ProtectionTimeOutVal 0xD041
+#define DME_LocalTC0ReplayTimeOutVal 0xD042
+#define DME_LocalAFC0ReqTimeOutVal 0xD043
+
/* PA power modes */
enum {
FAST_MODE = 1,
diff --git a/drivers/scsi/vmw_pvscsi.c b/drivers/scsi/vmw_pvscsi.c
index 70008816c91f..c3f010df641e 100644
--- a/drivers/scsi/vmw_pvscsi.c
+++ b/drivers/scsi/vmw_pvscsi.c
@@ -365,7 +365,7 @@ static int pvscsi_map_buffers(struct pvscsi_adapter *adapter,
int segs = scsi_dma_map(cmd);
if (segs == -ENOMEM) {
- scmd_printk(KERN_ERR, cmd,
+ scmd_printk(KERN_DEBUG, cmd,
"vmw_pvscsi: Failed to map cmd sglist for DMA.\n");
return -ENOMEM;
} else if (segs > 1) {
@@ -392,7 +392,7 @@ static int pvscsi_map_buffers(struct pvscsi_adapter *adapter,
ctx->dataPA = dma_map_single(&adapter->dev->dev, sg, bufflen,
cmd->sc_data_direction);
if (dma_mapping_error(&adapter->dev->dev, ctx->dataPA)) {
- scmd_printk(KERN_ERR, cmd,
+ scmd_printk(KERN_DEBUG, cmd,
"vmw_pvscsi: Failed to map direct data buffer for DMA.\n");
return -ENOMEM;
}
@@ -402,6 +402,17 @@ static int pvscsi_map_buffers(struct pvscsi_adapter *adapter,
return 0;
}
+/*
+ * The device incorrectly doesn't clear the first byte of the sense
+ * buffer in some cases. We have to do it ourselves.
+ * Otherwise we run into trouble when SWIOTLB is forced.
+ */
+static void pvscsi_patch_sense(struct scsi_cmnd *cmd)
+{
+ if (cmd->sense_buffer)
+ cmd->sense_buffer[0] = 0;
+}
+
static void pvscsi_unmap_buffers(const struct pvscsi_adapter *adapter,
struct pvscsi_ctx *ctx)
{
@@ -544,6 +555,8 @@ static void pvscsi_complete_request(struct pvscsi_adapter *adapter,
cmd = ctx->cmd;
abort_cmp = ctx->abort_cmp;
pvscsi_unmap_buffers(adapter, ctx);
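+	/*
+	 * Only a CHECK CONDITION completion carries valid sense data from
+	 * the device; for any other status, clear the stale first byte.
+	 */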
+ if (sdstat != SAM_STAT_CHECK_CONDITION)
+ pvscsi_patch_sense(cmd);
pvscsi_release_context(adapter, ctx);
if (abort_cmp) {
/*
@@ -712,7 +725,7 @@ static int pvscsi_queue_ring(struct pvscsi_adapter *adapter,
cmd->sense_buffer, SCSI_SENSE_BUFFERSIZE,
DMA_FROM_DEVICE);
if (dma_mapping_error(&adapter->dev->dev, ctx->sensePA)) {
- scmd_printk(KERN_ERR, cmd,
+ scmd_printk(KERN_DEBUG, cmd,
"vmw_pvscsi: Failed to map sense buffer for DMA.\n");
ctx->sensePA = 0;
return -ENOMEM;
@@ -873,6 +886,7 @@ static void pvscsi_reset_all(struct pvscsi_adapter *adapter)
scmd_printk(KERN_ERR, cmd,
"Forced reset on cmd %p\n", cmd);
pvscsi_unmap_buffers(adapter, ctx);
+ pvscsi_patch_sense(cmd);
pvscsi_release_context(adapter, ctx);
cmd->result = (DID_RESET << 16);
cmd->scsi_done(cmd);
diff --git a/drivers/scsi/zalon.c b/drivers/scsi/zalon.c
index 77bce208210e..7eac76cccc4c 100644
--- a/drivers/scsi/zalon.c
+++ b/drivers/scsi/zalon.c
@@ -89,7 +89,7 @@ zalon_probe(struct parisc_device *dev)
struct gsc_irq gsc_irq;
u32 zalon_vers;
int error = -ENODEV;
- void __iomem *zalon = ioremap_nocache(dev->hpa.start, 4096);
+ void __iomem *zalon = ioremap(dev->hpa.start, 4096);
void __iomem *io_port = zalon + GSC_SCSI_ZALON_OFFSET;
static int unit = 0;
struct Scsi_Host *host;
diff --git a/drivers/scsi/zorro_esp.c b/drivers/scsi/zorro_esp.c
index a23a8e5794f5..bdd82e497d5f 100644
--- a/drivers/scsi/zorro_esp.c
+++ b/drivers/scsi/zorro_esp.c
@@ -801,7 +801,7 @@ static int zorro_esp_probe(struct zorro_dev *z,
/* additional setup required for Fastlane */
if (zep->zorro3 && ent->driver_data == ZORRO_BLZ1230II) {
/* map full address space up to ESP base for DMA */
- zep->board_base = ioremap_nocache(board,
+ zep->board_base = ioremap(board,
FASTLANE_ESP_ADDR-1);
if (!zep->board_base) {
pr_err("Cannot allocate board address space\n");
@@ -816,7 +816,7 @@ static int zorro_esp_probe(struct zorro_dev *z,
esp->ops = zdd->esp_ops;
if (ioaddr > 0xffffff)
- esp->regs = ioremap_nocache(ioaddr, 0x20);
+ esp->regs = ioremap(ioaddr, 0x20);
else
/* ZorroII address space remapped nocache by early startup */
esp->regs = ZTWO_VADDR(ioaddr);
@@ -842,7 +842,7 @@ static int zorro_esp_probe(struct zorro_dev *z,
* Only Fastlane Z3 for now - add switch for correct struct
* dma_registers size if adding any more
*/
- esp->dma_regs = ioremap_nocache(dmaaddr,
+ esp->dma_regs = ioremap(dmaaddr,
sizeof(struct fastlane_dma_registers));
} else
/* ZorroII address space remapped nocache by early startup */
diff --git a/drivers/sh/clk/core.c b/drivers/sh/clk/core.c
index 9475353f49d6..d996782a7106 100644
--- a/drivers/sh/clk/core.c
+++ b/drivers/sh/clk/core.c
@@ -368,7 +368,7 @@ static int clk_establish_mapping(struct clk *clk)
if (!mapping->base && mapping->phys) {
kref_init(&mapping->ref);
- mapping->base = ioremap_nocache(mapping->phys, mapping->len);
+ mapping->base = ioremap(mapping->phys, mapping->len);
if (unlikely(!mapping->base))
return -ENXIO;
} else if (mapping->base) {
diff --git a/drivers/sh/intc/core.c b/drivers/sh/intc/core.c
index 8485e812d9b2..f8e070d67fa3 100644
--- a/drivers/sh/intc/core.c
+++ b/drivers/sh/intc/core.c
@@ -213,7 +213,7 @@ int __init register_intc_controller(struct intc_desc *desc)
WARN_ON(resource_type(res) != IORESOURCE_MEM);
d->window[k].phys = res->start;
d->window[k].size = resource_size(res);
- d->window[k].virt = ioremap_nocache(res->start,
+ d->window[k].virt = ioremap(res->start,
resource_size(res));
if (!d->window[k].virt)
goto err2;
diff --git a/drivers/sh/intc/userimask.c b/drivers/sh/intc/userimask.c
index 87d69e7471f9..f9f043a3d90a 100644
--- a/drivers/sh/intc/userimask.c
+++ b/drivers/sh/intc/userimask.c
@@ -73,7 +73,7 @@ int register_intc_userimask(unsigned long addr)
if (unlikely(uimask))
return -EBUSY;
- uimask = ioremap_nocache(addr, SZ_4K);
+ uimask = ioremap(addr, SZ_4K);
if (unlikely(!uimask))
return -ENOMEM;
diff --git a/drivers/siox/siox.h b/drivers/siox/siox.h
index c674bf6fb119..f08b43b713c5 100644
--- a/drivers/siox/siox.h
+++ b/drivers/siox/siox.h
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* Copyright (C) 2015-2017 Pengutronix, Uwe Kleine-König <kernel@pengutronix.de>
*/
diff --git a/drivers/slimbus/qcom-ctrl.c b/drivers/slimbus/qcom-ctrl.c
index a444badd8df5..4aad2566f52d 100644
--- a/drivers/slimbus/qcom-ctrl.c
+++ b/drivers/slimbus/qcom-ctrl.c
@@ -641,6 +641,8 @@ static int qcom_slim_remove(struct platform_device *pdev)
pm_runtime_disable(&pdev->dev);
slim_unregister_controller(&ctrl->ctrl);
+ clk_disable_unprepare(ctrl->rclk);
+ clk_disable_unprepare(ctrl->hclk);
destroy_workqueue(ctrl->rxwq);
return 0;
}
diff --git a/drivers/slimbus/qcom-ngd-ctrl.c b/drivers/slimbus/qcom-ngd-ctrl.c
index 29fbab55c3b3..e3f5ebc0c05e 100644
--- a/drivers/slimbus/qcom-ngd-ctrl.c
+++ b/drivers/slimbus/qcom-ngd-ctrl.c
@@ -666,10 +666,12 @@ static int qcom_slim_ngd_init_rx_msgq(struct qcom_slim_ngd_ctrl *ctrl)
struct device *dev = ctrl->dev;
int ret, size;
- ctrl->dma_rx_channel = dma_request_slave_channel(dev, "rx");
- if (!ctrl->dma_rx_channel) {
- dev_err(dev, "Failed to request dma channels");
- return -EINVAL;
+ ctrl->dma_rx_channel = dma_request_chan(dev, "rx");
+ if (IS_ERR(ctrl->dma_rx_channel)) {
+ dev_err(dev, "Failed to request RX dma channel");
+ ret = PTR_ERR(ctrl->dma_rx_channel);
+ ctrl->dma_rx_channel = NULL;
+ return ret;
}
size = QCOM_SLIM_NGD_DESC_NUM * SLIM_MSGQ_BUF_LEN;
@@ -703,10 +705,12 @@ static int qcom_slim_ngd_init_tx_msgq(struct qcom_slim_ngd_ctrl *ctrl)
int ret = 0;
int size;
- ctrl->dma_tx_channel = dma_request_slave_channel(dev, "tx");
- if (!ctrl->dma_tx_channel) {
- dev_err(dev, "Failed to request dma channels");
- return -EINVAL;
+ ctrl->dma_tx_channel = dma_request_chan(dev, "tx");
+ if (IS_ERR(ctrl->dma_tx_channel)) {
+ dev_err(dev, "Failed to request TX dma channel");
+ ret = PTR_ERR(ctrl->dma_tx_channel);
+ ctrl->dma_tx_channel = NULL;
+ return ret;
}
size = ((QCOM_SLIM_NGD_DESC_NUM + 1) * SLIM_MSGQ_BUF_LEN);
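The point of moving from dma_request_slave_channel() to dma_request_chan() is that the latter returns an ERR_PTR instead of NULL, so real error codes, notably -EPROBE_DEFER from a not-yet-probed DMA provider, can propagate instead of being flattened to -EINVAL. A sketch of the pattern (the wrapper name is made up):

/* Hypothetical wrapper illustrating ERR_PTR handling. */
static struct dma_chan *example_get_chan(struct device *dev, const char *name)
{
	struct dma_chan *chan = dma_request_chan(dev, name);

	/* Keep -EPROBE_DEFER silent so the caller can simply retry later. */
	if (IS_ERR(chan) && PTR_ERR(chan) != -EPROBE_DEFER)
		dev_err(dev, "Failed to request %s dma channel (%ld)\n",
			name, PTR_ERR(chan));

	return chan;	/* valid channel or ERR_PTR for the caller to check */
}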
diff --git a/drivers/slimbus/slimbus.h b/drivers/slimbus/slimbus.h
index b2f013bfe42e..c73035915f1d 100644
--- a/drivers/slimbus/slimbus.h
+++ b/drivers/slimbus/slimbus.h
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* Copyright (c) 2011-2017, The Linux Foundation
*/
diff --git a/drivers/soc/amlogic/meson-ee-pwrc.c b/drivers/soc/amlogic/meson-ee-pwrc.c
index 5823f5b67d16..3f0261d53ad9 100644
--- a/drivers/soc/amlogic/meson-ee-pwrc.c
+++ b/drivers/soc/amlogic/meson-ee-pwrc.c
@@ -323,6 +323,8 @@ static int meson_ee_pwrc_init_domain(struct platform_device *pdev,
struct meson_ee_pwrc *pwrc,
struct meson_ee_pwrc_domain *dom)
{
+ int ret;
+
dom->pwrc = pwrc;
dom->num_rstc = dom->desc.reset_names_count;
dom->num_clks = dom->desc.clk_names_count;
@@ -368,15 +370,21 @@ static int meson_ee_pwrc_init_domain(struct platform_device *pdev,
* prepare/enable counters won't be in sync.
*/
if (dom->num_clks && dom->desc.get_power && !dom->desc.get_power(dom)) {
- int ret = clk_bulk_prepare_enable(dom->num_clks, dom->clks);
+ ret = clk_bulk_prepare_enable(dom->num_clks, dom->clks);
if (ret)
return ret;
- pm_genpd_init(&dom->base, &pm_domain_always_on_gov, false);
- } else
- pm_genpd_init(&dom->base, NULL,
- (dom->desc.get_power ?
- dom->desc.get_power(dom) : true));
+ ret = pm_genpd_init(&dom->base, &pm_domain_always_on_gov,
+ false);
+ if (ret)
+ return ret;
+ } else {
+ ret = pm_genpd_init(&dom->base, NULL,
+ (dom->desc.get_power ?
+ dom->desc.get_power(dom) : true));
+ if (ret)
+ return ret;
+ }
return 0;
}
@@ -441,9 +449,7 @@ static int meson_ee_pwrc_probe(struct platform_device *pdev)
pwrc->xlate.domains[i] = &dom->base;
}
- of_genpd_add_provider_onecell(pdev->dev.of_node, &pwrc->xlate);
-
- return 0;
+ return of_genpd_add_provider_onecell(pdev->dev.of_node, &pwrc->xlate);
}
static void meson_ee_pwrc_shutdown(struct platform_device *pdev)
diff --git a/drivers/soc/lantiq/fpi-bus.c b/drivers/soc/lantiq/fpi-bus.c
index cb0303a0fe60..dff1375851cf 100644
--- a/drivers/soc/lantiq/fpi-bus.c
+++ b/drivers/soc/lantiq/fpi-bus.c
@@ -28,14 +28,12 @@ static int ltq_fpi_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct device_node *np = dev->of_node;
- struct resource *res_xbar;
struct regmap *rcu_regmap;
void __iomem *xbar_membase;
u32 rcu_ahb_endianness_reg_offset;
int ret;
- res_xbar = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- xbar_membase = devm_ioremap_resource(dev, res_xbar);
+ xbar_membase = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(xbar_membase))
return PTR_ERR(xbar_membase);
diff --git a/drivers/soc/mediatek/mtk-cmdq-helper.c b/drivers/soc/mediatek/mtk-cmdq-helper.c
index 3c82de5f9417..9add0fd5fa6c 100644
--- a/drivers/soc/mediatek/mtk-cmdq-helper.c
+++ b/drivers/soc/mediatek/mtk-cmdq-helper.c
@@ -9,12 +9,54 @@
#include <linux/mailbox_controller.h>
#include <linux/soc/mediatek/mtk-cmdq.h>
-#define CMDQ_ARG_A_WRITE_MASK 0xffff
#define CMDQ_WRITE_ENABLE_MASK BIT(0)
+#define CMDQ_POLL_ENABLE_MASK BIT(0)
#define CMDQ_EOC_IRQ_EN BIT(0)
#define CMDQ_EOC_CMD ((u64)((CMDQ_CODE_EOC << CMDQ_OP_CODE_SHIFT)) \
<< 32 | CMDQ_EOC_IRQ_EN)
+struct cmdq_instruction {
+ union {
+ u32 value;
+ u32 mask;
+ };
+ union {
+ u16 offset;
+ u16 event;
+ };
+ u8 subsys;
+ u8 op;
+};
+
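On the little-endian SoCs this driver targets, struct cmdq_instruction is laid out so that value/mask land in bits 0-31 of the 64-bit command word, offset/event in bits 32-47, subsys in bits 48-55 and op in bits 56-63, matching what the old ((code << CMDQ_OP_CODE_SHIFT) | arg_a) << 32 | arg_b encoding produced. A hypothetical sketch of that correspondence, assuming CMDQ_OP_CODE_SHIFT is 24 and a little-endian build:

/* Illustration only; not part of the driver. */
static void cmdq_instruction_layout_example(void)
{
	struct cmdq_instruction inst = {
		.value = 0x11223344,	/* bits 0-31, old arg_b */
		.offset = 0x0010,	/* bits 32-47, old arg_a low half */
		.subsys = 0x03,		/* bits 48-55 */
		.op = CMDQ_CODE_WRITE,	/* bits 56-63 */
	};
	u64 word;

	BUILD_BUG_ON(sizeof(inst) != sizeof(u64));
	memcpy(&word, &inst, sizeof(word));
	/*
	 * On a little-endian CPU, word now equals
	 * ((u64)((CMDQ_CODE_WRITE << 24) | (0x03 << 16) | 0x0010) << 32)
	 *	| 0x11223344
	 */
}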
+int cmdq_dev_get_client_reg(struct device *dev,
+ struct cmdq_client_reg *client_reg, int idx)
+{
+ struct of_phandle_args spec;
+ int err;
+
+ if (!client_reg)
+ return -ENOENT;
+
+ err = of_parse_phandle_with_fixed_args(dev->of_node,
+ "mediatek,gce-client-reg",
+ 3, idx, &spec);
+ if (err < 0) {
+ dev_err(dev,
+ "error %d can't parse gce-client-reg property (%d)",
+ err, idx);
+
+ return err;
+ }
+
+ client_reg->subsys = (u8)spec.args[0];
+ client_reg->offset = (u16)spec.args[1];
+ client_reg->size = (u16)spec.args[2];
+ of_node_put(spec.np);
+
+ return 0;
+}
+EXPORT_SYMBOL(cmdq_dev_get_client_reg);
+
static void cmdq_client_timeout(struct timer_list *t)
{
struct cmdq_client *client = from_timer(client, t, timer);
@@ -110,10 +152,10 @@ void cmdq_pkt_destroy(struct cmdq_pkt *pkt)
}
EXPORT_SYMBOL(cmdq_pkt_destroy);
-static int cmdq_pkt_append_command(struct cmdq_pkt *pkt, enum cmdq_code code,
- u32 arg_a, u32 arg_b)
+static int cmdq_pkt_append_command(struct cmdq_pkt *pkt,
+ struct cmdq_instruction inst)
{
- u64 *cmd_ptr;
+ struct cmdq_instruction *cmd_ptr;
if (unlikely(pkt->cmd_buf_size + CMDQ_INST_SIZE > pkt->buf_size)) {
/*
@@ -129,8 +171,9 @@ static int cmdq_pkt_append_command(struct cmdq_pkt *pkt, enum cmdq_code code,
__func__, (u32)pkt->buf_size);
return -ENOMEM;
}
+
cmd_ptr = pkt->va_base + pkt->cmd_buf_size;
- (*cmd_ptr) = (u64)((code << CMDQ_OP_CODE_SHIFT) | arg_a) << 32 | arg_b;
+ *cmd_ptr = inst;
pkt->cmd_buf_size += CMDQ_INST_SIZE;
return 0;
@@ -138,24 +181,34 @@ static int cmdq_pkt_append_command(struct cmdq_pkt *pkt, enum cmdq_code code,
int cmdq_pkt_write(struct cmdq_pkt *pkt, u8 subsys, u16 offset, u32 value)
{
- u32 arg_a = (offset & CMDQ_ARG_A_WRITE_MASK) |
- (subsys << CMDQ_SUBSYS_SHIFT);
+ struct cmdq_instruction inst;
- return cmdq_pkt_append_command(pkt, CMDQ_CODE_WRITE, arg_a, value);
+ inst.op = CMDQ_CODE_WRITE;
+ inst.value = value;
+ inst.offset = offset;
+ inst.subsys = subsys;
+
+ return cmdq_pkt_append_command(pkt, inst);
}
EXPORT_SYMBOL(cmdq_pkt_write);
int cmdq_pkt_write_mask(struct cmdq_pkt *pkt, u8 subsys,
u16 offset, u32 value, u32 mask)
{
- u32 offset_mask = offset;
- int err = 0;
+ struct cmdq_instruction inst = { {0} };
+ u16 offset_mask = offset;
+ int err;
if (mask != 0xffffffff) {
- err = cmdq_pkt_append_command(pkt, CMDQ_CODE_MASK, 0, ~mask);
+ inst.op = CMDQ_CODE_MASK;
+ inst.mask = ~mask;
+ err = cmdq_pkt_append_command(pkt, inst);
+ if (err < 0)
+ return err;
+
offset_mask |= CMDQ_WRITE_ENABLE_MASK;
}
- err |= cmdq_pkt_write(pkt, subsys, offset_mask, value);
+ err = cmdq_pkt_write(pkt, subsys, offset_mask, value);
return err;
}
@@ -163,43 +216,85 @@ EXPORT_SYMBOL(cmdq_pkt_write_mask);
int cmdq_pkt_wfe(struct cmdq_pkt *pkt, u16 event)
{
- u32 arg_b;
+ struct cmdq_instruction inst = { {0} };
if (event >= CMDQ_MAX_EVENT)
return -EINVAL;
- /*
- * WFE arg_b
- * bit 0-11: wait value
- * bit 15: 1 - wait, 0 - no wait
- * bit 16-27: update value
- * bit 31: 1 - update, 0 - no update
- */
- arg_b = CMDQ_WFE_UPDATE | CMDQ_WFE_WAIT | CMDQ_WFE_WAIT_VALUE;
+ inst.op = CMDQ_CODE_WFE;
+ inst.value = CMDQ_WFE_OPTION;
+ inst.event = event;
- return cmdq_pkt_append_command(pkt, CMDQ_CODE_WFE, event, arg_b);
+ return cmdq_pkt_append_command(pkt, inst);
}
EXPORT_SYMBOL(cmdq_pkt_wfe);
int cmdq_pkt_clear_event(struct cmdq_pkt *pkt, u16 event)
{
+ struct cmdq_instruction inst = { {0} };
+
if (event >= CMDQ_MAX_EVENT)
return -EINVAL;
- return cmdq_pkt_append_command(pkt, CMDQ_CODE_WFE, event,
- CMDQ_WFE_UPDATE);
+ inst.op = CMDQ_CODE_WFE;
+ inst.value = CMDQ_WFE_UPDATE;
+ inst.event = event;
+
+ return cmdq_pkt_append_command(pkt, inst);
}
EXPORT_SYMBOL(cmdq_pkt_clear_event);
+int cmdq_pkt_poll(struct cmdq_pkt *pkt, u8 subsys,
+ u16 offset, u32 value)
+{
+ struct cmdq_instruction inst = { {0} };
+ int err;
+
+ inst.op = CMDQ_CODE_POLL;
+ inst.value = value;
+ inst.offset = offset;
+ inst.subsys = subsys;
+ err = cmdq_pkt_append_command(pkt, inst);
+
+ return err;
+}
+EXPORT_SYMBOL(cmdq_pkt_poll);
+
+int cmdq_pkt_poll_mask(struct cmdq_pkt *pkt, u8 subsys,
+ u16 offset, u32 value, u32 mask)
+{
+ struct cmdq_instruction inst = { {0} };
+ int err;
+
+ inst.op = CMDQ_CODE_MASK;
+ inst.mask = ~mask;
+ err = cmdq_pkt_append_command(pkt, inst);
+ if (err < 0)
+ return err;
+
+ offset = offset | CMDQ_POLL_ENABLE_MASK;
+ err = cmdq_pkt_poll(pkt, subsys, offset, value);
+
+ return err;
+}
+EXPORT_SYMBOL(cmdq_pkt_poll_mask);
+
static int cmdq_pkt_finalize(struct cmdq_pkt *pkt)
{
+ struct cmdq_instruction inst = { {0} };
int err;
/* insert EOC and generate IRQ for each command iteration */
- err = cmdq_pkt_append_command(pkt, CMDQ_CODE_EOC, 0, CMDQ_EOC_IRQ_EN);
+ inst.op = CMDQ_CODE_EOC;
+ inst.value = CMDQ_EOC_IRQ_EN;
+ err = cmdq_pkt_append_command(pkt, inst);
+ if (err < 0)
+ return err;
/* JUMP to end */
- err |= cmdq_pkt_append_command(pkt, CMDQ_CODE_JUMP, 0, CMDQ_JUMP_PASS);
+ inst.op = CMDQ_CODE_JUMP;
+ inst.value = CMDQ_JUMP_PASS;
+ err = cmdq_pkt_append_command(pkt, inst);
return err;
}
diff --git a/drivers/soc/tegra/flowctrl.c b/drivers/soc/tegra/flowctrl.c
index eb96a3086d6d..5db919d96aba 100644
--- a/drivers/soc/tegra/flowctrl.c
+++ b/drivers/soc/tegra/flowctrl.c
@@ -219,7 +219,7 @@ static int __init tegra_flowctrl_init(void)
return 0;
}
- tegra_flowctrl_base = ioremap_nocache(res.start, resource_size(&res));
+ tegra_flowctrl_base = ioremap(res.start, resource_size(&res));
if (!tegra_flowctrl_base)
return -ENXIO;
diff --git a/drivers/soc/tegra/fuse/fuse-tegra.c b/drivers/soc/tegra/fuse/fuse-tegra.c
index 4d719d4b8d5a..606abbe55bba 100644
--- a/drivers/soc/tegra/fuse/fuse-tegra.c
+++ b/drivers/soc/tegra/fuse/fuse-tegra.c
@@ -408,7 +408,7 @@ static int __init tegra_init_fuse(void)
}
}
- fuse->base = ioremap_nocache(regs.start, resource_size(&regs));
+ fuse->base = ioremap(regs.start, resource_size(&regs));
if (!fuse->base) {
pr_err("failed to map FUSE registers\n");
return -ENXIO;
diff --git a/drivers/soc/tegra/fuse/tegra-apbmisc.c b/drivers/soc/tegra/fuse/tegra-apbmisc.c
index df76778af601..a2fd6ccd48f9 100644
--- a/drivers/soc/tegra/fuse/tegra-apbmisc.c
+++ b/drivers/soc/tegra/fuse/tegra-apbmisc.c
@@ -159,11 +159,11 @@ void __init tegra_init_apbmisc(void)
}
}
- apbmisc_base = ioremap_nocache(apbmisc.start, resource_size(&apbmisc));
+ apbmisc_base = ioremap(apbmisc.start, resource_size(&apbmisc));
if (!apbmisc_base)
pr_err("failed to map APBMISC registers\n");
- strapping_base = ioremap_nocache(straps.start, resource_size(&straps));
+ strapping_base = ioremap(straps.start, resource_size(&straps));
if (!strapping_base)
pr_err("failed to map strapping options registers\n");
diff --git a/drivers/soc/tegra/pmc.c b/drivers/soc/tegra/pmc.c
index ea0e11a09c12..1699dda6b393 100644
--- a/drivers/soc/tegra/pmc.c
+++ b/drivers/soc/tegra/pmc.c
@@ -2826,7 +2826,7 @@ static void tegra186_pmc_setup_irq_polarity(struct tegra_pmc *pmc,
of_address_to_resource(np, index, &regs);
- wake = ioremap_nocache(regs.start, resource_size(&regs));
+ wake = ioremap(regs.start, resource_size(&regs));
if (!wake) {
dev_err(pmc->dev, "failed to map PMC wake registers\n");
return;
@@ -3097,7 +3097,7 @@ static int __init tegra_pmc_early_init(void)
}
}
- pmc->base = ioremap_nocache(regs.start, resource_size(&regs));
+ pmc->base = ioremap(regs.start, resource_size(&regs));
if (!pmc->base) {
pr_err("failed to map PMC registers\n");
of_node_put(np);
diff --git a/drivers/soc/ti/Kconfig b/drivers/soc/ti/Kconfig
index cf545f428d03..4486e055794c 100644
--- a/drivers/soc/ti/Kconfig
+++ b/drivers/soc/ti/Kconfig
@@ -80,6 +80,17 @@ config TI_SCI_PM_DOMAINS
called ti_sci_pm_domains. Note this is needed early in boot before
rootfs may be available.
+config TI_K3_RINGACC
+ bool "K3 Ring accelerator Sub System"
+ depends on ARCH_K3 || COMPILE_TEST
+ depends on TI_SCI_INTA_IRQCHIP
+ help
+ Say y here to support the K3 Ring accelerator module.
+ The Ring Accelerator (RINGACC or RA) provides hardware acceleration
+ to enable straightforward passing of work between a producer
+	  and a consumer. There is one RINGACC module per NAVSS on TI AM65x SoCs.
+ If unsure, say N.
+
endif # SOC_TI
config TI_SCI_INTA_MSI_DOMAIN
diff --git a/drivers/soc/ti/Makefile b/drivers/soc/ti/Makefile
index 788b5cd1e180..bec827937a5f 100644
--- a/drivers/soc/ti/Makefile
+++ b/drivers/soc/ti/Makefile
@@ -10,3 +10,4 @@ obj-$(CONFIG_ARCH_OMAP2PLUS) += omap_prm.o
obj-$(CONFIG_WKUP_M3_IPC) += wkup_m3_ipc.o
obj-$(CONFIG_TI_SCI_PM_DOMAINS) += ti_sci_pm_domains.o
obj-$(CONFIG_TI_SCI_INTA_MSI_DOMAIN) += ti_sci_inta_msi.o
+obj-$(CONFIG_TI_K3_RINGACC) += k3-ringacc.o
diff --git a/drivers/soc/ti/k3-ringacc.c b/drivers/soc/ti/k3-ringacc.c
new file mode 100644
index 000000000000..5fb2ee2ac978
--- /dev/null
+++ b/drivers/soc/ti/k3-ringacc.c
@@ -0,0 +1,1157 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * TI K3 NAVSS Ring Accelerator subsystem driver
+ *
+ * Copyright (C) 2019 Texas Instruments Incorporated - http://www.ti.com
+ */
+
+#include <linux/dma-mapping.h>
+#include <linux/io.h>
+#include <linux/init.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/soc/ti/k3-ringacc.h>
+#include <linux/soc/ti/ti_sci_protocol.h>
+#include <linux/soc/ti/ti_sci_inta_msi.h>
+#include <linux/of_irq.h>
+#include <linux/irqdomain.h>
+
+static LIST_HEAD(k3_ringacc_list);
+static DEFINE_MUTEX(k3_ringacc_list_lock);
+
+#define K3_RINGACC_CFG_RING_SIZE_ELCNT_MASK GENMASK(19, 0)
+
+/**
+ * struct k3_ring_rt_regs - The RA realtime Control/Status Registers region
+ *
+ * @resv_16: Reserved
+ * @db: Ring Doorbell Register
+ * @resv_4: Reserved
+ * @occ: Ring Occupancy Register
+ * @indx: Ring Current Index Register
+ * @hwocc: Ring Hardware Occupancy Register
+ * @hwindx: Ring Hardware Current Index Register
+ */
+struct k3_ring_rt_regs {
+ u32 resv_16[4];
+ u32 db;
+ u32 resv_4[1];
+ u32 occ;
+ u32 indx;
+ u32 hwocc;
+ u32 hwindx;
+};
+
+#define K3_RINGACC_RT_REGS_STEP 0x1000
+
+/**
+ * struct k3_ring_fifo_regs - The Ring Accelerator Queues Registers region
+ *
+ * @head_data: Ring Head Entry Data Registers
+ * @tail_data: Ring Tail Entry Data Registers
+ * @peek_head_data: Ring Peek Head Entry Data Regs
+ * @peek_tail_data: Ring Peek Tail Entry Data Regs
+ */
+struct k3_ring_fifo_regs {
+ u32 head_data[128];
+ u32 tail_data[128];
+ u32 peek_head_data[128];
+ u32 peek_tail_data[128];
+};
+
+/**
+ * struct k3_ringacc_proxy_gcfg_regs - RA Proxy Global Config MMIO Region
+ *
+ * @revision: Revision Register
+ * @config: Config Register
+ */
+struct k3_ringacc_proxy_gcfg_regs {
+ u32 revision;
+ u32 config;
+};
+
+#define K3_RINGACC_PROXY_CFG_THREADS_MASK GENMASK(15, 0)
+
+/**
+ * struct k3_ringacc_proxy_target_regs - Proxy Datapath MMIO Region
+ *
+ * @control: Proxy Control Register
+ * @status: Proxy Status Register
+ * @resv_512: Reserved
+ * @data: Proxy Data Register
+ */
+struct k3_ringacc_proxy_target_regs {
+ u32 control;
+ u32 status;
+ u8 resv_512[504];
+ u32 data[128];
+};
+
+#define K3_RINGACC_PROXY_TARGET_STEP 0x1000
+#define K3_RINGACC_PROXY_NOT_USED (-1)
+
+enum k3_ringacc_proxy_access_mode {
+ PROXY_ACCESS_MODE_HEAD = 0,
+ PROXY_ACCESS_MODE_TAIL = 1,
+ PROXY_ACCESS_MODE_PEEK_HEAD = 2,
+ PROXY_ACCESS_MODE_PEEK_TAIL = 3,
+};
+
+#define K3_RINGACC_FIFO_WINDOW_SIZE_BYTES (512U)
+#define K3_RINGACC_FIFO_REGS_STEP 0x1000
+#define K3_RINGACC_MAX_DB_RING_CNT (127U)
+
+struct k3_ring_ops {
+ int (*push_tail)(struct k3_ring *ring, void *elm);
+ int (*push_head)(struct k3_ring *ring, void *elm);
+ int (*pop_tail)(struct k3_ring *ring, void *elm);
+ int (*pop_head)(struct k3_ring *ring, void *elm);
+};
+
+/**
+ * struct k3_ring - RA Ring descriptor
+ *
+ * @rt: Ring control/status registers
+ * @fifos: Ring queues registers
+ * @proxy: Ring Proxy Datapath registers
+ * @ring_mem_dma: Ring buffer dma address
+ * @ring_mem_virt: Ring buffer virt address
+ * @ops: Ring operations
+ * @size: Ring size in elements
+ * @elm_size: Size of the ring element
+ * @mode: Ring mode
+ * @flags: flags
+ * @free: Number of free elements
+ * @flags: Ring state flags (K3_RING_FLAG_BUSY, K3_RING_FLAG_SHARED)
+ * @windex: Write index (only for @K3_RINGACC_RING_MODE_RING)
+ * @rindex: Read index (only for @K3_RINGACC_RING_MODE_RING)
+ * @ring_id: Ring Id
+ * @parent: Pointer to the parent struct @k3_ringacc
+ * @use_count: Use count for shared rings
+ * @proxy_id: RA Ring Proxy Id (only if @K3_RINGACC_RING_USE_PROXY)
+ */
+struct k3_ring {
+ struct k3_ring_rt_regs __iomem *rt;
+ struct k3_ring_fifo_regs __iomem *fifos;
+ struct k3_ringacc_proxy_target_regs __iomem *proxy;
+ dma_addr_t ring_mem_dma;
+ void *ring_mem_virt;
+ struct k3_ring_ops *ops;
+ u32 size;
+ enum k3_ring_size elm_size;
+ enum k3_ring_mode mode;
+ u32 flags;
+#define K3_RING_FLAG_BUSY BIT(1)
+#define K3_RING_FLAG_SHARED BIT(2)
+ u32 free;
+ u32 occ;
+ u32 windex;
+ u32 rindex;
+ u32 ring_id;
+ struct k3_ringacc *parent;
+ u32 use_count;
+ int proxy_id;
+};
+
+/**
+ * struct k3_ringacc - Rings accelerator descriptor
+ *
+ * @dev: pointer to the RA device
+ * @proxy_gcfg: RA proxy global config registers
+ * @proxy_target_base: RA proxy datapath region
+ * @num_rings: number of rings in RA
+ * @rings_inuse: bitfield for ring usage tracking
+ * @rm_gp_range: general purpose rings range from tisci
+ * @dma_ring_reset_quirk: DMA reset workaround enable
+ * @num_proxies: number of RA proxies
+ * @proxy_inuse: bitfield for proxy usage tracking
+ * @rings: array of ring descriptors (struct @k3_ring)
+ * @list: list of RAs in the system
+ * @req_lock: protect rings allocation
+ * @tisci: pointer to the ti-sci handle
+ * @tisci_ring_ops: ti-sci rings ops
+ * @tisci_dev_id: ti-sci device id
+ */
+struct k3_ringacc {
+ struct device *dev;
+ struct k3_ringacc_proxy_gcfg_regs __iomem *proxy_gcfg;
+ void __iomem *proxy_target_base;
+ u32 num_rings; /* number of rings in Ringacc module */
+ unsigned long *rings_inuse;
+ struct ti_sci_resource *rm_gp_range;
+
+ bool dma_ring_reset_quirk;
+ u32 num_proxies;
+ unsigned long *proxy_inuse;
+
+ struct k3_ring *rings;
+ struct list_head list;
+ struct mutex req_lock; /* protect rings allocation */
+
+ const struct ti_sci_handle *tisci;
+ const struct ti_sci_rm_ringacc_ops *tisci_ring_ops;
+ u32 tisci_dev_id;
+};
+
+static long k3_ringacc_ring_get_fifo_pos(struct k3_ring *ring)
+{
+ return K3_RINGACC_FIFO_WINDOW_SIZE_BYTES -
+ (4 << ring->elm_size);
+}
+
+static void *k3_ringacc_get_elm_addr(struct k3_ring *ring, u32 idx)
+{
+ return (ring->ring_mem_virt + idx * (4 << ring->elm_size));
+}
+
+static int k3_ringacc_ring_push_mem(struct k3_ring *ring, void *elem);
+static int k3_ringacc_ring_pop_mem(struct k3_ring *ring, void *elem);
+
+static struct k3_ring_ops k3_ring_mode_ring_ops = {
+ .push_tail = k3_ringacc_ring_push_mem,
+ .pop_head = k3_ringacc_ring_pop_mem,
+};
+
+static int k3_ringacc_ring_push_io(struct k3_ring *ring, void *elem);
+static int k3_ringacc_ring_pop_io(struct k3_ring *ring, void *elem);
+static int k3_ringacc_ring_push_head_io(struct k3_ring *ring, void *elem);
+static int k3_ringacc_ring_pop_tail_io(struct k3_ring *ring, void *elem);
+
+static struct k3_ring_ops k3_ring_mode_msg_ops = {
+ .push_tail = k3_ringacc_ring_push_io,
+ .push_head = k3_ringacc_ring_push_head_io,
+ .pop_tail = k3_ringacc_ring_pop_tail_io,
+ .pop_head = k3_ringacc_ring_pop_io,
+};
+
+static int k3_ringacc_ring_push_head_proxy(struct k3_ring *ring, void *elem);
+static int k3_ringacc_ring_push_tail_proxy(struct k3_ring *ring, void *elem);
+static int k3_ringacc_ring_pop_head_proxy(struct k3_ring *ring, void *elem);
+static int k3_ringacc_ring_pop_tail_proxy(struct k3_ring *ring, void *elem);
+
+static struct k3_ring_ops k3_ring_mode_proxy_ops = {
+ .push_tail = k3_ringacc_ring_push_tail_proxy,
+ .push_head = k3_ringacc_ring_push_head_proxy,
+ .pop_tail = k3_ringacc_ring_pop_tail_proxy,
+ .pop_head = k3_ringacc_ring_pop_head_proxy,
+};
+
+static void k3_ringacc_ring_dump(struct k3_ring *ring)
+{
+ struct device *dev = ring->parent->dev;
+
+ dev_dbg(dev, "dump ring: %d\n", ring->ring_id);
+ dev_dbg(dev, "dump mem virt %p, dma %pad\n", ring->ring_mem_virt,
+ &ring->ring_mem_dma);
+ dev_dbg(dev, "dump elmsize %d, size %d, mode %d, proxy_id %d\n",
+ ring->elm_size, ring->size, ring->mode, ring->proxy_id);
+
+ dev_dbg(dev, "dump ring_rt_regs: db%08x\n", readl(&ring->rt->db));
+ dev_dbg(dev, "dump occ%08x\n", readl(&ring->rt->occ));
+ dev_dbg(dev, "dump indx%08x\n", readl(&ring->rt->indx));
+ dev_dbg(dev, "dump hwocc%08x\n", readl(&ring->rt->hwocc));
+ dev_dbg(dev, "dump hwindx%08x\n", readl(&ring->rt->hwindx));
+
+ if (ring->ring_mem_virt)
+ print_hex_dump_debug("dump ring_mem_virt ", DUMP_PREFIX_NONE,
+ 16, 1, ring->ring_mem_virt, 16 * 8, false);
+}
+
+struct k3_ring *k3_ringacc_request_ring(struct k3_ringacc *ringacc,
+ int id, u32 flags)
+{
+ int proxy_id = K3_RINGACC_PROXY_NOT_USED;
+
+ mutex_lock(&ringacc->req_lock);
+
+ if (id == K3_RINGACC_RING_ID_ANY) {
+ /* Request for any general purpose ring */
+ struct ti_sci_resource_desc *gp_rings =
+ &ringacc->rm_gp_range->desc[0];
+ unsigned long size;
+
+ size = gp_rings->start + gp_rings->num;
+ id = find_next_zero_bit(ringacc->rings_inuse, size,
+ gp_rings->start);
+ if (id == size)
+ goto error;
+ } else if (id < 0) {
+ goto error;
+ }
+
+ if (test_bit(id, ringacc->rings_inuse) &&
+ !(ringacc->rings[id].flags & K3_RING_FLAG_SHARED))
+ goto error;
+ else if (ringacc->rings[id].flags & K3_RING_FLAG_SHARED)
+ goto out;
+
+ if (flags & K3_RINGACC_RING_USE_PROXY) {
+ proxy_id = find_next_zero_bit(ringacc->proxy_inuse,
+ ringacc->num_proxies, 0);
+ if (proxy_id == ringacc->num_proxies)
+ goto error;
+ }
+
+ if (proxy_id != K3_RINGACC_PROXY_NOT_USED) {
+ set_bit(proxy_id, ringacc->proxy_inuse);
+ ringacc->rings[id].proxy_id = proxy_id;
+ dev_dbg(ringacc->dev, "Giving ring#%d proxy#%d\n", id,
+ proxy_id);
+ } else {
+ dev_dbg(ringacc->dev, "Giving ring#%d\n", id);
+ }
+
+ set_bit(id, ringacc->rings_inuse);
+out:
+ ringacc->rings[id].use_count++;
+ mutex_unlock(&ringacc->req_lock);
+ return &ringacc->rings[id];
+
+error:
+ mutex_unlock(&ringacc->req_lock);
+ return NULL;
+}
+EXPORT_SYMBOL_GPL(k3_ringacc_request_ring);
+
+static void k3_ringacc_ring_reset_sci(struct k3_ring *ring)
+{
+ struct k3_ringacc *ringacc = ring->parent;
+ int ret;
+
+ ret = ringacc->tisci_ring_ops->config(
+ ringacc->tisci,
+ TI_SCI_MSG_VALUE_RM_RING_COUNT_VALID,
+ ringacc->tisci_dev_id,
+ ring->ring_id,
+ 0,
+ 0,
+ ring->size,
+ 0,
+ 0,
+ 0);
+ if (ret)
+ dev_err(ringacc->dev, "TISCI reset ring fail (%d) ring_idx %d\n",
+ ret, ring->ring_id);
+}
+
+void k3_ringacc_ring_reset(struct k3_ring *ring)
+{
+ if (!ring || !(ring->flags & K3_RING_FLAG_BUSY))
+ return;
+
+ ring->occ = 0;
+ ring->free = 0;
+ ring->rindex = 0;
+ ring->windex = 0;
+
+ k3_ringacc_ring_reset_sci(ring);
+}
+EXPORT_SYMBOL_GPL(k3_ringacc_ring_reset);
+
+static void k3_ringacc_ring_reconfig_qmode_sci(struct k3_ring *ring,
+ enum k3_ring_mode mode)
+{
+ struct k3_ringacc *ringacc = ring->parent;
+ int ret;
+
+ ret = ringacc->tisci_ring_ops->config(
+ ringacc->tisci,
+ TI_SCI_MSG_VALUE_RM_RING_MODE_VALID,
+ ringacc->tisci_dev_id,
+ ring->ring_id,
+ 0,
+ 0,
+ 0,
+ mode,
+ 0,
+ 0);
+ if (ret)
+ dev_err(ringacc->dev, "TISCI reconf qmode fail (%d) ring_idx %d\n",
+ ret, ring->ring_id);
+}
+
+void k3_ringacc_ring_reset_dma(struct k3_ring *ring, u32 occ)
+{
+ if (!ring || !(ring->flags & K3_RING_FLAG_BUSY))
+ return;
+
+ if (!ring->parent->dma_ring_reset_quirk)
+ goto reset;
+
+ if (!occ)
+ occ = readl(&ring->rt->occ);
+
+ if (occ) {
+ u32 db_ring_cnt, db_ring_cnt_cur;
+
+ dev_dbg(ring->parent->dev, "%s %u occ: %u\n", __func__,
+ ring->ring_id, occ);
+ /* TI-SCI ring reset */
+ k3_ringacc_ring_reset_sci(ring);
+
+ /*
+ * Setup the ring in ring/doorbell mode (if not already in this
+ * mode)
+ */
+ if (ring->mode != K3_RINGACC_RING_MODE_RING)
+ k3_ringacc_ring_reconfig_qmode_sci(
+ ring, K3_RINGACC_RING_MODE_RING);
+ /*
+		 * Ring the doorbell 2**22 - ringOcc times.
+		 * This will wrap the internal UDMAP ring state occupancy
+		 * counter (which is 21 bits wide) to 0.
+ */
+ db_ring_cnt = (1U << 22) - occ;
+
+ while (db_ring_cnt != 0) {
+ /*
+ * Ring the doorbell with the maximum count each
+			 * iteration if possible to minimize the total
+			 * number of writes
+ */
+ if (db_ring_cnt > K3_RINGACC_MAX_DB_RING_CNT)
+ db_ring_cnt_cur = K3_RINGACC_MAX_DB_RING_CNT;
+ else
+ db_ring_cnt_cur = db_ring_cnt;
+
+ writel(db_ring_cnt_cur, &ring->rt->db);
+ db_ring_cnt -= db_ring_cnt_cur;
+ }
+
+ /* Restore the original ring mode (if not ring mode) */
+ if (ring->mode != K3_RINGACC_RING_MODE_RING)
+ k3_ringacc_ring_reconfig_qmode_sci(ring, ring->mode);
+ }
+
+reset:
+ /* Reset the ring */
+ k3_ringacc_ring_reset(ring);
+}
+EXPORT_SYMBOL_GPL(k3_ringacc_ring_reset_dma);
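A worked example of the quirk handling above: with occ == 5, db_ring_cnt becomes 2^22 - 5 = 4194299. Since one doorbell write may carry at most K3_RINGACC_MAX_DB_RING_CNT (127) increments, the loop issues 33025 writes of 127 followed by one write of 124 (33026 writes in total). The 21-bit UDMAP occupancy counter then ends at (5 + 4194299) mod 2^21 = 2^22 mod 2^21 = 0, as intended.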
+
+static void k3_ringacc_ring_free_sci(struct k3_ring *ring)
+{
+ struct k3_ringacc *ringacc = ring->parent;
+ int ret;
+
+ ret = ringacc->tisci_ring_ops->config(
+ ringacc->tisci,
+ TI_SCI_MSG_VALUE_RM_ALL_NO_ORDER,
+ ringacc->tisci_dev_id,
+ ring->ring_id,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0);
+ if (ret)
+ dev_err(ringacc->dev, "TISCI ring free fail (%d) ring_idx %d\n",
+ ret, ring->ring_id);
+}
+
+int k3_ringacc_ring_free(struct k3_ring *ring)
+{
+ struct k3_ringacc *ringacc;
+
+ if (!ring)
+ return -EINVAL;
+
+ ringacc = ring->parent;
+
+ dev_dbg(ring->parent->dev, "flags: 0x%08x\n", ring->flags);
+
+ if (!test_bit(ring->ring_id, ringacc->rings_inuse))
+ return -EINVAL;
+
+ mutex_lock(&ringacc->req_lock);
+
+ if (--ring->use_count)
+ goto out;
+
+ if (!(ring->flags & K3_RING_FLAG_BUSY))
+ goto no_init;
+
+ k3_ringacc_ring_free_sci(ring);
+
+ dma_free_coherent(ringacc->dev,
+ ring->size * (4 << ring->elm_size),
+ ring->ring_mem_virt, ring->ring_mem_dma);
+ ring->flags = 0;
+ ring->ops = NULL;
+ if (ring->proxy_id != K3_RINGACC_PROXY_NOT_USED) {
+ clear_bit(ring->proxy_id, ringacc->proxy_inuse);
+ ring->proxy = NULL;
+ ring->proxy_id = K3_RINGACC_PROXY_NOT_USED;
+ }
+
+no_init:
+ clear_bit(ring->ring_id, ringacc->rings_inuse);
+
+out:
+ mutex_unlock(&ringacc->req_lock);
+ return 0;
+}
+EXPORT_SYMBOL_GPL(k3_ringacc_ring_free);
+
+u32 k3_ringacc_get_ring_id(struct k3_ring *ring)
+{
+ if (!ring)
+ return -EINVAL;
+
+ return ring->ring_id;
+}
+EXPORT_SYMBOL_GPL(k3_ringacc_get_ring_id);
+
+u32 k3_ringacc_get_tisci_dev_id(struct k3_ring *ring)
+{
+ if (!ring)
+ return -EINVAL;
+
+ return ring->parent->tisci_dev_id;
+}
+EXPORT_SYMBOL_GPL(k3_ringacc_get_tisci_dev_id);
+
+int k3_ringacc_get_ring_irq_num(struct k3_ring *ring)
+{
+ int irq_num;
+
+ if (!ring)
+ return -EINVAL;
+
+ irq_num = ti_sci_inta_msi_get_virq(ring->parent->dev, ring->ring_id);
+ if (irq_num <= 0)
+ irq_num = -EINVAL;
+ return irq_num;
+}
+EXPORT_SYMBOL_GPL(k3_ringacc_get_ring_irq_num);
+
+static int k3_ringacc_ring_cfg_sci(struct k3_ring *ring)
+{
+ struct k3_ringacc *ringacc = ring->parent;
+ u32 ring_idx;
+ int ret;
+
+ if (!ringacc->tisci)
+ return -EINVAL;
+
+ ring_idx = ring->ring_id;
+ ret = ringacc->tisci_ring_ops->config(
+ ringacc->tisci,
+ TI_SCI_MSG_VALUE_RM_ALL_NO_ORDER,
+ ringacc->tisci_dev_id,
+ ring_idx,
+ lower_32_bits(ring->ring_mem_dma),
+ upper_32_bits(ring->ring_mem_dma),
+ ring->size,
+ ring->mode,
+ ring->elm_size,
+ 0);
+ if (ret)
+ dev_err(ringacc->dev, "TISCI config ring fail (%d) ring_idx %d\n",
+ ret, ring_idx);
+
+ return ret;
+}
+
+int k3_ringacc_ring_cfg(struct k3_ring *ring, struct k3_ring_cfg *cfg)
+{
+ struct k3_ringacc *ringacc = ring->parent;
+ int ret = 0;
+
+ if (!ring || !cfg)
+ return -EINVAL;
+ if (cfg->elm_size > K3_RINGACC_RING_ELSIZE_256 ||
+ cfg->mode >= K3_RINGACC_RING_MODE_INVALID ||
+ cfg->size & ~K3_RINGACC_CFG_RING_SIZE_ELCNT_MASK ||
+ !test_bit(ring->ring_id, ringacc->rings_inuse))
+ return -EINVAL;
+
+ if (cfg->mode == K3_RINGACC_RING_MODE_MESSAGE &&
+ ring->proxy_id == K3_RINGACC_PROXY_NOT_USED &&
+ cfg->elm_size > K3_RINGACC_RING_ELSIZE_8) {
+ dev_err(ringacc->dev,
+ "Message mode must use proxy for %u element size\n",
+ 4 << ring->elm_size);
+ return -EINVAL;
+ }
+
+ /*
+	 * In the case of a shared ring, only the first user (master user)
+	 * can configure the ring. The expected client-side sequence is:
+ * ring = k3_ringacc_request_ring(ringacc, ring_id, 0); # master user
+ * k3_ringacc_ring_cfg(ring, cfg); # master configuration
+ * k3_ringacc_request_ring(ringacc, ring_id, K3_RING_FLAG_SHARED);
+ * k3_ringacc_request_ring(ringacc, ring_id, K3_RING_FLAG_SHARED);
+ */
+ if (ring->use_count != 1)
+ return 0;
+
+ ring->size = cfg->size;
+ ring->elm_size = cfg->elm_size;
+ ring->mode = cfg->mode;
+ ring->occ = 0;
+ ring->free = 0;
+ ring->rindex = 0;
+ ring->windex = 0;
+
+ if (ring->proxy_id != K3_RINGACC_PROXY_NOT_USED)
+ ring->proxy = ringacc->proxy_target_base +
+ ring->proxy_id * K3_RINGACC_PROXY_TARGET_STEP;
+
+ switch (ring->mode) {
+ case K3_RINGACC_RING_MODE_RING:
+ ring->ops = &k3_ring_mode_ring_ops;
+ break;
+ case K3_RINGACC_RING_MODE_MESSAGE:
+ if (ring->proxy)
+ ring->ops = &k3_ring_mode_proxy_ops;
+ else
+ ring->ops = &k3_ring_mode_msg_ops;
+ break;
+ default:
+ ring->ops = NULL;
+ ret = -EINVAL;
+ goto err_free_proxy;
+	}
+
+ ring->ring_mem_virt = dma_alloc_coherent(ringacc->dev,
+ ring->size * (4 << ring->elm_size),
+ &ring->ring_mem_dma, GFP_KERNEL);
+ if (!ring->ring_mem_virt) {
+ dev_err(ringacc->dev, "Failed to alloc ring mem\n");
+ ret = -ENOMEM;
+ goto err_free_ops;
+ }
+
+ ret = k3_ringacc_ring_cfg_sci(ring);
+
+ if (ret)
+ goto err_free_mem;
+
+ ring->flags |= K3_RING_FLAG_BUSY;
+ ring->flags |= (cfg->flags & K3_RINGACC_RING_SHARED) ?
+ K3_RING_FLAG_SHARED : 0;
+
+ k3_ringacc_ring_dump(ring);
+
+ return 0;
+
+err_free_mem:
+ dma_free_coherent(ringacc->dev,
+ ring->size * (4 << ring->elm_size),
+ ring->ring_mem_virt,
+ ring->ring_mem_dma);
+err_free_ops:
+ ring->ops = NULL;
+err_free_proxy:
+ ring->proxy = NULL;
+ return ret;
+}
+EXPORT_SYMBOL_GPL(k3_ringacc_ring_cfg);
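A hedged client-side sketch of the request/configure lifecycle implemented above (error handling abbreviated; k3_ringacc_ring_push() is assumed to be the push helper exported from this driver's public header):

/* Illustrative client, not part of this file. */
static int example_client(struct k3_ringacc *ringacc)
{
	struct k3_ring_cfg cfg = {
		.size = 128,				/* elements */
		.elm_size = K3_RINGACC_RING_ELSIZE_8,	/* 8-byte elements */
		.mode = K3_RINGACC_RING_MODE_RING,
		.flags = 0,
	};
	struct k3_ring *ring;
	u64 desc = 0;
	int ret;

	ring = k3_ringacc_request_ring(ringacc, K3_RINGACC_RING_ID_ANY, 0);
	if (!ring)
		return -ENODEV;

	ret = k3_ringacc_ring_cfg(ring, &cfg);
	if (!ret)
		ret = k3_ringacc_ring_push(ring, &desc);	/* assumed API */

	k3_ringacc_ring_free(ring);
	return ret;
}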
+
+u32 k3_ringacc_ring_get_size(struct k3_ring *ring)
+{
+ if (!ring || !(ring->flags & K3_RING_FLAG_BUSY))
+ return -EINVAL;
+
+ return ring->size;
+}
+EXPORT_SYMBOL_GPL(k3_ringacc_ring_get_size);
+
+u32 k3_ringacc_ring_get_free(struct k3_ring *ring)
+{
+ if (!ring || !(ring->flags & K3_RING_FLAG_BUSY))
+ return -EINVAL;
+
+ if (!ring->free)
+ ring->free = ring->size - readl(&ring->rt->occ);
+
+ return ring->free;
+}
+EXPORT_SYMBOL_GPL(k3_ringacc_ring_get_free);
+
+u32 k3_ringacc_ring_get_occ(struct k3_ring *ring)
+{
+ if (!ring || !(ring->flags & K3_RING_FLAG_BUSY))
+ return -EINVAL;
+
+ return readl(&ring->rt->occ);
+}
+EXPORT_SYMBOL_GPL(k3_ringacc_ring_get_occ);
+
+u32 k3_ringacc_ring_is_full(struct k3_ring *ring)
+{
+ return !k3_ringacc_ring_get_free(ring);
+}
+EXPORT_SYMBOL_GPL(k3_ringacc_ring_is_full);
+
+enum k3_ringacc_access_mode {
+ K3_RINGACC_ACCESS_MODE_PUSH_HEAD,
+ K3_RINGACC_ACCESS_MODE_POP_HEAD,
+ K3_RINGACC_ACCESS_MODE_PUSH_TAIL,
+ K3_RINGACC_ACCESS_MODE_POP_TAIL,
+ K3_RINGACC_ACCESS_MODE_PEEK_HEAD,
+ K3_RINGACC_ACCESS_MODE_PEEK_TAIL,
+};
+
+#define K3_RINGACC_PROXY_MODE(x) (((x) & 0x3) << 16)
+#define K3_RINGACC_PROXY_ELSIZE(x) (((x) & 0x7) << 24)
+static int k3_ringacc_ring_cfg_proxy(struct k3_ring *ring,
+ enum k3_ringacc_proxy_access_mode mode)
+{
+ u32 val;
+
+ val = ring->ring_id;
+ val |= K3_RINGACC_PROXY_MODE(mode);
+ val |= K3_RINGACC_PROXY_ELSIZE(ring->elm_size);
+ writel(val, &ring->proxy->control);
+ return 0;
+}
+
+static int k3_ringacc_ring_access_proxy(struct k3_ring *ring, void *elem,
+ enum k3_ringacc_access_mode access_mode)
+{
+ void __iomem *ptr;
+
+ ptr = (void __iomem *)&ring->proxy->data;
+
+ switch (access_mode) {
+ case K3_RINGACC_ACCESS_MODE_PUSH_HEAD:
+ case K3_RINGACC_ACCESS_MODE_POP_HEAD:
+ k3_ringacc_ring_cfg_proxy(ring, PROXY_ACCESS_MODE_HEAD);
+ break;
+ case K3_RINGACC_ACCESS_MODE_PUSH_TAIL:
+ case K3_RINGACC_ACCESS_MODE_POP_TAIL:
+ k3_ringacc_ring_cfg_proxy(ring, PROXY_ACCESS_MODE_TAIL);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ ptr += k3_ringacc_ring_get_fifo_pos(ring);
+
+ switch (access_mode) {
+ case K3_RINGACC_ACCESS_MODE_POP_HEAD:
+ case K3_RINGACC_ACCESS_MODE_POP_TAIL:
+ dev_dbg(ring->parent->dev,
+ "proxy:memcpy_fromio(x): --> ptr(%p), mode:%d\n", ptr,
+ access_mode);
+ memcpy_fromio(elem, ptr, (4 << ring->elm_size));
+ ring->occ--;
+ break;
+ case K3_RINGACC_ACCESS_MODE_PUSH_TAIL:
+ case K3_RINGACC_ACCESS_MODE_PUSH_HEAD:
+ dev_dbg(ring->parent->dev,
+ "proxy:memcpy_toio(x): --> ptr(%p), mode:%d\n", ptr,
+ access_mode);
+ memcpy_toio(ptr, elem, (4 << ring->elm_size));
+ ring->free--;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ dev_dbg(ring->parent->dev, "proxy: free%d occ%d\n", ring->free,
+ ring->occ);
+ return 0;
+}
+
+static int k3_ringacc_ring_push_head_proxy(struct k3_ring *ring, void *elem)
+{
+ return k3_ringacc_ring_access_proxy(ring, elem,
+ K3_RINGACC_ACCESS_MODE_PUSH_HEAD);
+}
+
+static int k3_ringacc_ring_push_tail_proxy(struct k3_ring *ring, void *elem)
+{
+ return k3_ringacc_ring_access_proxy(ring, elem,
+ K3_RINGACC_ACCESS_MODE_PUSH_TAIL);
+}
+
+static int k3_ringacc_ring_pop_head_proxy(struct k3_ring *ring, void *elem)
+{
+ return k3_ringacc_ring_access_proxy(ring, elem,
+ K3_RINGACC_ACCESS_MODE_POP_HEAD);
+}
+
+static int k3_ringacc_ring_pop_tail_proxy(struct k3_ring *ring, void *elem)
+{
+ return k3_ringacc_ring_access_proxy(ring, elem,
+ K3_RINGACC_ACCESS_MODE_POP_TAIL);
+}
+
+static int k3_ringacc_ring_access_io(struct k3_ring *ring, void *elem,
+ enum k3_ringacc_access_mode access_mode)
+{
+ void __iomem *ptr;
+
+ switch (access_mode) {
+ case K3_RINGACC_ACCESS_MODE_PUSH_HEAD:
+ case K3_RINGACC_ACCESS_MODE_POP_HEAD:
+ ptr = (void __iomem *)&ring->fifos->head_data;
+ break;
+ case K3_RINGACC_ACCESS_MODE_PUSH_TAIL:
+ case K3_RINGACC_ACCESS_MODE_POP_TAIL:
+ ptr = (void __iomem *)&ring->fifos->tail_data;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ ptr += k3_ringacc_ring_get_fifo_pos(ring);
+
+ switch (access_mode) {
+ case K3_RINGACC_ACCESS_MODE_POP_HEAD:
+ case K3_RINGACC_ACCESS_MODE_POP_TAIL:
+ dev_dbg(ring->parent->dev,
+ "memcpy_fromio(x): --> ptr(%p), mode:%d\n", ptr,
+ access_mode);
+ memcpy_fromio(elem, ptr, (4 << ring->elm_size));
+ ring->occ--;
+ break;
+ case K3_RINGACC_ACCESS_MODE_PUSH_TAIL:
+ case K3_RINGACC_ACCESS_MODE_PUSH_HEAD:
+ dev_dbg(ring->parent->dev,
+ "memcpy_toio(x): --> ptr(%p), mode:%d\n", ptr,
+ access_mode);
+ memcpy_toio(ptr, elem, (4 << ring->elm_size));
+ ring->free--;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ dev_dbg(ring->parent->dev, "free%d index%d occ%d index%d\n", ring->free,
+ ring->windex, ring->occ, ring->rindex);
+ return 0;
+}
+
+static int k3_ringacc_ring_push_head_io(struct k3_ring *ring, void *elem)
+{
+ return k3_ringacc_ring_access_io(ring, elem,
+ K3_RINGACC_ACCESS_MODE_PUSH_HEAD);
+}
+
+static int k3_ringacc_ring_push_io(struct k3_ring *ring, void *elem)
+{
+ return k3_ringacc_ring_access_io(ring, elem,
+ K3_RINGACC_ACCESS_MODE_PUSH_TAIL);
+}
+
+static int k3_ringacc_ring_pop_io(struct k3_ring *ring, void *elem)
+{
+ return k3_ringacc_ring_access_io(ring, elem,
+ K3_RINGACC_ACCESS_MODE_POP_HEAD);
+}
+
+static int k3_ringacc_ring_pop_tail_io(struct k3_ring *ring, void *elem)
+{
+ return k3_ringacc_ring_access_io(ring, elem,
+ K3_RINGACC_ACCESS_MODE_POP_TAIL);
+}
+
+static int k3_ringacc_ring_push_mem(struct k3_ring *ring, void *elem)
+{
+ void *elem_ptr;
+
+ elem_ptr = k3_ringacc_get_elm_addr(ring, ring->windex);
+
+ memcpy(elem_ptr, elem, (4 << ring->elm_size));
+
+ ring->windex = (ring->windex + 1) % ring->size;
+ ring->free--;
+ writel(1, &ring->rt->db);
+
+ dev_dbg(ring->parent->dev, "ring_push_mem: free%d index%d\n",
+ ring->free, ring->windex);
+
+ return 0;
+}
+
+static int k3_ringacc_ring_pop_mem(struct k3_ring *ring, void *elem)
+{
+ void *elem_ptr;
+
+ elem_ptr = k3_ringacc_get_elm_addr(ring, ring->rindex);
+
+ memcpy(elem, elem_ptr, (4 << ring->elm_size));
+
+ ring->rindex = (ring->rindex + 1) % ring->size;
+ ring->occ--;
+ writel(-1, &ring->rt->db);
+
+ dev_dbg(ring->parent->dev, "ring_pop_mem: occ%d index%d pos_ptr%p\n",
+ ring->occ, ring->rindex, elem_ptr);
+ return 0;
+}
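+
+/*
+ * Index arithmetic sketch for the mem-mode accessors above: windex and
+ * rindex wrap modulo the ring size and the element stride is
+ * (4 << elm_size) bytes. For a 128-element ring of 8-byte elements
+ * (elm_size = 1):
+ *
+ * windex = (127 + 1) % 128; -> wraps to 0
+ * offset = windex * (4 << 1); -> 8-byte stride
+ * writel(1, &ring->rt->db); -> one element added
+ * writel(-1, &ring->rt->db); -> one element removed
+ */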
+
+int k3_ringacc_ring_push(struct k3_ring *ring, void *elem)
+{
+ int ret = -EOPNOTSUPP;
+
+ if (!ring || !(ring->flags & K3_RING_FLAG_BUSY))
+ return -EINVAL;
+
+ dev_dbg(ring->parent->dev, "ring_push: free%d index%d\n", ring->free,
+ ring->windex);
+
+ if (k3_ringacc_ring_is_full(ring))
+ return -ENOMEM;
+
+ if (ring->ops && ring->ops->push_tail)
+ ret = ring->ops->push_tail(ring, elem);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(k3_ringacc_ring_push);
+
+int k3_ringacc_ring_push_head(struct k3_ring *ring, void *elem)
+{
+ int ret = -EOPNOTSUPP;
+
+ if (!ring || !(ring->flags & K3_RING_FLAG_BUSY))
+ return -EINVAL;
+
+ dev_dbg(ring->parent->dev, "ring_push_head: free%d index%d\n",
+ ring->free, ring->windex);
+
+ if (k3_ringacc_ring_is_full(ring))
+ return -ENOMEM;
+
+ if (ring->ops && ring->ops->push_head)
+ ret = ring->ops->push_head(ring, elem);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(k3_ringacc_ring_push_head);
+
+int k3_ringacc_ring_pop(struct k3_ring *ring, void *elem)
+{
+ int ret = -EOPNOTSUPP;
+
+ if (!ring || !(ring->flags & K3_RING_FLAG_BUSY))
+ return -EINVAL;
+
+ if (!ring->occ)
+ ring->occ = k3_ringacc_ring_get_occ(ring);
+
+ dev_dbg(ring->parent->dev, "ring_pop: occ%d index%d\n", ring->occ,
+ ring->rindex);
+
+ if (!ring->occ)
+ return -ENODATA;
+
+ if (ring->ops && ring->ops->pop_head)
+ ret = ring->ops->pop_head(ring, elem);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(k3_ringacc_ring_pop);
+
+int k3_ringacc_ring_pop_tail(struct k3_ring *ring, void *elem)
+{
+ int ret = -EOPNOTSUPP;
+
+ if (!ring || !(ring->flags & K3_RING_FLAG_BUSY))
+ return -EINVAL;
+
+ if (!ring->occ)
+ ring->occ = k3_ringacc_ring_get_occ(ring);
+
+ dev_dbg(ring->parent->dev, "ring_pop_tail: occ%d index%d\n", ring->occ,
+ ring->rindex);
+
+ if (!ring->occ)
+ return -ENODATA;
+
+ if (ring->ops && ring->ops->pop_tail)
+ ret = ring->ops->pop_tail(ring, elem);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(k3_ringacc_ring_pop_tail);
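+
+/*
+ * Consumer-side sketch for the exported push/pop API above: drain every
+ * descriptor currently queued on a ring. Assumes 8-byte, DMA-address
+ * sized elements, the common case for NAVSS rings; the pool hand-back
+ * is left as a comment.
+ */
+static void __maybe_unused example_ring_drain(struct k3_ring *ring)
+{
+ dma_addr_t desc;
+
+ while (k3_ringacc_ring_get_occ(ring)) {
+ if (k3_ringacc_ring_pop(ring, &desc))
+ break;
+ /* return the descriptor at 'desc' to its pool here */
+ }
+}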
+
+struct k3_ringacc *of_k3_ringacc_get_by_phandle(struct device_node *np,
+ const char *property)
+{
+ struct device_node *ringacc_np;
+ struct k3_ringacc *ringacc = ERR_PTR(-EPROBE_DEFER);
+ struct k3_ringacc *entry;
+
+ ringacc_np = of_parse_phandle(np, property, 0);
+ if (!ringacc_np)
+ return ERR_PTR(-ENODEV);
+
+ mutex_lock(&k3_ringacc_list_lock);
+ list_for_each_entry(entry, &k3_ringacc_list, list)
+ if (entry->dev->of_node == ringacc_np) {
+ ringacc = entry;
+ break;
+ }
+ mutex_unlock(&k3_ringacc_list_lock);
+ of_node_put(ringacc_np);
+
+ return ringacc;
+}
+EXPORT_SYMBOL_GPL(of_k3_ringacc_get_by_phandle);
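+
+/*
+ * Lookup sketch: a client device referencing the ring accelerator
+ * through a phandle. The property name "ti,ringacc" is illustrative;
+ * the helper returns -EPROBE_DEFER until the ringacc has probed and
+ * added itself to k3_ringacc_list.
+ */
+static int __maybe_unused example_ringacc_lookup(struct device *dev,
+ struct k3_ringacc **out)
+{
+ struct k3_ringacc *ringacc;
+
+ ringacc = of_k3_ringacc_get_by_phandle(dev->of_node, "ti,ringacc");
+ if (IS_ERR(ringacc))
+ return PTR_ERR(ringacc);
+
+ *out = ringacc;
+ return 0;
+}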
+
+static int k3_ringacc_probe_dt(struct k3_ringacc *ringacc)
+{
+ struct device_node *node = ringacc->dev->of_node;
+ struct device *dev = ringacc->dev;
+ struct platform_device *pdev = to_platform_device(dev);
+ int ret;
+
+ if (!node) {
+ dev_err(dev, "device tree info unavailable\n");
+ return -ENODEV;
+ }
+
+ ret = of_property_read_u32(node, "ti,num-rings", &ringacc->num_rings);
+ if (ret) {
+ dev_err(dev, "ti,num-rings read failure %d\n", ret);
+ return ret;
+ }
+
+ ringacc->dma_ring_reset_quirk =
+ of_property_read_bool(node, "ti,dma-ring-reset-quirk");
+
+ ringacc->tisci = ti_sci_get_by_phandle(node, "ti,sci");
+ if (IS_ERR(ringacc->tisci)) {
+ ret = PTR_ERR(ringacc->tisci);
+ if (ret != -EPROBE_DEFER)
+ dev_err(dev, "ti,sci read fail %d\n", ret);
+ ringacc->tisci = NULL;
+ return ret;
+ }
+
+ ret = of_property_read_u32(node, "ti,sci-dev-id",
+ &ringacc->tisci_dev_id);
+ if (ret) {
+ dev_err(dev, "ti,sci-dev-id read fail %d\n", ret);
+ return ret;
+ }
+
+ pdev->id = ringacc->tisci_dev_id;
+
+ ringacc->rm_gp_range = devm_ti_sci_get_of_resource(ringacc->tisci, dev,
+ ringacc->tisci_dev_id,
+ "ti,sci-rm-range-gp-rings");
+ if (IS_ERR(ringacc->rm_gp_range)) {
+ dev_err(dev, "Failed to allocate MSI interrupts\n");
+ return PTR_ERR(ringacc->rm_gp_range);
+ }
+
+ return ti_sci_inta_msi_domain_alloc_irqs(ringacc->dev,
+ ringacc->rm_gp_range);
+}
+
+static int k3_ringacc_probe(struct platform_device *pdev)
+{
+ struct k3_ringacc *ringacc;
+ void __iomem *base_fifo, *base_rt;
+ struct device *dev = &pdev->dev;
+ struct resource *res;
+ int ret, i;
+
+ ringacc = devm_kzalloc(dev, sizeof(*ringacc), GFP_KERNEL);
+ if (!ringacc)
+ return -ENOMEM;
+
+ ringacc->dev = dev;
+ mutex_init(&ringacc->req_lock);
+
+ dev->msi_domain = of_msi_get_domain(dev, dev->of_node,
+ DOMAIN_BUS_TI_SCI_INTA_MSI);
+ if (!dev->msi_domain) {
+ dev_err(dev, "Failed to get MSI domain\n");
+ return -EPROBE_DEFER;
+ }
+
+ ret = k3_ringacc_probe_dt(ringacc);
+ if (ret)
+ return ret;
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "rt");
+ base_rt = devm_ioremap_resource(dev, res);
+ if (IS_ERR(base_rt))
+ return PTR_ERR(base_rt);
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "fifos");
+ base_fifo = devm_ioremap_resource(dev, res);
+ if (IS_ERR(base_fifo))
+ return PTR_ERR(base_fifo);
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "proxy_gcfg");
+ ringacc->proxy_gcfg = devm_ioremap_resource(dev, res);
+ if (IS_ERR(ringacc->proxy_gcfg))
+ return PTR_ERR(ringacc->proxy_gcfg);
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
+ "proxy_target");
+ ringacc->proxy_target_base = devm_ioremap_resource(dev, res);
+ if (IS_ERR(ringacc->proxy_target_base))
+ return PTR_ERR(ringacc->proxy_target_base);
+
+ ringacc->num_proxies = readl(&ringacc->proxy_gcfg->config) &
+ K3_RINGACC_PROXY_CFG_THREADS_MASK;
+
+ ringacc->rings = devm_kcalloc(dev, ringacc->num_rings,
+ sizeof(*ringacc->rings),
+ GFP_KERNEL);
+ ringacc->rings_inuse = devm_kcalloc(dev,
+ BITS_TO_LONGS(ringacc->num_rings),
+ sizeof(unsigned long), GFP_KERNEL);
+ ringacc->proxy_inuse = devm_kcalloc(dev,
+ BITS_TO_LONGS(ringacc->num_proxies),
+ sizeof(unsigned long), GFP_KERNEL);
+
+ if (!ringacc->rings || !ringacc->rings_inuse || !ringacc->proxy_inuse)
+ return -ENOMEM;
+
+ for (i = 0; i < ringacc->num_rings; i++) {
+ ringacc->rings[i].rt = base_rt +
+ K3_RINGACC_RT_REGS_STEP * i;
+ ringacc->rings[i].fifos = base_fifo +
+ K3_RINGACC_FIFO_REGS_STEP * i;
+ ringacc->rings[i].parent = ringacc;
+ ringacc->rings[i].ring_id = i;
+ ringacc->rings[i].proxy_id = K3_RINGACC_PROXY_NOT_USED;
+ }
+ dev_set_drvdata(dev, ringacc);
+
+ ringacc->tisci_ring_ops = &ringacc->tisci->ops.rm_ring_ops;
+
+ mutex_lock(&k3_ringacc_list_lock);
+ list_add_tail(&ringacc->list, &k3_ringacc_list);
+ mutex_unlock(&k3_ringacc_list_lock);
+
+ dev_info(dev, "Ring Accelerator probed rings:%u, gp-rings[%u,%u] sci-dev-id:%u\n",
+ ringacc->num_rings,
+ ringacc->rm_gp_range->desc[0].start,
+ ringacc->rm_gp_range->desc[0].num,
+ ringacc->tisci_dev_id);
+ dev_info(dev, "dma-ring-reset-quirk: %s\n",
+ ringacc->dma_ring_reset_quirk ? "enabled" : "disabled");
+ dev_info(dev, "RA Proxy rev. %08x, num_proxies:%u\n",
+ readl(&ringacc->proxy_gcfg->revision), ringacc->num_proxies);
+ return 0;
+}
+
+/* Match table for of_platform binding */
+static const struct of_device_id k3_ringacc_of_match[] = {
+ { .compatible = "ti,am654-navss-ringacc", },
+ {},
+};
+
+static struct platform_driver k3_ringacc_driver = {
+ .probe = k3_ringacc_probe,
+ .driver = {
+ .name = "k3-ringacc",
+ .of_match_table = k3_ringacc_of_match,
+ .suppress_bind_attrs = true,
+ },
+};
+builtin_platform_driver(k3_ringacc_driver);
diff --git a/drivers/soc/ti/wkup_m3_ipc.c b/drivers/soc/ti/wkup_m3_ipc.c
index 378369d9364a..e9ece45d7a33 100644
--- a/drivers/soc/ti/wkup_m3_ipc.c
+++ b/drivers/soc/ti/wkup_m3_ipc.c
@@ -419,6 +419,8 @@ static void wkup_m3_rproc_boot_thread(struct wkup_m3_ipc *m3_ipc)
ret = rproc_boot(m3_ipc->rproc);
if (ret)
dev_err(dev, "rproc_boot failed\n");
+ else
+ m3_ipc_state = m3_ipc;
do_exit(0);
}
@@ -505,8 +507,6 @@ static int wkup_m3_ipc_probe(struct platform_device *pdev)
goto err_put_rproc;
}
- m3_ipc_state = m3_ipc;
-
return 0;
err_put_rproc:
diff --git a/drivers/soc/xilinx/xlnx_vcu.c b/drivers/soc/xilinx/xlnx_vcu.c
index a840c0272135..a3aa40996f13 100644
--- a/drivers/soc/xilinx/xlnx_vcu.c
+++ b/drivers/soc/xilinx/xlnx_vcu.c
@@ -511,7 +511,7 @@ static int xvcu_probe(struct platform_device *pdev)
return -ENODEV;
}
- xvcu->vcu_slcr_ba = devm_ioremap_nocache(&pdev->dev, res->start,
+ xvcu->vcu_slcr_ba = devm_ioremap(&pdev->dev, res->start,
resource_size(res));
if (!xvcu->vcu_slcr_ba) {
dev_err(&pdev->dev, "vcu_slcr register mapping failed.\n");
@@ -524,7 +524,7 @@ static int xvcu_probe(struct platform_device *pdev)
return -ENODEV;
}
- xvcu->logicore_reg_ba = devm_ioremap_nocache(&pdev->dev, res->start,
+ xvcu->logicore_reg_ba = devm_ioremap(&pdev->dev, res->start,
resource_size(res));
if (!xvcu->logicore_reg_ba) {
dev_err(&pdev->dev, "logicore register mapping failed.\n");
diff --git a/drivers/soundwire/Kconfig b/drivers/soundwire/Kconfig
index c725d0a8b288..fa2b4ab92ed9 100644
--- a/drivers/soundwire/Kconfig
+++ b/drivers/soundwire/Kconfig
@@ -31,4 +31,13 @@ config SOUNDWIRE_INTEL
enable this config option to get the SoundWire support for that
device.
+config SOUNDWIRE_QCOM
+ tristate "Qualcomm SoundWire Master driver"
+ depends on SLIMBUS
+ depends on SND_SOC
+ help
+ SoundWire Qualcomm Master driver.
+ If you have a Qualcomm platform which has a SoundWire Master then
+ enable this config option to get the SoundWire support for that
+ device.
endif
diff --git a/drivers/soundwire/Makefile b/drivers/soundwire/Makefile
index 563894e5ecaf..e2cdff990e9f 100644
--- a/drivers/soundwire/Makefile
+++ b/drivers/soundwire/Makefile
@@ -21,3 +21,7 @@ obj-$(CONFIG_SOUNDWIRE_INTEL) += soundwire-intel.o
soundwire-intel-init-objs := intel_init.o
obj-$(CONFIG_SOUNDWIRE_INTEL) += soundwire-intel-init.o
+
+# Qualcomm driver
+soundwire-qcom-objs := qcom.o
+obj-$(CONFIG_SOUNDWIRE_QCOM) += soundwire-qcom.o
diff --git a/drivers/soundwire/bus.c b/drivers/soundwire/bus.c
index be5d437058ed..6106577fb3ed 100644
--- a/drivers/soundwire/bus.c
+++ b/drivers/soundwire/bus.c
@@ -456,26 +456,35 @@ err:
static int sdw_assign_device_num(struct sdw_slave *slave)
{
int ret, dev_num;
+ bool new_device = false;
/* check first if device number is assigned, if so reuse that */
if (!slave->dev_num) {
- mutex_lock(&slave->bus->bus_lock);
- dev_num = sdw_get_device_num(slave);
- mutex_unlock(&slave->bus->bus_lock);
- if (dev_num < 0) {
- dev_err(slave->bus->dev, "Get dev_num failed: %d\n",
- dev_num);
- return dev_num;
+ if (!slave->dev_num_sticky) {
+ mutex_lock(&slave->bus->bus_lock);
+ dev_num = sdw_get_device_num(slave);
+ mutex_unlock(&slave->bus->bus_lock);
+ if (dev_num < 0) {
+ dev_err(slave->bus->dev, "Get dev_num failed: %d\n",
+ dev_num);
+ return dev_num;
+ }
+ slave->dev_num = dev_num;
+ slave->dev_num_sticky = dev_num;
+ new_device = true;
+ } else {
+ slave->dev_num = slave->dev_num_sticky;
}
- } else {
+ }
+
+ if (!new_device)
dev_info(slave->bus->dev,
- "Slave already registered dev_num:%d\n",
+ "Slave already registered, reusing dev_num:%d\n",
slave->dev_num);
- /* Clear the slave->dev_num to transfer message on device 0 */
- dev_num = slave->dev_num;
- slave->dev_num = 0;
- }
+ /* Clear the slave->dev_num to transfer message on device 0 */
+ dev_num = slave->dev_num;
+ slave->dev_num = 0;
ret = sdw_write(slave, SDW_SCP_DEVNUMBER, dev_num);
if (ret < 0) {
@@ -485,7 +494,7 @@ static int sdw_assign_device_num(struct sdw_slave *slave)
}
/* After xfer of msg, restore dev_num */
- slave->dev_num = dev_num;
+ slave->dev_num = slave->dev_num_sticky;
return 0;
}
@@ -979,6 +988,24 @@ int sdw_handle_slave_status(struct sdw_bus *bus,
struct sdw_slave *slave;
int i, ret = 0;
+ /* first check if any Slaves fell off the bus */
+ for (i = 1; i <= SDW_MAX_DEVICES; i++) {
+ mutex_lock(&bus->bus_lock);
+ if (!test_bit(i, bus->assigned)) {
+ mutex_unlock(&bus->bus_lock);
+ continue;
+ }
+ mutex_unlock(&bus->bus_lock);
+
+ slave = sdw_get_slave(bus, i);
+ if (!slave)
+ continue;
+
+ if (status[i] == SDW_SLAVE_UNATTACHED &&
+ slave->status != SDW_SLAVE_UNATTACHED)
+ sdw_modify_slave_status(slave, SDW_SLAVE_UNATTACHED);
+ }
+
if (status[0] == SDW_SLAVE_ATTACHED) {
dev_dbg(bus->dev, "Slave attached, programming device number\n");
ret = sdw_program_device_num(bus);
diff --git a/drivers/soundwire/cadence_master.c b/drivers/soundwire/cadence_master.c
index fed21e2b2277..9bec270d0fa4 100644
--- a/drivers/soundwire/cadence_master.c
+++ b/drivers/soundwire/cadence_master.c
@@ -74,6 +74,7 @@ MODULE_PARM_DESC(cdns_mcp_int_mask, "Cadence MCP IntMask");
#define CDNS_MCP_INTMASK 0x48
#define CDNS_MCP_INT_IRQ BIT(31)
+#define CDNS_MCP_INT_RESERVED1 GENMASK(30, 17)
#define CDNS_MCP_INT_WAKEUP BIT(16)
#define CDNS_MCP_INT_SLAVE_RSVD BIT(15)
#define CDNS_MCP_INT_SLAVE_ALERT BIT(14)
@@ -85,10 +86,12 @@ MODULE_PARM_DESC(cdns_mcp_int_mask, "Cadence MCP IntMask");
#define CDNS_MCP_INT_DATA_CLASH BIT(9)
#define CDNS_MCP_INT_PARITY BIT(8)
#define CDNS_MCP_INT_CMD_ERR BIT(7)
+#define CDNS_MCP_INT_RESERVED2 GENMASK(6, 4)
#define CDNS_MCP_INT_RX_NE BIT(3)
#define CDNS_MCP_INT_RX_WL BIT(2)
#define CDNS_MCP_INT_TXE BIT(1)
#define CDNS_MCP_INT_TXF BIT(0)
+#define CDNS_MCP_INT_RESERVED (CDNS_MCP_INT_RESERVED1 | CDNS_MCP_INT_RESERVED2)
#define CDNS_MCP_INTSET 0x4C
@@ -444,7 +447,8 @@ _cdns_xfer_msg(struct sdw_cdns *cdns, struct sdw_msg *msg, int cmd,
time = wait_for_completion_timeout(&cdns->tx_complete,
msecs_to_jiffies(CDNS_TX_TIMEOUT));
if (!time) {
- dev_err(cdns->dev, "IO transfer timed out\n");
+ dev_err(cdns->dev, "IO transfer timed out, cmd %d device %d addr %x len %d\n",
+ cmd, msg->dev_num, msg->addr, msg->len);
msg->len = 0;
return SDW_CMD_TIMEOUT;
}
@@ -672,13 +676,36 @@ static int cdns_update_slave_status(struct sdw_cdns *cdns,
/* first check if Slave reported multiple status */
if (set_status > 1) {
+ u32 val;
+
+ dev_warn_ratelimited(cdns->dev,
+ "Slave %d reported multiple Status: %d\n",
+ i, mask);
+
+ /* check latest status extracted from PING commands */
+ val = cdns_readl(cdns, CDNS_MCP_SLAVE_STAT);
+ val >>= (i * 2);
+
+ switch (val & 0x3) {
+ case 0:
+ status[i] = SDW_SLAVE_UNATTACHED;
+ break;
+ case 1:
+ status[i] = SDW_SLAVE_ATTACHED;
+ break;
+ case 2:
+ status[i] = SDW_SLAVE_ALERT;
+ break;
+ case 3:
+ default:
+ status[i] = SDW_SLAVE_RESERVED;
+ break;
+ }
+
dev_warn_ratelimited(cdns->dev,
- "Slave reported multiple Status: %d\n",
- mask);
- /*
- * TODO: we need to reread the status here by
- * issuing a PING cmd
- */
+ "Slave %d status updated to %d\n",
+ i, status[i]);
+
}
}
@@ -705,6 +732,10 @@ irqreturn_t sdw_cdns_irq(int irq, void *dev_id)
int_status = cdns_readl(cdns, CDNS_MCP_INTSTAT);
+ /* check for reserved values read as zero */
+ if (int_status & CDNS_MCP_INT_RESERVED)
+ return IRQ_NONE;
+
if (!(int_status & CDNS_MCP_INT_IRQ))
return IRQ_NONE;
@@ -812,8 +843,9 @@ int sdw_cdns_exit_reset(struct sdw_cdns *cdns)
EXPORT_SYMBOL(sdw_cdns_exit_reset);
/**
- * sdw_cdns_enable_interrupt() - Enable SDW interrupts and update config
+ * sdw_cdns_enable_interrupt() - Enable SDW interrupts
* @cdns: Cadence instance
+ * @state: true to enable interrupts, false to disable them
*/
int sdw_cdns_enable_interrupt(struct sdw_cdns *cdns, bool state)
{
@@ -849,12 +881,21 @@ int sdw_cdns_enable_interrupt(struct sdw_cdns *cdns, bool state)
mask = interrupt_mask;
update_masks:
+ /* clear slave interrupt status before enabling interrupt */
+ if (state) {
+ u32 slave_state;
+
+ slave_state = cdns_readl(cdns, CDNS_MCP_SLAVE_INTSTAT0);
+ cdns_writel(cdns, CDNS_MCP_SLAVE_INTSTAT0, slave_state);
+ slave_state = cdns_readl(cdns, CDNS_MCP_SLAVE_INTSTAT1);
+ cdns_writel(cdns, CDNS_MCP_SLAVE_INTSTAT1, slave_state);
+ }
+
cdns_writel(cdns, CDNS_MCP_SLAVE_INTMASK0, slave_intmask0);
cdns_writel(cdns, CDNS_MCP_SLAVE_INTMASK1, slave_intmask1);
cdns_writel(cdns, CDNS_MCP_INTMASK, mask);
- /* commit changes */
- return cdns_update_config(cdns);
+ return 0;
}
EXPORT_SYMBOL(sdw_cdns_enable_interrupt);
@@ -948,8 +989,6 @@ int sdw_cdns_pdi_init(struct sdw_cdns *cdns,
ret = cdns_allocate_pdi(cdns, &stream->out,
stream->num_out, offset);
- offset += stream->num_out;
-
if (ret)
return ret;
@@ -1224,8 +1263,10 @@ EXPORT_SYMBOL(cdns_set_sdw_stream);
* cdns_find_pdi() - Find a free PDI
*
* @cdns: Cadence instance
+ * @offset: Starting offset
* @num: Number of PDIs
* @pdi: PDI instances
+ * @dai_id: DAI id
*
* Find a PDI for a given PDI array. The PDI num and dai_id are
* expected to match, return NULL otherwise.
@@ -1277,6 +1318,7 @@ EXPORT_SYMBOL(sdw_cdns_config_stream);
* @stream: Stream to be allocated
* @ch: Channel count
* @dir: Data direction
+ * @dai_id: DAI id
*/
struct sdw_cdns_pdi *sdw_cdns_alloc_pdi(struct sdw_cdns *cdns,
struct sdw_cdns_streams *stream,
diff --git a/drivers/soundwire/intel.c b/drivers/soundwire/intel.c
index 99dc61021211..06ef3a3ac080 100644
--- a/drivers/soundwire/intel.c
+++ b/drivers/soundwire/intel.c
@@ -529,17 +529,24 @@ intel_pdi_alh_configure(struct sdw_intel *sdw, struct sdw_cdns_pdi *pdi)
intel_writel(alh, SDW_ALH_STRMZCFG(pdi->intel_alh_id), conf);
}
-static int intel_config_stream(struct sdw_intel *sdw,
+static int intel_params_stream(struct sdw_intel *sdw,
struct snd_pcm_substream *substream,
struct snd_soc_dai *dai,
- struct snd_pcm_hw_params *hw_params, int link_id)
+ struct snd_pcm_hw_params *hw_params,
+ int link_id, int alh_stream_id)
{
struct sdw_intel_link_res *res = sdw->res;
+ struct sdw_intel_stream_params_data params_data;
- if (res->ops && res->ops->config_stream && res->arg)
- return res->ops->config_stream(res->arg,
- substream, dai, hw_params, link_id);
+ params_data.substream = substream;
+ params_data.dai = dai;
+ params_data.hw_params = hw_params;
+ params_data.link_id = link_id;
+ params_data.alh_stream_id = alh_stream_id;
+ if (res->ops && res->ops->params_stream && res->dev)
+ return res->ops->params_stream(res->dev,
+ &params_data);
return -EIO;
}
@@ -654,7 +661,8 @@ static int intel_hw_params(struct snd_pcm_substream *substream,
/* Inform DSP about PDI stream number */
- ret = intel_config_stream(sdw, substream, dai, params,
+ ret = intel_params_stream(sdw, substream, dai, params,
+ sdw->instance,
pdi->intel_alh_id);
if (ret)
goto error;
@@ -872,6 +880,9 @@ static int sdw_master_read_intel_prop(struct sdw_bus *bus)
"intel-sdw-ip-clock",
&prop->mclk_freq);
+ /* the values reported by BIOS are the 2x clock, not the bus clock */
+ prop->mclk_freq /= 2;
+
fwnode_property_read_u32(link,
"intel-quirk-mask",
&quirk_mask);
diff --git a/drivers/soundwire/intel.h b/drivers/soundwire/intel.h
index d923b6262330..38b7c125fb10 100644
--- a/drivers/soundwire/intel.h
+++ b/drivers/soundwire/intel.h
@@ -5,23 +5,26 @@
#define __SDW_INTEL_LOCAL_H
/**
- * struct sdw_intel_link_res - Soundwire link resources
+ * struct sdw_intel_link_res - Soundwire Intel link resource structure,
+ * typically populated by the controller driver.
+ * @pdev: platform_device
+ * @mmio_base: mmio base of SoundWire registers
* @registers: Link IO registers base
* @shim: Audio shim pointer
* @alh: ALH (Audio Link Hub) pointer
* @irq: Interrupt line
* @ops: Shim callback ops
- * @arg: Shim callback ops argument
- *
- * This is set as pdata for each link instance.
+ * @dev: device implementing hw_params and free callbacks
*/
struct sdw_intel_link_res {
+ struct platform_device *pdev;
+ void __iomem *mmio_base; /* not strictly needed, useful for debug */
void __iomem *registers;
void __iomem *shim;
void __iomem *alh;
int irq;
const struct sdw_intel_ops *ops;
- void *arg;
+ struct device *dev;
};
#endif /* __SDW_INTEL_LOCAL_H */
diff --git a/drivers/soundwire/intel_init.c b/drivers/soundwire/intel_init.c
index 2a2b4d8df462..4b769409f6f8 100644
--- a/drivers/soundwire/intel_init.c
+++ b/drivers/soundwire/intel_init.c
@@ -27,19 +27,9 @@ static int link_mask;
module_param_named(sdw_link_mask, link_mask, int, 0444);
MODULE_PARM_DESC(sdw_link_mask, "Intel link mask (one bit per link)");
-struct sdw_link_data {
- struct sdw_intel_link_res res;
- struct platform_device *pdev;
-};
-
-struct sdw_intel_ctx {
- int count;
- struct sdw_link_data *links;
-};
-
static int sdw_intel_cleanup_pdev(struct sdw_intel_ctx *ctx)
{
- struct sdw_link_data *link = ctx->links;
+ struct sdw_intel_link_res *link = ctx->links;
int i;
if (!link)
@@ -62,7 +52,7 @@ static struct sdw_intel_ctx
{
struct platform_device_info pdevinfo;
struct platform_device *pdev;
- struct sdw_link_data *link;
+ struct sdw_intel_link_res *link;
struct sdw_intel_ctx *ctx;
struct acpi_device *adev;
int ret, i;
@@ -123,14 +113,13 @@ static struct sdw_intel_ctx
continue;
}
- link->res.irq = res->irq;
- link->res.registers = res->mmio_base + SDW_LINK_BASE
+ link->registers = res->mmio_base + SDW_LINK_BASE
+ (SDW_LINK_SIZE * i);
- link->res.shim = res->mmio_base + SDW_SHIM_BASE;
- link->res.alh = res->mmio_base + SDW_ALH_BASE;
+ link->shim = res->mmio_base + SDW_SHIM_BASE;
+ link->alh = res->mmio_base + SDW_ALH_BASE;
- link->res.ops = res->ops;
- link->res.arg = res->arg;
+ link->ops = res->ops;
+ link->dev = res->dev;
memset(&pdevinfo, 0, sizeof(pdevinfo));
@@ -138,8 +127,6 @@ static struct sdw_intel_ctx
pdevinfo.name = "int-sdw";
pdevinfo.id = i;
pdevinfo.fwnode = acpi_fwnode_handle(adev);
- pdevinfo.data = &link->res;
- pdevinfo.size_data = sizeof(link->res);
pdev = platform_device_register_full(&pdevinfo);
if (IS_ERR(pdev)) {
@@ -216,7 +203,6 @@ void *sdw_intel_init(acpi_handle *parent_handle, struct sdw_intel_res *res)
return sdw_intel_add_controller(res);
}
-EXPORT_SYMBOL(sdw_intel_init);
/**
* sdw_intel_exit() - SoundWire Intel exit
@@ -224,10 +210,8 @@ EXPORT_SYMBOL(sdw_intel_init);
*
* Delete the controller instances created and cleanup
*/
-void sdw_intel_exit(void *arg)
+void sdw_intel_exit(struct sdw_intel_ctx *ctx)
{
- struct sdw_intel_ctx *ctx = arg;
-
sdw_intel_cleanup_pdev(ctx);
kfree(ctx);
}
diff --git a/drivers/soundwire/qcom.c b/drivers/soundwire/qcom.c
new file mode 100644
index 000000000000..1c6c6a2e0def
--- /dev/null
+++ b/drivers/soundwire/qcom.c
@@ -0,0 +1,861 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (c) 2019, Linaro Limited
+
+#include <linux/clk.h>
+#include <linux/completion.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_irq.h>
+#include <linux/of_device.h>
+#include <linux/regmap.h>
+#include <linux/slab.h>
+#include <linux/slimbus.h>
+#include <linux/soundwire/sdw.h>
+#include <linux/soundwire/sdw_registers.h>
+#include <sound/pcm_params.h>
+#include <sound/soc.h>
+#include "bus.h"
+
+#define SWRM_COMP_HW_VERSION 0x00
+#define SWRM_COMP_CFG_ADDR 0x04
+#define SWRM_COMP_CFG_IRQ_LEVEL_OR_PULSE_MSK BIT(1)
+#define SWRM_COMP_CFG_ENABLE_MSK BIT(0)
+#define SWRM_COMP_PARAMS 0x100
+#define SWRM_COMP_PARAMS_DOUT_PORTS_MASK GENMASK(4, 0)
+#define SWRM_COMP_PARAMS_DIN_PORTS_MASK GENMASK(9, 5)
+#define SWRM_INTERRUPT_STATUS 0x200
+#define SWRM_INTERRUPT_STATUS_RMSK GENMASK(16, 0)
+#define SWRM_INTERRUPT_STATUS_NEW_SLAVE_ATTACHED BIT(1)
+#define SWRM_INTERRUPT_STATUS_CHANGE_ENUM_SLAVE_STATUS BIT(2)
+#define SWRM_INTERRUPT_STATUS_CMD_ERROR BIT(7)
+#define SWRM_INTERRUPT_STATUS_SPECIAL_CMD_ID_FINISHED BIT(10)
+#define SWRM_INTERRUPT_MASK_ADDR 0x204
+#define SWRM_INTERRUPT_CLEAR 0x208
+#define SWRM_CMD_FIFO_WR_CMD 0x300
+#define SWRM_CMD_FIFO_RD_CMD 0x304
+#define SWRM_CMD_FIFO_CMD 0x308
+#define SWRM_CMD_FIFO_STATUS 0x30C
+#define SWRM_CMD_FIFO_CFG_ADDR 0x314
+#define SWRM_RD_WR_CMD_RETRIES 0x7
+#define SWRM_CMD_FIFO_RD_FIFO_ADDR 0x318
+#define SWRM_ENUMERATOR_CFG_ADDR 0x500
+#define SWRM_MCP_FRAME_CTRL_BANK_ADDR(m) (0x101C + 0x40 * (m))
+#define SWRM_MCP_FRAME_CTRL_BANK_ROW_CTRL_SHFT 3
+#define SWRM_MCP_FRAME_CTRL_BANK_COL_CTRL_BMSK GENMASK(2, 0)
+#define SWRM_MCP_FRAME_CTRL_BANK_ROW_CTRL_BMSK GENMASK(7, 3)
+#define SWRM_MCP_FRAME_CTRL_BANK_COL_CTRL_SHFT 0
+#define SWRM_MCP_CFG_ADDR 0x1048
+#define SWRM_MCP_CFG_MAX_NUM_OF_CMD_NO_PINGS_BMSK GENMASK(21, 17)
+#define SWRM_MCP_CFG_MAX_NUM_OF_CMD_NO_PINGS_SHFT 0x11
+#define SWRM_DEF_CMD_NO_PINGS 0x1f
+#define SWRM_MCP_STATUS 0x104C
+#define SWRM_MCP_STATUS_BANK_NUM_MASK BIT(0)
+#define SWRM_MCP_SLV_STATUS 0x1090
+#define SWRM_MCP_SLV_STATUS_MASK GENMASK(1, 0)
+#define SWRM_DP_PORT_CTRL_BANK(n, m) (0x1124 + 0x100 * (n - 1) + 0x40 * m)
+#define SWRM_DP_PORT_CTRL_EN_CHAN_SHFT 0x18
+#define SWRM_DP_PORT_CTRL_OFFSET2_SHFT 0x10
+#define SWRM_DP_PORT_CTRL_OFFSET1_SHFT 0x08
+#define SWRM_AHB_BRIDGE_WR_DATA_0 0xc85
+#define SWRM_AHB_BRIDGE_WR_ADDR_0 0xc89
+#define SWRM_AHB_BRIDGE_RD_ADDR_0 0xc8d
+#define SWRM_AHB_BRIDGE_RD_DATA_0 0xc91
+
+#define SWRM_REG_VAL_PACK(data, dev, id, reg) \
+ ((reg) | ((id) << 16) | ((dev) << 20) | ((data) << 24))
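+
+/*
+ * Worked example of the packing above: writing data byte 0x12 to
+ * register 0x40 on device 1 with the special command id gives
+ *
+ * SWRM_REG_VAL_PACK(0x12, 1, SWRM_SPECIAL_CMD_ID, 0x40)
+ *   = 0x40 | (0xF << 16) | (1 << 20) | (0x12 << 24)
+ *   = 0x121F0040
+ *
+ * i.e. bits [15:0] register address, [19:16] command id, [23:20]
+ * device number, [31:24] data byte.
+ */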
+
+#define SWRM_MAX_ROW_VAL 0 /* Rows = 48 */
+#define SWRM_DEFAULT_ROWS 48
+#define SWRM_MIN_COL_VAL 0 /* Cols = 2 */
+#define SWRM_DEFAULT_COL 16
+#define SWRM_MAX_COL_VAL 7
+#define SWRM_SPECIAL_CMD_ID 0xF
+#define MAX_FREQ_NUM 1
+#define TIMEOUT_MS (2 * HZ)
+#define QCOM_SWRM_MAX_RD_LEN 0xf
+#define QCOM_SDW_MAX_PORTS 14
+#define DEFAULT_CLK_FREQ 9600000
+#define SWRM_MAX_DAIS 0xF
+
+struct qcom_swrm_port_config {
+ u8 si;
+ u8 off1;
+ u8 off2;
+};
+
+struct qcom_swrm_ctrl {
+ struct sdw_bus bus;
+ struct device *dev;
+ struct regmap *regmap;
+ struct completion *comp;
+ struct work_struct slave_work;
+ /* read/write lock */
+ spinlock_t comp_lock;
+ /* Port alloc/free lock */
+ struct mutex port_lock;
+ struct clk *hclk;
+ u8 wr_cmd_id;
+ u8 rd_cmd_id;
+ int irq;
+ unsigned int version;
+ int num_din_ports;
+ int num_dout_ports;
+ unsigned long dout_port_mask;
+ unsigned long din_port_mask;
+ struct qcom_swrm_port_config pconfig[QCOM_SDW_MAX_PORTS];
+ struct sdw_stream_runtime *sruntime[SWRM_MAX_DAIS];
+ enum sdw_slave_status status[SDW_MAX_DEVICES];
+ int (*reg_read)(struct qcom_swrm_ctrl *ctrl, int reg, u32 *val);
+ int (*reg_write)(struct qcom_swrm_ctrl *ctrl, int reg, int val);
+};
+
+#define to_qcom_sdw(b) container_of(b, struct qcom_swrm_ctrl, bus)
+
+static int qcom_swrm_ahb_reg_read(struct qcom_swrm_ctrl *ctrl, int reg,
+ u32 *val)
+{
+ struct regmap *wcd_regmap = ctrl->regmap;
+ int ret;
+
+ /* pg register + offset */
+ ret = regmap_bulk_write(wcd_regmap, SWRM_AHB_BRIDGE_RD_ADDR_0,
+ (u8 *)&reg, 4);
+ if (ret < 0)
+ return SDW_CMD_FAIL;
+
+ ret = regmap_bulk_read(wcd_regmap, SWRM_AHB_BRIDGE_RD_DATA_0,
+ val, 4);
+ if (ret < 0)
+ return SDW_CMD_FAIL;
+
+ return SDW_CMD_OK;
+}
+
+static int qcom_swrm_ahb_reg_write(struct qcom_swrm_ctrl *ctrl,
+ int reg, int val)
+{
+ struct regmap *wcd_regmap = ctrl->regmap;
+ int ret;
+ /* pg register + offset */
+ ret = regmap_bulk_write(wcd_regmap, SWRM_AHB_BRIDGE_WR_DATA_0,
+ (u8 *)&val, 4);
+ if (ret)
+ return SDW_CMD_FAIL;
+
+ /* write address register */
+ ret = regmap_bulk_write(wcd_regmap, SWRM_AHB_BRIDGE_WR_ADDR_0,
+ (u8 *)&reg, 4);
+ if (ret)
+ return SDW_CMD_FAIL;
+
+ return SDW_CMD_OK;
+}
+
+static int qcom_swrm_cmd_fifo_wr_cmd(struct qcom_swrm_ctrl *ctrl, u8 cmd_data,
+ u8 dev_addr, u16 reg_addr)
+{
+ DECLARE_COMPLETION_ONSTACK(comp);
+ unsigned long flags;
+ u32 val;
+ int ret;
+
+ spin_lock_irqsave(&ctrl->comp_lock, flags);
+ ctrl->comp = &comp;
+ spin_unlock_irqrestore(&ctrl->comp_lock, flags);
+ val = SWRM_REG_VAL_PACK(cmd_data, dev_addr,
+ SWRM_SPECIAL_CMD_ID, reg_addr);
+ ret = ctrl->reg_write(ctrl, SWRM_CMD_FIFO_WR_CMD, val);
+ if (ret)
+ goto err;
+
+ ret = wait_for_completion_timeout(ctrl->comp,
+ msecs_to_jiffies(TIMEOUT_MS));
+
+ if (!ret)
+ ret = SDW_CMD_IGNORED;
+ else
+ ret = SDW_CMD_OK;
+err:
+ spin_lock_irqsave(&ctrl->comp_lock, flags);
+ ctrl->comp = NULL;
+ spin_unlock_irqrestore(&ctrl->comp_lock, flags);
+
+ return ret;
+}
+
+static int qcom_swrm_cmd_fifo_rd_cmd(struct qcom_swrm_ctrl *ctrl,
+ u8 dev_addr, u16 reg_addr,
+ u32 len, u8 *rval)
+{
+ int i, ret;
+ u32 val;
+ DECLARE_COMPLETION_ONSTACK(comp);
+ unsigned long flags;
+
+ spin_lock_irqsave(&ctrl->comp_lock, flags);
+ ctrl->comp = &comp;
+ spin_unlock_irqrestore(&ctrl->comp_lock, flags);
+
+ val = SWRM_REG_VAL_PACK(len, dev_addr, SWRM_SPECIAL_CMD_ID, reg_addr);
+ ret = ctrl->reg_write(ctrl, SWRM_CMD_FIFO_RD_CMD, val);
+ if (ret)
+ goto err;
+
+ ret = wait_for_completion_timeout(ctrl->comp,
+ msecs_to_jiffies(TIMEOUT_MS));
+
+ if (!ret) {
+ ret = SDW_CMD_IGNORED;
+ goto err;
+ } else {
+ ret = SDW_CMD_OK;
+ }
+
+ for (i = 0; i < len; i++) {
+ ctrl->reg_read(ctrl, SWRM_CMD_FIFO_RD_FIFO_ADDR, &val);
+ rval[i] = val & 0xFF;
+ }
+
+err:
+ spin_lock_irqsave(&ctrl->comp_lock, flags);
+ ctrl->comp = NULL;
+ spin_unlock_irqrestore(&ctrl->comp_lock, flags);
+
+ return ret;
+}
+
+static void qcom_swrm_get_device_status(struct qcom_swrm_ctrl *ctrl)
+{
+ u32 val;
+ int i;
+
+ ctrl->reg_read(ctrl, SWRM_MCP_SLV_STATUS, &val);
+
+ for (i = 0; i < SDW_MAX_DEVICES; i++) {
+ u32 s;
+
+ s = (val >> (i * 2));
+ s &= SWRM_MCP_SLV_STATUS_MASK;
+ ctrl->status[i] = s;
+ }
+}
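+
+/*
+ * Decoding sketch for the loop above: SWRM_MCP_SLV_STATUS packs two
+ * status bits per device index. For example, val = 0x6 gives
+ *
+ * device 0: (0x6 >> 0) & 0x3 = 2 -> SDW_SLAVE_ALERT
+ * device 1: (0x6 >> 2) & 0x3 = 1 -> SDW_SLAVE_ATTACHED
+ *
+ * matching the enum sdw_slave_status encoding used by the bus core.
+ */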
+
+static irqreturn_t qcom_swrm_irq_handler(int irq, void *dev_id)
+{
+ struct qcom_swrm_ctrl *ctrl = dev_id;
+ u32 sts, value;
+ unsigned long flags;
+
+ ctrl->reg_read(ctrl, SWRM_INTERRUPT_STATUS, &sts);
+
+ if (sts & SWRM_INTERRUPT_STATUS_CMD_ERROR) {
+ ctrl->reg_read(ctrl, SWRM_CMD_FIFO_STATUS, &value);
+ dev_err_ratelimited(ctrl->dev,
+ "CMD error, fifo status 0x%x\n",
+ value);
+ ctrl->reg_write(ctrl, SWRM_CMD_FIFO_CMD, 0x1);
+ }
+
+ if ((sts & SWRM_INTERRUPT_STATUS_NEW_SLAVE_ATTACHED) ||
+ sts & SWRM_INTERRUPT_STATUS_CHANGE_ENUM_SLAVE_STATUS)
+ schedule_work(&ctrl->slave_work);
+
+ /*
+ * Clear the interrupt before calling complete(): complete() can
+ * schedule new reads/writes which rely on interrupts, so clearing
+ * first avoids missing any that arrive in the meantime.
+ */
+ ctrl->reg_write(ctrl, SWRM_INTERRUPT_CLEAR, sts);
+
+ if (sts & SWRM_INTERRUPT_STATUS_SPECIAL_CMD_ID_FINISHED) {
+ spin_lock_irqsave(&ctrl->comp_lock, flags);
+ if (ctrl->comp)
+ complete(ctrl->comp);
+ spin_unlock_irqrestore(&ctrl->comp_lock, flags);
+ }
+
+ return IRQ_HANDLED;
+}
+
+static int qcom_swrm_init(struct qcom_swrm_ctrl *ctrl)
+{
+ u32 val;
+
+ /* Clear Rows and Cols */
+ val = (SWRM_MAX_ROW_VAL << SWRM_MCP_FRAME_CTRL_BANK_ROW_CTRL_SHFT |
+ SWRM_MIN_COL_VAL << SWRM_MCP_FRAME_CTRL_BANK_COL_CTRL_SHFT);
+
+ ctrl->reg_write(ctrl, SWRM_MCP_FRAME_CTRL_BANK_ADDR(0), val);
+
+ /* Disable Auto enumeration */
+ ctrl->reg_write(ctrl, SWRM_ENUMERATOR_CFG_ADDR, 0);
+
+ /* Mask soundwire interrupts */
+ ctrl->reg_write(ctrl, SWRM_INTERRUPT_MASK_ADDR,
+ SWRM_INTERRUPT_STATUS_RMSK);
+
+ /* Configure No pings */
+ ctrl->reg_read(ctrl, SWRM_MCP_CFG_ADDR, &val);
+ val &= ~SWRM_MCP_CFG_MAX_NUM_OF_CMD_NO_PINGS_BMSK;
+ val |= (SWRM_DEF_CMD_NO_PINGS <<
+ SWRM_MCP_CFG_MAX_NUM_OF_CMD_NO_PINGS_SHFT);
+ ctrl->reg_write(ctrl, SWRM_MCP_CFG_ADDR, val);
+
+ /* Configure number of retries of a read/write cmd */
+ ctrl->reg_write(ctrl, SWRM_CMD_FIFO_CFG_ADDR, SWRM_RD_WR_CMD_RETRIES);
+
+ /* Set IRQ to PULSE */
+ ctrl->reg_write(ctrl, SWRM_COMP_CFG_ADDR,
+ SWRM_COMP_CFG_IRQ_LEVEL_OR_PULSE_MSK |
+ SWRM_COMP_CFG_ENABLE_MSK);
+ return 0;
+}
+
+static enum sdw_command_response qcom_swrm_xfer_msg(struct sdw_bus *bus,
+ struct sdw_msg *msg)
+{
+ struct qcom_swrm_ctrl *ctrl = to_qcom_sdw(bus);
+ int ret, i, len;
+
+ if (msg->flags == SDW_MSG_FLAG_READ) {
+ for (i = 0; i < msg->len;) {
+ if ((msg->len - i) < QCOM_SWRM_MAX_RD_LEN)
+ len = msg->len - i;
+ else
+ len = QCOM_SWRM_MAX_RD_LEN;
+
+ ret = qcom_swrm_cmd_fifo_rd_cmd(ctrl, msg->dev_num,
+ msg->addr + i, len,
+ &msg->buf[i]);
+ if (ret)
+ return ret;
+
+ i = i + len;
+ }
+ } else if (msg->flags == SDW_MSG_FLAG_WRITE) {
+ for (i = 0; i < msg->len; i++) {
+ ret = qcom_swrm_cmd_fifo_wr_cmd(ctrl, msg->buf[i],
+ msg->dev_num,
+ msg->addr + i);
+ if (ret)
+ return SDW_CMD_IGNORED;
+ }
+ }
+
+ return SDW_CMD_OK;
+}
+
+static int qcom_swrm_pre_bank_switch(struct sdw_bus *bus)
+{
+ u32 reg = SWRM_MCP_FRAME_CTRL_BANK_ADDR(bus->params.next_bank);
+ struct qcom_swrm_ctrl *ctrl = to_qcom_sdw(bus);
+ u32 val;
+
+ ctrl->reg_read(ctrl, reg, &val);
+
+ val &= ~SWRM_MCP_FRAME_CTRL_BANK_COL_CTRL_BMSK;
+ val &= ~SWRM_MCP_FRAME_CTRL_BANK_ROW_CTRL_BMSK;
+
+ val |= (SWRM_MAX_ROW_VAL << SWRM_MCP_FRAME_CTRL_BANK_ROW_CTRL_SHFT |
+ SWRM_MAX_COL_VAL << SWRM_MCP_FRAME_CTRL_BANK_COL_CTRL_SHFT);
+
+ return ctrl->reg_write(ctrl, reg, val);
+}
+
+static int qcom_swrm_port_params(struct sdw_bus *bus,
+ struct sdw_port_params *p_params,
+ unsigned int bank)
+{
+ /* TBD */
+ return 0;
+}
+
+static int qcom_swrm_transport_params(struct sdw_bus *bus,
+ struct sdw_transport_params *params,
+ enum sdw_reg_bank bank)
+{
+ struct qcom_swrm_ctrl *ctrl = to_qcom_sdw(bus);
+ u32 value;
+
+ value = params->offset1 << SWRM_DP_PORT_CTRL_OFFSET1_SHFT;
+ value |= params->offset2 << SWRM_DP_PORT_CTRL_OFFSET2_SHFT;
+ value |= params->sample_interval - 1;
+
+ return ctrl->reg_write(ctrl,
+ SWRM_DP_PORT_CTRL_BANK((params->port_num), bank),
+ value);
+}
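+
+/*
+ * Register layout sketch for the write above: for port 1, bank 0,
+ * offset1 = 1, offset2 = 0 and a sample interval of 8:
+ *
+ * value = (1 << 8) | (0 << 16) | (8 - 1) = 0x107
+ *
+ * written to SWRM_DP_PORT_CTRL_BANK(1, 0) = 0x1124.
+ */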
+
+static int qcom_swrm_port_enable(struct sdw_bus *bus,
+ struct sdw_enable_ch *enable_ch,
+ unsigned int bank)
+{
+ u32 reg = SWRM_DP_PORT_CTRL_BANK(enable_ch->port_num, bank);
+ struct qcom_swrm_ctrl *ctrl = to_qcom_sdw(bus);
+ u32 val;
+
+ ctrl->reg_read(ctrl, reg, &val);
+
+ if (enable_ch->enable)
+ val |= (enable_ch->ch_mask << SWRM_DP_PORT_CTRL_EN_CHAN_SHFT);
+ else
+ val &= ~(0xff << SWRM_DP_PORT_CTRL_EN_CHAN_SHFT);
+
+ return ctrl->reg_write(ctrl, reg, val);
+}
+
+static struct sdw_master_port_ops qcom_swrm_port_ops = {
+ .dpn_set_port_params = qcom_swrm_port_params,
+ .dpn_set_port_transport_params = qcom_swrm_transport_params,
+ .dpn_port_enable_ch = qcom_swrm_port_enable,
+};
+
+static struct sdw_master_ops qcom_swrm_ops = {
+ .xfer_msg = qcom_swrm_xfer_msg,
+ .pre_bank_switch = qcom_swrm_pre_bank_switch,
+};
+
+static int qcom_swrm_compute_params(struct sdw_bus *bus)
+{
+ struct qcom_swrm_ctrl *ctrl = to_qcom_sdw(bus);
+ struct sdw_master_runtime *m_rt;
+ struct sdw_slave_runtime *s_rt;
+ struct sdw_port_runtime *p_rt;
+ struct qcom_swrm_port_config *pcfg;
+ int i = 0;
+
+ list_for_each_entry(m_rt, &bus->m_rt_list, bus_node) {
+ list_for_each_entry(p_rt, &m_rt->port_list, port_node) {
+ pcfg = &ctrl->pconfig[p_rt->num - 1];
+ p_rt->transport_params.port_num = p_rt->num;
+ p_rt->transport_params.sample_interval = pcfg->si + 1;
+ p_rt->transport_params.offset1 = pcfg->off1;
+ p_rt->transport_params.offset2 = pcfg->off2;
+ }
+
+ list_for_each_entry(s_rt, &m_rt->slave_rt_list, m_rt_node) {
+ list_for_each_entry(p_rt, &s_rt->port_list, port_node) {
+ pcfg = &ctrl->pconfig[i];
+ p_rt->transport_params.port_num = p_rt->num;
+ p_rt->transport_params.sample_interval =
+ pcfg->si + 1;
+ p_rt->transport_params.offset1 = pcfg->off1;
+ p_rt->transport_params.offset2 = pcfg->off2;
+ i++;
+ }
+ }
+ }
+
+ return 0;
+}
+
+static u32 qcom_swrm_freq_tbl[MAX_FREQ_NUM] = {
+ DEFAULT_CLK_FREQ,
+};
+
+static void qcom_swrm_slave_wq(struct work_struct *work)
+{
+ struct qcom_swrm_ctrl *ctrl =
+ container_of(work, struct qcom_swrm_ctrl, slave_work);
+
+ qcom_swrm_get_device_status(ctrl);
+ sdw_handle_slave_status(&ctrl->bus, ctrl->status);
+}
+
+static void qcom_swrm_stream_free_ports(struct qcom_swrm_ctrl *ctrl,
+ struct sdw_stream_runtime *stream)
+{
+ struct sdw_master_runtime *m_rt;
+ struct sdw_port_runtime *p_rt;
+ unsigned long *port_mask;
+
+ mutex_lock(&ctrl->port_lock);
+
+ list_for_each_entry(m_rt, &stream->master_list, stream_node) {
+ if (m_rt->direction == SDW_DATA_DIR_RX)
+ port_mask = &ctrl->dout_port_mask;
+ else
+ port_mask = &ctrl->din_port_mask;
+
+ list_for_each_entry(p_rt, &m_rt->port_list, port_node)
+ clear_bit(p_rt->num - 1, port_mask);
+ }
+
+ mutex_unlock(&ctrl->port_lock);
+}
+
+static int qcom_swrm_stream_alloc_ports(struct qcom_swrm_ctrl *ctrl,
+ struct sdw_stream_runtime *stream,
+ struct snd_pcm_hw_params *params,
+ int direction)
+{
+ struct sdw_port_config pconfig[QCOM_SDW_MAX_PORTS];
+ struct sdw_stream_config sconfig;
+ struct sdw_master_runtime *m_rt;
+ struct sdw_slave_runtime *s_rt;
+ struct sdw_port_runtime *p_rt;
+ unsigned long *port_mask;
+ int i, maxport, pn, nports = 0, ret = 0;
+
+ mutex_lock(&ctrl->port_lock);
+ list_for_each_entry(m_rt, &stream->master_list, stream_node) {
+ if (m_rt->direction == SDW_DATA_DIR_RX) {
+ maxport = ctrl->num_dout_ports;
+ port_mask = &ctrl->dout_port_mask;
+ } else {
+ maxport = ctrl->num_din_ports;
+ port_mask = &ctrl->din_port_mask;
+ }
+
+ list_for_each_entry(s_rt, &m_rt->slave_rt_list, m_rt_node) {
+ list_for_each_entry(p_rt, &s_rt->port_list, port_node) {
+ /* Port numbers run from 1 to 14 */
+ pn = find_first_zero_bit(port_mask, maxport);
+ if (pn > (maxport - 1)) {
+ dev_err(ctrl->dev, "All ports busy\n");
+ ret = -EBUSY;
+ goto err;
+ }
+ set_bit(pn, port_mask);
+ pconfig[nports].num = pn + 1;
+ pconfig[nports].ch_mask = p_rt->ch_mask;
+ nports++;
+ }
+ }
+ }
+
+ if (direction == SNDRV_PCM_STREAM_CAPTURE)
+ sconfig.direction = SDW_DATA_DIR_TX;
+ else
+ sconfig.direction = SDW_DATA_DIR_RX;
+
+ /* hw parameters will be ignored as we only support PDM */
+ sconfig.ch_count = 1;
+ sconfig.frame_rate = params_rate(params);
+ sconfig.type = stream->type;
+ sconfig.bps = 1;
+ ret = sdw_stream_add_master(&ctrl->bus, &sconfig, pconfig,
+ nports, stream);
+err:
+ if (ret) {
+ for (i = 0; i < nports; i++)
+ clear_bit(pconfig[i].num - 1, port_mask);
+ }
+
+ mutex_unlock(&ctrl->port_lock);
+
+ return ret;
+}
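+
+/*
+ * Allocation sketch for the function above: the din/dout masks act as
+ * bitmap allocators. With ports 1 and 2 already taken on a 14-port
+ * bank:
+ *
+ * mask = 0x3;
+ * pn = find_first_zero_bit(&mask, 14); -> 2
+ * set_bit(pn, &mask);
+ *
+ * and pconfig[].num = pn + 1 = 3, since SoundWire data ports are
+ * numbered from 1.
+ */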
+
+static int qcom_swrm_hw_params(struct snd_pcm_substream *substream,
+ struct snd_pcm_hw_params *params,
+ struct snd_soc_dai *dai)
+{
+ struct qcom_swrm_ctrl *ctrl = dev_get_drvdata(dai->dev);
+ struct sdw_stream_runtime *sruntime = ctrl->sruntime[dai->id];
+ int ret;
+
+ ret = qcom_swrm_stream_alloc_ports(ctrl, sruntime, params,
+ substream->stream);
+ if (ret)
+ qcom_swrm_stream_free_ports(ctrl, sruntime);
+
+ return ret;
+}
+
+static int qcom_swrm_hw_free(struct snd_pcm_substream *substream,
+ struct snd_soc_dai *dai)
+{
+ struct qcom_swrm_ctrl *ctrl = dev_get_drvdata(dai->dev);
+ struct sdw_stream_runtime *sruntime = ctrl->sruntime[dai->id];
+
+ qcom_swrm_stream_free_ports(ctrl, sruntime);
+ sdw_stream_remove_master(&ctrl->bus, sruntime);
+
+ return 0;
+}
+
+static int qcom_swrm_set_sdw_stream(struct snd_soc_dai *dai,
+ void *stream, int direction)
+{
+ struct qcom_swrm_ctrl *ctrl = dev_get_drvdata(dai->dev);
+
+ ctrl->sruntime[dai->id] = stream;
+
+ return 0;
+}
+
+static int qcom_swrm_startup(struct snd_pcm_substream *substream,
+ struct snd_soc_dai *dai)
+{
+ struct qcom_swrm_ctrl *ctrl = dev_get_drvdata(dai->dev);
+ struct snd_soc_pcm_runtime *rtd = substream->private_data;
+ struct sdw_stream_runtime *sruntime;
+ int ret, i;
+
+ sruntime = sdw_alloc_stream(dai->name);
+ if (!sruntime)
+ return -ENOMEM;
+
+ ctrl->sruntime[dai->id] = sruntime;
+
+ for (i = 0; i < rtd->num_codecs; i++) {
+ ret = snd_soc_dai_set_sdw_stream(rtd->codec_dais[i], sruntime,
+ substream->stream);
+ if (ret < 0 && ret != -ENOTSUPP) {
+ dev_err(dai->dev, "Failed to set sdw stream on %s",
+ rtd->codec_dais[i]->name);
+ sdw_release_stream(sruntime);
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+static void qcom_swrm_shutdown(struct snd_pcm_substream *substream,
+ struct snd_soc_dai *dai)
+{
+ struct qcom_swrm_ctrl *ctrl = dev_get_drvdata(dai->dev);
+
+ sdw_release_stream(ctrl->sruntime[dai->id]);
+ ctrl->sruntime[dai->id] = NULL;
+}
+
+static const struct snd_soc_dai_ops qcom_swrm_pdm_dai_ops = {
+ .hw_params = qcom_swrm_hw_params,
+ .hw_free = qcom_swrm_hw_free,
+ .startup = qcom_swrm_startup,
+ .shutdown = qcom_swrm_shutdown,
+ .set_sdw_stream = qcom_swrm_set_sdw_stream,
+};
+
+static const struct snd_soc_component_driver qcom_swrm_dai_component = {
+ .name = "soundwire",
+};
+
+static int qcom_swrm_register_dais(struct qcom_swrm_ctrl *ctrl)
+{
+ int num_dais = ctrl->num_dout_ports + ctrl->num_din_ports;
+ struct snd_soc_dai_driver *dais;
+ struct snd_soc_pcm_stream *stream;
+ struct device *dev = ctrl->dev;
+ int i;
+
+ /* PDM dais are only tested for now */
+ dais = devm_kcalloc(dev, num_dais, sizeof(*dais), GFP_KERNEL);
+ if (!dais)
+ return -ENOMEM;
+
+ for (i = 0; i < num_dais; i++) {
+ dais[i].name = devm_kasprintf(dev, GFP_KERNEL, "SDW Pin%d", i);
+ if (!dais[i].name)
+ return -ENOMEM;
+
+ if (i < ctrl->num_dout_ports)
+ stream = &dais[i].playback;
+ else
+ stream = &dais[i].capture;
+
+ stream->channels_min = 1;
+ stream->channels_max = 1;
+ stream->rates = SNDRV_PCM_RATE_48000;
+ stream->formats = SNDRV_PCM_FMTBIT_S16_LE;
+
+ dais[i].ops = &qcom_swrm_pdm_dai_ops;
+ dais[i].id = i;
+ }
+
+ return devm_snd_soc_register_component(ctrl->dev,
+ &qcom_swrm_dai_component,
+ dais, num_dais);
+}
+
+static int qcom_swrm_get_port_config(struct qcom_swrm_ctrl *ctrl)
+{
+ struct device_node *np = ctrl->dev->of_node;
+ u8 off1[QCOM_SDW_MAX_PORTS];
+ u8 off2[QCOM_SDW_MAX_PORTS];
+ u8 si[QCOM_SDW_MAX_PORTS];
+ int i, ret, nports, val;
+
+ ctrl->reg_read(ctrl, SWRM_COMP_PARAMS, &val);
+
+ ctrl->num_dout_ports = val & SWRM_COMP_PARAMS_DOUT_PORTS_MASK;
+ ctrl->num_din_ports = (val & SWRM_COMP_PARAMS_DIN_PORTS_MASK) >> 5;
+
+ ret = of_property_read_u32(np, "qcom,din-ports", &val);
+ if (ret)
+ return ret;
+
+ if (val > ctrl->num_din_ports)
+ return -EINVAL;
+
+ ctrl->num_din_ports = val;
+
+ ret = of_property_read_u32(np, "qcom,dout-ports", &val);
+ if (ret)
+ return ret;
+
+ if (val > ctrl->num_dout_ports)
+ return -EINVAL;
+
+ ctrl->num_dout_ports = val;
+
+ nports = ctrl->num_dout_ports + ctrl->num_din_ports;
+
+ ret = of_property_read_u8_array(np, "qcom,ports-offset1",
+ off1, nports);
+ if (ret)
+ return ret;
+
+ ret = of_property_read_u8_array(np, "qcom,ports-offset2",
+ off2, nports);
+ if (ret)
+ return ret;
+
+ ret = of_property_read_u8_array(np, "qcom,ports-sinterval-low",
+ si, nports);
+ if (ret)
+ return ret;
+
+ for (i = 0; i < nports; i++) {
+ ctrl->pconfig[i].si = si[i];
+ ctrl->pconfig[i].off1 = off1[i];
+ ctrl->pconfig[i].off2 = off2[i];
+ }
+
+ return 0;
+}
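+
+/*
+ * Parsing example for the function above, assuming a controller node
+ * with (values illustrative):
+ *
+ * qcom,dout-ports = <2>;
+ * qcom,din-ports = <0>;
+ * qcom,ports-sinterval-low = /bits/ 8 <0x07 0x1F>;
+ * qcom,ports-offset1 = /bits/ 8 <0x01 0x02>;
+ * qcom,ports-offset2 = /bits/ 8 <0x00 0x00>;
+ *
+ * which yields pconfig[0] = { .si = 0x07, .off1 = 0x01, .off2 = 0x00 },
+ * i.e. a sample interval of 8 (si + 1) starting at bit offset 1.
+ */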
+
+static int qcom_swrm_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct sdw_master_prop *prop;
+ struct sdw_bus_params *params;
+ struct qcom_swrm_ctrl *ctrl;
+ int ret;
+ u32 val;
+
+ ctrl = devm_kzalloc(dev, sizeof(*ctrl), GFP_KERNEL);
+ if (!ctrl)
+ return -ENOMEM;
+
+ if (dev->parent->bus == &slimbus_bus) {
+ ctrl->reg_read = qcom_swrm_ahb_reg_read;
+ ctrl->reg_write = qcom_swrm_ahb_reg_write;
+ ctrl->regmap = dev_get_regmap(dev->parent, NULL);
+ if (!ctrl->regmap)
+ return -EINVAL;
+ } else {
+ /* Only WCD-based SoundWire controllers are supported */
+ return -ENOTSUPP;
+ }
+
+ ctrl->irq = of_irq_get(dev->of_node, 0);
+ if (ctrl->irq < 0)
+ return ctrl->irq;
+
+ ctrl->hclk = devm_clk_get(dev, "iface");
+ if (IS_ERR(ctrl->hclk))
+ return PTR_ERR(ctrl->hclk);
+
+ ret = clk_prepare_enable(ctrl->hclk);
+ if (ret)
+ return ret;
+
+ ctrl->dev = dev;
+ dev_set_drvdata(&pdev->dev, ctrl);
+ spin_lock_init(&ctrl->comp_lock);
+ mutex_init(&ctrl->port_lock);
+ INIT_WORK(&ctrl->slave_work, qcom_swrm_slave_wq);
+
+ ctrl->bus.dev = dev;
+ ctrl->bus.ops = &qcom_swrm_ops;
+ ctrl->bus.port_ops = &qcom_swrm_port_ops;
+ ctrl->bus.compute_params = &qcom_swrm_compute_params;
+
+ ret = qcom_swrm_get_port_config(ctrl);
+ if (ret)
+ return ret;
+
+ params = &ctrl->bus.params;
+ params->max_dr_freq = DEFAULT_CLK_FREQ;
+ params->curr_dr_freq = DEFAULT_CLK_FREQ;
+ params->col = SWRM_DEFAULT_COL;
+ params->row = SWRM_DEFAULT_ROWS;
+ ctrl->reg_read(ctrl, SWRM_MCP_STATUS, &val);
+ params->curr_bank = val & SWRM_MCP_STATUS_BANK_NUM_MASK;
+ params->next_bank = !params->curr_bank;
+
+ prop = &ctrl->bus.prop;
+ prop->max_clk_freq = DEFAULT_CLK_FREQ;
+ prop->num_clk_gears = 0;
+ prop->num_clk_freq = MAX_FREQ_NUM;
+ prop->clk_freq = &qcom_swrm_freq_tbl[0];
+ prop->default_col = SWRM_DEFAULT_COL;
+ prop->default_row = SWRM_DEFAULT_ROWS;
+
+ ctrl->reg_read(ctrl, SWRM_COMP_HW_VERSION, &ctrl->version);
+
+ ret = devm_request_threaded_irq(dev, ctrl->irq, NULL,
+ qcom_swrm_irq_handler,
+ IRQF_TRIGGER_RISING,
+ "soundwire", ctrl);
+ if (ret) {
+ dev_err(dev, "Failed to request soundwire irq\n");
+ goto err;
+ }
+
+ ret = sdw_add_bus_master(&ctrl->bus);
+ if (ret) {
+ dev_err(dev, "Failed to register Soundwire controller (%d)\n",
+ ret);
+ goto err;
+ }
+
+ qcom_swrm_init(ctrl);
+ ret = qcom_swrm_register_dais(ctrl);
+ if (ret)
+ goto err;
+
+ dev_info(dev, "Qualcomm Soundwire controller v%x.%x.%x Registered\n",
+ (ctrl->version >> 24) & 0xff, (ctrl->version >> 16) & 0xff,
+ ctrl->version & 0xffff);
+
+ return 0;
+err:
+ clk_disable_unprepare(ctrl->hclk);
+ return ret;
+}
+
+static int qcom_swrm_remove(struct platform_device *pdev)
+{
+ struct qcom_swrm_ctrl *ctrl = dev_get_drvdata(&pdev->dev);
+
+ sdw_delete_bus_master(&ctrl->bus);
+ clk_disable_unprepare(ctrl->hclk);
+
+ return 0;
+}
+
+static const struct of_device_id qcom_swrm_of_match[] = {
+ { .compatible = "qcom,soundwire-v1.3.0", },
+ {/* sentinel */},
+};
+
+MODULE_DEVICE_TABLE(of, qcom_swrm_of_match);
+
+static struct platform_driver qcom_swrm_driver = {
+ .probe = &qcom_swrm_probe,
+ .remove = &qcom_swrm_remove,
+ .driver = {
+ .name = "qcom-soundwire",
+ .of_match_table = qcom_swrm_of_match,
+ }
+};
+module_platform_driver(qcom_swrm_driver);
+
+MODULE_DESCRIPTION("Qualcomm soundwire driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/soundwire/stream.c b/drivers/soundwire/stream.c
index e69f94a8c3a8..178ae92b8cc1 100644
--- a/drivers/soundwire/stream.c
+++ b/drivers/soundwire/stream.c
@@ -1554,8 +1554,6 @@ int sdw_prepare_stream(struct sdw_stream_runtime *stream)
sdw_acquire_bus_lock(stream);
ret = _sdw_prepare_stream(stream);
- if (ret < 0)
- pr_err("Prepare for stream:%s failed: %d\n", stream->name, ret);
sdw_release_bus_lock(stream);
return ret;
@@ -1622,8 +1620,6 @@ int sdw_enable_stream(struct sdw_stream_runtime *stream)
sdw_acquire_bus_lock(stream);
ret = _sdw_enable_stream(stream);
- if (ret < 0)
- pr_err("Enable for stream:%s failed: %d\n", stream->name, ret);
sdw_release_bus_lock(stream);
return ret;
@@ -1698,8 +1694,6 @@ int sdw_disable_stream(struct sdw_stream_runtime *stream)
sdw_acquire_bus_lock(stream);
ret = _sdw_disable_stream(stream);
- if (ret < 0)
- pr_err("Disable for stream:%s failed: %d\n", stream->name, ret);
sdw_release_bus_lock(stream);
return ret;
@@ -1756,8 +1750,6 @@ int sdw_deprepare_stream(struct sdw_stream_runtime *stream)
sdw_acquire_bus_lock(stream);
ret = _sdw_deprepare_stream(stream);
- if (ret < 0)
- pr_err("De-prepare for stream:%d failed: %d\n", ret, ret);
sdw_release_bus_lock(stream);
return ret;
diff --git a/drivers/spi/Kconfig b/drivers/spi/Kconfig
index 870f7797b56b..d6ed0c355954 100644
--- a/drivers/spi/Kconfig
+++ b/drivers/spi/Kconfig
@@ -281,6 +281,15 @@ config SPI_FSL_QUADSPI
This controller does not support generic SPI messages. It only
supports the high-level SPI memory interface.
+config SPI_HISI_SFC_V3XX
+ tristate "HiSilicon SPI-NOR Flash Controller for Hi16XX chipsets"
+ depends on (ARM64 && ACPI) || COMPILE_TEST
+ depends on HAS_IOMEM
+ select MTD_SPI_NOR
+ help
+ This enables support for the HiSilicon v3xx SPI-NOR flash controller
+ found in Hi16xx chipsets.
+
config SPI_NXP_FLEXSPI
tristate "NXP Flex SPI controller"
depends on ARCH_LAYERSCAPE || HAS_IOMEM
diff --git a/drivers/spi/Makefile b/drivers/spi/Makefile
index bb49c9e6d0a0..9b65ec5afc5e 100644
--- a/drivers/spi/Makefile
+++ b/drivers/spi/Makefile
@@ -48,6 +48,7 @@ obj-$(CONFIG_SPI_FSL_LPSPI) += spi-fsl-lpspi.o
obj-$(CONFIG_SPI_FSL_QUADSPI) += spi-fsl-qspi.o
obj-$(CONFIG_SPI_FSL_SPI) += spi-fsl-spi.o
obj-$(CONFIG_SPI_GPIO) += spi-gpio.o
+obj-$(CONFIG_SPI_HISI_SFC_V3XX) += spi-hisi-sfc-v3xx.o
obj-$(CONFIG_SPI_IMG_SPFI) += spi-img-spfi.o
obj-$(CONFIG_SPI_IMX) += spi-imx.o
obj-$(CONFIG_SPI_LANTIQ_SSC) += spi-lantiq-ssc.o
diff --git a/drivers/spi/spi-atmel.c b/drivers/spi/spi-atmel.c
index 56f0ca361deb..013458cabe3c 100644
--- a/drivers/spi/spi-atmel.c
+++ b/drivers/spi/spi-atmel.c
@@ -514,26 +514,19 @@ static int atmel_spi_configure_dma(struct spi_master *master,
master->dma_tx = dma_request_chan(dev, "tx");
if (IS_ERR(master->dma_tx)) {
err = PTR_ERR(master->dma_tx);
- if (err == -EPROBE_DEFER) {
- dev_warn(dev, "no DMA channel available at the moment\n");
- goto error_clear;
- }
- dev_err(dev,
- "DMA TX channel not available, SPI unable to use DMA\n");
- err = -EBUSY;
+ if (err != -EPROBE_DEFER)
+ dev_err(dev, "No TX DMA channel, DMA is disabled\n");
goto error_clear;
}
- /*
- * No reason to check EPROBE_DEFER here since we have already requested
- * tx channel. If it fails here, it's for another reason.
- */
- master->dma_rx = dma_request_slave_channel(dev, "rx");
-
- if (!master->dma_rx) {
- dev_err(dev,
- "DMA RX channel not available, SPI unable to use DMA\n");
- err = -EBUSY;
+ master->dma_rx = dma_request_chan(dev, "rx");
+ if (IS_ERR(master->dma_rx)) {
+ err = PTR_ERR(master->dma_rx);
+ /*
+ * No reason to check EPROBE_DEFER here since we have already
+ * requested tx channel.
+ */
+ dev_err(dev, "No RX DMA channel, DMA is disabled\n");
goto error;
}
@@ -548,7 +541,7 @@ static int atmel_spi_configure_dma(struct spi_master *master,
return 0;
error:
- if (master->dma_rx)
+ if (!IS_ERR(master->dma_rx))
dma_release_channel(master->dma_rx);
if (!IS_ERR(master->dma_tx))
dma_release_channel(master->dma_tx);
diff --git a/drivers/spi/spi-bcm-qspi.c b/drivers/spi/spi-bcm-qspi.c
index 85bad70f59e3..23d295f36c80 100644
--- a/drivers/spi/spi-bcm-qspi.c
+++ b/drivers/spi/spi-bcm-qspi.c
@@ -1293,7 +1293,7 @@ int bcm_qspi_probe(struct platform_device *pdev,
name = qspi_irq_tab[val].irq_name;
if (qspi_irq_tab[val].irq_source == SINGLE_L2) {
/* get the l2 interrupts */
- irq = platform_get_irq_byname(pdev, name);
+ irq = platform_get_irq_byname_optional(pdev, name);
} else if (!num_ints && soc_intc) {
/* all mspi, bspi intrs muxed to one L1 intr */
irq = platform_get_irq(pdev, 0);
diff --git a/drivers/spi/spi-bcm2835.c b/drivers/spi/spi-bcm2835.c
index fb61a620effc..11c235879bb7 100644
--- a/drivers/spi/spi-bcm2835.c
+++ b/drivers/spi/spi-bcm2835.c
@@ -68,7 +68,7 @@
#define BCM2835_SPI_FIFO_SIZE 64
#define BCM2835_SPI_FIFO_SIZE_3_4 48
#define BCM2835_SPI_DMA_MIN_LENGTH 96
-#define BCM2835_SPI_NUM_CS 3 /* raise as necessary */
+#define BCM2835_SPI_NUM_CS 4 /* raise as necessary */
#define BCM2835_SPI_MODE_BITS (SPI_CPOL | SPI_CPHA | SPI_CS_HIGH \
| SPI_NO_CS | SPI_3WIRE)
@@ -888,8 +888,8 @@ static void bcm2835_dma_release(struct spi_controller *ctlr,
}
}
-static void bcm2835_dma_init(struct spi_controller *ctlr, struct device *dev,
- struct bcm2835_spi *bs)
+static int bcm2835_dma_init(struct spi_controller *ctlr, struct device *dev,
+ struct bcm2835_spi *bs)
{
struct dma_slave_config slave_config;
const __be32 *addr;
@@ -900,19 +900,24 @@ static void bcm2835_dma_init(struct spi_controller *ctlr, struct device *dev,
addr = of_get_address(ctlr->dev.of_node, 0, NULL, NULL);
if (!addr) {
dev_err(dev, "could not get DMA-register address - not using dma mode\n");
- goto err;
+ /* Fall back to interrupt mode */
+ return 0;
}
dma_reg_base = be32_to_cpup(addr);
/* get tx/rx dma */
- ctlr->dma_tx = dma_request_slave_channel(dev, "tx");
- if (!ctlr->dma_tx) {
+ ctlr->dma_tx = dma_request_chan(dev, "tx");
+ if (IS_ERR(ctlr->dma_tx)) {
dev_err(dev, "no tx-dma configuration found - not using dma mode\n");
+ ret = PTR_ERR(ctlr->dma_tx);
+ ctlr->dma_tx = NULL;
goto err;
}
- ctlr->dma_rx = dma_request_slave_channel(dev, "rx");
- if (!ctlr->dma_rx) {
+ ctlr->dma_rx = dma_request_chan(dev, "rx");
+ if (IS_ERR(ctlr->dma_rx)) {
dev_err(dev, "no rx-dma configuration found - not using dma mode\n");
+ ret = PTR_ERR(ctlr->dma_rx);
+ ctlr->dma_rx = NULL;
goto err_release;
}
@@ -997,7 +1002,7 @@ static void bcm2835_dma_init(struct spi_controller *ctlr, struct device *dev,
/* all went well, so set can_dma */
ctlr->can_dma = bcm2835_spi_can_dma;
- return;
+ return 0;
err_config:
dev_err(dev, "issue configuring dma: %d - not using DMA mode\n",
@@ -1005,7 +1010,14 @@ err_config:
err_release:
bcm2835_dma_release(ctlr, bs);
err:
- return;
+ /*
+ * Only report error for deferred probing, otherwise fall back to
+ * interrupt mode
+ */
+ if (ret != -EPROBE_DEFER)
+ ret = 0;
+
+ return ret;
}
static int bcm2835_spi_transfer_one_poll(struct spi_controller *ctlr,
@@ -1305,7 +1317,10 @@ static int bcm2835_spi_probe(struct platform_device *pdev)
bs->clk = devm_clk_get(&pdev->dev, NULL);
if (IS_ERR(bs->clk)) {
err = PTR_ERR(bs->clk);
- dev_err(&pdev->dev, "could not get clk: %d\n", err);
+ if (err == -EPROBE_DEFER)
+ dev_dbg(&pdev->dev, "could not get clk: %d\n", err);
+ else
+ dev_err(&pdev->dev, "could not get clk: %d\n", err);
goto out_controller_put;
}
@@ -1317,7 +1332,9 @@ static int bcm2835_spi_probe(struct platform_device *pdev)
clk_prepare_enable(bs->clk);
- bcm2835_dma_init(ctlr, &pdev->dev, bs);
+ err = bcm2835_dma_init(ctlr, &pdev->dev, bs);
+ if (err)
+ goto out_clk_disable;
/* initialise the hardware with the default polarities */
bcm2835_wr(bs, BCM2835_SPI_CS,
@@ -1327,20 +1344,22 @@ static int bcm2835_spi_probe(struct platform_device *pdev)
dev_name(&pdev->dev), ctlr);
if (err) {
dev_err(&pdev->dev, "could not request IRQ: %d\n", err);
- goto out_clk_disable;
+ goto out_dma_release;
}
err = devm_spi_register_controller(&pdev->dev, ctlr);
if (err) {
dev_err(&pdev->dev, "could not register SPI controller: %d\n",
err);
- goto out_clk_disable;
+ goto out_dma_release;
}
bcm2835_debugfs_create(bs, dev_name(&pdev->dev));
return 0;
+out_dma_release:
+ bcm2835_dma_release(ctlr, bs);
out_clk_disable:
clk_disable_unprepare(bs->clk);
out_controller_put:
diff --git a/drivers/spi/spi-bitbang.c b/drivers/spi/spi-bitbang.c
index d84e22dd6f9f..68491a8bf7b5 100644
--- a/drivers/spi/spi-bitbang.c
+++ b/drivers/spi/spi-bitbang.c
@@ -329,8 +329,20 @@ static void spi_bitbang_set_cs(struct spi_device *spi, bool enable)
int spi_bitbang_init(struct spi_bitbang *bitbang)
{
struct spi_master *master = bitbang->master;
+ bool custom_cs;
- if (!master || !bitbang->chipselect)
+ if (!master)
+ return -EINVAL;
+ /*
+ * We only need the chipselect callback if we are actually using it.
+ * If we just use GPIO descriptors, it is superfluous. If the
+ * SPI_MASTER_GPIO_SS flag is set, we always need to call the
+ * driver-specific chipselect routine.
+ */
+ custom_cs = (!master->use_gpio_descriptors ||
+ (master->flags & SPI_MASTER_GPIO_SS));
+
+ if (custom_cs && !bitbang->chipselect)
return -EINVAL;
mutex_init(&bitbang->lock);
@@ -344,7 +356,12 @@ int spi_bitbang_init(struct spi_bitbang *bitbang)
master->prepare_transfer_hardware = spi_bitbang_prepare_hardware;
master->unprepare_transfer_hardware = spi_bitbang_unprepare_hardware;
master->transfer_one = spi_bitbang_transfer_one;
- master->set_cs = spi_bitbang_set_cs;
+ /*
+ * When using GPIO descriptors, the ->set_cs() callback doesn't even
+ * get called unless SPI_MASTER_GPIO_SS is set.
+ */
+ if (custom_cs)
+ master->set_cs = spi_bitbang_set_cs;
if (!bitbang->txrx_bufs) {
bitbang->use_dma = 0;
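With this change, a bitbang driver that leaves chip-select handling entirely to GPIO descriptors may omit the ->chipselect callback; only controllers that set SPI_MASTER_GPIO_SS (where a native CS must toggle alongside the GPIO) still have to provide one. A minimal sketch of a descriptor-only setup, assuming the usual DT "cs-gpios" wiring (foo_* names are illustrative):

	master->use_gpio_descriptors = true;	/* core drives cs-gpios itself */
	hw->bitbang.master = master;
	hw->bitbang.txrx_bufs = foo_txrx_bufs;
	/* hw->bitbang.chipselect intentionally left NULL */
	err = spi_bitbang_init(&hw->bitbang);	/* no longer returns -EINVAL */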
diff --git a/drivers/spi/spi-dw-mid.c b/drivers/spi/spi-dw-mid.c
index 2663bb12d9ce..0d86c37e0aeb 100644
--- a/drivers/spi/spi-dw-mid.c
+++ b/drivers/spi/spi-dw-mid.c
@@ -301,7 +301,7 @@ int dw_spi_mid_init(struct dw_spi *dws)
void __iomem *clk_reg;
u32 clk_cdiv;
- clk_reg = ioremap_nocache(MRST_CLK_SPI_REG, 16);
+ clk_reg = ioremap(MRST_CLK_SPI_REG, 16);
if (!clk_reg)
return -ENOMEM;
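This ioremap_nocache() conversion (repeated below in spi-jcore and spi-npcm-fiu) is mechanical: on the kernels in question ioremap() already returns an uncached MMIO mapping, so the _nocache variant was a plain alias. Equivalent usage, with FOO_STATUS as an illustrative register offset:

	void __iomem *regs = ioremap(res->start, resource_size(res));

	if (!regs)
		return -ENOMEM;
	status = readl(regs + FOO_STATUS);	/* still uncached MMIO */
	iounmap(regs);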
diff --git a/drivers/spi/spi-dw.c b/drivers/spi/spi-dw.c
index 5a25da377119..31e3f866d11a 100644
--- a/drivers/spi/spi-dw.c
+++ b/drivers/spi/spi-dw.c
@@ -297,6 +297,9 @@ static int dw_spi_transfer_one(struct spi_controller *master,
dws->len = transfer->len;
spin_unlock_irqrestore(&dws->buf_lock, flags);
+ /* Ensure dw->rx and dw->rx_end are visible */
+ smp_mb();
+
spi_enable_chip(dws, 0);
/* Handle per transfer options for bpw and speed */
@@ -469,7 +472,8 @@ int dw_spi_add_host(struct device *dev, struct dw_spi *dws)
struct spi_controller *master;
int ret;
- BUG_ON(dws == NULL);
+ if (!dws)
+ return -EINVAL;
master = spi_alloc_master(dev, 0);
if (!master)
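The new smp_mb() orders the buffer-pointer stores made under buf_lock against the hardware kick that follows: once spi_enable_chip() lets the controller raise interrupts, the IRQ handler may read dws->rx and dws->rx_end from another CPU. A generic sketch of the publish pattern, as far as the ordering requirement goes - register and field names are illustrative:

	WRITE_ONCE(ctx->rx, buf);
	WRITE_ONCE(ctx->rx_end, buf + len);
	smp_mb();				/* stores visible before the kick */
	writel(FOO_ENABLE, regs + FOO_CTRL);	/* may fire the IRQ handler */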
diff --git a/drivers/spi/spi-fsl-dspi.c b/drivers/spi/spi-fsl-dspi.c
index 8428b69c858b..6ec2dcb8c57a 100644
--- a/drivers/spi/spi-fsl-dspi.c
+++ b/drivers/spi/spi-fsl-dspi.c
@@ -396,17 +396,17 @@ static int dspi_request_dma(struct fsl_dspi *dspi, phys_addr_t phy_addr)
if (!dma)
return -ENOMEM;
- dma->chan_rx = dma_request_slave_channel(dev, "rx");
- if (!dma->chan_rx) {
+ dma->chan_rx = dma_request_chan(dev, "rx");
+ if (IS_ERR(dma->chan_rx)) {
dev_err(dev, "rx dma channel not available\n");
- ret = -ENODEV;
+ ret = PTR_ERR(dma->chan_rx);
return ret;
}
- dma->chan_tx = dma_request_slave_channel(dev, "tx");
- if (!dma->chan_tx) {
+ dma->chan_tx = dma_request_chan(dev, "tx");
+ if (IS_ERR(dma->chan_tx)) {
dev_err(dev, "tx dma channel not available\n");
- ret = -ENODEV;
+ ret = PTR_ERR(dma->chan_tx);
goto err_tx_channel;
}
diff --git a/drivers/spi/spi-fsl-lpspi.c b/drivers/spi/spi-fsl-lpspi.c
index 2cc0ddb4a988..d0b8cc741a24 100644
--- a/drivers/spi/spi-fsl-lpspi.c
+++ b/drivers/spi/spi-fsl-lpspi.c
@@ -469,9 +469,9 @@ static int fsl_lpspi_setup_transfer(struct spi_controller *controller,
fsl_lpspi->watermark = fsl_lpspi->txfifosize;
if (fsl_lpspi_can_dma(controller, spi, t))
- fsl_lpspi->usedma = 1;
+ fsl_lpspi->usedma = true;
else
- fsl_lpspi->usedma = 0;
+ fsl_lpspi->usedma = false;
return fsl_lpspi_config(fsl_lpspi);
}
@@ -862,6 +862,22 @@ static int fsl_lpspi_probe(struct platform_device *pdev)
fsl_lpspi->dev = &pdev->dev;
fsl_lpspi->is_slave = is_slave;
+ controller->bits_per_word_mask = SPI_BPW_RANGE_MASK(8, 32);
+ controller->transfer_one = fsl_lpspi_transfer_one;
+ controller->prepare_transfer_hardware = lpspi_prepare_xfer_hardware;
+ controller->unprepare_transfer_hardware = lpspi_unprepare_xfer_hardware;
+ controller->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH;
+ controller->flags = SPI_MASTER_MUST_RX | SPI_MASTER_MUST_TX;
+ controller->dev.of_node = pdev->dev.of_node;
+ controller->bus_num = pdev->id;
+ controller->slave_abort = fsl_lpspi_slave_abort;
+
+ ret = devm_spi_register_controller(&pdev->dev, controller);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "spi_register_controller error.\n");
+ goto out_controller_put;
+ }
+
if (!fsl_lpspi->is_slave) {
for (i = 0; i < controller->num_chipselect; i++) {
int cs_gpio = of_get_named_gpio(np, "cs-gpios", i);
@@ -885,16 +901,6 @@ static int fsl_lpspi_probe(struct platform_device *pdev)
controller->prepare_message = fsl_lpspi_prepare_message;
}
- controller->bits_per_word_mask = SPI_BPW_RANGE_MASK(8, 32);
- controller->transfer_one = fsl_lpspi_transfer_one;
- controller->prepare_transfer_hardware = lpspi_prepare_xfer_hardware;
- controller->unprepare_transfer_hardware = lpspi_unprepare_xfer_hardware;
- controller->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH;
- controller->flags = SPI_MASTER_MUST_RX | SPI_MASTER_MUST_TX;
- controller->dev.of_node = pdev->dev.of_node;
- controller->bus_num = pdev->id;
- controller->slave_abort = fsl_lpspi_slave_abort;
-
init_completion(&fsl_lpspi->xfer_done);
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
@@ -952,12 +958,6 @@ static int fsl_lpspi_probe(struct platform_device *pdev)
if (ret < 0)
dev_err(&pdev->dev, "dma setup error %d, use pio\n", ret);
- ret = devm_spi_register_controller(&pdev->dev, controller);
- if (ret < 0) {
- dev_err(&pdev->dev, "spi_register_controller error.\n");
- goto out_controller_put;
- }
-
return 0;
out_controller_put:
diff --git a/drivers/spi/spi-fsl-qspi.c b/drivers/spi/spi-fsl-qspi.c
index 79b1558b74b8..e8a499cd1f13 100644
--- a/drivers/spi/spi-fsl-qspi.c
+++ b/drivers/spi/spi-fsl-qspi.c
@@ -410,7 +410,7 @@ static bool fsl_qspi_supports_op(struct spi_mem *mem,
op->data.nbytes > q->devtype_data->txfifo)
return false;
- return true;
+ return spi_mem_default_supports_op(mem, op);
}
static void fsl_qspi_prepare_lut(struct fsl_qspi *q,
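Returning true unconditionally bypassed the core's buswidth validation; spi_mem_default_supports_op() checks each phase of the operation against the SPI_{TX,RX}_{DUAL,QUAD} mode bits the controller advertised, so unsupported wirings are now rejected. A sketch of the intended shape, with FOO_TXFIFO_SIZE as an illustrative limit:

	static bool foo_supports_op(struct spi_mem *mem,
				    const struct spi_mem_op *op)
	{
		if (op->data.dir == SPI_MEM_DATA_OUT &&
		    op->data.nbytes > FOO_TXFIFO_SIZE)
			return false;	/* controller-specific limit first */
		return spi_mem_default_supports_op(mem, op);
	}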
diff --git a/drivers/spi/spi-fsl-spi.c b/drivers/spi/spi-fsl-spi.c
index fb4159ad6bf6..3b81772fea0d 100644
--- a/drivers/spi/spi-fsl-spi.c
+++ b/drivers/spi/spi-fsl-spi.c
@@ -706,8 +706,8 @@ static int of_fsl_spi_probe(struct platform_device *ofdev)
struct device_node *np = ofdev->dev.of_node;
struct spi_master *master;
struct resource mem;
- int irq = 0, type;
- int ret = -ENOMEM;
+ int irq, type;
+ int ret;
ret = of_mpc8xxx_spi_probe(ofdev);
if (ret)
@@ -722,10 +722,8 @@ static int of_fsl_spi_probe(struct platform_device *ofdev)
if (spisel_boot) {
pinfo->immr_spi_cs = ioremap(get_immrbase() + IMMR_SPI_CS_OFFSET, 4);
- if (!pinfo->immr_spi_cs) {
- ret = -ENOMEM;
- goto err;
- }
+ if (!pinfo->immr_spi_cs)
+ return -ENOMEM;
}
#endif
/*
@@ -744,24 +742,15 @@ static int of_fsl_spi_probe(struct platform_device *ofdev)
ret = of_address_to_resource(np, 0, &mem);
if (ret)
- goto err;
+ return ret;
irq = platform_get_irq(ofdev, 0);
- if (irq < 0) {
- ret = irq;
- goto err;
- }
+ if (irq < 0)
+ return irq;
master = fsl_spi_probe(dev, &mem, irq);
- if (IS_ERR(master)) {
- ret = PTR_ERR(master);
- goto err;
- }
-
- return 0;
-err:
- return ret;
+ return PTR_ERR_OR_ZERO(master);
}
static int of_fsl_spi_remove(struct platform_device *ofdev)
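PTR_ERR_OR_ZERO() folds the removed err: tail into a single expression: it evaluates to PTR_ERR(ptr) when IS_ERR(ptr) and to 0 otherwise. The final return above is therefore exactly equivalent to:

	if (IS_ERR(master))
		return PTR_ERR(master);
	return 0;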
diff --git a/drivers/spi/spi-hisi-sfc-v3xx.c b/drivers/spi/spi-hisi-sfc-v3xx.c
new file mode 100644
index 000000000000..4cf8fc80a7b7
--- /dev/null
+++ b/drivers/spi/spi-hisi-sfc-v3xx.c
@@ -0,0 +1,284 @@
+// SPDX-License-Identifier: GPL-2.0-only
+//
+// HiSilicon SPI NOR V3XX Flash Controller Driver for hi16xx chipsets
+//
+// Copyright (c) 2019 HiSilicon Technologies Co., Ltd.
+// Author: John Garry <john.garry@huawei.com>
+
+#include <linux/acpi.h>
+#include <linux/bitops.h>
+#include <linux/iopoll.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/spi/spi.h>
+#include <linux/spi/spi-mem.h>
+
+#define HISI_SFC_V3XX_VERSION (0x1f8)
+
+#define HISI_SFC_V3XX_CMD_CFG (0x300)
+#define HISI_SFC_V3XX_CMD_CFG_DATA_CNT_OFF 9
+#define HISI_SFC_V3XX_CMD_CFG_RW_MSK BIT(8)
+#define HISI_SFC_V3XX_CMD_CFG_DATA_EN_MSK BIT(7)
+#define HISI_SFC_V3XX_CMD_CFG_DUMMY_CNT_OFF 4
+#define HISI_SFC_V3XX_CMD_CFG_ADDR_EN_MSK BIT(3)
+#define HISI_SFC_V3XX_CMD_CFG_CS_SEL_OFF 1
+#define HISI_SFC_V3XX_CMD_CFG_START_MSK BIT(0)
+#define HISI_SFC_V3XX_CMD_INS (0x308)
+#define HISI_SFC_V3XX_CMD_ADDR (0x30c)
+#define HISI_SFC_V3XX_CMD_DATABUF0 (0x400)
+
+struct hisi_sfc_v3xx_host {
+ struct device *dev;
+ void __iomem *regbase;
+ int max_cmd_dword;
+};
+
+#define HISI_SFC_V3XX_WAIT_TIMEOUT_US 1000000
+#define HISI_SFC_V3XX_WAIT_POLL_INTERVAL_US 10
+
+static int hisi_sfc_v3xx_wait_cmd_idle(struct hisi_sfc_v3xx_host *host)
+{
+ u32 reg;
+
+ return readl_poll_timeout(host->regbase + HISI_SFC_V3XX_CMD_CFG, reg,
+ !(reg & HISI_SFC_V3XX_CMD_CFG_START_MSK),
+ HISI_SFC_V3XX_WAIT_POLL_INTERVAL_US,
+ HISI_SFC_V3XX_WAIT_TIMEOUT_US);
+}
+
+static int hisi_sfc_v3xx_adjust_op_size(struct spi_mem *mem,
+ struct spi_mem_op *op)
+{
+ struct spi_device *spi = mem->spi;
+ struct hisi_sfc_v3xx_host *host;
+ uintptr_t addr = (uintptr_t)op->data.buf.in;
+ int max_byte_count;
+
+ host = spi_controller_get_devdata(spi->master);
+
+ max_byte_count = host->max_cmd_dword * 4;
+
+ if (!IS_ALIGNED(addr, 4) && op->data.nbytes >= 4)
+ op->data.nbytes = 4 - (addr % 4);
+ else if (op->data.nbytes > max_byte_count)
+ op->data.nbytes = max_byte_count;
+
+ return 0;
+}
+
+/*
+ * memcpy_{to,from}io doesn't guarantee 32b accesses - which we require for the
+ * DATABUF registers - so use __io{read,write}32_copy when possible. For
+ * trailing bytes, copy them byte-by-byte from the DATABUF register, as we
+ * can't clobber outside the source/dest buffer.
+ *
+ * For efficient data read/write, we try to put any leading 32b-unaligned data
+ * into a separate transaction in hisi_sfc_v3xx_adjust_op_size().
+ */
+static void hisi_sfc_v3xx_read_databuf(struct hisi_sfc_v3xx_host *host,
+ u8 *to, unsigned int len)
+{
+ void __iomem *from;
+ int i;
+
+ from = host->regbase + HISI_SFC_V3XX_CMD_DATABUF0;
+
+ if (IS_ALIGNED((uintptr_t)to, 4)) {
+ int words = len / 4;
+
+ __ioread32_copy(to, from, words);
+
+ len -= words * 4;
+ if (len) {
+ u32 val;
+
+ to += words * 4;
+ from += words * 4;
+
+ val = __raw_readl(from);
+
+ for (i = 0; i < len; i++, val >>= 8, to++)
+ *to = (u8)val;
+ }
+ } else {
+ for (i = 0; i < DIV_ROUND_UP(len, 4); i++, from += 4) {
+ u32 val = __raw_readl(from);
+ int j;
+
+ for (j = 0; j < 4 && (j + (i * 4) < len);
+ to++, val >>= 8, j++)
+ *to = (u8)val;
+ }
+ }
+}
+
+static void hisi_sfc_v3xx_write_databuf(struct hisi_sfc_v3xx_host *host,
+ const u8 *from, unsigned int len)
+{
+ void __iomem *to;
+ int i;
+
+ to = host->regbase + HISI_SFC_V3XX_CMD_DATABUF0;
+
+ if (IS_ALIGNED((uintptr_t)from, 4)) {
+ int words = len / 4;
+
+ __iowrite32_copy(to, from, words);
+
+ len -= words * 4;
+ if (len) {
+ u32 val = 0;
+
+ to += words * 4;
+ from += words * 4;
+
+ for (i = 0; i < len; i++, from++)
+ val |= *from << i * 8;
+ __raw_writel(val, to);
+ }
+
+ } else {
+ for (i = 0; i < DIV_ROUND_UP(len, 4); i++, to += 4) {
+ u32 val = 0;
+ int j;
+
+ for (j = 0; j < 4 && (j + (i * 4) < len);
+ from++, j++)
+ val |= *from << j * 8;
+ __raw_writel(val, to);
+ }
+ }
+}
+
+static int hisi_sfc_v3xx_generic_exec_op(struct hisi_sfc_v3xx_host *host,
+ const struct spi_mem_op *op,
+ u8 chip_select)
+{
+ int ret, len = op->data.nbytes;
+ u32 config = 0;
+
+ if (op->addr.nbytes)
+ config |= HISI_SFC_V3XX_CMD_CFG_ADDR_EN_MSK;
+
+ if (op->data.dir != SPI_MEM_NO_DATA) {
+ config |= (len - 1) << HISI_SFC_V3XX_CMD_CFG_DATA_CNT_OFF;
+ config |= HISI_SFC_V3XX_CMD_CFG_DATA_EN_MSK;
+ }
+
+ if (op->data.dir == SPI_MEM_DATA_OUT)
+ hisi_sfc_v3xx_write_databuf(host, op->data.buf.out, len);
+ else if (op->data.dir == SPI_MEM_DATA_IN)
+ config |= HISI_SFC_V3XX_CMD_CFG_RW_MSK;
+
+ config |= op->dummy.nbytes << HISI_SFC_V3XX_CMD_CFG_DUMMY_CNT_OFF |
+ chip_select << HISI_SFC_V3XX_CMD_CFG_CS_SEL_OFF |
+ HISI_SFC_V3XX_CMD_CFG_START_MSK;
+
+ writel(op->addr.val, host->regbase + HISI_SFC_V3XX_CMD_ADDR);
+ writel(op->cmd.opcode, host->regbase + HISI_SFC_V3XX_CMD_INS);
+
+ writel(config, host->regbase + HISI_SFC_V3XX_CMD_CFG);
+
+ ret = hisi_sfc_v3xx_wait_cmd_idle(host);
+ if (ret)
+ return ret;
+
+ if (op->data.dir == SPI_MEM_DATA_IN)
+ hisi_sfc_v3xx_read_databuf(host, op->data.buf.in, len);
+
+ return 0;
+}
+
+static int hisi_sfc_v3xx_exec_op(struct spi_mem *mem,
+ const struct spi_mem_op *op)
+{
+ struct hisi_sfc_v3xx_host *host;
+ struct spi_device *spi = mem->spi;
+ u8 chip_select = spi->chip_select;
+
+ host = spi_controller_get_devdata(spi->master);
+
+ return hisi_sfc_v3xx_generic_exec_op(host, op, chip_select);
+}
+
+static const struct spi_controller_mem_ops hisi_sfc_v3xx_mem_ops = {
+ .adjust_op_size = hisi_sfc_v3xx_adjust_op_size,
+ .exec_op = hisi_sfc_v3xx_exec_op,
+};
+
+static int hisi_sfc_v3xx_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct hisi_sfc_v3xx_host *host;
+ struct spi_controller *ctlr;
+ u32 version;
+ int ret;
+
+ ctlr = spi_alloc_master(&pdev->dev, sizeof(*host));
+ if (!ctlr)
+ return -ENOMEM;
+
+ ctlr->mode_bits = SPI_RX_DUAL | SPI_RX_QUAD |
+ SPI_TX_DUAL | SPI_TX_QUAD;
+
+ host = spi_controller_get_devdata(ctlr);
+ host->dev = dev;
+
+ platform_set_drvdata(pdev, host);
+
+ host->regbase = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(host->regbase)) {
+ ret = PTR_ERR(host->regbase);
+ goto err_put_master;
+ }
+
+ ctlr->bus_num = -1;
+ ctlr->num_chipselect = 1;
+ ctlr->mem_ops = &hisi_sfc_v3xx_mem_ops;
+
+ version = readl(host->regbase + HISI_SFC_V3XX_VERSION);
+
+ switch (version) {
+ case 0x351:
+ host->max_cmd_dword = 64;
+ break;
+ default:
+ host->max_cmd_dword = 16;
+ break;
+ }
+
+ ret = devm_spi_register_controller(dev, ctlr);
+ if (ret)
+ goto err_put_master;
+
+ dev_info(&pdev->dev, "hw version 0x%x\n", version);
+
+ return 0;
+
+err_put_master:
+ spi_master_put(ctlr);
+ return ret;
+}
+
+#if IS_ENABLED(CONFIG_ACPI)
+static const struct acpi_device_id hisi_sfc_v3xx_acpi_ids[] = {
+ {"HISI0341", 0},
+ {}
+};
+MODULE_DEVICE_TABLE(acpi, hisi_sfc_v3xx_acpi_ids);
+#endif
+
+static struct platform_driver hisi_sfc_v3xx_spi_driver = {
+ .driver = {
+ .name = "hisi-sfc-v3xx",
+ .acpi_match_table = ACPI_PTR(hisi_sfc_v3xx_acpi_ids),
+ },
+ .probe = hisi_sfc_v3xx_probe,
+};
+
+module_platform_driver(hisi_sfc_v3xx_spi_driver);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("John Garry <john.garry@huawei.com>");
+MODULE_DESCRIPTION("HiSilicon SPI NOR V3XX Flash Controller Driver for hi16xx chipsets");
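A worked example of the alignment strategy in hisi_sfc_v3xx_adjust_op_size(): the first call trims an op so that only the bytes up to the next 32-bit boundary remain, and the spi-mem core re-issues the remainder, which then starts aligned and can use __ioread32_copy(). Illustrative values:

	/* buffer address % 4 == 2, 10 bytes requested */
	op->data.nbytes = 10;
	hisi_sfc_v3xx_adjust_op_size(mem, op);	/* nbytes -> 2 (== 4 - 2) */
	/* the core re-issues the tail: the next op reads 8 aligned bytes,
	 * copied as two 32-bit words by __ioread32_copy() */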
diff --git a/drivers/spi/spi-img-spfi.c b/drivers/spi/spi-img-spfi.c
index f4a8f470aecc..8543f5ed1099 100644
--- a/drivers/spi/spi-img-spfi.c
+++ b/drivers/spi/spi-img-spfi.c
@@ -666,8 +666,22 @@ static int img_spfi_probe(struct platform_device *pdev)
master->unprepare_message = img_spfi_unprepare;
master->handle_err = img_spfi_handle_err;
- spfi->tx_ch = dma_request_slave_channel(spfi->dev, "tx");
- spfi->rx_ch = dma_request_slave_channel(spfi->dev, "rx");
+ spfi->tx_ch = dma_request_chan(spfi->dev, "tx");
+ if (IS_ERR(spfi->tx_ch)) {
+ ret = PTR_ERR(spfi->tx_ch);
+ spfi->tx_ch = NULL;
+ if (ret == -EPROBE_DEFER)
+ goto disable_pm;
+ }
+
+ spfi->rx_ch = dma_request_chan(spfi->dev, "rx");
+ if (IS_ERR(spfi->rx_ch)) {
+ ret = PTR_ERR(spfi->rx_ch);
+ spfi->rx_ch = NULL;
+ if (ret == -EPROBE_DEFER)
+ goto disable_pm;
+ }
+
if (!spfi->tx_ch || !spfi->rx_ch) {
if (spfi->tx_ch)
dma_release_channel(spfi->tx_ch);
diff --git a/drivers/spi/spi-imx.c b/drivers/spi/spi-imx.c
index 49f0099db0cb..f4f28a400a96 100644
--- a/drivers/spi/spi-imx.c
+++ b/drivers/spi/spi-imx.c
@@ -1230,9 +1230,9 @@ static int spi_imx_setupxfer(struct spi_device *spi,
}
if (spi_imx_can_dma(spi_imx->bitbang.master, spi, t))
- spi_imx->usedma = 1;
+ spi_imx->usedma = true;
else
- spi_imx->usedma = 0;
+ spi_imx->usedma = false;
if (is_imx53_ecspi(spi_imx) && spi_imx->slave_mode) {
spi_imx->rx = mx53_ecspi_rx_slave;
diff --git a/drivers/spi/spi-jcore.c b/drivers/spi/spi-jcore.c
index cc49fa41fbab..bba10f030e33 100644
--- a/drivers/spi/spi-jcore.c
+++ b/drivers/spi/spi-jcore.c
@@ -170,7 +170,7 @@ static int jcore_spi_probe(struct platform_device *pdev)
if (!devm_request_mem_region(&pdev->dev, res->start,
resource_size(res), pdev->name))
goto exit_busy;
- hw->base = devm_ioremap_nocache(&pdev->dev, res->start,
+ hw->base = devm_ioremap(&pdev->dev, res->start,
resource_size(res));
if (!hw->base)
goto exit_busy;
diff --git a/drivers/spi/spi-meson-spicc.c b/drivers/spi/spi-meson-spicc.c
index f3f10443f9e2..7f5680fe2568 100644
--- a/drivers/spi/spi-meson-spicc.c
+++ b/drivers/spi/spi-meson-spicc.c
@@ -19,7 +19,6 @@
#include <linux/types.h>
#include <linux/interrupt.h>
#include <linux/reset.h>
-#include <linux/gpio.h>
/*
* The Meson SPICC controller could support DMA based transfers, but is not
@@ -467,35 +466,14 @@ static int meson_spicc_unprepare_transfer(struct spi_master *master)
static int meson_spicc_setup(struct spi_device *spi)
{
- int ret = 0;
-
if (!spi->controller_state)
spi->controller_state = spi_master_get_devdata(spi->master);
- else if (gpio_is_valid(spi->cs_gpio))
- goto out_gpio;
- else if (spi->cs_gpio == -ENOENT)
- return 0;
-
- if (gpio_is_valid(spi->cs_gpio)) {
- ret = gpio_request(spi->cs_gpio, dev_name(&spi->dev));
- if (ret) {
- dev_err(&spi->dev, "failed to request cs gpio\n");
- return ret;
- }
- }
-
-out_gpio:
- ret = gpio_direction_output(spi->cs_gpio,
- !(spi->mode & SPI_CS_HIGH));
- return ret;
+ return 0;
}
static void meson_spicc_cleanup(struct spi_device *spi)
{
- if (gpio_is_valid(spi->cs_gpio))
- gpio_free(spi->cs_gpio);
-
spi->controller_state = NULL;
}
@@ -564,6 +542,7 @@ static int meson_spicc_probe(struct platform_device *pdev)
master->prepare_message = meson_spicc_prepare_message;
master->unprepare_transfer_hardware = meson_spicc_unprepare_transfer;
master->transfer_one = meson_spicc_transfer_one;
+ master->use_gpio_descriptors = true;
/* Setup max rate according to the Meson GX datasheet */
if ((rate >> 2) > SPICC_MAX_FREQ)
diff --git a/drivers/spi/spi-mxs.c b/drivers/spi/spi-mxs.c
index 996c1c8a9c71..dce85ee07cd0 100644
--- a/drivers/spi/spi-mxs.c
+++ b/drivers/spi/spi-mxs.c
@@ -590,10 +590,10 @@ static int mxs_spi_probe(struct platform_device *pdev)
if (ret)
goto out_master_free;
- ssp->dmach = dma_request_slave_channel(&pdev->dev, "rx-tx");
- if (!ssp->dmach) {
+ ssp->dmach = dma_request_chan(&pdev->dev, "rx-tx");
+ if (IS_ERR(ssp->dmach)) {
dev_err(ssp->dev, "Failed to request DMA\n");
- ret = -ENODEV;
+ ret = PTR_ERR(ssp->dmach);
goto out_master_free;
}
diff --git a/drivers/spi/spi-npcm-fiu.c b/drivers/spi/spi-npcm-fiu.c
index cb52fd8008d0..d25ee32862e0 100644
--- a/drivers/spi/spi-npcm-fiu.c
+++ b/drivers/spi/spi-npcm-fiu.c
@@ -603,7 +603,7 @@ static int npcm_fiu_dirmap_create(struct spi_mem_dirmap_desc *desc)
if (!chip->flash_region_mapped_ptr) {
chip->flash_region_mapped_ptr =
- devm_ioremap_nocache(fiu->dev, (fiu->res_mem->start +
+ devm_ioremap(fiu->dev, (fiu->res_mem->start +
(fiu->info->max_map_size *
desc->mem->spi->chip_select)),
(u32)desc->info.length);
diff --git a/drivers/spi/spi-npcm-pspi.c b/drivers/spi/spi-npcm-pspi.c
index fe624731c74c..87cd0233c60b 100644
--- a/drivers/spi/spi-npcm-pspi.c
+++ b/drivers/spi/spi-npcm-pspi.c
@@ -12,6 +12,7 @@
#include <linux/spi/spi.h>
#include <linux/gpio.h>
#include <linux/of_gpio.h>
+#include <linux/reset.h>
#include <asm/unaligned.h>
@@ -20,7 +21,7 @@
struct npcm_pspi {
struct completion xfer_done;
- struct regmap *rst_regmap;
+ struct reset_control *reset;
struct spi_master *master;
unsigned int tx_bytes;
unsigned int rx_bytes;
@@ -59,12 +60,6 @@ struct npcm_pspi {
#define NPCM_PSPI_MIN_CLK_DIVIDER 4
#define NPCM_PSPI_DEFAULT_CLK 25000000
-/* reset register */
-#define NPCM7XX_IPSRST2_OFFSET 0x24
-
-#define NPCM7XX_PSPI1_RESET BIT(22)
-#define NPCM7XX_PSPI2_RESET BIT(23)
-
static inline unsigned int bytes_per_word(unsigned int bits)
{
return bits <= 8 ? 1 : 2;
@@ -178,6 +173,13 @@ static void npcm_pspi_setup_transfer(struct spi_device *spi,
priv->mode = spi->mode;
}
+ /*
+ * If the transfer length is even and it uses 8 bits per word,
+ * carry it out as a 16 bits-per-word transfer.
+ */
+ if (priv->bits_per_word == 8 && !(t->len & 0x1))
+ t->bits_per_word = 16;
+
if (!priv->is_save_param || priv->bits_per_word != t->bits_per_word) {
npcm_pspi_set_transfer_size(priv, t->bits_per_word);
priv->bits_per_word = t->bits_per_word;
@@ -195,6 +197,7 @@ static void npcm_pspi_setup_transfer(struct spi_device *spi,
static void npcm_pspi_send(struct npcm_pspi *priv)
{
int wsize;
+ u16 val;
wsize = min(bytes_per_word(priv->bits_per_word), priv->tx_bytes);
priv->tx_bytes -= wsize;
@@ -204,17 +207,18 @@ static void npcm_pspi_send(struct npcm_pspi *priv)
switch (wsize) {
case 1:
- iowrite8(*priv->tx_buf, NPCM_PSPI_DATA + priv->base);
+ val = *priv->tx_buf++;
+ iowrite8(val, NPCM_PSPI_DATA + priv->base);
break;
case 2:
- iowrite16(*priv->tx_buf, NPCM_PSPI_DATA + priv->base);
+ val = *priv->tx_buf++;
+ val = *priv->tx_buf++ | (val << 8);
+ iowrite16(val, NPCM_PSPI_DATA + priv->base);
break;
default:
WARN_ON_ONCE(1);
return;
}
-
- priv->tx_buf += wsize;
}
static void npcm_pspi_recv(struct npcm_pspi *priv)
@@ -230,18 +234,17 @@ static void npcm_pspi_recv(struct npcm_pspi *priv)
switch (rsize) {
case 1:
- val = ioread8(priv->base + NPCM_PSPI_DATA);
+ *priv->rx_buf++ = ioread8(priv->base + NPCM_PSPI_DATA);
break;
case 2:
val = ioread16(priv->base + NPCM_PSPI_DATA);
+ *priv->rx_buf++ = (val >> 8);
+ *priv->rx_buf++ = val & 0xff;
break;
default:
WARN_ON_ONCE(1);
return;
}
-
- *priv->rx_buf = val;
- priv->rx_buf += rsize;
}
static int npcm_pspi_transfer_one(struct spi_master *master,
@@ -285,9 +288,9 @@ static int npcm_pspi_unprepare_transfer_hardware(struct spi_master *master)
static void npcm_pspi_reset_hw(struct npcm_pspi *priv)
{
- regmap_write(priv->rst_regmap, NPCM7XX_IPSRST2_OFFSET,
- NPCM7XX_PSPI1_RESET << priv->id);
- regmap_write(priv->rst_regmap, NPCM7XX_IPSRST2_OFFSET, 0x0);
+ reset_control_assert(priv->reset);
+ udelay(5);
+ reset_control_deassert(priv->reset);
}
static irqreturn_t npcm_pspi_handler(int irq, void *dev_id)
@@ -351,10 +354,6 @@ static int npcm_pspi_probe(struct platform_device *pdev)
if (num_cs < 0)
return num_cs;
- pdev->id = of_alias_get_id(np, "spi");
- if (pdev->id < 0)
- pdev->id = 0;
-
master = spi_alloc_master(&pdev->dev, sizeof(*priv));
if (!master)
return -ENOMEM;
@@ -364,7 +363,6 @@ static int npcm_pspi_probe(struct platform_device *pdev)
priv = spi_master_get_devdata(master);
priv->master = master;
priv->is_save_param = false;
- priv->id = pdev->id;
priv->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(priv->base)) {
@@ -389,11 +387,10 @@ static int npcm_pspi_probe(struct platform_device *pdev)
goto out_disable_clk;
}
- priv->rst_regmap =
- syscon_regmap_lookup_by_compatible("nuvoton,npcm750-rst");
- if (IS_ERR(priv->rst_regmap)) {
- dev_err(&pdev->dev, "failed to find nuvoton,npcm750-rst\n");
- return PTR_ERR(priv->rst_regmap);
+ priv->reset = devm_reset_control_get(&pdev->dev, NULL);
+ if (IS_ERR(priv->reset)) {
+ ret = PTR_ERR(priv->reset);
+ goto out_disable_clk;
}
/* reset SPI-HW block */
@@ -414,7 +411,7 @@ static int npcm_pspi_probe(struct platform_device *pdev)
master->min_speed_hz = DIV_ROUND_UP(clk_hz, NPCM_PSPI_MAX_CLK_DIVIDER);
master->mode_bits = SPI_CPHA | SPI_CPOL;
master->dev.of_node = pdev->dev.of_node;
- master->bus_num = pdev->id;
+ master->bus_num = -1;
master->bits_per_word_mask = SPI_BPW_MASK(8) | SPI_BPW_MASK(16);
master->transfer_one = npcm_pspi_transfer_one;
master->prepare_transfer_hardware =
@@ -447,7 +444,7 @@ static int npcm_pspi_probe(struct platform_device *pdev)
if (ret)
goto out_disable_clk;
- pr_info("NPCM Peripheral SPI %d probed\n", pdev->id);
+ pr_info("NPCM Peripheral SPI %d probed\n", master->bus_num);
return 0;
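After the even-length optimisation above, pairs of bytes travel as one 16-bit FIFO word, and npcm_pspi_send()/npcm_pspi_recv() fix the byte order: the earlier buffer byte occupies the high-order bits. The packing, condensed as a sketch:

	u16 val = tx_buf[0];

	val = tx_buf[1] | (val << 8);	/* tx_buf[0] lands in bits 15:8 */
	iowrite16(val, base + NPCM_PSPI_DATA);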
diff --git a/drivers/spi/spi-oc-tiny.c b/drivers/spi/spi-oc-tiny.c
index e2331eb7b47a..9df7c5979c29 100644
--- a/drivers/spi/spi-oc-tiny.c
+++ b/drivers/spi/spi-oc-tiny.c
@@ -20,7 +20,6 @@
#include <linux/spi/spi_bitbang.h>
#include <linux/spi/spi_oc_tiny.h>
#include <linux/io.h>
-#include <linux/gpio.h>
#include <linux/of.h>
#define DRV_NAME "spi_oc_tiny"
@@ -50,8 +49,6 @@ struct tiny_spi {
unsigned int txc, rxc;
const u8 *txp;
u8 *rxp;
- int gpio_cs_count;
- int *gpio_cs;
};
static inline struct tiny_spi *tiny_spi_to_hw(struct spi_device *sdev)
@@ -66,16 +63,6 @@ static unsigned int tiny_spi_baud(struct spi_device *spi, unsigned int hz)
return min(DIV_ROUND_UP(hw->freq, hz * 2), (1U << hw->baudwidth)) - 1;
}
-static void tiny_spi_chipselect(struct spi_device *spi, int is_active)
-{
- struct tiny_spi *hw = tiny_spi_to_hw(spi);
-
- if (hw->gpio_cs_count > 0) {
- gpio_set_value(hw->gpio_cs[spi->chip_select],
- (spi->mode & SPI_CS_HIGH) ? is_active : !is_active);
- }
-}
-
static int tiny_spi_setup_transfer(struct spi_device *spi,
struct spi_transfer *t)
{
@@ -203,24 +190,10 @@ static int tiny_spi_of_probe(struct platform_device *pdev)
{
struct tiny_spi *hw = platform_get_drvdata(pdev);
struct device_node *np = pdev->dev.of_node;
- unsigned int i;
u32 val;
if (!np)
return 0;
- hw->gpio_cs_count = of_gpio_count(np);
- if (hw->gpio_cs_count > 0) {
- hw->gpio_cs = devm_kcalloc(&pdev->dev,
- hw->gpio_cs_count, sizeof(unsigned int),
- GFP_KERNEL);
- if (!hw->gpio_cs)
- return -ENOMEM;
- }
- for (i = 0; i < hw->gpio_cs_count; i++) {
- hw->gpio_cs[i] = of_get_gpio_flags(np, i, NULL);
- if (hw->gpio_cs[i] < 0)
- return -ENODEV;
- }
hw->bitbang.master->dev.of_node = pdev->dev.of_node;
if (!of_property_read_u32(np, "clock-frequency", &val))
hw->freq = val;
@@ -240,7 +213,6 @@ static int tiny_spi_probe(struct platform_device *pdev)
struct tiny_spi_platform_data *platp = dev_get_platdata(&pdev->dev);
struct tiny_spi *hw;
struct spi_master *master;
- unsigned int i;
int err = -ENODEV;
master = spi_alloc_master(&pdev->dev, sizeof(struct tiny_spi));
@@ -249,9 +221,9 @@ static int tiny_spi_probe(struct platform_device *pdev)
/* setup the master state. */
master->bus_num = pdev->id;
- master->num_chipselect = 255;
master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH;
master->setup = tiny_spi_setup;
+ master->use_gpio_descriptors = true;
hw = spi_master_get_devdata(master);
platform_set_drvdata(pdev, hw);
@@ -259,7 +231,6 @@ static int tiny_spi_probe(struct platform_device *pdev)
/* setup the state for the bitbang driver */
hw->bitbang.master = master;
hw->bitbang.setup_transfer = tiny_spi_setup_transfer;
- hw->bitbang.chipselect = tiny_spi_chipselect;
hw->bitbang.txrx_bufs = tiny_spi_txrx_bufs;
/* find and map our resources */
@@ -279,12 +250,6 @@ static int tiny_spi_probe(struct platform_device *pdev)
}
/* find platform data */
if (platp) {
- hw->gpio_cs_count = platp->gpio_cs_count;
- hw->gpio_cs = platp->gpio_cs;
- if (platp->gpio_cs_count && !platp->gpio_cs) {
- err = -EBUSY;
- goto exit;
- }
hw->freq = platp->freq;
hw->baudwidth = platp->baudwidth;
} else {
@@ -292,13 +257,6 @@ static int tiny_spi_probe(struct platform_device *pdev)
if (err)
goto exit;
}
- for (i = 0; i < hw->gpio_cs_count; i++) {
- err = gpio_request(hw->gpio_cs[i], dev_name(&pdev->dev));
- if (err)
- goto exit_gpio;
- gpio_direction_output(hw->gpio_cs[i], 1);
- }
- hw->bitbang.master->num_chipselect = max(1, hw->gpio_cs_count);
/* register our spi controller */
err = spi_bitbang_start(&hw->bitbang);
@@ -308,9 +266,6 @@ static int tiny_spi_probe(struct platform_device *pdev)
return 0;
-exit_gpio:
- while (i-- > 0)
- gpio_free(hw->gpio_cs[i]);
exit:
spi_master_put(master);
return err;
@@ -320,11 +275,8 @@ static int tiny_spi_remove(struct platform_device *pdev)
{
struct tiny_spi *hw = platform_get_drvdata(pdev);
struct spi_master *master = hw->bitbang.master;
- unsigned int i;
spi_bitbang_stop(&hw->bitbang);
- for (i = 0; i < hw->gpio_cs_count; i++)
- gpio_free(hw->gpio_cs[i]);
spi_master_put(master);
return 0;
}
diff --git a/drivers/spi/spi-pxa2xx.c b/drivers/spi/spi-pxa2xx.c
index 9071333ebdd8..4c7a71f0fb3e 100644
--- a/drivers/spi/spi-pxa2xx.c
+++ b/drivers/spi/spi-pxa2xx.c
@@ -461,6 +461,16 @@ int pxa2xx_spi_flush(struct driver_data *drv_data)
return limit;
}
+static void pxa2xx_spi_off(struct driver_data *drv_data)
+{
+ /* On MMP, disabling SSE seems to corrupt the rx fifo */
+ if (drv_data->ssp_type == MMP2_SSP)
+ return;
+
+ pxa2xx_spi_write(drv_data, SSCR0,
+ pxa2xx_spi_read(drv_data, SSCR0) & ~SSCR0_SSE);
+}
+
static int null_writer(struct driver_data *drv_data)
{
u8 n_bytes = drv_data->n_bytes;
@@ -587,8 +597,7 @@ static void int_error_stop(struct driver_data *drv_data, const char* msg)
if (!pxa25x_ssp_comp(drv_data))
pxa2xx_spi_write(drv_data, SSTO, 0);
pxa2xx_spi_flush(drv_data);
- pxa2xx_spi_write(drv_data, SSCR0,
- pxa2xx_spi_read(drv_data, SSCR0) & ~SSCR0_SSE);
+ pxa2xx_spi_off(drv_data);
dev_err(&drv_data->pdev->dev, "%s\n", msg);
@@ -686,8 +695,7 @@ static irqreturn_t interrupt_transfer(struct driver_data *drv_data)
static void handle_bad_msg(struct driver_data *drv_data)
{
- pxa2xx_spi_write(drv_data, SSCR0,
- pxa2xx_spi_read(drv_data, SSCR0) & ~SSCR0_SSE);
+ pxa2xx_spi_off(drv_data);
pxa2xx_spi_write(drv_data, SSCR1,
pxa2xx_spi_read(drv_data, SSCR1) & ~drv_data->int_cr1);
if (!pxa25x_ssp_comp(drv_data))
@@ -1062,7 +1070,8 @@ static int pxa2xx_spi_transfer_one(struct spi_controller *controller,
|| (pxa2xx_spi_read(drv_data, SSCR1) & change_mask)
!= (cr1 & change_mask)) {
/* stop the SSP, and update the other bits */
- pxa2xx_spi_write(drv_data, SSCR0, cr0 & ~SSCR0_SSE);
+ if (drv_data->ssp_type != MMP2_SSP)
+ pxa2xx_spi_write(drv_data, SSCR0, cr0 & ~SSCR0_SSE);
if (!pxa25x_ssp_comp(drv_data))
pxa2xx_spi_write(drv_data, SSTO, chip->timeout);
/* first set CR1 without interrupt and service enables */
@@ -1118,8 +1127,7 @@ static int pxa2xx_spi_slave_abort(struct spi_controller *controller)
if (!pxa25x_ssp_comp(drv_data))
pxa2xx_spi_write(drv_data, SSTO, 0);
pxa2xx_spi_flush(drv_data);
- pxa2xx_spi_write(drv_data, SSCR0,
- pxa2xx_spi_read(drv_data, SSCR0) & ~SSCR0_SSE);
+ pxa2xx_spi_off(drv_data);
dev_dbg(&drv_data->pdev->dev, "transfer aborted\n");
@@ -1135,8 +1143,7 @@ static void pxa2xx_spi_handle_err(struct spi_controller *controller,
struct driver_data *drv_data = spi_controller_get_devdata(controller);
/* Disable the SSP */
- pxa2xx_spi_write(drv_data, SSCR0,
- pxa2xx_spi_read(drv_data, SSCR0) & ~SSCR0_SSE);
+ pxa2xx_spi_off(drv_data);
/* Clear and disable interrupts and service requests */
write_SSSR_CS(drv_data, drv_data->clear_sr);
pxa2xx_spi_write(drv_data, SSCR1,
@@ -1161,8 +1168,7 @@ static int pxa2xx_spi_unprepare_transfer(struct spi_controller *controller)
struct driver_data *drv_data = spi_controller_get_devdata(controller);
/* Disable the SSP now */
- pxa2xx_spi_write(drv_data, SSCR0,
- pxa2xx_spi_read(drv_data, SSCR0) & ~SSCR0_SSE);
+ pxa2xx_spi_off(drv_data);
return 0;
}
@@ -1423,6 +1429,9 @@ static const struct pci_device_id pxa2xx_spi_pci_compound_match[] = {
/* KBL-H */
{ PCI_VDEVICE(INTEL, 0xa2a9), LPSS_SPT_SSP },
{ PCI_VDEVICE(INTEL, 0xa2aa), LPSS_SPT_SSP },
+ /* CML-V */
+ { PCI_VDEVICE(INTEL, 0xa3a9), LPSS_SPT_SSP },
+ { PCI_VDEVICE(INTEL, 0xa3aa), LPSS_SPT_SSP },
/* BXT A-Step */
{ PCI_VDEVICE(INTEL, 0x0ac2), LPSS_BXT_SSP },
{ PCI_VDEVICE(INTEL, 0x0ac4), LPSS_BXT_SSP },
diff --git a/drivers/spi/spi-qcom-qspi.c b/drivers/spi/spi-qcom-qspi.c
index 250fd60e1678..3c4f83bf7084 100644
--- a/drivers/spi/spi-qcom-qspi.c
+++ b/drivers/spi/spi-qcom-qspi.c
@@ -137,7 +137,7 @@ enum qspi_clocks {
struct qcom_qspi {
void __iomem *base;
struct device *dev;
- struct clk_bulk_data clks[QSPI_NUM_CLKS];
+ struct clk_bulk_data *clks;
struct qspi_xfer xfer;
/* Lock to protect xfer and IRQ accessed registers */
spinlock_t lock;
@@ -445,6 +445,13 @@ static int qcom_qspi_probe(struct platform_device *pdev)
goto exit_probe_master_put;
}
+ ctrl->clks = devm_kcalloc(dev, QSPI_NUM_CLKS,
+ sizeof(*ctrl->clks), GFP_KERNEL);
+ if (!ctrl->clks) {
+ ret = -ENOMEM;
+ goto exit_probe_master_put;
+ }
+
ctrl->clks[QSPI_CLK_CORE].id = "core";
ctrl->clks[QSPI_CLK_IFACE].id = "iface";
ret = devm_clk_bulk_get(dev, QSPI_NUM_CLKS, ctrl->clks);
diff --git a/drivers/spi/spi-rspi.c b/drivers/spi/spi-rspi.c
index 7222c7689c3c..85575d45901c 100644
--- a/drivers/spi/spi-rspi.c
+++ b/drivers/spi/spi-rspi.c
@@ -159,7 +159,7 @@
#define SPCMD_SPIMOD_DUAL SPCMD_SPIMOD0
#define SPCMD_SPIMOD_QUAD SPCMD_SPIMOD1
#define SPCMD_SPRW 0x0010 /* SPI Read/Write Access (Dual/Quad) */
-#define SPCMD_SSLA_MASK 0x0030 /* SSL Assert Signal Setting (RSPI) */
+#define SPCMD_SSLA(i) ((i) << 4) /* SSL Assert Signal Setting */
#define SPCMD_BRDV_MASK 0x000c /* Bit Rate Division Setting */
#define SPCMD_CPOL 0x0002 /* Clock Polarity Setting */
#define SPCMD_CPHA 0x0001 /* Clock Phase Setting */
@@ -242,6 +242,7 @@ struct spi_ops {
u16 mode_bits;
u16 flags;
u16 fifo_size;
+ u8 num_hw_ss;
};
/*
@@ -426,8 +427,6 @@ static int qspi_set_receive_trigger(struct rspi_data *rspi, unsigned int len)
return n;
}
-#define set_config_register(spi, n) spi->ops->set_config_register(spi, n)
-
static void rspi_enable_irq(const struct rspi_data *rspi, u8 enable)
{
rspi_write8(rspi, rspi_read8(rspi, RSPI_SPCR) | enable, RSPI_SPCR);
@@ -620,9 +619,8 @@ no_dma_tx:
dmaengine_terminate_all(rspi->ctlr->dma_rx);
no_dma_rx:
if (ret == -EAGAIN) {
- pr_warn_once("%s %s: DMA not available, falling back to PIO\n",
- dev_driver_string(&rspi->ctlr->dev),
- dev_name(&rspi->ctlr->dev));
+ dev_warn_once(&rspi->ctlr->dev,
+ "DMA not available, falling back to PIO\n");
}
return ret;
}
@@ -936,12 +934,16 @@ static int rspi_prepare_message(struct spi_controller *ctlr,
if (spi->mode & SPI_CPHA)
rspi->spcmd |= SPCMD_CPHA;
+ /* Configure slave signal to assert */
+ rspi->spcmd |= SPCMD_SSLA(spi->cs_gpiod ? rspi->ctlr->unused_native_cs
+ : spi->chip_select);
+
/* CMOS output mode and MOSI signal from previous transfer */
rspi->sppcr = 0;
if (spi->mode & SPI_LOOP)
rspi->sppcr |= SPPCR_SPLP;
- set_config_register(rspi, 8);
+ rspi->ops->set_config_register(rspi, 8);
if (msg->spi->mode &
(SPI_TX_DUAL | SPI_TX_QUAD | SPI_RX_DUAL | SPI_RX_QUAD)) {
@@ -1123,6 +1125,7 @@ static const struct spi_ops rspi_ops = {
.mode_bits = SPI_CPHA | SPI_CPOL | SPI_LOOP,
.flags = SPI_CONTROLLER_MUST_TX,
.fifo_size = 8,
+ .num_hw_ss = 2,
};
static const struct spi_ops rspi_rz_ops = {
@@ -1131,6 +1134,7 @@ static const struct spi_ops rspi_rz_ops = {
.mode_bits = SPI_CPHA | SPI_CPOL | SPI_LOOP,
.flags = SPI_CONTROLLER_MUST_RX | SPI_CONTROLLER_MUST_TX,
.fifo_size = 8, /* 8 for TX, 32 for RX */
+ .num_hw_ss = 1,
};
static const struct spi_ops qspi_ops = {
@@ -1141,6 +1145,7 @@ static const struct spi_ops qspi_ops = {
SPI_RX_DUAL | SPI_RX_QUAD,
.flags = SPI_CONTROLLER_MUST_RX | SPI_CONTROLLER_MUST_TX,
.fifo_size = 32,
+ .num_hw_ss = 1,
};
#ifdef CONFIG_OF
@@ -1256,6 +1261,8 @@ static int rspi_probe(struct platform_device *pdev)
ctlr->mode_bits = ops->mode_bits;
ctlr->flags = ops->flags;
ctlr->dev.of_node = pdev->dev.of_node;
+ ctlr->use_gpio_descriptors = true;
+ ctlr->max_native_cs = rspi->ops->num_hw_ss;
ret = platform_get_irq_byname_optional(pdev, "rx");
if (ret < 0) {
@@ -1314,8 +1321,6 @@ error1:
static const struct platform_device_id spi_driver_ids[] = {
{ "rspi", (kernel_ulong_t)&rspi_ops },
- { "rspi-rz", (kernel_ulong_t)&rspi_rz_ops },
- { "qspi", (kernel_ulong_t)&qspi_ops },
{},
};
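The SPCMD_SSLA() value must name a native chip select even for GPIO-driven devices, because the hardware asserts one SSL line on every transfer. With use_gpio_descriptors and max_native_cs set, the SPI core computes ctlr->unused_native_cs - a native CS no child device claims - and the prepare_message hunk above parks GPIO devices there. An illustrative scenario, assuming rspi_ops (num_hw_ss = 2):

	/* device A: native chip_select 0          -> SPCMD_SSLA(0) */
	/* device B: GPIO CS, unused_native_cs == 1 -> SPCMD_SSLA(1) */
	/* SSL1 toggles but is wired to nothing; only B's GPIO matters */
	rspi->spcmd |= SPCMD_SSLA(spi->cs_gpiod ? rspi->ctlr->unused_native_cs
						: spi->chip_select);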
diff --git a/drivers/spi/spi-sh-msiof.c b/drivers/spi/spi-sh-msiof.c
index 8f134735291f..1c11a00a2c36 100644
--- a/drivers/spi/spi-sh-msiof.c
+++ b/drivers/spi/spi-sh-msiof.c
@@ -14,8 +14,6 @@
#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
#include <linux/err.h>
-#include <linux/gpio.h>
-#include <linux/gpio/consumer.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/iopoll.h>
@@ -55,7 +53,6 @@ struct sh_msiof_spi_priv {
void *rx_dma_page;
dma_addr_t tx_dma_addr;
dma_addr_t rx_dma_addr;
- unsigned short unused_ss;
bool native_cs_inited;
bool native_cs_high;
bool slave_aborted;
@@ -63,140 +60,140 @@ struct sh_msiof_spi_priv {
#define MAX_SS 3 /* Maximum number of native chip selects */
-#define TMDR1 0x00 /* Transmit Mode Register 1 */
-#define TMDR2 0x04 /* Transmit Mode Register 2 */
-#define TMDR3 0x08 /* Transmit Mode Register 3 */
-#define RMDR1 0x10 /* Receive Mode Register 1 */
-#define RMDR2 0x14 /* Receive Mode Register 2 */
-#define RMDR3 0x18 /* Receive Mode Register 3 */
-#define TSCR 0x20 /* Transmit Clock Select Register */
-#define RSCR 0x22 /* Receive Clock Select Register (SH, A1, APE6) */
-#define CTR 0x28 /* Control Register */
-#define FCTR 0x30 /* FIFO Control Register */
-#define STR 0x40 /* Status Register */
-#define IER 0x44 /* Interrupt Enable Register */
-#define TDR1 0x48 /* Transmit Control Data Register 1 (SH, A1) */
-#define TDR2 0x4c /* Transmit Control Data Register 2 (SH, A1) */
-#define TFDR 0x50 /* Transmit FIFO Data Register */
-#define RDR1 0x58 /* Receive Control Data Register 1 (SH, A1) */
-#define RDR2 0x5c /* Receive Control Data Register 2 (SH, A1) */
-#define RFDR 0x60 /* Receive FIFO Data Register */
-
-/* TMDR1 and RMDR1 */
-#define MDR1_TRMD BIT(31) /* Transfer Mode (1 = Master mode) */
-#define MDR1_SYNCMD_MASK GENMASK(29, 28) /* SYNC Mode */
-#define MDR1_SYNCMD_SPI (2 << 28)/* Level mode/SPI */
-#define MDR1_SYNCMD_LR (3 << 28)/* L/R mode */
-#define MDR1_SYNCAC_SHIFT 25 /* Sync Polarity (1 = Active-low) */
-#define MDR1_BITLSB_SHIFT 24 /* MSB/LSB First (1 = LSB first) */
-#define MDR1_DTDL_SHIFT 20 /* Data Pin Bit Delay for MSIOF_SYNC */
-#define MDR1_SYNCDL_SHIFT 16 /* Frame Sync Signal Timing Delay */
-#define MDR1_FLD_MASK GENMASK(3, 2) /* Frame Sync Signal Interval (0-3) */
-#define MDR1_FLD_SHIFT 2
-#define MDR1_XXSTP BIT(0) /* Transmission/Reception Stop on FIFO */
-/* TMDR1 */
-#define TMDR1_PCON BIT(30) /* Transfer Signal Connection */
-#define TMDR1_SYNCCH_MASK GENMASK(27, 26) /* Sync Signal Channel Select */
-#define TMDR1_SYNCCH_SHIFT 26 /* 0=MSIOF_SYNC, 1=MSIOF_SS1, 2=MSIOF_SS2 */
-
-/* TMDR2 and RMDR2 */
-#define MDR2_BITLEN1(i) (((i) - 1) << 24) /* Data Size (8-32 bits) */
-#define MDR2_WDLEN1(i) (((i) - 1) << 16) /* Word Count (1-64/256 (SH, A1))) */
-#define MDR2_GRPMASK1 BIT(0) /* Group Output Mask 1 (SH, A1) */
-
-/* TSCR and RSCR */
-#define SCR_BRPS_MASK GENMASK(12, 8) /* Prescaler Setting (1-32) */
-#define SCR_BRPS(i) (((i) - 1) << 8)
-#define SCR_BRDV_MASK GENMASK(2, 0) /* Baud Rate Generator's Division Ratio */
-#define SCR_BRDV_DIV_2 0
-#define SCR_BRDV_DIV_4 1
-#define SCR_BRDV_DIV_8 2
-#define SCR_BRDV_DIV_16 3
-#define SCR_BRDV_DIV_32 4
-#define SCR_BRDV_DIV_1 7
-
-/* CTR */
-#define CTR_TSCKIZ_MASK GENMASK(31, 30) /* Transmit Clock I/O Polarity Select */
-#define CTR_TSCKIZ_SCK BIT(31) /* Disable SCK when TX disabled */
-#define CTR_TSCKIZ_POL_SHIFT 30 /* Transmit Clock Polarity */
-#define CTR_RSCKIZ_MASK GENMASK(29, 28) /* Receive Clock Polarity Select */
-#define CTR_RSCKIZ_SCK BIT(29) /* Must match CTR_TSCKIZ_SCK */
-#define CTR_RSCKIZ_POL_SHIFT 28 /* Receive Clock Polarity */
-#define CTR_TEDG_SHIFT 27 /* Transmit Timing (1 = falling edge) */
-#define CTR_REDG_SHIFT 26 /* Receive Timing (1 = falling edge) */
-#define CTR_TXDIZ_MASK GENMASK(23, 22) /* Pin Output When TX is Disabled */
-#define CTR_TXDIZ_LOW (0 << 22) /* 0 */
-#define CTR_TXDIZ_HIGH (1 << 22) /* 1 */
-#define CTR_TXDIZ_HIZ (2 << 22) /* High-impedance */
-#define CTR_TSCKE BIT(15) /* Transmit Serial Clock Output Enable */
-#define CTR_TFSE BIT(14) /* Transmit Frame Sync Signal Output Enable */
-#define CTR_TXE BIT(9) /* Transmit Enable */
-#define CTR_RXE BIT(8) /* Receive Enable */
-#define CTR_TXRST BIT(1) /* Transmit Reset */
-#define CTR_RXRST BIT(0) /* Receive Reset */
-
-/* FCTR */
-#define FCTR_TFWM_MASK GENMASK(31, 29) /* Transmit FIFO Watermark */
-#define FCTR_TFWM_64 (0 << 29) /* Transfer Request when 64 empty stages */
-#define FCTR_TFWM_32 (1 << 29) /* Transfer Request when 32 empty stages */
-#define FCTR_TFWM_24 (2 << 29) /* Transfer Request when 24 empty stages */
-#define FCTR_TFWM_16 (3 << 29) /* Transfer Request when 16 empty stages */
-#define FCTR_TFWM_12 (4 << 29) /* Transfer Request when 12 empty stages */
-#define FCTR_TFWM_8 (5 << 29) /* Transfer Request when 8 empty stages */
-#define FCTR_TFWM_4 (6 << 29) /* Transfer Request when 4 empty stages */
-#define FCTR_TFWM_1 (7 << 29) /* Transfer Request when 1 empty stage */
-#define FCTR_TFUA_MASK GENMASK(26, 20) /* Transmit FIFO Usable Area */
-#define FCTR_TFUA_SHIFT 20
-#define FCTR_TFUA(i) ((i) << FCTR_TFUA_SHIFT)
-#define FCTR_RFWM_MASK GENMASK(15, 13) /* Receive FIFO Watermark */
-#define FCTR_RFWM_1 (0 << 13) /* Transfer Request when 1 valid stages */
-#define FCTR_RFWM_4 (1 << 13) /* Transfer Request when 4 valid stages */
-#define FCTR_RFWM_8 (2 << 13) /* Transfer Request when 8 valid stages */
-#define FCTR_RFWM_16 (3 << 13) /* Transfer Request when 16 valid stages */
-#define FCTR_RFWM_32 (4 << 13) /* Transfer Request when 32 valid stages */
-#define FCTR_RFWM_64 (5 << 13) /* Transfer Request when 64 valid stages */
-#define FCTR_RFWM_128 (6 << 13) /* Transfer Request when 128 valid stages */
-#define FCTR_RFWM_256 (7 << 13) /* Transfer Request when 256 valid stages */
-#define FCTR_RFUA_MASK GENMASK(12, 4) /* Receive FIFO Usable Area (0x40 = full) */
-#define FCTR_RFUA_SHIFT 4
-#define FCTR_RFUA(i) ((i) << FCTR_RFUA_SHIFT)
-
-/* STR */
-#define STR_TFEMP BIT(29) /* Transmit FIFO Empty */
-#define STR_TDREQ BIT(28) /* Transmit Data Transfer Request */
-#define STR_TEOF BIT(23) /* Frame Transmission End */
-#define STR_TFSERR BIT(21) /* Transmit Frame Synchronization Error */
-#define STR_TFOVF BIT(20) /* Transmit FIFO Overflow */
-#define STR_TFUDF BIT(19) /* Transmit FIFO Underflow */
-#define STR_RFFUL BIT(13) /* Receive FIFO Full */
-#define STR_RDREQ BIT(12) /* Receive Data Transfer Request */
-#define STR_REOF BIT(7) /* Frame Reception End */
-#define STR_RFSERR BIT(5) /* Receive Frame Synchronization Error */
-#define STR_RFUDF BIT(4) /* Receive FIFO Underflow */
-#define STR_RFOVF BIT(3) /* Receive FIFO Overflow */
-
-/* IER */
-#define IER_TDMAE BIT(31) /* Transmit Data DMA Transfer Req. Enable */
-#define IER_TFEMPE BIT(29) /* Transmit FIFO Empty Enable */
-#define IER_TDREQE BIT(28) /* Transmit Data Transfer Request Enable */
-#define IER_TEOFE BIT(23) /* Frame Transmission End Enable */
-#define IER_TFSERRE BIT(21) /* Transmit Frame Sync Error Enable */
-#define IER_TFOVFE BIT(20) /* Transmit FIFO Overflow Enable */
-#define IER_TFUDFE BIT(19) /* Transmit FIFO Underflow Enable */
-#define IER_RDMAE BIT(15) /* Receive Data DMA Transfer Req. Enable */
-#define IER_RFFULE BIT(13) /* Receive FIFO Full Enable */
-#define IER_RDREQE BIT(12) /* Receive Data Transfer Request Enable */
-#define IER_REOFE BIT(7) /* Frame Reception End Enable */
-#define IER_RFSERRE BIT(5) /* Receive Frame Sync Error Enable */
-#define IER_RFUDFE BIT(4) /* Receive FIFO Underflow Enable */
-#define IER_RFOVFE BIT(3) /* Receive FIFO Overflow Enable */
+#define SITMDR1 0x00 /* Transmit Mode Register 1 */
+#define SITMDR2 0x04 /* Transmit Mode Register 2 */
+#define SITMDR3 0x08 /* Transmit Mode Register 3 */
+#define SIRMDR1 0x10 /* Receive Mode Register 1 */
+#define SIRMDR2 0x14 /* Receive Mode Register 2 */
+#define SIRMDR3 0x18 /* Receive Mode Register 3 */
+#define SITSCR 0x20 /* Transmit Clock Select Register */
+#define SIRSCR 0x22 /* Receive Clock Select Register (SH, A1, APE6) */
+#define SICTR 0x28 /* Control Register */
+#define SIFCTR 0x30 /* FIFO Control Register */
+#define SISTR 0x40 /* Status Register */
+#define SIIER 0x44 /* Interrupt Enable Register */
+#define SITDR1 0x48 /* Transmit Control Data Register 1 (SH, A1) */
+#define SITDR2 0x4c /* Transmit Control Data Register 2 (SH, A1) */
+#define SITFDR 0x50 /* Transmit FIFO Data Register */
+#define SIRDR1 0x58 /* Receive Control Data Register 1 (SH, A1) */
+#define SIRDR2 0x5c /* Receive Control Data Register 2 (SH, A1) */
+#define SIRFDR 0x60 /* Receive FIFO Data Register */
+
+/* SITMDR1 and SIRMDR1 */
+#define SIMDR1_TRMD BIT(31) /* Transfer Mode (1 = Master mode) */
+#define SIMDR1_SYNCMD_MASK GENMASK(29, 28) /* SYNC Mode */
+#define SIMDR1_SYNCMD_SPI (2 << 28) /* Level mode/SPI */
+#define SIMDR1_SYNCMD_LR (3 << 28) /* L/R mode */
+#define SIMDR1_SYNCAC_SHIFT 25 /* Sync Polarity (1 = Active-low) */
+#define SIMDR1_BITLSB_SHIFT 24 /* MSB/LSB First (1 = LSB first) */
+#define SIMDR1_DTDL_SHIFT 20 /* Data Pin Bit Delay for MSIOF_SYNC */
+#define SIMDR1_SYNCDL_SHIFT 16 /* Frame Sync Signal Timing Delay */
+#define SIMDR1_FLD_MASK GENMASK(3, 2) /* Frame Sync Signal Interval (0-3) */
+#define SIMDR1_FLD_SHIFT 2
+#define SIMDR1_XXSTP BIT(0) /* Transmission/Reception Stop on FIFO */
+/* SITMDR1 */
+#define SITMDR1_PCON BIT(30) /* Transfer Signal Connection */
+#define SITMDR1_SYNCCH_MASK GENMASK(27, 26) /* Sync Signal Channel Select */
+#define SITMDR1_SYNCCH_SHIFT 26 /* 0=MSIOF_SYNC, 1=MSIOF_SS1, 2=MSIOF_SS2 */
+
+/* SITMDR2 and SIRMDR2 */
+#define SIMDR2_BITLEN1(i) (((i) - 1) << 24) /* Data Size (8-32 bits) */
+#define SIMDR2_WDLEN1(i) (((i) - 1) << 16) /* Word Count (1-64/256 (SH, A1)) */
+#define SIMDR2_GRPMASK1 BIT(0) /* Group Output Mask 1 (SH, A1) */
+
+/* SITSCR and SIRSCR */
+#define SISCR_BRPS_MASK GENMASK(12, 8) /* Prescaler Setting (1-32) */
+#define SISCR_BRPS(i) (((i) - 1) << 8)
+#define SISCR_BRDV_MASK GENMASK(2, 0) /* Baud Rate Generator's Division Ratio */
+#define SISCR_BRDV_DIV_2 0
+#define SISCR_BRDV_DIV_4 1
+#define SISCR_BRDV_DIV_8 2
+#define SISCR_BRDV_DIV_16 3
+#define SISCR_BRDV_DIV_32 4
+#define SISCR_BRDV_DIV_1 7
+
+/* SICTR */
+#define SICTR_TSCKIZ_MASK GENMASK(31, 30) /* Transmit Clock I/O Polarity Select */
+#define SICTR_TSCKIZ_SCK BIT(31) /* Disable SCK when TX disabled */
+#define SICTR_TSCKIZ_POL_SHIFT 30 /* Transmit Clock Polarity */
+#define SICTR_RSCKIZ_MASK GENMASK(29, 28) /* Receive Clock Polarity Select */
+#define SICTR_RSCKIZ_SCK BIT(29) /* Must match SICTR_TSCKIZ_SCK */
+#define SICTR_RSCKIZ_POL_SHIFT 28 /* Receive Clock Polarity */
+#define SICTR_TEDG_SHIFT 27 /* Transmit Timing (1 = falling edge) */
+#define SICTR_REDG_SHIFT 26 /* Receive Timing (1 = falling edge) */
+#define SICTR_TXDIZ_MASK GENMASK(23, 22) /* Pin Output When TX is Disabled */
+#define SICTR_TXDIZ_LOW (0 << 22) /* 0 */
+#define SICTR_TXDIZ_HIGH (1 << 22) /* 1 */
+#define SICTR_TXDIZ_HIZ (2 << 22) /* High-impedance */
+#define SICTR_TSCKE BIT(15) /* Transmit Serial Clock Output Enable */
+#define SICTR_TFSE BIT(14) /* Transmit Frame Sync Signal Output Enable */
+#define SICTR_TXE BIT(9) /* Transmit Enable */
+#define SICTR_RXE BIT(8) /* Receive Enable */
+#define SICTR_TXRST BIT(1) /* Transmit Reset */
+#define SICTR_RXRST BIT(0) /* Receive Reset */
+
+/* SIFCTR */
+#define SIFCTR_TFWM_MASK GENMASK(31, 29) /* Transmit FIFO Watermark */
+#define SIFCTR_TFWM_64 (0 << 29) /* Transfer Request when 64 empty stages */
+#define SIFCTR_TFWM_32 (1 << 29) /* Transfer Request when 32 empty stages */
+#define SIFCTR_TFWM_24 (2 << 29) /* Transfer Request when 24 empty stages */
+#define SIFCTR_TFWM_16 (3 << 29) /* Transfer Request when 16 empty stages */
+#define SIFCTR_TFWM_12 (4 << 29) /* Transfer Request when 12 empty stages */
+#define SIFCTR_TFWM_8 (5 << 29) /* Transfer Request when 8 empty stages */
+#define SIFCTR_TFWM_4 (6 << 29) /* Transfer Request when 4 empty stages */
+#define SIFCTR_TFWM_1 (7 << 29) /* Transfer Request when 1 empty stage */
+#define SIFCTR_TFUA_MASK GENMASK(26, 20) /* Transmit FIFO Usable Area */
+#define SIFCTR_TFUA_SHIFT 20
+#define SIFCTR_TFUA(i) ((i) << SIFCTR_TFUA_SHIFT)
+#define SIFCTR_RFWM_MASK GENMASK(15, 13) /* Receive FIFO Watermark */
+#define SIFCTR_RFWM_1 (0 << 13) /* Transfer Request when 1 valid stages */
+#define SIFCTR_RFWM_4 (1 << 13) /* Transfer Request when 4 valid stages */
+#define SIFCTR_RFWM_8 (2 << 13) /* Transfer Request when 8 valid stages */
+#define SIFCTR_RFWM_16 (3 << 13) /* Transfer Request when 16 valid stages */
+#define SIFCTR_RFWM_32 (4 << 13) /* Transfer Request when 32 valid stages */
+#define SIFCTR_RFWM_64 (5 << 13) /* Transfer Request when 64 valid stages */
+#define SIFCTR_RFWM_128 (6 << 13) /* Transfer Request when 128 valid stages */
+#define SIFCTR_RFWM_256 (7 << 13) /* Transfer Request when 256 valid stages */
+#define SIFCTR_RFUA_MASK GENMASK(12, 4) /* Receive FIFO Usable Area (0x40 = full) */
+#define SIFCTR_RFUA_SHIFT 4
+#define SIFCTR_RFUA(i) ((i) << SIFCTR_RFUA_SHIFT)
+
+/* SISTR */
+#define SISTR_TFEMP BIT(29) /* Transmit FIFO Empty */
+#define SISTR_TDREQ BIT(28) /* Transmit Data Transfer Request */
+#define SISTR_TEOF BIT(23) /* Frame Transmission End */
+#define SISTR_TFSERR BIT(21) /* Transmit Frame Synchronization Error */
+#define SISTR_TFOVF BIT(20) /* Transmit FIFO Overflow */
+#define SISTR_TFUDF BIT(19) /* Transmit FIFO Underflow */
+#define SISTR_RFFUL BIT(13) /* Receive FIFO Full */
+#define SISTR_RDREQ BIT(12) /* Receive Data Transfer Request */
+#define SISTR_REOF BIT(7) /* Frame Reception End */
+#define SISTR_RFSERR BIT(5) /* Receive Frame Synchronization Error */
+#define SISTR_RFUDF BIT(4) /* Receive FIFO Underflow */
+#define SISTR_RFOVF BIT(3) /* Receive FIFO Overflow */
+
+/* SIIER */
+#define SIIER_TDMAE BIT(31) /* Transmit Data DMA Transfer Req. Enable */
+#define SIIER_TFEMPE BIT(29) /* Transmit FIFO Empty Enable */
+#define SIIER_TDREQE BIT(28) /* Transmit Data Transfer Request Enable */
+#define SIIER_TEOFE BIT(23) /* Frame Transmission End Enable */
+#define SIIER_TFSERRE BIT(21) /* Transmit Frame Sync Error Enable */
+#define SIIER_TFOVFE BIT(20) /* Transmit FIFO Overflow Enable */
+#define SIIER_TFUDFE BIT(19) /* Transmit FIFO Underflow Enable */
+#define SIIER_RDMAE BIT(15) /* Receive Data DMA Transfer Req. Enable */
+#define SIIER_RFFULE BIT(13) /* Receive FIFO Full Enable */
+#define SIIER_RDREQE BIT(12) /* Receive Data Transfer Request Enable */
+#define SIIER_REOFE BIT(7) /* Frame Reception End Enable */
+#define SIIER_RFSERRE BIT(5) /* Receive Frame Sync Error Enable */
+#define SIIER_RFUDFE BIT(4) /* Receive FIFO Underflow Enable */
+#define SIIER_RFOVFE BIT(3) /* Receive FIFO Overflow Enable */
static u32 sh_msiof_read(struct sh_msiof_spi_priv *p, int reg_offs)
{
switch (reg_offs) {
- case TSCR:
- case RSCR:
+ case SITSCR:
+ case SIRSCR:
return ioread16(p->mapbase + reg_offs);
default:
return ioread32(p->mapbase + reg_offs);
@@ -207,8 +204,8 @@ static void sh_msiof_write(struct sh_msiof_spi_priv *p, int reg_offs,
u32 value)
{
switch (reg_offs) {
- case TSCR:
- case RSCR:
+ case SITSCR:
+ case SIRSCR:
iowrite16(value, p->mapbase + reg_offs);
break;
default:
@@ -223,12 +220,12 @@ static int sh_msiof_modify_ctr_wait(struct sh_msiof_spi_priv *p,
u32 mask = clr | set;
u32 data;
- data = sh_msiof_read(p, CTR);
+ data = sh_msiof_read(p, SICTR);
data &= ~clr;
data |= set;
- sh_msiof_write(p, CTR, data);
+ sh_msiof_write(p, SICTR, data);
- return readl_poll_timeout_atomic(p->mapbase + CTR, data,
+ return readl_poll_timeout_atomic(p->mapbase + SICTR, data,
(data & mask) == set, 1, 100);
}
@@ -237,7 +234,7 @@ static irqreturn_t sh_msiof_spi_irq(int irq, void *data)
struct sh_msiof_spi_priv *p = data;
/* just disable the interrupt and wake up */
- sh_msiof_write(p, IER, 0);
+ sh_msiof_write(p, SIIER, 0);
complete(&p->done);
return IRQ_HANDLED;
@@ -245,20 +242,20 @@ static irqreturn_t sh_msiof_spi_irq(int irq, void *data)
static void sh_msiof_spi_reset_regs(struct sh_msiof_spi_priv *p)
{
- u32 mask = CTR_TXRST | CTR_RXRST;
+ u32 mask = SICTR_TXRST | SICTR_RXRST;
u32 data;
- data = sh_msiof_read(p, CTR);
+ data = sh_msiof_read(p, SICTR);
data |= mask;
- sh_msiof_write(p, CTR, data);
+ sh_msiof_write(p, SICTR, data);
- readl_poll_timeout_atomic(p->mapbase + CTR, data, !(data & mask), 1,
+ readl_poll_timeout_atomic(p->mapbase + SICTR, data, !(data & mask), 1,
100);
}
static const u32 sh_msiof_spi_div_array[] = {
- SCR_BRDV_DIV_1, SCR_BRDV_DIV_2, SCR_BRDV_DIV_4,
- SCR_BRDV_DIV_8, SCR_BRDV_DIV_16, SCR_BRDV_DIV_32,
+ SISCR_BRDV_DIV_1, SISCR_BRDV_DIV_2, SISCR_BRDV_DIV_4,
+ SISCR_BRDV_DIV_8, SISCR_BRDV_DIV_16, SISCR_BRDV_DIV_32,
};
static void sh_msiof_spi_set_clk_regs(struct sh_msiof_spi_priv *p,
@@ -276,7 +273,7 @@ static void sh_msiof_spi_set_clk_regs(struct sh_msiof_spi_priv *p,
div = DIV_ROUND_UP(parent_rate, spi_hz);
if (div <= 1024) {
- /* SCR_BRDV_DIV_1 is valid only if BRPS is x 1/1 or x 1/2 */
+ /* SISCR_BRDV_DIV_1 is valid only if BRPS is x 1/1 or x 1/2 */
if (!div_pow && div <= 32 && div > 2)
div_pow = 1;
@@ -295,10 +292,10 @@ static void sh_msiof_spi_set_clk_regs(struct sh_msiof_spi_priv *p,
brps = 32;
}
- scr = sh_msiof_spi_div_array[div_pow] | SCR_BRPS(brps);
- sh_msiof_write(p, TSCR, scr);
+ scr = sh_msiof_spi_div_array[div_pow] | SISCR_BRPS(brps);
+ sh_msiof_write(p, SITSCR, scr);
if (!(p->ctlr->flags & SPI_CONTROLLER_MUST_TX))
- sh_msiof_write(p, RSCR, scr);
+ sh_msiof_write(p, SIRSCR, scr);
}
static u32 sh_msiof_get_delay_bit(u32 dtdl_or_syncdl)
@@ -337,8 +334,8 @@ static u32 sh_msiof_spi_get_dtdl_and_syncdl(struct sh_msiof_spi_priv *p)
return 0;
}
- val = sh_msiof_get_delay_bit(p->info->dtdl) << MDR1_DTDL_SHIFT;
- val |= sh_msiof_get_delay_bit(p->info->syncdl) << MDR1_SYNCDL_SHIFT;
+ val = sh_msiof_get_delay_bit(p->info->dtdl) << SIMDR1_DTDL_SHIFT;
+ val |= sh_msiof_get_delay_bit(p->info->syncdl) << SIMDR1_SYNCDL_SHIFT;
return val;
}
@@ -357,54 +354,54 @@ static void sh_msiof_spi_set_pin_regs(struct sh_msiof_spi_priv *p, u32 ss,
* 1 0 11 11 0 0
* 1 1 11 11 1 1
*/
- tmp = MDR1_SYNCMD_SPI | 1 << MDR1_FLD_SHIFT | MDR1_XXSTP;
- tmp |= !cs_high << MDR1_SYNCAC_SHIFT;
- tmp |= lsb_first << MDR1_BITLSB_SHIFT;
+ tmp = SIMDR1_SYNCMD_SPI | 1 << SIMDR1_FLD_SHIFT | SIMDR1_XXSTP;
+ tmp |= !cs_high << SIMDR1_SYNCAC_SHIFT;
+ tmp |= lsb_first << SIMDR1_BITLSB_SHIFT;
tmp |= sh_msiof_spi_get_dtdl_and_syncdl(p);
if (spi_controller_is_slave(p->ctlr)) {
- sh_msiof_write(p, TMDR1, tmp | TMDR1_PCON);
+ sh_msiof_write(p, SITMDR1, tmp | SITMDR1_PCON);
} else {
- sh_msiof_write(p, TMDR1,
- tmp | MDR1_TRMD | TMDR1_PCON |
- (ss < MAX_SS ? ss : 0) << TMDR1_SYNCCH_SHIFT);
+ sh_msiof_write(p, SITMDR1,
+ tmp | SIMDR1_TRMD | SITMDR1_PCON |
+ (ss < MAX_SS ? ss : 0) << SITMDR1_SYNCCH_SHIFT);
}
if (p->ctlr->flags & SPI_CONTROLLER_MUST_TX) {
/* These bits are reserved if RX needs TX */
tmp &= ~0x0000ffff;
}
- sh_msiof_write(p, RMDR1, tmp);
+ sh_msiof_write(p, SIRMDR1, tmp);
tmp = 0;
- tmp |= CTR_TSCKIZ_SCK | cpol << CTR_TSCKIZ_POL_SHIFT;
- tmp |= CTR_RSCKIZ_SCK | cpol << CTR_RSCKIZ_POL_SHIFT;
+ tmp |= SICTR_TSCKIZ_SCK | cpol << SICTR_TSCKIZ_POL_SHIFT;
+ tmp |= SICTR_RSCKIZ_SCK | cpol << SICTR_RSCKIZ_POL_SHIFT;
edge = cpol ^ !cpha;
- tmp |= edge << CTR_TEDG_SHIFT;
- tmp |= edge << CTR_REDG_SHIFT;
- tmp |= tx_hi_z ? CTR_TXDIZ_HIZ : CTR_TXDIZ_LOW;
- sh_msiof_write(p, CTR, tmp);
+ tmp |= edge << SICTR_TEDG_SHIFT;
+ tmp |= edge << SICTR_REDG_SHIFT;
+ tmp |= tx_hi_z ? SICTR_TXDIZ_HIZ : SICTR_TXDIZ_LOW;
+ sh_msiof_write(p, SICTR, tmp);
}
static void sh_msiof_spi_set_mode_regs(struct sh_msiof_spi_priv *p,
const void *tx_buf, void *rx_buf,
u32 bits, u32 words)
{
- u32 dr2 = MDR2_BITLEN1(bits) | MDR2_WDLEN1(words);
+ u32 dr2 = SIMDR2_BITLEN1(bits) | SIMDR2_WDLEN1(words);
if (tx_buf || (p->ctlr->flags & SPI_CONTROLLER_MUST_TX))
- sh_msiof_write(p, TMDR2, dr2);
+ sh_msiof_write(p, SITMDR2, dr2);
else
- sh_msiof_write(p, TMDR2, dr2 | MDR2_GRPMASK1);
+ sh_msiof_write(p, SITMDR2, dr2 | SIMDR2_GRPMASK1);
if (rx_buf)
- sh_msiof_write(p, RMDR2, dr2);
+ sh_msiof_write(p, SIRMDR2, dr2);
}
static void sh_msiof_reset_str(struct sh_msiof_spi_priv *p)
{
- sh_msiof_write(p, STR,
- sh_msiof_read(p, STR) & ~(STR_TDREQ | STR_RDREQ));
+ sh_msiof_write(p, SISTR,
+ sh_msiof_read(p, SISTR) & ~(SISTR_TDREQ | SISTR_RDREQ));
}
static void sh_msiof_spi_write_fifo_8(struct sh_msiof_spi_priv *p,
@@ -414,7 +411,7 @@ static void sh_msiof_spi_write_fifo_8(struct sh_msiof_spi_priv *p,
int k;
for (k = 0; k < words; k++)
- sh_msiof_write(p, TFDR, buf_8[k] << fs);
+ sh_msiof_write(p, SITFDR, buf_8[k] << fs);
}
static void sh_msiof_spi_write_fifo_16(struct sh_msiof_spi_priv *p,
@@ -424,7 +421,7 @@ static void sh_msiof_spi_write_fifo_16(struct sh_msiof_spi_priv *p,
int k;
for (k = 0; k < words; k++)
- sh_msiof_write(p, TFDR, buf_16[k] << fs);
+ sh_msiof_write(p, SITFDR, buf_16[k] << fs);
}
static void sh_msiof_spi_write_fifo_16u(struct sh_msiof_spi_priv *p,
@@ -434,7 +431,7 @@ static void sh_msiof_spi_write_fifo_16u(struct sh_msiof_spi_priv *p,
int k;
for (k = 0; k < words; k++)
- sh_msiof_write(p, TFDR, get_unaligned(&buf_16[k]) << fs);
+ sh_msiof_write(p, SITFDR, get_unaligned(&buf_16[k]) << fs);
}
static void sh_msiof_spi_write_fifo_32(struct sh_msiof_spi_priv *p,
@@ -444,7 +441,7 @@ static void sh_msiof_spi_write_fifo_32(struct sh_msiof_spi_priv *p,
int k;
for (k = 0; k < words; k++)
- sh_msiof_write(p, TFDR, buf_32[k] << fs);
+ sh_msiof_write(p, SITFDR, buf_32[k] << fs);
}
static void sh_msiof_spi_write_fifo_32u(struct sh_msiof_spi_priv *p,
@@ -454,7 +451,7 @@ static void sh_msiof_spi_write_fifo_32u(struct sh_msiof_spi_priv *p,
int k;
for (k = 0; k < words; k++)
- sh_msiof_write(p, TFDR, get_unaligned(&buf_32[k]) << fs);
+ sh_msiof_write(p, SITFDR, get_unaligned(&buf_32[k]) << fs);
}
static void sh_msiof_spi_write_fifo_s32(struct sh_msiof_spi_priv *p,
@@ -464,7 +461,7 @@ static void sh_msiof_spi_write_fifo_s32(struct sh_msiof_spi_priv *p,
int k;
for (k = 0; k < words; k++)
- sh_msiof_write(p, TFDR, swab32(buf_32[k] << fs));
+ sh_msiof_write(p, SITFDR, swab32(buf_32[k] << fs));
}
static void sh_msiof_spi_write_fifo_s32u(struct sh_msiof_spi_priv *p,
@@ -474,7 +471,7 @@ static void sh_msiof_spi_write_fifo_s32u(struct sh_msiof_spi_priv *p,
int k;
for (k = 0; k < words; k++)
- sh_msiof_write(p, TFDR, swab32(get_unaligned(&buf_32[k]) << fs));
+ sh_msiof_write(p, SITFDR, swab32(get_unaligned(&buf_32[k]) << fs));
}
static void sh_msiof_spi_read_fifo_8(struct sh_msiof_spi_priv *p,
@@ -484,7 +481,7 @@ static void sh_msiof_spi_read_fifo_8(struct sh_msiof_spi_priv *p,
int k;
for (k = 0; k < words; k++)
- buf_8[k] = sh_msiof_read(p, RFDR) >> fs;
+ buf_8[k] = sh_msiof_read(p, SIRFDR) >> fs;
}
static void sh_msiof_spi_read_fifo_16(struct sh_msiof_spi_priv *p,
@@ -494,7 +491,7 @@ static void sh_msiof_spi_read_fifo_16(struct sh_msiof_spi_priv *p,
int k;
for (k = 0; k < words; k++)
- buf_16[k] = sh_msiof_read(p, RFDR) >> fs;
+ buf_16[k] = sh_msiof_read(p, SIRFDR) >> fs;
}
static void sh_msiof_spi_read_fifo_16u(struct sh_msiof_spi_priv *p,
@@ -504,7 +501,7 @@ static void sh_msiof_spi_read_fifo_16u(struct sh_msiof_spi_priv *p,
int k;
for (k = 0; k < words; k++)
- put_unaligned(sh_msiof_read(p, RFDR) >> fs, &buf_16[k]);
+ put_unaligned(sh_msiof_read(p, SIRFDR) >> fs, &buf_16[k]);
}
static void sh_msiof_spi_read_fifo_32(struct sh_msiof_spi_priv *p,
@@ -514,7 +511,7 @@ static void sh_msiof_spi_read_fifo_32(struct sh_msiof_spi_priv *p,
int k;
for (k = 0; k < words; k++)
- buf_32[k] = sh_msiof_read(p, RFDR) >> fs;
+ buf_32[k] = sh_msiof_read(p, SIRFDR) >> fs;
}
static void sh_msiof_spi_read_fifo_32u(struct sh_msiof_spi_priv *p,
@@ -524,7 +521,7 @@ static void sh_msiof_spi_read_fifo_32u(struct sh_msiof_spi_priv *p,
int k;
for (k = 0; k < words; k++)
- put_unaligned(sh_msiof_read(p, RFDR) >> fs, &buf_32[k]);
+ put_unaligned(sh_msiof_read(p, SIRFDR) >> fs, &buf_32[k]);
}
static void sh_msiof_spi_read_fifo_s32(struct sh_msiof_spi_priv *p,
@@ -534,7 +531,7 @@ static void sh_msiof_spi_read_fifo_s32(struct sh_msiof_spi_priv *p,
int k;
for (k = 0; k < words; k++)
- buf_32[k] = swab32(sh_msiof_read(p, RFDR) >> fs);
+ buf_32[k] = swab32(sh_msiof_read(p, SIRFDR) >> fs);
}
static void sh_msiof_spi_read_fifo_s32u(struct sh_msiof_spi_priv *p,
@@ -544,7 +541,7 @@ static void sh_msiof_spi_read_fifo_s32u(struct sh_msiof_spi_priv *p,
int k;
for (k = 0; k < words; k++)
- put_unaligned(swab32(sh_msiof_read(p, RFDR) >> fs), &buf_32[k]);
+ put_unaligned(swab32(sh_msiof_read(p, SIRFDR) >> fs), &buf_32[k]);
}
static int sh_msiof_spi_setup(struct spi_device *spi)
@@ -561,17 +558,17 @@ static int sh_msiof_spi_setup(struct spi_device *spi)
return 0;
/* Configure native chip select mode/polarity early */
- clr = MDR1_SYNCMD_MASK;
- set = MDR1_SYNCMD_SPI;
+ clr = SIMDR1_SYNCMD_MASK;
+ set = SIMDR1_SYNCMD_SPI;
if (spi->mode & SPI_CS_HIGH)
- clr |= BIT(MDR1_SYNCAC_SHIFT);
+ clr |= BIT(SIMDR1_SYNCAC_SHIFT);
else
- set |= BIT(MDR1_SYNCAC_SHIFT);
+ set |= BIT(SIMDR1_SYNCAC_SHIFT);
pm_runtime_get_sync(&p->pdev->dev);
- tmp = sh_msiof_read(p, TMDR1) & ~clr;
- sh_msiof_write(p, TMDR1, tmp | set | MDR1_TRMD | TMDR1_PCON);
- tmp = sh_msiof_read(p, RMDR1) & ~clr;
- sh_msiof_write(p, RMDR1, tmp | set);
+ tmp = sh_msiof_read(p, SITMDR1) & ~clr;
+ sh_msiof_write(p, SITMDR1, tmp | set | SIMDR1_TRMD | SITMDR1_PCON);
+ tmp = sh_msiof_read(p, SIRMDR1) & ~clr;
+ sh_msiof_write(p, SIRMDR1, tmp | set);
pm_runtime_put(&p->pdev->dev);
p->native_cs_high = spi->mode & SPI_CS_HIGH;
p->native_cs_inited = true;
@@ -587,7 +584,7 @@ static int sh_msiof_prepare_message(struct spi_controller *ctlr,
/* Configure pins before asserting CS */
if (spi->cs_gpiod) {
- ss = p->unused_ss;
+ ss = ctlr->unused_native_cs;
cs_high = p->native_cs_high;
} else {
ss = spi->chip_select;
@@ -607,15 +604,15 @@ static int sh_msiof_spi_start(struct sh_msiof_spi_priv *p, void *rx_buf)
/* setup clock and rx/tx signals */
if (!slave)
- ret = sh_msiof_modify_ctr_wait(p, 0, CTR_TSCKE);
+ ret = sh_msiof_modify_ctr_wait(p, 0, SICTR_TSCKE);
if (rx_buf && !ret)
- ret = sh_msiof_modify_ctr_wait(p, 0, CTR_RXE);
+ ret = sh_msiof_modify_ctr_wait(p, 0, SICTR_RXE);
if (!ret)
- ret = sh_msiof_modify_ctr_wait(p, 0, CTR_TXE);
+ ret = sh_msiof_modify_ctr_wait(p, 0, SICTR_TXE);
/* start by setting frame bit */
if (!ret && !slave)
- ret = sh_msiof_modify_ctr_wait(p, 0, CTR_TFSE);
+ ret = sh_msiof_modify_ctr_wait(p, 0, SICTR_TFSE);
return ret;
}
@@ -627,13 +624,13 @@ static int sh_msiof_spi_stop(struct sh_msiof_spi_priv *p, void *rx_buf)
/* shut down frame, rx/tx and clock signals */
if (!slave)
- ret = sh_msiof_modify_ctr_wait(p, CTR_TFSE, 0);
+ ret = sh_msiof_modify_ctr_wait(p, SICTR_TFSE, 0);
if (!ret)
- ret = sh_msiof_modify_ctr_wait(p, CTR_TXE, 0);
+ ret = sh_msiof_modify_ctr_wait(p, SICTR_TXE, 0);
if (rx_buf && !ret)
- ret = sh_msiof_modify_ctr_wait(p, CTR_RXE, 0);
+ ret = sh_msiof_modify_ctr_wait(p, SICTR_RXE, 0);
if (!ret && !slave)
- ret = sh_msiof_modify_ctr_wait(p, CTR_TSCKE, 0);
+ ret = sh_msiof_modify_ctr_wait(p, SICTR_TSCKE, 0);
return ret;
}
@@ -688,11 +685,11 @@ static int sh_msiof_spi_txrx_once(struct sh_msiof_spi_priv *p,
fifo_shift = 32 - bits;
/* default FIFO watermarks for PIO */
- sh_msiof_write(p, FCTR, 0);
+ sh_msiof_write(p, SIFCTR, 0);
/* setup msiof transfer mode registers */
sh_msiof_spi_set_mode_regs(p, tx_buf, rx_buf, bits, words);
- sh_msiof_write(p, IER, IER_TEOFE | IER_REOFE);
+ sh_msiof_write(p, SIIER, SIIER_TEOFE | SIIER_REOFE);
/* write tx fifo */
if (tx_buf)
@@ -731,7 +728,7 @@ stop_reset:
sh_msiof_reset_str(p);
sh_msiof_spi_stop(p, rx_buf);
stop_ier:
- sh_msiof_write(p, IER, 0);
+ sh_msiof_write(p, SIIER, 0);
return ret;
}
@@ -750,7 +747,7 @@ static int sh_msiof_dma_once(struct sh_msiof_spi_priv *p, const void *tx,
/* First prepare and submit the DMA request(s), as this may fail */
if (rx) {
- ier_bits |= IER_RDREQE | IER_RDMAE;
+ ier_bits |= SIIER_RDREQE | SIIER_RDMAE;
desc_rx = dmaengine_prep_slave_single(p->ctlr->dma_rx,
p->rx_dma_addr, len, DMA_DEV_TO_MEM,
DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
@@ -765,7 +762,7 @@ static int sh_msiof_dma_once(struct sh_msiof_spi_priv *p, const void *tx,
}
if (tx) {
- ier_bits |= IER_TDREQE | IER_TDMAE;
+ ier_bits |= SIIER_TDREQE | SIIER_TDMAE;
dma_sync_single_for_device(p->ctlr->dma_tx->device->dev,
p->tx_dma_addr, len, DMA_TO_DEVICE);
desc_tx = dmaengine_prep_slave_single(p->ctlr->dma_tx,
@@ -786,12 +783,12 @@ static int sh_msiof_dma_once(struct sh_msiof_spi_priv *p, const void *tx,
}
/* 1 stage FIFO watermarks for DMA */
- sh_msiof_write(p, FCTR, FCTR_TFWM_1 | FCTR_RFWM_1);
+ sh_msiof_write(p, SIFCTR, SIFCTR_TFWM_1 | SIFCTR_RFWM_1);
/* setup msiof transfer mode registers (32-bit words) */
sh_msiof_spi_set_mode_regs(p, tx, rx, 32, len / 4);
- sh_msiof_write(p, IER, ier_bits);
+ sh_msiof_write(p, SIIER, ier_bits);
reinit_completion(&p->done);
if (tx)
@@ -823,10 +820,10 @@ static int sh_msiof_dma_once(struct sh_msiof_spi_priv *p, const void *tx,
if (ret)
goto stop_reset;
- sh_msiof_write(p, IER, 0);
+ sh_msiof_write(p, SIIER, 0);
} else {
/* wait for tx fifo to be emptied */
- sh_msiof_write(p, IER, IER_TEOFE);
+ sh_msiof_write(p, SIIER, SIIER_TEOFE);
ret = sh_msiof_wait_for_completion(p, &p->done);
if (ret)
goto stop_reset;
@@ -856,7 +853,7 @@ stop_dma:
no_dma_tx:
if (rx)
dmaengine_terminate_all(p->ctlr->dma_rx);
- sh_msiof_write(p, IER, 0);
+ sh_msiof_write(p, SIIER, 0);
return ret;
}
@@ -1124,46 +1121,6 @@ static struct sh_msiof_spi_info *sh_msiof_spi_parse_dt(struct device *dev)
}
#endif
-static int sh_msiof_get_cs_gpios(struct sh_msiof_spi_priv *p)
-{
- struct device *dev = &p->pdev->dev;
- unsigned int used_ss_mask = 0;
- unsigned int cs_gpios = 0;
- unsigned int num_cs, i;
- int ret;
-
- ret = gpiod_count(dev, "cs");
- if (ret <= 0)
- return 0;
-
- num_cs = max_t(unsigned int, ret, p->ctlr->num_chipselect);
- for (i = 0; i < num_cs; i++) {
- struct gpio_desc *gpiod;
-
- gpiod = devm_gpiod_get_index(dev, "cs", i, GPIOD_ASIS);
- if (!IS_ERR(gpiod)) {
- devm_gpiod_put(dev, gpiod);
- cs_gpios++;
- continue;
- }
-
- if (PTR_ERR(gpiod) != -ENOENT)
- return PTR_ERR(gpiod);
-
- if (i >= MAX_SS) {
- dev_err(dev, "Invalid native chip select %d\n", i);
- return -EINVAL;
- }
- used_ss_mask |= BIT(i);
- }
- p->unused_ss = ffz(used_ss_mask);
- if (cs_gpios && p->unused_ss >= MAX_SS) {
- dev_err(dev, "No unused native chip select available\n");
- return -EINVAL;
- }
- return 0;
-}
-
static struct dma_chan *sh_msiof_request_dma_chan(struct device *dev,
enum dma_transfer_direction dir, unsigned int id, dma_addr_t port_addr)
{
@@ -1232,12 +1189,12 @@ static int sh_msiof_request_dma(struct sh_msiof_spi_priv *p)
ctlr = p->ctlr;
ctlr->dma_tx = sh_msiof_request_dma_chan(dev, DMA_MEM_TO_DEV,
- dma_tx_id, res->start + TFDR);
+ dma_tx_id, res->start + SITFDR);
if (!ctlr->dma_tx)
return -ENODEV;
ctlr->dma_rx = sh_msiof_request_dma_chan(dev, DMA_DEV_TO_MEM,
- dma_rx_id, res->start + RFDR);
+ dma_rx_id, res->start + SIRFDR);
if (!ctlr->dma_rx)
goto free_tx_chan;
@@ -1373,17 +1330,12 @@ static int sh_msiof_spi_probe(struct platform_device *pdev)
if (p->info->rx_fifo_override)
p->rx_fifo_size = p->info->rx_fifo_override;
- /* Setup GPIO chip selects */
- ctlr->num_chipselect = p->info->num_chipselect;
- ret = sh_msiof_get_cs_gpios(p);
- if (ret)
- goto err1;
-
/* init controller code */
ctlr->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH;
ctlr->mode_bits |= SPI_LSB_FIRST | SPI_3WIRE;
ctlr->flags = chipdata->ctlr_flags;
ctlr->bus_num = pdev->id;
+ ctlr->num_chipselect = p->info->num_chipselect;
ctlr->dev.of_node = pdev->dev.of_node;
ctlr->setup = sh_msiof_spi_setup;
ctlr->prepare_message = sh_msiof_prepare_message;
@@ -1392,6 +1344,7 @@ static int sh_msiof_spi_probe(struct platform_device *pdev)
ctlr->auto_runtime_pm = true;
ctlr->transfer_one = sh_msiof_transfer_one;
ctlr->use_gpio_descriptors = true;
+ ctlr->max_native_cs = MAX_SS;
ret = sh_msiof_request_dma(p);
if (ret < 0)
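
The net effect of the sh-msiof changes above: registers get their datasheet SI-prefixed names, and the driver-private chip-select bookkeeping (sh_msiof_get_cs_gpios(), p->unused_ss) is replaced by two core-managed fields. A minimal driver-side sketch of that contract, with the surrounding probe/prepare code elided:

    /* Sketch: delegate native-CS accounting to the SPI core. */
    ctlr->use_gpio_descriptors = true;  /* core parses cs-gpios itself */
    ctlr->max_native_cs = MAX_SS;       /* hardware limit on native CS lines */

    /* In prepare_message(): when the device uses a GPIO CS, steer the
     * hardware's sync signal to the spare native slot the core found. */
    if (spi->cs_gpiod)
            ss = ctlr->unused_native_cs;
    else
            ss = spi->chip_select;
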
diff --git a/drivers/spi/spi-sirf.c b/drivers/spi/spi-sirf.c
index e1e639191557..8419e6722e17 100644
--- a/drivers/spi/spi-sirf.c
+++ b/drivers/spi/spi-sirf.c
@@ -1126,16 +1126,16 @@ static int spi_sirfsoc_probe(struct platform_device *pdev)
sspi->bitbang.master->dev.of_node = pdev->dev.of_node;
/* request DMA channels */
- sspi->rx_chan = dma_request_slave_channel(&pdev->dev, "rx");
- if (!sspi->rx_chan) {
+ sspi->rx_chan = dma_request_chan(&pdev->dev, "rx");
+ if (IS_ERR(sspi->rx_chan)) {
dev_err(&pdev->dev, "can not allocate rx dma channel\n");
- ret = -ENODEV;
+ ret = PTR_ERR(sspi->rx_chan);
goto free_master;
}
- sspi->tx_chan = dma_request_slave_channel(&pdev->dev, "tx");
- if (!sspi->tx_chan) {
+ sspi->tx_chan = dma_request_chan(&pdev->dev, "tx");
+ if (IS_ERR(sspi->tx_chan)) {
dev_err(&pdev->dev, "can not allocate tx dma channel\n");
- ret = -ENODEV;
+ ret = PTR_ERR(sspi->tx_chan);
goto free_rx_dma;
}
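
This conversion pattern repeats across the rest of the series: dma_request_slave_channel() returns NULL on any failure, forcing callers to invent an errno, whereas dma_request_chan() returns an ERR_PTR carrying the real reason, notably -EPROBE_DEFER when the DMA controller is not ready yet. A hedged before/after sketch:

    /* Before: the actual failure reason is lost. */
    chan = dma_request_slave_channel(dev, "rx");
    if (!chan)
            return -ENODEV;            /* might really be probe deferral */

    /* After: propagate the real error so deferred probing can retry. */
    chan = dma_request_chan(dev, "rx");
    if (IS_ERR(chan))
            return PTR_ERR(chan);      /* e.g. -EPROBE_DEFER */
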
diff --git a/drivers/spi/spi-stm32-qspi.c b/drivers/spi/spi-stm32-qspi.c
index 4e726929bb4f..4ef569b47aa6 100644
--- a/drivers/spi/spi-stm32-qspi.c
+++ b/drivers/spi/spi-stm32-qspi.c
@@ -470,10 +470,11 @@ static int stm32_qspi_setup(struct spi_device *spi)
return 0;
}
-static void stm32_qspi_dma_setup(struct stm32_qspi *qspi)
+static int stm32_qspi_dma_setup(struct stm32_qspi *qspi)
{
struct dma_slave_config dma_cfg;
struct device *dev = qspi->dev;
+ int ret = 0;
memset(&dma_cfg, 0, sizeof(dma_cfg));
@@ -484,8 +485,13 @@ static void stm32_qspi_dma_setup(struct stm32_qspi *qspi)
dma_cfg.src_maxburst = 4;
dma_cfg.dst_maxburst = 4;
- qspi->dma_chrx = dma_request_slave_channel(dev, "rx");
- if (qspi->dma_chrx) {
+ qspi->dma_chrx = dma_request_chan(dev, "rx");
+ if (IS_ERR(qspi->dma_chrx)) {
+ ret = PTR_ERR(qspi->dma_chrx);
+ qspi->dma_chrx = NULL;
+ if (ret == -EPROBE_DEFER)
+ goto out;
+ } else {
if (dmaengine_slave_config(qspi->dma_chrx, &dma_cfg)) {
dev_err(dev, "dma rx config failed\n");
dma_release_channel(qspi->dma_chrx);
@@ -493,8 +499,11 @@ static void stm32_qspi_dma_setup(struct stm32_qspi *qspi)
}
}
- qspi->dma_chtx = dma_request_slave_channel(dev, "tx");
- if (qspi->dma_chtx) {
+ qspi->dma_chtx = dma_request_chan(dev, "tx");
+ if (IS_ERR(qspi->dma_chtx)) {
+ ret = PTR_ERR(qspi->dma_chtx);
+ qspi->dma_chtx = NULL;
+ } else {
if (dmaengine_slave_config(qspi->dma_chtx, &dma_cfg)) {
dev_err(dev, "dma tx config failed\n");
dma_release_channel(qspi->dma_chtx);
@@ -502,7 +511,13 @@ static void stm32_qspi_dma_setup(struct stm32_qspi *qspi)
}
}
+out:
init_completion(&qspi->dma_completion);
+
+ if (ret != -EPROBE_DEFER)
+ ret = 0;
+
+ return ret;
}
static void stm32_qspi_dma_free(struct stm32_qspi *qspi)
@@ -608,7 +623,10 @@ static int stm32_qspi_probe(struct platform_device *pdev)
qspi->dev = dev;
platform_set_drvdata(pdev, qspi);
- stm32_qspi_dma_setup(qspi);
+ ret = stm32_qspi_dma_setup(qspi);
+ if (ret)
+ goto err;
+
mutex_init(&qspi->lock);
ctrl->mode_bits = SPI_RX_DUAL | SPI_RX_QUAD
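
For stm32-qspi, DMA remains optional: the setup function now returns an int, but only -EPROBE_DEFER is allowed to fail the probe; any other request failure simply leaves the channel NULL and the driver falls back to polling. A sketch of that policy as a hypothetical helper (not part of the patch):

    /* Sketch: request an optional DMA channel; only defer is fatal. */
    static int request_optional_chan(struct device *dev, const char *name,
                                     struct dma_chan **out)
    {
            struct dma_chan *chan = dma_request_chan(dev, name);

            if (IS_ERR(chan)) {
                    int err = PTR_ERR(chan);

                    *out = NULL;            /* PIO fallback */
                    return err == -EPROBE_DEFER ? err : 0;
            }

            *out = chan;
            return 0;
    }
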
diff --git a/drivers/spi/spi-stm32.c b/drivers/spi/spi-stm32.c
index b222ce8d083e..e041f9c4ec47 100644
--- a/drivers/spi/spi-stm32.c
+++ b/drivers/spi/spi-stm32.c
@@ -9,7 +9,6 @@
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/dmaengine.h>
-#include <linux/gpio.h>
#include <linux/interrupt.h>
#include <linux/iopoll.h>
#include <linux/module.h>
@@ -974,29 +973,6 @@ static irqreturn_t stm32h7_spi_irq_thread(int irq, void *dev_id)
}
/**
- * stm32_spi_setup - setup device chip select
- */
-static int stm32_spi_setup(struct spi_device *spi_dev)
-{
- int ret = 0;
-
- if (!gpio_is_valid(spi_dev->cs_gpio)) {
- dev_err(&spi_dev->dev, "%d is not a valid gpio\n",
- spi_dev->cs_gpio);
- return -EINVAL;
- }
-
- dev_dbg(&spi_dev->dev, "%s: set gpio%d output %s\n", __func__,
- spi_dev->cs_gpio,
- (spi_dev->mode & SPI_CS_HIGH) ? "low" : "high");
-
- ret = gpio_direction_output(spi_dev->cs_gpio,
- !(spi_dev->mode & SPI_CS_HIGH));
-
- return ret;
-}
-
-/**
* stm32_spi_prepare_msg - set up the controller to transfer a single message
*/
static int stm32_spi_prepare_msg(struct spi_master *master,
@@ -1810,7 +1786,7 @@ static int stm32_spi_probe(struct platform_device *pdev)
struct spi_master *master;
struct stm32_spi *spi;
struct resource *res;
- int i, ret;
+ int ret;
master = spi_alloc_master(&pdev->dev, sizeof(struct stm32_spi));
if (!master) {
@@ -1898,22 +1874,34 @@ static int stm32_spi_probe(struct platform_device *pdev)
master->bits_per_word_mask = spi->cfg->get_bpw_mask(spi);
master->max_speed_hz = spi->clk_rate / spi->cfg->baud_rate_div_min;
master->min_speed_hz = spi->clk_rate / spi->cfg->baud_rate_div_max;
- master->setup = stm32_spi_setup;
+ master->use_gpio_descriptors = true;
master->prepare_message = stm32_spi_prepare_msg;
master->transfer_one = stm32_spi_transfer_one;
master->unprepare_message = stm32_spi_unprepare_msg;
- spi->dma_tx = dma_request_slave_channel(spi->dev, "tx");
- if (!spi->dma_tx)
+ spi->dma_tx = dma_request_chan(spi->dev, "tx");
+ if (IS_ERR(spi->dma_tx)) {
+ ret = PTR_ERR(spi->dma_tx);
+ spi->dma_tx = NULL;
+ if (ret == -EPROBE_DEFER)
+ goto err_clk_disable;
+
dev_warn(&pdev->dev, "failed to request tx dma channel\n");
- else
+ } else {
master->dma_tx = spi->dma_tx;
+ }
+
+ spi->dma_rx = dma_request_chan(spi->dev, "rx");
+ if (IS_ERR(spi->dma_rx)) {
+ ret = PTR_ERR(spi->dma_rx);
+ spi->dma_rx = NULL;
+ if (ret == -EPROBE_DEFER)
+ goto err_dma_release;
- spi->dma_rx = dma_request_slave_channel(spi->dev, "rx");
- if (!spi->dma_rx)
dev_warn(&pdev->dev, "failed to request rx dma channel\n");
- else
+ } else {
master->dma_rx = spi->dma_rx;
+ }
if (spi->dma_tx || spi->dma_rx)
master->can_dma = stm32_spi_can_dma;
@@ -1925,43 +1913,26 @@ static int stm32_spi_probe(struct platform_device *pdev)
if (ret) {
dev_err(&pdev->dev, "spi master registration failed: %d\n",
ret);
- goto err_dma_release;
+ goto err_pm_disable;
}
- if (!master->cs_gpios) {
+ if (!master->cs_gpiods) {
dev_err(&pdev->dev, "no CS gpios available\n");
ret = -EINVAL;
- goto err_dma_release;
- }
-
- for (i = 0; i < master->num_chipselect; i++) {
- if (!gpio_is_valid(master->cs_gpios[i])) {
- dev_err(&pdev->dev, "%i is not a valid gpio\n",
- master->cs_gpios[i]);
- ret = -EINVAL;
- goto err_dma_release;
- }
-
- ret = devm_gpio_request(&pdev->dev, master->cs_gpios[i],
- DRIVER_NAME);
- if (ret) {
- dev_err(&pdev->dev, "can't get CS gpio %i\n",
- master->cs_gpios[i]);
- goto err_dma_release;
- }
+ goto err_pm_disable;
}
dev_info(&pdev->dev, "driver initialized\n");
return 0;
+err_pm_disable:
+ pm_runtime_disable(&pdev->dev);
err_dma_release:
if (spi->dma_tx)
dma_release_channel(spi->dma_tx);
if (spi->dma_rx)
dma_release_channel(spi->dma_rx);
-
- pm_runtime_disable(&pdev->dev);
err_clk_disable:
clk_disable_unprepare(spi->clk);
err_master_put:
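
Note the label shuffle at the end of stm32_spi_probe(): pm_runtime_disable() moves out of the shared tail and into a new err_pm_disable label, because only failures after pm_runtime_enable() must undo it, while earlier failures still release the DMA channels and clock in reverse acquisition order. The shape of the pattern, with hypothetical step names:

    /* Sketch: each label undoes exactly the steps completed before it,
     * in reverse order (names are illustrative, not from the driver). */
    ret = step_clk(dev);
    if (ret)
            return ret;
    ret = step_dma(dev);
    if (ret)
            goto err_clk;
    ret = step_register(dev);       /* runs after pm_runtime_enable() */
    if (ret)
            goto err_pm;            /* must disable runtime PM first */
    return 0;

    err_pm:
            pm_runtime_disable(dev);
            release_dma(dev);
    err_clk:
            clk_disable_unprepare(clk);
            return ret;
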
diff --git a/drivers/spi/spi-tegra114.c b/drivers/spi/spi-tegra114.c
index fc40ab146c86..83edabdb41ad 100644
--- a/drivers/spi/spi-tegra114.c
+++ b/drivers/spi/spi-tegra114.c
@@ -269,10 +269,10 @@ static unsigned tegra_spi_calculate_curr_xfer_param(
if ((bits_per_word == 8 || bits_per_word == 16 ||
bits_per_word == 32) && t->len > 3) {
- tspi->is_packed = 1;
+ tspi->is_packed = true;
tspi->words_per_32bit = 32/bits_per_word;
} else {
- tspi->is_packed = 0;
+ tspi->is_packed = false;
tspi->words_per_32bit = 1;
}
diff --git a/drivers/spi/spi-ti-qspi.c b/drivers/spi/spi-ti-qspi.c
index 66dcb6128539..366a3e5cca6b 100644
--- a/drivers/spi/spi-ti-qspi.c
+++ b/drivers/spi/spi-ti-qspi.c
@@ -80,8 +80,6 @@ struct ti_qspi {
#define QSPI_COMPLETION_TIMEOUT msecs_to_jiffies(2000)
-#define QSPI_FCLK 192000000
-
/* Clock Control */
#define QSPI_CLK_EN (1 << 31)
#define QSPI_CLK_DIV_MAX 0xffff
@@ -316,6 +314,8 @@ static int qspi_read_msg(struct ti_qspi *qspi, struct spi_transfer *t,
{
int wlen;
unsigned int cmd;
+ u32 rx;
+ u8 rxlen, rx_wlen;
u8 *rxbuf;
rxbuf = t->rx_buf;
@@ -332,20 +332,67 @@ static int qspi_read_msg(struct ti_qspi *qspi, struct spi_transfer *t,
break;
}
wlen = t->bits_per_word >> 3; /* in bytes */
+ rx_wlen = wlen;
while (count) {
dev_dbg(qspi->dev, "rx cmd %08x dc %08x\n", cmd, qspi->dc);
if (qspi_is_busy(qspi))
return -EBUSY;
+ switch (wlen) {
+ case 1:
+ /*
+ * Optimize 8-bit word transfers, as used by
+ * SPI flash devices.
+ */
+ if (count >= QSPI_WLEN_MAX_BYTES) {
+ rxlen = QSPI_WLEN_MAX_BYTES;
+ } else {
+ rxlen = min(count, 4);
+ }
+ rx_wlen = rxlen << 3;
+ cmd &= ~QSPI_WLEN_MASK;
+ cmd |= QSPI_WLEN(rx_wlen);
+ break;
+ default:
+ rxlen = wlen;
+ break;
+ }
+
ti_qspi_write(qspi, cmd, QSPI_SPI_CMD_REG);
if (ti_qspi_poll_wc(qspi)) {
dev_err(qspi->dev, "read timed out\n");
return -ETIMEDOUT;
}
+
switch (wlen) {
case 1:
- *rxbuf = readb(qspi->base + QSPI_SPI_DATA_REG);
+ /*
+ * Optimize 8-bit word transfers, as used by
+ * SPI flash devices.
+ */
+ if (count >= QSPI_WLEN_MAX_BYTES) {
+ u32 *rxp = (u32 *) rxbuf;
+ rx = readl(qspi->base + QSPI_SPI_DATA_REG_3);
+ *rxp++ = be32_to_cpu(rx);
+ rx = readl(qspi->base + QSPI_SPI_DATA_REG_2);
+ *rxp++ = be32_to_cpu(rx);
+ rx = readl(qspi->base + QSPI_SPI_DATA_REG_1);
+ *rxp++ = be32_to_cpu(rx);
+ rx = readl(qspi->base + QSPI_SPI_DATA_REG);
+ *rxp++ = be32_to_cpu(rx);
+ } else {
+ u8 *rxp = rxbuf;
+ rx = readl(qspi->base + QSPI_SPI_DATA_REG);
+ if (rx_wlen >= 8)
+ *rxp++ = rx >> (rx_wlen - 8);
+ if (rx_wlen >= 16)
+ *rxp++ = rx >> (rx_wlen - 16);
+ if (rx_wlen >= 24)
+ *rxp++ = rx >> (rx_wlen - 24);
+ if (rx_wlen >= 32)
+ *rxp++ = rx;
+ }
break;
case 2:
*((u16 *)rxbuf) = readw(qspi->base + QSPI_SPI_DATA_REG);
@@ -354,8 +401,8 @@ static int qspi_read_msg(struct ti_qspi *qspi, struct spi_transfer *t,
*((u32 *)rxbuf) = readl(qspi->base + QSPI_SPI_DATA_REG);
break;
}
- rxbuf += wlen;
- count -= wlen;
+ rxbuf += rxlen;
+ count -= rxlen;
}
return 0;
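
The rewritten 8-bit path above programs up to QSPI_WLEN_MAX_BYTES per command and drains four data registers at once; for short tails it packs 1-4 bytes into a single word whose frame length is rxlen * 8 bits, first byte in the most significant position. The descending-shift unpack is the subtle part, so here it is as a standalone sketch (compilable on its own; the names are mine, not the driver's):

    #include <stdint.h>

    /* Unpack rxlen = rx_wlen / 8 bytes (1..4) from one data word that the
     * controller filled MSB-first for a frame of rx_wlen bits. */
    static void unpack_tail(uint8_t *dst, uint32_t rx, unsigned int rx_wlen)
    {
            if (rx_wlen >= 8)
                    *dst++ = rx >> (rx_wlen - 8);
            if (rx_wlen >= 16)
                    *dst++ = rx >> (rx_wlen - 16);
            if (rx_wlen >= 24)
                    *dst++ = rx >> (rx_wlen - 24);
            if (rx_wlen >= 32)
                    *dst++ = rx;
    }
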
@@ -527,6 +574,35 @@ static void ti_qspi_setup_mmap_read(struct spi_device *spi, u8 opcode,
QSPI_SPI_SETUP_REG(spi->chip_select));
}
+static int ti_qspi_adjust_op_size(struct spi_mem *mem, struct spi_mem_op *op)
+{
+ struct ti_qspi *qspi = spi_controller_get_devdata(mem->spi->master);
+ size_t max_len;
+
+ if (op->data.dir == SPI_MEM_DATA_IN) {
+ if (op->addr.val < qspi->mmap_size) {
+ /* Limit MMIO to the mmapped region */
+ if (op->addr.val + op->data.nbytes > qspi->mmap_size) {
+ max_len = qspi->mmap_size - op->addr.val;
+ op->data.nbytes = min((size_t) op->data.nbytes,
+ max_len);
+ }
+ } else {
+ /*
+ * Use fallback mode (SW-generated transfers) above the
+ * mmapped region.
+ * Adjust the size to comply with the QSPI max frame length.
+ */
+ max_len = QSPI_FRAME;
+ max_len -= 1 + op->addr.nbytes + op->dummy.nbytes;
+ op->data.nbytes = min((size_t) op->data.nbytes,
+ max_len);
+ }
+ }
+
+ return 0;
+}
+
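
ti_qspi_adjust_op_size() gives the spi-mem core the controller's limits up front: reads that start inside the memory-mapped window are clamped to the window's end, and fallback reads are clamped to one QSPI frame minus the opcode, address and dummy bytes. The fallback arithmetic as a self-contained sketch (the QSPI_FRAME value here is an assumption for illustration, not taken from the hunk):

    #include <stddef.h>

    #define QSPI_FRAME 4096U        /* assumed frame limit, illustrative */

    /* Clamp a fallback read to what one frame can carry after the
     * 1-byte opcode, the address bytes and the dummy bytes. */
    static size_t clamp_fallback_read(size_t nbytes, size_t addr_nbytes,
                                      size_t dummy_nbytes)
    {
            size_t max_len = QSPI_FRAME - (1 + addr_nbytes + dummy_nbytes);

            return nbytes < max_len ? nbytes : max_len;
    }
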
static int ti_qspi_exec_mem_op(struct spi_mem *mem,
const struct spi_mem_op *op)
{
@@ -577,6 +653,7 @@ static int ti_qspi_exec_mem_op(struct spi_mem *mem,
static const struct spi_controller_mem_ops ti_qspi_mem_ops = {
.exec_op = ti_qspi_exec_mem_op,
+ .adjust_op_size = ti_qspi_adjust_op_size,
};
static int ti_qspi_start_transfer_one(struct spi_master *master,
diff --git a/drivers/spi/spi-topcliff-pch.c b/drivers/spi/spi-topcliff-pch.c
index 223353fa2d8a..d7ea6af74743 100644
--- a/drivers/spi/spi-topcliff-pch.c
+++ b/drivers/spi/spi-topcliff-pch.c
@@ -863,7 +863,7 @@ static void pch_spi_request_dma(struct pch_spi_data *data, int bpw)
/* Set Tx DMA */
param = &dma->param_tx;
param->dma_dev = &dma_dev->dev;
- param->chan_id = data->ch * 2; /* Tx = 0, 2 */;
+ param->chan_id = data->ch * 2; /* Tx = 0, 2 */
param->tx_reg = data->io_base_addr + PCH_SPDWR;
param->width = width;
chan = dma_request_channel(mask, pch_spi_filter, param);
@@ -878,7 +878,7 @@ static void pch_spi_request_dma(struct pch_spi_data *data, int bpw)
/* Set Rx DMA */
param = &dma->param_rx;
param->dma_dev = &dma_dev->dev;
- param->chan_id = data->ch * 2 + 1; /* Rx = Tx + 1 */;
+ param->chan_id = data->ch * 2 + 1; /* Rx = Tx + 1 */
param->rx_reg = data->io_base_addr + PCH_SPDRR;
param->width = width;
chan = dma_request_channel(mask, pch_spi_filter, param);
diff --git a/drivers/spi/spi-uniphier.c b/drivers/spi/spi-uniphier.c
index ce9b30112e26..0fa50979644d 100644
--- a/drivers/spi/spi-uniphier.c
+++ b/drivers/spi/spi-uniphier.c
@@ -8,6 +8,7 @@
#include <linux/bitops.h>
#include <linux/clk.h>
#include <linux/delay.h>
+#include <linux/dmaengine.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/module.h>
@@ -23,6 +24,7 @@
struct uniphier_spi_priv {
void __iomem *base;
+ dma_addr_t base_dma_addr;
struct clk *clk;
struct spi_master *master;
struct completion xfer_done;
@@ -32,6 +34,7 @@ struct uniphier_spi_priv {
unsigned int rx_bytes;
const u8 *tx_buf;
u8 *rx_buf;
+ atomic_t dma_busy;
bool is_save_param;
u8 bits_per_word;
@@ -61,11 +64,16 @@ struct uniphier_spi_priv {
#define SSI_FPS_FSTRT BIT(14)
#define SSI_SR 0x14
+#define SSI_SR_BUSY BIT(7)
#define SSI_SR_RNE BIT(0)
#define SSI_IE 0x18
+#define SSI_IE_TCIE BIT(4)
#define SSI_IE_RCIE BIT(3)
+#define SSI_IE_TXRE BIT(2)
+#define SSI_IE_RXRE BIT(1)
#define SSI_IE_RORIE BIT(0)
+#define SSI_IE_ALL_MASK GENMASK(4, 0)
#define SSI_IS 0x1c
#define SSI_IS_RXRS BIT(9)
@@ -87,15 +95,19 @@ struct uniphier_spi_priv {
#define SSI_RXDR 0x24
#define SSI_FIFO_DEPTH 8U
+#define SSI_FIFO_BURST_NUM 1
+
+#define SSI_DMA_RX_BUSY BIT(1)
+#define SSI_DMA_TX_BUSY BIT(0)
static inline unsigned int bytes_per_word(unsigned int bits)
{
return bits <= 8 ? 1 : (bits <= 16 ? 2 : 4);
}
-static inline void uniphier_spi_irq_enable(struct spi_device *spi, u32 mask)
+static inline void uniphier_spi_irq_enable(struct uniphier_spi_priv *priv,
+ u32 mask)
{
- struct uniphier_spi_priv *priv = spi_master_get_devdata(spi->master);
u32 val;
val = readl(priv->base + SSI_IE);
@@ -103,9 +115,9 @@ static inline void uniphier_spi_irq_enable(struct spi_device *spi, u32 mask)
writel(val, priv->base + SSI_IE);
}
-static inline void uniphier_spi_irq_disable(struct spi_device *spi, u32 mask)
+static inline void uniphier_spi_irq_disable(struct uniphier_spi_priv *priv,
+ u32 mask)
{
- struct uniphier_spi_priv *priv = spi_master_get_devdata(spi->master);
u32 val;
val = readl(priv->base + SSI_IE);
@@ -334,6 +346,128 @@ static void uniphier_spi_set_cs(struct spi_device *spi, bool enable)
writel(val, priv->base + SSI_FPS);
}
+static bool uniphier_spi_can_dma(struct spi_master *master,
+ struct spi_device *spi,
+ struct spi_transfer *t)
+{
+ struct uniphier_spi_priv *priv = spi_master_get_devdata(master);
+ unsigned int bpw = bytes_per_word(priv->bits_per_word);
+
+ if ((!master->dma_tx && !master->dma_rx)
+ || (!master->dma_tx && t->tx_buf)
+ || (!master->dma_rx && t->rx_buf))
+ return false;
+
+ return DIV_ROUND_UP(t->len, bpw) > SSI_FIFO_DEPTH;
+}
+
+static void uniphier_spi_dma_rxcb(void *data)
+{
+ struct spi_master *master = data;
+ struct uniphier_spi_priv *priv = spi_master_get_devdata(master);
+ int state = atomic_fetch_andnot(SSI_DMA_RX_BUSY, &priv->dma_busy);
+
+ uniphier_spi_irq_disable(priv, SSI_IE_RXRE);
+
+ if (!(state & SSI_DMA_TX_BUSY))
+ spi_finalize_current_transfer(master);
+}
+
+static void uniphier_spi_dma_txcb(void *data)
+{
+ struct spi_master *master = data;
+ struct uniphier_spi_priv *priv = spi_master_get_devdata(master);
+ int state = atomic_fetch_andnot(SSI_DMA_TX_BUSY, &priv->dma_busy);
+
+ uniphier_spi_irq_disable(priv, SSI_IE_TXRE);
+
+ if (!(state & SSI_DMA_RX_BUSY))
+ spi_finalize_current_transfer(master);
+}
+
+static int uniphier_spi_transfer_one_dma(struct spi_master *master,
+ struct spi_device *spi,
+ struct spi_transfer *t)
+{
+ struct uniphier_spi_priv *priv = spi_master_get_devdata(master);
+ struct dma_async_tx_descriptor *rxdesc = NULL, *txdesc = NULL;
+ int buswidth;
+
+ atomic_set(&priv->dma_busy, 0);
+
+ uniphier_spi_set_fifo_threshold(priv, SSI_FIFO_BURST_NUM);
+
+ if (priv->bits_per_word <= 8)
+ buswidth = DMA_SLAVE_BUSWIDTH_1_BYTE;
+ else if (priv->bits_per_word <= 16)
+ buswidth = DMA_SLAVE_BUSWIDTH_2_BYTES;
+ else
+ buswidth = DMA_SLAVE_BUSWIDTH_4_BYTES;
+
+ if (priv->rx_buf) {
+ struct dma_slave_config rxconf = {
+ .direction = DMA_DEV_TO_MEM,
+ .src_addr = priv->base_dma_addr + SSI_RXDR,
+ .src_addr_width = buswidth,
+ .src_maxburst = SSI_FIFO_BURST_NUM,
+ };
+
+ dmaengine_slave_config(master->dma_rx, &rxconf);
+
+ rxdesc = dmaengine_prep_slave_sg(
+ master->dma_rx,
+ t->rx_sg.sgl, t->rx_sg.nents,
+ DMA_DEV_TO_MEM, DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
+ if (!rxdesc)
+ goto out_err_prep;
+
+ rxdesc->callback = uniphier_spi_dma_rxcb;
+ rxdesc->callback_param = master;
+
+ uniphier_spi_irq_enable(priv, SSI_IE_RXRE);
+ atomic_or(SSI_DMA_RX_BUSY, &priv->dma_busy);
+
+ dmaengine_submit(rxdesc);
+ dma_async_issue_pending(master->dma_rx);
+ }
+
+ if (priv->tx_buf) {
+ struct dma_slave_config txconf = {
+ .direction = DMA_MEM_TO_DEV,
+ .dst_addr = priv->base_dma_addr + SSI_TXDR,
+ .dst_addr_width = buswidth,
+ .dst_maxburst = SSI_FIFO_BURST_NUM,
+ };
+
+ dmaengine_slave_config(master->dma_tx, &txconf);
+
+ txdesc = dmaengine_prep_slave_sg(
+ master->dma_tx,
+ t->tx_sg.sgl, t->tx_sg.nents,
+ DMA_MEM_TO_DEV, DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
+ if (!txdesc)
+ goto out_err_prep;
+
+ txdesc->callback = uniphier_spi_dma_txcb;
+ txdesc->callback_param = master;
+
+ uniphier_spi_irq_enable(priv, SSI_IE_TXRE);
+ atomic_or(SSI_DMA_TX_BUSY, &priv->dma_busy);
+
+ dmaengine_submit(txdesc);
+ dma_async_issue_pending(master->dma_tx);
+ }
+
+ /* signal that we need to wait for completion */
+ return (priv->tx_buf || priv->rx_buf);
+
+out_err_prep:
+ if (rxdesc)
+ dmaengine_terminate_sync(master->dma_rx);
+
+ return -EINVAL;
+}
+
static int uniphier_spi_transfer_one_irq(struct spi_master *master,
struct spi_device *spi,
struct spi_transfer *t)
@@ -346,12 +480,12 @@ static int uniphier_spi_transfer_one_irq(struct spi_master *master,
uniphier_spi_fill_tx_fifo(priv);
- uniphier_spi_irq_enable(spi, SSI_IE_RCIE | SSI_IE_RORIE);
+ uniphier_spi_irq_enable(priv, SSI_IE_RCIE | SSI_IE_RORIE);
time_left = wait_for_completion_timeout(&priv->xfer_done,
msecs_to_jiffies(SSI_TIMEOUT_MS));
- uniphier_spi_irq_disable(spi, SSI_IE_RCIE | SSI_IE_RORIE);
+ uniphier_spi_irq_disable(priv, SSI_IE_RCIE | SSI_IE_RORIE);
if (!time_left) {
dev_err(dev, "transfer timeout.\n");
@@ -395,6 +529,7 @@ static int uniphier_spi_transfer_one(struct spi_master *master,
{
struct uniphier_spi_priv *priv = spi_master_get_devdata(master);
unsigned long threshold;
+ bool use_dma;
/* Terminate and return success for 0 byte length transfer */
if (!t->len)
@@ -402,6 +537,10 @@ static int uniphier_spi_transfer_one(struct spi_master *master,
uniphier_spi_setup_transfer(spi, t);
+ use_dma = master->can_dma ? master->can_dma(master, spi, t) : false;
+ if (use_dma)
+ return uniphier_spi_transfer_one_dma(master, spi, t);
+
/*
* If the transfer operation will take longer than
* SSI_POLL_TIMEOUT_US, it should use irq.
@@ -432,6 +571,32 @@ static int uniphier_spi_unprepare_transfer_hardware(struct spi_master *master)
return 0;
}
+static void uniphier_spi_handle_err(struct spi_master *master,
+ struct spi_message *msg)
+{
+ struct uniphier_spi_priv *priv = spi_master_get_devdata(master);
+ u32 val;
+
+ /* stop running spi transfer */
+ writel(0, priv->base + SSI_CTL);
+
+ /* reset FIFOs */
+ val = SSI_FC_TXFFL | SSI_FC_RXFFL;
+ writel(val, priv->base + SSI_FC);
+
+ uniphier_spi_irq_disable(priv, SSI_IE_ALL_MASK);
+
+ if (atomic_read(&priv->dma_busy) & SSI_DMA_TX_BUSY) {
+ dmaengine_terminate_async(master->dma_tx);
+ atomic_andnot(SSI_DMA_TX_BUSY, &priv->dma_busy);
+ }
+
+ if (atomic_read(&priv->dma_busy) & SSI_DMA_RX_BUSY) {
+ dmaengine_terminate_async(master->dma_rx);
+ atomic_andnot(SSI_DMA_RX_BUSY, &priv->dma_busy);
+ }
+}
+
static irqreturn_t uniphier_spi_handler(int irq, void *dev_id)
{
struct uniphier_spi_priv *priv = dev_id;
@@ -477,6 +642,9 @@ static int uniphier_spi_probe(struct platform_device *pdev)
{
struct uniphier_spi_priv *priv;
struct spi_master *master;
+ struct resource *res;
+ struct dma_slave_caps caps;
+ u32 dma_tx_burst = 0, dma_rx_burst = 0;
unsigned long clk_rate;
int irq;
int ret;
@@ -491,11 +659,13 @@ static int uniphier_spi_probe(struct platform_device *pdev)
priv->master = master;
priv->is_save_param = false;
- priv->base = devm_platform_ioremap_resource(pdev, 0);
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ priv->base = devm_ioremap_resource(&pdev->dev, res);
if (IS_ERR(priv->base)) {
ret = PTR_ERR(priv->base);
goto out_master_put;
}
+ priv->base_dma_addr = res->start;
priv->clk = devm_clk_get(&pdev->dev, NULL);
if (IS_ERR(priv->clk)) {
@@ -538,7 +708,45 @@ static int uniphier_spi_probe(struct platform_device *pdev)
= uniphier_spi_prepare_transfer_hardware;
master->unprepare_transfer_hardware
= uniphier_spi_unprepare_transfer_hardware;
+ master->handle_err = uniphier_spi_handle_err;
+ master->can_dma = uniphier_spi_can_dma;
+
master->num_chipselect = 1;
+ master->flags = SPI_CONTROLLER_MUST_RX | SPI_CONTROLLER_MUST_TX;
+
+ master->dma_tx = dma_request_chan(&pdev->dev, "tx");
+ if (IS_ERR_OR_NULL(master->dma_tx)) {
+ if (PTR_ERR(master->dma_tx) == -EPROBE_DEFER)
+ goto out_disable_clk;
+ master->dma_tx = NULL;
+ dma_tx_burst = INT_MAX;
+ } else {
+ ret = dma_get_slave_caps(master->dma_tx, &caps);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to get TX DMA capacities: %d\n",
+ ret);
+ goto out_disable_clk;
+ }
+ dma_tx_burst = caps.max_burst;
+ }
+
+ master->dma_rx = dma_request_chan(&pdev->dev, "rx");
+ if (IS_ERR_OR_NULL(master->dma_rx)) {
+ if (PTR_ERR(master->dma_rx) == -EPROBE_DEFER)
+ goto out_disable_clk;
+ master->dma_rx = NULL;
+ dma_rx_burst = INT_MAX;
+ } else {
+ ret = dma_get_slave_caps(master->dma_rx, &caps);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to get RX DMA capacities: %d\n",
+ ret);
+ goto out_disable_clk;
+ }
+ dma_rx_burst = caps.max_burst;
+ }
+
+ master->max_dma_len = min(dma_tx_burst, dma_rx_burst);
ret = devm_spi_register_master(&pdev->dev, master);
if (ret)
@@ -558,6 +766,11 @@ static int uniphier_spi_remove(struct platform_device *pdev)
{
struct uniphier_spi_priv *priv = platform_get_drvdata(pdev);
+ if (priv->master->dma_tx)
+ dma_release_channel(priv->master->dma_tx);
+ if (priv->master->dma_rx)
+ dma_release_channel(priv->master->dma_rx);
+
clk_disable_unprepare(priv->clk);
return 0;
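
In the uniphier driver both DMA directions can complete in either order, so each callback clears only its own bit in dma_busy, and the transfer is finalized by whichever callback sees the other bit already gone. atomic_fetch_andnot() returns the value before the clear, which closes the race where both callbacks would observe the other as still busy. A condensed sketch of that handoff (the helper shape is mine):

    /* Sketch: last-one-out finalization across two DMA callbacks. */
    static void dma_done(struct spi_master *master, atomic_t *busy,
                         int my_bit, int other_bit)
    {
            /* atomically clear our bit; 'old' is the pre-clear state */
            int old = atomic_fetch_andnot(my_bit, busy);

            if (!(old & other_bit))
                    spi_finalize_current_transfer(master);
    }
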
diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c
index 8994545367a2..38b4c78df506 100644
--- a/drivers/spi/spi.c
+++ b/drivers/spi/spi.c
@@ -1674,6 +1674,13 @@ void spi_finalize_current_message(struct spi_controller *ctlr)
}
}
+ if (unlikely(ctlr->ptp_sts_supported)) {
+ list_for_each_entry(xfer, &mesg->transfers, transfer_list) {
+ WARN_ON_ONCE(xfer->ptp_sts && !xfer->timestamped_pre);
+ WARN_ON_ONCE(xfer->ptp_sts && !xfer->timestamped_post);
+ }
+ }
+
spi_unmap_msg(ctlr, mesg);
if (ctlr->cur_msg_prepared && ctlr->unprepare_message) {
@@ -2451,6 +2458,8 @@ static int spi_get_gpio_descs(struct spi_controller *ctlr)
int nb, i;
struct gpio_desc **cs;
struct device *dev = &ctlr->dev;
+ unsigned long native_cs_mask = 0;
+ unsigned int num_cs_gpios = 0;
nb = gpiod_count(dev, "cs");
ctlr->num_chipselect = max_t(int, nb, ctlr->num_chipselect);
@@ -2492,7 +2501,22 @@ static int spi_get_gpio_descs(struct spi_controller *ctlr)
if (!gpioname)
return -ENOMEM;
gpiod_set_consumer_name(cs[i], gpioname);
+ num_cs_gpios++;
+ continue;
}
+
+ if (ctlr->max_native_cs && i >= ctlr->max_native_cs) {
+ dev_err(dev, "Invalid native chip select %d\n", i);
+ return -EINVAL;
+ }
+ native_cs_mask |= BIT(i);
+ }
+
+ ctlr->unused_native_cs = ffz(native_cs_mask);
+ if (num_cs_gpios && ctlr->max_native_cs &&
+ ctlr->unused_native_cs >= ctlr->max_native_cs) {
+ dev_err(dev, "No unused native chip select available\n");
+ return -EINVAL;
}
return 0;
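
This is the generic home of the logic deleted from sh-msiof earlier in the series: every chip select without a GPIO marks a native line as in use, and ffz() over that mask picks the lowest free native line for GPIO-driven devices to park the hardware on. The selection itself, as a small standalone program:

    #include <stdio.h>

    /* Equivalent of ffz(): index of the lowest zero bit in the mask of
     * native chip selects that are actually wired to devices. */
    static unsigned int first_unused_cs(unsigned long native_cs_mask)
    {
            unsigned int i = 0;

            while (native_cs_mask & (1UL << i))
                    i++;
            return i;
    }

    int main(void)
    {
            /* native CS 0 and 2 in use (mask 0x5) -> spare line is 1 */
            printf("%u\n", first_unused_cs(0x5UL));
            return 0;
    }
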
diff --git a/drivers/ssb/driver_extif.c b/drivers/ssb/driver_extif.c
index 06b68dd6e022..bc275968fcc6 100644
--- a/drivers/ssb/driver_extif.c
+++ b/drivers/ssb/driver_extif.c
@@ -63,7 +63,7 @@ int ssb_extif_serial_init(struct ssb_extif *extif, struct ssb_serial_port *ports
for (i = 0; i < 2; i++) {
void __iomem *uart_regs;
- uart_regs = ioremap_nocache(SSB_EUART, 16);
+ uart_regs = ioremap(SSB_EUART, 16);
if (uart_regs) {
uart_regs += (i * 8);
diff --git a/drivers/ssb/driver_pcicore.c b/drivers/ssb/driver_pcicore.c
index 6a5622e0ded5..c1186415896b 100644
--- a/drivers/ssb/driver_pcicore.c
+++ b/drivers/ssb/driver_pcicore.c
@@ -122,7 +122,7 @@ static int ssb_extpci_read_config(struct ssb_pcicore *pc,
if (unlikely(!addr))
goto out;
err = -ENOMEM;
- mmio = ioremap_nocache(addr, len);
+ mmio = ioremap(addr, len);
if (!mmio)
goto out;
@@ -168,7 +168,7 @@ static int ssb_extpci_write_config(struct ssb_pcicore *pc,
if (unlikely(!addr))
goto out;
err = -ENOMEM;
- mmio = ioremap_nocache(addr, len);
+ mmio = ioremap(addr, len);
if (!mmio)
goto out;
@@ -382,7 +382,7 @@ static void ssb_pcicore_init_hostmode(struct ssb_pcicore *pc)
/* Ok, ready to run, register it to the system.
* The following needs to change if we want to port hostmode
* to a non-MIPS platform. */
- ssb_pcicore_controller.io_map_base = (unsigned long)ioremap_nocache(SSB_PCI_MEM, 0x04000000);
+ ssb_pcicore_controller.io_map_base = (unsigned long)ioremap(SSB_PCI_MEM, 0x04000000);
set_io_port_base(ssb_pcicore_controller.io_map_base);
/* Give some time to the PCI controller to configure itself with the new
* values. Not waiting at this point causes crashes of the machine. */
diff --git a/drivers/staging/Kconfig b/drivers/staging/Kconfig
index eaf753b70ec5..baccd7c883cc 100644
--- a/drivers/staging/Kconfig
+++ b/drivers/staging/Kconfig
@@ -42,10 +42,6 @@ source "drivers/staging/rtl8188eu/Kconfig"
source "drivers/staging/rts5208/Kconfig"
-source "drivers/staging/octeon/Kconfig"
-
-source "drivers/staging/octeon-usb/Kconfig"
-
source "drivers/staging/vt6655/Kconfig"
source "drivers/staging/vt6656/Kconfig"
@@ -116,8 +112,6 @@ source "drivers/staging/fieldbus/Kconfig"
source "drivers/staging/kpc2000/Kconfig"
-source "drivers/staging/isdn/Kconfig"
-
source "drivers/staging/wusbcore/Kconfig"
source "drivers/staging/uwb/Kconfig"
diff --git a/drivers/staging/Makefile b/drivers/staging/Makefile
index 0a4396c9067b..fdd03fd6e704 100644
--- a/drivers/staging/Makefile
+++ b/drivers/staging/Makefile
@@ -12,8 +12,6 @@ obj-$(CONFIG_R8712U) += rtl8712/
obj-$(CONFIG_R8188EU) += rtl8188eu/
obj-$(CONFIG_RTS5208) += rts5208/
obj-$(CONFIG_NETLOGIC_XLR_NET) += netlogic/
-obj-$(CONFIG_OCTEON_ETHERNET) += octeon/
-obj-$(CONFIG_OCTEON_USB) += octeon-usb/
obj-$(CONFIG_VT6655) += vt6655/
obj-$(CONFIG_VT6656) += vt6656/
obj-$(CONFIG_VME_BUS) += vme/
@@ -48,10 +46,9 @@ obj-$(CONFIG_STAGING_GASKET_FRAMEWORK) += gasket/
obj-$(CONFIG_XIL_AXIS_FIFO) += axis-fifo/
obj-$(CONFIG_FIELDBUS_DEV) += fieldbus/
obj-$(CONFIG_KPC2000) += kpc2000/
-obj-$(CONFIG_ISDN_CAPI) += isdn/
obj-$(CONFIG_UWB) += uwb/
obj-$(CONFIG_USB_WUSB) += wusbcore/
-obj-$(CONFIG_EXFAT_FS) += exfat/
+obj-$(CONFIG_STAGING_EXFAT_FS) += exfat/
obj-$(CONFIG_QLGE) += qlge/
obj-$(CONFIG_NET_VENDOR_HP) += hp/
obj-$(CONFIG_WFX) += wfx/
diff --git a/drivers/staging/android/ashmem.c b/drivers/staging/android/ashmem.c
index 74d497d39c5a..5891d0744a76 100644
--- a/drivers/staging/android/ashmem.c
+++ b/drivers/staging/android/ashmem.c
@@ -537,14 +537,14 @@ static int set_name(struct ashmem_area *asma, void __user *name)
len = strncpy_from_user(local_name, name, ASHMEM_NAME_LEN);
if (len < 0)
return len;
- if (len == ASHMEM_NAME_LEN)
- local_name[ASHMEM_NAME_LEN - 1] = '\0';
+
mutex_lock(&ashmem_mutex);
/* cannot change an existing mapping's name */
if (asma->file)
ret = -EINVAL;
else
- strcpy(asma->name + ASHMEM_NAME_PREFIX_LEN, local_name);
+ strscpy(asma->name + ASHMEM_NAME_PREFIX_LEN, local_name,
+ ASHMEM_NAME_LEN);
mutex_unlock(&ashmem_mutex);
return ret;
diff --git a/drivers/staging/android/ion/ion.c b/drivers/staging/android/ion/ion.c
index c394686a8e7d..38b51eace4f9 100644
--- a/drivers/staging/android/ion/ion.c
+++ b/drivers/staging/android/ion/ion.c
@@ -274,18 +274,6 @@ static void ion_dma_buf_release(struct dma_buf *dmabuf)
_ion_buffer_destroy(buffer);
}
-static void *ion_dma_buf_kmap(struct dma_buf *dmabuf, unsigned long offset)
-{
- struct ion_buffer *buffer = dmabuf->priv;
-
- return buffer->vaddr + offset * PAGE_SIZE;
-}
-
-static void ion_dma_buf_kunmap(struct dma_buf *dmabuf, unsigned long offset,
- void *ptr)
-{
-}
-
static int ion_dma_buf_begin_cpu_access(struct dma_buf *dmabuf,
enum dma_data_direction direction)
{
@@ -349,8 +337,6 @@ static const struct dma_buf_ops dma_buf_ops = {
.detach = ion_dma_buf_detatch,
.begin_cpu_access = ion_dma_buf_begin_cpu_access,
.end_cpu_access = ion_dma_buf_end_cpu_access,
- .map = ion_dma_buf_kmap,
- .unmap = ion_dma_buf_kunmap,
};
static int ion_alloc(size_t len, unsigned int heap_id_mask, unsigned int flags)
diff --git a/drivers/staging/axis-fifo/axis-fifo.c b/drivers/staging/axis-fifo/axis-fifo.c
index 39e6c59df1e9..5801067e7c1b 100644
--- a/drivers/staging/axis-fifo/axis-fifo.c
+++ b/drivers/staging/axis-fifo/axis-fifo.c
@@ -16,7 +16,7 @@
#include <linux/kernel.h>
#include <linux/wait.h>
-#include <linux/spinlock_types.h>
+#include <linux/mutex.h>
#include <linux/device.h>
#include <linux/cdev.h>
#include <linux/init.h>
@@ -133,9 +133,9 @@ struct axis_fifo {
int has_tx_fifo; /* whether the IP has the tx fifo enabled */
wait_queue_head_t read_queue; /* wait queue for asynchronous read */
- spinlock_t read_queue_lock; /* lock for reading waitqueue */
+ struct mutex read_lock; /* lock for reading */
wait_queue_head_t write_queue; /* wait queue for asynchronous write */
- spinlock_t write_queue_lock; /* lock for writing waitqueue */
+ struct mutex write_lock; /* lock for writing */
unsigned int write_flags; /* write file flags */
unsigned int read_flags; /* read file flags */
@@ -336,7 +336,21 @@ static void reset_ip_core(struct axis_fifo *fifo)
iowrite32(XLLF_INT_ALL_MASK, fifo->base_addr + XLLF_ISR_OFFSET);
}
-/* reads a single packet from the fifo as dictated by the tlast signal */
+/**
+ * axis_fifo_read() - Read a packet from AXIS-FIFO character device.
+ * @f: Open file.
+ * @buf: User space buffer to read to.
+ * @len: User space buffer length.
+ * @off: Buffer offset.
+ *
+ * As defined by the device's documentation, we need to check the device's
+ * occupancy before reading the length register and then the data. All these
+ * operations must be executed atomically, in order, and with nothing else
+ * in between.
+ *
+ * Returns the number of bytes read from the device or a negative error code
+ * on failure.
+ */
static ssize_t axis_fifo_read(struct file *f, char __user *buf,
size_t len, loff_t *off)
{
@@ -350,36 +364,37 @@ static ssize_t axis_fifo_read(struct file *f, char __user *buf,
u32 tmp_buf[READ_BUF_SIZE];
if (fifo->read_flags & O_NONBLOCK) {
- /* opened in non-blocking mode
- * return if there are no packets available
+ /*
+ * Device opened in non-blocking mode. Try to lock it and then
+ * check if any packet is available.
*/
- if (!ioread32(fifo->base_addr + XLLF_RDFO_OFFSET))
+ if (!mutex_trylock(&fifo->read_lock))
return -EAGAIN;
+
+ if (!ioread32(fifo->base_addr + XLLF_RDFO_OFFSET)) {
+ ret = -EAGAIN;
+ goto end_unlock;
+ }
} else {
/* opened in blocking mode
* wait for a packet available interrupt (or timeout)
* if nothing is currently available
*/
- spin_lock_irq(&fifo->read_queue_lock);
- ret = wait_event_interruptible_lock_irq_timeout
- (fifo->read_queue,
- ioread32(fifo->base_addr + XLLF_RDFO_OFFSET),
- fifo->read_queue_lock,
- (read_timeout >= 0) ? msecs_to_jiffies(read_timeout) :
+ mutex_lock(&fifo->read_lock);
+ ret = wait_event_interruptible_timeout(fifo->read_queue,
+ ioread32(fifo->base_addr + XLLF_RDFO_OFFSET),
+ (read_timeout >= 0) ? msecs_to_jiffies(read_timeout) :
MAX_SCHEDULE_TIMEOUT);
- spin_unlock_irq(&fifo->read_queue_lock);
- if (ret == 0) {
- /* timeout occurred */
- dev_dbg(fifo->dt_device, "read timeout");
- return -EAGAIN;
- } else if (ret == -ERESTARTSYS) {
- /* signal received */
- return -ERESTARTSYS;
- } else if (ret < 0) {
- dev_err(fifo->dt_device, "wait_event_interruptible_timeout() error in read (ret=%i)\n",
- ret);
- return ret;
+ if (ret <= 0) {
+ if (ret == 0) {
+ ret = -EAGAIN;
+ } else if (ret != -ERESTARTSYS) {
+ dev_err(fifo->dt_device, "wait_event_interruptible_timeout() error in read (ret=%i)\n",
+ ret);
+ }
+
+ goto end_unlock;
}
}
@@ -387,14 +402,16 @@ static ssize_t axis_fifo_read(struct file *f, char __user *buf,
if (!bytes_available) {
dev_err(fifo->dt_device, "received a packet of length 0 - fifo core will be reset\n");
reset_ip_core(fifo);
- return -EIO;
+ ret = -EIO;
+ goto end_unlock;
}
if (bytes_available > len) {
dev_err(fifo->dt_device, "user read buffer too small (available bytes=%zu user buffer bytes=%zu) - fifo core will be reset\n",
bytes_available, len);
reset_ip_core(fifo);
- return -EINVAL;
+ ret = -EINVAL;
+ goto end_unlock;
}
if (bytes_available % sizeof(u32)) {
@@ -403,7 +420,8 @@ static ssize_t axis_fifo_read(struct file *f, char __user *buf,
*/
dev_err(fifo->dt_device, "received a packet that isn't word-aligned - fifo core will be reset\n");
reset_ip_core(fifo);
- return -EIO;
+ ret = -EIO;
+ goto end_unlock;
}
words_available = bytes_available / sizeof(u32);
@@ -423,16 +441,37 @@ static ssize_t axis_fifo_read(struct file *f, char __user *buf,
if (copy_to_user(buf + copied * sizeof(u32), tmp_buf,
copy * sizeof(u32))) {
reset_ip_core(fifo);
- return -EFAULT;
+ ret = -EFAULT;
+ goto end_unlock;
}
copied += copy;
words_available -= copy;
}
- return bytes_available;
+ ret = bytes_available;
+
+end_unlock:
+ mutex_unlock(&fifo->read_lock);
+
+ return ret;
}
+/**
+ * axis_fifo_write() - Write buffer to AXIS-FIFO character device.
+ * @f: Open file.
+ * @buf: User space buffer to write to the device.
+ * @len: User space buffer length.
+ * @off: Buffer offset.
+ *
+ * As defined by the device's documentation, we need to write to the device's
+ * data buffer and then to the device's packet length register atomically.
+ * Also, we need to take the lock before checking whether the device has
+ * enough room, to avoid concurrency issues.
+ *
+ * Returns the number of bytes written to the device or a negative error code
+ * on failure.
+ */
static ssize_t axis_fifo_write(struct file *f, const char __user *buf,
size_t len, loff_t *off)
{
@@ -465,12 +504,17 @@ static ssize_t axis_fifo_write(struct file *f, const char __user *buf,
}
if (fifo->write_flags & O_NONBLOCK) {
- /* opened in non-blocking mode
- * return if there is not enough room available in the fifo
+ /*
+ * Device opened in non-blocking mode. Try to lock it and then
+ * check if there is any room to write the given buffer.
*/
+ if (!mutex_trylock(&fifo->write_lock))
+ return -EAGAIN;
+
if (words_to_write > ioread32(fifo->base_addr +
XLLF_TDFV_OFFSET)) {
- return -EAGAIN;
+ ret = -EAGAIN;
+ goto end_unlock;
}
} else {
/* opened in blocking mode */
@@ -478,30 +522,22 @@ static ssize_t axis_fifo_write(struct file *f, const char __user *buf,
/* wait for an interrupt (or timeout) if there isn't
* currently enough room in the fifo
*/
- spin_lock_irq(&fifo->write_queue_lock);
- ret = wait_event_interruptible_lock_irq_timeout
- (fifo->write_queue,
- ioread32(fifo->base_addr + XLLF_TDFV_OFFSET)
+ mutex_lock(&fifo->write_lock);
+ ret = wait_event_interruptible_timeout(fifo->write_queue,
+ ioread32(fifo->base_addr + XLLF_TDFV_OFFSET)
>= words_to_write,
- fifo->write_queue_lock,
- (write_timeout >= 0) ?
- msecs_to_jiffies(write_timeout) :
+ (write_timeout >= 0) ? msecs_to_jiffies(write_timeout) :
MAX_SCHEDULE_TIMEOUT);
- spin_unlock_irq(&fifo->write_queue_lock);
- if (ret == 0) {
- /* timeout occurred */
- dev_dbg(fifo->dt_device, "write timeout\n");
- return -EAGAIN;
- } else if (ret == -ERESTARTSYS) {
- /* signal received */
- return -ERESTARTSYS;
- } else if (ret < 0) {
- /* unknown error */
- dev_err(fifo->dt_device,
- "wait_event_interruptible_timeout() error in write (ret=%i)\n",
- ret);
- return ret;
+ if (ret <= 0) {
+ if (ret == 0) {
+ ret = -EAGAIN;
+ } else if (ret != -ERESTARTSYS) {
+ dev_err(fifo->dt_device, "wait_event_interruptible_timeout() error in write (ret=%i)\n",
+ ret);
+ }
+
+ goto end_unlock;
}
}
@@ -515,7 +551,8 @@ static ssize_t axis_fifo_write(struct file *f, const char __user *buf,
if (copy_from_user(tmp_buf, buf + copied * sizeof(u32),
copy * sizeof(u32))) {
reset_ip_core(fifo);
- return -EFAULT;
+ ret = -EFAULT;
+ goto end_unlock;
}
for (i = 0; i < copy; i++)
@@ -526,10 +563,15 @@ static ssize_t axis_fifo_write(struct file *f, const char __user *buf,
words_to_write -= copy;
}
+ ret = copied * sizeof(u32);
+
/* write packet size to fifo */
- iowrite32(copied * sizeof(u32), fifo->base_addr + XLLF_TLR_OFFSET);
+ iowrite32(ret, fifo->base_addr + XLLF_TLR_OFFSET);
+
+end_unlock:
+ mutex_unlock(&fifo->write_lock);
- return (ssize_t)copied * sizeof(u32);
+ return ret;
}
static irqreturn_t axis_fifo_irq(int irq, void *dw)
@@ -789,8 +831,8 @@ static int axis_fifo_probe(struct platform_device *pdev)
init_waitqueue_head(&fifo->read_queue);
init_waitqueue_head(&fifo->write_queue);
- spin_lock_init(&fifo->read_queue_lock);
- spin_lock_init(&fifo->write_queue_lock);
+ mutex_init(&fifo->read_lock);
+ mutex_init(&fifo->write_lock);
/* ----------------------------
* init device memory space
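
The axis-fifo rework trades wait_event_interruptible_lock_irq_timeout() under a spinlock for a mutex plus plain wait_event_interruptible_timeout(), which also forces the O_NONBLOCK path to use mutex_trylock() so it never sleeps on the lock. The resulting control flow, condensed (data_available() is a hypothetical stand-in for the RDFO occupancy read):

    /* Sketch of the read-side locking after the conversion. */
    if (fifo->read_flags & O_NONBLOCK) {
            if (!mutex_trylock(&fifo->read_lock))
                    return -EAGAIN;          /* would block on the lock */
            if (!data_available(fifo)) {
                    ret = -EAGAIN;           /* would block on the data */
                    goto end_unlock;
            }
    } else {
            mutex_lock(&fifo->read_lock);
            ret = wait_event_interruptible_timeout(fifo->read_queue,
                                                   data_available(fifo),
                                                   timeout);
            if (ret <= 0) {
                    if (ret == 0)
                            ret = -EAGAIN;   /* timed out */
                    goto end_unlock;
            }
    }
    /* ... drain the packet under the mutex ... */
    end_unlock:
            mutex_unlock(&fifo->read_lock);
            return ret;
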
diff --git a/drivers/staging/comedi/drivers/das6402.c b/drivers/staging/comedi/drivers/das6402.c
index f99211ec46de..04e224f8b779 100644
--- a/drivers/staging/comedi/drivers/das6402.c
+++ b/drivers/staging/comedi/drivers/das6402.c
@@ -279,7 +279,7 @@ static int das6402_ai_check_chanlist(struct comedi_device *dev,
if (aref0 == AREF_DIFF && chan > (s->n_chan / 2)) {
dev_dbg(dev->class_dev,
- "chanlist differential channel to large\n");
+ "chanlist differential channel too large\n");
return -EINVAL;
}
}
diff --git a/drivers/staging/comedi/drivers/ni_routes.c b/drivers/staging/comedi/drivers/ni_routes.c
index 673d732dcb8f..8f398b30f5bf 100644
--- a/drivers/staging/comedi/drivers/ni_routes.c
+++ b/drivers/staging/comedi/drivers/ni_routes.c
@@ -72,9 +72,6 @@ static int ni_find_device_routes(const char *device_family,
}
}
- if (!rv)
- return -ENODATA;
-
/* Second, find the set of routes valid for this device. */
for (i = 0; ni_device_routes_list[i]; ++i) {
if (memcmp(ni_device_routes_list[i]->device, board_name,
@@ -84,12 +81,12 @@ static int ni_find_device_routes(const char *device_family,
}
}
- if (!dr)
- return -ENODATA;
-
tables->route_values = rv;
tables->valid_routes = dr;
+ if (!rv || !dr)
+ return -ENODATA;
+
return 0;
}
@@ -487,6 +484,9 @@ int ni_find_route_source(const u8 src_sel_reg_value, int dest,
{
int src;
+ if (!tables->route_values)
+ return -EINVAL;
+
dest = B(dest); /* subtract NI names offset */
/* ensure we are not going to under/over run the route value table */
if (dest < 0 || dest >= NI_NUM_NAMES)
diff --git a/drivers/staging/exfat/Kconfig b/drivers/staging/exfat/Kconfig
index 0130019cbec2..292a19dfcaf5 100644
--- a/drivers/staging/exfat/Kconfig
+++ b/drivers/staging/exfat/Kconfig
@@ -1,41 +1,41 @@
# SPDX-License-Identifier: GPL-2.0
-config EXFAT_FS
+config STAGING_EXFAT_FS
tristate "exFAT fs support"
depends on BLOCK
select NLS
help
This adds support for the exFAT file system.
-config EXFAT_DISCARD
+config STAGING_EXFAT_DISCARD
bool "enable discard support"
- depends on EXFAT_FS
+ depends on STAGING_EXFAT_FS
default y
-config EXFAT_DELAYED_SYNC
+config STAGING_EXFAT_DELAYED_SYNC
bool "enable delayed sync"
- depends on EXFAT_FS
+ depends on STAGING_EXFAT_FS
default n
-config EXFAT_KERNEL_DEBUG
+config STAGING_EXFAT_KERNEL_DEBUG
bool "enable kernel debug features via ioctl"
- depends on EXFAT_FS
+ depends on STAGING_EXFAT_FS
default n
-config EXFAT_DEBUG_MSG
+config STAGING_EXFAT_DEBUG_MSG
bool "print debug messages"
- depends on EXFAT_FS
+ depends on STAGING_EXFAT_FS
default n
-config EXFAT_DEFAULT_CODEPAGE
+config STAGING_EXFAT_DEFAULT_CODEPAGE
int "Default codepage for exFAT"
default 437
- depends on EXFAT_FS
+ depends on STAGING_EXFAT_FS
help
This option should be set to the codepage of your exFAT filesystems.
-config EXFAT_DEFAULT_IOCHARSET
+config STAGING_EXFAT_DEFAULT_IOCHARSET
string "Default iocharset for exFAT"
default "utf8"
- depends on EXFAT_FS
+ depends on STAGING_EXFAT_FS
help
Set this to the default input/output character set you'd like exFAT to use.
diff --git a/drivers/staging/exfat/Makefile b/drivers/staging/exfat/Makefile
index 6c90aec83feb..057556eeca0c 100644
--- a/drivers/staging/exfat/Makefile
+++ b/drivers/staging/exfat/Makefile
@@ -1,6 +1,6 @@
# SPDX-License-Identifier: GPL-2.0-or-later
-obj-$(CONFIG_EXFAT_FS) += exfat.o
+obj-$(CONFIG_STAGING_EXFAT_FS) += exfat.o
exfat-y := exfat_core.o \
exfat_super.o \
diff --git a/drivers/staging/exfat/exfat.h b/drivers/staging/exfat/exfat.h
index 51c665a924b7..4d87360fab35 100644
--- a/drivers/staging/exfat/exfat.h
+++ b/drivers/staging/exfat/exfat.h
@@ -9,7 +9,7 @@
#include <linux/types.h>
#include <linux/buffer_head.h>
-#ifdef CONFIG_EXFAT_KERNEL_DEBUG
+#ifdef CONFIG_STAGING_EXFAT_KERNEL_DEBUG
/* For Debugging Purpose */
/* IOCTL code 'f' used by
* - file systems typically #0~0x1F
@@ -22,9 +22,9 @@
#define EXFAT_DEBUGFLAGS_INVALID_UMOUNT 0x01
#define EXFAT_DEBUGFLAGS_ERROR_RW 0x02
-#endif /* CONFIG_EXFAT_KERNEL_DEBUG */
+#endif /* CONFIG_STAGING_EXFAT_KERNEL_DEBUG */
-#ifdef CONFIG_EXFAT_DEBUG_MSG
+#ifdef CONFIG_STAGING_EXFAT_DEBUG_MSG
#define DEBUG 1
#else
#undef DEBUG
@@ -516,49 +516,6 @@ struct buf_cache_t {
struct buffer_head *buf_bh;
};
-struct fs_func {
- s32 (*alloc_cluster)(struct super_block *sb, s32 num_alloc,
- struct chain_t *p_chain);
- void (*free_cluster)(struct super_block *sb, struct chain_t *p_chain,
- s32 do_relse);
- s32 (*count_used_clusters)(struct super_block *sb);
-
- s32 (*init_dir_entry)(struct super_block *sb, struct chain_t *p_dir,
- s32 entry, u32 type, u32 start_clu, u64 size);
- s32 (*init_ext_entry)(struct super_block *sb, struct chain_t *p_dir,
- s32 entry, s32 num_entries,
- struct uni_name_t *p_uniname,
- struct dos_name_t *p_dosname);
- s32 (*find_dir_entry)(struct super_block *sb, struct chain_t *p_dir,
- struct uni_name_t *p_uniname, s32 num_entries,
- struct dos_name_t *p_dosname, u32 type);
- void (*delete_dir_entry)(struct super_block *sb,
- struct chain_t *p_dir, s32 entry,
- s32 offset, s32 num_entries);
- void (*get_uni_name_from_ext_entry)(struct super_block *sb,
- struct chain_t *p_dir, s32 entry,
- u16 *uniname);
- s32 (*count_ext_entries)(struct super_block *sb,
- struct chain_t *p_dir, s32 entry,
- struct dentry_t *p_entry);
- s32 (*calc_num_entries)(struct uni_name_t *p_uniname);
-
- u32 (*get_entry_type)(struct dentry_t *p_entry);
- void (*set_entry_type)(struct dentry_t *p_entry, u32 type);
- u32 (*get_entry_attr)(struct dentry_t *p_entry);
- void (*set_entry_attr)(struct dentry_t *p_entry, u32 attr);
- u8 (*get_entry_flag)(struct dentry_t *p_entry);
- void (*set_entry_flag)(struct dentry_t *p_entry, u8 flag);
- u32 (*get_entry_clu0)(struct dentry_t *p_entry);
- void (*set_entry_clu0)(struct dentry_t *p_entry, u32 clu0);
- u64 (*get_entry_size)(struct dentry_t *p_entry);
- void (*set_entry_size)(struct dentry_t *p_entry, u64 size);
- void (*get_entry_time)(struct dentry_t *p_entry,
- struct timestamp_t *tp, u8 mode);
- void (*set_entry_time)(struct dentry_t *p_entry,
- struct timestamp_t *tp, u8 mode);
-};
-
struct fs_info_t {
u32 drv; /* drive ID */
u32 vol_type; /* volume FAT type */
@@ -597,7 +554,6 @@ struct fs_info_t {
u32 dev_ejected; /* block device operation error flag */
- struct fs_func *fs_func;
struct mutex v_mutex;
/* FAT cache */
@@ -661,10 +617,10 @@ struct exfat_mount_options {
/* on error: continue, panic, remount-ro */
unsigned char errors;
-#ifdef CONFIG_EXFAT_DISCARD
+#ifdef CONFIG_STAGING_EXFAT_DISCARD
/* flag set when -o discard is specified and the device supports discard() */
unsigned char discard;
-#endif /* CONFIG_EXFAT_DISCARD */
+#endif /* CONFIG_STAGING_EXFAT_DISCARD */
};
#define EXFAT_HASH_BITS 8
@@ -700,9 +656,9 @@ struct exfat_sb_info {
spinlock_t inode_hash_lock;
struct hlist_head inode_hashtable[EXFAT_HASH_SIZE];
-#ifdef CONFIG_EXFAT_KERNEL_DEBUG
+#ifdef CONFIG_STAGING_EXFAT_KERNEL_DEBUG
long debug_flags;
-#endif /* CONFIG_EXFAT_KERNEL_DEBUG */
+#endif /* CONFIG_STAGING_EXFAT_KERNEL_DEBUG */
};
/*
@@ -829,5 +785,40 @@ int exfat_bdev_write(struct super_block *sb, sector_t secno,
struct buffer_head *bh, u32 num_secs, bool sync);
int exfat_bdev_sync(struct super_block *sb);
+/* cluster operation functions */
+s32 exfat_alloc_cluster(struct super_block *sb, s32 num_alloc,
+ struct chain_t *p_chain);
+void exfat_free_cluster(struct super_block *sb, struct chain_t *p_chain,
+ s32 do_relse);
+s32 exfat_count_used_clusters(struct super_block *sb);
+
+/* dir operation functions */
+s32 exfat_find_dir_entry(struct super_block *sb, struct chain_t *p_dir,
+ struct uni_name_t *p_uniname, s32 num_entries,
+ struct dos_name_t *p_dosname, u32 type);
+void exfat_delete_dir_entry(struct super_block *sb, struct chain_t *p_dir,
+ s32 entry, s32 order, s32 num_entries);
+void exfat_get_uni_name_from_ext_entry(struct super_block *sb,
+ struct chain_t *p_dir, s32 entry,
+ u16 *uniname);
+s32 exfat_count_ext_entries(struct super_block *sb, struct chain_t *p_dir,
+ s32 entry, struct dentry_t *p_entry);
+s32 exfat_calc_num_entries(struct uni_name_t *p_uniname);
+
+/* dir entry getter/setter */
+u32 exfat_get_entry_type(struct dentry_t *p_entry);
+u32 exfat_get_entry_attr(struct dentry_t *p_entry);
+void exfat_set_entry_attr(struct dentry_t *p_entry, u32 attr);
+u8 exfat_get_entry_flag(struct dentry_t *p_entry);
+void exfat_set_entry_flag(struct dentry_t *p_entry, u8 flags);
+u32 exfat_get_entry_clu0(struct dentry_t *p_entry);
+void exfat_set_entry_clu0(struct dentry_t *p_entry, u32 start_clu);
+u64 exfat_get_entry_size(struct dentry_t *p_entry);
+void exfat_set_entry_size(struct dentry_t *p_entry, u64 size);
+void exfat_get_entry_time(struct dentry_t *p_entry, struct timestamp_t *tp,
+ u8 mode);
+void exfat_set_entry_time(struct dentry_t *p_entry, struct timestamp_t *tp,
+ u8 mode);
+
extern const u8 uni_upcase[];
#endif /* _EXFAT_H */
diff --git a/drivers/staging/exfat/exfat_blkdev.c b/drivers/staging/exfat/exfat_blkdev.c
index 7bcd98b13109..0a3dc3568293 100644
--- a/drivers/staging/exfat/exfat_blkdev.c
+++ b/drivers/staging/exfat/exfat_blkdev.c
@@ -31,17 +31,17 @@ void exfat_bdev_close(struct super_block *sb)
}
int exfat_bdev_read(struct super_block *sb, sector_t secno, struct buffer_head **bh,
- u32 num_secs, bool read)
+ u32 num_secs, bool read)
{
struct bd_info_t *p_bd = &(EXFAT_SB(sb)->bd_info);
struct fs_info_t *p_fs = &(EXFAT_SB(sb)->fs_info);
-#ifdef CONFIG_EXFAT_KERNEL_DEBUG
+#ifdef CONFIG_STAGING_EXFAT_KERNEL_DEBUG
struct exfat_sb_info *sbi = EXFAT_SB(sb);
long flags = sbi->debug_flags;
if (flags & EXFAT_DEBUGFLAGS_ERROR_RW)
return -EIO;
-#endif /* CONFIG_EXFAT_KERNEL_DEBUG */
+#endif /* CONFIG_STAGING_EXFAT_KERNEL_DEBUG */
if (!p_bd->opened)
return -ENODEV;
@@ -66,19 +66,19 @@ int exfat_bdev_read(struct super_block *sb, sector_t secno, struct buffer_head *
}
int exfat_bdev_write(struct super_block *sb, sector_t secno, struct buffer_head *bh,
- u32 num_secs, bool sync)
+ u32 num_secs, bool sync)
{
s32 count;
struct buffer_head *bh2;
struct bd_info_t *p_bd = &(EXFAT_SB(sb)->bd_info);
struct fs_info_t *p_fs = &(EXFAT_SB(sb)->fs_info);
-#ifdef CONFIG_EXFAT_KERNEL_DEBUG
+#ifdef CONFIG_STAGING_EXFAT_KERNEL_DEBUG
struct exfat_sb_info *sbi = EXFAT_SB(sb);
long flags = sbi->debug_flags;
if (flags & EXFAT_DEBUGFLAGS_ERROR_RW)
return -EIO;
-#endif /* CONFIG_EXFAT_KERNEL_DEBUG */
+#endif /* CONFIG_STAGING_EXFAT_KERNEL_DEBUG */
if (!p_bd->opened)
return -ENODEV;
@@ -121,13 +121,13 @@ no_bh:
int exfat_bdev_sync(struct super_block *sb)
{
struct bd_info_t *p_bd = &(EXFAT_SB(sb)->bd_info);
-#ifdef CONFIG_EXFAT_KERNEL_DEBUG
+#ifdef CONFIG_STAGING_EXFAT_KERNEL_DEBUG
struct exfat_sb_info *sbi = EXFAT_SB(sb);
long flags = sbi->debug_flags;
if (flags & EXFAT_DEBUGFLAGS_ERROR_RW)
return -EIO;
-#endif /* CONFIG_EXFAT_KERNEL_DEBUG */
+#endif /* CONFIG_STAGING_EXFAT_KERNEL_DEBUG */
if (!p_bd->opened)
return -ENODEV;
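The CONFIG_STAGING_EXFAT_KERNEL_DEBUG blocks repeated in all three block-device helpers above implement a small fault-injection hook: once EXFAT_DEBUGFLAGS_ERROR_RW is set through the debug ioctl later in this patch, every read, write, and sync fails early with -EIO, so error paths can be exercised without real media faults. The common shape, condensed into one function (the function name is hypothetical):

    static int exfat_bdev_op(struct super_block *sb)
    {
            struct bd_info_t *p_bd = &(EXFAT_SB(sb)->bd_info);
    #ifdef CONFIG_STAGING_EXFAT_KERNEL_DEBUG
            /* simulate an I/O error when the test flag is set */
            if (EXFAT_SB(sb)->debug_flags & EXFAT_DEBUGFLAGS_ERROR_RW)
                    return -EIO;
    #endif /* CONFIG_STAGING_EXFAT_KERNEL_DEBUG */
            if (!p_bd->opened)
                    return -ENODEV;

            /* ... perform the actual buffer_head I/O ... */
            return 0;
    }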
diff --git a/drivers/staging/exfat/exfat_core.c b/drivers/staging/exfat/exfat_core.c
index 794000e7bc6f..07b460d01334 100644
--- a/drivers/staging/exfat/exfat_core.c
+++ b/drivers/staging/exfat/exfat_core.c
@@ -177,11 +177,11 @@ static s32 clr_alloc_bitmap(struct super_block *sb, u32 clu)
{
int i, b;
sector_t sector;
-#ifdef CONFIG_EXFAT_DISCARD
+#ifdef CONFIG_STAGING_EXFAT_DISCARD
struct exfat_sb_info *sbi = EXFAT_SB(sb);
struct exfat_mount_options *opts = &sbi->options;
int ret;
-#endif /* CONFIG_EXFAT_DISCARD */
+#endif /* CONFIG_STAGING_EXFAT_DISCARD */
struct fs_info_t *p_fs = &(EXFAT_SB(sb)->fs_info);
struct bd_info_t *p_bd = &(EXFAT_SB(sb)->bd_info);
@@ -192,7 +192,7 @@ static s32 clr_alloc_bitmap(struct super_block *sb, u32 clu)
exfat_bitmap_clear((u8 *)p_fs->vol_amap[i]->b_data, b);
-#ifdef CONFIG_EXFAT_DISCARD
+#ifdef CONFIG_STAGING_EXFAT_DISCARD
if (opts->discard) {
ret = sb_issue_discard(sb, START_SECTOR(clu),
(1 << p_fs->sectors_per_clu_bits),
@@ -204,7 +204,7 @@ static s32 clr_alloc_bitmap(struct super_block *sb, u32 clu)
return ret;
}
}
-#endif /* CONFIG_EXFAT_DISCARD */
+#endif /* CONFIG_STAGING_EXFAT_DISCARD */
return sector_write(sb, sector, p_fs->vol_amap[i], 0);
}
@@ -249,7 +249,7 @@ static u32 test_alloc_bitmap(struct super_block *sb, u32 clu)
return CLUSTER_32(~0);
}
-static s32 exfat_alloc_cluster(struct super_block *sb, s32 num_alloc,
+s32 exfat_alloc_cluster(struct super_block *sb, s32 num_alloc,
struct chain_t *p_chain)
{
s32 num_clusters = 0;
@@ -328,7 +328,7 @@ static s32 exfat_alloc_cluster(struct super_block *sb, s32 num_alloc,
return num_clusters;
}
-static void exfat_free_cluster(struct super_block *sb, struct chain_t *p_chain,
+void exfat_free_cluster(struct super_block *sb, struct chain_t *p_chain,
s32 do_relse)
{
s32 num_clusters = 0;
@@ -434,7 +434,7 @@ s32 count_num_clusters(struct super_block *sb, struct chain_t *p_chain)
return count;
}
-static s32 exfat_count_used_clusters(struct super_block *sb)
+s32 exfat_count_used_clusters(struct super_block *sb)
{
int i, map_i, map_b, count = 0;
u8 k;
@@ -499,7 +499,7 @@ s32 load_alloc_bitmap(struct super_block *sb)
if (!ep)
return -ENOENT;
- type = p_fs->fs_func->get_entry_type((struct dentry_t *)ep);
+ type = exfat_get_entry_type((struct dentry_t *)ep);
if (type == TYPE_UNUSED)
break;
@@ -745,7 +745,7 @@ s32 load_upcase_table(struct super_block *sb)
if (!ep)
return -ENOENT;
- type = p_fs->fs_func->get_entry_type((struct dentry_t *)ep);
+ type = exfat_get_entry_type((struct dentry_t *)ep);
if (type == TYPE_UNUSED)
break;
@@ -787,7 +787,7 @@ void free_upcase_table(struct super_block *sb)
* Directory Entry Management Functions
*/
-static u32 exfat_get_entry_type(struct dentry_t *p_entry)
+u32 exfat_get_entry_type(struct dentry_t *p_entry)
{
struct file_dentry_t *ep = (struct file_dentry_t *)p_entry;
@@ -862,56 +862,56 @@ static void exfat_set_entry_type(struct dentry_t *p_entry, u32 type)
}
}
-static u32 exfat_get_entry_attr(struct dentry_t *p_entry)
+u32 exfat_get_entry_attr(struct dentry_t *p_entry)
{
struct file_dentry_t *ep = (struct file_dentry_t *)p_entry;
return (u32)GET16_A(ep->attr);
}
-static void exfat_set_entry_attr(struct dentry_t *p_entry, u32 attr)
+void exfat_set_entry_attr(struct dentry_t *p_entry, u32 attr)
{
struct file_dentry_t *ep = (struct file_dentry_t *)p_entry;
SET16_A(ep->attr, (u16)attr);
}
-static u8 exfat_get_entry_flag(struct dentry_t *p_entry)
+u8 exfat_get_entry_flag(struct dentry_t *p_entry)
{
struct strm_dentry_t *ep = (struct strm_dentry_t *)p_entry;
return ep->flags;
}
-static void exfat_set_entry_flag(struct dentry_t *p_entry, u8 flags)
+void exfat_set_entry_flag(struct dentry_t *p_entry, u8 flags)
{
struct strm_dentry_t *ep = (struct strm_dentry_t *)p_entry;
ep->flags = flags;
}
-static u32 exfat_get_entry_clu0(struct dentry_t *p_entry)
+u32 exfat_get_entry_clu0(struct dentry_t *p_entry)
{
struct strm_dentry_t *ep = (struct strm_dentry_t *)p_entry;
return GET32_A(ep->start_clu);
}
-static void exfat_set_entry_clu0(struct dentry_t *p_entry, u32 start_clu)
+void exfat_set_entry_clu0(struct dentry_t *p_entry, u32 start_clu)
{
struct strm_dentry_t *ep = (struct strm_dentry_t *)p_entry;
SET32_A(ep->start_clu, start_clu);
}
-static u64 exfat_get_entry_size(struct dentry_t *p_entry)
+u64 exfat_get_entry_size(struct dentry_t *p_entry)
{
struct strm_dentry_t *ep = (struct strm_dentry_t *)p_entry;
return GET64_A(ep->valid_size);
}
-static void exfat_set_entry_size(struct dentry_t *p_entry, u64 size)
+void exfat_set_entry_size(struct dentry_t *p_entry, u64 size)
{
struct strm_dentry_t *ep = (struct strm_dentry_t *)p_entry;
@@ -919,7 +919,7 @@ static void exfat_set_entry_size(struct dentry_t *p_entry, u64 size)
SET64_A(ep->size, size);
}
-static void exfat_get_entry_time(struct dentry_t *p_entry, struct timestamp_t *tp,
+void exfat_get_entry_time(struct dentry_t *p_entry, struct timestamp_t *tp,
u8 mode)
{
u16 t = 0x00, d = 0x21;
@@ -948,7 +948,7 @@ static void exfat_get_entry_time(struct dentry_t *p_entry, struct timestamp_t *t
tp->year = (d >> 9);
}
-static void exfat_set_entry_time(struct dentry_t *p_entry, struct timestamp_t *tp,
+void exfat_set_entry_time(struct dentry_t *p_entry, struct timestamp_t *tp,
u8 mode)
{
u16 t, d;
@@ -1013,7 +1013,7 @@ static void init_name_entry(struct name_dentry_t *ep, u16 *uniname)
}
static s32 exfat_init_dir_entry(struct super_block *sb, struct chain_t *p_dir,
- s32 entry, u32 type, u32 start_clu, u64 size)
+ s32 entry, u32 type, u32 start_clu, u64 size)
{
sector_t sector;
u8 flags;
@@ -1088,20 +1088,19 @@ static s32 exfat_init_ext_entry(struct super_block *sb, struct chain_t *p_dir,
return 0;
}
-static void exfat_delete_dir_entry(struct super_block *sb, struct chain_t *p_dir,
+void exfat_delete_dir_entry(struct super_block *sb, struct chain_t *p_dir,
s32 entry, s32 order, s32 num_entries)
{
int i;
sector_t sector;
struct dentry_t *ep;
- struct fs_info_t *p_fs = &(EXFAT_SB(sb)->fs_info);
for (i = order; i < num_entries; i++) {
ep = get_entry_in_dir(sb, p_dir, entry + i, &sector);
if (!ep)
return;
- p_fs->fs_func->set_entry_type(ep, TYPE_DELETED);
+ exfat_set_entry_type(ep, TYPE_DELETED);
exfat_buf_modify(sb, sector);
}
}
@@ -1256,7 +1255,7 @@ static s32 _walk_fat_chain(struct super_block *sb, struct chain_t *p_dir,
}
static s32 find_location(struct super_block *sb, struct chain_t *p_dir, s32 entry,
- sector_t *sector, s32 *offset)
+ sector_t *sector, s32 *offset)
{
s32 off, ret;
u32 clu = 0;
@@ -1366,7 +1365,7 @@ struct entry_set_cache_t *get_entry_set_in_dir(struct super_block *sb,
goto err_out;
ep = (struct dentry_t *)(buf + off);
- entry_type = p_fs->fs_func->get_entry_type(ep);
+ entry_type = exfat_get_entry_type(ep);
if ((entry_type != TYPE_FILE) && (entry_type != TYPE_DIR))
goto err_out;
@@ -1396,7 +1395,7 @@ struct entry_set_cache_t *get_entry_set_in_dir(struct super_block *sb,
	 * instead of copying the whole sector, we will check every entry.
	 * this will provide minimum stability and consistency.
*/
- entry_type = p_fs->fs_func->get_entry_type(ep);
+ entry_type = exfat_get_entry_type(ep);
if ((entry_type == TYPE_UNUSED) || (entry_type == TYPE_DELETED))
goto err_out;
@@ -1492,7 +1491,8 @@ void release_entry_set(struct entry_set_cache_t *es)
/* search EMPTY CONTINUOUS "num_entries" entries */
static s32 search_deleted_or_unused_entry(struct super_block *sb,
- struct chain_t *p_dir, s32 num_entries)
+ struct chain_t *p_dir,
+ s32 num_entries)
{
int i, dentry, num_empty = 0;
s32 dentries_per_clu;
@@ -1539,7 +1539,7 @@ static s32 search_deleted_or_unused_entry(struct super_block *sb,
if (!ep)
return -1;
- type = p_fs->fs_func->get_entry_type(ep);
+ type = exfat_get_entry_type(ep);
if (type == TYPE_UNUSED) {
num_empty++;
@@ -1613,7 +1613,7 @@ static s32 find_empty_entry(struct inode *inode, struct chain_t *p_dir, s32 num_
clu.flags = p_dir->flags;
/* (1) allocate a cluster */
- ret = p_fs->fs_func->alloc_cluster(sb, 1, &clu);
+ ret = exfat_alloc_cluster(sb, 1, &clu);
if (ret < 1)
return -EIO;
@@ -1649,8 +1649,8 @@ static s32 find_empty_entry(struct inode *inode, struct chain_t *p_dir, s32 num_
fid->entry + 1, &sector);
if (!ep)
return -ENOENT;
- p_fs->fs_func->set_entry_size(ep, size);
- p_fs->fs_func->set_entry_flag(ep, p_dir->flags);
+ exfat_set_entry_size(ep, size);
+ exfat_set_entry_flag(ep, p_dir->flags);
exfat_buf_modify(sb, sector);
update_dir_checksum(sb, &fid->dir,
@@ -1668,7 +1668,7 @@ static s32 find_empty_entry(struct inode *inode, struct chain_t *p_dir, s32 num_
}
static s32 extract_uni_name_from_name_entry(struct name_dentry_t *ep, u16 *uniname,
- s32 order)
+ s32 order)
{
int i, len = 0;
@@ -1689,7 +1689,7 @@ static s32 extract_uni_name_from_name_entry(struct name_dentry_t *ep, u16 *unina
* -1 : (root dir, ".") it is the root dir itself
* -2 : entry with the name does not exist
*/
-static s32 exfat_find_dir_entry(struct super_block *sb, struct chain_t *p_dir,
+s32 exfat_find_dir_entry(struct super_block *sb, struct chain_t *p_dir,
struct uni_name_t *p_uniname, s32 num_entries,
struct dos_name_t *p_dosname, u32 type)
{
@@ -1735,7 +1735,7 @@ static s32 exfat_find_dir_entry(struct super_block *sb, struct chain_t *p_dir,
if (!ep)
return -2;
- entry_type = p_fs->fs_func->get_entry_type(ep);
+ entry_type = exfat_get_entry_type(ep);
step = 1;
if ((entry_type == TYPE_UNUSED) || (entry_type == TYPE_DELETED)) {
@@ -1832,21 +1832,20 @@ static s32 exfat_find_dir_entry(struct super_block *sb, struct chain_t *p_dir,
return -2;
}
-static s32 exfat_count_ext_entries(struct super_block *sb, struct chain_t *p_dir,
+s32 exfat_count_ext_entries(struct super_block *sb, struct chain_t *p_dir,
s32 entry, struct dentry_t *p_entry)
{
int i, count = 0;
u32 type;
struct file_dentry_t *file_ep = (struct file_dentry_t *)p_entry;
struct dentry_t *ext_ep;
- struct fs_info_t *p_fs = &(EXFAT_SB(sb)->fs_info);
for (i = 0, entry++; i < file_ep->num_ext; i++, entry++) {
ext_ep = get_entry_in_dir(sb, p_dir, entry, NULL);
if (!ext_ep)
return -1;
- type = p_fs->fs_func->get_entry_type(ext_ep);
+ type = exfat_get_entry_type(ext_ep);
if ((type == TYPE_EXTEND) || (type == TYPE_STREAM))
count++;
else
@@ -1884,7 +1883,7 @@ s32 count_dos_name_entries(struct super_block *sb, struct chain_t *p_dir,
if (!ep)
return -ENOENT;
- entry_type = p_fs->fs_func->get_entry_type(ep);
+ entry_type = exfat_get_entry_type(ep);
if (entry_type == TYPE_UNUSED)
return count;
@@ -1940,7 +1939,7 @@ bool is_dir_empty(struct super_block *sb, struct chain_t *p_dir)
if (!ep)
break;
- type = p_fs->fs_func->get_entry_type(ep);
+ type = exfat_get_entry_type(ep);
if (type == TYPE_UNUSED)
return true;
@@ -1984,9 +1983,8 @@ s32 get_num_entries_and_dos_name(struct super_block *sb, struct chain_t *p_dir,
struct dos_name_t *p_dosname)
{
s32 num_entries;
- struct fs_info_t *p_fs = &(EXFAT_SB(sb)->fs_info);
- num_entries = p_fs->fs_func->calc_num_entries(p_uniname);
+ num_entries = exfat_calc_num_entries(p_uniname);
if (num_entries == 0)
return -EINVAL;
@@ -1995,14 +1993,13 @@ s32 get_num_entries_and_dos_name(struct super_block *sb, struct chain_t *p_dir,
return 0;
}
-static void exfat_get_uni_name_from_ext_entry(struct super_block *sb,
+void exfat_get_uni_name_from_ext_entry(struct super_block *sb,
struct chain_t *p_dir, s32 entry,
u16 *uniname)
{
int i;
struct dentry_t *ep;
struct entry_set_cache_t *es;
- struct fs_info_t *p_fs = &(EXFAT_SB(sb)->fs_info);
es = get_entry_set_in_dir(sb, p_dir, entry, ES_ALL_ENTRIES, &ep);
if (!es || es->num_entries < 3) {
@@ -2020,7 +2017,7 @@ static void exfat_get_uni_name_from_ext_entry(struct super_block *sb,
* So, the index of first file-name dentry should start from 2.
*/
for (i = 2; i < es->num_entries; i++, ep++) {
- if (p_fs->fs_func->get_entry_type(ep) == TYPE_EXTEND)
+ if (exfat_get_entry_type(ep) == TYPE_EXTEND)
extract_uni_name_from_name_entry((struct name_dentry_t *)
ep, uniname, i);
else
@@ -2032,7 +2029,7 @@ out:
release_entry_set(es);
}
-static s32 exfat_calc_num_entries(struct uni_name_t *p_uniname)
+s32 exfat_calc_num_entries(struct uni_name_t *p_uniname)
{
s32 len;
@@ -2100,36 +2097,6 @@ s32 resolve_path(struct inode *inode, char *path, struct chain_t *p_dir,
return 0;
}
-/*
- * File Operation Functions
- */
-static struct fs_func exfat_fs_func = {
- .alloc_cluster = exfat_alloc_cluster,
- .free_cluster = exfat_free_cluster,
- .count_used_clusters = exfat_count_used_clusters,
-
- .init_dir_entry = exfat_init_dir_entry,
- .init_ext_entry = exfat_init_ext_entry,
- .find_dir_entry = exfat_find_dir_entry,
- .delete_dir_entry = exfat_delete_dir_entry,
- .get_uni_name_from_ext_entry = exfat_get_uni_name_from_ext_entry,
- .count_ext_entries = exfat_count_ext_entries,
- .calc_num_entries = exfat_calc_num_entries,
-
- .get_entry_type = exfat_get_entry_type,
- .set_entry_type = exfat_set_entry_type,
- .get_entry_attr = exfat_get_entry_attr,
- .set_entry_attr = exfat_set_entry_attr,
- .get_entry_flag = exfat_get_entry_flag,
- .set_entry_flag = exfat_set_entry_flag,
- .get_entry_clu0 = exfat_get_entry_clu0,
- .set_entry_clu0 = exfat_set_entry_clu0,
- .get_entry_size = exfat_get_entry_size,
- .set_entry_size = exfat_set_entry_size,
- .get_entry_time = exfat_get_entry_time,
- .set_entry_time = exfat_set_entry_time,
-};
-
s32 exfat_mount(struct super_block *sb, struct pbr_sector_t *p_pbr)
{
struct bpbex_t *p_bpb = (struct bpbex_t *)p_pbr->bpb;
@@ -2173,8 +2140,6 @@ s32 exfat_mount(struct super_block *sb, struct pbr_sector_t *p_pbr)
p_fs->clu_srch_ptr = 2;
p_fs->used_clusters = UINT_MAX;
- p_fs->fs_func = &exfat_fs_func;
-
return 0;
}
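This hunk is the heart of the patch: with exactly one implementation ever registered behind struct fs_func, the ops table was pure indirection, so it is deleted and every p_fs->fs_func->op(...) call site becomes a direct exfat_op(...) call. Before and after at a typical call site:

    /* before: pointer chase through the single-entry ops table */
    type = p_fs->fs_func->get_entry_type(ep);

    /* after: direct call, visible to the compiler for inlining */
    type = exfat_get_entry_type(ep);

Besides the readability win, the direct calls let many functions drop their now-unused p_fs and fs_func locals, as the remaining hunks in this file show.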
@@ -2187,7 +2152,6 @@ s32 create_dir(struct inode *inode, struct chain_t *p_dir,
struct dos_name_t dos_name;
struct super_block *sb = inode->i_sb;
struct fs_info_t *p_fs = &(EXFAT_SB(sb)->fs_info);
- struct fs_func *fs_func = p_fs->fs_func;
ret = get_num_entries_and_dos_name(sb, p_dir, p_uniname, &num_entries,
&dos_name);
@@ -2204,7 +2168,7 @@ s32 create_dir(struct inode *inode, struct chain_t *p_dir,
clu.flags = (p_fs->vol_type == EXFAT) ? 0x03 : 0x01;
/* (1) allocate a cluster */
- ret = fs_func->alloc_cluster(sb, 1, &clu);
+ ret = exfat_alloc_cluster(sb, 1, &clu);
if (ret < 0)
return ret;
else if (ret == 0)
@@ -2218,13 +2182,13 @@ s32 create_dir(struct inode *inode, struct chain_t *p_dir,
/* (2) update the directory entry */
/* make sub-dir entry in parent directory */
- ret = fs_func->init_dir_entry(sb, p_dir, dentry, TYPE_DIR, clu.dir,
- size);
+ ret = exfat_init_dir_entry(sb, p_dir, dentry, TYPE_DIR, clu.dir,
+ size);
if (ret != 0)
return ret;
- ret = fs_func->init_ext_entry(sb, p_dir, dentry, num_entries, p_uniname,
- &dos_name);
+ ret = exfat_init_ext_entry(sb, p_dir, dentry, num_entries, p_uniname,
+ &dos_name);
if (ret != 0)
return ret;
@@ -2252,7 +2216,6 @@ s32 create_file(struct inode *inode, struct chain_t *p_dir,
struct dos_name_t dos_name;
struct super_block *sb = inode->i_sb;
struct fs_info_t *p_fs = &(EXFAT_SB(sb)->fs_info);
- struct fs_func *fs_func = p_fs->fs_func;
ret = get_num_entries_and_dos_name(sb, p_dir, p_uniname, &num_entries,
&dos_name);
@@ -2268,13 +2231,13 @@ s32 create_file(struct inode *inode, struct chain_t *p_dir,
/* fill the dos name directory entry information of the created file.
* the first cluster is not determined yet. (0)
*/
- ret = fs_func->init_dir_entry(sb, p_dir, dentry, TYPE_FILE | mode,
- CLUSTER_32(0), 0);
+ ret = exfat_init_dir_entry(sb, p_dir, dentry, TYPE_FILE | mode,
+ CLUSTER_32(0), 0);
if (ret != 0)
return ret;
- ret = fs_func->init_ext_entry(sb, p_dir, dentry, num_entries, p_uniname,
- &dos_name);
+ ret = exfat_init_ext_entry(sb, p_dir, dentry, num_entries, p_uniname,
+ &dos_name);
if (ret != 0)
return ret;
@@ -2301,8 +2264,6 @@ void remove_file(struct inode *inode, struct chain_t *p_dir, s32 entry)
sector_t sector;
struct dentry_t *ep;
struct super_block *sb = inode->i_sb;
- struct fs_info_t *p_fs = &(EXFAT_SB(sb)->fs_info);
- struct fs_func *fs_func = p_fs->fs_func;
ep = get_entry_in_dir(sb, p_dir, entry, &sector);
if (!ep)
@@ -2311,7 +2272,7 @@ void remove_file(struct inode *inode, struct chain_t *p_dir, s32 entry)
exfat_buf_lock(sb, sector);
/* exfat_buf_lock() before call count_ext_entries() */
- num_entries = fs_func->count_ext_entries(sb, p_dir, entry, ep);
+ num_entries = exfat_count_ext_entries(sb, p_dir, entry, ep);
if (num_entries < 0) {
exfat_buf_unlock(sb, sector);
return;
@@ -2321,7 +2282,7 @@ void remove_file(struct inode *inode, struct chain_t *p_dir, s32 entry)
exfat_buf_unlock(sb, sector);
/* (1) update the directory entry */
- fs_func->delete_dir_entry(sb, p_dir, entry, 0, num_entries);
+ exfat_delete_dir_entry(sb, p_dir, entry, 0, num_entries);
}
s32 exfat_rename_file(struct inode *inode, struct chain_t *p_dir, s32 oldentry,
@@ -2332,8 +2293,6 @@ s32 exfat_rename_file(struct inode *inode, struct chain_t *p_dir, s32 oldentry,
struct dos_name_t dos_name;
struct dentry_t *epold, *epnew;
struct super_block *sb = inode->i_sb;
- struct fs_info_t *p_fs = &(EXFAT_SB(sb)->fs_info);
- struct fs_func *fs_func = p_fs->fs_func;
epold = get_entry_in_dir(sb, p_dir, oldentry, &sector_old);
if (!epold)
@@ -2342,8 +2301,8 @@ s32 exfat_rename_file(struct inode *inode, struct chain_t *p_dir, s32 oldentry,
exfat_buf_lock(sb, sector_old);
/* exfat_buf_lock() before call count_ext_entries() */
- num_old_entries = fs_func->count_ext_entries(sb, p_dir, oldentry,
- epold);
+ num_old_entries = exfat_count_ext_entries(sb, p_dir, oldentry,
+ epold);
if (num_old_entries < 0) {
exfat_buf_unlock(sb, sector_old);
return -ENOENT;
@@ -2371,10 +2330,10 @@ s32 exfat_rename_file(struct inode *inode, struct chain_t *p_dir, s32 oldentry,
}
memcpy((void *)epnew, (void *)epold, DENTRY_SIZE);
- if (fs_func->get_entry_type(epnew) == TYPE_FILE) {
- fs_func->set_entry_attr(epnew,
- fs_func->get_entry_attr(epnew) |
- ATTR_ARCHIVE);
+ if (exfat_get_entry_type(epnew) == TYPE_FILE) {
+ exfat_set_entry_attr(epnew,
+ exfat_get_entry_attr(epnew) |
+ ATTR_ARCHIVE);
fid->attr |= ATTR_ARCHIVE;
}
exfat_buf_modify(sb, sector_new);
@@ -2395,33 +2354,33 @@ s32 exfat_rename_file(struct inode *inode, struct chain_t *p_dir, s32 oldentry,
exfat_buf_modify(sb, sector_new);
exfat_buf_unlock(sb, sector_old);
- ret = fs_func->init_ext_entry(sb, p_dir, newentry,
- num_new_entries, p_uniname,
- &dos_name);
+ ret = exfat_init_ext_entry(sb, p_dir, newentry,
+ num_new_entries, p_uniname,
+ &dos_name);
if (ret != 0)
return ret;
- fs_func->delete_dir_entry(sb, p_dir, oldentry, 0,
- num_old_entries);
+ exfat_delete_dir_entry(sb, p_dir, oldentry, 0,
+ num_old_entries);
fid->entry = newentry;
} else {
- if (fs_func->get_entry_type(epold) == TYPE_FILE) {
- fs_func->set_entry_attr(epold,
- fs_func->get_entry_attr(epold) |
- ATTR_ARCHIVE);
+ if (exfat_get_entry_type(epold) == TYPE_FILE) {
+ exfat_set_entry_attr(epold,
+ exfat_get_entry_attr(epold) |
+ ATTR_ARCHIVE);
fid->attr |= ATTR_ARCHIVE;
}
exfat_buf_modify(sb, sector_old);
exfat_buf_unlock(sb, sector_old);
- ret = fs_func->init_ext_entry(sb, p_dir, oldentry,
- num_new_entries, p_uniname,
- &dos_name);
+ ret = exfat_init_ext_entry(sb, p_dir, oldentry,
+ num_new_entries, p_uniname,
+ &dos_name);
if (ret != 0)
return ret;
- fs_func->delete_dir_entry(sb, p_dir, oldentry, num_new_entries,
- num_old_entries);
+ exfat_delete_dir_entry(sb, p_dir, oldentry, num_new_entries,
+ num_old_entries);
}
return 0;
@@ -2436,23 +2395,21 @@ s32 move_file(struct inode *inode, struct chain_t *p_olddir, s32 oldentry,
struct dos_name_t dos_name;
struct dentry_t *epmov, *epnew;
struct super_block *sb = inode->i_sb;
- struct fs_info_t *p_fs = &(EXFAT_SB(sb)->fs_info);
- struct fs_func *fs_func = p_fs->fs_func;
epmov = get_entry_in_dir(sb, p_olddir, oldentry, &sector_mov);
if (!epmov)
return -ENOENT;
/* check if the source and target directory is the same */
- if (fs_func->get_entry_type(epmov) == TYPE_DIR &&
- fs_func->get_entry_clu0(epmov) == p_newdir->dir)
+ if (exfat_get_entry_type(epmov) == TYPE_DIR &&
+ exfat_get_entry_clu0(epmov) == p_newdir->dir)
return -EINVAL;
exfat_buf_lock(sb, sector_mov);
/* exfat_buf_lock() before call count_ext_entries() */
- num_old_entries = fs_func->count_ext_entries(sb, p_olddir, oldentry,
- epmov);
+ num_old_entries = exfat_count_ext_entries(sb, p_olddir, oldentry,
+ epmov);
if (num_old_entries < 0) {
exfat_buf_unlock(sb, sector_mov);
return -ENOENT;
@@ -2479,9 +2436,9 @@ s32 move_file(struct inode *inode, struct chain_t *p_olddir, s32 oldentry,
}
memcpy((void *)epnew, (void *)epmov, DENTRY_SIZE);
- if (fs_func->get_entry_type(epnew) == TYPE_FILE) {
- fs_func->set_entry_attr(epnew, fs_func->get_entry_attr(epnew) |
- ATTR_ARCHIVE);
+ if (exfat_get_entry_type(epnew) == TYPE_FILE) {
+ exfat_set_entry_attr(epnew, exfat_get_entry_attr(epnew) |
+ ATTR_ARCHIVE);
fid->attr |= ATTR_ARCHIVE;
}
exfat_buf_modify(sb, sector_new);
@@ -2501,12 +2458,12 @@ s32 move_file(struct inode *inode, struct chain_t *p_olddir, s32 oldentry,
exfat_buf_modify(sb, sector_new);
exfat_buf_unlock(sb, sector_mov);
- ret = fs_func->init_ext_entry(sb, p_newdir, newentry, num_new_entries,
- p_uniname, &dos_name);
+ ret = exfat_init_ext_entry(sb, p_newdir, newentry, num_new_entries,
+ p_uniname, &dos_name);
if (ret != 0)
return ret;
- fs_func->delete_dir_entry(sb, p_olddir, oldentry, 0, num_old_entries);
+ exfat_delete_dir_entry(sb, p_olddir, oldentry, 0, num_old_entries);
fid->dir.dir = p_newdir->dir;
fid->dir.size = p_newdir->size;
diff --git a/drivers/staging/exfat/exfat_super.c b/drivers/staging/exfat/exfat_super.c
index 9f91853b189b..b81d2a87b82e 100644
--- a/drivers/staging/exfat/exfat_super.c
+++ b/drivers/staging/exfat/exfat_super.c
@@ -38,8 +38,8 @@
static struct kmem_cache *exfat_inode_cachep;
-static int exfat_default_codepage = CONFIG_EXFAT_DEFAULT_CODEPAGE;
-static char exfat_default_iocharset[] = CONFIG_EXFAT_DEFAULT_IOCHARSET;
+static int exfat_default_codepage = CONFIG_STAGING_EXFAT_DEFAULT_CODEPAGE;
+static char exfat_default_iocharset[] = CONFIG_STAGING_EXFAT_DEFAULT_IOCHARSET;
#define INC_IVERSION(x) (inode_inc_iversion(x))
#define GET_IVERSION(x) (inode_peek_iversion_raw(x))
@@ -365,7 +365,7 @@ static int ffsMountVol(struct super_block *sb)
if (p_bd->sector_size < sb->s_blocksize) {
printk(KERN_INFO "EXFAT: mount failed - sector size %d less than blocksize %ld\n",
- p_bd->sector_size, sb->s_blocksize);
+ p_bd->sector_size, sb->s_blocksize);
ret = -EINVAL;
goto out;
}
@@ -492,7 +492,7 @@ static int ffsGetVolInfo(struct super_block *sb, struct vol_info_t *info)
mutex_lock(&p_fs->v_mutex);
if (p_fs->used_clusters == UINT_MAX)
- p_fs->used_clusters = p_fs->fs_func->count_used_clusters(sb);
+ p_fs->used_clusters = exfat_count_used_clusters(sb);
info->FatType = p_fs->vol_type;
info->ClusterSize = p_fs->cluster_size;
@@ -565,8 +565,8 @@ static int ffsLookupFile(struct inode *inode, char *path, struct file_id_t *fid)
goto out;
/* search the file name for directories */
- dentry = p_fs->fs_func->find_dir_entry(sb, &dir, &uni_name, num_entries,
- &dos_name, TYPE_ALL);
+ dentry = exfat_find_dir_entry(sb, &dir, &uni_name, num_entries,
+ &dos_name, TYPE_ALL);
if (dentry < -1) {
ret = -ENOENT;
goto out;
@@ -595,18 +595,18 @@ static int ffsLookupFile(struct inode *inode, char *path, struct file_id_t *fid)
}
ep2 = ep + 1;
- fid->type = p_fs->fs_func->get_entry_type(ep);
+ fid->type = exfat_get_entry_type(ep);
fid->rwoffset = 0;
fid->hint_last_off = -1;
- fid->attr = p_fs->fs_func->get_entry_attr(ep);
+ fid->attr = exfat_get_entry_attr(ep);
- fid->size = p_fs->fs_func->get_entry_size(ep2);
+ fid->size = exfat_get_entry_size(ep2);
if ((fid->type == TYPE_FILE) && (fid->size == 0)) {
fid->flags = (p_fs->vol_type == EXFAT) ? 0x03 : 0x01;
fid->start_clu = CLUSTER_32(~0);
} else {
- fid->flags = p_fs->fs_func->get_entry_flag(ep2);
- fid->start_clu = p_fs->fs_func->get_entry_clu0(ep2);
+ fid->flags = exfat_get_entry_flag(ep2);
+ fid->start_clu = exfat_get_entry_clu0(ep2);
}
release_entry_set(es);
@@ -647,7 +647,7 @@ static int ffsCreateFile(struct inode *inode, char *path, u8 mode,
/* create a new file */
ret = create_file(inode, &dir, &uni_name, mode, fid);
-#ifndef CONFIG_EXFAT_DELAYED_SYNC
+#ifndef CONFIG_STAGING_EXFAT_DELAYED_SYNC
fs_sync(sb, true);
fs_set_vol_flags(sb, VOL_CLEAN);
#endif
@@ -886,9 +886,9 @@ static int ffsWriteFile(struct inode *inode, struct file_id_t *fid,
new_clu.flags = fid->flags;
/* (1) allocate a chain of clusters */
- num_alloced = p_fs->fs_func->alloc_cluster(sb,
- num_alloc,
- &new_clu);
+ num_alloced = exfat_alloc_cluster(sb,
+ num_alloc,
+ &new_clu);
if (num_alloced == 0)
break;
if (num_alloced < 0) {
@@ -991,24 +991,24 @@ static int ffsWriteFile(struct inode *inode, struct file_id_t *fid,
goto err_out;
ep2 = ep + 1;
- p_fs->fs_func->set_entry_time(ep, tm_current(&tm), TM_MODIFY);
- p_fs->fs_func->set_entry_attr(ep, fid->attr);
+ exfat_set_entry_time(ep, tm_current(&tm), TM_MODIFY);
+ exfat_set_entry_attr(ep, fid->attr);
if (modified) {
- if (p_fs->fs_func->get_entry_flag(ep2) != fid->flags)
- p_fs->fs_func->set_entry_flag(ep2, fid->flags);
+ if (exfat_get_entry_flag(ep2) != fid->flags)
+ exfat_set_entry_flag(ep2, fid->flags);
- if (p_fs->fs_func->get_entry_size(ep2) != fid->size)
- p_fs->fs_func->set_entry_size(ep2, fid->size);
+ if (exfat_get_entry_size(ep2) != fid->size)
+ exfat_set_entry_size(ep2, fid->size);
- if (p_fs->fs_func->get_entry_clu0(ep2) != fid->start_clu)
- p_fs->fs_func->set_entry_clu0(ep2, fid->start_clu);
+ if (exfat_get_entry_clu0(ep2) != fid->start_clu)
+ exfat_set_entry_clu0(ep2, fid->start_clu);
}
update_dir_checksum_with_entry_set(sb, es);
release_entry_set(es);
-#ifndef CONFIG_EXFAT_DELAYED_SYNC
+#ifndef CONFIG_STAGING_EXFAT_DELAYED_SYNC
fs_sync(sb, true);
fs_set_vol_flags(sb, VOL_CLEAN);
#endif
@@ -1108,13 +1108,13 @@ static int ffsTruncateFile(struct inode *inode, u64 old_size, u64 new_size)
}
ep2 = ep + 1;
- p_fs->fs_func->set_entry_time(ep, tm_current(&tm), TM_MODIFY);
- p_fs->fs_func->set_entry_attr(ep, fid->attr);
+ exfat_set_entry_time(ep, tm_current(&tm), TM_MODIFY);
+ exfat_set_entry_attr(ep, fid->attr);
- p_fs->fs_func->set_entry_size(ep2, new_size);
+ exfat_set_entry_size(ep2, new_size);
if (new_size == 0) {
- p_fs->fs_func->set_entry_flag(ep2, 0x01);
- p_fs->fs_func->set_entry_clu0(ep2, CLUSTER_32(0));
+ exfat_set_entry_flag(ep2, 0x01);
+ exfat_set_entry_clu0(ep2, CLUSTER_32(0));
}
update_dir_checksum_with_entry_set(sb, es);
@@ -1127,14 +1127,14 @@ static int ffsTruncateFile(struct inode *inode, u64 old_size, u64 new_size)
}
/* (3) free the clusters */
- p_fs->fs_func->free_cluster(sb, &clu, 0);
+ exfat_free_cluster(sb, &clu, 0);
/* hint information */
fid->hint_last_off = -1;
if (fid->rwoffset > fid->size)
fid->rwoffset = fid->size;
-#ifndef CONFIG_EXFAT_DELAYED_SYNC
+#ifndef CONFIG_STAGING_EXFAT_DELAYED_SYNC
fs_sync(sb, true);
fs_set_vol_flags(sb, VOL_CLEAN);
#endif
@@ -1217,7 +1217,7 @@ static int ffsMoveFile(struct inode *old_parent_inode, struct file_id_t *fid,
goto out2;
}
- if (p_fs->fs_func->get_entry_attr(ep) & ATTR_READONLY) {
+ if (exfat_get_entry_attr(ep) & ATTR_READONLY) {
ret = -EPERM;
goto out2;
}
@@ -1237,7 +1237,7 @@ static int ffsMoveFile(struct inode *old_parent_inode, struct file_id_t *fid,
if (!ep)
goto out;
- entry_type = p_fs->fs_func->get_entry_type(ep);
+ entry_type = exfat_get_entry_type(ep);
if (entry_type == TYPE_DIR) {
struct chain_t new_clu;
@@ -1274,15 +1274,15 @@ static int ffsMoveFile(struct inode *old_parent_inode, struct file_id_t *fid,
if (!ep)
goto out;
- num_entries = p_fs->fs_func->count_ext_entries(sb, p_dir,
- new_entry, ep);
+ num_entries = exfat_count_ext_entries(sb, p_dir,
+ new_entry, ep);
if (num_entries < 0)
goto out;
- p_fs->fs_func->delete_dir_entry(sb, p_dir, new_entry, 0,
- num_entries + 1);
+ exfat_delete_dir_entry(sb, p_dir, new_entry, 0,
+ num_entries + 1);
}
out:
-#ifndef CONFIG_EXFAT_DELAYED_SYNC
+#ifndef CONFIG_STAGING_EXFAT_DELAYED_SYNC
fs_sync(sb, true);
fs_set_vol_flags(sb, VOL_CLEAN);
#endif
@@ -1324,7 +1324,7 @@ static int ffsRemoveFile(struct inode *inode, struct file_id_t *fid)
goto out;
}
- if (p_fs->fs_func->get_entry_attr(ep) & ATTR_READONLY) {
+ if (exfat_get_entry_attr(ep) & ATTR_READONLY) {
ret = -EPERM;
goto out;
}
@@ -1338,13 +1338,13 @@ static int ffsRemoveFile(struct inode *inode, struct file_id_t *fid)
clu_to_free.flags = fid->flags;
/* (2) free the clusters */
- p_fs->fs_func->free_cluster(sb, &clu_to_free, 0);
+ exfat_free_cluster(sb, &clu_to_free, 0);
fid->size = 0;
fid->start_clu = CLUSTER_32(~0);
fid->flags = (p_fs->vol_type == EXFAT) ? 0x03 : 0x01;
-#ifndef CONFIG_EXFAT_DELAYED_SYNC
+#ifndef CONFIG_STAGING_EXFAT_DELAYED_SYNC
fs_sync(sb, true);
fs_set_vol_flags(sb, VOL_CLEAN);
#endif
@@ -1398,7 +1398,7 @@ static int ffsSetAttr(struct inode *inode, u32 attr)
goto out;
}
- type = p_fs->fs_func->get_entry_type(ep);
+ type = exfat_get_entry_type(ep);
if (((type == TYPE_FILE) && (attr & ATTR_SUBDIR)) ||
((type == TYPE_DIR) && (!(attr & ATTR_SUBDIR)))) {
@@ -1415,12 +1415,12 @@ static int ffsSetAttr(struct inode *inode, u32 attr)
/* set the file attribute */
fid->attr = attr;
- p_fs->fs_func->set_entry_attr(ep, attr);
+ exfat_set_entry_attr(ep, attr);
update_dir_checksum_with_entry_set(sb, es);
release_entry_set(es);
-#ifndef CONFIG_EXFAT_DELAYED_SYNC
+#ifndef CONFIG_STAGING_EXFAT_DELAYED_SYNC
fs_sync(sb, true);
fs_set_vol_flags(sb, VOL_CLEAN);
#endif
@@ -1481,7 +1481,7 @@ static int ffsReadStat(struct inode *inode, struct dir_entry_t *info)
count = count_dos_name_entries(sb, &dir, TYPE_DIR);
if (count < 0) {
- ret = count; /* propogate error upward */
+ ret = count; /* propagate error upward */
goto out;
}
info->NumSubdirs = count;
@@ -1502,9 +1502,9 @@ static int ffsReadStat(struct inode *inode, struct dir_entry_t *info)
ep2 = ep + 1;
/* set FILE_INFO structure using the acquired struct dentry_t */
- info->Attr = p_fs->fs_func->get_entry_attr(ep);
+ info->Attr = exfat_get_entry_attr(ep);
- p_fs->fs_func->get_entry_time(ep, &tm, TM_CREATE);
+ exfat_get_entry_time(ep, &tm, TM_CREATE);
info->CreateTimestamp.Year = tm.year;
info->CreateTimestamp.Month = tm.mon;
info->CreateTimestamp.Day = tm.day;
@@ -1513,7 +1513,7 @@ static int ffsReadStat(struct inode *inode, struct dir_entry_t *info)
info->CreateTimestamp.Second = tm.sec;
info->CreateTimestamp.MilliSecond = 0;
- p_fs->fs_func->get_entry_time(ep, &tm, TM_MODIFY);
+ exfat_get_entry_time(ep, &tm, TM_MODIFY);
info->ModifyTimestamp.Year = tm.year;
info->ModifyTimestamp.Month = tm.mon;
info->ModifyTimestamp.Day = tm.day;
@@ -1528,13 +1528,13 @@ static int ffsReadStat(struct inode *inode, struct dir_entry_t *info)
	/* XXX this is very bad for exfat because the name is already included in es.
* API should be revised
*/
- p_fs->fs_func->get_uni_name_from_ext_entry(sb, &fid->dir, fid->entry,
- uni_name.name);
+ exfat_get_uni_name_from_ext_entry(sb, &fid->dir, fid->entry,
+ uni_name.name);
nls_uniname_to_cstring(sb, info->Name, &uni_name);
info->NumSubdirs = 2;
- info->Size = p_fs->fs_func->get_entry_size(ep2);
+ info->Size = exfat_get_entry_size(ep2);
release_entry_set(es);
@@ -1548,7 +1548,7 @@ static int ffsReadStat(struct inode *inode, struct dir_entry_t *info)
count = count_dos_name_entries(sb, &dir, TYPE_DIR);
if (count < 0) {
- ret = count; /* propogate error upward */
+ ret = count; /* propagate error upward */
goto out;
}
info->NumSubdirs += count;
@@ -1602,7 +1602,7 @@ static int ffsWriteStat(struct inode *inode, struct dir_entry_t *info)
}
ep2 = ep + 1;
- p_fs->fs_func->set_entry_attr(ep, info->Attr);
+ exfat_set_entry_attr(ep, info->Attr);
/* set FILE_INFO structure using the acquired struct dentry_t */
tm.sec = info->CreateTimestamp.Second;
@@ -1611,7 +1611,7 @@ static int ffsWriteStat(struct inode *inode, struct dir_entry_t *info)
tm.day = info->CreateTimestamp.Day;
tm.mon = info->CreateTimestamp.Month;
tm.year = info->CreateTimestamp.Year;
- p_fs->fs_func->set_entry_time(ep, &tm, TM_CREATE);
+ exfat_set_entry_time(ep, &tm, TM_CREATE);
tm.sec = info->ModifyTimestamp.Second;
tm.min = info->ModifyTimestamp.Minute;
@@ -1619,9 +1619,9 @@ static int ffsWriteStat(struct inode *inode, struct dir_entry_t *info)
tm.day = info->ModifyTimestamp.Day;
tm.mon = info->ModifyTimestamp.Month;
tm.year = info->ModifyTimestamp.Year;
- p_fs->fs_func->set_entry_time(ep, &tm, TM_MODIFY);
+ exfat_set_entry_time(ep, &tm, TM_MODIFY);
- p_fs->fs_func->set_entry_size(ep2, info->Size);
+ exfat_set_entry_size(ep2, info->Size);
update_dir_checksum_with_entry_set(sb, es);
release_entry_set(es);
@@ -1704,7 +1704,7 @@ static int ffsMapCluster(struct inode *inode, s32 clu_offset, u32 *clu)
new_clu.flags = fid->flags;
/* (1) allocate a cluster */
- num_alloced = p_fs->fs_func->alloc_cluster(sb, 1, &new_clu);
+ num_alloced = exfat_alloc_cluster(sb, 1, &new_clu);
if (num_alloced < 0) {
ret = -EIO;
goto out;
@@ -1744,13 +1744,11 @@ static int ffsMapCluster(struct inode *inode, s32 clu_offset, u32 *clu)
/* (3) update directory entry */
if (modified) {
- if (p_fs->fs_func->get_entry_flag(ep) != fid->flags)
- p_fs->fs_func->set_entry_flag(ep, fid->flags);
-
- if (p_fs->fs_func->get_entry_clu0(ep) != fid->start_clu)
- p_fs->fs_func->set_entry_clu0(ep,
- fid->start_clu);
+ if (exfat_get_entry_flag(ep) != fid->flags)
+ exfat_set_entry_flag(ep, fid->flags);
+ if (exfat_get_entry_clu0(ep) != fid->start_clu)
+ exfat_set_entry_clu0(ep, fid->start_clu);
}
update_dir_checksum_with_entry_set(sb, es);
@@ -1804,7 +1802,7 @@ static int ffsCreateDir(struct inode *inode, char *path, struct file_id_t *fid)
ret = create_dir(inode, &dir, &uni_name, fid);
-#ifndef CONFIG_EXFAT_DELAYED_SYNC
+#ifndef CONFIG_STAGING_EXFAT_DELAYED_SYNC
fs_sync(sb, true);
fs_set_vol_flags(sb, VOL_CLEAN);
#endif
@@ -1831,7 +1829,6 @@ static int ffsReadDir(struct inode *inode, struct dir_entry_t *dir_entry)
struct dentry_t *ep;
struct super_block *sb = inode->i_sb;
struct fs_info_t *p_fs = &(EXFAT_SB(sb)->fs_info);
- struct fs_func *fs_func = p_fs->fs_func;
struct file_id_t *fid = &(EXFAT_I(inode)->fid);
/* check the validity of pointer parameters */
@@ -1913,7 +1910,7 @@ static int ffsReadDir(struct inode *inode, struct dir_entry_t *dir_entry)
ret = -ENOENT;
goto out;
}
- type = fs_func->get_entry_type(ep);
+ type = exfat_get_entry_type(ep);
if (type == TYPE_UNUSED)
break;
@@ -1922,9 +1919,9 @@ static int ffsReadDir(struct inode *inode, struct dir_entry_t *dir_entry)
continue;
exfat_buf_lock(sb, sector);
- dir_entry->Attr = fs_func->get_entry_attr(ep);
+ dir_entry->Attr = exfat_get_entry_attr(ep);
- fs_func->get_entry_time(ep, &tm, TM_CREATE);
+ exfat_get_entry_time(ep, &tm, TM_CREATE);
dir_entry->CreateTimestamp.Year = tm.year;
dir_entry->CreateTimestamp.Month = tm.mon;
dir_entry->CreateTimestamp.Day = tm.day;
@@ -1933,7 +1930,7 @@ static int ffsReadDir(struct inode *inode, struct dir_entry_t *dir_entry)
dir_entry->CreateTimestamp.Second = tm.sec;
dir_entry->CreateTimestamp.MilliSecond = 0;
- fs_func->get_entry_time(ep, &tm, TM_MODIFY);
+ exfat_get_entry_time(ep, &tm, TM_MODIFY);
dir_entry->ModifyTimestamp.Year = tm.year;
dir_entry->ModifyTimestamp.Month = tm.mon;
dir_entry->ModifyTimestamp.Day = tm.day;
@@ -1946,8 +1943,8 @@ static int ffsReadDir(struct inode *inode, struct dir_entry_t *dir_entry)
sizeof(struct date_time_t));
*uni_name.name = 0x0;
- fs_func->get_uni_name_from_ext_entry(sb, &dir, dentry,
- uni_name.name);
+ exfat_get_uni_name_from_ext_entry(sb, &dir, dentry,
+ uni_name.name);
nls_uniname_to_cstring(sb, dir_entry->Name, &uni_name);
exfat_buf_unlock(sb, sector);
@@ -1957,7 +1954,7 @@ static int ffsReadDir(struct inode *inode, struct dir_entry_t *dir_entry)
goto out;
}
- dir_entry->Size = fs_func->get_entry_size(ep);
+ dir_entry->Size = exfat_get_entry_size(ep);
/* hint information */
if (dir.dir == CLUSTER_32(0)) { /* FAT16 root_dir */
@@ -2047,13 +2044,13 @@ static int ffsRemoveDir(struct inode *inode, struct file_id_t *fid)
remove_file(inode, &dir, dentry);
/* (2) free the clusters */
- p_fs->fs_func->free_cluster(sb, &clu_to_free, 1);
+ exfat_free_cluster(sb, &clu_to_free, 1);
fid->size = 0;
fid->start_clu = CLUSTER_32(~0);
fid->flags = (p_fs->vol_type == EXFAT) ? 0x03 : 0x01;
-#ifndef CONFIG_EXFAT_DELAYED_SYNC
+#ifndef CONFIG_STAGING_EXFAT_DELAYED_SYNC
fs_sync(sb, true);
fs_set_vol_flags(sb, VOL_CLEAN);
#endif
@@ -2176,14 +2173,14 @@ static long exfat_generic_ioctl(struct file *filp, unsigned int cmd,
unsigned long arg)
{
struct inode *inode = filp->f_path.dentry->d_inode;
-#ifdef CONFIG_EXFAT_KERNEL_DEBUG
+#ifdef CONFIG_STAGING_EXFAT_KERNEL_DEBUG
unsigned int flags;
-#endif /* CONFIG_EXFAT_KERNEL_DEBUG */
+#endif /* CONFIG_STAGING_EXFAT_KERNEL_DEBUG */
switch (cmd) {
case EXFAT_IOCTL_GET_VOLUME_ID:
return exfat_ioctl_volume_id(inode);
-#ifdef CONFIG_EXFAT_KERNEL_DEBUG
+#ifdef CONFIG_STAGING_EXFAT_KERNEL_DEBUG
case EXFAT_IOC_GET_DEBUGFLAGS: {
struct super_block *sb = inode->i_sb;
struct exfat_sb_info *sbi = EXFAT_SB(sb);
@@ -2207,7 +2204,7 @@ static long exfat_generic_ioctl(struct file *filp, unsigned int cmd,
return 0;
}
-#endif /* CONFIG_EXFAT_KERNEL_DEBUG */
+#endif /* CONFIG_STAGING_EXFAT_KERNEL_DEBUG */
default:
return -ENOTTY; /* Inappropriate ioctl for device */
}
@@ -3400,7 +3397,7 @@ static int exfat_show_options(struct seq_file *m, struct dentry *root)
seq_puts(m, ",errors=panic");
else
seq_puts(m, ",errors=remount-ro");
-#ifdef CONFIG_EXFAT_DISCARD
+#ifdef CONFIG_STAGING_EXFAT_DISCARD
if (opts->discard)
seq_puts(m, ",discard");
#endif
@@ -3481,7 +3478,7 @@ enum {
Opt_err_ro,
Opt_utf8_hack,
Opt_err,
-#ifdef CONFIG_EXFAT_DISCARD
+#ifdef CONFIG_STAGING_EXFAT_DISCARD
Opt_discard,
#endif /* CONFIG_STAGING_EXFAT_DISCARD */
};
@@ -3501,9 +3498,9 @@ static const match_table_t exfat_tokens = {
{Opt_err_panic, "errors=panic"},
{Opt_err_ro, "errors=remount-ro"},
{Opt_utf8_hack, "utf8"},
-#ifdef CONFIG_EXFAT_DISCARD
+#ifdef CONFIG_STAGING_EXFAT_DISCARD
{Opt_discard, "discard"},
-#endif /* CONFIG_EXFAT_DISCARD */
+#endif /* CONFIG_STAGING_EXFAT_DISCARD */
{Opt_err, NULL}
};
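For reference, the exfat_tokens table above is consumed with the kernel's generic option parser from <linux/parser.h>; a hedged sketch of the loop that parse_options() builds around it (condensed, error reporting omitted):

    substring_t args[MAX_OPT_ARGS];
    char *p;

    while ((p = strsep(&options, ",")) != NULL) {
            int token;

            if (!*p)
                    continue;
            token = match_token(p, exfat_tokens, args);
            switch (token) {
    #ifdef CONFIG_STAGING_EXFAT_DISCARD
            case Opt_discard:
                    opts->discard = 1;
                    break;
    #endif /* CONFIG_STAGING_EXFAT_DISCARD */
            default:
                    break;  /* the real code reports unrecognized options */
            }
    }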
@@ -3524,7 +3521,7 @@ static int parse_options(char *options, int silent, int *debug,
opts->iocharset = exfat_default_iocharset;
opts->casesensitive = 0;
opts->errors = EXFAT_ERRORS_RO;
-#ifdef CONFIG_EXFAT_DISCARD
+#ifdef CONFIG_STAGING_EXFAT_DISCARD
opts->discard = 0;
#endif
*debug = 0;
@@ -3595,11 +3592,11 @@ static int parse_options(char *options, int silent, int *debug,
case Opt_debug:
*debug = 1;
break;
-#ifdef CONFIG_EXFAT_DISCARD
+#ifdef CONFIG_STAGING_EXFAT_DISCARD
case Opt_discard:
opts->discard = 1;
break;
-#endif /* CONFIG_EXFAT_DISCARD */
+#endif /* CONFIG_STAGING_EXFAT_DISCARD */
case Opt_utf8_hack:
break;
default:
@@ -3803,7 +3800,7 @@ static void __exit exfat_destroy_inodecache(void)
kmem_cache_destroy(exfat_inode_cachep);
}
-#ifdef CONFIG_EXFAT_KERNEL_DEBUG
+#ifdef CONFIG_STAGING_EXFAT_KERNEL_DEBUG
static void exfat_debug_kill_sb(struct super_block *sb)
{
struct exfat_sb_info *sbi = EXFAT_SB(sb);
@@ -3831,17 +3828,17 @@ static void exfat_debug_kill_sb(struct super_block *sb)
kill_block_super(sb);
}
-#endif /* CONFIG_EXFAT_KERNEL_DEBUG */
+#endif /* CONFIG_STAGING_EXFAT_KERNEL_DEBUG */
static struct file_system_type exfat_fs_type = {
.owner = THIS_MODULE,
.name = "exfat",
.mount = exfat_fs_mount,
-#ifdef CONFIG_EXFAT_KERNEL_DEBUG
+#ifdef CONFIG_STAGING_EXFAT_KERNEL_DEBUG
.kill_sb = exfat_debug_kill_sb,
#else
.kill_sb = kill_block_super,
-#endif /* CONFIG_EXFAT_KERNEL_DEBUG */
+#endif /* CONFIG_STAGING_EXFAT_KERNEL_DEBUG */
.fs_flags = FS_REQUIRES_DEV,
};
diff --git a/drivers/staging/gasket/gasket_core.c b/drivers/staging/gasket/gasket_core.c
index cd8be80d2076..be6b50f454b4 100644
--- a/drivers/staging/gasket/gasket_core.c
+++ b/drivers/staging/gasket/gasket_core.c
@@ -303,7 +303,7 @@ static int gasket_map_pci_bar(struct gasket_dev *gasket_dev, int bar_num)
}
gasket_dev->bar_data[bar_num].virt_base =
- ioremap_nocache(gasket_dev->bar_data[bar_num].phys_base,
+ ioremap(gasket_dev->bar_data[bar_num].phys_base,
gasket_dev->bar_data[bar_num].length_bytes);
if (!gasket_dev->bar_data[bar_num].virt_base) {
dev_err(gasket_dev->dev,
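ioremap_nocache() had long been a synonym for ioremap() on every architecture (both return an uncached mapping, which is what MMIO needs), so the alias is being removed tree-wide and callers move to plain ioremap(). The minimal usage pattern for a PCI BAR, with illustrative variable names:

    void __iomem *base;

    base = ioremap(pci_resource_start(pdev, bar),
                   pci_resource_len(pdev, bar));
    if (!base)
            return -ENOMEM;

    /* ... readl(base + reg_off) / writel(val, base + reg_off) ... */

    iounmap(base);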
diff --git a/drivers/staging/hp/hp100.c b/drivers/staging/hp/hp100.c
index 6ec78f5c602f..e2f0b58e5dfd 100644
--- a/drivers/staging/hp/hp100.c
+++ b/drivers/staging/hp/hp100.c
@@ -339,14 +339,11 @@ static __init int hp100_isa_probe1(struct net_device *dev, int ioaddr)
if (sig == NULL)
goto err;
- for (i = 0; i < ARRAY_SIZE(hp100_isa_tbl); i++) {
- if (!strcmp(hp100_isa_tbl[i], sig))
- break;
-
- }
+ i = match_string(hp100_isa_tbl, ARRAY_SIZE(hp100_isa_tbl), sig);
+ if (i < 0)
+ goto err;
- if (i < ARRAY_SIZE(hp100_isa_tbl))
- return hp100_probe1(dev, ioaddr, HP100_BUS_ISA, NULL);
+ return hp100_probe1(dev, ioaddr, HP100_BUS_ISA, NULL);
err:
return -ENODEV;
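match_string() from <linux/string.h> folds the open-coded scan and its bounds check into a single call: it returns the index of the first array entry that strcmp()-matches the given string, or -EINVAL when none does. Equivalent usage in isolation (the table contents here are illustrative, not the real hp100 signatures):

    static const char * const sigs[] = { "HWPF", "HWP2" };
    int i;

    i = match_string(sigs, ARRAY_SIZE(sigs), sig);
    if (i < 0)
            return -ENODEV; /* signature not recognized */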
diff --git a/drivers/staging/iio/accel/adis16203.c b/drivers/staging/iio/accel/adis16203.c
index 39687139a7d3..39dfe3f7f254 100644
--- a/drivers/staging/iio/accel/adis16203.c
+++ b/drivers/staging/iio/accel/adis16203.c
@@ -237,6 +237,12 @@ static const char * const adis16203_status_error_msgs[] = {
[ADIS16203_DIAG_STAT_POWER_LOW_BIT] = "Power supply below 2.975V",
};
+static const struct adis_timeout adis16203_timeouts = {
+ .reset_ms = ADIS16203_STARTUP_DELAY,
+ .sw_reset_ms = ADIS16203_STARTUP_DELAY,
+ .self_test_ms = ADIS16203_STARTUP_DELAY
+};
+
static const struct adis_data adis16203_data = {
.read_delay = 20,
.msc_ctrl_reg = ADIS16203_MSC_CTRL,
@@ -245,7 +251,7 @@ static const struct adis_data adis16203_data = {
.self_test_mask = ADIS16203_MSC_CTRL_SELF_TEST_EN,
.self_test_no_autoclear = true,
- .startup_delay = ADIS16203_STARTUP_DELAY,
+ .timeouts = &adis16203_timeouts,
.status_error_msgs = adis16203_status_error_msgs,
.status_error_mask = BIT(ADIS16203_DIAG_STAT_SELFTEST_FAIL_BIT) |
diff --git a/drivers/staging/iio/accel/adis16240.c b/drivers/staging/iio/accel/adis16240.c
index a480409090c0..39eb8364aa95 100644
--- a/drivers/staging/iio/accel/adis16240.c
+++ b/drivers/staging/iio/accel/adis16240.c
@@ -359,6 +359,12 @@ static const char * const adis16240_status_error_msgs[] = {
[ADIS16240_DIAG_STAT_POWER_LOW_BIT] = "Power supply below 2.225V",
};
+static const struct adis_timeout adis16240_timeouts = {
+ .reset_ms = ADIS16240_STARTUP_DELAY,
+ .sw_reset_ms = ADIS16240_STARTUP_DELAY,
+ .self_test_ms = ADIS16240_STARTUP_DELAY,
+};
+
static const struct adis_data adis16240_data = {
.write_delay = 35,
.read_delay = 35,
@@ -368,7 +374,7 @@ static const struct adis_data adis16240_data = {
.self_test_mask = ADIS16240_MSC_CTRL_SELF_TEST_EN,
.self_test_no_autoclear = true,
- .startup_delay = ADIS16240_STARTUP_DELAY,
+ .timeouts = &adis16240_timeouts,
.status_error_msgs = adis16240_status_error_msgs,
.status_error_mask = BIT(ADIS16240_DIAG_STAT_PWRON_FAIL_BIT) |
@@ -399,6 +405,13 @@ static int adis16240_probe(struct spi_device *spi)
indio_dev->num_channels = ARRAY_SIZE(adis16240_channels);
indio_dev->modes = INDIO_DIRECT_MODE;
+ spi->mode = SPI_MODE_3;
+ ret = spi_setup(spi);
+ if (ret) {
+ dev_err(&spi->dev, "spi_setup failed!\n");
+ return ret;
+ }
+
ret = adis_init(st, indio_dev, spi, &adis16240_data);
if (ret)
return ret;
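Two independent fixes land in this driver. First, the adis core now takes its delays through a struct adis_timeout (reset, software reset, and self-test each get their own value) rather than the single startup_delay, so these drivers wrap their old constant in a timeouts struct. Second, adis16240_probe() pins the bus to SPI mode 3 before adis_init(), since the part clocks data with CPOL=1/CPHA=1 and must not inherit whatever mode the controller was last using. The same probe-time pattern for a hypothetical ADIS part (the 220 ms values are placeholders):

    static const struct adis_timeout my_timeouts = {
            .reset_ms     = 220,    /* ms to wait after a hard reset */
            .sw_reset_ms  = 220,    /* ms to wait after a software reset */
            .self_test_ms = 220,    /* ms to wait after triggering self-test */
    };

    /* in probe(), before adis_init(): */
    spi->mode = SPI_MODE_3;
    ret = spi_setup(spi);
    if (ret) {
            dev_err(&spi->dev, "spi_setup failed!\n");
            return ret;
    }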
diff --git a/drivers/staging/isdn/Kconfig b/drivers/staging/isdn/Kconfig
deleted file mode 100644
index faaf63887094..000000000000
--- a/drivers/staging/isdn/Kconfig
+++ /dev/null
@@ -1,12 +0,0 @@
-# SPDX-License-Identifier: GPL-2.0-only
-menu "ISDN CAPI drivers"
- depends on ISDN_CAPI
-
-source "drivers/staging/isdn/avm/Kconfig"
-
-source "drivers/staging/isdn/gigaset/Kconfig"
-
-source "drivers/staging/isdn/hysdn/Kconfig"
-
-endmenu
-
diff --git a/drivers/staging/isdn/Makefile b/drivers/staging/isdn/Makefile
deleted file mode 100644
index 025504bae5df..000000000000
--- a/drivers/staging/isdn/Makefile
+++ /dev/null
@@ -1,8 +0,0 @@
-# SPDX-License-Identifier: GPL-2.0
-# Makefile for the kernel ISDN subsystem and device drivers.
-
-# Object files in subdirectories
-
-obj-$(CONFIG_CAPI_AVM) += avm/
-obj-$(CONFIG_HYSDN) += hysdn/
-obj-$(CONFIG_ISDN_DRV_GIGASET) += gigaset/
diff --git a/drivers/staging/isdn/TODO b/drivers/staging/isdn/TODO
deleted file mode 100644
index 9210d11eb68b..000000000000
--- a/drivers/staging/isdn/TODO
+++ /dev/null
@@ -1,22 +0,0 @@
-TODO: Remove in late 2019 unless there are users
-
-
-I tried to find any indication of whether the capi drivers are
-still in use, and have not found anything from a long time ago.
-
-With public ISDN networks almost completely shut down over the past 12
-months, there is very little you can actually do with this hardware. The
-main remaining use case would be to connect ISDN voice phones to an
-in-house installation with Asterisk or LCR, but anyone trying this in
-turn seems to be using either the mISDN driver stack, or out-of-tree
-drivers from the hardware vendors.
-
-I may of course have missed something, so I would suggest moving
-these into drivers/staging/ just in case someone still uses one
-of the three remaining in-kernel drivers (avm, hysdn, gigaset).
-
-If nobody complains, we can remove them entirely in six months,
-or otherwise move the core code and any drivers that are still
-needed back into drivers/isdn.
-
- Arnd Bergmann <arnd@arndb.de>
diff --git a/drivers/staging/isdn/avm/Kconfig b/drivers/staging/isdn/avm/Kconfig
deleted file mode 100644
index 81483db067bb..000000000000
--- a/drivers/staging/isdn/avm/Kconfig
+++ /dev/null
@@ -1,65 +0,0 @@
-# SPDX-License-Identifier: GPL-2.0-only
-#
-# ISDN AVM drivers
-#
-
-menuconfig CAPI_AVM
- bool "Active AVM cards"
- help
- Enable support for AVM active ISDN cards.
-
-if CAPI_AVM
-
-config ISDN_DRV_AVMB1_B1ISA
- tristate "AVM B1 ISA support"
- depends on ISA
- help
- Enable support for the ISA version of the AVM B1 card.
-
-config ISDN_DRV_AVMB1_B1PCI
- tristate "AVM B1 PCI support"
- depends on PCI
- help
- Enable support for the PCI version of the AVM B1 card.
-
-config ISDN_DRV_AVMB1_B1PCIV4
- bool "AVM B1 PCI V4 support"
- depends on ISDN_DRV_AVMB1_B1PCI
- help
- Enable support for the V4 version of AVM B1 PCI card.
-
-config ISDN_DRV_AVMB1_T1ISA
- tristate "AVM T1/T1-B ISA support"
- depends on ISA
- help
- Enable support for the AVM T1 T1B card.
-	  Note: This is a PRI card and handles 30 B-channels.
-
-config ISDN_DRV_AVMB1_B1PCMCIA
- tristate "AVM B1/M1/M2 PCMCIA support"
- depends on PCMCIA
- help
- Enable support for the PCMCIA version of the AVM B1 card.
-
-config ISDN_DRV_AVMB1_AVM_CS
- tristate "AVM B1/M1/M2 PCMCIA cs module"
- depends on ISDN_DRV_AVMB1_B1PCMCIA
- help
- Enable the PCMCIA client driver for the AVM B1/M1/M2
- PCMCIA cards.
-
-config ISDN_DRV_AVMB1_T1PCI
- tristate "AVM T1/T1-B PCI support"
- depends on PCI
- help
- Enable support for the AVM T1 T1B card.
-	  Note: This is a PRI card and handles 30 B-channels.
-
-config ISDN_DRV_AVMB1_C4
- tristate "AVM C4/C2 support"
- depends on PCI
- help
- Enable support for the AVM C4/C2 PCI cards.
- These cards handle 4/2 BRI ISDN lines (8/4 channels).
-
-endif # CAPI_AVM
diff --git a/drivers/staging/isdn/avm/Makefile b/drivers/staging/isdn/avm/Makefile
deleted file mode 100644
index 3830a0573fcc..000000000000
--- a/drivers/staging/isdn/avm/Makefile
+++ /dev/null
@@ -1,12 +0,0 @@
-# SPDX-License-Identifier: GPL-2.0
-# Makefile for the AVM ISDN device drivers
-
-# Each configuration option enables a list of files.
-
-obj-$(CONFIG_ISDN_DRV_AVMB1_B1ISA) += b1isa.o b1.o
-obj-$(CONFIG_ISDN_DRV_AVMB1_B1PCI) += b1pci.o b1.o b1dma.o
-obj-$(CONFIG_ISDN_DRV_AVMB1_B1PCMCIA) += b1pcmcia.o b1.o
-obj-$(CONFIG_ISDN_DRV_AVMB1_AVM_CS) += avm_cs.o
-obj-$(CONFIG_ISDN_DRV_AVMB1_T1ISA) += t1isa.o b1.o
-obj-$(CONFIG_ISDN_DRV_AVMB1_T1PCI) += t1pci.o b1.o b1dma.o
-obj-$(CONFIG_ISDN_DRV_AVMB1_C4) += c4.o b1.o
diff --git a/drivers/staging/isdn/avm/avm_cs.c b/drivers/staging/isdn/avm/avm_cs.c
deleted file mode 100644
index 62b8030ee331..000000000000
--- a/drivers/staging/isdn/avm/avm_cs.c
+++ /dev/null
@@ -1,166 +0,0 @@
-/* $Id: avm_cs.c,v 1.4.6.3 2001/09/23 22:24:33 kai Exp $
- *
- * A PCMCIA client driver for AVM B1/M1/M2
- *
- * Copyright 1999 by Carsten Paeth <calle@calle.de>
- *
- * This software may be used and distributed according to the terms
- * of the GNU General Public License, incorporated herein by reference.
- *
- */
-
-#include <linux/module.h>
-#include <linux/kernel.h>
-#include <linux/init.h>
-#include <linux/ptrace.h>
-#include <linux/string.h>
-#include <linux/tty.h>
-#include <linux/serial.h>
-#include <linux/major.h>
-#include <asm/io.h>
-
-#include <pcmcia/cistpl.h>
-#include <pcmcia/ciscode.h>
-#include <pcmcia/ds.h>
-#include <pcmcia/cisreg.h>
-
-#include <linux/skbuff.h>
-#include <linux/capi.h>
-#include <linux/b1lli.h>
-#include <linux/b1pcmcia.h>
-
-/*====================================================================*/
-
-MODULE_DESCRIPTION("CAPI4Linux: PCMCIA client driver for AVM B1/M1/M2");
-MODULE_AUTHOR("Carsten Paeth");
-MODULE_LICENSE("GPL");
-
-/*====================================================================*/
-
-static int avmcs_config(struct pcmcia_device *link);
-static void avmcs_release(struct pcmcia_device *link);
-static void avmcs_detach(struct pcmcia_device *p_dev);
-
-static int avmcs_probe(struct pcmcia_device *p_dev)
-{
- /* General socket configuration */
- p_dev->config_flags |= CONF_ENABLE_IRQ | CONF_AUTO_SET_IO;
- p_dev->config_index = 1;
- p_dev->config_regs = PRESENT_OPTION;
-
- return avmcs_config(p_dev);
-} /* avmcs_attach */
-
-
-static void avmcs_detach(struct pcmcia_device *link)
-{
- avmcs_release(link);
-} /* avmcs_detach */
-
-static int avmcs_configcheck(struct pcmcia_device *p_dev, void *priv_data)
-{
- p_dev->resource[0]->end = 16;
- p_dev->resource[0]->flags &= ~IO_DATA_PATH_WIDTH;
- p_dev->resource[0]->flags |= IO_DATA_PATH_WIDTH_8;
-
- return pcmcia_request_io(p_dev);
-}
-
-static int avmcs_config(struct pcmcia_device *link)
-{
- int i = -1;
- char devname[128];
- int cardtype;
- int (*addcard)(unsigned int port, unsigned irq);
-
- devname[0] = 0;
- if (link->prod_id[1])
- strlcpy(devname, link->prod_id[1], sizeof(devname));
-
- /*
- * find IO port
- */
- if (pcmcia_loop_config(link, avmcs_configcheck, NULL))
- return -ENODEV;
-
- do {
- if (!link->irq) {
- /* undo */
- pcmcia_disable_device(link);
- break;
- }
-
- /*
- * configure the PCMCIA socket
- */
- i = pcmcia_enable_device(link);
- if (i != 0) {
- pcmcia_disable_device(link);
- break;
- }
-
- } while (0);
-
- if (devname[0]) {
- char *s = strrchr(devname, ' ');
- if (!s)
- s = devname;
- else s++;
- if (strcmp("M1", s) == 0) {
- cardtype = AVM_CARDTYPE_M1;
- } else if (strcmp("M2", s) == 0) {
- cardtype = AVM_CARDTYPE_M2;
- } else {
- cardtype = AVM_CARDTYPE_B1;
- }
- } else
- cardtype = AVM_CARDTYPE_B1;
-
- /* If any step failed, release any partially configured state */
- if (i != 0) {
- avmcs_release(link);
- return -ENODEV;
- }
-
-
- switch (cardtype) {
- case AVM_CARDTYPE_M1: addcard = b1pcmcia_addcard_m1; break;
- case AVM_CARDTYPE_M2: addcard = b1pcmcia_addcard_m2; break;
- default:
- case AVM_CARDTYPE_B1: addcard = b1pcmcia_addcard_b1; break;
- }
- if ((i = (*addcard)(link->resource[0]->start, link->irq)) < 0) {
- dev_err(&link->dev,
- "avm_cs: failed to add AVM-Controller at i/o %#x, irq %d\n",
- (unsigned int) link->resource[0]->start, link->irq);
- avmcs_release(link);
- return -ENODEV;
- }
- return 0;
-
-} /* avmcs_config */
-
-
-static void avmcs_release(struct pcmcia_device *link)
-{
- b1pcmcia_delcard(link->resource[0]->start, link->irq);
- pcmcia_disable_device(link);
-} /* avmcs_release */
-
-
-static const struct pcmcia_device_id avmcs_ids[] = {
- PCMCIA_DEVICE_PROD_ID12("AVM", "ISDN-Controller B1", 0x95d42008, 0x845dc335),
- PCMCIA_DEVICE_PROD_ID12("AVM", "Mobile ISDN-Controller M1", 0x95d42008, 0x81e10430),
- PCMCIA_DEVICE_PROD_ID12("AVM", "Mobile ISDN-Controller M2", 0x95d42008, 0x18e8558a),
- PCMCIA_DEVICE_NULL
-};
-MODULE_DEVICE_TABLE(pcmcia, avmcs_ids);
-
-static struct pcmcia_driver avmcs_driver = {
- .owner = THIS_MODULE,
- .name = "avm_cs",
- .probe = avmcs_probe,
- .remove = avmcs_detach,
- .id_table = avmcs_ids,
-};
-module_pcmcia_driver(avmcs_driver);
diff --git a/drivers/staging/isdn/avm/avmcard.h b/drivers/staging/isdn/avm/avmcard.h
deleted file mode 100644
index cdfa89c71997..000000000000
--- a/drivers/staging/isdn/avm/avmcard.h
+++ /dev/null
@@ -1,581 +0,0 @@
-/* $Id: avmcard.h,v 1.1.4.1.2.1 2001/12/21 15:00:17 kai Exp $
- *
- * Copyright 1999 by Carsten Paeth <calle@calle.de>
- *
- * This software may be used and distributed according to the terms
- * of the GNU General Public License, incorporated herein by reference.
- *
- */
-
-#ifndef _AVMCARD_H_
-#define _AVMCARD_H_
-
-#include <linux/spinlock.h>
-#include <linux/list.h>
-#include <linux/interrupt.h>
-
-#define AVMB1_PORTLEN 0x1f
-#define AVM_MAXVERSION 8
-#define AVM_NCCI_PER_CHANNEL 4
-
-/*
- * Versions
- */
-
-#define VER_DRIVER 0
-#define VER_CARDTYPE 1
-#define VER_HWID 2
-#define VER_SERIAL 3
-#define VER_OPTION 4
-#define VER_PROTO 5
-#define VER_PROFILE 6
-#define VER_CAPI 7
-
-enum avmcardtype {
- avm_b1isa,
- avm_b1pci,
- avm_b1pcmcia,
- avm_m1,
- avm_m2,
- avm_t1isa,
- avm_t1pci,
- avm_c4,
- avm_c2
-};
-
-typedef struct avmcard_dmabuf {
- long size;
- u8 *dmabuf;
- dma_addr_t dmaaddr;
-} avmcard_dmabuf;
-
-typedef struct avmcard_dmainfo {
- u32 recvlen;
- avmcard_dmabuf recvbuf;
-
- avmcard_dmabuf sendbuf;
- struct sk_buff_head send_queue;
-
- struct pci_dev *pcidev;
-} avmcard_dmainfo;
-
-typedef struct avmctrl_info {
- char cardname[32];
-
- int versionlen;
- char versionbuf[1024];
- char *version[AVM_MAXVERSION];
-
- char infobuf[128]; /* for function procinfo */
-
- struct avmcard *card;
- struct capi_ctr capi_ctrl;
-
- struct list_head ncci_head;
-} avmctrl_info;
-
-typedef struct avmcard {
- char name[32];
-
- spinlock_t lock;
- unsigned int port;
- unsigned irq;
- unsigned long membase;
- enum avmcardtype cardtype;
- unsigned char revision;
- unsigned char class;
- int cardnr; /* for t1isa */
-
- char msgbuf[128]; /* capimsg msg part */
- char databuf[2048]; /* capimsg data part */
-
- void __iomem *mbase;
- volatile u32 csr;
- avmcard_dmainfo *dma;
-
- struct avmctrl_info *ctrlinfo;
-
- u_int nr_controllers;
- u_int nlogcontr;
- struct list_head list;
-} avmcard;
-
-extern int b1_irq_table[16];
-
-/*
- * LLI Messages to the ISDN Controller
- */
-
-#define SEND_POLL 0x72 /*
- * after load <- RECEIVE_POLL
- */
-#define SEND_INIT 0x11 /*
- * first message <- RECEIVE_INIT
- * int32 NumApplications int32
- * NumNCCIs int32 BoardNumber
- */
-#define SEND_REGISTER 0x12 /*
- * register an application int32
- * ApplIDId int32 NumMessages
- * int32 NumB3Connections int32
- * NumB3Blocks int32 B3Size
- *
- * AnzB3Connection != 0 &&
- * AnzB3Blocks >= 1 && B3Size >= 1
- */
-#define SEND_RELEASE 0x14 /*
- * deregister an application int32
- * ApplID
- */
-#define SEND_MESSAGE 0x15 /*
- * send capi-message int32 length
- * capi-data ...
- */
-#define SEND_DATA_B3_REQ 0x13 /*
- * send capi-data-message int32
- * MsgLength capi-data ... int32
- * B3Length data ....
- */
-
-#define SEND_CONFIG 0x21 /*
- */
-
-#define SEND_POLLACK 0x73 /* T1 Watchdog */
-
-/*
- * LLI Messages from the ISDN Controller
- */
-
-#define RECEIVE_POLL 0x32 /*
- * <- after SEND_POLL
- */
-#define RECEIVE_INIT 0x27 /*
- * <- after SEND_INIT int32 length
- * byte total length b1struct board
- * driver revision b1struct card
- * type b1struct reserved b1struct
- * serial number b1struct driver
- * capability b1struct d-channel
- * protocol b1struct CAPI-2.0
- * profile b1struct capi version
- */
-#define RECEIVE_MESSAGE 0x21 /*
- * <- after SEND_MESSAGE int32
- * AppllID int32 Length capi-data
- * ....
- */
-#define RECEIVE_DATA_B3_IND 0x22 /*
- * received data int32 ApplID
- * int32 Length capi-data ...
- * int32 B3Length data ...
- */
-#define RECEIVE_START 0x23 /*
- * Handshake
- */
-#define RECEIVE_STOP 0x24 /*
- * Handshake
- */
-#define RECEIVE_NEW_NCCI 0x25 /*
- * int32 ApplID int32 NCCI int32
- * WindowSize
- */
-#define RECEIVE_FREE_NCCI 0x26 /*
- * int32 ApplID int32 NCCI
- */
-#define RECEIVE_RELEASE 0x26 /*
- * int32 ApplID int32 0xffffffff
- */
-#define RECEIVE_TASK_READY 0x31 /*
- * int32 tasknr
- * int32 Length Taskname ...
- */
-#define RECEIVE_DEBUGMSG 0x71 /*
- * int32 Length message
- *
- */
-#define RECEIVE_POLLDWORD 0x75 /* t1pci in dword mode */
-
-#define WRITE_REGISTER 0x00
-#define READ_REGISTER 0x01
-
-/*
- * port offsets
- */
-
-#define B1_READ 0x00
-#define B1_WRITE 0x01
-#define B1_INSTAT 0x02
-#define B1_OUTSTAT 0x03
-#define B1_ANALYSE 0x04
-#define B1_REVISION 0x05
-#define B1_RESET 0x10
-
-
-#define B1_STAT0(cardtype) ((cardtype) == avm_m1 ? 0x81200000l : 0x80A00000l)
-#define B1_STAT1(cardtype) (0x80E00000l)
-
-/* ---------------------------------------------------------------- */
-
-static inline unsigned char b1outp(unsigned int base,
- unsigned short offset,
- unsigned char value)
-{
- outb(value, base + offset);
- return inb(base + B1_ANALYSE);
-}
-
-
-static inline int b1_rx_full(unsigned int base)
-{
- return inb(base + B1_INSTAT) & 0x1;
-}
-
-static inline unsigned char b1_get_byte(unsigned int base)
-{
- unsigned long stop = jiffies + 1 * HZ; /* maximum wait time 1 sec */
- while (!b1_rx_full(base) && time_before(jiffies, stop));
- if (b1_rx_full(base))
- return inb(base + B1_READ);
- printk(KERN_CRIT "b1lli(0x%x): rx not full after 1 second\n", base);
- return 0;
-}
-
-static inline unsigned int b1_get_word(unsigned int base)
-{
- unsigned int val = 0;
- val |= b1_get_byte(base);
- val |= (b1_get_byte(base) << 8);
- val |= (b1_get_byte(base) << 16);
- val |= (b1_get_byte(base) << 24);
- return val;
-}
-
-static inline int b1_tx_empty(unsigned int base)
-{
- return inb(base + B1_OUTSTAT) & 0x1;
-}
-
-static inline void b1_put_byte(unsigned int base, unsigned char val)
-{
- while (!b1_tx_empty(base));
- b1outp(base, B1_WRITE, val);
-}
-
-static inline int b1_save_put_byte(unsigned int base, unsigned char val)
-{
- unsigned long stop = jiffies + 2 * HZ;
- while (!b1_tx_empty(base) && time_before(jiffies, stop));
- if (!b1_tx_empty(base)) return -1;
- b1outp(base, B1_WRITE, val);
- return 0;
-}
-
-static inline void b1_put_word(unsigned int base, unsigned int val)
-{
- b1_put_byte(base, val & 0xff);
- b1_put_byte(base, (val >> 8) & 0xff);
- b1_put_byte(base, (val >> 16) & 0xff);
- b1_put_byte(base, (val >> 24) & 0xff);
-}
-
-static inline unsigned int b1_get_slice(unsigned int base,
- unsigned char *dp)
-{
- unsigned int len, i;
-
- len = i = b1_get_word(base);
- while (i-- > 0) *dp++ = b1_get_byte(base);
- return len;
-}
-
-static inline void b1_put_slice(unsigned int base,
- unsigned char *dp, unsigned int len)
-{
- unsigned i = len;
- b1_put_word(base, i);
- while (i-- > 0)
- b1_put_byte(base, *dp++);
-}
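
/*
 * What b1_put_slice() puts on the wire, modeled with a memory buffer
 * in place of the I/O port: a little-endian 32-bit length, then the
 * raw payload bytes. A sketch, not driver code.
 */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

int main(void)
{
	const char payload[] = "CAPI";
	uint32_t len = 4;
	uint8_t wire[16];

	wire[0] = len & 0xff;
	wire[1] = (len >> 8) & 0xff;
	wire[2] = (len >> 16) & 0xff;
	wire[3] = (len >> 24) & 0xff;
	memcpy(wire + 4, payload, len);

	for (uint32_t i = 0; i < len + 4; i++)
		printf("%02x ", wire[i]);
	printf("\n");	/* prints: 04 00 00 00 43 41 50 49 */
	return 0;
}
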
-
-static void b1_wr_reg(unsigned int base,
- unsigned int reg,
- unsigned int value)
-{
- b1_put_byte(base, WRITE_REGISTER);
- b1_put_word(base, reg);
- b1_put_word(base, value);
-}
-
-static inline unsigned int b1_rd_reg(unsigned int base,
- unsigned int reg)
-{
- b1_put_byte(base, READ_REGISTER);
- b1_put_word(base, reg);
- return b1_get_word(base);
-}
-
-static inline void b1_reset(unsigned int base)
-{
- b1outp(base, B1_RESET, 0);
- mdelay(55 * 2); /* 2 TICs */
-
- b1outp(base, B1_RESET, 1);
- mdelay(55 * 2); /* 2 TICs */
-
- b1outp(base, B1_RESET, 0);
- mdelay(55 * 2); /* 2 TICs */
-}
-
-static inline unsigned char b1_disable_irq(unsigned int base)
-{
- return b1outp(base, B1_INSTAT, 0x00);
-}
-
-/* ---------------------------------------------------------------- */
-
-static inline void b1_set_test_bit(unsigned int base,
- enum avmcardtype cardtype,
- int onoff)
-{
- b1_wr_reg(base, B1_STAT0(cardtype), onoff ? 0x21 : 0x20);
-}
-
-static inline int b1_get_test_bit(unsigned int base,
- enum avmcardtype cardtype)
-{
- return (b1_rd_reg(base, B1_STAT0(cardtype)) & 0x01) != 0;
-}
-
-/* ---------------------------------------------------------------- */
-
-#define T1_FASTLINK 0x00
-#define T1_SLOWLINK 0x08
-
-#define T1_READ B1_READ
-#define T1_WRITE B1_WRITE
-#define T1_INSTAT B1_INSTAT
-#define T1_OUTSTAT B1_OUTSTAT
-#define T1_IRQENABLE 0x05
-#define T1_FIFOSTAT 0x06
-#define T1_RESETLINK 0x10
-#define T1_ANALYSE 0x11
-#define T1_IRQMASTER 0x12
-#define T1_IDENT 0x17
-#define T1_RESETBOARD 0x1f
-
-#define T1F_IREADY 0x01
-#define T1F_IHALF 0x02
-#define T1F_IFULL 0x04
-#define T1F_IEMPTY 0x08
-#define T1F_IFLAGS 0xF0
-
-#define T1F_OREADY 0x10
-#define T1F_OHALF 0x20
-#define T1F_OEMPTY 0x40
-#define T1F_OFULL 0x80
-#define T1F_OFLAGS 0xF0
-
-/* there are HEMA cards with 1k and 4k FIFO out */
-#define FIFO_OUTBSIZE 256
-#define FIFO_INPBSIZE 512
-
-#define HEMA_VERSION_ID 0
-#define HEMA_PAL_ID 0
-
-static inline void t1outp(unsigned int base,
- unsigned short offset,
- unsigned char value)
-{
- outb(value, base + offset);
-}
-
-static inline unsigned char t1inp(unsigned int base,
- unsigned short offset)
-{
- return inb(base + offset);
-}
-
-static inline int t1_isfastlink(unsigned int base)
-{
- return (inb(base + T1_IDENT) & ~0x82) == 1;
-}
-
-static inline unsigned char t1_fifostatus(unsigned int base)
-{
- return inb(base + T1_FIFOSTAT);
-}
-
-static inline unsigned int t1_get_slice(unsigned int base,
- unsigned char *dp)
-{
- unsigned int len, i;
-#ifdef FASTLINK_DEBUG
- unsigned wcnt = 0, bcnt = 0;
-#endif
-
- len = i = b1_get_word(base);
- if (t1_isfastlink(base)) {
- int status;
- while (i > 0) {
- status = t1_fifostatus(base) & (T1F_IREADY | T1F_IHALF);
- if (i >= FIFO_INPBSIZE) status |= T1F_IFULL;
-
- switch (status) {
- case T1F_IREADY | T1F_IHALF | T1F_IFULL:
- insb(base + B1_READ, dp, FIFO_INPBSIZE);
- dp += FIFO_INPBSIZE;
- i -= FIFO_INPBSIZE;
-#ifdef FASTLINK_DEBUG
- wcnt += FIFO_INPBSIZE;
-#endif
- break;
- case T1F_IREADY | T1F_IHALF:
- insb(base + B1_READ, dp, i);
-#ifdef FASTLINK_DEBUG
- wcnt += i;
-#endif
- dp += i;
- i = 0;
- break;
- default:
- *dp++ = b1_get_byte(base);
- i--;
-#ifdef FASTLINK_DEBUG
- bcnt++;
-#endif
- break;
- }
- }
-#ifdef FASTLINK_DEBUG
- if (wcnt)
- printk(KERN_DEBUG "b1lli(0x%x): get_slice l=%d w=%d b=%d\n",
- base, len, wcnt, bcnt);
-#endif
- } else {
- while (i-- > 0)
- *dp++ = b1_get_byte(base);
- }
- return len;
-}
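
/*
 * The fastlink receive loop above picks one of three transfer sizes
 * per iteration from the FIFO status bits. A userspace restatement of
 * that decision, using the T1F_* masks from this header:
 */
#include <stdio.h>

#define T1F_IREADY	0x01
#define T1F_IHALF	0x02
#define T1F_IFULL	0x04
#define FIFO_INPBSIZE	512

static const char *rx_action(int fifostat, unsigned int remaining)
{
	int status = fifostat & (T1F_IREADY | T1F_IHALF);

	if (remaining >= FIFO_INPBSIZE)
		status |= T1F_IFULL;
	switch (status) {
	case T1F_IREADY | T1F_IHALF | T1F_IFULL:
		return "insb() of a full FIFO_INPBSIZE block";
	case T1F_IREADY | T1F_IHALF:
		return "insb() of the remaining bytes";
	default:
		return "one byte via b1_get_byte()";
	}
}

int main(void)
{
	printf("%s\n", rx_action(T1F_IREADY | T1F_IHALF, 1024));
	printf("%s\n", rx_action(T1F_IREADY | T1F_IHALF, 100));
	printf("%s\n", rx_action(T1F_IREADY, 100));
	return 0;
}
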
-
-static inline void t1_put_slice(unsigned int base,
- unsigned char *dp, unsigned int len)
-{
- unsigned i = len;
- b1_put_word(base, i);
- if (t1_isfastlink(base)) {
- int status;
- while (i > 0) {
- status = t1_fifostatus(base) & (T1F_OREADY | T1F_OHALF);
- if (i >= FIFO_OUTBSIZE) status |= T1F_OEMPTY;
- switch (status) {
- case T1F_OREADY | T1F_OHALF | T1F_OEMPTY:
- outsb(base + B1_WRITE, dp, FIFO_OUTBSIZE);
- dp += FIFO_OUTBSIZE;
- i -= FIFO_OUTBSIZE;
- break;
- case T1F_OREADY | T1F_OHALF:
- outsb(base + B1_WRITE, dp, i);
- dp += i;
- i = 0;
- break;
- default:
- b1_put_byte(base, *dp++);
- i--;
- break;
- }
- }
- } else {
- while (i-- > 0)
- b1_put_byte(base, *dp++);
- }
-}
-
-static inline void t1_disable_irq(unsigned int base)
-{
- t1outp(base, T1_IRQMASTER, 0x00);
-}
-
-static inline void t1_reset(unsigned int base)
-{
- /* reset T1 Controller */
- b1_reset(base);
- /* disable irq on HEMA */
- t1outp(base, B1_INSTAT, 0x00);
- t1outp(base, B1_OUTSTAT, 0x00);
- t1outp(base, T1_IRQMASTER, 0x00);
- /* reset HEMA board configuration */
- t1outp(base, T1_RESETBOARD, 0xf);
-}
-
-static inline void b1_setinterrupt(unsigned int base, unsigned irq,
- enum avmcardtype cardtype)
-{
- switch (cardtype) {
- case avm_t1isa:
- t1outp(base, B1_INSTAT, 0x00);
- t1outp(base, B1_INSTAT, 0x02);
- t1outp(base, T1_IRQMASTER, 0x08);
- break;
- case avm_b1isa:
- b1outp(base, B1_INSTAT, 0x00);
- b1outp(base, B1_RESET, b1_irq_table[irq]);
- b1outp(base, B1_INSTAT, 0x02);
- break;
- default:
- case avm_m1:
- case avm_m2:
- case avm_b1pci:
- b1outp(base, B1_INSTAT, 0x00);
- b1outp(base, B1_RESET, 0xf0);
- b1outp(base, B1_INSTAT, 0x02);
- break;
- case avm_c4:
- case avm_t1pci:
- b1outp(base, B1_RESET, 0xf0);
- break;
- }
-}
-
-/* b1.c */
-avmcard *b1_alloc_card(int nr_controllers);
-void b1_free_card(avmcard *card);
-int b1_detect(unsigned int base, enum avmcardtype cardtype);
-void b1_getrevision(avmcard *card);
-int b1_load_t4file(avmcard *card, capiloaddatapart *t4file);
-int b1_load_config(avmcard *card, capiloaddatapart *config);
-int b1_loaded(avmcard *card);
-
-int b1_load_firmware(struct capi_ctr *ctrl, capiloaddata *data);
-void b1_reset_ctr(struct capi_ctr *ctrl);
-void b1_register_appl(struct capi_ctr *ctrl, u16 appl,
- capi_register_params *rp);
-void b1_release_appl(struct capi_ctr *ctrl, u16 appl);
-u16 b1_send_message(struct capi_ctr *ctrl, struct sk_buff *skb);
-void b1_parse_version(avmctrl_info *cinfo);
-irqreturn_t b1_interrupt(int interrupt, void *devptr);
-
-int b1_proc_show(struct seq_file *m, void *v);
-
-avmcard_dmainfo *avmcard_dma_alloc(char *name, struct pci_dev *,
- long rsize, long ssize);
-void avmcard_dma_free(avmcard_dmainfo *);
-
-/* b1dma.c */
-int b1pciv4_detect(avmcard *card);
-int t1pci_detect(avmcard *card);
-void b1dma_reset(avmcard *card);
-irqreturn_t b1dma_interrupt(int interrupt, void *devptr);
-
-int b1dma_load_firmware(struct capi_ctr *ctrl, capiloaddata *data);
-void b1dma_reset_ctr(struct capi_ctr *ctrl);
-void b1dma_remove_ctr(struct capi_ctr *ctrl);
-void b1dma_register_appl(struct capi_ctr *ctrl,
- u16 appl,
- capi_register_params *rp);
-void b1dma_release_appl(struct capi_ctr *ctrl, u16 appl);
-u16 b1dma_send_message(struct capi_ctr *ctrl, struct sk_buff *skb);
-int b1dma_proc_show(struct seq_file *m, void *v);
-
-#endif /* _AVMCARD_H_ */
diff --git a/drivers/staging/isdn/avm/b1.c b/drivers/staging/isdn/avm/b1.c
deleted file mode 100644
index 32ec8cf31fd0..000000000000
--- a/drivers/staging/isdn/avm/b1.c
+++ /dev/null
@@ -1,819 +0,0 @@
-/* $Id: b1.c,v 1.1.2.2 2004/01/16 21:09:27 keil Exp $
- *
- * Common module for AVM B1 cards.
- *
- * Copyright 1999 by Carsten Paeth <calle@calle.de>
- *
- * This software may be used and distributed according to the terms
- * of the GNU General Public License, incorporated herein by reference.
- *
- */
-
-#include <linux/module.h>
-#include <linux/kernel.h>
-#include <linux/pci.h>
-#include <linux/proc_fs.h>
-#include <linux/seq_file.h>
-#include <linux/skbuff.h>
-#include <linux/delay.h>
-#include <linux/mm.h>
-#include <linux/interrupt.h>
-#include <linux/ioport.h>
-#include <linux/capi.h>
-#include <linux/kernelcapi.h>
-#include <linux/slab.h>
-#include <asm/io.h>
-#include <linux/init.h>
-#include <linux/uaccess.h>
-#include <linux/netdevice.h>
-#include <linux/isdn/capilli.h>
-#include "avmcard.h"
-#include <linux/isdn/capicmd.h>
-#include <linux/isdn/capiutil.h>
-
-static char *revision = "$Revision: 1.1.2.2 $";
-
-/* ------------------------------------------------------------- */
-
-MODULE_DESCRIPTION("CAPI4Linux: Common support for active AVM cards");
-MODULE_AUTHOR("Carsten Paeth");
-MODULE_LICENSE("GPL");
-
-/* ------------------------------------------------------------- */
-
-int b1_irq_table[16] =
-{0,
- 0,
- 0,
- 192, /* irq 3 */
- 32, /* irq 4 */
- 160, /* irq 5 */
- 96, /* irq 6 */
- 224, /* irq 7 */
- 0,
- 64, /* irq 9 */
- 80, /* irq 10 */
- 208, /* irq 11 */
- 48, /* irq 12 */
- 0,
- 0,
- 112, /* irq 15 */
-};
-
-/* ------------------------------------------------------------- */
-
-avmcard *b1_alloc_card(int nr_controllers)
-{
- avmcard *card;
- avmctrl_info *cinfo;
- int i;
-
- card = kzalloc(sizeof(*card), GFP_KERNEL);
- if (!card)
- return NULL;
-
- cinfo = kcalloc(nr_controllers, sizeof(*cinfo), GFP_KERNEL);
- if (!cinfo) {
- kfree(card);
- return NULL;
- }
-
- card->ctrlinfo = cinfo;
- for (i = 0; i < nr_controllers; i++) {
- INIT_LIST_HEAD(&cinfo[i].ncci_head);
- cinfo[i].card = card;
- }
- spin_lock_init(&card->lock);
- card->nr_controllers = nr_controllers;
-
- return card;
-}
-
-/* ------------------------------------------------------------- */
-
-void b1_free_card(avmcard *card)
-{
- kfree(card->ctrlinfo);
- kfree(card);
-}
-
-/* ------------------------------------------------------------- */
-
-int b1_detect(unsigned int base, enum avmcardtype cardtype)
-{
- int onoff, i;
-
- /*
- * Statusregister 0000 00xx
- */
- if ((inb(base + B1_INSTAT) & 0xfc)
- || (inb(base + B1_OUTSTAT) & 0xfc))
- return 1;
- /*
- * Statusregister 0000 001x
- */
- b1outp(base, B1_INSTAT, 0x2); /* enable irq */
- /* b1outp(base, B1_OUTSTAT, 0x2); */
- if ((inb(base + B1_INSTAT) & 0xfe) != 0x2
- /* || (inb(base + B1_OUTSTAT) & 0xfe) != 0x2 */)
- return 2;
- /*
- * Statusregister 0000 000x
- */
- b1outp(base, B1_INSTAT, 0x0); /* disable irq */
- b1outp(base, B1_OUTSTAT, 0x0);
- if ((inb(base + B1_INSTAT) & 0xfe)
- || (inb(base + B1_OUTSTAT) & 0xfe))
- return 3;
-
- for (onoff = !0, i = 0; i < 10; i++) {
- b1_set_test_bit(base, cardtype, onoff);
- if (b1_get_test_bit(base, cardtype) != onoff)
- return 4;
- onoff = !onoff;
- }
-
- if (cardtype == avm_m1)
- return 0;
-
- if ((b1_rd_reg(base, B1_STAT1(cardtype)) & 0x0f) != 0x01)
- return 5;
-
- return 0;
-}
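
/*
 * b1_detect() returns a distinct nonzero code per failed probe stage,
 * which callers such as b1isa_probe() below only log as a number.
 * Hypothetical decoder giving one plausible reading of the checks
 * above, in order:
 */
#include <stdio.h>

static const char *b1_detect_error(int code)
{
	switch (code) {
	case 0: return "card detected";
	case 1: return "status registers not idle";
	case 2: return "irq enable bit did not stick";
	case 3: return "irq disable did not clear status";
	case 4: return "test-bit toggle failed";
	case 5: return "STAT1 signature mismatch";
	default: return "unknown code";
	}
}

int main(void)
{
	for (int code = 0; code <= 5; code++)
		printf("%d: %s\n", code, b1_detect_error(code));
	return 0;
}
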
-
-void b1_getrevision(avmcard *card)
-{
- card->class = inb(card->port + B1_ANALYSE);
- card->revision = inb(card->port + B1_REVISION);
-}
-
-#define FWBUF_SIZE 256
-int b1_load_t4file(avmcard *card, capiloaddatapart *t4file)
-{
- unsigned char buf[FWBUF_SIZE];
- unsigned char *dp;
- int i, left;
- unsigned int base = card->port;
-
- dp = t4file->data;
- left = t4file->len;
- while (left > FWBUF_SIZE) {
- if (t4file->user) {
- if (copy_from_user(buf, dp, FWBUF_SIZE))
- return -EFAULT;
- } else {
- memcpy(buf, dp, FWBUF_SIZE);
- }
- for (i = 0; i < FWBUF_SIZE; i++)
- if (b1_save_put_byte(base, buf[i]) < 0) {
- printk(KERN_ERR "%s: corrupted firmware file ?\n",
- card->name);
- return -EIO;
- }
- left -= FWBUF_SIZE;
- dp += FWBUF_SIZE;
- }
- if (left) {
- if (t4file->user) {
- if (copy_from_user(buf, dp, left))
- return -EFAULT;
- } else {
- memcpy(buf, dp, left);
- }
- for (i = 0; i < left; i++)
- if (b1_save_put_byte(base, buf[i]) < 0) {
- printk(KERN_ERR "%s: corrupted firmware file ?\n",
- card->name);
- return -EIO;
- }
- }
- return 0;
-}
-
-int b1_load_config(avmcard *card, capiloaddatapart *config)
-{
- unsigned char buf[FWBUF_SIZE];
- unsigned char *dp;
- unsigned int base = card->port;
- int i, j, left;
-
- dp = config->data;
- left = config->len;
- if (left) {
- b1_put_byte(base, SEND_CONFIG);
- b1_put_word(base, 1);
- b1_put_byte(base, SEND_CONFIG);
- b1_put_word(base, left);
- }
- while (left > FWBUF_SIZE) {
- if (config->user) {
- if (copy_from_user(buf, dp, FWBUF_SIZE))
- return -EFAULT;
- } else {
- memcpy(buf, dp, FWBUF_SIZE);
- }
- for (i = 0; i < FWBUF_SIZE; ) {
- b1_put_byte(base, SEND_CONFIG);
- for (j = 0; j < 4; j++) {
- b1_put_byte(base, buf[i++]);
- }
- }
- left -= FWBUF_SIZE;
- dp += FWBUF_SIZE;
- }
- if (left) {
- if (config->user) {
- if (copy_from_user(buf, dp, left))
- return -EFAULT;
- } else {
- memcpy(buf, dp, left);
- }
- for (i = 0; i < left; ) {
- b1_put_byte(base, SEND_CONFIG);
- for (j = 0; j < 4; j++) {
- if (i < left)
- b1_put_byte(base, buf[i++]);
- else
- b1_put_byte(base, 0);
- }
- }
- }
- return 0;
-}
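
/*
 * b1_load_config() above streams the configuration in groups of four
 * data bytes, each group prefixed with SEND_CONFIG and the final group
 * zero-padded; the two-part preamble announcing the total length is
 * omitted here. Worked example for a hypothetical 6-byte config:
 */
#include <stdio.h>

#define SEND_CONFIG 0x21

int main(void)
{
	const unsigned char cfg[] = { 'A', 'B', 'C', 'D', 'E', 'F' };
	const int len = sizeof(cfg);

	for (int i = 0; i < len; ) {
		printf("%02x:", SEND_CONFIG);	/* group prefix */
		for (int j = 0; j < 4; j++, i++)
			printf(" %02x", i < len ? cfg[i] : 0);
		printf("\n");
	}
	/* prints: 21: 41 42 43 44
	 *         21: 45 46 00 00 */
	return 0;
}
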
-
-int b1_loaded(avmcard *card)
-{
- unsigned int base = card->port;
- unsigned long stop;
- unsigned char ans;
- unsigned long tout = 2;
-
- for (stop = jiffies + tout * HZ; time_before(jiffies, stop);) {
- if (b1_tx_empty(base))
- break;
- }
- if (!b1_tx_empty(base)) {
- printk(KERN_ERR "%s: b1_loaded: tx err, corrupted t4 file ?\n",
- card->name);
- return 0;
- }
- b1_put_byte(base, SEND_POLL);
- for (stop = jiffies + tout * HZ; time_before(jiffies, stop);) {
- if (b1_rx_full(base)) {
- ans = b1_get_byte(base);
- if (ans == RECEIVE_POLL)
- return 1;
-
- printk(KERN_ERR "%s: b1_loaded: got 0x%x, firmware not running\n",
- card->name, ans);
- return 0;
- }
- }
- printk(KERN_ERR "%s: b1_loaded: firmware not running\n", card->name);
- return 0;
-}
-
-/* ------------------------------------------------------------- */
-
-int b1_load_firmware(struct capi_ctr *ctrl, capiloaddata *data)
-{
- avmctrl_info *cinfo = (avmctrl_info *)(ctrl->driverdata);
- avmcard *card = cinfo->card;
- unsigned int port = card->port;
- unsigned long flags;
- int retval;
-
- b1_reset(port);
- retval = b1_load_t4file(card, &data->firmware);
-
- if (retval) {
- b1_reset(port);
- printk(KERN_ERR "%s: failed to load t4file!!\n",
- card->name);
- return retval;
- }
-
- b1_disable_irq(port);
-
- if (data->configuration.len > 0 && data->configuration.data) {
- retval = b1_load_config(card, &data->configuration);
- if (retval) {
- b1_reset(port);
- printk(KERN_ERR "%s: failed to load config!!\n",
- card->name);
- return retval;
- }
- }
-
- if (!b1_loaded(card)) {
- printk(KERN_ERR "%s: failed to load t4file.\n", card->name);
- return -EIO;
- }
-
- spin_lock_irqsave(&card->lock, flags);
- b1_setinterrupt(port, card->irq, card->cardtype);
- b1_put_byte(port, SEND_INIT);
- b1_put_word(port, CAPI_MAXAPPL);
- b1_put_word(port, AVM_NCCI_PER_CHANNEL * 2);
- b1_put_word(port, ctrl->cnr - 1);
- spin_unlock_irqrestore(&card->lock, flags);
-
- return 0;
-}
-
-void b1_reset_ctr(struct capi_ctr *ctrl)
-{
- avmctrl_info *cinfo = (avmctrl_info *)(ctrl->driverdata);
- avmcard *card = cinfo->card;
- unsigned int port = card->port;
- unsigned long flags;
-
- b1_reset(port);
- b1_reset(port);
-
- memset(cinfo->version, 0, sizeof(cinfo->version));
- spin_lock_irqsave(&card->lock, flags);
- capilib_release(&cinfo->ncci_head);
- spin_unlock_irqrestore(&card->lock, flags);
- capi_ctr_down(ctrl);
-}
-
-void b1_register_appl(struct capi_ctr *ctrl,
- u16 appl,
- capi_register_params *rp)
-{
- avmctrl_info *cinfo = (avmctrl_info *)(ctrl->driverdata);
- avmcard *card = cinfo->card;
- unsigned int port = card->port;
- unsigned long flags;
- int nconn, want = rp->level3cnt;
-
- if (want > 0) nconn = want;
- else nconn = ctrl->profile.nbchannel * -want;
- if (nconn == 0) nconn = ctrl->profile.nbchannel;
-
- spin_lock_irqsave(&card->lock, flags);
- b1_put_byte(port, SEND_REGISTER);
- b1_put_word(port, appl);
- b1_put_word(port, 1024 * (nconn + 1));
- b1_put_word(port, nconn);
- b1_put_word(port, rp->datablkcnt);
- b1_put_word(port, rp->datablklen);
- spin_unlock_irqrestore(&card->lock, flags);
-}
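
/*
 * The nconn computation in b1_register_appl() encodes three cases of
 * rp->level3cnt: positive is an absolute connection count, negative a
 * per-B-channel multiplier, and zero means one connection per channel.
 * Standalone restatement of that logic:
 */
#include <stdio.h>

static int nconn_for(int level3cnt, int nbchannel)
{
	int nconn;

	if (level3cnt > 0)
		nconn = level3cnt;
	else
		nconn = nbchannel * -level3cnt;
	if (nconn == 0)
		nconn = nbchannel;
	return nconn;
}

int main(void)
{
	printf("%d\n", nconn_for(4, 2));	/* 4: absolute count */
	printf("%d\n", nconn_for(-2, 2));	/* 4: 2 per channel */
	printf("%d\n", nconn_for(0, 2));	/* 2: one per channel */
	return 0;
}
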
-
-void b1_release_appl(struct capi_ctr *ctrl, u16 appl)
-{
- avmctrl_info *cinfo = (avmctrl_info *)(ctrl->driverdata);
- avmcard *card = cinfo->card;
- unsigned int port = card->port;
- unsigned long flags;
-
- spin_lock_irqsave(&card->lock, flags);
- capilib_release_appl(&cinfo->ncci_head, appl);
- b1_put_byte(port, SEND_RELEASE);
- b1_put_word(port, appl);
- spin_unlock_irqrestore(&card->lock, flags);
-}
-
-u16 b1_send_message(struct capi_ctr *ctrl, struct sk_buff *skb)
-{
- avmctrl_info *cinfo = (avmctrl_info *)(ctrl->driverdata);
- avmcard *card = cinfo->card;
- unsigned int port = card->port;
- unsigned long flags;
- u16 len = CAPIMSG_LEN(skb->data);
- u8 cmd = CAPIMSG_COMMAND(skb->data);
- u8 subcmd = CAPIMSG_SUBCOMMAND(skb->data);
- u16 dlen, retval;
-
- spin_lock_irqsave(&card->lock, flags);
- if (CAPICMD(cmd, subcmd) == CAPI_DATA_B3_REQ) {
- retval = capilib_data_b3_req(&cinfo->ncci_head,
- CAPIMSG_APPID(skb->data),
- CAPIMSG_NCCI(skb->data),
- CAPIMSG_MSGID(skb->data));
- if (retval != CAPI_NOERROR) {
- spin_unlock_irqrestore(&card->lock, flags);
- return retval;
- }
-
- dlen = CAPIMSG_DATALEN(skb->data);
-
- b1_put_byte(port, SEND_DATA_B3_REQ);
- b1_put_slice(port, skb->data, len);
- b1_put_slice(port, skb->data + len, dlen);
- } else {
- b1_put_byte(port, SEND_MESSAGE);
- b1_put_slice(port, skb->data, len);
- }
- spin_unlock_irqrestore(&card->lock, flags);
-
- dev_kfree_skb_any(skb);
- return CAPI_NOERROR;
-}
-
-/* ------------------------------------------------------------- */
-
-void b1_parse_version(avmctrl_info *cinfo)
-{
- struct capi_ctr *ctrl = &cinfo->capi_ctrl;
- avmcard *card = cinfo->card;
- capi_profile *profp;
- u8 *dversion;
- u8 flag;
- int i, j;
-
- for (j = 0; j < AVM_MAXVERSION; j++)
- cinfo->version[j] = "";
- for (i = 0, j = 0;
- j < AVM_MAXVERSION && i < cinfo->versionlen;
- j++, i += cinfo->versionbuf[i] + 1)
- cinfo->version[j] = &cinfo->versionbuf[i + 1];
-
- strlcpy(ctrl->serial, cinfo->version[VER_SERIAL], sizeof(ctrl->serial));
- memcpy(&ctrl->profile, cinfo->version[VER_PROFILE], sizeof(capi_profile));
- strlcpy(ctrl->manu, "AVM GmbH", sizeof(ctrl->manu));
- dversion = cinfo->version[VER_DRIVER];
- ctrl->version.majorversion = 2;
- ctrl->version.minorversion = 0;
- ctrl->version.majormanuversion = (((dversion[0] - '0') & 0xf) << 4);
- ctrl->version.majormanuversion |= ((dversion[2] - '0') & 0xf);
- ctrl->version.minormanuversion = (dversion[3] - '0') << 4;
- ctrl->version.minormanuversion |=
- (dversion[5] - '0') * 10 + ((dversion[6] - '0') & 0xf);
-
- profp = &ctrl->profile;
-
- flag = ((u8 *)(profp->manu))[1];
- switch (flag) {
- case 0: if (cinfo->version[VER_CARDTYPE])
- strcpy(cinfo->cardname, cinfo->version[VER_CARDTYPE]);
- else strcpy(cinfo->cardname, "B1");
- break;
- case 3: strcpy(cinfo->cardname, "PCMCIA B"); break;
- case 4: strcpy(cinfo->cardname, "PCMCIA M1"); break;
- case 5: strcpy(cinfo->cardname, "PCMCIA M2"); break;
- case 6: strcpy(cinfo->cardname, "B1 V3.0"); break;
- case 7: strcpy(cinfo->cardname, "B1 PCI"); break;
- default: sprintf(cinfo->cardname, "AVM?%u", (unsigned int)flag); break;
- }
- printk(KERN_NOTICE "%s: card %d \"%s\" ready.\n",
- card->name, ctrl->cnr, cinfo->cardname);
-
- flag = ((u8 *)(profp->manu))[3];
- if (flag)
- printk(KERN_NOTICE "%s: card %d Protocol:%s%s%s%s%s%s%s\n",
- card->name,
- ctrl->cnr,
- (flag & 0x01) ? " DSS1" : "",
- (flag & 0x02) ? " CT1" : "",
- (flag & 0x04) ? " VN3" : "",
- (flag & 0x08) ? " NI1" : "",
- (flag & 0x10) ? " AUSTEL" : "",
- (flag & 0x20) ? " ESS" : "",
- (flag & 0x40) ? " 1TR6" : ""
- );
-
- flag = ((u8 *)(profp->manu))[5];
- if (flag)
- printk(KERN_NOTICE "%s: card %d Linetype:%s%s%s%s\n",
- card->name,
- ctrl->cnr,
- (flag & 0x01) ? " point to point" : "",
- (flag & 0x02) ? " point to multipoint" : "",
- (flag & 0x08) ? " leased line without D-channel" : "",
- (flag & 0x04) ? " leased line with D-channel" : ""
- );
-}
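
/*
 * The buffer that b1_parse_version() walks is a packed sequence of
 * length-prefixed records (one length byte, then the record bytes; in
 * the driver each record appears to carry its own NUL terminator,
 * since the entries are later used as C strings). Sketch with
 * hypothetical data:
 */
#include <stdio.h>

int main(void)
{
	/* two records: "3.11" and "B1" */
	const unsigned char buf[] = { 4, '3', '.', '1', '1', 2, 'B', '1' };
	const int buflen = sizeof(buf);
	int i, j;

	for (i = 0, j = 0; i < buflen; j++, i += buf[i] + 1)
		printf("version[%d] = %.*s\n",
		       j, buf[i], (const char *)&buf[i + 1]);
	return 0;
}
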
-
-/* ------------------------------------------------------------- */
-
-irqreturn_t b1_interrupt(int interrupt, void *devptr)
-{
- avmcard *card = devptr;
- avmctrl_info *cinfo = &card->ctrlinfo[0];
- struct capi_ctr *ctrl = &cinfo->capi_ctrl;
- unsigned char b1cmd;
- struct sk_buff *skb;
-
- unsigned ApplId;
- unsigned MsgLen;
- unsigned DataB3Len;
- unsigned NCCI;
- unsigned WindowSize;
- unsigned long flags;
-
- spin_lock_irqsave(&card->lock, flags);
-
- if (!b1_rx_full(card->port)) {
- spin_unlock_irqrestore(&card->lock, flags);
- return IRQ_NONE;
- }
-
- b1cmd = b1_get_byte(card->port);
-
- switch (b1cmd) {
-
- case RECEIVE_DATA_B3_IND:
-
- ApplId = (unsigned) b1_get_word(card->port);
- MsgLen = b1_get_slice(card->port, card->msgbuf);
- DataB3Len = b1_get_slice(card->port, card->databuf);
- spin_unlock_irqrestore(&card->lock, flags);
-
- if (MsgLen < 30) { /* not CAPI 64Bit */
- memset(card->msgbuf + MsgLen, 0, 30 - MsgLen);
- MsgLen = 30;
- CAPIMSG_SETLEN(card->msgbuf, 30);
- }
-
- skb = alloc_skb(DataB3Len + MsgLen, GFP_ATOMIC);
- if (!skb) {
- printk(KERN_ERR "%s: incoming packet dropped\n",
- card->name);
- } else {
- skb_put_data(skb, card->msgbuf, MsgLen);
- skb_put_data(skb, card->databuf, DataB3Len);
- capi_ctr_handle_message(ctrl, ApplId, skb);
- }
- break;
-
- case RECEIVE_MESSAGE:
-
- ApplId = (unsigned) b1_get_word(card->port);
- MsgLen = b1_get_slice(card->port, card->msgbuf);
- skb = alloc_skb(MsgLen, GFP_ATOMIC);
-
- if (!skb) {
- printk(KERN_ERR "%s: incoming packet dropped\n",
- card->name);
- spin_unlock_irqrestore(&card->lock, flags);
- } else {
- skb_put_data(skb, card->msgbuf, MsgLen);
- if (CAPIMSG_CMD(skb->data) == CAPI_DATA_B3_CONF)
- capilib_data_b3_conf(&cinfo->ncci_head, ApplId,
- CAPIMSG_NCCI(skb->data),
- CAPIMSG_MSGID(skb->data));
- spin_unlock_irqrestore(&card->lock, flags);
- capi_ctr_handle_message(ctrl, ApplId, skb);
- }
- break;
-
- case RECEIVE_NEW_NCCI:
-
- ApplId = b1_get_word(card->port);
- NCCI = b1_get_word(card->port);
- WindowSize = b1_get_word(card->port);
- capilib_new_ncci(&cinfo->ncci_head, ApplId, NCCI, WindowSize);
- spin_unlock_irqrestore(&card->lock, flags);
- break;
-
- case RECEIVE_FREE_NCCI:
-
- ApplId = b1_get_word(card->port);
- NCCI = b1_get_word(card->port);
- if (NCCI != 0xffffffff)
- capilib_free_ncci(&cinfo->ncci_head, ApplId, NCCI);
- spin_unlock_irqrestore(&card->lock, flags);
- break;
-
- case RECEIVE_START:
- /* b1_put_byte(card->port, SEND_POLLACK); */
- spin_unlock_irqrestore(&card->lock, flags);
- capi_ctr_resume_output(ctrl);
- break;
-
- case RECEIVE_STOP:
- spin_unlock_irqrestore(&card->lock, flags);
- capi_ctr_suspend_output(ctrl);
- break;
-
- case RECEIVE_INIT:
-
- cinfo->versionlen = b1_get_slice(card->port, cinfo->versionbuf);
- spin_unlock_irqrestore(&card->lock, flags);
- b1_parse_version(cinfo);
- printk(KERN_INFO "%s: %s-card (%s) now active\n",
- card->name,
- cinfo->version[VER_CARDTYPE],
- cinfo->version[VER_DRIVER]);
- capi_ctr_ready(ctrl);
- break;
-
- case RECEIVE_TASK_READY:
- ApplId = (unsigned) b1_get_word(card->port);
- MsgLen = b1_get_slice(card->port, card->msgbuf);
- spin_unlock_irqrestore(&card->lock, flags);
- card->msgbuf[MsgLen] = 0;
- while (MsgLen > 0
- && (card->msgbuf[MsgLen - 1] == '\n'
- || card->msgbuf[MsgLen - 1] == '\r')) {
- card->msgbuf[MsgLen - 1] = 0;
- MsgLen--;
- }
- printk(KERN_INFO "%s: task %d \"%s\" ready.\n",
- card->name, ApplId, card->msgbuf);
- break;
-
- case RECEIVE_DEBUGMSG:
- MsgLen = b1_get_slice(card->port, card->msgbuf);
- spin_unlock_irqrestore(&card->lock, flags);
- card->msgbuf[MsgLen] = 0;
- while (MsgLen > 0
- && (card->msgbuf[MsgLen - 1] == '\n'
- || card->msgbuf[MsgLen - 1] == '\r')) {
- card->msgbuf[MsgLen - 1] = 0;
- MsgLen--;
- }
- printk(KERN_INFO "%s: DEBUG: %s\n", card->name, card->msgbuf);
- break;
-
- case 0xff:
- spin_unlock_irqrestore(&card->lock, flags);
- printk(KERN_ERR "%s: card removed ?\n", card->name);
- return IRQ_NONE;
- default:
- spin_unlock_irqrestore(&card->lock, flags);
- printk(KERN_ERR "%s: b1_interrupt: 0x%x ???\n",
- card->name, b1cmd);
- return IRQ_HANDLED;
- }
- return IRQ_HANDLED;
-}
-
-/* ------------------------------------------------------------- */
-int b1_proc_show(struct seq_file *m, void *v)
-{
- struct capi_ctr *ctrl = m->private;
- avmctrl_info *cinfo = (avmctrl_info *)(ctrl->driverdata);
- avmcard *card = cinfo->card;
- u8 flag;
- char *s;
-
- seq_printf(m, "%-16s %s\n", "name", card->name);
- seq_printf(m, "%-16s 0x%x\n", "io", card->port);
- seq_printf(m, "%-16s %d\n", "irq", card->irq);
- switch (card->cardtype) {
- case avm_b1isa: s = "B1 ISA"; break;
- case avm_b1pci: s = "B1 PCI"; break;
- case avm_b1pcmcia: s = "B1 PCMCIA"; break;
- case avm_m1: s = "M1"; break;
- case avm_m2: s = "M2"; break;
- case avm_t1isa: s = "T1 ISA (HEMA)"; break;
- case avm_t1pci: s = "T1 PCI"; break;
- case avm_c4: s = "C4"; break;
- case avm_c2: s = "C2"; break;
- default: s = "???"; break;
- }
- seq_printf(m, "%-16s %s\n", "type", s);
- if (card->cardtype == avm_t1isa)
- seq_printf(m, "%-16s %d\n", "cardnr", card->cardnr);
-
- s = cinfo->version[VER_DRIVER];
- if (s)
- seq_printf(m, "%-16s %s\n", "ver_driver", s);
-
- s = cinfo->version[VER_CARDTYPE];
- if (s)
- seq_printf(m, "%-16s %s\n", "ver_cardtype", s);
-
- s = cinfo->version[VER_SERIAL];
- if (s)
- seq_printf(m, "%-16s %s\n", "ver_serial", s);
-
- if (card->cardtype != avm_m1) {
- flag = ((u8 *)(ctrl->profile.manu))[3];
- if (flag)
- seq_printf(m, "%-16s%s%s%s%s%s%s%s\n",
- "protocol",
- (flag & 0x01) ? " DSS1" : "",
- (flag & 0x02) ? " CT1" : "",
- (flag & 0x04) ? " VN3" : "",
- (flag & 0x08) ? " NI1" : "",
- (flag & 0x10) ? " AUSTEL" : "",
- (flag & 0x20) ? " ESS" : "",
- (flag & 0x40) ? " 1TR6" : ""
- );
- }
- if (card->cardtype != avm_m1) {
- flag = ((u8 *)(ctrl->profile.manu))[5];
- if (flag)
- seq_printf(m, "%-16s%s%s%s%s\n",
- "linetype",
- (flag & 0x01) ? " point to point" : "",
- (flag & 0x02) ? " point to multipoint" : "",
- (flag & 0x08) ? " leased line without D-channel" : "",
- (flag & 0x04) ? " leased line with D-channel" : ""
- );
- }
- seq_printf(m, "%-16s %s\n", "cardname", cinfo->cardname);
-
- return 0;
-}
-EXPORT_SYMBOL(b1_proc_show);
-
-/* ------------------------------------------------------------- */
-
-#ifdef CONFIG_PCI
-
-avmcard_dmainfo *
-avmcard_dma_alloc(char *name, struct pci_dev *pdev, long rsize, long ssize)
-{
- avmcard_dmainfo *p;
- void *buf;
-
- p = kzalloc(sizeof(avmcard_dmainfo), GFP_KERNEL);
- if (!p) {
- printk(KERN_WARNING "%s: no memory.\n", name);
- goto err;
- }
-
- p->recvbuf.size = rsize;
- buf = pci_alloc_consistent(pdev, rsize, &p->recvbuf.dmaaddr);
- if (!buf) {
- printk(KERN_WARNING "%s: allocation of receive dma buffer failed.\n", name);
- goto err_kfree;
- }
- p->recvbuf.dmabuf = buf;
-
- p->sendbuf.size = ssize;
- buf = pci_alloc_consistent(pdev, ssize, &p->sendbuf.dmaaddr);
- if (!buf) {
- printk(KERN_WARNING "%s: allocation of send dma buffer failed.\n", name);
- goto err_free_consistent;
- }
-
- p->sendbuf.dmabuf = buf;
- p->pcidev = pdev; /* avmcard_dma_free() frees against this */
- skb_queue_head_init(&p->send_queue);
-
- return p;
-
-err_free_consistent:
- pci_free_consistent(pdev, p->recvbuf.size,
-     p->recvbuf.dmabuf, p->recvbuf.dmaaddr);
-err_kfree:
- kfree(p);
-err:
- return NULL;
-}
-
-void avmcard_dma_free(avmcard_dmainfo *p)
-{
- pci_free_consistent(p->pcidev, p->recvbuf.size,
- p->recvbuf.dmabuf, p->recvbuf.dmaaddr);
- pci_free_consistent(p->pcidev, p->sendbuf.size,
- p->sendbuf.dmabuf, p->sendbuf.dmaaddr);
- skb_queue_purge(&p->send_queue);
- kfree(p);
-}
-
-EXPORT_SYMBOL(avmcard_dma_alloc);
-EXPORT_SYMBOL(avmcard_dma_free);
-
-#endif
-
-EXPORT_SYMBOL(b1_irq_table);
-
-EXPORT_SYMBOL(b1_alloc_card);
-EXPORT_SYMBOL(b1_free_card);
-EXPORT_SYMBOL(b1_detect);
-EXPORT_SYMBOL(b1_getrevision);
-EXPORT_SYMBOL(b1_load_t4file);
-EXPORT_SYMBOL(b1_load_config);
-EXPORT_SYMBOL(b1_loaded);
-EXPORT_SYMBOL(b1_load_firmware);
-EXPORT_SYMBOL(b1_reset_ctr);
-EXPORT_SYMBOL(b1_register_appl);
-EXPORT_SYMBOL(b1_release_appl);
-EXPORT_SYMBOL(b1_send_message);
-
-EXPORT_SYMBOL(b1_parse_version);
-EXPORT_SYMBOL(b1_interrupt);
-
-static int __init b1_init(void)
-{
- char *p;
- char rev[32];
-
- p = strchr(revision, ':');
- if (p && p[1]) {
- strlcpy(rev, p + 2, sizeof(rev));
- p = strchr(rev, '$');
- if (p && p > rev)
- *(p - 1) = 0;
- } else {
- strcpy(rev, "1.0");
- }
- printk(KERN_INFO "b1: revision %s\n", rev);
-
- return 0;
-}
-
-static void __exit b1_exit(void)
-{
-}
-
-module_init(b1_init);
-module_exit(b1_exit);
diff --git a/drivers/staging/isdn/avm/b1dma.c b/drivers/staging/isdn/avm/b1dma.c
deleted file mode 100644
index 6a3dc9937ce5..000000000000
--- a/drivers/staging/isdn/avm/b1dma.c
+++ /dev/null
@@ -1,981 +0,0 @@
-/* $Id: b1dma.c,v 1.1.2.3 2004/02/10 01:07:12 keil Exp $
- *
- * Common module for AVM B1 cards that support dma with AMCC
- *
- * Copyright 2000 by Carsten Paeth <calle@calle.de>
- *
- * This software may be used and distributed according to the terms
- * of the GNU General Public License, incorporated herein by reference.
- *
- */
-
-#include <linux/module.h>
-#include <linux/kernel.h>
-#include <linux/proc_fs.h>
-#include <linux/seq_file.h>
-#include <linux/skbuff.h>
-#include <linux/delay.h>
-#include <linux/mm.h>
-#include <linux/interrupt.h>
-#include <linux/ioport.h>
-#include <linux/capi.h>
-#include <linux/kernelcapi.h>
-#include <linux/gfp.h>
-#include <asm/io.h>
-#include <linux/init.h>
-#include <linux/uaccess.h>
-#include <linux/netdevice.h>
-#include <linux/isdn/capilli.h>
-#include "avmcard.h"
-#include <linux/isdn/capicmd.h>
-#include <linux/isdn/capiutil.h>
-
-static char *revision = "$Revision: 1.1.2.3 $";
-
-#undef AVM_B1DMA_DEBUG
-
-/* ------------------------------------------------------------- */
-
-MODULE_DESCRIPTION("CAPI4Linux: DMA support for active AVM cards");
-MODULE_AUTHOR("Carsten Paeth");
-MODULE_LICENSE("GPL");
-
-static bool suppress_pollack = false;
-module_param(suppress_pollack, bool, 0);
-
-/* ------------------------------------------------------------- */
-
-static void b1dma_dispatch_tx(avmcard *card);
-
-/* ------------------------------------------------------------- */
-
-/* S5933 */
-
-#define AMCC_RXPTR 0x24
-#define AMCC_RXLEN 0x28
-#define AMCC_TXPTR 0x2c
-#define AMCC_TXLEN 0x30
-
-#define AMCC_INTCSR 0x38
-# define EN_READ_TC_INT 0x00008000L
-# define EN_WRITE_TC_INT 0x00004000L
-# define EN_TX_TC_INT EN_READ_TC_INT
-# define EN_RX_TC_INT EN_WRITE_TC_INT
-# define AVM_FLAG 0x30000000L
-
-# define ANY_S5933_INT 0x00800000L
-# define READ_TC_INT 0x00080000L
-# define WRITE_TC_INT 0x00040000L
-# define TX_TC_INT READ_TC_INT
-# define RX_TC_INT WRITE_TC_INT
-# define MASTER_ABORT_INT 0x00100000L
-# define TARGET_ABORT_INT 0x00200000L
-# define BUS_MASTER_INT 0x00200000L
-# define ALL_INT 0x000C0000L
-
-#define AMCC_MCSR 0x3c
-# define A2P_HI_PRIORITY 0x00000100L
-# define EN_A2P_TRANSFERS 0x00000400L
-# define P2A_HI_PRIORITY 0x00001000L
-# define EN_P2A_TRANSFERS 0x00004000L
-# define RESET_A2P_FLAGS 0x04000000L
-# define RESET_P2A_FLAGS 0x02000000L
-
-/* ------------------------------------------------------------- */
-
-static inline void b1dma_writel(avmcard *card, u32 value, int off)
-{
- writel(value, card->mbase + off);
-}
-
-static inline u32 b1dma_readl(avmcard *card, int off)
-{
- return readl(card->mbase + off);
-}
-
-/* ------------------------------------------------------------- */
-
-static inline int b1dma_tx_empty(unsigned int port)
-{
- return inb(port + 0x03) & 0x1;
-}
-
-static inline int b1dma_rx_full(unsigned int port)
-{
- return inb(port + 0x02) & 0x1;
-}
-
-static int b1dma_tolink(avmcard *card, void *buf, unsigned int len)
-{
- unsigned long stop = jiffies + 1 * HZ; /* maximum wait time 1 sec */
- unsigned char *s = (unsigned char *)buf;
- while (len--) {
- while (!b1dma_tx_empty(card->port)
- && time_before(jiffies, stop));
- if (!b1dma_tx_empty(card->port))
- return -1;
- t1outp(card->port, 0x01, *s++);
- }
- return 0;
-}
-
-static int b1dma_fromlink(avmcard *card, void *buf, unsigned int len)
-{
- unsigned long stop = jiffies + 1 * HZ; /* maximum wait time 1 sec */
- unsigned char *s = (unsigned char *)buf;
- while (len--) {
- while (!b1dma_rx_full(card->port)
- && time_before(jiffies, stop));
- if (!b1dma_rx_full(card->port))
- return -1;
- *s++ = t1inp(card->port, 0x00);
- }
- return 0;
-}
-
-static int WriteReg(avmcard *card, u32 reg, u8 val)
-{
- u8 cmd = 0x00;
- if (b1dma_tolink(card, &cmd, 1) == 0
- && b1dma_tolink(card, &reg, 4) == 0) {
- u32 tmp = val;
- return b1dma_tolink(card, &tmp, 4);
- }
- return -1;
-}
-
-static u8 ReadReg(avmcard *card, u32 reg)
-{
- u8 cmd = 0x01;
- if (b1dma_tolink(card, &cmd, 1) == 0
- && b1dma_tolink(card, &reg, 4) == 0) {
- u32 tmp;
- if (b1dma_fromlink(card, &tmp, 4) == 0)
- return (u8)tmp;
- }
- return 0xff;
-}
-
-/* ------------------------------------------------------------- */
-
-static inline void _put_byte(void **pp, u8 val)
-{
- u8 *s = *pp;
- *s++ = val;
- *pp = s;
-}
-
-static inline void _put_word(void **pp, u32 val)
-{
- u8 *s = *pp;
- *s++ = val & 0xff;
- *s++ = (val >> 8) & 0xff;
- *s++ = (val >> 16) & 0xff;
- *s++ = (val >> 24) & 0xff;
- *pp = s;
-}
-
-static inline void _put_slice(void **pp, unsigned char *dp, unsigned int len)
-{
- unsigned i = len;
- _put_word(pp, i);
- while (i-- > 0)
- _put_byte(pp, *dp++);
-}
-
-static inline u8 _get_byte(void **pp)
-{
- u8 *s = *pp;
- u8 val;
- val = *s++;
- *pp = s;
- return val;
-}
-
-static inline u32 _get_word(void **pp)
-{
- u8 *s = *pp;
- u32 val;
- val = *s++;
- val |= (*s++ << 8);
- val |= (*s++ << 16);
- val |= (*s++ << 24);
- *pp = s;
- return val;
-}
-
-static inline u32 _get_slice(void **pp, unsigned char *dp)
-{
- unsigned int len, i;
-
- len = i = _get_word(pp);
- while (i-- > 0) *dp++ = _get_byte(pp);
- return len;
-}
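
/*
 * Unlike the port helpers in avmcard.h, these _get_*()/_put_*()
 * variants walk a DMA buffer through a caller-owned cursor (void **pp)
 * that each call advances. Minimal userspace sketch of the read side:
 */
#include <stdint.h>
#include <stdio.h>

static uint8_t get_byte(const uint8_t **pp)
{
	return *(*pp)++;
}

static uint32_t get_word(const uint8_t **pp)
{
	uint32_t v;

	v = get_byte(pp);
	v |= get_byte(pp) << 8;
	v |= get_byte(pp) << 16;
	v |= (uint32_t)get_byte(pp) << 24;
	return v;
}

int main(void)
{
	/* a command byte, then a 32-bit word, as in b1dma_handle_rx() */
	const uint8_t frame[] = { 0x21, 0x2a, 0x00, 0x00, 0x00 };
	const uint8_t *p = frame;
	uint8_t cmd = get_byte(&p);
	uint32_t word = get_word(&p);

	printf("cmd=0x%02x word=%u\n", cmd, word);	/* cmd=0x21 word=42 */
	return 0;
}
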
-
-/* ------------------------------------------------------------- */
-
-void b1dma_reset(avmcard *card)
-{
- card->csr = 0x0;
- b1dma_writel(card, card->csr, AMCC_INTCSR);
- b1dma_writel(card, 0, AMCC_MCSR);
- b1dma_writel(card, 0, AMCC_RXLEN);
- b1dma_writel(card, 0, AMCC_TXLEN);
-
- t1outp(card->port, 0x10, 0x00);
- t1outp(card->port, 0x07, 0x00);
-
- b1dma_writel(card, 0, AMCC_MCSR);
- mdelay(10);
- b1dma_writel(card, 0x0f000000, AMCC_MCSR); /* reset all */
- mdelay(10);
- b1dma_writel(card, 0, AMCC_MCSR);
- if (card->cardtype == avm_t1pci)
- mdelay(42);
- else
- mdelay(10);
-}
-
-/* ------------------------------------------------------------- */
-
-static int b1dma_detect(avmcard *card)
-{
- b1dma_writel(card, 0, AMCC_MCSR);
- mdelay(10);
- b1dma_writel(card, 0x0f000000, AMCC_MCSR); /* reset all */
- mdelay(10);
- b1dma_writel(card, 0, AMCC_MCSR);
- mdelay(42);
-
- b1dma_writel(card, 0, AMCC_RXLEN);
- b1dma_writel(card, 0, AMCC_TXLEN);
- card->csr = 0x0;
- b1dma_writel(card, card->csr, AMCC_INTCSR);
-
- if (b1dma_readl(card, AMCC_MCSR) != 0x000000E6)
- return 1;
-
- b1dma_writel(card, 0xffffffff, AMCC_RXPTR);
- b1dma_writel(card, 0xffffffff, AMCC_TXPTR);
- if (b1dma_readl(card, AMCC_RXPTR) != 0xfffffffc
- || b1dma_readl(card, AMCC_TXPTR) != 0xfffffffc)
- return 2;
-
- b1dma_writel(card, 0x0, AMCC_RXPTR);
- b1dma_writel(card, 0x0, AMCC_TXPTR);
- if (b1dma_readl(card, AMCC_RXPTR) != 0x0
- || b1dma_readl(card, AMCC_TXPTR) != 0x0)
- return 3;
-
- t1outp(card->port, 0x10, 0x00);
- t1outp(card->port, 0x07, 0x00);
-
- t1outp(card->port, 0x02, 0x02);
- t1outp(card->port, 0x03, 0x02);
-
- if ((t1inp(card->port, 0x02) & 0xFE) != 0x02
- || t1inp(card->port, 0x3) != 0x03)
- return 4;
-
- t1outp(card->port, 0x02, 0x00);
- t1outp(card->port, 0x03, 0x00);
-
- if ((t1inp(card->port, 0x02) & 0xFE) != 0x00
- || t1inp(card->port, 0x3) != 0x01)
- return 5;
-
- return 0;
-}
-
-int t1pci_detect(avmcard *card)
-{
- int ret;
-
- if ((ret = b1dma_detect(card)) != 0)
- return ret;
-
- /* Transputer test */
-
- if (WriteReg(card, 0x80001000, 0x11) != 0
- || WriteReg(card, 0x80101000, 0x22) != 0
- || WriteReg(card, 0x80201000, 0x33) != 0
- || WriteReg(card, 0x80301000, 0x44) != 0)
- return 6;
-
- if (ReadReg(card, 0x80001000) != 0x11
- || ReadReg(card, 0x80101000) != 0x22
- || ReadReg(card, 0x80201000) != 0x33
- || ReadReg(card, 0x80301000) != 0x44)
- return 7;
-
- if (WriteReg(card, 0x80001000, 0x55) != 0
- || WriteReg(card, 0x80101000, 0x66) != 0
- || WriteReg(card, 0x80201000, 0x77) != 0
- || WriteReg(card, 0x80301000, 0x88) != 0)
- return 8;
-
- if (ReadReg(card, 0x80001000) != 0x55
- || ReadReg(card, 0x80101000) != 0x66
- || ReadReg(card, 0x80201000) != 0x77
- || ReadReg(card, 0x80301000) != 0x88)
- return 9;
-
- return 0;
-}
-
-int b1pciv4_detect(avmcard *card)
-{
- int ret, i;
-
- if ((ret = b1dma_detect(card)) != 0)
- return ret;
-
- for (i = 0; i < 5; i++) {
- if (WriteReg(card, 0x80A00000, 0x21) != 0)
- return 6;
- if ((ReadReg(card, 0x80A00000) & 0x01) != 0x01)
- return 7;
- }
- for (i = 0; i < 5; i++) {
- if (WriteReg(card, 0x80A00000, 0x20) != 0)
- return 8;
- if ((ReadReg(card, 0x80A00000) & 0x01) != 0x00)
- return 9;
- }
-
- return 0;
-}
-
-static void b1dma_queue_tx(avmcard *card, struct sk_buff *skb)
-{
- unsigned long flags;
-
- spin_lock_irqsave(&card->lock, flags);
-
- skb_queue_tail(&card->dma->send_queue, skb);
-
- if (!(card->csr & EN_TX_TC_INT)) {
- b1dma_dispatch_tx(card);
- b1dma_writel(card, card->csr, AMCC_INTCSR);
- }
-
- spin_unlock_irqrestore(&card->lock, flags);
-}
-
-/* ------------------------------------------------------------- */
-
-static void b1dma_dispatch_tx(avmcard *card)
-{
- avmcard_dmainfo *dma = card->dma;
- struct sk_buff *skb;
- u8 cmd, subcmd;
- u16 len;
- u32 txlen;
- void *p;
-
- skb = skb_dequeue(&dma->send_queue);
-
- len = CAPIMSG_LEN(skb->data);
-
- if (len) {
- cmd = CAPIMSG_COMMAND(skb->data);
- subcmd = CAPIMSG_SUBCOMMAND(skb->data);
-
- p = dma->sendbuf.dmabuf;
-
- if (CAPICMD(cmd, subcmd) == CAPI_DATA_B3_REQ) {
- u16 dlen = CAPIMSG_DATALEN(skb->data);
- _put_byte(&p, SEND_DATA_B3_REQ);
- _put_slice(&p, skb->data, len);
- _put_slice(&p, skb->data + len, dlen);
- } else {
- _put_byte(&p, SEND_MESSAGE);
- _put_slice(&p, skb->data, len);
- }
- txlen = (u8 *)p - (u8 *)dma->sendbuf.dmabuf;
-#ifdef AVM_B1DMA_DEBUG
- printk(KERN_DEBUG "tx: put msg len=%d\n", txlen);
-#endif
- } else {
- txlen = skb->len - 2;
-#ifdef AVM_B1DMA_POLLDEBUG
- if (skb->data[2] == SEND_POLLACK)
- printk(KERN_INFO "%s: send ack\n", card->name);
-#endif
-#ifdef AVM_B1DMA_DEBUG
- printk(KERN_DEBUG "tx: put 0x%x len=%d\n",
- skb->data[2], txlen);
-#endif
- skb_copy_from_linear_data_offset(skb, 2, dma->sendbuf.dmabuf,
- skb->len - 2);
- }
- txlen = (txlen + 3) & ~3;
-
- b1dma_writel(card, dma->sendbuf.dmaaddr, AMCC_TXPTR);
- b1dma_writel(card, txlen, AMCC_TXLEN);
-
- card->csr |= EN_TX_TC_INT;
-
- dev_kfree_skb_any(skb);
-}
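
/*
 * The DMA length written to AMCC_TXLEN is rounded up to a multiple of
 * four, presumably because the S5933 transfers whole 32-bit words.
 * Quick check of the "(txlen + 3) & ~3" rounding used above:
 */
#include <stdio.h>

int main(void)
{
	for (unsigned int len = 12; len <= 16; len++)
		printf("%u -> %u\n", len, (len + 3) & ~3u);
	/* 12->12, 13->16, 14->16, 15->16, 16->16 */
	return 0;
}
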
-
-/* ------------------------------------------------------------- */
-
-static void queue_pollack(avmcard *card)
-{
- struct sk_buff *skb;
- void *p;
-
- skb = alloc_skb(3, GFP_ATOMIC);
- if (!skb) {
- printk(KERN_CRIT "%s: no memory, lost poll ack\n",
- card->name);
- return;
- }
- p = skb->data;
- _put_byte(&p, 0);
- _put_byte(&p, 0);
- _put_byte(&p, SEND_POLLACK);
- skb_put(skb, (u8 *)p - (u8 *)skb->data);
-
- b1dma_queue_tx(card, skb);
-}
-
-/* ------------------------------------------------------------- */
-
-static void b1dma_handle_rx(avmcard *card)
-{
- avmctrl_info *cinfo = &card->ctrlinfo[0];
- avmcard_dmainfo *dma = card->dma;
- struct capi_ctr *ctrl = &cinfo->capi_ctrl;
- struct sk_buff *skb;
- void *p = dma->recvbuf.dmabuf + 4;
- u32 ApplId, MsgLen, DataB3Len, NCCI, WindowSize;
- u8 b1cmd = _get_byte(&p);
-
-#ifdef AVM_B1DMA_DEBUG
- printk(KERN_DEBUG "rx: 0x%x %lu\n", b1cmd, (unsigned long)dma->recvlen);
-#endif
-
- switch (b1cmd) {
- case RECEIVE_DATA_B3_IND:
-
- ApplId = (unsigned) _get_word(&p);
- MsgLen = _get_slice(&p, card->msgbuf);
- DataB3Len = _get_slice(&p, card->databuf);
-
- if (MsgLen < 30) { /* not CAPI 64Bit */
- memset(card->msgbuf + MsgLen, 0, 30 - MsgLen);
- MsgLen = 30;
- CAPIMSG_SETLEN(card->msgbuf, 30);
- }
- if (!(skb = alloc_skb(DataB3Len + MsgLen, GFP_ATOMIC))) {
- printk(KERN_ERR "%s: incoming packet dropped\n",
- card->name);
- } else {
- skb_put_data(skb, card->msgbuf, MsgLen);
- skb_put_data(skb, card->databuf, DataB3Len);
- capi_ctr_handle_message(ctrl, ApplId, skb);
- }
- break;
-
- case RECEIVE_MESSAGE:
-
- ApplId = (unsigned) _get_word(&p);
- MsgLen = _get_slice(&p, card->msgbuf);
- if (!(skb = alloc_skb(MsgLen, GFP_ATOMIC))) {
- printk(KERN_ERR "%s: incoming packet dropped\n",
- card->name);
- } else {
- skb_put_data(skb, card->msgbuf, MsgLen);
- if (CAPIMSG_CMD(skb->data) == CAPI_DATA_B3_CONF) {
- spin_lock(&card->lock);
- capilib_data_b3_conf(&cinfo->ncci_head, ApplId,
- CAPIMSG_NCCI(skb->data),
- CAPIMSG_MSGID(skb->data));
- spin_unlock(&card->lock);
- }
- capi_ctr_handle_message(ctrl, ApplId, skb);
- }
- break;
-
- case RECEIVE_NEW_NCCI:
-
- ApplId = _get_word(&p);
- NCCI = _get_word(&p);
- WindowSize = _get_word(&p);
- spin_lock(&card->lock);
- capilib_new_ncci(&cinfo->ncci_head, ApplId, NCCI, WindowSize);
- spin_unlock(&card->lock);
- break;
-
- case RECEIVE_FREE_NCCI:
-
- ApplId = _get_word(&p);
- NCCI = _get_word(&p);
-
- if (NCCI != 0xffffffff) {
- spin_lock(&card->lock);
- capilib_free_ncci(&cinfo->ncci_head, ApplId, NCCI);
- spin_unlock(&card->lock);
- }
- break;
-
- case RECEIVE_START:
-#ifdef AVM_B1DMA_POLLDEBUG
- printk(KERN_INFO "%s: receive poll\n", card->name);
-#endif
- if (!suppress_pollack)
- queue_pollack(card);
- capi_ctr_resume_output(ctrl);
- break;
-
- case RECEIVE_STOP:
- capi_ctr_suspend_output(ctrl);
- break;
-
- case RECEIVE_INIT:
-
- cinfo->versionlen = _get_slice(&p, cinfo->versionbuf);
- b1_parse_version(cinfo);
- printk(KERN_INFO "%s: %s-card (%s) now active\n",
- card->name,
- cinfo->version[VER_CARDTYPE],
- cinfo->version[VER_DRIVER]);
- capi_ctr_ready(ctrl);
- break;
-
- case RECEIVE_TASK_READY:
- ApplId = (unsigned) _get_word(&p);
- MsgLen = _get_slice(&p, card->msgbuf);
- card->msgbuf[MsgLen] = 0;
- while (MsgLen > 0
- && (card->msgbuf[MsgLen - 1] == '\n'
- || card->msgbuf[MsgLen - 1] == '\r')) {
- card->msgbuf[MsgLen - 1] = 0;
- MsgLen--;
- }
- printk(KERN_INFO "%s: task %d \"%s\" ready.\n",
- card->name, ApplId, card->msgbuf);
- break;
-
- case RECEIVE_DEBUGMSG:
- MsgLen = _get_slice(&p, card->msgbuf);
- card->msgbuf[MsgLen] = 0;
- while (MsgLen > 0
- && (card->msgbuf[MsgLen - 1] == '\n'
- || card->msgbuf[MsgLen - 1] == '\r')) {
- card->msgbuf[MsgLen - 1] = 0;
- MsgLen--;
- }
- printk(KERN_INFO "%s: DEBUG: %s\n", card->name, card->msgbuf);
- break;
-
- default:
- printk(KERN_ERR "%s: b1dma_interrupt: 0x%x ???\n",
- card->name, b1cmd);
- return;
- }
-}
-
-/* ------------------------------------------------------------- */
-
-static void b1dma_handle_interrupt(avmcard *card)
-{
- u32 status;
- u32 newcsr;
-
- spin_lock(&card->lock);
-
- status = b1dma_readl(card, AMCC_INTCSR);
- if ((status & ANY_S5933_INT) == 0) {
- spin_unlock(&card->lock);
- return;
- }
-
- newcsr = card->csr | (status & ALL_INT);
- if (status & TX_TC_INT) newcsr &= ~EN_TX_TC_INT;
- if (status & RX_TC_INT) newcsr &= ~EN_RX_TC_INT;
- b1dma_writel(card, newcsr, AMCC_INTCSR);
-
- if ((status & RX_TC_INT) != 0) {
- struct avmcard_dmainfo *dma = card->dma;
- u32 rxlen;
- if (card->dma->recvlen == 0) {
- rxlen = b1dma_readl(card, AMCC_RXLEN);
- if (rxlen == 0) {
- dma->recvlen = *((u32 *)dma->recvbuf.dmabuf);
- rxlen = (dma->recvlen + 3) & ~3;
- b1dma_writel(card, dma->recvbuf.dmaaddr + 4, AMCC_RXPTR);
- b1dma_writel(card, rxlen, AMCC_RXLEN);
-#ifdef AVM_B1DMA_DEBUG
- } else {
- printk(KERN_ERR "%s: rx not complete (%d).\n",
- card->name, rxlen);
-#endif
- }
- } else {
- spin_unlock(&card->lock);
- b1dma_handle_rx(card);
- dma->recvlen = 0;
- spin_lock(&card->lock);
- b1dma_writel(card, dma->recvbuf.dmaaddr, AMCC_RXPTR);
- b1dma_writel(card, 4, AMCC_RXLEN);
- }
- }
-
- if ((status & TX_TC_INT) != 0) {
- if (skb_queue_empty(&card->dma->send_queue))
- card->csr &= ~EN_TX_TC_INT;
- else
- b1dma_dispatch_tx(card);
- }
- b1dma_writel(card, card->csr, AMCC_INTCSR);
-
- spin_unlock(&card->lock);
-}
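
/*
 * The receive side above runs in two DMA phases: a 4-byte transfer
 * first deposits the frame length at the start of recvbuf, then a
 * second transfer (rounded up to whole words) deposits the payload at
 * recvbuf + 4; dma->recvlen == 0 marks phase one. Condensed userspace
 * model of that state machine, with hypothetical values:
 */
#include <stdint.h>
#include <stdio.h>

/* one call per RX interrupt; returns bytes to program into AMCC_RXLEN */
static uint32_t rx_irq(uint32_t *recvlen, uint32_t buf_head_word)
{
	if (*recvlen == 0) {			/* phase 1: length arrived */
		*recvlen = buf_head_word;
		return (*recvlen + 3) & ~3u;	/* arm the payload DMA */
	}
	/* phase 2: payload arrived; the frame would be handled here */
	*recvlen = 0;
	return 4;				/* re-arm the length DMA */
}

int main(void)
{
	uint32_t recvlen = 0;

	printf("irq 1: arm %u bytes\n", rx_irq(&recvlen, 9));	/* 12 */
	printf("irq 2: arm %u bytes\n", rx_irq(&recvlen, 0));	/* 4 */
	return 0;
}
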
-
-irqreturn_t b1dma_interrupt(int interrupt, void *devptr)
-{
- avmcard *card = devptr;
-
- b1dma_handle_interrupt(card);
- return IRQ_HANDLED;
-}
-
-/* ------------------------------------------------------------- */
-
-static int b1dma_loaded(avmcard *card)
-{
- unsigned long stop;
- unsigned char ans;
- unsigned long tout = 2;
- unsigned int base = card->port;
-
- for (stop = jiffies + tout * HZ; time_before(jiffies, stop);) {
- if (b1_tx_empty(base))
- break;
- }
- if (!b1_tx_empty(base)) {
- printk(KERN_ERR "%s: b1dma_loaded: tx err, corrupted t4 file ?\n",
- card->name);
- return 0;
- }
- b1_put_byte(base, SEND_POLLACK);
- for (stop = jiffies + tout * HZ; time_before(jiffies, stop);) {
- if (b1_rx_full(base)) {
- if ((ans = b1_get_byte(base)) == RECEIVE_POLLDWORD) {
- return 1;
- }
- printk(KERN_ERR "%s: b1dma_loaded: got 0x%x, firmware not running in dword mode\n", card->name, ans);
- return 0;
- }
- }
- printk(KERN_ERR "%s: b1dma_loaded: firmware not running\n", card->name);
- return 0;
-}
-
-/* ------------------------------------------------------------- */
-
-static void b1dma_send_init(avmcard *card)
-{
- struct sk_buff *skb;
- void *p;
-
- skb = alloc_skb(15, GFP_ATOMIC);
- if (!skb) {
- printk(KERN_CRIT "%s: no memory, lost register appl.\n",
- card->name);
- return;
- }
- p = skb->data;
- _put_byte(&p, 0);
- _put_byte(&p, 0);
- _put_byte(&p, SEND_INIT);
- _put_word(&p, CAPI_MAXAPPL);
- _put_word(&p, AVM_NCCI_PER_CHANNEL * 30);
- _put_word(&p, card->cardnr - 1);
- skb_put(skb, (u8 *)p - (u8 *)skb->data);
-
- b1dma_queue_tx(card, skb);
-}
-
-int b1dma_load_firmware(struct capi_ctr *ctrl, capiloaddata *data)
-{
- avmctrl_info *cinfo = (avmctrl_info *)(ctrl->driverdata);
- avmcard *card = cinfo->card;
- int retval;
-
- b1dma_reset(card);
-
- if ((retval = b1_load_t4file(card, &data->firmware))) {
- b1dma_reset(card);
- printk(KERN_ERR "%s: failed to load t4file!!\n",
- card->name);
- return retval;
- }
-
- if (data->configuration.len > 0 && data->configuration.data) {
- if ((retval = b1_load_config(card, &data->configuration))) {
- b1dma_reset(card);
- printk(KERN_ERR "%s: failed to load config!!\n",
- card->name);
- return retval;
- }
- }
-
- if (!b1dma_loaded(card)) {
- b1dma_reset(card);
- printk(KERN_ERR "%s: failed to load t4file.\n", card->name);
- return -EIO;
- }
-
- card->csr = AVM_FLAG;
- b1dma_writel(card, card->csr, AMCC_INTCSR);
- b1dma_writel(card, EN_A2P_TRANSFERS | EN_P2A_TRANSFERS | A2P_HI_PRIORITY |
- P2A_HI_PRIORITY | RESET_A2P_FLAGS | RESET_P2A_FLAGS,
- AMCC_MCSR);
- t1outp(card->port, 0x07, 0x30);
- t1outp(card->port, 0x10, 0xF0);
-
- card->dma->recvlen = 0;
- b1dma_writel(card, card->dma->recvbuf.dmaaddr, AMCC_RXPTR);
- b1dma_writel(card, 4, AMCC_RXLEN);
- card->csr |= EN_RX_TC_INT;
- b1dma_writel(card, card->csr, AMCC_INTCSR);
-
- b1dma_send_init(card);
-
- return 0;
-}
-
-void b1dma_reset_ctr(struct capi_ctr *ctrl)
-{
- avmctrl_info *cinfo = (avmctrl_info *)(ctrl->driverdata);
- avmcard *card = cinfo->card;
- unsigned long flags;
-
- spin_lock_irqsave(&card->lock, flags);
- b1dma_reset(card);
-
- memset(cinfo->version, 0, sizeof(cinfo->version));
- capilib_release(&cinfo->ncci_head);
- spin_unlock_irqrestore(&card->lock, flags);
- capi_ctr_down(ctrl);
-}
-
-/* ------------------------------------------------------------- */
-
-void b1dma_register_appl(struct capi_ctr *ctrl,
- u16 appl,
- capi_register_params *rp)
-{
- avmctrl_info *cinfo = (avmctrl_info *)(ctrl->driverdata);
- avmcard *card = cinfo->card;
- struct sk_buff *skb;
- int want = rp->level3cnt;
- int nconn;
- void *p;
-
- if (want > 0) nconn = want;
- else nconn = ctrl->profile.nbchannel * -want;
- if (nconn == 0) nconn = ctrl->profile.nbchannel;
-
- skb = alloc_skb(23, GFP_ATOMIC);
- if (!skb) {
- printk(KERN_CRIT "%s: no memory, lost register appl.\n",
- card->name);
- return;
- }
- p = skb->data;
- _put_byte(&p, 0);
- _put_byte(&p, 0);
- _put_byte(&p, SEND_REGISTER);
- _put_word(&p, appl);
- _put_word(&p, 1024 * (nconn + 1));
- _put_word(&p, nconn);
- _put_word(&p, rp->datablkcnt);
- _put_word(&p, rp->datablklen);
- skb_put(skb, (u8 *)p - (u8 *)skb->data);
-
- b1dma_queue_tx(card, skb);
-}
-
-/* ------------------------------------------------------------- */
-
-void b1dma_release_appl(struct capi_ctr *ctrl, u16 appl)
-{
- avmctrl_info *cinfo = (avmctrl_info *)(ctrl->driverdata);
- avmcard *card = cinfo->card;
- struct sk_buff *skb;
- void *p;
- unsigned long flags;
-
- spin_lock_irqsave(&card->lock, flags);
- capilib_release_appl(&cinfo->ncci_head, appl);
- spin_unlock_irqrestore(&card->lock, flags);
-
- skb = alloc_skb(7, GFP_ATOMIC);
- if (!skb) {
- printk(KERN_CRIT "%s: no memory, lost release appl.\n",
- card->name);
- return;
- }
- p = skb->data;
- _put_byte(&p, 0);
- _put_byte(&p, 0);
- _put_byte(&p, SEND_RELEASE);
- _put_word(&p, appl);
-
- skb_put(skb, (u8 *)p - (u8 *)skb->data);
-
- b1dma_queue_tx(card, skb);
-}
-
-/* ------------------------------------------------------------- */
-
-u16 b1dma_send_message(struct capi_ctr *ctrl, struct sk_buff *skb)
-{
- avmctrl_info *cinfo = (avmctrl_info *)(ctrl->driverdata);
- avmcard *card = cinfo->card;
- u16 retval = CAPI_NOERROR;
-
- if (CAPIMSG_CMD(skb->data) == CAPI_DATA_B3_REQ) {
- unsigned long flags;
- spin_lock_irqsave(&card->lock, flags);
- retval = capilib_data_b3_req(&cinfo->ncci_head,
- CAPIMSG_APPID(skb->data),
- CAPIMSG_NCCI(skb->data),
- CAPIMSG_MSGID(skb->data));
- spin_unlock_irqrestore(&card->lock, flags);
- }
- if (retval == CAPI_NOERROR)
- b1dma_queue_tx(card, skb);
-
- return retval;
-}
-
-/* ------------------------------------------------------------- */
-
-int b1dma_proc_show(struct seq_file *m, void *v)
-{
- struct capi_ctr *ctrl = m->private;
- avmctrl_info *cinfo = (avmctrl_info *)(ctrl->driverdata);
- avmcard *card = cinfo->card;
- u8 flag;
- char *s;
- u32 txoff, txlen, rxoff, rxlen, csr;
- unsigned long flags;
-
- seq_printf(m, "%-16s %s\n", "name", card->name);
- seq_printf(m, "%-16s 0x%x\n", "io", card->port);
- seq_printf(m, "%-16s %d\n", "irq", card->irq);
- seq_printf(m, "%-16s 0x%lx\n", "membase", card->membase);
- switch (card->cardtype) {
- case avm_b1isa: s = "B1 ISA"; break;
- case avm_b1pci: s = "B1 PCI"; break;
- case avm_b1pcmcia: s = "B1 PCMCIA"; break;
- case avm_m1: s = "M1"; break;
- case avm_m2: s = "M2"; break;
- case avm_t1isa: s = "T1 ISA (HEMA)"; break;
- case avm_t1pci: s = "T1 PCI"; break;
- case avm_c4: s = "C4"; break;
- case avm_c2: s = "C2"; break;
- default: s = "???"; break;
- }
- seq_printf(m, "%-16s %s\n", "type", s);
- if ((s = cinfo->version[VER_DRIVER]) != NULL)
- seq_printf(m, "%-16s %s\n", "ver_driver", s);
- if ((s = cinfo->version[VER_CARDTYPE]) != NULL)
- seq_printf(m, "%-16s %s\n", "ver_cardtype", s);
- if ((s = cinfo->version[VER_SERIAL]) != NULL)
- seq_printf(m, "%-16s %s\n", "ver_serial", s);
-
- if (card->cardtype != avm_m1) {
- flag = ((u8 *)(ctrl->profile.manu))[3];
- if (flag)
- seq_printf(m, "%-16s%s%s%s%s%s%s%s\n",
- "protocol",
- (flag & 0x01) ? " DSS1" : "",
- (flag & 0x02) ? " CT1" : "",
- (flag & 0x04) ? " VN3" : "",
- (flag & 0x08) ? " NI1" : "",
- (flag & 0x10) ? " AUSTEL" : "",
- (flag & 0x20) ? " ESS" : "",
- (flag & 0x40) ? " 1TR6" : ""
- );
- }
- if (card->cardtype != avm_m1) {
- flag = ((u8 *)(ctrl->profile.manu))[5];
- if (flag)
- seq_printf(m, "%-16s%s%s%s%s\n",
- "linetype",
- (flag & 0x01) ? " point to point" : "",
- (flag & 0x02) ? " point to multipoint" : "",
- (flag & 0x08) ? " leased line without D-channel" : "",
- (flag & 0x04) ? " leased line with D-channel" : ""
- );
- }
- seq_printf(m, "%-16s %s\n", "cardname", cinfo->cardname);
-
-
- spin_lock_irqsave(&card->lock, flags);
-
- txoff = (dma_addr_t)b1dma_readl(card, AMCC_TXPTR) - card->dma->sendbuf.dmaaddr;
- txlen = b1dma_readl(card, AMCC_TXLEN);
-
- rxoff = (dma_addr_t)b1dma_readl(card, AMCC_RXPTR) - card->dma->recvbuf.dmaaddr;
- rxlen = b1dma_readl(card, AMCC_RXLEN);
-
- csr = b1dma_readl(card, AMCC_INTCSR);
-
- spin_unlock_irqrestore(&card->lock, flags);
-
- seq_printf(m, "%-16s 0x%lx\n", "csr (cached)", (unsigned long)card->csr);
- seq_printf(m, "%-16s 0x%lx\n", "csr", (unsigned long)csr);
- seq_printf(m, "%-16s %lu\n", "txoff", (unsigned long)txoff);
- seq_printf(m, "%-16s %lu\n", "txlen", (unsigned long)txlen);
- seq_printf(m, "%-16s %lu\n", "rxoff", (unsigned long)rxoff);
- seq_printf(m, "%-16s %lu\n", "rxlen", (unsigned long)rxlen);
-
- return 0;
-}
-EXPORT_SYMBOL(b1dma_proc_show);
-
-/* ------------------------------------------------------------- */
-
-EXPORT_SYMBOL(b1dma_reset);
-EXPORT_SYMBOL(t1pci_detect);
-EXPORT_SYMBOL(b1pciv4_detect);
-EXPORT_SYMBOL(b1dma_interrupt);
-
-EXPORT_SYMBOL(b1dma_load_firmware);
-EXPORT_SYMBOL(b1dma_reset_ctr);
-EXPORT_SYMBOL(b1dma_register_appl);
-EXPORT_SYMBOL(b1dma_release_appl);
-EXPORT_SYMBOL(b1dma_send_message);
-
-static int __init b1dma_init(void)
-{
- char *p;
- char rev[32];
-
- if ((p = strchr(revision, ':')) != NULL && p[1]) {
- strlcpy(rev, p + 2, sizeof(rev));
- if ((p = strchr(rev, '$')) != NULL && p > rev)
- *(p - 1) = 0;
- } else
- strcpy(rev, "1.0");
-
- printk(KERN_INFO "b1dma: revision %s\n", rev);
-
- return 0;
-}
-
-static void __exit b1dma_exit(void)
-{
-}
-
-module_init(b1dma_init);
-module_exit(b1dma_exit);
diff --git a/drivers/staging/isdn/avm/b1isa.c b/drivers/staging/isdn/avm/b1isa.c
deleted file mode 100644
index cdfea72e0ef6..000000000000
--- a/drivers/staging/isdn/avm/b1isa.c
+++ /dev/null
@@ -1,243 +0,0 @@
-/* $Id: b1isa.c,v 1.1.2.3 2004/02/10 01:07:12 keil Exp $
- *
- * Module for AVM B1 ISA-card.
- *
- * Copyright 1999 by Carsten Paeth <calle@calle.de>
- *
- * This software may be used and distributed according to the terms
- * of the GNU General Public License, incorporated herein by reference.
- *
- */
-
-#include <linux/module.h>
-#include <linux/kernel.h>
-#include <linux/skbuff.h>
-#include <linux/delay.h>
-#include <linux/mm.h>
-#include <linux/interrupt.h>
-#include <linux/ioport.h>
-#include <linux/capi.h>
-#include <linux/init.h>
-#include <linux/pci.h>
-#include <asm/io.h>
-#include <linux/isdn/capicmd.h>
-#include <linux/isdn/capiutil.h>
-#include <linux/isdn/capilli.h>
-#include "avmcard.h"
-
-/* ------------------------------------------------------------- */
-
-static char *revision = "$Revision: 1.1.2.3 $";
-
-/* ------------------------------------------------------------- */
-
-MODULE_DESCRIPTION("CAPI4Linux: Driver for AVM B1 ISA card");
-MODULE_AUTHOR("Carsten Paeth");
-MODULE_LICENSE("GPL");
-
-/* ------------------------------------------------------------- */
-
-static void b1isa_remove(struct pci_dev *pdev)
-{
- avmctrl_info *cinfo = pci_get_drvdata(pdev);
- avmcard *card;
-
- if (!cinfo)
- return;
-
- card = cinfo->card;
-
- b1_reset(card->port);
- b1_reset(card->port);
-
- detach_capi_ctr(&cinfo->capi_ctrl);
- free_irq(card->irq, card);
- release_region(card->port, AVMB1_PORTLEN);
- b1_free_card(card);
-}
-
-/* ------------------------------------------------------------- */
-
-static char *b1isa_procinfo(struct capi_ctr *ctrl);
-
-static int b1isa_probe(struct pci_dev *pdev)
-{
- avmctrl_info *cinfo;
- avmcard *card;
- int retval;
-
- card = b1_alloc_card(1);
- if (!card) {
- printk(KERN_WARNING "b1isa: no memory.\n");
- retval = -ENOMEM;
- goto err;
- }
-
- cinfo = card->ctrlinfo;
-
- card->port = pci_resource_start(pdev, 0);
- card->irq = pdev->irq;
- card->cardtype = avm_b1isa;
- sprintf(card->name, "b1isa-%x", card->port);
-
- if (card->port != 0x150 && card->port != 0x250
- && card->port != 0x300 && card->port != 0x340) {
- printk(KERN_WARNING "b1isa: invalid port 0x%x.\n", card->port);
- retval = -EINVAL;
- goto err_free;
- }
- if (b1_irq_table[card->irq & 0xf] == 0) {
- printk(KERN_WARNING "b1isa: irq %d not valid.\n", card->irq);
- retval = -EINVAL;
- goto err_free;
- }
- if (!request_region(card->port, AVMB1_PORTLEN, card->name)) {
- printk(KERN_WARNING "b1isa: ports 0x%03x-0x%03x in use.\n",
- card->port, card->port + AVMB1_PORTLEN);
- retval = -EBUSY;
- goto err_free;
- }
- retval = request_irq(card->irq, b1_interrupt, 0, card->name, card);
- if (retval) {
- printk(KERN_ERR "b1isa: unable to get IRQ %d.\n", card->irq);
- goto err_release_region;
- }
- b1_reset(card->port);
- if ((retval = b1_detect(card->port, card->cardtype)) != 0) {
- printk(KERN_NOTICE "b1isa: NO card at 0x%x (%d)\n",
- card->port, retval);
- retval = -ENODEV;
- goto err_free_irq;
- }
- b1_reset(card->port);
- b1_getrevision(card);
-
- cinfo->capi_ctrl.owner = THIS_MODULE;
- cinfo->capi_ctrl.driver_name = "b1isa";
- cinfo->capi_ctrl.driverdata = cinfo;
- cinfo->capi_ctrl.register_appl = b1_register_appl;
- cinfo->capi_ctrl.release_appl = b1_release_appl;
- cinfo->capi_ctrl.send_message = b1_send_message;
- cinfo->capi_ctrl.load_firmware = b1_load_firmware;
- cinfo->capi_ctrl.reset_ctr = b1_reset_ctr;
- cinfo->capi_ctrl.procinfo = b1isa_procinfo;
- cinfo->capi_ctrl.proc_show = b1_proc_show;
- strcpy(cinfo->capi_ctrl.name, card->name);
-
- retval = attach_capi_ctr(&cinfo->capi_ctrl);
- if (retval) {
- printk(KERN_ERR "b1isa: attach controller failed.\n");
- goto err_free_irq;
- }
-
- printk(KERN_INFO "b1isa: AVM B1 ISA at i/o %#x, irq %d, revision %d\n",
- card->port, card->irq, card->revision);
-
- pci_set_drvdata(pdev, cinfo);
- return 0;
-
-err_free_irq:
- free_irq(card->irq, card);
-err_release_region:
- release_region(card->port, AVMB1_PORTLEN);
-err_free:
- b1_free_card(card);
-err:
- return retval;
-}
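
The probe above accepts only the four I/O bases the B1 ISA address decoder answers at and rejects IRQs the card cannot signal. A sketch of those checks factored into one predicate (hypothetical helper; b1_irq_table is the driver's shared table of valid B1 interrupt encodings, where a zero entry marks an unusable IRQ):

static bool b1isa_resources_valid(unsigned int port, unsigned int irq)
{
	/* The B1 ISA decoder only answers at these four bases. */
	if (port != 0x150 && port != 0x250 &&
	    port != 0x300 && port != 0x340)
		return false;
	/* Zero in b1_irq_table marks an IRQ the card cannot raise. */
	return b1_irq_table[irq & 0xf] != 0;
}
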
-
-static char *b1isa_procinfo(struct capi_ctr *ctrl)
-{
- avmctrl_info *cinfo = (avmctrl_info *)(ctrl->driverdata);
-
- if (!cinfo)
- return "";
- sprintf(cinfo->infobuf, "%s %s 0x%x %d r%d",
- cinfo->cardname[0] ? cinfo->cardname : "-",
- cinfo->version[VER_DRIVER] ? cinfo->version[VER_DRIVER] : "-",
- cinfo->card ? cinfo->card->port : 0x0,
- cinfo->card ? cinfo->card->irq : 0,
- cinfo->card ? cinfo->card->revision : 0
- );
- return cinfo->infobuf;
-}
-
-/* ------------------------------------------------------------- */
-
-#define MAX_CARDS 4
-static struct pci_dev isa_dev[MAX_CARDS];
-static int io[MAX_CARDS];
-static int irq[MAX_CARDS];
-
-module_param_hw_array(io, int, ioport, NULL, 0);
-module_param_hw_array(irq, int, irq, NULL, 0);
-MODULE_PARM_DESC(io, "I/O base address(es)");
-MODULE_PARM_DESC(irq, "IRQ number(s) (assigned)");
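
At load time these arrays map one-to-one onto card slots, e.g. "modprobe b1isa io=0x150,0x300 irq=7,10" registers two cards; a zero io[] entry ends the scan in b1isa_init() below.
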
-
-static int b1isa_add_card(struct capi_driver *driver, capicardparams *data)
-{
- int i;
-
- for (i = 0; i < MAX_CARDS; i++) {
- if (isa_dev[i].resource[0].start)
- continue;
-
- isa_dev[i].resource[0].start = data->port;
- isa_dev[i].irq = data->irq;
-
- if (b1isa_probe(&isa_dev[i]) == 0)
- return 0;
- }
- return -ENODEV;
-}
-
-static struct capi_driver capi_driver_b1isa = {
- .name = "b1isa",
- .revision = "1.0",
- .add_card = b1isa_add_card,
-};
-
-static int __init b1isa_init(void)
-{
- char *p;
- char rev[32];
- int i;
-
- if ((p = strchr(revision, ':')) != NULL && p[1]) {
- strlcpy(rev, p + 2, 32);
- if ((p = strchr(rev, '$')) != NULL && p > rev)
- *(p - 1) = 0;
- } else
- strcpy(rev, "1.0");
-
- for (i = 0; i < MAX_CARDS; i++) {
- if (!io[i])
- break;
-
- isa_dev[i].resource[0].start = io[i];
- isa_dev[i].irq = irq[i];
-
- if (b1isa_probe(&isa_dev[i]) != 0)
- return -ENODEV;
- }
-
- strlcpy(capi_driver_b1isa.revision, rev, 32);
- register_capi_driver(&capi_driver_b1isa);
- printk(KERN_INFO "b1isa: revision %s\n", rev);
-
- return 0;
-}
-
-static void __exit b1isa_exit(void)
-{
- int i;
-
- for (i = 0; i < MAX_CARDS; i++) {
- if (isa_dev[i].resource[0].start)
- b1isa_remove(&isa_dev[i]);
- }
- unregister_capi_driver(&capi_driver_b1isa);
-}
-
-module_init(b1isa_init);
-module_exit(b1isa_exit);
diff --git a/drivers/staging/isdn/avm/b1pci.c b/drivers/staging/isdn/avm/b1pci.c
deleted file mode 100644
index b76b57a82c02..000000000000
--- a/drivers/staging/isdn/avm/b1pci.c
+++ /dev/null
@@ -1,416 +0,0 @@
-/* $Id: b1pci.c,v 1.1.2.2 2004/01/16 21:09:27 keil Exp $
- *
- * Module for AVM B1 PCI-card.
- *
- * Copyright 1999 by Carsten Paeth <calle@calle.de>
- *
- * This software may be used and distributed according to the terms
- * of the GNU General Public License, incorporated herein by reference.
- *
- */
-
-#include <linux/module.h>
-#include <linux/kernel.h>
-#include <linux/skbuff.h>
-#include <linux/delay.h>
-#include <linux/mm.h>
-#include <linux/interrupt.h>
-#include <linux/ioport.h>
-#include <linux/pci.h>
-#include <linux/capi.h>
-#include <asm/io.h>
-#include <linux/init.h>
-#include <linux/isdn/capicmd.h>
-#include <linux/isdn/capiutil.h>
-#include <linux/isdn/capilli.h>
-#include "avmcard.h"
-
-/* ------------------------------------------------------------- */
-
-static char *revision = "$Revision: 1.1.2.2 $";
-
-/* ------------------------------------------------------------- */
-
-static struct pci_device_id b1pci_pci_tbl[] = {
- { PCI_VENDOR_ID_AVM, PCI_DEVICE_ID_AVM_B1, PCI_ANY_ID, PCI_ANY_ID },
- { } /* Terminating entry */
-};
-
-MODULE_DEVICE_TABLE(pci, b1pci_pci_tbl);
-MODULE_DESCRIPTION("CAPI4Linux: Driver for AVM B1 PCI card");
-MODULE_AUTHOR("Carsten Paeth");
-MODULE_LICENSE("GPL");
-
-/* ------------------------------------------------------------- */
-
-static char *b1pci_procinfo(struct capi_ctr *ctrl)
-{
- avmctrl_info *cinfo = (avmctrl_info *)(ctrl->driverdata);
-
- if (!cinfo)
- return "";
- sprintf(cinfo->infobuf, "%s %s 0x%x %d r%d",
- cinfo->cardname[0] ? cinfo->cardname : "-",
- cinfo->version[VER_DRIVER] ? cinfo->version[VER_DRIVER] : "-",
- cinfo->card ? cinfo->card->port : 0x0,
- cinfo->card ? cinfo->card->irq : 0,
- cinfo->card ? cinfo->card->revision : 0
- );
- return cinfo->infobuf;
-}
-
-/* ------------------------------------------------------------- */
-
-static int b1pci_probe(struct capicardparams *p, struct pci_dev *pdev)
-{
- avmcard *card;
- avmctrl_info *cinfo;
- int retval;
-
- card = b1_alloc_card(1);
- if (!card) {
- printk(KERN_WARNING "b1pci: no memory.\n");
- retval = -ENOMEM;
- goto err;
- }
-
- cinfo = card->ctrlinfo;
- sprintf(card->name, "b1pci-%x", p->port);
- card->port = p->port;
- card->irq = p->irq;
- card->cardtype = avm_b1pci;
-
- if (!request_region(card->port, AVMB1_PORTLEN, card->name)) {
- printk(KERN_WARNING "b1pci: ports 0x%03x-0x%03x in use.\n",
- card->port, card->port + AVMB1_PORTLEN);
- retval = -EBUSY;
- goto err_free;
- }
- b1_reset(card->port);
- retval = b1_detect(card->port, card->cardtype);
- if (retval) {
- printk(KERN_NOTICE "b1pci: NO card at 0x%x (%d)\n",
- card->port, retval);
- retval = -ENODEV;
- goto err_release_region;
- }
- b1_reset(card->port);
- b1_getrevision(card);
-
- retval = request_irq(card->irq, b1_interrupt, IRQF_SHARED, card->name, card);
- if (retval) {
- printk(KERN_ERR "b1pci: unable to get IRQ %d.\n", card->irq);
- retval = -EBUSY;
- goto err_release_region;
- }
-
- cinfo->capi_ctrl.driver_name = "b1pci";
- cinfo->capi_ctrl.driverdata = cinfo;
- cinfo->capi_ctrl.register_appl = b1_register_appl;
- cinfo->capi_ctrl.release_appl = b1_release_appl;
- cinfo->capi_ctrl.send_message = b1_send_message;
- cinfo->capi_ctrl.load_firmware = b1_load_firmware;
- cinfo->capi_ctrl.reset_ctr = b1_reset_ctr;
- cinfo->capi_ctrl.procinfo = b1pci_procinfo;
- cinfo->capi_ctrl.proc_show = b1_proc_show;
- strcpy(cinfo->capi_ctrl.name, card->name);
- cinfo->capi_ctrl.owner = THIS_MODULE;
-
- retval = attach_capi_ctr(&cinfo->capi_ctrl);
- if (retval) {
- printk(KERN_ERR "b1pci: attach controller failed.\n");
- goto err_free_irq;
- }
-
- if (card->revision >= 4) {
- printk(KERN_INFO "b1pci: AVM B1 PCI V4 at i/o %#x, irq %d, revision %d (no dma)\n",
- card->port, card->irq, card->revision);
- } else {
- printk(KERN_INFO "b1pci: AVM B1 PCI at i/o %#x, irq %d, revision %d\n",
- card->port, card->irq, card->revision);
- }
-
- pci_set_drvdata(pdev, card);
- return 0;
-
-err_free_irq:
- free_irq(card->irq, card);
-err_release_region:
- release_region(card->port, AVMB1_PORTLEN);
-err_free:
- b1_free_card(card);
-err:
- return retval;
-}
-
-static void b1pci_remove(struct pci_dev *pdev)
-{
- avmcard *card = pci_get_drvdata(pdev);
- avmctrl_info *cinfo = card->ctrlinfo;
- unsigned int port = card->port;
-
- b1_reset(port);
- b1_reset(port);
-
- detach_capi_ctr(&cinfo->capi_ctrl);
- free_irq(card->irq, card);
- release_region(card->port, AVMB1_PORTLEN);
- b1_free_card(card);
-}
-
-#ifdef CONFIG_ISDN_DRV_AVMB1_B1PCIV4
-/* ------------------------------------------------------------- */
-
-static char *b1pciv4_procinfo(struct capi_ctr *ctrl)
-{
- avmctrl_info *cinfo = (avmctrl_info *)(ctrl->driverdata);
-
- if (!cinfo)
- return "";
- sprintf(cinfo->infobuf, "%s %s 0x%x %d 0x%lx r%d",
- cinfo->cardname[0] ? cinfo->cardname : "-",
- cinfo->version[VER_DRIVER] ? cinfo->version[VER_DRIVER] : "-",
- cinfo->card ? cinfo->card->port : 0x0,
- cinfo->card ? cinfo->card->irq : 0,
- cinfo->card ? cinfo->card->membase : 0,
- cinfo->card ? cinfo->card->revision : 0
- );
- return cinfo->infobuf;
-}
-
-/* ------------------------------------------------------------- */
-
-static int b1pciv4_probe(struct capicardparams *p, struct pci_dev *pdev)
-{
- avmcard *card;
- avmctrl_info *cinfo;
- int retval;
-
- card = b1_alloc_card(1);
- if (!card) {
- printk(KERN_WARNING "b1pci: no memory.\n");
- retval = -ENOMEM;
- goto err;
- }
-
- card->dma = avmcard_dma_alloc("b1pci", pdev, 2048 + 128, 2048 + 128);
- if (!card->dma) {
- printk(KERN_WARNING "b1pci: dma alloc.\n");
- retval = -ENOMEM;
- goto err_free;
- }
-
- cinfo = card->ctrlinfo;
- sprintf(card->name, "b1pciv4-%x", p->port);
- card->port = p->port;
- card->irq = p->irq;
- card->membase = p->membase;
- card->cardtype = avm_b1pci;
-
- if (!request_region(card->port, AVMB1_PORTLEN, card->name)) {
- printk(KERN_WARNING "b1pci: ports 0x%03x-0x%03x in use.\n",
- card->port, card->port + AVMB1_PORTLEN);
- retval = -EBUSY;
- goto err_free_dma;
- }
-
- card->mbase = ioremap(card->membase, 64);
- if (!card->mbase) {
- printk(KERN_NOTICE "b1pci: can't remap memory at 0x%lx\n",
- card->membase);
- retval = -ENOMEM;
- goto err_release_region;
- }
-
- b1dma_reset(card);
-
- retval = b1pciv4_detect(card);
- if (retval) {
- printk(KERN_NOTICE "b1pci: NO card at 0x%x (%d)\n",
- card->port, retval);
- retval = -ENODEV;
- goto err_unmap;
- }
- b1dma_reset(card);
- b1_getrevision(card);
-
- retval = request_irq(card->irq, b1dma_interrupt, IRQF_SHARED, card->name, card);
- if (retval) {
- printk(KERN_ERR "b1pci: unable to get IRQ %d.\n",
- card->irq);
- retval = -EBUSY;
- goto err_unmap;
- }
-
- cinfo->capi_ctrl.owner = THIS_MODULE;
- cinfo->capi_ctrl.driver_name = "b1pciv4";
- cinfo->capi_ctrl.driverdata = cinfo;
- cinfo->capi_ctrl.register_appl = b1dma_register_appl;
- cinfo->capi_ctrl.release_appl = b1dma_release_appl;
- cinfo->capi_ctrl.send_message = b1dma_send_message;
- cinfo->capi_ctrl.load_firmware = b1dma_load_firmware;
- cinfo->capi_ctrl.reset_ctr = b1dma_reset_ctr;
- cinfo->capi_ctrl.procinfo = b1pciv4_procinfo;
- cinfo->capi_ctrl.proc_show = b1dma_proc_show;
- strcpy(cinfo->capi_ctrl.name, card->name);
-
- retval = attach_capi_ctr(&cinfo->capi_ctrl);
- if (retval) {
- printk(KERN_ERR "b1pci: attach controller failed.\n");
- goto err_free_irq;
- }
- card->cardnr = cinfo->capi_ctrl.cnr;
-
- printk(KERN_INFO "b1pci: AVM B1 PCI V4 at i/o %#x, irq %d, mem %#lx, revision %d (dma)\n",
- card->port, card->irq, card->membase, card->revision);
-
- pci_set_drvdata(pdev, card);
- return 0;
-
-err_free_irq:
- free_irq(card->irq, card);
-err_unmap:
- iounmap(card->mbase);
-err_release_region:
- release_region(card->port, AVMB1_PORTLEN);
-err_free_dma:
- avmcard_dma_free(card->dma);
-err_free:
- b1_free_card(card);
-err:
- return retval;
-}
-
-static void b1pciv4_remove(struct pci_dev *pdev)
-{
- avmcard *card = pci_get_drvdata(pdev);
- avmctrl_info *cinfo = card->ctrlinfo;
-
- b1dma_reset(card);
-
- detach_capi_ctr(&cinfo->capi_ctrl);
- free_irq(card->irq, card);
- iounmap(card->mbase);
- release_region(card->port, AVMB1_PORTLEN);
- avmcard_dma_free(card->dma);
- b1_free_card(card);
-}
-
-#endif /* CONFIG_ISDN_DRV_AVMB1_B1PCIV4 */
-
-static int b1pci_pci_probe(struct pci_dev *pdev,
- const struct pci_device_id *ent)
-{
- struct capicardparams param;
- int retval;
-
- if (pci_enable_device(pdev) < 0) {
- printk(KERN_ERR "b1pci: failed to enable AVM-B1\n");
- return -ENODEV;
- }
- param.irq = pdev->irq;
-
- if (pci_resource_start(pdev, 2)) { /* B1 PCI V4 */
-#ifdef CONFIG_ISDN_DRV_AVMB1_B1PCIV4
- pci_set_master(pdev);
-#endif
- param.membase = pci_resource_start(pdev, 0);
- param.port = pci_resource_start(pdev, 2);
-
- printk(KERN_INFO "b1pci: PCI BIOS reports AVM-B1 V4 at i/o %#x, irq %d, mem %#x\n",
- param.port, param.irq, param.membase);
-#ifdef CONFIG_ISDN_DRV_AVMB1_B1PCIV4
- retval = b1pciv4_probe(&param, pdev);
-#else
- retval = b1pci_probe(&param, pdev);
-#endif
- if (retval != 0) {
- printk(KERN_ERR "b1pci: no AVM-B1 V4 at i/o %#x, irq %d, mem %#x detected\n",
- param.port, param.irq, param.membase);
- }
- } else {
- param.membase = 0;
- param.port = pci_resource_start(pdev, 1);
-
- printk(KERN_INFO "b1pci: PCI BIOS reports AVM-B1 at i/o %#x, irq %d\n",
- param.port, param.irq);
- retval = b1pci_probe(&param, pdev);
- if (retval != 0) {
- printk(KERN_ERR "b1pci: no AVM-B1 at i/o %#x, irq %d detected\n",
- param.port, param.irq);
- }
- }
- return retval;
-}
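
The V4 detection above keys off the PCI BAR layout: a populated BAR 2 means a B1 PCI V4 (BAR 0 carries the shared memory, BAR 2 the I/O ports), while the older B1 exposes only its I/O range in BAR 1. As a standalone sketch (illustration only, not driver code):

static bool b1pci_is_v4(struct pci_dev *pdev)
{
	/* Only the V4 board populates BAR 2 (its I/O port range). */
	return pci_resource_start(pdev, 2) != 0;
}
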
-
-static void b1pci_pci_remove(struct pci_dev *pdev)
-{
-#ifdef CONFIG_ISDN_DRV_AVMB1_B1PCIV4
- avmcard *card = pci_get_drvdata(pdev);
-
- if (card->dma)
- b1pciv4_remove(pdev);
- else
- b1pci_remove(pdev);
-#else
- b1pci_remove(pdev);
-#endif
-}
-
-static struct pci_driver b1pci_pci_driver = {
- .name = "b1pci",
- .id_table = b1pci_pci_tbl,
- .probe = b1pci_pci_probe,
- .remove = b1pci_pci_remove,
-};
-
-static struct capi_driver capi_driver_b1pci = {
- .name = "b1pci",
- .revision = "1.0",
-};
-#ifdef CONFIG_ISDN_DRV_AVMB1_B1PCIV4
-static struct capi_driver capi_driver_b1pciv4 = {
- .name = "b1pciv4",
- .revision = "1.0",
-};
-#endif
-
-static int __init b1pci_init(void)
-{
- char *p;
- char rev[32];
- int err;
-
- if ((p = strchr(revision, ':')) != NULL && p[1]) {
- strlcpy(rev, p + 2, 32);
- if ((p = strchr(rev, '$')) != NULL && p > rev)
- *(p - 1) = 0;
- } else
- strcpy(rev, "1.0");
-
- err = pci_register_driver(&b1pci_pci_driver);
- if (!err) {
- strlcpy(capi_driver_b1pci.revision, rev, 32);
- register_capi_driver(&capi_driver_b1pci);
-#ifdef CONFIG_ISDN_DRV_AVMB1_B1PCIV4
- strlcpy(capi_driver_b1pciv4.revision, rev, 32);
- register_capi_driver(&capi_driver_b1pciv4);
-#endif
- printk(KERN_INFO "b1pci: revision %s\n", rev);
- }
- return err;
-}
-
-static void __exit b1pci_exit(void)
-{
- unregister_capi_driver(&capi_driver_b1pci);
-#ifdef CONFIG_ISDN_DRV_AVMB1_B1PCIV4
- unregister_capi_driver(&capi_driver_b1pciv4);
-#endif
- pci_unregister_driver(&b1pci_pci_driver);
-}
-
-module_init(b1pci_init);
-module_exit(b1pci_exit);
diff --git a/drivers/staging/isdn/avm/b1pcmcia.c b/drivers/staging/isdn/avm/b1pcmcia.c
deleted file mode 100644
index 3aca16e62902..000000000000
--- a/drivers/staging/isdn/avm/b1pcmcia.c
+++ /dev/null
@@ -1,224 +0,0 @@
-/* $Id: b1pcmcia.c,v 1.1.2.2 2004/01/16 21:09:27 keil Exp $
- *
- * Module for AVM B1/M1/M2 PCMCIA-card.
- *
- * Copyright 1999 by Carsten Paeth <calle@calle.de>
- *
- * This software may be used and distributed according to the terms
- * of the GNU General Public License, incorporated herein by reference.
- *
- */
-
-#include <linux/module.h>
-#include <linux/kernel.h>
-#include <linux/skbuff.h>
-#include <linux/delay.h>
-#include <linux/mm.h>
-#include <linux/interrupt.h>
-#include <linux/ioport.h>
-#include <linux/init.h>
-#include <asm/io.h>
-#include <linux/capi.h>
-#include <linux/b1pcmcia.h>
-#include <linux/isdn/capicmd.h>
-#include <linux/isdn/capiutil.h>
-#include <linux/isdn/capilli.h>
-#include "avmcard.h"
-
-/* ------------------------------------------------------------- */
-
-static char *revision = "$Revision: 1.1.2.2 $";
-
-/* ------------------------------------------------------------- */
-
-MODULE_DESCRIPTION("CAPI4Linux: Driver for AVM PCMCIA cards");
-MODULE_AUTHOR("Carsten Paeth");
-MODULE_LICENSE("GPL");
-
-/* ------------------------------------------------------------- */
-
-static void b1pcmcia_remove_ctr(struct capi_ctr *ctrl)
-{
- avmctrl_info *cinfo = (avmctrl_info *)(ctrl->driverdata);
- avmcard *card = cinfo->card;
- unsigned int port = card->port;
-
- b1_reset(port);
- b1_reset(port);
-
- detach_capi_ctr(ctrl);
- free_irq(card->irq, card);
- b1_free_card(card);
-}
-
-/* ------------------------------------------------------------- */
-
-static LIST_HEAD(cards);
-
-static char *b1pcmcia_procinfo(struct capi_ctr *ctrl);
-
-static int b1pcmcia_add_card(unsigned int port, unsigned irq,
- enum avmcardtype cardtype)
-{
- avmctrl_info *cinfo;
- avmcard *card;
- char *cardname;
- int retval;
-
- card = b1_alloc_card(1);
- if (!card) {
- printk(KERN_WARNING "b1pcmcia: no memory.\n");
- retval = -ENOMEM;
- goto err;
- }
- cinfo = card->ctrlinfo;
-
- switch (cardtype) {
- case avm_m1: sprintf(card->name, "m1-%x", port); break;
- case avm_m2: sprintf(card->name, "m2-%x", port); break;
- default: sprintf(card->name, "b1pcmcia-%x", port); break;
- }
- card->port = port;
- card->irq = irq;
- card->cardtype = cardtype;
-
- retval = request_irq(card->irq, b1_interrupt, IRQF_SHARED, card->name, card);
- if (retval) {
- printk(KERN_ERR "b1pcmcia: unable to get IRQ %d.\n",
- card->irq);
- retval = -EBUSY;
- goto err_free;
- }
- b1_reset(card->port);
- if ((retval = b1_detect(card->port, card->cardtype)) != 0) {
- printk(KERN_NOTICE "b1pcmcia: NO card at 0x%x (%d)\n",
- card->port, retval);
- retval = -ENODEV;
- goto err_free_irq;
- }
- b1_reset(card->port);
- b1_getrevision(card);
-
- cinfo->capi_ctrl.owner = THIS_MODULE;
- cinfo->capi_ctrl.driver_name = "b1pcmcia";
- cinfo->capi_ctrl.driverdata = cinfo;
- cinfo->capi_ctrl.register_appl = b1_register_appl;
- cinfo->capi_ctrl.release_appl = b1_release_appl;
- cinfo->capi_ctrl.send_message = b1_send_message;
- cinfo->capi_ctrl.load_firmware = b1_load_firmware;
- cinfo->capi_ctrl.reset_ctr = b1_reset_ctr;
- cinfo->capi_ctrl.procinfo = b1pcmcia_procinfo;
- cinfo->capi_ctrl.proc_show = b1_proc_show;
- strcpy(cinfo->capi_ctrl.name, card->name);
-
- retval = attach_capi_ctr(&cinfo->capi_ctrl);
- if (retval) {
- printk(KERN_ERR "b1pcmcia: attach controller failed.\n");
- goto err_free_irq;
- }
- switch (cardtype) {
- case avm_m1: cardname = "M1"; break;
- case avm_m2: cardname = "M2"; break;
- default: cardname = "B1 PCMCIA"; break;
- }
-
- printk(KERN_INFO "b1pcmcia: AVM %s at i/o %#x, irq %d, revision %d\n",
- cardname, card->port, card->irq, card->revision);
-
- list_add(&card->list, &cards);
- return cinfo->capi_ctrl.cnr;
-
-err_free_irq:
- free_irq(card->irq, card);
-err_free:
- b1_free_card(card);
-err:
- return retval;
-}
-
-/* ------------------------------------------------------------- */
-
-static char *b1pcmcia_procinfo(struct capi_ctr *ctrl)
-{
- avmctrl_info *cinfo = (avmctrl_info *)(ctrl->driverdata);
-
- if (!cinfo)
- return "";
- sprintf(cinfo->infobuf, "%s %s 0x%x %d r%d",
- cinfo->cardname[0] ? cinfo->cardname : "-",
- cinfo->version[VER_DRIVER] ? cinfo->version[VER_DRIVER] : "-",
- cinfo->card ? cinfo->card->port : 0x0,
- cinfo->card ? cinfo->card->irq : 0,
- cinfo->card ? cinfo->card->revision : 0
- );
- return cinfo->infobuf;
-}
-
-/* ------------------------------------------------------------- */
-
-int b1pcmcia_addcard_b1(unsigned int port, unsigned irq)
-{
- return b1pcmcia_add_card(port, irq, avm_b1pcmcia);
-}
-
-int b1pcmcia_addcard_m1(unsigned int port, unsigned irq)
-{
- return b1pcmcia_add_card(port, irq, avm_m1);
-}
-
-int b1pcmcia_addcard_m2(unsigned int port, unsigned irq)
-{
- return b1pcmcia_add_card(port, irq, avm_m2);
-}
-
-int b1pcmcia_delcard(unsigned int port, unsigned irq)
-{
- struct list_head *l;
- avmcard *card;
-
- list_for_each(l, &cards) {
- card = list_entry(l, avmcard, list);
- if (card->port == port && card->irq == irq) {
- b1pcmcia_remove_ctr(&card->ctrlinfo[0].capi_ctrl);
- return 0;
- }
- }
- return -ESRCH;
-}
-
-EXPORT_SYMBOL(b1pcmcia_addcard_b1);
-EXPORT_SYMBOL(b1pcmcia_addcard_m1);
-EXPORT_SYMBOL(b1pcmcia_addcard_m2);
-EXPORT_SYMBOL(b1pcmcia_delcard);
-
-static struct capi_driver capi_driver_b1pcmcia = {
- .name = "b1pcmcia",
- .revision = "1.0",
-};
-
-static int __init b1pcmcia_init(void)
-{
- char *p;
- char rev[32];
-
- if ((p = strchr(revision, ':')) != NULL && p[1]) {
- strlcpy(rev, p + 2, 32);
- if ((p = strchr(rev, '$')) != NULL && p > rev)
- *(p - 1) = 0;
- } else
- strcpy(rev, "1.0");
-
- strlcpy(capi_driver_b1pcmcia.revision, rev, 32);
- register_capi_driver(&capi_driver_b1pcmcia);
-	printk(KERN_INFO "b1pcmcia: revision %s\n", rev);
-
- return 0;
-}
-
-static void __exit b1pcmcia_exit(void)
-{
- unregister_capi_driver(&capi_driver_b1pcmcia);
-}
-
-module_init(b1pcmcia_init);
-module_exit(b1pcmcia_exit);
diff --git a/drivers/staging/isdn/avm/c4.c b/drivers/staging/isdn/avm/c4.c
deleted file mode 100644
index ac72cd204c4d..000000000000
--- a/drivers/staging/isdn/avm/c4.c
+++ /dev/null
@@ -1,1317 +0,0 @@
-/* $Id: c4.c,v 1.1.2.2 2004/01/16 21:09:27 keil Exp $
- *
- * Module for AVM C4 & C2 card.
- *
- * Copyright 1999 by Carsten Paeth <calle@calle.de>
- *
- * This software may be used and distributed according to the terms
- * of the GNU General Public License, incorporated herein by reference.
- *
- */
-
-#include <linux/module.h>
-#include <linux/kernel.h>
-#include <linux/proc_fs.h>
-#include <linux/seq_file.h>
-#include <linux/skbuff.h>
-#include <linux/delay.h>
-#include <linux/mm.h>
-#include <linux/interrupt.h>
-#include <linux/ioport.h>
-#include <linux/pci.h>
-#include <linux/capi.h>
-#include <linux/kernelcapi.h>
-#include <linux/init.h>
-#include <linux/gfp.h>
-#include <asm/io.h>
-#include <linux/uaccess.h>
-#include <linux/netdevice.h>
-#include <linux/isdn/capicmd.h>
-#include <linux/isdn/capiutil.h>
-#include <linux/isdn/capilli.h>
-#include "avmcard.h"
-
-#undef AVM_C4_DEBUG
-#undef AVM_C4_POLLDEBUG
-
-/* ------------------------------------------------------------- */
-
-static char *revision = "$Revision: 1.1.2.2 $";
-
-/* ------------------------------------------------------------- */
-
-static bool suppress_pollack;
-
-static const struct pci_device_id c4_pci_tbl[] = {
- { PCI_VENDOR_ID_DEC, PCI_DEVICE_ID_DEC_21285, PCI_VENDOR_ID_AVM, PCI_DEVICE_ID_AVM_C4, 0, 0, (unsigned long)4 },
- { PCI_VENDOR_ID_DEC, PCI_DEVICE_ID_DEC_21285, PCI_VENDOR_ID_AVM, PCI_DEVICE_ID_AVM_C2, 0, 0, (unsigned long)2 },
- { } /* Terminating entry */
-};
-
-MODULE_DEVICE_TABLE(pci, c4_pci_tbl);
-MODULE_DESCRIPTION("CAPI4Linux: Driver for AVM C2/C4 cards");
-MODULE_AUTHOR("Carsten Paeth");
-MODULE_LICENSE("GPL");
-module_param(suppress_pollack, bool, 0);
-
-/* ------------------------------------------------------------- */
-
-static void c4_dispatch_tx(avmcard *card);
-
-/* ------------------------------------------------------------- */
-
-#define DC21285_DRAM_A0MR 0x40000000
-#define DC21285_DRAM_A1MR 0x40004000
-#define DC21285_DRAM_A2MR 0x40008000
-#define DC21285_DRAM_A3MR 0x4000C000
-
-#define CAS_OFFSET 0x88
-
-#define DC21285_ARMCSR_BASE 0x42000000
-
-#define PCI_OUT_INT_STATUS 0x30
-#define PCI_OUT_INT_MASK 0x34
-#define MAILBOX_0 0x50
-#define MAILBOX_1 0x54
-#define MAILBOX_2 0x58
-#define MAILBOX_3 0x5C
-#define DOORBELL 0x60
-#define DOORBELL_SETUP 0x64
-
-#define CHAN_1_CONTROL 0x90
-#define CHAN_2_CONTROL 0xB0
-#define DRAM_TIMING 0x10C
-#define DRAM_ADDR_SIZE_0 0x110
-#define DRAM_ADDR_SIZE_1 0x114
-#define DRAM_ADDR_SIZE_2 0x118
-#define DRAM_ADDR_SIZE_3 0x11C
-#define SA_CONTROL 0x13C
-#define XBUS_CYCLE 0x148
-#define XBUS_STROBE 0x14C
-#define DBELL_PCI_MASK 0x150
-#define DBELL_SA_MASK 0x154
-
-#define SDRAM_SIZE 0x1000000
-
-/* ------------------------------------------------------------- */
-
-#define MBOX_PEEK_POKE MAILBOX_0
-
-#define DBELL_ADDR 0x01
-#define DBELL_DATA 0x02
-#define DBELL_RNWR 0x40
-#define DBELL_INIT 0x80
-
-/* ------------------------------------------------------------- */
-
-#define MBOX_UP_ADDR MAILBOX_0
-#define MBOX_UP_LEN MAILBOX_1
-#define MBOX_DOWN_ADDR MAILBOX_2
-#define MBOX_DOWN_LEN MAILBOX_3
-
-#define DBELL_UP_HOST 0x00000100
-#define DBELL_UP_ARM 0x00000200
-#define DBELL_DOWN_HOST 0x00000400
-#define DBELL_DOWN_ARM 0x00000800
-#define DBELL_RESET_HOST 0x40000000
-#define DBELL_RESET_ARM 0x80000000
-
-/* ------------------------------------------------------------- */
-
-#define DRAM_TIMING_DEF 0x001A01A5
-#define DRAM_AD_SZ_DEF0 0x00000045
-#define DRAM_AD_SZ_NULL 0x00000000
-
-#define SA_CTL_ALLRIGHT 0x64AA0271
-
-#define INIT_XBUS_CYCLE 0x100016DB
-#define INIT_XBUS_STROBE 0xF1F1F1F1
-
-/* ------------------------------------------------------------- */
-
-#define RESET_TIMEOUT (15 * HZ) /* 15 sec */
-#define PEEK_POKE_TIMEOUT (HZ / 10) /* 0.1 sec */
-
-/* ------------------------------------------------------------- */
-
-#define c4outmeml(addr, value) writel(value, addr)
-#define c4inmeml(addr) readl(addr)
-#define c4outmemw(addr, value) writew(value, addr)
-#define c4inmemw(addr) readw(addr)
-#define c4outmemb(addr, value) writeb(value, addr)
-#define c4inmemb(addr) readb(addr)
-
-/* ------------------------------------------------------------- */
-
-static inline int wait_for_doorbell(avmcard *card, unsigned long t)
-{
- unsigned long stop;
-
- stop = jiffies + t;
- while (c4inmeml(card->mbase + DOORBELL) != 0xffffffff) {
- if (!time_before(jiffies, stop))
- return -1;
- mb();
- }
- return 0;
-}
-
-static int c4_poke(avmcard *card, unsigned long off, unsigned long value)
-{
- if (wait_for_doorbell(card, HZ / 10) < 0)
- return -1;
-
- c4outmeml(card->mbase + MBOX_PEEK_POKE, off);
- c4outmeml(card->mbase + DOORBELL, DBELL_ADDR);
-
- if (wait_for_doorbell(card, HZ / 10) < 0)
- return -1;
-
- c4outmeml(card->mbase + MBOX_PEEK_POKE, value);
- c4outmeml(card->mbase + DOORBELL, DBELL_DATA | DBELL_ADDR);
-
- return 0;
-}
-
-static int c4_peek(avmcard *card, unsigned long off, unsigned long *valuep)
-{
- if (wait_for_doorbell(card, HZ / 10) < 0)
- return -1;
-
- c4outmeml(card->mbase + MBOX_PEEK_POKE, off);
- c4outmeml(card->mbase + DOORBELL, DBELL_RNWR | DBELL_ADDR);
-
- if (wait_for_doorbell(card, HZ / 10) < 0)
- return -1;
-
- *valuep = c4inmeml(card->mbase + MBOX_PEEK_POKE);
-
- return 0;
-}
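
c4_poke() and c4_peek() drive a four-step mailbox handshake: wait for the doorbell register to go idle (0xffffffff), post the address, wait again, then post or fetch the data word. A usage sketch under the assumption of an initialised card, in the spirit of the transputer test in c4_detect() below (hypothetical helper):

static int c4_word_selftest(avmcard *card, unsigned long off)
{
	unsigned long readback;

	if (c4_poke(card, off, 0x12345678UL))
		return -EIO;	/* doorbell never went idle */
	if (c4_peek(card, off, &readback))
		return -EIO;
	return readback == 0x12345678UL ? 0 : -ENODEV;
}
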
-
-/* ------------------------------------------------------------- */
-
-static int c4_load_t4file(avmcard *card, capiloaddatapart *t4file)
-{
- u32 val;
- unsigned char *dp;
- u_int left;
- u32 loadoff = 0;
-
- dp = t4file->data;
- left = t4file->len;
- while (left >= sizeof(u32)) {
- if (t4file->user) {
- if (copy_from_user(&val, dp, sizeof(val)))
- return -EFAULT;
- } else {
- memcpy(&val, dp, sizeof(val));
- }
- if (c4_poke(card, loadoff, val)) {
-			printk(KERN_ERR "%s: corrupted firmware file?\n",
- card->name);
- return -EIO;
- }
- left -= sizeof(u32);
- dp += sizeof(u32);
- loadoff += sizeof(u32);
- }
- if (left) {
- val = 0;
- if (t4file->user) {
- if (copy_from_user(&val, dp, left))
- return -EFAULT;
- } else {
- memcpy(&val, dp, left);
- }
- if (c4_poke(card, loadoff, val)) {
-			printk(KERN_ERR "%s: corrupted firmware file?\n",
- card->name);
- return -EIO;
- }
- }
- return 0;
-}
-
-/* ------------------------------------------------------------- */
-
-static inline void _put_byte(void **pp, u8 val)
-{
- u8 *s = *pp;
- *s++ = val;
- *pp = s;
-}
-
-static inline void _put_word(void **pp, u32 val)
-{
- u8 *s = *pp;
- *s++ = val & 0xff;
- *s++ = (val >> 8) & 0xff;
- *s++ = (val >> 16) & 0xff;
- *s++ = (val >> 24) & 0xff;
- *pp = s;
-}
-
-static inline void _put_slice(void **pp, unsigned char *dp, unsigned int len)
-{
- unsigned i = len;
- _put_word(pp, i);
- while (i-- > 0)
- _put_byte(pp, *dp++);
-}
-
-static inline u8 _get_byte(void **pp)
-{
- u8 *s = *pp;
- u8 val;
- val = *s++;
- *pp = s;
- return val;
-}
-
-static inline u32 _get_word(void **pp)
-{
- u8 *s = *pp;
- u32 val;
- val = *s++;
- val |= (*s++ << 8);
- val |= (*s++ << 16);
- val |= (*s++ << 24);
- *pp = s;
- return val;
-}
-
-static inline u32 _get_slice(void **pp, unsigned char *dp)
-{
- unsigned int len, i;
-
- len = i = _get_word(pp);
- while (i-- > 0) *dp++ = _get_byte(pp);
- return len;
-}
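
The helpers above define the card's wire encoding: bytes are raw, words are little-endian 32-bit, and a slice is a word-sized length followed by that many bytes. A userspace round-trip sketch, assuming u8/u32 are typedef'd to the stdint types and the helper definitions above are pasted in:

#include <assert.h>
#include <stdint.h>
#include <string.h>

typedef uint8_t u8;	/* stand-ins for the kernel types */
typedef uint32_t u32;

/* ... paste _put_byte/_put_word/_put_slice and the _get_* helpers
 * from above here ... */

int main(void)
{
	unsigned char buf[64], payload[3] = { 1, 2, 3 }, out[3];
	void *p = buf;

	_put_byte(&p, 0x42);
	_put_slice(&p, payload, sizeof(payload));	/* 4-byte len + data */

	p = buf;
	assert(_get_byte(&p) == 0x42);
	assert(_get_slice(&p, out) == sizeof(out));
	assert(memcmp(out, payload, sizeof(out)) == 0);
	return 0;
}
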
-
-/* ------------------------------------------------------------- */
-
-static void c4_reset(avmcard *card)
-{
- unsigned long stop;
-
- c4outmeml(card->mbase + DOORBELL, DBELL_RESET_ARM);
-
- stop = jiffies + HZ * 10;
- while (c4inmeml(card->mbase + DOORBELL) != 0xffffffff) {
- if (!time_before(jiffies, stop))
- return;
- c4outmeml(card->mbase + DOORBELL, DBELL_ADDR);
- mb();
- }
-
- c4_poke(card, DC21285_ARMCSR_BASE + CHAN_1_CONTROL, 0);
- c4_poke(card, DC21285_ARMCSR_BASE + CHAN_2_CONTROL, 0);
-}
-
-/* ------------------------------------------------------------- */
-
-static int c4_detect(avmcard *card)
-{
- unsigned long stop, dummy;
-
- c4outmeml(card->mbase + PCI_OUT_INT_MASK, 0x0c);
- if (c4inmeml(card->mbase + PCI_OUT_INT_MASK) != 0x0c)
- return 1;
-
- c4outmeml(card->mbase + DOORBELL, DBELL_RESET_ARM);
-
- stop = jiffies + HZ * 10;
- while (c4inmeml(card->mbase + DOORBELL) != 0xffffffff) {
- if (!time_before(jiffies, stop))
- return 2;
- c4outmeml(card->mbase + DOORBELL, DBELL_ADDR);
- mb();
- }
-
- c4_poke(card, DC21285_ARMCSR_BASE + CHAN_1_CONTROL, 0);
- c4_poke(card, DC21285_ARMCSR_BASE + CHAN_2_CONTROL, 0);
-
- c4outmeml(card->mbase + MAILBOX_0, 0x55aa55aa);
- if (c4inmeml(card->mbase + MAILBOX_0) != 0x55aa55aa) return 3;
-
- c4outmeml(card->mbase + MAILBOX_0, 0xaa55aa55);
- if (c4inmeml(card->mbase + MAILBOX_0) != 0xaa55aa55) return 4;
-
- if (c4_poke(card, DC21285_ARMCSR_BASE + DBELL_SA_MASK, 0)) return 5;
- if (c4_poke(card, DC21285_ARMCSR_BASE + DBELL_PCI_MASK, 0)) return 6;
- if (c4_poke(card, DC21285_ARMCSR_BASE + SA_CONTROL, SA_CTL_ALLRIGHT))
- return 7;
- if (c4_poke(card, DC21285_ARMCSR_BASE + XBUS_CYCLE, INIT_XBUS_CYCLE))
- return 8;
- if (c4_poke(card, DC21285_ARMCSR_BASE + XBUS_STROBE, INIT_XBUS_STROBE))
- return 8;
- if (c4_poke(card, DC21285_ARMCSR_BASE + DRAM_TIMING, 0)) return 9;
-
- mdelay(1);
-
- if (c4_peek(card, DC21285_DRAM_A0MR, &dummy)) return 10;
- if (c4_peek(card, DC21285_DRAM_A1MR, &dummy)) return 11;
- if (c4_peek(card, DC21285_DRAM_A2MR, &dummy)) return 12;
- if (c4_peek(card, DC21285_DRAM_A3MR, &dummy)) return 13;
-
- if (c4_poke(card, DC21285_DRAM_A0MR + CAS_OFFSET, 0)) return 14;
- if (c4_poke(card, DC21285_DRAM_A1MR + CAS_OFFSET, 0)) return 15;
- if (c4_poke(card, DC21285_DRAM_A2MR + CAS_OFFSET, 0)) return 16;
- if (c4_poke(card, DC21285_DRAM_A3MR + CAS_OFFSET, 0)) return 17;
-
- mdelay(1);
-
- if (c4_poke(card, DC21285_ARMCSR_BASE + DRAM_TIMING, DRAM_TIMING_DEF))
- return 18;
-
- if (c4_poke(card, DC21285_ARMCSR_BASE + DRAM_ADDR_SIZE_0, DRAM_AD_SZ_DEF0))
- return 19;
- if (c4_poke(card, DC21285_ARMCSR_BASE + DRAM_ADDR_SIZE_1, DRAM_AD_SZ_NULL))
- return 20;
- if (c4_poke(card, DC21285_ARMCSR_BASE + DRAM_ADDR_SIZE_2, DRAM_AD_SZ_NULL))
- return 21;
- if (c4_poke(card, DC21285_ARMCSR_BASE + DRAM_ADDR_SIZE_3, DRAM_AD_SZ_NULL))
- return 22;
-
- /* Transputer test */
-
- if (c4_poke(card, 0x000000, 0x11111111)
- || c4_poke(card, 0x400000, 0x22222222)
- || c4_poke(card, 0x800000, 0x33333333)
- || c4_poke(card, 0xC00000, 0x44444444))
- return 23;
-
- if (c4_peek(card, 0x000000, &dummy) || dummy != 0x11111111
- || c4_peek(card, 0x400000, &dummy) || dummy != 0x22222222
- || c4_peek(card, 0x800000, &dummy) || dummy != 0x33333333
- || c4_peek(card, 0xC00000, &dummy) || dummy != 0x44444444)
- return 24;
-
- if (c4_poke(card, 0x000000, 0x55555555)
- || c4_poke(card, 0x400000, 0x66666666)
- || c4_poke(card, 0x800000, 0x77777777)
- || c4_poke(card, 0xC00000, 0x88888888))
- return 25;
-
- if (c4_peek(card, 0x000000, &dummy) || dummy != 0x55555555
- || c4_peek(card, 0x400000, &dummy) || dummy != 0x66666666
- || c4_peek(card, 0x800000, &dummy) || dummy != 0x77777777
- || c4_peek(card, 0xC00000, &dummy) || dummy != 0x88888888)
- return 26;
-
- return 0;
-}
-
-/* ------------------------------------------------------------- */
-
-static void c4_dispatch_tx(avmcard *card)
-{
- avmcard_dmainfo *dma = card->dma;
- struct sk_buff *skb;
- u8 cmd, subcmd;
- u16 len;
- u32 txlen;
- void *p;
-
- if (card->csr & DBELL_DOWN_ARM) { /* tx busy */
- return;
- }
-
- skb = skb_dequeue(&dma->send_queue);
- if (!skb) {
-#ifdef AVM_C4_DEBUG
- printk(KERN_DEBUG "%s: tx underrun\n", card->name);
-#endif
- return;
- }
-
- len = CAPIMSG_LEN(skb->data);
-
- if (len) {
- cmd = CAPIMSG_COMMAND(skb->data);
- subcmd = CAPIMSG_SUBCOMMAND(skb->data);
-
- p = dma->sendbuf.dmabuf;
-
- if (CAPICMD(cmd, subcmd) == CAPI_DATA_B3_REQ) {
- u16 dlen = CAPIMSG_DATALEN(skb->data);
- _put_byte(&p, SEND_DATA_B3_REQ);
- _put_slice(&p, skb->data, len);
- _put_slice(&p, skb->data + len, dlen);
- } else {
- _put_byte(&p, SEND_MESSAGE);
- _put_slice(&p, skb->data, len);
- }
- txlen = (u8 *)p - (u8 *)dma->sendbuf.dmabuf;
-#ifdef AVM_C4_DEBUG
- printk(KERN_DEBUG "%s: tx put msg len=%d\n", card->name, txlen);
-#endif
- } else {
- txlen = skb->len - 2;
-#ifdef AVM_C4_POLLDEBUG
- if (skb->data[2] == SEND_POLLACK)
- printk(KERN_INFO "%s: ack to c4\n", card->name);
-#endif
-#ifdef AVM_C4_DEBUG
- printk(KERN_DEBUG "%s: tx put 0x%x len=%d\n",
- card->name, skb->data[2], txlen);
-#endif
- skb_copy_from_linear_data_offset(skb, 2, dma->sendbuf.dmabuf,
- skb->len - 2);
- }
- txlen = (txlen + 3) & ~3;
-
- c4outmeml(card->mbase + MBOX_DOWN_ADDR, dma->sendbuf.dmaaddr);
- c4outmeml(card->mbase + MBOX_DOWN_LEN, txlen);
-
- card->csr |= DBELL_DOWN_ARM;
-
- c4outmeml(card->mbase + DOORBELL, DBELL_DOWN_ARM);
-
- dev_kfree_skb_any(skb);
-}
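
What c4_dispatch_tx() actually places in the DMA buffer, as a layout sketch (illustration only). Note the two zero bytes seen in locally queued skbs never reach the wire: they only make CAPIMSG_LEN() read zero, which flags the skb as a control message.

/*
 * down-mailbox frame (sketch):
 *
 *   SEND_MESSAGE:     | cmd | len32 | CAPI message ................ |
 *   SEND_DATA_B3_REQ: | cmd | len32 | CAPI header | len32 | payload |
 *   control message:  skb bytes from offset 2 onwards (cmd first)
 *
 * txlen is then rounded up to a 32-bit multiple, (txlen + 3) & ~3,
 * so e.g. a 13-byte frame is posted to MBOX_DOWN_LEN as 16 bytes.
 */
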
-
-/* ------------------------------------------------------------- */
-
-static void queue_pollack(avmcard *card)
-{
- struct sk_buff *skb;
- void *p;
-
- skb = alloc_skb(3, GFP_ATOMIC);
- if (!skb) {
- printk(KERN_CRIT "%s: no memory, lost poll ack\n",
- card->name);
- return;
- }
- p = skb->data;
- _put_byte(&p, 0);
- _put_byte(&p, 0);
- _put_byte(&p, SEND_POLLACK);
- skb_put(skb, (u8 *)p - (u8 *)skb->data);
-
- skb_queue_tail(&card->dma->send_queue, skb);
- c4_dispatch_tx(card);
-}
-
-/* ------------------------------------------------------------- */
-
-static void c4_handle_rx(avmcard *card)
-{
- avmcard_dmainfo *dma = card->dma;
- struct capi_ctr *ctrl;
- avmctrl_info *cinfo;
- struct sk_buff *skb;
- void *p = dma->recvbuf.dmabuf;
- u32 ApplId, MsgLen, DataB3Len, NCCI, WindowSize;
- u8 b1cmd = _get_byte(&p);
- u32 cidx;
-
-#ifdef AVM_C4_DEBUG
- printk(KERN_DEBUG "%s: rx 0x%x len=%lu\n", card->name,
- b1cmd, (unsigned long)dma->recvlen);
-#endif
-
- switch (b1cmd) {
- case RECEIVE_DATA_B3_IND:
-
- ApplId = (unsigned) _get_word(&p);
- MsgLen = _get_slice(&p, card->msgbuf);
- DataB3Len = _get_slice(&p, card->databuf);
-		cidx = CAPIMSG_CONTROLLER(card->msgbuf) - card->cardnr;
- if (cidx >= card->nlogcontr) cidx = 0;
- ctrl = &card->ctrlinfo[cidx].capi_ctrl;
-
- if (MsgLen < 30) { /* not CAPI 64Bit */
- memset(card->msgbuf + MsgLen, 0, 30 - MsgLen);
- MsgLen = 30;
- CAPIMSG_SETLEN(card->msgbuf, 30);
- }
- if (!(skb = alloc_skb(DataB3Len + MsgLen, GFP_ATOMIC))) {
- printk(KERN_ERR "%s: incoming packet dropped\n",
- card->name);
- } else {
- skb_put_data(skb, card->msgbuf, MsgLen);
- skb_put_data(skb, card->databuf, DataB3Len);
- capi_ctr_handle_message(ctrl, ApplId, skb);
- }
- break;
-
- case RECEIVE_MESSAGE:
-
- ApplId = (unsigned) _get_word(&p);
- MsgLen = _get_slice(&p, card->msgbuf);
-		cidx = CAPIMSG_CONTROLLER(card->msgbuf) - card->cardnr;
- if (cidx >= card->nlogcontr) cidx = 0;
- cinfo = &card->ctrlinfo[cidx];
- ctrl = &card->ctrlinfo[cidx].capi_ctrl;
-
- if (!(skb = alloc_skb(MsgLen, GFP_ATOMIC))) {
- printk(KERN_ERR "%s: incoming packet dropped\n",
- card->name);
- } else {
- skb_put_data(skb, card->msgbuf, MsgLen);
- if (CAPIMSG_CMD(skb->data) == CAPI_DATA_B3_CONF)
- capilib_data_b3_conf(&cinfo->ncci_head, ApplId,
- CAPIMSG_NCCI(skb->data),
- CAPIMSG_MSGID(skb->data));
-
- capi_ctr_handle_message(ctrl, ApplId, skb);
- }
- break;
-
- case RECEIVE_NEW_NCCI:
-
- ApplId = _get_word(&p);
- NCCI = _get_word(&p);
- WindowSize = _get_word(&p);
- cidx = (NCCI & 0x7f) - card->cardnr;
- if (cidx >= card->nlogcontr) cidx = 0;
-
- capilib_new_ncci(&card->ctrlinfo[cidx].ncci_head, ApplId, NCCI, WindowSize);
-
- break;
-
- case RECEIVE_FREE_NCCI:
-
- ApplId = _get_word(&p);
- NCCI = _get_word(&p);
-
- if (NCCI != 0xffffffff) {
- cidx = (NCCI & 0x7f) - card->cardnr;
- if (cidx >= card->nlogcontr) cidx = 0;
- capilib_free_ncci(&card->ctrlinfo[cidx].ncci_head, ApplId, NCCI);
- }
- break;
-
- case RECEIVE_START:
-#ifdef AVM_C4_POLLDEBUG
- printk(KERN_INFO "%s: poll from c4\n", card->name);
-#endif
- if (!suppress_pollack)
- queue_pollack(card);
- for (cidx = 0; cidx < card->nr_controllers; cidx++) {
- ctrl = &card->ctrlinfo[cidx].capi_ctrl;
- capi_ctr_resume_output(ctrl);
- }
- break;
-
- case RECEIVE_STOP:
- for (cidx = 0; cidx < card->nr_controllers; cidx++) {
- ctrl = &card->ctrlinfo[cidx].capi_ctrl;
- capi_ctr_suspend_output(ctrl);
- }
- break;
-
- case RECEIVE_INIT:
-
- cidx = card->nlogcontr;
- if (cidx >= card->nr_controllers) {
- printk(KERN_ERR "%s: card with %d controllers ??\n",
- card->name, cidx + 1);
- break;
- }
- card->nlogcontr++;
- cinfo = &card->ctrlinfo[cidx];
- ctrl = &cinfo->capi_ctrl;
- cinfo->versionlen = _get_slice(&p, cinfo->versionbuf);
- b1_parse_version(cinfo);
- printk(KERN_INFO "%s: %s-card (%s) now active\n",
- card->name,
- cinfo->version[VER_CARDTYPE],
- cinfo->version[VER_DRIVER]);
- capi_ctr_ready(&cinfo->capi_ctrl);
- break;
-
- case RECEIVE_TASK_READY:
- ApplId = (unsigned) _get_word(&p);
- MsgLen = _get_slice(&p, card->msgbuf);
- card->msgbuf[MsgLen] = 0;
- while (MsgLen > 0
- && (card->msgbuf[MsgLen - 1] == '\n'
- || card->msgbuf[MsgLen - 1] == '\r')) {
- card->msgbuf[MsgLen - 1] = 0;
- MsgLen--;
- }
- printk(KERN_INFO "%s: task %d \"%s\" ready.\n",
- card->name, ApplId, card->msgbuf);
- break;
-
- case RECEIVE_DEBUGMSG:
- MsgLen = _get_slice(&p, card->msgbuf);
- card->msgbuf[MsgLen] = 0;
- while (MsgLen > 0
- && (card->msgbuf[MsgLen - 1] == '\n'
- || card->msgbuf[MsgLen - 1] == '\r')) {
- card->msgbuf[MsgLen - 1] = 0;
- MsgLen--;
- }
- printk(KERN_INFO "%s: DEBUG: %s\n", card->name, card->msgbuf);
- break;
-
- default:
- printk(KERN_ERR "%s: c4_interrupt: 0x%x ???\n",
- card->name, b1cmd);
- return;
- }
-}
-
-/* ------------------------------------------------------------- */
-
-static irqreturn_t c4_handle_interrupt(avmcard *card)
-{
- unsigned long flags;
- u32 status;
-
- spin_lock_irqsave(&card->lock, flags);
- status = c4inmeml(card->mbase + DOORBELL);
-
- if (status & DBELL_RESET_HOST) {
- u_int i;
- c4outmeml(card->mbase + PCI_OUT_INT_MASK, 0x0c);
- spin_unlock_irqrestore(&card->lock, flags);
- if (card->nlogcontr == 0)
- return IRQ_HANDLED;
- printk(KERN_ERR "%s: unexpected reset\n", card->name);
- for (i = 0; i < card->nr_controllers; i++) {
- avmctrl_info *cinfo = &card->ctrlinfo[i];
- memset(cinfo->version, 0, sizeof(cinfo->version));
- spin_lock_irqsave(&card->lock, flags);
- capilib_release(&cinfo->ncci_head);
- spin_unlock_irqrestore(&card->lock, flags);
- capi_ctr_down(&cinfo->capi_ctrl);
- }
- card->nlogcontr = 0;
- return IRQ_HANDLED;
- }
-
- status &= (DBELL_UP_HOST | DBELL_DOWN_HOST);
- if (!status) {
- spin_unlock_irqrestore(&card->lock, flags);
- return IRQ_HANDLED;
- }
- c4outmeml(card->mbase + DOORBELL, status);
-
- if ((status & DBELL_UP_HOST) != 0) {
- card->dma->recvlen = c4inmeml(card->mbase + MBOX_UP_LEN);
- c4outmeml(card->mbase + MBOX_UP_LEN, 0);
- c4_handle_rx(card);
- card->dma->recvlen = 0;
- c4outmeml(card->mbase + MBOX_UP_LEN, card->dma->recvbuf.size);
- c4outmeml(card->mbase + DOORBELL, DBELL_UP_ARM);
- }
-
- if ((status & DBELL_DOWN_HOST) != 0) {
- card->csr &= ~DBELL_DOWN_ARM;
- c4_dispatch_tx(card);
- } else if (card->csr & DBELL_DOWN_HOST) {
- if (c4inmeml(card->mbase + MBOX_DOWN_LEN) == 0) {
- card->csr &= ~DBELL_DOWN_ARM;
- c4_dispatch_tx(card);
- }
- }
- spin_unlock_irqrestore(&card->lock, flags);
- return IRQ_HANDLED;
-}
-
-static irqreturn_t c4_interrupt(int interrupt, void *devptr)
-{
- avmcard *card = devptr;
-
- return c4_handle_interrupt(card);
-}
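
A condensed sketch of the doorbell protocol the handler above implements (summary, not driver code); writing a status bit back to DOORBELL acknowledges it:

/*
 *   DBELL_RESET_HOST -> card rebooted under us: mask interrupts and
 *                       take every logical controller down;
 *   DBELL_UP_HOST    -> receive buffer filled: consume it, then re-arm
 *                       via MBOX_UP_LEN and ring DBELL_UP_ARM;
 *   DBELL_DOWN_HOST  -> send buffer drained: clear the cached
 *                       DBELL_DOWN_ARM bit and dispatch the next frame.
 */
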
-
-/* ------------------------------------------------------------- */
-
-static void c4_send_init(avmcard *card)
-{
- struct sk_buff *skb;
- void *p;
- unsigned long flags;
-
- skb = alloc_skb(15, GFP_ATOMIC);
- if (!skb) {
- printk(KERN_CRIT "%s: no memory, lost register appl.\n",
- card->name);
- return;
- }
- p = skb->data;
- _put_byte(&p, 0);
- _put_byte(&p, 0);
- _put_byte(&p, SEND_INIT);
- _put_word(&p, CAPI_MAXAPPL);
- _put_word(&p, AVM_NCCI_PER_CHANNEL * 30);
- _put_word(&p, card->cardnr - 1);
- skb_put(skb, (u8 *)p - (u8 *)skb->data);
-
- skb_queue_tail(&card->dma->send_queue, skb);
- spin_lock_irqsave(&card->lock, flags);
- c4_dispatch_tx(card);
- spin_unlock_irqrestore(&card->lock, flags);
-}
-
-static int queue_sendconfigword(avmcard *card, u32 val)
-{
- struct sk_buff *skb;
- unsigned long flags;
- void *p;
-
- skb = alloc_skb(3 + 4, GFP_ATOMIC);
- if (!skb) {
- printk(KERN_CRIT "%s: no memory, send config\n",
- card->name);
- return -ENOMEM;
- }
- p = skb->data;
- _put_byte(&p, 0);
- _put_byte(&p, 0);
- _put_byte(&p, SEND_CONFIG);
- _put_word(&p, val);
- skb_put(skb, (u8 *)p - (u8 *)skb->data);
-
- skb_queue_tail(&card->dma->send_queue, skb);
- spin_lock_irqsave(&card->lock, flags);
- c4_dispatch_tx(card);
- spin_unlock_irqrestore(&card->lock, flags);
- return 0;
-}
-
-static int queue_sendconfig(avmcard *card, char cval[4])
-{
- struct sk_buff *skb;
- unsigned long flags;
- void *p;
-
- skb = alloc_skb(3 + 4, GFP_ATOMIC);
- if (!skb) {
- printk(KERN_CRIT "%s: no memory, send config\n",
- card->name);
- return -ENOMEM;
- }
- p = skb->data;
- _put_byte(&p, 0);
- _put_byte(&p, 0);
- _put_byte(&p, SEND_CONFIG);
- _put_byte(&p, cval[0]);
- _put_byte(&p, cval[1]);
- _put_byte(&p, cval[2]);
- _put_byte(&p, cval[3]);
- skb_put(skb, (u8 *)p - (u8 *)skb->data);
-
- skb_queue_tail(&card->dma->send_queue, skb);
-
- spin_lock_irqsave(&card->lock, flags);
- c4_dispatch_tx(card);
- spin_unlock_irqrestore(&card->lock, flags);
- return 0;
-}
-
-static int c4_send_config(avmcard *card, capiloaddatapart *config)
-{
- u8 val[4];
- unsigned char *dp;
- u_int left;
- int retval;
-
- if ((retval = queue_sendconfigword(card, 1)) != 0)
- return retval;
- if ((retval = queue_sendconfigword(card, config->len)) != 0)
- return retval;
-
- dp = config->data;
- left = config->len;
- while (left >= sizeof(u32)) {
- if (config->user) {
- if (copy_from_user(val, dp, sizeof(val)))
- return -EFAULT;
- } else {
- memcpy(val, dp, sizeof(val));
- }
- if ((retval = queue_sendconfig(card, val)) != 0)
- return retval;
- left -= sizeof(val);
- dp += sizeof(val);
- }
- if (left) {
- memset(val, 0, sizeof(val));
- if (config->user) {
- if (copy_from_user(&val, dp, left))
- return -EFAULT;
- } else {
- memcpy(&val, dp, left);
- }
- if ((retval = queue_sendconfig(card, val)) != 0)
- return retval;
- }
-
- return 0;
-}
-
-static int c4_load_firmware(struct capi_ctr *ctrl, capiloaddata *data)
-{
- avmctrl_info *cinfo = (avmctrl_info *)(ctrl->driverdata);
- avmcard *card = cinfo->card;
- int retval;
-
- if ((retval = c4_load_t4file(card, &data->firmware))) {
- printk(KERN_ERR "%s: failed to load t4file!!\n",
- card->name);
- c4_reset(card);
- return retval;
- }
-
- card->csr = 0;
- c4outmeml(card->mbase + MBOX_UP_LEN, 0);
- c4outmeml(card->mbase + MBOX_DOWN_LEN, 0);
- c4outmeml(card->mbase + DOORBELL, DBELL_INIT);
- mdelay(1);
- c4outmeml(card->mbase + DOORBELL,
- DBELL_UP_HOST | DBELL_DOWN_HOST | DBELL_RESET_HOST);
-
- c4outmeml(card->mbase + PCI_OUT_INT_MASK, 0x08);
-
- card->dma->recvlen = 0;
- c4outmeml(card->mbase + MBOX_UP_ADDR, card->dma->recvbuf.dmaaddr);
- c4outmeml(card->mbase + MBOX_UP_LEN, card->dma->recvbuf.size);
- c4outmeml(card->mbase + DOORBELL, DBELL_UP_ARM);
-
- if (data->configuration.len > 0 && data->configuration.data) {
- retval = c4_send_config(card, &data->configuration);
- if (retval) {
- printk(KERN_ERR "%s: failed to set config!!\n",
- card->name);
- c4_reset(card);
- return retval;
- }
- }
-
- c4_send_init(card);
-
- return 0;
-}
-
-static void c4_reset_ctr(struct capi_ctr *ctrl)
-{
- avmcard *card = ((avmctrl_info *)(ctrl->driverdata))->card;
- avmctrl_info *cinfo;
- u_int i;
- unsigned long flags;
-
- spin_lock_irqsave(&card->lock, flags);
-
- c4_reset(card);
-
- spin_unlock_irqrestore(&card->lock, flags);
-
- for (i = 0; i < card->nr_controllers; i++) {
- cinfo = &card->ctrlinfo[i];
- memset(cinfo->version, 0, sizeof(cinfo->version));
- capi_ctr_down(&cinfo->capi_ctrl);
- }
- card->nlogcontr = 0;
-}
-
-static void c4_remove(struct pci_dev *pdev)
-{
- avmcard *card = pci_get_drvdata(pdev);
- avmctrl_info *cinfo;
- u_int i;
-
- if (!card)
- return;
-
- c4_reset(card);
-
- for (i = 0; i < card->nr_controllers; i++) {
- cinfo = &card->ctrlinfo[i];
- detach_capi_ctr(&cinfo->capi_ctrl);
- }
-
- free_irq(card->irq, card);
- iounmap(card->mbase);
- release_region(card->port, AVMB1_PORTLEN);
- avmcard_dma_free(card->dma);
- pci_set_drvdata(pdev, NULL);
- b1_free_card(card);
-}
-
-/* ------------------------------------------------------------- */
-
-static void c4_register_appl(struct capi_ctr *ctrl,
- u16 appl,
- capi_register_params *rp)
-{
- avmctrl_info *cinfo = (avmctrl_info *)(ctrl->driverdata);
- avmcard *card = cinfo->card;
- struct sk_buff *skb;
- int want = rp->level3cnt;
- unsigned long flags;
- int nconn;
- void *p;
-
- if (ctrl->cnr == card->cardnr) {
-
- if (want > 0) nconn = want;
- else nconn = ctrl->profile.nbchannel * 4 * -want;
- if (nconn == 0) nconn = ctrl->profile.nbchannel * 4;
-
- skb = alloc_skb(23, GFP_ATOMIC);
- if (!skb) {
- printk(KERN_CRIT "%s: no memory, lost register appl.\n",
- card->name);
- return;
- }
- p = skb->data;
- _put_byte(&p, 0);
- _put_byte(&p, 0);
- _put_byte(&p, SEND_REGISTER);
- _put_word(&p, appl);
- _put_word(&p, 1024 * (nconn + 1));
- _put_word(&p, nconn);
- _put_word(&p, rp->datablkcnt);
- _put_word(&p, rp->datablklen);
- skb_put(skb, (u8 *)p - (u8 *)skb->data);
-
- skb_queue_tail(&card->dma->send_queue, skb);
-
- spin_lock_irqsave(&card->lock, flags);
- c4_dispatch_tx(card);
- spin_unlock_irqrestore(&card->lock, flags);
- }
-}
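
The level3cnt convention used above: a positive value is an absolute connection count, a negative value scales with the channel count, and zero picks the default. A sketch of that policy as a pure function (hypothetical helper):

static int c4_nconn(int level3cnt, int nbchannel)
{
	if (level3cnt > 0)
		return level3cnt;		/* absolute request */
	if (level3cnt < 0)
		return nbchannel * 4 * -level3cnt;	/* per-channel scale */
	return nbchannel * 4;			/* default */
}
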
-
-/* ------------------------------------------------------------- */
-
-static void c4_release_appl(struct capi_ctr *ctrl, u16 appl)
-{
- avmctrl_info *cinfo = (avmctrl_info *)(ctrl->driverdata);
- avmcard *card = cinfo->card;
- unsigned long flags;
- struct sk_buff *skb;
- void *p;
-
- spin_lock_irqsave(&card->lock, flags);
- capilib_release_appl(&cinfo->ncci_head, appl);
- spin_unlock_irqrestore(&card->lock, flags);
-
- if (ctrl->cnr == card->cardnr) {
- skb = alloc_skb(7, GFP_ATOMIC);
- if (!skb) {
- printk(KERN_CRIT "%s: no memory, lost release appl.\n",
- card->name);
- return;
- }
- p = skb->data;
- _put_byte(&p, 0);
- _put_byte(&p, 0);
- _put_byte(&p, SEND_RELEASE);
- _put_word(&p, appl);
-
- skb_put(skb, (u8 *)p - (u8 *)skb->data);
- skb_queue_tail(&card->dma->send_queue, skb);
- spin_lock_irqsave(&card->lock, flags);
- c4_dispatch_tx(card);
- spin_unlock_irqrestore(&card->lock, flags);
- }
-}
-
-/* ------------------------------------------------------------- */
-
-static u16 c4_send_message(struct capi_ctr *ctrl, struct sk_buff *skb)
-{
- avmctrl_info *cinfo = (avmctrl_info *)(ctrl->driverdata);
- avmcard *card = cinfo->card;
- u16 retval = CAPI_NOERROR;
- unsigned long flags;
-
- spin_lock_irqsave(&card->lock, flags);
- if (CAPIMSG_CMD(skb->data) == CAPI_DATA_B3_REQ) {
- retval = capilib_data_b3_req(&cinfo->ncci_head,
- CAPIMSG_APPID(skb->data),
- CAPIMSG_NCCI(skb->data),
- CAPIMSG_MSGID(skb->data));
- }
- if (retval == CAPI_NOERROR) {
- skb_queue_tail(&card->dma->send_queue, skb);
- c4_dispatch_tx(card);
- }
- spin_unlock_irqrestore(&card->lock, flags);
- return retval;
-}
-
-/* ------------------------------------------------------------- */
-
-static char *c4_procinfo(struct capi_ctr *ctrl)
-{
- avmctrl_info *cinfo = (avmctrl_info *)(ctrl->driverdata);
-
- if (!cinfo)
- return "";
- sprintf(cinfo->infobuf, "%s %s 0x%x %d 0x%lx",
- cinfo->cardname[0] ? cinfo->cardname : "-",
- cinfo->version[VER_DRIVER] ? cinfo->version[VER_DRIVER] : "-",
- cinfo->card ? cinfo->card->port : 0x0,
- cinfo->card ? cinfo->card->irq : 0,
- cinfo->card ? cinfo->card->membase : 0
- );
- return cinfo->infobuf;
-}
-
-static int c4_proc_show(struct seq_file *m, void *v)
-{
- struct capi_ctr *ctrl = m->private;
- avmctrl_info *cinfo = (avmctrl_info *)(ctrl->driverdata);
- avmcard *card = cinfo->card;
- u8 flag;
- char *s;
-
- seq_printf(m, "%-16s %s\n", "name", card->name);
- seq_printf(m, "%-16s 0x%x\n", "io", card->port);
- seq_printf(m, "%-16s %d\n", "irq", card->irq);
- seq_printf(m, "%-16s 0x%lx\n", "membase", card->membase);
- switch (card->cardtype) {
- case avm_b1isa: s = "B1 ISA"; break;
- case avm_b1pci: s = "B1 PCI"; break;
- case avm_b1pcmcia: s = "B1 PCMCIA"; break;
- case avm_m1: s = "M1"; break;
- case avm_m2: s = "M2"; break;
- case avm_t1isa: s = "T1 ISA (HEMA)"; break;
- case avm_t1pci: s = "T1 PCI"; break;
- case avm_c4: s = "C4"; break;
- case avm_c2: s = "C2"; break;
- default: s = "???"; break;
- }
- seq_printf(m, "%-16s %s\n", "type", s);
- if ((s = cinfo->version[VER_DRIVER]) != NULL)
- seq_printf(m, "%-16s %s\n", "ver_driver", s);
- if ((s = cinfo->version[VER_CARDTYPE]) != NULL)
- seq_printf(m, "%-16s %s\n", "ver_cardtype", s);
- if ((s = cinfo->version[VER_SERIAL]) != NULL)
- seq_printf(m, "%-16s %s\n", "ver_serial", s);
-
- if (card->cardtype != avm_m1) {
- flag = ((u8 *)(ctrl->profile.manu))[3];
- if (flag)
- seq_printf(m, "%-16s%s%s%s%s%s%s%s\n",
- "protocol",
- (flag & 0x01) ? " DSS1" : "",
- (flag & 0x02) ? " CT1" : "",
- (flag & 0x04) ? " VN3" : "",
- (flag & 0x08) ? " NI1" : "",
- (flag & 0x10) ? " AUSTEL" : "",
- (flag & 0x20) ? " ESS" : "",
- (flag & 0x40) ? " 1TR6" : ""
- );
- }
- if (card->cardtype != avm_m1) {
- flag = ((u8 *)(ctrl->profile.manu))[5];
- if (flag)
- seq_printf(m, "%-16s%s%s%s%s\n",
- "linetype",
- (flag & 0x01) ? " point to point" : "",
- (flag & 0x02) ? " point to multipoint" : "",
- (flag & 0x08) ? " leased line without D-channel" : "",
- (flag & 0x04) ? " leased line with D-channel" : ""
- );
- }
- seq_printf(m, "%-16s %s\n", "cardname", cinfo->cardname);
-
- return 0;
-}
-
-/* ------------------------------------------------------------- */
-
-static int c4_add_card(struct capicardparams *p, struct pci_dev *dev,
- int nr_controllers)
-{
- avmcard *card;
- avmctrl_info *cinfo;
- int retval;
- int i;
-
- card = b1_alloc_card(nr_controllers);
- if (!card) {
- printk(KERN_WARNING "c4: no memory.\n");
- retval = -ENOMEM;
- goto err;
- }
- card->dma = avmcard_dma_alloc("c4", dev, 2048 + 128, 2048 + 128);
- if (!card->dma) {
- printk(KERN_WARNING "c4: no memory.\n");
- retval = -ENOMEM;
- goto err_free;
- }
-
- sprintf(card->name, "c%d-%x", nr_controllers, p->port);
- card->port = p->port;
- card->irq = p->irq;
- card->membase = p->membase;
- card->cardtype = (nr_controllers == 4) ? avm_c4 : avm_c2;
-
- if (!request_region(card->port, AVMB1_PORTLEN, card->name)) {
- printk(KERN_WARNING "c4: ports 0x%03x-0x%03x in use.\n",
- card->port, card->port + AVMB1_PORTLEN);
- retval = -EBUSY;
- goto err_free_dma;
- }
-
- card->mbase = ioremap(card->membase, 128);
- if (card->mbase == NULL) {
- printk(KERN_NOTICE "c4: can't remap memory at 0x%lx\n",
- card->membase);
- retval = -EIO;
- goto err_release_region;
- }
-
- retval = c4_detect(card);
- if (retval != 0) {
- printk(KERN_NOTICE "c4: NO card at 0x%x error(%d)\n",
- card->port, retval);
- retval = -EIO;
- goto err_unmap;
- }
- c4_reset(card);
-
- retval = request_irq(card->irq, c4_interrupt, IRQF_SHARED, card->name, card);
- if (retval) {
- printk(KERN_ERR "c4: unable to get IRQ %d.\n", card->irq);
- retval = -EBUSY;
- goto err_unmap;
- }
-
- for (i = 0; i < nr_controllers; i++) {
- cinfo = &card->ctrlinfo[i];
- cinfo->capi_ctrl.owner = THIS_MODULE;
- cinfo->capi_ctrl.driver_name = "c4";
- cinfo->capi_ctrl.driverdata = cinfo;
- cinfo->capi_ctrl.register_appl = c4_register_appl;
- cinfo->capi_ctrl.release_appl = c4_release_appl;
- cinfo->capi_ctrl.send_message = c4_send_message;
- cinfo->capi_ctrl.load_firmware = c4_load_firmware;
- cinfo->capi_ctrl.reset_ctr = c4_reset_ctr;
- cinfo->capi_ctrl.procinfo = c4_procinfo;
- cinfo->capi_ctrl.proc_show = c4_proc_show;
- strcpy(cinfo->capi_ctrl.name, card->name);
-
- retval = attach_capi_ctr(&cinfo->capi_ctrl);
- if (retval) {
- printk(KERN_ERR "c4: attach controller failed (%d).\n", i);
- for (i--; i >= 0; i--) {
- cinfo = &card->ctrlinfo[i];
- detach_capi_ctr(&cinfo->capi_ctrl);
- }
- goto err_free_irq;
- }
- if (i == 0)
- card->cardnr = cinfo->capi_ctrl.cnr;
- }
-
- printk(KERN_INFO "c4: AVM C%d at i/o %#x, irq %d, mem %#lx\n",
- nr_controllers, card->port, card->irq,
- card->membase);
- pci_set_drvdata(dev, card);
- return 0;
-
-err_free_irq:
- free_irq(card->irq, card);
-err_unmap:
- iounmap(card->mbase);
-err_release_region:
- release_region(card->port, AVMB1_PORTLEN);
-err_free_dma:
- avmcard_dma_free(card->dma);
-err_free:
- b1_free_card(card);
-err:
- return retval;
-}
-
-/* ------------------------------------------------------------- */
-
-static int c4_probe(struct pci_dev *dev, const struct pci_device_id *ent)
-{
- int nr = ent->driver_data;
- int retval = 0;
- struct capicardparams param;
-
- if (pci_enable_device(dev) < 0) {
- printk(KERN_ERR "c4: failed to enable AVM-C%d\n", nr);
- return -ENODEV;
- }
- pci_set_master(dev);
-
- param.port = pci_resource_start(dev, 1);
- param.irq = dev->irq;
- param.membase = pci_resource_start(dev, 0);
-
- printk(KERN_INFO "c4: PCI BIOS reports AVM-C%d at i/o %#x, irq %d, mem %#x\n",
- nr, param.port, param.irq, param.membase);
-
- retval = c4_add_card(&param, dev, nr);
- if (retval != 0) {
- printk(KERN_ERR "c4: no AVM-C%d at i/o %#x, irq %d detected, mem %#x\n",
- nr, param.port, param.irq, param.membase);
- pci_disable_device(dev);
- return -ENODEV;
- }
- return 0;
-}
-
-static struct pci_driver c4_pci_driver = {
- .name = "c4",
- .id_table = c4_pci_tbl,
- .probe = c4_probe,
- .remove = c4_remove,
-};
-
-static struct capi_driver capi_driver_c2 = {
- .name = "c2",
- .revision = "1.0",
-};
-
-static struct capi_driver capi_driver_c4 = {
- .name = "c4",
- .revision = "1.0",
-};
-
-static int __init c4_init(void)
-{
- char *p;
- char rev[32];
- int err;
-
- if ((p = strchr(revision, ':')) != NULL && p[1]) {
- strlcpy(rev, p + 2, 32);
- if ((p = strchr(rev, '$')) != NULL && p > rev)
- *(p - 1) = 0;
- } else
- strcpy(rev, "1.0");
-
- err = pci_register_driver(&c4_pci_driver);
- if (!err) {
- strlcpy(capi_driver_c2.revision, rev, 32);
- register_capi_driver(&capi_driver_c2);
- strlcpy(capi_driver_c4.revision, rev, 32);
- register_capi_driver(&capi_driver_c4);
- printk(KERN_INFO "c4: revision %s\n", rev);
- }
- return err;
-}
-
-static void __exit c4_exit(void)
-{
- unregister_capi_driver(&capi_driver_c2);
- unregister_capi_driver(&capi_driver_c4);
- pci_unregister_driver(&c4_pci_driver);
-}
-
-module_init(c4_init);
-module_exit(c4_exit);
diff --git a/drivers/staging/isdn/avm/t1isa.c b/drivers/staging/isdn/avm/t1isa.c
deleted file mode 100644
index 2153619c5b31..000000000000
--- a/drivers/staging/isdn/avm/t1isa.c
+++ /dev/null
@@ -1,594 +0,0 @@
-/* $Id: t1isa.c,v 1.1.2.3 2004/02/10 01:07:12 keil Exp $
- *
- * Module for AVM T1 HEMA-card.
- *
- * Copyright 1999 by Carsten Paeth <calle@calle.de>
- *
- * This software may be used and distributed according to the terms
- * of the GNU General Public License, incorporated herein by reference.
- *
- */
-
-#include <linux/module.h>
-#include <linux/kernel.h>
-#include <linux/skbuff.h>
-#include <linux/delay.h>
-#include <linux/mm.h>
-#include <linux/interrupt.h>
-#include <linux/ioport.h>
-#include <linux/capi.h>
-#include <linux/netdevice.h>
-#include <linux/kernelcapi.h>
-#include <linux/init.h>
-#include <linux/pci.h>
-#include <linux/gfp.h>
-#include <asm/io.h>
-#include <linux/isdn/capicmd.h>
-#include <linux/isdn/capiutil.h>
-#include <linux/isdn/capilli.h>
-#include "avmcard.h"
-
-/* ------------------------------------------------------------- */
-
-static char *revision = "$Revision: 1.1.2.3 $";
-
-/* ------------------------------------------------------------- */
-
-MODULE_DESCRIPTION("CAPI4Linux: Driver for AVM T1 HEMA ISA card");
-MODULE_AUTHOR("Carsten Paeth");
-MODULE_LICENSE("GPL");
-
-/* ------------------------------------------------------------- */
-
-static int hema_irq_table[16] =
-{0,
- 0,
- 0,
- 0x80, /* irq 3 */
- 0,
- 0x90, /* irq 5 */
- 0,
- 0xA0, /* irq 7 */
- 0,
- 0xB0, /* irq 9 */
- 0xC0, /* irq 10 */
- 0xD0, /* irq 11 */
- 0xE0, /* irq 12 */
- 0,
- 0,
- 0xF0, /* irq 15 */
-};
-
-static int t1_detectandinit(unsigned int base, unsigned irq, int cardnr)
-{
- unsigned char cregs[8];
- unsigned char reverse_cardnr;
- unsigned char dummy;
- int i;
-
- reverse_cardnr = ((cardnr & 0x01) << 3) | ((cardnr & 0x02) << 1)
- | ((cardnr & 0x04) >> 1) | ((cardnr & 0x08) >> 3);
- cregs[0] = (HEMA_VERSION_ID << 4) | (reverse_cardnr & 0xf);
- cregs[1] = 0x00; /* fast & slow link connected to CON1 */
-	cregs[2] = 0x05;	/* fast link 20 Mbit, slow link 20 Mbit */
- cregs[3] = 0;
- cregs[4] = 0x11; /* zero wait state */
- cregs[5] = hema_irq_table[irq & 0xf];
- cregs[6] = 0;
- cregs[7] = 0;
-
- /*
-	 * no one else should be using the ISA bus at this moment,
-	 * but there is no function to prevent it :-(
- * save_flags(flags); cli();
- */
-
- /* board reset */
- t1outp(base, T1_RESETBOARD, 0xf);
- mdelay(100);
- dummy = t1inp(base, T1_FASTLINK + T1_OUTSTAT); /* first read */
-
- /* write config */
- dummy = (base >> 4) & 0xff;
- for (i = 1; i <= 0xf; i++) t1outp(base, i, dummy);
- t1outp(base, HEMA_PAL_ID & 0xf, dummy);
- t1outp(base, HEMA_PAL_ID >> 4, cregs[0]);
- for (i = 1; i < 7; i++) t1outp(base, 0, cregs[i]);
- t1outp(base, ((base >> 4)) & 0x3, cregs[7]);
- /* restore_flags(flags); */
-
- mdelay(100);
- t1outp(base, T1_FASTLINK + T1_RESETLINK, 0);
- t1outp(base, T1_SLOWLINK + T1_RESETLINK, 0);
- mdelay(10);
- t1outp(base, T1_FASTLINK + T1_RESETLINK, 1);
- t1outp(base, T1_SLOWLINK + T1_RESETLINK, 1);
- mdelay(100);
- t1outp(base, T1_FASTLINK + T1_RESETLINK, 0);
- t1outp(base, T1_SLOWLINK + T1_RESETLINK, 0);
- mdelay(10);
- t1outp(base, T1_FASTLINK + T1_ANALYSE, 0);
- mdelay(5);
- t1outp(base, T1_SLOWLINK + T1_ANALYSE, 0);
-
- if (t1inp(base, T1_FASTLINK + T1_OUTSTAT) != 0x1) /* tx empty */
- return 1;
- if (t1inp(base, T1_FASTLINK + T1_INSTAT) != 0x0) /* rx empty */
- return 2;
- if (t1inp(base, T1_FASTLINK + T1_IRQENABLE) != 0x0)
- return 3;
- if ((t1inp(base, T1_FASTLINK + T1_FIFOSTAT) & 0xf0) != 0x70)
- return 4;
- if ((t1inp(base, T1_FASTLINK + T1_IRQMASTER) & 0x0e) != 0)
- return 5;
- if ((t1inp(base, T1_FASTLINK + T1_IDENT) & 0x7d) != 1)
- return 6;
- if (t1inp(base, T1_SLOWLINK + T1_OUTSTAT) != 0x1) /* tx empty */
- return 7;
- if ((t1inp(base, T1_SLOWLINK + T1_IRQMASTER) & 0x0e) != 0)
- return 8;
- if ((t1inp(base, T1_SLOWLINK + T1_IDENT) & 0x7d) != 0)
- return 9;
- return 0;
-}
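
The detection routine above programs the HEMA configuration registers, encoding the card number with its low four bits reversed. A standalone sketch of that nibble reversal, for illustration only (reverse_nibble and the test values are not part of the driver):

#include <stdio.h>

/* reverse the low nibble: bit0<->bit3, bit1<->bit2, as in t1_detectandinit */
static unsigned char reverse_nibble(unsigned char cardnr)
{
	return ((cardnr & 0x01) << 3) | ((cardnr & 0x02) << 1)
	     | ((cardnr & 0x04) >> 1) | ((cardnr & 0x08) >> 3);
}

int main(void)
{
	/* card 1 -> 8 (0b0001 -> 0b1000); card 6 -> 6 (0b0110, a palindrome) */
	printf("%u %u\n", reverse_nibble(1), reverse_nibble(6));
	return 0;
}
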
-
-static irqreturn_t t1isa_interrupt(int interrupt, void *devptr)
-{
- avmcard *card = devptr;
- avmctrl_info *cinfo = &card->ctrlinfo[0];
- struct capi_ctr *ctrl = &cinfo->capi_ctrl;
- unsigned char b1cmd;
- struct sk_buff *skb;
-
- unsigned ApplId;
- unsigned MsgLen;
- unsigned DataB3Len;
- unsigned NCCI;
- unsigned WindowSize;
- unsigned long flags;
-
- spin_lock_irqsave(&card->lock, flags);
-
- while (b1_rx_full(card->port)) {
-
- b1cmd = b1_get_byte(card->port);
-
- switch (b1cmd) {
-
- case RECEIVE_DATA_B3_IND:
-
- ApplId = (unsigned) b1_get_word(card->port);
- MsgLen = t1_get_slice(card->port, card->msgbuf);
- DataB3Len = t1_get_slice(card->port, card->databuf);
- spin_unlock_irqrestore(&card->lock, flags);
-
- if (MsgLen < 30) { /* not CAPI 64Bit */
- memset(card->msgbuf + MsgLen, 0, 30 - MsgLen);
- MsgLen = 30;
- CAPIMSG_SETLEN(card->msgbuf, 30);
- }
- if (!(skb = alloc_skb(DataB3Len + MsgLen, GFP_ATOMIC))) {
- printk(KERN_ERR "%s: incoming packet dropped\n",
- card->name);
- } else {
- skb_put_data(skb, card->msgbuf, MsgLen);
- skb_put_data(skb, card->databuf, DataB3Len);
- capi_ctr_handle_message(ctrl, ApplId, skb);
- }
- break;
-
- case RECEIVE_MESSAGE:
-
- ApplId = (unsigned) b1_get_word(card->port);
- MsgLen = t1_get_slice(card->port, card->msgbuf);
- if (!(skb = alloc_skb(MsgLen, GFP_ATOMIC))) {
- spin_unlock_irqrestore(&card->lock, flags);
- printk(KERN_ERR "%s: incoming packet dropped\n",
- card->name);
- } else {
- skb_put_data(skb, card->msgbuf, MsgLen);
- if (CAPIMSG_CMD(skb->data) == CAPI_DATA_B3)
- capilib_data_b3_conf(&cinfo->ncci_head, ApplId,
- CAPIMSG_NCCI(skb->data),
- CAPIMSG_MSGID(skb->data));
- spin_unlock_irqrestore(&card->lock, flags);
- capi_ctr_handle_message(ctrl, ApplId, skb);
- }
- break;
-
- case RECEIVE_NEW_NCCI:
-
- ApplId = b1_get_word(card->port);
- NCCI = b1_get_word(card->port);
- WindowSize = b1_get_word(card->port);
- capilib_new_ncci(&cinfo->ncci_head, ApplId, NCCI, WindowSize);
- spin_unlock_irqrestore(&card->lock, flags);
- break;
-
- case RECEIVE_FREE_NCCI:
-
- ApplId = b1_get_word(card->port);
- NCCI = b1_get_word(card->port);
- if (NCCI != 0xffffffff)
- capilib_free_ncci(&cinfo->ncci_head, ApplId, NCCI);
- spin_unlock_irqrestore(&card->lock, flags);
- break;
-
- case RECEIVE_START:
- b1_put_byte(card->port, SEND_POLLACK);
- spin_unlock_irqrestore(&card->lock, flags);
- capi_ctr_resume_output(ctrl);
- break;
-
- case RECEIVE_STOP:
- spin_unlock_irqrestore(&card->lock, flags);
- capi_ctr_suspend_output(ctrl);
- break;
-
- case RECEIVE_INIT:
-
- cinfo->versionlen = t1_get_slice(card->port, cinfo->versionbuf);
- spin_unlock_irqrestore(&card->lock, flags);
- b1_parse_version(cinfo);
- printk(KERN_INFO "%s: %s-card (%s) now active\n",
- card->name,
- cinfo->version[VER_CARDTYPE],
- cinfo->version[VER_DRIVER]);
- capi_ctr_ready(ctrl);
- break;
-
- case RECEIVE_TASK_READY:
- ApplId = (unsigned) b1_get_word(card->port);
- MsgLen = t1_get_slice(card->port, card->msgbuf);
- spin_unlock_irqrestore(&card->lock, flags);
- card->msgbuf[MsgLen] = 0;
- while (MsgLen > 0
- && (card->msgbuf[MsgLen - 1] == '\n'
- || card->msgbuf[MsgLen - 1] == '\r')) {
- card->msgbuf[MsgLen - 1] = 0;
- MsgLen--;
- }
- printk(KERN_INFO "%s: task %d \"%s\" ready.\n",
- card->name, ApplId, card->msgbuf);
- break;
-
- case RECEIVE_DEBUGMSG:
- MsgLen = t1_get_slice(card->port, card->msgbuf);
- spin_unlock_irqrestore(&card->lock, flags);
- card->msgbuf[MsgLen] = 0;
- while (MsgLen > 0
- && (card->msgbuf[MsgLen - 1] == '\n'
- || card->msgbuf[MsgLen - 1] == '\r')) {
- card->msgbuf[MsgLen - 1] = 0;
- MsgLen--;
- }
- printk(KERN_INFO "%s: DEBUG: %s\n", card->name, card->msgbuf);
- break;
-
-
- case 0xff:
- spin_unlock_irqrestore(&card->lock, flags);
-			printk(KERN_ERR "%s: card reset?\n", card->name);
- return IRQ_HANDLED;
- default:
- spin_unlock_irqrestore(&card->lock, flags);
- printk(KERN_ERR "%s: b1_interrupt: 0x%x ???\n",
- card->name, b1cmd);
- return IRQ_NONE;
- }
- }
- return IRQ_HANDLED;
-}
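
Each indication handled by this interrupt routine is framed as a command byte followed by 16-bit words and length-prefixed "slices". A rough userspace model of that framing, assuming the same little-endian, length-word-first layout as b1_get_slice() in the companion b1 driver (struct stream, get_word and get_slice are illustrative names, not the driver's API):

#include <stdio.h>

struct stream { const unsigned char *p; };

static unsigned get_byte(struct stream *s) { return *s->p++; }

static unsigned get_word(struct stream *s)
{
	unsigned lo = get_byte(s);		/* low byte first (assumed) */
	return lo | (get_byte(s) << 8);
}

static unsigned get_slice(struct stream *s, unsigned char *dst)
{
	unsigned i, len = get_word(s);		/* 16-bit length prefix */
	for (i = 0; i < len; i++)
		dst[i] = get_byte(s);
	return len;
}

int main(void)
{
	/* RECEIVE_MESSAGE-style record: ApplId word, then one slice */
	const unsigned char frame[] = { 0x2a, 0x00, 0x03, 0x00, 'f', 'o', 'o' };
	struct stream s = { frame };
	unsigned char buf[16];
	unsigned applid = get_word(&s);
	unsigned len = get_slice(&s, buf);

	printf("ApplId=%u len=%u %.*s\n", applid, len, (int)len, buf);
	return 0;
}
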
-
-/* ------------------------------------------------------------- */
-
-static int t1isa_load_firmware(struct capi_ctr *ctrl, capiloaddata *data)
-{
- avmctrl_info *cinfo = (avmctrl_info *)(ctrl->driverdata);
- avmcard *card = cinfo->card;
- unsigned int port = card->port;
- unsigned long flags;
- int retval;
-
- t1_disable_irq(port);
- b1_reset(port);
-
- if ((retval = b1_load_t4file(card, &data->firmware))) {
- b1_reset(port);
- printk(KERN_ERR "%s: failed to load t4file!!\n",
- card->name);
- return retval;
- }
-
- if (data->configuration.len > 0 && data->configuration.data) {
- if ((retval = b1_load_config(card, &data->configuration))) {
- b1_reset(port);
- printk(KERN_ERR "%s: failed to load config!!\n",
- card->name);
- return retval;
- }
- }
-
- if (!b1_loaded(card)) {
- printk(KERN_ERR "%s: failed to load t4file.\n", card->name);
- return -EIO;
- }
-
- spin_lock_irqsave(&card->lock, flags);
- b1_setinterrupt(port, card->irq, card->cardtype);
- b1_put_byte(port, SEND_INIT);
- b1_put_word(port, CAPI_MAXAPPL);
- b1_put_word(port, AVM_NCCI_PER_CHANNEL * 30);
- b1_put_word(port, ctrl->cnr - 1);
- spin_unlock_irqrestore(&card->lock, flags);
-
- return 0;
-}
-
-static void t1isa_reset_ctr(struct capi_ctr *ctrl)
-{
- avmctrl_info *cinfo = (avmctrl_info *)(ctrl->driverdata);
- avmcard *card = cinfo->card;
- unsigned int port = card->port;
- unsigned long flags;
-
- t1_disable_irq(port);
- b1_reset(port);
- b1_reset(port);
-
- memset(cinfo->version, 0, sizeof(cinfo->version));
- spin_lock_irqsave(&card->lock, flags);
- capilib_release(&cinfo->ncci_head);
- spin_unlock_irqrestore(&card->lock, flags);
- capi_ctr_down(ctrl);
-}
-
-static void t1isa_remove(struct pci_dev *pdev)
-{
- avmctrl_info *cinfo = pci_get_drvdata(pdev);
- avmcard *card;
-
- if (!cinfo)
- return;
-
- card = cinfo->card;
-
- t1_disable_irq(card->port);
- b1_reset(card->port);
- b1_reset(card->port);
- t1_reset(card->port);
-
- detach_capi_ctr(&cinfo->capi_ctrl);
- free_irq(card->irq, card);
- release_region(card->port, AVMB1_PORTLEN);
- b1_free_card(card);
-}
-
-/* ------------------------------------------------------------- */
-
-static u16 t1isa_send_message(struct capi_ctr *ctrl, struct sk_buff *skb);
-static char *t1isa_procinfo(struct capi_ctr *ctrl);
-
-static int t1isa_probe(struct pci_dev *pdev, int cardnr)
-{
- avmctrl_info *cinfo;
- avmcard *card;
- int retval;
-
- card = b1_alloc_card(1);
- if (!card) {
- printk(KERN_WARNING "t1isa: no memory.\n");
- retval = -ENOMEM;
- goto err;
- }
-
- cinfo = card->ctrlinfo;
- card->port = pci_resource_start(pdev, 0);
- card->irq = pdev->irq;
- card->cardtype = avm_t1isa;
- card->cardnr = cardnr;
- sprintf(card->name, "t1isa-%x", card->port);
-
- if (!(((card->port & 0x7) == 0) && ((card->port & 0x30) != 0x30))) {
- printk(KERN_WARNING "t1isa: invalid port 0x%x.\n", card->port);
- retval = -EINVAL;
- goto err_free;
- }
- if (hema_irq_table[card->irq & 0xf] == 0) {
- printk(KERN_WARNING "t1isa: irq %d not valid.\n", card->irq);
- retval = -EINVAL;
- goto err_free;
- }
- if (!request_region(card->port, AVMB1_PORTLEN, card->name)) {
- printk(KERN_INFO "t1isa: ports 0x%03x-0x%03x in use.\n",
- card->port, card->port + AVMB1_PORTLEN);
- retval = -EBUSY;
- goto err_free;
- }
- retval = request_irq(card->irq, t1isa_interrupt, 0, card->name, card);
- if (retval) {
- printk(KERN_INFO "t1isa: unable to get IRQ %d.\n", card->irq);
- retval = -EBUSY;
- goto err_release_region;
- }
-
- if ((retval = t1_detectandinit(card->port, card->irq, card->cardnr)) != 0) {
- printk(KERN_INFO "t1isa: NO card at 0x%x (%d)\n",
- card->port, retval);
- retval = -ENODEV;
- goto err_free_irq;
- }
- t1_disable_irq(card->port);
- b1_reset(card->port);
-
- cinfo->capi_ctrl.owner = THIS_MODULE;
- cinfo->capi_ctrl.driver_name = "t1isa";
- cinfo->capi_ctrl.driverdata = cinfo;
- cinfo->capi_ctrl.register_appl = b1_register_appl;
- cinfo->capi_ctrl.release_appl = b1_release_appl;
- cinfo->capi_ctrl.send_message = t1isa_send_message;
- cinfo->capi_ctrl.load_firmware = t1isa_load_firmware;
- cinfo->capi_ctrl.reset_ctr = t1isa_reset_ctr;
- cinfo->capi_ctrl.procinfo = t1isa_procinfo;
- cinfo->capi_ctrl.proc_show = b1_proc_show;
- strcpy(cinfo->capi_ctrl.name, card->name);
-
- retval = attach_capi_ctr(&cinfo->capi_ctrl);
- if (retval) {
- printk(KERN_INFO "t1isa: attach controller failed.\n");
- goto err_free_irq;
- }
-
- printk(KERN_INFO "t1isa: AVM T1 ISA at i/o %#x, irq %d, card %d\n",
- card->port, card->irq, card->cardnr);
-
- pci_set_drvdata(pdev, cinfo);
- return 0;
-
-err_free_irq:
- free_irq(card->irq, card);
-err_release_region:
- release_region(card->port, AVMB1_PORTLEN);
-err_free:
- b1_free_card(card);
-err:
- return retval;
-}
-
-static u16 t1isa_send_message(struct capi_ctr *ctrl, struct sk_buff *skb)
-{
- avmctrl_info *cinfo = (avmctrl_info *)(ctrl->driverdata);
- avmcard *card = cinfo->card;
- unsigned int port = card->port;
- unsigned long flags;
- u16 len = CAPIMSG_LEN(skb->data);
- u8 cmd = CAPIMSG_COMMAND(skb->data);
- u8 subcmd = CAPIMSG_SUBCOMMAND(skb->data);
- u16 dlen, retval;
-
- spin_lock_irqsave(&card->lock, flags);
- if (CAPICMD(cmd, subcmd) == CAPI_DATA_B3_REQ) {
- retval = capilib_data_b3_req(&cinfo->ncci_head,
- CAPIMSG_APPID(skb->data),
- CAPIMSG_NCCI(skb->data),
- CAPIMSG_MSGID(skb->data));
- if (retval != CAPI_NOERROR) {
- spin_unlock_irqrestore(&card->lock, flags);
- return retval;
- }
- dlen = CAPIMSG_DATALEN(skb->data);
-
- b1_put_byte(port, SEND_DATA_B3_REQ);
- t1_put_slice(port, skb->data, len);
- t1_put_slice(port, skb->data + len, dlen);
- } else {
- b1_put_byte(port, SEND_MESSAGE);
- t1_put_slice(port, skb->data, len);
- }
- spin_unlock_irqrestore(&card->lock, flags);
- dev_kfree_skb_any(skb);
- return CAPI_NOERROR;
-}
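
The send path dispatches on the CAPI message header, whose layout is fixed by the CAPI 2.0 specification: total length at offset 0 (little-endian), ApplID at 2, command and subcommand bytes at 4 and 5, message number at 6. A minimal userspace parse of such a header (capimsg_u16 stands in for the kernel's CAPIMSG_U16 macro; the sample values are made up):

#include <stdint.h>
#include <stdio.h>

static uint16_t capimsg_u16(const uint8_t *m, int off)
{
	return m[off] | (m[off + 1] << 8);
}

int main(void)
{
	/* 22-byte header, ApplID 1, DATA_B3 (0x86) REQ (0x80), msgid 7 */
	uint8_t m[22] = { 22, 0, 1, 0, 0x86, 0x80, 7, 0 };

	printf("len=%u appl=%u cmd=0x%02x sub=0x%02x msgid=%u\n",
	       capimsg_u16(m, 0), capimsg_u16(m, 2), m[4], m[5],
	       capimsg_u16(m, 6));
	return 0;
}
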
-/* ------------------------------------------------------------- */
-
-static char *t1isa_procinfo(struct capi_ctr *ctrl)
-{
- avmctrl_info *cinfo = (avmctrl_info *)(ctrl->driverdata);
-
- if (!cinfo)
- return "";
- sprintf(cinfo->infobuf, "%s %s 0x%x %d %d",
- cinfo->cardname[0] ? cinfo->cardname : "-",
- cinfo->version[VER_DRIVER] ? cinfo->version[VER_DRIVER] : "-",
- cinfo->card ? cinfo->card->port : 0x0,
- cinfo->card ? cinfo->card->irq : 0,
- cinfo->card ? cinfo->card->cardnr : 0
- );
- return cinfo->infobuf;
-}
-
-
-/* ------------------------------------------------------------- */
-
-#define MAX_CARDS 4
-static struct pci_dev isa_dev[MAX_CARDS];
-static int io[MAX_CARDS];
-static int irq[MAX_CARDS];
-static int cardnr[MAX_CARDS];
-
-module_param_hw_array(io, int, ioport, NULL, 0);
-module_param_hw_array(irq, int, irq, NULL, 0);
-module_param_array(cardnr, int, NULL, 0);
-MODULE_PARM_DESC(io, "I/O base address(es)");
-MODULE_PARM_DESC(irq, "IRQ number(s) (assigned)");
-MODULE_PARM_DESC(cardnr, "Card number(s) (as jumpered)");
-
-static int t1isa_add_card(struct capi_driver *driver, capicardparams *data)
-{
- int i;
-
- for (i = 0; i < MAX_CARDS; i++) {
- if (isa_dev[i].resource[0].start)
- continue;
-
- isa_dev[i].resource[0].start = data->port;
- isa_dev[i].irq = data->irq;
-
- if (t1isa_probe(&isa_dev[i], data->cardnr) == 0)
- return 0;
- }
- return -ENODEV;
-}
-
-static struct capi_driver capi_driver_t1isa = {
- .name = "t1isa",
- .revision = "1.0",
- .add_card = t1isa_add_card,
-};
-
-static int __init t1isa_init(void)
-{
- char rev[32];
- char *p;
- int i;
-
- if ((p = strchr(revision, ':')) != NULL && p[1]) {
- strlcpy(rev, p + 2, 32);
- if ((p = strchr(rev, '$')) != NULL && p > rev)
- *(p - 1) = 0;
- } else
- strcpy(rev, "1.0");
-
- for (i = 0; i < MAX_CARDS; i++) {
- if (!io[i])
- break;
-
- isa_dev[i].resource[0].start = io[i];
- isa_dev[i].irq = irq[i];
-
- if (t1isa_probe(&isa_dev[i], cardnr[i]) != 0)
- return -ENODEV;
- }
-
- strlcpy(capi_driver_t1isa.revision, rev, 32);
- register_capi_driver(&capi_driver_t1isa);
- printk(KERN_INFO "t1isa: revision %s\n", rev);
-
- return 0;
-}
-
-static void __exit t1isa_exit(void)
-{
- int i;
-
- unregister_capi_driver(&capi_driver_t1isa);
- for (i = 0; i < MAX_CARDS; i++) {
- if (!io[i])
- break;
-
- t1isa_remove(&isa_dev[i]);
- }
-}
-
-module_init(t1isa_init);
-module_exit(t1isa_exit);
diff --git a/drivers/staging/isdn/avm/t1pci.c b/drivers/staging/isdn/avm/t1pci.c
deleted file mode 100644
index f5ed1d5004c9..000000000000
--- a/drivers/staging/isdn/avm/t1pci.c
+++ /dev/null
@@ -1,259 +0,0 @@
-/* $Id: t1pci.c,v 1.1.2.2 2004/01/16 21:09:27 keil Exp $
- *
- * Module for AVM T1 PCI-card.
- *
- * Copyright 1999 by Carsten Paeth <calle@calle.de>
- *
- * This software may be used and distributed according to the terms
- * of the GNU General Public License, incorporated herein by reference.
- *
- */
-
-#include <linux/module.h>
-#include <linux/kernel.h>
-#include <linux/skbuff.h>
-#include <linux/delay.h>
-#include <linux/mm.h>
-#include <linux/interrupt.h>
-#include <linux/ioport.h>
-#include <linux/pci.h>
-#include <linux/capi.h>
-#include <linux/init.h>
-#include <asm/io.h>
-#include <linux/isdn/capicmd.h>
-#include <linux/isdn/capiutil.h>
-#include <linux/isdn/capilli.h>
-#include "avmcard.h"
-
-#undef CONFIG_T1PCI_DEBUG
-#undef CONFIG_T1PCI_POLLDEBUG
-
-/* ------------------------------------------------------------- */
-static char *revision = "$Revision: 1.1.2.2 $";
-/* ------------------------------------------------------------- */
-
-static struct pci_device_id t1pci_pci_tbl[] = {
- { PCI_VENDOR_ID_AVM, PCI_DEVICE_ID_AVM_T1, PCI_ANY_ID, PCI_ANY_ID },
- { } /* Terminating entry */
-};
-
-MODULE_DEVICE_TABLE(pci, t1pci_pci_tbl);
-MODULE_DESCRIPTION("CAPI4Linux: Driver for AVM T1 PCI card");
-MODULE_AUTHOR("Carsten Paeth");
-MODULE_LICENSE("GPL");
-
-/* ------------------------------------------------------------- */
-
-static char *t1pci_procinfo(struct capi_ctr *ctrl);
-
-static int t1pci_add_card(struct capicardparams *p, struct pci_dev *pdev)
-{
- avmcard *card;
- avmctrl_info *cinfo;
- int retval;
-
- card = b1_alloc_card(1);
- if (!card) {
- printk(KERN_WARNING "t1pci: no memory.\n");
- retval = -ENOMEM;
- goto err;
- }
-
- card->dma = avmcard_dma_alloc("t1pci", pdev, 2048 + 128, 2048 + 128);
- if (!card->dma) {
- printk(KERN_WARNING "t1pci: no memory.\n");
- retval = -ENOMEM;
- goto err_free;
- }
-
- cinfo = card->ctrlinfo;
- sprintf(card->name, "t1pci-%x", p->port);
- card->port = p->port;
- card->irq = p->irq;
- card->membase = p->membase;
- card->cardtype = avm_t1pci;
-
- if (!request_region(card->port, AVMB1_PORTLEN, card->name)) {
- printk(KERN_WARNING "t1pci: ports 0x%03x-0x%03x in use.\n",
- card->port, card->port + AVMB1_PORTLEN);
- retval = -EBUSY;
- goto err_free_dma;
- }
-
- card->mbase = ioremap(card->membase, 64);
- if (!card->mbase) {
- printk(KERN_NOTICE "t1pci: can't remap memory at 0x%lx\n",
- card->membase);
- retval = -EIO;
- goto err_release_region;
- }
-
- b1dma_reset(card);
-
- retval = t1pci_detect(card);
- if (retval != 0) {
- if (retval < 6)
- printk(KERN_NOTICE "t1pci: NO card at 0x%x (%d)\n",
- card->port, retval);
- else
- printk(KERN_NOTICE "t1pci: card at 0x%x, but cable not connected or T1 has no power (%d)\n",
- card->port, retval);
- retval = -EIO;
- goto err_unmap;
- }
- b1dma_reset(card);
-
- retval = request_irq(card->irq, b1dma_interrupt, IRQF_SHARED, card->name, card);
- if (retval) {
- printk(KERN_ERR "t1pci: unable to get IRQ %d.\n", card->irq);
- retval = -EBUSY;
- goto err_unmap;
- }
-
- cinfo->capi_ctrl.owner = THIS_MODULE;
- cinfo->capi_ctrl.driver_name = "t1pci";
- cinfo->capi_ctrl.driverdata = cinfo;
- cinfo->capi_ctrl.register_appl = b1dma_register_appl;
- cinfo->capi_ctrl.release_appl = b1dma_release_appl;
- cinfo->capi_ctrl.send_message = b1dma_send_message;
- cinfo->capi_ctrl.load_firmware = b1dma_load_firmware;
- cinfo->capi_ctrl.reset_ctr = b1dma_reset_ctr;
- cinfo->capi_ctrl.procinfo = t1pci_procinfo;
- cinfo->capi_ctrl.proc_show = b1dma_proc_show;
- strcpy(cinfo->capi_ctrl.name, card->name);
-
- retval = attach_capi_ctr(&cinfo->capi_ctrl);
- if (retval) {
- printk(KERN_ERR "t1pci: attach controller failed.\n");
- retval = -EBUSY;
- goto err_free_irq;
- }
- card->cardnr = cinfo->capi_ctrl.cnr;
-
- printk(KERN_INFO "t1pci: AVM T1 PCI at i/o %#x, irq %d, mem %#lx\n",
- card->port, card->irq, card->membase);
-
- pci_set_drvdata(pdev, card);
- return 0;
-
-err_free_irq:
- free_irq(card->irq, card);
-err_unmap:
- iounmap(card->mbase);
-err_release_region:
- release_region(card->port, AVMB1_PORTLEN);
-err_free_dma:
- avmcard_dma_free(card->dma);
-err_free:
- b1_free_card(card);
-err:
- return retval;
-}
-
-/* ------------------------------------------------------------- */
-
-static void t1pci_remove(struct pci_dev *pdev)
-{
- avmcard *card = pci_get_drvdata(pdev);
- avmctrl_info *cinfo = card->ctrlinfo;
-
- b1dma_reset(card);
-
- detach_capi_ctr(&cinfo->capi_ctrl);
- free_irq(card->irq, card);
- iounmap(card->mbase);
- release_region(card->port, AVMB1_PORTLEN);
- avmcard_dma_free(card->dma);
- b1_free_card(card);
-}
-
-/* ------------------------------------------------------------- */
-
-static char *t1pci_procinfo(struct capi_ctr *ctrl)
-{
- avmctrl_info *cinfo = (avmctrl_info *)(ctrl->driverdata);
-
- if (!cinfo)
- return "";
- sprintf(cinfo->infobuf, "%s %s 0x%x %d 0x%lx",
- cinfo->cardname[0] ? cinfo->cardname : "-",
- cinfo->version[VER_DRIVER] ? cinfo->version[VER_DRIVER] : "-",
- cinfo->card ? cinfo->card->port : 0x0,
- cinfo->card ? cinfo->card->irq : 0,
- cinfo->card ? cinfo->card->membase : 0
- );
- return cinfo->infobuf;
-}
-
-/* ------------------------------------------------------------- */
-
-static int t1pci_probe(struct pci_dev *dev, const struct pci_device_id *ent)
-{
- struct capicardparams param;
- int retval;
-
- if (pci_enable_device(dev) < 0) {
- printk(KERN_ERR "t1pci: failed to enable AVM-T1-PCI\n");
- return -ENODEV;
- }
- pci_set_master(dev);
-
- param.port = pci_resource_start(dev, 1);
- param.irq = dev->irq;
- param.membase = pci_resource_start(dev, 0);
-
- printk(KERN_INFO "t1pci: PCI BIOS reports AVM-T1-PCI at i/o %#x, irq %d, mem %#x\n",
- param.port, param.irq, param.membase);
-
- retval = t1pci_add_card(&param, dev);
- if (retval != 0) {
- printk(KERN_ERR "t1pci: no AVM-T1-PCI at i/o %#x, irq %d detected, mem %#x\n",
- param.port, param.irq, param.membase);
- pci_disable_device(dev);
- return -ENODEV;
- }
- return 0;
-}
-
-static struct pci_driver t1pci_pci_driver = {
- .name = "t1pci",
- .id_table = t1pci_pci_tbl,
- .probe = t1pci_probe,
- .remove = t1pci_remove,
-};
-
-static struct capi_driver capi_driver_t1pci = {
- .name = "t1pci",
- .revision = "1.0",
-};
-
-static int __init t1pci_init(void)
-{
- char *p;
- char rev[32];
- int err;
-
- if ((p = strchr(revision, ':')) != NULL && p[1]) {
- strlcpy(rev, p + 2, 32);
- if ((p = strchr(rev, '$')) != NULL && p > rev)
- *(p - 1) = 0;
- } else
- strcpy(rev, "1.0");
-
- err = pci_register_driver(&t1pci_pci_driver);
- if (!err) {
- strlcpy(capi_driver_t1pci.revision, rev, 32);
- register_capi_driver(&capi_driver_t1pci);
- printk(KERN_INFO "t1pci: revision %s\n", rev);
- }
- return err;
-}
-
-static void __exit t1pci_exit(void)
-{
- unregister_capi_driver(&capi_driver_t1pci);
- pci_unregister_driver(&t1pci_pci_driver);
-}
-
-module_init(t1pci_init);
-module_exit(t1pci_exit);
diff --git a/drivers/staging/isdn/gigaset/Kconfig b/drivers/staging/isdn/gigaset/Kconfig
deleted file mode 100644
index c593105b3600..000000000000
--- a/drivers/staging/isdn/gigaset/Kconfig
+++ /dev/null
@@ -1,62 +0,0 @@
-# SPDX-License-Identifier: GPL-2.0-only
-menuconfig ISDN_DRV_GIGASET
- tristate "Siemens Gigaset support"
- depends on TTY
- select CRC_CCITT
- select BITREVERSE
- help
- This driver supports the Siemens Gigaset SX205/255 family of
- ISDN DECT bases, including the predecessors Gigaset 3070/3075
- and 4170/4175 and their T-Com versions Sinus 45isdn and Sinus
- 721X.
-	  If you have one of these devices, say M here and also to at
-	  least one of the connection-specific options that follow.
- This will build a module called "gigaset".
- Note: If you build your ISDN subsystem (ISDN_CAPI or ISDN_I4L)
- as a module, you have to build this driver as a module too,
- otherwise the Gigaset device won't show up as an ISDN device.
-
-if ISDN_DRV_GIGASET
-
-config GIGASET_CAPI
- bool "Gigaset CAPI support"
- depends on ISDN_CAPI='y'||(ISDN_CAPI='m'&&ISDN_DRV_GIGASET='m')
- default 'y'
- help
- Build the Gigaset driver as a CAPI 2.0 driver interfacing with
- the Kernel CAPI subsystem. To use it with the old ISDN4Linux
- subsystem you'll have to enable the capidrv glue driver.
-	  (select ISDN_CAPI_CAPIDRV).
- Say N to build the old native ISDN4Linux variant.
- If unsure, say Y.
-
-config GIGASET_BASE
- tristate "Gigaset base station support"
- depends on USB
- help
- Say M here if you want to use the USB interface of the Gigaset
- base for connection to your system.
- This will build a module called "bas_gigaset".
-
-config GIGASET_M105
- tristate "Gigaset M105 support"
- depends on USB
- help
- Say M here if you want to connect to the Gigaset base via DECT
- using a Gigaset M105 (Sinus 45 Data 2) USB DECT device.
- This will build a module called "usb_gigaset".
-
-config GIGASET_M101
- tristate "Gigaset M101 support"
- help
- Say M here if you want to connect to the Gigaset base via DECT
- using a Gigaset M101 (Sinus 45 Data 1) RS232 DECT device.
- This will build a module called "ser_gigaset".
-
-config GIGASET_DEBUG
- bool "Gigaset debugging"
- help
- This enables debugging code in the Gigaset drivers.
- If in doubt, say yes.
-
-endif # ISDN_DRV_GIGASET
diff --git a/drivers/staging/isdn/gigaset/Makefile b/drivers/staging/isdn/gigaset/Makefile
deleted file mode 100644
index 9c010891dcd7..000000000000
--- a/drivers/staging/isdn/gigaset/Makefile
+++ /dev/null
@@ -1,17 +0,0 @@
-# SPDX-License-Identifier: GPL-2.0
-gigaset-y := common.o interface.o proc.o ev-layer.o asyncdata.o
-
-ifdef CONFIG_GIGASET_CAPI
-gigaset-y += capi.o
-else
-gigaset-y += dummyll.o
-endif
-
-usb_gigaset-y := usb-gigaset.o
-ser_gigaset-y := ser-gigaset.o
-bas_gigaset-y := bas-gigaset.o isocdata.o
-
-obj-$(CONFIG_ISDN_DRV_GIGASET) += gigaset.o
-obj-$(CONFIG_GIGASET_M105) += usb_gigaset.o
-obj-$(CONFIG_GIGASET_BASE) += bas_gigaset.o
-obj-$(CONFIG_GIGASET_M101) += ser_gigaset.o
diff --git a/drivers/staging/isdn/gigaset/asyncdata.c b/drivers/staging/isdn/gigaset/asyncdata.c
deleted file mode 100644
index a34b3c9d8a71..000000000000
--- a/drivers/staging/isdn/gigaset/asyncdata.c
+++ /dev/null
@@ -1,606 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-/*
- * Common data handling layer for ser_gigaset and usb_gigaset
- *
- * Copyright (c) 2005 by Tilman Schmidt <tilman@imap.cc>,
- * Hansjoerg Lipp <hjlipp@web.de>,
- * Stefan Eilers.
- *
- * =====================================================================
- * =====================================================================
- */
-
-#include "gigaset.h"
-#include <linux/crc-ccitt.h>
-#include <linux/bitrev.h>
-#include <linux/export.h>
-
-/* check if byte must be stuffed/escaped
- * It is not clear exactly which bytes must be escaped, so take the
- * conservative route and escape every value below 0x20, the flag
- * sequence, and the control escape character.
- */
-static inline int muststuff(unsigned char c)
-{
- if (c < PPP_TRANS) return 1;
- if (c == PPP_FLAG) return 1;
- if (c == PPP_ESCAPE) return 1;
- /* other possible candidates: */
- /* 0x91: XON with parity set */
- /* 0x93: XOFF with parity set */
- return 0;
-}
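
Octet stuffing replaces each byte matching this predicate with PPP_ESCAPE followed by the byte XORed with PPP_TRANS; the constants below mirror <linux/ppp_defs.h>. A small demonstration (the demo values are chosen for illustration):

#include <stdio.h>

#define PPP_FLAG	0x7e	/* frame delimiter */
#define PPP_ESCAPE	0x7d	/* escape introducer */
#define PPP_TRANS	0x20	/* XOR mask applied to escaped bytes */

static int muststuff(unsigned char c)
{
	return c < PPP_TRANS || c == PPP_FLAG || c == PPP_ESCAPE;
}

int main(void)
{
	/* 0x7e -> 7d 5e, 0x7d -> 7d 5d, 0x11 (XON) -> 7d 31, 0x41 as-is */
	const unsigned char demo[] = { 0x7e, 0x7d, 0x11, 0x41 };
	size_t i;

	for (i = 0; i < sizeof(demo); i++) {
		if (muststuff(demo[i]))
			printf("%02x -> %02x %02x\n", demo[i], PPP_ESCAPE,
			       demo[i] ^ PPP_TRANS);
		else
			printf("%02x -> %02x\n", demo[i], demo[i]);
	}
	return 0;
}
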
-
-/* == data input =========================================================== */
-
-/* process a block of received bytes in command mode
- * (mstate != MS_LOCKED && (inputstate & INS_command))
- * Append received bytes to the command response buffer and forward them
- * line by line to the response handler. Exit whenever a mode/state change
- * might have occurred.
- * Note: Received lines may be terminated by CR, LF, or CR LF, which will be
- * removed before passing the line to the response handler.
- * Return value:
- * number of processed bytes
- */
-static unsigned cmd_loop(unsigned numbytes, struct inbuf_t *inbuf)
-{
- unsigned char *src = inbuf->data + inbuf->head;
- struct cardstate *cs = inbuf->cs;
- unsigned cbytes = cs->cbytes;
- unsigned procbytes = 0;
- unsigned char c;
-
- while (procbytes < numbytes) {
- c = *src++;
- procbytes++;
-
- switch (c) {
- case '\n':
- if (cbytes == 0 && cs->respdata[0] == '\r') {
- /* collapse LF with preceding CR */
- cs->respdata[0] = 0;
- break;
- }
- /* fall through */
- case '\r':
- /* end of message line, pass to response handler */
- if (cbytes >= MAX_RESP_SIZE) {
- dev_warn(cs->dev, "response too large (%d)\n",
- cbytes);
- cbytes = MAX_RESP_SIZE;
- }
- cs->cbytes = cbytes;
- gigaset_dbg_buffer(DEBUG_TRANSCMD, "received response",
- cbytes, cs->respdata);
- gigaset_handle_modem_response(cs);
- cbytes = 0;
-
- /* store EOL byte for CRLF collapsing */
- cs->respdata[0] = c;
-
- /* cs->dle may have changed */
- if (cs->dle && !(inbuf->inputstate & INS_DLE_command))
- inbuf->inputstate &= ~INS_command;
-
- /* return for reevaluating state */
- goto exit;
-
- case DLE_FLAG:
- if (inbuf->inputstate & INS_DLE_char) {
- /* quoted DLE: clear quote flag */
- inbuf->inputstate &= ~INS_DLE_char;
- } else if (cs->dle ||
- (inbuf->inputstate & INS_DLE_command)) {
- /* DLE escape, pass up for handling */
- inbuf->inputstate |= INS_DLE_char;
- goto exit;
- }
- /* quoted or not in DLE mode: treat as regular data */
- /* fall through */
- default:
- /* append to line buffer if possible */
- if (cbytes < MAX_RESP_SIZE)
- cs->respdata[cbytes] = c;
- cbytes++;
- }
- }
-exit:
- cs->cbytes = cbytes;
- return procbytes;
-}
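
A sketch of the CR/LF collapsing above as a standalone line splitter: a line ends at CR, LF, or CR LF, and the LF of a CR LF pair must not produce an extra empty line (feed() and its buffer size are illustrative, not the driver's):

#include <stdio.h>

static void feed(const char *data, size_t n)
{
	static char last_eol;
	static char line[64];
	static size_t len;
	size_t i;

	for (i = 0; i < n; i++) {
		char c = data[i];

		if (c == '\n' && last_eol == '\r') {
			last_eol = 0;		/* collapse LF with CR */
			continue;
		}
		if (c == '\r' || c == '\n') {
			line[len] = 0;
			printf("line: \"%s\"\n", line);
			len = 0;
			last_eol = c;
			continue;
		}
		last_eol = 0;
		if (len < sizeof(line) - 1)
			line[len++] = c;
	}
}

int main(void)
{
	feed("OK\r\nRING\rCONNECT\n", 17);	/* three lines, no empties */
	return 0;
}
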
-
-/* process a block of received bytes in lock mode
- * All received bytes are passed unmodified to the tty i/f.
- * Return value:
- * number of processed bytes
- */
-static unsigned lock_loop(unsigned numbytes, struct inbuf_t *inbuf)
-{
- unsigned char *src = inbuf->data + inbuf->head;
-
- gigaset_dbg_buffer(DEBUG_LOCKCMD, "received response", numbytes, src);
- gigaset_if_receive(inbuf->cs, src, numbytes);
- return numbytes;
-}
-
-/* process a block of received bytes in HDLC data mode
- * (mstate != MS_LOCKED && !(inputstate & INS_command) && proto2 == L2_HDLC)
- * Collect HDLC frames, undoing byte stuffing and watching for DLE escapes.
- * When a frame is complete, check the FCS and pass valid frames to the LL.
- * If DLE is encountered, return immediately to let the caller handle it.
- * Return value:
- * number of processed bytes
- */
-static unsigned hdlc_loop(unsigned numbytes, struct inbuf_t *inbuf)
-{
- struct cardstate *cs = inbuf->cs;
- struct bc_state *bcs = cs->bcs;
- int inputstate = bcs->inputstate;
- __u16 fcs = bcs->rx_fcs;
- struct sk_buff *skb = bcs->rx_skb;
- unsigned char *src = inbuf->data + inbuf->head;
- unsigned procbytes = 0;
- unsigned char c;
-
- if (inputstate & INS_byte_stuff) {
- if (!numbytes)
- return 0;
- inputstate &= ~INS_byte_stuff;
- goto byte_stuff;
- }
-
- while (procbytes < numbytes) {
- c = *src++;
- procbytes++;
- if (c == DLE_FLAG) {
- if (inputstate & INS_DLE_char) {
- /* quoted DLE: clear quote flag */
- inputstate &= ~INS_DLE_char;
- } else if (cs->dle || (inputstate & INS_DLE_command)) {
- /* DLE escape, pass up for handling */
- inputstate |= INS_DLE_char;
- break;
- }
- }
-
- if (c == PPP_ESCAPE) {
- /* byte stuffing indicator: pull in next byte */
- if (procbytes >= numbytes) {
- /* end of buffer, save for later processing */
- inputstate |= INS_byte_stuff;
- break;
- }
-byte_stuff:
- c = *src++;
- procbytes++;
- if (c == DLE_FLAG) {
- if (inputstate & INS_DLE_char) {
- /* quoted DLE: clear quote flag */
- inputstate &= ~INS_DLE_char;
- } else if (cs->dle ||
- (inputstate & INS_DLE_command)) {
- /* DLE escape, pass up for handling */
- inputstate |=
- INS_DLE_char | INS_byte_stuff;
- break;
- }
- }
- c ^= PPP_TRANS;
-#ifdef CONFIG_GIGASET_DEBUG
- if (!muststuff(c))
- gig_dbg(DEBUG_HDLC, "byte stuffed: 0x%02x", c);
-#endif
- } else if (c == PPP_FLAG) {
- /* end of frame: process content if any */
- if (inputstate & INS_have_data) {
- gig_dbg(DEBUG_HDLC,
- "7e----------------------------");
-
- /* check and pass received frame */
- if (!skb) {
- /* skipped frame */
- gigaset_isdn_rcv_err(bcs);
- } else if (skb->len < 2) {
- /* frame too short for FCS */
- dev_warn(cs->dev,
- "short frame (%d)\n",
- skb->len);
- gigaset_isdn_rcv_err(bcs);
- dev_kfree_skb_any(skb);
- } else if (fcs != PPP_GOODFCS) {
- /* frame check error */
- dev_err(cs->dev,
- "Checksum failed, %u bytes corrupted!\n",
- skb->len);
- gigaset_isdn_rcv_err(bcs);
- dev_kfree_skb_any(skb);
- } else {
- /* good frame */
- __skb_trim(skb, skb->len - 2);
- gigaset_skb_rcvd(bcs, skb);
- }
-
- /* prepare reception of next frame */
- inputstate &= ~INS_have_data;
- skb = gigaset_new_rx_skb(bcs);
- } else {
- /* empty frame (7E 7E) */
-#ifdef CONFIG_GIGASET_DEBUG
- ++bcs->emptycount;
-#endif
- if (!skb) {
- /* skipped (?) */
- gigaset_isdn_rcv_err(bcs);
- skb = gigaset_new_rx_skb(bcs);
- }
- }
-
- fcs = PPP_INITFCS;
- continue;
-#ifdef CONFIG_GIGASET_DEBUG
- } else if (muststuff(c)) {
- /* Should not happen. Possible after ZDLE=1<CR><LF>. */
- gig_dbg(DEBUG_HDLC, "not byte stuffed: 0x%02x", c);
-#endif
- }
-
- /* regular data byte, append to skb */
-#ifdef CONFIG_GIGASET_DEBUG
- if (!(inputstate & INS_have_data)) {
- gig_dbg(DEBUG_HDLC, "7e (%d x) ================",
- bcs->emptycount);
- bcs->emptycount = 0;
- }
-#endif
- inputstate |= INS_have_data;
- if (skb) {
- if (skb->len >= bcs->rx_bufsize) {
- dev_warn(cs->dev, "received packet too long\n");
- dev_kfree_skb_any(skb);
- /* skip remainder of packet */
- bcs->rx_skb = skb = NULL;
- } else {
- __skb_put_u8(skb, c);
- fcs = crc_ccitt_byte(fcs, c);
- }
- }
- }
-
- bcs->inputstate = inputstate;
- bcs->rx_fcs = fcs;
- return procbytes;
-}
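
The FCS check relies on a property of the PPP 16-bit FCS: running the CRC over the payload plus the two appended (complemented, LSB-first) FCS bytes yields the constant PPP_GOODFCS. A self-contained round trip, with fcs_byte() as a bitwise equivalent of the kernel's table-driven crc_ccitt_byte():

#include <stdint.h>
#include <stdio.h>

#define PPP_INITFCS	0xffff
#define PPP_GOODFCS	0xf0b8

static uint16_t fcs_byte(uint16_t fcs, uint8_t c)
{
	int i;

	fcs ^= c;
	for (i = 0; i < 8; i++)
		fcs = (fcs & 1) ? (fcs >> 1) ^ 0x8408 : fcs >> 1;
	return fcs;
}

int main(void)
{
	uint8_t frame[6] = { 'd', 'a', 't', 'a' };
	uint16_t fcs = PPP_INITFCS;
	int i;

	for (i = 0; i < 4; i++)		/* sender: CRC over payload */
		fcs = fcs_byte(fcs, frame[i]);
	fcs ^= 0xffff;			/* complement, as in HDLC_Encode */
	frame[4] = fcs & 0xff;		/* least significant byte first */
	frame[5] = fcs >> 8;

	fcs = PPP_INITFCS;		/* receiver side, as in hdlc_loop */
	for (i = 0; i < 6; i++)
		fcs = fcs_byte(fcs, frame[i]);
	printf("%s\n", fcs == PPP_GOODFCS ? "good frame" : "FCS error");
	return 0;
}
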
-
-/* process a block of received bytes in transparent data mode
- * (mstate != MS_LOCKED && !(inputstate & INS_command) && proto2 != L2_HDLC)
- * Invert bytes, undoing byte stuffing and watching for DLE escapes.
- * If DLE is encountered, return immediately to let the caller handle it.
- * Return value:
- * number of processed bytes
- */
-static unsigned iraw_loop(unsigned numbytes, struct inbuf_t *inbuf)
-{
- struct cardstate *cs = inbuf->cs;
- struct bc_state *bcs = cs->bcs;
- int inputstate = bcs->inputstate;
- struct sk_buff *skb = bcs->rx_skb;
- unsigned char *src = inbuf->data + inbuf->head;
- unsigned procbytes = 0;
- unsigned char c;
-
- if (!skb) {
- /* skip this block */
- gigaset_new_rx_skb(bcs);
- return numbytes;
- }
-
- while (procbytes < numbytes && skb->len < bcs->rx_bufsize) {
- c = *src++;
- procbytes++;
-
- if (c == DLE_FLAG) {
- if (inputstate & INS_DLE_char) {
- /* quoted DLE: clear quote flag */
- inputstate &= ~INS_DLE_char;
- } else if (cs->dle || (inputstate & INS_DLE_command)) {
- /* DLE escape, pass up for handling */
- inputstate |= INS_DLE_char;
- break;
- }
- }
-
- /* regular data byte: append to current skb */
- inputstate |= INS_have_data;
- __skb_put_u8(skb, bitrev8(c));
- }
-
- /* pass data up */
- if (inputstate & INS_have_data) {
- gigaset_skb_rcvd(bcs, skb);
- inputstate &= ~INS_have_data;
- gigaset_new_rx_skb(bcs);
- }
-
- bcs->inputstate = inputstate;
- return procbytes;
-}
-
-/* process DLE escapes
- * Called whenever a DLE sequence might be encountered in the input stream.
- * Either processes the entire DLE sequence or, if that isn't possible,
- * notes the fact that an initial DLE has been received in the INS_DLE_char
- * inputstate flag and resumes processing of the sequence on the next call.
- */
-static void handle_dle(struct inbuf_t *inbuf)
-{
- struct cardstate *cs = inbuf->cs;
-
- if (cs->mstate == MS_LOCKED)
- return; /* no DLE processing in lock mode */
-
- if (!(inbuf->inputstate & INS_DLE_char)) {
- /* no DLE pending */
- if (inbuf->data[inbuf->head] == DLE_FLAG &&
- (cs->dle || inbuf->inputstate & INS_DLE_command)) {
- /* start of DLE sequence */
- inbuf->head++;
- if (inbuf->head == inbuf->tail ||
- inbuf->head == RBUFSIZE) {
- /* end of buffer, save for later processing */
- inbuf->inputstate |= INS_DLE_char;
- return;
- }
- } else {
- /* regular data byte */
- return;
- }
- }
-
- /* consume pending DLE */
- inbuf->inputstate &= ~INS_DLE_char;
-
- switch (inbuf->data[inbuf->head]) {
-	case 'X':	/* start of event message */
- if (inbuf->inputstate & INS_command)
- dev_notice(cs->dev,
- "received <DLE>X in command mode\n");
- inbuf->inputstate |= INS_command | INS_DLE_command;
- inbuf->head++; /* byte consumed */
- break;
- case '.': /* end of event message */
- if (!(inbuf->inputstate & INS_DLE_command))
- dev_notice(cs->dev,
- "received <DLE>. without <DLE>X\n");
- inbuf->inputstate &= ~INS_DLE_command;
- /* return to data mode if in DLE mode */
- if (cs->dle)
- inbuf->inputstate &= ~INS_command;
- inbuf->head++; /* byte consumed */
- break;
- case DLE_FLAG: /* DLE in data stream */
- /* mark as quoted */
- inbuf->inputstate |= INS_DLE_char;
- if (!(cs->dle || inbuf->inputstate & INS_DLE_command))
- dev_notice(cs->dev,
- "received <DLE><DLE> not in DLE mode\n");
- break; /* quoted byte left in buffer */
- default:
- dev_notice(cs->dev, "received <DLE><%02x>\n",
- inbuf->data[inbuf->head]);
- /* quoted byte left in buffer */
- }
-}
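
In DLE mode the device brackets event messages between <DLE>X and <DLE>. and doubles any literal 0x10 data byte. A toy classifier for the second byte of such a sequence, mirroring the switch above (classify() is illustrative only):

#include <stdio.h>

#define DLE_FLAG 0x10

static void classify(unsigned char c)
{
	switch (c) {
	case 'X':
		printf("<DLE>X: start of event message\n");
		break;
	case '.':
		printf("<DLE>.: end of event message\n");
		break;
	case DLE_FLAG:
		printf("<DLE><DLE>: quoted 0x10 data byte\n");
		break;
	default:
		printf("<DLE><%02x>: unexpected\n", c);
	}
}

int main(void)
{
	classify('X');
	classify(DLE_FLAG);
	classify('.');
	return 0;
}
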
-
-/**
- * gigaset_m10x_input() - process a block of data received from the device
- * @inbuf: received data and device descriptor structure.
- *
- * Called by hardware module {ser,usb}_gigaset with a block of received
- * bytes. Separates the bytes received over the serial data channel into
- * user data and command replies (locked/unlocked) according to the
- * current state of the interface.
- */
-void gigaset_m10x_input(struct inbuf_t *inbuf)
-{
- struct cardstate *cs = inbuf->cs;
- unsigned numbytes, procbytes;
-
- gig_dbg(DEBUG_INTR, "buffer state: %u -> %u", inbuf->head, inbuf->tail);
-
- while (inbuf->head != inbuf->tail) {
- /* check for DLE escape */
- handle_dle(inbuf);
-
- /* process a contiguous block of bytes */
- numbytes = (inbuf->head > inbuf->tail ?
- RBUFSIZE : inbuf->tail) - inbuf->head;
- gig_dbg(DEBUG_INTR, "processing %u bytes", numbytes);
- /*
- * numbytes may be 0 if handle_dle() ate the last byte.
- * This does no harm, *_loop() will just return 0 immediately.
- */
-
- if (cs->mstate == MS_LOCKED)
- procbytes = lock_loop(numbytes, inbuf);
- else if (inbuf->inputstate & INS_command)
- procbytes = cmd_loop(numbytes, inbuf);
- else if (cs->bcs->proto2 == L2_HDLC)
- procbytes = hdlc_loop(numbytes, inbuf);
- else
- procbytes = iraw_loop(numbytes, inbuf);
- inbuf->head += procbytes;
-
- /* check for buffer wraparound */
- if (inbuf->head >= RBUFSIZE)
- inbuf->head = 0;
-
- gig_dbg(DEBUG_INTR, "head set to %u", inbuf->head);
- }
-}
-EXPORT_SYMBOL_GPL(gigaset_m10x_input);
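
The input function walks a ring buffer of RBUFSIZE bytes; the contiguous block it can hand to one of the *_loop() routines runs either to the tail or to the wrap point, whichever comes first. A sketch of that computation (the RBUFSIZE value here is made up; the real one lives in gigaset.h):

#include <stdio.h>

#define RBUFSIZE 32	/* illustrative size only */

/* contiguous readable chunk of a [head, tail) ring, as computed above */
static unsigned chunk(unsigned head, unsigned tail)
{
	return (head > tail ? RBUFSIZE : tail) - head;
}

int main(void)
{
	/* unwrapped: 5..20 -> 15 bytes; wrapped: 28..4 -> 4 bytes to end */
	printf("%u %u\n", chunk(5, 20), chunk(28, 4));
	return 0;
}
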
-
-
-/* == data output ========================================================== */
-
-/*
- * Encode a data packet into an octet stuffed HDLC frame with FCS,
- * opening and closing flags, preserving headroom data.
- * parameters:
- * skb skb containing original packet (freed upon return)
- * Return value:
- * pointer to newly allocated skb containing the result frame
- * and the original link layer header, NULL on error
- */
-static struct sk_buff *HDLC_Encode(struct sk_buff *skb)
-{
- struct sk_buff *hdlc_skb;
- __u16 fcs;
- unsigned char c;
- unsigned char *cp;
- int len;
- unsigned int stuf_cnt;
-
- stuf_cnt = 0;
- fcs = PPP_INITFCS;
- cp = skb->data;
- len = skb->len;
- while (len--) {
- if (muststuff(*cp))
- stuf_cnt++;
- fcs = crc_ccitt_byte(fcs, *cp++);
- }
- fcs ^= 0xffff; /* complement */
-
- /* size of new buffer: original size + number of stuffing bytes
- * + 2 bytes FCS + 2 stuffing bytes for FCS (if needed) + 2 flag bytes
- * + room for link layer header
- */
- hdlc_skb = dev_alloc_skb(skb->len + stuf_cnt + 6 + skb->mac_len);
- if (!hdlc_skb) {
- dev_kfree_skb_any(skb);
- return NULL;
- }
-
- /* Copy link layer header into new skb */
- skb_reset_mac_header(hdlc_skb);
- skb_reserve(hdlc_skb, skb->mac_len);
- memcpy(skb_mac_header(hdlc_skb), skb_mac_header(skb), skb->mac_len);
- hdlc_skb->mac_len = skb->mac_len;
-
-	/* Add flag sequence in front of everything. */
- skb_put_u8(hdlc_skb, PPP_FLAG);
-
- /* Perform byte stuffing while copying data. */
- while (skb->len--) {
- if (muststuff(*skb->data)) {
- skb_put_u8(hdlc_skb, PPP_ESCAPE);
- skb_put_u8(hdlc_skb, (*skb->data++) ^ PPP_TRANS);
- } else
- skb_put_u8(hdlc_skb, *skb->data++);
- }
-
- /* Finally add FCS (byte stuffed) and flag sequence */
- c = (fcs & 0x00ff); /* least significant byte first */
- if (muststuff(c)) {
- skb_put_u8(hdlc_skb, PPP_ESCAPE);
- c ^= PPP_TRANS;
- }
- skb_put_u8(hdlc_skb, c);
-
- c = ((fcs >> 8) & 0x00ff);
- if (muststuff(c)) {
- skb_put_u8(hdlc_skb, PPP_ESCAPE);
- c ^= PPP_TRANS;
- }
- skb_put_u8(hdlc_skb, c);
-
- skb_put_u8(hdlc_skb, PPP_FLAG);
-
- dev_kfree_skb_any(skb);
- return hdlc_skb;
-}
-
-/*
- * Encode a data packet into an octet stuffed raw bit inverted frame,
- * preserving headroom data.
- * parameters:
- * skb skb containing original packet (freed upon return)
- * Return value:
- * pointer to newly allocated skb containing the result frame
- * and the original link layer header, NULL on error
- */
-static struct sk_buff *iraw_encode(struct sk_buff *skb)
-{
- struct sk_buff *iraw_skb;
- unsigned char c;
- unsigned char *cp;
- int len;
-
- /* size of new buffer (worst case = every byte must be stuffed):
- * 2 * original size + room for link layer header
- */
- iraw_skb = dev_alloc_skb(2 * skb->len + skb->mac_len);
- if (!iraw_skb) {
- dev_kfree_skb_any(skb);
- return NULL;
- }
-
- /* copy link layer header into new skb */
- skb_reset_mac_header(iraw_skb);
- skb_reserve(iraw_skb, skb->mac_len);
- memcpy(skb_mac_header(iraw_skb), skb_mac_header(skb), skb->mac_len);
- iraw_skb->mac_len = skb->mac_len;
-
- /* copy and stuff data */
- cp = skb->data;
- len = skb->len;
- while (len--) {
- c = bitrev8(*cp++);
- if (c == DLE_FLAG)
- skb_put_u8(iraw_skb, c);
- skb_put_u8(iraw_skb, c);
- }
- dev_kfree_skb_any(skb);
- return iraw_skb;
-}
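
Transparent ("iraw") mode thus sends every byte bit-reversed and doubles any byte whose reversal equals DLE_FLAG (0x10). A userspace sketch with a plain-C stand-in for the kernel's bitrev8():

#include <stdint.h>
#include <stdio.h>

#define DLE_FLAG 0x10

static uint8_t bitrev8(uint8_t b)
{
	b = (b & 0xf0) >> 4 | (b & 0x0f) << 4;
	b = (b & 0xcc) >> 2 | (b & 0x33) << 2;
	b = (b & 0xaa) >> 1 | (b & 0x55) << 1;
	return b;
}

int main(void)
{
	const uint8_t in[] = { 0x01, 0x08 };	/* 0x08 reverses to 0x10 */
	size_t i;

	for (i = 0; i < sizeof(in); i++) {
		uint8_t c = bitrev8(in[i]);

		if (c == DLE_FLAG)
			printf("%02x ", c);	/* DLE is sent doubled */
		printf("%02x ", c);
	}
	printf("\n");	/* prints: 80 10 10 */
	return 0;
}
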
-
-/**
- * gigaset_m10x_send_skb() - queue an skb for sending
- * @bcs: B channel descriptor structure.
- * @skb: data to send.
- *
- * Called by LL to encode and queue an skb for sending, and start
- * transmission if necessary.
- * Once the payload data has been transmitted completely, gigaset_skb_sent()
- * will be called with the skb's link layer header preserved.
- *
- * Return value:
- * number of bytes accepted for sending (skb->len) if ok,
- * error code < 0 (eg. -ENOMEM) on error
- */
-int gigaset_m10x_send_skb(struct bc_state *bcs, struct sk_buff *skb)
-{
- struct cardstate *cs = bcs->cs;
- unsigned len = skb->len;
- unsigned long flags;
-
- if (bcs->proto2 == L2_HDLC)
- skb = HDLC_Encode(skb);
- else
- skb = iraw_encode(skb);
- if (!skb) {
- dev_err(cs->dev,
- "unable to allocate memory for encoding!\n");
- return -ENOMEM;
- }
-
- skb_queue_tail(&bcs->squeue, skb);
- spin_lock_irqsave(&cs->lock, flags);
- if (cs->connected)
- tasklet_schedule(&cs->write_tasklet);
- spin_unlock_irqrestore(&cs->lock, flags);
-
- return len; /* ok so far */
-}
-EXPORT_SYMBOL_GPL(gigaset_m10x_send_skb);
diff --git a/drivers/staging/isdn/gigaset/bas-gigaset.c b/drivers/staging/isdn/gigaset/bas-gigaset.c
deleted file mode 100644
index c334525a5f63..000000000000
--- a/drivers/staging/isdn/gigaset/bas-gigaset.c
+++ /dev/null
@@ -1,2672 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-/*
- * USB driver for Gigaset 307x base via direct USB connection.
- *
- * Copyright (c) 2001 by Hansjoerg Lipp <hjlipp@web.de>,
- * Tilman Schmidt <tilman@imap.cc>,
- * Stefan Eilers.
- *
- * =====================================================================
- * =====================================================================
- */
-
-#include "gigaset.h"
-#include <linux/usb.h>
-#include <linux/module.h>
-#include <linux/moduleparam.h>
-
-/* Version Information */
-#define DRIVER_AUTHOR "Tilman Schmidt <tilman@imap.cc>, Hansjoerg Lipp <hjlipp@web.de>, Stefan Eilers"
-#define DRIVER_DESC "USB Driver for Gigaset 307x"
-
-
-/* Module parameters */
-
-static int startmode = SM_ISDN;
-static int cidmode = 1;
-
-module_param(startmode, int, S_IRUGO);
-module_param(cidmode, int, S_IRUGO);
-MODULE_PARM_DESC(startmode, "start in isdn4linux mode");
-MODULE_PARM_DESC(cidmode, "Call-ID mode");
-
-#define GIGASET_MINORS 1
-#define GIGASET_MINOR 16
-#define GIGASET_MODULENAME "bas_gigaset"
-#define GIGASET_DEVNAME "ttyGB"
-
-/* length limit according to Siemens 3070usb-protokoll.doc ch. 2.1 */
-#define IF_WRITEBUF 264
-
-/* interrupt pipe message size according to ibid. ch. 2.2 */
-#define IP_MSGSIZE 3
-
-/* Values for the Gigaset 307x */
-#define USB_GIGA_VENDOR_ID 0x0681
-#define USB_3070_PRODUCT_ID 0x0001
-#define USB_3075_PRODUCT_ID 0x0002
-#define USB_SX303_PRODUCT_ID 0x0021
-#define USB_SX353_PRODUCT_ID 0x0022
-
-/* table of devices that work with this driver */
-static const struct usb_device_id gigaset_table[] = {
- { USB_DEVICE(USB_GIGA_VENDOR_ID, USB_3070_PRODUCT_ID) },
- { USB_DEVICE(USB_GIGA_VENDOR_ID, USB_3075_PRODUCT_ID) },
- { USB_DEVICE(USB_GIGA_VENDOR_ID, USB_SX303_PRODUCT_ID) },
- { USB_DEVICE(USB_GIGA_VENDOR_ID, USB_SX353_PRODUCT_ID) },
- { } /* Terminating entry */
-};
-
-MODULE_DEVICE_TABLE(usb, gigaset_table);
-
-/*======================= local function prototypes ==========================*/
-
-/* function called if a new device belonging to this driver is connected */
-static int gigaset_probe(struct usb_interface *interface,
- const struct usb_device_id *id);
-
-/* Function will be called if the device is unplugged */
-static void gigaset_disconnect(struct usb_interface *interface);
-
-/* functions called before/after suspend */
-static int gigaset_suspend(struct usb_interface *intf, pm_message_t message);
-static int gigaset_resume(struct usb_interface *intf);
-
-/* functions called before/after device reset */
-static int gigaset_pre_reset(struct usb_interface *intf);
-static int gigaset_post_reset(struct usb_interface *intf);
-
-static int atread_submit(struct cardstate *, int);
-static void stopurbs(struct bas_bc_state *);
-static int req_submit(struct bc_state *, int, int, int);
-static int atwrite_submit(struct cardstate *, unsigned char *, int);
-static int start_cbsend(struct cardstate *);
-
-/*============================================================================*/
-
-struct bas_cardstate {
- struct usb_device *udev; /* USB device pointer */
- struct cardstate *cs;
- struct usb_interface *interface; /* interface for this device */
- unsigned char minor; /* starting minor number */
-
- struct urb *urb_ctrl; /* control pipe default URB */
- struct usb_ctrlrequest dr_ctrl;
- struct timer_list timer_ctrl; /* control request timeout */
- int retry_ctrl;
-
- struct timer_list timer_atrdy; /* AT command ready timeout */
- struct urb *urb_cmd_out; /* for sending AT commands */
- struct usb_ctrlrequest dr_cmd_out;
- int retry_cmd_out;
-
- struct urb *urb_cmd_in; /* for receiving AT replies */
- struct usb_ctrlrequest dr_cmd_in;
- struct timer_list timer_cmd_in; /* receive request timeout */
- unsigned char *rcvbuf; /* AT reply receive buffer */
-
- struct urb *urb_int_in; /* URB for interrupt pipe */
- unsigned char *int_in_buf;
- struct work_struct int_in_wq; /* for usb_clear_halt() */
- struct timer_list timer_int_in; /* int read retry delay */
- int retry_int_in;
-
- spinlock_t lock; /* locks all following */
- int basstate; /* bitmap (BS_*) */
- int pending; /* uncompleted base request */
- wait_queue_head_t waitqueue;
- int rcvbuf_size; /* size of AT receive buffer */
- /* 0: no receive in progress */
- int retry_cmd_in; /* receive req retry count */
-};
-
-/* status of direct USB connection to 307x base (bits in basstate) */
-#define BS_ATOPEN 0x001 /* AT channel open */
-#define BS_B1OPEN 0x002 /* B channel 1 open */
-#define BS_B2OPEN 0x004 /* B channel 2 open */
-#define BS_ATREADY 0x008 /* base ready for AT command */
-#define BS_INIT 0x010 /* base has signalled INIT_OK */
-#define BS_ATTIMER 0x020 /* waiting for HD_READY_SEND_ATDATA */
-#define BS_ATRDPEND 0x040 /* urb_cmd_in in use */
-#define BS_ATWRPEND 0x080 /* urb_cmd_out in use */
-#define BS_SUSPEND 0x100 /* USB port suspended */
-#define BS_RESETTING 0x200 /* waiting for HD_RESET_INTERRUPT_PIPE_ACK */
-
-
-static struct gigaset_driver *driver;
-
-/* usb specific object needed to register this driver with the usb subsystem */
-static struct usb_driver gigaset_usb_driver = {
- .name = GIGASET_MODULENAME,
- .probe = gigaset_probe,
- .disconnect = gigaset_disconnect,
- .id_table = gigaset_table,
- .suspend = gigaset_suspend,
- .resume = gigaset_resume,
- .reset_resume = gigaset_post_reset,
- .pre_reset = gigaset_pre_reset,
- .post_reset = gigaset_post_reset,
- .disable_hub_initiated_lpm = 1,
-};
-
-/* get message text for usb_submit_urb return code
- */
-static char *get_usb_rcmsg(int rc)
-{
- static char unkmsg[28];
-
- switch (rc) {
- case 0:
- return "success";
- case -ENOMEM:
- return "out of memory";
- case -ENODEV:
- return "device not present";
- case -ENOENT:
- return "endpoint not present";
- case -ENXIO:
- return "URB type not supported";
- case -EINVAL:
- return "invalid argument";
- case -EAGAIN:
- return "start frame too early or too much scheduled";
- case -EFBIG:
- return "too many isoc frames requested";
- case -EPIPE:
- return "endpoint stalled";
- case -EMSGSIZE:
- return "invalid packet size";
- case -ENOSPC:
- return "would overcommit USB bandwidth";
- case -ESHUTDOWN:
- return "device shut down";
- case -EPERM:
- return "reject flag set";
- case -EHOSTUNREACH:
- return "device suspended";
- default:
- snprintf(unkmsg, sizeof(unkmsg), "unknown error %d", rc);
- return unkmsg;
- }
-}
-
-/* get message text for USB status code
- */
-static char *get_usb_statmsg(int status)
-{
- static char unkmsg[28];
-
- switch (status) {
- case 0:
- return "success";
- case -ENOENT:
- return "unlinked (sync)";
- case -EINPROGRESS:
- return "URB still pending";
- case -EPROTO:
- return "bitstuff error, timeout, or unknown USB error";
- case -EILSEQ:
- return "CRC mismatch, timeout, or unknown USB error";
- case -ETIME:
- return "USB response timeout";
- case -EPIPE:
- return "endpoint stalled";
- case -ECOMM:
- return "IN buffer overrun";
- case -ENOSR:
- return "OUT buffer underrun";
- case -EOVERFLOW:
- return "endpoint babble";
- case -EREMOTEIO:
- return "short packet";
- case -ENODEV:
- return "device removed";
- case -EXDEV:
- return "partial isoc transfer";
- case -EINVAL:
- return "ISO madness";
- case -ECONNRESET:
- return "unlinked (async)";
- case -ESHUTDOWN:
- return "device shut down";
- default:
- snprintf(unkmsg, sizeof(unkmsg), "unknown status %d", status);
- return unkmsg;
- }
-}
-
-/* usb_pipetype_str
- * retrieve string representation of USB pipe type
- */
-static inline char *usb_pipetype_str(int pipe)
-{
- if (usb_pipeisoc(pipe))
- return "Isoc";
- if (usb_pipeint(pipe))
- return "Int";
- if (usb_pipecontrol(pipe))
- return "Ctrl";
- if (usb_pipebulk(pipe))
- return "Bulk";
- return "?";
-}
-
-/* dump_urb
- * write content of URB to syslog for debugging
- */
-static inline void dump_urb(enum debuglevel level, const char *tag,
- struct urb *urb)
-{
-#ifdef CONFIG_GIGASET_DEBUG
- int i;
- gig_dbg(level, "%s urb(0x%08lx)->{", tag, (unsigned long) urb);
- if (urb) {
- gig_dbg(level,
- " dev=0x%08lx, pipe=%s:EP%d/DV%d:%s, "
- "hcpriv=0x%08lx, transfer_flags=0x%x,",
- (unsigned long) urb->dev,
- usb_pipetype_str(urb->pipe),
- usb_pipeendpoint(urb->pipe), usb_pipedevice(urb->pipe),
- usb_pipein(urb->pipe) ? "in" : "out",
- (unsigned long) urb->hcpriv,
- urb->transfer_flags);
- gig_dbg(level,
- " transfer_buffer=0x%08lx[%d], actual_length=%d, "
- "setup_packet=0x%08lx,",
- (unsigned long) urb->transfer_buffer,
- urb->transfer_buffer_length, urb->actual_length,
- (unsigned long) urb->setup_packet);
- gig_dbg(level,
- " start_frame=%d, number_of_packets=%d, interval=%d, "
- "error_count=%d,",
- urb->start_frame, urb->number_of_packets, urb->interval,
- urb->error_count);
- gig_dbg(level,
- " context=0x%08lx, complete=0x%08lx, "
- "iso_frame_desc[]={",
- (unsigned long) urb->context,
- (unsigned long) urb->complete);
- for (i = 0; i < urb->number_of_packets; i++) {
- struct usb_iso_packet_descriptor *pifd
- = &urb->iso_frame_desc[i];
- gig_dbg(level,
- " {offset=%u, length=%u, actual_length=%u, "
- "status=%u}",
- pifd->offset, pifd->length, pifd->actual_length,
- pifd->status);
- }
- }
- gig_dbg(level, "}}");
-#endif
-}
-
-/* read/set modem control bits etc. (m10x only) */
-static int gigaset_set_modem_ctrl(struct cardstate *cs, unsigned old_state,
- unsigned new_state)
-{
- return -EINVAL;
-}
-
-static int gigaset_baud_rate(struct cardstate *cs, unsigned cflag)
-{
- return -EINVAL;
-}
-
-static int gigaset_set_line_ctrl(struct cardstate *cs, unsigned cflag)
-{
- return -EINVAL;
-}
-
-/* set/clear bits in base connection state, return previous state
- */
-static inline int update_basstate(struct bas_cardstate *ucs,
- int set, int clear)
-{
- unsigned long flags;
- int state;
-
- spin_lock_irqsave(&ucs->lock, flags);
- state = ucs->basstate;
- ucs->basstate = (state & ~clear) | set;
- spin_unlock_irqrestore(&ucs->lock, flags);
- return state;
-}
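
update_basstate() is a locked read-modify-write that returns the previous bitmap, letting a caller claim a flag and detect a prior owner in one step (as atread_submit() later does with BS_ATRDPEND). A simplified single-threaded model of the idiom (the driver does this under a spinlock):

#include <stdio.h>

#define BS_ATRDPEND 0x040

static int update_state(int *state, int set, int clear)
{
	int old = *state;

	*state = (old & ~clear) | set;
	return old;
}

int main(void)
{
	int basstate = 0;

	/* first caller claims the flag; second sees it already set */
	if (update_state(&basstate, BS_ATRDPEND, 0) & BS_ATRDPEND)
		printf("busy\n");
	if (update_state(&basstate, BS_ATRDPEND, 0) & BS_ATRDPEND)
		printf("busy\n");	/* printed once */
	return 0;
}
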
-
-/* error_hangup
- * hang up any existing connection because of an unrecoverable error
- * This function may be called from any context and takes care of scheduling
- * the necessary actions for execution outside of interrupt context.
- * cs->lock must not be held.
- * argument:
- * B channel control structure
- */
-static inline void error_hangup(struct bc_state *bcs)
-{
- struct cardstate *cs = bcs->cs;
-
- gigaset_add_event(cs, &bcs->at_state, EV_HUP, NULL, 0, NULL);
- gigaset_schedule_event(cs);
-}
-
-/* error_reset
- * reset Gigaset device because of an unrecoverable error
- * This function may be called from any context, and takes care of
- * scheduling the necessary actions for execution outside of interrupt context.
- * cs->hw.bas->lock must not be held.
- * argument:
- * controller state structure
- */
-static inline void error_reset(struct cardstate *cs)
-{
- /* reset interrupt pipe to recover (ignore errors) */
- update_basstate(cs->hw.bas, BS_RESETTING, 0);
- if (req_submit(cs->bcs, HD_RESET_INTERRUPT_PIPE, 0, BAS_TIMEOUT))
- /* submission failed, escalate to USB port reset */
- usb_queue_reset_device(cs->hw.bas->interface);
-}
-
-/* check_pending
- * check for completion of pending control request
- * parameter:
- * ucs hardware specific controller state structure
- */
-static void check_pending(struct bas_cardstate *ucs)
-{
- unsigned long flags;
-
- spin_lock_irqsave(&ucs->lock, flags);
- switch (ucs->pending) {
- case 0:
- break;
- case HD_OPEN_ATCHANNEL:
- if (ucs->basstate & BS_ATOPEN)
- ucs->pending = 0;
- break;
- case HD_OPEN_B1CHANNEL:
- if (ucs->basstate & BS_B1OPEN)
- ucs->pending = 0;
- break;
- case HD_OPEN_B2CHANNEL:
- if (ucs->basstate & BS_B2OPEN)
- ucs->pending = 0;
- break;
- case HD_CLOSE_ATCHANNEL:
- if (!(ucs->basstate & BS_ATOPEN))
- ucs->pending = 0;
- break;
- case HD_CLOSE_B1CHANNEL:
- if (!(ucs->basstate & BS_B1OPEN))
- ucs->pending = 0;
- break;
- case HD_CLOSE_B2CHANNEL:
- if (!(ucs->basstate & BS_B2OPEN))
- ucs->pending = 0;
- break;
- case HD_DEVICE_INIT_ACK: /* no reply expected */
- ucs->pending = 0;
- break;
- case HD_RESET_INTERRUPT_PIPE:
- if (!(ucs->basstate & BS_RESETTING))
- ucs->pending = 0;
- break;
- /*
- * HD_READ_ATMESSAGE and HD_WRITE_ATMESSAGE are handled separately
- * and should never end up here
- */
- default:
- dev_warn(&ucs->interface->dev,
- "unknown pending request 0x%02x cleared\n",
- ucs->pending);
- ucs->pending = 0;
- }
-
- if (!ucs->pending)
- del_timer(&ucs->timer_ctrl);
-
- spin_unlock_irqrestore(&ucs->lock, flags);
-}
-
-/* cmd_in_timeout
- * timeout routine for command input request
- * argument:
- * controller state structure
- */
-static void cmd_in_timeout(struct timer_list *t)
-{
- struct bas_cardstate *ucs = from_timer(ucs, t, timer_cmd_in);
- struct cardstate *cs = ucs->cs;
- int rc;
-
- if (!ucs->rcvbuf_size) {
- gig_dbg(DEBUG_USBREQ, "%s: no receive in progress", __func__);
- return;
- }
-
- if (ucs->retry_cmd_in++ >= BAS_RETRY) {
- dev_err(cs->dev,
- "control read: timeout, giving up after %d tries\n",
- ucs->retry_cmd_in);
- kfree(ucs->rcvbuf);
- ucs->rcvbuf = NULL;
- ucs->rcvbuf_size = 0;
- error_reset(cs);
- return;
- }
-
- gig_dbg(DEBUG_USBREQ, "%s: timeout, retry %d",
- __func__, ucs->retry_cmd_in);
- rc = atread_submit(cs, BAS_TIMEOUT);
- if (rc < 0) {
- kfree(ucs->rcvbuf);
- ucs->rcvbuf = NULL;
- ucs->rcvbuf_size = 0;
- if (rc != -ENODEV)
- error_reset(cs);
- }
-}
-
-/* read_ctrl_callback
- * USB completion handler for control pipe input
- * called by the USB subsystem in interrupt context
- * parameter:
- * urb USB request block
- * urb->context = inbuf structure for controller state
- */
-static void read_ctrl_callback(struct urb *urb)
-{
- struct inbuf_t *inbuf = urb->context;
- struct cardstate *cs = inbuf->cs;
- struct bas_cardstate *ucs = cs->hw.bas;
- int status = urb->status;
- unsigned numbytes;
- int rc;
-
- update_basstate(ucs, 0, BS_ATRDPEND);
- wake_up(&ucs->waitqueue);
- del_timer(&ucs->timer_cmd_in);
-
- switch (status) {
- case 0: /* normal completion */
- numbytes = urb->actual_length;
- if (unlikely(numbytes != ucs->rcvbuf_size)) {
- dev_warn(cs->dev,
- "control read: received %d chars, expected %d\n",
- numbytes, ucs->rcvbuf_size);
- if (numbytes > ucs->rcvbuf_size)
- numbytes = ucs->rcvbuf_size;
- }
-
- /* copy received bytes to inbuf, notify event layer */
- if (gigaset_fill_inbuf(inbuf, ucs->rcvbuf, numbytes)) {
- gig_dbg(DEBUG_INTR, "%s-->BH", __func__);
- gigaset_schedule_event(cs);
- }
- break;
-
- case -ENOENT: /* cancelled */
- case -ECONNRESET: /* cancelled (async) */
- case -EINPROGRESS: /* pending */
- case -ENODEV: /* device removed */
- case -ESHUTDOWN: /* device shut down */
- /* no further action necessary */
- gig_dbg(DEBUG_USBREQ, "%s: %s",
- __func__, get_usb_statmsg(status));
- break;
-
- default: /* other errors: retry */
- if (ucs->retry_cmd_in++ < BAS_RETRY) {
- gig_dbg(DEBUG_USBREQ, "%s: %s, retry %d", __func__,
- get_usb_statmsg(status), ucs->retry_cmd_in);
- rc = atread_submit(cs, BAS_TIMEOUT);
- if (rc >= 0)
- /* successfully resubmitted, skip freeing */
- return;
- if (rc == -ENODEV)
- /* disconnect, no further action necessary */
- break;
- }
- dev_err(cs->dev, "control read: %s, giving up after %d tries\n",
- get_usb_statmsg(status), ucs->retry_cmd_in);
- error_reset(cs);
- }
-
- /* read finished, free buffer */
- kfree(ucs->rcvbuf);
- ucs->rcvbuf = NULL;
- ucs->rcvbuf_size = 0;
-}
-
-/* atread_submit
- * submit an HD_READ_ATMESSAGE command URB and optionally start a timeout
- * parameters:
- * cs controller state structure
- * timeout timeout in 1/10 sec., 0: none
- * return value:
- * 0 on success
- * -EBUSY if another request is pending
- * any URB submission error code
- */
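-/*
- * Note (summary): ucs->rcvbuf and ucs->rcvbuf_size are set up by the
- * caller before this is invoked - normally to the length announced by
- * the base in HD_RECEIVEATDATA_ACK, or to a zero-length buffer when
- * used as a recovery probe from gigaset_write_cmd(). The BS_ATRDPEND
- * flag serializes these reads.
- */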
-static int atread_submit(struct cardstate *cs, int timeout)
-{
- struct bas_cardstate *ucs = cs->hw.bas;
- int basstate;
- int ret;
-
- gig_dbg(DEBUG_USBREQ, "-------> HD_READ_ATMESSAGE (%d)",
- ucs->rcvbuf_size);
-
- basstate = update_basstate(ucs, BS_ATRDPEND, 0);
- if (basstate & BS_ATRDPEND) {
- dev_err(cs->dev,
- "could not submit HD_READ_ATMESSAGE: URB busy\n");
- return -EBUSY;
- }
-
- if (basstate & BS_SUSPEND) {
- dev_notice(cs->dev,
- "HD_READ_ATMESSAGE not submitted, "
- "suspend in progress\n");
- update_basstate(ucs, 0, BS_ATRDPEND);
- /* treat like disconnect */
- return -ENODEV;
- }
-
- ucs->dr_cmd_in.bRequestType = IN_VENDOR_REQ;
- ucs->dr_cmd_in.bRequest = HD_READ_ATMESSAGE;
- ucs->dr_cmd_in.wValue = 0;
- ucs->dr_cmd_in.wIndex = 0;
- ucs->dr_cmd_in.wLength = cpu_to_le16(ucs->rcvbuf_size);
- usb_fill_control_urb(ucs->urb_cmd_in, ucs->udev,
- usb_rcvctrlpipe(ucs->udev, 0),
- (unsigned char *) &ucs->dr_cmd_in,
- ucs->rcvbuf, ucs->rcvbuf_size,
- read_ctrl_callback, cs->inbuf);
-
- ret = usb_submit_urb(ucs->urb_cmd_in, GFP_ATOMIC);
- if (ret != 0) {
- update_basstate(ucs, 0, BS_ATRDPEND);
- dev_err(cs->dev, "could not submit HD_READ_ATMESSAGE: %s\n",
- get_usb_rcmsg(ret));
- return ret;
- }
-
- if (timeout > 0) {
- gig_dbg(DEBUG_USBREQ, "setting timeout of %d/10 secs", timeout);
- mod_timer(&ucs->timer_cmd_in, jiffies + timeout * HZ / 10);
- }
- return 0;
-}
-
-/* int_in_work
- * workqueue routine to clear halt on interrupt in endpoint
- */
-static void int_in_work(struct work_struct *work)
-{
- struct bas_cardstate *ucs =
- container_of(work, struct bas_cardstate, int_in_wq);
- struct urb *urb = ucs->urb_int_in;
- struct cardstate *cs = urb->context;
- int rc;
-
- /* clear halt condition */
- rc = usb_clear_halt(ucs->udev, urb->pipe);
- gig_dbg(DEBUG_USBREQ, "clear_halt: %s", get_usb_rcmsg(rc));
- if (rc == 0)
- /* success, resubmit interrupt read URB */
- rc = usb_submit_urb(urb, GFP_ATOMIC);
-
- switch (rc) {
- case 0: /* success */
- case -ENODEV: /* device gone */
- case -EINVAL: /* URB already resubmitted, or terminal badness */
- break;
- default: /* failure: try to recover by resetting the device */
- dev_err(cs->dev, "clear halt failed: %s\n", get_usb_rcmsg(rc));
- rc = usb_lock_device_for_reset(ucs->udev, ucs->interface);
- if (rc == 0) {
- rc = usb_reset_device(ucs->udev);
- usb_unlock_device(ucs->udev);
- }
- }
- ucs->retry_int_in = 0;
-}
-
-/* int_in_resubmit
- * timer routine for interrupt read delayed resubmit
- * argument:
- *	t	timer structure embedded in the controller state
- */
-static void int_in_resubmit(struct timer_list *t)
-{
- struct bas_cardstate *ucs = from_timer(ucs, t, timer_int_in);
- struct cardstate *cs = ucs->cs;
- int rc;
-
- if (ucs->retry_int_in++ >= BAS_RETRY) {
- dev_err(cs->dev, "interrupt read: giving up after %d tries\n",
- ucs->retry_int_in);
- usb_queue_reset_device(ucs->interface);
- return;
- }
-
- gig_dbg(DEBUG_USBREQ, "%s: retry %d", __func__, ucs->retry_int_in);
- rc = usb_submit_urb(ucs->urb_int_in, GFP_ATOMIC);
- if (rc != 0 && rc != -ENODEV) {
- dev_err(cs->dev, "could not resubmit interrupt URB: %s\n",
- get_usb_rcmsg(rc));
- usb_queue_reset_device(ucs->interface);
- }
-}
-
-/* read_int_callback
- * USB completion handler for interrupt pipe input
- * called by the USB subsystem in interrupt context
- * parameter:
- * urb USB request block
- * urb->context = controller state structure
- */
-static void read_int_callback(struct urb *urb)
-{
- struct cardstate *cs = urb->context;
- struct bas_cardstate *ucs = cs->hw.bas;
- struct bc_state *bcs;
- int status = urb->status;
- unsigned long flags;
- int rc;
- unsigned l;
- int channel;
-
- switch (status) {
- case 0: /* success */
- ucs->retry_int_in = 0;
- break;
- case -EPIPE: /* endpoint stalled */
- schedule_work(&ucs->int_in_wq);
- /* fall through */
- case -ENOENT: /* cancelled */
- case -ECONNRESET: /* cancelled (async) */
- case -EINPROGRESS: /* pending */
- case -ENODEV: /* device removed */
- case -ESHUTDOWN: /* device shut down */
- /* no further action necessary */
- gig_dbg(DEBUG_USBREQ, "%s: %s",
- __func__, get_usb_statmsg(status));
- return;
- case -EPROTO: /* protocol error or unplug */
- case -EILSEQ:
- case -ETIME:
- /* resubmit after delay */
- gig_dbg(DEBUG_USBREQ, "%s: %s",
- __func__, get_usb_statmsg(status));
- mod_timer(&ucs->timer_int_in, jiffies + HZ / 10);
- return;
- default: /* other errors: just resubmit */
- dev_warn(cs->dev, "interrupt read: %s\n",
- get_usb_statmsg(status));
- goto resubmit;
- }
-
- /* drop incomplete packets even if the missing bytes wouldn't matter */
- if (unlikely(urb->actual_length < IP_MSGSIZE)) {
- dev_warn(cs->dev, "incomplete interrupt packet (%d bytes)\n",
- urb->actual_length);
- goto resubmit;
- }
-
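-	/*
-	 * Note: each interrupt packet is IP_MSGSIZE bytes long;
-	 * byte 0 carries the HD_* message code, bytes 1 and 2 form a
-	 * 16-bit little-endian parameter (a length or flow control
-	 * value, depending on the message).
-	 */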
- l = (unsigned) ucs->int_in_buf[1] +
- (((unsigned) ucs->int_in_buf[2]) << 8);
-
- gig_dbg(DEBUG_USBREQ, "<-------%d: 0x%02x (%u [0x%02x 0x%02x])",
- urb->actual_length, (int)ucs->int_in_buf[0], l,
- (int)ucs->int_in_buf[1], (int)ucs->int_in_buf[2]);
-
- channel = 0;
-
- switch (ucs->int_in_buf[0]) {
- case HD_DEVICE_INIT_OK:
- update_basstate(ucs, BS_INIT, 0);
- break;
-
- case HD_READY_SEND_ATDATA:
- del_timer(&ucs->timer_atrdy);
- update_basstate(ucs, BS_ATREADY, BS_ATTIMER);
- start_cbsend(cs);
- break;
-
- case HD_OPEN_B2CHANNEL_ACK:
- ++channel;
- /* fall through */
- case HD_OPEN_B1CHANNEL_ACK:
- bcs = cs->bcs + channel;
- update_basstate(ucs, BS_B1OPEN << channel, 0);
- gigaset_bchannel_up(bcs);
- break;
-
- case HD_OPEN_ATCHANNEL_ACK:
- update_basstate(ucs, BS_ATOPEN, 0);
- start_cbsend(cs);
- break;
-
- case HD_CLOSE_B2CHANNEL_ACK:
- ++channel;
- /* fall through */
- case HD_CLOSE_B1CHANNEL_ACK:
- bcs = cs->bcs + channel;
- update_basstate(ucs, 0, BS_B1OPEN << channel);
- stopurbs(bcs->hw.bas);
- gigaset_bchannel_down(bcs);
- break;
-
- case HD_CLOSE_ATCHANNEL_ACK:
- update_basstate(ucs, 0, BS_ATOPEN);
- break;
-
- case HD_B2_FLOW_CONTROL:
- ++channel;
- /* fall through */
- case HD_B1_FLOW_CONTROL:
- bcs = cs->bcs + channel;
- atomic_add((l - BAS_NORMFRAME) * BAS_CORRFRAMES,
- &bcs->hw.bas->corrbytes);
- gig_dbg(DEBUG_ISO,
- "Flow control (channel %d, sub %d): 0x%02x => %d",
- channel, bcs->hw.bas->numsub, l,
- atomic_read(&bcs->hw.bas->corrbytes));
- break;
-
- case HD_RECEIVEATDATA_ACK: /* AT response ready to be received */
- if (!l) {
- dev_warn(cs->dev,
- "HD_RECEIVEATDATA_ACK with length 0 ignored\n");
- break;
- }
- spin_lock_irqsave(&cs->lock, flags);
- if (ucs->basstate & BS_ATRDPEND) {
- spin_unlock_irqrestore(&cs->lock, flags);
- dev_warn(cs->dev,
- "HD_RECEIVEATDATA_ACK(%d) during HD_READ_ATMESSAGE(%d) ignored\n",
- l, ucs->rcvbuf_size);
- break;
- }
- if (ucs->rcvbuf_size) {
- /* throw away previous buffer - we have no queue */
- dev_err(cs->dev,
- "receive AT data overrun, %d bytes lost\n",
- ucs->rcvbuf_size);
- kfree(ucs->rcvbuf);
- ucs->rcvbuf_size = 0;
- }
- ucs->rcvbuf = kmalloc(l, GFP_ATOMIC);
- if (ucs->rcvbuf == NULL) {
- spin_unlock_irqrestore(&cs->lock, flags);
- dev_err(cs->dev, "out of memory receiving AT data\n");
- break;
- }
- ucs->rcvbuf_size = l;
- ucs->retry_cmd_in = 0;
- rc = atread_submit(cs, BAS_TIMEOUT);
- if (rc < 0) {
- kfree(ucs->rcvbuf);
- ucs->rcvbuf = NULL;
- ucs->rcvbuf_size = 0;
- }
- spin_unlock_irqrestore(&cs->lock, flags);
- if (rc < 0 && rc != -ENODEV)
- error_reset(cs);
- break;
-
- case HD_RESET_INTERRUPT_PIPE_ACK:
- update_basstate(ucs, 0, BS_RESETTING);
- dev_notice(cs->dev, "interrupt pipe reset\n");
- break;
-
- case HD_SUSPEND_END:
- gig_dbg(DEBUG_USBREQ, "HD_SUSPEND_END");
- break;
-
- default:
- dev_warn(cs->dev,
- "unknown Gigaset signal 0x%02x (%u) ignored\n",
- (int) ucs->int_in_buf[0], l);
- }
-
- check_pending(ucs);
- wake_up(&ucs->waitqueue);
-
-resubmit:
- rc = usb_submit_urb(urb, GFP_ATOMIC);
- if (unlikely(rc != 0 && rc != -ENODEV)) {
- dev_err(cs->dev, "could not resubmit interrupt URB: %s\n",
- get_usb_rcmsg(rc));
- error_reset(cs);
- }
-}
-
-/* read_iso_callback
- * USB completion handler for B channel isochronous input
- * called by the USB subsystem in interrupt context
- * parameter:
- * urb USB request block of completed request
- * urb->context = bc_state structure
- */
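-/*
- * Note (summary): there is only a single hand-over slot
- * (ubc->isoindone) between this completion handler and the receive
- * tasklet. If the tasklet has not yet picked up the previous URB,
- * the new URB's data is dropped, the loss is recorded in loststatus
- * and isoinlost, and the URB is resubmitted immediately so that
- * reception continues.
- */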
-static void read_iso_callback(struct urb *urb)
-{
- struct bc_state *bcs;
- struct bas_bc_state *ubc;
- int status = urb->status;
- unsigned long flags;
- int i, rc;
-
- /* status codes not worth bothering the tasklet with */
- if (unlikely(status == -ENOENT ||
- status == -ECONNRESET ||
- status == -EINPROGRESS ||
- status == -ENODEV ||
- status == -ESHUTDOWN)) {
- gig_dbg(DEBUG_ISO, "%s: %s",
- __func__, get_usb_statmsg(status));
- return;
- }
-
- bcs = urb->context;
- ubc = bcs->hw.bas;
-
- spin_lock_irqsave(&ubc->isoinlock, flags);
- if (likely(ubc->isoindone == NULL)) {
- /* pass URB to tasklet */
- ubc->isoindone = urb;
- ubc->isoinstatus = status;
- tasklet_hi_schedule(&ubc->rcvd_tasklet);
- } else {
- /* tasklet still busy, drop data and resubmit URB */
- gig_dbg(DEBUG_ISO, "%s: overrun", __func__);
- ubc->loststatus = status;
- for (i = 0; i < BAS_NUMFRAMES; i++) {
- ubc->isoinlost += urb->iso_frame_desc[i].actual_length;
- if (unlikely(urb->iso_frame_desc[i].status != 0 &&
- urb->iso_frame_desc[i].status != -EINPROGRESS))
- ubc->loststatus = urb->iso_frame_desc[i].status;
- urb->iso_frame_desc[i].status = 0;
- urb->iso_frame_desc[i].actual_length = 0;
- }
- if (likely(ubc->running)) {
- /* urb->dev is clobbered by USB subsystem */
- urb->dev = bcs->cs->hw.bas->udev;
- urb->transfer_flags = URB_ISO_ASAP;
- urb->number_of_packets = BAS_NUMFRAMES;
- rc = usb_submit_urb(urb, GFP_ATOMIC);
- if (unlikely(rc != 0 && rc != -ENODEV)) {
- dev_err(bcs->cs->dev,
- "could not resubmit isoc read URB: %s\n",
- get_usb_rcmsg(rc));
- dump_urb(DEBUG_ISO, "isoc read", urb);
- error_hangup(bcs);
- }
- }
- }
- spin_unlock_irqrestore(&ubc->isoinlock, flags);
-}
-
-/* write_iso_callback
- * USB completion handler for B channel isochronous output
- * called by the USB subsystem in interrupt context
- * parameter:
- * urb USB request block of completed request
- * urb->context = isow_urbctx_t structure
- */
-static void write_iso_callback(struct urb *urb)
-{
- struct isow_urbctx_t *ucx;
- struct bas_bc_state *ubc;
- int status = urb->status;
- unsigned long flags;
-
- /* status codes not worth bothering the tasklet with */
- if (unlikely(status == -ENOENT ||
- status == -ECONNRESET ||
- status == -EINPROGRESS ||
- status == -ENODEV ||
- status == -ESHUTDOWN)) {
- gig_dbg(DEBUG_ISO, "%s: %s",
- __func__, get_usb_statmsg(status));
- return;
- }
-
- /* pass URB context to tasklet */
- ucx = urb->context;
- ubc = ucx->bcs->hw.bas;
- ucx->status = status;
-
- spin_lock_irqsave(&ubc->isooutlock, flags);
- ubc->isooutovfl = ubc->isooutdone;
- ubc->isooutdone = ucx;
- spin_unlock_irqrestore(&ubc->isooutlock, flags);
- tasklet_hi_schedule(&ubc->sent_tasklet);
-}
-
-/* starturbs
- * prepare and submit USB request blocks for isochronous input and output
- * argument:
- * B channel control structure
- * return value:
- * 0 on success
- * < 0 on error (no URBs submitted)
- */
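-/*
- * Note (summary): all BAS_INURBS input URBs are submitted right away,
- * each carrying BAS_NUMFRAMES isochronous frames. The output URBs are
- * prepared for flag idling, but one of them is deliberately held back
- * as the free URB from which the send tasklet keeps transmission
- * going.
- */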
-static int starturbs(struct bc_state *bcs)
-{
- struct usb_device *udev = bcs->cs->hw.bas->udev;
- struct bas_bc_state *ubc = bcs->hw.bas;
- struct urb *urb;
- int j, k;
- int rc;
-
- /* initialize L2 reception */
- if (bcs->proto2 == L2_HDLC)
- bcs->inputstate |= INS_flag_hunt;
-
- /* submit all isochronous input URBs */
- ubc->running = 1;
- for (k = 0; k < BAS_INURBS; k++) {
- urb = ubc->isoinurbs[k];
- if (!urb) {
- rc = -EFAULT;
- goto error;
- }
- usb_fill_int_urb(urb, udev,
- usb_rcvisocpipe(udev, 3 + 2 * bcs->channel),
- ubc->isoinbuf + k * BAS_INBUFSIZE,
- BAS_INBUFSIZE, read_iso_callback, bcs,
- BAS_FRAMETIME);
-
- urb->transfer_flags = URB_ISO_ASAP;
- urb->number_of_packets = BAS_NUMFRAMES;
- for (j = 0; j < BAS_NUMFRAMES; j++) {
- urb->iso_frame_desc[j].offset = j * BAS_MAXFRAME;
- urb->iso_frame_desc[j].length = BAS_MAXFRAME;
- urb->iso_frame_desc[j].status = 0;
- urb->iso_frame_desc[j].actual_length = 0;
- }
-
- dump_urb(DEBUG_ISO, "Initial isoc read", urb);
- rc = usb_submit_urb(urb, GFP_ATOMIC);
- if (rc != 0)
- goto error;
- }
-
- /* initialize L2 transmission */
- gigaset_isowbuf_init(ubc->isooutbuf, PPP_FLAG);
-
- /* set up isochronous output URBs for flag idling */
- for (k = 0; k < BAS_OUTURBS; ++k) {
- urb = ubc->isoouturbs[k].urb;
- if (!urb) {
- rc = -EFAULT;
- goto error;
- }
- usb_fill_int_urb(urb, udev,
- usb_sndisocpipe(udev, 4 + 2 * bcs->channel),
- ubc->isooutbuf->data,
- sizeof(ubc->isooutbuf->data),
- write_iso_callback, &ubc->isoouturbs[k],
- BAS_FRAMETIME);
-
- urb->transfer_flags = URB_ISO_ASAP;
- urb->number_of_packets = BAS_NUMFRAMES;
- for (j = 0; j < BAS_NUMFRAMES; ++j) {
- urb->iso_frame_desc[j].offset = BAS_OUTBUFSIZE;
- urb->iso_frame_desc[j].length = BAS_NORMFRAME;
- urb->iso_frame_desc[j].status = 0;
- urb->iso_frame_desc[j].actual_length = 0;
- }
- ubc->isoouturbs[k].limit = -1;
- }
-
- /* keep one URB free, submit the others */
- for (k = 0; k < BAS_OUTURBS - 1; ++k) {
-		dump_urb(DEBUG_ISO, "Initial isoc write", ubc->isoouturbs[k].urb);
- rc = usb_submit_urb(ubc->isoouturbs[k].urb, GFP_ATOMIC);
- if (rc != 0)
- goto error;
- }
- dump_urb(DEBUG_ISO, "Initial isoc write (free)", urb);
- ubc->isooutfree = &ubc->isoouturbs[BAS_OUTURBS - 1];
- ubc->isooutdone = ubc->isooutovfl = NULL;
- return 0;
-error:
- stopurbs(ubc);
- return rc;
-}
-
-/* stopurbs
- * cancel the USB request blocks for isochronous input and output
- * errors are silently ignored
- * argument:
- * B channel control structure
- */
-static void stopurbs(struct bas_bc_state *ubc)
-{
- int k, rc;
-
- ubc->running = 0;
-
- for (k = 0; k < BAS_INURBS; ++k) {
- rc = usb_unlink_urb(ubc->isoinurbs[k]);
- gig_dbg(DEBUG_ISO,
- "%s: isoc input URB %d unlinked, result = %s",
- __func__, k, get_usb_rcmsg(rc));
- }
-
- for (k = 0; k < BAS_OUTURBS; ++k) {
- rc = usb_unlink_urb(ubc->isoouturbs[k].urb);
- gig_dbg(DEBUG_ISO,
- "%s: isoc output URB %d unlinked, result = %s",
- __func__, k, get_usb_rcmsg(rc));
- }
-}
-
-/* Isochronous Write - Bottom Half */
-/* =============================== */
-
-/* submit_iso_write_urb
- * fill and submit the next isochronous write URB
- * parameters:
- * ucx context structure containing URB
- * return value:
- * number of frames submitted in URB
- * 0 if URB not submitted because no data available (isooutbuf busy)
- * error code < 0 on error
- */
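-/*
- * Note (summary): every frame starts out at BAS_NORMFRAME bytes;
- * ubc->corrbytes accumulates the corrections requested by the base
- * via HD_B1/B2_FLOW_CONTROL messages, and each frame length is
- * adjusted by that amount, clamped to the range
- * [BAS_LOWFRAME, BAS_HIGHFRAME].
- */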
-static int submit_iso_write_urb(struct isow_urbctx_t *ucx)
-{
- struct urb *urb = ucx->urb;
- struct bas_bc_state *ubc = ucx->bcs->hw.bas;
- struct usb_iso_packet_descriptor *ifd;
- int corrbytes, nframe, rc;
-
- /* urb->dev is clobbered by USB subsystem */
- urb->dev = ucx->bcs->cs->hw.bas->udev;
- urb->transfer_flags = URB_ISO_ASAP;
- urb->transfer_buffer = ubc->isooutbuf->data;
- urb->transfer_buffer_length = sizeof(ubc->isooutbuf->data);
-
- for (nframe = 0; nframe < BAS_NUMFRAMES; nframe++) {
- ifd = &urb->iso_frame_desc[nframe];
-
- /* compute frame length according to flow control */
- ifd->length = BAS_NORMFRAME;
- corrbytes = atomic_read(&ubc->corrbytes);
- if (corrbytes != 0) {
- gig_dbg(DEBUG_ISO, "%s: corrbytes=%d",
- __func__, corrbytes);
- if (corrbytes > BAS_HIGHFRAME - BAS_NORMFRAME)
- corrbytes = BAS_HIGHFRAME - BAS_NORMFRAME;
- else if (corrbytes < BAS_LOWFRAME - BAS_NORMFRAME)
- corrbytes = BAS_LOWFRAME - BAS_NORMFRAME;
- ifd->length += corrbytes;
- atomic_add(-corrbytes, &ubc->corrbytes);
- }
-
- /* retrieve block of data to send */
- rc = gigaset_isowbuf_getbytes(ubc->isooutbuf, ifd->length);
- if (rc < 0) {
- if (rc == -EBUSY) {
- gig_dbg(DEBUG_ISO,
- "%s: buffer busy at frame %d",
- __func__, nframe);
- /* tasklet will be restarted from
- gigaset_isoc_send_skb() */
- } else {
- dev_err(ucx->bcs->cs->dev,
- "%s: buffer error %d at frame %d\n",
- __func__, rc, nframe);
- return rc;
- }
- break;
- }
- ifd->offset = rc;
- ucx->limit = ubc->isooutbuf->nextread;
- ifd->status = 0;
- ifd->actual_length = 0;
- }
- if (unlikely(nframe == 0))
- return 0; /* no data to send */
- urb->number_of_packets = nframe;
-
- rc = usb_submit_urb(urb, GFP_ATOMIC);
- if (unlikely(rc)) {
- if (rc == -ENODEV)
- /* device removed - give up silently */
- gig_dbg(DEBUG_ISO, "%s: disconnected", __func__);
- else
- dev_err(ucx->bcs->cs->dev,
- "could not submit isoc write URB: %s\n",
- get_usb_rcmsg(rc));
- return rc;
- }
- ++ubc->numsub;
- return nframe;
-}
-
-/* write_iso_tasklet
- * tasklet scheduled when an isochronous output URB to the Gigaset device
- * has completed
- * parameter:
- * data B channel state structure
- */
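-/*
- * Note (summary): completed output URBs are handed over through three
- * single-slot pointers: isooutdone holds the most recently completed
- * context, isooutovfl a second completion that arrived before the
- * tasklet ran (treated as an underrun), and isooutfree an idle
- * context ready for resubmission.
- */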
-static void write_iso_tasklet(unsigned long data)
-{
- struct bc_state *bcs = (struct bc_state *) data;
- struct bas_bc_state *ubc = bcs->hw.bas;
- struct cardstate *cs = bcs->cs;
- struct isow_urbctx_t *done, *next, *ovfl;
- struct urb *urb;
- int status;
- struct usb_iso_packet_descriptor *ifd;
- unsigned long flags;
- int i;
- struct sk_buff *skb;
- int len;
- int rc;
-
- /* loop while completed URBs arrive in time */
- for (;;) {
- if (unlikely(!(ubc->running))) {
- gig_dbg(DEBUG_ISO, "%s: not running", __func__);
- return;
- }
-
- /* retrieve completed URBs */
- spin_lock_irqsave(&ubc->isooutlock, flags);
- done = ubc->isooutdone;
- ubc->isooutdone = NULL;
- ovfl = ubc->isooutovfl;
- ubc->isooutovfl = NULL;
- spin_unlock_irqrestore(&ubc->isooutlock, flags);
- if (ovfl) {
- dev_err(cs->dev, "isoc write underrun\n");
- error_hangup(bcs);
- break;
- }
- if (!done)
- break;
-
- /* submit free URB if available */
- spin_lock_irqsave(&ubc->isooutlock, flags);
- next = ubc->isooutfree;
- ubc->isooutfree = NULL;
- spin_unlock_irqrestore(&ubc->isooutlock, flags);
- if (next) {
- rc = submit_iso_write_urb(next);
- if (unlikely(rc <= 0 && rc != -ENODEV)) {
- /* could not submit URB, put it back */
- spin_lock_irqsave(&ubc->isooutlock, flags);
- if (ubc->isooutfree == NULL) {
- ubc->isooutfree = next;
- next = NULL;
- }
- spin_unlock_irqrestore(&ubc->isooutlock, flags);
- if (next) {
- /* couldn't put it back */
- dev_err(cs->dev,
- "losing isoc write URB\n");
- error_hangup(bcs);
- }
- }
- }
-
- /* process completed URB */
- urb = done->urb;
- status = done->status;
- switch (status) {
- case -EXDEV: /* partial completion */
- gig_dbg(DEBUG_ISO, "%s: URB partially completed",
- __func__);
-			/* fall through - treated the same as normal completion */
- case 0: /* normal completion */
- /* inspect individual frames
- * assumptions (for lack of documentation):
- * - actual_length bytes of first frame in error are
- * successfully sent
- * - all following frames are not sent at all
- */
- for (i = 0; i < BAS_NUMFRAMES; i++) {
- ifd = &urb->iso_frame_desc[i];
- if (ifd->status ||
- ifd->actual_length != ifd->length) {
- dev_warn(cs->dev,
- "isoc write: frame %d[%d/%d]: %s\n",
- i, ifd->actual_length,
- ifd->length,
- get_usb_statmsg(ifd->status));
- break;
- }
- }
- break;
- case -EPIPE: /* stall - probably underrun */
- dev_err(cs->dev, "isoc write: stalled\n");
- error_hangup(bcs);
- break;
- default: /* other errors */
- dev_warn(cs->dev, "isoc write: %s\n",
- get_usb_statmsg(status));
- }
-
- /* mark the write buffer area covered by this URB as free */
- if (done->limit >= 0)
- ubc->isooutbuf->read = done->limit;
-
- /* mark URB as free */
- spin_lock_irqsave(&ubc->isooutlock, flags);
- next = ubc->isooutfree;
- ubc->isooutfree = done;
- spin_unlock_irqrestore(&ubc->isooutlock, flags);
- if (next) {
- /* only one URB still active - resubmit one */
- rc = submit_iso_write_urb(next);
- if (unlikely(rc <= 0 && rc != -ENODEV)) {
- /* couldn't submit */
- error_hangup(bcs);
- }
- }
- }
-
- /* process queued SKBs */
- while ((skb = skb_dequeue(&bcs->squeue))) {
- /* copy to output buffer, doing L2 encapsulation */
- len = skb->len;
- if (gigaset_isoc_buildframe(bcs, skb->data, len) == -EAGAIN) {
- /* insufficient buffer space, push back onto queue */
- skb_queue_head(&bcs->squeue, skb);
- gig_dbg(DEBUG_ISO, "%s: skb requeued, qlen=%d",
- __func__, skb_queue_len(&bcs->squeue));
- break;
- }
- skb_pull(skb, len);
- gigaset_skb_sent(bcs, skb);
- dev_kfree_skb_any(skb);
- }
-}
-
-/* Isochronous Read - Bottom Half */
-/* ============================== */
-
-/* read_iso_tasklet
- * tasklet scheduled when an isochronous input URB from the Gigaset device
- * has completed
- * parameter:
- * data B channel state structure
- */
-static void read_iso_tasklet(unsigned long data)
-{
- struct bc_state *bcs = (struct bc_state *) data;
- struct bas_bc_state *ubc = bcs->hw.bas;
- struct cardstate *cs = bcs->cs;
- struct urb *urb;
- int status;
- struct usb_iso_packet_descriptor *ifd;
- char *rcvbuf;
- unsigned long flags;
- int totleft, numbytes, offset, frame, rc;
-
- /* loop while more completed URBs arrive in the meantime */
- for (;;) {
- /* retrieve URB */
- spin_lock_irqsave(&ubc->isoinlock, flags);
- urb = ubc->isoindone;
- if (!urb) {
- spin_unlock_irqrestore(&ubc->isoinlock, flags);
- return;
- }
- status = ubc->isoinstatus;
- ubc->isoindone = NULL;
- if (unlikely(ubc->loststatus != -EINPROGRESS)) {
- dev_warn(cs->dev,
- "isoc read overrun, URB dropped (status: %s, %d bytes)\n",
- get_usb_statmsg(ubc->loststatus),
- ubc->isoinlost);
- ubc->loststatus = -EINPROGRESS;
- }
- spin_unlock_irqrestore(&ubc->isoinlock, flags);
-
- if (unlikely(!(ubc->running))) {
- gig_dbg(DEBUG_ISO,
- "%s: channel not running, "
- "dropped URB with status: %s",
- __func__, get_usb_statmsg(status));
- return;
- }
-
- switch (status) {
- case 0: /* normal completion */
- break;
- case -EXDEV: /* inspect individual frames
- (we do that anyway) */
- gig_dbg(DEBUG_ISO, "%s: URB partially completed",
- __func__);
- break;
- case -ENOENT:
- case -ECONNRESET:
- case -EINPROGRESS:
- gig_dbg(DEBUG_ISO, "%s: %s",
- __func__, get_usb_statmsg(status));
- continue; /* -> skip */
- case -EPIPE:
- dev_err(cs->dev, "isoc read: stalled\n");
- error_hangup(bcs);
- continue; /* -> skip */
- default: /* other error */
- dev_warn(cs->dev, "isoc read: %s\n",
- get_usb_statmsg(status));
- goto error;
- }
-
- rcvbuf = urb->transfer_buffer;
- totleft = urb->actual_length;
- for (frame = 0; totleft > 0 && frame < BAS_NUMFRAMES; frame++) {
- ifd = &urb->iso_frame_desc[frame];
- numbytes = ifd->actual_length;
- switch (ifd->status) {
- case 0: /* success */
- break;
- case -EPROTO: /* protocol error or unplug */
- case -EILSEQ:
- case -ETIME:
- /* probably just disconnected, ignore */
- gig_dbg(DEBUG_ISO,
- "isoc read: frame %d[%d]: %s\n",
- frame, numbytes,
- get_usb_statmsg(ifd->status));
- break;
- default: /* other error */
- /* report, assume transferred bytes are ok */
- dev_warn(cs->dev,
- "isoc read: frame %d[%d]: %s\n",
- frame, numbytes,
- get_usb_statmsg(ifd->status));
- }
- if (unlikely(numbytes > BAS_MAXFRAME))
- dev_warn(cs->dev,
- "isoc read: frame %d[%d]: %s\n",
- frame, numbytes,
- "exceeds max frame size");
- if (unlikely(numbytes > totleft)) {
- dev_warn(cs->dev,
- "isoc read: frame %d[%d]: %s\n",
- frame, numbytes,
- "exceeds total transfer length");
- numbytes = totleft;
- }
- offset = ifd->offset;
- if (unlikely(offset + numbytes > BAS_INBUFSIZE)) {
- dev_warn(cs->dev,
- "isoc read: frame %d[%d]: %s\n",
- frame, numbytes,
- "exceeds end of buffer");
- numbytes = BAS_INBUFSIZE - offset;
- }
- gigaset_isoc_receive(rcvbuf + offset, numbytes, bcs);
- totleft -= numbytes;
- }
- if (unlikely(totleft > 0))
- dev_warn(cs->dev, "isoc read: %d data bytes missing\n",
- totleft);
-
-error:
- /* URB processed, resubmit */
- for (frame = 0; frame < BAS_NUMFRAMES; frame++) {
- urb->iso_frame_desc[frame].status = 0;
- urb->iso_frame_desc[frame].actual_length = 0;
- }
- /* urb->dev is clobbered by USB subsystem */
- urb->dev = bcs->cs->hw.bas->udev;
- urb->transfer_flags = URB_ISO_ASAP;
- urb->number_of_packets = BAS_NUMFRAMES;
- rc = usb_submit_urb(urb, GFP_ATOMIC);
- if (unlikely(rc != 0 && rc != -ENODEV)) {
- dev_err(cs->dev,
- "could not resubmit isoc read URB: %s\n",
- get_usb_rcmsg(rc));
- dump_urb(DEBUG_ISO, "resubmit isoc read", urb);
- error_hangup(bcs);
- }
- }
-}
-
-/* Channel Operations */
-/* ================== */
-
-/* req_timeout
- * timeout routine for control output request
- * argument:
- *	t	timer structure embedded in the controller state
- */
-static void req_timeout(struct timer_list *t)
-{
- struct bas_cardstate *ucs = from_timer(ucs, t, timer_ctrl);
- struct cardstate *cs = ucs->cs;
- int pending;
- unsigned long flags;
-
- check_pending(ucs);
-
- spin_lock_irqsave(&ucs->lock, flags);
- pending = ucs->pending;
- ucs->pending = 0;
- spin_unlock_irqrestore(&ucs->lock, flags);
-
- switch (pending) {
- case 0: /* no pending request */
- gig_dbg(DEBUG_USBREQ, "%s: no request pending", __func__);
- break;
-
- case HD_OPEN_ATCHANNEL:
- dev_err(cs->dev, "timeout opening AT channel\n");
- error_reset(cs);
- break;
-
- case HD_OPEN_B1CHANNEL:
- dev_err(cs->dev, "timeout opening channel 1\n");
- error_hangup(&cs->bcs[0]);
- break;
-
- case HD_OPEN_B2CHANNEL:
- dev_err(cs->dev, "timeout opening channel 2\n");
- error_hangup(&cs->bcs[1]);
- break;
-
- case HD_CLOSE_ATCHANNEL:
- dev_err(cs->dev, "timeout closing AT channel\n");
- error_reset(cs);
- break;
-
- case HD_CLOSE_B1CHANNEL:
- dev_err(cs->dev, "timeout closing channel 1\n");
- error_reset(cs);
- break;
-
- case HD_CLOSE_B2CHANNEL:
- dev_err(cs->dev, "timeout closing channel 2\n");
- error_reset(cs);
- break;
-
- case HD_RESET_INTERRUPT_PIPE:
- /* error recovery escalation */
- dev_err(cs->dev,
- "reset interrupt pipe timeout, attempting USB reset\n");
- usb_queue_reset_device(ucs->interface);
- break;
-
- default:
- dev_warn(cs->dev, "request 0x%02x timed out, clearing\n",
- pending);
- }
-
- wake_up(&ucs->waitqueue);
-}
-
-/* write_ctrl_callback
- * USB completion handler for control pipe output
- * called by the USB subsystem in interrupt context
- * parameter:
- * urb USB request block of completed request
- * urb->context = hardware specific controller state structure
- */
-static void write_ctrl_callback(struct urb *urb)
-{
- struct bas_cardstate *ucs = urb->context;
- int status = urb->status;
- int rc;
- unsigned long flags;
-
- /* check status */
- switch (status) {
- case 0: /* normal completion */
- spin_lock_irqsave(&ucs->lock, flags);
- switch (ucs->pending) {
- case HD_DEVICE_INIT_ACK: /* no reply expected */
- del_timer(&ucs->timer_ctrl);
- ucs->pending = 0;
- break;
- }
- spin_unlock_irqrestore(&ucs->lock, flags);
- return;
-
- case -ENOENT: /* cancelled */
- case -ECONNRESET: /* cancelled (async) */
- case -EINPROGRESS: /* pending */
- case -ENODEV: /* device removed */
- case -ESHUTDOWN: /* device shut down */
- /* ignore silently */
- gig_dbg(DEBUG_USBREQ, "%s: %s",
- __func__, get_usb_statmsg(status));
- break;
-
- default: /* any failure */
- /* don't retry if suspend requested */
- if (++ucs->retry_ctrl > BAS_RETRY ||
- (ucs->basstate & BS_SUSPEND)) {
- dev_err(&ucs->interface->dev,
- "control request 0x%02x failed: %s\n",
- ucs->dr_ctrl.bRequest,
- get_usb_statmsg(status));
- break; /* give up */
- }
- dev_notice(&ucs->interface->dev,
- "control request 0x%02x: %s, retry %d\n",
- ucs->dr_ctrl.bRequest, get_usb_statmsg(status),
- ucs->retry_ctrl);
- /* urb->dev is clobbered by USB subsystem */
- urb->dev = ucs->udev;
- rc = usb_submit_urb(urb, GFP_ATOMIC);
- if (unlikely(rc)) {
- dev_err(&ucs->interface->dev,
- "could not resubmit request 0x%02x: %s\n",
- ucs->dr_ctrl.bRequest, get_usb_rcmsg(rc));
- break;
- }
- /* resubmitted */
- return;
- }
-
- /* failed, clear pending request */
- spin_lock_irqsave(&ucs->lock, flags);
- del_timer(&ucs->timer_ctrl);
- ucs->pending = 0;
- spin_unlock_irqrestore(&ucs->lock, flags);
- wake_up(&ucs->waitqueue);
-}
-
-/* req_submit
- * submit a control output request without message buffer to the Gigaset base
- * and optionally start a timeout
- * parameters:
- * bcs B channel control structure
- * req control request code (HD_*)
- * val control request parameter value (set to 0 if unused)
- *	timeout	timeout in 1/10 sec. (0: no timeout)
- * return value:
- * 0 on success
- * -EBUSY if another request is pending
- * any URB submission error code
- */
-static int req_submit(struct bc_state *bcs, int req, int val, int timeout)
-{
- struct bas_cardstate *ucs = bcs->cs->hw.bas;
- int ret;
- unsigned long flags;
-
- gig_dbg(DEBUG_USBREQ, "-------> 0x%02x (%d)", req, val);
-
- spin_lock_irqsave(&ucs->lock, flags);
- if (ucs->pending) {
- spin_unlock_irqrestore(&ucs->lock, flags);
- dev_err(bcs->cs->dev,
- "submission of request 0x%02x failed: "
- "request 0x%02x still pending\n",
- req, ucs->pending);
- return -EBUSY;
- }
-
- ucs->dr_ctrl.bRequestType = OUT_VENDOR_REQ;
- ucs->dr_ctrl.bRequest = req;
- ucs->dr_ctrl.wValue = cpu_to_le16(val);
- ucs->dr_ctrl.wIndex = 0;
- ucs->dr_ctrl.wLength = 0;
- usb_fill_control_urb(ucs->urb_ctrl, ucs->udev,
- usb_sndctrlpipe(ucs->udev, 0),
- (unsigned char *) &ucs->dr_ctrl, NULL, 0,
- write_ctrl_callback, ucs);
- ucs->retry_ctrl = 0;
- ret = usb_submit_urb(ucs->urb_ctrl, GFP_ATOMIC);
- if (unlikely(ret)) {
- dev_err(bcs->cs->dev, "could not submit request 0x%02x: %s\n",
- req, get_usb_rcmsg(ret));
- spin_unlock_irqrestore(&ucs->lock, flags);
- return ret;
- }
- ucs->pending = req;
-
- if (timeout > 0) {
- gig_dbg(DEBUG_USBREQ, "setting timeout of %d/10 secs", timeout);
- mod_timer(&ucs->timer_ctrl, jiffies + timeout * HZ / 10);
- }
-
- spin_unlock_irqrestore(&ucs->lock, flags);
- return 0;
-}
-
-/* gigaset_init_bchannel
- * called by common.c to connect a B channel
- * initialize isochronous I/O and tell the Gigaset base to open the channel
- * argument:
- * B channel control structure
- * return value:
- * 0 on success, error code < 0 on error
- */
-static int gigaset_init_bchannel(struct bc_state *bcs)
-{
- struct cardstate *cs = bcs->cs;
- int req, ret;
- unsigned long flags;
-
- spin_lock_irqsave(&cs->lock, flags);
- if (unlikely(!cs->connected)) {
- gig_dbg(DEBUG_USBREQ, "%s: not connected", __func__);
- spin_unlock_irqrestore(&cs->lock, flags);
- return -ENODEV;
- }
-
- if (cs->hw.bas->basstate & BS_SUSPEND) {
- dev_notice(cs->dev,
- "not starting isoc I/O, suspend in progress\n");
- spin_unlock_irqrestore(&cs->lock, flags);
- return -EHOSTUNREACH;
- }
-
- ret = starturbs(bcs);
- if (ret < 0) {
- spin_unlock_irqrestore(&cs->lock, flags);
- dev_err(cs->dev,
- "could not start isoc I/O for channel B%d: %s\n",
- bcs->channel + 1,
- ret == -EFAULT ? "null URB" : get_usb_rcmsg(ret));
- if (ret != -ENODEV)
- error_hangup(bcs);
- return ret;
- }
-
- req = bcs->channel ? HD_OPEN_B2CHANNEL : HD_OPEN_B1CHANNEL;
- ret = req_submit(bcs, req, 0, BAS_TIMEOUT);
- if (ret < 0) {
- dev_err(cs->dev, "could not open channel B%d\n",
- bcs->channel + 1);
- stopurbs(bcs->hw.bas);
- }
-
- spin_unlock_irqrestore(&cs->lock, flags);
- if (ret < 0 && ret != -ENODEV)
- error_hangup(bcs);
- return ret;
-}
-
-/* gigaset_close_bchannel
- * called by common.c to disconnect a B channel
- * tell the Gigaset base to close the channel
- * isochronous I/O is stopped and the LL notified when the
- * acknowledgement for the close arrives
- * argument:
- * B channel control structure
- * return value:
- * 0 on success, error code < 0 on error
- */
-static int gigaset_close_bchannel(struct bc_state *bcs)
-{
- struct cardstate *cs = bcs->cs;
- int req, ret;
- unsigned long flags;
-
- spin_lock_irqsave(&cs->lock, flags);
- if (unlikely(!cs->connected)) {
- spin_unlock_irqrestore(&cs->lock, flags);
- gig_dbg(DEBUG_USBREQ, "%s: not connected", __func__);
- return -ENODEV;
- }
-
- if (!(cs->hw.bas->basstate & (bcs->channel ? BS_B2OPEN : BS_B1OPEN))) {
- /* channel not running: just signal common.c */
- spin_unlock_irqrestore(&cs->lock, flags);
- gigaset_bchannel_down(bcs);
- return 0;
- }
-
- /* channel running: tell device to close it */
- req = bcs->channel ? HD_CLOSE_B2CHANNEL : HD_CLOSE_B1CHANNEL;
- ret = req_submit(bcs, req, 0, BAS_TIMEOUT);
- if (ret < 0)
- dev_err(cs->dev, "closing channel B%d failed\n",
- bcs->channel + 1);
-
- spin_unlock_irqrestore(&cs->lock, flags);
- return ret;
-}
-
-/* Device Operations */
-/* ================= */
-
-/* complete_cb
- * unqueue first command buffer from queue, waking any sleepers
- * must be called with cs->cmdlock held
- * parameter:
- * cs controller state structure
- */
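-/*
- * Note (summary): command buffers form a doubly linked list with
- * cs->cmdbuf as head and cs->lastcmdbuf as tail; cs->curlen caches
- * the length of the head buffer and cs->cmdbytes the total number of
- * queued bytes.
- */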
-static void complete_cb(struct cardstate *cs)
-{
- struct cmdbuf_t *cb = cs->cmdbuf;
-
- /* unqueue completed buffer */
- cs->cmdbytes -= cs->curlen;
- gig_dbg(DEBUG_OUTPUT, "write_command: sent %u bytes, %u left",
- cs->curlen, cs->cmdbytes);
- if (cb->next != NULL) {
- cs->cmdbuf = cb->next;
- cs->cmdbuf->prev = NULL;
- cs->curlen = cs->cmdbuf->len;
- } else {
- cs->cmdbuf = NULL;
- cs->lastcmdbuf = NULL;
- cs->curlen = 0;
- }
-
- if (cb->wake_tasklet)
- tasklet_schedule(cb->wake_tasklet);
-
- kfree(cb);
-}
-
-/* write_command_callback
- * USB completion handler for AT command transmission
- * called by the USB subsystem in interrupt context
- * parameter:
- * urb USB request block of completed request
- * urb->context = controller state structure
- */
-static void write_command_callback(struct urb *urb)
-{
- struct cardstate *cs = urb->context;
- struct bas_cardstate *ucs = cs->hw.bas;
- int status = urb->status;
- unsigned long flags;
-
- update_basstate(ucs, 0, BS_ATWRPEND);
- wake_up(&ucs->waitqueue);
-
- /* check status */
- switch (status) {
- case 0: /* normal completion */
- break;
- case -ENOENT: /* cancelled */
- case -ECONNRESET: /* cancelled (async) */
- case -EINPROGRESS: /* pending */
- case -ENODEV: /* device removed */
- case -ESHUTDOWN: /* device shut down */
- /* ignore silently */
- gig_dbg(DEBUG_USBREQ, "%s: %s",
- __func__, get_usb_statmsg(status));
- return;
- default: /* any failure */
- if (++ucs->retry_cmd_out > BAS_RETRY) {
- dev_warn(cs->dev,
- "command write: %s, "
- "giving up after %d retries\n",
- get_usb_statmsg(status),
- ucs->retry_cmd_out);
- break;
- }
- if (ucs->basstate & BS_SUSPEND) {
- dev_warn(cs->dev,
- "command write: %s, "
- "won't retry - suspend requested\n",
- get_usb_statmsg(status));
- break;
- }
- if (cs->cmdbuf == NULL) {
- dev_warn(cs->dev,
- "command write: %s, "
- "cannot retry - cmdbuf gone\n",
- get_usb_statmsg(status));
- break;
- }
- dev_notice(cs->dev, "command write: %s, retry %d\n",
- get_usb_statmsg(status), ucs->retry_cmd_out);
- if (atwrite_submit(cs, cs->cmdbuf->buf, cs->cmdbuf->len) >= 0)
- /* resubmitted - bypass regular exit block */
- return;
- /* command send failed, assume base still waiting */
- update_basstate(ucs, BS_ATREADY, 0);
- }
-
- spin_lock_irqsave(&cs->cmdlock, flags);
- if (cs->cmdbuf != NULL)
- complete_cb(cs);
- spin_unlock_irqrestore(&cs->cmdlock, flags);
-}
-
-/* atrdy_timeout
- * timeout routine for AT command transmission
- * argument:
- *	t	timer structure embedded in the controller state
- */
-static void atrdy_timeout(struct timer_list *t)
-{
- struct bas_cardstate *ucs = from_timer(ucs, t, timer_atrdy);
- struct cardstate *cs = ucs->cs;
-
- dev_warn(cs->dev, "timeout waiting for HD_READY_SEND_ATDATA\n");
-
- /* fake the missing signal - what else can I do? */
- update_basstate(ucs, BS_ATREADY, BS_ATTIMER);
- start_cbsend(cs);
-}
-
-/* atwrite_submit
- * submit an HD_WRITE_ATMESSAGE command URB
- * parameters:
- * cs controller state structure
- * buf buffer containing command to send
- * len length of command to send
- * return value:
- * 0 on success
- * -EBUSY if another request is pending
- * any URB submission error code
- */
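-/*
- * Note (summary): the base signals readiness for the next AT message
- * with HD_READY_SEND_ATDATA (BS_ATREADY). Submitting a write consumes
- * BS_ATREADY and arms the ATRDY timer (BS_ATTIMER); if the readiness
- * signal never arrives, atrdy_timeout() fakes it so that transmission
- * does not stall forever.
- */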
-static int atwrite_submit(struct cardstate *cs, unsigned char *buf, int len)
-{
- struct bas_cardstate *ucs = cs->hw.bas;
- int rc;
-
- gig_dbg(DEBUG_USBREQ, "-------> HD_WRITE_ATMESSAGE (%d)", len);
-
- if (update_basstate(ucs, BS_ATWRPEND, 0) & BS_ATWRPEND) {
- dev_err(cs->dev,
- "could not submit HD_WRITE_ATMESSAGE: URB busy\n");
- return -EBUSY;
- }
-
- ucs->dr_cmd_out.bRequestType = OUT_VENDOR_REQ;
- ucs->dr_cmd_out.bRequest = HD_WRITE_ATMESSAGE;
- ucs->dr_cmd_out.wValue = 0;
- ucs->dr_cmd_out.wIndex = 0;
- ucs->dr_cmd_out.wLength = cpu_to_le16(len);
- usb_fill_control_urb(ucs->urb_cmd_out, ucs->udev,
- usb_sndctrlpipe(ucs->udev, 0),
- (unsigned char *) &ucs->dr_cmd_out, buf, len,
- write_command_callback, cs);
- rc = usb_submit_urb(ucs->urb_cmd_out, GFP_ATOMIC);
- if (unlikely(rc)) {
- update_basstate(ucs, 0, BS_ATWRPEND);
- dev_err(cs->dev, "could not submit HD_WRITE_ATMESSAGE: %s\n",
- get_usb_rcmsg(rc));
- return rc;
- }
-
- /* submitted successfully, start timeout if necessary */
- if (!(update_basstate(ucs, BS_ATTIMER, BS_ATREADY) & BS_ATTIMER)) {
- gig_dbg(DEBUG_OUTPUT, "setting ATREADY timeout of %d/10 secs",
- ATRDY_TIMEOUT);
- mod_timer(&ucs->timer_atrdy, jiffies + ATRDY_TIMEOUT * HZ / 10);
- }
- return 0;
-}
-
-/* start_cbsend
- * start transmission of AT command queue if necessary
- * parameter:
- * cs controller state structure
- * return value:
- * 0 on success
- * error code < 0 on error
- */
-static int start_cbsend(struct cardstate *cs)
-{
- struct cmdbuf_t *cb;
- struct bas_cardstate *ucs = cs->hw.bas;
- unsigned long flags;
- int rc;
- int retval = 0;
-
- /* check if suspend requested */
- if (ucs->basstate & BS_SUSPEND) {
- gig_dbg(DEBUG_OUTPUT, "suspending");
- return -EHOSTUNREACH;
- }
-
- /* check if AT channel is open */
- if (!(ucs->basstate & BS_ATOPEN)) {
- gig_dbg(DEBUG_OUTPUT, "AT channel not open");
- rc = req_submit(cs->bcs, HD_OPEN_ATCHANNEL, 0, BAS_TIMEOUT);
- if (rc < 0) {
- /* flush command queue */
- spin_lock_irqsave(&cs->cmdlock, flags);
- while (cs->cmdbuf != NULL)
- complete_cb(cs);
- spin_unlock_irqrestore(&cs->cmdlock, flags);
- }
- return rc;
- }
-
- /* try to send first command in queue */
- spin_lock_irqsave(&cs->cmdlock, flags);
-
- while ((cb = cs->cmdbuf) != NULL && (ucs->basstate & BS_ATREADY)) {
- ucs->retry_cmd_out = 0;
- rc = atwrite_submit(cs, cb->buf, cb->len);
- if (unlikely(rc)) {
- retval = rc;
- complete_cb(cs);
- }
- }
-
- spin_unlock_irqrestore(&cs->cmdlock, flags);
- return retval;
-}
-
-/* gigaset_write_cmd
- * This function is called by the device independent part of the driver
- * to transmit an AT command string to the Gigaset device.
- * It encapsulates the device specific method for transmission over the
- * direct USB connection to the base.
- * The command string is added to the queue of commands to send, and
- * USB transmission is started if necessary.
- * parameters:
- * cs controller state structure
- * cb command buffer structure
- * return value:
- * number of bytes queued on success
- * error code < 0 on error
- */
-static int gigaset_write_cmd(struct cardstate *cs, struct cmdbuf_t *cb)
-{
- unsigned long flags;
- int rc;
-
- gigaset_dbg_buffer(cs->mstate != MS_LOCKED ?
- DEBUG_TRANSCMD : DEBUG_LOCKCMD,
- "CMD Transmit", cb->len, cb->buf);
-
- /* translate "+++" escape sequence sent as a single separate command
- * into "close AT channel" command for error recovery
- * The next command will reopen the AT channel automatically.
- */
- if (cb->len == 3 && !memcmp(cb->buf, "+++", 3)) {
- /* If an HD_RECEIVEATDATA_ACK message remains unhandled
- * because of an error, the base never sends another one.
- * The response channel is thus effectively blocked.
- * Closing and reopening the AT channel does *not* clear
- * this condition.
- * As a stopgap measure, submit a zero-length AT read
- * before closing the AT channel. This has the undocumented
- * effect of triggering a new HD_RECEIVEATDATA_ACK message
- * from the base if necessary.
- * The subsequent AT channel close then discards any pending
- * messages.
- */
- spin_lock_irqsave(&cs->lock, flags);
- if (!(cs->hw.bas->basstate & BS_ATRDPEND)) {
- kfree(cs->hw.bas->rcvbuf);
- cs->hw.bas->rcvbuf = NULL;
- cs->hw.bas->rcvbuf_size = 0;
- cs->hw.bas->retry_cmd_in = 0;
- atread_submit(cs, 0);
- }
- spin_unlock_irqrestore(&cs->lock, flags);
-
- rc = req_submit(cs->bcs, HD_CLOSE_ATCHANNEL, 0, BAS_TIMEOUT);
- if (cb->wake_tasklet)
- tasklet_schedule(cb->wake_tasklet);
- if (!rc)
- rc = cb->len;
- kfree(cb);
- return rc;
- }
-
- spin_lock_irqsave(&cs->cmdlock, flags);
- cb->prev = cs->lastcmdbuf;
- if (cs->lastcmdbuf)
- cs->lastcmdbuf->next = cb;
- else {
- cs->cmdbuf = cb;
- cs->curlen = cb->len;
- }
- cs->cmdbytes += cb->len;
- cs->lastcmdbuf = cb;
- spin_unlock_irqrestore(&cs->cmdlock, flags);
-
- spin_lock_irqsave(&cs->lock, flags);
- if (unlikely(!cs->connected)) {
- spin_unlock_irqrestore(&cs->lock, flags);
- gig_dbg(DEBUG_USBREQ, "%s: not connected", __func__);
- /* flush command queue */
- spin_lock_irqsave(&cs->cmdlock, flags);
- while (cs->cmdbuf != NULL)
- complete_cb(cs);
- spin_unlock_irqrestore(&cs->cmdlock, flags);
- return -ENODEV;
- }
- rc = start_cbsend(cs);
- spin_unlock_irqrestore(&cs->lock, flags);
- return rc < 0 ? rc : cb->len;
-}
-
-/* gigaset_write_room
- * tty_driver.write_room interface routine
- * return number of characters the driver will accept to be written via
- * gigaset_write_cmd
- * parameter:
- * controller state structure
- * return value:
- * number of characters
- */
-static int gigaset_write_room(struct cardstate *cs)
-{
- return IF_WRITEBUF;
-}
-
-/* gigaset_chars_in_buffer
- * tty_driver.chars_in_buffer interface routine
- * return number of characters waiting to be sent
- * parameter:
- * controller state structure
- * return value:
- * number of characters
- */
-static int gigaset_chars_in_buffer(struct cardstate *cs)
-{
- return cs->cmdbytes;
-}
-
-/* gigaset_brkchars
- * implementation of ioctl(GIGASET_BRKCHARS)
- * parameter:
- * controller state structure
- * return value:
- * -EINVAL (unimplemented function)
- */
-static int gigaset_brkchars(struct cardstate *cs, const unsigned char buf[6])
-{
- return -EINVAL;
-}
-
-
-/* Device Initialization/Shutdown */
-/* ============================== */
-
-/* Free hardware dependent part of the B channel structure
- * parameter:
- * bcs B channel structure
- */
-static void gigaset_freebcshw(struct bc_state *bcs)
-{
- struct bas_bc_state *ubc = bcs->hw.bas;
- int i;
-
- if (!ubc)
- return;
-
- /* kill URBs and tasklets before freeing - better safe than sorry */
- ubc->running = 0;
- gig_dbg(DEBUG_INIT, "%s: killing isoc URBs", __func__);
- for (i = 0; i < BAS_OUTURBS; ++i) {
- usb_kill_urb(ubc->isoouturbs[i].urb);
- usb_free_urb(ubc->isoouturbs[i].urb);
- }
- for (i = 0; i < BAS_INURBS; ++i) {
- usb_kill_urb(ubc->isoinurbs[i]);
- usb_free_urb(ubc->isoinurbs[i]);
- }
- tasklet_kill(&ubc->sent_tasklet);
- tasklet_kill(&ubc->rcvd_tasklet);
- kfree(ubc->isooutbuf);
- kfree(ubc);
- bcs->hw.bas = NULL;
-}
-
-/* Initialize hardware dependent part of the B channel structure
- * parameter:
- * bcs B channel structure
- * return value:
- * 0 on success, error code < 0 on failure
- */
-static int gigaset_initbcshw(struct bc_state *bcs)
-{
- int i;
- struct bas_bc_state *ubc;
-
- bcs->hw.bas = ubc = kmalloc(sizeof(struct bas_bc_state), GFP_KERNEL);
- if (!ubc) {
- pr_err("out of memory\n");
- return -ENOMEM;
- }
-
- ubc->running = 0;
- atomic_set(&ubc->corrbytes, 0);
- spin_lock_init(&ubc->isooutlock);
- for (i = 0; i < BAS_OUTURBS; ++i) {
- ubc->isoouturbs[i].urb = NULL;
- ubc->isoouturbs[i].bcs = bcs;
- }
- ubc->isooutdone = ubc->isooutfree = ubc->isooutovfl = NULL;
- ubc->numsub = 0;
- ubc->isooutbuf = kmalloc(sizeof(struct isowbuf_t), GFP_KERNEL);
- if (!ubc->isooutbuf) {
- pr_err("out of memory\n");
- kfree(ubc);
- bcs->hw.bas = NULL;
- return -ENOMEM;
- }
- tasklet_init(&ubc->sent_tasklet,
- write_iso_tasklet, (unsigned long) bcs);
-
- spin_lock_init(&ubc->isoinlock);
- for (i = 0; i < BAS_INURBS; ++i)
- ubc->isoinurbs[i] = NULL;
- ubc->isoindone = NULL;
- ubc->loststatus = -EINPROGRESS;
- ubc->isoinlost = 0;
- ubc->seqlen = 0;
- ubc->inbyte = 0;
- ubc->inbits = 0;
- ubc->goodbytes = 0;
- ubc->alignerrs = 0;
- ubc->fcserrs = 0;
- ubc->frameerrs = 0;
- ubc->giants = 0;
- ubc->runts = 0;
- ubc->aborts = 0;
- ubc->shared0s = 0;
- ubc->stolen0s = 0;
- tasklet_init(&ubc->rcvd_tasklet,
- read_iso_tasklet, (unsigned long) bcs);
- return 0;
-}
-
-static void gigaset_reinitbcshw(struct bc_state *bcs)
-{
- struct bas_bc_state *ubc = bcs->hw.bas;
-
-	ubc->running = 0;
-	atomic_set(&ubc->corrbytes, 0);
-	ubc->numsub = 0;
- spin_lock_init(&ubc->isooutlock);
- spin_lock_init(&ubc->isoinlock);
- ubc->loststatus = -EINPROGRESS;
-}
-
-static void gigaset_freecshw(struct cardstate *cs)
-{
- /* timers, URBs and rcvbuf are disposed of in disconnect */
- kfree(cs->hw.bas->int_in_buf);
- kfree(cs->hw.bas);
- cs->hw.bas = NULL;
-}
-
-/* Initialize hardware dependent part of the cardstate structure
- * parameter:
- * cs cardstate structure
- * return value:
- * 0 on success, error code < 0 on failure
- */
-static int gigaset_initcshw(struct cardstate *cs)
-{
- struct bas_cardstate *ucs;
-
- cs->hw.bas = ucs = kzalloc(sizeof(*ucs), GFP_KERNEL);
- if (!ucs) {
- pr_err("out of memory\n");
- return -ENOMEM;
- }
- ucs->int_in_buf = kmalloc(IP_MSGSIZE, GFP_KERNEL);
- if (!ucs->int_in_buf) {
- kfree(ucs);
- pr_err("out of memory\n");
- return -ENOMEM;
- }
-
- spin_lock_init(&ucs->lock);
- ucs->cs = cs;
- timer_setup(&ucs->timer_ctrl, req_timeout, 0);
- timer_setup(&ucs->timer_atrdy, atrdy_timeout, 0);
- timer_setup(&ucs->timer_cmd_in, cmd_in_timeout, 0);
- timer_setup(&ucs->timer_int_in, int_in_resubmit, 0);
- init_waitqueue_head(&ucs->waitqueue);
- INIT_WORK(&ucs->int_in_wq, int_in_work);
-
- return 0;
-}
-
-/* freeurbs
- * unlink and deallocate all URBs unconditionally
- * caller must make sure that no commands are still in progress
- * parameter:
- * cs controller state structure
- */
-static void freeurbs(struct cardstate *cs)
-{
- struct bas_cardstate *ucs = cs->hw.bas;
- struct bas_bc_state *ubc;
- int i, j;
-
- gig_dbg(DEBUG_INIT, "%s: killing URBs", __func__);
- for (j = 0; j < BAS_CHANNELS; ++j) {
- ubc = cs->bcs[j].hw.bas;
- for (i = 0; i < BAS_OUTURBS; ++i) {
- usb_kill_urb(ubc->isoouturbs[i].urb);
- usb_free_urb(ubc->isoouturbs[i].urb);
- ubc->isoouturbs[i].urb = NULL;
- }
- for (i = 0; i < BAS_INURBS; ++i) {
- usb_kill_urb(ubc->isoinurbs[i]);
- usb_free_urb(ubc->isoinurbs[i]);
- ubc->isoinurbs[i] = NULL;
- }
- }
- usb_kill_urb(ucs->urb_int_in);
- usb_free_urb(ucs->urb_int_in);
- ucs->urb_int_in = NULL;
- usb_kill_urb(ucs->urb_cmd_out);
- usb_free_urb(ucs->urb_cmd_out);
- ucs->urb_cmd_out = NULL;
- usb_kill_urb(ucs->urb_cmd_in);
- usb_free_urb(ucs->urb_cmd_in);
- ucs->urb_cmd_in = NULL;
- usb_kill_urb(ucs->urb_ctrl);
- usb_free_urb(ucs->urb_ctrl);
- ucs->urb_ctrl = NULL;
-}
-
-/* gigaset_probe
- * This function is called when a new USB device is connected.
- * It checks whether the new device is handled by this driver.
- */
-static int gigaset_probe(struct usb_interface *interface,
- const struct usb_device_id *id)
-{
- struct usb_host_interface *hostif;
- struct usb_device *udev = interface_to_usbdev(interface);
- struct cardstate *cs = NULL;
- struct bas_cardstate *ucs = NULL;
- struct bas_bc_state *ubc;
- struct usb_endpoint_descriptor *endpoint;
- int i, j;
- int rc;
-
- gig_dbg(DEBUG_INIT,
-		"%s: Check if device matches ... (Vendor: 0x%x, Product: 0x%x)",
- __func__, le16_to_cpu(udev->descriptor.idVendor),
- le16_to_cpu(udev->descriptor.idProduct));
-
- /* set required alternate setting */
- hostif = interface->cur_altsetting;
- if (hostif->desc.bAlternateSetting != 3) {
- gig_dbg(DEBUG_INIT,
- "%s: wrong alternate setting %d - trying to switch",
- __func__, hostif->desc.bAlternateSetting);
- if (usb_set_interface(udev, hostif->desc.bInterfaceNumber, 3)
- < 0) {
- dev_warn(&udev->dev, "usb_set_interface failed, "
- "device %d interface %d altsetting %d\n",
- udev->devnum, hostif->desc.bInterfaceNumber,
- hostif->desc.bAlternateSetting);
- return -ENODEV;
- }
- hostif = interface->cur_altsetting;
- }
-
-	/* Reject application-specific interfaces */
- if (hostif->desc.bInterfaceClass != 255) {
- dev_warn(&udev->dev, "%s: bInterfaceClass == %d\n",
- __func__, hostif->desc.bInterfaceClass);
- return -ENODEV;
- }
-
- if (hostif->desc.bNumEndpoints < 1)
- return -ENODEV;
-
- dev_info(&udev->dev,
- "%s: Device matched (Vendor: 0x%x, Product: 0x%x)\n",
- __func__, le16_to_cpu(udev->descriptor.idVendor),
- le16_to_cpu(udev->descriptor.idProduct));
-
- /* allocate memory for our device state and initialize it */
- cs = gigaset_initcs(driver, BAS_CHANNELS, 0, 0, cidmode,
- GIGASET_MODULENAME);
- if (!cs)
- return -ENODEV;
- ucs = cs->hw.bas;
-
- /* save off device structure ptrs for later use */
- usb_get_dev(udev);
- ucs->udev = udev;
- ucs->interface = interface;
- cs->dev = &interface->dev;
-
- /* allocate URBs:
- * - one for the interrupt pipe
- * - three for the different uses of the default control pipe
- * - three for each isochronous pipe
- */
- if (!(ucs->urb_int_in = usb_alloc_urb(0, GFP_KERNEL)) ||
- !(ucs->urb_cmd_in = usb_alloc_urb(0, GFP_KERNEL)) ||
- !(ucs->urb_cmd_out = usb_alloc_urb(0, GFP_KERNEL)) ||
- !(ucs->urb_ctrl = usb_alloc_urb(0, GFP_KERNEL)))
- goto allocerr;
-
- for (j = 0; j < BAS_CHANNELS; ++j) {
- ubc = cs->bcs[j].hw.bas;
- for (i = 0; i < BAS_OUTURBS; ++i)
- if (!(ubc->isoouturbs[i].urb =
- usb_alloc_urb(BAS_NUMFRAMES, GFP_KERNEL)))
- goto allocerr;
- for (i = 0; i < BAS_INURBS; ++i)
- if (!(ubc->isoinurbs[i] =
- usb_alloc_urb(BAS_NUMFRAMES, GFP_KERNEL)))
- goto allocerr;
- }
-
- ucs->rcvbuf = NULL;
- ucs->rcvbuf_size = 0;
-
- /* Fill the interrupt urb and send it to the core */
- endpoint = &hostif->endpoint[0].desc;
- usb_fill_int_urb(ucs->urb_int_in, udev,
- usb_rcvintpipe(udev,
- usb_endpoint_num(endpoint)),
- ucs->int_in_buf, IP_MSGSIZE, read_int_callback, cs,
- endpoint->bInterval);
- rc = usb_submit_urb(ucs->urb_int_in, GFP_KERNEL);
- if (rc != 0) {
- dev_err(cs->dev, "could not submit interrupt URB: %s\n",
- get_usb_rcmsg(rc));
- goto error;
- }
- ucs->retry_int_in = 0;
-
- /* tell the device that the driver is ready */
- rc = req_submit(cs->bcs, HD_DEVICE_INIT_ACK, 0, 0);
- if (rc != 0)
- goto error;
-
- /* tell common part that the device is ready */
- if (startmode == SM_LOCKED)
- cs->mstate = MS_LOCKED;
-
- /* save address of controller structure */
- usb_set_intfdata(interface, cs);
-
- rc = gigaset_start(cs);
- if (rc < 0)
- goto error;
-
- return 0;
-
-allocerr:
- dev_err(cs->dev, "could not allocate URBs\n");
- rc = -ENOMEM;
-error:
- freeurbs(cs);
- usb_set_intfdata(interface, NULL);
- usb_put_dev(udev);
- gigaset_freecs(cs);
- return rc;
-}
-
-/* gigaset_disconnect
- * This function is called when the Gigaset base is unplugged.
- */
-static void gigaset_disconnect(struct usb_interface *interface)
-{
- struct cardstate *cs;
- struct bas_cardstate *ucs;
- int j;
-
- cs = usb_get_intfdata(interface);
-
- ucs = cs->hw.bas;
-
- dev_info(cs->dev, "disconnecting Gigaset base\n");
-
- /* mark base as not ready, all channels disconnected */
- ucs->basstate = 0;
-
- /* tell LL all channels are down */
- for (j = 0; j < BAS_CHANNELS; ++j)
- gigaset_bchannel_down(cs->bcs + j);
-
- /* stop driver (common part) */
- gigaset_stop(cs);
-
-	/* stop delayed work and URBs, free resources */
- del_timer_sync(&ucs->timer_ctrl);
- del_timer_sync(&ucs->timer_atrdy);
- del_timer_sync(&ucs->timer_cmd_in);
- del_timer_sync(&ucs->timer_int_in);
- cancel_work_sync(&ucs->int_in_wq);
- freeurbs(cs);
- usb_set_intfdata(interface, NULL);
- kfree(ucs->rcvbuf);
- ucs->rcvbuf = NULL;
- ucs->rcvbuf_size = 0;
- usb_put_dev(ucs->udev);
- ucs->interface = NULL;
- ucs->udev = NULL;
- cs->dev = NULL;
- gigaset_freecs(cs);
-}
-
-/* gigaset_suspend
- * This function is called before the USB connection is suspended
- * or before the USB device is reset.
- * In the latter case, message == PMSG_ON.
- */
-static int gigaset_suspend(struct usb_interface *intf, pm_message_t message)
-{
- struct cardstate *cs = usb_get_intfdata(intf);
- struct bas_cardstate *ucs = cs->hw.bas;
- int rc;
-
- /* set suspend flag; this stops AT command/response traffic */
- if (update_basstate(ucs, BS_SUSPEND, 0) & BS_SUSPEND) {
- gig_dbg(DEBUG_SUSPEND, "already suspended");
- return 0;
- }
-
- /* wait a bit for blocking conditions to go away */
- rc = wait_event_timeout(ucs->waitqueue,
- !(ucs->basstate &
- (BS_B1OPEN | BS_B2OPEN | BS_ATRDPEND | BS_ATWRPEND)),
- BAS_TIMEOUT * HZ / 10);
- gig_dbg(DEBUG_SUSPEND, "wait_event_timeout() -> %d", rc);
-
- /* check for conditions preventing suspend */
- if (ucs->basstate & (BS_B1OPEN | BS_B2OPEN | BS_ATRDPEND | BS_ATWRPEND)) {
- dev_warn(cs->dev, "cannot suspend:\n");
- if (ucs->basstate & BS_B1OPEN)
- dev_warn(cs->dev, " B channel 1 open\n");
- if (ucs->basstate & BS_B2OPEN)
- dev_warn(cs->dev, " B channel 2 open\n");
- if (ucs->basstate & BS_ATRDPEND)
- dev_warn(cs->dev, " receiving AT reply\n");
- if (ucs->basstate & BS_ATWRPEND)
- dev_warn(cs->dev, " sending AT command\n");
- update_basstate(ucs, 0, BS_SUSPEND);
- return -EBUSY;
- }
-
- /* close AT channel if open */
- if (ucs->basstate & BS_ATOPEN) {
- gig_dbg(DEBUG_SUSPEND, "closing AT channel");
- rc = req_submit(cs->bcs, HD_CLOSE_ATCHANNEL, 0, 0);
- if (rc) {
- update_basstate(ucs, 0, BS_SUSPEND);
- return rc;
- }
- wait_event_timeout(ucs->waitqueue, !ucs->pending,
- BAS_TIMEOUT * HZ / 10);
- /* in case of timeout, proceed anyway */
- }
-
- /* kill all URBs and delayed work that might still be pending */
- usb_kill_urb(ucs->urb_ctrl);
- usb_kill_urb(ucs->urb_int_in);
- del_timer_sync(&ucs->timer_ctrl);
- del_timer_sync(&ucs->timer_atrdy);
- del_timer_sync(&ucs->timer_cmd_in);
- del_timer_sync(&ucs->timer_int_in);
-
- /* don't try to cancel int_in_wq from within reset as it
- * might be the one requesting the reset
- */
- if (message.event != PM_EVENT_ON)
- cancel_work_sync(&ucs->int_in_wq);
-
- gig_dbg(DEBUG_SUSPEND, "suspend complete");
- return 0;
-}
-
-/* gigaset_resume
- * This function is called after the USB connection has been resumed.
- */
-static int gigaset_resume(struct usb_interface *intf)
-{
- struct cardstate *cs = usb_get_intfdata(intf);
- struct bas_cardstate *ucs = cs->hw.bas;
- int rc;
-
- /* resubmit interrupt URB for spontaneous messages from base */
- rc = usb_submit_urb(ucs->urb_int_in, GFP_KERNEL);
- if (rc) {
- dev_err(cs->dev, "could not resubmit interrupt URB: %s\n",
- get_usb_rcmsg(rc));
- return rc;
- }
- ucs->retry_int_in = 0;
-
- /* clear suspend flag to reallow activity */
- update_basstate(ucs, 0, BS_SUSPEND);
-
- gig_dbg(DEBUG_SUSPEND, "resume complete");
- return 0;
-}
-
-/* gigaset_pre_reset
- * This function is called before the USB connection is reset.
- */
-static int gigaset_pre_reset(struct usb_interface *intf)
-{
- /* handle just like suspend */
- return gigaset_suspend(intf, PMSG_ON);
-}
-
-/* gigaset_post_reset
- * This function is called after the USB connection has been reset.
- */
-static int gigaset_post_reset(struct usb_interface *intf)
-{
- /* FIXME: send HD_DEVICE_INIT_ACK? */
-
- /* resume operations */
- return gigaset_resume(intf);
-}
-
-
-static const struct gigaset_ops gigops = {
- .write_cmd = gigaset_write_cmd,
- .write_room = gigaset_write_room,
- .chars_in_buffer = gigaset_chars_in_buffer,
- .brkchars = gigaset_brkchars,
- .init_bchannel = gigaset_init_bchannel,
- .close_bchannel = gigaset_close_bchannel,
- .initbcshw = gigaset_initbcshw,
- .freebcshw = gigaset_freebcshw,
- .reinitbcshw = gigaset_reinitbcshw,
- .initcshw = gigaset_initcshw,
- .freecshw = gigaset_freecshw,
- .set_modem_ctrl = gigaset_set_modem_ctrl,
- .baud_rate = gigaset_baud_rate,
- .set_line_ctrl = gigaset_set_line_ctrl,
- .send_skb = gigaset_isoc_send_skb,
- .handle_input = gigaset_isoc_input,
-};
-
-/* bas_gigaset_init
- * This function is called after the kernel module is loaded.
- */
-static int __init bas_gigaset_init(void)
-{
- int result = -ENOMEM;
-
- /* allocate memory for our driver state and initialize it */
- driver = gigaset_initdriver(GIGASET_MINOR, GIGASET_MINORS,
- GIGASET_MODULENAME, GIGASET_DEVNAME,
- &gigops, THIS_MODULE);
- if (driver == NULL)
- goto error;
-
- /* register this driver with the USB subsystem */
- result = usb_register(&gigaset_usb_driver);
- if (result < 0) {
- pr_err("error %d registering USB driver\n", -result);
- goto error;
- }
-
- pr_info(DRIVER_DESC "\n");
- return 0;
-
-error:
- if (driver)
- gigaset_freedriver(driver);
- driver = NULL;
- return result;
-}
-
-/* bas_gigaset_exit
- * This function is called before the kernel module is unloaded.
- */
-static void __exit bas_gigaset_exit(void)
-{
- struct bas_cardstate *ucs;
- int i;
-
- gigaset_blockdriver(driver); /* => probe will fail
- * => no gigaset_start any more
- */
-
- /* stop all connected devices */
- for (i = 0; i < driver->minors; i++) {
- if (gigaset_shutdown(driver->cs + i) < 0)
- continue; /* no device */
- /* from now on, no isdn callback should be possible */
-
- /* close all still open channels */
- ucs = driver->cs[i].hw.bas;
- if (ucs->basstate & BS_B1OPEN) {
- gig_dbg(DEBUG_INIT, "closing B1 channel");
- usb_control_msg(ucs->udev,
- usb_sndctrlpipe(ucs->udev, 0),
- HD_CLOSE_B1CHANNEL, OUT_VENDOR_REQ,
- 0, 0, NULL, 0, BAS_TIMEOUT);
- }
- if (ucs->basstate & BS_B2OPEN) {
- gig_dbg(DEBUG_INIT, "closing B2 channel");
- usb_control_msg(ucs->udev,
- usb_sndctrlpipe(ucs->udev, 0),
- HD_CLOSE_B2CHANNEL, OUT_VENDOR_REQ,
- 0, 0, NULL, 0, BAS_TIMEOUT);
- }
- if (ucs->basstate & BS_ATOPEN) {
- gig_dbg(DEBUG_INIT, "closing AT channel");
- usb_control_msg(ucs->udev,
- usb_sndctrlpipe(ucs->udev, 0),
- HD_CLOSE_ATCHANNEL, OUT_VENDOR_REQ,
- 0, 0, NULL, 0, BAS_TIMEOUT);
- }
- ucs->basstate = 0;
- }
-
- /* deregister this driver with the USB subsystem */
- usb_deregister(&gigaset_usb_driver);
- /* this will call the disconnect-callback */
- /* from now on, no disconnect/probe callback should be running */
-
- gigaset_freedriver(driver);
- driver = NULL;
-}
-
-
-module_init(bas_gigaset_init);
-module_exit(bas_gigaset_exit);
-
-MODULE_AUTHOR(DRIVER_AUTHOR);
-MODULE_DESCRIPTION(DRIVER_DESC);
-MODULE_LICENSE("GPL");
diff --git a/drivers/staging/isdn/gigaset/capi.c b/drivers/staging/isdn/gigaset/capi.c
deleted file mode 100644
index 83d7dd48c61d..000000000000
--- a/drivers/staging/isdn/gigaset/capi.c
+++ /dev/null
@@ -1,2517 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-/*
- * Kernel CAPI interface for the Gigaset driver
- *
- * Copyright (c) 2009 by Tilman Schmidt <tilman@imap.cc>.
- *
- * =====================================================================
- * =====================================================================
- */
-
-#include "gigaset.h"
-#include <linux/proc_fs.h>
-#include <linux/seq_file.h>
-#include <linux/ratelimit.h>
-#include <linux/isdn/capilli.h>
-#include <linux/isdn/capicmd.h>
-#include <linux/isdn/capiutil.h>
-#include <linux/export.h>
-
-/* missing from kernelcapi.h */
-#define CapiNcpiNotSupportedByProtocol 0x0001
-#define CapiFlagsNotSupportedByProtocol 0x0002
-#define CapiAlertAlreadySent 0x0003
-#define CapiFacilitySpecificFunctionNotSupported 0x3011
-
-/* missing from capicmd.h */
-#define CAPI_CONNECT_IND_BASELEN (CAPI_MSG_BASELEN + 4 + 2 + 8 * 1)
-#define CAPI_CONNECT_ACTIVE_IND_BASELEN (CAPI_MSG_BASELEN + 4 + 3 * 1)
-#define CAPI_CONNECT_B3_IND_BASELEN (CAPI_MSG_BASELEN + 4 + 1)
-#define CAPI_CONNECT_B3_ACTIVE_IND_BASELEN (CAPI_MSG_BASELEN + 4 + 1)
-#define CAPI_DATA_B3_REQ_LEN64 (CAPI_MSG_BASELEN + 4 + 4 + 2 + 2 + 2 + 8)
-#define CAPI_DATA_B3_CONF_LEN (CAPI_MSG_BASELEN + 4 + 2 + 2)
-#define CAPI_DISCONNECT_IND_LEN (CAPI_MSG_BASELEN + 4 + 2)
-#define CAPI_DISCONNECT_B3_IND_BASELEN (CAPI_MSG_BASELEN + 4 + 2 + 1)
-#define CAPI_FACILITY_CONF_BASELEN (CAPI_MSG_BASELEN + 4 + 2 + 2 + 1)
-/* most _CONF messages contain only Controller/PLCI/NCCI and Info parameters */
-#define CAPI_STDCONF_LEN (CAPI_MSG_BASELEN + 4 + 2)
-
-#define CAPI_FACILITY_HANDSET 0x0000
-#define CAPI_FACILITY_DTMF 0x0001
-#define CAPI_FACILITY_V42BIS 0x0002
-#define CAPI_FACILITY_SUPPSVC 0x0003
-#define CAPI_FACILITY_WAKEUP 0x0004
-#define CAPI_FACILITY_LI 0x0005
-
-#define CAPI_SUPPSVC_GETSUPPORTED 0x0000
-#define CAPI_SUPPSVC_LISTEN 0x0001
-
-/* missing from capiutil.h */
-#define CAPIMSG_PLCI_PART(m) CAPIMSG_U8(m, 9)
-#define CAPIMSG_NCCI_PART(m) CAPIMSG_U16(m, 10)
-#define CAPIMSG_HANDLE_REQ(m) CAPIMSG_U16(m, 18) /* DATA_B3_REQ/_IND only! */
-#define CAPIMSG_FLAGS(m) CAPIMSG_U16(m, 20)
-#define CAPIMSG_SETCONTROLLER(m, contr) capimsg_setu8(m, 8, contr)
-#define CAPIMSG_SETPLCI_PART(m, plci) capimsg_setu8(m, 9, plci)
-#define CAPIMSG_SETNCCI_PART(m, ncci) capimsg_setu16(m, 10, ncci)
-#define CAPIMSG_SETFLAGS(m, flags) capimsg_setu16(m, 20, flags)
-
-/* parameters with differing location in DATA_B3_CONF/_RESP: */
-#define CAPIMSG_SETHANDLE_CONF(m, handle) capimsg_setu16(m, 12, handle)
-#define CAPIMSG_SETINFO_CONF(m, info) capimsg_setu16(m, 14, info)
-
-/* Flags (DATA_B3_REQ/_IND) */
-#define CAPI_FLAGS_DELIVERY_CONFIRMATION 0x04
-#define CAPI_FLAGS_RESERVED (~0x1f)
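-
-/*
- * Editor's sketch (not part of the original file): the accessor macros
- * above assume the fixed DATA_B3_REQ/_IND wire layout (octet offsets):
- *
- *	 0	u16 TotalLength
- *	 2	u16 ApplId
- *	 4	u8  Command
- *	 5	u8  Subcommand
- *	 6	u16 MessageNumber
- *	 8	u8  Controller	\
- *	 9	u8  PLCI part	 > u32 NCCI
- *	10	u16 NCCI part	/
- *	12	u32 Data (32 bit)
- *	16	u16 DataLength
- *	18	u16 DataHandle
- *	20	u16 Flags
- */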
-
-/* buffer sizes */
-#define MAX_BC_OCTETS 11
-#define MAX_HLC_OCTETS 3
-#define MAX_NUMBER_DIGITS 20
-#define MAX_FMT_IE_LEN 20
-
-/* values for bcs->apconnstate */
-#define APCONN_NONE 0 /* inactive/listening */
-#define APCONN_SETUP 1 /* connecting */
-#define APCONN_ACTIVE 2 /* B channel up */
-
-/* registered application data structure */
-struct gigaset_capi_appl {
- struct list_head ctrlist;
- struct gigaset_capi_appl *bcnext;
- u16 id;
- struct capi_register_params rp;
- u16 nextMessageNumber;
- u32 listenInfoMask;
- u32 listenCIPmask;
-};
-
-/* CAPI specific controller data structure */
-struct gigaset_capi_ctr {
- struct capi_ctr ctr;
- struct list_head appls;
- struct sk_buff_head sendqueue;
- atomic_t sendqlen;
- /* two _cmsg structures possibly used concurrently: */
- _cmsg hcmsg; /* for message composition triggered from hardware */
- _cmsg acmsg; /* for dissection of messages sent from application */
- u8 bc_buf[MAX_BC_OCTETS + 1];
- u8 hlc_buf[MAX_HLC_OCTETS + 1];
- u8 cgpty_buf[MAX_NUMBER_DIGITS + 3];
- u8 cdpty_buf[MAX_NUMBER_DIGITS + 2];
-};
-
-/* CIP Value table (from CAPI 2.0 standard, ch. 6.1) */
-static struct {
- u8 *bc;
- u8 *hlc;
-} cip2bchlc[] = {
- [1] = { "8090A3", NULL }, /* Speech (A-law) */
- [2] = { "8890", NULL }, /* Unrestricted digital information */
- [3] = { "8990", NULL }, /* Restricted digital information */
- [4] = { "9090A3", NULL }, /* 3,1 kHz audio (A-law) */
- [5] = { "9190", NULL }, /* 7 kHz audio */
- [6] = { "9890", NULL }, /* Video */
- [7] = { "88C0C6E6", NULL }, /* Packet mode */
- [8] = { "8890218F", NULL }, /* 56 kbit/s rate adaptation */
- [9] = { "9190A5", NULL }, /* Unrestricted digital information
- * with tones/announcements */
- [16] = { "8090A3", "9181" }, /* Telephony */
- [17] = { "9090A3", "9184" }, /* Group 2/3 facsimile */
- [18] = { "8890", "91A1" }, /* Group 4 facsimile Class 1 */
- [19] = { "8890", "91A4" }, /* Teletex service basic and mixed mode
- * and Group 4 facsimile service
- * Classes II and III */
- [20] = { "8890", "91A8" }, /* Teletex service basic and
- * processable mode */
- [21] = { "8890", "91B1" }, /* Teletex service basic mode */
- [22] = { "8890", "91B2" }, /* International interworking for
- * Videotex */
- [23] = { "8890", "91B5" }, /* Telex */
- [24] = { "8890", "91B8" }, /* Message Handling Systems
- * in accordance with X.400 */
- [25] = { "8890", "91C1" }, /* OSI application
- * in accordance with X.200 */
- [26] = { "9190A5", "9181" }, /* 7 kHz telephony */
- [27] = { "9190A5", "916001" }, /* Video telephony, first connection */
- [28] = { "8890", "916002" }, /* Video telephony, second connection */
-};
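-
-/*
- * Editor's example (not from the original source): a RING reporting
- * ZBC "8090A3" alone matches cip2bchlc[1] (Speech, A-law); if the
- * device also reports ZHLC "9181", the HLC lookup in
- * gigaset_isdn_icall() upgrades the CONNECT_IND's CIPValue to 16
- * (Telephony).
- */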
-
-/*
- * helper functions
- * ================
- */
-
-/*
- * emit unsupported parameter warning
- */
-static inline void ignore_cstruct_param(struct cardstate *cs, _cstruct param,
- char *msgname, char *paramname)
-{
- if (param && *param)
- dev_warn(cs->dev, "%s: ignoring unsupported parameter: %s\n",
- msgname, paramname);
-}
-
-/*
- * convert an IE from Gigaset hex string to ETSI binary representation
- * including length byte
- * return value: result length, -1 on error
- */
-static int encode_ie(char *in, u8 *out, int maxlen)
-{
- int l = 0;
- while (*in) {
- if (!isxdigit(in[0]) || !isxdigit(in[1]) || l >= maxlen)
- return -1;
- out[++l] = (hex_to_bin(in[0]) << 4) + hex_to_bin(in[1]);
- in += 2;
- }
- out[0] = l;
- return l;
-}
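-
-/*
- * Worked example (editorial addition, not in the original driver):
- *
- *	u8 buf[MAX_BC_OCTETS + 1];
- *	int n = encode_ie("8090A3", buf, MAX_BC_OCTETS);
- *	// n == 3, buf[] == { 0x03, 0x80, 0x90, 0xA3 }
- *
- * Odd-length input or a non-hex digit yields -1; out[] must provide
- * maxlen + 1 bytes for the length byte plus payload.
- */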
-
-/*
- * convert an IE from ETSI binary representation including length byte
- * to Gigaset hex string
- */
-static void decode_ie(u8 *in, char *out)
-{
- int i = *in;
- while (i-- > 0) {
- /* ToDo: conversion to upper case necessary? */
- *out++ = toupper(hex_asc_hi(*++in));
- *out++ = toupper(hex_asc_lo(*in));
- }
-}
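-
-/*
- * Editorial note (not in the original source): decode_ie() is the
- * inverse of encode_ie() and does NOT null-terminate its output;
- * callers reserve 2 * in[0] bytes and append the terminator
- * themselves:
- *
- *	u8 ie[] = { 0x03, 0x80, 0x90, 0xA3 };
- *	char str[7];
- *	decode_ie(ie, str);	// str[0..5] now hold "8090A3"
- *	str[6] = '\0';
- */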
-
-/*
- * retrieve application data structure for an application ID
- */
-static inline struct gigaset_capi_appl *
-get_appl(struct gigaset_capi_ctr *iif, u16 appl)
-{
- struct gigaset_capi_appl *ap;
-
- list_for_each_entry(ap, &iif->appls, ctrlist)
- if (ap->id == appl)
- return ap;
- return NULL;
-}
-
-/*
- * dump CAPI message to kernel messages for debugging
- */
-static inline void dump_cmsg(enum debuglevel level, const char *tag, _cmsg *p)
-{
-#ifdef CONFIG_GIGASET_DEBUG
- /* dump at most 20 messages in 20 secs */
- static DEFINE_RATELIMIT_STATE(msg_dump_ratelimit, 20 * HZ, 20);
- _cdebbuf *cdb;
-
- if (!(gigaset_debuglevel & level))
- return;
- if (!___ratelimit(&msg_dump_ratelimit, tag))
- return;
-
- cdb = capi_cmsg2str(p);
- if (cdb) {
- gig_dbg(level, "%s: [%d] %s", tag, p->ApplId, cdb->buf);
- cdebbuf_free(cdb);
- } else {
- gig_dbg(level, "%s: [%d] %s", tag, p->ApplId,
- capi_cmd2str(p->Command, p->Subcommand));
- }
-#endif
-}
-
-static inline void dump_rawmsg(enum debuglevel level, const char *tag,
- unsigned char *data)
-{
-#ifdef CONFIG_GIGASET_DEBUG
- char *dbgline;
- int i, l;
-
- if (!(gigaset_debuglevel & level))
- return;
-
- l = CAPIMSG_LEN(data);
- if (l < 12) {
- gig_dbg(level, "%s: ??? LEN=%04d", tag, l);
- return;
- }
- gig_dbg(level, "%s: 0x%02x:0x%02x: ID=%03d #0x%04x LEN=%04d NCCI=0x%x",
- tag, CAPIMSG_COMMAND(data), CAPIMSG_SUBCOMMAND(data),
- CAPIMSG_APPID(data), CAPIMSG_MSGID(data), l,
- CAPIMSG_CONTROL(data));
- l -= 12;
- if (l <= 0)
- return;
- if (l > 64)
- l = 64; /* arbitrary limit */
- dbgline = kmalloc_array(3, l, GFP_ATOMIC);
- if (!dbgline)
- return;
- for (i = 0; i < l; i++) {
- dbgline[3 * i] = hex_asc_hi(data[12 + i]);
- dbgline[3 * i + 1] = hex_asc_lo(data[12 + i]);
- dbgline[3 * i + 2] = ' ';
- }
- dbgline[3 * l - 1] = '\0';
- gig_dbg(level, " %s", dbgline);
- kfree(dbgline);
- if (CAPIMSG_COMMAND(data) == CAPI_DATA_B3 &&
- (CAPIMSG_SUBCOMMAND(data) == CAPI_REQ ||
- CAPIMSG_SUBCOMMAND(data) == CAPI_IND)) {
- l = CAPIMSG_DATALEN(data);
- gig_dbg(level, " DataLength=%d", l);
- if (l <= 0 || !(gigaset_debuglevel & DEBUG_LLDATA))
- return;
- if (l > 64)
- l = 64; /* arbitrary limit */
- dbgline = kmalloc_array(3, l, GFP_ATOMIC);
- if (!dbgline)
- return;
- data += CAPIMSG_LEN(data);
- for (i = 0; i < l; i++) {
- dbgline[3 * i] = hex_asc_hi(data[i]);
- dbgline[3 * i + 1] = hex_asc_lo(data[i]);
- dbgline[3 * i + 2] = ' ';
- }
- dbgline[3 * l - 1] = '\0';
- gig_dbg(level, " %s", dbgline);
- kfree(dbgline);
- }
-#endif
-}
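-
-/*
- * Illustration (editor's addition, values derived from the format
- * string above): for a 22-octet DATA_B3_IND on controller 1, first
- * B channel, the summary line emitted would read roughly
- *
- *	gigaset_skb_rcvd: 0x86:0x82: ID=001 #0x0001 LEN=0022 NCCI=0x10101
- *
- * (0x86 = CAPI_DATA_B3, 0x82 = CAPI_IND).
- */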
-
-/*
- * format CAPI IE as string
- */
-
-#ifdef CONFIG_GIGASET_DEBUG
-static const char *format_ie(const char *ie)
-{
- static char result[3 * MAX_FMT_IE_LEN];
- int len, count;
- char *pout = result;
-
- if (!ie)
- return "NULL";
-
- count = len = ie[0];
- if (count > MAX_FMT_IE_LEN)
- count = MAX_FMT_IE_LEN - 1;
- while (count--) {
- *pout++ = hex_asc_hi(*++ie);
- *pout++ = hex_asc_lo(*ie);
- *pout++ = ' ';
- }
- if (len > MAX_FMT_IE_LEN) {
- *pout++ = '.';
- *pout++ = '.';
- *pout++ = '.';
- }
- *--pout = 0;
- return result;
-}
-#endif
-
-/*
- * emit DATA_B3_CONF message
- */
-static void send_data_b3_conf(struct cardstate *cs, struct capi_ctr *ctr,
- u16 appl, u16 msgid, int channel,
- u16 handle, u16 info)
-{
- struct sk_buff *cskb;
- u8 *msg;
-
- cskb = alloc_skb(CAPI_DATA_B3_CONF_LEN, GFP_ATOMIC);
- if (!cskb) {
- dev_err(cs->dev, "%s: out of memory\n", __func__);
- return;
- }
- /* frequent message, avoid _cmsg overhead */
- msg = __skb_put(cskb, CAPI_DATA_B3_CONF_LEN);
- CAPIMSG_SETLEN(msg, CAPI_DATA_B3_CONF_LEN);
- CAPIMSG_SETAPPID(msg, appl);
- CAPIMSG_SETCOMMAND(msg, CAPI_DATA_B3);
- CAPIMSG_SETSUBCOMMAND(msg, CAPI_CONF);
- CAPIMSG_SETMSGID(msg, msgid);
- CAPIMSG_SETCONTROLLER(msg, ctr->cnr);
- CAPIMSG_SETPLCI_PART(msg, channel);
- CAPIMSG_SETNCCI_PART(msg, 1);
- CAPIMSG_SETHANDLE_CONF(msg, handle);
- CAPIMSG_SETINFO_CONF(msg, info);
-
- /* emit message */
- dump_rawmsg(DEBUG_MCMD, __func__, msg);
- capi_ctr_handle_message(ctr, appl, cskb);
-}
-
-
-/*
- * driver interface functions
- * ==========================
- */
-
-/**
- * gigaset_skb_sent() - acknowledge transmission of outgoing skb
- * @bcs: B channel descriptor structure.
- * @dskb: sent data.
- *
- * Called by hardware module {bas,ser,usb}_gigaset when the data in a
- * skb has been successfully sent, for signalling completion to the LL.
- */
-void gigaset_skb_sent(struct bc_state *bcs, struct sk_buff *dskb)
-{
- struct cardstate *cs = bcs->cs;
- struct gigaset_capi_ctr *iif = cs->iif;
- struct gigaset_capi_appl *ap = bcs->ap;
- unsigned char *req = skb_mac_header(dskb);
- u16 flags;
-
- /* update statistics */
- ++bcs->trans_up;
-
- if (!ap) {
- gig_dbg(DEBUG_MCMD, "%s: application gone", __func__);
- return;
- }
-
- /* don't send further B3 messages if disconnected */
- if (bcs->apconnstate < APCONN_ACTIVE) {
- gig_dbg(DEBUG_MCMD, "%s: disconnected", __func__);
- return;
- }
-
- /*
- * send DATA_B3_CONF if "delivery confirmation" bit was set in request;
- * otherwise it has already been sent by do_data_b3_req()
- */
- flags = CAPIMSG_FLAGS(req);
- if (flags & CAPI_FLAGS_DELIVERY_CONFIRMATION)
- send_data_b3_conf(cs, &iif->ctr, ap->id, CAPIMSG_MSGID(req),
- bcs->channel + 1, CAPIMSG_HANDLE_REQ(req),
- (flags & ~CAPI_FLAGS_DELIVERY_CONFIRMATION) ?
- CapiFlagsNotSupportedByProtocol :
- CAPI_NOERROR);
-}
-EXPORT_SYMBOL_GPL(gigaset_skb_sent);
-
-/**
- * gigaset_skb_rcvd() - pass received skb to LL
- * @bcs: B channel descriptor structure.
- * @skb: received data.
- *
- * Called by hardware module {bas,ser,usb}_gigaset when user data has
- * been successfully received, for passing to the LL.
- * Warning: the skb must not be accessed by the caller after this call!
- */
-void gigaset_skb_rcvd(struct bc_state *bcs, struct sk_buff *skb)
-{
- struct cardstate *cs = bcs->cs;
- struct gigaset_capi_ctr *iif = cs->iif;
- struct gigaset_capi_appl *ap = bcs->ap;
- int len = skb->len;
-
- /* update statistics */
- bcs->trans_down++;
-
- if (!ap) {
- gig_dbg(DEBUG_MCMD, "%s: application gone", __func__);
- dev_kfree_skb_any(skb);
- return;
- }
-
- /* don't send further B3 messages if disconnected */
- if (bcs->apconnstate < APCONN_ACTIVE) {
- gig_dbg(DEBUG_MCMD, "%s: disconnected", __func__);
- dev_kfree_skb_any(skb);
- return;
- }
-
- /*
- * prepend DATA_B3_IND message to payload
- * Parameters: NCCI = 1, all others 0/unused
- * frequent message, avoid _cmsg overhead
- */
- skb_push(skb, CAPI_DATA_B3_REQ_LEN);
- CAPIMSG_SETLEN(skb->data, CAPI_DATA_B3_REQ_LEN);
- CAPIMSG_SETAPPID(skb->data, ap->id);
- CAPIMSG_SETCOMMAND(skb->data, CAPI_DATA_B3);
- CAPIMSG_SETSUBCOMMAND(skb->data, CAPI_IND);
- CAPIMSG_SETMSGID(skb->data, ap->nextMessageNumber++);
- CAPIMSG_SETCONTROLLER(skb->data, iif->ctr.cnr);
- CAPIMSG_SETPLCI_PART(skb->data, bcs->channel + 1);
- CAPIMSG_SETNCCI_PART(skb->data, 1);
- /* Data parameter not used */
- CAPIMSG_SETDATALEN(skb->data, len);
- /* Data handle parameter not used */
- CAPIMSG_SETFLAGS(skb->data, 0);
- /* Data64 parameter not present */
-
- /* emit message */
- dump_rawmsg(DEBUG_MCMD, __func__, skb->data);
- capi_ctr_handle_message(&iif->ctr, ap->id, skb);
-}
-EXPORT_SYMBOL_GPL(gigaset_skb_rcvd);
-
-/**
- * gigaset_isdn_rcv_err() - signal receive error
- * @bcs: B channel descriptor structure.
- *
- * Called by hardware module {bas,ser,usb}_gigaset when a receive error
- * has occurred, for signalling to the LL.
- */
-void gigaset_isdn_rcv_err(struct bc_state *bcs)
-{
- /* if currently ignoring packets, just count down */
- if (bcs->ignore) {
- bcs->ignore--;
- return;
- }
-
- /* update statistics */
- bcs->corrupted++;
-
- /* ToDo: signal error -> LL */
-}
-EXPORT_SYMBOL_GPL(gigaset_isdn_rcv_err);
-
-/**
- * gigaset_isdn_icall() - signal incoming call
- * @at_state: connection state structure.
- *
- * Called by main module at tasklet level to notify the LL that an incoming
- * call has been received. @at_state contains the parameters of the call.
- *
- * Return value: call disposition (ICALL_*)
- */
-int gigaset_isdn_icall(struct at_state_t *at_state)
-{
- struct cardstate *cs = at_state->cs;
- struct bc_state *bcs = at_state->bcs;
- struct gigaset_capi_ctr *iif = cs->iif;
- struct gigaset_capi_appl *ap;
- u32 actCIPmask;
- struct sk_buff *skb;
- unsigned int msgsize;
- unsigned long flags;
- int i;
-
- /*
- * ToDo: signal calls without a free B channel, too
- * (requires a u8 handle for the at_state structure that can
- * be stored in the PLCI and used in the CONNECT_RESP message
- * handler to retrieve it)
- */
- if (!bcs)
- return ICALL_IGNORE;
-
- /* prepare CONNECT_IND message, using B channel number as PLCI */
- capi_cmsg_header(&iif->hcmsg, 0, CAPI_CONNECT, CAPI_IND, 0,
- iif->ctr.cnr | ((bcs->channel + 1) << 8));
-
- /* minimum size, all structs empty */
- msgsize = CAPI_CONNECT_IND_BASELEN;
-
- /* Bearer Capability (mandatory) */
- if (at_state->str_var[STR_ZBC]) {
- /* pass on BC from Gigaset */
- if (encode_ie(at_state->str_var[STR_ZBC], iif->bc_buf,
- MAX_BC_OCTETS) < 0) {
- dev_warn(cs->dev, "RING ignored - bad BC %s\n",
- at_state->str_var[STR_ZBC]);
- return ICALL_IGNORE;
- }
-
- /* look up corresponding CIP value */
- iif->hcmsg.CIPValue = 0; /* default if nothing found */
- for (i = 0; i < ARRAY_SIZE(cip2bchlc); i++)
- if (cip2bchlc[i].bc != NULL &&
- cip2bchlc[i].hlc == NULL &&
- !strcmp(cip2bchlc[i].bc,
- at_state->str_var[STR_ZBC])) {
- iif->hcmsg.CIPValue = i;
- break;
- }
- } else {
- /* no BC (internal call): assume CIP 1 (speech, A-law) */
- iif->hcmsg.CIPValue = 1;
- encode_ie(cip2bchlc[1].bc, iif->bc_buf, MAX_BC_OCTETS);
- }
- iif->hcmsg.BC = iif->bc_buf;
- msgsize += iif->hcmsg.BC[0];
-
- /* High Layer Compatibility (optional) */
- if (at_state->str_var[STR_ZHLC]) {
- /* pass on HLC from Gigaset */
- if (encode_ie(at_state->str_var[STR_ZHLC], iif->hlc_buf,
- MAX_HLC_OCTETS) < 0) {
- dev_warn(cs->dev, "RING ignored - bad HLC %s\n",
- at_state->str_var[STR_ZHLC]);
- return ICALL_IGNORE;
- }
- iif->hcmsg.HLC = iif->hlc_buf;
- msgsize += iif->hcmsg.HLC[0];
-
- /* look up corresponding CIP value */
- /* keep BC based CIP value if none found */
- if (at_state->str_var[STR_ZBC])
- for (i = 0; i < ARRAY_SIZE(cip2bchlc); i++)
- if (cip2bchlc[i].hlc != NULL &&
- !strcmp(cip2bchlc[i].hlc,
- at_state->str_var[STR_ZHLC]) &&
- !strcmp(cip2bchlc[i].bc,
- at_state->str_var[STR_ZBC])) {
- iif->hcmsg.CIPValue = i;
- break;
- }
- }
-
- /* Called Party Number (optional) */
- if (at_state->str_var[STR_ZCPN]) {
- i = strlen(at_state->str_var[STR_ZCPN]);
- if (i > MAX_NUMBER_DIGITS) {
- dev_warn(cs->dev, "RING ignored - bad number %s\n",
- at_state->str_var[STR_ZCPN]);
- return ICALL_IGNORE;
- }
- iif->cdpty_buf[0] = i + 1;
- iif->cdpty_buf[1] = 0x80; /* type / numbering plan unknown */
- memcpy(iif->cdpty_buf + 2, at_state->str_var[STR_ZCPN], i);
- iif->hcmsg.CalledPartyNumber = iif->cdpty_buf;
- msgsize += iif->hcmsg.CalledPartyNumber[0];
- }
-
- /* Calling Party Number (optional) */
- if (at_state->str_var[STR_NMBR]) {
- i = strlen(at_state->str_var[STR_NMBR]);
- if (i > MAX_NUMBER_DIGITS) {
- dev_warn(cs->dev, "RING ignored - bad number %s\n",
- at_state->str_var[STR_NMBR]);
- return ICALL_IGNORE;
- }
- iif->cgpty_buf[0] = i + 2;
- iif->cgpty_buf[1] = 0x00; /* type / numbering plan unknown */
- iif->cgpty_buf[2] = 0x80; /* pres. allowed, not screened */
- memcpy(iif->cgpty_buf + 3, at_state->str_var[STR_NMBR], i);
- iif->hcmsg.CallingPartyNumber = iif->cgpty_buf;
- msgsize += iif->hcmsg.CallingPartyNumber[0];
- }
-
- /* remaining parameters (not supported, always left NULL):
- * - CalledPartySubaddress
- * - CallingPartySubaddress
- * - AdditionalInfo
- * - BChannelinformation
- * - Keypadfacility
- * - Useruserdata
- * - Facilitydataarray
- */
-
- gig_dbg(DEBUG_CMD, "icall: PLCI %x CIP %d BC %s",
- iif->hcmsg.adr.adrPLCI, iif->hcmsg.CIPValue,
- format_ie(iif->hcmsg.BC));
- gig_dbg(DEBUG_CMD, "icall: HLC %s",
- format_ie(iif->hcmsg.HLC));
- gig_dbg(DEBUG_CMD, "icall: CgPty %s",
- format_ie(iif->hcmsg.CallingPartyNumber));
- gig_dbg(DEBUG_CMD, "icall: CdPty %s",
- format_ie(iif->hcmsg.CalledPartyNumber));
-
- /* scan application list for matching listeners */
- spin_lock_irqsave(&bcs->aplock, flags);
- if (bcs->ap != NULL || bcs->apconnstate != APCONN_NONE) {
- dev_warn(cs->dev, "%s: channel not properly cleared (%p/%d)\n",
- __func__, bcs->ap, bcs->apconnstate);
- bcs->ap = NULL;
- bcs->apconnstate = APCONN_NONE;
- }
- spin_unlock_irqrestore(&bcs->aplock, flags);
- actCIPmask = 1 | (1 << iif->hcmsg.CIPValue);
- list_for_each_entry(ap, &iif->appls, ctrlist)
- if (actCIPmask & ap->listenCIPmask) {
- /* build CONNECT_IND message for this application */
- iif->hcmsg.ApplId = ap->id;
- iif->hcmsg.Messagenumber = ap->nextMessageNumber++;
-
- skb = alloc_skb(msgsize, GFP_ATOMIC);
- if (!skb) {
- dev_err(cs->dev, "%s: out of memory\n",
- __func__);
- break;
- }
- if (capi_cmsg2message(&iif->hcmsg,
- __skb_put(skb, msgsize))) {
- dev_err(cs->dev, "%s: message parser failure\n",
- __func__);
- dev_kfree_skb_any(skb);
- break;
- }
- dump_cmsg(DEBUG_CMD, __func__, &iif->hcmsg);
-
- /* add to listeners on this B channel, update state */
- spin_lock_irqsave(&bcs->aplock, flags);
- ap->bcnext = bcs->ap;
- bcs->ap = ap;
- bcs->chstate |= CHS_NOTIFY_LL;
- bcs->apconnstate = APCONN_SETUP;
- spin_unlock_irqrestore(&bcs->aplock, flags);
-
- /* emit message */
- capi_ctr_handle_message(&iif->ctr, ap->id, skb);
- }
-
- /*
- * Return "accept" if any listeners.
- * Gigaset will send ALERTING.
- * There doesn't seem to be a way to avoid this.
- */
- return bcs->ap ? ICALL_ACCEPT : ICALL_IGNORE;
-}
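-
-/*
- * Editorial sketch (not part of the original file): the 32-bit CAPI
- * address used throughout packs Controller, PLCI and NCCI as
- * bits 0-7, 8-15 and 16-31; B channel n maps to PLCI n + 1, and the
- * single supported B3 connection is always NCCI 1:
- *
- *	u32 adr = iif->ctr.cnr			// Controller
- *		| ((bcs->channel + 1) << 8)	// PLCI
- *		| (1 << 16);			// NCCI
- */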
-
-/*
- * send a DISCONNECT_IND message to an application
- * does not sleep, clobbers the controller's hcmsg structure
- */
-static void send_disconnect_ind(struct bc_state *bcs,
- struct gigaset_capi_appl *ap, u16 reason)
-{
- struct cardstate *cs = bcs->cs;
- struct gigaset_capi_ctr *iif = cs->iif;
- struct sk_buff *skb;
-
- if (bcs->apconnstate == APCONN_NONE)
- return;
-
- capi_cmsg_header(&iif->hcmsg, ap->id, CAPI_DISCONNECT, CAPI_IND,
- ap->nextMessageNumber++,
- iif->ctr.cnr | ((bcs->channel + 1) << 8));
- iif->hcmsg.Reason = reason;
- skb = alloc_skb(CAPI_DISCONNECT_IND_LEN, GFP_ATOMIC);
- if (!skb) {
- dev_err(cs->dev, "%s: out of memory\n", __func__);
- return;
- }
- if (capi_cmsg2message(&iif->hcmsg,
- __skb_put(skb, CAPI_DISCONNECT_IND_LEN))) {
- dev_err(cs->dev, "%s: message parser failure\n", __func__);
- dev_kfree_skb_any(skb);
- return;
- }
- dump_cmsg(DEBUG_CMD, __func__, &iif->hcmsg);
- capi_ctr_handle_message(&iif->ctr, ap->id, skb);
-}
-
-/*
- * send a DISCONNECT_B3_IND message to an application
- * Parameters: NCCI = 1, NCPI empty, Reason_B3 = 0
- * does not sleep, clobbers the controller's hcmsg structure
- */
-static void send_disconnect_b3_ind(struct bc_state *bcs,
- struct gigaset_capi_appl *ap)
-{
- struct cardstate *cs = bcs->cs;
- struct gigaset_capi_ctr *iif = cs->iif;
- struct sk_buff *skb;
-
- /* nothing to do if no logical connection active */
- if (bcs->apconnstate < APCONN_ACTIVE)
- return;
- bcs->apconnstate = APCONN_SETUP;
-
- capi_cmsg_header(&iif->hcmsg, ap->id, CAPI_DISCONNECT_B3, CAPI_IND,
- ap->nextMessageNumber++,
- iif->ctr.cnr | ((bcs->channel + 1) << 8) | (1 << 16));
- skb = alloc_skb(CAPI_DISCONNECT_B3_IND_BASELEN, GFP_ATOMIC);
- if (!skb) {
- dev_err(cs->dev, "%s: out of memory\n", __func__);
- return;
- }
- if (capi_cmsg2message(&iif->hcmsg,
- __skb_put(skb, CAPI_DISCONNECT_B3_IND_BASELEN))) {
- dev_err(cs->dev, "%s: message parser failure\n", __func__);
- dev_kfree_skb_any(skb);
- return;
- }
- dump_cmsg(DEBUG_CMD, __func__, &iif->hcmsg);
- capi_ctr_handle_message(&iif->ctr, ap->id, skb);
-}
-
-/**
- * gigaset_isdn_connD() - signal D channel connect
- * @bcs: B channel descriptor structure.
- *
- * Called by main module at tasklet level to notify the LL that the D channel
- * connection has been established.
- */
-void gigaset_isdn_connD(struct bc_state *bcs)
-{
- struct cardstate *cs = bcs->cs;
- struct gigaset_capi_ctr *iif = cs->iif;
- struct gigaset_capi_appl *ap;
- struct sk_buff *skb;
- unsigned int msgsize;
- unsigned long flags;
-
- spin_lock_irqsave(&bcs->aplock, flags);
- ap = bcs->ap;
- if (!ap) {
- spin_unlock_irqrestore(&bcs->aplock, flags);
- gig_dbg(DEBUG_CMD, "%s: application gone", __func__);
- return;
- }
- if (bcs->apconnstate == APCONN_NONE) {
- spin_unlock_irqrestore(&bcs->aplock, flags);
- dev_warn(cs->dev, "%s: application %u not connected\n",
- __func__, ap->id);
- return;
- }
- spin_unlock_irqrestore(&bcs->aplock, flags);
- while (ap->bcnext) {
- /* this should never happen */
- dev_warn(cs->dev, "%s: dropping extra application %u\n",
- __func__, ap->bcnext->id);
- send_disconnect_ind(bcs, ap->bcnext,
- CapiCallGivenToOtherApplication);
- ap->bcnext = ap->bcnext->bcnext;
- }
-
- /* prepare CONNECT_ACTIVE_IND message
- * Note: LLC not supported by device
- */
- capi_cmsg_header(&iif->hcmsg, ap->id, CAPI_CONNECT_ACTIVE, CAPI_IND,
- ap->nextMessageNumber++,
- iif->ctr.cnr | ((bcs->channel + 1) << 8));
-
- /* minimum size, all structs empty */
- msgsize = CAPI_CONNECT_ACTIVE_IND_BASELEN;
-
- /* ToDo: set parameter: Connected number
- * (requires ev-layer state machine extension to collect
- * ZCON device reply)
- */
-
- /* build and emit CONNECT_ACTIVE_IND message */
- skb = alloc_skb(msgsize, GFP_ATOMIC);
- if (!skb) {
- dev_err(cs->dev, "%s: out of memory\n", __func__);
- return;
- }
- if (capi_cmsg2message(&iif->hcmsg, __skb_put(skb, msgsize))) {
- dev_err(cs->dev, "%s: message parser failure\n", __func__);
- dev_kfree_skb_any(skb);
- return;
- }
- dump_cmsg(DEBUG_CMD, __func__, &iif->hcmsg);
- capi_ctr_handle_message(&iif->ctr, ap->id, skb);
-}
-
-/**
- * gigaset_isdn_hupD() - signal D channel hangup
- * @bcs: B channel descriptor structure.
- *
- * Called by main module at tasklet level to notify the LL that the D channel
- * connection has been shut down.
- */
-void gigaset_isdn_hupD(struct bc_state *bcs)
-{
- struct gigaset_capi_appl *ap;
- unsigned long flags;
-
- /*
- * ToDo: pass on reason code reported by device
- * (requires ev-layer state machine extension to collect
- * ZCAU device reply)
- */
- spin_lock_irqsave(&bcs->aplock, flags);
- while (bcs->ap != NULL) {
- ap = bcs->ap;
- bcs->ap = ap->bcnext;
- spin_unlock_irqrestore(&bcs->aplock, flags);
- send_disconnect_b3_ind(bcs, ap);
- send_disconnect_ind(bcs, ap, 0);
- spin_lock_irqsave(&bcs->aplock, flags);
- }
- bcs->apconnstate = APCONN_NONE;
- spin_unlock_irqrestore(&bcs->aplock, flags);
-}
-
-/**
- * gigaset_isdn_connB() - signal B channel connect
- * @bcs: B channel descriptor structure.
- *
- * Called by main module at tasklet level to notify the LL that the B channel
- * connection has been established.
- */
-void gigaset_isdn_connB(struct bc_state *bcs)
-{
- struct cardstate *cs = bcs->cs;
- struct gigaset_capi_ctr *iif = cs->iif;
- struct gigaset_capi_appl *ap;
- struct sk_buff *skb;
- unsigned long flags;
- unsigned int msgsize;
- u8 command;
-
- spin_lock_irqsave(&bcs->aplock, flags);
- ap = bcs->ap;
- if (!ap) {
- spin_unlock_irqrestore(&bcs->aplock, flags);
- gig_dbg(DEBUG_CMD, "%s: application gone", __func__);
- return;
- }
- if (!bcs->apconnstate) {
- spin_unlock_irqrestore(&bcs->aplock, flags);
- dev_warn(cs->dev, "%s: application %u not connected\n",
- __func__, ap->id);
- return;
- }
-
- /*
- * emit CONNECT_B3_ACTIVE_IND if we already got CONNECT_B3_REQ;
- * otherwise we have to emit CONNECT_B3_IND first, and follow up with
- * CONNECT_B3_ACTIVE_IND in reply to CONNECT_B3_RESP
- * Parameters in both cases always: NCCI = 1, NCPI empty
- */
- if (bcs->apconnstate >= APCONN_ACTIVE) {
- command = CAPI_CONNECT_B3_ACTIVE;
- msgsize = CAPI_CONNECT_B3_ACTIVE_IND_BASELEN;
- } else {
- command = CAPI_CONNECT_B3;
- msgsize = CAPI_CONNECT_B3_IND_BASELEN;
- }
- bcs->apconnstate = APCONN_ACTIVE;
-
- spin_unlock_irqrestore(&bcs->aplock, flags);
-
- while (ap->bcnext) {
- /* this should never happen */
- dev_warn(cs->dev, "%s: dropping extra application %u\n",
- __func__, ap->bcnext->id);
- send_disconnect_ind(bcs, ap->bcnext,
- CapiCallGivenToOtherApplication);
- ap->bcnext = ap->bcnext->bcnext;
- }
-
- capi_cmsg_header(&iif->hcmsg, ap->id, command, CAPI_IND,
- ap->nextMessageNumber++,
- iif->ctr.cnr | ((bcs->channel + 1) << 8) | (1 << 16));
- skb = alloc_skb(msgsize, GFP_ATOMIC);
- if (!skb) {
- dev_err(cs->dev, "%s: out of memory\n", __func__);
- return;
- }
- if (capi_cmsg2message(&iif->hcmsg, __skb_put(skb, msgsize))) {
- dev_err(cs->dev, "%s: message parser failure\n", __func__);
- dev_kfree_skb_any(skb);
- return;
- }
- dump_cmsg(DEBUG_CMD, __func__, &iif->hcmsg);
- capi_ctr_handle_message(&iif->ctr, ap->id, skb);
-}
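-
-/*
- * Editorial summary (not from the original source): bcs->apconnstate
- * transitions, as implemented by the handlers in this file:
- *
- *	APCONN_NONE   -> APCONN_SETUP	CONNECT_IND / CONNECT_REQ
- *	APCONN_SETUP  -> APCONN_ACTIVE	CONNECT_B3_REQ / gigaset_isdn_connB()
- *	APCONN_ACTIVE -> APCONN_SETUP	DISCONNECT_B3_IND / CONNECT_B3_RESP reject
- *	any           -> APCONN_NONE	D channel hangup / application release
- */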
-
-/**
- * gigaset_isdn_hupB() - signal B channel hangup
- * @bcs: B channel descriptor structure.
- *
- * Called by main module to notify the LL that the B channel connection has
- * been shut down.
- */
-void gigaset_isdn_hupB(struct bc_state *bcs)
-{
- struct gigaset_capi_appl *ap = bcs->ap;
-
- /* ToDo: ensure order of DISCONNECT_B3_IND and DISCONNECT_IND ? */
-
- if (!ap) {
- gig_dbg(DEBUG_CMD, "%s: application gone", __func__);
- return;
- }
-
- send_disconnect_b3_ind(bcs, ap);
-}
-
-/**
- * gigaset_isdn_start() - signal device availability
- * @cs: device descriptor structure.
- *
- * Called by main module to notify the LL that the device is available for
- * use.
- */
-void gigaset_isdn_start(struct cardstate *cs)
-{
- struct gigaset_capi_ctr *iif = cs->iif;
-
- /* fill profile data: manufacturer name */
- strcpy(iif->ctr.manu, "Siemens");
- /* CAPI and device version */
- iif->ctr.version.majorversion = 2; /* CAPI 2.0 */
- iif->ctr.version.minorversion = 0;
- /* ToDo: check/assert cs->gotfwver? */
- iif->ctr.version.majormanuversion = cs->fwver[0];
- iif->ctr.version.minormanuversion = cs->fwver[1];
- /* number of B channels supported */
- iif->ctr.profile.nbchannel = cs->channels;
- /* global options: internal controller, supplementary services */
- iif->ctr.profile.goptions = 0x11;
- /* B1 protocols: 64 kbit/s HDLC or transparent */
- iif->ctr.profile.support1 = 0x03;
- /* B2 protocols: transparent only */
- /* ToDo: X.75 SLP ? */
- iif->ctr.profile.support2 = 0x02;
- /* B3 protocols: transparent only */
- iif->ctr.profile.support3 = 0x01;
- /* no serial number */
- strcpy(iif->ctr.serial, "0");
- capi_ctr_ready(&iif->ctr);
-}
-
-/**
- * gigaset_isdn_stop() - signal device unavailability
- * @cs: device descriptor structure.
- *
- * Called by main module to notify the LL that the device is no longer
- * available for use.
- */
-void gigaset_isdn_stop(struct cardstate *cs)
-{
- struct gigaset_capi_ctr *iif = cs->iif;
- capi_ctr_down(&iif->ctr);
-}
-
-/*
- * kernel CAPI callback methods
- * ============================
- */
-
-/*
- * register CAPI application
- */
-static void gigaset_register_appl(struct capi_ctr *ctr, u16 appl,
- capi_register_params *rp)
-{
- struct gigaset_capi_ctr *iif
- = container_of(ctr, struct gigaset_capi_ctr, ctr);
- struct cardstate *cs = ctr->driverdata;
- struct gigaset_capi_appl *ap;
-
- gig_dbg(DEBUG_CMD, "%s [%u] l3cnt=%u blkcnt=%u blklen=%u",
- __func__, appl, rp->level3cnt, rp->datablkcnt, rp->datablklen);
-
- list_for_each_entry(ap, &iif->appls, ctrlist)
- if (ap->id == appl) {
- dev_notice(cs->dev,
- "application %u already registered\n", appl);
- return;
- }
-
- ap = kzalloc(sizeof(*ap), GFP_KERNEL);
- if (!ap) {
- dev_err(cs->dev, "%s: out of memory\n", __func__);
- return;
- }
- ap->id = appl;
- ap->rp = *rp;
-
- list_add(&ap->ctrlist, &iif->appls);
- dev_info(cs->dev, "application %u registered\n", ap->id);
-}
-
-/*
- * remove CAPI application from channel
- * helper function to keep indentation levels down and stay in 80 columns
- */
-
-static inline void remove_appl_from_channel(struct bc_state *bcs,
- struct gigaset_capi_appl *ap)
-{
- struct cardstate *cs = bcs->cs;
- struct gigaset_capi_appl *bcap;
- unsigned long flags;
- int prevconnstate;
-
- spin_lock_irqsave(&bcs->aplock, flags);
- bcap = bcs->ap;
- if (bcap == NULL) {
- spin_unlock_irqrestore(&bcs->aplock, flags);
- return;
- }
-
- /* check first application on channel */
- if (bcap == ap) {
- bcs->ap = ap->bcnext;
- if (bcs->ap != NULL) {
- spin_unlock_irqrestore(&bcs->aplock, flags);
- return;
- }
-
- /* none left, clear channel state */
- prevconnstate = bcs->apconnstate;
- bcs->apconnstate = APCONN_NONE;
- spin_unlock_irqrestore(&bcs->aplock, flags);
-
- if (prevconnstate == APCONN_ACTIVE) {
- dev_notice(cs->dev, "%s: hanging up channel %u\n",
- __func__, bcs->channel);
- gigaset_add_event(cs, &bcs->at_state,
- EV_HUP, NULL, 0, NULL);
- gigaset_schedule_event(cs);
- }
- return;
- }
-
- /* check remaining list */
- do {
- if (bcap->bcnext == ap) {
- bcap->bcnext = bcap->bcnext->bcnext;
- spin_unlock_irqrestore(&bcs->aplock, flags);
- return;
- }
- bcap = bcap->bcnext;
- } while (bcap != NULL);
- spin_unlock_irqrestore(&bcs->aplock, flags);
-}
-
-/*
- * release CAPI application
- */
-static void gigaset_release_appl(struct capi_ctr *ctr, u16 appl)
-{
- struct gigaset_capi_ctr *iif
- = container_of(ctr, struct gigaset_capi_ctr, ctr);
- struct cardstate *cs = iif->ctr.driverdata;
- struct gigaset_capi_appl *ap, *tmp;
- unsigned ch;
-
- gig_dbg(DEBUG_CMD, "%s [%u]", __func__, appl);
-
- list_for_each_entry_safe(ap, tmp, &iif->appls, ctrlist)
- if (ap->id == appl) {
- /* remove from any channels */
- for (ch = 0; ch < cs->channels; ch++)
- remove_appl_from_channel(&cs->bcs[ch], ap);
-
- /* remove from registration list */
- list_del(&ap->ctrlist);
- kfree(ap);
- dev_info(cs->dev, "application %u released\n", appl);
- }
-}
-
-/*
- * =====================================================================
- * outgoing CAPI message handler
- * =====================================================================
- */
-
-/*
- * helper function: emit reply message with given Info value
- */
-static void send_conf(struct gigaset_capi_ctr *iif,
- struct gigaset_capi_appl *ap,
- struct sk_buff *skb,
- u16 info)
-{
- struct cardstate *cs = iif->ctr.driverdata;
-
- /*
- * _CONF replies always only have NCCI and Info parameters
- * so they'll fit into the _REQ message skb
- */
- capi_cmsg_answer(&iif->acmsg);
- iif->acmsg.Info = info;
- if (capi_cmsg2message(&iif->acmsg, skb->data)) {
- dev_err(cs->dev, "%s: message parser failure\n", __func__);
- dev_kfree_skb_any(skb);
- return;
- }
- __skb_trim(skb, CAPI_STDCONF_LEN);
- dump_cmsg(DEBUG_CMD, __func__, &iif->acmsg);
- capi_ctr_handle_message(&iif->ctr, ap->id, skb);
-}
-
-/*
- * process FACILITY_REQ message
- */
-static void do_facility_req(struct gigaset_capi_ctr *iif,
- struct gigaset_capi_appl *ap,
- struct sk_buff *skb)
-{
- struct cardstate *cs = iif->ctr.driverdata;
- _cmsg *cmsg = &iif->acmsg;
- struct sk_buff *cskb;
- u8 *pparam;
- unsigned int msgsize = CAPI_FACILITY_CONF_BASELEN;
- u16 function, info;
- static u8 confparam[10]; /* max. 9 octets + length byte */
-
- /* decode message */
- if (capi_message2cmsg(cmsg, skb->data)) {
- dev_err(cs->dev, "%s: message parser failure\n", __func__);
- dev_kfree_skb_any(skb);
- return;
- }
- dump_cmsg(DEBUG_CMD, __func__, cmsg);
-
- /*
- * Facility Request Parameter is not decoded by capi_message2cmsg()
- * encoding depends on Facility Selector
- */
- switch (cmsg->FacilitySelector) {
- case CAPI_FACILITY_DTMF: /* ToDo */
- info = CapiFacilityNotSupported;
- confparam[0] = 2; /* length */
- /* DTMF information: Unknown DTMF request */
- capimsg_setu16(confparam, 1, 2);
- break;
-
- case CAPI_FACILITY_V42BIS: /* not supported */
- info = CapiFacilityNotSupported;
- confparam[0] = 2; /* length */
- /* V.42 bis information: not available */
- capimsg_setu16(confparam, 1, 1);
- break;
-
- case CAPI_FACILITY_SUPPSVC:
- /* decode Function parameter */
- pparam = cmsg->FacilityRequestParameter;
- if (pparam == NULL || pparam[0] < 2) {
- dev_notice(cs->dev, "%s: %s missing\n", "FACILITY_REQ",
- "Facility Request Parameter");
- send_conf(iif, ap, skb, CapiIllMessageParmCoding);
- return;
- }
- function = CAPIMSG_U16(pparam, 1);
- switch (function) {
- case CAPI_SUPPSVC_GETSUPPORTED:
- info = CapiSuccess;
- /* Supplementary Service specific parameter */
- confparam[3] = 6; /* length */
- /* Supplementary services info: Success */
- capimsg_setu16(confparam, 4, CapiSuccess);
- /* Supported Services: none */
- capimsg_setu32(confparam, 6, 0);
- break;
- case CAPI_SUPPSVC_LISTEN:
- if (pparam[0] < 7 || pparam[3] < 4) {
- dev_notice(cs->dev, "%s: %s missing\n",
- "FACILITY_REQ", "Notification Mask");
- send_conf(iif, ap, skb,
- CapiIllMessageParmCoding);
- return;
- }
- if (CAPIMSG_U32(pparam, 4) != 0) {
- dev_notice(cs->dev,
- "%s: unsupported supplementary service notification mask 0x%x\n",
- "FACILITY_REQ", CAPIMSG_U32(pparam, 4));
- info = CapiFacilitySpecificFunctionNotSupported;
- confparam[3] = 2; /* length */
- capimsg_setu16(confparam, 4,
- CapiSupplementaryServiceNotSupported);
- break;
- }
- info = CapiSuccess;
- confparam[3] = 2; /* length */
- capimsg_setu16(confparam, 4, CapiSuccess);
- break;
-
- /* ToDo: add supported services */
-
- default:
- dev_notice(cs->dev,
- "%s: unsupported supplementary service function 0x%04x\n",
- "FACILITY_REQ", function);
- info = CapiFacilitySpecificFunctionNotSupported;
- /* Supplementary Service specific parameter */
- confparam[3] = 2; /* length */
- /* Supplementary services info: not supported */
- capimsg_setu16(confparam, 4,
- CapiSupplementaryServiceNotSupported);
- }
-
- /* Facility confirmation parameter */
- confparam[0] = confparam[3] + 3; /* total length */
- /* Function: copy from _REQ message */
- capimsg_setu16(confparam, 1, function);
- /* Supplementary Service specific parameter already set above */
- break;
-
- case CAPI_FACILITY_WAKEUP: /* ToDo */
- info = CapiFacilityNotSupported;
- confparam[0] = 2; /* length */
- /* Number of accepted awake request parameters: 0 */
- capimsg_setu16(confparam, 1, 0);
- break;
-
- default:
- info = CapiFacilityNotSupported;
- confparam[0] = 0; /* empty struct */
- }
-
- /* send FACILITY_CONF with given Info and confirmation parameter */
- dev_kfree_skb_any(skb);
- capi_cmsg_answer(cmsg);
- cmsg->Info = info;
- cmsg->FacilityConfirmationParameter = confparam;
- msgsize += confparam[0]; /* length */
- cskb = alloc_skb(msgsize, GFP_ATOMIC);
- if (!cskb) {
- dev_err(cs->dev, "%s: out of memory\n", __func__);
- return;
- }
- if (capi_cmsg2message(cmsg, __skb_put(cskb, msgsize))) {
- dev_err(cs->dev, "%s: message parser failure\n", __func__);
- dev_kfree_skb_any(cskb);
- return;
- }
- dump_cmsg(DEBUG_CMD, __func__, cmsg);
- capi_ctr_handle_message(&iif->ctr, ap->id, cskb);
-}
-
-
-/*
- * process LISTEN_REQ message
- * just store the masks in the application data structure
- */
-static void do_listen_req(struct gigaset_capi_ctr *iif,
- struct gigaset_capi_appl *ap,
- struct sk_buff *skb)
-{
- struct cardstate *cs = iif->ctr.driverdata;
-
- /* decode message */
- if (capi_message2cmsg(&iif->acmsg, skb->data)) {
- dev_err(cs->dev, "%s: message parser failure\n", __func__);
- dev_kfree_skb_any(skb);
- return;
- }
- dump_cmsg(DEBUG_CMD, __func__, &iif->acmsg);
-
- /* store listening parameters */
- ap->listenInfoMask = iif->acmsg.InfoMask;
- ap->listenCIPmask = iif->acmsg.CIPmask;
- send_conf(iif, ap, skb, CapiSuccess);
-}
-
-/*
- * process ALERT_REQ message
- * nothing to do, Gigaset always alerts anyway
- */
-static void do_alert_req(struct gigaset_capi_ctr *iif,
- struct gigaset_capi_appl *ap,
- struct sk_buff *skb)
-{
- struct cardstate *cs = iif->ctr.driverdata;
-
- /* decode message */
- if (capi_message2cmsg(&iif->acmsg, skb->data)) {
- dev_err(cs->dev, "%s: message parser failure\n", __func__);
- dev_kfree_skb_any(skb);
- return;
- }
- dump_cmsg(DEBUG_CMD, __func__, &iif->acmsg);
- send_conf(iif, ap, skb, CapiAlertAlreadySent);
-}
-
-/*
- * process CONNECT_REQ message
- * allocate a B channel, prepare dial commands, queue a DIAL event,
- * emit CONNECT_CONF reply
- */
-static void do_connect_req(struct gigaset_capi_ctr *iif,
- struct gigaset_capi_appl *ap,
- struct sk_buff *skb)
-{
- struct cardstate *cs = iif->ctr.driverdata;
- _cmsg *cmsg = &iif->acmsg;
- struct bc_state *bcs;
- char **commands;
- char *s;
- u8 *pp;
- unsigned long flags;
- int i, l, lbc, lhlc;
- u16 info;
-
- /* decode message */
- if (capi_message2cmsg(cmsg, skb->data)) {
- dev_err(cs->dev, "%s: message parser failure\n", __func__);
- dev_kfree_skb_any(skb);
- return;
- }
- dump_cmsg(DEBUG_CMD, __func__, cmsg);
-
- /* get free B channel & construct PLCI */
- bcs = gigaset_get_free_channel(cs);
- if (!bcs) {
- dev_notice(cs->dev, "%s: no B channel available\n",
- "CONNECT_REQ");
- send_conf(iif, ap, skb, CapiNoPlciAvailable);
- return;
- }
- spin_lock_irqsave(&bcs->aplock, flags);
- if (bcs->ap != NULL || bcs->apconnstate != APCONN_NONE)
- dev_warn(cs->dev, "%s: channel not properly cleared (%p/%d)\n",
- __func__, bcs->ap, bcs->apconnstate);
- ap->bcnext = NULL;
- bcs->ap = ap;
- bcs->apconnstate = APCONN_SETUP;
- spin_unlock_irqrestore(&bcs->aplock, flags);
-
- bcs->rx_bufsize = ap->rp.datablklen;
- dev_kfree_skb(bcs->rx_skb);
- gigaset_new_rx_skb(bcs);
- cmsg->adr.adrPLCI |= (bcs->channel + 1) << 8;
-
- /* build command table */
- commands = kcalloc(AT_NUM, sizeof(*commands), GFP_KERNEL);
- if (!commands)
- goto oom;
-
- /* encode parameter: Called party number */
- pp = cmsg->CalledPartyNumber;
- if (pp == NULL || *pp == 0) {
- dev_notice(cs->dev, "%s: %s missing\n",
- "CONNECT_REQ", "Called party number");
- info = CapiIllMessageParmCoding;
- goto error;
- }
- l = *pp++;
- /* check type of number/numbering plan byte */
- switch (*pp) {
- case 0x80: /* unknown type / unknown numbering plan */
- case 0x81: /* unknown type / ISDN/Telephony numbering plan */
- break;
- default: /* others: warn about potential misinterpretation */
- dev_notice(cs->dev, "%s: %s type/plan 0x%02x unsupported\n",
- "CONNECT_REQ", "Called party number", *pp);
- }
- pp++;
- l--;
- /* translate "**" internal call prefix to CTP value */
- if (l >= 2 && pp[0] == '*' && pp[1] == '*') {
- s = "^SCTP=0\r";
- pp += 2;
- l -= 2;
- } else {
- s = "^SCTP=1\r";
- }
- commands[AT_TYPE] = kstrdup(s, GFP_KERNEL);
- if (!commands[AT_TYPE])
- goto oom;
- commands[AT_DIAL] = kmalloc(l + 3, GFP_KERNEL);
- if (!commands[AT_DIAL])
- goto oom;
- snprintf(commands[AT_DIAL], l + 3, "D%.*s\r", l, pp);
-
- /* encode parameter: Calling party number */
- pp = cmsg->CallingPartyNumber;
- if (pp != NULL && *pp > 0) {
- l = *pp++;
-
- /* check type of number/numbering plan byte */
- /* ToDo: allow for/handle Ext=1? */
- switch (*pp) {
- case 0x00: /* unknown type / unknown numbering plan */
- case 0x01: /* unknown type / ISDN/Telephony num. plan */
- break;
- default:
- dev_notice(cs->dev,
- "%s: %s type/plan 0x%02x unsupported\n",
- "CONNECT_REQ", "Calling party number", *pp);
- }
- pp++;
- l--;
-
- /* check presentation indicator */
- if (!l) {
- dev_notice(cs->dev, "%s: %s IE truncated\n",
- "CONNECT_REQ", "Calling party number");
- info = CapiIllMessageParmCoding;
- goto error;
- }
- switch (*pp & 0xfc) { /* ignore Screening indicator */
- case 0x80: /* Presentation allowed */
- s = "^SCLIP=1\r";
- break;
- case 0xa0: /* Presentation restricted */
- s = "^SCLIP=0\r";
- break;
- default:
- dev_notice(cs->dev, "%s: invalid %s 0x%02x\n",
- "CONNECT_REQ",
- "Presentation/Screening indicator",
- *pp);
- s = "^SCLIP=1\r";
- }
- commands[AT_CLIP] = kstrdup(s, GFP_KERNEL);
- if (!commands[AT_CLIP])
- goto oom;
- pp++;
- l--;
-
- if (l) {
- /* number */
- commands[AT_MSN] = kmalloc(l + 8, GFP_KERNEL);
- if (!commands[AT_MSN])
- goto oom;
- snprintf(commands[AT_MSN], l + 8, "^SMSN=%.*s\r", l, pp);
- }
- }
-
- /* check parameter: CIP Value */
- if (cmsg->CIPValue >= ARRAY_SIZE(cip2bchlc) ||
- (cmsg->CIPValue > 0 && cip2bchlc[cmsg->CIPValue].bc == NULL)) {
- dev_notice(cs->dev, "%s: unknown CIP value %d\n",
- "CONNECT_REQ", cmsg->CIPValue);
- info = CapiCipValueUnknown;
- goto error;
- }
-
- /*
- * check/encode parameters: BC & HLC
- * must be encoded together as device doesn't accept HLC separately
- * explicit parameters override values derived from CIP
- */
-
- /* determine lengths */
- if (cmsg->BC && cmsg->BC[0]) /* BC specified explicitly */
- lbc = 2 * cmsg->BC[0];
- else if (cip2bchlc[cmsg->CIPValue].bc) /* BC derived from CIP */
- lbc = strlen(cip2bchlc[cmsg->CIPValue].bc);
- else /* no BC */
- lbc = 0;
- if (cmsg->HLC && cmsg->HLC[0]) /* HLC specified explicitly */
- lhlc = 2 * cmsg->HLC[0];
- else if (cip2bchlc[cmsg->CIPValue].hlc) /* HLC derived from CIP */
- lhlc = strlen(cip2bchlc[cmsg->CIPValue].hlc);
- else /* no HLC */
- lhlc = 0;
-
- if (lbc) {
- /* have BC: allocate and assemble command string */
- l = lbc + 7; /* "^SBC=" + value + "\r" + null byte */
- if (lhlc)
- l += lhlc + 7; /* ";^SHLC=" + value */
- commands[AT_BC] = kmalloc(l, GFP_KERNEL);
- if (!commands[AT_BC])
- goto oom;
- strcpy(commands[AT_BC], "^SBC=");
- if (cmsg->BC && cmsg->BC[0]) /* BC specified explicitly */
- decode_ie(cmsg->BC, commands[AT_BC] + 5);
- else /* BC derived from CIP */
- strcpy(commands[AT_BC] + 5,
- cip2bchlc[cmsg->CIPValue].bc);
- if (lhlc) {
- strcpy(commands[AT_BC] + lbc + 5, ";^SHLC=");
- if (cmsg->HLC && cmsg->HLC[0])
- /* HLC specified explicitly */
- decode_ie(cmsg->HLC,
- commands[AT_BC] + lbc + 12);
- else /* HLC derived from CIP */
- strcpy(commands[AT_BC] + lbc + 12,
- cip2bchlc[cmsg->CIPValue].hlc);
- }
- strcpy(commands[AT_BC] + l - 2, "\r");
- } else {
- /* no BC */
- if (lhlc) {
- dev_notice(cs->dev, "%s: cannot set HLC without BC\n",
- "CONNECT_REQ");
- info = CapiIllMessageParmCoding; /* ? */
- goto error;
- }
- }
-
- /* check/encode parameter: B Protocol */
- if (cmsg->BProtocol == CAPI_DEFAULT) {
- bcs->proto2 = L2_HDLC;
- dev_warn(cs->dev,
- "B2 Protocol X.75 SLP unsupported, using Transparent\n");
- } else {
- switch (cmsg->B1protocol) {
- case 0:
- bcs->proto2 = L2_HDLC;
- break;
- case 1:
- bcs->proto2 = L2_VOICE;
- break;
- default:
- dev_warn(cs->dev,
- "B1 Protocol %u unsupported, using Transparent\n",
- cmsg->B1protocol);
- bcs->proto2 = L2_VOICE;
- }
- if (cmsg->B2protocol != 1)
- dev_warn(cs->dev,
- "B2 Protocol %u unsupported, using Transparent\n",
- cmsg->B2protocol);
- if (cmsg->B3protocol != 0)
- dev_warn(cs->dev,
- "B3 Protocol %u unsupported, using Transparent\n",
- cmsg->B3protocol);
- ignore_cstruct_param(cs, cmsg->B1configuration,
- "CONNECT_REQ", "B1 Configuration");
- ignore_cstruct_param(cs, cmsg->B2configuration,
- "CONNECT_REQ", "B2 Configuration");
- ignore_cstruct_param(cs, cmsg->B3configuration,
- "CONNECT_REQ", "B3 Configuration");
- }
- commands[AT_PROTO] = kmalloc(9, GFP_KERNEL);
- if (!commands[AT_PROTO])
- goto oom;
- snprintf(commands[AT_PROTO], 9, "^SBPR=%u\r", bcs->proto2);
-
- /* ToDo: check/encode remaining parameters */
- ignore_cstruct_param(cs, cmsg->CalledPartySubaddress,
- "CONNECT_REQ", "Called pty subaddr");
- ignore_cstruct_param(cs, cmsg->CallingPartySubaddress,
- "CONNECT_REQ", "Calling pty subaddr");
- ignore_cstruct_param(cs, cmsg->LLC,
- "CONNECT_REQ", "LLC");
- if (cmsg->AdditionalInfo != CAPI_DEFAULT) {
- ignore_cstruct_param(cs, cmsg->BChannelinformation,
- "CONNECT_REQ", "B Channel Information");
- ignore_cstruct_param(cs, cmsg->Keypadfacility,
- "CONNECT_REQ", "Keypad Facility");
- ignore_cstruct_param(cs, cmsg->Useruserdata,
- "CONNECT_REQ", "User-User Data");
- ignore_cstruct_param(cs, cmsg->Facilitydataarray,
- "CONNECT_REQ", "Facility Data Array");
- }
-
- /* encode parameter: B channel to use */
- commands[AT_ISO] = kmalloc(9, GFP_KERNEL);
- if (!commands[AT_ISO])
- goto oom;
- snprintf(commands[AT_ISO], 9, "^SISO=%u\r",
- (unsigned) bcs->channel + 1);
-
- /* queue & schedule EV_DIAL event */
- if (!gigaset_add_event(cs, &bcs->at_state, EV_DIAL, commands,
- bcs->at_state.seq_index, NULL)) {
- info = CAPI_MSGOSRESOURCEERR;
- goto error;
- }
- gigaset_schedule_event(cs);
- send_conf(iif, ap, skb, CapiSuccess);
- return;
-
-oom:
- dev_err(cs->dev, "%s: out of memory\n", __func__);
- info = CAPI_MSGOSRESOURCEERR;
-error:
- if (commands)
- for (i = 0; i < AT_NUM; i++)
- kfree(commands[i]);
- kfree(commands);
- gigaset_free_channel(bcs);
- send_conf(iif, ap, skb, info);
-}
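-
-/*
- * Worked example (editor's addition, values illustrative): a
- * CONNECT_REQ for called party "**11" (internal call) with CIP 1 and
- * default B protocol would yield roughly this command table:
- *
- *	commands[AT_TYPE]  = "^SCTP=0\r"	// "**" prefix stripped
- *	commands[AT_DIAL]  = "D11\r"
- *	commands[AT_BC]    = "^SBC=8090A3\r"	// from cip2bchlc[1]
- *	commands[AT_PROTO] = "^SBPR=<proto2>\r"	// L2_HDLC for CAPI_DEFAULT
- *	commands[AT_ISO]   = "^SISO=1\r"	// first B channel
- */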
-
-/*
- * process CONNECT_RESP message
- * checks protocol parameters and queues an ACCEPT or HUP event
- */
-static void do_connect_resp(struct gigaset_capi_ctr *iif,
- struct gigaset_capi_appl *ap,
- struct sk_buff *skb)
-{
- struct cardstate *cs = iif->ctr.driverdata;
- _cmsg *cmsg = &iif->acmsg;
- struct bc_state *bcs;
- struct gigaset_capi_appl *oap;
- unsigned long flags;
- int channel;
-
- /* decode message */
- if (capi_message2cmsg(cmsg, skb->data)) {
- dev_err(cs->dev, "%s: message parser failure\n", __func__);
- dev_kfree_skb_any(skb);
- return;
- }
- dump_cmsg(DEBUG_CMD, __func__, cmsg);
- dev_kfree_skb_any(skb);
-
- /* extract and check channel number from PLCI */
- channel = (cmsg->adr.adrPLCI >> 8) & 0xff;
- if (!channel || channel > cs->channels) {
- dev_notice(cs->dev, "%s: invalid %s 0x%02x\n",
- "CONNECT_RESP", "PLCI", cmsg->adr.adrPLCI);
- return;
- }
- bcs = cs->bcs + channel - 1;
-
- switch (cmsg->Reject) {
- case 0: /* Accept */
- /* drop all competing applications, keep only this one */
- spin_lock_irqsave(&bcs->aplock, flags);
- while (bcs->ap != NULL) {
- oap = bcs->ap;
- bcs->ap = oap->bcnext;
- if (oap != ap) {
- spin_unlock_irqrestore(&bcs->aplock, flags);
- send_disconnect_ind(bcs, oap,
- CapiCallGivenToOtherApplication);
- spin_lock_irqsave(&bcs->aplock, flags);
- }
- }
- ap->bcnext = NULL;
- bcs->ap = ap;
- spin_unlock_irqrestore(&bcs->aplock, flags);
-
- bcs->rx_bufsize = ap->rp.datablklen;
- dev_kfree_skb(bcs->rx_skb);
- gigaset_new_rx_skb(bcs);
- bcs->chstate |= CHS_NOTIFY_LL;
-
- /* check/encode B channel protocol */
- if (cmsg->BProtocol == CAPI_DEFAULT) {
- bcs->proto2 = L2_HDLC;
- dev_warn(cs->dev,
- "B2 Protocol X.75 SLP unsupported, using Transparent\n");
- } else {
- switch (cmsg->B1protocol) {
- case 0:
- bcs->proto2 = L2_HDLC;
- break;
- case 1:
- bcs->proto2 = L2_VOICE;
- break;
- default:
- dev_warn(cs->dev,
- "B1 Protocol %u unsupported, using Transparent\n",
- cmsg->B1protocol);
- bcs->proto2 = L2_VOICE;
- }
- if (cmsg->B2protocol != 1)
- dev_warn(cs->dev,
- "B2 Protocol %u unsupported, using Transparent\n",
- cmsg->B2protocol);
- if (cmsg->B3protocol != 0)
- dev_warn(cs->dev,
- "B3 Protocol %u unsupported, using Transparent\n",
- cmsg->B3protocol);
- ignore_cstruct_param(cs, cmsg->B1configuration,
- "CONNECT_RESP", "B1 Configuration");
- ignore_cstruct_param(cs, cmsg->B2configuration,
- "CONNECT_RESP", "B2 Configuration");
- ignore_cstruct_param(cs, cmsg->B3configuration,
- "CONNECT_RESP", "B3 Configuration");
- }
-
- /* ToDo: check/encode remaining parameters */
- ignore_cstruct_param(cs, cmsg->ConnectedNumber,
- "CONNECT_RESP", "Connected Number");
- ignore_cstruct_param(cs, cmsg->ConnectedSubaddress,
- "CONNECT_RESP", "Connected Subaddress");
- ignore_cstruct_param(cs, cmsg->LLC,
- "CONNECT_RESP", "LLC");
- if (cmsg->AdditionalInfo != CAPI_DEFAULT) {
- ignore_cstruct_param(cs, cmsg->BChannelinformation,
- "CONNECT_RESP", "BChannel Information");
- ignore_cstruct_param(cs, cmsg->Keypadfacility,
- "CONNECT_RESP", "Keypad Facility");
- ignore_cstruct_param(cs, cmsg->Useruserdata,
- "CONNECT_RESP", "User-User Data");
- ignore_cstruct_param(cs, cmsg->Facilitydataarray,
- "CONNECT_RESP", "Facility Data Array");
- }
-
- /* Accept call */
- if (!gigaset_add_event(cs, &cs->bcs[channel - 1].at_state,
- EV_ACCEPT, NULL, 0, NULL))
- return;
- gigaset_schedule_event(cs);
- return;
-
- case 1: /* Ignore */
- /* send DISCONNECT_IND to this application */
- send_disconnect_ind(bcs, ap, 0);
-
- /* remove it from the list of listening apps */
- spin_lock_irqsave(&bcs->aplock, flags);
- if (bcs->ap == ap) {
- bcs->ap = ap->bcnext;
- if (bcs->ap == NULL) {
- /* last one: stop ev-layer hupD notifications */
- bcs->apconnstate = APCONN_NONE;
- bcs->chstate &= ~CHS_NOTIFY_LL;
- }
- spin_unlock_irqrestore(&bcs->aplock, flags);
- return;
- }
- for (oap = bcs->ap; oap != NULL; oap = oap->bcnext) {
- if (oap->bcnext == ap) {
- oap->bcnext = oap->bcnext->bcnext;
- spin_unlock_irqrestore(&bcs->aplock, flags);
- return;
- }
- }
- spin_unlock_irqrestore(&bcs->aplock, flags);
- dev_err(cs->dev, "%s: application %u not found\n",
- __func__, ap->id);
- return;
-
- default: /* Reject */
- /* drop all competing applications, keep only this one */
- spin_lock_irqsave(&bcs->aplock, flags);
- while (bcs->ap != NULL) {
- oap = bcs->ap;
- bcs->ap = oap->bcnext;
- if (oap != ap) {
- spin_unlock_irqrestore(&bcs->aplock, flags);
- send_disconnect_ind(bcs, oap,
- CapiCallGivenToOtherApplication);
- spin_lock_irqsave(&bcs->aplock, flags);
- }
- }
- ap->bcnext = NULL;
- bcs->ap = ap;
- spin_unlock_irqrestore(&bcs->aplock, flags);
-
- /* reject call - will trigger DISCONNECT_IND for this app */
- dev_info(cs->dev, "%s: Reject=%x\n",
- "CONNECT_RESP", cmsg->Reject);
- if (!gigaset_add_event(cs, &cs->bcs[channel - 1].at_state,
- EV_HUP, NULL, 0, NULL))
- return;
- gigaset_schedule_event(cs);
- return;
- }
-}
-
-/*
- * process CONNECT_B3_REQ message
- * build NCCI and emit CONNECT_B3_CONF reply
- */
-static void do_connect_b3_req(struct gigaset_capi_ctr *iif,
- struct gigaset_capi_appl *ap,
- struct sk_buff *skb)
-{
- struct cardstate *cs = iif->ctr.driverdata;
- _cmsg *cmsg = &iif->acmsg;
- struct bc_state *bcs;
- int channel;
-
- /* decode message */
- if (capi_message2cmsg(cmsg, skb->data)) {
- dev_err(cs->dev, "%s: message parser failure\n", __func__);
- dev_kfree_skb_any(skb);
- return;
- }
- dump_cmsg(DEBUG_CMD, __func__, cmsg);
-
- /* extract and check channel number from PLCI */
- channel = (cmsg->adr.adrPLCI >> 8) & 0xff;
- if (!channel || channel > cs->channels) {
- dev_notice(cs->dev, "%s: invalid %s 0x%02x\n",
- "CONNECT_B3_REQ", "PLCI", cmsg->adr.adrPLCI);
- send_conf(iif, ap, skb, CapiIllContrPlciNcci);
- return;
- }
- bcs = &cs->bcs[channel - 1];
-
- /* mark logical connection active */
- bcs->apconnstate = APCONN_ACTIVE;
-
- /* build NCCI: always 1 (one B3 connection only) */
- cmsg->adr.adrNCCI |= 1 << 16;
-
- /* NCPI parameter: not applicable for B3 Transparent */
- ignore_cstruct_param(cs, cmsg->NCPI, "CONNECT_B3_REQ", "NCPI");
- send_conf(iif, ap, skb,
- (cmsg->NCPI && cmsg->NCPI[0]) ?
- CapiNcpiNotSupportedByProtocol : CapiSuccess);
-}
-
-/*
- * process CONNECT_B3_RESP message
- * Depending on the Reject parameter, either emit CONNECT_B3_ACTIVE_IND
- * or queue EV_HUP and emit DISCONNECT_B3_IND.
- * The emitted message is always shorter than the received one,
- * so the skb can be reused.
- */
-static void do_connect_b3_resp(struct gigaset_capi_ctr *iif,
- struct gigaset_capi_appl *ap,
- struct sk_buff *skb)
-{
- struct cardstate *cs = iif->ctr.driverdata;
- _cmsg *cmsg = &iif->acmsg;
- struct bc_state *bcs;
- int channel;
- unsigned int msgsize;
- u8 command;
-
- /* decode message */
- if (capi_message2cmsg(cmsg, skb->data)) {
- dev_err(cs->dev, "%s: message parser failure\n", __func__);
- dev_kfree_skb_any(skb);
- return;
- }
- dump_cmsg(DEBUG_CMD, __func__, cmsg);
-
- /* extract and check channel number and NCCI */
- channel = (cmsg->adr.adrNCCI >> 8) & 0xff;
- if (!channel || channel > cs->channels ||
- ((cmsg->adr.adrNCCI >> 16) & 0xffff) != 1) {
- dev_notice(cs->dev, "%s: invalid %s 0x%02x\n",
- "CONNECT_B3_RESP", "NCCI", cmsg->adr.adrNCCI);
- dev_kfree_skb_any(skb);
- return;
- }
- bcs = &cs->bcs[channel - 1];
-
- if (cmsg->Reject) {
- /* Reject: clear B3 connect received flag */
- bcs->apconnstate = APCONN_SETUP;
-
- /* trigger hangup, causing eventual DISCONNECT_IND */
- if (!gigaset_add_event(cs, &bcs->at_state,
- EV_HUP, NULL, 0, NULL)) {
- dev_kfree_skb_any(skb);
- return;
- }
- gigaset_schedule_event(cs);
-
- /* emit DISCONNECT_B3_IND */
- command = CAPI_DISCONNECT_B3;
- msgsize = CAPI_DISCONNECT_B3_IND_BASELEN;
- } else {
- /*
- * Accept: emit CONNECT_B3_ACTIVE_IND immediately, as
- * we only send CONNECT_B3_IND if the B channel is up
- */
- command = CAPI_CONNECT_B3_ACTIVE;
- msgsize = CAPI_CONNECT_B3_ACTIVE_IND_BASELEN;
- }
- capi_cmsg_header(cmsg, ap->id, command, CAPI_IND,
- ap->nextMessageNumber++, cmsg->adr.adrNCCI);
- __skb_trim(skb, msgsize);
- if (capi_cmsg2message(cmsg, skb->data)) {
- dev_err(cs->dev, "%s: message parser failure\n", __func__);
- dev_kfree_skb_any(skb);
- return;
- }
- dump_cmsg(DEBUG_CMD, __func__, cmsg);
- capi_ctr_handle_message(&iif->ctr, ap->id, skb);
-}
-
-/*
- * process DISCONNECT_REQ message
- * schedule EV_HUP and emit DISCONNECT_B3_IND if necessary,
- * emit DISCONNECT_CONF reply
- */
-static void do_disconnect_req(struct gigaset_capi_ctr *iif,
- struct gigaset_capi_appl *ap,
- struct sk_buff *skb)
-{
- struct cardstate *cs = iif->ctr.driverdata;
- _cmsg *cmsg = &iif->acmsg;
- struct bc_state *bcs;
- _cmsg *b3cmsg;
- struct sk_buff *b3skb;
- int channel;
-
- /* decode message */
- if (capi_message2cmsg(cmsg, skb->data)) {
- dev_err(cs->dev, "%s: message parser failure\n", __func__);
- dev_kfree_skb_any(skb);
- return;
- }
- dump_cmsg(DEBUG_CMD, __func__, cmsg);
-
- /* extract and check channel number from PLCI */
- channel = (cmsg->adr.adrPLCI >> 8) & 0xff;
- if (!channel || channel > cs->channels) {
- dev_notice(cs->dev, "%s: invalid %s 0x%02x\n",
- "DISCONNECT_REQ", "PLCI", cmsg->adr.adrPLCI);
- send_conf(iif, ap, skb, CapiIllContrPlciNcci);
- return;
- }
- bcs = cs->bcs + channel - 1;
-
- /* ToDo: process parameter: Additional info */
- if (cmsg->AdditionalInfo != CAPI_DEFAULT) {
- ignore_cstruct_param(cs, cmsg->BChannelinformation,
- "DISCONNECT_REQ", "B Channel Information");
- ignore_cstruct_param(cs, cmsg->Keypadfacility,
- "DISCONNECT_REQ", "Keypad Facility");
- ignore_cstruct_param(cs, cmsg->Useruserdata,
- "DISCONNECT_REQ", "User-User Data");
- ignore_cstruct_param(cs, cmsg->Facilitydataarray,
- "DISCONNECT_REQ", "Facility Data Array");
- }
-
- /* skip if DISCONNECT_IND already sent */
- if (!bcs->apconnstate)
- return;
-
- /* check for active logical connection */
- if (bcs->apconnstate >= APCONN_ACTIVE) {
- /* clear it */
- bcs->apconnstate = APCONN_SETUP;
-
- /*
- * emit DISCONNECT_B3_IND with cause 0x3301
- * use separate cmsg structure, as the content of iif->acmsg
- * is still needed for creating the _CONF message
- */
- b3cmsg = kmalloc(sizeof(*b3cmsg), GFP_KERNEL);
- if (!b3cmsg) {
- dev_err(cs->dev, "%s: out of memory\n", __func__);
- send_conf(iif, ap, skb, CAPI_MSGOSRESOURCEERR);
- return;
- }
- capi_cmsg_header(b3cmsg, ap->id, CAPI_DISCONNECT_B3, CAPI_IND,
- ap->nextMessageNumber++,
- cmsg->adr.adrPLCI | (1 << 16));
- b3cmsg->Reason_B3 = CapiProtocolErrorLayer1;
- b3skb = alloc_skb(CAPI_DISCONNECT_B3_IND_BASELEN, GFP_KERNEL);
- if (b3skb == NULL) {
- dev_err(cs->dev, "%s: out of memory\n", __func__);
- send_conf(iif, ap, skb, CAPI_MSGOSRESOURCEERR);
- kfree(b3cmsg);
- return;
- }
- if (capi_cmsg2message(b3cmsg,
- __skb_put(b3skb, CAPI_DISCONNECT_B3_IND_BASELEN))) {
- dev_err(cs->dev, "%s: message parser failure\n",
- __func__);
- kfree(b3cmsg);
- dev_kfree_skb_any(b3skb);
- return;
- }
- dump_cmsg(DEBUG_CMD, __func__, b3cmsg);
- kfree(b3cmsg);
- capi_ctr_handle_message(&iif->ctr, ap->id, b3skb);
- }
-
- /* trigger hangup, causing eventual DISCONNECT_IND */
- if (!gigaset_add_event(cs, &bcs->at_state, EV_HUP, NULL, 0, NULL)) {
- send_conf(iif, ap, skb, CAPI_MSGOSRESOURCEERR);
- return;
- }
- gigaset_schedule_event(cs);
-
- /* emit reply */
- send_conf(iif, ap, skb, CapiSuccess);
-}
-
-/*
- * process DISCONNECT_B3_REQ message
- * schedule EV_HUP and emit DISCONNECT_B3_CONF reply
- */
-static void do_disconnect_b3_req(struct gigaset_capi_ctr *iif,
- struct gigaset_capi_appl *ap,
- struct sk_buff *skb)
-{
- struct cardstate *cs = iif->ctr.driverdata;
- _cmsg *cmsg = &iif->acmsg;
- struct bc_state *bcs;
- int channel;
-
- /* decode message */
- if (capi_message2cmsg(cmsg, skb->data)) {
- dev_err(cs->dev, "%s: message parser failure\n", __func__);
- dev_kfree_skb_any(skb);
- return;
- }
- dump_cmsg(DEBUG_CMD, __func__, cmsg);
-
- /* extract and check channel number and NCCI */
- channel = (cmsg->adr.adrNCCI >> 8) & 0xff;
- if (!channel || channel > cs->channels ||
- ((cmsg->adr.adrNCCI >> 16) & 0xffff) != 1) {
- dev_notice(cs->dev, "%s: invalid %s 0x%02x\n",
- "DISCONNECT_B3_REQ", "NCCI", cmsg->adr.adrNCCI);
- send_conf(iif, ap, skb, CapiIllContrPlciNcci);
- return;
- }
- bcs = &cs->bcs[channel - 1];
-
- /* reject if logical connection not active */
- if (bcs->apconnstate < APCONN_ACTIVE) {
- send_conf(iif, ap, skb,
- CapiMessageNotSupportedInCurrentState);
- return;
- }
-
- /* trigger hangup, causing eventual DISCONNECT_B3_IND */
- if (!gigaset_add_event(cs, &bcs->at_state, EV_HUP, NULL, 0, NULL)) {
- send_conf(iif, ap, skb, CAPI_MSGOSRESOURCEERR);
- return;
- }
- gigaset_schedule_event(cs);
-
- /* NCPI parameter: not applicable for B3 Transparent */
- ignore_cstruct_param(cs, cmsg->NCPI,
- "DISCONNECT_B3_REQ", "NCPI");
- send_conf(iif, ap, skb,
- (cmsg->NCPI && cmsg->NCPI[0]) ?
- CapiNcpiNotSupportedByProtocol : CapiSuccess);
-}
-
-/*
- * process DATA_B3_REQ message
- */
-static void do_data_b3_req(struct gigaset_capi_ctr *iif,
- struct gigaset_capi_appl *ap,
- struct sk_buff *skb)
-{
- struct cardstate *cs = iif->ctr.driverdata;
- struct bc_state *bcs;
- int channel = CAPIMSG_PLCI_PART(skb->data);
- u16 ncci = CAPIMSG_NCCI_PART(skb->data);
- u16 msglen = CAPIMSG_LEN(skb->data);
- u16 datalen = CAPIMSG_DATALEN(skb->data);
- u16 flags = CAPIMSG_FLAGS(skb->data);
- u16 msgid = CAPIMSG_MSGID(skb->data);
- u16 handle = CAPIMSG_HANDLE_REQ(skb->data);
-
- /* frequent message, avoid _cmsg overhead */
- dump_rawmsg(DEBUG_MCMD, __func__, skb->data);
-
- /* check parameters */
- if (channel == 0 || channel > cs->channels || ncci != 1) {
- dev_notice(cs->dev, "%s: invalid %s 0x%02x\n",
- "DATA_B3_REQ", "NCCI", CAPIMSG_NCCI(skb->data));
- send_conf(iif, ap, skb, CapiIllContrPlciNcci);
- return;
- }
- bcs = &cs->bcs[channel - 1];
- if (msglen != CAPI_DATA_B3_REQ_LEN && msglen != CAPI_DATA_B3_REQ_LEN64)
- dev_notice(cs->dev, "%s: unexpected length %d\n",
- "DATA_B3_REQ", msglen);
- if (msglen + datalen != skb->len)
- dev_notice(cs->dev, "%s: length mismatch (%d+%d!=%d)\n",
- "DATA_B3_REQ", msglen, datalen, skb->len);
- if (msglen + datalen > skb->len) {
- /* message too short for announced data length */
- send_conf(iif, ap, skb, CapiIllMessageParmCoding); /* ? */
- return;
- }
- if (flags & CAPI_FLAGS_RESERVED) {
- dev_notice(cs->dev, "%s: reserved flags set (%x)\n",
- "DATA_B3_REQ", flags);
- send_conf(iif, ap, skb, CapiIllMessageParmCoding);
- return;
- }
-
- /* reject if logical connection not active */
- if (bcs->apconnstate < APCONN_ACTIVE) {
- send_conf(iif, ap, skb, CapiMessageNotSupportedInCurrentState);
- return;
- }
-
- /* pull CAPI message into link layer header */
- skb_reset_mac_header(skb);
- skb->mac_len = msglen;
- skb_pull(skb, msglen);
-
- /* pass to device-specific module */
- if (cs->ops->send_skb(bcs, skb) < 0) {
- send_conf(iif, ap, skb, CAPI_MSGOSRESOURCEERR);
- return;
- }
-
- /*
- * DATA_B3_CONF will be sent by gigaset_skb_sent() only if "delivery
- * confirmation" bit is set; otherwise we have to send it now
- */
- if (!(flags & CAPI_FLAGS_DELIVERY_CONFIRMATION))
- send_data_b3_conf(cs, &iif->ctr, ap->id, msgid, channel, handle,
- flags ? CapiFlagsNotSupportedByProtocol
- : CAPI_NOERROR);
-}
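
The skb_pull() above hides the CAPI header rather than discarding it: skb_reset_mac_header() records the old start of data, so code that later needs header fields (e.g. for the delivery confirmation) can still reach them. A minimal sketch of such a recovery, assuming the CAPIMSG_* accessors used earlier in this function; the handler name is hypothetical:

    static void example_sent_handler(struct sk_buff *skb)
    {
        /* the CAPI header still sits just before skb->data */
        u8 *hdr = skb_mac_header(skb);
        u16 msgid  = CAPIMSG_MSGID(hdr);      /* message number */
        u16 handle = CAPIMSG_HANDLE_REQ(hdr); /* data handle */

        /* ... use msgid/handle to build the DATA_B3_CONF reply ... */
    }
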
-
-/*
- * process RESET_B3_REQ message
- * just always reply "not supported by current protocol"
- */
-static void do_reset_b3_req(struct gigaset_capi_ctr *iif,
- struct gigaset_capi_appl *ap,
- struct sk_buff *skb)
-{
- struct cardstate *cs = iif->ctr.driverdata;
-
- /* decode message */
- if (capi_message2cmsg(&iif->acmsg, skb->data)) {
- dev_err(cs->dev, "%s: message parser failure\n", __func__);
- dev_kfree_skb_any(skb);
- return;
- }
- dump_cmsg(DEBUG_CMD, __func__, &iif->acmsg);
- send_conf(iif, ap, skb,
- CapiResetProcedureNotSupportedByCurrentProtocol);
-}
-
-/*
- * unsupported CAPI message handler
- */
-static void do_unsupported(struct gigaset_capi_ctr *iif,
- struct gigaset_capi_appl *ap,
- struct sk_buff *skb)
-{
- struct cardstate *cs = iif->ctr.driverdata;
-
- /* decode message */
- if (capi_message2cmsg(&iif->acmsg, skb->data)) {
- dev_err(cs->dev, "%s: message parser failure\n", __func__);
- dev_kfree_skb_any(skb);
- return;
- }
- dump_cmsg(DEBUG_CMD, __func__, &iif->acmsg);
- send_conf(iif, ap, skb, CapiMessageNotSupportedInCurrentState);
-}
-
-/*
- * CAPI message handler: no-op
- */
-static void do_nothing(struct gigaset_capi_ctr *iif,
- struct gigaset_capi_appl *ap,
- struct sk_buff *skb)
-{
- struct cardstate *cs = iif->ctr.driverdata;
-
- /* decode message */
- if (capi_message2cmsg(&iif->acmsg, skb->data)) {
- dev_err(cs->dev, "%s: message parser failure\n", __func__);
- dev_kfree_skb_any(skb);
- return;
- }
- dump_cmsg(DEBUG_CMD, __func__, &iif->acmsg);
- dev_kfree_skb_any(skb);
-}
-
-static void do_data_b3_resp(struct gigaset_capi_ctr *iif,
- struct gigaset_capi_appl *ap,
- struct sk_buff *skb)
-{
- dump_rawmsg(DEBUG_MCMD, __func__, skb->data);
- dev_kfree_skb_any(skb);
-}
-
-/* table of outgoing CAPI message handlers with lookup function */
-typedef void (*capi_send_handler_t)(struct gigaset_capi_ctr *,
- struct gigaset_capi_appl *,
- struct sk_buff *);
-
-static struct {
- u16 cmd;
- capi_send_handler_t handler;
-} capi_send_handler_table[] = {
- /* most frequent messages first for faster lookup */
- { CAPI_DATA_B3_REQ, do_data_b3_req },
- { CAPI_DATA_B3_RESP, do_data_b3_resp },
-
- { CAPI_ALERT_REQ, do_alert_req },
- { CAPI_CONNECT_ACTIVE_RESP, do_nothing },
- { CAPI_CONNECT_B3_ACTIVE_RESP, do_nothing },
- { CAPI_CONNECT_B3_REQ, do_connect_b3_req },
- { CAPI_CONNECT_B3_RESP, do_connect_b3_resp },
- { CAPI_CONNECT_B3_T90_ACTIVE_RESP, do_nothing },
- { CAPI_CONNECT_REQ, do_connect_req },
- { CAPI_CONNECT_RESP, do_connect_resp },
- { CAPI_DISCONNECT_B3_REQ, do_disconnect_b3_req },
- { CAPI_DISCONNECT_B3_RESP, do_nothing },
- { CAPI_DISCONNECT_REQ, do_disconnect_req },
- { CAPI_DISCONNECT_RESP, do_nothing },
- { CAPI_FACILITY_REQ, do_facility_req },
- { CAPI_FACILITY_RESP, do_nothing },
- { CAPI_LISTEN_REQ, do_listen_req },
- { CAPI_SELECT_B_PROTOCOL_REQ, do_unsupported },
- { CAPI_RESET_B3_REQ, do_reset_b3_req },
- { CAPI_RESET_B3_RESP, do_nothing },
-
- /*
- * ToDo: support overlap sending (requires ev-layer state
- * machine extension to generate additional ATD commands)
- */
- { CAPI_INFO_REQ, do_unsupported },
- { CAPI_INFO_RESP, do_nothing },
-
- /*
- * ToDo: what's the proper response for these?
- */
- { CAPI_MANUFACTURER_REQ, do_nothing },
- { CAPI_MANUFACTURER_RESP, do_nothing },
-};
-
-/* look up handler */
-static inline capi_send_handler_t lookup_capi_send_handler(const u16 cmd)
-{
- size_t i;
-
- for (i = 0; i < ARRAY_SIZE(capi_send_handler_table); i++)
- if (capi_send_handler_table[i].cmd == cmd)
- return capi_send_handler_table[i].handler;
- return NULL;
-}
-
-
-/**
- * gigaset_send_message() - accept a CAPI message from an application
- * @ctr: controller descriptor structure.
- * @skb: CAPI message.
- *
- * Return value: CAPI error code
- * Note: capidrv (and probably others, too) only uses the return value to
- * decide whether it has to free the skb (only if result != CAPI_NOERROR (0))
- */
-static u16 gigaset_send_message(struct capi_ctr *ctr, struct sk_buff *skb)
-{
- struct gigaset_capi_ctr *iif
- = container_of(ctr, struct gigaset_capi_ctr, ctr);
- struct cardstate *cs = ctr->driverdata;
- struct gigaset_capi_appl *ap;
- capi_send_handler_t handler;
-
- /* can only handle linear sk_buffs */
- if (skb_linearize(skb) < 0) {
- dev_warn(cs->dev, "%s: skb_linearize failed\n", __func__);
- return CAPI_MSGOSRESOURCEERR;
- }
-
- /* retrieve application data structure */
- ap = get_appl(iif, CAPIMSG_APPID(skb->data));
- if (!ap) {
- dev_notice(cs->dev, "%s: application %u not registered\n",
- __func__, CAPIMSG_APPID(skb->data));
- return CAPI_ILLAPPNR;
- }
-
- /* look up command */
- handler = lookup_capi_send_handler(CAPIMSG_CMD(skb->data));
- if (!handler) {
- /* unknown/unsupported message type */
- if (printk_ratelimit())
- dev_notice(cs->dev, "%s: unsupported message %u\n",
- __func__, CAPIMSG_CMD(skb->data));
- return CAPI_ILLCMDORSUBCMDORMSGTOSMALL;
- }
-
- /* serialize */
- if (atomic_add_return(1, &iif->sendqlen) > 1) {
- /* queue behind other messages */
- skb_queue_tail(&iif->sendqueue, skb);
- return CAPI_NOERROR;
- }
-
- /* process message */
- handler(iif, ap, skb);
-
- /* process other messages arrived in the meantime */
- while (atomic_sub_return(1, &iif->sendqlen) > 0) {
- skb = skb_dequeue(&iif->sendqueue);
- if (!skb) {
- /* should never happen */
- dev_err(cs->dev, "%s: send queue empty\n", __func__);
- continue;
- }
- ap = get_appl(iif, CAPIMSG_APPID(skb->data));
- if (!ap) {
- /* could that happen? */
- dev_warn(cs->dev, "%s: application %u vanished\n",
- __func__, CAPIMSG_APPID(skb->data));
- continue;
- }
- handler = lookup_capi_send_handler(CAPIMSG_CMD(skb->data));
- if (!handler) {
- /* should never happen */
- dev_err(cs->dev, "%s: handler %x vanished\n",
- __func__, CAPIMSG_CMD(skb->data));
- continue;
- }
- handler(iif, ap, skb);
- }
-
- return CAPI_NOERROR;
-}
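
The atomic counter gives lock-free serialization: the caller whose increment takes sendqlen from 0 to 1 becomes the processor and drains the queue; all later callers merely enqueue. Stripped of the driver specifics, the pattern looks like this (illustrative sketch only; process() is a placeholder):

    static atomic_t qlen = ATOMIC_INIT(0);
    static struct sk_buff_head queue;

    static void process(struct sk_buff *skb); /* placeholder */

    static void submit(struct sk_buff *skb)
    {
        if (atomic_add_return(1, &qlen) > 1) {
            skb_queue_tail(&queue, skb); /* someone else is draining */
            return;
        }
        process(skb); /* we won; drain until the count drops to zero */
        while (atomic_sub_return(1, &qlen) > 0) {
            skb = skb_dequeue(&queue);
            if (skb) /* may race with a not-yet-finished enqueue */
                process(skb);
        }
    }

The NULL check mirrors the "send queue empty" branch above: a second caller increments the counter before it enqueues, so the drain loop can briefly observe a count with no matching skb.
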
-
-/**
- * gigaset_procinfo() - build single line description for controller
- * @ctr: controller descriptor structure.
- *
- * Return value: pointer to generated string (null terminated)
- */
-static char *gigaset_procinfo(struct capi_ctr *ctr)
-{
- return ctr->name; /* ToDo: more? */
-}
-
-static int gigaset_proc_show(struct seq_file *m, void *v)
-{
- struct capi_ctr *ctr = m->private;
- struct cardstate *cs = ctr->driverdata;
- char *s;
- int i;
-
- seq_printf(m, "%-16s %s\n", "name", ctr->name);
- seq_printf(m, "%-16s %s %s\n", "dev",
- dev_driver_string(cs->dev), dev_name(cs->dev));
- seq_printf(m, "%-16s %d\n", "id", cs->myid);
- if (cs->gotfwver)
- seq_printf(m, "%-16s %d.%d.%d.%d\n", "firmware",
- cs->fwver[0], cs->fwver[1], cs->fwver[2], cs->fwver[3]);
- seq_printf(m, "%-16s %d\n", "channels", cs->channels);
- seq_printf(m, "%-16s %s\n", "onechannel", cs->onechannel ? "yes" : "no");
-
- switch (cs->mode) {
- case M_UNKNOWN:
- s = "unknown";
- break;
- case M_CONFIG:
- s = "config";
- break;
- case M_UNIMODEM:
- s = "Unimodem";
- break;
- case M_CID:
- s = "CID";
- break;
- default:
- s = "??";
- }
- seq_printf(m, "%-16s %s\n", "mode", s);
-
- switch (cs->mstate) {
- case MS_UNINITIALIZED:
- s = "uninitialized";
- break;
- case MS_INIT:
- s = "init";
- break;
- case MS_LOCKED:
- s = "locked";
- break;
- case MS_SHUTDOWN:
- s = "shutdown";
- break;
- case MS_RECOVER:
- s = "recover";
- break;
- case MS_READY:
- s = "ready";
- break;
- default:
- s = "??";
- }
- seq_printf(m, "%-16s %s\n", "mstate", s);
-
- seq_printf(m, "%-16s %s\n", "running", cs->running ? "yes" : "no");
- seq_printf(m, "%-16s %s\n", "connected", cs->connected ? "yes" : "no");
- seq_printf(m, "%-16s %s\n", "isdn_up", cs->isdn_up ? "yes" : "no");
- seq_printf(m, "%-16s %s\n", "cidmode", cs->cidmode ? "yes" : "no");
-
- for (i = 0; i < cs->channels; i++) {
- seq_printf(m, "[%d]%-13s %d\n", i, "corrupted",
- cs->bcs[i].corrupted);
- seq_printf(m, "[%d]%-13s %d\n", i, "trans_down",
- cs->bcs[i].trans_down);
- seq_printf(m, "[%d]%-13s %d\n", i, "trans_up",
- cs->bcs[i].trans_up);
- seq_printf(m, "[%d]%-13s %d\n", i, "chstate",
- cs->bcs[i].chstate);
- switch (cs->bcs[i].proto2) {
- case L2_BITSYNC:
- s = "bitsync";
- break;
- case L2_HDLC:
- s = "HDLC";
- break;
- case L2_VOICE:
- s = "voice";
- break;
- default:
- s = "??";
- }
- seq_printf(m, "[%d]%-13s %s\n", i, "proto2", s);
- }
- return 0;
-}
-
-/**
- * gigaset_isdn_regdev() - register device to LL
- * @cs: device descriptor structure.
- * @isdnid: device name.
- *
- * Return value: 0 on success, error code < 0 on failure
- */
-int gigaset_isdn_regdev(struct cardstate *cs, const char *isdnid)
-{
- struct gigaset_capi_ctr *iif;
- int rc;
-
- iif = kzalloc(sizeof(*iif), GFP_KERNEL);
- if (!iif) {
- pr_err("%s: out of memory\n", __func__);
- return -ENOMEM;
- }
-
- /* prepare controller structure */
- iif->ctr.owner = THIS_MODULE;
- iif->ctr.driverdata = cs;
- strncpy(iif->ctr.name, isdnid, sizeof(iif->ctr.name) - 1);
- iif->ctr.driver_name = "gigaset";
- iif->ctr.load_firmware = NULL;
- iif->ctr.reset_ctr = NULL;
- iif->ctr.register_appl = gigaset_register_appl;
- iif->ctr.release_appl = gigaset_release_appl;
- iif->ctr.send_message = gigaset_send_message;
- iif->ctr.procinfo = gigaset_procinfo;
-	iif->ctr.proc_show = gigaset_proc_show;
- INIT_LIST_HEAD(&iif->appls);
- skb_queue_head_init(&iif->sendqueue);
- atomic_set(&iif->sendqlen, 0);
-
- /* register controller with CAPI */
- rc = attach_capi_ctr(&iif->ctr);
- if (rc) {
- pr_err("attach_capi_ctr failed (%d)\n", rc);
- kfree(iif);
- return rc;
- }
-
- cs->iif = iif;
- cs->hw_hdr_len = CAPI_DATA_B3_REQ_LEN;
- return 0;
-}
-
-/**
- * gigaset_isdn_unregdev() - unregister device from LL
- * @cs: device descriptor structure.
- */
-void gigaset_isdn_unregdev(struct cardstate *cs)
-{
- struct gigaset_capi_ctr *iif = cs->iif;
-
- detach_capi_ctr(&iif->ctr);
- kfree(iif);
- cs->iif = NULL;
-}
-
-static struct capi_driver capi_driver_gigaset = {
- .name = "gigaset",
- .revision = "1.0",
-};
-
-/**
- * gigaset_isdn_regdrv() - register driver to LL
- */
-void gigaset_isdn_regdrv(void)
-{
- pr_info("Kernel CAPI interface\n");
- register_capi_driver(&capi_driver_gigaset);
-}
-
-/**
- * gigaset_isdn_unregdrv() - unregister driver from LL
- */
-void gigaset_isdn_unregdrv(void)
-{
- unregister_capi_driver(&capi_driver_gigaset);
-}
diff --git a/drivers/staging/isdn/gigaset/common.c b/drivers/staging/isdn/gigaset/common.c
deleted file mode 100644
index 3bb8092858ab..000000000000
--- a/drivers/staging/isdn/gigaset/common.c
+++ /dev/null
@@ -1,1153 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-/*
- * Stuff used by all variants of the driver
- *
- * Copyright (c) 2001 by Stefan Eilers,
- * Hansjoerg Lipp <hjlipp@web.de>,
- * Tilman Schmidt <tilman@imap.cc>.
- *
- * =====================================================================
- * =====================================================================
- */
-
-#include "gigaset.h"
-#include <linux/module.h>
-#include <linux/moduleparam.h>
-
-/* Version Information */
-#define DRIVER_AUTHOR "Hansjoerg Lipp <hjlipp@web.de>, Tilman Schmidt <tilman@imap.cc>, Stefan Eilers"
-#define DRIVER_DESC "Driver for Gigaset 307x"
-
-#ifdef CONFIG_GIGASET_DEBUG
-#define DRIVER_DESC_DEBUG " (debug build)"
-#else
-#define DRIVER_DESC_DEBUG ""
-#endif
-
-/* Module parameters */
-int gigaset_debuglevel;
-EXPORT_SYMBOL_GPL(gigaset_debuglevel);
-module_param_named(debug, gigaset_debuglevel, int, S_IRUGO | S_IWUSR);
-MODULE_PARM_DESC(debug, "debug level");
-
-/* driver state flags */
-#define VALID_MINOR 0x01
-#define VALID_ID 0x02
-
-/**
- * gigaset_dbg_buffer() - dump data in ASCII and hex for debugging
- * @level: debugging level.
- * @msg: message prefix.
- * @len: number of bytes to dump.
- * @buf: data to dump.
- *
- * If the current debugging level includes one of the bits set in @level,
- * @len bytes starting at @buf are logged to dmesg at KERN_DEBUG prio,
- * prefixed by the text @msg.
- */
-void gigaset_dbg_buffer(enum debuglevel level, const unsigned char *msg,
- size_t len, const unsigned char *buf)
-{
- unsigned char outbuf[80];
- unsigned char c;
- size_t space = sizeof outbuf - 1;
- unsigned char *out = outbuf;
- size_t numin = len;
-
- while (numin--) {
- c = *buf++;
- if (c == '~' || c == '^' || c == '\\') {
- if (!space--)
- break;
- *out++ = '\\';
- }
- if (c & 0x80) {
- if (!space--)
- break;
- *out++ = '~';
- c ^= 0x80;
- }
- if (c < 0x20 || c == 0x7f) {
- if (!space--)
- break;
- *out++ = '^';
- c ^= 0x40;
- }
- if (!space--)
- break;
- *out++ = c;
- }
- *out = 0;
-
- gig_dbg(level, "%s (%u bytes): %s", msg, (unsigned) len, outbuf);
-}
-EXPORT_SYMBOL_GPL(gigaset_dbg_buffer);
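
The escaping scheme is reversible byte by byte: '\' protects a literal '~', '^' or '\', '~' marks a stripped high bit, and '^' marks a control character XORed with 0x40. A few worked examples:

    0x41 'A'  ->  "A"    (printable, copied unchanged)
    0x93      ->  "~^S"  (high bit: 0x93 ^ 0x80 = 0x13; control: 0x13 ^ 0x40 = 'S')
    0x7e '~'  ->  "\~"   (metacharacter, backslash-escaped)
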
-
-static int setflags(struct cardstate *cs, unsigned flags, unsigned delay)
-{
- int r;
-
- r = cs->ops->set_modem_ctrl(cs, cs->control_state, flags);
- cs->control_state = flags;
- if (r < 0)
- return r;
-
- if (delay) {
- set_current_state(TASK_INTERRUPTIBLE);
- schedule_timeout(delay * HZ / 1000);
- }
-
- return 0;
-}
-
-int gigaset_enterconfigmode(struct cardstate *cs)
-{
- int i, r;
-
- cs->control_state = TIOCM_RTS;
-
- r = setflags(cs, TIOCM_DTR, 200);
- if (r < 0)
- goto error;
- r = setflags(cs, 0, 200);
- if (r < 0)
- goto error;
- for (i = 0; i < 5; ++i) {
- r = setflags(cs, TIOCM_RTS, 100);
- if (r < 0)
- goto error;
- r = setflags(cs, 0, 100);
- if (r < 0)
- goto error;
- }
- r = setflags(cs, TIOCM_RTS | TIOCM_DTR, 800);
- if (r < 0)
- goto error;
-
- return 0;
-
-error:
- dev_err(cs->dev, "error %d on setuartbits\n", -r);
- cs->control_state = TIOCM_RTS | TIOCM_DTR;
- cs->ops->set_modem_ctrl(cs, 0, TIOCM_RTS | TIOCM_DTR);
-
- return -1;
-}
-
-static int test_timeout(struct at_state_t *at_state)
-{
- if (!at_state->timer_expires)
- return 0;
-
- if (--at_state->timer_expires) {
- gig_dbg(DEBUG_MCMD, "decreased timer of %p to %lu",
- at_state, at_state->timer_expires);
- return 0;
- }
-
- gigaset_add_event(at_state->cs, at_state, EV_TIMEOUT, NULL,
- at_state->timer_index, NULL);
- return 1;
-}
-
-static void timer_tick(struct timer_list *t)
-{
- struct cardstate *cs = from_timer(cs, t, timer);
- unsigned long flags;
- unsigned channel;
- struct at_state_t *at_state;
- int timeout = 0;
-
- spin_lock_irqsave(&cs->lock, flags);
-
- for (channel = 0; channel < cs->channels; ++channel)
- if (test_timeout(&cs->bcs[channel].at_state))
- timeout = 1;
-
- if (test_timeout(&cs->at_state))
- timeout = 1;
-
- list_for_each_entry(at_state, &cs->temp_at_states, list)
- if (test_timeout(at_state))
- timeout = 1;
-
- if (cs->running) {
- mod_timer(&cs->timer, jiffies + msecs_to_jiffies(GIG_TICK));
- if (timeout) {
- gig_dbg(DEBUG_EVENT, "scheduling timeout");
- tasklet_schedule(&cs->event_tasklet);
- }
- }
-
- spin_unlock_irqrestore(&cs->lock, flags);
-}
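
Together, test_timeout() and timer_tick() implement coarse per-connection timeouts: at_state->timer_expires holds the number of remaining ticks, each tick decrements it, and reaching zero queues EV_TIMEOUT for the state machine. A timeout of n ticks therefore fires after roughly n * GIG_TICK milliseconds; if GIG_TICK were 100 ms (its value is defined elsewhere in the driver), timer_expires = 50 would expire after about five seconds.
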
-
-int gigaset_get_channel(struct bc_state *bcs)
-{
- unsigned long flags;
-
- spin_lock_irqsave(&bcs->cs->lock, flags);
- if (bcs->use_count || !try_module_get(bcs->cs->driver->owner)) {
- gig_dbg(DEBUG_CHANNEL, "could not allocate channel %d",
- bcs->channel);
- spin_unlock_irqrestore(&bcs->cs->lock, flags);
- return -EBUSY;
- }
- ++bcs->use_count;
- bcs->busy = 1;
- gig_dbg(DEBUG_CHANNEL, "allocated channel %d", bcs->channel);
- spin_unlock_irqrestore(&bcs->cs->lock, flags);
- return 0;
-}
-
-struct bc_state *gigaset_get_free_channel(struct cardstate *cs)
-{
- unsigned long flags;
- int i;
-
- spin_lock_irqsave(&cs->lock, flags);
- if (!try_module_get(cs->driver->owner)) {
- gig_dbg(DEBUG_CHANNEL,
- "could not get module for allocating channel");
- spin_unlock_irqrestore(&cs->lock, flags);
- return NULL;
- }
- for (i = 0; i < cs->channels; ++i)
- if (!cs->bcs[i].use_count) {
- ++cs->bcs[i].use_count;
- cs->bcs[i].busy = 1;
- spin_unlock_irqrestore(&cs->lock, flags);
- gig_dbg(DEBUG_CHANNEL, "allocated channel %d", i);
- return cs->bcs + i;
- }
- module_put(cs->driver->owner);
- spin_unlock_irqrestore(&cs->lock, flags);
- gig_dbg(DEBUG_CHANNEL, "no free channel");
- return NULL;
-}
-
-void gigaset_free_channel(struct bc_state *bcs)
-{
- unsigned long flags;
-
- spin_lock_irqsave(&bcs->cs->lock, flags);
- if (!bcs->busy) {
- gig_dbg(DEBUG_CHANNEL, "could not free channel %d",
- bcs->channel);
- spin_unlock_irqrestore(&bcs->cs->lock, flags);
- return;
- }
- --bcs->use_count;
- bcs->busy = 0;
- module_put(bcs->cs->driver->owner);
- gig_dbg(DEBUG_CHANNEL, "freed channel %d", bcs->channel);
- spin_unlock_irqrestore(&bcs->cs->lock, flags);
-}
-
-int gigaset_get_channels(struct cardstate *cs)
-{
- unsigned long flags;
- int i;
-
- spin_lock_irqsave(&cs->lock, flags);
- for (i = 0; i < cs->channels; ++i)
- if (cs->bcs[i].use_count) {
- spin_unlock_irqrestore(&cs->lock, flags);
- gig_dbg(DEBUG_CHANNEL,
- "could not allocate all channels");
- return -EBUSY;
- }
- for (i = 0; i < cs->channels; ++i)
- ++cs->bcs[i].use_count;
- spin_unlock_irqrestore(&cs->lock, flags);
-
- gig_dbg(DEBUG_CHANNEL, "allocated all channels");
-
- return 0;
-}
-
-void gigaset_free_channels(struct cardstate *cs)
-{
- unsigned long flags;
- int i;
-
- gig_dbg(DEBUG_CHANNEL, "unblocking all channels");
- spin_lock_irqsave(&cs->lock, flags);
- for (i = 0; i < cs->channels; ++i)
- --cs->bcs[i].use_count;
- spin_unlock_irqrestore(&cs->lock, flags);
-}
-
-void gigaset_block_channels(struct cardstate *cs)
-{
- unsigned long flags;
- int i;
-
- gig_dbg(DEBUG_CHANNEL, "blocking all channels");
- spin_lock_irqsave(&cs->lock, flags);
- for (i = 0; i < cs->channels; ++i)
- ++cs->bcs[i].use_count;
- spin_unlock_irqrestore(&cs->lock, flags);
-}
-
-static void clear_events(struct cardstate *cs)
-{
- struct event_t *ev;
- unsigned head, tail;
- unsigned long flags;
-
- spin_lock_irqsave(&cs->ev_lock, flags);
-
- head = cs->ev_head;
- tail = cs->ev_tail;
-
- while (tail != head) {
- ev = cs->events + head;
- kfree(ev->ptr);
- head = (head + 1) % MAX_EVENTS;
- }
-
- cs->ev_head = tail;
-
- spin_unlock_irqrestore(&cs->ev_lock, flags);
-}
-
-/**
- * gigaset_add_event() - add event to device event queue
- * @cs: device descriptor structure.
- * @at_state: connection state structure.
- * @type: event type.
- * @ptr: pointer parameter for event.
- * @parameter: integer parameter for event.
- * @arg: pointer parameter for event.
- *
- * Allocate an event queue entry from the device's event queue, and set it up
- * with the parameters given.
- *
- * Return value: pointer to the added event, or NULL if the event queue was full
- */
-struct event_t *gigaset_add_event(struct cardstate *cs,
- struct at_state_t *at_state, int type,
- void *ptr, int parameter, void *arg)
-{
- unsigned long flags;
- unsigned next, tail;
- struct event_t *event = NULL;
-
- gig_dbg(DEBUG_EVENT, "queueing event %d", type);
-
- spin_lock_irqsave(&cs->ev_lock, flags);
-
- tail = cs->ev_tail;
- next = (tail + 1) % MAX_EVENTS;
- if (unlikely(next == cs->ev_head))
- dev_err(cs->dev, "event queue full\n");
- else {
- event = cs->events + tail;
- event->type = type;
- event->at_state = at_state;
- event->cid = -1;
- event->ptr = ptr;
- event->arg = arg;
- event->parameter = parameter;
- cs->ev_tail = next;
- }
-
- spin_unlock_irqrestore(&cs->ev_lock, flags);
-
- return event;
-}
-EXPORT_SYMBOL_GPL(gigaset_add_event);
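
The event queue is a classic ring buffer that sacrifices one slot to distinguish full from empty, so at most MAX_EVENTS - 1 events can be pending at once:

    empty: head == tail
    full:  (tail + 1) % MAX_EVENTS == head
    used:  (tail - head + MAX_EVENTS) % MAX_EVENTS
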
-
-static void clear_at_state(struct at_state_t *at_state)
-{
- int i;
-
- for (i = 0; i < STR_NUM; ++i) {
- kfree(at_state->str_var[i]);
- at_state->str_var[i] = NULL;
- }
-}
-
-static void dealloc_temp_at_states(struct cardstate *cs)
-{
- struct at_state_t *cur, *next;
-
- list_for_each_entry_safe(cur, next, &cs->temp_at_states, list) {
- list_del(&cur->list);
- clear_at_state(cur);
- kfree(cur);
- }
-}
-
-static void gigaset_freebcs(struct bc_state *bcs)
-{
- int i;
-
- gig_dbg(DEBUG_INIT, "freeing bcs[%d]->hw", bcs->channel);
- bcs->cs->ops->freebcshw(bcs);
-
- gig_dbg(DEBUG_INIT, "clearing bcs[%d]->at_state", bcs->channel);
- clear_at_state(&bcs->at_state);
- gig_dbg(DEBUG_INIT, "freeing bcs[%d]->skb", bcs->channel);
- dev_kfree_skb(bcs->rx_skb);
- bcs->rx_skb = NULL;
-
- for (i = 0; i < AT_NUM; ++i) {
- kfree(bcs->commands[i]);
- bcs->commands[i] = NULL;
- }
-}
-
-static struct cardstate *alloc_cs(struct gigaset_driver *drv)
-{
- unsigned long flags;
- unsigned i;
- struct cardstate *cs;
- struct cardstate *ret = NULL;
-
- spin_lock_irqsave(&drv->lock, flags);
- if (drv->blocked)
- goto exit;
- for (i = 0; i < drv->minors; ++i) {
- cs = drv->cs + i;
- if (!(cs->flags & VALID_MINOR)) {
- cs->flags = VALID_MINOR;
- ret = cs;
- break;
- }
- }
-exit:
- spin_unlock_irqrestore(&drv->lock, flags);
- return ret;
-}
-
-static void free_cs(struct cardstate *cs)
-{
- cs->flags = 0;
-}
-
-static void make_valid(struct cardstate *cs, unsigned mask)
-{
- unsigned long flags;
- struct gigaset_driver *drv = cs->driver;
- spin_lock_irqsave(&drv->lock, flags);
- cs->flags |= mask;
- spin_unlock_irqrestore(&drv->lock, flags);
-}
-
-static void make_invalid(struct cardstate *cs, unsigned mask)
-{
- unsigned long flags;
- struct gigaset_driver *drv = cs->driver;
- spin_lock_irqsave(&drv->lock, flags);
- cs->flags &= ~mask;
- spin_unlock_irqrestore(&drv->lock, flags);
-}
-
-/**
- * gigaset_freecs() - free all associated resources of a device
- * @cs: device descriptor structure.
- *
- * Stops all tasklets and timers, unregisters the device from all
- * subsystems it was registered to, deallocates the device structure
- * @cs and all structures referenced from it.
- * Operations on the device should be stopped before calling this.
- */
-void gigaset_freecs(struct cardstate *cs)
-{
- int i;
- unsigned long flags;
-
- if (!cs)
- return;
-
- mutex_lock(&cs->mutex);
-
- spin_lock_irqsave(&cs->lock, flags);
- cs->running = 0;
- spin_unlock_irqrestore(&cs->lock, flags); /* event handler and timer are
- not rescheduled below */
-
- tasklet_kill(&cs->event_tasklet);
- del_timer_sync(&cs->timer);
-
- switch (cs->cs_init) {
- default:
- /* clear B channel structures */
- for (i = 0; i < cs->channels; ++i) {
- gig_dbg(DEBUG_INIT, "clearing bcs[%d]", i);
- gigaset_freebcs(cs->bcs + i);
- }
-
- /* clear device sysfs */
- gigaset_free_dev_sysfs(cs);
-
- gigaset_if_free(cs);
-
- gig_dbg(DEBUG_INIT, "clearing hw");
- cs->ops->freecshw(cs);
-
- /* fall through */
- case 2: /* error in initcshw */
- /* Deregister from LL */
- make_invalid(cs, VALID_ID);
- gigaset_isdn_unregdev(cs);
-
- /* fall through */
- case 1: /* error when registering to LL */
- gig_dbg(DEBUG_INIT, "clearing at_state");
- clear_at_state(&cs->at_state);
- dealloc_temp_at_states(cs);
- clear_events(cs);
- tty_port_destroy(&cs->port);
-
- /* fall through */
- case 0: /* error in basic setup */
- gig_dbg(DEBUG_INIT, "freeing inbuf");
- kfree(cs->inbuf);
- kfree(cs->bcs);
- }
-
- mutex_unlock(&cs->mutex);
- free_cs(cs);
-}
-EXPORT_SYMBOL_GPL(gigaset_freecs);
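
The switch above uses cs_init as a progress counter and relies on fall-through, so the cleanup for each completed stage also runs that of all earlier ones. Matching the increments in gigaset_initcs() below, the stages are:

    cs_init == 0: buffer allocation failed -> free inbuf and bcs only
    cs_init == 1: LL registration failed   -> also clear AT state, events, port
    cs_init == 2: hardware init failed     -> also deregister from the LL
    cs_init >= 3: fully initialized        -> full teardown (channels, sysfs,
                                              interface, hardware)
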
-
-void gigaset_at_init(struct at_state_t *at_state, struct bc_state *bcs,
- struct cardstate *cs, int cid)
-{
- int i;
-
- INIT_LIST_HEAD(&at_state->list);
- at_state->waiting = 0;
- at_state->getstring = 0;
- at_state->pending_commands = 0;
- at_state->timer_expires = 0;
- at_state->timer_active = 0;
- at_state->timer_index = 0;
- at_state->seq_index = 0;
- at_state->ConState = 0;
- for (i = 0; i < STR_NUM; ++i)
- at_state->str_var[i] = NULL;
- at_state->int_var[VAR_ZDLE] = 0;
- at_state->int_var[VAR_ZCTP] = -1;
- at_state->int_var[VAR_ZSAU] = ZSAU_NULL;
- at_state->cs = cs;
- at_state->bcs = bcs;
- at_state->cid = cid;
- if (!cid)
- at_state->replystruct = cs->tabnocid;
- else
- at_state->replystruct = cs->tabcid;
-}
-
-
-static void gigaset_inbuf_init(struct inbuf_t *inbuf, struct cardstate *cs)
-/* inbuf->read must be allocated before! */
-{
- inbuf->head = 0;
- inbuf->tail = 0;
- inbuf->cs = cs;
- inbuf->inputstate = INS_command;
-}
-
-/**
- * gigaset_fill_inbuf() - append received data to input buffer
- * @inbuf: buffer structure.
- * @src: received data.
- * @numbytes: number of bytes received.
- *
- * Return value: !=0 if some data was appended
- */
-int gigaset_fill_inbuf(struct inbuf_t *inbuf, const unsigned char *src,
- unsigned numbytes)
-{
- unsigned n, head, tail, bytesleft;
-
- gig_dbg(DEBUG_INTR, "received %u bytes", numbytes);
-
- if (!numbytes)
- return 0;
-
- bytesleft = numbytes;
- tail = inbuf->tail;
- head = inbuf->head;
- gig_dbg(DEBUG_INTR, "buffer state: %u -> %u", head, tail);
-
- while (bytesleft) {
- if (head > tail)
- n = head - 1 - tail;
- else if (head == 0)
- n = (RBUFSIZE - 1) - tail;
- else
- n = RBUFSIZE - tail;
- if (!n) {
- dev_err(inbuf->cs->dev,
- "buffer overflow (%u bytes lost)\n",
- bytesleft);
- break;
- }
- if (n > bytesleft)
- n = bytesleft;
- memcpy(inbuf->data + tail, src, n);
- bytesleft -= n;
- tail = (tail + n) % RBUFSIZE;
- src += n;
- }
- gig_dbg(DEBUG_INTR, "setting tail to %u", tail);
- inbuf->tail = tail;
- return numbytes != bytesleft;
-}
-EXPORT_SYMBOL_GPL(gigaset_fill_inbuf);
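
gigaset_fill_inbuf() writes into a byte ring of size RBUFSIZE where head is the reader's position and one slot always stays free (head == tail means empty). The three cases compute the largest chunk copyable with a single memcpy():

    head >  tail : n = head - 1 - tail     (write up to just before head)
    head == 0    : n = RBUFSIZE - 1 - tail (must not wrap onto the free slot)
    otherwise    : n = RBUFSIZE - tail     (fill to the end, wrap next round)

For a hypothetical RBUFSIZE of 8 with head = 3 and tail = 6, the first pass copies 2 bytes into slots 6..7 and wraps tail to 0; the second pass copies up to 2 more into slots 0..1.
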
-
-/* Initialize the B channel structure */
-static int gigaset_initbcs(struct bc_state *bcs, struct cardstate *cs,
- int channel)
-{
- int i;
-
- bcs->tx_skb = NULL;
-
- skb_queue_head_init(&bcs->squeue);
-
- bcs->corrupted = 0;
- bcs->trans_down = 0;
- bcs->trans_up = 0;
-
- gig_dbg(DEBUG_INIT, "setting up bcs[%d]->at_state", channel);
- gigaset_at_init(&bcs->at_state, bcs, cs, -1);
-
-#ifdef CONFIG_GIGASET_DEBUG
- bcs->emptycount = 0;
-#endif
-
- bcs->rx_bufsize = 0;
- bcs->rx_skb = NULL;
- bcs->rx_fcs = PPP_INITFCS;
- bcs->inputstate = 0;
- bcs->channel = channel;
- bcs->cs = cs;
-
- bcs->chstate = 0;
- bcs->use_count = 1;
- bcs->busy = 0;
- bcs->ignore = cs->ignoreframes;
-
- for (i = 0; i < AT_NUM; ++i)
- bcs->commands[i] = NULL;
-
- spin_lock_init(&bcs->aplock);
- bcs->ap = NULL;
- bcs->apconnstate = 0;
-
- gig_dbg(DEBUG_INIT, " setting up bcs[%d]->hw", channel);
- return cs->ops->initbcshw(bcs);
-}
-
-/**
- * gigaset_initcs() - initialize device structure
- * @drv: hardware driver the device belongs to
- * @channels: number of B channels supported by device
- * @onechannel: !=0 if B channel data and AT commands share one
- * communication channel (M10x),
- * ==0 if B channels have separate communication channels (base)
- * @ignoreframes: number of frames to ignore after setting up B channel
- * @cidmode: !=0: start in CallID mode
- * @modulename: name of driver module for LL registration
- *
- * Allocates and initializes the cardstate structure for the Gigaset driver,
- * calls the hardware-dependent gigaset_initcshw() function, and calls the
- * B channel initialization function gigaset_initbcs() for each B channel.
- *
- * Return value:
- * pointer to cardstate structure, or NULL on failure
- */
-struct cardstate *gigaset_initcs(struct gigaset_driver *drv, int channels,
- int onechannel, int ignoreframes,
- int cidmode, const char *modulename)
-{
- struct cardstate *cs;
- unsigned long flags;
- int i;
-
- gig_dbg(DEBUG_INIT, "allocating cs");
- cs = alloc_cs(drv);
- if (!cs) {
- pr_err("maximum number of devices exceeded\n");
- return NULL;
- }
-
- cs->cs_init = 0;
- cs->channels = channels;
- cs->onechannel = onechannel;
- cs->ignoreframes = ignoreframes;
- INIT_LIST_HEAD(&cs->temp_at_states);
- cs->running = 0;
- timer_setup(&cs->timer, timer_tick, 0);
- spin_lock_init(&cs->ev_lock);
- cs->ev_tail = 0;
- cs->ev_head = 0;
-
- tasklet_init(&cs->event_tasklet, gigaset_handle_event,
- (unsigned long) cs);
- tty_port_init(&cs->port);
- cs->commands_pending = 0;
- cs->cur_at_seq = 0;
- cs->gotfwver = -1;
- cs->dev = NULL;
- cs->tty_dev = NULL;
- cs->cidmode = cidmode != 0;
- cs->tabnocid = gigaset_tab_nocid;
- cs->tabcid = gigaset_tab_cid;
-
- init_waitqueue_head(&cs->waitqueue);
- cs->waiting = 0;
-
- cs->mode = M_UNKNOWN;
- cs->mstate = MS_UNINITIALIZED;
-
- cs->bcs = kmalloc_array(channels, sizeof(struct bc_state), GFP_KERNEL);
- cs->inbuf = kmalloc(sizeof(struct inbuf_t), GFP_KERNEL);
- if (!cs->bcs || !cs->inbuf) {
- pr_err("out of memory\n");
- goto error;
- }
- ++cs->cs_init;
-
- gig_dbg(DEBUG_INIT, "setting up at_state");
- spin_lock_init(&cs->lock);
- gigaset_at_init(&cs->at_state, NULL, cs, 0);
- cs->dle = 0;
- cs->cbytes = 0;
-
- gig_dbg(DEBUG_INIT, "setting up inbuf");
- gigaset_inbuf_init(cs->inbuf, cs);
-
- cs->connected = 0;
- cs->isdn_up = 0;
-
- gig_dbg(DEBUG_INIT, "setting up cmdbuf");
- cs->cmdbuf = cs->lastcmdbuf = NULL;
- spin_lock_init(&cs->cmdlock);
- cs->curlen = 0;
- cs->cmdbytes = 0;
-
- gig_dbg(DEBUG_INIT, "setting up iif");
- if (gigaset_isdn_regdev(cs, modulename) < 0) {
- pr_err("error registering ISDN device\n");
- goto error;
- }
-
- make_valid(cs, VALID_ID);
- ++cs->cs_init;
- gig_dbg(DEBUG_INIT, "setting up hw");
- if (cs->ops->initcshw(cs) < 0)
- goto error;
-
- ++cs->cs_init;
-
- /* set up character device */
- gigaset_if_init(cs);
-
- /* set up device sysfs */
- gigaset_init_dev_sysfs(cs);
-
- /* set up channel data structures */
- for (i = 0; i < channels; ++i) {
- gig_dbg(DEBUG_INIT, "setting up bcs[%d]", i);
- if (gigaset_initbcs(cs->bcs + i, cs, i) < 0) {
- pr_err("could not allocate channel %d data\n", i);
- goto error;
- }
- }
-
- spin_lock_irqsave(&cs->lock, flags);
- cs->running = 1;
- spin_unlock_irqrestore(&cs->lock, flags);
- cs->timer.expires = jiffies + msecs_to_jiffies(GIG_TICK);
- add_timer(&cs->timer);
-
- gig_dbg(DEBUG_INIT, "cs initialized");
- return cs;
-
-error:
- gig_dbg(DEBUG_INIT, "failed");
- gigaset_freecs(cs);
- return NULL;
-}
-EXPORT_SYMBOL_GPL(gigaset_initcs);
-
-/* Reinitialize the B channel structure on hangup */
-void gigaset_bcs_reinit(struct bc_state *bcs)
-{
- struct sk_buff *skb;
- struct cardstate *cs = bcs->cs;
- unsigned long flags;
-
- while ((skb = skb_dequeue(&bcs->squeue)) != NULL)
- dev_kfree_skb(skb);
-
- spin_lock_irqsave(&cs->lock, flags);
- clear_at_state(&bcs->at_state);
- bcs->at_state.ConState = 0;
- bcs->at_state.timer_active = 0;
- bcs->at_state.timer_expires = 0;
- bcs->at_state.cid = -1; /* No CID defined */
- spin_unlock_irqrestore(&cs->lock, flags);
-
- bcs->inputstate = 0;
-
-#ifdef CONFIG_GIGASET_DEBUG
- bcs->emptycount = 0;
-#endif
-
- bcs->rx_fcs = PPP_INITFCS;
- bcs->chstate = 0;
-
- bcs->ignore = cs->ignoreframes;
- dev_kfree_skb(bcs->rx_skb);
- bcs->rx_skb = NULL;
-
- cs->ops->reinitbcshw(bcs);
-}
-
-static void cleanup_cs(struct cardstate *cs)
-{
- struct cmdbuf_t *cb, *tcb;
- int i;
- unsigned long flags;
-
- spin_lock_irqsave(&cs->lock, flags);
-
- cs->mode = M_UNKNOWN;
- cs->mstate = MS_UNINITIALIZED;
-
- clear_at_state(&cs->at_state);
- dealloc_temp_at_states(cs);
- gigaset_at_init(&cs->at_state, NULL, cs, 0);
-
- cs->inbuf->inputstate = INS_command;
- cs->inbuf->head = 0;
- cs->inbuf->tail = 0;
-
- cb = cs->cmdbuf;
- while (cb) {
- tcb = cb;
- cb = cb->next;
- kfree(tcb);
- }
- cs->cmdbuf = cs->lastcmdbuf = NULL;
- cs->curlen = 0;
- cs->cmdbytes = 0;
- cs->gotfwver = -1;
- cs->dle = 0;
- cs->cur_at_seq = 0;
- cs->commands_pending = 0;
- cs->cbytes = 0;
-
- spin_unlock_irqrestore(&cs->lock, flags);
-
- for (i = 0; i < cs->channels; ++i) {
- gigaset_freebcs(cs->bcs + i);
- if (gigaset_initbcs(cs->bcs + i, cs, i) < 0)
- pr_err("could not allocate channel %d data\n", i);
- }
-
- if (cs->waiting) {
- cs->cmd_result = -ENODEV;
- cs->waiting = 0;
- wake_up_interruptible(&cs->waitqueue);
- }
-}
-
-
-/**
- * gigaset_start() - start device operations
- * @cs: device descriptor structure.
- *
- * Prepares the device for use by setting up communication parameters,
- * scheduling an EV_START event to initiate device initialization, and
- * waiting for completion of the initialization.
- *
- * Return value:
- * 0 on success, error code < 0 on failure
- */
-int gigaset_start(struct cardstate *cs)
-{
- unsigned long flags;
-
- if (mutex_lock_interruptible(&cs->mutex))
- return -EBUSY;
-
- spin_lock_irqsave(&cs->lock, flags);
- cs->connected = 1;
- spin_unlock_irqrestore(&cs->lock, flags);
-
- if (cs->mstate != MS_LOCKED) {
- cs->ops->set_modem_ctrl(cs, 0, TIOCM_DTR | TIOCM_RTS);
- cs->ops->baud_rate(cs, B115200);
- cs->ops->set_line_ctrl(cs, CS8);
- cs->control_state = TIOCM_DTR | TIOCM_RTS;
- }
-
- cs->waiting = 1;
-
- if (!gigaset_add_event(cs, &cs->at_state, EV_START, NULL, 0, NULL)) {
- cs->waiting = 0;
- goto error;
- }
- gigaset_schedule_event(cs);
-
- wait_event(cs->waitqueue, !cs->waiting);
-
- mutex_unlock(&cs->mutex);
- return 0;
-
-error:
- mutex_unlock(&cs->mutex);
- return -ENOMEM;
-}
-EXPORT_SYMBOL_GPL(gigaset_start);
-
-/**
- * gigaset_shutdown() - shut down device operations
- * @cs: device descriptor structure.
- *
- * Deactivates the device by scheduling an EV_SHUTDOWN event and
- * waiting for completion of the shutdown.
- *
- * Return value:
- * 0 - success, -ENODEV - error (no device associated)
- */
-int gigaset_shutdown(struct cardstate *cs)
-{
- mutex_lock(&cs->mutex);
-
- if (!(cs->flags & VALID_MINOR)) {
- mutex_unlock(&cs->mutex);
- return -ENODEV;
- }
-
- cs->waiting = 1;
-
- if (!gigaset_add_event(cs, &cs->at_state, EV_SHUTDOWN, NULL, 0, NULL))
- goto exit;
- gigaset_schedule_event(cs);
-
- wait_event(cs->waitqueue, !cs->waiting);
-
- cleanup_cs(cs);
-
-exit:
- mutex_unlock(&cs->mutex);
- return 0;
-}
-EXPORT_SYMBOL_GPL(gigaset_shutdown);
-
-/**
- * gigaset_stop() - stop device operations
- * @cs: device descriptor structure.
- *
- * Stops operations on the device by scheduling an EV_STOP event and
- * waiting for completion of the stop.
- */
-void gigaset_stop(struct cardstate *cs)
-{
- mutex_lock(&cs->mutex);
-
- cs->waiting = 1;
-
- if (!gigaset_add_event(cs, &cs->at_state, EV_STOP, NULL, 0, NULL))
- goto exit;
- gigaset_schedule_event(cs);
-
- wait_event(cs->waitqueue, !cs->waiting);
-
- cleanup_cs(cs);
-
-exit:
- mutex_unlock(&cs->mutex);
-}
-EXPORT_SYMBOL_GPL(gigaset_stop);
-
-static LIST_HEAD(drivers);
-static DEFINE_SPINLOCK(driver_lock);
-
-struct cardstate *gigaset_get_cs_by_id(int id)
-{
- unsigned long flags;
- struct cardstate *ret = NULL;
- struct cardstate *cs;
- struct gigaset_driver *drv;
- unsigned i;
-
- spin_lock_irqsave(&driver_lock, flags);
- list_for_each_entry(drv, &drivers, list) {
- spin_lock(&drv->lock);
- for (i = 0; i < drv->minors; ++i) {
- cs = drv->cs + i;
- if ((cs->flags & VALID_ID) && cs->myid == id) {
- ret = cs;
- break;
- }
- }
- spin_unlock(&drv->lock);
- if (ret)
- break;
- }
- spin_unlock_irqrestore(&driver_lock, flags);
- return ret;
-}
-
-static struct cardstate *gigaset_get_cs_by_minor(unsigned minor)
-{
- unsigned long flags;
- struct cardstate *ret = NULL;
- struct gigaset_driver *drv;
- unsigned index;
-
- spin_lock_irqsave(&driver_lock, flags);
- list_for_each_entry(drv, &drivers, list) {
- if (minor < drv->minor || minor >= drv->minor + drv->minors)
- continue;
- index = minor - drv->minor;
- spin_lock(&drv->lock);
- if (drv->cs[index].flags & VALID_MINOR)
- ret = drv->cs + index;
- spin_unlock(&drv->lock);
- if (ret)
- break;
- }
- spin_unlock_irqrestore(&driver_lock, flags);
- return ret;
-}
-
-struct cardstate *gigaset_get_cs_by_tty(struct tty_struct *tty)
-{
- return gigaset_get_cs_by_minor(tty->index + tty->driver->minor_start);
-}
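
gigaset_get_cs_by_minor() maps a character device minor back to its cardstate by offset into the owning driver's array: a driver registered with base minor b and n minors owns minors b .. b+n-1, and minor m resolves to drv->cs[m - b]. With hypothetical values b = 64 and n = 8, minor 66 resolves to drv->cs[2]; gigaset_get_cs_by_tty() first derives m as tty->index + minor_start.
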
-
-/**
- * gigaset_freedriver() - free all associated resources of a driver
- * @drv: driver descriptor structure.
- *
- * Unregisters the driver from the system and deallocates the driver
- * structure @drv and all structures referenced from it.
- * All devices should be shut down before calling this.
- */
-void gigaset_freedriver(struct gigaset_driver *drv)
-{
- unsigned long flags;
-
- spin_lock_irqsave(&driver_lock, flags);
- list_del(&drv->list);
- spin_unlock_irqrestore(&driver_lock, flags);
-
- gigaset_if_freedriver(drv);
-
- kfree(drv->cs);
- kfree(drv);
-}
-EXPORT_SYMBOL_GPL(gigaset_freedriver);
-
-/**
- * gigaset_initdriver() - initialize driver structure
- * @minor: First minor number
- * @minors: Number of minors this driver can handle
- * @procname: Name of the driver
- * @devname: Name of the device files (prefix without minor number)
- * @ops: Hardware operations table
- * @owner: Module owning the driver
- *
- * Allocate and initialize gigaset_driver structure. Initialize interface.
- *
- * Return value:
- * Pointer to the gigaset_driver structure on success, NULL on failure.
- */
-struct gigaset_driver *gigaset_initdriver(unsigned minor, unsigned minors,
- const char *procname,
- const char *devname,
- const struct gigaset_ops *ops,
- struct module *owner)
-{
- struct gigaset_driver *drv;
- unsigned long flags;
- unsigned i;
-
- drv = kmalloc(sizeof *drv, GFP_KERNEL);
- if (!drv)
- return NULL;
-
- drv->have_tty = 0;
- drv->minor = minor;
- drv->minors = minors;
- spin_lock_init(&drv->lock);
- drv->blocked = 0;
- drv->ops = ops;
- drv->owner = owner;
- INIT_LIST_HEAD(&drv->list);
-
- drv->cs = kmalloc_array(minors, sizeof(*drv->cs), GFP_KERNEL);
- if (!drv->cs)
- goto error;
-
- for (i = 0; i < minors; ++i) {
- drv->cs[i].flags = 0;
- drv->cs[i].driver = drv;
- drv->cs[i].ops = drv->ops;
- drv->cs[i].minor_index = i;
- mutex_init(&drv->cs[i].mutex);
- }
-
- gigaset_if_initdriver(drv, procname, devname);
-
- spin_lock_irqsave(&driver_lock, flags);
- list_add(&drv->list, &drivers);
- spin_unlock_irqrestore(&driver_lock, flags);
-
- return drv;
-
-error:
- kfree(drv);
- return NULL;
-}
-EXPORT_SYMBOL_GPL(gigaset_initdriver);
-
-/**
- * gigaset_blockdriver() - block driver
- * @drv: driver descriptor structure.
- *
- * Prevents the driver from attaching new devices, in preparation for
- * deregistration.
- */
-void gigaset_blockdriver(struct gigaset_driver *drv)
-{
- drv->blocked = 1;
-}
-EXPORT_SYMBOL_GPL(gigaset_blockdriver);
-
-static int __init gigaset_init_module(void)
-{
- /* in accordance with the principle of least astonishment,
- * setting the 'debug' parameter to 1 activates a sensible
- * set of default debug levels
- */
- if (gigaset_debuglevel == 1)
- gigaset_debuglevel = DEBUG_DEFAULT;
-
- pr_info(DRIVER_DESC DRIVER_DESC_DEBUG "\n");
- gigaset_isdn_regdrv();
- return 0;
-}
-
-static void __exit gigaset_exit_module(void)
-{
- gigaset_isdn_unregdrv();
-}
-
-module_init(gigaset_init_module);
-module_exit(gigaset_exit_module);
-
-MODULE_AUTHOR(DRIVER_AUTHOR);
-MODULE_DESCRIPTION(DRIVER_DESC);
-
-MODULE_LICENSE("GPL");
diff --git a/drivers/staging/isdn/gigaset/dummyll.c b/drivers/staging/isdn/gigaset/dummyll.c
deleted file mode 100644
index 4b9637e5da6e..000000000000
--- a/drivers/staging/isdn/gigaset/dummyll.c
+++ /dev/null
@@ -1,74 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-/*
- * Dummy LL interface for the Gigaset driver
- *
- * Copyright (c) 2009 by Tilman Schmidt <tilman@imap.cc>.
- *
- * =====================================================================
- * =====================================================================
- */
-
-#include <linux/export.h>
-#include "gigaset.h"
-
-void gigaset_skb_sent(struct bc_state *bcs, struct sk_buff *skb)
-{
-}
-EXPORT_SYMBOL_GPL(gigaset_skb_sent);
-
-void gigaset_skb_rcvd(struct bc_state *bcs, struct sk_buff *skb)
-{
-}
-EXPORT_SYMBOL_GPL(gigaset_skb_rcvd);
-
-void gigaset_isdn_rcv_err(struct bc_state *bcs)
-{
-}
-EXPORT_SYMBOL_GPL(gigaset_isdn_rcv_err);
-
-int gigaset_isdn_icall(struct at_state_t *at_state)
-{
- return ICALL_IGNORE;
-}
-
-void gigaset_isdn_connD(struct bc_state *bcs)
-{
-}
-
-void gigaset_isdn_hupD(struct bc_state *bcs)
-{
-}
-
-void gigaset_isdn_connB(struct bc_state *bcs)
-{
-}
-
-void gigaset_isdn_hupB(struct bc_state *bcs)
-{
-}
-
-void gigaset_isdn_start(struct cardstate *cs)
-{
-}
-
-void gigaset_isdn_stop(struct cardstate *cs)
-{
-}
-
-int gigaset_isdn_regdev(struct cardstate *cs, const char *isdnid)
-{
- return 0;
-}
-
-void gigaset_isdn_unregdev(struct cardstate *cs)
-{
-}
-
-void gigaset_isdn_regdrv(void)
-{
- pr_info("no ISDN subsystem interface\n");
-}
-
-void gigaset_isdn_unregdrv(void)
-{
-}
diff --git a/drivers/staging/isdn/gigaset/ev-layer.c b/drivers/staging/isdn/gigaset/ev-layer.c
deleted file mode 100644
index f8bb1869c600..000000000000
--- a/drivers/staging/isdn/gigaset/ev-layer.c
+++ /dev/null
@@ -1,1910 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-/*
- * Stuff used by all variants of the driver
- *
- * Copyright (c) 2001 by Stefan Eilers,
- * Hansjoerg Lipp <hjlipp@web.de>,
- * Tilman Schmidt <tilman@imap.cc>.
- *
- * =====================================================================
- * =====================================================================
- */
-
-#include <linux/export.h>
-#include "gigaset.h"
-
-/* ========================================================== */
-/* bit masks for pending commands */
-#define PC_DIAL 0x001
-#define PC_HUP 0x002
-#define PC_INIT 0x004
-#define PC_DLE0 0x008
-#define PC_DLE1 0x010
-#define PC_SHUTDOWN 0x020
-#define PC_ACCEPT 0x040
-#define PC_CID 0x080
-#define PC_NOCID 0x100
-#define PC_CIDMODE 0x200
-#define PC_UMMODE 0x400
-
-/* types of modem responses */
-#define RT_NOTHING 0
-#define RT_ZSAU 1
-#define RT_RING 2
-#define RT_NUMBER 3
-#define RT_STRING 4
-#define RT_ZCAU 6
-
-/* Possible ASCII responses */
-#define RSP_OK 0
-#define RSP_ERROR 1
-#define RSP_ZGCI 3
-#define RSP_RING 4
-#define RSP_ZVLS 5
-#define RSP_ZCAU 6
-
-/* responses with values to store in at_state */
-/* - numeric */
-#define RSP_VAR 100
-#define RSP_ZSAU (RSP_VAR + VAR_ZSAU)
-#define RSP_ZDLE (RSP_VAR + VAR_ZDLE)
-#define RSP_ZCTP (RSP_VAR + VAR_ZCTP)
-/* - string */
-#define RSP_STR (RSP_VAR + VAR_NUM)
-#define RSP_NMBR (RSP_STR + STR_NMBR)
-#define RSP_ZCPN (RSP_STR + STR_ZCPN)
-#define RSP_ZCON (RSP_STR + STR_ZCON)
-#define RSP_ZBC (RSP_STR + STR_ZBC)
-#define RSP_ZHLC (RSP_STR + STR_ZHLC)
-
-#define RSP_WRONG_CID -2 /* unknown cid in cmd */
-#define RSP_INVAL -6 /* invalid response */
-#define RSP_NODEV -9 /* device not connected */
-
-#define RSP_NONE -19
-#define RSP_STRING -20
-#define RSP_NULL -21
-#define RSP_INIT -27
-#define RSP_ANY -26
-#define RSP_LAST -28
-
-/* actions for process_response */
-#define ACT_NOTHING 0
-#define ACT_SETDLE1 1
-#define ACT_SETDLE0 2
-#define ACT_FAILINIT 3
-#define ACT_HUPMODEM 4
-#define ACT_CONFIGMODE 5
-#define ACT_INIT 6
-#define ACT_DLE0 7
-#define ACT_DLE1 8
-#define ACT_FAILDLE0 9
-#define ACT_FAILDLE1 10
-#define ACT_RING 11
-#define ACT_CID 12
-#define ACT_FAILCID 13
-#define ACT_SDOWN 14
-#define ACT_FAILSDOWN 15
-#define ACT_DEBUG 16
-#define ACT_WARN 17
-#define ACT_DIALING 18
-#define ACT_ABORTDIAL 19
-#define ACT_DISCONNECT 20
-#define ACT_CONNECT 21
-#define ACT_REMOTEREJECT 22
-#define ACT_CONNTIMEOUT 23
-#define ACT_REMOTEHUP 24
-#define ACT_ABORTHUP 25
-#define ACT_ICALL 26
-#define ACT_ACCEPTED 27
-#define ACT_ABORTACCEPT 28
-#define ACT_TIMEOUT 29
-#define ACT_GETSTRING 30
-#define ACT_SETVER 31
-#define ACT_FAILVER 32
-#define ACT_GOTVER 33
-#define ACT_TEST 34
-#define ACT_ERROR 35
-#define ACT_ABORTCID 36
-#define ACT_ZCAU 37
-#define ACT_NOTIFY_BC_DOWN 38
-#define ACT_NOTIFY_BC_UP 39
-#define ACT_DIAL 40
-#define ACT_ACCEPT 41
-#define ACT_HUP 43
-#define ACT_IF_LOCK 44
-#define ACT_START 45
-#define ACT_STOP 46
-#define ACT_FAKEDLE0 47
-#define ACT_FAKEHUP 48
-#define ACT_FAKESDOWN 49
-#define ACT_SHUTDOWN 50
-#define ACT_PROC_CIDMODE 51
-#define ACT_UMODESET 52
-#define ACT_FAILUMODE 53
-#define ACT_CMODESET 54
-#define ACT_FAILCMODE 55
-#define ACT_IF_VER 56
-#define ACT_CMD 100
-
-/* at command sequences */
-#define SEQ_NONE 0
-#define SEQ_INIT 100
-#define SEQ_DLE0 200
-#define SEQ_DLE1 250
-#define SEQ_CID 300
-#define SEQ_NOCID 350
-#define SEQ_HUP 400
-#define SEQ_DIAL 600
-#define SEQ_ACCEPT 720
-#define SEQ_SHUTDOWN 500
-#define SEQ_CIDMODE 10
-#define SEQ_UMMODE 11
-
-
-/* 100: init, 200: dle0, 250: dle1, 300: get cid (dial), 350: "hup" (no cid),
- * 400: hup, 500: reset, 600: dial, 700: ring */
-struct reply_t gigaset_tab_nocid[] =
-{
-/* resp_code, min_ConState, max_ConState, parameter, new_ConState, timeout,
- * action, command */
-
-/* initialize device, set cid mode if possible */
- {RSP_INIT, -1, -1, SEQ_INIT, 100, 1, {ACT_TIMEOUT} },
-
- {EV_TIMEOUT, 100, 100, -1, 101, 3, {0}, "Z\r"},
- {RSP_OK, 101, 103, -1, 120, 5, {ACT_GETSTRING},
- "+GMR\r"},
-
- {EV_TIMEOUT, 101, 101, -1, 102, 5, {0}, "Z\r"},
- {RSP_ERROR, 101, 101, -1, 102, 5, {0}, "Z\r"},
-
- {EV_TIMEOUT, 102, 102, -1, 108, 5, {ACT_SETDLE1},
- "^SDLE=0\r"},
- {RSP_OK, 108, 108, -1, 104, -1},
- {RSP_ZDLE, 104, 104, 0, 103, 5, {0}, "Z\r"},
- {EV_TIMEOUT, 104, 104, -1, 0, 0, {ACT_FAILINIT} },
- {RSP_ERROR, 108, 108, -1, 0, 0, {ACT_FAILINIT} },
-
- {EV_TIMEOUT, 108, 108, -1, 105, 2, {ACT_SETDLE0,
- ACT_HUPMODEM,
- ACT_TIMEOUT} },
- {EV_TIMEOUT, 105, 105, -1, 103, 5, {0}, "Z\r"},
-
- {RSP_ERROR, 102, 102, -1, 107, 5, {0}, "^GETPRE\r"},
- {RSP_OK, 107, 107, -1, 0, 0, {ACT_CONFIGMODE} },
- {RSP_ERROR, 107, 107, -1, 0, 0, {ACT_FAILINIT} },
- {EV_TIMEOUT, 107, 107, -1, 0, 0, {ACT_FAILINIT} },
-
- {RSP_ERROR, 103, 103, -1, 0, 0, {ACT_FAILINIT} },
- {EV_TIMEOUT, 103, 103, -1, 0, 0, {ACT_FAILINIT} },
-
- {RSP_STRING, 120, 120, -1, 121, -1, {ACT_SETVER} },
-
- {EV_TIMEOUT, 120, 121, -1, 0, 0, {ACT_FAILVER,
- ACT_INIT} },
- {RSP_ERROR, 120, 121, -1, 0, 0, {ACT_FAILVER,
- ACT_INIT} },
- {RSP_OK, 121, 121, -1, 0, 0, {ACT_GOTVER,
- ACT_INIT} },
- {RSP_NONE, 121, 121, -1, 120, 0, {ACT_GETSTRING} },
-
-/* leave dle mode */
- {RSP_INIT, 0, 0, SEQ_DLE0, 201, 5, {0}, "^SDLE=0\r"},
- {RSP_OK, 201, 201, -1, 202, -1},
- {RSP_ZDLE, 202, 202, 0, 0, 0, {ACT_DLE0} },
- {RSP_NODEV, 200, 249, -1, 0, 0, {ACT_FAKEDLE0} },
- {RSP_ERROR, 200, 249, -1, 0, 0, {ACT_FAILDLE0} },
- {EV_TIMEOUT, 200, 249, -1, 0, 0, {ACT_FAILDLE0} },
-
-/* enter dle mode */
- {RSP_INIT, 0, 0, SEQ_DLE1, 251, 5, {0}, "^SDLE=1\r"},
- {RSP_OK, 251, 251, -1, 252, -1},
- {RSP_ZDLE, 252, 252, 1, 0, 0, {ACT_DLE1} },
- {RSP_ERROR, 250, 299, -1, 0, 0, {ACT_FAILDLE1} },
- {EV_TIMEOUT, 250, 299, -1, 0, 0, {ACT_FAILDLE1} },
-
-/* incoming call */
- {RSP_RING, -1, -1, -1, -1, -1, {ACT_RING} },
-
-/* get cid */
- {RSP_INIT, 0, 0, SEQ_CID, 301, 5, {0}, "^SGCI?\r"},
- {RSP_OK, 301, 301, -1, 302, -1},
- {RSP_ZGCI, 302, 302, -1, 0, 0, {ACT_CID} },
- {RSP_ERROR, 301, 349, -1, 0, 0, {ACT_FAILCID} },
- {EV_TIMEOUT, 301, 349, -1, 0, 0, {ACT_FAILCID} },
-
-/* enter cid mode */
- {RSP_INIT, 0, 0, SEQ_CIDMODE, 150, 5, {0}, "^SGCI=1\r"},
- {RSP_OK, 150, 150, -1, 0, 0, {ACT_CMODESET} },
- {RSP_ERROR, 150, 150, -1, 0, 0, {ACT_FAILCMODE} },
- {EV_TIMEOUT, 150, 150, -1, 0, 0, {ACT_FAILCMODE} },
-
-/* leave cid mode */
- {RSP_INIT, 0, 0, SEQ_UMMODE, 160, 5, {0}, "Z\r"},
- {RSP_OK, 160, 160, -1, 0, 0, {ACT_UMODESET} },
- {RSP_ERROR, 160, 160, -1, 0, 0, {ACT_FAILUMODE} },
- {EV_TIMEOUT, 160, 160, -1, 0, 0, {ACT_FAILUMODE} },
-
-/* abort getting cid */
- {RSP_INIT, 0, 0, SEQ_NOCID, 0, 0, {ACT_ABORTCID} },
-
-/* reset */
- {RSP_INIT, 0, 0, SEQ_SHUTDOWN, 504, 5, {0}, "Z\r"},
- {RSP_OK, 504, 504, -1, 0, 0, {ACT_SDOWN} },
- {RSP_ERROR, 501, 599, -1, 0, 0, {ACT_FAILSDOWN} },
- {EV_TIMEOUT, 501, 599, -1, 0, 0, {ACT_FAILSDOWN} },
- {RSP_NODEV, 501, 599, -1, 0, 0, {ACT_FAKESDOWN} },
-
- {EV_PROC_CIDMODE, -1, -1, -1, -1, -1, {ACT_PROC_CIDMODE} },
- {EV_IF_LOCK, -1, -1, -1, -1, -1, {ACT_IF_LOCK} },
- {EV_IF_VER, -1, -1, -1, -1, -1, {ACT_IF_VER} },
- {EV_START, -1, -1, -1, -1, -1, {ACT_START} },
- {EV_STOP, -1, -1, -1, -1, -1, {ACT_STOP} },
- {EV_SHUTDOWN, -1, -1, -1, -1, -1, {ACT_SHUTDOWN} },
-
-/* misc. */
- {RSP_ERROR, -1, -1, -1, -1, -1, {ACT_ERROR} },
- {RSP_ZCAU, -1, -1, -1, -1, -1, {ACT_ZCAU} },
- {RSP_NONE, -1, -1, -1, -1, -1, {ACT_DEBUG} },
- {RSP_ANY, -1, -1, -1, -1, -1, {ACT_WARN} },
- {RSP_LAST}
-};
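
Each row of these tables is one transition of the AT response state machine; per the format comment above, a row matches when the response code equals resp_code, the channel's ConState lies in [min_ConState, max_ConState], and the parameter matches where given (-1 entries read as wildcard or leave-unchanged markers). Reading one row from the table above:

    {EV_TIMEOUT, 100, 100, -1, 101, 3, {0}, "Z\r"}

In ConState 100, on a timeout with any parameter: send "Z\r", enter ConState 101, and arm a 3-tick timeout.
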
-
-/* 600: start dialing, 650: dial in progress, 800: connection is up, 700: ring,
- * 400: hup, 750: accepted icall */
-struct reply_t gigaset_tab_cid[] =
-{
-/* resp_code, min_ConState, max_ConState, parameter, new_ConState, timeout,
- * action, command */
-
-/* dial */
- {EV_DIAL, -1, -1, -1, -1, -1, {ACT_DIAL} },
- {RSP_INIT, 0, 0, SEQ_DIAL, 601, 5, {ACT_CMD + AT_BC} },
- {RSP_OK, 601, 601, -1, 603, 5, {ACT_CMD + AT_PROTO} },
- {RSP_OK, 603, 603, -1, 604, 5, {ACT_CMD + AT_TYPE} },
- {RSP_OK, 604, 604, -1, 605, 5, {ACT_CMD + AT_MSN} },
- {RSP_NULL, 605, 605, -1, 606, 5, {ACT_CMD + AT_CLIP} },
- {RSP_OK, 605, 605, -1, 606, 5, {ACT_CMD + AT_CLIP} },
- {RSP_NULL, 606, 606, -1, 607, 5, {ACT_CMD + AT_ISO} },
- {RSP_OK, 606, 606, -1, 607, 5, {ACT_CMD + AT_ISO} },
- {RSP_OK, 607, 607, -1, 608, 5, {0}, "+VLS=17\r"},
- {RSP_OK, 608, 608, -1, 609, -1},
- {RSP_ZSAU, 609, 609, ZSAU_PROCEEDING, 610, 5, {ACT_CMD + AT_DIAL} },
- {RSP_OK, 610, 610, -1, 650, 0, {ACT_DIALING} },
-
- {RSP_ERROR, 601, 610, -1, 0, 0, {ACT_ABORTDIAL} },
- {EV_TIMEOUT, 601, 610, -1, 0, 0, {ACT_ABORTDIAL} },
-
-/* optional dialing responses */
- {EV_BC_OPEN, 650, 650, -1, 651, -1},
- {RSP_ZVLS, 609, 651, 17, -1, -1, {ACT_DEBUG} },
- {RSP_ZCTP, 610, 651, -1, -1, -1, {ACT_DEBUG} },
- {RSP_ZCPN, 610, 651, -1, -1, -1, {ACT_DEBUG} },
- {RSP_ZSAU, 650, 651, ZSAU_CALL_DELIVERED, -1, -1, {ACT_DEBUG} },
-
-/* connect */
- {RSP_ZSAU, 650, 650, ZSAU_ACTIVE, 800, -1, {ACT_CONNECT} },
- {RSP_ZSAU, 651, 651, ZSAU_ACTIVE, 800, -1, {ACT_CONNECT,
- ACT_NOTIFY_BC_UP} },
- {RSP_ZSAU, 750, 750, ZSAU_ACTIVE, 800, -1, {ACT_CONNECT} },
- {RSP_ZSAU, 751, 751, ZSAU_ACTIVE, 800, -1, {ACT_CONNECT,
- ACT_NOTIFY_BC_UP} },
- {EV_BC_OPEN, 800, 800, -1, 800, -1, {ACT_NOTIFY_BC_UP} },
-
-/* remote hangup */
- {RSP_ZSAU, 650, 651, ZSAU_DISCONNECT_IND, 0, 0, {ACT_REMOTEREJECT} },
- {RSP_ZSAU, 750, 751, ZSAU_DISCONNECT_IND, 0, 0, {ACT_REMOTEHUP} },
- {RSP_ZSAU, 800, 800, ZSAU_DISCONNECT_IND, 0, 0, {ACT_REMOTEHUP} },
-
-/* hangup */
- {EV_HUP, -1, -1, -1, -1, -1, {ACT_HUP} },
- {RSP_INIT, -1, -1, SEQ_HUP, 401, 5, {0}, "+VLS=0\r"},
- {RSP_OK, 401, 401, -1, 402, 5},
- {RSP_ZVLS, 402, 402, 0, 403, 5},
- {RSP_ZSAU, 403, 403, ZSAU_DISCONNECT_REQ, -1, -1, {ACT_DEBUG} },
- {RSP_ZSAU, 403, 403, ZSAU_NULL, 0, 0, {ACT_DISCONNECT} },
- {RSP_NODEV, 401, 403, -1, 0, 0, {ACT_FAKEHUP} },
- {RSP_ERROR, 401, 401, -1, 0, 0, {ACT_ABORTHUP} },
- {EV_TIMEOUT, 401, 403, -1, 0, 0, {ACT_ABORTHUP} },
-
- {EV_BC_CLOSED, 0, 0, -1, 0, -1, {ACT_NOTIFY_BC_DOWN} },
-
-/* ring */
- {RSP_ZBC, 700, 700, -1, -1, -1, {0} },
- {RSP_ZHLC, 700, 700, -1, -1, -1, {0} },
- {RSP_NMBR, 700, 700, -1, -1, -1, {0} },
- {RSP_ZCPN, 700, 700, -1, -1, -1, {0} },
- {RSP_ZCTP, 700, 700, -1, -1, -1, {0} },
- {EV_TIMEOUT, 700, 700, -1, 720, 720, {ACT_ICALL} },
- {EV_BC_CLOSED, 720, 720, -1, 0, -1, {ACT_NOTIFY_BC_DOWN} },
-
-/*accept icall*/
- {EV_ACCEPT, -1, -1, -1, -1, -1, {ACT_ACCEPT} },
- {RSP_INIT, 720, 720, SEQ_ACCEPT, 721, 5, {ACT_CMD + AT_PROTO} },
- {RSP_OK, 721, 721, -1, 722, 5, {ACT_CMD + AT_ISO} },
- {RSP_OK, 722, 722, -1, 723, 5, {0}, "+VLS=17\r"},
- {RSP_OK, 723, 723, -1, 724, 5, {0} },
- {RSP_ZVLS, 724, 724, 17, 750, 50, {ACT_ACCEPTED} },
- {RSP_ERROR, 721, 729, -1, 0, 0, {ACT_ABORTACCEPT} },
- {EV_TIMEOUT, 721, 729, -1, 0, 0, {ACT_ABORTACCEPT} },
- {RSP_ZSAU, 700, 729, ZSAU_NULL, 0, 0, {ACT_ABORTACCEPT} },
- {RSP_ZSAU, 700, 729, ZSAU_ACTIVE, 0, 0, {ACT_ABORTACCEPT} },
- {RSP_ZSAU, 700, 729, ZSAU_DISCONNECT_IND, 0, 0, {ACT_ABORTACCEPT} },
-
- {EV_BC_OPEN, 750, 750, -1, 751, -1},
- {EV_TIMEOUT, 750, 751, -1, 0, 0, {ACT_CONNTIMEOUT} },
-
-/* B channel closed (general case) */
- {EV_BC_CLOSED, -1, -1, -1, -1, -1, {ACT_NOTIFY_BC_DOWN} },
-
-/* misc. */
- {RSP_ZCON, -1, -1, -1, -1, -1, {ACT_DEBUG} },
- {RSP_ZCAU, -1, -1, -1, -1, -1, {ACT_ZCAU} },
- {RSP_NONE, -1, -1, -1, -1, -1, {ACT_DEBUG} },
- {RSP_ANY, -1, -1, -1, -1, -1, {ACT_WARN} },
- {RSP_LAST}
-};
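-
-/* For orientation, a successful outgoing call passes through the rows
- * above roughly as follows (a condensed trace derived from the table,
- * not an exhaustive one):
- *   EV_DIAL                          -> ACT_DIAL, queue SEQ_DIAL
- *   RSP_INIT              0 ->  601     send AT_BC
- *   RSP_OK              601 ->  603     send AT_PROTO
- *   ...                                 chain through 604..608 sending
- *                                       AT_TYPE, AT_MSN, AT_CLIP,
- *                                       AT_ISO, "+VLS=17"
- *   RSP_ZSAU=PROCEEDING 609 ->  610     send AT_DIAL
- *   RSP_OK              610 ->  650     ACT_DIALING
- *   RSP_ZSAU=ACTIVE     650 ->  800     ACT_CONNECT
- */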
-
-
-static const struct resp_type_t {
- char *response;
- int resp_code;
- int type;
-}
-resp_type[] =
-{
- {"OK", RSP_OK, RT_NOTHING},
- {"ERROR", RSP_ERROR, RT_NOTHING},
- {"ZSAU", RSP_ZSAU, RT_ZSAU},
- {"ZCAU", RSP_ZCAU, RT_ZCAU},
- {"RING", RSP_RING, RT_RING},
- {"ZGCI", RSP_ZGCI, RT_NUMBER},
- {"ZVLS", RSP_ZVLS, RT_NUMBER},
- {"ZCTP", RSP_ZCTP, RT_NUMBER},
- {"ZDLE", RSP_ZDLE, RT_NUMBER},
- {"ZHLC", RSP_ZHLC, RT_STRING},
- {"ZBC", RSP_ZBC, RT_STRING},
- {"NMBR", RSP_NMBR, RT_STRING},
- {"ZCPN", RSP_ZCPN, RT_STRING},
- {"ZCON", RSP_ZCON, RT_STRING},
- {NULL, 0, 0}
-};
-
-static const struct zsau_resp_t {
- char *str;
- int code;
-}
-zsau_resp[] =
-{
- {"OUTGOING_CALL_PROCEEDING", ZSAU_PROCEEDING},
- {"CALL_DELIVERED", ZSAU_CALL_DELIVERED},
- {"ACTIVE", ZSAU_ACTIVE},
- {"DISCONNECT_IND", ZSAU_DISCONNECT_IND},
- {"NULL", ZSAU_NULL},
- {"DISCONNECT_REQ", ZSAU_DISCONNECT_REQ},
- {NULL, ZSAU_UNKNOWN}
-};
-
-/* check for and remove fixed string prefix
- * If s starts with the prefix, followed by a non-alphanumeric character,
- * return a pointer to the first character after the prefix; otherwise
- * return NULL.
- */
-static char *skip_prefix(char *s, const char *prefix)
-{
- while (*prefix)
- if (*s++ != *prefix++)
- return NULL;
- if (isalnum(*s))
- return NULL;
- return s;
-}
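-
-/* For illustration: skip_prefix("ZSAU=ACTIVE", "ZSAU") yields a pointer
- * to "=ACTIVE", while skip_prefix("ZSAUX", "ZSAU") yields NULL because
- * the prefix is followed by an alphanumeric character.
- */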
-
-/* queue event with CID */
-static void add_cid_event(struct cardstate *cs, int cid, int type,
- void *ptr, int parameter)
-{
- unsigned long flags;
- unsigned next, tail;
- struct event_t *event;
-
- gig_dbg(DEBUG_EVENT, "queueing event %d for cid %d", type, cid);
-
- spin_lock_irqsave(&cs->ev_lock, flags);
-
- tail = cs->ev_tail;
- next = (tail + 1) % MAX_EVENTS;
- if (unlikely(next == cs->ev_head)) {
- dev_err(cs->dev, "event queue full\n");
- kfree(ptr);
- } else {
- event = cs->events + tail;
- event->type = type;
- event->cid = cid;
- event->ptr = ptr;
- event->arg = NULL;
- event->parameter = parameter;
- event->at_state = NULL;
- cs->ev_tail = next;
- }
-
- spin_unlock_irqrestore(&cs->ev_lock, flags);
-}
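-
-/* Note on the queue logic above: one slot is deliberately left unused;
- * "next == ev_head" signals a full queue, so "ev_head == ev_tail" can
- * unambiguously mean an empty one.
- */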
-
-/**
- * gigaset_handle_modem_response() - process received modem response
- * @cs: device descriptor structure.
- *
- * Called by asyncdata/isocdata if a block of data received from the
- * device must be processed as a modem command response. The data is
- * already in the cs structure.
- */
-void gigaset_handle_modem_response(struct cardstate *cs)
-{
- char *eoc, *psep, *ptr;
- const struct resp_type_t *rt;
- const struct zsau_resp_t *zr;
- int cid, parameter;
- u8 type, value;
-
- if (!cs->cbytes) {
- /* ignore additional LFs/CRs (M10x config mode or cx100) */
- gig_dbg(DEBUG_MCMD, "skipped EOL [%02X]", cs->respdata[0]);
- return;
- }
- cs->respdata[cs->cbytes] = 0;
-
- if (cs->at_state.getstring) {
- /* state machine wants next line verbatim */
- cs->at_state.getstring = 0;
- ptr = kstrdup(cs->respdata, GFP_ATOMIC);
- gig_dbg(DEBUG_EVENT, "string==%s", ptr ? ptr : "NULL");
- add_cid_event(cs, 0, RSP_STRING, ptr, 0);
- return;
- }
-
- /* look up response type */
- for (rt = resp_type; rt->response; ++rt) {
- eoc = skip_prefix(cs->respdata, rt->response);
- if (eoc)
- break;
- }
- if (!rt->response) {
- add_cid_event(cs, 0, RSP_NONE, NULL, 0);
- gig_dbg(DEBUG_EVENT, "unknown modem response: '%s'\n",
- cs->respdata);
- return;
- }
-
- /* check for CID */
- psep = strrchr(cs->respdata, ';');
- if (psep &&
- !kstrtoint(psep + 1, 10, &cid) &&
- cid >= 1 && cid <= 65535) {
- /* valid CID: chop it off */
- *psep = 0;
- } else {
- /* no valid CID: leave unchanged */
- cid = 0;
- }
-
- gig_dbg(DEBUG_EVENT, "CMD received: %s", cs->respdata);
- if (cid)
- gig_dbg(DEBUG_EVENT, "CID: %d", cid);
-
- switch (rt->type) {
- case RT_NOTHING:
- /* check parameter separator */
- if (*eoc)
- goto bad_param; /* extra parameter */
-
- add_cid_event(cs, cid, rt->resp_code, NULL, 0);
- break;
-
- case RT_RING:
- /* check parameter separator */
- if (!*eoc)
- eoc = NULL; /* no parameter */
- else if (*eoc++ != ',')
- goto bad_param;
-
- add_cid_event(cs, 0, rt->resp_code, NULL, cid);
-
- /* process parameters as individual responses */
- while (eoc) {
- /* look up parameter type */
- psep = NULL;
- for (rt = resp_type; rt->response; ++rt) {
- psep = skip_prefix(eoc, rt->response);
- if (psep)
- break;
- }
-
- /* all legal parameters are of type RT_STRING */
- if (!psep || rt->type != RT_STRING) {
- dev_warn(cs->dev,
- "illegal RING parameter: '%s'\n",
- eoc);
- return;
- }
-
- /* skip parameter value separator */
- if (*psep++ != '=')
- goto bad_param;
-
- /* look up end of parameter */
- eoc = strchr(psep, ',');
- if (eoc)
- *eoc++ = 0;
-
- /* retrieve parameter value */
- ptr = kstrdup(psep, GFP_ATOMIC);
-
- /* queue event */
- add_cid_event(cs, cid, rt->resp_code, ptr, 0);
- }
- break;
-
- case RT_ZSAU:
- /* check parameter separator */
- if (!*eoc) {
- /* no parameter */
- add_cid_event(cs, cid, rt->resp_code, NULL, ZSAU_NONE);
- break;
- }
- if (*eoc++ != '=')
- goto bad_param;
-
- /* look up parameter value */
- for (zr = zsau_resp; zr->str; ++zr)
- if (!strcmp(eoc, zr->str))
- break;
- if (!zr->str)
- goto bad_param;
-
- add_cid_event(cs, cid, rt->resp_code, NULL, zr->code);
- break;
-
- case RT_STRING:
- /* check parameter separator */
- if (*eoc++ != '=')
- goto bad_param;
-
- /* retrieve parameter value */
- ptr = kstrdup(eoc, GFP_ATOMIC);
-
- /* queue event */
- add_cid_event(cs, cid, rt->resp_code, ptr, 0);
- break;
-
- case RT_ZCAU:
- /* check parameter separators */
- if (*eoc++ != '=')
- goto bad_param;
- psep = strchr(eoc, ',');
- if (!psep)
- goto bad_param;
- *psep++ = 0;
-
- /* decode parameter values */
- if (kstrtou8(eoc, 16, &type) || kstrtou8(psep, 16, &value)) {
- *--psep = ',';
- goto bad_param;
- }
- parameter = (type << 8) | value;
-
- add_cid_event(cs, cid, rt->resp_code, NULL, parameter);
- break;
-
- case RT_NUMBER:
- /* check parameter separator */
- if (*eoc++ != '=')
- goto bad_param;
-
- /* decode parameter value */
- if (kstrtoint(eoc, 10, &parameter))
- goto bad_param;
-
- /* special case ZDLE: set flag before queueing event */
- if (rt->resp_code == RSP_ZDLE)
- cs->dle = parameter;
-
- add_cid_event(cs, cid, rt->resp_code, NULL, parameter);
- break;
-
-bad_param:
- /* parameter unexpected, incomplete or malformed */
- dev_warn(cs->dev, "bad parameter in response '%s'\n",
- cs->respdata);
- add_cid_event(cs, cid, rt->resp_code, NULL, -1);
- break;
-
- default:
- dev_err(cs->dev, "%s: internal error on '%s'\n",
- __func__, cs->respdata);
- }
-}
-EXPORT_SYMBOL_GPL(gigaset_handle_modem_response);
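-
-/* Worked example for the parser above: the line "ZSAU=ACTIVE;2" matches
- * the RT_ZSAU entry of resp_type[], the ";2" suffix is chopped off as
- * CID 2, "ACTIVE" is mapped to ZSAU_ACTIVE via zsau_resp[], and an
- * RSP_ZSAU event with parameter ZSAU_ACTIVE is queued for CID 2.
- */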
-
-/* disconnect_nobc
- * process closing of connection associated with given AT state structure
- * without B channel
- */
-static void disconnect_nobc(struct at_state_t **at_state_p,
- struct cardstate *cs)
-{
- unsigned long flags;
-
- spin_lock_irqsave(&cs->lock, flags);
- ++(*at_state_p)->seq_index;
-
- /* revert to selected idle mode */
- if (!cs->cidmode) {
- cs->at_state.pending_commands |= PC_UMMODE;
- gig_dbg(DEBUG_EVENT, "Scheduling PC_UMMODE");
- cs->commands_pending = 1;
- }
-
- /* check for and deallocate temporary AT state */
- if (!list_empty(&(*at_state_p)->list)) {
- list_del(&(*at_state_p)->list);
- kfree(*at_state_p);
- *at_state_p = NULL;
- }
-
- spin_unlock_irqrestore(&cs->lock, flags);
-}
-
-/* disconnect_bc
- * process closing of connection associated with given AT state structure
- * and B channel
- */
-static void disconnect_bc(struct at_state_t *at_state,
- struct cardstate *cs, struct bc_state *bcs)
-{
- unsigned long flags;
-
- spin_lock_irqsave(&cs->lock, flags);
- ++at_state->seq_index;
-
- /* revert to selected idle mode */
- if (!cs->cidmode) {
- cs->at_state.pending_commands |= PC_UMMODE;
- gig_dbg(DEBUG_EVENT, "Scheduling PC_UMMODE");
- cs->commands_pending = 1;
- }
- spin_unlock_irqrestore(&cs->lock, flags);
-
- /* invoke hardware specific handler */
- cs->ops->close_bchannel(bcs);
-
- /* notify LL */
- if (bcs->chstate & (CHS_D_UP | CHS_NOTIFY_LL)) {
- bcs->chstate &= ~(CHS_D_UP | CHS_NOTIFY_LL);
- gigaset_isdn_hupD(bcs);
- }
-}
-
-/* get_free_channel
- * get a free AT state structure: either one of those associated with the
- * B channels of the Gigaset device, or if none of those is available,
- * a newly allocated one with bcs=NULL
- * The structure should be freed by calling disconnect_nobc() after use.
- */
-static inline struct at_state_t *get_free_channel(struct cardstate *cs,
- int cid)
-/* cids: >0: siemens-cid
- * 0: without cid
- * -1: no cid assigned yet
- */
-{
- unsigned long flags;
- int i;
- struct at_state_t *ret;
-
- for (i = 0; i < cs->channels; ++i)
- if (gigaset_get_channel(cs->bcs + i) >= 0) {
- ret = &cs->bcs[i].at_state;
- ret->cid = cid;
- return ret;
- }
-
- spin_lock_irqsave(&cs->lock, flags);
- ret = kmalloc(sizeof(struct at_state_t), GFP_ATOMIC);
- if (ret) {
- gigaset_at_init(ret, NULL, cs, cid);
- list_add(&ret->list, &cs->temp_at_states);
- }
- spin_unlock_irqrestore(&cs->lock, flags);
- return ret;
-}
-
-static void init_failed(struct cardstate *cs, int mode)
-{
- int i;
- struct at_state_t *at_state;
-
- cs->at_state.pending_commands &= ~PC_INIT;
- cs->mode = mode;
- cs->mstate = MS_UNINITIALIZED;
- gigaset_free_channels(cs);
- for (i = 0; i < cs->channels; ++i) {
- at_state = &cs->bcs[i].at_state;
- if (at_state->pending_commands & PC_CID) {
- at_state->pending_commands &= ~PC_CID;
- at_state->pending_commands |= PC_NOCID;
- cs->commands_pending = 1;
- }
- }
-}
-
-static void schedule_init(struct cardstate *cs, int state)
-{
- if (cs->at_state.pending_commands & PC_INIT) {
- gig_dbg(DEBUG_EVENT, "not scheduling PC_INIT again");
- return;
- }
- cs->mstate = state;
- cs->mode = M_UNKNOWN;
- gigaset_block_channels(cs);
- cs->at_state.pending_commands |= PC_INIT;
- gig_dbg(DEBUG_EVENT, "Scheduling PC_INIT");
- cs->commands_pending = 1;
-}
-
-/* send an AT command
- * adding the "AT" prefix, cid and DLE encapsulation as appropriate
- */
-static void send_command(struct cardstate *cs, const char *cmd,
- struct at_state_t *at_state)
-{
- int cid = at_state->cid;
- struct cmdbuf_t *cb;
- size_t buflen;
-
-	buflen = strlen(cmd) + 12; /* DLE, "(AT", up to 5 cid digits, DLE, ")", NUL */
- cb = kmalloc(sizeof(struct cmdbuf_t) + buflen, GFP_ATOMIC);
- if (!cb) {
- dev_err(cs->dev, "%s: out of memory\n", __func__);
- return;
- }
- if (cid > 0 && cid <= 65535)
- cb->len = snprintf(cb->buf, buflen,
- cs->dle ? "\020(AT%d%s\020)" : "AT%d%s",
- cid, cmd);
- else
- cb->len = snprintf(cb->buf, buflen,
- cs->dle ? "\020(AT%s\020)" : "AT%s",
- cmd);
- cb->offset = 0;
- cb->next = NULL;
- cb->wake_tasklet = NULL;
- cs->ops->write_cmd(cs, cb);
-}
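-
-/* Example framings produced above: with cid == 2 and DLE mode active,
- * the command "+VLS=17\r" is sent as "\020(AT2+VLS=17\r\020)"; without
- * DLE it is sent as plain "AT2+VLS=17\r".
- */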
-
-static struct at_state_t *at_state_from_cid(struct cardstate *cs, int cid)
-{
- struct at_state_t *at_state;
- int i;
- unsigned long flags;
-
- if (cid == 0)
- return &cs->at_state;
-
- for (i = 0; i < cs->channels; ++i)
- if (cid == cs->bcs[i].at_state.cid)
- return &cs->bcs[i].at_state;
-
- spin_lock_irqsave(&cs->lock, flags);
-
- list_for_each_entry(at_state, &cs->temp_at_states, list)
- if (cid == at_state->cid) {
- spin_unlock_irqrestore(&cs->lock, flags);
- return at_state;
- }
-
- spin_unlock_irqrestore(&cs->lock, flags);
-
- return NULL;
-}
-
-static void bchannel_down(struct bc_state *bcs)
-{
- if (bcs->chstate & CHS_B_UP) {
- bcs->chstate &= ~CHS_B_UP;
- gigaset_isdn_hupB(bcs);
- }
-
- if (bcs->chstate & (CHS_D_UP | CHS_NOTIFY_LL)) {
- bcs->chstate &= ~(CHS_D_UP | CHS_NOTIFY_LL);
- gigaset_isdn_hupD(bcs);
- }
-
- gigaset_free_channel(bcs);
-
- gigaset_bcs_reinit(bcs);
-}
-
-static void bchannel_up(struct bc_state *bcs)
-{
- if (bcs->chstate & CHS_B_UP) {
- dev_notice(bcs->cs->dev, "%s: B channel already up\n",
- __func__);
- return;
- }
-
- bcs->chstate |= CHS_B_UP;
- gigaset_isdn_connB(bcs);
-}
-
-static void start_dial(struct at_state_t *at_state, void *data,
- unsigned seq_index)
-{
- struct bc_state *bcs = at_state->bcs;
- struct cardstate *cs = at_state->cs;
- char **commands = data;
- unsigned long flags;
- int i;
-
- bcs->chstate |= CHS_NOTIFY_LL;
-
- spin_lock_irqsave(&cs->lock, flags);
- if (at_state->seq_index != seq_index) {
- spin_unlock_irqrestore(&cs->lock, flags);
- goto error;
- }
- spin_unlock_irqrestore(&cs->lock, flags);
-
- for (i = 0; i < AT_NUM; ++i) {
- kfree(bcs->commands[i]);
- bcs->commands[i] = commands[i];
- }
-
- at_state->pending_commands |= PC_CID;
- gig_dbg(DEBUG_EVENT, "Scheduling PC_CID");
- cs->commands_pending = 1;
- return;
-
-error:
- for (i = 0; i < AT_NUM; ++i) {
- kfree(commands[i]);
- commands[i] = NULL;
- }
- at_state->pending_commands |= PC_NOCID;
- gig_dbg(DEBUG_EVENT, "Scheduling PC_NOCID");
- cs->commands_pending = 1;
- return;
-}
-
-static void start_accept(struct at_state_t *at_state)
-{
- struct cardstate *cs = at_state->cs;
- struct bc_state *bcs = at_state->bcs;
- int i;
-
- for (i = 0; i < AT_NUM; ++i) {
- kfree(bcs->commands[i]);
- bcs->commands[i] = NULL;
- }
-
- bcs->commands[AT_PROTO] = kmalloc(9, GFP_ATOMIC);
- bcs->commands[AT_ISO] = kmalloc(9, GFP_ATOMIC);
- if (!bcs->commands[AT_PROTO] || !bcs->commands[AT_ISO]) {
- dev_err(at_state->cs->dev, "out of memory\n");
- /* error reset */
- at_state->pending_commands |= PC_HUP;
- gig_dbg(DEBUG_EVENT, "Scheduling PC_HUP");
- cs->commands_pending = 1;
- return;
- }
-
- snprintf(bcs->commands[AT_PROTO], 9, "^SBPR=%u\r", bcs->proto2);
- snprintf(bcs->commands[AT_ISO], 9, "^SISO=%u\r", bcs->channel + 1);
-
- at_state->pending_commands |= PC_ACCEPT;
- gig_dbg(DEBUG_EVENT, "Scheduling PC_ACCEPT");
- cs->commands_pending = 1;
-}
-
-static void do_start(struct cardstate *cs)
-{
- gigaset_free_channels(cs);
-
- if (cs->mstate != MS_LOCKED)
- schedule_init(cs, MS_INIT);
-
- cs->isdn_up = 1;
- gigaset_isdn_start(cs);
-
- cs->waiting = 0;
- wake_up(&cs->waitqueue);
-}
-
-static void finish_shutdown(struct cardstate *cs)
-{
- if (cs->mstate != MS_LOCKED) {
- cs->mstate = MS_UNINITIALIZED;
- cs->mode = M_UNKNOWN;
- }
-
- /* Tell the LL that the device is not available .. */
- if (cs->isdn_up) {
- cs->isdn_up = 0;
- gigaset_isdn_stop(cs);
- }
-
- /* The rest is done by cleanup_cs() in process context. */
-
- cs->cmd_result = -ENODEV;
- cs->waiting = 0;
- wake_up(&cs->waitqueue);
-}
-
-static void do_shutdown(struct cardstate *cs)
-{
- gigaset_block_channels(cs);
-
- if (cs->mstate == MS_READY) {
- cs->mstate = MS_SHUTDOWN;
- cs->at_state.pending_commands |= PC_SHUTDOWN;
- gig_dbg(DEBUG_EVENT, "Scheduling PC_SHUTDOWN");
- cs->commands_pending = 1;
- } else
- finish_shutdown(cs);
-}
-
-static void do_stop(struct cardstate *cs)
-{
- unsigned long flags;
-
- spin_lock_irqsave(&cs->lock, flags);
- cs->connected = 0;
- spin_unlock_irqrestore(&cs->lock, flags);
-
- do_shutdown(cs);
-}
-
-/* Entering cid mode or getting a cid failed:
- * try to initialize the device and try again.
- *
- * channel >= 0: getting cid for the channel failed
- * channel < 0: entering cid mode failed
- *
- * returns 0 on success, <0 on failure
- */
-static int reinit_and_retry(struct cardstate *cs, int channel)
-{
- int i;
-
- if (--cs->retry_count <= 0)
- return -EFAULT;
-
- for (i = 0; i < cs->channels; ++i)
- if (cs->bcs[i].at_state.cid > 0)
- return -EBUSY;
-
- if (channel < 0)
- dev_warn(cs->dev,
- "Could not enter cid mode. Reinit device and try again.\n");
- else {
- dev_warn(cs->dev,
- "Could not get a call id. Reinit device and try again.\n");
- cs->bcs[channel].at_state.pending_commands |= PC_CID;
- }
- schedule_init(cs, MS_INIT);
- return 0;
-}
-
-static int at_state_invalid(struct cardstate *cs,
- struct at_state_t *test_ptr)
-{
- unsigned long flags;
- unsigned channel;
- struct at_state_t *at_state;
- int retval = 0;
-
- spin_lock_irqsave(&cs->lock, flags);
-
- if (test_ptr == &cs->at_state)
- goto exit;
-
- list_for_each_entry(at_state, &cs->temp_at_states, list)
- if (at_state == test_ptr)
- goto exit;
-
- for (channel = 0; channel < cs->channels; ++channel)
- if (&cs->bcs[channel].at_state == test_ptr)
- goto exit;
-
- retval = 1;
-exit:
- spin_unlock_irqrestore(&cs->lock, flags);
- return retval;
-}
-
-static void handle_icall(struct cardstate *cs, struct bc_state *bcs,
- struct at_state_t *at_state)
-{
- int retval;
-
- retval = gigaset_isdn_icall(at_state);
- switch (retval) {
- case ICALL_ACCEPT:
- break;
- default:
- dev_err(cs->dev, "internal error: disposition=%d\n", retval);
- /* fall through */
- case ICALL_IGNORE:
- case ICALL_REJECT:
- /* hang up actively
- * Device doc says that would reject the call.
- * In fact it doesn't.
- */
- at_state->pending_commands |= PC_HUP;
- cs->commands_pending = 1;
- break;
- }
-}
-
-static int do_lock(struct cardstate *cs)
-{
- int mode;
- int i;
-
- switch (cs->mstate) {
- case MS_UNINITIALIZED:
- case MS_READY:
- if (cs->cur_at_seq || !list_empty(&cs->temp_at_states) ||
- cs->at_state.pending_commands)
- return -EBUSY;
-
- for (i = 0; i < cs->channels; ++i)
- if (cs->bcs[i].at_state.pending_commands)
- return -EBUSY;
-
- if (gigaset_get_channels(cs) < 0)
- return -EBUSY;
-
- break;
- case MS_LOCKED:
- break;
- default:
- return -EBUSY;
- }
-
- mode = cs->mode;
- cs->mstate = MS_LOCKED;
- cs->mode = M_UNKNOWN;
-
- return mode;
-}
-
-static int do_unlock(struct cardstate *cs)
-{
- if (cs->mstate != MS_LOCKED)
- return -EINVAL;
-
- cs->mstate = MS_UNINITIALIZED;
- cs->mode = M_UNKNOWN;
- gigaset_free_channels(cs);
- if (cs->connected)
- schedule_init(cs, MS_INIT);
-
- return 0;
-}
-
-static void do_action(int action, struct cardstate *cs,
- struct bc_state *bcs,
- struct at_state_t **p_at_state, char **pp_command,
- int *p_genresp, int *p_resp_code,
- struct event_t *ev)
-{
- struct at_state_t *at_state = *p_at_state;
- struct bc_state *bcs2;
- unsigned long flags;
-
- int channel;
-
- unsigned char *s, *e;
- int i;
- unsigned long val;
-
- switch (action) {
- case ACT_NOTHING:
- break;
- case ACT_TIMEOUT:
- at_state->waiting = 1;
- break;
- case ACT_INIT:
- cs->at_state.pending_commands &= ~PC_INIT;
- cs->cur_at_seq = SEQ_NONE;
- cs->mode = M_UNIMODEM;
- spin_lock_irqsave(&cs->lock, flags);
- if (!cs->cidmode) {
- spin_unlock_irqrestore(&cs->lock, flags);
- gigaset_free_channels(cs);
- cs->mstate = MS_READY;
- break;
- }
- spin_unlock_irqrestore(&cs->lock, flags);
- cs->at_state.pending_commands |= PC_CIDMODE;
- gig_dbg(DEBUG_EVENT, "Scheduling PC_CIDMODE");
- cs->commands_pending = 1;
- break;
- case ACT_FAILINIT:
- dev_warn(cs->dev, "Could not initialize the device.\n");
- cs->dle = 0;
- init_failed(cs, M_UNKNOWN);
- cs->cur_at_seq = SEQ_NONE;
- break;
- case ACT_CONFIGMODE:
- init_failed(cs, M_CONFIG);
- cs->cur_at_seq = SEQ_NONE;
- break;
- case ACT_SETDLE1:
- cs->dle = 1;
- /* cs->inbuf[0].inputstate |= INS_command | INS_DLE_command; */
- cs->inbuf[0].inputstate &=
- ~(INS_command | INS_DLE_command);
- break;
- case ACT_SETDLE0:
- cs->dle = 0;
- cs->inbuf[0].inputstate =
- (cs->inbuf[0].inputstate & ~INS_DLE_command)
- | INS_command;
- break;
- case ACT_CMODESET:
- if (cs->mstate == MS_INIT || cs->mstate == MS_RECOVER) {
- gigaset_free_channels(cs);
- cs->mstate = MS_READY;
- }
- cs->mode = M_CID;
- cs->cur_at_seq = SEQ_NONE;
- break;
- case ACT_UMODESET:
- cs->mode = M_UNIMODEM;
- cs->cur_at_seq = SEQ_NONE;
- break;
- case ACT_FAILCMODE:
- cs->cur_at_seq = SEQ_NONE;
- if (cs->mstate == MS_INIT || cs->mstate == MS_RECOVER) {
- init_failed(cs, M_UNKNOWN);
- break;
- }
- if (reinit_and_retry(cs, -1) < 0)
- schedule_init(cs, MS_RECOVER);
- break;
- case ACT_FAILUMODE:
- cs->cur_at_seq = SEQ_NONE;
- schedule_init(cs, MS_RECOVER);
- break;
- case ACT_HUPMODEM:
- /* send "+++" (hangup in unimodem mode) */
- if (cs->connected) {
- struct cmdbuf_t *cb;
-
- cb = kmalloc(sizeof(struct cmdbuf_t) + 3, GFP_ATOMIC);
- if (!cb) {
- dev_err(cs->dev, "%s: out of memory\n",
- __func__);
- return;
- }
- memcpy(cb->buf, "+++", 3);
- cb->len = 3;
- cb->offset = 0;
- cb->next = NULL;
- cb->wake_tasklet = NULL;
- cs->ops->write_cmd(cs, cb);
- }
- break;
- case ACT_RING:
- /* get fresh AT state structure for new CID */
- at_state = get_free_channel(cs, ev->parameter);
- if (!at_state) {
- dev_warn(cs->dev,
- "RING ignored: could not allocate channel structure\n");
- break;
- }
-
- /* initialize AT state structure
- * note that bcs may be NULL if no B channel is free
- */
- at_state->ConState = 700;
- for (i = 0; i < STR_NUM; ++i) {
- kfree(at_state->str_var[i]);
- at_state->str_var[i] = NULL;
- }
- at_state->int_var[VAR_ZCTP] = -1;
-
- spin_lock_irqsave(&cs->lock, flags);
- at_state->timer_expires = RING_TIMEOUT;
- at_state->timer_active = 1;
- spin_unlock_irqrestore(&cs->lock, flags);
- break;
- case ACT_ICALL:
- handle_icall(cs, bcs, at_state);
- break;
- case ACT_FAILSDOWN:
- dev_warn(cs->dev, "Could not shut down the device.\n");
- /* fall through */
- case ACT_FAKESDOWN:
- case ACT_SDOWN:
- cs->cur_at_seq = SEQ_NONE;
- finish_shutdown(cs);
- break;
- case ACT_CONNECT:
- if (cs->onechannel) {
- at_state->pending_commands |= PC_DLE1;
- cs->commands_pending = 1;
- break;
- }
- bcs->chstate |= CHS_D_UP;
- gigaset_isdn_connD(bcs);
- cs->ops->init_bchannel(bcs);
- break;
- case ACT_DLE1:
- cs->cur_at_seq = SEQ_NONE;
- bcs = cs->bcs + cs->curchannel;
-
- bcs->chstate |= CHS_D_UP;
- gigaset_isdn_connD(bcs);
- cs->ops->init_bchannel(bcs);
- break;
- case ACT_FAKEHUP:
- at_state->int_var[VAR_ZSAU] = ZSAU_NULL;
- /* fall through */
- case ACT_DISCONNECT:
- cs->cur_at_seq = SEQ_NONE;
- at_state->cid = -1;
- if (!bcs) {
- disconnect_nobc(p_at_state, cs);
- } else if (cs->onechannel && cs->dle) {
- /* Check for other open channels not needed:
- * DLE only used for M10x with one B channel.
- */
- at_state->pending_commands |= PC_DLE0;
- cs->commands_pending = 1;
- } else {
- disconnect_bc(at_state, cs, bcs);
- }
- break;
- case ACT_FAKEDLE0:
- at_state->int_var[VAR_ZDLE] = 0;
- cs->dle = 0;
- /* fall through */
- case ACT_DLE0:
- cs->cur_at_seq = SEQ_NONE;
- bcs2 = cs->bcs + cs->curchannel;
- disconnect_bc(&bcs2->at_state, cs, bcs2);
- break;
- case ACT_ABORTHUP:
- cs->cur_at_seq = SEQ_NONE;
- dev_warn(cs->dev, "Could not hang up.\n");
- at_state->cid = -1;
- if (!bcs)
- disconnect_nobc(p_at_state, cs);
- else if (cs->onechannel)
- at_state->pending_commands |= PC_DLE0;
- else
- disconnect_bc(at_state, cs, bcs);
- schedule_init(cs, MS_RECOVER);
- break;
- case ACT_FAILDLE0:
- cs->cur_at_seq = SEQ_NONE;
- dev_warn(cs->dev, "Error leaving DLE mode.\n");
- cs->dle = 0;
- bcs2 = cs->bcs + cs->curchannel;
- disconnect_bc(&bcs2->at_state, cs, bcs2);
- schedule_init(cs, MS_RECOVER);
- break;
- case ACT_FAILDLE1:
- cs->cur_at_seq = SEQ_NONE;
- dev_warn(cs->dev,
- "Could not enter DLE mode. Trying to hang up.\n");
- channel = cs->curchannel;
- cs->bcs[channel].at_state.pending_commands |= PC_HUP;
- cs->commands_pending = 1;
- break;
-
- case ACT_CID: /* got cid; start dialing */
- cs->cur_at_seq = SEQ_NONE;
- channel = cs->curchannel;
- if (ev->parameter > 0 && ev->parameter <= 65535) {
- cs->bcs[channel].at_state.cid = ev->parameter;
- cs->bcs[channel].at_state.pending_commands |=
- PC_DIAL;
- cs->commands_pending = 1;
- break;
- }
- /* fall through - bad cid */
- case ACT_FAILCID:
- cs->cur_at_seq = SEQ_NONE;
- channel = cs->curchannel;
- if (reinit_and_retry(cs, channel) < 0) {
- dev_warn(cs->dev,
- "Could not get a call ID. Cannot dial.\n");
- bcs2 = cs->bcs + channel;
- disconnect_bc(&bcs2->at_state, cs, bcs2);
- }
- break;
- case ACT_ABORTCID:
- cs->cur_at_seq = SEQ_NONE;
- bcs2 = cs->bcs + cs->curchannel;
- disconnect_bc(&bcs2->at_state, cs, bcs2);
- break;
-
- case ACT_DIALING:
- case ACT_ACCEPTED:
- cs->cur_at_seq = SEQ_NONE;
- break;
-
-	case ACT_ABORTACCEPT:	/* hangup/error/timeout during ICALL processing */
- if (bcs)
- disconnect_bc(at_state, cs, bcs);
- else
- disconnect_nobc(p_at_state, cs);
- break;
-
- case ACT_ABORTDIAL: /* error/timeout during dial preparation */
- cs->cur_at_seq = SEQ_NONE;
- at_state->pending_commands |= PC_HUP;
- cs->commands_pending = 1;
- break;
-
- case ACT_REMOTEREJECT: /* DISCONNECT_IND after dialling */
- case ACT_CONNTIMEOUT: /* timeout waiting for ZSAU=ACTIVE */
- case ACT_REMOTEHUP: /* DISCONNECT_IND with established connection */
- at_state->pending_commands |= PC_HUP;
- cs->commands_pending = 1;
- break;
-	case ACT_GETSTRING: /* warning: responses such as RING or ZDLE are
-			       not handled properly while getstring is set */
- at_state->getstring = 1;
- break;
- case ACT_SETVER:
- if (!ev->ptr) {
- *p_genresp = 1;
- *p_resp_code = RSP_ERROR;
- break;
- }
- s = ev->ptr;
-
- if (!strcmp(s, "OK")) {
- /* OK without version string: assume old response */
- *p_genresp = 1;
- *p_resp_code = RSP_NONE;
- break;
- }
-
- for (i = 0; i < 4; ++i) {
- val = simple_strtoul(s, (char **) &e, 10);
- if (val > INT_MAX || e == s)
- break;
- if (i == 3) {
- if (*e)
- break;
- } else if (*e != '.')
- break;
- else
- s = e + 1;
- cs->fwver[i] = val;
- }
- if (i != 4) {
- *p_genresp = 1;
- *p_resp_code = RSP_ERROR;
- break;
- }
- cs->gotfwver = 0;
- break;
- case ACT_GOTVER:
- if (cs->gotfwver == 0) {
- cs->gotfwver = 1;
- gig_dbg(DEBUG_EVENT,
- "firmware version %02d.%03d.%02d.%02d",
- cs->fwver[0], cs->fwver[1],
- cs->fwver[2], cs->fwver[3]);
- break;
- }
- /* fall through */
- case ACT_FAILVER:
- cs->gotfwver = -1;
- dev_err(cs->dev, "could not read firmware version.\n");
- break;
- case ACT_ERROR:
- gig_dbg(DEBUG_ANY, "%s: ERROR response in ConState %d",
- __func__, at_state->ConState);
- cs->cur_at_seq = SEQ_NONE;
- break;
- case ACT_DEBUG:
- gig_dbg(DEBUG_ANY, "%s: resp_code %d in ConState %d",
- __func__, ev->type, at_state->ConState);
- break;
- case ACT_WARN:
- dev_warn(cs->dev, "%s: resp_code %d in ConState %d!\n",
- __func__, ev->type, at_state->ConState);
- break;
- case ACT_ZCAU:
- dev_warn(cs->dev, "cause code %04x in connection state %d.\n",
- ev->parameter, at_state->ConState);
- break;
-
- /* events from the LL */
-
- case ACT_DIAL:
- if (!ev->ptr) {
- *p_genresp = 1;
- *p_resp_code = RSP_ERROR;
- break;
- }
- start_dial(at_state, ev->ptr, ev->parameter);
- break;
- case ACT_ACCEPT:
- start_accept(at_state);
- break;
- case ACT_HUP:
- at_state->pending_commands |= PC_HUP;
- gig_dbg(DEBUG_EVENT, "Scheduling PC_HUP");
- cs->commands_pending = 1;
- break;
-
- /* hotplug events */
-
- case ACT_STOP:
- do_stop(cs);
- break;
- case ACT_START:
- do_start(cs);
- break;
-
- /* events from the interface */
-
- case ACT_IF_LOCK:
- cs->cmd_result = ev->parameter ? do_lock(cs) : do_unlock(cs);
- cs->waiting = 0;
- wake_up(&cs->waitqueue);
- break;
- case ACT_IF_VER:
- if (ev->parameter != 0)
- cs->cmd_result = -EINVAL;
- else if (cs->gotfwver != 1) {
- cs->cmd_result = -ENOENT;
- } else {
- memcpy(ev->arg, cs->fwver, sizeof cs->fwver);
- cs->cmd_result = 0;
- }
- cs->waiting = 0;
- wake_up(&cs->waitqueue);
- break;
-
- /* events from the proc file system */
-
- case ACT_PROC_CIDMODE:
- spin_lock_irqsave(&cs->lock, flags);
- if (ev->parameter != cs->cidmode) {
- cs->cidmode = ev->parameter;
- if (ev->parameter) {
- cs->at_state.pending_commands |= PC_CIDMODE;
- gig_dbg(DEBUG_EVENT, "Scheduling PC_CIDMODE");
- } else {
- cs->at_state.pending_commands |= PC_UMMODE;
- gig_dbg(DEBUG_EVENT, "Scheduling PC_UMMODE");
- }
- cs->commands_pending = 1;
- }
- spin_unlock_irqrestore(&cs->lock, flags);
- cs->waiting = 0;
- wake_up(&cs->waitqueue);
- break;
-
- /* events from the hardware drivers */
-
- case ACT_NOTIFY_BC_DOWN:
- bchannel_down(bcs);
- break;
- case ACT_NOTIFY_BC_UP:
- bchannel_up(bcs);
- break;
- case ACT_SHUTDOWN:
- do_shutdown(cs);
- break;
-
-
- default:
- if (action >= ACT_CMD && action < ACT_CMD + AT_NUM) {
- *pp_command = at_state->bcs->commands[action - ACT_CMD];
- if (!*pp_command) {
- *p_genresp = 1;
- *p_resp_code = RSP_NULL;
- }
- } else
- dev_err(cs->dev, "%s: action==%d!\n", __func__, action);
- }
-}
-
-/* state machine implementing the call setup and hangup procedures */
-static void process_event(struct cardstate *cs, struct event_t *ev)
-{
- struct bc_state *bcs;
- char *p_command = NULL;
- struct reply_t *rep;
- int rcode;
- int genresp = 0;
- int resp_code = RSP_ERROR;
- struct at_state_t *at_state;
- int index;
- int curact;
- unsigned long flags;
-
- if (ev->cid >= 0) {
- at_state = at_state_from_cid(cs, ev->cid);
- if (!at_state) {
- gig_dbg(DEBUG_EVENT, "event %d for invalid cid %d",
- ev->type, ev->cid);
- gigaset_add_event(cs, &cs->at_state, RSP_WRONG_CID,
- NULL, 0, NULL);
- return;
- }
- } else {
- at_state = ev->at_state;
- if (at_state_invalid(cs, at_state)) {
- gig_dbg(DEBUG_EVENT, "event for invalid at_state %p",
- at_state);
- return;
- }
- }
-
- gig_dbg(DEBUG_EVENT, "connection state %d, event %d",
- at_state->ConState, ev->type);
-
- bcs = at_state->bcs;
-
-	/* get the response table to search */
- rep = at_state->replystruct;
-
- spin_lock_irqsave(&cs->lock, flags);
- if (ev->type == EV_TIMEOUT) {
- if (ev->parameter != at_state->timer_index
- || !at_state->timer_active) {
- ev->type = RSP_NONE; /* old timeout */
- gig_dbg(DEBUG_EVENT, "old timeout");
- } else {
- if (at_state->waiting)
- gig_dbg(DEBUG_EVENT, "stopped waiting");
- else
- gig_dbg(DEBUG_EVENT, "timeout occurred");
- }
- }
- spin_unlock_irqrestore(&cs->lock, flags);
-
- /* if the response belongs to a variable in at_state->int_var[VAR_XXXX]
- or at_state->str_var[STR_XXXX], set it */
- if (ev->type >= RSP_VAR && ev->type < RSP_VAR + VAR_NUM) {
- index = ev->type - RSP_VAR;
- at_state->int_var[index] = ev->parameter;
- } else if (ev->type >= RSP_STR && ev->type < RSP_STR + STR_NUM) {
- index = ev->type - RSP_STR;
- kfree(at_state->str_var[index]);
- at_state->str_var[index] = ev->ptr;
- ev->ptr = NULL; /* prevent process_events() from
- deallocating ptr */
- }
-
- if (ev->type == EV_TIMEOUT || ev->type == RSP_STRING)
- at_state->getstring = 0;
-
-	/* search the response table for a row matching the modem response
-	   and the current ConState */
- for (;; rep++) {
- rcode = rep->resp_code;
- if (rcode == RSP_LAST) {
- /* found nothing...*/
- dev_warn(cs->dev, "%s: rcode=RSP_LAST: "
- "resp_code %d in ConState %d!\n",
- __func__, ev->type, at_state->ConState);
- return;
- }
- if ((rcode == RSP_ANY || rcode == ev->type)
- && ((int) at_state->ConState >= rep->min_ConState)
- && (rep->max_ConState < 0
- || (int) at_state->ConState <= rep->max_ConState)
- && (rep->parameter < 0 || rep->parameter == ev->parameter))
- break;
- }
-
- p_command = rep->command;
-
- at_state->waiting = 0;
- for (curact = 0; curact < MAXACT; ++curact) {
-		/* perform the actions prescribed by the matching row */
- do_action(rep->action[curact], cs, bcs, &at_state, &p_command,
- &genresp, &resp_code, ev);
- if (!at_state)
- /* at_state destroyed by disconnect */
- return;
- }
-
-	/* advance to the row's new ConState, if specified */
- if (rep->new_ConState >= 0)
- at_state->ConState = rep->new_ConState;
-
- if (genresp) {
- spin_lock_irqsave(&cs->lock, flags);
- at_state->timer_expires = 0;
- at_state->timer_active = 0;
- spin_unlock_irqrestore(&cs->lock, flags);
- gigaset_add_event(cs, at_state, resp_code, NULL, 0, NULL);
- } else {
-		/* send the row's command to the modem, if any */
- if (p_command) {
- if (cs->connected)
- send_command(cs, p_command, at_state);
- else
- gigaset_add_event(cs, at_state, RSP_NODEV,
- NULL, 0, NULL);
- }
-
- spin_lock_irqsave(&cs->lock, flags);
- if (!rep->timeout) {
- at_state->timer_expires = 0;
- at_state->timer_active = 0;
- } else if (rep->timeout > 0) { /* new timeout */
- at_state->timer_expires = rep->timeout * 10;
- at_state->timer_active = 1;
- ++at_state->timer_index;
- }
- spin_unlock_irqrestore(&cs->lock, flags);
- }
-}
-
-static void schedule_sequence(struct cardstate *cs,
- struct at_state_t *at_state, int sequence)
-{
- cs->cur_at_seq = sequence;
- gigaset_add_event(cs, at_state, RSP_INIT, NULL, sequence, NULL);
-}
-
-static void process_command_flags(struct cardstate *cs)
-{
- struct at_state_t *at_state = NULL;
- struct bc_state *bcs;
- int i;
- int sequence;
- unsigned long flags;
-
- cs->commands_pending = 0;
-
- if (cs->cur_at_seq) {
- gig_dbg(DEBUG_EVENT, "not searching scheduled commands: busy");
- return;
- }
-
- gig_dbg(DEBUG_EVENT, "searching scheduled commands");
-
- sequence = SEQ_NONE;
-
- /* clear pending_commands and hangup channels on shutdown */
- if (cs->at_state.pending_commands & PC_SHUTDOWN) {
- cs->at_state.pending_commands &= ~PC_CIDMODE;
- for (i = 0; i < cs->channels; ++i) {
- bcs = cs->bcs + i;
- at_state = &bcs->at_state;
- at_state->pending_commands &=
- ~(PC_DLE1 | PC_ACCEPT | PC_DIAL);
- if (at_state->cid > 0)
- at_state->pending_commands |= PC_HUP;
- if (at_state->pending_commands & PC_CID) {
- at_state->pending_commands |= PC_NOCID;
- at_state->pending_commands &= ~PC_CID;
- }
- }
- }
-
- /* clear pending_commands and hangup channels on reset */
- if (cs->at_state.pending_commands & PC_INIT) {
- cs->at_state.pending_commands &= ~PC_CIDMODE;
- for (i = 0; i < cs->channels; ++i) {
- bcs = cs->bcs + i;
- at_state = &bcs->at_state;
- at_state->pending_commands &=
- ~(PC_DLE1 | PC_ACCEPT | PC_DIAL);
- if (at_state->cid > 0)
- at_state->pending_commands |= PC_HUP;
- if (cs->mstate == MS_RECOVER) {
- if (at_state->pending_commands & PC_CID) {
- at_state->pending_commands |= PC_NOCID;
- at_state->pending_commands &= ~PC_CID;
- }
- }
- }
- }
-
- /* only switch back to unimodem mode if no commands are pending and
- * no channels are up */
- spin_lock_irqsave(&cs->lock, flags);
- if (cs->at_state.pending_commands == PC_UMMODE
- && !cs->cidmode
- && list_empty(&cs->temp_at_states)
- && cs->mode == M_CID) {
- sequence = SEQ_UMMODE;
- at_state = &cs->at_state;
- for (i = 0; i < cs->channels; ++i) {
- bcs = cs->bcs + i;
- if (bcs->at_state.pending_commands ||
- bcs->at_state.cid > 0) {
- sequence = SEQ_NONE;
- break;
- }
- }
- }
- spin_unlock_irqrestore(&cs->lock, flags);
- cs->at_state.pending_commands &= ~PC_UMMODE;
- if (sequence != SEQ_NONE) {
- schedule_sequence(cs, at_state, sequence);
- return;
- }
-
- for (i = 0; i < cs->channels; ++i) {
- bcs = cs->bcs + i;
- if (bcs->at_state.pending_commands & PC_HUP) {
- if (cs->dle) {
- cs->curchannel = bcs->channel;
- schedule_sequence(cs, &cs->at_state, SEQ_DLE0);
- return;
- }
- bcs->at_state.pending_commands &= ~PC_HUP;
- if (bcs->at_state.pending_commands & PC_CID) {
- /* not yet dialing: PC_NOCID is sufficient */
- bcs->at_state.pending_commands |= PC_NOCID;
- bcs->at_state.pending_commands &= ~PC_CID;
- } else {
- schedule_sequence(cs, &bcs->at_state, SEQ_HUP);
- return;
- }
- }
- if (bcs->at_state.pending_commands & PC_NOCID) {
- bcs->at_state.pending_commands &= ~PC_NOCID;
- cs->curchannel = bcs->channel;
- schedule_sequence(cs, &cs->at_state, SEQ_NOCID);
- return;
- } else if (bcs->at_state.pending_commands & PC_DLE0) {
- bcs->at_state.pending_commands &= ~PC_DLE0;
- cs->curchannel = bcs->channel;
- schedule_sequence(cs, &cs->at_state, SEQ_DLE0);
- return;
- }
- }
-
- list_for_each_entry(at_state, &cs->temp_at_states, list)
- if (at_state->pending_commands & PC_HUP) {
- at_state->pending_commands &= ~PC_HUP;
- schedule_sequence(cs, at_state, SEQ_HUP);
- return;
- }
-
- if (cs->at_state.pending_commands & PC_INIT) {
- cs->at_state.pending_commands &= ~PC_INIT;
- cs->dle = 0;
- cs->inbuf->inputstate = INS_command;
- schedule_sequence(cs, &cs->at_state, SEQ_INIT);
- return;
- }
- if (cs->at_state.pending_commands & PC_SHUTDOWN) {
- cs->at_state.pending_commands &= ~PC_SHUTDOWN;
- schedule_sequence(cs, &cs->at_state, SEQ_SHUTDOWN);
- return;
- }
- if (cs->at_state.pending_commands & PC_CIDMODE) {
- cs->at_state.pending_commands &= ~PC_CIDMODE;
- if (cs->mode == M_UNIMODEM) {
- cs->retry_count = 1;
- schedule_sequence(cs, &cs->at_state, SEQ_CIDMODE);
- return;
- }
- }
-
- for (i = 0; i < cs->channels; ++i) {
- bcs = cs->bcs + i;
- if (bcs->at_state.pending_commands & PC_DLE1) {
- bcs->at_state.pending_commands &= ~PC_DLE1;
- cs->curchannel = bcs->channel;
- schedule_sequence(cs, &cs->at_state, SEQ_DLE1);
- return;
- }
- if (bcs->at_state.pending_commands & PC_ACCEPT) {
- bcs->at_state.pending_commands &= ~PC_ACCEPT;
- schedule_sequence(cs, &bcs->at_state, SEQ_ACCEPT);
- return;
- }
- if (bcs->at_state.pending_commands & PC_DIAL) {
- bcs->at_state.pending_commands &= ~PC_DIAL;
- schedule_sequence(cs, &bcs->at_state, SEQ_DIAL);
- return;
- }
- if (bcs->at_state.pending_commands & PC_CID) {
- switch (cs->mode) {
- case M_UNIMODEM:
- cs->at_state.pending_commands |= PC_CIDMODE;
- gig_dbg(DEBUG_EVENT, "Scheduling PC_CIDMODE");
- cs->commands_pending = 1;
- return;
- case M_UNKNOWN:
- schedule_init(cs, MS_INIT);
- return;
- }
- bcs->at_state.pending_commands &= ~PC_CID;
- cs->curchannel = bcs->channel;
- cs->retry_count = 2;
- schedule_sequence(cs, &cs->at_state, SEQ_CID);
- return;
- }
- }
-}
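-
-/* Summary of the priority order implemented above: hangups and DLE
- * teardown are serviced first, then device init/shutdown and mode
- * switches, and only then DLE1, accept, dial and CID acquisition for
- * the individual channels.
- */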
-
-static void process_events(struct cardstate *cs)
-{
- struct event_t *ev;
- unsigned head, tail;
- int i;
- int check_flags = 0;
- int was_busy;
- unsigned long flags;
-
- spin_lock_irqsave(&cs->ev_lock, flags);
- head = cs->ev_head;
-
- for (i = 0; i < 2 * MAX_EVENTS; ++i) {
- tail = cs->ev_tail;
- if (tail == head) {
- if (!check_flags && !cs->commands_pending)
- break;
- check_flags = 0;
- spin_unlock_irqrestore(&cs->ev_lock, flags);
- process_command_flags(cs);
- spin_lock_irqsave(&cs->ev_lock, flags);
- tail = cs->ev_tail;
- if (tail == head) {
- if (!cs->commands_pending)
- break;
- continue;
- }
- }
-
- ev = cs->events + head;
- was_busy = cs->cur_at_seq != SEQ_NONE;
- spin_unlock_irqrestore(&cs->ev_lock, flags);
- process_event(cs, ev);
- spin_lock_irqsave(&cs->ev_lock, flags);
- kfree(ev->ptr);
- ev->ptr = NULL;
- if (was_busy && cs->cur_at_seq == SEQ_NONE)
- check_flags = 1;
-
- head = (head + 1) % MAX_EVENTS;
- cs->ev_head = head;
- }
-
- spin_unlock_irqrestore(&cs->ev_lock, flags);
-
- if (i == 2 * MAX_EVENTS) {
- dev_err(cs->dev,
- "infinite loop in process_events; aborting.\n");
- }
-}
-
-/* tasklet scheduled on any event received from the Gigaset device
- * parameter:
- * data ISDN controller state structure
- */
-void gigaset_handle_event(unsigned long data)
-{
- struct cardstate *cs = (struct cardstate *) data;
-
- /* handle incoming data on control/common channel */
- if (cs->inbuf->head != cs->inbuf->tail) {
- gig_dbg(DEBUG_INTR, "processing new data");
- cs->ops->handle_input(cs->inbuf);
- }
-
- process_events(cs);
-}
diff --git a/drivers/staging/isdn/gigaset/gigaset.h b/drivers/staging/isdn/gigaset/gigaset.h
deleted file mode 100644
index 0ecc2b5ea553..000000000000
--- a/drivers/staging/isdn/gigaset/gigaset.h
+++ /dev/null
@@ -1,827 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-or-later */
-/*
- * Siemens Gigaset 307x driver
- * Common header file for all connection variants
- *
- * Written by Stefan Eilers
- * and Hansjoerg Lipp <hjlipp@web.de>
- *
- * =====================================================================
- */
-
-#ifndef GIGASET_H
-#define GIGASET_H
-
-/* define global prefix for pr_ macros in linux/kernel.h */
-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-
-#include <linux/kernel.h>
-#include <linux/sched.h>
-#include <linux/compiler.h>
-#include <linux/types.h>
-#include <linux/ctype.h>
-#include <linux/slab.h>
-#include <linux/spinlock.h>
-#include <linux/skbuff.h>
-#include <linux/netdevice.h>
-#include <linux/ppp_defs.h>
-#include <linux/timer.h>
-#include <linux/interrupt.h>
-#include <linux/tty.h>
-#include <linux/tty_driver.h>
-#include <linux/list.h>
-#include <linux/atomic.h>
-
-#define GIG_VERSION {0, 5, 0, 0}
-#define GIG_COMPAT {0, 4, 0, 0}
-
-#define MAX_REC_PARAMS 10 /* Max. number of params in response string */
-#define MAX_RESP_SIZE 511 /* Max. size of a response string */
-
-#define MAX_EVENTS 64 /* size of event queue */
-
-#define RBUFSIZE 8192
-
-#define GIG_TICK 100 /* in milliseconds */
-
-/* timeout values (unit: 1 sec) */
-#define INIT_TIMEOUT 1
-
-/* timeout values (unit: 0.1 sec) */
-#define RING_TIMEOUT 3 /* for additional parameters to RING */
-#define BAS_TIMEOUT 20 /* for response to Base USB ops */
-#define ATRDY_TIMEOUT 3 /* for HD_READY_SEND_ATDATA */
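-/* (with GIG_TICK at 100 ms these values count timer ticks directly,
- * e.g. RING_TIMEOUT 3 expires after 0.3 s; the per-row timeouts in
- * struct reply_t are multiplied by 10 and thus count whole seconds) */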
-
-#define BAS_RETRY 3 /* max. retries for base USB ops */
-
-#define MAXACT 3
-
-extern int gigaset_debuglevel;	/* needs a cast to (enum debuglevel) */
-
-/* debug flags, combine by adding/bitwise OR */
-enum debuglevel {
- DEBUG_INTR = 0x00008, /* interrupt processing */
- DEBUG_CMD = 0x00020, /* sent/received LL commands */
- DEBUG_STREAM = 0x00040, /* application data stream I/O events */
- DEBUG_STREAM_DUMP = 0x00080, /* application data stream content */
- DEBUG_LLDATA = 0x00100, /* sent/received LL data */
- DEBUG_EVENT = 0x00200, /* event processing */
- DEBUG_HDLC = 0x00800, /* M10x HDLC processing */
- DEBUG_CHANNEL = 0x01000, /* channel allocation/deallocation */
- DEBUG_TRANSCMD = 0x02000, /* AT-COMMANDS+RESPONSES */
- DEBUG_MCMD = 0x04000, /* COMMANDS THAT ARE SENT VERY OFTEN */
- DEBUG_INIT = 0x08000, /* (de)allocation+initialization of data
- structures */
- DEBUG_SUSPEND = 0x10000, /* suspend/resume processing */
- DEBUG_OUTPUT = 0x20000, /* output to device */
- DEBUG_ISO = 0x40000, /* isochronous transfers */
- DEBUG_IF = 0x80000, /* character device operations */
- DEBUG_USBREQ = 0x100000, /* USB communication (except payload
- data) */
- DEBUG_LOCKCMD = 0x200000, /* AT commands and responses when
- MS_LOCKED */
-
- DEBUG_ANY = 0x3fffff, /* print message if any of the others is
- activated */
-};
-
-#ifdef CONFIG_GIGASET_DEBUG
-
-#define gig_dbg(level, format, arg...) \
- do { \
- if (unlikely(((enum debuglevel)gigaset_debuglevel) & (level))) \
- printk(KERN_DEBUG KBUILD_MODNAME ": " format "\n", \
- ## arg); \
- } while (0)
-#define DEBUG_DEFAULT (DEBUG_TRANSCMD | DEBUG_CMD | DEBUG_USBREQ)
-
-#else
-
-#define gig_dbg(level, format, arg...) do {} while (0)
-#define DEBUG_DEFAULT 0
-
-#endif
-
-void gigaset_dbg_buffer(enum debuglevel level, const unsigned char *msg,
- size_t len, const unsigned char *buf);
-
-/* connection state */
-#define ZSAU_NONE 0
-#define ZSAU_PROCEEDING 1
-#define ZSAU_CALL_DELIVERED 2
-#define ZSAU_ACTIVE 3
-#define ZSAU_DISCONNECT_IND 4
-#define ZSAU_NULL 5
-#define ZSAU_DISCONNECT_REQ 6
-#define ZSAU_UNKNOWN -1
-
-/* USB control transfer requests */
-#define OUT_VENDOR_REQ (USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_ENDPOINT)
-#define IN_VENDOR_REQ (USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_ENDPOINT)
-
-/* interrupt pipe messages */
-#define HD_B1_FLOW_CONTROL 0x80
-#define HD_B2_FLOW_CONTROL 0x81
-#define HD_RECEIVEATDATA_ACK (0x35) /* 3070 */
-#define HD_READY_SEND_ATDATA (0x36) /* 3070 */
-#define HD_OPEN_ATCHANNEL_ACK (0x37) /* 3070 */
-#define HD_CLOSE_ATCHANNEL_ACK (0x38) /* 3070 */
-#define HD_DEVICE_INIT_OK (0x11) /* ISurf USB + 3070 */
-#define HD_OPEN_B1CHANNEL_ACK (0x51) /* ISurf USB + 3070 */
-#define HD_OPEN_B2CHANNEL_ACK (0x52) /* ISurf USB + 3070 */
-#define HD_CLOSE_B1CHANNEL_ACK (0x53) /* ISurf USB + 3070 */
-#define HD_CLOSE_B2CHANNEL_ACK (0x54) /* ISurf USB + 3070 */
-#define HD_SUSPEND_END (0x61) /* ISurf USB */
-#define HD_RESET_INTERRUPT_PIPE_ACK (0xFF) /* ISurf USB + 3070 */
-
-/* control requests */
-#define HD_OPEN_B1CHANNEL (0x23) /* ISurf USB + 3070 */
-#define HD_CLOSE_B1CHANNEL (0x24) /* ISurf USB + 3070 */
-#define HD_OPEN_B2CHANNEL (0x25) /* ISurf USB + 3070 */
-#define HD_CLOSE_B2CHANNEL (0x26) /* ISurf USB + 3070 */
-#define HD_RESET_INTERRUPT_PIPE (0x27) /* ISurf USB + 3070 */
-#define HD_DEVICE_INIT_ACK (0x34) /* ISurf USB + 3070 */
-#define HD_WRITE_ATMESSAGE (0x12) /* 3070 */
-#define HD_READ_ATMESSAGE (0x13) /* 3070 */
-#define HD_OPEN_ATCHANNEL (0x28) /* 3070 */
-#define HD_CLOSE_ATCHANNEL (0x29) /* 3070 */
-
-/* number of B channels supported by base driver */
-#define BAS_CHANNELS 2
-
-/* USB frames for isochronous transfer */
-#define BAS_FRAMETIME 1 /* number of milliseconds between frames */
-#define BAS_NUMFRAMES 8 /* number of frames per URB */
-#define BAS_MAXFRAME 16 /* allocated bytes per frame */
-#define BAS_NORMFRAME 8 /* send size without flow control */
-#define BAS_HIGHFRAME 10 /* " " with positive flow control */
-#define BAS_LOWFRAME 5 /* " " with negative flow control */
-#define BAS_CORRFRAMES 4 /* flow control multiplier */
-
-#define BAS_INBUFSIZE (BAS_MAXFRAME * BAS_NUMFRAMES) /* size of isoc in buf
- * per URB */
-#define BAS_OUTBUFSIZE 4096 /* size of common isoc out buffer */
-#define BAS_OUTBUFPAD BAS_MAXFRAME /* size of pad area for isoc out buf */
-
-#define BAS_INURBS 3
-#define BAS_OUTURBS 3
-
-/* variable commands in struct bc_state */
-#define AT_ISO 0
-#define AT_DIAL 1
-#define AT_MSN 2
-#define AT_BC 3
-#define AT_PROTO 4
-#define AT_TYPE 5
-#define AT_CLIP 6
-/* total number */
-#define AT_NUM 7
-
-/* variables in struct at_state_t */
-/* - numeric */
-#define VAR_ZSAU 0
-#define VAR_ZDLE 1
-#define VAR_ZCTP 2
-/* total number */
-#define VAR_NUM 3
-/* - string */
-#define STR_NMBR 0
-#define STR_ZCPN 1
-#define STR_ZCON 2
-#define STR_ZBC 3
-#define STR_ZHLC 4
-/* total number */
-#define STR_NUM 5
-
-/* event types */
-#define EV_TIMEOUT -105
-#define EV_IF_VER -106
-#define EV_PROC_CIDMODE -107
-#define EV_SHUTDOWN -108
-#define EV_START -110
-#define EV_STOP -111
-#define EV_IF_LOCK -112
-#define EV_ACCEPT -114
-#define EV_DIAL -115
-#define EV_HUP -116
-#define EV_BC_OPEN -117
-#define EV_BC_CLOSED -118
-
-/* input state */
-#define INS_command 0x0001 /* receiving messages (not payload data) */
-#define INS_DLE_char 0x0002 /* DLE flag received (in DLE mode) */
-#define INS_byte_stuff 0x0004
-#define INS_have_data 0x0008
-#define INS_DLE_command 0x0020 /* DLE message start (<DLE> X) received */
-#define INS_flag_hunt 0x0040
-
-/* channel state */
-#define CHS_D_UP 0x01
-#define CHS_B_UP 0x02
-#define CHS_NOTIFY_LL 0x04
-
-#define ICALL_REJECT 0
-#define ICALL_ACCEPT 1
-#define ICALL_IGNORE 2
-
-/* device state */
-#define MS_UNINITIALIZED 0
-#define MS_INIT 1
-#define MS_LOCKED 2
-#define MS_SHUTDOWN 3
-#define MS_RECOVER 4
-#define MS_READY 5
-
-/* mode */
-#define M_UNKNOWN 0
-#define M_CONFIG 1
-#define M_UNIMODEM 2
-#define M_CID 3
-
-/* start mode */
-#define SM_LOCKED 0
-#define SM_ISDN 1 /* default */
-
-/* layer 2 protocols (AT^SBPR=...) */
-#define L2_BITSYNC 0
-#define L2_HDLC 1
-#define L2_VOICE 2
-
-struct gigaset_ops;
-struct gigaset_driver;
-
-struct usb_cardstate;
-struct ser_cardstate;
-struct bas_cardstate;
-
-struct bc_state;
-struct usb_bc_state;
-struct ser_bc_state;
-struct bas_bc_state;
-
-struct reply_t {
- int resp_code; /* RSP_XXXX */
- int min_ConState; /* <0 => ignore */
- int max_ConState; /* <0 => ignore */
- int parameter; /* e.g. ZSAU_XXXX <0: ignore*/
- int new_ConState; /* <0 => ignore */
-	int timeout; /* >0 => *HZ; <=0 => TOUT_XXXX */
- int action[MAXACT]; /* ACT_XXXX */
- char *command; /* NULL==none */
-};
-
-extern struct reply_t gigaset_tab_cid[];
-extern struct reply_t gigaset_tab_nocid[];
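-
-/* How a row is read, taking an entry from gigaset_tab_cid as an example:
- *   {RSP_OK, 603, 603, -1, 604, 5, {ACT_CMD + AT_TYPE} }
- * means: on an OK response while ConState is 603, ignore the response
- * parameter, move to ConState 604, arm a 5 second timeout, and send the
- * channel's AT_TYPE command.
- */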
-
-struct inbuf_t {
- struct cardstate *cs;
- int inputstate;
- int head, tail;
- unsigned char data[RBUFSIZE];
-};
-
-/* isochronous write buffer structure
- * circular buffer with pad area for extraction of complete USB frames
- * - data[read..nextread-1] is valid data already submitted to the USB subsystem
- * - data[nextread..write-1] is valid data yet to be sent
- * - data[write] is the next byte to write to
- *   - in byte-oriented L2 protocols, it is completely free
- *   - in bit-oriented L2 protocols, it may contain a partial byte of valid data
- * - data[write+1..read-1] is free
- * - wbits is the number of valid data bits in data[write], starting at the LSB
- * - writesem is the semaphore for writing to the buffer:
- * if writesem <= 0, data[write..read-1] is currently being written to
- * - idle contains the byte value to repeat when the end of valid data is
- * reached; if nextread==write (buffer contains no data to send), either the
- * BAS_OUTBUFPAD bytes immediately before data[write] (if
- * write>=BAS_OUTBUFPAD) or those of the pad area (if write<BAS_OUTBUFPAD)
- * are also filled with that value
- */
-struct isowbuf_t {
- int read;
- int nextread;
- int write;
- atomic_t writesem;
- int wbits;
- unsigned char data[BAS_OUTBUFSIZE + BAS_OUTBUFPAD];
- unsigned char idle;
-};
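-
-/* A minimal sketch (not part of the driver proper) of how the regions
- * described above fall out of the indices, assuming read/nextread/write
- * wrap at BAS_OUTBUFSIZE:
- */
-static inline int isowbuf_pending(const struct isowbuf_t *iwb)
-{
-	/* bytes in data[nextread..write-1]: valid data yet to be sent */
-	return (iwb->write - iwb->nextread + BAS_OUTBUFSIZE)
-		% BAS_OUTBUFSIZE;
-}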
-
-/* isochronous write URB context structure
- * data to be stored along with the URB and retrieved when it is returned
- * as completed by the USB subsystem
- * - urb: pointer to the URB itself
- * - bcs: pointer to the B Channel control structure
- * - limit: end of write buffer area covered by this URB
- * - status: URB completion status
- */
-struct isow_urbctx_t {
- struct urb *urb;
- struct bc_state *bcs;
- int limit;
- int status;
-};
-
-/* AT state structure
- * data associated with the state of an ISDN connection, whether or not
- * it is currently assigned a B channel
- */
-struct at_state_t {
- struct list_head list;
- int waiting;
- int getstring;
- unsigned timer_index;
- unsigned long timer_expires;
- int timer_active;
- unsigned int ConState; /* State of connection */
- struct reply_t *replystruct;
- int cid;
- int int_var[VAR_NUM]; /* see VAR_XXXX */
- char *str_var[STR_NUM]; /* see STR_XXXX */
- unsigned pending_commands; /* see PC_XXXX */
- unsigned seq_index;
-
- struct cardstate *cs;
- struct bc_state *bcs;
-};
-
-struct event_t {
- int type;
- void *ptr, *arg;
- int parameter;
- int cid;
- struct at_state_t *at_state;
-};
-
-/* This structure holds all information about one B channel */
-struct bc_state {
- struct sk_buff *tx_skb; /* Current transfer buffer to modem */
- struct sk_buff_head squeue; /* B-Channel send Queue */
-
-	/* counters for debugging */
- int corrupted; /* Counter for corrupted packages */
- int trans_down; /* Counter of packages (downstream) */
- int trans_up; /* Counter of packages (upstream) */
-
- struct at_state_t at_state;
-
- /* receive buffer */
- unsigned rx_bufsize; /* max size accepted by application */
- struct sk_buff *rx_skb;
- __u16 rx_fcs;
- int inputstate; /* see INS_XXXX */
-
- int channel;
-
- struct cardstate *cs;
-
- unsigned chstate; /* bitmap (CHS_*) */
- int ignore;
- unsigned proto2; /* layer 2 protocol (L2_*) */
- char *commands[AT_NUM]; /* see AT_XXXX */
-
-#ifdef CONFIG_GIGASET_DEBUG
- int emptycount;
-#endif
- int busy;
- int use_count;
-
- /* private data of hardware drivers */
- union {
- struct ser_bc_state *ser; /* serial hardware driver */
- struct usb_bc_state *usb; /* usb hardware driver (m105) */
- struct bas_bc_state *bas; /* usb hardware driver (base) */
- } hw;
-
- void *ap; /* associated LL application */
- int apconnstate; /* LL application connection state */
- spinlock_t aplock;
-};
-
-struct cardstate {
- struct gigaset_driver *driver;
- unsigned minor_index;
- struct device *dev;
- struct device *tty_dev;
- unsigned flags;
-
- const struct gigaset_ops *ops;
-
- /* Stuff to handle communication */
- wait_queue_head_t waitqueue;
- int waiting;
- int mode; /* see M_XXXX */
- int mstate; /* Modem state: see MS_XXXX */
- /* only changed by the event layer */
- int cmd_result;
-
- int channels;
- struct bc_state *bcs; /* Array of struct bc_state */
-
- int onechannel; /* data and commands transmitted in one
- stream (M10x) */
-
- spinlock_t lock;
- struct at_state_t at_state; /* at_state_t for cid == 0 */
- struct list_head temp_at_states;/* list of temporary "struct
- at_state_t"s without B channel */
-
- struct inbuf_t *inbuf;
-
- struct cmdbuf_t *cmdbuf, *lastcmdbuf;
- spinlock_t cmdlock;
- unsigned curlen, cmdbytes;
-
- struct tty_port port;
- struct tasklet_struct if_wake_tasklet;
- unsigned control_state;
-
- unsigned fwver[4];
- int gotfwver;
-
- unsigned running; /* !=0 if events are handled */
- unsigned connected; /* !=0 if hardware is connected */
- unsigned isdn_up; /* !=0 after gigaset_isdn_start() */
-
- unsigned cidmode;
-
- int myid; /* id for communication with LL */
- void *iif; /* LL interface structure */
- unsigned short hw_hdr_len; /* headroom needed in data skbs */
-
- struct reply_t *tabnocid;
- struct reply_t *tabcid;
- int cs_init;
- int ignoreframes; /* frames to ignore after setting up the
- B channel */
- struct mutex mutex; /* locks this structure:
- * connected is not changed,
- * hardware_up is not changed,
- * MState is not changed to or from
- * MS_LOCKED */
-
- struct timer_list timer;
- int retry_count;
- int dle; /* !=0 if DLE mode is active
- (ZDLE=1 received -- M10x only) */
- int cur_at_seq; /* sequence of AT commands being
- processed */
- int curchannel; /* channel those commands are meant
- for */
- int commands_pending; /* flag(s) in xxx.commands_pending have
- been set */
- struct tasklet_struct
- event_tasklet; /* tasklet for serializing AT commands.
- * Scheduled
-					 *   -> for modem responses (and
- * incoming data for M10x)
- * -> on timeout
- * -> after setting bits in
- * xxx.at_state.pending_command
- * (e.g. command from LL) */
- struct tasklet_struct
- write_tasklet; /* tasklet for serial output
- * (not used in base driver) */
-
- /* event queue */
- struct event_t events[MAX_EVENTS];
- unsigned ev_tail, ev_head;
- spinlock_t ev_lock;
-
- /* current modem response */
- unsigned char respdata[MAX_RESP_SIZE + 1];
- unsigned cbytes;
-
- /* private data of hardware drivers */
- union {
- struct usb_cardstate *usb; /* USB hardware driver (m105) */
- struct ser_cardstate *ser; /* serial hardware driver */
- struct bas_cardstate *bas; /* USB hardware driver (base) */
- } hw;
-};
-
-struct gigaset_driver {
- struct list_head list;
- spinlock_t lock; /* locks minor tables and blocked */
- struct tty_driver *tty;
- unsigned have_tty;
- unsigned minor;
- unsigned minors;
- struct cardstate *cs;
- int blocked;
-
- const struct gigaset_ops *ops;
- struct module *owner;
-};
-
-struct cmdbuf_t {
- struct cmdbuf_t *next, *prev;
- int len, offset;
- struct tasklet_struct *wake_tasklet;
- unsigned char buf[0];
-};
-
-struct bas_bc_state {
- /* isochronous output state */
- int running;
- atomic_t corrbytes;
- spinlock_t isooutlock;
- struct isow_urbctx_t isoouturbs[BAS_OUTURBS];
- struct isow_urbctx_t *isooutdone, *isooutfree, *isooutovfl;
- struct isowbuf_t *isooutbuf;
- unsigned numsub; /* submitted URB counter
- (for diagnostic messages only) */
- struct tasklet_struct sent_tasklet;
-
- /* isochronous input state */
- spinlock_t isoinlock;
- struct urb *isoinurbs[BAS_INURBS];
- unsigned char isoinbuf[BAS_INBUFSIZE * BAS_INURBS];
- struct urb *isoindone; /* completed isoc read URB */
- int isoinstatus; /* status of completed URB */
- int loststatus; /* status of dropped URB */
- unsigned isoinlost; /* number of bytes lost */
- /* state of bit unstuffing algorithm
- (in addition to BC_state.inputstate) */
- unsigned seqlen; /* number of '1' bits not yet
- unstuffed */
- unsigned inbyte, inbits; /* collected bits for next byte */
- /* statistics */
- unsigned goodbytes; /* bytes correctly received */
- unsigned alignerrs; /* frames with incomplete byte at end */
- unsigned fcserrs; /* FCS errors */
- unsigned frameerrs; /* framing errors */
- unsigned giants; /* long frames */
- unsigned runts; /* short frames */
- unsigned aborts; /* HDLC aborts */
- unsigned shared0s; /* '0' bits shared between flags */
- unsigned stolen0s; /* '0' stuff bits also serving as
- leading flag bits */
- struct tasklet_struct rcvd_tasklet;
-};
-
-struct gigaset_ops {
- /* Called from ev-layer.c/interface.c for sending AT commands to the
- device */
- int (*write_cmd)(struct cardstate *cs, struct cmdbuf_t *cb);
-
- /* Called from interface.c for additional device control */
- int (*write_room)(struct cardstate *cs);
- int (*chars_in_buffer)(struct cardstate *cs);
- int (*brkchars)(struct cardstate *cs, const unsigned char buf[6]);
-
- /* Called from ev-layer.c after setting up a connection.
- * Should call gigaset_bchannel_up() when finished. */
- int (*init_bchannel)(struct bc_state *bcs);
-
- /* Called from ev-layer.c after hanging up.
- * Should call gigaset_bchannel_down() when finished. */
- int (*close_bchannel)(struct bc_state *bcs);
-
- /* Called by gigaset_initcs() for setting up bcs->hw.xxx */
- int (*initbcshw)(struct bc_state *bcs);
-
- /* Called by gigaset_freecs() for freeing bcs->hw.xxx */
- void (*freebcshw)(struct bc_state *bcs);
-
- /* Called by gigaset_bchannel_down() for resetting bcs->hw.xxx */
- void (*reinitbcshw)(struct bc_state *bcs);
-
- /* Called by gigaset_initcs() for setting up cs->hw.xxx */
- int (*initcshw)(struct cardstate *cs);
-
- /* Called by gigaset_freecs() for freeing cs->hw.xxx */
- void (*freecshw)(struct cardstate *cs);
-
- /* Called from common.c/interface.c for additional serial port
- control */
- int (*set_modem_ctrl)(struct cardstate *cs, unsigned old_state,
- unsigned new_state);
- int (*baud_rate)(struct cardstate *cs, unsigned cflag);
- int (*set_line_ctrl)(struct cardstate *cs, unsigned cflag);
-
- /* Called from LL interface to put an skb into the send-queue.
- * After sending is completed, gigaset_skb_sent() must be called
- * with the skb's link layer header preserved. */
- int (*send_skb)(struct bc_state *bcs, struct sk_buff *skb);
-
- /* Called from ev-layer.c to process a block of data
- * received through the common/control channel. */
- void (*handle_input)(struct inbuf_t *inbuf);
-
-};
-
-/* = Common structures and definitions =======================================
- */
-
-/* Parser states for DLE-Event:
- * <DLE-EVENT>: <DLE_FLAG> "X" <EVENT> <DLE_FLAG> "."
- * <DLE_FLAG>: 0x10
- * <EVENT>: ((a-z)* | (A-Z)* | (0-9)*)+
- */
-#define DLE_FLAG 0x10
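-
-/* Illustration (the event text "RING" is a hypothetical example, not
- * taken from the protocol spec): such an event would arrive as
- * 0x10 'X' 'R' 'I' 'N' 'G' 0x10 '.'
- * per the <DLE-EVENT> grammar above. */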
-
-/* ===========================================================================
- * Functions implemented in asyncdata.c
- */
-
-/* Called from LL interface to put an skb into the send queue. */
-int gigaset_m10x_send_skb(struct bc_state *bcs, struct sk_buff *skb);
-
-/* Called from ev-layer.c to process a block of data
- * received through the common/control channel. */
-void gigaset_m10x_input(struct inbuf_t *inbuf);
-
-/* ===========================================================================
- * Functions implemented in isocdata.c
- */
-
-/* Called from LL interface to put an skb into the send queue. */
-int gigaset_isoc_send_skb(struct bc_state *bcs, struct sk_buff *skb);
-
-/* Called from ev-layer.c to process a block of data
- * received through the common/control channel. */
-void gigaset_isoc_input(struct inbuf_t *inbuf);
-
-/* Called from bas-gigaset.c to process a block of data
- * received through the isochronous channel */
-void gigaset_isoc_receive(unsigned char *src, unsigned count,
- struct bc_state *bcs);
-
-/* Called from bas-gigaset.c to put a block of data
- * into the isochronous output buffer */
-int gigaset_isoc_buildframe(struct bc_state *bcs, unsigned char *in, int len);
-
-/* Called from bas-gigaset.c to initialize the isochronous output buffer */
-void gigaset_isowbuf_init(struct isowbuf_t *iwb, unsigned char idle);
-
-/* Called from bas-gigaset.c to retrieve a block of bytes for sending */
-int gigaset_isowbuf_getbytes(struct isowbuf_t *iwb, int size);
-
-/* ===========================================================================
- * Functions implemented in LL interface
- */
-
-/* Called from common.c for setting up/shutting down with the ISDN subsystem */
-void gigaset_isdn_regdrv(void);
-void gigaset_isdn_unregdrv(void);
-int gigaset_isdn_regdev(struct cardstate *cs, const char *isdnid);
-void gigaset_isdn_unregdev(struct cardstate *cs);
-
-/* Called from hardware module to indicate completion of an skb */
-void gigaset_skb_sent(struct bc_state *bcs, struct sk_buff *skb);
-void gigaset_skb_rcvd(struct bc_state *bcs, struct sk_buff *skb);
-void gigaset_isdn_rcv_err(struct bc_state *bcs);
-
-/* Called from common.c/ev-layer.c to indicate events relevant to the LL */
-void gigaset_isdn_start(struct cardstate *cs);
-void gigaset_isdn_stop(struct cardstate *cs);
-int gigaset_isdn_icall(struct at_state_t *at_state);
-void gigaset_isdn_connD(struct bc_state *bcs);
-void gigaset_isdn_hupD(struct bc_state *bcs);
-void gigaset_isdn_connB(struct bc_state *bcs);
-void gigaset_isdn_hupB(struct bc_state *bcs);
-
-/* ===========================================================================
- * Functions implemented in ev-layer.c
- */
-
-/* tasklet called from common.c to process queued events */
-void gigaset_handle_event(unsigned long data);
-
-/* called from isocdata.c / asyncdata.c
- * when a complete modem response line has been received */
-void gigaset_handle_modem_response(struct cardstate *cs);
-
-/* ===========================================================================
- * Functions implemented in proc.c
- */
-
-/* initialize sysfs for device */
-void gigaset_init_dev_sysfs(struct cardstate *cs);
-void gigaset_free_dev_sysfs(struct cardstate *cs);
-
-/* ===========================================================================
- * Functions implemented in common.c/gigaset.h
- */
-
-void gigaset_bcs_reinit(struct bc_state *bcs);
-void gigaset_at_init(struct at_state_t *at_state, struct bc_state *bcs,
- struct cardstate *cs, int cid);
-int gigaset_get_channel(struct bc_state *bcs);
-struct bc_state *gigaset_get_free_channel(struct cardstate *cs);
-void gigaset_free_channel(struct bc_state *bcs);
-int gigaset_get_channels(struct cardstate *cs);
-void gigaset_free_channels(struct cardstate *cs);
-void gigaset_block_channels(struct cardstate *cs);
-
-/* Allocate and initialize driver structure. */
-struct gigaset_driver *gigaset_initdriver(unsigned minor, unsigned minors,
- const char *procname,
- const char *devname,
- const struct gigaset_ops *ops,
- struct module *owner);
-
-/* Deallocate driver structure. */
-void gigaset_freedriver(struct gigaset_driver *drv);
-
-struct cardstate *gigaset_get_cs_by_tty(struct tty_struct *tty);
-struct cardstate *gigaset_get_cs_by_id(int id);
-void gigaset_blockdriver(struct gigaset_driver *drv);
-
-/* Allocate and initialize card state. Calls the hardware-dependent
- gigaset_init[b]cs(). */
-struct cardstate *gigaset_initcs(struct gigaset_driver *drv, int channels,
- int onechannel, int ignoreframes,
- int cidmode, const char *modulename);
-
-/* Free card state. Calls the hardware-dependent gigaset_free[b]cs(). */
-void gigaset_freecs(struct cardstate *cs);
-
-/* Tell common.c that hardware and driver are ready. */
-int gigaset_start(struct cardstate *cs);
-
-/* Tell common.c that the device is not present any more. */
-void gigaset_stop(struct cardstate *cs);
-
-/* Tell common.c that the driver is being unloaded. */
-int gigaset_shutdown(struct cardstate *cs);
-
-/* Append event to the queue.
- * Returns a pointer to the event on success, or NULL on failure.
- * ptr must be kmalloc()ed (and must not be freed by the caller).
- */
-struct event_t *gigaset_add_event(struct cardstate *cs,
- struct at_state_t *at_state, int type,
- void *ptr, int parameter, void *arg);
-
-/* Called on CONFIG1 command from frontend. */
-int gigaset_enterconfigmode(struct cardstate *cs);
-
-/* cs->lock must not be locked */
-static inline void gigaset_schedule_event(struct cardstate *cs)
-{
- unsigned long flags;
- spin_lock_irqsave(&cs->lock, flags);
- if (cs->running)
- tasklet_schedule(&cs->event_tasklet);
- spin_unlock_irqrestore(&cs->lock, flags);
-}
-
-/* Tell common.c that B channel has been closed. */
-/* cs->lock must not be locked */
-static inline void gigaset_bchannel_down(struct bc_state *bcs)
-{
- gigaset_add_event(bcs->cs, &bcs->at_state, EV_BC_CLOSED, NULL, 0, NULL);
- gigaset_schedule_event(bcs->cs);
-}
-
-/* Tell common.c that B channel has been opened. */
-/* cs->lock must not be locked */
-static inline void gigaset_bchannel_up(struct bc_state *bcs)
-{
- gigaset_add_event(bcs->cs, &bcs->at_state, EV_BC_OPEN, NULL, 0, NULL);
- gigaset_schedule_event(bcs->cs);
-}
-
-/* set up next receive skb for data mode */
-static inline struct sk_buff *gigaset_new_rx_skb(struct bc_state *bcs)
-{
- struct cardstate *cs = bcs->cs;
- unsigned short hw_hdr_len = cs->hw_hdr_len;
-
- if (bcs->ignore) {
- bcs->rx_skb = NULL;
- } else {
- bcs->rx_skb = dev_alloc_skb(bcs->rx_bufsize + hw_hdr_len);
- if (bcs->rx_skb == NULL)
- dev_warn(cs->dev, "could not allocate skb\n");
- else
- skb_reserve(bcs->rx_skb, hw_hdr_len);
- }
- return bcs->rx_skb;
-}
-
-/* append received bytes to inbuf */
-int gigaset_fill_inbuf(struct inbuf_t *inbuf, const unsigned char *src,
- unsigned numbytes);
-
-/* ===========================================================================
- * Functions implemented in interface.c
- */
-
-/* initialize interface */
-void gigaset_if_initdriver(struct gigaset_driver *drv, const char *procname,
- const char *devname);
-/* release interface */
-void gigaset_if_freedriver(struct gigaset_driver *drv);
-/* add minor */
-void gigaset_if_init(struct cardstate *cs);
-/* remove minor */
-void gigaset_if_free(struct cardstate *cs);
-/* device received data */
-void gigaset_if_receive(struct cardstate *cs,
- unsigned char *buffer, size_t len);
-
-#endif
diff --git a/drivers/staging/isdn/gigaset/interface.c b/drivers/staging/isdn/gigaset/interface.c
deleted file mode 100644
index 9ddadd07e707..000000000000
--- a/drivers/staging/isdn/gigaset/interface.c
+++ /dev/null
@@ -1,613 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-/*
- * interface to user space for the gigaset driver
- *
- * Copyright (c) 2004 by Hansjoerg Lipp <hjlipp@web.de>
- *
- * =====================================================================
- * =====================================================================
- */
-
-#include "gigaset.h"
-#include <linux/gigaset_dev.h>
-#include <linux/tty_flip.h>
-#include <linux/module.h>
-
-/*** our ioctls ***/
-
-static int if_lock(struct cardstate *cs, int *arg)
-{
- int cmd = *arg;
-
- gig_dbg(DEBUG_IF, "%u: if_lock (%d)", cs->minor_index, cmd);
-
- if (cmd > 1)
- return -EINVAL;
-
- if (cmd < 0) {
- *arg = cs->mstate == MS_LOCKED;
- return 0;
- }
-
- if (!cmd && cs->mstate == MS_LOCKED && cs->connected) {
- cs->ops->set_modem_ctrl(cs, 0, TIOCM_DTR | TIOCM_RTS);
- cs->ops->baud_rate(cs, B115200);
- cs->ops->set_line_ctrl(cs, CS8);
- cs->control_state = TIOCM_DTR | TIOCM_RTS;
- }
-
- cs->waiting = 1;
- if (!gigaset_add_event(cs, &cs->at_state, EV_IF_LOCK,
- NULL, cmd, NULL)) {
- cs->waiting = 0;
- return -ENOMEM;
- }
- gigaset_schedule_event(cs);
-
- wait_event(cs->waitqueue, !cs->waiting);
-
- if (cs->cmd_result >= 0) {
- *arg = cs->cmd_result;
- return 0;
- }
-
- return cs->cmd_result;
-}
-
-static int if_version(struct cardstate *cs, unsigned arg[4])
-{
- static const unsigned version[4] = GIG_VERSION;
- static const unsigned compat[4] = GIG_COMPAT;
- unsigned cmd = arg[0];
-
- gig_dbg(DEBUG_IF, "%u: if_version (%d)", cs->minor_index, cmd);
-
- switch (cmd) {
- case GIGVER_DRIVER:
- memcpy(arg, version, sizeof version);
- return 0;
- case GIGVER_COMPAT:
- memcpy(arg, compat, sizeof compat);
- return 0;
- case GIGVER_FWBASE:
- cs->waiting = 1;
- if (!gigaset_add_event(cs, &cs->at_state, EV_IF_VER,
- NULL, 0, arg)) {
- cs->waiting = 0;
- return -ENOMEM;
- }
- gigaset_schedule_event(cs);
-
- wait_event(cs->waitqueue, !cs->waiting);
-
- if (cs->cmd_result >= 0)
- return 0;
-
- return cs->cmd_result;
- default:
- return -EINVAL;
- }
-}
-
-static int if_config(struct cardstate *cs, int *arg)
-{
- gig_dbg(DEBUG_IF, "%u: if_config (%d)", cs->minor_index, *arg);
-
- if (*arg != 1)
- return -EINVAL;
-
- if (cs->mstate != MS_LOCKED)
- return -EBUSY;
-
- if (!cs->connected) {
- pr_err("%s: not connected\n", __func__);
- return -ENODEV;
- }
-
- *arg = 0;
- return gigaset_enterconfigmode(cs);
-}
-
-/*** the terminal driver ***/
-
-static int if_open(struct tty_struct *tty, struct file *filp)
-{
- struct cardstate *cs;
-
- gig_dbg(DEBUG_IF, "%d+%d: %s()",
- tty->driver->minor_start, tty->index, __func__);
-
- cs = gigaset_get_cs_by_tty(tty);
- if (!cs || !try_module_get(cs->driver->owner))
- return -ENODEV;
-
- if (mutex_lock_interruptible(&cs->mutex)) {
- module_put(cs->driver->owner);
- return -ERESTARTSYS;
- }
- tty->driver_data = cs;
-
- ++cs->port.count;
-
- if (cs->port.count == 1) {
- tty_port_tty_set(&cs->port, tty);
- cs->port.low_latency = 1;
- }
-
- mutex_unlock(&cs->mutex);
- return 0;
-}
-
-static void if_close(struct tty_struct *tty, struct file *filp)
-{
- struct cardstate *cs = tty->driver_data;
-
- if (!cs) { /* happens if we didn't find cs in open */
- gig_dbg(DEBUG_IF, "%s: no cardstate", __func__);
- return;
- }
-
- gig_dbg(DEBUG_IF, "%u: %s()", cs->minor_index, __func__);
-
- mutex_lock(&cs->mutex);
-
- if (!cs->connected)
- gig_dbg(DEBUG_IF, "not connected"); /* nothing to do */
- else if (!cs->port.count)
- dev_warn(cs->dev, "%s: device not opened\n", __func__);
- else if (!--cs->port.count)
- tty_port_tty_set(&cs->port, NULL);
-
- mutex_unlock(&cs->mutex);
-
- module_put(cs->driver->owner);
-}
-
-static int if_ioctl(struct tty_struct *tty,
- unsigned int cmd, unsigned long arg)
-{
- struct cardstate *cs = tty->driver_data;
- int retval = -ENODEV;
- int int_arg;
- unsigned char buf[6];
- unsigned version[4];
-
- gig_dbg(DEBUG_IF, "%u: %s(0x%x)", cs->minor_index, __func__, cmd);
-
- if (mutex_lock_interruptible(&cs->mutex))
- return -ERESTARTSYS;
-
- if (!cs->connected) {
- gig_dbg(DEBUG_IF, "not connected");
- retval = -ENODEV;
- } else {
- retval = 0;
- switch (cmd) {
- case GIGASET_REDIR:
- retval = get_user(int_arg, (int __user *) arg);
- if (retval >= 0)
- retval = if_lock(cs, &int_arg);
- if (retval >= 0)
- retval = put_user(int_arg, (int __user *) arg);
- break;
- case GIGASET_CONFIG:
- retval = get_user(int_arg, (int __user *) arg);
- if (retval >= 0)
- retval = if_config(cs, &int_arg);
- if (retval >= 0)
- retval = put_user(int_arg, (int __user *) arg);
- break;
- case GIGASET_BRKCHARS:
- retval = copy_from_user(&buf,
- (const unsigned char __user *) arg, 6)
- ? -EFAULT : 0;
- if (retval >= 0) {
- gigaset_dbg_buffer(DEBUG_IF, "GIGASET_BRKCHARS",
- 6, buf);
- retval = cs->ops->brkchars(cs, buf);
- }
- break;
- case GIGASET_VERSION:
- retval = copy_from_user(version,
- (unsigned __user *) arg, sizeof version)
- ? -EFAULT : 0;
- if (retval >= 0)
- retval = if_version(cs, version);
- if (retval >= 0)
- retval = copy_to_user((unsigned __user *) arg,
- version, sizeof version)
- ? -EFAULT : 0;
- break;
- default:
- gig_dbg(DEBUG_IF, "%s: arg not supported - 0x%04x",
- __func__, cmd);
- retval = -ENOIOCTLCMD;
- }
- }
-
- mutex_unlock(&cs->mutex);
-
- return retval;
-}
-
-#ifdef CONFIG_COMPAT
-static long if_compat_ioctl(struct tty_struct *tty,
- unsigned int cmd, unsigned long arg)
-{
- return if_ioctl(tty, cmd, (unsigned long)compat_ptr(arg));
-}
-#endif
-
-static int if_tiocmget(struct tty_struct *tty)
-{
- struct cardstate *cs = tty->driver_data;
- int retval;
-
- gig_dbg(DEBUG_IF, "%u: %s()", cs->minor_index, __func__);
-
- if (mutex_lock_interruptible(&cs->mutex))
- return -ERESTARTSYS;
-
- retval = cs->control_state & (TIOCM_RTS | TIOCM_DTR);
-
- mutex_unlock(&cs->mutex);
-
- return retval;
-}
-
-static int if_tiocmset(struct tty_struct *tty,
- unsigned int set, unsigned int clear)
-{
- struct cardstate *cs = tty->driver_data;
- int retval;
- unsigned mc;
-
- gig_dbg(DEBUG_IF, "%u: %s(0x%x, 0x%x)",
- cs->minor_index, __func__, set, clear);
-
- if (mutex_lock_interruptible(&cs->mutex))
- return -ERESTARTSYS;
-
- if (!cs->connected) {
- gig_dbg(DEBUG_IF, "not connected");
- retval = -ENODEV;
- } else {
- mc = (cs->control_state | set) & ~clear & (TIOCM_RTS | TIOCM_DTR);
- retval = cs->ops->set_modem_ctrl(cs, cs->control_state, mc);
- cs->control_state = mc;
- }
-
- mutex_unlock(&cs->mutex);
-
- return retval;
-}
-
-static int if_write(struct tty_struct *tty, const unsigned char *buf, int count)
-{
- struct cardstate *cs = tty->driver_data;
- struct cmdbuf_t *cb;
- int retval;
-
- gig_dbg(DEBUG_IF, "%u: %s()", cs->minor_index, __func__);
-
- if (mutex_lock_interruptible(&cs->mutex))
- return -ERESTARTSYS;
-
- if (!cs->connected) {
- gig_dbg(DEBUG_IF, "not connected");
- retval = -ENODEV;
- goto done;
- }
- if (cs->mstate != MS_LOCKED) {
- dev_warn(cs->dev, "can't write to unlocked device\n");
- retval = -EBUSY;
- goto done;
- }
- if (count <= 0) {
- /* nothing to do */
- retval = 0;
- goto done;
- }
-
- cb = kmalloc(sizeof(struct cmdbuf_t) + count, GFP_KERNEL);
- if (!cb) {
- dev_err(cs->dev, "%s: out of memory\n", __func__);
- retval = -ENOMEM;
- goto done;
- }
-
- memcpy(cb->buf, buf, count);
- cb->len = count;
- cb->offset = 0;
- cb->next = NULL;
- cb->wake_tasklet = &cs->if_wake_tasklet;
- retval = cs->ops->write_cmd(cs, cb);
-done:
- mutex_unlock(&cs->mutex);
- return retval;
-}
-
-static int if_write_room(struct tty_struct *tty)
-{
- struct cardstate *cs = tty->driver_data;
- int retval;
-
- gig_dbg(DEBUG_IF, "%u: %s()", cs->minor_index, __func__);
-
- if (mutex_lock_interruptible(&cs->mutex))
- return -ERESTARTSYS;
-
- if (!cs->connected) {
- gig_dbg(DEBUG_IF, "not connected");
- retval = -ENODEV;
- } else if (cs->mstate != MS_LOCKED) {
- dev_warn(cs->dev, "can't write to unlocked device\n");
- retval = -EBUSY;
- } else
- retval = cs->ops->write_room(cs);
-
- mutex_unlock(&cs->mutex);
-
- return retval;
-}
-
-static int if_chars_in_buffer(struct tty_struct *tty)
-{
- struct cardstate *cs = tty->driver_data;
- int retval = 0;
-
- gig_dbg(DEBUG_IF, "%u: %s()", cs->minor_index, __func__);
-
- mutex_lock(&cs->mutex);
-
- if (!cs->connected)
- gig_dbg(DEBUG_IF, "not connected");
- else if (cs->mstate != MS_LOCKED)
- dev_warn(cs->dev, "can't write to unlocked device\n");
- else
- retval = cs->ops->chars_in_buffer(cs);
-
- mutex_unlock(&cs->mutex);
-
- return retval;
-}
-
-static void if_throttle(struct tty_struct *tty)
-{
- struct cardstate *cs = tty->driver_data;
-
- gig_dbg(DEBUG_IF, "%u: %s()", cs->minor_index, __func__);
-
- mutex_lock(&cs->mutex);
-
- if (!cs->connected)
- gig_dbg(DEBUG_IF, "not connected"); /* nothing to do */
- else
- gig_dbg(DEBUG_IF, "%s: not implemented\n", __func__);
-
- mutex_unlock(&cs->mutex);
-}
-
-static void if_unthrottle(struct tty_struct *tty)
-{
- struct cardstate *cs = tty->driver_data;
-
- gig_dbg(DEBUG_IF, "%u: %s()", cs->minor_index, __func__);
-
- mutex_lock(&cs->mutex);
-
- if (!cs->connected)
- gig_dbg(DEBUG_IF, "not connected"); /* nothing to do */
- else
- gig_dbg(DEBUG_IF, "%s: not implemented\n", __func__);
-
- mutex_unlock(&cs->mutex);
-}
-
-static void if_set_termios(struct tty_struct *tty, struct ktermios *old)
-{
- struct cardstate *cs = tty->driver_data;
- unsigned int iflag;
- unsigned int cflag;
- unsigned int old_cflag;
- unsigned int control_state, new_state;
-
- gig_dbg(DEBUG_IF, "%u: %s()", cs->minor_index, __func__);
-
- mutex_lock(&cs->mutex);
-
- if (!cs->connected) {
- gig_dbg(DEBUG_IF, "not connected");
- goto out;
- }
-
- iflag = tty->termios.c_iflag;
- cflag = tty->termios.c_cflag;
- old_cflag = old ? old->c_cflag : cflag;
- gig_dbg(DEBUG_IF, "%u: iflag %x cflag %x old %x",
- cs->minor_index, iflag, cflag, old_cflag);
-
- /* get a local copy of the current port settings */
- control_state = cs->control_state;
-
- /*
- * Update baud rate.
- * Do not attempt to cache old rates and skip settings,
- * disconnects screw such tricks up completely.
- * Premature optimization is the root of all evil.
- */
-
- /* reassert DTR and (maybe) RTS on transition from B0 */
- if ((old_cflag & CBAUD) == B0) {
- new_state = control_state | TIOCM_DTR;
- /* don't set RTS if using hardware flow control */
- if (!(old_cflag & CRTSCTS))
- new_state |= TIOCM_RTS;
- gig_dbg(DEBUG_IF, "%u: from B0 - set DTR%s",
- cs->minor_index,
- (new_state & TIOCM_RTS) ? " only" : "/RTS");
- cs->ops->set_modem_ctrl(cs, control_state, new_state);
- control_state = new_state;
- }
-
- cs->ops->baud_rate(cs, cflag & CBAUD);
-
- if ((cflag & CBAUD) == B0) {
- /* Drop RTS and DTR */
- gig_dbg(DEBUG_IF, "%u: to B0 - drop DTR/RTS", cs->minor_index);
- new_state = control_state & ~(TIOCM_DTR | TIOCM_RTS);
- cs->ops->set_modem_ctrl(cs, control_state, new_state);
- control_state = new_state;
- }
-
- /*
- * Update line control register (LCR)
- */
-
- cs->ops->set_line_ctrl(cs, cflag);
-
- /* save off the modified port settings */
- cs->control_state = control_state;
-
-out:
- mutex_unlock(&cs->mutex);
-}
-
-static const struct tty_operations if_ops = {
- .open = if_open,
- .close = if_close,
- .ioctl = if_ioctl,
-#ifdef CONFIG_COMPAT
- .compat_ioctl = if_compat_ioctl,
-#endif
- .write = if_write,
- .write_room = if_write_room,
- .chars_in_buffer = if_chars_in_buffer,
- .set_termios = if_set_termios,
- .throttle = if_throttle,
- .unthrottle = if_unthrottle,
- .tiocmget = if_tiocmget,
- .tiocmset = if_tiocmset,
-};
-
-
-/* wakeup tasklet for the write operation */
-static void if_wake(unsigned long data)
-{
- struct cardstate *cs = (struct cardstate *)data;
-
- tty_port_tty_wakeup(&cs->port);
-}
-
-/*** interface to common ***/
-
-void gigaset_if_init(struct cardstate *cs)
-{
- struct gigaset_driver *drv;
-
- drv = cs->driver;
- if (!drv->have_tty)
- return;
-
- tasklet_init(&cs->if_wake_tasklet, if_wake, (unsigned long) cs);
-
- mutex_lock(&cs->mutex);
- cs->tty_dev = tty_port_register_device(&cs->port, drv->tty,
- cs->minor_index, NULL);
-
- if (!IS_ERR(cs->tty_dev))
- dev_set_drvdata(cs->tty_dev, cs);
- else {
- pr_warn("could not register device to the tty subsystem\n");
- cs->tty_dev = NULL;
- }
- mutex_unlock(&cs->mutex);
-}
-
-void gigaset_if_free(struct cardstate *cs)
-{
- struct gigaset_driver *drv;
-
- drv = cs->driver;
- if (!drv->have_tty)
- return;
-
- tasklet_disable(&cs->if_wake_tasklet);
- tasklet_kill(&cs->if_wake_tasklet);
- cs->tty_dev = NULL;
- tty_unregister_device(drv->tty, cs->minor_index);
-}
-
-/**
- * gigaset_if_receive() - pass a received block of data to the tty device
- * @cs: device descriptor structure.
- * @buffer: received data.
- * @len: number of bytes received.
- *
- * Called by asyncdata/isocdata if a block of data received from the
- * device must be sent to userspace through the ttyG* device.
- */
-void gigaset_if_receive(struct cardstate *cs,
- unsigned char *buffer, size_t len)
-{
- tty_insert_flip_string(&cs->port, buffer, len);
- tty_flip_buffer_push(&cs->port);
-}
-EXPORT_SYMBOL_GPL(gigaset_if_receive);
-
-/* gigaset_if_initdriver
- * Initialize tty interface.
- * parameters:
- * drv Driver
- * procname Name of the driver (e.g. for /proc/tty/drivers)
- * devname Name of the device files (prefix without minor number)
- */
-void gigaset_if_initdriver(struct gigaset_driver *drv, const char *procname,
- const char *devname)
-{
- int ret;
- struct tty_driver *tty;
-
- drv->have_tty = 0;
-
- drv->tty = tty = alloc_tty_driver(drv->minors);
- if (tty == NULL)
- goto enomem;
-
- tty->type = TTY_DRIVER_TYPE_SERIAL;
- tty->subtype = SERIAL_TYPE_NORMAL;
- tty->flags = TTY_DRIVER_REAL_RAW | TTY_DRIVER_DYNAMIC_DEV;
-
- tty->driver_name = procname;
- tty->name = devname;
- tty->minor_start = drv->minor;
-
- tty->init_termios = tty_std_termios;
- tty->init_termios.c_cflag = B9600 | CS8 | CREAD | HUPCL | CLOCAL;
- tty_set_operations(tty, &if_ops);
-
- ret = tty_register_driver(tty);
- if (ret < 0) {
- pr_err("error %d registering tty driver\n", ret);
- goto error;
- }
- gig_dbg(DEBUG_IF, "tty driver initialized");
- drv->have_tty = 1;
- return;
-
-enomem:
- pr_err("out of memory\n");
-error:
- if (drv->tty)
- put_tty_driver(drv->tty);
-}
-
-void gigaset_if_freedriver(struct gigaset_driver *drv)
-{
- if (!drv->have_tty)
- return;
-
- drv->have_tty = 0;
- tty_unregister_driver(drv->tty);
- put_tty_driver(drv->tty);
-}
diff --git a/drivers/staging/isdn/gigaset/isocdata.c b/drivers/staging/isdn/gigaset/isocdata.c
deleted file mode 100644
index 3ecf6e33ed15..000000000000
--- a/drivers/staging/isdn/gigaset/isocdata.c
+++ /dev/null
@@ -1,1006 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-/*
- * Common data handling layer for bas_gigaset
- *
- * Copyright (c) 2005 by Tilman Schmidt <tilman@imap.cc>,
- * Hansjoerg Lipp <hjlipp@web.de>.
- *
- * =====================================================================
- * =====================================================================
- */
-
-#include "gigaset.h"
-#include <linux/crc-ccitt.h>
-#include <linux/bitrev.h>
-
-/* access methods for isowbuf_t */
-/* ============================ */
-
-/* initialize buffer structure
- */
-void gigaset_isowbuf_init(struct isowbuf_t *iwb, unsigned char idle)
-{
- iwb->read = 0;
- iwb->nextread = 0;
- iwb->write = 0;
- atomic_set(&iwb->writesem, 1);
- iwb->wbits = 0;
- iwb->idle = idle;
- memset(iwb->data + BAS_OUTBUFSIZE, idle, BAS_OUTBUFPAD);
-}
-
-/* compute number of bytes which can be appended to buffer
- * so that there is still room to append a maximum frame of flags
- */
-static inline int isowbuf_freebytes(struct isowbuf_t *iwb)
-{
- int read, write, freebytes;
-
- read = iwb->read;
- write = iwb->write;
- freebytes = read - write;
- if (freebytes > 0) {
- /* no wraparound: need padding space within regular area */
- return freebytes - BAS_OUTBUFPAD;
- } else if (read < BAS_OUTBUFPAD) {
- /* wraparound: can use space up to end of regular area */
- return BAS_OUTBUFSIZE - write;
- } else {
- /* following the wraparound yields more space */
- return freebytes + BAS_OUTBUFSIZE - BAS_OUTBUFPAD;
- }
-}
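-
-/* Worked example with hypothetical sizes (illustration only):
- * with BAS_OUTBUFSIZE = 64 and BAS_OUTBUFPAD = 8,
- * read=20, write=10: freebytes = 10 > 0 -> return 10 - 8 = 2
- * read=4, write=50: read < PAD -> return 64 - 50 = 14
- * read=20, write=50: wraparound -> return -30 + 64 - 8 = 26
- */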
-
-/* start writing
- * acquire the write semaphore
- * return 0 if acquired, <0 if busy
- */
-static inline int isowbuf_startwrite(struct isowbuf_t *iwb)
-{
- if (!atomic_dec_and_test(&iwb->writesem)) {
- atomic_inc(&iwb->writesem);
- gig_dbg(DEBUG_ISO, "%s: couldn't acquire iso write semaphore",
- __func__);
- return -EBUSY;
- }
- gig_dbg(DEBUG_ISO,
- "%s: acquired iso write semaphore, data[write]=%02x, nbits=%d",
- __func__, iwb->data[iwb->write], iwb->wbits);
- return 0;
-}
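-
-/* writesem acts as a try-lock: 1 means "free". atomic_dec_and_test()
- * claims it only if it was 1; on contention the atomic_inc() above
- * undoes the decrement, so the current holder's isowbuf_donewrite()
- * still restores the counter to 1. */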
-
-/* finish writing
- * release the write semaphore
- * returns the current write position
- */
-static inline int isowbuf_donewrite(struct isowbuf_t *iwb)
-{
- int write = iwb->write;
- atomic_inc(&iwb->writesem);
- return write;
-}
-
-/* append bits to buffer without any checks
- * - data contains bits to append, starting at LSB
- * - nbits is number of bits to append (0..24)
- * must be called with the write semaphore held
- * If more than nbits bits are set in data, the extraneous bits are set in the
- * buffer too, but the write position is only advanced by nbits.
- */
-static inline void isowbuf_putbits(struct isowbuf_t *iwb, u32 data, int nbits)
-{
- int write = iwb->write;
- data <<= iwb->wbits;
- data |= iwb->data[write];
- nbits += iwb->wbits;
- while (nbits >= 8) {
- iwb->data[write++] = data & 0xff;
- write %= BAS_OUTBUFSIZE;
- data >>= 8;
- nbits -= 8;
- }
- iwb->wbits = nbits;
- iwb->data[write] = data & 0xff;
- iwb->write = write;
-}
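-
-/* Worked example: with wbits == 6 (six valid bits already in the
- * partial byte at data[write]), appending nbits = 5 of data = 0b10110
- * shifts data left by 6 and ORs in the partial byte, giving 11
- * pending bits; one complete byte is stored and write advances, and
- * the remaining 3 bits (0b101) stay in data[write] with wbits = 3. */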
-
-/* put final flag on HDLC bitstream
- * also sets the idle fill byte to the correspondingly shifted flag pattern
- * must be called with the write semaphore held
- */
-static inline void isowbuf_putflag(struct isowbuf_t *iwb)
-{
- int write;
-
- /* add two flags, thus reliably covering one byte */
- isowbuf_putbits(iwb, 0x7e7e, 8);
- /* recover the idle flag byte */
- write = iwb->write;
- iwb->idle = iwb->data[write];
- gig_dbg(DEBUG_ISO, "idle fill byte %02x", iwb->idle);
- /* mask extraneous bits in buffer */
- iwb->data[write] &= (1 << iwb->wbits) - 1;
-}
-
-/* retrieve a block of bytes for sending
- * The requested number of bytes is provided as a contiguous block.
- * If necessary, the frame is filled to the requested number of bytes
- * with the idle value.
- * returns offset to frame, < 0 on busy or error
- */
-int gigaset_isowbuf_getbytes(struct isowbuf_t *iwb, int size)
-{
- int read, write, limit, src, dst;
- unsigned char pbyte;
-
- read = iwb->nextread;
- write = iwb->write;
- if (likely(read == write)) {
- /* return idle frame */
- return read < BAS_OUTBUFPAD ?
- BAS_OUTBUFSIZE : read - BAS_OUTBUFPAD;
- }
-
- limit = read + size;
- gig_dbg(DEBUG_STREAM, "%s: read=%d write=%d limit=%d",
- __func__, read, write, limit);
-#ifdef CONFIG_GIGASET_DEBUG
- if (unlikely(size < 0 || size > BAS_OUTBUFPAD)) {
- pr_err("invalid size %d\n", size);
- return -EINVAL;
- }
-#endif
-
- if (read < write) {
- /* no wraparound in valid data */
- if (limit >= write) {
- /* append idle frame */
- if (isowbuf_startwrite(iwb) < 0)
- return -EBUSY;
- /* write position could have changed */
- write = iwb->write;
- if (limit >= write) {
- pbyte = iwb->data[write]; /* save
- partial byte */
- limit = write + BAS_OUTBUFPAD;
- gig_dbg(DEBUG_STREAM,
- "%s: filling %d->%d with %02x",
- __func__, write, limit, iwb->idle);
- if (write + BAS_OUTBUFPAD < BAS_OUTBUFSIZE)
- memset(iwb->data + write, iwb->idle,
- BAS_OUTBUFPAD);
- else {
- /* wraparound, fill entire pad area */
- memset(iwb->data + write, iwb->idle,
- BAS_OUTBUFSIZE + BAS_OUTBUFPAD
- - write);
- limit = 0;
- }
- gig_dbg(DEBUG_STREAM,
- "%s: restoring %02x at %d",
- __func__, pbyte, limit);
- iwb->data[limit] = pbyte; /* restore
- partial byte */
- iwb->write = limit;
- }
- isowbuf_donewrite(iwb);
- }
- } else {
- /* valid data wraparound */
- if (limit >= BAS_OUTBUFSIZE) {
- /* copy wrapped part into pad area */
- src = 0;
- dst = BAS_OUTBUFSIZE;
- while (dst < limit && src < write)
- iwb->data[dst++] = iwb->data[src++];
- if (dst <= limit) {
- /* fill pad area with idle byte */
- memset(iwb->data + dst, iwb->idle,
- BAS_OUTBUFSIZE + BAS_OUTBUFPAD - dst);
- }
- limit = src;
- }
- }
- iwb->nextread = limit;
- return read;
-}
-
-/* dump_bytes
- * write hex bytes to syslog for debugging
- */
-static inline void dump_bytes(enum debuglevel level, const char *tag,
- unsigned char *bytes, int count)
-{
-#ifdef CONFIG_GIGASET_DEBUG
- unsigned char c;
- static char dbgline[3 * 32 + 1];
- int i = 0;
-
- if (!(gigaset_debuglevel & level))
- return;
-
- while (count-- > 0) {
- if (i > sizeof(dbgline) - 4) {
- dbgline[i] = '\0';
- gig_dbg(level, "%s:%s", tag, dbgline);
- i = 0;
- }
- c = *bytes++;
- dbgline[i] = (i && !(i % 12)) ? '-' : ' ';
- i++;
- dbgline[i++] = hex_asc_hi(c);
- dbgline[i++] = hex_asc_lo(c);
- }
- dbgline[i] = '\0';
- gig_dbg(level, "%s:%s", tag, dbgline);
-#endif
-}
-
-/*============================================================================*/
-
-/* bytewise HDLC bitstuffing via table lookup
- * lookup table: 5 subtables for 0..4 preceding consecutive '1' bits
- * index: 256*(number of preceding '1' bits) + (next byte to stuff)
- * value: bit 9.. 0 = result bits
- * bit 12..10 = number of trailing '1' bits in result
- * bit 14..13 = number of bits added by stuffing
- */
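-
-/* Worked example: stufftab[0x1f] (no preceding '1's, input byte 0x1f,
- * i.e. five '1' bits starting at the LSB) is 0x201f:
- * result bits (9..0) = 0x01f, one bit added by stuffing (bits 14..13
- * = 1), zero trailing '1' bits (bits 12..10 = 0), since the inserted
- * '0' terminates the run of five '1's. */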
-static const u16 stufftab[5 * 256] = {
-/* previous 1s = 0: */
- 0x0000, 0x0001, 0x0002, 0x0003, 0x0004, 0x0005, 0x0006, 0x0007, 0x0008, 0x0009, 0x000a, 0x000b, 0x000c, 0x000d, 0x000e, 0x000f,
- 0x0010, 0x0011, 0x0012, 0x0013, 0x0014, 0x0015, 0x0016, 0x0017, 0x0018, 0x0019, 0x001a, 0x001b, 0x001c, 0x001d, 0x001e, 0x201f,
- 0x0020, 0x0021, 0x0022, 0x0023, 0x0024, 0x0025, 0x0026, 0x0027, 0x0028, 0x0029, 0x002a, 0x002b, 0x002c, 0x002d, 0x002e, 0x002f,
- 0x0030, 0x0031, 0x0032, 0x0033, 0x0034, 0x0035, 0x0036, 0x0037, 0x0038, 0x0039, 0x003a, 0x003b, 0x003c, 0x003d, 0x203e, 0x205f,
- 0x0040, 0x0041, 0x0042, 0x0043, 0x0044, 0x0045, 0x0046, 0x0047, 0x0048, 0x0049, 0x004a, 0x004b, 0x004c, 0x004d, 0x004e, 0x004f,
- 0x0050, 0x0051, 0x0052, 0x0053, 0x0054, 0x0055, 0x0056, 0x0057, 0x0058, 0x0059, 0x005a, 0x005b, 0x005c, 0x005d, 0x005e, 0x209f,
- 0x0060, 0x0061, 0x0062, 0x0063, 0x0064, 0x0065, 0x0066, 0x0067, 0x0068, 0x0069, 0x006a, 0x006b, 0x006c, 0x006d, 0x006e, 0x006f,
- 0x0070, 0x0071, 0x0072, 0x0073, 0x0074, 0x0075, 0x0076, 0x0077, 0x0078, 0x0079, 0x007a, 0x007b, 0x207c, 0x207d, 0x20be, 0x20df,
- 0x0480, 0x0481, 0x0482, 0x0483, 0x0484, 0x0485, 0x0486, 0x0487, 0x0488, 0x0489, 0x048a, 0x048b, 0x048c, 0x048d, 0x048e, 0x048f,
- 0x0490, 0x0491, 0x0492, 0x0493, 0x0494, 0x0495, 0x0496, 0x0497, 0x0498, 0x0499, 0x049a, 0x049b, 0x049c, 0x049d, 0x049e, 0x251f,
- 0x04a0, 0x04a1, 0x04a2, 0x04a3, 0x04a4, 0x04a5, 0x04a6, 0x04a7, 0x04a8, 0x04a9, 0x04aa, 0x04ab, 0x04ac, 0x04ad, 0x04ae, 0x04af,
- 0x04b0, 0x04b1, 0x04b2, 0x04b3, 0x04b4, 0x04b5, 0x04b6, 0x04b7, 0x04b8, 0x04b9, 0x04ba, 0x04bb, 0x04bc, 0x04bd, 0x253e, 0x255f,
- 0x08c0, 0x08c1, 0x08c2, 0x08c3, 0x08c4, 0x08c5, 0x08c6, 0x08c7, 0x08c8, 0x08c9, 0x08ca, 0x08cb, 0x08cc, 0x08cd, 0x08ce, 0x08cf,
- 0x08d0, 0x08d1, 0x08d2, 0x08d3, 0x08d4, 0x08d5, 0x08d6, 0x08d7, 0x08d8, 0x08d9, 0x08da, 0x08db, 0x08dc, 0x08dd, 0x08de, 0x299f,
- 0x0ce0, 0x0ce1, 0x0ce2, 0x0ce3, 0x0ce4, 0x0ce5, 0x0ce6, 0x0ce7, 0x0ce8, 0x0ce9, 0x0cea, 0x0ceb, 0x0cec, 0x0ced, 0x0cee, 0x0cef,
- 0x10f0, 0x10f1, 0x10f2, 0x10f3, 0x10f4, 0x10f5, 0x10f6, 0x10f7, 0x20f8, 0x20f9, 0x20fa, 0x20fb, 0x257c, 0x257d, 0x29be, 0x2ddf,
-
-/* previous 1s = 1: */
- 0x0000, 0x0001, 0x0002, 0x0003, 0x0004, 0x0005, 0x0006, 0x0007, 0x0008, 0x0009, 0x000a, 0x000b, 0x000c, 0x000d, 0x000e, 0x200f,
- 0x0010, 0x0011, 0x0012, 0x0013, 0x0014, 0x0015, 0x0016, 0x0017, 0x0018, 0x0019, 0x001a, 0x001b, 0x001c, 0x001d, 0x001e, 0x202f,
- 0x0020, 0x0021, 0x0022, 0x0023, 0x0024, 0x0025, 0x0026, 0x0027, 0x0028, 0x0029, 0x002a, 0x002b, 0x002c, 0x002d, 0x002e, 0x204f,
- 0x0030, 0x0031, 0x0032, 0x0033, 0x0034, 0x0035, 0x0036, 0x0037, 0x0038, 0x0039, 0x003a, 0x003b, 0x003c, 0x003d, 0x203e, 0x206f,
- 0x0040, 0x0041, 0x0042, 0x0043, 0x0044, 0x0045, 0x0046, 0x0047, 0x0048, 0x0049, 0x004a, 0x004b, 0x004c, 0x004d, 0x004e, 0x208f,
- 0x0050, 0x0051, 0x0052, 0x0053, 0x0054, 0x0055, 0x0056, 0x0057, 0x0058, 0x0059, 0x005a, 0x005b, 0x005c, 0x005d, 0x005e, 0x20af,
- 0x0060, 0x0061, 0x0062, 0x0063, 0x0064, 0x0065, 0x0066, 0x0067, 0x0068, 0x0069, 0x006a, 0x006b, 0x006c, 0x006d, 0x006e, 0x20cf,
- 0x0070, 0x0071, 0x0072, 0x0073, 0x0074, 0x0075, 0x0076, 0x0077, 0x0078, 0x0079, 0x007a, 0x007b, 0x207c, 0x207d, 0x20be, 0x20ef,
- 0x0480, 0x0481, 0x0482, 0x0483, 0x0484, 0x0485, 0x0486, 0x0487, 0x0488, 0x0489, 0x048a, 0x048b, 0x048c, 0x048d, 0x048e, 0x250f,
- 0x0490, 0x0491, 0x0492, 0x0493, 0x0494, 0x0495, 0x0496, 0x0497, 0x0498, 0x0499, 0x049a, 0x049b, 0x049c, 0x049d, 0x049e, 0x252f,
- 0x04a0, 0x04a1, 0x04a2, 0x04a3, 0x04a4, 0x04a5, 0x04a6, 0x04a7, 0x04a8, 0x04a9, 0x04aa, 0x04ab, 0x04ac, 0x04ad, 0x04ae, 0x254f,
- 0x04b0, 0x04b1, 0x04b2, 0x04b3, 0x04b4, 0x04b5, 0x04b6, 0x04b7, 0x04b8, 0x04b9, 0x04ba, 0x04bb, 0x04bc, 0x04bd, 0x253e, 0x256f,
- 0x08c0, 0x08c1, 0x08c2, 0x08c3, 0x08c4, 0x08c5, 0x08c6, 0x08c7, 0x08c8, 0x08c9, 0x08ca, 0x08cb, 0x08cc, 0x08cd, 0x08ce, 0x298f,
- 0x08d0, 0x08d1, 0x08d2, 0x08d3, 0x08d4, 0x08d5, 0x08d6, 0x08d7, 0x08d8, 0x08d9, 0x08da, 0x08db, 0x08dc, 0x08dd, 0x08de, 0x29af,
- 0x0ce0, 0x0ce1, 0x0ce2, 0x0ce3, 0x0ce4, 0x0ce5, 0x0ce6, 0x0ce7, 0x0ce8, 0x0ce9, 0x0cea, 0x0ceb, 0x0cec, 0x0ced, 0x0cee, 0x2dcf,
- 0x10f0, 0x10f1, 0x10f2, 0x10f3, 0x10f4, 0x10f5, 0x10f6, 0x10f7, 0x20f8, 0x20f9, 0x20fa, 0x20fb, 0x257c, 0x257d, 0x29be, 0x31ef,
-
-/* previous 1s = 2: */
- 0x0000, 0x0001, 0x0002, 0x0003, 0x0004, 0x0005, 0x0006, 0x2007, 0x0008, 0x0009, 0x000a, 0x000b, 0x000c, 0x000d, 0x000e, 0x2017,
- 0x0010, 0x0011, 0x0012, 0x0013, 0x0014, 0x0015, 0x0016, 0x2027, 0x0018, 0x0019, 0x001a, 0x001b, 0x001c, 0x001d, 0x001e, 0x2037,
- 0x0020, 0x0021, 0x0022, 0x0023, 0x0024, 0x0025, 0x0026, 0x2047, 0x0028, 0x0029, 0x002a, 0x002b, 0x002c, 0x002d, 0x002e, 0x2057,
- 0x0030, 0x0031, 0x0032, 0x0033, 0x0034, 0x0035, 0x0036, 0x2067, 0x0038, 0x0039, 0x003a, 0x003b, 0x003c, 0x003d, 0x203e, 0x2077,
- 0x0040, 0x0041, 0x0042, 0x0043, 0x0044, 0x0045, 0x0046, 0x2087, 0x0048, 0x0049, 0x004a, 0x004b, 0x004c, 0x004d, 0x004e, 0x2097,
- 0x0050, 0x0051, 0x0052, 0x0053, 0x0054, 0x0055, 0x0056, 0x20a7, 0x0058, 0x0059, 0x005a, 0x005b, 0x005c, 0x005d, 0x005e, 0x20b7,
- 0x0060, 0x0061, 0x0062, 0x0063, 0x0064, 0x0065, 0x0066, 0x20c7, 0x0068, 0x0069, 0x006a, 0x006b, 0x006c, 0x006d, 0x006e, 0x20d7,
- 0x0070, 0x0071, 0x0072, 0x0073, 0x0074, 0x0075, 0x0076, 0x20e7, 0x0078, 0x0079, 0x007a, 0x007b, 0x207c, 0x207d, 0x20be, 0x20f7,
- 0x0480, 0x0481, 0x0482, 0x0483, 0x0484, 0x0485, 0x0486, 0x2507, 0x0488, 0x0489, 0x048a, 0x048b, 0x048c, 0x048d, 0x048e, 0x2517,
- 0x0490, 0x0491, 0x0492, 0x0493, 0x0494, 0x0495, 0x0496, 0x2527, 0x0498, 0x0499, 0x049a, 0x049b, 0x049c, 0x049d, 0x049e, 0x2537,
- 0x04a0, 0x04a1, 0x04a2, 0x04a3, 0x04a4, 0x04a5, 0x04a6, 0x2547, 0x04a8, 0x04a9, 0x04aa, 0x04ab, 0x04ac, 0x04ad, 0x04ae, 0x2557,
- 0x04b0, 0x04b1, 0x04b2, 0x04b3, 0x04b4, 0x04b5, 0x04b6, 0x2567, 0x04b8, 0x04b9, 0x04ba, 0x04bb, 0x04bc, 0x04bd, 0x253e, 0x2577,
- 0x08c0, 0x08c1, 0x08c2, 0x08c3, 0x08c4, 0x08c5, 0x08c6, 0x2987, 0x08c8, 0x08c9, 0x08ca, 0x08cb, 0x08cc, 0x08cd, 0x08ce, 0x2997,
- 0x08d0, 0x08d1, 0x08d2, 0x08d3, 0x08d4, 0x08d5, 0x08d6, 0x29a7, 0x08d8, 0x08d9, 0x08da, 0x08db, 0x08dc, 0x08dd, 0x08de, 0x29b7,
- 0x0ce0, 0x0ce1, 0x0ce2, 0x0ce3, 0x0ce4, 0x0ce5, 0x0ce6, 0x2dc7, 0x0ce8, 0x0ce9, 0x0cea, 0x0ceb, 0x0cec, 0x0ced, 0x0cee, 0x2dd7,
- 0x10f0, 0x10f1, 0x10f2, 0x10f3, 0x10f4, 0x10f5, 0x10f6, 0x31e7, 0x20f8, 0x20f9, 0x20fa, 0x20fb, 0x257c, 0x257d, 0x29be, 0x41f7,
-
-/* previous 1s = 3: */
- 0x0000, 0x0001, 0x0002, 0x2003, 0x0004, 0x0005, 0x0006, 0x200b, 0x0008, 0x0009, 0x000a, 0x2013, 0x000c, 0x000d, 0x000e, 0x201b,
- 0x0010, 0x0011, 0x0012, 0x2023, 0x0014, 0x0015, 0x0016, 0x202b, 0x0018, 0x0019, 0x001a, 0x2033, 0x001c, 0x001d, 0x001e, 0x203b,
- 0x0020, 0x0021, 0x0022, 0x2043, 0x0024, 0x0025, 0x0026, 0x204b, 0x0028, 0x0029, 0x002a, 0x2053, 0x002c, 0x002d, 0x002e, 0x205b,
- 0x0030, 0x0031, 0x0032, 0x2063, 0x0034, 0x0035, 0x0036, 0x206b, 0x0038, 0x0039, 0x003a, 0x2073, 0x003c, 0x003d, 0x203e, 0x207b,
- 0x0040, 0x0041, 0x0042, 0x2083, 0x0044, 0x0045, 0x0046, 0x208b, 0x0048, 0x0049, 0x004a, 0x2093, 0x004c, 0x004d, 0x004e, 0x209b,
- 0x0050, 0x0051, 0x0052, 0x20a3, 0x0054, 0x0055, 0x0056, 0x20ab, 0x0058, 0x0059, 0x005a, 0x20b3, 0x005c, 0x005d, 0x005e, 0x20bb,
- 0x0060, 0x0061, 0x0062, 0x20c3, 0x0064, 0x0065, 0x0066, 0x20cb, 0x0068, 0x0069, 0x006a, 0x20d3, 0x006c, 0x006d, 0x006e, 0x20db,
- 0x0070, 0x0071, 0x0072, 0x20e3, 0x0074, 0x0075, 0x0076, 0x20eb, 0x0078, 0x0079, 0x007a, 0x20f3, 0x207c, 0x207d, 0x20be, 0x40fb,
- 0x0480, 0x0481, 0x0482, 0x2503, 0x0484, 0x0485, 0x0486, 0x250b, 0x0488, 0x0489, 0x048a, 0x2513, 0x048c, 0x048d, 0x048e, 0x251b,
- 0x0490, 0x0491, 0x0492, 0x2523, 0x0494, 0x0495, 0x0496, 0x252b, 0x0498, 0x0499, 0x049a, 0x2533, 0x049c, 0x049d, 0x049e, 0x253b,
- 0x04a0, 0x04a1, 0x04a2, 0x2543, 0x04a4, 0x04a5, 0x04a6, 0x254b, 0x04a8, 0x04a9, 0x04aa, 0x2553, 0x04ac, 0x04ad, 0x04ae, 0x255b,
- 0x04b0, 0x04b1, 0x04b2, 0x2563, 0x04b4, 0x04b5, 0x04b6, 0x256b, 0x04b8, 0x04b9, 0x04ba, 0x2573, 0x04bc, 0x04bd, 0x253e, 0x257b,
- 0x08c0, 0x08c1, 0x08c2, 0x2983, 0x08c4, 0x08c5, 0x08c6, 0x298b, 0x08c8, 0x08c9, 0x08ca, 0x2993, 0x08cc, 0x08cd, 0x08ce, 0x299b,
- 0x08d0, 0x08d1, 0x08d2, 0x29a3, 0x08d4, 0x08d5, 0x08d6, 0x29ab, 0x08d8, 0x08d9, 0x08da, 0x29b3, 0x08dc, 0x08dd, 0x08de, 0x29bb,
- 0x0ce0, 0x0ce1, 0x0ce2, 0x2dc3, 0x0ce4, 0x0ce5, 0x0ce6, 0x2dcb, 0x0ce8, 0x0ce9, 0x0cea, 0x2dd3, 0x0cec, 0x0ced, 0x0cee, 0x2ddb,
- 0x10f0, 0x10f1, 0x10f2, 0x31e3, 0x10f4, 0x10f5, 0x10f6, 0x31eb, 0x20f8, 0x20f9, 0x20fa, 0x41f3, 0x257c, 0x257d, 0x29be, 0x46fb,
-
-/* previous 1s = 4: */
- 0x0000, 0x2001, 0x0002, 0x2005, 0x0004, 0x2009, 0x0006, 0x200d, 0x0008, 0x2011, 0x000a, 0x2015, 0x000c, 0x2019, 0x000e, 0x201d,
- 0x0010, 0x2021, 0x0012, 0x2025, 0x0014, 0x2029, 0x0016, 0x202d, 0x0018, 0x2031, 0x001a, 0x2035, 0x001c, 0x2039, 0x001e, 0x203d,
- 0x0020, 0x2041, 0x0022, 0x2045, 0x0024, 0x2049, 0x0026, 0x204d, 0x0028, 0x2051, 0x002a, 0x2055, 0x002c, 0x2059, 0x002e, 0x205d,
- 0x0030, 0x2061, 0x0032, 0x2065, 0x0034, 0x2069, 0x0036, 0x206d, 0x0038, 0x2071, 0x003a, 0x2075, 0x003c, 0x2079, 0x203e, 0x407d,
- 0x0040, 0x2081, 0x0042, 0x2085, 0x0044, 0x2089, 0x0046, 0x208d, 0x0048, 0x2091, 0x004a, 0x2095, 0x004c, 0x2099, 0x004e, 0x209d,
- 0x0050, 0x20a1, 0x0052, 0x20a5, 0x0054, 0x20a9, 0x0056, 0x20ad, 0x0058, 0x20b1, 0x005a, 0x20b5, 0x005c, 0x20b9, 0x005e, 0x20bd,
- 0x0060, 0x20c1, 0x0062, 0x20c5, 0x0064, 0x20c9, 0x0066, 0x20cd, 0x0068, 0x20d1, 0x006a, 0x20d5, 0x006c, 0x20d9, 0x006e, 0x20dd,
- 0x0070, 0x20e1, 0x0072, 0x20e5, 0x0074, 0x20e9, 0x0076, 0x20ed, 0x0078, 0x20f1, 0x007a, 0x20f5, 0x207c, 0x40f9, 0x20be, 0x417d,
- 0x0480, 0x2501, 0x0482, 0x2505, 0x0484, 0x2509, 0x0486, 0x250d, 0x0488, 0x2511, 0x048a, 0x2515, 0x048c, 0x2519, 0x048e, 0x251d,
- 0x0490, 0x2521, 0x0492, 0x2525, 0x0494, 0x2529, 0x0496, 0x252d, 0x0498, 0x2531, 0x049a, 0x2535, 0x049c, 0x2539, 0x049e, 0x253d,
- 0x04a0, 0x2541, 0x04a2, 0x2545, 0x04a4, 0x2549, 0x04a6, 0x254d, 0x04a8, 0x2551, 0x04aa, 0x2555, 0x04ac, 0x2559, 0x04ae, 0x255d,
- 0x04b0, 0x2561, 0x04b2, 0x2565, 0x04b4, 0x2569, 0x04b6, 0x256d, 0x04b8, 0x2571, 0x04ba, 0x2575, 0x04bc, 0x2579, 0x253e, 0x467d,
- 0x08c0, 0x2981, 0x08c2, 0x2985, 0x08c4, 0x2989, 0x08c6, 0x298d, 0x08c8, 0x2991, 0x08ca, 0x2995, 0x08cc, 0x2999, 0x08ce, 0x299d,
- 0x08d0, 0x29a1, 0x08d2, 0x29a5, 0x08d4, 0x29a9, 0x08d6, 0x29ad, 0x08d8, 0x29b1, 0x08da, 0x29b5, 0x08dc, 0x29b9, 0x08de, 0x29bd,
- 0x0ce0, 0x2dc1, 0x0ce2, 0x2dc5, 0x0ce4, 0x2dc9, 0x0ce6, 0x2dcd, 0x0ce8, 0x2dd1, 0x0cea, 0x2dd5, 0x0cec, 0x2dd9, 0x0cee, 0x2ddd,
- 0x10f0, 0x31e1, 0x10f2, 0x31e5, 0x10f4, 0x31e9, 0x10f6, 0x31ed, 0x20f8, 0x41f1, 0x20fa, 0x41f5, 0x257c, 0x46f9, 0x29be, 0x4b7d
-};
-
-/* hdlc_bitstuff_byte
- * perform HDLC bitstuffing for one input byte (8 bits, LSB first)
- * parameters:
- * cin input byte
- * ones number of trailing '1' bits in result before this step
- * iwb pointer to output buffer structure
- * (write semaphore must be held)
- * return value:
- * number of trailing '1' bits in result after this step
- */
-
-static inline int hdlc_bitstuff_byte(struct isowbuf_t *iwb, unsigned char cin,
- int ones)
-{
- u16 stuff;
- int shiftinc, newones;
-
- /* get stuffing information for input byte
- * value: bit 9.. 0 = result bits
- * bit 12..10 = number of trailing '1' bits in result
- * bit 14..13 = number of bits added by stuffing
- */
- stuff = stufftab[256 * ones + cin];
- shiftinc = (stuff >> 13) & 3;
- newones = (stuff >> 10) & 7;
- stuff &= 0x3ff;
-
- /* append stuffed byte to output stream */
- isowbuf_putbits(iwb, stuff, 8 + shiftinc);
- return newones;
-}
-
-/* hdlc_buildframe
- * Perform HDLC framing with bitstuffing on a byte buffer
- * The input buffer is regarded as a sequence of bits, starting with the least
- * significant bit of the first byte and ending with the most significant bit
- * of the last byte. A 16 bit FCS is appended as defined by RFC 1662.
- * Whenever five consecutive '1' bits appear in the resulting bit sequence, a
- * '0' bit is inserted after them.
- * The resulting bit string and a closing flag pattern (PPP_FLAG, '01111110')
- * are appended to the output buffer starting at the given bit position, which
- * is assumed to already contain a leading flag.
- * The output buffer must have room for count + count/5 + 6 more bytes;
- * this is verified against isowbuf_freebytes() before anything is written.
- * parameters:
- * in input buffer
- * count number of bytes in input buffer
- * iwb pointer to output buffer structure
- * (write semaphore must be held)
- * return value:
- * position of end of packet in output buffer on success,
- * -EAGAIN if write semaphore busy or buffer full
- */
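-
-/* Size bound, roughly: stuffing inserts at most one '0' bit per five
- * input bits, so count payload bytes grow to at most count + count/5
- * bytes; the "+ 6" covers the two (possibly stuffed) FCS bytes, the
- * closing flag, and partial-byte rounding. */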
-
-static inline int hdlc_buildframe(struct isowbuf_t *iwb,
- unsigned char *in, int count)
-{
- int ones;
- u16 fcs;
- int end;
- unsigned char c;
-
- if (isowbuf_freebytes(iwb) < count + count / 5 + 6 ||
- isowbuf_startwrite(iwb) < 0) {
- gig_dbg(DEBUG_ISO, "%s: %d bytes free -> -EAGAIN",
- __func__, isowbuf_freebytes(iwb));
- return -EAGAIN;
- }
-
- dump_bytes(DEBUG_STREAM_DUMP, "snd data", in, count);
-
- /* bitstuff and checksum input data */
- fcs = PPP_INITFCS;
- ones = 0;
- while (count-- > 0) {
- c = *in++;
- ones = hdlc_bitstuff_byte(iwb, c, ones);
- fcs = crc_ccitt_byte(fcs, c);
- }
-
- /* bitstuff and append FCS
- * (complemented, least significant byte first) */
- fcs ^= 0xffff;
- ones = hdlc_bitstuff_byte(iwb, fcs & 0x00ff, ones);
- ones = hdlc_bitstuff_byte(iwb, (fcs >> 8) & 0x00ff, ones);
-
- /* put closing flag and repeat byte for flag idle */
- isowbuf_putflag(iwb);
- end = isowbuf_donewrite(iwb);
- return end;
-}
-
-/* trans_buildframe
- * Append a block of 'transparent' data to the output buffer,
- * reversing the bit order of each byte (bitrev8).
- * The output buffer must have room for count more bytes;
- * this is verified against isowbuf_freebytes() before anything is written.
- * parameters:
- * in input buffer
- * count number of bytes in input buffer
- * iwb pointer to output buffer structure
- * (write semaphore must be held)
- * return value:
- * position of end of packet in output buffer on success,
- * -EAGAIN if write semaphore busy or buffer full
- */
-
-static inline int trans_buildframe(struct isowbuf_t *iwb,
- unsigned char *in, int count)
-{
- int write;
- unsigned char c;
-
- if (unlikely(count <= 0))
- return iwb->write;
-
- if (isowbuf_freebytes(iwb) < count ||
- isowbuf_startwrite(iwb) < 0) {
- gig_dbg(DEBUG_ISO, "can't put %d bytes", count);
- return -EAGAIN;
- }
-
- gig_dbg(DEBUG_STREAM, "put %d bytes", count);
- dump_bytes(DEBUG_STREAM_DUMP, "snd data", in, count);
-
- write = iwb->write;
- do {
- c = bitrev8(*in++);
- iwb->data[write++] = c;
- write %= BAS_OUTBUFSIZE;
- } while (--count > 0);
- iwb->write = write;
- iwb->idle = c;
-
- return isowbuf_donewrite(iwb);
-}
-
-int gigaset_isoc_buildframe(struct bc_state *bcs, unsigned char *in, int len)
-{
- int result;
-
- switch (bcs->proto2) {
- case L2_HDLC:
- result = hdlc_buildframe(bcs->hw.bas->isooutbuf, in, len);
- gig_dbg(DEBUG_ISO, "%s: %d bytes HDLC -> %d",
- __func__, len, result);
- break;
- default: /* assume transparent */
- result = trans_buildframe(bcs->hw.bas->isooutbuf, in, len);
- gig_dbg(DEBUG_ISO, "%s: %d bytes trans -> %d",
- __func__, len, result);
- }
- return result;
-}
-
-/* hdlc_putbyte
- * append byte c to current skb of B channel structure *bcs, updating fcs
- */
-static inline void hdlc_putbyte(unsigned char c, struct bc_state *bcs)
-{
- bcs->rx_fcs = crc_ccitt_byte(bcs->rx_fcs, c);
- if (bcs->rx_skb == NULL)
- /* skipping */
- return;
- if (bcs->rx_skb->len >= bcs->rx_bufsize) {
- dev_warn(bcs->cs->dev, "received oversized packet discarded\n");
- bcs->hw.bas->giants++;
- dev_kfree_skb_any(bcs->rx_skb);
- bcs->rx_skb = NULL;
- return;
- }
- __skb_put_u8(bcs->rx_skb, c);
-}
-
-/* hdlc_flush
- * drop partial HDLC data packet
- */
-static inline void hdlc_flush(struct bc_state *bcs)
-{
- /* clear skb or allocate new if not skipping */
- if (bcs->rx_skb != NULL)
- skb_trim(bcs->rx_skb, 0);
- else
- gigaset_new_rx_skb(bcs);
-
- /* reset packet state */
- bcs->rx_fcs = PPP_INITFCS;
-}
-
-/* hdlc_done
- * process completed HDLC data packet
- */
-static inline void hdlc_done(struct bc_state *bcs)
-{
- struct cardstate *cs = bcs->cs;
- struct sk_buff *procskb;
- unsigned int len;
-
- if (unlikely(bcs->ignore)) {
- bcs->ignore--;
- hdlc_flush(bcs);
- return;
- }
- procskb = bcs->rx_skb;
- if (procskb == NULL) {
- /* previous error */
- gig_dbg(DEBUG_ISO, "%s: skb=NULL", __func__);
- gigaset_isdn_rcv_err(bcs);
- } else if (procskb->len < 2) {
- dev_notice(cs->dev, "received short frame (%d octets)\n",
- procskb->len);
- bcs->hw.bas->runts++;
- dev_kfree_skb_any(procskb);
- gigaset_isdn_rcv_err(bcs);
- } else if (bcs->rx_fcs != PPP_GOODFCS) {
- dev_notice(cs->dev, "frame check error\n");
- bcs->hw.bas->fcserrs++;
- dev_kfree_skb_any(procskb);
- gigaset_isdn_rcv_err(bcs);
- } else {
- len = procskb->len;
- __skb_trim(procskb, len -= 2); /* subtract FCS */
- gig_dbg(DEBUG_ISO, "%s: good frame (%d octets)", __func__, len);
- dump_bytes(DEBUG_STREAM_DUMP,
- "rcv data", procskb->data, len);
- bcs->hw.bas->goodbytes += len;
- gigaset_skb_rcvd(bcs, procskb);
- }
- gigaset_new_rx_skb(bcs);
- bcs->rx_fcs = PPP_INITFCS;
-}
-
-/* hdlc_frag
- * drop HDLC data packet with non-integral last byte
- */
-static inline void hdlc_frag(struct bc_state *bcs, unsigned inbits)
-{
- if (unlikely(bcs->ignore)) {
- bcs->ignore--;
- hdlc_flush(bcs);
- return;
- }
-
- dev_notice(bcs->cs->dev, "received partial byte (%d bits)\n", inbits);
- bcs->hw.bas->alignerrs++;
- gigaset_isdn_rcv_err(bcs);
- __skb_trim(bcs->rx_skb, 0);
- bcs->rx_fcs = PPP_INITFCS;
-}
-
-/* bit counts lookup table for HDLC bit unstuffing
- * index: input byte
- * value: bit 0..3 = number of consecutive '1' bits starting from LSB
- * bit 4..6 = number of consecutive '1' bits starting from MSB
- * (replacing 8 by 7 to make it fit; the algorithm won't care)
- * bit 7 set if there are 5 or more "interior" consecutive '1' bits
- */
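-
-/* Worked examples (all verifiable in the table below):
- * bitcounts[0x1f] = 0x05: five '1's starting at the LSB, none at the
- * MSB, no interior run of five or more.
- * bitcounts[0x3e] = 0x80: no '1' at LSB or MSB, but an interior run
- * of five '1's, so only bit 7 is set.
- * bitcounts[0xff] = 0x78: eight '1's from the LSB (0x8), MSB count
- * capped at 7 (0x7) as noted above. */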
-static const unsigned char bitcounts[256] = {
- 0x00, 0x01, 0x00, 0x02, 0x00, 0x01, 0x00, 0x03, 0x00, 0x01, 0x00, 0x02, 0x00, 0x01, 0x00, 0x04,
- 0x00, 0x01, 0x00, 0x02, 0x00, 0x01, 0x00, 0x03, 0x00, 0x01, 0x00, 0x02, 0x00, 0x01, 0x00, 0x05,
- 0x00, 0x01, 0x00, 0x02, 0x00, 0x01, 0x00, 0x03, 0x00, 0x01, 0x00, 0x02, 0x00, 0x01, 0x00, 0x04,
- 0x00, 0x01, 0x00, 0x02, 0x00, 0x01, 0x00, 0x03, 0x00, 0x01, 0x00, 0x02, 0x00, 0x01, 0x80, 0x06,
- 0x00, 0x01, 0x00, 0x02, 0x00, 0x01, 0x00, 0x03, 0x00, 0x01, 0x00, 0x02, 0x00, 0x01, 0x00, 0x04,
- 0x00, 0x01, 0x00, 0x02, 0x00, 0x01, 0x00, 0x03, 0x00, 0x01, 0x00, 0x02, 0x00, 0x01, 0x00, 0x05,
- 0x00, 0x01, 0x00, 0x02, 0x00, 0x01, 0x00, 0x03, 0x00, 0x01, 0x00, 0x02, 0x00, 0x01, 0x00, 0x04,
- 0x00, 0x01, 0x00, 0x02, 0x00, 0x01, 0x00, 0x03, 0x00, 0x01, 0x00, 0x02, 0x80, 0x81, 0x80, 0x07,
- 0x10, 0x11, 0x10, 0x12, 0x10, 0x11, 0x10, 0x13, 0x10, 0x11, 0x10, 0x12, 0x10, 0x11, 0x10, 0x14,
- 0x10, 0x11, 0x10, 0x12, 0x10, 0x11, 0x10, 0x13, 0x10, 0x11, 0x10, 0x12, 0x10, 0x11, 0x10, 0x15,
- 0x10, 0x11, 0x10, 0x12, 0x10, 0x11, 0x10, 0x13, 0x10, 0x11, 0x10, 0x12, 0x10, 0x11, 0x10, 0x14,
- 0x10, 0x11, 0x10, 0x12, 0x10, 0x11, 0x10, 0x13, 0x10, 0x11, 0x10, 0x12, 0x10, 0x11, 0x90, 0x16,
- 0x20, 0x21, 0x20, 0x22, 0x20, 0x21, 0x20, 0x23, 0x20, 0x21, 0x20, 0x22, 0x20, 0x21, 0x20, 0x24,
- 0x20, 0x21, 0x20, 0x22, 0x20, 0x21, 0x20, 0x23, 0x20, 0x21, 0x20, 0x22, 0x20, 0x21, 0x20, 0x25,
- 0x30, 0x31, 0x30, 0x32, 0x30, 0x31, 0x30, 0x33, 0x30, 0x31, 0x30, 0x32, 0x30, 0x31, 0x30, 0x34,
- 0x40, 0x41, 0x40, 0x42, 0x40, 0x41, 0x40, 0x43, 0x50, 0x51, 0x50, 0x52, 0x60, 0x61, 0x70, 0x78
-};
-
-/* hdlc_unpack
- * perform HDLC frame processing (bit unstuffing, flag detection, FCS
- * calculation) on a sequence of received data bytes (8 bits each, LSB first)
- * pass on successfully received, complete frames as SKBs via gigaset_skb_rcvd
- * notify of errors via gigaset_isdn_rcv_err
- * tally frames, errors etc. in BC structure counters
- * parameters:
- * src received data
- * count number of received bytes
- * bcs receiving B channel structure
- */
-static inline void hdlc_unpack(unsigned char *src, unsigned count,
- struct bc_state *bcs)
-{
- struct bas_bc_state *ubc = bcs->hw.bas;
- int inputstate;
- unsigned seqlen, inbyte, inbits;
-
- /* load previous state:
- * inputstate = set of flag bits:
- * - INS_flag_hunt: no complete opening flag received since connection
- * setup or last abort
- * - INS_have_data: at least one complete data byte received since last
- * flag
- * seqlen = number of consecutive '1' bits in last 7 input stream bits
- * (0..7)
- * inbyte = accumulated partial data byte (if !INS_flag_hunt)
- * inbits = number of valid bits in inbyte, starting at LSB (0..6)
- */
- inputstate = bcs->inputstate;
- seqlen = ubc->seqlen;
- inbyte = ubc->inbyte;
- inbits = ubc->inbits;
-
- /* bit unstuffing, one byte at a time
- * Take your time to understand this; it's straightforward but tedious.
- * The "bitcounts" lookup table is used to speed up the counting of
- * leading and trailing '1' bits.
- */
- while (count--) {
- unsigned char c = *src++;
- unsigned char tabentry = bitcounts[c];
- unsigned lead1 = tabentry & 0x0f;
- unsigned trail1 = (tabentry >> 4) & 0x0f;
-
- seqlen += lead1;
-
- if (unlikely(inputstate & INS_flag_hunt)) {
- if (c == PPP_FLAG) {
- /* flag-in-one */
- inputstate &= ~(INS_flag_hunt | INS_have_data);
- inbyte = 0;
- inbits = 0;
- } else if (seqlen == 6 && trail1 != 7) {
- /* flag completed & not followed by abort */
- inputstate &= ~(INS_flag_hunt | INS_have_data);
- inbyte = c >> (lead1 + 1);
- inbits = 7 - lead1;
- if (trail1 >= 8) {
- /* interior stuffing:
- * omitting the MSB handles most cases,
- * correct the incorrectly handled
- * cases individually */
- inbits--;
- switch (c) {
- case 0xbe:
- inbyte = 0x3f;
- break;
- }
- }
- }
- /* else: continue flag-hunting */
- } else if (likely(seqlen < 5 && trail1 < 7)) {
- /* streamlined case: 8 data bits, no stuffing */
- inbyte |= c << inbits;
- hdlc_putbyte(inbyte & 0xff, bcs);
- inputstate |= INS_have_data;
- inbyte >>= 8;
- /* inbits unchanged */
- } else if (likely(seqlen == 6 && inbits == 7 - lead1 &&
- trail1 + 1 == inbits &&
- !(inputstate & INS_have_data))) {
- /* streamlined case: flag idle - state unchanged */
- } else if (unlikely(seqlen > 6)) {
- /* abort sequence */
- ubc->aborts++;
- hdlc_flush(bcs);
- inputstate |= INS_flag_hunt;
- } else if (seqlen == 6) {
- /* closing flag, including (6 - lead1) '1's
- * and one '0' from inbits */
- if (inbits > 7 - lead1) {
- hdlc_frag(bcs, inbits + lead1 - 7);
- inputstate &= ~INS_have_data;
- } else {
- if (inbits < 7 - lead1)
- ubc->stolen0s++;
- if (inputstate & INS_have_data) {
- hdlc_done(bcs);
- inputstate &= ~INS_have_data;
- }
- }
-
- if (c == PPP_FLAG) {
- /* complete flag, LSB overlaps preceding flag */
- ubc->shared0s++;
- inbits = 0;
- inbyte = 0;
- } else if (trail1 != 7) {
- /* remaining bits */
- inbyte = c >> (lead1 + 1);
- inbits = 7 - lead1;
- if (trail1 >= 8) {
- /* interior stuffing:
- * omitting the MSB handles most cases,
- * correct the incorrectly handled
- * cases individually */
- inbits--;
- switch (c) {
- case 0xbe:
- inbyte = 0x3f;
- break;
- }
- }
- } else {
- /* abort sequence follows,
- * skb already empty anyway */
- ubc->aborts++;
- inputstate |= INS_flag_hunt;
- }
- } else { /* (seqlen < 6) && (seqlen == 5 || trail1 >= 7) */
-
- if (c == PPP_FLAG) {
- /* complete flag */
- if (seqlen == 5)
- ubc->stolen0s++;
- if (inbits) {
- hdlc_frag(bcs, inbits);
- inbits = 0;
- inbyte = 0;
- } else if (inputstate & INS_have_data)
- hdlc_done(bcs);
- inputstate &= ~INS_have_data;
- } else if (trail1 == 7) {
- /* abort sequence */
- ubc->aborts++;
- hdlc_flush(bcs);
- inputstate |= INS_flag_hunt;
- } else {
- /* stuffed data */
- if (trail1 < 7) { /* => seqlen == 5 */
- /* stuff bit at position lead1,
- * no interior stuffing */
- unsigned char mask = (1 << lead1) - 1;
- c = (c & mask) | ((c & ~mask) >> 1);
- inbyte |= c << inbits;
- inbits += 7;
- } else if (seqlen < 5) { /* trail1 >= 8 */
- /* interior stuffing:
- * omitting the MSB handles most cases,
- * correct the incorrectly handled
- * cases individually */
- switch (c) {
- case 0xbe:
- c = 0x7e;
- break;
- }
- inbyte |= c << inbits;
- inbits += 7;
- } else { /* seqlen == 5 && trail1 >= 8 */
-
- /* stuff bit at lead1 *and* interior
- * stuffing -- unstuff individually */
- switch (c) {
- case 0x7d:
- c = 0x3f;
- break;
- case 0xbe:
- c = 0x3f;
- break;
- case 0x3e:
- c = 0x1f;
- break;
- case 0x7c:
- c = 0x3e;
- break;
- }
- inbyte |= c << inbits;
- inbits += 6;
- }
- if (inbits >= 8) {
- inbits -= 8;
- hdlc_putbyte(inbyte & 0xff, bcs);
- inputstate |= INS_have_data;
- inbyte >>= 8;
- }
- }
- }
- seqlen = trail1 & 7;
- }
-
- /* save new state */
- bcs->inputstate = inputstate;
- ubc->seqlen = seqlen;
- ubc->inbyte = inbyte;
- ubc->inbits = inbits;
-}
-
-/* trans_receive
- * pass on received USB frame transparently as SKB via gigaset_skb_rcvd
- * reverse the bit order of each byte (bitrev8)
- * tally frames, errors etc. in BC structure counters
- * parameters:
- * src received data
- * count number of received bytes
- * bcs receiving B channel structure
- */
-static inline void trans_receive(unsigned char *src, unsigned count,
- struct bc_state *bcs)
-{
- struct sk_buff *skb;
- int dobytes;
- unsigned char *dst;
-
- if (unlikely(bcs->ignore)) {
- bcs->ignore--;
- return;
- }
- skb = bcs->rx_skb;
- if (skb == NULL) {
- skb = gigaset_new_rx_skb(bcs);
- if (skb == NULL)
- return;
- }
- dobytes = bcs->rx_bufsize - skb->len;
- while (count > 0) {
- dst = skb_put(skb, count < dobytes ? count : dobytes);
- while (count > 0 && dobytes > 0) {
- *dst++ = bitrev8(*src++);
- count--;
- dobytes--;
- }
- if (dobytes == 0) {
- dump_bytes(DEBUG_STREAM_DUMP,
- "rcv data", skb->data, skb->len);
- bcs->hw.bas->goodbytes += skb->len;
- gigaset_skb_rcvd(bcs, skb);
- skb = gigaset_new_rx_skb(bcs);
- if (skb == NULL)
- return;
- dobytes = bcs->rx_bufsize;
- }
- }
-}
-
-void gigaset_isoc_receive(unsigned char *src, unsigned count,
- struct bc_state *bcs)
-{
- switch (bcs->proto2) {
- case L2_HDLC:
- hdlc_unpack(src, count, bcs);
- break;
- default: /* assume transparent */
- trans_receive(src, count, bcs);
- }
-}
-
-/* == data input =========================================================== */
-
-/* process a block of received bytes in command mode (mstate != MS_LOCKED)
- * Append received bytes to the command response buffer and forward them
- * line by line to the response handler.
- * Note: Received lines may be terminated by CR, LF, or CR LF, which will be
- * removed before passing the line to the response handler.
- */
-static void cmd_loop(unsigned char *src, int numbytes, struct inbuf_t *inbuf)
-{
- struct cardstate *cs = inbuf->cs;
- unsigned cbytes = cs->cbytes;
- unsigned char c;
-
- while (numbytes--) {
- c = *src++;
- switch (c) {
- case '\n':
- if (cbytes == 0 && cs->respdata[0] == '\r') {
- /* collapse LF with preceding CR */
- cs->respdata[0] = 0;
- break;
- }
- /* fall through */
- case '\r':
- /* end of message line, pass to response handler */
- if (cbytes >= MAX_RESP_SIZE) {
- dev_warn(cs->dev, "response too large (%d)\n",
- cbytes);
- cbytes = MAX_RESP_SIZE;
- }
- cs->cbytes = cbytes;
- gigaset_dbg_buffer(DEBUG_TRANSCMD, "received response",
- cbytes, cs->respdata);
- gigaset_handle_modem_response(cs);
- cbytes = 0;
-
- /* store EOL byte for CRLF collapsing */
- cs->respdata[0] = c;
- break;
- default:
- /* append to line buffer if possible */
- if (cbytes < MAX_RESP_SIZE)
- cs->respdata[cbytes] = c;
- cbytes++;
- }
- }
-
- /* save state */
- cs->cbytes = cbytes;
-}
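As a worked example, the byte stream "OK\r\n" yields exactly one response line: 'O' and 'K' are buffered, the '\r' terminates the line (cbytes = 2) and is saved in respdata[0], and the subsequent '\n' is collapsed against that stored CR instead of producing an empty second line.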
-
-
-/* process a block of data received through the control channel
- */
-void gigaset_isoc_input(struct inbuf_t *inbuf)
-{
- struct cardstate *cs = inbuf->cs;
- unsigned tail, head, numbytes;
- unsigned char *src;
-
- head = inbuf->head;
- while (head != (tail = inbuf->tail)) {
- gig_dbg(DEBUG_INTR, "buffer state: %u -> %u", head, tail);
- if (head > tail)
- tail = RBUFSIZE;
- src = inbuf->data + head;
- numbytes = tail - head;
- gig_dbg(DEBUG_INTR, "processing %u bytes", numbytes);
-
- if (cs->mstate == MS_LOCKED) {
- gigaset_dbg_buffer(DEBUG_LOCKCMD, "received response",
- numbytes, src);
- gigaset_if_receive(inbuf->cs, src, numbytes);
- } else {
- cmd_loop(src, numbytes, inbuf);
- }
-
- head += numbytes;
- if (head == RBUFSIZE)
- head = 0;
- gig_dbg(DEBUG_INTR, "setting head to %u", head);
- inbuf->head = head;
- }
-}
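gigaset_isoc_input() drains inbuf as a single-producer/single-consumer ring: the hardware side advances tail, this consumer advances head, head == tail means empty, and a wrapped readable span is handled in at most two contiguous chunks. The arithmetic in isolation, as a sketch in which 'ring' stands in for inbuf and process() for the real handlers:

	/* consumer side of the RBUFSIZE ring used above (sketch) */
	while (head != (tail = ring->tail)) {	/* re-sample producer index */
		unsigned end = (head > tail) ? RBUFSIZE : tail;
		unsigned n = end - head;	/* contiguous bytes at head */

		process(ring->data + head, n);

		head += n;
		if (head == RBUFSIZE)
			head = 0;		/* wrap back to buffer start */
		ring->head = head;		/* publish consumer progress */
	}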
-
-
-/* == data output ========================================================== */
-
-/**
- * gigaset_isoc_send_skb() - queue an skb for sending
- * @bcs: B channel descriptor structure.
- * @skb: data to send.
- *
- * Called by LL to queue an skb for sending, and start transmission if
- * necessary.
- * Once the payload data has been transmitted completely, gigaset_skb_sent()
- * will be called with the skb's link layer header preserved.
- *
- * Return value:
- * number of bytes accepted for sending (skb->len) if ok,
- * error code < 0 (eg. -ENODEV) on error
- */
-int gigaset_isoc_send_skb(struct bc_state *bcs, struct sk_buff *skb)
-{
- int len = skb->len;
- unsigned long flags;
-
- spin_lock_irqsave(&bcs->cs->lock, flags);
- if (!bcs->cs->connected) {
- spin_unlock_irqrestore(&bcs->cs->lock, flags);
- return -ENODEV;
- }
-
- skb_queue_tail(&bcs->squeue, skb);
- gig_dbg(DEBUG_ISO, "%s: skb queued, qlen=%d",
- __func__, skb_queue_len(&bcs->squeue));
-
- /* tasklet submits URB if necessary */
- tasklet_schedule(&bcs->hw.bas->sent_tasklet);
- spin_unlock_irqrestore(&bcs->cs->lock, flags);
-
- return len; /* ok so far */
-}
diff --git a/drivers/staging/isdn/gigaset/proc.c b/drivers/staging/isdn/gigaset/proc.c
deleted file mode 100644
index 8914439a4237..000000000000
--- a/drivers/staging/isdn/gigaset/proc.c
+++ /dev/null
@@ -1,77 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-/*
- * Stuff used by all variants of the driver
- *
- * Copyright (c) 2001 by Stefan Eilers,
- * Hansjoerg Lipp <hjlipp@web.de>,
- * Tilman Schmidt <tilman@imap.cc>.
- *
- * =====================================================================
- * =====================================================================
- */
-
-#include "gigaset.h"
-
-static ssize_t show_cidmode(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- struct cardstate *cs = dev_get_drvdata(dev);
-
- return sprintf(buf, "%u\n", cs->cidmode);
-}
-
-static ssize_t set_cidmode(struct device *dev, struct device_attribute *attr,
- const char *buf, size_t count)
-{
- struct cardstate *cs = dev_get_drvdata(dev);
- long int value;
- char *end;
-
- value = simple_strtol(buf, &end, 0);
- while (*end)
- if (!isspace(*end++))
- return -EINVAL;
- if (value < 0 || value > 1)
- return -EINVAL;
-
- if (mutex_lock_interruptible(&cs->mutex))
- return -ERESTARTSYS;
-
- cs->waiting = 1;
- if (!gigaset_add_event(cs, &cs->at_state, EV_PROC_CIDMODE,
- NULL, value, NULL)) {
- cs->waiting = 0;
- mutex_unlock(&cs->mutex);
- return -ENOMEM;
- }
- gigaset_schedule_event(cs);
-
- wait_event(cs->waitqueue, !cs->waiting);
-
- mutex_unlock(&cs->mutex);
-
- return count;
-}
-
-static DEVICE_ATTR(cidmode, S_IRUGO | S_IWUSR, show_cidmode, set_cidmode);
-
-/* free sysfs for device */
-void gigaset_free_dev_sysfs(struct cardstate *cs)
-{
- if (!cs->tty_dev)
- return;
-
- gig_dbg(DEBUG_INIT, "removing sysfs entries");
- device_remove_file(cs->tty_dev, &dev_attr_cidmode);
-}
-
-/* initialize sysfs for device */
-void gigaset_init_dev_sysfs(struct cardstate *cs)
-{
- if (!cs->tty_dev)
- return;
-
- gig_dbg(DEBUG_INIT, "setting up sysfs");
- if (device_create_file(cs->tty_dev, &dev_attr_cidmode))
- pr_err("could not create sysfs attribute\n");
-}
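The parse-and-validate dance in set_cidmode() (simple_strtol() followed by a manual isspace() scan) predates the kstrto* helpers; a contemporary version could let kstrtobool() accept the usual '0'/'1' input including a trailing newline. A hedged sketch of just the parsing part, assuming kstrtobool() is available:

	static ssize_t set_cidmode(struct device *dev,
				   struct device_attribute *attr,
				   const char *buf, size_t count)
	{
		struct cardstate *cs = dev_get_drvdata(dev);
		bool value;

		if (kstrtobool(buf, &value))	/* handles trailing '\n' */
			return -EINVAL;
		/* ... queue EV_PROC_CIDMODE with 'value' and wait, as above ... */
		return count;
	}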
diff --git a/drivers/staging/isdn/gigaset/ser-gigaset.c b/drivers/staging/isdn/gigaset/ser-gigaset.c
deleted file mode 100644
index 5587e9e7fc73..000000000000
--- a/drivers/staging/isdn/gigaset/ser-gigaset.c
+++ /dev/null
@@ -1,796 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-/* This is the serial hardware link layer (HLL) for the Gigaset 307x isdn
- * DECT base (aka Sinus 45 isdn) using the RS232 DECT data module M101,
- * written as a line discipline.
- *
- * =====================================================================
- * =====================================================================
- */
-
-#include "gigaset.h"
-#include <linux/module.h>
-#include <linux/moduleparam.h>
-#include <linux/platform_device.h>
-#include <linux/completion.h>
-
-/* Version Information */
-#define DRIVER_AUTHOR "Tilman Schmidt"
-#define DRIVER_DESC "Serial Driver for Gigaset 307x using Siemens M101"
-
-#define GIGASET_MINORS 1
-#define GIGASET_MINOR 0
-#define GIGASET_MODULENAME "ser_gigaset"
-#define GIGASET_DEVNAME "ttyGS"
-
-/* length limit according to Siemens 3070usb-protokoll.doc ch. 2.1 */
-#define IF_WRITEBUF 264
-
-MODULE_AUTHOR(DRIVER_AUTHOR);
-MODULE_DESCRIPTION(DRIVER_DESC);
-MODULE_LICENSE("GPL");
-MODULE_ALIAS_LDISC(N_GIGASET_M101);
-
-static int startmode = SM_ISDN;
-module_param(startmode, int, S_IRUGO);
-MODULE_PARM_DESC(startmode, "initial operation mode");
-static int cidmode = 1;
-module_param(cidmode, int, S_IRUGO);
-MODULE_PARM_DESC(cidmode, "stay in CID mode when idle");
-
-static struct gigaset_driver *driver;
-
-struct ser_cardstate {
- struct platform_device dev;
- struct tty_struct *tty;
- atomic_t refcnt;
- struct completion dead_cmp;
-};
-
-static struct platform_driver device_driver = {
- .driver = {
- .name = GIGASET_MODULENAME,
- },
-};
-
-static void flush_send_queue(struct cardstate *);
-
-/* transmit data from current open skb
- * result: number of bytes sent or error code < 0
- */
-static int write_modem(struct cardstate *cs)
-{
- struct tty_struct *tty = cs->hw.ser->tty;
- struct bc_state *bcs = &cs->bcs[0]; /* only one channel */
- struct sk_buff *skb = bcs->tx_skb;
- int sent = -EOPNOTSUPP;
-
- WARN_ON(!tty || !tty->ops || !skb);
-
- if (!skb->len) {
- dev_kfree_skb_any(skb);
- bcs->tx_skb = NULL;
- return -EINVAL;
- }
-
- set_bit(TTY_DO_WRITE_WAKEUP, &tty->flags);
- if (tty->ops->write)
- sent = tty->ops->write(tty, skb->data, skb->len);
- gig_dbg(DEBUG_OUTPUT, "write_modem: sent %d", sent);
- if (sent < 0) {
- /* error */
- flush_send_queue(cs);
- return sent;
- }
- skb_pull(skb, sent);
- if (!skb->len) {
- /* skb sent completely */
- gigaset_skb_sent(bcs, skb);
-
- gig_dbg(DEBUG_INTR, "kfree skb (Adr: %lx)!",
- (unsigned long) skb);
- dev_kfree_skb_any(skb);
- bcs->tx_skb = NULL;
- }
- return sent;
-}
-
-/*
- * transmit first queued command buffer
- * result: number of bytes sent or error code < 0
- */
-static int send_cb(struct cardstate *cs)
-{
- struct tty_struct *tty = cs->hw.ser->tty;
- struct cmdbuf_t *cb, *tcb;
- unsigned long flags;
- int sent = 0;
-
- WARN_ON(!tty || !tty->ops);
-
- cb = cs->cmdbuf;
- if (!cb)
- return 0; /* nothing to do */
-
- if (cb->len) {
- set_bit(TTY_DO_WRITE_WAKEUP, &tty->flags);
- sent = tty->ops->write(tty, cb->buf + cb->offset, cb->len);
- if (sent < 0) {
- /* error */
- gig_dbg(DEBUG_OUTPUT, "send_cb: write error %d", sent);
- flush_send_queue(cs);
- return sent;
- }
- cb->offset += sent;
- cb->len -= sent;
- gig_dbg(DEBUG_OUTPUT, "send_cb: sent %d, left %u, queued %u",
- sent, cb->len, cs->cmdbytes);
- }
-
- while (cb && !cb->len) {
- spin_lock_irqsave(&cs->cmdlock, flags);
- cs->cmdbytes -= cs->curlen;
- tcb = cb;
- cs->cmdbuf = cb = cb->next;
- if (cb) {
- cb->prev = NULL;
- cs->curlen = cb->len;
- } else {
- cs->lastcmdbuf = NULL;
- cs->curlen = 0;
- }
- spin_unlock_irqrestore(&cs->cmdlock, flags);
-
- if (tcb->wake_tasklet)
- tasklet_schedule(tcb->wake_tasklet);
- kfree(tcb);
- }
- return sent;
-}
-
-/*
- * send queue tasklet
- * If there is already a skb opened, put data to the transfer buffer
- * by calling "write_modem".
- * Otherwise take a new skb out of the queue.
- */
-static void gigaset_modem_fill(unsigned long data)
-{
- struct cardstate *cs = (struct cardstate *) data;
- struct bc_state *bcs;
- struct sk_buff *nextskb;
- int sent = 0;
-
- if (!cs) {
- gig_dbg(DEBUG_OUTPUT, "%s: no cardstate", __func__);
- return;
- }
- bcs = cs->bcs;
- if (!bcs) {
- gig_dbg(DEBUG_OUTPUT, "%s: no B channel data", __func__);
- return;
- }
- if (!bcs->tx_skb) {
- /* no skb is being sent; send command if any */
- sent = send_cb(cs);
- gig_dbg(DEBUG_OUTPUT, "%s: send_cb -> %d", __func__, sent);
- if (sent)
- /* something sent or error */
- return;
-
- /* no command to send; get skb */
- nextskb = skb_dequeue(&bcs->squeue);
- if (!nextskb)
- /* no skb either, nothing to do */
- return;
- bcs->tx_skb = nextskb;
-
- gig_dbg(DEBUG_INTR, "Dequeued skb (Adr: %lx)",
- (unsigned long) bcs->tx_skb);
- }
-
- /* send skb */
- gig_dbg(DEBUG_OUTPUT, "%s: tx_skb", __func__);
- if (write_modem(cs) < 0)
- gig_dbg(DEBUG_OUTPUT, "%s: write_modem failed", __func__);
-}
-
-/*
- * throw away all data queued for sending
- */
-static void flush_send_queue(struct cardstate *cs)
-{
- struct sk_buff *skb;
- struct cmdbuf_t *cb;
- unsigned long flags;
-
- /* command queue */
- spin_lock_irqsave(&cs->cmdlock, flags);
- while ((cb = cs->cmdbuf) != NULL) {
- cs->cmdbuf = cb->next;
- if (cb->wake_tasklet)
- tasklet_schedule(cb->wake_tasklet);
- kfree(cb);
- }
- cs->cmdbuf = cs->lastcmdbuf = NULL;
- cs->cmdbytes = cs->curlen = 0;
- spin_unlock_irqrestore(&cs->cmdlock, flags);
-
- /* data queue */
- if (cs->bcs->tx_skb)
- dev_kfree_skb_any(cs->bcs->tx_skb);
- while ((skb = skb_dequeue(&cs->bcs->squeue)) != NULL)
- dev_kfree_skb_any(skb);
-}
-
-
-/* Gigaset Driver Interface */
-/* ======================== */
-
-/*
- * queue an AT command string for transmission to the Gigaset device
- * parameters:
- * cs controller state structure
- * buf buffer containing the string to send
- * len number of characters to send
- * wake_tasklet tasklet to run when transmission is complete, or NULL
- * return value:
- * number of bytes queued, or error code < 0
- */
-static int gigaset_write_cmd(struct cardstate *cs, struct cmdbuf_t *cb)
-{
- unsigned long flags;
-
- gigaset_dbg_buffer(cs->mstate != MS_LOCKED ?
- DEBUG_TRANSCMD : DEBUG_LOCKCMD,
- "CMD Transmit", cb->len, cb->buf);
-
- spin_lock_irqsave(&cs->cmdlock, flags);
- cb->prev = cs->lastcmdbuf;
- if (cs->lastcmdbuf)
- cs->lastcmdbuf->next = cb;
- else {
- cs->cmdbuf = cb;
- cs->curlen = cb->len;
- }
- cs->cmdbytes += cb->len;
- cs->lastcmdbuf = cb;
- spin_unlock_irqrestore(&cs->cmdlock, flags);
-
- spin_lock_irqsave(&cs->lock, flags);
- if (cs->connected)
- tasklet_schedule(&cs->write_tasklet);
- spin_unlock_irqrestore(&cs->lock, flags);
- return cb->len;
-}
-
-/*
- * tty_driver.write_room interface routine
- * return number of characters the driver will accept to be written
- * parameter:
- * controller state structure
- * return value:
- * number of characters
- */
-static int gigaset_write_room(struct cardstate *cs)
-{
- unsigned bytes;
-
- bytes = cs->cmdbytes;
- return bytes < IF_WRITEBUF ? IF_WRITEBUF - bytes : 0;
-}
-
-/*
- * tty_driver.chars_in_buffer interface routine
- * return number of characters waiting to be sent
- * parameter:
- * controller state structure
- * return value:
- * number of characters
- */
-static int gigaset_chars_in_buffer(struct cardstate *cs)
-{
- return cs->cmdbytes;
-}
-
-/*
- * implementation of ioctl(GIGASET_BRKCHARS)
- * parameter:
- * controller state structure
- * return value:
- * -EINVAL (unimplemented function)
- */
-static int gigaset_brkchars(struct cardstate *cs, const unsigned char buf[6])
-{
- /* not implemented */
- return -EINVAL;
-}
-
-/*
- * Open B channel
- * Called by "do_action" in ev-layer.c
- */
-static int gigaset_init_bchannel(struct bc_state *bcs)
-{
- /* nothing to do for M10x */
- gigaset_bchannel_up(bcs);
- return 0;
-}
-
-/*
- * Close B channel
- * Called by "do_action" in ev-layer.c
- */
-static int gigaset_close_bchannel(struct bc_state *bcs)
-{
- /* nothing to do for M10x */
- gigaset_bchannel_down(bcs);
- return 0;
-}
-
-/*
- * Set up B channel structure
- * This is called by "gigaset_initcs" in common.c
- */
-static int gigaset_initbcshw(struct bc_state *bcs)
-{
- /* unused */
- bcs->hw.ser = NULL;
- return 0;
-}
-
-/*
- * Free B channel structure
- * Called by "gigaset_freebcs" in common.c
- */
-static void gigaset_freebcshw(struct bc_state *bcs)
-{
- /* unused */
-}
-
-/*
- * Reinitialize B channel structure
- * This is called by "bcs_reinit" in common.c
- */
-static void gigaset_reinitbcshw(struct bc_state *bcs)
-{
- /* nothing to do for M10x */
-}
-
-/*
- * Free hardware specific device data
- * This will be called by "gigaset_freecs" in common.c
- */
-static void gigaset_freecshw(struct cardstate *cs)
-{
- tasklet_kill(&cs->write_tasklet);
- if (!cs->hw.ser)
- return;
- platform_device_unregister(&cs->hw.ser->dev);
-}
-
-static void gigaset_device_release(struct device *dev)
-{
- kfree(container_of(dev, struct ser_cardstate, dev.dev));
-}
-
-/*
- * Set up hardware specific device data
- * This is called by "gigaset_initcs" in common.c
- */
-static int gigaset_initcshw(struct cardstate *cs)
-{
- int rc;
- struct ser_cardstate *scs;
-
- scs = kzalloc(sizeof(struct ser_cardstate), GFP_KERNEL);
- if (!scs) {
- pr_err("out of memory\n");
- return -ENOMEM;
- }
- cs->hw.ser = scs;
-
- cs->hw.ser->dev.name = GIGASET_MODULENAME;
- cs->hw.ser->dev.id = cs->minor_index;
- cs->hw.ser->dev.dev.release = gigaset_device_release;
- rc = platform_device_register(&cs->hw.ser->dev);
- if (rc != 0) {
- pr_err("error %d registering platform device\n", rc);
- kfree(cs->hw.ser);
- cs->hw.ser = NULL;
- return rc;
- }
-
- tasklet_init(&cs->write_tasklet,
- gigaset_modem_fill, (unsigned long) cs);
- return 0;
-}
-
-/*
- * set modem control lines
- * Parameters:
- * card state structure
- * modem control line state ([TIOCM_DTR]|[TIOCM_RTS])
- * Called by "gigaset_start" and "gigaset_enterconfigmode" in common.c
- * and by "if_lock" and "if_termios" in interface.c
- */
-static int gigaset_set_modem_ctrl(struct cardstate *cs, unsigned old_state,
- unsigned new_state)
-{
- struct tty_struct *tty = cs->hw.ser->tty;
- unsigned int set, clear;
-
- WARN_ON(!tty || !tty->ops);
- /* tiocmset is an optional tty driver method */
- if (!tty->ops->tiocmset)
- return -EINVAL;
- set = new_state & ~old_state;
- clear = old_state & ~new_state;
- if (!set && !clear)
- return 0;
- gig_dbg(DEBUG_IF, "tiocmset set %x clear %x", set, clear);
- return tty->ops->tiocmset(tty, set, clear);
-}
-
-static int gigaset_baud_rate(struct cardstate *cs, unsigned cflag)
-{
- return -EINVAL;
-}
-
-static int gigaset_set_line_ctrl(struct cardstate *cs, unsigned cflag)
-{
- return -EINVAL;
-}
-
-static const struct gigaset_ops ops = {
- .write_cmd = gigaset_write_cmd,
- .write_room = gigaset_write_room,
- .chars_in_buffer = gigaset_chars_in_buffer,
- .brkchars = gigaset_brkchars,
- .init_bchannel = gigaset_init_bchannel,
- .close_bchannel = gigaset_close_bchannel,
- .initbcshw = gigaset_initbcshw,
- .freebcshw = gigaset_freebcshw,
- .reinitbcshw = gigaset_reinitbcshw,
- .initcshw = gigaset_initcshw,
- .freecshw = gigaset_freecshw,
- .set_modem_ctrl = gigaset_set_modem_ctrl,
- .baud_rate = gigaset_baud_rate,
- .set_line_ctrl = gigaset_set_line_ctrl,
- .send_skb = gigaset_m10x_send_skb, /* asyncdata.c */
- .handle_input = gigaset_m10x_input, /* asyncdata.c */
-};
-
-
-/* Line Discipline Interface */
-/* ========================= */
-
-/* helper functions for cardstate refcounting */
-static struct cardstate *cs_get(struct tty_struct *tty)
-{
- struct cardstate *cs = tty->disc_data;
-
- if (!cs || !cs->hw.ser) {
- gig_dbg(DEBUG_ANY, "%s: no cardstate", __func__);
- return NULL;
- }
- atomic_inc(&cs->hw.ser->refcnt);
- return cs;
-}
-
-static void cs_put(struct cardstate *cs)
-{
- if (atomic_dec_and_test(&cs->hw.ser->refcnt))
- complete(&cs->hw.ser->dead_cmp);
-}
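cs_get()/cs_put() implement a common teardown idiom: the owner holds the initial reference, every ldisc entry point takes a temporary one, and gigaset_tty_close() drops the owner's reference and then sleeps on dead_cmp until the last in-flight caller is gone. The pattern in condensed form ('obj' is a stand-in for ser_cardstate):

	/* refcount-plus-completion teardown, condensed (sketch) */
	struct obj {
		atomic_t refcnt;		/* initialized to 1 by owner */
		struct completion dead;		/* completed on the final put */
	};

	static void obj_put(struct obj *o)
	{
		if (atomic_dec_and_test(&o->refcnt))
			complete(&o->dead);
	}

	static void obj_teardown(struct obj *o)	/* owner side */
	{
		if (!atomic_dec_and_test(&o->refcnt))
			wait_for_completion(&o->dead);	/* wait out users */
		/* now exclusive: safe to free and unregister */
	}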
-
-/*
- * Called by the tty driver when the line discipline is pushed onto the tty.
- * Called in process context.
- */
-static int
-gigaset_tty_open(struct tty_struct *tty)
-{
- struct cardstate *cs;
- int rc;
-
- gig_dbg(DEBUG_INIT, "Starting HLL for Gigaset M101");
-
- pr_info(DRIVER_DESC "\n");
-
- if (!driver) {
- pr_err("%s: no driver structure\n", __func__);
- return -ENODEV;
- }
-
- /* allocate memory for our device state and initialize it */
- cs = gigaset_initcs(driver, 1, 1, 0, cidmode, GIGASET_MODULENAME);
- if (!cs) {
- rc = -ENODEV;
- goto error;
- }
-
- cs->dev = &cs->hw.ser->dev.dev;
- cs->hw.ser->tty = tty;
- atomic_set(&cs->hw.ser->refcnt, 1);
- init_completion(&cs->hw.ser->dead_cmp);
- tty->disc_data = cs;
-
- /* Set the amount of data we're willing to receive per call
- * from the hardware driver to half of the input buffer size
- * to leave some reserve.
- * Note: We don't do flow control towards the hardware driver.
- * If more data is received than will fit into the input buffer,
- * it will be dropped and an error will be logged. This should
- * never happen as the device is slow and the buffer size ample.
- */
- tty->receive_room = RBUFSIZE/2;
-
- /* Initialization of the data structures and the hardware is done.
- * Now start up the system and notify the LL that we are ready to run.
- */
- if (startmode == SM_LOCKED)
- cs->mstate = MS_LOCKED;
- rc = gigaset_start(cs);
- if (rc < 0) {
- tasklet_kill(&cs->write_tasklet);
- goto error;
- }
-
- gig_dbg(DEBUG_INIT, "Startup of HLL done");
- return 0;
-
-error:
- gig_dbg(DEBUG_INIT, "Startup of HLL failed");
- tty->disc_data = NULL;
- gigaset_freecs(cs);
- return rc;
-}
-
-/*
- * Called by the tty driver when the line discipline is removed.
- * Called from process context.
- */
-static void
-gigaset_tty_close(struct tty_struct *tty)
-{
- struct cardstate *cs = tty->disc_data;
-
- gig_dbg(DEBUG_INIT, "Stopping HLL for Gigaset M101");
-
- if (!cs) {
- gig_dbg(DEBUG_INIT, "%s: no cardstate", __func__);
- return;
- }
-
- /* prevent other callers from entering ldisc methods */
- tty->disc_data = NULL;
-
- if (!cs->hw.ser)
- pr_err("%s: no hw cardstate\n", __func__);
- else {
- /* wait for running methods to finish */
- if (!atomic_dec_and_test(&cs->hw.ser->refcnt))
- wait_for_completion(&cs->hw.ser->dead_cmp);
- }
-
- /* stop operations */
- gigaset_stop(cs);
- tasklet_kill(&cs->write_tasklet);
- flush_send_queue(cs);
- cs->dev = NULL;
- gigaset_freecs(cs);
-
- gig_dbg(DEBUG_INIT, "Shutdown of HLL done");
-}
-
-/*
- * Called by the tty driver when the tty line is hung up.
- * Wait for I/O to driver to complete and unregister ISDN device.
- * This is already done by the close routine, so just call that.
- * Called from process context.
- */
-static int gigaset_tty_hangup(struct tty_struct *tty)
-{
- gigaset_tty_close(tty);
- return 0;
-}
-
-/*
- * Ioctl on the tty.
- * Called in process context only.
- * May be re-entered by multiple ioctl calling threads.
- */
-static int
-gigaset_tty_ioctl(struct tty_struct *tty, struct file *file,
- unsigned int cmd, unsigned long arg)
-{
- struct cardstate *cs = cs_get(tty);
- int rc, val;
- int __user *p = (int __user *)arg;
-
- if (!cs)
- return -ENXIO;
-
- switch (cmd) {
-
- case FIONREAD:
- /* unused, always return zero */
- val = 0;
- rc = put_user(val, p);
- break;
-
- case TCFLSH:
- /* flush our buffers and the serial port's buffer */
- switch (arg) {
- case TCIFLUSH:
- /* no own input buffer to flush */
- break;
- case TCIOFLUSH:
- case TCOFLUSH:
- flush_send_queue(cs);
- break;
- }
- /* fall through */
-
- default:
- /* pass through to underlying serial device */
- rc = n_tty_ioctl_helper(tty, file, cmd, arg);
- break;
- }
- cs_put(cs);
- return rc;
-}
-
-/*
- * Called by the tty driver when a block of data has been received.
- * Will not be re-entered while running but other ldisc functions
- * may be called in parallel.
- * Can be called from hard interrupt level as well as soft interrupt
- * level or mainline.
- * Parameters:
- * tty tty structure
- * buf buffer containing received characters
- * cflags buffer containing error flags for received characters (ignored)
- * count number of received characters
- */
-static void
-gigaset_tty_receive(struct tty_struct *tty, const unsigned char *buf,
- char *cflags, int count)
-{
- struct cardstate *cs = cs_get(tty);
- unsigned tail, head, n;
- struct inbuf_t *inbuf;
-
- if (!cs)
- return;
- inbuf = cs->inbuf;
- if (!inbuf) {
- dev_err(cs->dev, "%s: no inbuf\n", __func__);
- cs_put(cs);
- return;
- }
-
- tail = inbuf->tail;
- head = inbuf->head;
- gig_dbg(DEBUG_INTR, "buffer state: %u -> %u, receive %u bytes",
- head, tail, count);
-
- if (head <= tail) {
- /* possible buffer wraparound */
- n = min_t(unsigned, count, RBUFSIZE - tail);
- memcpy(inbuf->data + tail, buf, n);
- tail = (tail + n) % RBUFSIZE;
- buf += n;
- count -= n;
- }
-
- if (count > 0) {
- /* tail < head and some data left */
- n = head - tail - 1;
- if (count > n) {
- dev_err(cs->dev,
- "inbuf overflow, discarding %d bytes\n",
- count - n);
- count = n;
- }
- memcpy(inbuf->data + tail, buf, count);
- tail += count;
- }
-
- gig_dbg(DEBUG_INTR, "setting tail to %u", tail);
- inbuf->tail = tail;
-
- /* Everything has been received; push the data to the handler */
- gig_dbg(DEBUG_INTR, "%s-->BH", __func__);
- gigaset_schedule_event(cs);
- cs_put(cs);
-}
-
-/*
- * Called by the tty driver when there's room for more data to send.
- */
-static void
-gigaset_tty_wakeup(struct tty_struct *tty)
-{
- struct cardstate *cs = cs_get(tty);
-
- clear_bit(TTY_DO_WRITE_WAKEUP, &tty->flags);
- if (!cs)
- return;
- tasklet_schedule(&cs->write_tasklet);
- cs_put(cs);
-}
-
-static struct tty_ldisc_ops gigaset_ldisc = {
- .owner = THIS_MODULE,
- .magic = TTY_LDISC_MAGIC,
- .name = "ser_gigaset",
- .open = gigaset_tty_open,
- .close = gigaset_tty_close,
- .hangup = gigaset_tty_hangup,
- .ioctl = gigaset_tty_ioctl,
- .receive_buf = gigaset_tty_receive,
- .write_wakeup = gigaset_tty_wakeup,
-};
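A line discipline registered like this is activated from user space by opening the serial port and switching its discipline with the TIOCSETD ioctl (ldattach(8) does the same). A minimal hedged sketch; the helper name is invented and the device path is only an example:

	/* user-space sketch: attach the ldisc to a serial port */
	#include <fcntl.h>
	#include <unistd.h>
	#include <sys/ioctl.h>
	#include <linux/tty.h>			/* N_GIGASET_M101 */

	int attach_gigaset_ldisc(const char *path)	/* e.g. "/dev/ttyS0" */
	{
		int ldisc = N_GIGASET_M101;
		int fd = open(path, O_RDWR | O_NOCTTY);

		if (fd < 0)
			return -1;
		if (ioctl(fd, TIOCSETD, &ldisc) < 0) {
			close(fd);
			return -1;
		}
		return fd;	/* must stay open while the ldisc is attached */
	}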
-
-
-/* Initialization / Shutdown */
-/* ========================= */
-
-static int __init ser_gigaset_init(void)
-{
- int rc;
-
- gig_dbg(DEBUG_INIT, "%s", __func__);
- rc = platform_driver_register(&device_driver);
- if (rc != 0) {
- pr_err("error %d registering platform driver\n", rc);
- return rc;
- }
-
- /* allocate memory for our driver state and initialize it */
- driver = gigaset_initdriver(GIGASET_MINOR, GIGASET_MINORS,
- GIGASET_MODULENAME, GIGASET_DEVNAME,
- &ops, THIS_MODULE);
- if (!driver) {
- rc = -ENOMEM;
- goto error;
- }
-
- rc = tty_register_ldisc(N_GIGASET_M101, &gigaset_ldisc);
- if (rc != 0) {
- pr_err("error %d registering line discipline\n", rc);
- goto error;
- }
-
- return 0;
-
-error:
- if (driver) {
- gigaset_freedriver(driver);
- driver = NULL;
- }
- platform_driver_unregister(&device_driver);
- return rc;
-}
-
-static void __exit ser_gigaset_exit(void)
-{
- int rc;
-
- gig_dbg(DEBUG_INIT, "%s", __func__);
-
- if (driver) {
- gigaset_freedriver(driver);
- driver = NULL;
- }
-
- rc = tty_unregister_ldisc(N_GIGASET_M101);
- if (rc != 0)
- pr_err("error %d unregistering line discipline\n", rc);
-
- platform_driver_unregister(&device_driver);
-}
-
-module_init(ser_gigaset_init);
-module_exit(ser_gigaset_exit);
diff --git a/drivers/staging/isdn/gigaset/usb-gigaset.c b/drivers/staging/isdn/gigaset/usb-gigaset.c
deleted file mode 100644
index a20c0bfa68f3..000000000000
--- a/drivers/staging/isdn/gigaset/usb-gigaset.c
+++ /dev/null
@@ -1,959 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-/*
- * USB driver for Gigaset 307x directly or using M105 Data.
- *
- * Copyright (c) 2001 by Stefan Eilers
- * and Hansjoerg Lipp <hjlipp@web.de>.
- *
- * This driver was derived from the USB skeleton driver by
- * Greg Kroah-Hartman <greg@kroah.com>
- *
- * =====================================================================
- * =====================================================================
- */
-
-#include "gigaset.h"
-#include <linux/usb.h>
-#include <linux/module.h>
-#include <linux/moduleparam.h>
-
-/* Version Information */
-#define DRIVER_AUTHOR "Hansjoerg Lipp <hjlipp@web.de>, Stefan Eilers"
-#define DRIVER_DESC "USB Driver for Gigaset 307x using M105"
-
-/* Module parameters */
-
-static int startmode = SM_ISDN;
-static int cidmode = 1;
-
-module_param(startmode, int, S_IRUGO);
-module_param(cidmode, int, S_IRUGO);
-MODULE_PARM_DESC(startmode, "start in isdn4linux mode");
-MODULE_PARM_DESC(cidmode, "Call-ID mode");
-
-#define GIGASET_MINORS 1
-#define GIGASET_MINOR 8
-#define GIGASET_MODULENAME "usb_gigaset"
-#define GIGASET_DEVNAME "ttyGU"
-
-/* length limit according to Siemens 3070usb-protokoll.doc ch. 2.1 */
-#define IF_WRITEBUF 264
-
-/* Values for the Gigaset M105 Data */
-#define USB_M105_VENDOR_ID 0x0681
-#define USB_M105_PRODUCT_ID 0x0009
-
-/* table of devices that work with this driver */
-static const struct usb_device_id gigaset_table[] = {
- { USB_DEVICE(USB_M105_VENDOR_ID, USB_M105_PRODUCT_ID) },
- { } /* Terminating entry */
-};
-
-MODULE_DEVICE_TABLE(usb, gigaset_table);
-
-/*
- * Control requests (empty fields: 00)
- *
- * RT|RQ|VALUE|INDEX|LEN |DATA
- * In:
- * C1 08 01
- * Get flags (1 byte). Bits: 0=dtr,1=rts,3-7:?
- * C1 0F ll ll
- * Get device information/status (llll: 0x200 and 0x40 seen).
- * Real size: I only saw MIN(llll,0x64).
- * Contents: appear to always be the same...
- * offset 0x00: Length of this structure (0x64) (len: 1,2,3 bytes)
- * offset 0x3c: String (16 bit chars): "MCCI USB Serial V2.0"
- * rest: ?
- * Out:
- * 41 11
- * Initialize/reset device ?
- * 41 00 xx 00
- * ? (xx=00 or 01; 01 on start, 00 on close)
- * 41 07 vv mm
- * Set/clear flags vv=value, mm=mask (see RQ 08)
- * 41 12 xx
- * Used before the following configuration requests are issued
- * (with xx=0x0f). I've seen other values<0xf, though.
- * 41 01 xx xx
- * Set baud rate. xxxx=ceil(0x384000/rate)=trunc(0x383fff/rate)+1.
- * 41 03 ps bb
- * Set byte size and parity. p: 0x20=even,0x10=odd,0x00=no parity
- * [ 0x30: m, 0x40: s ]
- * [s: 0: 1 stop bit; 1: 1.5; 2: 2]
- * bb: bits/byte (seen 7 and 8)
- * 41 13 -- -- -- -- 10 00 ww 00 00 00 xx 00 00 00 yy 00 00 00 zz 00 00 00
- * ??
- * Initialization: 01, 40, 00, 00
- * Open device: 00 40, 00, 00
- * yy and zz seem to be equal, either 0x00 or 0x0a
- * (ww,xx) pairs seen: (00,00), (00,40), (01,40), (09,80), (19,80)
- * 41 19 -- -- -- -- 06 00 00 00 00 xx 11 13
- * Used after every "configuration sequence" (RQ 12, RQs 01/03/13).
- * xx is usually 0x00 but was 0x7e before starting data transfer
- * in unimodem mode. So, this might be an array of characters that
- * need special treatment ("commit all buffered data"?), 11=^Q, 13=^S.
- *
- * Unimodem mode: use "modprobe ppp_async flag_time=0" as the device _needs_ two
- * flags per packet.
- */
-
-/* functions called if a device of this driver is connected/disconnected */
-static int gigaset_probe(struct usb_interface *interface,
- const struct usb_device_id *id);
-static void gigaset_disconnect(struct usb_interface *interface);
-
-/* functions called before/after suspend */
-static int gigaset_suspend(struct usb_interface *intf, pm_message_t message);
-static int gigaset_resume(struct usb_interface *intf);
-static int gigaset_pre_reset(struct usb_interface *intf);
-
-static struct gigaset_driver *driver;
-
-/* usb specific object needed to register this driver with the usb subsystem */
-static struct usb_driver gigaset_usb_driver = {
- .name = GIGASET_MODULENAME,
- .probe = gigaset_probe,
- .disconnect = gigaset_disconnect,
- .id_table = gigaset_table,
- .suspend = gigaset_suspend,
- .resume = gigaset_resume,
- .reset_resume = gigaset_resume,
- .pre_reset = gigaset_pre_reset,
- .post_reset = gigaset_resume,
- .disable_hub_initiated_lpm = 1,
-};
-
-struct usb_cardstate {
- struct usb_device *udev; /* usb device pointer */
- struct usb_interface *interface; /* interface for this device */
- int busy; /* bulk output in progress */
-
- /* Output buffer */
- unsigned char *bulk_out_buffer;
- int bulk_out_size;
- int bulk_out_epnum;
- struct urb *bulk_out_urb;
-
- /* Input buffer */
- unsigned char *rcvbuf;
- int rcvbuf_size;
- struct urb *read_urb;
-
- char bchars[6]; /* for request 0x19 */
-};
-
-static inline unsigned tiocm_to_gigaset(unsigned state)
-{
- return ((state & TIOCM_DTR) ? 1 : 0) | ((state & TIOCM_RTS) ? 2 : 0);
-}
-
-static int gigaset_set_modem_ctrl(struct cardstate *cs, unsigned old_state,
- unsigned new_state)
-{
- struct usb_device *udev = cs->hw.usb->udev;
- unsigned mask, val;
- int r;
-
- mask = tiocm_to_gigaset(old_state ^ new_state);
- val = tiocm_to_gigaset(new_state);
-
- gig_dbg(DEBUG_USBREQ, "set flags 0x%02x with mask 0x%02x", val, mask);
- r = usb_control_msg(udev, usb_sndctrlpipe(udev, 0), 7, 0x41,
- (val & 0xff) | ((mask & 0xff) << 8), 0,
- NULL, 0, 2000 /* timeout? */);
- if (r < 0)
- return r;
- return 0;
-}
-
-/*
- * Set M105 configuration value
- * using undocumented device commands reverse engineered from USB traces
- * of the Siemens Windows driver
- */
-static int set_value(struct cardstate *cs, u8 req, u16 val)
-{
- struct usb_device *udev = cs->hw.usb->udev;
- int r, r2;
-
- gig_dbg(DEBUG_USBREQ, "request %02x (%04x)",
- (unsigned)req, (unsigned)val);
- r = usb_control_msg(udev, usb_sndctrlpipe(udev, 0), 0x12, 0x41,
- 0xf /*?*/, 0, NULL, 0, 2000 /*?*/);
- /* no idea what this does */
- if (r < 0) {
- dev_err(&udev->dev, "error %d on request 0x12\n", -r);
- return r;
- }
-
- r = usb_control_msg(udev, usb_sndctrlpipe(udev, 0), req, 0x41,
- val, 0, NULL, 0, 2000 /*?*/);
- if (r < 0)
- dev_err(&udev->dev, "error %d on request 0x%02x\n",
- -r, (unsigned)req);
-
- r2 = usb_control_msg(udev, usb_sndctrlpipe(udev, 0), 0x19, 0x41,
- 0, 0, cs->hw.usb->bchars, 6, 2000 /*?*/);
- if (r2 < 0)
- dev_err(&udev->dev, "error %d on request 0x19\n", -r2);
-
- return r < 0 ? r : (r2 < 0 ? r2 : 0);
-}
-
-/*
- * set the baud rate on the internal serial adapter
- * using the undocumented parameter setting command
- */
-static int gigaset_baud_rate(struct cardstate *cs, unsigned cflag)
-{
- u16 val;
- u32 rate;
-
- cflag &= CBAUD;
-
- switch (cflag) {
- case B300: rate = 300; break;
- case B600: rate = 600; break;
- case B1200: rate = 1200; break;
- case B2400: rate = 2400; break;
- case B4800: rate = 4800; break;
- case B9600: rate = 9600; break;
- case B19200: rate = 19200; break;
- case B38400: rate = 38400; break;
- case B57600: rate = 57600; break;
- case B115200: rate = 115200; break;
- default:
- rate = 9600;
- dev_err(cs->dev, "unsupported baudrate request 0x%x,"
- " using default of B9600\n", cflag);
- }
-
- val = 0x383fff / rate + 1;
-
- return set_value(cs, 1, val);
-}
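Worked example of the divisor formula: for 115200 baud, val = 0x383fff / 115200 + 1 = 3686399 / 115200 + 1 = 31 + 1 = 32 (0x20), which equals ceil(0x384000 / rate), i.e. a 3.6864 MHz base clock divided down to the requested rate, matching the formula quoted in the control-request notes above.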
-
-/*
- * set the line format on the internal serial adapter
- * using the undocumented parameter setting command
- */
-static int gigaset_set_line_ctrl(struct cardstate *cs, unsigned cflag)
-{
- u16 val = 0;
-
- /* set the parity */
- if (cflag & PARENB)
- val |= (cflag & PARODD) ? 0x10 : 0x20;
-
- /* set the number of data bits */
- switch (cflag & CSIZE) {
- case CS5:
- val |= 5 << 8; break;
- case CS6:
- val |= 6 << 8; break;
- case CS7:
- val |= 7 << 8; break;
- case CS8:
- val |= 8 << 8; break;
- default:
- dev_err(cs->dev, "CSIZE was not CS5-CS8, using default of 8\n");
- val |= 8 << 8;
- break;
- }
-
- /* set the number of stop bits */
- if (cflag & CSTOPB) {
- if ((cflag & CSIZE) == CS5)
- val |= 1; /* 1.5 stop bits */
- else
- val |= 2; /* 2 stop bits */
- }
-
- return set_value(cs, 3, val);
-}
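Worked example of the line-format encoding: plain 8N1 (CS8, no PARENB, no CSTOPB) yields val = 8 << 8 = 0x0800, while 7E1 (CS7 | PARENB, even parity) yields 0x20 | (7 << 8) = 0x0720; adding CSTOPB to CS8 would set bit 1 for two stop bits, giving 0x0802.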
-
-
-/*============================================================================*/
-static int gigaset_init_bchannel(struct bc_state *bcs)
-{
- /* nothing to do for M10x */
- gigaset_bchannel_up(bcs);
- return 0;
-}
-
-static int gigaset_close_bchannel(struct bc_state *bcs)
-{
- /* nothing to do for M10x */
- gigaset_bchannel_down(bcs);
- return 0;
-}
-
-static int write_modem(struct cardstate *cs);
-static int send_cb(struct cardstate *cs);
-
-
-/* Write tasklet handler: Continue sending current skb, or send command, or
- * start sending an skb from the send queue.
- */
-static void gigaset_modem_fill(unsigned long data)
-{
- struct cardstate *cs = (struct cardstate *) data;
- struct bc_state *bcs = &cs->bcs[0]; /* only one channel */
-
- gig_dbg(DEBUG_OUTPUT, "modem_fill");
-
- if (cs->hw.usb->busy) {
- gig_dbg(DEBUG_OUTPUT, "modem_fill: busy");
- return;
- }
-
-again:
- if (!bcs->tx_skb) { /* no skb is being sent */
- if (cs->cmdbuf) { /* commands to send? */
- gig_dbg(DEBUG_OUTPUT, "modem_fill: cb");
- if (send_cb(cs) < 0) {
- gig_dbg(DEBUG_OUTPUT,
- "modem_fill: send_cb failed");
- goto again; /* no callback will be called! */
- }
- return;
- }
-
- /* skbs to send? */
- bcs->tx_skb = skb_dequeue(&bcs->squeue);
- if (!bcs->tx_skb)
- return;
-
- gig_dbg(DEBUG_INTR, "Dequeued skb (Adr: %lx)!",
- (unsigned long) bcs->tx_skb);
- }
-
- gig_dbg(DEBUG_OUTPUT, "modem_fill: tx_skb");
- if (write_modem(cs) < 0) {
- gig_dbg(DEBUG_OUTPUT, "modem_fill: write_modem failed");
- goto again; /* no callback will be called! */
- }
-}
-
-/*
- * Interrupt Input URB completion routine
- */
-static void gigaset_read_int_callback(struct urb *urb)
-{
- struct cardstate *cs = urb->context;
- struct inbuf_t *inbuf = cs->inbuf;
- int status = urb->status;
- int r;
- unsigned numbytes;
- unsigned char *src;
- unsigned long flags;
-
- if (!status) {
- numbytes = urb->actual_length;
-
- if (numbytes) {
- src = cs->hw.usb->rcvbuf;
- if (unlikely(*src))
- dev_warn(cs->dev,
- "%s: There was no leading 0, but 0x%02x!\n",
- __func__, (unsigned) *src);
- ++src; /* skip leading 0x00 */
- --numbytes;
- if (gigaset_fill_inbuf(inbuf, src, numbytes)) {
- gig_dbg(DEBUG_INTR, "%s-->BH", __func__);
- gigaset_schedule_event(inbuf->cs);
- }
- } else
- gig_dbg(DEBUG_INTR, "Received zero block length");
- } else {
- /* The urb might have been killed. */
- gig_dbg(DEBUG_ANY, "%s - nonzero status received: %d",
- __func__, status);
- if (status == -ENOENT || status == -ESHUTDOWN)
- /* killed or endpoint shutdown: don't resubmit */
- return;
- }
-
- /* resubmit URB */
- spin_lock_irqsave(&cs->lock, flags);
- if (!cs->connected) {
- spin_unlock_irqrestore(&cs->lock, flags);
- pr_err("%s: disconnected\n", __func__);
- return;
- }
- r = usb_submit_urb(urb, GFP_ATOMIC);
- spin_unlock_irqrestore(&cs->lock, flags);
- if (r)
- dev_err(cs->dev, "error %d resubmitting URB\n", -r);
-}
-
-
-/* This callback routine is called when data was transmitted to the device. */
-static void gigaset_write_bulk_callback(struct urb *urb)
-{
- struct cardstate *cs = urb->context;
- int status = urb->status;
- unsigned long flags;
-
- switch (status) {
- case 0: /* normal completion */
- break;
- case -ENOENT: /* killed */
- gig_dbg(DEBUG_ANY, "%s: killed", __func__);
- cs->hw.usb->busy = 0;
- return;
- default:
- dev_err(cs->dev, "bulk transfer failed (status %d)\n",
- -status);
- /* That's all we can do. Communication problems
- are handled by timeouts or network protocols. */
- }
-
- spin_lock_irqsave(&cs->lock, flags);
- if (!cs->connected) {
- pr_err("%s: disconnected\n", __func__);
- } else {
- cs->hw.usb->busy = 0;
- tasklet_schedule(&cs->write_tasklet);
- }
- spin_unlock_irqrestore(&cs->lock, flags);
-}
-
-static int send_cb(struct cardstate *cs)
-{
- struct cmdbuf_t *cb = cs->cmdbuf;
- unsigned long flags;
- int count;
- int status = -ENOENT;
- struct usb_cardstate *ucs = cs->hw.usb;
-
- do {
- if (!cb->len) {
- spin_lock_irqsave(&cs->cmdlock, flags);
- cs->cmdbytes -= cs->curlen;
- gig_dbg(DEBUG_OUTPUT, "send_cb: sent %u bytes, %u left",
- cs->curlen, cs->cmdbytes);
- cs->cmdbuf = cb->next;
- if (cs->cmdbuf) {
- cs->cmdbuf->prev = NULL;
- cs->curlen = cs->cmdbuf->len;
- } else {
- cs->lastcmdbuf = NULL;
- cs->curlen = 0;
- }
- spin_unlock_irqrestore(&cs->cmdlock, flags);
-
- if (cb->wake_tasklet)
- tasklet_schedule(cb->wake_tasklet);
- kfree(cb);
-
- cb = cs->cmdbuf;
- }
-
- if (cb) {
- count = min(cb->len, ucs->bulk_out_size);
- gig_dbg(DEBUG_OUTPUT, "send_cb: send %d bytes", count);
-
- usb_fill_bulk_urb(ucs->bulk_out_urb, ucs->udev,
- usb_sndbulkpipe(ucs->udev,
- ucs->bulk_out_epnum),
- cb->buf + cb->offset, count,
- gigaset_write_bulk_callback, cs);
-
- cb->offset += count;
- cb->len -= count;
- ucs->busy = 1;
-
- spin_lock_irqsave(&cs->lock, flags);
- status = cs->connected ?
- usb_submit_urb(ucs->bulk_out_urb, GFP_ATOMIC) :
- -ENODEV;
- spin_unlock_irqrestore(&cs->lock, flags);
-
- if (status) {
- ucs->busy = 0;
- dev_err(cs->dev,
- "could not submit urb (error %d)\n",
- -status);
- cb->len = 0; /* skip urb => remove cb+wakeup
- in next loop cycle */
- }
- }
- } while (cb && status); /* next command on error */
-
- return status;
-}
-
-/* Send command to device. */
-static int gigaset_write_cmd(struct cardstate *cs, struct cmdbuf_t *cb)
-{
- unsigned long flags;
- int len;
-
- gigaset_dbg_buffer(cs->mstate != MS_LOCKED ?
- DEBUG_TRANSCMD : DEBUG_LOCKCMD,
- "CMD Transmit", cb->len, cb->buf);
-
- spin_lock_irqsave(&cs->cmdlock, flags);
- cb->prev = cs->lastcmdbuf;
- if (cs->lastcmdbuf)
- cs->lastcmdbuf->next = cb;
- else {
- cs->cmdbuf = cb;
- cs->curlen = cb->len;
- }
- cs->cmdbytes += cb->len;
- cs->lastcmdbuf = cb;
- spin_unlock_irqrestore(&cs->cmdlock, flags);
-
- spin_lock_irqsave(&cs->lock, flags);
- len = cb->len;
- if (cs->connected)
- tasklet_schedule(&cs->write_tasklet);
- spin_unlock_irqrestore(&cs->lock, flags);
- return len;
-}
-
-static int gigaset_write_room(struct cardstate *cs)
-{
- unsigned bytes;
-
- bytes = cs->cmdbytes;
- return bytes < IF_WRITEBUF ? IF_WRITEBUF - bytes : 0;
-}
-
-static int gigaset_chars_in_buffer(struct cardstate *cs)
-{
- return cs->cmdbytes;
-}
-
-/*
- * set the break characters on the internal serial adapter
- * using undocumented device commands reverse engineered from USB traces
- * of the Siemens Windows driver
- */
-static int gigaset_brkchars(struct cardstate *cs, const unsigned char buf[6])
-{
- struct usb_device *udev = cs->hw.usb->udev;
-
- gigaset_dbg_buffer(DEBUG_USBREQ, "brkchars", 6, buf);
- memcpy(cs->hw.usb->bchars, buf, 6);
- /* send the kmalloc-backed copy: &buf would pass the address of the
- * pointer variable itself, and a caller's buffer need not be DMA-safe */
- return usb_control_msg(udev, usb_sndctrlpipe(udev, 0), 0x19, 0x41,
- 0, 0, cs->hw.usb->bchars, 6, 2000);
-}
-
-static void gigaset_freebcshw(struct bc_state *bcs)
-{
- /* unused */
-}
-
-/* Initialize the b-channel structure */
-static int gigaset_initbcshw(struct bc_state *bcs)
-{
- /* unused */
- bcs->hw.usb = NULL;
- return 0;
-}
-
-static void gigaset_reinitbcshw(struct bc_state *bcs)
-{
- /* nothing to do for M10x */
-}
-
-static void gigaset_freecshw(struct cardstate *cs)
-{
- tasklet_kill(&cs->write_tasklet);
- kfree(cs->hw.usb);
-}
-
-static int gigaset_initcshw(struct cardstate *cs)
-{
- struct usb_cardstate *ucs;
-
- cs->hw.usb = ucs = kzalloc(sizeof(struct usb_cardstate), GFP_KERNEL);
- if (!ucs) {
- pr_err("out of memory\n");
- return -ENOMEM;
- }
-
- ucs->bchars[0] = 0;
- ucs->bchars[1] = 0;
- ucs->bchars[2] = 0;
- ucs->bchars[3] = 0;
- ucs->bchars[4] = 0x11;
- ucs->bchars[5] = 0x13;
- tasklet_init(&cs->write_tasklet,
- gigaset_modem_fill, (unsigned long) cs);
-
- return 0;
-}
-
-/* Send data from current skb to the device. */
-static int write_modem(struct cardstate *cs)
-{
- int ret = 0;
- int count;
- struct bc_state *bcs = &cs->bcs[0]; /* only one channel */
- struct usb_cardstate *ucs = cs->hw.usb;
- unsigned long flags;
-
- gig_dbg(DEBUG_OUTPUT, "len: %d...", bcs->tx_skb->len);
-
- if (!bcs->tx_skb->len) {
- dev_kfree_skb_any(bcs->tx_skb);
- bcs->tx_skb = NULL;
- return -EINVAL;
- }
-
- /* Copy data to bulk out buffer and transmit data */
- count = min(bcs->tx_skb->len, (unsigned) ucs->bulk_out_size);
- skb_copy_from_linear_data(bcs->tx_skb, ucs->bulk_out_buffer, count);
- skb_pull(bcs->tx_skb, count);
- ucs->busy = 1;
- gig_dbg(DEBUG_OUTPUT, "write_modem: send %d bytes", count);
-
- spin_lock_irqsave(&cs->lock, flags);
- if (cs->connected) {
- usb_fill_bulk_urb(ucs->bulk_out_urb, ucs->udev,
- usb_sndbulkpipe(ucs->udev,
- ucs->bulk_out_epnum),
- ucs->bulk_out_buffer, count,
- gigaset_write_bulk_callback, cs);
- ret = usb_submit_urb(ucs->bulk_out_urb, GFP_ATOMIC);
- } else {
- ret = -ENODEV;
- }
- spin_unlock_irqrestore(&cs->lock, flags);
-
- if (ret) {
- dev_err(cs->dev, "could not submit urb (error %d)\n", -ret);
- ucs->busy = 0;
- }
-
- if (!bcs->tx_skb->len) {
- /* skb sent completely */
- gigaset_skb_sent(bcs, bcs->tx_skb);
-
- gig_dbg(DEBUG_INTR, "kfree skb (Adr: %lx)!",
- (unsigned long) bcs->tx_skb);
- dev_kfree_skb_any(bcs->tx_skb);
- bcs->tx_skb = NULL;
- }
-
- return ret;
-}
-
-static int gigaset_probe(struct usb_interface *interface,
- const struct usb_device_id *id)
-{
- int retval;
- struct usb_device *udev = interface_to_usbdev(interface);
- struct usb_host_interface *hostif = interface->cur_altsetting;
- struct cardstate *cs = NULL;
- struct usb_cardstate *ucs = NULL;
- struct usb_endpoint_descriptor *endpoint;
- int buffer_size;
-
- gig_dbg(DEBUG_ANY, "%s: Check if device matches ...", __func__);
-
- /* See if the device offered us matches what we can accept */
- if ((le16_to_cpu(udev->descriptor.idVendor) != USB_M105_VENDOR_ID) ||
- (le16_to_cpu(udev->descriptor.idProduct) != USB_M105_PRODUCT_ID)) {
- gig_dbg(DEBUG_ANY, "device ID (0x%x, 0x%x) not for me - skip",
- le16_to_cpu(udev->descriptor.idVendor),
- le16_to_cpu(udev->descriptor.idProduct));
- return -ENODEV;
- }
- if (hostif->desc.bInterfaceNumber != 0) {
- gig_dbg(DEBUG_ANY, "interface %d not for me - skip",
- hostif->desc.bInterfaceNumber);
- return -ENODEV;
- }
- if (hostif->desc.bAlternateSetting != 0) {
- dev_notice(&udev->dev, "unsupported altsetting %d - skip",
- hostif->desc.bAlternateSetting);
- return -ENODEV;
- }
- if (hostif->desc.bInterfaceClass != 255) {
- dev_notice(&udev->dev, "unsupported interface class %d - skip",
- hostif->desc.bInterfaceClass);
- return -ENODEV;
- }
-
- if (hostif->desc.bNumEndpoints < 2) {
- dev_err(&interface->dev, "missing endpoints\n");
- return -ENODEV;
- }
-
- dev_info(&udev->dev, "%s: Device matched ... !\n", __func__);
-
- /* allocate memory for our device state and initialize it */
- cs = gigaset_initcs(driver, 1, 1, 0, cidmode, GIGASET_MODULENAME);
- if (!cs)
- return -ENODEV;
- ucs = cs->hw.usb;
-
- /* save off device structure ptrs for later use */
- usb_get_dev(udev);
- ucs->udev = udev;
- ucs->interface = interface;
- cs->dev = &interface->dev;
-
- /* save address of controller structure */
- usb_set_intfdata(interface, cs);
-
- endpoint = &hostif->endpoint[0].desc;
-
- if (!usb_endpoint_is_bulk_out(endpoint)) {
- dev_err(&interface->dev, "missing bulk-out endpoint\n");
- retval = -ENODEV;
- goto error;
- }
-
- buffer_size = le16_to_cpu(endpoint->wMaxPacketSize);
- ucs->bulk_out_size = buffer_size;
- ucs->bulk_out_epnum = usb_endpoint_num(endpoint);
- ucs->bulk_out_buffer = kmalloc(buffer_size, GFP_KERNEL);
- if (!ucs->bulk_out_buffer) {
- dev_err(cs->dev, "Couldn't allocate bulk_out_buffer\n");
- retval = -ENOMEM;
- goto error;
- }
-
- ucs->bulk_out_urb = usb_alloc_urb(0, GFP_KERNEL);
- if (!ucs->bulk_out_urb) {
- dev_err(cs->dev, "Couldn't allocate bulk_out_urb\n");
- retval = -ENOMEM;
- goto error;
- }
-
- endpoint = &hostif->endpoint[1].desc;
-
- if (!usb_endpoint_is_int_in(endpoint)) {
- dev_err(&interface->dev, "missing int-in endpoint\n");
- retval = -ENODEV;
- goto error;
- }
-
- ucs->busy = 0;
-
- ucs->read_urb = usb_alloc_urb(0, GFP_KERNEL);
- if (!ucs->read_urb) {
- dev_err(cs->dev, "No free urbs available\n");
- retval = -ENOMEM;
- goto error;
- }
- buffer_size = le16_to_cpu(endpoint->wMaxPacketSize);
- ucs->rcvbuf_size = buffer_size;
- ucs->rcvbuf = kmalloc(buffer_size, GFP_KERNEL);
- if (!ucs->rcvbuf) {
- dev_err(cs->dev, "Couldn't allocate rcvbuf\n");
- retval = -ENOMEM;
- goto error;
- }
- /* Fill the interrupt urb and send it to the core */
- usb_fill_int_urb(ucs->read_urb, udev,
- usb_rcvintpipe(udev, usb_endpoint_num(endpoint)),
- ucs->rcvbuf, buffer_size,
- gigaset_read_int_callback,
- cs, endpoint->bInterval);
-
- retval = usb_submit_urb(ucs->read_urb, GFP_KERNEL);
- if (retval) {
- dev_err(cs->dev, "Could not submit URB (error %d)\n", -retval);
- goto error;
- }
-
- /* tell common part that the device is ready */
- if (startmode == SM_LOCKED)
- cs->mstate = MS_LOCKED;
-
- retval = gigaset_start(cs);
- if (retval < 0) {
- tasklet_kill(&cs->write_tasklet);
- goto error;
- }
- return 0;
-
-error:
- usb_kill_urb(ucs->read_urb);
- kfree(ucs->bulk_out_buffer);
- usb_free_urb(ucs->bulk_out_urb);
- kfree(ucs->rcvbuf);
- usb_free_urb(ucs->read_urb);
- usb_set_intfdata(interface, NULL);
- ucs->read_urb = ucs->bulk_out_urb = NULL;
- ucs->rcvbuf = ucs->bulk_out_buffer = NULL;
- usb_put_dev(ucs->udev);
- ucs->udev = NULL;
- ucs->interface = NULL;
- gigaset_freecs(cs);
- return retval;
-}
-
-static void gigaset_disconnect(struct usb_interface *interface)
-{
- struct cardstate *cs;
- struct usb_cardstate *ucs;
-
- cs = usb_get_intfdata(interface);
- ucs = cs->hw.usb;
-
- dev_info(cs->dev, "disconnecting Gigaset USB adapter\n");
-
- usb_kill_urb(ucs->read_urb);
-
- gigaset_stop(cs);
-
- usb_set_intfdata(interface, NULL);
- tasklet_kill(&cs->write_tasklet);
-
- usb_kill_urb(ucs->bulk_out_urb);
-
- kfree(ucs->bulk_out_buffer);
- usb_free_urb(ucs->bulk_out_urb);
- kfree(ucs->rcvbuf);
- usb_free_urb(ucs->read_urb);
- ucs->read_urb = ucs->bulk_out_urb = NULL;
- ucs->rcvbuf = ucs->bulk_out_buffer = NULL;
-
- usb_put_dev(ucs->udev);
- ucs->interface = NULL;
- ucs->udev = NULL;
- cs->dev = NULL;
- gigaset_freecs(cs);
-}
-
-/* gigaset_suspend
- * This function is called before the USB connection is suspended or reset.
- */
-static int gigaset_suspend(struct usb_interface *intf, pm_message_t message)
-{
- struct cardstate *cs = usb_get_intfdata(intf);
-
- /* stop activity */
- cs->connected = 0; /* prevent rescheduling */
- usb_kill_urb(cs->hw.usb->read_urb);
- tasklet_kill(&cs->write_tasklet);
- usb_kill_urb(cs->hw.usb->bulk_out_urb);
-
- gig_dbg(DEBUG_SUSPEND, "suspend complete");
- return 0;
-}
-
-/* gigaset_resume
- * This function is called after the USB connection has been resumed or reset.
- */
-static int gigaset_resume(struct usb_interface *intf)
-{
- struct cardstate *cs = usb_get_intfdata(intf);
- int rc;
-
- /* resubmit interrupt URB */
- cs->connected = 1;
- rc = usb_submit_urb(cs->hw.usb->read_urb, GFP_KERNEL);
- if (rc) {
- dev_err(cs->dev, "Could not submit read URB (error %d)\n", -rc);
- return rc;
- }
-
- gig_dbg(DEBUG_SUSPEND, "resume complete");
- return 0;
-}
-
-/* gigaset_pre_reset
- * This function is called before the USB connection is reset.
- */
-static int gigaset_pre_reset(struct usb_interface *intf)
-{
- /* same as suspend */
- return gigaset_suspend(intf, PMSG_ON);
-}
-
-static const struct gigaset_ops ops = {
- .write_cmd = gigaset_write_cmd,
- .write_room = gigaset_write_room,
- .chars_in_buffer = gigaset_chars_in_buffer,
- .brkchars = gigaset_brkchars,
- .init_bchannel = gigaset_init_bchannel,
- .close_bchannel = gigaset_close_bchannel,
- .initbcshw = gigaset_initbcshw,
- .freebcshw = gigaset_freebcshw,
- .reinitbcshw = gigaset_reinitbcshw,
- .initcshw = gigaset_initcshw,
- .freecshw = gigaset_freecshw,
- .set_modem_ctrl = gigaset_set_modem_ctrl,
- .baud_rate = gigaset_baud_rate,
- .set_line_ctrl = gigaset_set_line_ctrl,
- .send_skb = gigaset_m10x_send_skb,
- .handle_input = gigaset_m10x_input,
-};
-
-/*
- * This function is called while kernel-module is loaded
- */
-static int __init usb_gigaset_init(void)
-{
- int result;
-
- /* allocate memory for our driver state and initialize it */
- driver = gigaset_initdriver(GIGASET_MINOR, GIGASET_MINORS,
- GIGASET_MODULENAME, GIGASET_DEVNAME,
- &ops, THIS_MODULE);
- if (driver == NULL) {
- result = -ENOMEM;
- goto error;
- }
-
- /* register this driver with the USB subsystem */
- result = usb_register(&gigaset_usb_driver);
- if (result < 0) {
- pr_err("error %d registering USB driver\n", -result);
- goto error;
- }
-
- pr_info(DRIVER_DESC "\n");
- return 0;
-
-error:
- if (driver)
- gigaset_freedriver(driver);
- driver = NULL;
- return result;
-}
-
-/*
- * This function is called while unloading the kernel-module
- */
-static void __exit usb_gigaset_exit(void)
-{
- int i;
-
- gigaset_blockdriver(driver); /* => probe will fail
- * => no gigaset_start any more
- */
-
- /* stop all connected devices */
- for (i = 0; i < driver->minors; i++)
- gigaset_shutdown(driver->cs + i);
-
- /* from now on, no isdn callback should be possible */
-
- /* deregister this driver with the USB subsystem */
- usb_deregister(&gigaset_usb_driver);
- /* this will call the disconnect-callback */
- /* from now on, no disconnect/probe callback should be running */
-
- gigaset_freedriver(driver);
- driver = NULL;
-}
-
-
-module_init(usb_gigaset_init);
-module_exit(usb_gigaset_exit);
-
-MODULE_AUTHOR(DRIVER_AUTHOR);
-MODULE_DESCRIPTION(DRIVER_DESC);
-
-MODULE_LICENSE("GPL");
diff --git a/drivers/staging/isdn/hysdn/Kconfig b/drivers/staging/isdn/hysdn/Kconfig
deleted file mode 100644
index 4c8a9283b9dd..000000000000
--- a/drivers/staging/isdn/hysdn/Kconfig
+++ /dev/null
@@ -1,15 +0,0 @@
-# SPDX-License-Identifier: GPL-2.0-only
-config HYSDN
- tristate "Hypercope HYSDN cards (Champ, Ergo, Metro) support (module only)"
- depends on m && PROC_FS && PCI
- help
- Say Y here if you have one of Hypercope's active PCI ISDN cards
- Champ, Ergo and Metro. You will then get a module called hysdn.
- Please read the file <file:Documentation/isdn/hysdn.rst> for more
- information.
-
-config HYSDN_CAPI
- bool "HYSDN CAPI 2.0 support"
- depends on HYSDN && ISDN_CAPI
- help
- Say Y here if you like to use Hypercope's CAPI 2.0 interface.
diff --git a/drivers/staging/isdn/hysdn/Makefile b/drivers/staging/isdn/hysdn/Makefile
deleted file mode 100644
index e01f17f22ebb..000000000000
--- a/drivers/staging/isdn/hysdn/Makefile
+++ /dev/null
@@ -1,12 +0,0 @@
-# SPDX-License-Identifier: GPL-2.0-only
-# Makefile for the hysdn ISDN device driver
-
-# Each configuration option enables a list of files.
-
-obj-$(CONFIG_HYSDN) += hysdn.o
-
-# Multipart objects.
-
-hysdn-y := hysdn_procconf.o hysdn_proclog.o boardergo.o \
- hysdn_boot.o hysdn_sched.o hysdn_net.o hysdn_init.o
-hysdn-$(CONFIG_HYSDN_CAPI) += hycapi.o
diff --git a/drivers/staging/isdn/hysdn/boardergo.c b/drivers/staging/isdn/hysdn/boardergo.c
deleted file mode 100644
index 2aa2a0e08247..000000000000
--- a/drivers/staging/isdn/hysdn/boardergo.c
+++ /dev/null
@@ -1,445 +0,0 @@
-/* $Id: boardergo.c,v 1.5.6.7 2001/11/06 21:58:19 kai Exp $
- *
- * Linux driver for HYSDN cards, specific routines for ergo type boards.
- *
- * Author Werner Cornelius (werner@titro.de) for Hypercope GmbH
- * Copyright 1999 by Werner Cornelius (werner@titro.de)
- *
- * This software may be used and distributed according to the terms
- * of the GNU General Public License, incorporated herein by reference.
- *
- * As all Linux-supported cards (Champ2, Ergo and Metro2/4) use the same
- * DPRAM interface and layout with only minor differences, all related
- * code is kept here rather than in separate modules.
- *
- */
-
-#include <linux/signal.h>
-#include <linux/kernel.h>
-#include <linux/ioport.h>
-#include <linux/interrupt.h>
-#include <linux/vmalloc.h>
-#include <linux/delay.h>
-#include <asm/io.h>
-
-#include "hysdn_defs.h"
-#include "boardergo.h"
-
-#define byteout(addr, val) outb(val, addr)
-#define bytein(addr) inb(addr)
-
-/***************************************************/
-/* The cards interrupt handler. Called from system */
-/***************************************************/
-static irqreturn_t
-ergo_interrupt(int intno, void *dev_id)
-{
- hysdn_card *card = dev_id; /* parameter from irq */
- tErgDpram *dpr;
- unsigned long flags;
- unsigned char volatile b;
-
- if (!card)
- return IRQ_NONE; /* error -> spurious interrupt */
- if (!card->irq_enabled)
- return IRQ_NONE; /* other device interrupting or irq switched off */
-
- spin_lock_irqsave(&card->hysdn_lock, flags); /* no further irqs allowed */
-
- if (!(bytein(card->iobase + PCI9050_INTR_REG) & PCI9050_INTR_REG_STAT1)) {
- spin_unlock_irqrestore(&card->hysdn_lock, flags); /* restore old state */
- return IRQ_NONE; /* no interrupt requested by E1 */
- }
- /* clear any pending ints on the board */
- dpr = card->dpram;
- b = dpr->ToPcInt; /* clear for ergo */
- b |= dpr->ToPcIntMetro; /* same for metro */
- b |= dpr->ToHyInt; /* and for champ */
-
- /* start kernel task immediately after leaving all interrupts */
- if (!card->hw_lock)
- schedule_work(&card->irq_queue);
- spin_unlock_irqrestore(&card->hysdn_lock, flags);
- return IRQ_HANDLED;
-} /* ergo_interrupt */
-
-/******************************************************************************/
-/* ergo_irq_bh will be called as part of the kernel clearing its shared work */
-/* queue sometime after a call to schedule_work has been made passing our */
-/* work_struct. This task is the only one handling data transfer from or to */
-/* the card after booting. The task may be queued from everywhere */
-/* (interrupts included). */
-/******************************************************************************/
-static void
-ergo_irq_bh(struct work_struct *ugli_api)
-{
- hysdn_card *card = container_of(ugli_api, hysdn_card, irq_queue);
- tErgDpram *dpr;
- int again;
- unsigned long flags;
-
- if (card->state != CARD_STATE_RUN)
- return; /* invalid call */
-
- dpr = card->dpram; /* point to DPRAM */
-
- spin_lock_irqsave(&card->hysdn_lock, flags);
- if (card->hw_lock) {
- spin_unlock_irqrestore(&card->hysdn_lock, flags); /* hardware currently unavailable */
- return;
- }
- card->hw_lock = 1; /* we now lock the hardware */
-
- do {
- again = 0; /* assume loop not to be repeated */
-
- if (!dpr->ToHyFlag) {
- /* we are able to send a buffer */
-
- if (hysdn_sched_tx(card, dpr->ToHyBuf, &dpr->ToHySize, &dpr->ToHyChannel,
- ERG_TO_HY_BUF_SIZE)) {
- dpr->ToHyFlag = 1; /* enable tx */
- again = 1; /* restart loop */
- }
- } /* we are able to send a buffer */
- if (dpr->ToPcFlag) {
- /* a message has arrived for us, handle it */
-
- if (hysdn_sched_rx(card, dpr->ToPcBuf, dpr->ToPcSize, dpr->ToPcChannel)) {
- dpr->ToPcFlag = 0; /* we worked the data */
- again = 1; /* restart loop */
- }
- } /* a message has arrived for us */
- if (again) {
- dpr->ToHyInt = 1;
- dpr->ToPcInt = 1; /* interrupt to E1 for all cards */
- } else
- card->hw_lock = 0; /* free hardware again */
- } while (again); /* until nothing more to do */
-
- spin_unlock_irqrestore(&card->hysdn_lock, flags);
-} /* ergo_irq_bh */
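The comment block above describes the classic top-half/bottom-half split: the hard IRQ handler only acknowledges the board and queues a work item, and all DPRAM traffic runs later in process context. In outline, as a sketch in which mycard, my_irq_pending() and ack_hw() are placeholders:

	/* top-half -> workqueue handoff, in outline (sketch) */
	static irqreturn_t my_isr(int irq, void *dev_id)
	{
		struct mycard *card = dev_id;

		if (!my_irq_pending(card))		/* shared line: not ours */
			return IRQ_NONE;
		ack_hw(card);				/* silence the board */
		schedule_work(&card->irq_queue);	/* defer the real work */
		return IRQ_HANDLED;
	}

	/* paired once at init time with:
	 *	INIT_WORK(&card->irq_queue, ergo_irq_bh);
	 */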
-
-
-/*********************************************************/
-/* stop the card (hardware reset) and disable interrupts */
-/*********************************************************/
-static void
-ergo_stopcard(hysdn_card *card)
-{
- unsigned long flags;
- unsigned char val;
-
- hysdn_net_release(card); /* first release the net device if existing */
-#ifdef CONFIG_HYSDN_CAPI
- hycapi_capi_stop(card);
-#endif /* CONFIG_HYSDN_CAPI */
- spin_lock_irqsave(&card->hysdn_lock, flags);
- val = bytein(card->iobase + PCI9050_INTR_REG); /* get actual value */
- val &= ~(PCI9050_INTR_REG_ENPCI | PCI9050_INTR_REG_EN1); /* mask irq */
- byteout(card->iobase + PCI9050_INTR_REG, val);
- card->irq_enabled = 0;
- byteout(card->iobase + PCI9050_USER_IO, PCI9050_E1_RESET); /* reset E1 processor */
- card->state = CARD_STATE_UNUSED;
- card->err_log_state = ERRLOG_STATE_OFF; /* currently no log active */
-
- spin_unlock_irqrestore(&card->hysdn_lock, flags);
-} /* ergo_stopcard */
-
-/**************************************************************************/
-/* enable or disable the card's error log. The event is queued if possible */
-/**************************************************************************/
-static void
-ergo_set_errlog_state(hysdn_card *card, int on)
-{
- unsigned long flags;
-
- if (card->state != CARD_STATE_RUN) {
- card->err_log_state = ERRLOG_STATE_OFF; /* must be off */
- return;
- }
- spin_lock_irqsave(&card->hysdn_lock, flags);
-
- if (((card->err_log_state == ERRLOG_STATE_OFF) && !on) ||
- ((card->err_log_state == ERRLOG_STATE_ON) && on)) {
- spin_unlock_irqrestore(&card->hysdn_lock, flags);
- return; /* nothing to do */
- }
- if (on)
- card->err_log_state = ERRLOG_STATE_START; /* request start */
- else
- card->err_log_state = ERRLOG_STATE_STOP; /* request stop */
-
- spin_unlock_irqrestore(&card->hysdn_lock, flags);
- schedule_work(&card->irq_queue);
-} /* ergo_set_errlog_state */
-
-/******************************************/
-/* test the card's RAM and return 0 if ok. */
-/******************************************/
-static const char TestText[36] = "This Message is filler, why read it";
-
-static int
-ergo_testram(hysdn_card *card)
-{
- tErgDpram *dpr = card->dpram;
-
- memset(dpr->TrapTable, 0, sizeof(dpr->TrapTable)); /* clear all Traps */
- dpr->ToHyInt = 1; /* E1 INTR state forced */
-
- memcpy(&dpr->ToHyBuf[ERG_TO_HY_BUF_SIZE - sizeof(TestText)], TestText,
- sizeof(TestText));
- if (memcmp(&dpr->ToHyBuf[ERG_TO_HY_BUF_SIZE - sizeof(TestText)], TestText,
- sizeof(TestText)))
- return (-1);
-
- memcpy(&dpr->ToPcBuf[ERG_TO_PC_BUF_SIZE - sizeof(TestText)], TestText,
- sizeof(TestText));
- if (memcmp(&dpr->ToPcBuf[ERG_TO_PC_BUF_SIZE - sizeof(TestText)], TestText,
- sizeof(TestText)))
- return (-1);
-
- return (0);
-} /* ergo_testram */
-
-/*****************************************************************************/
-/* this function is intended to write the stage 1 boot image to the card's */
-/* buffer. This is done in two steps. First the 1024 hi-words are written */
-/* (offs=0), then the 1024 lo-words (offs=2). The remaining DPRAM is cleared, */
-/* the PCI-write-buffers are flushed and the card is taken out of reset. */
-/* The function then waits for a reaction of the E1 processor or a timeout. */
-/* Negative return values are interpreted as errors. */
-/*****************************************************************************/
-static int
-ergo_writebootimg(struct HYSDN_CARD *card, unsigned char *buf,
- unsigned long offs)
-{
- unsigned char *dst;
- tErgDpram *dpram;
- int cnt = (BOOT_IMG_SIZE >> 2); /* number of words to move and swap (byte order!) */
-
- if (card->debug_flags & LOG_POF_CARD)
- hysdn_addlog(card, "ERGO: write bootldr offs=0x%lx ", offs);
-
- dst = card->dpram; /* pointer to start of DPRAM */
- dst += (offs + ERG_DPRAM_FILL_SIZE); /* offset in the DPRAM */
- while (cnt--) {
- *dst++ = *(buf + 1); /* high byte */
- *dst++ = *buf; /* low byte */
- dst += 2; /* point to next longword */
- buf += 2; /* buffer only filled with words */
- }
-
- /* if low words (offs = 2) have been written, clear the rest of the DPRAM, */
- /* flush the PCI-write-buffer and take the E1 out of reset */
- if (offs) {
- memset(card->dpram, 0, ERG_DPRAM_FILL_SIZE); /* clear the DPRAM area not yet written */
- dpram = card->dpram; /* get pointer to dpram structure */
- dpram->ToHyNoDpramErrLog = 0xFF; /* write a dpram register */
- while (!dpram->ToHyNoDpramErrLog); /* reread volatile register to flush PCI */
-
- byteout(card->iobase + PCI9050_USER_IO, PCI9050_E1_RUN); /* start E1 processor */
- /* the interrupts are still masked */
-
- msleep_interruptible(20); /* Timeout 20ms */
-
- if (((tDpramBootSpooler *) card->dpram)->Len != DPRAM_SPOOLER_DATA_SIZE) {
- if (card->debug_flags & LOG_POF_CARD)
- hysdn_addlog(card, "ERGO: write bootldr no answer");
- return (-ERR_BOOTIMG_FAIL);
- }
- } /* start_boot_img */
- return (0); /* successful */
-} /* ergo_writebootimg */
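-
-/* Aside (sketch, not driver code): how the copy loop above places one */
-/* 16-bit boot-image word into a 32-bit DPRAM slot - high byte first, low */
-/* byte second, upper two bytes of the slot untouched: */
-
-static void put_boot_word(unsigned char *slot, const unsigned char *word)
-{
-	slot[0] = word[1];	/* high byte of the word */
-	slot[1] = word[0];	/* low byte of the word */
-	/* slot[2] and slot[3] are skipped; the next longword starts there */
-}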
-
-/********************************************************************************/
-/* ergo_writebootseq writes the buffer containing len bytes to the E1 processor */
-/* using the boot spool mechanism. If everything works fine 0 is returned. In */
-/* case of errors a negative error value is returned. */
-/********************************************************************************/
-static int
-ergo_writebootseq(struct HYSDN_CARD *card, unsigned char *buf, int len)
-{
- tDpramBootSpooler *sp = (tDpramBootSpooler *) card->dpram;
- unsigned char *dst;
- unsigned char buflen;
- int nr_write;
- unsigned char tmp_rdptr;
- unsigned char wr_mirror;
- int i;
-
- if (card->debug_flags & LOG_POF_CARD)
- hysdn_addlog(card, "ERGO: write boot seq len=%d ", len);
-
- dst = sp->Data; /* point to data in spool structure */
- buflen = sp->Len; /* maximum len of spooled data */
- wr_mirror = sp->WrPtr; /* read only once */
-
- /* try until all bytes written or error */
- i = 0x1000; /* timeout value */
- while (len) {
-
- /* first determine the number of bytes that may be buffered */
- do {
- tmp_rdptr = sp->RdPtr; /* first read the pointer */
- i--; /* decrement timeout */
- } while (i && (tmp_rdptr != sp->RdPtr)); /* wait for stable pointer */
-
- if (!i) {
- if (card->debug_flags & LOG_POF_CARD)
- hysdn_addlog(card, "ERGO: write boot seq timeout");
- return (-ERR_BOOTSEQ_FAIL); /* value not stable -> timeout */
- }
- if ((nr_write = tmp_rdptr - wr_mirror - 1) < 0)
- nr_write += buflen; /* now we have the number of free bytes - 1 in the buffer */
-
- if (!nr_write)
- continue; /* no free bytes in buffer */
-
- if (nr_write > len)
- nr_write = len; /* limit if last few bytes */
- i = 0x1000; /* reset timeout value */
-
- /* now we know how many bytes we may put into the buffer */
- len -= nr_write; /* we can safely adjust len before output */
- while (nr_write--) {
- *(dst + wr_mirror) = *buf++; /* output one byte */
- if (++wr_mirror >= buflen)
- wr_mirror = 0;
- sp->WrPtr = wr_mirror; /* announce the next byte to E1 */
- } /* while (nr_write) */
-
- } /* while (len) */
- return (0);
-} /* ergo_writebootseq */
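-
-/* Aside (sketch): the RdPtr/WrPtr arithmetic above is the classic ring */
-/* buffer that keeps one slot open, so rd == wr always means "empty" and */
-/* at most buflen - 1 bytes are ever in flight: */
-
-static int ring_free(unsigned char rd, unsigned char wr, unsigned char buflen)
-{
-	int n = rd - wr - 1;	/* bytes free before the writer catches the reader */
-
-	if (n < 0)
-		n += buflen;	/* wrap around */
-	return n;		/* e.g. rd == wr gives buflen - 1, never buflen */
-}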
-
-/***********************************************************************************/
-/* ergo_waitpofready waits a maximum of 10 seconds for the completion of the */
-/* boot process. If the boot has been successful 0 is returned, otherwise a */
-/* negative error code is returned. */
-/***********************************************************************************/
-static int
-ergo_waitpofready(struct HYSDN_CARD *card)
-{
- tErgDpram *dpr = card->dpram; /* pointer to DPRAM structure */
- int timecnt = 10000 / 50; /* timeout is 10 secs max. */
- unsigned long flags;
- int msg_size;
- int i;
-
- if (card->debug_flags & LOG_POF_CARD)
- hysdn_addlog(card, "ERGO: waiting for pof ready");
- while (timecnt--) {
- /* wait until timeout */
-
- if (dpr->ToPcFlag) {
- /* data has arrived */
-
- if ((dpr->ToPcChannel != CHAN_SYSTEM) ||
- (dpr->ToPcSize < MIN_RDY_MSG_SIZE) ||
- (dpr->ToPcSize > MAX_RDY_MSG_SIZE) ||
- ((*(unsigned long *) dpr->ToPcBuf) != RDY_MAGIC))
- break; /* an error occurred */
-
- /* Check for additional data delivered during SysReady */
- msg_size = dpr->ToPcSize - RDY_MAGIC_SIZE;
- if (msg_size > 0)
- if (EvalSysrTokData(card, dpr->ToPcBuf + RDY_MAGIC_SIZE, msg_size))
- break;
-
- if (card->debug_flags & LOG_POF_RECORD)
- hysdn_addlog(card, "ERGO: pof boot success");
- spin_lock_irqsave(&card->hysdn_lock, flags);
-
- card->state = CARD_STATE_RUN; /* now card is running */
- /* enable the card's interrupt */
- byteout(card->iobase + PCI9050_INTR_REG,
- bytein(card->iobase + PCI9050_INTR_REG) |
- (PCI9050_INTR_REG_ENPCI | PCI9050_INTR_REG_EN1));
- card->irq_enabled = 1; /* we are ready to receive interrupts */
-
- dpr->ToPcFlag = 0; /* reset data indicator */
- dpr->ToHyInt = 1;
- dpr->ToPcInt = 1; /* interrupt to E1 for all cards */
-
- spin_unlock_irqrestore(&card->hysdn_lock, flags);
- if ((hynet_enable & (1 << card->myid))
- && (i = hysdn_net_create(card)))
- {
- ergo_stopcard(card);
- card->state = CARD_STATE_BOOTERR;
- return (i);
- }
-#ifdef CONFIG_HYSDN_CAPI
- if ((i = hycapi_capi_create(card))) {
- printk(KERN_WARNING "HYSDN: failed to create capi-interface.\n");
- }
-#endif /* CONFIG_HYSDN_CAPI */
- return (0); /* success */
- } /* data has arrived */
- msleep_interruptible(50); /* Timeout 50ms */
- } /* wait until timeout */
-
- if (card->debug_flags & LOG_POF_CARD)
- hysdn_addlog(card, "ERGO: pof boot ready timeout");
- return (-ERR_POF_TIMEOUT);
-} /* ergo_waitpofready */
-
-
-
-/************************************************************************************/
-/* release the card's hardware. Before releasing, disable interrupts and do a */
-/* hardware reset. Also unmap the dpram. */
-/* Use only during module release. */
-/************************************************************************************/
-static void
-ergo_releasehardware(hysdn_card *card)
-{
- ergo_stopcard(card); /* first stop the card if not already done */
- free_irq(card->irq, card); /* release interrupt */
- release_region(card->iobase + PCI9050_INTR_REG, 1); /* release all io ports */
- release_region(card->iobase + PCI9050_USER_IO, 1);
- iounmap(card->dpram);
- card->dpram = NULL; /* release shared mem */
-} /* ergo_releasehardware */
-
-
-/*********************************************************************************/
-/* acquire the needed hardware ports and map the dpram. If an error occurs, a nonzero */
-/* value is returned. */
-/* Use only during module init. */
-/*********************************************************************************/
-int
-ergo_inithardware(hysdn_card *card)
-{
- if (!request_region(card->iobase + PCI9050_INTR_REG, 1, "HYSDN"))
- return (-1);
- if (!request_region(card->iobase + PCI9050_USER_IO, 1, "HYSDN")) {
- release_region(card->iobase + PCI9050_INTR_REG, 1);
- return (-1); /* ports already in use */
- }
- card->memend = card->membase + ERG_DPRAM_PAGE_SIZE - 1;
- if (!(card->dpram = ioremap(card->membase, ERG_DPRAM_PAGE_SIZE))) {
- release_region(card->iobase + PCI9050_INTR_REG, 1);
- release_region(card->iobase + PCI9050_USER_IO, 1);
- return (-1);
- }
-
- ergo_stopcard(card); /* disable interrupts */
- if (request_irq(card->irq, ergo_interrupt, IRQF_SHARED, "HYSDN", card)) {
- ergo_releasehardware(card); /* return the acquired hardware */
- return (-1);
- }
- /* success, now setup the function pointers */
- card->stopcard = ergo_stopcard;
- card->releasehardware = ergo_releasehardware;
- card->testram = ergo_testram;
- card->writebootimg = ergo_writebootimg;
- card->writebootseq = ergo_writebootseq;
- card->waitpofready = ergo_waitpofready;
- card->set_errlog_state = ergo_set_errlog_state;
- INIT_WORK(&card->irq_queue, ergo_irq_bh);
- spin_lock_init(&card->hysdn_lock);
-
- return (0);
-} /* ergo_inithardware */
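-
-/* Aside (sketch): the acquire-or-roll-back sequence above, rewritten in */
-/* the more common goto-unwind style, for comparison only: */
-
-static int ergo_inithardware_sketch(hysdn_card *card)
-{
-	if (!request_region(card->iobase + PCI9050_INTR_REG, 1, "HYSDN"))
-		return -1;
-	if (!request_region(card->iobase + PCI9050_USER_IO, 1, "HYSDN"))
-		goto err_intr;
-	card->dpram = ioremap(card->membase, ERG_DPRAM_PAGE_SIZE);
-	if (!card->dpram)
-		goto err_io;
-	return 0;
-
-err_io:
-	release_region(card->iobase + PCI9050_USER_IO, 1);
-err_intr:
-	release_region(card->iobase + PCI9050_INTR_REG, 1);
-	return -1;
-}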
diff --git a/drivers/staging/isdn/hysdn/boardergo.h b/drivers/staging/isdn/hysdn/boardergo.h
deleted file mode 100644
index e99bd81c4034..000000000000
--- a/drivers/staging/isdn/hysdn/boardergo.h
+++ /dev/null
@@ -1,100 +0,0 @@
-/* $Id: boardergo.h,v 1.2.6.1 2001/09/23 22:24:54 kai Exp $
- *
- * Linux driver for HYSDN cards, definitions for ergo type boards (buffers..).
- *
- * Author Werner Cornelius (werner@titro.de) for Hypercope GmbH
- * Copyright 1999 by Werner Cornelius (werner@titro.de)
- *
- * This software may be used and distributed according to the terms
- * of the GNU General Public License, incorporated herein by reference.
- *
- */
-
-
-/************************************************/
-/* defines for the dual port memory of the card */
-/************************************************/
-#define ERG_DPRAM_PAGE_SIZE 0x2000 /* DPRAM occupies an 8K page */
-#define BOOT_IMG_SIZE 4096
-#define ERG_DPRAM_FILL_SIZE (ERG_DPRAM_PAGE_SIZE - BOOT_IMG_SIZE)
-
-#define ERG_TO_HY_BUF_SIZE 0x0E00 /* 3584 bytes buffer size to card */
-#define ERG_TO_PC_BUF_SIZE 0x0E00 /* 3584 bytes to PC, too */
-
-/* following DPRAM layout copied from OS2-driver boarderg.h */
-typedef struct ErgDpram_tag {
- /*0000 */ unsigned char ToHyBuf[ERG_TO_HY_BUF_SIZE];
- /*0E00 */ unsigned char ToPcBuf[ERG_TO_PC_BUF_SIZE];
-
- /*1C00 */ unsigned char bSoftUart[SIZE_RSV_SOFT_UART];
- /* size 0x1B0 */
-
- /*1DB0 *//* tErrLogEntry */ unsigned char volatile ErrLogMsg[64];
- /* size 64 bytes */
- /*1DB0 unsigned long ulErrType; */
- /*1DB4 unsigned long ulErrSubtype; */
- /*1DB8 unsigned long ucTextSize; */
- /*1DB9 unsigned long ucText[ERRLOG_TEXT_SIZE]; *//* ASCIIZ of len ucTextSize-1 */
- /*1DF0 */
-
- /*1DF0 */ unsigned short volatile ToHyChannel;
- /*1DF2 */ unsigned short volatile ToHySize;
- /*1DF4 */ unsigned char volatile ToHyFlag;
- /* !=0: msg for Hy waiting */
- /*1DF5 */ unsigned char volatile ToPcFlag;
- /* !=0: msg for PC waiting */
- /*1DF6 */ unsigned short volatile ToPcChannel;
- /*1DF8 */ unsigned short volatile ToPcSize;
- /*1DFA */ unsigned char bRes1DBA[0x1E00 - 0x1DFA];
- /* 6 bytes */
-
- /*1E00 */ unsigned char bRestOfEntryTbl[0x1F00 - 0x1E00];
- /*1F00 */ unsigned long TrapTable[62];
- /*1FF8 */ unsigned char bRes1FF8[0x1FFB - 0x1FF8];
- /* low part of reset vector */
- /*1FFB */ unsigned char ToPcIntMetro;
- /* notes:
- * - metro has 32-bit boot ram - accessing
- * ToPcInt and ToHyInt would be the same;
- * so we moved ToPcInt to 1FFB.
- * Because on the PC side both vars are
- * read-only (resetting on int from E1 to PC),
- * we can read both vars on both cards
- * without destroying anything.
- * - 1FFB is the high byte of the reset vector,
- * so E1 side should NOT change this byte
- * when writing!
- */
- /*1FFC */ unsigned char volatile ToHyNoDpramErrLog;
- /* note: ToHyNoDpramErrLog is used to inform
- * boot loader, not to use DPRAM based
- * ErrLog; when DOS driver is rewritten
- * this becomes obsolete
- */
- /*1FFD */ unsigned char bRes1FFD;
- /*1FFE */ unsigned char ToPcInt;
- /* E1_intclear; on CHAMP2: E1_intset */
- /*1FFF */ unsigned char ToHyInt;
- /* E1_intset; on CHAMP2: E1_intclear */
-} tErgDpram;
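-
-/* Aside (hypothetical checks, not in the original header): the member */
-/* offsets are documented above, so the layout can be pinned down at */
-/* compile time. This only holds where unsigned long is 32 bits, which */
-/* the original layout assumes (TrapTable[62] must end at 0x1FF8): */
-
-#include <stddef.h>
-_Static_assert(offsetof(tErgDpram, ToPcBuf) == 0x0E00, "ToPcBuf offset");
-_Static_assert(offsetof(tErgDpram, TrapTable) == 0x1F00, "TrapTable offset");
-_Static_assert(offsetof(tErgDpram, ToHyInt) == 0x1FFF, "ToHyInt offset");
-_Static_assert(sizeof(tErgDpram) == ERG_DPRAM_PAGE_SIZE, "must cover the 8K page");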
-
-/**********************************************/
-/* PCI9050 controller local register offsets: */
-/* copied from boarderg.c */
-/**********************************************/
-#define PCI9050_INTR_REG 0x4C /* Interrupt register */
-#define PCI9050_USER_IO 0x51 /* User I/O register */
-
-/* bitmask for PCI9050_INTR_REG: */
-#define PCI9050_INTR_REG_EN1 0x01 /* 1= enable (def.), 0= disable */
-#define PCI9050_INTR_REG_POL1 0x02 /* 1= active high (def.), 0= active low */
-#define PCI9050_INTR_REG_STAT1 0x04 /* 1= intr. active, 0= intr. not active (def.) */
-#define PCI9050_INTR_REG_ENPCI 0x40 /* 1= PCI interrupts enable (def.) */
-
-/* bitmask for PCI9050_USER_IO: */
-#define PCI9050_USER_IO_EN3 0x02 /* 1= disable , 0= enable (def.) */
-#define PCI9050_USER_IO_DIR3 0x04 /* 1= output (def.), 0= input */
-#define PCI9050_USER_IO_DAT3 0x08 /* 1= high (def.) , 0= low */
-
-#define PCI9050_E1_RESET (PCI9050_USER_IO_DIR3) /* 0x04 */
-#define PCI9050_E1_RUN (PCI9050_USER_IO_DAT3 | PCI9050_USER_IO_DIR3) /* 0x0C */
diff --git a/drivers/staging/isdn/hysdn/hycapi.c b/drivers/staging/isdn/hysdn/hycapi.c
deleted file mode 100644
index a2c15cd7bf67..000000000000
--- a/drivers/staging/isdn/hysdn/hycapi.c
+++ /dev/null
@@ -1,785 +0,0 @@
-/* $Id: hycapi.c,v 1.8.6.4 2001/09/23 22:24:54 kai Exp $
- *
- * Linux driver for HYSDN cards, CAPI2.0-Interface.
- *
- * Author Ulrich Albrecht <u.albrecht@hypercope.de> for Hypercope GmbH
- * Copyright 2000 by Hypercope GmbH
- *
- * This software may be used and distributed according to the terms
- * of the GNU General Public License, incorporated herein by reference.
- *
- */
-
-#include <linux/module.h>
-#include <linux/proc_fs.h>
-#include <linux/seq_file.h>
-#include <linux/signal.h>
-#include <linux/kernel.h>
-#include <linux/skbuff.h>
-#include <linux/netdevice.h>
-#include <linux/slab.h>
-
-#define VER_DRIVER 0
-#define VER_CARDTYPE 1
-#define VER_HWID 2
-#define VER_SERIAL 3
-#define VER_OPTION 4
-#define VER_PROTO 5
-#define VER_PROFILE 6
-#define VER_CAPI 7
-
-#include "hysdn_defs.h"
-#include <linux/kernelcapi.h>
-
-static char hycapi_revision[] = "$Revision: 1.8.6.4 $";
-
-unsigned int hycapi_enable = 0xffffffff;
-module_param(hycapi_enable, uint, 0);
-
-typedef struct _hycapi_appl {
- unsigned int ctrl_mask;
- capi_register_params rp;
- struct sk_buff *listen_req[CAPI_MAXCONTR];
-} hycapi_appl;
-
-static hycapi_appl hycapi_applications[CAPI_MAXAPPL];
-
-static u16 hycapi_send_message(struct capi_ctr *ctrl, struct sk_buff *skb);
-
-static inline int _hycapi_appCheck(int app_id, int ctrl_no)
-{
- if ((ctrl_no <= 0) || (ctrl_no > CAPI_MAXCONTR) || (app_id <= 0) ||
- (app_id > CAPI_MAXAPPL))
- {
- printk(KERN_ERR "HYCAPI: Invalid request app_id %d for controller %d", app_id, ctrl_no);
- return -1;
- }
- return ((hycapi_applications[app_id - 1].ctrl_mask & (1 << (ctrl_no-1))) != 0);
-}
-
-/******************************
-Kernel-Capi callback reset_ctr
-******************************/
-
-static void
-hycapi_reset_ctr(struct capi_ctr *ctrl)
-{
- hycapictrl_info *cinfo = ctrl->driverdata;
-
-#ifdef HYCAPI_PRINTFNAMES
- printk(KERN_NOTICE "HYCAPI hycapi_reset_ctr\n");
-#endif
- capilib_release(&cinfo->ncci_head);
- capi_ctr_down(ctrl);
-}
-
-/******************************
-Kernel-Capi callback remove_ctr
-******************************/
-
-static void
-hycapi_remove_ctr(struct capi_ctr *ctrl)
-{
- int i;
- hycapictrl_info *cinfo = NULL;
- hysdn_card *card = NULL;
-#ifdef HYCAPI_PRINTFNAMES
- printk(KERN_NOTICE "HYCAPI hycapi_remove_ctr\n");
-#endif
- cinfo = (hycapictrl_info *)(ctrl->driverdata);
- if (!cinfo) {
- printk(KERN_ERR "No hycapictrl_info set!");
- return;
- }
- card = cinfo->card;
- capi_ctr_suspend_output(ctrl);
- for (i = 0; i < CAPI_MAXAPPL; i++) {
- if (hycapi_applications[i].listen_req[ctrl->cnr - 1]) {
- kfree_skb(hycapi_applications[i].listen_req[ctrl->cnr - 1]);
- hycapi_applications[i].listen_req[ctrl->cnr - 1] = NULL;
- }
- }
- detach_capi_ctr(ctrl);
- ctrl->driverdata = NULL;
- kfree(card->hyctrlinfo);
-
-
- card->hyctrlinfo = NULL;
-}
-
-/***********************************************************
-
-Queue a CAPI-message to the controller.
-
-***********************************************************/
-
-static void
-hycapi_sendmsg_internal(struct capi_ctr *ctrl, struct sk_buff *skb)
-{
- hycapictrl_info *cinfo = (hycapictrl_info *)(ctrl->driverdata);
- hysdn_card *card = cinfo->card;
-
- spin_lock_irq(&cinfo->lock);
-#ifdef HYCAPI_PRINTFNAMES
- printk(KERN_NOTICE "hycapi_send_message\n");
-#endif
- cinfo->skbs[cinfo->in_idx++] = skb; /* add to buffer list */
- if (cinfo->in_idx >= HYSDN_MAX_CAPI_SKB)
- cinfo->in_idx = 0; /* wrap around */
- cinfo->sk_count++; /* adjust counter */
- if (cinfo->sk_count >= HYSDN_MAX_CAPI_SKB) {
- /* inform upper layers we're full */
- printk(KERN_ERR "HYSDN Card%d: CAPI-buffer overrun!\n",
- card->myid);
- capi_ctr_suspend_output(ctrl);
- }
- cinfo->tx_skb = skb;
- spin_unlock_irq(&cinfo->lock);
- schedule_work(&card->irq_queue);
-}
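-
-/* Aside (sketch with illustrative names): hycapi_sendmsg_internal and */
-/* hycapi_tx_capiack together implement a fixed-size ring of queued skbs, */
-/* tracked by a producer index, a consumer index and a fill count: */
-
-#define RING_SLOTS 20			/* mirrors HYSDN_MAX_CAPI_SKB */
-
-struct pkt_ring {
-	void *slot[RING_SLOTS];
-	int in_idx, out_idx, count;
-};
-
-static int ring_put(struct pkt_ring *r, void *pkt)
-{
-	r->slot[r->in_idx] = pkt;
-	if (++r->in_idx >= RING_SLOTS)
-		r->in_idx = 0;		/* wrap the producer index */
-	return ++r->count >= RING_SLOTS; /* nonzero: caller should suspend output */
-}
-
-static void *ring_get(struct pkt_ring *r)
-{
-	void *pkt;
-
-	if (!r->count)
-		return NULL;		/* ring empty */
-	pkt = r->slot[r->out_idx];
-	r->slot[r->out_idx] = NULL;
-	if (++r->out_idx >= RING_SLOTS)
-		r->out_idx = 0;		/* wrap the consumer index */
-	r->count--;
-	return pkt;
-}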
-
-/***********************************************************
-hycapi_register_internal
-
-Send down the CAPI_REGISTER-Command to the controller.
-This function is also used after the adapter has been rebooted, to
-re-register any applications in the private list.
-
-************************************************************/
-
-static void
-hycapi_register_internal(struct capi_ctr *ctrl, __u16 appl,
- capi_register_params *rp)
-{
- char ExtFeatureDefaults[] = "49 /0/0/0/0,*/1,*/2,*/3,*/4,*/5,*/6,*/7,*/8,*/9,*";
- hycapictrl_info *cinfo = (hycapictrl_info *)(ctrl->driverdata);
- hysdn_card *card = cinfo->card;
- struct sk_buff *skb;
- __u16 len;
- __u8 _command = 0xa0, _subcommand = 0x80;
- __u16 MessageNumber = 0x0000;
- __u16 MessageBufferSize = 0;
- int slen = strlen(ExtFeatureDefaults);
-#ifdef HYCAPI_PRINTFNAMES
- printk(KERN_NOTICE "hycapi_register_appl\n");
-#endif
- MessageBufferSize = rp->level3cnt * rp->datablkcnt * rp->datablklen;
-
- len = CAPI_MSG_BASELEN + 8 + slen + 1;
- if (!(skb = alloc_skb(len, GFP_ATOMIC))) {
- printk(KERN_ERR "HYSDN card%d: memory squeeze in hycapi_register_appl\n",
- card->myid);
- return;
- }
- skb_put_data(skb, &len, sizeof(__u16));
- skb_put_data(skb, &appl, sizeof(__u16));
- skb_put_data(skb, &_command, sizeof(__u8));
- skb_put_data(skb, &_subcommand, sizeof(__u8));
- skb_put_data(skb, &MessageNumber, sizeof(__u16));
- skb_put_data(skb, &MessageBufferSize, sizeof(__u16));
- skb_put_data(skb, &(rp->level3cnt), sizeof(__u16));
- skb_put_data(skb, &(rp->datablkcnt), sizeof(__u16));
- skb_put_data(skb, &(rp->datablklen), sizeof(__u16));
- skb_put_data(skb, ExtFeatureDefaults, slen);
- hycapi_applications[appl - 1].ctrl_mask |= (1 << (ctrl->cnr - 1));
- hycapi_send_message(ctrl, skb);
-}
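-
-/* Aside (illustration only): the registration message assembled above, */
-/* drawn as a packed struct. The driver builds it field by field with */
-/* skb_put_data, with multi-byte fields in host byte order: */
-
-struct hycapi_register_msg {
-	unsigned short len;		/* CAPI_MSG_BASELEN + 8 + strlen + 1 */
-	unsigned short appl;
-	unsigned char command;		/* 0xa0 */
-	unsigned char subcommand;	/* 0x80 */
-	unsigned short msg_number;
-	unsigned short msg_buf_size;	/* level3cnt * datablkcnt * datablklen */
-	unsigned short level3cnt;
-	unsigned short datablkcnt;
-	unsigned short datablklen;
-	/* followed by the ExtFeatureDefaults string (without its terminator) */
-} __attribute__((packed));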
-
-/************************************************************
-hycapi_restart_internal
-
-After an adapter has been rebooted, re-register all applications and
-re-send a LISTEN_REQ (if one had been sent before).
-
-*************************************************************/
-
-static void hycapi_restart_internal(struct capi_ctr *ctrl)
-{
- int i;
- struct sk_buff *skb;
-#ifdef HYCAPI_PRINTFNAMES
- printk(KERN_WARNING "HYSDN: hycapi_restart_internal");
-#endif
- for (i = 0; i < CAPI_MAXAPPL; i++) {
- if (_hycapi_appCheck(i + 1, ctrl->cnr) == 1) {
- hycapi_register_internal(ctrl, i + 1,
- &hycapi_applications[i].rp);
- if (hycapi_applications[i].listen_req[ctrl->cnr - 1]) {
- skb = skb_copy(hycapi_applications[i].listen_req[ctrl->cnr - 1], GFP_ATOMIC);
- hycapi_sendmsg_internal(ctrl, skb);
- }
- }
- }
-}
-
-/*************************************************************
-Register an application.
-Error-checking is done for CAPI-compliance.
-
-The application is recorded in the internal list.
-*************************************************************/
-
-static void
-hycapi_register_appl(struct capi_ctr *ctrl, __u16 appl,
- capi_register_params *rp)
-{
- int MaxLogicalConnections = 0, MaxBDataBlocks = 0, MaxBDataLen = 0;
- hycapictrl_info *cinfo = (hycapictrl_info *)(ctrl->driverdata);
- hysdn_card *card = cinfo->card;
- int chk = _hycapi_appCheck(appl, ctrl->cnr);
- if (chk < 0) {
- return;
- }
- if (chk == 1) {
- printk(KERN_INFO "HYSDN: apl %d already registered\n", appl);
- return;
- }
- MaxBDataBlocks = rp->datablkcnt > CAPI_MAXDATAWINDOW ? CAPI_MAXDATAWINDOW : rp->datablkcnt;
- rp->datablkcnt = MaxBDataBlocks;
- MaxBDataLen = rp->datablklen < 1024 ? 1024 : rp->datablklen;
- rp->datablklen = MaxBDataLen;
-
- MaxLogicalConnections = rp->level3cnt;
- if (MaxLogicalConnections < 0) {
- MaxLogicalConnections = card->bchans * -MaxLogicalConnections;
- }
- if (MaxLogicalConnections == 0) {
- MaxLogicalConnections = card->bchans;
- }
-
- rp->level3cnt = MaxLogicalConnections;
- memcpy(&hycapi_applications[appl - 1].rp,
- rp, sizeof(capi_register_params));
-}
-
-/*********************************************************************
-
-hycapi_release_internal
-
-Send down a CAPI_RELEASE to the controller.
-*********************************************************************/
-
-static void hycapi_release_internal(struct capi_ctr *ctrl, __u16 appl)
-{
- hycapictrl_info *cinfo = (hycapictrl_info *)(ctrl->driverdata);
- hysdn_card *card = cinfo->card;
- struct sk_buff *skb;
- __u16 len;
- __u8 _command = 0xa1, _subcommand = 0x80;
- __u16 MessageNumber = 0x0000;
-
- capilib_release_appl(&cinfo->ncci_head, appl);
-
-#ifdef HYCAPI_PRINTFNAMES
- printk(KERN_NOTICE "hycapi_release_appl\n");
-#endif
- len = CAPI_MSG_BASELEN;
- if (!(skb = alloc_skb(len, GFP_ATOMIC))) {
- printk(KERN_ERR "HYSDN card%d: memory squeeze in hycapi_register_appl\n",
- card->myid);
- return;
- }
- skb_put_data(skb, &len, sizeof(__u16));
- skb_put_data(skb, &appl, sizeof(__u16));
- skb_put_data(skb, &_command, sizeof(__u8));
- skb_put_data(skb, &_subcommand, sizeof(__u8));
- skb_put_data(skb, &MessageNumber, sizeof(__u16));
- hycapi_send_message(ctrl, skb);
- hycapi_applications[appl - 1].ctrl_mask &= ~(1 << (ctrl->cnr - 1));
-}
-
-/******************************************************************
-hycapi_release_appl
-
-Release the application from the internal list and remove its
-registration at controller level.
-******************************************************************/
-
-static void
-hycapi_release_appl(struct capi_ctr *ctrl, __u16 appl)
-{
- int chk;
-
- chk = _hycapi_appCheck(appl, ctrl->cnr);
- if (chk < 0) {
- printk(KERN_ERR "HYCAPI: Releasing invalid appl %d on controller %d\n", appl, ctrl->cnr);
- return;
- }
- if (hycapi_applications[appl - 1].listen_req[ctrl->cnr - 1]) {
- kfree_skb(hycapi_applications[appl - 1].listen_req[ctrl->cnr - 1]);
- hycapi_applications[appl - 1].listen_req[ctrl->cnr - 1] = NULL;
- }
- if (chk == 1)
- {
- hycapi_release_internal(ctrl, appl);
- }
-}
-
-
-/**************************************************************
-Kill a single controller.
-**************************************************************/
-
-int hycapi_capi_release(hysdn_card *card)
-{
- hycapictrl_info *cinfo = card->hyctrlinfo;
- struct capi_ctr *ctrl;
-#ifdef HYCAPI_PRINTFNAMES
- printk(KERN_NOTICE "hycapi_capi_release\n");
-#endif
- if (cinfo) {
- ctrl = &cinfo->capi_ctrl;
- hycapi_remove_ctr(ctrl);
- }
- return 0;
-}
-
-/**************************************************************
-hycapi_capi_stop
-
-Stop CAPI-Output on a card. (e.g. during reboot)
-***************************************************************/
-
-int hycapi_capi_stop(hysdn_card *card)
-{
- hycapictrl_info *cinfo = card->hyctrlinfo;
- struct capi_ctr *ctrl;
-#ifdef HYCAPI_PRINTFNAMES
- printk(KERN_NOTICE "hycapi_capi_stop\n");
-#endif
- if (cinfo) {
- ctrl = &cinfo->capi_ctrl;
-/* ctrl->suspend_output(ctrl); */
- capi_ctr_down(ctrl);
- }
- return 0;
-}
-
-/***************************************************************
-hycapi_send_message
-
-Send a message to the controller.
-
-Messages are parsed for their Command/Subcommand-type, and appropriate
-actions are performed.
-
-Note that we have to muck around with a 64Bit-DATA_REQ as there are
-firmware-releases that do not check the MsgLen-Indication!
-
-***************************************************************/
-
-static u16 hycapi_send_message(struct capi_ctr *ctrl, struct sk_buff *skb)
-{
- __u16 appl_id;
- int _len, _len2;
- __u8 msghead[64];
- hycapictrl_info *cinfo = ctrl->driverdata;
- u16 retval = CAPI_NOERROR;
-
- appl_id = CAPIMSG_APPID(skb->data);
- switch (_hycapi_appCheck(appl_id, ctrl->cnr))
- {
- case 0:
-/* printk(KERN_INFO "Need to register\n"); */
- hycapi_register_internal(ctrl,
- appl_id,
- &(hycapi_applications[appl_id - 1].rp));
- break;
- case 1:
- break;
- default:
- printk(KERN_ERR "HYCAPI: Controller mixup!\n");
- retval = CAPI_ILLAPPNR;
- goto out;
- }
- switch (CAPIMSG_CMD(skb->data)) {
- case CAPI_DISCONNECT_B3_RESP:
- capilib_free_ncci(&cinfo->ncci_head, appl_id,
- CAPIMSG_NCCI(skb->data));
- break;
- case CAPI_DATA_B3_REQ:
- _len = CAPIMSG_LEN(skb->data);
- if (_len > 22) {
- _len2 = _len - 22;
- skb_copy_from_linear_data(skb, msghead, 22);
- skb_copy_to_linear_data_offset(skb, _len2,
- msghead, 22);
- skb_pull(skb, _len2);
- CAPIMSG_SETLEN(skb->data, 22);
- retval = capilib_data_b3_req(&cinfo->ncci_head,
- CAPIMSG_APPID(skb->data),
- CAPIMSG_NCCI(skb->data),
- CAPIMSG_MSGID(skb->data));
- }
- break;
- case CAPI_LISTEN_REQ:
- if (hycapi_applications[appl_id - 1].listen_req[ctrl->cnr - 1])
- {
- kfree_skb(hycapi_applications[appl_id - 1].listen_req[ctrl->cnr - 1]);
- hycapi_applications[appl_id - 1].listen_req[ctrl->cnr - 1] = NULL;
- }
- if (!(hycapi_applications[appl_id - 1].listen_req[ctrl->cnr - 1] = skb_copy(skb, GFP_ATOMIC)))
- {
- printk(KERN_ERR "HYSDN: memory squeeze in private_listen\n");
- }
- break;
- default:
- break;
- }
-out:
- if (retval == CAPI_NOERROR)
- hycapi_sendmsg_internal(ctrl, skb);
- else
- dev_kfree_skb_any(skb);
-
- return retval;
-}
-
-static int hycapi_proc_show(struct seq_file *m, void *v)
-{
- struct capi_ctr *ctrl = m->private;
- hycapictrl_info *cinfo = (hycapictrl_info *)(ctrl->driverdata);
- hysdn_card *card = cinfo->card;
- char *s;
-
- seq_printf(m, "%-16s %s\n", "name", cinfo->cardname);
- seq_printf(m, "%-16s 0x%x\n", "io", card->iobase);
- seq_printf(m, "%-16s %d\n", "irq", card->irq);
-
- switch (card->brdtype) {
- case BD_PCCARD: s = "HYSDN Hycard"; break;
- case BD_ERGO: s = "HYSDN Ergo2"; break;
- case BD_METRO: s = "HYSDN Metro4"; break;
- case BD_CHAMP2: s = "HYSDN Champ2"; break;
- case BD_PLEXUS: s = "HYSDN Plexus30"; break;
- default: s = "???"; break;
- }
- seq_printf(m, "%-16s %s\n", "type", s);
- if ((s = cinfo->version[VER_DRIVER]) != NULL)
- seq_printf(m, "%-16s %s\n", "ver_driver", s);
- if ((s = cinfo->version[VER_CARDTYPE]) != NULL)
- seq_printf(m, "%-16s %s\n", "ver_cardtype", s);
- if ((s = cinfo->version[VER_SERIAL]) != NULL)
- seq_printf(m, "%-16s %s\n", "ver_serial", s);
-
- seq_printf(m, "%-16s %s\n", "cardname", cinfo->cardname);
-
- return 0;
-}
-
-/**************************************************************
-hycapi_load_firmware
-
-This does NOT load any firmware, but the callback is nevertheless
-required when registering the capi-interface.
-
-**************************************************************/
-
-static int hycapi_load_firmware(struct capi_ctr *ctrl, capiloaddata *data)
-{
-#ifdef HYCAPI_PRINTFNAMES
- printk(KERN_NOTICE "hycapi_load_firmware\n");
-#endif
- return 0;
-}
-
-
-static char *hycapi_procinfo(struct capi_ctr *ctrl)
-{
- hycapictrl_info *cinfo = (hycapictrl_info *)(ctrl->driverdata);
-#ifdef HYCAPI_PRINTFNAMES
- printk(KERN_NOTICE "%s\n", __func__);
-#endif
- if (!cinfo)
- return "";
- sprintf(cinfo->infobuf, "%s %s 0x%x %d %s",
- cinfo->cardname[0] ? cinfo->cardname : "-",
- cinfo->version[VER_DRIVER] ? cinfo->version[VER_DRIVER] : "-",
- cinfo->card ? cinfo->card->iobase : 0x0,
- cinfo->card ? cinfo->card->irq : 0,
- hycapi_revision
- );
- return cinfo->infobuf;
-}
-
-/******************************************************************
-hycapi_rx_capipkt
-
-Receive a capi-message.
-
-All B3_DATA_IND are converted to a 64K-extension compatible format.
-New NCCIs are created if necessary.
-*******************************************************************/
-
-void
-hycapi_rx_capipkt(hysdn_card *card, unsigned char *buf, unsigned short len)
-{
- struct sk_buff *skb;
- hycapictrl_info *cinfo = card->hyctrlinfo;
- struct capi_ctr *ctrl;
- __u16 ApplId;
- __u16 MsgLen, info;
- __u16 len2, CapiCmd;
- __u32 CP64[2] = {0, 0};
-#ifdef HYCAPI_PRINTFNAMES
- printk(KERN_NOTICE "hycapi_rx_capipkt\n");
-#endif
- if (!cinfo) {
- return;
- }
- ctrl = &cinfo->capi_ctrl;
- if (len < CAPI_MSG_BASELEN) {
- printk(KERN_ERR "HYSDN Card%d: invalid CAPI-message, length %d!\n",
- card->myid, len);
- return;
- }
- MsgLen = CAPIMSG_LEN(buf);
- ApplId = CAPIMSG_APPID(buf);
- CapiCmd = CAPIMSG_CMD(buf);
-
- if ((CapiCmd == CAPI_DATA_B3_IND) && (MsgLen < 30)) {
- len2 = len + (30 - MsgLen);
- if (!(skb = alloc_skb(len2, GFP_ATOMIC))) {
- printk(KERN_ERR "HYSDN Card%d: incoming packet dropped\n",
- card->myid);
- return;
- }
- skb_put_data(skb, buf, MsgLen);
- skb_put_data(skb, CP64, 2 * sizeof(__u32));
- skb_put_data(skb, buf + MsgLen, len - MsgLen);
- CAPIMSG_SETLEN(skb->data, 30);
- } else {
- if (!(skb = alloc_skb(len, GFP_ATOMIC))) {
- printk(KERN_ERR "HYSDN Card%d: incoming packet dropped\n",
- card->myid);
- return;
- }
- skb_put_data(skb, buf, len);
- }
- switch (CAPIMSG_CMD(skb->data))
- {
- case CAPI_CONNECT_B3_CONF:
-/* Check info-field for error-indication: */
- info = CAPIMSG_U16(skb->data, 12);
- switch (info)
- {
- case 0:
- capilib_new_ncci(&cinfo->ncci_head, ApplId, CAPIMSG_NCCI(skb->data),
- hycapi_applications[ApplId - 1].rp.datablkcnt);
-
- break;
- case 0x0001:
- printk(KERN_ERR "HYSDN Card%d: NCPI not supported by current "
- "protocol. NCPI ignored.\n", card->myid);
- break;
- case 0x2001:
- printk(KERN_ERR "HYSDN Card%d: Message not supported in"
- " current state\n", card->myid);
- break;
- case 0x2002:
- printk(KERN_ERR "HYSDN Card%d: invalid PLCI\n", card->myid);
- break;
- case 0x2004:
- printk(KERN_ERR "HYSDN Card%d: out of NCCI\n", card->myid);
- break;
- case 0x3008:
- printk(KERN_ERR "HYSDN Card%d: NCPI not supported\n",
- card->myid);
- break;
- default:
- printk(KERN_ERR "HYSDN Card%d: Info in CONNECT_B3_CONF: %d\n",
- card->myid, info);
- break;
- }
- break;
- case CAPI_CONNECT_B3_IND:
- capilib_new_ncci(&cinfo->ncci_head, ApplId,
- CAPIMSG_NCCI(skb->data),
- hycapi_applications[ApplId - 1].rp.datablkcnt);
- break;
- case CAPI_DATA_B3_CONF:
- capilib_data_b3_conf(&cinfo->ncci_head, ApplId,
- CAPIMSG_NCCI(skb->data),
- CAPIMSG_MSGID(skb->data));
- break;
- default:
- break;
- }
- capi_ctr_handle_message(ctrl, ApplId, skb);
-}
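-
-/* Aside (sketch): what the MsgLen < 30 branch above does, in isolation - */
-/* widen a DATA_B3_IND with the short 22-byte header (32-bit Data field */
-/* only) to the 30-byte layout by inserting 8 zero bytes for the 64-bit */
-/* Data field. 'out' must have room for len + 8 bytes; the length field */
-/* is stored little-endian, as CAPIMSG_SETLEN does: */
-
-static int widen_data_b3_ind(unsigned char *out, const unsigned char *in,
-			     int len, int msg_len)
-{
-	memcpy(out, in, msg_len);		/* original header */
-	memset(out + msg_len, 0, 8);		/* zeroed 64-bit Data field */
-	memcpy(out + msg_len + 8, in + msg_len, len - msg_len);	/* payload */
-	out[0] = 30;				/* new header length, low byte */
-	out[1] = 0;
-	return len + 8;
-}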
-
-/******************************************************************
-hycapi_tx_capiack
-
-Internally acknowledge a msg sent. This will remove the msg from the
-internal queue.
-
-*******************************************************************/
-
-void hycapi_tx_capiack(hysdn_card *card)
-{
- hycapictrl_info *cinfo = card->hyctrlinfo;
-#ifdef HYCAPI_PRINTFNAMES
- printk(KERN_NOTICE "hycapi_tx_capiack\n");
-#endif
- if (!cinfo) {
- return;
- }
- spin_lock_irq(&cinfo->lock);
- kfree_skb(cinfo->skbs[cinfo->out_idx]); /* free skb */
- cinfo->skbs[cinfo->out_idx++] = NULL;
- if (cinfo->out_idx >= HYSDN_MAX_CAPI_SKB)
- cinfo->out_idx = 0; /* wrap around */
-
- if (cinfo->sk_count-- == HYSDN_MAX_CAPI_SKB) /* dec usage count */
- capi_ctr_resume_output(&cinfo->capi_ctrl);
- spin_unlock_irq(&cinfo->lock);
-}
-
-/***************************************************************
-hycapi_tx_capiget(hysdn_card *card)
-
-This is called when polling for messages to SEND.
-
-****************************************************************/
-
-struct sk_buff *
-hycapi_tx_capiget(hysdn_card *card)
-{
- hycapictrl_info *cinfo = card->hyctrlinfo;
- if (!cinfo) {
- return (struct sk_buff *)NULL;
- }
- if (!cinfo->sk_count)
- return (struct sk_buff *)NULL; /* nothing available */
-
- return (cinfo->skbs[cinfo->out_idx]); /* next packet to send */
-}
-
-
-/**********************************************************
-int hycapi_init()
-
-attach the capi-driver to the kernel-capi.
-
-***********************************************************/
-
-int hycapi_init(void)
-{
- int i;
- for (i = 0; i < CAPI_MAXAPPL; i++) {
- memset(&(hycapi_applications[i]), 0, sizeof(hycapi_appl));
- }
- return (0);
-}
-
-/**************************************************************
-hycapi_cleanup(void)
-
-detach the capi-driver from the kernel-capi. Actually this should
-free some more resources. Do that later.
-**************************************************************/
-
-void
-hycapi_cleanup(void)
-{
-}
-
-/********************************************************************
-hycapi_capi_create(hysdn_card *card)
-
-Attach the card to its capi-ctrl.
-*********************************************************************/
-
-static void hycapi_fill_profile(hysdn_card *card)
-{
- hycapictrl_info *cinfo = NULL;
- struct capi_ctr *ctrl = NULL;
- cinfo = card->hyctrlinfo;
- if (!cinfo) return;
- ctrl = &cinfo->capi_ctrl;
- strcpy(ctrl->manu, "Hypercope");
- ctrl->version.majorversion = 2;
- ctrl->version.minorversion = 0;
- ctrl->version.majormanuversion = 3;
- ctrl->version.minormanuversion = 2;
- ctrl->profile.ncontroller = card->myid;
- ctrl->profile.nbchannel = card->bchans;
- ctrl->profile.goptions = GLOBAL_OPTION_INTERNAL_CONTROLLER |
- GLOBAL_OPTION_B_CHANNEL_OPERATION;
- ctrl->profile.support1 = B1_PROT_64KBIT_HDLC |
- (card->faxchans ? B1_PROT_T30 : 0) |
- B1_PROT_64KBIT_TRANSPARENT;
- ctrl->profile.support2 = B2_PROT_ISO7776 |
- (card->faxchans ? B2_PROT_T30 : 0) |
- B2_PROT_TRANSPARENT;
- ctrl->profile.support3 = B3_PROT_TRANSPARENT |
- B3_PROT_T90NL |
- (card->faxchans ? B3_PROT_T30 : 0) |
- (card->faxchans ? B3_PROT_T30EXT : 0) |
- B3_PROT_ISO8208;
-}
-
-int
-hycapi_capi_create(hysdn_card *card)
-{
- hycapictrl_info *cinfo = NULL;
- struct capi_ctr *ctrl = NULL;
- int retval;
-#ifdef HYCAPI_PRINTFNAMES
- printk(KERN_NOTICE "hycapi_capi_create\n");
-#endif
- if ((hycapi_enable & (1 << card->myid)) == 0) {
- return 1;
- }
- if (!card->hyctrlinfo) {
- cinfo = kzalloc(sizeof(hycapictrl_info), GFP_ATOMIC);
- if (!cinfo) {
- printk(KERN_WARNING "HYSDN: no memory for capi-ctrl.\n");
- return -ENOMEM;
- }
- card->hyctrlinfo = cinfo;
- cinfo->card = card;
- spin_lock_init(&cinfo->lock);
- INIT_LIST_HEAD(&cinfo->ncci_head);
-
- switch (card->brdtype) {
- case BD_PCCARD: strcpy(cinfo->cardname, "HYSDN Hycard"); break;
- case BD_ERGO: strcpy(cinfo->cardname, "HYSDN Ergo2"); break;
- case BD_METRO: strcpy(cinfo->cardname, "HYSDN Metro4"); break;
- case BD_CHAMP2: strcpy(cinfo->cardname, "HYSDN Champ2"); break;
- case BD_PLEXUS: strcpy(cinfo->cardname, "HYSDN Plexus30"); break;
- default: strcpy(cinfo->cardname, "HYSDN ???"); break;
- }
-
- ctrl = &cinfo->capi_ctrl;
- ctrl->driver_name = "hycapi";
- ctrl->driverdata = cinfo;
- ctrl->register_appl = hycapi_register_appl;
- ctrl->release_appl = hycapi_release_appl;
- ctrl->send_message = hycapi_send_message;
- ctrl->load_firmware = hycapi_load_firmware;
- ctrl->reset_ctr = hycapi_reset_ctr;
- ctrl->procinfo = hycapi_procinfo;
- ctrl->proc_show = hycapi_proc_show;
- strcpy(ctrl->name, cinfo->cardname);
- ctrl->owner = THIS_MODULE;
-
- retval = attach_capi_ctr(ctrl);
- if (retval) {
- printk(KERN_ERR "hycapi: attach controller failed.\n");
- return -EBUSY;
- }
- /* fill in the blanks: */
- hycapi_fill_profile(card);
- capi_ctr_ready(ctrl);
- } else {
- /* resume output on stopped ctrl */
- ctrl = &card->hyctrlinfo->capi_ctrl;
- hycapi_fill_profile(card);
- capi_ctr_ready(ctrl);
- hycapi_restart_internal(ctrl);
-/* ctrl->resume_output(ctrl); */
- }
- return 0;
-}
diff --git a/drivers/staging/isdn/hysdn/hysdn_boot.c b/drivers/staging/isdn/hysdn/hysdn_boot.c
deleted file mode 100644
index ba177c3a621b..000000000000
--- a/drivers/staging/isdn/hysdn/hysdn_boot.c
+++ /dev/null
@@ -1,400 +0,0 @@
-/* $Id: hysdn_boot.c,v 1.4.6.4 2001/09/23 22:24:54 kai Exp $
- *
- * Linux driver for HYSDN cards
- * specific routines for booting and pof handling
- *
- * Author Werner Cornelius (werner@titro.de) for Hypercope GmbH
- * Copyright 1999 by Werner Cornelius (werner@titro.de)
- *
- * This software may be used and distributed according to the terms
- * of the GNU General Public License, incorporated herein by reference.
- *
- */
-
-#include <linux/vmalloc.h>
-#include <linux/slab.h>
-#include <linux/uaccess.h>
-
-#include "hysdn_defs.h"
-#include "hysdn_pof.h"
-
-/********************************/
-/* defines for pof read handler */
-/********************************/
-#define POF_READ_FILE_HEAD 0
-#define POF_READ_TAG_HEAD 1
-#define POF_READ_TAG_DATA 2
-
-/************************************************************/
-/* definition of boot specific data area. This data is only */
-/* needed during boot and so allocated dynamically. */
-/************************************************************/
-struct boot_data {
- unsigned short Cryptor; /* for use with Decrypt function */
- unsigned short Nrecs; /* records remaining in file */
- unsigned char pof_state;/* actual state of read handler */
- unsigned char is_crypted;/* card data is crypted */
- int BufSize; /* actual number of bytes buffered */
- int last_error; /* last error that occurred */
- unsigned short pof_recid;/* actual pof recid */
- unsigned long pof_reclen;/* total length of pof record data */
- unsigned long pof_recoffset;/* actual offset inside pof record */
- union {
- unsigned char BootBuf[BOOT_BUF_SIZE];/* buffer as byte count */
- tPofRecHdr PofRecHdr; /* header for actual record/chunk */
- tPofFileHdr PofFileHdr; /* header from POF file */
- tPofTimeStamp PofTime; /* time information */
- } buf;
-};
-
-/*****************************************************/
-/* start decryption of successive POF file chunks. */
-/* */
-/* to be called at start of POF file reading, */
-/* before starting any decryption on any POF record. */
-/*****************************************************/
-static void
-StartDecryption(struct boot_data *boot)
-{
- boot->Cryptor = CRYPT_STARTTERM;
-} /* StartDecryption */
-
-
-/***************************************************************/
-/* decrypt complete BootBuf */
-/* NOTE: decryption must be applied to all boot tags or to none - */
-/* i.e. to the HI and LO boot loader and (all) seq tags, because */
-/* the global Cryptor is started once for the whole POF. */
-/***************************************************************/
-static void
-DecryptBuf(struct boot_data *boot, int cnt)
-{
- unsigned char *bufp = boot->buf.BootBuf;
-
- while (cnt--) {
- boot->Cryptor = (boot->Cryptor >> 1) ^ ((boot->Cryptor & 1U) ? CRYPT_FEEDTERM : 0);
- *bufp++ ^= (unsigned char)boot->Cryptor;
- }
-} /* DecryptBuf */
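-
-/* Aside (standalone model): the Cryptor is a 16-bit shift register */
-/* stepped once per byte, with the feedback term applied whenever the bit */
-/* shifted out is set. START and FEED stand in for CRYPT_STARTTERM and */
-/* CRYPT_FEEDTERM from hysdn_pof.h, whose values are not shown here: */
-
-static void pof_decrypt(unsigned char *buf, int cnt,
-			unsigned short start, unsigned short feed)
-{
-	unsigned short cryptor = start;
-
-	while (cnt--) {
-		cryptor = (cryptor >> 1) ^ ((cryptor & 1U) ? feed : 0);
-		*buf++ ^= (unsigned char)cryptor;	/* XOR keystream byte */
-	}
-}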
-
-/********************************************************************************/
-/* pof_handle_data executes the required actions depending on the active record */
-/* id. If successful 0 is returned, a negative value indicates an error. */
-/********************************************************************************/
-static int
-pof_handle_data(hysdn_card *card, int datlen)
-{
- struct boot_data *boot = card->boot; /* pointer to boot specific data */
- long l;
- unsigned char *imgp;
- int img_len;
-
- /* handle the different record types */
- switch (boot->pof_recid) {
-
- case TAG_TIMESTMP:
- if (card->debug_flags & LOG_POF_RECORD)
- hysdn_addlog(card, "POF created %s", boot->buf.PofTime.DateTimeText);
- break;
-
- case TAG_CBOOTDTA:
- DecryptBuf(boot, datlen); /* we need to decrypt the buffer */
- /* fall through */
- case TAG_BOOTDTA:
- if (card->debug_flags & LOG_POF_RECORD)
- hysdn_addlog(card, "POF got %s len=%d offs=0x%lx",
- (boot->pof_recid == TAG_CBOOTDTA) ? "CBOOTDATA" : "BOOTDTA",
- datlen, boot->pof_recoffset);
-
- if (boot->pof_reclen != POF_BOOT_LOADER_TOTAL_SIZE) {
- boot->last_error = EPOF_BAD_IMG_SIZE; /* invalid length */
- return (boot->last_error);
- }
- imgp = boot->buf.BootBuf; /* start of buffer */
- img_len = datlen; /* maximum length to transfer */
-
- l = POF_BOOT_LOADER_OFF_IN_PAGE -
- (boot->pof_recoffset & (POF_BOOT_LOADER_PAGE_SIZE - 1));
- if (l > 0) {
- /* skip the data in front of the boot loader offset */
- imgp += l; /* advance pointer */
- img_len -= l; /* adjust len */
- }
- /* at this point no special handling for data wrapping over the buffer */
- /* is necessary, because the boot image will always be adjusted to */
- /* match a page boundary inside the buffer. */
- /* The buffer for the boot image on the card is filled in 2 cycles: */
- /* first the 1024 hi-words are put in the buffer, then the 1024 */
- /* lo-words are handled in the same way with a different offset. */
-
- if (img_len > 0) {
- /* data available for copy */
- if ((boot->last_error =
- card->writebootimg(card, imgp,
- (boot->pof_recoffset > POF_BOOT_LOADER_PAGE_SIZE) ? 2 : 0)) < 0)
- return (boot->last_error);
- }
- break; /* end of case boot image hi/lo */
-
- case TAG_CABSDATA:
- DecryptBuf(boot, datlen); /* we need to decrypt the buffer */
- /* fall through */
- case TAG_ABSDATA:
- if (card->debug_flags & LOG_POF_RECORD)
- hysdn_addlog(card, "POF got %s len=%d offs=0x%lx",
- (boot->pof_recid == TAG_CABSDATA) ? "CABSDATA" : "ABSDATA",
- datlen, boot->pof_recoffset);
-
- if ((boot->last_error = card->writebootseq(card, boot->buf.BootBuf, datlen)) < 0)
- return (boot->last_error); /* error writing data */
-
- if (boot->pof_recoffset + datlen >= boot->pof_reclen)
- return (card->waitpofready(card)); /* data completely spooled, wait for ready */
-
- break; /* end of case boot seq data */
-
- default:
- if (card->debug_flags & LOG_POF_RECORD)
- hysdn_addlog(card, "POF got data(id=0x%lx) len=%d offs=0x%lx", boot->pof_recid,
- datlen, boot->pof_recoffset);
-
- break; /* simply skip record */
- } /* switch boot->pof_recid */
-
- return (0);
-} /* pof_handle_data */
-
-
-/******************************************************************************/
-/* pof_write_buffer is called when the buffer has been filled with the needed */
-/* number of data bytes. The number delivered is additionally supplied for */
-/* verification. The function handles the data and returns the needed number */
-/* of bytes for the next action. If the returned value is 0 or less, an error */
-/* occurred and booting must be aborted. */
-/******************************************************************************/
-int
-pof_write_buffer(hysdn_card *card, int datlen)
-{
- struct boot_data *boot = card->boot; /* pointer to boot specific data */
-
- if (!boot)
- return (-EFAULT); /* invalid call */
- if (boot->last_error < 0)
- return (boot->last_error); /* repeated error */
-
- if (card->debug_flags & LOG_POF_WRITE)
- hysdn_addlog(card, "POF write: got %d bytes ", datlen);
-
- switch (boot->pof_state) {
- case POF_READ_FILE_HEAD:
- if (card->debug_flags & LOG_POF_WRITE)
- hysdn_addlog(card, "POF write: checking file header");
-
- if (datlen != sizeof(tPofFileHdr)) {
- boot->last_error = -EPOF_INTERNAL;
- break;
- }
- if (boot->buf.PofFileHdr.Magic != TAGFILEMAGIC) {
- boot->last_error = -EPOF_BAD_MAGIC;
- break;
- }
- /* Setup the new state and vars */
- boot->Nrecs = (unsigned short)(boot->buf.PofFileHdr.N_PofRecs); /* limited to 65535 */
- boot->pof_state = POF_READ_TAG_HEAD; /* now start with single tags */
- boot->last_error = sizeof(tPofRecHdr); /* new length */
- break;
-
- case POF_READ_TAG_HEAD:
- if (card->debug_flags & LOG_POF_WRITE)
- hysdn_addlog(card, "POF write: checking tag header");
-
- if (datlen != sizeof(tPofRecHdr)) {
- boot->last_error = -EPOF_INTERNAL;
- break;
- }
- boot->pof_recid = boot->buf.PofRecHdr.PofRecId; /* actual pof recid */
- boot->pof_reclen = boot->buf.PofRecHdr.PofRecDataLen; /* total length */
- boot->pof_recoffset = 0; /* no starting offset */
-
- if (card->debug_flags & LOG_POF_RECORD)
- hysdn_addlog(card, "POF: got record id=0x%lx length=%ld ",
- boot->pof_recid, boot->pof_reclen);
-
- boot->pof_state = POF_READ_TAG_DATA; /* now start with tag data */
- if (boot->pof_reclen < BOOT_BUF_SIZE)
- boot->last_error = boot->pof_reclen; /* limit size */
- else
- boot->last_error = BOOT_BUF_SIZE; /* maximum */
-
- if (!boot->last_error) { /* no data inside record */
- boot->pof_state = POF_READ_TAG_HEAD; /* now start with single tags */
- boot->last_error = sizeof(tPofRecHdr); /* new length */
- }
- break;
-
- case POF_READ_TAG_DATA:
- if (card->debug_flags & LOG_POF_WRITE)
- hysdn_addlog(card, "POF write: getting tag data");
-
- if (datlen != boot->last_error) {
- boot->last_error = -EPOF_INTERNAL;
- break;
- }
- if ((boot->last_error = pof_handle_data(card, datlen)) < 0)
- return (boot->last_error); /* an error occurred */
- boot->pof_recoffset += datlen;
- if (boot->pof_recoffset >= boot->pof_reclen) {
- boot->pof_state = POF_READ_TAG_HEAD; /* now start with single tags */
- boot->last_error = sizeof(tPofRecHdr); /* new length */
- } else {
- if (boot->pof_reclen - boot->pof_recoffset < BOOT_BUF_SIZE)
- boot->last_error = boot->pof_reclen - boot->pof_recoffset; /* limit size */
- else
- boot->last_error = BOOT_BUF_SIZE; /* maximum */
- }
- break;
-
- default:
- boot->last_error = -EPOF_INTERNAL; /* unknown state */
- break;
- } /* switch (boot->pof_state) */
-
- return (boot->last_error);
-} /* pof_write_buffer */
-
-
-/*******************************************************************************/
-/* pof_write_open is called when an open for boot on the cardlog device occurs. */
-/* The function returns the needed number of bytes for the next operation. If */
-/* the returned number is less than or equal to 0, an error specified by this code */
-/* occurred. Additionally the pointer to the buffer data area is set on success */
-/*******************************************************************************/
-int
-pof_write_open(hysdn_card *card, unsigned char **bufp)
-{
- struct boot_data *boot; /* pointer to boot specific data */
-
- if (card->boot) {
- if (card->debug_flags & LOG_POF_OPEN)
- hysdn_addlog(card, "POF open: already opened for boot");
- return (-ERR_ALREADY_BOOT); /* boot already active */
- }
- /* error no mem available */
- if (!(boot = kzalloc(sizeof(struct boot_data), GFP_KERNEL))) {
- if (card->debug_flags & LOG_MEM_ERR)
- hysdn_addlog(card, "POF open: unable to allocate mem");
- return (-EFAULT);
- }
- card->boot = boot;
- card->state = CARD_STATE_BOOTING;
-
- card->stopcard(card); /* first stop the card */
- if (card->testram(card)) {
- if (card->debug_flags & LOG_POF_OPEN)
- hysdn_addlog(card, "POF open: DPRAM test failure");
- boot->last_error = -ERR_BOARD_DPRAM;
- card->state = CARD_STATE_BOOTERR; /* show boot error */
- return (boot->last_error);
- }
- boot->BufSize = 0; /* Buffer is empty */
- boot->pof_state = POF_READ_FILE_HEAD; /* read file header */
- StartDecryption(boot); /* if POF File should be encrypted */
-
- if (card->debug_flags & LOG_POF_OPEN)
- hysdn_addlog(card, "POF open: success");
-
- *bufp = boot->buf.BootBuf; /* point to buffer */
- return (sizeof(tPofFileHdr));
-} /* pof_write_open */
-
-/********************************************************************************/
-/* pof_write_close is called when a close of boot on the cardlog device occurs. */
-/* The return value must be 0 if everything has happened as desired. */
-/********************************************************************************/
-int
-pof_write_close(hysdn_card *card)
-{
- struct boot_data *boot = card->boot; /* pointer to boot specific data */
-
- if (!boot)
- return (-EFAULT); /* invalid call */
-
- card->boot = NULL; /* no boot active */
- kfree(boot);
-
- if (card->state == CARD_STATE_RUN)
- card->set_errlog_state(card, 1); /* activate error log */
-
- if (card->debug_flags & LOG_POF_OPEN)
- hysdn_addlog(card, "POF close: success");
-
- return (0);
-} /* pof_write_close */
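-
-/* Aside (sketch; error handling trimmed, 'image'/'size' hypothetical): */
-/* how a caller is expected to drive the three routines above when */
-/* spooling a POF image into the card. Each call returns the number of */
-/* bytes wanted next, or a negative error: */
-
-static int boot_card(hysdn_card *card, const unsigned char *image, int size)
-{
-	unsigned char *buf;
-	int want, done = 0;
-
-	want = pof_write_open(card, &buf);	/* asks for the file header first */
-	while (want > 0 && done + want <= size) {
-		memcpy(buf, image + done, want);	/* feed the next chunk */
-		done += want;
-		want = pof_write_buffer(card, want);	/* returns next chunk size */
-	}
-	pof_write_close(card);
-	return want < 0 ? want : 0;
-}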
-
-/*********************************************************************************/
-/* EvalSysrTokData checks additional records delivered with the Sysready Message */
-/* when POF has been booted. A return value of 0 is used if no error occurred. */
-/*********************************************************************************/
-int
-EvalSysrTokData(hysdn_card *card, unsigned char *cp, int len)
-{
- u_char *p;
- u_char crc;
-
- if (card->debug_flags & LOG_POF_RECORD)
- hysdn_addlog(card, "SysReady Token data length %d", len);
-
- if (len < 2) {
- hysdn_addlog(card, "SysReady Token Data to short");
- return (1);
- }
- for (p = cp, crc = 0; p < (cp + len - 2); p++)
- if ((crc & 0x80))
- crc = (((u_char) (crc << 1)) + 1) + *p;
- else
- crc = ((u_char) (crc << 1)) + *p;
- crc = ~crc;
- if (crc != *(cp + len - 1)) {
- hysdn_addlog(card, "SysReady Token Data invalid CRC");
- return (1);
- }
- len--; /* don't check CRC byte */
- while (len > 0) {
-
- if (*cp == SYSR_TOK_END)
- return (0); /* End of Token stream */
-
- if (len < (*(cp + 1) + 2)) {
- hysdn_addlog(card, "token 0x%x invalid length %d", *cp, *(cp + 1));
- return (1);
- }
- switch (*cp) {
- case SYSR_TOK_B_CHAN: /* 1 */
- if (*(cp + 1) != 1)
- return (1); /* length invalid */
- card->bchans = *(cp + 2);
- break;
-
- case SYSR_TOK_FAX_CHAN: /* 2 */
- if (*(cp + 1) != 1)
- return (1); /* length invalid */
- card->faxchans = *(cp + 2);
- break;
-
- case SYSR_TOK_MAC_ADDR: /* 3 */
- if (*(cp + 1) != 6)
- return (1); /* length invalid */
- memcpy(card->mac_addr, cp + 2, 6);
- break;
-
- default:
- hysdn_addlog(card, "unknown token 0x%02x length %d", *cp, *(cp + 1));
- break;
- }
- len -= (*(cp + 1) + 2); /* adjust len */
- cp += (*(cp + 1) + 2); /* and pointer */
- }
-
- hysdn_addlog(card, "no end token found");
- return (1);
-} /* EvalSysrTokData */
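-
-/* Aside (sketch): the token-data checksum above in isolation. It runs */
-/* over the first len - 2 bytes: shift the accumulator left by one bit, */
-/* feeding the bit that falls out back into bit 0, add the next byte, and */
-/* complement the result; the last byte of the block must match: */
-
-static unsigned char sysr_tok_crc(const unsigned char *p, int cnt)
-{
-	unsigned char crc = 0;
-
-	while (cnt--) {
-		if (crc & 0x80)
-			crc = (unsigned char)((crc << 1) + 1) + *p;
-		else
-			crc = (unsigned char)(crc << 1) + *p;
-		p++;
-	}
-	return (unsigned char)~crc;	/* compare against cp[len - 1] */
-}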
diff --git a/drivers/staging/isdn/hysdn/hysdn_defs.h b/drivers/staging/isdn/hysdn/hysdn_defs.h
deleted file mode 100644
index cdac46a21692..000000000000
--- a/drivers/staging/isdn/hysdn/hysdn_defs.h
+++ /dev/null
@@ -1,282 +0,0 @@
-/* $Id: hysdn_defs.h,v 1.5.6.3 2001/09/23 22:24:54 kai Exp $
- *
- * Linux driver for HYSDN cards
- * global definitions and exported vars and functions.
- *
- * Author Werner Cornelius (werner@titro.de) for Hypercope GmbH
- * Copyright 1999 by Werner Cornelius (werner@titro.de)
- *
- * This software may be used and distributed according to the terms
- * of the GNU General Public License, incorporated herein by reference.
- *
- */
-
-#ifndef HYSDN_DEFS_H
-#define HYSDN_DEFS_H
-
-#include <linux/hysdn_if.h>
-#include <linux/interrupt.h>
-#include <linux/workqueue.h>
-#include <linux/skbuff.h>
-
-#include "ince1pc.h"
-
-#ifdef CONFIG_HYSDN_CAPI
-#include <linux/capi.h>
-#include <linux/isdn/capicmd.h>
-#include <linux/isdn/capiutil.h>
-#include <linux/isdn/capilli.h>
-
-/***************************/
-/* CAPI-Profile values. */
-/***************************/
-
-#define GLOBAL_OPTION_INTERNAL_CONTROLLER 0x0001
-#define GLOBAL_OPTION_EXTERNAL_CONTROLLER 0x0002
-#define GLOBAL_OPTION_HANDSET 0x0004
-#define GLOBAL_OPTION_DTMF 0x0008
-#define GLOBAL_OPTION_SUPPL_SERVICES 0x0010
-#define GLOBAL_OPTION_CHANNEL_ALLOCATION 0x0020
-#define GLOBAL_OPTION_B_CHANNEL_OPERATION 0x0040
-
-#define B1_PROT_64KBIT_HDLC 0x0001
-#define B1_PROT_64KBIT_TRANSPARENT 0x0002
-#define B1_PROT_V110_ASYNCH 0x0004
-#define B1_PROT_V110_SYNCH 0x0008
-#define B1_PROT_T30 0x0010
-#define B1_PROT_64KBIT_INV_HDLC 0x0020
-#define B1_PROT_56KBIT_TRANSPARENT 0x0040
-
-#define B2_PROT_ISO7776 0x0001
-#define B2_PROT_TRANSPARENT 0x0002
-#define B2_PROT_SDLC 0x0004
-#define B2_PROT_LAPD 0x0008
-#define B2_PROT_T30 0x0010
-#define B2_PROT_PPP 0x0020
-#define B2_PROT_TRANSPARENT_IGNORE_B1_FRAMING_ERRORS 0x0040
-
-#define B3_PROT_TRANSPARENT 0x0001
-#define B3_PROT_T90NL 0x0002
-#define B3_PROT_ISO8208 0x0004
-#define B3_PROT_X25_DCE 0x0008
-#define B3_PROT_T30 0x0010
-#define B3_PROT_T30EXT 0x0020
-
-#define HYSDN_MAXVERSION 8
-
-/* Number of sendbuffers in CAPI-queue */
-#define HYSDN_MAX_CAPI_SKB 20
-
-#endif /* CONFIG_HYSDN_CAPI*/
-
-/************************************************/
-/* constants and bits for debugging/log outputs */
-/************************************************/
-#define LOG_MAX_LINELEN 120
-#define DEB_OUT_SYSLOG 0x80000000 /* output to syslog instead of proc fs */
-#define LOG_MEM_ERR 0x00000001 /* log memory errors like kmalloc failure */
-#define LOG_POF_OPEN 0x00000010 /* log pof open and close activities */
-#define LOG_POF_RECORD 0x00000020 /* log pof record parser */
-#define LOG_POF_WRITE 0x00000040 /* log detailed pof write operation */
-#define LOG_POF_CARD 0x00000080 /* log pof related card functions */
-#define LOG_CNF_LINE 0x00000100 /* all conf lines are put to procfs */
-#define LOG_CNF_DATA 0x00000200 /* non comment conf lines are shown with channel */
-#define LOG_CNF_MISC 0x00000400 /* additional conf line debug outputs */
-#define LOG_SCHED_ASYN 0x00001000 /* debug schedulers async tx routines */
-#define LOG_PROC_OPEN 0x00100000 /* open and close from procfs are logged */
-#define LOG_PROC_ALL 0x00200000 /* all actions from procfs are logged */
-#define LOG_NET_INIT 0x00010000 /* network init and deinit logging */
-
-#define DEF_DEB_FLAGS 0x7fff000f /* everything is logged to procfs */
-
-/**********************************/
-/* proc filesystem name constants */
-/**********************************/
-#define PROC_SUBDIR_NAME "hysdn"
-#define PROC_CONF_BASENAME "cardconf"
-#define PROC_LOG_BASENAME "cardlog"
-
-/***********************************/
-/* PCI 32 bit parms for IO and MEM */
-/***********************************/
-#define PCI_REG_PLX_MEM_BASE 0
-#define PCI_REG_PLX_IO_BASE 1
-#define PCI_REG_MEMORY_BASE 3
-
-/**************/
-/* card types */
-/**************/
-#define BD_NONE 0U
-#define BD_PERFORMANCE 1U
-#define BD_VALUE 2U
-#define BD_PCCARD 3U
-#define BD_ERGO 4U
-#define BD_METRO 5U
-#define BD_CHAMP2 6U
-#define BD_PLEXUS 7U
-
-/******************************************************/
-/* defined states for cards shown by reading cardconf */
-/******************************************************/
-#define CARD_STATE_UNUSED 0 /* never been used or booted */
-#define CARD_STATE_BOOTING 1 /* booting is in progress */
-#define CARD_STATE_BOOTERR 2 /* a previous boot was aborted */
-#define CARD_STATE_RUN 3 /* card is active */
-
-/*******************************/
-/* defines for error_log_state */
-/*******************************/
-#define ERRLOG_STATE_OFF 0 /* error log is switched off, nothing to do */
-#define ERRLOG_STATE_ON 1 /* error log is switched on, wait for data */
-#define ERRLOG_STATE_START 2 /* start error logging */
-#define ERRLOG_STATE_STOP 3 /* stop error logging */
-
-/*******************************/
-/* data structure for one card */
-/*******************************/
-typedef struct HYSDN_CARD {
-
- /* general variables for the cards */
- int myid; /* own driver card id */
- unsigned char bus; /* pci bus the card is connected to */
- unsigned char devfn; /* slot+function bit encoded */
- unsigned short subsysid;/* PCI subsystem id */
- unsigned char brdtype; /* type of card */
- unsigned int bchans; /* number of available B-channels */
- unsigned int faxchans; /* number of available fax-channels */
- unsigned char mac_addr[6];/* MAC Address read from card */
- unsigned int irq; /* interrupt number */
- unsigned int iobase; /* IO-port base address */
- unsigned long plxbase; /* PLX memory base */
- unsigned long membase; /* DPRAM memory base */
- unsigned long memend; /* DPRAM memory end */
- void *dpram; /* mapped dpram */
-	int state;		/* current state of card -> CARD_STATE_** */
- struct HYSDN_CARD *next; /* pointer to next card */
-
- /* data areas for the /proc file system */
- void *proclog; /* pointer to proclog filesystem specific data */
- void *procconf; /* pointer to procconf filesystem specific data */
-
- /* debugging and logging */
-	unsigned char err_log_state;/* current error log state of the card */
- unsigned long debug_flags;/* tells what should be debugged and where */
- void (*set_errlog_state) (struct HYSDN_CARD *, int);
-
- /* interrupt handler + interrupt synchronisation */
- struct work_struct irq_queue; /* interrupt task queue */
- unsigned char volatile irq_enabled;/* interrupt enabled if != 0 */
- unsigned char volatile hw_lock;/* hardware is currently locked -> no access */
-
- /* boot process */
- void *boot; /* pointer to boot private data */
- int (*writebootimg) (struct HYSDN_CARD *, unsigned char *, unsigned long);
- int (*writebootseq) (struct HYSDN_CARD *, unsigned char *, int);
- int (*waitpofready) (struct HYSDN_CARD *);
- int (*testram) (struct HYSDN_CARD *);
-
- /* scheduler for data transfer (only async parts) */
- unsigned char async_data[256];/* async data to be sent (normally for config) */
-	unsigned short volatile async_len;/* length of data to send */
- unsigned short volatile async_channel;/* channel number for async transfer */
- int volatile async_busy; /* flag != 0 sending in progress */
- int volatile net_tx_busy; /* a network packet tx is in progress */
-
- /* network interface */
- void *netif; /* pointer to network structure */
-
- /* init and deinit stopcard for booting, too */
- void (*stopcard) (struct HYSDN_CARD *);
- void (*releasehardware) (struct HYSDN_CARD *);
-
- spinlock_t hysdn_lock;
-#ifdef CONFIG_HYSDN_CAPI
- struct hycapictrl_info {
- char cardname[32];
- spinlock_t lock;
- int versionlen;
- char versionbuf[1024];
- char *version[HYSDN_MAXVERSION];
-
- char infobuf[128]; /* for function procinfo */
-
- struct HYSDN_CARD *card;
- struct capi_ctr capi_ctrl;
- struct sk_buff *skbs[HYSDN_MAX_CAPI_SKB];
- int in_idx, out_idx; /* indexes to buffer ring */
- int sk_count; /* number of buffers currently in ring */
- struct sk_buff *tx_skb; /* buffer for tx operation */
-
- struct list_head ncci_head;
- } *hyctrlinfo;
-#endif /* CONFIG_HYSDN_CAPI */
-} hysdn_card;
-
-#ifdef CONFIG_HYSDN_CAPI
-typedef struct hycapictrl_info hycapictrl_info;
-#endif /* CONFIG_HYSDN_CAPI */
-
-
-/*****************/
-/* exported vars */
-/*****************/
-extern hysdn_card *card_root; /* pointer to first card */
-
-
-
-/*************************/
-/* im/exported functions */
-/*************************/
-
-/* hysdn_procconf.c */
-extern int hysdn_procconf_init(void); /* init proc config filesys */
-extern void hysdn_procconf_release(void); /* deinit proc config filesys */
-
-/* hysdn_proclog.c */
-extern int hysdn_proclog_init(hysdn_card *); /* init proc log entry */
-extern void hysdn_proclog_release(hysdn_card *); /* deinit proc log entry */
-extern void hysdn_addlog(hysdn_card *, char *, ...); /* output data to log */
-extern void hysdn_card_errlog(hysdn_card *, tErrLogEntry *, int); /* output card log */
-
-/* boardergo.c */
-extern int ergo_inithardware(hysdn_card *card); /* get hardware -> module init */
-
-/* hysdn_boot.c */
-extern int pof_write_close(hysdn_card *); /* close proc file after writing pof */
-extern int pof_write_open(hysdn_card *, unsigned char **); /* open proc file for writing pof */
-extern int pof_write_buffer(hysdn_card *, int); /* write boot data to card */
-extern int EvalSysrTokData(hysdn_card *, unsigned char *, int); /* Check Sysready Token Data */
-
-/* hysdn_sched.c */
-extern int hysdn_sched_tx(hysdn_card *, unsigned char *,
- unsigned short volatile *, unsigned short volatile *,
- unsigned short);
-extern int hysdn_sched_rx(hysdn_card *, unsigned char *, unsigned short,
- unsigned short);
-extern int hysdn_tx_cfgline(hysdn_card *, unsigned char *,
- unsigned short); /* send one cfg line */
-
-/* hysdn_net.c */
-extern unsigned int hynet_enable;
-extern int hysdn_net_create(hysdn_card *); /* create a new net device */
-extern int hysdn_net_release(hysdn_card *); /* delete the device */
-extern char *hysdn_net_getname(hysdn_card *); /* get name of net interface */
-extern void hysdn_tx_netack(hysdn_card *); /* acknowledge a packet tx */
-extern struct sk_buff *hysdn_tx_netget(hysdn_card *); /* get next network packet */
-extern void hysdn_rx_netpkt(hysdn_card *, unsigned char *,
- unsigned short); /* rxed packet from network */
-
-#ifdef CONFIG_HYSDN_CAPI
-extern unsigned int hycapi_enable;
-extern int hycapi_capi_create(hysdn_card *); /* create a new capi device */
-extern int hycapi_capi_release(hysdn_card *); /* delete the device */
-extern int hycapi_capi_stop(hysdn_card *card); /* suspend */
-extern void hycapi_rx_capipkt(hysdn_card *card, unsigned char *buf,
- unsigned short len);
-extern void hycapi_tx_capiack(hysdn_card *card);
-extern struct sk_buff *hycapi_tx_capiget(hysdn_card *card);
-extern int hycapi_init(void);
-extern void hycapi_cleanup(void);
-#endif /* CONFIG_HYSDN_CAPI */
-
-#endif /* HYSDN_DEFS_H */
diff --git a/drivers/staging/isdn/hysdn/hysdn_init.c b/drivers/staging/isdn/hysdn/hysdn_init.c
deleted file mode 100644
index 0db2f7506250..000000000000
--- a/drivers/staging/isdn/hysdn/hysdn_init.c
+++ /dev/null
@@ -1,213 +0,0 @@
-/* $Id: hysdn_init.c,v 1.6.6.6 2001/09/23 22:24:54 kai Exp $
- *
- * Linux driver for HYSDN cards, init functions.
- *
- * Author Werner Cornelius (werner@titro.de) for Hypercope GmbH
- * Copyright 1999 by Werner Cornelius (werner@titro.de)
- *
- * This software may be used and distributed according to the terms
- * of the GNU General Public License, incorporated herein by reference.
- *
- */
-
-#include <linux/module.h>
-#include <linux/init.h>
-#include <linux/poll.h>
-#include <linux/vmalloc.h>
-#include <linux/slab.h>
-#include <linux/pci.h>
-
-#include "hysdn_defs.h"
-
-static struct pci_device_id hysdn_pci_tbl[] = {
- { PCI_VENDOR_ID_HYPERCOPE, PCI_DEVICE_ID_HYPERCOPE_PLX,
- PCI_ANY_ID, PCI_SUBDEVICE_ID_HYPERCOPE_METRO, 0, 0, BD_METRO },
- { PCI_VENDOR_ID_HYPERCOPE, PCI_DEVICE_ID_HYPERCOPE_PLX,
- PCI_ANY_ID, PCI_SUBDEVICE_ID_HYPERCOPE_CHAMP2, 0, 0, BD_CHAMP2 },
- { PCI_VENDOR_ID_HYPERCOPE, PCI_DEVICE_ID_HYPERCOPE_PLX,
- PCI_ANY_ID, PCI_SUBDEVICE_ID_HYPERCOPE_ERGO, 0, 0, BD_ERGO },
- { PCI_VENDOR_ID_HYPERCOPE, PCI_DEVICE_ID_HYPERCOPE_PLX,
- PCI_ANY_ID, PCI_SUBDEVICE_ID_HYPERCOPE_OLD_ERGO, 0, 0, BD_ERGO },
-
- { } /* Terminating entry */
-};
-MODULE_DEVICE_TABLE(pci, hysdn_pci_tbl);
-MODULE_DESCRIPTION("ISDN4Linux: Driver for HYSDN cards");
-MODULE_AUTHOR("Werner Cornelius");
-MODULE_LICENSE("GPL");
-
-static int cardmax; /* number of found cards */
-hysdn_card *card_root = NULL; /* pointer to first card */
-static hysdn_card *card_last = NULL;	/* pointer to last card */
-
-
-/****************************************************************************/
-/* The module startup and shutdown code. Only compiled when used as module. */
-/* Using the driver as module is always advisable, because the booting */
-/* image becomes smaller and the driver code is only loaded when needed. */
-/* Additionally newer versions may be activated without rebooting. */
-/****************************************************************************/
-
-/****************************************************************************/
-/* init_module is called once when the module is loaded to do all necessary */
-/* things like autodetect... */
-/* If the return value of this function is 0 the init has been successful */
-/* and the module is added to the list in /proc/modules, otherwise an error */
-/* is assumed and the module will not be kept in memory. */
-/****************************************************************************/
-
-static int hysdn_pci_init_one(struct pci_dev *akt_pcidev,
- const struct pci_device_id *ent)
-{
- hysdn_card *card;
- int rc;
-
- rc = pci_enable_device(akt_pcidev);
- if (rc)
- return rc;
-
- if (!(card = kzalloc(sizeof(hysdn_card), GFP_KERNEL))) {
-		printk(KERN_ERR "HYSDN: unable to alloc device mem\n");
- rc = -ENOMEM;
- goto err_out;
- }
- card->myid = cardmax; /* set own id */
- card->bus = akt_pcidev->bus->number;
- card->devfn = akt_pcidev->devfn; /* slot + function */
- card->subsysid = akt_pcidev->subsystem_device;
- card->irq = akt_pcidev->irq;
- card->iobase = pci_resource_start(akt_pcidev, PCI_REG_PLX_IO_BASE);
- card->plxbase = pci_resource_start(akt_pcidev, PCI_REG_PLX_MEM_BASE);
- card->membase = pci_resource_start(akt_pcidev, PCI_REG_MEMORY_BASE);
- card->brdtype = BD_NONE; /* unknown */
- card->debug_flags = DEF_DEB_FLAGS; /* set default debug */
- card->faxchans = 0; /* default no fax channels */
- card->bchans = 2; /* and 2 b-channels */
- card->brdtype = ent->driver_data;
-
- if (ergo_inithardware(card)) {
- printk(KERN_WARNING "HYSDN: card at io 0x%04x already in use\n", card->iobase);
- rc = -EBUSY;
- goto err_out_card;
- }
-
- cardmax++;
- card->next = NULL; /*end of chain */
- if (card_last)
- card_last->next = card; /* pointer to next card */
- else
- card_root = card;
- card_last = card; /* new chain end */
-
- pci_set_drvdata(akt_pcidev, card);
- return 0;
-
-err_out_card:
- kfree(card);
-err_out:
- pci_disable_device(akt_pcidev);
- return rc;
-}
-
-static void hysdn_pci_remove_one(struct pci_dev *akt_pcidev)
-{
- hysdn_card *card = pci_get_drvdata(akt_pcidev);
-
- pci_set_drvdata(akt_pcidev, NULL);
-
- if (card->stopcard)
- card->stopcard(card);
-
-#ifdef CONFIG_HYSDN_CAPI
- hycapi_capi_release(card);
-#endif
-
- if (card->releasehardware)
- card->releasehardware(card); /* free all hardware resources */
-
- if (card == card_root) {
- card_root = card_root->next;
- if (!card_root)
- card_last = NULL;
- } else {
- hysdn_card *tmp = card_root;
- while (tmp) {
- if (tmp->next == card)
- tmp->next = card->next;
- card_last = tmp;
- tmp = tmp->next;
- }
- }
-
- kfree(card);
- pci_disable_device(akt_pcidev);
-}
-
-static struct pci_driver hysdn_pci_driver = {
- .name = "hysdn",
- .id_table = hysdn_pci_tbl,
- .probe = hysdn_pci_init_one,
- .remove = hysdn_pci_remove_one,
-};
-
-static int hysdn_have_procfs;
-
-static int __init
-hysdn_init(void)
-{
- int rc;
-
- printk(KERN_NOTICE "HYSDN: module loaded\n");
-
- rc = pci_register_driver(&hysdn_pci_driver);
- if (rc)
- return rc;
-
- printk(KERN_INFO "HYSDN: %d card(s) found.\n", cardmax);
-
- if (!hysdn_procconf_init())
- hysdn_have_procfs = 1;
-
-#ifdef CONFIG_HYSDN_CAPI
- if (cardmax > 0) {
- if (hycapi_init()) {
- printk(KERN_ERR "HYCAPI: init failed\n");
-
- if (hysdn_have_procfs)
- hysdn_procconf_release();
-
- pci_unregister_driver(&hysdn_pci_driver);
- return -ESPIPE;
- }
- }
-#endif /* CONFIG_HYSDN_CAPI */
-
- return 0; /* no error */
-} /* init_module */
-
-
-/***********************************************************************/
-/* cleanup_module is called when the module is released by the kernel. */
-/* The routine is only called if init_module has been successful and */
-/* the module counter has a value of 0. Otherwise this function will */
-/* not be called. This function must release all resources still allo- */
-/* cated as after the return from this function the module code will */
-/* be removed from memory. */
-/***********************************************************************/
-static void __exit
-hysdn_exit(void)
-{
- if (hysdn_have_procfs)
- hysdn_procconf_release();
-
- pci_unregister_driver(&hysdn_pci_driver);
-
-#ifdef CONFIG_HYSDN_CAPI
- hycapi_cleanup();
-#endif /* CONFIG_HYSDN_CAPI */
-
- printk(KERN_NOTICE "HYSDN: module unloaded\n");
-} /* cleanup_module */
-
-module_init(hysdn_init);
-module_exit(hysdn_exit);
diff --git a/drivers/staging/isdn/hysdn/hysdn_net.c b/drivers/staging/isdn/hysdn/hysdn_net.c
deleted file mode 100644
index dcb9ef7a2651..000000000000
--- a/drivers/staging/isdn/hysdn/hysdn_net.c
+++ /dev/null
@@ -1,330 +0,0 @@
-/* $Id: hysdn_net.c,v 1.8.6.4 2001/09/23 22:24:54 kai Exp $
- *
- * Linux driver for HYSDN cards, net (ethernet type) handling routines.
- *
- * Author Werner Cornelius (werner@titro.de) for Hypercope GmbH
- * Copyright 1999 by Werner Cornelius (werner@titro.de)
- *
- * This software may be used and distributed according to the terms
- * of the GNU General Public License, incorporated herein by reference.
- *
- * This net module has been inspired by the skeleton driver from
- * Donald Becker (becker@CESDIS.gsfc.nasa.gov)
- *
- */
-
-#include <linux/module.h>
-#include <linux/signal.h>
-#include <linux/kernel.h>
-#include <linux/netdevice.h>
-#include <linux/etherdevice.h>
-#include <linux/skbuff.h>
-#include <linux/inetdevice.h>
-
-#include "hysdn_defs.h"
-
-unsigned int hynet_enable = 0xffffffff;
-module_param(hynet_enable, uint, 0);
-
-#define MAX_SKB_BUFFERS 20 /* number of buffers for keeping TX-data */
-
-/****************************************************************************/
-/* structure containing the complete network data. The structure is aligned */
-/* in a way that both, the device and statistics are kept inside it. */
-/* for proper access, the device structure MUST be the first var/struct */
-/* inside the definition. */
-/****************************************************************************/
-struct net_local {
- /* Tx control lock. This protects the transmit buffer ring
- * state along with the "tx full" state of the driver. This
- * means all netif_queue flow control actions are protected
- * by this lock as well.
- */
- struct net_device *dev;
- spinlock_t lock;
- struct sk_buff *skbs[MAX_SKB_BUFFERS]; /* pointers to tx-skbs */
- int in_idx, out_idx; /* indexes to buffer ring */
- int sk_count; /* number of buffers currently in ring */
-}; /* net_local */
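
The in_idx/out_idx/sk_count triple above implements a fixed-size ring of skb pointers: the producer stores at in_idx, the consumer reads at out_idx, and both wrap at MAX_SKB_BUFFERS. A minimal stand-alone sketch of that arithmetic (hypothetical names, not driver code; the driver splits the consumer side into a peek in hysdn_tx_netget() and a completion step in hysdn_tx_netack(), but the wrap-around logic is the same):

#define RING_SIZE 20			/* mirrors MAX_SKB_BUFFERS */

struct skb_ring {
	void *slots[RING_SIZE];
	int in_idx, out_idx;		/* store / fetch positions */
	int count;			/* slots currently in use */
};

/* returns 0 on success, -1 when the ring is full */
static int ring_put(struct skb_ring *r, void *skb)
{
	if (r->count >= RING_SIZE)
		return -1;		/* caller should stop the queue */
	r->slots[r->in_idx++] = skb;
	if (r->in_idx >= RING_SIZE)
		r->in_idx = 0;		/* wrap around */
	r->count++;
	return 0;
}

/* returns the oldest entry, or NULL when the ring is empty */
static void *ring_get(struct skb_ring *r)
{
	void *skb;

	if (!r->count)
		return NULL;
	skb = r->slots[r->out_idx++];
	if (r->out_idx >= RING_SIZE)
		r->out_idx = 0;		/* wrap around */
	r->count--;
	return skb;
}
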
-
-
-
-/*********************************************************************/
-/* Open/initialize the board. This is called (in the current kernel) */
-/* sometime after booting when the 'ifconfig' program is run. */
-/* This routine should set everything up anew at each open, even */
-/* registers that "should" only need to be set once at boot, so that */
-/* there is a non-reboot way to recover if something goes wrong.    */
-/*********************************************************************/
-static int
-net_open(struct net_device *dev)
-{
- struct in_device *in_dev;
- hysdn_card *card = dev->ml_priv;
- int i;
-
- netif_start_queue(dev); /* start tx-queueing */
-
- /* Fill in the MAC-level header (if not already set) */
- if (!card->mac_addr[0]) {
- for (i = 0; i < ETH_ALEN; i++)
- dev->dev_addr[i] = 0xfc;
- if ((in_dev = dev->ip_ptr) != NULL) {
- const struct in_ifaddr *ifa;
-
- rcu_read_lock();
- ifa = rcu_dereference(in_dev->ifa_list);
- if (ifa != NULL)
- memcpy(dev->dev_addr + (ETH_ALEN - sizeof(ifa->ifa_local)), &ifa->ifa_local, sizeof(ifa->ifa_local));
- rcu_read_unlock();
- }
- } else
- memcpy(dev->dev_addr, card->mac_addr, ETH_ALEN);
-
- return (0);
-} /* net_open */
-
-/*******************************************/
-/* flush the currently occupied tx-buffers */
-/* must only be called when device closed */
-/*******************************************/
-static void
-flush_tx_buffers(struct net_local *nl)
-{
-
- while (nl->sk_count) {
- dev_kfree_skb(nl->skbs[nl->out_idx++]); /* free skb */
- if (nl->out_idx >= MAX_SKB_BUFFERS)
- nl->out_idx = 0; /* wrap around */
- nl->sk_count--;
- }
-} /* flush_tx_buffers */
-
-
-/*********************************************************************/
-/* close/deactivate the device. The device is not removed, but only */
-/* deactivated. */
-/*********************************************************************/
-static int
-net_close(struct net_device *dev)
-{
-
- netif_stop_queue(dev); /* disable queueing */
-
-	flush_tx_buffers(netdev_priv(dev));	/* priv area, not the netdev itself */
-
- return (0); /* success */
-} /* net_close */
-
-/************************************/
-/* send a packet on this interface. */
-/* new style for kernel >= 2.3.33 */
-/************************************/
-static netdev_tx_t
-net_send_packet(struct sk_buff *skb, struct net_device *dev)
-{
-	struct net_local *lp = netdev_priv(dev);
-
- spin_lock_irq(&lp->lock);
-
- lp->skbs[lp->in_idx++] = skb; /* add to buffer list */
- if (lp->in_idx >= MAX_SKB_BUFFERS)
- lp->in_idx = 0; /* wrap around */
- lp->sk_count++; /* adjust counter */
- netif_trans_update(dev);
-
- /* If we just used up the very last entry in the
- * TX ring on this device, tell the queueing
- * layer to send no more.
- */
- if (lp->sk_count >= MAX_SKB_BUFFERS)
- netif_stop_queue(dev);
-
- /* When the TX completion hw interrupt arrives, this
- * is when the transmit statistics are updated.
- */
-
- spin_unlock_irq(&lp->lock);
-
- if (lp->sk_count <= 3) {
- schedule_work(&((hysdn_card *) dev->ml_priv)->irq_queue);
- }
- return NETDEV_TX_OK; /* success */
-} /* net_send_packet */
-
-
-
-/***********************************************************************/
-/* acknowledge a packet send. The network layer will be informed about */
-/* completion */
-/***********************************************************************/
-void
-hysdn_tx_netack(hysdn_card *card)
-{
- struct net_local *lp = card->netif;
-
- if (!lp)
- return; /* non existing device */
-
-
- if (!lp->sk_count)
- return; /* error condition */
-
- lp->dev->stats.tx_packets++;
- lp->dev->stats.tx_bytes += lp->skbs[lp->out_idx]->len;
-
- dev_kfree_skb(lp->skbs[lp->out_idx++]); /* free skb */
- if (lp->out_idx >= MAX_SKB_BUFFERS)
- lp->out_idx = 0; /* wrap around */
-
- if (lp->sk_count-- == MAX_SKB_BUFFERS) /* dec usage count */
-		netif_start_queue(lp->dev);
-} /* hysdn_tx_netack */
-
-/*****************************************************/
-/* we got a packet from the network, go and queue it */
-/*****************************************************/
-void
-hysdn_rx_netpkt(hysdn_card *card, unsigned char *buf, unsigned short len)
-{
- struct net_local *lp = card->netif;
- struct net_device *dev;
- struct sk_buff *skb;
-
- if (!lp)
- return; /* non existing device */
-
- dev = lp->dev;
- dev->stats.rx_bytes += len;
-
- skb = dev_alloc_skb(len);
- if (skb == NULL) {
- printk(KERN_NOTICE "%s: Memory squeeze, dropping packet.\n",
- dev->name);
- dev->stats.rx_dropped++;
- return;
- }
- /* copy the data */
- skb_put_data(skb, buf, len);
-
- /* determine the used protocol */
- skb->protocol = eth_type_trans(skb, dev);
-
- dev->stats.rx_packets++; /* adjust packet count */
-
- netif_rx(skb);
-} /* hysdn_rx_netpkt */
-
-/*****************************************************/
-/* return the pointer to a network packet to be sent */
-/*****************************************************/
-struct sk_buff *
-hysdn_tx_netget(hysdn_card *card)
-{
- struct net_local *lp = card->netif;
-
- if (!lp)
- return (NULL); /* non existing device */
-
- if (!lp->sk_count)
- return (NULL); /* nothing available */
-
- return (lp->skbs[lp->out_idx]); /* next packet to send */
-} /* hysdn_tx_netget */
-
-static const struct net_device_ops hysdn_netdev_ops = {
- .ndo_open = net_open,
- .ndo_stop = net_close,
- .ndo_start_xmit = net_send_packet,
- .ndo_set_mac_address = eth_mac_addr,
- .ndo_validate_addr = eth_validate_addr,
-};
-
-
-/*****************************************************************************/
-/* hysdn_net_create creates a new net device for the given card. If a device */
-/* already exists, it will be deleted and a new one created. The return value */
-/* 0 announces success, else a negative error code will be returned. */
-/*****************************************************************************/
-int
-hysdn_net_create(hysdn_card *card)
-{
- struct net_device *dev;
- int i;
- struct net_local *lp;
-
- if (!card) {
- printk(KERN_WARNING "No card-pt in hysdn_net_create!\n");
-		return (-EINVAL);	/* missing card pointer is a caller bug, not OOM */
- }
- hysdn_net_release(card); /* release an existing net device */
-
- dev = alloc_etherdev(sizeof(struct net_local));
- if (!dev) {
- printk(KERN_WARNING "HYSDN: unable to allocate mem\n");
- return (-ENOMEM);
- }
-
- lp = netdev_priv(dev);
- lp->dev = dev;
-
- dev->netdev_ops = &hysdn_netdev_ops;
-	spin_lock_init(&lp->lock);
-
- /* initialise necessary or informing fields */
- dev->base_addr = card->iobase; /* IO address */
- dev->irq = card->irq; /* irq */
-
- if ((i = register_netdev(dev))) {
- printk(KERN_WARNING "HYSDN: unable to create network device\n");
- free_netdev(dev);
- return (i);
- }
- dev->ml_priv = card; /* remember pointer to own data structure */
- card->netif = dev; /* setup the local pointer */
-
- if (card->debug_flags & LOG_NET_INIT)
- hysdn_addlog(card, "network device created");
- return 0; /* and return success */
-} /* hysdn_net_create */
-
-/***************************************************************************/
-/* hysdn_net_release deletes the net device for the given card. The return */
-/* value 0 announces success, else a negative error code will be returned. */
-/***************************************************************************/
-int
-hysdn_net_release(hysdn_card *card)
-{
- struct net_device *dev = card->netif;
-
- if (!dev)
- return (0); /* non existing */
-
- card->netif = NULL; /* clear out pointer */
- net_close(dev);
-
-	flush_tx_buffers(netdev_priv(dev));	/* empty buffers */
-
- unregister_netdev(dev); /* release the device */
- free_netdev(dev); /* release the memory allocated */
- if (card->debug_flags & LOG_NET_INIT)
- hysdn_addlog(card, "network device deleted");
-
- return (0); /* always successful */
-} /* hysdn_net_release */
-
-/*****************************************************************************/
-/* hysdn_net_getname returns a pointer to the name of the network interface. */
-/* If the interface does not exist, a "-" is returned.                       */
-/*****************************************************************************/
-char *
-hysdn_net_getname(hysdn_card *card)
-{
- struct net_device *dev = card->netif;
-
- if (!dev)
- return ("-"); /* non existing */
-
- return (dev->name);
-} /* hysdn_net_getname */
diff --git a/drivers/staging/isdn/hysdn/hysdn_pof.h b/drivers/staging/isdn/hysdn/hysdn_pof.h
deleted file mode 100644
index f63f5fa59d7e..000000000000
--- a/drivers/staging/isdn/hysdn/hysdn_pof.h
+++ /dev/null
@@ -1,78 +0,0 @@
-/* $Id: hysdn_pof.h,v 1.2.6.1 2001/09/23 22:24:54 kai Exp $
- *
- * Linux driver for HYSDN cards, definitions used for handling pof-files.
- *
- * Author Werner Cornelius (werner@titro.de) for Hypercope GmbH
- * Copyright 1999 by Werner Cornelius (werner@titro.de)
- *
- * This software may be used and distributed according to the terms
- * of the GNU General Public License, incorporated herein by reference.
- *
- */
-
-/************************/
-/* POF specific defines */
-/************************/
-#define BOOT_BUF_SIZE 0x1000	/* =4096, may move to another header file */
-#define CRYPT_FEEDTERM 0x8142
-#define CRYPT_STARTTERM 0x81a5
-/* max. timeout time in seconds
- * from end of booting to POF is ready
- */
-#define POF_READY_TIME_OUT_SEC 10
-
-/**********************************/
-/* defines for 1.stage boot image */
-/**********************************/
-
-/* the POF file record containing the boot loader image
- * has 2 pages of 16 KB each:
- * 1. page contains the high 16-bit part of the 32-bit E1 words
- * 2. page contains the low 16-bit part of the 32-bit E1 words
- *
- * In each 16KB page we assume the start of the boot loader code
- * in the highest 2KB part (at offset 0x3800);
- * the rest (0x0000..0x37FF) is assumed to contain 0 bytes.
- */
-
-#define POF_BOOT_LOADER_PAGE_SIZE 0x4000 /* =16384U */
-#define POF_BOOT_LOADER_TOTAL_SIZE (2U * POF_BOOT_LOADER_PAGE_SIZE)
-
-#define POF_BOOT_LOADER_CODE_SIZE 0x0800 /* =2KB =2048U */
-
-/* offset in boot page, where loader code may start */
-/* =0x3800= 14336U */
-#define POF_BOOT_LOADER_OFF_IN_PAGE (POF_BOOT_LOADER_PAGE_SIZE-POF_BOOT_LOADER_CODE_SIZE)
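
As the layout notes above describe, each 32-bit E1 word of the boot loader is split across the two 16 KB pages, high halves in the first page and low halves in the second. A hedged sketch of recombining the halves (combine_e1_words is a hypothetical helper; the driver's real boot path lives in hysdn_boot.c and boardergo.c, and the high-before-low word order is the assumption stated in the comment above):

#include <stdint.h>
#include <stddef.h>

/* page_hi: high 16-bit halves, page_lo: low 16-bit halves;
 * each page is POF_BOOT_LOADER_PAGE_SIZE bytes = 8192 16-bit entries */
static void combine_e1_words(const uint16_t *page_hi, const uint16_t *page_lo,
			     uint32_t *out, size_t n_words)
{
	size_t i;

	for (i = 0; i < n_words; i++)
		out[i] = ((uint32_t)page_hi[i] << 16) | page_lo[i];
}
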
-
-
-/*--------------------------------------POF file record structs------------*/
-typedef struct PofFileHdr_tag { /* Pof file header */
- /*00 */ unsigned long Magic __attribute__((packed));
- /*04 */ unsigned long N_PofRecs __attribute__((packed));
-/*08 */
-} tPofFileHdr;
-
-typedef struct PofRecHdr_tag { /* Pof record header */
- /*00 */ unsigned short PofRecId __attribute__((packed));
- /*02 */ unsigned long PofRecDataLen __attribute__((packed));
-/*06 */
-} tPofRecHdr;
-
-typedef struct PofTimeStamp_tag {
- /*00 */ unsigned long UnixTime __attribute__((packed));
- /*04 */ unsigned char DateTimeText[0x28];
- /* =40 */
-/*2C */
-} tPofTimeStamp;
-
-/* tPofFileHdr.Magic value: */
-#define TAGFILEMAGIC 0x464F501AUL
-/* tPofRecHdr.PofRecId values: */
-#define TAG_ABSDATA 0x1000 /* abs. data */
-#define TAG_BOOTDTA 0x1001 /* boot data */
-#define TAG_COMMENT 0x0020
-#define TAG_SYSCALL 0x0021
-#define TAG_FLOWCTRL 0x0022
-#define TAG_TIMESTMP 0x0010 /* date/time stamp of version */
-#define TAG_CABSDATA 0x1100 /* crypted abs. data */
-#define TAG_CBOOTDTA 0x1101 /* crypted boot data */
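
Taken together, these declarations describe a POF image as a tPofFileHdr (8 bytes, per the /*08 */ offset note) followed by N_PofRecs records, each a 6-byte tPofRecHdr plus PofRecDataLen payload bytes. A minimal user-space sketch of walking that layout (pof_walk is a hypothetical helper, not part of the driver; it assumes packed little-endian fields with 32-bit "unsigned long", as the offset comments suggest):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define POF_FILE_HDR_SIZE 8	/* packed tPofFileHdr */
#define POF_REC_HDR_SIZE  6	/* packed tPofRecHdr */

static int pof_walk(const unsigned char *img, size_t len)
{
	uint32_t magic, n_recs, rec_len;
	uint16_t rec_id;
	size_t off = POF_FILE_HDR_SIZE;
	uint32_t i;

	if (len < POF_FILE_HDR_SIZE)
		return -1;
	memcpy(&magic, img, 4);
	memcpy(&n_recs, img + 4, 4);
	if (magic != 0x464F501AUL)		/* TAGFILEMAGIC */
		return -1;

	for (i = 0; i < n_recs; i++) {
		if (off + POF_REC_HDR_SIZE > len)
			return -1;		/* truncated image */
		memcpy(&rec_id, img + off, 2);
		memcpy(&rec_len, img + off + 2, 4);
		off += POF_REC_HDR_SIZE;
		if (off + rec_len > len)
			return -1;
		printf("record id 0x%04x, %u data bytes\n",
		       rec_id, (unsigned)rec_len);
		off += rec_len;			/* skip payload */
	}
	return 0;
}
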
diff --git a/drivers/staging/isdn/hysdn/hysdn_procconf.c b/drivers/staging/isdn/hysdn/hysdn_procconf.c
deleted file mode 100644
index 48afd9f5316e..000000000000
--- a/drivers/staging/isdn/hysdn/hysdn_procconf.c
+++ /dev/null
@@ -1,411 +0,0 @@
-/* $Id: hysdn_procconf.c,v 1.8.6.4 2001/09/23 22:24:54 kai Exp $
- *
- * Linux driver for HYSDN cards, /proc/net filesystem dir and conf functions.
- *
- * written by Werner Cornelius (werner@titro.de) for Hypercope GmbH
- *
- * Copyright 1999 by Werner Cornelius (werner@titro.de)
- *
- * This software may be used and distributed according to the terms
- * of the GNU General Public License, incorporated herein by reference.
- *
- */
-
-#include <linux/cred.h>
-#include <linux/module.h>
-#include <linux/poll.h>
-#include <linux/proc_fs.h>
-#include <linux/pci.h>
-#include <linux/slab.h>
-#include <linux/mutex.h>
-#include <net/net_namespace.h>
-
-#include "hysdn_defs.h"
-
-static DEFINE_MUTEX(hysdn_conf_mutex);
-
-#define INFO_OUT_LEN 80 /* length of info line including lf */
-
-/********************************************************/
-/* defines and data structure for conf write operations */
-/********************************************************/
-#define CONF_STATE_DETECT 0 /* waiting for detect */
-#define CONF_STATE_CONF 1 /* writing config data */
-#define CONF_STATE_POF 2 /* writing pof data */
-#define CONF_LINE_LEN 255 /* 255 chars max */
-
-struct conf_writedata {
- hysdn_card *card; /* card the device is connected to */
-	int buf_size;		/* current number of bytes in the buffer */
-	int needed_size;	/* needed size when reading pof */
-	int state;		/* current interface state from above constants */
- unsigned char conf_line[CONF_LINE_LEN]; /* buffered conf line */
- unsigned short channel; /* active channel number */
- unsigned char *pof_buffer; /* buffer when writing pof */
-};
-
-/***********************************************************************/
-/* process_line parses one config line and transfers it to the card if */
-/* necessary. */
-/* If the return value is negative, an error occurred.                 */
-/***********************************************************************/
-static int
-process_line(struct conf_writedata *cnf)
-{
- unsigned char *cp = cnf->conf_line;
- int i;
-
- if (cnf->card->debug_flags & LOG_CNF_LINE)
- hysdn_addlog(cnf->card, "conf line: %s", cp);
-
- if (*cp == '-') { /* option */
- cp++; /* point to option char */
-
- if (*cp++ != 'c')
- return (0); /* option unknown or used */
- i = 0; /* start value for channel */
- while ((*cp <= '9') && (*cp >= '0'))
- i = i * 10 + *cp++ - '0'; /* get decimal number */
- if (i > 65535) {
- if (cnf->card->debug_flags & LOG_CNF_MISC)
- hysdn_addlog(cnf->card, "conf channel invalid %d", i);
- return (-ERR_INV_CHAN); /* invalid channel */
- }
- cnf->channel = i & 0xFFFF; /* set new channel number */
- return (0); /* success */
- } /* option */
- if (*cp == '*') { /* line to send */
- if (cnf->card->debug_flags & LOG_CNF_DATA)
- hysdn_addlog(cnf->card, "conf chan=%d %s", cnf->channel, cp);
- return (hysdn_tx_cfgline(cnf->card, cnf->conf_line + 1,
- cnf->channel)); /* send the line without * */
- } /* line to send */
- return (0);
-} /* process_line */
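
For illustration, two hypothetical input lines as process_line() sees them: an option line that selects a channel, and a data line that is forwarded to the card with the leading '*' stripped:

	-c23             select channel 23 for the following lines
	*<config text>   sent on channel 23 via hysdn_tx_cfgline()

Any other line is accepted and silently ignored.
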
-
-/***********************************/
-/* conf file operations and tables */
-/***********************************/
-
-/****************************************************/
-/* write conf file -> boot or send cfg line to card */
-/****************************************************/
-static ssize_t
-hysdn_conf_write(struct file *file, const char __user *buf, size_t count, loff_t *off)
-{
- struct conf_writedata *cnf;
- int i;
- unsigned char ch, *cp;
-
- if (!count)
- return (0); /* nothing to handle */
-
- if (!(cnf = file->private_data))
- return (-EFAULT); /* should never happen */
-
- if (cnf->state == CONF_STATE_DETECT) { /* auto detect cnf or pof data */
- if (copy_from_user(&ch, buf, 1)) /* get first char for detect */
- return (-EFAULT);
-
- if (ch == 0x1A) {
- /* we detected a pof file */
- if ((cnf->needed_size = pof_write_open(cnf->card, &cnf->pof_buffer)) <= 0)
- return (cnf->needed_size); /* an error occurred -> exit */
- cnf->buf_size = 0; /* buffer is empty */
- cnf->state = CONF_STATE_POF; /* new state */
- } else {
- /* conf data has been detected */
- cnf->buf_size = 0; /* buffer is empty */
- cnf->state = CONF_STATE_CONF; /* requested conf data write */
- if (cnf->card->state != CARD_STATE_RUN)
- return (-ERR_NOT_BOOTED);
- cnf->conf_line[CONF_LINE_LEN - 1] = 0; /* limit string length */
- cnf->channel = 4098; /* default channel for output */
- }
- } /* state was auto detect */
- if (cnf->state == CONF_STATE_POF) { /* pof write active */
- i = cnf->needed_size - cnf->buf_size; /* bytes still missing for write */
- if (i <= 0)
- return (-EINVAL); /* size error handling pof */
-
- if (i < count)
- count = i; /* limit requested number of bytes */
- if (copy_from_user(cnf->pof_buffer + cnf->buf_size, buf, count))
- return (-EFAULT); /* error while copying */
- cnf->buf_size += count;
-
- if (cnf->needed_size == cnf->buf_size) {
- cnf->needed_size = pof_write_buffer(cnf->card, cnf->buf_size); /* write data */
- if (cnf->needed_size <= 0) {
- cnf->card->state = CARD_STATE_BOOTERR; /* show boot error */
- return (cnf->needed_size); /* an error occurred */
- }
- cnf->buf_size = 0; /* buffer is empty again */
- }
- }
- /* pof write active */
- else { /* conf write active */
-
- if (cnf->card->state != CARD_STATE_RUN) {
- if (cnf->card->debug_flags & LOG_CNF_MISC)
- hysdn_addlog(cnf->card, "cnf write denied -> not booted");
- return (-ERR_NOT_BOOTED);
- }
- i = (CONF_LINE_LEN - 1) - cnf->buf_size; /* bytes available in buffer */
- if (i > 0) {
- /* copy remaining bytes into buffer */
-
- if (count > i)
- count = i; /* limit transfer */
- if (copy_from_user(cnf->conf_line + cnf->buf_size, buf, count))
- return (-EFAULT); /* error while copying */
-
- i = count; /* number of chars in buffer */
- cp = cnf->conf_line + cnf->buf_size;
- while (i) {
- /* search for end of line */
- if ((*cp < ' ') && (*cp != 9))
- break; /* end of line found */
- cp++;
- i--;
- } /* search for end of line */
-
- if (i) {
- /* delimiter found */
- *cp++ = 0; /* string termination */
- count -= (i - 1); /* subtract remaining bytes from count */
- while ((i) && (*cp < ' ') && (*cp != 9)) {
- i--; /* discard next char */
- count++; /* mark as read */
- cp++; /* next char */
- }
- cnf->buf_size = 0; /* buffer is empty after transfer */
- if ((i = process_line(cnf)) < 0) /* handle the line */
- count = i; /* return the error */
- }
- /* delimiter found */
- else {
- cnf->buf_size += count; /* add chars to string */
- if (cnf->buf_size >= CONF_LINE_LEN - 1) {
- if (cnf->card->debug_flags & LOG_CNF_MISC)
- hysdn_addlog(cnf->card, "cnf line too long %d chars pos %d", cnf->buf_size, count);
- return (-ERR_CONF_LONG);
- }
- } /* not delimited */
-
- }
- /* copy remaining bytes into buffer */
- else {
- if (cnf->card->debug_flags & LOG_CNF_MISC)
- hysdn_addlog(cnf->card, "cnf line too long");
- return (-ERR_CONF_LONG);
- }
- } /* conf write active */
-
- return (count);
-} /* hysdn_conf_write */
-
-/*******************************************/
-/* read conf file -> output card info data */
-/*******************************************/
-static ssize_t
-hysdn_conf_read(struct file *file, char __user *buf, size_t count, loff_t *off)
-{
- char *cp;
-
- if (!(file->f_mode & FMODE_READ))
- return -EPERM; /* no permission to read */
-
- if (!(cp = file->private_data))
- return -EFAULT; /* should never happen */
-
- return simple_read_from_buffer(buf, count, off, cp, strlen(cp));
-} /* hysdn_conf_read */
-
-/******************/
-/* open conf file */
-/******************/
-static int
-hysdn_conf_open(struct inode *ino, struct file *filep)
-{
- hysdn_card *card;
- struct conf_writedata *cnf;
- char *cp, *tmp;
-
- /* now search the addressed card */
- mutex_lock(&hysdn_conf_mutex);
- card = PDE_DATA(ino);
- if (card->debug_flags & (LOG_PROC_OPEN | LOG_PROC_ALL))
- hysdn_addlog(card, "config open for uid=%d gid=%d mode=0x%x",
- filep->f_cred->fsuid, filep->f_cred->fsgid,
- filep->f_mode);
-
- if ((filep->f_mode & (FMODE_READ | FMODE_WRITE)) == FMODE_WRITE) {
- /* write only access -> write boot file or conf line */
-
- if (!(cnf = kmalloc(sizeof(struct conf_writedata), GFP_KERNEL))) {
- mutex_unlock(&hysdn_conf_mutex);
-			return (-ENOMEM);	/* allocation failed */
- }
- cnf->card = card;
- cnf->buf_size = 0; /* nothing buffered */
- cnf->state = CONF_STATE_DETECT; /* start auto detect */
- filep->private_data = cnf;
-
- } else if ((filep->f_mode & (FMODE_READ | FMODE_WRITE)) == FMODE_READ) {
- /* read access -> output card info data */
-
- if (!(tmp = kmalloc(INFO_OUT_LEN * 2 + 2, GFP_KERNEL))) {
- mutex_unlock(&hysdn_conf_mutex);
-			return (-ENOMEM);	/* out of memory */
- }
- filep->private_data = tmp; /* start of string */
-
- /* first output a headline */
- sprintf(tmp, "id bus slot type irq iobase dp-mem b-chans fax-chans state device");
- cp = tmp; /* start of string */
- while (*cp)
- cp++;
- while (((cp - tmp) % (INFO_OUT_LEN + 1)) != INFO_OUT_LEN)
- *cp++ = ' ';
- *cp++ = '\n';
-
- /* and now the data */
- sprintf(cp, "%d %3d %4d %4d %3d 0x%04x 0x%08lx %7d %9d %3d %s",
- card->myid,
- card->bus,
- PCI_SLOT(card->devfn),
- card->brdtype,
- card->irq,
- card->iobase,
- card->membase,
- card->bchans,
- card->faxchans,
- card->state,
- hysdn_net_getname(card));
- while (*cp)
- cp++;
- while (((cp - tmp) % (INFO_OUT_LEN + 1)) != INFO_OUT_LEN)
- *cp++ = ' ';
- *cp++ = '\n';
- *cp = 0; /* end of string */
- } else { /* simultaneous read/write access forbidden ! */
- mutex_unlock(&hysdn_conf_mutex);
- return (-EPERM); /* no permission this time */
- }
- mutex_unlock(&hysdn_conf_mutex);
- return nonseekable_open(ino, filep);
-} /* hysdn_conf_open */
-
-/***************************/
-/* close a config file. */
-/***************************/
-static int
-hysdn_conf_close(struct inode *ino, struct file *filep)
-{
- hysdn_card *card;
- struct conf_writedata *cnf;
- int retval = 0;
-
- mutex_lock(&hysdn_conf_mutex);
- card = PDE_DATA(ino);
- if (card->debug_flags & (LOG_PROC_OPEN | LOG_PROC_ALL))
- hysdn_addlog(card, "config close for uid=%d gid=%d mode=0x%x",
- filep->f_cred->fsuid, filep->f_cred->fsgid,
- filep->f_mode);
-
- if ((filep->f_mode & (FMODE_READ | FMODE_WRITE)) == FMODE_WRITE) {
- /* write only access -> write boot file or conf line */
- if (filep->private_data) {
- cnf = filep->private_data;
-
- if (cnf->state == CONF_STATE_POF)
- retval = pof_write_close(cnf->card); /* close the pof write */
- kfree(filep->private_data); /* free allocated memory for buffer */
-
- } /* handle write private data */
- } else if ((filep->f_mode & (FMODE_READ | FMODE_WRITE)) == FMODE_READ) {
- /* read access -> output card info data */
-
- kfree(filep->private_data); /* release memory */
- }
- mutex_unlock(&hysdn_conf_mutex);
- return (retval);
-} /* hysdn_conf_close */
-
-/******************************************************/
-/* table for conf filesystem functions defined above. */
-/******************************************************/
-static const struct file_operations conf_fops =
-{
- .owner = THIS_MODULE,
- .llseek = no_llseek,
- .read = hysdn_conf_read,
- .write = hysdn_conf_write,
- .open = hysdn_conf_open,
- .release = hysdn_conf_close,
-};
-
-/*****************************/
-/* hysdn subdir in /proc/net */
-/*****************************/
-struct proc_dir_entry *hysdn_proc_entry = NULL;
-
-/*******************************************************************************/
-/* hysdn_procconf_init is called when the module is loaded and after the cards */
-/* have been detected. The needed proc dir and card config files are created. */
-/* The log init is called last.                                                */
-/*******************************************************************************/
-int
-hysdn_procconf_init(void)
-{
- hysdn_card *card;
- unsigned char conf_name[20];
-
- hysdn_proc_entry = proc_mkdir(PROC_SUBDIR_NAME, init_net.proc_net);
- if (!hysdn_proc_entry) {
- printk(KERN_ERR "HYSDN: unable to create hysdn subdir\n");
- return (-1);
- }
- card = card_root; /* point to first card */
- while (card) {
-
- sprintf(conf_name, "%s%d", PROC_CONF_BASENAME, card->myid);
- if ((card->procconf = (void *) proc_create_data(conf_name,
- S_IFREG | S_IRUGO | S_IWUSR,
- hysdn_proc_entry,
- &conf_fops,
- card)) != NULL) {
- hysdn_proclog_init(card); /* init the log file entry */
- }
- card = card->next; /* next entry */
- }
-
- printk(KERN_NOTICE "HYSDN: procfs initialised\n");
- return 0;
-} /* hysdn_procconf_init */
-
-/*************************************************************************************/
-/* hysdn_procconf_release is called when the module is unloaded and before the cards */
-/* resources are released. The module counter is assumed to be 0 ! */
-/*************************************************************************************/
-void
-hysdn_procconf_release(void)
-{
- hysdn_card *card;
- unsigned char conf_name[20];
-
- card = card_root; /* start with first card */
- while (card) {
-
- sprintf(conf_name, "%s%d", PROC_CONF_BASENAME, card->myid);
- if (card->procconf)
- remove_proc_entry(conf_name, hysdn_proc_entry);
-
-		hysdn_proclog_release(card);	/* release the log file entry */
-
- card = card->next; /* point to next card */
- }
-
- remove_proc_entry(PROC_SUBDIR_NAME, init_net.proc_net);
-}
diff --git a/drivers/staging/isdn/hysdn/hysdn_proclog.c b/drivers/staging/isdn/hysdn/hysdn_proclog.c
deleted file mode 100644
index 6e898b90e86e..000000000000
--- a/drivers/staging/isdn/hysdn/hysdn_proclog.c
+++ /dev/null
@@ -1,357 +0,0 @@
-/* $Id: hysdn_proclog.c,v 1.9.6.3 2001/09/23 22:24:54 kai Exp $
- *
- * Linux driver for HYSDN cards, /proc/net filesystem log functions.
- *
- * Author Werner Cornelius (werner@titro.de) for Hypercope GmbH
- * Copyright 1999 by Werner Cornelius (werner@titro.de)
- *
- * This software may be used and distributed according to the terms
- * of the GNU General Public License, incorporated herein by reference.
- *
- */
-
-#include <linux/module.h>
-#include <linux/poll.h>
-#include <linux/proc_fs.h>
-#include <linux/sched.h>
-#include <linux/slab.h>
-#include <linux/mutex.h>
-#include <linux/kernel.h>
-
-#include "hysdn_defs.h"
-
-/* the proc subdir for the interface is defined in the procconf module */
-extern struct proc_dir_entry *hysdn_proc_entry;
-
-static DEFINE_MUTEX(hysdn_log_mutex);
-static void put_log_buffer(hysdn_card *card, char *cp);
-
-/*************************************************/
-/* structure keeping ascii log for device output */
-/*************************************************/
-struct log_data {
- struct log_data *next;
- unsigned long usage_cnt;/* number of files still to work */
- void *proc_ctrl; /* pointer to own control procdata structure */
- char log_start[2]; /* log string start (final len aligned by size) */
-};
-
-/**********************************************/
-/* structure holding proc entries for one card */
-/**********************************************/
-struct procdata {
- struct proc_dir_entry *log; /* log entry */
- char log_name[15]; /* log filename */
- struct log_data *log_head, *log_tail; /* head and tail for queue */
- int if_used; /* open count for interface */
- unsigned char logtmp[LOG_MAX_LINELEN];
- wait_queue_head_t rd_queue;
-};
-
-
-/**********************************************/
-/* log function for cards error log interface */
-/**********************************************/
-void
-hysdn_card_errlog(hysdn_card *card, tErrLogEntry *logp, int maxsize)
-{
- char buf[ERRLOG_TEXT_SIZE + 40];
-
- sprintf(buf, "LOG 0x%08lX 0x%08lX : %s\n", logp->ulErrType, logp->ulErrSubtype, logp->ucText);
- put_log_buffer(card, buf); /* output the string */
-} /* hysdn_card_errlog */
-
-/***************************************************/
-/* Log function using format specifiers for output */
-/***************************************************/
-void
-hysdn_addlog(hysdn_card *card, char *fmt, ...)
-{
- struct procdata *pd = card->proclog;
- char *cp;
- va_list args;
-
- if (!pd)
- return; /* log structure non existent */
-
- cp = pd->logtmp;
- cp += sprintf(cp, "HYSDN: card %d ", card->myid);
-
- va_start(args, fmt);
- cp += vsprintf(cp, fmt, args);
- va_end(args);
- *cp++ = '\n';
- *cp = 0;
-
- if (card->debug_flags & DEB_OUT_SYSLOG)
- printk(KERN_INFO "%s", pd->logtmp);
- else
- put_log_buffer(card, pd->logtmp);
-
-} /* hysdn_addlog */
-
-/********************************************/
-/* put a log buffer into the log queue. */
-/* The buffer is kept until all files   */
-/* opened for reading have received its */
-/* contents. Unused buffers are freed.  */
-/********************************************/
-static void
-put_log_buffer(hysdn_card *card, char *cp)
-{
- struct log_data *ib;
- struct procdata *pd = card->proclog;
- unsigned long flags;
-
- if (!pd)
- return;
- if (!cp)
- return;
- if (!*cp)
- return;
- if (pd->if_used <= 0)
- return; /* no open file for read */
-
- if (!(ib = kmalloc(sizeof(struct log_data) + strlen(cp), GFP_ATOMIC)))
- return; /* no memory */
- strcpy(ib->log_start, cp); /* set output string */
- ib->next = NULL;
- ib->proc_ctrl = pd; /* point to own control structure */
- spin_lock_irqsave(&card->hysdn_lock, flags);
- ib->usage_cnt = pd->if_used;
- if (!pd->log_head)
- pd->log_head = ib; /* new head */
- else
- pd->log_tail->next = ib; /* follows existing messages */
- pd->log_tail = ib; /* new tail */
-
-	/* delete old entries */
- while (pd->log_head->next) {
- if ((pd->log_head->usage_cnt <= 0) &&
- (pd->log_head->next->usage_cnt <= 0)) {
- ib = pd->log_head;
- pd->log_head = pd->log_head->next;
- kfree(ib);
- } else {
- break;
- }
- } /* pd->log_head->next */
-
- spin_unlock_irqrestore(&card->hysdn_lock, flags);
-
- wake_up_interruptible(&(pd->rd_queue)); /* announce new entry */
-} /* put_log_buffer */
-
-
-/******************************/
-/* file operations and tables */
-/******************************/
-
-/****************************************/
-/* write log file -> set log level bits */
-/****************************************/
-static ssize_t
-hysdn_log_write(struct file *file, const char __user *buf, size_t count, loff_t *off)
-{
- int rc;
- hysdn_card *card = file->private_data;
-
- rc = kstrtoul_from_user(buf, count, 0, &card->debug_flags);
- if (rc < 0)
- return rc;
- hysdn_addlog(card, "debug set to 0x%lx", card->debug_flags);
- return (count);
-} /* hysdn_log_write */
-
-/******************/
-/* read log file */
-/******************/
-static ssize_t
-hysdn_log_read(struct file *file, char __user *buf, size_t count, loff_t *off)
-{
- struct log_data *inf;
- int len;
- hysdn_card *card = PDE_DATA(file_inode(file));
-
- if (!(inf = *((struct log_data **) file->private_data))) {
- struct procdata *pd = card->proclog;
- if (file->f_flags & O_NONBLOCK)
- return (-EAGAIN);
-
- wait_event_interruptible(pd->rd_queue, (inf =
- *((struct log_data **) file->private_data)));
- }
- if (!inf)
- return (0);
-
- inf->usage_cnt--; /* new usage count */
- file->private_data = &inf->next; /* next structure */
- if ((len = strlen(inf->log_start)) <= count) {
- if (copy_to_user(buf, inf->log_start, len))
- return -EFAULT;
- *off += len;
- return (len);
- }
- return (0);
-} /* hysdn_log_read */
-
-/******************/
-/* open log file */
-/******************/
-static int
-hysdn_log_open(struct inode *ino, struct file *filep)
-{
- hysdn_card *card = PDE_DATA(ino);
-
- mutex_lock(&hysdn_log_mutex);
- if ((filep->f_mode & (FMODE_READ | FMODE_WRITE)) == FMODE_WRITE) {
- /* write only access -> write log level only */
- filep->private_data = card; /* remember our own card */
- } else if ((filep->f_mode & (FMODE_READ | FMODE_WRITE)) == FMODE_READ) {
- struct procdata *pd = card->proclog;
- unsigned long flags;
-
- /* read access -> log/debug read */
- spin_lock_irqsave(&card->hysdn_lock, flags);
- pd->if_used++;
- if (pd->log_head)
- filep->private_data = &pd->log_tail->next;
- else
- filep->private_data = &pd->log_head;
- spin_unlock_irqrestore(&card->hysdn_lock, flags);
- } else { /* simultaneous read/write access forbidden ! */
- mutex_unlock(&hysdn_log_mutex);
- return (-EPERM); /* no permission this time */
- }
- mutex_unlock(&hysdn_log_mutex);
- return nonseekable_open(ino, filep);
-} /* hysdn_log_open */
-
-/*******************************************************************************/
-/* close a cardlog file. If the file was opened write-only, it was only used  */
-/* to set the debug level and nothing more needs to be done here.             */
-/* Otherwise the file is handled as log output: the interface usage count is  */
-/* decremented and the remaining buffers are marked as read once more. If the */
-/* file was the last reader, all queued buffers are freed.                    */
-/*******************************************************************************/
-static int
-hysdn_log_close(struct inode *ino, struct file *filep)
-{
- struct log_data *inf;
- struct procdata *pd;
- hysdn_card *card;
- int retval = 0;
-
- mutex_lock(&hysdn_log_mutex);
- if ((filep->f_mode & (FMODE_READ | FMODE_WRITE)) == FMODE_WRITE) {
- /* write only access -> write debug level written */
- retval = 0; /* success */
- } else {
- /* read access -> log/debug read, mark one further file as closed */
-
- inf = *((struct log_data **) filep->private_data); /* get first log entry */
- if (inf)
- pd = (struct procdata *) inf->proc_ctrl; /* still entries there */
- else {
- /* no info available -> search card */
- card = PDE_DATA(file_inode(filep));
- pd = card->proclog; /* pointer to procfs log */
- }
- if (pd)
- pd->if_used--; /* decrement interface usage count by one */
-
- while (inf) {
- inf->usage_cnt--; /* decrement usage count for buffers */
- inf = inf->next;
- }
-
- if (pd)
- if (pd->if_used <= 0) /* delete buffers if last file closed */
- while (pd->log_head) {
- inf = pd->log_head;
- pd->log_head = pd->log_head->next;
- kfree(inf);
- }
- } /* read access */
- mutex_unlock(&hysdn_log_mutex);
-
- return (retval);
-} /* hysdn_log_close */
-
-/*************************************************/
-/* select/poll routine to be able using select() */
-/*************************************************/
-static __poll_t
-hysdn_log_poll(struct file *file, poll_table *wait)
-{
- __poll_t mask = 0;
- hysdn_card *card = PDE_DATA(file_inode(file));
- struct procdata *pd = card->proclog;
-
- if ((file->f_mode & (FMODE_READ | FMODE_WRITE)) == FMODE_WRITE)
- return (mask); /* no polling for write supported */
-
- poll_wait(file, &(pd->rd_queue), wait);
-
- if (*((struct log_data **) file->private_data))
- mask |= EPOLLIN | EPOLLRDNORM;
-
- return mask;
-} /* hysdn_log_poll */
-
-/******************************************************/
-/* table for log filesystem functions defined above.  */
-/******************************************************/
-static const struct file_operations log_fops =
-{
- .owner = THIS_MODULE,
- .llseek = no_llseek,
- .read = hysdn_log_read,
- .write = hysdn_log_write,
- .poll = hysdn_log_poll,
- .open = hysdn_log_open,
- .release = hysdn_log_close,
-};
-
-
-/***********************************************************************************/
-/* hysdn_proclog_init is called when the module is loaded, after the cards'       */
-/* conf files have been created.                                                  */
-/***********************************************************************************/
-int
-hysdn_proclog_init(hysdn_card *card)
-{
- struct procdata *pd;
-
- /* create a cardlog proc entry */
-
- if ((pd = kzalloc(sizeof(struct procdata), GFP_KERNEL)) != NULL) {
- sprintf(pd->log_name, "%s%d", PROC_LOG_BASENAME, card->myid);
- pd->log = proc_create_data(pd->log_name,
- S_IFREG | S_IRUGO | S_IWUSR, hysdn_proc_entry,
- &log_fops, card);
-
- init_waitqueue_head(&(pd->rd_queue));
-
- card->proclog = (void *) pd; /* remember procfs structure */
- }
- return (0);
-} /* hysdn_proclog_init */
-
-/************************************************************************************/
-/* hysdn_proclog_release is called when the module is unloaded and before the cards */
-/* conf file is released */
-/* The module counter is assumed to be 0 ! */
-/************************************************************************************/
-void
-hysdn_proclog_release(hysdn_card *card)
-{
- struct procdata *pd;
-
- if ((pd = (struct procdata *) card->proclog) != NULL) {
- if (pd->log)
- remove_proc_entry(pd->log_name, hysdn_proc_entry);
- kfree(pd); /* release memory */
- card->proclog = NULL;
- }
-} /* hysdn_proclog_release */
diff --git a/drivers/staging/isdn/hysdn/hysdn_sched.c b/drivers/staging/isdn/hysdn/hysdn_sched.c
deleted file mode 100644
index 31d7c1415543..000000000000
--- a/drivers/staging/isdn/hysdn/hysdn_sched.c
+++ /dev/null
@@ -1,197 +0,0 @@
-/* $Id: hysdn_sched.c,v 1.5.6.4 2001/11/06 21:58:19 kai Exp $
- *
- * Linux driver for HYSDN cards
- * scheduler routines for handling exchange card <-> pc.
- *
- * Author Werner Cornelius (werner@titro.de) for Hypercope GmbH
- * Copyright 1999 by Werner Cornelius (werner@titro.de)
- *
- * This software may be used and distributed according to the terms
- * of the GNU General Public License, incorporated herein by reference.
- *
- */
-
-#include <linux/signal.h>
-#include <linux/kernel.h>
-#include <linux/ioport.h>
-#include <linux/interrupt.h>
-#include <linux/delay.h>
-#include <asm/io.h>
-
-#include "hysdn_defs.h"
-
-/*****************************************************************************/
-/* hysdn_sched_rx is called from the cards handler to announce new data is */
-/* available from the card. The routine has to handle the data and return */
-/* with a nonzero code if the data could be processed (or even thrown away); */
-/* if no room to buffer the data is available, a zero return tells the card  */
-/* to keep the data until later.                                             */
-/*****************************************************************************/
-int
-hysdn_sched_rx(hysdn_card *card, unsigned char *buf, unsigned short len,
- unsigned short chan)
-{
-
- switch (chan) {
- case CHAN_NDIS_DATA:
- if (hynet_enable & (1 << card->myid)) {
- /* give packet to network handler */
- hysdn_rx_netpkt(card, buf, len);
- }
- break;
-
- case CHAN_ERRLOG:
- hysdn_card_errlog(card, (tErrLogEntry *) buf, len);
- if (card->err_log_state == ERRLOG_STATE_ON)
- card->err_log_state = ERRLOG_STATE_START; /* start new fetch */
- break;
-#ifdef CONFIG_HYSDN_CAPI
- case CHAN_CAPI:
-/* give packet to CAPI handler */
- if (hycapi_enable & (1 << card->myid)) {
- hycapi_rx_capipkt(card, buf, len);
- }
- break;
-#endif /* CONFIG_HYSDN_CAPI */
- default:
-		printk(KERN_INFO "irq message channel %d len %d unhandled\n", chan, len);
- break;
-
- } /* switch rx channel */
-
- return (1); /* always handled */
-} /* hysdn_sched_rx */
-
-/*****************************************************************************/
-/* hysdn_sched_tx is called from the cards handler to announce that there is */
-/* room in the tx-buffer to the card and data may be sent if needed. */
-/* If the routine wants to send data it must fill buf, len and chan with the */
-/* appropriate data and return a nonzero value. With a zero return no new */
-/* data to send is assumed. maxlen specifies the buffer size available for */
-/* sending. */
-/*****************************************************************************/
-int
-hysdn_sched_tx(hysdn_card *card, unsigned char *buf,
- unsigned short volatile *len, unsigned short volatile *chan,
- unsigned short maxlen)
-{
- struct sk_buff *skb;
-
- if (card->net_tx_busy) {
- card->net_tx_busy = 0; /* reset flag */
- hysdn_tx_netack(card); /* acknowledge packet send */
- } /* a network packet has completely been transferred */
- /* first of all async requests are handled */
- if (card->async_busy) {
- if (card->async_len <= maxlen) {
- memcpy(buf, card->async_data, card->async_len);
- *len = card->async_len;
- *chan = card->async_channel;
- card->async_busy = 0; /* reset request */
- return (1);
- }
- card->async_busy = 0; /* in case of length error */
- } /* async request */
- if ((card->err_log_state == ERRLOG_STATE_START) &&
- (maxlen >= ERRLOG_CMD_REQ_SIZE)) {
- strcpy(buf, ERRLOG_CMD_REQ); /* copy the command */
- *len = ERRLOG_CMD_REQ_SIZE; /* buffer length */
- *chan = CHAN_ERRLOG; /* and channel */
- card->err_log_state = ERRLOG_STATE_ON; /* new state is on */
-		return (1);	/* tell that data should be sent */
- } /* error log start and able to send */
- if ((card->err_log_state == ERRLOG_STATE_STOP) &&
- (maxlen >= ERRLOG_CMD_STOP_SIZE)) {
- strcpy(buf, ERRLOG_CMD_STOP); /* copy the command */
- *len = ERRLOG_CMD_STOP_SIZE; /* buffer length */
- *chan = CHAN_ERRLOG; /* and channel */
- card->err_log_state = ERRLOG_STATE_OFF; /* new state is off */
-		return (1);	/* tell that data should be sent */
-	}			/* error log stop and able to send */
- /* now handle network interface packets */
- if ((hynet_enable & (1 << card->myid)) &&
- (skb = hysdn_tx_netget(card)) != NULL)
- {
- if (skb->len <= maxlen) {
- /* copy the packet to the buffer */
- skb_copy_from_linear_data(skb, buf, skb->len);
- *len = skb->len;
- *chan = CHAN_NDIS_DATA;
- card->net_tx_busy = 1; /* we are busy sending network data */
- return (1); /* go and send the data */
- } else
-			hysdn_tx_netack(card);	/* acknowledge packet -> throw away */
- } /* send a network packet if available */
-#ifdef CONFIG_HYSDN_CAPI
- if (((hycapi_enable & (1 << card->myid))) &&
- ((skb = hycapi_tx_capiget(card)) != NULL))
- {
- if (skb->len <= maxlen) {
- skb_copy_from_linear_data(skb, buf, skb->len);
- *len = skb->len;
- *chan = CHAN_CAPI;
- hycapi_tx_capiack(card);
- return (1); /* go and send the data */
- }
- }
-#endif /* CONFIG_HYSDN_CAPI */
- return (0); /* nothing to send */
-} /* hysdn_sched_tx */
-
-
-/*****************************************************************************/
-/* send one config line to the card and return 0 if successful, otherwise a */
-/* negative error code. */
-/* The function polls with timeouts, which is not the fastest way to send a  */
-/* line, but that hardly matters: only a few lines are ever sent, and only   */
-/* rarely.                                                                   */
-/*****************************************************************************/
-int
-hysdn_tx_cfgline(hysdn_card *card, unsigned char *line, unsigned short chan)
-{
-	int cnt = 50;		/* timeout intervals */
- unsigned long flags;
-
- if (card->debug_flags & LOG_SCHED_ASYN)
- hysdn_addlog(card, "async tx-cfg chan=%d len=%d", chan, strlen(line) + 1);
-
- while (card->async_busy) {
-
- if (card->debug_flags & LOG_SCHED_ASYN)
- hysdn_addlog(card, "async tx-cfg delayed");
-
- msleep_interruptible(20); /* Timeout 20ms */
- if (!--cnt)
- return (-ERR_ASYNC_TIME); /* timed out */
- } /* wait for buffer to become free */
-
- spin_lock_irqsave(&card->hysdn_lock, flags);
- strcpy(card->async_data, line);
- card->async_len = strlen(line) + 1;
- card->async_channel = chan;
- card->async_busy = 1; /* request transfer */
-
- /* now queue the task */
- schedule_work(&card->irq_queue);
- spin_unlock_irqrestore(&card->hysdn_lock, flags);
-
- if (card->debug_flags & LOG_SCHED_ASYN)
- hysdn_addlog(card, "async tx-cfg data queued");
-
- cnt++; /* short delay */
-
- while (card->async_busy) {
-
- if (card->debug_flags & LOG_SCHED_ASYN)
- hysdn_addlog(card, "async tx-cfg waiting for tx-ready");
-
- msleep_interruptible(20); /* Timeout 20ms */
- if (!--cnt)
- return (-ERR_ASYNC_TIME); /* timed out */
- } /* wait for buffer to become free again */
-
- if (card->debug_flags & LOG_SCHED_ASYN)
-		hysdn_addlog(card, "async tx-cfg data sent");
-
-	return (0);		/* line sent correctly */
-} /* hysdn_tx_cfgline */
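
The config-line path above is a classic poll-with-timeout handshake: the sender claims a shared slot, kicks a worker, and polls until the worker clears the busy flag or a retry budget runs out. A minimal user-space sketch of the same shape (all names hypothetical, not part of the driver):

#include <stdbool.h>
#include <string.h>
#include <unistd.h>

#define POLL_INTERVAL_US 20000  /* 20 ms, matching the driver's msleep */
#define POLL_RETRIES     50

struct async_slot {
    volatile bool busy;   /* set by the sender, cleared by a worker */
    char data[256];
};

/* Returns 0 on success, -1 on timeout. */
static int send_cfg_line(struct async_slot *slot, const char *line)
{
    int cnt = POLL_RETRIES;

    while (slot->busy) {              /* wait for the slot to free up */
        usleep(POLL_INTERVAL_US);
        if (!--cnt)
            return -1;                /* timed out */
    }
    strncpy(slot->data, line, sizeof(slot->data) - 1);
    slot->data[sizeof(slot->data) - 1] = '\0'; /* bounded, unlike strcpy */
    slot->busy = true;                /* request transfer */

    while (slot->busy) {              /* wait for the worker to finish */
        usleep(POLL_INTERVAL_US);
        if (!--cnt)
            return -1;
    }
    return 0;
}

A kernel implementation would, as in the driver above, protect the slot with a spinlock and sleep with msleep_interruptible() rather than rely on a raw volatile flag.
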
diff --git a/drivers/staging/isdn/hysdn/ince1pc.h b/drivers/staging/isdn/hysdn/ince1pc.h
deleted file mode 100644
index cab68361de65..000000000000
--- a/drivers/staging/isdn/hysdn/ince1pc.h
+++ /dev/null
@@ -1,134 +0,0 @@
-/*
- * Linux driver for HYSDN cards
- * common definitions for both sides of the bus:
- * - conventions both spoolers must know
- * - channel numbers agreed upon
- *
- * Author M. Steinkopf
- * Copyright 1999 by M. Steinkopf
- *
- * This software may be used and distributed according to the terms
- * of the GNU General Public License, incorporated herein by reference.
- *
- */
-
-#ifndef __INCE1PC_H__
-#define __INCE1PC_H__
-
-/* basic scalar definitions have the same meaning,
- * but their declaration location depends on the environment
- */
-
-/*--------------------------------------channel numbers---------------------*/
-#define CHAN_SYSTEM 0x0001 /* system channel (spooler to spooler) */
-#define CHAN_ERRLOG 0x0005 /* error logger */
-#define CHAN_CAPI 0x0064 /* CAPI interface */
-#define CHAN_NDIS_DATA 0x1001 /* NDIS data transfer */
-
-/*--------------------------------------POF ready msg-----------------------*/
-/* NOTE: after booting, POF sends a system ready message to the PC: */
-#define RDY_MAGIC 0x52535953UL /* 'SYSR' reversed */
-#define RDY_MAGIC_SIZE 4 /* size in bytes */
-
-#define MAX_N_TOK_BYTES 255
-
-#define MIN_RDY_MSG_SIZE RDY_MAGIC_SIZE
-#define MAX_RDY_MSG_SIZE (RDY_MAGIC_SIZE + MAX_N_TOK_BYTES)
-
-#define SYSR_TOK_END 0
-#define SYSR_TOK_B_CHAN 1 /* nr. of B-Channels; DataLen=1; def: 2 */
-#define SYSR_TOK_FAX_CHAN 2 /* nr. of FAX Channels; DataLen=1; def: 0 */
-#define SYSR_TOK_MAC_ADDR 3 /* MAC-Address; DataLen=6; def: auto */
-#define SYSR_TOK_ESC 255 /* undefined data size yet */
-/* default values, if not corrected by token: */
-#define SYSR_TOK_B_CHAN_DEF 2 /* assume 2 B-Channels */
-#define SYSR_TOK_FAX_CHAN_DEF 1 /* assume 1 FAX Channel */
-
-/* syntax of new SYSR token stream:
- * channel: CHAN_SYSTEM
- * msgsize: MIN_RDY_MSG_SIZE <= x <= MAX_RDY_MSG_SIZE
- * RDY_MAGIC_SIZE <= x <= (RDY_MAGIC_SIZE+MAX_N_TOK_BYTES)
- * msg : 0 1 2 3 {4 5 6 ..}
- * S Y S R MAX_N_TOK_BYTES bytes of TokenStream
- *
- * TokenStream := empty
- * | {NonEndTokenChunk} EndToken RotlCRC
- * NonEndTokenChunk:= NonEndTokenId DataLen [Data]
- * NonEndTokenId := 0x01 .. 0xFE 1 BYTE
- * DataLen := 0x00 .. 0xFF 1 BYTE
- * Data := DataLen bytes
- * EndToken := 0x00
- * RotlCRC := special 1 byte CRC over all NonEndTokenChunk bytes
- * s. RotlCRC algorithm
- *
- * RotlCRC algorithm:
- * ucSum= 0 1 unsigned char
- * for all NonEndTokenChunk bytes:
- * ROTL(ucSum,1) rotate left by 1
- *             ucSum += Char;           add current byte with wrap around
- * RotlCRC= ~ucSum; invert all bits for result
- *
- * note:
- * - for a 16-bit FIFO add a padding 0 byte to achieve an even number of token data bytes!
- */
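
The RotlCRC description above translates directly into a few lines of C. A sketch, assuming 8-bit bytes (the function name is illustrative):

#include <stddef.h>
#include <stdint.h>

static uint8_t rotl_crc(const uint8_t *buf, size_t len)
{
    uint8_t sum = 0;
    size_t i;

    for (i = 0; i < len; i++) {
        sum = (uint8_t)((sum << 1) | (sum >> 7)); /* ROTL(ucSum, 1) */
        sum += buf[i];    /* add current byte, wraps in 8 bits */
    }
    return (uint8_t)~sum; /* invert all bits for the result */
}

Feeding it all NonEndTokenChunk bytes of a stream yields the RotlCRC byte that terminates the stream.
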
-
-/*--------------------------------------error logger------------------------*/
-/* note: POF needs the final 0 byte! */
-#define ERRLOG_CMD_REQ "ERRLOG ON"
-#define ERRLOG_CMD_REQ_SIZE 10 /* with final 0 byte ! */
-#define ERRLOG_CMD_STOP "ERRLOG OFF"
-#define ERRLOG_CMD_STOP_SIZE 11 /* with final 0 byte ! */
-
-#define ERRLOG_ENTRY_SIZE 64 /* sizeof(tErrLogEntry) */
- /* remaining text size = 55 */
-#define ERRLOG_TEXT_SIZE (ERRLOG_ENTRY_SIZE - 2 * 4 - 1)
-
-typedef struct ErrLogEntry_tag {
-
- /*00 */ unsigned long ulErrType;
-
- /*04 */ unsigned long ulErrSubtype;
-
- /*08 */ unsigned char ucTextSize;
-
- /*09 */ unsigned char ucText[ERRLOG_TEXT_SIZE];
- /* ASCIIZ of len ucTextSize-1 */
-
-/*40 */
-} tErrLogEntry;
-
-
-#if defined(__TURBOC__)
-#if sizeof(tErrLogEntry) != ERRLOG_ENTRY_SIZE
-#error size of tErrLogEntry != ERRLOG_ENTRY_SIZE
-#endif /* */
-#endif /* */
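
The sizeof-in-#if test above is a Turbo C extension; standard C cannot evaluate sizeof in the preprocessor. For comparison, the portable equivalent today is a static assertion (C11 shown; kernel code would typically use BUILD_BUG_ON() or static_assert()):

_Static_assert(sizeof(tErrLogEntry) == ERRLOG_ENTRY_SIZE,
               "tErrLogEntry must be exactly ERRLOG_ENTRY_SIZE bytes");
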
-
-/*--------------------------------------DPRAM boot spooler------------------*/
-/* this is the structure used between the PC and
- * the Hyperstone to exchange boot data
- */
-#define DPRAM_SPOOLER_DATA_SIZE 0x20
-typedef struct DpramBootSpooler_tag {
-
- /*00 */ unsigned char Len;
-
- /*01 */ volatile unsigned char RdPtr;
-
- /*02 */ unsigned char WrPtr;
-
- /*03 */ unsigned char Data[DPRAM_SPOOLER_DATA_SIZE];
-
-/*23 */
-} tDpramBootSpooler;
-
-
-#define DPRAM_SPOOLER_MIN_SIZE  5       /* Len+RdPtr+WrPtr+2*data */
-#define DPRAM_SPOOLER_DEF_SIZE 0x23 /* current default size */
-
-/*--------------------------------------HYCARD/ERGO DPRAM SoftUart----------*/
-/* at DPRAM offset 0x1C00: */
-#define SIZE_RSV_SOFT_UART 0x1B0 /* 432 bytes reserved for SoftUart */
-
-
-#endif /* __INCE1PC_H__ */
diff --git a/drivers/staging/kpc2000/kpc2000/core.c b/drivers/staging/kpc2000/kpc2000/core.c
index 0a23727d0dc3..93cf28febdf6 100644
--- a/drivers/staging/kpc2000/kpc2000/core.c
+++ b/drivers/staging/kpc2000/kpc2000/core.c
@@ -338,7 +338,7 @@ static int kp2000_pcie_probe(struct pci_dev *pdev,
reg_bar_phys_addr = pci_resource_start(pcard->pdev, REG_BAR);
reg_bar_phys_len = pci_resource_len(pcard->pdev, REG_BAR);
- pcard->regs_bar_base = ioremap_nocache(reg_bar_phys_addr, PAGE_SIZE);
+ pcard->regs_bar_base = ioremap(reg_bar_phys_addr, PAGE_SIZE);
if (!pcard->regs_bar_base) {
dev_err(&pcard->pdev->dev,
"probe: REG_BAR could not remap memory to virtual space\n");
@@ -367,7 +367,7 @@ static int kp2000_pcie_probe(struct pci_dev *pdev,
dma_bar_phys_addr = pci_resource_start(pcard->pdev, DMA_BAR);
dma_bar_phys_len = pci_resource_len(pcard->pdev, DMA_BAR);
- pcard->dma_bar_base = ioremap_nocache(dma_bar_phys_addr,
+ pcard->dma_bar_base = ioremap(dma_bar_phys_addr,
dma_bar_phys_len);
if (!pcard->dma_bar_base) {
dev_err(&pcard->pdev->dev,
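
All of the ioremap_nocache() to ioremap() conversions in this series are mechanical: ioremap_nocache() had long been a plain alias for ioremap(), which already returns an uncached MMIO mapping. One detail worth remembering, shown in the sketch below (illustrative names, not driver code): both ioremap() and devm_ioremap() report failure with NULL rather than an ERR_PTR(), so they must be checked with !ptr, not IS_ERR().

#include <linux/errno.h>
#include <linux/io.h>
#include <linux/ioport.h>

static int read_device_id(struct resource *res, u32 *id)
{
    void __iomem *base = ioremap(res->start, resource_size(res));

    if (!base)         /* NULL on failure; IS_ERR() would miss it */
        return -ENOMEM;
    *id = readl(base); /* MMIO accessors on the uncached mapping */
    iounmap(base);     /* plain ioremap() needs a matching iounmap() */
    return 0;
}
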
diff --git a/drivers/staging/kpc2000/kpc2000_i2c.c b/drivers/staging/kpc2000/kpc2000_i2c.c
index 5460bf973c9c..25bb5c97dd21 100644
--- a/drivers/staging/kpc2000/kpc2000_i2c.c
+++ b/drivers/staging/kpc2000/kpc2000_i2c.c
@@ -32,7 +32,7 @@
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Matt.Sickler@Daktronics.com");
-struct i2c_device {
+struct kpc_i2c {
unsigned long smba;
struct i2c_adapter adapter;
unsigned int features;
@@ -131,7 +131,7 @@ struct i2c_device {
/* Make sure the SMBus host is ready to start transmitting.
* Return 0 if it is, -EBUSY if it is not.
*/
-static int i801_check_pre(struct i2c_device *priv)
+static int i801_check_pre(struct kpc_i2c *priv)
{
int status;
@@ -156,7 +156,7 @@ static int i801_check_pre(struct i2c_device *priv)
}
/* Convert the status register to an error code, and clear it. */
-static int i801_check_post(struct i2c_device *priv, int status, int timeout)
+static int i801_check_post(struct kpc_i2c *priv, int status, int timeout)
{
int result = 0;
@@ -206,7 +206,7 @@ static int i801_check_post(struct i2c_device *priv, int status, int timeout)
return result;
}
-static int i801_transaction(struct i2c_device *priv, int xact)
+static int i801_transaction(struct kpc_i2c *priv, int xact)
{
int status;
int result;
@@ -235,7 +235,7 @@ static int i801_transaction(struct i2c_device *priv, int xact)
}
/* wait for INTR bit as advised by Intel */
-static void i801_wait_hwpec(struct i2c_device *priv)
+static void i801_wait_hwpec(struct kpc_i2c *priv)
{
int timeout = 0;
int status;
@@ -251,7 +251,7 @@ static void i801_wait_hwpec(struct i2c_device *priv)
outb_p(status, SMBHSTSTS(priv));
}
-static int i801_block_transaction_by_block(struct i2c_device *priv,
+static int i801_block_transaction_by_block(struct kpc_i2c *priv,
union i2c_smbus_data *data,
char read_write, int hwpec)
{
@@ -285,7 +285,7 @@ static int i801_block_transaction_by_block(struct i2c_device *priv,
return 0;
}
-static int i801_block_transaction_byte_by_byte(struct i2c_device *priv,
+static int i801_block_transaction_byte_by_byte(struct kpc_i2c *priv,
union i2c_smbus_data *data,
char read_write, int command,
int hwpec)
@@ -367,7 +367,7 @@ static int i801_block_transaction_byte_by_byte(struct i2c_device *priv,
return 0;
}
-static int i801_set_block_buffer_mode(struct i2c_device *priv)
+static int i801_set_block_buffer_mode(struct kpc_i2c *priv)
{
outb_p(inb_p(SMBAUXCTL(priv)) | SMBAUXCTL_E32B, SMBAUXCTL(priv));
if ((inb_p(SMBAUXCTL(priv)) & SMBAUXCTL_E32B) == 0)
@@ -376,7 +376,7 @@ static int i801_set_block_buffer_mode(struct i2c_device *priv)
}
/* Block transaction function */
-static int i801_block_transaction(struct i2c_device *priv,
+static int i801_block_transaction(struct kpc_i2c *priv,
union i2c_smbus_data *data, char read_write,
int command, int hwpec)
{
@@ -440,7 +440,7 @@ static s32 i801_access(struct i2c_adapter *adap, u16 addr,
int hwpec;
int block = 0;
int ret, xact = 0;
- struct i2c_device *priv = i2c_get_adapdata(adap);
+ struct kpc_i2c *priv = i2c_get_adapdata(adap);
hwpec = (priv->features & FEATURE_SMBUS_PEC) &&
(flags & I2C_CLIENT_PEC) &&
@@ -572,9 +572,13 @@ static s32 i801_access(struct i2c_adapter *adap, u16 addr,
return 0;
}
+#define enable_flag(x) (x)
+#define disable_flag(x) 0
+#define enable_flag_if(x, cond) ((cond) ? (x) : 0)
+
static u32 i801_func(struct i2c_adapter *adapter)
{
- struct i2c_device *priv = i2c_get_adapdata(adapter);
+ struct kpc_i2c *priv = i2c_get_adapdata(adapter);
/* original settings
* u32 f = I2C_FUNC_SMBUS_QUICK | I2C_FUNC_SMBUS_BYTE |
@@ -588,42 +592,42 @@ static u32 i801_func(struct i2c_adapter *adapter)
// http://lxr.free-electrons.com/source/include/uapi/linux/i2c.h#L85
u32 f =
- I2C_FUNC_I2C | /* 0x00000001(I enabled this
- * one)
- */
- !I2C_FUNC_10BIT_ADDR | /* 0x00000002 */
- !I2C_FUNC_PROTOCOL_MANGLING | /* 0x00000004 */
- ((priv->features & FEATURE_SMBUS_PEC) ?
- I2C_FUNC_SMBUS_PEC : 0) | /* 0x00000008 */
- !I2C_FUNC_SMBUS_BLOCK_PROC_CALL | /* 0x00008000 */
- I2C_FUNC_SMBUS_QUICK | /* 0x00010000 */
- !I2C_FUNC_SMBUS_READ_BYTE | /* 0x00020000 */
- !I2C_FUNC_SMBUS_WRITE_BYTE | /* 0x00040000 */
- !I2C_FUNC_SMBUS_READ_BYTE_DATA | /* 0x00080000 */
- !I2C_FUNC_SMBUS_WRITE_BYTE_DATA | /* 0x00100000 */
- !I2C_FUNC_SMBUS_READ_WORD_DATA | /* 0x00200000 */
- !I2C_FUNC_SMBUS_WRITE_WORD_DATA | /* 0x00400000 */
- !I2C_FUNC_SMBUS_PROC_CALL | /* 0x00800000 */
- !I2C_FUNC_SMBUS_READ_BLOCK_DATA | /* 0x01000000 */
- !I2C_FUNC_SMBUS_WRITE_BLOCK_DATA | /* 0x02000000 */
- ((priv->features & FEATURE_I2C_BLOCK_READ) ?
- I2C_FUNC_SMBUS_READ_I2C_BLOCK : 0) | /* 0x04000000 */
- I2C_FUNC_SMBUS_WRITE_I2C_BLOCK | /* 0x08000000 */
-
- I2C_FUNC_SMBUS_BYTE | /* _READ_BYTE _WRITE_BYTE */
- I2C_FUNC_SMBUS_BYTE_DATA | /* _READ_BYTE_DATA
- * _WRITE_BYTE_DATA
- */
- I2C_FUNC_SMBUS_WORD_DATA | /* _READ_WORD_DATA
- * _WRITE_WORD_DATA
- */
- I2C_FUNC_SMBUS_BLOCK_DATA | /* _READ_BLOCK_DATA
- * _WRITE_BLOCK_DATA
- */
- !I2C_FUNC_SMBUS_I2C_BLOCK | /* _READ_I2C_BLOCK
- * _WRITE_I2C_BLOCK
- */
- !I2C_FUNC_SMBUS_EMUL; /* _QUICK _BYTE
+ enable_flag(I2C_FUNC_I2C) | /* 0x00000001(I enabled this one) */
+ disable_flag(I2C_FUNC_10BIT_ADDR) | /* 0x00000002 */
+ disable_flag(I2C_FUNC_PROTOCOL_MANGLING) | /* 0x00000004 */
+ enable_flag_if(I2C_FUNC_SMBUS_PEC,
+ priv->features & FEATURE_SMBUS_PEC) |
+ /* 0x00000008 */
+ disable_flag(I2C_FUNC_SMBUS_BLOCK_PROC_CALL) | /* 0x00008000 */
+ enable_flag(I2C_FUNC_SMBUS_QUICK) | /* 0x00010000 */
+ disable_flag(I2C_FUNC_SMBUS_READ_BYTE) | /* 0x00020000 */
+ disable_flag(I2C_FUNC_SMBUS_WRITE_BYTE) | /* 0x00040000 */
+ disable_flag(I2C_FUNC_SMBUS_READ_BYTE_DATA) | /* 0x00080000 */
+ disable_flag(I2C_FUNC_SMBUS_WRITE_BYTE_DATA) | /* 0x00100000 */
+ disable_flag(I2C_FUNC_SMBUS_READ_WORD_DATA) | /* 0x00200000 */
+ disable_flag(I2C_FUNC_SMBUS_WRITE_WORD_DATA) | /* 0x00400000 */
+ disable_flag(I2C_FUNC_SMBUS_PROC_CALL) | /* 0x00800000 */
+ disable_flag(I2C_FUNC_SMBUS_READ_BLOCK_DATA) | /* 0x01000000 */
+ disable_flag(I2C_FUNC_SMBUS_WRITE_BLOCK_DATA) | /* 0x02000000 */
+ enable_flag_if(I2C_FUNC_SMBUS_READ_I2C_BLOCK,
+ priv->features & FEATURE_I2C_BLOCK_READ) |
+ /* 0x04000000 */
+ enable_flag(I2C_FUNC_SMBUS_WRITE_I2C_BLOCK) | /* 0x08000000 */
+
+ enable_flag(I2C_FUNC_SMBUS_BYTE) | /* _READ_BYTE _WRITE_BYTE */
+ enable_flag(I2C_FUNC_SMBUS_BYTE_DATA) | /* _READ_BYTE_DATA
+ * _WRITE_BYTE_DATA
+ */
+ enable_flag(I2C_FUNC_SMBUS_WORD_DATA) | /* _READ_WORD_DATA
+ * _WRITE_WORD_DATA
+ */
+ enable_flag(I2C_FUNC_SMBUS_BLOCK_DATA) | /* _READ_BLOCK_DATA
+ * _WRITE_BLOCK_DATA
+ */
+ disable_flag(I2C_FUNC_SMBUS_I2C_BLOCK) | /* _READ_I2C_BLOCK
+ * _WRITE_I2C_BLOCK
+ */
+ disable_flag(I2C_FUNC_SMBUS_EMUL); /* _QUICK _BYTE
* _BYTE_DATA _WORD_DATA
* _PROC_CALL
* _WRITE_BLOCK_DATA
@@ -632,6 +636,10 @@ static u32 i801_func(struct i2c_adapter *adapter)
return f;
}
+#undef enable_flag
+#undef disable_flag
+#undef enable_flag_if
+
static const struct i2c_algorithm smbus_algorithm = {
.smbus_xfer = i801_access,
.functionality = i801_func,
@@ -640,10 +648,10 @@ static const struct i2c_algorithm smbus_algorithm = {
/********************************
*** Part 2 - Driver Handlers ***
********************************/
-static int pi2c_probe(struct platform_device *pldev)
+static int kpc_i2c_probe(struct platform_device *pldev)
{
int err;
- struct i2c_device *priv;
+ struct kpc_i2c *priv;
struct resource *res;
priv = devm_kzalloc(&pldev->dev, sizeof(*priv), GFP_KERNEL);
@@ -659,7 +667,7 @@ static int pi2c_probe(struct platform_device *pldev)
if (!res)
return -ENXIO;
- priv->smba = (unsigned long)devm_ioremap_nocache(&pldev->dev,
+ priv->smba = (unsigned long)devm_ioremap(&pldev->dev,
res->start,
resource_size(res));
if (!priv->smba)
@@ -692,11 +700,11 @@ static int pi2c_probe(struct platform_device *pldev)
return 0;
}
-static int pi2c_remove(struct platform_device *pldev)
+static int kpc_i2c_remove(struct platform_device *pldev)
{
- struct i2c_device *lddev;
+ struct kpc_i2c *lddev;
- lddev = (struct i2c_device *)platform_get_drvdata(pldev);
+ lddev = (struct kpc_i2c *)platform_get_drvdata(pldev);
i2c_del_adapter(&lddev->adapter);
@@ -710,12 +718,12 @@ static int pi2c_remove(struct platform_device *pldev)
return 0;
}
-static struct platform_driver i2c_plat_driver_i = {
- .probe = pi2c_probe,
- .remove = pi2c_remove,
+static struct platform_driver kpc_i2c_driver = {
+ .probe = kpc_i2c_probe,
+ .remove = kpc_i2c_remove,
.driver = {
.name = KP_DRIVER_NAME_I2C,
},
};
-module_platform_driver(i2c_plat_driver_i);
+module_platform_driver(kpc_i2c_driver);
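
The enable_flag()/disable_flag() macros introduced above fix a readability trap in the old flag table: applying logical NOT to a flag constant yields 0, not the flag's bitwise complement, so ORing "!I2C_FUNC_10BIT_ADDR" into the mask was a silent no-op that merely looked meaningful. A compilable sketch of the difference (flag names invented for the demo):

#include <stdio.h>

#define FLAG_A 0x1u
#define FLAG_B 0x2u

#define enable_flag(x)          (x)
#define disable_flag(x)         0
#define enable_flag_if(x, cond) ((cond) ? (x) : 0)

int main(void)
{
    unsigned int old_style = FLAG_A | !FLAG_B; /* !FLAG_B == 0: a no-op */
    unsigned int new_style = enable_flag(FLAG_A) | disable_flag(FLAG_B) |
                             enable_flag_if(FLAG_B, 0);

    printf("%u %u\n", old_style, new_style); /* prints "1 1" */
    return 0;
}

Both forms produce the same mask; the macro form simply states the intent for each flag.
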
diff --git a/drivers/staging/kpc2000/kpc2000_spi.c b/drivers/staging/kpc2000/kpc2000_spi.c
index 8becf972af9c..1c360daa703d 100644
--- a/drivers/staging/kpc2000/kpc2000_spi.c
+++ b/drivers/staging/kpc2000/kpc2000_spi.c
@@ -464,7 +464,7 @@ kp_spi_probe(struct platform_device *pldev)
goto free_master;
}
- kpspi->base = devm_ioremap_nocache(&pldev->dev, r->start,
+ kpspi->base = devm_ioremap(&pldev->dev, r->start,
resource_size(r));
status = spi_register_master(master);
diff --git a/drivers/staging/kpc2000/kpc_dma/fileops.c b/drivers/staging/kpc2000/kpc_dma/fileops.c
index cb52bd9a6d2f..40525540dde6 100644
--- a/drivers/staging/kpc2000/kpc_dma/fileops.c
+++ b/drivers/staging/kpc2000/kpc_dma/fileops.c
@@ -49,9 +49,7 @@ static int kpc_dma_transfer(struct dev_private_data *priv,
u64 dma_addr;
u64 user_ctl;
- BUG_ON(priv == NULL);
ldev = priv->ldev;
- BUG_ON(ldev == NULL);
acd = kzalloc(sizeof(*acd), GFP_KERNEL);
if (!acd) {
diff --git a/drivers/staging/kpc2000/kpc_dma/kpc_dma_driver.c b/drivers/staging/kpc2000/kpc_dma/kpc_dma_driver.c
index a05ae6d40db9..ec79a8500caf 100644
--- a/drivers/staging/kpc2000/kpc_dma/kpc_dma_driver.c
+++ b/drivers/staging/kpc2000/kpc_dma/kpc_dma_driver.c
@@ -122,7 +122,7 @@ int kpc_dma_probe(struct platform_device *pldev)
rv = -ENXIO;
goto err_kfree;
}
- ldev->eng_regs = ioremap_nocache(r->start, resource_size(r));
+ ldev->eng_regs = ioremap(r->start, resource_size(r));
if (!ldev->eng_regs) {
dev_err(&ldev->pldev->dev, "%s: failed to ioremap engine regs!\n", __func__);
rv = -ENXIO;
diff --git a/drivers/staging/ks7010/ks_wlan_net.c b/drivers/staging/ks7010/ks_wlan_net.c
index 3cffc8be6656..211dd4a11cac 100644
--- a/drivers/staging/ks7010/ks_wlan_net.c
+++ b/drivers/staging/ks7010/ks_wlan_net.c
@@ -45,7 +45,7 @@ struct wep_key {
* function prototypes
*/
static int ks_wlan_open(struct net_device *dev);
-static void ks_wlan_tx_timeout(struct net_device *dev);
+static void ks_wlan_tx_timeout(struct net_device *dev, unsigned int txqueue);
static int ks_wlan_start_xmit(struct sk_buff *skb, struct net_device *dev);
static int ks_wlan_close(struct net_device *dev);
static void ks_wlan_set_rx_mode(struct net_device *dev);
@@ -2498,7 +2498,7 @@ int ks_wlan_set_mac_address(struct net_device *dev, void *addr)
}
static
-void ks_wlan_tx_timeout(struct net_device *dev)
+void ks_wlan_tx_timeout(struct net_device *dev, unsigned int txqueue)
{
struct ks_wlan_private *priv = netdev_priv(dev);
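
The extra txqueue parameter tracks a change in the netdev core: the .ndo_tx_timeout callback now receives the index of the TX queue that stalled, so every driver must take the argument even when, as here, it goes unused. A sketch of the updated callback shape (driver names hypothetical):

#include <linux/netdevice.h>

static void example_tx_timeout(struct net_device *dev, unsigned int txqueue)
{
    netdev_err(dev, "TX timeout on queue %u\n", txqueue);
    /* queue or engine reset logic would go here */
}

static const struct net_device_ops example_netdev_ops = {
    .ndo_tx_timeout = example_tx_timeout,
};
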
diff --git a/drivers/staging/media/allegro-dvt/allegro-core.c b/drivers/staging/media/allegro-dvt/allegro-core.c
index 6f0cd0784786..3be41698df4c 100644
--- a/drivers/staging/media/allegro-dvt/allegro-core.c
+++ b/drivers/staging/media/allegro-dvt/allegro-core.c
@@ -2914,7 +2914,7 @@ static int allegro_probe(struct platform_device *pdev)
"regs resource missing from device tree\n");
return -EINVAL;
}
- regs = devm_ioremap_nocache(&pdev->dev, res->start, resource_size(res));
+ regs = devm_ioremap(&pdev->dev, res->start, resource_size(res));
if (IS_ERR(regs)) {
dev_err(&pdev->dev, "failed to map registers\n");
return PTR_ERR(regs);
@@ -2932,7 +2932,7 @@ static int allegro_probe(struct platform_device *pdev)
"sram resource missing from device tree\n");
return -EINVAL;
}
- sram_regs = devm_ioremap_nocache(&pdev->dev,
+ sram_regs = devm_ioremap(&pdev->dev,
sram_res->start,
resource_size(sram_res));
if (IS_ERR(sram_regs)) {
diff --git a/drivers/staging/most/Makefile b/drivers/staging/most/Makefile
index 85ea5a434ced..20a99ecb37c4 100644
--- a/drivers/staging/most/Makefile
+++ b/drivers/staging/most/Makefile
@@ -2,7 +2,6 @@
obj-$(CONFIG_MOST) += most_core.o
most_core-y := core.o
most_core-y += configfs.o
-ccflags-y += -I $(srctree)/drivers/staging/
obj-$(CONFIG_MOST_CDEV) += cdev/
obj-$(CONFIG_MOST_NET) += net/
diff --git a/drivers/staging/most/cdev/Makefile b/drivers/staging/most/cdev/Makefile
index 9f4a8b8c9c27..ef90cd71994a 100644
--- a/drivers/staging/most/cdev/Makefile
+++ b/drivers/staging/most/cdev/Makefile
@@ -2,4 +2,3 @@
obj-$(CONFIG_MOST_CDEV) += most_cdev.o
most_cdev-objs := cdev.o
-ccflags-y += -I $(srctree)/drivers/staging/
diff --git a/drivers/staging/most/cdev/cdev.c b/drivers/staging/most/cdev/cdev.c
index f880147c82fd..71943d17f825 100644
--- a/drivers/staging/most/cdev/cdev.c
+++ b/drivers/staging/most/cdev/cdev.c
@@ -16,7 +16,8 @@
#include <linux/kfifo.h>
#include <linux/uaccess.h>
#include <linux/idr.h>
-#include "most/core.h"
+
+#include "../most.h"
#define CHRDEV_REGION_SIZE 50
@@ -25,7 +26,7 @@ static struct cdev_component {
struct ida minor_id;
unsigned int major;
struct class *class;
- struct core_component cc;
+ struct most_component cc;
} comp;
struct comp_channel {
diff --git a/drivers/staging/most/configfs.c b/drivers/staging/most/configfs.c
index 34a9fb53985c..9a961222f458 100644
--- a/drivers/staging/most/configfs.c
+++ b/drivers/staging/most/configfs.c
@@ -10,7 +10,10 @@
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/configfs.h>
-#include <most/core.h>
+
+#include "most.h"
+
+#define MAX_STRING_SIZE 80
struct mdev_link {
struct config_item item;
@@ -22,13 +25,13 @@ struct mdev_link {
u16 subbuffer_size;
u16 packets_per_xact;
u16 dbr_size;
- char datatype[PAGE_SIZE];
- char direction[PAGE_SIZE];
- char name[PAGE_SIZE];
- char device[PAGE_SIZE];
- char channel[PAGE_SIZE];
- char comp[PAGE_SIZE];
- char comp_params[PAGE_SIZE];
+ char datatype[MAX_STRING_SIZE];
+ char direction[MAX_STRING_SIZE];
+ char name[MAX_STRING_SIZE];
+ char device[MAX_STRING_SIZE];
+ char channel[MAX_STRING_SIZE];
+ char comp[MAX_STRING_SIZE];
+ char comp_params[MAX_STRING_SIZE];
};
static struct list_head mdev_link_list;
@@ -125,6 +128,8 @@ static ssize_t mdev_link_create_link_store(struct config_item *item,
return ret;
list_add_tail(&mdev_link->list, &mdev_link_list);
mdev_link->create_link = tmp;
+ mdev_link->destroy_link = false;
+
return count;
}
@@ -140,13 +145,16 @@ static ssize_t mdev_link_destroy_link_store(struct config_item *item,
return ret;
if (!tmp)
return count;
- mdev_link->destroy_link = tmp;
+
ret = most_remove_link(mdev_link->device, mdev_link->channel,
mdev_link->comp);
if (ret)
return ret;
if (!list_empty(&mdev_link_list))
list_del(&mdev_link->list);
+
+ mdev_link->destroy_link = tmp;
+
return count;
}
@@ -197,7 +205,7 @@ static ssize_t mdev_link_device_store(struct config_item *item,
{
struct mdev_link *mdev_link = to_mdev_link(item);
- strcpy(mdev_link->device, page);
+ strlcpy(mdev_link->device, page, sizeof(mdev_link->device));
strim(mdev_link->device);
return count;
}
@@ -212,7 +220,7 @@ static ssize_t mdev_link_channel_store(struct config_item *item,
{
struct mdev_link *mdev_link = to_mdev_link(item);
- strcpy(mdev_link->channel, page);
+ strlcpy(mdev_link->channel, page, sizeof(mdev_link->channel));
strim(mdev_link->channel);
return count;
}
@@ -227,7 +235,8 @@ static ssize_t mdev_link_comp_store(struct config_item *item,
{
struct mdev_link *mdev_link = to_mdev_link(item);
- strcpy(mdev_link->comp, page);
+ strlcpy(mdev_link->comp, page, sizeof(mdev_link->comp));
+ strim(mdev_link->comp);
return count;
}
@@ -242,7 +251,8 @@ static ssize_t mdev_link_comp_params_store(struct config_item *item,
{
struct mdev_link *mdev_link = to_mdev_link(item);
- strcpy(mdev_link->comp_params, page);
+ strlcpy(mdev_link->comp_params, page, sizeof(mdev_link->comp_params));
+ strim(mdev_link->comp_params);
return count;
}
@@ -373,13 +383,20 @@ static void mdev_link_release(struct config_item *item)
struct mdev_link *mdev_link = to_mdev_link(item);
int ret;
- if (!list_empty(&mdev_link_list)) {
- ret = most_remove_link(mdev_link->device, mdev_link->channel,
- mdev_link->comp);
- if (ret && (ret != -ENODEV))
- pr_err("Removing link failed.\n");
- list_del(&mdev_link->list);
+ if (mdev_link->destroy_link)
+ goto free_item;
+
+ ret = most_remove_link(mdev_link->device, mdev_link->channel,
+ mdev_link->comp);
+ if (ret) {
+ pr_err("Removing link failed.\n");
+ goto free_item;
}
+
+ if (!list_empty(&mdev_link_list))
+ list_del(&mdev_link->list);
+
+free_item:
kfree(to_mdev_link(item));
}
@@ -630,7 +647,7 @@ static struct most_sound most_sound_subsys = {
},
};
-int most_register_configfs_subsys(struct core_component *c)
+int most_register_configfs_subsys(struct most_component *c)
{
int ret;
@@ -674,7 +691,7 @@ void most_interface_register_notify(const char *mdev)
most_cfg_complete("sound");
}
-void most_deregister_configfs_subsys(struct core_component *c)
+void most_deregister_configfs_subsys(struct most_component *c)
{
if (!strcmp(c->name, "cdev"))
configfs_unregister_subsystem(&most_cdev.subsys);
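
Shrinking the mdev_link string fields from PAGE_SIZE to 80 bytes is what forces the strcpy() to strlcpy() conversions above: the configfs page being copied in can be up to PAGE_SIZE bytes, so the copy must now truncate safely. A user-space sketch of strlcpy() semantics (my_strlcpy is a stand-in for the kernel helper):

#include <stdio.h>
#include <string.h>

static size_t my_strlcpy(char *dst, const char *src, size_t size)
{
    size_t len = strlen(src);

    if (size) {
        size_t n = len >= size ? size - 1 : len;

        memcpy(dst, src, n);
        dst[n] = '\0';  /* always NUL-terminated */
    }
    return len;         /* len >= size means the copy was truncated */
}

int main(void)
{
    char buf[8];

    my_strlcpy(buf, "configuration-string", sizeof(buf));
    printf("%s\n", buf); /* "configu": truncated but terminated */
    return 0;
}
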
diff --git a/drivers/staging/most/core.c b/drivers/staging/most/core.c
index 51a6b41d5b82..0c4ae6920d77 100644
--- a/drivers/staging/most/core.c
+++ b/drivers/staging/most/core.c
@@ -2,10 +2,9 @@
/*
* core.c - Implementation of core module of MOST Linux driver stack
*
- * Copyright (C) 2013-2015 Microchip Technology Germany II GmbH & Co. KG
+ * Copyright (C) 2013-2020 Microchip Technology Germany II GmbH & Co. KG
*/
-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/module.h>
#include <linux/fs.h>
#include <linux/slab.h>
@@ -21,25 +20,18 @@
#include <linux/kthread.h>
#include <linux/dma-mapping.h>
#include <linux/idr.h>
-#include <most/core.h>
+
+#include "most.h"
#define MAX_CHANNELS 64
#define STRING_SIZE 80
static struct ida mdev_id;
static int dummy_num_buffers;
-
-static struct mostcore {
- struct device dev;
- struct device_driver drv;
- struct bus_type bus;
- struct list_head comp_list;
-} mc;
-
-#define to_driver(d) container_of(d, struct mostcore, drv)
+static struct list_head comp_list;
struct pipe {
- struct core_component *comp;
+ struct most_component *comp;
int refs;
int num_buffers;
};
@@ -151,7 +143,7 @@ static void flush_channel_fifos(struct most_channel *c)
spin_unlock_irqrestore(&c->fifo_lock, hf_flags);
if (unlikely((!list_empty(&c->fifo) || !list_empty(&c->halt_fifo))))
- pr_info("WARN: fifo | trash fifo not empty\n");
+ dev_warn(&c->dev, "Channel or trash fifo not empty\n");
}
/**
@@ -402,7 +394,7 @@ static ssize_t description_show(struct device *dev,
struct device_attribute *attr,
char *buf)
{
- struct most_interface *iface = to_most_interface(dev);
+ struct most_interface *iface = dev_get_drvdata(dev);
return snprintf(buf, PAGE_SIZE, "%s\n", iface->description);
}
@@ -411,7 +403,7 @@ static ssize_t interface_show(struct device *dev,
struct device_attribute *attr,
char *buf)
{
- struct most_interface *iface = to_most_interface(dev);
+ struct most_interface *iface = dev_get_drvdata(dev);
switch (iface->interface) {
case ITYPE_LOOPBACK:
@@ -454,11 +446,11 @@ static const struct attribute_group *interface_attr_groups[] = {
NULL,
};
-static struct core_component *match_component(char *name)
+static struct most_component *match_component(char *name)
{
- struct core_component *comp;
+ struct most_component *comp;
- list_for_each_entry(comp, &mc.comp_list, list) {
+ list_for_each_entry(comp, &comp_list, list) {
if (!strcmp(comp->name, name))
return comp;
}
@@ -476,7 +468,7 @@ static int print_links(struct device *dev, void *data)
int offs = d->offs;
char *buf = d->buf;
struct most_channel *c;
- struct most_interface *iface = to_most_interface(dev);
+ struct most_interface *iface = dev_get_drvdata(dev);
list_for_each_entry(c, &iface->p->channel_list, list) {
if (c->pipe0.comp) {
@@ -484,7 +476,7 @@ static int print_links(struct device *dev, void *data)
PAGE_SIZE - offs,
"%s:%s:%s\n",
c->pipe0.comp->name,
- dev_name(&iface->dev),
+ dev_name(iface->dev),
dev_name(&c->dev));
}
if (c->pipe1.comp) {
@@ -492,7 +484,7 @@ static int print_links(struct device *dev, void *data)
PAGE_SIZE - offs,
"%s:%s:%s\n",
c->pipe1.comp->name,
- dev_name(&iface->dev),
+ dev_name(iface->dev),
dev_name(&c->dev));
}
}
@@ -500,20 +492,33 @@ static int print_links(struct device *dev, void *data)
return 0;
}
+static int most_match(struct device *dev, struct device_driver *drv)
+{
+ if (!strcmp(dev_name(dev), "most"))
+ return 0;
+ else
+ return 1;
+}
+
+static struct bus_type mostbus = {
+ .name = "most",
+ .match = most_match,
+};
+
static ssize_t links_show(struct device_driver *drv, char *buf)
{
struct show_links_data d = { .buf = buf };
- bus_for_each_dev(&mc.bus, NULL, &d, print_links);
+ bus_for_each_dev(&mostbus, NULL, &d, print_links);
return d.offs;
}
static ssize_t components_show(struct device_driver *drv, char *buf)
{
- struct core_component *comp;
+ struct most_component *comp;
int offs = 0;
- list_for_each_entry(comp, &mc.comp_list, list) {
+ list_for_each_entry(comp, &comp_list, list) {
offs += snprintf(buf + offs, PAGE_SIZE - offs, "%s\n",
comp->name);
}
@@ -531,10 +536,11 @@ static struct most_channel *get_channel(char *mdev, char *mdev_ch)
struct most_interface *iface;
struct most_channel *c, *tmp;
- dev = bus_find_device_by_name(&mc.bus, NULL, mdev);
+ dev = bus_find_device_by_name(&mostbus, NULL, mdev);
if (!dev)
return NULL;
- iface = to_most_interface(dev);
+ put_device(dev);
+ iface = dev_get_drvdata(dev);
list_for_each_entry_safe(c, tmp, &iface->p->channel_list, list) {
if (!strcmp(dev_name(&c->dev), mdev_ch))
return c;
@@ -544,12 +550,12 @@ static struct most_channel *get_channel(char *mdev, char *mdev_ch)
static
inline int link_channel_to_component(struct most_channel *c,
- struct core_component *comp,
+ struct most_component *comp,
char *name,
char *comp_param)
{
int ret;
- struct core_component **comp_ptr;
+ struct most_component **comp_ptr;
if (!c->pipe0.comp)
comp_ptr = &c->pipe0.comp;
@@ -623,7 +629,7 @@ int most_set_cfg_datatype(char *mdev, char *mdev_ch, char *buf)
}
if (i == ARRAY_SIZE(ch_data_type))
- pr_info("WARN: invalid attribute settings\n");
+ dev_warn(&c->dev, "Invalid attribute settings\n");
return 0;
}
@@ -642,7 +648,7 @@ int most_set_cfg_direction(char *mdev, char *mdev_ch, char *buf)
} else if (!strcmp(buf, "tx")) {
c->cfg.direction = MOST_CH_TX;
} else {
- pr_info("Invalid direction\n");
+ dev_err(&c->dev, "Invalid direction\n");
return -ENODATA;
}
return 0;
@@ -660,7 +666,7 @@ int most_set_cfg_packets_xact(char *mdev, char *mdev_ch, u16 val)
int most_cfg_complete(char *comp_name)
{
- struct core_component *comp;
+ struct most_component *comp;
comp = match_component(comp_name);
if (!comp)
@@ -673,7 +679,7 @@ int most_add_link(char *mdev, char *mdev_ch, char *comp_name, char *link_name,
char *comp_param)
{
struct most_channel *c = get_channel(mdev, mdev_ch);
- struct core_component *comp = match_component(comp_name);
+ struct most_component *comp = match_component(comp_name);
if (!c || !comp)
return -ENODEV;
@@ -684,7 +690,7 @@ int most_add_link(char *mdev, char *mdev_ch, char *comp_name, char *link_name,
int most_remove_link(char *mdev, char *mdev_ch, char *comp_name)
{
struct most_channel *c;
- struct core_component *comp;
+ struct most_component *comp;
comp = match_component(comp_name);
if (!comp)
@@ -722,13 +728,11 @@ static const struct attribute_group *mc_attr_groups[] = {
NULL,
};
-static int most_match(struct device *dev, struct device_driver *drv)
-{
- if (!strcmp(dev_name(dev), "most"))
- return 0;
- else
- return 1;
-}
+static struct device_driver mostbus_driver = {
+ .name = "most_core",
+ .bus = &mostbus,
+ .groups = mc_attr_groups,
+};
static inline void trash_mbo(struct mbo *mbo)
{
@@ -795,7 +799,7 @@ static int hdm_enqueue_thread(void *data)
mutex_unlock(&c->nq_mutex);
if (unlikely(ret)) {
- pr_err("hdm enqueue failed\n");
+ dev_err(&c->dev, "Buffer enqueue failed\n");
nq_hdm_mbo(mbo);
c->hdm_enqueue_task = NULL;
return 0;
@@ -922,7 +926,7 @@ flush_fifos:
void most_submit_mbo(struct mbo *mbo)
{
if (WARN_ONCE(!mbo || !mbo->context,
- "bad mbo or missing channel reference\n"))
+ "Bad buffer or missing channel reference\n"))
return;
nq_hdm_mbo(mbo);
@@ -941,8 +945,6 @@ static void most_write_completion(struct mbo *mbo)
struct most_channel *c;
c = mbo->context;
- if (mbo->status == MBO_E_INVAL)
- pr_info("WARN: Tx MBO status: invalid\n");
if (unlikely(c->is_poisoned || (mbo->status == MBO_E_CLOSE)))
trash_mbo(mbo);
else
@@ -950,7 +952,7 @@ static void most_write_completion(struct mbo *mbo)
}
int channel_has_mbo(struct most_interface *iface, int id,
- struct core_component *comp)
+ struct most_component *comp)
{
struct most_channel *c = iface->p->channel[id];
unsigned long flags;
@@ -981,7 +983,7 @@ EXPORT_SYMBOL_GPL(channel_has_mbo);
* Returns a pointer to MBO on success or NULL otherwise.
*/
struct mbo *most_get_mbo(struct most_interface *iface, int id,
- struct core_component *comp)
+ struct most_component *comp)
{
struct mbo *mbo;
struct most_channel *c;
@@ -1087,7 +1089,7 @@ static void most_read_completion(struct mbo *mbo)
* Returns 0 on success or error code otherwise.
*/
int most_start_channel(struct most_interface *iface, int id,
- struct core_component *comp)
+ struct most_component *comp)
{
int num_buffer;
int ret;
@@ -1101,14 +1103,14 @@ int most_start_channel(struct most_interface *iface, int id,
goto out; /* already started by another component */
if (!try_module_get(iface->mod)) {
- pr_info("failed to acquire HDM lock\n");
+ dev_err(&c->dev, "Failed to acquire HDM lock\n");
mutex_unlock(&c->start_mutex);
return -ENOLCK;
}
c->cfg.extra_len = 0;
if (c->iface->configure(c->iface, c->channel_id, &c->cfg)) {
- pr_info("channel configuration failed. Go check settings...\n");
+ dev_err(&c->dev, "Channel configuration failed. Go check settings...\n");
ret = -EINVAL;
goto err_put_module;
}
@@ -1157,7 +1159,7 @@ EXPORT_SYMBOL_GPL(most_start_channel);
* @comp: driver component
*/
int most_stop_channel(struct most_interface *iface, int id,
- struct core_component *comp)
+ struct most_component *comp)
{
struct most_channel *c;
@@ -1182,8 +1184,8 @@ int most_stop_channel(struct most_interface *iface, int id,
c->is_poisoned = true;
if (c->iface->poison_channel(c->iface, c->channel_id)) {
- pr_err("Cannot stop channel %d of mdev %s\n", c->channel_id,
- c->iface->description);
+ dev_err(&c->dev, "Failed to stop channel %d of interface %s\n", c->channel_id,
+ c->iface->description);
mutex_unlock(&c->start_mutex);
return -EAGAIN;
}
@@ -1192,7 +1194,7 @@ int most_stop_channel(struct most_interface *iface, int id,
#ifdef CMPL_INTERRUPTIBLE
if (wait_for_completion_interruptible(&c->cleanup)) {
- pr_info("Interrupted while clean up ch %d\n", c->channel_id);
+ dev_err(&c->dev, "Interrupted while cleaning up channel %d\n", c->channel_id);
mutex_unlock(&c->start_mutex);
return -EINTR;
}
@@ -1215,14 +1217,13 @@ EXPORT_SYMBOL_GPL(most_stop_channel);
* most_register_component - registers a driver component with the core
* @comp: driver component
*/
-int most_register_component(struct core_component *comp)
+int most_register_component(struct most_component *comp)
{
if (!comp) {
pr_err("Bad component\n");
return -EINVAL;
}
- list_add_tail(&comp->list, &mc.comp_list);
- pr_info("registered new core component %s\n", comp->name);
+ list_add_tail(&comp->list, &comp_list);
return 0;
}
EXPORT_SYMBOL_GPL(most_register_component);
@@ -1231,9 +1232,9 @@ static int disconnect_channels(struct device *dev, void *data)
{
struct most_interface *iface;
struct most_channel *c, *tmp;
- struct core_component *comp = data;
+ struct most_component *comp = data;
- iface = to_most_interface(dev);
+ iface = dev_get_drvdata(dev);
list_for_each_entry_safe(c, tmp, &iface->p->channel_list, list) {
if (c->pipe0.comp == comp || c->pipe1.comp == comp)
comp->disconnect_channel(c->iface, c->channel_id);
@@ -1249,28 +1250,24 @@ static int disconnect_channels(struct device *dev, void *data)
* most_deregister_component - deregisters a driver component with the core
* @comp: driver component
*/
-int most_deregister_component(struct core_component *comp)
+int most_deregister_component(struct most_component *comp)
{
if (!comp) {
pr_err("Bad component\n");
return -EINVAL;
}
- bus_for_each_dev(&mc.bus, NULL, comp, disconnect_channels);
+ bus_for_each_dev(&mostbus, NULL, comp, disconnect_channels);
list_del(&comp->list);
- pr_info("deregistering component %s\n", comp->name);
return 0;
}
EXPORT_SYMBOL_GPL(most_deregister_component);
-static void release_interface(struct device *dev)
-{
- pr_info("releasing interface dev %s...\n", dev_name(dev));
-}
-
static void release_channel(struct device *dev)
{
- pr_info("releasing channel dev %s...\n", dev_name(dev));
+ struct most_channel *c = to_channel(dev);
+
+ kfree(c);
}
/**
@@ -1288,13 +1285,13 @@ int most_register_interface(struct most_interface *iface)
if (!iface || !iface->enqueue || !iface->configure ||
!iface->poison_channel || (iface->num_channels > MAX_CHANNELS)) {
- pr_err("Bad interface or channel overflow\n");
+ dev_err(iface->dev, "Bad interface or channel overflow\n");
return -EINVAL;
}
id = ida_simple_get(&mdev_id, 0, 0, GFP_KERNEL);
if (id < 0) {
- pr_info("Failed to alloc mdev ID\n");
+ dev_err(iface->dev, "Failed to allocate device ID\n");
return id;
}
@@ -1307,14 +1304,13 @@ int most_register_interface(struct most_interface *iface)
INIT_LIST_HEAD(&iface->p->channel_list);
iface->p->dev_id = id;
strscpy(iface->p->name, iface->description, sizeof(iface->p->name));
- iface->dev.init_name = iface->p->name;
- iface->dev.bus = &mc.bus;
- iface->dev.parent = &mc.dev;
- iface->dev.groups = interface_attr_groups;
- iface->dev.release = release_interface;
- if (device_register(&iface->dev)) {
- pr_err("registering iface->dev failed\n");
+ iface->dev->bus = &mostbus;
+ iface->dev->groups = interface_attr_groups;
+ dev_set_drvdata(iface->dev, iface);
+ if (device_register(iface->dev)) {
+ dev_err(iface->dev, "Failed to register interface device\n");
kfree(iface->p);
+ put_device(iface->dev);
ida_simple_remove(&mdev_id, id);
return -ENOMEM;
}
@@ -1330,7 +1326,7 @@ int most_register_interface(struct most_interface *iface)
else
snprintf(c->name, STRING_SIZE, "%s", name_suffix);
c->dev.init_name = c->name;
- c->dev.parent = &iface->dev;
+ c->dev.parent = iface->dev;
c->dev.groups = channel_attr_groups;
c->dev.release = release_channel;
iface->p->channel[i] = c;
@@ -1356,26 +1352,23 @@ int most_register_interface(struct most_interface *iface)
mutex_init(&c->nq_mutex);
list_add_tail(&c->list, &iface->p->channel_list);
if (device_register(&c->dev)) {
- pr_err("registering c->dev failed\n");
+ dev_err(&c->dev, "Failed to register channel device\n");
goto err_free_most_channel;
}
}
- pr_info("registered new device mdev%d (%s)\n",
- id, iface->description);
most_interface_register_notify(iface->description);
return 0;
err_free_most_channel:
- kfree(c);
+ put_device(&c->dev);
err_free_resources:
while (i > 0) {
c = iface->p->channel[--i];
device_unregister(&c->dev);
- kfree(c);
}
kfree(iface->p);
- device_unregister(&iface->dev);
+ device_unregister(iface->dev);
ida_simple_remove(&mdev_id, id);
return -ENOMEM;
}
@@ -1393,8 +1386,6 @@ void most_deregister_interface(struct most_interface *iface)
int i;
struct most_channel *c;
- pr_info("deregistering device %s (%s)\n", dev_name(&iface->dev),
- iface->description);
for (i = 0; i < iface->num_channels; i++) {
c = iface->p->channel[i];
if (c->pipe0.comp)
@@ -1407,12 +1398,11 @@ void most_deregister_interface(struct most_interface *iface)
c->pipe1.comp = NULL;
list_del(&c->list);
device_unregister(&c->dev);
- kfree(c);
}
ida_simple_remove(&mdev_id, iface->p->dev_id);
kfree(iface->p);
- device_unregister(&iface->dev);
+ device_unregister(iface->dev);
}
EXPORT_SYMBOL_GPL(most_deregister_interface);
@@ -1462,57 +1452,35 @@ void most_resume_enqueue(struct most_interface *iface, int id)
}
EXPORT_SYMBOL_GPL(most_resume_enqueue);
-static void release_most_sub(struct device *dev)
-{
- pr_info("releasing most_subsystem\n");
-}
-
static int __init most_init(void)
{
int err;
- pr_info("init()\n");
- INIT_LIST_HEAD(&mc.comp_list);
+ INIT_LIST_HEAD(&comp_list);
ida_init(&mdev_id);
- mc.bus.name = "most",
- mc.bus.match = most_match,
- mc.drv.name = "most_core",
- mc.drv.bus = &mc.bus,
- mc.drv.groups = mc_attr_groups;
-
- err = bus_register(&mc.bus);
+ err = bus_register(&mostbus);
if (err) {
- pr_info("Cannot register most bus\n");
+ pr_err("Failed to register most bus\n");
return err;
}
- err = driver_register(&mc.drv);
+ err = driver_register(&mostbus_driver);
if (err) {
- pr_info("Cannot register core driver\n");
+ pr_err("Failed to register core driver\n");
goto err_unregister_bus;
}
- mc.dev.init_name = "most_bus";
- mc.dev.release = release_most_sub;
- if (device_register(&mc.dev)) {
- err = -ENOMEM;
- goto err_unregister_driver;
- }
configfs_init();
return 0;
-err_unregister_driver:
- driver_unregister(&mc.drv);
err_unregister_bus:
- bus_unregister(&mc.bus);
+ bus_unregister(&mostbus);
return err;
}
static void __exit most_exit(void)
{
- pr_info("exit core module\n");
- device_unregister(&mc.dev);
- driver_unregister(&mc.drv);
- bus_unregister(&mc.bus);
+ driver_unregister(&mostbus_driver);
+ bus_unregister(&mostbus);
ida_destroy(&mdev_id);
}
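
A recurring theme in the core.c changes above is device lifetime: release_channel() now performs the kfree(), and failure paths call put_device() instead of freeing directly, because once a struct device has been registered its memory may only be reclaimed through the refcount's release() callback. A condensed sketch of the rule (names illustrative):

#include <linux/device.h>
#include <linux/slab.h>

struct chan {
    struct device dev;
    /* ... channel state ... */
};

static void chan_release(struct device *dev)
{
    kfree(container_of(dev, struct chan, dev)); /* sole owner of the free */
}

static struct chan *chan_create(struct device *parent)
{
    struct chan *c = kzalloc(sizeof(*c), GFP_KERNEL);

    if (!c)
        return NULL;
    c->dev.init_name = "chan0"; /* illustrative name */
    c->dev.parent = parent;
    c->dev.release = chan_release;
    if (device_register(&c->dev)) {
        put_device(&c->dev); /* runs chan_release(); never kfree(c) */
        return NULL;
    }
    return c;
}
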
diff --git a/drivers/staging/most/dim2/Makefile b/drivers/staging/most/dim2/Makefile
index 116f04d69244..861adacf6c72 100644
--- a/drivers/staging/most/dim2/Makefile
+++ b/drivers/staging/most/dim2/Makefile
@@ -2,4 +2,3 @@
obj-$(CONFIG_MOST_DIM2) += most_dim2.o
most_dim2-objs := dim2.o hal.o sysfs.o
-ccflags-y += -I $(srctree)/drivers/staging/
diff --git a/drivers/staging/most/dim2/dim2.c b/drivers/staging/most/dim2/dim2.c
index 64c979155a49..16593281fcda 100644
--- a/drivers/staging/most/dim2/dim2.c
+++ b/drivers/staging/most/dim2/dim2.c
@@ -21,7 +21,7 @@
#include <linux/sched.h>
#include <linux/kthread.h>
-#include "most/core.h"
+#include "../most.h"
#include "hal.h"
#include "errors.h"
#include "sysfs.h"
@@ -854,8 +854,9 @@ static int dim2_probe(struct platform_device *pdev)
dev->most_iface.poison_channel = poison_channel;
dev->most_iface.request_netinfo = request_netinfo;
dev->most_iface.driver_dev = &pdev->dev;
+ dev->most_iface.dev = &dev->dev;
dev->dev.init_name = "dim2_state";
- dev->dev.parent = &dev->most_iface.dev;
+ dev->dev.parent = &pdev->dev;
ret = most_register_interface(&dev->most_iface);
if (ret) {
diff --git a/drivers/staging/most/i2c/Makefile b/drivers/staging/most/i2c/Makefile
index 2b3769dc19e7..71099dd0f85b 100644
--- a/drivers/staging/most/i2c/Makefile
+++ b/drivers/staging/most/i2c/Makefile
@@ -2,4 +2,3 @@
obj-$(CONFIG_MOST_I2C) += most_i2c.o
most_i2c-objs := i2c.o
-ccflags-y += -I $(srctree)/drivers/staging/
diff --git a/drivers/staging/most/i2c/i2c.c b/drivers/staging/most/i2c/i2c.c
index 4a4fc1005932..2980f7065846 100644
--- a/drivers/staging/most/i2c/i2c.c
+++ b/drivers/staging/most/i2c/i2c.c
@@ -14,7 +14,7 @@
#include <linux/interrupt.h>
#include <linux/err.h>
-#include "most/core.h"
+#include "../most.h"
enum { CH_RX, CH_TX, NUM_CHANNELS };
diff --git a/drivers/staging/most/core.h b/drivers/staging/most/most.h
index 49859aef98df..232e01b7f5d2 100644
--- a/drivers/staging/most/core.h
+++ b/drivers/staging/most/most.h
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* most.h - API for component and adapter drivers
*
@@ -47,7 +47,7 @@ enum most_channel_data_type {
MOST_CH_SYNC = 1 << 5,
};
-enum mbo_status_flags {
+enum most_status_flags {
	/* MBO was processed successfully (data was sent or received) */
MBO_SUCCESS = 0,
/* The MBO contains wrong or missing information. */
@@ -184,7 +184,7 @@ struct mbo {
dma_addr_t bus_address;
u16 buffer_length;
u16 processed_length;
- enum mbo_status_flags status;
+ enum most_status_flags status;
void (*complete)(struct mbo *mbo);
};
@@ -229,7 +229,7 @@ struct mbo {
* @priv Private field used by mostcore to store context information.
*/
struct most_interface {
- struct device dev;
+ struct device *dev;
struct device *driver_dev;
struct module *mod;
enum most_interface_type interface;
@@ -251,10 +251,8 @@ struct most_interface {
struct interface_private *p;
};
-#define to_most_interface(d) container_of(d, struct most_interface, dev)
-
/**
- * struct core_component - identifies a loadable component for the mostcore
+ * struct most_component - identifies a loadable component for the mostcore
* @list: list_head
* @name: component name
* @probe_channel: function for core to notify driver about channel connection
@@ -262,7 +260,7 @@ struct most_interface {
* @rx_completion: completion handler for received packets
* @tx_completion: completion handler for transmitted packets
*/
-struct core_component {
+struct most_component {
struct list_head list;
const char *name;
struct module *mod;
@@ -310,20 +308,20 @@ void most_stop_enqueue(struct most_interface *iface, int channel_idx);
* in wait fifo.
*/
void most_resume_enqueue(struct most_interface *iface, int channel_idx);
-int most_register_component(struct core_component *comp);
-int most_deregister_component(struct core_component *comp);
+int most_register_component(struct most_component *comp);
+int most_deregister_component(struct most_component *comp);
struct mbo *most_get_mbo(struct most_interface *iface, int channel_idx,
- struct core_component *comp);
+ struct most_component *comp);
void most_put_mbo(struct mbo *mbo);
int channel_has_mbo(struct most_interface *iface, int channel_idx,
- struct core_component *comp);
+ struct most_component *comp);
int most_start_channel(struct most_interface *iface, int channel_idx,
- struct core_component *comp);
+ struct most_component *comp);
int most_stop_channel(struct most_interface *iface, int channel_idx,
- struct core_component *comp);
+ struct most_component *comp);
int __init configfs_init(void);
-int most_register_configfs_subsys(struct core_component *comp);
-void most_deregister_configfs_subsys(struct core_component *comp);
+int most_register_configfs_subsys(struct most_component *comp);
+void most_deregister_configfs_subsys(struct most_component *comp);
int most_add_link(char *mdev, char *mdev_ch, char *comp_name, char *link_name,
char *comp_param);
int most_remove_link(char *mdev, char *mdev_ch, char *comp_name);
diff --git a/drivers/staging/most/net/Makefile b/drivers/staging/most/net/Makefile
index f0ac64dee71b..1582c97eb204 100644
--- a/drivers/staging/most/net/Makefile
+++ b/drivers/staging/most/net/Makefile
@@ -2,4 +2,3 @@
obj-$(CONFIG_MOST_NET) += most_net.o
most_net-objs := net.o
-ccflags-y += -I $(srctree)/drivers/staging/
diff --git a/drivers/staging/most/net/net.c b/drivers/staging/most/net/net.c
index 6cab1bb8956e..5547e36e09de 100644
--- a/drivers/staging/most/net/net.c
+++ b/drivers/staging/most/net/net.c
@@ -15,7 +15,8 @@
#include <linux/list.h>
#include <linux/wait.h>
#include <linux/kobject.h>
-#include "most/core.h"
+
+#include "../most.h"
#define MEP_HDR_LEN 8
#define MDP_HDR_LEN 16
@@ -70,7 +71,7 @@ struct net_dev_context {
static struct list_head net_devices = LIST_HEAD_INIT(net_devices);
static struct mutex probe_disc_mt; /* ch->linked = true, most_nd_open */
static DEFINE_SPINLOCK(list_lock); /* list_head, ch->linked = false, dev_hold */
-static struct core_component comp;
+static struct most_component comp;
static int skb_to_mamac(const struct sk_buff *skb, struct mbo *mbo)
{
@@ -81,6 +82,11 @@ static int skb_to_mamac(const struct sk_buff *skb, struct mbo *mbo)
unsigned int payload_len = skb->len - ETH_HLEN;
unsigned int mdp_len = payload_len + MDP_HDR_LEN;
+ if (mdp_len < skb->len) {
+ pr_err("drop: too large packet! (%u)\n", skb->len);
+ return -EINVAL;
+ }
+
if (mbo->buffer_length < mdp_len) {
pr_err("drop: too small buffer! (%d for %d)\n",
mbo->buffer_length, mdp_len);
@@ -128,6 +134,11 @@ static int skb_to_mep(const struct sk_buff *skb, struct mbo *mbo)
u8 *buff = mbo->virt_address;
unsigned int mep_len = skb->len + MEP_HDR_LEN;
+ if (mep_len < skb->len) {
+ pr_err("drop: too large packet! (%u)\n", skb->len);
+ return -EINVAL;
+ }
+
if (mbo->buffer_length < mep_len) {
pr_err("drop: too small buffer! (%d for %d)\n",
mbo->buffer_length, mep_len);
@@ -497,7 +508,7 @@ put_nd:
return ret;
}
-static struct core_component comp = {
+static struct most_component comp = {
.mod = THIS_MODULE,
.name = "net",
.probe_channel = comp_probe_channel,
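
The new mdp_len/mep_len guards above are unsigned-overflow checks: if adding the protocol header size wraps the 32-bit total, the sum comes out smaller than the input length, so comparing the two detects the wrap before the buffer-size check can be fooled. A standalone sketch:

#include <limits.h>
#include <stdio.h>

/* Detect wrap-around when adding a header length to a packet length:
 * if the unsigned sum wrapped, it is smaller than the input.
 */
static int checked_total(unsigned int payload, unsigned int hdr,
                         unsigned int *total)
{
    *total = payload + hdr;
    if (*total < payload)
        return -1;  /* wrapped: packet too large */
    return 0;
}

int main(void)
{
    unsigned int t;

    printf("%d\n", checked_total(100, 16, &t));          /* 0  */
    printf("%d\n", checked_total(UINT_MAX - 4, 16, &t)); /* -1 */
    return 0;
}
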
diff --git a/drivers/staging/most/sound/Makefile b/drivers/staging/most/sound/Makefile
index a3d086c6ca70..f0cd9d8d213e 100644
--- a/drivers/staging/most/sound/Makefile
+++ b/drivers/staging/most/sound/Makefile
@@ -2,4 +2,3 @@
obj-$(CONFIG_MOST_SOUND) += most_sound.o
most_sound-objs := sound.o
-ccflags-y += -I $(srctree)/drivers/staging/
diff --git a/drivers/staging/most/sound/sound.c b/drivers/staging/most/sound/sound.c
index 723d0bd1cc21..44cf2334834f 100644
--- a/drivers/staging/most/sound/sound.c
+++ b/drivers/staging/most/sound/sound.c
@@ -17,12 +17,13 @@
#include <sound/pcm_params.h>
#include <linux/sched.h>
#include <linux/kthread.h>
-#include <most/core.h>
+
+#include "../most.h"
#define DRIVER_NAME "sound"
#define STRING_SIZE 80
-static struct core_component comp;
+static struct most_component comp;
/**
* struct channel - private structure to keep channel specific data
@@ -323,45 +324,6 @@ static int pcm_close(struct snd_pcm_substream *substream)
}
/**
- * pcm_hw_params - implements hw_params callback function for PCM middle layer
- * @substream: sub-stream pointer
- * @hw_params: contains the hardware parameters set by the application
- *
- * This is called when the hardware parameters are set by the application,
- * that is, once when the buffer size, the period size, the format, etc. are
- * defined for the PCM substream. Much hardware setup should be done in this
- * callback,
- * including the allocation of buffers.
- *
- * Returns 0 on success or error code otherwise.
- */
-static int pcm_hw_params(struct snd_pcm_substream *substream,
- struct snd_pcm_hw_params *hw_params)
-{
- struct channel *channel = substream->private_data;
-
- if ((params_channels(hw_params) > channel->pcm_hardware.channels_max) ||
- (params_channels(hw_params) < channel->pcm_hardware.channels_min)) {
- pr_err("Requested number of channels not supported.\n");
- return -EINVAL;
- }
- return snd_pcm_lib_malloc_pages(substream, params_buffer_bytes(hw_params));
-}
-
-/**
- * pcm_hw_free - implements hw_free callback function for PCM middle layer
- * @substream: substream pointer
- *
- * This is called to release the resources allocated via hw_params.
- * This function will be always called before the close callback is called.
- *
- * Returns 0 on success or error code otherwise.
- */
-static int pcm_hw_free(struct snd_pcm_substream *substream)
-{
- return snd_pcm_lib_free_pages(substream);
-}
-
-/**
* pcm_prepare - implements prepare callback function for PCM middle layer
* @substream: substream pointer
*
@@ -462,9 +424,6 @@ static snd_pcm_uframes_t pcm_pointer(struct snd_pcm_substream *substream)
static const struct snd_pcm_ops pcm_ops = {
.open = pcm_open,
.close = pcm_close,
- .ioctl = snd_pcm_lib_ioctl,
- .hw_params = pcm_hw_params,
- .hw_free = pcm_hw_free,
.prepare = pcm_prepare,
.trigger = pcm_trigger,
.pointer = pcm_pointer,
@@ -661,8 +620,7 @@ skip_adpt_alloc:
pcm->private_data = channel;
strscpy(pcm->name, device_name, sizeof(pcm->name));
snd_pcm_set_ops(pcm, direction, &pcm_ops);
- snd_pcm_lib_preallocate_pages_for_all(pcm, SNDRV_DMA_TYPE_VMALLOC,
- NULL, 0, 0);
+ snd_pcm_set_managed_buffer_all(pcm, SNDRV_DMA_TYPE_VMALLOC, NULL, 0, 0);
return 0;
@@ -779,9 +737,9 @@ static int audio_tx_completion(struct most_interface *iface, int channel_id)
}
/**
- * Initialization of the struct core_component
+ * Initialization of the struct most_component
*/
-static struct core_component comp = {
+static struct most_component comp = {
.mod = THIS_MODULE,
.name = DRIVER_NAME,
.probe_channel = audio_probe_channel,
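
Dropping pcm_hw_params()/pcm_hw_free() is possible because the ALSA core can now own buffer management: snd_pcm_set_managed_buffer_all() makes the PCM core allocate and free the buffer around hw_params/hw_free, so callbacks that only called snd_pcm_lib_malloc_pages()/snd_pcm_lib_free_pages() (and the default .ioctl handler) become redundant. A sketch of the setup (helper name hypothetical):

#include <sound/pcm.h>

static void example_pcm_setup(struct snd_pcm *pcm, int direction,
                              const struct snd_pcm_ops *ops)
{
    snd_pcm_set_ops(pcm, direction, ops);
    /* size 0, max 0: no preallocation; the core allocates on demand */
    snd_pcm_set_managed_buffer_all(pcm, SNDRV_DMA_TYPE_VMALLOC,
                                   NULL, 0, 0);
}

The channel-count check the old pcm_hw_params() performed is normally enforced anyway by the runtime hardware constraints (channels_min/channels_max), which is presumably why it could be dropped along with the callback.
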
diff --git a/drivers/staging/most/usb/Makefile b/drivers/staging/most/usb/Makefile
index 83cf2ead7122..c2b207339aec 100644
--- a/drivers/staging/most/usb/Makefile
+++ b/drivers/staging/most/usb/Makefile
@@ -2,4 +2,3 @@
obj-$(CONFIG_MOST_USB) += most_usb.o
most_usb-objs := usb.o
-ccflags-y += -I $(srctree)/drivers/staging/
diff --git a/drivers/staging/most/usb/usb.c b/drivers/staging/most/usb/usb.c
index 360cb5b7a10b..0bda88c4bc89 100644
--- a/drivers/staging/most/usb/usb.c
+++ b/drivers/staging/most/usb/usb.c
@@ -23,7 +23,8 @@
#include <linux/dma-mapping.h>
#include <linux/etherdevice.h>
#include <linux/uaccess.h>
-#include "most/core.h"
+
+#include "../most.h"
#define USB_MTU 512
#define NO_ISOCHRONOUS_URB 0
@@ -101,6 +102,7 @@ struct clear_hold_work {
* @poll_work_obj: work for polling link status
*/
struct most_dev {
+ struct device dev;
struct usb_device *usb_device;
struct most_interface iface;
struct most_channel_capability *cap;
@@ -122,6 +124,7 @@ struct most_dev {
};
#define to_mdev(d) container_of(d, struct most_dev, iface)
+#define to_mdev_from_dev(d) container_of(d, struct most_dev, dev)
#define to_mdev_from_work(w) container_of(w, struct most_dev, poll_work_obj)
static void wq_clear_halt(struct work_struct *wq_obj);
@@ -1022,6 +1025,12 @@ static void release_dci(struct device *dev)
kfree(dci);
}
+static void release_mdev(struct device *dev)
+{
+ struct most_dev *mdev = to_mdev_from_dev(dev);
+
+ kfree(mdev);
+}
/**
* hdm_probe - probe function of USB device driver
* @interface: Interface of the attached USB device
@@ -1060,6 +1069,7 @@ hdm_probe(struct usb_interface *interface, const struct usb_device_id *id)
mdev->link_stat_timer.expires = jiffies + (2 * HZ);
mdev->iface.mod = hdm_usb_fops.owner;
+ mdev->iface.dev = &mdev->dev;
mdev->iface.driver_dev = &interface->dev;
mdev->iface.interface = ITYPE_USB;
mdev->iface.configure = hdm_configure_channel;
@@ -1078,6 +1088,9 @@ hdm_probe(struct usb_interface *interface, const struct usb_device_id *id)
usb_dev->config->desc.bConfigurationValue,
usb_iface_desc->desc.bInterfaceNumber);
+ mdev->dev.init_name = mdev->description;
+ mdev->dev.parent = &interface->dev;
+ mdev->dev.release = release_mdev;
mdev->conf = kcalloc(num_endpoints, sizeof(*mdev->conf), GFP_KERNEL);
if (!mdev->conf)
goto err_free_mdev;
@@ -1151,7 +1164,7 @@ hdm_probe(struct usb_interface *interface, const struct usb_device_id *id)
}
mdev->dci->dev.init_name = "dci";
- mdev->dci->dev.parent = &mdev->iface.dev;
+ mdev->dci->dev.parent = get_device(mdev->iface.dev);
mdev->dci->dev.groups = dci_attr_groups;
mdev->dci->dev.release = release_dci;
if (device_register(&mdev->dci->dev)) {
@@ -1165,7 +1178,7 @@ hdm_probe(struct usb_interface *interface, const struct usb_device_id *id)
mutex_unlock(&mdev->io_mutex);
return 0;
err_free_dci:
- kfree(mdev->dci);
+ put_device(&mdev->dci->dev);
err_free_busy_urbs:
kfree(mdev->busy_urbs);
err_free_ep_address:
@@ -1175,7 +1188,7 @@ err_free_cap:
err_free_conf:
kfree(mdev->conf);
err_free_mdev:
- kfree(mdev);
+ put_device(&mdev->dev);
err_out_of_memory:
if (ret == 0 || ret == -ENOMEM) {
ret = -ENOMEM;
@@ -1205,14 +1218,15 @@ static void hdm_disconnect(struct usb_interface *interface)
del_timer_sync(&mdev->link_stat_timer);
cancel_work_sync(&mdev->poll_work_obj);
- device_unregister(&mdev->dci->dev);
+ if (mdev->dci)
+ device_unregister(&mdev->dci->dev);
most_deregister_interface(&mdev->iface);
kfree(mdev->busy_urbs);
kfree(mdev->cap);
kfree(mdev->conf);
kfree(mdev->ep_address);
- kfree(mdev);
+ put_device(&mdev->dev);
}
static struct usb_driver hdm_usb = {
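
Two lifetime fixes stand out in the USB adapter changes above: the DCI device now pins its parent with get_device() for as long as it exists, and the error and disconnect paths free mdev through put_device() so release_mdev() runs. The get_device()/put_device() pairing in isolation (hypothetical helpers, not driver code):

#include <linux/device.h>

static void child_attach(struct device *child, struct device *parent)
{
    child->parent = get_device(parent); /* pin the parent */
}

static void child_detach(struct device *child)
{
    put_device(child->parent);          /* drop the pinned reference */
    child->parent = NULL;
}

Whoever takes the reference must arrange the matching put; here that naturally belongs in the child's release path.
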
diff --git a/drivers/staging/most/video/Makefile b/drivers/staging/most/video/Makefile
index 2d857d3cbcc8..856175fec8b6 100644
--- a/drivers/staging/most/video/Makefile
+++ b/drivers/staging/most/video/Makefile
@@ -2,4 +2,3 @@
obj-$(CONFIG_MOST_VIDEO) += most_video.o
most_video-objs := video.o
-ccflags-y += -I $(srctree)/drivers/staging/
diff --git a/drivers/staging/most/video/video.c b/drivers/staging/most/video/video.c
index 10c1ef7e3a3e..d32ae49d617b 100644
--- a/drivers/staging/most/video/video.c
+++ b/drivers/staging/most/video/video.c
@@ -21,11 +21,11 @@
#include <media/v4l2-ctrls.h>
#include <media/v4l2-fh.h>
-#include "most/core.h"
+#include "../most.h"
#define V4L2_CMP_MAX_INPUT 1
-static struct core_component comp;
+static struct most_component comp;
struct most_video_dev {
struct most_interface *iface;
@@ -527,7 +527,7 @@ static int comp_disconnect_channel(struct most_interface *iface,
return 0;
}
-static struct core_component comp = {
+static struct most_component comp = {
.mod = THIS_MODULE,
.name = "video",
.probe_channel = comp_probe_channel,
diff --git a/drivers/staging/mt7621-dts/mt7621.dtsi b/drivers/staging/mt7621-dts/mt7621.dtsi
index a4c08110094b..d89d68ffa7bc 100644
--- a/drivers/staging/mt7621-dts/mt7621.dtsi
+++ b/drivers/staging/mt7621-dts/mt7621.dtsi
@@ -138,7 +138,7 @@
memc: memc@5000 {
compatible = "mtk,mt7621-memc";
- reg = <0x300 0x100>;
+ reg = <0x5000 0x1000>;
};
cpc: cpc@1fbf0000 {
diff --git a/drivers/staging/nvec/nvec_kbd.c b/drivers/staging/nvec/nvec_kbd.c
index 01dbb66f7e9a..386d619e3ee9 100644
--- a/drivers/staging/nvec/nvec_kbd.c
+++ b/drivers/staging/nvec/nvec_kbd.c
@@ -123,6 +123,8 @@ static int nvec_kbd_probe(struct platform_device *pdev)
keycodes[j++] = extcode_tab_us102[i];
idev = devm_input_allocate_device(&pdev->dev);
+ if (!idev)
+ return -ENOMEM;
idev->name = "nvec keyboard";
idev->phys = "nvec";
idev->evbit[0] = BIT_MASK(EV_KEY) | BIT_MASK(EV_REP) | BIT_MASK(EV_LED);
diff --git a/drivers/staging/octeon-usb/Kconfig b/drivers/staging/octeon-usb/Kconfig
deleted file mode 100644
index 6a5d842ee0f2..000000000000
--- a/drivers/staging/octeon-usb/Kconfig
+++ /dev/null
@@ -1,11 +0,0 @@
-# SPDX-License-Identifier: GPL-2.0
-config OCTEON_USB
- tristate "Cavium Networks Octeon USB support"
- depends on CAVIUM_OCTEON_SOC && USB
- help
- This driver supports USB host controller on some Cavium
- Networks' products in the Octeon family.
-
- To compile this driver as a module, choose M here. The module
- will be called octeon-hcd.
-
diff --git a/drivers/staging/octeon-usb/Makefile b/drivers/staging/octeon-usb/Makefile
deleted file mode 100644
index 9873a0130ad5..000000000000
--- a/drivers/staging/octeon-usb/Makefile
+++ /dev/null
@@ -1,2 +0,0 @@
-# SPDX-License-Identifier: GPL-2.0
-obj-${CONFIG_OCTEON_USB} := octeon-hcd.o
diff --git a/drivers/staging/octeon-usb/TODO b/drivers/staging/octeon-usb/TODO
deleted file mode 100644
index 2b29acca5caa..000000000000
--- a/drivers/staging/octeon-usb/TODO
+++ /dev/null
@@ -1,8 +0,0 @@
-This driver is functional and has been tested on EdgeRouter Lite,
-D-Link DSR-1000N and EBH5600 evaluation board with USB mass storage.
-
-TODO:
- - kernel coding style
- - checkpatch warnings
-
-Contact: Aaro Koskinen <aaro.koskinen@iki.fi>
diff --git a/drivers/staging/octeon-usb/octeon-hcd.c b/drivers/staging/octeon-usb/octeon-hcd.c
deleted file mode 100644
index 582c9187559d..000000000000
--- a/drivers/staging/octeon-usb/octeon-hcd.c
+++ /dev/null
@@ -1,3737 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file "COPYING" in the main directory of this archive
- * for more details.
- *
- * Copyright (C) 2008 Cavium Networks
- *
- * Some parts of the code were originally released under BSD license:
- *
- * Copyright (c) 2003-2010 Cavium Networks (support@cavium.com). All rights
- * reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are
- * met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- *
- * * Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials provided
- * with the distribution.
- *
- * * Neither the name of Cavium Networks nor the names of
- * its contributors may be used to endorse or promote products
- * derived from this software without specific prior written
- * permission.
- *
- * This Software, including technical data, may be subject to U.S. export
- * control laws, including the U.S. Export Administration Act and its associated
- * regulations, and may be subject to export or import regulations in other
- * countries.
- *
- * TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
- * AND WITH ALL FAULTS AND CAVIUM NETWORKS MAKES NO PROMISES, REPRESENTATIONS OR
- * WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH RESPECT TO
- * THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY REPRESENTATION
- * OR DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT DEFECTS, AND CAVIUM
- * SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) WARRANTIES OF TITLE,
- * MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A PARTICULAR PURPOSE, LACK OF
- * VIRUSES, ACCURACY OR COMPLETENESS, QUIET ENJOYMENT, QUIET POSSESSION OR
- * CORRESPONDENCE TO DESCRIPTION. THE ENTIRE RISK ARISING OUT OF USE OR
- * PERFORMANCE OF THE SOFTWARE LIES WITH YOU.
- */
-
-#include <linux/usb.h>
-#include <linux/slab.h>
-#include <linux/module.h>
-#include <linux/usb/hcd.h>
-#include <linux/prefetch.h>
-#include <linux/dma-mapping.h>
-#include <linux/platform_device.h>
-
-#include <asm/octeon/octeon.h>
-
-#include "octeon-hcd.h"
-
-/**
- * enum cvmx_usb_speed - the possible USB device speeds
- *
- * @CVMX_USB_SPEED_HIGH: Device is operating at 480Mbps
- * @CVMX_USB_SPEED_FULL: Device is operating at 12Mbps
- * @CVMX_USB_SPEED_LOW: Device is operating at 1.5Mbps
- */
-enum cvmx_usb_speed {
- CVMX_USB_SPEED_HIGH = 0,
- CVMX_USB_SPEED_FULL = 1,
- CVMX_USB_SPEED_LOW = 2,
-};
-
-/**
- * enum cvmx_usb_transfer - the possible USB transfer types
- *
- * @CVMX_USB_TRANSFER_CONTROL: USB transfer type control for hub and status
- * transfers
- * @CVMX_USB_TRANSFER_ISOCHRONOUS: USB transfer type isochronous for low
- * priority periodic transfers
- * @CVMX_USB_TRANSFER_BULK: USB transfer type bulk for large low priority
- * transfers
- * @CVMX_USB_TRANSFER_INTERRUPT: USB transfer type interrupt for high priority
- * periodic transfers
- */
-enum cvmx_usb_transfer {
- CVMX_USB_TRANSFER_CONTROL = 0,
- CVMX_USB_TRANSFER_ISOCHRONOUS = 1,
- CVMX_USB_TRANSFER_BULK = 2,
- CVMX_USB_TRANSFER_INTERRUPT = 3,
-};
-
-/**
- * enum cvmx_usb_direction - the transfer directions
- *
- * @CVMX_USB_DIRECTION_OUT: Data is transferring from Octeon to the device/host
- * @CVMX_USB_DIRECTION_IN: Data is transferring from the device/host to Octeon
- */
-enum cvmx_usb_direction {
- CVMX_USB_DIRECTION_OUT,
- CVMX_USB_DIRECTION_IN,
-};
-
-/**
- * enum cvmx_usb_status - possible callback function status codes
- *
- * @CVMX_USB_STATUS_OK: The transaction / operation finished without
- * any errors
- * @CVMX_USB_STATUS_SHORT: FIXME: This is currently not implemented
- * @CVMX_USB_STATUS_CANCEL: The transaction was canceled while in flight
- * by a user call to cvmx_usb_cancel
- * @CVMX_USB_STATUS_ERROR: The transaction aborted with an unexpected
- * error status
- * @CVMX_USB_STATUS_STALL: The transaction received a USB STALL response
- * from the device
- * @CVMX_USB_STATUS_XACTERR: The transaction failed with an error from the
- * device even after a number of retries
- * @CVMX_USB_STATUS_DATATGLERR: The transaction failed with a data toggle
- * error even after a number of retries
- * @CVMX_USB_STATUS_BABBLEERR: The transaction failed with a babble error
- * @CVMX_USB_STATUS_FRAMEERR: The transaction failed with a frame error
- * even after a number of retries
- */
-enum cvmx_usb_status {
- CVMX_USB_STATUS_OK,
- CVMX_USB_STATUS_SHORT,
- CVMX_USB_STATUS_CANCEL,
- CVMX_USB_STATUS_ERROR,
- CVMX_USB_STATUS_STALL,
- CVMX_USB_STATUS_XACTERR,
- CVMX_USB_STATUS_DATATGLERR,
- CVMX_USB_STATUS_BABBLEERR,
- CVMX_USB_STATUS_FRAMEERR,
-};
-
-/**
- * struct cvmx_usb_port_status - the USB port status information
- *
- * @port_enabled: 1 = USB port is enabled, 0 = disabled
- * @port_over_current: 1 = Over current detected, 0 = Over current not
- * detected. Octeon doesn't support over current detection.
- * @port_powered: 1 = Port power is being supplied to the device, 0 =
- * power is off. Octeon doesn't support turning port power
- * off.
- * @port_speed: Current port speed.
- * @connected: 1 = A device is connected to the port, 0 = No device is
- * connected.
- * @connect_change: 1 = Device connected state changed since the last set
- * status call.
- */
-struct cvmx_usb_port_status {
- u32 reserved : 25;
- u32 port_enabled : 1;
- u32 port_over_current : 1;
- u32 port_powered : 1;
- enum cvmx_usb_speed port_speed : 2;
- u32 connected : 1;
- u32 connect_change : 1;
-};
-
-/**
- * struct cvmx_usb_iso_packet - descriptor for Isochronous packets
- *
- * @offset: This is the offset in bytes into the main buffer where this data
- * is stored.
- * @length: This is the length in bytes of the data.
- * @status: This is the status of this individual packet transfer.
- */
-struct cvmx_usb_iso_packet {
- int offset;
- int length;
- enum cvmx_usb_status status;
-};
-
-/**
- * enum cvmx_usb_initialize_flags - flags used by the initialization function
- *
- * @CVMX_USB_INITIALIZE_FLAGS_CLOCK_XO_XI: The USB port uses a 12MHz crystal
- * as clock source at USB_XO and
- * USB_XI.
- * @CVMX_USB_INITIALIZE_FLAGS_CLOCK_XO_GND: The USB port uses 12/24/48MHz 2.5V
- * board clock source at USB_XO.
- * USB_XI should be tied to GND.
- * @CVMX_USB_INITIALIZE_FLAGS_CLOCK_MHZ_MASK: Mask for clock speed field
- * @CVMX_USB_INITIALIZE_FLAGS_CLOCK_12MHZ: Speed of reference clock or
- * crystal
- * @CVMX_USB_INITIALIZE_FLAGS_CLOCK_24MHZ: Speed of reference clock
- * @CVMX_USB_INITIALIZE_FLAGS_CLOCK_48MHZ: Speed of reference clock
- * @CVMX_USB_INITIALIZE_FLAGS_NO_DMA: Disable DMA and use polled I/O for
- * data transfers on the USB port
- */
-enum cvmx_usb_initialize_flags {
- CVMX_USB_INITIALIZE_FLAGS_CLOCK_XO_XI = 1 << 0,
- CVMX_USB_INITIALIZE_FLAGS_CLOCK_XO_GND = 1 << 1,
- CVMX_USB_INITIALIZE_FLAGS_CLOCK_MHZ_MASK = 3 << 3,
- CVMX_USB_INITIALIZE_FLAGS_CLOCK_12MHZ = 1 << 3,
- CVMX_USB_INITIALIZE_FLAGS_CLOCK_24MHZ = 2 << 3,
- CVMX_USB_INITIALIZE_FLAGS_CLOCK_48MHZ = 3 << 3,
- /* Bits 3-4 used to encode the clock frequency */
- CVMX_USB_INITIALIZE_FLAGS_NO_DMA = 1 << 5,
-};
-
-/**
- * enum cvmx_usb_pipe_flags - internal flags for a pipe.
- *
- * @CVMX_USB_PIPE_FLAGS_SCHEDULED: Used internally to determine if a pipe is
- * actively using hardware.
- * @CVMX_USB_PIPE_FLAGS_NEED_PING: Used internally to determine if a high speed
- * pipe is in the ping state.
- */
-enum cvmx_usb_pipe_flags {
- CVMX_USB_PIPE_FLAGS_SCHEDULED = 1 << 17,
- CVMX_USB_PIPE_FLAGS_NEED_PING = 1 << 18,
-};
-
-/* Maximum number of times to retry failed transactions */
-#define MAX_RETRIES 3
-
-/* Maximum number of hardware channels supported by the USB block */
-#define MAX_CHANNELS 8
-
-/*
- * The low level hardware can transfer a maximum of this number of bytes in each
- * transfer. The field is 19 bits wide
- */
-#define MAX_TRANSFER_BYTES ((1 << 19) - 1)
-
-/*
- * The low level hardware can transfer a maximum of this number of packets in
- * each transfer. The field is 10 bits wide
- */
-#define MAX_TRANSFER_PACKETS ((1 << 10) - 1)
-
-/**
- * Logical transactions may take numerous low level
- * transactions, especially when splits are concerned. This
- * enum represents all of the possible stages a transaction can
- * be in. Note that split completes are always odd. This is so
- * the NAK handler can back up to the previous low level
- * transaction with a simple clearing of bit 0.
- */
-enum cvmx_usb_stage {
- CVMX_USB_STAGE_NON_CONTROL,
- CVMX_USB_STAGE_NON_CONTROL_SPLIT_COMPLETE,
- CVMX_USB_STAGE_SETUP,
- CVMX_USB_STAGE_SETUP_SPLIT_COMPLETE,
- CVMX_USB_STAGE_DATA,
- CVMX_USB_STAGE_DATA_SPLIT_COMPLETE,
- CVMX_USB_STAGE_STATUS,
- CVMX_USB_STAGE_STATUS_SPLIT_COMPLETE,
-};
-
-/**
- * struct cvmx_usb_transaction - describes each pending USB transaction
- * regardless of type. These are linked together
- * to form a list of pending requests for a pipe.
- *
- * @node: List node for transactions in the pipe.
- * @type: Type of transaction, duplicated from the pipe.
- * @flags: State flags for this transaction.
- * @buffer: User's physical buffer address to read/write.
- * @buffer_length: Size of the user's buffer in bytes.
- * @control_header: For control transactions, physical address of the 8
- * byte standard header.
- * @iso_start_frame: For ISO transactions, the starting frame number.
- * @iso_number_packets: For ISO transactions, the number of packets in the
- * request.
- * @iso_packets: For ISO transactions, the sub packets in the request.
- * @actual_bytes: Actual bytes transferred for this transaction.
- * @stage: For control transactions, the current stage.
- * @urb: URB.
- */
-struct cvmx_usb_transaction {
- struct list_head node;
- enum cvmx_usb_transfer type;
- u64 buffer;
- int buffer_length;
- u64 control_header;
- int iso_start_frame;
- int iso_number_packets;
- struct cvmx_usb_iso_packet *iso_packets;
- int xfersize;
- int pktcnt;
- int retries;
- int actual_bytes;
- enum cvmx_usb_stage stage;
- struct urb *urb;
-};
-
-/**
- * struct cvmx_usb_pipe - a pipe represents a virtual connection between Octeon
- * and some USB device. It contains a list of pending
- * requests to the device.
- *
- * @node: List node for pipe list
- * @transactions: List of pending transactions
- * @interval: For periodic pipes, the interval between packets in
- * frames
- * @next_tx_frame: The next frame this pipe is allowed to transmit on
- * @flags: State flags for this pipe
- * @device_speed: Speed of device connected to this pipe
- * @transfer_type: Type of transaction supported by this pipe
- * @transfer_dir: IN or OUT. Ignored for Control
- * @multi_count: Max packets in a row for the device
- * @max_packet: The device's maximum packet size in bytes
- * @device_addr: USB device address at other end of pipe
- * @endpoint_num: USB endpoint number at other end of pipe
- * @hub_device_addr: Hub address this device is connected to
- * @hub_port: Hub port this device is connected to
- * @pid_toggle: This toggles between 0/1 on every packet sent to track
- * the data PID needed
- * @channel: Hardware DMA channel for this pipe
- * @split_sc_frame: The low order bits of the frame number the split
- * complete should be sent on
- */
-struct cvmx_usb_pipe {
- struct list_head node;
- struct list_head transactions;
- u64 interval;
- u64 next_tx_frame;
- enum cvmx_usb_pipe_flags flags;
- enum cvmx_usb_speed device_speed;
- enum cvmx_usb_transfer transfer_type;
- enum cvmx_usb_direction transfer_dir;
- int multi_count;
- u16 max_packet;
- u8 device_addr;
- u8 endpoint_num;
- u8 hub_device_addr;
- u8 hub_port;
- u8 pid_toggle;
- u8 channel;
- s8 split_sc_frame;
-};
-
-struct cvmx_usb_tx_fifo {
- struct {
- int channel;
- int size;
- u64 address;
- } entry[MAX_CHANNELS + 1];
- int head;
- int tail;
-};
-
-/**
- * struct octeon_hcd - the state of the USB block
- *
- * @lock: Serialization lock.
- * @init_flags: Flags passed to initialize.
- * @index: Which USB block this is for.
- * @idle_hardware_channels: Bit set for every idle hardware channel.
- * @usbcx_hprt: Stored port status so we don't need to read a CSR to
- * determine splits.
- * @pipe_for_channel: Map channels to pipes.
- * @indent: Used by debug output to indent functions.
- * @port_status: Last port status used for change notification.
- * @idle_pipes: List of open pipes that have no transactions.
- * @active_pipes: Active pipes indexed by transfer type.
- * @frame_number: Increments every SOF interrupt for time keeping.
- * @active_split: Points to the current active split, or NULL.
- * @periodic: Software TX FIFO for periodic transfers.
- * @nonperiodic: Software TX FIFO for nonperiodic transfers.
- */
-struct octeon_hcd {
- spinlock_t lock; /* serialization lock */
- int init_flags;
- int index;
- int idle_hardware_channels;
- union cvmx_usbcx_hprt usbcx_hprt;
- struct cvmx_usb_pipe *pipe_for_channel[MAX_CHANNELS];
- int indent;
- struct cvmx_usb_port_status port_status;
- struct list_head idle_pipes;
- struct list_head active_pipes[4];
- u64 frame_number;
- struct cvmx_usb_transaction *active_split;
- struct cvmx_usb_tx_fifo periodic;
- struct cvmx_usb_tx_fifo nonperiodic;
-};
-
-/*
- * This macro logically sets a single field in a CSR. It does the sequence
- * read, modify, and write
- */
-#define USB_SET_FIELD32(address, _union, field, value) \
- do { \
- union _union c; \
- \
- c.u32 = cvmx_usb_read_csr32(usb, address); \
- c.s.field = value; \
- cvmx_usb_write_csr32(usb, address, c.u32); \
- } while (0)
-
-/* Returns the I/O address used to push/pop data to/from the FIFOs */
-#define USB_FIFO_ADDRESS(channel, usb_index) \
- (CVMX_USBCX_GOTGCTL(usb_index) + ((channel) + 1) * 0x1000)
-
-/**
- * struct octeon_temp_buffer - a bounce buffer for USB transfers
- * @orig_buffer: the original buffer passed by the USB stack
- * @data: the newly allocated temporary buffer (excluding meta-data)
- *
- * Both the DMA engine and FIFO mode will always transfer full 32-bit words. If
- * the buffer is too short, we need to allocate a temporary one, and this struct
- * represents it.
- */
-struct octeon_temp_buffer {
- void *orig_buffer;
- u8 data[0];
-};
-
-static inline struct usb_hcd *octeon_to_hcd(struct octeon_hcd *p)
-{
- return container_of((void *)p, struct usb_hcd, hcd_priv);
-}
-
-/**
- * octeon_alloc_temp_buffer - allocate a temporary buffer for USB transfer
- * (if needed)
- * @urb: URB.
- * @mem_flags: Memory allocation flags.
- *
- * This function allocates a temporary bounce buffer whenever it's needed
- * due to HW limitations.
- */
-static int octeon_alloc_temp_buffer(struct urb *urb, gfp_t mem_flags)
-{
- struct octeon_temp_buffer *temp;
-
- if (urb->num_sgs || urb->sg ||
- (urb->transfer_flags & URB_NO_TRANSFER_DMA_MAP) ||
- !(urb->transfer_buffer_length % sizeof(u32)))
- return 0;
-
- temp = kmalloc(ALIGN(urb->transfer_buffer_length, sizeof(u32)) +
- sizeof(*temp), mem_flags);
- if (!temp)
- return -ENOMEM;
-
- temp->orig_buffer = urb->transfer_buffer;
- if (usb_urb_dir_out(urb))
- memcpy(temp->data, urb->transfer_buffer,
- urb->transfer_buffer_length);
- urb->transfer_buffer = temp->data;
- urb->transfer_flags |= URB_ALIGNED_TEMP_BUFFER;
-
- return 0;
-}
-
-/**
- * octeon_free_temp_buffer - free a temporary buffer used by USB transfers.
- * @urb: URB.
- *
- * Frees a buffer allocated by octeon_alloc_temp_buffer().
- */
-static void octeon_free_temp_buffer(struct urb *urb)
-{
- struct octeon_temp_buffer *temp;
- size_t length;
-
- if (!(urb->transfer_flags & URB_ALIGNED_TEMP_BUFFER))
- return;
-
- temp = container_of(urb->transfer_buffer, struct octeon_temp_buffer,
- data);
- if (usb_urb_dir_in(urb)) {
- if (usb_pipeisoc(urb->pipe))
- length = urb->transfer_buffer_length;
- else
- length = urb->actual_length;
-
- memcpy(temp->orig_buffer, urb->transfer_buffer, length);
- }
- urb->transfer_buffer = temp->orig_buffer;
- urb->transfer_flags &= ~URB_ALIGNED_TEMP_BUFFER;
- kfree(temp);
-}
-
-/**
- * octeon_map_urb_for_dma - Octeon-specific map_urb_for_dma().
- * @hcd: USB HCD structure.
- * @urb: URB.
- * @mem_flags: Memory allocation flags.
- */
-static int octeon_map_urb_for_dma(struct usb_hcd *hcd, struct urb *urb,
- gfp_t mem_flags)
-{
- int ret;
-
- ret = octeon_alloc_temp_buffer(urb, mem_flags);
- if (ret)
- return ret;
-
- ret = usb_hcd_map_urb_for_dma(hcd, urb, mem_flags);
- if (ret)
- octeon_free_temp_buffer(urb);
-
- return ret;
-}
-
-/**
- * octeon_unmap_urb_for_dma - Octeon-specific unmap_urb_for_dma()
- * @hcd: USB HCD structure.
- * @urb: URB.
- */
-static void octeon_unmap_urb_for_dma(struct usb_hcd *hcd, struct urb *urb)
-{
- usb_hcd_unmap_urb_for_dma(hcd, urb);
- octeon_free_temp_buffer(urb);
-}
-
-/**
- * Read a USB 32bit CSR. It performs the necessary address swizzle
- * for 32bit CSRs.
- *
- * @usb: USB block this access is for
- * @address: 64bit address to read
- *
- * Returns: Result of the read
- */
-static inline u32 cvmx_usb_read_csr32(struct octeon_hcd *usb, u64 address)
-{
- return cvmx_read64_uint32(address ^ 4);
-}
-
-/**
- * Write a USB 32bit CSR. It performs the necessary address swizzle
- * for 32bit CSRs and issues a dummy CSR read afterwards so the
- * posted write reaches the hardware.
- *
- * @usb: USB block this access is for
- * @address: 64bit address to write
- * @value: Value to write
- */
-static inline void cvmx_usb_write_csr32(struct octeon_hcd *usb,
- u64 address, u32 value)
-{
- cvmx_write64_uint32(address ^ 4, value);
- cvmx_read64_uint64(CVMX_USBNX_DMA0_INB_CHN0(usb->index));
-}
-
-/**
- * Return non-zero if this pipe connects to a non-high-speed
- * device through a high speed hub.
- *
- * @usb: USB block this access is for
- * @pipe: Pipe to check
- *
- * Returns: Non-zero if we need to do split transactions
- */
-static inline int cvmx_usb_pipe_needs_split(struct octeon_hcd *usb,
- struct cvmx_usb_pipe *pipe)
-{
- return pipe->device_speed != CVMX_USB_SPEED_HIGH &&
- usb->usbcx_hprt.s.prtspd == CVMX_USB_SPEED_HIGH;
-}
-
-/**
- * Trivial utility function to return the correct PID for a pipe
- *
- * @pipe: pipe to check
- *
- * Returns: PID for pipe
- */
-static inline int cvmx_usb_get_data_pid(struct cvmx_usb_pipe *pipe)
-{
- if (pipe->pid_toggle)
- return 2; /* Data1 */
- return 0; /* Data0 */
-}
-
-/* Loops through register until txfflsh or rxfflsh become zero.*/
-static int cvmx_wait_tx_rx(struct octeon_hcd *usb, int fflsh_type)
-{
- int result;
- u64 address = CVMX_USBCX_GRSTCTL(usb->index);
- u64 done = cvmx_get_cycle() + 100 *
- (u64)octeon_get_clock_rate() / 1000000;
- union cvmx_usbcx_grstctl c;
-
- while (1) {
- c.u32 = cvmx_usb_read_csr32(usb, address);
- if (fflsh_type == 0 && c.s.txfflsh == 0) {
- result = 0;
- break;
- } else if (fflsh_type == 1 && c.s.rxfflsh == 0) {
- result = 0;
- break;
- } else if (cvmx_get_cycle() > done) {
- result = -1;
- break;
- }
-
- __delay(100);
- }
- return result;
-}
-
-static void cvmx_fifo_setup(struct octeon_hcd *usb)
-{
- union cvmx_usbcx_ghwcfg3 usbcx_ghwcfg3;
- union cvmx_usbcx_gnptxfsiz npsiz;
- union cvmx_usbcx_hptxfsiz psiz;
-
- usbcx_ghwcfg3.u32 = cvmx_usb_read_csr32(usb,
- CVMX_USBCX_GHWCFG3(usb->index));
-
- /*
- * Program the USBC_GRXFSIZ register to select the size of the receive
- * FIFO (25%).
- */
- USB_SET_FIELD32(CVMX_USBCX_GRXFSIZ(usb->index), cvmx_usbcx_grxfsiz,
- rxfdep, usbcx_ghwcfg3.s.dfifodepth / 4);
-
- /*
- * Program the USBC_GNPTXFSIZ register to select the size and the start
- * address of the non-periodic transmit FIFO for nonperiodic
- * transactions (50%).
- */
- npsiz.u32 = cvmx_usb_read_csr32(usb, CVMX_USBCX_GNPTXFSIZ(usb->index));
- npsiz.s.nptxfdep = usbcx_ghwcfg3.s.dfifodepth / 2;
- npsiz.s.nptxfstaddr = usbcx_ghwcfg3.s.dfifodepth / 4;
- cvmx_usb_write_csr32(usb, CVMX_USBCX_GNPTXFSIZ(usb->index), npsiz.u32);
-
- /*
- * Program the USBC_HPTXFSIZ register to select the size and start
- * address of the periodic transmit FIFO for periodic transactions
- * (25%).
- */
- psiz.u32 = cvmx_usb_read_csr32(usb, CVMX_USBCX_HPTXFSIZ(usb->index));
- psiz.s.ptxfsize = usbcx_ghwcfg3.s.dfifodepth / 4;
- psiz.s.ptxfstaddr = 3 * usbcx_ghwcfg3.s.dfifodepth / 4;
- cvmx_usb_write_csr32(usb, CVMX_USBCX_HPTXFSIZ(usb->index), psiz.u32);
-
- /* Flush all FIFOs */
- USB_SET_FIELD32(CVMX_USBCX_GRSTCTL(usb->index),
- cvmx_usbcx_grstctl, txfnum, 0x10);
- USB_SET_FIELD32(CVMX_USBCX_GRSTCTL(usb->index),
- cvmx_usbcx_grstctl, txfflsh, 1);
- cvmx_wait_tx_rx(usb, 0);
- USB_SET_FIELD32(CVMX_USBCX_GRSTCTL(usb->index),
- cvmx_usbcx_grstctl, rxfflsh, 1);
- cvmx_wait_tx_rx(usb, 1);
-}
-
-/**
- * Shutdown a USB port after a call to cvmx_usb_initialize().
- * The port should be disabled with all pipes closed when this
- * function is called.
- *
- * @usb: USB device state populated by cvmx_usb_initialize().
- *
- * Returns: 0 or a negative error code.
- */
-static int cvmx_usb_shutdown(struct octeon_hcd *usb)
-{
- union cvmx_usbnx_clk_ctl usbn_clk_ctl;
-
- /* Make sure all pipes are closed */
- if (!list_empty(&usb->idle_pipes) ||
- !list_empty(&usb->active_pipes[CVMX_USB_TRANSFER_ISOCHRONOUS]) ||
- !list_empty(&usb->active_pipes[CVMX_USB_TRANSFER_INTERRUPT]) ||
- !list_empty(&usb->active_pipes[CVMX_USB_TRANSFER_CONTROL]) ||
- !list_empty(&usb->active_pipes[CVMX_USB_TRANSFER_BULK]))
- return -EBUSY;
-
- /* Disable the clocks and put them in power on reset */
- usbn_clk_ctl.u64 = cvmx_read64_uint64(CVMX_USBNX_CLK_CTL(usb->index));
- usbn_clk_ctl.s.enable = 1;
- usbn_clk_ctl.s.por = 1;
- usbn_clk_ctl.s.hclk_rst = 1;
- usbn_clk_ctl.s.prst = 0;
- usbn_clk_ctl.s.hrst = 0;
- cvmx_write64_uint64(CVMX_USBNX_CLK_CTL(usb->index), usbn_clk_ctl.u64);
- return 0;
-}
-
-/**
- * Initialize a USB port for use. This must be called before any
- * other access to the Octeon USB port is made. The port starts
- * off in the disabled state.
- *
- * @dev: Pointer to struct device for logging purposes.
- * @usb: Pointer to struct octeon_hcd.
- *
- * Returns: 0 or a negative error code.
- */
-static int cvmx_usb_initialize(struct device *dev,
- struct octeon_hcd *usb)
-{
- int channel;
- int divisor;
- int retries = 0;
- union cvmx_usbcx_hcfg usbcx_hcfg;
- union cvmx_usbnx_clk_ctl usbn_clk_ctl;
- union cvmx_usbcx_gintsts usbc_gintsts;
- union cvmx_usbcx_gahbcfg usbcx_gahbcfg;
- union cvmx_usbcx_gintmsk usbcx_gintmsk;
- union cvmx_usbcx_gusbcfg usbcx_gusbcfg;
- union cvmx_usbnx_usbp_ctl_status usbn_usbp_ctl_status;
-
-retry:
- /*
- * Power On Reset and PHY Initialization
- *
- * 1. Wait for DCOK to assert (nothing to do)
- *
- * 2a. Write USBN0/1_CLK_CTL[POR] = 1 and
- * USBN0/1_CLK_CTL[HRST,PRST,HCLK_RST] = 0
- */
- usbn_clk_ctl.u64 = cvmx_read64_uint64(CVMX_USBNX_CLK_CTL(usb->index));
- usbn_clk_ctl.s.por = 1;
- usbn_clk_ctl.s.hrst = 0;
- usbn_clk_ctl.s.prst = 0;
- usbn_clk_ctl.s.hclk_rst = 0;
- usbn_clk_ctl.s.enable = 0;
- /*
- * 2b. Select the USB reference clock/crystal parameters by writing
- * appropriate values to USBN0/1_CLK_CTL[P_C_SEL, P_RTYPE, P_COM_ON]
- */
- if (usb->init_flags & CVMX_USB_INITIALIZE_FLAGS_CLOCK_XO_GND) {
- /*
- * The USB port uses 12/24/48MHz 2.5V board clock
- * source at USB_XO. USB_XI should be tied to GND.
- * Most Octeon evaluation boards require this setting
- */
- if (OCTEON_IS_MODEL(OCTEON_CN3XXX) ||
- OCTEON_IS_MODEL(OCTEON_CN56XX) ||
- OCTEON_IS_MODEL(OCTEON_CN50XX))
- /* From CN56XX,CN50XX,CN31XX,CN30XX manuals */
- usbn_clk_ctl.s.p_rtype = 2; /* p_rclk=1 & p_xenbn=0 */
- else
- /* From CN52XX manual */
- usbn_clk_ctl.s.p_rtype = 1;
-
- switch (usb->init_flags &
- CVMX_USB_INITIALIZE_FLAGS_CLOCK_MHZ_MASK) {
- case CVMX_USB_INITIALIZE_FLAGS_CLOCK_12MHZ:
- usbn_clk_ctl.s.p_c_sel = 0;
- break;
- case CVMX_USB_INITIALIZE_FLAGS_CLOCK_24MHZ:
- usbn_clk_ctl.s.p_c_sel = 1;
- break;
- case CVMX_USB_INITIALIZE_FLAGS_CLOCK_48MHZ:
- usbn_clk_ctl.s.p_c_sel = 2;
- break;
- }
- } else {
- /*
- * The USB port uses a 12MHz crystal as clock source
- * at USB_XO and USB_XI
- */
- if (OCTEON_IS_MODEL(OCTEON_CN3XXX))
- /* From CN31XX,CN30XX manual */
- usbn_clk_ctl.s.p_rtype = 3; /* p_rclk=1 & p_xenbn=1 */
- else
- /* From CN56XX,CN52XX,CN50XX manuals. */
- usbn_clk_ctl.s.p_rtype = 0;
-
- usbn_clk_ctl.s.p_c_sel = 0;
- }
- /*
- * 2c. Select the HCLK via writing USBN0/1_CLK_CTL[DIVIDE, DIVIDE2] and
- * setting USBN0/1_CLK_CTL[ENABLE] = 1. Divide the core clock down
- * such that USB is as close as possible to 125MHz
- */
- divisor = DIV_ROUND_UP(octeon_get_clock_rate(), 125000000);
- /* Lower than 4 doesn't seem to work properly */
- if (divisor < 4)
- divisor = 4;
- usbn_clk_ctl.s.divide = divisor;
- usbn_clk_ctl.s.divide2 = 0;
- cvmx_write64_uint64(CVMX_USBNX_CLK_CTL(usb->index), usbn_clk_ctl.u64);
-
- /* 2d. Write USBN0/1_CLK_CTL[HCLK_RST] = 1 */
- usbn_clk_ctl.s.hclk_rst = 1;
- cvmx_write64_uint64(CVMX_USBNX_CLK_CTL(usb->index), usbn_clk_ctl.u64);
- /* 2e. Wait 64 core-clock cycles for HCLK to stabilize */
- __delay(64);
- /*
- * 3. Program the power-on reset field in the USBN clock-control
- * register:
- * USBN_CLK_CTL[POR] = 0
- */
- usbn_clk_ctl.s.por = 0;
- cvmx_write64_uint64(CVMX_USBNX_CLK_CTL(usb->index), usbn_clk_ctl.u64);
- /* 4. Wait 1 ms for PHY clock to start */
- mdelay(1);
- /*
- * 5. Program the Reset input from automatic test equipment field in the
- * USBP control and status register:
- * USBN_USBP_CTL_STATUS[ATE_RESET] = 1
- */
- usbn_usbp_ctl_status.u64 =
- cvmx_read64_uint64(CVMX_USBNX_USBP_CTL_STATUS(usb->index));
- usbn_usbp_ctl_status.s.ate_reset = 1;
- cvmx_write64_uint64(CVMX_USBNX_USBP_CTL_STATUS(usb->index),
- usbn_usbp_ctl_status.u64);
- /* 6. Wait 10 cycles */
- __delay(10);
- /*
- * 7. Clear ATE_RESET field in the USBN clock-control register:
- * USBN_USBP_CTL_STATUS[ATE_RESET] = 0
- */
- usbn_usbp_ctl_status.s.ate_reset = 0;
- cvmx_write64_uint64(CVMX_USBNX_USBP_CTL_STATUS(usb->index),
- usbn_usbp_ctl_status.u64);
- /*
- * 8. Program the PHY reset field in the USBN clock-control register:
- * USBN_CLK_CTL[PRST] = 1
- */
- usbn_clk_ctl.s.prst = 1;
- cvmx_write64_uint64(CVMX_USBNX_CLK_CTL(usb->index), usbn_clk_ctl.u64);
- /*
- * 9. Program the USBP control and status register to select host or
- * device mode. USBN_USBP_CTL_STATUS[HST_MODE] = 0 for host, = 1 for
- * device
- */
- usbn_usbp_ctl_status.s.hst_mode = 0;
- cvmx_write64_uint64(CVMX_USBNX_USBP_CTL_STATUS(usb->index),
- usbn_usbp_ctl_status.u64);
- /* 10. Wait 1 us */
- udelay(1);
- /*
- * 11. Program the hreset_n field in the USBN clock-control register:
- * USBN_CLK_CTL[HRST] = 1
- */
- usbn_clk_ctl.s.hrst = 1;
- cvmx_write64_uint64(CVMX_USBNX_CLK_CTL(usb->index), usbn_clk_ctl.u64);
- /* 12. Proceed to USB core initialization */
- usbn_clk_ctl.s.enable = 1;
- cvmx_write64_uint64(CVMX_USBNX_CLK_CTL(usb->index), usbn_clk_ctl.u64);
- udelay(1);
-
- /*
- * USB Core Initialization
- *
- * 1. Read USBC_GHWCFG1, USBC_GHWCFG2, USBC_GHWCFG3, USBC_GHWCFG4 to
- * determine USB core configuration parameters.
- *
- * Nothing needed
- *
- * 2. Program the following fields in the global AHB configuration
- * register (USBC_GAHBCFG)
- * DMA mode, USBC_GAHBCFG[DMAEn]: 1 = DMA mode, 0 = slave mode
- * Burst length, USBC_GAHBCFG[HBSTLEN] = 0
- * Nonperiodic TxFIFO empty level (slave mode only),
- * USBC_GAHBCFG[NPTXFEMPLVL]
- * Periodic TxFIFO empty level (slave mode only),
- * USBC_GAHBCFG[PTXFEMPLVL]
- * Global interrupt mask, USBC_GAHBCFG[GLBLINTRMSK] = 1
- */
- usbcx_gahbcfg.u32 = 0;
- usbcx_gahbcfg.s.dmaen = !(usb->init_flags &
- CVMX_USB_INITIALIZE_FLAGS_NO_DMA);
- usbcx_gahbcfg.s.hbstlen = 0;
- usbcx_gahbcfg.s.nptxfemplvl = 1;
- usbcx_gahbcfg.s.ptxfemplvl = 1;
- usbcx_gahbcfg.s.glblintrmsk = 1;
- cvmx_usb_write_csr32(usb, CVMX_USBCX_GAHBCFG(usb->index),
- usbcx_gahbcfg.u32);
-
- /*
- * 3. Program the following fields in USBC_GUSBCFG register.
- * HS/FS timeout calibration, USBC_GUSBCFG[TOUTCAL] = 0
- * ULPI DDR select, USBC_GUSBCFG[DDRSEL] = 0
- * USB turnaround time, USBC_GUSBCFG[USBTRDTIM] = 0x5
- * PHY low-power clock select, USBC_GUSBCFG[PHYLPWRCLKSEL] = 0
- */
- usbcx_gusbcfg.u32 = cvmx_usb_read_csr32(usb,
- CVMX_USBCX_GUSBCFG(usb->index));
- usbcx_gusbcfg.s.toutcal = 0;
- usbcx_gusbcfg.s.ddrsel = 0;
- usbcx_gusbcfg.s.usbtrdtim = 0x5;
- usbcx_gusbcfg.s.phylpwrclksel = 0;
- cvmx_usb_write_csr32(usb, CVMX_USBCX_GUSBCFG(usb->index),
- usbcx_gusbcfg.u32);
-
- /*
- * 4. The software must unmask the following bits in the USBC_GINTMSK
- * register.
- * OTG interrupt mask, USBC_GINTMSK[OTGINTMSK] = 1
- * Mode mismatch interrupt mask, USBC_GINTMSK[MODEMISMSK] = 1
- */
- usbcx_gintmsk.u32 = cvmx_usb_read_csr32(usb,
- CVMX_USBCX_GINTMSK(usb->index));
- usbcx_gintmsk.s.otgintmsk = 1;
- usbcx_gintmsk.s.modemismsk = 1;
- usbcx_gintmsk.s.hchintmsk = 1;
- usbcx_gintmsk.s.sofmsk = 0;
- /* We need RX FIFO interrupts if we don't have DMA */
- if (usb->init_flags & CVMX_USB_INITIALIZE_FLAGS_NO_DMA)
- usbcx_gintmsk.s.rxflvlmsk = 1;
- cvmx_usb_write_csr32(usb, CVMX_USBCX_GINTMSK(usb->index),
- usbcx_gintmsk.u32);
-
- /*
- * Disable all channel interrupts. We'll enable them per channel later.
- */
- for (channel = 0; channel < 8; channel++)
- cvmx_usb_write_csr32(usb,
- CVMX_USBCX_HCINTMSKX(channel, usb->index),
- 0);
-
- /*
- * Host Port Initialization
- *
- * 1. Program the host-port interrupt-mask field to unmask,
- * USBC_GINTMSK[PRTINT] = 1
- */
- USB_SET_FIELD32(CVMX_USBCX_GINTMSK(usb->index),
- cvmx_usbcx_gintmsk, prtintmsk, 1);
- USB_SET_FIELD32(CVMX_USBCX_GINTMSK(usb->index),
- cvmx_usbcx_gintmsk, disconnintmsk, 1);
-
- /*
- * 2. Program the USBC_HCFG register to select full-speed host
- * or high-speed host.
- */
- usbcx_hcfg.u32 = cvmx_usb_read_csr32(usb, CVMX_USBCX_HCFG(usb->index));
- usbcx_hcfg.s.fslssupp = 0;
- usbcx_hcfg.s.fslspclksel = 0;
- cvmx_usb_write_csr32(usb, CVMX_USBCX_HCFG(usb->index), usbcx_hcfg.u32);
-
- cvmx_fifo_setup(usb);
-
- /*
- * If the controller is getting port events right after the reset, it
- * means the initialization failed. Try resetting the controller again
- * in such case. This is seen to happen after cold boot on DSR-1000N.
- */
- usbc_gintsts.u32 = cvmx_usb_read_csr32(usb,
- CVMX_USBCX_GINTSTS(usb->index));
- cvmx_usb_write_csr32(usb, CVMX_USBCX_GINTSTS(usb->index),
- usbc_gintsts.u32);
- dev_dbg(dev, "gintsts after reset: 0x%x\n", (int)usbc_gintsts.u32);
- if (!usbc_gintsts.s.disconnint && !usbc_gintsts.s.prtint)
- return 0;
- if (retries++ >= 5)
- return -EAGAIN;
- dev_info(dev, "controller reset failed (gintsts=0x%x) - retrying\n",
- (int)usbc_gintsts.u32);
- msleep(50);
- cvmx_usb_shutdown(usb);
- msleep(50);
- goto retry;
-}
-
-/**
- * Reset a USB port. After this call succeeds, the USB port is
- * online and servicing requests.
- *
- * @usb: USB device state populated by cvmx_usb_initialize().
- */
-static void cvmx_usb_reset_port(struct octeon_hcd *usb)
-{
- usb->usbcx_hprt.u32 = cvmx_usb_read_csr32(usb,
- CVMX_USBCX_HPRT(usb->index));
-
- /* Program the port reset bit to start the reset process */
- USB_SET_FIELD32(CVMX_USBCX_HPRT(usb->index), cvmx_usbcx_hprt,
- prtrst, 1);
-
- /*
- * Wait at least 50ms (high speed), or 10ms (full speed) for the reset
- * process to complete.
- */
- mdelay(50);
-
- /* Program the port reset bit to 0, USBC_HPRT[PRTRST] = 0 */
- USB_SET_FIELD32(CVMX_USBCX_HPRT(usb->index), cvmx_usbcx_hprt,
- prtrst, 0);
-
- /*
- * Read the port speed field to get the enumerated speed,
- * USBC_HPRT[PRTSPD].
- */
- usb->usbcx_hprt.u32 = cvmx_usb_read_csr32(usb,
- CVMX_USBCX_HPRT(usb->index));
-}
-
-/**
- * Disable a USB port. After this call the USB port will not
- * generate data transfers and will not generate events.
- * Transactions in process will fail and call their
- * associated callbacks.
- *
- * @usb: USB device state populated by cvmx_usb_initialize().
- *
- * Returns: 0 or a negative error code.
- */
-static int cvmx_usb_disable(struct octeon_hcd *usb)
-{
- /* Disable the port */
- USB_SET_FIELD32(CVMX_USBCX_HPRT(usb->index), cvmx_usbcx_hprt,
- prtena, 1);
- return 0;
-}
-
-/**
- * Get the current state of the USB port. Use this call to
- * determine if the USB port has anything connected, is enabled,
- * or has some sort of error condition. The return value of this
- * call has "changed" bits to signal if the value of some fields
- * has changed between calls.
- *
- * @usb: USB device state populated by cvmx_usb_initialize().
- *
- * Returns: Port status information
- */
-static struct cvmx_usb_port_status cvmx_usb_get_status(struct octeon_hcd *usb)
-{
- union cvmx_usbcx_hprt usbc_hprt;
- struct cvmx_usb_port_status result;
-
- memset(&result, 0, sizeof(result));
-
- usbc_hprt.u32 = cvmx_usb_read_csr32(usb, CVMX_USBCX_HPRT(usb->index));
- result.port_enabled = usbc_hprt.s.prtena;
- result.port_over_current = usbc_hprt.s.prtovrcurract;
- result.port_powered = usbc_hprt.s.prtpwr;
- result.port_speed = usbc_hprt.s.prtspd;
- result.connected = usbc_hprt.s.prtconnsts;
- result.connect_change =
- result.connected != usb->port_status.connected;
-
- return result;
-}
-
-/**
- * Open a virtual pipe between the host and a USB device. A pipe
- * must be opened before data can be transferred between a device
- * and Octeon.
- *
- * @usb: USB device state populated by cvmx_usb_initialize().
- * @device_addr:
- * USB device address to open the pipe to
- * (0-127).
- * @endpoint_num:
- * USB endpoint number to open the pipe to
- * (0-15).
- * @device_speed:
- * The speed of the device the pipe is going
- * to. This must match the device's speed,
- * which may be different than the port speed.
- * @max_packet: The maximum packet length the device can
- * transmit/receive (low speed=0-8, full
- * speed=0-1023, high speed=0-1024). This value
- * comes from the standard endpoint descriptor
- * field wMaxPacketSize bits <10:0>.
- * @transfer_type:
- * The type of transfer this pipe is for.
- * @transfer_dir:
- * The direction the pipe is in. This is not
- * used for control pipes.
- * @interval: For ISOCHRONOUS and INTERRUPT transfers,
- * this is how often the transfer is scheduled
- * for. All other transfers should specify
- * zero. The units are in frames (8000/sec at
- * high speed, 1000/sec for full speed).
- * @multi_count:
- * For high speed devices, this is the maximum
- * allowed number of packets per microframe.
- * Specify zero for non high speed devices. This
- * value comes from the standard endpoint descriptor
- * field wMaxPacketSize bits <12:11>.
- * @hub_device_addr:
- * Hub device address this device is connected
- * to. Devices connected directly to Octeon
- * use zero. This is only used when the device
- * is full/low speed behind a high speed hub.
- * The address will be of the high speed hub,
- * not any full speed hubs after it.
- * @hub_port: Which port on the hub the device is
- * connected to. Use zero for devices connected
- * directly to Octeon. Like hub_device_addr,
- * this is only used for full/low speed
- * devices behind a high speed hub.
- *
- * Returns: A non-NULL value is a pipe. NULL means an error.
- */
-static struct cvmx_usb_pipe *cvmx_usb_open_pipe(struct octeon_hcd *usb,
- int device_addr,
- int endpoint_num,
- enum cvmx_usb_speed
- device_speed,
- int max_packet,
- enum cvmx_usb_transfer
- transfer_type,
- enum cvmx_usb_direction
- transfer_dir,
- int interval, int multi_count,
- int hub_device_addr,
- int hub_port)
-{
- struct cvmx_usb_pipe *pipe;
-
- pipe = kzalloc(sizeof(*pipe), GFP_ATOMIC);
- if (!pipe)
- return NULL;
- if ((device_speed == CVMX_USB_SPEED_HIGH) &&
- (transfer_dir == CVMX_USB_DIRECTION_OUT) &&
- (transfer_type == CVMX_USB_TRANSFER_BULK))
- pipe->flags |= CVMX_USB_PIPE_FLAGS_NEED_PING;
- pipe->device_addr = device_addr;
- pipe->endpoint_num = endpoint_num;
- pipe->device_speed = device_speed;
- pipe->max_packet = max_packet;
- pipe->transfer_type = transfer_type;
- pipe->transfer_dir = transfer_dir;
- INIT_LIST_HEAD(&pipe->transactions);
-
- /*
- * All pipes use interval to rate limit NAK processing. Force an
- * interval if one wasn't supplied
- */
- if (!interval)
- interval = 1;
- if (cvmx_usb_pipe_needs_split(usb, pipe)) {
- pipe->interval = interval * 8;
- /* Force start splits to be scheduled on uFrame 0 */
- pipe->next_tx_frame = ((usb->frame_number + 7) & ~7) +
- pipe->interval;
- } else {
- pipe->interval = interval;
- pipe->next_tx_frame = usb->frame_number + pipe->interval;
- }
- pipe->multi_count = multi_count;
- pipe->hub_device_addr = hub_device_addr;
- pipe->hub_port = hub_port;
- pipe->pid_toggle = 0;
- pipe->split_sc_frame = -1;
- list_add_tail(&pipe->node, &usb->idle_pipes);
-
- /*
- * We don't need to tell the hardware about this pipe yet since
- * it doesn't have any submitted requests
- */
-
- return pipe;
-}
-
-/**
- * Poll the RX FIFOs and remove data as needed. This function is only used
- * in non DMA mode. It is very important that this function be called quickly
- * enough to prevent FIFO overflow.
- *
- * @usb: USB device state populated by cvmx_usb_initialize().
- */
-static void cvmx_usb_poll_rx_fifo(struct octeon_hcd *usb)
-{
- union cvmx_usbcx_grxstsph rx_status;
- int channel;
- int bytes;
- u64 address;
- u32 *ptr;
-
- rx_status.u32 = cvmx_usb_read_csr32(usb,
- CVMX_USBCX_GRXSTSPH(usb->index));
- /* Only read data if IN data is there */
- if (rx_status.s.pktsts != 2)
- return;
- /* Check if no data is available */
- if (!rx_status.s.bcnt)
- return;
-
- channel = rx_status.s.chnum;
- bytes = rx_status.s.bcnt;
- if (!bytes)
- return;
-
- /* Get where the DMA engine would have written this data */
- address = cvmx_read64_uint64(CVMX_USBNX_DMA0_INB_CHN0(usb->index) +
- channel * 8);
-
- ptr = cvmx_phys_to_ptr(address);
- cvmx_write64_uint64(CVMX_USBNX_DMA0_INB_CHN0(usb->index) + channel * 8,
- address + bytes);
-
- /* Loop writing the FIFO data for this packet into memory */
- while (bytes > 0) {
- *ptr++ = cvmx_usb_read_csr32(usb,
- USB_FIFO_ADDRESS(channel, usb->index));
- bytes -= 4;
- }
- CVMX_SYNCW;
-}
-
-/**
- * Fill the TX hardware fifo with data out of the software
- * fifos
- *
- * @usb: USB device state populated by cvmx_usb_initialize().
- * @fifo: Software fifo to use
- * @available: Amount of space in the hardware fifo
- *
- * Returns: Non-zero if the hardware fifo was too small and needs
- * to be serviced again.
- */
-static int cvmx_usb_fill_tx_hw(struct octeon_hcd *usb,
- struct cvmx_usb_tx_fifo *fifo, int available)
-{
- /*
- * We're done either when there isn't any more space or the software FIFO
- * is empty
- */
- while (available && (fifo->head != fifo->tail)) {
- int i = fifo->tail;
- const u32 *ptr = cvmx_phys_to_ptr(fifo->entry[i].address);
- u64 csr_address = USB_FIFO_ADDRESS(fifo->entry[i].channel,
- usb->index) ^ 4;
- int words = available;
-
- /* Limit the amount of data to what the SW fifo has */
- if (fifo->entry[i].size <= available) {
- words = fifo->entry[i].size;
- fifo->tail++;
- if (fifo->tail > MAX_CHANNELS)
- fifo->tail = 0;
- }
-
- /* Update the next locations and counts */
- available -= words;
- fifo->entry[i].address += words * 4;
- fifo->entry[i].size -= words;
-
- /*
- * Write the HW fifo data. The read every three writes is due
- * to an erratum on CN3XXX chips
- */
- while (words > 3) {
- cvmx_write64_uint32(csr_address, *ptr++);
- cvmx_write64_uint32(csr_address, *ptr++);
- cvmx_write64_uint32(csr_address, *ptr++);
- cvmx_read64_uint64(
- CVMX_USBNX_DMA0_INB_CHN0(usb->index));
- words -= 3;
- }
- cvmx_write64_uint32(csr_address, *ptr++);
- if (--words) {
- cvmx_write64_uint32(csr_address, *ptr++);
- if (--words)
- cvmx_write64_uint32(csr_address, *ptr++);
- }
- cvmx_read64_uint64(CVMX_USBNX_DMA0_INB_CHN0(usb->index));
- }
- return fifo->head != fifo->tail;
-}
-
-/**
- * Check the hardware FIFOs and fill them as needed
- *
- * @usb: USB device state populated by cvmx_usb_initialize().
- */
-static void cvmx_usb_poll_tx_fifo(struct octeon_hcd *usb)
-{
- if (usb->periodic.head != usb->periodic.tail) {
- union cvmx_usbcx_hptxsts tx_status;
-
- tx_status.u32 = cvmx_usb_read_csr32(usb,
- CVMX_USBCX_HPTXSTS(usb->index));
- if (cvmx_usb_fill_tx_hw(usb, &usb->periodic,
- tx_status.s.ptxfspcavail))
- USB_SET_FIELD32(CVMX_USBCX_GINTMSK(usb->index),
- cvmx_usbcx_gintmsk, ptxfempmsk, 1);
- else
- USB_SET_FIELD32(CVMX_USBCX_GINTMSK(usb->index),
- cvmx_usbcx_gintmsk, ptxfempmsk, 0);
- }
-
- if (usb->nonperiodic.head != usb->nonperiodic.tail) {
- union cvmx_usbcx_gnptxsts tx_status;
-
- tx_status.u32 = cvmx_usb_read_csr32(usb,
- CVMX_USBCX_GNPTXSTS(usb->index));
- if (cvmx_usb_fill_tx_hw(usb, &usb->nonperiodic,
- tx_status.s.nptxfspcavail))
- USB_SET_FIELD32(CVMX_USBCX_GINTMSK(usb->index),
- cvmx_usbcx_gintmsk, nptxfempmsk, 1);
- else
- USB_SET_FIELD32(CVMX_USBCX_GINTMSK(usb->index),
- cvmx_usbcx_gintmsk, nptxfempmsk, 0);
- }
-}
-
-/**
- * Fill the TX FIFO with an outgoing packet
- *
- * @usb: USB device state populated by cvmx_usb_initialize().
- * @channel: Channel number to get packet from
- */
-static void cvmx_usb_fill_tx_fifo(struct octeon_hcd *usb, int channel)
-{
- union cvmx_usbcx_hccharx hcchar;
- union cvmx_usbcx_hcspltx usbc_hcsplt;
- union cvmx_usbcx_hctsizx usbc_hctsiz;
- struct cvmx_usb_tx_fifo *fifo;
-
- /* We only need to fill data on outbound channels */
- hcchar.u32 = cvmx_usb_read_csr32(usb,
- CVMX_USBCX_HCCHARX(channel, usb->index));
- if (hcchar.s.epdir != CVMX_USB_DIRECTION_OUT)
- return;
-
- /* OUT Splits only have data on the start and not the complete */
- usbc_hcsplt.u32 = cvmx_usb_read_csr32(usb,
- CVMX_USBCX_HCSPLTX(channel, usb->index));
- if (usbc_hcsplt.s.spltena && usbc_hcsplt.s.compsplt)
- return;
-
- /*
- * Find out how many bytes we need to fill and convert it into 32bit
- * words.
- */
- usbc_hctsiz.u32 = cvmx_usb_read_csr32(usb,
- CVMX_USBCX_HCTSIZX(channel, usb->index));
- if (!usbc_hctsiz.s.xfersize)
- return;
-
- if ((hcchar.s.eptype == CVMX_USB_TRANSFER_INTERRUPT) ||
- (hcchar.s.eptype == CVMX_USB_TRANSFER_ISOCHRONOUS))
- fifo = &usb->periodic;
- else
- fifo = &usb->nonperiodic;
-
- fifo->entry[fifo->head].channel = channel;
- fifo->entry[fifo->head].address =
- cvmx_read64_uint64(CVMX_USBNX_DMA0_OUTB_CHN0(usb->index) +
- channel * 8);
- fifo->entry[fifo->head].size = (usbc_hctsiz.s.xfersize + 3) >> 2;
- fifo->head++;
- if (fifo->head > MAX_CHANNELS)
- fifo->head = 0;
-
- cvmx_usb_poll_tx_fifo(usb);
-}
-
-/**
- * Perform channel specific setup for Control transactions. All
- * the generic stuff will already have been done in cvmx_usb_start_channel().
- *
- * @usb: USB device state populated by cvmx_usb_initialize().
- * @channel: Channel to setup
- * @pipe: Pipe for control transaction
- */
-static void cvmx_usb_start_channel_control(struct octeon_hcd *usb,
- int channel,
- struct cvmx_usb_pipe *pipe)
-{
- struct usb_hcd *hcd = octeon_to_hcd(usb);
- struct device *dev = hcd->self.controller;
- struct cvmx_usb_transaction *transaction =
- list_first_entry(&pipe->transactions, typeof(*transaction),
- node);
- struct usb_ctrlrequest *header =
- cvmx_phys_to_ptr(transaction->control_header);
- int bytes_to_transfer = transaction->buffer_length -
- transaction->actual_bytes;
- int packets_to_transfer;
- union cvmx_usbcx_hctsizx usbc_hctsiz;
-
- usbc_hctsiz.u32 = cvmx_usb_read_csr32(usb,
- CVMX_USBCX_HCTSIZX(channel, usb->index));
-
- switch (transaction->stage) {
- case CVMX_USB_STAGE_NON_CONTROL:
- case CVMX_USB_STAGE_NON_CONTROL_SPLIT_COMPLETE:
- dev_err(dev, "%s: ERROR - Non control stage\n", __func__);
- break;
- case CVMX_USB_STAGE_SETUP:
- usbc_hctsiz.s.pid = 3; /* Setup */
- bytes_to_transfer = sizeof(*header);
- /* All Control operations start with a setup going OUT */
- USB_SET_FIELD32(CVMX_USBCX_HCCHARX(channel, usb->index),
- cvmx_usbcx_hccharx, epdir,
- CVMX_USB_DIRECTION_OUT);
- /*
- * Setup send the control header instead of the buffer data. The
- * buffer data will be used in the next stage
- */
- cvmx_write64_uint64(CVMX_USBNX_DMA0_OUTB_CHN0(usb->index) +
- channel * 8,
- transaction->control_header);
- break;
- case CVMX_USB_STAGE_SETUP_SPLIT_COMPLETE:
- usbc_hctsiz.s.pid = 3; /* Setup */
- bytes_to_transfer = 0;
- /* All Control operations start with a setup going OUT */
- USB_SET_FIELD32(CVMX_USBCX_HCCHARX(channel, usb->index),
- cvmx_usbcx_hccharx, epdir,
- CVMX_USB_DIRECTION_OUT);
-
- USB_SET_FIELD32(CVMX_USBCX_HCSPLTX(channel, usb->index),
- cvmx_usbcx_hcspltx, compsplt, 1);
- break;
- case CVMX_USB_STAGE_DATA:
- usbc_hctsiz.s.pid = cvmx_usb_get_data_pid(pipe);
- if (cvmx_usb_pipe_needs_split(usb, pipe)) {
- if (header->bRequestType & USB_DIR_IN)
- bytes_to_transfer = 0;
- else if (bytes_to_transfer > pipe->max_packet)
- bytes_to_transfer = pipe->max_packet;
- }
- USB_SET_FIELD32(CVMX_USBCX_HCCHARX(channel, usb->index),
- cvmx_usbcx_hccharx, epdir,
- ((header->bRequestType & USB_DIR_IN) ?
- CVMX_USB_DIRECTION_IN :
- CVMX_USB_DIRECTION_OUT));
- break;
- case CVMX_USB_STAGE_DATA_SPLIT_COMPLETE:
- usbc_hctsiz.s.pid = cvmx_usb_get_data_pid(pipe);
- if (!(header->bRequestType & USB_DIR_IN))
- bytes_to_transfer = 0;
- USB_SET_FIELD32(CVMX_USBCX_HCCHARX(channel, usb->index),
- cvmx_usbcx_hccharx, epdir,
- ((header->bRequestType & USB_DIR_IN) ?
- CVMX_USB_DIRECTION_IN :
- CVMX_USB_DIRECTION_OUT));
- USB_SET_FIELD32(CVMX_USBCX_HCSPLTX(channel, usb->index),
- cvmx_usbcx_hcspltx, compsplt, 1);
- break;
- case CVMX_USB_STAGE_STATUS:
- usbc_hctsiz.s.pid = cvmx_usb_get_data_pid(pipe);
- bytes_to_transfer = 0;
- USB_SET_FIELD32(CVMX_USBCX_HCCHARX(channel, usb->index),
- cvmx_usbcx_hccharx, epdir,
- ((header->bRequestType & USB_DIR_IN) ?
- CVMX_USB_DIRECTION_OUT :
- CVMX_USB_DIRECTION_IN));
- break;
- case CVMX_USB_STAGE_STATUS_SPLIT_COMPLETE:
- usbc_hctsiz.s.pid = cvmx_usb_get_data_pid(pipe);
- bytes_to_transfer = 0;
- USB_SET_FIELD32(CVMX_USBCX_HCCHARX(channel, usb->index),
- cvmx_usbcx_hccharx, epdir,
- ((header->bRequestType & USB_DIR_IN) ?
- CVMX_USB_DIRECTION_OUT :
- CVMX_USB_DIRECTION_IN));
- USB_SET_FIELD32(CVMX_USBCX_HCSPLTX(channel, usb->index),
- cvmx_usbcx_hcspltx, compsplt, 1);
- break;
- }
-
- /*
- * Make sure the transfer never exceeds the byte limit of the hardware.
- * Further bytes will be sent as continued transactions
- */
- if (bytes_to_transfer > MAX_TRANSFER_BYTES) {
- /* Round MAX_TRANSFER_BYTES to a multiple of out packet size */
- bytes_to_transfer = MAX_TRANSFER_BYTES / pipe->max_packet;
- bytes_to_transfer *= pipe->max_packet;
- }
-
- /*
- * Calculate the number of packets to transfer. If the length is zero
- * we still need to transfer one packet
- */
- packets_to_transfer = DIV_ROUND_UP(bytes_to_transfer,
- pipe->max_packet);
- if (packets_to_transfer == 0) {
- packets_to_transfer = 1;
- } else if ((packets_to_transfer > 1) &&
- (usb->init_flags & CVMX_USB_INITIALIZE_FLAGS_NO_DMA)) {
- /*
- * Limit to one packet when not using DMA. Channels must be
- * restarted between every packet for IN transactions, so there
- * is no reason to do multiple packets in a row
- */
- packets_to_transfer = 1;
- bytes_to_transfer = packets_to_transfer * pipe->max_packet;
- } else if (packets_to_transfer > MAX_TRANSFER_PACKETS) {
- /*
- * Limit the number of packet and data transferred to what the
- * hardware can handle
- */
- packets_to_transfer = MAX_TRANSFER_PACKETS;
- bytes_to_transfer = packets_to_transfer * pipe->max_packet;
- }
-
- usbc_hctsiz.s.xfersize = bytes_to_transfer;
- usbc_hctsiz.s.pktcnt = packets_to_transfer;
-
- cvmx_usb_write_csr32(usb, CVMX_USBCX_HCTSIZX(channel, usb->index),
- usbc_hctsiz.u32);
-}
-
-/**
- * Start a channel to perform the pipe's head transaction
- *
- * @usb: USB device state populated by cvmx_usb_initialize().
- * @channel: Channel to setup
- * @pipe: Pipe to start
- */
-static void cvmx_usb_start_channel(struct octeon_hcd *usb, int channel,
- struct cvmx_usb_pipe *pipe)
-{
- struct cvmx_usb_transaction *transaction =
- list_first_entry(&pipe->transactions, typeof(*transaction),
- node);
-
- /* Make sure all writes to the DMA region get flushed */
- CVMX_SYNCW;
-
- /* Attach the channel to the pipe */
- usb->pipe_for_channel[channel] = pipe;
- pipe->channel = channel;
- pipe->flags |= CVMX_USB_PIPE_FLAGS_SCHEDULED;
-
- /* Mark this channel as in use */
- usb->idle_hardware_channels &= ~(1 << channel);
-
- /* Enable the channel interrupt bits */
- {
- union cvmx_usbcx_hcintx usbc_hcint;
- union cvmx_usbcx_hcintmskx usbc_hcintmsk;
- union cvmx_usbcx_haintmsk usbc_haintmsk;
-
- /* Clear all channel status bits */
- usbc_hcint.u32 = cvmx_usb_read_csr32(usb,
- CVMX_USBCX_HCINTX(channel, usb->index));
-
- cvmx_usb_write_csr32(usb,
- CVMX_USBCX_HCINTX(channel, usb->index),
- usbc_hcint.u32);
-
- usbc_hcintmsk.u32 = 0;
- usbc_hcintmsk.s.chhltdmsk = 1;
- if (usb->init_flags & CVMX_USB_INITIALIZE_FLAGS_NO_DMA) {
- /*
- * Channels need these extra interrupts when we aren't
- * in DMA mode.
- */
- usbc_hcintmsk.s.datatglerrmsk = 1;
- usbc_hcintmsk.s.frmovrunmsk = 1;
- usbc_hcintmsk.s.bblerrmsk = 1;
- usbc_hcintmsk.s.xacterrmsk = 1;
- if (cvmx_usb_pipe_needs_split(usb, pipe)) {
- /*
- * Splits don't generate xfercompl, so we need
- * ACK and NYET.
- */
- usbc_hcintmsk.s.nyetmsk = 1;
- usbc_hcintmsk.s.ackmsk = 1;
- }
- usbc_hcintmsk.s.nakmsk = 1;
- usbc_hcintmsk.s.stallmsk = 1;
- usbc_hcintmsk.s.xfercomplmsk = 1;
- }
- cvmx_usb_write_csr32(usb,
- CVMX_USBCX_HCINTMSKX(channel, usb->index),
- usbc_hcintmsk.u32);
-
- /* Enable the channel interrupt to propagate */
- usbc_haintmsk.u32 = cvmx_usb_read_csr32(usb,
- CVMX_USBCX_HAINTMSK(usb->index));
- usbc_haintmsk.s.haintmsk |= 1 << channel;
- cvmx_usb_write_csr32(usb, CVMX_USBCX_HAINTMSK(usb->index),
- usbc_haintmsk.u32);
- }
-
- /* Setup the location the DMA engine uses. */
- {
- u64 reg;
- u64 dma_address = transaction->buffer +
- transaction->actual_bytes;
-
- if (transaction->type == CVMX_USB_TRANSFER_ISOCHRONOUS)
- dma_address = transaction->buffer +
- transaction->iso_packets[0].offset +
- transaction->actual_bytes;
-
- if (pipe->transfer_dir == CVMX_USB_DIRECTION_OUT)
- reg = CVMX_USBNX_DMA0_OUTB_CHN0(usb->index);
- else
- reg = CVMX_USBNX_DMA0_INB_CHN0(usb->index);
- cvmx_write64_uint64(reg + channel * 8, dma_address);
- }
-
- /* Setup both the size of the transfer and the SPLIT characteristics */
- {
- union cvmx_usbcx_hcspltx usbc_hcsplt = {.u32 = 0};
- union cvmx_usbcx_hctsizx usbc_hctsiz = {.u32 = 0};
- int packets_to_transfer;
- int bytes_to_transfer = transaction->buffer_length -
- transaction->actual_bytes;
-
- /*
- * ISOCHRONOUS transactions store each individual transfer size
- * in the packet structure, not the global buffer_length
- */
- if (transaction->type == CVMX_USB_TRANSFER_ISOCHRONOUS)
- bytes_to_transfer =
- transaction->iso_packets[0].length -
- transaction->actual_bytes;
-
- /*
- * We need to do split transactions when we are talking to non
- * high speed devices that are behind a high speed hub
- */
- if (cvmx_usb_pipe_needs_split(usb, pipe)) {
- /*
- * On the start split phase (stage is even) record the
- * frame number we will need to send the split complete.
- * We only store the lower two bits since the time ahead
- * can only be two frames
- */
- if ((transaction->stage & 1) == 0) {
- if (transaction->type == CVMX_USB_TRANSFER_BULK)
- pipe->split_sc_frame =
- (usb->frame_number + 1) & 0x7f;
- else
- pipe->split_sc_frame =
- (usb->frame_number + 2) & 0x7f;
- } else {
- pipe->split_sc_frame = -1;
- }
-
- usbc_hcsplt.s.spltena = 1;
- usbc_hcsplt.s.hubaddr = pipe->hub_device_addr;
- usbc_hcsplt.s.prtaddr = pipe->hub_port;
- usbc_hcsplt.s.compsplt = (transaction->stage ==
- CVMX_USB_STAGE_NON_CONTROL_SPLIT_COMPLETE);
-
- /*
- * SPLIT transactions can only ever transmit one data
- * packet so limit the transfer size to the max packet
- * size
- */
- if (bytes_to_transfer > pipe->max_packet)
- bytes_to_transfer = pipe->max_packet;
-
- /*
- * ISOCHRONOUS OUT splits are unique in that they limit
- * data transfers to 188 byte chunks representing the
- * begin/middle/end of the data or all
- */
- if (!usbc_hcsplt.s.compsplt &&
- (pipe->transfer_dir == CVMX_USB_DIRECTION_OUT) &&
- (pipe->transfer_type ==
- CVMX_USB_TRANSFER_ISOCHRONOUS)) {
- /*
- * Clear the split complete frame number as
- * there isn't going to be a split complete
- */
- pipe->split_sc_frame = -1;
- /*
- * See if we've started this transfer and sent
- * data
- */
- if (transaction->actual_bytes == 0) {
- /*
- * Nothing sent yet, this is either a
- * begin or the entire payload
- */
- if (bytes_to_transfer <= 188)
- /* Entire payload in one go */
- usbc_hcsplt.s.xactpos = 3;
- else
- /* First part of payload */
- usbc_hcsplt.s.xactpos = 2;
- } else {
- /*
- * Continuing the previous data, we must
- * either be in the middle or at the end
- */
- if (bytes_to_transfer <= 188)
- /* End of payload */
- usbc_hcsplt.s.xactpos = 1;
- else
- /* Middle of payload */
- usbc_hcsplt.s.xactpos = 0;
- }
- /*
- * Again, the transfer size is limited to 188
- * bytes
- */
- if (bytes_to_transfer > 188)
- bytes_to_transfer = 188;
- }
- }
-
- /*
- * Make sure the transfer never exceeds the byte limit of the
- * hardware. Further bytes will be sent as continued
- * transactions
- */
- if (bytes_to_transfer > MAX_TRANSFER_BYTES) {
- /*
- * Round MAX_TRANSFER_BYTES to a multiple of out packet
- * size
- */
- bytes_to_transfer = MAX_TRANSFER_BYTES /
- pipe->max_packet;
- bytes_to_transfer *= pipe->max_packet;
- }
-
- /*
- * Calculate the number of packets to transfer. If the length is
- * zero we still need to transfer one packet
- */
- packets_to_transfer =
- DIV_ROUND_UP(bytes_to_transfer, pipe->max_packet);
- if (packets_to_transfer == 0) {
- packets_to_transfer = 1;
- } else if ((packets_to_transfer > 1) &&
- (usb->init_flags &
- CVMX_USB_INITIALIZE_FLAGS_NO_DMA)) {
- /*
- * Limit to one packet when not using DMA. Channels must
- * be restarted between every packet for IN
- * transactions, so there is no reason to do multiple
- * packets in a row
- */
- packets_to_transfer = 1;
- bytes_to_transfer = packets_to_transfer *
- pipe->max_packet;
- } else if (packets_to_transfer > MAX_TRANSFER_PACKETS) {
- /*
- * Limit the number of packet and data transferred to
- * what the hardware can handle
- */
- packets_to_transfer = MAX_TRANSFER_PACKETS;
- bytes_to_transfer = packets_to_transfer *
- pipe->max_packet;
- }
-
- usbc_hctsiz.s.xfersize = bytes_to_transfer;
- usbc_hctsiz.s.pktcnt = packets_to_transfer;
-
- /* Update the DATA0/DATA1 toggle */
- usbc_hctsiz.s.pid = cvmx_usb_get_data_pid(pipe);
- /*
- * High speed pipes may need a hardware ping before they start
- */
- if (pipe->flags & CVMX_USB_PIPE_FLAGS_NEED_PING)
- usbc_hctsiz.s.dopng = 1;
-
- cvmx_usb_write_csr32(usb,
- CVMX_USBCX_HCSPLTX(channel, usb->index),
- usbc_hcsplt.u32);
- cvmx_usb_write_csr32(usb,
- CVMX_USBCX_HCTSIZX(channel, usb->index),
- usbc_hctsiz.u32);
- }
-
- /* Setup the Host Channel Characteristics Register */
- {
- union cvmx_usbcx_hccharx usbc_hcchar = {.u32 = 0};
-
- /*
- * Set the startframe odd/even properly. This is only used for
- * periodic
- */
- usbc_hcchar.s.oddfrm = usb->frame_number & 1;
-
- /*
- * Set the number of back to back packets allowed by this
- * endpoint. Split transactions interpret "ec" as the number of
- * immediate retries after failure. These retries happen too
- * quickly, so we disable these entirely for splits
- */
- if (cvmx_usb_pipe_needs_split(usb, pipe))
- usbc_hcchar.s.ec = 1;
- else if (pipe->multi_count < 1)
- usbc_hcchar.s.ec = 1;
- else if (pipe->multi_count > 3)
- usbc_hcchar.s.ec = 3;
- else
- usbc_hcchar.s.ec = pipe->multi_count;
-
- /* Set the rest of the endpoint specific settings */
- usbc_hcchar.s.devaddr = pipe->device_addr;
- usbc_hcchar.s.eptype = transaction->type;
- usbc_hcchar.s.lspddev =
- (pipe->device_speed == CVMX_USB_SPEED_LOW);
- usbc_hcchar.s.epdir = pipe->transfer_dir;
- usbc_hcchar.s.epnum = pipe->endpoint_num;
- usbc_hcchar.s.mps = pipe->max_packet;
- cvmx_usb_write_csr32(usb,
- CVMX_USBCX_HCCHARX(channel, usb->index),
- usbc_hcchar.u32);
- }
-
- /* Do transaction type specific fixups as needed */
- switch (transaction->type) {
- case CVMX_USB_TRANSFER_CONTROL:
- cvmx_usb_start_channel_control(usb, channel, pipe);
- break;
- case CVMX_USB_TRANSFER_BULK:
- case CVMX_USB_TRANSFER_INTERRUPT:
- break;
- case CVMX_USB_TRANSFER_ISOCHRONOUS:
- if (!cvmx_usb_pipe_needs_split(usb, pipe)) {
- /*
- * ISO transactions require different PIDs depending on
- * direction and how many packets are needed
- */
- if (pipe->transfer_dir == CVMX_USB_DIRECTION_OUT) {
- if (pipe->multi_count < 2) /* Need DATA0 */
- USB_SET_FIELD32(
- CVMX_USBCX_HCTSIZX(channel,
- usb->index),
- cvmx_usbcx_hctsizx, pid, 0);
- else /* Need MDATA */
- USB_SET_FIELD32(
- CVMX_USBCX_HCTSIZX(channel,
- usb->index),
- cvmx_usbcx_hctsizx, pid, 3);
- }
- }
- break;
- }
- {
- union cvmx_usbcx_hctsizx usbc_hctsiz = { .u32 =
- cvmx_usb_read_csr32(usb,
- CVMX_USBCX_HCTSIZX(channel,
- usb->index))
- };
- transaction->xfersize = usbc_hctsiz.s.xfersize;
- transaction->pktcnt = usbc_hctsiz.s.pktcnt;
- }
- /* Remember when we start a split transaction */
- if (cvmx_usb_pipe_needs_split(usb, pipe))
- usb->active_split = transaction;
- USB_SET_FIELD32(CVMX_USBCX_HCCHARX(channel, usb->index),
- cvmx_usbcx_hccharx, chena, 1);
- if (usb->init_flags & CVMX_USB_INITIALIZE_FLAGS_NO_DMA)
- cvmx_usb_fill_tx_fifo(usb, channel);
-}
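-
-/*
- * Hedged sketch (hypothetical helper, not part of this driver): the transfer
- * sizing rules used at the top of cvmx_usb_start_channel(), stated on their
- * own. DIV_ROUND_UP is the usual kernel macro, and the NO_DMA single-packet
- * special case is omitted for brevity.
- */
-static void example_size_transfer(int max_packet, int *bytes, int *packets)
-{
-	/* Round the byte cap down to a whole number of packets */
-	if (*bytes > MAX_TRANSFER_BYTES)
-		*bytes = (MAX_TRANSFER_BYTES / max_packet) * max_packet;
-	/* A zero-length transfer still needs one packet */
-	*packets = DIV_ROUND_UP(*bytes, max_packet);
-	if (*packets == 0) {
-		*packets = 1;
-	} else if (*packets > MAX_TRANSFER_PACKETS) {
-		/* Cap the packet count at what the hardware can handle */
-		*packets = MAX_TRANSFER_PACKETS;
-		*bytes = *packets * max_packet;
-	}
-}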
-
-/**
- * Find a pipe that is ready to be scheduled to hardware.
- * @usb: USB device state populated by cvmx_usb_initialize().
- * @xfer_type: Transfer type
- *
- * Returns: Pipe or NULL if none are ready
- */
-static struct cvmx_usb_pipe *cvmx_usb_find_ready_pipe(struct octeon_hcd *usb,
- enum cvmx_usb_transfer xfer_type)
-{
- struct list_head *list = usb->active_pipes + xfer_type;
- u64 current_frame = usb->frame_number;
- struct cvmx_usb_pipe *pipe;
-
- list_for_each_entry(pipe, list, node) {
- struct cvmx_usb_transaction *t =
- list_first_entry(&pipe->transactions, typeof(*t),
- node);
- if (!(pipe->flags & CVMX_USB_PIPE_FLAGS_SCHEDULED) && t &&
- (pipe->next_tx_frame <= current_frame) &&
- ((pipe->split_sc_frame == -1) ||
- ((((int)current_frame - pipe->split_sc_frame) & 0x7f) <
- 0x40)) &&
- (!usb->active_split || (usb->active_split == t))) {
- prefetch(t);
- return pipe;
- }
- }
- return NULL;
-}
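-
-/*
- * Hedged sketch (hypothetical helper, not part of this driver): the split
- * window test used above, pulled out on its own. Frame numbers wrap, so the
- * difference is reduced modulo 128 and must fall in the 64-frame half that
- * follows the start split. E.g. split_sc_frame = 0x7e and current_frame =
- * 0x02 give (0x02 - 0x7e) & 0x7f = 0x04 < 0x40, so the pipe is still
- * eligible despite the wrap.
- */
-static inline int example_in_split_window(int current_frame, int sc_frame)
-{
-	return (sc_frame == -1) ||
-	       (((current_frame - sc_frame) & 0x7f) < 0x40);
-}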
-
-static struct cvmx_usb_pipe *cvmx_usb_next_pipe(struct octeon_hcd *usb,
- int is_sof)
-{
- struct cvmx_usb_pipe *pipe;
-
- /* Find a pipe needing service. */
- if (is_sof) {
- /*
- * Only process periodic pipes on SOF interrupts. This way we
- * are sure that the periodic data is sent in the beginning of
- * the frame.
- */
- pipe = cvmx_usb_find_ready_pipe(usb,
- CVMX_USB_TRANSFER_ISOCHRONOUS);
- if (pipe)
- return pipe;
- pipe = cvmx_usb_find_ready_pipe(usb,
- CVMX_USB_TRANSFER_INTERRUPT);
- if (pipe)
- return pipe;
- }
- pipe = cvmx_usb_find_ready_pipe(usb, CVMX_USB_TRANSFER_CONTROL);
- if (pipe)
- return pipe;
- return cvmx_usb_find_ready_pipe(usb, CVMX_USB_TRANSFER_BULK);
-}
-
-/**
- * Called whenever a pipe might need to be scheduled to the
- * hardware.
- *
- * @usb: USB device state populated by cvmx_usb_initialize().
- * @is_sof: True if this schedule was called on a SOF interrupt.
- */
-static void cvmx_usb_schedule(struct octeon_hcd *usb, int is_sof)
-{
- int channel;
- struct cvmx_usb_pipe *pipe;
- int need_sof;
- enum cvmx_usb_transfer ttype;
-
- if (usb->init_flags & CVMX_USB_INITIALIZE_FLAGS_NO_DMA) {
- /*
- * Without DMA we need to be careful to not schedule something
- * at the end of a frame and cause an overrun.
- */
- union cvmx_usbcx_hfnum hfnum = {
- .u32 = cvmx_usb_read_csr32(usb,
- CVMX_USBCX_HFNUM(usb->index))
- };
-
- union cvmx_usbcx_hfir hfir = {
- .u32 = cvmx_usb_read_csr32(usb,
- CVMX_USBCX_HFIR(usb->index))
- };
-
- if (hfnum.s.frrem < hfir.s.frint / 4)
- goto done;
- }
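-	/*
-	 * In other words (assuming the usual DWC meanings, both fields in
-	 * PHY clock ticks): hfir.frint is the frame interval and
-	 * hfnum.frrem the time left in the current frame, so scheduling is
-	 * skipped once less than a quarter of the frame remains.
-	 */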
-
- while (usb->idle_hardware_channels) {
- /* Find an idle channel */
- channel = __fls(usb->idle_hardware_channels);
- if (unlikely(channel > 7))
- break;
-
- pipe = cvmx_usb_next_pipe(usb, is_sof);
- if (!pipe)
- break;
-
- cvmx_usb_start_channel(usb, channel, pipe);
- }
-
-done:
- /*
- * Only enable SOF interrupts when we have transactions pending in the
- * future that might need to be scheduled
- */
- need_sof = 0;
- for (ttype = CVMX_USB_TRANSFER_CONTROL;
- ttype <= CVMX_USB_TRANSFER_INTERRUPT; ttype++) {
- list_for_each_entry(pipe, &usb->active_pipes[ttype], node) {
- if (pipe->next_tx_frame > usb->frame_number) {
- need_sof = 1;
- break;
- }
- }
- }
- USB_SET_FIELD32(CVMX_USBCX_GINTMSK(usb->index),
- cvmx_usbcx_gintmsk, sofmsk, need_sof);
-}
-
-static void octeon_usb_urb_complete_callback(struct octeon_hcd *usb,
- enum cvmx_usb_status status,
- struct cvmx_usb_pipe *pipe,
- struct cvmx_usb_transaction
- *transaction,
- int bytes_transferred,
- struct urb *urb)
-{
- struct usb_hcd *hcd = octeon_to_hcd(usb);
- struct device *dev = hcd->self.controller;
-
- if (likely(status == CVMX_USB_STATUS_OK))
- urb->actual_length = bytes_transferred;
- else
- urb->actual_length = 0;
-
- urb->hcpriv = NULL;
-
- /* For Isochronous transactions we need to update the URB packet status
- * list from data in our private copy
- */
- if (usb_pipetype(urb->pipe) == PIPE_ISOCHRONOUS) {
- int i;
- /*
- * The pointer to the private list is stored in the setup_packet
- * field.
- */
- struct cvmx_usb_iso_packet *iso_packet =
- (struct cvmx_usb_iso_packet *)urb->setup_packet;
- /* Recalculate the transfer size by adding up each packet */
- urb->actual_length = 0;
- for (i = 0; i < urb->number_of_packets; i++) {
- if (iso_packet[i].status == CVMX_USB_STATUS_OK) {
- urb->iso_frame_desc[i].status = 0;
- urb->iso_frame_desc[i].actual_length =
- iso_packet[i].length;
- urb->actual_length +=
- urb->iso_frame_desc[i].actual_length;
- } else {
- dev_dbg(dev, "ISOCHRONOUS packet=%d of %d status=%d pipe=%p transaction=%p size=%d\n",
- i, urb->number_of_packets,
- iso_packet[i].status, pipe,
- transaction, iso_packet[i].length);
- urb->iso_frame_desc[i].status = -EREMOTEIO;
- }
- }
- /* Free the private list now that we don't need it anymore */
- kfree(iso_packet);
- urb->setup_packet = NULL;
- }
-
- switch (status) {
- case CVMX_USB_STATUS_OK:
- urb->status = 0;
- break;
- case CVMX_USB_STATUS_CANCEL:
- if (urb->status == 0)
- urb->status = -ENOENT;
- break;
- case CVMX_USB_STATUS_STALL:
- dev_dbg(dev, "status=stall pipe=%p transaction=%p size=%d\n",
- pipe, transaction, bytes_transferred);
- urb->status = -EPIPE;
- break;
- case CVMX_USB_STATUS_BABBLEERR:
- dev_dbg(dev, "status=babble pipe=%p transaction=%p size=%d\n",
- pipe, transaction, bytes_transferred);
- urb->status = -EPIPE;
- break;
- case CVMX_USB_STATUS_SHORT:
- dev_dbg(dev, "status=short pipe=%p transaction=%p size=%d\n",
- pipe, transaction, bytes_transferred);
- urb->status = -EREMOTEIO;
- break;
- case CVMX_USB_STATUS_ERROR:
- case CVMX_USB_STATUS_XACTERR:
- case CVMX_USB_STATUS_DATATGLERR:
- case CVMX_USB_STATUS_FRAMEERR:
- dev_dbg(dev, "status=%d pipe=%p transaction=%p size=%d\n",
- status, pipe, transaction, bytes_transferred);
- urb->status = -EPROTO;
- break;
- }
- usb_hcd_unlink_urb_from_ep(octeon_to_hcd(usb), urb);
- spin_unlock(&usb->lock);
- usb_hcd_giveback_urb(octeon_to_hcd(usb), urb, urb->status);
- spin_lock(&usb->lock);
-}
-
-/**
- * Signal the completion of a transaction and free it. The
- * transaction will be removed from the pipe transaction list.
- *
- * @usb: USB device state populated by cvmx_usb_initialize().
- * @pipe: Pipe the transaction is on
- * @transaction:
- * Transaction that completed
- * @complete_code:
- * Completion code
- */
-static void cvmx_usb_complete(struct octeon_hcd *usb,
- struct cvmx_usb_pipe *pipe,
- struct cvmx_usb_transaction *transaction,
- enum cvmx_usb_status complete_code)
-{
- /* If this was a split then clear our split in progress marker */
- if (usb->active_split == transaction)
- usb->active_split = NULL;
-
- /*
- * Isochronous transactions need extra processing as they might not be
- * done after a single data transfer
- */
- if (unlikely(transaction->type == CVMX_USB_TRANSFER_ISOCHRONOUS)) {
- /* Update the number of bytes transferred in this ISO packet */
- transaction->iso_packets[0].length = transaction->actual_bytes;
- transaction->iso_packets[0].status = complete_code;
-
- /*
- * If there are more ISOs pending and we succeeded, schedule the
- * next one
- */
- if ((transaction->iso_number_packets > 1) &&
- (complete_code == CVMX_USB_STATUS_OK)) {
- /* No bytes transferred for this packet as of yet */
- transaction->actual_bytes = 0;
- /* One less ISO waiting to transfer */
- transaction->iso_number_packets--;
- /* Increment to the next location in our packet array */
- transaction->iso_packets++;
- transaction->stage = CVMX_USB_STAGE_NON_CONTROL;
- return;
- }
- }
-
- /* Remove the transaction from the pipe list */
- list_del(&transaction->node);
- if (list_empty(&pipe->transactions))
- list_move_tail(&pipe->node, &usb->idle_pipes);
- octeon_usb_urb_complete_callback(usb, complete_code, pipe,
- transaction,
- transaction->actual_bytes,
- transaction->urb);
- kfree(transaction);
-}
-
-/**
- * Submit a usb transaction to a pipe. Called for all types
- * of transactions.
- *
- * @usb:	USB device state populated by cvmx_usb_initialize().
- * @pipe: Which pipe to submit to.
- * @type: Transaction type
- * @buffer: User buffer for the transaction
- * @buffer_length:
- * User buffer's length in bytes
- * @control_header:
- * For control transactions, the 8 byte standard header
- * @iso_start_frame:
- * For ISO transactions, the start frame
- * @iso_number_packets:
- *	For ISO, the number of packets in the transaction.
- * @iso_packets:
- * A description of each ISO packet
- * @urb: URB for the callback
- *
- * Returns: Transaction or NULL on failure.
- */
-static struct cvmx_usb_transaction *cvmx_usb_submit_transaction(
- struct octeon_hcd *usb,
- struct cvmx_usb_pipe *pipe,
- enum cvmx_usb_transfer type,
- u64 buffer,
- int buffer_length,
- u64 control_header,
- int iso_start_frame,
- int iso_number_packets,
- struct cvmx_usb_iso_packet *iso_packets,
- struct urb *urb)
-{
- struct cvmx_usb_transaction *transaction;
-
- if (unlikely(pipe->transfer_type != type))
- return NULL;
-
- transaction = kzalloc(sizeof(*transaction), GFP_ATOMIC);
- if (unlikely(!transaction))
- return NULL;
-
- transaction->type = type;
- transaction->buffer = buffer;
- transaction->buffer_length = buffer_length;
- transaction->control_header = control_header;
- /* FIXME: This is not used, implement it. */
- transaction->iso_start_frame = iso_start_frame;
- transaction->iso_number_packets = iso_number_packets;
- transaction->iso_packets = iso_packets;
- transaction->urb = urb;
- if (transaction->type == CVMX_USB_TRANSFER_CONTROL)
- transaction->stage = CVMX_USB_STAGE_SETUP;
- else
- transaction->stage = CVMX_USB_STAGE_NON_CONTROL;
-
- if (!list_empty(&pipe->transactions)) {
- list_add_tail(&transaction->node, &pipe->transactions);
- } else {
- list_add_tail(&transaction->node, &pipe->transactions);
- list_move_tail(&pipe->node,
- &usb->active_pipes[pipe->transfer_type]);
-
- /*
- * We may need to schedule the pipe if this was the head of the
- * pipe.
- */
- cvmx_usb_schedule(usb, 0);
- }
-
- return transaction;
-}
-
-/**
- * Call to submit a USB Bulk transfer to a pipe.
- *
- * @usb: USB device state populated by cvmx_usb_initialize().
- * @pipe: Handle to the pipe for the transfer.
- * @urb: URB.
- *
- * Returns: A submitted transaction or NULL on failure.
- */
-static struct cvmx_usb_transaction *cvmx_usb_submit_bulk(
- struct octeon_hcd *usb,
- struct cvmx_usb_pipe *pipe,
- struct urb *urb)
-{
- return cvmx_usb_submit_transaction(usb, pipe, CVMX_USB_TRANSFER_BULK,
- urb->transfer_dma,
- urb->transfer_buffer_length,
- 0, /* control_header */
- 0, /* iso_start_frame */
- 0, /* iso_number_packets */
- NULL, /* iso_packets */
- urb);
-}
-
-/**
- * Call to submit a USB Interrupt transfer to a pipe.
- *
- * @usb: USB device state populated by cvmx_usb_initialize().
- * @pipe: Handle to the pipe for the transfer.
- * @urb: URB returned when the callback is called.
- *
- * Returns: A submitted transaction or NULL on failure.
- */
-static struct cvmx_usb_transaction *cvmx_usb_submit_interrupt(
- struct octeon_hcd *usb,
- struct cvmx_usb_pipe *pipe,
- struct urb *urb)
-{
- return cvmx_usb_submit_transaction(usb, pipe,
- CVMX_USB_TRANSFER_INTERRUPT,
- urb->transfer_dma,
- urb->transfer_buffer_length,
- 0, /* control_header */
- 0, /* iso_start_frame */
- 0, /* iso_number_packets */
- NULL, /* iso_packets */
- urb);
-}
-
-/**
- * Call to submit a USB Control transfer to a pipe.
- *
- * @usb: USB device state populated by cvmx_usb_initialize().
- * @pipe: Handle to the pipe for the transfer.
- * @urb: URB.
- *
- * Returns: A submitted transaction or NULL on failure.
- */
-static struct cvmx_usb_transaction *cvmx_usb_submit_control(
- struct octeon_hcd *usb,
- struct cvmx_usb_pipe *pipe,
- struct urb *urb)
-{
- int buffer_length = urb->transfer_buffer_length;
- u64 control_header = urb->setup_dma;
- struct usb_ctrlrequest *header = cvmx_phys_to_ptr(control_header);
-
- if ((header->bRequestType & USB_DIR_IN) == 0)
- buffer_length = le16_to_cpu(header->wLength);
-
- return cvmx_usb_submit_transaction(usb, pipe,
- CVMX_USB_TRANSFER_CONTROL,
- urb->transfer_dma, buffer_length,
- control_header,
- 0, /* iso_start_frame */
- 0, /* iso_number_packets */
- NULL, /* iso_packets */
- urb);
-}
-
-/**
- * Call to submit a USB Isochronous transfer to a pipe.
- *
- * @usb: USB device state populated by cvmx_usb_initialize().
- * @pipe: Handle to the pipe for the transfer.
- * @urb: URB returned when the callback is called.
- *
- * Returns: A submitted transaction or NULL on failure.
- */
-static struct cvmx_usb_transaction *cvmx_usb_submit_isochronous(
- struct octeon_hcd *usb,
- struct cvmx_usb_pipe *pipe,
- struct urb *urb)
-{
- struct cvmx_usb_iso_packet *packets;
-
- packets = (struct cvmx_usb_iso_packet *)urb->setup_packet;
- return cvmx_usb_submit_transaction(usb, pipe,
- CVMX_USB_TRANSFER_ISOCHRONOUS,
- urb->transfer_dma,
- urb->transfer_buffer_length,
- 0, /* control_header */
- urb->start_frame,
- urb->number_of_packets,
- packets, urb);
-}
-
-/**
- * Cancel one outstanding request in a pipe. Canceling a request
- * can fail if the transaction has already completed before cancel
- * is called. Even after a successful cancel call, it may take
- * a frame or two for the cvmx_usb_poll() function to call the
- * associated callback.
- *
- * @usb: USB device state populated by cvmx_usb_initialize().
- * @pipe: Pipe to cancel requests in.
- * @transaction: Transaction to cancel, returned by the submit function.
- *
- * Returns: 0 or a negative error code.
- */
-static int cvmx_usb_cancel(struct octeon_hcd *usb,
- struct cvmx_usb_pipe *pipe,
- struct cvmx_usb_transaction *transaction)
-{
- /*
-	 * If the transaction is the HEAD of the queue and scheduled, we need
-	 * to treat it specially.
- */
- if (list_first_entry(&pipe->transactions, typeof(*transaction), node) ==
- transaction && (pipe->flags & CVMX_USB_PIPE_FLAGS_SCHEDULED)) {
- union cvmx_usbcx_hccharx usbc_hcchar;
-
- usb->pipe_for_channel[pipe->channel] = NULL;
- pipe->flags &= ~CVMX_USB_PIPE_FLAGS_SCHEDULED;
-
- CVMX_SYNCW;
-
- usbc_hcchar.u32 = cvmx_usb_read_csr32(usb,
- CVMX_USBCX_HCCHARX(pipe->channel, usb->index));
- /*
- * If the channel isn't enabled then the transaction already
- * completed.
- */
- if (usbc_hcchar.s.chena) {
- usbc_hcchar.s.chdis = 1;
- cvmx_usb_write_csr32(usb,
- CVMX_USBCX_HCCHARX(pipe->channel,
- usb->index),
- usbc_hcchar.u32);
- }
- }
- cvmx_usb_complete(usb, pipe, transaction, CVMX_USB_STATUS_CANCEL);
- return 0;
-}
-
-/**
- * Cancel all outstanding requests in a pipe. Logically all this
- * does is call cvmx_usb_cancel() in a loop.
- *
- * @usb: USB device state populated by cvmx_usb_initialize().
- * @pipe: Pipe to cancel requests in.
- *
- * Returns: 0 or a negative error code.
- */
-static int cvmx_usb_cancel_all(struct octeon_hcd *usb,
- struct cvmx_usb_pipe *pipe)
-{
- struct cvmx_usb_transaction *transaction, *next;
-
- /* Simply loop through and attempt to cancel each transaction */
- list_for_each_entry_safe(transaction, next, &pipe->transactions, node) {
- int result = cvmx_usb_cancel(usb, pipe, transaction);
-
- if (unlikely(result != 0))
- return result;
- }
- return 0;
-}
-
-/**
- * Close a pipe created with cvmx_usb_open_pipe().
- *
- * @usb: USB device state populated by cvmx_usb_initialize().
- * @pipe: Pipe to close.
- *
- * Returns: 0 or a negative error code. EBUSY is returned if the pipe has
- * outstanding transfers.
- */
-static int cvmx_usb_close_pipe(struct octeon_hcd *usb,
- struct cvmx_usb_pipe *pipe)
-{
- /* Fail if the pipe has pending transactions */
- if (!list_empty(&pipe->transactions))
- return -EBUSY;
-
- list_del(&pipe->node);
- kfree(pipe);
-
- return 0;
-}
-
-/**
- * Get the current USB protocol level frame number. The frame
- * number is always in the range of 0-0x7ff.
- *
- * @usb: USB device state populated by cvmx_usb_initialize().
- *
- * Returns: USB frame number
- */
-static int cvmx_usb_get_frame_number(struct octeon_hcd *usb)
-{
- union cvmx_usbcx_hfnum usbc_hfnum;
-
- usbc_hfnum.u32 = cvmx_usb_read_csr32(usb, CVMX_USBCX_HFNUM(usb->index));
-
- return usbc_hfnum.s.frnum;
-}
-
-static void cvmx_usb_transfer_control(struct octeon_hcd *usb,
- struct cvmx_usb_pipe *pipe,
- struct cvmx_usb_transaction *transaction,
- union cvmx_usbcx_hccharx usbc_hcchar,
- int buffer_space_left,
- int bytes_in_last_packet)
-{
- switch (transaction->stage) {
- case CVMX_USB_STAGE_NON_CONTROL:
- case CVMX_USB_STAGE_NON_CONTROL_SPLIT_COMPLETE:
- /* This should be impossible */
- cvmx_usb_complete(usb, pipe, transaction,
- CVMX_USB_STATUS_ERROR);
- break;
- case CVMX_USB_STAGE_SETUP:
- pipe->pid_toggle = 1;
- if (cvmx_usb_pipe_needs_split(usb, pipe)) {
- transaction->stage =
- CVMX_USB_STAGE_SETUP_SPLIT_COMPLETE;
- } else {
- struct usb_ctrlrequest *header =
- cvmx_phys_to_ptr(transaction->control_header);
- if (header->wLength)
- transaction->stage = CVMX_USB_STAGE_DATA;
- else
- transaction->stage = CVMX_USB_STAGE_STATUS;
- }
- break;
- case CVMX_USB_STAGE_SETUP_SPLIT_COMPLETE:
- {
- struct usb_ctrlrequest *header =
- cvmx_phys_to_ptr(transaction->control_header);
- if (header->wLength)
- transaction->stage = CVMX_USB_STAGE_DATA;
- else
- transaction->stage = CVMX_USB_STAGE_STATUS;
- }
- break;
- case CVMX_USB_STAGE_DATA:
- if (cvmx_usb_pipe_needs_split(usb, pipe)) {
- transaction->stage = CVMX_USB_STAGE_DATA_SPLIT_COMPLETE;
- /*
-			 * For setup OUT data that are splits,
-			 * the hardware doesn't appear to count
-			 * transferred data, so we manually
-			 * update the bytes transferred here.
- */
- if (!usbc_hcchar.s.epdir) {
- if (buffer_space_left < pipe->max_packet)
- transaction->actual_bytes +=
- buffer_space_left;
- else
- transaction->actual_bytes +=
- pipe->max_packet;
- }
- } else if ((buffer_space_left == 0) ||
- (bytes_in_last_packet < pipe->max_packet)) {
- pipe->pid_toggle = 1;
- transaction->stage = CVMX_USB_STAGE_STATUS;
- }
- break;
- case CVMX_USB_STAGE_DATA_SPLIT_COMPLETE:
- if ((buffer_space_left == 0) ||
- (bytes_in_last_packet < pipe->max_packet)) {
- pipe->pid_toggle = 1;
- transaction->stage = CVMX_USB_STAGE_STATUS;
- } else {
- transaction->stage = CVMX_USB_STAGE_DATA;
- }
- break;
- case CVMX_USB_STAGE_STATUS:
- if (cvmx_usb_pipe_needs_split(usb, pipe))
- transaction->stage =
- CVMX_USB_STAGE_STATUS_SPLIT_COMPLETE;
- else
- cvmx_usb_complete(usb, pipe, transaction,
- CVMX_USB_STATUS_OK);
- break;
- case CVMX_USB_STAGE_STATUS_SPLIT_COMPLETE:
- cvmx_usb_complete(usb, pipe, transaction, CVMX_USB_STATUS_OK);
- break;
- }
-}
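-
-/*
- * The control stages above, flattened (the *_SPLIT_COMPLETE states occur
- * only when cvmx_usb_pipe_needs_split() is true):
- *
- *   SETUP -> DATA -> STATUS -> complete
- *
- * DATA repeats until the buffer fills or a short packet arrives, and is
- * skipped entirely when the setup header's wLength is zero.
- */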
-
-static void cvmx_usb_transfer_bulk(struct octeon_hcd *usb,
- struct cvmx_usb_pipe *pipe,
- struct cvmx_usb_transaction *transaction,
- union cvmx_usbcx_hcintx usbc_hcint,
- int buffer_space_left,
- int bytes_in_last_packet)
-{
- /*
- * The only time a bulk transfer isn't complete when it finishes with
- * an ACK is during a split transaction. For splits we need to continue
- * the transfer if more data is needed.
- */
- if (cvmx_usb_pipe_needs_split(usb, pipe)) {
- if (transaction->stage == CVMX_USB_STAGE_NON_CONTROL)
- transaction->stage =
- CVMX_USB_STAGE_NON_CONTROL_SPLIT_COMPLETE;
- else if (buffer_space_left &&
- (bytes_in_last_packet == pipe->max_packet))
- transaction->stage = CVMX_USB_STAGE_NON_CONTROL;
- else
- cvmx_usb_complete(usb, pipe, transaction,
- CVMX_USB_STATUS_OK);
- } else {
- if ((pipe->device_speed == CVMX_USB_SPEED_HIGH) &&
- (pipe->transfer_dir == CVMX_USB_DIRECTION_OUT) &&
- (usbc_hcint.s.nak))
- pipe->flags |= CVMX_USB_PIPE_FLAGS_NEED_PING;
- if (!buffer_space_left ||
- (bytes_in_last_packet < pipe->max_packet))
- cvmx_usb_complete(usb, pipe, transaction,
- CVMX_USB_STATUS_OK);
- }
-}
-
-static void cvmx_usb_transfer_intr(struct octeon_hcd *usb,
- struct cvmx_usb_pipe *pipe,
- struct cvmx_usb_transaction *transaction,
- int buffer_space_left,
- int bytes_in_last_packet)
-{
- if (cvmx_usb_pipe_needs_split(usb, pipe)) {
- if (transaction->stage == CVMX_USB_STAGE_NON_CONTROL) {
- transaction->stage =
- CVMX_USB_STAGE_NON_CONTROL_SPLIT_COMPLETE;
- } else if (buffer_space_left &&
- (bytes_in_last_packet == pipe->max_packet)) {
- transaction->stage = CVMX_USB_STAGE_NON_CONTROL;
- } else {
- pipe->next_tx_frame += pipe->interval;
- cvmx_usb_complete(usb, pipe, transaction,
- CVMX_USB_STATUS_OK);
- }
- } else if (!buffer_space_left ||
- (bytes_in_last_packet < pipe->max_packet)) {
- pipe->next_tx_frame += pipe->interval;
- cvmx_usb_complete(usb, pipe, transaction, CVMX_USB_STATUS_OK);
- }
-}
-
-static void cvmx_usb_transfer_isoc(struct octeon_hcd *usb,
- struct cvmx_usb_pipe *pipe,
- struct cvmx_usb_transaction *transaction,
- int buffer_space_left,
- int bytes_in_last_packet,
- int bytes_this_transfer)
-{
- if (cvmx_usb_pipe_needs_split(usb, pipe)) {
- /*
- * ISOCHRONOUS OUT splits don't require a complete split stage.
- * Instead they use a sequence of begin OUT splits to transfer
- * the data 188 bytes at a time. Once the transfer is complete,
- * the pipe sleeps until the next schedule interval.
- */
- if (pipe->transfer_dir == CVMX_USB_DIRECTION_OUT) {
- /*
- * If no space left or this wasn't a max size packet
- * then this transfer is complete. Otherwise start it
- * again to send the next 188 bytes
- */
- if (!buffer_space_left || (bytes_this_transfer < 188)) {
- pipe->next_tx_frame += pipe->interval;
- cvmx_usb_complete(usb, pipe, transaction,
- CVMX_USB_STATUS_OK);
- }
- return;
- }
- if (transaction->stage ==
- CVMX_USB_STAGE_NON_CONTROL_SPLIT_COMPLETE) {
- /*
- * We are in the incoming data phase. Keep getting data
- * until we run out of space or get a small packet
- */
- if ((buffer_space_left == 0) ||
- (bytes_in_last_packet < pipe->max_packet)) {
- pipe->next_tx_frame += pipe->interval;
- cvmx_usb_complete(usb, pipe, transaction,
- CVMX_USB_STATUS_OK);
- }
- } else {
- transaction->stage =
- CVMX_USB_STAGE_NON_CONTROL_SPLIT_COMPLETE;
- }
- } else {
- pipe->next_tx_frame += pipe->interval;
- cvmx_usb_complete(usb, pipe, transaction, CVMX_USB_STATUS_OK);
- }
-}
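-
-/*
- * The 188-byte threshold above matches the USB 2.0 split-transaction rules:
- * a single isochronous OUT start-split carries at most 188 bytes, the data a
- * full-speed bus can move in one microframe, so anything shorter marks the
- * end of the payload.
- */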
-
-/**
- * Poll a channel for status
- *
- * @usb: USB device
- * @channel: Channel to poll
- *
- * Returns: Zero on success
- */
-static int cvmx_usb_poll_channel(struct octeon_hcd *usb, int channel)
-{
- struct usb_hcd *hcd = octeon_to_hcd(usb);
- struct device *dev = hcd->self.controller;
- union cvmx_usbcx_hcintx usbc_hcint;
- union cvmx_usbcx_hctsizx usbc_hctsiz;
- union cvmx_usbcx_hccharx usbc_hcchar;
- struct cvmx_usb_pipe *pipe;
- struct cvmx_usb_transaction *transaction;
- int bytes_this_transfer;
- int bytes_in_last_packet;
- int packets_processed;
- int buffer_space_left;
-
- /* Read the interrupt status bits for the channel */
- usbc_hcint.u32 = cvmx_usb_read_csr32(usb,
- CVMX_USBCX_HCINTX(channel, usb->index));
-
- if (usb->init_flags & CVMX_USB_INITIALIZE_FLAGS_NO_DMA) {
- usbc_hcchar.u32 = cvmx_usb_read_csr32(usb,
- CVMX_USBCX_HCCHARX(channel, usb->index));
-
- if (usbc_hcchar.s.chena && usbc_hcchar.s.chdis) {
- /*
- * There seems to be a bug in CN31XX which can cause
- * interrupt IN transfers to get stuck until we do a
- * write of HCCHARX without changing things
- */
- cvmx_usb_write_csr32(usb,
- CVMX_USBCX_HCCHARX(channel,
- usb->index),
- usbc_hcchar.u32);
- return 0;
- }
-
- /*
- * In non DMA mode the channels don't halt themselves. We need
- * to manually disable channels that are left running
- */
- if (!usbc_hcint.s.chhltd) {
- if (usbc_hcchar.s.chena) {
- union cvmx_usbcx_hcintmskx hcintmsk;
- /* Disable all interrupts except CHHLTD */
- hcintmsk.u32 = 0;
- hcintmsk.s.chhltdmsk = 1;
- cvmx_usb_write_csr32(usb,
- CVMX_USBCX_HCINTMSKX(channel, usb->index),
- hcintmsk.u32);
- usbc_hcchar.s.chdis = 1;
- cvmx_usb_write_csr32(usb,
- CVMX_USBCX_HCCHARX(channel, usb->index),
- usbc_hcchar.u32);
- return 0;
- } else if (usbc_hcint.s.xfercompl) {
- /*
- * Successful IN/OUT with transfer complete.
- * Channel halt isn't needed.
- */
- } else {
- dev_err(dev, "USB%d: Channel %d interrupt without halt\n",
- usb->index, channel);
- return 0;
- }
- }
- } else {
- /*
-		 * There are no interrupts that we need to process when the
-		 * channel is still running.
- */
- if (!usbc_hcint.s.chhltd)
- return 0;
- }
-
- /* Disable the channel interrupts now that it is done */
- cvmx_usb_write_csr32(usb, CVMX_USBCX_HCINTMSKX(channel, usb->index), 0);
- usb->idle_hardware_channels |= (1 << channel);
-
- /* Make sure this channel is tied to a valid pipe */
- pipe = usb->pipe_for_channel[channel];
- prefetch(pipe);
- if (!pipe)
- return 0;
- transaction = list_first_entry(&pipe->transactions,
- typeof(*transaction),
- node);
- prefetch(transaction);
-
- /*
- * Disconnect this pipe from the HW channel. Later the schedule
- * function will figure out which pipe needs to go
- */
- usb->pipe_for_channel[channel] = NULL;
- pipe->flags &= ~CVMX_USB_PIPE_FLAGS_SCHEDULED;
-
- /*
- * Read the channel config info so we can figure out how much data
- * transferred
- */
- usbc_hcchar.u32 = cvmx_usb_read_csr32(usb,
- CVMX_USBCX_HCCHARX(channel, usb->index));
- usbc_hctsiz.u32 = cvmx_usb_read_csr32(usb,
- CVMX_USBCX_HCTSIZX(channel, usb->index));
-
- /*
-	 * Calculating the number of bytes successfully transferred depends
-	 * on the transfer direction.
- */
- packets_processed = transaction->pktcnt - usbc_hctsiz.s.pktcnt;
- if (usbc_hcchar.s.epdir) {
- /*
- * IN transactions are easy. For every byte received the
- * hardware decrements xfersize. All we need to do is subtract
- * the current value of xfersize from its starting value and we
- * know how many bytes were written to the buffer
- */
- bytes_this_transfer = transaction->xfersize -
- usbc_hctsiz.s.xfersize;
- } else {
- /*
-		 * OUT transactions don't decrement xfersize. Instead, pktcnt
-		 * is decremented on every successful packet send. The hardware
-		 * does this when it receives an ACK or NYET. If it doesn't
-		 * receive one of these responses, pktcnt doesn't change.
- */
- bytes_this_transfer = packets_processed * usbc_hcchar.s.mps;
- /*
- * The last packet may not be a full transfer if we didn't have
- * enough data
- */
- if (bytes_this_transfer > transaction->xfersize)
- bytes_this_transfer = transaction->xfersize;
- }
- /* Figure out how many bytes were in the last packet of the transfer */
- if (packets_processed)
- bytes_in_last_packet = bytes_this_transfer -
- (packets_processed - 1) * usbc_hcchar.s.mps;
- else
- bytes_in_last_packet = bytes_this_transfer;
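-
-	/*
-	 * Worked example with hypothetical numbers: an IN transfer queued
-	 * with xfersize = 1024 and pktcnt = 2 on a 512-byte pipe that halts
-	 * with xfersize = 100 and pktcnt = 0 processed two packets and moved
-	 * 1024 - 100 = 924 bytes, so the last packet carried
-	 * 924 - (2 - 1) * 512 = 412 bytes.
-	 */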
-
- /*
- * As a special case, setup transactions output the setup header, not
- * the user's data. For this reason we don't count setup data as bytes
- * transferred
- */
- if ((transaction->stage == CVMX_USB_STAGE_SETUP) ||
- (transaction->stage == CVMX_USB_STAGE_SETUP_SPLIT_COMPLETE))
- bytes_this_transfer = 0;
-
- /*
- * Add the bytes transferred to the running total. It is important that
- * bytes_this_transfer doesn't count any data that needs to be
- * retransmitted
- */
- transaction->actual_bytes += bytes_this_transfer;
- if (transaction->type == CVMX_USB_TRANSFER_ISOCHRONOUS)
- buffer_space_left = transaction->iso_packets[0].length -
- transaction->actual_bytes;
- else
- buffer_space_left = transaction->buffer_length -
- transaction->actual_bytes;
-
- /*
- * We need to remember the PID toggle state for the next transaction.
-	 * The hardware has already updated it.
- */
- pipe->pid_toggle = !(usbc_hctsiz.s.pid == 0);
-
- /*
- * For high speed bulk out, assume the next transaction will need to do
- * a ping before proceeding. If this isn't true the ACK processing below
- * will clear this flag
- */
- if ((pipe->device_speed == CVMX_USB_SPEED_HIGH) &&
- (pipe->transfer_type == CVMX_USB_TRANSFER_BULK) &&
- (pipe->transfer_dir == CVMX_USB_DIRECTION_OUT))
- pipe->flags |= CVMX_USB_PIPE_FLAGS_NEED_PING;
-
- if (WARN_ON_ONCE(bytes_this_transfer < 0)) {
- /*
-		 * In some rare cases the DMA engine seems to get stuck and
-		 * keeps subtracting the same byte count over and over again.
-		 * In such a case we just need to fail every transaction.
- */
- cvmx_usb_complete(usb, pipe, transaction,
- CVMX_USB_STATUS_ERROR);
- return 0;
- }
-
- if (usbc_hcint.s.stall) {
- /*
- * STALL as a response means this transaction cannot be
-		 * completed because the device can't process transactions.
-		 * Tell the user. Any data that was transferred is counted in
-		 * the actual bytes transferred.
- */
- pipe->pid_toggle = 0;
- cvmx_usb_complete(usb, pipe, transaction,
- CVMX_USB_STATUS_STALL);
- } else if (usbc_hcint.s.xacterr) {
- /*
- * XactErr as a response means the device signaled
- * something wrong with the transfer. For example, PID
- * toggle errors cause these.
- */
- cvmx_usb_complete(usb, pipe, transaction,
- CVMX_USB_STATUS_XACTERR);
- } else if (usbc_hcint.s.bblerr) {
- /* Babble Error (BblErr) */
- cvmx_usb_complete(usb, pipe, transaction,
- CVMX_USB_STATUS_BABBLEERR);
- } else if (usbc_hcint.s.datatglerr) {
- /* Data toggle error */
- cvmx_usb_complete(usb, pipe, transaction,
- CVMX_USB_STATUS_DATATGLERR);
- } else if (usbc_hcint.s.nyet) {
- /*
- * NYET as a response is only allowed in three cases: as a
- * response to a ping, as a response to a split transaction, and
- * as a response to a bulk out. The ping case is handled by
- * hardware, so we only have splits and bulk out
- */
- if (!cvmx_usb_pipe_needs_split(usb, pipe)) {
- transaction->retries = 0;
- /*
- * If there is more data to go then we need to try
- * again. Otherwise this transaction is complete
- */
- if ((buffer_space_left == 0) ||
- (bytes_in_last_packet < pipe->max_packet))
- cvmx_usb_complete(usb, pipe,
- transaction,
- CVMX_USB_STATUS_OK);
- } else {
- /*
- * Split transactions retry the split complete 4 times
-			 * then rewind to the start split and do the entire
-			 * transaction again.
- */
- transaction->retries++;
- if ((transaction->retries & 0x3) == 0) {
- /*
- * Rewind to the beginning of the transaction by
-				 * ANDing off the split complete bit.
- */
- transaction->stage &= ~1;
- pipe->split_sc_frame = -1;
- }
- }
- } else if (usbc_hcint.s.ack) {
- transaction->retries = 0;
- /*
- * The ACK bit can only be checked after the other error bits.
- * This is because a multi packet transfer may succeed in a
- * number of packets and then get a different response on the
- * last packet. In this case both ACK and the last response bit
- * will be set. If none of the other response bits is set, then
- * the last packet must have been an ACK
- *
- * Since we got an ACK, we know we don't need to do a ping on
- * this pipe
- */
- pipe->flags &= ~CVMX_USB_PIPE_FLAGS_NEED_PING;
-
- switch (transaction->type) {
- case CVMX_USB_TRANSFER_CONTROL:
- cvmx_usb_transfer_control(usb, pipe, transaction,
- usbc_hcchar,
- buffer_space_left,
- bytes_in_last_packet);
- break;
- case CVMX_USB_TRANSFER_BULK:
- cvmx_usb_transfer_bulk(usb, pipe, transaction,
- usbc_hcint, buffer_space_left,
- bytes_in_last_packet);
- break;
- case CVMX_USB_TRANSFER_INTERRUPT:
- cvmx_usb_transfer_intr(usb, pipe, transaction,
- buffer_space_left,
- bytes_in_last_packet);
- break;
- case CVMX_USB_TRANSFER_ISOCHRONOUS:
- cvmx_usb_transfer_isoc(usb, pipe, transaction,
- buffer_space_left,
- bytes_in_last_packet,
- bytes_this_transfer);
- break;
- }
- } else if (usbc_hcint.s.nak) {
- /*
- * If this was a split then clear our split in progress marker.
- */
- if (usb->active_split == transaction)
- usb->active_split = NULL;
- /*
- * NAK as a response means the device couldn't accept the
- * transaction, but it should be retried in the future. Rewind
-		 * to the beginning of the transaction by ANDing off the split
-		 * complete bit. Retry in the next interval.
- */
- transaction->retries = 0;
- transaction->stage &= ~1;
- pipe->next_tx_frame += pipe->interval;
- if (pipe->next_tx_frame < usb->frame_number)
- pipe->next_tx_frame = usb->frame_number +
- pipe->interval -
- (usb->frame_number - pipe->next_tx_frame) %
- pipe->interval;
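-		/*
-		 * e.g. with hypothetical numbers: interval = 8,
-		 * next_tx_frame = 108 after the increment above, and
-		 * frame_number already at 110. The rewind yields
-		 * 110 + 8 - (110 - 108) % 8 = 116, the next slot in the
-		 * future that keeps the pipe's original alignment.
-		 */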
- } else {
- struct cvmx_usb_port_status port;
-
- port = cvmx_usb_get_status(usb);
- if (port.port_enabled) {
- /* We'll retry the exact same transaction again */
- transaction->retries++;
- } else {
- /*
- * We get channel halted interrupts with no result bits
-			 * set when the cable is unplugged.
- */
- cvmx_usb_complete(usb, pipe, transaction,
- CVMX_USB_STATUS_ERROR);
- }
- }
- return 0;
-}
-
-static void octeon_usb_port_callback(struct octeon_hcd *usb)
-{
- spin_unlock(&usb->lock);
- usb_hcd_poll_rh_status(octeon_to_hcd(usb));
- spin_lock(&usb->lock);
-}
-
-/**
- * Poll the USB block for status and call all needed callback
- * handlers. This function is meant to be called in the interrupt
- * handler for the USB controller. It can also be called
- * periodically in a loop for non-interrupt based operation.
- *
- * @usb: USB device state populated by cvmx_usb_initialize().
- *
- * Returns: 0 or a negative error code.
- */
-static int cvmx_usb_poll(struct octeon_hcd *usb)
-{
- union cvmx_usbcx_hfnum usbc_hfnum;
- union cvmx_usbcx_gintsts usbc_gintsts;
-
- prefetch_range(usb, sizeof(*usb));
-
- /* Update the frame counter */
- usbc_hfnum.u32 = cvmx_usb_read_csr32(usb, CVMX_USBCX_HFNUM(usb->index));
- if ((usb->frame_number & 0x3fff) > usbc_hfnum.s.frnum)
- usb->frame_number += 0x4000;
- usb->frame_number &= ~0x3fffull;
- usb->frame_number |= usbc_hfnum.s.frnum;
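-
-	/*
-	 * e.g. a software frame_number of 0x4100 followed by a hardware
-	 * frnum of 0x005: 0x100 > 0x005 signals a 14-bit wrap, so 0x4000 is
-	 * added and the result is 0x8005, keeping the software counter
-	 * monotonic across hardware wraps.
-	 */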
-
- /* Read the pending interrupts */
- usbc_gintsts.u32 = cvmx_usb_read_csr32(usb,
- CVMX_USBCX_GINTSTS(usb->index));
-
- /* Clear the interrupts now that we know about them */
- cvmx_usb_write_csr32(usb, CVMX_USBCX_GINTSTS(usb->index),
- usbc_gintsts.u32);
-
- if (usbc_gintsts.s.rxflvl) {
- /*
- * RxFIFO Non-Empty (RxFLvl)
- * Indicates that there is at least one packet pending to be
- * read from the RxFIFO.
- *
- * In DMA mode this is handled by hardware
- */
- if (usb->init_flags & CVMX_USB_INITIALIZE_FLAGS_NO_DMA)
- cvmx_usb_poll_rx_fifo(usb);
- }
- if (usbc_gintsts.s.ptxfemp || usbc_gintsts.s.nptxfemp) {
- /* Fill the Tx FIFOs when not in DMA mode */
- if (usb->init_flags & CVMX_USB_INITIALIZE_FLAGS_NO_DMA)
- cvmx_usb_poll_tx_fifo(usb);
- }
- if (usbc_gintsts.s.disconnint || usbc_gintsts.s.prtint) {
- union cvmx_usbcx_hprt usbc_hprt;
- /*
- * Disconnect Detected Interrupt (DisconnInt)
- * Asserted when a device disconnect is detected.
- *
- * Host Port Interrupt (PrtInt)
- * The core sets this bit to indicate a change in port status of
- * one of the O2P USB core ports in Host mode. The application
- * must read the Host Port Control and Status (HPRT) register to
- * determine the exact event that caused this interrupt. The
- * application must clear the appropriate status bit in the Host
- * Port Control and Status register to clear this bit.
- *
- * Call the user's port callback
- */
- octeon_usb_port_callback(usb);
- /* Clear the port change bits */
- usbc_hprt.u32 =
- cvmx_usb_read_csr32(usb, CVMX_USBCX_HPRT(usb->index));
- usbc_hprt.s.prtena = 0;
- cvmx_usb_write_csr32(usb, CVMX_USBCX_HPRT(usb->index),
- usbc_hprt.u32);
- }
- if (usbc_gintsts.s.hchint) {
- /*
- * Host Channels Interrupt (HChInt)
- * The core sets this bit to indicate that an interrupt is
- * pending on one of the channels of the core (in Host mode).
- * The application must read the Host All Channels Interrupt
- * (HAINT) register to determine the exact number of the channel
- * on which the interrupt occurred, and then read the
- * corresponding Host Channel-n Interrupt (HCINTn) register to
- * determine the exact cause of the interrupt. The application
- * must clear the appropriate status bit in the HCINTn register
- * to clear this bit.
- */
- union cvmx_usbcx_haint usbc_haint;
-
- usbc_haint.u32 = cvmx_usb_read_csr32(usb,
- CVMX_USBCX_HAINT(usb->index));
- while (usbc_haint.u32) {
- int channel;
-
- channel = __fls(usbc_haint.u32);
- cvmx_usb_poll_channel(usb, channel);
- usbc_haint.u32 ^= 1 << channel;
- }
- }
-
- cvmx_usb_schedule(usb, usbc_gintsts.s.sof);
-
- return 0;
-}
-
-/* convert between an HCD pointer and the corresponding struct octeon_hcd */
-static inline struct octeon_hcd *hcd_to_octeon(struct usb_hcd *hcd)
-{
- return (struct octeon_hcd *)(hcd->hcd_priv);
-}
-
-static irqreturn_t octeon_usb_irq(struct usb_hcd *hcd)
-{
- struct octeon_hcd *usb = hcd_to_octeon(hcd);
- unsigned long flags;
-
- spin_lock_irqsave(&usb->lock, flags);
- cvmx_usb_poll(usb);
- spin_unlock_irqrestore(&usb->lock, flags);
- return IRQ_HANDLED;
-}
-
-static int octeon_usb_start(struct usb_hcd *hcd)
-{
- hcd->state = HC_STATE_RUNNING;
- return 0;
-}
-
-static void octeon_usb_stop(struct usb_hcd *hcd)
-{
- hcd->state = HC_STATE_HALT;
-}
-
-static int octeon_usb_get_frame_number(struct usb_hcd *hcd)
-{
- struct octeon_hcd *usb = hcd_to_octeon(hcd);
-
- return cvmx_usb_get_frame_number(usb);
-}
-
-static int octeon_usb_urb_enqueue(struct usb_hcd *hcd,
- struct urb *urb,
- gfp_t mem_flags)
-{
- struct octeon_hcd *usb = hcd_to_octeon(hcd);
- struct device *dev = hcd->self.controller;
- struct cvmx_usb_transaction *transaction = NULL;
- struct cvmx_usb_pipe *pipe;
- unsigned long flags;
- struct cvmx_usb_iso_packet *iso_packet;
- struct usb_host_endpoint *ep = urb->ep;
- int rc;
-
- urb->status = 0;
- spin_lock_irqsave(&usb->lock, flags);
-
- rc = usb_hcd_link_urb_to_ep(hcd, urb);
- if (rc) {
- spin_unlock_irqrestore(&usb->lock, flags);
- return rc;
- }
-
- if (!ep->hcpriv) {
- enum cvmx_usb_transfer transfer_type;
- enum cvmx_usb_speed speed;
- int split_device = 0;
- int split_port = 0;
-
- switch (usb_pipetype(urb->pipe)) {
- case PIPE_ISOCHRONOUS:
- transfer_type = CVMX_USB_TRANSFER_ISOCHRONOUS;
- break;
- case PIPE_INTERRUPT:
- transfer_type = CVMX_USB_TRANSFER_INTERRUPT;
- break;
- case PIPE_CONTROL:
- transfer_type = CVMX_USB_TRANSFER_CONTROL;
- break;
- default:
- transfer_type = CVMX_USB_TRANSFER_BULK;
- break;
- }
- switch (urb->dev->speed) {
- case USB_SPEED_LOW:
- speed = CVMX_USB_SPEED_LOW;
- break;
- case USB_SPEED_FULL:
- speed = CVMX_USB_SPEED_FULL;
- break;
- default:
- speed = CVMX_USB_SPEED_HIGH;
- break;
- }
- /*
- * For slow devices on high speed ports we need to find the hub
- * that does the speed translation so we know where to send the
- * split transactions.
- */
- if (speed != CVMX_USB_SPEED_HIGH) {
- /*
- * Start at this device and work our way up the usb
- * tree.
- */
- struct usb_device *dev = urb->dev;
-
- while (dev->parent) {
- /*
-				 * If our parent is high speed then it will
-				 * receive the splits.
- */
- if (dev->parent->speed == USB_SPEED_HIGH) {
- split_device = dev->parent->devnum;
- split_port = dev->portnum;
- break;
- }
- /*
- * Move up the tree one level. If we make it all
- * the way up the tree, then the port must not
- * be in high speed mode and we don't need a
- * split.
- */
- dev = dev->parent;
- }
- }
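-		/*
-		 * wMaxPacketSize packs two fields: bits 10:0 hold the max
-		 * packet size and bits 12:11 the number of additional
-		 * transactions per microframe, hence the & 0x7ff and
-		 * (>> 11) & 0x3 in the call below.
-		 */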
- pipe = cvmx_usb_open_pipe(usb, usb_pipedevice(urb->pipe),
- usb_pipeendpoint(urb->pipe), speed,
- le16_to_cpu(ep->desc.wMaxPacketSize)
- & 0x7ff,
- transfer_type,
- usb_pipein(urb->pipe) ?
- CVMX_USB_DIRECTION_IN :
- CVMX_USB_DIRECTION_OUT,
- urb->interval,
- (le16_to_cpu(ep->desc.wMaxPacketSize)
- >> 11) & 0x3,
- split_device, split_port);
- if (!pipe) {
- usb_hcd_unlink_urb_from_ep(hcd, urb);
- spin_unlock_irqrestore(&usb->lock, flags);
- dev_dbg(dev, "Failed to create pipe\n");
- return -ENOMEM;
- }
- ep->hcpriv = pipe;
- } else {
- pipe = ep->hcpriv;
- }
-
- switch (usb_pipetype(urb->pipe)) {
- case PIPE_ISOCHRONOUS:
- dev_dbg(dev, "Submit isochronous to %d.%d\n",
- usb_pipedevice(urb->pipe),
- usb_pipeendpoint(urb->pipe));
- /*
- * Allocate a structure to use for our private list of
- * isochronous packets.
- */
- iso_packet = kmalloc_array(urb->number_of_packets,
- sizeof(struct cvmx_usb_iso_packet),
- GFP_ATOMIC);
- if (iso_packet) {
- int i;
- /* Fill the list with the data from the URB */
- for (i = 0; i < urb->number_of_packets; i++) {
- iso_packet[i].offset =
- urb->iso_frame_desc[i].offset;
- iso_packet[i].length =
- urb->iso_frame_desc[i].length;
- iso_packet[i].status = CVMX_USB_STATUS_ERROR;
- }
- /*
- * Store a pointer to the list in the URB setup_packet
- * field. We know this currently isn't being used and
- * this saves us a bunch of logic.
- */
- urb->setup_packet = (char *)iso_packet;
- transaction = cvmx_usb_submit_isochronous(usb,
- pipe, urb);
- /*
- * If submit failed we need to free our private packet
- * list.
- */
- if (!transaction) {
- urb->setup_packet = NULL;
- kfree(iso_packet);
- }
- }
- break;
- case PIPE_INTERRUPT:
- dev_dbg(dev, "Submit interrupt to %d.%d\n",
- usb_pipedevice(urb->pipe),
- usb_pipeendpoint(urb->pipe));
- transaction = cvmx_usb_submit_interrupt(usb, pipe, urb);
- break;
- case PIPE_CONTROL:
- dev_dbg(dev, "Submit control to %d.%d\n",
- usb_pipedevice(urb->pipe),
- usb_pipeendpoint(urb->pipe));
- transaction = cvmx_usb_submit_control(usb, pipe, urb);
- break;
- case PIPE_BULK:
- dev_dbg(dev, "Submit bulk to %d.%d\n",
- usb_pipedevice(urb->pipe),
- usb_pipeendpoint(urb->pipe));
- transaction = cvmx_usb_submit_bulk(usb, pipe, urb);
- break;
- }
- if (!transaction) {
- usb_hcd_unlink_urb_from_ep(hcd, urb);
- spin_unlock_irqrestore(&usb->lock, flags);
- dev_dbg(dev, "Failed to submit\n");
- return -ENOMEM;
- }
- urb->hcpriv = transaction;
- spin_unlock_irqrestore(&usb->lock, flags);
- return 0;
-}
-
-static int octeon_usb_urb_dequeue(struct usb_hcd *hcd,
- struct urb *urb,
- int status)
-{
- struct octeon_hcd *usb = hcd_to_octeon(hcd);
- unsigned long flags;
- int rc;
-
- if (!urb->dev)
- return -EINVAL;
-
- spin_lock_irqsave(&usb->lock, flags);
-
- rc = usb_hcd_check_unlink_urb(hcd, urb, status);
- if (rc)
- goto out;
-
- urb->status = status;
- cvmx_usb_cancel(usb, urb->ep->hcpriv, urb->hcpriv);
-
-out:
- spin_unlock_irqrestore(&usb->lock, flags);
-
- return rc;
-}
-
-static void octeon_usb_endpoint_disable(struct usb_hcd *hcd,
- struct usb_host_endpoint *ep)
-{
- struct device *dev = hcd->self.controller;
-
- if (ep->hcpriv) {
- struct octeon_hcd *usb = hcd_to_octeon(hcd);
- struct cvmx_usb_pipe *pipe = ep->hcpriv;
- unsigned long flags;
-
- spin_lock_irqsave(&usb->lock, flags);
- cvmx_usb_cancel_all(usb, pipe);
- if (cvmx_usb_close_pipe(usb, pipe))
- dev_dbg(dev, "Closing pipe %p failed\n", pipe);
- spin_unlock_irqrestore(&usb->lock, flags);
- ep->hcpriv = NULL;
- }
-}
-
-static int octeon_usb_hub_status_data(struct usb_hcd *hcd, char *buf)
-{
- struct octeon_hcd *usb = hcd_to_octeon(hcd);
- struct cvmx_usb_port_status port_status;
- unsigned long flags;
-
- spin_lock_irqsave(&usb->lock, flags);
- port_status = cvmx_usb_get_status(usb);
- spin_unlock_irqrestore(&usb->lock, flags);
- buf[0] = port_status.connect_change << 1;
-
- return buf[0] != 0;
-}
-
-static int octeon_usb_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
- u16 wIndex, char *buf, u16 wLength)
-{
- struct octeon_hcd *usb = hcd_to_octeon(hcd);
- struct device *dev = hcd->self.controller;
- struct cvmx_usb_port_status usb_port_status;
- int port_status;
- struct usb_hub_descriptor *desc;
- unsigned long flags;
-
- switch (typeReq) {
- case ClearHubFeature:
- dev_dbg(dev, "ClearHubFeature\n");
- switch (wValue) {
- case C_HUB_LOCAL_POWER:
- case C_HUB_OVER_CURRENT:
- /* Nothing required here */
- break;
- default:
- return -EINVAL;
- }
- break;
- case ClearPortFeature:
- dev_dbg(dev, "ClearPortFeature\n");
- if (wIndex != 1) {
- dev_dbg(dev, " INVALID\n");
- return -EINVAL;
- }
-
- switch (wValue) {
- case USB_PORT_FEAT_ENABLE:
- dev_dbg(dev, " ENABLE\n");
- spin_lock_irqsave(&usb->lock, flags);
- cvmx_usb_disable(usb);
- spin_unlock_irqrestore(&usb->lock, flags);
- break;
- case USB_PORT_FEAT_SUSPEND:
- dev_dbg(dev, " SUSPEND\n");
- /* Not supported on Octeon */
- break;
- case USB_PORT_FEAT_POWER:
- dev_dbg(dev, " POWER\n");
- /* Not supported on Octeon */
- break;
- case USB_PORT_FEAT_INDICATOR:
- dev_dbg(dev, " INDICATOR\n");
-			/* Port indicator not supported */
- break;
- case USB_PORT_FEAT_C_CONNECTION:
- dev_dbg(dev, " C_CONNECTION\n");
-			/* Clears the driver's internal connect status change flag */
- spin_lock_irqsave(&usb->lock, flags);
- usb->port_status = cvmx_usb_get_status(usb);
- spin_unlock_irqrestore(&usb->lock, flags);
- break;
- case USB_PORT_FEAT_C_RESET:
- dev_dbg(dev, " C_RESET\n");
- /*
- * Clears the driver's internal Port Reset Change flag.
- */
- spin_lock_irqsave(&usb->lock, flags);
- usb->port_status = cvmx_usb_get_status(usb);
- spin_unlock_irqrestore(&usb->lock, flags);
- break;
- case USB_PORT_FEAT_C_ENABLE:
- dev_dbg(dev, " C_ENABLE\n");
- /*
- * Clears the driver's internal Port Enable/Disable
- * Change flag.
- */
- spin_lock_irqsave(&usb->lock, flags);
- usb->port_status = cvmx_usb_get_status(usb);
- spin_unlock_irqrestore(&usb->lock, flags);
- break;
- case USB_PORT_FEAT_C_SUSPEND:
- dev_dbg(dev, " C_SUSPEND\n");
- /*
- * Clears the driver's internal Port Suspend Change
- * flag, which is set when resume signaling on the host
- * port is complete.
- */
- break;
- case USB_PORT_FEAT_C_OVER_CURRENT:
- dev_dbg(dev, " C_OVER_CURRENT\n");
-			/* Clears the driver's over-current change flag */
- spin_lock_irqsave(&usb->lock, flags);
- usb->port_status = cvmx_usb_get_status(usb);
- spin_unlock_irqrestore(&usb->lock, flags);
- break;
- default:
- dev_dbg(dev, " UNKNOWN\n");
- return -EINVAL;
- }
- break;
- case GetHubDescriptor:
- dev_dbg(dev, "GetHubDescriptor\n");
- desc = (struct usb_hub_descriptor *)buf;
- desc->bDescLength = 9;
- desc->bDescriptorType = 0x29;
- desc->bNbrPorts = 1;
- desc->wHubCharacteristics = cpu_to_le16(0x08);
- desc->bPwrOn2PwrGood = 1;
- desc->bHubContrCurrent = 0;
- desc->u.hs.DeviceRemovable[0] = 0;
- desc->u.hs.DeviceRemovable[1] = 0xff;
- break;
- case GetHubStatus:
- dev_dbg(dev, "GetHubStatus\n");
- *(__le32 *)buf = 0;
- break;
- case GetPortStatus:
- dev_dbg(dev, "GetPortStatus\n");
- if (wIndex != 1) {
- dev_dbg(dev, " INVALID\n");
- return -EINVAL;
- }
-
- spin_lock_irqsave(&usb->lock, flags);
- usb_port_status = cvmx_usb_get_status(usb);
- spin_unlock_irqrestore(&usb->lock, flags);
- port_status = 0;
-
- if (usb_port_status.connect_change) {
- port_status |= (1 << USB_PORT_FEAT_C_CONNECTION);
- dev_dbg(dev, " C_CONNECTION\n");
- }
-
- if (usb_port_status.port_enabled) {
- port_status |= (1 << USB_PORT_FEAT_C_ENABLE);
- dev_dbg(dev, " C_ENABLE\n");
- }
-
- if (usb_port_status.connected) {
- port_status |= (1 << USB_PORT_FEAT_CONNECTION);
- dev_dbg(dev, " CONNECTION\n");
- }
-
- if (usb_port_status.port_enabled) {
- port_status |= (1 << USB_PORT_FEAT_ENABLE);
- dev_dbg(dev, " ENABLE\n");
- }
-
- if (usb_port_status.port_over_current) {
- port_status |= (1 << USB_PORT_FEAT_OVER_CURRENT);
- dev_dbg(dev, " OVER_CURRENT\n");
- }
-
- if (usb_port_status.port_powered) {
- port_status |= (1 << USB_PORT_FEAT_POWER);
- dev_dbg(dev, " POWER\n");
- }
-
- if (usb_port_status.port_speed == CVMX_USB_SPEED_HIGH) {
- port_status |= USB_PORT_STAT_HIGH_SPEED;
- dev_dbg(dev, " HIGHSPEED\n");
- } else if (usb_port_status.port_speed == CVMX_USB_SPEED_LOW) {
- port_status |= (1 << USB_PORT_FEAT_LOWSPEED);
- dev_dbg(dev, " LOWSPEED\n");
- }
-
- *((__le32 *)buf) = cpu_to_le32(port_status);
- break;
- case SetHubFeature:
- dev_dbg(dev, "SetHubFeature\n");
- /* No HUB features supported */
- break;
- case SetPortFeature:
- dev_dbg(dev, "SetPortFeature\n");
- if (wIndex != 1) {
- dev_dbg(dev, " INVALID\n");
- return -EINVAL;
- }
-
- switch (wValue) {
- case USB_PORT_FEAT_SUSPEND:
- dev_dbg(dev, " SUSPEND\n");
- return -EINVAL;
- case USB_PORT_FEAT_POWER:
- dev_dbg(dev, " POWER\n");
- /*
- * Program the port power bit to drive VBUS on the USB.
- */
- spin_lock_irqsave(&usb->lock, flags);
- USB_SET_FIELD32(CVMX_USBCX_HPRT(usb->index),
- cvmx_usbcx_hprt, prtpwr, 1);
- spin_unlock_irqrestore(&usb->lock, flags);
- return 0;
- case USB_PORT_FEAT_RESET:
- dev_dbg(dev, " RESET\n");
- spin_lock_irqsave(&usb->lock, flags);
- cvmx_usb_reset_port(usb);
- spin_unlock_irqrestore(&usb->lock, flags);
- return 0;
- case USB_PORT_FEAT_INDICATOR:
- dev_dbg(dev, " INDICATOR\n");
- /* Not supported */
- break;
- default:
- dev_dbg(dev, " UNKNOWN\n");
- return -EINVAL;
- }
- break;
- default:
- dev_dbg(dev, "Unknown root hub request\n");
- return -EINVAL;
- }
- return 0;
-}
-
-static const struct hc_driver octeon_hc_driver = {
- .description = "Octeon USB",
- .product_desc = "Octeon Host Controller",
- .hcd_priv_size = sizeof(struct octeon_hcd),
- .irq = octeon_usb_irq,
- .flags = HCD_MEMORY | HCD_DMA | HCD_USB2,
- .start = octeon_usb_start,
- .stop = octeon_usb_stop,
- .urb_enqueue = octeon_usb_urb_enqueue,
- .urb_dequeue = octeon_usb_urb_dequeue,
- .endpoint_disable = octeon_usb_endpoint_disable,
- .get_frame_number = octeon_usb_get_frame_number,
- .hub_status_data = octeon_usb_hub_status_data,
- .hub_control = octeon_usb_hub_control,
- .map_urb_for_dma = octeon_map_urb_for_dma,
- .unmap_urb_for_dma = octeon_unmap_urb_for_dma,
-};
-
-static int octeon_usb_probe(struct platform_device *pdev)
-{
- int status;
- int initialize_flags;
- int usb_num;
- struct resource *res_mem;
- struct device_node *usbn_node;
- int irq = platform_get_irq(pdev, 0);
- struct device *dev = &pdev->dev;
- struct octeon_hcd *usb;
- struct usb_hcd *hcd;
- u32 clock_rate = 48000000;
- bool is_crystal_clock = false;
- const char *clock_type;
- int i;
-
- if (!dev->of_node) {
- dev_err(dev, "Error: empty of_node\n");
- return -ENXIO;
- }
- usbn_node = dev->of_node->parent;
-
- i = of_property_read_u32(usbn_node,
- "clock-frequency", &clock_rate);
- if (i)
- i = of_property_read_u32(usbn_node,
- "refclk-frequency", &clock_rate);
- if (i) {
- dev_err(dev, "No USBN \"clock-frequency\"\n");
- return -ENXIO;
- }
- switch (clock_rate) {
- case 12000000:
- initialize_flags = CVMX_USB_INITIALIZE_FLAGS_CLOCK_12MHZ;
- break;
- case 24000000:
- initialize_flags = CVMX_USB_INITIALIZE_FLAGS_CLOCK_24MHZ;
- break;
- case 48000000:
- initialize_flags = CVMX_USB_INITIALIZE_FLAGS_CLOCK_48MHZ;
- break;
- default:
- dev_err(dev, "Illegal USBN \"clock-frequency\" %u\n",
- clock_rate);
- return -ENXIO;
- }
-
- i = of_property_read_string(usbn_node,
- "cavium,refclk-type", &clock_type);
- if (i)
- i = of_property_read_string(usbn_node,
- "refclk-type", &clock_type);
-
- if (!i && strcmp("crystal", clock_type) == 0)
- is_crystal_clock = true;
-
- if (is_crystal_clock)
- initialize_flags |= CVMX_USB_INITIALIZE_FLAGS_CLOCK_XO_XI;
- else
- initialize_flags |= CVMX_USB_INITIALIZE_FLAGS_CLOCK_XO_GND;
-
- res_mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!res_mem) {
- dev_err(dev, "found no memory resource\n");
- return -ENXIO;
- }
- usb_num = (res_mem->start >> 44) & 1;
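-	/*
-	 * Bit 44 of the block's physical address selects between the two
-	 * USB units; this matches the ((bid) & 1) * 0x100000000000ull
-	 * stride used by the CVMX_USBCX/USBNX register macros.
-	 */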
-
- if (irq < 0) {
- /* Defective device tree, but we know how to fix it. */
- irq_hw_number_t hwirq = usb_num ? (1 << 6) + 17 : 56;
-
- irq = irq_create_mapping(NULL, hwirq);
- }
-
- /*
-	 * Set the DMA mask to 64 bits so we get buffers already translated
-	 * for DMA.
- */
- i = dma_coerce_mask_and_coherent(dev, DMA_BIT_MASK(64));
- if (i)
- return i;
-
- /*
- * Only cn52XX and cn56XX have DWC_OTG USB hardware and the
- * IOB priority registers. Under heavy network load USB
- * hardware can be starved by the IOB causing a crash. Give
- * it a priority boost if it has been waiting more than 400
- * cycles to avoid this situation.
- *
- * Testing indicates that a cnt_val of 8192 is not sufficient,
- * but no failures are seen with 4096. We choose a value of
- * 400 to give a safety factor of 10.
- */
- if (OCTEON_IS_MODEL(OCTEON_CN52XX) || OCTEON_IS_MODEL(OCTEON_CN56XX)) {
- union cvmx_iob_n2c_l2c_pri_cnt pri_cnt;
-
- pri_cnt.u64 = 0;
- pri_cnt.s.cnt_enb = 1;
- pri_cnt.s.cnt_val = 400;
- cvmx_write_csr(CVMX_IOB_N2C_L2C_PRI_CNT, pri_cnt.u64);
- }
-
- hcd = usb_create_hcd(&octeon_hc_driver, dev, dev_name(dev));
- if (!hcd) {
- dev_dbg(dev, "Failed to allocate memory for HCD\n");
- return -1;
- }
- hcd->uses_new_polling = 1;
- usb = (struct octeon_hcd *)hcd->hcd_priv;
-
- spin_lock_init(&usb->lock);
-
- usb->init_flags = initialize_flags;
-
- /* Initialize the USB state structure */
- usb->index = usb_num;
- INIT_LIST_HEAD(&usb->idle_pipes);
- for (i = 0; i < ARRAY_SIZE(usb->active_pipes); i++)
- INIT_LIST_HEAD(&usb->active_pipes[i]);
-
- /* Due to an errata, CN31XX doesn't support DMA */
- if (OCTEON_IS_MODEL(OCTEON_CN31XX)) {
- usb->init_flags |= CVMX_USB_INITIALIZE_FLAGS_NO_DMA;
- /* Only use one channel with non DMA */
- usb->idle_hardware_channels = 0x1;
- } else if (OCTEON_IS_MODEL(OCTEON_CN5XXX)) {
- /* CN5XXX have an errata with channel 3 */
- usb->idle_hardware_channels = 0xf7;
- } else {
- usb->idle_hardware_channels = 0xff;
- }
-
- status = cvmx_usb_initialize(dev, usb);
- if (status) {
- dev_dbg(dev, "USB initialization failed with %d\n", status);
- usb_put_hcd(hcd);
- return -1;
- }
-
- status = usb_add_hcd(hcd, irq, 0);
- if (status) {
- dev_dbg(dev, "USB add HCD failed with %d\n", status);
- usb_put_hcd(hcd);
- return -1;
- }
- device_wakeup_enable(hcd->self.controller);
-
- dev_info(dev, "Registered HCD for port %d on irq %d\n", usb_num, irq);
-
- return 0;
-}
-
-static int octeon_usb_remove(struct platform_device *pdev)
-{
- int status;
- struct device *dev = &pdev->dev;
- struct usb_hcd *hcd = dev_get_drvdata(dev);
- struct octeon_hcd *usb = hcd_to_octeon(hcd);
- unsigned long flags;
-
- usb_remove_hcd(hcd);
- spin_lock_irqsave(&usb->lock, flags);
- status = cvmx_usb_shutdown(usb);
- spin_unlock_irqrestore(&usb->lock, flags);
- if (status)
- dev_dbg(dev, "USB shutdown failed with %d\n", status);
-
- usb_put_hcd(hcd);
-
- return 0;
-}
-
-static const struct of_device_id octeon_usb_match[] = {
- {
- .compatible = "cavium,octeon-5750-usbc",
- },
- {},
-};
-MODULE_DEVICE_TABLE(of, octeon_usb_match);
-
-static struct platform_driver octeon_usb_driver = {
- .driver = {
- .name = "octeon-hcd",
- .of_match_table = octeon_usb_match,
- },
- .probe = octeon_usb_probe,
- .remove = octeon_usb_remove,
-};
-
-static int __init octeon_usb_driver_init(void)
-{
- if (usb_disabled())
- return 0;
-
- return platform_driver_register(&octeon_usb_driver);
-}
-module_init(octeon_usb_driver_init);
-
-static void __exit octeon_usb_driver_exit(void)
-{
- if (usb_disabled())
- return;
-
- platform_driver_unregister(&octeon_usb_driver);
-}
-module_exit(octeon_usb_driver_exit);
-
-MODULE_LICENSE("GPL");
-MODULE_AUTHOR("Cavium, Inc. <support@cavium.com>");
-MODULE_DESCRIPTION("Cavium Inc. OCTEON USB Host driver.");
diff --git a/drivers/staging/octeon-usb/octeon-hcd.h b/drivers/staging/octeon-usb/octeon-hcd.h
deleted file mode 100644
index 9ed619c93a4e..000000000000
--- a/drivers/staging/octeon-usb/octeon-hcd.h
+++ /dev/null
@@ -1,1847 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/*
- * Octeon HCD hardware register definitions.
- *
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file "COPYING" in the main directory of this archive
- * for more details.
- *
- * Some parts of the code were originally released under BSD license:
- *
- * Copyright (c) 2003-2010 Cavium Networks (support@cavium.com). All rights
- * reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are
- * met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- *
- * * Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials provided
- * with the distribution.
- *
- * * Neither the name of Cavium Networks nor the names of
- * its contributors may be used to endorse or promote products
- * derived from this software without specific prior written
- * permission.
- *
- * This Software, including technical data, may be subject to U.S. export
- * control laws, including the U.S. Export Administration Act and its associated
- * regulations, and may be subject to export or import regulations in other
- * countries.
- *
- * TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
- * AND WITH ALL FAULTS AND CAVIUM NETWORKS MAKES NO PROMISES, REPRESENTATIONS OR
- * WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH RESPECT TO
- * THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY REPRESENTATION
- * OR DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT DEFECTS, AND CAVIUM
- * SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) WARRANTIES OF TITLE,
- * MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A PARTICULAR PURPOSE, LACK OF
- * VIRUSES, ACCURACY OR COMPLETENESS, QUIET ENJOYMENT, QUIET POSSESSION OR
- * CORRESPONDENCE TO DESCRIPTION. THE ENTIRE RISK ARISING OUT OF USE OR
- * PERFORMANCE OF THE SOFTWARE LIES WITH YOU.
- */
-
-#ifndef __OCTEON_HCD_H__
-#define __OCTEON_HCD_H__
-
-#include <asm/bitfield.h>
-
-#define CVMX_USBCXBASE 0x00016F0010000000ull
-#define CVMX_USBCXREG1(reg, bid) \
- (CVMX_ADD_IO_SEG(CVMX_USBCXBASE | reg) + \
- ((bid) & 1) * 0x100000000000ull)
-#define CVMX_USBCXREG2(reg, bid, off) \
- (CVMX_ADD_IO_SEG(CVMX_USBCXBASE | reg) + \
- (((off) & 7) + ((bid) & 1) * 0x8000000000ull) * 32)
-
-#define CVMX_USBCX_GAHBCFG(bid) CVMX_USBCXREG1(0x008, bid)
-#define CVMX_USBCX_GHWCFG3(bid) CVMX_USBCXREG1(0x04c, bid)
-#define CVMX_USBCX_GINTMSK(bid) CVMX_USBCXREG1(0x018, bid)
-#define CVMX_USBCX_GINTSTS(bid) CVMX_USBCXREG1(0x014, bid)
-#define CVMX_USBCX_GNPTXFSIZ(bid) CVMX_USBCXREG1(0x028, bid)
-#define CVMX_USBCX_GNPTXSTS(bid) CVMX_USBCXREG1(0x02c, bid)
-#define CVMX_USBCX_GOTGCTL(bid) CVMX_USBCXREG1(0x000, bid)
-#define CVMX_USBCX_GRSTCTL(bid) CVMX_USBCXREG1(0x010, bid)
-#define CVMX_USBCX_GRXFSIZ(bid) CVMX_USBCXREG1(0x024, bid)
-#define CVMX_USBCX_GRXSTSPH(bid) CVMX_USBCXREG1(0x020, bid)
-#define CVMX_USBCX_GUSBCFG(bid) CVMX_USBCXREG1(0x00c, bid)
-#define CVMX_USBCX_HAINT(bid) CVMX_USBCXREG1(0x414, bid)
-#define CVMX_USBCX_HAINTMSK(bid) CVMX_USBCXREG1(0x418, bid)
-#define CVMX_USBCX_HCCHARX(off, bid) CVMX_USBCXREG2(0x500, bid, off)
-#define CVMX_USBCX_HCFG(bid) CVMX_USBCXREG1(0x400, bid)
-#define CVMX_USBCX_HCINTMSKX(off, bid) CVMX_USBCXREG2(0x50c, bid, off)
-#define CVMX_USBCX_HCINTX(off, bid) CVMX_USBCXREG2(0x508, bid, off)
-#define CVMX_USBCX_HCSPLTX(off, bid) CVMX_USBCXREG2(0x504, bid, off)
-#define CVMX_USBCX_HCTSIZX(off, bid) CVMX_USBCXREG2(0x510, bid, off)
-#define CVMX_USBCX_HFIR(bid) CVMX_USBCXREG1(0x404, bid)
-#define CVMX_USBCX_HFNUM(bid) CVMX_USBCXREG1(0x408, bid)
-#define CVMX_USBCX_HPRT(bid) CVMX_USBCXREG1(0x440, bid)
-#define CVMX_USBCX_HPTXFSIZ(bid) CVMX_USBCXREG1(0x100, bid)
-#define CVMX_USBCX_HPTXSTS(bid) CVMX_USBCXREG1(0x410, bid)
-
-#define CVMX_USBNXBID1(bid) (((bid) & 1) * 0x10000000ull)
-#define CVMX_USBNXBID2(bid) (((bid) & 1) * 0x100000000000ull)
-
-#define CVMX_USBNXREG1(reg, bid) \
- (CVMX_ADD_IO_SEG(0x0001180068000000ull | reg) + CVMX_USBNXBID1(bid))
-#define CVMX_USBNXREG2(reg, bid) \
- (CVMX_ADD_IO_SEG(0x00016F0000000000ull | reg) + CVMX_USBNXBID2(bid))
-
-#define CVMX_USBNX_CLK_CTL(bid) CVMX_USBNXREG1(0x10, bid)
-#define CVMX_USBNX_DMA0_INB_CHN0(bid) CVMX_USBNXREG2(0x818, bid)
-#define CVMX_USBNX_DMA0_OUTB_CHN0(bid) CVMX_USBNXREG2(0x858, bid)
-#define CVMX_USBNX_USBP_CTL_STATUS(bid) CVMX_USBNXREG1(0x18, bid)
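As a quick orientation before the register unions below: the CVMX_USBCX*/CVMX_USBNX* macros above simply compose a 64-bit CSR address from a fixed base, the register offset, the USB block number (bid), and, for per-channel registers, the channel index. A minimal sketch, assuming CVMX_ADD_IO_SEG() from the OCTEON cvmx headers:

/*
 * Hypothetical usage (not from the original file): resolve a couple
 * of CSR addresses for later 32-bit reads/writes.
 */
u64 gahbcfg_addr = CVMX_USBCX_GAHBCFG(0);    /* block 0, offset 0x008 */
u64 hcchar3_addr = CVMX_USBCX_HCCHARX(3, 1); /* block 1, channel 3 */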
-
-/**
- * cvmx_usbc#_gahbcfg
- *
- * Core AHB Configuration Register (GAHBCFG)
- *
- * This register can be used to configure the core after power-on or a change in
- * mode of operation. This register mainly contains AHB system-related
- * configuration parameters. The AHB is the processor interface to the O2P USB
- * core. In general, software need not know about this interface except to
- * program the values as specified.
- *
- * The application must program this register as part of the O2P USB core
- * initialization. Do not change this register after the initial programming.
- */
-union cvmx_usbcx_gahbcfg {
- u32 u32;
- /**
- * struct cvmx_usbcx_gahbcfg_s
- * @ptxfemplvl: Periodic TxFIFO Empty Level (PTxFEmpLvl)
- * Software should set this bit to 0x1.
- * Indicates when the Periodic TxFIFO Empty Interrupt bit in the
- * Core Interrupt register (GINTSTS.PTxFEmp) is triggered. This
- * bit is used only in Slave mode.
- * * 1'b0: GINTSTS.PTxFEmp interrupt indicates that the Periodic
- * TxFIFO is half empty
- * * 1'b1: GINTSTS.PTxFEmp interrupt indicates that the Periodic
- * TxFIFO is completely empty
- * @nptxfemplvl: Non-Periodic TxFIFO Empty Level (NPTxFEmpLvl)
- * Software should set this bit to 0x1.
- * Indicates when the Non-Periodic TxFIFO Empty Interrupt bit in
- * the Core Interrupt register (GINTSTS.NPTxFEmp) is triggered.
- * This bit is used only in Slave mode.
- * * 1'b0: GINTSTS.NPTxFEmp interrupt indicates that the Non-
- * Periodic TxFIFO is half empty
- * * 1'b1: GINTSTS.NPTxFEmp interrupt indicates that the Non-
- * Periodic TxFIFO is completely empty
- * @dmaen: DMA Enable (DMAEn)
- * * 1'b0: Core operates in Slave mode
- * * 1'b1: Core operates in a DMA mode
- * @hbstlen: Burst Length/Type (HBstLen)
- * This field has no effect and should be left as 0x0.
- * @glblintrmsk: Global Interrupt Mask (GlblIntrMsk)
- * Software should set this field to 0x1.
- * The application uses this bit to mask or unmask the interrupt
- * line assertion to itself. Irrespective of this bit's setting,
- * the interrupt status registers are updated by the core.
- * * 1'b0: Mask the interrupt assertion to the application.
- * * 1'b1: Unmask the interrupt assertion to the application.
- */
- struct cvmx_usbcx_gahbcfg_s {
- __BITFIELD_FIELD(u32 reserved_9_31 : 23,
- __BITFIELD_FIELD(u32 ptxfemplvl : 1,
- __BITFIELD_FIELD(u32 nptxfemplvl : 1,
- __BITFIELD_FIELD(u32 reserved_6_6 : 1,
- __BITFIELD_FIELD(u32 dmaen : 1,
- __BITFIELD_FIELD(u32 hbstlen : 4,
- __BITFIELD_FIELD(u32 glblintrmsk : 1,
- ;)))))))
- } s;
-};
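The field notes above pin down the recommended programming, so a minimal init sketch follows directly from them; usbc_wr32() is a hypothetical 32-bit CSR write helper, not an accessor from this file:

/* One-time GAHBCFG setup per the recommendations documented above. */
union cvmx_usbcx_gahbcfg gahbcfg = { .u32 = 0 };

gahbcfg.s.dmaen = 1;       /* run the core in DMA mode */
gahbcfg.s.hbstlen = 0;     /* field has no effect; leave 0x0 */
gahbcfg.s.nptxfemplvl = 1; /* recommended: interrupt on fully empty */
gahbcfg.s.ptxfemplvl = 1;  /* recommended: interrupt on fully empty */
gahbcfg.s.glblintrmsk = 1; /* unmask the interrupt line to the app */
usbc_wr32(CVMX_USBCX_GAHBCFG(0), gahbcfg.u32);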
-
-/**
- * cvmx_usbc#_ghwcfg3
- *
- * User HW Config3 Register (GHWCFG3)
- *
- * This register contains the configuration options of the O2P USB core.
- */
-union cvmx_usbcx_ghwcfg3 {
- u32 u32;
- /**
- * struct cvmx_usbcx_ghwcfg3_s
- * @dfifodepth: DFIFO Depth (DfifoDepth)
- * This value is in terms of 32-bit words.
- * * Minimum value is 32
- * * Maximum value is 32768
- * @ahbphysync: AHB and PHY Synchronous (AhbPhySync)
- * Indicates whether AHB and PHY clocks are synchronous to
- * each other.
- * * 1'b0: No
- * * 1'b1: Yes
- * This bit is tied to 1.
- * @rsttype: Reset Style for Clocked always Blocks in RTL (RstType)
- * * 1'b0: Asynchronous reset is used in the core
- * * 1'b1: Synchronous reset is used in the core
- * @optfeature: Optional Features Removed (OptFeature)
- * Indicates whether the User ID register, GPIO interface ports,
- * and SOF toggle and counter ports were removed for gate count
- * optimization.
- * @vendor_control_interface_support: Vendor Control Interface Support
- * * 1'b0: Vendor Control Interface is not available on the core.
- * * 1'b1: Vendor Control Interface is available.
- * @i2c_selection: I2C Selection
- * * 1'b0: I2C Interface is not available on the core.
- * * 1'b1: I2C Interface is available on the core.
- * @otgen: OTG Function Enabled (OtgEn)
- * The application uses this bit to indicate the O2P USB core's
- * OTG capabilities.
- * * 1'b0: Not OTG capable
- * * 1'b1: OTG Capable
- * @pktsizewidth: Width of Packet Size Counters (PktSizeWidth)
- * * 3'b000: 4 bits
- * * 3'b001: 5 bits
- * * 3'b010: 6 bits
- * * 3'b011: 7 bits
- * * 3'b100: 8 bits
- * * 3'b101: 9 bits
- * * 3'b110: 10 bits
- * * Others: Reserved
- * @xfersizewidth: Width of Transfer Size Counters (XferSizeWidth)
- * * 4'b0000: 11 bits
- * * 4'b0001: 12 bits
- * - ...
- * * 4'b1000: 19 bits
- * * Others: Reserved
- */
- struct cvmx_usbcx_ghwcfg3_s {
- __BITFIELD_FIELD(u32 dfifodepth : 16,
- __BITFIELD_FIELD(u32 reserved_13_15 : 3,
- __BITFIELD_FIELD(u32 ahbphysync : 1,
- __BITFIELD_FIELD(u32 rsttype : 1,
- __BITFIELD_FIELD(u32 optfeature : 1,
- __BITFIELD_FIELD(u32 vendor_control_interface_support : 1,
- __BITFIELD_FIELD(u32 i2c_selection : 1,
- __BITFIELD_FIELD(u32 otgen : 1,
- __BITFIELD_FIELD(u32 pktsizewidth : 3,
- __BITFIELD_FIELD(u32 xfersizewidth : 4,
- ;))))))))))
- } s;
-};
-
-/**
- * cvmx_usbc#_gintmsk
- *
- * Core Interrupt Mask Register (GINTMSK)
- *
- * This register works with the Core Interrupt register to interrupt the
- * application. When an interrupt bit is masked, the interrupt associated with
- * that bit will not be generated. However, the Core Interrupt (GINTSTS)
- * register bit corresponding to that interrupt will still be set.
- * Mask interrupt: 1'b0, Unmask interrupt: 1'b1
- */
-union cvmx_usbcx_gintmsk {
- u32 u32;
- /**
- * struct cvmx_usbcx_gintmsk_s
- * @wkupintmsk: Resume/Remote Wakeup Detected Interrupt Mask
- * (WkUpIntMsk)
- * @sessreqintmsk: Session Request/New Session Detected Interrupt Mask
- * (SessReqIntMsk)
- * @disconnintmsk: Disconnect Detected Interrupt Mask (DisconnIntMsk)
- * @conidstschngmsk: Connector ID Status Change Mask (ConIDStsChngMsk)
- * @ptxfempmsk: Periodic TxFIFO Empty Mask (PTxFEmpMsk)
- * @hchintmsk: Host Channels Interrupt Mask (HChIntMsk)
- * @prtintmsk: Host Port Interrupt Mask (PrtIntMsk)
- * @fetsuspmsk: Data Fetch Suspended Mask (FetSuspMsk)
- * @incomplpmsk: Incomplete Periodic Transfer Mask (incomplPMsk)
- * Incomplete Isochronous OUT Transfer Mask
- * (incompISOOUTMsk)
- * @incompisoinmsk: Incomplete Isochronous IN Transfer Mask
- * (incompISOINMsk)
- * @oepintmsk: OUT Endpoints Interrupt Mask (OEPIntMsk)
- * @inepintmsk: IN Endpoints Interrupt Mask (INEPIntMsk)
- * @epmismsk: Endpoint Mismatch Interrupt Mask (EPMisMsk)
- * @eopfmsk: End of Periodic Frame Interrupt Mask (EOPFMsk)
- * @isooutdropmsk: Isochronous OUT Packet Dropped Interrupt Mask
- * (ISOOutDropMsk)
- * @enumdonemsk: Enumeration Done Mask (EnumDoneMsk)
- * @usbrstmsk: USB Reset Mask (USBRstMsk)
- * @usbsuspmsk: USB Suspend Mask (USBSuspMsk)
- * @erlysuspmsk: Early Suspend Mask (ErlySuspMsk)
- * @i2cint: I2C Interrupt Mask (I2CINT)
- * @ulpickintmsk: ULPI Carkit Interrupt Mask (ULPICKINTMsk)
- * I2C Carkit Interrupt Mask (I2CCKINTMsk)
- * @goutnakeffmsk: Global OUT NAK Effective Mask (GOUTNakEffMsk)
- * @ginnakeffmsk: Global Non-Periodic IN NAK Effective Mask
- * (GINNakEffMsk)
- * @nptxfempmsk: Non-Periodic TxFIFO Empty Mask (NPTxFEmpMsk)
- * @rxflvlmsk: Receive FIFO Non-Empty Mask (RxFLvlMsk)
- * @sofmsk: Start of (micro)Frame Mask (SofMsk)
- * @otgintmsk: OTG Interrupt Mask (OTGIntMsk)
- * @modemismsk: Mode Mismatch Interrupt Mask (ModeMisMsk)
- */
- struct cvmx_usbcx_gintmsk_s {
- __BITFIELD_FIELD(u32 wkupintmsk : 1,
- __BITFIELD_FIELD(u32 sessreqintmsk : 1,
- __BITFIELD_FIELD(u32 disconnintmsk : 1,
- __BITFIELD_FIELD(u32 conidstschngmsk : 1,
- __BITFIELD_FIELD(u32 reserved_27_27 : 1,
- __BITFIELD_FIELD(u32 ptxfempmsk : 1,
- __BITFIELD_FIELD(u32 hchintmsk : 1,
- __BITFIELD_FIELD(u32 prtintmsk : 1,
- __BITFIELD_FIELD(u32 reserved_23_23 : 1,
- __BITFIELD_FIELD(u32 fetsuspmsk : 1,
- __BITFIELD_FIELD(u32 incomplpmsk : 1,
- __BITFIELD_FIELD(u32 incompisoinmsk : 1,
- __BITFIELD_FIELD(u32 oepintmsk : 1,
- __BITFIELD_FIELD(u32 inepintmsk : 1,
- __BITFIELD_FIELD(u32 epmismsk : 1,
- __BITFIELD_FIELD(u32 reserved_16_16 : 1,
- __BITFIELD_FIELD(u32 eopfmsk : 1,
- __BITFIELD_FIELD(u32 isooutdropmsk : 1,
- __BITFIELD_FIELD(u32 enumdonemsk : 1,
- __BITFIELD_FIELD(u32 usbrstmsk : 1,
- __BITFIELD_FIELD(u32 usbsuspmsk : 1,
- __BITFIELD_FIELD(u32 erlysuspmsk : 1,
- __BITFIELD_FIELD(u32 i2cint : 1,
- __BITFIELD_FIELD(u32 ulpickintmsk : 1,
- __BITFIELD_FIELD(u32 goutnakeffmsk : 1,
- __BITFIELD_FIELD(u32 ginnakeffmsk : 1,
- __BITFIELD_FIELD(u32 nptxfempmsk : 1,
- __BITFIELD_FIELD(u32 rxflvlmsk : 1,
- __BITFIELD_FIELD(u32 sofmsk : 1,
- __BITFIELD_FIELD(u32 otgintmsk : 1,
- __BITFIELD_FIELD(u32 modemismsk : 1,
- __BITFIELD_FIELD(u32 reserved_0_0 : 1,
- ;))))))))))))))))))))))))))))))))
- } s;
-};
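Since masked events still latch in GINTSTS, a host-mode driver typically unmasks only the summary bits it will service. An illustrative sketch (the bit choice is an example, not the original driver's policy; usbc_wr32() is a hypothetical helper):

union cvmx_usbcx_gintmsk gintmsk = { .u32 = 0 };

gintmsk.s.hchintmsk = 1;     /* channel events, fanned out via HAINT */
gintmsk.s.prtintmsk = 1;     /* port connect/enable/overcurrent */
gintmsk.s.disconnintmsk = 1; /* device disconnect */
usbc_wr32(CVMX_USBCX_GINTMSK(0), gintmsk.u32);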
-
-/**
- * cvmx_usbc#_gintsts
- *
- * Core Interrupt Register (GINTSTS)
- *
- * This register interrupts the application for system-level events in the
- * current mode of operation (Device mode or Host mode). Some of the bits
- * in this register are valid only in Host mode,
- * while others are valid in Device mode only. This register also indicates the
- * current mode of operation. In order to clear the interrupt status bits of
- * type R_SS_WC, the application must write 1'b1 into the bit. The FIFO status
- * interrupts are read only; once software reads from or writes to the FIFO
- * while servicing these interrupts, FIFO interrupt conditions are cleared
- * automatically.
- */
-union cvmx_usbcx_gintsts {
- u32 u32;
- /**
- * struct cvmx_usbcx_gintsts_s
- * @wkupint: Resume/Remote Wakeup Detected Interrupt (WkUpInt)
- * In Device mode, this interrupt is asserted when a resume is
- * detected on the USB. In Host mode, this interrupt is asserted
- * when a remote wakeup is detected on the USB.
- * For more information on how to use this interrupt, see "Partial
- * Power-Down and Clock Gating Programming Model" on
- * page 353.
- * @sessreqint: Session Request/New Session Detected Interrupt
- * (SessReqInt)
- * In Host mode, this interrupt is asserted when a session request
- * is detected from the device. In Device mode, this interrupt is
- * asserted when the utmiotg_bvalid signal goes high.
- * For more information on how to use this interrupt, see "Partial
- * Power-Down and Clock Gating Programming Model" on
- * page 353.
- * @disconnint: Disconnect Detected Interrupt (DisconnInt)
- * Asserted when a device disconnect is detected.
- * @conidstschng: Connector ID Status Change (ConIDStsChng)
- * The core sets this bit when there is a change in connector ID
- * status.
- * @ptxfemp: Periodic TxFIFO Empty (PTxFEmp)
- * Asserted when the Periodic Transmit FIFO is either half or
- * completely empty and there is space for at least one entry to be
- * written in the Periodic Request Queue. The half or completely
- * empty status is determined by the Periodic TxFIFO Empty Level
- * bit in the Core AHB Configuration register
- * (GAHBCFG.PTxFEmpLvl).
- * @hchint: Host Channels Interrupt (HChInt)
- * The core sets this bit to indicate that an interrupt is pending
- * on one of the channels of the core (in Host mode). The
- * application must read the Host All Channels Interrupt (HAINT)
- * register to determine the exact number of the channel on which
- * the interrupt occurred, and then read the corresponding Host
- * Channel-n Interrupt (HCINTn) register to determine the exact
- * cause of the interrupt. The application must clear the
- * appropriate status bit in the HCINTn register to clear this bit.
- * @prtint: Host Port Interrupt (PrtInt)
- * The core sets this bit to indicate a change in port status of
- * one of the O2P USB core ports in Host mode. The application must
- * read the Host Port Control and Status (HPRT) register to
- * determine the exact event that caused this interrupt. The
- * application must clear the appropriate status bit in the Host
- * Port Control and Status register to clear this bit.
- * @fetsusp: Data Fetch Suspended (FetSusp)
- * This interrupt is valid only in DMA mode. This interrupt
- * indicates that the core has stopped fetching data for IN
- * endpoints due to the unavailability of TxFIFO space or Request
- * Queue space. This interrupt is used by the application for an
- * endpoint mismatch algorithm.
- * @incomplp: Incomplete Periodic Transfer (incomplP)
- * In Host mode, the core sets this interrupt bit when there are
- * incomplete periodic transactions still pending which are
- * scheduled for the current microframe.
- * Incomplete Isochronous OUT Transfer (incompISOOUT)
- * In Device mode, the core sets this interrupt to indicate that
- * there is at least one isochronous OUT endpoint on which the
- * transfer is not completed in the current microframe. This
- * interrupt is asserted along with the End of Periodic Frame
- * Interrupt (EOPF) bit in this register.
- * @incompisoin: Incomplete Isochronous IN Transfer (incompISOIN)
- * The core sets this interrupt to indicate that there is at least
- * one isochronous IN endpoint on which the transfer is not
- * completed in the current microframe. This interrupt is asserted
- * along with the End of Periodic Frame Interrupt (EOPF) bit in
- * this register.
- * @oepint: OUT Endpoints Interrupt (OEPInt)
- * The core sets this bit to indicate that an interrupt is pending
- * on one of the OUT endpoints of the core (in Device mode). The
- * application must read the Device All Endpoints Interrupt
- * (DAINT) register to determine the exact number of the OUT
- * endpoint on which the interrupt occurred, and then read the
- * corresponding Device OUT Endpoint-n Interrupt (DOEPINTn)
- * register to determine the exact cause of the interrupt. The
- * application must clear the appropriate status bit in the
- * corresponding DOEPINTn register to clear this bit.
- * @iepint: IN Endpoints Interrupt (IEPInt)
- * The core sets this bit to indicate that an interrupt is pending
- * on one of the IN endpoints of the core (in Device mode). The
- * application must read the Device All Endpoints Interrupt
- * (DAINT) register to determine the exact number of the IN
- * endpoint on which the interrupt occurred, and then read the
- * corresponding Device IN Endpoint-n Interrupt (DIEPINTn)
- * register to determine the exact cause of the interrupt. The
- * application must clear the appropriate status bit in the
- * corresponding DIEPINTn register to clear this bit.
- * @epmis: Endpoint Mismatch Interrupt (EPMis)
- * Indicates that an IN token has been received for a non-periodic
- * endpoint, but the data for another endpoint is present in the
- * top of the Non-Periodic Transmit FIFO and the IN endpoint
- * mismatch count programmed by the application has expired.
- * @eopf: End of Periodic Frame Interrupt (EOPF)
- * Indicates that the period specified in the Periodic Frame
- * Interval field of the Device Configuration register
- * (DCFG.PerFrInt) has been reached in the current microframe.
- * @isooutdrop: Isochronous OUT Packet Dropped Interrupt (ISOOutDrop)
- * The core sets this bit when it fails to write an isochronous OUT
- * packet into the RxFIFO because the RxFIFO doesn't have
- * enough space to accommodate a maximum packet size packet
- * for the isochronous OUT endpoint.
- * @enumdone: Enumeration Done (EnumDone)
- * The core sets this bit to indicate that speed enumeration is
- * complete. The application must read the Device Status (DSTS)
- * register to obtain the enumerated speed.
- * @usbrst: USB Reset (USBRst)
- * The core sets this bit to indicate that a reset is detected on
- * the USB.
- * @usbsusp: USB Suspend (USBSusp)
- * The core sets this bit to indicate that a suspend was detected
- * on the USB. The core enters the Suspended state when there
- * is no activity on the phy_line_state_i signal for an extended
- * period of time.
- * @erlysusp: Early Suspend (ErlySusp)
- * The core sets this bit to indicate that an Idle state has been
- * detected on the USB for 3 ms.
- * @i2cint: I2C Interrupt (I2CINT)
- * This bit is always 0x0.
- * @ulpickint: ULPI Carkit Interrupt (ULPICKINT)
- * This bit is always 0x0.
- * @goutnakeff: Global OUT NAK Effective (GOUTNakEff)
- * Indicates that the Set Global OUT NAK bit in the Device Control
- * register (DCTL.SGOUTNak), set by the application, has taken
- * effect in the core. This bit can be cleared by writing the Clear
- * Global OUT NAK bit in the Device Control register
- * (DCTL.CGOUTNak).
- * @ginnakeff: Global IN Non-Periodic NAK Effective (GINNakEff)
- * Indicates that the Set Global Non-Periodic IN NAK bit in the
- * Device Control register (DCTL.SGNPInNak), set by the
- * application, has taken effect in the core. That is, the core has
- * sampled the Global IN NAK bit set by the application. This bit
- * can be cleared by clearing the Clear Global Non-Periodic IN
- * NAK bit in the Device Control register (DCTL.CGNPInNak).
- * This interrupt does not necessarily mean that a NAK handshake
- * is sent out on the USB. The STALL bit takes precedence over
- * the NAK bit.
- * @nptxfemp: Non-Periodic TxFIFO Empty (NPTxFEmp)
- * This interrupt is asserted when the Non-Periodic TxFIFO is
- * either half or completely empty, and there is space for at least
- * one entry to be written to the Non-Periodic Transmit Request
- * Queue. The half or completely empty status is determined by
- * the Non-Periodic TxFIFO Empty Level bit in the Core AHB
- * Configuration register (GAHBCFG.NPTxFEmpLvl).
- * @rxflvl: RxFIFO Non-Empty (RxFLvl)
- * Indicates that there is at least one packet pending to be read
- * from the RxFIFO.
- * @sof: Start of (micro)Frame (Sof)
- * In Host mode, the core sets this bit to indicate that an SOF
- * (FS), micro-SOF (HS), or Keep-Alive (LS) is transmitted on the
- * USB. The application must write a 1 to this bit to clear the
- * interrupt.
- * In Device mode, the core sets this bit to indicate that an
- * SOF token has been received on the USB. The application can read
- * the Device Status register to get the current (micro)frame
- * number. This interrupt is seen only when the core is operating
- * at either HS or FS.
- * @otgint: OTG Interrupt (OTGInt)
- * The core sets this bit to indicate an OTG protocol event. The
- * application must read the OTG Interrupt Status (GOTGINT)
- * register to determine the exact event that caused this
- * interrupt. The application must clear the appropriate status bit
- * in the GOTGINT register to clear this bit.
- * @modemis: Mode Mismatch Interrupt (ModeMis)
- * The core sets this bit when the application is trying to access:
- * * A Host mode register, when the core is operating in Device
- * mode
- * * A Device mode register, when the core is operating in Host
- * mode
- * The register access is completed on the AHB with an OKAY
- * response, but is ignored by the core internally and doesn't
- * affect the operation of the core.
- * @curmod: Current Mode of Operation (CurMod)
- * Indicates the current mode of operation.
- * * 1'b0: Device mode
- * * 1'b1: Host mode
- */
- struct cvmx_usbcx_gintsts_s {
- __BITFIELD_FIELD(u32 wkupint : 1,
- __BITFIELD_FIELD(u32 sessreqint : 1,
- __BITFIELD_FIELD(u32 disconnint : 1,
- __BITFIELD_FIELD(u32 conidstschng : 1,
- __BITFIELD_FIELD(u32 reserved_27_27 : 1,
- __BITFIELD_FIELD(u32 ptxfemp : 1,
- __BITFIELD_FIELD(u32 hchint : 1,
- __BITFIELD_FIELD(u32 prtint : 1,
- __BITFIELD_FIELD(u32 reserved_23_23 : 1,
- __BITFIELD_FIELD(u32 fetsusp : 1,
- __BITFIELD_FIELD(u32 incomplp : 1,
- __BITFIELD_FIELD(u32 incompisoin : 1,
- __BITFIELD_FIELD(u32 oepint : 1,
- __BITFIELD_FIELD(u32 iepint : 1,
- __BITFIELD_FIELD(u32 epmis : 1,
- __BITFIELD_FIELD(u32 reserved_16_16 : 1,
- __BITFIELD_FIELD(u32 eopf : 1,
- __BITFIELD_FIELD(u32 isooutdrop : 1,
- __BITFIELD_FIELD(u32 enumdone : 1,
- __BITFIELD_FIELD(u32 usbrst : 1,
- __BITFIELD_FIELD(u32 usbsusp : 1,
- __BITFIELD_FIELD(u32 erlysusp : 1,
- __BITFIELD_FIELD(u32 i2cint : 1,
- __BITFIELD_FIELD(u32 ulpickint : 1,
- __BITFIELD_FIELD(u32 goutnakeff : 1,
- __BITFIELD_FIELD(u32 ginnakeff : 1,
- __BITFIELD_FIELD(u32 nptxfemp : 1,
- __BITFIELD_FIELD(u32 rxflvl : 1,
- __BITFIELD_FIELD(u32 sof : 1,
- __BITFIELD_FIELD(u32 otgint : 1,
- __BITFIELD_FIELD(u32 modemis : 1,
- __BITFIELD_FIELD(u32 curmod : 1,
- ;))))))))))))))))))))))))))))))))
- } s;
-};
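The clearing rules above differ per bit: R_SS_WC bits (e.g. Sof) are acknowledged by writing 1 back, while summary bits (HChInt, PrtInt) clear only once the underlying HCINTn/HPRT status is cleared. A dispatch sketch, with usbc_rd32()/usbc_wr32() and the two handlers as hypothetical placeholders:

union cvmx_usbcx_gintsts gintsts;

gintsts.u32 = usbc_rd32(CVMX_USBCX_GINTSTS(0));
if (gintsts.s.sof) {
	union cvmx_usbcx_gintsts ack = { .u32 = 0 };

	ack.s.sof = 1;	/* R_SS_WC: write 1 to clear */
	usbc_wr32(CVMX_USBCX_GINTSTS(0), ack.u32);
}
if (gintsts.s.hchint)
	handle_channel_irqs();	/* read HAINT, then matching HCINTn */
if (gintsts.s.prtint)
	handle_port_irq();	/* read HPRT, clear the W1C cause */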
-
-/**
- * cvmx_usbc#_gnptxfsiz
- *
- * Non-Periodic Transmit FIFO Size Register (GNPTXFSIZ)
- *
- * The application can program the RAM size and the memory start address for the
- * Non-Periodic TxFIFO.
- */
-union cvmx_usbcx_gnptxfsiz {
- u32 u32;
- /**
- * struct cvmx_usbcx_gnptxfsiz_s
- * @nptxfdep: Non-Periodic TxFIFO Depth (NPTxFDep)
- * This value is in terms of 32-bit words.
- * Minimum value is 16
- * Maximum value is 32768
- * @nptxfstaddr: Non-Periodic Transmit RAM Start Address (NPTxFStAddr)
- * This field contains the memory start address for Non-Periodic
- * Transmit FIFO RAM.
- */
- struct cvmx_usbcx_gnptxfsiz_s {
- __BITFIELD_FIELD(u32 nptxfdep : 16,
- __BITFIELD_FIELD(u32 nptxfstaddr : 16,
- ;))
- } s;
-};
-
-/**
- * cvmx_usbc#_gnptxsts
- *
- * Non-Periodic Transmit FIFO/Queue Status Register (GNPTXSTS)
- *
- * This read-only register contains the free space information for the
- * Non-Periodic TxFIFO and the Non-Periodic Transmit Request Queue.
- */
-union cvmx_usbcx_gnptxsts {
- u32 u32;
- /**
- * struct cvmx_usbcx_gnptxsts_s
- * @nptxqtop: Top of the Non-Periodic Transmit Request Queue (NPTxQTop)
- * Entry in the Non-Periodic Tx Request Queue that is currently
- * being processed by the MAC.
- * * Bits [30:27]: Channel/endpoint number
- * * Bits [26:25]:
- * - 2'b00: IN/OUT token
- * - 2'b01: Zero-length transmit packet (device IN/host OUT)
- * - 2'b10: PING/CSPLIT token
- * - 2'b11: Channel halt command
- * * Bit [24]: Terminate (last entry for selected channel/endpoint)
- * @nptxqspcavail: Non-Periodic Transmit Request Queue Space Available
- * (NPTxQSpcAvail)
- * Indicates the amount of free space available in the Non-
- * Periodic Transmit Request Queue. This queue holds both IN
- * and OUT requests in Host mode. Device mode has only IN
- * requests.
- * * 8'h0: Non-Periodic Transmit Request Queue is full
- * * 8'h1: 1 location available
- * * 8'h2: 2 locations available
- * * n: n locations available (0..8)
- * * Others: Reserved
- * @nptxfspcavail: Non-Periodic TxFIFO Space Avail (NPTxFSpcAvail)
- * Indicates the amount of free space available in the Non-
- * Periodic TxFIFO.
- * Values are in terms of 32-bit words.
- * * 16'h0: Non-Periodic TxFIFO is full
- * * 16'h1: 1 word available
- * * 16'h2: 2 words available
- * * 16'hn: n words available (where 0..32768)
- * * 16'h8000: 32768 words available
- * * Others: Reserved
- */
- struct cvmx_usbcx_gnptxsts_s {
- __BITFIELD_FIELD(u32 reserved_31_31 : 1,
- __BITFIELD_FIELD(u32 nptxqtop : 7,
- __BITFIELD_FIELD(u32 nptxqspcavail : 8,
- __BITFIELD_FIELD(u32 nptxfspcavail : 16,
- ;))))
- } s;
-};
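Per the field descriptions, a slave-mode sender must see both a free request-queue slot and enough FIFO words before pushing a non-periodic packet. A sketch under those rules (usbc_rd32() and words_needed are assumptions):

union cvmx_usbcx_gnptxsts gnptxsts;

gnptxsts.u32 = usbc_rd32(CVMX_USBCX_GNPTXSTS(0));
if (gnptxsts.s.nptxqspcavail > 0 &&
    gnptxsts.s.nptxfspcavail >= words_needed) {
	/* safe to queue the request and write the payload words */
}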
-
-/**
- * cvmx_usbc#_grstctl
- *
- * Core Reset Register (GRSTCTL)
- *
- * The application uses this register to reset various hardware features inside
- * the core.
- */
-union cvmx_usbcx_grstctl {
- u32 u32;
- /**
- * struct cvmx_usbcx_grstctl_s
- * @ahbidle: AHB Master Idle (AHBIdle)
- * Indicates that the AHB Master State Machine is in the IDLE
- * condition.
- * @dmareq: DMA Request Signal (DMAReq)
- * Indicates that the DMA request is in progress. Used for debug.
- * @txfnum: TxFIFO Number (TxFNum)
- * This is the FIFO number that must be flushed using the TxFIFO
- * Flush bit. This field must not be changed until the core clears
- * the TxFIFO Flush bit.
- * * 5'h0: Non-Periodic TxFIFO flush
- * * 5'h1: Periodic TxFIFO 1 flush in Device mode or Periodic
- * TxFIFO flush in Host mode
- * * 5'h2: Periodic TxFIFO 2 flush in Device mode
- * - ...
- * * 5'hF: Periodic TxFIFO 15 flush in Device mode
- * * 5'h10: Flush all the Periodic and Non-Periodic TxFIFOs in the
- * core
- * @txfflsh: TxFIFO Flush (TxFFlsh)
- * This bit selectively flushes a single or all transmit FIFOs, but
- * cannot do so if the core is in the midst of a transaction.
- * The application must only write this bit after checking that the
- * core is neither writing to the TxFIFO nor reading from the
- * TxFIFO.
- * The application must wait until the core clears this bit before
- * performing any operations. This bit takes 8 clocks (of phy_clk
- * or hclk, whichever is slower) to clear.
- * @rxfflsh: RxFIFO Flush (RxFFlsh)
- * The application can flush the entire RxFIFO using this bit, but
- * must first ensure that the core is not in the middle of a
- * transaction.
- * The application must only write to this bit after checking that
- * the core is neither reading from the RxFIFO nor writing to the
- * RxFIFO.
- * The application must wait until the bit is cleared before
- * performing any other operations. This bit will take 8 clocks
- * (slowest of PHY or AHB clock) to clear.
- * @intknqflsh: IN Token Sequence Learning Queue Flush (INTknQFlsh)
- * The application writes this bit to flush the IN Token Sequence
- * Learning Queue.
- * @frmcntrrst: Host Frame Counter Reset (FrmCntrRst)
- * The application writes this bit to reset the (micro)frame number
- * counter inside the core. When the (micro)frame counter is reset,
- * the subsequent SOF sent out by the core will have a
- * (micro)frame number of 0.
- * @hsftrst: HClk Soft Reset (HSftRst)
- * The application uses this bit to flush the control logic in the
- * AHB Clock domain. Only AHB Clock Domain pipelines are reset.
- * * FIFOs are not flushed with this bit.
- * * All state machines in the AHB clock domain are reset to the
- * Idle state after terminating the transactions on the AHB,
- * following the protocol.
- * * CSR control bits used by the AHB clock domain state
- * machines are cleared.
- * * To clear this interrupt, status mask bits that control the
- * interrupt status and are generated by the AHB clock domain
- * state machine are cleared.
- * * Because interrupt status bits are not cleared, the application
- * can get the status of any core events that occurred after it set
- * this bit.
- * This is a self-clearing bit that the core clears after all
- * necessary logic is reset in the core. This may take several
- * clocks, depending on the core's current state.
- * @csftrst: Core Soft Reset (CSftRst)
- * Resets the hclk and phy_clock domains as follows:
- * * Clears the interrupts and all the CSR registers except the
- * following register bits:
- * - PCGCCTL.RstPdwnModule
- * - PCGCCTL.GateHclk
- * - PCGCCTL.PwrClmp
- * - PCGCCTL.StopPclk
- * - GUSBCFG.PhyLPwrClkSel
- * - GUSBCFG.DDRSel
- * - GUSBCFG.PHYSel
- * - GUSBCFG.FSIntf
- * - GUSBCFG.ULPI_UTMI_Sel
- * - GUSBCFG.PHYIf
- * - HCFG.FSLSPclkSel
- * - DCFG.DevSpd
- * * All module state machines (except the AHB Slave Unit) are
- * reset to the IDLE state, and all the transmit FIFOs and the
- * receive FIFO are flushed.
- * * Any transactions on the AHB Master are terminated as soon
- * as possible, after gracefully completing the last data phase of
- * an AHB transfer. Any transactions on the USB are terminated
- * immediately.
- * The application can write to this bit any time it wants to reset
- * the core. This is a self-clearing bit and the core clears this
- * bit after all the necessary logic is reset in the core, which
- * may take several clocks, depending on the current state of the
- * core. Once this bit is cleared software should wait at least 3
- * PHY clocks before doing any access to the PHY domain
- * (synchronization delay). Software should also check that
- * bit 31 of this register is 1 (AHB Master is IDLE) before
- * starting any operation.
- * Typically software reset is used during software development
- * and also when you dynamically change the PHY selection bits
- * in the USB configuration registers listed above. When you
- * change the PHY, the corresponding clock for the PHY is
- * selected and used in the PHY domain. Once a new clock is
- * selected, the PHY domain has to be reset for proper operation.
- */
- struct cvmx_usbcx_grstctl_s {
- __BITFIELD_FIELD(u32 ahbidle : 1,
- __BITFIELD_FIELD(u32 dmareq : 1,
- __BITFIELD_FIELD(u32 reserved_11_29 : 19,
- __BITFIELD_FIELD(u32 txfnum : 5,
- __BITFIELD_FIELD(u32 txfflsh : 1,
- __BITFIELD_FIELD(u32 rxfflsh : 1,
- __BITFIELD_FIELD(u32 intknqflsh : 1,
- __BITFIELD_FIELD(u32 frmcntrrst : 1,
- __BITFIELD_FIELD(u32 hsftrst : 1,
- __BITFIELD_FIELD(u32 csftrst : 1,
- ;))))))))))
- } s;
-};
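The CSftRst/AHBIdle notes above amount to a small, well-defined reset procedure. A sketch of it, assuming hypothetical usbc_rd32()/usbc_wr32() helpers; a production driver would also bound both loops with a timeout:

union cvmx_usbcx_grstctl grstctl = { .u32 = 0 };

grstctl.s.csftrst = 1;			/* request a core soft reset */
usbc_wr32(CVMX_USBCX_GRSTCTL(0), grstctl.u32);
do {					/* self-clearing bit */
	grstctl.u32 = usbc_rd32(CVMX_USBCX_GRSTCTL(0));
} while (grstctl.s.csftrst);
do {					/* wait for AHB master idle */
	grstctl.u32 = usbc_rd32(CVMX_USBCX_GRSTCTL(0));
} while (!grstctl.s.ahbidle);
/* then allow >= 3 PHY clocks before touching the PHY domain */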
-
-/**
- * cvmx_usbc#_grxfsiz
- *
- * Receive FIFO Size Register (GRXFSIZ)
- *
- * The application can program the RAM size that must be allocated to the
- * RxFIFO.
- */
-union cvmx_usbcx_grxfsiz {
- u32 u32;
- /**
- * struct cvmx_usbcx_grxfsiz_s
- * @rxfdep: RxFIFO Depth (RxFDep)
- * This value is in terms of 32-bit words.
- * * Minimum value is 16
- * * Maximum value is 32768
- */
- struct cvmx_usbcx_grxfsiz_s {
- __BITFIELD_FIELD(u32 reserved_16_31 : 16,
- __BITFIELD_FIELD(u32 rxfdep : 16,
- ;))
- } s;
-};
-
-/**
- * cvmx_usbc#_grxstsph
- *
- * Receive Status Read and Pop Register, Host Mode (GRXSTSPH)
- *
- * A read to the Receive Status Read and Pop register returns and additionally
- * pops the top data entry out of the RxFIFO.
- * This description is only valid when the core is in Host mode. For Device mode
- * use USBC_GRXSTSPD instead.
- * NOTE: GRXSTSPH and GRXSTSPD are physically the same register and share the
- * same offset in the O2P USB core. The offset difference shown in this
- * document is for software clarity and is actually ignored by the
- * hardware.
- */
-union cvmx_usbcx_grxstsph {
- u32 u32;
- /**
- * struct cvmx_usbcx_grxstsph_s
- * @pktsts: Packet Status (PktSts)
- * Indicates the status of the received packet
- * * 4'b0010: IN data packet received
- * * 4'b0011: IN transfer completed (triggers an interrupt)
- * * 4'b0101: Data toggle error (triggers an interrupt)
- * * 4'b0111: Channel halted (triggers an interrupt)
- * * Others: Reserved
- * @dpid: Data PID (DPID)
- * * 2'b00: DATA0
- * * 2'b10: DATA1
- * * 2'b01: DATA2
- * * 2'b11: MDATA
- * @bcnt: Byte Count (BCnt)
- * Indicates the byte count of the received IN data packet
- * @chnum: Channel Number (ChNum)
- * Indicates the channel number to which the current received
- * packet belongs.
- */
- struct cvmx_usbcx_grxstsph_s {
- __BITFIELD_FIELD(u32 reserved_21_31 : 11,
- __BITFIELD_FIELD(u32 pktsts : 4,
- __BITFIELD_FIELD(u32 dpid : 2,
- __BITFIELD_FIELD(u32 bcnt : 11,
- __BITFIELD_FIELD(u32 chnum : 4,
- ;)))))
- } s;
-};
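Because a single read both returns and pops the top RxFIFO entry, the status decode and the payload drain have to happen together. A host-mode sketch using the PktSts encodings above (usbc_rd32() and read_payload() are hypothetical):

union cvmx_usbcx_grxstsph rx;

rx.u32 = usbc_rd32(CVMX_USBCX_GRXSTSPH(0));
if (rx.s.pktsts == 0x2)			/* 4'b0010: IN data received */
	read_payload(rx.s.chnum, rx.s.bcnt); /* bcnt bytes, channel chnum */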
-
-/**
- * cvmx_usbc#_gusbcfg
- *
- * Core USB Configuration Register (GUSBCFG)
- *
- * This register can be used to configure the core after power-on or a change
- * to Host mode or Device mode. It contains USB and USB-PHY related
- * configuration parameters. The application must program this register before
- * starting any transactions on either the AHB or the USB. Do not make changes
- * to this register after the initial programming.
- */
-union cvmx_usbcx_gusbcfg {
- u32 u32;
- /**
- * struct cvmx_usbcx_gusbcfg_s
- * @otgi2csel: UTMIFS or I2C Interface Select (OtgI2CSel)
- * This bit is always 0x0.
- * @phylpwrclksel: PHY Low-Power Clock Select (PhyLPwrClkSel)
- * Software should set this bit to 0x0.
- * Selects either 480-MHz or 48-MHz (low-power) PHY mode. In
- * FS and LS modes, the PHY can usually operate on a 48-MHz
- * clock to save power.
- * * 1'b0: 480-MHz Internal PLL clock
- * * 1'b1: 48-MHz External Clock
- * In 480 MHz mode, the UTMI interface operates at either 60 or
- * 30-MHz, depending upon whether 8- or 16-bit data width is
- * selected. In 48-MHz mode, the UTMI interface operates at 48
- * MHz in FS mode and at either 48 or 6 MHz in LS mode
- * (depending on the PHY vendor).
- * This bit drives the utmi_fsls_low_power core output signal, and
- * is valid only for UTMI+ PHYs.
- * @usbtrdtim: USB Turnaround Time (USBTrdTim)
- * Sets the turnaround time in PHY clocks.
- * Specifies the response time for a MAC request to the Packet
- * FIFO Controller (PFC) to fetch data from the DFIFO (SPRAM).
- * This must be programmed to 0x5.
- * @hnpcap: HNP-Capable (HNPCap)
- * This bit is always 0x0.
- * @srpcap: SRP-Capable (SRPCap)
- * This bit is always 0x0.
- * @ddrsel: ULPI DDR Select (DDRSel)
- * Software should set this bit to 0x0.
- * @physel: USB 2.0 High-Speed PHY or USB 1.1 Full-Speed Serial
- * Software should set this bit to 0x0.
- * @fsintf: Full-Speed Serial Interface Select (FSIntf)
- * Software should set this bit to 0x0.
- * @ulpi_utmi_sel: ULPI or UTMI+ Select (ULPI_UTMI_Sel)
- * This bit is always 0x0.
- * @phyif: PHY Interface (PHYIf)
- * This bit is always 0x1.
- * @toutcal: HS/FS Timeout Calibration (TOutCal)
- * The number of PHY clocks that the application programs in this
- * field is added to the high-speed/full-speed interpacket timeout
- * duration in the core to account for any additional delays
- * introduced by the PHY. This may be required, since the delay
- * introduced by the PHY in generating the linestate condition may
- * vary from one PHY to another.
- * The USB standard timeout value for high-speed operation is
- * 736 to 816 (inclusive) bit times. The USB standard timeout
- * value for full-speed operation is 16 to 18 (inclusive) bit
- * times. The application must program this field based on the
- * speed of enumeration. The number of bit times added per PHY
- * clock are:
- * High-speed operation:
- * * One 30-MHz PHY clock = 16 bit times
- * * One 60-MHz PHY clock = 8 bit times
- * Full-speed operation:
- * * One 30-MHz PHY clock = 0.4 bit times
- * * One 60-MHz PHY clock = 0.2 bit times
- * * One 48-MHz PHY clock = 0.25 bit times
- */
- struct cvmx_usbcx_gusbcfg_s {
- __BITFIELD_FIELD(u32 reserved_17_31 : 15,
- __BITFIELD_FIELD(u32 otgi2csel : 1,
- __BITFIELD_FIELD(u32 phylpwrclksel : 1,
- __BITFIELD_FIELD(u32 reserved_14_14 : 1,
- __BITFIELD_FIELD(u32 usbtrdtim : 4,
- __BITFIELD_FIELD(u32 hnpcap : 1,
- __BITFIELD_FIELD(u32 srpcap : 1,
- __BITFIELD_FIELD(u32 ddrsel : 1,
- __BITFIELD_FIELD(u32 physel : 1,
- __BITFIELD_FIELD(u32 fsintf : 1,
- __BITFIELD_FIELD(u32 ulpi_utmi_sel : 1,
- __BITFIELD_FIELD(u32 phyif : 1,
- __BITFIELD_FIELD(u32 toutcal : 3,
- ;)))))))))))))
- } s;
-};
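Most GUSBCFG fields are fixed or must stay at their defaults; the one value the notes above require is USBTrdTim = 0x5. A read-modify-write sketch (usbc_rd32()/usbc_wr32() hypothetical; TOutCal = 0 assumes the PHY adds no extra linestate delay):

union cvmx_usbcx_gusbcfg gusbcfg;

gusbcfg.u32 = usbc_rd32(CVMX_USBCX_GUSBCFG(0));
gusbcfg.s.usbtrdtim = 0x5;	/* required turnaround time */
gusbcfg.s.phylpwrclksel = 0;	/* 480-MHz internal PLL clock */
gusbcfg.s.toutcal = 0;		/* assumption: no added PHY delay */
usbc_wr32(CVMX_USBCX_GUSBCFG(0), gusbcfg.u32);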
-
-/**
- * cvmx_usbc#_haint
- *
- * Host All Channels Interrupt Register (HAINT)
- *
- * When a significant event occurs on a channel, the Host All Channels Interrupt
- * register interrupts the application using the Host Channels Interrupt bit of
- * the Core Interrupt register (GINTSTS.HChInt).
- * There is one interrupt bit per channel, up to a maximum of 16 bits. Bits in
- * this register are set and cleared when the application sets and clears bits
- * in the corresponding Host Channel-n Interrupt register.
- */
-union cvmx_usbcx_haint {
- u32 u32;
- /**
- * struct cvmx_usbcx_haint_s
- * @haint: Channel Interrupts (HAINT)
- * One bit per channel: Bit 0 for Channel 0, bit 15 for Channel 15
- */
- struct cvmx_usbcx_haint_s {
- __BITFIELD_FIELD(u32 reserved_16_31 : 16,
- __BITFIELD_FIELD(u32 haint : 16,
- ;))
- } s;
-};
-
-/**
- * cvmx_usbc#_haintmsk
- *
- * Host All Channels Interrupt Mask Register (HAINTMSK)
- *
- * The Host All Channel Interrupt Mask register works with the Host All Channel
- * Interrupt register to interrupt the application when an event occurs on a
- * channel. There is one interrupt mask bit per channel, up to a maximum of 16
- * bits.
- * Mask interrupt: 1'b0 Unmask interrupt: 1'b1
- */
-union cvmx_usbcx_haintmsk {
- u32 u32;
- /**
- * struct cvmx_usbcx_haintmsk_s
- * @haintmsk: Channel Interrupt Mask (HAINTMsk)
- * One bit per channel: Bit 0 for channel 0, bit 15 for channel 15
- */
- struct cvmx_usbcx_haintmsk_s {
- __BITFIELD_FIELD(u32 reserved_16_31 : 16,
- __BITFIELD_FIELD(u32 haintmsk : 16,
- ;))
- } s;
-};
-
-/**
- * cvmx_usbc#_hcchar#
- *
- * Host Channel-n Characteristics Register (HCCHAR)
- *
- */
-union cvmx_usbcx_hccharx {
- u32 u32;
- /**
- * struct cvmx_usbcx_hccharx_s
- * @chena: Channel Enable (ChEna)
- * This field is set by the application and cleared by the OTG
- * host.
- * * 1'b0: Channel disabled
- * * 1'b1: Channel enabled
- * @chdis: Channel Disable (ChDis)
- * The application sets this bit to stop transmitting/receiving
- * data on a channel, even before the transfer for that channel is
- * complete. The application must wait for the Channel Disabled
- * interrupt before treating the channel as disabled.
- * @oddfrm: Odd Frame (OddFrm)
- * This field is set (reset) by the application to indicate that
- * the OTG host must perform a transfer in an odd (micro)frame.
- * This field is applicable for only periodic (isochronous and
- * interrupt) transactions.
- * * 1'b0: Even (micro)frame
- * * 1'b1: Odd (micro)frame
- * @devaddr: Device Address (DevAddr)
- * This field selects the specific device serving as the data
- * source or sink.
- * @ec: Multi Count (MC) / Error Count (EC)
- * When the Split Enable bit of the Host Channel-n Split Control
- * register (HCSPLTn.SpltEna) is reset (1'b0), this field indicates
- * to the host the number of transactions that should be executed
- * per microframe for this endpoint.
- * * 2'b00: Reserved. This field yields undefined results.
- * * 2'b01: 1 transaction
- * * 2'b10: 2 transactions to be issued for this endpoint per
- * microframe
- * * 2'b11: 3 transactions to be issued for this endpoint per
- * microframe
- * When HCSPLTn.SpltEna is set (1'b1), this field indicates the
- * number of immediate retries to be performed for periodic split
- * transactions on transaction errors. This field must be set to at
- * least 2'b01.
- * @eptype: Endpoint Type (EPType)
- * Indicates the transfer type selected.
- * * 2'b00: Control
- * * 2'b01: Isochronous
- * * 2'b10: Bulk
- * * 2'b11: Interrupt
- * @lspddev: Low-Speed Device (LSpdDev)
- * This field is set by the application to indicate that this
- * channel is communicating to a low-speed device.
- * @epdir: Endpoint Direction (EPDir)
- * Indicates whether the transaction is IN or OUT.
- * * 1'b0: OUT
- * * 1'b1: IN
- * @epnum: Endpoint Number (EPNum)
- * Indicates the endpoint number on the device serving as the
- * data source or sink.
- * @mps: Maximum Packet Size (MPS)
- * Indicates the maximum packet size of the associated endpoint.
- */
- struct cvmx_usbcx_hccharx_s {
- __BITFIELD_FIELD(u32 chena : 1,
- __BITFIELD_FIELD(u32 chdis : 1,
- __BITFIELD_FIELD(u32 oddfrm : 1,
- __BITFIELD_FIELD(u32 devaddr : 7,
- __BITFIELD_FIELD(u32 ec : 2,
- __BITFIELD_FIELD(u32 eptype : 2,
- __BITFIELD_FIELD(u32 lspddev : 1,
- __BITFIELD_FIELD(u32 reserved_16_16 : 1,
- __BITFIELD_FIELD(u32 epdir : 1,
- __BITFIELD_FIELD(u32 epnum : 4,
- __BITFIELD_FIELD(u32 mps : 11,
- ;)))))))))))
- } s;
-};
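Putting the encodings above together, starting a transfer means describing the endpoint and then setting ChEna. An illustrative bulk-IN setup (ch, dev_addr, and usbc_wr32() are assumptions, not values from the original driver):

union cvmx_usbcx_hccharx hcchar = { .u32 = 0 };

hcchar.s.mps = 512;		/* high-speed bulk max packet size */
hcchar.s.epnum = 1;
hcchar.s.epdir = 1;		/* 1'b1: IN */
hcchar.s.eptype = 2;		/* 2'b10: bulk */
hcchar.s.ec = 1;		/* one transaction per microframe */
hcchar.s.devaddr = dev_addr;
hcchar.s.chena = 1;		/* hand the channel to the OTG host */
usbc_wr32(CVMX_USBCX_HCCHARX(ch, 0), hcchar.u32);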
-
-/**
- * cvmx_usbc#_hcfg
- *
- * Host Configuration Register (HCFG)
- *
- * This register configures the core after power-on. Do not make changes to this
- * register after initializing the host.
- */
-union cvmx_usbcx_hcfg {
- u32 u32;
- /**
- * struct cvmx_usbcx_hcfg_s
- * @fslssupp: FS- and LS-Only Support (FSLSSupp)
- * The application uses this bit to control the core's enumeration
- * speed. Using this bit, the application can make the core
- * enumerate as a FS host, even if the connected device supports
- * HS traffic. Do not make changes to this field after initial
- * programming.
- * * 1'b0: HS/FS/LS, based on the maximum speed supported by
- * the connected device
- * * 1'b1: FS/LS-only, even if the connected device can support HS
- * @fslspclksel: FS/LS PHY Clock Select (FSLSPclkSel)
- * When the core is in FS Host mode
- * * 2'b00: PHY clock is running at 30/60 MHz
- * * 2'b01: PHY clock is running at 48 MHz
- * * Others: Reserved
- * When the core is in LS Host mode
- * * 2'b00: PHY clock is running at 30/60 MHz. When the
- * UTMI+/ULPI PHY Low Power mode is not selected, use
- * 30/60 MHz.
- * * 2'b01: PHY clock is running at 48 MHz. When the UTMI+
- * PHY Low Power mode is selected, use 48MHz if the PHY
- * supplies a 48 MHz clock during LS mode.
- * * 2'b10: PHY clock is running at 6 MHz. In USB 1.1 FS mode,
- * use 6 MHz when the UTMI+ PHY Low Power mode is
- * selected and the PHY supplies a 6 MHz clock during LS
- * mode. If you select a 6 MHz clock during LS mode, you must
- * do a soft reset.
- * * 2'b11: Reserved
- */
- struct cvmx_usbcx_hcfg_s {
- __BITFIELD_FIELD(u32 reserved_3_31 : 29,
- __BITFIELD_FIELD(u32 fslssupp : 1,
- __BITFIELD_FIELD(u32 fslspclksel : 2,
- ;)))
- } s;
-};
-
-/**
- * cvmx_usbc#_hcint#
- *
- * Host Channel-n Interrupt Register (HCINT)
- *
- * This register indicates the status of a channel with respect to USB- and
- * AHB-related events. The application must read this register when the Host
- * Channels Interrupt bit of the Core Interrupt register (GINTSTS.HChInt) is
- * set. Before the application can read this register, it must first read
- * the Host All Channels Interrupt (HAINT) register to get the exact channel
- * number for the Host Channel-n Interrupt register. The application must clear
- * the appropriate bit in this register to clear the corresponding bits in the
- * HAINT and GINTSTS registers.
- */
-union cvmx_usbcx_hcintx {
- u32 u32;
- /**
- * struct cvmx_usbcx_hcintx_s
- * @datatglerr: Data Toggle Error (DataTglErr)
- * @frmovrun: Frame Overrun (FrmOvrun)
- * @bblerr: Babble Error (BblErr)
- * @xacterr: Transaction Error (XactErr)
- * @nyet: NYET Response Received Interrupt (NYET)
- * @ack: ACK Response Received Interrupt (ACK)
- * @nak: NAK Response Received Interrupt (NAK)
- * @stall: STALL Response Received Interrupt (STALL)
- * @ahberr: This bit is always 0x0.
- * @chhltd: Channel Halted (ChHltd)
- * Indicates the transfer completed abnormally either because of
- * any USB transaction error or in response to a disable request by
- * the application.
- * @xfercompl: Transfer Completed (XferCompl)
- * Transfer completed normally without any errors.
- */
- struct cvmx_usbcx_hcintx_s {
- __BITFIELD_FIELD(u32 reserved_11_31 : 21,
- __BITFIELD_FIELD(u32 datatglerr : 1,
- __BITFIELD_FIELD(u32 frmovrun : 1,
- __BITFIELD_FIELD(u32 bblerr : 1,
- __BITFIELD_FIELD(u32 xacterr : 1,
- __BITFIELD_FIELD(u32 nyet : 1,
- __BITFIELD_FIELD(u32 ack : 1,
- __BITFIELD_FIELD(u32 nak : 1,
- __BITFIELD_FIELD(u32 stall : 1,
- __BITFIELD_FIELD(u32 ahberr : 1,
- __BITFIELD_FIELD(u32 chhltd : 1,
- __BITFIELD_FIELD(u32 xfercompl : 1,
- ;))))))))))))
- } s;
-};
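Clearing a channel event is a write-1-to-clear on this register, which in turn drops the matching HAINT bit and, once no channel is pending, GINTSTS.HChInt. A sketch (ch and the usbc_* helpers are hypothetical):

union cvmx_usbcx_hcintx hcint;

hcint.u32 = usbc_rd32(CVMX_USBCX_HCINTX(ch, 0));
if (hcint.s.xfercompl) {
	union cvmx_usbcx_hcintx ack = { .u32 = 0 };

	ack.s.xfercompl = 1;	/* write 1 to clear just this bit */
	usbc_wr32(CVMX_USBCX_HCINTX(ch, 0), ack.u32);
}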
-
-/**
- * cvmx_usbc#_hcintmsk#
- *
- * Host Channel-n Interrupt Mask Register (HCINTMSKn)
- *
- * This register reflects the mask for each channel status described in the
- * previous section.
- * Mask interrupt: 1'b0 Unmask interrupt: 1'b1
- */
-union cvmx_usbcx_hcintmskx {
- u32 u32;
- /**
- * struct cvmx_usbcx_hcintmskx_s
- * @datatglerrmsk: Data Toggle Error Mask (DataTglErrMsk)
- * @frmovrunmsk: Frame Overrun Mask (FrmOvrunMsk)
- * @bblerrmsk: Babble Error Mask (BblErrMsk)
- * @xacterrmsk: Transaction Error Mask (XactErrMsk)
- * @nyetmsk: NYET Response Received Interrupt Mask (NyetMsk)
- * @ackmsk: ACK Response Received Interrupt Mask (AckMsk)
- * @nakmsk: NAK Response Received Interrupt Mask (NakMsk)
- * @stallmsk: STALL Response Received Interrupt Mask (StallMsk)
- * @ahberrmsk: AHB Error Mask (AHBErrMsk)
- * @chhltdmsk: Channel Halted Mask (ChHltdMsk)
- * @xfercomplmsk: Transfer Completed Mask (XferComplMsk)
- */
- struct cvmx_usbcx_hcintmskx_s {
- __BITFIELD_FIELD(u32 reserved_11_31 : 21,
- __BITFIELD_FIELD(u32 datatglerrmsk : 1,
- __BITFIELD_FIELD(u32 frmovrunmsk : 1,
- __BITFIELD_FIELD(u32 bblerrmsk : 1,
- __BITFIELD_FIELD(u32 xacterrmsk : 1,
- __BITFIELD_FIELD(u32 nyetmsk : 1,
- __BITFIELD_FIELD(u32 ackmsk : 1,
- __BITFIELD_FIELD(u32 nakmsk : 1,
- __BITFIELD_FIELD(u32 stallmsk : 1,
- __BITFIELD_FIELD(u32 ahberrmsk : 1,
- __BITFIELD_FIELD(u32 chhltdmsk : 1,
- __BITFIELD_FIELD(u32 xfercomplmsk : 1,
- ;))))))))))))
- } s;
-};
-
-/**
- * cvmx_usbc#_hcsplt#
- *
- * Host Channel-n Split Control Register (HCSPLT)
- *
- */
-union cvmx_usbcx_hcspltx {
- u32 u32;
- /**
- * struct cvmx_usbcx_hcspltx_s
- * @spltena: Split Enable (SpltEna)
- * The application sets this field to indicate that this channel is
- * enabled to perform split transactions.
- * @compsplt: Do Complete Split (CompSplt)
- * The application sets this field to request the OTG host to
- * perform a complete split transaction.
- * @xactpos: Transaction Position (XactPos)
- * This field is used to determine whether to send all, first,
- * middle, or last payloads with each OUT transaction.
- * * 2'b11: All. This is the entire data payload of this
- * transaction (which is less than or equal to 188 bytes).
- * * 2'b10: Begin. This is the first data payload of this
- * transaction (which is larger than 188 bytes).
- * * 2'b00: Mid. This is the middle payload of this transaction
- * (which is larger than 188 bytes).
- * * 2'b01: End. This is the last payload of this transaction
- * (which is larger than 188 bytes).
- * @hubaddr: Hub Address (HubAddr)
- * This field holds the device address of the transaction
- * translator's hub.
- * @prtaddr: Port Address (PrtAddr)
- * This field is the port number of the recipient transaction
- * translator.
- */
- struct cvmx_usbcx_hcspltx_s {
- __BITFIELD_FIELD(u32 spltena : 1,
- __BITFIELD_FIELD(u32 reserved_17_30 : 14,
- __BITFIELD_FIELD(u32 compsplt : 1,
- __BITFIELD_FIELD(u32 xactpos : 2,
- __BITFIELD_FIELD(u32 hubaddr : 7,
- __BITFIELD_FIELD(u32 prtaddr : 7,
- ;))))))
- } s;
-};
-
-/**
- * cvmx_usbc#_hctsiz#
- *
- * Host Channel-n Transfer Size Register (HCTSIZ)
- *
- */
-union cvmx_usbcx_hctsizx {
- u32 u32;
- /**
- * struct cvmx_usbcx_hctsizx_s
- * @dopng: Do Ping (DoPng)
- * Setting this field to 1 directs the host to do PING protocol.
- * @pid: PID (Pid)
- * The application programs this field with the type of PID to use
- * for the initial transaction. The host will maintain this field
- * for the rest of the transfer.
- * * 2'b00: DATA0
- * * 2'b01: DATA2
- * * 2'b10: DATA1
- * * 2'b11: MDATA (non-control)/SETUP (control)
- * @pktcnt: Packet Count (PktCnt)
- * This field is programmed by the application with the expected
- * number of packets to be transmitted (OUT) or received (IN).
- * The host decrements this count on every successful
- * transmission or reception of an OUT/IN packet. Once this count
- * reaches zero, the application is interrupted to indicate normal
- * completion.
- * @xfersize: Transfer Size (XferSize)
- * For an OUT, this field is the number of data bytes the host will
- * send during the transfer.
- * For an IN, this field is the buffer size that the application
- * has reserved for the transfer. The application is expected to
- * program this field as an integer multiple of the maximum packet
- * size for IN transactions (periodic and non-periodic).
- */
- struct cvmx_usbcx_hctsizx_s {
- __BITFIELD_FIELD(u32 dopng : 1,
- __BITFIELD_FIELD(u32 pid : 2,
- __BITFIELD_FIELD(u32 pktcnt : 10,
- __BITFIELD_FIELD(u32 xfersize : 19,
- ;))))
- } s;
-};
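For IN transfers the XferSize note above forces a whole number of max-size packets, so both fields fall out of one rounding step. A sketch assuming buf_len/mps/ch and a hypothetical usbc_wr32(); DIV_ROUND_UP() is the usual kernel helper:

union cvmx_usbcx_hctsizx hctsiz = { .u32 = 0 };
u32 packets = DIV_ROUND_UP(buf_len, mps);

hctsiz.s.pktcnt = packets;
hctsiz.s.xfersize = packets * mps;	/* IN: integer multiple of MPS */
hctsiz.s.pid = 0;			/* 2'b00: start from DATA0 */
usbc_wr32(CVMX_USBCX_HCTSIZX(ch, 0), hctsiz.u32);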
-
-/**
- * cvmx_usbc#_hfir
- *
- * Host Frame Interval Register (HFIR)
- *
- * This register stores the frame interval information for the current speed to
- * which the O2P USB core has enumerated.
- */
-union cvmx_usbcx_hfir {
- u32 u32;
- /**
- * struct cvmx_usbcx_hfir_s
- * @frint: Frame Interval (FrInt)
- * The value that the application programs to this field specifies
- * the interval between two consecutive SOFs (FS) or micro-
- * SOFs (HS) or Keep-Alive tokens (HS). This field contains the
- * number of PHY clocks that constitute the required frame
- * interval. The default value set in this field is for FS operation
- * when the PHY clock frequency is 60 MHz. The application can
- * write a value to this register only after the Port Enable bit of
- * the Host Port Control and Status register (HPRT.PrtEnaPort)
- * has been set. If no value is programmed, the core calculates
- * the value based on the PHY clock specified in the FS/LS PHY
- * Clock Select field of the Host Configuration register
- * (HCFG.FSLSPclkSel). Do not change the value of this field
- * after the initial configuration.
- * * 125 us (PHY clock frequency for HS)
- * * 1 ms (PHY clock frequency for FS/LS)
- */
- struct cvmx_usbcx_hfir_s {
- __BITFIELD_FIELD(u32 reserved_16_31 : 16,
- __BITFIELD_FIELD(u32 frint : 16,
- ;))
- } s;
-};
-
-/**
- * cvmx_usbc#_hfnum
- *
- * Host Frame Number/Frame Time Remaining Register (HFNUM)
- *
- * This register indicates the current frame number.
- * It also indicates the time remaining (in terms of the number of PHY clocks)
- * in the current (micro)frame.
- */
-union cvmx_usbcx_hfnum {
- u32 u32;
- /**
- * struct cvmx_usbcx_hfnum_s
- * @frrem: Frame Time Remaining (FrRem)
- * Indicates the amount of time remaining in the current
- * microframe (HS) or frame (FS/LS), in terms of PHY clocks.
- * This field decrements on each PHY clock. When it reaches
- * zero, this field is reloaded with the value in the Frame
- * Interval register and a new SOF is transmitted on the USB.
- * @frnum: Frame Number (FrNum)
- * This field increments when a new SOF is transmitted on the
- * USB, and is reset to 0 when it reaches 16'h3FFF.
- */
- struct cvmx_usbcx_hfnum_s {
- __BITFIELD_FIELD(u32 frrem : 16,
- __BITFIELD_FIELD(u32 frnum : 16,
- ;))
- } s;
-};
-
-/**
- * cvmx_usbc#_hprt
- *
- * Host Port Control and Status Register (HPRT)
- *
- * This register is available in both Host and Device modes.
- * Currently, the OTG Host supports only one port.
- * A single register holds USB port-related information such as USB reset,
- * enable, suspend, resume, connect status, and test mode for each port. The
- * R_SS_WC bits in this register can trigger an interrupt to the application
- * through the Host Port Interrupt bit of the Core Interrupt register
- * (GINTSTS.PrtInt). On a Port Interrupt, the application must read this
- * register and clear the bit that caused the interrupt. For the R_SS_WC bits,
- * the application must write a 1 to the bit to clear the interrupt.
- */
-union cvmx_usbcx_hprt {
- u32 u32;
- /**
- * struct cvmx_usbcx_hprt_s
- * @prtspd: Port Speed (PrtSpd)
- * Indicates the speed of the device attached to this port.
- * * 2'b00: High speed
- * * 2'b01: Full speed
- * * 2'b10: Low speed
- * * 2'b11: Reserved
- * @prttstctl: Port Test Control (PrtTstCtl)
- * The application writes a nonzero value to this field to put
- * the port into a Test mode, and the corresponding pattern is
- * signaled on the port.
- * * 4'b0000: Test mode disabled
- * * 4'b0001: Test_J mode
- * * 4'b0010: Test_K mode
- * * 4'b0011: Test_SE0_NAK mode
- * * 4'b0100: Test_Packet mode
- * * 4'b0101: Test_Force_Enable
- * * Others: Reserved
- * PrtSpd must be zero (i.e. the interface must be in high-speed
- * mode) to use the PrtTstCtl test modes.
- * @prtpwr: Port Power (PrtPwr)
- * The application uses this field to control power to this port,
- * and the core clears this bit on an overcurrent condition.
- * * 1'b0: Power off
- * * 1'b1: Power on
- * @prtlnsts: Port Line Status (PrtLnSts)
- * Indicates the current logic level of the USB data lines
- * * Bit [10]: Logic level of D-
- * * Bit [11]: Logic level of D+
- * @prtrst: Port Reset (PrtRst)
- * When the application sets this bit, a reset sequence is
- * started on this port. The application must time the reset
- * period and clear this bit after the reset sequence is
- * complete.
- * * 1'b0: Port not in reset
- * * 1'b1: Port in reset
- * The application must leave this bit set for at least a
- * minimum duration mentioned below to start a reset on the
- * port. The application can leave it set for another 10 ms in
- * addition to the required minimum duration, before clearing
- * the bit, even though there is no maximum limit set by the
- * USB standard.
- * * High speed: 50 ms
- * * Full speed/Low speed: 10 ms
- * @prtsusp: Port Suspend (PrtSusp)
- * The application sets this bit to put this port in Suspend
- * mode. The core only stops sending SOFs when this is set.
- * To stop the PHY clock, the application must set the Port
- * Clock Stop bit, which will assert the suspend input pin of
- * the PHY.
- * The read value of this bit reflects the current suspend
- * status of the port. This bit is cleared by the core after a
- * remote wakeup signal is detected or the application sets
- * the Port Reset bit or Port Resume bit in this register or the
- * Resume/Remote Wakeup Detected Interrupt bit or
- * Disconnect Detected Interrupt bit in the Core Interrupt
- * register (GINTSTS.WkUpInt or GINTSTS.DisconnInt,
- * respectively).
- * * 1'b0: Port not in Suspend mode
- * * 1'b1: Port in Suspend mode
- * @prtres: Port Resume (PrtRes)
- * The application sets this bit to drive resume signaling on
- * the port. The core continues to drive the resume signal
- * until the application clears this bit.
- * If the core detects a USB remote wakeup sequence, as
- * indicated by the Port Resume/Remote Wakeup Detected
- * Interrupt bit of the Core Interrupt register
- * (GINTSTS.WkUpInt), the core starts driving resume
- * signaling without application intervention and clears this bit
- * when it detects a disconnect condition. The read value of
- * this bit indicates whether the core is currently driving
- * resume signaling.
- * * 1'b0: No resume driven
- * * 1'b1: Resume driven
- * @prtovrcurrchng: Port Overcurrent Change (PrtOvrCurrChng)
- * The core sets this bit when the status of the Port
- * Overcurrent Active bit (bit 4) in this register changes.
- * @prtovrcurract: Port Overcurrent Active (PrtOvrCurrAct)
- * Indicates the overcurrent condition of the port.
- * * 1'b0: No overcurrent condition
- * * 1'b1: Overcurrent condition
- * @prtenchng: Port Enable/Disable Change (PrtEnChng)
- * The core sets this bit when the status of the Port Enable bit
- * [2] of this register changes.
- * @prtena: Port Enable (PrtEna)
- * A port is enabled only by the core after a reset sequence,
- * and is disabled by an overcurrent condition, a disconnect
- * condition, or by the application clearing this bit. The
- * application cannot set this bit by a register write. It can only
- * clear it to disable the port. This bit does not trigger any
- * interrupt to the application.
- * * 1'b0: Port disabled
- * * 1'b1: Port enabled
- * @prtconndet: Port Connect Detected (PrtConnDet)
- * The core sets this bit when a device connection is detected
- * to trigger an interrupt to the application using the Host Port
- * Interrupt bit of the Core Interrupt register (GINTSTS.PrtInt).
- * The application must write a 1 to this bit to clear the
- * interrupt.
- * @prtconnsts: Port Connect Status (PrtConnSts)
- * * 0: No device is attached to the port.
- * * 1: A device is attached to the port.
- */
- struct cvmx_usbcx_hprt_s {
- __BITFIELD_FIELD(u32 reserved_19_31 : 13,
- __BITFIELD_FIELD(u32 prtspd : 2,
- __BITFIELD_FIELD(u32 prttstctl : 4,
- __BITFIELD_FIELD(u32 prtpwr : 1,
- __BITFIELD_FIELD(u32 prtlnsts : 2,
- __BITFIELD_FIELD(u32 reserved_9_9 : 1,
- __BITFIELD_FIELD(u32 prtrst : 1,
- __BITFIELD_FIELD(u32 prtsusp : 1,
- __BITFIELD_FIELD(u32 prtres : 1,
- __BITFIELD_FIELD(u32 prtovrcurrchng : 1,
- __BITFIELD_FIELD(u32 prtovrcurract : 1,
- __BITFIELD_FIELD(u32 prtenchng : 1,
- __BITFIELD_FIELD(u32 prtena : 1,
- __BITFIELD_FIELD(u32 prtconndet : 1,
- __BITFIELD_FIELD(u32 prtconnsts : 1,
- ;)))))))))))))))
- } s;
-};
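The PrtRst description above boils down to a drive-and-time sequence: set the bit, wait at least the per-speed minimum, then clear it. A minimal sketch of that sequence follows, assuming hypothetical octeon_usbc_read32()/octeon_usbc_write32() CSR helpers (not this driver's API); note that a production version must also avoid acknowledging the write-1-to-clear bits (prtena, prtconndet, prtenchng, prtovrcurrchng) during the read-modify-write, as the sketch does.

/* Hedged sketch: drive a port reset per the PrtRst rules above.
 * Needs <linux/delay.h> for msleep(); the accessors are assumed.
 */
static void example_port_reset(u64 hprt_address)
{
	union cvmx_usbcx_hprt hprt;

	hprt.u32 = octeon_usbc_read32(hprt_address);	/* assumed helper */
	hprt.s.prtena = 0;		/* W1C bits: writing 1 here would */
	hprt.s.prtconndet = 0;		/* disable the port or ack pending */
	hprt.s.prtenchng = 0;		/* change interrupts, so keep them */
	hprt.s.prtovrcurrchng = 0;	/* zero in the read-modify-write */

	hprt.s.prtrst = 1;		/* start the reset sequence */
	octeon_usbc_write32(hprt_address, hprt.u32);

	msleep(50);			/* >= 50 ms HS, >= 10 ms FS/LS */

	hprt.s.prtrst = 0;		/* application times and clears it */
	octeon_usbc_write32(hprt_address, hprt.u32);
}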
-
-/**
- * cvmx_usbc#_hptxfsiz
- *
- * Host Periodic Transmit FIFO Size Register (HPTXFSIZ)
- *
- * This register holds the size and the memory start address of the Periodic
- * TxFIFO, as shown in Figures 310 and 311.
- */
-union cvmx_usbcx_hptxfsiz {
- u32 u32;
- /**
- * struct cvmx_usbcx_hptxfsiz_s
- * @ptxfsize: Host Periodic TxFIFO Depth (PTxFSize)
- * This value is in terms of 32-bit words.
- * * Minimum value is 16
- * * Maximum value is 32768
- * @ptxfstaddr: Host Periodic TxFIFO Start Address (PTxFStAddr)
- */
- struct cvmx_usbcx_hptxfsiz_s {
- __BITFIELD_FIELD(u32 ptxfsize : 16,
- __BITFIELD_FIELD(u32 ptxfstaddr : 16,
- ;))
- } s;
-};
-
-/**
- * cvmx_usbc#_hptxsts
- *
- * Host Periodic Transmit FIFO/Queue Status Register (HPTXSTS)
- *
- * This read-only register contains the free space information for the Periodic
- * TxFIFO and the Periodic Transmit Request Queue.
- */
-union cvmx_usbcx_hptxsts {
- u32 u32;
- /**
- * struct cvmx_usbcx_hptxsts_s
- * @ptxqtop: Top of the Periodic Transmit Request Queue (PTxQTop)
- * This indicates the entry in the Periodic Tx Request Queue that
- * is currently being processed by the MAC.
- * This register is used for debugging.
- * * Bit [31]: Odd/Even (micro)frame
- * - 1'b0: send in even (micro)frame
- * - 1'b1: send in odd (micro)frame
- * * Bits [30:27]: Channel/endpoint number
- * * Bits [26:25]: Type
- * - 2'b00: IN/OUT
- * - 2'b01: Zero-length packet
- * - 2'b10: CSPLIT
- * - 2'b11: Disable channel command
- * * Bit [24]: Terminate (last entry for the selected
- * channel/endpoint)
- * @ptxqspcavail: Periodic Transmit Request Queue Space Available
- * (PTxQSpcAvail)
- * Indicates the number of free locations available to be written
- * in the Periodic Transmit Request Queue. This queue holds both
- * IN and OUT requests.
- * * 8'h0: Periodic Transmit Request Queue is full
- * * 8'h1: 1 location available
- * * 8'h2: 2 locations available
- * * 8'hn: n locations available (n = 0..8)
- * * Others: Reserved
- * @ptxfspcavail: Periodic Transmit Data FIFO Space Available
- * (PTxFSpcAvail)
- * Indicates the number of free locations available to be written
- * to in the Periodic TxFIFO.
- * Values are in terms of 32-bit words
- * * 16'h0: Periodic TxFIFO is full
- * * 16'h1: 1 word available
- * * 16'h2: 2 words available
- * * 16'hn: n words available (where n = 0..32768)
- * * 16'h8000: 32768 words available
- * * Others: Reserved
- */
- struct cvmx_usbcx_hptxsts_s {
- __BITFIELD_FIELD(u32 ptxqtop : 8,
- __BITFIELD_FIELD(u32 ptxqspcavail : 8,
- __BITFIELD_FIELD(u32 ptxfspcavail : 16,
- ;)))
- } s;
-};
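Because PTxQTop packs four subfields into the top byte of HPTXSTS, decoding it takes a few shifts. A minimal sketch using only the bit layout documented above (the helper name is illustrative, not driver API):

/* Hedged sketch: split the ptxqtop byte per the layout above.
 * Register bits [31:24] map onto ptxqtop bits [7:0].
 */
static void example_decode_ptxqtop(u32 hptxsts_value)
{
	union cvmx_usbcx_hptxsts sts;

	sts.u32 = hptxsts_value;
	pr_info("odd=%u chan=%u type=%u term=%u\n",
		(sts.s.ptxqtop >> 7) & 0x1,	/* odd/even (micro)frame */
		(sts.s.ptxqtop >> 3) & 0xf,	/* channel/endpoint number */
		(sts.s.ptxqtop >> 1) & 0x3,	/* 00 IN/OUT .. 11 disable */
		sts.s.ptxqtop & 0x1);		/* terminate entry */
}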
-
-/**
- * cvmx_usbn#_clk_ctl
- *
- * USBN_CLK_CTL = USBN's Clock Control
- *
- * This register is used to control the frequency of the hclk and the
- * hreset and phy_rst signals.
- */
-union cvmx_usbnx_clk_ctl {
- u64 u64;
- /**
- * struct cvmx_usbnx_clk_ctl_s
- * @divide2: The 'hclk' used by the USB subsystem is derived
- * from the eclk.
- * Also see the field DIVIDE. DIVIDE2<1> must currently
- * be zero because it is not implemented, so the maximum
- * ratio of eclk/hclk is currently 16.
- * The actual divide number for hclk is:
- * (DIVIDE2 + 1) * (DIVIDE + 1)
- * @hclk_rst: When this field is '0' the HCLK-DIVIDER used to
- * generate the hclk in the USB Subsystem is held
- * in reset. This bit must be set to '0' before
- * changing the value of DIVIDE in this register.
- * The reset to the HCLK_DIVIDER is also asserted
- * when core reset is asserted.
- * @p_x_on: Force USB-PHY on during suspend.
- * '1' USB-PHY XO block is powered-down during
- * suspend.
- * '0' USB-PHY XO block is powered-up during
- * suspend.
- * The value of this field must be set while POR is
- * active.
- * @p_rtype: PHY reference clock type
- * On CN50XX/CN52XX/CN56XX the values are:
- * '0' The USB-PHY uses a 12MHz crystal as a clock source
- * at the USB_XO and USB_XI pins.
- * '1' Reserved.
- * '2' The USB_PHY uses 12/24/48MHz 2.5V board clock at the
- * USB_XO pin. USB_XI should be tied to ground in this
- * case.
- * '3' Reserved.
- * On CN3xxx bits 14 and 15 are p_xenbn and p_rclk and values are:
- * '0' Reserved.
- * '1' Reserved.
- * '2' The PHY PLL uses the XO block output as a reference.
- * The XO block uses an external clock supplied on the
- * XO pin. USB_XI should be tied to ground for this
- * usage.
- * '3' The XO block uses the clock from a crystal.
- * @p_com_on: '0' Force USB-PHY XO Bias, Bandgap and PLL to
- * remain powered in Suspend Mode.
- * '1' The USB-PHY XO Bias, Bandgap and PLL are
- * powered down in suspend mode.
- * The value of this field must be set while POR is
- * active.
- * @p_c_sel: Phy clock speed select.
- * Selects the reference clock / crystal frequency.
- * '11': Reserved
- * '10': 48 MHz (reserved when a crystal is used)
- * '01': 24 MHz (reserved when a crystal is used)
- * '00': 12 MHz
- * The value of this field must be set while POR is
- * active.
- * NOTE: if a crystal is used as a reference clock,
- * this field must be set to '00' (12 MHz).
- * @cdiv_byp: Used to enable the bypass input to the USB_CLK_DIV.
- * @sd_mode: Scaledown mode for the USBC. Controls timing events
- * in the USBC; for normal operation this must be '0'.
- * @s_bist: Starts BIST on the hclk memories on the '0'
- * to '1' transition.
- * @por: Power On Reset for the PHY.
- * Resets all the PHY's registers and state machines.
- * @enable: When '1' allows the generation of the hclk. When
- * '0' the hclk will not be generated. See the DIVIDE
- * field of this register.
- * @prst: When this field is '0' the reset associated with
- * the phy_clk functionality in the USB Subsystem is
- * held in reset. This bit should not be set to '1'
- * until at least 6 clocks (hclk or phy_clk,
- * whichever is slower) have passed. Under normal
- * operation once this bit is set to '1' it should not
- * be set to '0'.
- * @hrst: When this field is '0' the reset associated with
- * the hclk functionality in the USB Subsystem is
- * held in reset. This bit should not be set to '1'
- * until 12ms after phy_clk is stable. Under normal
- * operation, once this bit is set to '1' it should
- * not be set to '0'.
- * @divide: The frequency of 'hclk' used by the USB subsystem
- * is the eclk frequency divided by the value of
- * (DIVIDE2 + 1) * (DIVIDE + 1), also see the field
- * DIVIDE2 of this register.
- * The hclk frequency must be less than 125MHz.
- * After writing a value to this field, software should
- * read the field back to confirm the value written.
- * The ENABLE field of this register must not be set
- * until after this field has been written and read back.
- */
- struct cvmx_usbnx_clk_ctl_s {
- __BITFIELD_FIELD(u64 reserved_20_63 : 44,
- __BITFIELD_FIELD(u64 divide2 : 2,
- __BITFIELD_FIELD(u64 hclk_rst : 1,
- __BITFIELD_FIELD(u64 p_x_on : 1,
- __BITFIELD_FIELD(u64 p_rtype : 2,
- __BITFIELD_FIELD(u64 p_com_on : 1,
- __BITFIELD_FIELD(u64 p_c_sel : 2,
- __BITFIELD_FIELD(u64 cdiv_byp : 1,
- __BITFIELD_FIELD(u64 sd_mode : 2,
- __BITFIELD_FIELD(u64 s_bist : 1,
- __BITFIELD_FIELD(u64 por : 1,
- __BITFIELD_FIELD(u64 enable : 1,
- __BITFIELD_FIELD(u64 prst : 1,
- __BITFIELD_FIELD(u64 hrst : 1,
- __BITFIELD_FIELD(u64 divide : 3,
- ;)))))))))))))))
- } s;
-};
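The DIVIDE, ENABLE and HCLK_RST descriptions above together dictate an ordering for reprogramming the hclk divider. A minimal sketch of that ordering only, assuming hypothetical octeon_usbn_read64()/octeon_usbn_write64() accessors; the real driver's bring-up path may interleave these steps with PHY reset handling.

/* Hedged sketch: reprogram DIVIDE per the ordering rules above. */
static void example_set_hclk_divide(u64 address, u64 new_divide)
{
	union cvmx_usbnx_clk_ctl ctl;

	ctl.u64 = octeon_usbn_read64(address);	/* assumed helper */
	ctl.s.hclk_rst = 0;		/* hold the HCLK divider in reset */
	octeon_usbn_write64(address, ctl.u64);

	ctl.s.divide = new_divide;	/* keep the resulting hclk < 125MHz */
	ctl.s.divide2 = 0;		/* DIVIDE2<1> must be zero; clear
					 * both bits here for simplicity */
	octeon_usbn_write64(address, ctl.u64);

	/* Read the field back before setting ENABLE, as required above. */
	ctl.u64 = octeon_usbn_read64(address);
	ctl.s.enable = 1;		/* allow hclk generation */
	octeon_usbn_write64(address, ctl.u64);

	ctl.s.hclk_rst = 1;		/* release the divider from reset */
	octeon_usbn_write64(address, ctl.u64);
}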
-
-/**
- * cvmx_usbn#_usbp_ctl_status
- *
- * USBN_USBP_CTL_STATUS = USBP Control And Status Register
- *
- * Contains general control and status information for the USBN block.
- */
-union cvmx_usbnx_usbp_ctl_status {
- u64 u64;
- /**
- * struct cvmx_usbnx_usbp_ctl_status_s
- * @txrisetune: HS Transmitter Rise/Fall Time Adjustment
- * @txvreftune: HS DC Voltage Level Adjustment
- * @txfslstune: FS/LS Source Impedance Adjustment
- * @txhsxvtune: Transmitter High-Speed Crossover Adjustment
- * @sqrxtune: Squelch Threshold Adjustment
- * @compdistune: Disconnect Threshold Adjustment
- * @otgtune: VBUS Valid Threshold Adjustment
- * @otgdisable: OTG Block Disable
- * @portreset: Per-Port Reset
- * @drvvbus: Drive VBUS
- * @lsbist: Low-Speed BIST Enable.
- * @fsbist: Full-Speed BIST Enable.
- * @hsbist: High-Speed BIST Enable.
- * @bist_done: PHY Bist Done.
- * Asserted at the end of the PHY BIST sequence.
- * @bist_err: PHY Bist Error.
- * Indicates an internal error was detected during
- * the BIST sequence.
- * @tdata_out: PHY Test Data Out.
- * Presents either internally generated signals or
- * test register contents, based upon the value of
- * test_data_out_sel.
- * @siddq: Drives the USBP (USB-PHY) SIDDQ input.
- * Normally should be set to zero.
- * When customers have no intent to use USB PHY
- * interface, they should:
- * - still provide 3.3V to USB_VDD33, and
- * - tie USB_REXT to 3.3V supply, and
- * - set USBN*_USBP_CTL_STATUS[SIDDQ]=1
- * @txpreemphasistune: HS Transmitter Pre-Emphasis Enable
- * @dma_bmode: When set to 1 the L2C DMA address will be updated
- * with byte-counts between packets. When set to 0
- * the L2C DMA address is incremented to the next
- * 4-byte aligned address after adding byte-count.
- * @usbc_end: Bigendian input to the USB Core. This should be
- * set to '0' for operation.
- * @usbp_bist: PHY BIST input. Cleared to '0' to run BIST on the USBP.
- * @tclk: PHY Test Clock, used to load TDATA_IN to the USBP.
- * @dp_pulld: PHY DP_PULLDOWN input to the USB-PHY.
- * This signal enables the pull-down resistance on
- * the D+ line. '1': pull-down resistance is connected
- * to D+. '0': pull-down resistance is not connected
- * to D+. When an A/B device is acting as a host
- * (downstream-facing port), dp_pulldown and
- * dm_pulldown are enabled. This must not toggle
- * during normal operation.
- * @dm_pulld: PHY DM_PULLDOWN input to the USB-PHY.
- * This signal enables the pull-down resistance on
- * the D- line. '1': pull-down resistance is connected
- * to D-. '0': pull-down resistance is not connected
- * to D-. When an A/B device is acting as a host
- * (downstream-facing port), dp_pulldown and
- * dm_pulldown are enabled. This must not toggle
- * during normal operation.
- * @hst_mode: When '0' the USB is acting as HOST, when '1'
- * USB is acting as device. This field needs to be
- * set while the USB is in reset.
- * @tuning: Transmitter Tuning for High-Speed Operation.
- * Tunes the current supply and rise/fall output
- * times for high-speed operation.
- * [20:19] == 11: Current supply increased
- * approximately 9%
- * [20:19] == 10: Current supply increased
- * approximately 4.5%
- * [20:19] == 01: Design default.
- * [20:19] == 00: Current supply decreased
- * approximately 4.5%
- * [22:21] == 11: Rise and fall times are increased.
- * [22:21] == 10: Design default.
- * [22:21] == 01: Rise and fall times are decreased.
- * [22:21] == 00: Rise and fall times are decreased
- * further as compared to the 01 setting.
- * @tx_bs_enh: Transmit Bit Stuffing on [15:8].
- * Enables or disables bit stuffing on data[15:8]
- * when bit-stuffing is enabled.
- * @tx_bs_en: Transmit Bit Stuffing on [7:0].
- * Enables or disables bit stuffing on data[7:0]
- * when bit-stuffing is enabled.
- * @loop_enb: PHY Loopback Test Enable.
- * '1': During data transmission the receive is
- * enabled.
- * '0': During data transmission the receive is
- * disabled.
- * Must be '0' for normal operation.
- * @vtest_enb: Analog Test Pin Enable.
- * '1' The PHY's analog_test pin is enabled for the
- * input and output of applicable analog test signals.
- * '0' The analog_test pin is disabled.
- * @bist_enb: Built-In Self Test Enable.
- * Used to activate BIST in the PHY.
- * @tdata_sel: Test Data Out Select.
- * '1' test_data_out[3:0] (PHY) register contents
- * are output. '0' internally generated signals are
- * output.
- * @taddr_in: Mode Address for Test Interface.
- * Specifies the register address for writing to or
- * reading from the PHY test interface register.
- * @tdata_in: Internal Testing Register Input Data and Select
- * This is a test bus. Data is present on [3:0],
- * and its corresponding select (enable) is present
- * on bits [7:4].
- * @ate_reset: Reset input from automatic test equipment.
- * This is a test signal. When the USB Core is
- * powered up (not in Suspend Mode), an automatic
- * tester can use this to disable phy_clock and
- * free_clk, then re-enable them with an aligned
- * phase.
- * '1': The phy_clk and free_clk outputs are
- * disabled. '0': The phy_clk and free_clk outputs
- * are available within a specific period after
- * de-assertion.
- */
- struct cvmx_usbnx_usbp_ctl_status_s {
- __BITFIELD_FIELD(u64 txrisetune : 1,
- __BITFIELD_FIELD(u64 txvreftune : 4,
- __BITFIELD_FIELD(u64 txfslstune : 4,
- __BITFIELD_FIELD(u64 txhsxvtune : 2,
- __BITFIELD_FIELD(u64 sqrxtune : 3,
- __BITFIELD_FIELD(u64 compdistune : 3,
- __BITFIELD_FIELD(u64 otgtune : 3,
- __BITFIELD_FIELD(u64 otgdisable : 1,
- __BITFIELD_FIELD(u64 portreset : 1,
- __BITFIELD_FIELD(u64 drvvbus : 1,
- __BITFIELD_FIELD(u64 lsbist : 1,
- __BITFIELD_FIELD(u64 fsbist : 1,
- __BITFIELD_FIELD(u64 hsbist : 1,
- __BITFIELD_FIELD(u64 bist_done : 1,
- __BITFIELD_FIELD(u64 bist_err : 1,
- __BITFIELD_FIELD(u64 tdata_out : 4,
- __BITFIELD_FIELD(u64 siddq : 1,
- __BITFIELD_FIELD(u64 txpreemphasistune : 1,
- __BITFIELD_FIELD(u64 dma_bmode : 1,
- __BITFIELD_FIELD(u64 usbc_end : 1,
- __BITFIELD_FIELD(u64 usbp_bist : 1,
- __BITFIELD_FIELD(u64 tclk : 1,
- __BITFIELD_FIELD(u64 dp_pulld : 1,
- __BITFIELD_FIELD(u64 dm_pulld : 1,
- __BITFIELD_FIELD(u64 hst_mode : 1,
- __BITFIELD_FIELD(u64 tuning : 4,
- __BITFIELD_FIELD(u64 tx_bs_enh : 1,
- __BITFIELD_FIELD(u64 tx_bs_en : 1,
- __BITFIELD_FIELD(u64 loop_enb : 1,
- __BITFIELD_FIELD(u64 vtest_enb : 1,
- __BITFIELD_FIELD(u64 bist_enb : 1,
- __BITFIELD_FIELD(u64 tdata_sel : 1,
- __BITFIELD_FIELD(u64 taddr_in : 4,
- __BITFIELD_FIELD(u64 tdata_in : 8,
- __BITFIELD_FIELD(u64 ate_reset : 1,
- ;)))))))))))))))))))))))))))))))))))
- } s;
-};
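The SIDDQ note above reduces to a single bit for boards that never use the USB PHY; the supply and USB_REXT tie-offs are board wiring, not software. A minimal sketch, assuming the CVMX_USBNX_USBP_CTL_STATUS(0) address macro from the cvmx register definitions:

/* Hedged sketch: park an unused USB PHY per the SIDDQ note above.
 * Board requirements (3.3V on USB_VDD33, USB_REXT tied to the 3.3V
 * supply) must already be met in hardware.
 */
static void example_park_unused_phy(void)
{
	union cvmx_usbnx_usbp_ctl_status st;

	st.u64 = cvmx_read_csr(CVMX_USBNX_USBP_CTL_STATUS(0));
	st.s.siddq = 1;		/* normally zero; 1 only if PHY unused */
	cvmx_write_csr(CVMX_USBNX_USBP_CTL_STATUS(0), st.u64);
}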
-
-#endif /* __OCTEON_HCD_H__ */
diff --git a/drivers/staging/octeon/Kconfig b/drivers/staging/octeon/Kconfig
deleted file mode 100644
index e7f4ddcc1361..000000000000
--- a/drivers/staging/octeon/Kconfig
+++ /dev/null
@@ -1,16 +0,0 @@
-# SPDX-License-Identifier: GPL-2.0
-config OCTEON_ETHERNET
- tristate "Cavium Networks Octeon Ethernet support"
- depends on CAVIUM_OCTEON_SOC || COMPILE_TEST
- depends on NETDEVICES
- depends on BROKEN
- select PHYLIB
- select MDIO_OCTEON
- help
-	  This driver supports the builtin ethernet ports on Cavium
-	  Networks' products in the Octeon family, including the CN3XXX
-	  and CN5XXX Octeon processors.
-
- To compile this driver as a module, choose M here. The module
- will be called octeon-ethernet.
-
diff --git a/drivers/staging/octeon/Makefile b/drivers/staging/octeon/Makefile
deleted file mode 100644
index 3887cf5f1e84..000000000000
--- a/drivers/staging/octeon/Makefile
+++ /dev/null
@@ -1,19 +0,0 @@
-# SPDX-License-Identifier: GPL-2.0
-#
-# Copyright (C) 2005-2009 Cavium Networks
-#
-
-#
-# Makefile for Cavium OCTEON on-board ethernet driver
-#
-
-obj-${CONFIG_OCTEON_ETHERNET} := octeon-ethernet.o
-
-octeon-ethernet-y := ethernet.o
-octeon-ethernet-y += ethernet-mdio.o
-octeon-ethernet-y += ethernet-mem.o
-octeon-ethernet-y += ethernet-rgmii.o
-octeon-ethernet-y += ethernet-rx.o
-octeon-ethernet-y += ethernet-sgmii.o
-octeon-ethernet-y += ethernet-spi.o
-octeon-ethernet-y += ethernet-tx.o
diff --git a/drivers/staging/octeon/TODO b/drivers/staging/octeon/TODO
deleted file mode 100644
index 67a0a1f6b922..000000000000
--- a/drivers/staging/octeon/TODO
+++ /dev/null
@@ -1,9 +0,0 @@
-This driver is functional and supports Ethernet on OCTEON+/OCTEON2/OCTEON3
-chips at least up to CN7030.
-
-TODO:
- - general code review and clean up
- - make driver self-contained instead of being split between staging and
- arch/mips/cavium-octeon.
-
-Contact: Aaro Koskinen <aaro.koskinen@iki.fi>
diff --git a/drivers/staging/octeon/ethernet-defines.h b/drivers/staging/octeon/ethernet-defines.h
deleted file mode 100644
index ef9e767b0e2e..000000000000
--- a/drivers/staging/octeon/ethernet-defines.h
+++ /dev/null
@@ -1,40 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/*
- * This file is based on code from OCTEON SDK by Cavium Networks.
- *
- * Copyright (c) 2003-2007 Cavium Networks
- */
-
-/*
- * A few defines are used to control the operation of this driver:
- * USE_ASYNC_IOBDMA
- * Use asynchronous IO access to hardware. This uses Octeon's asynchronous
- * IOBDMAs to issue IO accesses without stalling. Set this to zero
- * to disable it. Note that IOBDMAs require CVMSEG.
- * REUSE_SKBUFFS_WITHOUT_FREE
- * Allows the TX path to free an skbuff into the FPA hardware pool. This
- * can significantly improve performance for forwarding and bridging, but
- * may be somewhat dangerous. Checks are made, but if any buffer is reused
- * without the proper Linux cleanup, the networking stack may have very
- * bizarre bugs.
- */
-#ifndef __ETHERNET_DEFINES_H__
-#define __ETHERNET_DEFINES_H__
-
-#ifdef CONFIG_NETFILTER
-#define REUSE_SKBUFFS_WITHOUT_FREE 0
-#else
-#define REUSE_SKBUFFS_WITHOUT_FREE 1
-#endif
-
-#define USE_ASYNC_IOBDMA (CONFIG_CAVIUM_OCTEON_CVMSEG_SIZE > 0)
-
-/* Maximum number of SKBs to try to free per xmit packet. */
-#define MAX_OUT_QUEUE_DEPTH 1000
-
-#define FAU_TOTAL_TX_TO_CLEAN (CVMX_FAU_REG_END - sizeof(u32))
-#define FAU_NUM_PACKET_BUFFERS_TO_FREE (FAU_TOTAL_TX_TO_CLEAN - sizeof(u32))
-
-#define TOTAL_NUMBER_OF_PORTS (CVMX_PIP_NUM_INPUT_PORTS + 1)
-
-#endif /* __ETHERNET_DEFINES_H__ */
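Both knobs above are compile-time constants, so an ordinary `if` on them is enough; the compiler eliminates the dead branch. A minimal sketch of the intended usage pattern (the function is illustrative, not part of this driver):

/* Hedged sketch: consuming the compile-time knobs above. */
static void example_issue_work_request(void)
{
	if (USE_ASYNC_IOBDMA) {
		/* Non-stalling path: an IOBDMA work request would be
		 * queued here (requires CVMSEG, per the note above).
		 */
		pr_debug("async IOBDMA path compiled in\n");
	} else {
		/* Stalling fallback: a synchronous CSR-based request. */
		pr_debug("synchronous path compiled in\n");
	}
}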
diff --git a/drivers/staging/octeon/ethernet-mdio.c b/drivers/staging/octeon/ethernet-mdio.c
deleted file mode 100644
index c798672d61b2..000000000000
--- a/drivers/staging/octeon/ethernet-mdio.c
+++ /dev/null
@@ -1,178 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * This file is based on code from OCTEON SDK by Cavium Networks.
- *
- * Copyright (c) 2003-2007 Cavium Networks
- */
-
-#include <linux/kernel.h>
-#include <linux/ethtool.h>
-#include <linux/phy.h>
-#include <linux/ratelimit.h>
-#include <linux/of_mdio.h>
-#include <generated/utsrelease.h>
-#include <net/dst.h>
-
-#include "octeon-ethernet.h"
-#include "ethernet-defines.h"
-#include "ethernet-mdio.h"
-#include "ethernet-util.h"
-
-static void cvm_oct_get_drvinfo(struct net_device *dev,
- struct ethtool_drvinfo *info)
-{
- strlcpy(info->driver, KBUILD_MODNAME, sizeof(info->driver));
- strlcpy(info->version, UTS_RELEASE, sizeof(info->version));
- strlcpy(info->bus_info, "Builtin", sizeof(info->bus_info));
-}
-
-static int cvm_oct_nway_reset(struct net_device *dev)
-{
- if (!capable(CAP_NET_ADMIN))
- return -EPERM;
-
- if (dev->phydev)
- return phy_start_aneg(dev->phydev);
-
- return -EINVAL;
-}
-
-const struct ethtool_ops cvm_oct_ethtool_ops = {
- .get_drvinfo = cvm_oct_get_drvinfo,
- .nway_reset = cvm_oct_nway_reset,
- .get_link = ethtool_op_get_link,
- .get_link_ksettings = phy_ethtool_get_link_ksettings,
- .set_link_ksettings = phy_ethtool_set_link_ksettings,
-};
-
-/**
- * cvm_oct_ioctl - IOCTL support for PHY control
- * @dev: Device to change
- * @rq: the request
- * @cmd: the command
- *
- * Returns zero on success.
- */
-int cvm_oct_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
-{
- if (!netif_running(dev))
- return -EINVAL;
-
- if (!dev->phydev)
- return -EINVAL;
-
- return phy_mii_ioctl(dev->phydev, rq, cmd);
-}
-
-void cvm_oct_note_carrier(struct octeon_ethernet *priv,
- union cvmx_helper_link_info li)
-{
- if (li.s.link_up) {
- pr_notice_ratelimited("%s: %u Mbps %s duplex, port %d, queue %d\n",
- netdev_name(priv->netdev), li.s.speed,
- (li.s.full_duplex) ? "Full" : "Half",
- priv->port, priv->queue);
- } else {
- pr_notice_ratelimited("%s: Link down\n",
- netdev_name(priv->netdev));
- }
-}
-
-void cvm_oct_adjust_link(struct net_device *dev)
-{
- struct octeon_ethernet *priv = netdev_priv(dev);
- union cvmx_helper_link_info link_info;
-
- link_info.u64 = 0;
- link_info.s.link_up = dev->phydev->link ? 1 : 0;
- link_info.s.full_duplex = dev->phydev->duplex ? 1 : 0;
- link_info.s.speed = dev->phydev->speed;
- priv->link_info = link_info.u64;
-
- /*
- * The polling task needs to know about link status changes.
- */
- if (priv->poll)
- priv->poll(dev);
-
- if (priv->last_link != dev->phydev->link) {
- priv->last_link = dev->phydev->link;
- cvmx_helper_link_set(priv->port, link_info);
- cvm_oct_note_carrier(priv, link_info);
- }
-}
-
-int cvm_oct_common_stop(struct net_device *dev)
-{
- struct octeon_ethernet *priv = netdev_priv(dev);
- int interface = INTERFACE(priv->port);
- union cvmx_helper_link_info link_info;
- union cvmx_gmxx_prtx_cfg gmx_cfg;
- int index = INDEX(priv->port);
-
- gmx_cfg.u64 = cvmx_read_csr(CVMX_GMXX_PRTX_CFG(index, interface));
- gmx_cfg.s.en = 0;
- cvmx_write_csr(CVMX_GMXX_PRTX_CFG(index, interface), gmx_cfg.u64);
-
- priv->poll = NULL;
-
- if (dev->phydev)
- phy_disconnect(dev->phydev);
-
- if (priv->last_link) {
- link_info.u64 = 0;
- priv->last_link = 0;
-
- cvmx_helper_link_set(priv->port, link_info);
- cvm_oct_note_carrier(priv, link_info);
- }
- return 0;
-}
-
-/**
- * cvm_oct_phy_setup_device - setup the PHY
- *
- * @dev: Device to setup
- *
- * Returns zero on success, negative on failure.
- */
-int cvm_oct_phy_setup_device(struct net_device *dev)
-{
- struct octeon_ethernet *priv = netdev_priv(dev);
- struct device_node *phy_node;
- struct phy_device *phydev = NULL;
-
- if (!priv->of_node)
- goto no_phy;
-
- phy_node = of_parse_phandle(priv->of_node, "phy-handle", 0);
- if (!phy_node && of_phy_is_fixed_link(priv->of_node)) {
- int rc;
-
- rc = of_phy_register_fixed_link(priv->of_node);
- if (rc)
- return rc;
-
- phy_node = of_node_get(priv->of_node);
- }
- if (!phy_node)
- goto no_phy;
-
- phydev = of_phy_connect(dev, phy_node, cvm_oct_adjust_link, 0,
- priv->phy_mode);
- of_node_put(phy_node);
-
- if (!phydev)
- return -ENODEV;
-
- priv->last_link = 0;
- phy_start(phydev);
-
- return 0;
-no_phy:
- /* If there is no phy, assume a direct MAC connection and that
- * the link is up.
- */
- netif_carrier_on(dev);
- return 0;
-}
diff --git a/drivers/staging/octeon/ethernet-mdio.h b/drivers/staging/octeon/ethernet-mdio.h
deleted file mode 100644
index e3771d48c49b..000000000000
--- a/drivers/staging/octeon/ethernet-mdio.h
+++ /dev/null
@@ -1,28 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/*
- * This file is based on code from OCTEON SDK by Cavium Networks.
- *
- * Copyright (c) 2003-2007 Cavium Networks
- */
-
-#include <linux/module.h>
-#include <linux/kernel.h>
-#include <linux/netdevice.h>
-#include <linux/etherdevice.h>
-#include <linux/ip.h>
-#include <linux/string.h>
-#include <linux/ethtool.h>
-#include <linux/seq_file.h>
-#include <linux/proc_fs.h>
-#include <net/dst.h>
-#ifdef CONFIG_XFRM
-#include <linux/xfrm.h>
-#include <net/xfrm.h>
-#endif /* CONFIG_XFRM */
-
-extern const struct ethtool_ops cvm_oct_ethtool_ops;
-
-void octeon_mdiobus_force_mod_depencency(void);
-
-int cvm_oct_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
-int cvm_oct_phy_setup_device(struct net_device *dev);
diff --git a/drivers/staging/octeon/ethernet-mem.c b/drivers/staging/octeon/ethernet-mem.c
deleted file mode 100644
index 532594957ebc..000000000000
--- a/drivers/staging/octeon/ethernet-mem.c
+++ /dev/null
@@ -1,154 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * This file is based on code from OCTEON SDK by Cavium Networks.
- *
- * Copyright (c) 2003-2010 Cavium Networks
- */
-
-#include <linux/kernel.h>
-#include <linux/netdevice.h>
-#include <linux/slab.h>
-
-#include "octeon-ethernet.h"
-#include "ethernet-mem.h"
-#include "ethernet-defines.h"
-
-/**
- * cvm_oct_fill_hw_skbuff - fill the supplied hardware pool with skbuffs
- * @pool: Pool to allocate an skbuff for
- * @size: Size of the buffer needed for the pool
- * @elements: Number of buffers to allocate
- *
- * Returns the actual number of buffers allocated.
- */
-static int cvm_oct_fill_hw_skbuff(int pool, int size, int elements)
-{
- int freed = elements;
-
- while (freed) {
- struct sk_buff *skb = dev_alloc_skb(size + 256);
-
- if (unlikely(!skb))
- break;
- skb_reserve(skb, 256 - (((unsigned long)skb->data) & 0x7f));
- *(struct sk_buff **)(skb->data - sizeof(void *)) = skb;
- cvmx_fpa_free(skb->data, pool, size / 128);
- freed--;
- }
- return elements - freed;
-}
-
-/**
- * cvm_oct_free_hw_skbuff - free hardware pool skbuffs
- * @pool: Pool to free skbuffs from
- * @size: Size of each buffer in the pool
- * @elements: Number of buffers that should be in the pool
- */
-static void cvm_oct_free_hw_skbuff(int pool, int size, int elements)
-{
- char *memory;
-
- do {
- memory = cvmx_fpa_alloc(pool);
- if (memory) {
- struct sk_buff *skb =
- *(struct sk_buff **)(memory - sizeof(void *));
- elements--;
- dev_kfree_skb(skb);
- }
- } while (memory);
-
- if (elements < 0)
- pr_warn("Freeing of pool %u had too many skbuffs (%d)\n",
- pool, elements);
- else if (elements > 0)
- pr_warn("Freeing of pool %u is missing %d skbuffs\n",
- pool, elements);
-}
-
-/**
- * cvm_oct_fill_hw_memory - fill a hardware pool with memory.
- * @pool: Pool to populate
- * @size: Size of each buffer in the pool
- * @elements: Number of buffers to allocate
- *
- * Returns the actual number of buffers allocated.
- */
-static int cvm_oct_fill_hw_memory(int pool, int size, int elements)
-{
- char *memory;
- char *fpa;
- int freed = elements;
-
- while (freed) {
- /*
- * FPA memory must be 128 byte aligned. Since we are
- * aligning we need to save the original pointer so we
- * can feed it to kfree when the memory is returned to
- * the kernel.
- *
- * We allocate an extra 256 bytes to allow for
- * alignment and space for the original pointer saved
- * just before the block.
- */
- memory = kmalloc(size + 256, GFP_ATOMIC);
- if (unlikely(!memory)) {
- pr_warn("Unable to allocate %u bytes for FPA pool %d\n",
- elements * size, pool);
- break;
- }
- fpa = (char *)(((unsigned long)memory + 256) & ~0x7fUL);
- *((char **)fpa - 1) = memory;
- cvmx_fpa_free(fpa, pool, 0);
- freed--;
- }
- return elements - freed;
-}
-
-/**
- * cvm_oct_free_hw_memory - Free memory allocated by cvm_oct_fill_hw_memory
- * @pool: FPA pool to free
- * @size: Size of each buffer in the pool
- * @elements: Number of buffers that should be in the pool
- */
-static void cvm_oct_free_hw_memory(int pool, int size, int elements)
-{
- char *memory;
- char *fpa;
-
- do {
- fpa = cvmx_fpa_alloc(pool);
- if (fpa) {
- elements--;
- fpa = (char *)phys_to_virt(cvmx_ptr_to_phys(fpa));
- memory = *((char **)fpa - 1);
- kfree(memory);
- }
- } while (fpa);
-
- if (elements < 0)
- pr_warn("Freeing of pool %u had too many buffers (%d)\n",
- pool, elements);
- else if (elements > 0)
- pr_warn("Warning: Freeing of pool %u is missing %d buffers\n",
- pool, elements);
-}
-
-int cvm_oct_mem_fill_fpa(int pool, int size, int elements)
-{
- int freed;
-
- if (pool == CVMX_FPA_PACKET_POOL)
- freed = cvm_oct_fill_hw_skbuff(pool, size, elements);
- else
- freed = cvm_oct_fill_hw_memory(pool, size, elements);
- return freed;
-}
-
-void cvm_oct_mem_empty_fpa(int pool, int size, int elements)
-{
- if (pool == CVMX_FPA_PACKET_POOL)
- cvm_oct_free_hw_skbuff(pool, size, elements);
- else
- cvm_oct_free_hw_memory(pool, size, elements);
-}
diff --git a/drivers/staging/octeon/ethernet-mem.h b/drivers/staging/octeon/ethernet-mem.h
deleted file mode 100644
index 692dcdb7154d..000000000000
--- a/drivers/staging/octeon/ethernet-mem.h
+++ /dev/null
@@ -1,9 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/*
- * This file is based on code from OCTEON SDK by Cavium Networks.
- *
- * Copyright (c) 2003-2007 Cavium Networks
- */
-
-int cvm_oct_mem_fill_fpa(int pool, int size, int elements);
-void cvm_oct_mem_empty_fpa(int pool, int size, int elements);
diff --git a/drivers/staging/octeon/ethernet-rgmii.c b/drivers/staging/octeon/ethernet-rgmii.c
deleted file mode 100644
index 0c4fac31540a..000000000000
--- a/drivers/staging/octeon/ethernet-rgmii.c
+++ /dev/null
@@ -1,158 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * This file is based on code from OCTEON SDK by Cavium Networks.
- *
- * Copyright (c) 2003-2007 Cavium Networks
- */
-
-#include <linux/kernel.h>
-#include <linux/netdevice.h>
-#include <linux/interrupt.h>
-#include <linux/phy.h>
-#include <linux/ratelimit.h>
-#include <net/dst.h>
-
-#include "octeon-ethernet.h"
-#include "ethernet-defines.h"
-#include "ethernet-util.h"
-#include "ethernet-mdio.h"
-
-static DEFINE_SPINLOCK(global_register_lock);
-
-static void cvm_oct_set_hw_preamble(struct octeon_ethernet *priv, bool enable)
-{
- union cvmx_gmxx_rxx_frm_ctl gmxx_rxx_frm_ctl;
- union cvmx_ipd_sub_port_fcs ipd_sub_port_fcs;
- union cvmx_gmxx_rxx_int_reg gmxx_rxx_int_reg;
- int interface = INTERFACE(priv->port);
- int index = INDEX(priv->port);
-
- /* Set preamble checking. */
- gmxx_rxx_frm_ctl.u64 = cvmx_read_csr(CVMX_GMXX_RXX_FRM_CTL(index,
- interface));
- gmxx_rxx_frm_ctl.s.pre_chk = enable;
- cvmx_write_csr(CVMX_GMXX_RXX_FRM_CTL(index, interface),
- gmxx_rxx_frm_ctl.u64);
-
- /* Set FCS stripping. */
- ipd_sub_port_fcs.u64 = cvmx_read_csr(CVMX_IPD_SUB_PORT_FCS);
- if (enable)
- ipd_sub_port_fcs.s.port_bit |= 1ull << priv->port;
- else
- ipd_sub_port_fcs.s.port_bit &=
- 0xffffffffull ^ (1ull << priv->port);
- cvmx_write_csr(CVMX_IPD_SUB_PORT_FCS, ipd_sub_port_fcs.u64);
-
- /* Clear any error bits. */
- gmxx_rxx_int_reg.u64 = cvmx_read_csr(CVMX_GMXX_RXX_INT_REG(index,
- interface));
- cvmx_write_csr(CVMX_GMXX_RXX_INT_REG(index, interface),
- gmxx_rxx_int_reg.u64);
-}
-
-static void cvm_oct_check_preamble_errors(struct net_device *dev)
-{
- struct octeon_ethernet *priv = netdev_priv(dev);
- union cvmx_helper_link_info link_info;
- unsigned long flags;
-
- link_info.u64 = priv->link_info;
-
- /*
- * Take the global register lock since we are going to
- * touch registers that affect more than one port.
- */
- spin_lock_irqsave(&global_register_lock, flags);
-
- if (link_info.s.speed == 10 && priv->last_speed == 10) {
- /*
- * Read the GMXX_RXX_INT_REG[PCTERR] bit and see if we are
- * getting preamble errors.
- */
- int interface = INTERFACE(priv->port);
- int index = INDEX(priv->port);
- union cvmx_gmxx_rxx_int_reg gmxx_rxx_int_reg;
-
- gmxx_rxx_int_reg.u64 = cvmx_read_csr(CVMX_GMXX_RXX_INT_REG
- (index, interface));
- if (gmxx_rxx_int_reg.s.pcterr) {
- /*
- * We are getting preamble errors at 10Mbps. Most
- * likely the PHY is giving us packets with misaligned
- * preambles. In order to get these packets we need to
- * disable preamble checking and do it in software.
- */
- cvm_oct_set_hw_preamble(priv, false);
- printk_ratelimited("%s: Using 10Mbps with software preamble removal\n",
- dev->name);
- }
- } else {
- /*
-		 * Since the 10Mbps preamble workaround is allowed, we need to
-		 * enable preamble checking, FCS stripping, and clear error
-		 * bits on every speed change. If errors occur during 10Mbps
-		 * operation, the code above will disable them again.
- */
- if (priv->last_speed != link_info.s.speed)
- cvm_oct_set_hw_preamble(priv, true);
- priv->last_speed = link_info.s.speed;
- }
- spin_unlock_irqrestore(&global_register_lock, flags);
-}
-
-static void cvm_oct_rgmii_poll(struct net_device *dev)
-{
- struct octeon_ethernet *priv = netdev_priv(dev);
- union cvmx_helper_link_info link_info;
- bool status_change;
-
- link_info = cvmx_helper_link_get(priv->port);
- if (priv->link_info != link_info.u64 &&
- cvmx_helper_link_set(priv->port, link_info))
- link_info.u64 = priv->link_info;
- status_change = priv->link_info != link_info.u64;
- priv->link_info = link_info.u64;
-
- cvm_oct_check_preamble_errors(dev);
-
- if (likely(!status_change))
- return;
-
- /* Tell core. */
- if (link_info.s.link_up) {
- if (!netif_carrier_ok(dev))
- netif_carrier_on(dev);
- } else if (netif_carrier_ok(dev)) {
- netif_carrier_off(dev);
- }
- cvm_oct_note_carrier(priv, link_info);
-}
-
-int cvm_oct_rgmii_open(struct net_device *dev)
-{
- struct octeon_ethernet *priv = netdev_priv(dev);
- int ret;
-
- ret = cvm_oct_common_open(dev, cvm_oct_rgmii_poll);
- if (ret)
- return ret;
-
- if (dev->phydev) {
- /*
-		 * In phydev mode, we still need periodic polling for the
- * preamble error checking, and we also need to call this
- * function on every link state change.
- *
- * Only true RGMII ports need to be polled. In GMII mode, port
-		 * 0 is really an RGMII port.
- */
- if ((priv->imode == CVMX_HELPER_INTERFACE_MODE_GMII &&
- priv->port == 0) ||
- (priv->imode == CVMX_HELPER_INTERFACE_MODE_RGMII)) {
- priv->poll = cvm_oct_check_preamble_errors;
- cvm_oct_check_preamble_errors(dev);
- }
- }
-
- return 0;
-}
diff --git a/drivers/staging/octeon/ethernet-rx.c b/drivers/staging/octeon/ethernet-rx.c
deleted file mode 100644
index 2c16230f993c..000000000000
--- a/drivers/staging/octeon/ethernet-rx.c
+++ /dev/null
@@ -1,538 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * This file is based on code from OCTEON SDK by Cavium Networks.
- *
- * Copyright (c) 2003-2010 Cavium Networks
- */
-
-#include <linux/module.h>
-#include <linux/kernel.h>
-#include <linux/cache.h>
-#include <linux/cpumask.h>
-#include <linux/netdevice.h>
-#include <linux/etherdevice.h>
-#include <linux/ip.h>
-#include <linux/string.h>
-#include <linux/prefetch.h>
-#include <linux/ratelimit.h>
-#include <linux/smp.h>
-#include <linux/interrupt.h>
-#include <net/dst.h>
-#ifdef CONFIG_XFRM
-#include <linux/xfrm.h>
-#include <net/xfrm.h>
-#endif /* CONFIG_XFRM */
-
-#include "octeon-ethernet.h"
-#include "ethernet-defines.h"
-#include "ethernet-mem.h"
-#include "ethernet-rx.h"
-#include "ethernet-util.h"
-
-static atomic_t oct_rx_ready = ATOMIC_INIT(0);
-
-static struct oct_rx_group {
- int irq;
- int group;
- struct napi_struct napi;
-} oct_rx_group[16];
-
-/**
- * cvm_oct_do_interrupt - interrupt handler.
- * @irq: Interrupt number.
- * @napi_id: Cookie to identify the NAPI instance.
- *
- * The interrupt occurs whenever the POW has packets in our group.
- *
- */
-static irqreturn_t cvm_oct_do_interrupt(int irq, void *napi_id)
-{
- /* Disable the IRQ and start napi_poll. */
- disable_irq_nosync(irq);
- napi_schedule(napi_id);
-
- return IRQ_HANDLED;
-}
-
-/**
- * cvm_oct_check_rcv_error - process receive errors
- * @work: Work queue entry pointing to the packet.
- *
- * Returns non-zero if the packet was dropped, zero otherwise.
- */
-static inline int cvm_oct_check_rcv_error(struct cvmx_wqe *work)
-{
- int port;
-
- if (octeon_has_feature(OCTEON_FEATURE_PKND))
- port = work->word0.pip.cn68xx.pknd;
- else
- port = work->word1.cn38xx.ipprt;
-
- if ((work->word2.snoip.err_code == 10) && (work->word1.len <= 64)) {
- /*
- * Ignore length errors on min size packets. Some
- * equipment incorrectly pads packets to 64+4FCS
- * instead of 60+4FCS. Note these packets still get
- * counted as frame errors.
- */
- } else if (work->word2.snoip.err_code == 5 ||
- work->word2.snoip.err_code == 7) {
- /*
- * We received a packet with either an alignment error
- * or a FCS error. This may be signalling that we are
- * running 10Mbps with GMXX_RXX_FRM_CTL[PRE_CHK]
- * off. If this is the case we need to parse the
-		 * packet to determine if we can remove a non-spec
- * preamble and generate a correct packet.
- */
- int interface = cvmx_helper_get_interface_num(port);
- int index = cvmx_helper_get_interface_index_num(port);
- union cvmx_gmxx_rxx_frm_ctl gmxx_rxx_frm_ctl;
-
- gmxx_rxx_frm_ctl.u64 =
- cvmx_read_csr(CVMX_GMXX_RXX_FRM_CTL(index, interface));
- if (gmxx_rxx_frm_ctl.s.pre_chk == 0) {
- u8 *ptr =
- cvmx_phys_to_ptr(work->packet_ptr.s.addr);
- int i = 0;
-
- while (i < work->word1.len - 1) {
- if (*ptr != 0x55)
- break;
- ptr++;
- i++;
- }
-
- if (*ptr == 0xd5) {
- /* Port received 0xd5 preamble */
- work->packet_ptr.s.addr += i + 1;
- work->word1.len -= i + 5;
- } else if ((*ptr & 0xf) == 0xd) {
- /* Port received 0xd preamble */
- work->packet_ptr.s.addr += i;
- work->word1.len -= i + 4;
- for (i = 0; i < work->word1.len; i++) {
- *ptr =
- ((*ptr & 0xf0) >> 4) |
- ((*(ptr + 1) & 0xf) << 4);
- ptr++;
- }
- } else {
- printk_ratelimited("Port %d unknown preamble, packet dropped\n",
- port);
- cvm_oct_free_work(work);
- return 1;
- }
- }
- } else {
- printk_ratelimited("Port %d receive error code %d, packet dropped\n",
- port, work->word2.snoip.err_code);
- cvm_oct_free_work(work);
- return 1;
- }
-
- return 0;
-}
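The 0xd branch above shifts the whole payload by half a byte. Restated as a stand-alone helper with the same nibble arithmetic (a minimal sketch, not driver API):

/* Hedged sketch: the 4-bit realignment performed above. Each output
 * byte is rebuilt from the high nibble of byte i and the low nibble
 * of byte i + 1, e.g. input {0x12, 0x34} yields 0x41 as the first
 * output byte.
 */
static void example_nibble_realign(u8 *ptr, int len)
{
	int i;

	for (i = 0; i < len; i++) {
		*ptr = ((*ptr & 0xf0) >> 4) | ((*(ptr + 1) & 0xf) << 4);
		ptr++;
	}
}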
-
-static void copy_segments_to_skb(struct cvmx_wqe *work, struct sk_buff *skb)
-{
- int segments = work->word2.s.bufs;
- union cvmx_buf_ptr segment_ptr = work->packet_ptr;
- int len = work->word1.len;
- int segment_size;
-
- while (segments--) {
- union cvmx_buf_ptr next_ptr;
-
- next_ptr = *(union cvmx_buf_ptr *)
- cvmx_phys_to_ptr(segment_ptr.s.addr - 8);
-
- /*
- * Octeon Errata PKI-100: The segment size is wrong.
- *
- * Until it is fixed, calculate the segment size based on
- * the packet pool buffer size.
- * When it is fixed, the following line should be replaced
- * with this one:
- * int segment_size = segment_ptr.s.size;
- */
- segment_size =
- CVMX_FPA_PACKET_POOL_SIZE -
- (segment_ptr.s.addr -
- (((segment_ptr.s.addr >> 7) -
- segment_ptr.s.back) << 7));
-
- /* Don't copy more than what is left in the packet */
- if (segment_size > len)
- segment_size = len;
-
- /* Copy the data into the packet */
- skb_put_data(skb, cvmx_phys_to_ptr(segment_ptr.s.addr),
- segment_size);
- len -= segment_size;
- segment_ptr = next_ptr;
- }
-}
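The PKI-100 workaround above recovers the segment capacity from the buffer geometry: ((addr >> 7) - back) << 7 is the 128-byte-aligned start of the underlying pool buffer, so subtracting it from addr gives the data offset within the buffer. A worked example with illustrative numbers, assuming CVMX_FPA_PACKET_POOL_SIZE == 2048:

/* Hedged worked example of the segment-size computation above:
 *
 *   addr = 0x10080, back = 1, pool buffer size = 2048 (assumed)
 *   start = ((0x10080 >> 7) - 1) << 7 = 0x10000
 *   size  = 2048 - (0x10080 - 0x10000) = 2048 - 128 = 1920 bytes
 */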
-
-static int cvm_oct_poll(struct oct_rx_group *rx_group, int budget)
-{
- const int coreid = cvmx_get_core_num();
- u64 old_group_mask;
- u64 old_scratch;
- int rx_count = 0;
- int did_work_request = 0;
- int packet_not_copied;
-
- /* Prefetch cvm_oct_device since we know we need it soon */
- prefetch(cvm_oct_device);
-
- if (USE_ASYNC_IOBDMA) {
- /* Save scratch in case userspace is using it */
- CVMX_SYNCIOBDMA;
- old_scratch = cvmx_scratch_read64(CVMX_SCR_SCRATCH);
- }
-
- /* Only allow work for our group (and preserve priorities) */
- if (OCTEON_IS_MODEL(OCTEON_CN68XX)) {
- old_group_mask = cvmx_read_csr(CVMX_SSO_PPX_GRP_MSK(coreid));
- cvmx_write_csr(CVMX_SSO_PPX_GRP_MSK(coreid),
- BIT(rx_group->group));
- cvmx_read_csr(CVMX_SSO_PPX_GRP_MSK(coreid)); /* Flush */
- } else {
- old_group_mask = cvmx_read_csr(CVMX_POW_PP_GRP_MSKX(coreid));
- cvmx_write_csr(CVMX_POW_PP_GRP_MSKX(coreid),
- (old_group_mask & ~0xFFFFull) |
- BIT(rx_group->group));
- }
-
- if (USE_ASYNC_IOBDMA) {
- cvmx_pow_work_request_async(CVMX_SCR_SCRATCH, CVMX_POW_NO_WAIT);
- did_work_request = 1;
- }
-
- while (rx_count < budget) {
- struct sk_buff *skb = NULL;
- struct sk_buff **pskb = NULL;
- int skb_in_hw;
- struct cvmx_wqe *work;
- int port;
-
- if (USE_ASYNC_IOBDMA && did_work_request)
- work = cvmx_pow_work_response_async(CVMX_SCR_SCRATCH);
- else
- work = cvmx_pow_work_request_sync(CVMX_POW_NO_WAIT);
-
- prefetch(work);
- did_work_request = 0;
- if (!work) {
- if (OCTEON_IS_MODEL(OCTEON_CN68XX)) {
- cvmx_write_csr(CVMX_SSO_WQ_IQ_DIS,
- BIT(rx_group->group));
- cvmx_write_csr(CVMX_SSO_WQ_INT,
- BIT(rx_group->group));
- } else {
- union cvmx_pow_wq_int wq_int;
-
- wq_int.u64 = 0;
- wq_int.s.iq_dis = BIT(rx_group->group);
- wq_int.s.wq_int = BIT(rx_group->group);
- cvmx_write_csr(CVMX_POW_WQ_INT, wq_int.u64);
- }
- break;
- }
- pskb = (struct sk_buff **)
- (cvm_oct_get_buffer_ptr(work->packet_ptr) -
- sizeof(void *));
- prefetch(pskb);
-
- if (USE_ASYNC_IOBDMA && rx_count < (budget - 1)) {
- cvmx_pow_work_request_async_nocheck(CVMX_SCR_SCRATCH,
- CVMX_POW_NO_WAIT);
- did_work_request = 1;
- }
- rx_count++;
-
- skb_in_hw = work->word2.s.bufs == 1;
- if (likely(skb_in_hw)) {
- skb = *pskb;
- prefetch(&skb->head);
- prefetch(&skb->len);
- }
-
- if (octeon_has_feature(OCTEON_FEATURE_PKND))
- port = work->word0.pip.cn68xx.pknd;
- else
- port = work->word1.cn38xx.ipprt;
-
- prefetch(cvm_oct_device[port]);
-
- /* Immediately throw away all packets with receive errors */
- if (unlikely(work->word2.snoip.rcv_error)) {
- if (cvm_oct_check_rcv_error(work))
- continue;
- }
-
- /*
- * We can only use the zero copy path if skbuffs are
- * in the FPA pool and the packet fits in a single
- * buffer.
- */
- if (likely(skb_in_hw)) {
- skb->data = skb->head + work->packet_ptr.s.addr -
- cvmx_ptr_to_phys(skb->head);
- prefetch(skb->data);
- skb->len = work->word1.len;
- skb_set_tail_pointer(skb, skb->len);
- packet_not_copied = 1;
- } else {
- /*
- * We have to copy the packet. First allocate
- * an skbuff for it.
- */
- skb = dev_alloc_skb(work->word1.len);
- if (!skb) {
- cvm_oct_free_work(work);
- continue;
- }
-
- /*
- * Check if we've received a packet that was
- * entirely stored in the work entry.
- */
- if (unlikely(work->word2.s.bufs == 0)) {
- u8 *ptr = work->packet_data;
-
- if (likely(!work->word2.s.not_IP)) {
- /*
- * The beginning of the packet
- * moves for IP packets.
- */
- if (work->word2.s.is_v6)
- ptr += 2;
- else
- ptr += 6;
- }
- skb_put_data(skb, ptr, work->word1.len);
- /* No packet buffers to free */
- } else {
- copy_segments_to_skb(work, skb);
- }
- packet_not_copied = 0;
- }
- if (likely((port < TOTAL_NUMBER_OF_PORTS) &&
- cvm_oct_device[port])) {
- struct net_device *dev = cvm_oct_device[port];
-
- /*
- * Only accept packets for devices that are
- * currently up.
- */
- if (likely(dev->flags & IFF_UP)) {
- skb->protocol = eth_type_trans(skb, dev);
- skb->dev = dev;
-
- if (unlikely(work->word2.s.not_IP ||
- work->word2.s.IP_exc ||
- work->word2.s.L4_error ||
- !work->word2.s.tcp_or_udp))
- skb->ip_summed = CHECKSUM_NONE;
- else
- skb->ip_summed = CHECKSUM_UNNECESSARY;
-
- /* Increment RX stats for virtual ports */
- if (port >= CVMX_PIP_NUM_INPUT_PORTS) {
- dev->stats.rx_packets++;
- dev->stats.rx_bytes += skb->len;
- }
- netif_receive_skb(skb);
- } else {
- /*
- * Drop any packet received for a device that
- * isn't up.
- */
- dev->stats.rx_dropped++;
- dev_kfree_skb_irq(skb);
- }
- } else {
- /*
- * Drop any packet received for a device that
- * doesn't exist.
- */
- printk_ratelimited("Port %d not controlled by Linux, packet dropped\n",
- port);
- dev_kfree_skb_irq(skb);
- }
- /*
- * Check to see if the skbuff and work share the same
- * packet buffer.
- */
- if (likely(packet_not_copied)) {
- /*
- * This buffer needs to be replaced, increment
- * the number of buffers we need to free by
- * one.
- */
- cvmx_fau_atomic_add32(FAU_NUM_PACKET_BUFFERS_TO_FREE,
- 1);
-
- cvmx_fpa_free(work, CVMX_FPA_WQE_POOL, 1);
- } else {
- cvm_oct_free_work(work);
- }
- }
- /* Restore the original POW group mask */
- if (OCTEON_IS_MODEL(OCTEON_CN68XX)) {
- cvmx_write_csr(CVMX_SSO_PPX_GRP_MSK(coreid), old_group_mask);
- cvmx_read_csr(CVMX_SSO_PPX_GRP_MSK(coreid)); /* Flush */
- } else {
- cvmx_write_csr(CVMX_POW_PP_GRP_MSKX(coreid), old_group_mask);
- }
-
- if (USE_ASYNC_IOBDMA) {
- /* Restore the scratch area */
- cvmx_scratch_write64(CVMX_SCR_SCRATCH, old_scratch);
- }
- cvm_oct_rx_refill_pool(0);
-
- return rx_count;
-}
-
-/**
- * cvm_oct_napi_poll - the NAPI poll function.
- * @napi: The NAPI instance.
- * @budget: Maximum number of packets to receive.
- *
- * Returns the number of packets processed.
- */
-static int cvm_oct_napi_poll(struct napi_struct *napi, int budget)
-{
- struct oct_rx_group *rx_group = container_of(napi, struct oct_rx_group,
- napi);
- int rx_count;
-
- rx_count = cvm_oct_poll(rx_group, budget);
-
- if (rx_count < budget) {
- /* No more work */
- napi_complete_done(napi, rx_count);
- enable_irq(rx_group->irq);
- }
- return rx_count;
-}
-
-#ifdef CONFIG_NET_POLL_CONTROLLER
-/**
- * cvm_oct_poll_controller - poll for receive packets
- *
- * @dev: Device to poll. Unused.
- */
-void cvm_oct_poll_controller(struct net_device *dev)
-{
- int i;
-
- if (!atomic_read(&oct_rx_ready))
- return;
-
- for (i = 0; i < ARRAY_SIZE(oct_rx_group); i++) {
- if (!(pow_receive_groups & BIT(i)))
- continue;
-
- cvm_oct_poll(&oct_rx_group[i], 16);
- }
-}
-#endif
-
-void cvm_oct_rx_initialize(void)
-{
- int i;
- struct net_device *dev_for_napi = NULL;
-
- for (i = 0; i < TOTAL_NUMBER_OF_PORTS; i++) {
- if (cvm_oct_device[i]) {
- dev_for_napi = cvm_oct_device[i];
- break;
- }
- }
-
- if (!dev_for_napi)
- panic("No net_devices were allocated.");
-
- for (i = 0; i < ARRAY_SIZE(oct_rx_group); i++) {
- int ret;
-
- if (!(pow_receive_groups & BIT(i)))
- continue;
-
- netif_napi_add(dev_for_napi, &oct_rx_group[i].napi,
- cvm_oct_napi_poll, rx_napi_weight);
- napi_enable(&oct_rx_group[i].napi);
-
- oct_rx_group[i].irq = OCTEON_IRQ_WORKQ0 + i;
- oct_rx_group[i].group = i;
-
- /* Register an IRQ handler to receive POW interrupts */
- ret = request_irq(oct_rx_group[i].irq, cvm_oct_do_interrupt, 0,
- "Ethernet", &oct_rx_group[i].napi);
- if (ret)
- panic("Could not acquire Ethernet IRQ %d\n",
- oct_rx_group[i].irq);
-
- disable_irq_nosync(oct_rx_group[i].irq);
-
- /* Enable POW interrupt when our port has at least one packet */
- if (OCTEON_IS_MODEL(OCTEON_CN68XX)) {
- union cvmx_sso_wq_int_thrx int_thr;
- union cvmx_pow_wq_int_pc int_pc;
-
- int_thr.u64 = 0;
- int_thr.s.tc_en = 1;
- int_thr.s.tc_thr = 1;
- cvmx_write_csr(CVMX_SSO_WQ_INT_THRX(i), int_thr.u64);
-
- int_pc.u64 = 0;
- int_pc.s.pc_thr = 5;
- cvmx_write_csr(CVMX_SSO_WQ_INT_PC, int_pc.u64);
- } else {
- union cvmx_pow_wq_int_thrx int_thr;
- union cvmx_pow_wq_int_pc int_pc;
-
- int_thr.u64 = 0;
- int_thr.s.tc_en = 1;
- int_thr.s.tc_thr = 1;
- cvmx_write_csr(CVMX_POW_WQ_INT_THRX(i), int_thr.u64);
-
- int_pc.u64 = 0;
- int_pc.s.pc_thr = 5;
- cvmx_write_csr(CVMX_POW_WQ_INT_PC, int_pc.u64);
- }
-
- /* Schedule NAPI now. This will indirectly enable the
- * interrupt.
- */
- napi_schedule(&oct_rx_group[i].napi);
- }
- atomic_inc(&oct_rx_ready);
-}
-
-void cvm_oct_rx_shutdown(void)
-{
- int i;
-
- for (i = 0; i < ARRAY_SIZE(oct_rx_group); i++) {
- if (!(pow_receive_groups & BIT(i)))
- continue;
-
- /* Disable POW interrupt */
- if (OCTEON_IS_MODEL(OCTEON_CN68XX))
- cvmx_write_csr(CVMX_SSO_WQ_INT_THRX(i), 0);
- else
- cvmx_write_csr(CVMX_POW_WQ_INT_THRX(i), 0);
-
- /* Free the interrupt handler */
- free_irq(oct_rx_group[i].irq, cvm_oct_device);
-
- netif_napi_del(&oct_rx_group[i].napi);
- }
-}
diff --git a/drivers/staging/octeon/ethernet-rx.h b/drivers/staging/octeon/ethernet-rx.h
deleted file mode 100644
index ff6482fa20d6..000000000000
--- a/drivers/staging/octeon/ethernet-rx.h
+++ /dev/null
@@ -1,31 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/*
- * This file is based on code from OCTEON SDK by Cavium Networks.
- *
- * Copyright (c) 2003-2007 Cavium Networks
- */
-
-void cvm_oct_poll_controller(struct net_device *dev);
-void cvm_oct_rx_initialize(void);
-void cvm_oct_rx_shutdown(void);
-
-static inline void cvm_oct_rx_refill_pool(int fill_threshold)
-{
- int number_to_free;
- int num_freed;
- /* Refill the packet buffer pool */
- number_to_free =
- cvmx_fau_fetch_and_add32(FAU_NUM_PACKET_BUFFERS_TO_FREE, 0);
-
- if (number_to_free > fill_threshold) {
- cvmx_fau_atomic_add32(FAU_NUM_PACKET_BUFFERS_TO_FREE,
- -number_to_free);
- num_freed = cvm_oct_mem_fill_fpa(CVMX_FPA_PACKET_POOL,
- CVMX_FPA_PACKET_POOL_SIZE,
- number_to_free);
- if (num_freed != number_to_free) {
- cvmx_fau_atomic_add32(FAU_NUM_PACKET_BUFFERS_TO_FREE,
- number_to_free - num_freed);
- }
- }
-}
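The refill helper above performs a claim-then-correct exchange on the FAU counter: it subtracts everything outstanding up front, then adds back whatever the FPA fill failed to deliver. A worked trace with illustrative numbers:

/* Hedged worked trace of cvm_oct_rx_refill_pool(0) above:
 *
 *   counter = 37               (buffers consumed since last refill)
 *   number_to_free = 37        (read, above fill_threshold)
 *   counter -= 37  -> 0        (claim all outstanding buffers)
 *   cvm_oct_mem_fill_fpa() only allocates 30 -> num_freed = 30
 *   counter += 37 - 30 -> 7    (shortfall retried on the next pass)
 */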
diff --git a/drivers/staging/octeon/ethernet-sgmii.c b/drivers/staging/octeon/ethernet-sgmii.c
deleted file mode 100644
index d7fbd9159302..000000000000
--- a/drivers/staging/octeon/ethernet-sgmii.c
+++ /dev/null
@@ -1,30 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * This file is based on code from OCTEON SDK by Cavium Networks.
- *
- * Copyright (c) 2003-2007 Cavium Networks
- */
-
-#include <linux/phy.h>
-#include <linux/kernel.h>
-#include <linux/netdevice.h>
-#include <linux/ratelimit.h>
-#include <net/dst.h>
-
-#include "octeon-ethernet.h"
-#include "ethernet-defines.h"
-#include "ethernet-util.h"
-#include "ethernet-mdio.h"
-
-int cvm_oct_sgmii_open(struct net_device *dev)
-{
- return cvm_oct_common_open(dev, cvm_oct_link_poll);
-}
-
-int cvm_oct_sgmii_init(struct net_device *dev)
-{
- cvm_oct_common_init(dev);
-
- /* FIXME: Need autoneg logic */
- return 0;
-}
diff --git a/drivers/staging/octeon/ethernet-spi.c b/drivers/staging/octeon/ethernet-spi.c
deleted file mode 100644
index c582403e6a1f..000000000000
--- a/drivers/staging/octeon/ethernet-spi.c
+++ /dev/null
@@ -1,226 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * This file is based on code from OCTEON SDK by Cavium Networks.
- *
- * Copyright (c) 2003-2007 Cavium Networks
- */
-
-#include <linux/kernel.h>
-#include <linux/netdevice.h>
-#include <linux/interrupt.h>
-#include <net/dst.h>
-
-#include "octeon-ethernet.h"
-#include "ethernet-defines.h"
-#include "ethernet-util.h"
-
-static int number_spi_ports;
-static int need_retrain[2] = { 0, 0 };
-
-static void cvm_oct_spxx_int_pr(union cvmx_spxx_int_reg spx_int_reg, int index)
-{
- if (spx_int_reg.s.spf)
- pr_err("SPI%d: SRX Spi4 interface down\n", index);
- if (spx_int_reg.s.calerr)
- pr_err("SPI%d: SRX Spi4 Calendar table parity error\n", index);
- if (spx_int_reg.s.syncerr)
- pr_err("SPI%d: SRX Consecutive Spi4 DIP4 errors have exceeded SPX_ERR_CTL[ERRCNT]\n",
- index);
- if (spx_int_reg.s.diperr)
- pr_err("SPI%d: SRX Spi4 DIP4 error\n", index);
- if (spx_int_reg.s.tpaovr)
- pr_err("SPI%d: SRX Selected port has hit TPA overflow\n",
- index);
- if (spx_int_reg.s.rsverr)
- pr_err("SPI%d: SRX Spi4 reserved control word detected\n",
- index);
- if (spx_int_reg.s.drwnng)
- pr_err("SPI%d: SRX Spi4 receive FIFO drowning/overflow\n",
- index);
- if (spx_int_reg.s.clserr)
- pr_err("SPI%d: SRX Spi4 packet closed on non-16B alignment without EOP\n",
- index);
- if (spx_int_reg.s.spiovr)
- pr_err("SPI%d: SRX Spi4 async FIFO overflow\n", index);
- if (spx_int_reg.s.abnorm)
- pr_err("SPI%d: SRX Abnormal packet termination (ERR bit)\n",
- index);
- if (spx_int_reg.s.prtnxa)
- pr_err("SPI%d: SRX Port out of range\n", index);
-}
-
-static void cvm_oct_stxx_int_pr(union cvmx_stxx_int_reg stx_int_reg, int index)
-{
- if (stx_int_reg.s.syncerr)
- pr_err("SPI%d: STX Interface encountered a fatal error\n",
- index);
- if (stx_int_reg.s.frmerr)
- pr_err("SPI%d: STX FRMCNT has exceeded STX_DIP_CNT[MAXFRM]\n",
- index);
- if (stx_int_reg.s.unxfrm)
- pr_err("SPI%d: STX Unexpected framing sequence\n", index);
- if (stx_int_reg.s.nosync)
- pr_err("SPI%d: STX ERRCNT has exceeded STX_DIP_CNT[MAXDIP]\n",
- index);
- if (stx_int_reg.s.diperr)
- pr_err("SPI%d: STX DIP2 error on the Spi4 Status channel\n",
- index);
- if (stx_int_reg.s.datovr)
- pr_err("SPI%d: STX Spi4 FIFO overflow error\n", index);
- if (stx_int_reg.s.ovrbst)
- pr_err("SPI%d: STX Transmit packet burst too big\n", index);
- if (stx_int_reg.s.calpar1)
- pr_err("SPI%d: STX Calendar Table Parity Error Bank%d\n",
- index, 1);
- if (stx_int_reg.s.calpar0)
- pr_err("SPI%d: STX Calendar Table Parity Error Bank%d\n",
- index, 0);
-}
-
-static irqreturn_t cvm_oct_spi_spx_int(int index)
-{
- union cvmx_spxx_int_reg spx_int_reg;
- union cvmx_stxx_int_reg stx_int_reg;
-
- spx_int_reg.u64 = cvmx_read_csr(CVMX_SPXX_INT_REG(index));
- cvmx_write_csr(CVMX_SPXX_INT_REG(index), spx_int_reg.u64);
- if (!need_retrain[index]) {
- spx_int_reg.u64 &= cvmx_read_csr(CVMX_SPXX_INT_MSK(index));
- cvm_oct_spxx_int_pr(spx_int_reg, index);
- }
-
- stx_int_reg.u64 = cvmx_read_csr(CVMX_STXX_INT_REG(index));
- cvmx_write_csr(CVMX_STXX_INT_REG(index), stx_int_reg.u64);
- if (!need_retrain[index]) {
- stx_int_reg.u64 &= cvmx_read_csr(CVMX_STXX_INT_MSK(index));
- cvm_oct_stxx_int_pr(stx_int_reg, index);
- }
-
- cvmx_write_csr(CVMX_SPXX_INT_MSK(index), 0);
- cvmx_write_csr(CVMX_STXX_INT_MSK(index), 0);
- need_retrain[index] = 1;
-
- return IRQ_HANDLED;
-}
-
-static irqreturn_t cvm_oct_spi_rml_interrupt(int cpl, void *dev_id)
-{
- irqreturn_t return_status = IRQ_NONE;
- union cvmx_npi_rsl_int_blocks rsl_int_blocks;
-
- /* Check and see if this interrupt was caused by the GMX block */
- rsl_int_blocks.u64 = cvmx_read_csr(CVMX_NPI_RSL_INT_BLOCKS);
- if (rsl_int_blocks.s.spx1) /* 19 - SPX1_INT_REG & STX1_INT_REG */
- return_status = cvm_oct_spi_spx_int(1);
-
- if (rsl_int_blocks.s.spx0) /* 18 - SPX0_INT_REG & STX0_INT_REG */
- return_status = cvm_oct_spi_spx_int(0);
-
- return return_status;
-}
-
-static void cvm_oct_spi_enable_error_reporting(int interface)
-{
- union cvmx_spxx_int_msk spxx_int_msk;
- union cvmx_stxx_int_msk stxx_int_msk;
-
- spxx_int_msk.u64 = cvmx_read_csr(CVMX_SPXX_INT_MSK(interface));
- spxx_int_msk.s.calerr = 1;
- spxx_int_msk.s.syncerr = 1;
- spxx_int_msk.s.diperr = 1;
- spxx_int_msk.s.tpaovr = 1;
- spxx_int_msk.s.rsverr = 1;
- spxx_int_msk.s.drwnng = 1;
- spxx_int_msk.s.clserr = 1;
- spxx_int_msk.s.spiovr = 1;
- spxx_int_msk.s.abnorm = 1;
- spxx_int_msk.s.prtnxa = 1;
- cvmx_write_csr(CVMX_SPXX_INT_MSK(interface), spxx_int_msk.u64);
-
- stxx_int_msk.u64 = cvmx_read_csr(CVMX_STXX_INT_MSK(interface));
- stxx_int_msk.s.frmerr = 1;
- stxx_int_msk.s.unxfrm = 1;
- stxx_int_msk.s.nosync = 1;
- stxx_int_msk.s.diperr = 1;
- stxx_int_msk.s.datovr = 1;
- stxx_int_msk.s.ovrbst = 1;
- stxx_int_msk.s.calpar1 = 1;
- stxx_int_msk.s.calpar0 = 1;
- cvmx_write_csr(CVMX_STXX_INT_MSK(interface), stxx_int_msk.u64);
-}
-
-static void cvm_oct_spi_poll(struct net_device *dev)
-{
- static int spi4000_port;
- struct octeon_ethernet *priv = netdev_priv(dev);
- int interface;
-
- for (interface = 0; interface < 2; interface++) {
- if ((priv->port == interface * 16) && need_retrain[interface]) {
- if (cvmx_spi_restart_interface
- (interface, CVMX_SPI_MODE_DUPLEX, 10) == 0) {
- need_retrain[interface] = 0;
- cvm_oct_spi_enable_error_reporting(interface);
- }
- }
-
- /*
- * The SPI4000 TWSI interface is very slow. In order
- * not to bring the system to a crawl, we only poll a
- * single port every second. This means negotiation
- * speed changes take up to 10 seconds, but at least
- * we don't waste absurd amounts of time waiting for
- * TWSI.
- */
- if (priv->port == spi4000_port) {
- /*
- * This function does nothing if it is called on an
- * interface without a SPI4000.
- */
- cvmx_spi4000_check_speed(interface, priv->port);
- /*
- * Normal ordering increments. By decrementing
- * we only match once per iteration.
- */
- spi4000_port--;
- if (spi4000_port < 0)
- spi4000_port = 10;
- }
- }
-}
-
-int cvm_oct_spi_init(struct net_device *dev)
-{
- int r;
- struct octeon_ethernet *priv = netdev_priv(dev);
-
- if (number_spi_ports == 0) {
- r = request_irq(OCTEON_IRQ_RML, cvm_oct_spi_rml_interrupt,
- IRQF_SHARED, "SPI", &number_spi_ports);
- if (r)
- return r;
- }
- number_spi_ports++;
-
- if ((priv->port == 0) || (priv->port == 16)) {
- cvm_oct_spi_enable_error_reporting(INTERFACE(priv->port));
- priv->poll = cvm_oct_spi_poll;
- }
- cvm_oct_common_init(dev);
- return 0;
-}
-
-void cvm_oct_spi_uninit(struct net_device *dev)
-{
- int interface;
-
- cvm_oct_common_uninit(dev);
- number_spi_ports--;
- if (number_spi_ports == 0) {
- for (interface = 0; interface < 2; interface++) {
- cvmx_write_csr(CVMX_SPXX_INT_MSK(interface), 0);
- cvmx_write_csr(CVMX_STXX_INT_MSK(interface), 0);
- }
- free_irq(OCTEON_IRQ_RML, &number_spi_ports);
- }
-}
diff --git a/drivers/staging/octeon/ethernet-tx.c b/drivers/staging/octeon/ethernet-tx.c
deleted file mode 100644
index b334cf89794e..000000000000
--- a/drivers/staging/octeon/ethernet-tx.c
+++ /dev/null
@@ -1,717 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * This file is based on code from OCTEON SDK by Cavium Networks.
- *
- * Copyright (c) 2003-2010 Cavium Networks
- */
-
-#include <linux/module.h>
-#include <linux/kernel.h>
-#include <linux/netdevice.h>
-#include <linux/etherdevice.h>
-#include <linux/ip.h>
-#include <linux/ratelimit.h>
-#include <linux/string.h>
-#include <linux/interrupt.h>
-#include <net/dst.h>
-#ifdef CONFIG_XFRM
-#include <linux/xfrm.h>
-#include <net/xfrm.h>
-#endif /* CONFIG_XFRM */
-
-#include <linux/atomic.h>
-#include <net/sch_generic.h>
-
-#include "octeon-ethernet.h"
-#include "ethernet-defines.h"
-#include "ethernet-tx.h"
-#include "ethernet-util.h"
-
-#define CVM_OCT_SKB_CB(skb) ((u64 *)((skb)->cb))
-
-/*
- * You can define GET_SKBUFF_QOS() to override how the skbuff output
- * function determines which output queue is used. The default
- * implementation always uses the base queue for the port. If, for
- * example, you wanted to use the skb->priority field, define
- * GET_SKBUFF_QOS as: #define GET_SKBUFF_QOS(skb) ((skb)->priority)
- */
-#ifndef GET_SKBUFF_QOS
-#define GET_SKBUFF_QOS(skb) 0
-#endif
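
For example, a board-specific header included ahead of this file could
define the override exactly as the comment suggests; cvm_oct_xmit()
clamps out-of-range values back to queue 0, so no extra bounds check is
needed:

	#define GET_SKBUFF_QOS(skb) ((skb)->priority)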
-
-static void cvm_oct_tx_do_cleanup(unsigned long arg);
-static DECLARE_TASKLET(cvm_oct_tx_cleanup_tasklet, cvm_oct_tx_do_cleanup, 0);
-
-/* Maximum number of SKBs to try to free per xmit packet. */
-#define MAX_SKB_TO_FREE (MAX_OUT_QUEUE_DEPTH * 2)
-
-static inline int cvm_oct_adjust_skb_to_free(int skb_to_free, int fau)
-{
- int undo;
-
- undo = skb_to_free > 0 ? MAX_SKB_TO_FREE : skb_to_free +
- MAX_SKB_TO_FREE;
- if (undo > 0)
- cvmx_fau_atomic_add32(fau, -undo);
- skb_to_free = -skb_to_free > MAX_SKB_TO_FREE ? MAX_SKB_TO_FREE :
- -skb_to_free;
- return skb_to_free;
-}
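
A worked trace of the adjustment above, assuming the FAU register holds
minus the number of hardware-completed but not-yet-freed skbs and that
the caller has just fetch-and-added MAX_SKB_TO_FREE, passing in the old
value:

	/*
	 * old = -3:       undo = MAX_SKB_TO_FREE - 3, the counter
	 *                 returns to 0, and 3 is reported to free.
	 * old = -(MAX+5): undo = -5, nothing is subtracted, and
	 *                 MAX_SKB_TO_FREE is reported, leaving -5 on
	 *                 the counter for the next pass.
	 * old > 0:        the whole add is undone and a non-positive
	 *                 count is returned, so callers free nothing.
	 */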
-
-static void cvm_oct_kick_tx_poll_watchdog(void)
-{
- union cvmx_ciu_timx ciu_timx;
-
- ciu_timx.u64 = 0;
- ciu_timx.s.one_shot = 1;
- ciu_timx.s.len = cvm_oct_tx_poll_interval;
- cvmx_write_csr(CVMX_CIU_TIMX(1), ciu_timx.u64);
-}
-
-static void cvm_oct_free_tx_skbs(struct net_device *dev)
-{
- int skb_to_free;
- int qos, queues_per_port;
- int total_freed = 0;
- int total_remaining = 0;
- unsigned long flags;
- struct octeon_ethernet *priv = netdev_priv(dev);
-
- queues_per_port = cvmx_pko_get_num_queues(priv->port);
- /* Drain any pending packets in the free list */
- for (qos = 0; qos < queues_per_port; qos++) {
- if (skb_queue_len(&priv->tx_free_list[qos]) == 0)
- continue;
- skb_to_free = cvmx_fau_fetch_and_add32(priv->fau + qos * 4,
- MAX_SKB_TO_FREE);
- skb_to_free = cvm_oct_adjust_skb_to_free(skb_to_free,
- priv->fau + qos * 4);
- total_freed += skb_to_free;
- if (skb_to_free > 0) {
- struct sk_buff *to_free_list = NULL;
-
- spin_lock_irqsave(&priv->tx_free_list[qos].lock, flags);
- while (skb_to_free > 0) {
- struct sk_buff *t;
-
- t = __skb_dequeue(&priv->tx_free_list[qos]);
- t->next = to_free_list;
- to_free_list = t;
- skb_to_free--;
- }
- spin_unlock_irqrestore(&priv->tx_free_list[qos].lock,
- flags);
- /* Do the actual freeing outside of the lock. */
- while (to_free_list) {
- struct sk_buff *t = to_free_list;
-
- to_free_list = to_free_list->next;
- dev_kfree_skb_any(t);
- }
- }
- total_remaining += skb_queue_len(&priv->tx_free_list[qos]);
- }
- if (total_remaining < MAX_OUT_QUEUE_DEPTH && netif_queue_stopped(dev))
- netif_wake_queue(dev);
- if (total_remaining)
- cvm_oct_kick_tx_poll_watchdog();
-}
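
The function above uses a pattern this file repeats: detach a batch
from the queue under its lock, then free with the lock dropped, since
dev_kfree_skb_any() can do real work. A self-contained sketch
(free_batch is an illustrative name):

	#include <linux/skbuff.h>
	#include <linux/spinlock.h>

	static void free_batch(struct sk_buff_head *q, int n)
	{
		struct sk_buff *list = NULL;
		unsigned long flags;

		spin_lock_irqsave(&q->lock, flags);
		while (n-- > 0) {
			struct sk_buff *t = __skb_dequeue(q);

			if (!t)
				break;
			t->next = list;	/* chain through skb->next */
			list = t;
		}
		spin_unlock_irqrestore(&q->lock, flags);

		while (list) {	/* the actual frees, lock dropped */
			struct sk_buff *t = list;

			list = list->next;
			dev_kfree_skb_any(t);
		}
	}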
-
-/**
- * cvm_oct_xmit - transmit a packet
- * @skb: Packet to send
- * @dev: Device info structure
- *
- * Returns NETDEV_TX_OK, always.
- */
-int cvm_oct_xmit(struct sk_buff *skb, struct net_device *dev)
-{
- union cvmx_pko_command_word0 pko_command;
- union cvmx_buf_ptr hw_buffer;
- u64 old_scratch;
- u64 old_scratch2;
- int qos;
- int i;
- enum {QUEUE_CORE, QUEUE_HW, QUEUE_DROP} queue_type;
- struct octeon_ethernet *priv = netdev_priv(dev);
- struct sk_buff *to_free_list;
- int skb_to_free;
- int buffers_to_free;
- u32 total_to_clean;
- unsigned long flags;
-#if REUSE_SKBUFFS_WITHOUT_FREE
- unsigned char *fpa_head;
-#endif
-
- /*
- * Prefetch the private data structure. It is larger than
- * one cache line.
- */
- prefetch(priv);
-
- /*
- * The check on CVMX_PKO_QUEUES_PER_PORT_* is designed to
- * completely remove "qos" in the event neither interface
- * supports multiple queues per port.
- */
- if ((CVMX_PKO_QUEUES_PER_PORT_INTERFACE0 > 1) ||
- (CVMX_PKO_QUEUES_PER_PORT_INTERFACE1 > 1)) {
- qos = GET_SKBUFF_QOS(skb);
- if (qos <= 0)
- qos = 0;
- else if (qos >= cvmx_pko_get_num_queues(priv->port))
- qos = 0;
- } else {
- qos = 0;
- }
-
- if (USE_ASYNC_IOBDMA) {
- /* Save scratch in case userspace is using it */
- CVMX_SYNCIOBDMA;
- old_scratch = cvmx_scratch_read64(CVMX_SCR_SCRATCH);
- old_scratch2 = cvmx_scratch_read64(CVMX_SCR_SCRATCH + 8);
-
- /*
- * Fetch the number of packet buffers to be freed,
- * and fetch-and-add the per-queue count of skbs to
- * be freed.
- */
- cvmx_fau_async_fetch_and_add32(CVMX_SCR_SCRATCH + 8,
- FAU_NUM_PACKET_BUFFERS_TO_FREE,
- 0);
- cvmx_fau_async_fetch_and_add32(CVMX_SCR_SCRATCH,
- priv->fau + qos * 4,
- MAX_SKB_TO_FREE);
- }
-
- /*
- * We have space for 6 segment pointers. If there are more
- * segments than that, we must linearize.
- */
- if (unlikely(skb_shinfo(skb)->nr_frags > 5)) {
- if (unlikely(__skb_linearize(skb))) {
- queue_type = QUEUE_DROP;
- if (USE_ASYNC_IOBDMA) {
- /*
- * Get the number of skbuffs in use
- * by the hardware
- */
- CVMX_SYNCIOBDMA;
- skb_to_free =
- cvmx_scratch_read64(CVMX_SCR_SCRATCH);
- } else {
- /*
- * Get the number of skbuffs in use
- * by the hardware
- */
- skb_to_free =
- cvmx_fau_fetch_and_add32(priv->fau +
- qos * 4,
- MAX_SKB_TO_FREE);
- }
- skb_to_free = cvm_oct_adjust_skb_to_free(skb_to_free,
- priv->fau +
- qos * 4);
- spin_lock_irqsave(&priv->tx_free_list[qos].lock, flags);
- goto skip_xmit;
- }
- }
-
- /*
- * The CN3XXX series of parts has an errata (GMX-401) which
- * causes the GMX block to hang if a collision occurs towards
- * the end of a <68 byte packet. As a workaround for this, we
- * pad packets to 68 bytes on the wire whenever we are in half
- * duplex mode (the payload is padded to 64 bytes; the hardware
- * appends the 4-byte FCS). We don't handle the case of having
- * a small packet but no room to add the padding. The kernel
- * should always give us at least a cache line.
- */
- if ((skb->len < 64) && OCTEON_IS_MODEL(OCTEON_CN3XXX)) {
- union cvmx_gmxx_prtx_cfg gmx_prt_cfg;
- int interface = INTERFACE(priv->port);
- int index = INDEX(priv->port);
-
- if (interface < 2) {
- /* We only need to pad packet in half duplex mode */
- gmx_prt_cfg.u64 =
- cvmx_read_csr(CVMX_GMXX_PRTX_CFG(index, interface));
- if (gmx_prt_cfg.s.duplex == 0) {
- int add_bytes = 64 - skb->len;
-
- if ((skb_tail_pointer(skb) + add_bytes) <=
- skb_end_pointer(skb))
- __skb_put_zero(skb, add_bytes);
- }
- }
- }
-
- /* Build the PKO command */
- pko_command.u64 = 0;
-#ifdef __LITTLE_ENDIAN
- pko_command.s.le = 1;
-#endif
- pko_command.s.n2 = 1; /* Don't pollute L2 with the outgoing packet */
- pko_command.s.segs = 1;
- pko_command.s.total_bytes = skb->len;
- pko_command.s.size0 = CVMX_FAU_OP_SIZE_32;
- pko_command.s.subone0 = 1;
-
- pko_command.s.dontfree = 1;
-
- /* Build the PKO buffer pointer */
- hw_buffer.u64 = 0;
- if (skb_shinfo(skb)->nr_frags == 0) {
- hw_buffer.s.addr = XKPHYS_TO_PHYS((uintptr_t)skb->data);
- hw_buffer.s.pool = 0;
- hw_buffer.s.size = skb->len;
- } else {
- hw_buffer.s.addr = XKPHYS_TO_PHYS((uintptr_t)skb->data);
- hw_buffer.s.pool = 0;
- hw_buffer.s.size = skb_headlen(skb);
- CVM_OCT_SKB_CB(skb)[0] = hw_buffer.u64;
- for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
- skb_frag_t *fs = skb_shinfo(skb)->frags + i;
-
- hw_buffer.s.addr =
- XKPHYS_TO_PHYS((uintptr_t)skb_frag_address(fs));
- hw_buffer.s.size = skb_frag_size(fs);
- CVM_OCT_SKB_CB(skb)[i + 1] = hw_buffer.u64;
- }
- hw_buffer.s.addr =
- XKPHYS_TO_PHYS((uintptr_t)CVM_OCT_SKB_CB(skb));
- hw_buffer.s.size = skb_shinfo(skb)->nr_frags + 1;
- pko_command.s.segs = skb_shinfo(skb)->nr_frags + 1;
- pko_command.s.gather = 1;
- goto dont_put_skbuff_in_hw;
- }
-
- /*
- * See if we can put this skb in the FPA pool. Any strange
- * behavior from the Linux networking stack will most likely
- * be caused by a bug in the following code. If some field is
- * in use by the network stack and gets carried over when a
- * buffer is reused, bad things may happen. If in doubt and
- * you don't need the absolute best performance, disable the
- * define REUSE_SKBUFFS_WITHOUT_FREE. The reuse of buffers has
- * shown a 25% increase in performance under some loads.
- */
-#if REUSE_SKBUFFS_WITHOUT_FREE
- fpa_head = skb->head + 256 - ((unsigned long)skb->head & 0x7f);
- if (unlikely(skb->data < fpa_head)) {
- /* TX buffer beginning can't meet FPA alignment constraints */
- goto dont_put_skbuff_in_hw;
- }
- if (unlikely
- ((skb_end_pointer(skb) - fpa_head) < CVMX_FPA_PACKET_POOL_SIZE)) {
- /* TX buffer isn't large enough for the FPA */
- goto dont_put_skbuff_in_hw;
- }
- if (unlikely(skb_shared(skb))) {
- /* TX buffer sharing data with someone else */
- goto dont_put_skbuff_in_hw;
- }
- if (unlikely(skb_cloned(skb))) {
- /* TX buffer has been cloned */
- goto dont_put_skbuff_in_hw;
- }
- if (unlikely(skb_header_cloned(skb))) {
- /* TX buffer header has been cloned */
- goto dont_put_skbuff_in_hw;
- }
- if (unlikely(skb->destructor)) {
- /* TX buffer has a destructor */
- goto dont_put_skbuff_in_hw;
- }
- if (unlikely(skb_shinfo(skb)->nr_frags)) {
- /* TX buffer has fragments */
- goto dont_put_skbuff_in_hw;
- }
- if (unlikely
- (skb->truesize !=
- sizeof(*skb) + skb_end_offset(skb))) {
- /* TX buffer truesize has been changed */
- goto dont_put_skbuff_in_hw;
- }
-
- /*
- * We can use this buffer in the FPA. We don't need the FAU
- * update anymore.
- */
- pko_command.s.dontfree = 0;
-
- hw_buffer.s.back = ((unsigned long)skb->data >> 7) -
- ((unsigned long)fpa_head >> 7);
-
- *(struct sk_buff **)(fpa_head - sizeof(void *)) = skb;
-
- /*
- * The skbuff will be reused without ever being freed. We must
- * clean up a bunch of core things.
- */
- dst_release(skb_dst(skb));
- skb_dst_set(skb, NULL);
- skb_ext_reset(skb);
- nf_reset_ct(skb);
-
-#ifdef CONFIG_NET_SCHED
- skb->tc_index = 0;
- skb_reset_tc(skb);
-#endif /* CONFIG_NET_SCHED */
-#endif /* REUSE_SKBUFFS_WITHOUT_FREE */
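	/*
	 * Worked example of the alignment above, with illustrative
	 * numbers: skb->head = 0x1034 gives head & 0x7f = 0x34 and
	 * fpa_head = 0x1034 + 256 - 0x34 = 0x1100, the first 128-byte
	 * aligned address at least 128 bytes above head, leaving room
	 * for the skb pointer stored at fpa_head - 8. With
	 * skb->data = 0x1180:
	 *   back = (0x1180 >> 7) - (0x1100 >> 7) = 35 - 34 = 1,
	 * i.e. the FPA buffer begins one cache line below skb->data.
	 */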
-
-dont_put_skbuff_in_hw:
-
- /* Check if we can use the hardware checksumming */
- if ((skb->protocol == htons(ETH_P_IP)) &&
- (ip_hdr(skb)->version == 4) &&
- (ip_hdr(skb)->ihl == 5) &&
- ((ip_hdr(skb)->frag_off == 0) ||
- (ip_hdr(skb)->frag_off == htons(1 << 14))) &&
- ((ip_hdr(skb)->protocol == IPPROTO_TCP) ||
- (ip_hdr(skb)->protocol == IPPROTO_UDP))) {
- /* Use hardware checksum calc */
- pko_command.s.ipoffp1 = skb_network_offset(skb) + 1;
- }
-
- if (USE_ASYNC_IOBDMA) {
- /* Get the number of skbuffs in use by the hardware */
- CVMX_SYNCIOBDMA;
- skb_to_free = cvmx_scratch_read64(CVMX_SCR_SCRATCH);
- buffers_to_free = cvmx_scratch_read64(CVMX_SCR_SCRATCH + 8);
- } else {
- /* Get the number of skbuffs in use by the hardware */
- skb_to_free = cvmx_fau_fetch_and_add32(priv->fau + qos * 4,
- MAX_SKB_TO_FREE);
- buffers_to_free =
- cvmx_fau_fetch_and_add32(FAU_NUM_PACKET_BUFFERS_TO_FREE, 0);
- }
-
- skb_to_free = cvm_oct_adjust_skb_to_free(skb_to_free,
- priv->fau + qos * 4);
-
- /*
- * If we're sending faster than the receiver can free them,
- * then don't do the HW free.
- */
- if ((buffers_to_free < -100) && !pko_command.s.dontfree)
- pko_command.s.dontfree = 1;
-
- if (pko_command.s.dontfree) {
- queue_type = QUEUE_CORE;
- pko_command.s.reg0 = priv->fau + qos * 4;
- } else {
- queue_type = QUEUE_HW;
- }
- if (USE_ASYNC_IOBDMA)
- cvmx_fau_async_fetch_and_add32(CVMX_SCR_SCRATCH,
- FAU_TOTAL_TX_TO_CLEAN, 1);
-
- spin_lock_irqsave(&priv->tx_free_list[qos].lock, flags);
-
- /* Drop this packet if we have too many already queued to the HW */
- if (unlikely(skb_queue_len(&priv->tx_free_list[qos]) >=
- MAX_OUT_QUEUE_DEPTH)) {
- if (dev->tx_queue_len != 0) {
- /* Drop the lock when notifying the core. */
- spin_unlock_irqrestore(&priv->tx_free_list[qos].lock,
- flags);
- netif_stop_queue(dev);
- spin_lock_irqsave(&priv->tx_free_list[qos].lock,
- flags);
- } else {
- /* If not using normal queueing. */
- queue_type = QUEUE_DROP;
- goto skip_xmit;
- }
- }
-
- cvmx_pko_send_packet_prepare(priv->port, priv->queue + qos,
- CVMX_PKO_LOCK_NONE);
-
- /* Send the packet to the output queue */
- if (unlikely(cvmx_pko_send_packet_finish(priv->port,
- priv->queue + qos,
- pko_command, hw_buffer,
- CVMX_PKO_LOCK_NONE))) {
- printk_ratelimited("%s: Failed to send the packet\n",
- dev->name);
- queue_type = QUEUE_DROP;
- }
-skip_xmit:
- to_free_list = NULL;
-
- switch (queue_type) {
- case QUEUE_DROP:
- skb->next = to_free_list;
- to_free_list = skb;
- dev->stats.tx_dropped++;
- break;
- case QUEUE_HW:
- cvmx_fau_atomic_add32(FAU_NUM_PACKET_BUFFERS_TO_FREE, -1);
- break;
- case QUEUE_CORE:
- __skb_queue_tail(&priv->tx_free_list[qos], skb);
- break;
- default:
- BUG();
- }
-
- while (skb_to_free > 0) {
- struct sk_buff *t = __skb_dequeue(&priv->tx_free_list[qos]);
-
- t->next = to_free_list;
- to_free_list = t;
- skb_to_free--;
- }
-
- spin_unlock_irqrestore(&priv->tx_free_list[qos].lock, flags);
-
- /* Do the actual freeing outside of the lock. */
- while (to_free_list) {
- struct sk_buff *t = to_free_list;
-
- to_free_list = to_free_list->next;
- dev_kfree_skb_any(t);
- }
-
- if (USE_ASYNC_IOBDMA) {
- CVMX_SYNCIOBDMA;
- total_to_clean = cvmx_scratch_read64(CVMX_SCR_SCRATCH);
- /* Restore the scratch area */
- cvmx_scratch_write64(CVMX_SCR_SCRATCH, old_scratch);
- cvmx_scratch_write64(CVMX_SCR_SCRATCH + 8, old_scratch2);
- } else {
- total_to_clean =
- cvmx_fau_fetch_and_add32(FAU_TOTAL_TX_TO_CLEAN, 1);
- }
-
- if (total_to_clean & 0x3ff) {
- /*
- * Schedule the cleanup tasklet frequently (the test above
- * is true except when the running total is a multiple of
- * 1024) to handle the pathological case of high traffic on
- * one port delaying clean up of packets on a different
- * port that is blocked waiting for the cleanup.
- */
- tasklet_schedule(&cvm_oct_tx_cleanup_tasklet);
- }
-
- cvm_oct_kick_tx_poll_watchdog();
-
- return NETDEV_TX_OK;
-}
-
-/**
- * cvm_oct_xmit_pow - transmit a packet to the POW
- * @skb: Packet to send
- * @dev: Device info structure
- *
- * Returns Zero, always.
- */
-int cvm_oct_xmit_pow(struct sk_buff *skb, struct net_device *dev)
-{
- struct octeon_ethernet *priv = netdev_priv(dev);
- void *packet_buffer;
- void *copy_location;
-
- /* Get a work queue entry */
- struct cvmx_wqe *work = cvmx_fpa_alloc(CVMX_FPA_WQE_POOL);
-
- if (unlikely(!work)) {
- printk_ratelimited("%s: Failed to allocate a work queue entry\n",
- dev->name);
- dev->stats.tx_dropped++;
- dev_kfree_skb_any(skb);
- return 0;
- }
-
- /* Get a packet buffer */
- packet_buffer = cvmx_fpa_alloc(CVMX_FPA_PACKET_POOL);
- if (unlikely(!packet_buffer)) {
- printk_ratelimited("%s: Failed to allocate a packet buffer\n",
- dev->name);
- cvmx_fpa_free(work, CVMX_FPA_WQE_POOL, 1);
- dev->stats.tx_dropped++;
- dev_kfree_skb_any(skb);
- return 0;
- }
-
- /*
- * Calculate where we need to copy the data to. We need to
- * leave 8 bytes for a next pointer (unused). We also need to
- * include any configured skip. Then we need to align the IP
- * packet src and dest into the same 64bit word. The below
- * calculation may add a little extra, but that doesn't
- * hurt.
- */
- copy_location = packet_buffer + sizeof(u64);
- copy_location += ((CVMX_HELPER_FIRST_MBUFF_SKIP + 7) & 0xfff8) + 6;
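	/*
	 * Worked example, using the stub value of
	 * CVMX_HELPER_FIRST_MBUFF_SKIP (7, from octeon-stubs.h):
	 *   ((7 + 7) & 0xfff8) + 6 = 14, so copy_location ends up at
	 *   packet_buffer + 8 + 14 (offset 22).
	 * The 14-byte Ethernet header then spans offsets 22..35, the
	 * IPv4 header starts at 36, and the source address (header
	 * offset 12) lands at 48 with the destination at 52, both
	 * inside the single 64-bit word covering offsets 48..55.
	 */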
-
- /*
- * We have to copy the packet since whoever processes this
- * packet will free it to a hardware pool. We can't use the
- * trick of counting outstanding packets like in
- * cvm_oct_xmit.
- */
- memcpy(copy_location, skb->data, skb->len);
-
- /*
- * Fill in some of the work queue fields. We may need to add
- * more if the software at the other end needs them.
- */
- if (!OCTEON_IS_MODEL(OCTEON_CN68XX))
- work->word0.pip.cn38xx.hw_chksum = skb->csum;
- work->word1.len = skb->len;
- cvmx_wqe_set_port(work, priv->port);
- cvmx_wqe_set_qos(work, priv->port & 0x7);
- cvmx_wqe_set_grp(work, pow_send_group);
- work->word1.tag_type = CVMX_HELPER_INPUT_TAG_TYPE;
- work->word1.tag = pow_send_group; /* FIXME */
- /* Default to zero; explicit stores of zero below are commented out */
- work->word2.u64 = 0;
- work->word2.s.bufs = 1;
- work->packet_ptr.u64 = 0;
- work->packet_ptr.s.addr = cvmx_ptr_to_phys(copy_location);
- work->packet_ptr.s.pool = CVMX_FPA_PACKET_POOL;
- work->packet_ptr.s.size = CVMX_FPA_PACKET_POOL_SIZE;
- work->packet_ptr.s.back = (copy_location - packet_buffer) >> 7;
-
- if (skb->protocol == htons(ETH_P_IP)) {
- work->word2.s.ip_offset = 14;
-#if 0
- work->word2.s.vlan_valid = 0; /* FIXME */
- work->word2.s.vlan_cfi = 0; /* FIXME */
- work->word2.s.vlan_id = 0; /* FIXME */
- work->word2.s.dec_ipcomp = 0; /* FIXME */
-#endif
- work->word2.s.tcp_or_udp =
- (ip_hdr(skb)->protocol == IPPROTO_TCP) ||
- (ip_hdr(skb)->protocol == IPPROTO_UDP);
-#if 0
- /* FIXME */
- work->word2.s.dec_ipsec = 0;
- /* We only support IPv4 right now */
- work->word2.s.is_v6 = 0;
- /* Hardware would set to zero */
- work->word2.s.software = 0;
- /* No error, packet is internal */
- work->word2.s.L4_error = 0;
-#endif
- work->word2.s.is_frag = !((ip_hdr(skb)->frag_off == 0) ||
- (ip_hdr(skb)->frag_off ==
- cpu_to_be16(1 << 14)));
-#if 0
- /* Assume Linux is sending a good packet */
- work->word2.s.IP_exc = 0;
-#endif
- work->word2.s.is_bcast = (skb->pkt_type == PACKET_BROADCAST);
- work->word2.s.is_mcast = (skb->pkt_type == PACKET_MULTICAST);
-#if 0
- /* This is an IP packet */
- work->word2.s.not_IP = 0;
- /* No error, packet is internal */
- work->word2.s.rcv_error = 0;
- /* No error, packet is internal */
- work->word2.s.err_code = 0;
-#endif
-
- /*
- * When copying the data, include 4 bytes of the
- * ethernet header to align the same way hardware
- * does.
- */
- memcpy(work->packet_data, skb->data + 10,
- sizeof(work->packet_data));
- } else {
-#if 0
- work->word2.snoip.vlan_valid = 0; /* FIXME */
- work->word2.snoip.vlan_cfi = 0; /* FIXME */
- work->word2.snoip.vlan_id = 0; /* FIXME */
- work->word2.snoip.software = 0; /* Hardware would set to zero */
-#endif
- work->word2.snoip.is_rarp = skb->protocol == htons(ETH_P_RARP);
- work->word2.snoip.is_arp = skb->protocol == htons(ETH_P_ARP);
- work->word2.snoip.is_bcast =
- (skb->pkt_type == PACKET_BROADCAST);
- work->word2.snoip.is_mcast =
- (skb->pkt_type == PACKET_MULTICAST);
- work->word2.snoip.not_IP = 1; /* IP was done up above */
-#if 0
- /* No error, packet is internal */
- work->word2.snoip.rcv_error = 0;
- /* No error, packet is internal */
- work->word2.snoip.err_code = 0;
-#endif
- memcpy(work->packet_data, skb->data, sizeof(work->packet_data));
- }
-
- /* Submit the packet to the POW */
- cvmx_pow_work_submit(work, work->word1.tag, work->word1.tag_type,
- cvmx_wqe_get_qos(work), cvmx_wqe_get_grp(work));
- dev->stats.tx_packets++;
- dev->stats.tx_bytes += skb->len;
- dev_consume_skb_any(skb);
- return 0;
-}
-
-/**
- * cvm_oct_tx_shutdown_dev - free all skbs that are currently queued for TX.
- * @dev: Device being shutdown
- *
- */
-void cvm_oct_tx_shutdown_dev(struct net_device *dev)
-{
- struct octeon_ethernet *priv = netdev_priv(dev);
- unsigned long flags;
- int qos;
-
- for (qos = 0; qos < 16; qos++) {
- spin_lock_irqsave(&priv->tx_free_list[qos].lock, flags);
- while (skb_queue_len(&priv->tx_free_list[qos]))
- dev_kfree_skb_any(__skb_dequeue
- (&priv->tx_free_list[qos]));
- spin_unlock_irqrestore(&priv->tx_free_list[qos].lock, flags);
- }
-}
-
-static void cvm_oct_tx_do_cleanup(unsigned long arg)
-{
- int port;
-
- for (port = 0; port < TOTAL_NUMBER_OF_PORTS; port++) {
- if (cvm_oct_device[port]) {
- struct net_device *dev = cvm_oct_device[port];
-
- cvm_oct_free_tx_skbs(dev);
- }
- }
-}
-
-static irqreturn_t cvm_oct_tx_cleanup_watchdog(int cpl, void *dev_id)
-{
- /* Disable the interrupt. */
- cvmx_write_csr(CVMX_CIU_TIMX(1), 0);
- /* Do the work in the tasklet. */
- tasklet_schedule(&cvm_oct_tx_cleanup_tasklet);
- return IRQ_HANDLED;
-}
-
-void cvm_oct_tx_initialize(void)
-{
- int i;
-
- /* Disable the interrupt. */
- cvmx_write_csr(CVMX_CIU_TIMX(1), 0);
- /* Register an IRQ handler to receive CIU_TIMX(1) interrupts */
- i = request_irq(OCTEON_IRQ_TIMER1,
- cvm_oct_tx_cleanup_watchdog, 0,
- "Ethernet", cvm_oct_device);
-
- if (i)
- panic("Could not acquire Ethernet IRQ %d\n", OCTEON_IRQ_TIMER1);
-}
-
-void cvm_oct_tx_shutdown(void)
-{
- /* Free the interrupt handler */
- free_irq(OCTEON_IRQ_TIMER1, cvm_oct_device);
-}
diff --git a/drivers/staging/octeon/ethernet-tx.h b/drivers/staging/octeon/ethernet-tx.h
deleted file mode 100644
index 78936e9b33b0..000000000000
--- a/drivers/staging/octeon/ethernet-tx.h
+++ /dev/null
@@ -1,14 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/*
- * This file is based on code from OCTEON SDK by Cavium Networks.
- *
- * Copyright (c) 2003-2007 Cavium Networks
- */
-
-int cvm_oct_xmit(struct sk_buff *skb, struct net_device *dev);
-int cvm_oct_xmit_pow(struct sk_buff *skb, struct net_device *dev);
-int cvm_oct_transmit_qos(struct net_device *dev, void *work_queue_entry,
- int do_free, int qos);
-void cvm_oct_tx_initialize(void);
-void cvm_oct_tx_shutdown(void);
-void cvm_oct_tx_shutdown_dev(struct net_device *dev);
diff --git a/drivers/staging/octeon/ethernet-util.h b/drivers/staging/octeon/ethernet-util.h
deleted file mode 100644
index 2af83a12ca78..000000000000
--- a/drivers/staging/octeon/ethernet-util.h
+++ /dev/null
@@ -1,47 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/*
- * This file is based on code from OCTEON SDK by Cavium Networks.
- *
- * Copyright (c) 2003-2007 Cavium Networks
- */
-
-/**
- * cvm_oct_get_buffer_ptr - convert packet data address to pointer
- * @packet_ptr: Packet data hardware address
- *
- * Returns Packet buffer pointer
- */
-static inline void *cvm_oct_get_buffer_ptr(union cvmx_buf_ptr packet_ptr)
-{
- return cvmx_phys_to_ptr(((packet_ptr.s.addr >> 7) - packet_ptr.s.back)
- << 7);
-}
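
A quick numeric check of the reconstruction above, with illustrative
values: for addr = 0x1180 and back = 1,
((0x1180 >> 7) - 1) << 7 == 0x1100, the 128-byte line where the FPA
buffer and its bookkeeping word begin.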
-
-/**
- * INTERFACE - convert IPD port to logical interface
- * @ipd_port: Port to check
- *
- * Returns Logical interface
- */
-static inline int INTERFACE(int ipd_port)
-{
- int interface;
-
- if (ipd_port == CVMX_PIP_NUM_INPUT_PORTS)
- return 10;
- interface = cvmx_helper_get_interface_num(ipd_port);
- if (interface >= 0)
- return interface;
- panic("Illegal ipd_port %d passed to %s\n", ipd_port, __func__);
-}
-
-/**
- * INDEX - convert IPD/PKO port number to the port's interface index
- * @ipd_port: Port to check
- *
- * Returns Index into interface port list
- */
-static inline int INDEX(int ipd_port)
-{
- return cvmx_helper_get_interface_index_num(ipd_port);
-}
diff --git a/drivers/staging/octeon/ethernet.c b/drivers/staging/octeon/ethernet.c
deleted file mode 100644
index f42c3816ce49..000000000000
--- a/drivers/staging/octeon/ethernet.c
+++ /dev/null
@@ -1,992 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * This file is based on code from OCTEON SDK by Cavium Networks.
- *
- * Copyright (c) 2003-2007 Cavium Networks
- */
-
-#include <linux/platform_device.h>
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/netdevice.h>
-#include <linux/etherdevice.h>
-#include <linux/phy.h>
-#include <linux/slab.h>
-#include <linux/interrupt.h>
-#include <linux/of_net.h>
-#include <linux/if_ether.h>
-#include <linux/if_vlan.h>
-
-#include <net/dst.h>
-
-#include "octeon-ethernet.h"
-#include "ethernet-defines.h"
-#include "ethernet-mem.h"
-#include "ethernet-rx.h"
-#include "ethernet-tx.h"
-#include "ethernet-mdio.h"
-#include "ethernet-util.h"
-
-#define OCTEON_MAX_MTU 65392
-
-static int num_packet_buffers = 1024;
-module_param(num_packet_buffers, int, 0444);
-MODULE_PARM_DESC(num_packet_buffers, "\n"
- "\tNumber of packet buffers to allocate and store in the\n"
- "\tFPA. By default, 1024 packet buffers are used.\n");
-
-static int pow_receive_group = 15;
-module_param(pow_receive_group, int, 0444);
-MODULE_PARM_DESC(pow_receive_group, "\n"
- "\tPOW group to receive packets from. All ethernet hardware\n"
- "\twill be configured to send incoming packets to this POW\n"
- "\tgroup. Also any other software can submit packets to this\n"
- "\tgroup for the kernel to process.");
-
-static int receive_group_order;
-module_param(receive_group_order, int, 0444);
-MODULE_PARM_DESC(receive_group_order, "\n"
- "\tOrder (0..4) of receive groups to take into use. Ethernet hardware\n"
- "\twill be configured to send incoming packets to multiple POW\n"
- "\tgroups. pow_receive_group parameter is ignored when multiple\n"
- "\tgroups are taken into use and groups are allocated starting\n"
- "\tfrom 0. By default, a single group is used.\n");
-
-int pow_send_group = -1;
-module_param(pow_send_group, int, 0644);
-MODULE_PARM_DESC(pow_send_group, "\n"
- "\tPOW group to send packets to other software on. This\n"
- "\tcontrols the creation of the virtual device pow0.\n"
- "\talways_use_pow also depends on this value.");
-
-int always_use_pow;
-module_param(always_use_pow, int, 0444);
-MODULE_PARM_DESC(always_use_pow, "\n"
- "\tWhen set, always send to the pow group. This will cause\n"
- "\tpackets sent to real ethernet devices to be sent to the\n"
- "\tPOW group instead of the hardware. Unless some other\n"
- "\tapplication changes the config, packets will still be\n"
- "\treceived from the low level hardware. Use this option\n"
- "\tto allow a CVMX app to intercept all packets from the\n"
- "\tlinux kernel. You must specify pow_send_group along with\n"
- "\tthis option.");
-
-char pow_send_list[128] = "";
-module_param_string(pow_send_list, pow_send_list, sizeof(pow_send_list), 0444);
-MODULE_PARM_DESC(pow_send_list, "\n"
- "\tComma separated list of ethernet devices that should use the\n"
- "\tPOW for transmit instead of the actual ethernet hardware. This\n"
- "\tis a per port version of always_use_pow. always_use_pow takes\n"
- "\tprecedence over this list. For example, setting this to\n"
- "\t\"eth2,spi3,spi7\" would cause these three devices to transmit\n"
- "\tusing the pow_send_group.");
-
-int rx_napi_weight = 32;
-module_param(rx_napi_weight, int, 0444);
-MODULE_PARM_DESC(rx_napi_weight, "The NAPI WEIGHT parameter.");
-
-/* Mask indicating which receive groups are in use. */
-int pow_receive_groups;
-
-/*
- * cvm_oct_poll_queue_stopping - flag to indicate polling should stop.
- *
- * Set to one right before the polling work is cancelled.
- */
-atomic_t cvm_oct_poll_queue_stopping = ATOMIC_INIT(0);
-
-/*
- * Array of every ethernet device owned by this driver indexed by
- * the ipd input port number.
- */
-struct net_device *cvm_oct_device[TOTAL_NUMBER_OF_PORTS];
-
-u64 cvm_oct_tx_poll_interval;
-
-static void cvm_oct_rx_refill_worker(struct work_struct *work);
-static DECLARE_DELAYED_WORK(cvm_oct_rx_refill_work, cvm_oct_rx_refill_worker);
-
-static void cvm_oct_rx_refill_worker(struct work_struct *work)
-{
- /*
- * FPA 0 may have been drained, try to refill it if we need
- * more than num_packet_buffers / 2, otherwise normal receive
- * processing will refill it. If it were drained, no packets
- * could be received so cvm_oct_napi_poll would never be
- * invoked to do the refill.
- */
- cvm_oct_rx_refill_pool(num_packet_buffers / 2);
-
- if (!atomic_read(&cvm_oct_poll_queue_stopping))
- schedule_delayed_work(&cvm_oct_rx_refill_work, HZ);
-}
-
-static void cvm_oct_periodic_worker(struct work_struct *work)
-{
- struct octeon_ethernet *priv = container_of(work,
- struct octeon_ethernet,
- port_periodic_work.work);
-
- if (priv->poll)
- priv->poll(cvm_oct_device[priv->port]);
-
- cvm_oct_device[priv->port]->netdev_ops->ndo_get_stats
- (cvm_oct_device[priv->port]);
-
- if (!atomic_read(&cvm_oct_poll_queue_stopping))
- schedule_delayed_work(&priv->port_periodic_work, HZ);
-}
-
-static void cvm_oct_configure_common_hw(void)
-{
- /* Setup the FPA */
- cvmx_fpa_enable();
- cvm_oct_mem_fill_fpa(CVMX_FPA_PACKET_POOL, CVMX_FPA_PACKET_POOL_SIZE,
- num_packet_buffers);
- cvm_oct_mem_fill_fpa(CVMX_FPA_WQE_POOL, CVMX_FPA_WQE_POOL_SIZE,
- num_packet_buffers);
- if (CVMX_FPA_OUTPUT_BUFFER_POOL != CVMX_FPA_PACKET_POOL)
- cvm_oct_mem_fill_fpa(CVMX_FPA_OUTPUT_BUFFER_POOL,
- CVMX_FPA_OUTPUT_BUFFER_POOL_SIZE, 1024);
-
-#ifdef __LITTLE_ENDIAN
- {
- union cvmx_ipd_ctl_status ipd_ctl_status;
-
- ipd_ctl_status.u64 = cvmx_read_csr(CVMX_IPD_CTL_STATUS);
- ipd_ctl_status.s.pkt_lend = 1;
- ipd_ctl_status.s.wqe_lend = 1;
- cvmx_write_csr(CVMX_IPD_CTL_STATUS, ipd_ctl_status.u64);
- }
-#endif
-
- cvmx_helper_setup_red(num_packet_buffers / 4, num_packet_buffers / 8);
-}
-
-/**
- * cvm_oct_free_work - Free a work queue entry
- *
- * @work_queue_entry: Work queue entry to free
- *
- * Returns Zero on success, Negative on failure.
- */
-int cvm_oct_free_work(void *work_queue_entry)
-{
- struct cvmx_wqe *work = work_queue_entry;
-
- int segments = work->word2.s.bufs;
- union cvmx_buf_ptr segment_ptr = work->packet_ptr;
-
- while (segments--) {
- union cvmx_buf_ptr next_ptr = *(union cvmx_buf_ptr *)
- cvmx_phys_to_ptr(segment_ptr.s.addr - 8);
- if (unlikely(!segment_ptr.s.i))
- cvmx_fpa_free(cvm_oct_get_buffer_ptr(segment_ptr),
- segment_ptr.s.pool,
- CVMX_FPA_PACKET_POOL_SIZE / 128);
- segment_ptr = next_ptr;
- }
- cvmx_fpa_free(work, CVMX_FPA_WQE_POOL, 1);
-
- return 0;
-}
-EXPORT_SYMBOL(cvm_oct_free_work);
-
-/**
- * cvm_oct_common_get_stats - get the low level ethernet statistics
- * @dev: Device to get the statistics from
- *
- * Returns Pointer to the statistics
- */
-static struct net_device_stats *cvm_oct_common_get_stats(struct net_device *dev)
-{
- cvmx_pip_port_status_t rx_status;
- cvmx_pko_port_status_t tx_status;
- struct octeon_ethernet *priv = netdev_priv(dev);
-
- if (priv->port < CVMX_PIP_NUM_INPUT_PORTS) {
- if (octeon_is_simulation()) {
- /* The simulator doesn't support statistics */
- memset(&rx_status, 0, sizeof(rx_status));
- memset(&tx_status, 0, sizeof(tx_status));
- } else {
- cvmx_pip_get_port_status(priv->port, 1, &rx_status);
- cvmx_pko_get_port_status(priv->port, 1, &tx_status);
- }
-
- dev->stats.rx_packets += rx_status.inb_packets;
- dev->stats.tx_packets += tx_status.packets;
- dev->stats.rx_bytes += rx_status.inb_octets;
- dev->stats.tx_bytes += tx_status.octets;
- dev->stats.multicast += rx_status.multicast_packets;
- dev->stats.rx_crc_errors += rx_status.inb_errors;
- dev->stats.rx_frame_errors += rx_status.fcs_align_err_packets;
- dev->stats.rx_dropped += rx_status.dropped_packets;
- }
-
- return &dev->stats;
-}
-
-/**
- * cvm_oct_common_change_mtu - change the link MTU
- * @dev: Device to change
- * @new_mtu: The new MTU
- *
- * Returns Zero on success
- */
-static int cvm_oct_common_change_mtu(struct net_device *dev, int new_mtu)
-{
- struct octeon_ethernet *priv = netdev_priv(dev);
- int interface = INTERFACE(priv->port);
-#if IS_ENABLED(CONFIG_VLAN_8021Q)
- int vlan_bytes = VLAN_HLEN;
-#else
- int vlan_bytes = 0;
-#endif
- int mtu_overhead = ETH_HLEN + ETH_FCS_LEN + vlan_bytes;
-
- dev->mtu = new_mtu;
-
- if ((interface < 2) &&
- (cvmx_helper_interface_get_mode(interface) !=
- CVMX_HELPER_INTERFACE_MODE_SPI)) {
- int index = INDEX(priv->port);
- /* Add ethernet header and FCS, and VLAN if configured. */
- int max_packet = new_mtu + mtu_overhead;
-
- if (OCTEON_IS_MODEL(OCTEON_CN3XXX) ||
- OCTEON_IS_MODEL(OCTEON_CN58XX)) {
- /* Signal errors on packets larger than the MTU */
- cvmx_write_csr(CVMX_GMXX_RXX_FRM_MAX(index, interface),
- max_packet);
- } else {
- /*
- * Set the hardware to truncate packets larger
- * than the MTU and smaller than 64 bytes.
- */
- union cvmx_pip_frm_len_chkx frm_len_chk;
-
- frm_len_chk.u64 = 0;
- frm_len_chk.s.minlen = VLAN_ETH_ZLEN;
- frm_len_chk.s.maxlen = max_packet;
- cvmx_write_csr(CVMX_PIP_FRM_LEN_CHKX(interface),
- frm_len_chk.u64);
- }
- /*
- * Set the hardware to truncate packets larger than
- * the MTU. The jabber register must be set to a
- * multiple of 8 bytes, so round up.
- */
- cvmx_write_csr(CVMX_GMXX_RXX_JABBER(index, interface),
- (max_packet + 7) & ~7u);
- }
- return 0;
-}
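
The jabber write above uses the usual round-up-to-a-multiple idiom; a
minimal sketch (round_up_8 is an illustrative name):

	/* Round v up to the next multiple of 8: 1518 -> 1520,
	 * 1520 -> 1520.
	 */
	static inline unsigned int round_up_8(unsigned int v)
	{
		return (v + 7) & ~7u;
	}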
-
-/**
- * cvm_oct_common_set_multicast_list - set the multicast list
- * @dev: Device to work on
- */
-static void cvm_oct_common_set_multicast_list(struct net_device *dev)
-{
- union cvmx_gmxx_prtx_cfg gmx_cfg;
- struct octeon_ethernet *priv = netdev_priv(dev);
- int interface = INTERFACE(priv->port);
-
- if ((interface < 2) &&
- (cvmx_helper_interface_get_mode(interface) !=
- CVMX_HELPER_INTERFACE_MODE_SPI)) {
- union cvmx_gmxx_rxx_adr_ctl control;
- int index = INDEX(priv->port);
-
- control.u64 = 0;
- control.s.bcst = 1; /* Allow broadcast MAC addresses */
-
- if (!netdev_mc_empty(dev) || (dev->flags & IFF_ALLMULTI) ||
- (dev->flags & IFF_PROMISC))
- /* Force accept multicast packets */
- control.s.mcst = 2;
- else
- /* Force reject multicast packets */
- control.s.mcst = 1;
-
- if (dev->flags & IFF_PROMISC)
- /*
- * Reject CAM matches in promiscuous mode. Since
- * the CAM is shut off, everything is accepted.
- */
- control.s.cam_mode = 0;
- else
- /* Filter packets based on the CAM */
- control.s.cam_mode = 1;
-
- gmx_cfg.u64 =
- cvmx_read_csr(CVMX_GMXX_PRTX_CFG(index, interface));
- cvmx_write_csr(CVMX_GMXX_PRTX_CFG(index, interface),
- gmx_cfg.u64 & ~1ull);
-
- cvmx_write_csr(CVMX_GMXX_RXX_ADR_CTL(index, interface),
- control.u64);
- if (dev->flags & IFF_PROMISC)
- cvmx_write_csr(CVMX_GMXX_RXX_ADR_CAM_EN
- (index, interface), 0);
- else
- cvmx_write_csr(CVMX_GMXX_RXX_ADR_CAM_EN
- (index, interface), 1);
-
- cvmx_write_csr(CVMX_GMXX_PRTX_CFG(index, interface),
- gmx_cfg.u64);
- }
-}
-
-static int cvm_oct_set_mac_filter(struct net_device *dev)
-{
- struct octeon_ethernet *priv = netdev_priv(dev);
- union cvmx_gmxx_prtx_cfg gmx_cfg;
- int interface = INTERFACE(priv->port);
-
- if ((interface < 2) &&
- (cvmx_helper_interface_get_mode(interface) !=
- CVMX_HELPER_INTERFACE_MODE_SPI)) {
- int i;
- u8 *ptr = dev->dev_addr;
- u64 mac = 0;
- int index = INDEX(priv->port);
-
- for (i = 0; i < 6; i++)
- mac = (mac << 8) | (u64)ptr[i];
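		/*
		 * e.g. dev_addr 00:11:22:33:44:55 folds big-endian
		 * into mac == 0x0000001122334455, the value written
		 * to CVMX_GMXX_SMACX below.
		 */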
-
- gmx_cfg.u64 =
- cvmx_read_csr(CVMX_GMXX_PRTX_CFG(index, interface));
- cvmx_write_csr(CVMX_GMXX_PRTX_CFG(index, interface),
- gmx_cfg.u64 & ~1ull);
-
- cvmx_write_csr(CVMX_GMXX_SMACX(index, interface), mac);
- cvmx_write_csr(CVMX_GMXX_RXX_ADR_CAM0(index, interface),
- ptr[0]);
- cvmx_write_csr(CVMX_GMXX_RXX_ADR_CAM1(index, interface),
- ptr[1]);
- cvmx_write_csr(CVMX_GMXX_RXX_ADR_CAM2(index, interface),
- ptr[2]);
- cvmx_write_csr(CVMX_GMXX_RXX_ADR_CAM3(index, interface),
- ptr[3]);
- cvmx_write_csr(CVMX_GMXX_RXX_ADR_CAM4(index, interface),
- ptr[4]);
- cvmx_write_csr(CVMX_GMXX_RXX_ADR_CAM5(index, interface),
- ptr[5]);
- cvm_oct_common_set_multicast_list(dev);
- cvmx_write_csr(CVMX_GMXX_PRTX_CFG(index, interface),
- gmx_cfg.u64);
- }
- return 0;
-}
-
-/**
- * cvm_oct_common_set_mac_address - set the hardware MAC address for a device
- * @dev: The device in question.
- * @addr: Socket address.
- *
- * Returns Zero on success
- */
-static int cvm_oct_common_set_mac_address(struct net_device *dev, void *addr)
-{
- int r = eth_mac_addr(dev, addr);
-
- if (r)
- return r;
- return cvm_oct_set_mac_filter(dev);
-}
-
-/**
- * cvm_oct_common_init - per network device initialization
- * @dev: Device to initialize
- *
- * Returns Zero on success
- */
-int cvm_oct_common_init(struct net_device *dev)
-{
- struct octeon_ethernet *priv = netdev_priv(dev);
- const u8 *mac = NULL;
-
- if (priv->of_node)
- mac = of_get_mac_address(priv->of_node);
-
- if (!IS_ERR_OR_NULL(mac))
- ether_addr_copy(dev->dev_addr, mac);
- else
- eth_hw_addr_random(dev);
-
- /*
- * Force the interface to use the POW send if always_use_pow
- * was specified or it is in the pow send list.
- */
- if ((pow_send_group != -1) &&
- (always_use_pow || strstr(pow_send_list, dev->name)))
- priv->queue = -1;
-
- if (priv->queue != -1)
- dev->features |= NETIF_F_SG | NETIF_F_IP_CSUM;
-
- /* We do our own locking; Linux doesn't need to */
- dev->features |= NETIF_F_LLTX;
- dev->ethtool_ops = &cvm_oct_ethtool_ops;
-
- cvm_oct_set_mac_filter(dev);
- dev_set_mtu(dev, dev->mtu);
-
- /*
- * Zero out stats for port so we won't mistakenly show
- * counters from the bootloader.
- */
- memset(dev->netdev_ops->ndo_get_stats(dev), 0,
- sizeof(struct net_device_stats));
-
- if (dev->netdev_ops->ndo_stop)
- dev->netdev_ops->ndo_stop(dev);
-
- return 0;
-}
-
-void cvm_oct_common_uninit(struct net_device *dev)
-{
- if (dev->phydev)
- phy_disconnect(dev->phydev);
-}
-
-int cvm_oct_common_open(struct net_device *dev,
- void (*link_poll)(struct net_device *))
-{
- union cvmx_gmxx_prtx_cfg gmx_cfg;
- struct octeon_ethernet *priv = netdev_priv(dev);
- int interface = INTERFACE(priv->port);
- int index = INDEX(priv->port);
- union cvmx_helper_link_info link_info;
- int rv;
-
- rv = cvm_oct_phy_setup_device(dev);
- if (rv)
- return rv;
-
- gmx_cfg.u64 = cvmx_read_csr(CVMX_GMXX_PRTX_CFG(index, interface));
- gmx_cfg.s.en = 1;
- if (octeon_has_feature(OCTEON_FEATURE_PKND))
- gmx_cfg.s.pknd = priv->port;
- cvmx_write_csr(CVMX_GMXX_PRTX_CFG(index, interface), gmx_cfg.u64);
-
- if (octeon_is_simulation())
- return 0;
-
- if (dev->phydev) {
- int r = phy_read_status(dev->phydev);
-
- if (r == 0 && dev->phydev->link == 0)
- netif_carrier_off(dev);
- cvm_oct_adjust_link(dev);
- } else {
- link_info = cvmx_helper_link_get(priv->port);
- if (!link_info.s.link_up)
- netif_carrier_off(dev);
- priv->poll = link_poll;
- link_poll(dev);
- }
-
- return 0;
-}
-
-void cvm_oct_link_poll(struct net_device *dev)
-{
- struct octeon_ethernet *priv = netdev_priv(dev);
- union cvmx_helper_link_info link_info;
-
- link_info = cvmx_helper_link_get(priv->port);
- if (link_info.u64 == priv->link_info)
- return;
-
- if (cvmx_helper_link_set(priv->port, link_info))
- link_info.u64 = priv->link_info;
- else
- priv->link_info = link_info.u64;
-
- if (link_info.s.link_up) {
- if (!netif_carrier_ok(dev))
- netif_carrier_on(dev);
- } else if (netif_carrier_ok(dev)) {
- netif_carrier_off(dev);
- }
- cvm_oct_note_carrier(priv, link_info);
-}
-
-static int cvm_oct_xaui_open(struct net_device *dev)
-{
- return cvm_oct_common_open(dev, cvm_oct_link_poll);
-}
-
-static const struct net_device_ops cvm_oct_npi_netdev_ops = {
- .ndo_init = cvm_oct_common_init,
- .ndo_uninit = cvm_oct_common_uninit,
- .ndo_start_xmit = cvm_oct_xmit,
- .ndo_set_rx_mode = cvm_oct_common_set_multicast_list,
- .ndo_set_mac_address = cvm_oct_common_set_mac_address,
- .ndo_do_ioctl = cvm_oct_ioctl,
- .ndo_change_mtu = cvm_oct_common_change_mtu,
- .ndo_get_stats = cvm_oct_common_get_stats,
-#ifdef CONFIG_NET_POLL_CONTROLLER
- .ndo_poll_controller = cvm_oct_poll_controller,
-#endif
-};
-
-static const struct net_device_ops cvm_oct_xaui_netdev_ops = {
- .ndo_init = cvm_oct_common_init,
- .ndo_uninit = cvm_oct_common_uninit,
- .ndo_open = cvm_oct_xaui_open,
- .ndo_stop = cvm_oct_common_stop,
- .ndo_start_xmit = cvm_oct_xmit,
- .ndo_set_rx_mode = cvm_oct_common_set_multicast_list,
- .ndo_set_mac_address = cvm_oct_common_set_mac_address,
- .ndo_do_ioctl = cvm_oct_ioctl,
- .ndo_change_mtu = cvm_oct_common_change_mtu,
- .ndo_get_stats = cvm_oct_common_get_stats,
-#ifdef CONFIG_NET_POLL_CONTROLLER
- .ndo_poll_controller = cvm_oct_poll_controller,
-#endif
-};
-
-static const struct net_device_ops cvm_oct_sgmii_netdev_ops = {
- .ndo_init = cvm_oct_sgmii_init,
- .ndo_uninit = cvm_oct_common_uninit,
- .ndo_open = cvm_oct_sgmii_open,
- .ndo_stop = cvm_oct_common_stop,
- .ndo_start_xmit = cvm_oct_xmit,
- .ndo_set_rx_mode = cvm_oct_common_set_multicast_list,
- .ndo_set_mac_address = cvm_oct_common_set_mac_address,
- .ndo_do_ioctl = cvm_oct_ioctl,
- .ndo_change_mtu = cvm_oct_common_change_mtu,
- .ndo_get_stats = cvm_oct_common_get_stats,
-#ifdef CONFIG_NET_POLL_CONTROLLER
- .ndo_poll_controller = cvm_oct_poll_controller,
-#endif
-};
-
-static const struct net_device_ops cvm_oct_spi_netdev_ops = {
- .ndo_init = cvm_oct_spi_init,
- .ndo_uninit = cvm_oct_spi_uninit,
- .ndo_start_xmit = cvm_oct_xmit,
- .ndo_set_rx_mode = cvm_oct_common_set_multicast_list,
- .ndo_set_mac_address = cvm_oct_common_set_mac_address,
- .ndo_do_ioctl = cvm_oct_ioctl,
- .ndo_change_mtu = cvm_oct_common_change_mtu,
- .ndo_get_stats = cvm_oct_common_get_stats,
-#ifdef CONFIG_NET_POLL_CONTROLLER
- .ndo_poll_controller = cvm_oct_poll_controller,
-#endif
-};
-
-static const struct net_device_ops cvm_oct_rgmii_netdev_ops = {
- .ndo_init = cvm_oct_common_init,
- .ndo_uninit = cvm_oct_common_uninit,
- .ndo_open = cvm_oct_rgmii_open,
- .ndo_stop = cvm_oct_common_stop,
- .ndo_start_xmit = cvm_oct_xmit,
- .ndo_set_rx_mode = cvm_oct_common_set_multicast_list,
- .ndo_set_mac_address = cvm_oct_common_set_mac_address,
- .ndo_do_ioctl = cvm_oct_ioctl,
- .ndo_change_mtu = cvm_oct_common_change_mtu,
- .ndo_get_stats = cvm_oct_common_get_stats,
-#ifdef CONFIG_NET_POLL_CONTROLLER
- .ndo_poll_controller = cvm_oct_poll_controller,
-#endif
-};
-
-static const struct net_device_ops cvm_oct_pow_netdev_ops = {
- .ndo_init = cvm_oct_common_init,
- .ndo_start_xmit = cvm_oct_xmit_pow,
- .ndo_set_rx_mode = cvm_oct_common_set_multicast_list,
- .ndo_set_mac_address = cvm_oct_common_set_mac_address,
- .ndo_do_ioctl = cvm_oct_ioctl,
- .ndo_change_mtu = cvm_oct_common_change_mtu,
- .ndo_get_stats = cvm_oct_common_get_stats,
-#ifdef CONFIG_NET_POLL_CONTROLLER
- .ndo_poll_controller = cvm_oct_poll_controller,
-#endif
-};
-
-static struct device_node *cvm_oct_of_get_child
- (const struct device_node *parent, int reg_val)
-{
- struct device_node *node = NULL;
- int size;
- const __be32 *addr;
-
- for (;;) {
- node = of_get_next_child(parent, node);
- if (!node)
- break;
- addr = of_get_property(node, "reg", &size);
- if (addr && (be32_to_cpu(*addr) == reg_val))
- break;
- }
- return node;
-}
-
-static struct device_node *cvm_oct_node_for_port(struct device_node *pip,
- int interface, int port)
-{
- struct device_node *ni, *np;
-
- ni = cvm_oct_of_get_child(pip, interface);
- if (!ni)
- return NULL;
-
- np = cvm_oct_of_get_child(ni, port);
- of_node_put(ni);
-
- return np;
-}
-
-static void cvm_set_rgmii_delay(struct octeon_ethernet *priv, int iface,
- int port)
-{
- struct device_node *np = priv->of_node;
- u32 delay_value;
- bool rx_delay;
- bool tx_delay;
-
- /* By default, both the RX and TX delays are enabled in
- * __cvmx_helper_rgmii_enable().
- */
- rx_delay = true;
- tx_delay = true;
-
- if (!of_property_read_u32(np, "rx-delay", &delay_value)) {
- cvmx_write_csr(CVMX_ASXX_RX_CLK_SETX(port, iface), delay_value);
- rx_delay = delay_value > 0;
- }
- if (!of_property_read_u32(np, "tx-delay", &delay_value)) {
- cvmx_write_csr(CVMX_ASXX_TX_CLK_SETX(port, iface), delay_value);
- tx_delay = delay_value > 0;
- }
-
- if (!rx_delay && !tx_delay)
- priv->phy_mode = PHY_INTERFACE_MODE_RGMII_ID;
- else if (!rx_delay)
- priv->phy_mode = PHY_INTERFACE_MODE_RGMII_RXID;
- else if (!tx_delay)
- priv->phy_mode = PHY_INTERFACE_MODE_RGMII_TXID;
- else
- priv->phy_mode = PHY_INTERFACE_MODE_RGMII;
-}
-
-static int cvm_oct_probe(struct platform_device *pdev)
-{
- int num_interfaces;
- int interface;
- int fau = FAU_NUM_PACKET_BUFFERS_TO_FREE;
- int qos;
- struct device_node *pip;
- int mtu_overhead = ETH_HLEN + ETH_FCS_LEN;
-
-#if IS_ENABLED(CONFIG_VLAN_8021Q)
- mtu_overhead += VLAN_HLEN;
-#endif
-
- octeon_mdiobus_force_mod_depencency();
-
- pip = pdev->dev.of_node;
- if (!pip) {
- pr_err("Error: No 'pip' in /aliases\n");
- return -EINVAL;
- }
-
- cvm_oct_configure_common_hw();
-
- cvmx_helper_initialize_packet_io_global();
-
- if (receive_group_order) {
- if (receive_group_order > 4)
- receive_group_order = 4;
- pow_receive_groups = (1 << (1 << receive_group_order)) - 1;
- } else {
- pow_receive_groups = BIT(pow_receive_group);
- }
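	/*
	 * Worked example: receive_group_order == 2 gives
	 * pow_receive_groups == (1 << (1 << 2)) - 1 == 0xf, i.e.
	 * groups 0..3 in use, and the grptagmask programmed below
	 * becomes ~((1 << 2) - 1), so tags hash across those four
	 * groups. The clamp above caps this at order 4, i.e. 16
	 * groups (mask 0xffff).
	 */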
-
- /* Change the input group for all ports before input is enabled */
- num_interfaces = cvmx_helper_get_number_of_interfaces();
- for (interface = 0; interface < num_interfaces; interface++) {
- int num_ports = cvmx_helper_ports_on_interface(interface);
- int port;
-
- for (port = cvmx_helper_get_ipd_port(interface, 0);
- port < cvmx_helper_get_ipd_port(interface, num_ports);
- port++) {
- union cvmx_pip_prt_tagx pip_prt_tagx;
-
- pip_prt_tagx.u64 =
- cvmx_read_csr(CVMX_PIP_PRT_TAGX(port));
-
- if (receive_group_order) {
- int tag_mask;
-
- /* We support only 16 groups at the moment, so
- * always disable the two additional "hidden"
- * tag_mask bits on CN68XX.
- */
- if (OCTEON_IS_MODEL(OCTEON_CN68XX))
- pip_prt_tagx.u64 |= 0x3ull << 44;
-
- tag_mask = ~((1 << receive_group_order) - 1);
- pip_prt_tagx.s.grptagbase = 0;
- pip_prt_tagx.s.grptagmask = tag_mask;
- pip_prt_tagx.s.grptag = 1;
- pip_prt_tagx.s.tag_mode = 0;
- pip_prt_tagx.s.inc_prt_flag = 1;
- pip_prt_tagx.s.ip6_dprt_flag = 1;
- pip_prt_tagx.s.ip4_dprt_flag = 1;
- pip_prt_tagx.s.ip6_sprt_flag = 1;
- pip_prt_tagx.s.ip4_sprt_flag = 1;
- pip_prt_tagx.s.ip6_dst_flag = 1;
- pip_prt_tagx.s.ip4_dst_flag = 1;
- pip_prt_tagx.s.ip6_src_flag = 1;
- pip_prt_tagx.s.ip4_src_flag = 1;
- pip_prt_tagx.s.grp = 0;
- } else {
- pip_prt_tagx.s.grptag = 0;
- pip_prt_tagx.s.grp = pow_receive_group;
- }
-
- cvmx_write_csr(CVMX_PIP_PRT_TAGX(port),
- pip_prt_tagx.u64);
- }
- }
-
- cvmx_helper_ipd_and_packet_input_enable();
-
- memset(cvm_oct_device, 0, sizeof(cvm_oct_device));
-
- /*
- * Initialize the FAU used for counting packet buffers that
- * need to be freed.
- */
- cvmx_fau_atomic_write32(FAU_NUM_PACKET_BUFFERS_TO_FREE, 0);
-
- /* Initialize the FAU used for counting tx SKBs that need to be freed */
- cvmx_fau_atomic_write32(FAU_TOTAL_TX_TO_CLEAN, 0);
-
- if ((pow_send_group != -1)) {
- struct net_device *dev;
-
- dev = alloc_etherdev(sizeof(struct octeon_ethernet));
- if (dev) {
- /* Initialize the device private structure. */
- struct octeon_ethernet *priv = netdev_priv(dev);
-
- SET_NETDEV_DEV(dev, &pdev->dev);
- dev->netdev_ops = &cvm_oct_pow_netdev_ops;
- priv->imode = CVMX_HELPER_INTERFACE_MODE_DISABLED;
- priv->port = CVMX_PIP_NUM_INPUT_PORTS;
- priv->queue = -1;
- strscpy(dev->name, "pow%d", sizeof(dev->name));
- for (qos = 0; qos < 16; qos++)
- skb_queue_head_init(&priv->tx_free_list[qos]);
- dev->min_mtu = VLAN_ETH_ZLEN - mtu_overhead;
- dev->max_mtu = OCTEON_MAX_MTU - mtu_overhead;
-
- if (register_netdev(dev) < 0) {
- pr_err("Failed to register ethernet device for POW\n");
- free_netdev(dev);
- } else {
- cvm_oct_device[CVMX_PIP_NUM_INPUT_PORTS] = dev;
- pr_info("%s: POW send group %d, receive group %d\n",
- dev->name, pow_send_group,
- pow_receive_group);
- }
- } else {
- pr_err("Failed to allocate ethernet device for POW\n");
- }
- }
-
- num_interfaces = cvmx_helper_get_number_of_interfaces();
- for (interface = 0; interface < num_interfaces; interface++) {
- cvmx_helper_interface_mode_t imode =
- cvmx_helper_interface_get_mode(interface);
- int num_ports = cvmx_helper_ports_on_interface(interface);
- int port;
- int port_index;
-
- for (port_index = 0,
- port = cvmx_helper_get_ipd_port(interface, 0);
- port < cvmx_helper_get_ipd_port(interface, num_ports);
- port_index++, port++) {
- struct octeon_ethernet *priv;
- struct net_device *dev =
- alloc_etherdev(sizeof(struct octeon_ethernet));
- if (!dev) {
- pr_err("Failed to allocate ethernet device for port %d\n",
- port);
- continue;
- }
-
- /* Initialize the device private structure. */
- SET_NETDEV_DEV(dev, &pdev->dev);
- priv = netdev_priv(dev);
- priv->netdev = dev;
- priv->of_node = cvm_oct_node_for_port(pip, interface,
- port_index);
-
- INIT_DELAYED_WORK(&priv->port_periodic_work,
- cvm_oct_periodic_worker);
- priv->imode = imode;
- priv->port = port;
- priv->queue = cvmx_pko_get_base_queue(priv->port);
- priv->fau = fau - cvmx_pko_get_num_queues(port) * 4;
- priv->phy_mode = PHY_INTERFACE_MODE_NA;
- for (qos = 0; qos < 16; qos++)
- skb_queue_head_init(&priv->tx_free_list[qos]);
- for (qos = 0; qos < cvmx_pko_get_num_queues(port);
- qos++)
- cvmx_fau_atomic_write32(priv->fau + qos * 4, 0);
- dev->min_mtu = VLAN_ETH_ZLEN - mtu_overhead;
- dev->max_mtu = OCTEON_MAX_MTU - mtu_overhead;
-
- switch (priv->imode) {
- /* These types don't support ports to IPD/PKO */
- case CVMX_HELPER_INTERFACE_MODE_DISABLED:
- case CVMX_HELPER_INTERFACE_MODE_PCIE:
- case CVMX_HELPER_INTERFACE_MODE_PICMG:
- break;
-
- case CVMX_HELPER_INTERFACE_MODE_NPI:
- dev->netdev_ops = &cvm_oct_npi_netdev_ops;
- strscpy(dev->name, "npi%d", sizeof(dev->name));
- break;
-
- case CVMX_HELPER_INTERFACE_MODE_XAUI:
- dev->netdev_ops = &cvm_oct_xaui_netdev_ops;
- strscpy(dev->name, "xaui%d", sizeof(dev->name));
- break;
-
- case CVMX_HELPER_INTERFACE_MODE_LOOP:
- dev->netdev_ops = &cvm_oct_npi_netdev_ops;
- strscpy(dev->name, "loop%d", sizeof(dev->name));
- break;
-
- case CVMX_HELPER_INTERFACE_MODE_SGMII:
- priv->phy_mode = PHY_INTERFACE_MODE_SGMII;
- dev->netdev_ops = &cvm_oct_sgmii_netdev_ops;
- strscpy(dev->name, "eth%d", sizeof(dev->name));
- break;
-
- case CVMX_HELPER_INTERFACE_MODE_SPI:
- dev->netdev_ops = &cvm_oct_spi_netdev_ops;
- strscpy(dev->name, "spi%d", sizeof(dev->name));
- break;
-
- case CVMX_HELPER_INTERFACE_MODE_GMII:
- priv->phy_mode = PHY_INTERFACE_MODE_GMII;
- dev->netdev_ops = &cvm_oct_rgmii_netdev_ops;
- strscpy(dev->name, "eth%d", sizeof(dev->name));
- break;
-
- case CVMX_HELPER_INTERFACE_MODE_RGMII:
- dev->netdev_ops = &cvm_oct_rgmii_netdev_ops;
- strscpy(dev->name, "eth%d", sizeof(dev->name));
- cvm_set_rgmii_delay(priv, interface,
- port_index);
- break;
- }
-
- if (!dev->netdev_ops) {
- free_netdev(dev);
- } else if (register_netdev(dev) < 0) {
- pr_err("Failed to register ethernet device for interface %d, port %d\n",
- interface, priv->port);
- free_netdev(dev);
- } else {
- cvm_oct_device[priv->port] = dev;
- fau -=
- cvmx_pko_get_num_queues(priv->port) *
- sizeof(u32);
- schedule_delayed_work(&priv->port_periodic_work,
- HZ);
- }
- }
- }
-
- cvm_oct_tx_initialize();
- cvm_oct_rx_initialize();
-
- /*
- * 150 us: about 10 1500-byte packets at 1 GbE.
- */
- cvm_oct_tx_poll_interval = 150 * (octeon_get_clock_rate() / 1000000);
-
- schedule_delayed_work(&cvm_oct_rx_refill_work, HZ);
-
- return 0;
-}
-
-static int cvm_oct_remove(struct platform_device *pdev)
-{
- int port;
-
- cvmx_ipd_disable();
-
- atomic_inc_return(&cvm_oct_poll_queue_stopping);
- cancel_delayed_work_sync(&cvm_oct_rx_refill_work);
-
- cvm_oct_rx_shutdown();
- cvm_oct_tx_shutdown();
-
- cvmx_pko_disable();
-
- /* Free the ethernet devices */
- for (port = 0; port < TOTAL_NUMBER_OF_PORTS; port++) {
- if (cvm_oct_device[port]) {
- struct net_device *dev = cvm_oct_device[port];
- struct octeon_ethernet *priv = netdev_priv(dev);
-
- cancel_delayed_work_sync(&priv->port_periodic_work);
-
- cvm_oct_tx_shutdown_dev(dev);
- unregister_netdev(dev);
- free_netdev(dev);
- cvm_oct_device[port] = NULL;
- }
- }
-
- cvmx_pko_shutdown();
-
- cvmx_ipd_free_ptr();
-
- /* Free the HW pools */
- cvm_oct_mem_empty_fpa(CVMX_FPA_PACKET_POOL, CVMX_FPA_PACKET_POOL_SIZE,
- num_packet_buffers);
- cvm_oct_mem_empty_fpa(CVMX_FPA_WQE_POOL, CVMX_FPA_WQE_POOL_SIZE,
- num_packet_buffers);
- if (CVMX_FPA_OUTPUT_BUFFER_POOL != CVMX_FPA_PACKET_POOL)
- cvm_oct_mem_empty_fpa(CVMX_FPA_OUTPUT_BUFFER_POOL,
- CVMX_FPA_OUTPUT_BUFFER_POOL_SIZE, 128);
- return 0;
-}
-
-static const struct of_device_id cvm_oct_match[] = {
- {
- .compatible = "cavium,octeon-3860-pip",
- },
- {},
-};
-MODULE_DEVICE_TABLE(of, cvm_oct_match);
-
-static struct platform_driver cvm_oct_driver = {
- .probe = cvm_oct_probe,
- .remove = cvm_oct_remove,
- .driver = {
- .name = KBUILD_MODNAME,
- .of_match_table = cvm_oct_match,
- },
-};
-
-module_platform_driver(cvm_oct_driver);
-
-MODULE_LICENSE("GPL");
-MODULE_AUTHOR("Cavium Networks <support@caviumnetworks.com>");
-MODULE_DESCRIPTION("Cavium Networks Octeon ethernet driver.");
diff --git a/drivers/staging/octeon/octeon-ethernet.h b/drivers/staging/octeon/octeon-ethernet.h
deleted file mode 100644
index a6140705706f..000000000000
--- a/drivers/staging/octeon/octeon-ethernet.h
+++ /dev/null
@@ -1,107 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/*
- * This file is based on code from OCTEON SDK by Cavium Networks.
- *
- * Copyright (c) 2003-2010 Cavium Networks
- */
-
-/*
- * External interface for the Cavium Octeon ethernet driver.
- */
-#ifndef OCTEON_ETHERNET_H
-#define OCTEON_ETHERNET_H
-
-#include <linux/of.h>
-#include <linux/phy.h>
-
-#ifdef CONFIG_CAVIUM_OCTEON_SOC
-
-#include <asm/octeon/octeon.h>
-
-#include <asm/octeon/cvmx-asxx-defs.h>
-#include <asm/octeon/cvmx-config.h>
-#include <asm/octeon/cvmx-fau.h>
-#include <asm/octeon/cvmx-gmxx-defs.h>
-#include <asm/octeon/cvmx-helper.h>
-#include <asm/octeon/cvmx-helper-util.h>
-#include <asm/octeon/cvmx-ipd.h>
-#include <asm/octeon/cvmx-ipd-defs.h>
-#include <asm/octeon/cvmx-npi-defs.h>
-#include <asm/octeon/cvmx-pip.h>
-#include <asm/octeon/cvmx-pko.h>
-#include <asm/octeon/cvmx-pow.h>
-#include <asm/octeon/cvmx-scratch.h>
-#include <asm/octeon/cvmx-spi.h>
-#include <asm/octeon/cvmx-spxx-defs.h>
-#include <asm/octeon/cvmx-stxx-defs.h>
-#include <asm/octeon/cvmx-wqe.h>
-
-#else
-
-#include "octeon-stubs.h"
-
-#endif
-
-/**
- * This is the definition of the Ethernet driver's private
- * driver state stored in netdev_priv(dev).
- */
-struct octeon_ethernet {
- /* PKO hardware output port */
- int port;
- /* PKO hardware queue for the port */
- int queue;
- /* Hardware fetch and add to count outstanding tx buffers */
- int fau;
- /* My netdev. */
- struct net_device *netdev;
- /*
- * Type of port. This is one of the enums in
- * cvmx_helper_interface_mode_t
- */
- int imode;
- /* PHY mode */
- phy_interface_t phy_mode;
- /* List of outstanding tx buffers per queue */
- struct sk_buff_head tx_free_list[16];
- unsigned int last_speed;
- unsigned int last_link;
- /* Last negotiated link state */
- u64 link_info;
- /* Called periodically to check link status */
- void (*poll)(struct net_device *dev);
- struct delayed_work port_periodic_work;
- struct device_node *of_node;
-};
-
-int cvm_oct_free_work(void *work_queue_entry);
-
-int cvm_oct_rgmii_open(struct net_device *dev);
-
-int cvm_oct_sgmii_init(struct net_device *dev);
-int cvm_oct_sgmii_open(struct net_device *dev);
-
-int cvm_oct_spi_init(struct net_device *dev);
-void cvm_oct_spi_uninit(struct net_device *dev);
-
-int cvm_oct_common_init(struct net_device *dev);
-void cvm_oct_common_uninit(struct net_device *dev);
-void cvm_oct_adjust_link(struct net_device *dev);
-int cvm_oct_common_stop(struct net_device *dev);
-int cvm_oct_common_open(struct net_device *dev,
- void (*link_poll)(struct net_device *));
-void cvm_oct_note_carrier(struct octeon_ethernet *priv,
- union cvmx_helper_link_info li);
-void cvm_oct_link_poll(struct net_device *dev);
-
-extern int always_use_pow;
-extern int pow_send_group;
-extern int pow_receive_groups;
-extern char pow_send_list[];
-extern struct net_device *cvm_oct_device[];
-extern atomic_t cvm_oct_poll_queue_stopping;
-extern u64 cvm_oct_tx_poll_interval;
-
-extern int rx_napi_weight;
-
-#endif
diff --git a/drivers/staging/octeon/octeon-stubs.h b/drivers/staging/octeon/octeon-stubs.h
deleted file mode 100644
index 79213c045504..000000000000
--- a/drivers/staging/octeon/octeon-stubs.h
+++ /dev/null
@@ -1,1433 +0,0 @@
-#define CONFIG_CAVIUM_OCTEON_CVMSEG_SIZE 512
-
-#ifndef XKPHYS_TO_PHYS
-# define XKPHYS_TO_PHYS(p) (p)
-#endif
-
-#define OCTEON_IRQ_WORKQ0 0
-#define OCTEON_IRQ_RML 0
-#define OCTEON_IRQ_TIMER1 0
-#define OCTEON_IS_MODEL(x) 0
-#define octeon_has_feature(x) 0
-#define octeon_get_clock_rate() 0
-
-#define CVMX_SYNCIOBDMA do { } while(0)
-
-#define CVMX_HELPER_INPUT_TAG_TYPE 0
-#define CVMX_HELPER_FIRST_MBUFF_SKIP 7
-#define CVMX_FAU_REG_END (2048)
-#define CVMX_FPA_OUTPUT_BUFFER_POOL (2)
-#define CVMX_FPA_OUTPUT_BUFFER_POOL_SIZE 16
-#define CVMX_FPA_PACKET_POOL (0)
-#define CVMX_FPA_PACKET_POOL_SIZE 16
-#define CVMX_FPA_WQE_POOL (1)
-#define CVMX_FPA_WQE_POOL_SIZE 16
-#define CVMX_GMXX_RXX_ADR_CAM_EN(a, b) ((a)+(b))
-#define CVMX_GMXX_RXX_ADR_CTL(a, b) ((a)+(b))
-#define CVMX_GMXX_PRTX_CFG(a, b) ((a)+(b))
-#define CVMX_GMXX_RXX_FRM_MAX(a, b) ((a)+(b))
-#define CVMX_GMXX_RXX_JABBER(a, b) ((a)+(b))
-#define CVMX_IPD_CTL_STATUS 0
-#define CVMX_PIP_FRM_LEN_CHKX(a) (a)
-#define CVMX_PIP_NUM_INPUT_PORTS 1
-#define CVMX_SCR_SCRATCH 0
-#define CVMX_PKO_QUEUES_PER_PORT_INTERFACE0 2
-#define CVMX_PKO_QUEUES_PER_PORT_INTERFACE1 2
-#define CVMX_IPD_SUB_PORT_FCS 0
-#define CVMX_SSO_WQ_IQ_DIS 0
-#define CVMX_SSO_WQ_INT 0
-#define CVMX_POW_WQ_INT 0
-#define CVMX_SSO_WQ_INT_PC 0
-#define CVMX_NPI_RSL_INT_BLOCKS 0
-#define CVMX_POW_WQ_INT_PC 0
-
-union cvmx_pip_wqe_word2 {
- uint64_t u64;
- struct {
- uint64_t bufs:8;
- uint64_t ip_offset:8;
- uint64_t vlan_valid:1;
- uint64_t vlan_stacked:1;
- uint64_t unassigned:1;
- uint64_t vlan_cfi:1;
- uint64_t vlan_id:12;
- uint64_t pr:4;
- uint64_t unassigned2:8;
- uint64_t dec_ipcomp:1;
- uint64_t tcp_or_udp:1;
- uint64_t dec_ipsec:1;
- uint64_t is_v6:1;
- uint64_t software:1;
- uint64_t L4_error:1;
- uint64_t is_frag:1;
- uint64_t IP_exc:1;
- uint64_t is_bcast:1;
- uint64_t is_mcast:1;
- uint64_t not_IP:1;
- uint64_t rcv_error:1;
- uint64_t err_code:8;
- } s;
- struct {
- uint64_t bufs:8;
- uint64_t ip_offset:8;
- uint64_t vlan_valid:1;
- uint64_t vlan_stacked:1;
- uint64_t unassigned:1;
- uint64_t vlan_cfi:1;
- uint64_t vlan_id:12;
- uint64_t port:12;
- uint64_t dec_ipcomp:1;
- uint64_t tcp_or_udp:1;
- uint64_t dec_ipsec:1;
- uint64_t is_v6:1;
- uint64_t software:1;
- uint64_t L4_error:1;
- uint64_t is_frag:1;
- uint64_t IP_exc:1;
- uint64_t is_bcast:1;
- uint64_t is_mcast:1;
- uint64_t not_IP:1;
- uint64_t rcv_error:1;
- uint64_t err_code:8;
- } s_cn68xx;
-
- struct {
- uint64_t unused1:16;
- uint64_t vlan:16;
- uint64_t unused2:32;
- } svlan;
- struct {
- uint64_t bufs:8;
- uint64_t unused:8;
- uint64_t vlan_valid:1;
- uint64_t vlan_stacked:1;
- uint64_t unassigned:1;
- uint64_t vlan_cfi:1;
- uint64_t vlan_id:12;
- uint64_t pr:4;
- uint64_t unassigned2:12;
- uint64_t software:1;
- uint64_t unassigned3:1;
- uint64_t is_rarp:1;
- uint64_t is_arp:1;
- uint64_t is_bcast:1;
- uint64_t is_mcast:1;
- uint64_t not_IP:1;
- uint64_t rcv_error:1;
- uint64_t err_code:8;
- } snoip;
-
-};
-
-union cvmx_pip_wqe_word0 {
- struct {
- uint64_t next_ptr:40;
- uint8_t unused;
- __wsum hw_chksum;
- } cn38xx;
- struct {
- uint64_t pknd:6; /* 0..5 */
- uint64_t unused2:2; /* 6..7 */
- uint64_t bpid:6; /* 8..13 */
- uint64_t unused1:18; /* 14..31 */
- uint64_t l2ptr:8; /* 32..39 */
- uint64_t l3ptr:8; /* 40..47 */
- uint64_t unused0:8; /* 48..55 */
- uint64_t l4ptr:8; /* 56..63 */
- } cn68xx;
-};
-
-union cvmx_wqe_word0 {
- uint64_t u64;
- union cvmx_pip_wqe_word0 pip;
-};
-
-union cvmx_wqe_word1 {
- uint64_t u64;
- struct {
- uint64_t tag:32;
- uint64_t tag_type:2;
- uint64_t varies:14;
- uint64_t len:16;
- };
- struct {
- uint64_t tag:32;
- uint64_t tag_type:2;
- uint64_t zero_2:3;
- uint64_t grp:6;
- uint64_t zero_1:1;
- uint64_t qos:3;
- uint64_t zero_0:1;
- uint64_t len:16;
- } cn68xx;
- struct {
- uint64_t tag:32;
- uint64_t tag_type:2;
- uint64_t zero_2:1;
- uint64_t grp:4;
- uint64_t qos:3;
- uint64_t ipprt:6;
- uint64_t len:16;
- } cn38xx;
-};
-
-union cvmx_buf_ptr {
- void *ptr;
- uint64_t u64;
- struct {
- uint64_t i:1;
- uint64_t back:4;
- uint64_t pool:3;
- uint64_t size:16;
- uint64_t addr:40;
- } s;
-};
-
-struct cvmx_wqe {
- union cvmx_wqe_word0 word0;
- union cvmx_wqe_word1 word1;
- union cvmx_pip_wqe_word2 word2;
- union cvmx_buf_ptr packet_ptr;
- uint8_t packet_data[96];
-};
-
-union cvmx_helper_link_info {
- uint64_t u64;
- struct {
- uint64_t reserved_20_63:44;
- uint64_t link_up:1; /**< Is the physical link up? */
- uint64_t full_duplex:1; /**< 1 if the link is full duplex */
- uint64_t speed:18; /**< Speed of the link in Mbps */
- } s;
-};
-
-enum cvmx_fau_reg_32 {
- CVMX_FAU_REG_32_START = 0,
-};
-
-enum cvmx_fau_op_size {
- CVMX_FAU_OP_SIZE_8 = 0,
- CVMX_FAU_OP_SIZE_16 = 1,
- CVMX_FAU_OP_SIZE_32 = 2,
- CVMX_FAU_OP_SIZE_64 = 3
-};
-
-typedef enum {
- CVMX_SPI_MODE_UNKNOWN = 0,
- CVMX_SPI_MODE_TX_HALFPLEX = 1,
- CVMX_SPI_MODE_RX_HALFPLEX = 2,
- CVMX_SPI_MODE_DUPLEX = 3
-} cvmx_spi_mode_t;
-
-typedef enum {
- CVMX_HELPER_INTERFACE_MODE_DISABLED,
- CVMX_HELPER_INTERFACE_MODE_RGMII,
- CVMX_HELPER_INTERFACE_MODE_GMII,
- CVMX_HELPER_INTERFACE_MODE_SPI,
- CVMX_HELPER_INTERFACE_MODE_PCIE,
- CVMX_HELPER_INTERFACE_MODE_XAUI,
- CVMX_HELPER_INTERFACE_MODE_SGMII,
- CVMX_HELPER_INTERFACE_MODE_PICMG,
- CVMX_HELPER_INTERFACE_MODE_NPI,
- CVMX_HELPER_INTERFACE_MODE_LOOP,
-} cvmx_helper_interface_mode_t;
-
-typedef enum {
- CVMX_POW_WAIT = 1,
- CVMX_POW_NO_WAIT = 0,
-} cvmx_pow_wait_t;
-
-typedef enum {
- CVMX_PKO_LOCK_NONE = 0,
- CVMX_PKO_LOCK_ATOMIC_TAG = 1,
- CVMX_PKO_LOCK_CMD_QUEUE = 2,
-} cvmx_pko_lock_t;
-
-typedef enum {
- CVMX_PKO_SUCCESS,
- CVMX_PKO_INVALID_PORT,
- CVMX_PKO_INVALID_QUEUE,
- CVMX_PKO_INVALID_PRIORITY,
- CVMX_PKO_NO_MEMORY,
- CVMX_PKO_PORT_ALREADY_SETUP,
- CVMX_PKO_CMD_QUEUE_INIT_ERROR
-} cvmx_pko_status_t;
-
-enum cvmx_pow_tag_type {
- CVMX_POW_TAG_TYPE_ORDERED = 0L,
- CVMX_POW_TAG_TYPE_ATOMIC = 1L,
- CVMX_POW_TAG_TYPE_NULL = 2L,
- CVMX_POW_TAG_TYPE_NULL_NULL = 3L
-};
-
-union cvmx_ipd_ctl_status {
- uint64_t u64;
- struct cvmx_ipd_ctl_status_s {
- uint64_t reserved_18_63:46;
- uint64_t use_sop:1;
- uint64_t rst_done:1;
- uint64_t clken:1;
- uint64_t no_wptr:1;
- uint64_t pq_apkt:1;
- uint64_t pq_nabuf:1;
- uint64_t ipd_full:1;
- uint64_t pkt_off:1;
- uint64_t len_m8:1;
- uint64_t reset:1;
- uint64_t addpkt:1;
- uint64_t naddbuf:1;
- uint64_t pkt_lend:1;
- uint64_t wqe_lend:1;
- uint64_t pbp_en:1;
- uint64_t opc_mode:2;
- uint64_t ipd_en:1;
- } s;
- struct cvmx_ipd_ctl_status_cn30xx {
- uint64_t reserved_10_63:54;
- uint64_t len_m8:1;
- uint64_t reset:1;
- uint64_t addpkt:1;
- uint64_t naddbuf:1;
- uint64_t pkt_lend:1;
- uint64_t wqe_lend:1;
- uint64_t pbp_en:1;
- uint64_t opc_mode:2;
- uint64_t ipd_en:1;
- } cn30xx;
- struct cvmx_ipd_ctl_status_cn38xxp2 {
- uint64_t reserved_9_63:55;
- uint64_t reset:1;
- uint64_t addpkt:1;
- uint64_t naddbuf:1;
- uint64_t pkt_lend:1;
- uint64_t wqe_lend:1;
- uint64_t pbp_en:1;
- uint64_t opc_mode:2;
- uint64_t ipd_en:1;
- } cn38xxp2;
- struct cvmx_ipd_ctl_status_cn50xx {
- uint64_t reserved_15_63:49;
- uint64_t no_wptr:1;
- uint64_t pq_apkt:1;
- uint64_t pq_nabuf:1;
- uint64_t ipd_full:1;
- uint64_t pkt_off:1;
- uint64_t len_m8:1;
- uint64_t reset:1;
- uint64_t addpkt:1;
- uint64_t naddbuf:1;
- uint64_t pkt_lend:1;
- uint64_t wqe_lend:1;
- uint64_t pbp_en:1;
- uint64_t opc_mode:2;
- uint64_t ipd_en:1;
- } cn50xx;
- struct cvmx_ipd_ctl_status_cn58xx {
- uint64_t reserved_12_63:52;
- uint64_t ipd_full:1;
- uint64_t pkt_off:1;
- uint64_t len_m8:1;
- uint64_t reset:1;
- uint64_t addpkt:1;
- uint64_t naddbuf:1;
- uint64_t pkt_lend:1;
- uint64_t wqe_lend:1;
- uint64_t pbp_en:1;
- uint64_t opc_mode:2;
- uint64_t ipd_en:1;
- } cn58xx;
- struct cvmx_ipd_ctl_status_cn63xxp1 {
- uint64_t reserved_16_63:48;
- uint64_t clken:1;
- uint64_t no_wptr:1;
- uint64_t pq_apkt:1;
- uint64_t pq_nabuf:1;
- uint64_t ipd_full:1;
- uint64_t pkt_off:1;
- uint64_t len_m8:1;
- uint64_t reset:1;
- uint64_t addpkt:1;
- uint64_t naddbuf:1;
- uint64_t pkt_lend:1;
- uint64_t wqe_lend:1;
- uint64_t pbp_en:1;
- uint64_t opc_mode:2;
- uint64_t ipd_en:1;
- } cn63xxp1;
-};
-
-union cvmx_ipd_sub_port_fcs {
- uint64_t u64;
- struct cvmx_ipd_sub_port_fcs_s {
- uint64_t port_bit:32;
- uint64_t reserved_32_35:4;
- uint64_t port_bit2:4;
- uint64_t reserved_40_63:24;
- } s;
- struct cvmx_ipd_sub_port_fcs_cn30xx {
- uint64_t port_bit:3;
- uint64_t reserved_3_63:61;
- } cn30xx;
- struct cvmx_ipd_sub_port_fcs_cn38xx {
- uint64_t port_bit:32;
- uint64_t reserved_32_63:32;
- } cn38xx;
-};
-
-union cvmx_ipd_sub_port_qos_cnt {
- uint64_t u64;
- struct cvmx_ipd_sub_port_qos_cnt_s {
- uint64_t cnt:32;
- uint64_t port_qos:9;
- uint64_t reserved_41_63:23;
- } s;
-};
-typedef struct {
- uint32_t dropped_octets;
- uint32_t dropped_packets;
- uint32_t pci_raw_packets;
- uint32_t octets;
- uint32_t packets;
- uint32_t multicast_packets;
- uint32_t broadcast_packets;
- uint32_t len_64_packets;
- uint32_t len_65_127_packets;
- uint32_t len_128_255_packets;
- uint32_t len_256_511_packets;
- uint32_t len_512_1023_packets;
- uint32_t len_1024_1518_packets;
- uint32_t len_1519_max_packets;
- uint32_t fcs_align_err_packets;
- uint32_t runt_packets;
- uint32_t runt_crc_packets;
- uint32_t oversize_packets;
- uint32_t oversize_crc_packets;
- uint32_t inb_packets;
- uint64_t inb_octets;
- uint16_t inb_errors;
-} cvmx_pip_port_status_t;
-
-typedef struct {
- uint32_t packets;
- uint64_t octets;
- uint64_t doorbell;
-} cvmx_pko_port_status_t;
-
-union cvmx_pip_frm_len_chkx {
- uint64_t u64;
- struct cvmx_pip_frm_len_chkx_s {
- uint64_t reserved_32_63:32;
- uint64_t maxlen:16;
- uint64_t minlen:16;
- } s;
-};
-
-union cvmx_gmxx_rxx_frm_ctl {
- uint64_t u64;
- struct cvmx_gmxx_rxx_frm_ctl_s {
- uint64_t pre_chk:1;
- uint64_t pre_strp:1;
- uint64_t ctl_drp:1;
- uint64_t ctl_bck:1;
- uint64_t ctl_mcst:1;
- uint64_t ctl_smac:1;
- uint64_t pre_free:1;
- uint64_t vlan_len:1;
- uint64_t pad_len:1;
- uint64_t pre_align:1;
- uint64_t null_dis:1;
- uint64_t reserved_11_11:1;
- uint64_t ptp_mode:1;
- uint64_t reserved_13_63:51;
- } s;
- struct cvmx_gmxx_rxx_frm_ctl_cn30xx {
- uint64_t pre_chk:1;
- uint64_t pre_strp:1;
- uint64_t ctl_drp:1;
- uint64_t ctl_bck:1;
- uint64_t ctl_mcst:1;
- uint64_t ctl_smac:1;
- uint64_t pre_free:1;
- uint64_t vlan_len:1;
- uint64_t pad_len:1;
- uint64_t reserved_9_63:55;
- } cn30xx;
- struct cvmx_gmxx_rxx_frm_ctl_cn31xx {
- uint64_t pre_chk:1;
- uint64_t pre_strp:1;
- uint64_t ctl_drp:1;
- uint64_t ctl_bck:1;
- uint64_t ctl_mcst:1;
- uint64_t ctl_smac:1;
- uint64_t pre_free:1;
- uint64_t vlan_len:1;
- uint64_t reserved_8_63:56;
- } cn31xx;
- struct cvmx_gmxx_rxx_frm_ctl_cn50xx {
- uint64_t pre_chk:1;
- uint64_t pre_strp:1;
- uint64_t ctl_drp:1;
- uint64_t ctl_bck:1;
- uint64_t ctl_mcst:1;
- uint64_t ctl_smac:1;
- uint64_t pre_free:1;
- uint64_t reserved_7_8:2;
- uint64_t pre_align:1;
- uint64_t null_dis:1;
- uint64_t reserved_11_63:53;
- } cn50xx;
- struct cvmx_gmxx_rxx_frm_ctl_cn56xxp1 {
- uint64_t pre_chk:1;
- uint64_t pre_strp:1;
- uint64_t ctl_drp:1;
- uint64_t ctl_bck:1;
- uint64_t ctl_mcst:1;
- uint64_t ctl_smac:1;
- uint64_t pre_free:1;
- uint64_t reserved_7_8:2;
- uint64_t pre_align:1;
- uint64_t reserved_10_63:54;
- } cn56xxp1;
- struct cvmx_gmxx_rxx_frm_ctl_cn58xx {
- uint64_t pre_chk:1;
- uint64_t pre_strp:1;
- uint64_t ctl_drp:1;
- uint64_t ctl_bck:1;
- uint64_t ctl_mcst:1;
- uint64_t ctl_smac:1;
- uint64_t pre_free:1;
- uint64_t vlan_len:1;
- uint64_t pad_len:1;
- uint64_t pre_align:1;
- uint64_t null_dis:1;
- uint64_t reserved_11_63:53;
- } cn58xx;
- struct cvmx_gmxx_rxx_frm_ctl_cn61xx {
- uint64_t pre_chk:1;
- uint64_t pre_strp:1;
- uint64_t ctl_drp:1;
- uint64_t ctl_bck:1;
- uint64_t ctl_mcst:1;
- uint64_t ctl_smac:1;
- uint64_t pre_free:1;
- uint64_t reserved_7_8:2;
- uint64_t pre_align:1;
- uint64_t null_dis:1;
- uint64_t reserved_11_11:1;
- uint64_t ptp_mode:1;
- uint64_t reserved_13_63:51;
- } cn61xx;
-};
-
-union cvmx_gmxx_rxx_int_reg {
- uint64_t u64;
- struct cvmx_gmxx_rxx_int_reg_s {
- uint64_t minerr:1;
- uint64_t carext:1;
- uint64_t maxerr:1;
- uint64_t jabber:1;
- uint64_t fcserr:1;
- uint64_t alnerr:1;
- uint64_t lenerr:1;
- uint64_t rcverr:1;
- uint64_t skperr:1;
- uint64_t niberr:1;
- uint64_t ovrerr:1;
- uint64_t pcterr:1;
- uint64_t rsverr:1;
- uint64_t falerr:1;
- uint64_t coldet:1;
- uint64_t ifgerr:1;
- uint64_t phy_link:1;
- uint64_t phy_spd:1;
- uint64_t phy_dupx:1;
- uint64_t pause_drp:1;
- uint64_t loc_fault:1;
- uint64_t rem_fault:1;
- uint64_t bad_seq:1;
- uint64_t bad_term:1;
- uint64_t unsop:1;
- uint64_t uneop:1;
- uint64_t undat:1;
- uint64_t hg2fld:1;
- uint64_t hg2cc:1;
- uint64_t reserved_29_63:35;
- } s;
- struct cvmx_gmxx_rxx_int_reg_cn30xx {
- uint64_t minerr:1;
- uint64_t carext:1;
- uint64_t maxerr:1;
- uint64_t jabber:1;
- uint64_t fcserr:1;
- uint64_t alnerr:1;
- uint64_t lenerr:1;
- uint64_t rcverr:1;
- uint64_t skperr:1;
- uint64_t niberr:1;
- uint64_t ovrerr:1;
- uint64_t pcterr:1;
- uint64_t rsverr:1;
- uint64_t falerr:1;
- uint64_t coldet:1;
- uint64_t ifgerr:1;
- uint64_t phy_link:1;
- uint64_t phy_spd:1;
- uint64_t phy_dupx:1;
- uint64_t reserved_19_63:45;
- } cn30xx;
- struct cvmx_gmxx_rxx_int_reg_cn50xx {
- uint64_t reserved_0_0:1;
- uint64_t carext:1;
- uint64_t reserved_2_2:1;
- uint64_t jabber:1;
- uint64_t fcserr:1;
- uint64_t alnerr:1;
- uint64_t reserved_6_6:1;
- uint64_t rcverr:1;
- uint64_t skperr:1;
- uint64_t niberr:1;
- uint64_t ovrerr:1;
- uint64_t pcterr:1;
- uint64_t rsverr:1;
- uint64_t falerr:1;
- uint64_t coldet:1;
- uint64_t ifgerr:1;
- uint64_t phy_link:1;
- uint64_t phy_spd:1;
- uint64_t phy_dupx:1;
- uint64_t pause_drp:1;
- uint64_t reserved_20_63:44;
- } cn50xx;
- struct cvmx_gmxx_rxx_int_reg_cn52xx {
- uint64_t reserved_0_0:1;
- uint64_t carext:1;
- uint64_t reserved_2_2:1;
- uint64_t jabber:1;
- uint64_t fcserr:1;
- uint64_t reserved_5_6:2;
- uint64_t rcverr:1;
- uint64_t skperr:1;
- uint64_t reserved_9_9:1;
- uint64_t ovrerr:1;
- uint64_t pcterr:1;
- uint64_t rsverr:1;
- uint64_t falerr:1;
- uint64_t coldet:1;
- uint64_t ifgerr:1;
- uint64_t reserved_16_18:3;
- uint64_t pause_drp:1;
- uint64_t loc_fault:1;
- uint64_t rem_fault:1;
- uint64_t bad_seq:1;
- uint64_t bad_term:1;
- uint64_t unsop:1;
- uint64_t uneop:1;
- uint64_t undat:1;
- uint64_t hg2fld:1;
- uint64_t hg2cc:1;
- uint64_t reserved_29_63:35;
- } cn52xx;
- struct cvmx_gmxx_rxx_int_reg_cn56xxp1 {
- uint64_t reserved_0_0:1;
- uint64_t carext:1;
- uint64_t reserved_2_2:1;
- uint64_t jabber:1;
- uint64_t fcserr:1;
- uint64_t reserved_5_6:2;
- uint64_t rcverr:1;
- uint64_t skperr:1;
- uint64_t reserved_9_9:1;
- uint64_t ovrerr:1;
- uint64_t pcterr:1;
- uint64_t rsverr:1;
- uint64_t falerr:1;
- uint64_t coldet:1;
- uint64_t ifgerr:1;
- uint64_t reserved_16_18:3;
- uint64_t pause_drp:1;
- uint64_t loc_fault:1;
- uint64_t rem_fault:1;
- uint64_t bad_seq:1;
- uint64_t bad_term:1;
- uint64_t unsop:1;
- uint64_t uneop:1;
- uint64_t undat:1;
- uint64_t reserved_27_63:37;
- } cn56xxp1;
- struct cvmx_gmxx_rxx_int_reg_cn58xx {
- uint64_t minerr:1;
- uint64_t carext:1;
- uint64_t maxerr:1;
- uint64_t jabber:1;
- uint64_t fcserr:1;
- uint64_t alnerr:1;
- uint64_t lenerr:1;
- uint64_t rcverr:1;
- uint64_t skperr:1;
- uint64_t niberr:1;
- uint64_t ovrerr:1;
- uint64_t pcterr:1;
- uint64_t rsverr:1;
- uint64_t falerr:1;
- uint64_t coldet:1;
- uint64_t ifgerr:1;
- uint64_t phy_link:1;
- uint64_t phy_spd:1;
- uint64_t phy_dupx:1;
- uint64_t pause_drp:1;
- uint64_t reserved_20_63:44;
- } cn58xx;
- struct cvmx_gmxx_rxx_int_reg_cn61xx {
- uint64_t minerr:1;
- uint64_t carext:1;
- uint64_t reserved_2_2:1;
- uint64_t jabber:1;
- uint64_t fcserr:1;
- uint64_t reserved_5_6:2;
- uint64_t rcverr:1;
- uint64_t skperr:1;
- uint64_t reserved_9_9:1;
- uint64_t ovrerr:1;
- uint64_t pcterr:1;
- uint64_t rsverr:1;
- uint64_t falerr:1;
- uint64_t coldet:1;
- uint64_t ifgerr:1;
- uint64_t reserved_16_18:3;
- uint64_t pause_drp:1;
- uint64_t loc_fault:1;
- uint64_t rem_fault:1;
- uint64_t bad_seq:1;
- uint64_t bad_term:1;
- uint64_t unsop:1;
- uint64_t uneop:1;
- uint64_t undat:1;
- uint64_t hg2fld:1;
- uint64_t hg2cc:1;
- uint64_t reserved_29_63:35;
- } cn61xx;
-};
-
-union cvmx_gmxx_prtx_cfg {
- uint64_t u64;
- struct cvmx_gmxx_prtx_cfg_s {
- uint64_t reserved_22_63:42;
- uint64_t pknd:6;
- uint64_t reserved_14_15:2;
- uint64_t tx_idle:1;
- uint64_t rx_idle:1;
- uint64_t reserved_9_11:3;
- uint64_t speed_msb:1;
- uint64_t reserved_4_7:4;
- uint64_t slottime:1;
- uint64_t duplex:1;
- uint64_t speed:1;
- uint64_t en:1;
- } s;
- struct cvmx_gmxx_prtx_cfg_cn30xx {
- uint64_t reserved_4_63:60;
- uint64_t slottime:1;
- uint64_t duplex:1;
- uint64_t speed:1;
- uint64_t en:1;
- } cn30xx;
- struct cvmx_gmxx_prtx_cfg_cn52xx {
- uint64_t reserved_14_63:50;
- uint64_t tx_idle:1;
- uint64_t rx_idle:1;
- uint64_t reserved_9_11:3;
- uint64_t speed_msb:1;
- uint64_t reserved_4_7:4;
- uint64_t slottime:1;
- uint64_t duplex:1;
- uint64_t speed:1;
- uint64_t en:1;
- } cn52xx;
-};
-
-union cvmx_gmxx_rxx_adr_ctl {
- uint64_t u64;
- struct cvmx_gmxx_rxx_adr_ctl_s {
- uint64_t reserved_4_63:60;
- uint64_t cam_mode:1;
- uint64_t mcst:2;
- uint64_t bcst:1;
- } s;
-};
-
-union cvmx_pip_prt_tagx {
- uint64_t u64;
- struct cvmx_pip_prt_tagx_s {
- uint64_t reserved_54_63:10;
- uint64_t portadd_en:1;
- uint64_t inc_hwchk:1;
- uint64_t reserved_50_51:2;
- uint64_t grptagbase_msb:2;
- uint64_t reserved_46_47:2;
- uint64_t grptagmask_msb:2;
- uint64_t reserved_42_43:2;
- uint64_t grp_msb:2;
- uint64_t grptagbase:4;
- uint64_t grptagmask:4;
- uint64_t grptag:1;
- uint64_t grptag_mskip:1;
- uint64_t tag_mode:2;
- uint64_t inc_vs:2;
- uint64_t inc_vlan:1;
- uint64_t inc_prt_flag:1;
- uint64_t ip6_dprt_flag:1;
- uint64_t ip4_dprt_flag:1;
- uint64_t ip6_sprt_flag:1;
- uint64_t ip4_sprt_flag:1;
- uint64_t ip6_nxth_flag:1;
- uint64_t ip4_pctl_flag:1;
- uint64_t ip6_dst_flag:1;
- uint64_t ip4_dst_flag:1;
- uint64_t ip6_src_flag:1;
- uint64_t ip4_src_flag:1;
- uint64_t tcp6_tag_type:2;
- uint64_t tcp4_tag_type:2;
- uint64_t ip6_tag_type:2;
- uint64_t ip4_tag_type:2;
- uint64_t non_tag_type:2;
- uint64_t grp:4;
- } s;
- struct cvmx_pip_prt_tagx_cn30xx {
- uint64_t reserved_40_63:24;
- uint64_t grptagbase:4;
- uint64_t grptagmask:4;
- uint64_t grptag:1;
- uint64_t reserved_30_30:1;
- uint64_t tag_mode:2;
- uint64_t inc_vs:2;
- uint64_t inc_vlan:1;
- uint64_t inc_prt_flag:1;
- uint64_t ip6_dprt_flag:1;
- uint64_t ip4_dprt_flag:1;
- uint64_t ip6_sprt_flag:1;
- uint64_t ip4_sprt_flag:1;
- uint64_t ip6_nxth_flag:1;
- uint64_t ip4_pctl_flag:1;
- uint64_t ip6_dst_flag:1;
- uint64_t ip4_dst_flag:1;
- uint64_t ip6_src_flag:1;
- uint64_t ip4_src_flag:1;
- uint64_t tcp6_tag_type:2;
- uint64_t tcp4_tag_type:2;
- uint64_t ip6_tag_type:2;
- uint64_t ip4_tag_type:2;
- uint64_t non_tag_type:2;
- uint64_t grp:4;
- } cn30xx;
- struct cvmx_pip_prt_tagx_cn50xx {
- uint64_t reserved_40_63:24;
- uint64_t grptagbase:4;
- uint64_t grptagmask:4;
- uint64_t grptag:1;
- uint64_t grptag_mskip:1;
- uint64_t tag_mode:2;
- uint64_t inc_vs:2;
- uint64_t inc_vlan:1;
- uint64_t inc_prt_flag:1;
- uint64_t ip6_dprt_flag:1;
- uint64_t ip4_dprt_flag:1;
- uint64_t ip6_sprt_flag:1;
- uint64_t ip4_sprt_flag:1;
- uint64_t ip6_nxth_flag:1;
- uint64_t ip4_pctl_flag:1;
- uint64_t ip6_dst_flag:1;
- uint64_t ip4_dst_flag:1;
- uint64_t ip6_src_flag:1;
- uint64_t ip4_src_flag:1;
- uint64_t tcp6_tag_type:2;
- uint64_t tcp4_tag_type:2;
- uint64_t ip6_tag_type:2;
- uint64_t ip4_tag_type:2;
- uint64_t non_tag_type:2;
- uint64_t grp:4;
- } cn50xx;
-};
-
-union cvmx_spxx_int_reg {
- uint64_t u64;
- struct cvmx_spxx_int_reg_s {
- uint64_t reserved_32_63:32;
- uint64_t spf:1;
- uint64_t reserved_12_30:19;
- uint64_t calerr:1;
- uint64_t syncerr:1;
- uint64_t diperr:1;
- uint64_t tpaovr:1;
- uint64_t rsverr:1;
- uint64_t drwnng:1;
- uint64_t clserr:1;
- uint64_t spiovr:1;
- uint64_t reserved_2_3:2;
- uint64_t abnorm:1;
- uint64_t prtnxa:1;
- } s;
-};
-
-union cvmx_spxx_int_msk {
- uint64_t u64;
- struct cvmx_spxx_int_msk_s {
- uint64_t reserved_12_63:52;
- uint64_t calerr:1;
- uint64_t syncerr:1;
- uint64_t diperr:1;
- uint64_t tpaovr:1;
- uint64_t rsverr:1;
- uint64_t drwnng:1;
- uint64_t clserr:1;
- uint64_t spiovr:1;
- uint64_t reserved_2_3:2;
- uint64_t abnorm:1;
- uint64_t prtnxa:1;
- } s;
-};
-
-union cvmx_pow_wq_int {
- uint64_t u64;
- struct cvmx_pow_wq_int_s {
- uint64_t wq_int:16;
- uint64_t iq_dis:16;
- uint64_t reserved_32_63:32;
- } s;
-};
-
-union cvmx_sso_wq_int_thrx {
- uint64_t u64;
- struct {
- uint64_t iq_thr:12;
- uint64_t reserved_12_13:2;
- uint64_t ds_thr:12;
- uint64_t reserved_26_27:2;
- uint64_t tc_thr:4;
- uint64_t tc_en:1;
- uint64_t reserved_33_63:31;
- } s;
-};
-
-union cvmx_stxx_int_reg {
- uint64_t u64;
- struct cvmx_stxx_int_reg_s {
- uint64_t reserved_9_63:55;
- uint64_t syncerr:1;
- uint64_t frmerr:1;
- uint64_t unxfrm:1;
- uint64_t nosync:1;
- uint64_t diperr:1;
- uint64_t datovr:1;
- uint64_t ovrbst:1;
- uint64_t calpar1:1;
- uint64_t calpar0:1;
- } s;
-};
-
-union cvmx_stxx_int_msk {
- uint64_t u64;
- struct cvmx_stxx_int_msk_s {
- uint64_t reserved_8_63:56;
- uint64_t frmerr:1;
- uint64_t unxfrm:1;
- uint64_t nosync:1;
- uint64_t diperr:1;
- uint64_t datovr:1;
- uint64_t ovrbst:1;
- uint64_t calpar1:1;
- uint64_t calpar0:1;
- } s;
-};
-
-union cvmx_pow_wq_int_pc {
- uint64_t u64;
- struct cvmx_pow_wq_int_pc_s {
- uint64_t reserved_0_7:8;
- uint64_t pc_thr:20;
- uint64_t reserved_28_31:4;
- uint64_t pc:28;
- uint64_t reserved_60_63:4;
- } s;
-};
-
-union cvmx_pow_wq_int_thrx {
- uint64_t u64;
- struct cvmx_pow_wq_int_thrx_s {
- uint64_t reserved_29_63:35;
- uint64_t tc_en:1;
- uint64_t tc_thr:4;
- uint64_t reserved_23_23:1;
- uint64_t ds_thr:11;
- uint64_t reserved_11_11:1;
- uint64_t iq_thr:11;
- } s;
- struct cvmx_pow_wq_int_thrx_cn30xx {
- uint64_t reserved_29_63:35;
- uint64_t tc_en:1;
- uint64_t tc_thr:4;
- uint64_t reserved_18_23:6;
- uint64_t ds_thr:6;
- uint64_t reserved_6_11:6;
- uint64_t iq_thr:6;
- } cn30xx;
- struct cvmx_pow_wq_int_thrx_cn31xx {
- uint64_t reserved_29_63:35;
- uint64_t tc_en:1;
- uint64_t tc_thr:4;
- uint64_t reserved_20_23:4;
- uint64_t ds_thr:8;
- uint64_t reserved_8_11:4;
- uint64_t iq_thr:8;
- } cn31xx;
- struct cvmx_pow_wq_int_thrx_cn52xx {
- uint64_t reserved_29_63:35;
- uint64_t tc_en:1;
- uint64_t tc_thr:4;
- uint64_t reserved_21_23:3;
- uint64_t ds_thr:9;
- uint64_t reserved_9_11:3;
- uint64_t iq_thr:9;
- } cn52xx;
- struct cvmx_pow_wq_int_thrx_cn63xx {
- uint64_t reserved_29_63:35;
- uint64_t tc_en:1;
- uint64_t tc_thr:4;
- uint64_t reserved_22_23:2;
- uint64_t ds_thr:10;
- uint64_t reserved_10_11:2;
- uint64_t iq_thr:10;
- } cn63xx;
-};
-
-union cvmx_npi_rsl_int_blocks {
- uint64_t u64;
- struct cvmx_npi_rsl_int_blocks_s {
- uint64_t reserved_32_63:32;
- uint64_t rint_31:1;
- uint64_t iob:1;
- uint64_t reserved_28_29:2;
- uint64_t rint_27:1;
- uint64_t rint_26:1;
- uint64_t rint_25:1;
- uint64_t rint_24:1;
- uint64_t asx1:1;
- uint64_t asx0:1;
- uint64_t rint_21:1;
- uint64_t pip:1;
- uint64_t spx1:1;
- uint64_t spx0:1;
- uint64_t lmc:1;
- uint64_t l2c:1;
- uint64_t rint_15:1;
- uint64_t reserved_13_14:2;
- uint64_t pow:1;
- uint64_t tim:1;
- uint64_t pko:1;
- uint64_t ipd:1;
- uint64_t rint_8:1;
- uint64_t zip:1;
- uint64_t dfa:1;
- uint64_t fpa:1;
- uint64_t key:1;
- uint64_t npi:1;
- uint64_t gmx1:1;
- uint64_t gmx0:1;
- uint64_t mio:1;
- } s;
- struct cvmx_npi_rsl_int_blocks_cn30xx {
- uint64_t reserved_32_63:32;
- uint64_t rint_31:1;
- uint64_t iob:1;
- uint64_t rint_29:1;
- uint64_t rint_28:1;
- uint64_t rint_27:1;
- uint64_t rint_26:1;
- uint64_t rint_25:1;
- uint64_t rint_24:1;
- uint64_t asx1:1;
- uint64_t asx0:1;
- uint64_t rint_21:1;
- uint64_t pip:1;
- uint64_t spx1:1;
- uint64_t spx0:1;
- uint64_t lmc:1;
- uint64_t l2c:1;
- uint64_t rint_15:1;
- uint64_t rint_14:1;
- uint64_t usb:1;
- uint64_t pow:1;
- uint64_t tim:1;
- uint64_t pko:1;
- uint64_t ipd:1;
- uint64_t rint_8:1;
- uint64_t zip:1;
- uint64_t dfa:1;
- uint64_t fpa:1;
- uint64_t key:1;
- uint64_t npi:1;
- uint64_t gmx1:1;
- uint64_t gmx0:1;
- uint64_t mio:1;
- } cn30xx;
- struct cvmx_npi_rsl_int_blocks_cn38xx {
- uint64_t reserved_32_63:32;
- uint64_t rint_31:1;
- uint64_t iob:1;
- uint64_t rint_29:1;
- uint64_t rint_28:1;
- uint64_t rint_27:1;
- uint64_t rint_26:1;
- uint64_t rint_25:1;
- uint64_t rint_24:1;
- uint64_t asx1:1;
- uint64_t asx0:1;
- uint64_t rint_21:1;
- uint64_t pip:1;
- uint64_t spx1:1;
- uint64_t spx0:1;
- uint64_t lmc:1;
- uint64_t l2c:1;
- uint64_t rint_15:1;
- uint64_t rint_14:1;
- uint64_t rint_13:1;
- uint64_t pow:1;
- uint64_t tim:1;
- uint64_t pko:1;
- uint64_t ipd:1;
- uint64_t rint_8:1;
- uint64_t zip:1;
- uint64_t dfa:1;
- uint64_t fpa:1;
- uint64_t key:1;
- uint64_t npi:1;
- uint64_t gmx1:1;
- uint64_t gmx0:1;
- uint64_t mio:1;
- } cn38xx;
- struct cvmx_npi_rsl_int_blocks_cn50xx {
- uint64_t reserved_31_63:33;
- uint64_t iob:1;
- uint64_t lmc1:1;
- uint64_t agl:1;
- uint64_t reserved_24_27:4;
- uint64_t asx1:1;
- uint64_t asx0:1;
- uint64_t reserved_21_21:1;
- uint64_t pip:1;
- uint64_t spx1:1;
- uint64_t spx0:1;
- uint64_t lmc:1;
- uint64_t l2c:1;
- uint64_t reserved_15_15:1;
- uint64_t rad:1;
- uint64_t usb:1;
- uint64_t pow:1;
- uint64_t tim:1;
- uint64_t pko:1;
- uint64_t ipd:1;
- uint64_t reserved_8_8:1;
- uint64_t zip:1;
- uint64_t dfa:1;
- uint64_t fpa:1;
- uint64_t key:1;
- uint64_t npi:1;
- uint64_t gmx1:1;
- uint64_t gmx0:1;
- uint64_t mio:1;
- } cn50xx;
-};
-
-union cvmx_pko_command_word0 {
- uint64_t u64;
- struct {
- uint64_t total_bytes:16;
- uint64_t segs:6;
- uint64_t dontfree:1;
- uint64_t ignore_i:1;
- uint64_t ipoffp1:7;
- uint64_t gather:1;
- uint64_t rsp:1;
- uint64_t wqp:1;
- uint64_t n2:1;
- uint64_t le:1;
- uint64_t reg0:11;
- uint64_t subone0:1;
- uint64_t reg1:11;
- uint64_t subone1:1;
- uint64_t size0:2;
- uint64_t size1:2;
- } s;
-};
-
-union cvmx_ciu_timx {
- uint64_t u64;
- struct cvmx_ciu_timx_s {
- uint64_t reserved_37_63:27;
- uint64_t one_shot:1;
- uint64_t len:36;
- } s;
-};
-
-union cvmx_gmxx_rxx_rx_inbnd {
- uint64_t u64;
- struct cvmx_gmxx_rxx_rx_inbnd_s {
- uint64_t status:1;
- uint64_t speed:2;
- uint64_t duplex:1;
- uint64_t reserved_4_63:60;
- } s;
-};
-
-static inline int32_t cvmx_fau_fetch_and_add32(enum cvmx_fau_reg_32 reg,
- int32_t value)
-{
- return value;
-}
-
-static inline void cvmx_fau_atomic_add32(enum cvmx_fau_reg_32 reg,
- int32_t value)
-{ }
-
-static inline void cvmx_fau_atomic_write32(enum cvmx_fau_reg_32 reg,
- int32_t value)
-{ }
-
-static inline uint64_t cvmx_scratch_read64(uint64_t address)
-{
- return 0;
-}
-
-static inline void cvmx_scratch_write64(uint64_t address, uint64_t value)
-{ }
-
-static inline int cvmx_wqe_get_grp(struct cvmx_wqe *work)
-{
- return 0;
-}
-
-static inline void *cvmx_phys_to_ptr(uint64_t physical_address)
-{
- return (void *)(uintptr_t)(physical_address);
-}
-
-static inline uint64_t cvmx_ptr_to_phys(void *ptr)
-{
- return (unsigned long)ptr;
-}
-
-static inline int cvmx_helper_get_interface_num(int ipd_port)
-{
- return ipd_port;
-}
-
-static inline int cvmx_helper_get_interface_index_num(int ipd_port)
-{
- return ipd_port;
-}
-
-static inline void cvmx_fpa_enable(void)
-{ }
-
-static inline uint64_t cvmx_read_csr(uint64_t csr_addr)
-{
- return 0;
-}
-
-static inline void cvmx_write_csr(uint64_t csr_addr, uint64_t val)
-{ }
-
-static inline int cvmx_helper_setup_red(int pass_thresh, int drop_thresh)
-{
- return 0;
-}
-
-static inline void *cvmx_fpa_alloc(uint64_t pool)
-{
- return NULL;
-}
-
-static inline void cvmx_fpa_free(void *ptr, uint64_t pool,
- uint64_t num_cache_lines)
-{ }
-
-static inline int octeon_is_simulation(void)
-{
- return 1;
-}
-
-static inline void cvmx_pip_get_port_status(uint64_t port_num, uint64_t clear,
- cvmx_pip_port_status_t *status)
-{ }
-
-static inline void cvmx_pko_get_port_status(uint64_t port_num, uint64_t clear,
- cvmx_pko_port_status_t *status)
-{ }
-
-static inline cvmx_helper_interface_mode_t cvmx_helper_interface_get_mode(int
- interface)
-{
- return 0;
-}
-
-static inline union cvmx_helper_link_info cvmx_helper_link_get(int ipd_port)
-{
- union cvmx_helper_link_info ret = { .u64 = 0 };
-
- return ret;
-}
-
-static inline int cvmx_helper_link_set(int ipd_port,
- union cvmx_helper_link_info link_info)
-{
- return 0;
-}
-
-static inline int cvmx_helper_initialize_packet_io_global(void)
-{
- return 0;
-}
-
-static inline int cvmx_helper_get_number_of_interfaces(void)
-{
- return 2;
-}
-
-static inline int cvmx_helper_ports_on_interface(int interface)
-{
- return 1;
-}
-
-static inline int cvmx_helper_get_ipd_port(int interface, int port)
-{
- return 0;
-}
-
-static inline int cvmx_helper_ipd_and_packet_input_enable(void)
-{
- return 0;
-}
-
-static inline void cvmx_ipd_disable(void)
-{ }
-
-static inline void cvmx_ipd_free_ptr(void)
-{ }
-
-static inline void cvmx_pko_disable(void)
-{ }
-
-static inline void cvmx_pko_shutdown(void)
-{ }
-
-static inline int cvmx_pko_get_base_queue_per_core(int port, int core)
-{
- return port;
-}
-
-static inline int cvmx_pko_get_base_queue(int port)
-{
- return port;
-}
-
-static inline int cvmx_pko_get_num_queues(int port)
-{
- return port;
-}
-
-static inline unsigned int cvmx_get_core_num(void)
-{
- return 0;
-}
-
-static inline void cvmx_pow_work_request_async_nocheck(int scr_addr,
- cvmx_pow_wait_t wait)
-{ }
-
-static inline void cvmx_pow_work_request_async(int scr_addr,
- cvmx_pow_wait_t wait)
-{ }
-
-static inline struct cvmx_wqe *cvmx_pow_work_response_async(int scr_addr)
-{
- struct cvmx_wqe *wqe = (void *)(unsigned long)scr_addr;
-
- return wqe;
-}
-
-static inline struct cvmx_wqe *cvmx_pow_work_request_sync(cvmx_pow_wait_t wait)
-{
- return (void *)(unsigned long)wait;
-}
-
-static inline int cvmx_spi_restart_interface(int interface,
- cvmx_spi_mode_t mode, int timeout)
-{
- return 0;
-}
-
-static inline void cvmx_fau_async_fetch_and_add32(uint64_t scraddr,
- enum cvmx_fau_reg_32 reg,
- int32_t value)
-{ }
-
-static inline union cvmx_gmxx_rxx_rx_inbnd cvmx_spi4000_check_speed(
- int interface,
- int port)
-{
- union cvmx_gmxx_rxx_rx_inbnd r;
-
- r.u64 = 0;
- return r;
-}
-
-static inline void cvmx_pko_send_packet_prepare(uint64_t port, uint64_t queue,
- cvmx_pko_lock_t use_locking)
-{ }
-
-static inline cvmx_pko_status_t cvmx_pko_send_packet_finish(uint64_t port,
- uint64_t queue, union cvmx_pko_command_word0 pko_command,
- union cvmx_buf_ptr packet, cvmx_pko_lock_t use_locking)
-{
- return 0;
-}
-
-static inline void cvmx_wqe_set_port(struct cvmx_wqe *work, int port)
-{ }
-
-static inline void cvmx_wqe_set_qos(struct cvmx_wqe *work, int qos)
-{ }
-
-static inline int cvmx_wqe_get_qos(struct cvmx_wqe *work)
-{
- return 0;
-}
-
-static inline void cvmx_wqe_set_grp(struct cvmx_wqe *work, int grp)
-{ }
-
-static inline void cvmx_pow_work_submit(struct cvmx_wqe *wqp, uint32_t tag,
- enum cvmx_pow_tag_type tag_type,
- uint64_t qos, uint64_t grp)
-{ }
-
-#define CVMX_ASXX_RX_CLK_SETX(a, b) ((a)+(b))
-#define CVMX_ASXX_TX_CLK_SETX(a, b) ((a)+(b))
-#define CVMX_CIU_TIMX(a) (a)
-#define CVMX_GMXX_RXX_ADR_CAM0(a, b) ((a)+(b))
-#define CVMX_GMXX_RXX_ADR_CAM1(a, b) ((a)+(b))
-#define CVMX_GMXX_RXX_ADR_CAM2(a, b) ((a)+(b))
-#define CVMX_GMXX_RXX_ADR_CAM3(a, b) ((a)+(b))
-#define CVMX_GMXX_RXX_ADR_CAM4(a, b) ((a)+(b))
-#define CVMX_GMXX_RXX_ADR_CAM5(a, b) ((a)+(b))
-#define CVMX_GMXX_RXX_FRM_CTL(a, b) ((a)+(b))
-#define CVMX_GMXX_RXX_INT_REG(a, b) ((a)+(b))
-#define CVMX_GMXX_SMACX(a, b) ((a)+(b))
-#define CVMX_PIP_PRT_TAGX(a) (a)
-#define CVMX_POW_PP_GRP_MSKX(a) (a)
-#define CVMX_POW_WQ_INT_THRX(a) (a)
-#define CVMX_SPXX_INT_MSK(a) (a)
-#define CVMX_SPXX_INT_REG(a) (a)
-#define CVMX_SSO_PPX_GRP_MSK(a) (a)
-#define CVMX_SSO_WQ_INT_THRX(a) (a)
-#define CVMX_STXX_INT_MSK(a) (a)
-#define CVMX_STXX_INT_REG(a) (a)
diff --git a/drivers/staging/qlge/qlge.h b/drivers/staging/qlge/qlge.h
index 6ec7e3ce3863..4bc5d5fce9bf 100644
--- a/drivers/staging/qlge/qlge.h
+++ b/drivers/staging/qlge/qlge.h
@@ -63,7 +63,6 @@
#define UDELAY_COUNT 3
#define UDELAY_DELAY 100
-
#define TX_DESC_PER_IOCB 8
#if ((MAX_SKB_FRAGS - TX_DESC_PER_IOCB) + 2) > 0
@@ -1627,18 +1626,18 @@ enum {
#define MPI_COREDUMP_COOKIE 0x5555aaaa
struct mpi_coredump_global_header {
u32 cookie;
- u8 idString[16];
- u32 timeLo;
- u32 timeHi;
- u32 imageSize;
- u32 headerSize;
+ u8 id_string[16];
+ u32 time_lo;
+ u32 time_hi;
+ u32 image_size;
+ u32 header_size;
u8 info[220];
};
struct mpi_coredump_segment_header {
u32 cookie;
- u32 segNum;
- u32 segSize;
+ u32 seg_num;
+ u32 seg_size;
u32 extra;
u8 description[16];
};
diff --git a/drivers/staging/qlge/qlge_dbg.c b/drivers/staging/qlge/qlge_dbg.c
index 83f34ca43aa4..8cf39615c520 100644
--- a/drivers/staging/qlge/qlge_dbg.c
+++ b/drivers/staging/qlge/qlge_dbg.c
@@ -142,10 +142,10 @@ static int ql_get_serdes_regs(struct ql_adapter *qdev,
u32 *direct_ptr, temp;
u32 *indirect_ptr;
-
/* The XAUI needs to be read out per port */
status = ql_read_other_func_serdes_reg(qdev,
- XG_SERDES_XAUI_HSS_PCS_START, &temp);
+ XG_SERDES_XAUI_HSS_PCS_START,
+ &temp);
if (status)
temp = XG_SERDES_ADDR_XAUI_PWR_DOWN;
@@ -297,7 +297,6 @@ static int ql_get_serdes_regs(struct ql_adapter *qdev,
ql_get_both_serdes(qdev, i, direct_ptr, indirect_ptr,
xfi_direct_valid, xfi_indirect_valid);
-
/* Get XAUI_XFI_HSS_PLL register block. */
if (qdev->func & 1) {
direct_ptr =
@@ -482,7 +481,8 @@ static int ql_get_mpi_shadow_regs(struct ql_adapter *qdev, u32 *buf)
int status;
for (i = 0; i < MPI_CORE_SH_REGS_CNT; i++, buf++) {
- status = ql_write_mpi_reg(qdev, RISC_124,
+ status = ql_write_mpi_reg(qdev,
+ RISC_124,
(SHADOW_OFFSET | i << SHADOW_REG_SHIFT));
if (status)
goto end;
@@ -702,8 +702,8 @@ static void ql_build_coredump_seg_header(
{
memset(seg_hdr, 0, sizeof(struct mpi_coredump_segment_header));
seg_hdr->cookie = MPI_COREDUMP_COOKIE;
- seg_hdr->segNum = seg_number;
- seg_hdr->segSize = seg_size;
+ seg_hdr->seg_num = seg_number;
+ seg_hdr->seg_size = seg_size;
strncpy(seg_hdr->description, desc, (sizeof(seg_hdr->description)) - 1);
}
@@ -741,12 +741,12 @@ int ql_core_dump(struct ql_adapter *qdev, struct ql_mpi_coredump *mpi_coredump)
memset(&(mpi_coredump->mpi_global_header), 0,
sizeof(struct mpi_coredump_global_header));
mpi_coredump->mpi_global_header.cookie = MPI_COREDUMP_COOKIE;
- mpi_coredump->mpi_global_header.headerSize =
+ mpi_coredump->mpi_global_header.header_size =
sizeof(struct mpi_coredump_global_header);
- mpi_coredump->mpi_global_header.imageSize =
+ mpi_coredump->mpi_global_header.image_size =
sizeof(struct ql_mpi_coredump);
- strncpy(mpi_coredump->mpi_global_header.idString, "MPI Coredump",
- sizeof(mpi_coredump->mpi_global_header.idString));
+ strncpy(mpi_coredump->mpi_global_header.id_string, "MPI Coredump",
+ sizeof(mpi_coredump->mpi_global_header.id_string));
/* Get generic NIC reg dump */
ql_build_coredump_seg_header(&mpi_coredump->nic_regs_seg_hdr,
@@ -1108,7 +1108,7 @@ int ql_core_dump(struct ql_adapter *qdev, struct ql_mpi_coredump *mpi_coredump)
+ sizeof(mpi_coredump->nic_routing_words),
"Routing Words");
status = ql_get_routing_entries(qdev,
- &mpi_coredump->nic_routing_words[0]);
+ &mpi_coredump->nic_routing_words[0]);
if (status)
goto err;
@@ -1227,17 +1227,15 @@ static void ql_gen_reg_dump(struct ql_adapter *qdev,
{
int i, status;
-
memset(&(mpi_coredump->mpi_global_header), 0,
sizeof(struct mpi_coredump_global_header));
mpi_coredump->mpi_global_header.cookie = MPI_COREDUMP_COOKIE;
- mpi_coredump->mpi_global_header.headerSize =
+ mpi_coredump->mpi_global_header.header_size =
sizeof(struct mpi_coredump_global_header);
- mpi_coredump->mpi_global_header.imageSize =
+ mpi_coredump->mpi_global_header.image_size =
sizeof(struct ql_reg_dump);
- strncpy(mpi_coredump->mpi_global_header.idString, "MPI Coredump",
- sizeof(mpi_coredump->mpi_global_header.idString));
-
+ strncpy(mpi_coredump->mpi_global_header.id_string, "MPI Coredump",
+ sizeof(mpi_coredump->mpi_global_header.id_string));
/* segment 16 */
ql_build_coredump_seg_header(&mpi_coredump->misc_nic_seg_hdr,
diff --git a/drivers/staging/qlge/qlge_ethtool.c b/drivers/staging/qlge/qlge_ethtool.c
index 56d116d79e56..790997aff995 100644
--- a/drivers/staging/qlge/qlge_ethtool.c
+++ b/drivers/staging/qlge/qlge_ethtool.c
@@ -32,7 +32,6 @@
#include <linux/mm.h>
#include <linux/vmalloc.h>
-
#include "qlge.h"
struct ql_stats {
@@ -197,8 +196,7 @@ static int ql_update_ring_coalescing(struct ql_adapter *qdev)
*/
cqicb = (struct cqicb *)&qdev->rx_ring[qdev->rss_ring_count];
if (le16_to_cpu(cqicb->irq_delay) != qdev->tx_coalesce_usecs ||
- le16_to_cpu(cqicb->pkt_delay) !=
- qdev->tx_max_coalesced_frames) {
+ le16_to_cpu(cqicb->pkt_delay) != qdev->tx_max_coalesced_frames) {
for (i = qdev->rss_ring_count; i < qdev->rx_ring_count; i++) {
rx_ring = &qdev->rx_ring[i];
cqicb = (struct cqicb *)rx_ring;
@@ -207,7 +205,7 @@ static int ql_update_ring_coalescing(struct ql_adapter *qdev)
cpu_to_le16(qdev->tx_max_coalesced_frames);
cqicb->flags = FLAGS_LI;
status = ql_write_cfg(qdev, cqicb, sizeof(*cqicb),
- CFG_LCQ, rx_ring->cq_id);
+ CFG_LCQ, rx_ring->cq_id);
if (status) {
netif_err(qdev, ifup, qdev->ndev,
"Failed to load CQICB.\n");
@@ -219,8 +217,7 @@ static int ql_update_ring_coalescing(struct ql_adapter *qdev)
/* Update the inbound (RSS) handler queues if they changed. */
cqicb = (struct cqicb *)&qdev->rx_ring[0];
if (le16_to_cpu(cqicb->irq_delay) != qdev->rx_coalesce_usecs ||
- le16_to_cpu(cqicb->pkt_delay) !=
- qdev->rx_max_coalesced_frames) {
+ le16_to_cpu(cqicb->pkt_delay) != qdev->rx_max_coalesced_frames) {
for (i = 0; i < qdev->rss_ring_count; i++, rx_ring++) {
rx_ring = &qdev->rx_ring[i];
cqicb = (struct cqicb *)rx_ring;
@@ -229,7 +226,7 @@ static int ql_update_ring_coalescing(struct ql_adapter *qdev)
cpu_to_le16(qdev->rx_max_coalesced_frames);
cqicb->flags = FLAGS_LI;
status = ql_write_cfg(qdev, cqicb, sizeof(*cqicb),
- CFG_LCQ, rx_ring->cq_id);
+ CFG_LCQ, rx_ring->cq_id);
if (status) {
netif_err(qdev, ifup, qdev->ndev,
"Failed to load CQICB.\n");
@@ -332,6 +329,7 @@ quit:
static void ql_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
{
int index;
+
switch (stringset) {
case ETH_SS_TEST:
memcpy(buf, *ql_gstrings_test, QLGE_TEST_LEN * ETH_GSTRING_LEN);
@@ -339,8 +337,8 @@ static void ql_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
case ETH_SS_STATS:
for (index = 0; index < QLGE_STATS_LEN; index++) {
memcpy(buf + index * ETH_GSTRING_LEN,
- ql_gstrings_stats[index].stat_string,
- ETH_GSTRING_LEN);
+ ql_gstrings_stats[index].stat_string,
+ ETH_GSTRING_LEN);
}
break;
}
@@ -412,6 +410,7 @@ static void ql_get_drvinfo(struct net_device *ndev,
struct ethtool_drvinfo *drvinfo)
{
struct ql_adapter *qdev = netdev_priv(ndev);
+
strlcpy(drvinfo->driver, qlge_driver_name, sizeof(drvinfo->driver));
strlcpy(drvinfo->version, qlge_driver_version,
sizeof(drvinfo->version));
@@ -431,7 +430,7 @@ static void ql_get_wol(struct net_device *ndev, struct ethtool_wolinfo *wol)
/* WOL is only supported for mezz card. */
if (ssys_dev == QLGE_MEZZ_SSYS_ID_068 ||
- ssys_dev == QLGE_MEZZ_SSYS_ID_180) {
+ ssys_dev == QLGE_MEZZ_SSYS_ID_180) {
wol->supported = WAKE_MAGIC;
wol->wolopts = qdev->wol;
}
@@ -444,9 +443,9 @@ static int ql_set_wol(struct net_device *ndev, struct ethtool_wolinfo *wol)
/* WOL is only supported for mezz card. */
if (ssys_dev != QLGE_MEZZ_SSYS_ID_068 &&
- ssys_dev != QLGE_MEZZ_SSYS_ID_180) {
+ ssys_dev != QLGE_MEZZ_SSYS_ID_180) {
netif_info(qdev, drv, qdev->ndev,
- "WOL is only supported for mezz card\n");
+ "WOL is only supported for mezz card\n");
return -EOPNOTSUPP;
}
if (wol->wolopts & ~WAKE_MAGIC)
@@ -506,7 +505,7 @@ static void ql_stop_loopback(struct ql_adapter *qdev)
}
static void ql_create_lb_frame(struct sk_buff *skb,
- unsigned int frame_size)
+ unsigned int frame_size)
{
memset(skb->data, 0xFF, frame_size);
frame_size &= ~1;
@@ -516,13 +515,13 @@ static void ql_create_lb_frame(struct sk_buff *skb,
}
void ql_check_lb_frame(struct ql_adapter *qdev,
- struct sk_buff *skb)
+ struct sk_buff *skb)
{
unsigned int frame_size = skb->len;
if ((*(skb->data + 3) == 0xFF) &&
- (*(skb->data + frame_size / 2 + 10) == 0xBE) &&
- (*(skb->data + frame_size / 2 + 12) == 0xAF)) {
+ (*(skb->data + frame_size / 2 + 10) == 0xBE) &&
+ (*(skb->data + frame_size / 2 + 12) == 0xAF)) {
atomic_dec(&qdev->lb_count);
return;
}
@@ -566,7 +565,7 @@ out:
}
static void ql_self_test(struct net_device *ndev,
- struct ethtool_test *eth_test, u64 *data)
+ struct ethtool_test *eth_test, u64 *data)
{
struct ql_adapter *qdev = netdev_priv(ndev);
@@ -672,7 +671,7 @@ static int ql_set_coalesce(struct net_device *ndev, struct ethtool_coalesce *c)
}
static void ql_get_pauseparam(struct net_device *netdev,
- struct ethtool_pauseparam *pause)
+ struct ethtool_pauseparam *pause)
{
struct ql_adapter *qdev = netdev_priv(netdev);
@@ -684,7 +683,7 @@ static void ql_get_pauseparam(struct net_device *netdev,
}
static int ql_set_pauseparam(struct net_device *netdev,
- struct ethtool_pauseparam *pause)
+ struct ethtool_pauseparam *pause)
{
struct ql_adapter *qdev = netdev_priv(netdev);
int status = 0;
@@ -703,12 +702,14 @@ static int ql_set_pauseparam(struct net_device *netdev,
static u32 ql_get_msglevel(struct net_device *ndev)
{
struct ql_adapter *qdev = netdev_priv(ndev);
+
return qdev->msg_enable;
}
static void ql_set_msglevel(struct net_device *ndev, u32 value)
{
struct ql_adapter *qdev = netdev_priv(ndev);
+
qdev->msg_enable = value;
}
diff --git a/drivers/staging/qlge/qlge_main.c b/drivers/staging/qlge/qlge_main.c
index 6ad4515311f7..ef8037d0b52e 100644
--- a/drivers/staging/qlge/qlge_main.c
+++ b/drivers/staging/qlge/qlge_main.c
@@ -77,14 +77,12 @@ MODULE_PARM_DESC(qlge_irq_type, "0 = MSI-X, 1 = MSI, 2 = Legacy.");
static int qlge_mpi_coredump;
module_param(qlge_mpi_coredump, int, 0);
MODULE_PARM_DESC(qlge_mpi_coredump,
- "Option to enable MPI firmware dump. "
- "Default is OFF - Do Not allocate memory. ");
+ "Option to enable MPI firmware dump. Default is OFF - Do Not allocate memory. ");
static int qlge_force_coredump;
module_param(qlge_force_coredump, int, 0);
MODULE_PARM_DESC(qlge_force_coredump,
- "Option to allow force of firmware core dump. "
- "Default is OFF - Do not allow.");
+ "Option to allow force of firmware core dump. Default is OFF - Do not allow.");
static const struct pci_device_id qlge_pci_tbl[] = {
{PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, QLGE_DEVICE_ID_8012)},
@@ -178,8 +176,9 @@ int ql_wait_reg_rdy(struct ql_adapter *qdev, u32 reg, u32 bit, u32 err_bit)
"register 0x%.08x access error, value = 0x%.08x!.\n",
reg, temp);
return -EIO;
- } else if (temp & bit)
+ } else if (temp & bit) {
return 0;
+ }
udelay(UDELAY_DELAY);
}
netif_alert(qdev, probe, qdev->ndev,
@@ -206,7 +205,6 @@ static int ql_wait_cfg(struct ql_adapter *qdev, u32 bit)
return -ETIMEDOUT;
}
-
/* Used to issue init control blocks to hw. Maps control block,
* sets address, triggers download, waits for completion.
*/
@@ -270,36 +268,34 @@ int ql_get_mac_addr_reg(struct ql_adapter *qdev, u32 type, u16 index,
{
status =
ql_wait_reg_rdy(qdev,
- MAC_ADDR_IDX, MAC_ADDR_MW, 0);
+ MAC_ADDR_IDX, MAC_ADDR_MW, 0);
if (status)
goto exit;
ql_write32(qdev, MAC_ADDR_IDX, (offset++) | /* offset */
(index << MAC_ADDR_IDX_SHIFT) | /* index */
MAC_ADDR_ADR | MAC_ADDR_RS | type); /* type */
status =
- ql_wait_reg_rdy(qdev,
- MAC_ADDR_IDX, MAC_ADDR_MR, 0);
+ ql_wait_reg_rdy(qdev, MAC_ADDR_IDX, MAC_ADDR_MR, 0);
if (status)
goto exit;
*value++ = ql_read32(qdev, MAC_ADDR_DATA);
status =
- ql_wait_reg_rdy(qdev,
- MAC_ADDR_IDX, MAC_ADDR_MW, 0);
+ ql_wait_reg_rdy(qdev, MAC_ADDR_IDX, MAC_ADDR_MW, 0);
if (status)
goto exit;
ql_write32(qdev, MAC_ADDR_IDX, (offset++) | /* offset */
(index << MAC_ADDR_IDX_SHIFT) | /* index */
MAC_ADDR_ADR | MAC_ADDR_RS | type); /* type */
status =
- ql_wait_reg_rdy(qdev,
- MAC_ADDR_IDX, MAC_ADDR_MR, 0);
+ ql_wait_reg_rdy(qdev, MAC_ADDR_IDX, MAC_ADDR_MR, 0);
if (status)
goto exit;
*value++ = ql_read32(qdev, MAC_ADDR_DATA);
if (type == MAC_ADDR_TYPE_CAM_MAC) {
status =
ql_wait_reg_rdy(qdev,
- MAC_ADDR_IDX, MAC_ADDR_MW, 0);
+ MAC_ADDR_IDX, MAC_ADDR_MW,
+ 0);
if (status)
goto exit;
ql_write32(qdev, MAC_ADDR_IDX, (offset++) | /* offset */
@@ -343,7 +339,7 @@ static int ql_set_mac_addr_reg(struct ql_adapter *qdev, u8 *addr, u32 type,
status =
ql_wait_reg_rdy(qdev,
- MAC_ADDR_IDX, MAC_ADDR_MW, 0);
+ MAC_ADDR_IDX, MAC_ADDR_MW, 0);
if (status)
goto exit;
ql_write32(qdev, MAC_ADDR_IDX, (offset++) |
@@ -352,7 +348,7 @@ static int ql_set_mac_addr_reg(struct ql_adapter *qdev, u8 *addr, u32 type,
ql_write32(qdev, MAC_ADDR_DATA, lower);
status =
ql_wait_reg_rdy(qdev,
- MAC_ADDR_IDX, MAC_ADDR_MW, 0);
+ MAC_ADDR_IDX, MAC_ADDR_MW, 0);
if (status)
goto exit;
ql_write32(qdev, MAC_ADDR_IDX, (offset++) |
@@ -362,7 +358,7 @@ static int ql_set_mac_addr_reg(struct ql_adapter *qdev, u8 *addr, u32 type,
ql_write32(qdev, MAC_ADDR_DATA, upper);
status =
ql_wait_reg_rdy(qdev,
- MAC_ADDR_IDX, MAC_ADDR_MW, 0);
+ MAC_ADDR_IDX, MAC_ADDR_MW, 0);
if (status)
goto exit;
break;
@@ -375,8 +371,7 @@ static int ql_set_mac_addr_reg(struct ql_adapter *qdev, u8 *addr, u32 type,
(addr[2] << 24) | (addr[3] << 16) | (addr[4] << 8) |
(addr[5]);
status =
- ql_wait_reg_rdy(qdev,
- MAC_ADDR_IDX, MAC_ADDR_MW, 0);
+ ql_wait_reg_rdy(qdev, MAC_ADDR_IDX, MAC_ADDR_MW, 0);
if (status)
goto exit;
ql_write32(qdev, MAC_ADDR_IDX, (offset++) | /* offset */
@@ -384,8 +379,7 @@ static int ql_set_mac_addr_reg(struct ql_adapter *qdev, u8 *addr, u32 type,
type); /* type */
ql_write32(qdev, MAC_ADDR_DATA, lower);
status =
- ql_wait_reg_rdy(qdev,
- MAC_ADDR_IDX, MAC_ADDR_MW, 0);
+ ql_wait_reg_rdy(qdev, MAC_ADDR_IDX, MAC_ADDR_MW, 0);
if (status)
goto exit;
ql_write32(qdev, MAC_ADDR_IDX, (offset++) | /* offset */
@@ -393,16 +387,15 @@ static int ql_set_mac_addr_reg(struct ql_adapter *qdev, u8 *addr, u32 type,
type); /* type */
ql_write32(qdev, MAC_ADDR_DATA, upper);
status =
- ql_wait_reg_rdy(qdev,
- MAC_ADDR_IDX, MAC_ADDR_MW, 0);
+ ql_wait_reg_rdy(qdev, MAC_ADDR_IDX, MAC_ADDR_MW, 0);
if (status)
goto exit;
ql_write32(qdev, MAC_ADDR_IDX, (offset) | /* offset */
(index << MAC_ADDR_IDX_SHIFT) | /* index */
type); /* type */
/* This field should also include the queue id
- and possibly the function id. Right now we hardcode
- the route field to NIC core.
+ * and possibly the function id. Right now we hardcode
+ * the route field to NIC core.
*/
cam_output = (CAM_OUT_ROUTE_NIC |
(qdev->
@@ -423,8 +416,7 @@ static int ql_set_mac_addr_reg(struct ql_adapter *qdev, u8 *addr, u32 type,
* That's bit-27 we're talking about.
*/
status =
- ql_wait_reg_rdy(qdev,
- MAC_ADDR_IDX, MAC_ADDR_MW, 0);
+ ql_wait_reg_rdy(qdev, MAC_ADDR_IDX, MAC_ADDR_MW, 0);
if (status)
goto exit;
ql_write32(qdev, MAC_ADDR_IDX, offset | /* offset */
@@ -467,7 +459,8 @@ static int ql_set_mac_addr(struct ql_adapter *qdev, int set)
if (status)
return status;
status = ql_set_mac_addr_reg(qdev, (u8 *) addr,
- MAC_ADDR_TYPE_CAM_MAC, qdev->func * MAX_CQ);
+ MAC_ADDR_TYPE_CAM_MAC,
+ qdev->func * MAX_CQ);
ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
if (status)
netif_err(qdev, ifup, qdev->ndev,
@@ -672,17 +665,17 @@ static int ql_read_flash_word(struct ql_adapter *qdev, int offset, __le32 *data)
int status = 0;
/* wait for reg to come ready */
status = ql_wait_reg_rdy(qdev,
- FLASH_ADDR, FLASH_ADDR_RDY, FLASH_ADDR_ERR);
+ FLASH_ADDR, FLASH_ADDR_RDY, FLASH_ADDR_ERR);
if (status)
goto exit;
/* set up for reg read */
ql_write32(qdev, FLASH_ADDR, FLASH_ADDR_R | offset);
/* wait for reg to come ready */
status = ql_wait_reg_rdy(qdev,
- FLASH_ADDR, FLASH_ADDR_RDY, FLASH_ADDR_ERR);
+ FLASH_ADDR, FLASH_ADDR_RDY, FLASH_ADDR_ERR);
if (status)
goto exit;
- /* This data is stored on flash as an array of
+ /* This data is stored on flash as an array of
* __le32. Since ql_read32() returns cpu endian
* we need to swap it back.
*/
@@ -721,8 +714,9 @@ static int ql_get_8000_flash_params(struct ql_adapter *qdev)
}
status = ql_validate_flash(qdev,
- sizeof(struct flash_params_8000) / sizeof(u16),
- "8000");
+ sizeof(struct flash_params_8000) /
+ sizeof(u16),
+ "8000");
if (status) {
netif_err(qdev, ifup, qdev->ndev, "Invalid flash.\n");
status = -EINVAL;
@@ -734,12 +728,12 @@ static int ql_get_8000_flash_params(struct ql_adapter *qdev)
*/
if (qdev->flash.flash_params_8000.data_type1 == 2)
memcpy(mac_addr,
- qdev->flash.flash_params_8000.mac_addr1,
- qdev->ndev->addr_len);
+ qdev->flash.flash_params_8000.mac_addr1,
+ qdev->ndev->addr_len);
else
memcpy(mac_addr,
- qdev->flash.flash_params_8000.mac_addr,
- qdev->ndev->addr_len);
+ qdev->flash.flash_params_8000.mac_addr,
+ qdev->ndev->addr_len);
if (!is_valid_ether_addr(mac_addr)) {
netif_err(qdev, ifup, qdev->ndev, "Invalid MAC address.\n");
@@ -748,8 +742,8 @@ static int ql_get_8000_flash_params(struct ql_adapter *qdev)
}
memcpy(qdev->ndev->dev_addr,
- mac_addr,
- qdev->ndev->addr_len);
+ mac_addr,
+ qdev->ndev->addr_len);
exit:
ql_sem_unlock(qdev, SEM_FLASH_MASK);
@@ -784,8 +778,9 @@ static int ql_get_8012_flash_params(struct ql_adapter *qdev)
}
status = ql_validate_flash(qdev,
- sizeof(struct flash_params_8012) / sizeof(u16),
- "8012");
+ sizeof(struct flash_params_8012) /
+ sizeof(u16),
+ "8012");
if (status) {
netif_err(qdev, ifup, qdev->ndev, "Invalid flash.\n");
status = -EINVAL;
@@ -798,8 +793,8 @@ static int ql_get_8012_flash_params(struct ql_adapter *qdev)
}
memcpy(qdev->ndev->dev_addr,
- qdev->flash.flash_params_8012.mac_addr,
- qdev->ndev->addr_len);
+ qdev->flash.flash_params_8012.mac_addr,
+ qdev->ndev->addr_len);
exit:
ql_sem_unlock(qdev, SEM_FLASH_MASK);
@@ -815,7 +810,7 @@ static int ql_write_xgmac_reg(struct ql_adapter *qdev, u32 reg, u32 data)
int status;
/* wait for reg to come ready */
status = ql_wait_reg_rdy(qdev,
- XGMAC_ADDR, XGMAC_ADDR_RDY, XGMAC_ADDR_XME);
+ XGMAC_ADDR, XGMAC_ADDR_RDY, XGMAC_ADDR_XME);
if (status)
return status;
/* write the data to the data reg */
@@ -834,14 +829,14 @@ int ql_read_xgmac_reg(struct ql_adapter *qdev, u32 reg, u32 *data)
int status = 0;
/* wait for reg to come ready */
status = ql_wait_reg_rdy(qdev,
- XGMAC_ADDR, XGMAC_ADDR_RDY, XGMAC_ADDR_XME);
+ XGMAC_ADDR, XGMAC_ADDR_RDY, XGMAC_ADDR_XME);
if (status)
goto exit;
/* set up for reg read */
ql_write32(qdev, XGMAC_ADDR, reg | XGMAC_ADDR_R);
/* wait for reg to come ready */
status = ql_wait_reg_rdy(qdev,
- XGMAC_ADDR, XGMAC_ADDR_RDY, XGMAC_ADDR_XME);
+ XGMAC_ADDR, XGMAC_ADDR_RDY, XGMAC_ADDR_XME);
if (status)
goto exit;
/* get the data */
@@ -1436,10 +1431,9 @@ static void ql_update_mac_hdr_len(struct ql_adapter *qdev,
/* Process an inbound completion from an rx ring. */
static void ql_process_mac_rx_gro_page(struct ql_adapter *qdev,
- struct rx_ring *rx_ring,
- struct ib_mac_iocb_rsp *ib_mac_rsp,
- u32 length,
- u16 vlan_id)
+ struct rx_ring *rx_ring,
+ struct ib_mac_iocb_rsp *ib_mac_rsp,
+ u32 length, u16 vlan_id)
{
struct sk_buff *skb;
struct qlge_bq_desc *lbq_desc = ql_get_curr_lchunk(qdev, rx_ring);
@@ -1483,10 +1477,9 @@ static void ql_process_mac_rx_gro_page(struct ql_adapter *qdev,
/* Process an inbound completion from an rx ring. */
static void ql_process_mac_rx_page(struct ql_adapter *qdev,
- struct rx_ring *rx_ring,
- struct ib_mac_iocb_rsp *ib_mac_rsp,
- u32 length,
- u16 vlan_id)
+ struct rx_ring *rx_ring,
+ struct ib_mac_iocb_rsp *ib_mac_rsp,
+ u32 length, u16 vlan_id)
{
struct net_device *ndev = qdev->ndev;
struct sk_buff *skb = NULL;
@@ -1528,8 +1521,7 @@ static void ql_process_mac_rx_page(struct ql_adapter *qdev,
"%d bytes of headers and data in large. Chain page to new skb and pull tail.\n",
length);
skb_fill_page_desc(skb, 0, lbq_desc->p.pg_chunk.page,
- lbq_desc->p.pg_chunk.offset + hlen,
- length - hlen);
+ lbq_desc->p.pg_chunk.offset + hlen, length - hlen);
skb->len += length - hlen;
skb->data_len += length - hlen;
skb->truesize += length - hlen;
@@ -1540,7 +1532,7 @@ static void ql_process_mac_rx_page(struct ql_adapter *qdev,
skb_checksum_none_assert(skb);
if ((ndev->features & NETIF_F_RXCSUM) &&
- !(ib_mac_rsp->flags1 & IB_MAC_CSUM_ERR_MASK)) {
+ !(ib_mac_rsp->flags1 & IB_MAC_CSUM_ERR_MASK)) {
/* TCP frame. */
if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_T) {
netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
@@ -1576,10 +1568,9 @@ err_out:
/* Process an inbound completion from an rx ring. */
static void ql_process_mac_rx_skb(struct ql_adapter *qdev,
- struct rx_ring *rx_ring,
- struct ib_mac_iocb_rsp *ib_mac_rsp,
- u32 length,
- u16 vlan_id)
+ struct rx_ring *rx_ring,
+ struct ib_mac_iocb_rsp *ib_mac_rsp,
+ u32 length, u16 vlan_id)
{
struct qlge_bq_desc *sbq_desc = qlge_get_curr_buf(&rx_ring->sbq);
struct net_device *ndev = qdev->ndev;
@@ -1648,7 +1639,7 @@ static void ql_process_mac_rx_skb(struct ql_adapter *qdev,
* csum or frame errors.
*/
if ((ndev->features & NETIF_F_RXCSUM) &&
- !(ib_mac_rsp->flags1 & IB_MAC_CSUM_ERR_MASK)) {
+ !(ib_mac_rsp->flags1 & IB_MAC_CSUM_ERR_MASK)) {
/* TCP frame. */
if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_T) {
netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
@@ -1779,8 +1770,7 @@ static struct sk_buff *ql_build_rx_skb(struct ql_adapter *qdev,
"Chaining page at offset = %d, for %d bytes to skb.\n",
lbq_desc->p.pg_chunk.offset, length);
skb_fill_page_desc(skb, 0, lbq_desc->p.pg_chunk.page,
- lbq_desc->p.pg_chunk.offset,
- length);
+ lbq_desc->p.pg_chunk.offset, length);
skb->len += length;
skb->data_len += length;
skb->truesize += length;
@@ -1804,10 +1794,9 @@ static struct sk_buff *ql_build_rx_skb(struct ql_adapter *qdev,
netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
"%d bytes of headers and data in large. Chain page to new skb and pull tail.\n",
length);
- skb_fill_page_desc(skb, 0,
- lbq_desc->p.pg_chunk.page,
- lbq_desc->p.pg_chunk.offset,
- length);
+ skb_fill_page_desc(skb, 0, lbq_desc->p.pg_chunk.page,
+ lbq_desc->p.pg_chunk.offset,
+ length);
skb->len += length;
skb->data_len += length;
skb->truesize += length;
@@ -1857,9 +1846,8 @@ static struct sk_buff *ql_build_rx_skb(struct ql_adapter *qdev,
"Adding page %d to skb for %d bytes.\n",
i, size);
skb_fill_page_desc(skb, i,
- lbq_desc->p.pg_chunk.page,
- lbq_desc->p.pg_chunk.offset,
- size);
+ lbq_desc->p.pg_chunk.page,
+ lbq_desc->p.pg_chunk.offset, size);
skb->len += size;
skb->data_len += size;
skb->truesize += size;
@@ -1875,9 +1863,9 @@ static struct sk_buff *ql_build_rx_skb(struct ql_adapter *qdev,
/* Process an inbound completion from an rx ring. */
static void ql_process_mac_split_rx_intr(struct ql_adapter *qdev,
- struct rx_ring *rx_ring,
- struct ib_mac_iocb_rsp *ib_mac_rsp,
- u16 vlan_id)
+ struct rx_ring *rx_ring,
+ struct ib_mac_iocb_rsp *ib_mac_rsp,
+ u16 vlan_id)
{
struct net_device *ndev = qdev->ndev;
struct sk_buff *skb = NULL;
@@ -1938,7 +1926,7 @@ static void ql_process_mac_split_rx_intr(struct ql_adapter *qdev,
* csum or frame errors.
*/
if ((ndev->features & NETIF_F_RXCSUM) &&
- !(ib_mac_rsp->flags1 & IB_MAC_CSUM_ERR_MASK)) {
+ !(ib_mac_rsp->flags1 & IB_MAC_CSUM_ERR_MASK)) {
/* TCP frame. */
if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_T) {
netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
@@ -1970,8 +1958,8 @@ static void ql_process_mac_split_rx_intr(struct ql_adapter *qdev,
/* Process an inbound completion from an rx ring. */
static unsigned long ql_process_mac_rx_intr(struct ql_adapter *qdev,
- struct rx_ring *rx_ring,
- struct ib_mac_iocb_rsp *ib_mac_rsp)
+ struct rx_ring *rx_ring,
+ struct ib_mac_iocb_rsp *ib_mac_rsp)
{
u32 length = le32_to_cpu(ib_mac_rsp->data_len);
u16 vlan_id = ((ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_V) &&
@@ -1986,34 +1974,34 @@ static unsigned long ql_process_mac_rx_intr(struct ql_adapter *qdev,
* separate buffers.
*/
ql_process_mac_split_rx_intr(qdev, rx_ring, ib_mac_rsp,
- vlan_id);
+ vlan_id);
} else if (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DS) {
/* The data fit in a single small buffer.
* Allocate a new skb, copy the data and
* return the buffer to the free pool.
*/
- ql_process_mac_rx_skb(qdev, rx_ring, ib_mac_rsp,
- length, vlan_id);
+ ql_process_mac_rx_skb(qdev, rx_ring, ib_mac_rsp, length,
+ vlan_id);
} else if ((ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DL) &&
!(ib_mac_rsp->flags1 & IB_MAC_CSUM_ERR_MASK) &&
(ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_T)) {
/* TCP packet in a page chunk that's been checksummed.
* Tack it on to our GRO skb and let it go.
*/
- ql_process_mac_rx_gro_page(qdev, rx_ring, ib_mac_rsp,
- length, vlan_id);
+ ql_process_mac_rx_gro_page(qdev, rx_ring, ib_mac_rsp, length,
+ vlan_id);
} else if (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DL) {
/* Non-TCP packet in a page chunk. Allocate an
* skb, tack it on frags, and send it up.
*/
- ql_process_mac_rx_page(qdev, rx_ring, ib_mac_rsp,
- length, vlan_id);
+ ql_process_mac_rx_page(qdev, rx_ring, ib_mac_rsp, length,
+ vlan_id);
} else {
/* Non-TCP/UDP large frames that span multiple buffers
	 * can be processed correctly by the split frame logic.
*/
ql_process_mac_split_rx_intr(qdev, rx_ring, ib_mac_rsp,
- vlan_id);
+ vlan_id);
}
return (unsigned long)length;
@@ -2222,15 +2210,16 @@ static int ql_napi_poll_msix(struct napi_struct *napi, int budget)
"Enter, NAPI POLL cq_id = %d.\n", rx_ring->cq_id);
/* Service the TX rings first. They start
- * right after the RSS rings. */
+ * right after the RSS rings.
+ */
for (i = qdev->rss_ring_count; i < qdev->rx_ring_count; i++) {
trx_ring = &qdev->rx_ring[i];
/* If this TX completion ring belongs to this vector and
* it's not empty then service it.
*/
if ((ctx->irq_mask & (1 << trx_ring->cq_id)) &&
- (ql_read_sh_reg(trx_ring->prod_idx_sh_reg) !=
- trx_ring->cnsmr_idx)) {
+ (ql_read_sh_reg(trx_ring->prod_idx_sh_reg) !=
+ trx_ring->cnsmr_idx)) {
netif_printk(qdev, intr, KERN_DEBUG, qdev->ndev,
"%s: Servicing TX completion ring %d.\n",
__func__, trx_ring->cq_id);
@@ -2304,7 +2293,7 @@ static int qlge_update_hw_vlan_features(struct net_device *ndev,
}
static int qlge_set_features(struct net_device *ndev,
- netdev_features_t features)
+ netdev_features_t features)
{
netdev_features_t changed = ndev->features ^ features;
int err;
@@ -2447,7 +2436,7 @@ static irqreturn_t qlge_isr(int irq, void *dev_id)
* Check MPI processor activity.
*/
if ((var & STS_PI) &&
- (ql_read32(qdev, INTR_MASK) & INTR_MASK_PI)) {
+ (ql_read32(qdev, INTR_MASK) & INTR_MASK_PI)) {
/*
* We've got an async event or mailbox completion.
* Handle it and clear the source of the interrupt.
@@ -2456,7 +2445,7 @@ static irqreturn_t qlge_isr(int irq, void *dev_id)
"Got MPI processor interrupt.\n");
ql_write32(qdev, INTR_MASK, (INTR_MASK_PI << 16));
queue_delayed_work_on(smp_processor_id(),
- qdev->workqueue, &qdev->mpi_work, 0);
+ qdev->workqueue, &qdev->mpi_work, 0);
work_done++;
}
@@ -2639,7 +2628,6 @@ static netdev_tx_t qlge_send(struct sk_buff *skb, struct net_device *ndev)
return NETDEV_TX_OK;
}
-
static void ql_free_shadow_space(struct ql_adapter *qdev)
{
if (qdev->rx_ring_shadow_reg_area) {
@@ -2887,7 +2875,8 @@ static void ql_free_rx_resources(struct ql_adapter *qdev,
}
/* Allocate queues and buffers for this completion queue based
- * on the values in the parameter structure. */
+ * on the values in the parameter structure.
+ */
static int ql_alloc_rx_resources(struct ql_adapter *qdev,
struct rx_ring *rx_ring)
{
@@ -3530,19 +3519,17 @@ static int ql_route_initialize(struct ql_adapter *qdev)
return status;
status = ql_set_routing_reg(qdev, RT_IDX_IP_CSUM_ERR_SLOT,
- RT_IDX_IP_CSUM_ERR, 1);
+ RT_IDX_IP_CSUM_ERR, 1);
if (status) {
netif_err(qdev, ifup, qdev->ndev,
- "Failed to init routing register "
- "for IP CSUM error packets.\n");
+ "Failed to init routing register for IP CSUM error packets.\n");
goto exit;
}
status = ql_set_routing_reg(qdev, RT_IDX_TCP_UDP_CSUM_ERR_SLOT,
- RT_IDX_TU_CSUM_ERR, 1);
+ RT_IDX_TU_CSUM_ERR, 1);
if (status) {
netif_err(qdev, ifup, qdev->ndev,
- "Failed to init routing register "
- "for TCP/UDP CSUM error packets.\n");
+ "Failed to init routing register for TCP/UDP CSUM error packets.\n");
goto exit;
}
status = ql_set_routing_reg(qdev, RT_IDX_BCAST_SLOT, RT_IDX_BCAST, 1);
@@ -3556,7 +3543,7 @@ static int ql_route_initialize(struct ql_adapter *qdev)
*/
if (qdev->rss_ring_count > 1) {
status = ql_set_routing_reg(qdev, RT_IDX_RSS_MATCH_SLOT,
- RT_IDX_RSS_MATCH, 1);
+ RT_IDX_RSS_MATCH, 1);
if (status) {
netif_err(qdev, ifup, qdev->ndev,
"Failed to init routing register for MATCH RSS packets.\n");
@@ -3654,7 +3641,7 @@ static int ql_adapter_initialize(struct ql_adapter *qdev)
/* Default WOL is enabled on Mezz cards */
if (qdev->pdev->subsystem_device == 0x0068 ||
- qdev->pdev->subsystem_device == 0x0180)
+ qdev->pdev->subsystem_device == 0x0180)
qdev->wol = WAKE_MAGIC;
/* Start up the rx queues. */
@@ -3731,8 +3718,9 @@ static int ql_adapter_reset(struct ql_adapter *qdev)
/* Wait for the NIC and MGMNT FIFOs to empty. */
ql_wait_fifo_empty(qdev);
- } else
+ } else {
clear_bit(QL_ASIC_RECOVERY, &qdev->flags);
+ }
ql_write32(qdev, RST_FO, (RST_FO_FR << 16) | RST_FO_FR);
@@ -3880,7 +3868,7 @@ static int ql_adapter_up(struct ql_adapter *qdev)
* link is up then turn on the carrier.
*/
if ((ql_read32(qdev, STS) & qdev->port_init) &&
- (ql_read32(qdev, STS) & qdev->port_link_up))
+ (ql_read32(qdev, STS) & qdev->port_link_up))
ql_link_on(qdev);
/* Restore rx mode. */
clear_bit(QL_ALLMULTI, &qdev->flags);
@@ -4099,21 +4087,20 @@ static int qlge_change_mtu(struct net_device *ndev, int new_mtu)
struct ql_adapter *qdev = netdev_priv(ndev);
int status;
- if (ndev->mtu == 1500 && new_mtu == 9000) {
+ if (ndev->mtu == 1500 && new_mtu == 9000)
netif_err(qdev, ifup, qdev->ndev, "Changing to jumbo MTU.\n");
- } else if (ndev->mtu == 9000 && new_mtu == 1500) {
+ else if (ndev->mtu == 9000 && new_mtu == 1500)
netif_err(qdev, ifup, qdev->ndev, "Changing to normal MTU.\n");
- } else
+ else
return -EINVAL;
queue_delayed_work(qdev->workqueue,
- &qdev->mpi_port_cfg_work, 3*HZ);
+ &qdev->mpi_port_cfg_work, 3 * HZ);
ndev->mtu = new_mtu;
- if (!netif_running(qdev->ndev)) {
+ if (!netif_running(qdev->ndev))
return 0;
- }
status = ql_change_rx_buffers(qdev);
if (status) {
@@ -4267,14 +4254,15 @@ static int qlge_set_mac_address(struct net_device *ndev, void *p)
if (status)
return status;
status = ql_set_mac_addr_reg(qdev, (u8 *) ndev->dev_addr,
- MAC_ADDR_TYPE_CAM_MAC, qdev->func * MAX_CQ);
+ MAC_ADDR_TYPE_CAM_MAC,
+ qdev->func * MAX_CQ);
if (status)
netif_err(qdev, hw, qdev->ndev, "Failed to load MAC address.\n");
ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
return status;
}
-static void qlge_tx_timeout(struct net_device *ndev)
+static void qlge_tx_timeout(struct net_device *ndev, unsigned int txqueue)
{
struct ql_adapter *qdev = netdev_priv(ndev);
ql_queue_asic_error(qdev);
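The qlge_tx_timeout() hunk above tracks the tree-wide change that added a txqueue argument to the .ndo_tx_timeout callback so a driver can tell which TX queue's watchdog fired. A minimal sketch of the updated hook, with hypothetical foo_* names (qlge itself just escalates to ql_queue_asic_error()):

static void foo_tx_timeout(struct net_device *ndev, unsigned int txqueue)
{
	/* txqueue is the index of the queue whose watchdog fired */
	netdev_err(ndev, "TX timeout on queue %u\n", txqueue);
	/* a real driver would schedule a reset or recovery here */
}

static const struct net_device_ops foo_netdev_ops = {
	.ndo_tx_timeout = foo_tx_timeout,
};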
@@ -4334,7 +4322,7 @@ static int ql_get_alt_pcie_func(struct ql_adapter *qdev)
u32 nic_func1, nic_func2;
status = ql_read_mpi_reg(qdev, MPI_TEST_FUNC_PORT_CFG,
- &temp);
+ &temp);
if (status)
return status;
@@ -4455,7 +4443,7 @@ static int ql_init_device(struct pci_dev *pdev, struct net_device *ndev,
pdev->needs_freset = 1;
pci_save_state(pdev);
qdev->reg_base =
- ioremap_nocache(pci_resource_start(pdev, 1),
+ ioremap(pci_resource_start(pdev, 1),
pci_resource_len(pdev, 1));
if (!qdev->reg_base) {
dev_err(&pdev->dev, "Register mapping failed.\n");
@@ -4465,7 +4453,7 @@ static int ql_init_device(struct pci_dev *pdev, struct net_device *ndev,
qdev->doorbell_area_size = pci_resource_len(pdev, 3);
qdev->doorbell_area =
- ioremap_nocache(pci_resource_start(pdev, 3),
+ ioremap(pci_resource_start(pdev, 3),
pci_resource_len(pdev, 3));
if (!qdev->doorbell_area) {
dev_err(&pdev->dev, "Doorbell register mapping failed.\n");
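Both ioremap_nocache() conversions in this pair of hunks are behavior-preserving because ioremap() already returns an uncached mapping on every architecture, which is what allowed the _nocache alias to be retired. A hedged sketch of the usual BAR-mapping pattern (error handling simplified):

void __iomem *regs;

regs = ioremap(pci_resource_start(pdev, 1), pci_resource_len(pdev, 1));
if (!regs)
	return -ENOMEM;		/* mapping failed */
/* MMIO goes through the cookie: readl(regs + off), writel(val, regs + off) */
iounmap(regs);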
@@ -4578,11 +4566,12 @@ static int qlge_probe(struct pci_dev *pdev,
{
struct net_device *ndev = NULL;
struct ql_adapter *qdev = NULL;
- static int cards_found = 0;
+ static int cards_found;
int err = 0;
ndev = alloc_etherdev_mq(sizeof(struct ql_adapter),
- min(MAX_CPUS, netif_get_num_default_rss_queues()));
+ min(MAX_CPUS,
+ netif_get_num_default_rss_queues()));
if (!ndev)
return -ENOMEM;
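The cards_found change relies on the C rule that objects with static storage duration are zero-initialized, so an explicit "= 0" is redundant and checkpatch flags it. For example:

static int cards_found;		/* implicitly zero, placed in .bss */
static int ring_count = 16;	/* non-zero initializers are kept */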
diff --git a/drivers/staging/qlge/qlge_mpi.c b/drivers/staging/qlge/qlge_mpi.c
index 9e422bbbb6ab..bb03b2fa7233 100644
--- a/drivers/staging/qlge/qlge_mpi.c
+++ b/drivers/staging/qlge/qlge_mpi.c
@@ -134,7 +134,7 @@ static int ql_get_mb_sts(struct ql_adapter *qdev, struct mbox_params *mbcp)
for (i = 0; i < mbcp->out_count; i++) {
status =
ql_read_mpi_reg(qdev, qdev->mailbox_out + i,
- &mbcp->mbox_out[i]);
+ &mbcp->mbox_out[i]);
if (status) {
netif_err(qdev, drv, qdev->ndev, "Failed mailbox read.\n");
break;
@@ -184,7 +184,7 @@ static int ql_exec_mb_cmd(struct ql_adapter *qdev, struct mbox_params *mbcp)
*/
for (i = 0; i < mbcp->in_count; i++) {
status = ql_write_mpi_reg(qdev, qdev->mailbox_in + i,
- mbcp->mbox_in[i]);
+ mbcp->mbox_in[i]);
if (status)
goto end;
}
@@ -293,7 +293,7 @@ static void ql_link_up(struct ql_adapter *qdev, struct mbox_params *mbcp)
*/
ql_write32(qdev, INTR_MASK, (INTR_MASK_PI << 16));
queue_delayed_work(qdev->workqueue,
- &qdev->mpi_port_cfg_work, 0);
+ &qdev->mpi_port_cfg_work, 0);
}
ql_link_on(qdev);
@@ -544,7 +544,6 @@ static int ql_mailbox_command(struct ql_adapter *qdev, struct mbox_params *mbcp)
if (status)
goto end;
-
/* If we're generating a system error, then there's nothing
* to wait for.
*/
@@ -730,7 +729,6 @@ int ql_mb_set_port_cfg(struct ql_adapter *qdev)
mbcp->mbox_in[1] = qdev->link_config;
mbcp->mbox_in[2] = qdev->max_frame_size;
-
status = ql_mailbox_command(qdev, mbcp);
if (status)
return status;
@@ -747,7 +745,7 @@ int ql_mb_set_port_cfg(struct ql_adapter *qdev)
}
static int ql_mb_dump_ram(struct ql_adapter *qdev, u64 req_dma, u32 addr,
- u32 size)
+ u32 size)
{
int status = 0;
struct mbox_params mbc;
@@ -768,7 +766,6 @@ static int ql_mb_dump_ram(struct ql_adapter *qdev, u64 req_dma, u32 addr,
mbcp->mbox_in[7] = LSW(MSD(req_dma));
mbcp->mbox_in[8] = MSW(addr);
-
status = ql_mailbox_command(qdev, mbcp);
if (status)
return status;
@@ -782,14 +779,14 @@ static int ql_mb_dump_ram(struct ql_adapter *qdev, u64 req_dma, u32 addr,
/* Issue a mailbox command to dump RISC RAM. */
int ql_dump_risc_ram_area(struct ql_adapter *qdev, void *buf,
- u32 ram_addr, int word_count)
+ u32 ram_addr, int word_count)
{
int status;
char *my_buf;
dma_addr_t buf_dma;
my_buf = pci_alloc_consistent(qdev->pdev, word_count * sizeof(u32),
- &buf_dma);
+ &buf_dma);
if (!my_buf)
return -EIO;
@@ -798,7 +795,7 @@ int ql_dump_risc_ram_area(struct ql_adapter *qdev, void *buf,
memcpy(buf, my_buf, word_count * sizeof(u32));
pci_free_consistent(qdev->pdev, word_count * sizeof(u32), my_buf,
- buf_dma);
+ buf_dma);
return status;
}
@@ -850,7 +847,6 @@ int ql_mb_wol_mode(struct ql_adapter *qdev, u32 wol)
mbcp->mbox_in[0] = MB_CMD_SET_WOL_MODE;
mbcp->mbox_in[1] = wol;
-
status = ql_mailbox_command(qdev, mbcp);
if (status)
return status;
@@ -922,7 +918,7 @@ static int ql_idc_wait(struct ql_adapter *qdev)
*/
wait_time =
wait_for_completion_timeout(&qdev->ide_completion,
- wait_time);
+ wait_time);
if (!wait_time) {
netif_err(qdev, drv, qdev->ndev, "IDC Timeout.\n");
break;
@@ -965,7 +961,6 @@ int ql_mb_set_led_cfg(struct ql_adapter *qdev, u32 led_config)
mbcp->mbox_in[0] = MB_CMD_SET_LED_CFG;
mbcp->mbox_in[1] = led_config;
-
status = ql_mailbox_command(qdev, mbcp);
if (status)
return status;
@@ -1130,8 +1125,7 @@ void ql_mpi_port_cfg_work(struct work_struct *work)
}
if (qdev->link_config & CFG_JUMBO_FRAME_SIZE &&
- qdev->max_frame_size ==
- CFG_DEFAULT_MAX_FRAME_SIZE)
+ qdev->max_frame_size == CFG_DEFAULT_MAX_FRAME_SIZE)
goto end;
qdev->link_config |= CFG_JUMBO_FRAME_SIZE;
@@ -1278,7 +1272,7 @@ void ql_mpi_reset_work(struct work_struct *work)
netif_err(qdev, drv, qdev->ndev, "Core is dumped!\n");
qdev->core_is_dumped = 1;
queue_delayed_work(qdev->workqueue,
- &qdev->mpi_core_to_log, 5 * HZ);
+ &qdev->mpi_core_to_log, 5 * HZ);
}
ql_soft_reset_mpi_risc(qdev);
}
diff --git a/drivers/staging/rtl8188eu/core/rtw_ap.c b/drivers/staging/rtl8188eu/core/rtw_ap.c
index 88e42cc1d837..93283c7deec4 100644
--- a/drivers/staging/rtl8188eu/core/rtw_ap.c
+++ b/drivers/staging/rtl8188eu/core/rtw_ap.c
@@ -429,7 +429,7 @@ static void update_bmc_sta(struct adapter *padapter)
/* prepare for add_RATid */
supportRateNum = rtw_get_rateset_len((u8 *)&pcur_network->SupportedRates);
- network_type = rtw_check_network_type((u8 *)&pcur_network->SupportedRates, supportRateNum, 1);
+ network_type = rtw_check_network_type((u8 *)&pcur_network->SupportedRates);
memcpy(psta->bssrateset, &pcur_network->SupportedRates, supportRateNum);
psta->bssratelen = supportRateNum;
@@ -802,7 +802,7 @@ int rtw_check_beacon_data(struct adapter *padapter, u8 *pbuf, int len)
supportRateNum += ie_len;
}
- network_type = rtw_check_network_type(supportRate, supportRateNum, channel);
+ network_type = rtw_check_network_type(supportRate);
rtw_set_supported_rate(pbss_network->SupportedRates, network_type);
diff --git a/drivers/staging/rtl8188eu/core/rtw_efuse.c b/drivers/staging/rtl8188eu/core/rtw_efuse.c
index d9b0f9e6235c..c525682d0edf 100644
--- a/drivers/staging/rtl8188eu/core/rtw_efuse.c
+++ b/drivers/staging/rtl8188eu/core/rtw_efuse.c
@@ -402,7 +402,6 @@ static u16 Efuse_GetCurrentSize(struct adapter *pAdapter)
int Efuse_PgPacketRead(struct adapter *pAdapter, u8 offset, u8 *data)
{
u8 ReadState = PG_STATE_HEADER;
- int bContinual = true;
int bDataEmpty = true;
u8 efuse_data, word_cnts = 0;
u16 efuse_addr = 0;
@@ -422,7 +421,7 @@ int Efuse_PgPacketRead(struct adapter *pAdapter, u8 offset, u8 *data)
/* <Roger_TODO> Efuse has been pre-programmed with 5 dummy bytes at the end of the Efuse by CP. */
/* Skip dummy parts to prevent unexpected data read from Efuse. */
/* Bypass for now. 2009.02.19. */
- while (bContinual && AVAILABLE_EFUSE_ADDR(efuse_addr)) {
+ while (AVAILABLE_EFUSE_ADDR(efuse_addr)) {
/* Header Read ------------- */
if (ReadState & PG_STATE_HEADER) {
if (efuse_OneByteRead(pAdapter, efuse_addr, &efuse_data) && (efuse_data != 0xFF)) {
@@ -464,7 +463,7 @@ int Efuse_PgPacketRead(struct adapter *pAdapter, u8 offset, u8 *data)
ReadState = PG_STATE_HEADER;
}
} else {
- bContinual = false;
+ break;
}
} else if (ReadState & PG_STATE_DATA) {
/* Data section Read ------------- */
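Dropping bContinual above is an instance of a common cleanup: a flag whose only job is to terminate the loop can be replaced by a direct break, removing one variable and one level of indirection. A small compilable stand-in for the pattern (illustrative, not driver code):

/* sum values until a negative sentinel is seen */
static int sum_until_sentinel(const int *v, int n)
{
	int total = 0;
	int i;

	for (i = 0; i < n; i++) {
		if (v[i] < 0)
			break;		/* was: bContinual = false; */
		total += v[i];
	}
	return total;
}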
@@ -498,8 +497,8 @@ static bool hal_EfuseFixHeaderProcess(struct adapter *pAdapter, u8 efuseType, st
if (!PgWriteSuccess)
return false;
- else
- efuse_addr = Efuse_GetCurrentSize(pAdapter);
+
+ efuse_addr = Efuse_GetCurrentSize(pAdapter);
} else {
efuse_addr = efuse_addr + (pFixPkt->word_cnts * 2) + 1;
}
@@ -714,10 +713,9 @@ static bool hal_EfusePartialWriteCheck(struct adapter *pAdapter, u8 efuseType, u
if (ALL_WORDS_DISABLED(efuse_data)) {
ret = false;
break;
- } else {
- curPkt.offset = ((cur_header & 0xE0) >> 5) | ((efuse_data & 0xF0) >> 1);
- curPkt.word_en = efuse_data & 0x0F;
}
+ curPkt.offset = ((cur_header & 0xE0) >> 5) | ((efuse_data & 0xF0) >> 1);
+ curPkt.word_en = efuse_data & 0x0F;
} else {
cur_header = efuse_data;
curPkt.offset = (cur_header >> 4) & 0x0F;
diff --git a/drivers/staging/rtl8188eu/core/rtw_ieee80211.c b/drivers/staging/rtl8188eu/core/rtw_ieee80211.c
index cc1b5438c04c..29f615443e8f 100644
--- a/drivers/staging/rtl8188eu/core/rtw_ieee80211.c
+++ b/drivers/staging/rtl8188eu/core/rtw_ieee80211.c
@@ -98,7 +98,7 @@ bool rtw_is_cckratesonly_included(u8 *rate)
return true;
}
-int rtw_check_network_type(unsigned char *rate, int ratelen, int channel)
+int rtw_check_network_type(unsigned char *rate)
{
/* could be pure B, pure G, or B/G */
if (rtw_is_cckratesonly_included(rate))
@@ -157,11 +157,10 @@ u8 *rtw_get_ie(u8 *pbuf, int index, uint *len, int limit)
if (*p == index) {
*len = *(p + 1);
return p;
- } else {
- tmp = *(p + 1);
- p += (tmp + 2);
- i += (tmp + 2);
}
+ tmp = *(p + 1);
+ p += (tmp + 2);
+ i += (tmp + 2);
if (i >= limit)
break;
}
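Many hunks in this series apply the same checkpatch rule: when the if-branch ends in return (or break/continue), the else clause only adds indentation, so its body is flattened into the main flow. A compilable sketch modeled loosely on rtw_get_ie() above (hypothetical helper, not the driver's code):

/* walk TLV-encoded information elements looking for a given id */
static unsigned char *find_ie(unsigned char *p, unsigned char id,
			      unsigned int *len, int limit)
{
	int i = 0;

	while (i < limit) {
		if (*p == id) {
			*len = *(p + 1);
			return p;
		}
		/* no else needed: the branch above already returned */
		i += *(p + 1) + 2;
		p += *(p + 1) + 2;
	}
	return NULL;
}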
@@ -295,10 +294,9 @@ unsigned char *rtw_get_wpa_ie(unsigned char *pie, uint *wpa_ie_len, int limit)
goto check_next_ie;
*wpa_ie_len = *(pbuf + 1);
return pbuf;
- } else {
- *wpa_ie_len = 0;
- return NULL;
}
+ *wpa_ie_len = 0;
+ return NULL;
check_next_ie:
limit_new = limit - (pbuf - pie) - 2 - len;
@@ -596,9 +594,8 @@ u8 *rtw_get_wps_ie(u8 *in_ie, uint in_len, u8 *wps_ie, uint *wps_ielen)
cnt += in_ie[cnt + 1] + 2;
break;
- } else {
- cnt += in_ie[cnt + 1] + 2; /* goto next */
}
+ cnt += in_ie[cnt + 1] + 2; /* goto next */
}
return wpsie_ptr;
}
@@ -642,9 +639,8 @@ u8 *rtw_get_wps_attr(u8 *wps_ie, uint wps_ielen, u16 target_attr_id, u8 *buf_att
if (len_attr)
*len_attr = attr_len;
break;
- } else {
- attr_ptr += attr_len; /* goto next */
}
+ attr_ptr += attr_len; /* goto next */
}
return target_attr_ptr;
}
diff --git a/drivers/staging/rtl8188eu/core/rtw_mlme_ext.c b/drivers/staging/rtl8188eu/core/rtw_mlme_ext.c
index e984b4605e91..36841d20c3c1 100644
--- a/drivers/staging/rtl8188eu/core/rtw_mlme_ext.c
+++ b/drivers/staging/rtl8188eu/core/rtw_mlme_ext.c
@@ -291,7 +291,7 @@ static int update_hidden_ssid(u8 *ies, u32 ies_len, u8 hidden_ssid_mode)
remain_len = ies_len - (next_ie - ies);
ssid_ie[1] = 0;
- memcpy(ssid_ie+2, next_ie, remain_len);
+ memcpy(ssid_ie + 2, next_ie, remain_len);
len_diff -= ssid_len_ori;
break;
@@ -363,14 +363,14 @@ static void issue_beacon(struct adapter *padapter, int timeout_ms)
memcpy(pframe, cur_network->ies, cur_network->ie_length);
len_diff = update_hidden_ssid(
- pframe+_BEACON_IE_OFFSET_
- , cur_network->ie_length-_BEACON_IE_OFFSET_
+ pframe + _BEACON_IE_OFFSET_
+ , cur_network->ie_length - _BEACON_IE_OFFSET_
, pmlmeinfo->hidden_ssid_mode
);
- pframe += (cur_network->ie_length+len_diff);
- pattrib->pktlen += (cur_network->ie_length+len_diff);
- wps_ie = rtw_get_wps_ie(pmgntframe->buf_addr+TXDESC_OFFSET+sizeof(struct ieee80211_hdr_3addr)+_BEACON_IE_OFFSET_,
- pattrib->pktlen-sizeof(struct ieee80211_hdr_3addr)-_BEACON_IE_OFFSET_, NULL, &wps_ielen);
+ pframe += (cur_network->ie_length + len_diff);
+ pattrib->pktlen += (cur_network->ie_length + len_diff);
+ wps_ie = rtw_get_wps_ie(pmgntframe->buf_addr + TXDESC_OFFSET + sizeof(struct ieee80211_hdr_3addr) + _BEACON_IE_OFFSET_,
+ pattrib->pktlen - sizeof(struct ieee80211_hdr_3addr) - _BEACON_IE_OFFSET_, NULL, &wps_ielen);
if (wps_ie && wps_ielen > 0)
rtw_get_wps_attr_content(wps_ie, wps_ielen, WPS_ATTR_SELECTED_REGISTRAR, (u8 *)(&sr), NULL);
if (sr != 0)
@@ -504,7 +504,7 @@ static void issue_probersp(struct adapter *padapter, unsigned char *da)
#if defined(CONFIG_88EU_AP_MODE)
if ((pmlmeinfo->state & 0x03) == WIFI_FW_AP_STATE) {
- pwps_ie = rtw_get_wps_ie(cur_network->ies+_FIXED_IE_LENGTH_, cur_network->ie_length-_FIXED_IE_LENGTH_, NULL, &wps_ielen);
+ pwps_ie = rtw_get_wps_ie(cur_network->ies + _FIXED_IE_LENGTH_, cur_network->ie_length - _FIXED_IE_LENGTH_, NULL, &wps_ielen);
/* insert & update wps_probe_resp_ie */
if (pmlmepriv->wps_probe_resp_ie && pwps_ie && wps_ielen > 0) {
@@ -522,13 +522,13 @@ static void issue_probersp(struct adapter *padapter, unsigned char *da)
pattrib->pktlen += wps_offset;
wps_ielen = (uint)pmlmepriv->wps_probe_resp_ie[1];/* to get ie data len */
- if ((wps_offset+wps_ielen+2) <= MAX_IE_SZ) {
- memcpy(pframe, pmlmepriv->wps_probe_resp_ie, wps_ielen+2);
- pframe += wps_ielen+2;
- pattrib->pktlen += wps_ielen+2;
+ if ((wps_offset + wps_ielen + 2) <= MAX_IE_SZ) {
+ memcpy(pframe, pmlmepriv->wps_probe_resp_ie, wps_ielen + 2);
+ pframe += wps_ielen + 2;
+ pattrib->pktlen += wps_ielen + 2;
}
- if ((wps_offset+wps_ielen+2+remainder_ielen) <= MAX_IE_SZ) {
+ if ((wps_offset + wps_ielen + 2 + remainder_ielen) <= MAX_IE_SZ) {
memcpy(pframe, premainder_ie, remainder_ielen);
pframe += remainder_ielen;
pattrib->pktlen += remainder_ielen;
@@ -751,7 +751,7 @@ static void issue_auth(struct adapter *padapter, struct sta_info *psta,
struct wlan_bssid_ex *pnetwork = &pmlmeinfo->network;
pmgntframe = alloc_mgtxmitframe(pxmitpriv);
- if (pmgntframe == NULL)
+ if (!pmgntframe)
return;
/* update attribute */
@@ -943,7 +943,7 @@ static void issue_asocrsp(struct adapter *padapter, unsigned short status,
pframe = rtw_set_ie(pframe, _SUPPORTEDRATES_IE_, pstat->bssratelen, pstat->bssrateset, &pattrib->pktlen);
} else {
pframe = rtw_set_ie(pframe, _SUPPORTEDRATES_IE_, 8, pstat->bssrateset, &pattrib->pktlen);
- pframe = rtw_set_ie(pframe, _EXT_SUPPORTEDRATES_IE_, pstat->bssratelen-8, pstat->bssrateset+8, &pattrib->pktlen);
+ pframe = rtw_set_ie(pframe, _EXT_SUPPORTEDRATES_IE_, pstat->bssratelen - 8, pstat->bssrateset + 8, &pattrib->pktlen);
}
if ((pstat->flags & WLAN_STA_HT) && (pmlmepriv->htpriv.ht_option)) {
@@ -952,17 +952,17 @@ static void issue_asocrsp(struct adapter *padapter, unsigned short status,
/* FILL HT CAP INFO IE */
pbuf = rtw_get_ie(ie + _BEACON_IE_OFFSET_, _HT_CAPABILITY_IE_, &ie_len, (pnetwork->ie_length - _BEACON_IE_OFFSET_));
if (pbuf && ie_len > 0) {
- memcpy(pframe, pbuf, ie_len+2);
- pframe += (ie_len+2);
- pattrib->pktlen += (ie_len+2);
+ memcpy(pframe, pbuf, ie_len + 2);
+ pframe += (ie_len + 2);
+ pattrib->pktlen += (ie_len + 2);
}
/* FILL HT ADD INFO IE */
pbuf = rtw_get_ie(ie + _BEACON_IE_OFFSET_, _HT_ADD_INFO_IE_, &ie_len, (pnetwork->ie_length - _BEACON_IE_OFFSET_));
if (pbuf && ie_len > 0) {
- memcpy(pframe, pbuf, ie_len+2);
- pframe += (ie_len+2);
- pattrib->pktlen += (ie_len+2);
+ memcpy(pframe, pbuf, ie_len + 2);
+ pframe += (ie_len + 2);
+ pattrib->pktlen += (ie_len + 2);
}
}
@@ -973,14 +973,14 @@ static void issue_asocrsp(struct adapter *padapter, unsigned short status,
for (pbuf = ie + _BEACON_IE_OFFSET_;; pbuf += (ie_len + 2)) {
pbuf = rtw_get_ie(pbuf, _VENDOR_SPECIFIC_IE_, &ie_len, (pnetwork->ie_length - _BEACON_IE_OFFSET_ - (ie_len + 2)));
- if (pbuf && !memcmp(pbuf+2, WMM_PARA_IE, 6)) {
- memcpy(pframe, pbuf, ie_len+2);
- pframe += (ie_len+2);
- pattrib->pktlen += (ie_len+2);
+ if (pbuf && !memcmp(pbuf + 2, WMM_PARA_IE, 6)) {
+ memcpy(pframe, pbuf, ie_len + 2);
+ pframe += (ie_len + 2);
+ pattrib->pktlen += (ie_len + 2);
break;
}
- if ((pbuf == NULL) || (ie_len == 0))
+ if (!pbuf || ie_len == 0)
break;
}
}
@@ -1082,8 +1082,8 @@ static void issue_assocreq(struct adapter *padapter)
/* Check if the AP's supported rates are also supported by STA. */
for (j = 0; j < sta_bssrate_len; j++) {
/* Avoid the proprietary data rate (22Mbps) of Handlink WSG-4000 AP */
- if ((pmlmeinfo->network.SupportedRates[i]|IEEE80211_BASIC_RATE_MASK)
- == (sta_bssrate[j]|IEEE80211_BASIC_RATE_MASK))
+ if ((pmlmeinfo->network.SupportedRates[i] | IEEE80211_BASIC_RATE_MASK)
+ == (sta_bssrate[j] | IEEE80211_BASIC_RATE_MASK))
break;
}
@@ -1120,7 +1120,7 @@ static void issue_assocreq(struct adapter *padapter)
/* HT caps */
if (padapter->mlmepriv.htpriv.ht_option) {
p = rtw_get_ie((pmlmeinfo->network.ies + sizeof(struct ndis_802_11_fixed_ie)), _HT_CAPABILITY_IE_, &ie_len, (pmlmeinfo->network.ie_length - sizeof(struct ndis_802_11_fixed_ie)));
- if ((p != NULL) && (!(is_ap_in_tkip(padapter)))) {
+ if (p && !is_ap_in_tkip(padapter)) {
memcpy(&pmlmeinfo->HT_caps, p + 2, sizeof(struct ieee80211_ht_cap));
/* to disable 40M Hz support while gd_bw_40MHz_en = 0 */
@@ -1263,7 +1263,7 @@ int issue_nulldata(struct adapter *padapter, unsigned char *da,
struct wlan_bssid_ex *pnetwork = &pmlmeinfo->network;
/* da == NULL: assume it's a null data frame from STA to AP */
- if (da == NULL)
+ if (!da)
da = pnetwork->MacAddress;
do {
@@ -1392,7 +1392,7 @@ int issue_qos_nulldata(struct adapter *padapter, unsigned char *da,
struct wlan_bssid_ex *pnetwork = &pmlmeinfo->network;
/* da == NULL: assume it's a null data frame from STA to AP */
- if (da == NULL)
+ if (!da)
da = pnetwork->MacAddress;
do {
@@ -1444,7 +1444,7 @@ static int _issue_deauth(struct adapter *padapter, unsigned char *da,
__le16 le_tmp;
pmgntframe = alloc_mgtxmitframe(pxmitpriv);
- if (pmgntframe == NULL)
+ if (!pmgntframe)
goto exit;
/* update attribute */
@@ -1780,7 +1780,7 @@ static void issue_action_BSSCoexistPacket(struct adapter *padapter)
pbss_network = (struct wlan_bssid_ex *)&pnetwork->network;
p = rtw_get_ie(pbss_network->ies + _FIXED_IE_LENGTH_, _HT_CAPABILITY_IE_, &len, pbss_network->ie_length - _FIXED_IE_LENGTH_);
- if ((p == NULL) || (len == 0)) { /* non-HT */
+ if (!p || len == 0) { /* non-HT */
if ((pbss_network->Configuration.DSConfig <= 0) || (pbss_network->Configuration.DSConfig > 14))
continue;
@@ -1833,7 +1833,7 @@ unsigned int send_delba(struct adapter *padapter, u8 initiator, u8 *addr)
return _SUCCESS;
psta = rtw_get_stainfo(pstapriv, addr);
- if (psta == NULL)
+ if (!psta)
return _SUCCESS;
if (initiator == 0) { /* recipient */
@@ -1865,6 +1865,7 @@ unsigned int send_beacon(struct adapter *padapter)
int issue = 0;
int poll = 0;
unsigned long start = jiffies;
+ u32 passing_time;
rtw_hal_set_hwreg(padapter, HW_VAR_BCN_VALID, NULL);
do {
@@ -1874,7 +1875,7 @@ unsigned int send_beacon(struct adapter *padapter)
yield();
rtw_hal_get_hwreg(padapter, HW_VAR_BCN_VALID, (u8 *)(&bxmitok));
poll++;
- } while ((poll%10) != 0 && !bxmitok && !padapter->bSurpriseRemoved && !padapter->bDriverStopped);
+ } while ((poll % 10) != 0 && !bxmitok && !padapter->bSurpriseRemoved && !padapter->bDriverStopped);
} while (!bxmitok && issue < 100 && !padapter->bSurpriseRemoved && !padapter->bDriverStopped);
if (padapter->bSurpriseRemoved || padapter->bDriverStopped)
@@ -1883,15 +1884,14 @@ unsigned int send_beacon(struct adapter *padapter)
DBG_88E("%s fail! %u ms\n", __func__,
jiffies_to_msecs(jiffies - start));
return _FAIL;
- } else {
- u32 passing_time = jiffies_to_msecs(jiffies - start);
-
- if (passing_time > 100 || issue > 3)
- DBG_88E("%s success, issue:%d, poll:%d, %u ms\n",
- __func__, issue, poll,
- jiffies_to_msecs(jiffies - start));
- return _SUCCESS;
}
+ passing_time = jiffies_to_msecs(jiffies - start);
+
+ if (passing_time > 100 || issue > 3)
+ DBG_88E("%s success, issue:%d, poll:%d, %u ms\n",
+ __func__, issue, poll,
+ jiffies_to_msecs(jiffies - start));
+ return _SUCCESS;
}
/****************************************************************************
@@ -2082,7 +2082,7 @@ static u8 collect_bss_info(struct adapter *padapter,
/* checking rate info... */
i = 0;
p = rtw_get_ie(bssid->ies + ie_offset, _SUPPORTEDRATES_IE_, &len, bssid->ie_length - ie_offset);
- if (p != NULL) {
+ if (p) {
if (len > NDIS_802_11_LENGTH_RATES_EX) {
DBG_88E("%s()-%d: IE too long (%d) for survey event\n", __func__, __LINE__, len);
return _FAIL;
@@ -2093,7 +2093,7 @@ static u8 collect_bss_info(struct adapter *padapter,
p = rtw_get_ie(bssid->ies + ie_offset, _EXT_SUPPORTEDRATES_IE_, &len, bssid->ie_length - ie_offset);
if (p) {
- if (len > (NDIS_802_11_LENGTH_RATES_EX-i)) {
+ if (len > (NDIS_802_11_LENGTH_RATES_EX - i)) {
DBG_88E("%s()-%d: IE too long (%d) for survey event\n", __func__, __LINE__, len);
return _FAIL;
}
@@ -2518,7 +2518,7 @@ static unsigned int OnProbeReq(struct adapter *padapter,
return _SUCCESS;
if (!check_fwstate(pmlmepriv, _FW_LINKED) &&
- !check_fwstate(pmlmepriv, WIFI_ADHOC_MASTER_STATE|WIFI_AP_STATE))
+ !check_fwstate(pmlmepriv, WIFI_ADHOC_MASTER_STATE | WIFI_AP_STATE))
return _SUCCESS;
p = rtw_get_ie(pframe + WLAN_HDR_A3_LEN + _PROBEREQ_IE_OFFSET_, _SSID_IE_, &ielen,
@@ -2526,7 +2526,7 @@ static unsigned int OnProbeReq(struct adapter *padapter,
/* check (wildcard) SSID */
if (p) {
- if ((ielen != 0 && memcmp((void *)(p+2), (void *)cur->ssid.ssid, cur->ssid.ssid_length)) ||
+ if ((ielen != 0 && memcmp((void *)(p + 2), (void *)cur->ssid.ssid, cur->ssid.ssid_length)) ||
(ielen == 0 && pmlmeinfo->hidden_ssid_mode))
return _SUCCESS;
@@ -2583,7 +2583,7 @@ static unsigned int OnBeacon(struct adapter *padapter,
}
/* check the vendor of the assoc AP */
- pmlmeinfo->assoc_AP_vendor = check_assoc_AP(pframe+sizeof(struct ieee80211_hdr_3addr), len-sizeof(struct ieee80211_hdr_3addr));
+ pmlmeinfo->assoc_AP_vendor = check_assoc_AP(pframe + sizeof(struct ieee80211_hdr_3addr), len - sizeof(struct ieee80211_hdr_3addr));
/* update TSF Value */
update_TSF(pmlmeext, pframe, len);
@@ -2596,7 +2596,7 @@ static unsigned int OnBeacon(struct adapter *padapter,
if (((pmlmeinfo->state & 0x03) == WIFI_FW_STATION_STATE) && (pmlmeinfo->state & WIFI_FW_ASSOC_SUCCESS)) {
psta = rtw_get_stainfo(pstapriv, GetAddr2Ptr(pframe));
- if (psta != NULL) {
+ if (psta) {
ret = rtw_check_bcn_info(padapter, pframe, len);
if (!ret) {
DBG_88E_LEVEL(_drv_info_, "ap has changed, disconnect now\n ");
@@ -2610,7 +2610,7 @@ static unsigned int OnBeacon(struct adapter *padapter,
}
} else if ((pmlmeinfo->state & 0x03) == WIFI_FW_ADHOC_STATE) {
psta = rtw_get_stainfo(pstapriv, GetAddr2Ptr(pframe));
- if (psta != NULL) {
+ if (psta) {
/* update WMM, ERP in the beacon */
/* todo: the timer is used instead of the number of beacons received */
if ((sta_rx_pkts(psta) & 0xf) == 0)
@@ -2729,7 +2729,7 @@ static unsigned int OnAuth(struct adapter *padapter,
if ((pstat->auth_seq + 1) != seq) {
DBG_88E("(1)auth rejected because out of seq [rx_seq=%d, exp_seq=%d]!\n",
- seq, pstat->auth_seq+1);
+ seq, pstat->auth_seq + 1);
status = _STATS_OUT_OF_AUTH_SEQ_;
goto auth_fail;
}
@@ -2742,7 +2742,7 @@ static unsigned int OnAuth(struct adapter *padapter,
pstat->authalg = algorithm;
} else {
DBG_88E("(2)auth rejected because out of seq [rx_seq=%d, exp_seq=%d]!\n",
- seq, pstat->auth_seq+1);
+ seq, pstat->auth_seq + 1);
status = _STATS_OUT_OF_AUTH_SEQ_;
goto auth_fail;
}
@@ -2761,7 +2761,7 @@ static unsigned int OnAuth(struct adapter *padapter,
p = rtw_get_ie(pframe + WLAN_HDR_A3_LEN + 4 + _AUTH_IE_OFFSET_, _CHLGETXT_IE_, &ie_len,
len - WLAN_HDR_A3_LEN - _AUTH_IE_OFFSET_ - 4);
- if ((p == NULL) || (ie_len <= 0)) {
+ if (!p || ie_len <= 0) {
DBG_88E("auth rejected because challenge failure!(1)\n");
status = _STATS_CHALLENGE_FAIL_;
goto auth_fail;
@@ -2779,7 +2779,7 @@ static unsigned int OnAuth(struct adapter *padapter,
}
} else {
DBG_88E("(3)auth rejected because out of seq [rx_seq=%d, exp_seq=%d]!\n",
- seq, pstat->auth_seq+1);
+ seq, pstat->auth_seq + 1);
status = _STATS_OUT_OF_AUTH_SEQ_;
goto auth_fail;
}
@@ -2855,7 +2855,7 @@ static unsigned int OnAuthClient(struct adapter *padapter,
p = rtw_get_ie(pframe + WLAN_HDR_A3_LEN + _AUTH_IE_OFFSET_, _CHLGETXT_IE_, &len,
pkt_len - WLAN_HDR_A3_LEN - _AUTH_IE_OFFSET_);
- if (p == NULL)
+ if (!p)
goto authclnt_fail;
memcpy((void *)(pmlmeinfo->chg_txt), (void *)(p + 2), len);
@@ -2864,10 +2864,9 @@ static unsigned int OnAuthClient(struct adapter *padapter,
set_link_timer(pmlmeext, REAUTH_TO);
return _SUCCESS;
- } else {
- /* open system */
- go2asoc = 1;
}
+ /* open system */
+ go2asoc = 1;
} else if (seq == 4) {
if (pmlmeinfo->auth_algo == dot11AuthAlgrthm_Shared)
go2asoc = 1;
@@ -2975,7 +2974,7 @@ static unsigned int OnAssocReq(struct adapter *padapter,
goto OnAssocReqFail;
} else {
/* check if ssid match */
- if (memcmp((void *)(p+2), cur->ssid.ssid, cur->ssid.ssid_length))
+ if (memcmp((void *)(p + 2), cur->ssid.ssid, cur->ssid.ssid_length))
status = _STATS_FAILURE_;
if (ie_len != cur->ssid.ssid_length)
@@ -2987,7 +2986,7 @@ static unsigned int OnAssocReq(struct adapter *padapter,
/* check if the supported rate is ok */
p = rtw_get_ie(pframe + WLAN_HDR_A3_LEN + ie_offset, _SUPPORTEDRATES_IE_, &ie_len, pkt_len - WLAN_HDR_A3_LEN - ie_offset);
- if (p == NULL) {
+ if (!p) {
DBG_88E("Rx a sta assoc-req which supported rate is empty!\n");
/* use our own rate set as the station's rates */
/* memcpy(supportRate, AP_BSSRATE, AP_BSSRATE_LEN); */
@@ -2996,14 +2995,15 @@ static unsigned int OnAssocReq(struct adapter *padapter,
status = _STATS_FAILURE_;
goto OnAssocReqFail;
} else {
- memcpy(supportRate, p+2, ie_len);
+ memcpy(supportRate, p + 2, ie_len);
supportRateNum = ie_len;
p = rtw_get_ie(pframe + WLAN_HDR_A3_LEN + ie_offset, _EXT_SUPPORTEDRATES_IE_, &ie_len,
pkt_len - WLAN_HDR_A3_LEN - ie_offset);
- if (p != NULL) {
+ if (p) {
if (supportRateNum <= sizeof(supportRate)) {
- memcpy(supportRate+supportRateNum, p+2, ie_len);
+ memcpy(supportRate + supportRateNum,
+ p + 2, ie_len);
supportRateNum += ie_len;
}
}
@@ -3031,7 +3031,7 @@ static unsigned int OnAssocReq(struct adapter *padapter,
wpa_ie = elems.rsn_ie;
wpa_ie_len = elems.rsn_ie_len;
- if (rtw_parse_wpa2_ie(wpa_ie-2, wpa_ie_len+2, &group_cipher, &pairwise_cipher, NULL) == _SUCCESS) {
+ if (rtw_parse_wpa2_ie(wpa_ie - 2, wpa_ie_len + 2, &group_cipher, &pairwise_cipher, NULL) == _SUCCESS) {
pstat->dot8021xalg = 1;/* psk, todo:802.1x */
pstat->wpa_psk |= BIT(1);
@@ -3052,7 +3052,7 @@ static unsigned int OnAssocReq(struct adapter *padapter,
wpa_ie = elems.wpa_ie;
wpa_ie_len = elems.wpa_ie_len;
- if (rtw_parse_wpa_ie(wpa_ie-2, wpa_ie_len+2, &group_cipher, &pairwise_cipher, NULL) == _SUCCESS) {
+ if (rtw_parse_wpa_ie(wpa_ie - 2, wpa_ie_len + 2, &group_cipher, &pairwise_cipher, NULL) == _SUCCESS) {
pstat->dot8021xalg = 1;/* psk, todo:802.1x */
pstat->wpa_psk |= BIT(0);
@@ -3094,7 +3094,7 @@ static unsigned int OnAssocReq(struct adapter *padapter,
/* AP supports WPA/RSN, and the STA is going to do WPS, but the AP is not ready: */
/* the selected registrar of the AP is _FALSE */
- if ((psecuritypriv->wpa_psk > 0) && (pstat->flags & (WLAN_STA_WPS|WLAN_STA_MAYBE_WPS))) {
+ if ((psecuritypriv->wpa_psk > 0) && (pstat->flags & (WLAN_STA_WPS | WLAN_STA_MAYBE_WPS))) {
if (pmlmepriv->wps_beacon_ie) {
u8 selected_registrar = 0;
@@ -3131,7 +3131,7 @@ static unsigned int OnAssocReq(struct adapter *padapter,
copy_len = min_t(int, wpa_ie_len + 2, sizeof(pstat->wpa_ie));
}
if (copy_len > 0)
- memcpy(pstat->wpa_ie, wpa_ie-2, copy_len);
+ memcpy(pstat->wpa_ie, wpa_ie - 2, copy_len);
}
/* check if there is WMM IE & support WWM-PS */
pstat->flags &= ~WLAN_STA_WME;
@@ -3146,14 +3146,14 @@ static unsigned int OnAssocReq(struct adapter *padapter,
p = pframe + WLAN_HDR_A3_LEN + ie_offset; ie_len = 0;
for (;;) {
p = rtw_get_ie(p, _VENDOR_SPECIFIC_IE_, &ie_len, pkt_len - WLAN_HDR_A3_LEN - ie_offset);
- if (p != NULL) {
- if (!memcmp(p+2, WMM_IE, 6)) {
+ if (p) {
+ if (!memcmp(p + 2, WMM_IE, 6)) {
pstat->flags |= WLAN_STA_WME;
pstat->qos_option = 1;
- pstat->qos_info = *(p+8);
+ pstat->qos_info = *(p + 8);
- pstat->max_sp_len = (pstat->qos_info>>5) & 0x3;
+ pstat->max_sp_len = (pstat->qos_info >> 5) & 0x3;
if ((pstat->qos_info & 0xf) != 0xf)
pstat->has_legacy_ac = true;
@@ -3162,22 +3162,22 @@ static unsigned int OnAssocReq(struct adapter *padapter,
if (pstat->qos_info & 0xf) {
if (pstat->qos_info & BIT(0))
- pstat->uapsd_vo = BIT(0)|BIT(1);
+ pstat->uapsd_vo = BIT(0) | BIT(1);
else
pstat->uapsd_vo = 0;
if (pstat->qos_info & BIT(1))
- pstat->uapsd_vi = BIT(0)|BIT(1);
+ pstat->uapsd_vi = BIT(0) | BIT(1);
else
pstat->uapsd_vi = 0;
if (pstat->qos_info & BIT(2))
- pstat->uapsd_bk = BIT(0)|BIT(1);
+ pstat->uapsd_bk = BIT(0) | BIT(1);
else
pstat->uapsd_bk = 0;
if (pstat->qos_info & BIT(3))
- pstat->uapsd_be = BIT(0)|BIT(1);
+ pstat->uapsd_be = BIT(0) | BIT(1);
else
pstat->uapsd_be = 0;
}
@@ -3245,7 +3245,7 @@ static unsigned int OnAssocReq(struct adapter *padapter,
DBG_88E(" old AID %d\n", pstat->aid);
} else {
for (pstat->aid = 1; pstat->aid <= NUM_STA; pstat->aid++)
- if (pstapriv->sta_aid[pstat->aid - 1] == NULL)
+ if (!pstapriv->sta_aid[pstat->aid - 1])
break;
/* if (pstat->aid > NUM_STA) { */
@@ -3452,14 +3452,13 @@ static unsigned int OnDeAuth(struct adapter *padapter,
}
return _SUCCESS;
- } else
+ }
#endif
- {
- DBG_88E_LEVEL(_drv_always_, "sta recv deauth reason code(%d) sta:%pM\n",
- reason, GetAddr3Ptr(pframe));
+ DBG_88E_LEVEL(_drv_always_, "sta recv deauth reason code(%d) sta:%pM\n",
+ reason, GetAddr3Ptr(pframe));
+
+ receive_disconnect(padapter, GetAddr3Ptr(pframe), reason);
- receive_disconnect(padapter, GetAddr3Ptr(pframe), reason);
- }
pmlmepriv->LinkDetectInfo.bBusyTraffic = false;
return _SUCCESS;
}
@@ -3506,14 +3505,13 @@ static unsigned int OnDisassoc(struct adapter *padapter,
}
return _SUCCESS;
- } else
+ }
#endif
- {
- DBG_88E_LEVEL(_drv_always_, "ap recv disassoc reason code(%d) sta:%pM\n",
- reason, GetAddr3Ptr(pframe));
+ DBG_88E_LEVEL(_drv_always_, "ap recv disassoc reason code(%d) sta:%pM\n",
+ reason, GetAddr3Ptr(pframe));
+
+ receive_disconnect(padapter, GetAddr3Ptr(pframe), reason);
- receive_disconnect(padapter, GetAddr3Ptr(pframe), reason);
- }
pmlmepriv->LinkDetectInfo.bBusyTraffic = false;
return _SUCCESS;
}
@@ -3958,7 +3956,7 @@ static void init_channel_list(struct adapter *padapter,
((o->bw == BW40MINUS) || (o->bw == BW40PLUS)))
continue;
- if (reg == NULL) {
+ if (!reg) {
reg = &channel_list->reg_class[cla];
cla++;
reg->reg_class = o->op_class;
@@ -4515,7 +4513,7 @@ void mlmeext_joinbss_event_callback(struct adapter *padapter, int join_res)
/* set per sta rate after updating HT cap. */
set_sta_rate(padapter, psta);
rtw_hal_set_hwreg(padapter, HW_VAR_TX_RPT_MAX_MACID, (u8 *)&psta->mac_id);
- media_status = (psta->mac_id<<8)|1; /* MACID|OPMODE: 1 means connect */
+ media_status = (psta->mac_id << 8) | 1; /* MACID|OPMODE: 1 means connect */
rtw_hal_set_hwreg(padapter, HW_VAR_H2C_MEDIA_STATUS_RPT, (u8 *)&media_status);
}
@@ -4844,7 +4842,7 @@ u8 setopmode_hdl(struct adapter *padapter, u8 *pbuf)
pmlmeinfo->state = WIFI_FW_AP_STATE;
type = _HW_STATE_AP_;
} else if (psetop->mode == Ndis802_11Infrastructure) {
- pmlmeinfo->state &= ~(BIT(0)|BIT(1));/* clear state */
+ pmlmeinfo->state &= ~(BIT(0) | BIT(1));/* clear state */
pmlmeinfo->state |= WIFI_FW_STATION_STATE;/* set to STATION_STATE */
type = _HW_STATE_STATION_;
} else if (psetop->mode == Ndis802_11IBSS) {
@@ -5040,7 +5038,7 @@ u8 disconnect_hdl(struct adapter *padapter, unsigned char *pbuf)
u8 val8;
if (is_client_associated_to_ap(padapter))
- issue_deauth_ex(padapter, pnetwork->MacAddress, WLAN_REASON_DEAUTH_LEAVING, param->deauth_timeout_ms/100, 100);
+ issue_deauth_ex(padapter, pnetwork->MacAddress, WLAN_REASON_DEAUTH_LEAVING, param->deauth_timeout_ms / 100, 100);
rtw_hal_set_hwreg(padapter, HW_VAR_MLME_DISCONNECT, NULL);
rtw_hal_set_hwreg(padapter, HW_VAR_BSSID, null_addr);
@@ -5084,7 +5082,7 @@ static int rtw_scan_ch_decision(struct adapter *padapter,
struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
/* clear out first */
- memset(out, 0, sizeof(struct rtw_ieee80211_channel)*out_num);
+ memset(out, 0, sizeof(struct rtw_ieee80211_channel) * out_num);
/* acquire channels from in */
j = 0;
@@ -5263,7 +5261,7 @@ u8 set_stakey_hdl(struct adapter *padapter, u8 *pbuf)
DBG_88E("r871x_set_stakey_hdl(): enc_algorithm=%d\n", pparm->algorithm);
- if ((psta->mac_id < 1) || (psta->mac_id > (NUM_STA-4))) {
+ if ((psta->mac_id < 1) || (psta->mac_id > (NUM_STA - 4))) {
DBG_88E("r871x_set_stakey_hdl():set_stakey failed, mac_id(aid)=%d\n", psta->mac_id);
return H2C_REJECTED;
}
@@ -5276,10 +5274,10 @@ u8 set_stakey_hdl(struct adapter *padapter, u8 *pbuf)
write_cam(padapter, cam_id, ctrl, pparm->addr, pparm->key);
return H2C_SUCCESS_RSP;
- } else {
- DBG_88E("r871x_set_stakey_hdl(): sta has been free\n");
- return H2C_REJECTED;
}
+
+ DBG_88E("r871x_set_stakey_hdl(): sta has been freed\n");
+ return H2C_REJECTED;
}
/* below for sta mode */
@@ -5333,14 +5331,14 @@ u8 set_tx_beacon_cmd(struct adapter *padapter)
ptxBeacon_parm = kmemdup(&pmlmeinfo->network,
sizeof(struct wlan_bssid_ex), GFP_ATOMIC);
- if (ptxBeacon_parm == NULL) {
+ if (!ptxBeacon_parm) {
kfree(ph2c);
res = _FAIL;
goto exit;
}
- len_diff = update_hidden_ssid(ptxBeacon_parm->ies+_BEACON_IE_OFFSET_,
- ptxBeacon_parm->ie_length-_BEACON_IE_OFFSET_,
+ len_diff = update_hidden_ssid(ptxBeacon_parm->ies + _BEACON_IE_OFFSET_,
+ ptxBeacon_parm->ie_length - _BEACON_IE_OFFSET_,
pmlmeinfo->hidden_ssid_mode);
ptxBeacon_parm->ie_length += len_diff;
@@ -5361,7 +5359,7 @@ u8 mlme_evt_hdl(struct adapter *padapter, unsigned char *pbuf)
peventbuf = (uint *)pbuf;
evt_sz = (u16)(*peventbuf & 0xffff);
- evt_code = (u8)((*peventbuf>>16) & 0xff);
+ evt_code = (u8)((*peventbuf >> 16) & 0xff);
/* checking if event code is valid */
if (evt_code >= MAX_C2HEVT) {
diff --git a/drivers/staging/rtl8188eu/core/rtw_pwrctrl.c b/drivers/staging/rtl8188eu/core/rtw_pwrctrl.c
index 03dc7e5fcc38..c4f58507dbfd 100644
--- a/drivers/staging/rtl8188eu/core/rtw_pwrctrl.c
+++ b/drivers/staging/rtl8188eu/core/rtw_pwrctrl.c
@@ -154,8 +154,8 @@ void ips_enter(struct adapter *padapter)
int ips_leave(struct adapter *padapter)
{
struct pwrctrl_priv *pwrpriv = &padapter->pwrctrlpriv;
- struct security_priv *psecuritypriv = &(padapter->securitypriv);
- struct mlme_priv *pmlmepriv = &(padapter->mlmepriv);
+ struct security_priv *psecuritypriv = &padapter->securitypriv;
+ struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
int result = _SUCCESS;
int keyid;
@@ -200,28 +200,24 @@ int ips_leave(struct adapter *padapter)
static bool rtw_pwr_unassociated_idle(struct adapter *adapter)
{
- struct mlme_priv *pmlmepriv = &(adapter->mlmepriv);
- bool ret = false;
+ struct mlme_priv *pmlmepriv = &adapter->mlmepriv;
if (time_after_eq(adapter->pwrctrlpriv.ips_deny_time, jiffies))
- goto exit;
+ return false;
if (check_fwstate(pmlmepriv, WIFI_ASOC_STATE|WIFI_SITE_MONITOR) ||
check_fwstate(pmlmepriv, WIFI_UNDER_LINKING|WIFI_UNDER_WPS) ||
check_fwstate(pmlmepriv, WIFI_AP_STATE) ||
check_fwstate(pmlmepriv, WIFI_ADHOC_MASTER_STATE|WIFI_ADHOC_STATE))
- goto exit;
-
- ret = true;
+ return false;
-exit:
- return ret;
+ return true;
}
void rtw_ps_processor(struct adapter *padapter)
{
struct pwrctrl_priv *pwrpriv = &padapter->pwrctrlpriv;
- struct mlme_priv *pmlmepriv = &(padapter->mlmepriv);
+ struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
enum rt_rf_power_state rfpwrstate;
pwrpriv->ps_processing = true;
@@ -282,7 +278,7 @@ static void pwr_state_check_handler(struct timer_list *t)
*/
void rtw_set_rpwm(struct adapter *padapter, u8 pslv)
{
- u8 rpwm;
+ u8 rpwm;
struct pwrctrl_priv *pwrpriv = &padapter->pwrctrlpriv;
pslv = PS_STATE(pslv);
@@ -335,8 +331,8 @@ void rtw_set_rpwm(struct adapter *padapter, u8 pslv)
static u8 PS_RDY_CHECK(struct adapter *padapter)
{
unsigned long curr_time, delta_time;
- struct pwrctrl_priv *pwrpriv = &padapter->pwrctrlpriv;
- struct mlme_priv *pmlmepriv = &(padapter->mlmepriv);
+ struct pwrctrl_priv *pwrpriv = &padapter->pwrctrlpriv;
+ struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
curr_time = jiffies;
delta_time = curr_time - pwrpriv->DelayLPSLastTimeStamp;
@@ -437,7 +433,7 @@ s32 LPS_RF_ON_check(struct adapter *padapter, u32 delay_ms)
/* */
void LPS_Enter(struct adapter *padapter)
{
- struct pwrctrl_priv *pwrpriv = &padapter->pwrctrlpriv;
+ struct pwrctrl_priv *pwrpriv = &padapter->pwrctrlpriv;
if (!PS_RDY_CHECK(padapter))
return;
@@ -463,7 +459,7 @@ void LPS_Enter(struct adapter *padapter)
/* Leave the leisure power save mode. */
void LPS_Leave(struct adapter *padapter)
{
- struct pwrctrl_priv *pwrpriv = &padapter->pwrctrlpriv;
+ struct pwrctrl_priv *pwrpriv = &padapter->pwrctrlpriv;
if (pwrpriv->bLeisurePs) {
if (pwrpriv->pwr_mode != PS_MODE_ACTIVE) {
@@ -483,8 +479,8 @@ void LPS_Leave(struct adapter *padapter)
/* */
void LeaveAllPowerSaveMode(struct adapter *Adapter)
{
- struct mlme_priv *pmlmepriv = &(Adapter->mlmepriv);
- u8 enqueue = 0;
+ struct mlme_priv *pmlmepriv = &Adapter->mlmepriv;
+ u8 enqueue = 0;
if (check_fwstate(pmlmepriv, _FW_LINKED))
rtw_lps_ctrl_wk_cmd(Adapter, LPS_CTRL_LEAVE, enqueue);
@@ -611,7 +607,7 @@ exit:
int rtw_pm_set_lps(struct adapter *padapter, u8 mode)
{
- int ret = 0;
+ int ret = 0;
struct pwrctrl_priv *pwrctrlpriv = &padapter->pwrctrlpriv;
if (mode < PS_MODE_NUM) {
diff --git a/drivers/staging/rtl8188eu/core/rtw_wlan_util.c b/drivers/staging/rtl8188eu/core/rtw_wlan_util.c
index af8a79ce8736..6df873e4c2ed 100644
--- a/drivers/staging/rtl8188eu/core/rtw_wlan_util.c
+++ b/drivers/staging/rtl8188eu/core/rtw_wlan_util.c
@@ -1247,9 +1247,8 @@ unsigned char check_assoc_AP(u8 *pframe, uint len)
if (ralink_vendor_flag) {
DBG_88E("link to Tenda W311R AP\n");
return HT_IOT_PEER_TENDA;
- } else {
- DBG_88E("Capture EPIGRAM_OUI\n");
}
+ DBG_88E("Capture EPIGRAM_OUI\n");
} else {
break;
}
@@ -1266,10 +1265,9 @@ unsigned char check_assoc_AP(u8 *pframe, uint len)
} else if (ralink_vendor_flag && epigram_vendor_flag) {
DBG_88E("link to Tenda W311R AP\n");
return HT_IOT_PEER_TENDA;
- } else {
- DBG_88E("link to new AP\n");
- return HT_IOT_PEER_UNKNOWN;
}
+ DBG_88E("link to new AP\n");
+ return HT_IOT_PEER_UNKNOWN;
}
void update_IOT_info(struct adapter *padapter)
diff --git a/drivers/staging/rtl8188eu/core/rtw_xmit.c b/drivers/staging/rtl8188eu/core/rtw_xmit.c
index c37591657bac..258531bc1408 100644
--- a/drivers/staging/rtl8188eu/core/rtw_xmit.c
+++ b/drivers/staging/rtl8188eu/core/rtw_xmit.c
@@ -1022,10 +1022,10 @@ s32 rtw_xmitframe_coalesce(struct adapter *padapter, struct sk_buff *pkt, struct
ClearMFrag(mem_start);
break;
- } else {
- RT_TRACE(_module_rtl871x_xmit_c_, _drv_err_, ("%s: There're still something in packet!\n", __func__));
}
+ RT_TRACE(_module_rtl871x_xmit_c_, _drv_err_, ("%s: There's still something left in the packet!\n", __func__));
+
addr = (size_t)(pframe);
mem_start = (unsigned char *)round_up(addr, 4) + hw_hdr_offset;
diff --git a/drivers/staging/rtl8188eu/hal/odm.c b/drivers/staging/rtl8188eu/hal/odm.c
index 4e2f6cb55a75..7489491f5aaa 100644
--- a/drivers/staging/rtl8188eu/hal/odm.c
+++ b/drivers/staging/rtl8188eu/hal/odm.c
@@ -967,10 +967,11 @@ void ODM_TXPowerTrackingCheck(struct odm_dm_struct *pDM_Odm)
pDM_Odm->RFCalibrateInfo.TM_Trigger = 1;
return;
- } else {
- rtl88eu_dm_txpower_tracking_callback_thermalmeter(Adapter);
- pDM_Odm->RFCalibrateInfo.TM_Trigger = 0;
}
+
+ rtl88eu_dm_txpower_tracking_callback_thermalmeter(Adapter);
+ pDM_Odm->RFCalibrateInfo.TM_Trigger = 0;
}
/* 3============================================================ */
diff --git a/drivers/staging/rtl8188eu/hal/odm_rtl8188e.c b/drivers/staging/rtl8188eu/hal/odm_rtl8188e.c
index 251bd8aba3b1..a55a0d8b9fb7 100644
--- a/drivers/staging/rtl8188eu/hal/odm_rtl8188e.c
+++ b/drivers/staging/rtl8188eu/hal/odm_rtl8188e.c
@@ -154,35 +154,37 @@ void rtl88eu_dm_update_rx_idle_ant(struct odm_dm_struct *dm_odm, u8 ant)
struct adapter *adapter = dm_odm->Adapter;
u32 default_ant, optional_ant;
- if (dm_fat_tbl->RxIdleAnt != ant) {
- if (ant == MAIN_ANT) {
- default_ant = (dm_odm->AntDivType == CG_TRX_HW_ANTDIV) ?
- MAIN_ANT_CG_TRX : MAIN_ANT_CGCS_RX;
- optional_ant = (dm_odm->AntDivType == CG_TRX_HW_ANTDIV) ?
- AUX_ANT_CG_TRX : AUX_ANT_CGCS_RX;
- } else {
- default_ant = (dm_odm->AntDivType == CG_TRX_HW_ANTDIV) ?
- AUX_ANT_CG_TRX : AUX_ANT_CGCS_RX;
- optional_ant = (dm_odm->AntDivType == CG_TRX_HW_ANTDIV) ?
- MAIN_ANT_CG_TRX : MAIN_ANT_CGCS_RX;
- }
+ if (dm_fat_tbl->RxIdleAnt == ant)
+ return;
- if (dm_odm->AntDivType == CG_TRX_HW_ANTDIV) {
- phy_set_bb_reg(adapter, ODM_REG_RX_ANT_CTRL_11N,
- BIT(5) | BIT(4) | BIT(3), default_ant);
- phy_set_bb_reg(adapter, ODM_REG_RX_ANT_CTRL_11N,
- BIT(8) | BIT(7) | BIT(6), optional_ant);
- phy_set_bb_reg(adapter, ODM_REG_ANTSEL_CTRL_11N,
- BIT(14) | BIT(13) | BIT(12), default_ant);
- phy_set_bb_reg(adapter, ODM_REG_RESP_TX_11N,
- BIT(6) | BIT(7), default_ant);
- } else if (dm_odm->AntDivType == CGCS_RX_HW_ANTDIV) {
- phy_set_bb_reg(adapter, ODM_REG_RX_ANT_CTRL_11N,
- BIT(5) | BIT(4) | BIT(3), default_ant);
- phy_set_bb_reg(adapter, ODM_REG_RX_ANT_CTRL_11N,
- BIT(8) | BIT(7) | BIT(6), optional_ant);
- }
+ if (ant == MAIN_ANT) {
+ default_ant = (dm_odm->AntDivType == CG_TRX_HW_ANTDIV) ?
+ MAIN_ANT_CG_TRX : MAIN_ANT_CGCS_RX;
+ optional_ant = (dm_odm->AntDivType == CG_TRX_HW_ANTDIV) ?
+ AUX_ANT_CG_TRX : AUX_ANT_CGCS_RX;
+ } else {
+ default_ant = (dm_odm->AntDivType == CG_TRX_HW_ANTDIV) ?
+ AUX_ANT_CG_TRX : AUX_ANT_CGCS_RX;
+ optional_ant = (dm_odm->AntDivType == CG_TRX_HW_ANTDIV) ?
+ MAIN_ANT_CG_TRX : MAIN_ANT_CGCS_RX;
+ }
+
+ if (dm_odm->AntDivType == CG_TRX_HW_ANTDIV) {
+ phy_set_bb_reg(adapter, ODM_REG_RX_ANT_CTRL_11N,
+ BIT(5) | BIT(4) | BIT(3), default_ant);
+ phy_set_bb_reg(adapter, ODM_REG_RX_ANT_CTRL_11N,
+ BIT(8) | BIT(7) | BIT(6), optional_ant);
+ phy_set_bb_reg(adapter, ODM_REG_ANTSEL_CTRL_11N,
+ BIT(14) | BIT(13) | BIT(12), default_ant);
+ phy_set_bb_reg(adapter, ODM_REG_RESP_TX_11N,
+ BIT(6) | BIT(7), default_ant);
+ } else if (dm_odm->AntDivType == CGCS_RX_HW_ANTDIV) {
+ phy_set_bb_reg(adapter, ODM_REG_RX_ANT_CTRL_11N,
+ BIT(5) | BIT(4) | BIT(3), default_ant);
+ phy_set_bb_reg(adapter, ODM_REG_RX_ANT_CTRL_11N,
+ BIT(8) | BIT(7) | BIT(6), optional_ant);
}
+
dm_fat_tbl->RxIdleAnt = ant;
}
@@ -303,6 +305,7 @@ void rtl88eu_dm_antenna_diversity(struct odm_dm_struct *dm_odm)
if (!(dm_odm->SupportAbility & ODM_BB_ANT_DIV))
return;
+
if (!dm_odm->bLinked) {
ODM_RT_TRACE(dm_odm, ODM_COMP_ANT_DIV, ODM_DBG_LOUD,
("ODM_AntennaDiversity_88E(): No Link.\n"));
@@ -318,19 +321,20 @@ void rtl88eu_dm_antenna_diversity(struct odm_dm_struct *dm_odm)
dm_fat_tbl->bBecomeLinked = dm_odm->bLinked;
}
return;
- } else {
- if (!dm_fat_tbl->bBecomeLinked) {
- ODM_RT_TRACE(dm_odm, ODM_COMP_ANT_DIV, ODM_DBG_LOUD,
- ("Need to Turn on HW AntDiv\n"));
- phy_set_bb_reg(adapter, ODM_REG_IGI_A_11N, BIT(7), 1);
- phy_set_bb_reg(adapter, ODM_REG_CCK_ANTDIV_PARA1_11N,
- BIT(15), 1);
- if (dm_odm->AntDivType == CG_TRX_HW_ANTDIV)
- phy_set_bb_reg(adapter, ODM_REG_TX_ANT_CTRL_11N,
- BIT(21), 1);
- dm_fat_tbl->bBecomeLinked = dm_odm->bLinked;
- }
}
+
+ if (!dm_fat_tbl->bBecomeLinked) {
+ ODM_RT_TRACE(dm_odm, ODM_COMP_ANT_DIV, ODM_DBG_LOUD,
+ ("Need to Turn on HW AntDiv\n"));
+ phy_set_bb_reg(adapter, ODM_REG_IGI_A_11N, BIT(7), 1);
+ phy_set_bb_reg(adapter, ODM_REG_CCK_ANTDIV_PARA1_11N,
+ BIT(15), 1);
+ if (dm_odm->AntDivType == CG_TRX_HW_ANTDIV)
+ phy_set_bb_reg(adapter, ODM_REG_TX_ANT_CTRL_11N,
+ BIT(21), 1);
+ dm_fat_tbl->bBecomeLinked = dm_odm->bLinked;
+ }
+
if ((dm_odm->AntDivType == CG_TRX_HW_ANTDIV) ||
(dm_odm->AntDivType == CGCS_RX_HW_ANTDIV))
rtl88eu_dm_hw_ant_div(dm_odm);
diff --git a/drivers/staging/rtl8188eu/hal/phy.c b/drivers/staging/rtl8188eu/hal/phy.c
index 51c40abfafaa..afaf9e55195a 100644
--- a/drivers/staging/rtl8188eu/hal/phy.c
+++ b/drivers/staging/rtl8188eu/hal/phy.c
@@ -923,27 +923,27 @@ static bool simularity_compare(struct adapter *adapt, s32 resulta[][8],
}
}
return result;
- } else {
- if (!(sim_bitmap & 0x03)) { /* path A TX OK */
- for (i = 0; i < 2; i++)
- resulta[3][i] = resulta[c1][i];
- }
- if (!(sim_bitmap & 0x0c)) { /* path A RX OK */
- for (i = 2; i < 4; i++)
- resulta[3][i] = resulta[c1][i];
- }
+ }
- if (!(sim_bitmap & 0x30)) { /* path B TX OK */
- for (i = 4; i < 6; i++)
- resulta[3][i] = resulta[c1][i];
- }
+ if (!(sim_bitmap & 0x03)) { /* path A TX OK */
+ for (i = 0; i < 2; i++)
+ resulta[3][i] = resulta[c1][i];
+ }
+ if (!(sim_bitmap & 0x0c)) { /* path A RX OK */
+ for (i = 2; i < 4; i++)
+ resulta[3][i] = resulta[c1][i];
+ }
- if (!(sim_bitmap & 0xc0)) { /* path B RX OK */
- for (i = 6; i < 8; i++)
- resulta[3][i] = resulta[c1][i];
- }
- return false;
+ if (!(sim_bitmap & 0x30)) { /* path B TX OK */
+ for (i = 4; i < 6; i++)
+ resulta[3][i] = resulta[c1][i];
+ }
+
+ if (!(sim_bitmap & 0xc0)) { /* path B RX OK */
+ for (i = 6; i < 8; i++)
+ resulta[3][i] = resulta[c1][i];
}
+ return false;
}
static void phy_iq_calibrate(struct adapter *adapt, s32 result[][8],
@@ -1053,10 +1053,9 @@ static void phy_iq_calibrate(struct adapter *adapt, s32 result[][8],
result[t][3] = (phy_query_bb_reg(adapt, rRx_Power_After_IQK_A_2,
bMaskDWord)&0x3FF0000)>>16;
break;
- } else {
- ODM_RT_TRACE(dm_odm, ODM_COMP_CALIBRATION, ODM_DBG_LOUD,
- ("Path A Rx IQK Fail!!\n"));
}
+ ODM_RT_TRACE(dm_odm, ODM_COMP_CALIBRATION, ODM_DBG_LOUD,
+ ("Path A Rx IQK Fail!!\n"));
}
if (path_a_ok == 0x00) {
diff --git a/drivers/staging/rtl8188eu/hal/rtl8188e_dm.c b/drivers/staging/rtl8188eu/hal/rtl8188e_dm.c
index 545d6a6102f1..241f55b92808 100644
--- a/drivers/staging/rtl8188eu/hal/rtl8188e_dm.c
+++ b/drivers/staging/rtl8188eu/hal/rtl8188e_dm.c
@@ -21,7 +21,7 @@
/* Initialize GPIO setting registers */
static void dm_InitGPIOSetting(struct adapter *Adapter)
{
- u8 tmp1byte;
+ u8 tmp1byte;
tmp1byte = usb_read8(Adapter, REG_GPIO_MUXCFG);
tmp1byte &= (GPIOSEL_GPIO | ~GPIOSEL_ENBT);
@@ -35,8 +35,8 @@ static void dm_InitGPIOSetting(struct adapter *Adapter)
static void Init_ODM_ComInfo_88E(struct adapter *Adapter)
{
struct hal_data_8188e *hal_data = Adapter->HalData;
- struct dm_priv *pdmpriv = &hal_data->dmpriv;
- struct odm_dm_struct *dm_odm = &(hal_data->odmpriv);
+ struct dm_priv *pdmpriv = &hal_data->dmpriv;
+ struct odm_dm_struct *dm_odm = &hal_data->odmpriv;
/* Init Value */
memset(dm_odm, 0, sizeof(*dm_odm));
@@ -51,44 +51,46 @@ static void Init_ODM_ComInfo_88E(struct adapter *Adapter)
dm_odm->AntDivType = hal_data->TRxAntDivType;
- /* Tx power tracking BB swing table. */
- /* The base index = 12. +((12-n)/2)dB 13~?? = decrease tx pwr by -((n-12)/2)dB */
+ /* Tx power tracking BB swing table.
+ * The base index = 12. +((12-n)/2)dB; 13~?? = decrease tx pwr
+ * by -((n-12)/2)dB
+ */
dm_odm->BbSwingIdxOfdm = 12; /* Set default value as index 12. */
dm_odm->BbSwingIdxOfdmCurrent = 12;
dm_odm->BbSwingFlagOfdm = false;
- pdmpriv->InitODMFlag = ODM_RF_CALIBRATION |
- ODM_RF_TX_PWR_TRACK;
+ pdmpriv->InitODMFlag = ODM_RF_CALIBRATION |
+ ODM_RF_TX_PWR_TRACK;
dm_odm->SupportAbility = pdmpriv->InitODMFlag;
}
static void Update_ODM_ComInfo_88E(struct adapter *Adapter)
{
- struct mlme_ext_priv *pmlmeext = &Adapter->mlmeextpriv;
- struct mlme_priv *pmlmepriv = &Adapter->mlmepriv;
+ struct mlme_ext_priv *pmlmeext = &Adapter->mlmeextpriv;
+ struct mlme_priv *pmlmepriv = &Adapter->mlmepriv;
struct pwrctrl_priv *pwrctrlpriv = &Adapter->pwrctrlpriv;
struct hal_data_8188e *hal_data = Adapter->HalData;
- struct odm_dm_struct *dm_odm = &(hal_data->odmpriv);
- struct dm_priv *pdmpriv = &hal_data->dmpriv;
+ struct odm_dm_struct *dm_odm = &hal_data->odmpriv;
+ struct dm_priv *pdmpriv = &hal_data->dmpriv;
int i;
- pdmpriv->InitODMFlag = ODM_BB_DIG |
- ODM_BB_RA_MASK |
- ODM_BB_DYNAMIC_TXPWR |
- ODM_BB_FA_CNT |
- ODM_BB_RSSI_MONITOR |
- ODM_BB_CCK_PD |
- ODM_BB_PWR_SAVE |
- ODM_MAC_EDCA_TURBO |
- ODM_RF_CALIBRATION |
- ODM_RF_TX_PWR_TRACK;
+ pdmpriv->InitODMFlag = ODM_BB_DIG |
+ ODM_BB_RA_MASK |
+ ODM_BB_DYNAMIC_TXPWR |
+ ODM_BB_FA_CNT |
+ ODM_BB_RSSI_MONITOR |
+ ODM_BB_CCK_PD |
+ ODM_BB_PWR_SAVE |
+ ODM_MAC_EDCA_TURBO |
+ ODM_RF_CALIBRATION |
+ ODM_RF_TX_PWR_TRACK;
if (hal_data->AntDivCfg)
pdmpriv->InitODMFlag |= ODM_BB_ANT_DIV;
if (Adapter->registrypriv.mp_mode == 1) {
- pdmpriv->InitODMFlag = ODM_RF_CALIBRATION |
- ODM_RF_TX_PWR_TRACK;
+ pdmpriv->InitODMFlag = ODM_RF_CALIBRATION |
+ ODM_RF_TX_PWR_TRACK;
}
dm_odm->SupportAbility = pdmpriv->InitODMFlag;
@@ -106,20 +108,23 @@ static void Update_ODM_ComInfo_88E(struct adapter *Adapter)
dm_odm->pbPowerSaving = (bool *)&pwrctrlpriv->bpower_saving;
dm_odm->AntDivType = hal_data->TRxAntDivType;
- /* Tx power tracking BB swing table. */
- /* The base index = 12. +((12-n)/2)dB 13~?? = decrease tx pwr by -((n-12)/2)dB */
+ /* Tx power tracking BB swing table.
+ * The base index = 12. +((12-n)/2)dB; 13~?? = decrease tx pwr
+ * by -((n-12)/2)dB
+ */
dm_odm->BbSwingIdxOfdm = 12; /* Set default value as index 12. */
dm_odm->BbSwingIdxOfdmCurrent = 12;
dm_odm->BbSwingFlagOfdm = false;
for (i = 0; i < NUM_STA; i++)
- ODM_CmnInfoPtrArrayHook(dm_odm, ODM_CMNINFO_STA_STATUS, i, NULL);
+ ODM_CmnInfoPtrArrayHook(dm_odm, ODM_CMNINFO_STA_STATUS, i,
+ NULL);
}
void rtl8188e_InitHalDm(struct adapter *Adapter)
{
- struct dm_priv *pdmpriv = &Adapter->HalData->dmpriv;
- struct odm_dm_struct *dm_odm = &(Adapter->HalData->odmpriv);
+ struct dm_priv *pdmpriv = &Adapter->HalData->dmpriv;
+ struct odm_dm_struct *dm_odm = &Adapter->HalData->odmpriv;
dm_InitGPIOSetting(Adapter);
pdmpriv->DM_Type = DM_Type_ByDriver;
@@ -162,7 +167,7 @@ skip_dm:
void rtw_hal_dm_init(struct adapter *Adapter)
{
- struct dm_priv *pdmpriv = &Adapter->HalData->dmpriv;
+ struct dm_priv *pdmpriv = &Adapter->HalData->dmpriv;
struct odm_dm_struct *podmpriv = &Adapter->HalData->odmpriv;
memset(pdmpriv, 0, sizeof(struct dm_priv));
@@ -172,23 +177,28 @@ void rtw_hal_dm_init(struct adapter *Adapter)
/* Add new function to reset the state of antenna diversity before link. */
/* Compare RSSI for deciding antenna */
-void rtw_hal_antdiv_rssi_compared(struct adapter *Adapter, struct wlan_bssid_ex *dst, struct wlan_bssid_ex *src)
+void rtw_hal_antdiv_rssi_compared(struct adapter *Adapter,
+ struct wlan_bssid_ex *dst,
+ struct wlan_bssid_ex *src)
{
if (Adapter->HalData->AntDivCfg != 0) {
- /* select optimum_antenna for before linked =>For antenna diversity */
- if (dst->Rssi >= src->Rssi) {/* keep org parameter */
+ /* select Optimum_antenna before link (for antenna
+ * diversity)
+ */
+ if (dst->Rssi >= src->Rssi) {/* keep org parameter */
src->Rssi = dst->Rssi;
- src->PhyInfo.Optimum_antenna = dst->PhyInfo.Optimum_antenna;
+ src->PhyInfo.Optimum_antenna =
+ dst->PhyInfo.Optimum_antenna;
}
}
}
/* Add new function to reset the state of antenna diversity before link. */
-u8 rtw_hal_antdiv_before_linked(struct adapter *Adapter)
+bool rtw_hal_antdiv_before_linked(struct adapter *Adapter)
{
struct odm_dm_struct *dm_odm = &Adapter->HalData->odmpriv;
struct sw_ant_switch *dm_swat_tbl = &dm_odm->DM_SWAT_Table;
- struct mlme_priv *pmlmepriv = &(Adapter->mlmepriv);
+ struct mlme_priv *pmlmepriv = &Adapter->mlmepriv;
/* Condition that does not need to use antenna diversity. */
if (Adapter->HalData->AntDivCfg == 0)
@@ -197,15 +207,16 @@ u8 rtw_hal_antdiv_before_linked(struct adapter *Adapter)
if (check_fwstate(pmlmepriv, _FW_LINKED))
return false;
- if (dm_swat_tbl->SWAS_NoLink_State == 0) {
- /* switch channel */
- dm_swat_tbl->SWAS_NoLink_State = 1;
- dm_swat_tbl->CurAntenna = (dm_swat_tbl->CurAntenna == Antenna_A) ? Antenna_B : Antenna_A;
-
- rtw_antenna_select_cmd(Adapter, dm_swat_tbl->CurAntenna, false);
- return true;
- } else {
+ if (dm_swat_tbl->SWAS_NoLink_State != 0) {
dm_swat_tbl->SWAS_NoLink_State = 0;
return false;
}
+
+ /* switch channel */
+ dm_swat_tbl->SWAS_NoLink_State = 1;
+ dm_swat_tbl->CurAntenna = (dm_swat_tbl->CurAntenna == Antenna_A) ?
+ Antenna_B : Antenna_A;
+
+ rtw_antenna_select_cmd(Adapter, dm_swat_tbl->CurAntenna, false);
+ return true;
}
diff --git a/drivers/staging/rtl8188eu/include/hal8188e_phy_cfg.h b/drivers/staging/rtl8188eu/include/hal8188e_phy_cfg.h
index da66695a1d8f..0c5b2b0948f5 100644
--- a/drivers/staging/rtl8188eu/include/hal8188e_phy_cfg.h
+++ b/drivers/staging/rtl8188eu/include/hal8188e_phy_cfg.h
@@ -15,11 +15,6 @@
#define MAX_TXPWR_IDX_NMODE_92S 63
#define Reset_Cnt_Limit 3
-#define IQK_MAC_REG_NUM 4
-#define IQK_ADDA_REG_NUM 16
-#define IQK_BB_REG_NUM 9
-#define HP_THERMAL_NUM 8
-
#define MAX_AGGR_NUM 0x07
diff --git a/drivers/staging/rtl8188eu/include/hal_intf.h b/drivers/staging/rtl8188eu/include/hal_intf.h
index 516a89647003..39df30599a5d 100644
--- a/drivers/staging/rtl8188eu/include/hal_intf.h
+++ b/drivers/staging/rtl8188eu/include/hal_intf.h
@@ -209,7 +209,7 @@ void rtw_hal_set_bwmode(struct adapter *padapter,
void rtw_hal_set_chan(struct adapter *padapter, u8 channel);
void rtw_hal_dm_watchdog(struct adapter *padapter);
-u8 rtw_hal_antdiv_before_linked(struct adapter *padapter);
+bool rtw_hal_antdiv_before_linked(struct adapter *padapter);
void rtw_hal_antdiv_rssi_compared(struct adapter *padapter,
struct wlan_bssid_ex *dst,
struct wlan_bssid_ex *src);
diff --git a/drivers/staging/rtl8188eu/include/ieee80211.h b/drivers/staging/rtl8188eu/include/ieee80211.h
index d569fe5ed8e6..75f0ebe0faf5 100644
--- a/drivers/staging/rtl8188eu/include/ieee80211.h
+++ b/drivers/staging/rtl8188eu/include/ieee80211.h
@@ -765,7 +765,7 @@ bool rtw_is_cckrates_included(u8 *rate);
bool rtw_is_cckratesonly_included(u8 *rate);
-int rtw_check_network_type(unsigned char *rate, int ratelen, int channel);
+int rtw_check_network_type(unsigned char *rate);
void rtw_get_bcn_info(struct wlan_network *pnetwork);
diff --git a/drivers/staging/rtl8188eu/include/odm.h b/drivers/staging/rtl8188eu/include/odm.h
index 8245cea2feef..9d39fe13626a 100644
--- a/drivers/staging/rtl8188eu/include/odm.h
+++ b/drivers/staging/rtl8188eu/include/odm.h
@@ -239,7 +239,6 @@ struct odm_rate_adapt {
#define IQK_MAC_REG_NUM 4
#define IQK_ADDA_REG_NUM 16
-#define IQK_BB_REG_NUM_MAX 10
#define IQK_BB_REG_NUM 9
#define HP_THERMAL_NUM 8
diff --git a/drivers/staging/rtl8188eu/include/rtl8188e_dm.h b/drivers/staging/rtl8188eu/include/rtl8188e_dm.h
index 19204335ab4c..910b982ab775 100644
--- a/drivers/staging/rtl8188eu/include/rtl8188e_dm.h
+++ b/drivers/staging/rtl8188eu/include/rtl8188e_dm.h
@@ -10,12 +10,7 @@ enum{
UP_LINK,
DOWN_LINK,
};
-/* duplicate code,will move to ODM ######### */
-#define IQK_MAC_REG_NUM 4
-#define IQK_ADDA_REG_NUM 16
-#define IQK_BB_REG_NUM 9
-#define HP_THERMAL_NUM 8
-/* duplicate code,will move to ODM ######### */
+
struct dm_priv {
u8 DM_Type;
u8 DMFlag;
diff --git a/drivers/staging/rtl8188eu/include/rtw_rf.h b/drivers/staging/rtl8188eu/include/rtw_rf.h
index b5dfb226f32a..c3847cf16ec1 100644
--- a/drivers/staging/rtl8188eu/include/rtw_rf.h
+++ b/drivers/staging/rtl8188eu/include/rtw_rf.h
@@ -19,9 +19,6 @@
#define SHORT_SLOT_TIME 9
#define NON_SHORT_SLOT_TIME 20
-#define RTL8711_RF_MAX_SENS 6
-#define RTL8711_RF_DEF_SENS 4
-
/* We now define the following channels as the max channels in each
* channel plan.
*/
@@ -30,8 +27,6 @@
#define MAX_CHANNEL_NUM_2G 14
#define MAX_CHANNEL_NUM 14 /* 2.4 GHz only */
-#define NUM_REGULATORYS 1
-
/* Country codes */
#define USA 0x555320
#define EUROPE 0x1 /* temp, should be provided later */
@@ -74,17 +69,6 @@ enum _REG_PREAMBLE_MODE {
PREAMBLE_SHORT = 3,
};
-enum _RTL8712_RF_MIMO_CONFIG_ {
- RTL8712_RFCONFIG_1T = 0x10,
- RTL8712_RFCONFIG_2T = 0x20,
- RTL8712_RFCONFIG_1R = 0x01,
- RTL8712_RFCONFIG_2R = 0x02,
- RTL8712_RFCONFIG_1T1R = 0x11,
- RTL8712_RFCONFIG_1T2R = 0x12,
- RTL8712_RFCONFIG_TURBO = 0x92,
- RTL8712_RFCONFIG_2T2R = 0x22
-};
-
enum rf90_radio_path {
RF90_PATH_A = 0, /* Radio Path A */
RF90_PATH_B = 1, /* Radio Path B */
diff --git a/drivers/staging/rtl8188eu/os_dep/ioctl_linux.c b/drivers/staging/rtl8188eu/os_dep/ioctl_linux.c
index 710c33fd4965..9b6ea86d1dcf 100644
--- a/drivers/staging/rtl8188eu/os_dep/ioctl_linux.c
+++ b/drivers/staging/rtl8188eu/os_dep/ioctl_linux.c
@@ -222,18 +222,21 @@ static char *translate_scan(struct adapter *padapter,
/* parsing WPA/WPA2 IE */
{
- u8 buf[MAX_WPA_IE_LEN];
+ u8 *buf;
u8 wpa_ie[255], rsn_ie[255];
u16 wpa_len = 0, rsn_len = 0;
u8 *p;
+ buf = kzalloc(MAX_WPA_IE_LEN, GFP_ATOMIC);
+ if (!buf)
+ return start;
+
rtw_get_sec_ie(pnetwork->network.ies, pnetwork->network.ie_length, rsn_ie, &rsn_len, wpa_ie, &wpa_len);
RT_TRACE(_module_rtl871x_mlme_c_, _drv_info_, ("rtw_wx_get_scan: ssid =%s\n", pnetwork->network.ssid.ssid));
RT_TRACE(_module_rtl871x_mlme_c_, _drv_info_, ("rtw_wx_get_scan: wpa_len =%d rsn_len =%d\n", wpa_len, rsn_len));
if (wpa_len > 0) {
p = buf;
- memset(buf, 0, MAX_WPA_IE_LEN);
p += sprintf(p, "wpa_ie=");
for (i = 0; i < wpa_len; i++)
p += sprintf(p, "%02x", wpa_ie[i]);
@@ -250,7 +253,6 @@ static char *translate_scan(struct adapter *padapter,
}
if (rsn_len > 0) {
p = buf;
- memset(buf, 0, MAX_WPA_IE_LEN);
p += sprintf(p, "rsn_ie=");
for (i = 0; i < rsn_len; i++)
p += sprintf(p, "%02x", rsn_ie[i]);
@@ -264,6 +266,7 @@ static char *translate_scan(struct adapter *padapter,
iwe.u.data.length = rsn_len;
start = iwe_stream_add_point(info, start, stop, &iwe, rsn_ie);
}
+ kfree(buf);
}
{/* parsing WPS IE */
@@ -593,9 +596,8 @@ static int rtw_set_wpa_ie(struct adapter *padapter, char *pie, unsigned short ie
set_fwstate(&padapter->mlmepriv, WIFI_UNDER_WPS);
cnt += buf[cnt+1]+2;
break;
- } else {
- cnt += buf[cnt+1]+2; /* goto next */
}
+ cnt += buf[cnt+1]+2; /* goto next */
}
}
}
@@ -770,8 +772,7 @@ static int rtw_wx_set_pmkid(struct net_device *dev,
DBG_88E("[rtw_wx_set_pmkid] IW_PMKSA_ADD!\n");
if (!memcmp(strIssueBssid, strZeroMacAddress, ETH_ALEN))
return ret;
- else
- ret = true;
+ ret = true;
blInserted = false;
/* overwrite PMKID */
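
The translate_scan() hunks above move a MAX_WPA_IE_LEN array from the stack to
the heap; kzalloc() also makes the two explicit memset() calls redundant, since
the buffer arrives zeroed. A minimal sketch of the pattern (error handling as
in the driver, GFP_ATOMIC matching the allocation context it assumes):

	u8 *buf = kzalloc(MAX_WPA_IE_LEN, GFP_ATOMIC);	/* pre-zeroed */
	if (!buf)
		return start;	/* fail soft: report what we have so far */
	/* ... sprintf() the IE hex dump into buf and add it to the stream ... */
	kfree(buf);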
diff --git a/drivers/staging/rtl8192e/rtl8192e/rtl_core.c b/drivers/staging/rtl8192e/rtl8192e/rtl_core.c
index dace81a7d1ba..11183b9f757a 100644
--- a/drivers/staging/rtl8192e/rtl8192e/rtl_core.c
+++ b/drivers/staging/rtl8192e/rtl8192e/rtl_core.c
@@ -267,7 +267,7 @@ static short _rtl92e_check_nic_enough_desc(struct net_device *dev, int prio)
return 0;
}
-static void _rtl92e_tx_timeout(struct net_device *dev)
+static void _rtl92e_tx_timeout(struct net_device *dev, unsigned int txqueue)
{
struct r8192_priv *priv = rtllib_priv(dev);
@@ -1973,18 +1973,11 @@ u8 rtl92e_rx_db_to_percent(s8 antpower)
u8 rtl92e_evm_db_to_percent(s8 value)
{
- s8 ret_val;
+ s8 ret_val = clamp(-value, 0, 33) * 3;
- ret_val = value;
-
- if (ret_val >= 0)
- ret_val = 0;
- if (ret_val <= -33)
- ret_val = -33;
- ret_val = 0 - ret_val;
- ret_val *= 3;
if (ret_val == 99)
ret_val = 100;
+
return ret_val;
}
@@ -2463,7 +2456,7 @@ static int _rtl92e_pci_probe(struct pci_dev *pdev,
}
- ioaddr = (unsigned long)ioremap_nocache(pmem_start, pmem_len);
+ ioaddr = (unsigned long)ioremap(pmem_start, pmem_len);
if (ioaddr == (unsigned long)NULL) {
netdev_err(dev, "ioremap failed!");
goto err_rel_mem;
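
ioremap_nocache() was removed from the kernel as an exact alias of ioremap(),
which returns an uncached mapping suitable for MMIO, so the conversions in
this series are mechanical one-liners:

	ioaddr = (unsigned long)ioremap(pmem_start, pmem_len);	/* was ioremap_nocache() */
	if (ioaddr == (unsigned long)NULL)
		goto err_rel_mem;	/* unwind exactly as before */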
diff --git a/drivers/staging/rtl8192u/Makefile b/drivers/staging/rtl8192u/Makefile
index dcd51bf4aed3..0be7426b6ebc 100644
--- a/drivers/staging/rtl8192u/Makefile
+++ b/drivers/staging/rtl8192u/Makefile
@@ -1,13 +1,9 @@
# SPDX-License-Identifier: GPL-2.0
NIC_SELECT = RTL8192U
-ccflags-y := -std=gnu89
-ccflags-y += -O2
-
ccflags-y += -DCONFIG_FORCE_HARD_FLOAT=y
ccflags-y += -DJACKSON_NEW_8187 -DJACKSON_NEW_RX
ccflags-y += -DTHOMAS_BEACON -DTHOMAS_TASKLET -DTHOMAS_SKB -DTHOMAS_TURBO
-ccflags-y += -I $(srctree)/$(src)/ieee80211
r8192u_usb-y := r8192U_core.o r8180_93cx6.o r8192U_wx.o \
r8190_rtl8256.o r819xU_phy.o r819xU_firmware.o \
diff --git a/drivers/staging/rtl8192u/ieee80211/Makefile b/drivers/staging/rtl8192u/ieee80211/Makefile
deleted file mode 100644
index 0d4d6489f767..000000000000
--- a/drivers/staging/rtl8192u/ieee80211/Makefile
+++ /dev/null
@@ -1,27 +0,0 @@
-# SPDX-License-Identifier: GPL-2.0
-NIC_SELECT = RTL8192U
-
-ccflags-y := -O2
-ccflags-y += -DJACKSON_NEW_8187 -DJACKSON_NEW_RX
-
-ieee80211-rsl-objs := ieee80211_rx.o \
- ieee80211_softmac.o \
- ieee80211_tx.o \
- ieee80211_wx.o \
- ieee80211_module.o \
- ieee80211_softmac_wx.o\
- rtl819x_HTProc.o\
- rtl819x_TSProc.o\
- rtl819x_BAProc.o\
- dot11d.o
-
-ieee80211_crypt-rsl-objs := ieee80211_crypt.o
-ieee80211_crypt_tkip-rsl-objs := ieee80211_crypt_tkip.o
-ieee80211_crypt_ccmp-rsl-objs := ieee80211_crypt_ccmp.o
-ieee80211_crypt_wep-rsl-objs := ieee80211_crypt_wep.o
-
-obj-m +=ieee80211-rsl.o
-obj-m +=ieee80211_crypt-rsl.o
-obj-m +=ieee80211_crypt_wep-rsl.o
-obj-m +=ieee80211_crypt_tkip-rsl.o
-obj-m +=ieee80211_crypt_ccmp-rsl.o
diff --git a/drivers/staging/rtl8192u/ieee80211/ieee80211_rx.c b/drivers/staging/rtl8192u/ieee80211/ieee80211_rx.c
index 00fea127bdc3..e101f7b13c7e 100644
--- a/drivers/staging/rtl8192u/ieee80211/ieee80211_rx.c
+++ b/drivers/staging/rtl8192u/ieee80211/ieee80211_rx.c
@@ -232,8 +232,7 @@ ieee80211_rx_frame_mgmt(struct ieee80211_device *ieee, struct sk_buff *skb,
#ifdef NOT_YET
if (ieee->iw_mode == IW_MODE_MASTER) {
- printk(KERN_DEBUG "%s: Master mode not yet supported.\n",
- ieee->dev->name);
+ netdev_dbg(ieee->dev, "Master mode not yet supported.\n");
return 0;
/*
hostap_update_sta_ps(ieee, (struct hostap_ieee80211_hdr_4addr *)
@@ -261,9 +260,9 @@ ieee80211_rx_frame_mgmt(struct ieee80211_device *ieee, struct sk_buff *skb,
if (ieee->iw_mode == IW_MODE_MASTER) {
if (type != WLAN_FC_TYPE_MGMT && type != WLAN_FC_TYPE_CTRL) {
- printk(KERN_DEBUG "%s: unknown management frame "
+ netdev_dbg(skb->dev, "unknown management frame "
"(type=0x%02x, stype=0x%02x) dropped\n",
- skb->dev->name, type, stype);
+ type, stype);
return -1;
}
@@ -271,8 +270,8 @@ ieee80211_rx_frame_mgmt(struct ieee80211_device *ieee, struct sk_buff *skb,
return 0;
}
- printk(KERN_DEBUG "%s: hostap_rx_frame_mgmt: management frame "
- "received in non-Host AP mode\n", skb->dev->name);
+ netdev_dbg(skb->dev, "hostap_rx_frame_mgmt: management frame "
+ "received in non-Host AP mode\n");
return -1;
#endif
}
@@ -349,9 +348,9 @@ ieee80211_rx_frame_decrypt(struct ieee80211_device *ieee, struct sk_buff *skb,
if (ieee->tkip_countermeasures &&
strcmp(crypt->ops->name, "TKIP") == 0) {
if (net_ratelimit()) {
- printk(KERN_DEBUG "%s: TKIP countermeasures: dropped "
+ netdev_dbg(ieee->dev, "TKIP countermeasures: dropped "
"received packet from %pM\n",
- ieee->dev->name, hdr->addr2);
+ hdr->addr2);
}
return -1;
}
@@ -397,9 +396,9 @@ ieee80211_rx_frame_decrypt_msdu(struct ieee80211_device *ieee, struct sk_buff *s
res = crypt->ops->decrypt_msdu(skb, keyidx, hdrlen, crypt->priv);
atomic_dec(&crypt->refcnt);
if (res < 0) {
- printk(KERN_DEBUG "%s: MSDU decryption/MIC verification failed"
+ netdev_dbg(ieee->dev, "MSDU decryption/MIC verification failed"
" (SA=%pM keyidx=%d)\n",
- ieee->dev->name, hdr->addr2, keyidx);
+ hdr->addr2, keyidx);
return -1;
}
@@ -749,7 +748,8 @@ static void RxReorderIndicatePacket(struct ieee80211_device *ieee,
kfree(prxbIndicateArray);
}
-static u8 parse_subframe(struct sk_buff *skb,
+static u8 parse_subframe(struct ieee80211_device *ieee,
+ struct sk_buff *skb,
struct ieee80211_rx_stats *rx_stats,
struct ieee80211_rxb *rxb, u8 *src, u8 *dst)
{
@@ -810,11 +810,11 @@ static u8 parse_subframe(struct sk_buff *skb,
nSubframe_Length = (nSubframe_Length >> 8) + (nSubframe_Length << 8);
if (skb->len < (ETHERNET_HEADER_SIZE + nSubframe_Length)) {
- printk("%s: A-MSDU parse error!! pRfd->nTotalSubframe : %d\n",\
- __func__, rxb->nr_subframes);
- printk("%s: A-MSDU parse error!! Subframe Length: %d\n", __func__, nSubframe_Length);
- printk("nRemain_Length is %d and nSubframe_Length is : %d\n", skb->len, nSubframe_Length);
- printk("The Packet SeqNum is %d\n", SeqNum);
+ netdev_dbg(ieee->dev, "A-MSDU parse error!! pRfd->nTotalSubframe : %d\n",
+ rxb->nr_subframes);
+ netdev_dbg(ieee->dev, "A-MSDU parse error!! Subframe Length: %d\n", nSubframe_Length);
+ netdev_dbg(ieee->dev, "nRemain_Length is %d and nSubframe_Length is : %d\n", skb->len, nSubframe_Length);
+ netdev_dbg(ieee->dev, "The Packet SeqNum is %d\n", SeqNum);
return 0;
}
@@ -904,8 +904,7 @@ int ieee80211_rx(struct ieee80211_device *ieee, struct sk_buff *skb,
stats = &ieee->stats;
if (skb->len < 10) {
- printk(KERN_INFO "%s: SKB length < 10\n",
- dev->name);
+ netdev_info(dev, "SKB length < 10\n");
goto rx_dropped;
}
@@ -919,7 +918,7 @@ int ieee80211_rx(struct ieee80211_device *ieee, struct sk_buff *skb,
if (HTCCheck(ieee, skb->data)) {
if (net_ratelimit())
- printk("find HTCControl\n");
+ netdev_warn(dev, "find HTCControl\n");
hdrlen += 4;
rx_stats->bContainHTC = true;
}
@@ -1113,7 +1112,7 @@ int ieee80211_rx(struct ieee80211_device *ieee, struct sk_buff *skb,
if (ieee->host_decrypt && (fc & IEEE80211_FCTL_WEP) &&
(keyidx = ieee80211_rx_frame_decrypt(ieee, skb, crypt)) < 0) {
- printk("decrypt frame error\n");
+ netdev_dbg(ieee->dev, "decrypt frame error\n");
goto rx_dropped;
}
@@ -1141,9 +1140,8 @@ int ieee80211_rx(struct ieee80211_device *ieee, struct sk_buff *skb,
flen -= hdrlen;
if (frag_skb->tail + flen > frag_skb->end) {
- printk(KERN_WARNING "%s: host decrypted and "
- "reassembled frame did not fit skb\n",
- dev->name);
+ netdev_warn(dev, "host decrypted and "
+ "reassembled frame did not fit skb\n");
ieee80211_frag_cache_invalidate(ieee, hdr);
goto rx_dropped;
}
@@ -1178,7 +1176,7 @@ int ieee80211_rx(struct ieee80211_device *ieee, struct sk_buff *skb,
* encrypted/authenticated */
if (ieee->host_decrypt && (fc & IEEE80211_FCTL_WEP) &&
ieee80211_rx_frame_decrypt_msdu(ieee, skb, keyidx, crypt)) {
- printk("==>decrypt msdu error\n");
+ netdev_dbg(ieee->dev, "==>decrypt msdu error\n");
goto rx_dropped;
}
@@ -1250,7 +1248,7 @@ int ieee80211_rx(struct ieee80211_device *ieee, struct sk_buff *skb,
goto rx_dropped;
/* to parse amsdu packets */
/* qos data packets & reserved bit is 1 */
- if (parse_subframe(skb, rx_stats, rxb, src, dst) == 0) {
+ if (parse_subframe(ieee, skb, rx_stats, rxb, src, dst) == 0) {
/* only to free rxb, and not submit the packets to upper layer */
for (i = 0; i < rxb->nr_subframes; i++) {
dev_kfree_skb(rxb->subframes[i]);
@@ -1863,7 +1861,7 @@ int ieee80211_parse_info_param(struct ieee80211_device *ieee,
info_element->data[0] == 0x00 &&
info_element->data[1] == 0x13 &&
info_element->data[2] == 0x74)) {
- printk("========>%s(): athros AP is exist\n", __func__);
+ netdev_dbg(ieee->dev, "========> athros AP is exist\n");
network->atheros_cap_exist = true;
} else
network->atheros_cap_exist = false;
@@ -1980,8 +1978,8 @@ int ieee80211_parse_info_param(struct ieee80211_device *ieee,
}
break;
case MFIE_TYPE_QOS_PARAMETER:
- printk(KERN_ERR
- "QoS Error need to parse QOS_PARAMETER IE\n");
+ netdev_err(ieee->dev,
+ "QoS Error need to parse QOS_PARAMETER IE\n");
break;
case MFIE_TYPE_COUNTRY:
@@ -2357,14 +2355,14 @@ static inline void ieee80211_process_probe_response(
if (IS_COUNTRY_IE_VALID(ieee)) {
// Case 1: Country code
if (!is_legal_channel(ieee, network->channel)) {
- printk("GetScanInfo(): For Country code, filter probe response at channel(%d).\n", network->channel);
+ netdev_warn(ieee->dev, "GetScanInfo(): For Country code, filter probe response at channel(%d).\n", network->channel);
goto out;
}
} else {
// Case 2: No any country code.
// Filter over channel ch12~14
if (network->channel > 11) {
- printk("GetScanInfo(): For Global Domain, filter probe response at channel(%d).\n", network->channel);
+ netdev_warn(ieee->dev, "GetScanInfo(): For Global Domain, filter probe response at channel(%d).\n", network->channel);
goto out;
}
}
@@ -2372,14 +2370,14 @@ static inline void ieee80211_process_probe_response(
if (IS_COUNTRY_IE_VALID(ieee)) {
// Case 1: Country code
if (!is_legal_channel(ieee, network->channel)) {
- printk("GetScanInfo(): For Country code, filter beacon at channel(%d).\n", network->channel);
+ netdev_warn(ieee->dev, "GetScanInfo(): For Country code, filter beacon at channel(%d).\n", network->channel);
goto out;
}
} else {
// Case 2: No any country code.
// Filter over channel ch12~14
if (network->channel > 14) {
- printk("GetScanInfo(): For Global Domain, filter beacon at channel(%d).\n", network->channel);
+ netdev_warn(ieee->dev, "GetScanInfo(): For Global Domain, filter beacon at channel(%d).\n", network->channel);
goto out;
}
}
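
The printk() conversions above follow the usual recipe: the "%s: ", dev->name
prefix is dropped because netdev_dbg()/netdev_warn()/netdev_info() print the
device name themselves, and netdev_dbg() is additionally a no-op unless DEBUG
or CONFIG_DYNAMIC_DEBUG is enabled. Shape of a converted call:

	netdev_dbg(ieee->dev, "dropped packet from %pM\n", hdr->addr2);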
diff --git a/drivers/staging/rtl8192u/r8192U_core.c b/drivers/staging/rtl8192u/r8192U_core.c
index 7e2cabd16e88..89dd1fb0b38d 100644
--- a/drivers/staging/rtl8192u/r8192U_core.c
+++ b/drivers/staging/rtl8192u/r8192U_core.c
@@ -60,7 +60,7 @@ double __extendsfdf2(float a)
#include <linux/seq_file.h>
/* FIXME: check if 2.6.7 is ok */
-#include "dot11d.h"
+#include "ieee80211/dot11d.h"
/* set here to open your trace code. */
u32 rt_global_debug_component = COMP_DOWN |
COMP_SEC |
@@ -640,7 +640,7 @@ short check_nic_enough_desc(struct net_device *dev, int queue_index)
return (used < MAX_TX_URB);
}
-static void tx_timeout(struct net_device *dev)
+static void tx_timeout(struct net_device *dev, unsigned int txqueue)
{
struct r8192_priv *priv = ieee80211_priv(dev);
@@ -3954,18 +3954,11 @@ static u8 rtl819x_query_rxpwrpercentage(s8 antpower)
static u8 rtl819x_evm_dbtopercentage(s8 value)
{
- s8 ret_val;
+ s8 ret_val = clamp(-value, 0, 33) * 3;
- ret_val = value;
-
- if (ret_val >= 0)
- ret_val = 0;
- if (ret_val <= -33)
- ret_val = -33;
- ret_val = 0 - ret_val;
- ret_val *= 3;
if (ret_val == 99)
ret_val = 100;
+
return ret_val;
}
diff --git a/drivers/staging/rtl8192u/r8192U_wx.c b/drivers/staging/rtl8192u/r8192U_wx.c
index 5822bb7984b9..0118edb0b9ab 100644
--- a/drivers/staging/rtl8192u/r8192U_wx.c
+++ b/drivers/staging/rtl8192u/r8192U_wx.c
@@ -23,7 +23,7 @@
#include "r8192U.h"
#include "r8192U_hw.h"
-#include "dot11d.h"
+#include "ieee80211/dot11d.h"
#include "r8192U_wx.h"
#define RATE_COUNT 12
diff --git a/drivers/staging/rtl8192u/r819xU_phy.c b/drivers/staging/rtl8192u/r819xU_phy.c
index c04d8eca0cfb..555e52522be6 100644
--- a/drivers/staging/rtl8192u/r819xU_phy.c
+++ b/drivers/staging/rtl8192u/r819xU_phy.c
@@ -7,7 +7,7 @@
#include "r8192U_dm.h"
#include "r819xU_firmware_img.h"
-#include "dot11d.h"
+#include "ieee80211/dot11d.h"
#include <linux/bitops.h>
static u32 RF_CHANNEL_TABLE_ZEBRA[] = {
diff --git a/drivers/staging/rtl8712/rtl8712_recv.c b/drivers/staging/rtl8712/rtl8712_recv.c
index 00ea0beb12c9..116773943a2e 100644
--- a/drivers/staging/rtl8712/rtl8712_recv.c
+++ b/drivers/staging/rtl8712/rtl8712_recv.c
@@ -663,17 +663,11 @@ static u8 evm_db2percentage(s8 value)
/*
* -33dB~0dB to 0%~99%
*/
- s8 ret_val;
+ s8 ret_val = clamp(-value, 0, 33) * 3;
- ret_val = value;
- if (ret_val >= 0)
- ret_val = 0;
- if (ret_val <= -33)
- ret_val = -33;
- ret_val = -ret_val;
- ret_val *= 3;
if (ret_val == 99)
ret_val = 100;
+
return ret_val;
}
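
The same EVM rewrite lands in three drivers above; clamp(-value, 0, 33) * 3
compresses the old four-step clamp-and-negate chain. A worked check of the
-33 dB..0 dB to 0..100% mapping:

	value =   0  ->  clamp(0, 0, 33) * 3  =  0
	value = -11  ->  clamp(11, 0, 33) * 3 = 33
	value = -33  ->  clamp(33, 0, 33) * 3 = 99, reported as 100
	value = -50  ->  clamp(50, 0, 33) * 3 = 99, reported as 100 (saturated)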
diff --git a/drivers/staging/rtl8723bs/core/rtw_mlme_ext.c b/drivers/staging/rtl8723bs/core/rtw_mlme_ext.c
index 5e687f6d2c3e..c642825ca8ef 100644
--- a/drivers/staging/rtl8723bs/core/rtw_mlme_ext.c
+++ b/drivers/staging/rtl8723bs/core/rtw_mlme_ext.c
@@ -207,10 +207,10 @@ static RT_CHANNEL_PLAN_MAP RTW_ChannelPlanMap[RT_CHANNEL_DOMAIN_MAX] = {
{0x02, 0x1F}, /* 0x57, RT_CHANNEL_DOMAIN_FCC1_FCC10 */
};
-static RT_CHANNEL_PLAN_MAP RTW_CHANNEL_PLAN_MAP_REALTEK_DEFINE = {0x03, 0x02}; /* use the conbination for max channel numbers */
+ /* use the combination for max channel numbers */
+static RT_CHANNEL_PLAN_MAP RTW_CHANNEL_PLAN_MAP_REALTEK_DEFINE = {0x03, 0x02};
-/*
- * Search the @param ch in given @param ch_set
+/* Search the @param ch in given @param ch_set
* @ch_set: the given channel set
* @ch: the given channel number
*
@@ -229,8 +229,7 @@ int rtw_ch_set_search_ch(RT_CHANNEL_INFO *ch_set, const u32 ch)
return i;
}
-/*
- * Check the @param ch is fit with setband setting of @param adapter
+/* Check the @param ch is fit with setband setting of @param adapter
* @adapter: the given adapter
* @ch: the given channel number
*
@@ -2265,7 +2264,7 @@ inline struct xmit_frame *alloc_mgtxmitframe(struct xmit_priv *pxmitpriv)
/****************************************************************************
-Following are some TX fuctions for WiFi MLME
+Following are some TX functions for WiFi MLME
*****************************************************************************/
@@ -2956,7 +2955,7 @@ exit:
return ret;
}
-/* if psta == NULL, indiate we are station(client) now... */
+/* if psta == NULL, indicate we are station(client) now... */
void issue_auth(struct adapter *padapter, struct sta_info *psta, unsigned short status)
{
struct xmit_frame *pmgntframe;
@@ -3356,9 +3355,11 @@ void issue_assocreq(struct adapter *padapter)
(!memcmp(pIE->data, WPS_OUI, 4))) {
vs_ie_length = pIE->Length;
if ((!padapter->registrypriv.wifi_spec) && (!memcmp(pIE->data, WPS_OUI, 4))) {
- /* Commented by Kurt 20110629 */
- /* In some older APs, WPS handshake */
- /* would be fail if we append vender extensions informations to AP */
+ /* Commented by Kurt 20110629
+ * In some older APs, the WPS handshake
+ * would fail if we appended vendor
+ * extension information to the AP
+ */
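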
vs_ie_length = 14;
}
@@ -3406,7 +3407,7 @@ exit:
rtw_buf_free(&pmlmepriv->assoc_req, &pmlmepriv->assoc_req_len);
}
-/* when wait_ack is ture, this function shoule be called at process context */
+/* when wait_ack is true, this function should be called at process context */
static int _issue_nulldata(struct adapter *padapter, unsigned char *da,
unsigned int power_mode, bool wait_ack)
{
@@ -3481,7 +3482,7 @@ exit:
/*
* [IMPORTANT] Don't call this function in interrupt context
*
- * When wait_ms > 0, this function shoule be called at process context
+ * When wait_ms > 0, this function should be called at process context
* da == NULL for station mode
*/
int issue_nulldata(struct adapter *padapter, unsigned char *da, unsigned int power_mode, int try_cnt, int wait_ms)
@@ -3493,7 +3494,7 @@ int issue_nulldata(struct adapter *padapter, unsigned char *da, unsigned int pow
struct sta_info *psta;
- /* da == NULL, assum it's null data for sta to ap*/
+ /* da == NULL, assume it's null data for sta to ap*/
if (!da)
da = get_my_bssid(&(pmlmeinfo->network));
@@ -3558,14 +3559,14 @@ s32 issue_nulldata_in_interrupt(struct adapter *padapter, u8 *da)
pmlmeext = &padapter->mlmeextpriv;
pmlmeinfo = &pmlmeext->mlmext_info;
- /* da == NULL, assum it's null data for sta to ap*/
+ /* da == NULL, assume it's null data for sta to ap*/
if (!da)
da = get_my_bssid(&(pmlmeinfo->network));
return _issue_nulldata(padapter, da, 0, false);
}
-/* when wait_ack is ture, this function shoule be called at process context */
+/* when wait_ack is true, this function should be called at process context */
static int _issue_qos_nulldata(struct adapter *padapter, unsigned char *da,
u16 tid, bool wait_ack)
{
@@ -3644,7 +3645,7 @@ exit:
return ret;
}
-/* when wait_ms >0 , this function shoule be called at process context */
+/* when wait_ms >0 , this function should be called at process context */
/* da == NULL for station mode */
int issue_qos_nulldata(struct adapter *padapter, unsigned char *da, u16 tid, int try_cnt, int wait_ms)
{
@@ -3653,7 +3654,7 @@ int issue_qos_nulldata(struct adapter *padapter, unsigned char *da, u16 tid, int
struct mlme_ext_priv *pmlmeext = &(padapter->mlmeextpriv);
struct mlme_ext_info *pmlmeinfo = &(pmlmeext->mlmext_info);
- /* da == NULL, assum it's null data for sta to ap*/
+ /* da == NULL, assume it's null data for sta to ap*/
if (!da)
da = get_my_bssid(&(pmlmeinfo->network));
@@ -4264,7 +4265,7 @@ unsigned int send_beacon(struct adapter *padapter)
/****************************************************************************
-Following are some utitity fuctions for WiFi MLME
+Following are some utility functions for WiFi MLME
*****************************************************************************/
@@ -4568,7 +4569,7 @@ u8 collect_bss_info(struct adapter *padapter, union recv_frame *precv_frame, str
}
#endif
- /* mark bss info receving from nearby channel as SignalQuality 101 */
+ /* mark bss info receiving from nearby channel as SignalQuality 101 */
if (bssid->Configuration.DSConfig != rtw_get_oper_ch(padapter))
bssid->PhyInfo.SignalQuality = 101;
@@ -4589,7 +4590,7 @@ void start_create_ibss(struct adapter *padapter)
/* update wireless mode */
update_wireless_mode(padapter);
- /* udpate capability */
+ /* update capability */
caps = rtw_get_capability((struct wlan_bssid_ex *)pnetwork);
update_capinfo(padapter, caps);
if (caps&cap_IBSS) {/* adhoc master */
@@ -4644,7 +4645,7 @@ void start_clnt_join(struct adapter *padapter)
/* update wireless mode */
update_wireless_mode(padapter);
- /* udpate capability */
+ /* update capability */
caps = rtw_get_capability((struct wlan_bssid_ex *)pnetwork);
update_capinfo(padapter, caps);
if (caps&cap_ESS) {
@@ -5379,8 +5380,7 @@ static void rtw_mlmeext_disconnect(struct adapter *padapter)
/* set_opmode_cmd(padapter, infra_client_with_mlme); */
- /*
- * For safety, prevent from keeping macid sleep.
+ /* For safety, prevent from keeping macid sleep.
* If we can sure all power mode enter/leave are paired,
* this check can be removed.
* Lucas@20131113
@@ -5441,7 +5441,7 @@ void mlmeext_joinbss_event_callback(struct adapter *padapter, int join_res)
/* turn on dynamic functions */
Switch_DM_Func(padapter, DYNAMIC_ALL_FUNC_ENABLE, true);
- /* update IOT-releated issue */
+ /* update IOT-related issue */
update_IOT_info(padapter);
rtw_hal_set_hwreg(padapter, HW_VAR_BASIC_RATE, cur_network->SupportedRates);
@@ -5449,7 +5449,7 @@ void mlmeext_joinbss_event_callback(struct adapter *padapter, int join_res)
/* BCN interval */
rtw_hal_set_hwreg(padapter, HW_VAR_BEACON_INTERVAL, (u8 *)(&pmlmeinfo->bcn_interval));
- /* udpate capability */
+ /* update capability */
update_capinfo(padapter, pmlmeinfo->capability);
/* WMM, Update EDCA param */
@@ -6385,7 +6385,9 @@ u8 sitesurvey_cmd_hdl(struct adapter *padapter, u8 *pbuf)
Save_DM_Func_Flag(padapter);
Switch_DM_Func(padapter, DYNAMIC_FUNC_DISABLE, false);
- /* config the initial gain under scaning, need to write the BB registers */
+ /* config the initial gain under scanning, need to write the BB
+ * registers
+ */
initialgain = 0x1e;
rtw_hal_set_hwreg(padapter, HW_VAR_INITIAL_GAIN, (u8 *)(&initialgain));
diff --git a/drivers/staging/rtl8723bs/hal/HalPhyRf.c b/drivers/staging/rtl8723bs/hal/HalPhyRf.c
index beb4002a40e1..357802db9aed 100644
--- a/drivers/staging/rtl8723bs/hal/HalPhyRf.c
+++ b/drivers/staging/rtl8723bs/hal/HalPhyRf.c
@@ -622,33 +622,3 @@ void ODM_TXPowerTrackingCallback_ThermalMeter(struct adapter *Adapter)
pDM_Odm->RFCalibrateInfo.TXPowercount = 0;
}
-
-
-
-
-/* 3 ============================================================ */
-/* 3 IQ Calibration */
-/* 3 ============================================================ */
-
-u8 ODM_GetRightChnlPlaceforIQK(u8 chnl)
-{
- u8 channel_all[ODM_TARGET_CHNL_NUM_2G_5G] = {
- 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14,
- 36, 38, 40, 42, 44, 46, 48, 50, 52, 54, 56, 58,
- 60, 62, 64, 100, 102, 104, 106, 108, 110, 112,
- 114, 116, 118, 120, 122, 124, 126, 128, 130, 132,
- 134, 136, 138, 140, 149, 151, 153, 155, 157, 159,
- 161, 163, 165
- };
- u8 place = chnl;
-
-
- if (chnl > 14) {
- for (place = 14; place < sizeof(channel_all); place++) {
- if (channel_all[place] == chnl)
- return place-13;
- }
- }
- return 0;
-
-}
diff --git a/drivers/staging/rtl8723bs/hal/HalPhyRf.h b/drivers/staging/rtl8723bs/hal/HalPhyRf.h
index 3d6f68bc61d7..643fcf37c9ad 100644
--- a/drivers/staging/rtl8723bs/hal/HalPhyRf.h
+++ b/drivers/staging/rtl8723bs/hal/HalPhyRf.h
@@ -44,12 +44,4 @@ void ODM_ClearTxPowerTrackingState(PDM_ODM_T pDM_Odm);
void ODM_TXPowerTrackingCallback_ThermalMeter(struct adapter *Adapter);
-
-
-#define ODM_TARGET_CHNL_NUM_2G_5G 59
-
-
-u8 ODM_GetRightChnlPlaceforIQK(u8 chnl);
-
-
#endif /* #ifndef __HAL_PHY_RF_H__ */
diff --git a/drivers/staging/rtl8723bs/hal/HalPhyRf_8723B.c b/drivers/staging/rtl8723bs/hal/HalPhyRf_8723B.c
index 1ca9063a269f..85ea535dd6e9 100644
--- a/drivers/staging/rtl8723bs/hal/HalPhyRf_8723B.c
+++ b/drivers/staging/rtl8723bs/hal/HalPhyRf_8723B.c
@@ -1,5 +1,5 @@
// SPDX-License-Identifier: GPL-2.0
-/******************************************************************************
+/*****************************************************************************
*
* Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
*
@@ -82,7 +82,9 @@ static void setIqkMatrix_8723B(
/* if (RFPath == ODM_RF_PATH_A) */
switch (RFPath) {
case ODM_RF_PATH_A:
- /* wirte new elements A, C, D to regC80 and regC94, element B is always 0 */
+ /* write new elements A, C, D to regC80 and regC94,
+ * element B is always 0
+ */
value32 = (ele_D<<22)|((ele_C&0x3F)<<16)|ele_A;
PHY_SetBBReg(pDM_Odm->Adapter, rOFDM0_XATxIQImbalance, bMaskDWord, value32);
@@ -93,7 +95,9 @@ static void setIqkMatrix_8723B(
PHY_SetBBReg(pDM_Odm->Adapter, rOFDM0_ECCAThreshold, BIT24, value32);
break;
case ODM_RF_PATH_B:
- /* wirte new elements A, C, D to regC88 and regC9C, element B is always 0 */
+ /* write new elements A, C, D to regC88 and regC9C,
+ * element B is always 0
+ */
value32 = (ele_D<<22)|((ele_C&0x3F)<<16)|ele_A;
PHY_SetBBReg(pDM_Odm->Adapter, rOFDM0_XBTxIQImbalance, bMaskDWord, value32);
@@ -166,7 +170,7 @@ void DoIQK_8723B(
/*-----------------------------------------------------------------------------
* Function: odm_TxPwrTrackSetPwr88E()
*
- * Overview: 88E change all channel tx power accordign to flag.
+ * Overview: 88E change all channel tx power according to flag.
* OFDM & CCK are all different.
*
* Input: NONE
@@ -1788,7 +1792,7 @@ void PHY_IQCalibrate_8723B(
PDM_ODM_T pDM_Odm = &pHalData->odmpriv;
s32 result[4][8]; /* last is final result */
- u8 i, final_candidate, Indexforchannel;
+ u8 i, final_candidate;
bool bPathAOK, bPathBOK;
s32 RegE94, RegE9C, RegEA4, RegEAC, RegEB4, RegEBC, RegEC4, RegECC, RegTmp = 0;
bool is12simular, is13simular, is23simular;
@@ -1993,17 +1997,14 @@ void PHY_IQCalibrate_8723B(
_PHY_PathBFillIQKMatrix8723B(padapter, bPathBOK, result, final_candidate, (RegEC4 == 0));
}
- Indexforchannel = ODM_GetRightChnlPlaceforIQK(pHalData->CurrentChannel);
-
/* To Fix BSOD when final_candidate is 0xff */
/* by sherry 20120321 */
if (final_candidate < 4) {
for (i = 0; i < IQK_Matrix_REG_NUM; i++)
- pDM_Odm->RFCalibrateInfo.IQKMatrixRegSetting[Indexforchannel].Value[0][i] = result[final_candidate][i];
- pDM_Odm->RFCalibrateInfo.IQKMatrixRegSetting[Indexforchannel].bIQKDone = true;
+ pDM_Odm->RFCalibrateInfo.IQKMatrixRegSetting[0].Value[0][i] = result[final_candidate][i];
+ pDM_Odm->RFCalibrateInfo.IQKMatrixRegSetting[0].bIQKDone = true;
}
- /* RT_DISP(FINIT, INIT_IQK, ("\nIQK OK Indexforchannel %d.\n", Indexforchannel)); */
- ODM_RT_TRACE(pDM_Odm, ODM_COMP_CALIBRATION, ODM_DBG_LOUD, ("\nIQK OK Indexforchannel %d.\n", Indexforchannel));
+ ODM_RT_TRACE(pDM_Odm, ODM_COMP_CALIBRATION, ODM_DBG_LOUD, ("\nIQK OK Indexforchannel %d.\n", 0));
_PHY_SaveADDARegisters8723B(padapter, IQK_BB_REG_92C, pDM_Odm->RFCalibrateInfo.IQK_BB_backup_recover, 9);
diff --git a/drivers/staging/rtl8723bs/hal/rtl8723b_hal_init.c b/drivers/staging/rtl8723bs/hal/rtl8723b_hal_init.c
index 66127f6c8e4d..de8caa6cd418 100644
--- a/drivers/staging/rtl8723bs/hal/rtl8723b_hal_init.c
+++ b/drivers/staging/rtl8723bs/hal/rtl8723b_hal_init.c
@@ -750,7 +750,7 @@ static void Hal_BT_EfusePowerSwitch(
rtw_write8(padapter, 0x6B, tempval);
/* Attention!! Between 0x6A[14] and 0x6A[15] setting need 100us delay */
- /* So don't wirte 0x6A[14]= 1 and 0x6A[15]= 0 together! */
+ /* So don't write 0x6A[14]= 1 and 0x6A[15]= 0 together! */
msleep(1);
/* disable BT output isolation */
/* 0x6A[15] = 0 */
@@ -765,7 +765,7 @@ static void Hal_BT_EfusePowerSwitch(
rtw_write8(padapter, 0x6B, tempval);
/* Attention!! Between 0x6A[14] and 0x6A[15] setting need 100us delay */
- /* So don't wirte 0x6A[14]= 1 and 0x6A[15]= 0 together! */
+ /* So don't write 0x6A[14]= 1 and 0x6A[15]= 0 together! */
/* disable BT power cut */
/* 0x6A[14] = 1 */
@@ -1231,7 +1231,7 @@ static u16 hal_EfuseGetCurrentSize_WiFi(
goto exit;
error:
- /* report max size to prevent wirte efuse */
+ /* report max size to prevent write efuse */
EFUSE_GetEfuseDefinition(padapter, EFUSE_WIFI, TYPE_AVAILABLE_EFUSE_BYTES_TOTAL, &efuse_addr, bPseudoTest);
exit:
@@ -2237,7 +2237,7 @@ void rtl8723b_InitAntenna_Selection(struct adapter *padapter)
u8 val;
val = rtw_read8(padapter, REG_LEDCFG2);
- /* Let 8051 take control antenna settting */
+ /* Let 8051 take control antenna setting */
val |= BIT(7); /* DPDT_SEL_EN, 0x4C[23] */
rtw_write8(padapter, REG_LEDCFG2, val);
}
@@ -3191,22 +3191,26 @@ static void rtl8723b_fill_default_txdesc(
if (bmcst)
ptxdesc->bmc = 1;
- /* 2009.11.05. tynli_test. Suggested by SD4 Filen for FW LPS. */
- /* (1) The sequence number of each non-Qos frame / broadcast / multicast / */
- /* mgnt frame should be controled by Hw because Fw will also send null data */
- /* which we cannot control when Fw LPS enable. */
- /* --> default enable non-Qos data sequense number. 2010.06.23. by tynli. */
- /* (2) Enable HW SEQ control for beacon packet, because we use Hw beacon. */
- /* (3) Use HW Qos SEQ to control the seq num of Ext port non-Qos packets. */
- /* 2010.06.23. Added by tynli. */
+ /* 2009.11.05. tynli_test. Suggested by SD4 Filen for FW LPS.
+ * (1) The sequence number of each non-Qos frame / broadcast /
+ * multicast / mgnt frame should be controlled by Hw because Fw
+ * will also send null data which we cannot control when Fw LPS
+ * enable.
+ * --> default enable non-Qos data sequence number. 2010.06.23.
+ * by tynli.
+ * (2) Enable HW SEQ control for beacon packet, because we use
+ * Hw beacon.
+ * (3) Use HW Qos SEQ to control the seq num of Ext port non-Qos
+ * packets.
+ * 2010.06.23. Added by tynli.
+ */
if (!pattrib->qos_en) /* Hw set sequence number */
ptxdesc->en_hwseq = 1; /* HWSEQ_EN */
}
-/*
- *Description:
+/* Description:
*
- *Parameters:
+ * Parameters:
* pxmitframe xmitframe
* pbuf where to fill tx desc
*/
@@ -3543,7 +3547,7 @@ static void hw_var_set_mlme_sitesurvey(struct adapter *padapter, u8 variable, u8
rtw_write8(padapter, reg_bcn_ctl, val8);
}
- /* Save orignal RRSR setting. */
+ /* Save original RRSR setting. */
pHalData->RegRRSR = rtw_read16(padapter, REG_RRSR);
} else {
/* sitesurvey done */
@@ -3561,7 +3565,7 @@ static void hw_var_set_mlme_sitesurvey(struct adapter *padapter, u8 variable, u8
value_rcr |= rcr_clear_bit;
rtw_write32(padapter, REG_RCR, value_rcr);
- /* Restore orignal RRSR setting. */
+ /* Restore original RRSR setting. */
rtw_write16(padapter, REG_RRSR, pHalData->RegRRSR);
}
}
@@ -4329,8 +4333,7 @@ void GetHwReg8723B(struct adapter *padapter, u8 variable, u8 *val)
}
}
-/*
- *Description:
+/* Description:
* Change default setting of specified variable.
*/
u8 SetHalDefVar8723B(struct adapter *padapter, enum HAL_DEF_VARIABLE variable, void *pval)
@@ -4348,8 +4351,7 @@ u8 SetHalDefVar8723B(struct adapter *padapter, enum HAL_DEF_VARIABLE variable, v
return bResult;
}
-/*
- *Description:
+/* Description:
* Query setting of specified variable.
*/
u8 GetHalDefVar8723B(struct adapter *padapter, enum HAL_DEF_VARIABLE variable, void *pval)
diff --git a/drivers/staging/rts5208/Makefile b/drivers/staging/rts5208/Makefile
index 6a934c41c738..3c9e9797d3d9 100644
--- a/drivers/staging/rts5208/Makefile
+++ b/drivers/staging/rts5208/Makefile
@@ -1,7 +1,5 @@
# SPDX-License-Identifier: GPL-2.0
obj-$(CONFIG_RTS5208) := rts5208.o
-ccflags-y := -Idrivers/scsi
-
rts5208-y := rtsx.o rtsx_chip.o rtsx_transport.o rtsx_scsi.o \
rtsx_card.o general.o sd.o xd.o ms.o spi.o
diff --git a/drivers/staging/rts5208/rtsx.c b/drivers/staging/rts5208/rtsx.c
index cb95ad6fa4f9..be0053c795b7 100644
--- a/drivers/staging/rts5208/rtsx.c
+++ b/drivers/staging/rts5208/rtsx.c
@@ -831,7 +831,8 @@ static int rtsx_probe(struct pci_dev *pci,
host = scsi_host_alloc(&rtsx_host_template, sizeof(*dev));
if (!host) {
dev_err(&pci->dev, "Unable to allocate the scsi host\n");
- return -ENOMEM;
+ err = -ENOMEM;
+ goto scsi_host_alloc_fail;
}
dev = host_to_rtsx(host);
@@ -858,7 +859,7 @@ static int rtsx_probe(struct pci_dev *pci,
dev_info(&pci->dev, "Resource length: 0x%x\n",
(unsigned int)pci_resource_len(pci, 0));
dev->addr = pci_resource_start(pci, 0);
- dev->remap_addr = ioremap_nocache(dev->addr, pci_resource_len(pci, 0));
+ dev->remap_addr = ioremap(dev->addr, pci_resource_len(pci, 0));
if (!dev->remap_addr) {
dev_err(&pci->dev, "ioremap error\n");
err = -ENXIO;
@@ -971,7 +972,8 @@ ioremap_fail:
kfree(dev->chip);
chip_alloc_fail:
dev_err(&pci->dev, "%s failed\n", __func__);
-
+scsi_host_alloc_fail:
+ pci_release_regions(pci);
return err;
}
@@ -983,6 +985,7 @@ static void rtsx_remove(struct pci_dev *pci)
quiesce_and_remove_host(dev);
release_everything(dev);
+ pci_release_regions(pci);
}
/* PCI IDs */
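
The rtsx_probe() fix pairs pci_request_regions() with a release on every exit
path: the new scsi_host_alloc_fail label covers the first failure point, and
rtsx_remove() now releases the regions on the success path as well. The unwind
shape, sketched around the surrounding code (the request call sits earlier in
probe; later failures fall through the earlier labels):

	host = scsi_host_alloc(&rtsx_host_template, sizeof(*dev));
	if (!host) {
		dev_err(&pci->dev, "Unable to allocate the scsi host\n");
		err = -ENOMEM;
		goto scsi_host_alloc_fail;	/* only the regions are held */
	}
	/* ... rest of probe ... */
scsi_host_alloc_fail:
	pci_release_regions(pci);
	return err;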
diff --git a/drivers/staging/sm750fb/sm750_hw.c b/drivers/staging/sm750fb/sm750_hw.c
index ea1d3d4efbc2..b8d60701f898 100644
--- a/drivers/staging/sm750fb/sm750_hw.c
+++ b/drivers/staging/sm750fb/sm750_hw.c
@@ -50,7 +50,7 @@ int hw_sm750_map(struct sm750_dev *sm750_dev, struct pci_dev *pdev)
}
/* now map mmio and vidmem */
- sm750_dev->pvReg = ioremap_nocache(sm750_dev->vidreg_start,
+ sm750_dev->pvReg = ioremap(sm750_dev->vidreg_start,
sm750_dev->vidreg_size);
if (!sm750_dev->pvReg) {
pr_err("mmio failed\n");
diff --git a/drivers/staging/unisys/visornic/visornic_main.c b/drivers/staging/unisys/visornic/visornic_main.c
index 1d1440d43002..0433536930a9 100644
--- a/drivers/staging/unisys/visornic/visornic_main.c
+++ b/drivers/staging/unisys/visornic/visornic_main.c
@@ -1078,7 +1078,7 @@ out_save_flags:
* Queue the work and return. Make sure we have not already been informed that
* the IO Partition is gone; if so, we will have already timed-out the xmits.
*/
-static void visornic_xmit_timeout(struct net_device *netdev)
+static void visornic_xmit_timeout(struct net_device *netdev, unsigned int txqueue)
{
struct visornic_devdata *devdata = netdev_priv(netdev);
unsigned long flags;
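
These tx_timeout hunks track the net core change in which the .ndo_tx_timeout
callback gained the index of the queue that stalled. The new prototype, with
an illustrative body (drivers that do not care about the index, like the three
converted in this series, simply ignore the new argument):

	static void my_tx_timeout(struct net_device *dev, unsigned int txqueue)
	{
		netdev_warn(dev, "transmit timeout on queue %u\n", txqueue);
		/* reset the hardware / wake the queue as the driver did before */
	}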
diff --git a/drivers/staging/uwb/whc-rc.c b/drivers/staging/uwb/whc-rc.c
index 34020ed351ab..a5ab255d7d36 100644
--- a/drivers/staging/uwb/whc-rc.c
+++ b/drivers/staging/uwb/whc-rc.c
@@ -216,11 +216,11 @@ int whcrc_setup_rc_umc(struct whcrc *whcrc)
goto error_request_region;
}
- whcrc->rc_base = ioremap_nocache(whcrc->area, whcrc->rc_len);
+ whcrc->rc_base = ioremap(whcrc->area, whcrc->rc_len);
if (whcrc->rc_base == NULL) {
dev_err(dev, "can't ioremap registers (%zu bytes @ 0x%lx): %d\n",
whcrc->rc_len, whcrc->area, result);
- goto error_ioremap_nocache;
+ goto error_ioremap;
}
result = request_irq(umc_dev->irq, whcrc_irq_cb, IRQF_SHARED,
@@ -254,7 +254,7 @@ error_cmd_buffer:
free_irq(umc_dev->irq, whcrc);
error_request_irq:
iounmap(whcrc->rc_base);
-error_ioremap_nocache:
+error_ioremap:
release_mem_region(whcrc->area, whcrc->rc_len);
error_request_region:
return result;
diff --git a/drivers/staging/vc04_services/Makefile b/drivers/staging/vc04_services/Makefile
index afe43fa5a6d7..54d9e2f31916 100644
--- a/drivers/staging/vc04_services/Makefile
+++ b/drivers/staging/vc04_services/Makefile
@@ -13,5 +13,5 @@ vchiq-objs := \
obj-$(CONFIG_SND_BCM2835) += bcm2835-audio/
obj-$(CONFIG_VIDEO_BCM2835) += bcm2835-camera/
-ccflags-y += -Idrivers/staging/vc04_services -D__VCCOREVER__=0x04000000
+ccflags-y += -D__VCCOREVER__=0x04000000
diff --git a/drivers/staging/vc04_services/bcm2835-audio/bcm2835-pcm.c b/drivers/staging/vc04_services/bcm2835-audio/bcm2835-pcm.c
index 826016c3431a..33485184a98a 100644
--- a/drivers/staging/vc04_services/bcm2835-audio/bcm2835-pcm.c
+++ b/drivers/staging/vc04_services/bcm2835-audio/bcm2835-pcm.c
@@ -193,17 +193,6 @@ static int snd_bcm2835_playback_close(struct snd_pcm_substream *substream)
return 0;
}
-static int snd_bcm2835_pcm_hw_params(struct snd_pcm_substream *substream,
- struct snd_pcm_hw_params *params)
-{
- return snd_pcm_lib_malloc_pages(substream, params_buffer_bytes(params));
-}
-
-static int snd_bcm2835_pcm_hw_free(struct snd_pcm_substream *substream)
-{
- return snd_pcm_lib_free_pages(substream);
-}
-
static int snd_bcm2835_pcm_prepare(struct snd_pcm_substream *substream)
{
struct bcm2835_chip *chip = snd_pcm_substream_chip(substream);
@@ -316,9 +305,6 @@ snd_bcm2835_pcm_pointer(struct snd_pcm_substream *substream)
static const struct snd_pcm_ops snd_bcm2835_playback_ops = {
.open = snd_bcm2835_playback_open,
.close = snd_bcm2835_playback_close,
- .ioctl = snd_pcm_lib_ioctl,
- .hw_params = snd_bcm2835_pcm_hw_params,
- .hw_free = snd_bcm2835_pcm_hw_free,
.prepare = snd_bcm2835_pcm_prepare,
.trigger = snd_bcm2835_pcm_trigger,
.pointer = snd_bcm2835_pcm_pointer,
@@ -328,9 +314,6 @@ static const struct snd_pcm_ops snd_bcm2835_playback_ops = {
static const struct snd_pcm_ops snd_bcm2835_playback_spdif_ops = {
.open = snd_bcm2835_playback_spdif_open,
.close = snd_bcm2835_playback_close,
- .ioctl = snd_pcm_lib_ioctl,
- .hw_params = snd_bcm2835_pcm_hw_params,
- .hw_free = snd_bcm2835_pcm_hw_free,
.prepare = snd_bcm2835_pcm_prepare,
.trigger = snd_bcm2835_pcm_trigger,
.pointer = snd_bcm2835_pcm_pointer,
@@ -362,7 +345,7 @@ int snd_bcm2835_new_pcm(struct bcm2835_chip *chip, const char *name,
spdif ? &snd_bcm2835_playback_spdif_ops :
&snd_bcm2835_playback_ops);
- snd_pcm_lib_preallocate_pages_for_all(pcm, SNDRV_DMA_TYPE_DEV,
+ snd_pcm_set_managed_buffer_all(pcm, SNDRV_DMA_TYPE_DEV,
chip->card->dev, 128 * 1024, 128 * 1024);
if (spdif)
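
The PCM conversion above swaps manual preallocation plus hw_params/hw_free
callbacks for ALSA's managed-buffer API: snd_pcm_set_managed_buffer_all() has
the core allocate and release the DMA buffer around hw_params/hw_free, and a
NULL .ioctl now defaults to snd_pcm_lib_ioctl. An ops table is left with only
the driver-specific callbacks (names here hypothetical):

	static const struct snd_pcm_ops my_playback_ops = {
		.open    = my_open,
		.close   = my_close,
		.prepare = my_prepare,
		.trigger = my_trigger,
		.pointer = my_pointer,
	};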
diff --git a/drivers/staging/vc04_services/bcm2835-camera/bcm2835-camera.c b/drivers/staging/vc04_services/bcm2835-camera/bcm2835-camera.c
index beb6a0063bb8..1ef31a984741 100644
--- a/drivers/staging/vc04_services/bcm2835-camera/bcm2835-camera.c
+++ b/drivers/staging/vc04_services/bcm2835-camera/bcm2835-camera.c
@@ -60,6 +60,9 @@ MODULE_PARM_DESC(max_video_width, "Threshold for video mode");
module_param(max_video_height, int, 0644);
MODULE_PARM_DESC(max_video_height, "Threshold for video mode");
+/* camera instance counter */
+static atomic_t camera_instance = ATOMIC_INIT(0);
+
/* global device data array */
static struct bm2835_mmal_dev *gdev[MAX_BCM2835_CAMERAS];
@@ -1870,7 +1873,6 @@ static int bcm2835_mmal_probe(struct platform_device *pdev)
/* v4l2 core mutex used to protect all fops and v4l2 ioctls. */
mutex_init(&dev->mutex);
- dev->camera_num = camera;
dev->max_width = resolutions[camera][0];
dev->max_height = resolutions[camera][1];
@@ -1886,8 +1888,9 @@ static int bcm2835_mmal_probe(struct platform_device *pdev)
dev->capture.fmt = &formats[3]; /* JPEG */
/* v4l device registration */
- snprintf(dev->v4l2_dev.name, sizeof(dev->v4l2_dev.name),
- "%s", BM2835_MMAL_MODULE_NAME);
+ dev->camera_num = v4l2_device_set_name(&dev->v4l2_dev,
+ BM2835_MMAL_MODULE_NAME,
+ &camera_instance);
ret = v4l2_device_register(NULL, &dev->v4l2_dev);
if (ret) {
dev_err(&pdev->dev, "%s: could not register V4L2 device: %d\n",
diff --git a/drivers/staging/vc04_services/interface/vchi/vchi.h b/drivers/staging/vc04_services/interface/vchi/vchi.h
index 56b1037d8e25..ff2b960d8cac 100644
--- a/drivers/staging/vc04_services/interface/vchi/vchi.h
+++ b/drivers/staging/vc04_services/interface/vchi/vchi.h
@@ -4,8 +4,8 @@
#ifndef VCHI_H_
#define VCHI_H_
-#include "interface/vchi/vchi_cfg.h"
-#include "interface/vchi/vchi_common.h"
+#include "vchi_cfg.h"
+#include "vchi_common.h"
/******************************************************************************
* Global defs
diff --git a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_shim.c b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_shim.c
index 0ce3b08b3441..efdd3b1c7d85 100644
--- a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_shim.c
+++ b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_shim.c
@@ -3,7 +3,7 @@
#include <linux/module.h>
#include <linux/types.h>
-#include "interface/vchi/vchi.h"
+#include "../vchi/vchi.h"
#include "vchiq.h"
#include "vchiq_core.h"
diff --git a/drivers/staging/vt6655/rf.c b/drivers/staging/vt6655/rf.c
index fb2855e686a7..d6ca6e5551a7 100644
--- a/drivers/staging/vt6655/rf.c
+++ b/drivers/staging/vt6655/rf.c
@@ -758,7 +758,7 @@ bool RFvWriteWakeProgSyn(struct vnt_private *priv, unsigned char byRFType,
*/
bool RFbSetPower(struct vnt_private *priv, unsigned int rate, u16 uCH)
{
- bool ret = true;
+ bool ret;
unsigned char byPwr = 0;
unsigned char byDec = 0;
diff --git a/drivers/staging/vt6656/baseband.c b/drivers/staging/vt6656/baseband.c
index 4e651b698617..f18e059ce66b 100644
--- a/drivers/staging/vt6656/baseband.c
+++ b/drivers/staging/vt6656/baseband.c
@@ -381,8 +381,8 @@ int vnt_vt3184_init(struct vnt_private *priv)
dev_dbg(&priv->usb->dev, "RF Type %d\n", priv->rf_type);
- if ((priv->rf_type == RF_AL2230) ||
- (priv->rf_type == RF_AL2230S)) {
+ if (priv->rf_type == RF_AL2230 ||
+ priv->rf_type == RF_AL2230S) {
priv->bb_rx_conf = vnt_vt3184_al2230[10];
length = sizeof(vnt_vt3184_al2230);
addr = vnt_vt3184_al2230;
@@ -461,8 +461,8 @@ int vnt_vt3184_init(struct vnt_private *priv)
if (ret)
goto end;
- if ((priv->rf_type == RF_VT3226) ||
- (priv->rf_type == RF_VT3342A0)) {
+ if (priv->rf_type == RF_VT3226 ||
+ priv->rf_type == RF_VT3342A0) {
ret = vnt_control_out_u8(priv, MESSAGE_REQUEST_MACREG,
MAC_REG_ITRTMSET, 0x23);
if (ret)
diff --git a/drivers/staging/vt6656/device.h b/drivers/staging/vt6656/device.h
index 50e1c8918040..fe6c11266123 100644
--- a/drivers/staging/vt6656/device.h
+++ b/drivers/staging/vt6656/device.h
@@ -52,6 +52,8 @@
#define RATE_AUTO 12
#define MAX_RATE 12
+#define VNT_B_RATES (BIT(RATE_1M) | BIT(RATE_2M) |\
+ BIT(RATE_5M) | BIT(RATE_11M))
/*
* device specific
@@ -204,6 +206,22 @@ enum {
CONTEXT_BEACON_PACKET
};
+struct vnt_rx_header {
+ u32 wbk_status;
+ u8 rx_sts;
+ u8 rx_rate;
+ u16 pay_load_len;
+} __packed;
+
+struct vnt_rx_tail {
+ __le64 tsf_time;
+ u8 sq;
+ u8 new_rsr;
+ u8 rssi;
+ u8 rsr;
+ u8 sq_3;
+} __packed;
+
/* RCB (Receive Control Block) */
struct vnt_rcb {
void *priv;
@@ -262,7 +280,6 @@ struct vnt_private {
struct usb_interface *intf;
u64 tsf_time;
- u8 rx_rate;
u32 rx_buf_sz;
int mc_list_count;
diff --git a/drivers/staging/vt6656/dpc.c b/drivers/staging/vt6656/dpc.c
index 3b94e80f1d5e..821aae8ca402 100644
--- a/drivers/staging/vt6656/dpc.c
+++ b/drivers/staging/vt6656/dpc.c
@@ -29,27 +29,21 @@ int vnt_rx_data(struct vnt_private *priv, struct vnt_rcb *ptr_rcb,
struct ieee80211_hw *hw = priv->hw;
struct ieee80211_supported_band *sband;
struct sk_buff *skb;
- struct ieee80211_rx_status rx_status = { 0 };
- struct ieee80211_hdr *hdr;
- __le16 fc;
- u8 *rsr, *new_rsr, *rssi;
- __le64 *tsf_time;
+ struct ieee80211_rx_status *rx_status;
+ struct vnt_rx_header *head;
+ struct vnt_rx_tail *tail;
u32 frame_size;
- int ii, r;
- u8 *rx_rate, *sq, *sq_3;
- u32 wbk_status;
- u8 *skb_data;
- u16 *pay_load_len;
- u16 pay_load_with_padding;
+ int ii;
+ u16 rx_bitrate, pay_load_with_padding;
u8 rate_idx = 0;
- u8 rate[MAX_RATE] = {2, 4, 11, 22, 12, 18, 24, 36, 48, 72, 96, 108};
long rx_dbm;
skb = ptr_rcb->skb;
+ rx_status = IEEE80211_SKB_RXCB(skb);
/* [31:16]RcvByteCount ( not include 4-byte Status ) */
- wbk_status = *((u32 *)(skb->data));
- frame_size = wbk_status >> 16;
+ head = (struct vnt_rx_header *)skb->data;
+ frame_size = head->wbk_status >> 16;
frame_size += 4;
if (bytes_received != frame_size) {
@@ -63,106 +57,66 @@ int vnt_rx_data(struct vnt_private *priv, struct vnt_rcb *ptr_rcb,
return false;
}
- skb_data = (u8 *)skb->data;
-
- rx_rate = skb_data + 5;
-
/* real Frame Size = USBframe_size -4WbkStatus - 4RxStatus */
/* -8TSF - 4RSR - 4SQ3 - ?Padding */
/* if SQ3 the range is 24~27, if no SQ3 the range is 20~23 */
- pay_load_len = (u16 *)(skb_data + 6);
-
/*Fix hardware bug => PLCP_Length error */
- if (((bytes_received - (*pay_load_len)) > 27) ||
- ((bytes_received - (*pay_load_len)) < 24) ||
- (bytes_received < (*pay_load_len))) {
+ if (((bytes_received - head->pay_load_len) > 27) ||
+ ((bytes_received - head->pay_load_len) < 24) ||
+ (bytes_received < head->pay_load_len)) {
dev_dbg(&priv->usb->dev, "Wrong PLCP Length %x\n",
- *pay_load_len);
+ head->pay_load_len);
return false;
}
sband = hw->wiphy->bands[hw->conf.chandef.chan->band];
-
- for (r = RATE_1M; r < MAX_RATE; r++) {
- if (*rx_rate == rate[r])
- break;
- }
-
- priv->rx_rate = r;
+ rx_bitrate = head->rx_rate * 5; /* rx_rate * 5 */
for (ii = 0; ii < sband->n_bitrates; ii++) {
- if (sband->bitrates[ii].hw_value == r) {
+ if (sband->bitrates[ii].bitrate == rx_bitrate) {
rate_idx = ii;
break;
}
}
if (ii == sband->n_bitrates) {
- dev_dbg(&priv->usb->dev, "Wrong RxRate %x\n", *rx_rate);
+ dev_dbg(&priv->usb->dev, "Wrong Rx Bit Rate %d\n", rx_bitrate);
return false;
}
- pay_load_with_padding = ((*pay_load_len / 4) +
- ((*pay_load_len % 4) ? 1 : 0)) * 4;
-
- tsf_time = (__le64 *)(skb_data + 8 + pay_load_with_padding);
-
- priv->tsf_time = le64_to_cpu(*tsf_time);
+ pay_load_with_padding = ((head->pay_load_len / 4) +
+ ((head->pay_load_len % 4) ? 1 : 0)) * 4;
- if (priv->bb_type == BB_TYPE_11G) {
- sq_3 = skb_data + 8 + pay_load_with_padding + 12;
- sq = sq_3;
- } else {
- sq = skb_data + 8 + pay_load_with_padding + 8;
- sq_3 = sq;
- }
-
- new_rsr = skb_data + 8 + pay_load_with_padding + 9;
- rssi = skb_data + 8 + pay_load_with_padding + 10;
+ tail = (struct vnt_rx_tail *)(skb->data +
+ sizeof(*head) + pay_load_with_padding);
+ priv->tsf_time = le64_to_cpu(tail->tsf_time);
- rsr = skb_data + 8 + pay_load_with_padding + 11;
- if (*rsr & (RSR_IVLDTYP | RSR_IVLDLEN))
+ if (tail->rsr & (RSR_IVLDTYP | RSR_IVLDLEN))
return false;
- frame_size = *pay_load_len;
-
- vnt_rf_rssi_to_dbm(priv, *rssi, &rx_dbm);
+ vnt_rf_rssi_to_dbm(priv, tail->rssi, &rx_dbm);
priv->bb_pre_ed_rssi = (u8)rx_dbm + 1;
priv->current_rssi = priv->bb_pre_ed_rssi;
- skb_pull(skb, 8);
- skb_trim(skb, frame_size);
-
- rx_status.mactime = priv->tsf_time;
- rx_status.band = hw->conf.chandef.chan->band;
- rx_status.signal = rx_dbm;
- rx_status.flag = 0;
- rx_status.freq = hw->conf.chandef.chan->center_freq;
+ skb_pull(skb, sizeof(*head));
+ skb_trim(skb, head->pay_load_len);
- if (!(*rsr & RSR_CRCOK))
- rx_status.flag |= RX_FLAG_FAILED_FCS_CRC;
+ rx_status->mactime = priv->tsf_time;
+ rx_status->band = hw->conf.chandef.chan->band;
+ rx_status->signal = rx_dbm;
+ rx_status->flag = 0;
+ rx_status->freq = hw->conf.chandef.chan->center_freq;
- hdr = (struct ieee80211_hdr *)(skb->data);
- fc = hdr->frame_control;
+ if (!(tail->rsr & RSR_CRCOK))
+ rx_status->flag |= RX_FLAG_FAILED_FCS_CRC;
- rx_status.rate_idx = rate_idx;
-
- if (ieee80211_has_protected(fc)) {
- if (priv->local_id > REV_ID_VT3253_A1) {
- rx_status.flag |= RX_FLAG_DECRYPTED;
-
- /* Drop packet */
- if (!(*new_rsr & NEWRSR_DECRYPTOK)) {
- dev_kfree_skb(skb);
- return true;
- }
- }
- }
+ rx_status->rate_idx = rate_idx;
- memcpy(IEEE80211_SKB_RXCB(skb), &rx_status, sizeof(rx_status));
+ if (tail->new_rsr & NEWRSR_DECRYPTOK)
+ rx_status->flag |= RX_FLAG_DECRYPTED;
ieee80211_rx_irqsafe(priv->hw, skb);
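
The dpc.c rework above replaces a dozen hand-counted byte offsets with the two
packed overlay structs from device.h, so the parse reduces to field access. A
sketch of the overlay, assuming the on-wire layout header | payload | pad-to-4
| tail that the driver relies on:

	struct vnt_rx_header *head = (struct vnt_rx_header *)skb->data;
	u16 padded = round_up(head->pay_load_len, 4);	/* same as ((len / 4) + (len % 4 ? 1 : 0)) * 4 */
	struct vnt_rx_tail *tail = (struct vnt_rx_tail *)
				   (skb->data + sizeof(*head) + padded);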
diff --git a/drivers/staging/vt6656/firmware.c b/drivers/staging/vt6656/firmware.c
index 60a00af250bf..70358d427211 100644
--- a/drivers/staging/vt6656/firmware.c
+++ b/drivers/staging/vt6656/firmware.c
@@ -30,7 +30,6 @@ int vnt_download_firmware(struct vnt_private *priv)
{
struct device *dev = &priv->usb->dev;
const struct firmware *fw;
- void *buffer = NULL;
u16 length;
int ii;
int ret = 0;
@@ -44,26 +43,17 @@ int vnt_download_firmware(struct vnt_private *priv)
goto end;
}
- buffer = kmalloc(FIRMWARE_CHUNK_SIZE, GFP_KERNEL);
- if (!buffer) {
- ret = -ENOMEM;
- goto free_fw;
- }
-
for (ii = 0; ii < fw->size; ii += FIRMWARE_CHUNK_SIZE) {
length = min_t(int, fw->size - ii, FIRMWARE_CHUNK_SIZE);
- memcpy(buffer, fw->data + ii, length);
ret = vnt_control_out(priv, 0, 0x1200 + ii, 0x0000, length,
- buffer);
+ fw->data + ii);
if (ret)
- goto free_buffer;
+ goto free_fw;
dev_dbg(dev, "Download firmware...%d %zu\n", ii, fw->size);
}
-free_buffer:
- kfree(buffer);
free_fw:
release_firmware(fw);
end:
diff --git a/drivers/staging/vt6656/int.c b/drivers/staging/vt6656/int.c
index f40947955675..af215860be4c 100644
--- a/drivers/staging/vt6656/int.c
+++ b/drivers/staging/vt6656/int.c
@@ -99,9 +99,11 @@ static int vnt_int_report_rate(struct vnt_private *priv, u8 pkt_no, u8 tsr)
info->status.rates[0].count = tx_retry;
- if (!(tsr & (TSR_TMO | TSR_RETRYTMO))) {
+ if (!(tsr & TSR_TMO)) {
info->status.rates[0].idx = idx;
- info->flags |= IEEE80211_TX_STAT_ACK;
+
+ if (!(info->flags & IEEE80211_TX_CTL_NO_ACK))
+ info->flags |= IEEE80211_TX_STAT_ACK;
}
ieee80211_tx_status_irqsafe(priv->hw, context->skb);
diff --git a/drivers/staging/vt6656/main_usb.c b/drivers/staging/vt6656/main_usb.c
index 9cb924c54571..5e48b3ddb94c 100644
--- a/drivers/staging/vt6656/main_usb.c
+++ b/drivers/staging/vt6656/main_usb.c
@@ -1015,6 +1015,7 @@ vt6656_probe(struct usb_interface *intf, const struct usb_device_id *id)
ieee80211_hw_set(priv->hw, RX_INCLUDES_FCS);
ieee80211_hw_set(priv->hw, REPORTS_TX_ACK_STATUS);
ieee80211_hw_set(priv->hw, SUPPORTS_PS);
+ ieee80211_hw_set(priv->hw, PS_NULLFUNC_STACK);
priv->hw->max_signal = 100;
diff --git a/drivers/staging/vt6656/rxtx.c b/drivers/staging/vt6656/rxtx.c
index f9020a4f7bbf..29caba728906 100644
--- a/drivers/staging/vt6656/rxtx.c
+++ b/drivers/staging/vt6656/rxtx.c
@@ -278,11 +278,9 @@ static u16 vnt_rxtx_datahead_g(struct vnt_usb_send_context *tx_context,
PK_TYPE_11B, &buf->b);
/* Get Duration and TimeStamp */
- if (ieee80211_is_pspoll(hdr->frame_control)) {
- __le16 dur = cpu_to_le16(priv->current_aid | BIT(14) | BIT(15));
-
- buf->duration_a = dur;
- buf->duration_b = dur;
+ if (ieee80211_is_nullfunc(hdr->frame_control)) {
+ buf->duration_a = hdr->duration_id;
+ buf->duration_b = hdr->duration_id;
} else {
buf->duration_a = vnt_get_duration_le(priv,
tx_context->pkt_type, need_ack);
@@ -371,10 +369,8 @@ static u16 vnt_rxtx_datahead_ab(struct vnt_usb_send_context *tx_context,
tx_context->pkt_type, &buf->ab);
/* Get Duration and TimeStampOff */
- if (ieee80211_is_pspoll(hdr->frame_control)) {
- __le16 dur = cpu_to_le16(priv->current_aid | BIT(14) | BIT(15));
-
- buf->duration = dur;
+ if (ieee80211_is_nullfunc(hdr->frame_control)) {
+ buf->duration = hdr->duration_id;
} else {
buf->duration = vnt_get_duration_le(priv, tx_context->pkt_type,
need_ack);
@@ -815,10 +811,14 @@ int vnt_tx_packet(struct vnt_private *priv, struct sk_buff *skb)
if (info->band == NL80211_BAND_5GHZ) {
pkt_type = PK_TYPE_11A;
} else {
- if (tx_rate->flags & IEEE80211_TX_RC_USE_CTS_PROTECT)
- pkt_type = PK_TYPE_11GB;
- else
- pkt_type = PK_TYPE_11GA;
+ if (tx_rate->flags & IEEE80211_TX_RC_USE_CTS_PROTECT) {
+ if (priv->basic_rates & VNT_B_RATES)
+ pkt_type = PK_TYPE_11GB;
+ else
+ pkt_type = PK_TYPE_11GA;
+ } else {
+ pkt_type = PK_TYPE_11A;
+ }
}
} else {
pkt_type = PK_TYPE_11B;
diff --git a/drivers/staging/vt6656/usbpipe.c b/drivers/staging/vt6656/usbpipe.c
index d977d4777e4f..7bfccc48a366 100644
--- a/drivers/staging/vt6656/usbpipe.c
+++ b/drivers/staging/vt6656/usbpipe.c
@@ -34,7 +34,7 @@
#define USB_CTL_WAIT 500 /* ms */
int vnt_control_out(struct vnt_private *priv, u8 request, u16 value,
- u16 index, u16 length, u8 *buffer)
+ u16 index, u16 length, const u8 *buffer)
{
int ret = 0;
u8 *usb_buffer;
diff --git a/drivers/staging/vt6656/usbpipe.h b/drivers/staging/vt6656/usbpipe.h
index b65d9c01a211..4e3341bc3221 100644
--- a/drivers/staging/vt6656/usbpipe.h
+++ b/drivers/staging/vt6656/usbpipe.h
@@ -21,7 +21,7 @@
#define VNT_REG_BLOCK_SIZE 64
int vnt_control_out(struct vnt_private *priv, u8 request, u16 value,
- u16 index, u16 length, u8 *buffer);
+ u16 index, u16 length, const u8 *buffer);
int vnt_control_in(struct vnt_private *priv, u8 request, u16 value,
u16 index, u16 length, u8 *buffer);
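
The const qualifier can propagate into vnt_control_out() because the function
never writes through its input: usbpipe.c copies the bytes into a heap bounce
buffer before submitting the USB control transfer, which must not use caller
memory that may not be DMA-safe. That same copy is what lets firmware.c above
drop its intermediate kmalloc and pass fw->data directly. Sketch of the copy
(assuming kmemdup(), consistent with the u8 *usb_buffer seen in the hunk):

	u8 *usb_buffer = kmemdup(buffer, length, GFP_KERNEL);
	if (!usb_buffer)
		return -ENOMEM;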
diff --git a/drivers/staging/wfx/TODO b/drivers/staging/wfx/TODO
index e44772289af8..efcb7c6a5aa7 100644
--- a/drivers/staging/wfx/TODO
+++ b/drivers/staging/wfx/TODO
@@ -1,17 +1,60 @@
This is a list of things that need to be done to get this driver out of the
staging directory.
- - I have to take a decision about secure link support. I can:
- - drop completely
- - keep it in an external patch (my preferred option)
- - replace call to mbedtls with kernel crypto API (necessitate a
- bunch of work)
- - pull mbedtls in kernel (non-realistic)
-
- - mac80211 interface does not (yet) have expected quality to be placed
- outside of staging:
- - Some processings are redundant with mac80211 ones
- - Many members from wfx_dev/wfx_vif can be retrieved from mac80211
- structures
- - Some functions are too complex
- - ...
+ - All structures defined in hif_api_*.h are intended to be sent to / received
+   from the hardware. All their members should be declared __le32 or __le16.
+ See:
+ https://lore.kernel.org/lkml/20191111202852.GX26530@ZenIV.linux.org.uk
+
+ - Once the previous item is done, it will be possible to audit the driver
+   with `sparse'. It will probably find plenty of problems on big endian
+   architectures.
+
+ - hif_api_*.h have been imported from the firmware code. Some of the
+   structures are never used in the driver.
+
+ - The driver tries to maintain the power save status of the stations.
+   However, this work is already done by mac80211. sta_asleep_mask and
+   pspoll_mask should be dropped.
+
+ - wfx_tx_queues_get() should be reworked. It currently tries to compute the
+   QoS policy itself. However, the firmware already does that job, and would
+   prefer to have a few packets in each queue, choosing itself which queue
+   to use.
+
+ - As suggested by Felix, rate control could be improved following this idea:
+ https://lore.kernel.org/lkml/3099559.gv3Q75KnN1@pc-42/
+
+ - When the driver is about to lose the BSS, it forges its own Null Func
+   request (see wfx_cqm_bssloss_sm()). It should use the mechanism provided
+   by mac80211.
+
+ - The AP is currently set up after a call to wfx_bss_info_changed(), yet
+   ieee80211_ops provides a start_ap() callback.
+
+ - The current process for joining a network is incredibly complex. It
+   should be reworked.
+
+ - Monitor mode is not implemented despite being mandated by mac80211.
+
+ - "compatible" value are not correct. They should be "vendor,chip". See:
+ https://lore.kernel.org/driverdev-devel/5226570.CMH5hVlZcI@pc-42
+
+ - The "state" field from wfx_vif should be replaced by "vif->type".
+
+ - It seems that wfx_upload_keys() is useless.
+
+ - "event_queue" from wfx_vif seems overkill. These event are rare and they
+ probably could be handled in a simpler fashion.
+
+ - Feature called "secure link" should be either developed (using kernel
+ crypto API) or dropped.
+
+ - In wfx_cmd_send(), "async" allow to send command without waiting the reply.
+ It may help in some situation, but it is not yet used. In add, it may cause
+ some trouble:
+ https://lore.kernel.org/driverdev-devel/alpine.DEB.2.21.1910041317381.2992@hadrien/
+ So, fix it (by replacing the mutex with a semaphore) or drop it.
+
+ - The chip supports P2P, but the driver does not implement it.
+
+ - The chip supports a kind of mesh, but the driver does not implement it.
diff --git a/drivers/staging/wfx/bh.c b/drivers/staging/wfx/bh.c
index 2432ba95c2f5..983c41d1fe7c 100644
--- a/drivers/staging/wfx/bh.c
+++ b/drivers/staging/wfx/bh.c
@@ -271,8 +271,7 @@ static void bh_work(struct work_struct *work)
if (last_op_is_rx)
ack_sdio_data(wdev);
- if (!wdev->hif.tx_buffers_used && !work_pending(work) &&
- !atomic_read(&wdev->scan_in_progress)) {
+ if (!wdev->hif.tx_buffers_used && !work_pending(work)) {
device_release(wdev);
release_chip = true;
}
diff --git a/drivers/staging/wfx/bus_spi.c b/drivers/staging/wfx/bus_spi.c
index ab0cda1e124f..40bc33035de2 100644
--- a/drivers/staging/wfx/bus_spi.c
+++ b/drivers/staging/wfx/bus_spi.c
@@ -107,6 +107,8 @@ static int wfx_spi_copy_to_io(void *priv, unsigned int addr,
cpu_to_le16s(&regaddr);
+ // Register address and CONFIG content always use 16bit big endian
+ // ("BADC" order)
if (bus->need_swab)
swab16s(&regaddr);
if (bus->need_swab && addr == WFX_REG_CONFIG)
@@ -183,7 +185,7 @@ static int wfx_spi_probe(struct spi_device *func)
if (func->bits_per_word != 16 && func->bits_per_word != 8)
dev_warn(&func->dev, "unusual bits/word value: %d\n",
func->bits_per_word);
- if (func->max_speed_hz > 49000000)
+ if (func->max_speed_hz > 50000000)
dev_warn(&func->dev, "%dHz is a very high speed\n",
func->max_speed_hz);
@@ -223,8 +225,7 @@ static int wfx_spi_probe(struct spi_device *func)
return ret;
}
-/* Disconnect Function to be called by SPI stack when device is disconnected */
-static int wfx_spi_disconnect(struct spi_device *func)
+static int wfx_spi_remove(struct spi_device *func)
{
struct wfx_spi_priv *bus = spi_get_drvdata(func);
@@ -263,5 +264,5 @@ struct spi_driver wfx_spi_driver = {
},
.id_table = wfx_spi_id,
.probe = wfx_spi_probe,
- .remove = wfx_spi_disconnect,
+ .remove = wfx_spi_remove,
};
diff --git a/drivers/staging/wfx/data_rx.c b/drivers/staging/wfx/data_rx.c
index e7fcce8d0cc4..5d198457c6ce 100644
--- a/drivers/staging/wfx/data_rx.c
+++ b/drivers/staging/wfx/data_rx.c
@@ -13,42 +13,9 @@
#include "bh.h"
#include "sta.h"
-static int wfx_handle_pspoll(struct wfx_vif *wvif, struct sk_buff *skb)
-{
- struct ieee80211_sta *sta;
- struct ieee80211_pspoll *pspoll = (struct ieee80211_pspoll *)skb->data;
- int link_id = 0;
- u32 pspoll_mask = 0;
- int i;
-
- if (wvif->state != WFX_STATE_AP)
- return 1;
- if (!ether_addr_equal(wvif->vif->addr, pspoll->bssid))
- return 1;
-
- rcu_read_lock();
- sta = ieee80211_find_sta(wvif->vif, pspoll->ta);
- if (sta)
- link_id = ((struct wfx_sta_priv *)&sta->drv_priv)->link_id;
- rcu_read_unlock();
- if (link_id)
- pspoll_mask = BIT(link_id);
- else
- return 1;
-
- wvif->pspoll_mask |= pspoll_mask;
- /* Do not report pspols if data for given link id is queued already. */
- for (i = 0; i < IEEE80211_NUM_ACS; ++i) {
- if (wfx_tx_queue_get_num_queued(&wvif->wdev->tx_queue[i],
- pspoll_mask)) {
- wfx_bh_request_tx(wvif->wdev);
- return 1;
- }
- }
- return 0;
-}
-
-static int wfx_drop_encrypt_data(struct wfx_dev *wdev, struct hif_ind_rx *arg, struct sk_buff *skb)
+static int wfx_drop_encrypt_data(struct wfx_dev *wdev,
+ const struct hif_ind_rx *arg,
+ struct sk_buff *skb)
{
struct ieee80211_hdr *frame = (struct ieee80211_hdr *) skb->data;
size_t hdrlen = ieee80211_hdrlen(frame->frame_control);
@@ -98,15 +65,12 @@ static int wfx_drop_encrypt_data(struct wfx_dev *wdev, struct hif_ind_rx *arg, s
}
-void wfx_rx_cb(struct wfx_vif *wvif, struct hif_ind_rx *arg,
- struct sk_buff *skb)
+void wfx_rx_cb(struct wfx_vif *wvif,
+ const struct hif_ind_rx *arg, struct sk_buff *skb)
{
- int link_id = arg->rx_flags.peer_sta_id;
struct ieee80211_rx_status *hdr = IEEE80211_SKB_RXCB(skb);
struct ieee80211_hdr *frame = (struct ieee80211_hdr *)skb->data;
struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *)skb->data;
- struct wfx_link_entry *entry = NULL;
- bool early_data = false;
memset(hdr, 0, sizeof(*hdr));
@@ -116,14 +80,6 @@ void wfx_rx_cb(struct wfx_vif *wvif, struct hif_ind_rx *arg,
ieee80211_is_beacon(frame->frame_control)))
goto drop;
- if (link_id && link_id <= WFX_MAX_STA_IN_AP_MODE) {
- entry = &wvif->link_id_db[link_id - 1];
- entry->timestamp = jiffies;
- if (entry->status == WFX_LINK_SOFT &&
- ieee80211_is_data(frame->frame_control))
- early_data = true;
- }
-
if (arg->status == HIF_STATUS_MICFAILURE)
hdr->flag |= RX_FLAG_MMIC_ERROR;
else if (arg->status)
@@ -134,10 +90,6 @@ void wfx_rx_cb(struct wfx_vif *wvif, struct hif_ind_rx *arg,
goto drop;
}
- if (ieee80211_is_pspoll(frame->frame_control))
- if (wfx_handle_pspoll(wvif, skb))
- goto drop;
-
hdr->band = NL80211_BAND_2GHZ;
hdr->freq = ieee80211_channel_to_frequency(arg->channel_number,
hdr->band);
@@ -171,20 +123,6 @@ void wfx_rx_cb(struct wfx_vif *wvif, struct hif_ind_rx *arg,
!arg->status && wvif->vif &&
ether_addr_equal(ieee80211_get_SA(frame),
wvif->vif->bss_conf.bssid)) {
- const u8 *tim_ie;
- u8 *ies = mgmt->u.beacon.variable;
- size_t ies_len = skb->len - (ies - skb->data);
-
- tim_ie = cfg80211_find_ie(WLAN_EID_TIM, ies, ies_len);
- if (tim_ie) {
- struct ieee80211_tim_ie *tim = (struct ieee80211_tim_ie *)&tim_ie[2];
-
- if (wvif->dtim_period != tim->dtim_period) {
- wvif->dtim_period = tim->dtim_period;
- schedule_work(&wvif->set_beacon_wakeup_period_work);
- }
- }
-
/* Disable beacon filter once we're associated... */
if (wvif->disable_beacon_filter &&
(wvif->vif->bss_conf.assoc ||
@@ -193,18 +131,7 @@ void wfx_rx_cb(struct wfx_vif *wvif, struct hif_ind_rx *arg,
schedule_work(&wvif->update_filtering_work);
}
}
-
- if (early_data) {
- spin_lock_bh(&wvif->ps_state_lock);
- /* Double-check status with lock held */
- if (entry->status == WFX_LINK_SOFT)
- skb_queue_tail(&entry->rx_queue, skb);
- else
- ieee80211_rx_irqsafe(wvif->wdev->hw, skb);
- spin_unlock_bh(&wvif->ps_state_lock);
- } else {
- ieee80211_rx_irqsafe(wvif->wdev->hw, skb);
- }
+ ieee80211_rx_irqsafe(wvif->wdev->hw, skb);
return;
diff --git a/drivers/staging/wfx/data_rx.h b/drivers/staging/wfx/data_rx.h
index a50ce352bc5e..61c28bfd2a37 100644
--- a/drivers/staging/wfx/data_rx.h
+++ b/drivers/staging/wfx/data_rx.h
@@ -13,7 +13,7 @@
struct wfx_vif;
struct sk_buff;
-void wfx_rx_cb(struct wfx_vif *wvif, struct hif_ind_rx *arg,
- struct sk_buff *skb);
+void wfx_rx_cb(struct wfx_vif *wvif,
+ const struct hif_ind_rx *arg, struct sk_buff *skb);
#endif /* WFX_DATA_RX_H */
diff --git a/drivers/staging/wfx/data_tx.c b/drivers/staging/wfx/data_tx.c
index b13d7341f8bb..20f4740734f2 100644
--- a/drivers/staging/wfx/data_tx.c
+++ b/drivers/staging/wfx/data_tx.c
@@ -17,7 +17,6 @@
#include "hif_tx_mib.h"
#define WFX_INVALID_RATE_ID 15
-#define WFX_LINK_ID_NO_ASSOC 15
#define WFX_LINK_ID_GC_TIMEOUT ((unsigned long)(10 * HZ))
static int wfx_get_hw_rate(struct wfx_dev *wdev,
@@ -169,7 +168,8 @@ static int wfx_tx_policy_get(struct wfx_vif *wvif,
wfx_tx_policy_build(wvif, &wanted, rates);
spin_lock_bh(&cache->lock);
- if (WARN_ON(list_empty(&cache->free))) {
+ if (list_empty(&cache->free)) {
+ WARN(1, "unable to get a valid Tx policy");
spin_unlock_bh(&cache->lock);
return WFX_INVALID_RATE_ID;
}
@@ -216,38 +216,25 @@ static void wfx_tx_policy_put(struct wfx_vif *wvif, int idx)
static int wfx_tx_policy_upload(struct wfx_vif *wvif)
{
+ struct tx_policy *policies = wvif->tx_policy_cache.cache;
+ u8 tmp_rates[12];
int i;
- struct tx_policy_cache *cache = &wvif->tx_policy_cache;
- struct hif_mib_set_tx_rate_retry_policy *arg =
- kzalloc(struct_size(arg,
- tx_rate_retry_policy,
- HIF_MIB_NUM_TX_RATE_RETRY_POLICIES),
- GFP_KERNEL);
- struct hif_mib_tx_rate_retry_policy *dst;
- spin_lock_bh(&cache->lock);
- /* Upload only modified entries. */
- for (i = 0; i < HIF_MIB_NUM_TX_RATE_RETRY_POLICIES; ++i) {
- struct tx_policy *src = &cache->cache[i];
-
- if (!src->uploaded && memzcmp(src->rates, sizeof(src->rates))) {
- dst = arg->tx_rate_retry_policy +
- arg->num_tx_rate_policies;
-
- dst->policy_index = i;
- dst->short_retry_count = 255;
- dst->long_retry_count = 255;
- dst->first_rate_sel = 1;
- dst->terminate = 1;
- dst->count_init = 1;
- memcpy(&dst->rates, src->rates, sizeof(src->rates));
- src->uploaded = true;
- arg->num_tx_rate_policies++;
+ do {
+ spin_lock_bh(&wvif->tx_policy_cache.lock);
+ for (i = 0; i < HIF_MIB_NUM_TX_RATE_RETRY_POLICIES; ++i)
+ if (!policies[i].uploaded &&
+ memzcmp(policies[i].rates, sizeof(policies[i].rates)))
+ break;
+ if (i < HIF_MIB_NUM_TX_RATE_RETRY_POLICIES) {
+ policies[i].uploaded = 1;
+ memcpy(tmp_rates, policies[i].rates, sizeof(tmp_rates));
+ spin_unlock_bh(&wvif->tx_policy_cache.lock);
+ hif_set_tx_rate_retry_policy(wvif, i, tmp_rates);
+ } else {
+ spin_unlock_bh(&wvif->tx_policy_cache.lock);
}
- }
- spin_unlock_bh(&cache->lock);
- hif_set_tx_rate_retry_policy(wvif, arg);
- kfree(arg);
+ } while (i < HIF_MIB_NUM_TX_RATE_RETRY_POLICIES);
return 0;
}
@@ -277,164 +264,6 @@ void wfx_tx_policy_init(struct wfx_vif *wvif)
list_add(&cache->cache[i].link, &cache->free);
}
-/* Link ID related functions */
-
-static int wfx_alloc_link_id(struct wfx_vif *wvif, const u8 *mac)
-{
- int i, ret = 0;
- unsigned long max_inactivity = 0;
- unsigned long now = jiffies;
-
- spin_lock_bh(&wvif->ps_state_lock);
- for (i = 0; i < WFX_MAX_STA_IN_AP_MODE; ++i) {
- if (!wvif->link_id_db[i].status) {
- ret = i + 1;
- break;
- } else if (wvif->link_id_db[i].status != WFX_LINK_HARD &&
- !wvif->wdev->tx_queue_stats.link_map_cache[i + 1]) {
- unsigned long inactivity =
- now - wvif->link_id_db[i].timestamp;
-
- if (inactivity < max_inactivity)
- continue;
- max_inactivity = inactivity;
- ret = i + 1;
- }
- }
-
- if (ret) {
- struct wfx_link_entry *entry = &wvif->link_id_db[ret - 1];
-
- entry->status = WFX_LINK_RESERVE;
- ether_addr_copy(entry->mac, mac);
- memset(&entry->buffered, 0, WFX_MAX_TID);
- skb_queue_head_init(&entry->rx_queue);
- wfx_tx_lock(wvif->wdev);
-
- if (!schedule_work(&wvif->link_id_work))
- wfx_tx_unlock(wvif->wdev);
- } else {
- dev_info(wvif->wdev->dev, "no more link-id available\n");
- }
- spin_unlock_bh(&wvif->ps_state_lock);
- return ret;
-}
-
-int wfx_find_link_id(struct wfx_vif *wvif, const u8 *mac)
-{
- int i, ret = 0;
-
- spin_lock_bh(&wvif->ps_state_lock);
- for (i = 0; i < WFX_MAX_STA_IN_AP_MODE; ++i) {
- if (ether_addr_equal(mac, wvif->link_id_db[i].mac) &&
- wvif->link_id_db[i].status) {
- wvif->link_id_db[i].timestamp = jiffies;
- ret = i + 1;
- break;
- }
- }
- spin_unlock_bh(&wvif->ps_state_lock);
- return ret;
-}
-
-static int wfx_map_link(struct wfx_vif *wvif,
- struct wfx_link_entry *link_entry, int sta_id)
-{
- int ret;
-
- ret = hif_map_link(wvif, link_entry->mac, 0, sta_id);
-
- if (ret == 0)
- /* Save the MAC address currently associated with the peer
- * for future unmap request
- */
- ether_addr_copy(link_entry->old_mac, link_entry->mac);
-
- return ret;
-}
-
-int wfx_unmap_link(struct wfx_vif *wvif, int sta_id)
-{
- u8 *mac_addr = NULL;
-
- if (sta_id)
- mac_addr = wvif->link_id_db[sta_id - 1].old_mac;
-
- return hif_map_link(wvif, mac_addr, 1, sta_id);
-}
-
-void wfx_link_id_gc_work(struct work_struct *work)
-{
- struct wfx_vif *wvif =
- container_of(work, struct wfx_vif, link_id_gc_work.work);
- unsigned long now = jiffies;
- unsigned long next_gc = -1;
- long ttl;
- u32 mask;
- int i;
-
- if (wvif->state != WFX_STATE_AP)
- return;
-
- wfx_tx_lock_flush(wvif->wdev);
- spin_lock_bh(&wvif->ps_state_lock);
- for (i = 0; i < WFX_MAX_STA_IN_AP_MODE; ++i) {
- bool need_reset = false;
-
- mask = BIT(i + 1);
- if (wvif->link_id_db[i].status == WFX_LINK_RESERVE ||
- (wvif->link_id_db[i].status == WFX_LINK_HARD &&
- !(wvif->link_id_map & mask))) {
- if (wvif->link_id_map & mask) {
- wvif->sta_asleep_mask &= ~mask;
- wvif->pspoll_mask &= ~mask;
- need_reset = true;
- }
- wvif->link_id_map |= mask;
- if (wvif->link_id_db[i].status != WFX_LINK_HARD)
- wvif->link_id_db[i].status = WFX_LINK_SOFT;
-
- spin_unlock_bh(&wvif->ps_state_lock);
- if (need_reset)
- wfx_unmap_link(wvif, i + 1);
- wfx_map_link(wvif, &wvif->link_id_db[i], i + 1);
- next_gc = min(next_gc, WFX_LINK_ID_GC_TIMEOUT);
- spin_lock_bh(&wvif->ps_state_lock);
- } else if (wvif->link_id_db[i].status == WFX_LINK_SOFT) {
- ttl = wvif->link_id_db[i].timestamp - now +
- WFX_LINK_ID_GC_TIMEOUT;
- if (ttl <= 0) {
- need_reset = true;
- wvif->link_id_db[i].status = WFX_LINK_OFF;
- wvif->link_id_map &= ~mask;
- wvif->sta_asleep_mask &= ~mask;
- wvif->pspoll_mask &= ~mask;
- spin_unlock_bh(&wvif->ps_state_lock);
- wfx_unmap_link(wvif, i + 1);
- spin_lock_bh(&wvif->ps_state_lock);
- } else {
- next_gc = min_t(unsigned long, next_gc, ttl);
- }
- }
- if (need_reset)
- skb_queue_purge(&wvif->link_id_db[i].rx_queue);
- }
- spin_unlock_bh(&wvif->ps_state_lock);
- if (next_gc != -1)
- schedule_delayed_work(&wvif->link_id_gc_work, next_gc);
- wfx_tx_unlock(wvif->wdev);
-}
-
-void wfx_link_id_work(struct work_struct *work)
-{
- struct wfx_vif *wvif =
- container_of(work, struct wfx_vif, link_id_work);
-
- wfx_tx_flush(wvif->wdev);
- wfx_link_id_gc_work(&wvif->link_id_gc_work.work);
- wfx_tx_unlock(wvif->wdev);
-}
-
/* Tx implementation */
static bool ieee80211_is_action_back(struct ieee80211_hdr *hdr)
@@ -453,29 +282,21 @@ static void wfx_tx_manage_pm(struct wfx_vif *wvif, struct ieee80211_hdr *hdr,
struct ieee80211_sta *sta)
{
u32 mask = ~BIT(tx_priv->raw_link_id);
+ struct wfx_sta_priv *sta_priv;
+ int tid = ieee80211_get_tid(hdr);
spin_lock_bh(&wvif->ps_state_lock);
- if (ieee80211_is_auth(hdr->frame_control)) {
+ if (ieee80211_is_auth(hdr->frame_control))
wvif->sta_asleep_mask &= mask;
- wvif->pspoll_mask &= mask;
- }
-
- if (tx_priv->link_id == WFX_LINK_ID_AFTER_DTIM &&
- !wvif->mcast_buffered) {
- wvif->mcast_buffered = true;
- if (wvif->sta_asleep_mask)
- schedule_work(&wvif->mcast_start_work);
- }
-
- if (tx_priv->raw_link_id) {
- wvif->link_id_db[tx_priv->raw_link_id - 1].timestamp = jiffies;
- if (tx_priv->tid < WFX_MAX_TID)
- wvif->link_id_db[tx_priv->raw_link_id - 1].buffered[tx_priv->tid]++;
- }
spin_unlock_bh(&wvif->ps_state_lock);
- if (sta)
- ieee80211_sta_set_buffered(sta, tx_priv->tid, true);
+ if (sta) {
+ sta_priv = (struct wfx_sta_priv *)&sta->drv_priv;
+ spin_lock_bh(&sta_priv->lock);
+ sta_priv->buffered[tid]++;
+ ieee80211_sta_set_buffered(sta, tid, true);
+ spin_unlock_bh(&sta_priv->lock);
+ }
}
static u8 wfx_tx_get_raw_link_id(struct wfx_vif *wvif,
@@ -485,7 +306,6 @@ static u8 wfx_tx_get_raw_link_id(struct wfx_vif *wvif,
struct wfx_sta_priv *sta_priv =
sta ? (struct wfx_sta_priv *) &sta->drv_priv : NULL;
const u8 *da = ieee80211_get_DA(hdr);
- int ret;
if (sta_priv && sta_priv->link_id)
return sta_priv->link_id;
@@ -493,14 +313,7 @@ static u8 wfx_tx_get_raw_link_id(struct wfx_vif *wvif,
return 0;
if (is_multicast_ether_addr(da))
return 0;
- ret = wfx_find_link_id(wvif, da);
- if (!ret)
- ret = wfx_alloc_link_id(wvif, da);
- if (!ret) {
- dev_err(wvif->wdev->dev, "no more link-id available\n");
- return WFX_LINK_ID_NO_ASSOC;
- }
- return ret;
+ return WFX_LINK_ID_NO_ASSOC;
}
static void wfx_tx_fixup_rates(struct ieee80211_tx_rate *rates)
@@ -597,17 +410,6 @@ static struct hif_ht_tx_parameters wfx_tx_get_tx_parms(struct wfx_dev *wdev, str
return ret;
}
-static u8 wfx_tx_get_tid(struct ieee80211_hdr *hdr)
-{
- // FIXME: ieee80211_get_tid(hdr) should be sufficient for all cases.
- if (!ieee80211_is_data(hdr->frame_control))
- return WFX_MAX_TID;
- if (ieee80211_is_data_qos(hdr->frame_control))
- return ieee80211_get_tid(hdr);
- else
- return 0;
-}
-
static int wfx_tx_get_icv_len(struct ieee80211_key_conf *hw_key)
{
int mic_space;
@@ -639,7 +441,6 @@ static int wfx_tx_inner(struct wfx_vif *wvif, struct ieee80211_sta *sta,
memset(tx_info->rate_driver_data, 0, sizeof(struct wfx_tx_priv));
// Fill tx_priv
tx_priv = (struct wfx_tx_priv *)tx_info->rate_driver_data;
- tx_priv->tid = wfx_tx_get_tid(hdr);
tx_priv->raw_link_id = wfx_tx_get_raw_link_id(wvif, sta, hdr);
tx_priv->link_id = tx_priv->raw_link_id;
if (ieee80211_has_protected(hdr->frame_control))
@@ -668,9 +469,15 @@ static int wfx_tx_inner(struct wfx_vif *wvif, struct ieee80211_sta *sta,
// Fill tx request
req = (struct hif_req_tx *)hif_msg->body;
- req->packet_id = queue_id << 16 |
- IEEE80211_SEQ_TO_SN(le16_to_cpu(hdr->seq_ctrl));
+ // packet_id just needs to be unique on the device. 32 bits are more than
+ // necessary for that task, so we take advantage of them to add some extra
+ // data for debugging.
+ req->packet_id = queue_id << 28 |
+ IEEE80211_SEQ_TO_SN(le16_to_cpu(hdr->seq_ctrl)) << 16 |
+ (atomic_add_return(1, &wvif->wdev->packet_id) & 0xFFFF);
req->data_flags.fc_offset = offset;
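+ // IEEE80211_TX_CTL_SEND_AFTER_DTIM marks bufferable bc/mc frames that
+ // mac80211 wants transmitted right after the next DTIM beacon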
+ if (tx_info->flags & IEEE80211_TX_CTL_SEND_AFTER_DTIM)
+ req->data_flags.after_dtim = 1;
req->queue_id.peer_sta_id = tx_priv->raw_link_id;
// Queue index are inverted between firmware and Linux
req->queue_id.queue_id = 3 - queue_id;
@@ -680,6 +487,8 @@ static int wfx_tx_inner(struct wfx_vif *wvif, struct ieee80211_sta *sta,
// Auxiliary operations
wfx_tx_manage_pm(wvif, hdr, tx_priv, sta);
wfx_tx_queue_put(wvif->wdev, &wvif->wdev->tx_queue[queue_id], skb);
+ if (tx_info->flags & IEEE80211_TX_CTL_SEND_AFTER_DTIM)
+ schedule_work(&wvif->update_tim_work);
wfx_bh_request_tx(wvif->wdev);
return 0;
}
@@ -719,7 +528,7 @@ drop:
ieee80211_tx_status_irqsafe(wdev->hw, skb);
}
-void wfx_tx_confirm_cb(struct wfx_vif *wvif, struct hif_cnf_tx *arg)
+void wfx_tx_confirm_cb(struct wfx_vif *wvif, const struct hif_cnf_tx *arg)
{
int i;
int tx_count;
@@ -791,14 +600,11 @@ void wfx_tx_confirm_cb(struct wfx_vif *wvif, struct hif_cnf_tx *arg)
else
tx_info->flags |= IEEE80211_TX_STAT_ACK;
} else if (arg->status == HIF_REQUEUE) {
- /* "REQUEUE" means "implicit suspend" */
- struct hif_ind_suspend_resume_tx suspend = {
- .suspend_resume_flags.resume = 0,
- .suspend_resume_flags.bc_mc_only = 1,
- };
-
WARN(!arg->tx_result_flags.requeue, "incoherent status and result_flags");
- wfx_suspend_resume(wvif, &suspend);
+ if (tx_info->flags & IEEE80211_TX_CTL_SEND_AFTER_DTIM) {
+ wvif->after_dtim_tx_allowed = false; // DTIM period elapsed
+ schedule_work(&wvif->update_tim_work);
+ }
tx_info->flags |= IEEE80211_TX_STAT_TX_FILTERED;
} else {
if (wvif->bss_loss_state &&
@@ -808,31 +614,25 @@ void wfx_tx_confirm_cb(struct wfx_vif *wvif, struct hif_cnf_tx *arg)
wfx_pending_remove(wvif->wdev, skb);
}
-static void wfx_notify_buffered_tx(struct wfx_vif *wvif, struct sk_buff *skb,
- struct hif_req_tx *req)
+static void wfx_notify_buffered_tx(struct wfx_vif *wvif, struct sk_buff *skb)
{
- struct ieee80211_sta *sta;
struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
- int tid = wfx_tx_get_tid(hdr);
- int raw_link_id = req->queue_id.peer_sta_id;
- u8 *buffered;
-
- if (raw_link_id && tid < WFX_MAX_TID) {
- buffered = wvif->link_id_db[raw_link_id - 1].buffered;
-
- spin_lock_bh(&wvif->ps_state_lock);
- WARN(!buffered[tid], "inconsistent notification");
- buffered[tid]--;
- spin_unlock_bh(&wvif->ps_state_lock);
-
- if (!buffered[tid]) {
- rcu_read_lock();
- sta = ieee80211_find_sta(wvif->vif, hdr->addr1);
- if (sta)
- ieee80211_sta_set_buffered(sta, tid, false);
- rcu_read_unlock();
- }
+ struct ieee80211_sta *sta;
+ struct wfx_sta_priv *sta_priv;
+ int tid = ieee80211_get_tid(hdr);
+
+ rcu_read_lock(); // protect sta
+ sta = ieee80211_find_sta(wvif->vif, hdr->addr1);
+ if (sta) {
+ sta_priv = (struct wfx_sta_priv *)&sta->drv_priv;
+ spin_lock_bh(&sta_priv->lock);
+ WARN(!sta_priv->buffered[tid], "inconsistent notification");
+ sta_priv->buffered[tid]--;
+ if (!sta_priv->buffered[tid])
+ ieee80211_sta_set_buffered(sta, tid, false);
+ spin_unlock_bh(&sta_priv->lock);
}
+ rcu_read_unlock();
}
void wfx_skb_dtor(struct wfx_dev *wdev, struct sk_buff *skb)
@@ -846,7 +646,7 @@ void wfx_skb_dtor(struct wfx_dev *wdev, struct sk_buff *skb)
WARN_ON(!wvif);
skb_pull(skb, offset);
- wfx_notify_buffered_tx(wvif, skb, req);
+ wfx_notify_buffered_tx(wvif, skb);
wfx_tx_policy_put(wvif, req->tx_flags.retry_policy_index);
ieee80211_tx_status_irqsafe(wdev->hw, skb);
}
diff --git a/drivers/staging/wfx/data_tx.h b/drivers/staging/wfx/data_tx.h
index 0fc388db62e0..04b2147101b6 100644
--- a/drivers/staging/wfx/data_tx.h
+++ b/drivers/staging/wfx/data_tx.h
@@ -14,29 +14,10 @@
#include "hif_api_cmd.h"
#include "hif_api_mib.h"
-// FIXME: use IEEE80211_NUM_TIDS
-#define WFX_MAX_TID 8
-
struct wfx_tx_priv;
struct wfx_dev;
struct wfx_vif;
-enum wfx_link_status {
- WFX_LINK_OFF,
- WFX_LINK_RESERVE,
- WFX_LINK_SOFT,
- WFX_LINK_HARD,
-};
-
-struct wfx_link_entry {
- unsigned long timestamp;
- enum wfx_link_status status;
- u8 mac[ETH_ALEN];
- u8 old_mac[ETH_ALEN];
- u8 buffered[WFX_MAX_TID];
- struct sk_buff_head rx_queue;
-};
-
struct tx_policy {
struct list_head link;
int usage_count;
@@ -57,7 +38,6 @@ struct wfx_tx_priv {
struct ieee80211_key_conf *hw_key;
u8 link_id;
u8 raw_link_id;
- u8 tid;
} __packed;
void wfx_tx_policy_init(struct wfx_vif *wvif);
@@ -65,14 +45,9 @@ void wfx_tx_policy_upload_work(struct work_struct *work);
void wfx_tx(struct ieee80211_hw *hw, struct ieee80211_tx_control *control,
struct sk_buff *skb);
-void wfx_tx_confirm_cb(struct wfx_vif *wvif, struct hif_cnf_tx *arg);
+void wfx_tx_confirm_cb(struct wfx_vif *wvif, const struct hif_cnf_tx *arg);
void wfx_skb_dtor(struct wfx_dev *wdev, struct sk_buff *skb);
-int wfx_unmap_link(struct wfx_vif *wvif, int link_id);
-void wfx_link_id_work(struct work_struct *work);
-void wfx_link_id_gc_work(struct work_struct *work);
-int wfx_find_link_id(struct wfx_vif *wvif, const u8 *mac);
-
static inline struct wfx_tx_priv *wfx_skb_tx_priv(struct sk_buff *skb)
{
struct ieee80211_tx_info *tx_info;
diff --git a/drivers/staging/wfx/debug.c b/drivers/staging/wfx/debug.c
index d17a75242365..1164aba118a1 100644
--- a/drivers/staging/wfx/debug.c
+++ b/drivers/staging/wfx/debug.c
@@ -145,7 +145,7 @@ static int wfx_rx_stats_show(struct seq_file *seq, void *v)
st->pwr_clk_freq,
st->is_ext_pwr_clk ? "yes" : "no");
seq_printf(seq,
- "N. of frames: %d, PER (x10e4): %d, Throughput: %dKbps/s\n",
+ "Num. of frames: %d, PER (x10e4): %d, Throughput: %dKbps/s\n",
st->nb_rx_frame, st->per_total, st->throughput);
seq_puts(seq, " Num. of PER RSSI SNR CFO\n");
seq_puts(seq, " frames (x10e4) (dBm) (dB) (kHz)\n");
diff --git a/drivers/staging/wfx/fwio.c b/drivers/staging/wfx/fwio.c
index dbf8bda71ff7..9d61082c1e6c 100644
--- a/drivers/staging/wfx/fwio.c
+++ b/drivers/staging/wfx/fwio.c
@@ -61,7 +61,7 @@
#define DCA_TIMEOUT 50 // milliseconds
#define WAKEUP_TIMEOUT 200 // milliseconds
-static const char * const fwio_error_strings[] = {
+static const char * const fwio_errors[] = {
[ERR_INVALID_SEC_TYPE] = "Invalid section type or wrong encryption",
[ERR_SIG_VERIF_FAILED] = "Signature verification failed",
[ERR_AES_CTRL_KEY] = "AES control key not initialized",
@@ -220,22 +220,16 @@ static int upload_firmware(struct wfx_dev *wdev, const u8 *data, size_t len)
static void print_boot_status(struct wfx_dev *wdev)
{
- u32 val32;
+ u32 reg;
- sram_reg_read(wdev, WFX_STATUS_INFO, &val32);
- if (val32 == 0x12345678) {
- dev_info(wdev->dev, "no error reported by secure boot\n");
- } else {
- sram_reg_read(wdev, WFX_ERR_INFO, &val32);
- if (val32 < ARRAY_SIZE(fwio_error_strings) &&
- fwio_error_strings[val32])
- dev_info(wdev->dev, "secure boot error: %s\n",
- fwio_error_strings[val32]);
- else
- dev_info(wdev->dev,
- "secure boot error: Unknown (0x%02x)\n",
- val32);
- }
+ sram_reg_read(wdev, WFX_STATUS_INFO, &reg);
+ if (reg == 0x12345678)
+ return;
+ sram_reg_read(wdev, WFX_ERR_INFO, &reg);
+ if (reg < ARRAY_SIZE(fwio_errors) && fwio_errors[reg])
+ dev_info(wdev->dev, "secure boot: %s\n", fwio_errors[reg]);
+ else
+ dev_info(wdev->dev, "secure boot: Error %#02x\n", reg);
}
static int load_firmware_secure(struct wfx_dev *wdev)
@@ -345,7 +339,7 @@ int wfx_init_device(struct wfx_dev *wdev)
ktime_t now, start;
u32 reg;
- reg = CFG_DIRECT_ACCESS_MODE | CFG_CPU_RESET | CFG_WORD_MODE2;
+ reg = CFG_DIRECT_ACCESS_MODE | CFG_CPU_RESET | CFG_BYTE_ORDER_ABCD;
if (wdev->pdata.use_rising_clk)
reg |= CFG_CLK_RISE_EDGE;
ret = config_reg_write(wdev, reg);
diff --git a/drivers/staging/wfx/hif_api_cmd.h b/drivers/staging/wfx/hif_api_cmd.h
index c15831de4ff4..5554d6eddbf3 100644
--- a/drivers/staging/wfx/hif_api_cmd.h
+++ b/drivers/staging/wfx/hif_api_cmd.h
@@ -137,7 +137,7 @@ struct hif_ie_tlv {
struct hif_req_update_ie {
struct hif_ie_flags ie_flags;
- u16 num_i_es;
+ u16 num_ies;
struct hif_ie_tlv ie[];
} __packed;
@@ -180,7 +180,7 @@ struct hif_req_start_scan {
struct hif_auto_scan_param auto_scan_param;
u8 num_of_probe_requests;
u8 probe_delay;
- u8 num_of_ssi_ds;
+ u8 num_of_ssids;
u8 num_of_channels;
u32 min_channel_time;
u32 max_channel_time;
@@ -188,7 +188,7 @@ struct hif_req_start_scan {
u8 ssid_and_channel_lists[];
} __packed;
-struct hif_start_scan_req_cstnbssid_body {
+struct hif_req_start_scan_alt {
u8 band;
struct hif_scan_type scan_type;
struct hif_scan_flags scan_flags;
@@ -196,7 +196,7 @@ struct hif_start_scan_req_cstnbssid_body {
struct hif_auto_scan_param auto_scan_param;
u8 num_of_probe_requests;
u8 probe_delay;
- u8 num_of_ssi_ds;
+ u8 num_of_ssids;
u8 num_of_channels;
u32 min_channel_time;
u32 max_channel_time;
@@ -253,7 +253,8 @@ struct hif_queue {
struct hif_data_flags {
u8 more:1;
u8 fc_offset:3;
- u8 reserved:4;
+ u8 after_dtim:1;
+ u8 reserved:3;
} __packed;
struct hif_tx_flags {
@@ -377,17 +378,6 @@ struct hif_cnf_edca_queue_params {
u32 status;
} __packed;
-enum hif_ap_mode {
- HIF_MODE_IBSS = 0x0,
- HIF_MODE_BSS = 0x1
-};
-
-enum hif_preamble {
- HIF_PREAMBLE_LONG = 0x0,
- HIF_PREAMBLE_SHORT = 0x1,
- HIF_PREAMBLE_SHORT_LONG12 = 0x2
-};
-
struct hif_join_flags {
u8 reserved1:2;
u8 force_no_beacon:1;
@@ -396,14 +386,16 @@ struct hif_join_flags {
} __packed;
struct hif_req_join {
- u8 mode;
+ u8 infrastructure_bss_mode:1;
+ u8 reserved1:7;
u8 band;
u16 channel_number;
u8 bssid[ETH_ALEN];
u16 atim_window;
- u8 preamble_type;
+ u8 short_preamble:1;
+ u8 reserved2:7;
u8 probe_for_join;
- u8 reserved;
+ u8 reserved3;
struct hif_join_flags join_flags;
u32 ssid_length;
u8 ssid[HIF_API_SSID_SIZE];
@@ -466,8 +458,9 @@ struct hif_req_start {
u32 reserved1;
u32 beacon_interval;
u8 dtim_period;
- u8 preamble_type;
- u8 reserved2;
+ u8 short_preamble:1;
+ u8 reserved2:7;
+ u8 reserved3;
u8 ssid_length;
u8 ssid[HIF_API_SSID_SIZE];
u32 basic_rate_set;
diff --git a/drivers/staging/wfx/hif_api_mib.h b/drivers/staging/wfx/hif_api_mib.h
index 94b789ceb4ff..0c67cd4c1593 100644
--- a/drivers/staging/wfx/hif_api_mib.h
+++ b/drivers/staging/wfx/hif_api_mib.h
@@ -181,19 +181,13 @@ struct hif_mib_ipv6_addr_data_frame_condition {
u8 i_pv6_address[HIF_API_IPV6_ADDRESS_SIZE];
} __packed;
-union hif_addr_type {
- u8 value;
- struct {
- u8 type_unicast:1;
- u8 type_multicast:1;
- u8 type_broadcast:1;
- u8 reserved:5;
- } bits;
-};
+#define HIF_FILTER_UNICAST 0x1
+#define HIF_FILTER_MULTICAST 0x2
+#define HIF_FILTER_BROADCAST 0x4
struct hif_mib_uc_mc_bc_data_frame_condition {
u8 condition_idx;
- union hif_addr_type param;
+ u8 allowed_frames;
u8 reserved[2];
} __packed;
@@ -212,9 +206,11 @@ struct hif_mib_config_data_filter {
} __packed;
struct hif_mib_set_data_filtering {
- u8 default_filter;
- u8 enable;
- u8 reserved[2];
+ u8 invert_matching:1;
+ u8 reserved1:7;
+ u8 enable:1;
+ u8 reserved2:7;
+ u8 reserved3[2];
} __packed;
enum hif_arp_ns_frame_treatment {
@@ -395,11 +391,6 @@ struct hif_mib_non_erp_protection {
u8 reserved2[3];
} __packed;
-enum hif_tx_mode {
- HIF_TX_MODE_MIXED = 0x0,
- HIF_TX_MODE_GREENFIELD = 0x1
-};
-
enum hif_tmplt {
HIF_TMPLT_PRBREQ = 0x0,
HIF_TMPLT_BCN = 0x1,
@@ -471,9 +462,11 @@ struct hif_mib_set_association_mode {
u8 mode:1;
u8 rateset:1;
u8 spacing:1;
- u8 reserved:4;
- u8 preamble_type;
- u8 mixed_or_greenfield_type;
+ u8 reserved1:4;
+ u8 short_preamble:1;
+ u8 reserved2:7;
+ u8 greenfield:1;
+ u8 reserved3:7;
u8 mpdu_start_spacing;
u32 basic_rate_set;
} __packed;
diff --git a/drivers/staging/wfx/hif_rx.c b/drivers/staging/wfx/hif_rx.c
index 820de216be0c..33c22c5d629d 100644
--- a/drivers/staging/wfx/hif_rx.c
+++ b/drivers/staging/wfx/hif_rx.c
@@ -18,8 +18,8 @@
#include "secure_link.h"
#include "hif_api_cmd.h"
-static int hif_generic_confirm(struct wfx_dev *wdev, struct hif_msg *hif,
- void *buf)
+static int hif_generic_confirm(struct wfx_dev *wdev,
+ const struct hif_msg *hif, const void *buf)
{
// All confirm messages start with status
int status = le32_to_cpu(*((__le32 *) buf));
@@ -59,9 +59,10 @@ static int hif_generic_confirm(struct wfx_dev *wdev, struct hif_msg *hif,
return status;
}
-static int hif_tx_confirm(struct wfx_dev *wdev, struct hif_msg *hif, void *buf)
+static int hif_tx_confirm(struct wfx_dev *wdev,
+ const struct hif_msg *hif, const void *buf)
{
- struct hif_cnf_tx *body = buf;
+ const struct hif_cnf_tx *body = buf;
struct wfx_vif *wvif = wdev_to_wvif(wdev, hif->interface);
WARN_ON(!wvif);
@@ -72,31 +73,27 @@ static int hif_tx_confirm(struct wfx_dev *wdev, struct hif_msg *hif, void *buf)
return 0;
}
-static int hif_multi_tx_confirm(struct wfx_dev *wdev, struct hif_msg *hif,
- void *buf)
+static int hif_multi_tx_confirm(struct wfx_dev *wdev,
+ const struct hif_msg *hif, const void *buf)
{
- struct hif_cnf_multi_transmit *body = buf;
- struct hif_cnf_tx *buf_loc = (struct hif_cnf_tx *) &body->tx_conf_payload;
+ const struct hif_cnf_multi_transmit *body = buf;
struct wfx_vif *wvif = wdev_to_wvif(wdev, hif->interface);
- int count = body->num_tx_confs;
int i;
- WARN(count <= 0, "corrupted message");
+ WARN(body->num_tx_confs <= 0, "corrupted message");
WARN_ON(!wvif);
if (!wvif)
return -EFAULT;
- for (i = 0; i < count; ++i) {
- wfx_tx_confirm_cb(wvif, buf_loc);
- buf_loc++;
- }
+ for (i = 0; i < body->num_tx_confs; i++)
+ wfx_tx_confirm_cb(wvif, &body->tx_conf_payload[i]);
return 0;
}
-static int hif_startup_indication(struct wfx_dev *wdev, struct hif_msg *hif,
- void *buf)
+static int hif_startup_indication(struct wfx_dev *wdev,
+ const struct hif_msg *hif, const void *buf)
{
- struct hif_ind_startup *body = buf;
+ const struct hif_ind_startup *body = buf;
if (body->status || body->firmware_type > 4) {
dev_err(wdev->dev, "received invalid startup indication");
@@ -112,8 +109,8 @@ static int hif_startup_indication(struct wfx_dev *wdev, struct hif_msg *hif,
return 0;
}
-static int hif_wakeup_indication(struct wfx_dev *wdev, struct hif_msg *hif,
- void *buf)
+static int hif_wakeup_indication(struct wfx_dev *wdev,
+ const struct hif_msg *hif, const void *buf)
{
if (!wdev->pdata.gpio_wakeup
|| !gpiod_get_value(wdev->pdata.gpio_wakeup)) {
@@ -123,25 +120,27 @@ static int hif_wakeup_indication(struct wfx_dev *wdev, struct hif_msg *hif,
return 0;
}
-static int hif_keys_indication(struct wfx_dev *wdev, struct hif_msg *hif,
- void *buf)
+static int hif_keys_indication(struct wfx_dev *wdev,
+ const struct hif_msg *hif, const void *buf)
{
- struct hif_ind_sl_exchange_pub_keys *body = buf;
+ const struct hif_ind_sl_exchange_pub_keys *body = buf;
+ u8 pubkey[API_NCP_PUB_KEY_SIZE];
- // Compatibility with legacy secure link
- if (body->status == SL_PUB_KEY_EXCHANGE_STATUS_SUCCESS)
- body->status = 0;
- if (body->status)
+ // SL_PUB_KEY_EXCHANGE_STATUS_SUCCESS is used by legacy secure link
+ if (body->status && body->status != SL_PUB_KEY_EXCHANGE_STATUS_SUCCESS)
dev_warn(wdev->dev, "secure link negociation error\n");
- wfx_sl_check_pubkey(wdev, body->ncp_pub_key, body->ncp_pub_key_mac);
+ memcpy(pubkey, body->ncp_pub_key, sizeof(pubkey));
+ memreverse(pubkey, sizeof(pubkey));
+ wfx_sl_check_pubkey(wdev, pubkey, body->ncp_pub_key_mac);
return 0;
}
-static int hif_receive_indication(struct wfx_dev *wdev, struct hif_msg *hif,
- void *buf, struct sk_buff *skb)
+static int hif_receive_indication(struct wfx_dev *wdev,
+ const struct hif_msg *hif,
+ const void *buf, struct sk_buff *skb)
{
struct wfx_vif *wvif = wdev_to_wvif(wdev, hif->interface);
- struct hif_ind_rx *body = buf;
+ const struct hif_ind_rx *body = buf;
if (!wvif) {
dev_warn(wdev->dev, "ignore rx data for non-existent vif %d\n",
@@ -154,11 +153,11 @@ static int hif_receive_indication(struct wfx_dev *wdev, struct hif_msg *hif,
return 0;
}
-static int hif_event_indication(struct wfx_dev *wdev, struct hif_msg *hif,
- void *buf)
+static int hif_event_indication(struct wfx_dev *wdev,
+ const struct hif_msg *hif, const void *buf)
{
struct wfx_vif *wvif = wdev_to_wvif(wdev, hif->interface);
- struct hif_ind_event *body = buf;
+ const struct hif_ind_event *body = buf;
struct wfx_hif_event *event;
int first;
@@ -183,7 +182,8 @@ static int hif_event_indication(struct wfx_dev *wdev, struct hif_msg *hif,
}
static int hif_pm_mode_complete_indication(struct wfx_dev *wdev,
- struct hif_msg *hif, void *buf)
+ const struct hif_msg *hif,
+ const void *buf)
{
struct wfx_vif *wvif = wdev_to_wvif(wdev, hif->interface);
@@ -194,19 +194,20 @@ static int hif_pm_mode_complete_indication(struct wfx_dev *wdev,
}
static int hif_scan_complete_indication(struct wfx_dev *wdev,
- struct hif_msg *hif, void *buf)
+ const struct hif_msg *hif,
+ const void *buf)
{
struct wfx_vif *wvif = wdev_to_wvif(wdev, hif->interface);
- struct hif_ind_scan_cmpl *body = buf;
WARN_ON(!wvif);
- wfx_scan_complete_cb(wvif, body);
+ wfx_scan_complete(wvif);
return 0;
}
static int hif_join_complete_indication(struct wfx_dev *wdev,
- struct hif_msg *hif, void *buf)
+ const struct hif_msg *hif,
+ const void *buf)
{
struct wfx_vif *wvif = wdev_to_wvif(wdev, hif->interface);
@@ -217,21 +218,26 @@ static int hif_join_complete_indication(struct wfx_dev *wdev,
}
static int hif_suspend_resume_indication(struct wfx_dev *wdev,
- struct hif_msg *hif, void *buf)
+ const struct hif_msg *hif,
+ const void *buf)
{
struct wfx_vif *wvif = wdev_to_wvif(wdev, hif->interface);
- struct hif_ind_suspend_resume_tx *body = buf;
+ const struct hif_ind_suspend_resume_tx *body = buf;
WARN_ON(!wvif);
- wfx_suspend_resume(wvif, body);
+ WARN(!body->suspend_resume_flags.bc_mc_only, "unsupported suspend/resume notification");
+ if (body->suspend_resume_flags.resume)
+ wfx_suspend_resume_mc(wvif, STA_NOTIFY_AWAKE);
+ else
+ wfx_suspend_resume_mc(wvif, STA_NOTIFY_SLEEP);
return 0;
}
-static int hif_error_indication(struct wfx_dev *wdev, struct hif_msg *hif,
- void *buf)
+static int hif_error_indication(struct wfx_dev *wdev,
+ const struct hif_msg *hif, const void *buf)
{
- struct hif_ind_error *body = buf;
+ const struct hif_ind_error *body = buf;
u8 *pRollback = (u8 *) body->data;
u32 *pStatus = (u32 *) body->data;
@@ -268,10 +274,10 @@ static int hif_error_indication(struct wfx_dev *wdev, struct hif_msg *hif,
return 0;
}
-static int hif_generic_indication(struct wfx_dev *wdev, struct hif_msg *hif,
- void *buf)
+static int hif_generic_indication(struct wfx_dev *wdev,
+ const struct hif_msg *hif, const void *buf)
{
- struct hif_ind_generic *body = buf;
+ const struct hif_ind_generic *body = buf;
switch (body->indication_type) {
case HIF_GENERIC_INDICATION_TYPE_RAW:
@@ -299,9 +305,10 @@ static int hif_generic_indication(struct wfx_dev *wdev, struct hif_msg *hif,
}
static int hif_exception_indication(struct wfx_dev *wdev,
- struct hif_msg *hif, void *buf)
+ const struct hif_msg *hif, const void *buf)
{
size_t len = hif->len - 4; // drop header
+
dev_err(wdev->dev, "firmware exception\n");
print_hex_dump_bytes("Dump: ", DUMP_PREFIX_NONE, buf, len);
wdev->chip_frozen = 1;
@@ -311,7 +318,8 @@ static int hif_exception_indication(struct wfx_dev *wdev,
static const struct {
int msg_id;
- int (*handler)(struct wfx_dev *wdev, struct hif_msg *hif, void *buf);
+ int (*handler)(struct wfx_dev *wdev,
+ const struct hif_msg *hif, const void *buf);
} hif_handlers[] = {
/* Confirmations */
{ HIF_CNF_ID_TX, hif_tx_confirm },
@@ -335,7 +343,7 @@ static const struct {
void wfx_handle_rx(struct wfx_dev *wdev, struct sk_buff *skb)
{
int i;
- struct hif_msg *hif = (struct hif_msg *) skb->data;
+ const struct hif_msg *hif = (const struct hif_msg *)skb->data;
int hif_id = hif->id;
if (hif_id == HIF_IND_ID_RX) {
@@ -358,7 +366,12 @@ void wfx_handle_rx(struct wfx_dev *wdev, struct sk_buff *skb)
goto free;
}
}
- dev_err(wdev->dev, "unsupported HIF ID %02x\n", hif_id);
+ if (hif_id & 0x80)
+ dev_err(wdev->dev, "unsupported HIF indication: ID %02x\n",
+ hif_id);
+ else
+ dev_err(wdev->dev, "unexpected HIF confirmation: ID %02x\n",
+ hif_id);
free:
dev_kfree_skb(skb);
}
diff --git a/drivers/staging/wfx/hif_tx.c b/drivers/staging/wfx/hif_tx.c
index cb7cddcb9815..2428363371fa 100644
--- a/drivers/staging/wfx/hif_tx.c
+++ b/drivers/staging/wfx/hif_tx.c
@@ -6,7 +6,6 @@
* Copyright (c) 2017-2019, Silicon Laboratories, Inc.
* Copyright (c) 2010, ST-Ericsson
*/
-#include <linux/skbuff.h>
#include <linux/etherdevice.h>
#include "hif_tx.h"
@@ -221,41 +220,59 @@ int hif_write_mib(struct wfx_dev *wdev, int vif_id, u16 mib_id, void *val,
return ret;
}
-int hif_scan(struct wfx_vif *wvif, const struct wfx_scan_params *arg)
+int hif_scan(struct wfx_vif *wvif, struct cfg80211_scan_request *req,
+ int chan_start_idx, int chan_num)
{
int ret, i;
struct hif_msg *hif;
- struct hif_ssid_def *ssids;
- size_t buf_len = sizeof(struct hif_req_start_scan) +
- arg->scan_req.num_of_channels * sizeof(u8) +
- arg->scan_req.num_of_ssi_ds * sizeof(struct hif_ssid_def);
- struct hif_req_start_scan *body = wfx_alloc_hif(buf_len, &hif);
- u8 *ptr = (u8 *) body + sizeof(*body);
-
- WARN(arg->scan_req.num_of_channels > HIF_API_MAX_NB_CHANNELS, "invalid params");
- WARN(arg->scan_req.num_of_ssi_ds > 2, "invalid params");
- WARN(arg->scan_req.band > 1, "invalid params");
-
- // FIXME: This API is unnecessary complex, fixing NumOfChannels and
- // adding a member SsidDef at end of struct hif_req_start_scan would
- // simplify that a lot.
- memcpy(body, &arg->scan_req, sizeof(*body));
- cpu_to_le32s(&body->min_channel_time);
- cpu_to_le32s(&body->max_channel_time);
- cpu_to_le32s(&body->tx_power_level);
- memcpy(ptr, arg->ssids,
- arg->scan_req.num_of_ssi_ds * sizeof(struct hif_ssid_def));
- ssids = (struct hif_ssid_def *) ptr;
- for (i = 0; i < body->num_of_ssi_ds; ++i)
- cpu_to_le32s(&ssids[i].ssid_length);
- ptr += arg->scan_req.num_of_ssi_ds * sizeof(struct hif_ssid_def);
- memcpy(ptr, arg->ch, arg->scan_req.num_of_channels * sizeof(u8));
- ptr += arg->scan_req.num_of_channels * sizeof(u8);
- WARN(buf_len != ptr - (u8 *) body, "allocation size mismatch");
+ size_t buf_len =
+ sizeof(struct hif_req_start_scan_alt) + chan_num * sizeof(u8);
+ struct hif_req_start_scan_alt *body = wfx_alloc_hif(buf_len, &hif);
+ int tmo_chan_fg, tmo_chan_bg, tmo;
+
+ WARN(chan_num > HIF_API_MAX_NB_CHANNELS, "invalid params");
+ WARN(req->n_ssids > HIF_API_MAX_NB_SSIDS, "invalid params");
+
+ compiletime_assert(IEEE80211_MAX_SSID_LEN == HIF_API_SSID_SIZE,
+ "API inconsistency");
+ for (i = 0; i < req->n_ssids; i++) {
+ memcpy(body->ssid_def[i].ssid, req->ssids[i].ssid,
+ IEEE80211_MAX_SSID_LEN);
+ body->ssid_def[i].ssid_length =
+ cpu_to_le32(req->ssids[i].ssid_len);
+ }
+ body->num_of_ssids = HIF_API_MAX_NB_SSIDS;
+ // Background scan is always a good idea
+ body->scan_type.type = 1;
+ body->scan_flags.fbg = 1;
+ body->tx_power_level =
+ cpu_to_le32(req->channels[chan_start_idx]->max_power);
+ body->num_of_channels = chan_num;
+ for (i = 0; i < chan_num; i++)
+ body->channel_list[i] =
+ req->channels[i + chan_start_idx]->hw_value;
+ if (req->no_cck)
+ body->max_transmit_rate = API_RATE_INDEX_G_6MBPS;
+ else
+ body->max_transmit_rate = API_RATE_INDEX_B_1MBPS;
+ if (req->channels[chan_start_idx]->flags & IEEE80211_CHAN_NO_IR) {
+ body->min_channel_time = cpu_to_le32(50);
+ body->max_channel_time = cpu_to_le32(150);
+ } else {
+ body->min_channel_time = cpu_to_le32(10);
+ body->max_channel_time = cpu_to_le32(50);
+ body->num_of_probe_requests = 2;
+ body->probe_delay = 100;
+ }
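+ // Worst-case duration estimate for this scan request: passive
+ // channels are bounded by max_channel_time, active ones by the
+ // probe-request exchanges; keep the larger of the two per channel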
+ tmo_chan_bg = le32_to_cpu(body->max_channel_time) * USEC_PER_TU;
+ tmo_chan_fg = 512 * USEC_PER_TU + body->probe_delay;
+ tmo_chan_fg *= body->num_of_probe_requests;
+ tmo = chan_num * max(tmo_chan_bg, tmo_chan_fg);
+
wfx_fill_header(hif, wvif->id, HIF_REQ_ID_START_SCAN, buf_len);
ret = wfx_cmd_send(wvif->wdev, hif, NULL, 0, false);
kfree(hif);
- return ret;
+ return ret ? ret : usecs_to_jiffies(tmo);
}
int hif_stop_scan(struct wfx_vif *wvif)
@@ -271,18 +288,29 @@ int hif_stop_scan(struct wfx_vif *wvif)
return ret;
}
-int hif_join(struct wfx_vif *wvif, const struct hif_req_join *arg)
+int hif_join(struct wfx_vif *wvif, const struct ieee80211_bss_conf *conf,
+ const struct ieee80211_channel *channel, const u8 *ssidie)
{
int ret;
struct hif_msg *hif;
struct hif_req_join *body = wfx_alloc_hif(sizeof(*body), &hif);
- memcpy(body, arg, sizeof(struct hif_req_join));
- cpu_to_le16s(&body->channel_number);
- cpu_to_le16s(&body->atim_window);
- cpu_to_le32s(&body->ssid_length);
- cpu_to_le32s(&body->beacon_interval);
- cpu_to_le32s(&body->basic_rate_set);
+ WARN_ON(!conf->basic_rates);
+ body->infrastructure_bss_mode = !conf->ibss_joined;
+ body->short_preamble = conf->use_short_preamble;
+ if (channel && channel->flags & IEEE80211_CHAN_NO_IR)
+ body->probe_for_join = 0;
+ else
+ body->probe_for_join = 1;
+ body->channel_number = cpu_to_le16(channel->hw_value);
+ body->beacon_interval = cpu_to_le32(conf->beacon_int);
+ body->basic_rate_set =
+ cpu_to_le32(wfx_rate_mask_to_hw(wvif->wdev, conf->basic_rates));
+ memcpy(body->bssid, conf->bssid, sizeof(body->bssid));
+ if (!conf->ibss_joined && ssidie) {
+ body->ssid_length = cpu_to_le32(ssidie[1]);
+ memcpy(body->ssid, &ssidie[2], ssidie[1]);
+ }
wfx_fill_header(hif, wvif->id, HIF_REQ_ID_JOIN, sizeof(*body));
ret = wfx_cmd_send(wvif->wdev, hif, NULL, 0, false);
kfree(hif);
@@ -341,19 +369,28 @@ int hif_remove_key(struct wfx_dev *wdev, int idx)
return ret;
}
-int hif_set_edca_queue_params(struct wfx_vif *wvif,
- const struct hif_req_edca_queue_params *arg)
+int hif_set_edca_queue_params(struct wfx_vif *wvif, u16 queue,
+ const struct ieee80211_tx_queue_params *arg)
{
int ret;
struct hif_msg *hif;
struct hif_req_edca_queue_params *body = wfx_alloc_hif(sizeof(*body),
&hif);
- // NOTE: queues numerotation are not the same between WFx and Linux
- memcpy(body, arg, sizeof(*body));
- cpu_to_le16s(&body->cw_min);
- cpu_to_le16s(&body->cw_max);
- cpu_to_le16s(&body->tx_op_limit);
+ if (!body)
+ return -ENOMEM;
+
+ WARN_ON(arg->aifs > 255);
+ body->aifsn = arg->aifs;
+ body->cw_min = cpu_to_le16(arg->cw_min);
+ body->cw_max = cpu_to_le16(arg->cw_max);
+ body->tx_op_limit = cpu_to_le16(arg->txop * USEC_PER_TXOP);
+ body->queue_id = 3 - queue;
+ // API 2.0 changed the queue ID values
+ if (wfx_api_older_than(wvif->wdev, 2, 0) && queue == IEEE80211_AC_BE)
+ body->queue_id = HIF_QUEUE_ID_BACKGROUND;
+ if (wfx_api_older_than(wvif->wdev, 2, 0) && queue == IEEE80211_AC_BK)
+ body->queue_id = HIF_QUEUE_ID_BESTEFFORT;
wfx_fill_header(hif, wvif->id, HIF_REQ_ID_EDCA_QUEUE_PARAMS,
sizeof(*body));
ret = wfx_cmd_send(wvif->wdev, hif, NULL, 0, false);
@@ -361,43 +398,57 @@ int hif_set_edca_queue_params(struct wfx_vif *wvif,
return ret;
}
-int hif_set_pm(struct wfx_vif *wvif, const struct hif_req_set_pm_mode *arg)
+int hif_set_pm(struct wfx_vif *wvif, bool ps, int dynamic_ps_timeout)
{
int ret;
struct hif_msg *hif;
struct hif_req_set_pm_mode *body = wfx_alloc_hif(sizeof(*body), &hif);
- memcpy(body, arg, sizeof(*body));
+ if (!body)
+ return -ENOMEM;
+
+ if (ps) {
+ body->pm_mode.enter_psm = 1;
+ // Firmware does not support more than 128ms
+ body->fast_psm_idle_period = min(dynamic_ps_timeout * 2, 255);
+ if (body->fast_psm_idle_period)
+ body->pm_mode.fast_psm = 1;
+ }
wfx_fill_header(hif, wvif->id, HIF_REQ_ID_SET_PM_MODE, sizeof(*body));
ret = wfx_cmd_send(wvif->wdev, hif, NULL, 0, false);
kfree(hif);
return ret;
}
-int hif_start(struct wfx_vif *wvif, const struct hif_req_start *arg)
+int hif_start(struct wfx_vif *wvif, const struct ieee80211_bss_conf *conf,
+ const struct ieee80211_channel *channel)
{
int ret;
struct hif_msg *hif;
struct hif_req_start *body = wfx_alloc_hif(sizeof(*body), &hif);
- memcpy(body, arg, sizeof(*body));
- cpu_to_le16s(&body->channel_number);
- cpu_to_le32s(&body->beacon_interval);
- cpu_to_le32s(&body->basic_rate_set);
+ body->dtim_period = conf->dtim_period;
+ body->short_preamble = conf->use_short_preamble;
+ body->channel_number = cpu_to_le16(channel->hw_value);
+ body->beacon_interval = cpu_to_le32(conf->beacon_int);
+ body->basic_rate_set =
+ cpu_to_le32(wfx_rate_mask_to_hw(wvif->wdev, conf->basic_rates));
+ body->ssid_length = conf->ssid_len;
+ memcpy(body->ssid, conf->ssid, conf->ssid_len);
wfx_fill_header(hif, wvif->id, HIF_REQ_ID_START, sizeof(*body));
ret = wfx_cmd_send(wvif->wdev, hif, NULL, 0, false);
kfree(hif);
return ret;
}
-int hif_beacon_transmit(struct wfx_vif *wvif, bool enable_beaconing)
+int hif_beacon_transmit(struct wfx_vif *wvif, bool enable)
{
int ret;
struct hif_msg *hif;
struct hif_req_beacon_transmit *body = wfx_alloc_hif(sizeof(*body),
&hif);
- body->enable_beaconing = enable_beaconing ? 1 : 0;
+ body->enable_beaconing = enable ? 1 : 0;
wfx_fill_header(hif, wvif->id, HIF_REQ_ID_BEACON_TRANSMIT,
sizeof(*body));
ret = wfx_cmd_send(wvif->wdev, hif, NULL, 0, false);
@@ -421,16 +472,15 @@ int hif_map_link(struct wfx_vif *wvif, u8 *mac_addr, int flags, int sta_id)
return ret;
}
-int hif_update_ie(struct wfx_vif *wvif, const struct hif_ie_flags *target_frame,
- const u8 *ies, size_t ies_len)
+int hif_update_ie_beacon(struct wfx_vif *wvif, const u8 *ies, size_t ies_len)
{
int ret;
struct hif_msg *hif;
int buf_len = sizeof(struct hif_req_update_ie) + ies_len;
struct hif_req_update_ie *body = wfx_alloc_hif(buf_len, &hif);
- memcpy(&body->ie_flags, target_frame, sizeof(struct hif_ie_flags));
- body->num_i_es = cpu_to_le16(1);
+ body->ie_flags.beacon = 1;
+ body->num_ies = cpu_to_le16(1);
memcpy(body->ie, ies, ies_len);
wfx_fill_header(hif, wvif->id, HIF_REQ_ID_UPDATE_IE, buf_len);
ret = wfx_cmd_send(wvif->wdev, hif, NULL, 0, false);
diff --git a/drivers/staging/wfx/hif_tx.h b/drivers/staging/wfx/hif_tx.h
index f61ae7b0d41c..20977e461718 100644
--- a/drivers/staging/wfx/hif_tx.h
+++ b/drivers/staging/wfx/hif_tx.h
@@ -12,15 +12,13 @@
#include "hif_api_cmd.h"
+struct ieee80211_channel;
+struct ieee80211_bss_conf;
+struct ieee80211_tx_queue_params;
+struct cfg80211_scan_request;
struct wfx_dev;
struct wfx_vif;
-struct wfx_scan_params {
- struct hif_req_start_scan scan_req;
- struct hif_ssid_def *ssids;
- u8 *ch;
-};
-
struct wfx_hif_cmd {
struct mutex lock;
struct mutex key_renew_lock;
@@ -44,21 +42,23 @@ int hif_read_mib(struct wfx_dev *wdev, int vif_id, u16 mib_id,
void *buf, size_t buf_size);
int hif_write_mib(struct wfx_dev *wdev, int vif_id, u16 mib_id,
void *buf, size_t buf_size);
-int hif_scan(struct wfx_vif *wvif, const struct wfx_scan_params *arg);
+int hif_scan(struct wfx_vif *wvif, struct cfg80211_scan_request *req80211,
+ int chan_start, int chan_num);
int hif_stop_scan(struct wfx_vif *wvif);
-int hif_join(struct wfx_vif *wvif, const struct hif_req_join *arg);
-int hif_set_pm(struct wfx_vif *wvif, const struct hif_req_set_pm_mode *arg);
+int hif_join(struct wfx_vif *wvif, const struct ieee80211_bss_conf *conf,
+ const struct ieee80211_channel *channel, const u8 *ssidie);
+int hif_set_pm(struct wfx_vif *wvif, bool ps, int dynamic_ps_timeout);
int hif_set_bss_params(struct wfx_vif *wvif,
const struct hif_req_set_bss_params *arg);
int hif_add_key(struct wfx_dev *wdev, const struct hif_req_add_key *arg);
int hif_remove_key(struct wfx_dev *wdev, int idx);
-int hif_set_edca_queue_params(struct wfx_vif *wvif,
- const struct hif_req_edca_queue_params *arg);
-int hif_start(struct wfx_vif *wvif, const struct hif_req_start *arg);
+int hif_set_edca_queue_params(struct wfx_vif *wvif, u16 queue,
+ const struct ieee80211_tx_queue_params *arg);
+int hif_start(struct wfx_vif *wvif, const struct ieee80211_bss_conf *conf,
+ const struct ieee80211_channel *channel);
int hif_beacon_transmit(struct wfx_vif *wvif, bool enable);
int hif_map_link(struct wfx_vif *wvif, u8 *mac_addr, int flags, int sta_id);
-int hif_update_ie(struct wfx_vif *wvif, const struct hif_ie_flags *target_frame,
- const u8 *ies, size_t ies_len);
+int hif_update_ie_beacon(struct wfx_vif *wvif, const u8 *ies, size_t ies_len);
int hif_sl_set_mac_key(struct wfx_dev *wdev, const u8 *slk_key,
int destination);
int hif_sl_config(struct wfx_dev *wdev, const unsigned long *bitmap);
diff --git a/drivers/staging/wfx/hif_tx_mib.h b/drivers/staging/wfx/hif_tx_mib.h
index 9be74881c56c..bf3769c2a9b6 100644
--- a/drivers/staging/wfx/hif_tx_mib.h
+++ b/drivers/staging/wfx/hif_tx_mib.h
@@ -15,13 +15,15 @@
#include "hif_tx.h"
#include "hif_api_mib.h"
-static inline int hif_set_output_power(struct wfx_vif *wvif, int power_level)
+static inline int hif_set_output_power(struct wfx_vif *wvif, int val)
{
- __le32 val = cpu_to_le32(power_level);
+ struct hif_mib_current_tx_power_level arg = {
+ .power_level = cpu_to_le32(val * 10),
+ };
return hif_write_mib(wvif->wdev, wvif->id,
HIF_MIB_ID_CURRENT_TX_POWER_LEVEL,
- &val, sizeof(val));
+ &arg, sizeof(arg));
}
static inline int hif_set_beacon_wakeup_period(struct wfx_vif *wvif,
@@ -42,10 +44,25 @@ static inline int hif_set_beacon_wakeup_period(struct wfx_vif *wvif,
}
static inline int hif_set_rcpi_rssi_threshold(struct wfx_vif *wvif,
- struct hif_mib_rcpi_rssi_threshold *arg)
+ int rssi_thold, int rssi_hyst)
{
+ struct hif_mib_rcpi_rssi_threshold arg = {
+ .rolling_average_count = 8,
+ .detection = 1,
+ };
+
+ if (!rssi_thold && !rssi_hyst) {
+ arg.upperthresh = 1;
+ arg.lowerthresh = 1;
+ } else {
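+ // Convert dBm to the RCPI encoding expected by the firmware:
+ // RCPI = (dBm + 110) * 2, as defined by IEEE 802.11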
+ arg.upper_threshold = rssi_thold + rssi_hyst;
+ arg.upper_threshold = (arg.upper_threshold + 110) * 2;
+ arg.lower_threshold = rssi_thold;
+ arg.lower_threshold = (arg.lower_threshold + 110) * 2;
+ }
+
return hif_write_mib(wvif->wdev, wvif->id,
- HIF_MIB_ID_RCPI_RSSI_THRESHOLD, arg, sizeof(*arg));
+ HIF_MIB_ID_RCPI_RSSI_THRESHOLD, &arg, sizeof(arg));
}
static inline int hif_get_counters_table(struct wfx_dev *wdev,
@@ -130,8 +147,17 @@ static inline int hif_set_operational_mode(struct wfx_dev *wdev,
}
static inline int hif_set_template_frame(struct wfx_vif *wvif,
- struct hif_mib_template_frame *arg)
+ struct sk_buff *skb,
+ u8 frame_type, int init_rate)
{
+ struct hif_mib_template_frame *arg;
+
+ skb_push(skb, 4);
+ arg = (struct hif_mib_template_frame *)skb->data;
+ skb_pull(skb, 4);
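+ // After this push/pull pair, 'arg' points to the 4 bytes of headroom
+ // just in front of the frame, so the MIB header and the template
+ // frame are contiguous in memory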
+ arg->init_rate = init_rate;
+ arg->frame_type = frame_type;
+ arg->frame_length = cpu_to_le16(skb->len);
return hif_write_mib(wvif->wdev, wvif->id, HIF_MIB_ID_TEMPLATE_FRAME,
arg, sizeof(*arg));
}
@@ -165,50 +191,105 @@ static inline int hif_set_block_ack_policy(struct wfx_vif *wvif,
}
static inline int hif_set_association_mode(struct wfx_vif *wvif,
- struct hif_mib_set_association_mode *arg)
+ struct ieee80211_bss_conf *info,
+ struct ieee80211_sta_ht_cap *ht_cap)
{
+ int basic_rates = wfx_rate_mask_to_hw(wvif->wdev, info->basic_rates);
+ struct hif_mib_set_association_mode val = {
+ .preambtype_use = 1,
+ .mode = 1,
+ .rateset = 1,
+ .spacing = 1,
+ .short_preamble = info->use_short_preamble,
+ .basic_rate_set = cpu_to_le32(basic_rates)
+ };
+
+ // FIXME: it is strange to not retrieve all information from bss_info
+ if (ht_cap && ht_cap->ht_supported) {
+ val.mpdu_start_spacing = ht_cap->ampdu_density;
+ if (!(info->ht_operation_mode & IEEE80211_HT_OP_MODE_NON_GF_STA_PRSNT))
+ val.greenfield = !!(ht_cap->cap & IEEE80211_HT_CAP_GRN_FLD);
+ }
+
return hif_write_mib(wvif->wdev, wvif->id,
- HIF_MIB_ID_SET_ASSOCIATION_MODE, arg, sizeof(*arg));
+ HIF_MIB_ID_SET_ASSOCIATION_MODE, &val, sizeof(val));
}
static inline int hif_set_tx_rate_retry_policy(struct wfx_vif *wvif,
- struct hif_mib_set_tx_rate_retry_policy *arg)
+ int policy_index, uint8_t *rates)
{
- size_t size = struct_size(arg, tx_rate_retry_policy,
- arg->num_tx_rate_policies);
+ struct hif_mib_set_tx_rate_retry_policy *arg;
+ size_t size = struct_size(arg, tx_rate_retry_policy, 1);
+ int ret;
- return hif_write_mib(wvif->wdev, wvif->id,
- HIF_MIB_ID_SET_TX_RATE_RETRY_POLICY, arg, size);
+ arg = kzalloc(size, GFP_KERNEL);
+ if (!arg)
+ return -ENOMEM;
+ arg->num_tx_rate_policies = 1;
+ arg->tx_rate_retry_policy[0].policy_index = policy_index;
+ arg->tx_rate_retry_policy[0].short_retry_count = 255;
+ arg->tx_rate_retry_policy[0].long_retry_count = 255;
+ arg->tx_rate_retry_policy[0].first_rate_sel = 1;
+ arg->tx_rate_retry_policy[0].terminate = 1;
+ arg->tx_rate_retry_policy[0].count_init = 1;
+ memcpy(&arg->tx_rate_retry_policy[0].rates, rates,
+ sizeof(arg->tx_rate_retry_policy[0].rates));
+ ret = hif_write_mib(wvif->wdev, wvif->id,
+ HIF_MIB_ID_SET_TX_RATE_RETRY_POLICY, arg, size);
+ kfree(arg);
+ return ret;
}
static inline int hif_set_mac_addr_condition(struct wfx_vif *wvif,
- struct hif_mib_mac_addr_data_frame_condition *arg)
+ int idx, const u8 *mac_addr)
{
+ struct hif_mib_mac_addr_data_frame_condition val = {
+ .condition_idx = idx,
+ .address_type = HIF_MAC_ADDR_A1,
+ };
+
+ ether_addr_copy(val.mac_address, mac_addr);
return hif_write_mib(wvif->wdev, wvif->id,
HIF_MIB_ID_MAC_ADDR_DATAFRAME_CONDITION,
- arg, sizeof(*arg));
+ &val, sizeof(val));
}
static inline int hif_set_uc_mc_bc_condition(struct wfx_vif *wvif,
- struct hif_mib_uc_mc_bc_data_frame_condition *arg)
+ int idx, u8 allowed_frames)
{
+ struct hif_mib_uc_mc_bc_data_frame_condition val = {
+ .condition_idx = idx,
+ .allowed_frames = allowed_frames,
+ };
+
return hif_write_mib(wvif->wdev, wvif->id,
HIF_MIB_ID_UC_MC_BC_DATAFRAME_CONDITION,
- arg, sizeof(*arg));
+ &val, sizeof(val));
}
-static inline int hif_set_config_data_filter(struct wfx_vif *wvif,
- struct hif_mib_config_data_filter *arg)
+static inline int hif_set_config_data_filter(struct wfx_vif *wvif, bool enable,
+ int idx, int mac_filters,
+ int frames_types_filters)
{
+ struct hif_mib_config_data_filter val = {
+ .enable = enable,
+ .filter_idx = idx,
+ .mac_cond = mac_filters,
+ .uc_mc_bc_cond = frames_types_filters,
+ };
+
return hif_write_mib(wvif->wdev, wvif->id,
- HIF_MIB_ID_CONFIG_DATA_FILTER, arg, sizeof(*arg));
+ HIF_MIB_ID_CONFIG_DATA_FILTER, &val, sizeof(val));
}
static inline int hif_set_data_filtering(struct wfx_vif *wvif,
- struct hif_mib_set_data_filtering *arg)
+ bool enable, bool invert)
{
+ struct hif_mib_set_data_filtering val = {
+ .enable = enable,
+ .invert_matching = invert,
+ };
+
return hif_write_mib(wvif->wdev, wvif->id,
- HIF_MIB_ID_SET_DATA_FILTERING, arg, sizeof(*arg));
+ HIF_MIB_ID_SET_DATA_FILTERING, &val, sizeof(val));
}
static inline int hif_keep_alive_period(struct wfx_vif *wvif, int period)
@@ -221,34 +302,56 @@ static inline int hif_keep_alive_period(struct wfx_vif *wvif, int period)
&arg, sizeof(arg));
};
-static inline int hif_set_arp_ipv4_filter(struct wfx_vif *wvif,
- struct hif_mib_arp_ip_addr_table *fp)
+static inline int hif_set_arp_ipv4_filter(struct wfx_vif *wvif, int idx,
+ __be32 *addr)
{
+ struct hif_mib_arp_ip_addr_table arg = {
+ .condition_idx = idx,
+ .arp_enable = HIF_ARP_NS_FILTERING_DISABLE,
+ };
+
+ if (addr) {
+ // Caution: type of addr is __be32
+ memcpy(arg.ipv4_address, addr, sizeof(arg.ipv4_address));
+ arg.arp_enable = HIF_ARP_NS_FILTERING_ENABLE;
+ }
return hif_write_mib(wvif->wdev, wvif->id,
HIF_MIB_ID_ARP_IP_ADDRESSES_TABLE,
- fp, sizeof(*fp));
+ &arg, sizeof(arg));
}
-static inline int hif_use_multi_tx_conf(struct wfx_dev *wdev,
- bool enabled)
+static inline int hif_use_multi_tx_conf(struct wfx_dev *wdev, bool enable)
{
- __le32 arg = enabled ? cpu_to_le32(1) : 0;
+ struct hif_mib_gl_set_multi_msg arg = {
+ .enable_multi_tx_conf = enable,
+ };
return hif_write_mib(wdev, -1, HIF_MIB_ID_GL_SET_MULTI_MSG,
&arg, sizeof(arg));
}
-static inline int hif_set_uapsd_info(struct wfx_vif *wvif,
- struct hif_mib_set_uapsd_information *arg)
+static inline int hif_set_uapsd_info(struct wfx_vif *wvif, unsigned long val)
{
+ struct hif_mib_set_uapsd_information arg = { };
+
+ if (val & BIT(IEEE80211_AC_VO))
+ arg.trig_voice = 1;
+ if (val & BIT(IEEE80211_AC_VI))
+ arg.trig_video = 1;
+ if (val & BIT(IEEE80211_AC_BE))
+ arg.trig_be = 1;
+ if (val & BIT(IEEE80211_AC_BK))
+ arg.trig_bckgrnd = 1;
return hif_write_mib(wvif->wdev, wvif->id,
HIF_MIB_ID_SET_UAPSD_INFORMATION,
- arg, sizeof(*arg));
+ &arg, sizeof(arg));
}
static inline int hif_erp_use_protection(struct wfx_vif *wvif, bool enable)
{
- __le32 arg = enable ? cpu_to_le32(1) : 0;
+ struct hif_mib_non_erp_protection arg = {
+ .use_cts_to_self = enable,
+ };
return hif_write_mib(wvif->wdev, wvif->id,
HIF_MIB_ID_NON_ERP_PROTECTION, &arg, sizeof(arg));
@@ -256,16 +359,18 @@ static inline int hif_erp_use_protection(struct wfx_vif *wvif, bool enable)
static inline int hif_slot_time(struct wfx_vif *wvif, int val)
{
- __le32 arg = cpu_to_le32(val);
+ struct hif_mib_slot_time arg = {
+ .slot_time = cpu_to_le32(val),
+ };
return hif_write_mib(wvif->wdev, wvif->id, HIF_MIB_ID_SLOT_TIME,
&arg, sizeof(arg));
}
-static inline int hif_dual_cts_protection(struct wfx_vif *wvif, bool val)
+static inline int hif_dual_cts_protection(struct wfx_vif *wvif, bool enable)
{
struct hif_mib_set_ht_protection arg = {
- .dual_cts_prot = val,
+ .dual_cts_prot = enable,
};
return hif_write_mib(wvif->wdev, wvif->id, HIF_MIB_ID_SET_HT_PROTECTION,
@@ -274,7 +379,9 @@ static inline int hif_dual_cts_protection(struct wfx_vif *wvif, bool val)
static inline int hif_wep_default_key_id(struct wfx_vif *wvif, int val)
{
- __le32 arg = cpu_to_le32(val);
+ struct hif_mib_wep_default_key_id arg = {
+ .wep_default_key_id = val,
+ };
return hif_write_mib(wvif->wdev, wvif->id,
HIF_MIB_ID_DOT11_WEP_DEFAULT_KEY_ID,
@@ -283,7 +390,9 @@ static inline int hif_wep_default_key_id(struct wfx_vif *wvif, int val)
static inline int hif_rts_threshold(struct wfx_vif *wvif, int val)
{
- __le32 arg = cpu_to_le32(val > 0 ? val : 0xFFFF);
+ struct hif_mib_dot11_rts_threshold arg = {
+ .threshold = cpu_to_le32(val >= 0 ? val : 0xFFFF),
+ };
return hif_write_mib(wvif->wdev, wvif->id,
HIF_MIB_ID_DOT11_RTS_THRESHOLD, &arg, sizeof(arg));
diff --git a/drivers/staging/wfx/hwio.h b/drivers/staging/wfx/hwio.h
index b2c1a66de963..4b6ef061b40b 100644
--- a/drivers/staging/wfx/hwio.h
+++ b/drivers/staging/wfx/hwio.h
@@ -37,16 +37,11 @@ int ahb_reg_write(struct wfx_dev *wdev, u32 addr, u32 val);
#define CFG_ERR_HOST_NO_IN_QUEUE 0x00000040
#define CFG_ERR_HOST_CRC_MISS 0x00000080 // only with SDIO
#define CFG_SPI_IGNORE_CS 0x00000080 // only with SPI
-/* Bytes ordering (only writable in SPI): */
-#define CFG_WORD_MODE_MASK 0x00000300
-/*
- * B1,B0,B3,B2 (In SPI, register address and
- * CONFIG data always use this mode)
- */
-#define CFG_WORD_MODE0 0x00000000
-#define CFG_WORD_MODE1 0x00000100 // B3,B2,B1,B0
-#define CFG_WORD_MODE2 0x00000200 // B0,B1,B2,B3 (SDIO)
-#define CFG_DIRECT_ACCESS_MODE 0x00000400 // Direct or queue access mode
+#define CFG_BYTE_ORDER_MASK 0x00000300 // only writable with SPI
+#define CFG_BYTE_ORDER_BADC 0x00000000
+#define CFG_BYTE_ORDER_DCBA 0x00000100
+#define CFG_BYTE_ORDER_ABCD 0x00000200 // SDIO always use this value
+#define CFG_DIRECT_ACCESS_MODE 0x00000400
#define CFG_PREFETCH_AHB 0x00000800
#define CFG_DISABLE_CPU_CLK 0x00001000
#define CFG_PREFETCH_SRAM 0x00002000
diff --git a/drivers/staging/wfx/main.c b/drivers/staging/wfx/main.c
index 3b47b6c21ea1..84adad64fc30 100644
--- a/drivers/staging/wfx/main.c
+++ b/drivers/staging/wfx/main.c
@@ -131,10 +131,11 @@ static const struct ieee80211_ops wfx_ops = {
.stop = wfx_stop,
.add_interface = wfx_add_interface,
.remove_interface = wfx_remove_interface,
- .config = wfx_config,
+ .config = wfx_config,
.tx = wfx_tx,
.conf_tx = wfx_conf_tx,
.hw_scan = wfx_hw_scan,
+ .cancel_hw_scan = wfx_cancel_hw_scan,
.sta_add = wfx_sta_add,
.sta_remove = wfx_sta_remove,
.sta_notify = wfx_sta_notify,
@@ -182,7 +183,7 @@ struct gpio_desc *wfx_get_gpio(struct device *dev, int override,
} else {
ret = devm_gpiod_get(dev, label, GPIOD_OUT_LOW);
}
- if (IS_ERR(ret) || !ret) {
+ if (IS_ERR_OR_NULL(ret)) {
if (!ret || PTR_ERR(ret) == -ENOENT)
dev_warn(dev, "gpio %s is not defined\n", label);
else
@@ -297,6 +298,11 @@ struct wfx_dev *wfx_init_common(struct device *dev,
hw->wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION) |
BIT(NL80211_IFTYPE_ADHOC) |
BIT(NL80211_IFTYPE_AP);
+ hw->wiphy->probe_resp_offload = NL80211_PROBE_RESP_OFFLOAD_SUPPORT_WPS |
+ NL80211_PROBE_RESP_OFFLOAD_SUPPORT_WPS2 |
+ NL80211_PROBE_RESP_OFFLOAD_SUPPORT_P2P |
+ NL80211_PROBE_RESP_OFFLOAD_SUPPORT_80211U;
+ hw->wiphy->flags |= WIPHY_FLAG_AP_PROBE_RESP_OFFLOAD;
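+	// with these flags, the firmware is expected to answer probe requests
+	// itself, using the probe response template uploaded by the driver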
hw->wiphy->flags |= WIPHY_FLAG_AP_UAPSD;
hw->wiphy->flags &= ~WIPHY_FLAG_PS_ON_BY_DEFAULT;
hw->wiphy->max_ap_assoc_sta = WFX_MAX_STA_IN_AP_MODE;
diff --git a/drivers/staging/wfx/queue.c b/drivers/staging/wfx/queue.c
index 680fed31cefb..0bcc61feee1d 100644
--- a/drivers/staging/wfx/queue.c
+++ b/drivers/staging/wfx/queue.c
@@ -31,8 +31,6 @@ void wfx_tx_flush(struct wfx_dev *wdev)
{
int ret;
- WARN(!atomic_read(&wdev->tx_lock), "tx_lock is not locked");
-
// Do not wait for any reply if chip is frozen
if (wdev->chip_frozen)
return;
@@ -177,11 +175,9 @@ void wfx_tx_queues_deinit(struct wfx_dev *wdev)
wfx_tx_queues_clear(wdev);
}
-size_t wfx_tx_queue_get_num_queued(struct wfx_queue *queue,
- u32 link_id_map)
+int wfx_tx_queue_get_num_queued(struct wfx_queue *queue, u32 link_id_map)
{
- size_t ret;
- int i, bit;
+ int ret, i;
if (!link_id_map)
return 0;
@@ -191,11 +187,9 @@ size_t wfx_tx_queue_get_num_queued(struct wfx_queue *queue,
ret = skb_queue_len(&queue->queue);
} else {
ret = 0;
- for (i = 0, bit = 1; i < ARRAY_SIZE(queue->link_map_cache);
- ++i, bit <<= 1) {
- if (link_id_map & bit)
+ for (i = 0; i < ARRAY_SIZE(queue->link_map_cache); i++)
+ if (link_id_map & BIT(i))
ret += queue->link_map_cache[i];
- }
}
spin_unlock_bh(&queue->queue.lock);
return ret;
@@ -237,7 +231,6 @@ static struct sk_buff *wfx_tx_queue_get(struct wfx_dev *wdev,
break;
}
}
- WARN_ON(!skb);
if (skb) {
tx_priv = wfx_skb_tx_priv(skb);
tx_priv->xmit_timestamp = ktime_get();
@@ -362,80 +355,38 @@ bool wfx_tx_queues_is_empty(struct wfx_dev *wdev)
static bool hif_handle_tx_data(struct wfx_vif *wvif, struct sk_buff *skb,
struct wfx_queue *queue)
{
- bool handled = false;
- struct wfx_tx_priv *tx_priv = wfx_skb_tx_priv(skb);
struct hif_req_tx *req = wfx_skb_txreq(skb);
- struct ieee80211_hdr *frame = (struct ieee80211_hdr *) (req->frame + req->data_flags.fc_offset);
-
- enum {
- do_probe,
- do_drop,
- do_wep,
- do_tx,
- } action = do_tx;
-
- switch (wvif->vif->type) {
- case NL80211_IFTYPE_STATION:
- if (wvif->state < WFX_STATE_PRE_STA)
- action = do_drop;
- break;
- case NL80211_IFTYPE_AP:
- if (!wvif->state) {
- action = do_drop;
- } else if (!(BIT(tx_priv->raw_link_id) &
- (BIT(0) | wvif->link_id_map))) {
- dev_warn(wvif->wdev->dev, "a frame with expired link-id is dropped\n");
- action = do_drop;
- }
- break;
- case NL80211_IFTYPE_ADHOC:
- if (wvif->state != WFX_STATE_IBSS)
- action = do_drop;
- break;
- case NL80211_IFTYPE_MONITOR:
- default:
- action = do_drop;
- break;
- }
-
- if (action == do_tx) {
- if (ieee80211_is_nullfunc(frame->frame_control)) {
- mutex_lock(&wvif->bss_loss_lock);
- if (wvif->bss_loss_state) {
- wvif->bss_loss_confirm_id = req->packet_id;
- req->queue_id.queue_id = HIF_QUEUE_ID_VOICE;
- }
- mutex_unlock(&wvif->bss_loss_lock);
- } else if (ieee80211_has_protected(frame->frame_control) &&
- tx_priv->hw_key &&
- tx_priv->hw_key->keyidx != wvif->wep_default_key_id &&
- (tx_priv->hw_key->cipher == WLAN_CIPHER_SUITE_WEP40 ||
- tx_priv->hw_key->cipher == WLAN_CIPHER_SUITE_WEP104)) {
- action = do_wep;
+ struct ieee80211_key_conf *hw_key = wfx_skb_tx_priv(skb)->hw_key;
+ struct ieee80211_hdr *frame =
+ (struct ieee80211_hdr *)(req->frame + req->data_flags.fc_offset);
+
+ // FIXME: mac80211 is smart enough to handle BSS loss. Driver should not
+ // try to do anything about that.
+ if (ieee80211_is_nullfunc(frame->frame_control)) {
+ mutex_lock(&wvif->bss_loss_lock);
+ if (wvif->bss_loss_state) {
+ wvif->bss_loss_confirm_id = req->packet_id;
+ req->queue_id.queue_id = HIF_QUEUE_ID_VOICE;
}
+ mutex_unlock(&wvif->bss_loss_lock);
}
- switch (action) {
- case do_drop:
- wfx_pending_remove(wvif->wdev, skb);
- handled = true;
- break;
- case do_wep:
+	// FIXME: identify the exact scenario matched by this condition. Can it
+	// still happen?
+ if (ieee80211_has_protected(frame->frame_control) &&
+ hw_key && hw_key->keyidx != wvif->wep_default_key_id &&
+ (hw_key->cipher == WLAN_CIPHER_SUITE_WEP40 ||
+ hw_key->cipher == WLAN_CIPHER_SUITE_WEP104)) {
wfx_tx_lock(wvif->wdev);
WARN_ON(wvif->wep_pending_skb);
- wvif->wep_default_key_id = tx_priv->hw_key->keyidx;
+ wvif->wep_default_key_id = hw_key->keyidx;
wvif->wep_pending_skb = skb;
if (!schedule_work(&wvif->wep_key_work))
wfx_tx_unlock(wvif->wdev);
- handled = true;
- break;
- case do_tx:
- break;
- default:
- /* Do nothing */
- break;
+ return true;
+ } else {
+ return false;
}
- return handled;
}
static int wfx_get_prio_queue(struct wfx_vif *wvif,
@@ -443,7 +394,7 @@ static int wfx_get_prio_queue(struct wfx_vif *wvif,
{
static const int urgent = BIT(WFX_LINK_ID_AFTER_DTIM) |
BIT(WFX_LINK_ID_UAPSD);
- struct hif_req_edca_queue_params *edca;
+ const struct ieee80211_tx_queue_params *edca;
unsigned int score, best = -1;
int winner = -1;
int i;
@@ -452,13 +403,13 @@ static int wfx_get_prio_queue(struct wfx_vif *wvif,
for (i = 0; i < IEEE80211_NUM_ACS; ++i) {
int queued;
- edca = &wvif->edca.params[i];
+ edca = &wvif->edca_params[i];
queued = wfx_tx_queue_get_num_queued(&wvif->wdev->tx_queue[i],
tx_allowed_mask);
if (!queued)
continue;
*total += queued;
- score = ((edca->aifsn + edca->cw_min) << 16) +
+ score = ((edca->aifs + edca->cw_min) << 16) +
((edca->cw_max - edca->cw_min) *
(get_random_int() & 0xFFFF));
if (score < best && (winner < 0 || i != 3)) {
@@ -480,94 +431,100 @@ static int wfx_get_prio_queue(struct wfx_vif *wvif,
static int wfx_tx_queue_mask_get(struct wfx_vif *wvif,
struct wfx_queue **queue_p,
- u32 *tx_allowed_mask_p,
- bool *more)
+ u32 *tx_allowed_mask_p)
{
int idx;
u32 tx_allowed_mask;
int total = 0;
- /* Search for a queue with multicast frames buffered */
- if (wvif->mcast_tx) {
- tx_allowed_mask = BIT(WFX_LINK_ID_AFTER_DTIM);
- idx = wfx_get_prio_queue(wvif, tx_allowed_mask, &total);
- if (idx >= 0) {
- *more = total > 1;
- goto found;
- }
- }
-
/* Search for unicast traffic */
tx_allowed_mask = ~wvif->sta_asleep_mask;
tx_allowed_mask |= BIT(WFX_LINK_ID_UAPSD);
- if (wvif->sta_asleep_mask) {
- tx_allowed_mask |= wvif->pspoll_mask;
+ if (wvif->sta_asleep_mask)
tx_allowed_mask &= ~BIT(WFX_LINK_ID_AFTER_DTIM);
- } else {
+ else
tx_allowed_mask |= BIT(WFX_LINK_ID_AFTER_DTIM);
- }
idx = wfx_get_prio_queue(wvif, tx_allowed_mask, &total);
if (idx < 0)
return -ENOENT;
-found:
*queue_p = &wvif->wdev->tx_queue[idx];
*tx_allowed_mask_p = tx_allowed_mask;
return 0;
}
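+
+/*
+ * Return the first queued frame flagged IEEE80211_TX_CTL_SEND_AFTER_DTIM for
+ * this vif, or NULL if none is pending.
+ */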
+struct hif_msg *wfx_tx_queues_get_after_dtim(struct wfx_vif *wvif)
+{
+ struct wfx_dev *wdev = wvif->wdev;
+ struct ieee80211_tx_info *tx_info;
+ struct hif_msg *hif;
+ struct sk_buff *skb;
+ int i;
+
+ for (i = 0; i < IEEE80211_NUM_ACS; ++i) {
+ skb_queue_walk(&wdev->tx_queue[i].queue, skb) {
+ tx_info = IEEE80211_SKB_CB(skb);
+ hif = (struct hif_msg *)skb->data;
+ if ((tx_info->flags & IEEE80211_TX_CTL_SEND_AFTER_DTIM) &&
+ (hif->interface == wvif->id))
+ return (struct hif_msg *)skb->data;
+ }
+ }
+ return NULL;
+}
+
struct hif_msg *wfx_tx_queues_get(struct wfx_dev *wdev)
{
struct sk_buff *skb;
struct hif_msg *hif = NULL;
- struct hif_req_tx *req = NULL;
struct wfx_queue *queue = NULL;
struct wfx_queue *vif_queue = NULL;
u32 tx_allowed_mask = 0;
u32 vif_tx_allowed_mask = 0;
const struct wfx_tx_priv *tx_priv = NULL;
struct wfx_vif *wvif;
- /* More is used only for broadcasts. */
- bool more = false;
- bool vif_more = false;
int not_found;
int burst;
+ int i;
+
+ if (atomic_read(&wdev->tx_lock))
+ return NULL;
+
+ wvif = NULL;
+ while ((wvif = wvif_iterate(wdev, wvif)) != NULL) {
+ if (wvif->after_dtim_tx_allowed) {
+ for (i = 0; i < IEEE80211_NUM_ACS; ++i) {
+ skb = wfx_tx_queue_get(wvif->wdev,
+ &wdev->tx_queue[i],
+ BIT(WFX_LINK_ID_AFTER_DTIM));
+ if (skb) {
+ hif = (struct hif_msg *)skb->data;
+				// Cannot happen since only one vif can
+				// be AP at a time
+ WARN_ON(wvif->id != hif->interface);
+ return hif;
+ }
+ }
+			// No more multicast frames to send
+ wvif->after_dtim_tx_allowed = false;
+ schedule_work(&wvif->update_tim_work);
+ }
+ }
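+
+	// no after-DTIM frame was found; fall back to the normal arbitration
+	// between vifs and access categories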
for (;;) {
int ret = -ENOENT;
int queue_num;
- struct ieee80211_hdr *hdr;
-
- if (atomic_read(&wdev->tx_lock))
- return NULL;
wvif = NULL;
while ((wvif = wvif_iterate(wdev, wvif)) != NULL) {
spin_lock_bh(&wvif->ps_state_lock);
not_found = wfx_tx_queue_mask_get(wvif, &vif_queue,
- &vif_tx_allowed_mask,
- &vif_more);
-
- if (wvif->mcast_buffered && (not_found || !vif_more) &&
- (wvif->mcast_tx ||
- !wvif->sta_asleep_mask)) {
- wvif->mcast_buffered = false;
- if (wvif->mcast_tx) {
- wvif->mcast_tx = false;
- schedule_work(&wvif->mcast_stop_work);
- }
- }
+ &vif_tx_allowed_mask);
spin_unlock_bh(&wvif->ps_state_lock);
- if (vif_more) {
- more = true;
- tx_allowed_mask = vif_tx_allowed_mask;
- queue = vif_queue;
- ret = 0;
- break;
- } else if (!not_found) {
+ if (!not_found) {
if (queue && queue != vif_queue)
dev_info(wdev->dev, "vifs disagree about queue priority\n");
tx_allowed_mask |= vif_tx_allowed_mask;
@@ -592,11 +549,9 @@ struct hif_msg *wfx_tx_queues_get(struct wfx_dev *wdev)
if (hif_handle_tx_data(wvif, skb, queue))
continue; /* Handled by WSM */
- wvif->pspoll_mask &= ~BIT(tx_priv->raw_link_id);
-
/* allow bursting if txop is set */
- if (wvif->edca.params[queue_num].tx_op_limit)
- burst = (int)wfx_tx_queue_get_num_queued(queue, tx_allowed_mask) + 1;
+ if (wvif->edca_params[queue_num].txop)
+ burst = wfx_tx_queue_get_num_queued(queue, tx_allowed_mask) + 1;
else
burst = 1;
@@ -606,15 +561,6 @@ struct hif_msg *wfx_tx_queues_get(struct wfx_dev *wdev)
else
wdev->tx_burst_idx = -1;
- /* more buffered multicast/broadcast frames
- * ==> set MoreData flag in IEEE 802.11 header
- * to inform PS STAs
- */
- if (more) {
- req = (struct hif_req_tx *) hif->body;
- hdr = (struct ieee80211_hdr *) (req->frame + req->data_flags.fc_offset);
- hdr->frame_control |= cpu_to_le16(IEEE80211_FCTL_MOREDATA);
- }
return hif;
}
}
diff --git a/drivers/staging/wfx/queue.h b/drivers/staging/wfx/queue.h
index 21566e48b2c2..90bb060d1204 100644
--- a/drivers/staging/wfx/queue.h
+++ b/drivers/staging/wfx/queue.h
@@ -13,9 +13,10 @@
#include "hif_api_cmd.h"
#define WFX_MAX_STA_IN_AP_MODE 14
-#define WFX_LINK_ID_AFTER_DTIM (WFX_MAX_STA_IN_AP_MODE + 1)
-#define WFX_LINK_ID_UAPSD (WFX_MAX_STA_IN_AP_MODE + 2)
-#define WFX_LINK_ID_MAX (WFX_MAX_STA_IN_AP_MODE + 3)
+#define WFX_LINK_ID_NO_ASSOC 15
+#define WFX_LINK_ID_AFTER_DTIM (WFX_LINK_ID_NO_ASSOC + 1)
+#define WFX_LINK_ID_UAPSD (WFX_LINK_ID_NO_ASSOC + 2)
+#define WFX_LINK_ID_MAX (WFX_LINK_ID_NO_ASSOC + 3)
struct wfx_dev;
struct wfx_vif;
@@ -46,10 +47,11 @@ void wfx_tx_queues_clear(struct wfx_dev *wdev);
bool wfx_tx_queues_is_empty(struct wfx_dev *wdev);
void wfx_tx_queues_wait_empty_vif(struct wfx_vif *wvif);
struct hif_msg *wfx_tx_queues_get(struct wfx_dev *wdev);
+struct hif_msg *wfx_tx_queues_get_after_dtim(struct wfx_vif *wvif);
void wfx_tx_queue_put(struct wfx_dev *wdev, struct wfx_queue *queue,
struct sk_buff *skb);
-size_t wfx_tx_queue_get_num_queued(struct wfx_queue *queue, u32 link_id_map);
+int wfx_tx_queue_get_num_queued(struct wfx_queue *queue, u32 link_id_map);
struct sk_buff *wfx_pending_get(struct wfx_dev *wdev, u32 packet_id);
int wfx_pending_remove(struct wfx_dev *wdev, struct sk_buff *skb);
diff --git a/drivers/staging/wfx/scan.c b/drivers/staging/wfx/scan.c
index 35fcf9119f96..6e1e50048651 100644
--- a/drivers/staging/wfx/scan.c
+++ b/drivers/staging/wfx/scan.c
@@ -16,279 +16,118 @@ static void __ieee80211_scan_completed_compat(struct ieee80211_hw *hw,
bool aborted)
{
struct cfg80211_scan_info info = {
- .aborted = aborted ? 1 : 0,
+ .aborted = aborted,
};
ieee80211_scan_completed(hw, &info);
}
-static void wfx_scan_restart_delayed(struct wfx_vif *wvif)
+static int update_probe_tmpl(struct wfx_vif *wvif,
+ struct cfg80211_scan_request *req)
{
- if (wvif->delayed_unjoin) {
- wvif->delayed_unjoin = false;
- if (!schedule_work(&wvif->unjoin_work))
- wfx_tx_unlock(wvif->wdev);
- } else if (wvif->delayed_link_loss) {
- wvif->delayed_link_loss = 0;
- wfx_cqm_bssloss_sm(wvif, 1, 0, 0);
- }
-}
-
-static int wfx_scan_start(struct wfx_vif *wvif, struct wfx_scan_params *scan)
-{
- int ret;
- int tmo = 500;
-
- if (wvif->state == WFX_STATE_PRE_STA)
- return -EBUSY;
-
- tmo += scan->scan_req.num_of_channels *
- ((20 * (scan->scan_req.max_channel_time)) + 10);
- atomic_set(&wvif->scan.in_progress, 1);
- atomic_set(&wvif->wdev->scan_in_progress, 1);
-
- schedule_delayed_work(&wvif->scan.timeout, msecs_to_jiffies(tmo));
- ret = hif_scan(wvif, scan);
- if (ret) {
- wfx_scan_failed_cb(wvif);
- atomic_set(&wvif->scan.in_progress, 0);
- atomic_set(&wvif->wdev->scan_in_progress, 0);
- cancel_delayed_work_sync(&wvif->scan.timeout);
- wfx_scan_restart_delayed(wvif);
- }
- return ret;
-}
-
-int wfx_hw_scan(struct ieee80211_hw *hw,
- struct ieee80211_vif *vif,
- struct ieee80211_scan_request *hw_req)
-{
- struct wfx_dev *wdev = hw->priv;
- struct wfx_vif *wvif = (struct wfx_vif *) vif->drv_priv;
- struct cfg80211_scan_request *req = &hw_req->req;
struct sk_buff *skb;
- int i, ret;
- struct hif_mib_template_frame *p;
-
- if (!wvif)
- return -EINVAL;
-
- if (wvif->state == WFX_STATE_AP)
- return -EOPNOTSUPP;
-
- if (req->n_ssids == 1 && !req->ssids[0].ssid_len)
- req->n_ssids = 0;
- if (req->n_ssids > HIF_API_MAX_NB_SSIDS)
- return -EINVAL;
-
- skb = ieee80211_probereq_get(hw, wvif->vif->addr, NULL, 0, req->ie_len);
+ skb = ieee80211_probereq_get(wvif->wdev->hw, wvif->vif->addr,
+ NULL, 0, req->ie_len);
if (!skb)
return -ENOMEM;
- if (req->ie_len)
- memcpy(skb_put(skb, req->ie_len), req->ie, req->ie_len);
-
- mutex_lock(&wdev->conf_mutex);
-
- p = (struct hif_mib_template_frame *)skb_push(skb, 4);
- p->frame_type = HIF_TMPLT_PRBREQ;
- p->frame_length = cpu_to_le16(skb->len - 4);
- ret = hif_set_template_frame(wvif, p);
- skb_pull(skb, 4);
-
- if (!ret)
- /* Host want to be the probe responder. */
- ret = wfx_fwd_probe_req(wvif, true);
- if (ret) {
- mutex_unlock(&wdev->conf_mutex);
- dev_kfree_skb(skb);
- return ret;
- }
-
- wfx_tx_lock_flush(wdev);
-
- WARN(wvif->scan.req, "unexpected concurrent scan");
- wvif->scan.req = req;
- wvif->scan.n_ssids = 0;
- wvif->scan.status = 0;
- wvif->scan.begin = &req->channels[0];
- wvif->scan.curr = wvif->scan.begin;
- wvif->scan.end = &req->channels[req->n_channels];
- wvif->scan.output_power = wdev->output_power;
-
- for (i = 0; i < req->n_ssids; ++i) {
- struct hif_ssid_def *dst = &wvif->scan.ssids[wvif->scan.n_ssids];
-
- memcpy(&dst->ssid[0], req->ssids[i].ssid, sizeof(dst->ssid));
- dst->ssid_length = req->ssids[i].ssid_len;
- ++wvif->scan.n_ssids;
- }
-
- mutex_unlock(&wdev->conf_mutex);
-
- if (skb)
- dev_kfree_skb(skb);
- schedule_work(&wvif->scan.work);
+ skb_put_data(skb, req->ie, req->ie_len);
+ hif_set_template_frame(wvif, skb, HIF_TMPLT_PRBREQ, 0);
+ dev_kfree_skb(skb);
return 0;
}
-void wfx_scan_work(struct work_struct *work)
+static int send_scan_req(struct wfx_vif *wvif,
+ struct cfg80211_scan_request *req, int start_idx)
{
- struct wfx_vif *wvif = container_of(work, struct wfx_vif, scan.work);
- struct ieee80211_channel **it;
- struct wfx_scan_params scan = {
- .scan_req.scan_type.type = 0, /* Foreground */
- };
- struct ieee80211_channel *first;
- bool first_run = (wvif->scan.begin == wvif->scan.curr &&
- wvif->scan.begin != wvif->scan.end);
- int i;
-
- down(&wvif->scan.lock);
- mutex_lock(&wvif->wdev->conf_mutex);
-
- if (first_run) {
- if (wvif->state == WFX_STATE_STA &&
- !(wvif->powersave_mode.pm_mode.enter_psm)) {
- struct hif_req_set_pm_mode pm = wvif->powersave_mode;
-
- pm.pm_mode.enter_psm = 1;
- wfx_set_pm(wvif, &pm);
- }
- }
-
- if (!wvif->scan.req || wvif->scan.curr == wvif->scan.end) {
- if (wvif->scan.output_power != wvif->wdev->output_power)
- hif_set_output_power(wvif,
- wvif->wdev->output_power * 10);
-
- if (wvif->scan.status < 0)
- dev_warn(wvif->wdev->dev, "scan failed\n");
- else if (wvif->scan.req)
- dev_dbg(wvif->wdev->dev, "scan completed\n");
- else
- dev_dbg(wvif->wdev->dev, "scan canceled\n");
-
- wvif->scan.req = NULL;
- wfx_scan_restart_delayed(wvif);
- wfx_tx_unlock(wvif->wdev);
- mutex_unlock(&wvif->wdev->conf_mutex);
- __ieee80211_scan_completed_compat(wvif->wdev->hw,
- wvif->scan.status ? 1 : 0);
- up(&wvif->scan.lock);
- if (wvif->state == WFX_STATE_STA &&
- !(wvif->powersave_mode.pm_mode.enter_psm))
- wfx_set_pm(wvif, &wvif->powersave_mode);
- return;
- }
- first = *wvif->scan.curr;
-
- for (it = wvif->scan.curr + 1, i = 1;
- it != wvif->scan.end && i < HIF_API_MAX_NB_CHANNELS;
- ++it, ++i) {
- if ((*it)->band != first->band)
- break;
- if (((*it)->flags ^ first->flags) &
- IEEE80211_CHAN_NO_IR)
+ int i, ret, timeout;
+ struct ieee80211_channel *ch_start, *ch_cur;
+
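+	// group consecutive channels sharing the same max_power and the same
+	// NO_IR policy, so they can be scanned with a single HIF request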
+ for (i = start_idx; i < req->n_channels; i++) {
+ ch_start = req->channels[start_idx];
+ ch_cur = req->channels[i];
+ WARN(ch_cur->band != NL80211_BAND_2GHZ, "band not supported");
+ if (ch_cur->max_power != ch_start->max_power)
break;
- if (!(first->flags & IEEE80211_CHAN_NO_IR) &&
- (*it)->max_power != first->max_power)
+ if ((ch_cur->flags ^ ch_start->flags) & IEEE80211_CHAN_NO_IR)
break;
}
- scan.scan_req.band = first->band;
-
- if (wvif->scan.req->no_cck)
- scan.scan_req.max_transmit_rate = API_RATE_INDEX_G_6MBPS;
- else
- scan.scan_req.max_transmit_rate = API_RATE_INDEX_B_1MBPS;
- scan.scan_req.num_of_probe_requests =
- (first->flags & IEEE80211_CHAN_NO_IR) ? 0 : 2;
- scan.scan_req.num_of_ssi_ds = wvif->scan.n_ssids;
- scan.ssids = &wvif->scan.ssids[0];
- scan.scan_req.num_of_channels = it - wvif->scan.curr;
- scan.scan_req.probe_delay = 100;
- // FIXME: Check if FW can do active scan while joined.
- if (wvif->state == WFX_STATE_STA) {
- scan.scan_req.scan_type.type = 1;
- scan.scan_req.scan_flags.fbg = 1;
+ wfx_tx_lock_flush(wvif->wdev);
+ wvif->scan_abort = false;
+ reinit_completion(&wvif->scan_complete);
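+	// hif_scan() presumably returns the expected duration of the scan,
+	// reused below as the completion timeout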
+ timeout = hif_scan(wvif, req, start_idx, i - start_idx);
+ if (timeout < 0)
+ return timeout;
+ ret = wait_for_completion_timeout(&wvif->scan_complete, timeout);
+ if (req->channels[start_idx]->max_power != wvif->vif->bss_conf.txpower)
+ hif_set_output_power(wvif, wvif->vif->bss_conf.txpower);
+ wfx_tx_unlock(wvif->wdev);
+ if (!ret) {
+ dev_notice(wvif->wdev->dev, "scan timeout\n");
+ hif_stop_scan(wvif);
+ return -ETIMEDOUT;
}
-
- scan.ch = kcalloc(scan.scan_req.num_of_channels,
- sizeof(u8), GFP_KERNEL);
-
- if (!scan.ch) {
- wvif->scan.status = -ENOMEM;
- goto fail;
+ if (wvif->scan_abort) {
+ dev_notice(wvif->wdev->dev, "scan abort\n");
+ return -ECONNABORTED;
}
- for (i = 0; i < scan.scan_req.num_of_channels; ++i)
- scan.ch[i] = wvif->scan.curr[i]->hw_value;
+ return i - start_idx;
+}
- if (wvif->scan.curr[0]->flags & IEEE80211_CHAN_NO_IR) {
- scan.scan_req.min_channel_time = 50;
- scan.scan_req.max_channel_time = 150;
- } else {
- scan.scan_req.min_channel_time = 10;
- scan.scan_req.max_channel_time = 50;
- }
- if (!(first->flags & IEEE80211_CHAN_NO_IR) &&
- wvif->scan.output_power != first->max_power) {
- wvif->scan.output_power = first->max_power;
- hif_set_output_power(wvif, wvif->scan.output_power * 10);
- }
- wvif->scan.status = wfx_scan_start(wvif, &scan);
- kfree(scan.ch);
- if (wvif->scan.status)
- goto fail;
- wvif->scan.curr = it;
- mutex_unlock(&wvif->wdev->conf_mutex);
- return;
+/*
+ * It is not really necessary to run the scan request asynchronously. However,
+ * there is a bug in "iw scan" when ieee80211_scan_completed() is called before
+ * wfx_hw_scan() returns.
+ */
+void wfx_hw_scan_work(struct work_struct *work)
+{
+ struct wfx_vif *wvif = container_of(work, struct wfx_vif, scan_work);
+ struct ieee80211_scan_request *hw_req = wvif->scan_req;
+ int chan_cur, ret;
-fail:
- wvif->scan.curr = wvif->scan.end;
+ mutex_lock(&wvif->scan_lock);
+ mutex_lock(&wvif->wdev->conf_mutex);
+ update_probe_tmpl(wvif, &hw_req->req);
+ wfx_fwd_probe_req(wvif, true);
+ chan_cur = 0;
+ do {
+ ret = send_scan_req(wvif, &hw_req->req, chan_cur);
+ if (ret > 0)
+ chan_cur += ret;
+ } while (ret > 0 && chan_cur < hw_req->req.n_channels);
mutex_unlock(&wvif->wdev->conf_mutex);
- up(&wvif->scan.lock);
- schedule_work(&wvif->scan.work);
+ mutex_unlock(&wvif->scan_lock);
+ __ieee80211_scan_completed_compat(wvif->wdev->hw, ret < 0);
}
-static void wfx_scan_complete(struct wfx_vif *wvif)
+int wfx_hw_scan(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ struct ieee80211_scan_request *hw_req)
{
- up(&wvif->scan.lock);
- atomic_set(&wvif->wdev->scan_in_progress, 0);
+ struct wfx_vif *wvif = (struct wfx_vif *)vif->drv_priv;
- wfx_scan_work(&wvif->scan.work);
-}
+ WARN_ON(hw_req->req.n_channels > HIF_API_MAX_NB_CHANNELS);
-void wfx_scan_failed_cb(struct wfx_vif *wvif)
-{
- if (cancel_delayed_work_sync(&wvif->scan.timeout) > 0) {
- wvif->scan.status = -EIO;
- schedule_work(&wvif->scan.timeout.work);
- }
+ if (vif->type == NL80211_IFTYPE_AP)
+ return -EOPNOTSUPP;
+
+ if (wvif->state == WFX_STATE_PRE_STA)
+ return -EBUSY;
+
+ wvif->scan_req = hw_req;
+ schedule_work(&wvif->scan_work);
+ return 0;
}
-void wfx_scan_complete_cb(struct wfx_vif *wvif, struct hif_ind_scan_cmpl *arg)
+void wfx_cancel_hw_scan(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
{
- if (cancel_delayed_work_sync(&wvif->scan.timeout) > 0) {
- wvif->scan.status = 1;
- schedule_work(&wvif->scan.timeout.work);
- }
+ struct wfx_vif *wvif = (struct wfx_vif *)vif->drv_priv;
+
+ wvif->scan_abort = true;
+ hif_stop_scan(wvif);
}
-void wfx_scan_timeout(struct work_struct *work)
+void wfx_scan_complete(struct wfx_vif *wvif)
{
- struct wfx_vif *wvif = container_of(work, struct wfx_vif,
- scan.timeout.work);
-
- if (atomic_xchg(&wvif->scan.in_progress, 0)) {
- if (wvif->scan.status > 0) {
- wvif->scan.status = 0;
- } else if (!wvif->scan.status) {
- dev_warn(wvif->wdev->dev, "timeout waiting for scan complete notification\n");
- wvif->scan.status = -ETIMEDOUT;
- wvif->scan.curr = wvif->scan.end;
- hif_stop_scan(wvif);
- }
- wfx_scan_complete(wvif);
- }
+ complete(&wvif->scan_complete);
}
diff --git a/drivers/staging/wfx/scan.h b/drivers/staging/wfx/scan.h
index b4ddd0771a9b..2eb786c9572c 100644
--- a/drivers/staging/wfx/scan.h
+++ b/drivers/staging/wfx/scan.h
@@ -8,35 +8,15 @@
#ifndef WFX_SCAN_H
#define WFX_SCAN_H
-#include <linux/semaphore.h>
-#include <linux/workqueue.h>
#include <net/mac80211.h>
-#include "hif_api_cmd.h"
-
struct wfx_dev;
struct wfx_vif;
-struct wfx_scan {
- struct semaphore lock;
- struct work_struct work;
- struct delayed_work timeout;
- struct cfg80211_scan_request *req;
- struct ieee80211_channel **begin;
- struct ieee80211_channel **curr;
- struct ieee80211_channel **end;
- struct hif_ssid_def ssids[HIF_API_MAX_NB_SSIDS];
- int output_power;
- int n_ssids;
- int status;
- atomic_t in_progress;
-};
-
+void wfx_hw_scan_work(struct work_struct *work);
int wfx_hw_scan(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
struct ieee80211_scan_request *req);
-void wfx_scan_work(struct work_struct *work);
-void wfx_scan_timeout(struct work_struct *work);
-void wfx_scan_complete_cb(struct wfx_vif *wvif, struct hif_ind_scan_cmpl *arg);
-void wfx_scan_failed_cb(struct wfx_vif *wvif);
+void wfx_cancel_hw_scan(struct ieee80211_hw *hw, struct ieee80211_vif *vif);
+void wfx_scan_complete(struct wfx_vif *wvif);
#endif /* WFX_SCAN_H */
diff --git a/drivers/staging/wfx/secure_link.h b/drivers/staging/wfx/secure_link.h
index 666b26e5308d..c3d055b2f8b1 100644
--- a/drivers/staging/wfx/secure_link.h
+++ b/drivers/staging/wfx/secure_link.h
@@ -25,14 +25,16 @@ static inline int wfx_sl_decode(struct wfx_dev *wdev, struct hif_sl_msg *m)
return -EIO;
}
-static inline int wfx_sl_encode(struct wfx_dev *wdev, struct hif_msg *input,
+static inline int wfx_sl_encode(struct wfx_dev *wdev,
+ const struct hif_msg *input,
struct hif_sl_msg *output)
{
return -EIO;
}
-static inline int wfx_sl_check_pubkey(struct wfx_dev *wdev, u8 *ncp_pubkey,
- u8 *ncp_pubmac)
+static inline int wfx_sl_check_pubkey(struct wfx_dev *wdev,
+ const u8 *ncp_pubkey,
+ const u8 *ncp_pubmac)
{
return -EIO;
}
diff --git a/drivers/staging/wfx/sta.c b/drivers/staging/wfx/sta.c
index 471dd15b227f..03d0f224ffdb 100644
--- a/drivers/staging/wfx/sta.c
+++ b/drivers/staging/wfx/sta.c
@@ -17,10 +17,9 @@
#include "hif_tx.h"
#include "hif_tx_mib.h"
-#define TXOP_UNIT 32
#define HIF_MAX_ARP_IP_ADDRTABLE_ENTRIES 2
-static u32 wfx_rate_mask_to_hw(struct wfx_dev *wdev, u32 rates)
+u32 wfx_rate_mask_to_hw(struct wfx_dev *wdev, u32 rates)
{
int i;
u32 ret = 0;
@@ -64,13 +63,8 @@ void wfx_cqm_bssloss_sm(struct wfx_vif *wvif, int init, int good, int bad)
int tx = 0;
mutex_lock(&wvif->bss_loss_lock);
- wvif->delayed_link_loss = 0;
cancel_work_sync(&wvif->bss_params_work);
- /* If we have a pending unjoin */
- if (wvif->delayed_unjoin)
- goto end;
-
if (init) {
schedule_delayed_work(&wvif->bss_loss_work, HZ);
wvif->bss_loss_state = 0;
@@ -94,62 +88,30 @@ void wfx_cqm_bssloss_sm(struct wfx_vif *wvif, int init, int good, int bad)
// FIXME: call ieee80211_beacon_loss/ieee80211_connection_loss instead
if (tx) {
struct sk_buff *skb;
+ struct ieee80211_hdr *hdr;
+ struct ieee80211_tx_control control = { };
wvif->bss_loss_state++;
skb = ieee80211_nullfunc_get(wvif->wdev->hw, wvif->vif, false);
if (!skb)
goto end;
+ hdr = (struct ieee80211_hdr *)skb->data;
memset(IEEE80211_SKB_CB(skb), 0,
sizeof(*IEEE80211_SKB_CB(skb)));
IEEE80211_SKB_CB(skb)->control.vif = wvif->vif;
IEEE80211_SKB_CB(skb)->driver_rates[0].idx = 0;
IEEE80211_SKB_CB(skb)->driver_rates[0].count = 1;
IEEE80211_SKB_CB(skb)->driver_rates[1].idx = -1;
- wfx_tx(wvif->wdev->hw, NULL, skb);
+ rcu_read_lock(); // protect control.sta
+ control.sta = ieee80211_find_sta(wvif->vif, hdr->addr1);
+ wfx_tx(wvif->wdev->hw, &control, skb);
+ rcu_read_unlock();
}
end:
mutex_unlock(&wvif->bss_loss_lock);
}
-static int wfx_set_uapsd_param(struct wfx_vif *wvif,
- const struct wfx_edca_params *arg)
-{
- /* Here's the mapping AC [queue, bit]
- * VO [0,3], VI [1, 2], BE [2, 1], BK [3, 0]
- */
-
- if (arg->uapsd_enable[IEEE80211_AC_VO])
- wvif->uapsd_info.trig_voice = 1;
- else
- wvif->uapsd_info.trig_voice = 0;
-
- if (arg->uapsd_enable[IEEE80211_AC_VI])
- wvif->uapsd_info.trig_video = 1;
- else
- wvif->uapsd_info.trig_video = 0;
-
- if (arg->uapsd_enable[IEEE80211_AC_BE])
- wvif->uapsd_info.trig_be = 1;
- else
- wvif->uapsd_info.trig_be = 0;
-
- if (arg->uapsd_enable[IEEE80211_AC_BK])
- wvif->uapsd_info.trig_bckgrnd = 1;
- else
- wvif->uapsd_info.trig_bckgrnd = 0;
-
- /* Currently pseudo U-APSD operation is not supported, so setting
- * MinAutoTriggerInterval, MaxAutoTriggerInterval and
- * AutoTriggerStep to 0
- */
- wvif->uapsd_info.min_auto_trigger_interval = 0;
- wvif->uapsd_info.max_auto_trigger_interval = 0;
- wvif->uapsd_info.auto_trigger_step = 0;
-
- return hif_set_uapsd_info(wvif, &wvif->uapsd_info);
-}
-
int wfx_fwd_probe_req(struct wfx_vif *wvif, bool enable)
{
wvif->fwd_probe_req = enable;
@@ -160,63 +122,31 @@ int wfx_fwd_probe_req(struct wfx_vif *wvif, bool enable)
static int wfx_set_mcast_filter(struct wfx_vif *wvif,
struct wfx_grp_addr_table *fp)
{
- int i, ret;
- struct hif_mib_config_data_filter config = { };
- struct hif_mib_set_data_filtering filter_data = { };
- struct hif_mib_mac_addr_data_frame_condition filter_addr_val = { };
- struct hif_mib_uc_mc_bc_data_frame_condition filter_addr_type = { };
+ int i;
// Temporary workaround for filters
- return hif_set_data_filtering(wvif, &filter_data);
-
- if (!fp->enable) {
- filter_data.enable = 0;
- return hif_set_data_filtering(wvif, &filter_data);
- }
-
- // A1 Address match on list
- for (i = 0; i < fp->num_addresses; i++) {
- filter_addr_val.condition_idx = i;
- filter_addr_val.address_type = HIF_MAC_ADDR_A1;
- ether_addr_copy(filter_addr_val.mac_address,
- fp->address_list[i]);
- ret = hif_set_mac_addr_condition(wvif,
- &filter_addr_val);
- if (ret)
- return ret;
- config.mac_cond |= 1 << i;
- }
-
- // Accept unicast and broadcast
- filter_addr_type.condition_idx = 0;
- filter_addr_type.param.bits.type_unicast = 1;
- filter_addr_type.param.bits.type_broadcast = 1;
- ret = hif_set_uc_mc_bc_condition(wvif, &filter_addr_type);
- if (ret)
- return ret;
+ return hif_set_data_filtering(wvif, false, true);
- config.uc_mc_bc_cond = 1;
- config.filter_idx = 0; // TODO #define MULTICAST_FILTERING 0
- config.enable = 1;
- ret = hif_set_config_data_filter(wvif, &config);
- if (ret)
- return ret;
+ if (!fp->enable)
+ return hif_set_data_filtering(wvif, false, true);
- // discard all data frames except match filter
- filter_data.enable = 1;
- filter_data.default_filter = 1; // discard all
- ret = hif_set_data_filtering(wvif, &filter_data);
+ for (i = 0; i < fp->num_addresses; i++)
+ hif_set_mac_addr_condition(wvif, i, fp->address_list[i]);
+ hif_set_uc_mc_bc_condition(wvif, 0,
+ HIF_FILTER_UNICAST | HIF_FILTER_BROADCAST);
+ hif_set_config_data_filter(wvif, true, 0, BIT(1),
+ BIT(fp->num_addresses) - 1);
+ hif_set_data_filtering(wvif, true, true);
- return ret;
+ return 0;
}
void wfx_update_filtering(struct wfx_vif *wvif)
{
int ret;
- bool is_sta = wvif->vif && NL80211_IFTYPE_STATION == wvif->vif->type;
- bool filter_bssid = wvif->filter_bssid;
- bool fwd_probe_req = wvif->fwd_probe_req;
- struct hif_mib_bcn_filter_enable bf_ctrl;
+ int bf_enable;
+ int bf_count;
+ int n_filter_ies;
struct hif_ie_table_entry filter_ies[] = {
{
.ie_id = WLAN_EID_VENDOR_SPECIFIC,
@@ -236,33 +166,29 @@ void wfx_update_filtering(struct wfx_vif *wvif)
.has_appeared = 1,
}
};
- int n_filter_ies;
if (wvif->state == WFX_STATE_PASSIVE)
return;
if (wvif->disable_beacon_filter) {
- bf_ctrl.enable = 0;
- bf_ctrl.bcn_count = 1;
+ bf_enable = 0;
+ bf_count = 1;
n_filter_ies = 0;
- } else if (!is_sta) {
- bf_ctrl.enable = HIF_BEACON_FILTER_ENABLE |
- HIF_BEACON_FILTER_AUTO_ERP;
- bf_ctrl.bcn_count = 0;
+ } else if (wvif->vif->type != NL80211_IFTYPE_STATION) {
+ bf_enable = HIF_BEACON_FILTER_ENABLE | HIF_BEACON_FILTER_AUTO_ERP;
+ bf_count = 0;
n_filter_ies = 2;
} else {
- bf_ctrl.enable = HIF_BEACON_FILTER_ENABLE;
- bf_ctrl.bcn_count = 0;
+ bf_enable = HIF_BEACON_FILTER_ENABLE;
+ bf_count = 0;
n_filter_ies = 3;
}
- ret = hif_set_rx_filter(wvif, filter_bssid, fwd_probe_req);
+ ret = hif_set_rx_filter(wvif, wvif->filter_bssid, wvif->fwd_probe_req);
if (!ret)
- ret = hif_set_beacon_filter_table(wvif, n_filter_ies,
- filter_ies);
+ ret = hif_set_beacon_filter_table(wvif, n_filter_ies, filter_ies);
if (!ret)
- ret = hif_beacon_filter_control(wvif, bf_ctrl.enable,
- bf_ctrl.bcn_count);
+ ret = hif_beacon_filter_control(wvif, bf_enable, bf_count);
if (!ret)
ret = wfx_set_mcast_filter(wvif, &wvif->mcast_filter);
if (ret)
@@ -316,90 +242,71 @@ void wfx_configure_filter(struct ieee80211_hw *hw,
*total_flags &= FIF_OTHER_BSS | FIF_FCSFAIL | FIF_PROBE_REQ;
while ((wvif = wvif_iterate(wdev, wvif)) != NULL) {
- down(&wvif->scan.lock);
+ mutex_lock(&wvif->scan_lock);
wvif->filter_bssid = (*total_flags &
(FIF_OTHER_BSS | FIF_PROBE_REQ)) ? 0 : 1;
wvif->disable_beacon_filter = !(*total_flags & FIF_PROBE_REQ);
wfx_fwd_probe_req(wvif, true);
wfx_update_filtering(wvif);
- up(&wvif->scan.lock);
+ mutex_unlock(&wvif->scan_lock);
}
}
-int wfx_conf_tx(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
- u16 queue, const struct ieee80211_tx_queue_params *params)
+static int wfx_update_pm(struct wfx_vif *wvif)
{
- struct wfx_dev *wdev = hw->priv;
- struct wfx_vif *wvif = (struct wfx_vif *) vif->drv_priv;
- int ret = 0;
- /* To prevent re-applying PM request OID again and again*/
- u16 old_uapsd_flags, new_uapsd_flags;
- struct hif_req_edca_queue_params *edca;
-
- mutex_lock(&wdev->conf_mutex);
+ struct ieee80211_conf *conf = &wvif->wdev->hw->conf;
+ bool ps = conf->flags & IEEE80211_CONF_PS;
+ int ps_timeout = conf->dynamic_ps_timeout;
+ struct ieee80211_channel *chan0 = NULL, *chan1 = NULL;
- if (queue < hw->queues) {
- old_uapsd_flags = *((u16 *) &wvif->uapsd_info);
- edca = &wvif->edca.params[queue];
-
- wvif->edca.uapsd_enable[queue] = params->uapsd;
- edca->aifsn = params->aifs;
- edca->cw_min = params->cw_min;
- edca->cw_max = params->cw_max;
- edca->tx_op_limit = params->txop * TXOP_UNIT;
- edca->allowed_medium_time = 0;
- ret = hif_set_edca_queue_params(wvif, edca);
- if (ret) {
- ret = -EINVAL;
- goto out;
- }
-
- if (wvif->vif->type == NL80211_IFTYPE_STATION) {
- ret = wfx_set_uapsd_param(wvif, &wvif->edca);
- new_uapsd_flags = *((u16 *) &wvif->uapsd_info);
- if (!ret && wvif->setbssparams_done &&
- wvif->state == WFX_STATE_STA &&
- old_uapsd_flags != new_uapsd_flags)
- ret = wfx_set_pm(wvif, &wvif->powersave_mode);
- }
- } else {
- ret = -EINVAL;
+ WARN_ON(conf->dynamic_ps_timeout < 0);
+ if (wvif->state != WFX_STATE_STA || !wvif->bss_params.aid)
+ return 0;
+ if (!ps)
+ ps_timeout = 0;
+ if (wvif->uapsd_mask)
+ ps_timeout = 0;
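+	// U-APSD is presumably not compatible with dynamic powersave, hence
+	// the null timeout above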
+
+	// The kernel disables powersave when an AP is in use. On the contrary,
+	// it is absolutely necessary to enable legacy powersave for the WF200
+	// if the channels are different.
+ if (wdev_to_wvif(wvif->wdev, 0))
+ chan0 = wdev_to_wvif(wvif->wdev, 0)->vif->bss_conf.chandef.chan;
+ if (wdev_to_wvif(wvif->wdev, 1))
+ chan1 = wdev_to_wvif(wvif->wdev, 1)->vif->bss_conf.chandef.chan;
+ if (chan0 && chan1 && chan0->hw_value != chan1->hw_value &&
+ wvif->vif->type != NL80211_IFTYPE_AP) {
+ ps = true;
+ ps_timeout = 0;
}
-out:
- mutex_unlock(&wdev->conf_mutex);
- return ret;
+ if (!wait_for_completion_timeout(&wvif->set_pm_mode_complete,
+ TU_TO_JIFFIES(512)))
+ dev_warn(wvif->wdev->dev,
+			 "timeout while waiting for set_pm_mode_complete\n");
+ return hif_set_pm(wvif, ps, ps_timeout);
}
-int wfx_set_pm(struct wfx_vif *wvif, const struct hif_req_set_pm_mode *arg)
+int wfx_conf_tx(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ u16 queue, const struct ieee80211_tx_queue_params *params)
{
- struct hif_req_set_pm_mode pm = *arg;
- u16 uapsd_flags;
- int ret;
-
- if (wvif->state != WFX_STATE_STA || !wvif->bss_params.aid)
- return 0;
-
- memcpy(&uapsd_flags, &wvif->uapsd_info, sizeof(uapsd_flags));
+ struct wfx_dev *wdev = hw->priv;
+ struct wfx_vif *wvif = (struct wfx_vif *) vif->drv_priv;
+ int old_uapsd = wvif->uapsd_mask;
+ int ret = 0;
- if (uapsd_flags != 0)
- pm.pm_mode.fast_psm = 0;
+ WARN_ON(queue >= hw->queues);
- // Kernel disable PowerSave when multiple vifs are in use. In contrary,
- // it is absolutly necessary to enable PowerSave for WF200
- if (wvif_count(wvif->wdev) > 1) {
- pm.pm_mode.enter_psm = 1;
- pm.pm_mode.fast_psm = 0;
+ mutex_lock(&wdev->conf_mutex);
+ assign_bit(queue, &wvif->uapsd_mask, params->uapsd);
+ memcpy(&wvif->edca_params[queue], params, sizeof(*params));
+ hif_set_edca_queue_params(wvif, queue, params);
+ if (wvif->vif->type == NL80211_IFTYPE_STATION &&
+ old_uapsd != wvif->uapsd_mask) {
+ hif_set_uapsd_info(wvif, wvif->uapsd_mask);
+ wfx_update_pm(wvif);
}
-
- if (!wait_for_completion_timeout(&wvif->set_pm_mode_complete,
- msecs_to_jiffies(300)))
- dev_warn(wvif->wdev->dev,
- "timeout while waiting of set_pm_mode_complete\n");
- ret = hif_set_pm(wvif, &pm);
- // FIXME: why ?
- if (-ETIMEDOUT == wvif->scan.status)
- wvif->scan.status = 1;
+ mutex_unlock(&wdev->conf_mutex);
return ret;
}
@@ -413,56 +320,27 @@ int wfx_set_rts_threshold(struct ieee80211_hw *hw, u32 value)
return 0;
}
-/* If successful, LOCKS the TX queue! */
static int __wfx_flush(struct wfx_dev *wdev, bool drop)
{
- int ret;
-
for (;;) {
- if (drop) {
+ if (drop)
wfx_tx_queues_clear(wdev);
- } else {
- ret = wait_event_timeout(
- wdev->tx_queue_stats.wait_link_id_empty,
- wfx_tx_queues_is_empty(wdev),
- 2 * HZ);
- }
-
- if (!drop && ret <= 0) {
- ret = -ETIMEDOUT;
- break;
- }
- ret = 0;
-
- wfx_tx_lock_flush(wdev);
- if (!wfx_tx_queues_is_empty(wdev)) {
- /* Highly unlikely: WSM requeued frames. */
- wfx_tx_unlock(wdev);
- continue;
- }
- break;
+ if (wait_event_timeout(wdev->tx_queue_stats.wait_link_id_empty,
+ wfx_tx_queues_is_empty(wdev),
+ 2 * HZ) <= 0)
+ return -ETIMEDOUT;
+ wfx_tx_flush(wdev);
+ if (wfx_tx_queues_is_empty(wdev))
+ return 0;
+		dev_warn(wdev->dev, "frames queued while flushing tx queues\n");
}
- return ret;
}
void wfx_flush(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
u32 queues, bool drop)
{
- struct wfx_dev *wdev = hw->priv;
- struct wfx_vif *wvif;
-
- if (vif) {
- wvif = (struct wfx_vif *) vif->drv_priv;
- if (wvif->vif->type == NL80211_IFTYPE_MONITOR)
- drop = true;
- if (wvif->vif->type == NL80211_IFTYPE_AP &&
- !wvif->enable_beacon)
- drop = true;
- }
-
- // FIXME: only flush requested vif
- if (!__wfx_flush(wdev, drop))
- wfx_tx_unlock(wdev);
+ // FIXME: only flush requested vif and queues
+ __wfx_flush(hw->priv, drop);
}
/* WSM callbacks */
@@ -476,7 +354,7 @@ static void wfx_event_report_rssi(struct wfx_vif *wvif, u8 raw_rcpi_rssi)
int cqm_evt;
rcpi_rssi = raw_rcpi_rssi / 2 - 110;
- if (rcpi_rssi <= wvif->cqm_rssi_thold)
+ if (rcpi_rssi <= wvif->vif->bss_conf.cqm_rssi_thold)
cqm_evt = NL80211_CQM_RSSI_THRESHOLD_EVENT_LOW;
else
cqm_evt = NL80211_CQM_RSSI_THRESHOLD_EVENT_HIGH;
@@ -499,18 +377,9 @@ static void wfx_event_handler_work(struct work_struct *work)
switch (event->evt.event_id) {
case HIF_EVENT_IND_BSSLOST:
cancel_work_sync(&wvif->unjoin_work);
- if (!down_trylock(&wvif->scan.lock)) {
- wfx_cqm_bssloss_sm(wvif, 1, 0, 0);
- up(&wvif->scan.lock);
- } else {
- /* Scan is in progress. Delay reporting.
- * Scan complete will trigger bss_loss_work
- */
- wvif->delayed_link_loss = 1;
- /* Also start a watchdog. */
- schedule_delayed_work(&wvif->bss_loss_work,
- 5 * HZ);
- }
+ mutex_lock(&wvif->scan_lock);
+ wfx_cqm_bssloss_sm(wvif, 1, 0, 0);
+ mutex_unlock(&wvif->scan_lock);
break;
case HIF_EVENT_IND_BSSREGAINED:
wfx_cqm_bssloss_sm(wvif, 0, 0, 0);
@@ -554,30 +423,10 @@ static void wfx_bss_params_work(struct work_struct *work)
mutex_unlock(&wvif->wdev->conf_mutex);
}
-static void wfx_set_beacon_wakeup_period_work(struct work_struct *work)
-{
- struct wfx_vif *wvif = container_of(work, struct wfx_vif,
- set_beacon_wakeup_period_work);
-
- hif_set_beacon_wakeup_period(wvif, wvif->dtim_period,
- wvif->dtim_period);
-}
-
static void wfx_do_unjoin(struct wfx_vif *wvif)
{
mutex_lock(&wvif->wdev->conf_mutex);
- if (atomic_read(&wvif->scan.in_progress)) {
- if (wvif->delayed_unjoin)
- dev_dbg(wvif->wdev->dev,
- "delayed unjoin is already scheduled\n");
- else
- wvif->delayed_unjoin = true;
- goto done;
- }
-
- wvif->delayed_link_loss = false;
-
if (!wvif->state)
goto done;
@@ -585,7 +434,6 @@ static void wfx_do_unjoin(struct wfx_vif *wvif)
goto done;
cancel_work_sync(&wvif->update_filtering_work);
- cancel_work_sync(&wvif->set_beacon_wakeup_period_work);
wvif->state = WFX_STATE_PASSIVE;
/* Unjoin is a reset. */
@@ -593,8 +441,6 @@ static void wfx_do_unjoin(struct wfx_vif *wvif)
hif_keep_alive_period(wvif, 0);
hif_reset(wvif, false);
wfx_tx_policy_init(wvif);
- hif_set_output_power(wvif, wvif->wdev->output_power * 10);
- wvif->dtim_period = 0;
hif_set_macaddr(wvif, wvif->vif->addr);
wfx_free_event_queue(wvif);
cancel_work_sync(&wvif->event_handler_work);
@@ -606,8 +452,6 @@ static void wfx_do_unjoin(struct wfx_vif *wvif)
wvif->disable_beacon_filter = false;
wfx_update_filtering(wvif);
memset(&wvif->bss_params, 0, sizeof(wvif->bss_params));
- wvif->setbssparams_done = false;
- memset(&wvif->ht_info, 0, sizeof(wvif->ht_info));
done:
mutex_unlock(&wvif->wdev->conf_mutex);
@@ -644,33 +488,21 @@ static void wfx_set_mfp(struct wfx_vif *wvif,
hif_set_mfp(wvif, mfpc, mfpr);
}
-/* MUST be called with tx_lock held! It will be unlocked for us. */
static void wfx_do_join(struct wfx_vif *wvif)
{
- const u8 *bssid;
+ int ret;
+ const u8 *ssidie;
struct ieee80211_bss_conf *conf = &wvif->vif->bss_conf;
struct cfg80211_bss *bss = NULL;
- struct hif_req_join join = {
- .mode = conf->ibss_joined ? HIF_MODE_IBSS : HIF_MODE_BSS,
- .preamble_type = conf->use_short_preamble ? HIF_PREAMBLE_SHORT : HIF_PREAMBLE_LONG,
- .probe_for_join = 1,
- .atim_window = 0,
- .basic_rate_set = wfx_rate_mask_to_hw(wvif->wdev,
- conf->basic_rates),
- };
- if (wvif->channel->flags & IEEE80211_CHAN_NO_IR)
- join.probe_for_join = 0;
+ wfx_tx_lock_flush(wvif->wdev);
if (wvif->state)
wfx_do_unjoin(wvif);
- bssid = wvif->vif->bss_conf.bssid;
-
bss = cfg80211_get_bss(wvif->wdev->hw->wiphy, wvif->channel,
- bssid, NULL, 0,
+ conf->bssid, NULL, 0,
IEEE80211_BSS_TYPE_ANY, IEEE80211_PRIVACY_ANY);
-
if (!bss && !conf->ibss_joined) {
wfx_tx_unlock(wvif->wdev);
return;
@@ -678,41 +510,15 @@ static void wfx_do_join(struct wfx_vif *wvif)
mutex_lock(&wvif->wdev->conf_mutex);
- /* Under the conf lock: check scan status and
- * bail out if it is in progress.
- */
- if (atomic_read(&wvif->scan.in_progress)) {
- wfx_tx_unlock(wvif->wdev);
- goto done_put;
- }
-
- /* Sanity check basic rates */
- if (!join.basic_rate_set)
- join.basic_rate_set = 7;
-
/* Sanity check beacon interval */
if (!wvif->beacon_int)
wvif->beacon_int = 1;
- join.beacon_interval = wvif->beacon_int;
-
- // DTIM period will be set on first Beacon
- wvif->dtim_period = 0;
-
- join.channel_number = wvif->channel->hw_value;
- memcpy(join.bssid, bssid, sizeof(join.bssid));
-
- if (!conf->ibss_joined) {
- const u8 *ssidie;
-
- rcu_read_lock();
+ rcu_read_lock();
+ if (!conf->ibss_joined)
ssidie = ieee80211_bss_get_ie(bss, WLAN_EID_SSID);
- if (ssidie) {
- join.ssid_length = ssidie[1];
- memcpy(join.ssid, &ssidie[2], join.ssid_length);
- }
- rcu_read_unlock();
- }
+ else
+ ssidie = NULL;
wfx_tx_flush(wvif->wdev);
@@ -723,7 +529,9 @@ static void wfx_do_join(struct wfx_vif *wvif)
/* Perform actual join */
wvif->wdev->tx_burst_idx = -1;
- if (hif_join(wvif, &join)) {
+ ret = hif_join(wvif, conf, wvif->channel, ssidie);
+ rcu_read_unlock();
+ if (ret) {
ieee80211_connection_loss(wvif->vif);
wvif->join_complete_status = -1;
/* Tx lock still held, unjoin will clear it. */
@@ -749,7 +557,6 @@ static void wfx_do_join(struct wfx_vif *wvif)
}
wfx_update_filtering(wvif);
-done_put:
mutex_unlock(&wvif->wdev->conf_mutex);
if (bss)
cfg80211_put_bss(wvif->wdev->hw->wiphy, bss);
@@ -766,30 +573,26 @@ static void wfx_unjoin_work(struct work_struct *work)
int wfx_sta_add(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
struct ieee80211_sta *sta)
{
- struct wfx_dev *wdev = hw->priv;
struct wfx_vif *wvif = (struct wfx_vif *) vif->drv_priv;
struct wfx_sta_priv *sta_priv = (struct wfx_sta_priv *) &sta->drv_priv;
- struct wfx_link_entry *entry;
- struct sk_buff *skb;
-
- if (wvif->vif->type != NL80211_IFTYPE_AP)
- return 0;
+ spin_lock_init(&sta_priv->lock);
sta_priv->vif_id = wvif->id;
- sta_priv->link_id = wfx_find_link_id(wvif, sta->addr);
- if (!sta_priv->link_id) {
- dev_warn(wdev->dev, "mo more link-id available\n");
- return -ENOENT;
- }
- entry = &wvif->link_id_db[sta_priv->link_id - 1];
+	// FIXME: in station mode, the current API interprets a new link-id as
+	// a TDLS peer.
+ if (vif->type == NL80211_IFTYPE_STATION)
+ return 0;
+ sta_priv->link_id = ffz(wvif->link_id_map);
+ wvif->link_id_map |= BIT(sta_priv->link_id);
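+	// bit 0 of link_id_map is presumably reserved (broadcast/unknown
+	// stations), so ffz() should never return 0 here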
+ WARN_ON(!sta_priv->link_id);
+ WARN_ON(sta_priv->link_id >= WFX_MAX_STA_IN_AP_MODE);
+ hif_map_link(wvif, sta->addr, 0, sta_priv->link_id);
+
spin_lock_bh(&wvif->ps_state_lock);
if ((sta->uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_MASK) ==
IEEE80211_WMM_IE_STA_QOSINFO_AC_MASK)
wvif->sta_asleep_mask |= BIT(sta_priv->link_id);
- entry->status = WFX_LINK_HARD;
- while ((skb = skb_dequeue(&entry->rx_queue)))
- ieee80211_rx_irqsafe(wdev->hw, skb);
spin_unlock_bh(&wvif->ps_state_lock);
return 0;
}
@@ -797,225 +600,116 @@ int wfx_sta_add(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
int wfx_sta_remove(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
struct ieee80211_sta *sta)
{
- struct wfx_dev *wdev = hw->priv;
struct wfx_vif *wvif = (struct wfx_vif *) vif->drv_priv;
struct wfx_sta_priv *sta_priv = (struct wfx_sta_priv *) &sta->drv_priv;
- struct wfx_link_entry *entry;
+ int i;
- if (wvif->vif->type != NL80211_IFTYPE_AP || !sta_priv->link_id)
+ for (i = 0; i < ARRAY_SIZE(sta_priv->buffered); i++)
+ WARN(sta_priv->buffered[i], "release station while Tx is in progress");
+ // FIXME: see note in wfx_sta_add()
+ if (vif->type == NL80211_IFTYPE_STATION)
return 0;
-
- entry = &wvif->link_id_db[sta_priv->link_id - 1];
- spin_lock_bh(&wvif->ps_state_lock);
- entry->status = WFX_LINK_RESERVE;
- entry->timestamp = jiffies;
- wfx_tx_lock(wdev);
- if (!schedule_work(&wvif->link_id_work))
- wfx_tx_unlock(wdev);
- spin_unlock_bh(&wvif->ps_state_lock);
- flush_work(&wvif->link_id_work);
+ // FIXME add a mutex?
+ hif_map_link(wvif, sta->addr, 1, sta_priv->link_id);
+ wvif->link_id_map &= ~BIT(sta_priv->link_id);
return 0;
}
-static void wfx_set_cts_work(struct work_struct *work)
-{
- struct wfx_vif *wvif = container_of(work, struct wfx_vif, set_cts_work);
- u8 erp_ie[3] = { WLAN_EID_ERP_INFO, 1, 0 };
- struct hif_ie_flags target_frame = {
- .beacon = 1,
- };
-
- mutex_lock(&wvif->wdev->conf_mutex);
- erp_ie[2] = wvif->erp_info;
- mutex_unlock(&wvif->wdev->conf_mutex);
-
- hif_erp_use_protection(wvif, erp_ie[2] & WLAN_ERP_USE_PROTECTION);
-
- if (wvif->vif->type != NL80211_IFTYPE_STATION)
- hif_update_ie(wvif, &target_frame, erp_ie, sizeof(erp_ie));
-}
-
static int wfx_start_ap(struct wfx_vif *wvif)
{
int ret;
- struct ieee80211_bss_conf *conf = &wvif->vif->bss_conf;
- struct hif_req_start start = {
- .channel_number = wvif->channel->hw_value,
- .beacon_interval = conf->beacon_int,
- .dtim_period = conf->dtim_period,
- .preamble_type = conf->use_short_preamble ? HIF_PREAMBLE_SHORT : HIF_PREAMBLE_LONG,
- .basic_rate_set = wfx_rate_mask_to_hw(wvif->wdev,
- conf->basic_rates),
- };
-
- memset(start.ssid, 0, sizeof(start.ssid));
- if (!conf->hidden_ssid) {
- start.ssid_length = conf->ssid_len;
- memcpy(start.ssid, conf->ssid, start.ssid_length);
- }
-
- wvif->beacon_int = conf->beacon_int;
- wvif->dtim_period = conf->dtim_period;
-
- memset(&wvif->link_id_db, 0, sizeof(wvif->link_id_db));
+ wvif->beacon_int = wvif->vif->bss_conf.beacon_int;
wvif->wdev->tx_burst_idx = -1;
- ret = hif_start(wvif, &start);
- if (!ret)
- ret = wfx_upload_keys(wvif);
- if (!ret) {
- if (wvif_count(wvif->wdev) <= 1)
- hif_set_block_ack_policy(wvif, 0xFF, 0xFF);
- wvif->state = WFX_STATE_AP;
- wfx_update_filtering(wvif);
- }
- return ret;
+ ret = hif_start(wvif, &wvif->vif->bss_conf, wvif->channel);
+ if (ret)
+ return ret;
+ ret = wfx_upload_keys(wvif);
+ if (ret)
+ return ret;
+ if (wvif_count(wvif->wdev) <= 1)
+ hif_set_block_ack_policy(wvif, 0xFF, 0xFF);
+ wvif->state = WFX_STATE_AP;
+ wfx_update_filtering(wvif);
+ return 0;
}
static int wfx_update_beaconing(struct wfx_vif *wvif)
{
- struct ieee80211_bss_conf *conf = &wvif->vif->bss_conf;
-
- if (wvif->vif->type == NL80211_IFTYPE_AP) {
- /* TODO: check if changed channel, band */
- if (wvif->state != WFX_STATE_AP ||
- wvif->beacon_int != conf->beacon_int) {
- wfx_tx_lock_flush(wvif->wdev);
- if (wvif->state != WFX_STATE_PASSIVE) {
- hif_reset(wvif, false);
- wfx_tx_policy_init(wvif);
- }
- wvif->state = WFX_STATE_PASSIVE;
- wfx_start_ap(wvif);
- wfx_tx_unlock(wvif->wdev);
- } else {
- }
- }
+ if (wvif->vif->type != NL80211_IFTYPE_AP)
+ return 0;
+ if (wvif->state == WFX_STATE_AP &&
+ wvif->beacon_int == wvif->vif->bss_conf.beacon_int)
+ return 0;
+ wfx_tx_lock_flush(wvif->wdev);
+ hif_reset(wvif, false);
+ wfx_tx_policy_init(wvif);
+ wvif->state = WFX_STATE_PASSIVE;
+ wfx_start_ap(wvif);
+ wfx_tx_unlock(wvif->wdev);
return 0;
}
-static int wfx_upload_beacon(struct wfx_vif *wvif)
+static int wfx_upload_ap_templates(struct wfx_vif *wvif)
{
- int ret = 0;
- struct sk_buff *skb = NULL;
- struct ieee80211_mgmt *mgmt;
- struct hif_mib_template_frame *p;
+ struct sk_buff *skb;
if (wvif->vif->type == NL80211_IFTYPE_STATION ||
wvif->vif->type == NL80211_IFTYPE_MONITOR ||
wvif->vif->type == NL80211_IFTYPE_UNSPECIFIED)
- goto done;
+ return 0;
skb = ieee80211_beacon_get(wvif->wdev->hw, wvif->vif);
-
if (!skb)
return -ENOMEM;
-
- p = (struct hif_mib_template_frame *) skb_push(skb, 4);
- p->frame_type = HIF_TMPLT_BCN;
- p->init_rate = API_RATE_INDEX_B_1MBPS; /* 1Mbps DSSS */
- p->frame_length = cpu_to_le16(skb->len - 4);
-
- ret = hif_set_template_frame(wvif, p);
-
- skb_pull(skb, 4);
-
- if (ret)
- goto done;
- /* TODO: Distill probe resp; remove TIM and any other beacon-specific
- * IEs
- */
- mgmt = (void *)skb->data;
- mgmt->frame_control =
- cpu_to_le16(IEEE80211_FTYPE_MGMT | IEEE80211_STYPE_PROBE_RESP);
-
- p->frame_type = HIF_TMPLT_PRBRES;
-
- ret = hif_set_template_frame(wvif, p);
- wfx_fwd_probe_req(wvif, false);
-
-done:
+ hif_set_template_frame(wvif, skb, HIF_TMPLT_BCN,
+ API_RATE_INDEX_B_1MBPS);
dev_kfree_skb(skb);
- return ret;
-}
-
-static int wfx_is_ht(const struct wfx_ht_info *ht_info)
-{
- return ht_info->channel_type != NL80211_CHAN_NO_HT;
-}
-
-static int wfx_ht_greenfield(const struct wfx_ht_info *ht_info)
-{
- return wfx_is_ht(ht_info) &&
- (ht_info->ht_cap.cap & IEEE80211_HT_CAP_GRN_FLD) &&
- !(ht_info->operation_mode &
- IEEE80211_HT_OP_MODE_NON_GF_STA_PRSNT);
-}
-static int wfx_ht_ampdu_density(const struct wfx_ht_info *ht_info)
-{
- if (!wfx_is_ht(ht_info))
- return 0;
- return ht_info->ht_cap.ampdu_density;
+ skb = ieee80211_proberesp_get(wvif->wdev->hw, wvif->vif);
+ if (!skb)
+ return -ENOMEM;
+ hif_set_template_frame(wvif, skb, HIF_TMPLT_PRBRES,
+ API_RATE_INDEX_B_1MBPS);
+ dev_kfree_skb(skb);
+ return 0;
}
static void wfx_join_finalize(struct wfx_vif *wvif,
struct ieee80211_bss_conf *info)
{
struct ieee80211_sta *sta = NULL;
- struct hif_mib_set_association_mode association_mode = { };
- if (info->dtim_period)
- wvif->dtim_period = info->dtim_period;
wvif->beacon_int = info->beacon_int;
-
- rcu_read_lock();
+ rcu_read_lock(); // protect sta
if (info->bssid && !info->ibss_joined)
sta = ieee80211_find_sta(wvif->vif, info->bssid);
- if (sta) {
- wvif->ht_info.ht_cap = sta->ht_cap;
+ if (sta)
wvif->bss_params.operational_rate_set =
wfx_rate_mask_to_hw(wvif->wdev, sta->supp_rates[wvif->channel->band]);
- wvif->ht_info.operation_mode = info->ht_operation_mode;
- } else {
- memset(&wvif->ht_info, 0, sizeof(wvif->ht_info));
+ else
wvif->bss_params.operational_rate_set = -1;
- }
- rcu_read_unlock();
-
- /* Non Greenfield stations present */
- if (wvif->ht_info.operation_mode &
- IEEE80211_HT_OP_MODE_NON_GF_STA_PRSNT)
+ if (sta &&
+ info->ht_operation_mode & IEEE80211_HT_OP_MODE_NON_GF_STA_PRSNT)
hif_dual_cts_protection(wvif, true);
else
hif_dual_cts_protection(wvif, false);
- association_mode.preambtype_use = 1;
- association_mode.mode = 1;
- association_mode.rateset = 1;
- association_mode.spacing = 1;
- association_mode.preamble_type = info->use_short_preamble ? HIF_PREAMBLE_SHORT : HIF_PREAMBLE_LONG;
- association_mode.basic_rate_set = cpu_to_le32(wfx_rate_mask_to_hw(wvif->wdev, info->basic_rates));
- association_mode.mixed_or_greenfield_type = wfx_ht_greenfield(&wvif->ht_info);
- association_mode.mpdu_start_spacing = wfx_ht_ampdu_density(&wvif->ht_info);
-
wfx_cqm_bssloss_sm(wvif, 0, 0, 0);
cancel_work_sync(&wvif->unjoin_work);
wvif->bss_params.beacon_lost_count = 20;
wvif->bss_params.aid = info->aid;
- if (wvif->dtim_period < 1)
- wvif->dtim_period = 1;
-
- hif_set_association_mode(wvif, &association_mode);
+ hif_set_association_mode(wvif, info, sta ? &sta->ht_cap : NULL);
+ rcu_read_unlock();
if (!info->ibss_joined) {
hif_keep_alive_period(wvif, 30 /* sec */);
hif_set_bss_params(wvif, &wvif->bss_params);
- wvif->setbssparams_done = true;
- wfx_set_beacon_wakeup_period_work(&wvif->set_beacon_wakeup_period_work);
- wfx_set_pm(wvif, &wvif->powersave_mode);
+ hif_set_beacon_wakeup_period(wvif, info->dtim_period,
+ info->dtim_period);
+ wfx_update_pm(wvif);
}
}
@@ -1028,48 +722,40 @@ void wfx_bss_info_changed(struct ieee80211_hw *hw,
struct wfx_vif *wvif = (struct wfx_vif *) vif->drv_priv;
bool do_join = false;
int i;
- int nb_arp_addr;
mutex_lock(&wdev->conf_mutex);
/* TODO: BSS_CHANGED_QOS */
if (changed & BSS_CHANGED_ARP_FILTER) {
- struct hif_mib_arp_ip_addr_table filter = { };
-
- nb_arp_addr = info->arp_addr_cnt;
- if (nb_arp_addr <= 0 || nb_arp_addr > HIF_MAX_ARP_IP_ADDRTABLE_ENTRIES)
- nb_arp_addr = 0;
-
for (i = 0; i < HIF_MAX_ARP_IP_ADDRTABLE_ENTRIES; i++) {
- filter.condition_idx = i;
- if (i < nb_arp_addr) {
- // Caution: type of arp_addr_list[i] is __be32
- memcpy(filter.ipv4_address,
- &info->arp_addr_list[i],
- sizeof(filter.ipv4_address));
- filter.arp_enable = HIF_ARP_NS_FILTERING_ENABLE;
- } else {
- filter.arp_enable = HIF_ARP_NS_FILTERING_DISABLE;
- }
- hif_set_arp_ipv4_filter(wvif, &filter);
+ __be32 *arp_addr = &info->arp_addr_list[i];
+
+ if (info->arp_addr_cnt > HIF_MAX_ARP_IP_ADDRTABLE_ENTRIES)
+ arp_addr = NULL;
+ if (i >= info->arp_addr_cnt)
+ arp_addr = NULL;
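+			// a NULL address presumably disables this filter slot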
+ hif_set_arp_ipv4_filter(wvif, i, arp_addr);
}
}
- if (changed &
- (BSS_CHANGED_BEACON | BSS_CHANGED_AP_PROBE_RESP |
- BSS_CHANGED_BSSID | BSS_CHANGED_SSID | BSS_CHANGED_IBSS)) {
+ if (changed & BSS_CHANGED_BEACON ||
+ changed & BSS_CHANGED_AP_PROBE_RESP ||
+ changed & BSS_CHANGED_BSSID ||
+ changed & BSS_CHANGED_SSID ||
+ changed & BSS_CHANGED_IBSS) {
wvif->beacon_int = info->beacon_int;
wfx_update_beaconing(wvif);
- wfx_upload_beacon(wvif);
+ wfx_upload_ap_templates(wvif);
+ wfx_fwd_probe_req(wvif, false);
}
if (changed & BSS_CHANGED_BEACON_ENABLED &&
- wvif->state != WFX_STATE_IBSS) {
- if (wvif->enable_beacon != info->enable_beacon) {
- hif_beacon_transmit(wvif, info->enable_beacon);
- wvif->enable_beacon = info->enable_beacon;
- }
- }
+ wvif->state != WFX_STATE_IBSS)
+ hif_beacon_transmit(wvif, info->enable_beacon);
+
+ if (changed & BSS_CHANGED_BEACON_INFO)
+ hif_set_beacon_wakeup_period(wvif, info->dtim_period,
+ info->dtim_period);
/* assoc/disassoc, or maybe AID changed */
if (changed & BSS_CHANGED_ASSOC) {
@@ -1095,10 +781,11 @@ void wfx_bss_info_changed(struct ieee80211_hw *hw,
if (changed & BSS_CHANGED_BSSID)
do_join = true;
- if (changed &
- (BSS_CHANGED_ASSOC | BSS_CHANGED_BSSID |
- BSS_CHANGED_IBSS | BSS_CHANGED_BASIC_RATES |
- BSS_CHANGED_HT)) {
+ if (changed & BSS_CHANGED_ASSOC ||
+ changed & BSS_CHANGED_BSSID ||
+ changed & BSS_CHANGED_IBSS ||
+ changed & BSS_CHANGED_BASIC_RATES ||
+ changed & BSS_CHANGED_HT) {
if (info->assoc) {
if (wvif->state < WFX_STATE_PRE_STA) {
ieee80211_connection_loss(vif);
@@ -1119,106 +806,50 @@ void wfx_bss_info_changed(struct ieee80211_hw *hw,
}
}
- /* ERP Protection */
- if (changed & (BSS_CHANGED_ASSOC |
- BSS_CHANGED_ERP_CTS_PROT |
- BSS_CHANGED_ERP_PREAMBLE)) {
- u32 prev_erp_info = wvif->erp_info;
+ if (changed & BSS_CHANGED_ASSOC ||
+ changed & BSS_CHANGED_ERP_CTS_PROT ||
+ changed & BSS_CHANGED_ERP_PREAMBLE) {
+ u8 erp_ie[3] = { WLAN_EID_ERP_INFO, 1, 0 };
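+		// erp_ie = { element id, payload length, ERP flags }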
+ hif_erp_use_protection(wvif, info->use_cts_prot);
if (info->use_cts_prot)
- wvif->erp_info |= WLAN_ERP_USE_PROTECTION;
- else if (!(prev_erp_info & WLAN_ERP_NON_ERP_PRESENT))
- wvif->erp_info &= ~WLAN_ERP_USE_PROTECTION;
-
+ erp_ie[2] |= WLAN_ERP_USE_PROTECTION;
if (info->use_short_preamble)
- wvif->erp_info |= WLAN_ERP_BARKER_PREAMBLE;
- else
- wvif->erp_info &= ~WLAN_ERP_BARKER_PREAMBLE;
-
- if (prev_erp_info != wvif->erp_info)
- schedule_work(&wvif->set_cts_work);
+ erp_ie[2] |= WLAN_ERP_BARKER_PREAMBLE;
+ if (wvif->vif->type != NL80211_IFTYPE_STATION)
+ hif_update_ie_beacon(wvif, erp_ie, sizeof(erp_ie));
}
- if (changed & (BSS_CHANGED_ASSOC | BSS_CHANGED_ERP_SLOT))
+ if (changed & BSS_CHANGED_ASSOC || changed & BSS_CHANGED_ERP_SLOT)
hif_slot_time(wvif, info->use_short_slot ? 9 : 20);
- if (changed & (BSS_CHANGED_ASSOC | BSS_CHANGED_CQM)) {
- struct hif_mib_rcpi_rssi_threshold th = {
- .rolling_average_count = 8,
- .detection = 1,
- };
-
- wvif->cqm_rssi_thold = info->cqm_rssi_thold;
-
- if (!info->cqm_rssi_thold && !info->cqm_rssi_hyst) {
- th.upperthresh = 1;
- th.lowerthresh = 1;
- } else {
- /* FIXME It's not a correct way of setting threshold.
- * Upper and lower must be set equal here and adjusted
- * in callback. However current implementation is much
- * more reliable and stable.
- */
- /* RSSI: signed Q8.0, RCPI: unsigned Q7.1
- * RSSI = RCPI / 2 - 110
- */
- th.upper_threshold = info->cqm_rssi_thold + info->cqm_rssi_hyst;
- th.upper_threshold = (th.upper_threshold + 110) * 2;
- th.lower_threshold = info->cqm_rssi_thold;
- th.lower_threshold = (th.lower_threshold + 110) * 2;
- }
- hif_set_rcpi_rssi_threshold(wvif, &th);
- }
+ if (changed & BSS_CHANGED_ASSOC || changed & BSS_CHANGED_CQM)
+ hif_set_rcpi_rssi_threshold(wvif, info->cqm_rssi_thold,
+ info->cqm_rssi_hyst);
+
+ if (changed & BSS_CHANGED_TXPOWER)
+ hif_set_output_power(wvif, info->txpower);
+
+ if (changed & BSS_CHANGED_PS)
+ wfx_update_pm(wvif);
- if (changed & BSS_CHANGED_TXPOWER &&
- info->txpower != wdev->output_power) {
- wdev->output_power = info->txpower;
- hif_set_output_power(wvif, wdev->output_power * 10);
- }
mutex_unlock(&wdev->conf_mutex);
- if (do_join) {
- wfx_tx_lock_flush(wdev);
- wfx_do_join(wvif); /* Will unlock it for us */
- }
+ if (do_join)
+ wfx_do_join(wvif);
}
-static void wfx_ps_notify(struct wfx_vif *wvif, enum sta_notify_cmd notify_cmd,
- int link_id)
+static void wfx_ps_notify_sta(struct wfx_vif *wvif,
+ enum sta_notify_cmd notify_cmd, int link_id)
{
- u32 bit, prev;
-
spin_lock_bh(&wvif->ps_state_lock);
- /* Zero link id means "for all link IDs" */
- if (link_id) {
- bit = BIT(link_id);
- } else if (notify_cmd != STA_NOTIFY_AWAKE) {
- dev_warn(wvif->wdev->dev, "unsupported notify command\n");
- bit = 0;
- } else {
- bit = wvif->link_id_map;
- }
- prev = wvif->sta_asleep_mask & bit;
-
- switch (notify_cmd) {
- case STA_NOTIFY_SLEEP:
- if (!prev) {
- if (wvif->mcast_buffered && !wvif->sta_asleep_mask)
- schedule_work(&wvif->mcast_start_work);
- wvif->sta_asleep_mask |= bit;
- }
- break;
- case STA_NOTIFY_AWAKE:
- if (prev) {
- wvif->sta_asleep_mask &= ~bit;
- wvif->pspoll_mask &= ~bit;
- if (link_id && !wvif->sta_asleep_mask)
- schedule_work(&wvif->mcast_stop_work);
- wfx_bh_request_tx(wvif->wdev);
- }
- break;
- }
+ if (notify_cmd == STA_NOTIFY_SLEEP)
+ wvif->sta_asleep_mask |= BIT(link_id);
+ else // notify_cmd == STA_NOTIFY_AWAKE
+ wvif->sta_asleep_mask &= ~BIT(link_id);
spin_unlock_bh(&wvif->ps_state_lock);
+ if (notify_cmd == STA_NOTIFY_AWAKE)
+ wfx_bh_request_tx(wvif->wdev);
}
void wfx_sta_notify(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
@@ -1227,23 +858,19 @@ void wfx_sta_notify(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
struct wfx_vif *wvif = (struct wfx_vif *) vif->drv_priv;
struct wfx_sta_priv *sta_priv = (struct wfx_sta_priv *) &sta->drv_priv;
- wfx_ps_notify(wvif, notify_cmd, sta_priv->link_id);
+ wfx_ps_notify_sta(wvif, notify_cmd, sta_priv->link_id);
}
-static int wfx_set_tim_impl(struct wfx_vif *wvif, bool aid0_bit_set)
+static int wfx_update_tim(struct wfx_vif *wvif)
{
struct sk_buff *skb;
- struct hif_ie_flags target_frame = {
- .beacon = 1,
- };
u16 tim_offset, tim_length;
u8 *tim_ptr;
skb = ieee80211_beacon_get_tim(wvif->wdev->hw, wvif->vif,
&tim_offset, &tim_length);
if (!skb) {
- if (!__wfx_flush(wvif->wdev, true))
- wfx_tx_unlock(wvif->wdev);
+ __wfx_flush(wvif->wdev, true);
return -ENOENT;
}
tim_ptr = skb->data + tim_offset;
@@ -1255,23 +882,23 @@ static int wfx_set_tim_impl(struct wfx_vif *wvif, bool aid0_bit_set)
tim_ptr[2] = 0;
/* Set/reset aid0 bit */
- if (aid0_bit_set)
+ if (wfx_tx_queues_get_after_dtim(wvif))
tim_ptr[4] |= 1;
else
tim_ptr[4] &= ~1;
}
- hif_update_ie(wvif, &target_frame, tim_ptr, tim_length);
+ hif_update_ie_beacon(wvif, tim_ptr, tim_length);
dev_kfree_skb(skb);
return 0;
}
-static void wfx_set_tim_work(struct work_struct *work)
+static void wfx_update_tim_work(struct work_struct *work)
{
- struct wfx_vif *wvif = container_of(work, struct wfx_vif, set_tim_work);
+ struct wfx_vif *wvif = container_of(work, struct wfx_vif, update_tim_work);
- wfx_set_tim_impl(wvif, wvif->aid0_bit_set);
+ wfx_update_tim(wvif);
}
int wfx_set_tim(struct ieee80211_hw *hw, struct ieee80211_sta *sta, bool set)
@@ -1280,50 +907,16 @@ int wfx_set_tim(struct ieee80211_hw *hw, struct ieee80211_sta *sta, bool set)
struct wfx_sta_priv *sta_dev = (struct wfx_sta_priv *) &sta->drv_priv;
struct wfx_vif *wvif = wdev_to_wvif(wdev, sta_dev->vif_id);
- schedule_work(&wvif->set_tim_work);
+ schedule_work(&wvif->update_tim_work);
return 0;
}
-static void wfx_mcast_start_work(struct work_struct *work)
+void wfx_suspend_resume_mc(struct wfx_vif *wvif, enum sta_notify_cmd notify_cmd)
{
- struct wfx_vif *wvif = container_of(work, struct wfx_vif,
- mcast_start_work);
- long tmo = wvif->dtim_period * TU_TO_JIFFIES(wvif->beacon_int + 20);
-
- cancel_work_sync(&wvif->mcast_stop_work);
- if (!wvif->aid0_bit_set) {
- wfx_tx_lock_flush(wvif->wdev);
- wfx_set_tim_impl(wvif, true);
- wvif->aid0_bit_set = true;
- mod_timer(&wvif->mcast_timeout, jiffies + tmo);
- wfx_tx_unlock(wvif->wdev);
- }
-}
-
-static void wfx_mcast_stop_work(struct work_struct *work)
-{
- struct wfx_vif *wvif = container_of(work, struct wfx_vif,
- mcast_stop_work);
-
- if (wvif->aid0_bit_set) {
- del_timer_sync(&wvif->mcast_timeout);
- wfx_tx_lock_flush(wvif->wdev);
- wvif->aid0_bit_set = false;
- wfx_set_tim_impl(wvif, false);
- wfx_tx_unlock(wvif->wdev);
- }
-}
-
-static void wfx_mcast_timeout(struct timer_list *t)
-{
- struct wfx_vif *wvif = from_timer(wvif, t, mcast_timeout);
-
- dev_warn(wvif->wdev->dev, "multicast delivery timeout\n");
- spin_lock_bh(&wvif->ps_state_lock);
- wvif->mcast_tx = wvif->aid0_bit_set && wvif->mcast_buffered;
- if (wvif->mcast_tx)
- wfx_bh_request_tx(wvif->wdev);
- spin_unlock_bh(&wvif->ps_state_lock);
+ WARN(!wfx_tx_queues_get_after_dtim(wvif), "incorrect sequence");
+ WARN(wvif->after_dtim_tx_allowed, "incorrect sequence");
+ wvif->after_dtim_tx_allowed = true;
+ wfx_bh_request_tx(wvif->wdev);
}
int wfx_ampdu_action(struct ieee80211_hw *hw,
@@ -1341,35 +934,6 @@ int wfx_ampdu_action(struct ieee80211_hw *hw,
return -ENOTSUPP;
}
-void wfx_suspend_resume(struct wfx_vif *wvif,
- struct hif_ind_suspend_resume_tx *arg)
-{
- if (arg->suspend_resume_flags.bc_mc_only) {
- bool cancel_tmo = false;
-
- spin_lock_bh(&wvif->ps_state_lock);
- if (!arg->suspend_resume_flags.resume)
- wvif->mcast_tx = false;
- else
- wvif->mcast_tx = wvif->aid0_bit_set &&
- wvif->mcast_buffered;
- if (wvif->mcast_tx) {
- cancel_tmo = true;
- wfx_bh_request_tx(wvif->wdev);
- }
- spin_unlock_bh(&wvif->ps_state_lock);
- if (cancel_tmo)
- del_timer_sync(&wvif->mcast_timeout);
- } else if (arg->suspend_resume_flags.resume) {
- // FIXME: should change each station status independently
- wfx_ps_notify(wvif, STA_NOTIFY_AWAKE, 0);
- wfx_bh_request_tx(wvif->wdev);
- } else {
- // FIXME: should change each station status independently
- wfx_ps_notify(wvif, STA_NOTIFY_SLEEP, 0);
- }
-}
-
int wfx_add_chanctx(struct ieee80211_hw *hw,
struct ieee80211_chanctx_conf *conf)
{
@@ -1395,7 +959,6 @@ int wfx_assign_vif_chanctx(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
WARN(wvif->channel, "channel overwrite");
wvif->channel = ch;
- wvif->ht_info.channel_type = cfg80211_get_chandef_type(&conf->def);
return 0;
}
@@ -1413,97 +976,14 @@ void wfx_unassign_vif_chanctx(struct ieee80211_hw *hw,
int wfx_config(struct ieee80211_hw *hw, u32 changed)
{
- int ret = 0;
- struct wfx_dev *wdev = hw->priv;
- struct ieee80211_conf *conf = &hw->conf;
- struct wfx_vif *wvif;
-
- // FIXME: Interface id should not been hardcoded
- wvif = wdev_to_wvif(wdev, 0);
- if (!wvif) {
- WARN(1, "interface 0 does not exist anymore");
- return 0;
- }
-
- down(&wvif->scan.lock);
- mutex_lock(&wdev->conf_mutex);
- if (changed & IEEE80211_CONF_CHANGE_POWER) {
- wdev->output_power = conf->power_level;
- hif_set_output_power(wvif, wdev->output_power * 10);
- }
-
- if (changed & IEEE80211_CONF_CHANGE_PS) {
- wvif = NULL;
- while ((wvif = wvif_iterate(wdev, wvif)) != NULL) {
- memset(&wvif->powersave_mode, 0,
- sizeof(wvif->powersave_mode));
- if (conf->flags & IEEE80211_CONF_PS) {
- wvif->powersave_mode.pm_mode.enter_psm = 1;
- if (conf->dynamic_ps_timeout > 0) {
- wvif->powersave_mode.pm_mode.fast_psm = 1;
- /*
- * Firmware does not support more than
- * 128ms
- */
- wvif->powersave_mode.fast_psm_idle_period =
- min(conf->dynamic_ps_timeout *
- 2, 255);
- }
- }
- if (wvif->state == WFX_STATE_STA && wvif->bss_params.aid)
- wfx_set_pm(wvif, &wvif->powersave_mode);
- }
- wvif = wdev_to_wvif(wdev, 0);
- }
-
- mutex_unlock(&wdev->conf_mutex);
- up(&wvif->scan.lock);
- return ret;
+ return 0;
}
int wfx_add_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
{
- int i;
+ int i, ret = 0;
struct wfx_dev *wdev = hw->priv;
struct wfx_vif *wvif = (struct wfx_vif *) vif->drv_priv;
- // FIXME: parameters are set by kernel juste after interface_add.
- // Keep struct hif_req_edca_queue_params blank?
- struct hif_req_edca_queue_params default_edca_params[] = {
- [IEEE80211_AC_VO] = {
- .queue_id = HIF_QUEUE_ID_VOICE,
- .aifsn = 2,
- .cw_min = 3,
- .cw_max = 7,
- .tx_op_limit = TXOP_UNIT * 47,
- },
- [IEEE80211_AC_VI] = {
- .queue_id = HIF_QUEUE_ID_VIDEO,
- .aifsn = 2,
- .cw_min = 7,
- .cw_max = 15,
- .tx_op_limit = TXOP_UNIT * 94,
- },
- [IEEE80211_AC_BE] = {
- .queue_id = HIF_QUEUE_ID_BESTEFFORT,
- .aifsn = 3,
- .cw_min = 15,
- .cw_max = 1023,
- .tx_op_limit = TXOP_UNIT * 0,
- },
- [IEEE80211_AC_BK] = {
- .queue_id = HIF_QUEUE_ID_BACKGROUND,
- .aifsn = 7,
- .cw_min = 15,
- .cw_max = 1023,
- .tx_op_limit = TXOP_UNIT * 0,
- },
- };
-
- BUILD_BUG_ON(ARRAY_SIZE(default_edca_params) != ARRAY_SIZE(wvif->edca.params));
- if (wfx_api_older_than(wdev, 2, 0)) {
- default_edca_params[IEEE80211_AC_BE].queue_id = HIF_QUEUE_ID_BACKGROUND;
- default_edca_params[IEEE80211_AC_BK].queue_id = HIF_QUEUE_ID_BESTEFFORT;
- }
vif->driver_flags |= IEEE80211_VIF_BEACON_FILTER |
IEEE80211_VIF_SUPPORTS_UAPSD |
@@ -1536,51 +1016,37 @@ int wfx_add_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
wvif->vif = vif;
wvif->wdev = wdev;
- INIT_WORK(&wvif->link_id_work, wfx_link_id_work);
- INIT_DELAYED_WORK(&wvif->link_id_gc_work, wfx_link_id_gc_work);
-
+ wvif->link_id_map = 1; // link-id 0 is reserved for multicast
spin_lock_init(&wvif->ps_state_lock);
- INIT_WORK(&wvif->set_tim_work, wfx_set_tim_work);
+ INIT_WORK(&wvif->update_tim_work, wfx_update_tim_work);
- INIT_WORK(&wvif->mcast_start_work, wfx_mcast_start_work);
- INIT_WORK(&wvif->mcast_stop_work, wfx_mcast_stop_work);
- timer_setup(&wvif->mcast_timeout, wfx_mcast_timeout, 0);
+ memset(&wvif->bss_params, 0, sizeof(wvif->bss_params));
- wvif->setbssparams_done = false;
mutex_init(&wvif->bss_loss_lock);
INIT_DELAYED_WORK(&wvif->bss_loss_work, wfx_bss_loss_work);
wvif->wep_default_key_id = -1;
INIT_WORK(&wvif->wep_key_work, wfx_wep_key_work);
- sema_init(&wvif->scan.lock, 1);
- INIT_WORK(&wvif->scan.work, wfx_scan_work);
- INIT_DELAYED_WORK(&wvif->scan.timeout, wfx_scan_timeout);
-
spin_lock_init(&wvif->event_queue_lock);
INIT_LIST_HEAD(&wvif->event_queue);
INIT_WORK(&wvif->event_handler_work, wfx_event_handler_work);
init_completion(&wvif->set_pm_mode_complete);
complete(&wvif->set_pm_mode_complete);
- INIT_WORK(&wvif->set_beacon_wakeup_period_work,
- wfx_set_beacon_wakeup_period_work);
INIT_WORK(&wvif->update_filtering_work, wfx_update_filtering_work);
INIT_WORK(&wvif->bss_params_work, wfx_bss_params_work);
- INIT_WORK(&wvif->set_cts_work, wfx_set_cts_work);
INIT_WORK(&wvif->unjoin_work, wfx_unjoin_work);
+ INIT_WORK(&wvif->tx_policy_upload_work, wfx_tx_policy_upload_work);
+
+ mutex_init(&wvif->scan_lock);
+ init_completion(&wvif->scan_complete);
+ INIT_WORK(&wvif->scan_work, wfx_hw_scan_work);
INIT_WORK(&wvif->tx_policy_upload_work, wfx_tx_policy_upload_work);
mutex_unlock(&wdev->conf_mutex);
hif_set_macaddr(wvif, vif->addr);
- for (i = 0; i < IEEE80211_NUM_ACS; i++) {
- memcpy(&wvif->edca.params[i], &default_edca_params[i],
- sizeof(default_edca_params[i]));
- wvif->edca.uapsd_enable[i] = false;
- hif_set_edca_queue_params(wvif, &wvif->edca.params[i]);
- }
- wfx_set_uapsd_param(wvif, &wvif->edca);
wfx_tx_policy_init(wvif);
wvif = NULL;
@@ -1591,9 +1057,9 @@ int wfx_add_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
else
hif_set_block_ack_policy(wvif, 0x00, 0x00);
// Combo force powersave mode. We can re-enable it now
- wfx_set_pm(wvif, &wvif->powersave_mode);
+ ret = wfx_update_pm(wvif);
}
- return 0;
+ return ret;
}
void wfx_remove_interface(struct ieee80211_hw *hw,
@@ -1601,15 +1067,11 @@ void wfx_remove_interface(struct ieee80211_hw *hw,
{
struct wfx_dev *wdev = hw->priv;
struct wfx_vif *wvif = (struct wfx_vif *) vif->drv_priv;
- int i;
- // If scan is in progress, stop it
- while (down_trylock(&wvif->scan.lock))
- schedule();
- up(&wvif->scan.lock);
wait_for_completion_timeout(&wvif->set_pm_mode_complete, msecs_to_jiffies(300));
mutex_lock(&wdev->conf_mutex);
+ WARN(wvif->link_id_map != 1, "corrupted state");
switch (wvif->state) {
case WFX_STATE_PRE_STA:
case WFX_STATE_STA:
@@ -1619,19 +1081,7 @@ void wfx_remove_interface(struct ieee80211_hw *hw,
wfx_tx_unlock(wdev);
break;
case WFX_STATE_AP:
- for (i = 0; wvif->link_id_map; ++i) {
- if (wvif->link_id_map & BIT(i)) {
- wfx_unmap_link(wvif, i);
- wvif->link_id_map &= ~BIT(i);
- }
- }
- memset(wvif->link_id_db, 0, sizeof(wvif->link_id_db));
wvif->sta_asleep_mask = 0;
- wvif->enable_beacon = false;
- wvif->mcast_tx = false;
- wvif->aid0_bit_set = false;
- wvif->mcast_buffered = false;
- wvif->pspoll_mask = 0;
/* reset.link_id = 0; */
hif_reset(wvif, false);
break;
@@ -1646,12 +1096,8 @@ void wfx_remove_interface(struct ieee80211_hw *hw,
	/* FIXME: In addition to resetting the MAC address, try to reset the interface */
hif_set_macaddr(wvif, NULL);
- cancel_delayed_work_sync(&wvif->scan.timeout);
-
wfx_cqm_bssloss_sm(wvif, 0, 0, 0);
cancel_work_sync(&wvif->unjoin_work);
- cancel_delayed_work_sync(&wvif->link_id_gc_work);
- del_timer_sync(&wvif->mcast_timeout);
wfx_free_event_queue(wvif);
wdev->vif[wvif->id] = NULL;
@@ -1666,7 +1112,7 @@ void wfx_remove_interface(struct ieee80211_hw *hw,
else
hif_set_block_ack_policy(wvif, 0x00, 0x00);
// Combo force powersave mode. We can re-enable it now
- wfx_set_pm(wvif, &wvif->powersave_mode);
+ wfx_update_pm(wvif);
}
}
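Note on the flag tests rewritten above: for the single-bit BSS_CHANGED_* flags, a combined-mask test and a chain of per-flag tests always agree; the chained form simply reads one flag per line. A minimal illustration (not part of the patch, with stand-in flag names):

	#include <stdbool.h>
	#include <stdint.h>

	#define FLAG_A (1u << 0)	/* stand-ins for BSS_CHANGED_* bits */
	#define FLAG_B (1u << 1)

	static bool hit_mask(uint32_t changed)
	{
		return changed & (FLAG_A | FLAG_B);			/* old style */
	}

	static bool hit_chained(uint32_t changed)
	{
		return (changed & FLAG_A) || (changed & FLAG_B);	/* new style */
	}
	/* hit_mask() and hit_chained() return the same value for every 'changed'. */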
diff --git a/drivers/staging/wfx/sta.h b/drivers/staging/wfx/sta.h
index 4ccf1b17632b..cf99a8a74a81 100644
--- a/drivers/staging/wfx/sta.h
+++ b/drivers/staging/wfx/sta.h
@@ -23,23 +23,11 @@ enum wfx_state {
WFX_STATE_AP,
};
-struct wfx_ht_info {
- struct ieee80211_sta_ht_cap ht_cap;
- enum nl80211_channel_type channel_type;
- u16 operation_mode;
-};
-
struct wfx_hif_event {
struct list_head link;
struct hif_ind_event evt;
};
-struct wfx_edca_params {
- /* NOTE: index is a linux queue id. */
- struct hif_req_edca_queue_params params[IEEE80211_NUM_ACS];
- bool uapsd_enable[IEEE80211_NUM_ACS];
-};
-
struct wfx_grp_addr_table {
bool enable;
int num_addresses;
@@ -49,6 +37,9 @@ struct wfx_grp_addr_table {
struct wfx_sta_priv {
int link_id;
int vif_id;
+ u8 buffered[IEEE80211_NUM_TIDS];
+ // Ensure atomicity of "buffered" and calls to ieee80211_sta_set_buffered()
+ spinlock_t lock;
};
// mac80211 interface
@@ -91,13 +82,12 @@ void wfx_unassign_vif_chanctx(struct ieee80211_hw *hw,
struct ieee80211_chanctx_conf *conf);
// WSM Callbacks
-void wfx_suspend_resume(struct wfx_vif *wvif,
- struct hif_ind_suspend_resume_tx *arg);
+void wfx_suspend_resume_mc(struct wfx_vif *wvif, enum sta_notify_cmd cmd);
// Other Helpers
void wfx_cqm_bssloss_sm(struct wfx_vif *wvif, int init, int good, int bad);
void wfx_update_filtering(struct wfx_vif *wvif);
-int wfx_set_pm(struct wfx_vif *wvif, const struct hif_req_set_pm_mode *arg);
int wfx_fwd_probe_req(struct wfx_vif *wvif, bool enable);
+u32 wfx_rate_mask_to_hw(struct wfx_dev *wdev, u32 rates);
#endif /* WFX_STA_H */
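The new per-TID buffered counters and lock in struct wfx_sta_priv suggest usage along these lines; the helper below is hypothetical, not code from this patch, and assumes mac80211's ieee80211_sta_set_buffered():

	static void wfx_sta_buffered_inc(struct wfx_sta_priv *sta_priv,
					 struct ieee80211_sta *sta, int tid)
	{
		spin_lock_bh(&sta_priv->lock);
		/* notify mac80211 only on the 0 -> 1 transition for this TID */
		if (!sta_priv->buffered[tid]++)
			ieee80211_sta_set_buffered(sta, tid, true);
		spin_unlock_bh(&sta_priv->lock);
	}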
diff --git a/drivers/staging/wfx/traces.h b/drivers/staging/wfx/traces.h
index 3f6198ab2235..30c6a13f0e22 100644
--- a/drivers/staging/wfx/traces.h
+++ b/drivers/staging/wfx/traces.h
@@ -153,7 +153,7 @@ hif_mib_list_enum
#define hif_mib_list hif_mib_list_enum { -1, NULL }
DECLARE_EVENT_CLASS(hif_data,
- TP_PROTO(struct hif_msg *hif, int tx_fill_level, bool is_recv),
+ TP_PROTO(const struct hif_msg *hif, int tx_fill_level, bool is_recv),
TP_ARGS(hif, tx_fill_level, is_recv),
TP_STRUCT__entry(
__field(int, tx_fill_level)
@@ -203,12 +203,12 @@ DECLARE_EVENT_CLASS(hif_data,
)
);
DEFINE_EVENT(hif_data, hif_send,
- TP_PROTO(struct hif_msg *hif, int tx_fill_level, bool is_recv),
+ TP_PROTO(const struct hif_msg *hif, int tx_fill_level, bool is_recv),
TP_ARGS(hif, tx_fill_level, is_recv));
#define _trace_hif_send(hif, tx_fill_level)\
trace_hif_send(hif, tx_fill_level, false)
DEFINE_EVENT(hif_data, hif_recv,
- TP_PROTO(struct hif_msg *hif, int tx_fill_level, bool is_recv),
+ TP_PROTO(const struct hif_msg *hif, int tx_fill_level, bool is_recv),
TP_ARGS(hif, tx_fill_level, is_recv));
#define _trace_hif_recv(hif, tx_fill_level)\
trace_hif_recv(hif, tx_fill_level, true)
@@ -359,7 +359,8 @@ TRACE_EVENT(bh_stats,
trace_bh_stats(ind, req, cnf, busy, release)
TRACE_EVENT(tx_stats,
- TP_PROTO(struct hif_cnf_tx *tx_cnf, struct sk_buff *skb, int delay),
+ TP_PROTO(const struct hif_cnf_tx *tx_cnf, const struct sk_buff *skb,
+ int delay),
TP_ARGS(tx_cnf, skb, delay),
TP_STRUCT__entry(
__field(int, pkt_id)
@@ -375,8 +376,9 @@ TRACE_EVENT(tx_stats,
		// Keep in sync with wfx_rates definition in main.c
static const int hw_rate[] = { 0, 1, 2, 3, 6, 7, 8, 9,
10, 11, 12, 13 };
- struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
- struct ieee80211_tx_rate *rates = tx_info->driver_rates;
+ const struct ieee80211_tx_info *tx_info =
+ (const struct ieee80211_tx_info *)skb->cb;
+ const struct ieee80211_tx_rate *rates = tx_info->driver_rates;
int i;
__entry->pkt_id = tx_cnf->packet_id;
diff --git a/drivers/staging/wfx/wfx.h b/drivers/staging/wfx/wfx.h
index 781a8c8ba982..8b85bb1abb9c 100644
--- a/drivers/staging/wfx/wfx.h
+++ b/drivers/staging/wfx/wfx.h
@@ -26,6 +26,9 @@
#include "hif_tx.h"
#include "hif_api_general.h"
+#define USEC_PER_TXOP 32 // see struct ieee80211_tx_queue_params
+#define USEC_PER_TU 1024
+
struct hwbus_ops;
struct wfx_dev {
@@ -51,14 +54,12 @@ struct wfx_dev {
int tx_burst_idx;
atomic_t tx_lock;
+ atomic_t packet_id;
u32 key_map;
struct hif_req_add_key keys[MAX_KEY_ENTRIES];
struct hif_rx_stats rx_stats;
struct mutex rx_stats_lock;
-
- int output_power;
- atomic_t scan_in_progress;
};
struct wfx_vif {
@@ -68,24 +69,15 @@ struct wfx_vif {
int id;
enum wfx_state state;
- int delayed_link_loss;
int bss_loss_state;
u32 bss_loss_confirm_id;
struct mutex bss_loss_lock;
struct delayed_work bss_loss_work;
u32 link_id_map;
- struct wfx_link_entry link_id_db[WFX_MAX_STA_IN_AP_MODE];
- struct delayed_work link_id_gc_work;
- struct work_struct link_id_work;
- bool aid0_bit_set;
- bool mcast_tx;
- bool mcast_buffered;
+ bool after_dtim_tx_allowed;
struct wfx_grp_addr_table mcast_filter;
- struct timer_list mcast_timeout;
- struct work_struct mcast_start_work;
- struct work_struct mcast_stop_work;
s8 wep_default_key_id;
struct sk_buff *wep_pending_skb;
@@ -95,37 +87,30 @@ struct wfx_vif {
struct work_struct tx_policy_upload_work;
u32 sta_asleep_mask;
- u32 pspoll_mask;
spinlock_t ps_state_lock;
- struct work_struct set_tim_work;
+ struct work_struct update_tim_work;
- int dtim_period;
int beacon_int;
- bool enable_beacon;
- struct work_struct set_beacon_wakeup_period_work;
-
bool filter_bssid;
bool fwd_probe_req;
bool disable_beacon_filter;
struct work_struct update_filtering_work;
- u32 erp_info;
- int cqm_rssi_thold;
- bool setbssparams_done;
- struct wfx_ht_info ht_info;
- struct wfx_edca_params edca;
- struct hif_mib_set_uapsd_information uapsd_info;
+ unsigned long uapsd_mask;
+ struct ieee80211_tx_queue_params edca_params[IEEE80211_NUM_ACS];
struct hif_req_set_bss_params bss_params;
struct work_struct bss_params_work;
- struct work_struct set_cts_work;
int join_complete_status;
- bool delayed_unjoin;
struct work_struct unjoin_work;
- struct wfx_scan scan;
+	/* avoid running some operations in parallel with a scan */
+ struct mutex scan_lock;
+ struct work_struct scan_work;
+ struct completion scan_complete;
+ bool scan_abort;
+ struct ieee80211_scan_request *scan_req;
- struct hif_req_set_pm_mode powersave_mode;
struct completion set_pm_mode_complete;
struct list_head event_queue;
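The semaphore-based struct wfx_scan is replaced above by a mutex, a work item, and a completion. One plausible usage pattern for the new fields (a sketch only, not patch code):

	static void example_run_scan(struct wfx_vif *wvif)
	{
		mutex_lock(&wvif->scan_lock);	/* excludes operations that conflict with scan */
		reinit_completion(&wvif->scan_complete);
		schedule_work(&wvif->scan_work);	/* wfx_hw_scan_work() signals scan_complete */
		wait_for_completion(&wvif->scan_complete);
		mutex_unlock(&wvif->scan_lock);
	}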
diff --git a/drivers/staging/wilc1000/fw.h b/drivers/staging/wilc1000/fw.h
new file mode 100644
index 000000000000..a76e1dea4345
--- /dev/null
+++ b/drivers/staging/wilc1000/fw.h
@@ -0,0 +1,119 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2012 - 2018 Microchip Technology Inc., and its subsidiaries.
+ * All rights reserved.
+ */
+
+#ifndef WILC_FW_H
+#define WILC_FW_H
+
+#include <linux/ieee80211.h>
+
+#define WILC_MAX_NUM_STA 9
+#define WILC_MAX_RATES_SUPPORTED 12
+#define WILC_MAX_NUM_PMKIDS 16
+#define WILC_MAX_NUM_SCANNED_CH 14
+
+struct wilc_assoc_resp {
+ __le16 capab_info;
+ __le16 status_code;
+ __le16 aid;
+} __packed;
+
+struct wilc_pmkid {
+ u8 bssid[ETH_ALEN];
+ u8 pmkid[WLAN_PMKID_LEN];
+} __packed;
+
+struct wilc_pmkid_attr {
+ u8 numpmkid;
+ struct wilc_pmkid pmkidlist[WILC_MAX_NUM_PMKIDS];
+} __packed;
+
+struct wilc_reg_frame {
+ u8 reg;
+ u8 reg_id;
+ __le16 frame_type;
+} __packed;
+
+struct wilc_drv_handler {
+ __le32 handler;
+ u8 mode;
+} __packed;
+
+struct wilc_wep_key {
+ u8 index;
+ u8 key_len;
+ u8 key[0];
+} __packed;
+
+struct wilc_sta_wpa_ptk {
+ u8 mac_addr[ETH_ALEN];
+ u8 key_len;
+ u8 key[0];
+} __packed;
+
+struct wilc_ap_wpa_ptk {
+ u8 mac_addr[ETH_ALEN];
+ u8 index;
+ u8 key_len;
+ u8 key[0];
+} __packed;
+
+struct wilc_gtk_key {
+ u8 mac_addr[ETH_ALEN];
+ u8 rsc[8];
+ u8 index;
+ u8 key_len;
+ u8 key[0];
+} __packed;
+
+struct wilc_op_mode {
+ __le32 mode;
+} __packed;
+
+struct wilc_noa_opp_enable {
+ u8 ct_window;
+ u8 cnt;
+ __le32 duration;
+ __le32 interval;
+ __le32 start_time;
+} __packed;
+
+struct wilc_noa_opp_disable {
+ u8 cnt;
+ __le32 duration;
+ __le32 interval;
+ __le32 start_time;
+} __packed;
+
+struct wilc_join_bss_param {
+ char ssid[IEEE80211_MAX_SSID_LEN];
+ u8 ssid_terminator;
+ u8 bss_type;
+ u8 ch;
+ __le16 cap_info;
+ u8 sa[ETH_ALEN];
+ u8 bssid[ETH_ALEN];
+ __le16 beacon_period;
+ u8 dtim_period;
+ u8 supp_rates[WILC_MAX_RATES_SUPPORTED + 1];
+ u8 wmm_cap;
+ u8 uapsd_cap;
+ u8 ht_capable;
+ u8 rsn_found;
+ u8 rsn_grp_policy;
+ u8 mode_802_11i;
+ u8 p_suites[3];
+ u8 akm_suites[3];
+ u8 rsn_cap[2];
+ u8 noa_enabled;
+ __le32 tsf_lo;
+ u8 idx;
+ u8 opp_enabled;
+ union {
+ struct wilc_noa_opp_disable opp_dis;
+ struct wilc_noa_opp_enable opp_en;
+ };
+} __packed;
+#endif
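All multi-byte fields in these wire structs are little-endian and the layouts are __packed, so consumers convert on access; this is exactly how hif.c uses wilc_assoc_resp further down in this patch. A minimal sketch:

	static u16 example_assoc_status(const u8 *buffer)
	{
		const struct wilc_assoc_resp *res =
			(const struct wilc_assoc_resp *)buffer;

		/* fields are __le16 on the wire; convert before comparing */
		return le16_to_cpu(res->status_code);
	}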
diff --git a/drivers/staging/wilc1000/hif.c b/drivers/staging/wilc1000/hif.c
index 349e45d58ec9..658790bd465b 100644
--- a/drivers/staging/wilc1000/hif.c
+++ b/drivers/staging/wilc1000/hif.c
@@ -10,7 +10,6 @@
#define WILC_HIF_CONNECT_TIMEOUT_MS 9500
#define WILC_FALSE_FRMWR_CHANNEL 100
-#define WILC_MAX_RATES_SUPPORTED 12
struct wilc_rcvd_mac_info {
u8 status;
@@ -27,48 +26,6 @@ struct wilc_del_all_sta {
u8 mac[WILC_MAX_NUM_STA][ETH_ALEN];
};
-struct wilc_op_mode {
- __le32 mode;
-};
-
-struct wilc_reg_frame {
- u8 reg;
- u8 reg_id;
- __le16 frame_type;
-} __packed;
-
-struct wilc_drv_handler {
- __le32 handler;
- u8 mode;
-} __packed;
-
-struct wilc_wep_key {
- u8 index;
- u8 key_len;
- u8 key[0];
-} __packed;
-
-struct wilc_sta_wpa_ptk {
- u8 mac_addr[ETH_ALEN];
- u8 key_len;
- u8 key[0];
-} __packed;
-
-struct wilc_ap_wpa_ptk {
- u8 mac_addr[ETH_ALEN];
- u8 index;
- u8 key_len;
- u8 key[0];
-} __packed;
-
-struct wilc_gtk_key {
- u8 mac_addr[ETH_ALEN];
- u8 rsc[8];
- u8 index;
- u8 key_len;
- u8 key[0];
-} __packed;
-
union wilc_message_body {
struct wilc_rcvd_net_info net_info;
struct wilc_rcvd_mac_info mac_info;
@@ -86,51 +43,6 @@ struct host_if_msg {
bool is_sync;
};
-struct wilc_noa_opp_enable {
- u8 ct_window;
- u8 cnt;
- __le32 duration;
- __le32 interval;
- __le32 start_time;
-} __packed;
-
-struct wilc_noa_opp_disable {
- u8 cnt;
- __le32 duration;
- __le32 interval;
- __le32 start_time;
-} __packed;
-
-struct wilc_join_bss_param {
- char ssid[IEEE80211_MAX_SSID_LEN];
- u8 ssid_terminator;
- u8 bss_type;
- u8 ch;
- __le16 cap_info;
- u8 sa[ETH_ALEN];
- u8 bssid[ETH_ALEN];
- __le16 beacon_period;
- u8 dtim_period;
- u8 supp_rates[WILC_MAX_RATES_SUPPORTED + 1];
- u8 wmm_cap;
- u8 uapsd_cap;
- u8 ht_capable;
- u8 rsn_found;
- u8 rsn_grp_policy;
- u8 mode_802_11i;
- u8 p_suites[3];
- u8 akm_suites[3];
- u8 rsn_cap[2];
- u8 noa_enabled;
- __le32 tsf_lo;
- u8 idx;
- u8 opp_enabled;
- union {
- struct wilc_noa_opp_disable opp_dis;
- struct wilc_noa_opp_enable opp_en;
- };
-} __packed;
-
/* 'msg' should be freed by the caller for sync calls */
static struct host_if_msg*
wilc_alloc_work(struct wilc_vif *vif, void (*work_fun)(struct work_struct *),
@@ -640,7 +552,7 @@ static s32 wilc_parse_assoc_resp_info(u8 *buffer, u32 buffer_len,
{
u8 *ies;
u16 ies_len;
- struct assoc_resp *res = (struct assoc_resp *)buffer;
+ struct wilc_assoc_resp *res = (struct wilc_assoc_resp *)buffer;
ret_conn_info->status = le16_to_cpu(res->status_code);
if (ret_conn_info->status == WLAN_STATUS_SUCCESS) {
diff --git a/drivers/staging/wilc1000/hif.h b/drivers/staging/wilc1000/hif.h
index 22ee6fffd599..db9179171f05 100644
--- a/drivers/staging/wilc1000/hif.h
+++ b/drivers/staging/wilc1000/hif.h
@@ -17,14 +17,11 @@ enum {
WILC_CLIENT_MODE = 0x4
};
-#define WILC_MAX_NUM_STA 9
-#define WILC_MAX_NUM_SCANNED_CH 14
#define WILC_MAX_NUM_PROBED_SSID 10
#define WILC_TX_MIC_KEY_LEN 8
#define WILC_RX_MIC_KEY_LEN 8
-#define WILC_MAX_NUM_PMKIDS 16
#define WILC_ADD_STA_LENGTH 40
#define WILC_NUM_CONCURRENT_IFC 2
@@ -35,12 +32,6 @@ enum {
#define WILC_MAX_ASSOC_RESP_FRAME_SIZE 256
-struct assoc_resp {
- __le16 capab_info;
- __le16 status_code;
- __le16 aid;
-} __packed;
-
struct rf_info {
u8 link_speed;
s8 rssi;
@@ -59,16 +50,6 @@ enum host_if_state {
HOST_IF_FORCE_32BIT = 0xFFFFFFFF
};
-struct wilc_pmkid {
- u8 bssid[ETH_ALEN];
- u8 pmkid[WLAN_PMKID_LEN];
-} __packed;
-
-struct wilc_pmkid_attr {
- u8 numpmkid;
- struct wilc_pmkid pmkidlist[WILC_MAX_NUM_PMKIDS];
-} __packed;
-
struct cfg_param_attr {
u32 flag;
u16 short_retry_limit;
diff --git a/drivers/staging/wilc1000/netdev.c b/drivers/staging/wilc1000/netdev.c
index d2c0b0f7cf63..fce5bf2d82fa 100644
--- a/drivers/staging/wilc1000/netdev.c
+++ b/drivers/staging/wilc1000/netdev.c
@@ -96,21 +96,18 @@ void wilc_mac_indicate(struct wilc *wilc)
static struct net_device *get_if_handler(struct wilc *wilc, u8 *mac_header)
{
- u8 *bssid, *bssid1;
struct net_device *ndev = NULL;
struct wilc_vif *vif;
-
- bssid = mac_header + 10;
- bssid1 = mac_header + 4;
+ struct ieee80211_hdr *h = (struct ieee80211_hdr *)mac_header;
list_for_each_entry_rcu(vif, &wilc->vif_list, list) {
if (vif->mode == WILC_STATION_MODE)
- if (ether_addr_equal_unaligned(bssid, vif->bssid)) {
+ if (ether_addr_equal_unaligned(h->addr2, vif->bssid)) {
ndev = vif->ndev;
goto out;
}
if (vif->mode == WILC_AP_MODE)
- if (ether_addr_equal_unaligned(bssid1, vif->bssid)) {
+ if (ether_addr_equal_unaligned(h->addr1, vif->bssid)) {
ndev = vif->ndev;
goto out;
}
@@ -177,7 +174,7 @@ static int wilc_txq_task(void *vp)
}
srcu_read_unlock(&wl->srcu, srcu_idx);
}
- } while (ret == -ENOBUFS && !wl->close);
+ } while (ret == WILC_VMM_ENTRY_FULL_RETRY && !wl->close);
}
return 0;
}
@@ -186,7 +183,7 @@ static int wilc_wlan_get_firmware(struct net_device *dev)
{
struct wilc_vif *vif = netdev_priv(dev);
struct wilc *wilc = vif->wilc;
- int chip_id, ret = 0;
+ int chip_id;
const struct firmware *wilc_firmware;
char *firmware;
@@ -201,14 +198,11 @@ static int wilc_wlan_get_firmware(struct net_device *dev)
if (request_firmware(&wilc_firmware, firmware, wilc->dev) != 0) {
netdev_err(dev, "%s - firmware not available\n", firmware);
- ret = -1;
- goto fail;
+ return -EINVAL;
}
wilc->firmware = wilc_firmware;
-fail:
-
- return ret;
+ return 0;
}
static int wilc_start_firmware(struct net_device *dev)
@@ -218,7 +212,7 @@ static int wilc_start_firmware(struct net_device *dev)
int ret = 0;
ret = wilc_wlan_start(wilc);
- if (ret < 0)
+ if (ret)
return ret;
if (!wait_for_completion_timeout(&wilc->sync_event,
@@ -241,7 +235,7 @@ static int wilc1000_firmware_download(struct net_device *dev)
ret = wilc_wlan_firmware_download(wilc, wilc->firmware->data,
wilc->firmware->size);
- if (ret < 0)
+ if (ret)
return ret;
release_firmware(wilc->firmware);
@@ -420,7 +414,7 @@ static int wilc_init_fw_config(struct net_device *dev, struct wilc_vif *vif)
return 0;
fail:
- return -1;
+ return -EINVAL;
}
static void wlan_deinitialize_threads(struct net_device *dev)
@@ -500,14 +494,12 @@ static int wilc_wlan_initialize(struct net_device *dev, struct wilc_vif *vif)
wl->close = 0;
ret = wilc_wlan_init(dev);
- if (ret < 0)
- return -EIO;
+ if (ret)
+ return ret;
ret = wlan_initialize_threads(dev);
- if (ret < 0) {
- ret = -EIO;
+ if (ret)
goto fail_wilc_wlan;
- }
if (wl->gpio_irq && init_irq(dev)) {
ret = -EIO;
@@ -521,22 +513,17 @@ static int wilc_wlan_initialize(struct net_device *dev, struct wilc_vif *vif)
goto fail_irq_init;
}
- if (wilc_wlan_get_firmware(dev)) {
- ret = -EIO;
+ ret = wilc_wlan_get_firmware(dev);
+ if (ret)
goto fail_irq_enable;
- }
ret = wilc1000_firmware_download(dev);
- if (ret < 0) {
- ret = -EIO;
+ if (ret)
goto fail_irq_enable;
- }
ret = wilc_start_firmware(dev);
- if (ret < 0) {
- ret = -EIO;
+ if (ret)
goto fail_irq_enable;
- }
if (wilc_wlan_cfg_get(vif, 1, WID_FIRMWARE_VERSION, 1, 0)) {
int size;
@@ -548,11 +535,10 @@ static int wilc_wlan_initialize(struct net_device *dev, struct wilc_vif *vif)
firmware_ver[size] = '\0';
netdev_dbg(dev, "Firmware Ver = %s\n", firmware_ver);
}
- ret = wilc_init_fw_config(dev, vif);
- if (ret < 0) {
+ ret = wilc_init_fw_config(dev, vif);
+ if (ret) {
netdev_err(dev, "Failed to configure firmware\n");
- ret = -EIO;
goto fail_fw_start;
}
wl->initialized = true;
@@ -603,11 +589,11 @@ static int wilc_mac_open(struct net_device *ndev)
netdev_dbg(ndev, "MAC OPEN[%p]\n", ndev);
ret = wilc_init_host_int(ndev);
- if (ret < 0)
+ if (ret)
return ret;
ret = wilc_wlan_initialize(ndev, vif);
- if (ret < 0) {
+ if (ret) {
wilc_deinit_host_int(ndev);
return ret;
}
@@ -840,7 +826,7 @@ static const struct net_device_ops wilc_netdev_ops = {
void wilc_netdev_cleanup(struct wilc *wilc)
{
struct wilc_vif *vif;
- int srcu_idx;
+ int srcu_idx, ifc_cnt = 0;
if (!wilc)
return;
@@ -861,7 +847,7 @@ void wilc_netdev_cleanup(struct wilc *wilc)
flush_workqueue(wilc->hif_workqueue);
destroy_workqueue(wilc->hif_workqueue);
- do {
+ while (ifc_cnt < WILC_NUM_CONCURRENT_IFC) {
mutex_lock(&wilc->vif_mutex);
if (wilc->vif_num <= 0) {
mutex_unlock(&wilc->vif_mutex);
@@ -874,7 +860,8 @@ void wilc_netdev_cleanup(struct wilc *wilc)
wilc->vif_num--;
mutex_unlock(&wilc->vif_mutex);
synchronize_srcu(&wilc->srcu);
- } while (1);
+ ifc_cnt++;
+ }
wilc_wlan_cfg_deinit(wilc);
wlan_deinit_locks(wilc);
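get_if_handler() above drops the magic offsets 4 and 10 in favour of struct ieee80211_hdr fields; the offsets match because addr1 and addr2 sit at bytes 4 and 10 of the MAC header. A compile-time restatement (illustrative only):

	#include <linux/build_bug.h>
	#include <linux/ieee80211.h>
	#include <linux/stddef.h>

	static void check_hdr_offsets(void)
	{
		/* frame_control (2) + duration_id (2) precede addr1;
		 * addr1 (6) then precedes addr2 */
		BUILD_BUG_ON(offsetof(struct ieee80211_hdr, addr1) != 4);
		BUILD_BUG_ON(offsetof(struct ieee80211_hdr, addr2) != 10);
	}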
diff --git a/drivers/staging/wilc1000/netdev.h b/drivers/staging/wilc1000/netdev.h
index cd8f0d72caaa..d5f7a6037fbc 100644
--- a/drivers/staging/wilc1000/netdev.h
+++ b/drivers/staging/wilc1000/netdev.h
@@ -21,7 +21,6 @@
#define FLOW_CONTROL_LOWER_THRESHOLD 128
#define FLOW_CONTROL_UPPER_THRESHOLD 256
-#define WILC_MAX_NUM_PMKIDS 16
#define PMKID_FOUND 1
#define NUM_STA_ASSOCIATED 8
diff --git a/drivers/staging/wilc1000/sdio.c b/drivers/staging/wilc1000/sdio.c
index 319e039380b0..ca99335687c4 100644
--- a/drivers/staging/wilc1000/sdio.c
+++ b/drivers/staging/wilc1000/sdio.c
@@ -273,7 +273,7 @@ static int wilc_sdio_set_func0_csa_address(struct wilc *wilc, u32 adr)
ret = wilc_sdio_cmd52(wilc, &cmd);
if (ret) {
dev_err(&func->dev, "Failed cmd52, set 0x10c data...\n");
- goto fail;
+ return ret;
}
cmd.address = 0x10d;
@@ -281,7 +281,7 @@ static int wilc_sdio_set_func0_csa_address(struct wilc *wilc, u32 adr)
ret = wilc_sdio_cmd52(wilc, &cmd);
if (ret) {
dev_err(&func->dev, "Failed cmd52, set 0x10d data...\n");
- goto fail;
+ return ret;
}
cmd.address = 0x10e;
@@ -289,11 +289,9 @@ static int wilc_sdio_set_func0_csa_address(struct wilc *wilc, u32 adr)
ret = wilc_sdio_cmd52(wilc, &cmd);
if (ret) {
dev_err(&func->dev, "Failed cmd52, set 0x10e data...\n");
- goto fail;
+ return ret;
}
- return 1;
-fail:
return 0;
}
@@ -311,7 +309,7 @@ static int wilc_sdio_set_func0_block_size(struct wilc *wilc, u32 block_size)
ret = wilc_sdio_cmd52(wilc, &cmd);
if (ret) {
dev_err(&func->dev, "Failed cmd52, set 0x10 data...\n");
- goto fail;
+ return ret;
}
cmd.address = 0x11;
@@ -319,11 +317,9 @@ static int wilc_sdio_set_func0_block_size(struct wilc *wilc, u32 block_size)
ret = wilc_sdio_cmd52(wilc, &cmd);
if (ret) {
dev_err(&func->dev, "Failed cmd52, set 0x11 data...\n");
- goto fail;
+ return ret;
}
- return 1;
-fail:
return 0;
}
@@ -347,18 +343,16 @@ static int wilc_sdio_set_func1_block_size(struct wilc *wilc, u32 block_size)
ret = wilc_sdio_cmd52(wilc, &cmd);
if (ret) {
dev_err(&func->dev, "Failed cmd52, set 0x110 data...\n");
- goto fail;
+ return ret;
}
cmd.address = 0x111;
cmd.data = (u8)(block_size >> 8);
ret = wilc_sdio_cmd52(wilc, &cmd);
if (ret) {
dev_err(&func->dev, "Failed cmd52, set 0x111 data...\n");
- goto fail;
+ return ret;
}
- return 1;
-fail:
return 0;
}
@@ -384,19 +378,18 @@ static int wilc_sdio_write_reg(struct wilc *wilc, u32 addr, u32 data)
cmd.address = addr;
cmd.data = data;
ret = wilc_sdio_cmd52(wilc, &cmd);
- if (ret) {
+ if (ret)
dev_err(&func->dev,
"Failed cmd 52, read reg (%08x) ...\n", addr);
- goto fail;
- }
} else {
struct sdio_cmd53 cmd;
/**
* set the AHB address
**/
- if (!wilc_sdio_set_func0_csa_address(wilc, addr))
- goto fail;
+ ret = wilc_sdio_set_func0_csa_address(wilc, addr);
+ if (ret)
+ return ret;
cmd.read_write = 1;
cmd.function = 0;
@@ -407,18 +400,12 @@ static int wilc_sdio_write_reg(struct wilc *wilc, u32 addr, u32 data)
cmd.buffer = (u8 *)&data;
cmd.block_size = sdio_priv->block_size;
ret = wilc_sdio_cmd53(wilc, &cmd);
- if (ret) {
+ if (ret)
dev_err(&func->dev,
"Failed cmd53, write reg (%08x)...\n", addr);
- goto fail;
- }
}
- return 1;
-
-fail:
-
- return 0;
+ return ret;
}
static int wilc_sdio_write(struct wilc *wilc, u32 addr, u8 *buf, u32 size)
@@ -470,14 +457,15 @@ static int wilc_sdio_write(struct wilc *wilc, u32 addr, u8 *buf, u32 size)
cmd.buffer = buf;
cmd.block_size = block_size;
if (addr > 0) {
- if (!wilc_sdio_set_func0_csa_address(wilc, addr))
- goto fail;
+ ret = wilc_sdio_set_func0_csa_address(wilc, addr);
+ if (ret)
+ return ret;
}
ret = wilc_sdio_cmd53(wilc, &cmd);
if (ret) {
dev_err(&func->dev,
"Failed cmd53 [%x], block send...\n", addr);
- goto fail;
+ return ret;
}
if (addr > 0)
addr += nblk * block_size;
@@ -493,21 +481,18 @@ static int wilc_sdio_write(struct wilc *wilc, u32 addr, u8 *buf, u32 size)
cmd.block_size = block_size;
if (addr > 0) {
- if (!wilc_sdio_set_func0_csa_address(wilc, addr))
- goto fail;
+ ret = wilc_sdio_set_func0_csa_address(wilc, addr);
+ if (ret)
+ return ret;
}
ret = wilc_sdio_cmd53(wilc, &cmd);
if (ret) {
dev_err(&func->dev,
"Failed cmd53 [%x], bytes send...\n", addr);
- goto fail;
+ return ret;
}
}
- return 1;
-
-fail:
-
return 0;
}
@@ -528,14 +513,15 @@ static int wilc_sdio_read_reg(struct wilc *wilc, u32 addr, u32 *data)
if (ret) {
dev_err(&func->dev,
"Failed cmd 52, read reg (%08x) ...\n", addr);
- goto fail;
+ return ret;
}
*data = cmd.data;
} else {
struct sdio_cmd53 cmd;
- if (!wilc_sdio_set_func0_csa_address(wilc, addr))
- goto fail;
+ ret = wilc_sdio_set_func0_csa_address(wilc, addr);
+ if (ret)
+ return ret;
cmd.read_write = 0;
cmd.function = 0;
@@ -550,16 +536,11 @@ static int wilc_sdio_read_reg(struct wilc *wilc, u32 addr, u32 *data)
if (ret) {
dev_err(&func->dev,
"Failed cmd53, read reg (%08x)...\n", addr);
- goto fail;
+ return ret;
}
}
le32_to_cpus(data);
-
- return 1;
-
-fail:
-
return 0;
}
@@ -612,14 +593,15 @@ static int wilc_sdio_read(struct wilc *wilc, u32 addr, u8 *buf, u32 size)
cmd.buffer = buf;
cmd.block_size = block_size;
if (addr > 0) {
- if (!wilc_sdio_set_func0_csa_address(wilc, addr))
- goto fail;
+ ret = wilc_sdio_set_func0_csa_address(wilc, addr);
+ if (ret)
+ return ret;
}
ret = wilc_sdio_cmd53(wilc, &cmd);
if (ret) {
dev_err(&func->dev,
"Failed cmd53 [%x], block read...\n", addr);
- goto fail;
+ return ret;
}
if (addr > 0)
addr += nblk * block_size;
@@ -635,21 +617,18 @@ static int wilc_sdio_read(struct wilc *wilc, u32 addr, u8 *buf, u32 size)
cmd.block_size = block_size;
if (addr > 0) {
- if (!wilc_sdio_set_func0_csa_address(wilc, addr))
- goto fail;
+ ret = wilc_sdio_set_func0_csa_address(wilc, addr);
+ if (ret)
+ return ret;
}
ret = wilc_sdio_cmd53(wilc, &cmd);
if (ret) {
dev_err(&func->dev,
"Failed cmd53 [%x], bytes read...\n", addr);
- goto fail;
+ return ret;
}
}
- return 1;
-
-fail:
-
return 0;
}
@@ -661,7 +640,7 @@ fail:
static int wilc_sdio_deinit(struct wilc *wilc)
{
- return 1;
+ return 0;
}
static int wilc_sdio_init(struct wilc *wilc, bool resume)
@@ -686,15 +665,16 @@ static int wilc_sdio_init(struct wilc *wilc, bool resume)
ret = wilc_sdio_cmd52(wilc, &cmd);
if (ret) {
dev_err(&func->dev, "Fail cmd 52, enable csa...\n");
- goto fail;
+ return ret;
}
/**
* function 0 block size
**/
- if (!wilc_sdio_set_func0_block_size(wilc, WILC_SDIO_BLOCK_SIZE)) {
+ ret = wilc_sdio_set_func0_block_size(wilc, WILC_SDIO_BLOCK_SIZE);
+ if (ret) {
dev_err(&func->dev, "Fail cmd 52, set func 0 block size...\n");
- goto fail;
+ return ret;
}
sdio_priv->block_size = WILC_SDIO_BLOCK_SIZE;
@@ -710,7 +690,7 @@ static int wilc_sdio_init(struct wilc *wilc, bool resume)
if (ret) {
dev_err(&func->dev,
"Fail cmd 52, set IOE register...\n");
- goto fail;
+ return ret;
}
/**
@@ -727,7 +707,7 @@ static int wilc_sdio_init(struct wilc *wilc, bool resume)
if (ret) {
dev_err(&func->dev,
"Fail cmd 52, get IOR register...\n");
- goto fail;
+ return ret;
}
if (cmd.data == 0x2)
break;
@@ -735,15 +715,16 @@ static int wilc_sdio_init(struct wilc *wilc, bool resume)
if (loop <= 0) {
dev_err(&func->dev, "Fail func 1 is not ready...\n");
- goto fail;
+ return -EINVAL;
}
/**
* func 1 is ready, set func 1 block size
**/
- if (!wilc_sdio_set_func1_block_size(wilc, WILC_SDIO_BLOCK_SIZE)) {
+ ret = wilc_sdio_set_func1_block_size(wilc, WILC_SDIO_BLOCK_SIZE);
+ if (ret) {
dev_err(&func->dev, "Fail set func 1 block size...\n");
- goto fail;
+ return ret;
}
/**
@@ -757,16 +738,17 @@ static int wilc_sdio_init(struct wilc *wilc, bool resume)
ret = wilc_sdio_cmd52(wilc, &cmd);
if (ret) {
dev_err(&func->dev, "Fail cmd 52, set IEN register...\n");
- goto fail;
+ return ret;
}
/**
* make sure can read back chip id correctly
**/
if (!resume) {
- if (!wilc_sdio_read_reg(wilc, 0x1000, &chipid)) {
+ ret = wilc_sdio_read_reg(wilc, 0x1000, &chipid);
+ if (ret) {
dev_err(&func->dev, "Fail cmd read chip id...\n");
- goto fail;
+ return ret;
}
dev_err(&func->dev, "chipid (%08x)\n", chipid);
if ((chipid & 0xfff) > 0x2a0)
@@ -777,10 +759,6 @@ static int wilc_sdio_init(struct wilc *wilc, bool resume)
sdio_priv->has_thrpt_enh3);
}
- return 1;
-
-fail:
-
return 0;
}
@@ -806,7 +784,7 @@ static int wilc_sdio_read_size(struct wilc *wilc, u32 *size)
tmp |= (cmd.data << 8);
*size = tmp;
- return 1;
+ return 0;
}
static int wilc_sdio_read_int(struct wilc *wilc, u32 *int_status)
@@ -865,7 +843,7 @@ static int wilc_sdio_read_int(struct wilc *wilc, u32 *int_status)
*int_status = tmp;
- return 1;
+ return 0;
}
static int wilc_sdio_clear_int_ext(struct wilc *wilc, u32 val)
@@ -909,10 +887,10 @@ static int wilc_sdio_clear_int_ext(struct wilc *wilc, u32 val)
dev_err(&func->dev,
"Failed cmd52, set 0xf8 data (%d) ...\n",
__LINE__);
- goto fail;
+ return ret;
}
}
- return 1;
+ return 0;
}
if (sdio_priv->irq_gpio) {
/* has_thrpt_enh2 uses register 0xf8 to clear interrupts. */
@@ -926,7 +904,6 @@ static int wilc_sdio_clear_int_ext(struct wilc *wilc, u32 val)
if (flags) {
int i;
- ret = 1;
for (i = 0; i < sdio_priv->nint; i++) {
if (flags & 1) {
struct sdio_cmd52 cmd;
@@ -942,15 +919,12 @@ static int wilc_sdio_clear_int_ext(struct wilc *wilc, u32 val)
dev_err(&func->dev,
"Failed cmd52, set 0xf8 data (%d) ...\n",
__LINE__);
- goto fail;
+ return ret;
}
}
- if (!ret)
- break;
flags >>= 1;
}
- if (!ret)
- goto fail;
+
for (i = sdio_priv->nint; i < MAX_NUM_INT; i++) {
if (flags & 1)
dev_err(&func->dev,
@@ -985,11 +959,9 @@ static int wilc_sdio_clear_int_ext(struct wilc *wilc, u32 val)
dev_err(&func->dev,
"Failed cmd52, set 0xf6 data (%d) ...\n",
__LINE__);
- goto fail;
+ return ret;
}
}
- return 1;
-fail:
return 0;
}
@@ -1001,12 +973,12 @@ static int wilc_sdio_sync_ext(struct wilc *wilc, int nint)
if (nint > MAX_NUM_INT) {
dev_err(&func->dev, "Too many interrupts (%d)...\n", nint);
- return 0;
+ return -EINVAL;
}
if (nint > MAX_NUN_INT_THRPT_ENH2) {
dev_err(&func->dev,
"Cannot support more than 5 interrupts when has_thrpt_enh2=1.\n");
- return 0;
+ return -EINVAL;
}
sdio_priv->nint = nint;
@@ -1014,15 +986,15 @@ static int wilc_sdio_sync_ext(struct wilc *wilc, int nint)
/**
* Disable power sequencer
**/
- if (!wilc_sdio_read_reg(wilc, WILC_MISC, &reg)) {
+ if (wilc_sdio_read_reg(wilc, WILC_MISC, &reg)) {
dev_err(&func->dev, "Failed read misc reg...\n");
- return 0;
+ return -EINVAL;
}
reg &= ~BIT(8);
- if (!wilc_sdio_write_reg(wilc, WILC_MISC, reg)) {
+ if (wilc_sdio_write_reg(wilc, WILC_MISC, reg)) {
dev_err(&func->dev, "Failed write misc reg...\n");
- return 0;
+ return -EINVAL;
}
if (sdio_priv->irq_gpio) {
@@ -1033,59 +1005,59 @@ static int wilc_sdio_sync_ext(struct wilc *wilc, int nint)
* interrupt pin mux select
**/
ret = wilc_sdio_read_reg(wilc, WILC_PIN_MUX_0, &reg);
- if (!ret) {
+ if (ret) {
dev_err(&func->dev, "Failed read reg (%08x)...\n",
WILC_PIN_MUX_0);
- return 0;
+ return ret;
}
reg |= BIT(8);
ret = wilc_sdio_write_reg(wilc, WILC_PIN_MUX_0, reg);
- if (!ret) {
+ if (ret) {
dev_err(&func->dev, "Failed write reg (%08x)...\n",
WILC_PIN_MUX_0);
- return 0;
+ return ret;
}
/**
* interrupt enable
**/
ret = wilc_sdio_read_reg(wilc, WILC_INTR_ENABLE, &reg);
- if (!ret) {
+ if (ret) {
dev_err(&func->dev, "Failed read reg (%08x)...\n",
WILC_INTR_ENABLE);
- return 0;
+ return ret;
}
for (i = 0; (i < 5) && (nint > 0); i++, nint--)
reg |= BIT((27 + i));
ret = wilc_sdio_write_reg(wilc, WILC_INTR_ENABLE, reg);
- if (!ret) {
+ if (ret) {
dev_err(&func->dev, "Failed write reg (%08x)...\n",
WILC_INTR_ENABLE);
- return 0;
+ return ret;
}
if (nint) {
ret = wilc_sdio_read_reg(wilc, WILC_INTR2_ENABLE, &reg);
- if (!ret) {
+ if (ret) {
dev_err(&func->dev,
"Failed read reg (%08x)...\n",
WILC_INTR2_ENABLE);
- return 0;
+ return ret;
}
for (i = 0; (i < 3) && (nint > 0); i++, nint--)
reg |= BIT(i);
ret = wilc_sdio_read_reg(wilc, WILC_INTR2_ENABLE, &reg);
- if (!ret) {
+ if (ret) {
dev_err(&func->dev,
"Failed write reg (%08x)...\n",
WILC_INTR2_ENABLE);
- return 0;
+ return ret;
}
}
}
- return 1;
+ return 0;
}
/* Global sdio HIF function table */
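The sdio.c changes above (and the spi.c changes below) convert the bus helpers from the driver-local "1 on success, 0 on failure" convention to the kernel's "0 on success, negative errno on failure", which is why call sites flip from if (!ret) to if (ret). In miniature (illustrative only; transfer_ok() is a hypothetical stand-in):

	static int helper_old(void)
	{
		return transfer_ok() ? 1 : 0;		/* old: 1 = success, 0 = failure */
	}

	static int helper_new(void)
	{
		return transfer_ok() ? 0 : -EIO;	/* new: 0 = success, -errno = failure */
	}

	/* call sites flip accordingly:
	 *   old: if (!helper_old()) goto fail;
	 *   new: ret = helper_new(); if (ret) return ret;
	 */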
diff --git a/drivers/staging/wilc1000/spi.c b/drivers/staging/wilc1000/spi.c
index 55f8757325f0..3ffc7b4fddf6 100644
--- a/drivers/staging/wilc1000/spi.c
+++ b/drivers/staging/wilc1000/spi.c
@@ -13,7 +13,6 @@
struct wilc_spi {
int crc_off;
int nint;
- int has_thrpt_enh;
};
static const struct wilc_hif_func wilc_hif_spi;
@@ -89,11 +88,6 @@ static u8 crc7(u8 crc, const u8 *buffer, u32 len)
#define CMD_SINGLE_READ 0xca
#define CMD_RESET 0xcf
-#define N_OK 1
-#define N_FAIL 0
-#define N_RESET -1
-#define N_RETRY -2
-
#define DATA_PKT_SZ_256 256
#define DATA_PKT_SZ_512 512
#define DATA_PKT_SZ_1K 1024
@@ -300,7 +294,7 @@ static int spi_cmd_complete(struct wilc *wilc, u8 cmd, u32 adr, u8 *b, u32 sz,
u32 len2;
u8 rsp;
int len = 0;
- int result = N_OK;
+ int result = 0;
int retry;
u8 crc[2];
@@ -388,11 +382,11 @@ static int spi_cmd_complete(struct wilc *wilc, u8 cmd, u32 adr, u8 *b, u32 sz,
break;
default:
- result = N_FAIL;
+ result = -EINVAL;
break;
}
- if (result != N_OK)
+ if (result)
return result;
if (!spi_priv->crc_off)
@@ -425,7 +419,7 @@ static int spi_cmd_complete(struct wilc *wilc, u8 cmd, u32 adr, u8 *b, u32 sz,
if (len2 > ARRAY_SIZE(wb)) {
dev_err(&spi->dev, "spi buffer size too small (%d) (%zu)\n",
len2, ARRAY_SIZE(wb));
- return N_FAIL;
+ return -EINVAL;
}
/* zero spi write buffers. */
for (wix = len; wix < len2; wix++)
@@ -434,7 +428,7 @@ static int spi_cmd_complete(struct wilc *wilc, u8 cmd, u32 adr, u8 *b, u32 sz,
if (wilc_spi_tx_rx(wilc, wb, rb, len2)) {
dev_err(&spi->dev, "Failed cmd write, bus error...\n");
- return N_FAIL;
+ return -EINVAL;
}
/*
@@ -449,7 +443,7 @@ static int spi_cmd_complete(struct wilc *wilc, u8 cmd, u32 adr, u8 *b, u32 sz,
dev_err(&spi->dev,
"Failed cmd response, cmd (%02x), resp (%02x)\n",
cmd, rsp);
- return N_FAIL;
+ return -EINVAL;
}
/*
@@ -459,7 +453,7 @@ static int spi_cmd_complete(struct wilc *wilc, u8 cmd, u32 adr, u8 *b, u32 sz,
if (rsp != 0x00) {
dev_err(&spi->dev, "Failed cmd state response state (%02x)\n",
rsp);
- return N_FAIL;
+ return -EINVAL;
}
if (cmd == CMD_INTERNAL_READ || cmd == CMD_SINGLE_READ ||
@@ -486,7 +480,7 @@ static int spi_cmd_complete(struct wilc *wilc, u8 cmd, u32 adr, u8 *b, u32 sz,
if (retry <= 0) {
dev_err(&spi->dev,
"Error, data read response (%02x)\n", rsp);
- return N_RESET;
+ return -EAGAIN;
}
}
@@ -502,7 +496,7 @@ static int spi_cmd_complete(struct wilc *wilc, u8 cmd, u32 adr, u8 *b, u32 sz,
} else {
dev_err(&spi->dev,
"buffer overrun when reading data.\n");
- return N_FAIL;
+ return -EINVAL;
}
if (!spi_priv->crc_off) {
@@ -515,7 +509,7 @@ static int spi_cmd_complete(struct wilc *wilc, u8 cmd, u32 adr, u8 *b, u32 sz,
} else {
dev_err(&spi->dev,
"buffer overrun when reading crc.\n");
- return N_FAIL;
+ return -EINVAL;
}
}
} else if ((cmd == CMD_DMA_READ) || (cmd == CMD_DMA_EXT_READ)) {
@@ -541,7 +535,7 @@ static int spi_cmd_complete(struct wilc *wilc, u8 cmd, u32 adr, u8 *b, u32 sz,
if (wilc_spi_rx(wilc, &b[ix], nbytes)) {
dev_err(&spi->dev,
"Failed block read, bus err\n");
- return N_FAIL;
+ return -EINVAL;
}
/*
@@ -550,7 +544,7 @@ static int spi_cmd_complete(struct wilc *wilc, u8 cmd, u32 adr, u8 *b, u32 sz,
if (!spi_priv->crc_off && wilc_spi_rx(wilc, crc, 2)) {
dev_err(&spi->dev,
"Failed block crc read, bus err\n");
- return N_FAIL;
+ return -EINVAL;
}
ix += nbytes;
@@ -582,14 +576,14 @@ static int spi_cmd_complete(struct wilc *wilc, u8 cmd, u32 adr, u8 *b, u32 sz,
if (wilc_spi_rx(wilc, &rsp, 1)) {
dev_err(&spi->dev,
"Failed resp read, bus err\n");
- result = N_FAIL;
+ result = -EINVAL;
break;
}
if (((rsp >> 4) & 0xf) == 0xf)
break;
} while (retry--);
- if (result == N_FAIL)
+ if (result)
break;
/*
@@ -598,7 +592,7 @@ static int spi_cmd_complete(struct wilc *wilc, u8 cmd, u32 adr, u8 *b, u32 sz,
if (wilc_spi_rx(wilc, &b[ix], nbytes)) {
dev_err(&spi->dev,
"Failed block read, bus err\n");
- result = N_FAIL;
+ result = -EINVAL;
break;
}
@@ -608,7 +602,7 @@ static int spi_cmd_complete(struct wilc *wilc, u8 cmd, u32 adr, u8 *b, u32 sz,
if (!spi_priv->crc_off && wilc_spi_rx(wilc, crc, 2)) {
dev_err(&spi->dev,
"Failed block crc read, bus err\n");
- result = N_FAIL;
+ result = -EINVAL;
break;
}
@@ -624,7 +618,7 @@ static int spi_data_write(struct wilc *wilc, u8 *b, u32 sz)
struct spi_device *spi = to_spi_device(wilc->dev);
struct wilc_spi *spi_priv = wilc->bus_data;
int ix, nbytes;
- int result = 1;
+ int result = 0;
u8 cmd, order, crc[2] = {0};
/*
@@ -652,7 +646,7 @@ static int spi_data_write(struct wilc *wilc, u8 *b, u32 sz)
if (wilc_spi_tx(wilc, &cmd, 1)) {
dev_err(&spi->dev,
"Failed data block cmd write, bus error...\n");
- result = N_FAIL;
+ result = -EINVAL;
break;
}
@@ -662,7 +656,7 @@ static int spi_data_write(struct wilc *wilc, u8 *b, u32 sz)
if (wilc_spi_tx(wilc, &b[ix], nbytes)) {
dev_err(&spi->dev,
"Failed data block write, bus error...\n");
- result = N_FAIL;
+ result = -EINVAL;
break;
}
@@ -672,7 +666,7 @@ static int spi_data_write(struct wilc *wilc, u8 *b, u32 sz)
if (!spi_priv->crc_off) {
if (wilc_spi_tx(wilc, crc, 2)) {
dev_err(&spi->dev, "Failed data block crc write, bus error...\n");
- result = N_FAIL;
+ result = -EINVAL;
break;
}
}
@@ -701,7 +695,7 @@ static int spi_internal_write(struct wilc *wilc, u32 adr, u32 dat)
cpu_to_le32s(&dat);
result = spi_cmd_complete(wilc, CMD_INTERNAL_WRITE, adr, (u8 *)&dat, 4,
0);
- if (result != N_OK)
+ if (result)
dev_err(&spi->dev, "Failed internal write cmd...\n");
return result;
@@ -714,14 +708,14 @@ static int spi_internal_read(struct wilc *wilc, u32 adr, u32 *data)
result = spi_cmd_complete(wilc, CMD_INTERNAL_READ, adr, (u8 *)data, 4,
0);
- if (result != N_OK) {
+ if (result) {
dev_err(&spi->dev, "Failed internal read cmd...\n");
- return 0;
+ return result;
}
le32_to_cpus(data);
- return 1;
+ return result;
}
/********************************************
@@ -733,7 +727,7 @@ static int spi_internal_read(struct wilc *wilc, u32 adr, u32 *data)
static int wilc_spi_write_reg(struct wilc *wilc, u32 addr, u32 data)
{
struct spi_device *spi = to_spi_device(wilc->dev);
- int result = N_OK;
+ int result;
u8 cmd = CMD_SINGLE_WRITE;
u8 clockless = 0;
@@ -745,7 +739,7 @@ static int wilc_spi_write_reg(struct wilc *wilc, u32 addr, u32 data)
}
result = spi_cmd_complete(wilc, cmd, addr, (u8 *)&data, 4, clockless);
- if (result != N_OK)
+ if (result)
dev_err(&spi->dev, "Failed cmd, write reg (%08x)...\n", addr);
return result;
@@ -760,29 +754,29 @@ static int wilc_spi_write(struct wilc *wilc, u32 addr, u8 *buf, u32 size)
	 * has to be greater than 4
*/
if (size <= 4)
- return 0;
+ return -EINVAL;
result = spi_cmd_complete(wilc, CMD_DMA_EXT_WRITE, addr, NULL, size, 0);
- if (result != N_OK) {
+ if (result) {
dev_err(&spi->dev,
"Failed cmd, write block (%08x)...\n", addr);
- return 0;
+ return result;
}
/*
* Data
*/
result = spi_data_write(wilc, buf, size);
- if (result != N_OK)
+ if (result)
dev_err(&spi->dev, "Failed block data write...\n");
- return 1;
+ return result;
}
static int wilc_spi_read_reg(struct wilc *wilc, u32 addr, u32 *data)
{
struct spi_device *spi = to_spi_device(wilc->dev);
- int result = N_OK;
+ int result;
u8 cmd = CMD_SINGLE_READ;
u8 clockless = 0;
@@ -793,14 +787,14 @@ static int wilc_spi_read_reg(struct wilc *wilc, u32 addr, u32 *data)
}
result = spi_cmd_complete(wilc, cmd, addr, (u8 *)data, 4, clockless);
- if (result != N_OK) {
+ if (result) {
dev_err(&spi->dev, "Failed cmd, read reg (%08x)...\n", addr);
- return 0;
+ return result;
}
le32_to_cpus(data);
- return 1;
+ return 0;
}
static int wilc_spi_read(struct wilc *wilc, u32 addr, u8 *buf, u32 size)
@@ -809,15 +803,13 @@ static int wilc_spi_read(struct wilc *wilc, u32 addr, u8 *buf, u32 size)
int result;
if (size <= 4)
- return 0;
+ return -EINVAL;
result = spi_cmd_complete(wilc, CMD_DMA_EXT_READ, addr, buf, size, 0);
- if (result != N_OK) {
+ if (result)
dev_err(&spi->dev, "Failed cmd, read block (%08x)...\n", addr);
- return 0;
- }
- return 1;
+ return result;
}
/********************************************
@@ -831,7 +823,7 @@ static int wilc_spi_deinit(struct wilc *wilc)
/*
* TODO:
*/
- return 1;
+ return 0;
}
static int wilc_spi_init(struct wilc *wilc, bool resume)
@@ -841,13 +833,14 @@ static int wilc_spi_init(struct wilc *wilc, bool resume)
u32 reg;
u32 chipid;
static int isinit;
+ int ret;
if (isinit) {
- if (!wilc_spi_read_reg(wilc, 0x1000, &chipid)) {
+ ret = wilc_spi_read_reg(wilc, 0x1000, &chipid);
+ if (ret)
dev_err(&spi->dev, "Fail cmd read chip id...\n");
- return 0;
- }
- return 1;
+
+ return ret;
}
/*
@@ -859,7 +852,8 @@ static int wilc_spi_init(struct wilc *wilc, bool resume)
* way to reset
*/
/* the SPI to it's initial value. */
- if (!spi_internal_read(wilc, WILC_SPI_PROTOCOL_OFFSET, &reg)) {
+ ret = spi_internal_read(wilc, WILC_SPI_PROTOCOL_OFFSET, &reg);
+ if (ret) {
/*
* Read failed. Try with CRC off. This might happen when module
* is removed but chip isn't reset
@@ -867,24 +861,26 @@ static int wilc_spi_init(struct wilc *wilc, bool resume)
spi_priv->crc_off = 1;
dev_err(&spi->dev,
"Failed read with CRC on, retrying with CRC off\n");
- if (!spi_internal_read(wilc, WILC_SPI_PROTOCOL_OFFSET, &reg)) {
+ ret = spi_internal_read(wilc, WILC_SPI_PROTOCOL_OFFSET, &reg);
+ if (ret) {
/*
* Read failed with both CRC on and off,
* something went bad
*/
dev_err(&spi->dev, "Failed internal read protocol\n");
- return 0;
+ return ret;
}
}
if (spi_priv->crc_off == 0) {
reg &= ~0xc; /* disable crc checking */
reg &= ~0x70;
reg |= (0x5 << 4);
- if (!spi_internal_write(wilc, WILC_SPI_PROTOCOL_OFFSET, reg)) {
+ ret = spi_internal_write(wilc, WILC_SPI_PROTOCOL_OFFSET, reg);
+ if (ret) {
dev_err(&spi->dev,
"[wilc spi %d]: Failed internal write reg\n",
__LINE__);
- return 0;
+ return ret;
}
spi_priv->crc_off = 1;
}
@@ -892,168 +888,35 @@ static int wilc_spi_init(struct wilc *wilc, bool resume)
/*
* make sure can read back chip id correctly
*/
- if (!wilc_spi_read_reg(wilc, 0x1000, &chipid)) {
+ ret = wilc_spi_read_reg(wilc, 0x1000, &chipid);
+ if (ret) {
dev_err(&spi->dev, "Fail cmd read chip id...\n");
- return 0;
+ return ret;
}
- spi_priv->has_thrpt_enh = 1;
-
isinit = 1;
- return 1;
+ return 0;
}
static int wilc_spi_read_size(struct wilc *wilc, u32 *size)
{
- struct spi_device *spi = to_spi_device(wilc->dev);
- struct wilc_spi *spi_priv = wilc->bus_data;
int ret;
- if (spi_priv->has_thrpt_enh) {
- ret = spi_internal_read(wilc, 0xe840 - WILC_SPI_REG_BASE,
- size);
- *size = *size & IRQ_DMA_WD_CNT_MASK;
- } else {
- u32 tmp;
- u32 byte_cnt;
-
- ret = wilc_spi_read_reg(wilc, WILC_VMM_TO_HOST_SIZE,
- &byte_cnt);
- if (!ret) {
- dev_err(&spi->dev,
- "Failed read WILC_VMM_TO_HOST_SIZE ...\n");
- return ret;
- }
- tmp = (byte_cnt >> 2) & IRQ_DMA_WD_CNT_MASK;
- *size = tmp;
- }
+ ret = spi_internal_read(wilc, 0xe840 - WILC_SPI_REG_BASE, size);
+ *size = *size & IRQ_DMA_WD_CNT_MASK;
return ret;
}
static int wilc_spi_read_int(struct wilc *wilc, u32 *int_status)
{
- struct spi_device *spi = to_spi_device(wilc->dev);
- struct wilc_spi *spi_priv = wilc->bus_data;
- int ret;
- u32 tmp;
- u32 byte_cnt;
- bool unexpected_irq;
- int j;
- u32 unknown_mask;
- u32 irq_flags;
- int k = IRG_FLAGS_OFFSET + 5;
-
- if (spi_priv->has_thrpt_enh)
- return spi_internal_read(wilc, 0xe840 - WILC_SPI_REG_BASE,
- int_status);
- ret = wilc_spi_read_reg(wilc, WILC_VMM_TO_HOST_SIZE, &byte_cnt);
- if (!ret) {
- dev_err(&spi->dev,
- "Failed read WILC_VMM_TO_HOST_SIZE ...\n");
- return ret;
- }
- tmp = (byte_cnt >> 2) & IRQ_DMA_WD_CNT_MASK;
-
- j = 0;
- do {
- wilc_spi_read_reg(wilc, 0x1a90, &irq_flags);
- tmp |= ((irq_flags >> 27) << IRG_FLAGS_OFFSET);
-
- if (spi_priv->nint > 5) {
- wilc_spi_read_reg(wilc, 0x1a94, &irq_flags);
- tmp |= (((irq_flags >> 0) & 0x7) << k);
- }
-
- unknown_mask = ~((1ul << spi_priv->nint) - 1);
-
- unexpected_irq = (tmp >> IRG_FLAGS_OFFSET) & unknown_mask;
- if (unexpected_irq) {
- dev_err(&spi->dev,
- "Unexpected interrupt(2):j=%d,tmp=%x,mask=%x\n",
- j, tmp, unknown_mask);
- }
-
- j++;
- } while (unexpected_irq);
-
- *int_status = tmp;
-
- return ret;
+ return spi_internal_read(wilc, 0xe840 - WILC_SPI_REG_BASE, int_status);
}
static int wilc_spi_clear_int_ext(struct wilc *wilc, u32 val)
{
- struct spi_device *spi = to_spi_device(wilc->dev);
- struct wilc_spi *spi_priv = wilc->bus_data;
- int ret;
- u32 flags;
- u32 tbl_ctl;
-
- if (spi_priv->has_thrpt_enh) {
- return spi_internal_write(wilc, 0xe844 - WILC_SPI_REG_BASE,
- val);
- }
-
- flags = val & (BIT(MAX_NUM_INT) - 1);
- if (flags) {
- int i;
-
- ret = 1;
- for (i = 0; i < spi_priv->nint; i++) {
- /*
- * No matter what you write 1 or 0,
- * it will clear interrupt.
- */
- if (flags & 1)
- ret = wilc_spi_write_reg(wilc,
- 0x10c8 + i * 4, 1);
- if (!ret)
- break;
- flags >>= 1;
- }
- if (!ret) {
- dev_err(&spi->dev,
- "Failed wilc_spi_write_reg, set reg %x ...\n",
- 0x10c8 + i * 4);
- return ret;
- }
- for (i = spi_priv->nint; i < MAX_NUM_INT; i++) {
- if (flags & 1)
- dev_err(&spi->dev,
- "Unexpected interrupt cleared %d...\n",
- i);
- flags >>= 1;
- }
- }
-
- tbl_ctl = 0;
- /* select VMM table 0 */
- if (val & SEL_VMM_TBL0)
- tbl_ctl |= BIT(0);
- /* select VMM table 1 */
- if (val & SEL_VMM_TBL1)
- tbl_ctl |= BIT(1);
-
- ret = wilc_spi_write_reg(wilc, WILC_VMM_TBL_CTL, tbl_ctl);
- if (!ret) {
- dev_err(&spi->dev, "fail write reg vmm_tbl_ctl...\n");
- return ret;
- }
-
- if (val & EN_VMM) {
- /*
- * enable vmm transfer.
- */
- ret = wilc_spi_write_reg(wilc, WILC_VMM_CORE_CTL, 1);
- if (!ret) {
- dev_err(&spi->dev, "fail write reg vmm_core_ctl...\n");
- return ret;
- }
- }
-
- return ret;
+ return spi_internal_write(wilc, 0xe844 - WILC_SPI_REG_BASE, val);
}
static int wilc_spi_sync_ext(struct wilc *wilc, int nint)
@@ -1065,7 +928,7 @@ static int wilc_spi_sync_ext(struct wilc *wilc, int nint)
if (nint > MAX_NUM_INT) {
dev_err(&spi->dev, "Too many interrupts (%d)...\n", nint);
- return 0;
+ return -EINVAL;
}
spi_priv->nint = nint;
@@ -1074,58 +937,58 @@ static int wilc_spi_sync_ext(struct wilc *wilc, int nint)
* interrupt pin mux select
*/
ret = wilc_spi_read_reg(wilc, WILC_PIN_MUX_0, &reg);
- if (!ret) {
+ if (ret) {
dev_err(&spi->dev, "Failed read reg (%08x)...\n",
WILC_PIN_MUX_0);
- return 0;
+ return ret;
}
reg |= BIT(8);
ret = wilc_spi_write_reg(wilc, WILC_PIN_MUX_0, reg);
- if (!ret) {
+ if (ret) {
dev_err(&spi->dev, "Failed write reg (%08x)...\n",
WILC_PIN_MUX_0);
- return 0;
+ return ret;
}
/*
* interrupt enable
*/
ret = wilc_spi_read_reg(wilc, WILC_INTR_ENABLE, &reg);
- if (!ret) {
+ if (ret) {
dev_err(&spi->dev, "Failed read reg (%08x)...\n",
WILC_INTR_ENABLE);
- return 0;
+ return ret;
}
for (i = 0; (i < 5) && (nint > 0); i++, nint--)
reg |= (BIT((27 + i)));
ret = wilc_spi_write_reg(wilc, WILC_INTR_ENABLE, reg);
- if (!ret) {
+ if (ret) {
dev_err(&spi->dev, "Failed write reg (%08x)...\n",
WILC_INTR_ENABLE);
- return 0;
+ return ret;
}
if (nint) {
ret = wilc_spi_read_reg(wilc, WILC_INTR2_ENABLE, &reg);
- if (!ret) {
+ if (ret) {
dev_err(&spi->dev, "Failed read reg (%08x)...\n",
WILC_INTR2_ENABLE);
- return 0;
+ return ret;
}
for (i = 0; (i < 3) && (nint > 0); i++, nint--)
reg |= BIT(i);
ret = wilc_spi_read_reg(wilc, WILC_INTR2_ENABLE, &reg);
- if (!ret) {
+ if (ret) {
dev_err(&spi->dev, "Failed write reg (%08x)...\n",
WILC_INTR2_ENABLE);
- return 0;
+ return ret;
}
}
- return 1;
+ return 0;
}
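
The hunks above convert wilc_spi_sync_ext() (and the other wilc1000 HIF paths in this series) from the old 1-on-success/0-on-failure convention to the kernel's usual 0/-errno convention, which is why every "if (!ret)" test flips to "if (ret)". A minimal sketch of a caller under the new convention (hypothetical function name, not part of the patch):

        static int wilc_example_setup(struct wilc *wilc)
        {
                int ret;

                /* now 0 on success, negative errno on failure */
                ret = wilc_spi_sync_ext(wilc, NUM_INT_EXT);
                if (ret)
                        return ret;     /* propagate the errno directly */

                return 0;
        }
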
/* Global spi HIF function table */
diff --git a/drivers/staging/wilc1000/wlan.c b/drivers/staging/wilc1000/wlan.c
index d3de76126b78..601e4d1345d2 100644
--- a/drivers/staging/wilc1000/wlan.c
+++ b/drivers/staging/wilc1000/wlan.c
@@ -489,50 +489,44 @@ int wilc_wlan_handle_txq(struct wilc *wilc, u32 *txq_count)
struct wilc_vif *vif;
if (wilc->quit)
- goto out;
+ goto out_update_cnt;
mutex_lock(&wilc->txq_add_to_head_cs);
tqe = wilc_wlan_txq_get_first(wilc);
if (!tqe)
- goto out;
+ goto out_unlock;
dev = tqe->vif->ndev;
wilc_wlan_txq_filter_dup_tcp_ack(dev);
i = 0;
sum = 0;
- do {
- if (tqe && (i < (WILC_VMM_TBL_SIZE - 1))) {
- if (tqe->type == WILC_CFG_PKT)
- vmm_sz = ETH_CONFIG_PKT_HDR_OFFSET;
-
- else if (tqe->type == WILC_NET_PKT)
- vmm_sz = ETH_ETHERNET_HDR_OFFSET;
-
- else
- vmm_sz = HOST_HDR_OFFSET;
+ while (tqe && (i < (WILC_VMM_TBL_SIZE - 1))) {
+ if (tqe->type == WILC_CFG_PKT)
+ vmm_sz = ETH_CONFIG_PKT_HDR_OFFSET;
+ else if (tqe->type == WILC_NET_PKT)
+ vmm_sz = ETH_ETHERNET_HDR_OFFSET;
+ else
+ vmm_sz = HOST_HDR_OFFSET;
- vmm_sz += tqe->buffer_size;
+ vmm_sz += tqe->buffer_size;
- if (vmm_sz & 0x3)
- vmm_sz = (vmm_sz + 4) & ~0x3;
+ if (vmm_sz & 0x3)
+ vmm_sz = (vmm_sz + 4) & ~0x3;
- if ((sum + vmm_sz) > WILC_TX_BUFF_SIZE)
- break;
+ if ((sum + vmm_sz) > WILC_TX_BUFF_SIZE)
+ break;
- vmm_table[i] = vmm_sz / 4;
- if (tqe->type == WILC_CFG_PKT)
- vmm_table[i] |= BIT(10);
- cpu_to_le32s(&vmm_table[i]);
+ vmm_table[i] = vmm_sz / 4;
+ if (tqe->type == WILC_CFG_PKT)
+ vmm_table[i] |= BIT(10);
+ cpu_to_le32s(&vmm_table[i]);
- i++;
- sum += vmm_sz;
- tqe = wilc_wlan_txq_get_next(wilc, tqe);
- } else {
- break;
- }
- } while (1);
+ i++;
+ sum += vmm_sz;
+ tqe = wilc_wlan_txq_get_next(wilc, tqe);
+ }
if (i == 0)
- goto out;
+ goto out_unlock;
vmm_table[i] = 0x0;
acquire_bus(wilc, WILC_BUS_ACQUIRE_AND_WAKEUP);
@@ -540,7 +534,7 @@ int wilc_wlan_handle_txq(struct wilc *wilc, u32 *txq_count)
func = wilc->hif_func;
do {
ret = func->hif_read_reg(wilc, WILC_HOST_TX_CTRL, &reg);
- if (!ret)
+ if (ret)
break;
if ((reg & 0x1) == 0)
@@ -554,7 +548,7 @@ int wilc_wlan_handle_txq(struct wilc *wilc, u32 *txq_count)
}
} while (!wilc->quit);
- if (!ret)
+ if (ret)
goto out_release_bus;
timeout = 200;
@@ -563,16 +557,16 @@ int wilc_wlan_handle_txq(struct wilc *wilc, u32 *txq_count)
WILC_VMM_TBL_RX_SHADOW_BASE,
(u8 *)vmm_table,
((i + 1) * 4));
- if (!ret)
+ if (ret)
break;
ret = func->hif_write_reg(wilc, WILC_HOST_VMM_CTL, 0x2);
- if (!ret)
+ if (ret)
break;
do {
ret = func->hif_read_reg(wilc, WILC_HOST_VMM_CTL, &reg);
- if (!ret)
+ if (ret)
break;
if ((reg >> 2) & 0x1) {
entries = ((reg >> 3) & 0x3f);
@@ -585,27 +579,27 @@ int wilc_wlan_handle_txq(struct wilc *wilc, u32 *txq_count)
break;
}
- if (!ret)
+ if (ret)
break;
if (entries == 0) {
ret = func->hif_read_reg(wilc, WILC_HOST_TX_CTRL, &reg);
- if (!ret)
+ if (ret)
break;
reg &= ~BIT(0);
ret = func->hif_write_reg(wilc, WILC_HOST_TX_CTRL, reg);
- if (!ret)
- break;
- break;
}
- break;
- } while (1);
+ } while (0);
- if (!ret)
+ if (ret)
goto out_release_bus;
if (entries == 0) {
- ret = -ENOBUFS;
+		/*
+		 * No VMM space is available in the firmware, so ask the
+		 * caller to retry transmitting the packet from the tx queue.
+		 */
+ ret = WILC_VMM_ENTRY_FULL_RETRY;
goto out_release_bus;
}
@@ -664,7 +658,7 @@ int wilc_wlan_handle_txq(struct wilc *wilc, u32 *txq_count)
acquire_bus(wilc, WILC_BUS_ACQUIRE_AND_WAKEUP);
ret = func->hif_clear_int_ext(wilc, ENABLE_TX_VMM);
- if (!ret)
+ if (ret)
goto out_release_bus;
ret = func->hif_block_tx_ext(wilc, 0, txb, offset);
@@ -672,9 +666,10 @@ int wilc_wlan_handle_txq(struct wilc *wilc, u32 *txq_count)
out_release_bus:
release_bus(wilc, WILC_BUS_RELEASE_ALLOW_SLEEP);
-out:
+out_unlock:
mutex_unlock(&wilc->txq_add_to_head_cs);
+out_update_cnt:
*txq_count = wilc->txq_entries;
return ret;
}
@@ -725,9 +720,7 @@ static void wilc_wlan_handle_rx_buff(struct wilc *wilc, u8 *buffer, int size)
}
}
offset += tp_len;
- if (offset >= size)
- break;
- } while (1);
+ } while (offset < size);
}
static void wilc_wlan_handle_rxq(struct wilc *wilc)
@@ -736,11 +729,7 @@ static void wilc_wlan_handle_rxq(struct wilc *wilc)
u8 *buffer;
struct rxq_entry_t *rqe;
- do {
- if (wilc->quit) {
- complete(&wilc->cfg_event);
- break;
- }
+ while (!wilc->quit) {
rqe = wilc_wlan_rxq_remove(wilc);
if (!rqe)
break;
@@ -750,7 +739,9 @@ static void wilc_wlan_handle_rxq(struct wilc *wilc)
wilc_wlan_handle_rx_buff(wilc, buffer, size);
kfree(rqe);
- } while (1);
+ }
+ if (wilc->quit)
+ complete(&wilc->cfg_event);
}
static void wilc_unknown_isr_ext(struct wilc *wilc)
@@ -785,7 +776,7 @@ static void wilc_wlan_handle_isr_ext(struct wilc *wilc, u32 int_status)
wilc->hif_func->hif_clear_int_ext(wilc, DATA_INT_CLR | ENABLE_RX_VMM);
ret = wilc->hif_func->hif_block_rx_ext(wilc, 0, buffer, size);
- if (!ret)
+ if (ret)
return;
offset += size;
@@ -846,7 +837,7 @@ int wilc_wlan_firmware_download(struct wilc *wilc, const u8 *buffer,
memcpy(dma_buffer, &buffer[offset], size2);
ret = wilc->hif_func->hif_block_tx(wilc, addr,
dma_buffer, size2);
- if (!ret)
+ if (ret)
break;
addr += size2;
@@ -855,17 +846,15 @@ int wilc_wlan_firmware_download(struct wilc *wilc, const u8 *buffer,
}
release_bus(wilc, WILC_BUS_RELEASE_ONLY);
- if (!ret) {
- ret = -EIO;
+ if (ret)
goto fail;
- }
} while (offset < buffer_size);
fail:
kfree(dma_buffer);
- return (ret < 0) ? ret : 0;
+ return ret;
}
int wilc_wlan_start(struct wilc *wilc)
@@ -882,49 +871,26 @@ int wilc_wlan_start(struct wilc *wilc)
}
acquire_bus(wilc, WILC_BUS_ACQUIRE_ONLY);
ret = wilc->hif_func->hif_write_reg(wilc, WILC_VMM_CORE_CFG, reg);
- if (!ret) {
+ if (ret) {
release_bus(wilc, WILC_BUS_RELEASE_ONLY);
- return -EIO;
+ return ret;
}
reg = 0;
if (wilc->io_type == WILC_HIF_SDIO && wilc->dev_irq_num)
reg |= WILC_HAVE_SDIO_IRQ_GPIO;
-#ifdef WILC_DISABLE_PMU
-#else
- reg |= WILC_HAVE_USE_PMU;
-#endif
-
-#ifdef WILC_SLEEP_CLK_SRC_XO
- reg |= WILC_HAVE_SLEEP_CLK_SRC_XO;
-#elif defined WILC_SLEEP_CLK_SRC_RTC
- reg |= WILC_HAVE_SLEEP_CLK_SRC_RTC;
-#endif
-
-#ifdef WILC_EXT_PA_INV_TX_RX
- reg |= WILC_HAVE_EXT_PA_INV_TX_RX;
-#endif
- reg |= WILC_HAVE_USE_IRQ_AS_HOST_WAKE;
- reg |= WILC_HAVE_LEGACY_RF_SETTINGS;
-#ifdef XTAL_24
- reg |= WILC_HAVE_XTAL_24;
-#endif
-#ifdef DISABLE_WILC_UART
- reg |= WILC_HAVE_DISABLE_WILC_UART;
-#endif
-
ret = wilc->hif_func->hif_write_reg(wilc, WILC_GP_REG_1, reg);
- if (!ret) {
+ if (ret) {
release_bus(wilc, WILC_BUS_RELEASE_ONLY);
- return -EIO;
+ return ret;
}
wilc->hif_func->hif_sync_ext(wilc, NUM_INT_EXT);
ret = wilc->hif_func->hif_read_reg(wilc, 0x1000, &chipid);
- if (!ret) {
+ if (ret) {
release_bus(wilc, WILC_BUS_RELEASE_ONLY);
- return -EIO;
+ return ret;
}
wilc->hif_func->hif_read_reg(wilc, WILC_GLB_RESET_0, &reg);
@@ -939,7 +905,7 @@ int wilc_wlan_start(struct wilc *wilc)
wilc->hif_func->hif_read_reg(wilc, WILC_GLB_RESET_0, &reg);
release_bus(wilc, WILC_BUS_RELEASE_ONLY);
- return (ret < 0) ? ret : 0;
+ return ret;
}
int wilc_wlan_stop(struct wilc *wilc, struct wilc_vif *vif)
@@ -950,33 +916,33 @@ int wilc_wlan_stop(struct wilc *wilc, struct wilc_vif *vif)
acquire_bus(wilc, WILC_BUS_ACQUIRE_AND_WAKEUP);
ret = wilc->hif_func->hif_read_reg(wilc, WILC_GP_REG_0, &reg);
- if (!ret) {
+ if (ret) {
netdev_err(vif->ndev, "Error while reading reg\n");
release_bus(wilc, WILC_BUS_RELEASE_ALLOW_SLEEP);
- return -EIO;
+ return ret;
}
ret = wilc->hif_func->hif_write_reg(wilc, WILC_GP_REG_0,
(reg | WILC_ABORT_REQ_BIT));
- if (!ret) {
+ if (ret) {
netdev_err(vif->ndev, "Error while writing reg\n");
release_bus(wilc, WILC_BUS_RELEASE_ALLOW_SLEEP);
- return -EIO;
+ return ret;
}
ret = wilc->hif_func->hif_read_reg(wilc, WILC_FW_HOST_COMM, &reg);
- if (!ret) {
+ if (ret) {
netdev_err(vif->ndev, "Error while reading reg\n");
release_bus(wilc, WILC_BUS_RELEASE_ALLOW_SLEEP);
- return -EIO;
+ return ret;
}
reg = BIT(0);
ret = wilc->hif_func->hif_write_reg(wilc, WILC_FW_HOST_COMM, reg);
- if (!ret) {
+ if (ret) {
netdev_err(vif->ndev, "Error while writing reg\n");
release_bus(wilc, WILC_BUS_RELEASE_ALLOW_SLEEP);
- return -EIO;
+ return ret;
}
release_bus(wilc, WILC_BUS_RELEASE_ALLOW_SLEEP);
@@ -992,21 +958,14 @@ void wilc_wlan_cleanup(struct net_device *dev)
struct wilc *wilc = vif->wilc;
wilc->quit = 1;
- do {
- tqe = wilc_wlan_txq_remove_from_head(dev);
- if (!tqe)
- break;
+ while ((tqe = wilc_wlan_txq_remove_from_head(dev))) {
if (tqe->tx_complete_func)
tqe->tx_complete_func(tqe->priv, 0);
kfree(tqe);
- } while (1);
+ }
- do {
- rqe = wilc_wlan_rxq_remove(wilc);
- if (!rqe)
- break;
+ while ((rqe = wilc_wlan_rxq_remove(wilc)))
kfree(rqe);
- } while (1);
kfree(wilc->rx_buffer);
wilc->rx_buffer = NULL;
@@ -1156,10 +1115,11 @@ int wilc_send_config_pkt(struct wilc_vif *vif, u8 mode, struct wid *wids,
return ret;
}
-static u32 init_chip(struct net_device *dev)
+static int init_chip(struct net_device *dev)
{
u32 chipid;
- u32 reg, ret = 0;
+ u32 reg;
+ int ret = 0;
struct wilc_vif *vif = netdev_priv(dev);
struct wilc *wilc = vif->wilc;
@@ -1169,18 +1129,18 @@ static u32 init_chip(struct net_device *dev)
if ((chipid & 0xfff) != 0xa0) {
ret = wilc->hif_func->hif_read_reg(wilc, 0x1118, &reg);
- if (!ret) {
+ if (ret) {
netdev_err(dev, "fail read reg 0x1118\n");
goto release;
}
reg |= BIT(0);
ret = wilc->hif_func->hif_write_reg(wilc, 0x1118, reg);
- if (!ret) {
+ if (ret) {
netdev_err(dev, "fail write reg 0x1118\n");
goto release;
}
ret = wilc->hif_func->hif_write_reg(wilc, 0xc0000, 0x71);
- if (!ret) {
+ if (ret) {
netdev_err(dev, "fail write reg 0xc0000\n");
goto release;
}
@@ -1230,7 +1190,7 @@ int wilc_wlan_init(struct net_device *dev)
wilc->quit = 0;
- if (!wilc->hif_func->hif_init(wilc, false)) {
+ if (wilc->hif_func->hif_init(wilc, false)) {
ret = -EIO;
goto fail;
}
@@ -1251,12 +1211,12 @@ int wilc_wlan_init(struct net_device *dev)
goto fail;
}
- if (!init_chip(dev)) {
+ if (init_chip(dev)) {
ret = -EIO;
goto fail;
}
- return 1;
+ return 0;
fail:
diff --git a/drivers/staging/wilc1000/wlan.h b/drivers/staging/wilc1000/wlan.h
index 1f6957cf2e9c..8c4634262adb 100644
--- a/drivers/staging/wilc1000/wlan.h
+++ b/drivers/staging/wilc1000/wlan.h
@@ -197,6 +197,8 @@
#define IS_MANAGMEMENT_CALLBACK 0x080
#define IS_MGMT_STATUS_SUCCES 0x040
+#define WILC_WID_TYPE GENMASK(15, 12)
+#define WILC_VMM_ENTRY_FULL_RETRY 1
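
WILC_WID_TYPE is a GENMASK(15, 12) field, so FIELD_GET() can replace the two previously inconsistent open-coded extractions used below ((wid >> 12) & 0x7 in the response parser and (id >> 12) & 0xf in the setters) with one shared definition. A short sketch of the equivalence (hypothetical helper, assuming linux/bitfield.h):

        #include <linux/bitfield.h>

        static u8 wid_type(u16 wid)
        {
                /* FIELD_GET(GENMASK(15, 12), wid) == (wid >> 12) & 0xf */
                return FIELD_GET(WILC_WID_TYPE, wid);
        }
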
/********************************************
*
* Tx/Rx Queue Structure
diff --git a/drivers/staging/wilc1000/wlan_cfg.c b/drivers/staging/wilc1000/wlan_cfg.c
index 6f6b286788d1..fe2a7ed8e5cd 100644
--- a/drivers/staging/wilc1000/wlan_cfg.c
+++ b/drivers/staging/wilc1000/wlan_cfg.c
@@ -4,6 +4,7 @@
* All rights reserved.
*/
+#include <linux/bitfield.h>
#include "wlan_if.h"
#include "wlan.h"
#include "wlan_cfg.h"
@@ -132,75 +133,54 @@ static int wilc_wlan_cfg_set_bin(u8 *frame, u32 offset, u16 id, u8 *b, u32 size)
*
********************************************/
-#define GET_WID_TYPE(wid) (((wid) >> 12) & 0x7)
static void wilc_wlan_parse_response_frame(struct wilc *wl, u8 *info, int size)
{
u16 wid;
u32 len = 0, i = 0;
+ struct wilc_cfg *cfg = &wl->cfg;
while (size > 0) {
i = 0;
wid = get_unaligned_le16(info);
- switch (GET_WID_TYPE(wid)) {
+ switch (FIELD_GET(WILC_WID_TYPE, wid)) {
case WID_CHAR:
- do {
- if (wl->cfg.b[i].id == WID_NIL)
- break;
-
- if (wl->cfg.b[i].id == wid) {
- wl->cfg.b[i].val = info[4];
- break;
- }
+ while (cfg->b[i].id != WID_NIL && cfg->b[i].id != wid)
i++;
- } while (1);
+
+ if (cfg->b[i].id == wid)
+ cfg->b[i].val = info[4];
+
len = 3;
break;
case WID_SHORT:
- do {
- struct wilc_cfg_hword *hw = &wl->cfg.hw[i];
+ while (cfg->hw[i].id != WID_NIL && cfg->hw[i].id != wid)
+ i++;
- if (hw->id == WID_NIL)
- break;
+ if (cfg->hw[i].id == wid)
+ cfg->hw[i].val = get_unaligned_le16(&info[4]);
- if (hw->id == wid) {
- hw->val = get_unaligned_le16(&info[4]);
- break;
- }
- i++;
- } while (1);
len = 4;
break;
case WID_INT:
- do {
- struct wilc_cfg_word *w = &wl->cfg.w[i];
+ while (cfg->w[i].id != WID_NIL && cfg->w[i].id != wid)
+ i++;
- if (w->id == WID_NIL)
- break;
+ if (cfg->w[i].id == wid)
+ cfg->w[i].val = get_unaligned_le32(&info[4]);
- if (w->id == wid) {
- w->val = get_unaligned_le32(&info[4]);
- break;
- }
- i++;
- } while (1);
len = 6;
break;
case WID_STR:
- do {
- if (wl->cfg.s[i].id == WID_NIL)
- break;
-
- if (wl->cfg.s[i].id == wid) {
- memcpy(wl->cfg.s[i].str, &info[2],
- (info[2] + 2));
- break;
- }
+ while (cfg->s[i].id != WID_NIL && cfg->s[i].id != wid)
i++;
- } while (1);
+
+ if (cfg->s[i].id == wid)
+ memcpy(cfg->s[i].str, &info[2], info[2] + 2);
+
len = 2 + info[2];
break;
@@ -223,16 +203,12 @@ static void wilc_wlan_parse_info_frame(struct wilc *wl, u8 *info)
if (len == 1 && wid == WID_STATUS) {
int i = 0;
- do {
- if (wl->cfg.b[i].id == WID_NIL)
- break;
-
- if (wl->cfg.b[i].id == wid) {
- wl->cfg.b[i].val = info[3];
- break;
- }
+ while (wl->cfg.b[i].id != WID_NIL &&
+ wl->cfg.b[i].id != wid)
i++;
- } while (1);
+
+ if (wl->cfg.b[i].id == wid)
+ wl->cfg.b[i].val = info[3];
}
}
@@ -244,7 +220,7 @@ static void wilc_wlan_parse_info_frame(struct wilc *wl, u8 *info)
int wilc_wlan_cfg_set_wid(u8 *frame, u32 offset, u16 id, u8 *buf, int size)
{
- u8 type = (id >> 12) & 0xf;
+ u8 type = FIELD_GET(WILC_WID_TYPE, id);
int ret = 0;
switch (type) {
@@ -290,65 +266,47 @@ int wilc_wlan_cfg_get_wid(u8 *frame, u32 offset, u16 id)
int wilc_wlan_cfg_get_val(struct wilc *wl, u16 wid, u8 *buffer,
u32 buffer_size)
{
- u32 type = (wid >> 12) & 0xf;
+ u8 type = FIELD_GET(WILC_WID_TYPE, wid);
int i, ret = 0;
+ struct wilc_cfg *cfg = &wl->cfg;
i = 0;
if (type == CFG_BYTE_CMD) {
- do {
- if (wl->cfg.b[i].id == WID_NIL)
- break;
-
- if (wl->cfg.b[i].id == wid) {
- memcpy(buffer, &wl->cfg.b[i].val, 1);
- ret = 1;
- break;
- }
+ while (cfg->b[i].id != WID_NIL && cfg->b[i].id != wid)
i++;
- } while (1);
+
+ if (cfg->b[i].id == wid) {
+ memcpy(buffer, &cfg->b[i].val, 1);
+ ret = 1;
+ }
} else if (type == CFG_HWORD_CMD) {
- do {
- if (wl->cfg.hw[i].id == WID_NIL)
- break;
-
- if (wl->cfg.hw[i].id == wid) {
- memcpy(buffer, &wl->cfg.hw[i].val, 2);
- ret = 2;
- break;
- }
+ while (cfg->hw[i].id != WID_NIL && cfg->hw[i].id != wid)
i++;
- } while (1);
+
+ if (cfg->hw[i].id == wid) {
+ memcpy(buffer, &cfg->hw[i].val, 2);
+ ret = 2;
+ }
} else if (type == CFG_WORD_CMD) {
- do {
- if (wl->cfg.w[i].id == WID_NIL)
- break;
-
- if (wl->cfg.w[i].id == wid) {
- memcpy(buffer, &wl->cfg.w[i].val, 4);
- ret = 4;
- break;
- }
+ while (cfg->w[i].id != WID_NIL && cfg->w[i].id != wid)
i++;
- } while (1);
- } else if (type == CFG_STR_CMD) {
- do {
- u32 id = wl->cfg.s[i].id;
- if (id == WID_NIL)
- break;
+ if (cfg->w[i].id == wid) {
+ memcpy(buffer, &cfg->w[i].val, 4);
+ ret = 4;
+ }
+ } else if (type == CFG_STR_CMD) {
+ while (cfg->s[i].id != WID_NIL && cfg->s[i].id != wid)
+ i++;
- if (id == wid) {
- u16 size = get_unaligned_le16(wl->cfg.s[i].str);
+ if (cfg->s[i].id == wid) {
+ u16 size = get_unaligned_le16(cfg->s[i].str);
- if (buffer_size >= size) {
- memcpy(buffer, &wl->cfg.s[i].str[2],
- size);
- ret = size;
- }
- break;
+ if (buffer_size >= size) {
+ memcpy(buffer, &cfg->s[i].str[2], size);
+ ret = size;
}
- i++;
- } while (1);
+ }
}
return ret;
}
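
After the refactor, wilc_wlan_cfg_get_val() keeps its return contract: it returns the number of bytes copied into the caller's buffer (1, 2, 4, or the stored string size) and 0 when the WID is not present in the table. A usage sketch (hypothetical wrapper, not part of the patch):

        /* Returns true only if a one-byte WID was found and copied. */
        static bool cfg_read_byte(struct wilc *wl, u16 wid, u8 *val)
        {
                return wilc_wlan_cfg_get_val(wl, wid, val, sizeof(*val)) == 1;
        }
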
diff --git a/drivers/staging/wilc1000/wlan_if.h b/drivers/staging/wilc1000/wlan_if.h
index 7c7ee66c35f5..f85fd575136d 100644
--- a/drivers/staging/wilc1000/wlan_if.h
+++ b/drivers/staging/wilc1000/wlan_if.h
@@ -8,6 +8,7 @@
#define WILC_WLAN_IF_H
#include <linux/netdevice.h>
+#include "fw.h"
/********************************************
*
diff --git a/drivers/staging/wlan-ng/p80211netdev.c b/drivers/staging/wlan-ng/p80211netdev.c
index a70fb84f38f1..b809c0015c0c 100644
--- a/drivers/staging/wlan-ng/p80211netdev.c
+++ b/drivers/staging/wlan-ng/p80211netdev.c
@@ -101,7 +101,7 @@ static void p80211knetdev_set_multicast_list(struct net_device *dev);
static int p80211knetdev_do_ioctl(struct net_device *dev, struct ifreq *ifr,
int cmd);
static int p80211knetdev_set_mac_address(struct net_device *dev, void *addr);
-static void p80211knetdev_tx_timeout(struct net_device *netdev);
+static void p80211knetdev_tx_timeout(struct net_device *netdev, unsigned int txqueue);
static int p80211_rx_typedrop(struct wlandevice *wlandev, u16 fc);
int wlan_watchdog = 5000;
@@ -1074,7 +1074,7 @@ static int p80211_rx_typedrop(struct wlandevice *wlandev, u16 fc)
return drop;
}
-static void p80211knetdev_tx_timeout(struct net_device *netdev)
+static void p80211knetdev_tx_timeout(struct net_device *netdev, unsigned int txqueue)
{
struct wlandevice *wlandev = netdev->ml_priv;
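
This prototype change follows the v5.6 netdev core update that passes the index of the stuck queue to .ndo_tx_timeout handlers. A minimal sketch of the updated hook for a driver that does not otherwise use the new argument (hypothetical names):

        static void my_tx_timeout(struct net_device *dev, unsigned int txqueue)
        {
                netdev_warn(dev, "tx timeout on queue %u\n", txqueue);
        }

        static const struct net_device_ops my_netdev_ops = {
                .ndo_tx_timeout = my_tx_timeout,
        };
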
diff --git a/drivers/staging/wlan-ng/prism2mgmt.c b/drivers/staging/wlan-ng/prism2mgmt.c
index 7350fe5d96a3..a8860d2aee68 100644
--- a/drivers/staging/wlan-ng/prism2mgmt.c
+++ b/drivers/staging/wlan-ng/prism2mgmt.c
@@ -959,7 +959,7 @@ int prism2mgmt_flashdl_state(struct wlandevice *wlandev, void *msgp)
}
}
- return 0;
+ return result;
}
/*----------------------------------------------------------------
diff --git a/drivers/target/iscsi/iscsi_target.c b/drivers/target/iscsi/iscsi_target.c
index 7251a87bb576..b94ed4e30770 100644
--- a/drivers/target/iscsi/iscsi_target.c
+++ b/drivers/target/iscsi/iscsi_target.c
@@ -4149,9 +4149,6 @@ int iscsit_close_connection(
iscsit_stop_nopin_response_timer(conn);
iscsit_stop_nopin_timer(conn);
- if (conn->conn_transport->iscsit_wait_conn)
- conn->conn_transport->iscsit_wait_conn(conn);
-
/*
* During Connection recovery drop unacknowledged out of order
* commands for this connection, and prepare the other commands
@@ -4237,6 +4234,9 @@ int iscsit_close_connection(
target_sess_cmd_list_set_waiting(sess->se_sess);
target_wait_for_sess_cmds(sess->se_sess);
+ if (conn->conn_transport->iscsit_wait_conn)
+ conn->conn_transport->iscsit_wait_conn(conn);
+
ahash_request_free(conn->conn_tx_hash);
if (conn->conn_rx_hash) {
struct crypto_ahash *tfm;
diff --git a/drivers/target/tcm_fc/tfc_io.c b/drivers/target/tcm_fc/tfc_io.c
index 1354a157e9af..6a38ff936389 100644
--- a/drivers/target/tcm_fc/tfc_io.c
+++ b/drivers/target/tcm_fc/tfc_io.c
@@ -221,7 +221,6 @@ void ft_recv_write_data(struct ft_cmd *cmd, struct fc_frame *fp)
ep = fc_seq_exch(seq);
lport = ep->lp;
if (cmd->was_ddp_setup) {
- BUG_ON(!ep);
BUG_ON(!lport);
/*
* Since DDP (Large Rx offload) was setup for this request,
diff --git a/drivers/tc/tc-driver.c b/drivers/tc/tc-driver.c
index 16b5bae63c74..d45f2c1ff341 100644
--- a/drivers/tc/tc-driver.c
+++ b/drivers/tc/tc-driver.c
@@ -56,8 +56,8 @@ EXPORT_SYMBOL(tc_unregister_driver);
* system is in its list of supported devices. Returns the matching
* tc_device_id structure or %NULL if there is no match.
*/
-const struct tc_device_id *tc_match_device(struct tc_driver *tdrv,
- struct tc_dev *tdev)
+static const struct tc_device_id *tc_match_device(struct tc_driver *tdrv,
+ struct tc_dev *tdev)
{
const struct tc_device_id *id = tdrv->id_table;
@@ -71,7 +71,6 @@ const struct tc_device_id *tc_match_device(struct tc_driver *tdrv,
}
return NULL;
}
-EXPORT_SYMBOL(tc_match_device);
/**
* tc_bus_match - Tell if a device structure has a matching
diff --git a/drivers/tc/tc.c b/drivers/tc/tc.c
index cf3fad2cb871..c5b17dd8f587 100644
--- a/drivers/tc/tc.c
+++ b/drivers/tc/tc.c
@@ -47,7 +47,7 @@ static void __init tc_bus_add_devices(struct tc_bus *tbus)
for (slot = 0; slot < tbus->num_tcslots; slot++) {
slotaddr = tbus->slot_base + slot * slotsize;
extslotaddr = tbus->ext_slot_base + slot * extslotsize;
- module = ioremap_nocache(slotaddr, slotsize);
+ module = ioremap(slotaddr, slotsize);
BUG_ON(!module);
offset = TC_OLDCARD;
diff --git a/drivers/tee/Kconfig b/drivers/tee/Kconfig
index 676ffcb64985..8da63f38e6bd 100644
--- a/drivers/tee/Kconfig
+++ b/drivers/tee/Kconfig
@@ -2,7 +2,7 @@
# Generic Trusted Execution Environment Configuration
config TEE
tristate "Trusted Execution Environment support"
- depends on HAVE_ARM_SMCCC || COMPILE_TEST
+ depends on HAVE_ARM_SMCCC || COMPILE_TEST || CPU_SUP_AMD
select DMA_SHARED_BUFFER
select GENERIC_ALLOCATOR
help
@@ -14,7 +14,7 @@ if TEE
menu "TEE drivers"
source "drivers/tee/optee/Kconfig"
-
+source "drivers/tee/amdtee/Kconfig"
endmenu
endif
diff --git a/drivers/tee/Makefile b/drivers/tee/Makefile
index 21f51fd88b07..68da044afbfa 100644
--- a/drivers/tee/Makefile
+++ b/drivers/tee/Makefile
@@ -4,3 +4,4 @@ tee-objs += tee_core.o
tee-objs += tee_shm.o
tee-objs += tee_shm_pool.o
obj-$(CONFIG_OPTEE) += optee/
+obj-$(CONFIG_AMDTEE) += amdtee/
diff --git a/drivers/tee/amdtee/Kconfig b/drivers/tee/amdtee/Kconfig
new file mode 100644
index 000000000000..4e32b6413b41
--- /dev/null
+++ b/drivers/tee/amdtee/Kconfig
@@ -0,0 +1,8 @@
+# SPDX-License-Identifier: MIT
+# AMD-TEE Trusted Execution Environment Configuration
+config AMDTEE
+ tristate "AMD-TEE"
+ default m
+ depends on CRYPTO_DEV_SP_PSP
+ help
+ This implements AMD's Trusted Execution Environment (TEE) driver.
diff --git a/drivers/tee/amdtee/Makefile b/drivers/tee/amdtee/Makefile
new file mode 100644
index 000000000000..ff1485266117
--- /dev/null
+++ b/drivers/tee/amdtee/Makefile
@@ -0,0 +1,5 @@
+# SPDX-License-Identifier: MIT
+obj-$(CONFIG_AMDTEE) += amdtee.o
+amdtee-objs += core.o
+amdtee-objs += call.o
+amdtee-objs += shm_pool.o
diff --git a/drivers/tee/amdtee/amdtee_if.h b/drivers/tee/amdtee/amdtee_if.h
new file mode 100644
index 000000000000..ff48c3e47375
--- /dev/null
+++ b/drivers/tee/amdtee/amdtee_if.h
@@ -0,0 +1,183 @@
+/* SPDX-License-Identifier: MIT */
+
+/*
+ * Copyright 2019 Advanced Micro Devices, Inc.
+ */
+
+/*
+ * This file has definitions related to the Host and AMD-TEE Trusted OS
+ * interface. These definitions must match the definitions on the TEE side.
+ */
+
+#ifndef AMDTEE_IF_H
+#define AMDTEE_IF_H
+
+#include <linux/types.h>
+
+/*****************************************************************************
+ ** TEE Param
+ ******************************************************************************/
+#define TEE_MAX_PARAMS 4
+
+/**
+ * struct memref - memory reference structure
+ * @buf_id: buffer ID of the buffer mapped by TEE_CMD_ID_MAP_SHARED_MEM
+ * @offset: offset in bytes from beginning of the buffer
+ * @size: data size in bytes
+ */
+struct memref {
+ u32 buf_id;
+ u32 offset;
+ u32 size;
+};
+
+struct value {
+ u32 a;
+ u32 b;
+};
+
+/*
+ * Parameters passed to open_session or invoke_command
+ */
+union tee_op_param {
+ struct memref mref;
+ struct value val;
+};
+
+struct tee_operation {
+ u32 param_types;
+ union tee_op_param params[TEE_MAX_PARAMS];
+};
+
+/* Must be the same as in the GP TEE specification */
+#define TEE_OP_PARAM_TYPE_NONE 0
+#define TEE_OP_PARAM_TYPE_VALUE_INPUT 1
+#define TEE_OP_PARAM_TYPE_VALUE_OUTPUT 2
+#define TEE_OP_PARAM_TYPE_VALUE_INOUT 3
+#define TEE_OP_PARAM_TYPE_INVALID 4
+#define TEE_OP_PARAM_TYPE_MEMREF_INPUT 5
+#define TEE_OP_PARAM_TYPE_MEMREF_OUTPUT 6
+#define TEE_OP_PARAM_TYPE_MEMREF_INOUT 7
+
+#define TEE_PARAM_TYPE_GET(t, i) (((t) >> ((i) * 4)) & 0xF)
+#define TEE_PARAM_TYPES(t0, t1, t2, t3) \
+ ((t0) | ((t1) << 4) | ((t2) << 8) | ((t3) << 12))
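
Each parameter type occupies one nibble, so four types pack into the low 16 bits of param_types. A worked example using the defines above:

        u32 types = TEE_PARAM_TYPES(TEE_OP_PARAM_TYPE_VALUE_INPUT,   /* 1 */
                                    TEE_OP_PARAM_TYPE_MEMREF_INOUT,  /* 7 */
                                    TEE_OP_PARAM_TYPE_NONE,          /* 0 */
                                    TEE_OP_PARAM_TYPE_NONE);         /* 0 */
        /* types == 0x0071; TEE_PARAM_TYPE_GET(types, 1) == 7 */
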
+
+/*****************************************************************************
+ ** TEE Commands
+ *****************************************************************************/
+
+/*
+ * The shared memory between the rich world and the secure world may be
+ * physically non-contiguous. The structures below describe such a shared
+ * memory region via a scatter/gather (sg) list.
+ */
+
+/**
+ * struct tee_sg_desc - sg descriptor for a physically contiguous buffer
+ * @low_addr: [in] bits[31:0] of buffer's physical address. Must be 4KB aligned
+ * @hi_addr: [in] bits[63:32] of the buffer's physical address
+ * @size: [in] size in bytes (must be multiple of 4KB)
+ */
+struct tee_sg_desc {
+ u32 low_addr;
+ u32 hi_addr;
+ u32 size;
+};
+
+/**
+ * struct tee_sg_list - structure describing a scatter/gather list
+ * @count: [in] number of sg descriptors
+ * @size: [in] total size of all buffers in the list. Must be multiple of 4KB
+ * @buf: [in] list of sg buffer descriptors
+ */
+#define TEE_MAX_SG_DESC 64
+struct tee_sg_list {
+ u32 count;
+ u32 size;
+ struct tee_sg_desc buf[TEE_MAX_SG_DESC];
+};
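
For illustration, a single physically contiguous 8 KB buffer at the (hypothetical) physical address 0x1_0000_1000 becomes a one-entry list; the address and both sizes satisfy the 4 KB rules stated above:

        struct tee_sg_list list = {
                .count = 1,
                .size = 0x2000,                 /* total: 8 KB */
                .buf[0] = {
                        .low_addr = 0x00001000, /* bits [31:0], 4 KB aligned */
                        .hi_addr = 0x00000001,  /* bits [63:32] */
                        .size = 0x2000,         /* multiple of 4 KB */
                },
        };
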
+
+/**
+ * struct tee_cmd_map_shared_mem - command to map shared memory
+ * @buf_id: [out] return buffer ID value
+ * @sg_list: [in] list describing memory to be mapped
+ */
+struct tee_cmd_map_shared_mem {
+ u32 buf_id;
+ struct tee_sg_list sg_list;
+};
+
+/**
+ * struct tee_cmd_unmap_shared_mem - command to unmap shared memory
+ * @buf_id: [in] buffer ID of memory to be unmapped
+ */
+struct tee_cmd_unmap_shared_mem {
+ u32 buf_id;
+};
+
+/**
+ * struct tee_cmd_load_ta - load Trusted Application (TA) binary into TEE
+ * @low_addr: [in] bits [31:0] of the physical address of the TA binary
+ * @hi_addr: [in] bits [63:32] of the physical address of the TA binary
+ * @size: [in] size of TA binary in bytes
+ * @ta_handle: [out] return handle of the loaded TA
+ */
+struct tee_cmd_load_ta {
+ u32 low_addr;
+ u32 hi_addr;
+ u32 size;
+ u32 ta_handle;
+};
+
+/**
+ * struct tee_cmd_unload_ta - command to unload TA binary from TEE environment
+ * @ta_handle: [in] handle of the loaded TA to be unloaded
+ */
+struct tee_cmd_unload_ta {
+ u32 ta_handle;
+};
+
+/**
+ * struct tee_cmd_open_session - command to call TA_OpenSessionEntryPoint in TA
+ * @ta_handle: [in] handle of the loaded TA
+ * @session_info: [out] pointer to TA allocated session data
+ * @op: [in/out] operation parameters
+ * @return_origin: [out] origin of return code after TEE processing
+ */
+struct tee_cmd_open_session {
+ u32 ta_handle;
+ u32 session_info;
+ struct tee_operation op;
+ u32 return_origin;
+};
+
+/**
+ * struct tee_cmd_close_session - command to call TA_CloseSessionEntryPoint()
+ * in TA
+ * @ta_handle: [in] handle of the loaded TA
+ * @session_info: [in] pointer to TA allocated session data
+ */
+struct tee_cmd_close_session {
+ u32 ta_handle;
+ u32 session_info;
+};
+
+/**
+ * struct tee_cmd_invoke_cmd - command to call TA_InvokeCommandEntryPoint() in
+ * TA
+ * @ta_handle: [in] handle of the loaded TA
+ * @cmd_id: [in] TA command ID
+ * @session_info: [in] pointer to TA allocated session data
+ * @op: [in/out] operation parameters
+ * @return_origin: [out] origin of return code after TEE processing
+ */
+struct tee_cmd_invoke_cmd {
+ u32 ta_handle;
+ u32 cmd_id;
+ u32 session_info;
+ struct tee_operation op;
+ u32 return_origin;
+};
+
+#endif /*AMDTEE_IF_H*/
diff --git a/drivers/tee/amdtee/amdtee_private.h b/drivers/tee/amdtee/amdtee_private.h
new file mode 100644
index 000000000000..d7f798c3394b
--- /dev/null
+++ b/drivers/tee/amdtee/amdtee_private.h
@@ -0,0 +1,159 @@
+/* SPDX-License-Identifier: MIT */
+
+/*
+ * Copyright 2019 Advanced Micro Devices, Inc.
+ */
+
+#ifndef AMDTEE_PRIVATE_H
+#define AMDTEE_PRIVATE_H
+
+#include <linux/mutex.h>
+#include <linux/spinlock.h>
+#include <linux/tee_drv.h>
+#include <linux/kref.h>
+#include <linux/types.h>
+#include "amdtee_if.h"
+
+#define DRIVER_NAME "amdtee"
+#define DRIVER_AUTHOR "AMD-TEE Linux driver team"
+
+/* Some GlobalPlatform error codes used in this driver */
+#define TEEC_SUCCESS 0x00000000
+#define TEEC_ERROR_GENERIC 0xFFFF0000
+#define TEEC_ERROR_BAD_PARAMETERS 0xFFFF0006
+#define TEEC_ERROR_COMMUNICATION 0xFFFF000E
+
+#define TEEC_ORIGIN_COMMS 0x00000002
+
+/* Maximum number of sessions which can be opened with a Trusted Application */
+#define TEE_NUM_SESSIONS 32
+
+#define TA_LOAD_PATH "/amdtee"
+#define TA_PATH_MAX 60
+
+/**
+ * struct amdtee - main service struct
+ * @teedev: client device
+ * @pool: shared memory pool
+ */
+struct amdtee {
+ struct tee_device *teedev;
+ struct tee_shm_pool *pool;
+};
+
+/**
+ * struct amdtee_session - Trusted Application (TA) session related information.
+ * @ta_handle: handle to Trusted Application (TA) loaded in TEE environment
+ * @refcount: counter to keep track of sessions opened for the TA instance
+ * @session_info: an array pointing to TA allocated session data.
+ * @sess_mask: session usage bit-mask. If a particular bit is set, then the
+ * corresponding @session_info entry is in use or valid.
+ *
+ * The session structure is updated on open_session, and this information is
+ * used for subsequent operations with the Trusted Application.
+ */
+struct amdtee_session {
+ struct list_head list_node;
+ u32 ta_handle;
+ struct kref refcount;
+ u32 session_info[TEE_NUM_SESSIONS];
+ DECLARE_BITMAP(sess_mask, TEE_NUM_SESSIONS);
+ spinlock_t lock; /* synchronizes access to @sess_mask */
+};
+
+/**
+ * struct amdtee_context_data - AMD-TEE driver context data
+ * @sess_list: Keeps track of sessions opened in current TEE context
+ */
+struct amdtee_context_data {
+ struct list_head sess_list;
+};
+
+struct amdtee_driver_data {
+ struct amdtee *amdtee;
+};
+
+struct shmem_desc {
+ void *kaddr;
+ u64 size;
+};
+
+/**
+ * struct amdtee_shm_data - Shared memory data
+ * @kaddr: Kernel virtual address of shared memory
+ * @buf_id: Buffer id of memory mapped by TEE_CMD_ID_MAP_SHARED_MEM
+ */
+struct amdtee_shm_data {
+ struct list_head shm_node;
+ void *kaddr;
+ u32 buf_id;
+};
+
+struct amdtee_shm_context {
+ struct list_head shmdata_list;
+};
+
+#define LOWER_TWO_BYTE_MASK 0x0000FFFF
+
+/**
+ * set_session_id() - Sets the session identifier.
+ * @ta_handle: [in] handle of the loaded Trusted Application (TA)
+ * @session_index: [in] Session index. Range: 0 to (TEE_NUM_SESSIONS - 1).
+ * @session: [out] Pointer to session id
+ *
+ * The lower two bytes of the session identifier hold the TA handle and the
+ * upper two bytes hold the session index.
+ */
+static inline void set_session_id(u32 ta_handle, u32 session_index,
+ u32 *session)
+{
+ *session = (session_index << 16) | (LOWER_TWO_BYTE_MASK & ta_handle);
+}
+
+static inline u32 get_ta_handle(u32 session)
+{
+ return session & LOWER_TWO_BYTE_MASK;
+}
+
+static inline u32 get_session_index(u32 session)
+{
+ return (session >> 16) & LOWER_TWO_BYTE_MASK;
+}
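
A quick round trip of the encoding (illustrative values only): with TA handle 0x1234 and session index 2, the packed identifier is 0x00021234 and both accessors recover their fields:

        u32 sid;

        set_session_id(0x1234, 2, &sid);        /* sid == 0x00021234 */
        WARN_ON(get_ta_handle(sid) != 0x1234);
        WARN_ON(get_session_index(sid) != 2);
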
+
+int amdtee_open_session(struct tee_context *ctx,
+ struct tee_ioctl_open_session_arg *arg,
+ struct tee_param *param);
+
+int amdtee_close_session(struct tee_context *ctx, u32 session);
+
+int amdtee_invoke_func(struct tee_context *ctx,
+ struct tee_ioctl_invoke_arg *arg,
+ struct tee_param *param);
+
+int amdtee_cancel_req(struct tee_context *ctx, u32 cancel_id, u32 session);
+
+int amdtee_map_shmem(struct tee_shm *shm);
+
+void amdtee_unmap_shmem(struct tee_shm *shm);
+
+int handle_load_ta(void *data, u32 size,
+ struct tee_ioctl_open_session_arg *arg);
+
+int handle_unload_ta(u32 ta_handle);
+
+int handle_open_session(struct tee_ioctl_open_session_arg *arg, u32 *info,
+ struct tee_param *p);
+
+int handle_close_session(u32 ta_handle, u32 info);
+
+int handle_map_shmem(u32 count, struct shmem_desc *start, u32 *buf_id);
+
+void handle_unmap_shmem(u32 buf_id);
+
+int handle_invoke_cmd(struct tee_ioctl_invoke_arg *arg, u32 sinfo,
+ struct tee_param *p);
+
+struct tee_shm_pool *amdtee_config_shm(void);
+
+u32 get_buffer_id(struct tee_shm *shm);
+#endif /*AMDTEE_PRIVATE_H*/
diff --git a/drivers/tee/amdtee/call.c b/drivers/tee/amdtee/call.c
new file mode 100644
index 000000000000..096dd4d92d39
--- /dev/null
+++ b/drivers/tee/amdtee/call.c
@@ -0,0 +1,373 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright 2019 Advanced Micro Devices, Inc.
+ */
+
+#include <linux/device.h>
+#include <linux/tee.h>
+#include <linux/tee_drv.h>
+#include <linux/psp-tee.h>
+#include <linux/slab.h>
+#include <linux/psp-sev.h>
+#include "amdtee_if.h"
+#include "amdtee_private.h"
+
+static int tee_params_to_amd_params(struct tee_param *tee, u32 count,
+ struct tee_operation *amd)
+{
+ int i, ret = 0;
+ u32 type;
+
+ if (!count)
+ return 0;
+
+ if (!tee || !amd || count > TEE_MAX_PARAMS)
+ return -EINVAL;
+
+ amd->param_types = 0;
+ for (i = 0; i < count; i++) {
+		/* AMD TEE does not support meta parameters */
+ if (tee[i].attr > TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_INOUT)
+ return -EINVAL;
+
+ amd->param_types |= ((tee[i].attr & 0xF) << i * 4);
+ }
+
+ for (i = 0; i < count; i++) {
+ type = TEE_PARAM_TYPE_GET(amd->param_types, i);
+ pr_debug("%s: type[%d] = 0x%x\n", __func__, i, type);
+
+ if (type == TEE_OP_PARAM_TYPE_INVALID)
+ return -EINVAL;
+
+ if (type == TEE_OP_PARAM_TYPE_NONE)
+ continue;
+
+ /* It is assumed that all values are within 2^32-1 */
+ if (type > TEE_OP_PARAM_TYPE_VALUE_INOUT) {
+ u32 buf_id = get_buffer_id(tee[i].u.memref.shm);
+
+ amd->params[i].mref.buf_id = buf_id;
+ amd->params[i].mref.offset = tee[i].u.memref.shm_offs;
+ amd->params[i].mref.size = tee[i].u.memref.size;
+ pr_debug("%s: bufid[%d] = 0x%x, offset[%d] = 0x%x, size[%d] = 0x%x\n",
+ __func__,
+ i, amd->params[i].mref.buf_id,
+ i, amd->params[i].mref.offset,
+ i, amd->params[i].mref.size);
+ } else {
+ if (tee[i].u.value.c)
+				pr_warn("%s: Discarding value c\n", __func__);
+
+ amd->params[i].val.a = tee[i].u.value.a;
+ amd->params[i].val.b = tee[i].u.value.b;
+ pr_debug("%s: a[%d] = 0x%x, b[%d] = 0x%x\n", __func__,
+ i, amd->params[i].val.a,
+ i, amd->params[i].val.b);
+ }
+ }
+ return ret;
+}
+
+static int amd_params_to_tee_params(struct tee_param *tee, u32 count,
+ struct tee_operation *amd)
+{
+ int i, ret = 0;
+ u32 type;
+
+ if (!count)
+ return 0;
+
+ if (!tee || !amd || count > TEE_MAX_PARAMS)
+ return -EINVAL;
+
+ /* Assumes amd->param_types is valid */
+ for (i = 0; i < count; i++) {
+ type = TEE_PARAM_TYPE_GET(amd->param_types, i);
+ pr_debug("%s: type[%d] = 0x%x\n", __func__, i, type);
+
+ if (type == TEE_OP_PARAM_TYPE_INVALID ||
+ type > TEE_OP_PARAM_TYPE_MEMREF_INOUT)
+ return -EINVAL;
+
+ if (type == TEE_OP_PARAM_TYPE_NONE ||
+ type == TEE_OP_PARAM_TYPE_VALUE_INPUT ||
+ type == TEE_OP_PARAM_TYPE_MEMREF_INPUT)
+ continue;
+
+ /*
+ * It is assumed that buf_id remains unchanged for
+ * both open_session and invoke_cmd call
+ */
+ if (type > TEE_OP_PARAM_TYPE_MEMREF_INPUT) {
+ tee[i].u.memref.shm_offs = amd->params[i].mref.offset;
+ tee[i].u.memref.size = amd->params[i].mref.size;
+ pr_debug("%s: bufid[%d] = 0x%x, offset[%d] = 0x%x, size[%d] = 0x%x\n",
+ __func__,
+ i, amd->params[i].mref.buf_id,
+ i, amd->params[i].mref.offset,
+ i, amd->params[i].mref.size);
+ } else {
+ /* field 'c' not supported by AMD TEE */
+ tee[i].u.value.a = amd->params[i].val.a;
+ tee[i].u.value.b = amd->params[i].val.b;
+ tee[i].u.value.c = 0;
+ pr_debug("%s: a[%d] = 0x%x, b[%d] = 0x%x\n",
+ __func__,
+ i, amd->params[i].val.a,
+ i, amd->params[i].val.b);
+ }
+ }
+ return ret;
+}
+
+int handle_unload_ta(u32 ta_handle)
+{
+ struct tee_cmd_unload_ta cmd = {0};
+ u32 status;
+ int ret;
+
+ if (!ta_handle)
+ return -EINVAL;
+
+ cmd.ta_handle = ta_handle;
+
+ ret = psp_tee_process_cmd(TEE_CMD_ID_UNLOAD_TA, (void *)&cmd,
+ sizeof(cmd), &status);
+ if (!ret && status != 0) {
+ pr_err("unload ta: status = 0x%x\n", status);
+ ret = -EBUSY;
+ }
+
+ return ret;
+}
+
+int handle_close_session(u32 ta_handle, u32 info)
+{
+ struct tee_cmd_close_session cmd = {0};
+ u32 status;
+ int ret;
+
+ if (ta_handle == 0)
+ return -EINVAL;
+
+ cmd.ta_handle = ta_handle;
+ cmd.session_info = info;
+
+ ret = psp_tee_process_cmd(TEE_CMD_ID_CLOSE_SESSION, (void *)&cmd,
+ sizeof(cmd), &status);
+ if (!ret && status != 0) {
+ pr_err("close session: status = 0x%x\n", status);
+ ret = -EBUSY;
+ }
+
+ return ret;
+}
+
+void handle_unmap_shmem(u32 buf_id)
+{
+ struct tee_cmd_unmap_shared_mem cmd = {0};
+ u32 status;
+ int ret;
+
+ cmd.buf_id = buf_id;
+
+ ret = psp_tee_process_cmd(TEE_CMD_ID_UNMAP_SHARED_MEM, (void *)&cmd,
+ sizeof(cmd), &status);
+ if (!ret)
+ pr_debug("unmap shared memory: buf_id %u status = 0x%x\n",
+ buf_id, status);
+}
+
+int handle_invoke_cmd(struct tee_ioctl_invoke_arg *arg, u32 sinfo,
+ struct tee_param *p)
+{
+ struct tee_cmd_invoke_cmd cmd = {0};
+ int ret;
+
+ if (!arg || (!p && arg->num_params))
+ return -EINVAL;
+
+ arg->ret_origin = TEEC_ORIGIN_COMMS;
+
+ if (arg->session == 0) {
+ arg->ret = TEEC_ERROR_BAD_PARAMETERS;
+ return -EINVAL;
+ }
+
+ ret = tee_params_to_amd_params(p, arg->num_params, &cmd.op);
+ if (ret) {
+		pr_err("invalid params, aborting invoke command\n");
+ arg->ret = TEEC_ERROR_BAD_PARAMETERS;
+ return ret;
+ }
+
+ cmd.ta_handle = get_ta_handle(arg->session);
+ cmd.cmd_id = arg->func;
+ cmd.session_info = sinfo;
+
+ ret = psp_tee_process_cmd(TEE_CMD_ID_INVOKE_CMD, (void *)&cmd,
+ sizeof(cmd), &arg->ret);
+ if (ret) {
+ arg->ret = TEEC_ERROR_COMMUNICATION;
+ } else {
+ ret = amd_params_to_tee_params(p, arg->num_params, &cmd.op);
+ if (unlikely(ret)) {
+ pr_err("invoke command: failed to copy output\n");
+ arg->ret = TEEC_ERROR_GENERIC;
+ return ret;
+ }
+ arg->ret_origin = cmd.return_origin;
+ pr_debug("invoke command: RO = 0x%x ret = 0x%x\n",
+ arg->ret_origin, arg->ret);
+ }
+
+ return ret;
+}
+
+int handle_map_shmem(u32 count, struct shmem_desc *start, u32 *buf_id)
+{
+ struct tee_cmd_map_shared_mem *cmd;
+ phys_addr_t paddr;
+ int ret, i;
+ u32 status;
+
+ if (!count || !start || !buf_id)
+ return -EINVAL;
+
+ cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
+ if (!cmd)
+ return -ENOMEM;
+
+ /* Size must be page aligned */
+ for (i = 0; i < count ; i++) {
+ if (!start[i].kaddr || (start[i].size & (PAGE_SIZE - 1))) {
+ ret = -EINVAL;
+ goto free_cmd;
+ }
+
+ if ((u64)start[i].kaddr & (PAGE_SIZE - 1)) {
+			pr_err("map shared memory: page unaligned. addr 0x%llx\n",
+			       (u64)start[i].kaddr);
+ ret = -EINVAL;
+ goto free_cmd;
+ }
+ }
+
+ cmd->sg_list.count = count;
+
+ /* Create buffer list */
+ for (i = 0; i < count ; i++) {
+ paddr = __psp_pa(start[i].kaddr);
+ cmd->sg_list.buf[i].hi_addr = upper_32_bits(paddr);
+ cmd->sg_list.buf[i].low_addr = lower_32_bits(paddr);
+ cmd->sg_list.buf[i].size = start[i].size;
+ cmd->sg_list.size += cmd->sg_list.buf[i].size;
+
+ pr_debug("buf[%d]:hi addr = 0x%x\n", i,
+ cmd->sg_list.buf[i].hi_addr);
+ pr_debug("buf[%d]:low addr = 0x%x\n", i,
+ cmd->sg_list.buf[i].low_addr);
+ pr_debug("buf[%d]:size = 0x%x\n", i, cmd->sg_list.buf[i].size);
+ pr_debug("list size = 0x%x\n", cmd->sg_list.size);
+ }
+
+ *buf_id = 0;
+
+ ret = psp_tee_process_cmd(TEE_CMD_ID_MAP_SHARED_MEM, (void *)cmd,
+ sizeof(*cmd), &status);
+ if (!ret && !status) {
+ *buf_id = cmd->buf_id;
+ pr_debug("mapped buffer ID = 0x%x\n", *buf_id);
+ } else {
+ pr_err("map shared memory: status = 0x%x\n", status);
+ ret = -ENOMEM;
+ }
+
+free_cmd:
+ kfree(cmd);
+
+ return ret;
+}
+
+int handle_open_session(struct tee_ioctl_open_session_arg *arg, u32 *info,
+ struct tee_param *p)
+{
+ struct tee_cmd_open_session cmd = {0};
+ int ret;
+
+ if (!arg || !info || (!p && arg->num_params))
+ return -EINVAL;
+
+ arg->ret_origin = TEEC_ORIGIN_COMMS;
+
+ if (arg->session == 0) {
+ arg->ret = TEEC_ERROR_GENERIC;
+ return -EINVAL;
+ }
+
+ ret = tee_params_to_amd_params(p, arg->num_params, &cmd.op);
+ if (ret) {
+		pr_err("invalid params, aborting open session\n");
+ arg->ret = TEEC_ERROR_BAD_PARAMETERS;
+ return ret;
+ }
+
+ cmd.ta_handle = get_ta_handle(arg->session);
+ *info = 0;
+
+ ret = psp_tee_process_cmd(TEE_CMD_ID_OPEN_SESSION, (void *)&cmd,
+ sizeof(cmd), &arg->ret);
+ if (ret) {
+ arg->ret = TEEC_ERROR_COMMUNICATION;
+ } else {
+ ret = amd_params_to_tee_params(p, arg->num_params, &cmd.op);
+ if (unlikely(ret)) {
+ pr_err("open session: failed to copy output\n");
+ arg->ret = TEEC_ERROR_GENERIC;
+ return ret;
+ }
+ arg->ret_origin = cmd.return_origin;
+ *info = cmd.session_info;
+ pr_debug("open session: session info = 0x%x\n", *info);
+ }
+
+ pr_debug("open session: ret = 0x%x RO = 0x%x\n", arg->ret,
+ arg->ret_origin);
+
+ return ret;
+}
+
+int handle_load_ta(void *data, u32 size, struct tee_ioctl_open_session_arg *arg)
+{
+ struct tee_cmd_load_ta cmd = {0};
+ phys_addr_t blob;
+ int ret;
+
+ if (size == 0 || !data || !arg)
+ return -EINVAL;
+
+ blob = __psp_pa(data);
+ if (blob & (PAGE_SIZE - 1)) {
+		pr_err("load TA: page unaligned. blob 0x%llx\n", blob);
+ return -EINVAL;
+ }
+
+ cmd.hi_addr = upper_32_bits(blob);
+ cmd.low_addr = lower_32_bits(blob);
+ cmd.size = size;
+
+ ret = psp_tee_process_cmd(TEE_CMD_ID_LOAD_TA, (void *)&cmd,
+ sizeof(cmd), &arg->ret);
+ if (ret) {
+ arg->ret_origin = TEEC_ORIGIN_COMMS;
+ arg->ret = TEEC_ERROR_COMMUNICATION;
+ } else {
+ set_session_id(cmd.ta_handle, 0, &arg->session);
+ }
+
+ pr_debug("load TA: TA handle = 0x%x, RO = 0x%x, ret = 0x%x\n",
+ cmd.ta_handle, arg->ret_origin, arg->ret);
+
+ return 0;
+}
diff --git a/drivers/tee/amdtee/core.c b/drivers/tee/amdtee/core.c
new file mode 100644
index 000000000000..6370bb55f512
--- /dev/null
+++ b/drivers/tee/amdtee/core.c
@@ -0,0 +1,518 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright 2019 Advanced Micro Devices, Inc.
+ */
+
+#include <linux/errno.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/string.h>
+#include <linux/device.h>
+#include <linux/tee_drv.h>
+#include <linux/types.h>
+#include <linux/mm.h>
+#include <linux/uaccess.h>
+#include <linux/firmware.h>
+#include "amdtee_private.h"
+#include "../tee_private.h"
+#include <linux/psp-tee.h>
+
+static struct amdtee_driver_data *drv_data;
+static DEFINE_MUTEX(session_list_mutex);
+static struct amdtee_shm_context shmctx;
+
+static void amdtee_get_version(struct tee_device *teedev,
+ struct tee_ioctl_version_data *vers)
+{
+ struct tee_ioctl_version_data v = {
+ .impl_id = TEE_IMPL_ID_AMDTEE,
+ .impl_caps = 0,
+ .gen_caps = TEE_GEN_CAP_GP,
+ };
+ *vers = v;
+}
+
+static int amdtee_open(struct tee_context *ctx)
+{
+ struct amdtee_context_data *ctxdata;
+
+ ctxdata = kzalloc(sizeof(*ctxdata), GFP_KERNEL);
+ if (!ctxdata)
+ return -ENOMEM;
+
+ INIT_LIST_HEAD(&ctxdata->sess_list);
+ INIT_LIST_HEAD(&shmctx.shmdata_list);
+
+ ctx->data = ctxdata;
+ return 0;
+}
+
+static void release_session(struct amdtee_session *sess)
+{
+ int i;
+
+ /* Close any open session */
+ for (i = 0; i < TEE_NUM_SESSIONS; ++i) {
+ /* Check if session entry 'i' is valid */
+ if (!test_bit(i, sess->sess_mask))
+ continue;
+
+ handle_close_session(sess->ta_handle, sess->session_info[i]);
+ }
+
+ /* Unload Trusted Application once all sessions are closed */
+ handle_unload_ta(sess->ta_handle);
+ kfree(sess);
+}
+
+static void amdtee_release(struct tee_context *ctx)
+{
+ struct amdtee_context_data *ctxdata = ctx->data;
+
+ if (!ctxdata)
+ return;
+
+ while (true) {
+ struct amdtee_session *sess;
+
+ sess = list_first_entry_or_null(&ctxdata->sess_list,
+ struct amdtee_session,
+ list_node);
+
+ if (!sess)
+ break;
+
+ list_del(&sess->list_node);
+ release_session(sess);
+ }
+ kfree(ctxdata);
+
+ ctx->data = NULL;
+}
+
+/**
+ * alloc_session() - Allocate a session structure
+ * @ctxdata: TEE Context data structure
+ * @session: Session ID for which 'struct amdtee_session' structure is to be
+ * allocated.
+ *
+ * Scans the TEE context's session list to check whether the TA is already
+ * loaded into the TEE. If so, returns the 'session' structure for that TA;
+ * otherwise allocates and initializes a new 'session' structure and adds it
+ * to the context's session list.
+ *
+ * The caller must hold session_list_mutex.
+ *
+ * Returns:
+ * 'struct amdtee_session *' on success and NULL on failure.
+ */
+static struct amdtee_session *alloc_session(struct amdtee_context_data *ctxdata,
+ u32 session)
+{
+ struct amdtee_session *sess;
+ u32 ta_handle = get_ta_handle(session);
+
+	/* Scan the session list to check if the TA is already loaded into TEE */
+ list_for_each_entry(sess, &ctxdata->sess_list, list_node)
+ if (sess->ta_handle == ta_handle) {
+ kref_get(&sess->refcount);
+ return sess;
+ }
+
+ /* Allocate a new session and add to list */
+ sess = kzalloc(sizeof(*sess), GFP_KERNEL);
+ if (sess) {
+ sess->ta_handle = ta_handle;
+ kref_init(&sess->refcount);
+ spin_lock_init(&sess->lock);
+ list_add(&sess->list_node, &ctxdata->sess_list);
+ }
+
+ return sess;
+}
+
+/* Requires mutex to be held */
+static struct amdtee_session *find_session(struct amdtee_context_data *ctxdata,
+ u32 session)
+{
+ u32 ta_handle = get_ta_handle(session);
+ u32 index = get_session_index(session);
+ struct amdtee_session *sess;
+
+ list_for_each_entry(sess, &ctxdata->sess_list, list_node)
+ if (ta_handle == sess->ta_handle &&
+ test_bit(index, sess->sess_mask))
+ return sess;
+
+ return NULL;
+}
+
+u32 get_buffer_id(struct tee_shm *shm)
+{
+ u32 buf_id = 0;
+ struct amdtee_shm_data *shmdata;
+
+ list_for_each_entry(shmdata, &shmctx.shmdata_list, shm_node)
+ if (shmdata->kaddr == shm->kaddr) {
+ buf_id = shmdata->buf_id;
+ break;
+ }
+
+ return buf_id;
+}
+
+static DEFINE_MUTEX(drv_mutex);
+static int copy_ta_binary(struct tee_context *ctx, void *ptr, void **ta,
+ size_t *ta_size)
+{
+ const struct firmware *fw;
+ char fw_name[TA_PATH_MAX];
+ struct {
+ u32 lo;
+ u16 mid;
+ u16 hi_ver;
+ u8 seq_n[8];
+ } *uuid = ptr;
+ int n, rc = 0;
+
+ n = snprintf(fw_name, TA_PATH_MAX,
+ "%s/%08x-%04x-%04x-%02x%02x%02x%02x%02x%02x%02x%02x.bin",
+ TA_LOAD_PATH, uuid->lo, uuid->mid, uuid->hi_ver,
+ uuid->seq_n[0], uuid->seq_n[1],
+ uuid->seq_n[2], uuid->seq_n[3],
+ uuid->seq_n[4], uuid->seq_n[5],
+ uuid->seq_n[6], uuid->seq_n[7]);
+ if (n < 0 || n >= TA_PATH_MAX) {
+ pr_err("failed to get firmware name\n");
+ return -EINVAL;
+ }
+
+ mutex_lock(&drv_mutex);
+ n = request_firmware(&fw, fw_name, &ctx->teedev->dev);
+ if (n) {
+ pr_err("failed to load firmware %s\n", fw_name);
+ rc = -ENOMEM;
+ goto unlock;
+ }
+
+ *ta_size = roundup(fw->size, PAGE_SIZE);
+ *ta = (void *)__get_free_pages(GFP_KERNEL, get_order(*ta_size));
+	if (!*ta) {
+		/* __get_free_pages() returns 0, not an ERR_PTR, on failure */
+		pr_err("%s: get_free_pages failed\n", __func__);
+ rc = -ENOMEM;
+ goto rel_fw;
+ }
+
+ memcpy(*ta, fw->data, fw->size);
+rel_fw:
+ release_firmware(fw);
+unlock:
+ mutex_unlock(&drv_mutex);
+ return rc;
+}
+
+int amdtee_open_session(struct tee_context *ctx,
+ struct tee_ioctl_open_session_arg *arg,
+ struct tee_param *param)
+{
+ struct amdtee_context_data *ctxdata = ctx->data;
+ struct amdtee_session *sess = NULL;
+ u32 session_info;
+ size_t ta_size;
+ int rc, i;
+ void *ta;
+
+ if (arg->clnt_login != TEE_IOCTL_LOGIN_PUBLIC) {
+ pr_err("unsupported client login method\n");
+ return -EINVAL;
+ }
+
+ rc = copy_ta_binary(ctx, &arg->uuid[0], &ta, &ta_size);
+ if (rc) {
+ pr_err("failed to copy TA binary\n");
+ return rc;
+ }
+
+ /* Load the TA binary into TEE environment */
+ handle_load_ta(ta, ta_size, arg);
+ if (arg->ret == TEEC_SUCCESS) {
+ mutex_lock(&session_list_mutex);
+ sess = alloc_session(ctxdata, arg->session);
+ mutex_unlock(&session_list_mutex);
+ }
+
+ if (arg->ret != TEEC_SUCCESS)
+ goto out;
+
+ if (!sess) {
+ rc = -ENOMEM;
+ goto out;
+ }
+
+ /* Find an empty session index for the given TA */
+ spin_lock(&sess->lock);
+ i = find_first_zero_bit(sess->sess_mask, TEE_NUM_SESSIONS);
+ if (i < TEE_NUM_SESSIONS)
+ set_bit(i, sess->sess_mask);
+ spin_unlock(&sess->lock);
+
+ if (i >= TEE_NUM_SESSIONS) {
+ pr_err("reached maximum session count %d\n", TEE_NUM_SESSIONS);
+ rc = -ENOMEM;
+ goto out;
+ }
+
+ /* Open session with loaded TA */
+ handle_open_session(arg, &session_info, param);
+
+ if (arg->ret == TEEC_SUCCESS) {
+ sess->session_info[i] = session_info;
+ set_session_id(sess->ta_handle, i, &arg->session);
+ } else {
+ pr_err("open_session failed %d\n", arg->ret);
+ spin_lock(&sess->lock);
+ clear_bit(i, sess->sess_mask);
+ spin_unlock(&sess->lock);
+ }
+out:
+ free_pages((u64)ta, get_order(ta_size));
+ return rc;
+}
+
+static void destroy_session(struct kref *ref)
+{
+ struct amdtee_session *sess = container_of(ref, struct amdtee_session,
+ refcount);
+
+ /* Unload the TA from TEE */
+ handle_unload_ta(sess->ta_handle);
+ mutex_lock(&session_list_mutex);
+ list_del(&sess->list_node);
+ mutex_unlock(&session_list_mutex);
+ kfree(sess);
+}
+
+int amdtee_close_session(struct tee_context *ctx, u32 session)
+{
+ struct amdtee_context_data *ctxdata = ctx->data;
+ u32 i, ta_handle, session_info;
+ struct amdtee_session *sess;
+
+ pr_debug("%s: sid = 0x%x\n", __func__, session);
+
+ /*
+ * Check that the session is valid and clear the session
+ * usage bit
+ */
+ mutex_lock(&session_list_mutex);
+ sess = find_session(ctxdata, session);
+ if (sess) {
+ ta_handle = get_ta_handle(session);
+ i = get_session_index(session);
+ session_info = sess->session_info[i];
+ spin_lock(&sess->lock);
+ clear_bit(i, sess->sess_mask);
+ spin_unlock(&sess->lock);
+ }
+ mutex_unlock(&session_list_mutex);
+
+ if (!sess)
+ return -EINVAL;
+
+ /* Close the session */
+ handle_close_session(ta_handle, session_info);
+
+ kref_put(&sess->refcount, destroy_session);
+
+ return 0;
+}
+
+int amdtee_map_shmem(struct tee_shm *shm)
+{
+ struct shmem_desc shmem;
+ struct amdtee_shm_data *shmnode;
+ int rc, count;
+ u32 buf_id;
+
+ if (!shm)
+ return -EINVAL;
+
+ shmnode = kmalloc(sizeof(*shmnode), GFP_KERNEL);
+ if (!shmnode)
+ return -ENOMEM;
+
+ count = 1;
+ shmem.kaddr = shm->kaddr;
+ shmem.size = shm->size;
+
+	/*
+	 * Send a MAP command to the TEE and get the corresponding
+	 * buffer ID.
+	 */
+ rc = handle_map_shmem(count, &shmem, &buf_id);
+ if (rc) {
+ pr_err("map_shmem failed: ret = %d\n", rc);
+ kfree(shmnode);
+ return rc;
+ }
+
+ shmnode->kaddr = shm->kaddr;
+ shmnode->buf_id = buf_id;
+ list_add(&shmnode->shm_node, &shmctx.shmdata_list);
+
+ pr_debug("buf_id :[%x] kaddr[%p]\n", shmnode->buf_id, shmnode->kaddr);
+
+ return 0;
+}
+
+void amdtee_unmap_shmem(struct tee_shm *shm)
+{
+ struct amdtee_shm_data *shmnode;
+ u32 buf_id;
+
+ if (!shm)
+ return;
+
+ buf_id = get_buffer_id(shm);
+ /* Unmap the shared memory from TEE */
+ handle_unmap_shmem(buf_id);
+
+ list_for_each_entry(shmnode, &shmctx.shmdata_list, shm_node)
+ if (buf_id == shmnode->buf_id) {
+ list_del(&shmnode->shm_node);
+ kfree(shmnode);
+ break;
+ }
+}
+
+int amdtee_invoke_func(struct tee_context *ctx,
+ struct tee_ioctl_invoke_arg *arg,
+ struct tee_param *param)
+{
+ struct amdtee_context_data *ctxdata = ctx->data;
+ struct amdtee_session *sess;
+ u32 i, session_info;
+
+ /* Check that the session is valid */
+ mutex_lock(&session_list_mutex);
+ sess = find_session(ctxdata, arg->session);
+ if (sess) {
+ i = get_session_index(arg->session);
+ session_info = sess->session_info[i];
+ }
+ mutex_unlock(&session_list_mutex);
+
+ if (!sess)
+ return -EINVAL;
+
+ handle_invoke_cmd(arg, session_info, param);
+
+ return 0;
+}
+
+int amdtee_cancel_req(struct tee_context *ctx, u32 cancel_id, u32 session)
+{
+ return -EINVAL;
+}
+
+static const struct tee_driver_ops amdtee_ops = {
+ .get_version = amdtee_get_version,
+ .open = amdtee_open,
+ .release = amdtee_release,
+ .open_session = amdtee_open_session,
+ .close_session = amdtee_close_session,
+ .invoke_func = amdtee_invoke_func,
+ .cancel_req = amdtee_cancel_req,
+};
+
+static const struct tee_desc amdtee_desc = {
+ .name = DRIVER_NAME "-clnt",
+ .ops = &amdtee_ops,
+ .owner = THIS_MODULE,
+};
+
+static int __init amdtee_driver_init(void)
+{
+ struct tee_device *teedev;
+ struct tee_shm_pool *pool;
+ struct amdtee *amdtee;
+ int rc;
+
+ rc = psp_check_tee_status();
+ if (rc) {
+ pr_err("amd-tee driver: tee not present\n");
+ return rc;
+ }
+
+ drv_data = kzalloc(sizeof(*drv_data), GFP_KERNEL);
+ if (!drv_data)
+ return -ENOMEM;
+
+ amdtee = kzalloc(sizeof(*amdtee), GFP_KERNEL);
+ if (!amdtee) {
+ rc = -ENOMEM;
+ goto err_kfree_drv_data;
+ }
+
+ pool = amdtee_config_shm();
+ if (IS_ERR(pool)) {
+ pr_err("shared pool configuration error\n");
+ rc = PTR_ERR(pool);
+ goto err_kfree_amdtee;
+ }
+
+ teedev = tee_device_alloc(&amdtee_desc, NULL, pool, amdtee);
+ if (IS_ERR(teedev)) {
+ rc = PTR_ERR(teedev);
+ goto err_free_pool;
+ }
+ amdtee->teedev = teedev;
+
+ rc = tee_device_register(amdtee->teedev);
+ if (rc)
+ goto err_device_unregister;
+
+ amdtee->pool = pool;
+
+ drv_data->amdtee = amdtee;
+
+ pr_info("amd-tee driver initialization successful\n");
+ return 0;
+
+err_device_unregister:
+ tee_device_unregister(amdtee->teedev);
+
+err_free_pool:
+ tee_shm_pool_free(pool);
+
+err_kfree_amdtee:
+ kfree(amdtee);
+
+err_kfree_drv_data:
+ kfree(drv_data);
+ drv_data = NULL;
+
+ pr_err("amd-tee driver initialization failed\n");
+ return rc;
+}
+module_init(amdtee_driver_init);
+
+static void __exit amdtee_driver_exit(void)
+{
+ struct amdtee *amdtee;
+
+ if (!drv_data || !drv_data->amdtee)
+ return;
+
+ amdtee = drv_data->amdtee;
+
+ tee_device_unregister(amdtee->teedev);
+ tee_shm_pool_free(amdtee->pool);
+}
+module_exit(amdtee_driver_exit);
+
+MODULE_AUTHOR(DRIVER_AUTHOR);
+MODULE_DESCRIPTION("AMD-TEE driver");
+MODULE_VERSION("1.0");
+MODULE_LICENSE("Dual MIT/GPL");
diff --git a/drivers/tee/amdtee/shm_pool.c b/drivers/tee/amdtee/shm_pool.c
new file mode 100644
index 000000000000..065854e2db18
--- /dev/null
+++ b/drivers/tee/amdtee/shm_pool.c
@@ -0,0 +1,93 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright 2019 Advanced Micro Devices, Inc.
+ */
+
+#include <linux/slab.h>
+#include <linux/tee_drv.h>
+#include <linux/psp-sev.h>
+#include "amdtee_private.h"
+
+static int pool_op_alloc(struct tee_shm_pool_mgr *poolm, struct tee_shm *shm,
+ size_t size)
+{
+ unsigned int order = get_order(size);
+ unsigned long va;
+ int rc;
+
+ va = __get_free_pages(GFP_KERNEL | __GFP_ZERO, order);
+ if (!va)
+ return -ENOMEM;
+
+ shm->kaddr = (void *)va;
+ shm->paddr = __psp_pa((void *)va);
+ shm->size = PAGE_SIZE << order;
+
+ /* Map the allocated memory in to TEE */
+ rc = amdtee_map_shmem(shm);
+ if (rc) {
+ free_pages(va, order);
+ shm->kaddr = NULL;
+ return rc;
+ }
+
+ return 0;
+}
+
+static void pool_op_free(struct tee_shm_pool_mgr *poolm, struct tee_shm *shm)
+{
+ /* Unmap the shared memory from TEE */
+ amdtee_unmap_shmem(shm);
+ free_pages((unsigned long)shm->kaddr, get_order(shm->size));
+ shm->kaddr = NULL;
+}
+
+static void pool_op_destroy_poolmgr(struct tee_shm_pool_mgr *poolm)
+{
+ kfree(poolm);
+}
+
+static const struct tee_shm_pool_mgr_ops pool_ops = {
+ .alloc = pool_op_alloc,
+ .free = pool_op_free,
+ .destroy_poolmgr = pool_op_destroy_poolmgr,
+};
+
+static struct tee_shm_pool_mgr *pool_mem_mgr_alloc(void)
+{
+ struct tee_shm_pool_mgr *mgr = kzalloc(sizeof(*mgr), GFP_KERNEL);
+
+ if (!mgr)
+ return ERR_PTR(-ENOMEM);
+
+ mgr->ops = &pool_ops;
+
+ return mgr;
+}
+
+struct tee_shm_pool *amdtee_config_shm(void)
+{
+ struct tee_shm_pool_mgr *priv_mgr;
+ struct tee_shm_pool_mgr *dmabuf_mgr;
+ void *rc;
+
+ rc = pool_mem_mgr_alloc();
+ if (IS_ERR(rc))
+ return rc;
+ priv_mgr = rc;
+
+ rc = pool_mem_mgr_alloc();
+ if (IS_ERR(rc)) {
+ tee_shm_pool_mgr_destroy(priv_mgr);
+ return rc;
+ }
+ dmabuf_mgr = rc;
+
+ rc = tee_shm_pool_alloc(priv_mgr, dmabuf_mgr);
+ if (IS_ERR(rc)) {
+ tee_shm_pool_mgr_destroy(priv_mgr);
+ tee_shm_pool_mgr_destroy(dmabuf_mgr);
+ }
+
+ return rc;
+}
diff --git a/drivers/tee/optee/Kconfig b/drivers/tee/optee/Kconfig
index d1ad512e1708..3ca71e3812ed 100644
--- a/drivers/tee/optee/Kconfig
+++ b/drivers/tee/optee/Kconfig
@@ -3,6 +3,7 @@
config OPTEE
tristate "OP-TEE"
depends on HAVE_ARM_SMCCC
+ depends on MMU
help
This implements the OP-TEE Trusted Execution Environment (TEE)
driver.
diff --git a/drivers/tee/optee/shm_pool.c b/drivers/tee/optee/shm_pool.c
index 0332a5301d61..d767eebf30bd 100644
--- a/drivers/tee/optee/shm_pool.c
+++ b/drivers/tee/optee/shm_pool.c
@@ -28,9 +28,22 @@ static int pool_op_alloc(struct tee_shm_pool_mgr *poolm,
shm->size = PAGE_SIZE << order;
if (shm->flags & TEE_SHM_DMA_BUF) {
+ unsigned int nr_pages = 1 << order, i;
+ struct page **pages;
+
+		pages = kcalloc(nr_pages, sizeof(*pages), GFP_KERNEL);
+ if (!pages)
+ return -ENOMEM;
+
+ for (i = 0; i < nr_pages; i++) {
+ pages[i] = page;
+ page++;
+ }
+
shm->flags |= TEE_SHM_REGISTER;
- rc = optee_shm_register(shm->ctx, shm, &page, 1 << order,
+ rc = optee_shm_register(shm->ctx, shm, pages, nr_pages,
(unsigned long)shm->kaddr);
+ kfree(pages);
}
return rc;
diff --git a/drivers/tee/tee_shm.c b/drivers/tee/tee_shm.c
index 09ddcd06c715..937ac5aaa6d8 100644
--- a/drivers/tee/tee_shm.c
+++ b/drivers/tee/tee_shm.c
@@ -71,11 +71,6 @@ static void tee_shm_op_release(struct dma_buf *dmabuf)
tee_shm_release(shm);
}
-static void *tee_shm_op_map(struct dma_buf *dmabuf, unsigned long pgnum)
-{
- return NULL;
-}
-
static int tee_shm_op_mmap(struct dma_buf *dmabuf, struct vm_area_struct *vma)
{
struct tee_shm *shm = dmabuf->priv;
@@ -93,7 +88,6 @@ static const struct dma_buf_ops tee_shm_dma_buf_ops = {
.map_dma_buf = tee_shm_op_map_dma_buf,
.unmap_dma_buf = tee_shm_op_unmap_dma_buf,
.release = tee_shm_op_release,
- .map = tee_shm_op_map,
.mmap = tee_shm_op_mmap,
};
diff --git a/drivers/thermal/Kconfig b/drivers/thermal/Kconfig
index 79b27865c6f4..5a05db5438d6 100644
--- a/drivers/thermal/Kconfig
+++ b/drivers/thermal/Kconfig
@@ -151,16 +151,33 @@ config THERMAL_GOV_POWER_ALLOCATOR
config CPU_THERMAL
bool "Generic cpu cooling support"
- depends on CPU_FREQ
depends on THERMAL_OF
help
+ Enable the CPU cooling features. If the system has no active
+ cooling device available, this option allows the CPU to be
+ used as a cooling device.
+
+if CPU_THERMAL
+
+config CPU_FREQ_THERMAL
+ bool "CPU frequency cooling device"
+ depends on CPU_FREQ
+ default y
+ help
This implements the generic cpu cooling mechanism through frequency
reduction. An ACPI version of this already exists
(drivers/acpi/processor_thermal.c).
This will be useful for platforms using the generic thermal interface
and not the ACPI interface.
- If you want this support, you should say Y here.
+config CPU_IDLE_THERMAL
+ bool "CPU idle cooling device"
+ depends on IDLE_INJECT
+ help
+ This implements the CPU cooling mechanism through
+ idle injection. This will throttle the CPU by injecting
+ idle cycles.
+endif
config CLOCK_THERMAL
bool "Generic clock cooling support"
@@ -263,6 +280,20 @@ config SPEAR_THERMAL
Enable this to plug the SPEAr thermal sensor driver into the Linux
thermal framework.
+config SUN8I_THERMAL
+ tristate "Allwinner sun8i thermal driver"
+ depends on ARCH_SUNXI || COMPILE_TEST
+ depends on HAS_IOMEM
+ depends on NVMEM
+ depends on OF
+ depends on RESET_CONTROLLER
+ help
+ Enable support for the sun8i thermal sensor driver in the Linux
+ thermal framework.
+
+ To compile this driver as a module, choose M here: the
+ module will be called sun8i-thermal.
+
config ROCKCHIP_THERMAL
tristate "Rockchip thermal driver"
depends on ARCH_ROCKCHIP || COMPILE_TEST
diff --git a/drivers/thermal/Makefile b/drivers/thermal/Makefile
index baeb70bf0568..9fb88e26fb10 100644
--- a/drivers/thermal/Makefile
+++ b/drivers/thermal/Makefile
@@ -19,7 +19,8 @@ thermal_sys-$(CONFIG_THERMAL_GOV_USER_SPACE) += user_space.o
thermal_sys-$(CONFIG_THERMAL_GOV_POWER_ALLOCATOR) += power_allocator.o
# cpufreq cooling
-thermal_sys-$(CONFIG_CPU_THERMAL) += cpu_cooling.o
+thermal_sys-$(CONFIG_CPU_FREQ_THERMAL) += cpufreq_cooling.o
+thermal_sys-$(CONFIG_CPU_IDLE_THERMAL) += cpuidle_cooling.o
# clock cooling
thermal_sys-$(CONFIG_CLOCK_THERMAL) += clock_cooling.o
@@ -31,6 +32,7 @@ thermal_sys-$(CONFIG_DEVFREQ_THERMAL) += devfreq_cooling.o
obj-y += broadcom/
obj-$(CONFIG_THERMAL_MMIO) += thermal_mmio.o
obj-$(CONFIG_SPEAR_THERMAL) += spear_thermal.o
+obj-$(CONFIG_SUN8I_THERMAL) += sun8i_thermal.o
obj-$(CONFIG_ROCKCHIP_THERMAL) += rockchip_thermal.o
obj-$(CONFIG_RCAR_THERMAL) += rcar_thermal.o
obj-$(CONFIG_RCAR_GEN3_THERMAL) += rcar_gen3_thermal.o
diff --git a/drivers/thermal/amlogic_thermal.c b/drivers/thermal/amlogic_thermal.c
index 8a9e9bc421c6..ccb1fe18e993 100644
--- a/drivers/thermal/amlogic_thermal.c
+++ b/drivers/thermal/amlogic_thermal.c
@@ -67,7 +67,11 @@
/**
* struct amlogic_thermal_soc_calib_data
- * @A, B, m, n: calibration parameters
+ * @A: calibration parameters
+ * @B: calibration parameters
+ * @m: calibration parameters
+ * @n: calibration parameters
+ *
* This structure is required for configuration of amlogic thermal driver.
*/
struct amlogic_thermal_soc_calib_data {
diff --git a/drivers/thermal/armada_thermal.c b/drivers/thermal/armada_thermal.c
index 709a22f455e9..7c447cd149e7 100644
--- a/drivers/thermal/armada_thermal.c
+++ b/drivers/thermal/armada_thermal.c
@@ -21,8 +21,6 @@
#include "thermal_core.h"
-#define TO_MCELSIUS(c) ((c) * 1000)
-
/* Thermal Manager Control and Status Register */
#define PMU_TDC0_SW_RST_MASK (0x1 << 1)
#define PMU_TM_DISABLE_OFFS 0
@@ -155,6 +153,9 @@ static void armadaxp_init(struct platform_device *pdev,
regmap_write(priv->syscon, data->syscon_control1_off, reg);
+ reg &= ~PMU_TDC0_SW_RST_MASK;
+ regmap_write(priv->syscon, data->syscon_control1_off, reg);
+
/* Enable the sensor */
regmap_read(priv->syscon, data->syscon_status_off, &reg);
reg &= ~PMU_TM_DISABLE_MASK;
@@ -578,7 +579,7 @@ static const struct armada_thermal_data armadaxp_data = {
.coef_m = 10000000ULL,
.coef_div = 13825,
.syscon_status_off = 0xb0,
- .syscon_control1_off = 0xd0,
+ .syscon_control1_off = 0x2d0,
};
static const struct armada_thermal_data armada370_data = {
diff --git a/drivers/thermal/broadcom/Kconfig b/drivers/thermal/broadcom/Kconfig
index cf43e1520de9..061f1db6edc9 100644
--- a/drivers/thermal/broadcom/Kconfig
+++ b/drivers/thermal/broadcom/Kconfig
@@ -1,4 +1,11 @@
# SPDX-License-Identifier: GPL-2.0-only
+config BCM2711_THERMAL
+ tristate "Broadcom AVS RO thermal sensor driver"
+ depends on ARCH_BCM2835 || COMPILE_TEST
+ depends on THERMAL_OF && MFD_SYSCON
+ help
+ Support for thermal sensors on Broadcom BCM2711 SoCs.
+
config BCM2835_THERMAL
tristate "Thermal sensors on bcm2835 SoC"
depends on ARCH_BCM2835 || COMPILE_TEST
diff --git a/drivers/thermal/broadcom/Makefile b/drivers/thermal/broadcom/Makefile
index 490ab1f7a86d..c917b243d5f0 100644
--- a/drivers/thermal/broadcom/Makefile
+++ b/drivers/thermal/broadcom/Makefile
@@ -1,4 +1,5 @@
# SPDX-License-Identifier: GPL-2.0-only
+obj-$(CONFIG_BCM2711_THERMAL) += bcm2711_thermal.o
obj-$(CONFIG_BCM2835_THERMAL) += bcm2835_thermal.o
obj-$(CONFIG_BRCMSTB_THERMAL) += brcmstb_thermal.o
obj-$(CONFIG_BCM_NS_THERMAL) += ns-thermal.o
diff --git a/drivers/thermal/broadcom/bcm2711_thermal.c b/drivers/thermal/broadcom/bcm2711_thermal.c
new file mode 100644
index 000000000000..67c2a737bc9d
--- /dev/null
+++ b/drivers/thermal/broadcom/bcm2711_thermal.c
@@ -0,0 +1,123 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Broadcom AVS RO thermal sensor driver
+ *
+ * based on brcmstb_thermal
+ *
+ * Copyright (C) 2020 Stefan Wahren
+ */
+
+#include <linux/bitops.h>
+#include <linux/clk.h>
+#include <linux/device.h>
+#include <linux/err.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/mfd/syscon.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/of_device.h>
+#include <linux/regmap.h>
+#include <linux/thermal.h>
+
+#include "../thermal_hwmon.h"
+
+#define AVS_RO_TEMP_STATUS 0x200
+#define AVS_RO_TEMP_STATUS_VALID_MSK (BIT(16) | BIT(10))
+#define AVS_RO_TEMP_STATUS_DATA_MSK GENMASK(9, 0)
+
+struct bcm2711_thermal_priv {
+ struct regmap *regmap;
+ struct thermal_zone_device *thermal;
+};
+
+static int bcm2711_get_temp(void *data, int *temp)
+{
+ struct bcm2711_thermal_priv *priv = data;
+ int slope = thermal_zone_get_slope(priv->thermal);
+ int offset = thermal_zone_get_offset(priv->thermal);
+ u32 val;
+ int ret;
+ long t;
+
+ ret = regmap_read(priv->regmap, AVS_RO_TEMP_STATUS, &val);
+ if (ret)
+ return ret;
+
+ if (!(val & AVS_RO_TEMP_STATUS_VALID_MSK))
+ return -EIO;
+
+ val &= AVS_RO_TEMP_STATUS_DATA_MSK;
+
+ /* Convert a HW code to a temperature reading (millidegree celsius) */
+ t = slope * val + offset;
+
+ *temp = t < 0 ? 0 : t;
+
+ return 0;
+}
+
+static const struct thermal_zone_of_device_ops bcm2711_thermal_of_ops = {
+ .get_temp = bcm2711_get_temp,
+};
+
+static const struct of_device_id bcm2711_thermal_id_table[] = {
+ { .compatible = "brcm,bcm2711-thermal" },
+ {},
+};
+MODULE_DEVICE_TABLE(of, bcm2711_thermal_id_table);
+
+static int bcm2711_thermal_probe(struct platform_device *pdev)
+{
+ struct thermal_zone_device *thermal;
+ struct bcm2711_thermal_priv *priv;
+ struct device *dev = &pdev->dev;
+ struct device_node *parent;
+ struct regmap *regmap;
+ int ret;
+
+ priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ /* get regmap from syscon node */
+ parent = of_get_parent(dev->of_node); /* parent should be syscon node */
+ regmap = syscon_node_to_regmap(parent);
+ of_node_put(parent);
+ if (IS_ERR(regmap)) {
+ ret = PTR_ERR(regmap);
+ dev_err(dev, "failed to get regmap: %d\n", ret);
+ return ret;
+ }
+ priv->regmap = regmap;
+
+ thermal = devm_thermal_zone_of_sensor_register(dev, 0, priv,
+ &bcm2711_thermal_of_ops);
+ if (IS_ERR(thermal)) {
+ ret = PTR_ERR(thermal);
+ dev_err(dev, "could not register sensor: %d\n", ret);
+ return ret;
+ }
+
+ priv->thermal = thermal;
+
+ thermal->tzp->no_hwmon = false;
+ ret = thermal_add_hwmon_sysfs(thermal);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static struct platform_driver bcm2711_thermal_driver = {
+ .probe = bcm2711_thermal_probe,
+ .driver = {
+ .name = "bcm2711_thermal",
+ .of_match_table = bcm2711_thermal_id_table,
+ },
+};
+module_platform_driver(bcm2711_thermal_driver);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Stefan Wahren");
+MODULE_DESCRIPTION("Broadcom AVS RO thermal sensor driver");
diff --git a/drivers/thermal/broadcom/brcmstb_thermal.c b/drivers/thermal/broadcom/brcmstb_thermal.c
index 5825ac581f56..8df5edef1ded 100644
--- a/drivers/thermal/broadcom/brcmstb_thermal.c
+++ b/drivers/thermal/broadcom/brcmstb_thermal.c
@@ -49,7 +49,7 @@
#define AVS_TMON_TP_TEST_ENABLE 0x20
/* Default coefficients */
-#define AVS_TMON_TEMP_SLOPE -487
+#define AVS_TMON_TEMP_SLOPE 487
#define AVS_TMON_TEMP_OFFSET 410040
/* HW related temperature constants */
@@ -102,29 +102,28 @@ static struct avs_tmon_trip avs_tmon_trips[] = {
},
};
+struct brcmstb_thermal_params {
+ unsigned int offset;
+ unsigned int mult;
+ const struct thermal_zone_of_device_ops *of_ops;
+};
+
struct brcmstb_thermal_priv {
void __iomem *tmon_base;
struct device *dev;
struct thermal_zone_device *thermal;
+ /* Process specific thermal parameters used for calculations */
+ const struct brcmstb_thermal_params *temp_params;
};
-static void avs_tmon_get_coeffs(struct thermal_zone_device *tz, int *slope,
- int *offset)
-{
- *slope = thermal_zone_get_slope(tz);
- *offset = thermal_zone_get_offset(tz);
-}
-
/* Convert a HW code to a temperature reading (millidegree celsius) */
-static inline int avs_tmon_code_to_temp(struct thermal_zone_device *tz,
+static inline int avs_tmon_code_to_temp(struct brcmstb_thermal_priv *priv,
u32 code)
{
- const int val = code & AVS_TMON_TEMP_MASK;
- int slope, offset;
+ int offset = priv->temp_params->offset;
+ int mult = priv->temp_params->mult;
- avs_tmon_get_coeffs(tz, &slope, &offset);
-
- return slope * val + offset;
+ return (offset - (int)((code & AVS_TMON_TEMP_MASK) * mult));
}
/*
@@ -133,23 +132,22 @@ static inline int avs_tmon_code_to_temp(struct thermal_zone_device *tz,
* @temp: temperature to convert
* @low: if true, round toward the low side
*/
-static inline u32 avs_tmon_temp_to_code(struct thermal_zone_device *tz,
+static inline u32 avs_tmon_temp_to_code(struct brcmstb_thermal_priv *priv,
int temp, bool low)
{
- int slope, offset;
+ int offset = priv->temp_params->offset;
+ int mult = priv->temp_params->mult;
if (temp < AVS_TMON_TEMP_MIN)
- return AVS_TMON_TEMP_MAX; /* Maximum code value */
-
- avs_tmon_get_coeffs(tz, &slope, &offset);
+ return AVS_TMON_TEMP_MAX; /* Maximum code value */
if (temp >= offset)
return 0; /* Minimum code value */
if (low)
- return (u32)(DIV_ROUND_UP(offset - temp, abs(slope)));
+ return (u32)(DIV_ROUND_UP(offset - temp, mult));
else
- return (u32)((offset - temp) / abs(slope));
+ return (u32)((offset - temp) / mult);
}
static int brcmstb_get_temp(void *data, int *temp)
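
As a worked check of the two conversions above, using the 28nm parameters introduced later in this patch (offset = 410040, mult = 487); the snippet is plain userspace C for illustration only:

#include <assert.h>

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

int main(void)
{
	const int offset = 410040, mult = 487;	/* 28nm parameters */
	const unsigned int code = 780;

	/* code -> millicelsius: 410040 - 780 * 487 = 30180 (~30.2 C) */
	int temp = offset - (int)(code * mult);
	assert(temp == 30180);

	/* millicelsius -> code, rounded toward the low temperature */
	assert(DIV_ROUND_UP(offset - temp, mult) == code);
	/* ...and truncated toward the high side */
	assert((unsigned int)((offset - temp) / mult) == code);
	return 0;
}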
@@ -167,7 +165,7 @@ static int brcmstb_get_temp(void *data, int *temp)
val = (val & AVS_TMON_STATUS_data_msk) >> AVS_TMON_STATUS_data_shift;
- t = avs_tmon_code_to_temp(priv->thermal, val);
+ t = avs_tmon_code_to_temp(priv, val);
if (t < 0)
*temp = 0;
else
@@ -201,7 +199,7 @@ static int avs_tmon_get_trip_temp(struct brcmstb_thermal_priv *priv,
val &= trip->reg_msk;
val >>= trip->reg_shift;
- return avs_tmon_code_to_temp(priv->thermal, val);
+ return avs_tmon_code_to_temp(priv, val);
}
static void avs_tmon_set_trip_temp(struct brcmstb_thermal_priv *priv,
@@ -214,7 +212,7 @@ static void avs_tmon_set_trip_temp(struct brcmstb_thermal_priv *priv,
dev_dbg(priv->dev, "set temp %d to %d\n", type, temp);
/* round toward low temp for the low interrupt */
- val = avs_tmon_temp_to_code(priv->thermal, temp,
+ val = avs_tmon_temp_to_code(priv, temp,
type == TMON_TRIP_TYPE_LOW);
val <<= trip->reg_shift;
@@ -231,7 +229,7 @@ static int avs_tmon_get_intr_temp(struct brcmstb_thermal_priv *priv)
u32 val;
val = __raw_readl(priv->tmon_base + AVS_TMON_TEMP_INT_CODE);
- return avs_tmon_code_to_temp(priv->thermal, val);
+ return avs_tmon_code_to_temp(priv, val);
}
static irqreturn_t brcmstb_tmon_irq_thread(int irq, void *data)
@@ -290,19 +288,37 @@ static int brcmstb_set_trips(void *data, int low, int high)
return 0;
}
-static const struct thermal_zone_of_device_ops of_ops = {
+static const struct thermal_zone_of_device_ops brcmstb_16nm_of_ops = {
+ .get_temp = brcmstb_get_temp,
+};
+
+static const struct brcmstb_thermal_params brcmstb_16nm_params = {
+ .offset = 457829,
+ .mult = 557,
+ .of_ops = &brcmstb_16nm_of_ops,
+};
+
+static const struct thermal_zone_of_device_ops brcmstb_28nm_of_ops = {
.get_temp = brcmstb_get_temp,
.set_trips = brcmstb_set_trips,
};
+static const struct brcmstb_thermal_params brcmstb_28nm_params = {
+ .offset = 410040,
+ .mult = 487,
+ .of_ops = &brcmstb_28nm_of_ops,
+};
+
static const struct of_device_id brcmstb_thermal_id_table[] = {
- { .compatible = "brcm,avs-tmon" },
+ { .compatible = "brcm,avs-tmon-bcm7216", .data = &brcmstb_16nm_params },
+ { .compatible = "brcm,avs-tmon", .data = &brcmstb_28nm_params },
{},
};
MODULE_DEVICE_TABLE(of, brcmstb_thermal_id_table);
static int brcmstb_thermal_probe(struct platform_device *pdev)
{
+ const struct thermal_zone_of_device_ops *of_ops;
struct thermal_zone_device *thermal;
struct brcmstb_thermal_priv *priv;
struct resource *res;
@@ -312,6 +328,10 @@ static int brcmstb_thermal_probe(struct platform_device *pdev)
if (!priv)
return -ENOMEM;
+ priv->temp_params = of_device_get_match_data(&pdev->dev);
+ if (!priv->temp_params)
+ return -EINVAL;
+
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
priv->tmon_base = devm_ioremap_resource(&pdev->dev, res);
if (IS_ERR(priv->tmon_base))
@@ -319,9 +339,10 @@ static int brcmstb_thermal_probe(struct platform_device *pdev)
priv->dev = &pdev->dev;
platform_set_drvdata(pdev, priv);
+ of_ops = priv->temp_params->of_ops;
thermal = devm_thermal_zone_of_sensor_register(&pdev->dev, 0, priv,
- &of_ops);
+ of_ops);
if (IS_ERR(thermal)) {
ret = PTR_ERR(thermal);
dev_err(&pdev->dev, "could not register sensor: %d\n", ret);
@@ -331,16 +352,15 @@ static int brcmstb_thermal_probe(struct platform_device *pdev)
priv->thermal = thermal;
irq = platform_get_irq(pdev, 0);
- if (irq < 0) {
- dev_err(&pdev->dev, "could not get IRQ\n");
- return irq;
- }
- ret = devm_request_threaded_irq(&pdev->dev, irq, NULL,
- brcmstb_tmon_irq_thread, IRQF_ONESHOT,
- DRV_NAME, priv);
- if (ret < 0) {
- dev_err(&pdev->dev, "could not request IRQ: %d\n", ret);
- return ret;
+ if (irq >= 0) {
+ ret = devm_request_threaded_irq(&pdev->dev, irq, NULL,
+ brcmstb_tmon_irq_thread,
+ IRQF_ONESHOT,
+ DRV_NAME, priv);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "could not request IRQ: %d\n", ret);
+ return ret;
+ }
}
dev_info(&pdev->dev, "registered AVS TMON of-sensor driver\n");
diff --git a/drivers/thermal/clock_cooling.c b/drivers/thermal/clock_cooling.c
index 3ad3256c48fd..7cb3ae4b44ee 100644
--- a/drivers/thermal/clock_cooling.c
+++ b/drivers/thermal/clock_cooling.c
@@ -7,7 +7,7 @@
* Copyright (C) 2013 Texas Instruments Inc.
* Contact: Eduardo Valentin <eduardo.valentin@ti.com>
*
- * Highly based on cpu_cooling.c.
+ * Based heavily on cpufreq_cooling.c.
* Copyright (C) 2012 Samsung Electronics Co., Ltd(http://www.samsung.com)
* Copyright (C) 2012 Amit Daniel <amit.kachhap@linaro.org>
*/
diff --git a/drivers/thermal/cpu_cooling.c b/drivers/thermal/cpufreq_cooling.c
index 52569b27b426..fe83d7a210d4 100644
--- a/drivers/thermal/cpu_cooling.c
+++ b/drivers/thermal/cpufreq_cooling.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0
/*
- * linux/drivers/thermal/cpu_cooling.c
+ * linux/drivers/thermal/cpufreq_cooling.c
*
* Copyright (C) 2012 Samsung Electronics Co., Ltd(http://www.samsung.com)
*
@@ -63,6 +63,7 @@ struct time_in_idle {
* @policy: cpufreq policy.
* @node: list_head to link all cpufreq_cooling_device together.
* @idle_time: idle time stats
+ * @qos_req: PM QoS constraint to apply
*
* This structure is required for keeping information of each registered
* cpufreq_cooling_device.
@@ -620,7 +621,7 @@ of_cpufreq_cooling_register(struct cpufreq_policy *policy)
struct thermal_cooling_device *cdev = NULL;
if (!np) {
- pr_err("cpu_cooling: OF node not available for cpu%d\n",
+ pr_err("cpufreq_cooling: OF node not available for cpu%d\n",
policy->cpu);
return NULL;
}
@@ -630,7 +631,7 @@ of_cpufreq_cooling_register(struct cpufreq_policy *policy)
cdev = __cpufreq_cooling_register(np, policy, em);
if (IS_ERR(cdev)) {
- pr_err("cpu_cooling: cpu%d failed to register as cooling device: %ld\n",
+ pr_err("cpufreq_cooling: cpu%d failed to register as cooling device: %ld\n",
policy->cpu, PTR_ERR(cdev));
cdev = NULL;
}
diff --git a/drivers/thermal/cpuidle_cooling.c b/drivers/thermal/cpuidle_cooling.c
new file mode 100644
index 000000000000..0bb843246f59
--- /dev/null
+++ b/drivers/thermal/cpuidle_cooling.c
@@ -0,0 +1,232 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2019 Linaro Limited.
+ *
+ * Author: Daniel Lezcano <daniel.lezcano@linaro.org>
+ *
+ */
+#include <linux/cpu_cooling.h>
+#include <linux/cpuidle.h>
+#include <linux/err.h>
+#include <linux/idle_inject.h>
+#include <linux/idr.h>
+#include <linux/slab.h>
+#include <linux/thermal.h>
+
+/**
+ * struct cpuidle_cooling_device - data for the idle cooling device
+ * @ii_dev: idle injection device used to inject idle cycles
+ * @state: a normalized integer giving the state of the cooling device
+ */
+struct cpuidle_cooling_device {
+ struct idle_inject_device *ii_dev;
+ unsigned long state;
+};
+
+static DEFINE_IDA(cpuidle_ida);
+
+/**
+ * cpuidle_cooling_runtime - Running time computation
+ * @idle_duration_us: the fixed idle injection duration, in microseconds
+ * @state: the cooling device state, as a percentage of idle time
+ *
+ * The running duration is computed from the idle injection duration
+ * which is fixed. If we reach 100% of idle injection ratio, that
+ * means the running duration is zero. If we have a 50% ratio
+ * injection, that means we have equal duration for idle and for
+ * running duration.
+ *
+ * The formula is deduced as follows:
+ *
+ * running = idle x ((100 / ratio) - 1)
+ *
+ * For precision purpose for integer math, we use the following:
+ *
+ * running = (idle x 100) / ratio - idle
+ *
+ * For example, with a 10ms idle injection duration and a 50% ratio,
+ * we end up with 10ms of idle injection and 10ms of running duration.
+ *
+ * Return: An unsigned int for a usec based runtime duration.
+ */
+static unsigned int cpuidle_cooling_runtime(unsigned int idle_duration_us,
+ unsigned long state)
+{
+ if (!state)
+ return 0;
+
+ return ((idle_duration_us * 100) / state) - idle_duration_us;
+}
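
A quick numeric check of the runtime formula above, as standalone userspace C (illustration only):

#include <assert.h>

static unsigned int runtime_us(unsigned int idle_us, unsigned long state)
{
	if (!state)
		return 0;
	return ((idle_us * 100) / state) - idle_us;
}

int main(void)
{
	assert(runtime_us(10000, 100) == 0);	/* 100%: fully idle */
	assert(runtime_us(10000, 50) == 10000);	/* 50%: equal idle and run */
	assert(runtime_us(10000, 25) == 30000);	/* 25%: run 3x the idle time */
	return 0;
}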
+
+/**
+ * cpuidle_cooling_get_max_state - Get the maximum state
+ * @cdev : the thermal cooling device
+ * @state : a pointer to the state variable to be filled
+ *
+ * The function always returns 100 as the injection ratio. It is
+ * percentage based for consistency across different platforms.
+ *
+ * Return: The function can not fail, it is always zero
+ */
+static int cpuidle_cooling_get_max_state(struct thermal_cooling_device *cdev,
+ unsigned long *state)
+{
+ /*
+ * Depending on the configuration or the hardware, the running
+ * cycle and the idle cycle could be different. We want to
+ * unify that to a 0..100 interval, so the set state
+ * interface will be the same whatever the platform is.
+ *
+ * The state 100% will make the cluster 100% ... idle. A 0%
+ * injection ratio means no idle injection at all and 50%
+ * means that for 10ms of idle injection, we have 10ms of running
+ * time.
+ */
+ *state = 100;
+
+ return 0;
+}
+
+/**
+ * cpuidle_cooling_get_cur_state - Get the current cooling state
+ * @cdev: the thermal cooling device
+ * @state: a pointer to the state
+ *
+ * The function just copies the state value from the private thermal
+ * cooling device structure, the mapping is 1 <-> 1.
+ *
+ * Return: The function can not fail, it is always zero
+ */
+static int cpuidle_cooling_get_cur_state(struct thermal_cooling_device *cdev,
+ unsigned long *state)
+{
+ struct cpuidle_cooling_device *idle_cdev = cdev->devdata;
+
+ *state = idle_cdev->state;
+
+ return 0;
+}
+
+/**
+ * cpuidle_cooling_set_cur_state - Set the current cooling state
+ * @cdev: the thermal cooling device
+ * @state: the target state
+ *
+ * The function first checks if we are initiating the mitigation, which
+ * in turn wakes up all the idle injection tasks belonging to the idle
+ * cooling device. In any case, it updates the internal state for the
+ * cooling device.
+ *
+ * Return: The function can not fail, it is always zero
+ */
+static int cpuidle_cooling_set_cur_state(struct thermal_cooling_device *cdev,
+ unsigned long state)
+{
+ struct cpuidle_cooling_device *idle_cdev = cdev->devdata;
+ struct idle_inject_device *ii_dev = idle_cdev->ii_dev;
+ unsigned long current_state = idle_cdev->state;
+ unsigned int runtime_us, idle_duration_us;
+
+ idle_cdev->state = state;
+
+ idle_inject_get_duration(ii_dev, &runtime_us, &idle_duration_us);
+
+ runtime_us = cpuidle_cooling_runtime(idle_duration_us, state);
+
+ idle_inject_set_duration(ii_dev, runtime_us, idle_duration_us);
+
+ if (current_state == 0 && state > 0) {
+ idle_inject_start(ii_dev);
+ } else if (current_state > 0 && !state) {
+ idle_inject_stop(ii_dev);
+ }
+
+ return 0;
+}
+
+/**
+ * cpuidle_cooling_ops - thermal cooling device ops
+ */
+static struct thermal_cooling_device_ops cpuidle_cooling_ops = {
+ .get_max_state = cpuidle_cooling_get_max_state,
+ .get_cur_state = cpuidle_cooling_get_cur_state,
+ .set_cur_state = cpuidle_cooling_set_cur_state,
+};
+
+/**
+ * cpuidle_of_cooling_register - Idle cooling device initialization function
+ * @drv: a cpuidle driver structure pointer
+ * @np: a node pointer to a device tree cooling device node
+ *
+ * This function is in charge of creating a cooling device per cpuidle
+ * driver and register it to thermal framework.
+ *
+ * Return: zero on success, or negative value corresponding to the
+ * error detected in the underlying subsystems.
+ */
+int cpuidle_of_cooling_register(struct device_node *np,
+ struct cpuidle_driver *drv)
+{
+ struct idle_inject_device *ii_dev;
+ struct cpuidle_cooling_device *idle_cdev;
+ struct thermal_cooling_device *cdev;
+ char dev_name[THERMAL_NAME_LENGTH];
+ int id, ret;
+
+ idle_cdev = kzalloc(sizeof(*idle_cdev), GFP_KERNEL);
+ if (!idle_cdev) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ id = ida_simple_get(&cpuidle_ida, 0, 0, GFP_KERNEL);
+ if (id < 0) {
+ ret = id;
+ goto out_kfree;
+ }
+
+ ii_dev = idle_inject_register(drv->cpumask);
+ if (!ii_dev) {
+ ret = -EINVAL;
+ goto out_id;
+ }
+
+ idle_inject_set_duration(ii_dev, TICK_USEC, TICK_USEC);
+
+ idle_cdev->ii_dev = ii_dev;
+
+ snprintf(dev_name, sizeof(dev_name), "thermal-idle-%d", id);
+
+ cdev = thermal_of_cooling_device_register(np, dev_name, idle_cdev,
+ &cpuidle_cooling_ops);
+ if (IS_ERR(cdev)) {
+ ret = PTR_ERR(cdev);
+ goto out_unregister;
+ }
+
+ return 0;
+
+out_unregister:
+ idle_inject_unregister(ii_dev);
+out_id:
+ ida_simple_remove(&cpuidle_ida, id);
+out_kfree:
+ kfree(idle_cdev);
+out:
+ return ret;
+}
+
+/**
+ * cpuidle_cooling_register - Idle cooling device initialization function
+ * @drv: a cpuidle driver structure pointer
+ *
+ * This function is in charge of creating a cooling device per cpuidle
+ * driver and register it to thermal framework.
+ *
+ * Return: zero on success, or negative value corresponding to the
+ * error detected in the underlying subsystems.
+ */
+int cpuidle_cooling_register(struct cpuidle_driver *drv)
+{
+ return cpuidle_of_cooling_register(NULL, drv);
+}
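
A hedged sketch of a call site: a cpuidle driver registering its idle cooling device at init time. The driver name and error policy are assumptions for illustration; registration failure is treated as non-fatal here.

/* Illustration only: the "my_" names are hypothetical. */
static int my_cpuidle_init(struct cpuidle_driver *drv)
{
	int ret;

	ret = cpuidle_register(drv, NULL);
	if (ret)
		return ret;

	/* Thermal mitigation is optional; cpuidle keeps working without it */
	if (cpuidle_cooling_register(drv))
		pr_warn("cpuidle cooling device registration failed\n");

	return 0;
}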
diff --git a/drivers/thermal/db8500_thermal.c b/drivers/thermal/db8500_thermal.c
index 372dbbaaafb8..21d4d6e6409a 100644
--- a/drivers/thermal/db8500_thermal.c
+++ b/drivers/thermal/db8500_thermal.c
@@ -152,8 +152,8 @@ static irqreturn_t prcmu_high_irq_handler(int irq, void *irq_data)
db8500_thermal_update_config(th, idx, THERMAL_TREND_RAISING,
next_low, next_high);
- dev_info(&th->tz->device,
- "PRCMU set max %ld, min %ld\n", next_high, next_low);
+ dev_dbg(&th->tz->device,
+ "PRCMU set max %ld, min %ld\n", next_high, next_low);
} else if (idx == num_points - 1)
/* So we roof out 1 degree over the max point */
th->interpolated_temp = db8500_thermal_points[idx] + 1;
diff --git a/drivers/thermal/devfreq_cooling.c b/drivers/thermal/devfreq_cooling.c
index ef59256887ff..a87d4fa031c8 100644
--- a/drivers/thermal/devfreq_cooling.c
+++ b/drivers/thermal/devfreq_cooling.c
@@ -53,6 +53,7 @@ static DEFINE_IDA(devfreq_ida);
 * 'utilization' (which is 'busy_time' / 'total_time').
* The 'res_util' range is from 100 to (power_table[state] * 100)
* for the corresponding 'state'.
+ * @capped_state: index of the cooling state within the dynamic power budget
*/
struct devfreq_cooling_device {
int id;
@@ -587,7 +588,7 @@ EXPORT_SYMBOL_GPL(devfreq_cooling_register);
/**
* devfreq_cooling_unregister() - Unregister devfreq cooling device.
- * @dfc: Pointer to devfreq cooling device to unregister.
+ * @cdev: Pointer to devfreq cooling device to unregister.
*/
void devfreq_cooling_unregister(struct thermal_cooling_device *cdev)
{
diff --git a/drivers/thermal/fair_share.c b/drivers/thermal/fair_share.c
index afd99f668c65..aaa07180ab48 100644
--- a/drivers/thermal/fair_share.c
+++ b/drivers/thermal/fair_share.c
@@ -58,8 +58,8 @@ static long get_target_state(struct thermal_zone_device *tz,
/**
* fair_share_throttle - throttles devices associated with the given zone
- * @tz - thermal_zone_device
- * @trip - trip point index
+ * @tz: thermal_zone_device
+ * @trip: trip point index
*
* Throttling Logic: This uses three parameters to calculate the new
* throttle state of the cooling devices associated with the given zone.
diff --git a/drivers/thermal/gov_bang_bang.c b/drivers/thermal/gov_bang_bang.c
index b831fc77cf64..991a1c54296d 100644
--- a/drivers/thermal/gov_bang_bang.c
+++ b/drivers/thermal/gov_bang_bang.c
@@ -71,8 +71,8 @@ static void thermal_zone_trip_update(struct thermal_zone_device *tz, int trip)
/**
* bang_bang_control - controls devices associated with the given zone
- * @tz - thermal_zone_device
- * @trip - the trip point
+ * @tz: thermal_zone_device
+ * @trip: the trip point
*
* Regulation Logic: a two point regulation, deliver cooling state depending
* on the previous state shown in this diagram:
diff --git a/drivers/thermal/intel/int340x_thermal/int3400_thermal.c b/drivers/thermal/intel/int340x_thermal/int3400_thermal.c
index 3517883b5cdb..efae0c02d898 100644
--- a/drivers/thermal/intel/int340x_thermal/int3400_thermal.c
+++ b/drivers/thermal/intel/int340x_thermal/int3400_thermal.c
@@ -369,6 +369,7 @@ static int int3400_thermal_remove(struct platform_device *pdev)
}
static const struct acpi_device_id int3400_thermal_match[] = {
+ {"INT1040", 0},
{"INT3400", 0},
{}
};
diff --git a/drivers/thermal/intel/int340x_thermal/int3403_thermal.c b/drivers/thermal/intel/int340x_thermal/int3403_thermal.c
index a7bbd8584ae2..aeece1e136a5 100644
--- a/drivers/thermal/intel/int340x_thermal/int3403_thermal.c
+++ b/drivers/thermal/intel/int340x_thermal/int3403_thermal.c
@@ -282,6 +282,7 @@ static int int3403_remove(struct platform_device *pdev)
}
static const struct acpi_device_id int3403_device_ids[] = {
+ {"INT1043", 0},
{"INT3403", 0},
{"", 0},
};
diff --git a/drivers/thermal/intel/int340x_thermal/int340x_thermal_zone.c b/drivers/thermal/intel/int340x_thermal/int340x_thermal_zone.c
index 75484d6c5056..432213272f1e 100644
--- a/drivers/thermal/intel/int340x_thermal/int340x_thermal_zone.c
+++ b/drivers/thermal/intel/int340x_thermal/int340x_thermal_zone.c
@@ -8,6 +8,7 @@
#include <linux/init.h>
#include <linux/acpi.h>
#include <linux/thermal.h>
+#include <linux/units.h>
#include "int340x_thermal_zone.h"
static int int340x_thermal_get_zone_temp(struct thermal_zone_device *zone,
@@ -34,7 +35,7 @@ static int int340x_thermal_get_zone_temp(struct thermal_zone_device *zone,
*temp = (unsigned long)conv_temp * 10;
} else
/* _TMP returns the temperature in tenths of degrees Kelvin */
- *temp = DECI_KELVIN_TO_MILLICELSIUS(tmp);
+ *temp = deci_kelvin_to_millicelsius(tmp);
return 0;
}
@@ -116,7 +117,7 @@ static int int340x_thermal_set_trip_temp(struct thermal_zone_device *zone,
snprintf(name, sizeof(name), "PAT%d", trip);
status = acpi_execute_simple_method(d->adev->handle, name,
- MILLICELSIUS_TO_DECI_KELVIN(temp));
+ millicelsius_to_deci_kelvin(temp));
if (ACPI_FAILURE(status))
return -EIO;
@@ -163,7 +164,7 @@ static int int340x_thermal_get_trip_config(acpi_handle handle, char *name,
if (ACPI_FAILURE(status))
return -EIO;
- *temp = DECI_KELVIN_TO_MILLICELSIUS(r);
+ *temp = deci_kelvin_to_millicelsius(r);
return 0;
}
diff --git a/drivers/thermal/intel/int340x_thermal/processor_thermal_device.c b/drivers/thermal/intel/int340x_thermal/processor_thermal_device.c
index 89a015387283..b1fd34516e28 100644
--- a/drivers/thermal/intel/int340x_thermal/processor_thermal_device.c
+++ b/drivers/thermal/intel/int340x_thermal/processor_thermal_device.c
@@ -42,6 +42,9 @@
/* IceLake thermal reporting device */
#define PCI_DEVICE_ID_PROC_ICL_THERMAL 0x8a03
+/* JasperLake thermal reporting device */
+#define PCI_DEVICE_ID_PROC_JSL_THERMAL 0x4503
+
#define DRV_NAME "proc_thermal"
struct power_config {
@@ -724,6 +727,7 @@ static const struct pci_device_id proc_thermal_pci_ids[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_PROC_GLK_THERMAL)},
{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_PROC_ICL_THERMAL),
.driver_data = (kernel_ulong_t)&rapl_mmio_hsw, },
+ { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_PROC_JSL_THERMAL)},
{ 0, },
};
diff --git a/drivers/thermal/intel/intel_pch_thermal.c b/drivers/thermal/intel/intel_pch_thermal.c
index 4f0bb8f502e1..56401fd4708d 100644
--- a/drivers/thermal/intel/intel_pch_thermal.c
+++ b/drivers/thermal/intel/intel_pch_thermal.c
@@ -13,6 +13,7 @@
#include <linux/pci.h>
#include <linux/acpi.h>
#include <linux/thermal.h>
+#include <linux/units.h>
#include <linux/pm.h>
/* Intel PCH thermal Device IDs */
@@ -23,6 +24,7 @@
#define PCH_THERMAL_DID_SKL_H 0xA131 /* Skylake PCH 100 series */
#define PCH_THERMAL_DID_CNL 0x9Df9 /* CNL PCH */
#define PCH_THERMAL_DID_CNL_H 0xA379 /* CNL-H PCH */
+#define PCH_THERMAL_DID_CML_H 0X06F9 /* CML-H PCH */
/* Wildcat Point-LP PCH Thermal registers */
#define WPT_TEMP 0x0000 /* Temperature */
@@ -92,7 +94,7 @@ static void pch_wpt_add_acpi_psv_trip(struct pch_thermal_device *ptd,
if (ACPI_SUCCESS(status)) {
unsigned long trip_temp;
- trip_temp = DECI_KELVIN_TO_MILLICELSIUS(r);
+ trip_temp = deci_kelvin_to_millicelsius(r);
if (trip_temp) {
ptd->psv_temp = trip_temp;
ptd->psv_trip_id = *nr_trips;
@@ -272,6 +274,7 @@ enum board_ids {
board_wpt,
board_skl,
board_cnl,
+ board_cml,
};
static const struct board_info {
@@ -294,6 +297,10 @@ static const struct board_info {
.name = "pch_cannonlake",
.ops = &pch_dev_ops_wpt,
},
+ [board_cml] = {
+ .name = "pch_cometlake",
+ .ops = &pch_dev_ops_wpt,
+ }
};
static int intel_pch_thermal_probe(struct pci_dev *pdev,
@@ -365,7 +372,7 @@ static void intel_pch_thermal_remove(struct pci_dev *pdev)
thermal_zone_device_unregister(ptd->tzd);
iounmap(ptd->hw_base);
pci_set_drvdata(pdev, NULL);
- pci_release_region(pdev, 0);
+ pci_release_regions(pdev);
pci_disable_device(pdev);
}
@@ -398,6 +405,8 @@ static const struct pci_device_id intel_pch_thermal_id[] = {
.driver_data = board_cnl, },
{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCH_THERMAL_DID_CNL_H),
.driver_data = board_cnl, },
+ { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCH_THERMAL_DID_CML_H),
+ .driver_data = board_cml, },
{ 0, },
};
MODULE_DEVICE_TABLE(pci, intel_pch_thermal_id);
diff --git a/drivers/thermal/max77620_thermal.c b/drivers/thermal/max77620_thermal.c
index 88fd0fbe0cfa..82d06c7411eb 100644
--- a/drivers/thermal/max77620_thermal.c
+++ b/drivers/thermal/max77620_thermal.c
@@ -33,7 +33,7 @@ struct max77620_therm_info {
/**
 * max77620_thermal_read_temp: Read PMIC die temperature.
* @data: Device specific data.
- * temp: Temperature in millidegrees Celsius
+ * @temp: Temperature in millidegrees Celsius
*
* The actual temperature of PMIC die is not available from PMIC.
* PMIC only tells the status if it has crossed or not the threshold level
diff --git a/drivers/thermal/mtk_thermal.c b/drivers/thermal/mtk_thermal.c
index acf4854cbb8b..76e30603d4d5 100644
--- a/drivers/thermal/mtk_thermal.c
+++ b/drivers/thermal/mtk_thermal.c
@@ -358,7 +358,7 @@ static const int mt7622_mux_values[MT7622_NUM_SENSORS] = { 0, };
static const int mt7622_vts_index[MT7622_NUM_SENSORS] = { VTS1 };
static const int mt7622_tc_offset[MT7622_NUM_CONTROLLER] = { 0x0, };
-/**
+/*
* The MT8173 thermal controller has four banks. Each bank can read up to
* four temperature sensors simultaneously. The MT8173 has a total of 5
* temperature sensors. We use each bank to measure a certain area of the
@@ -400,7 +400,7 @@ static const struct mtk_thermal_data mt8173_thermal_data = {
.sensor_mux_values = mt8173_mux_values,
};
-/**
+/*
* The MT2701 thermal controller has one bank, which can read up to
* three temperature sensors simultaneously. The MT2701 has a total of 3
* temperature sensors.
@@ -430,7 +430,7 @@ static const struct mtk_thermal_data mt2701_thermal_data = {
.sensor_mux_values = mt2701_mux_values,
};
-/**
+/*
* The MT2712 thermal controller has one bank, which can read up to
* four temperature sensors simultaneously. The MT2712 has a total of 4
* temperature sensors.
@@ -484,7 +484,7 @@ static const struct mtk_thermal_data mt7622_thermal_data = {
.sensor_mux_values = mt7622_mux_values,
};
-/**
+/*
* The MT8183 thermal controller has one bank for the current SW framework.
* The MT8183 has a total of 6 temperature sensors.
* There are two thermal controller to control the six sensor.
@@ -495,7 +495,6 @@ static const struct mtk_thermal_data mt7622_thermal_data = {
* data, and this indeed needs the temperatures of the individual banks
* for making better decisions.
*/
-
static const struct mtk_thermal_data mt8183_thermal_data = {
.auxadc_channel = MT8183_TEMP_AUXADC_CHANNEL,
.num_banks = MT8183_NUM_SENSORS_PER_ZONE,
@@ -519,7 +518,8 @@ static const struct mtk_thermal_data mt8183_thermal_data = {
/**
* raw_to_mcelsius - convert a raw ADC value to mcelsius
- * @mt: The thermal controller
+ * @mt: The thermal controller
+ * @sensno: sensor number
* @raw: raw ADC value
*
* This converts the raw ADC value to mcelsius using the SoC specific
diff --git a/drivers/thermal/of-thermal.c b/drivers/thermal/of-thermal.c
index dc5093be553e..ef0baa954ff0 100644
--- a/drivers/thermal/of-thermal.c
+++ b/drivers/thermal/of-thermal.c
@@ -493,7 +493,7 @@ thermal_zone_of_sensor_register(struct device *dev, int sensor_id, void *data,
if (!dev || !dev->of_node) {
of_node_put(np);
- return ERR_PTR(-EINVAL);
+ return ERR_PTR(-ENODEV);
}
sensor_np = of_node_get(dev->of_node);
@@ -754,7 +754,7 @@ end:
return ret;
}
-/**
+/*
* It maps 'enum thermal_trip_type' found in include/linux/thermal.h
 * into the device tree binding of the 'trip' property type.
*/
@@ -977,7 +977,7 @@ free_tz:
return ERR_PTR(ret);
}
-static inline void of_thermal_free_zone(struct __thermal_zone *tz)
+static __init void of_thermal_free_zone(struct __thermal_zone *tz)
{
struct __thermal_bind_params *tbp;
int i, j;
@@ -999,6 +999,38 @@ static inline void of_thermal_free_zone(struct __thermal_zone *tz)
}
/**
+ * of_thermal_destroy_zones - remove all zones parsed and allocated resources
+ *
+ * Finds all zones parsed and added to the thermal framework and remove them
+ * from the system, together with their resources.
+ *
+ */
+static __init void of_thermal_destroy_zones(void)
+{
+ struct device_node *np, *child;
+
+ np = of_find_node_by_name(NULL, "thermal-zones");
+ if (!np) {
+ pr_debug("unable to find thermal zones\n");
+ return;
+ }
+
+ for_each_available_child_of_node(np, child) {
+ struct thermal_zone_device *zone;
+
+ zone = thermal_zone_get_zone_by_name(child->name);
+ if (IS_ERR(zone))
+ continue;
+
+ thermal_zone_device_unregister(zone);
+ kfree(zone->tzp);
+ kfree(zone->ops);
+ of_thermal_free_zone(zone->devdata);
+ }
+ of_node_put(np);
+}
+
+/**
* of_parse_thermal_zones - parse device tree thermal data
*
* Initialization function that can be called by machine initialization
@@ -1087,35 +1119,3 @@ exit_free:
return -ENOMEM;
}
-
-/**
- * of_thermal_destroy_zones - remove all zones parsed and allocated resources
- *
- * Finds all zones parsed and added to the thermal framework and remove them
- * from the system, together with their resources.
- *
- */
-void of_thermal_destroy_zones(void)
-{
- struct device_node *np, *child;
-
- np = of_find_node_by_name(NULL, "thermal-zones");
- if (!np) {
- pr_debug("unable to find thermal zones\n");
- return;
- }
-
- for_each_available_child_of_node(np, child) {
- struct thermal_zone_device *zone;
-
- zone = thermal_zone_get_zone_by_name(child->name);
- if (IS_ERR(zone))
- continue;
-
- thermal_zone_device_unregister(zone);
- kfree(zone->tzp);
- kfree(zone->ops);
- of_thermal_free_zone(zone->devdata);
- }
- of_node_put(np);
-}
diff --git a/drivers/thermal/qoriq_thermal.c b/drivers/thermal/qoriq_thermal.c
index 45e9fcb172cc..874bc46e6c73 100644
--- a/drivers/thermal/qoriq_thermal.c
+++ b/drivers/thermal/qoriq_thermal.c
@@ -9,9 +9,12 @@
#include <linux/io.h>
#include <linux/of.h>
#include <linux/of_address.h>
+#include <linux/regmap.h>
+#include <linux/sizes.h>
#include <linux/thermal.h>
#include "thermal_core.h"
+#include "thermal_hwmon.h"
#define SITES_MAX 16
#define TMR_DISABLE 0x0
@@ -24,129 +27,80 @@
#define TMU_VER1 0x1
#define TMU_VER2 0x2
-/*
- * QorIQ TMU Registers
- */
-struct qoriq_tmu_site_regs {
- u32 tritsr; /* Immediate Temperature Site Register */
- u32 tratsr; /* Average Temperature Site Register */
- u8 res0[0x8];
-};
+#define REGS_TMR 0x000 /* Mode Register */
+#define TMR_DISABLE 0x0
+#define TMR_ME 0x80000000
+#define TMR_ALPF 0x0c000000
+#define TMR_MSITE_ALL GENMASK(15, 0)
-struct qoriq_tmu_regs_v1 {
- u32 tmr; /* Mode Register */
- u32 tsr; /* Status Register */
- u32 tmtmir; /* Temperature measurement interval Register */
- u8 res0[0x14];
- u32 tier; /* Interrupt Enable Register */
- u32 tidr; /* Interrupt Detect Register */
- u32 tiscr; /* Interrupt Site Capture Register */
- u32 ticscr; /* Interrupt Critical Site Capture Register */
- u8 res1[0x10];
- u32 tmhtcrh; /* High Temperature Capture Register */
- u32 tmhtcrl; /* Low Temperature Capture Register */
- u8 res2[0x8];
- u32 tmhtitr; /* High Temperature Immediate Threshold */
- u32 tmhtatr; /* High Temperature Average Threshold */
- u32 tmhtactr; /* High Temperature Average Crit Threshold */
- u8 res3[0x24];
- u32 ttcfgr; /* Temperature Configuration Register */
- u32 tscfgr; /* Sensor Configuration Register */
- u8 res4[0x78];
- struct qoriq_tmu_site_regs site[SITES_MAX];
- u8 res5[0x9f8];
- u32 ipbrr0; /* IP Block Revision Register 0 */
- u32 ipbrr1; /* IP Block Revision Register 1 */
- u8 res6[0x310];
- u32 ttrcr[4]; /* Temperature Range Control Register */
-};
+#define REGS_TMTMIR 0x008 /* Temperature measurement interval Register */
+#define TMTMIR_DEFAULT 0x0000000f
-struct qoriq_tmu_regs_v2 {
- u32 tmr; /* Mode Register */
- u32 tsr; /* Status Register */
- u32 tmsr; /* monitor site register */
- u32 tmtmir; /* Temperature measurement interval Register */
- u8 res0[0x10];
- u32 tier; /* Interrupt Enable Register */
- u32 tidr; /* Interrupt Detect Register */
- u8 res1[0x8];
- u32 tiiscr; /* interrupt immediate site capture register */
- u32 tiascr; /* interrupt average site capture register */
- u32 ticscr; /* Interrupt Critical Site Capture Register */
- u32 res2;
- u32 tmhtcr; /* monitor high temperature capture register */
- u32 tmltcr; /* monitor low temperature capture register */
- u32 tmrtrcr; /* monitor rising temperature rate capture register */
- u32 tmftrcr; /* monitor falling temperature rate capture register */
- u32 tmhtitr; /* High Temperature Immediate Threshold */
- u32 tmhtatr; /* High Temperature Average Threshold */
- u32 tmhtactr; /* High Temperature Average Crit Threshold */
- u32 res3;
- u32 tmltitr; /* monitor low temperature immediate threshold */
- u32 tmltatr; /* monitor low temperature average threshold register */
- u32 tmltactr; /* monitor low temperature average critical threshold */
- u32 res4;
- u32 tmrtrctr; /* monitor rising temperature rate critical threshold */
- u32 tmftrctr; /* monitor falling temperature rate critical threshold*/
- u8 res5[0x8];
- u32 ttcfgr; /* Temperature Configuration Register */
- u32 tscfgr; /* Sensor Configuration Register */
- u8 res6[0x78];
- struct qoriq_tmu_site_regs site[SITES_MAX];
- u8 res7[0x9f8];
- u32 ipbrr0; /* IP Block Revision Register 0 */
- u32 ipbrr1; /* IP Block Revision Register 1 */
- u8 res8[0x300];
- u32 teumr0;
- u32 teumr1;
- u32 teumr2;
- u32 res9;
- u32 ttrcr[4]; /* Temperature Range Control Register */
-};
+#define REGS_V2_TMSR 0x008 /* monitor site register */
-struct qoriq_tmu_data;
+#define REGS_V2_TMTMIR 0x00c /* Temperature measurement interval Register */
+
+#define REGS_TIER 0x020 /* Interrupt Enable Register */
+#define TIER_DISABLE 0x0
+
+#define REGS_TTCFGR 0x080 /* Temperature Configuration Register */
+#define REGS_TSCFGR 0x084 /* Sensor Configuration Register */
+
+#define REGS_TRITSR(n) (0x100 + 16 * (n)) /* Immediate Temperature
+ * Site Register
+ */
+#define TRITSR_V BIT(31)
+#define REGS_TTRnCR(n) (0xf10 + 4 * (n)) /* Temperature Range n
+ * Control Register
+ */
+#define REGS_IPBRR(n) (0xbf8 + 4 * (n)) /* IP Block Revision
+ * Register n
+ */
+#define REGS_V2_TEUMR(n) (0xf00 + 4 * (n))
/*
* Thermal zone data
*/
struct qoriq_sensor {
- struct thermal_zone_device *tzd;
- struct qoriq_tmu_data *qdata;
int id;
};
struct qoriq_tmu_data {
int ver;
- struct qoriq_tmu_regs_v1 __iomem *regs;
- struct qoriq_tmu_regs_v2 __iomem *regs_v2;
+ struct regmap *regmap;
struct clk *clk;
- bool little_endian;
- struct qoriq_sensor *sensor[SITES_MAX];
+ struct qoriq_sensor sensor[SITES_MAX];
};
-static void tmu_write(struct qoriq_tmu_data *p, u32 val, void __iomem *addr)
+static struct qoriq_tmu_data *qoriq_sensor_to_data(struct qoriq_sensor *s)
{
- if (p->little_endian)
- iowrite32(val, addr);
- else
- iowrite32be(val, addr);
-}
-
-static u32 tmu_read(struct qoriq_tmu_data *p, void __iomem *addr)
-{
- if (p->little_endian)
- return ioread32(addr);
- else
- return ioread32be(addr);
+ return container_of(s, struct qoriq_tmu_data, sensor[s->id]);
}
static int tmu_get_temp(void *p, int *temp)
{
struct qoriq_sensor *qsensor = p;
- struct qoriq_tmu_data *qdata = qsensor->qdata;
+ struct qoriq_tmu_data *qdata = qoriq_sensor_to_data(qsensor);
u32 val;
+ /*
+ * REGS_TRITSR(id) has the following layout:
+ *
+ * 31 ... 7 6 5 4 3 2 1 0
+ * V TEMP
+ *
+ * where the V bit signifies whether the measurement is ready and
+ * within the sensor range, and TEMP is an 8-bit value representing
+ * the temperature in degrees Celsius.
+ */
+ if (regmap_read_poll_timeout(qdata->regmap,
+ REGS_TRITSR(qsensor->id),
+ val,
+ val & TRITSR_V,
+ USEC_PER_MSEC,
+ 10 * USEC_PER_MSEC))
+ return -ENODATA;
- val = tmu_read(qdata, &qdata->regs->site[qsensor->id].tritsr);
*temp = (val & 0xff) * 1000;
return 0;
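
A worked decode of the TRITSR layout described in the comment above, as standalone C; the sample value is made up for illustration:

#include <assert.h>
#include <stdint.h>

#define TRITSR_V (1u << 31)

int main(void)
{
	uint32_t val = TRITSR_V | 46;	/* valid reading of 46 degrees C */

	assert(val & TRITSR_V);			/* measurement is ready */
	assert((val & 0xff) * 1000 == 46000);	/* reported in millicelsius */
	return 0;
}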
@@ -156,84 +110,82 @@ static const struct thermal_zone_of_device_ops tmu_tz_ops = {
.get_temp = tmu_get_temp,
};
-static int qoriq_tmu_register_tmu_zone(struct platform_device *pdev)
+static int qoriq_tmu_register_tmu_zone(struct device *dev,
+ struct qoriq_tmu_data *qdata)
{
- struct qoriq_tmu_data *qdata = platform_get_drvdata(pdev);
- int id, sites = 0;
+ int id;
+
+ if (qdata->ver == TMU_VER1) {
+ regmap_write(qdata->regmap, REGS_TMR,
+ TMR_MSITE_ALL | TMR_ME | TMR_ALPF);
+ } else {
+ regmap_write(qdata->regmap, REGS_V2_TMSR, TMR_MSITE_ALL);
+ regmap_write(qdata->regmap, REGS_TMR, TMR_ME | TMR_ALPF_V2);
+ }
for (id = 0; id < SITES_MAX; id++) {
- qdata->sensor[id] = devm_kzalloc(&pdev->dev,
- sizeof(struct qoriq_sensor), GFP_KERNEL);
- if (!qdata->sensor[id])
- return -ENOMEM;
-
- qdata->sensor[id]->id = id;
- qdata->sensor[id]->qdata = qdata;
- qdata->sensor[id]->tzd = devm_thermal_zone_of_sensor_register(
- &pdev->dev, id, qdata->sensor[id], &tmu_tz_ops);
- if (IS_ERR(qdata->sensor[id]->tzd)) {
- if (PTR_ERR(qdata->sensor[id]->tzd) == -ENODEV)
+ struct thermal_zone_device *tzd;
+ struct qoriq_sensor *sensor = &qdata->sensor[id];
+ int ret;
+
+ sensor->id = id;
+
+ tzd = devm_thermal_zone_of_sensor_register(dev, id,
+ sensor,
+ &tmu_tz_ops);
+ ret = PTR_ERR_OR_ZERO(tzd);
+ if (ret) {
+ if (ret == -ENODEV)
continue;
- else
- return PTR_ERR(qdata->sensor[id]->tzd);
+
+ regmap_write(qdata->regmap, REGS_TMR, TMR_DISABLE);
+ return ret;
}
- if (qdata->ver == TMU_VER1)
- sites |= 0x1 << (15 - id);
- else
- sites |= 0x1 << id;
- }
+ if (devm_thermal_add_hwmon_sysfs(tzd))
+ dev_warn(dev,
+ "Failed to add hwmon sysfs attributes\n");
- /* Enable monitoring */
- if (sites != 0) {
- if (qdata->ver == TMU_VER1) {
- tmu_write(qdata, sites | TMR_ME | TMR_ALPF,
- &qdata->regs->tmr);
- } else {
- tmu_write(qdata, sites, &qdata->regs_v2->tmsr);
- tmu_write(qdata, TMR_ME | TMR_ALPF_V2,
- &qdata->regs_v2->tmr);
- }
}
return 0;
}
-static int qoriq_tmu_calibration(struct platform_device *pdev)
+static int qoriq_tmu_calibration(struct device *dev,
+ struct qoriq_tmu_data *data)
{
int i, val, len;
u32 range[4];
const u32 *calibration;
- struct device_node *np = pdev->dev.of_node;
- struct qoriq_tmu_data *data = platform_get_drvdata(pdev);
+ struct device_node *np = dev->of_node;
len = of_property_count_u32_elems(np, "fsl,tmu-range");
if (len < 0 || len > 4) {
- dev_err(&pdev->dev, "invalid range data.\n");
+ dev_err(dev, "invalid range data.\n");
return len;
}
val = of_property_read_u32_array(np, "fsl,tmu-range", range, len);
if (val != 0) {
- dev_err(&pdev->dev, "failed to read range data.\n");
+ dev_err(dev, "failed to read range data.\n");
return val;
}
/* Init temperature range registers */
for (i = 0; i < len; i++)
- tmu_write(data, range[i], &data->regs->ttrcr[i]);
+ regmap_write(data->regmap, REGS_TTRnCR(i), range[i]);
calibration = of_get_property(np, "fsl,tmu-calibration", &len);
if (calibration == NULL || len % 8) {
- dev_err(&pdev->dev, "invalid calibration data.\n");
+ dev_err(dev, "invalid calibration data.\n");
return -ENODEV;
}
for (i = 0; i < len; i += 8, calibration += 2) {
val = of_read_number(calibration, 1);
- tmu_write(data, val, &data->regs->ttcfgr);
+ regmap_write(data->regmap, REGS_TTCFGR, val);
val = of_read_number(calibration + 1, 1);
- tmu_write(data, val, &data->regs->tscfgr);
+ regmap_write(data->regmap, REGS_TSCFGR, val);
}
return 0;
@@ -242,76 +194,117 @@ static int qoriq_tmu_calibration(struct platform_device *pdev)
static void qoriq_tmu_init_device(struct qoriq_tmu_data *data)
{
/* Disable interrupt, using polling instead */
- tmu_write(data, TIER_DISABLE, &data->regs->tier);
+ regmap_write(data->regmap, REGS_TIER, TIER_DISABLE);
/* Set update_interval */
+
if (data->ver == TMU_VER1) {
- tmu_write(data, TMTMIR_DEFAULT, &data->regs->tmtmir);
+ regmap_write(data->regmap, REGS_TMTMIR, TMTMIR_DEFAULT);
} else {
- tmu_write(data, TMTMIR_DEFAULT, &data->regs_v2->tmtmir);
- tmu_write(data, TEUMR0_V2, &data->regs_v2->teumr0);
+ regmap_write(data->regmap, REGS_V2_TMTMIR, TMTMIR_DEFAULT);
+ regmap_write(data->regmap, REGS_V2_TEUMR(0), TEUMR0_V2);
}
/* Disable monitoring */
- tmu_write(data, TMR_DISABLE, &data->regs->tmr);
+ regmap_write(data->regmap, REGS_TMR, TMR_DISABLE);
}
+static const struct regmap_range qoriq_yes_ranges[] = {
+ regmap_reg_range(REGS_TMR, REGS_TSCFGR),
+ regmap_reg_range(REGS_TTRnCR(0), REGS_TTRnCR(3)),
+ regmap_reg_range(REGS_V2_TEUMR(0), REGS_V2_TEUMR(2)),
+ regmap_reg_range(REGS_IPBRR(0), REGS_IPBRR(1)),
+ /* Read only registers below */
+ regmap_reg_range(REGS_TRITSR(0), REGS_TRITSR(15)),
+};
+
+static const struct regmap_access_table qoriq_wr_table = {
+ .yes_ranges = qoriq_yes_ranges,
+ .n_yes_ranges = ARRAY_SIZE(qoriq_yes_ranges) - 1,
+};
+
+static const struct regmap_access_table qoriq_rd_table = {
+ .yes_ranges = qoriq_yes_ranges,
+ .n_yes_ranges = ARRAY_SIZE(qoriq_yes_ranges),
+};
+
static int qoriq_tmu_probe(struct platform_device *pdev)
{
int ret;
u32 ver;
struct qoriq_tmu_data *data;
struct device_node *np = pdev->dev.of_node;
-
- data = devm_kzalloc(&pdev->dev, sizeof(struct qoriq_tmu_data),
+ struct device *dev = &pdev->dev;
+ const bool little_endian = of_property_read_bool(np, "little-endian");
+ const enum regmap_endian format_endian =
+ little_endian ? REGMAP_ENDIAN_LITTLE : REGMAP_ENDIAN_BIG;
+ const struct regmap_config regmap_config = {
+ .reg_bits = 32,
+ .val_bits = 32,
+ .reg_stride = 4,
+ .rd_table = &qoriq_rd_table,
+ .wr_table = &qoriq_wr_table,
+ .val_format_endian = format_endian,
+ .max_register = SZ_4K,
+ };
+ void __iomem *base;
+
+ data = devm_kzalloc(dev, sizeof(struct qoriq_tmu_data),
GFP_KERNEL);
if (!data)
return -ENOMEM;
- platform_set_drvdata(pdev, data);
-
- data->little_endian = of_property_read_bool(np, "little-endian");
+ base = devm_platform_ioremap_resource(pdev, 0);
+ ret = PTR_ERR_OR_ZERO(base);
+ if (ret) {
+ dev_err(dev, "Failed to get memory region\n");
+ return ret;
+ }
- data->regs = devm_platform_ioremap_resource(pdev, 0);
- if (IS_ERR(data->regs)) {
- dev_err(&pdev->dev, "Failed to get memory region\n");
- return PTR_ERR(data->regs);
+ data->regmap = devm_regmap_init_mmio(dev, base, &regmap_config);
+ ret = PTR_ERR_OR_ZERO(data->regmap);
+ if (ret) {
+ dev_err(dev, "Failed to init regmap (%d)\n", ret);
+ return ret;
}
- data->clk = devm_clk_get_optional(&pdev->dev, NULL);
+ data->clk = devm_clk_get_optional(dev, NULL);
if (IS_ERR(data->clk))
return PTR_ERR(data->clk);
ret = clk_prepare_enable(data->clk);
if (ret) {
- dev_err(&pdev->dev, "Failed to enable clock\n");
+ dev_err(dev, "Failed to enable clock\n");
return ret;
}
/* version register offset at: 0xbf8 on both v1 and v2 */
- ver = tmu_read(data, &data->regs->ipbrr0);
+ ret = regmap_read(data->regmap, REGS_IPBRR(0), &ver);
+ if (ret) {
+ dev_err(&pdev->dev, "Failed to read IP block version\n");
+ return ret;
+ }
data->ver = (ver >> 8) & 0xff;
- if (data->ver == TMU_VER2)
- data->regs_v2 = (void __iomem *)data->regs;
qoriq_tmu_init_device(data); /* TMU initialization */
- ret = qoriq_tmu_calibration(pdev); /* TMU calibration */
+ ret = qoriq_tmu_calibration(dev, data); /* TMU calibration */
if (ret < 0)
goto err;
- ret = qoriq_tmu_register_tmu_zone(pdev);
+ ret = qoriq_tmu_register_tmu_zone(dev, data);
if (ret < 0) {
- dev_err(&pdev->dev, "Failed to register sensors\n");
+ dev_err(dev, "Failed to register sensors\n");
ret = -ENODEV;
goto err;
}
+ platform_set_drvdata(pdev, data);
+
return 0;
err:
clk_disable_unprepare(data->clk);
- platform_set_drvdata(pdev, NULL);
return ret;
}
@@ -321,24 +314,21 @@ static int qoriq_tmu_remove(struct platform_device *pdev)
struct qoriq_tmu_data *data = platform_get_drvdata(pdev);
/* Disable monitoring */
- tmu_write(data, TMR_DISABLE, &data->regs->tmr);
+ regmap_write(data->regmap, REGS_TMR, TMR_DISABLE);
clk_disable_unprepare(data->clk);
- platform_set_drvdata(pdev, NULL);
-
return 0;
}
static int __maybe_unused qoriq_tmu_suspend(struct device *dev)
{
- u32 tmr;
struct qoriq_tmu_data *data = dev_get_drvdata(dev);
+ int ret;
- /* Disable monitoring */
- tmr = tmu_read(data, &data->regs->tmr);
- tmr &= ~TMR_ME;
- tmu_write(data, tmr, &data->regs->tmr);
+ ret = regmap_update_bits(data->regmap, REGS_TMR, TMR_ME, 0);
+ if (ret)
+ return ret;
clk_disable_unprepare(data->clk);
@@ -347,7 +337,6 @@ static int __maybe_unused qoriq_tmu_suspend(struct device *dev)
static int __maybe_unused qoriq_tmu_resume(struct device *dev)
{
- u32 tmr;
int ret;
struct qoriq_tmu_data *data = dev_get_drvdata(dev);
@@ -356,11 +345,7 @@ static int __maybe_unused qoriq_tmu_resume(struct device *dev)
return ret;
/* Enable monitoring */
- tmr = tmu_read(data, &data->regs->tmr);
- tmr |= TMR_ME;
- tmu_write(data, tmr, &data->regs->tmr);
-
- return 0;
+ return regmap_update_bits(data->regmap, REGS_TMR, TMR_ME, TMR_ME);
}
static SIMPLE_DEV_PM_OPS(qoriq_tmu_pm_ops,
diff --git a/drivers/thermal/rcar_gen3_thermal.c b/drivers/thermal/rcar_gen3_thermal.c
index 1460cf9d9f1c..72877bdc072d 100644
--- a/drivers/thermal/rcar_gen3_thermal.c
+++ b/drivers/thermal/rcar_gen3_thermal.c
@@ -182,9 +182,7 @@ static int rcar_gen3_thermal_get_temp(void *devdata, int *temp)
tsc->coef.a2);
mcelsius = FIXPT_TO_MCELSIUS(val);
- /* Make sure we are inside specifications */
- if ((mcelsius < MCELSIUS(-40)) || (mcelsius > MCELSIUS(125)))
- return -EIO;
+ /* Guaranteed operating range is -40C to 125C. */
/* Round value to device granularity setting */
*temp = rcar_gen3_thermal_round(mcelsius);
diff --git a/drivers/thermal/rcar_thermal.c b/drivers/thermal/rcar_thermal.c
index d0873de718da..8f1aafa2044e 100644
--- a/drivers/thermal/rcar_thermal.c
+++ b/drivers/thermal/rcar_thermal.c
@@ -219,7 +219,7 @@ static int rcar_thermal_update_temp(struct rcar_thermal_priv *priv)
* to get stable temperature.
* see "Usage Notes" on datasheet
*/
- udelay(300);
+ usleep_range(300, 400);
new = rcar_thermal_read(priv, THSSR) & CTEMP;
if (new == old) {
@@ -275,12 +275,7 @@ static int rcar_thermal_get_current_temp(struct rcar_thermal_priv *priv,
tmp = MCELSIUS((priv->ctemp * 5) - 60);
mutex_unlock(&priv->lock);
- if ((tmp < MCELSIUS(-45)) || (tmp > MCELSIUS(125))) {
- struct device *dev = rcar_priv_to_dev(priv);
-
- dev_err(dev, "it couldn't measure temperature correctly\n");
- return -EIO;
- }
+ /* Guaranteed operating range is -45C to 125C. */
*temp = tmp;
diff --git a/drivers/thermal/rockchip_thermal.c b/drivers/thermal/rockchip_thermal.c
index 343c2f5c5a25..7c1a8bccdcba 100644
--- a/drivers/thermal/rockchip_thermal.c
+++ b/drivers/thermal/rockchip_thermal.c
@@ -19,7 +19,7 @@
#include <linux/mfd/syscon.h>
#include <linux/pinctrl/consumer.h>
-/**
+/*
 * If the temperature stays high over a period of time, the resulting
 * TSHUT signal is sent to the CRU module to reset the entire chip, or
 * to the PMIC via GPIO.
@@ -29,7 +29,7 @@ enum tshut_mode {
TSHUT_MODE_GPIO,
};
-/**
+/*
 * The system thermal shutdown (TSHUT) polarity:
 * bit 8 is the tshut polarity.
 * 0: active low, 1: active high
@@ -39,7 +39,7 @@ enum tshut_polarity {
TSHUT_HIGH_ACTIVE,
};
-/**
+/*
* The system has two Temperature Sensors.
* sensor0 is for CPU, and sensor1 is for GPU.
*/
@@ -48,7 +48,7 @@ enum sensor_id {
SENSOR_GPU,
};
-/**
+/*
* The conversion table has the adc value and temperature.
* ADC_DECREMENT: the adc value is of diminishing.(e.g. rk3288_code_table)
* ADC_INCREMENT: the adc value is incremental.(e.g. rk3368_code_table)
@@ -58,6 +58,8 @@ enum adc_sort_mode {
ADC_INCREMENT,
};
+#include "thermal_hwmon.h"
+
/**
 * The maximum number of sensors is two in Rockchip SoCs.
* Two sensors: CPU and GPU sensor.
@@ -80,13 +82,14 @@ struct chip_tsadc_table {
/**
* struct rockchip_tsadc_chip - hold the private data of tsadc chip
- * @chn_id[SOC_MAX_SENSORS]: the sensor id of chip correspond to the channel
+ * @chn_id: array of sensor ids of chip corresponding to the channel
* @chn_num: the channel number of tsadc chip
* @tshut_temp: the hardware-controlled shutdown temperature value
* @tshut_mode: the hardware-controlled shutdown mode (0:CRU 1:GPIO)
* @tshut_polarity: the hardware-controlled active polarity (0:LOW 1:HIGH)
* @initialize: SoC special initialize tsadc controller method
* @irq_ack: clear the interrupt
+ * @control: enable/disable method for the tsadc controller
* @get_temp: get the temperature
* @set_alarm_temp: set the high temperature interrupt
* @set_tshut_temp: set the hardware-controlled shutdown temperature
@@ -139,7 +142,7 @@ struct rockchip_thermal_sensor {
* @chip: pointer to the platform/configuration data
* @pdev: platform device of thermal
* @reset: the reset controller of tsadc
- * @sensors[SOC_MAX_SENSORS]: the thermal sensor
+ * @sensors: array of thermal sensors
 * @clk: the controller clock, divided down from the external 24MHz clock
* @pclk: the advanced peripherals bus clock
* @grf: the general register file will be used to do static set by software
@@ -590,6 +593,9 @@ static int rk_tsadcv2_code_to_temp(const struct chip_tsadc_table *table,
/**
* rk_tsadcv2_initialize - initialize TASDC Controller.
+ * @grf: the general register file will be used to do static set by software
+ * @regs: the base address of tsadc controller
+ * @tshut_polarity: the hardware-controlled active polarity (0:LOW 1:HIGH)
*
* (1) Set TSADC_V2_AUTO_PERIOD:
* Configure the interleave between every two accessing of
@@ -624,6 +630,9 @@ static void rk_tsadcv2_initialize(struct regmap *grf, void __iomem *regs,
/**
 * rk_tsadcv3_initialize - initialize TSADC Controller.
 + * @grf: the general register file, used for static configuration by software
+ * @regs: the base address of tsadc controller
+ * @tshut_polarity: the hardware-controlled active polarity (0:LOW 1:HIGH)
*
* (1) The tsadc control power sequence.
*
@@ -723,6 +732,8 @@ static void rk_tsadcv2_control(void __iomem *regs, bool enable)
/**
 * rk_tsadcv3_control - enable or disable the tsadc controller.
+ * @regs: the base address of tsadc controller
+ * @enable: boolean flag to enable the controller
*
 * NOTE: The TSADC controller works in auto mode, and some SoCs need to set
 * the tsadc_q_sel bit in TSADCV2_AUTO_CON[1]. The (1024 - tsadc_q) as output
@@ -1206,6 +1217,7 @@ rockchip_thermal_register_sensor(struct platform_device *pdev,
/**
* Reset TSADC Controller, reset all tsadc registers.
+ * @reset: the reset controller of tsadc
*/
static void rockchip_thermal_reset_controller(struct reset_control *reset)
{
@@ -1321,8 +1333,15 @@ static int rockchip_thermal_probe(struct platform_device *pdev)
thermal->chip->control(thermal->regs, true);
- for (i = 0; i < thermal->chip->chn_num; i++)
+ for (i = 0; i < thermal->chip->chn_num; i++) {
rockchip_thermal_toggle_sensor(&thermal->sensors[i], true);
+ thermal->sensors[i].tzd->tzp->no_hwmon = false;
+ error = thermal_add_hwmon_sysfs(thermal->sensors[i].tzd);
+ if (error)
+ dev_warn(&pdev->dev,
+ "failed to register sensor %d with hwmon: %d\n",
+ i, error);
+ }
platform_set_drvdata(pdev, thermal);
@@ -1344,6 +1363,7 @@ static int rockchip_thermal_remove(struct platform_device *pdev)
for (i = 0; i < thermal->chip->chn_num; i++) {
struct rockchip_thermal_sensor *sensor = &thermal->sensors[i];
+ thermal_remove_hwmon_sysfs(sensor->tzd);
rockchip_thermal_toggle_sensor(sensor, false);
}
diff --git a/drivers/thermal/samsung/Kconfig b/drivers/thermal/samsung/Kconfig
index fe0d2ba51392..f4eff5a41a84 100644
--- a/drivers/thermal/samsung/Kconfig
+++ b/drivers/thermal/samsung/Kconfig
@@ -5,7 +5,7 @@ config EXYNOS_THERMAL
depends on HAS_IOMEM
help
If you say yes here you get support for the TMU (Thermal Management
- Unit) driver for SAMSUNG EXYNOS series of SoCs. This driver initialises
+ Unit) driver for Samsung Exynos series of SoCs. This driver initialises
the TMU, reports temperature and handles cooling action if defined.
This driver uses the Exynos core thermal APIs and TMU configuration
data from the supported SoCs.
diff --git a/drivers/thermal/samsung/exynos_tmu.c b/drivers/thermal/samsung/exynos_tmu.c
index fb2c55123a99..fd4a17812f33 100644
--- a/drivers/thermal/samsung/exynos_tmu.c
+++ b/drivers/thermal/samsung/exynos_tmu.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0-or-later
/*
- * exynos_tmu.c - Samsung EXYNOS TMU (Thermal Management Unit)
+ * exynos_tmu.c - Samsung Exynos TMU (Thermal Management Unit)
*
* Copyright (C) 2014 Samsung Electronics
* Bartlomiej Zolnierkiewicz <b.zolnierkie@samsung.com>
@@ -138,7 +138,7 @@ enum soc_type {
/**
* struct exynos_tmu_data : A structure to hold the private data of the TMU
- driver
+ * driver
* @id: identifier of the one instance of the TMU controller.
* @base: base address of the single instance of the TMU controller.
* @base_second: base address of the common registers of the TMU controller.
@@ -162,8 +162,11 @@ enum soc_type {
* 0 < reference_voltage <= 31
* @regulator: pointer to the TMU regulator structure.
* @reg_conf: pointer to structure to register with core thermal.
+ * @tzd: pointer to thermal_zone_device structure
* @ntrip: number of supported trip points.
* @enabled: current status of TMU device
+ * @tmu_set_trip_temp: SoC specific method to set trip (rising threshold)
 + * @tmu_set_trip_hyst: SoC specific method to set hysteresis (falling threshold)
* @tmu_initialize: SoC specific TMU initialization method
* @tmu_control: SoC specific TMU control method
* @tmu_read: SoC specific TMU temperature read method
@@ -1183,7 +1186,7 @@ static struct platform_driver exynos_tmu_driver = {
module_platform_driver(exynos_tmu_driver);
-MODULE_DESCRIPTION("EXYNOS TMU Driver");
+MODULE_DESCRIPTION("Exynos TMU Driver");
MODULE_AUTHOR("Donggeun Kim <dg77.kim@samsung.com>");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:exynos-tmu");
diff --git a/drivers/thermal/st/stm_thermal.c b/drivers/thermal/st/stm_thermal.c
index cf9ddc52f30e..ad9e3bf8fdf6 100644
--- a/drivers/thermal/st/stm_thermal.c
+++ b/drivers/thermal/st/stm_thermal.c
@@ -30,7 +30,7 @@
#define DTS_DR_OFFSET 0x1C
#define DTS_SR_OFFSET 0x20
#define DTS_ITENR_OFFSET 0x24
-#define DTS_CIFR_OFFSET 0x28
+#define DTS_ICIFR_OFFSET 0x28
/* DTS_CFGR1 register mask definitions */
#define HSREF_CLK_DIV_MASK GENMASK(30, 24)
@@ -51,10 +51,16 @@
/* DTS_DR register mask definitions */
#define TS1_MFREQ_MASK GENMASK(15, 0)
+/* DTS_ITENR register mask definitions */
+#define ITENR_MASK (GENMASK(2, 0) | GENMASK(6, 4))
+
+/* DTS_ICIFR register mask definitions */
+#define ICIFR_MASK (GENMASK(2, 0) | GENMASK(6, 4))
+
/* Less significant bit position definitions */
#define TS1_T0_POS 16
-#define TS1_SMP_TIME_POS 16
#define TS1_HITTHD_POS 16
+#define TS1_LITTHD_POS 0
#define HSREF_CLK_DIV_POS 24
/* DTS_CFGR1 bit definitions */
@@ -76,58 +82,59 @@
#define ONE_MHZ 1000000
#define POLL_TIMEOUT 5000
#define STARTUP_TIME 40
-#define TS1_T0_VAL0 30
-#define TS1_T0_VAL1 130
+#define TS1_T0_VAL0 30000 /* 30 celsius */
+#define TS1_T0_VAL1 130000 /* 130 celsius */
#define NO_HW_TRIG 0
-
-/* The Thermal Framework expects millidegrees */
-#define mcelsius(temp) ((temp) * 1000)
-
-/* The Sensor expects oC degrees */
-#define celsius(temp) ((temp) / 1000)
+#define SAMPLING_TIME 15
struct stm_thermal_sensor {
struct device *dev;
struct thermal_zone_device *th_dev;
enum thermal_device_mode mode;
struct clk *clk;
- int high_temp;
- int low_temp;
- int temp_critical;
- int temp_passive;
unsigned int low_temp_enabled;
- int num_trips;
+ unsigned int high_temp_enabled;
int irq;
- unsigned int irq_enabled;
void __iomem *base;
int t0, fmt0, ramp_coeff;
};
-static irqreturn_t stm_thermal_alarm_irq(int irq, void *sdata)
+static int stm_enable_irq(struct stm_thermal_sensor *sensor)
{
- struct stm_thermal_sensor *sensor = sdata;
+ u32 value;
- disable_irq_nosync(irq);
- sensor->irq_enabled = false;
+ dev_dbg(sensor->dev, "low:%d high:%d\n", sensor->low_temp_enabled,
+ sensor->high_temp_enabled);
- return IRQ_WAKE_THREAD;
+ /* Disable IT generation for low and high thresholds */
+ value = readl_relaxed(sensor->base + DTS_ITENR_OFFSET);
+ value &= ~(LOW_THRESHOLD | HIGH_THRESHOLD);
+
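+	/*
+	 * Note: the mapping below is deliberately crossed. Thresholds are
+	 * programmed in sampled clock counts, which shrink as the
+	 * temperature rises, so the low-temperature trip uses the hardware
+	 * HIGH threshold and the high-temperature trip uses the LOW one
+	 * (see also stm_thermal_set_trips()).
+	 */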
+ if (sensor->low_temp_enabled)
+ value |= HIGH_THRESHOLD;
+
+ if (sensor->high_temp_enabled)
+ value |= LOW_THRESHOLD;
+
+ /* Enable interrupts */
+ writel_relaxed(value, sensor->base + DTS_ITENR_OFFSET);
+
+ return 0;
}
-static irqreturn_t stm_thermal_alarm_irq_thread(int irq, void *sdata)
+static irqreturn_t stm_thermal_irq_handler(int irq, void *sdata)
{
- u32 value;
struct stm_thermal_sensor *sensor = sdata;
- /* read IT reason in SR and clear flags */
- value = readl_relaxed(sensor->base + DTS_SR_OFFSET);
+ dev_dbg(sensor->dev, "sr:%d\n",
+ readl_relaxed(sensor->base + DTS_SR_OFFSET));
- if ((value & LOW_THRESHOLD) == LOW_THRESHOLD)
- writel_relaxed(LOW_THRESHOLD, sensor->base + DTS_CIFR_OFFSET);
+ thermal_zone_device_update(sensor->th_dev, THERMAL_EVENT_UNSPECIFIED);
- if ((value & HIGH_THRESHOLD) == HIGH_THRESHOLD)
- writel_relaxed(HIGH_THRESHOLD, sensor->base + DTS_CIFR_OFFSET);
+ stm_enable_irq(sensor);
- thermal_zone_device_update(sensor->th_dev, THERMAL_EVENT_UNSPECIFIED);
 + /* Acknowledge all DTS irqs */
+ writel_relaxed(ICIFR_MASK, sensor->base + DTS_ICIFR_OFFSET);
return IRQ_HANDLED;
}
@@ -160,6 +167,8 @@ static int stm_sensor_power_on(struct stm_thermal_sensor *sensor)
writel_relaxed(value, sensor->base +
DTS_CFGR1_OFFSET);
+ sensor->mode = THERMAL_DEVICE_ENABLED;
+
return 0;
}
@@ -167,6 +176,8 @@ static int stm_sensor_power_off(struct stm_thermal_sensor *sensor)
{
u32 value;
+ sensor->mode = THERMAL_DEVICE_DISABLED;
+
/* Stop measuring */
value = readl_relaxed(sensor->base + DTS_CFGR1_OFFSET);
value &= ~TS1_START;
@@ -263,60 +274,17 @@ static int stm_thermal_calculate_threshold(struct stm_thermal_sensor *sensor,
int temp, u32 *th)
{
int freqM;
- u32 sampling_time;
-
- /* Retrieve the number of periods to sample */
- sampling_time = (readl_relaxed(sensor->base + DTS_CFGR1_OFFSET) &
- TS1_SMP_TIME_MASK) >> TS1_SMP_TIME_POS;
/* Figure out the CLK_PTAT frequency for a given temperature */
- freqM = ((temp - sensor->t0) * sensor->ramp_coeff)
- + sensor->fmt0;
-
- dev_dbg(sensor->dev, "%s: freqM for threshold = %d Hz",
- __func__, freqM);
+ freqM = ((temp - sensor->t0) * sensor->ramp_coeff) / 1000 +
+ sensor->fmt0;
/* Figure out the threshold sample number */
- *th = clk_get_rate(sensor->clk);
+ *th = clk_get_rate(sensor->clk) * SAMPLING_TIME / freqM;
if (!*th)
return -EINVAL;
- *th = *th / freqM;
-
- *th *= sampling_time;
-
- return 0;
-}
-
-static int stm_thermal_set_threshold(struct stm_thermal_sensor *sensor)
-{
- u32 value, th;
- int ret;
-
- value = readl_relaxed(sensor->base + DTS_ITR1_OFFSET);
-
- /* Erase threshold content */
- value &= ~(TS1_LITTHD_MASK | TS1_HITTHD_MASK);
-
- /* Retrieve the sample threshold number th for a given temperature */
- ret = stm_thermal_calculate_threshold(sensor, sensor->high_temp, &th);
- if (ret)
- return ret;
-
- value |= th & TS1_LITTHD_MASK;
-
- if (sensor->low_temp_enabled) {
- /* Retrieve the sample threshold */
- ret = stm_thermal_calculate_threshold(sensor, sensor->low_temp,
- &th);
- if (ret)
- return ret;
-
- value |= (TS1_HITTHD_MASK & (th << TS1_HITTHD_POS));
- }
-
- /* Write value on the Low interrupt threshold */
- writel_relaxed(value, sensor->base + DTS_ITR1_OFFSET);
+ dev_dbg(sensor->dev, "freqM=%d Hz, threshold=0x%x", freqM, *th);
return 0;
}
@@ -326,77 +294,57 @@ static int stm_disable_irq(struct stm_thermal_sensor *sensor)
{
u32 value;
- /* Disable IT generation for low and high thresholds */
+ /* Disable IT generation */
value = readl_relaxed(sensor->base + DTS_ITENR_OFFSET);
- writel_relaxed(value & ~(LOW_THRESHOLD | HIGH_THRESHOLD),
- sensor->base + DTS_ITENR_OFFSET);
-
- dev_dbg(sensor->dev, "%s: IT disabled on sensor side", __func__);
-
- return 0;
-}
-
-/* Enable temperature interrupt */
-static int stm_enable_irq(struct stm_thermal_sensor *sensor)
-{
- u32 value;
-
- /*
- * Code below enables High temperature threshold using a low threshold
- * sampling value
- */
-
- /* Make sure LOW_THRESHOLD IT is clear before enabling */
- writel_relaxed(LOW_THRESHOLD, sensor->base + DTS_CIFR_OFFSET);
-
- /* Enable IT generation for low threshold */
- value = readl_relaxed(sensor->base + DTS_ITENR_OFFSET);
- value |= LOW_THRESHOLD;
-
- /* Enable the low temperature threshold if needed */
- if (sensor->low_temp_enabled) {
- /* Make sure HIGH_THRESHOLD IT is clear before enabling */
- writel_relaxed(HIGH_THRESHOLD, sensor->base + DTS_CIFR_OFFSET);
-
- /* Enable IT generation for high threshold */
- value |= HIGH_THRESHOLD;
- }
-
- /* Enable thresholds */
+ value &= ~ITENR_MASK;
writel_relaxed(value, sensor->base + DTS_ITENR_OFFSET);
- dev_dbg(sensor->dev, "%s: IT enabled on sensor side", __func__);
-
return 0;
}
-static int stm_thermal_update_threshold(struct stm_thermal_sensor *sensor)
+static int stm_thermal_set_trips(void *data, int low, int high)
{
+ struct stm_thermal_sensor *sensor = data;
+ u32 itr1, th;
int ret;
- sensor->mode = THERMAL_DEVICE_DISABLED;
+ dev_dbg(sensor->dev, "set trips %d <--> %d\n", low, high);
- ret = stm_sensor_power_off(sensor);
- if (ret)
- return ret;
+ /* Erase threshold content */
+ itr1 = readl_relaxed(sensor->base + DTS_ITR1_OFFSET);
+ itr1 &= ~(TS1_LITTHD_MASK | TS1_HITTHD_MASK);
- ret = stm_disable_irq(sensor);
- if (ret)
- return ret;
+ /*
+ * Disable low-temp if "low" is too small. As per thermal framework
+ * API, we use -INT_MAX rather than INT_MIN.
+ */
- ret = stm_thermal_set_threshold(sensor);
- if (ret)
- return ret;
+ if (low > -INT_MAX) {
+ sensor->low_temp_enabled = 1;
 + /* add 0.5 C of hysteresis due to measurement error */
+ ret = stm_thermal_calculate_threshold(sensor, low - 500, &th);
+ if (ret)
+ return ret;
- ret = stm_enable_irq(sensor);
- if (ret)
- return ret;
+ itr1 |= (TS1_HITTHD_MASK & (th << TS1_HITTHD_POS));
+ } else {
+ sensor->low_temp_enabled = 0;
+ }
- ret = stm_sensor_power_on(sensor);
- if (ret)
- return ret;
+ /* Disable high-temp if "high" is too big. */
+ if (high < INT_MAX) {
+ sensor->high_temp_enabled = 1;
+ ret = stm_thermal_calculate_threshold(sensor, high, &th);
+ if (ret)
+ return ret;
- sensor->mode = THERMAL_DEVICE_ENABLED;
+ itr1 |= (TS1_LITTHD_MASK & (th << TS1_LITTHD_POS));
+ } else {
+ sensor->high_temp_enabled = 0;
+ }
+
 + /* Write new threshold values */
+ writel_relaxed(itr1, sensor->base + DTS_ITR1_OFFSET);
return 0;
}
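For context, set_trips() receives the window that the thermal core derives from the zone's trip points around the current temperature; a side with no trip beyond it is passed as -INT_MAX or INT_MAX, which is exactly what the checks above test for. A simplified sketch of that contract (not the core's literal code, hysteresis handling omitted):

	/* Sketch: how the core picks the window handed to set_trips(). */
	int low = -INT_MAX, high = INT_MAX, trip_temp, i;

	for (i = 0; i < tz->trips; i++) {
		tz->ops->get_trip_temp(tz, i, &trip_temp);
		if (trip_temp < tz->temperature && trip_temp > low)
			low = trip_temp;	/* nearest trip below */
		if (trip_temp > tz->temperature && trip_temp < high)
			high = trip_temp;	/* nearest trip above */
	}
	tz->ops->set_trips(tz, low, high);	/* sentinels if a side is empty */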
@@ -405,76 +353,26 @@ static int stm_thermal_update_threshold(struct stm_thermal_sensor *sensor)
static int stm_thermal_get_temp(void *data, int *temp)
{
struct stm_thermal_sensor *sensor = data;
- u32 sampling_time;
+ u32 periods;
int freqM, ret;
if (sensor->mode != THERMAL_DEVICE_ENABLED)
return -EAGAIN;
- /* Retrieve the number of samples */
- ret = readl_poll_timeout(sensor->base + DTS_DR_OFFSET, freqM,
- (freqM & TS1_MFREQ_MASK), STARTUP_TIME,
- POLL_TIMEOUT);
-
+ /* Retrieve the number of periods sampled */
+ ret = readl_relaxed_poll_timeout(sensor->base + DTS_DR_OFFSET, periods,
+ (periods & TS1_MFREQ_MASK),
+ STARTUP_TIME, POLL_TIMEOUT);
if (ret)
return ret;
- if (!freqM)
- return -ENODATA;
-
- /* Retrieve the number of periods sampled */
- sampling_time = (readl_relaxed(sensor->base + DTS_CFGR1_OFFSET) &
- TS1_SMP_TIME_MASK) >> TS1_SMP_TIME_POS;
-
- /* Figure out the number of samples per period */
- freqM /= sampling_time;
-
/* Figure out the CLK_PTAT frequency */
- freqM = clk_get_rate(sensor->clk) / freqM;
+ freqM = (clk_get_rate(sensor->clk) * SAMPLING_TIME) / periods;
if (!freqM)
return -EINVAL;
- dev_dbg(sensor->dev, "%s: freqM=%d\n", __func__, freqM);
-
 /* Figure out the temperature in millidegrees Celsius */
- *temp = mcelsius(sensor->t0 + ((freqM - sensor->fmt0) /
- sensor->ramp_coeff));
-
- dev_dbg(sensor->dev, "%s: temperature = %d millicelsius",
- __func__, *temp);
-
- /* Update thresholds */
- if (sensor->num_trips > 1) {
- /* Update alarm threshold value to next higher trip point */
- if (sensor->high_temp == sensor->temp_passive &&
- celsius(*temp) >= sensor->temp_passive) {
- sensor->high_temp = sensor->temp_critical;
- sensor->low_temp = sensor->temp_passive;
- sensor->low_temp_enabled = true;
- ret = stm_thermal_update_threshold(sensor);
- if (ret)
- return ret;
- }
-
- if (sensor->high_temp == sensor->temp_critical &&
- celsius(*temp) < sensor->temp_passive) {
- sensor->high_temp = sensor->temp_passive;
- sensor->low_temp_enabled = false;
- ret = stm_thermal_update_threshold(sensor);
- if (ret)
- return ret;
- }
-
- /*
- * Re-enable alarm IRQ if temperature below critical
- * temperature
- */
- if (!sensor->irq_enabled &&
- (celsius(*temp) < sensor->temp_critical)) {
- sensor->irq_enabled = true;
- enable_irq(sensor->irq);
- }
- }
+ *temp = (freqM - sensor->fmt0) * 1000 / sensor->ramp_coeff + sensor->t0;
return 0;
}
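The threshold and temperature conversions above are exact inverses: the hardware counts reference-clock cycles over SAMPLING_TIME periods of CLK_PTAT, whose frequency tracks temperature linearly through the fused t0/fmt0/ramp_coeff values. A standalone sketch of the round trip, with made-up calibration numbers standing in for the fuse data:

	/* Sketch of the DTS linear model round trip (all values invented). */
	#include <stdio.h>
	#define SAMPLING_TIME 15

	static const long clk_rate = 24000000;	/* assumed ref clock, Hz */
	static const int t0 = 30000;		/* 30 C, in millicelsius */
	static const int fmt0 = 34000;		/* CLK_PTAT freq at t0, Hz */
	static const int ramp_coeff = 404;	/* Hz per degree C */

	static long temp_to_th(int mc)
	{
		long freqM = (long)(mc - t0) * ramp_coeff / 1000 + fmt0;
		return clk_rate * SAMPLING_TIME / freqM;
	}

	static int th_to_temp(long th)
	{
		long freqM = clk_rate * SAMPLING_TIME / th;
		return (freqM - fmt0) * 1000 / ramp_coeff + t0;
	}

	int main(void)
	{
		long th = temp_to_th(85000);	/* 85 C trip */
		printf("th=%ld -> %d mC\n", th, th_to_temp(th));
		return 0;	/* prints: th=6403 -> 85007 mC */
	}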
@@ -493,8 +391,8 @@ static int stm_register_irq(struct stm_thermal_sensor *sensor)
}
ret = devm_request_threaded_irq(dev, sensor->irq,
- stm_thermal_alarm_irq,
- stm_thermal_alarm_irq_thread,
+ NULL,
+ stm_thermal_irq_handler,
IRQF_ONESHOT,
dev->driver->name, sensor);
if (ret) {
@@ -503,8 +401,6 @@ static int stm_register_irq(struct stm_thermal_sensor *sensor)
return ret;
}
- sensor->irq_enabled = true;
-
dev_dbg(dev, "%s: thermal IRQ registered", __func__);
return 0;
@@ -514,6 +410,8 @@ static int stm_thermal_sensor_off(struct stm_thermal_sensor *sensor)
{
int ret;
+ stm_disable_irq(sensor);
+
ret = stm_sensor_power_off(sensor);
if (ret)
return ret;
@@ -526,7 +424,6 @@ static int stm_thermal_sensor_off(struct stm_thermal_sensor *sensor)
static int stm_thermal_prepare(struct stm_thermal_sensor *sensor)
{
int ret;
- struct device *dev = sensor->dev;
ret = clk_prepare_enable(sensor->clk);
if (ret)
@@ -540,26 +437,8 @@ static int stm_thermal_prepare(struct stm_thermal_sensor *sensor)
if (ret)
goto thermal_unprepare;
- /* Set threshold(s) for IRQ */
- ret = stm_thermal_set_threshold(sensor);
- if (ret)
- goto thermal_unprepare;
-
- ret = stm_enable_irq(sensor);
- if (ret)
- goto thermal_unprepare;
-
- ret = stm_sensor_power_on(sensor);
- if (ret) {
- dev_err(dev, "%s: failed to power on sensor\n", __func__);
- goto irq_disable;
- }
-
return 0;
-irq_disable:
- stm_disable_irq(sensor);
-
thermal_unprepare:
clk_disable_unprepare(sensor->clk);
@@ -576,8 +455,6 @@ static int stm_thermal_suspend(struct device *dev)
if (ret)
return ret;
- sensor->mode = THERMAL_DEVICE_DISABLED;
-
return 0;
}
@@ -590,7 +467,12 @@ static int stm_thermal_resume(struct device *dev)
if (ret)
return ret;
- sensor->mode = THERMAL_DEVICE_ENABLED;
+ ret = stm_sensor_power_on(sensor);
+ if (ret)
+ return ret;
+
+ thermal_zone_device_update(sensor->th_dev, THERMAL_EVENT_UNSPECIFIED);
+ stm_enable_irq(sensor);
return 0;
}
@@ -600,6 +482,7 @@ SIMPLE_DEV_PM_OPS(stm_thermal_pm_ops, stm_thermal_suspend, stm_thermal_resume);
static const struct thermal_zone_of_device_ops stm_tz_ops = {
.get_temp = stm_thermal_get_temp,
+ .set_trips = stm_thermal_set_trips,
};
static const struct of_device_id stm_thermal_of_match[] = {
@@ -612,9 +495,8 @@ static int stm_thermal_probe(struct platform_device *pdev)
{
struct stm_thermal_sensor *sensor;
struct resource *res;
- const struct thermal_trip *trip;
void __iomem *base;
- int ret, i;
+ int ret;
if (!pdev->dev.of_node) {
dev_err(&pdev->dev, "%s: device tree node not found\n",
@@ -645,10 +527,23 @@ static int stm_thermal_probe(struct platform_device *pdev)
return PTR_ERR(sensor->clk);
}
- /* Register IRQ into GIC */
- ret = stm_register_irq(sensor);
- if (ret)
+ stm_disable_irq(sensor);
+
+ /* Clear irq flags */
+ writel_relaxed(ICIFR_MASK, sensor->base + DTS_ICIFR_OFFSET);
+
+ /* Configure and enable HW sensor */
+ ret = stm_thermal_prepare(sensor);
+ if (ret) {
+ dev_err(&pdev->dev, "Error prepare sensor: %d\n", ret);
+ return ret;
+ }
+
+ ret = stm_sensor_power_on(sensor);
+ if (ret) {
+ dev_err(&pdev->dev, "Error power on sensor: %d\n", ret);
return ret;
+ }
sensor->th_dev = devm_thermal_zone_of_sensor_register(&pdev->dev, 0,
sensor,
@@ -661,53 +556,12 @@ static int stm_thermal_probe(struct platform_device *pdev)
return ret;
}
- if (!sensor->th_dev->ops->get_crit_temp) {
- /* Critical point must be provided */
- ret = -EINVAL;
- goto err_tz;
- }
-
- ret = sensor->th_dev->ops->get_crit_temp(sensor->th_dev,
- &sensor->temp_critical);
- if (ret) {
- dev_err(&pdev->dev,
- "Not able to read critical_temp: %d\n", ret);
+ /* Register IRQ into GIC */
+ ret = stm_register_irq(sensor);
+ if (ret)
goto err_tz;
- }
-
- sensor->temp_critical = celsius(sensor->temp_critical);
-
- /* Set thresholds for IRQ */
- sensor->high_temp = sensor->temp_critical;
-
- trip = of_thermal_get_trip_points(sensor->th_dev);
- sensor->num_trips = of_thermal_get_ntrips(sensor->th_dev);
-
- /* Find out passive temperature if it exists */
- for (i = (sensor->num_trips - 1); i >= 0; i--) {
- if (trip[i].type == THERMAL_TRIP_PASSIVE) {
- sensor->temp_passive = celsius(trip[i].temperature);
- /* Update high temperature threshold */
- sensor->high_temp = sensor->temp_passive;
- }
- }
- /*
- * Ensure low_temp_enabled flag is disabled.
- * By disabling low_temp_enabled, low threshold IT will not be
- * configured neither enabled because it is not needed as high
- * threshold is set on the lowest temperature trip point after
- * probe.
- */
- sensor->low_temp_enabled = false;
-
- /* Configure and enable HW sensor */
- ret = stm_thermal_prepare(sensor);
- if (ret) {
- dev_err(&pdev->dev,
- "Not able to enable sensor: %d\n", ret);
- goto err_tz;
- }
+ stm_enable_irq(sensor);
/*
* Thermal_zone doesn't enable hwmon as default,
@@ -718,8 +572,6 @@ static int stm_thermal_probe(struct platform_device *pdev)
if (ret)
goto err_tz;
- sensor->mode = THERMAL_DEVICE_ENABLED;
-
dev_info(&pdev->dev, "%s: Driver initialized successfully\n",
__func__);
diff --git a/drivers/thermal/step_wise.c b/drivers/thermal/step_wise.c
index 6e051cbd824f..2ae7198d3067 100644
--- a/drivers/thermal/step_wise.c
+++ b/drivers/thermal/step_wise.c
@@ -174,8 +174,8 @@ static void thermal_zone_trip_update(struct thermal_zone_device *tz, int trip)
/**
* step_wise_throttle - throttles devices associated with the given zone
- * @tz - thermal_zone_device
- * @trip - trip point index
+ * @tz: thermal_zone_device
+ * @trip: trip point index
*
* Throttling Logic: This uses the trend of the thermal zone to throttle.
* If the thermal zone is 'heating up' this throttles all the cooling
diff --git a/drivers/thermal/sun8i_thermal.c b/drivers/thermal/sun8i_thermal.c
new file mode 100644
index 000000000000..74d73be16496
--- /dev/null
+++ b/drivers/thermal/sun8i_thermal.c
@@ -0,0 +1,639 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Thermal sensor driver for Allwinner SOC
+ * Copyright (C) 2019 Yangtao Li
+ *
+ * Based on the work of Icenowy Zheng <icenowy@aosc.io>
+ * Based on the work of Ondrej Jirman <megous@megous.com>
+ * Based on the work of Josef Gajdusek <atx@atx.name>
+ */
+
+#include <linux/clk.h>
+#include <linux/device.h>
+#include <linux/interrupt.h>
+#include <linux/module.h>
+#include <linux/nvmem-consumer.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+#include <linux/reset.h>
+#include <linux/slab.h>
+#include <linux/thermal.h>
+
+#include "thermal_hwmon.h"
+
+#define MAX_SENSOR_NUM 4
+
+#define FT_TEMP_MASK GENMASK(11, 0)
+#define TEMP_CALIB_MASK GENMASK(11, 0)
+#define CALIBRATE_DEFAULT 0x800
+
+#define SUN8I_THS_CTRL0 0x00
+#define SUN8I_THS_CTRL2 0x40
+#define SUN8I_THS_IC 0x44
+#define SUN8I_THS_IS 0x48
+#define SUN8I_THS_MFC 0x70
+#define SUN8I_THS_TEMP_CALIB 0x74
+#define SUN8I_THS_TEMP_DATA 0x80
+
+#define SUN50I_THS_CTRL0 0x00
+#define SUN50I_H6_THS_ENABLE 0x04
+#define SUN50I_H6_THS_PC 0x08
+#define SUN50I_H6_THS_DIC 0x10
+#define SUN50I_H6_THS_DIS 0x20
+#define SUN50I_H6_THS_MFC 0x30
+#define SUN50I_H6_THS_TEMP_CALIB 0xa0
+#define SUN50I_H6_THS_TEMP_DATA 0xc0
+
+#define SUN8I_THS_CTRL0_T_ACQ0(x) (GENMASK(15, 0) & (x))
+#define SUN8I_THS_CTRL2_T_ACQ1(x) ((GENMASK(15, 0) & (x)) << 16)
+#define SUN8I_THS_DATA_IRQ_STS(x) BIT(x + 8)
+
+#define SUN50I_THS_CTRL0_T_ACQ(x) ((GENMASK(15, 0) & (x)) << 16)
+#define SUN50I_THS_FILTER_EN BIT(2)
+#define SUN50I_THS_FILTER_TYPE(x) (GENMASK(1, 0) & (x))
+#define SUN50I_H6_THS_PC_TEMP_PERIOD(x) ((GENMASK(19, 0) & (x)) << 12)
+#define SUN50I_H6_THS_DATA_IRQ_STS(x) BIT(x)
+
+/* millidegree celsius */
+
+struct tsensor {
+ struct ths_device *tmdev;
+ struct thermal_zone_device *tzd;
+ int id;
+};
+
+struct ths_thermal_chip {
+ bool has_mod_clk;
+ bool has_bus_clk_reset;
+ int sensor_num;
+ int offset;
+ int scale;
+ int ft_deviation;
+ int temp_data_base;
+ int (*calibrate)(struct ths_device *tmdev,
+ u16 *caldata, int callen);
+ int (*init)(struct ths_device *tmdev);
+ int (*irq_ack)(struct ths_device *tmdev);
+ int (*calc_temp)(struct ths_device *tmdev,
+ int id, int reg);
+};
+
+struct ths_device {
+ const struct ths_thermal_chip *chip;
+ struct device *dev;
+ struct regmap *regmap;
+ struct reset_control *reset;
+ struct clk *bus_clk;
+ struct clk *mod_clk;
+ struct tsensor sensor[MAX_SENSOR_NUM];
+};
+
+/* Temp Unit: millidegree Celsius */
+static int sun8i_ths_calc_temp(struct ths_device *tmdev,
+ int id, int reg)
+{
+ return tmdev->chip->offset - (reg * tmdev->chip->scale / 10);
+}
+
+static int sun50i_h5_calc_temp(struct ths_device *tmdev,
+ int id, int reg)
+{
+ if (reg >= 0x500)
+ return -1191 * reg / 10 + 223000;
+ else if (!id)
+ return -1452 * reg / 10 + 259000;
+ else
+ return -1590 * reg / 10 + 276000;
+}
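The generic sun8i_ths_calc_temp() above evaluates a single straight line in the raw code, temp = offset - reg * scale / 10, with scale expressed in tenths of a millidegree per LSB; the H5 variant merely switches between three such lines by range and sensor id. A worked example with the H3 parameters from the chip table further down (offset 217000, scale 1211) and an assumed raw reading of 0x400:

	/* 0x400 = 1024 raw; values from sun8i_h3_ths */
	temp = 217000 - 1024 * 1211 / 10
	     = 217000 - 124006
	     = 92994;	/* ~93.0 C, in millicelsius */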
+
+static int sun8i_ths_get_temp(void *data, int *temp)
+{
+ struct tsensor *s = data;
+ struct ths_device *tmdev = s->tmdev;
+ int val = 0;
+
+ regmap_read(tmdev->regmap, tmdev->chip->temp_data_base +
+ 0x4 * s->id, &val);
+
 + /* the THS has no data yet */
+ if (!val)
+ return -EAGAIN;
+
+ *temp = tmdev->chip->calc_temp(tmdev, s->id, val);
 + /*
 + * According to the original SDK, a few platforms (rarely) add a
 + * fixed offset after the temperature has been calculated. We can't
 + * simply fold that offset into the formula above, because the same
 + * formula is also used when the sensor is calibrated; doing so would
 + * make the correct calibration formula hard to determine.
 + */
+ *temp += tmdev->chip->ft_deviation;
+
+ return 0;
+}
+
+static const struct thermal_zone_of_device_ops ths_ops = {
+ .get_temp = sun8i_ths_get_temp,
+};
+
+static const struct regmap_config config = {
+ .reg_bits = 32,
+ .val_bits = 32,
+ .reg_stride = 4,
+ .fast_io = true,
+ .max_register = 0xfc,
+};
+
+static int sun8i_h3_irq_ack(struct ths_device *tmdev)
+{
+ int i, state, ret = 0;
+
+ regmap_read(tmdev->regmap, SUN8I_THS_IS, &state);
+
+ for (i = 0; i < tmdev->chip->sensor_num; i++) {
+ if (state & SUN8I_THS_DATA_IRQ_STS(i)) {
+ regmap_write(tmdev->regmap, SUN8I_THS_IS,
+ SUN8I_THS_DATA_IRQ_STS(i));
+ ret |= BIT(i);
+ }
+ }
+
+ return ret;
+}
+
+static int sun50i_h6_irq_ack(struct ths_device *tmdev)
+{
+ int i, state, ret = 0;
+
+ regmap_read(tmdev->regmap, SUN50I_H6_THS_DIS, &state);
+
+ for (i = 0; i < tmdev->chip->sensor_num; i++) {
+ if (state & SUN50I_H6_THS_DATA_IRQ_STS(i)) {
+ regmap_write(tmdev->regmap, SUN50I_H6_THS_DIS,
+ SUN50I_H6_THS_DATA_IRQ_STS(i));
+ ret |= BIT(i);
+ }
+ }
+
+ return ret;
+}
+
+static irqreturn_t sun8i_irq_thread(int irq, void *data)
+{
+ struct ths_device *tmdev = data;
+ int i, state;
+
+ state = tmdev->chip->irq_ack(tmdev);
+
+ for (i = 0; i < tmdev->chip->sensor_num; i++) {
+ if (state & BIT(i))
+ thermal_zone_device_update(tmdev->sensor[i].tzd,
+ THERMAL_EVENT_UNSPECIFIED);
+ }
+
+ return IRQ_HANDLED;
+}
+
+static int sun8i_h3_ths_calibrate(struct ths_device *tmdev,
+ u16 *caldata, int callen)
+{
+ int i;
+
+ if (!caldata[0] || callen < 2 * tmdev->chip->sensor_num)
+ return -EINVAL;
+
+ for (i = 0; i < tmdev->chip->sensor_num; i++) {
+ int offset = (i % 2) << 4;
+
+ regmap_update_bits(tmdev->regmap,
+ SUN8I_THS_TEMP_CALIB + (4 * (i >> 1)),
+ 0xfff << offset,
+ caldata[i] << offset);
+ }
+
+ return 0;
+}
+
+static int sun50i_h6_ths_calibrate(struct ths_device *tmdev,
+ u16 *caldata, int callen)
+{
+ struct device *dev = tmdev->dev;
+ int i, ft_temp;
+
+ if (!caldata[0] || callen < 2 + 2 * tmdev->chip->sensor_num)
+ return -EINVAL;
+
+ /*
+ * efuse layout:
+ *
+ * 0 11 16 32
+ * +-------+-------+-------+
+ * |temp| |sensor0|sensor1|
+ * +-------+-------+-------+
+ *
+ * The calibration data on the H6 is the ambient temperature and
+ * sensor values that are filled during the factory test stage.
+ *
 + * The unit of the stored FT temperature is 0.1 degrees Celsius.
+ *
 + * We need to calculate a delta between the measured and calculated
+ * register values and this will become a calibration offset.
+ */
+ ft_temp = (caldata[0] & FT_TEMP_MASK) * 100;
+
+ for (i = 0; i < tmdev->chip->sensor_num; i++) {
+ int sensor_reg = caldata[i + 1];
+ int cdata, offset;
+ int sensor_temp = tmdev->chip->calc_temp(tmdev, i, sensor_reg);
+
+ /*
+ * Calibration data is CALIBRATE_DEFAULT - (calculated
+ * temperature from sensor reading at factory temperature
+ * minus actual factory temperature) * 14.88 (scale from
+ * temperature to register values)
+ */
+ cdata = CALIBRATE_DEFAULT -
+ ((sensor_temp - ft_temp) * 10 / tmdev->chip->scale);
+ if (cdata & ~TEMP_CALIB_MASK) {
+ /*
 + * The calibration value needs more than 12 bits, but the
 + * calibration register is only 12 bits wide. In this case the
 + * THS hardware can still work without calibration, although
 + * the data won't be as accurate.
+ */
+ dev_warn(dev, "sensor%d is not calibrated.\n", i);
+ continue;
+ }
+
+ offset = (i % 2) * 16;
+ regmap_update_bits(tmdev->regmap,
+ SUN50I_H6_THS_TEMP_CALIB + (i / 2 * 4),
+ 0xfff << offset,
+ cdata << offset);
+ }
+
+ return 0;
+}
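The calibration arithmetic is easier to sanity-check with concrete numbers. A made-up example, assuming the efuse holds an FT ambient of 25.0 C and a sensor reading of 0x970 taken at that temperature, using the H6 line (offset 187744, scale 672):

	/* caldata[0] = 250   -> ft_temp = 250 * 100 = 25000 mC */
	/* caldata[1] = 0x970 -> sensor_reg = 2416 raw */
	sensor_temp = 187744 - 2416 * 672 / 10 = 25389;	/* mC */
	cdata = 0x800 - (25389 - 25000) * 10 / 672
	      = 2048 - 5 = 2043 = 0x7fb;	/* fits in 12 bits */

The result lands near the 0x800 default, matching the note in sun8i_ths_calibrate() below that real calibration words are usually 0x7xx or 0x8xx.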
+
+static int sun8i_ths_calibrate(struct ths_device *tmdev)
+{
+ struct nvmem_cell *calcell;
+ struct device *dev = tmdev->dev;
+ u16 *caldata;
+ size_t callen;
+ int ret = 0;
+
+ calcell = devm_nvmem_cell_get(dev, "calibration");
+ if (IS_ERR(calcell)) {
+ if (PTR_ERR(calcell) == -EPROBE_DEFER)
+ return -EPROBE_DEFER;
+ /*
+ * Even if the external calibration data stored in sid is
+ * not accessible, the THS hardware can still work, although
+ * the data won't be so accurate.
+ *
 + * The default value of the calibration register is 0x800 for
 + * every sensor, and the actual calibration value is usually
 + * 0x7xx or 0x8xx, so it won't be far from the default.
 + *
 + * So here we do not return an error if the calibration data is
 + * not available, except when the probe needs deferring.
+ */
+ goto out;
+ }
+
+ caldata = nvmem_cell_read(calcell, &callen);
+ if (IS_ERR(caldata)) {
+ ret = PTR_ERR(caldata);
+ goto out;
+ }
+
+ tmdev->chip->calibrate(tmdev, caldata, callen);
+
+ kfree(caldata);
+out:
+ return ret;
+}
+
+static int sun8i_ths_resource_init(struct ths_device *tmdev)
+{
+ struct device *dev = tmdev->dev;
+ struct platform_device *pdev = to_platform_device(dev);
+ void __iomem *base;
+ int ret;
+
+ base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(base))
+ return PTR_ERR(base);
+
+ tmdev->regmap = devm_regmap_init_mmio(dev, base, &config);
+ if (IS_ERR(tmdev->regmap))
+ return PTR_ERR(tmdev->regmap);
+
+ if (tmdev->chip->has_bus_clk_reset) {
+ tmdev->reset = devm_reset_control_get(dev, NULL);
+ if (IS_ERR(tmdev->reset))
+ return PTR_ERR(tmdev->reset);
+
+ tmdev->bus_clk = devm_clk_get(&pdev->dev, "bus");
+ if (IS_ERR(tmdev->bus_clk))
+ return PTR_ERR(tmdev->bus_clk);
+ }
+
+ if (tmdev->chip->has_mod_clk) {
+ tmdev->mod_clk = devm_clk_get(&pdev->dev, "mod");
+ if (IS_ERR(tmdev->mod_clk))
+ return PTR_ERR(tmdev->mod_clk);
+ }
+
+ ret = reset_control_deassert(tmdev->reset);
+ if (ret)
+ return ret;
+
+ ret = clk_prepare_enable(tmdev->bus_clk);
+ if (ret)
+ goto assert_reset;
+
+ ret = clk_set_rate(tmdev->mod_clk, 24000000);
+ if (ret)
+ goto bus_disable;
+
+ ret = clk_prepare_enable(tmdev->mod_clk);
+ if (ret)
+ goto bus_disable;
+
+ ret = sun8i_ths_calibrate(tmdev);
+ if (ret)
+ goto mod_disable;
+
+ return 0;
+
+mod_disable:
+ clk_disable_unprepare(tmdev->mod_clk);
+bus_disable:
+ clk_disable_unprepare(tmdev->bus_clk);
+assert_reset:
+ reset_control_assert(tmdev->reset);
+
+ return ret;
+}
+
+static int sun8i_h3_thermal_init(struct ths_device *tmdev)
+{
+ int val;
+
+ /* average over 4 samples */
+ regmap_write(tmdev->regmap, SUN8I_THS_MFC,
+ SUN50I_THS_FILTER_EN |
+ SUN50I_THS_FILTER_TYPE(1));
+ /*
+ * clkin = 24MHz
+ * filter_samples = 4
+ * period = 0.25s
+ *
+ * x = period * clkin / 4096 / filter_samples - 1
+ * = 365
+ */
+ val = GENMASK(7 + tmdev->chip->sensor_num, 8);
+ regmap_write(tmdev->regmap, SUN8I_THS_IC,
+ SUN50I_H6_THS_PC_TEMP_PERIOD(365) | val);
+ /*
+ * T_acq = 20us
+ * clkin = 24MHz
+ *
+ * x = T_acq * clkin - 1
+ * = 479
+ */
+ regmap_write(tmdev->regmap, SUN8I_THS_CTRL0,
+ SUN8I_THS_CTRL0_T_ACQ0(479));
+ val = GENMASK(tmdev->chip->sensor_num - 1, 0);
+ regmap_write(tmdev->regmap, SUN8I_THS_CTRL2,
+ SUN8I_THS_CTRL2_T_ACQ1(479) | val);
+
+ return 0;
+}
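Both magic constants in the comments above follow directly from the stated clock figures; a quick check, assuming the 24 MHz mod clock set up in sun8i_ths_resource_init():

	/* period:      0.25 s * 24 MHz / 4096 / 4 samples - 1 ~= 365 */
	/* acquisition: 20 us * 24 MHz - 1 = 480 - 1 = 479 */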
+
+/*
 + * Without this undocumented value, the returned temperatures would
+ * be higher than real ones by about 20C.
+ */
+#define SUN50I_H6_CTRL0_UNK 0x0000002f
+
+static int sun50i_h6_thermal_init(struct ths_device *tmdev)
+{
+ int val;
+
+ /*
+ * T_acq = 20us
+ * clkin = 24MHz
+ *
+ * x = T_acq * clkin - 1
+ * = 479
+ */
+ regmap_write(tmdev->regmap, SUN50I_THS_CTRL0,
+ SUN50I_H6_CTRL0_UNK | SUN50I_THS_CTRL0_T_ACQ(479));
+ /* average over 4 samples */
+ regmap_write(tmdev->regmap, SUN50I_H6_THS_MFC,
+ SUN50I_THS_FILTER_EN |
+ SUN50I_THS_FILTER_TYPE(1));
+ /*
+ * clkin = 24MHz
+ * filter_samples = 4
+ * period = 0.25s
+ *
+ * x = period * clkin / 4096 / filter_samples - 1
+ * = 365
+ */
+ regmap_write(tmdev->regmap, SUN50I_H6_THS_PC,
+ SUN50I_H6_THS_PC_TEMP_PERIOD(365));
+ /* enable sensor */
+ val = GENMASK(tmdev->chip->sensor_num - 1, 0);
+ regmap_write(tmdev->regmap, SUN50I_H6_THS_ENABLE, val);
+ /* thermal data interrupt enable */
+ val = GENMASK(tmdev->chip->sensor_num - 1, 0);
+ regmap_write(tmdev->regmap, SUN50I_H6_THS_DIC, val);
+
+ return 0;
+}
+
+static int sun8i_ths_register(struct ths_device *tmdev)
+{
+ int i;
+
+ for (i = 0; i < tmdev->chip->sensor_num; i++) {
+ tmdev->sensor[i].tmdev = tmdev;
+ tmdev->sensor[i].id = i;
+ tmdev->sensor[i].tzd =
+ devm_thermal_zone_of_sensor_register(tmdev->dev,
+ i,
+ &tmdev->sensor[i],
+ &ths_ops);
+ if (IS_ERR(tmdev->sensor[i].tzd))
+ return PTR_ERR(tmdev->sensor[i].tzd);
+
+ if (devm_thermal_add_hwmon_sysfs(tmdev->sensor[i].tzd))
+ dev_warn(tmdev->dev,
+ "Failed to add hwmon sysfs attributes\n");
+ }
+
+ return 0;
+}
+
+static int sun8i_ths_probe(struct platform_device *pdev)
+{
+ struct ths_device *tmdev;
+ struct device *dev = &pdev->dev;
+ int ret, irq;
+
+ tmdev = devm_kzalloc(dev, sizeof(*tmdev), GFP_KERNEL);
+ if (!tmdev)
+ return -ENOMEM;
+
+ tmdev->dev = dev;
+ tmdev->chip = of_device_get_match_data(&pdev->dev);
+ if (!tmdev->chip)
+ return -EINVAL;
+
+ platform_set_drvdata(pdev, tmdev);
+
+ ret = sun8i_ths_resource_init(tmdev);
+ if (ret)
+ return ret;
+
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0)
+ return irq;
+
+ ret = tmdev->chip->init(tmdev);
+ if (ret)
+ return ret;
+
+ ret = sun8i_ths_register(tmdev);
+ if (ret)
+ return ret;
+
 + /*
 + * To avoid the interrupt handler running before the thermal zones
 + * are registered, the interrupt registration is deferred to the
 + * end of probe.
 + */
+ ret = devm_request_threaded_irq(dev, irq, NULL,
+ sun8i_irq_thread,
+ IRQF_ONESHOT, "ths", tmdev);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static int sun8i_ths_remove(struct platform_device *pdev)
+{
+ struct ths_device *tmdev = platform_get_drvdata(pdev);
+
+ clk_disable_unprepare(tmdev->mod_clk);
+ clk_disable_unprepare(tmdev->bus_clk);
+ reset_control_assert(tmdev->reset);
+
+ return 0;
+}
+
+static const struct ths_thermal_chip sun8i_a83t_ths = {
+ .sensor_num = 3,
+ .scale = 705,
+ .offset = 191668,
+ .temp_data_base = SUN8I_THS_TEMP_DATA,
+ .calibrate = sun8i_h3_ths_calibrate,
+ .init = sun8i_h3_thermal_init,
+ .irq_ack = sun8i_h3_irq_ack,
+ .calc_temp = sun8i_ths_calc_temp,
+};
+
+static const struct ths_thermal_chip sun8i_h3_ths = {
+ .sensor_num = 1,
+ .scale = 1211,
+ .offset = 217000,
+ .has_mod_clk = true,
+ .has_bus_clk_reset = true,
+ .temp_data_base = SUN8I_THS_TEMP_DATA,
+ .calibrate = sun8i_h3_ths_calibrate,
+ .init = sun8i_h3_thermal_init,
+ .irq_ack = sun8i_h3_irq_ack,
+ .calc_temp = sun8i_ths_calc_temp,
+};
+
+static const struct ths_thermal_chip sun8i_r40_ths = {
+ .sensor_num = 2,
+ .offset = 251086,
+ .scale = 1130,
+ .has_mod_clk = true,
+ .has_bus_clk_reset = true,
+ .temp_data_base = SUN8I_THS_TEMP_DATA,
+ .calibrate = sun8i_h3_ths_calibrate,
+ .init = sun8i_h3_thermal_init,
+ .irq_ack = sun8i_h3_irq_ack,
+ .calc_temp = sun8i_ths_calc_temp,
+};
+
+static const struct ths_thermal_chip sun50i_a64_ths = {
+ .sensor_num = 3,
+ .offset = 260890,
+ .scale = 1170,
+ .has_mod_clk = true,
+ .has_bus_clk_reset = true,
+ .temp_data_base = SUN8I_THS_TEMP_DATA,
+ .calibrate = sun8i_h3_ths_calibrate,
+ .init = sun8i_h3_thermal_init,
+ .irq_ack = sun8i_h3_irq_ack,
+ .calc_temp = sun8i_ths_calc_temp,
+};
+
+static const struct ths_thermal_chip sun50i_h5_ths = {
+ .sensor_num = 2,
+ .has_mod_clk = true,
+ .has_bus_clk_reset = true,
+ .temp_data_base = SUN8I_THS_TEMP_DATA,
+ .calibrate = sun8i_h3_ths_calibrate,
+ .init = sun8i_h3_thermal_init,
+ .irq_ack = sun8i_h3_irq_ack,
+ .calc_temp = sun50i_h5_calc_temp,
+};
+
+static const struct ths_thermal_chip sun50i_h6_ths = {
+ .sensor_num = 2,
+ .has_bus_clk_reset = true,
+ .ft_deviation = 7000,
+ .offset = 187744,
+ .scale = 672,
+ .temp_data_base = SUN50I_H6_THS_TEMP_DATA,
+ .calibrate = sun50i_h6_ths_calibrate,
+ .init = sun50i_h6_thermal_init,
+ .irq_ack = sun50i_h6_irq_ack,
+ .calc_temp = sun8i_ths_calc_temp,
+};
+
+static const struct of_device_id of_ths_match[] = {
+ { .compatible = "allwinner,sun8i-a83t-ths", .data = &sun8i_a83t_ths },
+ { .compatible = "allwinner,sun8i-h3-ths", .data = &sun8i_h3_ths },
+ { .compatible = "allwinner,sun8i-r40-ths", .data = &sun8i_r40_ths },
+ { .compatible = "allwinner,sun50i-a64-ths", .data = &sun50i_a64_ths },
+ { .compatible = "allwinner,sun50i-h5-ths", .data = &sun50i_h5_ths },
+ { .compatible = "allwinner,sun50i-h6-ths", .data = &sun50i_h6_ths },
+ { /* sentinel */ },
+};
+MODULE_DEVICE_TABLE(of, of_ths_match);
+
+static struct platform_driver ths_driver = {
+ .probe = sun8i_ths_probe,
+ .remove = sun8i_ths_remove,
+ .driver = {
+ .name = "sun8i-thermal",
+ .of_match_table = of_ths_match,
+ },
+};
+module_platform_driver(ths_driver);
+
+MODULE_DESCRIPTION("Thermal sensor driver for Allwinner SOC");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/thermal/tegra/soctherm.c b/drivers/thermal/tegra/soctherm.c
index 5acaad3a594f..66e0639da4bf 100644
--- a/drivers/thermal/tegra/soctherm.c
+++ b/drivers/thermal/tegra/soctherm.c
@@ -360,7 +360,7 @@ static struct soctherm_oc_irq_chip_data soc_irq_cdata;
/**
* ccroc_writel() - writes a value to a CCROC register
* @ts: pointer to a struct tegra_soctherm
- * @v: the value to write
+ * @value: the value to write
* @reg: the register offset
*
 * Writes @value to @reg. No return value.
@@ -435,6 +435,7 @@ static int tegra_thermctl_get_temp(void *data, int *out_temp)
/**
* enforce_temp_range() - check and enforce temperature range [min, max]
+ * @dev: struct device * of the SOC_THERM instance
* @trip_temp: the trip temperature to check
*
* Checks and enforces the permitted temperature range that SOC_THERM
@@ -747,6 +748,8 @@ static int get_hot_temp(struct thermal_zone_device *tz, int *trip, int *temp)
/**
* tegra_soctherm_set_hwtrips() - set HW trip point from DT data
* @dev: struct device * of the SOC_THERM instance
+ * @sg: pointer to the sensor group to set the thermtrip temperature for
+ * @tz: struct thermal_zone_device *
*
* Configure the SOC_THERM HW trip points, setting "THERMTRIP"
* "THROTTLE" trip points , using "thermtrips", "critical" or "hot"
@@ -931,6 +934,7 @@ static irqreturn_t soctherm_thermal_isr_thread(int irq, void *dev_id)
/**
* soctherm_oc_intr_enable() - Enables the soctherm over-current interrupt
+ * @ts: pointer to a struct tegra_soctherm
* @alarm: The soctherm throttle id
* @enable: Flag indicating enable the soctherm over-current
* interrupt or disable it
@@ -1156,7 +1160,7 @@ static void soctherm_oc_irq_enable(struct irq_data *data)
/**
* soctherm_oc_irq_disable() - Disables overcurrent interrupt requests
- * @irq_data: The interrupt request information
+ * @data: The interrupt request information
*
* Clears the interrupt request enable bit of the overcurrent
* interrupt request chip data.
@@ -1206,6 +1210,7 @@ static int soctherm_oc_irq_map(struct irq_domain *h, unsigned int virq,
/**
* soctherm_irq_domain_xlate_twocell() - xlate for soctherm interrupts
* @d: Interrupt request domain
+ * @ctrlr: Controller device tree node
* @intspec: Array of u32s from DTs "interrupt" property
* @intsize: Number of values inside the intspec array
* @out_hwirq: HW IRQ value associated with this interrupt
@@ -1681,6 +1686,7 @@ err:
/**
* soctherm_init_hw_throt_cdev() - Parse the HW throttle configurations
* and register them as cooling devices.
+ * @pdev: Pointer to platform_device struct
*/
static void soctherm_init_hw_throt_cdev(struct platform_device *pdev)
{
@@ -1751,6 +1757,7 @@ static void soctherm_init_hw_throt_cdev(struct platform_device *pdev)
/**
* throttlectl_cpu_level_cfg() - programs CCROC NV_THERM level config
+ * @ts: pointer to a struct tegra_soctherm
* @level: describing the level LOW/MED/HIGH of throttling
*
* It's necessary to set up the CPU-local CCROC NV_THERM instance with
@@ -1798,6 +1805,7 @@ static void throttlectl_cpu_level_cfg(struct tegra_soctherm *ts, int level)
/**
* throttlectl_cpu_level_select() - program CPU pulse skipper config
+ * @ts: pointer to a struct tegra_soctherm
* @throt: the LIGHT/HEAVY of throttle event id
*
* Pulse skippers are used to throttle clock frequencies. This
@@ -1841,6 +1849,7 @@ static void throttlectl_cpu_level_select(struct tegra_soctherm *ts,
/**
* throttlectl_cpu_mn() - program CPU pulse skipper configuration
+ * @ts: pointer to a struct tegra_soctherm
* @throt: the LIGHT/HEAVY of throttle event id
*
* Pulse skippers are used to throttle clock frequencies. This
@@ -1874,6 +1883,7 @@ static void throttlectl_cpu_mn(struct tegra_soctherm *ts,
/**
* throttlectl_gpu_level_select() - selects throttling level for GPU
+ * @ts: pointer to a struct tegra_soctherm
* @throt: the LIGHT/HEAVY of throttle event id
*
* This function programs soctherm's interface to GK20a NV_THERM to select
@@ -1918,6 +1928,7 @@ static int soctherm_oc_cfg_program(struct tegra_soctherm *ts,
/**
* soctherm_throttle_program() - programs pulse skippers' configuration
+ * @ts: pointer to a struct tegra_soctherm
* @throt: the LIGHT/HEAVY of the throttle event id.
*
* Pulse skippers are used to throttle clock frequencies.
diff --git a/drivers/thermal/thermal-generic-adc.c b/drivers/thermal/thermal-generic-adc.c
index ae5743c9a894..73665c3ccfe0 100644
--- a/drivers/thermal/thermal-generic-adc.c
+++ b/drivers/thermal/thermal-generic-adc.c
@@ -76,13 +76,17 @@ static int gadc_thermal_read_linear_lookup_table(struct device *dev,
struct gadc_thermal_info *gti)
{
struct device_node *np = dev->of_node;
+ enum iio_chan_type chan_type;
int ntable;
int ret;
ntable = of_property_count_elems_of_size(np, "temperature-lookup-table",
sizeof(u32));
if (ntable <= 0) {
- dev_notice(dev, "no lookup table, assuming DAC channel returns milliCelcius\n");
+ ret = iio_get_channel_type(gti->channel, &chan_type);
+ if (ret || chan_type != IIO_TEMP)
+ dev_notice(dev,
+ "no lookup table, assuming DAC channel returns milliCelcius\n");
return 0;
}
@@ -124,13 +128,6 @@ static int gadc_thermal_probe(struct platform_device *pdev)
if (!gti)
return -ENOMEM;
- ret = gadc_thermal_read_linear_lookup_table(&pdev->dev, gti);
- if (ret < 0)
- return ret;
-
- gti->dev = &pdev->dev;
- platform_set_drvdata(pdev, gti);
-
gti->channel = devm_iio_channel_get(&pdev->dev, "sensor-channel");
if (IS_ERR(gti->channel)) {
ret = PTR_ERR(gti->channel);
@@ -139,6 +136,13 @@ static int gadc_thermal_probe(struct platform_device *pdev)
return ret;
}
+ ret = gadc_thermal_read_linear_lookup_table(&pdev->dev, gti);
+ if (ret < 0)
+ return ret;
+
+ gti->dev = &pdev->dev;
+ platform_set_drvdata(pdev, gti);
+
gti->tz_dev = devm_thermal_zone_of_sensor_register(&pdev->dev, 0, gti,
&gadc_thermal_ops);
if (IS_ERR(gti->tz_dev)) {
diff --git a/drivers/thermal/thermal_core.h b/drivers/thermal/thermal_core.h
index 207b0cda70da..a9bf00e91d64 100644
--- a/drivers/thermal/thermal_core.h
+++ b/drivers/thermal/thermal_core.h
@@ -92,14 +92,12 @@ thermal_cooling_device_stats_update(struct thermal_cooling_device *cdev,
/* device tree support */
#ifdef CONFIG_THERMAL_OF
int of_parse_thermal_zones(void);
-void of_thermal_destroy_zones(void);
int of_thermal_get_ntrips(struct thermal_zone_device *);
bool of_thermal_is_trip_valid(struct thermal_zone_device *, int);
const struct thermal_trip *
of_thermal_get_trip_points(struct thermal_zone_device *);
#else
static inline int of_parse_thermal_zones(void) { return 0; }
-static inline void of_thermal_destroy_zones(void) { }
static inline int of_thermal_get_ntrips(struct thermal_zone_device *tz)
{
return 0;
diff --git a/drivers/thermal/thermal_hwmon.c b/drivers/thermal/thermal_hwmon.c
index dd5d8ee37928..c8d2620f2e42 100644
--- a/drivers/thermal/thermal_hwmon.c
+++ b/drivers/thermal/thermal_hwmon.c
@@ -248,3 +248,31 @@ void thermal_remove_hwmon_sysfs(struct thermal_zone_device *tz)
kfree(hwmon);
}
EXPORT_SYMBOL_GPL(thermal_remove_hwmon_sysfs);
+
+static void devm_thermal_hwmon_release(struct device *dev, void *res)
+{
+ thermal_remove_hwmon_sysfs(*(struct thermal_zone_device **)res);
+}
+
+int devm_thermal_add_hwmon_sysfs(struct thermal_zone_device *tz)
+{
+ struct thermal_zone_device **ptr;
+ int ret;
+
+ ptr = devres_alloc(devm_thermal_hwmon_release, sizeof(*ptr),
+ GFP_KERNEL);
+ if (!ptr)
+ return -ENOMEM;
+
+ ret = thermal_add_hwmon_sysfs(tz);
+ if (ret) {
+ devres_free(ptr);
+ return ret;
+ }
+
+ *ptr = tz;
+ devres_add(&tz->device, ptr);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(devm_thermal_add_hwmon_sysfs);
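With the devres wrapper in place, a sensor driver can drop its explicit thermal_remove_hwmon_sysfs() call; a minimal usage sketch (zone registration shown only for shape):

	tzd = devm_thermal_zone_of_sensor_register(dev, 0, priv, &ops);
	if (IS_ERR(tzd))
		return PTR_ERR(tzd);

	/* undone automatically when the thermal zone device is released */
	if (devm_thermal_add_hwmon_sysfs(tzd))
		dev_warn(dev, "failed to add hwmon sysfs attributes\n");

Note that the devres is attached to &tz->device rather than the probing device, so the hwmon sysfs nodes live exactly as long as the thermal zone itself.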
diff --git a/drivers/thermal/thermal_hwmon.h b/drivers/thermal/thermal_hwmon.h
index a160b9d62dd0..1a9d65f6a6a8 100644
--- a/drivers/thermal/thermal_hwmon.h
+++ b/drivers/thermal/thermal_hwmon.h
@@ -17,6 +17,7 @@
#ifdef CONFIG_THERMAL_HWMON
int thermal_add_hwmon_sysfs(struct thermal_zone_device *tz);
+int devm_thermal_add_hwmon_sysfs(struct thermal_zone_device *tz);
void thermal_remove_hwmon_sysfs(struct thermal_zone_device *tz);
#else
static inline int
@@ -25,6 +26,12 @@ thermal_add_hwmon_sysfs(struct thermal_zone_device *tz)
return 0;
}
+static inline int
+devm_thermal_add_hwmon_sysfs(struct thermal_zone_device *tz)
+{
+ return 0;
+}
+
static inline void
thermal_remove_hwmon_sysfs(struct thermal_zone_device *tz)
{
diff --git a/drivers/thermal/user_space.c b/drivers/thermal/user_space.c
index 962873fd9242..293cffd9c8ad 100644
--- a/drivers/thermal/user_space.c
+++ b/drivers/thermal/user_space.c
@@ -17,8 +17,8 @@
/**
* notify_user_space - Notifies user space about thermal events
- * @tz - thermal_zone_device
- * @trip - trip point index
+ * @tz: thermal_zone_device
+ * @trip: trip point index
*
* This function notifies the user space through UEvents.
*/
diff --git a/drivers/thermal/zx2967_thermal.c b/drivers/thermal/zx2967_thermal.c
index 7c8a82c8e1e9..8e3a2d3c2f9a 100644
--- a/drivers/thermal/zx2967_thermal.c
+++ b/drivers/thermal/zx2967_thermal.c
@@ -45,6 +45,7 @@
* @clk_topcrm: topcrm clk structure
* @clk_apb: apb clk structure
* @regs: pointer to base address of the thermal sensor
+ * @dev: struct device pointer
*/
struct zx2967_thermal_priv {
diff --git a/drivers/thunderbolt/Kconfig b/drivers/thunderbolt/Kconfig
index fd9adca898ff..1eb757e8df3b 100644
--- a/drivers/thunderbolt/Kconfig
+++ b/drivers/thunderbolt/Kconfig
@@ -1,6 +1,6 @@
# SPDX-License-Identifier: GPL-2.0-only
-menuconfig THUNDERBOLT
- tristate "Thunderbolt support"
+menuconfig USB4
+ tristate "Unified support for USB4 and Thunderbolt"
depends on PCI
depends on X86 || COMPILE_TEST
select APPLE_PROPERTIES if EFI_STUB && X86
@@ -9,9 +9,10 @@ menuconfig THUNDERBOLT
select CRYPTO_HASH
select NVMEM
help
- Thunderbolt Controller driver. This driver is required if you
- want to hotplug Thunderbolt devices on Apple hardware or on PCs
- with Intel Falcon Ridge or newer.
 + USB4 and Thunderbolt driver. USB4 is the public specification
 + based on the Thunderbolt 3 protocol. This driver is required if
+ you want to hotplug Thunderbolt and USB4 compliant devices on
+ Apple hardware or on PCs with Intel Falcon Ridge or newer.
 To compile this driver as a module, choose M here. The module will be
called thunderbolt.
diff --git a/drivers/thunderbolt/Makefile b/drivers/thunderbolt/Makefile
index 001187c577bf..eae28dd45250 100644
--- a/drivers/thunderbolt/Makefile
+++ b/drivers/thunderbolt/Makefile
@@ -1,4 +1,4 @@
# SPDX-License-Identifier: GPL-2.0-only
-obj-${CONFIG_THUNDERBOLT} := thunderbolt.o
+obj-${CONFIG_USB4} := thunderbolt.o
thunderbolt-objs := nhi.o nhi_ops.o ctl.o tb.o switch.o cap.o path.o tunnel.o eeprom.o
-thunderbolt-objs += domain.o dma_port.o icm.o property.o xdomain.o lc.o
+thunderbolt-objs += domain.o dma_port.o icm.o property.o xdomain.o lc.o tmu.o usb4.o
diff --git a/drivers/thunderbolt/cap.c b/drivers/thunderbolt/cap.c
index fdd77bb4628d..19db6cdc5b70 100644
--- a/drivers/thunderbolt/cap.c
+++ b/drivers/thunderbolt/cap.c
@@ -113,7 +113,16 @@ int tb_port_find_cap(struct tb_port *port, enum tb_port_cap cap)
return ret;
}
-static int tb_switch_find_cap(struct tb_switch *sw, enum tb_switch_cap cap)
+/**
+ * tb_switch_find_cap() - Find switch capability
 + * @sw: Switch to find the capability for
 + * @cap: Capability to look for
+ *
+ * Returns offset to start of capability or %-ENOENT if no such
+ * capability was found. Negative errno is returned if there was an
+ * error.
+ */
+int tb_switch_find_cap(struct tb_switch *sw, enum tb_switch_cap cap)
{
int offset = sw->config.first_cap_offset;
diff --git a/drivers/thunderbolt/ctl.c b/drivers/thunderbolt/ctl.c
index d97813e80e5f..f77ceae5c7d7 100644
--- a/drivers/thunderbolt/ctl.c
+++ b/drivers/thunderbolt/ctl.c
@@ -708,19 +708,26 @@ void tb_ctl_stop(struct tb_ctl *ctl)
/* public interface, commands */
/**
- * tb_cfg_error() - send error packet
+ * tb_cfg_ack_plug() - Ack hot plug/unplug event
+ * @ctl: Control channel to use
+ * @route: Router that originated the event
+ * @port: Port where the hot plug/unplug happened
+ * @unplug: Ack hot plug or unplug
*
- * Return: Returns 0 on success or an error code on failure.
+ * Call this as response for hot plug/unplug event to ack it.
+ * Returns %0 on success or an error code on failure.
*/
-int tb_cfg_error(struct tb_ctl *ctl, u64 route, u32 port,
- enum tb_cfg_error error)
+int tb_cfg_ack_plug(struct tb_ctl *ctl, u64 route, u32 port, bool unplug)
{
struct cfg_error_pkg pkg = {
.header = tb_cfg_make_header(route),
.port = port,
- .error = error,
+ .error = TB_CFG_ERROR_ACK_PLUG_EVENT,
+ .pg = unplug ? TB_CFG_ERROR_PG_HOT_UNPLUG
+ : TB_CFG_ERROR_PG_HOT_PLUG,
};
- tb_ctl_dbg(ctl, "resetting error on %llx:%x.\n", route, port);
+ tb_ctl_dbg(ctl, "acking hot %splug event on %llx:%x\n",
+ unplug ? "un" : "", route, port);
return tb_ctl_tx(ctl, &pkg, sizeof(pkg), TB_CFG_PKG_ERROR);
}
diff --git a/drivers/thunderbolt/ctl.h b/drivers/thunderbolt/ctl.h
index 2f1a1e111110..97cb03b38953 100644
--- a/drivers/thunderbolt/ctl.h
+++ b/drivers/thunderbolt/ctl.h
@@ -123,8 +123,7 @@ static inline struct tb_cfg_header tb_cfg_make_header(u64 route)
return header;
}
-int tb_cfg_error(struct tb_ctl *ctl, u64 route, u32 port,
- enum tb_cfg_error error);
+int tb_cfg_ack_plug(struct tb_ctl *ctl, u64 route, u32 port, bool unplug);
struct tb_cfg_result tb_cfg_reset(struct tb_ctl *ctl, u64 route,
int timeout_msec);
struct tb_cfg_result tb_cfg_read_raw(struct tb_ctl *ctl, void *buffer,
diff --git a/drivers/thunderbolt/eeprom.c b/drivers/thunderbolt/eeprom.c
index 8dd7de0cc826..921d164b3f35 100644
--- a/drivers/thunderbolt/eeprom.c
+++ b/drivers/thunderbolt/eeprom.c
@@ -131,12 +131,51 @@ static int tb_eeprom_in(struct tb_switch *sw, u8 *val)
}
/**
+ * tb_eeprom_get_drom_offset - get drom offset within eeprom
+ */
+static int tb_eeprom_get_drom_offset(struct tb_switch *sw, u16 *offset)
+{
+ struct tb_cap_plug_events cap;
+ int res;
+
+ if (!sw->cap_plug_events) {
+ tb_sw_warn(sw, "no TB_CAP_PLUG_EVENTS, cannot read eeprom\n");
+ return -ENODEV;
+ }
+ res = tb_sw_read(sw, &cap, TB_CFG_SWITCH, sw->cap_plug_events,
+ sizeof(cap) / 4);
+ if (res)
+ return res;
+
+ if (!cap.eeprom_ctl.present || cap.eeprom_ctl.not_present) {
+ tb_sw_warn(sw, "no NVM\n");
+ return -ENODEV;
+ }
+
+ if (cap.drom_offset > 0xffff) {
+ tb_sw_warn(sw, "drom offset is larger than 0xffff: %#x\n",
+ cap.drom_offset);
+ return -ENXIO;
+ }
+ *offset = cap.drom_offset;
+ return 0;
+}
+
+/**
* tb_eeprom_read_n - read count bytes from offset into val
*/
static int tb_eeprom_read_n(struct tb_switch *sw, u16 offset, u8 *val,
size_t count)
{
+ u16 drom_offset;
int i, res;
+
+ res = tb_eeprom_get_drom_offset(sw, &drom_offset);
+ if (res)
+ return res;
+
+ offset += drom_offset;
+
res = tb_eeprom_active(sw, true);
if (res)
return res;
@@ -239,36 +278,6 @@ struct tb_drom_entry_port {
/**
- * tb_eeprom_get_drom_offset - get drom offset within eeprom
- */
-static int tb_eeprom_get_drom_offset(struct tb_switch *sw, u16 *offset)
-{
- struct tb_cap_plug_events cap;
- int res;
- if (!sw->cap_plug_events) {
- tb_sw_warn(sw, "no TB_CAP_PLUG_EVENTS, cannot read eeprom\n");
- return -ENOSYS;
- }
- res = tb_sw_read(sw, &cap, TB_CFG_SWITCH, sw->cap_plug_events,
- sizeof(cap) / 4);
- if (res)
- return res;
-
- if (!cap.eeprom_ctl.present || cap.eeprom_ctl.not_present) {
- tb_sw_warn(sw, "no NVM\n");
- return -ENOSYS;
- }
-
- if (cap.drom_offset > 0xffff) {
- tb_sw_warn(sw, "drom offset is larger than 0xffff: %#x\n",
- cap.drom_offset);
- return -ENXIO;
- }
- *offset = cap.drom_offset;
- return 0;
-}
-
-/**
* tb_drom_read_uid_only - read uid directly from drom
*
* Does not use the cached copy in sw->drom. Used during resume to check switch
@@ -277,17 +286,11 @@ static int tb_eeprom_get_drom_offset(struct tb_switch *sw, u16 *offset)
int tb_drom_read_uid_only(struct tb_switch *sw, u64 *uid)
{
u8 data[9];
- u16 drom_offset;
u8 crc;
- int res = tb_eeprom_get_drom_offset(sw, &drom_offset);
- if (res)
- return res;
-
- if (drom_offset == 0)
- return -ENODEV;
+ int res;
/* read uid */
- res = tb_eeprom_read_n(sw, drom_offset, data, 9);
+ res = tb_eeprom_read_n(sw, 0, data, 9);
if (res)
return res;
@@ -484,12 +487,42 @@ err_free:
return ret;
}
+static int usb4_copy_host_drom(struct tb_switch *sw, u16 *size)
+{
+ int ret;
+
+ ret = usb4_switch_drom_read(sw, 14, size, sizeof(*size));
+ if (ret)
+ return ret;
+
+ /* Size includes CRC8 + UID + CRC32 */
+ *size += 1 + 8 + 4;
+ sw->drom = kzalloc(*size, GFP_KERNEL);
+ if (!sw->drom)
+ return -ENOMEM;
+
+ ret = usb4_switch_drom_read(sw, 0, sw->drom, *size);
+ if (ret) {
+ kfree(sw->drom);
+ sw->drom = NULL;
+ }
+
+ return ret;
+}
+
+static int tb_drom_read_n(struct tb_switch *sw, u16 offset, u8 *val,
+ size_t count)
+{
+ if (tb_switch_is_usb4(sw))
+ return usb4_switch_drom_read(sw, offset, val, count);
+ return tb_eeprom_read_n(sw, offset, val, count);
+}
+
/**
* tb_drom_read - copy drom to sw->drom and parse it
*/
int tb_drom_read(struct tb_switch *sw)
{
- u16 drom_offset;
u16 size;
u32 crc;
struct tb_drom_header *header;
@@ -510,18 +543,26 @@ int tb_drom_read(struct tb_switch *sw)
goto parse;
/*
- * The root switch contains only a dummy drom (header only,
- * no entries). Hardcode the configuration here.
+ * USB4 hosts may support reading DROM through router
+ * operations.
*/
- tb_drom_read_uid_only(sw, &sw->uid);
+ if (tb_switch_is_usb4(sw)) {
+ usb4_switch_read_uid(sw, &sw->uid);
+ if (!usb4_copy_host_drom(sw, &size))
+ goto parse;
+ } else {
+ /*
+ * The root switch contains only a dummy drom
+ * (header only, no entries). Hardcode the
+ * configuration here.
+ */
+ tb_drom_read_uid_only(sw, &sw->uid);
+ }
+
return 0;
}
- res = tb_eeprom_get_drom_offset(sw, &drom_offset);
- if (res)
- return res;
-
- res = tb_eeprom_read_n(sw, drom_offset + 14, (u8 *) &size, 2);
+ res = tb_drom_read_n(sw, 14, (u8 *) &size, 2);
if (res)
return res;
size &= 0x3ff;
@@ -535,7 +576,7 @@ int tb_drom_read(struct tb_switch *sw)
sw->drom = kzalloc(size, GFP_KERNEL);
if (!sw->drom)
return -ENOMEM;
- res = tb_eeprom_read_n(sw, drom_offset, sw->drom, size);
+ res = tb_drom_read_n(sw, 0, sw->drom, size);
if (res)
goto err;
diff --git a/drivers/thunderbolt/nhi.c b/drivers/thunderbolt/nhi.c
index 641b21b54460..1be491ecbb45 100644
--- a/drivers/thunderbolt/nhi.c
+++ b/drivers/thunderbolt/nhi.c
@@ -1271,6 +1271,9 @@ static struct pci_device_id nhi_ids[] = {
{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_ICL_NHI1),
.driver_data = (kernel_ulong_t)&icl_nhi_ops },
+ /* Any USB4 compliant host */
+ { PCI_DEVICE_CLASS(PCI_CLASS_SERIAL_USB_USB4, ~0) },
+
{ 0,}
};
diff --git a/drivers/thunderbolt/nhi.h b/drivers/thunderbolt/nhi.h
index b7b973949f8e..5d276ee9b38e 100644
--- a/drivers/thunderbolt/nhi.h
+++ b/drivers/thunderbolt/nhi.h
@@ -74,4 +74,6 @@ extern const struct tb_nhi_ops icl_nhi_ops;
#define PCI_DEVICE_ID_INTEL_ICL_NHI1 0x8a0d
#define PCI_DEVICE_ID_INTEL_ICL_NHI0 0x8a17
+#define PCI_CLASS_SERIAL_USB_USB4 0x0c0340
+
#endif
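The new class constant decodes, per the PCI spec, as base class 0x0c (serial bus controller), subclass 0x03 (USB controller), programming interface 0x40 (USB4 host interface):

	/* 0x0c0340 = 0x0c << 16 | 0x03 << 8 | 0x40 */

which is why the nhi_ids[] entry above can match any spec-compliant USB4 host with PCI_DEVICE_CLASS(PCI_CLASS_SERIAL_USB_USB4, ~0), i.e. an exact match on the full 24-bit class code.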
diff --git a/drivers/thunderbolt/switch.c b/drivers/thunderbolt/switch.c
index ca86a8e09c77..ad5479f21174 100644
--- a/drivers/thunderbolt/switch.c
+++ b/drivers/thunderbolt/switch.c
@@ -163,10 +163,12 @@ static int nvm_validate_and_write(struct tb_switch *sw)
image_size -= hdr_size;
}
+ if (tb_switch_is_usb4(sw))
+ return usb4_switch_nvm_write(sw, 0, buf, image_size);
return dma_port_flash_write(sw->dma_port, 0, buf, image_size);
}
-static int nvm_authenticate_host(struct tb_switch *sw)
+static int nvm_authenticate_host_dma_port(struct tb_switch *sw)
{
int ret = 0;
@@ -206,7 +208,7 @@ static int nvm_authenticate_host(struct tb_switch *sw)
return ret;
}
-static int nvm_authenticate_device(struct tb_switch *sw)
+static int nvm_authenticate_device_dma_port(struct tb_switch *sw)
{
int ret, retries = 10;
@@ -251,6 +253,78 @@ static int nvm_authenticate_device(struct tb_switch *sw)
return -ETIMEDOUT;
}
+static void nvm_authenticate_start_dma_port(struct tb_switch *sw)
+{
+ struct pci_dev *root_port;
+
+ /*
+ * During host router NVM upgrade we should not allow the root
+ * port to go into D3cold because some root ports cannot trigger
+ * PME themselves. To be on the safe side, keep the root port in
+ * D0 during the whole upgrade process.
+ */
+ root_port = pci_find_pcie_root_port(sw->tb->nhi->pdev);
+ if (root_port)
+ pm_runtime_get_noresume(&root_port->dev);
+}
+
+static void nvm_authenticate_complete_dma_port(struct tb_switch *sw)
+{
+ struct pci_dev *root_port;
+
+ root_port = pci_find_pcie_root_port(sw->tb->nhi->pdev);
+ if (root_port)
+ pm_runtime_put(&root_port->dev);
+}
+
+static inline bool nvm_readable(struct tb_switch *sw)
+{
+ if (tb_switch_is_usb4(sw)) {
+ /*
+ * USB4 devices must support NVM operations, but they are
+ * optional for hosts. Therefore we query the NVM sector
+ * size here, and if it is supported, assume the NVM
+ * operations are implemented.
+ */
+ return usb4_switch_nvm_sector_size(sw) > 0;
+ }
+
+ /* Thunderbolt 2 and 3 devices support NVM through DMA port */
+ return !!sw->dma_port;
+}
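
nvm_readable() keys off usb4_switch_nvm_sector_size() returning a positive
sector size. A minimal sketch of that query, assuming it is built on the
router-operation primitives this patch adds in usb4.c (the status handling
is an assumption, not necessarily the exact implementation):

    int usb4_switch_nvm_sector_size(struct tb_switch *sw)
    {
            u32 metadata;
            u8 status;
            int ret;

            ret = usb4_switch_op(sw, USB4_SWITCH_OP_NVM_SECTOR_SIZE, &status);
            if (ret)
                    return ret;

            /* Non-zero status: operation failed or is not implemented */
            if (status)
                    return -EIO;

            ret = usb4_switch_op_read_metadata(sw, &metadata);
            if (ret)
                    return ret;

            /* Sector size is carried in the low 24 bits of metadata */
            return metadata & USB4_NVM_SECTOR_SIZE_MASK;
    }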
+
+static inline bool nvm_upgradeable(struct tb_switch *sw)
+{
+ if (sw->no_nvm_upgrade)
+ return false;
+ return nvm_readable(sw);
+}
+
+static inline int nvm_read(struct tb_switch *sw, unsigned int address,
+ void *buf, size_t size)
+{
+ if (tb_switch_is_usb4(sw))
+ return usb4_switch_nvm_read(sw, address, buf, size);
+ return dma_port_flash_read(sw->dma_port, address, buf, size);
+}
+
+static int nvm_authenticate(struct tb_switch *sw)
+{
+ int ret;
+
+ if (tb_switch_is_usb4(sw))
+ return usb4_switch_nvm_authenticate(sw);
+
+ if (!tb_route(sw)) {
+ nvm_authenticate_start_dma_port(sw);
+ ret = nvm_authenticate_host_dma_port(sw);
+ } else {
+ ret = nvm_authenticate_device_dma_port(sw);
+ }
+
+ return ret;
+}
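
The USB4 leg of nvm_authenticate() maps onto a single router operation. A
minimal sketch of usb4_switch_nvm_authenticate(), assuming any non-zero
operation status simply means failure:

    int usb4_switch_nvm_authenticate(struct tb_switch *sw)
    {
            u8 status;
            int ret;

            ret = usb4_switch_op(sw, USB4_SWITCH_OP_NVM_AUTH, &status);
            if (ret)
                    return ret;

            /* Assumed mapping: any non-zero status is a failure */
            return status ? -EIO : 0;
    }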
+
static int tb_switch_nvm_read(void *priv, unsigned int offset, void *val,
size_t bytes)
{
@@ -264,7 +338,7 @@ static int tb_switch_nvm_read(void *priv, unsigned int offset, void *val,
goto out;
}
- ret = dma_port_flash_read(sw->dma_port, offset, val, bytes);
+ ret = nvm_read(sw, offset, val, bytes);
mutex_unlock(&sw->tb->lock);
out:
@@ -341,8 +415,20 @@ static int tb_switch_nvm_add(struct tb_switch *sw)
u32 val;
int ret;
- if (!sw->dma_port)
+ if (!nvm_readable(sw))
+ return 0;
+
+ /*
+ * The NVM format of non-Intel hardware is not known, so for
+ * now restrict NVM upgrade to Intel hardware only. We may
+ * relax this in the future when we learn other NVM formats.
+ */
+ if (sw->config.vendor_id != PCI_VENDOR_ID_INTEL) {
+ dev_info(&sw->dev,
+ "NVM format of vendor %#x is not known, disabling NVM upgrade\n",
+ sw->config.vendor_id);
return 0;
+ }
nvm = kzalloc(sizeof(*nvm), GFP_KERNEL);
if (!nvm)
@@ -358,8 +444,7 @@ static int tb_switch_nvm_add(struct tb_switch *sw)
if (!sw->safe_mode) {
u32 nvm_size, hdr_size;
- ret = dma_port_flash_read(sw->dma_port, NVM_FLASH_SIZE, &val,
- sizeof(val));
+ ret = nvm_read(sw, NVM_FLASH_SIZE, &val, sizeof(val));
if (ret)
goto err_ida;
@@ -367,8 +452,7 @@ static int tb_switch_nvm_add(struct tb_switch *sw)
nvm_size = (SZ_1M << (val & 7)) / 8;
nvm_size = (nvm_size - hdr_size) / 2;
- ret = dma_port_flash_read(sw->dma_port, NVM_VERSION, &val,
- sizeof(val));
+ ret = nvm_read(sw, NVM_VERSION, &val, sizeof(val));
if (ret)
goto err_ida;
@@ -620,6 +704,24 @@ int tb_port_clear_counter(struct tb_port *port, int counter)
}
/**
+ * tb_port_unlock() - Unlock downstream port
+ * @port: Port to unlock
+ *
+ * Needed for USB4, but can be called for any CIO/USB4 port. Makes the
+ * downstream router accessible to the CM.
+ */
+int tb_port_unlock(struct tb_port *port)
+{
+ if (tb_switch_is_icm(port->sw))
+ return 0;
+ if (!tb_port_is_null(port))
+ return -EINVAL;
+ if (tb_switch_is_usb4(port->sw))
+ return usb4_port_unlock(port);
+ return 0;
+}
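
tb_port_unlock() defers to usb4_port_unlock() for USB4 routers. Given the
ADP_CS_4_LCK bit this patch adds to tb_regs.h, a plausible sketch is simply
clearing that bit in the adapter config space (illustrative only):

    int usb4_port_unlock(struct tb_port *port)
    {
            int ret;
            u32 val;

            ret = tb_port_read(port, &val, TB_CFG_PORT, ADP_CS_4, 1);
            if (ret)
                    return ret;

            /* Clear the lock bit so the CM can reach the downstream router */
            val &= ~ADP_CS_4_LCK;
            return tb_port_write(port, &val, TB_CFG_PORT, ADP_CS_4, 1);
    }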
+
+/**
* tb_init_port() - initialize a port
*
* This is a helper method for tb_switch_alloc. Does not check or initialize
@@ -650,6 +752,10 @@ static int tb_init_port(struct tb_port *port)
port->cap_phy = cap;
else
tb_port_WARN(port, "non switch port without a PHY\n");
+
+ cap = tb_port_find_cap(port, TB_PORT_CAP_USB4);
+ if (cap > 0)
+ port->cap_usb4 = cap;
} else if (port->port != 0) {
cap = tb_port_find_cap(port, TB_PORT_CAP_ADAP);
if (cap > 0)
@@ -936,12 +1042,47 @@ bool tb_port_is_enabled(struct tb_port *port)
case TB_TYPE_DP_HDMI_OUT:
return tb_dp_port_is_enabled(port);
+ case TB_TYPE_USB3_UP:
+ case TB_TYPE_USB3_DOWN:
+ return tb_usb3_port_is_enabled(port);
+
default:
return false;
}
}
/**
+ * tb_usb3_port_is_enabled() - Is the USB3 adapter port enabled
+ * @port: USB3 adapter port to check
+ */
+bool tb_usb3_port_is_enabled(struct tb_port *port)
+{
+ u32 data;
+
+ if (tb_port_read(port, &data, TB_CFG_PORT,
+ port->cap_adap + ADP_USB3_CS_0, 1))
+ return false;
+
+ return !!(data & ADP_USB3_CS_0_PE);
+}
+
+/**
+ * tb_usb3_port_enable() - Enable USB3 adapter port
+ * @port: USB3 adapter port to enable
+ * @enable: Enable/disable the USB3 adapter
+ */
+int tb_usb3_port_enable(struct tb_port *port, bool enable)
+{
+ u32 word = enable ? (ADP_USB3_CS_0_PE | ADP_USB3_CS_0_V)
+ : ADP_USB3_CS_0_V;
+
+ if (!port->cap_adap)
+ return -ENXIO;
+ return tb_port_write(port, &word, TB_CFG_PORT,
+ port->cap_adap + ADP_USB3_CS_0, 1);
+}
+
+/**
* tb_pci_port_is_enabled() - Is the PCIe adapter port enabled
* @port: PCIe port to check
*/
@@ -1088,20 +1229,38 @@ int tb_dp_port_enable(struct tb_port *port, bool enable)
/* switch utility functions */
-static void tb_dump_switch(struct tb *tb, struct tb_regs_switch_header *sw)
+static const char *tb_switch_generation_name(const struct tb_switch *sw)
+{
+ switch (sw->generation) {
+ case 1:
+ return "Thunderbolt 1";
+ case 2:
+ return "Thunderbolt 2";
+ case 3:
+ return "Thunderbolt 3";
+ case 4:
+ return "USB4";
+ default:
+ return "Unknown";
+ }
+}
+
+static void tb_dump_switch(const struct tb *tb, const struct tb_switch *sw)
{
- tb_dbg(tb, " Switch: %x:%x (Revision: %d, TB Version: %d)\n",
- sw->vendor_id, sw->device_id, sw->revision,
- sw->thunderbolt_version);
- tb_dbg(tb, " Max Port Number: %d\n", sw->max_port_number);
+ const struct tb_regs_switch_header *regs = &sw->config;
+
+ tb_dbg(tb, " %s Switch: %x:%x (Revision: %d, TB Version: %d)\n",
+ tb_switch_generation_name(sw), regs->vendor_id, regs->device_id,
+ regs->revision, regs->thunderbolt_version);
+ tb_dbg(tb, " Max Port Number: %d\n", regs->max_port_number);
tb_dbg(tb, " Config:\n");
tb_dbg(tb,
" Upstream Port Number: %d Depth: %d Route String: %#llx Enabled: %d, PlugEventsDelay: %dms\n",
- sw->upstream_port_number, sw->depth,
- (((u64) sw->route_hi) << 32) | sw->route_lo,
- sw->enabled, sw->plug_events_delay);
+ regs->upstream_port_number, regs->depth,
+ (((u64) regs->route_hi) << 32) | regs->route_lo,
+ regs->enabled, regs->plug_events_delay);
tb_dbg(tb, " unknown1: %#x unknown4: %#x\n",
- sw->__unknown1, sw->__unknown4);
+ regs->__unknown1, regs->__unknown4);
}
/**
@@ -1148,6 +1307,10 @@ static int tb_plug_events_active(struct tb_switch *sw, bool active)
if (res)
return res;
+ /* Plug events are always enabled in USB4 */
+ if (tb_switch_is_usb4(sw))
+ return 0;
+
res = tb_sw_read(sw, &data, TB_CFG_SWITCH, sw->cap_plug_events + 1, 1);
if (res)
return res;
@@ -1359,30 +1522,6 @@ static ssize_t lanes_show(struct device *dev, struct device_attribute *attr,
static DEVICE_ATTR(rx_lanes, 0444, lanes_show, NULL);
static DEVICE_ATTR(tx_lanes, 0444, lanes_show, NULL);
-static void nvm_authenticate_start(struct tb_switch *sw)
-{
- struct pci_dev *root_port;
-
- /*
- * During host router NVM upgrade we should not allow root port to
- * go into D3cold because some root ports cannot trigger PME
- * itself. To be on the safe side keep the root port in D0 during
- * the whole upgrade process.
- */
- root_port = pci_find_pcie_root_port(sw->tb->nhi->pdev);
- if (root_port)
- pm_runtime_get_noresume(&root_port->dev);
-}
-
-static void nvm_authenticate_complete(struct tb_switch *sw)
-{
- struct pci_dev *root_port;
-
- root_port = pci_find_pcie_root_port(sw->tb->nhi->pdev);
- if (root_port)
- pm_runtime_put(&root_port->dev);
-}
-
static ssize_t nvm_authenticate_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
@@ -1431,17 +1570,7 @@ static ssize_t nvm_authenticate_store(struct device *dev,
goto exit_unlock;
sw->nvm->authenticating = true;
-
- if (!tb_route(sw)) {
- /*
- * Keep root port from suspending as long as the
- * NVM upgrade process is running.
- */
- nvm_authenticate_start(sw);
- ret = nvm_authenticate_host(sw);
- } else {
- ret = nvm_authenticate_device(sw);
- }
+ ret = nvm_authenticate(sw);
}
exit_unlock:
@@ -1556,11 +1685,11 @@ static umode_t switch_attr_is_visible(struct kobject *kobj,
return attr->mode;
return 0;
} else if (attr == &dev_attr_nvm_authenticate.attr) {
- if (sw->dma_port && !sw->no_nvm_upgrade)
+ if (nvm_upgradeable(sw))
return attr->mode;
return 0;
} else if (attr == &dev_attr_nvm_version.attr) {
- if (sw->dma_port)
+ if (nvm_readable(sw))
return attr->mode;
return 0;
} else if (attr == &dev_attr_boot.attr) {
@@ -1672,6 +1801,9 @@ static int tb_switch_get_generation(struct tb_switch *sw)
return 3;
default:
+ if (tb_switch_is_usb4(sw))
+ return 4;
+
/*
* For unknown switches assume generation to be 1 to be
* on the safe side.
@@ -1682,6 +1814,19 @@ static int tb_switch_get_generation(struct tb_switch *sw)
}
}
+static bool tb_switch_exceeds_max_depth(const struct tb_switch *sw, int depth)
+{
+ int max_depth;
+
+ if (tb_switch_is_usb4(sw) ||
+ (sw->tb->root_switch && tb_switch_is_usb4(sw->tb->root_switch)))
+ max_depth = USB4_SWITCH_MAX_DEPTH;
+ else
+ max_depth = TB_SWITCH_MAX_DEPTH;
+
+ return depth > max_depth;
+}
+
/**
* tb_switch_alloc() - allocate a switch
* @tb: Pointer to the owning domain
@@ -1703,10 +1848,16 @@ struct tb_switch *tb_switch_alloc(struct tb *tb, struct device *parent,
int upstream_port;
int i, ret, depth;
- /* Make sure we do not exceed maximum topology limit */
+ /* Unlock the downstream port so we can access the switch below */
+ if (route) {
+ struct tb_switch *parent_sw = tb_to_switch(parent);
+ struct tb_port *down;
+
+ down = tb_port_at(route, parent_sw);
+ tb_port_unlock(down);
+ }
+
depth = tb_route_length(route);
- if (depth > TB_SWITCH_MAX_DEPTH)
- return ERR_PTR(-EADDRNOTAVAIL);
upstream_port = tb_cfg_get_upstream_port(tb->ctl, route);
if (upstream_port < 0)
@@ -1721,8 +1872,10 @@ struct tb_switch *tb_switch_alloc(struct tb *tb, struct device *parent,
if (ret)
goto err_free_sw_ports;
+ sw->generation = tb_switch_get_generation(sw);
+
tb_dbg(tb, "current switch config:\n");
- tb_dump_switch(tb, &sw->config);
+ tb_dump_switch(tb, sw);
/* configure switch */
sw->config.upstream_port_number = upstream_port;
@@ -1731,6 +1884,12 @@ struct tb_switch *tb_switch_alloc(struct tb *tb, struct device *parent,
sw->config.route_lo = lower_32_bits(route);
sw->config.enabled = 0;
+ /* Make sure we do not exceed maximum topology limit */
+ if (tb_switch_exceeds_max_depth(sw, depth)) {
+ ret = -EADDRNOTAVAIL;
+ goto err_free_sw_ports;
+ }
+
/* initialize ports */
sw->ports = kcalloc(sw->config.max_port_number + 1, sizeof(*sw->ports),
GFP_KERNEL);
@@ -1745,14 +1904,9 @@ struct tb_switch *tb_switch_alloc(struct tb *tb, struct device *parent,
sw->ports[i].port = i;
}
- sw->generation = tb_switch_get_generation(sw);
-
ret = tb_switch_find_vse_cap(sw, TB_VSE_CAP_PLUG_EVENTS);
- if (ret < 0) {
- tb_sw_warn(sw, "cannot find TB_VSE_CAP_PLUG_EVENTS aborting\n");
- goto err_free_sw_ports;
- }
- sw->cap_plug_events = ret;
+ if (ret > 0)
+ sw->cap_plug_events = ret;
ret = tb_switch_find_vse_cap(sw, TB_VSE_CAP_LINK_CONTROLLER);
if (ret > 0)
@@ -1823,7 +1977,8 @@ tb_switch_alloc_safe_mode(struct tb *tb, struct device *parent, u64 route)
*
* Call this function before the switch is added to the system. It will
* upload configuration to the switch and makes it available for the
- * connection manager to use.
+ * connection manager to use. Can be called on the switch again after
+ * resume from low power states to re-initialize it.
*
* Return: %0 in case of success and negative errno in case of failure
*/
@@ -1834,21 +1989,50 @@ int tb_switch_configure(struct tb_switch *sw)
int ret;
route = tb_route(sw);
- tb_dbg(tb, "initializing Switch at %#llx (depth: %d, up port: %d)\n",
- route, tb_route_length(route), sw->config.upstream_port_number);
- if (sw->config.vendor_id != PCI_VENDOR_ID_INTEL)
- tb_sw_warn(sw, "unknown switch vendor id %#x\n",
- sw->config.vendor_id);
+ tb_dbg(tb, "%s Switch at %#llx (depth: %d, up port: %d)\n",
+ sw->config.enabled ? "restoring" : "initializing", route,
+ tb_route_length(route), sw->config.upstream_port_number);
sw->config.enabled = 1;
- /* upload configuration */
- ret = tb_sw_write(sw, 1 + (u32 *)&sw->config, TB_CFG_SWITCH, 1, 3);
- if (ret)
- return ret;
+ if (tb_switch_is_usb4(sw)) {
+ /*
+ * For USB4 devices, we need to program the CM version
+ * accordingly so that it knows to expose all the
+ * additional capabilities.
+ */
+ sw->config.cmuv = USB4_VERSION_1_0;
+
+ /* Enumerate the switch */
+ ret = tb_sw_write(sw, (u32 *)&sw->config + 1, TB_CFG_SWITCH,
+ ROUTER_CS_1, 4);
+ if (ret)
+ return ret;
+
+ ret = usb4_switch_setup(sw);
+ if (ret)
+ return ret;
+
+ ret = usb4_switch_configure_link(sw);
+ } else {
+ if (sw->config.vendor_id != PCI_VENDOR_ID_INTEL)
+ tb_sw_warn(sw, "unknown switch vendor id %#x\n",
+ sw->config.vendor_id);
- ret = tb_lc_configure_link(sw);
+ if (!sw->cap_plug_events) {
+ tb_sw_warn(sw, "cannot find TB_VSE_CAP_PLUG_EVENTS aborting\n");
+ return -ENODEV;
+ }
+
+ /* Enumerate the switch */
+ ret = tb_sw_write(sw, (u32 *)&sw->config + 1, TB_CFG_SWITCH,
+ ROUTER_CS_1, 3);
+ if (ret)
+ return ret;
+
+ ret = tb_lc_configure_link(sw);
+ }
if (ret)
return ret;
@@ -1857,18 +2041,32 @@ int tb_switch_configure(struct tb_switch *sw)
static int tb_switch_set_uuid(struct tb_switch *sw)
{
+ bool uid = false;
u32 uuid[4];
int ret;
if (sw->uuid)
return 0;
- /*
- * The newer controllers include fused UUID as part of link
- * controller specific registers
- */
- ret = tb_lc_read_uuid(sw, uuid);
- if (ret) {
+ if (tb_switch_is_usb4(sw)) {
+ ret = usb4_switch_read_uid(sw, &sw->uid);
+ if (ret)
+ return ret;
+ uid = true;
+ } else {
+ /*
+ * The newer controllers include fused UUID as part of
+ * link controller specific registers
+ */
+ ret = tb_lc_read_uuid(sw, uuid);
+ if (ret) {
+ if (ret != -EINVAL)
+ return ret;
+ uid = true;
+ }
+ }
+
+ if (uid) {
/*
* ICM generates UUID based on UID and fills the upper
* two words with ones. This is not strictly following
@@ -1935,7 +2133,7 @@ static int tb_switch_add_dma_port(struct tb_switch *sw)
nvm_get_auth_status(sw, &status);
if (status) {
if (!tb_route(sw))
- nvm_authenticate_complete(sw);
+ nvm_authenticate_complete_dma_port(sw);
return 0;
}
@@ -1950,7 +2148,7 @@ static int tb_switch_add_dma_port(struct tb_switch *sw)
/* Now we can allow root port to suspend again */
if (!tb_route(sw))
- nvm_authenticate_complete(sw);
+ nvm_authenticate_complete_dma_port(sw);
if (status) {
tb_sw_info(sw, "switch flash authentication failed\n");
@@ -2004,6 +2202,8 @@ static bool tb_switch_lane_bonding_possible(struct tb_switch *sw)
if (!up->dual_link_port || !up->dual_link_port->remote)
return false;
+ if (tb_switch_is_usb4(sw))
+ return usb4_switch_lane_bonding_possible(sw);
return tb_lc_lane_bonding_possible(sw);
}
@@ -2175,6 +2375,10 @@ int tb_switch_add(struct tb_switch *sw)
ret = tb_switch_update_link_attributes(sw);
if (ret)
return ret;
+
+ ret = tb_switch_tmu_init(sw);
+ if (ret)
+ return ret;
}
ret = device_add(&sw->dev);
@@ -2240,7 +2444,11 @@ void tb_switch_remove(struct tb_switch *sw)
if (!sw->is_unplugged)
tb_plug_events_active(sw, false);
- tb_lc_unconfigure_link(sw);
+
+ if (tb_switch_is_usb4(sw))
+ usb4_switch_unconfigure_link(sw);
+ else
+ tb_lc_unconfigure_link(sw);
tb_switch_nvm_remove(sw);
@@ -2298,7 +2506,10 @@ int tb_switch_resume(struct tb_switch *sw)
return err;
}
- err = tb_drom_read_uid_only(sw, &uid);
+ if (tb_switch_is_usb4(sw))
+ err = usb4_switch_read_uid(sw, &uid);
+ else
+ err = tb_drom_read_uid_only(sw, &uid);
if (err) {
tb_sw_warn(sw, "uid read failed\n");
return err;
@@ -2311,16 +2522,7 @@ int tb_switch_resume(struct tb_switch *sw)
}
}
- /* upload configuration */
- err = tb_sw_write(sw, 1 + (u32 *) &sw->config, TB_CFG_SWITCH, 1, 3);
- if (err)
- return err;
-
- err = tb_lc_configure_link(sw);
- if (err)
- return err;
-
- err = tb_plug_events_active(sw, true);
+ err = tb_switch_configure(sw);
if (err)
return err;
@@ -2336,8 +2538,14 @@ int tb_switch_resume(struct tb_switch *sw)
tb_sw_set_unplugged(port->remote->sw);
else if (port->xdomain)
port->xdomain->is_unplugged = true;
- } else if (tb_port_has_remote(port)) {
- if (tb_switch_resume(port->remote->sw)) {
+ } else if (tb_port_has_remote(port) || port->xdomain) {
+ /*
+ * Always unlock the port so the downstream
+ * switch/domain is accessible.
+ */
+ if (tb_port_unlock(port))
+ tb_port_warn(port, "failed to unlock port\n");
+ if (port->remote && tb_switch_resume(port->remote->sw)) {
tb_port_warn(port,
"lost during suspend, disconnecting\n");
tb_sw_set_unplugged(port->remote->sw);
@@ -2361,7 +2569,10 @@ void tb_switch_suspend(struct tb_switch *sw)
tb_switch_suspend(port->remote->sw);
}
- tb_lc_set_sleep(sw);
+ if (tb_switch_is_usb4(sw))
+ usb4_switch_set_sleep(sw);
+ else
+ tb_lc_set_sleep(sw);
}
/**
@@ -2374,6 +2585,8 @@ void tb_switch_suspend(struct tb_switch *sw)
*/
bool tb_switch_query_dp_resource(struct tb_switch *sw, struct tb_port *in)
{
+ if (tb_switch_is_usb4(sw))
+ return usb4_switch_query_dp_resource(sw, in);
return tb_lc_dp_sink_query(sw, in);
}
@@ -2388,6 +2601,8 @@ bool tb_switch_query_dp_resource(struct tb_switch *sw, struct tb_port *in)
*/
int tb_switch_alloc_dp_resource(struct tb_switch *sw, struct tb_port *in)
{
+ if (tb_switch_is_usb4(sw))
+ return usb4_switch_alloc_dp_resource(sw, in);
return tb_lc_dp_sink_alloc(sw, in);
}
@@ -2401,10 +2616,16 @@ int tb_switch_alloc_dp_resource(struct tb_switch *sw, struct tb_port *in)
*/
void tb_switch_dealloc_dp_resource(struct tb_switch *sw, struct tb_port *in)
{
- if (tb_lc_dp_sink_dealloc(sw, in)) {
+ int ret;
+
+ if (tb_switch_is_usb4(sw))
+ ret = usb4_switch_dealloc_dp_resource(sw, in);
+ else
+ ret = tb_lc_dp_sink_dealloc(sw, in);
+
+ if (ret)
tb_sw_warn(sw, "failed to de-allocate DP resource for port %d\n",
in->port);
- }
}
struct tb_sw_lookup {
@@ -2517,6 +2738,24 @@ struct tb_switch *tb_switch_find_by_route(struct tb *tb, u64 route)
return NULL;
}
+/**
+ * tb_switch_find_port() - return the first port of @type on @sw or NULL
+ * @sw: Switch to find the port from
+ * @type: Port type to look for
+ */
+struct tb_port *tb_switch_find_port(struct tb_switch *sw,
+ enum tb_port_type type)
+{
+ struct tb_port *port;
+
+ tb_switch_for_each_port(sw, port) {
+ if (port->config.type == type)
+ return port;
+ }
+
+ return NULL;
+}
+
void tb_switch_exit(void)
{
ida_destroy(&nvm_ida);
diff --git a/drivers/thunderbolt/tb.c b/drivers/thunderbolt/tb.c
index ea8727f769d6..107cd232f486 100644
--- a/drivers/thunderbolt/tb.c
+++ b/drivers/thunderbolt/tb.c
@@ -111,6 +111,10 @@ static void tb_discover_tunnels(struct tb_switch *sw)
tunnel = tb_tunnel_discover_pci(tb, port);
break;
+ case TB_TYPE_USB3_DOWN:
+ tunnel = tb_tunnel_discover_usb3(tb, port);
+ break;
+
default:
break;
}
@@ -158,6 +162,137 @@ static void tb_scan_xdomain(struct tb_port *port)
}
}
+static int tb_enable_tmu(struct tb_switch *sw)
+{
+ int ret;
+
+ /* If it is already enabled in the correct mode, don't touch it */
+ if (tb_switch_tmu_is_enabled(sw))
+ return 0;
+
+ ret = tb_switch_tmu_disable(sw);
+ if (ret)
+ return ret;
+
+ ret = tb_switch_tmu_post_time(sw);
+ if (ret)
+ return ret;
+
+ return tb_switch_tmu_enable(sw);
+}
+
+/**
+ * tb_find_unused_port() - return the first inactive port on @sw
+ * @sw: Switch to find the port on
+ * @type: Port type to look for
+ */
+static struct tb_port *tb_find_unused_port(struct tb_switch *sw,
+ enum tb_port_type type)
+{
+ struct tb_port *port;
+
+ tb_switch_for_each_port(sw, port) {
+ if (tb_is_upstream_port(port))
+ continue;
+ if (port->config.type != type)
+ continue;
+ if (!port->cap_adap)
+ continue;
+ if (tb_port_is_enabled(port))
+ continue;
+ return port;
+ }
+ return NULL;
+}
+
+static struct tb_port *tb_find_usb3_down(struct tb_switch *sw,
+ const struct tb_port *port)
+{
+ struct tb_port *down;
+
+ down = usb4_switch_map_usb3_down(sw, port);
+ if (down) {
+ if (WARN_ON(!tb_port_is_usb3_down(down)))
+ goto out;
+ if (WARN_ON(tb_usb3_port_is_enabled(down)))
+ goto out;
+
+ return down;
+ }
+
+out:
+ return tb_find_unused_port(sw, TB_TYPE_USB3_DOWN);
+}
+
+static int tb_tunnel_usb3(struct tb *tb, struct tb_switch *sw)
+{
+ struct tb_switch *parent = tb_switch_parent(sw);
+ struct tb_port *up, *down, *port;
+ struct tb_cm *tcm = tb_priv(tb);
+ struct tb_tunnel *tunnel;
+
+ up = tb_switch_find_port(sw, TB_TYPE_USB3_UP);
+ if (!up)
+ return 0;
+
+ /*
+ * Look up an available down port. Since we are chaining, it should
+ * be found right above this switch.
+ */
+ port = tb_port_at(tb_route(sw), parent);
+ down = tb_find_usb3_down(parent, port);
+ if (!down)
+ return 0;
+
+ if (tb_route(parent)) {
+ struct tb_port *parent_up;
+ /*
+ * Check first that the parent switch has its upstream USB3
+ * port enabled. Otherwise the chain is not complete and
+ * there is no point setting up a new tunnel.
+ */
+ parent_up = tb_switch_find_port(parent, TB_TYPE_USB3_UP);
+ if (!parent_up || !tb_port_is_enabled(parent_up))
+ return 0;
+ }
+
+ tunnel = tb_tunnel_alloc_usb3(tb, up, down);
+ if (!tunnel)
+ return -ENOMEM;
+
+ if (tb_tunnel_activate(tunnel)) {
+ tb_port_info(up,
+ "USB3 tunnel activation failed, aborting\n");
+ tb_tunnel_free(tunnel);
+ return -EIO;
+ }
+
+ list_add_tail(&tunnel->list, &tcm->tunnel_list);
+ return 0;
+}
+
+static int tb_create_usb3_tunnels(struct tb_switch *sw)
+{
+ struct tb_port *port;
+ int ret;
+
+ if (tb_route(sw)) {
+ ret = tb_tunnel_usb3(sw->tb, sw);
+ if (ret)
+ return ret;
+ }
+
+ tb_switch_for_each_port(sw, port) {
+ if (!tb_port_has_remote(port))
+ continue;
+ ret = tb_create_usb3_tunnels(port->remote->sw);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
static void tb_scan_port(struct tb_port *port);
/**
@@ -257,6 +392,18 @@ static void tb_scan_port(struct tb_port *port)
if (tb_switch_lane_bonding_enable(sw))
tb_sw_warn(sw, "failed to enable lane bonding\n");
+ if (tb_enable_tmu(sw))
+ tb_sw_warn(sw, "failed to enable TMU\n");
+
+ /*
+ * Create USB 3.x tunnels only when the switch is plugged into the
+ * domain. This is because we also scan the domain during discovery
+ * and want to discover existing USB 3.x tunnels before we create
+ * any new ones.
+ */
+ if (tcm->hotplug_active && tb_tunnel_usb3(sw->tb, sw))
+ tb_sw_warn(sw, "USB3 tunnel creation failed\n");
+
tb_scan_switch(sw);
}
@@ -338,57 +485,18 @@ static void tb_free_unplugged_children(struct tb_switch *sw)
}
}
-/**
- * tb_find_port() - return the first port of @type on @sw or NULL
- * @sw: Switch to find the port from
- * @type: Port type to look for
- */
-static struct tb_port *tb_find_port(struct tb_switch *sw,
- enum tb_port_type type)
-{
- struct tb_port *port;
-
- tb_switch_for_each_port(sw, port) {
- if (port->config.type == type)
- return port;
- }
-
- return NULL;
-}
-
-/**
- * tb_find_unused_port() - return the first inactive port on @sw
- * @sw: Switch to find the port on
- * @type: Port type to look for
- */
-static struct tb_port *tb_find_unused_port(struct tb_switch *sw,
- enum tb_port_type type)
-{
- struct tb_port *port;
-
- tb_switch_for_each_port(sw, port) {
- if (tb_is_upstream_port(port))
- continue;
- if (port->config.type != type)
- continue;
- if (port->cap_adap)
- continue;
- if (tb_port_is_enabled(port))
- continue;
- return port;
- }
- return NULL;
-}
-
static struct tb_port *tb_find_pcie_down(struct tb_switch *sw,
const struct tb_port *port)
{
+ struct tb_port *down = NULL;
+
/*
* To keep plugging devices consistently in the same PCIe
- * hierarchy, do mapping here for root switch downstream PCIe
- * ports.
+ * hierarchy, do mapping here for switch downstream PCIe ports.
*/
- if (!tb_route(sw)) {
+ if (tb_switch_is_usb4(sw)) {
+ down = usb4_switch_map_pcie_down(sw, port);
+ } else if (!tb_route(sw)) {
int phy_port = tb_phy_port_from_link(port->port);
int index;
@@ -409,12 +517,17 @@ static struct tb_port *tb_find_pcie_down(struct tb_switch *sw,
/* Validate the hard-coding */
if (WARN_ON(index > sw->config.max_port_number))
goto out;
- if (WARN_ON(!tb_port_is_pcie_down(&sw->ports[index])))
+
+ down = &sw->ports[index];
+ }
+
+ if (down) {
+ if (WARN_ON(!tb_port_is_pcie_down(down)))
goto out;
- if (WARN_ON(tb_pci_port_is_enabled(&sw->ports[index])))
+ if (WARN_ON(tb_pci_port_is_enabled(down)))
goto out;
- return &sw->ports[index];
+ return down;
}
out:
@@ -586,7 +699,7 @@ static int tb_tunnel_pci(struct tb *tb, struct tb_switch *sw)
struct tb_switch *parent_sw;
struct tb_tunnel *tunnel;
- up = tb_find_port(sw, TB_TYPE_PCIE_UP);
+ up = tb_switch_find_port(sw, TB_TYPE_PCIE_UP);
if (!up)
return 0;
@@ -624,7 +737,7 @@ static int tb_approve_xdomain_paths(struct tb *tb, struct tb_xdomain *xd)
sw = tb_to_switch(xd->dev.parent);
dst_port = tb_port_at(xd->route, sw);
- nhi_port = tb_find_port(tb->root_switch, TB_TYPE_NHI);
+ nhi_port = tb_switch_find_port(tb->root_switch, TB_TYPE_NHI);
mutex_lock(&tb->lock);
tunnel = tb_tunnel_alloc_dma(tb, nhi_port, dst_port, xd->transmit_ring,
@@ -719,6 +832,7 @@ static void tb_handle_hotplug(struct work_struct *work)
tb_sw_set_unplugged(port->remote->sw);
tb_free_invalid_tunnels(tb);
tb_remove_dp_resources(port->remote->sw);
+ tb_switch_tmu_disable(port->remote->sw);
tb_switch_lane_bonding_disable(port->remote->sw);
tb_switch_remove(port->remote->sw);
port->remote = NULL;
@@ -786,8 +900,7 @@ static void tb_handle_event(struct tb *tb, enum tb_cfg_pkg_type type,
route = tb_cfg_get_route(&pkg->header);
- if (tb_cfg_error(tb->ctl, route, pkg->port,
- TB_CFG_ERROR_ACK_PLUG_EVENT)) {
+ if (tb_cfg_ack_plug(tb->ctl, route, pkg->port, pkg->unplug)) {
tb_warn(tb, "could not ack plug event on %llx:%x\n", route,
pkg->port);
}
@@ -866,10 +979,17 @@ static int tb_start(struct tb *tb)
return ret;
}
+ /* Enable TMU if it is off */
+ tb_switch_tmu_enable(tb->root_switch);
/* Full scan to discover devices added before the driver was loaded. */
tb_scan_switch(tb->root_switch);
/* Find out tunnels created by the boot firmware */
tb_discover_tunnels(tb->root_switch);
+ /*
+ * If the boot firmware did not create USB 3.x tunnels, create them
+ * now for the whole topology.
+ */
+ tb_create_usb3_tunnels(tb->root_switch);
/* Add DP IN resources for the root switch */
tb_add_dp_resources(tb->root_switch);
/* Make the discovered switches available to the userspace */
@@ -897,6 +1017,9 @@ static void tb_restore_children(struct tb_switch *sw)
{
struct tb_port *port;
+ if (tb_enable_tmu(sw))
+ tb_sw_warn(sw, "failed to restore TMU configuration\n");
+
tb_switch_for_each_port(sw, port) {
if (!tb_port_has_remote(port))
continue;
diff --git a/drivers/thunderbolt/tb.h b/drivers/thunderbolt/tb.h
index ec851f20c571..2eb2bcd3cca3 100644
--- a/drivers/thunderbolt/tb.h
+++ b/drivers/thunderbolt/tb.h
@@ -44,6 +44,39 @@ struct tb_switch_nvm {
#define TB_SWITCH_KEY_SIZE 32
#define TB_SWITCH_MAX_DEPTH 6
+#define USB4_SWITCH_MAX_DEPTH 5
+
+/**
+ * enum tb_switch_tmu_rate - TMU refresh rate
+ * @TB_SWITCH_TMU_RATE_OFF: %0 (Disable Time Sync handshake)
+ * @TB_SWITCH_TMU_RATE_HIFI: %16 us time interval between successive
+ * transmission of the Delay Request TSNOS
+ * (Time Sync Notification Ordered Set) on a Link
+ * @TB_SWITCH_TMU_RATE_NORMAL: %1 ms time interval between successive
+ * transmission of the Delay Request TSNOS on
+ * a Link
+ */
+enum tb_switch_tmu_rate {
+ TB_SWITCH_TMU_RATE_OFF = 0,
+ TB_SWITCH_TMU_RATE_HIFI = 16,
+ TB_SWITCH_TMU_RATE_NORMAL = 1000,
+};
+
+/**
+ * struct tb_switch_tmu - Structure holding switch TMU configuration
+ * @cap: Offset to the TMU capability (%0 if not found)
+ * @has_ucap: Does the switch support uni-directional mode
+ * @rate: TMU refresh rate relative to the upstream switch. In case of
+ * the root switch this holds the domain rate.
+ * @unidirectional: Is the TMU in uni-directional or bi-directional mode
+ * relative to the upstream switch. Not relevant for the root switch.
+ */
+struct tb_switch_tmu {
+ int cap;
+ bool has_ucap;
+ enum tb_switch_tmu_rate rate;
+ bool unidirectional;
+};
/**
* struct tb_switch - a thunderbolt switch
@@ -54,6 +87,7 @@ struct tb_switch_nvm {
* mailbox this will hold the pointer to that (%NULL
* otherwise). If set it also means the switch has
* upgradeable NVM.
+ * @tmu: The switch TMU configuration
* @tb: Pointer to the domain the switch belongs to
* @uid: Unique ID of the switch
* @uuid: UUID of the switch (or %NULL if not supported)
@@ -92,6 +126,7 @@ struct tb_switch {
struct tb_regs_switch_header config;
struct tb_port *ports;
struct tb_dma_port *dma_port;
+ struct tb_switch_tmu tmu;
struct tb *tb;
u64 uid;
uuid_t *uuid;
@@ -128,7 +163,9 @@ struct tb_switch {
* @remote: Remote port (%NULL if not connected)
* @xdomain: Remote host (%NULL if not connected)
* @cap_phy: Offset, zero if not found
+ * @cap_tmu: Offset of the adapter specific TMU capability (%0 if not present)
* @cap_adap: Offset of the adapter specific capability (%0 if not present)
+ * @cap_usb4: Offset to the USB4 port capability (%0 if not present)
* @port: Port number on switch
* @disabled: Disabled by eeprom
* @bonded: true if the port is bonded (two lanes combined as one)
@@ -145,7 +182,9 @@ struct tb_port {
struct tb_port *remote;
struct tb_xdomain *xdomain;
int cap_phy;
+ int cap_tmu;
int cap_adap;
+ int cap_usb4;
u8 port;
bool disabled;
bool bonded;
@@ -393,6 +432,16 @@ static inline bool tb_port_is_dpout(const struct tb_port *port)
return port && port->config.type == TB_TYPE_DP_HDMI_OUT;
}
+static inline bool tb_port_is_usb3_down(const struct tb_port *port)
+{
+ return port && port->config.type == TB_TYPE_USB3_DOWN;
+}
+
+static inline bool tb_port_is_usb3_up(const struct tb_port *port)
+{
+ return port && port->config.type == TB_TYPE_USB3_UP;
+}
+
static inline int tb_sw_read(struct tb_switch *sw, void *buffer,
enum tb_cfg_space space, u32 offset, u32 length)
{
@@ -533,6 +582,8 @@ void tb_switch_suspend(struct tb_switch *sw);
int tb_switch_resume(struct tb_switch *sw);
int tb_switch_reset(struct tb *tb, u64 route);
void tb_sw_set_unplugged(struct tb_switch *sw);
+struct tb_port *tb_switch_find_port(struct tb_switch *sw,
+ enum tb_port_type type);
struct tb_switch *tb_switch_find_by_link_depth(struct tb *tb, u8 link,
u8 depth);
struct tb_switch *tb_switch_find_by_uuid(struct tb *tb, const uuid_t *uuid);
@@ -636,6 +687,17 @@ static inline bool tb_switch_is_titan_ridge(const struct tb_switch *sw)
}
/**
+ * tb_switch_is_usb4() - Is the switch USB4 compliant
+ * @sw: Switch to check
+ *
+ * Returns true if @sw is a USB4 compliant router, false otherwise.
+ */
+static inline bool tb_switch_is_usb4(const struct tb_switch *sw)
+{
+ return sw->config.thunderbolt_version == USB4_VERSION_1_0;
+}
+
+/**
* tb_switch_is_icm() - Is the switch handled by ICM firmware
* @sw: Switch to check
*
@@ -656,10 +718,22 @@ bool tb_switch_query_dp_resource(struct tb_switch *sw, struct tb_port *in);
int tb_switch_alloc_dp_resource(struct tb_switch *sw, struct tb_port *in);
void tb_switch_dealloc_dp_resource(struct tb_switch *sw, struct tb_port *in);
+int tb_switch_tmu_init(struct tb_switch *sw);
+int tb_switch_tmu_post_time(struct tb_switch *sw);
+int tb_switch_tmu_disable(struct tb_switch *sw);
+int tb_switch_tmu_enable(struct tb_switch *sw);
+
+static inline bool tb_switch_tmu_is_enabled(const struct tb_switch *sw)
+{
+ return sw->tmu.rate == TB_SWITCH_TMU_RATE_HIFI &&
+ !sw->tmu.unidirectional;
+}
+
int tb_wait_for_port(struct tb_port *port, bool wait_if_unplugged);
int tb_port_add_nfc_credits(struct tb_port *port, int credits);
int tb_port_set_initial_credits(struct tb_port *port, u32 credits);
int tb_port_clear_counter(struct tb_port *port, int counter);
+int tb_port_unlock(struct tb_port *port);
int tb_port_alloc_in_hopid(struct tb_port *port, int hopid, int max_hopid);
void tb_port_release_in_hopid(struct tb_port *port, int hopid);
int tb_port_alloc_out_hopid(struct tb_port *port, int hopid, int max_hopid);
@@ -668,9 +742,13 @@ struct tb_port *tb_next_port_on_path(struct tb_port *start, struct tb_port *end,
struct tb_port *prev);
int tb_switch_find_vse_cap(struct tb_switch *sw, enum tb_switch_vse_cap vsec);
+int tb_switch_find_cap(struct tb_switch *sw, enum tb_switch_cap cap);
int tb_port_find_cap(struct tb_port *port, enum tb_port_cap cap);
bool tb_port_is_enabled(struct tb_port *port);
+bool tb_usb3_port_is_enabled(struct tb_port *port);
+int tb_usb3_port_enable(struct tb_port *port, bool enable);
+
bool tb_pci_port_is_enabled(struct tb_port *port);
int tb_pci_port_enable(struct tb_port *port, bool enable);
@@ -734,4 +812,27 @@ void tb_xdomain_remove(struct tb_xdomain *xd);
struct tb_xdomain *tb_xdomain_find_by_link_depth(struct tb *tb, u8 link,
u8 depth);
+int usb4_switch_setup(struct tb_switch *sw);
+int usb4_switch_read_uid(struct tb_switch *sw, u64 *uid);
+int usb4_switch_drom_read(struct tb_switch *sw, unsigned int address, void *buf,
+ size_t size);
+int usb4_switch_configure_link(struct tb_switch *sw);
+void usb4_switch_unconfigure_link(struct tb_switch *sw);
+bool usb4_switch_lane_bonding_possible(struct tb_switch *sw);
+int usb4_switch_set_sleep(struct tb_switch *sw);
+int usb4_switch_nvm_sector_size(struct tb_switch *sw);
+int usb4_switch_nvm_read(struct tb_switch *sw, unsigned int address, void *buf,
+ size_t size);
+int usb4_switch_nvm_write(struct tb_switch *sw, unsigned int address,
+ const void *buf, size_t size);
+int usb4_switch_nvm_authenticate(struct tb_switch *sw);
+bool usb4_switch_query_dp_resource(struct tb_switch *sw, struct tb_port *in);
+int usb4_switch_alloc_dp_resource(struct tb_switch *sw, struct tb_port *in);
+int usb4_switch_dealloc_dp_resource(struct tb_switch *sw, struct tb_port *in);
+struct tb_port *usb4_switch_map_pcie_down(struct tb_switch *sw,
+ const struct tb_port *port);
+struct tb_port *usb4_switch_map_usb3_down(struct tb_switch *sw,
+ const struct tb_port *port);
+
+int usb4_port_unlock(struct tb_port *port);
#endif
diff --git a/drivers/thunderbolt/tb_msgs.h b/drivers/thunderbolt/tb_msgs.h
index 3705057723b6..fc208c567953 100644
--- a/drivers/thunderbolt/tb_msgs.h
+++ b/drivers/thunderbolt/tb_msgs.h
@@ -67,9 +67,13 @@ struct cfg_error_pkg {
u32 zero1:4;
u32 port:6;
u32 zero2:2; /* Both should be zero, still they are different fields. */
- u32 zero3:16;
+ u32 zero3:14;
+ u32 pg:2;
} __packed;
+#define TB_CFG_ERROR_PG_HOT_PLUG 0x2
+#define TB_CFG_ERROR_PG_HOT_UNPLUG 0x3
+
/* TB_CFG_PKG_EVENT */
struct cfg_event_pkg {
struct tb_cfg_header header;
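
The new pg field lets the hot-plug acknowledgment carry the plug/unplug
direction, which tb.c now uses via tb_cfg_ack_plug() in place of the generic
tb_cfg_error(). A sketch of what that helper may look like; the error field
and the tb_cfg_make_header()/tb_ctl_tx() helpers are assumed from the
existing control-channel code:

    int tb_cfg_ack_plug(struct tb_ctl *ctl, u64 route, u32 port, bool unplug)
    {
            struct cfg_error_pkg pkg = {
                    .header = tb_cfg_make_header(route),
                    .error = TB_CFG_ERROR_ACK_PLUG_EVENT,
                    .port = port,
                    .pg = unplug ? TB_CFG_ERROR_PG_HOT_UNPLUG
                                 : TB_CFG_ERROR_PG_HOT_PLUG,
            };

            /* Acks are sent as error packages back to the router */
            return tb_ctl_tx(ctl, &pkg, sizeof(pkg), TB_CFG_PKG_ERROR);
    }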
diff --git a/drivers/thunderbolt/tb_regs.h b/drivers/thunderbolt/tb_regs.h
index 7ee45b73c7f7..c29c5075525a 100644
--- a/drivers/thunderbolt/tb_regs.h
+++ b/drivers/thunderbolt/tb_regs.h
@@ -26,6 +26,7 @@
#define TB_MAX_CONFIG_RW_LENGTH 60
enum tb_switch_cap {
+ TB_SWITCH_CAP_TMU = 0x03,
TB_SWITCH_CAP_VSE = 0x05,
};
@@ -41,6 +42,7 @@ enum tb_port_cap {
TB_PORT_CAP_TIME1 = 0x03,
TB_PORT_CAP_ADAP = 0x04,
TB_PORT_CAP_VSE = 0x05,
+ TB_PORT_CAP_USB4 = 0x06,
};
enum tb_port_state {
@@ -164,10 +166,52 @@ struct tb_regs_switch_header {
* milliseconds. Writing 0x00 is interpreted
* as 255ms.
*/
- u32 __unknown4:16;
+ u32 cmuv:8;
+ u32 __unknown4:8;
u32 thunderbolt_version:8;
} __packed;
+/* USB4 version 1.0 */
+#define USB4_VERSION_1_0 0x20
+
+#define ROUTER_CS_1 0x01
+#define ROUTER_CS_4 0x04
+#define ROUTER_CS_5 0x05
+#define ROUTER_CS_5_SLP BIT(0)
+#define ROUTER_CS_5_C3S BIT(23)
+#define ROUTER_CS_5_PTO BIT(24)
+#define ROUTER_CS_5_UTO BIT(25)
+#define ROUTER_CS_5_HCO BIT(26)
+#define ROUTER_CS_5_CV BIT(31)
+#define ROUTER_CS_6 0x06
+#define ROUTER_CS_6_SLPR BIT(0)
+#define ROUTER_CS_6_TNS BIT(1)
+#define ROUTER_CS_6_HCI BIT(18)
+#define ROUTER_CS_6_CR BIT(25)
+#define ROUTER_CS_7 0x07
+#define ROUTER_CS_9 0x09
+#define ROUTER_CS_25 0x19
+#define ROUTER_CS_26 0x1a
+#define ROUTER_CS_26_STATUS_MASK GENMASK(29, 24)
+#define ROUTER_CS_26_STATUS_SHIFT 24
+#define ROUTER_CS_26_ONS BIT(30)
+#define ROUTER_CS_26_OV BIT(31)
+
+/* Router TMU configuration */
+#define TMU_RTR_CS_0 0x00
+#define TMU_RTR_CS_0_TD BIT(27)
+#define TMU_RTR_CS_0_UCAP BIT(30)
+#define TMU_RTR_CS_1 0x01
+#define TMU_RTR_CS_1_LOCAL_TIME_NS_MASK GENMASK(31, 16)
+#define TMU_RTR_CS_1_LOCAL_TIME_NS_SHIFT 16
+#define TMU_RTR_CS_2 0x02
+#define TMU_RTR_CS_3 0x03
+#define TMU_RTR_CS_3_LOCAL_TIME_NS_MASK GENMASK(15, 0)
+#define TMU_RTR_CS_3_TS_PACKET_INTERVAL_MASK GENMASK(31, 16)
+#define TMU_RTR_CS_3_TS_PACKET_INTERVAL_SHIFT 16
+#define TMU_RTR_CS_22 0x16
+#define TMU_RTR_CS_24 0x18
+
enum tb_port_type {
TB_TYPE_INACTIVE = 0x000000,
TB_TYPE_PORT = 0x000001,
@@ -178,7 +222,8 @@ enum tb_port_type {
TB_TYPE_DP_HDMI_OUT = 0x0e0102,
TB_TYPE_PCIE_DOWN = 0x100101,
TB_TYPE_PCIE_UP = 0x100102,
- /* TB_TYPE_USB = 0x200000, lower order bits are not known */
+ TB_TYPE_USB3_DOWN = 0x200101,
+ TB_TYPE_USB3_UP = 0x200102,
};
/* Present on every port in TB_CF_PORT at address zero. */
@@ -216,10 +261,15 @@ struct tb_regs_port_header {
#define ADP_CS_4_NFC_BUFFERS_MASK GENMASK(9, 0)
#define ADP_CS_4_TOTAL_BUFFERS_MASK GENMASK(29, 20)
#define ADP_CS_4_TOTAL_BUFFERS_SHIFT 20
+#define ADP_CS_4_LCK BIT(31)
#define ADP_CS_5 0x05
#define ADP_CS_5_LCA_MASK GENMASK(28, 22)
#define ADP_CS_5_LCA_SHIFT 22
+/* TMU adapter registers */
+#define TMU_ADP_CS_3 0x03
+#define TMU_ADP_CS_3_UDM BIT(29)
+
/* Lane adapter registers */
#define LANE_ADP_CS_0 0x00
#define LANE_ADP_CS_0_SUPPORTED_WIDTH_MASK GENMASK(25, 20)
@@ -237,6 +287,12 @@ struct tb_regs_port_header {
#define LANE_ADP_CS_1_CURRENT_WIDTH_MASK GENMASK(25, 20)
#define LANE_ADP_CS_1_CURRENT_WIDTH_SHIFT 20
+/* USB4 port registers */
+#define PORT_CS_18 0x12
+#define PORT_CS_18_BE BIT(8)
+#define PORT_CS_19 0x13
+#define PORT_CS_19_PC BIT(3)
+
/* Display Port adapter registers */
#define ADP_DP_CS_0 0x00
#define ADP_DP_CS_0_VIDEO_HOPID_MASK GENMASK(26, 16)
@@ -277,6 +333,11 @@ struct tb_regs_port_header {
#define ADP_PCIE_CS_0 0x00
#define ADP_PCIE_CS_0_PE BIT(31)
+/* USB adapter registers */
+#define ADP_USB3_CS_0 0x00
+#define ADP_USB3_CS_0_V BIT(30)
+#define ADP_USB3_CS_0_PE BIT(31)
+
/* Hop register from TB_CFG_HOPS. 8 byte per entry. */
struct tb_regs_hop {
/* DWORD 0 */
diff --git a/drivers/thunderbolt/tmu.c b/drivers/thunderbolt/tmu.c
new file mode 100644
index 000000000000..039c42a06000
--- /dev/null
+++ b/drivers/thunderbolt/tmu.c
@@ -0,0 +1,383 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Thunderbolt Time Management Unit (TMU) support
+ *
+ * Copyright (C) 2019, Intel Corporation
+ * Authors: Mika Westerberg <mika.westerberg@linux.intel.com>
+ * Rajmohan Mani <rajmohan.mani@intel.com>
+ */
+
+#include <linux/delay.h>
+
+#include "tb.h"
+
+static const char *tb_switch_tmu_mode_name(const struct tb_switch *sw)
+{
+ bool root_switch = !tb_route(sw);
+
+ switch (sw->tmu.rate) {
+ case TB_SWITCH_TMU_RATE_OFF:
+ return "off";
+
+ case TB_SWITCH_TMU_RATE_HIFI:
+ /* Root switch does not have upstream directionality */
+ if (root_switch)
+ return "HiFi";
+ if (sw->tmu.unidirectional)
+ return "uni-directional, HiFi";
+ return "bi-directional, HiFi";
+
+ case TB_SWITCH_TMU_RATE_NORMAL:
+ if (root_switch)
+ return "normal";
+ return "uni-directional, normal";
+
+ default:
+ return "unknown";
+ }
+}
+
+static bool tb_switch_tmu_ucap_supported(struct tb_switch *sw)
+{
+ int ret;
+ u32 val;
+
+ ret = tb_sw_read(sw, &val, TB_CFG_SWITCH,
+ sw->tmu.cap + TMU_RTR_CS_0, 1);
+ if (ret)
+ return false;
+
+ return !!(val & TMU_RTR_CS_0_UCAP);
+}
+
+static int tb_switch_tmu_rate_read(struct tb_switch *sw)
+{
+ int ret;
+ u32 val;
+
+ ret = tb_sw_read(sw, &val, TB_CFG_SWITCH,
+ sw->tmu.cap + TMU_RTR_CS_3, 1);
+ if (ret)
+ return ret;
+
+ val >>= TMU_RTR_CS_3_TS_PACKET_INTERVAL_SHIFT;
+ return val;
+}
+
+static int tb_switch_tmu_rate_write(struct tb_switch *sw, int rate)
+{
+ int ret;
+ u32 val;
+
+ ret = tb_sw_read(sw, &val, TB_CFG_SWITCH,
+ sw->tmu.cap + TMU_RTR_CS_3, 1);
+ if (ret)
+ return ret;
+
+ val &= ~TMU_RTR_CS_3_TS_PACKET_INTERVAL_MASK;
+ val |= rate << TMU_RTR_CS_3_TS_PACKET_INTERVAL_SHIFT;
+
+ return tb_sw_write(sw, &val, TB_CFG_SWITCH,
+ sw->tmu.cap + TMU_RTR_CS_3, 1);
+}
+
+static int tb_port_tmu_write(struct tb_port *port, u8 offset, u32 mask,
+ u32 value)
+{
+ u32 data;
+ int ret;
+
+ ret = tb_port_read(port, &data, TB_CFG_PORT, port->cap_tmu + offset, 1);
+ if (ret)
+ return ret;
+
+ data &= ~mask;
+ data |= value;
+
+ return tb_port_write(port, &data, TB_CFG_PORT,
+ port->cap_tmu + offset, 1);
+}
+
+static int tb_port_tmu_set_unidirectional(struct tb_port *port,
+ bool unidirectional)
+{
+ u32 val;
+
+ if (!port->sw->tmu.has_ucap)
+ return 0;
+
+ val = unidirectional ? TMU_ADP_CS_3_UDM : 0;
+ return tb_port_tmu_write(port, TMU_ADP_CS_3, TMU_ADP_CS_3_UDM, val);
+}
+
+static inline int tb_port_tmu_unidirectional_disable(struct tb_port *port)
+{
+ return tb_port_tmu_set_unidirectional(port, false);
+}
+
+static bool tb_port_tmu_is_unidirectional(struct tb_port *port)
+{
+ int ret;
+ u32 val;
+
+ ret = tb_port_read(port, &val, TB_CFG_PORT,
+ port->cap_tmu + TMU_ADP_CS_3, 1);
+ if (ret)
+ return false;
+
+ return val & TMU_ADP_CS_3_UDM;
+}
+
+static int tb_switch_tmu_set_time_disruption(struct tb_switch *sw, bool set)
+{
+ int ret;
+ u32 val;
+
+ ret = tb_sw_read(sw, &val, TB_CFG_SWITCH,
+ sw->tmu.cap + TMU_RTR_CS_0, 1);
+ if (ret)
+ return ret;
+
+ if (set)
+ val |= TMU_RTR_CS_0_TD;
+ else
+ val &= ~TMU_RTR_CS_0_TD;
+
+ return tb_sw_write(sw, &val, TB_CFG_SWITCH,
+ sw->tmu.cap + TMU_RTR_CS_0, 1);
+}
+
+/**
+ * tb_switch_tmu_init() - Initialize switch TMU structures
+ * @sw: Switch to initialize
+ *
+ * This function must be called before other TMU related functions to
+ * make sure the internal structures are filled in correctly. Does not
+ * change any hardware configuration.
+ */
+int tb_switch_tmu_init(struct tb_switch *sw)
+{
+ struct tb_port *port;
+ int ret;
+
+ if (tb_switch_is_icm(sw))
+ return 0;
+
+ ret = tb_switch_find_cap(sw, TB_SWITCH_CAP_TMU);
+ if (ret > 0)
+ sw->tmu.cap = ret;
+
+ tb_switch_for_each_port(sw, port) {
+ int cap;
+
+ cap = tb_port_find_cap(port, TB_PORT_CAP_TIME1);
+ if (cap > 0)
+ port->cap_tmu = cap;
+ }
+
+ ret = tb_switch_tmu_rate_read(sw);
+ if (ret < 0)
+ return ret;
+
+ sw->tmu.rate = ret;
+
+ sw->tmu.has_ucap = tb_switch_tmu_ucap_supported(sw);
+ if (sw->tmu.has_ucap) {
+ tb_sw_dbg(sw, "TMU: supports uni-directional mode\n");
+
+ if (tb_route(sw)) {
+ struct tb_port *up = tb_upstream_port(sw);
+
+ sw->tmu.unidirectional =
+ tb_port_tmu_is_unidirectional(up);
+ }
+ } else {
+ sw->tmu.unidirectional = false;
+ }
+
+ tb_sw_dbg(sw, "TMU: current mode: %s\n", tb_switch_tmu_mode_name(sw));
+ return 0;
+}
+
+/**
+ * tb_switch_tmu_post_time() - Update switch local time
+ * @sw: Switch whose time to update
+ *
+ * Updates switch local time using time posting procedure.
+ */
+int tb_switch_tmu_post_time(struct tb_switch *sw)
+{
+ unsigned int post_local_time_offset, post_time_offset;
+ struct tb_switch *root_switch = sw->tb->root_switch;
+ u64 hi, mid, lo, local_time, post_time;
+ int i, ret, retries = 100;
+ u32 gm_local_time[3];
+
+ if (!tb_route(sw))
+ return 0;
+
+ if (!tb_switch_is_usb4(sw))
+ return 0;
+
+ /* Need to be able to read the grandmaster time */
+ if (!root_switch->tmu.cap)
+ return 0;
+
+ ret = tb_sw_read(root_switch, gm_local_time, TB_CFG_SWITCH,
+ root_switch->tmu.cap + TMU_RTR_CS_1,
+ ARRAY_SIZE(gm_local_time));
+ if (ret)
+ return ret;
+
+ for (i = 0; i < ARRAY_SIZE(gm_local_time); i++)
+ tb_sw_dbg(root_switch, "local_time[%d]=0x%08x\n", i,
+ gm_local_time[i]);
+
+ /* Convert to nanoseconds (drop fractional part) */
+ hi = gm_local_time[2] & TMU_RTR_CS_3_LOCAL_TIME_NS_MASK;
+ mid = gm_local_time[1];
+ lo = (gm_local_time[0] & TMU_RTR_CS_1_LOCAL_TIME_NS_MASK) >>
+ TMU_RTR_CS_1_LOCAL_TIME_NS_SHIFT;
+ local_time = hi << 48 | mid << 16 | lo;
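+ /*
+ * Per the masks above, the nanosecond part spans TMU_RTR_CS_1
+ * bits 31:16 (lowest 16 bits), all of TMU_RTR_CS_2 and
+ * TMU_RTR_CS_3 bits 15:0; bits 15:0 of TMU_RTR_CS_1 hold the
+ * fractional part that was dropped above.
+ */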
+
+ /* Tell the switch that time sync is disrupted for a while */
+ ret = tb_switch_tmu_set_time_disruption(sw, true);
+ if (ret)
+ return ret;
+
+ post_local_time_offset = sw->tmu.cap + TMU_RTR_CS_22;
+ post_time_offset = sw->tmu.cap + TMU_RTR_CS_24;
+
+ /*
+ * Write the Grandmaster time to the Post Local Time registers
+ * of the new switch.
+ */
+ ret = tb_sw_write(sw, &local_time, TB_CFG_SWITCH,
+ post_local_time_offset, 2);
+ if (ret)
+ goto out;
+
+ /*
+ * Have the new switch update its local time (by writing 1 to
+ * the post_time registers) and wait for it to complete (the
+ * post_time register reads back 0), which means the time has
+ * converged properly.
+ */
+ post_time = 1;
+
+ ret = tb_sw_write(sw, &post_time, TB_CFG_SWITCH, post_time_offset, 2);
+ if (ret)
+ goto out;
+
+ do {
+ usleep_range(5, 10);
+ ret = tb_sw_read(sw, &post_time, TB_CFG_SWITCH,
+ post_time_offset, 2);
+ if (ret)
+ goto out;
+ } while (--retries && post_time);
+
+ if (!retries) {
+ ret = -ETIMEDOUT;
+ goto out;
+ }
+
+ tb_sw_dbg(sw, "TMU: updated local time to %#llx\n", local_time);
+
+out:
+ tb_switch_tmu_set_time_disruption(sw, false);
+ return ret;
+}
+
+/**
+ * tb_switch_tmu_disable() - Disable TMU of a switch
+ * @sw: Switch whose TMU to disable
+ *
+ * Turns off TMU of @sw if it is enabled. If not enabled does nothing.
+ */
+int tb_switch_tmu_disable(struct tb_switch *sw)
+{
+ int ret;
+
+ if (!tb_switch_is_usb4(sw))
+ return 0;
+
+ /* Already disabled? */
+ if (sw->tmu.rate == TB_SWITCH_TMU_RATE_OFF)
+ return 0;
+
+ if (sw->tmu.unidirectional) {
+ struct tb_switch *parent = tb_switch_parent(sw);
+ struct tb_port *up, *down;
+
+ up = tb_upstream_port(sw);
+ down = tb_port_at(tb_route(sw), parent);
+
+ /* The switch may be unplugged so ignore any errors */
+ tb_port_tmu_unidirectional_disable(up);
+ ret = tb_port_tmu_unidirectional_disable(down);
+ if (ret)
+ return ret;
+ }
+
+ tb_switch_tmu_rate_write(sw, TB_SWITCH_TMU_RATE_OFF);
+
+ sw->tmu.unidirectional = false;
+ sw->tmu.rate = TB_SWITCH_TMU_RATE_OFF;
+
+ tb_sw_dbg(sw, "TMU: disabled\n");
+ return 0;
+}
+
+/**
+ * tb_switch_tmu_enable() - Enable TMU on a switch
+ * @sw: Switch whose TMU to enable
+ *
+ * Enables TMU of a switch to be in bi-directional, HiFi mode. In this mode
+ * all tunneling should work.
+ */
+int tb_switch_tmu_enable(struct tb_switch *sw)
+{
+ int ret;
+
+ if (!tb_switch_is_usb4(sw))
+ return 0;
+
+ if (tb_switch_tmu_is_enabled(sw))
+ return 0;
+
+ ret = tb_switch_tmu_set_time_disruption(sw, true);
+ if (ret)
+ return ret;
+
+ /* Change mode to bi-directional */
+ if (tb_route(sw) && sw->tmu.unidirectional) {
+ struct tb_switch *parent = tb_switch_parent(sw);
+ struct tb_port *up, *down;
+
+ up = tb_upstream_port(sw);
+ down = tb_port_at(tb_route(sw), parent);
+
+ ret = tb_port_tmu_unidirectional_disable(down);
+ if (ret)
+ return ret;
+
+ ret = tb_switch_tmu_rate_write(sw, TB_SWITCH_TMU_RATE_HIFI);
+ if (ret)
+ return ret;
+
+ ret = tb_port_tmu_unidirectional_disable(up);
+ if (ret)
+ return ret;
+ } else {
+ ret = tb_switch_tmu_rate_write(sw, TB_SWITCH_TMU_RATE_HIFI);
+ if (ret)
+ return ret;
+ }
+
+ sw->tmu.unidirectional = false;
+ sw->tmu.rate = TB_SWITCH_TMU_RATE_HIFI;
+ tb_sw_dbg(sw, "TMU: mode set to: %s\n", tb_switch_tmu_mode_name(sw));
+
+ return tb_switch_tmu_set_time_disruption(sw, false);
+}
diff --git a/drivers/thunderbolt/tunnel.c b/drivers/thunderbolt/tunnel.c
index 0d3463c4e24a..dbe90bcf4ad4 100644
--- a/drivers/thunderbolt/tunnel.c
+++ b/drivers/thunderbolt/tunnel.c
@@ -19,6 +19,12 @@
#define TB_PCI_PATH_DOWN 0
#define TB_PCI_PATH_UP 1
+/* USB3 adapters always use HopID 8 for both directions */
+#define TB_USB3_HOPID 8
+
+#define TB_USB3_PATH_DOWN 0
+#define TB_USB3_PATH_UP 1
+
/* DP adapters use HopID 8 for AUX and 9 for Video */
#define TB_DP_AUX_TX_HOPID 8
#define TB_DP_AUX_RX_HOPID 8
@@ -31,7 +37,7 @@
#define TB_DMA_PATH_OUT 0
#define TB_DMA_PATH_IN 1
-static const char * const tb_tunnel_names[] = { "PCI", "DP", "DMA" };
+static const char * const tb_tunnel_names[] = { "PCI", "DP", "DMA", "USB3" };
#define __TB_TUNNEL_PRINT(level, tunnel, fmt, arg...) \
do { \
@@ -243,6 +249,12 @@ struct tb_tunnel *tb_tunnel_alloc_pci(struct tb *tb, struct tb_port *up,
return tunnel;
}
+static bool tb_dp_is_usb4(const struct tb_switch *sw)
+{
+ /* Titan Ridge DP adapters need the same treatment as USB4 */
+ return tb_switch_is_usb4(sw) || tb_switch_is_titan_ridge(sw);
+}
+
static int tb_dp_cm_handshake(struct tb_port *in, struct tb_port *out)
{
int timeout = 10;
@@ -250,8 +262,7 @@ static int tb_dp_cm_handshake(struct tb_port *in, struct tb_port *out)
int ret;
/* Both ends need to support this */
- if (!tb_switch_is_titan_ridge(in->sw) ||
- !tb_switch_is_titan_ridge(out->sw))
+ if (!tb_dp_is_usb4(in->sw) || !tb_dp_is_usb4(out->sw))
return 0;
ret = tb_port_read(out, &val, TB_CFG_PORT,
@@ -531,7 +542,7 @@ static int tb_dp_consumed_bandwidth(struct tb_tunnel *tunnel)
u32 val, rate = 0, lanes = 0;
int ret;
- if (tb_switch_is_titan_ridge(sw)) {
+ if (tb_dp_is_usb4(sw)) {
int timeout = 10;
/*
@@ -843,6 +854,156 @@ struct tb_tunnel *tb_tunnel_alloc_dma(struct tb *tb, struct tb_port *nhi,
return tunnel;
}
+static int tb_usb3_activate(struct tb_tunnel *tunnel, bool activate)
+{
+ int res;
+
+ res = tb_usb3_port_enable(tunnel->src_port, activate);
+ if (res)
+ return res;
+
+ if (tb_port_is_usb3_up(tunnel->dst_port))
+ return tb_usb3_port_enable(tunnel->dst_port, activate);
+
+ return 0;
+}
+
+static void tb_usb3_init_path(struct tb_path *path)
+{
+ path->egress_fc_enable = TB_PATH_SOURCE | TB_PATH_INTERNAL;
+ path->egress_shared_buffer = TB_PATH_NONE;
+ path->ingress_fc_enable = TB_PATH_ALL;
+ path->ingress_shared_buffer = TB_PATH_NONE;
+ path->priority = 3;
+ path->weight = 3;
+ path->drop_packages = 0;
+ path->nfc_credits = 0;
+ path->hops[0].initial_credits = 7;
+ path->hops[1].initial_credits =
+ tb_initial_credits(path->hops[1].in_port->sw);
+}
+
+/**
+ * tb_tunnel_discover_usb3() - Discover existing USB3 tunnels
+ * @tb: Pointer to the domain structure
+ * @down: USB3 downstream adapter
+ *
+ * If @down adapter is active, follows the tunnel to the USB3 upstream
+ * adapter and back. Returns the discovered tunnel or %NULL if there was
+ * no tunnel.
+ */
+struct tb_tunnel *tb_tunnel_discover_usb3(struct tb *tb, struct tb_port *down)
+{
+ struct tb_tunnel *tunnel;
+ struct tb_path *path;
+
+ if (!tb_usb3_port_is_enabled(down))
+ return NULL;
+
+ tunnel = tb_tunnel_alloc(tb, 2, TB_TUNNEL_USB3);
+ if (!tunnel)
+ return NULL;
+
+ tunnel->activate = tb_usb3_activate;
+ tunnel->src_port = down;
+
+ /*
+ * Discover both paths even if they are not complete. We will
+ * clean them up by calling tb_tunnel_deactivate() below in that
+ * case.
+ */
+ path = tb_path_discover(down, TB_USB3_HOPID, NULL, -1,
+ &tunnel->dst_port, "USB3 Up");
+ if (!path) {
+ /* Just disable the downstream port */
+ tb_usb3_port_enable(down, false);
+ goto err_free;
+ }
+ tunnel->paths[TB_USB3_PATH_UP] = path;
+ tb_usb3_init_path(tunnel->paths[TB_USB3_PATH_UP]);
+
+ path = tb_path_discover(tunnel->dst_port, -1, down, TB_USB3_HOPID, NULL,
+ "USB3 Down");
+ if (!path)
+ goto err_deactivate;
+ tunnel->paths[TB_USB3_PATH_DOWN] = path;
+ tb_usb3_init_path(tunnel->paths[TB_USB3_PATH_DOWN]);
+
+ /* Validate that the tunnel is complete */
+ if (!tb_port_is_usb3_up(tunnel->dst_port)) {
+ tb_port_warn(tunnel->dst_port,
+ "path does not end on an USB3 adapter, cleaning up\n");
+ goto err_deactivate;
+ }
+
+ if (down != tunnel->src_port) {
+ tb_tunnel_warn(tunnel, "path is not complete, cleaning up\n");
+ goto err_deactivate;
+ }
+
+ if (!tb_usb3_port_is_enabled(tunnel->dst_port)) {
+ tb_tunnel_warn(tunnel,
+ "tunnel is not fully activated, cleaning up\n");
+ goto err_deactivate;
+ }
+
+ tb_tunnel_dbg(tunnel, "discovered\n");
+ return tunnel;
+
+err_deactivate:
+ tb_tunnel_deactivate(tunnel);
+err_free:
+ tb_tunnel_free(tunnel);
+
+ return NULL;
+}
+
+/**
+ * tb_tunnel_alloc_usb3() - allocate a USB3 tunnel
+ * @tb: Pointer to the domain structure
+ * @up: USB3 upstream adapter port
+ * @down: USB3 downstream adapter port
+ *
+ * Allocate a USB3 tunnel. The ports must be of type @TB_TYPE_USB3_UP and
+ * @TB_TYPE_USB3_DOWN.
+ *
+ * Return: Returns a tb_tunnel on success or %NULL on failure.
+ */
+struct tb_tunnel *tb_tunnel_alloc_usb3(struct tb *tb, struct tb_port *up,
+ struct tb_port *down)
+{
+ struct tb_tunnel *tunnel;
+ struct tb_path *path;
+
+ tunnel = tb_tunnel_alloc(tb, 2, TB_TUNNEL_USB3);
+ if (!tunnel)
+ return NULL;
+
+ tunnel->activate = tb_usb3_activate;
+ tunnel->src_port = down;
+ tunnel->dst_port = up;
+
+ path = tb_path_alloc(tb, down, TB_USB3_HOPID, up, TB_USB3_HOPID, 0,
+ "USB3 Down");
+ if (!path) {
+ tb_tunnel_free(tunnel);
+ return NULL;
+ }
+ tb_usb3_init_path(path);
+ tunnel->paths[TB_USB3_PATH_DOWN] = path;
+
+ path = tb_path_alloc(tb, up, TB_USB3_HOPID, down, TB_USB3_HOPID, 0,
+ "USB3 Up");
+ if (!path) {
+ tb_tunnel_free(tunnel);
+ return NULL;
+ }
+ tb_usb3_init_path(path);
+ tunnel->paths[TB_USB3_PATH_UP] = path;
+
+ return tunnel;
+}
+
/**
* tb_tunnel_free() - free a tunnel
* @tunnel: Tunnel to be freed
diff --git a/drivers/thunderbolt/tunnel.h b/drivers/thunderbolt/tunnel.h
index ba888da005f5..3f5ba93225e7 100644
--- a/drivers/thunderbolt/tunnel.h
+++ b/drivers/thunderbolt/tunnel.h
@@ -15,6 +15,7 @@ enum tb_tunnel_type {
TB_TUNNEL_PCI,
TB_TUNNEL_DP,
TB_TUNNEL_DMA,
+ TB_TUNNEL_USB3,
};
/**
@@ -57,6 +58,9 @@ struct tb_tunnel *tb_tunnel_alloc_dma(struct tb *tb, struct tb_port *nhi,
struct tb_port *dst, int transmit_ring,
int transmit_path, int receive_ring,
int receive_path);
+struct tb_tunnel *tb_tunnel_discover_usb3(struct tb *tb, struct tb_port *down);
+struct tb_tunnel *tb_tunnel_alloc_usb3(struct tb *tb, struct tb_port *up,
+ struct tb_port *down);
void tb_tunnel_free(struct tb_tunnel *tunnel);
int tb_tunnel_activate(struct tb_tunnel *tunnel);
@@ -82,5 +86,10 @@ static inline bool tb_tunnel_is_dma(const struct tb_tunnel *tunnel)
return tunnel->type == TB_TUNNEL_DMA;
}
+static inline bool tb_tunnel_is_usb3(const struct tb_tunnel *tunnel)
+{
+ return tunnel->type == TB_TUNNEL_USB3;
+}
+
#endif
diff --git a/drivers/thunderbolt/usb4.c b/drivers/thunderbolt/usb4.c
new file mode 100644
index 000000000000..b341fc60c4ba
--- /dev/null
+++ b/drivers/thunderbolt/usb4.c
@@ -0,0 +1,764 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * USB4 specific functionality
+ *
+ * Copyright (C) 2019, Intel Corporation
+ * Authors: Mika Westerberg <mika.westerberg@linux.intel.com>
+ * Rajmohan Mani <rajmohan.mani@intel.com>
+ */
+
+#include <linux/delay.h>
+#include <linux/ktime.h>
+
+#include "tb.h"
+
+#define USB4_DATA_DWORDS 16
+#define USB4_DATA_RETRIES 3
+
+enum usb4_switch_op {
+ USB4_SWITCH_OP_QUERY_DP_RESOURCE = 0x10,
+ USB4_SWITCH_OP_ALLOC_DP_RESOURCE = 0x11,
+ USB4_SWITCH_OP_DEALLOC_DP_RESOURCE = 0x12,
+ USB4_SWITCH_OP_NVM_WRITE = 0x20,
+ USB4_SWITCH_OP_NVM_AUTH = 0x21,
+ USB4_SWITCH_OP_NVM_READ = 0x22,
+ USB4_SWITCH_OP_NVM_SET_OFFSET = 0x23,
+ USB4_SWITCH_OP_DROM_READ = 0x24,
+ USB4_SWITCH_OP_NVM_SECTOR_SIZE = 0x25,
+};
+
+#define USB4_NVM_READ_OFFSET_MASK GENMASK(23, 2)
+#define USB4_NVM_READ_OFFSET_SHIFT 2
+#define USB4_NVM_READ_LENGTH_MASK GENMASK(27, 24)
+#define USB4_NVM_READ_LENGTH_SHIFT 24
+
+#define USB4_NVM_SET_OFFSET_MASK USB4_NVM_READ_OFFSET_MASK
+#define USB4_NVM_SET_OFFSET_SHIFT USB4_NVM_READ_OFFSET_SHIFT
+
+#define USB4_DROM_ADDRESS_MASK GENMASK(14, 2)
+#define USB4_DROM_ADDRESS_SHIFT 2
+#define USB4_DROM_SIZE_MASK GENMASK(19, 15)
+#define USB4_DROM_SIZE_SHIFT 15
+
+#define USB4_NVM_SECTOR_SIZE_MASK GENMASK(23, 0)
+
+typedef int (*read_block_fn)(struct tb_switch *, unsigned int, void *, size_t);
+typedef int (*write_block_fn)(struct tb_switch *, const void *, size_t);
+
+static int usb4_switch_wait_for_bit(struct tb_switch *sw, u32 offset, u32 bit,
+ u32 value, int timeout_msec)
+{
+ ktime_t timeout = ktime_add_ms(ktime_get(), timeout_msec);
+
+ do {
+ u32 val;
+ int ret;
+
+ ret = tb_sw_read(sw, &val, TB_CFG_SWITCH, offset, 1);
+ if (ret)
+ return ret;
+
+ if ((val & bit) == value)
+ return 0;
+
+ usleep_range(50, 100);
+ } while (ktime_before(ktime_get(), timeout));
+
+ return -ETIMEDOUT;
+}
+
+static int usb4_switch_op_read_data(struct tb_switch *sw, void *data,
+ size_t dwords)
+{
+ if (dwords > USB4_DATA_DWORDS)
+ return -EINVAL;
+
+ return tb_sw_read(sw, data, TB_CFG_SWITCH, ROUTER_CS_9, dwords);
+}
+
+static int usb4_switch_op_write_data(struct tb_switch *sw, const void *data,
+ size_t dwords)
+{
+ if (dwords > USB4_DATA_DWORDS)
+ return -EINVAL;
+
+ return tb_sw_write(sw, data, TB_CFG_SWITCH, ROUTER_CS_9, dwords);
+}
+
+static int usb4_switch_op_read_metadata(struct tb_switch *sw, u32 *metadata)
+{
+ return tb_sw_read(sw, metadata, TB_CFG_SWITCH, ROUTER_CS_25, 1);
+}
+
+static int usb4_switch_op_write_metadata(struct tb_switch *sw, u32 metadata)
+{
+ return tb_sw_write(sw, &metadata, TB_CFG_SWITCH, ROUTER_CS_25, 1);
+}
+
+static int usb4_switch_do_read_data(struct tb_switch *sw, u16 address,
+ void *buf, size_t size, read_block_fn read_block)
+{
+ unsigned int retries = USB4_DATA_RETRIES;
+ unsigned int offset;
+
+ offset = address & 3;
+ address = address & ~3;
+
+ do {
+ size_t nbytes = min_t(size_t, size, USB4_DATA_DWORDS * 4);
+ unsigned int dwaddress, dwords;
+ u8 data[USB4_DATA_DWORDS * 4];
+ int ret;
+
+ dwaddress = address / 4;
+ dwords = ALIGN(nbytes, 4) / 4;
+
+ ret = read_block(sw, dwaddress, data, dwords);
+ if (ret) {
+ if (ret == -ETIMEDOUT) {
+ if (retries--)
+ continue;
+ ret = -EIO;
+ }
+ return ret;
+ }
+
+ memcpy(buf, data + offset, nbytes);
+
+ size -= nbytes;
+ address += nbytes;
+ buf += nbytes;
+ } while (size > 0);
+
+ return 0;
+}
+
+static int usb4_switch_do_write_data(struct tb_switch *sw, u16 address,
+ const void *buf, size_t size, write_block_fn write_next_block)
+{
+ unsigned int retries = USB4_DATA_RETRIES;
+ unsigned int offset;
+
+ offset = address & 3;
+ address = address & ~3;
+
+ do {
+ u32 nbytes = min_t(u32, size, USB4_DATA_DWORDS * 4);
+ u8 data[USB4_DATA_DWORDS * 4];
+ int ret;
+
+ memcpy(data + offset, buf, nbytes);
+
+ ret = write_next_block(sw, data, nbytes / 4);
+ if (ret) {
+ if (ret == -ETIMEDOUT) {
+ if (retries--)
+ continue;
+ ret = -EIO;
+ }
+ return ret;
+ }
+
+ size -= nbytes;
+ address += nbytes;
+ buf += nbytes;
+ } while (size > 0);
+
+ return 0;
+}
+
+static int usb4_switch_op(struct tb_switch *sw, u16 opcode, u8 *status)
+{
+ u32 val;
+ int ret;
+
+ val = opcode | ROUTER_CS_26_OV;
+ ret = tb_sw_write(sw, &val, TB_CFG_SWITCH, ROUTER_CS_26, 1);
+ if (ret)
+ return ret;
+
+ ret = usb4_switch_wait_for_bit(sw, ROUTER_CS_26, ROUTER_CS_26_OV, 0, 500);
+ if (ret)
+ return ret;
+
+	ret = tb_sw_read(sw, &val, TB_CFG_SWITCH, ROUTER_CS_26, 1);
+	if (ret)
+		return ret;
+
+	if (val & ROUTER_CS_26_ONS)
+ return -EOPNOTSUPP;
+
+ *status = (val & ROUTER_CS_26_STATUS_MASK) >> ROUTER_CS_26_STATUS_SHIFT;
+ return 0;
+}
+
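
Taken together, the helpers above implement the router-operation handshake: write the metadata dword, write the opcode with the OV bit set, poll until the router clears OV, then check ONS and the returned status. A minimal sketch of a caller that combines them (the wrapper name usb4_switch_op_simple is hypothetical; the real callers are the *_read_block()/*_write_next_block() functions below):

static int usb4_switch_op_simple(struct tb_switch *sw, u16 opcode,
				 u32 metadata, void *buf, size_t dwords)
{
	u8 status;
	int ret;

	ret = usb4_switch_op_write_metadata(sw, metadata);
	if (ret)
		return ret;

	ret = usb4_switch_op(sw, opcode, &status);
	if (ret)
		return ret;
	if (status)
		return -EIO;

	/* Some operations also return data through ROUTER_CS_9 */
	return buf ? usb4_switch_op_read_data(sw, buf, dwords) : 0;
}
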
+/**
+ * usb4_switch_setup() - Additional setup for USB4 device
+ * @sw: USB4 router to setup
+ *
+ * USB4 routers need additional settings in order to enable all the
+ * tunneling. This function enables USB 3.x and PCIe tunneling when
+ * possible (e.g. the parent router also supports them). If USB 3.x
+ * tunneling is not available for some reason (such as a Thunderbolt 3
+ * router upstream) the internal xHCI controller is enabled instead.
+ */
+int usb4_switch_setup(struct tb_switch *sw)
+{
+ struct tb_switch *parent;
+ bool tbt3, xhci;
+ u32 val = 0;
+ int ret;
+
+ if (!tb_route(sw))
+ return 0;
+
+ ret = tb_sw_read(sw, &val, TB_CFG_SWITCH, ROUTER_CS_6, 1);
+ if (ret)
+ return ret;
+
+ xhci = val & ROUTER_CS_6_HCI;
+ tbt3 = !(val & ROUTER_CS_6_TNS);
+
+ tb_sw_dbg(sw, "TBT3 support: %s, xHCI: %s\n",
+ tbt3 ? "yes" : "no", xhci ? "yes" : "no");
+
+ ret = tb_sw_read(sw, &val, TB_CFG_SWITCH, ROUTER_CS_5, 1);
+ if (ret)
+ return ret;
+
+ parent = tb_switch_parent(sw);
+
+ if (tb_switch_find_port(parent, TB_TYPE_USB3_DOWN)) {
+ val |= ROUTER_CS_5_UTO;
+ xhci = false;
+ }
+
+ /* Only enable PCIe tunneling if the parent router supports it */
+ if (tb_switch_find_port(parent, TB_TYPE_PCIE_DOWN)) {
+ val |= ROUTER_CS_5_PTO;
+ /*
+ * xHCI can be enabled if PCIe tunneling is supported
+		 * and the parent does not have any USB3 downstream
+ * adapters (so we cannot do USB 3.x tunneling).
+ */
+ if (xhci)
+ val |= ROUTER_CS_5_HCO;
+ }
+
+ /* TBT3 supported by the CM */
+ val |= ROUTER_CS_5_C3S;
+ /* Tunneling configuration is ready now */
+ val |= ROUTER_CS_5_CV;
+
+ ret = tb_sw_write(sw, &val, TB_CFG_SWITCH, ROUTER_CS_5, 1);
+ if (ret)
+ return ret;
+
+ return usb4_switch_wait_for_bit(sw, ROUTER_CS_6, ROUTER_CS_6_CR,
+ ROUTER_CS_6_CR, 50);
+}
+
+/**
+ * usb4_switch_read_uid() - Read UID from USB4 router
+ * @sw: USB4 router
+ * @uid: UID is stored here
+ *
+ * Reads 64-bit UID from USB4 router config space.
+ */
+int usb4_switch_read_uid(struct tb_switch *sw, u64 *uid)
+{
+ return tb_sw_read(sw, uid, TB_CFG_SWITCH, ROUTER_CS_7, 2);
+}
+
+static int usb4_switch_drom_read_block(struct tb_switch *sw,
+ unsigned int dwaddress, void *buf,
+ size_t dwords)
+{
+ u8 status = 0;
+ u32 metadata;
+ int ret;
+
+ metadata = (dwords << USB4_DROM_SIZE_SHIFT) & USB4_DROM_SIZE_MASK;
+ metadata |= (dwaddress << USB4_DROM_ADDRESS_SHIFT) &
+ USB4_DROM_ADDRESS_MASK;
+
+ ret = usb4_switch_op_write_metadata(sw, metadata);
+ if (ret)
+ return ret;
+
+ ret = usb4_switch_op(sw, USB4_SWITCH_OP_DROM_READ, &status);
+ if (ret)
+ return ret;
+
+ if (status)
+ return -EIO;
+
+ return usb4_switch_op_read_data(sw, buf, dwords);
+}
+
+/**
+ * usb4_switch_drom_read() - Read arbitrary bytes from USB4 router DROM
+ * @sw: USB4 router
+ * @address: Byte address inside DROM to start reading
+ * @buf: Buffer where the DROM content is stored
+ * @size: Number of bytes to read from DROM
+ *
+ * Uses USB4 router operations to read the router DROM. For devices
+ * this should always work, but for hosts it may return %-EOPNOTSUPP,
+ * in which case the host router does not have a DROM.
+ */
+int usb4_switch_drom_read(struct tb_switch *sw, unsigned int address, void *buf,
+ size_t size)
+{
+ return usb4_switch_do_read_data(sw, address, buf, size,
+ usb4_switch_drom_read_block);
+}
+
+static int usb4_set_port_configured(struct tb_port *port, bool configured)
+{
+ int ret;
+ u32 val;
+
+ ret = tb_port_read(port, &val, TB_CFG_PORT,
+ port->cap_usb4 + PORT_CS_19, 1);
+ if (ret)
+ return ret;
+
+ if (configured)
+ val |= PORT_CS_19_PC;
+ else
+ val &= ~PORT_CS_19_PC;
+
+ return tb_port_write(port, &val, TB_CFG_PORT,
+ port->cap_usb4 + PORT_CS_19, 1);
+}
+
+/**
+ * usb4_switch_configure_link() - Set upstream USB4 link configured
+ * @sw: USB4 router
+ *
+ * Sets the upstream USB4 link to be configured for power management
+ * purposes.
+ */
+int usb4_switch_configure_link(struct tb_switch *sw)
+{
+ struct tb_port *up;
+
+ if (!tb_route(sw))
+ return 0;
+
+ up = tb_upstream_port(sw);
+ return usb4_set_port_configured(up, true);
+}
+
+/**
+ * usb4_switch_unconfigure_link() - Un-set upstream USB4 link configuration
+ * @sw: USB4 router
+ *
+ * Reverse of usb4_switch_configure_link().
+ */
+void usb4_switch_unconfigure_link(struct tb_switch *sw)
+{
+ struct tb_port *up;
+
+ if (sw->is_unplugged || !tb_route(sw))
+ return;
+
+ up = tb_upstream_port(sw);
+ usb4_set_port_configured(up, false);
+}
+
+/**
+ * usb4_switch_lane_bonding_possible() - Are conditions met for lane bonding
+ * @sw: USB4 router
+ *
+ * Checks whether conditions are met so that lane bonding can be
+ * established with the upstream router. Call only for device routers.
+ */
+bool usb4_switch_lane_bonding_possible(struct tb_switch *sw)
+{
+ struct tb_port *up;
+ int ret;
+ u32 val;
+
+ up = tb_upstream_port(sw);
+ ret = tb_port_read(up, &val, TB_CFG_PORT, up->cap_usb4 + PORT_CS_18, 1);
+ if (ret)
+ return false;
+
+ return !!(val & PORT_CS_18_BE);
+}
+
+/**
+ * usb4_switch_set_sleep() - Prepare the router to enter sleep
+ * @sw: USB4 router
+ *
+ * Sets the sleep bit for the router. Returns when the router sleep
+ * ready bit has been asserted.
+ */
+int usb4_switch_set_sleep(struct tb_switch *sw)
+{
+ int ret;
+ u32 val;
+
+ /* Set sleep bit and wait for sleep ready to be asserted */
+ ret = tb_sw_read(sw, &val, TB_CFG_SWITCH, ROUTER_CS_5, 1);
+ if (ret)
+ return ret;
+
+ val |= ROUTER_CS_5_SLP;
+
+ ret = tb_sw_write(sw, &val, TB_CFG_SWITCH, ROUTER_CS_5, 1);
+ if (ret)
+ return ret;
+
+ return usb4_switch_wait_for_bit(sw, ROUTER_CS_6, ROUTER_CS_6_SLPR,
+ ROUTER_CS_6_SLPR, 500);
+}
+
+/**
+ * usb4_switch_nvm_sector_size() - Return router NVM sector size
+ * @sw: USB4 router
+ *
+ * If the router supports NVM operations this function returns the NVM
+ * sector size in bytes. If NVM operations are not supported returns
+ * %-EOPNOTSUPP.
+ */
+int usb4_switch_nvm_sector_size(struct tb_switch *sw)
+{
+ u32 metadata;
+ u8 status;
+ int ret;
+
+ ret = usb4_switch_op(sw, USB4_SWITCH_OP_NVM_SECTOR_SIZE, &status);
+ if (ret)
+ return ret;
+
+ if (status)
+ return status == 0x2 ? -EOPNOTSUPP : -EIO;
+
+ ret = usb4_switch_op_read_metadata(sw, &metadata);
+ if (ret)
+ return ret;
+
+ return metadata & USB4_NVM_SECTOR_SIZE_MASK;
+}
+
+static int usb4_switch_nvm_read_block(struct tb_switch *sw,
+ unsigned int dwaddress, void *buf, size_t dwords)
+{
+ u8 status = 0;
+ u32 metadata;
+ int ret;
+
+ metadata = (dwords << USB4_NVM_READ_LENGTH_SHIFT) &
+ USB4_NVM_READ_LENGTH_MASK;
+ metadata |= (dwaddress << USB4_NVM_READ_OFFSET_SHIFT) &
+ USB4_NVM_READ_OFFSET_MASK;
+
+ ret = usb4_switch_op_write_metadata(sw, metadata);
+ if (ret)
+ return ret;
+
+ ret = usb4_switch_op(sw, USB4_SWITCH_OP_NVM_READ, &status);
+ if (ret)
+ return ret;
+
+ if (status)
+ return -EIO;
+
+ return usb4_switch_op_read_data(sw, buf, dwords);
+}
+
+/**
+ * usb4_switch_nvm_read() - Read arbitrary bytes from router NVM
+ * @sw: USB4 router
+ * @address: Starting address in bytes
+ * @buf: Read data is placed here
+ * @size: How many bytes to read
+ *
+ * Reads NVM contents of the router. If NVM is not supported returns
+ * %-EOPNOTSUPP.
+ */
+int usb4_switch_nvm_read(struct tb_switch *sw, unsigned int address, void *buf,
+ size_t size)
+{
+ return usb4_switch_do_read_data(sw, address, buf, size,
+ usb4_switch_nvm_read_block);
+}
+
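
As a usage sketch (error handling abbreviated; the 64-byte length is arbitrary), a caller could read the start of the active NVM like this:

	u8 buf[64];
	int ret;

	/* Byte-granular; the helper splits this into dword-aligned blocks */
	ret = usb4_switch_nvm_read(sw, 0, buf, sizeof(buf));
	if (ret == -EOPNOTSUPP)
		tb_sw_dbg(sw, "router does not support NVM operations\n");
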
+static int usb4_switch_nvm_set_offset(struct tb_switch *sw,
+ unsigned int address)
+{
+ u32 metadata, dwaddress;
+ u8 status = 0;
+ int ret;
+
+ dwaddress = address / 4;
+ metadata = (dwaddress << USB4_NVM_SET_OFFSET_SHIFT) &
+ USB4_NVM_SET_OFFSET_MASK;
+
+ ret = usb4_switch_op_write_metadata(sw, metadata);
+ if (ret)
+ return ret;
+
+ ret = usb4_switch_op(sw, USB4_SWITCH_OP_NVM_SET_OFFSET, &status);
+ if (ret)
+ return ret;
+
+ return status ? -EIO : 0;
+}
+
+static int usb4_switch_nvm_write_next_block(struct tb_switch *sw,
+ const void *buf, size_t dwords)
+{
+ u8 status;
+ int ret;
+
+ ret = usb4_switch_op_write_data(sw, buf, dwords);
+ if (ret)
+ return ret;
+
+ ret = usb4_switch_op(sw, USB4_SWITCH_OP_NVM_WRITE, &status);
+ if (ret)
+ return ret;
+
+ return status ? -EIO : 0;
+}
+
+/**
+ * usb4_switch_nvm_write() - Write to the router NVM
+ * @sw: USB4 router
+ * @address: Start address where to write in bytes
+ * @buf: Pointer to the data to write
+ * @size: Size of @buf in bytes
+ *
+ * Writes @buf to the router NVM using USB4 router operations. If NVM
+ * write is not supported returns %-EOPNOTSUPP.
+ */
+int usb4_switch_nvm_write(struct tb_switch *sw, unsigned int address,
+ const void *buf, size_t size)
+{
+ int ret;
+
+ ret = usb4_switch_nvm_set_offset(sw, address);
+ if (ret)
+ return ret;
+
+ return usb4_switch_do_write_data(sw, address, buf, size,
+ usb4_switch_nvm_write_next_block);
+}
+
+/**
+ * usb4_switch_nvm_authenticate() - Authenticate new NVM
+ * @sw: USB4 router
+ *
+ * After the new NVM has been written via usb4_switch_nvm_write(), this
+ * function triggers NVM authentication process. If the authentication
+ * is successful the router is power cycled and the new NVM starts
+ * running. In case of failure returns negative errno.
+ */
+int usb4_switch_nvm_authenticate(struct tb_switch *sw)
+{
+ u8 status = 0;
+ int ret;
+
+ ret = usb4_switch_op(sw, USB4_SWITCH_OP_NVM_AUTH, &status);
+ if (ret)
+ return ret;
+
+ switch (status) {
+ case 0x0:
+ tb_sw_dbg(sw, "NVM authentication successful\n");
+ return 0;
+ case 0x1:
+ return -EINVAL;
+ case 0x2:
+ return -EAGAIN;
+ case 0x3:
+ return -EOPNOTSUPP;
+ default:
+ return -EIO;
+ }
+}
+
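
Assembled from the helpers above, the intended NVM upgrade sequence looks roughly like the following sketch (image and image_size stand for a validated firmware image obtained elsewhere):

	int ret;

	ret = usb4_switch_nvm_write(sw, 0, image, image_size);
	if (ret)
		return ret;

	/* On success the router power cycles into the new image */
	return usb4_switch_nvm_authenticate(sw);
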
+/**
+ * usb4_switch_query_dp_resource() - Query availability of DP IN resource
+ * @sw: USB4 router
+ * @in: DP IN adapter
+ *
+ * For DP tunneling this function can be used to query availability of
+ * DP IN resource. Returns true if the resource is available for DP
+ * tunneling, false otherwise.
+ */
+bool usb4_switch_query_dp_resource(struct tb_switch *sw, struct tb_port *in)
+{
+ u8 status;
+ int ret;
+
+ ret = usb4_switch_op_write_metadata(sw, in->port);
+ if (ret)
+ return false;
+
+ ret = usb4_switch_op(sw, USB4_SWITCH_OP_QUERY_DP_RESOURCE, &status);
+ /*
+ * If DP resource allocation is not supported assume it is
+ * always available.
+ */
+ if (ret == -EOPNOTSUPP)
+ return true;
+ else if (ret)
+ return false;
+
+ return !status;
+}
+
+/**
+ * usb4_switch_alloc_dp_resource() - Allocate DP IN resource
+ * @sw: USB4 router
+ * @in: DP IN adapter
+ *
+ * Allocates DP IN resource for DP tunneling using USB4 router
+ * operations. If the resource was allocated returns %0. Otherwise
+ * returns negative errno, in particular %-EBUSY if the resource is
+ * already allocated.
+ */
+int usb4_switch_alloc_dp_resource(struct tb_switch *sw, struct tb_port *in)
+{
+ u8 status;
+ int ret;
+
+ ret = usb4_switch_op_write_metadata(sw, in->port);
+ if (ret)
+ return ret;
+
+ ret = usb4_switch_op(sw, USB4_SWITCH_OP_ALLOC_DP_RESOURCE, &status);
+ if (ret == -EOPNOTSUPP)
+ return 0;
+ else if (ret)
+ return ret;
+
+ return status ? -EBUSY : 0;
+}
+
+/**
+ * usb4_switch_dealloc_dp_resource() - Release DP IN resource
+ * @sw: USB4 router
+ * @in: DP IN adapter
+ *
+ * Releases the previously allocated DP IN resource.
+ */
+int usb4_switch_dealloc_dp_resource(struct tb_switch *sw, struct tb_port *in)
+{
+ u8 status;
+ int ret;
+
+ ret = usb4_switch_op_write_metadata(sw, in->port);
+ if (ret)
+ return ret;
+
+ ret = usb4_switch_op(sw, USB4_SWITCH_OP_DEALLOC_DP_RESOURCE, &status);
+ if (ret == -EOPNOTSUPP)
+ return 0;
+ else if (ret)
+ return ret;
+
+ return status ? -EIO : 0;
+}
+
+static int usb4_port_idx(const struct tb_switch *sw, const struct tb_port *port)
+{
+ struct tb_port *p;
+ int usb4_idx = 0;
+
+ /* Assume port is primary */
+ tb_switch_for_each_port(sw, p) {
+ if (!tb_port_is_null(p))
+ continue;
+ if (tb_is_upstream_port(p))
+ continue;
+ if (!p->link_nr) {
+ if (p == port)
+ break;
+ usb4_idx++;
+ }
+ }
+
+ return usb4_idx;
+}
+
+/**
+ * usb4_switch_map_pcie_down() - Map USB4 port to a PCIe downstream adapter
+ * @sw: USB4 router
+ * @port: USB4 port
+ *
+ * USB4 routers have direct mapping between USB4 ports and PCIe
+ * downstream adapters where the PCIe topology is extended. This
+ * function returns the corresponding downstream PCIe adapter or %NULL
+ * if no such mapping was possible.
+ */
+struct tb_port *usb4_switch_map_pcie_down(struct tb_switch *sw,
+ const struct tb_port *port)
+{
+ int usb4_idx = usb4_port_idx(sw, port);
+ struct tb_port *p;
+ int pcie_idx = 0;
+
+ /* Find PCIe down port matching usb4_port */
+ tb_switch_for_each_port(sw, p) {
+ if (!tb_port_is_pcie_down(p))
+ continue;
+
+ if (pcie_idx == usb4_idx && !tb_pci_port_is_enabled(p))
+ return p;
+
+ pcie_idx++;
+ }
+
+ return NULL;
+}
+
+/**
+ * usb4_switch_map_usb3_down() - Map USB4 port to a USB3 downstream adapter
+ * @sw: USB4 router
+ * @port: USB4 port
+ *
+ * USB4 routers have direct mapping between USB4 ports and USB 3.x
+ * downstream adapters where the USB 3.x topology is extended. This
+ * function returns the corresponding downstream USB 3.x adapter or
+ * %NULL if no such mapping was possible.
+ */
+struct tb_port *usb4_switch_map_usb3_down(struct tb_switch *sw,
+ const struct tb_port *port)
+{
+ int usb4_idx = usb4_port_idx(sw, port);
+ struct tb_port *p;
+ int usb_idx = 0;
+
+ /* Find USB3 down port matching usb4_port */
+ tb_switch_for_each_port(sw, p) {
+ if (!tb_port_is_usb3_down(p))
+ continue;
+
+ if (usb_idx == usb4_idx && !tb_usb3_port_is_enabled(p))
+ return p;
+
+ usb_idx++;
+ }
+
+ return NULL;
+}
+
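
For illustration, a connection manager could use this mapping when setting up USB 3.x tunnels; a hedged sketch, assuming the surrounding variables (sw, port, tb, up) already exist in the caller:

	struct tb_port *down;

	down = usb4_switch_map_usb3_down(sw, port);
	if (down)
		tunnel = tb_tunnel_alloc_usb3(tb, up, down);
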
+/**
+ * usb4_port_unlock() - Unlock USB4 downstream port
+ * @port: USB4 port to unlock
+ *
+ * Unlocks USB4 downstream port so that the connection manager can
+ * access the router below this port.
+ */
+int usb4_port_unlock(struct tb_port *port)
+{
+ int ret;
+ u32 val;
+
+ ret = tb_port_read(port, &val, TB_CFG_PORT, ADP_CS_4, 1);
+ if (ret)
+ return ret;
+
+ val &= ~ADP_CS_4_LCK;
+ return tb_port_write(port, &val, TB_CFG_PORT, ADP_CS_4, 1);
+}
diff --git a/drivers/thunderbolt/xdomain.c b/drivers/thunderbolt/xdomain.c
index 880d784398a3..053f918e00e8 100644
--- a/drivers/thunderbolt/xdomain.c
+++ b/drivers/thunderbolt/xdomain.c
@@ -1220,7 +1220,13 @@ struct tb_xdomain *tb_xdomain_alloc(struct tb *tb, struct device *parent,
u64 route, const uuid_t *local_uuid,
const uuid_t *remote_uuid)
{
+ struct tb_switch *parent_sw = tb_to_switch(parent);
struct tb_xdomain *xd;
+ struct tb_port *down;
+
+ /* Make sure the downstream domain is accessible */
+ down = tb_port_at(route, parent_sw);
+ tb_port_unlock(down);
xd = kzalloc(sizeof(*xd), GFP_KERNEL);
if (!xd)
diff --git a/drivers/tty/cyclades.c b/drivers/tty/cyclades.c
index 4562c8060d09..a6aabfd6e2da 100644
--- a/drivers/tty/cyclades.c
+++ b/drivers/tty/cyclades.c
@@ -3256,7 +3256,7 @@ static int __init cy_detect_isa(void)
return nboard;
/* probe for CD1400... */
- cy_isa_address = ioremap_nocache(isa_address, CyISA_Ywin);
+ cy_isa_address = ioremap(isa_address, CyISA_Ywin);
if (cy_isa_address == NULL) {
printk(KERN_ERR "Cyclom-Y/ISA: can't remap base "
"address\n");
@@ -3690,13 +3690,13 @@ static int cy_pci_probe(struct pci_dev *pdev,
device_id == PCI_DEVICE_ID_CYCLOM_Y_Hi) {
card_name = "Cyclom-Y";
- addr0 = ioremap_nocache(pci_resource_start(pdev, 0),
+ addr0 = ioremap(pci_resource_start(pdev, 0),
CyPCI_Yctl);
if (addr0 == NULL) {
dev_err(&pdev->dev, "can't remap ctl region\n");
goto err_reg;
}
- addr2 = ioremap_nocache(pci_resource_start(pdev, 2),
+ addr2 = ioremap(pci_resource_start(pdev, 2),
CyPCI_Ywin);
if (addr2 == NULL) {
dev_err(&pdev->dev, "can't remap base region\n");
@@ -3712,7 +3712,7 @@ static int cy_pci_probe(struct pci_dev *pdev,
} else if (device_id == PCI_DEVICE_ID_CYCLOM_Z_Hi) {
struct RUNTIME_9060 __iomem *ctl_addr;
- ctl_addr = addr0 = ioremap_nocache(pci_resource_start(pdev, 0),
+ ctl_addr = addr0 = ioremap(pci_resource_start(pdev, 0),
CyPCI_Zctl);
if (addr0 == NULL) {
dev_err(&pdev->dev, "can't remap ctl region\n");
@@ -3727,7 +3727,7 @@ static int cy_pci_probe(struct pci_dev *pdev,
mailbox = readl(&ctl_addr->mail_box_0);
- addr2 = ioremap_nocache(pci_resource_start(pdev, 2),
+ addr2 = ioremap(pci_resource_start(pdev, 2),
mailbox == ZE_V1 ? CyPCI_Ze_win : CyPCI_Zwin);
if (addr2 == NULL) {
dev_err(&pdev->dev, "can't remap base region\n");
diff --git a/drivers/tty/mips_ejtag_fdc.c b/drivers/tty/mips_ejtag_fdc.c
index 4c1cd49ae95b..620d8488b83e 100644
--- a/drivers/tty/mips_ejtag_fdc.c
+++ b/drivers/tty/mips_ejtag_fdc.c
@@ -898,7 +898,7 @@ static int mips_ejtag_fdc_tty_probe(struct mips_cdmm_device *dev)
atomic_set(&priv->xmit_total, 0);
raw_spin_lock_init(&priv->lock);
- priv->reg = devm_ioremap_nocache(priv->dev, dev->res.start,
+ priv->reg = devm_ioremap(priv->dev, dev->res.start,
resource_size(&dev->res));
if (!priv->reg) {
dev_err(priv->dev, "ioremap failed for resource %pR\n",
diff --git a/drivers/tty/moxa.c b/drivers/tty/moxa.c
index 3a1a5e0ee93f..9f13f7d49dd7 100644
--- a/drivers/tty/moxa.c
+++ b/drivers/tty/moxa.c
@@ -961,7 +961,7 @@ static int moxa_pci_probe(struct pci_dev *pdev,
goto err;
}
- board->basemem = ioremap_nocache(pci_resource_start(pdev, 2), 0x4000);
+ board->basemem = ioremap(pci_resource_start(pdev, 2), 0x4000);
if (board->basemem == NULL) {
dev_err(&pdev->dev, "can't remap io space 2\n");
retval = -ENOMEM;
@@ -1071,7 +1071,7 @@ static int __init moxa_init(void)
brd->numPorts = type[i] == MOXA_BOARD_C218_ISA ? 8 :
numports[i];
brd->busType = MOXA_BUS_TYPE_ISA;
- brd->basemem = ioremap_nocache(baseaddr[i], 0x4000);
+ brd->basemem = ioremap(baseaddr[i], 0x4000);
if (!brd->basemem) {
printk(KERN_ERR "MOXA: can't remap %lx\n",
baseaddr[i]);
diff --git a/drivers/tty/n_gsm.c b/drivers/tty/n_gsm.c
index 36a3eb4ad4c5..f1c90fa2978e 100644
--- a/drivers/tty/n_gsm.c
+++ b/drivers/tty/n_gsm.c
@@ -2704,7 +2704,7 @@ static netdev_tx_t gsm_mux_net_start_xmit(struct sk_buff *skb,
}
/* called when a packet did not ack within the watchdog timeout */
-static void gsm_mux_net_tx_timeout(struct net_device *net)
+static void gsm_mux_net_tx_timeout(struct net_device *net, unsigned int txqueue)
{
/* Tell syslog we are hosed. */
dev_dbg(&net->dev, "Tx timed out.\n");
diff --git a/drivers/tty/n_hdlc.c b/drivers/tty/n_hdlc.c
index 98361acd3053..27b506bf03ce 100644
--- a/drivers/tty/n_hdlc.c
+++ b/drivers/tty/n_hdlc.c
@@ -115,11 +115,9 @@
struct n_hdlc_buf {
struct list_head list_item;
int count;
- char buf[1];
+ char buf[];
};
-#define N_HDLC_BUF_SIZE (sizeof(struct n_hdlc_buf) + maxframe)
-
struct n_hdlc_buf_list {
struct list_head list;
int count;
@@ -524,7 +522,8 @@ static void n_hdlc_tty_receive(struct tty_struct *tty, const __u8 *data,
/* no buffers in free list, attempt to allocate another rx buffer */
/* unless the maximum count has been reached */
if (n_hdlc->rx_buf_list.count < MAX_RX_BUF_COUNT)
- buf = kmalloc(N_HDLC_BUF_SIZE, GFP_ATOMIC);
+ buf = kmalloc(struct_size(buf, buf, maxframe),
+ GFP_ATOMIC);
}
if (!buf) {
@@ -853,7 +852,7 @@ static struct n_hdlc *n_hdlc_alloc(void)
/* allocate free rx buffer list */
for(i=0;i<DEFAULT_RX_BUF_COUNT;i++) {
- buf = kmalloc(N_HDLC_BUF_SIZE, GFP_KERNEL);
+ buf = kmalloc(struct_size(buf, buf, maxframe), GFP_KERNEL);
if (buf)
n_hdlc_buf_put(&n_hdlc->rx_free_buf_list,buf);
else if (debuglevel >= DEBUG_LEVEL_INFO)
@@ -862,7 +861,7 @@ static struct n_hdlc *n_hdlc_alloc(void)
/* allocate free tx buffer list */
for(i=0;i<DEFAULT_TX_BUF_COUNT;i++) {
- buf = kmalloc(N_HDLC_BUF_SIZE, GFP_KERNEL);
+ buf = kmalloc(struct_size(buf, buf, maxframe), GFP_KERNEL);
if (buf)
n_hdlc_buf_put(&n_hdlc->tx_free_buf_list,buf);
else if (debuglevel >= DEBUG_LEVEL_INFO)
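
The conversion above pairs two idioms: the one-byte array becomes a C99 flexible array member, and struct_size() replaces the open-coded N_HDLC_BUF_SIZE macro, computing the header size plus the payload with overflow checking. In isolation:

	struct n_hdlc_buf *buf;

	/*
	 * struct_size(buf, buf, maxframe) == sizeof(*buf) + maxframe,
	 * saturating to SIZE_MAX if the arithmetic would overflow
	 */
	buf = kmalloc(struct_size(buf, buf, maxframe), GFP_KERNEL);
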
diff --git a/drivers/tty/serdev/core.c b/drivers/tty/serdev/core.c
index ce5309d00280..42345e79920c 100644
--- a/drivers/tty/serdev/core.c
+++ b/drivers/tty/serdev/core.c
@@ -115,8 +115,8 @@ int serdev_device_add(struct serdev_device *serdev)
err = device_add(&serdev->dev);
if (err < 0) {
- dev_err(&serdev->dev, "Can't add %s, status %d\n",
- dev_name(&serdev->dev), err);
+ dev_err(&serdev->dev, "Can't add %s, status %pe\n",
+ dev_name(&serdev->dev), ERR_PTR(err));
goto err_clear_serdev;
}
@@ -540,7 +540,8 @@ static int of_serdev_register_devices(struct serdev_controller *ctrl)
err = serdev_device_add(serdev);
if (err) {
dev_err(&serdev->dev,
- "failure adding device. status %d\n", err);
+ "failure adding device. status %pe\n",
+ ERR_PTR(err));
serdev_device_put(serdev);
} else
found = true;
@@ -656,7 +657,8 @@ static acpi_status acpi_serdev_register_device(struct serdev_controller *ctrl,
err = serdev_device_add(serdev);
if (err) {
dev_err(&serdev->dev,
- "failure adding ACPI serdev device. status %d\n", err);
+ "failure adding ACPI serdev device. status %pe\n",
+ ERR_PTR(err));
serdev_device_put(serdev);
}
@@ -741,8 +743,8 @@ int serdev_controller_add(struct serdev_controller *ctrl)
ret_of = of_serdev_register_devices(ctrl);
ret_acpi = acpi_serdev_register_devices(ctrl);
if (ret_of && ret_acpi) {
- dev_dbg(&ctrl->dev, "no devices registered: of:%d acpi:%d\n",
- ret_of, ret_acpi);
+ dev_dbg(&ctrl->dev, "no devices registered: of:%pe acpi:%pe\n",
+ ERR_PTR(ret_of), ERR_PTR(ret_acpi));
ret = -ENODEV;
goto err_rpm_disable;
}
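
These conversions rely on the %pe format specifier, which prints a symbolic error name for an ERR_PTR()-encoded value (with CONFIG_SYMBOLIC_ERRNAME=y; it falls back to the numeric value otherwise). A contrived example:

	/* Prints "probe failed: -ENODEV" rather than "probe failed: -19" */
	dev_err(dev, "probe failed: %pe\n", ERR_PTR(-ENODEV));
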
diff --git a/drivers/tty/serial/21285.c b/drivers/tty/serial/21285.c
index 32b3acf8150a..718e010fcb04 100644
--- a/drivers/tty/serial/21285.c
+++ b/drivers/tty/serial/21285.c
@@ -41,8 +41,43 @@
static const char serial21285_name[] = "Footbridge UART";
-#define tx_enabled(port) ((port)->unused[0])
-#define rx_enabled(port) ((port)->unused[1])
+/*
+ * We only need 2 bits of data, so instead of creating a whole structure for
+ * this, use bits of the private_data pointer of the uart port structure.
+ */
+#define tx_enabled_bit 0
+#define rx_enabled_bit 1
+
+static bool is_enabled(struct uart_port *port, int bit)
+{
+	unsigned long *private_data = (unsigned long *)&port->private_data;
+
+	if (test_bit(bit, private_data))
+		return true;
+	return false;
+}
+
+static void enable(struct uart_port *port, int bit)
+{
+	unsigned long *private_data = (unsigned long *)&port->private_data;
+
+	set_bit(bit, private_data);
+}
+
+static void disable(struct uart_port *port, int bit)
+{
+	unsigned long *private_data = (unsigned long *)&port->private_data;
+
+	clear_bit(bit, private_data);
+}
+
+#define is_tx_enabled(port) is_enabled(port, tx_enabled_bit)
+#define tx_enable(port) enable(port, tx_enabled_bit)
+#define tx_disable(port) disable(port, tx_enabled_bit)
+
+#define is_rx_enabled(port) is_enabled(port, rx_enabled_bit)
+#define rx_enable(port) enable(port, rx_enabled_bit)
+#define rx_disable(port) disable(port, rx_enabled_bit)
/*
* The documented expression for selecting the divisor is:
@@ -57,25 +92,25 @@ static const char serial21285_name[] = "Footbridge UART";
static void serial21285_stop_tx(struct uart_port *port)
{
- if (tx_enabled(port)) {
+ if (is_tx_enabled(port)) {
disable_irq_nosync(IRQ_CONTX);
- tx_enabled(port) = 0;
+ tx_disable(port);
}
}
static void serial21285_start_tx(struct uart_port *port)
{
- if (!tx_enabled(port)) {
+ if (!is_tx_enabled(port)) {
enable_irq(IRQ_CONTX);
- tx_enabled(port) = 1;
+ tx_enable(port);
}
}
static void serial21285_stop_rx(struct uart_port *port)
{
- if (rx_enabled(port)) {
+ if (is_rx_enabled(port)) {
disable_irq_nosync(IRQ_CONRX);
- rx_enabled(port) = 0;
+ rx_disable(port);
}
}
@@ -185,8 +220,8 @@ static int serial21285_startup(struct uart_port *port)
{
int ret;
- tx_enabled(port) = 1;
- rx_enabled(port) = 1;
+ tx_enable(port);
+ rx_enable(port);
ret = request_irq(IRQ_CONRX, serial21285_rx_chars, 0,
serial21285_name, port);
diff --git a/drivers/tty/serial/8250/8250_aspeed_vuart.c b/drivers/tty/serial/8250/8250_aspeed_vuart.c
index 6e67fd89445a..d657aa14c3e4 100644
--- a/drivers/tty/serial/8250/8250_aspeed_vuart.c
+++ b/drivers/tty/serial/8250/8250_aspeed_vuart.c
@@ -5,10 +5,6 @@
* Copyright (C) 2016 Jeremy Kerr <jk@ozlabs.org>, IBM Corp.
* Copyright (C) 2006 Arnd Bergmann <arnd@arndb.de>, IBM Corp.
*/
-#if defined(CONFIG_SERIAL_8250_CONSOLE) && defined(CONFIG_MAGIC_SYSRQ)
-#define SUPPORT_SYSRQ
-#endif
-
#include <linux/device.h>
#include <linux/module.h>
#include <linux/of_address.h>
@@ -406,6 +402,7 @@ static int aspeed_vuart_probe(struct platform_device *pdev)
port.port.unthrottle = aspeed_vuart_unthrottle;
port.port.status = UPSTAT_SYNC_FIFO;
port.port.dev = &pdev->dev;
+ port.port.has_sysrq = IS_ENABLED(CONFIG_SERIAL_8250_CONSOLE);
rc = sysfs_create_group(&vuart->dev->kobj, &aspeed_vuart_attr_group);
if (rc < 0)
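
This hunk shows the pattern repeated by most of the serial drivers touched in this patch: the per-file SUPPORT_SYSRQ define is dropped and the capability is recorded on the port instead, so serial_core can test it at run time. Schematically:

/* Before: compile-time, decided per source file */
#if defined(CONFIG_SERIAL_8250_CONSOLE) && defined(CONFIG_MAGIC_SYSRQ)
#define SUPPORT_SYSRQ
#endif

/* After: run-time, recorded per port */
port->has_sysrq = IS_ENABLED(CONFIG_SERIAL_8250_CONSOLE);
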
diff --git a/drivers/tty/serial/8250/8250_bcm2835aux.c b/drivers/tty/serial/8250/8250_bcm2835aux.c
index 8ce700c1a7fc..e70e3cc30050 100644
--- a/drivers/tty/serial/8250/8250_bcm2835aux.c
+++ b/drivers/tty/serial/8250/8250_bcm2835aux.c
@@ -16,14 +16,19 @@
#include "8250.h"
+/**
+ * struct bcm2835aux_data - driver private data of BCM2835 auxiliary UART
+ * @clk: clock producer of the port's uartclk
+ * @line: index of the port's serial8250_ports[] entry
+ */
struct bcm2835aux_data {
- struct uart_8250_port uart;
struct clk *clk;
int line;
};
static int bcm2835aux_serial_probe(struct platform_device *pdev)
{
+ struct uart_8250_port up = { };
struct bcm2835aux_data *data;
struct resource *res;
int ret;
@@ -34,23 +39,21 @@ static int bcm2835aux_serial_probe(struct platform_device *pdev)
return -ENOMEM;
/* initialize data */
- spin_lock_init(&data->uart.port.lock);
- data->uart.capabilities = UART_CAP_FIFO | UART_CAP_MINI;
- data->uart.port.dev = &pdev->dev;
- data->uart.port.regshift = 2;
- data->uart.port.type = PORT_16550;
- data->uart.port.iotype = UPIO_MEM;
- data->uart.port.fifosize = 8;
- data->uart.port.flags = UPF_SHARE_IRQ |
- UPF_FIXED_PORT |
- UPF_FIXED_TYPE |
- UPF_SKIP_TEST;
+ up.capabilities = UART_CAP_FIFO | UART_CAP_MINI;
+ up.port.dev = &pdev->dev;
+ up.port.regshift = 2;
+ up.port.type = PORT_16550;
+ up.port.iotype = UPIO_MEM;
+ up.port.fifosize = 8;
+ up.port.flags = UPF_SHARE_IRQ | UPF_FIXED_PORT | UPF_FIXED_TYPE |
+ UPF_SKIP_TEST | UPF_IOREMAP;
/* get the clock - this also enables the HW */
data->clk = devm_clk_get(&pdev->dev, NULL);
ret = PTR_ERR_OR_ZERO(data->clk);
if (ret) {
- dev_err(&pdev->dev, "could not get clk: %d\n", ret);
+ if (ret != -EPROBE_DEFER)
+ dev_err(&pdev->dev, "could not get clk: %d\n", ret);
return ret;
}
@@ -58,7 +61,7 @@ static int bcm2835aux_serial_probe(struct platform_device *pdev)
ret = platform_get_irq(pdev, 0);
if (ret < 0)
return ret;
- data->uart.port.irq = ret;
+ up.port.irq = ret;
/* map the main registers */
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
@@ -66,15 +69,13 @@ static int bcm2835aux_serial_probe(struct platform_device *pdev)
dev_err(&pdev->dev, "memory resource not found");
return -EINVAL;
}
- data->uart.port.membase = devm_ioremap_resource(&pdev->dev, res);
- ret = PTR_ERR_OR_ZERO(data->uart.port.membase);
- if (ret)
- return ret;
+ up.port.mapbase = res->start;
+ up.port.mapsize = resource_size(res);
/* Check for a fixed line number */
ret = of_alias_get_id(pdev->dev.of_node, "serial");
if (ret >= 0)
- data->uart.port.line = ret;
+ up.port.line = ret;
/* enable the clock as a last step */
ret = clk_prepare_enable(data->clk);
@@ -89,13 +90,14 @@ static int bcm2835aux_serial_probe(struct platform_device *pdev)
* so we have to multiply the actual clock by 2
* to get identical baudrates.
*/
- data->uart.port.uartclk = clk_get_rate(data->clk) * 2;
+ up.port.uartclk = clk_get_rate(data->clk) * 2;
/* register the port */
- ret = serial8250_register_8250_port(&data->uart);
+ ret = serial8250_register_8250_port(&up);
if (ret < 0) {
- dev_err(&pdev->dev, "unable to register 8250 port - %d\n",
- ret);
+ if (ret != -EPROBE_DEFER)
+ dev_err(&pdev->dev,
+ "unable to register 8250 port - %d\n", ret);
goto dis_clk;
}
data->line = ret;
@@ -113,7 +115,7 @@ static int bcm2835aux_serial_remove(struct platform_device *pdev)
{
struct bcm2835aux_data *data = platform_get_drvdata(pdev);
- serial8250_unregister_port(data->uart.port.line);
+ serial8250_unregister_port(data->line);
clk_disable_unprepare(data->clk);
return 0;
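
Adding UPF_IOREMAP moves the mapping into the 8250 core: the driver now only describes the region via mapbase/mapsize, and serial8250_request_std_resource() performs the ioremap() itself (see the 8250_port.c hunk later in this patch). A sketch of the division of labour:

	/* Driver side: describe the region, do not map it */
	up.port.flags |= UPF_IOREMAP;
	up.port.mapbase = res->start;
	up.port.mapsize = resource_size(res);

	/*
	 * Core side (serial8250_request_std_resource):
	 *	port->membase = ioremap(port->mapbase, size);
	 */
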
diff --git a/drivers/tty/serial/8250/8250_core.c b/drivers/tty/serial/8250/8250_core.c
index e682390ce0de..0894a22fd702 100644
--- a/drivers/tty/serial/8250/8250_core.c
+++ b/drivers/tty/serial/8250/8250_core.c
@@ -816,6 +816,7 @@ static int serial8250_probe(struct platform_device *dev)
uart.port.flags = p->flags;
uart.port.mapbase = p->mapbase;
uart.port.hub6 = p->hub6;
+ uart.port.has_sysrq = p->has_sysrq;
uart.port.private_data = p->private_data;
uart.port.type = p->type;
uart.port.serial_in = p->serial_in;
diff --git a/drivers/tty/serial/8250/8250_exar.c b/drivers/tty/serial/8250/8250_exar.c
index 108cd55f9c4d..91e9b070d36d 100644
--- a/drivers/tty/serial/8250/8250_exar.c
+++ b/drivers/tty/serial/8250/8250_exar.c
@@ -186,7 +186,7 @@ static int xr17v35x_startup(struct uart_port *port)
static void exar_shutdown(struct uart_port *port)
{
unsigned char lsr;
- bool tx_complete = 0;
+ bool tx_complete = false;
struct uart_8250_port *up = up_to_u8250p(port);
struct circ_buf *xmit = &port->state->xmit;
int i = 0;
@@ -194,9 +194,9 @@ static void exar_shutdown(struct uart_port *port)
do {
lsr = serial_in(up, UART_LSR);
if (lsr & (UART_LSR_TEMT | UART_LSR_THRE))
- tx_complete = 1;
+ tx_complete = true;
else
- tx_complete = 0;
+ tx_complete = false;
usleep_range(1000, 1100);
} while (!uart_circ_empty(xmit) && !tx_complete && i++ < 1000);
diff --git a/drivers/tty/serial/8250/8250_fsl.c b/drivers/tty/serial/8250/8250_fsl.c
index aa0e216d5ead..0d0c80905c58 100644
--- a/drivers/tty/serial/8250/8250_fsl.c
+++ b/drivers/tty/serial/8250/8250_fsl.c
@@ -1,8 +1,4 @@
// SPDX-License-Identifier: GPL-2.0
-#if defined(CONFIG_SERIAL_8250_CONSOLE) && defined(CONFIG_MAGIC_SYSRQ)
-#define SUPPORT_SYSRQ
-#endif
-
#include <linux/serial_reg.h>
#include <linux/serial_8250.h>
diff --git a/drivers/tty/serial/8250/8250_gsc.c b/drivers/tty/serial/8250/8250_gsc.c
index 0809ae2aa9b1..673cda3d011d 100644
--- a/drivers/tty/serial/8250/8250_gsc.c
+++ b/drivers/tty/serial/8250/8250_gsc.c
@@ -55,7 +55,7 @@ static int __init serial_init_chip(struct parisc_device *dev)
uart.port.uartclk = (dev->id.sversion != 0xad) ?
7272727 : 1843200;
uart.port.mapbase = address;
- uart.port.membase = ioremap_nocache(address, 16);
+ uart.port.membase = ioremap(address, 16);
if (!uart.port.membase) {
dev_warn(&dev->dev, "Failed to map memory\n");
return -ENOMEM;
diff --git a/drivers/tty/serial/8250/8250_ioc3.c b/drivers/tty/serial/8250/8250_ioc3.c
new file mode 100644
index 000000000000..4c405f1b9c67
--- /dev/null
+++ b/drivers/tty/serial/8250/8250_ioc3.c
@@ -0,0 +1,98 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * SGI IOC3 8250 UART driver
+ *
+ * Copyright (C) 2019 Thomas Bogendoerfer <tbogendoerfer@suse.de>
+ *
+ * based on code Copyright (C) 2005 Stanislaw Skowronek <skylark@unaligned.org>
+ * Copyright (C) 2014 Joshua Kinard <kumba@gentoo.org>
+ */
+
+#include <linux/module.h>
+#include <linux/errno.h>
+#include <linux/io.h>
+#include <linux/platform_device.h>
+
+#include "8250.h"
+
+#define IOC3_UARTCLK (22000000 / 3)
+
+struct ioc3_8250_data {
+ int line;
+};
+
+static unsigned int ioc3_serial_in(struct uart_port *p, int offset)
+{
+ return readb(p->membase + (offset ^ 3));
+}
+
+static void ioc3_serial_out(struct uart_port *p, int offset, int value)
+{
+ writeb(value, p->membase + (offset ^ 3));
+}
+
+static int serial8250_ioc3_probe(struct platform_device *pdev)
+{
+ struct ioc3_8250_data *data;
+ struct uart_8250_port up;
+ struct resource *r;
+ void __iomem *membase;
+ int irq, line;
+
+ r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!r)
+ return -ENODEV;
+
+ data = devm_kzalloc(&pdev->dev, sizeof(*data), GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
+	membase = devm_ioremap(&pdev->dev, r->start, resource_size(r));
+ if (!membase)
+ return -ENOMEM;
+
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0)
+ irq = 0; /* no interrupt -> use polling */
+
+ /* Register serial ports with 8250.c */
+ memset(&up, 0, sizeof(struct uart_8250_port));
+ up.port.iotype = UPIO_MEM;
+ up.port.uartclk = IOC3_UARTCLK;
+ up.port.type = PORT_16550A;
+ up.port.irq = irq;
+ up.port.flags = (UPF_BOOT_AUTOCONF | UPF_SHARE_IRQ);
+ up.port.dev = &pdev->dev;
+ up.port.membase = membase;
+ up.port.mapbase = r->start;
+ up.port.serial_in = ioc3_serial_in;
+ up.port.serial_out = ioc3_serial_out;
+	line = serial8250_register_8250_port(&up);
+	if (line < 0)
+		return line;
+	data->line = line;
+
+	platform_set_drvdata(pdev, data);
+ return 0;
+}
+
+static int serial8250_ioc3_remove(struct platform_device *pdev)
+{
+ struct ioc3_8250_data *data = platform_get_drvdata(pdev);
+
+ serial8250_unregister_port(data->line);
+ return 0;
+}
+
+static struct platform_driver serial8250_ioc3_driver = {
+ .probe = serial8250_ioc3_probe,
+ .remove = serial8250_ioc3_remove,
+ .driver = {
+ .name = "ioc3-serial8250",
+ }
+};
+
+module_platform_driver(serial8250_ioc3_driver);
+
+MODULE_AUTHOR("Thomas Bogendoerfer <tbogendoerfer@suse.de>");
+MODULE_DESCRIPTION("SGI IOC3 8250 UART driver");
+MODULE_LICENSE("GPL");
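
The offset ^ 3 in ioc3_serial_in()/ioc3_serial_out() is the usual byte-lane swizzle for a little-endian UART sitting behind a big-endian (MIPS) bus: byte register N is reachable at byte address N ^ 3 within its 32-bit word. The resulting mapping, as a standalone sketch (illustration only, not part of the driver):

/* reg 0 <-> byte 3, reg 1 <-> byte 2, reg 2 <-> byte 1, reg 3 <-> byte 0 */
static inline unsigned int ioc3_reg_to_byte(unsigned int reg)
{
	return reg ^ 3;
}
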
diff --git a/drivers/tty/serial/8250/8250_of.c b/drivers/tty/serial/8250/8250_of.c
index 92fbf46ce3bd..531ad67395e0 100644
--- a/drivers/tty/serial/8250/8250_of.c
+++ b/drivers/tty/serial/8250/8250_of.c
@@ -222,8 +222,10 @@ static int of_platform_serial_setup(struct platform_device *ofdev,
if (IS_ENABLED(CONFIG_SERIAL_8250_FSL) &&
(of_device_is_compatible(np, "fsl,ns16550") ||
- of_device_is_compatible(np, "fsl,16550-FIFO64")))
+ of_device_is_compatible(np, "fsl,16550-FIFO64"))) {
port->handle_irq = fsl8250_handle_irq;
+ port->has_sysrq = IS_ENABLED(CONFIG_SERIAL_8250_CONSOLE);
+ }
return 0;
err_unprepare:
diff --git a/drivers/tty/serial/8250/8250_omap.c b/drivers/tty/serial/8250/8250_omap.c
index 836e736ae188..6f343ca08440 100644
--- a/drivers/tty/serial/8250/8250_omap.c
+++ b/drivers/tty/serial/8250/8250_omap.c
@@ -8,10 +8,6 @@
*
*/
-#if defined(CONFIG_SERIAL_8250_CONSOLE) && defined(CONFIG_MAGIC_SYSRQ)
-#define SUPPORT_SYSRQ
-#endif
-
#include <linux/clk.h>
#include <linux/device.h>
#include <linux/io.h>
@@ -1147,7 +1143,7 @@ static int omap8250_probe(struct platform_device *pdev)
if (!priv)
return -ENOMEM;
- membase = devm_ioremap_nocache(&pdev->dev, regs->start,
+ membase = devm_ioremap(&pdev->dev, regs->start,
resource_size(regs));
if (!membase)
return -ENODEV;
@@ -1192,6 +1188,7 @@ static int omap8250_probe(struct platform_device *pdev)
up.port.throttle = omap_8250_throttle;
up.port.unthrottle = omap_8250_unthrottle;
up.port.rs485_config = omap_8250_rs485_config;
+ up.port.has_sysrq = IS_ENABLED(CONFIG_SERIAL_8250_CONSOLE);
ret = of_alias_get_id(np, "serial");
if (ret < 0) {
diff --git a/drivers/tty/serial/8250/8250_pci.c b/drivers/tty/serial/8250/8250_pci.c
index 022924d5ad54..939685fed396 100644
--- a/drivers/tty/serial/8250/8250_pci.c
+++ b/drivers/tty/serial/8250/8250_pci.c
@@ -275,7 +275,7 @@ static int pci_plx9050_init(struct pci_dev *dev)
/*
* enable/disable interrupts
*/
- p = ioremap_nocache(pci_resource_start(dev, 0), 0x80);
+ p = ioremap(pci_resource_start(dev, 0), 0x80);
if (p == NULL)
return -ENOMEM;
writel(irq_config, p + 0x4c);
@@ -299,7 +299,7 @@ static void pci_plx9050_exit(struct pci_dev *dev)
/*
* disable interrupts
*/
- p = ioremap_nocache(pci_resource_start(dev, 0), 0x80);
+ p = ioremap(pci_resource_start(dev, 0), 0x80);
if (p != NULL) {
writel(0, p + 0x4c);
@@ -475,7 +475,7 @@ static int pci_siig10x_init(struct pci_dev *dev)
break;
}
- p = ioremap_nocache(pci_resource_start(dev, 0), 0x80);
+ p = ioremap(pci_resource_start(dev, 0), 0x80);
if (p == NULL)
return -ENOMEM;
diff --git a/drivers/tty/serial/8250/8250_port.c b/drivers/tty/serial/8250/8250_port.c
index 90655910b0c7..430e3467aff7 100644
--- a/drivers/tty/serial/8250/8250_port.c
+++ b/drivers/tty/serial/8250/8250_port.c
@@ -11,10 +11,6 @@
* membase is an 'ioremapped' cookie.
*/
-#if defined(CONFIG_SERIAL_8250_CONSOLE) && defined(CONFIG_MAGIC_SYSRQ)
-#define SUPPORT_SYSRQ
-#endif
-
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/ioport.h>
@@ -1001,6 +997,9 @@ static void autoconfig_16550a(struct uart_8250_port *up)
up->port.type = PORT_16550A;
up->capabilities |= UART_CAP_FIFO;
+ if (!IS_ENABLED(CONFIG_SERIAL_8250_16550A_VARIANTS))
+ return;
+
/*
* Check for presence of the EFR when DLAB is set.
* Only ST16C650V1 UARTs pass this test.
@@ -2766,7 +2765,7 @@ static int serial8250_request_std_resource(struct uart_8250_port *up)
}
if (port->flags & UPF_IOREMAP) {
- port->membase = ioremap_nocache(port->mapbase, size);
+ port->membase = ioremap(port->mapbase, size);
if (!port->membase) {
release_mem_region(port->mapbase, size);
ret = -ENOMEM;
@@ -3055,6 +3054,7 @@ void serial8250_init_port(struct uart_8250_port *up)
spin_lock_init(&port->lock);
port->ops = &serial8250_pops;
+ port->has_sysrq = IS_ENABLED(CONFIG_SERIAL_8250_CONSOLE);
up->cur_iotype = 0xFF;
}
diff --git a/drivers/tty/serial/8250/Kconfig b/drivers/tty/serial/8250/Kconfig
index fab3d4f20667..f16824bbb573 100644
--- a/drivers/tty/serial/8250/Kconfig
+++ b/drivers/tty/serial/8250/Kconfig
@@ -60,6 +60,16 @@ config SERIAL_8250_PNP
This builds standard PNP serial support. You may be able to
disable this feature if you only need legacy serial support.
+config SERIAL_8250_16550A_VARIANTS
+ bool "Support for variants of the 16550A serial port"
+ depends on SERIAL_8250
+ help
+ The 8250 driver can probe for many variants of the venerable 16550A
+ serial port. Doing so takes additional time at boot.
+
+ On modern systems, especially those using serial only for a simple
+ console, you can say N here.
+
config SERIAL_8250_FINTEK
bool "Support for Fintek F81216A LPC to 4 UART RS485 API"
depends on SERIAL_8250
@@ -371,6 +381,17 @@ config SERIAL_8250_EM
port hardware found on the Emma Mobile line of processors.
If unsure, say N.
+config SERIAL_8250_IOC3
+ tristate "SGI IOC3 8250 UART support"
+ depends on SGI_MFD_IOC3 && SERIAL_8250
+ select SERIAL_8250_EXTENDED
+ select SERIAL_8250_SHARE_IRQ
+ help
+	  Enable this if you have an SGI Origin or Octane machine. This
+	  module provides basic serial support by directly driving the UART
+	  chip behind the IOC3 device on those systems. The maximum baud
+	  rate supported by this driver is 38400 bps.
+
config SERIAL_8250_RT288X
bool "Ralink RT288x/RT305x/RT3662/RT3883 serial port support"
depends on SERIAL_8250
diff --git a/drivers/tty/serial/8250/Makefile b/drivers/tty/serial/8250/Makefile
index 08c1d8117506..51a6079d3f1f 100644
--- a/drivers/tty/serial/8250/Makefile
+++ b/drivers/tty/serial/8250/Makefile
@@ -28,6 +28,7 @@ obj-$(CONFIG_SERIAL_8250_FSL) += 8250_fsl.o
obj-$(CONFIG_SERIAL_8250_MEN_MCB) += 8250_men_mcb.o
obj-$(CONFIG_SERIAL_8250_DW) += 8250_dw.o
obj-$(CONFIG_SERIAL_8250_EM) += 8250_em.o
+obj-$(CONFIG_SERIAL_8250_IOC3) += 8250_ioc3.o
obj-$(CONFIG_SERIAL_8250_OMAP) += 8250_omap.o
obj-$(CONFIG_SERIAL_8250_LPC18XX) += 8250_lpc18xx.o
obj-$(CONFIG_SERIAL_8250_MT6577) += 8250_mtk.o
diff --git a/drivers/tty/serial/Kconfig b/drivers/tty/serial/Kconfig
index 99f5da3bf913..52eaac21ff9f 100644
--- a/drivers/tty/serial/Kconfig
+++ b/drivers/tty/serial/Kconfig
@@ -237,7 +237,7 @@ config SERIAL_CLPS711X_CONSOLE
config SERIAL_SAMSUNG
tristate "Samsung SoC serial support"
- depends on PLAT_SAMSUNG || ARCH_EXYNOS
+ depends on PLAT_SAMSUNG || ARCH_EXYNOS || COMPILE_TEST
select SERIAL_CORE
help
Support for the on-chip UARTs on the Samsung S3C24XX series CPUs,
@@ -975,7 +975,7 @@ config SERIAL_QCOM_GENI
config SERIAL_QCOM_GENI_CONSOLE
bool "QCOM GENI Serial Console support"
- depends on SERIAL_QCOM_GENI=y
+ depends on SERIAL_QCOM_GENI
select SERIAL_CORE_CONSOLE
select SERIAL_EARLYCON
help
diff --git a/drivers/tty/serial/amba-pl010.c b/drivers/tty/serial/amba-pl010.c
index 2c37d11726ab..3284f34e9dfe 100644
--- a/drivers/tty/serial/amba-pl010.c
+++ b/drivers/tty/serial/amba-pl010.c
@@ -15,10 +15,6 @@
* and hooked into this driver.
*/
-#if defined(CONFIG_SERIAL_AMBA_PL010_CONSOLE) && defined(CONFIG_MAGIC_SYSRQ)
-#define SUPPORT_SYSRQ
-#endif
-
#include <linux/module.h>
#include <linux/ioport.h>
#include <linux/init.h>
@@ -728,6 +724,7 @@ static int pl010_probe(struct amba_device *dev, const struct amba_id *id)
uap->port.iotype = UPIO_MEM;
uap->port.irq = dev->irq[0];
uap->port.fifosize = 16;
+ uap->port.has_sysrq = IS_ENABLED(CONFIG_SERIAL_AMBA_PL010_CONSOLE);
uap->port.ops = &amba_pl010_pops;
uap->port.flags = UPF_BOOT_AUTOCONF;
uap->port.line = i;
diff --git a/drivers/tty/serial/amba-pl011.c b/drivers/tty/serial/amba-pl011.c
index 4b28134d596a..2296bb0f9578 100644
--- a/drivers/tty/serial/amba-pl011.c
+++ b/drivers/tty/serial/amba-pl011.c
@@ -16,11 +16,6 @@
* and hooked into this driver.
*/
-
-#if defined(CONFIG_SERIAL_AMBA_PL011_CONSOLE) && defined(CONFIG_MAGIC_SYSRQ)
-#define SUPPORT_SYSRQ
-#endif
-
#include <linux/module.h>
#include <linux/ioport.h>
#include <linux/init.h>
@@ -1452,8 +1447,6 @@ static void pl011_modem_status(struct uart_amba_port *uap)
static void check_apply_cts_event_workaround(struct uart_amba_port *uap)
{
- unsigned int dummy_read;
-
if (!uap->vendor->cts_event_workaround)
return;
@@ -1465,8 +1458,8 @@ static void check_apply_cts_event_workaround(struct uart_amba_port *uap)
* single apb access will incur 2 pclk(133.12Mhz) delay,
* so add 2 dummy reads
*/
- dummy_read = pl011_read(uap, REG_ICR);
- dummy_read = pl011_read(uap, REG_ICR);
+ pl011_read(uap, REG_ICR);
+ pl011_read(uap, REG_ICR);
}
static irqreturn_t pl011_int(int irq, void *dev_id)
@@ -2579,6 +2572,7 @@ static int pl011_setup_port(struct device *dev, struct uart_amba_port *uap,
uap->port.mapbase = mmiobase->start;
uap->port.membase = base;
uap->port.fifosize = uap->fifosize;
+ uap->port.has_sysrq = IS_ENABLED(CONFIG_SERIAL_AMBA_PL011_CONSOLE);
uap->port.flags = UPF_BOOT_AUTOCONF;
uap->port.line = index;
@@ -2769,6 +2763,7 @@ static struct platform_driver arm_sbsa_uart_platform_driver = {
.remove = sbsa_uart_remove,
.driver = {
.name = "sbsa-uart",
+ .pm = &pl011_dev_pm_ops,
.of_match_table = of_match_ptr(sbsa_uart_of_match),
.acpi_match_table = ACPI_PTR(sbsa_uart_acpi_match),
.suppress_bind_attrs = IS_BUILTIN(CONFIG_SERIAL_AMBA_PL011),
diff --git a/drivers/tty/serial/apbuart.c b/drivers/tty/serial/apbuart.c
index 60cd133ffbbc..e8d56e899ec7 100644
--- a/drivers/tty/serial/apbuart.c
+++ b/drivers/tty/serial/apbuart.c
@@ -11,10 +11,6 @@
* Copyright (C) 2009 Kristoffer Glembo <kristoffer@gaisler.com>, Aeroflex Gaisler AB
*/
-#if defined(CONFIG_SERIAL_GRLIB_GAISLER_APBUART_CONSOLE) && defined(CONFIG_MAGIC_SYSRQ)
-#define SUPPORT_SYSRQ
-#endif
-
#include <linux/module.h>
#include <linux/tty.h>
#include <linux/tty_flip.h>
@@ -626,6 +622,7 @@ static int __init grlib_apbuart_configure(void)
port->irq = 0;
port->iotype = UPIO_MEM;
port->ops = &grlib_apbuart_ops;
+ port->has_sysrq = IS_ENABLED(CONFIG_SERIAL_GRLIB_GAISLER_APBUART_CONSOLE);
port->flags = UPF_BOOT_AUTOCONF;
port->line = line;
port->uartclk = *freq_hz;
diff --git a/drivers/tty/serial/arc_uart.c b/drivers/tty/serial/arc_uart.c
index d904a3a345e7..17c3fc398fc6 100644
--- a/drivers/tty/serial/arc_uart.c
+++ b/drivers/tty/serial/arc_uart.c
@@ -21,10 +21,6 @@
* -check if sysreq works
*/
-#if defined(CONFIG_SERIAL_ARC_CONSOLE) && defined(CONFIG_MAGIC_SYSRQ)
-#define SUPPORT_SYSRQ
-#endif
-
#include <linux/module.h>
#include <linux/serial.h>
#include <linux/console.h>
@@ -625,6 +621,7 @@ static int arc_serial_probe(struct platform_device *pdev)
port->flags = UPF_BOOT_AUTOCONF;
port->line = dev_id;
port->ops = &arc_serial_pops;
+ port->has_sysrq = IS_ENABLED(CONFIG_SERIAL_ARC_CONSOLE);
port->fifosize = ARC_UART_TX_FIFO_SIZE;
diff --git a/drivers/tty/serial/atmel_serial.c b/drivers/tty/serial/atmel_serial.c
index 1ba9bc667e13..c15c398c88a9 100644
--- a/drivers/tty/serial/atmel_serial.c
+++ b/drivers/tty/serial/atmel_serial.c
@@ -51,10 +51,6 @@
#define ATMEL_RTS_HIGH_OFFSET 16
#define ATMEL_RTS_LOW_OFFSET 20
-#if defined(CONFIG_SERIAL_ATMEL_CONSOLE) && defined(CONFIG_MAGIC_SYSRQ)
-#define SUPPORT_SYSRQ
-#endif
-
#include <linux/serial_core.h>
#include "serial_mctrl_gpio.h"
@@ -196,10 +192,6 @@ struct atmel_uart_port {
static struct atmel_uart_port atmel_ports[ATMEL_MAX_UART];
static DECLARE_BITMAP(atmel_ports_in_use, ATMEL_MAX_UART);
-#ifdef SUPPORT_SYSRQ
-static struct console atmel_console;
-#endif
-
#if defined(CONFIG_OF)
static const struct of_device_id atmel_serial_dt_ids[] = {
{ .compatible = "atmel,at91rm9200-usart-serial" },
@@ -313,7 +305,11 @@ static int atmel_config_rs485(struct uart_port *port,
if (rs485conf->flags & SER_RS485_ENABLED) {
dev_dbg(port->dev, "Setting UART to RS485\n");
- atmel_port->tx_done_mask = ATMEL_US_TXEMPTY;
+ if (port->rs485.flags & SER_RS485_RX_DURING_TX)
+ atmel_port->tx_done_mask = ATMEL_US_TXRDY;
+ else
+ atmel_port->tx_done_mask = ATMEL_US_TXEMPTY;
+
atmel_uart_writel(port, ATMEL_US_TTGR,
rs485conf->delay_rts_after_send);
mode |= ATMEL_US_USMODE_RS485;
@@ -831,7 +827,7 @@ static void atmel_tx_chars(struct uart_port *port)
struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
if (port->x_char &&
- (atmel_uart_readl(port, ATMEL_US_CSR) & atmel_port->tx_done_mask)) {
+ (atmel_uart_readl(port, ATMEL_US_CSR) & ATMEL_US_TXRDY)) {
atmel_uart_write_char(port, port->x_char);
port->icount.tx++;
port->x_char = 0;
@@ -839,8 +835,7 @@ static void atmel_tx_chars(struct uart_port *port)
if (uart_circ_empty(xmit) || uart_tx_stopped(port))
return;
- while (atmel_uart_readl(port, ATMEL_US_CSR) &
- atmel_port->tx_done_mask) {
+ while (atmel_uart_readl(port, ATMEL_US_CSR) & ATMEL_US_TXRDY) {
atmel_uart_write_char(port, xmit->buf[xmit->tail]);
xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1);
port->icount.tx++;
@@ -851,10 +846,20 @@ static void atmel_tx_chars(struct uart_port *port)
if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
uart_write_wakeup(port);
- if (!uart_circ_empty(xmit))
+ if (!uart_circ_empty(xmit)) {
+		/*
+		 * We still have characters to transmit, so we should
+		 * continue transmitting them when TX is ready, regardless
+		 * of the mode or duplex setting.
+		 */
+ atmel_port->tx_done_mask |= ATMEL_US_TXRDY;
+
/* Enable interrupts */
atmel_uart_writel(port, ATMEL_US_IER,
atmel_port->tx_done_mask);
+ } else {
+ if (atmel_uart_is_half_duplex(port))
+ atmel_port->tx_done_mask &= ~ATMEL_US_TXRDY;
+ }
}
static void atmel_complete_tx_dma(void *arg)
@@ -1067,7 +1072,7 @@ static int atmel_prepare_tx_dma(struct uart_port *port)
chan_err:
dev_err(port->dev, "TX channel not available, switch to pio\n");
- atmel_port->use_dma_tx = 0;
+ atmel_port->use_dma_tx = false;
if (atmel_port->chan_tx)
atmel_release_tx_dma(port);
return -EINVAL;
@@ -1266,7 +1271,7 @@ static int atmel_prepare_rx_dma(struct uart_port *port)
chan_err:
dev_err(port->dev, "RX channel not available, switch to pio\n");
- atmel_port->use_dma_rx = 0;
+ atmel_port->use_dma_rx = false;
if (atmel_port->chan_rx)
atmel_release_rx_dma(port);
return -EINVAL;
@@ -1693,7 +1698,7 @@ static int atmel_prepare_rx_pdc(struct uart_port *port)
DMA_FROM_DEVICE);
kfree(atmel_port->pdc_rx[0].buf);
}
- atmel_port->use_pdc_rx = 0;
+ atmel_port->use_pdc_rx = false;
return -ENOMEM;
}
pdc->dma_addr = dma_map_single(port->dev,
@@ -2526,8 +2531,7 @@ static int atmel_init_port(struct atmel_uart_port *atmel_port,
* Use TXEMPTY for interrupt when rs485 or ISO7816 else TXRDY or
* ENDTX|TXBUFE
*/
- if (port->rs485.flags & SER_RS485_ENABLED ||
- port->iso7816.flags & SER_ISO7816_ENABLED)
+ if (atmel_uart_is_half_duplex(port))
atmel_port->tx_done_mask = ATMEL_US_TXEMPTY;
else if (atmel_use_pdc_tx(port)) {
port->fifosize = PDC_BUFFER_SIZE;
@@ -2878,6 +2882,7 @@ static int atmel_serial_probe(struct platform_device *pdev)
atmel_port = &atmel_ports[ret];
atmel_port->backup_imr = 0;
atmel_port->uart.line = ret;
+ atmel_port->uart.has_sysrq = IS_ENABLED(CONFIG_SERIAL_ATMEL_CONSOLE);
atmel_serial_probe_fifos(atmel_port, pdev);
atomic_set(&atmel_port->tasklet_shutdown, 0);
diff --git a/drivers/tty/serial/bcm63xx_uart.c b/drivers/tty/serial/bcm63xx_uart.c
index b7adc6127b3d..5674da2b76f0 100644
--- a/drivers/tty/serial/bcm63xx_uart.c
+++ b/drivers/tty/serial/bcm63xx_uart.c
@@ -10,10 +10,6 @@
* my board.
*/
-#if defined(CONFIG_SERIAL_BCM63XX_CONSOLE) && defined(CONFIG_MAGIC_SYSRQ)
-#define SUPPORT_SYSRQ
-#endif
-
#include <linux/kernel.h>
#include <linux/platform_device.h>
#include <linux/init.h>
@@ -858,6 +854,7 @@ static int bcm_uart_probe(struct platform_device *pdev)
port->fifosize = 16;
port->uartclk = clk_get_rate(clk) / 2;
port->line = pdev->id;
+ port->has_sysrq = IS_ENABLED(CONFIG_SERIAL_BCM63XX_CONSOLE);
clk_put(clk);
ret = uart_add_one_port(&bcm_uart_driver, port);
diff --git a/drivers/tty/serial/clps711x.c b/drivers/tty/serial/clps711x.c
index 061590795680..95abc6faa3d5 100644
--- a/drivers/tty/serial/clps711x.c
+++ b/drivers/tty/serial/clps711x.c
@@ -8,10 +8,6 @@
* Copyright (C) 2000 Deep Blue Solutions Ltd.
*/
-#if defined(CONFIG_SERIAL_CLPS711X_CONSOLE) && defined(CONFIG_MAGIC_SYSRQ)
-#define SUPPORT_SYSRQ
-#endif
-
#include <linux/module.h>
#include <linux/device.h>
#include <linux/console.h>
@@ -479,6 +475,7 @@ static int uart_clps711x_probe(struct platform_device *pdev)
s->port.mapbase = res->start;
s->port.type = PORT_CLPS711X;
s->port.fifosize = 16;
+ s->port.has_sysrq = IS_ENABLED(CONFIG_SERIAL_CLPS711X_CONSOLE);
s->port.flags = UPF_SKIP_TEST | UPF_FIXED_TYPE;
s->port.uartclk = clk_get_rate(uart_clk);
s->port.ops = &uart_clps711x_ops;
diff --git a/drivers/tty/serial/cpm_uart/cpm_uart_core.c b/drivers/tty/serial/cpm_uart/cpm_uart_core.c
index de6d02f7abe2..19d5a4cf29a6 100644
--- a/drivers/tty/serial/cpm_uart/cpm_uart_core.c
+++ b/drivers/tty/serial/cpm_uart/cpm_uart_core.c
@@ -40,10 +40,6 @@
#include <asm/fs_pd.h>
#include <asm/udbg.h>
-#if defined(CONFIG_SERIAL_CPM_CONSOLE) && defined(CONFIG_MAGIC_SYSRQ)
-#define SUPPORT_SYSRQ
-#endif
-
#include <linux/serial_core.h>
#include <linux/kernel.h>
@@ -347,9 +343,7 @@ static void cpm_uart_int_rx(struct uart_port *port)
/* ASSUMPTION: it contains nothing valid */
i = 0;
}
-#ifdef SUPPORT_SYSRQ
port->sysrq = 0;
-#endif
goto error_return;
}
@@ -1204,7 +1198,8 @@ static int cpm_uart_init_port(struct device_node *np,
pinfo->port.uartclk = ppc_proc_freq;
pinfo->port.mapbase = (unsigned long)mem;
pinfo->port.type = PORT_CPM;
- pinfo->port.ops = &cpm_uart_pops,
+ pinfo->port.ops = &cpm_uart_pops;
+ pinfo->port.has_sysrq = IS_ENABLED(CONFIG_SERIAL_CPM_CONSOLE);
pinfo->port.iotype = UPIO_MEM;
pinfo->port.fifosize = pinfo->tx_nrfifos * pinfo->tx_fifosize;
spin_lock_init(&pinfo->port.lock);
diff --git a/drivers/tty/serial/dz.c b/drivers/tty/serial/dz.c
index 7b57e840e255..4552742c3859 100644
--- a/drivers/tty/serial/dz.c
+++ b/drivers/tty/serial/dz.c
@@ -29,10 +29,6 @@
#undef DEBUG_DZ
-#if defined(CONFIG_SERIAL_DZ_CONSOLE) && defined(CONFIG_MAGIC_SYSRQ)
-#define SUPPORT_SYSRQ
-#endif
-
#include <linux/bitops.h>
#include <linux/compiler.h>
#include <linux/console.h>
@@ -677,7 +673,7 @@ static void dz_release_port(struct uart_port *uport)
static int dz_map_port(struct uart_port *uport)
{
if (!uport->membase)
- uport->membase = ioremap_nocache(uport->mapbase,
+ uport->membase = ioremap(uport->mapbase,
dec_kn_slot_size);
if (!uport->membase) {
printk(KERN_ERR "dz: Cannot map MMIO\n");
@@ -787,6 +783,7 @@ static void __init dz_init_ports(void)
uport->ops = &dz_ops;
uport->line = line;
uport->mapbase = base;
+ uport->has_sysrq = IS_ENABLED(CONFIG_SERIAL_DZ_CONSOLE);
}
}
diff --git a/drivers/tty/serial/efm32-uart.c b/drivers/tty/serial/efm32-uart.c
index d6b5e5463746..2ac87128d7fd 100644
--- a/drivers/tty/serial/efm32-uart.c
+++ b/drivers/tty/serial/efm32-uart.c
@@ -1,8 +1,4 @@
// SPDX-License-Identifier: GPL-2.0
-#if defined(CONFIG_SERIAL_EFM32_UART_CONSOLE) && defined(CONFIG_MAGIC_SYSRQ)
-#define SUPPORT_SYSRQ
-#endif
-
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/io.h>
@@ -748,6 +744,7 @@ static int efm32_uart_probe(struct platform_device *pdev)
efm_port->port.type = PORT_EFMUART;
efm_port->port.iotype = UPIO_MEM32;
efm_port->port.fifosize = 2;
+ efm_port->port.has_sysrq = IS_ENABLED(CONFIG_SERIAL_EFM32_UART_CONSOLE);
efm_port->port.ops = &efm32_uart_pops;
efm_port->port.flags = UPF_BOOT_AUTOCONF;
diff --git a/drivers/tty/serial/fsl_linflexuart.c b/drivers/tty/serial/fsl_linflexuart.c
index 205c31a61684..3e28be402aef 100644
--- a/drivers/tty/serial/fsl_linflexuart.c
+++ b/drivers/tty/serial/fsl_linflexuart.c
@@ -6,11 +6,6 @@
* Copyright 2017-2019 NXP
*/
-#if defined(CONFIG_SERIAL_FSL_LINFLEXUART_CONSOLE) && \
- defined(CONFIG_MAGIC_SYSRQ)
-#define SUPPORT_SYSRQ
-#endif
-
#include <linux/console.h>
#include <linux/io.h>
#include <linux/irq.h>
@@ -279,10 +274,8 @@ static irqreturn_t linflex_rxint(int irq, void *dev_id)
if (brk) {
uart_handle_break(sport);
} else {
-#ifdef SUPPORT_SYSRQ
if (uart_handle_sysrq_char(sport, (unsigned char)rx))
continue;
-#endif
tty_insert_flip_char(port, rx, flg);
}
}
@@ -863,6 +856,7 @@ static int linflex_probe(struct platform_device *pdev)
sport->irq = platform_get_irq(pdev, 0);
sport->ops = &linflex_pops;
sport->flags = UPF_BOOT_AUTOCONF;
+ sport->has_sysrq = IS_ENABLED(CONFIG_SERIAL_FSL_LINFLEXUART_CONSOLE);
linflex_ports[sport->line] = sport;
diff --git a/drivers/tty/serial/fsl_lpuart.c b/drivers/tty/serial/fsl_lpuart.c
index 4e128d19e0ad..91e2805e6441 100644
--- a/drivers/tty/serial/fsl_lpuart.c
+++ b/drivers/tty/serial/fsl_lpuart.c
@@ -5,10 +5,6 @@
* Copyright 2012-2014 Freescale Semiconductor, Inc.
*/
-#if defined(CONFIG_SERIAL_FSL_LPUART_CONSOLE) && defined(CONFIG_MAGIC_SYSRQ)
-#define SUPPORT_SYSRQ
-#endif
-
#include <linux/clk.h>
#include <linux/console.h>
#include <linux/dma-mapping.h>
@@ -864,9 +860,7 @@ static void lpuart_rxint(struct lpuart_port *sport)
if (sr & UARTSR1_OR)
flg = TTY_OVERRUN;
-#ifdef SUPPORT_SYSRQ
sport->port.sysrq = 0;
-#endif
}
tty_insert_flip_char(port, rx, flg);
@@ -946,9 +940,7 @@ static void lpuart32_rxint(struct lpuart_port *sport)
if (sr & UARTSTAT_OR)
flg = TTY_OVERRUN;
-#ifdef SUPPORT_SYSRQ
sport->port.sysrq = 0;
-#endif
}
tty_insert_flip_char(port, rx, flg);
@@ -2376,7 +2368,9 @@ static int __init lpuart32_early_console_setup(struct earlycon_device *device,
if (!device->port.membase)
return -ENODEV;
- device->port.iotype = UPIO_MEM32BE;
+ if (device->port.iotype != UPIO_MEM32)
+ device->port.iotype = UPIO_MEM32BE;
+
device->con->write = lpuart32_early_write;
return 0;
}
@@ -2396,9 +2390,6 @@ static int __init lpuart32_imx_early_console_setup(struct earlycon_device *devic
OF_EARLYCON_DECLARE(lpuart, "fsl,vf610-lpuart", lpuart_early_console_setup);
OF_EARLYCON_DECLARE(lpuart32, "fsl,ls1021a-lpuart", lpuart32_early_console_setup);
OF_EARLYCON_DECLARE(lpuart32, "fsl,imx7ulp-lpuart", lpuart32_imx_early_console_setup);
-OF_EARLYCON_DECLARE(lpuart32, "fsl,imx8qxp-lpuart", lpuart32_imx_early_console_setup);
-EARLYCON_DECLARE(lpuart, lpuart_early_console_setup);
-EARLYCON_DECLARE(lpuart32, lpuart32_early_console_setup);
#define LPUART_CONSOLE (&lpuart_console)
#define LPUART32_CONSOLE (&lpuart32_console)
@@ -2461,6 +2452,7 @@ static int lpuart_probe(struct platform_device *pdev)
sport->port.ops = &lpuart32_pops;
else
sport->port.ops = &lpuart_pops;
+ sport->port.has_sysrq = IS_ENABLED(CONFIG_SERIAL_FSL_LPUART_CONSOLE);
sport->port.flags = UPF_BOOT_AUTOCONF;
if (lpuart_is_32(sport))
diff --git a/drivers/tty/serial/imx.c b/drivers/tty/serial/imx.c
index a9e20e6c63ad..0c6c63166250 100644
--- a/drivers/tty/serial/imx.c
+++ b/drivers/tty/serial/imx.c
@@ -8,10 +8,6 @@
* Copyright (C) 2004 Pengutronix
*/
-#if defined(CONFIG_SERIAL_IMX_CONSOLE) && defined(CONFIG_MAGIC_SYSRQ)
-#define SUPPORT_SYSRQ
-#endif
-
#include <linux/module.h>
#include <linux/ioport.h>
#include <linux/init.h>
@@ -700,22 +696,33 @@ static void imx_uart_start_tx(struct uart_port *port)
}
}
-static irqreturn_t imx_uart_rtsint(int irq, void *dev_id)
+static irqreturn_t __imx_uart_rtsint(int irq, void *dev_id)
{
struct imx_port *sport = dev_id;
u32 usr1;
- spin_lock(&sport->port.lock);
-
imx_uart_writel(sport, USR1_RTSD, USR1);
usr1 = imx_uart_readl(sport, USR1) & USR1_RTSS;
uart_handle_cts_change(&sport->port, !!usr1);
wake_up_interruptible(&sport->port.state->port.delta_msr_wait);
- spin_unlock(&sport->port.lock);
return IRQ_HANDLED;
}
+static irqreturn_t imx_uart_rtsint(int irq, void *dev_id)
+{
+ struct imx_port *sport = dev_id;
+ irqreturn_t ret;
+
+ spin_lock(&sport->port.lock);
+
+ ret = __imx_uart_rtsint(irq, dev_id);
+
+ spin_unlock(&sport->port.lock);
+
+ return ret;
+}
+
static irqreturn_t imx_uart_txint(int irq, void *dev_id)
{
struct imx_port *sport = dev_id;
@@ -726,14 +733,12 @@ static irqreturn_t imx_uart_txint(int irq, void *dev_id)
return IRQ_HANDLED;
}
-static irqreturn_t imx_uart_rxint(int irq, void *dev_id)
+static irqreturn_t __imx_uart_rxint(int irq, void *dev_id)
{
struct imx_port *sport = dev_id;
unsigned int rx, flg, ignored = 0;
struct tty_port *port = &sport->port.state->port;
- spin_lock(&sport->port.lock);
-
while (imx_uart_readl(sport, USR2) & USR2_RDR) {
u32 usr2;
@@ -779,9 +784,7 @@ static irqreturn_t imx_uart_rxint(int irq, void *dev_id)
if (rx & URXD_OVRRUN)
flg = TTY_OVERRUN;
-#ifdef SUPPORT_SYSRQ
sport->port.sysrq = 0;
-#endif
}
if (sport->port.ignore_status_mask & URXD_DUMMY_READ)
@@ -792,11 +795,25 @@ static irqreturn_t imx_uart_rxint(int irq, void *dev_id)
}
out:
- spin_unlock(&sport->port.lock);
tty_flip_buffer_push(port);
+
return IRQ_HANDLED;
}
+static irqreturn_t imx_uart_rxint(int irq, void *dev_id)
+{
+ struct imx_port *sport = dev_id;
+ irqreturn_t ret;
+
+ spin_lock(&sport->port.lock);
+
+ ret = __imx_uart_rxint(irq, dev_id);
+
+ spin_unlock(&sport->port.lock);
+
+ return ret;
+}
+
static void imx_uart_clear_rx_errors(struct imx_port *sport);
/*
@@ -855,6 +872,8 @@ static irqreturn_t imx_uart_int(int irq, void *dev_id)
unsigned int usr1, usr2, ucr1, ucr2, ucr3, ucr4;
irqreturn_t ret = IRQ_NONE;
+ spin_lock(&sport->port.lock);
+
usr1 = imx_uart_readl(sport, USR1);
usr2 = imx_uart_readl(sport, USR2);
ucr1 = imx_uart_readl(sport, UCR1);
@@ -888,27 +907,25 @@ static irqreturn_t imx_uart_int(int irq, void *dev_id)
usr2 &= ~USR2_ORE;
if (usr1 & (USR1_RRDY | USR1_AGTIM)) {
- imx_uart_rxint(irq, dev_id);
+ __imx_uart_rxint(irq, dev_id);
ret = IRQ_HANDLED;
}
if ((usr1 & USR1_TRDY) || (usr2 & USR2_TXDC)) {
- imx_uart_txint(irq, dev_id);
+ imx_uart_transmit_buffer(sport);
ret = IRQ_HANDLED;
}
if (usr1 & USR1_DTRD) {
imx_uart_writel(sport, USR1_DTRD, USR1);
- spin_lock(&sport->port.lock);
imx_uart_mctrl_check(sport);
- spin_unlock(&sport->port.lock);
ret = IRQ_HANDLED;
}
if (usr1 & USR1_RTSD) {
- imx_uart_rtsint(irq, dev_id);
+ __imx_uart_rtsint(irq, dev_id);
ret = IRQ_HANDLED;
}
@@ -923,6 +940,8 @@ static irqreturn_t imx_uart_int(int irq, void *dev_id)
ret = IRQ_HANDLED;
}
+ spin_unlock(&sport->port.lock);
+
return ret;
}
@@ -2231,6 +2250,7 @@ static int imx_uart_probe(struct platform_device *pdev)
sport->port.iotype = UPIO_MEM;
sport->port.irq = rxirq;
sport->port.fifosize = 32;
+ sport->port.has_sysrq = IS_ENABLED(CONFIG_SERIAL_IMX_CONSOLE);
sport->port.ops = &imx_uart_pops;
sport->port.rs485_config = imx_uart_rs485_config;
sport->port.flags = UPF_BOOT_AUTOCONF;
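
The imx rework above follows the kernel's usual locking convention: a
double-underscore helper assumes the caller already holds port->lock,
while thin wrappers keep the standalone IRQ entry points safe, so the
shared imx_uart_int() can take the lock exactly once instead of
recursing into locked helpers. The shape, with a hypothetical foo
driver:

	#include <linux/interrupt.h>
	#include <linux/serial_core.h>

	struct foo_port { struct uart_port port; };

	/* caller must hold port.lock */
	static void __foo_handle_rx(struct foo_port *p) { /* drain FIFO */ }

	static irqreturn_t foo_irq(int irq, void *dev_id)
	{
		struct foo_port *p = dev_id;

		spin_lock(&p->port.lock);	/* single lock site */
		__foo_handle_rx(p);
		spin_unlock(&p->port.lock);
		return IRQ_HANDLED;
	}
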
diff --git a/drivers/tty/serial/ip22zilog.c b/drivers/tty/serial/ip22zilog.c
index 8c810733df3d..86fff69d7e7c 100644
--- a/drivers/tty/serial/ip22zilog.c
+++ b/drivers/tty/serial/ip22zilog.c
@@ -38,10 +38,6 @@
#include <asm/sgi/hpc3.h>
#include <asm/sgi/ip22.h>
-#if defined(CONFIG_SERIAL_IP22_ZILOG_CONSOLE) && defined(CONFIG_MAGIC_SYSRQ)
-#define SUPPORT_SYSRQ
-#endif
-
#include <linux/serial_core.h>
#include "ip22zilog.h"
@@ -1080,6 +1076,7 @@ static struct uart_driver ip22zilog_reg = {
static void __init ip22zilog_prepare(void)
{
+ unsigned char sysrq_on = IS_ENABLED(CONFIG_SERIAL_IP22_ZILOG_CONSOLE);
struct uart_ip22zilog_port *up;
struct zilog_layout *rp;
int channel, chip;
@@ -1115,6 +1112,7 @@ static void __init ip22zilog_prepare(void)
up[(chip * 2) + 0].port.irq = zilog_irq;
up[(chip * 2) + 0].port.uartclk = ZS_CLOCK;
up[(chip * 2) + 0].port.fifosize = 1;
+ up[(chip * 2) + 0].port.has_sysrq = sysrq_on;
up[(chip * 2) + 0].port.ops = &ip22zilog_pops;
up[(chip * 2) + 0].port.type = PORT_IP22ZILOG;
up[(chip * 2) + 0].port.flags = 0;
@@ -1126,6 +1124,7 @@ static void __init ip22zilog_prepare(void)
up[(chip * 2) + 1].port.irq = zilog_irq;
up[(chip * 2) + 1].port.uartclk = ZS_CLOCK;
up[(chip * 2) + 1].port.fifosize = 1;
+ up[(chip * 2) + 1].port.has_sysrq = sysrq_on;
up[(chip * 2) + 1].port.ops = &ip22zilog_pops;
up[(chip * 2) + 1].port.type = PORT_IP22ZILOG;
up[(chip * 2) + 1].port.line = (chip * 2) + 1;
diff --git a/drivers/tty/serial/kgdb_nmi.c b/drivers/tty/serial/kgdb_nmi.c
index 4029272891f9..5022447afa23 100644
--- a/drivers/tty/serial/kgdb_nmi.c
+++ b/drivers/tty/serial/kgdb_nmi.c
@@ -118,7 +118,7 @@ static int kgdb_nmi_poll_one_knock(void)
int c = -1;
const char *magic = kgdb_nmi_magic;
size_t m = strlen(magic);
- bool printch = 0;
+ bool printch = false;
c = dbg_io_ops->read_char();
if (c == NO_POLL_CHAR)
@@ -130,7 +130,7 @@ static int kgdb_nmi_poll_one_knock(void)
n = (n + 1) % m;
if (!n)
return 1;
- printch = 1;
+ printch = true;
} else {
n = 0;
}
diff --git a/drivers/tty/serial/lantiq.c b/drivers/tty/serial/lantiq.c
index fcbea43dc334..f67226df30d4 100644
--- a/drivers/tty/serial/lantiq.c
+++ b/drivers/tty/serial/lantiq.c
@@ -549,7 +549,7 @@ lqasc_request_port(struct uart_port *port)
}
if (port->flags & UPF_IOREMAP) {
- port->membase = devm_ioremap_nocache(&pdev->dev,
+ port->membase = devm_ioremap(&pdev->dev,
port->mapbase, size);
if (port->membase == NULL)
return -ENOMEM;
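
On the platforms involved, plain ioremap() already returns an uncached
mapping, so the *_nocache variants being dropped here and in the hunks
below were straight aliases; the devm_ form additionally ties the
mapping's lifetime to the device. Typical probe-time shape (resource
handling assumed; a real driver would stash base in its port state):

	#include <linux/io.h>
	#include <linux/ioport.h>
	#include <linux/platform_device.h>

	static int foo_map(struct platform_device *pdev, struct resource *res)
	{
		void __iomem *base;

		/* unmapped automatically when the device is unbound */
		base = devm_ioremap(&pdev->dev, res->start, resource_size(res));
		if (!base)
			return -ENOMEM;
		return 0;
	}
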
diff --git a/drivers/tty/serial/meson_uart.c b/drivers/tty/serial/meson_uart.c
index fbc5bc022a39..d2c08b760f83 100644
--- a/drivers/tty/serial/meson_uart.c
+++ b/drivers/tty/serial/meson_uart.c
@@ -5,15 +5,12 @@
* Copyright (C) 2014 Carlo Caione <carlo@caione.org>
*/
-#if defined(CONFIG_SERIAL_MESON_CONSOLE) && defined(CONFIG_MAGIC_SYSRQ)
-#define SUPPORT_SYSRQ
-#endif
-
#include <linux/clk.h>
#include <linux/console.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/io.h>
+#include <linux/iopoll.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/of.h>
@@ -76,6 +73,8 @@
#define AML_UART_PORT_OFFSET 6
#define AML_UART_DEV_NAME "ttyAML"
+#define AML_UART_POLL_USEC 5
+#define AML_UART_TIMEOUT_USEC 10000
static struct uart_driver meson_uart_driver;
@@ -411,7 +410,7 @@ static int meson_uart_request_port(struct uart_port *port)
return -EBUSY;
}
- port->membase = devm_ioremap_nocache(port->dev, port->mapbase,
+ port->membase = devm_ioremap(port->dev, port->mapbase,
port->mapsize);
if (!port->membase)
return -ENOMEM;
@@ -427,6 +426,64 @@ static void meson_uart_config_port(struct uart_port *port, int flags)
}
}
+#ifdef CONFIG_CONSOLE_POLL
+/*
+ * Console polling routines for writing and reading from the UART while
+ * in an interrupt or debug context (e.g. kgdb).
+ */
+
+static int meson_uart_poll_get_char(struct uart_port *port)
+{
+ u32 c;
+ unsigned long flags;
+
+ spin_lock_irqsave(&port->lock, flags);
+
+ if (readl(port->membase + AML_UART_STATUS) & AML_UART_RX_EMPTY)
+ c = NO_POLL_CHAR;
+ else
+ c = readl(port->membase + AML_UART_RFIFO);
+
+ spin_unlock_irqrestore(&port->lock, flags);
+
+ return c;
+}
+
+static void meson_uart_poll_put_char(struct uart_port *port, unsigned char c)
+{
+ unsigned long flags;
+ u32 reg;
+ int ret;
+
+ spin_lock_irqsave(&port->lock, flags);
+
+ /* Wait until FIFO is empty or timeout */
+ ret = readl_poll_timeout_atomic(port->membase + AML_UART_STATUS, reg,
+ reg & AML_UART_TX_EMPTY,
+ AML_UART_POLL_USEC,
+ AML_UART_TIMEOUT_USEC);
+ if (ret == -ETIMEDOUT) {
+ dev_err(port->dev, "Timeout waiting for UART TX EMPTY\n");
+ goto out;
+ }
+
+ /* Write the character */
+ writel(c, port->membase + AML_UART_WFIFO);
+
+ /* Wait until FIFO is empty or timeout */
+ ret = readl_poll_timeout_atomic(port->membase + AML_UART_STATUS, reg,
+ reg & AML_UART_TX_EMPTY,
+ AML_UART_POLL_USEC,
+ AML_UART_TIMEOUT_USEC);
+ if (ret == -ETIMEDOUT)
+ dev_err(port->dev, "Timeout waiting for UART TX EMPTY\n");
+
+out:
+ spin_unlock_irqrestore(&port->lock, flags);
+}
+
+#endif /* CONFIG_CONSOLE_POLL */
+
static const struct uart_ops meson_uart_ops = {
.set_mctrl = meson_uart_set_mctrl,
.get_mctrl = meson_uart_get_mctrl,
@@ -442,6 +499,10 @@ static const struct uart_ops meson_uart_ops = {
.request_port = meson_uart_request_port,
.release_port = meson_uart_release_port,
.verify_port = meson_uart_verify_port,
+#ifdef CONFIG_CONSOLE_POLL
+ .poll_get_char = meson_uart_poll_get_char,
+ .poll_put_char = meson_uart_poll_put_char,
+#endif
};
#ifdef CONFIG_SERIAL_MESON_CONSOLE
@@ -703,6 +764,7 @@ static int meson_uart_probe(struct platform_device *pdev)
port->mapsize = resource_size(res_mem);
port->irq = res_irq->start;
port->flags = UPF_BOOT_AUTOCONF | UPF_LOW_LATENCY;
+ port->has_sysrq = IS_ENABLED(CONFIG_SERIAL_MESON_CONSOLE);
port->dev = &pdev->dev;
port->line = pdev->id;
port->type = PORT_MESON;
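
The new meson poll routines lean on readl_poll_timeout_atomic() from
<linux/iopoll.h>: it busy-waits (never sleeps, so it is usable in the
kgdb/atomic context), re-reading the register at the given interval
until the condition holds or the timeout expires, and returns 0 or
-ETIMEDOUT. The general shape, with placeholder register offset and bit:

	#include <linux/bits.h>
	#include <linux/io.h>
	#include <linux/iopoll.h>

	/* poll every 5 us, give up after 10 ms (offset/bit are placeholders) */
	static int foo_wait_tx_empty(void __iomem *base)
	{
		u32 reg;

		return readl_poll_timeout_atomic(base + 0x0c, reg,
						 reg & BIT(22), 5, 10000);
	}
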
diff --git a/drivers/tty/serial/milbeaut_usio.c b/drivers/tty/serial/milbeaut_usio.c
index 949ab7efc4fc..8f2cab7f66ad 100644
--- a/drivers/tty/serial/milbeaut_usio.c
+++ b/drivers/tty/serial/milbeaut_usio.c
@@ -3,10 +3,6 @@
* Copyright (C) 2018 Socionext Inc.
*/
-#if defined(CONFIG_SERIAL_MILBEAUT_USIO_CONSOLE) && defined(CONFIG_MAGIC_SYSRQ)
-#define SUPPORT_SYSRQ
-#endif
-
#include <linux/clk.h>
#include <linux/console.h>
#include <linux/module.h>
@@ -537,6 +533,7 @@ static int mlb_usio_probe(struct platform_device *pdev)
port->irq = mlb_usio_irq[index][RX];
port->uartclk = clk_get_rate(clk);
port->fifosize = 128;
+ port->has_sysrq = IS_ENABLED(CONFIG_SERIAL_MILBEAUT_USIO_CONSOLE);
port->iotype = UPIO_MEM32;
port->flags = UPF_BOOT_AUTOCONF | UPF_SPD_VHI;
port->line = index;
diff --git a/drivers/tty/serial/mpc52xx_uart.c b/drivers/tty/serial/mpc52xx_uart.c
index 3a75ee08d619..af1700445251 100644
--- a/drivers/tty/serial/mpc52xx_uart.c
+++ b/drivers/tty/serial/mpc52xx_uart.c
@@ -44,10 +44,6 @@
#include <asm/mpc52xx.h>
#include <asm/mpc52xx_psc.h>
-#if defined(CONFIG_SERIAL_MPC52xx_CONSOLE) && defined(CONFIG_MAGIC_SYSRQ)
-#define SUPPORT_SYSRQ
-#endif
-
#include <linux/serial_core.h>
@@ -1382,12 +1378,8 @@ mpc52xx_uart_int_rx_chars(struct uart_port *port)
ch = psc_ops->read_char(port);
/* Handle sysreq char */
-#ifdef SUPPORT_SYSRQ
- if (uart_handle_sysrq_char(port, ch)) {
- port->sysrq = 0;
+ if (uart_handle_sysrq_char(port, ch))
continue;
- }
-#endif
/* Store it */
@@ -1770,6 +1762,7 @@ static int mpc52xx_uart_of_probe(struct platform_device *op)
spin_lock_init(&port->lock);
port->uartclk = uartclk;
port->fifosize = 512;
+ port->has_sysrq = IS_ENABLED(CONFIG_SERIAL_MPC52xx_CONSOLE);
port->iotype = UPIO_MEM;
port->flags = UPF_BOOT_AUTOCONF |
(uart_console(port) ? 0 : UPF_IOREMAP);
diff --git a/drivers/tty/serial/msm_serial.c b/drivers/tty/serial/msm_serial.c
index f6c45a796433..60a9c53fa7cb 100644
--- a/drivers/tty/serial/msm_serial.c
+++ b/drivers/tty/serial/msm_serial.c
@@ -7,10 +7,6 @@
* Copyright (c) 2011, Code Aurora Forum. All rights reserved.
*/
-#if defined(CONFIG_SERIAL_MSM_CONSOLE) && defined(CONFIG_MAGIC_SYSRQ)
-# define SUPPORT_SYSRQ
-#endif
-
#include <linux/kernel.h>
#include <linux/atomic.h>
#include <linux/dma-mapping.h>
@@ -610,7 +606,7 @@ static void msm_start_rx_dma(struct msm_port *msm_port)
UARTDM_RX_SIZE, dma->dir);
ret = dma_mapping_error(uart->dev, dma->phys);
if (ret)
- return;
+ goto sw_mode;
dma->desc = dmaengine_prep_slave_single(dma->chan, dma->phys,
UARTDM_RX_SIZE, DMA_DEV_TO_MEM,
@@ -661,6 +657,22 @@ static void msm_start_rx_dma(struct msm_port *msm_port)
return;
unmap:
dma_unmap_single(uart->dev, dma->phys, UARTDM_RX_SIZE, dma->dir);
+
+sw_mode:
+ /*
+ * Switch from DMA to SW/FIFO mode; after clearing Rx BAM (UARTDM_DMEN),
+ * the receiver must be reset.
+ */
+ msm_write(uart, UART_CR_CMD_RESET_RX, UART_CR);
+ msm_write(uart, UART_CR_RX_ENABLE, UART_CR);
+
+ msm_write(uart, UART_CR_CMD_RESET_STALE_INT, UART_CR);
+ msm_write(uart, 0xFFFFFF, UARTDM_DMRX);
+ msm_write(uart, UART_CR_CMD_STALE_EVENT_ENABLE, UART_CR);
+
+ /* Re-enable RX interrupts */
+ msm_port->imr |= (UART_IMR_RXLEV | UART_IMR_RXSTALE);
+ msm_write(uart, msm_port->imr, UART_IMR);
}
static void msm_stop_rx(struct uart_port *port)
@@ -1810,6 +1822,7 @@ static int msm_serial_probe(struct platform_device *pdev)
if (unlikely(irq < 0))
return -ENXIO;
port->irq = irq;
+ port->has_sysrq = IS_ENABLED(CONFIG_SERIAL_MSM_CONSOLE);
platform_set_drvdata(pdev, port);
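
The msm hunk above also closes a quiet failure: a dma_map_single()
error used to just return, leaving the receiver with neither DMA nor
interrupts armed. The fallback shape it adopts, in sketch form
(foo_enable_fifo_rx() is a hypothetical PIO re-arm):

	#include <linux/dma-mapping.h>

	static void foo_enable_fifo_rx(void) { /* re-arm PIO RX (stub) */ }

	static int foo_start_rx_dma(struct device *dev, void *buf, size_t len,
				    dma_addr_t *phys)
	{
		*phys = dma_map_single(dev, buf, len, DMA_FROM_DEVICE);
		if (dma_mapping_error(dev, *phys)) {
			/* fall back rather than silently stopping reception */
			foo_enable_fifo_rx();
			return -EIO;
		}
		return 0;
	}
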
diff --git a/drivers/tty/serial/mux.c b/drivers/tty/serial/mux.c
index 00ce31e8d19a..47ab280f553b 100644
--- a/drivers/tty/serial/mux.c
+++ b/drivers/tty/serial/mux.c
@@ -25,11 +25,7 @@
#include <asm/irq.h>
#include <asm/parisc-device.h>
-#if defined(CONFIG_SERIAL_MUX_CONSOLE) && defined(CONFIG_MAGIC_SYSRQ)
#include <linux/sysrq.h>
-#define SUPPORT_SYSRQ
-#endif
-
#include <linux/serial_core.h>
#define MUX_OFFSET 0x800
@@ -474,7 +470,7 @@ static int __init mux_probe(struct parisc_device *dev)
port->iobase = 0;
port->mapbase = dev->hpa.start + MUX_OFFSET +
(i * MUX_LINE_OFFSET);
- port->membase = ioremap_nocache(port->mapbase, MUX_LINE_OFFSET);
+ port->membase = ioremap(port->mapbase, MUX_LINE_OFFSET);
port->iotype = UPIO_MEM;
port->type = PORT_MUX;
port->irq = 0;
@@ -483,6 +479,7 @@ static int __init mux_probe(struct parisc_device *dev)
port->ops = &mux_pops;
port->flags = UPF_BOOT_AUTOCONF;
port->line = port_cnt;
+ port->has_sysrq = IS_ENABLED(CONFIG_SERIAL_MUX_CONSOLE);
/* The port->timeout needs to match what is present in
* uart_wait_until_sent in serial_core.c. Otherwise
diff --git a/drivers/tty/serial/mxs-auart.c b/drivers/tty/serial/mxs-auart.c
index e34525970682..b4f835e7de23 100644
--- a/drivers/tty/serial/mxs-auart.c
+++ b/drivers/tty/serial/mxs-auart.c
@@ -12,10 +12,6 @@
* Copyright 2008 Embedded Alley Solutions, Inc All Rights Reserved.
*/
-#if defined(CONFIG_SERIAL_MXS_AUART_CONSOLE) && defined(CONFIG_MAGIC_SYSRQ)
-#define SUPPORT_SYSRQ
-#endif
-
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/init.h>
@@ -1693,6 +1689,7 @@ static int mxs_auart_probe(struct platform_device *pdev)
s->port.fifosize = MXS_AUART_FIFO_SIZE;
s->port.uartclk = clk_get_rate(s->clk);
s->port.type = PORT_IMX;
+ s->port.has_sysrq = IS_ENABLED(CONFIG_SERIAL_MXS_AUART_CONSOLE);
mxs_init_regs(s);
diff --git a/drivers/tty/serial/omap-serial.c b/drivers/tty/serial/omap-serial.c
index 6420ae581a80..48017cec7f2f 100644
--- a/drivers/tty/serial/omap-serial.c
+++ b/drivers/tty/serial/omap-serial.c
@@ -16,10 +16,6 @@
* this driver as required for the omap-platform.
*/
-#if defined(CONFIG_SERIAL_OMAP_CONSOLE) && defined(CONFIG_MAGIC_SYSRQ)
-#define SUPPORT_SYSRQ
-#endif
-
#include <linux/module.h>
#include <linux/init.h>
#include <linux/console.h>
@@ -493,10 +489,13 @@ static unsigned int check_modem_status(struct uart_omap_port *up)
static void serial_omap_rlsi(struct uart_omap_port *up, unsigned int lsr)
{
unsigned int flag;
- unsigned char ch = 0;
+ /*
+ * Read one data character out to avoid stalling the receiver, per
+ * table 23-246 of the OMAP4 TRM.
+ */
if (likely(lsr & UART_LSR_DR))
- ch = serial_in(up, UART_RX);
+ serial_in(up, UART_RX);
up->port.icount.rx++;
flag = TTY_NORMAL;
@@ -1680,6 +1679,7 @@ static int serial_omap_probe(struct platform_device *pdev)
up->port.regshift = 2;
up->port.fifosize = 64;
up->port.ops = &serial_omap_pops;
+ up->port.has_sysrq = IS_ENABLED(CONFIG_SERIAL_OMAP_CONSOLE);
if (pdev->dev.of_node)
ret = of_alias_get_id(pdev->dev.of_node, "serial");
diff --git a/drivers/tty/serial/owl-uart.c b/drivers/tty/serial/owl-uart.c
index d2d8b3494685..42c8cc93b603 100644
--- a/drivers/tty/serial/owl-uart.c
+++ b/drivers/tty/serial/owl-uart.c
@@ -427,7 +427,7 @@ static int owl_uart_request_port(struct uart_port *port)
return -EBUSY;
if (port->flags & UPF_IOREMAP) {
- port->membase = devm_ioremap_nocache(port->dev, port->mapbase,
+ port->membase = devm_ioremap(port->dev, port->mapbase,
resource_size(res));
if (!port->membase)
return -EBUSY;
diff --git a/drivers/tty/serial/pch_uart.c b/drivers/tty/serial/pch_uart.c
index c16234bca78f..0a96217dba67 100644
--- a/drivers/tty/serial/pch_uart.c
+++ b/drivers/tty/serial/pch_uart.c
@@ -2,9 +2,6 @@
/*
 * Copyright (C) 2011 LAPIS Semiconductor Co., Ltd.
*/
-#if defined(CONFIG_SERIAL_PCH_UART_CONSOLE) && defined(CONFIG_MAGIC_SYSRQ)
-#define SUPPORT_SYSRQ
-#endif
#include <linux/kernel.h>
#include <linux/serial_reg.h>
#include <linux/slab.h>
@@ -587,12 +584,8 @@ static int pch_uart_hal_read(struct eg20t_port *priv, unsigned char *buf,
if (uart_handle_break(port))
continue;
}
-#ifdef SUPPORT_SYSRQ
- if (port->sysrq) {
- if (uart_handle_sysrq_char(port, rbr))
- continue;
- }
-#endif
+ if (uart_handle_sysrq_char(port, rbr))
+ continue;
buf[i++] = rbr;
}
@@ -1796,6 +1789,7 @@ static struct eg20t_port *pch_uart_init_port(struct pci_dev *pdev,
priv->port.flags = UPF_BOOT_AUTOCONF;
priv->port.fifosize = fifosize;
priv->port.line = board->line_no;
+ priv->port.has_sysrq = IS_ENABLED(CONFIG_SERIAL_PCH_UART_CONSOLE);
priv->trigger = PCH_UART_HAL_TRIGGER_M;
snprintf(priv->irq_name, IRQ_NAME_SIZE,
diff --git a/drivers/tty/serial/pic32_uart.c b/drivers/tty/serial/pic32_uart.c
index 0bdf1687983f..484b7e8d5381 100644
--- a/drivers/tty/serial/pic32_uart.c
+++ b/drivers/tty/serial/pic32_uart.c
@@ -618,7 +618,7 @@ static int pic32_uart_request_port(struct uart_port *port)
"pic32_uart_mem"))
return -EBUSY;
- port->membase = devm_ioremap_nocache(port->dev, port->mapbase,
+ port->membase = devm_ioremap(port->dev, port->mapbase,
resource_size(res_mem));
if (!port->membase) {
dev_err(port->dev, "Unable to map registers\n");
diff --git a/drivers/tty/serial/pmac_zilog.c b/drivers/tty/serial/pmac_zilog.c
index bcb5bf70534e..ba65a3bbd72a 100644
--- a/drivers/tty/serial/pmac_zilog.c
+++ b/drivers/tty/serial/pmac_zilog.c
@@ -61,10 +61,6 @@
#define of_machine_is_compatible(x) (0)
#endif
-#if defined (CONFIG_SERIAL_PMACZILOG_CONSOLE) && defined(CONFIG_MAGIC_SYSRQ)
-#define SUPPORT_SYSRQ
-#endif
-
#include <linux/serial.h>
#include <linux/serial_core.h>
@@ -1721,6 +1717,7 @@ static int __init pmz_init_port(struct uart_pmac_port *uap)
uap->control_reg = uap->port.membase;
uap->data_reg = uap->control_reg + 4;
uap->port_type = 0;
+ uap->port.has_sysrq = IS_ENABLED(CONFIG_SERIAL_PMACZILOG_CONSOLE);
pmz_convert_to_zs(uap, CS8, 0, 9600);
diff --git a/drivers/tty/serial/pnx8xxx_uart.c b/drivers/tty/serial/pnx8xxx_uart.c
index 223a9499104e..972d94e8d32b 100644
--- a/drivers/tty/serial/pnx8xxx_uart.c
+++ b/drivers/tty/serial/pnx8xxx_uart.c
@@ -10,10 +10,6 @@
* Copyright (C) 2000 Deep Blue Solutions Ltd.
*/
-#if defined(CONFIG_SERIAL_PNX8XXX_CONSOLE) && defined(CONFIG_MAGIC_SYSRQ)
-#define SUPPORT_SYSRQ
-#endif
-
#include <linux/module.h>
#include <linux/ioport.h>
#include <linux/init.h>
@@ -220,9 +216,7 @@ static void pnx8xxx_rx_chars(struct pnx8xxx_port *sport)
else if (status & FIFO_TO_SM(PNX8XXX_UART_FIFO_RXFE))
flg = TTY_FRAME;
-#ifdef SUPPORT_SYSRQ
sport->port.sysrq = 0;
-#endif
}
if (uart_handle_sysrq_char(&sport->port, ch))
@@ -800,6 +794,7 @@ static int pnx8xxx_serial_probe(struct platform_device *pdev)
if (pnx8xxx_ports[i].port.mapbase != res->start)
continue;
+ pnx8xxx_ports[i].port.has_sysrq = IS_ENABLED(CONFIG_SERIAL_PNX8XXX_CONSOLE);
pnx8xxx_ports[i].port.dev = &pdev->dev;
uart_add_one_port(&pnx8xxx_reg, &pnx8xxx_ports[i].port);
platform_set_drvdata(pdev, &pnx8xxx_ports[i]);
diff --git a/drivers/tty/serial/pxa.c b/drivers/tty/serial/pxa.c
index 4932b674f7ef..41319ef96fa6 100644
--- a/drivers/tty/serial/pxa.c
+++ b/drivers/tty/serial/pxa.c
@@ -19,10 +19,6 @@
*/
-#if defined(CONFIG_SERIAL_PXA_CONSOLE) && defined(CONFIG_MAGIC_SYSRQ)
-#define SUPPORT_SYSRQ
-#endif
-
#include <linux/ioport.h>
#include <linux/init.h>
#include <linux/console.h>
@@ -879,6 +875,7 @@ static int serial_pxa_probe(struct platform_device *dev)
sport->port.dev = &dev->dev;
sport->port.flags = UPF_IOREMAP | UPF_BOOT_AUTOCONF;
sport->port.uartclk = clk_get_rate(sport->clk);
+ sport->port.has_sysrq = IS_ENABLED(CONFIG_SERIAL_PXA_CONSOLE);
ret = serial_pxa_probe_dt(dev, sport);
if (ret > 0)
diff --git a/drivers/tty/serial/qcom_geni_serial.c b/drivers/tty/serial/qcom_geni_serial.c
index ff63728a95f4..191abb18fc2a 100644
--- a/drivers/tty/serial/qcom_geni_serial.c
+++ b/drivers/tty/serial/qcom_geni_serial.c
@@ -1,10 +1,6 @@
// SPDX-License-Identifier: GPL-2.0
// Copyright (c) 2017-2018, The Linux foundation. All rights reserved.
-#if defined(CONFIG_SERIAL_QCOM_GENI_CONSOLE) && defined(CONFIG_MAGIC_SYSRQ)
-# define SUPPORT_SYSRQ
-#endif
-
#include <linux/clk.h>
#include <linux/console.h>
#include <linux/io.h>
@@ -14,6 +10,7 @@
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
#include <linux/pm_wakeirq.h>
#include <linux/qcom-geni-se.h>
#include <linux/serial.h>
@@ -92,7 +89,11 @@
#define DEF_TX_WM 2
#define DEF_FIFO_WIDTH_BITS 32
#define UART_RX_WM 2
-#define MAX_LOOPBACK_CFG 3
+
+/* SE_UART_LOOPBACK_CFG */
+#define RX_TX_SORTED BIT(0)
+#define CTS_RTS_SORTED BIT(1)
+#define RX_TX_CTS_RTS_SORTED (RX_TX_SORTED | CTS_RTS_SORTED)
#ifdef CONFIG_CONSOLE_POLL
#define CONSOLE_RX_BYTES_PW 1
@@ -103,7 +104,7 @@
struct qcom_geni_serial_port {
struct uart_port uport;
struct geni_se se;
- char name[20];
+ const char *name;
u32 tx_fifo_depth;
u32 tx_fifo_width;
u32 rx_fifo_depth;
@@ -164,30 +165,6 @@ static struct qcom_geni_serial_port qcom_geni_uart_ports[GENI_UART_PORTS] = {
},
};
-static ssize_t loopback_show(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- struct qcom_geni_serial_port *port = dev_get_drvdata(dev);
-
- return snprintf(buf, sizeof(u32), "%d\n", port->loopback);
-}
-
-static ssize_t loopback_store(struct device *dev,
- struct device_attribute *attr, const char *buf,
- size_t size)
-{
- struct qcom_geni_serial_port *port = dev_get_drvdata(dev);
- u32 loopback;
-
- if (kstrtoint(buf, 0, &loopback) || loopback > MAX_LOOPBACK_CFG) {
- dev_err(dev, "Invalid input\n");
- return -EINVAL;
- }
- port->loopback = loopback;
- return size;
-}
-static DEVICE_ATTR_RW(loopback);
-
static struct qcom_geni_serial_port qcom_geni_console_port = {
.uport = {
.iotype = UPIO_MEM,
@@ -237,10 +214,14 @@ static void qcom_geni_serial_set_mctrl(struct uart_port *uport,
unsigned int mctrl)
{
u32 uart_manual_rfr = 0;
+ struct qcom_geni_serial_port *port = to_dev_port(uport, uport);
if (uart_console(uport))
return;
+ if (mctrl & TIOCM_LOOP)
+ port->loopback = RX_TX_CTS_RTS_SORTED;
+
if (!(mctrl & TIOCM_RTS))
uart_manual_rfr = UART_MANUAL_RFR_EN | UART_RFR_NOT_READY;
writel(uart_manual_rfr, uport->membase + SE_UART_MANUAL_RFR);
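
With the ad-hoc sysfs loopback attribute removed (see the deleted
loopback_show/loopback_store above), internal loopback is now requested
through the standard modem-control path: userspace sets TIOCM_LOOP and
the driver's set_mctrl() translates it into SE_UART_LOOPBACK_CFG. A
hypothetical userspace sketch (device node name assumed):

	#include <fcntl.h>
	#include <sys/ioctl.h>
	#include <unistd.h>

	#ifndef TIOCM_LOOP
	#define TIOCM_LOOP	0x8000	/* from asm-generic/termios.h */
	#endif

	int main(void)
	{
		int bits, fd = open("/dev/ttyHS0", O_RDWR | O_NOCTTY);

		if (fd < 0 || ioctl(fd, TIOCMGET, &bits) < 0)
			return 1;
		bits |= TIOCM_LOOP;
		if (ioctl(fd, TIOCMSET, &bits) < 0)	/* ends up in set_mctrl() */
			return 1;
		close(fd);
		return 0;
	}
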
@@ -757,15 +738,6 @@ out_write_wakeup:
uart_write_wakeup(uport);
}
-static irqreturn_t qcom_geni_serial_wakeup_isr(int isr, void *dev)
-{
- struct uart_port *uport = dev;
-
- pm_wakeup_event(uport->dev, 2000);
-
- return IRQ_HANDLED;
-}
-
static irqreturn_t qcom_geni_serial_isr(int isr, void *dev)
{
u32 m_irq_en;
@@ -1302,50 +1274,57 @@ static int qcom_geni_serial_probe(struct platform_device *pdev)
port->rx_fifo_depth = DEF_FIFO_DEPTH_WORDS;
port->tx_fifo_width = DEF_FIFO_WIDTH_BITS;
- scnprintf(port->name, sizeof(port->name), "qcom_geni_serial_%s%d",
- (uart_console(uport) ? "console" : "uart"), uport->line);
+ port->name = devm_kasprintf(uport->dev, GFP_KERNEL,
+ "qcom_geni_serial_%s%d",
+ uart_console(uport) ? "console" : "uart", uport->line);
+ if (!port->name)
+ return -ENOMEM;
+
irq = platform_get_irq(pdev, 0);
if (irq < 0)
return irq;
uport->irq = irq;
+ uport->has_sysrq = IS_ENABLED(CONFIG_SERIAL_QCOM_GENI_CONSOLE);
+
+ if (!console)
+ port->wakeup_irq = platform_get_irq_optional(pdev, 1);
+
+ uport->private_data = drv;
+ platform_set_drvdata(pdev, port);
+ port->handle_rx = console ? handle_rx_console : handle_rx_uart;
+
+ ret = uart_add_one_port(drv, uport);
+ if (ret)
+ return ret;
irq_set_status_flags(uport->irq, IRQ_NOAUTOEN);
ret = devm_request_irq(uport->dev, uport->irq, qcom_geni_serial_isr,
IRQF_TRIGGER_HIGH, port->name, uport);
if (ret) {
dev_err(uport->dev, "Failed to get IRQ ret %d\n", ret);
+ uart_remove_one_port(drv, uport);
return ret;
}
- if (!console) {
- port->wakeup_irq = platform_get_irq(pdev, 1);
- if (port->wakeup_irq < 0) {
- dev_err(&pdev->dev, "Failed to get wakeup IRQ %d\n",
- port->wakeup_irq);
- } else {
- irq_set_status_flags(port->wakeup_irq, IRQ_NOAUTOEN);
- ret = devm_request_irq(uport->dev, port->wakeup_irq,
- qcom_geni_serial_wakeup_isr,
- IRQF_TRIGGER_FALLING, "uart_wakeup", uport);
- if (ret) {
- dev_err(uport->dev, "Failed to register wakeup IRQ ret %d\n",
- ret);
- return ret;
- }
-
- device_init_wakeup(&pdev->dev, true);
- ret = dev_pm_set_wake_irq(&pdev->dev, port->wakeup_irq);
- if (unlikely(ret))
- dev_err(uport->dev, "%s:Failed to set IRQ wake:%d\n",
- __func__, ret);
+ /*
+ * Set pm_runtime status as ACTIVE so that wakeup_irq gets
+ * enabled/disabled from dev_pm_arm_wake_irq during system
+ * suspend/resume respectively.
+ */
+ pm_runtime_set_active(&pdev->dev);
+
+ if (port->wakeup_irq > 0) {
+ device_init_wakeup(&pdev->dev, true);
+ ret = dev_pm_set_dedicated_wake_irq(&pdev->dev,
+ port->wakeup_irq);
+ if (ret) {
+ device_init_wakeup(&pdev->dev, false);
+ uart_remove_one_port(drv, uport);
+ return ret;
}
}
- uport->private_data = drv;
- platform_set_drvdata(pdev, port);
- port->handle_rx = console ? handle_rx_console : handle_rx_uart;
- if (!console)
- device_create_file(uport->dev, &dev_attr_loopback);
- return uart_add_one_port(drv, uport);
+
+ return 0;
}
static int qcom_geni_serial_remove(struct platform_device *pdev)
@@ -1353,7 +1332,10 @@ static int qcom_geni_serial_remove(struct platform_device *pdev)
struct qcom_geni_serial_port *port = platform_get_drvdata(pdev);
struct uart_driver *drv = port->uport.private_data;
+ dev_pm_clear_wake_irq(&pdev->dev);
+ device_init_wakeup(&pdev->dev, false);
uart_remove_one_port(drv, &port->uport);
+
return 0;
}
@@ -1362,12 +1344,7 @@ static int __maybe_unused qcom_geni_serial_sys_suspend(struct device *dev)
struct qcom_geni_serial_port *port = dev_get_drvdata(dev);
struct uart_port *uport = &port->uport;
- uart_suspend_port(uport->private_data, uport);
-
- if (port->wakeup_irq > 0)
- enable_irq(port->wakeup_irq);
-
- return 0;
+ return uart_suspend_port(uport->private_data, uport);
}
static int __maybe_unused qcom_geni_serial_sys_resume(struct device *dev)
@@ -1375,9 +1352,6 @@ static int __maybe_unused qcom_geni_serial_sys_resume(struct device *dev)
struct qcom_geni_serial_port *port = dev_get_drvdata(dev);
struct uart_port *uport = &port->uport;
- if (port->wakeup_irq > 0)
- disable_irq(port->wakeup_irq);
-
return uart_resume_port(uport->private_data, uport);
}
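
Taken together, the qcom probe/remove/suspend hunks hand the wakeup
interrupt to the dedicated wake-IRQ infrastructure: after
dev_pm_set_dedicated_wake_irq(), the PM core arms and disarms the line
across system suspend (which is why probe marks the device
runtime-active), so the driver's own wakeup ISR and the
enable_irq()/disable_irq() calls in suspend/resume can go. The pairing,
sketched with hypothetical helpers:

	#include <linux/pm_runtime.h>
	#include <linux/pm_wakeirq.h>
	#include <linux/pm_wakeup.h>

	static int foo_setup_wake(struct device *dev, int wake_irq)
	{
		pm_runtime_set_active(dev);
		device_init_wakeup(dev, true);
		return dev_pm_set_dedicated_wake_irq(dev, wake_irq);
	}

	static void foo_teardown_wake(struct device *dev)
	{
		dev_pm_clear_wake_irq(dev);
		device_init_wakeup(dev, false);
	}
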
diff --git a/drivers/tty/serial/rda-uart.c b/drivers/tty/serial/rda-uart.c
index ff9a27d48bca..b5ef86ae2746 100644
--- a/drivers/tty/serial/rda-uart.c
+++ b/drivers/tty/serial/rda-uart.c
@@ -498,7 +498,7 @@ static int rda_uart_request_port(struct uart_port *port)
return -EBUSY;
if (port->flags & UPF_IOREMAP) {
- port->membase = devm_ioremap_nocache(port->dev, port->mapbase,
+ port->membase = devm_ioremap(port->dev, port->mapbase,
resource_size(res));
if (!port->membase)
return -EBUSY;
diff --git a/drivers/tty/serial/sa1100.c b/drivers/tty/serial/sa1100.c
index 8e618129e65c..75c2a22895f9 100644
--- a/drivers/tty/serial/sa1100.c
+++ b/drivers/tty/serial/sa1100.c
@@ -7,10 +7,6 @@
* Copyright (C) 2000 Deep Blue Solutions Ltd.
*/
-#if defined(CONFIG_SERIAL_SA1100_CONSOLE) && defined(CONFIG_MAGIC_SYSRQ)
-#define SUPPORT_SYSRQ
-#endif
-
#include <linux/module.h>
#include <linux/ioport.h>
#include <linux/init.h>
@@ -214,9 +210,7 @@ sa1100_rx_chars(struct sa1100_port *sport)
else if (status & UTSR1_TO_SM(UTSR1_FRE))
flg = TTY_FRAME;
-#ifdef SUPPORT_SYSRQ
sport->port.sysrq = 0;
-#endif
}
if (uart_handle_sysrq_char(&sport->port, ch))
@@ -860,6 +854,7 @@ static int sa1100_serial_resume(struct platform_device *dev)
static int sa1100_serial_add_one_port(struct sa1100_port *sport, struct platform_device *dev)
{
sport->port.dev = &dev->dev;
+ sport->port.has_sysrq = IS_ENABLED(CONFIG_SERIAL_SA1100_CONSOLE);
// mctrl_gpio_init() requires that the GPIO driver supports interrupts,
// but we need to support GPIO drivers for hardware that has no such
diff --git a/drivers/tty/serial/samsung.h b/drivers/tty/serial/samsung.h
deleted file mode 100644
index f93022113f59..000000000000
--- a/drivers/tty/serial/samsung.h
+++ /dev/null
@@ -1,147 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-#ifndef __SAMSUNG_H
-#define __SAMSUNG_H
-
-/*
- * Driver for Samsung SoC onboard UARTs.
- *
- * Ben Dooks, Copyright (c) 2003-2008 Simtec Electronics
- * http://armlinux.simtec.co.uk/
-*/
-
-#include <linux/dmaengine.h>
-
-struct s3c24xx_uart_info {
- char *name;
- unsigned int type;
- unsigned int fifosize;
- unsigned long rx_fifomask;
- unsigned long rx_fifoshift;
- unsigned long rx_fifofull;
- unsigned long tx_fifomask;
- unsigned long tx_fifoshift;
- unsigned long tx_fifofull;
- unsigned int def_clk_sel;
- unsigned long num_clks;
- unsigned long clksel_mask;
- unsigned long clksel_shift;
-
- /* uart port features */
-
- unsigned int has_divslot:1;
-
- /* uart controls */
- int (*reset_port)(struct uart_port *, struct s3c2410_uartcfg *);
-};
-
-struct s3c24xx_serial_drv_data {
- struct s3c24xx_uart_info *info;
- struct s3c2410_uartcfg *def_cfg;
- unsigned int fifosize[CONFIG_SERIAL_SAMSUNG_UARTS];
-};
-
-struct s3c24xx_uart_dma {
- unsigned int rx_chan_id;
- unsigned int tx_chan_id;
-
- struct dma_slave_config rx_conf;
- struct dma_slave_config tx_conf;
-
- struct dma_chan *rx_chan;
- struct dma_chan *tx_chan;
-
- dma_addr_t rx_addr;
- dma_addr_t tx_addr;
-
- dma_cookie_t rx_cookie;
- dma_cookie_t tx_cookie;
-
- char *rx_buf;
-
- dma_addr_t tx_transfer_addr;
-
- size_t rx_size;
- size_t tx_size;
-
- struct dma_async_tx_descriptor *tx_desc;
- struct dma_async_tx_descriptor *rx_desc;
-
- int tx_bytes_requested;
- int rx_bytes_requested;
-};
-
-struct s3c24xx_uart_port {
- unsigned char rx_claimed;
- unsigned char tx_claimed;
- unsigned int pm_level;
- unsigned long baudclk_rate;
- unsigned int min_dma_size;
-
- unsigned int rx_irq;
- unsigned int tx_irq;
-
- unsigned int tx_in_progress;
- unsigned int tx_mode;
- unsigned int rx_mode;
-
- struct s3c24xx_uart_info *info;
- struct clk *clk;
- struct clk *baudclk;
- struct uart_port port;
- struct s3c24xx_serial_drv_data *drv_data;
-
- /* reference to platform data */
- struct s3c2410_uartcfg *cfg;
-
- struct s3c24xx_uart_dma *dma;
-
-#ifdef CONFIG_ARM_S3C24XX_CPUFREQ
- struct notifier_block freq_transition;
-#endif
-};
-
-/* conversion functions */
-
-#define s3c24xx_dev_to_port(__dev) dev_get_drvdata(__dev)
-
-/* register access controls */
-
-#define portaddr(port, reg) ((port)->membase + (reg))
-#define portaddrl(port, reg) \
- ((unsigned long *)(unsigned long)((port)->membase + (reg)))
-
-#define rd_regb(port, reg) (readb_relaxed(portaddr(port, reg)))
-#define rd_regl(port, reg) (readl_relaxed(portaddr(port, reg)))
-
-#define wr_regb(port, reg, val) writeb_relaxed(val, portaddr(port, reg))
-#define wr_regl(port, reg, val) writel_relaxed(val, portaddr(port, reg))
-
-/* Byte-order aware bit setting/clearing functions. */
-
-static inline void s3c24xx_set_bit(struct uart_port *port, int idx,
- unsigned int reg)
-{
- unsigned long flags;
- u32 val;
-
- local_irq_save(flags);
- val = rd_regl(port, reg);
- val |= (1 << idx);
- wr_regl(port, reg, val);
- local_irq_restore(flags);
-}
-
-static inline void s3c24xx_clear_bit(struct uart_port *port, int idx,
- unsigned int reg)
-{
- unsigned long flags;
- u32 val;
-
- local_irq_save(flags);
- val = rd_regl(port, reg);
- val &= ~(1 << idx);
- wr_regl(port, reg, val);
- local_irq_restore(flags);
-}
-
-#endif
diff --git a/drivers/tty/serial/samsung_tty.c b/drivers/tty/serial/samsung_tty.c
index 83fd51607741..73f951d65b93 100644
--- a/drivers/tty/serial/samsung_tty.c
+++ b/drivers/tty/serial/samsung_tty.c
@@ -4,7 +4,7 @@
*
* Ben Dooks, Copyright (c) 2003-2008 Simtec Electronics
* http://armlinux.simtec.co.uk/
-*/
+ */
/* Note on 2410 error handling
*
@@ -19,11 +19,7 @@
* and change the policy on BREAK
*
* BJD, 04-Nov-2004
-*/
-
-#if defined(CONFIG_SERIAL_SAMSUNG_CONSOLE) && defined(CONFIG_MAGIC_SYSRQ)
-#define SUPPORT_SYSRQ
-#endif
+ */
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
@@ -44,33 +40,8 @@
#include <linux/clk.h>
#include <linux/cpufreq.h>
#include <linux/of.h>
-
#include <asm/irq.h>
-#include "samsung.h"
-
-#if defined(CONFIG_SERIAL_SAMSUNG_DEBUG) && \
- !defined(MODULE)
-
-extern void printascii(const char *);
-
-__printf(1, 2)
-static void dbg(const char *fmt, ...)
-{
- va_list va;
- char buff[256];
-
- va_start(va, fmt);
- vscnprintf(buff, sizeof(buff), fmt, va);
- va_end(va);
-
- printascii(buff);
-}
-
-#else
-#define dbg(fmt, ...) do { if (0) no_printk(fmt, ##__VA_ARGS__); } while (0)
-#endif
-
/* UART name and device definitions */
#define S3C24XX_SERIAL_NAME "ttySAC"
@@ -81,14 +52,142 @@ static void dbg(const char *fmt, ...)
#define S3C24XX_TX_DMA 2
#define S3C24XX_RX_PIO 1
#define S3C24XX_RX_DMA 2
-/* macros to change one thing to another */
-
-#define tx_enabled(port) ((port)->unused[0])
-#define rx_enabled(port) ((port)->unused[1])
/* flag to ignore all characters coming in */
#define RXSTAT_DUMMY_READ (0x10000000)
+struct s3c24xx_uart_info {
+ char *name;
+ unsigned int type;
+ unsigned int fifosize;
+ unsigned long rx_fifomask;
+ unsigned long rx_fifoshift;
+ unsigned long rx_fifofull;
+ unsigned long tx_fifomask;
+ unsigned long tx_fifoshift;
+ unsigned long tx_fifofull;
+ unsigned int def_clk_sel;
+ unsigned long num_clks;
+ unsigned long clksel_mask;
+ unsigned long clksel_shift;
+
+ /* uart port features */
+
+ unsigned int has_divslot:1;
+};
+
+struct s3c24xx_serial_drv_data {
+ struct s3c24xx_uart_info *info;
+ struct s3c2410_uartcfg *def_cfg;
+ unsigned int fifosize[CONFIG_SERIAL_SAMSUNG_UARTS];
+};
+
+struct s3c24xx_uart_dma {
+ unsigned int rx_chan_id;
+ unsigned int tx_chan_id;
+
+ struct dma_slave_config rx_conf;
+ struct dma_slave_config tx_conf;
+
+ struct dma_chan *rx_chan;
+ struct dma_chan *tx_chan;
+
+ dma_addr_t rx_addr;
+ dma_addr_t tx_addr;
+
+ dma_cookie_t rx_cookie;
+ dma_cookie_t tx_cookie;
+
+ char *rx_buf;
+
+ dma_addr_t tx_transfer_addr;
+
+ size_t rx_size;
+ size_t tx_size;
+
+ struct dma_async_tx_descriptor *tx_desc;
+ struct dma_async_tx_descriptor *rx_desc;
+
+ int tx_bytes_requested;
+ int rx_bytes_requested;
+};
+
+struct s3c24xx_uart_port {
+ unsigned char rx_claimed;
+ unsigned char tx_claimed;
+ unsigned char rx_enabled;
+ unsigned char tx_enabled;
+ unsigned int pm_level;
+ unsigned long baudclk_rate;
+ unsigned int min_dma_size;
+
+ unsigned int rx_irq;
+ unsigned int tx_irq;
+
+ unsigned int tx_in_progress;
+ unsigned int tx_mode;
+ unsigned int rx_mode;
+
+ struct s3c24xx_uart_info *info;
+ struct clk *clk;
+ struct clk *baudclk;
+ struct uart_port port;
+ struct s3c24xx_serial_drv_data *drv_data;
+
+ /* reference to platform data */
+ struct s3c2410_uartcfg *cfg;
+
+ struct s3c24xx_uart_dma *dma;
+
+#ifdef CONFIG_ARM_S3C24XX_CPUFREQ
+ struct notifier_block freq_transition;
+#endif
+};
+
+/* conversion functions */
+
+#define s3c24xx_dev_to_port(__dev) dev_get_drvdata(__dev)
+
+/* register access controls */
+
+#define portaddr(port, reg) ((port)->membase + (reg))
+#define portaddrl(port, reg) \
+ ((unsigned long *)(unsigned long)((port)->membase + (reg)))
+
+#define rd_regb(port, reg) (readb_relaxed(portaddr(port, reg)))
+#define rd_regl(port, reg) (readl_relaxed(portaddr(port, reg)))
+
+#define wr_regb(port, reg, val) writeb_relaxed(val, portaddr(port, reg))
+#define wr_regl(port, reg, val) writel_relaxed(val, portaddr(port, reg))
+
+/* Byte-order aware bit setting/clearing functions. */
+
+static inline void s3c24xx_set_bit(struct uart_port *port, int idx,
+ unsigned int reg)
+{
+ unsigned long flags;
+ u32 val;
+
+ local_irq_save(flags);
+ val = rd_regl(port, reg);
+ val |= (1 << idx);
+ wr_regl(port, reg, val);
+ local_irq_restore(flags);
+}
+
+static inline void s3c24xx_clear_bit(struct uart_port *port, int idx,
+ unsigned int reg)
+{
+ unsigned long flags;
+ u32 val;
+
+ local_irq_save(flags);
+ val = rd_regl(port, reg);
+ val &= ~(1 << idx);
+ wr_regl(port, reg, val);
+ local_irq_restore(flags);
+}
+
static inline struct s3c24xx_uart_port *to_ourport(struct uart_port *port)
{
return container_of(port, struct s3c24xx_uart_port, port);
@@ -118,6 +217,7 @@ static int s3c24xx_serial_has_interrupt_mask(struct uart_port *port)
static void s3c24xx_serial_rx_enable(struct uart_port *port)
{
+ struct s3c24xx_uart_port *ourport = to_ourport(port);
unsigned long flags;
unsigned int ucon, ufcon;
int count = 10000;
@@ -135,12 +235,13 @@ static void s3c24xx_serial_rx_enable(struct uart_port *port)
ucon |= S3C2410_UCON_RXIRQMODE;
wr_regl(port, S3C2410_UCON, ucon);
- rx_enabled(port) = 1;
+ ourport->rx_enabled = 1;
spin_unlock_irqrestore(&port->lock, flags);
}
static void s3c24xx_serial_rx_disable(struct uart_port *port)
{
+ struct s3c24xx_uart_port *ourport = to_ourport(port);
unsigned long flags;
unsigned int ucon;
@@ -150,7 +251,7 @@ static void s3c24xx_serial_rx_disable(struct uart_port *port)
ucon &= ~S3C2410_UCON_RXIRQMODE;
wr_regl(port, S3C2410_UCON, ucon);
- rx_enabled(port) = 0;
+ ourport->rx_enabled = 0;
spin_unlock_irqrestore(&port->lock, flags);
}
@@ -162,7 +263,7 @@ static void s3c24xx_serial_stop_tx(struct uart_port *port)
struct dma_tx_state state;
int count;
- if (!tx_enabled(port))
+ if (!ourport->tx_enabled)
return;
if (s3c24xx_serial_has_interrupt_mask(port))
@@ -182,7 +283,7 @@ static void s3c24xx_serial_stop_tx(struct uart_port *port)
port->icount.tx += count;
}
- tx_enabled(port) = 0;
+ ourport->tx_enabled = 0;
ourport->tx_in_progress = 0;
if (port->flags & UPF_CONS_FLOW)
@@ -340,11 +441,11 @@ static void s3c24xx_serial_start_tx(struct uart_port *port)
struct s3c24xx_uart_port *ourport = to_ourport(port);
struct circ_buf *xmit = &port->state->xmit;
- if (!tx_enabled(port)) {
+ if (!ourport->tx_enabled) {
if (port->flags & UPF_CONS_FLOW)
s3c24xx_serial_rx_disable(port);
- tx_enabled(port) = 1;
+ ourport->tx_enabled = 1;
if (!ourport->dma || !ourport->dma->tx_chan)
s3c24xx_serial_start_tx_pio(ourport);
}
@@ -389,14 +490,14 @@ static void s3c24xx_serial_stop_rx(struct uart_port *port)
enum dma_status dma_status;
unsigned int received;
- if (rx_enabled(port)) {
- dbg("s3c24xx_serial_stop_rx: port=%p\n", port);
+ if (ourport->rx_enabled) {
+ dev_dbg(port->dev, "stopping rx\n");
if (s3c24xx_serial_has_interrupt_mask(port))
s3c24xx_set_bit(port, S3C64XX_UINTM_RXD,
S3C64XX_UINTM);
else
disable_irq_nosync(ourport->rx_irq);
- rx_enabled(port) = 0;
+ ourport->rx_enabled = 0;
}
if (dma && dma->rx_chan) {
dmaengine_pause(dma->tx_chan);
@@ -546,7 +647,7 @@ static void s3c24xx_serial_rx_drain_fifo(struct s3c24xx_uart_port *ourport);
static irqreturn_t s3c24xx_serial_rx_chars_dma(void *dev_id)
{
- unsigned int utrstat, ufstat, received;
+ unsigned int utrstat, received;
struct s3c24xx_uart_port *ourport = dev_id;
struct uart_port *port = &ourport->port;
struct s3c24xx_uart_dma *dma = ourport->dma;
@@ -556,7 +657,7 @@ static irqreturn_t s3c24xx_serial_rx_chars_dma(void *dev_id)
struct dma_tx_state state;
utrstat = rd_regl(port, S3C2410_UTRSTAT);
- ufstat = rd_regl(port, S3C2410_UFSTAT);
+ rd_regl(port, S3C2410_UFSTAT);
spin_lock_irqsave(&port->lock, flags);
@@ -618,9 +719,9 @@ static void s3c24xx_serial_rx_drain_fifo(struct s3c24xx_uart_port *ourport)
if (port->flags & UPF_CONS_FLOW) {
int txe = s3c24xx_serial_txempty_nofifo(port);
- if (rx_enabled(port)) {
+ if (ourport->rx_enabled) {
if (!txe) {
- rx_enabled(port) = 0;
+ ourport->rx_enabled = 0;
continue;
}
} else {
@@ -628,7 +729,7 @@ static void s3c24xx_serial_rx_drain_fifo(struct s3c24xx_uart_port *ourport)
ufcon = rd_regl(port, S3C2410_UFCON);
ufcon |= S3C2410_UFCON_RESETRX;
wr_regl(port, S3C2410_UFCON, ufcon);
- rx_enabled(port) = 1;
+ ourport->rx_enabled = 1;
return;
}
continue;
@@ -641,12 +742,13 @@ static void s3c24xx_serial_rx_drain_fifo(struct s3c24xx_uart_port *ourport)
port->icount.rx++;
if (unlikely(uerstat & S3C2410_UERSTAT_ANY)) {
- dbg("rxerr: port ch=0x%02x, rxs=0x%08x\n",
- ch, uerstat);
+ dev_dbg(port->dev,
+ "rxerr: port ch=0x%02x, rxs=0x%08x\n",
+ ch, uerstat);
/* check for break */
if (uerstat & S3C2410_UERSTAT_BREAK) {
- dbg("break!\n");
+ dev_dbg(port->dev, "break!\n");
port->icount.brk++;
if (uart_handle_break(port))
continue; /* Ignore character */
@@ -732,7 +834,7 @@ static irqreturn_t s3c24xx_serial_tx_chars(int irq, void *id)
/* if there isn't anything more to transmit, or the uart is now
* stopped, disable the uart and exit
- */
+ */
if (uart_circ_empty(xmit) || uart_tx_stopped(port)) {
s3c24xx_serial_stop_tx(port);
@@ -978,7 +1080,7 @@ static void s3c24xx_serial_shutdown(struct uart_port *port)
if (ourport->tx_claimed) {
if (!s3c24xx_serial_has_interrupt_mask(port))
free_irq(ourport->tx_irq, ourport);
- tx_enabled(port) = 0;
+ ourport->tx_enabled = 0;
ourport->tx_claimed = 0;
ourport->tx_mode = 0;
}
@@ -987,7 +1089,7 @@ static void s3c24xx_serial_shutdown(struct uart_port *port)
if (!s3c24xx_serial_has_interrupt_mask(port))
free_irq(ourport->rx_irq, ourport);
ourport->rx_claimed = 0;
- rx_enabled(port) = 0;
+ ourport->rx_enabled = 0;
}
/* Clear pending interrupts and mask all interrupts */
@@ -1009,10 +1111,7 @@ static int s3c24xx_serial_startup(struct uart_port *port)
struct s3c24xx_uart_port *ourport = to_ourport(port);
int ret;
- dbg("s3c24xx_serial_startup: port=%p (%08llx,%p)\n",
- port, (unsigned long long)port->mapbase, port->membase);
-
- rx_enabled(port) = 1;
+ ourport->rx_enabled = 1;
ret = request_irq(ourport->rx_irq, s3c24xx_serial_rx_chars, 0,
s3c24xx_serial_portname(port), ourport);
@@ -1024,9 +1123,9 @@ static int s3c24xx_serial_startup(struct uart_port *port)
ourport->rx_claimed = 1;
- dbg("requesting tx irq...\n");
+ dev_dbg(port->dev, "requesting tx irq...\n");
- tx_enabled(port) = 1;
+ ourport->tx_enabled = 1;
ret = request_irq(ourport->tx_irq, s3c24xx_serial_tx_chars, 0,
s3c24xx_serial_portname(port), ourport);
@@ -1038,10 +1137,9 @@ static int s3c24xx_serial_startup(struct uart_port *port)
ourport->tx_claimed = 1;
- dbg("s3c24xx_serial_startup ok\n");
-
/* the port reset code should have done the correct
- * register setup for the port controls */
+ * register setup for the port controls
+ */
return ret;
@@ -1057,9 +1155,6 @@ static int s3c64xx_serial_startup(struct uart_port *port)
unsigned int ufcon;
int ret;
- dbg("s3c64xx_serial_startup: port=%p (%08llx,%p)\n",
- port, (unsigned long long)port->mapbase, port->membase);
-
wr_regl(port, S3C64XX_UINTM, 0xf);
if (ourport->dma) {
ret = s3c24xx_serial_request_dma(ourport);
@@ -1077,9 +1172,9 @@ static int s3c64xx_serial_startup(struct uart_port *port)
}
/* For compatibility with s3c24xx Soc's */
- rx_enabled(port) = 1;
+ ourport->rx_enabled = 1;
ourport->rx_claimed = 1;
- tx_enabled(port) = 0;
+ ourport->tx_enabled = 0;
ourport->tx_claimed = 1;
spin_lock_irqsave(&port->lock, flags);
@@ -1097,7 +1192,6 @@ static int s3c64xx_serial_startup(struct uart_port *port)
/* Enable Rx Interrupt */
s3c24xx_clear_bit(port, S3C64XX_UINTM_RXD, S3C64XX_UINTM);
- dbg("s3c64xx_serial_startup ok\n");
return ret;
}
@@ -1145,7 +1239,7 @@ static void s3c24xx_serial_pm(struct uart_port *port, unsigned int level,
* baud clocks (and the resultant actual baud rates) and then tries to
* pick the closest one and select that.
*
-*/
+ */
#define MAX_CLK_NAME_LENGTH 15
@@ -1315,29 +1409,30 @@ static void s3c24xx_serial_set_termios(struct uart_port *port,
if (cfg->has_fracval) {
udivslot = (div & 15);
- dbg("fracval = %04x\n", udivslot);
+ dev_dbg(port->dev, "fracval = %04x\n", udivslot);
} else {
udivslot = udivslot_table[div & 15];
- dbg("udivslot = %04x (div %d)\n", udivslot, div & 15);
+ dev_dbg(port->dev, "udivslot = %04x (div %d)\n",
+ udivslot, div & 15);
}
}
switch (termios->c_cflag & CSIZE) {
case CS5:
- dbg("config: 5bits/char\n");
+ dev_dbg(port->dev, "config: 5bits/char\n");
ulcon = S3C2410_LCON_CS5;
break;
case CS6:
- dbg("config: 6bits/char\n");
+ dev_dbg(port->dev, "config: 6bits/char\n");
ulcon = S3C2410_LCON_CS6;
break;
case CS7:
- dbg("config: 7bits/char\n");
+ dev_dbg(port->dev, "config: 7bits/char\n");
ulcon = S3C2410_LCON_CS7;
break;
case CS8:
default:
- dbg("config: 8bits/char\n");
+ dev_dbg(port->dev, "config: 8bits/char\n");
ulcon = S3C2410_LCON_CS8;
break;
}
@@ -1359,8 +1454,9 @@ static void s3c24xx_serial_set_termios(struct uart_port *port,
spin_lock_irqsave(&port->lock, flags);
- dbg("setting ulcon to %08x, brddiv to %d, udivslot %08x\n",
- ulcon, quot, udivslot);
+ dev_dbg(port->dev,
+ "setting ulcon to %08x, brddiv to %d, udivslot %08x\n",
+ ulcon, quot, udivslot);
wr_regl(port, S3C2410_ULCON, ulcon);
wr_regl(port, S3C2410_UBRDIV, quot);
@@ -1381,10 +1477,11 @@ static void s3c24xx_serial_set_termios(struct uart_port *port,
if (ourport->info->has_divslot)
wr_regl(port, S3C2443_DIVSLOT, udivslot);
- dbg("uart: ulcon = 0x%08x, ucon = 0x%08x, ufcon = 0x%08x\n",
- rd_regl(port, S3C2410_ULCON),
- rd_regl(port, S3C2410_UCON),
- rd_regl(port, S3C2410_UFCON));
+ dev_dbg(port->dev,
+ "uart: ulcon = 0x%08x, ucon = 0x%08x, ufcon = 0x%08x\n",
+ rd_regl(port, S3C2410_ULCON),
+ rd_regl(port, S3C2410_UCON),
+ rd_regl(port, S3C2410_UFCON));
/*
* Update the per-port timeout.
@@ -1442,6 +1539,7 @@ static void s3c24xx_serial_release_port(struct uart_port *port)
static int s3c24xx_serial_request_port(struct uart_port *port)
{
const char *name = s3c24xx_serial_portname(port);
+
return request_mem_region(port->mapbase, MAP_SIZE, name) ? 0 : -EBUSY;
}
@@ -1583,7 +1681,7 @@ s3c24xx_serial_ports[CONFIG_SERIAL_SAMSUNG_UARTS] = {
/* s3c24xx_serial_resetport
*
* reset the fifos and other the settings.
-*/
+ */
static void s3c24xx_serial_resetport(struct uart_port *port,
struct s3c2410_uartcfg *cfg)
@@ -1637,7 +1735,8 @@ static int s3c24xx_serial_cpufreq_transition(struct notifier_block *nb,
if (val == CPUFREQ_PRECHANGE) {
/* we should really shut the port down whilst the
- * frequency change is in progress. */
+ * frequency change is in progress.
+ */
} else if (val == CPUFREQ_POSTCHANGE) {
struct ktermios *termios;
@@ -1743,8 +1842,6 @@ static int s3c24xx_serial_init_port(struct s3c24xx_uart_port *ourport,
struct resource *res;
int ret;
- dbg("s3c24xx_serial_init_port: port=%p, platdev=%p\n", port, platdev);
-
if (platdev == NULL)
return -ENODEV;
@@ -1761,7 +1858,7 @@ static int s3c24xx_serial_init_port(struct s3c24xx_uart_port *ourport,
port->uartclk = 1;
if (cfg->uart_flags & UPF_CONS_FLOW) {
- dbg("s3c24xx_serial_init_port: enabling flow control\n");
+ dev_dbg(port->dev, "enabling flow control\n");
port->flags |= UPF_CONS_FLOW;
}
@@ -1773,7 +1870,7 @@ static int s3c24xx_serial_init_port(struct s3c24xx_uart_port *ourport,
return -EINVAL;
}
- dbg("resource %pR)\n", res);
+ dev_dbg(port->dev, "resource %pR)\n", res);
port->membase = devm_ioremap(port->dev, res->start, resource_size(res));
if (!port->membase) {
@@ -1835,9 +1932,9 @@ static int s3c24xx_serial_init_port(struct s3c24xx_uart_port *ourport,
wr_regl(port, S3C64XX_UINTSP, 0xf);
}
- dbg("port: map=%pa, mem=%p, irq=%d (%d,%d), clock=%u\n",
- &port->mapbase, port->membase, port->irq,
- ourport->rx_irq, ourport->tx_irq, port->uartclk);
+ dev_dbg(port->dev, "port: map=%pa, mem=%p, irq=%d (%d,%d), clock=%u\n",
+ &port->mapbase, port->membase, port->irq,
+ ourport->rx_irq, ourport->tx_irq, port->uartclk);
/* reset the fifos (and setup the uart) */
s3c24xx_serial_resetport(port, cfg);
@@ -1851,7 +1948,10 @@ err:
/* Device driver serial port probe */
+#ifdef CONFIG_OF
static const struct of_device_id s3c24xx_uart_dt_match[];
+#endif
+
static int probe_index;
static inline struct s3c24xx_serial_drv_data *s3c24xx_get_driver_data(
@@ -1860,6 +1960,7 @@ static inline struct s3c24xx_serial_drv_data *s3c24xx_get_driver_data(
#ifdef CONFIG_OF
if (pdev->dev.of_node) {
const struct of_device_id *match;
+
match = of_match_node(s3c24xx_uart_dt_match, pdev->dev.of_node);
return (struct s3c24xx_serial_drv_data *)match->data;
}
@@ -1881,8 +1982,6 @@ static int s3c24xx_serial_probe(struct platform_device *pdev)
index = ret;
}
- dbg("s3c24xx_serial_probe(%p) %d\n", pdev, index);
-
if (index >= ARRAY_SIZE(s3c24xx_serial_ports)) {
dev_err(&pdev->dev, "serial%d out of range\n", index);
return -EINVAL;
@@ -1909,6 +2008,7 @@ static int s3c24xx_serial_probe(struct platform_device *pdev)
ourport->port.fifosize = ourport->drv_data->fifosize[index];
else if (ourport->info->fifosize)
ourport->port.fifosize = ourport->info->fifosize;
+ ourport->port.has_sysrq = IS_ENABLED(CONFIG_SERIAL_SAMSUNG_CONSOLE);
/*
* DMA transfers must be aligned at least to cache line size,
@@ -1917,7 +2017,7 @@ static int s3c24xx_serial_probe(struct platform_device *pdev)
ourport->min_dma_size = max_t(int, ourport->port.fifosize,
dma_get_cache_alignment());
- dbg("%s: initialising port %p...\n", __func__, ourport);
+ dev_dbg(&pdev->dev, "%s: initialising port %p...\n", __func__, ourport);
ret = s3c24xx_serial_init_port(ourport, pdev);
if (ret < 0)
@@ -1931,7 +2031,7 @@ static int s3c24xx_serial_probe(struct platform_device *pdev)
}
}
- dbg("%s: adding port\n", __func__);
+ dev_dbg(&pdev->dev, "%s: adding port\n", __func__);
uart_add_one_port(&s3c24xx_uart_drv, &ourport->port);
platform_set_drvdata(pdev, &ourport->port);
@@ -2008,9 +2108,10 @@ static int s3c24xx_serial_resume_noirq(struct device *dev)
/* restore IRQ mask */
if (s3c24xx_serial_has_interrupt_mask(port)) {
unsigned int uintm = 0xf;
- if (tx_enabled(port))
+
+ if (ourport->tx_enabled)
uintm &= ~S3C64XX_UINTM_TXD_MSK;
- if (rx_enabled(port))
+ if (ourport->rx_enabled)
uintm &= ~S3C64XX_UINTM_RXD_MSK;
clk_prepare_enable(ourport->clk);
if (!IS_ERR(ourport->baudclk))
@@ -2143,10 +2244,6 @@ s3c24xx_serial_get_options(struct uart_port *port, int *baud,
ucon = rd_regl(port, S3C2410_UCON);
ubrdiv = rd_regl(port, S3C2410_UBRDIV);
- dbg("s3c24xx_serial_get_options: port=%p\n"
- "registers: ulcon=%08x, ucon=%08x, ubdriv=%08x\n",
- port, ulcon, ucon, ubrdiv);
-
if (s3c24xx_port_configured(ucon)) {
switch (ulcon & S3C2410_LCON_CSMASK) {
case S3C2410_LCON_CS5:
@@ -2190,7 +2287,7 @@ s3c24xx_serial_get_options(struct uart_port *port, int *baud,
rate = 1;
*baud = rate / (16 * (ubrdiv + 1));
- dbg("calculated baud %d\n", *baud);
+ dev_dbg(port->dev, "calculated baud %d\n", *baud);
}
}
@@ -2204,9 +2301,6 @@ s3c24xx_serial_console_setup(struct console *co, char *options)
int parity = 'n';
int flow = 'n';
- dbg("s3c24xx_serial_console_setup: co=%p (%d), %s\n",
- co, co->index, options);
-
/* is this a valid port */
if (co->index == -1 || co->index >= CONFIG_SERIAL_SAMSUNG_UARTS)
@@ -2221,8 +2315,6 @@ s3c24xx_serial_console_setup(struct console *co, char *options)
cons_uart = port;
- dbg("s3c24xx_serial_console_setup: port=%p (%d)\n", port, co->index);
-
/*
* Check whether an invalid uart number has been specified, and
* if so, search for the first available port that does have
@@ -2233,7 +2325,7 @@ s3c24xx_serial_console_setup(struct console *co, char *options)
else
s3c24xx_serial_get_options(port, &baud, &parity, &bits);
- dbg("s3c24xx_serial_console_setup: baud %d\n", baud);
+ dev_dbg(port->dev, "baud %d\n", baud);
return uart_set_options(port, co, baud, parity, bits, flow);
}
@@ -2523,7 +2615,8 @@ static void samsung_early_putc(struct uart_port *port, int c)
writeb(c, port->membase + S3C2410_UTXH);
}
-static void samsung_early_write(struct console *con, const char *s, unsigned n)
+static void samsung_early_write(struct console *con, const char *s,
+ unsigned int n)
{
struct earlycon_device *dev = con->data;
@@ -2572,7 +2665,7 @@ OF_EARLYCON_DECLARE(s3c2440, "samsung,s3c2440-uart",
OF_EARLYCON_DECLARE(s3c6400, "samsung,s3c6400-uart",
s3c2440_early_console_setup);
-/* S5PV210, EXYNOS */
+/* S5PV210, Exynos */
static struct samsung_early_console_data s5pv210_early_console_data = {
.txfull_mask = S5PV210_UFSTAT_TXFULL,
};
diff --git a/drivers/tty/serial/sb1250-duart.c b/drivers/tty/serial/sb1250-duart.c
index 329aced26bd8..bd5e7e9938ce 100644
--- a/drivers/tty/serial/sb1250-duart.c
+++ b/drivers/tty/serial/sb1250-duart.c
@@ -15,10 +15,6 @@
* "BCM1250/BCM1125/BCM1125H User Manual", Broadcom Corporation
*/
-#if defined(CONFIG_SERIAL_SB1250_DUART_CONSOLE) && defined(CONFIG_MAGIC_SYSRQ)
-#define SUPPORT_SYSRQ
-#endif
-
#include <linux/compiler.h>
#include <linux/console.h>
#include <linux/delay.h>
@@ -668,7 +664,7 @@ static int sbd_map_port(struct uart_port *uport)
struct sbd_duart *duart = sport->duart;
if (!uport->membase)
- uport->membase = ioremap_nocache(uport->mapbase,
+ uport->membase = ioremap(uport->mapbase,
DUART_CHANREG_SPACING);
if (!uport->membase) {
printk(err);
@@ -676,7 +672,7 @@ static int sbd_map_port(struct uart_port *uport)
}
if (!sport->memctrl)
- sport->memctrl = ioremap_nocache(duart->mapctrl,
+ sport->memctrl = ioremap(duart->mapctrl,
DUART_CHANREG_SPACING);
if (!sport->memctrl) {
printk(err);
@@ -813,6 +809,7 @@ static void __init sbd_probe_duarts(void)
uport->ops = &sbd_ops;
uport->line = line;
uport->mapbase = SBD_CHANREGS(line);
+ uport->has_sysrq = IS_ENABLED(CONFIG_SERIAL_SB1250_DUART_CONSOLE);
}
}
}
diff --git a/drivers/tty/serial/sccnxp.c b/drivers/tty/serial/sccnxp.c
index d2b77aae42ae..10cc16a71f26 100644
--- a/drivers/tty/serial/sccnxp.c
+++ b/drivers/tty/serial/sccnxp.c
@@ -7,10 +7,6 @@
* Based on sc26xx.c, by Thomas Bogendörfer (tsbogend@alpha.franken.de)
*/
-#if defined(CONFIG_SERIAL_SCCNXP_CONSOLE) && defined(CONFIG_MAGIC_SYSRQ)
-#define SUPPORT_SYSRQ
-#endif
-
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/err.h>
@@ -1000,6 +996,7 @@ static int sccnxp_probe(struct platform_device *pdev)
s->port[i].regshift = s->pdata.reg_shift;
s->port[i].uartclk = uartclk;
s->port[i].ops = &sccnxp_ops;
+ s->port[i].has_sysrq = IS_ENABLED(CONFIG_SERIAL_SCCNXP_CONSOLE);
uart_add_one_port(&s->uart, &s->port[i]);
/* Set direction to input */
if (s->chip->flags & SCCNXP_HAVE_IO)
diff --git a/drivers/tty/serial/serial-tegra.c b/drivers/tty/serial/serial-tegra.c
index b6ace6290e23..33034b852a51 100644
--- a/drivers/tty/serial/serial-tegra.c
+++ b/drivers/tty/serial/serial-tegra.c
@@ -141,6 +141,7 @@ struct tegra_uart_port {
int configured_rate;
bool use_rx_pio;
bool use_tx_pio;
+ bool rx_dma_active;
};
static void tegra_uart_start_next_tx(struct tegra_uart_port *tup);
@@ -533,11 +534,12 @@ static int tegra_uart_start_tx_dma(struct tegra_uart_port *tup,
struct circ_buf *xmit = &tup->uport.state->xmit;
dma_addr_t tx_phys_addr;
- dma_sync_single_for_device(tup->uport.dev, tup->tx_dma_buf_phys,
- UART_XMIT_SIZE, DMA_TO_DEVICE);
-
tup->tx_bytes = count & ~(0xF);
tx_phys_addr = tup->tx_dma_buf_phys + xmit->tail;
+
+ dma_sync_single_for_device(tup->uport.dev, tx_phys_addr,
+ tup->tx_bytes, DMA_TO_DEVICE);
+
tup->tx_dma_desc = dmaengine_prep_slave_single(tup->tx_dma_chan,
tx_phys_addr, tup->tx_bytes, DMA_MEM_TO_DEV,
DMA_PREP_INTERRUPT);
@@ -679,7 +681,7 @@ static void tegra_uart_copy_rx_to_tty(struct tegra_uart_port *tup,
return;
dma_sync_single_for_cpu(tup->uport.dev, tup->rx_dma_buf_phys,
- TEGRA_UART_RX_DMA_BUFFER_SIZE, DMA_FROM_DEVICE);
+ count, DMA_FROM_DEVICE);
copied = tty_insert_flip_string(tty,
((unsigned char *)(tup->rx_dma_buf_virt)), count);
if (copied != count) {
@@ -687,7 +689,7 @@ static void tegra_uart_copy_rx_to_tty(struct tegra_uart_port *tup,
dev_err(tup->uport.dev, "RxData copy to tty layer failed\n");
}
dma_sync_single_for_device(tup->uport.dev, tup->rx_dma_buf_phys,
- TEGRA_UART_RX_DMA_BUFFER_SIZE, DMA_TO_DEVICE);
+ count, DMA_TO_DEVICE);
}
static void tegra_uart_rx_buffer_push(struct tegra_uart_port *tup,
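
The tegra sync changes above narrow the dma_sync_* calls to the bytes
actually transferred rather than the whole DMA buffer. For a streaming
mapping, ownership alternates: sync_for_cpu before the CPU reads what
the device wrote, sync_for_device before handing the buffer back.
Sketch (names assumed):

	#include <linux/dma-mapping.h>

	static void foo_rx_handoff(struct device *dev, dma_addr_t buf_phys,
				   size_t count)
	{
		/* CPU may now read the count bytes the device DMA'd in */
		dma_sync_single_for_cpu(dev, buf_phys, count, DMA_FROM_DEVICE);
		/* ... push to the tty layer here ... */
		/* return ownership to the device before the next transfer */
		dma_sync_single_for_device(dev, buf_phys, count, DMA_FROM_DEVICE);
	}
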
@@ -731,6 +733,7 @@ static void tegra_uart_rx_dma_complete(void *args)
if (tup->rts_active)
set_rts(tup, false);
+ tup->rx_dma_active = false;
tegra_uart_rx_buffer_push(tup, 0);
tegra_uart_start_rx_dma(tup);
@@ -742,18 +745,27 @@ done:
spin_unlock_irqrestore(&u->lock, flags);
}
-static void tegra_uart_handle_rx_dma(struct tegra_uart_port *tup)
+static void tegra_uart_terminate_rx_dma(struct tegra_uart_port *tup)
{
struct dma_tx_state state;
- /* Deactivate flow control to stop sender */
- if (tup->rts_active)
- set_rts(tup, false);
+ if (!tup->rx_dma_active)
+ return;
dmaengine_terminate_all(tup->rx_dma_chan);
dmaengine_tx_status(tup->rx_dma_chan, tup->rx_cookie, &state);
+
tegra_uart_rx_buffer_push(tup, state.residue);
- tegra_uart_start_rx_dma(tup);
+ tup->rx_dma_active = false;
+}
+
+static void tegra_uart_handle_rx_dma(struct tegra_uart_port *tup)
+{
+ /* Deactivate flow control to stop sender */
+ if (tup->rts_active)
+ set_rts(tup, false);
+
+ tegra_uart_terminate_rx_dma(tup);
if (tup->rts_active)
set_rts(tup, true);
@@ -763,6 +775,9 @@ static int tegra_uart_start_rx_dma(struct tegra_uart_port *tup)
{
unsigned int count = TEGRA_UART_RX_DMA_BUFFER_SIZE;
+ if (tup->rx_dma_active)
+ return 0;
+
tup->rx_dma_desc = dmaengine_prep_slave_single(tup->rx_dma_chan,
tup->rx_dma_buf_phys, count, DMA_DEV_TO_MEM,
DMA_PREP_INTERRUPT);
@@ -771,10 +786,9 @@ static int tegra_uart_start_rx_dma(struct tegra_uart_port *tup)
return -EIO;
}
+ tup->rx_dma_active = true;
tup->rx_dma_desc->callback = tegra_uart_rx_dma_complete;
tup->rx_dma_desc->callback_param = tup;
- dma_sync_single_for_device(tup->uport.dev, tup->rx_dma_buf_phys,
- count, DMA_TO_DEVICE);
tup->rx_bytes_requested = count;
tup->rx_cookie = dmaengine_submit(tup->rx_dma_desc);
dma_async_issue_pending(tup->rx_dma_chan);
@@ -820,6 +834,7 @@ static irqreturn_t tegra_uart_isr(int irq, void *data)
struct uart_port *u = &tup->uport;
unsigned long iir;
unsigned long ier;
+ bool is_rx_start = false;
bool is_rx_int = false;
unsigned long flags;
@@ -832,10 +847,12 @@ static irqreturn_t tegra_uart_isr(int irq, void *data)
if (tup->rx_in_progress) {
ier = tup->ier_shadow;
ier |= (UART_IER_RLSI | UART_IER_RTOIE |
- TEGRA_UART_IER_EORD);
+ TEGRA_UART_IER_EORD | UART_IER_RDI);
tup->ier_shadow = ier;
tegra_uart_write(tup, ier, UART_IER);
}
+ } else if (is_rx_start) {
+ tegra_uart_start_rx_dma(tup);
}
spin_unlock_irqrestore(&u->lock, flags);
return IRQ_HANDLED;
@@ -854,17 +871,23 @@ static irqreturn_t tegra_uart_isr(int irq, void *data)
case 4: /* End of data */
case 6: /* Rx timeout */
- case 2: /* Receive */
- if (!tup->use_rx_pio && !is_rx_int) {
- is_rx_int = true;
+ if (!tup->use_rx_pio) {
+ is_rx_int = tup->rx_in_progress;
/* Disable Rx interrupts */
ier = tup->ier_shadow;
- ier |= UART_IER_RDI;
- tegra_uart_write(tup, ier, UART_IER);
ier &= ~(UART_IER_RDI | UART_IER_RLSI |
UART_IER_RTOIE | TEGRA_UART_IER_EORD);
tup->ier_shadow = ier;
tegra_uart_write(tup, ier, UART_IER);
+ break;
+ }
+ /* Fall through */
+ case 2: /* Receive */
+ if (!tup->use_rx_pio) {
+ is_rx_start = tup->rx_in_progress;
+ tup->ier_shadow &= ~UART_IER_RDI;
+ tegra_uart_write(tup, tup->ier_shadow,
+ UART_IER);
} else {
do_handle_rx_pio(tup);
}
@@ -886,7 +909,6 @@ static void tegra_uart_stop_rx(struct uart_port *u)
{
struct tegra_uart_port *tup = to_tegra_uport(u);
struct tty_port *port = &tup->uport.state->port;
- struct dma_tx_state state;
unsigned long ier;
if (tup->rts_active)
@@ -903,13 +925,11 @@ static void tegra_uart_stop_rx(struct uart_port *u)
tup->ier_shadow = ier;
tegra_uart_write(tup, ier, UART_IER);
tup->rx_in_progress = 0;
- if (tup->rx_dma_chan && !tup->use_rx_pio) {
- dmaengine_terminate_all(tup->rx_dma_chan);
- dmaengine_tx_status(tup->rx_dma_chan, tup->rx_cookie, &state);
- tegra_uart_rx_buffer_push(tup, state.residue);
- } else {
+
+ if (!tup->use_rx_pio)
+ tegra_uart_terminate_rx_dma(tup);
+ else
tegra_uart_handle_rx_pio(tup, port);
- }
}
static void tegra_uart_hw_deinit(struct tegra_uart_port *tup)
@@ -1052,12 +1072,6 @@ static int tegra_uart_hw_init(struct tegra_uart_port *tup)
tup->lcr_shadow = TEGRA_UART_DEFAULT_LSR;
tup->fcr_shadow |= UART_FCR_DMA_SELECT;
tegra_uart_write(tup, tup->fcr_shadow, UART_FCR);
-
- ret = tegra_uart_start_rx_dma(tup);
- if (ret < 0) {
- dev_err(tup->uport.dev, "Not able to start Rx DMA\n");
- return ret;
- }
} else {
tegra_uart_write(tup, tup->fcr_shadow, UART_FCR);
}
@@ -1067,10 +1081,6 @@ static int tegra_uart_hw_init(struct tegra_uart_port *tup)
* Enable IE_RXS for the receive status interrupts like line errors.
* Enable IE_RX_TIMEOUT to get the bytes which cannot be DMA'd.
*
- * If using DMA mode, enable EORD instead of receive interrupt which
- * will interrupt after the UART is done with the receive instead of
- * the interrupt when the FIFO "threshold" is reached.
- *
* EORD is a different interrupt than RX_TIMEOUT - RX_TIMEOUT occurs when
* the DATA is sitting in the FIFO and couldn't be transferred to the
* DMA as the DMA size alignment (4 bytes) is not met. EORD will be
@@ -1081,11 +1091,14 @@ static int tegra_uart_hw_init(struct tegra_uart_port *tup)
* both the EORD as well as RX_TIMEOUT - SW sees RX_TIMEOUT first
* then the EORD.
*/
+ tup->ier_shadow = UART_IER_RLSI | UART_IER_RTOIE | UART_IER_RDI;
+
+ /*
+ * If using DMA mode, enable EORD interrupt to notify about RX
+ * completion.
+ */
if (!tup->use_rx_pio)
- tup->ier_shadow = UART_IER_RLSI | UART_IER_RTOIE |
- TEGRA_UART_IER_EORD;
- else
- tup->ier_shadow = UART_IER_RLSI | UART_IER_RTOIE | UART_IER_RDI;
+ tup->ier_shadow |= TEGRA_UART_IER_EORD;
tegra_uart_write(tup, tup->ier_shadow, UART_IER);
return 0;
@@ -1140,6 +1153,9 @@ static int tegra_uart_dma_channel_allocate(struct tegra_uart_port *tup,
dma_release_channel(dma_chan);
return -ENOMEM;
}
+ dma_sync_single_for_device(tup->uport.dev, dma_phys,
+ TEGRA_UART_RX_DMA_BUFFER_SIZE,
+ DMA_TO_DEVICE);
dma_sconfig.src_addr = tup->uport.mapbase;
dma_sconfig.src_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE;
dma_sconfig.src_maxburst = tup->cdata->max_dma_burst_bytes;
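
The serial-tegra hunks above narrow the dma_sync_single_for_device() calls from the whole DMA buffer to only the bytes involved in the current transfer, and move the one-time TO_DEVICE sync of the RX buffer to channel allocation. A minimal sketch of the partial-sync idea, assuming the handle comes from a prior dma_map_single() (device and buffer names are stand-ins):

#include <linux/dma-mapping.h>

/*
 * Sync only the window [offset, offset + len) of a streaming mapping
 * before the device reads it, rather than the full buffer; the handle
 * passed to the sync call may point anywhere inside the mapping.
 */
static void sync_tx_window(struct device *dev, dma_addr_t buf_phys,
                           size_t offset, size_t len)
{
        dma_sync_single_for_device(dev, buf_phys + offset, len,
                                   DMA_TO_DEVICE);
}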
diff --git a/drivers/tty/serial/serial_core.c b/drivers/tty/serial/serial_core.c
index 7c2782785736..76e506ee335c 100644
--- a/drivers/tty/serial/serial_core.c
+++ b/drivers/tty/serial/serial_core.c
@@ -2603,6 +2603,7 @@ struct tty_driver *uart_console_device(struct console *co, int *index)
*index = co->index;
return p->tty_driver;
}
+EXPORT_SYMBOL_GPL(uart_console_device);
static ssize_t uart_get_attr_uartclk(struct device *dev,
struct device_attribute *attr, char *buf)
@@ -3081,6 +3082,89 @@ void uart_insert_char(struct uart_port *port, unsigned int status,
}
EXPORT_SYMBOL_GPL(uart_insert_char);
+int uart_handle_sysrq_char(struct uart_port *port, unsigned int ch)
+{
+ if (!IS_ENABLED(CONFIG_MAGIC_SYSRQ_SERIAL))
+ return 0;
+
+ if (!port->has_sysrq || !port->sysrq)
+ return 0;
+
+ if (ch && time_before(jiffies, port->sysrq)) {
+ handle_sysrq(ch);
+ port->sysrq = 0;
+ return 1;
+ }
+ port->sysrq = 0;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(uart_handle_sysrq_char);
+
+int uart_prepare_sysrq_char(struct uart_port *port, unsigned int ch)
+{
+ if (!IS_ENABLED(CONFIG_MAGIC_SYSRQ_SERIAL))
+ return 0;
+
+ if (!port->has_sysrq || !port->sysrq)
+ return 0;
+
+ if (ch && time_before(jiffies, port->sysrq)) {
+ port->sysrq_ch = ch;
+ port->sysrq = 0;
+ return 1;
+ }
+ port->sysrq = 0;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(uart_prepare_sysrq_char);
+
+void uart_unlock_and_check_sysrq(struct uart_port *port, unsigned long irqflags)
+{
+ int sysrq_ch;
+
+ if (!port->has_sysrq) {
+ spin_unlock_irqrestore(&port->lock, irqflags);
+ return;
+ }
+
+ sysrq_ch = port->sysrq_ch;
+ port->sysrq_ch = 0;
+
+ spin_unlock_irqrestore(&port->lock, irqflags);
+
+ if (sysrq_ch)
+ handle_sysrq(sysrq_ch);
+}
+EXPORT_SYMBOL_GPL(uart_unlock_and_check_sysrq);
+
+/*
+ * We do the SysRQ and SAK checking like this...
+ */
+int uart_handle_break(struct uart_port *port)
+{
+ struct uart_state *state = port->state;
+
+ if (port->handle_break)
+ port->handle_break(port);
+
+ if (port->has_sysrq) {
+ if (port->cons && port->cons->index == port->line) {
+ if (!port->sysrq) {
+ port->sysrq = jiffies + HZ*5;
+ return 1;
+ }
+ port->sysrq = 0;
+ }
+ }
+
+ if (port->flags & UPF_SAK)
+ do_SAK(state->port.tty);
+ return 0;
+}
+EXPORT_SYMBOL_GPL(uart_handle_break);
+
EXPORT_SYMBOL(uart_write_wakeup);
EXPORT_SYMBOL(uart_register_driver);
EXPORT_SYMBOL(uart_unregister_driver);
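
With uart_handle_sysrq_char(), uart_prepare_sysrq_char() and uart_unlock_and_check_sysrq() now exported from serial_core, a driver can record a sysrq character under the port lock and run the handler only after dropping it. A sketch of the intended calling convention in an interrupt handler (foo_rx_ready() and foo_read_char() are hypothetical register helpers, not real API):

#include <linux/interrupt.h>
#include <linux/serial_core.h>
#include <linux/tty_flip.h>

/* Hypothetical register helpers, assumed provided by the driver */
extern bool foo_rx_ready(struct uart_port *port);
extern unsigned int foo_read_char(struct uart_port *port);

static irqreturn_t foo_irq(int irq, void *dev_id)
{
        struct uart_port *port = dev_id;
        unsigned long flags;
        unsigned int ch;

        spin_lock_irqsave(&port->lock, flags);
        while (foo_rx_ready(port)) {
                ch = foo_read_char(port);
                /* Record a possible sysrq; do not run it under the lock */
                if (uart_prepare_sysrq_char(port, ch))
                        continue;
                tty_insert_flip_char(&port->state->port, ch, TTY_NORMAL);
        }
        /* Drops port->lock, then runs any recorded sysrq handler */
        uart_unlock_and_check_sysrq(port, flags);
        tty_flip_buffer_push(&port->state->port);
        return IRQ_HANDLED;
}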
diff --git a/drivers/tty/serial/serial_txx9.c b/drivers/tty/serial/serial_txx9.c
index d22ccb32aa9b..b4d89e31730e 100644
--- a/drivers/tty/serial/serial_txx9.c
+++ b/drivers/tty/serial/serial_txx9.c
@@ -12,10 +12,6 @@
* Serial driver for TX3927/TX4927/TX4925/TX4938 internal SIO controller
*/
-#if defined(CONFIG_SERIAL_TXX9_CONSOLE) && defined(CONFIG_MAGIC_SYSRQ)
-#define SUPPORT_SYSRQ
-#endif
-
#include <linux/module.h>
#include <linux/ioport.h>
#include <linux/init.h>
@@ -1095,6 +1091,7 @@ static int serial_txx9_probe(struct platform_device *dev)
port.flags = p->flags;
port.mapbase = p->mapbase;
port.dev = &dev->dev;
+ port.has_sysrq = IS_ENABLED(CONFIG_SERIAL_TXX9_CONSOLE);
ret = serial_txx9_register_port(&port);
if (ret < 0) {
dev_err(&dev->dev, "unable to register port at index %d "
diff --git a/drivers/tty/serial/sh-sci.c b/drivers/tty/serial/sh-sci.c
index 58bf9d496ba5..c073aa7001c4 100644
--- a/drivers/tty/serial/sh-sci.c
+++ b/drivers/tty/serial/sh-sci.c
@@ -15,10 +15,6 @@
* Modified to support SH7300 SCIF. Takashi Kusuda (Jun 2003).
* Removed SH7300 support (Jul 2007).
*/
-#if defined(CONFIG_SERIAL_SH_SCI_CONSOLE) && defined(CONFIG_MAGIC_SYSRQ)
-#define SUPPORT_SYSRQ
-#endif
-
#undef DEBUG
#include <linux/clk.h>
@@ -2680,7 +2676,7 @@ static int sci_remap_port(struct uart_port *port)
return 0;
if (port->dev->of_node || (port->flags & UPF_IOREMAP)) {
- port->membase = ioremap_nocache(port->mapbase, sport->reg_size);
+ port->membase = ioremap(port->mapbase, sport->reg_size);
if (unlikely(!port->membase)) {
dev_err(port->dev, "can't remap port#%d\n", port->line);
return -ENXIO;
@@ -2887,6 +2883,7 @@ static int sci_init_single(struct platform_device *dev,
port->ops = &sci_uart_ops;
port->iotype = UPIO_MEM;
port->line = index;
+ port->has_sysrq = IS_ENABLED(CONFIG_SERIAL_SH_SCI_CONSOLE);
res = platform_get_resource(dev, IORESOURCE_MEM, 0);
if (res == NULL)
@@ -3015,12 +3012,9 @@ static void serial_console_write(struct console *co, const char *s,
unsigned long flags;
int locked = 1;
-#if defined(SUPPORT_SYSRQ)
if (port->sysrq)
locked = 0;
- else
-#endif
- if (oops_in_progress)
+ else if (oops_in_progress)
locked = spin_trylock_irqsave(&port->lock, flags);
else
spin_lock_irqsave(&port->lock, flags);
diff --git a/drivers/tty/serial/sprd_serial.c b/drivers/tty/serial/sprd_serial.c
index f60a59d9bf27..3d3c70634589 100644
--- a/drivers/tty/serial/sprd_serial.c
+++ b/drivers/tty/serial/sprd_serial.c
@@ -3,10 +3,6 @@
* Copyright (C) 2012-2015 Spreadtrum Communications Inc.
*/
-#if defined(CONFIG_SERIAL_SPRD_CONSOLE) && defined(CONFIG_MAGIC_SYSRQ)
-#define SUPPORT_SYSRQ
-#endif
-
#include <linux/clk.h>
#include <linux/console.h>
#include <linux/delay.h>
@@ -1230,6 +1226,7 @@ static int sprd_probe(struct platform_device *pdev)
up->fifosize = SPRD_FIFO_SIZE;
up->ops = &serial_sprd_ops;
up->flags = UPF_BOOT_AUTOCONF;
+ up->has_sysrq = IS_ENABLED(CONFIG_SERIAL_SPRD_CONSOLE);
ret = sprd_clk_init(up);
if (ret)
diff --git a/drivers/tty/serial/st-asc.c b/drivers/tty/serial/st-asc.c
index 7971997cdead..e7048515a79c 100644
--- a/drivers/tty/serial/st-asc.c
+++ b/drivers/tty/serial/st-asc.c
@@ -5,10 +5,6 @@
* Copyright (C) 2003-2013 STMicroelectronics (R&D) Limited
*/
-#if defined(CONFIG_SERIAL_ST_ASC_CONSOLE) && defined(CONFIG_MAGIC_SYSRQ)
-#define SUPPORT_SYSRQ
-#endif
-
#include <linux/module.h>
#include <linux/serial.h>
#include <linux/console.h>
@@ -508,7 +504,6 @@ static void asc_set_termios(struct uart_port *port, struct ktermios *termios,
struct ktermios *old)
{
struct asc_port *ascport = to_asc_port(port);
- struct device_node *np = port->dev->of_node;
struct gpio_desc *gpiod;
unsigned int baud;
u32 ctrl_val;
@@ -570,13 +565,12 @@ static void asc_set_termios(struct uart_port *port, struct ktermios *termios,
pinctrl_select_state(ascport->pinctrl,
ascport->states[NO_HW_FLOWCTRL]);
- gpiod = devm_fwnode_get_gpiod_from_child(port->dev,
- "rts",
- &np->fwnode,
- GPIOD_OUT_LOW,
- np->name);
- if (!IS_ERR(gpiod))
+ gpiod = devm_gpiod_get(port->dev, "rts", GPIOD_OUT_LOW);
+ if (!IS_ERR(gpiod)) {
+ gpiod_set_consumer_name(gpiod,
+ port->dev->of_node->name);
ascport->rts = gpiod;
+ }
}
}
@@ -730,6 +724,7 @@ static int asc_init_port(struct asc_port *ascport,
port->fifosize = ASC_FIFO_SIZE;
port->dev = &pdev->dev;
port->irq = platform_get_irq(pdev, 0);
+ port->has_sysrq = IS_ENABLED(CONFIG_SERIAL_ST_ASC_CONSOLE);
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
port->membase = devm_ioremap_resource(&pdev->dev, res);
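
The st-asc hunk above replaces devm_fwnode_get_gpiod_from_child() on the port's own of_node with a plain devm_gpiod_get(); the consumer label the old call took as an argument is now applied separately with gpiod_set_consumer_name(). A condensed sketch of the same pattern as a standalone helper (names assumed for illustration):

#include <linux/device.h>
#include <linux/gpio/consumer.h>

static struct gpio_desc *foo_get_rts_gpio(struct device *dev)
{
        struct gpio_desc *gpiod;

        /* Looks up the "rts-gpios" property of dev's own fwnode */
        gpiod = devm_gpiod_get(dev, "rts", GPIOD_OUT_LOW);
        if (IS_ERR(gpiod))
                return gpiod;

        /* Label shown in GPIO debugfs; previously a lookup argument */
        gpiod_set_consumer_name(gpiod, dev_name(dev));
        return gpiod;
}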
diff --git a/drivers/tty/serial/stm32-usart.c b/drivers/tty/serial/stm32-usart.c
index 2f72514d63ed..5e93e8d40f59 100644
--- a/drivers/tty/serial/stm32-usart.c
+++ b/drivers/tty/serial/stm32-usart.c
@@ -8,10 +8,6 @@
* Inspired by st-asc.c from STMicroelectronics (c)
*/
-#if defined(CONFIG_SERIAL_STM32_CONSOLE) && defined(CONFIG_MAGIC_SYSRQ)
-#define SUPPORT_SYSRQ
-#endif
-
#include <linux/clk.h>
#include <linux/console.h>
#include <linux/delay.h>
@@ -926,6 +922,7 @@ static int stm32_init_port(struct stm32_port *stm32port,
port->ops = &stm32_uart_ops;
port->dev = &pdev->dev;
port->fifosize = stm32port->info->cfg.fifosize;
+ port->has_sysrq = IS_ENABLED(CONFIG_SERIAL_STM32_CONSOLE);
ret = platform_get_irq(pdev, 0);
if (ret <= 0)
diff --git a/drivers/tty/serial/sunhv.c b/drivers/tty/serial/sunhv.c
index f8503f8fc44e..eafada8fb6fa 100644
--- a/drivers/tty/serial/sunhv.c
+++ b/drivers/tty/serial/sunhv.c
@@ -25,10 +25,6 @@
#include <asm/irq.h>
#include <asm/setup.h>
-#if defined(CONFIG_MAGIC_SYSRQ)
-#define SUPPORT_SYSRQ
-#endif
-
#include <linux/serial_core.h>
#include <linux/sunserialcore.h>
@@ -552,6 +548,7 @@ static int hv_probe(struct platform_device *op)
sunhv_port = port;
+ port->has_sysrq = 1;
port->line = 0;
port->ops = &sunhv_pops;
port->type = PORT_SUNHV;
diff --git a/drivers/tty/serial/sunsab.c b/drivers/tty/serial/sunsab.c
index 72131b5e132e..1eb703c980e0 100644
--- a/drivers/tty/serial/sunsab.c
+++ b/drivers/tty/serial/sunsab.c
@@ -40,10 +40,6 @@
#include <asm/prom.h>
#include <asm/setup.h>
-#if defined(CONFIG_SERIAL_SUNSAB_CONSOLE) && defined(CONFIG_MAGIC_SYSRQ)
-#define SUPPORT_SYSRQ
-#endif
-
#include <linux/serial_core.h>
#include <linux/sunserialcore.h>
@@ -985,6 +981,7 @@ static int sunsab_init_one(struct uart_sunsab_port *up,
up->port.fifosize = SAB82532_XMIT_FIFO_SIZE;
up->port.iotype = UPIO_MEM;
+ up->port.has_sysrq = IS_ENABLED(CONFIG_SERIAL_SUNSAB_CONSOLE);
writeb(SAB82532_IPC_IC_ACT_LOW, &up->regs->w.ipc);
diff --git a/drivers/tty/serial/sunsu.c b/drivers/tty/serial/sunsu.c
index 4db6aaa330b2..8ce9a7a256e5 100644
--- a/drivers/tty/serial/sunsu.c
+++ b/drivers/tty/serial/sunsu.c
@@ -44,10 +44,6 @@
#include <asm/prom.h>
#include <asm/setup.h>
-#if defined(CONFIG_SERIAL_SUNSU_CONSOLE) && defined(CONFIG_MAGIC_SYSRQ)
-#define SUPPORT_SYSRQ
-#endif
-
#include <linux/serial_core.h>
#include <linux/sunserialcore.h>
@@ -1475,6 +1471,7 @@ static int su_probe(struct platform_device *op)
up->port.type = PORT_UNKNOWN;
up->port.uartclk = (SU_BASE_BAUD * 16);
+ up->port.has_sysrq = IS_ENABLED(CONFIG_SERIAL_SUNSU_CONSOLE);
err = 0;
if (up->su_type == SU_PORT_KBD || up->su_type == SU_PORT_MS) {
diff --git a/drivers/tty/serial/sunzilog.c b/drivers/tty/serial/sunzilog.c
index bc7af8b08a72..103ab8c556e7 100644
--- a/drivers/tty/serial/sunzilog.c
+++ b/drivers/tty/serial/sunzilog.c
@@ -40,10 +40,6 @@
#include <asm/prom.h>
#include <asm/setup.h>
-#if defined(CONFIG_SERIAL_SUNZILOG_CONSOLE) && defined(CONFIG_MAGIC_SYSRQ)
-#define SUPPORT_SYSRQ
-#endif
-
#include <linux/serial_core.h>
#include <linux/sunserialcore.h>
@@ -1444,6 +1440,7 @@ static int zs_probe(struct platform_device *op)
up[0].port.line = (inst * 2) + 0;
up[0].port.dev = &op->dev;
up[0].flags |= SUNZILOG_FLAG_IS_CHANNEL_A;
+ up[0].port.has_sysrq = IS_ENABLED(CONFIG_SERIAL_SUNZILOG_CONSOLE);
if (keyboard_mouse)
up[0].flags |= SUNZILOG_FLAG_CONS_KEYB;
sunzilog_init_hw(&up[0]);
@@ -1461,6 +1458,7 @@ static int zs_probe(struct platform_device *op)
up[1].port.line = (inst * 2) + 1;
up[1].port.dev = &op->dev;
up[1].flags |= 0;
+ up[1].port.has_sysrq = IS_ENABLED(CONFIG_SERIAL_SUNZILOG_CONSOLE);
if (keyboard_mouse)
up[1].flags |= SUNZILOG_FLAG_CONS_MOUSE;
sunzilog_init_hw(&up[1]);
diff --git a/drivers/tty/serial/ucc_uart.c b/drivers/tty/serial/ucc_uart.c
index a0555ae2b1ef..2e151a4c222b 100644
--- a/drivers/tty/serial/ucc_uart.c
+++ b/drivers/tty/serial/ucc_uart.c
@@ -332,8 +332,6 @@ static int qe_uart_tx_pump(struct uart_qe_port *qe_port)
struct uart_port *port = &qe_port->port;
struct circ_buf *xmit = &port->state->xmit;
- bdp = qe_port->rx_cur;
-
/* Handle xon/xoff */
if (port->x_char) {
/* Pick next descriptor and fill from buffer */
@@ -551,9 +549,7 @@ handle_error:
/* Overrun does not affect the current character ! */
if (status & BD_SC_OV)
tty_insert_flip_char(tport, 0, TTY_OVERRUN);
-#ifdef SUPPORT_SYSRQ
port->sysrq = 0;
-#endif
goto error_return;
}
diff --git a/drivers/tty/serial/vr41xx_siu.c b/drivers/tty/serial/vr41xx_siu.c
index 6d106e33f842..eeb4b6568776 100644
--- a/drivers/tty/serial/vr41xx_siu.c
+++ b/drivers/tty/serial/vr41xx_siu.c
@@ -7,10 +7,6 @@
* Based on drivers/serial/8250.c, by Russell King.
*/
-#if defined(CONFIG_SERIAL_VR41XX_CONSOLE) && defined(CONFIG_MAGIC_SYSRQ)
-#define SUPPORT_SYSRQ
-#endif
-
#include <linux/console.h>
#include <linux/errno.h>
#include <linux/init.h>
@@ -869,6 +865,7 @@ static int siu_probe(struct platform_device *dev)
port = &siu_uart_ports[i];
port->ops = &siu_uart_ops;
port->dev = &dev->dev;
+ port->has_sysrq = IS_ENABLED(CONFIG_SERIAL_VR41XX_CONSOLE);
retval = uart_add_one_port(&siu_uart_driver, port);
if (retval < 0) {
diff --git a/drivers/tty/serial/vt8500_serial.c b/drivers/tty/serial/vt8500_serial.c
index 3d58e9b34553..764e992438b2 100644
--- a/drivers/tty/serial/vt8500_serial.c
+++ b/drivers/tty/serial/vt8500_serial.c
@@ -7,10 +7,6 @@
* Author: Robert Love <rlove@google.com>
*/
-#if defined(CONFIG_SERIAL_VT8500_CONSOLE) && defined(CONFIG_MAGIC_SYSRQ)
-# define SUPPORT_SYSRQ
-#endif
-
#include <linux/hrtimer.h>
#include <linux/delay.h>
#include <linux/io.h>
@@ -703,6 +699,7 @@ static int vt8500_serial_probe(struct platform_device *pdev)
vt8500_port->uart.line = port;
vt8500_port->uart.dev = &pdev->dev;
vt8500_port->uart.flags = UPF_IOREMAP | UPF_BOOT_AUTOCONF;
+ vt8500_port->uart.has_sysrq = IS_ENABLED(CONFIG_SERIAL_VT8500_CONSOLE);
/* Serial core uses the magic "16" everywhere - adjust for it */
vt8500_port->uart.uartclk = 16 * clk_get_rate(vt8500_port->clk) /
diff --git a/drivers/tty/serial/xilinx_uartps.c b/drivers/tty/serial/xilinx_uartps.c
index 4e55bc327a54..98db9dc168ff 100644
--- a/drivers/tty/serial/xilinx_uartps.c
+++ b/drivers/tty/serial/xilinx_uartps.c
@@ -9,10 +9,6 @@
* in the code.
*/
-#if defined(CONFIG_SERIAL_XILINX_PS_UART_CONSOLE) && defined(CONFIG_MAGIC_SYSRQ)
-#define SUPPORT_SYSRQ
-#endif
-
#include <linux/platform_device.h>
#include <linux/serial.h>
#include <linux/console.h>
@@ -158,6 +154,16 @@ MODULE_PARM_DESC(rx_timeout, "Rx timeout, 1-255");
#define CDNS_UART_MODEMCR_DTR 0x00000001 /* Data Terminal Ready */
/*
+ * Modem Status register:
+ * The read/write Modem Status register reports the interface with the modem
+ * or data set, or a peripheral device emulating a modem.
+ */
+#define CDNS_UART_MODEMSR_DCD BIT(7) /* Data Carrier Detect */
+#define CDNS_UART_MODEMSR_RI BIT(6) /* Ring Indicator */
+#define CDNS_UART_MODEMSR_DSR BIT(5) /* Data Set Ready */
+#define CDNS_UART_MODEMSR_CTS BIT(4) /* Clear To Send */
+
+/*
* Channel Status Register:
* The channel status register (CSR) is provided to enable the control logic
* to monitor the status of bits in the channel interrupt status register,
@@ -684,7 +690,7 @@ static void cdns_uart_break_ctl(struct uart_port *port, int ctl)
static void cdns_uart_set_termios(struct uart_port *port,
struct ktermios *termios, struct ktermios *old)
{
- unsigned int cval = 0;
+ u32 cval = 0;
unsigned int baud, minbaud, maxbaud;
unsigned long flags;
unsigned int ctrl_reg, mode_reg, val;
@@ -805,6 +811,13 @@ static void cdns_uart_set_termios(struct uart_port *port,
cval |= mode_reg & 1;
writel(cval, port->membase + CDNS_UART_MR);
+ cval = readl(port->membase + CDNS_UART_MODEMCR);
+ if (termios->c_cflag & CRTSCTS)
+ cval |= CDNS_UART_MODEMCR_FCM;
+ else
+ cval &= ~CDNS_UART_MODEMCR_FCM;
+ writel(cval, port->membase + CDNS_UART_MODEMCR);
+
spin_unlock_irqrestore(&port->lock, flags);
}
@@ -1007,12 +1020,24 @@ static void cdns_uart_config_port(struct uart_port *port, int flags)
*/
static unsigned int cdns_uart_get_mctrl(struct uart_port *port)
{
+ u32 val;
+ unsigned int mctrl = 0;
struct cdns_uart *cdns_uart_data = port->private_data;
if (cdns_uart_data->cts_override)
- return 0;
-
- return TIOCM_CTS | TIOCM_DSR | TIOCM_CAR;
+ return TIOCM_CTS | TIOCM_DSR | TIOCM_CAR;
+
+ val = readl(port->membase + CDNS_UART_MODEMSR);
+ if (val & CDNS_UART_MODEMSR_CTS)
+ mctrl |= TIOCM_CTS;
+ if (val & CDNS_UART_MODEMSR_DSR)
+ mctrl |= TIOCM_DSR;
+ if (val & CDNS_UART_MODEMSR_RI)
+ mctrl |= TIOCM_RNG;
+ if (val & CDNS_UART_MODEMSR_DCD)
+ mctrl |= TIOCM_CAR;
+
+ return mctrl;
}
static void cdns_uart_set_mctrl(struct uart_port *port, unsigned int mctrl)
@@ -1027,12 +1052,13 @@ static void cdns_uart_set_mctrl(struct uart_port *port, unsigned int mctrl)
val = readl(port->membase + CDNS_UART_MODEMCR);
mode_reg = readl(port->membase + CDNS_UART_MR);
- val &= ~(CDNS_UART_MODEMCR_RTS | CDNS_UART_MODEMCR_DTR |
- CDNS_UART_MODEMCR_FCM);
+ val &= ~(CDNS_UART_MODEMCR_RTS | CDNS_UART_MODEMCR_DTR);
mode_reg &= ~CDNS_UART_MR_CHMODE_MASK;
- if (mctrl & TIOCM_RTS || mctrl & TIOCM_DTR)
- val |= CDNS_UART_MODEMCR_FCM;
+ if (mctrl & TIOCM_RTS)
+ val |= CDNS_UART_MODEMCR_RTS;
+ if (mctrl & TIOCM_DTR)
+ val |= CDNS_UART_MODEMCR_DTR;
if (mctrl & TIOCM_LOOP)
mode_reg |= CDNS_UART_MR_CHMODE_L_LOOP;
else
@@ -1634,6 +1660,7 @@ static int cdns_uart_probe(struct platform_device *pdev)
port->flags = UPF_BOOT_AUTOCONF;
port->ops = &cdns_uart_ops;
port->fifosize = CDNS_UART_FIFO_SIZE;
+ port->has_sysrq = IS_ENABLED(CONFIG_SERIAL_XILINX_PS_UART_CONSOLE);
/*
* Register the port.
diff --git a/drivers/tty/serial/zs.c b/drivers/tty/serial/zs.c
index b03d3e458ea2..4b4f604646a7 100644
--- a/drivers/tty/serial/zs.c
+++ b/drivers/tty/serial/zs.c
@@ -44,10 +44,6 @@
* complicated and prevents the use of some automatic modes of operation.
*/
-#if defined(CONFIG_SERIAL_ZS_CONSOLE) && defined(CONFIG_MAGIC_SYSRQ)
-#define SUPPORT_SYSRQ
-#endif
-
#include <linux/bug.h>
#include <linux/console.h>
#include <linux/delay.h>
@@ -992,7 +988,7 @@ static void zs_release_port(struct uart_port *uport)
static int zs_map_port(struct uart_port *uport)
{
if (!uport->membase)
- uport->membase = ioremap_nocache(uport->mapbase,
+ uport->membase = ioremap(uport->mapbase,
ZS_CHAN_IO_SIZE);
if (!uport->membase) {
printk(KERN_ERR "zs: Cannot map MMIO\n");
@@ -1106,6 +1102,7 @@ static int __init zs_probe_sccs(void)
zport->scc = &zs_sccs[chip];
zport->clk_mode = 16;
+ uport->has_sysrq = IS_ENABLED(CONFIG_SERIAL_ZS_CONSOLE);
uport->irq = zs_parms.irq[chip];
uport->uartclk = ZS_CLOCK;
uport->fifosize = 1;
diff --git a/drivers/tty/synclink.c b/drivers/tty/synclink.c
index 84f26e43b229..08d2976593d5 100644
--- a/drivers/tty/synclink.c
+++ b/drivers/tty/synclink.c
@@ -4054,7 +4054,7 @@ static int mgsl_claim_resources(struct mgsl_struct *info)
}
info->lcr_mem_requested = true;
- info->memory_base = ioremap_nocache(info->phys_memory_base,
+ info->memory_base = ioremap(info->phys_memory_base,
0x40000);
if (!info->memory_base) {
printk( "%s(%d):Can't map shared memory on device %s MemAddr=%08X\n",
@@ -4068,7 +4068,7 @@ static int mgsl_claim_resources(struct mgsl_struct *info)
goto errout;
}
- info->lcr_base = ioremap_nocache(info->phys_lcr_base,
+ info->lcr_base = ioremap(info->phys_lcr_base,
PAGE_SIZE);
if (!info->lcr_base) {
printk( "%s(%d):Can't map LCR memory on device %s MemAddr=%08X\n",
@@ -7837,7 +7837,7 @@ static int hdlcdev_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
*
* dev pointer to network device structure
*/
-static void hdlcdev_tx_timeout(struct net_device *dev)
+static void hdlcdev_tx_timeout(struct net_device *dev, unsigned int txqueue)
{
struct mgsl_struct *info = dev_to_port(dev);
unsigned long flags;
diff --git a/drivers/tty/synclink_gt.c b/drivers/tty/synclink_gt.c
index e8a9047de451..b794177ccfb9 100644
--- a/drivers/tty/synclink_gt.c
+++ b/drivers/tty/synclink_gt.c
@@ -1334,10 +1334,10 @@ static void throttle(struct tty_struct * tty)
DBGINFO(("%s throttle\n", info->device_name));
if (I_IXOFF(tty))
send_xchar(tty, STOP_CHAR(tty));
- if (C_CRTSCTS(tty)) {
+ if (C_CRTSCTS(tty)) {
spin_lock_irqsave(&info->lock,flags);
info->signals &= ~SerialSignal_RTS;
- set_signals(info);
+ set_signals(info);
spin_unlock_irqrestore(&info->lock,flags);
}
}
@@ -1359,10 +1359,10 @@ static void unthrottle(struct tty_struct * tty)
else
send_xchar(tty, START_CHAR(tty));
}
- if (C_CRTSCTS(tty)) {
+ if (C_CRTSCTS(tty)) {
spin_lock_irqsave(&info->lock,flags);
info->signals |= SerialSignal_RTS;
- set_signals(info);
+ set_signals(info);
spin_unlock_irqrestore(&info->lock,flags);
}
}
@@ -1682,7 +1682,7 @@ static int hdlcdev_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
*
* dev pointer to network device structure
*/
-static void hdlcdev_tx_timeout(struct net_device *dev)
+static void hdlcdev_tx_timeout(struct net_device *dev, unsigned int txqueue)
{
struct slgt_info *info = dev_to_port(dev);
unsigned long flags;
@@ -2098,7 +2098,7 @@ static void isr_rxdata(struct slgt_info *info)
if (desc_complete(info->rbufs[i])) {
/* all buffers full */
rx_stop(info);
- info->rx_restart = 1;
+ info->rx_restart = true;
continue;
}
info->rbufs[i].buf[count++] = (unsigned char)reg;
@@ -2560,8 +2560,8 @@ static void change_params(struct slgt_info *info)
info->read_status_mask = IRQ_RXOVER;
if (I_INPCK(info->port.tty))
info->read_status_mask |= MASK_PARITY | MASK_FRAMING;
- if (I_BRKINT(info->port.tty) || I_PARMRK(info->port.tty))
- info->read_status_mask |= MASK_BREAK;
+ if (I_BRKINT(info->port.tty) || I_PARMRK(info->port.tty))
+ info->read_status_mask |= MASK_BREAK;
if (I_IGNPAR(info->port.tty))
info->ignore_status_mask |= MASK_PARITY | MASK_FRAMING;
if (I_IGNBRK(info->port.tty)) {
@@ -3192,7 +3192,7 @@ static int tiocmset(struct tty_struct *tty,
info->signals &= ~SerialSignal_DTR;
spin_lock_irqsave(&info->lock,flags);
- set_signals(info);
+ set_signals(info);
spin_unlock_irqrestore(&info->lock,flags);
return 0;
}
@@ -3203,7 +3203,7 @@ static int carrier_raised(struct tty_port *port)
struct slgt_info *info = container_of(port, struct slgt_info, port);
spin_lock_irqsave(&info->lock,flags);
- get_signals(info);
+ get_signals(info);
spin_unlock_irqrestore(&info->lock,flags);
return (info->signals & SerialSignal_DCD) ? 1 : 0;
}
@@ -3218,7 +3218,7 @@ static void dtr_rts(struct tty_port *port, int on)
info->signals |= SerialSignal_RTS | SerialSignal_DTR;
else
info->signals &= ~(SerialSignal_RTS | SerialSignal_DTR);
- set_signals(info);
+ set_signals(info);
spin_unlock_irqrestore(&info->lock,flags);
}
@@ -3450,7 +3450,7 @@ static int claim_resources(struct slgt_info *info)
else
info->reg_addr_requested = true;
- info->reg_addr = ioremap_nocache(info->phys_reg_addr, SLGT_REG_SIZE);
+ info->reg_addr = ioremap(info->phys_reg_addr, SLGT_REG_SIZE);
if (!info->reg_addr) {
DBGERR(("%s can't map device registers, addr=%08X\n",
info->device_name, info->phys_reg_addr));
diff --git a/drivers/tty/synclinkmp.c b/drivers/tty/synclinkmp.c
index fcb91bf7a15b..33ff2dbb6650 100644
--- a/drivers/tty/synclinkmp.c
+++ b/drivers/tty/synclinkmp.c
@@ -1453,10 +1453,10 @@ static void throttle(struct tty_struct * tty)
if (I_IXOFF(tty))
send_xchar(tty, STOP_CHAR(tty));
- if (C_CRTSCTS(tty)) {
+ if (C_CRTSCTS(tty)) {
spin_lock_irqsave(&info->lock,flags);
info->serial_signals &= ~SerialSignal_RTS;
- set_signals(info);
+ set_signals(info);
spin_unlock_irqrestore(&info->lock,flags);
}
}
@@ -1482,10 +1482,10 @@ static void unthrottle(struct tty_struct * tty)
send_xchar(tty, START_CHAR(tty));
}
- if (C_CRTSCTS(tty)) {
+ if (C_CRTSCTS(tty)) {
spin_lock_irqsave(&info->lock,flags);
info->serial_signals |= SerialSignal_RTS;
- set_signals(info);
+ set_signals(info);
spin_unlock_irqrestore(&info->lock,flags);
}
}
@@ -1807,7 +1807,7 @@ static int hdlcdev_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
*
* dev pointer to network device structure
*/
-static void hdlcdev_tx_timeout(struct net_device *dev)
+static void hdlcdev_tx_timeout(struct net_device *dev, unsigned int txqueue)
{
SLMP_INFO *info = dev_to_port(dev);
unsigned long flags;
@@ -2470,7 +2470,7 @@ static void isr_io_pin( SLMP_INFO *info, u16 status )
if (status & SerialSignal_CTS) {
if ( debug_level >= DEBUG_LEVEL_ISR )
printk("CTS tx start...");
- info->port.tty->hw_stopped = 0;
+ info->port.tty->hw_stopped = 0;
tx_start(info);
info->pending_bh |= BH_TRANSMIT;
return;
@@ -2479,7 +2479,7 @@ static void isr_io_pin( SLMP_INFO *info, u16 status )
if (!(status & SerialSignal_CTS)) {
if ( debug_level >= DEBUG_LEVEL_ISR )
printk("CTS tx stop...");
- info->port.tty->hw_stopped = 1;
+ info->port.tty->hw_stopped = 1;
tx_stop(info);
}
}
@@ -2806,8 +2806,8 @@ static void change_params(SLMP_INFO *info)
info->read_status_mask2 = OVRN;
if (I_INPCK(info->port.tty))
info->read_status_mask2 |= PE | FRME;
- if (I_BRKINT(info->port.tty) || I_PARMRK(info->port.tty))
- info->read_status_mask1 |= BRKD;
+ if (I_BRKINT(info->port.tty) || I_PARMRK(info->port.tty))
+ info->read_status_mask1 |= BRKD;
if (I_IGNPAR(info->port.tty))
info->ignore_status_mask2 |= PE | FRME;
if (I_IGNBRK(info->port.tty)) {
@@ -3177,7 +3177,7 @@ static int tiocmget(struct tty_struct *tty)
unsigned long flags;
spin_lock_irqsave(&info->lock,flags);
- get_signals(info);
+ get_signals(info);
spin_unlock_irqrestore(&info->lock,flags);
result = ((info->serial_signals & SerialSignal_RTS) ? TIOCM_RTS : 0) |
@@ -3215,7 +3215,7 @@ static int tiocmset(struct tty_struct *tty,
info->serial_signals &= ~SerialSignal_DTR;
spin_lock_irqsave(&info->lock,flags);
- set_signals(info);
+ set_signals(info);
spin_unlock_irqrestore(&info->lock,flags);
return 0;
@@ -3227,7 +3227,7 @@ static int carrier_raised(struct tty_port *port)
unsigned long flags;
spin_lock_irqsave(&info->lock,flags);
- get_signals(info);
+ get_signals(info);
spin_unlock_irqrestore(&info->lock,flags);
return (info->serial_signals & SerialSignal_DCD) ? 1 : 0;
@@ -3243,7 +3243,7 @@ static void dtr_rts(struct tty_port *port, int on)
info->serial_signals |= SerialSignal_RTS | SerialSignal_DTR;
else
info->serial_signals &= ~(SerialSignal_RTS | SerialSignal_DTR);
- set_signals(info);
+ set_signals(info);
spin_unlock_irqrestore(&info->lock,flags);
}
@@ -3559,7 +3559,7 @@ static int claim_resources(SLMP_INFO *info)
else
info->sca_statctrl_requested = true;
- info->memory_base = ioremap_nocache(info->phys_memory_base,
+ info->memory_base = ioremap(info->phys_memory_base,
SCA_MEM_SIZE);
if (!info->memory_base) {
printk( "%s(%d):%s Can't map shared memory, MemAddr=%08X\n",
@@ -3568,7 +3568,7 @@ static int claim_resources(SLMP_INFO *info)
goto errout;
}
- info->lcr_base = ioremap_nocache(info->phys_lcr_base, PAGE_SIZE);
+ info->lcr_base = ioremap(info->phys_lcr_base, PAGE_SIZE);
if (!info->lcr_base) {
printk( "%s(%d):%s Can't map LCR memory, MemAddr=%08X\n",
__FILE__,__LINE__,info->device_name, info->phys_lcr_base );
@@ -3577,7 +3577,7 @@ static int claim_resources(SLMP_INFO *info)
}
info->lcr_base += info->lcr_offset;
- info->sca_base = ioremap_nocache(info->phys_sca_base, PAGE_SIZE);
+ info->sca_base = ioremap(info->phys_sca_base, PAGE_SIZE);
if (!info->sca_base) {
printk( "%s(%d):%s Can't map SCA memory, MemAddr=%08X\n",
__FILE__,__LINE__,info->device_name, info->phys_sca_base );
@@ -3586,7 +3586,7 @@ static int claim_resources(SLMP_INFO *info)
}
info->sca_base += info->sca_offset;
- info->statctrl_base = ioremap_nocache(info->phys_statctrl_base,
+ info->statctrl_base = ioremap(info->phys_statctrl_base,
PAGE_SIZE);
if (!info->statctrl_base) {
printk( "%s(%d):%s Can't map SCA Status/Control memory, MemAddr=%08X\n",
diff --git a/drivers/tty/sysrq.c b/drivers/tty/sysrq.c
index 573b2055173c..1d4f317a0e42 100644
--- a/drivers/tty/sysrq.c
+++ b/drivers/tty/sysrq.c
@@ -967,8 +967,6 @@ static struct input_handler sysrq_handler = {
.id_table = sysrq_ids,
};
-static bool sysrq_handler_registered;
-
static inline void sysrq_register_handler(void)
{
int error;
@@ -978,16 +976,11 @@ static inline void sysrq_register_handler(void)
error = input_register_handler(&sysrq_handler);
if (error)
pr_err("Failed to register input handler, error %d", error);
- else
- sysrq_handler_registered = true;
}
static inline void sysrq_unregister_handler(void)
{
- if (sysrq_handler_registered) {
- input_unregister_handler(&sysrq_handler);
- sysrq_handler_registered = false;
- }
+ input_unregister_handler(&sysrq_handler);
}
static int sysrq_reset_seq_param_set(const char *buffer,
diff --git a/drivers/tty/tty_baudrate.c b/drivers/tty/tty_baudrate.c
index f438eaa68246..40207cab3b2a 100644
--- a/drivers/tty/tty_baudrate.c
+++ b/drivers/tty/tty_baudrate.c
@@ -17,32 +17,28 @@
* include/asm/termbits.h file.
*/
static const speed_t baud_table[] = {
- 0, 50, 75, 110, 134, 150, 200, 300, 600, 1200, 1800, 2400, 4800,
- 9600, 19200, 38400, 57600, 115200, 230400, 460800,
+ 0, 50, 75, 110, 134, 150, 200, 300, 600, 1200, 1800, 2400,
+ 4800, 9600, 19200, 38400, 57600, 115200, 230400, 460800,
#ifdef __sparc__
- 76800, 153600, 307200, 614400, 921600
+ 76800, 153600, 307200, 614400, 921600, 500000, 576000,
+ 1000000, 1152000, 1500000, 2000000
#else
500000, 576000, 921600, 1000000, 1152000, 1500000, 2000000,
2500000, 3000000, 3500000, 4000000
#endif
};
-#ifndef __sparc__
static const tcflag_t baud_bits[] = {
- B0, B50, B75, B110, B134, B150, B200, B300, B600,
- B1200, B1800, B2400, B4800, B9600, B19200, B38400,
- B57600, B115200, B230400, B460800, B500000, B576000,
- B921600, B1000000, B1152000, B1500000, B2000000, B2500000,
- B3000000, B3500000, B4000000
-};
+ B0, B50, B75, B110, B134, B150, B200, B300, B600, B1200, B1800, B2400,
+ B4800, B9600, B19200, B38400, B57600, B115200, B230400, B460800,
+#ifdef __sparc__
+ B76800, B153600, B307200, B614400, B921600, B500000, B576000,
+ B1000000, B1152000, B1500000, B2000000
#else
-static const tcflag_t baud_bits[] = {
- B0, B50, B75, B110, B134, B150, B200, B300, B600,
- B1200, B1800, B2400, B4800, B9600, B19200, B38400,
- B57600, B115200, B230400, B460800, B76800, B153600,
- B307200, B614400, B921600
-};
+ B500000, B576000, B921600, B1000000, B1152000, B1500000, B2000000,
+ B2500000, B3000000, B3500000, B4000000
#endif
+};
static int n_baud_table = ARRAY_SIZE(baud_table);
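
After this rewrite, baud_table[] and baud_bits[] are kept strictly parallel, with a single #ifdef __sparc__ covering the tail of both arrays, so the same index maps a Bxxxx flag to its speed. Roughly how the lookup is meant to work against the tables above (simplified; the real tty_termios_baud_rate() also handles BOTHER/CBAUDEX and bounds):

static speed_t cbaud_to_speed(tcflag_t cflag)
{
        int i;

        for (i = 0; i < n_baud_table; i++)
                if ((cflag & CBAUD) == baud_bits[i])
                        return baud_table[i];

        return 0;       /* no match: treat as B0 (hang up) */
}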
diff --git a/drivers/tty/tty_io.c b/drivers/tty/tty_io.c
index d9f54c7d94f2..a1453fe10862 100644
--- a/drivers/tty/tty_io.c
+++ b/drivers/tty/tty_io.c
@@ -1893,7 +1893,7 @@ static struct tty_driver *tty_lookup_driver(dev_t device, struct file *filp,
struct tty_struct *tty_kopen(dev_t device)
{
struct tty_struct *tty;
- struct tty_driver *driver = NULL;
+ struct tty_driver *driver;
int index = -1;
mutex_lock(&tty_mutex);
diff --git a/drivers/tty/vt/.gitignore b/drivers/tty/vt/.gitignore
index 9b38b85f9d9a..3ecf42234d89 100644
--- a/drivers/tty/vt/.gitignore
+++ b/drivers/tty/vt/.gitignore
@@ -1,3 +1,4 @@
# SPDX-License-Identifier: GPL-2.0
+conmakehash
consolemap_deftbl.c
defkeymap.c
diff --git a/drivers/tty/vt/Makefile b/drivers/tty/vt/Makefile
index edbbe0ccdb83..329ca336b8ee 100644
--- a/drivers/tty/vt/Makefile
+++ b/drivers/tty/vt/Makefile
@@ -12,10 +12,12 @@ obj-$(CONFIG_HW_CONSOLE) += vt.o defkeymap.o
# Files generated that shall be removed upon make clean
clean-files := consolemap_deftbl.c defkeymap.c
+hostprogs-y += conmakehash
+
quiet_cmd_conmk = CONMK $@
- cmd_conmk = scripts/conmakehash $< > $@
+ cmd_conmk = $(obj)/conmakehash $< > $@
-$(obj)/consolemap_deftbl.c: $(src)/$(FONTMAPFILE)
+$(obj)/consolemap_deftbl.c: $(src)/$(FONTMAPFILE) $(obj)/conmakehash
$(call cmd,conmk)
$(obj)/defkeymap.o: $(obj)/defkeymap.c
diff --git a/drivers/tty/vt/conmakehash.c b/drivers/tty/vt/conmakehash.c
new file mode 100644
index 000000000000..cddd789fe46e
--- /dev/null
+++ b/drivers/tty/vt/conmakehash.c
@@ -0,0 +1,290 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * conmakehash.c
+ *
+ * Create arrays for initializing the kernel folded tables (using a hash
+ * table turned out to be too limiting...) Unfortunately we can't simply
+ * preinitialize the tables at compile time since kfree() cannot accept
+ * memory not allocated by kmalloc(), and doing our own memory management
+ * just for this seems like massive overkill.
+ *
+ * Copyright (C) 1995-1997 H. Peter Anvin
+ */
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <sysexits.h>
+#include <string.h>
+#include <ctype.h>
+
+#define MAX_FONTLEN 256
+
+typedef unsigned short unicode;
+
+static void usage(char *argv0)
+{
+ fprintf(stderr, "Usage: \n"
+ " %s chartable [hashsize] [hashstep] [maxhashlevel]\n", argv0);
+ exit(EX_USAGE);
+}
+
+static int getunicode(char **p0)
+{
+ char *p = *p0;
+
+ while (*p == ' ' || *p == '\t')
+ p++;
+ if (*p != 'U' || p[1] != '+' ||
+ !isxdigit(p[2]) || !isxdigit(p[3]) || !isxdigit(p[4]) ||
+ !isxdigit(p[5]) || isxdigit(p[6]))
+ return -1;
+ *p0 = p+6;
+ return strtol(p+2,0,16);
+}
+
+unicode unitable[MAX_FONTLEN][255];
+ /* Massive overkill, but who cares? */
+int unicount[MAX_FONTLEN];
+
+static void addpair(int fp, int un)
+{
+ int i;
+
+ if ( un <= 0xfffe )
+ {
+ /* Check it isn't a duplicate */
+
+ for ( i = 0 ; i < unicount[fp] ; i++ )
+ if ( unitable[fp][i] == un )
+ return;
+
+ /* Add to list */
+
+ if ( unicount[fp] > 254 )
+ {
+ fprintf(stderr, "ERROR: Only 255 unicodes/glyph permitted!\n");
+ exit(EX_DATAERR);
+ }
+
+ unitable[fp][unicount[fp]] = un;
+ unicount[fp]++;
+ }
+
+ /* otherwise: ignore */
+}
+
+int main(int argc, char *argv[])
+{
+ FILE *ctbl;
+ char *tblname;
+ char buffer[65536];
+ int fontlen;
+ int i, nuni, nent;
+ int fp0, fp1, un0, un1;
+ char *p, *p1;
+
+ if ( argc < 2 || argc > 5 )
+ usage(argv[0]);
+
+ if ( !strcmp(argv[1],"-") )
+ {
+ ctbl = stdin;
+ tblname = "stdin";
+ }
+ else
+ {
+ ctbl = fopen(tblname = argv[1], "r");
+ if ( !ctbl )
+ {
+ perror(tblname);
+ exit(EX_NOINPUT);
+ }
+ }
+
+ /* For now we assume the default font is always 256 characters. */
+ fontlen = 256;
+
+ /* Initialize table */
+
+ for ( i = 0 ; i < fontlen ; i++ )
+ unicount[i] = 0;
+
+ /* Now we come to the tricky part. Parse the input table. */
+
+ while ( fgets(buffer, sizeof(buffer), ctbl) != NULL )
+ {
+ if ( (p = strchr(buffer, '\n')) != NULL )
+ *p = '\0';
+ else
+ fprintf(stderr, "%s: Warning: line too long\n", tblname);
+
+ p = buffer;
+
+/*
+ * Syntax accepted:
+ * <fontpos> <unicode> <unicode> ...
+ * <range> idem
+ * <range> <unicode range>
+ *
+ * where <range> ::= <fontpos>-<fontpos>
+ * and <unicode> ::= U+<h><h><h><h>
+ * and <h> ::= <hexadecimal digit>
+ */
+
+ while (*p == ' ' || *p == '\t')
+ p++;
+ if (!*p || *p == '#')
+ continue; /* skip comment or blank line */
+
+ fp0 = strtol(p, &p1, 0);
+ if (p1 == p)
+ {
+ fprintf(stderr, "Bad input line: %s\n", buffer);
+ exit(EX_DATAERR);
+ }
+ p = p1;
+
+ while (*p == ' ' || *p == '\t')
+ p++;
+ if (*p == '-')
+ {
+ p++;
+ fp1 = strtol(p, &p1, 0);
+ if (p1 == p)
+ {
+ fprintf(stderr, "Bad input line: %s\n", buffer);
+ exit(EX_DATAERR);
+ }
+ p = p1;
+ }
+ else
+ fp1 = 0;
+
+ if ( fp0 < 0 || fp0 >= fontlen )
+ {
+ fprintf(stderr,
+ "%s: Glyph number (0x%x) larger than font length\n",
+ tblname, fp0);
+ exit(EX_DATAERR);
+ }
+ if ( fp1 && (fp1 < fp0 || fp1 >= fontlen) )
+ {
+ fprintf(stderr,
+ "%s: Bad end of range (0x%x)\n",
+ tblname, fp1);
+ exit(EX_DATAERR);
+ }
+
+ if (fp1)
+ {
+ /* we have a range; expect the word "idem" or a Unicode range of the
+ same length */
+ while (*p == ' ' || *p == '\t')
+ p++;
+ if (!strncmp(p, "idem", 4))
+ {
+ for (i=fp0; i<=fp1; i++)
+ addpair(i,i);
+ p += 4;
+ }
+ else
+ {
+ un0 = getunicode(&p);
+ while (*p == ' ' || *p == '\t')
+ p++;
+ if (*p != '-')
+ {
+ fprintf(stderr,
+"%s: Corresponding to a range of font positions, there should be a Unicode range\n",
+ tblname);
+ exit(EX_DATAERR);
+ }
+ p++;
+ un1 = getunicode(&p);
+ if (un0 < 0 || un1 < 0)
+ {
+ fprintf(stderr,
+"%s: Bad Unicode range corresponding to font position range 0x%x-0x%x\n",
+ tblname, fp0, fp1);
+ exit(EX_DATAERR);
+ }
+ if (un1 - un0 != fp1 - fp0)
+ {
+ fprintf(stderr,
+"%s: Unicode range U+%x-U+%x not of the same length as font position range 0x%x-0x%x\n",
+ tblname, un0, un1, fp0, fp1);
+ exit(EX_DATAERR);
+ }
+ for(i=fp0; i<=fp1; i++)
+ addpair(i,un0-fp0+i);
+ }
+ }
+ else
+ {
+ /* no range; expect a list of unicode values for a single font position */
+
+ while ( (un0 = getunicode(&p)) >= 0 )
+ addpair(fp0, un0);
+ }
+ while (*p == ' ' || *p == '\t')
+ p++;
+ if (*p && *p != '#')
+ fprintf(stderr, "%s: trailing junk (%s) ignored\n", tblname, p);
+ }
+
+ /* Okay, we hit EOF, now output hash table */
+
+ fclose(ctbl);
+
+
+ /* Compute total size of Unicode list */
+ nuni = 0;
+ for ( i = 0 ; i < fontlen ; i++ )
+ nuni += unicount[i];
+
+ printf("\
+/*\n\
+ * Do not edit this file; it was automatically generated by\n\
+ *\n\
+ * conmakehash %s > [this file]\n\
+ *\n\
+ */\n\
+\n\
+#include <linux/types.h>\n\
+\n\
+u8 dfont_unicount[%d] = \n\
+{\n\t", argv[1], fontlen);
+
+ for ( i = 0 ; i < fontlen ; i++ )
+ {
+ printf("%3d", unicount[i]);
+ if ( i == fontlen-1 )
+ printf("\n};\n");
+ else if ( i % 8 == 7 )
+ printf(",\n\t");
+ else
+ printf(", ");
+ }
+
+ printf("\nu16 dfont_unitable[%d] = \n{\n\t", nuni);
+
+ fp0 = 0;
+ nent = 0;
+ for ( i = 0 ; i < nuni ; i++ )
+ {
+ while ( nent >= unicount[fp0] )
+ {
+ fp0++;
+ nent = 0;
+ }
+ printf("0x%04x", unitable[fp0][nent++]);
+ if ( i == nuni-1 )
+ printf("\n};\n");
+ else if ( i % 8 == 7 )
+ printf(",\n\t");
+ else
+ printf(", ");
+ }
+
+ exit(EX_OK);
+}
diff --git a/drivers/tty/vt/vt.c b/drivers/tty/vt/vt.c
index 34aa39d1aed9..35d21cdb60d0 100644
--- a/drivers/tty/vt/vt.c
+++ b/drivers/tty/vt/vt.c
@@ -3326,8 +3326,9 @@ static int __init con_init(void)
console_lock();
- if (conswitchp)
- display_desc = conswitchp->con_startup();
+ if (!conswitchp)
+ conswitchp = &dummy_con;
+ display_desc = conswitchp->con_startup();
if (!display_desc) {
fg_console = 0;
console_unlock();
@@ -3567,7 +3568,6 @@ err:
#ifdef CONFIG_VT_HW_CONSOLE_BINDING
-/* unlocked version of unbind_con_driver() */
int do_unbind_con_driver(const struct consw *csw, int first, int last, int deflt)
{
struct module *owner = csw->owner;
@@ -4095,7 +4095,7 @@ static void con_driver_unregister_callback(struct work_struct *ignored)
* when a driver wants to take over some existing consoles
* and become default driver for newly opened ones.
*
- * do_take_over_console is basically a register followed by unbind
+ * do_take_over_console is basically a register followed by bind
*/
int do_take_over_console(const struct consw *csw, int first, int last, int deflt)
{
diff --git a/drivers/uio/uio_dmem_genirq.c b/drivers/uio/uio_dmem_genirq.c
index 81c88f7bbbcb..f6ab3f28c838 100644
--- a/drivers/uio/uio_dmem_genirq.c
+++ b/drivers/uio/uio_dmem_genirq.c
@@ -132,11 +132,13 @@ static int uio_dmem_genirq_irqcontrol(struct uio_info *dev_info, s32 irq_on)
if (irq_on) {
if (test_and_clear_bit(0, &priv->flags))
enable_irq(dev_info->irq);
+ spin_unlock_irqrestore(&priv->lock, flags);
} else {
- if (!test_and_set_bit(0, &priv->flags))
+ if (!test_and_set_bit(0, &priv->flags)) {
+ spin_unlock_irqrestore(&priv->lock, flags);
disable_irq(dev_info->irq);
+ }
}
- spin_unlock_irqrestore(&priv->lock, flags);
return 0;
}
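
The reordering above matters because disable_irq() blocks until any running handler completes, and the UIO handler itself takes priv->lock, so calling it with the lock held can deadlock. A distilled sketch of the safe ordering, with the already-masked branch also dropping the lock so every path releases it (struct and field names below are stand-ins for the UIO private data):

#include <linux/bitops.h>
#include <linux/interrupt.h>
#include <linux/spinlock.h>

struct foo_priv {
        spinlock_t lock;
        unsigned long flags;            /* bit 0: IRQ currently masked */
};

static int foo_irqcontrol(struct foo_priv *priv, int irq, bool irq_on)
{
        unsigned long irqflags;

        spin_lock_irqsave(&priv->lock, irqflags);
        if (irq_on) {
                if (test_and_clear_bit(0, &priv->flags))
                        enable_irq(irq);
                spin_unlock_irqrestore(&priv->lock, irqflags);
        } else if (!test_and_set_bit(0, &priv->flags)) {
                /*
                 * Unlock before disable_irq(): it waits for a running
                 * handler, and the handler takes priv->lock.
                 */
                spin_unlock_irqrestore(&priv->lock, irqflags);
                disable_irq(irq);
        } else {
                /* Already masked; the lock must still be dropped */
                spin_unlock_irqrestore(&priv->lock, irqflags);
        }
        return 0;
}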
diff --git a/drivers/uio/uio_pdrv_genirq.c b/drivers/uio/uio_pdrv_genirq.c
index 1303b165055b..fc25ce90da3b 100644
--- a/drivers/uio/uio_pdrv_genirq.c
+++ b/drivers/uio/uio_pdrv_genirq.c
@@ -156,6 +156,8 @@ static int uio_pdrv_genirq_probe(struct platform_device *pdev)
uioinfo->irq = ret;
if (ret == -ENXIO && pdev->dev.of_node)
uioinfo->irq = UIO_IRQ_NONE;
+ else if (ret == -EPROBE_DEFER)
+ return ret;
else if (ret < 0) {
dev_err(&pdev->dev, "failed to get IRQ\n");
return ret;
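
The uio_pdrv_genirq change orders the platform_get_irq() error checks so -EPROBE_DEFER is propagated before the catch-all error branch, keeping the deferral silent and retryable instead of logging a spurious failure. The shape of the pattern (hypothetical probe excerpt):

#include <linux/platform_device.h>

static int foo_get_irq(struct platform_device *pdev)
{
        int ret = platform_get_irq(pdev, 0);

        if (ret == -EPROBE_DEFER)
                return ret;     /* resource not ready: retry later, no log */
        if (ret < 0) {
                dev_err(&pdev->dev, "failed to get IRQ\n");
                return ret;
        }
        return ret;             /* valid IRQ number */
}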
diff --git a/drivers/usb/cdns3/Kconfig b/drivers/usb/cdns3/Kconfig
index 2a1e89d12ed9..84716d216ae5 100644
--- a/drivers/usb/cdns3/Kconfig
+++ b/drivers/usb/cdns3/Kconfig
@@ -53,4 +53,14 @@ config USB_CDNS3_TI
e.g. J721e.
+config USB_CDNS3_IMX
+ tristate "Cadence USB3 support on NXP i.MX platforms"
+ depends on ARCH_MXC || COMPILE_TEST
+ default USB_CDNS3
+ help
+ Say 'Y' or 'M' here if you are building for NXP i.MX
+ platforms that contain the Cadence USB3 controller core.
+
+ For example, imx8qm and imx8qxp.
+
endif
diff --git a/drivers/usb/cdns3/Makefile b/drivers/usb/cdns3/Makefile
index 948e6b88d1a9..d47e341a6f39 100644
--- a/drivers/usb/cdns3/Makefile
+++ b/drivers/usb/cdns3/Makefile
@@ -15,3 +15,4 @@ cdns3-$(CONFIG_USB_CDNS3_HOST) += host.o
obj-$(CONFIG_USB_CDNS3_PCI_WRAP) += cdns3-pci-wrap.o
obj-$(CONFIG_USB_CDNS3_TI) += cdns3-ti.o
+obj-$(CONFIG_USB_CDNS3_IMX) += cdns3-imx.o
diff --git a/drivers/usb/cdns3/cdns3-imx.c b/drivers/usb/cdns3/cdns3-imx.c
new file mode 100644
index 000000000000..aba988e71958
--- /dev/null
+++ b/drivers/usb/cdns3/cdns3-imx.c
@@ -0,0 +1,216 @@
+// SPDX-License-Identifier: GPL-2.0
+/**
+ * cdns3-imx.c - NXP i.MX specific Glue layer for Cadence USB Controller
+ *
+ * Copyright (C) 2019 NXP
+ */
+
+#include <linux/bits.h>
+#include <linux/clk.h>
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/interrupt.h>
+#include <linux/platform_device.h>
+#include <linux/dma-mapping.h>
+#include <linux/io.h>
+#include <linux/of_platform.h>
+#include <linux/iopoll.h>
+
+#define USB3_CORE_CTRL1 0x00
+#define USB3_CORE_CTRL2 0x04
+#define USB3_INT_REG 0x08
+#define USB3_CORE_STATUS 0x0c
+#define XHCI_DEBUG_LINK_ST 0x10
+#define XHCI_DEBUG_BUS 0x14
+#define USB3_SSPHY_CTRL1 0x40
+#define USB3_SSPHY_CTRL2 0x44
+#define USB3_SSPHY_STATUS 0x4c
+#define USB2_PHY_CTRL1 0x50
+#define USB2_PHY_CTRL2 0x54
+#define USB2_PHY_STATUS 0x5c
+
+/* Register bits definition */
+
+/* USB3_CORE_CTRL1 */
+#define SW_RESET_MASK (0x3f << 26)
+#define PWR_SW_RESET BIT(31)
+#define APB_SW_RESET BIT(30)
+#define AXI_SW_RESET BIT(29)
+#define RW_SW_RESET BIT(28)
+#define PHY_SW_RESET BIT(27)
+#define PHYAHB_SW_RESET BIT(26)
+#define ALL_SW_RESET (PWR_SW_RESET | APB_SW_RESET | AXI_SW_RESET | \
+ RW_SW_RESET | PHY_SW_RESET | PHYAHB_SW_RESET)
+#define OC_DISABLE BIT(9)
+#define MDCTRL_CLK_SEL BIT(7)
+#define MODE_STRAP_MASK (0x7)
+#define DEV_MODE (1 << 2)
+#define HOST_MODE (1 << 1)
+#define OTG_MODE (1 << 0)
+
+/* USB3_INT_REG */
+#define CLK_125_REQ BIT(29)
+#define LPM_CLK_REQ BIT(28)
+#define DEVU3_WAEKUP_EN BIT(14)
+#define OTG_WAKEUP_EN BIT(12)
+#define DEV_INT_EN (3 << 8) /* DEV INT b9:8 */
+#define HOST_INT1_EN (1 << 0) /* HOST INT b7:0 */
+
+/* USB3_CORE_STATUS */
+#define MDCTRL_CLK_STATUS BIT(15)
+#define DEV_POWER_ON_READY BIT(13)
+#define HOST_POWER_ON_READY BIT(12)
+
+/* USB3_SSPHY_STATUS */
+#define CLK_VALID_MASK (0x3f << 26)
+#define CLK_VALID_COMPARE_BITS (0xf << 28)
+#define PHY_REFCLK_REQ (1 << 0)
+
+struct cdns_imx {
+ struct device *dev;
+ void __iomem *noncore;
+ struct clk_bulk_data *clks;
+ int num_clks;
+};
+
+static inline u32 cdns_imx_readl(struct cdns_imx *data, u32 offset)
+{
+ return readl(data->noncore + offset);
+}
+
+static inline void cdns_imx_writel(struct cdns_imx *data, u32 offset, u32 value)
+{
+ writel(value, data->noncore + offset);
+}
+
+static const struct clk_bulk_data imx_cdns3_core_clks[] = {
+ { .id = "usb3_lpm_clk" },
+ { .id = "usb3_bus_clk" },
+ { .id = "usb3_aclk" },
+ { .id = "usb3_ipg_clk" },
+ { .id = "usb3_core_pclk" },
+};
+
+static int cdns_imx_noncore_init(struct cdns_imx *data)
+{
+ u32 value;
+ int ret;
+ struct device *dev = data->dev;
+
+ cdns_imx_writel(data, USB3_SSPHY_STATUS, CLK_VALID_MASK);
+ udelay(1);
+ ret = readl_poll_timeout(data->noncore + USB3_SSPHY_STATUS, value,
+ (value & CLK_VALID_COMPARE_BITS) == CLK_VALID_COMPARE_BITS,
+ 10, 100000);
+ if (ret) {
+ dev_err(dev, "wait clkvld timeout\n");
+ return ret;
+ }
+
+ value = cdns_imx_readl(data, USB3_CORE_CTRL1);
+ value |= ALL_SW_RESET;
+ cdns_imx_writel(data, USB3_CORE_CTRL1, value);
+ udelay(1);
+
+ value = cdns_imx_readl(data, USB3_CORE_CTRL1);
+ value = (value & ~MODE_STRAP_MASK) | OTG_MODE | OC_DISABLE;
+ cdns_imx_writel(data, USB3_CORE_CTRL1, value);
+
+ value = cdns_imx_readl(data, USB3_INT_REG);
+ value |= HOST_INT1_EN | DEV_INT_EN;
+ cdns_imx_writel(data, USB3_INT_REG, value);
+
+ value = cdns_imx_readl(data, USB3_CORE_CTRL1);
+ value &= ~ALL_SW_RESET;
+ cdns_imx_writel(data, USB3_CORE_CTRL1, value);
+ return ret;
+}
+
+static int cdns_imx_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct device_node *node = dev->of_node;
+ struct cdns_imx *data;
+ int ret;
+
+ if (!node)
+ return -ENODEV;
+
+ data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
+ platform_set_drvdata(pdev, data);
+ data->dev = dev;
+ data->noncore = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(data->noncore)) {
+ dev_err(dev, "can't map IOMEM resource\n");
+ return PTR_ERR(data->noncore);
+ }
+
+ data->num_clks = ARRAY_SIZE(imx_cdns3_core_clks);
+ data->clks = (struct clk_bulk_data *)imx_cdns3_core_clks;
+ ret = devm_clk_bulk_get(dev, data->num_clks, data->clks);
+ if (ret)
+ return ret;
+
+ ret = clk_bulk_prepare_enable(data->num_clks, data->clks);
+ if (ret)
+ return ret;
+
+ ret = cdns_imx_noncore_init(data);
+ if (ret)
+ goto err;
+
+ ret = of_platform_populate(node, NULL, NULL, dev);
+ if (ret) {
+ dev_err(dev, "failed to create children: %d\n", ret);
+ goto err;
+ }
+
+ return ret;
+
+err:
+ clk_bulk_disable_unprepare(data->num_clks, data->clks);
+ return ret;
+}
+
+static int cdns_imx_remove_core(struct device *dev, void *data)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+
+ platform_device_unregister(pdev);
+
+ return 0;
+}
+
+static int cdns_imx_remove(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+
+ device_for_each_child(dev, NULL, cdns_imx_remove_core);
+ platform_set_drvdata(pdev, NULL);
+
+ return 0;
+}
+
+static const struct of_device_id cdns_imx_of_match[] = {
+ { .compatible = "fsl,imx8qm-usb3", },
+ {},
+};
+MODULE_DEVICE_TABLE(of, cdns_imx_of_match);
+
+static struct platform_driver cdns_imx_driver = {
+ .probe = cdns_imx_probe,
+ .remove = cdns_imx_remove,
+ .driver = {
+ .name = "cdns3-imx",
+ .of_match_table = cdns_imx_of_match,
+ },
+};
+module_platform_driver(cdns_imx_driver);
+
+MODULE_ALIAS("platform:cdns3-imx");
+MODULE_AUTHOR("Peter Chen <peter.chen@nxp.com>");
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("Cadence USB3 i.MX Glue Layer");
diff --git a/drivers/usb/cdns3/debug.h b/drivers/usb/cdns3/debug.h
index 2c9afbfe988b..a5c6a29e1340 100644
--- a/drivers/usb/cdns3/debug.h
+++ b/drivers/usb/cdns3/debug.h
@@ -140,7 +140,7 @@ static inline char *cdns3_dbg_ring(struct cdns3_endpoint *priv_ep,
trb_per_sector = TRBS_PER_SEGMENT;
if (trb_per_sector > TRBS_PER_SEGMENT) {
- sprintf(str + ret, "\t\tTo big transfer ring %d\n",
+ sprintf(str + ret, "\t\tTransfer ring %d too big\n",
trb_per_sector);
return str;
}
diff --git a/drivers/usb/cdns3/gadget.c b/drivers/usb/cdns3/gadget.c
index 02f6ca2cb1ba..736b0c6e27fe 100644
--- a/drivers/usb/cdns3/gadget.c
+++ b/drivers/usb/cdns3/gadget.c
@@ -71,6 +71,23 @@ static int __cdns3_gadget_ep_queue(struct usb_ep *ep,
struct usb_request *request,
gfp_t gfp_flags);
+static int cdns3_ep_run_transfer(struct cdns3_endpoint *priv_ep,
+ struct usb_request *request);
+
+static int cdns3_ep_run_stream_transfer(struct cdns3_endpoint *priv_ep,
+ struct usb_request *request);
+
+/**
+ * cdns3_clear_register_bit - clear bit in given register.
+ * @ptr: address of device controller register to be read and changed
+ * @mask: bits requested to clear
+ */
+void cdns3_clear_register_bit(void __iomem *ptr, u32 mask)
+{
+ mask = readl(ptr) & ~mask;
+ writel(mask, ptr);
+}
+
/**
* cdns3_set_register_bit - set bit in given register.
* @ptr: address of device controller register to be read and changed
@@ -150,6 +167,21 @@ void cdns3_select_ep(struct cdns3_device *priv_dev, u32 ep)
writel(ep, &priv_dev->regs->ep_sel);
}
+/**
+ * cdns3_get_tdl - gets current tdl for selected endpoint.
+ * @priv_dev: extended gadget object
+ *
+ * Before calling this function the appropriate endpoint must
+ * be selected by means of cdns3_select_ep function.
+ */
+static int cdns3_get_tdl(struct cdns3_device *priv_dev)
+{
+ if (priv_dev->dev_ver < DEV_VER_V3)
+ return EP_CMD_TDL_GET(readl(&priv_dev->regs->ep_cmd));
+ else
+ return readl(&priv_dev->regs->ep_tdl);
+}
+
dma_addr_t cdns3_trb_virt_to_dma(struct cdns3_endpoint *priv_ep,
struct cdns3_trb *trb)
{
@@ -166,7 +198,22 @@ int cdns3_ring_size(struct cdns3_endpoint *priv_ep)
case USB_ENDPOINT_XFER_CONTROL:
return TRB_CTRL_RING_SIZE;
default:
- return TRB_RING_SIZE;
+ if (priv_ep->use_streams)
+ return TRB_STREAM_RING_SIZE;
+ else
+ return TRB_RING_SIZE;
+ }
+}
+
+static void cdns3_free_trb_pool(struct cdns3_endpoint *priv_ep)
+{
+ struct cdns3_device *priv_dev = priv_ep->cdns3_dev;
+
+ if (priv_ep->trb_pool) {
+ dma_free_coherent(priv_dev->sysdev,
+ cdns3_ring_size(priv_ep),
+ priv_ep->trb_pool, priv_ep->trb_pool_dma);
+ priv_ep->trb_pool = NULL;
}
}
@@ -180,8 +227,12 @@ int cdns3_allocate_trb_pool(struct cdns3_endpoint *priv_ep)
{
struct cdns3_device *priv_dev = priv_ep->cdns3_dev;
int ring_size = cdns3_ring_size(priv_ep);
+ int num_trbs = ring_size / TRB_SIZE;
struct cdns3_trb *link_trb;
+ if (priv_ep->trb_pool && priv_ep->alloc_ring_size < ring_size)
+ cdns3_free_trb_pool(priv_ep);
+
if (!priv_ep->trb_pool) {
priv_ep->trb_pool = dma_alloc_coherent(priv_dev->sysdev,
ring_size,
@@ -189,32 +240,30 @@ int cdns3_allocate_trb_pool(struct cdns3_endpoint *priv_ep)
GFP_DMA32 | GFP_ATOMIC);
if (!priv_ep->trb_pool)
return -ENOMEM;
- } else {
+
+ priv_ep->alloc_ring_size = ring_size;
memset(priv_ep->trb_pool, 0, ring_size);
}
+ priv_ep->num_trbs = num_trbs;
+
if (!priv_ep->num)
return 0;
- priv_ep->num_trbs = ring_size / TRB_SIZE;
- /* Initialize the last TRB as Link TRB. */
+ /* Initialize the last TRB as Link TRB */
link_trb = (priv_ep->trb_pool + (priv_ep->num_trbs - 1));
- link_trb->buffer = TRB_BUFFER(priv_ep->trb_pool_dma);
- link_trb->control = TRB_CYCLE | TRB_TYPE(TRB_LINK) | TRB_TOGGLE;
-
- return 0;
-}
-static void cdns3_free_trb_pool(struct cdns3_endpoint *priv_ep)
-{
- struct cdns3_device *priv_dev = priv_ep->cdns3_dev;
-
- if (priv_ep->trb_pool) {
- dma_free_coherent(priv_dev->sysdev,
- cdns3_ring_size(priv_ep),
- priv_ep->trb_pool, priv_ep->trb_pool_dma);
- priv_ep->trb_pool = NULL;
+ if (priv_ep->use_streams) {
+ /*
+ * For stream capable endpoints the driver uses a single TRB.
+ * The last TRB has its cycle bit zeroed.
+ */
+ link_trb->control = 0;
+ } else {
+ link_trb->buffer = TRB_BUFFER(priv_ep->trb_pool_dma);
+ link_trb->control = TRB_CYCLE | TRB_TYPE(TRB_LINK) | TRB_TOGGLE;
}
+ return 0;
}
/**
@@ -253,6 +302,7 @@ void cdns3_hw_reset_eps_config(struct cdns3_device *priv_dev)
priv_dev->onchip_used_size = 0;
priv_dev->out_mem_is_allocated = 0;
priv_dev->wait_for_setup = 0;
+ priv_dev->using_streams = 0;
}
/**
@@ -356,17 +406,43 @@ static int cdns3_start_all_request(struct cdns3_device *priv_dev,
{
struct usb_request *request;
int ret = 0;
+ u8 pending_empty = list_empty(&priv_ep->pending_req_list);
+
+ /*
+ * If the last pending transfer is INTERNAL
+ * OR streams are enabled for this endpoint
+ * do NOT start a new transfer while the last one is still pending
+ */
+ if (!pending_empty) {
+ struct cdns3_request *priv_req;
+
+ request = cdns3_next_request(&priv_ep->pending_req_list);
+ priv_req = to_cdns3_request(request);
+ if ((priv_req->flags & REQUEST_INTERNAL) ||
+ (priv_ep->flags & EP_TDLCHK_EN) ||
+ priv_ep->use_streams) {
+ trace_printk("Blocking external request\n");
+ return ret;
+ }
+ }
while (!list_empty(&priv_ep->deferred_req_list)) {
request = cdns3_next_request(&priv_ep->deferred_req_list);
- ret = cdns3_ep_run_transfer(priv_ep, request);
+ if (!priv_ep->use_streams) {
+ ret = cdns3_ep_run_transfer(priv_ep, request);
+ } else {
+ priv_ep->stream_sg_idx = 0;
+ ret = cdns3_ep_run_stream_transfer(priv_ep, request);
+ }
if (ret)
return ret;
list_del(&request->list);
list_add_tail(&request->list,
&priv_ep->pending_req_list);
+ if (request->stream_id != 0 || (priv_ep->flags & EP_TDLCHK_EN))
+ break;
}
priv_ep->flags &= ~EP_RING_FULL;
@@ -379,7 +455,7 @@ static int cdns3_start_all_request(struct cdns3_device *priv_dev,
* buffer for unblocking on-chip FIFO buffer. This flag will be cleared
* if before first DESCMISS interrupt the DMA will be armed.
*/
-#define cdns3_wa2_enable_detection(priv_dev, ep_priv, reg) do { \
+#define cdns3_wa2_enable_detection(priv_dev, priv_ep, reg) do { \
if (!priv_ep->dir && priv_ep->type != USB_ENDPOINT_XFER_ISOC) { \
priv_ep->flags |= EP_QUIRK_EXTRA_BUF_DET; \
(reg) |= EP_STS_EN_DESCMISEN; \
@@ -450,10 +526,17 @@ struct usb_request *cdns3_wa2_gadget_giveback(struct cdns3_device *priv_dev,
if (!req)
return NULL;
+ /* unmap the gadget request before copying data */
+ usb_gadget_unmap_request_by_dev(priv_dev->sysdev, req,
+ priv_ep->dir);
+
cdns3_wa2_descmiss_copy_data(priv_ep, req);
if (!(priv_ep->flags & EP_QUIRK_END_TRANSFER) &&
req->length != req->actual) {
/* wait for next part of transfer */
+ /* re-map the gadget request buffer */
+ usb_gadget_map_request_by_dev(priv_dev->sysdev, req,
+ usb_endpoint_dir_in(priv_ep->endpoint.desc));
return NULL;
}
@@ -570,6 +653,13 @@ static void cdns3_wa2_descmissing_packet(struct cdns3_endpoint *priv_ep)
{
struct cdns3_request *priv_req;
struct usb_request *request;
+ u8 pending_empty = list_empty(&priv_ep->pending_req_list);
+
+ /* check for pending transfer */
+ if (!pending_empty) {
+ trace_cdns3_wa2(priv_ep, "Ignoring Descriptor missing IRQ\n");
+ return;
+ }
if (priv_ep->flags & EP_QUIRK_EXTRA_BUF_DET) {
priv_ep->flags &= ~EP_QUIRK_EXTRA_BUF_DET;
@@ -578,8 +668,10 @@ static void cdns3_wa2_descmissing_packet(struct cdns3_endpoint *priv_ep)
trace_cdns3_wa2(priv_ep, "Descriptor Missing detected\n");
- if (priv_ep->wa2_counter >= CDNS3_WA2_NUM_BUFFERS)
+ if (priv_ep->wa2_counter >= CDNS3_WA2_NUM_BUFFERS) {
+ trace_cdns3_wa2(priv_ep, "WA2 overflow\n");
cdns3_wa2_remove_old_request(priv_ep);
+ }
request = cdns3_gadget_ep_alloc_request(&priv_ep->endpoint,
GFP_ATOMIC);
@@ -621,6 +713,78 @@ err:
"Failed: No sufficient memory for DESCMIS\n");
}
+static void cdns3_wa2_reset_tdl(struct cdns3_device *priv_dev)
+{
+ u16 tdl = EP_CMD_TDL_GET(readl(&priv_dev->regs->ep_cmd));
+
+ if (tdl) {
+ u16 reset_val = EP_CMD_TDL_MAX + 1 - tdl;
+
+ writel(EP_CMD_TDL_SET(reset_val) | EP_CMD_STDL,
+ &priv_dev->regs->ep_cmd);
+ }
+}
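Note on the arithmetic in cdns3_wa2_reset_tdl(): EP_CMD_TDL is a 7-bit field (EP_CMD_TDL_MAX == GENMASK(15, 9) >> 9 == 127), so writing EP_CMD_TDL_MAX + 1 - tdl brings the running count back to zero by modular wrap-around. A minimal standalone sketch of that identity (illustrative only, not driver code; it assumes the controller accumulates written TDL values modulo EP_CMD_TDL_MAX + 1):

```c
/* Illustration only: assumes the controller adds the written TDL
 * value to its running count modulo (EP_CMD_TDL_MAX + 1).
 */
#include <stdio.h>

#define EP_CMD_TDL_MAX 127 /* GENMASK(15, 9) >> 9 */

int main(void)
{
    unsigned int tdl = 5; /* current hardware TDL (assumed) */
    unsigned int reset_val = EP_CMD_TDL_MAX + 1 - tdl;

    /* 5 + 123 == 128 == 0 (mod 128): the running TDL wraps to zero */
    printf("%u\n", (tdl + reset_val) % (EP_CMD_TDL_MAX + 1));
    return 0;
}
```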
+
+static void cdns3_wa2_check_outq_status(struct cdns3_device *priv_dev)
+{
+ u32 ep_sts_reg;
+
+ /* select EP0-out */
+ cdns3_select_ep(priv_dev, 0);
+
+ ep_sts_reg = readl(&priv_dev->regs->ep_sts);
+
+ if (EP_STS_OUTQ_VAL(ep_sts_reg)) {
+ u32 outq_ep_num = EP_STS_OUTQ_NO(ep_sts_reg);
+ struct cdns3_endpoint *outq_ep = priv_dev->eps[outq_ep_num];
+
+ if ((outq_ep->flags & EP_ENABLED) && !(outq_ep->use_streams) &&
+ outq_ep->type != USB_ENDPOINT_XFER_ISOC && outq_ep_num) {
+ u8 pending_empty = list_empty(&outq_ep->pending_req_list);
+
+ if ((outq_ep->flags & EP_QUIRK_EXTRA_BUF_DET) ||
+ (outq_ep->flags & EP_QUIRK_EXTRA_BUF_EN) ||
+ !pending_empty) {
+ } else {
+ u32 ep_sts_en_reg;
+ u32 ep_cmd_reg;
+
+ cdns3_select_ep(priv_dev, outq_ep->num |
+ outq_ep->dir);
+ ep_sts_en_reg = readl(&priv_dev->regs->ep_sts_en);
+ ep_cmd_reg = readl(&priv_dev->regs->ep_cmd);
+
+ outq_ep->flags |= EP_TDLCHK_EN;
+ cdns3_set_register_bit(&priv_dev->regs->ep_cfg,
+ EP_CFG_TDL_CHK);
+
+ cdns3_wa2_enable_detection(priv_dev, outq_ep,
+ ep_sts_en_reg);
+ writel(ep_sts_en_reg,
+ &priv_dev->regs->ep_sts_en);
+ /* reset tdl value to zero */
+ cdns3_wa2_reset_tdl(priv_dev);
+ /*
+ * Memory barrier - Reset tdl before ringing the
+ * doorbell.
+ */
+ wmb();
+ if (EP_CMD_DRDY & ep_cmd_reg) {
+ trace_cdns3_wa2(outq_ep, "Enabling WA2 skipping doorbell\n");
+
+ } else {
+ trace_cdns3_wa2(outq_ep, "Enabling WA2 ringing doorbell\n");
+ /*
+ * ring doorbell to generate DESCMIS irq
+ */
+ writel(EP_CMD_DRDY,
+ &priv_dev->regs->ep_cmd);
+ }
+ }
+ }
+ }
+}
+
/**
* cdns3_gadget_giveback - call struct usb_request's ->complete callback
* @priv_ep: The endpoint to whom the request belongs to
@@ -807,14 +971,120 @@ static void cdns3_wa1_tray_restore_cycle_bit(struct cdns3_device *priv_dev,
cdns3_wa1_restore_cycle_bit(priv_ep);
}
+static int cdns3_ep_run_stream_transfer(struct cdns3_endpoint *priv_ep,
+ struct usb_request *request)
+{
+ struct cdns3_device *priv_dev = priv_ep->cdns3_dev;
+ struct cdns3_request *priv_req;
+ struct cdns3_trb *trb;
+ dma_addr_t trb_dma;
+ int address;
+ u32 control;
+ u32 length;
+ u32 tdl;
+ unsigned int sg_idx = priv_ep->stream_sg_idx;
+
+ priv_req = to_cdns3_request(request);
+ address = priv_ep->endpoint.desc->bEndpointAddress;
+
+ priv_ep->flags |= EP_PENDING_REQUEST;
+
+ /* must allocate buffer aligned to 8 */
+ if (priv_req->flags & REQUEST_UNALIGNED)
+ trb_dma = priv_req->aligned_buf->dma;
+ else
+ trb_dma = request->dma;
+
+ /* For stream capable endpoints the driver uses only a single TD. */
+ trb = priv_ep->trb_pool + priv_ep->enqueue;
+ priv_req->start_trb = priv_ep->enqueue;
+ priv_req->end_trb = priv_req->start_trb;
+ priv_req->trb = trb;
+
+ cdns3_select_ep(priv_ep->cdns3_dev, address);
+
+ control = TRB_TYPE(TRB_NORMAL) | TRB_CYCLE |
+ TRB_STREAM_ID(priv_req->request.stream_id) | TRB_ISP;
+
+ if (!request->num_sgs) {
+ trb->buffer = TRB_BUFFER(trb_dma);
+ length = request->length;
+ } else {
+ trb->buffer = TRB_BUFFER(request->sg[sg_idx].dma_address);
+ length = request->sg[sg_idx].length;
+ }
+
+ tdl = DIV_ROUND_UP(length, priv_ep->endpoint.maxpacket);
+
+ trb->length = TRB_BURST_LEN(16 /*priv_ep->trb_burst_size*/) |
+ TRB_LEN(length);
+
+ /*
+ * For controller versions DEV_VER_V2 and above, USB_CONF2_EN_TDL_TRB
+ * is enabled in the DMULT configuration.
+ * This enables TRB-based TDL calculation, hence the TDL is set in the TRB.
+ */
+ if (priv_dev->dev_ver >= DEV_VER_V2) {
+ if (priv_dev->gadget.speed == USB_SPEED_SUPER)
+ trb->length |= TRB_TDL_SS_SIZE(tdl);
+ }
+ priv_req->flags |= REQUEST_PENDING;
+
+ trb->control = control;
+
+ trace_cdns3_prepare_trb(priv_ep, priv_req->trb);
+
+ /*
+ * Memory barrier - Cycle Bit must be set before trb->length and
+ * trb->buffer fields.
+ */
+ wmb();
+
+ /* always first element */
+ writel(EP_TRADDR_TRADDR(priv_ep->trb_pool_dma),
+ &priv_dev->regs->ep_traddr);
+
+ if (!(priv_ep->flags & EP_STALLED)) {
+ trace_cdns3_ring(priv_ep);
+ /* clearing TRBERR and EP_STS_DESCMIS before setting DRDY */
+ writel(EP_STS_TRBERR | EP_STS_DESCMIS, &priv_dev->regs->ep_sts);
+
+ priv_ep->prime_flag = false;
+
+ /*
+ * For controller version DEV_VER_V2 the TDL calculation
+ * is based on the TRB.
+ */
+
+ if (priv_dev->dev_ver < DEV_VER_V2)
+ writel(EP_CMD_TDL_SET(tdl) | EP_CMD_STDL,
+ &priv_dev->regs->ep_cmd);
+ else if (priv_dev->dev_ver > DEV_VER_V2)
+ writel(tdl, &priv_dev->regs->ep_tdl);
+
+ priv_ep->last_stream_id = priv_req->request.stream_id;
+ writel(EP_CMD_DRDY, &priv_dev->regs->ep_cmd);
+ writel(EP_CMD_ERDY_SID(priv_req->request.stream_id) |
+ EP_CMD_ERDY, &priv_dev->regs->ep_cmd);
+
+ trace_cdns3_doorbell_epx(priv_ep->name,
+ readl(&priv_dev->regs->ep_traddr));
+ }
+
+ /* WORKAROUND for transition to L0 */
+ __cdns3_gadget_wakeup(priv_dev);
+
+ return 0;
+}
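A note on the tdl value computed above: it counts how many maxpacket-sized packets the payload needs, hence DIV_ROUND_UP(). A standalone check of the arithmetic with assumed values (e.g. the 1024-byte SuperSpeed bulk maxpacket):

```c
/* Standalone check; maxpacket and lengths are assumed values. */
#include <stdio.h>

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

int main(void)
{
    unsigned int maxpacket = 1024;

    printf("%u\n", DIV_ROUND_UP(3000u, maxpacket)); /* 3 packets */
    printf("%u\n", DIV_ROUND_UP(1024u, maxpacket)); /* 1 packet  */
    return 0;
}
```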
+
/**
* cdns3_ep_run_transfer - start transfer on no-default endpoint hardware
* @priv_ep: endpoint object
*
* Returns zero on success or negative value on failure
*/
-int cdns3_ep_run_transfer(struct cdns3_endpoint *priv_ep,
- struct usb_request *request)
+static int cdns3_ep_run_transfer(struct cdns3_endpoint *priv_ep,
+ struct usb_request *request)
{
struct cdns3_device *priv_dev = priv_ep->cdns3_dev;
struct cdns3_request *priv_req;
@@ -826,6 +1096,7 @@ int cdns3_ep_run_transfer(struct cdns3_endpoint *priv_ep,
int address;
u32 control;
int pcs;
+ u16 total_tdl = 0;
if (priv_ep->type == USB_ENDPOINT_XFER_ISOC)
num_trb = priv_ep->interval;
@@ -910,6 +1181,9 @@ int cdns3_ep_run_transfer(struct cdns3_endpoint *priv_ep,
if (likely(priv_dev->dev_ver >= DEV_VER_V2))
td_size = DIV_ROUND_UP(length,
priv_ep->endpoint.maxpacket);
+ else if (priv_ep->flags & EP_TDLCHK_EN)
+ total_tdl += DIV_ROUND_UP(length,
+ priv_ep->endpoint.maxpacket);
trb->length = TRB_BURST_LEN(priv_ep->trb_burst_size) |
TRB_LEN(length);
@@ -954,6 +1228,23 @@ int cdns3_ep_run_transfer(struct cdns3_endpoint *priv_ep,
if (sg_iter == 1)
trb->control |= TRB_IOC | TRB_ISP;
+ if (priv_dev->dev_ver < DEV_VER_V2 &&
+ (priv_ep->flags & EP_TDLCHK_EN)) {
+ u16 tdl = total_tdl;
+ u16 old_tdl = EP_CMD_TDL_GET(readl(&priv_dev->regs->ep_cmd));
+
+ if (tdl > EP_CMD_TDL_MAX) {
+ tdl = EP_CMD_TDL_MAX;
+ priv_ep->pending_tdl = total_tdl - EP_CMD_TDL_MAX;
+ }
+
+ if (old_tdl < tdl) {
+ tdl -= old_tdl;
+ writel(EP_CMD_TDL_SET(tdl) | EP_CMD_STDL,
+ &priv_dev->regs->ep_cmd);
+ }
+ }
+
/*
* Memory barrier - cycle bit must be set before other fields in trb.
*/
@@ -1153,29 +1444,56 @@ static void cdns3_transfer_completed(struct cdns3_device *priv_dev,
cdns3_move_deq_to_next_trb(priv_req);
}
- /* Re-select endpoint. It could be changed by other CPU during
- * handling usb_gadget_giveback_request.
- */
- cdns3_select_ep(priv_dev, priv_ep->endpoint.address);
+ if (!request->stream_id) {
+ /* Re-select endpoint. It could be changed by another CPU
+ * while handling usb_gadget_giveback_request.
+ */
+ cdns3_select_ep(priv_dev, priv_ep->endpoint.address);
- if (!cdns3_request_handled(priv_ep, priv_req))
- goto prepare_next_td;
+ if (!cdns3_request_handled(priv_ep, priv_req))
+ goto prepare_next_td;
- trb = priv_ep->trb_pool + priv_ep->dequeue;
- trace_cdns3_complete_trb(priv_ep, trb);
+ trb = priv_ep->trb_pool + priv_ep->dequeue;
+ trace_cdns3_complete_trb(priv_ep, trb);
+
+ if (trb != priv_req->trb)
+ dev_warn(priv_dev->dev,
+ "request_trb=0x%p, queue_trb=0x%p\n",
+ priv_req->trb, trb);
+
+ request->actual = TRB_LEN(le32_to_cpu(trb->length));
+ cdns3_move_deq_to_next_trb(priv_req);
+ cdns3_gadget_giveback(priv_ep, priv_req, 0);
+
+ if (priv_ep->type != USB_ENDPOINT_XFER_ISOC &&
+ TRBS_PER_SEGMENT == 2)
+ break;
+ } else {
+ /* Re-select endpoint. It could be changed by another CPU
+ * while handling usb_gadget_giveback_request.
+ */
+ cdns3_select_ep(priv_dev, priv_ep->endpoint.address);
+
+ trb = priv_ep->trb_pool;
+ trace_cdns3_complete_trb(priv_ep, trb);
- if (trb != priv_req->trb)
- dev_warn(priv_dev->dev,
- "request_trb=0x%p, queue_trb=0x%p\n",
- priv_req->trb, trb);
+ if (trb != priv_req->trb)
+ dev_warn(priv_dev->dev,
+ "request_trb=0x%p, queue_trb=0x%p\n",
+ priv_req->trb, trb);
- request->actual = TRB_LEN(le32_to_cpu(trb->length));
- cdns3_move_deq_to_next_trb(priv_req);
- cdns3_gadget_giveback(priv_ep, priv_req, 0);
+ request->actual += TRB_LEN(le32_to_cpu(trb->length));
- if (priv_ep->type != USB_ENDPOINT_XFER_ISOC &&
- TRBS_PER_SEGMENT == 2)
+ if (!request->num_sgs ||
+ (request->num_sgs == (priv_ep->stream_sg_idx + 1))) {
+ priv_ep->stream_sg_idx = 0;
+ cdns3_gadget_giveback(priv_ep, priv_req, 0);
+ } else {
+ priv_ep->stream_sg_idx++;
+ cdns3_ep_run_stream_transfer(priv_ep, request);
+ }
break;
+ }
}
priv_ep->flags &= ~EP_PENDING_REQUEST;
@@ -1205,6 +1523,21 @@ void cdns3_rearm_transfer(struct cdns3_endpoint *priv_ep, u8 rearm)
}
}
+static void cdns3_reprogram_tdl(struct cdns3_endpoint *priv_ep)
+{
+ u16 tdl = priv_ep->pending_tdl;
+ struct cdns3_device *priv_dev = priv_ep->cdns3_dev;
+
+ if (tdl > EP_CMD_TDL_MAX) {
+ tdl = EP_CMD_TDL_MAX;
+ priv_ep->pending_tdl -= EP_CMD_TDL_MAX;
+ } else {
+ priv_ep->pending_tdl = 0;
+ }
+
+ writel(EP_CMD_TDL_SET(tdl) | EP_CMD_STDL, &priv_dev->regs->ep_cmd);
+}
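Together with the clamping added in cdns3_ep_run_transfer() above, TDL programming is two-stage: the first write is capped at EP_CMD_TDL_MAX and the remainder is parked in pending_tdl, which cdns3_reprogram_tdl() drains on subsequent interrupts. A minimal sketch of just that bookkeeping (standalone; register writes are elided and the values are assumed):

```c
/* Sketch of the two-stage TDL programming, assuming a 7-bit TDL
 * field (EP_CMD_TDL_MAX == 127). Only the arithmetic is modeled.
 */
#include <stdio.h>

#define EP_CMD_TDL_MAX 127

static unsigned int pending_tdl;

static unsigned int program_tdl(unsigned int want)
{
    unsigned int tdl = want;

    if (tdl > EP_CMD_TDL_MAX) {
        tdl = EP_CMD_TDL_MAX;
        pending_tdl = want - EP_CMD_TDL_MAX;
    } else {
        pending_tdl = 0;
    }
    return tdl; /* value that would go into EP_CMD_TDL_SET() */
}

int main(void)
{
    unsigned int first = program_tdl(300);

    printf("first write: %u, pending: %u\n", first, pending_tdl);
    /* later interrupts drain pending_tdl in EP_CMD_TDL_MAX chunks */
    while (pending_tdl) {
        unsigned int next = program_tdl(pending_tdl);

        printf("reprogram: %u, pending: %u\n", next, pending_tdl);
    }
    return 0;
}
```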
+
/**
* cdns3_check_ep_interrupt_proceed - Processes interrupt related to endpoint
* @priv_ep: endpoint object
@@ -1215,6 +1548,9 @@ static int cdns3_check_ep_interrupt_proceed(struct cdns3_endpoint *priv_ep)
{
struct cdns3_device *priv_dev = priv_ep->cdns3_dev;
u32 ep_sts_reg;
+ struct usb_request *deferred_request;
+ struct usb_request *pending_request;
+ u32 tdl = 0;
cdns3_select_ep(priv_dev, priv_ep->endpoint.address);
@@ -1223,6 +1559,36 @@ static int cdns3_check_ep_interrupt_proceed(struct cdns3_endpoint *priv_ep)
ep_sts_reg = readl(&priv_dev->regs->ep_sts);
writel(ep_sts_reg, &priv_dev->regs->ep_sts);
+ if ((ep_sts_reg & EP_STS_PRIME) && priv_ep->use_streams) {
+ bool dbusy = !!(ep_sts_reg & EP_STS_DBUSY);
+
+ tdl = cdns3_get_tdl(priv_dev);
+
+ /*
+ * Continue the previous transfer:
+ * There is a race between ERDY and PRIME. The device sends
+ * ERDY and, at almost the same time, the host sends PRIME.
+ * This causes the host to ignore the ERDY packet, so the
+ * driver has to send it again.
+ */
+ if (tdl && (dbusy || !EP_STS_BUFFEMPTY(ep_sts_reg) ||
+ EP_STS_HOSTPP(ep_sts_reg))) {
+ writel(EP_CMD_ERDY |
+ EP_CMD_ERDY_SID(priv_ep->last_stream_id),
+ &priv_dev->regs->ep_cmd);
+ ep_sts_reg &= ~(EP_STS_MD_EXIT | EP_STS_IOC);
+ } else {
+ priv_ep->prime_flag = true;
+
+ pending_request = cdns3_next_request(&priv_ep->pending_req_list);
+ deferred_request = cdns3_next_request(&priv_ep->deferred_req_list);
+
+ if (deferred_request && !pending_request) {
+ cdns3_start_all_request(priv_dev, priv_ep);
+ }
+ }
+ }
+
if (ep_sts_reg & EP_STS_TRBERR) {
if (priv_ep->flags & EP_STALL_PENDING &&
!(ep_sts_reg & EP_STS_DESCMIS &&
@@ -1259,7 +1625,8 @@ static int cdns3_check_ep_interrupt_proceed(struct cdns3_endpoint *priv_ep)
}
}
- if ((ep_sts_reg & EP_STS_IOC) || (ep_sts_reg & EP_STS_ISP)) {
+ if ((ep_sts_reg & EP_STS_IOC) || (ep_sts_reg & EP_STS_ISP) ||
+ (ep_sts_reg & EP_STS_IOT)) {
if (priv_ep->flags & EP_QUIRK_EXTRA_BUF_EN) {
if (ep_sts_reg & EP_STS_ISP)
priv_ep->flags |= EP_QUIRK_END_TRANSFER;
@@ -1267,6 +1634,29 @@ static int cdns3_check_ep_interrupt_proceed(struct cdns3_endpoint *priv_ep)
priv_ep->flags &= ~EP_QUIRK_END_TRANSFER;
}
+ if (!priv_ep->use_streams) {
+ if ((ep_sts_reg & EP_STS_IOC) ||
+ (ep_sts_reg & EP_STS_ISP)) {
+ cdns3_transfer_completed(priv_dev, priv_ep);
+ } else if ((priv_ep->flags & EP_TDLCHK_EN) &&
+ priv_ep->pending_tdl) {
+ /* handle IOT with pending tdl */
+ cdns3_reprogram_tdl(priv_ep);
+ }
+ } else if (priv_ep->dir == USB_DIR_OUT) {
+ priv_ep->ep_sts_pending |= ep_sts_reg;
+ } else if (ep_sts_reg & EP_STS_IOT) {
+ cdns3_transfer_completed(priv_dev, priv_ep);
+ }
+ }
+
+ /*
+ * The MD_EXIT interrupt is set when a stream capable endpoint exits
+ * the MOVE DATA state of the Bulk IN/OUT stream protocol state machine.
+ */
+ if (priv_ep->dir == USB_DIR_OUT && (ep_sts_reg & EP_STS_MD_EXIT) &&
+ (priv_ep->ep_sts_pending & EP_STS_IOT) && priv_ep->use_streams) {
+ priv_ep->ep_sts_pending = 0;
cdns3_transfer_completed(priv_dev, priv_ep);
}
@@ -1274,7 +1664,7 @@ static int cdns3_check_ep_interrupt_proceed(struct cdns3_endpoint *priv_ep)
* WA2: this condition should only be met when
* priv_ep->flags & EP_QUIRK_EXTRA_BUF_DET or
* priv_ep->flags & EP_QUIRK_EXTRA_BUF_EN.
- * In other cases this interrupt will be disabled/
+ * In other cases this interrupt will be disabled.
*/
if (ep_sts_reg & EP_STS_DESCMIS && priv_dev->dev_ver < DEV_VER_V2 &&
!(priv_ep->flags & EP_STALLED))
@@ -1457,6 +1847,9 @@ static irqreturn_t cdns3_device_thread_irq_handler(int irq, void *data)
ret = IRQ_HANDLED;
}
+ if (priv_dev->dev_ver < DEV_VER_V2 && priv_dev->using_streams)
+ cdns3_wa2_check_outq_status(priv_dev);
+
irqend:
writel(~0, &priv_dev->regs->ep_ien);
spin_unlock_irqrestore(&priv_dev->lock, flags);
@@ -1511,6 +1904,27 @@ static int cdns3_ep_onchip_buffer_reserve(struct cdns3_device *priv_dev,
return 0;
}
+void cdns3_stream_ep_reconfig(struct cdns3_device *priv_dev,
+ struct cdns3_endpoint *priv_ep)
+{
+ if (!priv_ep->use_streams || priv_dev->gadget.speed < USB_SPEED_SUPER)
+ return;
+
+ if (priv_dev->dev_ver >= DEV_VER_V3) {
+ u32 mask = BIT(priv_ep->num + (priv_ep->dir ? 16 : 0));
+
+ /*
+ * Stream capable endpoints are handled by using the ep_tdl
+ * register. Other endpoints use the TDL-from-TRB feature.
+ */
+ cdns3_clear_register_bit(&priv_dev->regs->tdl_from_trb, mask);
+ }
+
+ /* Enable Stream Bit, TDL check and SID check */
+ cdns3_set_register_bit(&priv_dev->regs->ep_cfg, EP_CFG_STREAM_EN |
+ EP_CFG_TDL_CHK | EP_CFG_SID_CHK);
+}
+
void cdns3_configure_dmult(struct cdns3_device *priv_dev,
struct cdns3_endpoint *priv_ep)
{
@@ -1772,6 +2186,7 @@ static int cdns3_gadget_ep_enable(struct usb_ep *ep,
{
struct cdns3_endpoint *priv_ep;
struct cdns3_device *priv_dev;
+ const struct usb_ss_ep_comp_descriptor *comp_desc;
u32 reg = EP_STS_EN_TRBERREN;
u32 bEndpointAddress;
unsigned long flags;
@@ -1781,6 +2196,7 @@ static int cdns3_gadget_ep_enable(struct usb_ep *ep,
priv_ep = ep_to_cdns3_ep(ep);
priv_dev = priv_ep->cdns3_dev;
+ comp_desc = priv_ep->endpoint.comp_desc;
if (!ep || !desc || desc->bDescriptorType != USB_DT_ENDPOINT) {
dev_dbg(priv_dev->dev, "usbss: invalid parameters\n");
@@ -1811,6 +2227,24 @@ static int cdns3_gadget_ep_enable(struct usb_ep *ep,
goto exit;
}
+ bEndpointAddress = priv_ep->num | priv_ep->dir;
+ cdns3_select_ep(priv_dev, bEndpointAddress);
+
+ if (usb_ss_max_streams(comp_desc) && usb_endpoint_xfer_bulk(desc)) {
+ /*
+ * Enable stream support (SS mode) related interrupts
+ * in the EP_STS_EN register
+ */
+ if (priv_dev->gadget.speed >= USB_SPEED_SUPER) {
+ reg |= EP_STS_EN_IOTEN | EP_STS_EN_PRIMEEEN |
+ EP_STS_EN_SIDERREN | EP_STS_EN_MD_EXITEN |
+ EP_STS_EN_STREAMREN;
+ priv_ep->use_streams = true;
+ cdns3_stream_ep_reconfig(priv_dev, priv_ep);
+ priv_dev->using_streams |= true;
+ }
+ }
+
ret = cdns3_allocate_trb_pool(priv_ep);
if (ret)
@@ -1957,6 +2391,7 @@ static int cdns3_gadget_ep_disable(struct usb_ep *ep)
ep->desc = NULL;
priv_ep->flags &= ~EP_ENABLED;
+ priv_ep->use_streams = false;
spin_unlock_irqrestore(&priv_dev->lock, flags);
@@ -2005,13 +2440,21 @@ static int __cdns3_gadget_ep_queue(struct usb_ep *ep,
list_add_tail(&request->list, &priv_ep->deferred_req_list);
/*
+ * For a stream capable endpoint, start the request only if the
+ * prime irq flag is set.
* If hardware endpoint configuration has not been set yet then
* just queue request in deferred list. Transfer will be started in
* cdns3_set_hw_configuration.
*/
- if (priv_dev->hw_configured_flag && !(priv_ep->flags & EP_STALLED) &&
- !(priv_ep->flags & EP_STALL_PENDING))
- cdns3_start_all_request(priv_dev, priv_ep);
+ if (!request->stream_id) {
+ if (priv_dev->hw_configured_flag &&
+ !(priv_ep->flags & EP_STALLED) &&
+ !(priv_ep->flags & EP_STALL_PENDING))
+ cdns3_start_all_request(priv_dev, priv_ep);
+ } else {
+ if (priv_dev->hw_configured_flag && priv_ep->prime_flag)
+ cdns3_start_all_request(priv_dev, priv_ep);
+ }
return 0;
}
@@ -2384,7 +2827,6 @@ static int cdns3_gadget_udc_stop(struct usb_gadget *gadget)
struct cdns3_endpoint *priv_ep;
u32 bEndpointAddress;
struct usb_ep *ep;
- int ret = 0;
int val;
priv_dev->gadget_driver = NULL;
@@ -2408,7 +2850,7 @@ static int cdns3_gadget_udc_stop(struct usb_gadget *gadget)
writel(0, &priv_dev->regs->usb_ien);
writel(USB_CONF_DEVDS, &priv_dev->regs->usb_conf);
- return ret;
+ return 0;
}
static const struct usb_gadget_ops cdns3_gadget_ops = {
diff --git a/drivers/usb/cdns3/gadget.h b/drivers/usb/cdns3/gadget.h
index bc4024041ef2..f003a7801872 100644
--- a/drivers/usb/cdns3/gadget.h
+++ b/drivers/usb/cdns3/gadget.h
@@ -599,6 +599,7 @@ struct cdns3_usb_regs {
#define EP_CMD_TDL_MASK GENMASK(15, 9)
#define EP_CMD_TDL_SET(p) (((p) << 9) & EP_CMD_TDL_MASK)
#define EP_CMD_TDL_GET(p) (((p) & EP_CMD_TDL_MASK) >> 9)
+#define EP_CMD_TDL_MAX (EP_CMD_TDL_MASK >> 9)
/* ERDY Stream ID value (used in SS mode). */
#define EP_CMD_ERDY_SID_MASK GENMASK(31, 16)
@@ -969,10 +970,18 @@ struct cdns3_usb_regs {
#define ISO_MAX_INTERVAL 10
+#define MAX_TRB_LENGTH BIT(16)
+
#if TRBS_PER_SEGMENT < 2
#error "Incorrect TRBS_PER_SEGMENT. Minimal Transfer Ring size is 2."
#endif
+#define TRBS_PER_STREAM_SEGMENT 2
+
+#if TRBS_PER_STREAM_SEGMENT < 2
+#error "Incorrect TRBS_PER_STREAM_SEGMENT. Minimal Transfer Ring size is 2."
+#endif
+
/*
* Only for ISOC endpoints - maximum number of TRBs is calculated as
* pow(2, bInterval-1) * number of usb requests. It is limitation made by
@@ -1000,6 +1009,7 @@ struct cdns3_trb {
#define TRB_SIZE (sizeof(struct cdns3_trb))
#define TRB_RING_SIZE (TRB_SIZE * TRBS_PER_SEGMENT)
+#define TRB_STREAM_RING_SIZE (TRB_SIZE * TRBS_PER_STREAM_SEGMENT)
#define TRB_ISO_RING_SIZE (TRB_SIZE * TRBS_PER_ISOC_SEGMENT)
#define TRB_CTRL_RING_SIZE (TRB_SIZE * 2)
@@ -1078,7 +1088,7 @@ struct cdns3_trb {
#define CDNS3_ENDPOINTS_MAX_COUNT 32
#define CDNS3_EP_ZLP_BUF_SIZE 1024
-#define CDNS3_EP_BUF_SIZE 2 /* KB */
+#define CDNS3_EP_BUF_SIZE 4 /* KB */
#define CDNS3_EP_ISO_HS_MULT 3
#define CDNS3_EP_ISO_SS_BURST 3
#define CDNS3_MAX_NUM_DESCMISS_BUF 32
@@ -1109,6 +1119,7 @@ struct cdns3_device;
* @interval: interval between packets used for ISOC endpoint.
* @free_trbs: number of free TRBs in transfer ring
* @num_trbs: number of all TRBs in transfer ring
+ * @alloc_ring_size: size of the allocated TRB ring
* @pcs: producer cycle state
* @ccs: consumer cycle state
* @enqueue: enqueue index in transfer ring
@@ -1142,6 +1153,7 @@ struct cdns3_endpoint {
#define EP_QUIRK_END_TRANSFER BIT(11)
#define EP_QUIRK_EXTRA_BUF_DET BIT(12)
#define EP_QUIRK_EXTRA_BUF_EN BIT(13)
+#define EP_TDLCHK_EN BIT(15)
u32 flags;
struct cdns3_request *descmis_req;
@@ -1153,6 +1165,7 @@ struct cdns3_endpoint {
int free_trbs;
int num_trbs;
+ int alloc_ring_size;
u8 pcs;
u8 ccs;
int enqueue;
@@ -1163,6 +1176,14 @@ struct cdns3_endpoint {
struct cdns3_trb *wa1_trb;
unsigned int wa1_trb_index;
unsigned int wa1_cycle_bit:1;
+
+ /* Stream related */
+ unsigned int use_streams:1;
+ unsigned int prime_flag:1;
+ u32 ep_sts_pending;
+ u16 last_stream_id;
+ u16 pending_tdl;
+ unsigned int stream_sg_idx;
};
/**
@@ -1290,6 +1311,7 @@ struct cdns3_device {
int hw_configured_flag:1;
int wake_up_flag:1;
unsigned status_completion_no_call:1;
+ unsigned using_streams:1;
int out_mem_is_allocated;
struct work_struct pending_status_wq;
@@ -1310,8 +1332,6 @@ void cdns3_set_hw_configuration(struct cdns3_device *priv_dev);
void cdns3_select_ep(struct cdns3_device *priv_dev, u32 ep);
void cdns3_allow_enable_l1(struct cdns3_device *priv_dev, int enable);
struct usb_request *cdns3_next_request(struct list_head *list);
-int cdns3_ep_run_transfer(struct cdns3_endpoint *priv_ep,
- struct usb_request *request);
void cdns3_rearm_transfer(struct cdns3_endpoint *priv_ep, u8 rearm);
int cdns3_allocate_trb_pool(struct cdns3_endpoint *priv_ep);
u8 cdns3_ep_addr_to_index(u8 ep_addr);
diff --git a/drivers/usb/cdns3/trace.h b/drivers/usb/cdns3/trace.h
index e92348c9b4d7..8d121e207fd8 100644
--- a/drivers/usb/cdns3/trace.h
+++ b/drivers/usb/cdns3/trace.h
@@ -122,18 +122,24 @@ DECLARE_EVENT_CLASS(cdns3_log_epx_irq,
__string(ep_name, priv_ep->name)
__field(u32, ep_sts)
__field(u32, ep_traddr)
+ __field(u32, ep_last_sid)
+ __field(u32, use_streams)
__dynamic_array(char, str, CDNS3_MSG_MAX)
),
TP_fast_assign(
__assign_str(ep_name, priv_ep->name);
__entry->ep_sts = readl(&priv_dev->regs->ep_sts);
__entry->ep_traddr = readl(&priv_dev->regs->ep_traddr);
+ __entry->ep_last_sid = priv_ep->last_stream_id;
+ __entry->use_streams = priv_ep->use_streams;
),
- TP_printk("%s, ep_traddr: %08x",
+ TP_printk("%s, ep_traddr: %08x ep_last_sid: %08x use_streams: %d",
cdns3_decode_epx_irq(__get_str(str),
__get_str(ep_name),
__entry->ep_sts),
- __entry->ep_traddr)
+ __entry->ep_traddr,
+ __entry->ep_last_sid,
+ __entry->use_streams)
);
DEFINE_EVENT(cdns3_log_epx_irq, cdns3_epx_irq,
@@ -210,6 +216,7 @@ DECLARE_EVENT_CLASS(cdns3_log_request,
__field(int, end_trb)
__field(struct cdns3_trb *, start_trb_addr)
__field(int, flags)
+ __field(unsigned int, stream_id)
),
TP_fast_assign(
__assign_str(name, req->priv_ep->name);
@@ -225,9 +232,10 @@ DECLARE_EVENT_CLASS(cdns3_log_request,
__entry->end_trb = req->end_trb;
__entry->start_trb_addr = req->trb;
__entry->flags = req->flags;
+ __entry->stream_id = req->request.stream_id;
),
TP_printk("%s: req: %p, req buff %p, length: %u/%u %s%s%s, status: %d,"
- " trb: [start:%d, end:%d: virt addr %pa], flags:%x ",
+ " trb: [start:%d, end:%d: virt addr %pa], flags:%x SID: %u",
__get_str(name), __entry->req, __entry->buf, __entry->actual,
__entry->length,
__entry->zero ? "Z" : "z",
@@ -237,7 +245,8 @@ DECLARE_EVENT_CLASS(cdns3_log_request,
__entry->start_trb,
__entry->end_trb,
__entry->start_trb_addr,
- __entry->flags
+ __entry->flags,
+ __entry->stream_id
)
);
@@ -281,6 +290,39 @@ TRACE_EVENT(cdns3_ep0_queue,
__entry->length)
);
+DECLARE_EVENT_CLASS(cdns3_stream_split_transfer_len,
+ TP_PROTO(struct cdns3_request *req),
+ TP_ARGS(req),
+ TP_STRUCT__entry(
+ __string(name, req->priv_ep->name)
+ __field(struct cdns3_request *, req)
+ __field(unsigned int, length)
+ __field(unsigned int, actual)
+ __field(unsigned int, stream_id)
+ ),
+ TP_fast_assign(
+ __assign_str(name, req->priv_ep->name);
+ __entry->req = req;
+ __entry->length = req->request.length;
+ __entry->actual = req->request.actual;
+ __entry->stream_id = req->request.stream_id;
+ ),
+ TP_printk("%s: req: %p, request length: %u actual length: %u SID: %u",
+ __get_str(name), __entry->req, __entry->length,
+ __entry->actual, __entry->stream_id)
+);
+
+DEFINE_EVENT(cdns3_stream_split_transfer_len, cdns3_stream_transfer_split,
+ TP_PROTO(struct cdns3_request *req),
+ TP_ARGS(req)
+);
+
+DEFINE_EVENT(cdns3_stream_split_transfer_len,
+ cdns3_stream_transfer_split_next_part,
+ TP_PROTO(struct cdns3_request *req),
+ TP_ARGS(req)
+);
+
DECLARE_EVENT_CLASS(cdns3_log_aligned_request,
TP_PROTO(struct cdns3_request *priv_req),
TP_ARGS(priv_req),
@@ -319,6 +361,34 @@ DEFINE_EVENT(cdns3_log_aligned_request, cdns3_prepare_aligned_request,
TP_ARGS(req)
);
+DECLARE_EVENT_CLASS(cdns3_log_map_request,
+ TP_PROTO(struct cdns3_request *priv_req),
+ TP_ARGS(priv_req),
+ TP_STRUCT__entry(
+ __string(name, priv_req->priv_ep->name)
+ __field(struct usb_request *, req)
+ __field(void *, buf)
+ __field(dma_addr_t, dma)
+ ),
+ TP_fast_assign(
+ __assign_str(name, priv_req->priv_ep->name);
+ __entry->req = &priv_req->request;
+ __entry->buf = priv_req->request.buf;
+ __entry->dma = priv_req->request.dma;
+ ),
+ TP_printk("%s: req: %p, req buf %p, dma %pad",
+ __get_str(name), __entry->req, __entry->buf, &__entry->dma
+ )
+);
+DEFINE_EVENT(cdns3_log_map_request, cdns3_map_request,
+ TP_PROTO(struct cdns3_request *req),
+ TP_ARGS(req)
+);
+DEFINE_EVENT(cdns3_log_map_request, cdns3_mapped_request,
+ TP_PROTO(struct cdns3_request *req),
+ TP_ARGS(req)
+);
+
DECLARE_EVENT_CLASS(cdns3_log_trb,
TP_PROTO(struct cdns3_endpoint *priv_ep, struct cdns3_trb *trb),
TP_ARGS(priv_ep, trb),
@@ -329,6 +399,7 @@ DECLARE_EVENT_CLASS(cdns3_log_trb,
__field(u32, length)
__field(u32, control)
__field(u32, type)
+ __field(unsigned int, last_stream_id)
),
TP_fast_assign(
__assign_str(name, priv_ep->name);
@@ -337,8 +408,9 @@ DECLARE_EVENT_CLASS(cdns3_log_trb,
__entry->length = trb->length;
__entry->control = trb->control;
__entry->type = usb_endpoint_type(priv_ep->endpoint.desc);
+ __entry->last_stream_id = priv_ep->last_stream_id;
),
- TP_printk("%s: trb %p, dma buf: 0x%08x, size: %ld, burst: %d ctrl: 0x%08x (%s%s%s%s%s%s%s)",
+ TP_printk("%s: trb %p, dma buf: 0x%08x, size: %ld, burst: %d ctrl: 0x%08x (%s%s%s%s%s%s%s) SID:%lu LAST_SID:%u",
__get_str(name), __entry->trb, __entry->buffer,
TRB_LEN(__entry->length),
(u8)TRB_BURST_LEN_GET(__entry->length),
@@ -349,7 +421,9 @@ DECLARE_EVENT_CLASS(cdns3_log_trb,
__entry->control & TRB_FIFO_MODE ? "FIFO, " : "",
__entry->control & TRB_CHAIN ? "CHAIN, " : "",
__entry->control & TRB_IOC ? "IOC, " : "",
- TRB_FIELD_TO_TYPE(__entry->control) == TRB_NORMAL ? "Normal" : "LINK"
+ TRB_FIELD_TO_TYPE(__entry->control) == TRB_NORMAL ? "Normal" : "LINK",
+ TRB_FIELD_TO_STREAMID(__entry->control),
+ __entry->last_stream_id
)
);
@@ -398,6 +472,7 @@ DECLARE_EVENT_CLASS(cdns3_log_ep,
__field(unsigned int, maxpacket)
__field(unsigned int, maxpacket_limit)
__field(unsigned int, max_streams)
+ __field(unsigned int, use_streams)
__field(unsigned int, maxburst)
__field(unsigned int, flags)
__field(unsigned int, dir)
@@ -409,16 +484,18 @@ DECLARE_EVENT_CLASS(cdns3_log_ep,
__entry->maxpacket = priv_ep->endpoint.maxpacket;
__entry->maxpacket_limit = priv_ep->endpoint.maxpacket_limit;
__entry->max_streams = priv_ep->endpoint.max_streams;
+ __entry->use_streams = priv_ep->use_streams;
__entry->maxburst = priv_ep->endpoint.maxburst;
__entry->flags = priv_ep->flags;
__entry->dir = priv_ep->dir;
__entry->enqueue = priv_ep->enqueue;
__entry->dequeue = priv_ep->dequeue;
),
- TP_printk("%s: mps: %d/%d. streams: %d, burst: %d, enq idx: %d, "
- "deq idx: %d, flags %s%s%s%s%s%s%s%s, dir: %s",
+ TP_printk("%s: mps: %d/%d. streams: %d, stream enable: %d, burst: %d, "
+ "enq idx: %d, deq idx: %d, flags %s%s%s%s%s%s%s%s, dir: %s",
__get_str(name), __entry->maxpacket,
__entry->maxpacket_limit, __entry->max_streams,
+ __entry->use_streams,
__entry->maxburst, __entry->enqueue,
__entry->dequeue,
__entry->flags & EP_ENABLED ? "EN | " : "",
diff --git a/drivers/usb/chipidea/Kconfig b/drivers/usb/chipidea/Kconfig
index ae850b3fddf2..d53db520e209 100644
--- a/drivers/usb/chipidea/Kconfig
+++ b/drivers/usb/chipidea/Kconfig
@@ -7,6 +7,7 @@ config USB_CHIPIDEA
select RESET_CONTROLLER
select USB_ULPI_BUS
select USB_ROLE_SWITCH
+ select USB_TEGRA_PHY if ARCH_TEGRA
help
Say Y here if your system has a dual role high speed USB
controller based on ChipIdea silicon IP. It supports:
diff --git a/drivers/usb/chipidea/ci.h b/drivers/usb/chipidea/ci.h
index 6911aef500e9..d49d5e1235d0 100644
--- a/drivers/usb/chipidea/ci.h
+++ b/drivers/usb/chipidea/ci.h
@@ -302,6 +302,16 @@ static inline enum usb_role ci_role_to_usb_role(struct ci_hdrc *ci)
return USB_ROLE_NONE;
}
+static inline enum ci_role usb_role_to_ci_role(enum usb_role role)
+{
+ if (role == USB_ROLE_HOST)
+ return CI_ROLE_HOST;
+ else if (role == USB_ROLE_DEVICE)
+ return CI_ROLE_GADGET;
+ else
+ return CI_ROLE_END;
+}
+
/**
* hw_read_id_reg: reads from a identification register
* @ci: the controller
diff --git a/drivers/usb/chipidea/ci_hdrc_tegra.c b/drivers/usb/chipidea/ci_hdrc_tegra.c
index 0c9911d44ee5..7455df0ede49 100644
--- a/drivers/usb/chipidea/ci_hdrc_tegra.c
+++ b/drivers/usb/chipidea/ci_hdrc_tegra.c
@@ -83,13 +83,6 @@ static int tegra_udc_probe(struct platform_device *pdev)
return err;
}
- /*
- * Tegra's USB PHY driver doesn't implement optional phy_init()
- * hook, so we have to power on UDC controller before ChipIdea
- * driver initialization kicks in.
- */
- usb_phy_set_suspend(udc->phy, 0);
-
/* setup and register ChipIdea HDRC device */
udc->data.name = "tegra-udc";
udc->data.flags = soc->flags;
@@ -109,7 +102,6 @@ static int tegra_udc_probe(struct platform_device *pdev)
return 0;
fail_power_off:
- usb_phy_set_suspend(udc->phy, 1);
clk_disable_unprepare(udc->clk);
return err;
}
@@ -119,7 +111,6 @@ static int tegra_udc_remove(struct platform_device *pdev)
struct tegra_udc *udc = platform_get_drvdata(pdev);
ci_hdrc_remove_device(udc->dev);
- usb_phy_set_suspend(udc->phy, 1);
clk_disable_unprepare(udc->clk);
return 0;
diff --git a/drivers/usb/chipidea/core.c b/drivers/usb/chipidea/core.c
index dce5db41501c..52139c2a9924 100644
--- a/drivers/usb/chipidea/core.c
+++ b/drivers/usb/chipidea/core.c
@@ -618,9 +618,11 @@ static int ci_usb_role_switch_set(struct device *dev, enum usb_role role)
struct ci_hdrc *ci = dev_get_drvdata(dev);
struct ci_hdrc_cable *cable = NULL;
enum usb_role current_role = ci_role_to_usb_role(ci);
+ enum ci_role ci_role = usb_role_to_ci_role(role);
unsigned long flags;
- if (current_role == role)
+ if ((ci_role != CI_ROLE_END && !ci->roles[ci_role]) ||
+ (current_role == role))
return 0;
pm_runtime_get_sync(ci->dev);
diff --git a/drivers/usb/chipidea/host.h b/drivers/usb/chipidea/host.h
index 70112cf0f195..2625aa01a911 100644
--- a/drivers/usb/chipidea/host.h
+++ b/drivers/usb/chipidea/host.h
@@ -20,7 +20,7 @@ static inline void ci_hdrc_host_destroy(struct ci_hdrc *ci)
}
-static void ci_hdrc_host_driver_init(void)
+static inline void ci_hdrc_host_driver_init(void)
{
}
diff --git a/drivers/usb/core/devio.c b/drivers/usb/core/devio.c
index 12bb5722b420..6833c918abce 100644
--- a/drivers/usb/core/devio.c
+++ b/drivers/usb/core/devio.c
@@ -574,7 +574,7 @@ __acquires(ps->lock)
/* Now carefully unlink all the marked pending URBs */
rescan:
- list_for_each_entry(as, &ps->async_pending, asynclist) {
+ list_for_each_entry_reverse(as, &ps->async_pending, asynclist) {
if (as->bulk_status == AS_UNLINK) {
as->bulk_status = 0; /* Only once */
urb = as->urb;
@@ -636,7 +636,7 @@ static void destroy_async(struct usb_dev_state *ps, struct list_head *list)
spin_lock_irqsave(&ps->lock, flags);
while (!list_empty(list)) {
- as = list_entry(list->next, struct async, asynclist);
+ as = list_last_entry(list, struct async, asynclist);
list_del_init(&as->asynclist);
urb = as->urb;
usb_get_urb(urb);
diff --git a/drivers/usb/core/hcd-pci.c b/drivers/usb/core/hcd-pci.c
index 9ae2a7a93df2..f0a259937da8 100644
--- a/drivers/usb/core/hcd-pci.c
+++ b/drivers/usb/core/hcd-pci.c
@@ -222,7 +222,7 @@ int usb_hcd_pci_probe(struct pci_dev *dev, const struct pci_device_id *id)
retval = -EBUSY;
goto put_hcd;
}
- hcd->regs = devm_ioremap_nocache(&dev->dev, hcd->rsrc_start,
+ hcd->regs = devm_ioremap(&dev->dev, hcd->rsrc_start,
hcd->rsrc_len);
if (hcd->regs == NULL) {
dev_dbg(&dev->dev, "error mapping memory\n");
diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c
index 8c4e5adbf820..3405b146edc9 100644
--- a/drivers/usb/core/hub.c
+++ b/drivers/usb/core/hub.c
@@ -1192,6 +1192,7 @@ static void hub_activate(struct usb_hub *hub, enum hub_activation_type type)
* PORT_OVER_CURRENT is not. So check for any of them.
*/
if (udev || (portstatus & USB_PORT_STAT_CONNECTION) ||
+ (portchange & USB_PORT_STAT_C_CONNECTION) ||
(portstatus & USB_PORT_STAT_OVERCURRENT) ||
(portchange & USB_PORT_STAT_C_OVERCURRENT))
set_bit(port1, hub->change_bits);
diff --git a/drivers/usb/dwc2/core_intr.c b/drivers/usb/dwc2/core_intr.c
index 6af6add3d4c0..876ff31261d5 100644
--- a/drivers/usb/dwc2/core_intr.c
+++ b/drivers/usb/dwc2/core_intr.c
@@ -288,14 +288,9 @@ static void dwc2_handle_conn_id_status_change_intr(struct dwc2_hsotg *hsotg)
/*
* Need to schedule a work, as there are possible DELAY function calls.
- * Release lock before scheduling workq as it holds spinlock during
- * scheduling.
*/
- if (hsotg->wq_otg) {
- spin_unlock(&hsotg->lock);
+ if (hsotg->wq_otg)
queue_work(hsotg->wq_otg, &hsotg->wf_otg);
- spin_lock(&hsotg->lock);
- }
}
/**
diff --git a/drivers/usb/dwc2/debugfs.c b/drivers/usb/dwc2/debugfs.c
index b8f2790abf91..3a0dcbfbc827 100644
--- a/drivers/usb/dwc2/debugfs.c
+++ b/drivers/usb/dwc2/debugfs.c
@@ -183,6 +183,7 @@ DEFINE_SHOW_ATTRIBUTE(state);
static int fifo_show(struct seq_file *seq, void *v)
{
struct dwc2_hsotg *hsotg = seq->private;
+ int fifo_count = dwc2_hsotg_tx_fifo_count(hsotg);
u32 val;
int idx;
@@ -196,7 +197,7 @@ static int fifo_show(struct seq_file *seq, void *v)
seq_puts(seq, "\nPeriodic TXFIFOs:\n");
- for (idx = 1; idx < hsotg->num_of_eps; idx++) {
+ for (idx = 1; idx <= fifo_count; idx++) {
val = dwc2_readl(hsotg, DPTXFSIZN(idx));
seq_printf(seq, "\tDPTXFIFO%2d: Size %d, Start 0x%08x\n", idx,
diff --git a/drivers/usb/dwc2/gadget.c b/drivers/usb/dwc2/gadget.c
index 6be10e496e10..88f7d6d4ff2d 100644
--- a/drivers/usb/dwc2/gadget.c
+++ b/drivers/usb/dwc2/gadget.c
@@ -3784,15 +3784,26 @@ irq_retry:
for (idx = 1; idx < hsotg->num_of_eps; idx++) {
hs_ep = hsotg->eps_out[idx];
/* Proceed only for unmasked EPs */
- if ((BIT(idx) & ~daintmsk) || !hs_ep->isochronous)
+ if (BIT(idx) & ~daintmsk)
continue;
epctrl = dwc2_readl(hsotg, DOEPCTL(idx));
- if (epctrl & DXEPCTL_EPENA) {
+ /* ISOC EPs only */
+ if ((epctrl & DXEPCTL_EPENA) && hs_ep->isochronous) {
epctrl |= DXEPCTL_SNAK;
epctrl |= DXEPCTL_EPDIS;
dwc2_writel(hsotg, epctrl, DOEPCTL(idx));
+ continue;
+ }
+
+ /* Non-ISOC EPs */
+ if (hs_ep->halted) {
+ if (!(epctrl & DXEPCTL_EPENA))
+ epctrl |= DXEPCTL_EPENA;
+ epctrl |= DXEPCTL_EPDIS;
+ epctrl |= DXEPCTL_STALL;
+ dwc2_writel(hsotg, epctrl, DOEPCTL(idx));
}
}
@@ -4056,11 +4067,12 @@ static int dwc2_hsotg_ep_enable(struct usb_ep *ep,
* a unique tx-fifo even if it is non-periodic.
*/
if (dir_in && hsotg->dedicated_fifos) {
+ unsigned fifo_count = dwc2_hsotg_tx_fifo_count(hsotg);
u32 fifo_index = 0;
u32 fifo_size = UINT_MAX;
size = hs_ep->ep.maxpacket * hs_ep->mc;
- for (i = 1; i < hsotg->num_of_eps; ++i) {
+ for (i = 1; i <= fifo_count; ++i) {
if (hsotg->fifo_map & (1 << i))
continue;
val = dwc2_readl(hsotg, DPTXFSIZN(i));
@@ -4310,19 +4322,20 @@ static int dwc2_hsotg_ep_sethalt(struct usb_ep *ep, int value, bool now)
epctl = dwc2_readl(hs, epreg);
if (value) {
- epctl |= DXEPCTL_STALL;
+ if (!(dwc2_readl(hs, GINTSTS) & GINTSTS_GOUTNAKEFF))
+ dwc2_set_bit(hs, DCTL, DCTL_SGOUTNAK);
+ /* STALL bit will be set in GOUTNAKEFF interrupt handler */
} else {
epctl &= ~DXEPCTL_STALL;
xfertype = epctl & DXEPCTL_EPTYPE_MASK;
if (xfertype == DXEPCTL_EPTYPE_BULK ||
xfertype == DXEPCTL_EPTYPE_INTERRUPT)
epctl |= DXEPCTL_SETD0PID;
+ dwc2_writel(hs, epctl, epreg);
}
- dwc2_writel(hs, epctl, epreg);
}
hs_ep->halted = value;
-
return 0;
}
diff --git a/drivers/usb/dwc2/hcd.c b/drivers/usb/dwc2/hcd.c
index 81afe553aa66..b90f858af960 100644
--- a/drivers/usb/dwc2/hcd.c
+++ b/drivers/usb/dwc2/hcd.c
@@ -2824,7 +2824,7 @@ static int dwc2_queue_transaction(struct dwc2_hsotg *hsotg,
list_move_tail(&chan->split_order_list_entry,
&hsotg->split_order);
- if (hsotg->params.host_dma) {
+ if (hsotg->params.host_dma && chan->qh) {
if (hsotg->params.dma_desc_enable) {
if (!chan->xfer_started ||
chan->ep_type == USB_ENDPOINT_XFER_ISOC) {
diff --git a/drivers/usb/dwc3/core.c b/drivers/usb/dwc3/core.c
index f561c6c9e8a9..1d85c42b9c67 100644
--- a/drivers/usb/dwc3/core.c
+++ b/drivers/usb/dwc3/core.c
@@ -1246,6 +1246,9 @@ static void dwc3_core_exit_mode(struct dwc3 *dwc)
/* do nothing */
break;
}
+
+ /* de-assert DRVVBUS for HOST and OTG mode */
+ dwc3_set_prtcap(dwc, DWC3_GCTL_PRTCAP_DEVICE);
}
static void dwc3_get_properties(struct dwc3 *dwc)
diff --git a/drivers/usb/dwc3/core.h b/drivers/usb/dwc3/core.h
index 1c8b349379af..77c4a9abe365 100644
--- a/drivers/usb/dwc3/core.h
+++ b/drivers/usb/dwc3/core.h
@@ -688,7 +688,9 @@ struct dwc3_ep {
#define DWC3_EP_STALL BIT(1)
#define DWC3_EP_WEDGE BIT(2)
#define DWC3_EP_TRANSFER_STARTED BIT(3)
+#define DWC3_EP_END_TRANSFER_PENDING BIT(4)
#define DWC3_EP_PENDING_REQUEST BIT(5)
+#define DWC3_EP_DELAY_START BIT(6)
/* This last one is specific to EP0 */
#define DWC3_EP0_DIR_IN BIT(31)
diff --git a/drivers/usb/dwc3/dwc3-exynos.c b/drivers/usb/dwc3/dwc3-exynos.c
index c1e9ea621f41..90bb022737da 100644
--- a/drivers/usb/dwc3/dwc3-exynos.c
+++ b/drivers/usb/dwc3/dwc3-exynos.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0
/**
- * dwc3-exynos.c - Samsung EXYNOS DWC3 Specific Glue layer
+ * dwc3-exynos.c - Samsung Exynos DWC3 Specific Glue layer
*
* Copyright (c) 2012 Samsung Electronics Co., Ltd.
* http://www.samsung.com
@@ -255,4 +255,4 @@ module_platform_driver(dwc3_exynos_driver);
MODULE_AUTHOR("Anton Tikhomirov <av.tikhomirov@samsung.com>");
MODULE_LICENSE("GPL v2");
-MODULE_DESCRIPTION("DesignWare USB3 EXYNOS Glue Layer");
+MODULE_DESCRIPTION("DesignWare USB3 Exynos Glue Layer");
diff --git a/drivers/usb/dwc3/dwc3-pci.c b/drivers/usb/dwc3/dwc3-pci.c
index 294276f7deb9..7051611229c9 100644
--- a/drivers/usb/dwc3/dwc3-pci.c
+++ b/drivers/usb/dwc3/dwc3-pci.c
@@ -34,6 +34,7 @@
#define PCI_DEVICE_ID_INTEL_GLK 0x31aa
#define PCI_DEVICE_ID_INTEL_CNPLP 0x9dee
#define PCI_DEVICE_ID_INTEL_CNPH 0xa36e
+#define PCI_DEVICE_ID_INTEL_CNPV 0xa3b0
#define PCI_DEVICE_ID_INTEL_ICLLP 0x34ee
#define PCI_DEVICE_ID_INTEL_EHLLP 0x4b7e
#define PCI_DEVICE_ID_INTEL_TGPLP 0xa0ee
@@ -342,6 +343,9 @@ static const struct pci_device_id dwc3_pci_id_table[] = {
{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_CNPH),
(kernel_ulong_t) &dwc3_pci_intel_properties, },
+ { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_CNPV),
+ (kernel_ulong_t) &dwc3_pci_intel_properties, },
+
{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_ICLLP),
(kernel_ulong_t) &dwc3_pci_intel_properties, },
diff --git a/drivers/usb/dwc3/ep0.c b/drivers/usb/dwc3/ep0.c
index fd1b100d2927..6dee4dabc0a4 100644
--- a/drivers/usb/dwc3/ep0.c
+++ b/drivers/usb/dwc3/ep0.c
@@ -1136,8 +1136,10 @@ void dwc3_ep0_interrupt(struct dwc3 *dwc,
case DWC3_DEPEVT_EPCMDCMPLT:
cmd = DEPEVT_PARAMETER_CMD(event->parameters);
- if (cmd == DWC3_DEPCMD_ENDTRANSFER)
+ if (cmd == DWC3_DEPCMD_ENDTRANSFER) {
+ dep->flags &= ~DWC3_EP_END_TRANSFER_PENDING;
dep->flags &= ~DWC3_EP_TRANSFER_STARTED;
+ }
break;
}
}
diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c
index 154f3f3e8cff..1b8014ab0b25 100644
--- a/drivers/usb/dwc3/gadget.c
+++ b/drivers/usb/dwc3/gadget.c
@@ -57,7 +57,7 @@ int dwc3_gadget_set_test_mode(struct dwc3 *dwc, int mode)
return -EINVAL;
}
- dwc3_writel(dwc->regs, DWC3_DCTL, reg);
+ dwc3_gadget_dctl_write_safe(dwc, reg);
return 0;
}
@@ -111,6 +111,9 @@ int dwc3_gadget_set_link_state(struct dwc3 *dwc, enum dwc3_link_state state)
reg = dwc3_readl(dwc->regs, DWC3_DCTL);
reg &= ~DWC3_DCTL_ULSTCHNGREQ_MASK;
+ /* set no action before sending new link state change */
+ dwc3_writel(dwc->regs, DWC3_DCTL, reg);
+
/* set requested state */
reg |= DWC3_DCTL_ULSTCHNGREQ(state);
dwc3_writel(dwc->regs, DWC3_DCTL, reg);
@@ -1447,6 +1450,12 @@ static int __dwc3_gadget_ep_queue(struct dwc3_ep *dep, struct dwc3_request *req)
list_add_tail(&req->list, &dep->pending_list);
req->status = DWC3_REQUEST_STATUS_QUEUED;
+ /* Start the transfer only after the END_TRANSFER is completed */
+ if (dep->flags & DWC3_EP_END_TRANSFER_PENDING) {
+ dep->flags |= DWC3_EP_DELAY_START;
+ return 0;
+ }
+
/*
* NOTICE: Isochronous endpoints should NEVER be prestarted. We must
* wait for a XferNotReady event so we will know what's the current
@@ -1828,7 +1837,7 @@ static int dwc3_gadget_run_stop(struct dwc3 *dwc, int is_on, int suspend)
dwc->pullups_connected = false;
}
- dwc3_writel(dwc->regs, DWC3_DCTL, reg);
+ dwc3_gadget_dctl_write_safe(dwc, reg);
do {
reg = dwc3_readl(dwc->regs, DWC3_DSTS);
@@ -2625,8 +2634,14 @@ static void dwc3_endpoint_interrupt(struct dwc3 *dwc,
cmd = DEPEVT_PARAMETER_CMD(event->parameters);
if (cmd == DWC3_DEPCMD_ENDTRANSFER) {
+ dep->flags &= ~DWC3_EP_END_TRANSFER_PENDING;
dep->flags &= ~DWC3_EP_TRANSFER_STARTED;
dwc3_gadget_ep_cleanup_cancelled_requests(dep);
+ if ((dep->flags & DWC3_EP_DELAY_START) &&
+ !usb_endpoint_xfer_isoc(dep->endpoint.desc))
+ __dwc3_gadget_kick_transfer(dep);
+
+ dep->flags &= ~DWC3_EP_DELAY_START;
}
break;
case DWC3_DEPEVT_STREAMEVT:
@@ -2678,12 +2693,12 @@ static void dwc3_reset_gadget(struct dwc3 *dwc)
static void dwc3_stop_active_transfer(struct dwc3_ep *dep, bool force,
bool interrupt)
{
- struct dwc3 *dwc = dep->dwc;
struct dwc3_gadget_ep_cmd_params params;
u32 cmd;
int ret;
- if (!(dep->flags & DWC3_EP_TRANSFER_STARTED))
+ if (!(dep->flags & DWC3_EP_TRANSFER_STARTED) ||
+ (dep->flags & DWC3_EP_END_TRANSFER_PENDING))
return;
/*
@@ -2693,16 +2708,13 @@ static void dwc3_stop_active_transfer(struct dwc3_ep *dep, bool force,
* much trouble synchronizing between us and gadget driver.
*
* We have discussed this with the IP Provider and it was
- * suggested to giveback all requests here, but give HW some
- * extra time to synchronize with the interconnect. We're using
- * an arbitrary 100us delay for that.
+ * suggested to giveback all requests here.
*
* Note also that a similar handling was tested by Synopsys
* (thanks a lot Paul) and nothing bad has come out of it.
- * In short, what we're doing is:
- *
- * - Issue EndTransfer WITH CMDIOC bit set
- * - Wait 100us
+ * In short, what we're doing is issuing EndTransfer with
+ * CMDIOC bit set and delay kicking transfer until the
+ * EndTransfer command had completed.
*
* As of IP version 3.10a of the DWC_usb3 IP, the controller
* supports a mode to work around the above limitation. The
@@ -2711,8 +2723,7 @@ static void dwc3_stop_active_transfer(struct dwc3_ep *dep, bool force,
* by writing GUCTL2[14]. This polling is already done in the
* dwc3_send_gadget_ep_cmd() function so if the mode is
* enabled, the EndTransfer command will have completed upon
- * returning from this function and we don't need to delay for
- * 100us.
+ * returning from this function.
*
* This mode is NOT available on the DWC_usb31 IP.
*/
@@ -2728,9 +2739,8 @@ static void dwc3_stop_active_transfer(struct dwc3_ep *dep, bool force,
if (!interrupt)
dep->flags &= ~DWC3_EP_TRANSFER_STARTED;
-
- if (dwc3_is_usb31(dwc) || dwc->revision < DWC3_REVISION_310A)
- udelay(100);
+ else
+ dep->flags |= DWC3_EP_END_TRANSFER_PENDING;
}
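The comment block above describes the new handshake: EndTransfer is issued with CMDIOC, the endpoint is marked END_TRANSFER_PENDING, and any queue attempt in that window is deferred via DELAY_START until the command-completion event. A minimal sketch of the flag protocol (standalone; the flag values mirror the driver's header, everything else is illustrative):

```c
/* Sketch of the EndTransfer/delayed-start handshake; names mirror
 * the driver's flags, the surrounding plumbing is omitted.
 */
#include <stdio.h>

#define EP_END_TRANSFER_PENDING (1u << 4)
#define EP_DELAY_START          (1u << 6)

static unsigned int ep_flags;

static void ep_queue(void)
{
    if (ep_flags & EP_END_TRANSFER_PENDING) {
        ep_flags |= EP_DELAY_START; /* start later */
        return;
    }
    printf("kick transfer now\n");
}

static void end_transfer_complete(void)
{
    ep_flags &= ~EP_END_TRANSFER_PENDING;
    if (ep_flags & EP_DELAY_START)
        printf("kick deferred transfer\n");
    ep_flags &= ~EP_DELAY_START;
}

int main(void)
{
    ep_flags |= EP_END_TRANSFER_PENDING; /* EndTransfer issued w/ IOC */
    ep_queue();                          /* deferred, not started */
    end_transfer_complete();             /* kicks the deferred start */
    return 0;
}
```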
static void dwc3_clear_stall_all_ep(struct dwc3 *dwc)
@@ -2759,12 +2769,12 @@ static void dwc3_gadget_disconnect_interrupt(struct dwc3 *dwc)
{
int reg;
+ dwc3_gadget_set_link_state(dwc, DWC3_LINK_STATE_RX_DET);
+
reg = dwc3_readl(dwc->regs, DWC3_DCTL);
reg &= ~DWC3_DCTL_INITU1ENA;
- dwc3_writel(dwc->regs, DWC3_DCTL, reg);
-
reg &= ~DWC3_DCTL_INITU2ENA;
- dwc3_writel(dwc->regs, DWC3_DCTL, reg);
+ dwc3_gadget_dctl_write_safe(dwc, reg);
dwc3_disconnect_gadget(dwc);
@@ -2816,7 +2826,7 @@ static void dwc3_gadget_reset_interrupt(struct dwc3 *dwc)
reg = dwc3_readl(dwc->regs, DWC3_DCTL);
reg &= ~DWC3_DCTL_TSTCTRL_MASK;
- dwc3_writel(dwc->regs, DWC3_DCTL, reg);
+ dwc3_gadget_dctl_write_safe(dwc, reg);
dwc->test_mode = false;
dwc3_clear_stall_all_ep(dwc);
@@ -2920,11 +2930,11 @@ static void dwc3_gadget_conndone_interrupt(struct dwc3 *dwc)
if (dwc->has_lpm_erratum && dwc->revision >= DWC3_REVISION_240A)
reg |= DWC3_DCTL_NYET_THRES(dwc->lpm_nyet_threshold);
- dwc3_writel(dwc->regs, DWC3_DCTL, reg);
+ dwc3_gadget_dctl_write_safe(dwc, reg);
} else {
reg = dwc3_readl(dwc->regs, DWC3_DCTL);
reg &= ~DWC3_DCTL_HIRD_THRES_MASK;
- dwc3_writel(dwc->regs, DWC3_DCTL, reg);
+ dwc3_gadget_dctl_write_safe(dwc, reg);
}
dep = dwc->eps[0];
@@ -3033,7 +3043,7 @@ static void dwc3_gadget_linksts_change_interrupt(struct dwc3 *dwc,
reg &= ~u1u2;
- dwc3_writel(dwc->regs, DWC3_DCTL, reg);
+ dwc3_gadget_dctl_write_safe(dwc, reg);
break;
default:
/* do nothing */
diff --git a/drivers/usb/dwc3/gadget.h b/drivers/usb/dwc3/gadget.h
index 5faf4d1249e0..fbc7d8013f0b 100644
--- a/drivers/usb/dwc3/gadget.h
+++ b/drivers/usb/dwc3/gadget.h
@@ -127,4 +127,18 @@ static inline void dwc3_gadget_ep_get_transfer_index(struct dwc3_ep *dep)
dep->resource_index = DWC3_DEPCMD_GET_RSC_IDX(res_id);
}
+/**
+ * dwc3_gadget_dctl_write_safe - write to DCTL without triggering a link state change
+ * @dwc: pointer to our context structure
+ * @value: value to write to DCTL
+ *
+ * Use this function when doing a read-modify-write of DCTL. It masks
+ * out ULSTCHNGREQ so that no link state change request is sent.
+ */
+static inline void dwc3_gadget_dctl_write_safe(struct dwc3 *dwc, u32 value)
+{
+ value &= ~DWC3_DCTL_ULSTCHNGREQ_MASK;
+ dwc3_writel(dwc->regs, DWC3_DCTL, value);
+}
+
#endif /* __DRIVERS_USB_DWC3_GADGET_H */
diff --git a/drivers/usb/dwc3/host.c b/drivers/usb/dwc3/host.c
index 5567ed2cddbe..fa252870c926 100644
--- a/drivers/usb/dwc3/host.c
+++ b/drivers/usb/dwc3/host.c
@@ -88,10 +88,10 @@ int dwc3_host_init(struct dwc3 *dwc)
memset(props, 0, sizeof(struct property_entry) * ARRAY_SIZE(props));
if (dwc->usb3_lpm_capable)
- props[prop_idx++].name = "usb3-lpm-capable";
+ props[prop_idx++] = PROPERTY_ENTRY_BOOL("usb3-lpm-capable");
if (dwc->usb2_lpm_disable)
- props[prop_idx++].name = "usb2-lpm-disable";
+ props[prop_idx++] = PROPERTY_ENTRY_BOOL("usb2-lpm-disable");
/**
* WORKAROUND: dwc3 revisions <=3.00a have a limitation
@@ -103,7 +103,7 @@ int dwc3_host_init(struct dwc3 *dwc)
* This following flag tells XHCI to do just that.
*/
if (dwc->revision <= DWC3_REVISION_300A)
- props[prop_idx++].name = "quirk-broken-port-ped";
+ props[prop_idx++] = PROPERTY_ENTRY_BOOL("quirk-broken-port-ped");
if (prop_idx) {
ret = platform_device_add_properties(xhci, props);
diff --git a/drivers/usb/early/xhci-dbc.c b/drivers/usb/early/xhci-dbc.c
index cac991173ac0..971c6b92484a 100644
--- a/drivers/usb/early/xhci-dbc.c
+++ b/drivers/usb/early/xhci-dbc.c
@@ -971,7 +971,7 @@ static int __init xdbc_init(void)
goto free_and_quit;
}
- base = ioremap_nocache(xdbc.xhci_start, xdbc.xhci_length);
+ base = ioremap(xdbc.xhci_start, xdbc.xhci_length);
if (!base) {
xdbc_trace("failed to remap the io address\n");
ret = -ENOMEM;
diff --git a/drivers/usb/gadget/Kconfig b/drivers/usb/gadget/Kconfig
index 02ff850278b1..c6db0a0a340c 100644
--- a/drivers/usb/gadget/Kconfig
+++ b/drivers/usb/gadget/Kconfig
@@ -483,34 +483,6 @@ config USB_CONFIGFS_F_TCM
Both protocols can work on USB2.0 and USB3.0.
UAS utilizes the USB 3.0 feature called streams support.
-choice
- tristate "USB Gadget precomposed configurations"
- default USB_ETH
- optional
- help
- A Linux "Gadget Driver" talks to the USB Peripheral Controller
- driver through the abstract "gadget" API. Some other operating
- systems call these "client" drivers, of which "class drivers"
- are a subset (implementing a USB device class specification).
- A gadget driver implements one or more USB functions using
- the peripheral hardware.
-
- Gadget drivers are hardware-neutral, or "platform independent",
- except that they sometimes must understand quirks or limitations
- of the particular controllers they work with. For example, when
- a controller doesn't support alternate configurations or provide
- enough of the right types of endpoints, the gadget driver might
- not be able work with that controller, or might need to implement
- a less common variant of a device class protocol.
-
- The available choices each represent a single precomposed USB
- gadget configuration. In the device model, each option contains
- both the device instantiation as a child for a USB gadget
- controller, and the relevant drivers for each function declared
- by the device.
-
source "drivers/usb/gadget/legacy/Kconfig"
-endchoice
-
endif # USB_GADGET
diff --git a/drivers/usb/gadget/configfs.c b/drivers/usb/gadget/configfs.c
index ab9ac48a751a..32b637e3e1fa 100644
--- a/drivers/usb/gadget/configfs.c
+++ b/drivers/usb/gadget/configfs.c
@@ -293,6 +293,47 @@ err:
return ret;
}
+static ssize_t gadget_dev_desc_max_speed_show(struct config_item *item,
+ char *page)
+{
+ enum usb_device_speed speed = to_gadget_info(item)->composite.max_speed;
+
+ return sprintf(page, "%s\n", usb_speed_string(speed));
+}
+
+static ssize_t gadget_dev_desc_max_speed_store(struct config_item *item,
+ const char *page, size_t len)
+{
+ struct gadget_info *gi = to_gadget_info(item);
+
+ mutex_lock(&gi->lock);
+
+ /* Prevent changing max_speed after the driver is bound */
+ if (gi->composite.gadget_driver.udc_name)
+ goto err;
+
+ if (strncmp(page, "super-speed-plus", 16) == 0)
+ gi->composite.max_speed = USB_SPEED_SUPER_PLUS;
+ else if (strncmp(page, "super-speed", 11) == 0)
+ gi->composite.max_speed = USB_SPEED_SUPER;
+ else if (strncmp(page, "high-speed", 10) == 0)
+ gi->composite.max_speed = USB_SPEED_HIGH;
+ else if (strncmp(page, "full-speed", 10) == 0)
+ gi->composite.max_speed = USB_SPEED_FULL;
+ else if (strncmp(page, "low-speed", 9) == 0)
+ gi->composite.max_speed = USB_SPEED_LOW;
+ else
+ goto err;
+
+ gi->composite.gadget_driver.max_speed = gi->composite.max_speed;
+
+ mutex_unlock(&gi->lock);
+ return len;
+err:
+ mutex_unlock(&gi->lock);
+ return -EINVAL;
+}
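One subtlety in the parser above: the longer literal must be tested first, because strncmp() with the shorter prefix length also matches the longer input. A standalone demonstration (hypothetical input string):

```c
/* Why "super-speed-plus" must be tested before "super-speed":
 * strncmp() with the shorter length treats the longer string as a
 * match too. Input value is assumed for illustration.
 */
#include <stdio.h>
#include <string.h>

int main(void)
{
    const char *page = "super-speed-plus";

    /* prints 1: only the first 11 bytes are compared */
    printf("%d\n", strncmp(page, "super-speed", 11) == 0);
    return 0;
}
```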
+
CONFIGFS_ATTR(gadget_dev_desc_, bDeviceClass);
CONFIGFS_ATTR(gadget_dev_desc_, bDeviceSubClass);
CONFIGFS_ATTR(gadget_dev_desc_, bDeviceProtocol);
@@ -302,6 +343,7 @@ CONFIGFS_ATTR(gadget_dev_desc_, idProduct);
CONFIGFS_ATTR(gadget_dev_desc_, bcdDevice);
CONFIGFS_ATTR(gadget_dev_desc_, bcdUSB);
CONFIGFS_ATTR(gadget_dev_desc_, UDC);
+CONFIGFS_ATTR(gadget_dev_desc_, max_speed);
static struct configfs_attribute *gadget_root_attrs[] = {
&gadget_dev_desc_attr_bDeviceClass,
@@ -313,6 +355,7 @@ static struct configfs_attribute *gadget_root_attrs[] = {
&gadget_dev_desc_attr_bcdDevice,
&gadget_dev_desc_attr_bcdUSB,
&gadget_dev_desc_attr_UDC,
+ &gadget_dev_desc_attr_max_speed,
NULL,
};
diff --git a/drivers/usb/gadget/function/f_ecm.c b/drivers/usb/gadget/function/f_ecm.c
index 460d5d7c984f..7f5cf488b2b1 100644
--- a/drivers/usb/gadget/function/f_ecm.c
+++ b/drivers/usb/gadget/function/f_ecm.c
@@ -52,6 +52,7 @@ struct f_ecm {
struct usb_ep *notify;
struct usb_request *notify_req;
u8 notify_state;
+ atomic_t notify_count;
bool is_open;
/* FIXME is_open needs some irq-ish locking
@@ -380,7 +381,7 @@ static void ecm_do_notify(struct f_ecm *ecm)
int status;
/* notification already in flight? */
- if (!req)
+ if (atomic_read(&ecm->notify_count))
return;
event = req->buf;
@@ -420,10 +421,10 @@ static void ecm_do_notify(struct f_ecm *ecm)
event->bmRequestType = 0xA1;
event->wIndex = cpu_to_le16(ecm->ctrl_id);
- ecm->notify_req = NULL;
+ atomic_inc(&ecm->notify_count);
status = usb_ep_queue(ecm->notify, req, GFP_ATOMIC);
if (status < 0) {
- ecm->notify_req = req;
+ atomic_dec(&ecm->notify_count);
DBG(cdev, "notify --> %d\n", status);
}
}
@@ -448,17 +449,19 @@ static void ecm_notify_complete(struct usb_ep *ep, struct usb_request *req)
switch (req->status) {
case 0:
/* no fault */
+ atomic_dec(&ecm->notify_count);
break;
case -ECONNRESET:
case -ESHUTDOWN:
+ atomic_set(&ecm->notify_count, 0);
ecm->notify_state = ECM_NOTIFY_NONE;
break;
default:
DBG(cdev, "event %02x --> %d\n",
event->bNotificationType, req->status);
+ atomic_dec(&ecm->notify_count);
break;
}
- ecm->notify_req = req;
ecm_do_notify(ecm);
}
@@ -907,6 +910,11 @@ static void ecm_unbind(struct usb_configuration *c, struct usb_function *f)
usb_free_all_descriptors(f);
+ if (atomic_read(&ecm->notify_count)) {
+ usb_ep_dequeue(ecm->notify, ecm->notify_req);
+ atomic_set(&ecm->notify_count, 0);
+ }
+
kfree(ecm->notify_req->buf);
usb_ep_free_request(ecm->notify, ecm->notify_req);
}
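The pattern introduced here (and mirrored in f_ncm below) replaces the "notify_req == NULL means in flight" convention with an atomic in-flight counter, so the completion handler and unbind can account for queued notifications. A minimal sketch of the accounting (standalone, using C11 atomics; names are illustrative, not the driver's):

```c
/* Sketch of the in-flight notification accounting; usb_ep_queue()
 * and the request handling are elided.
 */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static atomic_int notify_count;

static bool try_send_notify(void)
{
    /* notification already in flight? */
    if (atomic_load(&notify_count))
        return false;

    atomic_fetch_add(&notify_count, 1);
    /* ... queue the request here; on failure, decrement again */
    return true;
}

static void notify_complete(bool shutdown)
{
    if (shutdown)
        atomic_store(&notify_count, 0); /* -ECONNRESET / -ESHUTDOWN */
    else
        atomic_fetch_sub(&notify_count, 1);
}

int main(void)
{
    printf("%d\n", try_send_notify()); /* 1: queued */
    printf("%d\n", try_send_notify()); /* 0: already in flight */
    notify_complete(false);
    printf("%d\n", try_send_notify()); /* 1: queued again */
    return 0;
}
```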
diff --git a/drivers/usb/gadget/function/f_fs.c b/drivers/usb/gadget/function/f_fs.c
index 0bbccac94d6c..6f8b67e61771 100644
--- a/drivers/usb/gadget/function/f_fs.c
+++ b/drivers/usb/gadget/function/f_fs.c
@@ -1062,6 +1062,7 @@ static ssize_t ffs_epfile_io(struct file *file, struct ffs_io_data *io_data)
req->num_sgs = io_data->sgt.nents;
} else {
req->buf = data;
+ req->num_sgs = 0;
}
req->length = data_len;
@@ -1105,6 +1106,7 @@ static ssize_t ffs_epfile_io(struct file *file, struct ffs_io_data *io_data)
req->num_sgs = io_data->sgt.nents;
} else {
req->buf = data;
+ req->num_sgs = 0;
}
req->length = data_len;
diff --git a/drivers/usb/gadget/function/f_ncm.c b/drivers/usb/gadget/function/f_ncm.c
index 2d6e76e4cffa..1d900081b1f0 100644
--- a/drivers/usb/gadget/function/f_ncm.c
+++ b/drivers/usb/gadget/function/f_ncm.c
@@ -53,6 +53,7 @@ struct f_ncm {
struct usb_ep *notify;
struct usb_request *notify_req;
u8 notify_state;
+ atomic_t notify_count;
bool is_open;
const struct ndp_parser_opts *parser_opts;
@@ -547,7 +548,7 @@ static void ncm_do_notify(struct f_ncm *ncm)
int status;
/* notification already in flight? */
- if (!req)
+ if (atomic_read(&ncm->notify_count))
return;
event = req->buf;
@@ -587,7 +588,8 @@ static void ncm_do_notify(struct f_ncm *ncm)
event->bmRequestType = 0xA1;
event->wIndex = cpu_to_le16(ncm->ctrl_id);
- ncm->notify_req = NULL;
+ atomic_inc(&ncm->notify_count);
+
/*
* In double buffering if there is a space in FIFO,
* completion callback can be called right after the call,
@@ -597,7 +599,7 @@ static void ncm_do_notify(struct f_ncm *ncm)
status = usb_ep_queue(ncm->notify, req, GFP_ATOMIC);
spin_lock(&ncm->lock);
if (status < 0) {
- ncm->notify_req = req;
+ atomic_dec(&ncm->notify_count);
DBG(cdev, "notify --> %d\n", status);
}
}
@@ -632,17 +634,19 @@ static void ncm_notify_complete(struct usb_ep *ep, struct usb_request *req)
case 0:
VDBG(cdev, "Notification %02x sent\n",
event->bNotificationType);
+ atomic_dec(&ncm->notify_count);
break;
case -ECONNRESET:
case -ESHUTDOWN:
+ atomic_set(&ncm->notify_count, 0);
ncm->notify_state = NCM_NOTIFY_NONE;
break;
default:
DBG(cdev, "event %02x --> %d\n",
event->bNotificationType, req->status);
+ atomic_dec(&ncm->notify_count);
break;
}
- ncm->notify_req = req;
ncm_do_notify(ncm);
spin_unlock(&ncm->lock);
}
@@ -1649,6 +1653,11 @@ static void ncm_unbind(struct usb_configuration *c, struct usb_function *f)
ncm_string_defs[0].id = 0;
usb_free_all_descriptors(f);
+ if (atomic_read(&ncm->notify_count)) {
+ usb_ep_dequeue(ncm->notify, ncm->notify_req);
+ atomic_set(&ncm->notify_count, 0);
+ }
+
kfree(ncm->notify_req->buf);
usb_ep_free_request(ncm->notify, ncm->notify_req);
}
diff --git a/drivers/usb/gadget/function/u_audio.c b/drivers/usb/gadget/function/u_audio.c
index 7ec6a996af26..6d956f190f5a 100644
--- a/drivers/usb/gadget/function/u_audio.c
+++ b/drivers/usb/gadget/function/u_audio.c
@@ -239,18 +239,6 @@ static snd_pcm_uframes_t uac_pcm_pointer(struct snd_pcm_substream *substream)
return bytes_to_frames(substream->runtime, prm->hw_ptr);
}
-static int uac_pcm_hw_params(struct snd_pcm_substream *substream,
- struct snd_pcm_hw_params *hw_params)
-{
- return snd_pcm_lib_malloc_pages(substream,
- params_buffer_bytes(hw_params));
-}
-
-static int uac_pcm_hw_free(struct snd_pcm_substream *substream)
-{
- return snd_pcm_lib_free_pages(substream);
-}
-
static int uac_pcm_open(struct snd_pcm_substream *substream)
{
struct snd_uac_chip *uac = snd_pcm_substream_chip(substream);
@@ -326,9 +314,6 @@ static int uac_pcm_null(struct snd_pcm_substream *substream)
static const struct snd_pcm_ops uac_pcm_ops = {
.open = uac_pcm_open,
.close = uac_pcm_null,
- .ioctl = snd_pcm_lib_ioctl,
- .hw_params = uac_pcm_hw_params,
- .hw_free = uac_pcm_hw_free,
.trigger = uac_pcm_trigger,
.pointer = uac_pcm_pointer,
.prepare = uac_pcm_null,
@@ -422,7 +407,7 @@ int u_audio_start_playback(struct g_audio *audio_dev)
struct usb_ep *ep;
struct uac_rtd_params *prm;
struct uac_params *params = &audio_dev->params;
- unsigned int factor, rate;
+ unsigned int factor;
const struct usb_endpoint_descriptor *ep_desc;
int req_len, i;
@@ -441,13 +426,15 @@ int u_audio_start_playback(struct g_audio *audio_dev)
/* pre-compute some values for iso_complete() */
uac->p_framesize = params->p_ssize *
num_channels(params->p_chmask);
- rate = params->p_srate * uac->p_framesize;
uac->p_interval = factor / (1 << (ep_desc->bInterval - 1));
- uac->p_pktsize = min_t(unsigned int, rate / uac->p_interval,
+ uac->p_pktsize = min_t(unsigned int,
+ uac->p_framesize *
+ (params->p_srate / uac->p_interval),
prm->max_psize);
if (uac->p_pktsize < prm->max_psize)
- uac->p_pktsize_residue = rate % uac->p_interval;
+ uac->p_pktsize_residue = uac->p_framesize *
+ (params->p_srate % uac->p_interval);
else
uac->p_pktsize_residue = 0;
@@ -584,8 +571,8 @@ int g_audio_setup(struct g_audio *g_audio, const char *pcm_name,
strlcpy(card->shortname, card_name, sizeof(card->shortname));
sprintf(card->longname, "%s %i", card_name, card->dev->id);
- snd_pcm_lib_preallocate_pages_for_all(pcm, SNDRV_DMA_TYPE_CONTINUOUS,
- NULL, 0, BUFF_SIZE_MAX);
+ snd_pcm_set_managed_buffer_all(pcm, SNDRV_DMA_TYPE_CONTINUOUS,
+ NULL, 0, BUFF_SIZE_MAX);
err = snd_card_register(card);
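
The u_audio packet-size rework divides the frame rate rather than the byte rate, so each isochronous packet carries a whole number of audio frames and the residue stays frame-aligned. An illustrative userspace check with hypothetical figures:

/* Why the reordered arithmetic matters: the old formula can yield a
 * packet size that splits an audio frame across packets. */
#include <stdio.h>

int main(void)
{
	unsigned int framesize = 6;	/* e.g. 3 ch x 16-bit samples */
	unsigned int srate = 44100;	/* frames per second */
	unsigned int interval = 8000;	/* packets per second (125 us) */

	unsigned int old_pkt = (srate * framesize) / interval;
	unsigned int new_pkt = framesize * (srate / interval);

	printf("old: %u bytes (%.2f frames)\n", old_pkt,
	       (double)old_pkt / framesize);	/* 33 bytes = 5.5 frames */
	printf("new: %u bytes (%u frames)\n", new_pkt,
	       new_pkt / framesize);		/* 30 bytes = 5 frames */
	return 0;
}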
diff --git a/drivers/usb/gadget/legacy/Kconfig b/drivers/usb/gadget/legacy/Kconfig
index 119a4e47681f..6e7e1a9202e6 100644
--- a/drivers/usb/gadget/legacy/Kconfig
+++ b/drivers/usb/gadget/legacy/Kconfig
@@ -14,6 +14,32 @@
# both kinds of controller can also support "USB On-the-Go" (CONFIG_USB_OTG).
#
+choice
+ tristate "USB Gadget precomposed configurations"
+ default USB_ETH
+ optional
+ help
+ A Linux "Gadget Driver" talks to the USB Peripheral Controller
+ driver through the abstract "gadget" API. Some other operating
+ systems call these "client" drivers, of which "class drivers"
+ are a subset (implementing a USB device class specification).
+ A gadget driver implements one or more USB functions using
+ the peripheral hardware.
+
+ Gadget drivers are hardware-neutral, or "platform independent",
+ except that they sometimes must understand quirks or limitations
+ of the particular controllers they work with. For example, when
+ a controller doesn't support alternate configurations or provide
+ enough of the right types of endpoints, the gadget driver might
+ not be able to work with that controller, or might need to implement
+ a less common variant of a device class protocol.
+
+ The available choices each represent a single precomposed USB
+ gadget configuration. In the device model, each option contains
+ both the device instantiation as a child of a USB gadget
+ controller, and the relevant drivers for each function declared
+ by the device.
+
config USB_ZERO
tristate "Gadget Zero (DEVELOPMENT)"
select USB_LIBCOMPOSITE
@@ -489,3 +515,5 @@ config USB_G_WEBCAM
Say "y" to link the driver statically, or "m" to build a
dynamically linked module called "g_webcam".
+
+endchoice
diff --git a/drivers/usb/gadget/legacy/cdc2.c b/drivers/usb/gadget/legacy/cdc2.c
index da1c37933ca1..8d7a556ece30 100644
--- a/drivers/usb/gadget/legacy/cdc2.c
+++ b/drivers/usb/gadget/legacy/cdc2.c
@@ -225,7 +225,7 @@ static struct usb_composite_driver cdc_driver = {
.name = "g_cdc",
.dev = &device_desc,
.strings = dev_strings,
- .max_speed = USB_SPEED_HIGH,
+ .max_speed = USB_SPEED_SUPER,
.bind = cdc_bind,
.unbind = cdc_unbind,
};
diff --git a/drivers/usb/gadget/legacy/g_ffs.c b/drivers/usb/gadget/legacy/g_ffs.c
index b640ed3fcf70..ae6d8f7092b8 100644
--- a/drivers/usb/gadget/legacy/g_ffs.c
+++ b/drivers/usb/gadget/legacy/g_ffs.c
@@ -149,7 +149,7 @@ static struct usb_composite_driver gfs_driver = {
.name = DRIVER_NAME,
.dev = &gfs_dev_desc,
.strings = gfs_dev_strings,
- .max_speed = USB_SPEED_HIGH,
+ .max_speed = USB_SPEED_SUPER,
.bind = gfs_bind,
.unbind = gfs_unbind,
};
diff --git a/drivers/usb/gadget/legacy/multi.c b/drivers/usb/gadget/legacy/multi.c
index 50515f9e1022..ec9749845660 100644
--- a/drivers/usb/gadget/legacy/multi.c
+++ b/drivers/usb/gadget/legacy/multi.c
@@ -482,7 +482,7 @@ static struct usb_composite_driver multi_driver = {
.name = "g_multi",
.dev = &device_desc,
.strings = dev_strings,
- .max_speed = USB_SPEED_HIGH,
+ .max_speed = USB_SPEED_SUPER,
.bind = multi_bind,
.unbind = multi_unbind,
.needs_serial = 1,
diff --git a/drivers/usb/gadget/legacy/ncm.c b/drivers/usb/gadget/legacy/ncm.c
index 8465f081e921..c61e71ba7045 100644
--- a/drivers/usb/gadget/legacy/ncm.c
+++ b/drivers/usb/gadget/legacy/ncm.c
@@ -197,7 +197,7 @@ static struct usb_composite_driver ncm_driver = {
.name = "g_ncm",
.dev = &device_desc,
.strings = dev_strings,
- .max_speed = USB_SPEED_HIGH,
+ .max_speed = USB_SPEED_SUPER,
.bind = gncm_bind,
.unbind = gncm_unbind,
};
diff --git a/drivers/usb/gadget/udc/amd5536udc_pci.c b/drivers/usb/gadget/udc/amd5536udc_pci.c
index 57b6f66331cf..bfd1c9e80a1f 100644
--- a/drivers/usb/gadget/udc/amd5536udc_pci.c
+++ b/drivers/usb/gadget/udc/amd5536udc_pci.c
@@ -116,7 +116,7 @@ static int udc_pci_probe(
goto err_memreg;
}
- dev->virt_addr = ioremap_nocache(resource, len);
+ dev->virt_addr = ioremap(resource, len);
if (!dev->virt_addr) {
dev_dbg(&pdev->dev, "start address cannot be mapped\n");
retval = -EFAULT;
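
ioremap_nocache() had long been a plain alias for ioremap(), which returns an uncached MMIO mapping by default, so the conversions throughout this series are mechanical renames. A minimal sketch of the claim/map pattern these probe paths follow (names are illustrative):

#include <linux/io.h>
#include <linux/ioport.h>

static void __iomem *my_map(resource_size_t start, resource_size_t len)
{
	void __iomem *base;

	if (!request_mem_region(start, len, "my-device"))
		return NULL;		/* region already claimed */

	base = ioremap(start, len);	/* uncached MMIO mapping */
	if (!base)
		release_mem_region(start, len);

	return base;
}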
diff --git a/drivers/usb/gadget/udc/atmel_usba_udc.c b/drivers/usb/gadget/udc/atmel_usba_udc.c
index 8a42768e3213..6e0432141c40 100644
--- a/drivers/usb/gadget/udc/atmel_usba_udc.c
+++ b/drivers/usb/gadget/udc/atmel_usba_udc.c
@@ -1122,7 +1122,7 @@ static struct usb_endpoint_descriptor usba_ep0_desc = {
.bInterval = 1,
};
-static struct usb_gadget usba_gadget_template = {
+static const struct usb_gadget usba_gadget_template = {
.ops = &usba_udc_ops,
.max_speed = USB_SPEED_HIGH,
.name = "atmel_usba_udc",
diff --git a/drivers/usb/gadget/udc/core.c b/drivers/usb/gadget/udc/core.c
index 51fa614b4079..9b11046480fe 100644
--- a/drivers/usb/gadget/udc/core.c
+++ b/drivers/usb/gadget/udc/core.c
@@ -1414,6 +1414,8 @@ int usb_gadget_probe_driver(struct usb_gadget_driver *driver)
}
mutex_unlock(&udc_lock);
+ if (ret)
+ pr_warn("udc-core: couldn't find an available UDC or it's busy\n");
return ret;
found:
ret = udc_bind_to_driver(udc, driver);
diff --git a/drivers/usb/gadget/udc/goku_udc.c b/drivers/usb/gadget/udc/goku_udc.c
index c3721225b61e..4a46f661d0e4 100644
--- a/drivers/usb/gadget/udc/goku_udc.c
+++ b/drivers/usb/gadget/udc/goku_udc.c
@@ -1782,7 +1782,7 @@ static int goku_probe(struct pci_dev *pdev, const struct pci_device_id *id)
}
dev->got_region = 1;
- base = ioremap_nocache(resource, len);
+ base = ioremap(resource, len);
if (base == NULL) {
DBG(dev, "can't map memory\n");
retval = -EFAULT;
diff --git a/drivers/usb/gadget/udc/gr_udc.c b/drivers/usb/gadget/udc/gr_udc.c
index 64d80c65bb96..aaf975c809bf 100644
--- a/drivers/usb/gadget/udc/gr_udc.c
+++ b/drivers/usb/gadget/udc/gr_udc.c
@@ -2175,8 +2175,6 @@ static int gr_probe(struct platform_device *pdev)
return -ENOMEM;
}
- spin_lock(&dev->lock);
-
/* Inside lock so that no gadget can use this udc until probe is done */
retval = usb_add_gadget_udc(dev->dev, &dev->gadget);
if (retval) {
@@ -2185,15 +2183,21 @@ static int gr_probe(struct platform_device *pdev)
}
dev->added = 1;
+ spin_lock(&dev->lock);
+
retval = gr_udc_init(dev);
- if (retval)
+ if (retval) {
+ spin_unlock(&dev->lock);
goto out;
-
- gr_dfs_create(dev);
+ }
/* Clear all interrupt enables that might be left on since last boot */
gr_disable_interrupts_and_pullup(dev);
+ spin_unlock(&dev->lock);
+
+ gr_dfs_create(dev);
+
retval = gr_request_irq(dev, dev->irq);
if (retval) {
dev_err(dev->dev, "Failed to request irq %d\n", dev->irq);
@@ -2222,8 +2226,6 @@ static int gr_probe(struct platform_device *pdev)
dev_info(dev->dev, "regs: %p, irq %d\n", dev->regs, dev->irq);
out:
- spin_unlock(&dev->lock);
-
if (retval)
gr_remove(pdev);
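
The gr_udc rework narrows the lock scope: usb_add_gadget_udc(), gr_dfs_create() and gr_request_irq() may sleep, so they must run outside the spinlock, while the register-level init stays inside it. A minimal sketch of the rule, with hypothetical names:

#include <linux/spinlock.h>
#include <linux/interrupt.h>

struct my_udc {				/* hypothetical device */
	spinlock_t lock;
	int irq;
};

static irqreturn_t my_irq(int irq, void *data)
{
	return IRQ_HANDLED;
}

static void my_hw_init(struct my_udc *udc)
{
	/* register writes only: safe under a spinlock */
}

static int my_probe_body(struct my_udc *udc)
{
	spin_lock(&udc->lock);		/* atomic context from here */
	my_hw_init(udc);
	spin_unlock(&udc->lock);	/* drop before anything that sleeps */

	/* request_irq() may sleep, so it must run unlocked */
	return request_irq(udc->irq, my_irq, IRQF_SHARED, "my-udc", udc);
}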
diff --git a/drivers/usb/gadget/udc/net2272.c b/drivers/usb/gadget/udc/net2272.c
index 247de0faaeb7..a8273b589456 100644
--- a/drivers/usb/gadget/udc/net2272.c
+++ b/drivers/usb/gadget/udc/net2272.c
@@ -2323,7 +2323,7 @@ net2272_rdk1_probe(struct pci_dev *pdev, struct net2272 *dev)
goto err;
}
- mem_mapped_addr[i] = ioremap_nocache(resource, len);
+ mem_mapped_addr[i] = ioremap(resource, len);
if (mem_mapped_addr[i] == NULL) {
release_mem_region(resource, len);
dev_dbg(dev->dev, "can't map memory\n");
@@ -2401,7 +2401,7 @@ net2272_rdk2_probe(struct pci_dev *pdev, struct net2272 *dev)
goto err;
}
- mem_mapped_addr[i] = ioremap_nocache(resource, len);
+ mem_mapped_addr[i] = ioremap(resource, len);
if (mem_mapped_addr[i] == NULL) {
release_mem_region(resource, len);
dev_dbg(dev->dev, "can't map memory\n");
@@ -2625,7 +2625,7 @@ net2272_plat_probe(struct platform_device *pdev)
ret = -EBUSY;
goto err;
}
- dev->base_addr = ioremap_nocache(base, len);
+ dev->base_addr = ioremap(base, len);
if (!dev->base_addr) {
dev_dbg(dev->dev, "can't map memory\n");
ret = -EFAULT;
diff --git a/drivers/usb/gadget/udc/net2280.c b/drivers/usb/gadget/udc/net2280.c
index 51efee21915f..1fd1b9186e46 100644
--- a/drivers/usb/gadget/udc/net2280.c
+++ b/drivers/usb/gadget/udc/net2280.c
@@ -3659,7 +3659,7 @@ static int net2280_probe(struct pci_dev *pdev, const struct pci_device_id *id)
* 8051 code into the chip, e.g. to turn on PCI PM.
*/
- base = ioremap_nocache(resource, len);
+ base = ioremap(resource, len);
if (base == NULL) {
ep_dbg(dev, "can't map memory\n");
retval = -EFAULT;
diff --git a/drivers/usb/gadget/udc/omap_udc.c b/drivers/usb/gadget/udc/omap_udc.c
index f36f0730afab..bd12417996db 100644
--- a/drivers/usb/gadget/udc/omap_udc.c
+++ b/drivers/usb/gadget/udc/omap_udc.c
@@ -2757,7 +2757,7 @@ static int omap_udc_probe(struct platform_device *pdev)
/* NOTE: "knows" the order of the resources! */
if (!request_mem_region(pdev->resource[0].start,
- pdev->resource[0].end - pdev->resource[0].start + 1,
+ resource_size(&pdev->resource[0]),
driver_name)) {
DBG("request_mem_region failed\n");
return -EBUSY;
@@ -2934,7 +2934,7 @@ cleanup0:
}
release_mem_region(pdev->resource[0].start,
- pdev->resource[0].end - pdev->resource[0].start + 1);
+ resource_size(&pdev->resource[0]));
return status;
}
@@ -2950,7 +2950,7 @@ static int omap_udc_remove(struct platform_device *pdev)
wait_for_completion(&done);
release_mem_region(pdev->resource[0].start,
- pdev->resource[0].end - pdev->resource[0].start + 1);
+ resource_size(&pdev->resource[0]));
return 0;
}
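
resource_size() encapsulates the inclusive-bounds arithmetic that the old open-coded expression repeated three times, avoiding the classic off-by-one. For reference, it is defined in <linux/ioport.h> as:

static inline resource_size_t resource_size(const struct resource *res)
{
	return res->end - res->start + 1;
}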
diff --git a/drivers/usb/host/Kconfig b/drivers/usb/host/Kconfig
index 8d730180db06..55bdfdf11e4c 100644
--- a/drivers/usb/host/Kconfig
+++ b/drivers/usb/host/Kconfig
@@ -186,7 +186,7 @@ config USB_EHCI_FSL
config USB_EHCI_MXC
tristate "Support for Freescale i.MX on-chip EHCI USB controller"
- depends on ARCH_MXC
+ depends on ARCH_MXC || COMPILE_TEST
select USB_EHCI_ROOT_HUB_TT
---help---
Variation of ARC USB block used in some Freescale chips.
@@ -210,8 +210,8 @@ config USB_EHCI_HCD_OMAP
config USB_EHCI_HCD_ORION
tristate "Support for Marvell EBU on-chip EHCI USB controller"
- depends on USB_EHCI_HCD && (PLAT_ORION || ARCH_MVEBU)
- default y
+ depends on USB_EHCI_HCD && (PLAT_ORION || ARCH_MVEBU || COMPILE_TEST)
+ default y if (PLAT_ORION || ARCH_MVEBU)
---help---
Enables support for the on-chip EHCI controller on Marvell's
embedded ARM SoCs, including Orion, Kirkwood, Dove, Armada XP,
@@ -221,15 +221,15 @@ config USB_EHCI_HCD_ORION
config USB_EHCI_HCD_SPEAR
tristate "Support for ST SPEAr on-chip EHCI USB controller"
- depends on USB_EHCI_HCD && PLAT_SPEAR
- default y
+ depends on USB_EHCI_HCD && (PLAT_SPEAR || COMPILE_TEST)
+ default y if PLAT_SPEAR
---help---
Enables support for the on-chip EHCI controller on
ST SPEAr chips.
config USB_EHCI_HCD_STI
tristate "Support for ST STiHxxx on-chip EHCI USB controller"
- depends on ARCH_STI && OF
+ depends on (ARCH_STI || COMPILE_TEST) && OF
select GENERIC_PHY
select USB_EHCI_HCD_PLATFORM
help
@@ -238,8 +238,8 @@ config USB_EHCI_HCD_STI
config USB_EHCI_HCD_AT91
tristate "Support for Atmel on-chip EHCI USB controller"
- depends on USB_EHCI_HCD && ARCH_AT91
- default y
+ depends on USB_EHCI_HCD && (ARCH_AT91 || COMPILE_TEST)
+ default y if ARCH_AT91
---help---
Enables support for the on-chip EHCI controller on
Atmel chips.
@@ -263,20 +263,20 @@ config USB_EHCI_HCD_PPC_OF
config USB_EHCI_SH
bool "EHCI support for SuperH USB controller"
- depends on SUPERH
+ depends on SUPERH || COMPILE_TEST
---help---
Enables support for the on-chip EHCI controller on the SuperH.
If you use the PCI EHCI controller, this option is not necessary.
config USB_EHCI_EXYNOS
- tristate "EHCI support for Samsung S5P/EXYNOS SoC Series"
- depends on ARCH_S5PV210 || ARCH_EXYNOS
+ tristate "EHCI support for Samsung S5P/Exynos SoC Series"
+ depends on ARCH_S5PV210 || ARCH_EXYNOS || COMPILE_TEST
help
Enable support for the Samsung Exynos SOC's on-chip EHCI controller.
config USB_EHCI_MV
tristate "EHCI support for Marvell PXA/MMP USB controller"
- depends on (ARCH_PXA || ARCH_MMP)
+ depends on ARCH_PXA || ARCH_MMP || COMPILE_TEST
select USB_EHCI_ROOT_HUB_TT
---help---
Enables support for Marvell (including PXA and MMP series) on-chip
@@ -289,7 +289,7 @@ config USB_EHCI_MV
config USB_CNS3XXX_EHCI
bool "Cavium CNS3XXX EHCI Module (DEPRECATED)"
- depends on ARCH_CNS3XXX
+ depends on ARCH_CNS3XXX || COMPILE_TEST
select USB_EHCI_HCD_PLATFORM
---help---
This option is deprecated now and the driver was removed, use
@@ -410,15 +410,15 @@ config USB_OHCI_HCD_OMAP1
config USB_OHCI_HCD_SPEAR
tristate "Support for ST SPEAr on-chip OHCI USB controller"
- depends on USB_OHCI_HCD && PLAT_SPEAR
- default y
+ depends on USB_OHCI_HCD && (PLAT_SPEAR || COMPILE_TEST)
+ default y if PLAT_SPEAR
---help---
Enables support for the on-chip OHCI controller on
ST SPEAr chips.
config USB_OHCI_HCD_STI
tristate "Support for ST STiHxxx on-chip OHCI USB controller"
- depends on ARCH_STI && OF
+ depends on (ARCH_STI || COMPILE_TEST) && OF
select GENERIC_PHY
select USB_OHCI_HCD_PLATFORM
help
@@ -427,8 +427,8 @@ config USB_OHCI_HCD_STI
config USB_OHCI_HCD_S3C2410
tristate "OHCI support for Samsung S3C24xx/S3C64xx SoC series"
- depends on USB_OHCI_HCD && (ARCH_S3C24XX || ARCH_S3C64XX)
- default y
+ depends on USB_OHCI_HCD && (ARCH_S3C24XX || ARCH_S3C64XX || COMPILE_TEST)
+ default y if (ARCH_S3C24XX || ARCH_S3C64XX)
---help---
Enables support for the on-chip OHCI controller on
S3C24xx/S3C64xx chips.
@@ -453,17 +453,17 @@ config USB_OHCI_HCD_PXA27X
config USB_OHCI_HCD_AT91
tristate "Support for Atmel on-chip OHCI USB controller"
- depends on USB_OHCI_HCD && ARCH_AT91 && OF
- default y
+ depends on USB_OHCI_HCD && (ARCH_AT91 || COMPILE_TEST) && OF
+ default y if ARCH_AT91
---help---
Enables support for the on-chip OHCI controller on
Atmel chips.
config USB_OHCI_HCD_OMAP3
tristate "OHCI support for OMAP3 and later chips"
- depends on (ARCH_OMAP3 || ARCH_OMAP4 || SOC_OMAP5)
+ depends on ARCH_OMAP3 || ARCH_OMAP4 || SOC_OMAP5 || COMPILE_TEST
select USB_OHCI_HCD_PLATFORM
- default y
+ default y if ARCH_OMAP3 || ARCH_OMAP4 || SOC_OMAP5
help
This option is deprecated now and the driver was removed, use
USB_OHCI_HCD_PLATFORM instead.
@@ -473,10 +473,10 @@ config USB_OHCI_HCD_OMAP3
config USB_OHCI_HCD_DAVINCI
tristate "OHCI support for TI DaVinci DA8xx"
- depends on ARCH_DAVINCI_DA8XX
+ depends on ARCH_DAVINCI_DA8XX || COMPILE_TEST
depends on USB_OHCI_HCD
select PHY_DA8XX_USB
- default y
+ default y if ARCH_DAVINCI_DA8XX
help
Enables support for the DaVinci DA8xx integrated OHCI
controller. This driver cannot currently be a loadable
@@ -532,7 +532,7 @@ config USB_OHCI_HCD_SSB
config USB_OHCI_SH
bool "OHCI support for SuperH USB controller (DEPRECATED)"
- depends on SUPERH
+ depends on SUPERH || COMPILE_TEST
select USB_OHCI_HCD_PLATFORM
---help---
This option is deprecated now and the driver was removed, use
@@ -542,14 +542,14 @@ config USB_OHCI_SH
If you use the PCI OHCI controller, this option is not necessary.
config USB_OHCI_EXYNOS
- tristate "OHCI support for Samsung S5P/EXYNOS SoC Series"
- depends on ARCH_S5PV210 || ARCH_EXYNOS
+ tristate "OHCI support for Samsung S5P/Exynos SoC Series"
+ depends on ARCH_S5PV210 || ARCH_EXYNOS || COMPILE_TEST
help
Enable support for the Samsung Exynos SOC's on-chip OHCI controller.
config USB_CNS3XXX_OHCI
bool "Cavium CNS3XXX OHCI Module (DEPRECATED)"
- depends on ARCH_CNS3XXX
+ depends on ARCH_CNS3XXX || COMPILE_TEST
select USB_OHCI_HCD_PLATFORM
---help---
This option is deprecated now and the driver was removed, use
diff --git a/drivers/usb/host/ehci-exynos.c b/drivers/usb/host/ehci-exynos.c
index 01debfd03d4a..a4e9abcbdc4f 100644
--- a/drivers/usb/host/ehci-exynos.c
+++ b/drivers/usb/host/ehci-exynos.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0+
/*
- * SAMSUNG EXYNOS USB HOST EHCI Controller
+ * Samsung Exynos USB HOST EHCI Controller
*
* Copyright (C) 2011 Samsung Electronics Co.Ltd
* Author: Jingoo Han <jg1.han@samsung.com>
@@ -21,7 +21,7 @@
#include "ehci.h"
-#define DRIVER_DESC "EHCI EXYNOS driver"
+#define DRIVER_DESC "EHCI Exynos driver"
#define EHCI_INSNREG00(base) (base + 0x90)
#define EHCI_INSNREG00_ENA_INCR16 (0x1 << 25)
diff --git a/drivers/usb/host/ehci-mv.c b/drivers/usb/host/ehci-mv.c
index 66ec1fdf9fe7..bd4f6ef534d9 100644
--- a/drivers/usb/host/ehci-mv.c
+++ b/drivers/usb/host/ehci-mv.c
@@ -11,6 +11,7 @@
#include <linux/clk.h>
#include <linux/err.h>
#include <linux/usb/otg.h>
+#include <linux/usb/of.h>
#include <linux/platform_data/mv_usb.h>
#include <linux/io.h>
@@ -67,6 +68,8 @@ static int mv_ehci_reset(struct usb_hcd *hcd)
{
struct device *dev = hcd->self.controller;
struct ehci_hcd_mv *ehci_mv = hcd_to_ehci_hcd_mv(hcd);
+ struct ehci_hcd *ehci = hcd_to_ehci(hcd);
+ u32 status;
int retval;
if (ehci_mv == NULL) {
@@ -80,6 +83,14 @@ static int mv_ehci_reset(struct usb_hcd *hcd)
if (retval)
dev_err(dev, "ehci_setup failed %d\n", retval);
+ if (of_usb_get_phy_mode(dev->of_node) == USBPHY_INTERFACE_MODE_HSIC) {
+ status = ehci_readl(ehci, &ehci->regs->port_status[0]);
+ status |= PORT_TEST_FORCE;
+ ehci_writel(ehci, status, &ehci->regs->port_status[0]);
+ status &= ~PORT_TEST_FORCE;
+ ehci_writel(ehci, status, &ehci->regs->port_status[0]);
+ }
+
return retval;
}
@@ -116,7 +127,7 @@ static int mv_ehci_probe(struct platform_device *pdev)
ehci_mv->set_vbus = pdata->set_vbus;
}
- ehci_mv->phy = devm_phy_get(&pdev->dev, "usb");
+ ehci_mv->phy = devm_phy_optional_get(&pdev->dev, "usb");
if (IS_ERR(ehci_mv->phy)) {
retval = PTR_ERR(ehci_mv->phy);
if (retval != -EPROBE_DEFER)
@@ -164,7 +175,7 @@ static int mv_ehci_probe(struct platform_device *pdev)
}
ehci = hcd_to_ehci(hcd);
- ehci->caps = (struct ehci_caps *) ehci_mv->cap_regs;
+ ehci->caps = (struct ehci_caps __iomem *) ehci_mv->cap_regs;
if (ehci_mv->mode == MV_USB_MODE_OTG) {
ehci_mv->otg = devm_usb_get_phy(&pdev->dev, USB_PHY_TYPE_USB2);
@@ -246,10 +257,8 @@ static int mv_ehci_remove(struct platform_device *pdev)
MODULE_ALIAS("mv-ehci");
static const struct platform_device_id ehci_id_table[] = {
- {"pxa-u2oehci", PXA_U2OEHCI},
- {"pxa-sph", PXA_SPH},
- {"mmp3-hsic", MMP3_HSIC},
- {"mmp3-fsic", MMP3_FSIC},
+ {"pxa-u2oehci", 0},
+ {"pxa-sph", 0},
{},
};
diff --git a/drivers/usb/host/ehci-pmcmsp.c b/drivers/usb/host/ehci-pmcmsp.c
index a2b610dbedfc..2d462fbbe0a6 100644
--- a/drivers/usb/host/ehci-pmcmsp.c
+++ b/drivers/usb/host/ehci-pmcmsp.c
@@ -107,7 +107,7 @@ static int usb_hcd_msp_map_regs(struct mspusb_device *dev)
if (!request_mem_region(res->start, res_len, "mab regs"))
return -EBUSY;
- dev->mab_regs = ioremap_nocache(res->start, res_len);
+ dev->mab_regs = ioremap(res->start, res_len);
if (dev->mab_regs == NULL) {
retval = -ENOMEM;
goto err1;
@@ -124,7 +124,7 @@ static int usb_hcd_msp_map_regs(struct mspusb_device *dev)
retval = -EBUSY;
goto err2;
}
- dev->usbid_regs = ioremap_nocache(res->start, res_len);
+ dev->usbid_regs = ioremap(res->start, res_len);
if (dev->usbid_regs == NULL) {
retval = -ENOMEM;
goto err3;
@@ -178,7 +178,7 @@ int usb_hcd_msp_probe(const struct hc_driver *driver,
retval = -EBUSY;
goto err1;
}
- hcd->regs = ioremap_nocache(hcd->rsrc_start, hcd->rsrc_len);
+ hcd->regs = ioremap(hcd->rsrc_start, hcd->rsrc_len);
if (!hcd->regs) {
pr_debug("ioremap failed");
retval = -ENOMEM;
diff --git a/drivers/usb/host/ehci-sh.c b/drivers/usb/host/ehci-sh.c
index 2afde14dc425..c25c51d26f26 100644
--- a/drivers/usb/host/ehci-sh.c
+++ b/drivers/usb/host/ehci-sh.c
@@ -8,7 +8,6 @@
*/
#include <linux/platform_device.h>
#include <linux/clk.h>
-#include <linux/platform_data/ehci-sh.h>
struct ehci_sh_priv {
struct clk *iclk, *fclk;
@@ -76,7 +75,6 @@ static int ehci_hcd_sh_probe(struct platform_device *pdev)
{
struct resource *res;
struct ehci_sh_priv *priv;
- struct ehci_sh_platdata *pdata;
struct usb_hcd *hcd;
int irq, ret;
@@ -89,8 +87,6 @@ static int ehci_hcd_sh_probe(struct platform_device *pdev)
goto fail_create_hcd;
}
- pdata = dev_get_platdata(&pdev->dev);
-
/* initialize hcd */
hcd = usb_create_hcd(&ehci_sh_hc_driver, &pdev->dev,
dev_name(&pdev->dev));
@@ -127,9 +123,6 @@ static int ehci_hcd_sh_probe(struct platform_device *pdev)
clk_enable(priv->fclk);
clk_enable(priv->iclk);
- if (pdata && pdata->phy_init)
- pdata->phy_init();
-
ret = usb_add_hcd(hcd, irq, IRQF_SHARED);
if (ret != 0) {
dev_err(&pdev->dev, "Failed to add hcd");
diff --git a/drivers/usb/host/ehci-tegra.c b/drivers/usb/host/ehci-tegra.c
index 4d2cdec4cb78..d6433f206c17 100644
--- a/drivers/usb/host/ehci-tegra.c
+++ b/drivers/usb/host/ehci-tegra.c
@@ -42,12 +42,10 @@ struct tegra_ehci_soc_config {
};
struct tegra_ehci_hcd {
- struct tegra_usb_phy *phy;
struct clk *clk;
struct reset_control *rst;
int port_resuming;
bool needs_double_reset;
- enum tegra_usb_phy_port_speed port_speed;
};
static int tegra_reset_usb_controller(struct platform_device *pdev)
@@ -480,12 +478,6 @@ static int tegra_ehci_probe(struct platform_device *pdev)
}
u_phy->otg->host = hcd_to_bus(hcd);
- err = usb_phy_set_suspend(hcd->usb_phy, 0);
- if (err) {
- dev_err(&pdev->dev, "Failed to power on the phy\n");
- goto cleanup_phy;
- }
-
irq = platform_get_irq(pdev, 0);
if (!irq) {
dev_err(&pdev->dev, "Failed to get IRQ\n");
@@ -521,16 +513,10 @@ static int tegra_ehci_remove(struct platform_device *pdev)
struct tegra_ehci_hcd *tegra =
(struct tegra_ehci_hcd *)hcd_to_ehci(hcd)->priv;
+ usb_remove_hcd(hcd);
otg_set_host(hcd->usb_phy->otg, NULL);
-
usb_phy_shutdown(hcd->usb_phy);
- usb_remove_hcd(hcd);
-
- reset_control_assert(tegra->rst);
- udelay(1);
-
clk_disable_unprepare(tegra->clk);
-
usb_put_hcd(hcd);
return 0;
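
The ehci-tegra remove path now tears down in reverse order of bring-up: the HCD is removed first, so no URB traffic can reach the controller once the PHY is shut down and the clock is gated. A minimal sketch of that ordering (names are illustrative):

#include <linux/clk.h>
#include <linux/usb/hcd.h>
#include <linux/usb/phy.h>

static void my_teardown(struct usb_hcd *hcd, struct clk *clk)
{
	usb_remove_hcd(hcd);		/* stop all bus traffic first */
	usb_phy_shutdown(hcd->usb_phy);	/* nothing touches the PHY now */
	clk_disable_unprepare(clk);	/* finally gate the controller */
	usb_put_hcd(hcd);		/* drop the last reference */
}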
diff --git a/drivers/usb/host/ohci-exynos.c b/drivers/usb/host/ohci-exynos.c
index d5ce98e205c7..bd40e597f256 100644
--- a/drivers/usb/host/ohci-exynos.c
+++ b/drivers/usb/host/ohci-exynos.c
@@ -19,7 +19,7 @@
#include "ohci.h"
-#define DRIVER_DESC "OHCI EXYNOS driver"
+#define DRIVER_DESC "OHCI Exynos driver"
static const char hcd_name[] = "ohci-exynos";
static struct hc_driver __read_mostly exynos_ohci_hc_driver;
diff --git a/drivers/usb/host/oxu210hp-hcd.c b/drivers/usb/host/oxu210hp-hcd.c
index fe09b8626329..120666a0d590 100644
--- a/drivers/usb/host/oxu210hp-hcd.c
+++ b/drivers/usb/host/oxu210hp-hcd.c
@@ -2783,11 +2783,15 @@ static void ehci_port_power(struct oxu_hcd *oxu, int is_on)
return;
oxu_dbg(oxu, "...power%s ports...\n", is_on ? "up" : "down");
- for (port = HCS_N_PORTS(oxu->hcs_params); port > 0; )
- (void) oxu_hub_control(oxu_to_hcd(oxu),
- is_on ? SetPortFeature : ClearPortFeature,
- USB_PORT_FEAT_POWER,
- port--, NULL, 0);
+ for (port = HCS_N_PORTS(oxu->hcs_params); port > 0; ) {
+ if (is_on)
+ oxu_hub_control(oxu_to_hcd(oxu), SetPortFeature,
+ USB_PORT_FEAT_POWER, port--, NULL, 0);
+ else
+ oxu_hub_control(oxu_to_hcd(oxu), ClearPortFeature,
+ USB_PORT_FEAT_POWER, port--, NULL, 0);
+ }
+
msleep(20);
}
diff --git a/drivers/usb/host/pci-quirks.c b/drivers/usb/host/pci-quirks.c
index 6c7f0a876b96..beb2efa71341 100644
--- a/drivers/usb/host/pci-quirks.c
+++ b/drivers/usb/host/pci-quirks.c
@@ -1150,7 +1150,7 @@ static void quirk_usb_handoff_xhci(struct pci_dev *pdev)
if (!mmio_resource_enabled(pdev, 0))
return;
- base = ioremap_nocache(pci_resource_start(pdev, 0), len);
+ base = ioremap(pci_resource_start(pdev, 0), len);
if (base == NULL)
return;
diff --git a/drivers/usb/host/xhci-mtk.c b/drivers/usb/host/xhci-mtk.c
index b18a6baef204..bfbdb3ceed29 100644
--- a/drivers/usb/host/xhci-mtk.c
+++ b/drivers/usb/host/xhci-mtk.c
@@ -488,11 +488,6 @@ static int xhci_mtk_probe(struct platform_device *pdev)
goto disable_clk;
}
- /* Initialize dma_mask and coherent_dma_mask to 32-bits */
- ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));
- if (ret)
- goto disable_clk;
-
hcd = usb_create_hcd(driver, dev, dev_name(dev));
if (!hcd) {
ret = -ENOMEM;
diff --git a/drivers/usb/host/xhci-tegra.c b/drivers/usb/host/xhci-tegra.c
index bf9065438320..8163aefc6c6b 100644
--- a/drivers/usb/host/xhci-tegra.c
+++ b/drivers/usb/host/xhci-tegra.c
@@ -11,6 +11,7 @@
#include <linux/dma-mapping.h>
#include <linux/firmware.h>
#include <linux/interrupt.h>
+#include <linux/iopoll.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of_device.h>
@@ -38,7 +39,15 @@
#define XUSB_CFG_4 0x010
#define XUSB_BASE_ADDR_SHIFT 15
#define XUSB_BASE_ADDR_MASK 0x1ffff
+#define XUSB_CFG_16 0x040
+#define XUSB_CFG_24 0x060
+#define XUSB_CFG_AXI_CFG 0x0f8
#define XUSB_CFG_ARU_C11_CSBRANGE 0x41c
+#define XUSB_CFG_ARU_CONTEXT 0x43c
+#define XUSB_CFG_ARU_CONTEXT_HS_PLS 0x478
+#define XUSB_CFG_ARU_CONTEXT_FS_PLS 0x47c
+#define XUSB_CFG_ARU_CONTEXT_HSFS_SPEED 0x480
+#define XUSB_CFG_ARU_CONTEXT_HSFS_PP 0x484
#define XUSB_CFG_CSB_BASE_ADDR 0x800
/* FPCI mailbox registers */
@@ -62,11 +71,20 @@
#define MBOX_SMI_INTR_EN BIT(3)
/* IPFS registers */
+#define IPFS_XUSB_HOST_MSI_BAR_SZ_0 0x0c0
+#define IPFS_XUSB_HOST_MSI_AXI_BAR_ST_0 0x0c4
+#define IPFS_XUSB_HOST_MSI_FPCI_BAR_ST_0 0x0c8
+#define IPFS_XUSB_HOST_MSI_VEC0_0 0x100
+#define IPFS_XUSB_HOST_MSI_EN_VEC0_0 0x140
#define IPFS_XUSB_HOST_CONFIGURATION_0 0x180
#define IPFS_EN_FPCI BIT(0)
+#define IPFS_XUSB_HOST_FPCI_ERROR_MASKS_0 0x184
#define IPFS_XUSB_HOST_INTR_MASK_0 0x188
#define IPFS_IP_INT_MASK BIT(16)
+#define IPFS_XUSB_HOST_INTR_ENABLE_0 0x198
+#define IPFS_XUSB_HOST_UFPCI_CONFIG_0 0x19c
#define IPFS_XUSB_HOST_CLKGATE_HYSTERESIS_0 0x1bc
+#define IPFS_XUSB_HOST_MCCIF_FIFOCTRL_0 0x1dc
#define CSB_PAGE_SELECT_MASK 0x7fffff
#define CSB_PAGE_SELECT_SHIFT 9
@@ -101,6 +119,8 @@
#define L2IMEMOP_ACTION_SHIFT 24
#define L2IMEMOP_INVALIDATE_ALL (0x40 << L2IMEMOP_ACTION_SHIFT)
#define L2IMEMOP_LOAD_LOCKED_RESULT (0x11 << L2IMEMOP_ACTION_SHIFT)
+#define XUSB_CSB_MEMPOOL_L2IMEMOP_RESULT 0x101a18
+#define L2IMEMOP_RESULT_VLD BIT(31)
#define XUSB_CSB_MP_APMAP 0x10181c
#define APMAP_BOOTPATH BIT(31)
@@ -145,19 +165,32 @@ struct tegra_xusb_phy_type {
unsigned int num;
};
-struct tega_xusb_mbox_regs {
+struct tegra_xusb_mbox_regs {
u16 cmd;
u16 data_in;
u16 data_out;
u16 owner;
};
+struct tegra_xusb_context_soc {
+ struct {
+ const unsigned int *offsets;
+ unsigned int num_offsets;
+ } ipfs;
+
+ struct {
+ const unsigned int *offsets;
+ unsigned int num_offsets;
+ } fpci;
+};
+
struct tegra_xusb_soc {
const char *firmware;
const char * const *supply_names;
unsigned int num_supplies;
const struct tegra_xusb_phy_type *phy_types;
unsigned int num_types;
+ const struct tegra_xusb_context_soc *context;
struct {
struct {
@@ -166,12 +199,17 @@ struct tegra_xusb_soc {
} usb2, ulpi, hsic, usb3;
} ports;
- struct tega_xusb_mbox_regs mbox;
+ struct tegra_xusb_mbox_regs mbox;
bool scale_ss_clock;
bool has_ipfs;
};
+struct tegra_xusb_context {
+ u32 *ipfs;
+ u32 *fpci;
+};
+
struct tegra_xusb {
struct device *dev;
void __iomem *regs;
@@ -218,6 +256,8 @@ struct tegra_xusb {
void *virt;
dma_addr_t phys;
} fw;
+
+ struct tegra_xusb_context context;
};
static struct hc_driver __read_mostly tegra_xhci_hc_driver;
@@ -623,9 +663,9 @@ static irqreturn_t tegra_xusb_mbox_thread(int irq, void *data)
return IRQ_HANDLED;
}
-static void tegra_xusb_config(struct tegra_xusb *tegra,
- struct resource *regs)
+static void tegra_xusb_config(struct tegra_xusb *tegra)
{
+ u32 regs = tegra->hcd->rsrc_start;
u32 value;
if (tegra->soc->has_ipfs) {
@@ -639,7 +679,7 @@ static void tegra_xusb_config(struct tegra_xusb *tegra,
/* Program BAR0 space */
value = fpci_readl(tegra, XUSB_CFG_4);
value &= ~(XUSB_BASE_ADDR_MASK << XUSB_BASE_ADDR_SHIFT);
- value |= regs->start & (XUSB_BASE_ADDR_MASK << XUSB_BASE_ADDR_SHIFT);
+ value |= regs & (XUSB_BASE_ADDR_MASK << XUSB_BASE_ADDR_SHIFT);
fpci_writel(tegra, value, XUSB_CFG_4);
usleep_range(100, 200);
@@ -793,17 +833,34 @@ disable_clk:
return err;
}
-static int tegra_xusb_load_firmware(struct tegra_xusb *tegra)
+#ifdef CONFIG_PM_SLEEP
+static int tegra_xusb_init_context(struct tegra_xusb *tegra)
+{
+ const struct tegra_xusb_context_soc *soc = tegra->soc->context;
+
+ tegra->context.ipfs = devm_kcalloc(tegra->dev, soc->ipfs.num_offsets,
+ sizeof(u32), GFP_KERNEL);
+ if (!tegra->context.ipfs)
+ return -ENOMEM;
+
+	tegra->context.fpci = devm_kcalloc(tegra->dev, soc->fpci.num_offsets,
+ sizeof(u32), GFP_KERNEL);
+ if (!tegra->context.fpci)
+ return -ENOMEM;
+
+ return 0;
+}
+#else
+static inline int tegra_xusb_init_context(struct tegra_xusb *tegra)
+{
+ return 0;
+}
+#endif
+
+static int tegra_xusb_request_firmware(struct tegra_xusb *tegra)
{
- unsigned int code_tag_blocks, code_size_blocks, code_blocks;
struct tegra_xusb_fw_header *header;
- struct device *dev = tegra->dev;
const struct firmware *fw;
- unsigned long timeout;
- time64_t timestamp;
- struct tm time;
- u64 address;
- u32 value;
int err;
err = request_firmware(&fw, tegra->soc->firmware, tegra->dev);
@@ -828,6 +885,26 @@ static int tegra_xusb_load_firmware(struct tegra_xusb *tegra)
memcpy(tegra->fw.virt, fw->data, tegra->fw.size);
release_firmware(fw);
+ return 0;
+}
+
+static int tegra_xusb_load_firmware(struct tegra_xusb *tegra)
+{
+ unsigned int code_tag_blocks, code_size_blocks, code_blocks;
+ struct xhci_cap_regs __iomem *cap = tegra->regs;
+ struct tegra_xusb_fw_header *header;
+ struct device *dev = tegra->dev;
+ struct xhci_op_regs __iomem *op;
+ unsigned long timeout;
+ time64_t timestamp;
+ struct tm time;
+ u64 address;
+ u32 value;
+ int err;
+
+ header = (struct tegra_xusb_fw_header *)tegra->fw.virt;
+ op = tegra->regs + HC_LENGTH(readl(&cap->hc_capbase));
+
if (csb_readl(tegra, XUSB_CSB_MP_ILOAD_BASE_LO) != 0) {
dev_info(dev, "Firmware already loaded, Falcon state %#x\n",
csb_readl(tegra, XUSB_FALC_CPUCTL));
@@ -882,26 +959,37 @@ static int tegra_xusb_load_firmware(struct tegra_xusb *tegra)
csb_writel(tegra, 0, XUSB_FALC_DMACTL);
- msleep(50);
+ /* wait for RESULT_VLD to get set */
+#define tegra_csb_readl(offset) csb_readl(tegra, offset)
+ err = readx_poll_timeout(tegra_csb_readl,
+ XUSB_CSB_MEMPOOL_L2IMEMOP_RESULT, value,
+ value & L2IMEMOP_RESULT_VLD, 100, 10000);
+ if (err < 0) {
+ dev_err(dev, "DMA controller not ready %#010x\n", value);
+ return err;
+ }
+#undef tegra_csb_readl
csb_writel(tegra, le32_to_cpu(header->boot_codetag),
XUSB_FALC_BOOTVEC);
- /* Boot Falcon CPU and wait for it to enter the STOPPED (idle) state. */
- timeout = jiffies + msecs_to_jiffies(5);
-
+ /* Boot Falcon CPU and wait for USBSTS_CNR to get cleared. */
csb_writel(tegra, CPUCTL_STARTCPU, XUSB_FALC_CPUCTL);
- while (time_before(jiffies, timeout)) {
- if (csb_readl(tegra, XUSB_FALC_CPUCTL) == CPUCTL_STATE_STOPPED)
+ timeout = jiffies + msecs_to_jiffies(200);
+
+ do {
+ value = readl(&op->status);
+ if ((value & STS_CNR) == 0)
break;
- usleep_range(100, 200);
- }
+ usleep_range(1000, 2000);
+ } while (time_is_after_jiffies(timeout));
- if (csb_readl(tegra, XUSB_FALC_CPUCTL) != CPUCTL_STATE_STOPPED) {
- dev_err(dev, "Falcon failed to start, state: %#x\n",
- csb_readl(tegra, XUSB_FALC_CPUCTL));
+ value = readl(&op->status);
+ if (value & STS_CNR) {
+ value = csb_readl(tegra, XUSB_FALC_CPUCTL);
+ dev_err(dev, "XHCI controller not read: %#010x\n", value);
return -EIO;
}
@@ -966,11 +1054,37 @@ static int tegra_xusb_powerdomain_init(struct device *dev,
return 0;
}
-static int tegra_xusb_probe(struct platform_device *pdev)
+static int __tegra_xusb_enable_firmware_messages(struct tegra_xusb *tegra)
{
struct tegra_xusb_mbox_msg msg;
- struct resource *regs;
+ int err;
+
+ /* Enable firmware messages from controller. */
+ msg.cmd = MBOX_CMD_MSG_ENABLED;
+ msg.data = 0;
+
+ err = tegra_xusb_mbox_send(tegra, &msg);
+ if (err < 0)
+ dev_err(tegra->dev, "failed to enable messages: %d\n", err);
+
+ return err;
+}
+
+static int tegra_xusb_enable_firmware_messages(struct tegra_xusb *tegra)
+{
+ int err;
+
+ mutex_lock(&tegra->lock);
+ err = __tegra_xusb_enable_firmware_messages(tegra);
+ mutex_unlock(&tegra->lock);
+
+ return err;
+}
+
+static int tegra_xusb_probe(struct platform_device *pdev)
+{
struct tegra_xusb *tegra;
+ struct resource *regs;
struct xhci_hcd *xhci;
unsigned int i, j, k;
struct phy *phy;
@@ -986,6 +1100,10 @@ static int tegra_xusb_probe(struct platform_device *pdev)
mutex_init(&tegra->lock);
tegra->dev = &pdev->dev;
+ err = tegra_xusb_init_context(tegra);
+ if (err < 0)
+ return err;
+
regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
tegra->regs = devm_ioremap_resource(&pdev->dev, regs);
if (IS_ERR(tegra->regs))
@@ -1173,6 +1291,10 @@ static int tegra_xusb_probe(struct platform_device *pdev)
goto put_powerdomains;
}
+ tegra->hcd->regs = tegra->regs;
+ tegra->hcd->rsrc_start = regs->start;
+ tegra->hcd->rsrc_len = resource_size(regs);
+
/*
* This must happen after usb_create_hcd(), because usb_create_hcd()
* will overwrite the drvdata of the device with the hcd it creates.
@@ -1185,19 +1307,6 @@ static int tegra_xusb_probe(struct platform_device *pdev)
goto put_hcd;
}
- pm_runtime_enable(&pdev->dev);
- if (pm_runtime_enabled(&pdev->dev))
- err = pm_runtime_get_sync(&pdev->dev);
- else
- err = tegra_xusb_runtime_resume(&pdev->dev);
-
- if (err < 0) {
- dev_err(&pdev->dev, "failed to enable device: %d\n", err);
- goto disable_phy;
- }
-
- tegra_xusb_config(tegra, regs);
-
/*
* The XUSB Falcon microcontroller can only address 40 bits, so set
* the DMA mask accordingly.
@@ -1205,19 +1314,35 @@ static int tegra_xusb_probe(struct platform_device *pdev)
err = dma_set_mask_and_coherent(tegra->dev, DMA_BIT_MASK(40));
if (err < 0) {
dev_err(&pdev->dev, "failed to set DMA mask: %d\n", err);
- goto put_rpm;
+ goto disable_phy;
+ }
+
+ err = tegra_xusb_request_firmware(tegra);
+ if (err < 0) {
+ dev_err(&pdev->dev, "failed to request firmware: %d\n", err);
+ goto disable_phy;
+ }
+
+ pm_runtime_enable(&pdev->dev);
+
+ if (!pm_runtime_enabled(&pdev->dev))
+ err = tegra_xusb_runtime_resume(&pdev->dev);
+ else
+ err = pm_runtime_get_sync(&pdev->dev);
+
+ if (err < 0) {
+ dev_err(&pdev->dev, "failed to enable device: %d\n", err);
+ goto free_firmware;
}
+ tegra_xusb_config(tegra);
+
err = tegra_xusb_load_firmware(tegra);
if (err < 0) {
dev_err(&pdev->dev, "failed to load firmware: %d\n", err);
goto put_rpm;
}
- tegra->hcd->regs = tegra->regs;
- tegra->hcd->rsrc_start = regs->start;
- tegra->hcd->rsrc_len = resource_size(regs);
-
err = usb_add_hcd(tegra->hcd, tegra->xhci_irq, IRQF_SHARED);
if (err < 0) {
dev_err(&pdev->dev, "failed to add USB HCD: %d\n", err);
@@ -1244,21 +1369,12 @@ static int tegra_xusb_probe(struct platform_device *pdev)
goto put_usb3;
}
- mutex_lock(&tegra->lock);
-
- /* Enable firmware messages from controller. */
- msg.cmd = MBOX_CMD_MSG_ENABLED;
- msg.data = 0;
-
- err = tegra_xusb_mbox_send(tegra, &msg);
+ err = tegra_xusb_enable_firmware_messages(tegra);
if (err < 0) {
dev_err(&pdev->dev, "failed to enable messages: %d\n", err);
- mutex_unlock(&tegra->lock);
goto remove_usb3;
}
- mutex_unlock(&tegra->lock);
-
err = devm_request_threaded_irq(&pdev->dev, tegra->mbox_irq,
tegra_xusb_mbox_irq,
tegra_xusb_mbox_thread, 0,
@@ -1281,6 +1397,9 @@ put_rpm:
tegra_xusb_runtime_suspend(&pdev->dev);
put_hcd:
usb_put_hcd(tegra->hcd);
+free_firmware:
+ dma_free_coherent(&pdev->dev, tegra->fw.size, tegra->fw.virt,
+ tegra->fw.phys);
disable_phy:
tegra_xusb_phy_disable(tegra);
pm_runtime_disable(&pdev->dev);
@@ -1328,22 +1447,176 @@ static int tegra_xusb_remove(struct platform_device *pdev)
}
#ifdef CONFIG_PM_SLEEP
+static bool xhci_hub_ports_suspended(struct xhci_hub *hub)
+{
+ struct device *dev = hub->hcd->self.controller;
+ bool status = true;
+ unsigned int i;
+ u32 value;
+
+ for (i = 0; i < hub->num_ports; i++) {
+ value = readl(hub->ports[i]->addr);
+ if ((value & PORT_PE) == 0)
+ continue;
+
+ if ((value & PORT_PLS_MASK) != XDEV_U3) {
+ dev_info(dev, "%u-%u isn't suspended: %#010x\n",
+ hub->hcd->self.busnum, i + 1, value);
+ status = false;
+ }
+ }
+
+ return status;
+}
+
+static int tegra_xusb_check_ports(struct tegra_xusb *tegra)
+{
+ struct xhci_hcd *xhci = hcd_to_xhci(tegra->hcd);
+ unsigned long flags;
+ int err = 0;
+
+ spin_lock_irqsave(&xhci->lock, flags);
+
+ if (!xhci_hub_ports_suspended(&xhci->usb2_rhub) ||
+ !xhci_hub_ports_suspended(&xhci->usb3_rhub))
+ err = -EBUSY;
+
+ spin_unlock_irqrestore(&xhci->lock, flags);
+
+ return err;
+}
+
+static void tegra_xusb_save_context(struct tegra_xusb *tegra)
+{
+ const struct tegra_xusb_context_soc *soc = tegra->soc->context;
+ struct tegra_xusb_context *ctx = &tegra->context;
+ unsigned int i;
+
+ if (soc->ipfs.num_offsets > 0) {
+ for (i = 0; i < soc->ipfs.num_offsets; i++)
+ ctx->ipfs[i] = ipfs_readl(tegra, soc->ipfs.offsets[i]);
+ }
+
+ if (soc->fpci.num_offsets > 0) {
+ for (i = 0; i < soc->fpci.num_offsets; i++)
+ ctx->fpci[i] = fpci_readl(tegra, soc->fpci.offsets[i]);
+ }
+}
+
+static void tegra_xusb_restore_context(struct tegra_xusb *tegra)
+{
+ const struct tegra_xusb_context_soc *soc = tegra->soc->context;
+ struct tegra_xusb_context *ctx = &tegra->context;
+ unsigned int i;
+
+ if (soc->fpci.num_offsets > 0) {
+ for (i = 0; i < soc->fpci.num_offsets; i++)
+ fpci_writel(tegra, ctx->fpci[i], soc->fpci.offsets[i]);
+ }
+
+ if (soc->ipfs.num_offsets > 0) {
+ for (i = 0; i < soc->ipfs.num_offsets; i++)
+ ipfs_writel(tegra, ctx->ipfs[i], soc->ipfs.offsets[i]);
+ }
+}
+
+static int tegra_xusb_enter_elpg(struct tegra_xusb *tegra, bool wakeup)
+{
+ struct xhci_hcd *xhci = hcd_to_xhci(tegra->hcd);
+ int err;
+
+ err = tegra_xusb_check_ports(tegra);
+ if (err < 0) {
+ dev_err(tegra->dev, "not all ports suspended: %d\n", err);
+ return err;
+ }
+
+ err = xhci_suspend(xhci, wakeup);
+ if (err < 0) {
+ dev_err(tegra->dev, "failed to suspend XHCI: %d\n", err);
+ return err;
+ }
+
+ tegra_xusb_save_context(tegra);
+ tegra_xusb_phy_disable(tegra);
+ tegra_xusb_clk_disable(tegra);
+
+ return 0;
+}
+
+static int tegra_xusb_exit_elpg(struct tegra_xusb *tegra, bool wakeup)
+{
+ struct xhci_hcd *xhci = hcd_to_xhci(tegra->hcd);
+ int err;
+
+ err = tegra_xusb_clk_enable(tegra);
+ if (err < 0) {
+ dev_err(tegra->dev, "failed to enable clocks: %d\n", err);
+ return err;
+ }
+
+ err = tegra_xusb_phy_enable(tegra);
+ if (err < 0) {
+ dev_err(tegra->dev, "failed to enable PHYs: %d\n", err);
+ goto disable_clk;
+ }
+
+ tegra_xusb_config(tegra);
+ tegra_xusb_restore_context(tegra);
+
+ err = tegra_xusb_load_firmware(tegra);
+ if (err < 0) {
+ dev_err(tegra->dev, "failed to load firmware: %d\n", err);
+ goto disable_phy;
+ }
+
+ err = __tegra_xusb_enable_firmware_messages(tegra);
+ if (err < 0) {
+ dev_err(tegra->dev, "failed to enable messages: %d\n", err);
+ goto disable_phy;
+ }
+
+ err = xhci_resume(xhci, true);
+ if (err < 0) {
+ dev_err(tegra->dev, "failed to resume XHCI: %d\n", err);
+ goto disable_phy;
+ }
+
+ return 0;
+
+disable_phy:
+ tegra_xusb_phy_disable(tegra);
+disable_clk:
+ tegra_xusb_clk_disable(tegra);
+ return err;
+}
+
static int tegra_xusb_suspend(struct device *dev)
{
struct tegra_xusb *tegra = dev_get_drvdata(dev);
- struct xhci_hcd *xhci = hcd_to_xhci(tegra->hcd);
bool wakeup = device_may_wakeup(dev);
+ int err;
+
+ synchronize_irq(tegra->mbox_irq);
- /* TODO: Powergate controller across suspend/resume. */
- return xhci_suspend(xhci, wakeup);
+ mutex_lock(&tegra->lock);
+ err = tegra_xusb_enter_elpg(tegra, wakeup);
+ mutex_unlock(&tegra->lock);
+
+ return err;
}
static int tegra_xusb_resume(struct device *dev)
{
struct tegra_xusb *tegra = dev_get_drvdata(dev);
- struct xhci_hcd *xhci = hcd_to_xhci(tegra->hcd);
+ bool wakeup = device_may_wakeup(dev);
+ int err;
- return xhci_resume(xhci, 0);
+ mutex_lock(&tegra->lock);
+ err = tegra_xusb_exit_elpg(tegra, wakeup);
+ mutex_unlock(&tegra->lock);
+
+ return err;
}
#endif
@@ -1370,12 +1643,50 @@ static const struct tegra_xusb_phy_type tegra124_phy_types[] = {
{ .name = "hsic", .num = 2, },
};
+static const unsigned int tegra124_xusb_context_ipfs[] = {
+ IPFS_XUSB_HOST_MSI_BAR_SZ_0,
+ IPFS_XUSB_HOST_MSI_AXI_BAR_ST_0,
+ IPFS_XUSB_HOST_MSI_FPCI_BAR_ST_0,
+ IPFS_XUSB_HOST_MSI_VEC0_0,
+ IPFS_XUSB_HOST_MSI_EN_VEC0_0,
+ IPFS_XUSB_HOST_FPCI_ERROR_MASKS_0,
+ IPFS_XUSB_HOST_INTR_MASK_0,
+ IPFS_XUSB_HOST_INTR_ENABLE_0,
+ IPFS_XUSB_HOST_UFPCI_CONFIG_0,
+ IPFS_XUSB_HOST_CLKGATE_HYSTERESIS_0,
+ IPFS_XUSB_HOST_MCCIF_FIFOCTRL_0,
+};
+
+static const unsigned int tegra124_xusb_context_fpci[] = {
+ XUSB_CFG_ARU_CONTEXT_HS_PLS,
+ XUSB_CFG_ARU_CONTEXT_FS_PLS,
+ XUSB_CFG_ARU_CONTEXT_HSFS_SPEED,
+ XUSB_CFG_ARU_CONTEXT_HSFS_PP,
+ XUSB_CFG_ARU_CONTEXT,
+ XUSB_CFG_AXI_CFG,
+ XUSB_CFG_24,
+ XUSB_CFG_16,
+};
+
+static const struct tegra_xusb_context_soc tegra124_xusb_context = {
+ .ipfs = {
+ .num_offsets = ARRAY_SIZE(tegra124_xusb_context_ipfs),
+ .offsets = tegra124_xusb_context_ipfs,
+ },
+ .fpci = {
+ .num_offsets = ARRAY_SIZE(tegra124_xusb_context_fpci),
+ .offsets = tegra124_xusb_context_fpci,
+ },
+};
+
static const struct tegra_xusb_soc tegra124_soc = {
.firmware = "nvidia/tegra124/xusb.bin",
.supply_names = tegra124_supply_names,
.num_supplies = ARRAY_SIZE(tegra124_supply_names),
.phy_types = tegra124_phy_types,
.num_types = ARRAY_SIZE(tegra124_phy_types),
+ .context = &tegra124_xusb_context,
.ports = {
.usb2 = { .offset = 4, .count = 4, },
.hsic = { .offset = 6, .count = 2, },
@@ -1414,6 +1725,7 @@ static const struct tegra_xusb_soc tegra210_soc = {
.num_supplies = ARRAY_SIZE(tegra210_supply_names),
.phy_types = tegra210_phy_types,
.num_types = ARRAY_SIZE(tegra210_phy_types),
+ .context = &tegra124_xusb_context,
.ports = {
.usb2 = { .offset = 4, .count = 4, },
.hsic = { .offset = 8, .count = 1, },
@@ -1432,6 +1744,7 @@ MODULE_FIRMWARE("nvidia/tegra210/xusb.bin");
static const char * const tegra186_supply_names[] = {
};
+MODULE_FIRMWARE("nvidia/tegra186/xusb.bin");
static const struct tegra_xusb_phy_type tegra186_phy_types[] = {
{ .name = "usb3", .num = 3, },
@@ -1439,12 +1752,20 @@ static const struct tegra_xusb_phy_type tegra186_phy_types[] = {
{ .name = "hsic", .num = 1, },
};
+static const struct tegra_xusb_context_soc tegra186_xusb_context = {
+ .fpci = {
+ .num_offsets = ARRAY_SIZE(tegra124_xusb_context_fpci),
+ .offsets = tegra124_xusb_context_fpci,
+ },
+};
+
static const struct tegra_xusb_soc tegra186_soc = {
.firmware = "nvidia/tegra186/xusb.bin",
.supply_names = tegra186_supply_names,
.num_supplies = ARRAY_SIZE(tegra186_supply_names),
.phy_types = tegra186_phy_types,
.num_types = ARRAY_SIZE(tegra186_phy_types),
+ .context = &tegra186_xusb_context,
.ports = {
.usb3 = { .offset = 0, .count = 3, },
.usb2 = { .offset = 3, .count = 3, },
@@ -1474,6 +1795,7 @@ static const struct tegra_xusb_soc tegra194_soc = {
.num_supplies = ARRAY_SIZE(tegra194_supply_names),
.phy_types = tegra194_phy_types,
.num_types = ARRAY_SIZE(tegra194_phy_types),
+ .context = &tegra186_xusb_context,
.ports = {
.usb3 = { .offset = 0, .count = 4, },
.usb2 = { .offset = 4, .count = 4, },
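
The firmware loader in this file replaces a blind 50 ms sleep with readx_poll_timeout(); since csb_readl() takes the tegra context as its first argument, a local one-argument macro adapts it to the op(addr) shape the helper expects. A minimal sketch of the idiom with a hypothetical my_readl/MY_READY:

#include <linux/bits.h>
#include <linux/io.h>
#include <linux/iopoll.h>

#define MY_READY	BIT(31)		/* illustrative ready flag */

static u32 my_readl(void __iomem *addr)	/* matches the op(addr) shape */
{
	return readl(addr);
}

static int my_wait_ready(void __iomem *status_reg)
{
	u32 val;

	/* poll every 100 us, give up after 10 ms; on timeout the
	 * helper returns -ETIMEDOUT and val holds the last read */
	return readx_poll_timeout(my_readl, status_reg, val,
				  val & MY_READY, 100, 10000);
}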
diff --git a/drivers/usb/isp1760/isp1760-if.c b/drivers/usb/isp1760/isp1760-if.c
index 07cc82ff327c..ccd30f835888 100644
--- a/drivers/usb/isp1760/isp1760-if.c
+++ b/drivers/usb/isp1760/isp1760-if.c
@@ -50,7 +50,7 @@ static int isp1761_pci_init(struct pci_dev *dev)
}
/* map available memory */
- iobase = ioremap_nocache(mem_start, mem_length);
+ iobase = ioremap(mem_start, mem_length);
if (!iobase) {
printk(KERN_ERR "Error ioremap failed\n");
release_mem_region(mem_start, mem_length);
@@ -101,7 +101,7 @@ static int isp1761_pci_init(struct pci_dev *dev)
return -EBUSY;
}
- iobase = ioremap_nocache(mem_start, mem_length);
+ iobase = ioremap(mem_start, mem_length);
if (!iobase) {
printk(KERN_ERR "ioremap #1\n");
release_mem_region(mem_start, mem_length);
diff --git a/drivers/usb/misc/usb3503.c b/drivers/usb/misc/usb3503.c
index 72f39a9751b5..116bd789e568 100644
--- a/drivers/usb/misc/usb3503.c
+++ b/drivers/usb/misc/usb3503.c
@@ -7,11 +7,10 @@
#include <linux/clk.h>
#include <linux/i2c.h>
-#include <linux/gpio.h>
+#include <linux/gpio/consumer.h>
#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/module.h>
-#include <linux/of_gpio.h>
#include <linux/platform_device.h>
#include <linux/platform_data/usb3503.h>
#include <linux/regmap.h>
@@ -47,19 +46,19 @@ struct usb3503 {
struct device *dev;
struct clk *clk;
u8 port_off_mask;
- int gpio_intn;
- int gpio_reset;
- int gpio_connect;
+ struct gpio_desc *intn;
+ struct gpio_desc *reset;
+ struct gpio_desc *connect;
bool secondary_ref_clk;
};
static int usb3503_reset(struct usb3503 *hub, int state)
{
- if (!state && gpio_is_valid(hub->gpio_connect))
- gpio_set_value_cansleep(hub->gpio_connect, 0);
+ if (!state && hub->connect)
+ gpiod_set_value_cansleep(hub->connect, 0);
- if (gpio_is_valid(hub->gpio_reset))
- gpio_set_value_cansleep(hub->gpio_reset, state);
+ if (hub->reset)
+ gpiod_set_value_cansleep(hub->reset, !state);
/* Wait T_HUBINIT == 4ms for hub logic to stabilize */
if (state)
@@ -115,8 +114,8 @@ static int usb3503_connect(struct usb3503 *hub)
}
}
- if (gpio_is_valid(hub->gpio_connect))
- gpio_set_value_cansleep(hub->gpio_connect, 1);
+ if (hub->connect)
+ gpiod_set_value_cansleep(hub->connect, 1);
hub->mode = USB3503_MODE_HUB;
dev_info(dev, "switched to HUB mode\n");
@@ -163,13 +162,11 @@ static int usb3503_probe(struct usb3503 *hub)
int err;
u32 mode = USB3503_MODE_HUB;
const u32 *property;
+ enum gpiod_flags flags;
int len;
if (pdata) {
hub->port_off_mask = pdata->port_off_mask;
- hub->gpio_intn = pdata->gpio_intn;
- hub->gpio_connect = pdata->gpio_connect;
- hub->gpio_reset = pdata->gpio_reset;
hub->mode = pdata->initial_mode;
} else if (np) {
u32 rate = 0;
@@ -230,59 +227,38 @@ static int usb3503_probe(struct usb3503 *hub)
}
}
- hub->gpio_intn = of_get_named_gpio(np, "intn-gpios", 0);
- if (hub->gpio_intn == -EPROBE_DEFER)
- return -EPROBE_DEFER;
- hub->gpio_connect = of_get_named_gpio(np, "connect-gpios", 0);
- if (hub->gpio_connect == -EPROBE_DEFER)
- return -EPROBE_DEFER;
- hub->gpio_reset = of_get_named_gpio(np, "reset-gpios", 0);
- if (hub->gpio_reset == -EPROBE_DEFER)
- return -EPROBE_DEFER;
of_property_read_u32(np, "initial-mode", &mode);
hub->mode = mode;
}
- if (hub->port_off_mask && !hub->regmap)
- dev_err(dev, "Ports disabled with no control interface\n");
-
- if (gpio_is_valid(hub->gpio_intn)) {
- int val = hub->secondary_ref_clk ? GPIOF_OUT_INIT_LOW :
- GPIOF_OUT_INIT_HIGH;
- err = devm_gpio_request_one(dev, hub->gpio_intn, val,
- "usb3503 intn");
- if (err) {
- dev_err(dev,
- "unable to request GPIO %d as interrupt pin (%d)\n",
- hub->gpio_intn, err);
- return err;
- }
- }
-
- if (gpio_is_valid(hub->gpio_connect)) {
- err = devm_gpio_request_one(dev, hub->gpio_connect,
- GPIOF_OUT_INIT_LOW, "usb3503 connect");
- if (err) {
- dev_err(dev,
- "unable to request GPIO %d as connect pin (%d)\n",
- hub->gpio_connect, err);
- return err;
- }
- }
-
- if (gpio_is_valid(hub->gpio_reset)) {
- err = devm_gpio_request_one(dev, hub->gpio_reset,
- GPIOF_OUT_INIT_LOW, "usb3503 reset");
+ if (hub->secondary_ref_clk)
+ flags = GPIOD_OUT_LOW;
+ else
+ flags = GPIOD_OUT_HIGH;
+ hub->intn = devm_gpiod_get_optional(dev, "intn", flags);
+ if (IS_ERR(hub->intn))
+ return PTR_ERR(hub->intn);
+ if (hub->intn)
+ gpiod_set_consumer_name(hub->intn, "usb3503 intn");
+
+ hub->connect = devm_gpiod_get_optional(dev, "connect", GPIOD_OUT_LOW);
+ if (IS_ERR(hub->connect))
+ return PTR_ERR(hub->connect);
+ if (hub->connect)
+ gpiod_set_consumer_name(hub->connect, "usb3503 connect");
+
+ hub->reset = devm_gpiod_get_optional(dev, "reset", GPIOD_OUT_HIGH);
+ if (IS_ERR(hub->reset))
+ return PTR_ERR(hub->reset);
+ if (hub->reset) {
/* Datasheet defines a hardware reset to be at least 100us */
usleep_range(100, 10000);
- if (err) {
- dev_err(dev,
- "unable to request GPIO %d as reset pin (%d)\n",
- hub->gpio_reset, err);
- return err;
- }
+ gpiod_set_consumer_name(hub->reset, "usb3503 reset");
}
+ if (hub->port_off_mask && !hub->regmap)
+ dev_err(dev, "Ports disabled with no control interface\n");
+
usb3503_switch_mode(hub, hub->mode);
dev_info(dev, "%s: probed in %s mode\n", __func__,
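
The usb3503 conversion relies on devm_gpiod_get_optional() returning NULL when a line is simply absent and an ERR_PTR (including -EPROBE_DEFER, previously checked by hand) on real errors; polarity now comes from the device-tree flags, which is why the reset write flipped from state to !state. A minimal sketch of the pattern:

#include <linux/device.h>
#include <linux/err.h>
#include <linux/gpio/consumer.h>

static int my_get_reset(struct device *dev, struct gpio_desc **out)
{
	struct gpio_desc *reset;

	/* binds to "reset-gpios"; polarity comes from the DT flags,
	 * so the values below are logical (1 = asserted) */
	reset = devm_gpiod_get_optional(dev, "reset", GPIOD_OUT_HIGH);
	if (IS_ERR(reset))
		return PTR_ERR(reset);	/* covers -EPROBE_DEFER too */

	if (reset)			/* NULL means the line is absent */
		gpiod_set_value_cansleep(reset, 0);	/* deassert */

	*out = reset;
	return 0;
}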
diff --git a/drivers/usb/musb/Kconfig b/drivers/usb/musb/Kconfig
index 52f8e2b57ad5..eb2ded1026ee 100644
--- a/drivers/usb/musb/Kconfig
+++ b/drivers/usb/musb/Kconfig
@@ -101,7 +101,6 @@ config USB_MUSB_AM35X
config USB_MUSB_DSPS
tristate "TI DSPS platforms"
- select USB_MUSB_AM335X_CHILD
depends on ARCH_OMAP2PLUS || COMPILE_TEST
depends on OF_IRQ
@@ -111,13 +110,16 @@ config USB_MUSB_UX500
config USB_MUSB_JZ4740
tristate "JZ4740"
- depends on NOP_USB_XCEIV
depends on MIPS || COMPILE_TEST
depends on USB_MUSB_GADGET
depends on USB=n || USB_OTG_BLACKLIST_HUB
-config USB_MUSB_AM335X_CHILD
- tristate
+config USB_MUSB_MEDIATEK
+ tristate "MediaTek platforms"
+ depends on ARCH_MEDIATEK || COMPILE_TEST
+ depends on NOP_USB_XCEIV
+ depends on GENERIC_PHY
+ select USB_ROLE_SWITCH
comment "MUSB DMA mode"
@@ -142,7 +144,7 @@ config USB_UX500_DMA
config USB_INVENTRA_DMA
bool 'Inventra'
- depends on USB_MUSB_OMAP2PLUS
+ depends on USB_MUSB_OMAP2PLUS || USB_MUSB_MEDIATEK
help
Enable DMA transfers using Mentor's engine.
diff --git a/drivers/usb/musb/Makefile b/drivers/usb/musb/Makefile
index 3a88c79e650c..932247360a9f 100644
--- a/drivers/usb/musb/Makefile
+++ b/drivers/usb/musb/Makefile
@@ -24,9 +24,7 @@ obj-$(CONFIG_USB_MUSB_DA8XX) += da8xx.o
obj-$(CONFIG_USB_MUSB_UX500) += ux500.o
obj-$(CONFIG_USB_MUSB_JZ4740) += jz4740.o
obj-$(CONFIG_USB_MUSB_SUNXI) += sunxi.o
-
-
-obj-$(CONFIG_USB_MUSB_AM335X_CHILD) += musb_am335x.o
+obj-$(CONFIG_USB_MUSB_MEDIATEK) += mediatek.o
# the kconfig must guarantee that only one of the
# possible I/O schemes will be enabled at a time ...
diff --git a/drivers/usb/musb/davinci.c b/drivers/usb/musb/davinci.c
index fb6bbd254ab7..704435526394 100644
--- a/drivers/usb/musb/davinci.c
+++ b/drivers/usb/musb/davinci.c
@@ -13,7 +13,7 @@
#include <linux/clk.h>
#include <linux/err.h>
#include <linux/io.h>
-#include <linux/gpio.h>
+#include <linux/gpio/consumer.h>
#include <linux/platform_device.h>
#include <linux/dma-mapping.h>
#include <linux/usb/usb_phy_generic.h>
@@ -25,10 +25,6 @@
#include "musb_core.h"
-#ifdef CONFIG_MACH_DAVINCI_EVM
-#define GPIO_nVBUS_DRV 160
-#endif
-
#include "davinci.h"
#include "cppi_dma.h"
@@ -40,6 +36,9 @@ struct davinci_glue {
struct device *dev;
struct platform_device *musb;
struct clk *clk;
+ bool vbus_state;
+ struct gpio_desc *vbus;
+ struct work_struct vbus_work;
};
/* REVISIT (PM) we should be able to keep the PHY in low power mode most
@@ -135,43 +134,44 @@ static void davinci_musb_disable(struct musb *musb)
* when J10 is out, and TI documents it as handling OTG.
*/
-#ifdef CONFIG_MACH_DAVINCI_EVM
-
-static int vbus_state = -1;
-
/* I2C operations are always synchronous, and require a task context.
* With unloaded systems, using the shared workqueue seems to suffice
* to satisfy the 100msec A_WAIT_VRISE timeout...
*/
-static void evm_deferred_drvvbus(struct work_struct *ignored)
+static void evm_deferred_drvvbus(struct work_struct *work)
{
- gpio_set_value_cansleep(GPIO_nVBUS_DRV, vbus_state);
- vbus_state = !vbus_state;
-}
+ struct davinci_glue *glue = container_of(work, struct davinci_glue,
+ vbus_work);
-#endif /* EVM */
+ gpiod_set_value_cansleep(glue->vbus, glue->vbus_state);
+ glue->vbus_state = !glue->vbus_state;
+}
-static void davinci_musb_source_power(struct musb *musb, int is_on, int immediate)
+static void davinci_musb_source_power(struct musb *musb, int is_on,
+ int immediate)
{
-#ifdef CONFIG_MACH_DAVINCI_EVM
+ struct davinci_glue *glue = dev_get_drvdata(musb->controller->parent);
+
+ /* This GPIO handling is entirely optional */
+ if (!glue->vbus)
+ return;
+
if (is_on)
is_on = 1;
- if (vbus_state == is_on)
+ if (glue->vbus_state == is_on)
return;
- vbus_state = !is_on; /* 0/1 vs "-1 == unknown/init" */
+ /* 0/1 vs "-1 == unknown/init" */
+ glue->vbus_state = !is_on;
if (machine_is_davinci_evm()) {
- static DECLARE_WORK(evm_vbus_work, evm_deferred_drvvbus);
-
if (immediate)
- gpio_set_value_cansleep(GPIO_nVBUS_DRV, vbus_state);
+ gpiod_set_value_cansleep(glue->vbus, glue->vbus_state);
else
- schedule_work(&evm_vbus_work);
+ schedule_work(&glue->vbus_work);
}
if (immediate)
- vbus_state = is_on;
-#endif
+ glue->vbus_state = is_on;
}
static void davinci_musb_set_vbus(struct musb *musb, int is_on)
@@ -524,6 +524,15 @@ static int davinci_probe(struct platform_device *pdev)
pdata->platform_ops = &davinci_ops;
+ glue->vbus = devm_gpiod_get_optional(&pdev->dev, NULL, GPIOD_OUT_LOW);
+ if (IS_ERR(glue->vbus)) {
+ ret = PTR_ERR(glue->vbus);
+ goto err0;
+ } else {
+ glue->vbus_state = -1;
+ INIT_WORK(&glue->vbus_work, evm_deferred_drvvbus);
+ }
+
usb_phy_generic_register();
platform_set_drvdata(pdev, glue);
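
The davinci VBUS GPIO may sit behind an I2C expander, so it can only be toggled from task context; the rework keeps the deferred work item but moves it, together with the state flag, from file-scope statics into the glue structure. A minimal sketch of the deferral pattern (names are illustrative):

#include <linux/gpio/consumer.h>
#include <linux/workqueue.h>

struct my_glue {
	struct gpio_desc *vbus;
	bool vbus_state;
	struct work_struct vbus_work;
};

static void my_vbus_work(struct work_struct *work)
{
	struct my_glue *glue = container_of(work, struct my_glue, vbus_work);

	/* task context: the sleeping gpiod call is safe here */
	gpiod_set_value_cansleep(glue->vbus, glue->vbus_state);
}

static void my_set_vbus(struct my_glue *glue, bool on)
{
	glue->vbus_state = on;
	schedule_work(&glue->vbus_work);	/* safe from atomic context */
}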
diff --git a/drivers/usb/musb/jz4740.c b/drivers/usb/musb/jz4740.c
index e3b8c84ccdb8..bc0109f4700b 100644
--- a/drivers/usb/musb/jz4740.c
+++ b/drivers/usb/musb/jz4740.c
@@ -17,16 +17,15 @@
#include "musb_core.h"
struct jz4740_glue {
- struct device *dev;
- struct platform_device *musb;
+ struct platform_device *pdev;
struct clk *clk;
};
static irqreturn_t jz4740_musb_interrupt(int irq, void *__hci)
{
- unsigned long flags;
- irqreturn_t retval = IRQ_NONE;
- struct musb *musb = __hci;
+ unsigned long flags;
+ irqreturn_t retval = IRQ_NONE;
+ struct musb *musb = __hci;
spin_lock_irqsave(&musb->lock, flags);
@@ -40,7 +39,7 @@ static irqreturn_t jz4740_musb_interrupt(int irq, void *__hci)
* never see them set
*/
musb->int_usb &= MUSB_INTR_SUSPEND | MUSB_INTR_RESUME |
- MUSB_INTR_RESET | MUSB_INTR_SOF;
+ MUSB_INTR_RESET | MUSB_INTR_SOF;
if (musb->int_usb || musb->int_tx || musb->int_rx)
retval = musb_interrupt(musb);
@@ -51,25 +50,20 @@ static irqreturn_t jz4740_musb_interrupt(int irq, void *__hci)
}
static struct musb_fifo_cfg jz4740_musb_fifo_cfg[] = {
-{ .hw_ep_num = 1, .style = FIFO_TX, .maxpacket = 512, },
-{ .hw_ep_num = 1, .style = FIFO_RX, .maxpacket = 512, },
-{ .hw_ep_num = 2, .style = FIFO_TX, .maxpacket = 64, },
+ { .hw_ep_num = 1, .style = FIFO_TX, .maxpacket = 512, },
+ { .hw_ep_num = 1, .style = FIFO_RX, .maxpacket = 512, },
+ { .hw_ep_num = 2, .style = FIFO_TX, .maxpacket = 64, },
};
static const struct musb_hdrc_config jz4740_musb_config = {
/* Silicon does not implement USB OTG. */
- .multipoint = 0,
+ .multipoint = 0,
/* Max EPs scanned, driver will decide which EP can be used. */
- .num_eps = 4,
+ .num_eps = 4,
/* RAMbits needed to configure EPs from table */
- .ram_bits = 9,
- .fifo_cfg = jz4740_musb_fifo_cfg,
- .fifo_cfg_size = ARRAY_SIZE(jz4740_musb_fifo_cfg),
-};
-
-static struct musb_hdrc_platform_data jz4740_musb_platform_data = {
- .mode = MUSB_PERIPHERAL,
- .config = &jz4740_musb_config,
+ .ram_bits = 9,
+ .fifo_cfg = jz4740_musb_fifo_cfg,
+ .fifo_cfg_size = ARRAY_SIZE(jz4740_musb_fifo_cfg),
};
static int jz4740_musb_init(struct musb *musb)
@@ -88,7 +82,8 @@ static int jz4740_musb_init(struct musb *musb)
return err;
}
- /* Silicon does not implement ConfigData register.
+ /*
+ * Silicon does not implement ConfigData register.
* Set dyn_fifo to avoid reading EP config from hardware.
*/
musb->dyn_fifo = true;
@@ -108,63 +103,67 @@ static const struct musb_platform_ops jz4740_musb_ops = {
.init = jz4740_musb_init,
};
+static const struct musb_hdrc_platform_data jz4740_musb_pdata = {
+ .mode = MUSB_PERIPHERAL,
+ .config = &jz4740_musb_config,
+ .platform_ops = &jz4740_musb_ops,
+};
+
static int jz4740_probe(struct platform_device *pdev)
{
- struct musb_hdrc_platform_data *pdata = &jz4740_musb_platform_data;
+ struct device *dev = &pdev->dev;
+ const struct musb_hdrc_platform_data *pdata = &jz4740_musb_pdata;
struct platform_device *musb;
struct jz4740_glue *glue;
- struct clk *clk;
+ struct clk *clk;
int ret;
- glue = devm_kzalloc(&pdev->dev, sizeof(*glue), GFP_KERNEL);
+ glue = devm_kzalloc(dev, sizeof(*glue), GFP_KERNEL);
if (!glue)
return -ENOMEM;
musb = platform_device_alloc("musb-hdrc", PLATFORM_DEVID_AUTO);
if (!musb) {
- dev_err(&pdev->dev, "failed to allocate musb device\n");
+ dev_err(dev, "failed to allocate musb device");
return -ENOMEM;
}
- clk = devm_clk_get(&pdev->dev, "udc");
+ clk = devm_clk_get(dev, "udc");
if (IS_ERR(clk)) {
- dev_err(&pdev->dev, "failed to get clock\n");
+ dev_err(dev, "failed to get clock");
ret = PTR_ERR(clk);
goto err_platform_device_put;
}
ret = clk_prepare_enable(clk);
if (ret) {
- dev_err(&pdev->dev, "failed to enable clock\n");
+ dev_err(dev, "failed to enable clock");
goto err_platform_device_put;
}
- musb->dev.parent = &pdev->dev;
+ musb->dev.parent = dev;
- glue->dev = &pdev->dev;
- glue->musb = musb;
+ glue->pdev = musb;
glue->clk = clk;
- pdata->platform_ops = &jz4740_musb_ops;
-
platform_set_drvdata(pdev, glue);
ret = platform_device_add_resources(musb, pdev->resource,
pdev->num_resources);
if (ret) {
- dev_err(&pdev->dev, "failed to add resources\n");
+ dev_err(dev, "failed to add resources");
goto err_clk_disable;
}
ret = platform_device_add_data(musb, pdata, sizeof(*pdata));
if (ret) {
- dev_err(&pdev->dev, "failed to add platform_data\n");
+ dev_err(dev, "failed to add platform_data");
goto err_clk_disable;
}
ret = platform_device_add(musb);
if (ret) {
- dev_err(&pdev->dev, "failed to register musb device\n");
+ dev_err(dev, "failed to register musb device");
goto err_clk_disable;
}
@@ -179,9 +178,9 @@ err_platform_device_put:
static int jz4740_remove(struct platform_device *pdev)
{
- struct jz4740_glue *glue = platform_get_drvdata(pdev);
+ struct jz4740_glue *glue = platform_get_drvdata(pdev);
- platform_device_unregister(glue->musb);
+ platform_device_unregister(glue->pdev);
clk_disable_unprepare(glue->clk);
return 0;
@@ -190,7 +189,7 @@ static int jz4740_remove(struct platform_device *pdev)
#ifdef CONFIG_OF
static const struct of_device_id jz4740_musb_of_match[] = {
{ .compatible = "ingenic,jz4740-musb" },
- {},
+ { /* sentinel */ },
};
MODULE_DEVICE_TABLE(of, jz4740_musb_of_match);
#endif
diff --git a/drivers/usb/musb/mediatek.c b/drivers/usb/musb/mediatek.c
new file mode 100644
index 000000000000..6b88c2f5d970
--- /dev/null
+++ b/drivers/usb/musb/mediatek.c
@@ -0,0 +1,582 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2019 MediaTek Inc.
+ *
+ * Author:
+ * Min Guo <min.guo@mediatek.com>
+ * Yonglong Wu <yonglong.wu@mediatek.com>
+ */
+
+#include <linux/clk.h>
+#include <linux/dma-mapping.h>
+#include <linux/module.h>
+#include <linux/of_platform.h>
+#include <linux/platform_device.h>
+#include <linux/usb/role.h>
+#include <linux/usb/usb_phy_generic.h>
+#include "musb_core.h"
+#include "musb_dma.h"
+
+#define USB_L1INTS 0x00a0
+#define USB_L1INTM 0x00a4
+#define MTK_MUSB_TXFUNCADDR 0x0480
+
+/* MediaTek controller toggle enable and status reg */
+#define MUSB_RXTOG 0x80
+#define MUSB_RXTOGEN 0x82
+#define MUSB_TXTOG 0x84
+#define MUSB_TXTOGEN 0x86
+#define MTK_TOGGLE_EN GENMASK(15, 0)
+
+#define TX_INT_STATUS BIT(0)
+#define RX_INT_STATUS BIT(1)
+#define USBCOM_INT_STATUS BIT(2)
+#define DMA_INT_STATUS BIT(3)
+
+#define DMA_INTR_STATUS_MSK GENMASK(7, 0)
+#define DMA_INTR_UNMASK_SET_MSK GENMASK(31, 24)
+
+struct mtk_glue {
+ struct device *dev;
+ struct musb *musb;
+ struct platform_device *musb_pdev;
+ struct platform_device *usb_phy;
+ struct phy *phy;
+ struct usb_phy *xceiv;
+ enum phy_mode phy_mode;
+ struct clk *main;
+ struct clk *mcu;
+ struct clk *univpll;
+ enum usb_role role;
+ struct usb_role_switch *role_sw;
+};
+
+static int mtk_musb_clks_get(struct mtk_glue *glue)
+{
+ struct device *dev = glue->dev;
+
+ glue->main = devm_clk_get(dev, "main");
+ if (IS_ERR(glue->main)) {
+ dev_err(dev, "fail to get main clock\n");
+ return PTR_ERR(glue->main);
+ }
+
+ glue->mcu = devm_clk_get(dev, "mcu");
+ if (IS_ERR(glue->mcu)) {
+ dev_err(dev, "fail to get mcu clock\n");
+ return PTR_ERR(glue->mcu);
+ }
+
+ glue->univpll = devm_clk_get(dev, "univpll");
+ if (IS_ERR(glue->univpll)) {
+ dev_err(dev, "fail to get univpll clock\n");
+ return PTR_ERR(glue->univpll);
+ }
+
+ return 0;
+}
+
+static int mtk_musb_clks_enable(struct mtk_glue *glue)
+{
+ int ret;
+
+ ret = clk_prepare_enable(glue->main);
+ if (ret) {
+ dev_err(glue->dev, "failed to enable main clock\n");
+ goto err_main_clk;
+ }
+
+ ret = clk_prepare_enable(glue->mcu);
+ if (ret) {
+ dev_err(glue->dev, "failed to enable mcu clock\n");
+ goto err_mcu_clk;
+ }
+
+ ret = clk_prepare_enable(glue->univpll);
+ if (ret) {
+ dev_err(glue->dev, "failed to enable univpll clock\n");
+ goto err_univpll_clk;
+ }
+
+ return 0;
+
+err_univpll_clk:
+ clk_disable_unprepare(glue->mcu);
+err_mcu_clk:
+ clk_disable_unprepare(glue->main);
+err_main_clk:
+ return ret;
+}
+
+static void mtk_musb_clks_disable(struct mtk_glue *glue)
+{
+ clk_disable_unprepare(glue->univpll);
+ clk_disable_unprepare(glue->mcu);
+ clk_disable_unprepare(glue->main);
+}
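The three clocks are fetched and enabled one by one with hand-rolled unwind labels; the clk_bulk API could express the same sequence more compactly. A sketch of that alternative, assuming it suits this hardware (demo_* names are illustrative):

static struct clk_bulk_data demo_clks[] = {
	{ .id = "main" },
	{ .id = "mcu" },
	{ .id = "univpll" },
};

static int demo_clks_get_enable(struct device *dev)
{
	int ret;

	ret = devm_clk_bulk_get(dev, ARRAY_SIZE(demo_clks), demo_clks);
	if (ret)
		return ret;

	/* enables all three, unwinding automatically on failure */
	return clk_bulk_prepare_enable(ARRAY_SIZE(demo_clks), demo_clks);
}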
+
+static int musb_usb_role_sx_set(struct device *dev, enum usb_role role)
+{
+ struct mtk_glue *glue = dev_get_drvdata(dev);
+ struct musb *musb = glue->musb;
+ u8 devctl = readb(musb->mregs + MUSB_DEVCTL);
+ enum usb_role new_role;
+
+ if (role == glue->role)
+ return 0;
+
+ switch (role) {
+ case USB_ROLE_HOST:
+ musb->xceiv->otg->state = OTG_STATE_A_WAIT_VRISE;
+ glue->phy_mode = PHY_MODE_USB_HOST;
+ new_role = USB_ROLE_HOST;
+ if (glue->role == USB_ROLE_NONE)
+ phy_power_on(glue->phy);
+
+ devctl |= MUSB_DEVCTL_SESSION;
+ musb_writeb(musb->mregs, MUSB_DEVCTL, devctl);
+ MUSB_HST_MODE(musb);
+ break;
+ case USB_ROLE_DEVICE:
+ musb->xceiv->otg->state = OTG_STATE_B_IDLE;
+ glue->phy_mode = PHY_MODE_USB_DEVICE;
+ new_role = USB_ROLE_DEVICE;
+ devctl &= ~MUSB_DEVCTL_SESSION;
+ musb_writeb(musb->mregs, MUSB_DEVCTL, devctl);
+ if (glue->role == USB_ROLE_NONE)
+ phy_power_on(glue->phy);
+
+ MUSB_DEV_MODE(musb);
+ break;
+ case USB_ROLE_NONE:
+ glue->phy_mode = PHY_MODE_USB_OTG;
+ new_role = USB_ROLE_NONE;
+ devctl &= ~MUSB_DEVCTL_SESSION;
+ musb_writeb(musb->mregs, MUSB_DEVCTL, devctl);
+ if (glue->role != USB_ROLE_NONE)
+ phy_power_off(glue->phy);
+
+ break;
+ default:
+ dev_err(glue->dev, "Invalid State\n");
+ return -EINVAL;
+ }
+
+ glue->role = new_role;
+ phy_set_mode(glue->phy, glue->phy_mode);
+
+ return 0;
+}
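musb_usb_role_sx_set() is reached through the role-switch framework: a connector or Type-C driver looks the switch up and drives it. A minimal consumer-side sketch (the demo_* name and the fwnode link are assumptions):

static int demo_switch_to_host(struct device *connector)
{
	struct usb_role_switch *sw;
	int ret;

	/* resolved via a "usb-role-switch" fwnode reference */
	sw = usb_role_switch_get(connector);
	if (IS_ERR(sw))
		return PTR_ERR(sw);

	ret = usb_role_switch_set_role(sw, USB_ROLE_HOST);
	usb_role_switch_put(sw);
	return ret;
}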
+
+static enum usb_role musb_usb_role_sx_get(struct device *dev)
+{
+ struct mtk_glue *glue = dev_get_drvdata(dev);
+
+ return glue->role;
+}
+
+static int mtk_otg_switch_init(struct mtk_glue *glue)
+{
+ struct usb_role_switch_desc role_sx_desc = { 0 };
+
+ role_sx_desc.set = musb_usb_role_sx_set;
+ role_sx_desc.get = musb_usb_role_sx_get;
+ role_sx_desc.fwnode = dev_fwnode(glue->dev);
+ glue->role_sw = usb_role_switch_register(glue->dev, &role_sx_desc);
+
+ return PTR_ERR_OR_ZERO(glue->role_sw);
+}
+
+static void mtk_otg_switch_exit(struct mtk_glue *glue)
+{
+ usb_role_switch_unregister(glue->role_sw);
+}
+
+static irqreturn_t generic_interrupt(int irq, void *__hci)
+{
+ unsigned long flags;
+ irqreturn_t retval = IRQ_NONE;
+ struct musb *musb = __hci;
+
+ spin_lock_irqsave(&musb->lock, flags);
+ musb->int_usb = musb_clearb(musb->mregs, MUSB_INTRUSB);
+ musb->int_rx = musb_clearw(musb->mregs, MUSB_INTRRX);
+ musb->int_tx = musb_clearw(musb->mregs, MUSB_INTRTX);
+
+ if (musb->int_usb || musb->int_tx || musb->int_rx)
+ retval = musb_interrupt(musb);
+
+ spin_unlock_irqrestore(&musb->lock, flags);
+
+ return retval;
+}
+
+static irqreturn_t mtk_musb_interrupt(int irq, void *dev_id)
+{
+ irqreturn_t retval = IRQ_NONE;
+ struct musb *musb = (struct musb *)dev_id;
+ u32 l1_ints;
+
+ l1_ints = musb_readl(musb->mregs, USB_L1INTS) &
+ musb_readl(musb->mregs, USB_L1INTM);
+
+ if (l1_ints & (TX_INT_STATUS | RX_INT_STATUS | USBCOM_INT_STATUS))
+ retval = generic_interrupt(irq, musb);
+
+#if defined(CONFIG_USB_INVENTRA_DMA)
+ if (l1_ints & DMA_INT_STATUS)
+ retval = dma_controller_irq(irq, musb->dma_controller);
+#endif
+ return retval;
+}
+
+static u32 mtk_musb_busctl_offset(u8 epnum, u16 offset)
+{
+ return MTK_MUSB_TXFUNCADDR + offset + 8 * epnum;
+}
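Each endpoint's bus-control block is 8 bytes; the stock core puts it at 0x80 + 8 * epnum (see the musb_core.c context below), and MediaTek relocates the whole array to 0x480. A worked check of both layouts for TX endpoint 2's FUNCADDR (offset 0):

#include <assert.h>

int main(void)
{
	assert(0x080 + 0x08 * 2 + 0 == 0x090);	/* default core layout */
	assert(0x480 + 0x08 * 2 + 0 == 0x490);	/* MediaTek layout */
	return 0;
}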
+
+static u8 mtk_musb_clearb(void __iomem *addr, unsigned int offset)
+{
+ u8 data;
+
+ /* W1C */
+ data = musb_readb(addr, offset);
+ musb_writeb(addr, offset, data);
+ return data;
+}
+
+static u16 mtk_musb_clearw(void __iomem *addr, unsigned int offset)
+{
+ u16 data;
+
+ /* W1C */
+ data = musb_readw(addr, offset);
+ musb_writew(addr, offset, data);
+ return data;
+}
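These hooks exist because MediaTek's interrupt status registers are write-1-to-clear, while stock MUSB registers are clear-on-read. When a glue layer supplies no clear hooks, the core falls back to a plain read, which on clear-on-read hardware is already the acknowledge; a sketch of that default, assuming the core's read helper:

static u8 demo_default_clearb(void __iomem *addr, unsigned int offset)
{
	/* clear-on-read hardware: the read itself acknowledges */
	return musb_readb(addr, offset);
}

This is exactly why musb_init_controller() below assigns musb_clearb = musb_readb when ops->clearb is absent.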
+
+static int mtk_musb_set_mode(struct musb *musb, u8 mode)
+{
+ struct device *dev = musb->controller;
+ struct mtk_glue *glue = dev_get_drvdata(dev->parent);
+ enum phy_mode new_mode;
+ enum usb_role new_role;
+
+ switch (mode) {
+ case MUSB_HOST:
+ new_mode = PHY_MODE_USB_HOST;
+ new_role = USB_ROLE_HOST;
+ break;
+ case MUSB_PERIPHERAL:
+ new_mode = PHY_MODE_USB_DEVICE;
+ new_role = USB_ROLE_DEVICE;
+ break;
+ case MUSB_OTG:
+ new_mode = PHY_MODE_USB_OTG;
+ new_role = USB_ROLE_NONE;
+ break;
+ default:
+ dev_err(glue->dev, "Invalid mode request\n");
+ return -EINVAL;
+ }
+
+ if (glue->phy_mode == new_mode)
+ return 0;
+
+ if (musb->port_mode != MUSB_OTG) {
+ dev_err(glue->dev, "Does not support changing modes\n");
+ return -EINVAL;
+ }
+
+ glue->role = new_role;
+ musb_usb_role_sx_set(dev, glue->role);
+ return 0;
+}
+
+static int mtk_musb_init(struct musb *musb)
+{
+ struct device *dev = musb->controller;
+ struct mtk_glue *glue = dev_get_drvdata(dev->parent);
+ int ret;
+
+ glue->musb = musb;
+ musb->phy = glue->phy;
+ musb->xceiv = glue->xceiv;
+ musb->is_host = false;
+ musb->isr = mtk_musb_interrupt;
+
+ /* Set TX/RX toggle enable */
+ musb_writew(musb->mregs, MUSB_TXTOGEN, MTK_TOGGLE_EN);
+ musb_writew(musb->mregs, MUSB_RXTOGEN, MTK_TOGGLE_EN);
+
+ if (musb->port_mode == MUSB_OTG) {
+ ret = mtk_otg_switch_init(glue);
+ if (ret)
+ return ret;
+ }
+
+ ret = phy_init(glue->phy);
+ if (ret)
+ goto err_phy_init;
+
+ ret = phy_power_on(glue->phy);
+ if (ret)
+ goto err_phy_power_on;
+
+ phy_set_mode(glue->phy, glue->phy_mode);
+
+#if defined(CONFIG_USB_INVENTRA_DMA)
+ musb_writel(musb->mregs, MUSB_HSDMA_INTR,
+ DMA_INTR_STATUS_MSK | DMA_INTR_UNMASK_SET_MSK);
+#endif
+ musb_writel(musb->mregs, USB_L1INTM, TX_INT_STATUS | RX_INT_STATUS |
+ USBCOM_INT_STATUS | DMA_INT_STATUS);
+ return 0;
+
+err_phy_power_on:
+ phy_exit(glue->phy);
+err_phy_init:
+ mtk_otg_switch_exit(glue);
+ return ret;
+}
+
+static u16 mtk_musb_get_toggle(struct musb_qh *qh, int is_out)
+{
+ struct musb *musb = qh->hw_ep->musb;
+ u8 epnum = qh->hw_ep->epnum;
+ u16 toggle;
+
+ toggle = musb_readw(musb->mregs, is_out ? MUSB_TXTOG : MUSB_RXTOG);
+ return toggle & (1 << epnum);
+}
+
+static u16 mtk_musb_set_toggle(struct musb_qh *qh, int is_out, struct urb *urb)
+{
+ struct musb *musb = qh->hw_ep->musb;
+ u8 epnum = qh->hw_ep->epnum;
+ u16 value, toggle;
+
+ toggle = usb_gettoggle(urb->dev, qh->epnum, is_out);
+
+ if (is_out) {
+ value = musb_readw(musb->mregs, MUSB_TXTOG);
+ value |= toggle << epnum;
+ musb_writew(musb->mregs, MUSB_TXTOG, value);
+ } else {
+ value = musb_readw(musb->mregs, MUSB_RXTOG);
+ value |= toggle << epnum;
+ musb_writew(musb->mregs, MUSB_RXTOG, value);
+ }
+
+ return 0;
+}
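The update above only ORs the new toggle bit in; clearing a stale bit for the same endpoint would need a mask first. A hypothetical mask-then-set variant for the TX side (demo_* name, not part of this patch):

static void demo_set_tx_toggle(void __iomem *mregs, u8 epnum, u16 toggle)
{
	u16 value = musb_readw(mregs, MUSB_TXTOG);

	value &= ~(1 << epnum);			/* drop the stale bit */
	value |= (toggle & 1) << epnum;		/* install the new one */
	musb_writew(mregs, MUSB_TXTOG, value);
}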
+
+static int mtk_musb_exit(struct musb *musb)
+{
+ struct device *dev = musb->controller;
+ struct mtk_glue *glue = dev_get_drvdata(dev->parent);
+
+ mtk_otg_switch_exit(glue);
+ phy_power_off(glue->phy);
+ phy_exit(glue->phy);
+ mtk_musb_clks_disable(glue);
+
+ pm_runtime_put_sync(dev);
+ pm_runtime_disable(dev);
+ return 0;
+}
+
+static const struct musb_platform_ops mtk_musb_ops = {
+ .quirks = MUSB_DMA_INVENTRA,
+ .init = mtk_musb_init,
+ .get_toggle = mtk_musb_get_toggle,
+ .set_toggle = mtk_musb_set_toggle,
+ .exit = mtk_musb_exit,
+#ifdef CONFIG_USB_INVENTRA_DMA
+ .dma_init = musbhs_dma_controller_create_noirq,
+ .dma_exit = musbhs_dma_controller_destroy,
+#endif
+ .clearb = mtk_musb_clearb,
+ .clearw = mtk_musb_clearw,
+ .busctl_offset = mtk_musb_busctl_offset,
+ .set_mode = mtk_musb_set_mode,
+};
+
+#define MTK_MUSB_MAX_EP_NUM 8
+#define MTK_MUSB_RAM_BITS 11
+
+static struct musb_fifo_cfg mtk_musb_mode_cfg[] = {
+ { .hw_ep_num = 1, .style = FIFO_TX, .maxpacket = 512, },
+ { .hw_ep_num = 1, .style = FIFO_RX, .maxpacket = 512, },
+ { .hw_ep_num = 2, .style = FIFO_TX, .maxpacket = 512, },
+ { .hw_ep_num = 2, .style = FIFO_RX, .maxpacket = 512, },
+ { .hw_ep_num = 3, .style = FIFO_TX, .maxpacket = 512, },
+ { .hw_ep_num = 3, .style = FIFO_RX, .maxpacket = 512, },
+ { .hw_ep_num = 4, .style = FIFO_TX, .maxpacket = 512, },
+ { .hw_ep_num = 4, .style = FIFO_RX, .maxpacket = 512, },
+ { .hw_ep_num = 5, .style = FIFO_TX, .maxpacket = 512, },
+ { .hw_ep_num = 5, .style = FIFO_RX, .maxpacket = 512, },
+ { .hw_ep_num = 6, .style = FIFO_TX, .maxpacket = 1024, },
+ { .hw_ep_num = 6, .style = FIFO_RX, .maxpacket = 1024, },
+ { .hw_ep_num = 7, .style = FIFO_TX, .maxpacket = 512, },
+ { .hw_ep_num = 7, .style = FIFO_RX, .maxpacket = 64, },
+};
+
+static const struct musb_hdrc_config mtk_musb_hdrc_config = {
+ .fifo_cfg = mtk_musb_mode_cfg,
+ .fifo_cfg_size = ARRAY_SIZE(mtk_musb_mode_cfg),
+ .multipoint = true,
+ .dyn_fifo = true,
+ .num_eps = MTK_MUSB_MAX_EP_NUM,
+ .ram_bits = MTK_MUSB_RAM_BITS,
+};
+
+static const struct platform_device_info mtk_dev_info = {
+ .name = "musb-hdrc",
+ .id = PLATFORM_DEVID_AUTO,
+ .dma_mask = DMA_BIT_MASK(32),
+};
+
+static int mtk_musb_probe(struct platform_device *pdev)
+{
+ struct musb_hdrc_platform_data *pdata;
+ struct mtk_glue *glue;
+ struct platform_device_info pinfo;
+ struct device *dev = &pdev->dev;
+ struct device_node *np = dev->of_node;
+ int ret = -ENOMEM;
+
+ glue = devm_kzalloc(dev, sizeof(*glue), GFP_KERNEL);
+ if (!glue)
+ return -ENOMEM;
+
+ glue->dev = dev;
+ pdata = devm_kzalloc(dev, sizeof(*pdata), GFP_KERNEL);
+ if (!pdata)
+ return -ENOMEM;
+
+ ret = of_platform_populate(np, NULL, NULL, dev);
+ if (ret) {
+ dev_err(dev, "failed to create child devices at %p\n", np);
+ return ret;
+ }
+
+ ret = mtk_musb_clks_get(glue);
+ if (ret)
+ return ret;
+
+ pdata->config = &mtk_musb_hdrc_config;
+ pdata->platform_ops = &mtk_musb_ops;
+ pdata->mode = usb_get_dr_mode(dev);
+
+ if (IS_ENABLED(CONFIG_USB_MUSB_HOST))
+ pdata->mode = USB_DR_MODE_HOST;
+ else if (IS_ENABLED(CONFIG_USB_MUSB_GADGET))
+ pdata->mode = USB_DR_MODE_PERIPHERAL;
+
+ switch (pdata->mode) {
+ case USB_DR_MODE_HOST:
+ glue->phy_mode = PHY_MODE_USB_HOST;
+ glue->role = USB_ROLE_HOST;
+ break;
+ case USB_DR_MODE_PERIPHERAL:
+ glue->phy_mode = PHY_MODE_USB_DEVICE;
+ glue->role = USB_ROLE_DEVICE;
+ break;
+ case USB_DR_MODE_OTG:
+ glue->phy_mode = PHY_MODE_USB_OTG;
+ glue->role = USB_ROLE_NONE;
+ break;
+ default:
+ dev_err(&pdev->dev, "Error 'dr_mode' property\n");
+ return -EINVAL;
+ }
+
+ glue->phy = devm_of_phy_get_by_index(dev, np, 0);
+ if (IS_ERR(glue->phy)) {
+ dev_err(dev, "fail to getting phy %ld\n",
+ PTR_ERR(glue->phy));
+ return PTR_ERR(glue->phy);
+ }
+
+ glue->usb_phy = usb_phy_generic_register();
+ if (IS_ERR(glue->usb_phy)) {
+ dev_err(dev, "fail to registering usb-phy %ld\n",
+ PTR_ERR(glue->usb_phy));
+ return PTR_ERR(glue->usb_phy);
+ }
+
+ glue->xceiv = devm_usb_get_phy(dev, USB_PHY_TYPE_USB2);
+ if (IS_ERR(glue->xceiv)) {
+ dev_err(dev, "fail to getting usb-phy %d\n", ret);
+ ret = PTR_ERR(glue->xceiv);
+ goto err_unregister_usb_phy;
+ }
+
+ platform_set_drvdata(pdev, glue);
+ pm_runtime_enable(dev);
+ pm_runtime_get_sync(dev);
+
+ ret = mtk_musb_clks_enable(glue);
+ if (ret)
+ goto err_enable_clk;
+
+ pinfo = mtk_dev_info;
+ pinfo.parent = dev;
+ pinfo.res = pdev->resource;
+ pinfo.num_res = pdev->num_resources;
+ pinfo.data = pdata;
+ pinfo.size_data = sizeof(*pdata);
+
+ glue->musb_pdev = platform_device_register_full(&pinfo);
+ if (IS_ERR(glue->musb_pdev)) {
+ ret = PTR_ERR(glue->musb_pdev);
+ dev_err(dev, "failed to register musb device: %d\n", ret);
+ goto err_device_register;
+ }
+
+ return 0;
+
+err_device_register:
+ mtk_musb_clks_disable(glue);
+err_enable_clk:
+ pm_runtime_put_sync(dev);
+ pm_runtime_disable(dev);
+err_unregister_usb_phy:
+ usb_phy_generic_unregister(glue->usb_phy);
+ return ret;
+}
+
+static int mtk_musb_remove(struct platform_device *pdev)
+{
+ struct mtk_glue *glue = platform_get_drvdata(pdev);
+ struct platform_device *usb_phy = glue->usb_phy;
+
+ platform_device_unregister(glue->musb_pdev);
+ usb_phy_generic_unregister(usb_phy);
+
+ return 0;
+}
+
+#ifdef CONFIG_OF
+static const struct of_device_id mtk_musb_match[] = {
+ { .compatible = "mediatek,mtk-musb" },
+ { /* sentinel */ },
+};
+MODULE_DEVICE_TABLE(of, mtk_musb_match);
+#endif
+
+static struct platform_driver mtk_musb_driver = {
+ .probe = mtk_musb_probe,
+ .remove = mtk_musb_remove,
+ .driver = {
+ .name = "musb-mtk",
+ .of_match_table = of_match_ptr(mtk_musb_match),
+ },
+};
+
+module_platform_driver(mtk_musb_driver);
+
+MODULE_DESCRIPTION("MediaTek MUSB Glue Layer");
+MODULE_AUTHOR("Min Guo <min.guo@mediatek.com>");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/usb/musb/musb_am335x.c b/drivers/usb/musb/musb_am335x.c
deleted file mode 100644
index 5f04f8e3a640..000000000000
--- a/drivers/usb/musb/musb_am335x.c
+++ /dev/null
@@ -1,44 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-#include <linux/platform_device.h>
-#include <linux/pm_runtime.h>
-#include <linux/module.h>
-#include <linux/of_platform.h>
-
-static int am335x_child_probe(struct platform_device *pdev)
-{
- int ret;
-
- pm_runtime_enable(&pdev->dev);
-
- ret = of_platform_populate(pdev->dev.of_node, NULL, NULL, &pdev->dev);
- if (ret)
- goto err;
-
- return 0;
-err:
- pm_runtime_disable(&pdev->dev);
- return ret;
-}
-
-static const struct of_device_id am335x_child_of_match[] = {
- { .compatible = "ti,am33xx-usb" },
- { },
-};
-MODULE_DEVICE_TABLE(of, am335x_child_of_match);
-
-static struct platform_driver am335x_child_driver = {
- .probe = am335x_child_probe,
- .driver = {
- .name = "am335x-usb-childs",
- .of_match_table = am335x_child_of_match,
- },
-};
-
-static int __init am335x_child_init(void)
-{
- return platform_driver_register(&am335x_child_driver);
-}
-module_init(am335x_child_init);
-
-MODULE_DESCRIPTION("AM33xx child devices");
-MODULE_LICENSE("GPL v2");
diff --git a/drivers/usb/musb/musb_core.c b/drivers/usb/musb/musb_core.c
index 5ebf30bd61bd..f616fb489542 100644
--- a/drivers/usb/musb/musb_core.c
+++ b/drivers/usb/musb/musb_core.c
@@ -73,6 +73,7 @@
#include <linux/prefetch.h>
#include <linux/platform_device.h>
#include <linux/io.h>
+#include <linux/iopoll.h>
#include <linux/dma-mapping.h>
#include <linux/usb.h>
#include <linux/usb/of.h>
@@ -246,7 +247,7 @@ static u32 musb_default_busctl_offset(u8 epnum, u16 offset)
return 0x80 + (0x08 * epnum) + offset;
}
-static u8 musb_default_readb(const void __iomem *addr, unsigned offset)
+static u8 musb_default_readb(void __iomem *addr, u32 offset)
{
u8 data = __raw_readb(addr + offset);
@@ -254,13 +255,13 @@ static u8 musb_default_readb(const void __iomem *addr, unsigned offset)
return data;
}
-static void musb_default_writeb(void __iomem *addr, unsigned offset, u8 data)
+static void musb_default_writeb(void __iomem *addr, u32 offset, u8 data)
{
trace_musb_writeb(__builtin_return_address(0), addr, offset, data);
__raw_writeb(data, addr + offset);
}
-static u16 musb_default_readw(const void __iomem *addr, unsigned offset)
+static u16 musb_default_readw(void __iomem *addr, u32 offset)
{
u16 data = __raw_readw(addr + offset);
@@ -268,12 +269,44 @@ static u16 musb_default_readw(const void __iomem *addr, unsigned offset)
return data;
}
-static void musb_default_writew(void __iomem *addr, unsigned offset, u16 data)
+static void musb_default_writew(void __iomem *addr, u32 offset, u16 data)
{
trace_musb_writew(__builtin_return_address(0), addr, offset, data);
__raw_writew(data, addr + offset);
}
+static u16 musb_default_get_toggle(struct musb_qh *qh, int is_out)
+{
+ void __iomem *epio = qh->hw_ep->regs;
+ u16 csr;
+
+ if (is_out)
+ csr = musb_readw(epio, MUSB_TXCSR) & MUSB_TXCSR_H_DATATOGGLE;
+ else
+ csr = musb_readw(epio, MUSB_RXCSR) & MUSB_RXCSR_H_DATATOGGLE;
+
+ return csr;
+}
+
+static u16 musb_default_set_toggle(struct musb_qh *qh, int is_out,
+ struct urb *urb)
+{
+ u16 csr;
+ u16 toggle;
+
+ toggle = usb_gettoggle(urb->dev, qh->epnum, is_out);
+
+ if (is_out)
+ csr = toggle ? (MUSB_TXCSR_H_WR_DATATOGGLE
+ | MUSB_TXCSR_H_DATATOGGLE)
+ : MUSB_TXCSR_CLRDATATOG;
+ else
+ csr = toggle ? (MUSB_RXCSR_H_WR_DATATOGGLE
+ | MUSB_RXCSR_H_DATATOGGLE) : 0;
+
+ return csr;
+}
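Both toggle hooks return CSR bits instead of writing registers themselves; the host path ORs the result into TXCSR/RXCSR (see the musb_host.c hunks further down), which also lets MediaTek's variant return 0 after updating its dedicated toggle registers. A sketch of the TX call-site pattern:

static void demo_program_tx_toggle(struct musb *musb, struct musb_qh *qh,
				   struct urb *urb, void __iomem *epio)
{
	u16 csr = musb_readw(epio, MUSB_TXCSR);

	csr |= musb->io.set_toggle(qh, 1, urb);	/* is_out = 1 */
	musb_writew(epio, MUSB_TXCSR, csr);
}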
+
/*
* Load an endpoint's FIFO
*/
@@ -364,19 +397,25 @@ static void musb_default_read_fifo(struct musb_hw_ep *hw_ep, u16 len, u8 *dst)
/*
* Old style IO functions
*/
-u8 (*musb_readb)(const void __iomem *addr, unsigned offset);
+u8 (*musb_readb)(void __iomem *addr, u32 offset);
EXPORT_SYMBOL_GPL(musb_readb);
-void (*musb_writeb)(void __iomem *addr, unsigned offset, u8 data);
+void (*musb_writeb)(void __iomem *addr, u32 offset, u8 data);
EXPORT_SYMBOL_GPL(musb_writeb);
-u16 (*musb_readw)(const void __iomem *addr, unsigned offset);
+u8 (*musb_clearb)(void __iomem *addr, u32 offset);
+EXPORT_SYMBOL_GPL(musb_clearb);
+
+u16 (*musb_readw)(void __iomem *addr, u32 offset);
EXPORT_SYMBOL_GPL(musb_readw);
-void (*musb_writew)(void __iomem *addr, unsigned offset, u16 data);
+void (*musb_writew)(void __iomem *addr, u32 offset, u16 data);
EXPORT_SYMBOL_GPL(musb_writew);
-u32 musb_readl(const void __iomem *addr, unsigned offset)
+u16 (*musb_clearw)(void __iomem *addr, u32 offset);
+EXPORT_SYMBOL_GPL(musb_clearw);
+
+u32 musb_readl(void __iomem *addr, u32 offset)
{
u32 data = __raw_readl(addr + offset);
@@ -385,7 +424,7 @@ u32 musb_readl(const void __iomem *addr, unsigned offset)
}
EXPORT_SYMBOL_GPL(musb_readl);
-void musb_writel(void __iomem *addr, unsigned offset, u32 data)
+void musb_writel(void __iomem *addr, u32 offset, u32 data)
{
trace_musb_writel(__builtin_return_address(0), addr, offset, data);
__raw_writel(data, addr + offset);
@@ -414,6 +453,108 @@ void musb_write_fifo(struct musb_hw_ep *hw_ep, u16 len, const u8 *src)
return hw_ep->musb->io.write_fifo(hw_ep, len, src);
}
+static u8 musb_read_devctl(struct musb *musb)
+{
+ return musb_readb(musb->mregs, MUSB_DEVCTL);
+}
+
+/**
+ * musb_set_host - set and initialize host mode
+ * @musb: musb controller driver data
+ *
+ * At least some musb revisions need to enable devctl session bit in
+ * peripheral mode to switch to host mode. Initializes things to host
+ * mode and sets A_IDLE. SoC glue needs to advance state further
+ * based on phy provided VBUS state.
+ *
+ * Note that the SoC glue code may need to wait for musb to settle
+ * on enable before calling this to avoid babble.
+ */
+int musb_set_host(struct musb *musb)
+{
+ int error = 0;
+ u8 devctl;
+
+ if (!musb)
+ return -EINVAL;
+
+ devctl = musb_read_devctl(musb);
+ if (!(devctl & MUSB_DEVCTL_BDEVICE)) {
+ dev_info(musb->controller,
+ "%s: already in host mode: %02x\n",
+ __func__, devctl);
+ goto init_data;
+ }
+
+ devctl |= MUSB_DEVCTL_SESSION;
+ musb_writeb(musb->mregs, MUSB_DEVCTL, devctl);
+
+ error = readx_poll_timeout(musb_read_devctl, musb, devctl,
+ !(devctl & MUSB_DEVCTL_BDEVICE), 5000,
+ 1000000);
+ if (error) {
+ dev_err(musb->controller, "%s: could not set host: %02x\n",
+ __func__, devctl);
+
+ return error;
+ }
+
+init_data:
+ musb->is_active = 1;
+ musb->xceiv->otg->state = OTG_STATE_A_IDLE;
+ MUSB_HST_MODE(musb);
+
+ return error;
+}
+EXPORT_SYMBOL_GPL(musb_set_host);
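readx_poll_timeout() comes from the newly included <linux/iopoll.h>: it keeps evaluating op(args) into the given variable until the condition holds or timeout_us expires, sleeping sleep_us between polls, and returns 0 or -ETIMEDOUT. An isolated sketch of the same poll, assuming musb_read_devctl() were visible to the caller:

static int demo_wait_for_session(struct musb *musb)
{
	u8 devctl;

	/* poll every 5 ms, give up after 1 s */
	return readx_poll_timeout(musb_read_devctl, musb, devctl,
				  devctl & MUSB_DEVCTL_SESSION,
				  5000, 1000000);
}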
+
+/**
+ * musb_set_peripheral - set and initialize peripheral mode
+ * @musb: musb controller driver data
+ *
+ * Clears devctl session bit and initializes things for peripheral
+ * mode and sets B_IDLE. SoC glue needs to advance state further
+ * based on phy provided VBUS state.
+ */
+int musb_set_peripheral(struct musb *musb)
+{
+ int error = 0;
+ u8 devctl;
+
+ if (!musb)
+ return -EINVAL;
+
+ devctl = musb_read_devctl(musb);
+ if (devctl & MUSB_DEVCTL_BDEVICE) {
+ dev_info(musb->controller,
+ "%s: already in peripheral mode: %02x\n",
+ __func__, devctl);
+
+ goto init_data;
+ }
+
+ devctl &= ~MUSB_DEVCTL_SESSION;
+ musb_writeb(musb->mregs, MUSB_DEVCTL, devctl);
+
+ error = readx_poll_timeout(musb_read_devctl, musb, devctl,
+ devctl & MUSB_DEVCTL_BDEVICE, 5000,
+ 1000000);
+ if (error) {
+ dev_err(musb->controller, "%s: could not set peripheral: %02x\n",
+ __func__, devctl);
+
+ return error;
+ }
+
+init_data:
+ musb->is_active = 0;
+ musb->xceiv->otg->state = OTG_STATE_B_IDLE;
+ MUSB_DEV_MODE(musb);
+
+ return error;
+}
+EXPORT_SYMBOL_GPL(musb_set_peripheral);
+
/*-------------------------------------------------------------------------*/
/* for high speed test mode; see USB 2.0 spec 7.1.20 */
@@ -909,7 +1050,6 @@ static void musb_handle_intr_reset(struct musb *musb)
* @param musb instance pointer
* @param int_usb register contents
* @param devctl
- * @param power
*/
static irqreturn_t musb_stage0_irq(struct musb *musb, u8 int_usb,
@@ -1015,7 +1155,6 @@ static irqreturn_t musb_stage0_irq(struct musb *musb, u8 int_usb,
static void musb_disable_interrupts(struct musb *musb)
{
void __iomem *mbase = musb->mregs;
- u16 temp;
/* disable interrupts */
musb_writeb(mbase, MUSB_INTRUSBE, 0);
@@ -1025,9 +1164,9 @@ static void musb_disable_interrupts(struct musb *musb)
musb_writew(mbase, MUSB_INTRRXE, 0);
/* flush pending interrupts */
- temp = musb_readb(mbase, MUSB_INTRUSB);
- temp = musb_readw(mbase, MUSB_INTRTX);
- temp = musb_readw(mbase, MUSB_INTRRX);
+ musb_clearb(mbase, MUSB_INTRUSB);
+ musb_clearw(mbase, MUSB_INTRTX);
+ musb_clearw(mbase, MUSB_INTRRX);
}
static void musb_enable_interrupts(struct musb *musb)
@@ -2254,10 +2393,19 @@ musb_init_controller(struct device *dev, int nIrq, void __iomem *ctrl)
musb_readb = musb->ops->readb;
if (musb->ops->writeb)
musb_writeb = musb->ops->writeb;
+ if (musb->ops->clearb)
+ musb_clearb = musb->ops->clearb;
+ else
+ musb_clearb = musb_readb;
+
if (musb->ops->readw)
musb_readw = musb->ops->readw;
if (musb->ops->writew)
musb_writew = musb->ops->writew;
+ if (musb->ops->clearw)
+ musb_clearw = musb->ops->clearw;
+ else
+ musb_clearw = musb_readw;
#ifndef CONFIG_MUSB_PIO_ONLY
if (!musb->ops->dma_init || !musb->ops->dma_exit) {
@@ -2279,6 +2427,16 @@ musb_init_controller(struct device *dev, int nIrq, void __iomem *ctrl)
else
musb->io.write_fifo = musb_default_write_fifo;
+ if (musb->ops->get_toggle)
+ musb->io.get_toggle = musb->ops->get_toggle;
+ else
+ musb->io.get_toggle = musb_default_get_toggle;
+
+ if (musb->ops->set_toggle)
+ musb->io.set_toggle = musb->ops->set_toggle;
+ else
+ musb->io.set_toggle = musb_default_set_toggle;
+
if (!musb->xceiv->io_ops) {
musb->xceiv->io_dev = musb->controller;
musb->xceiv->io_priv = musb->mregs;
diff --git a/drivers/usb/musb/musb_core.h b/drivers/usb/musb/musb_core.h
index 04203b7126d5..290a2bc46606 100644
--- a/drivers/usb/musb/musb_core.h
+++ b/drivers/usb/musb/musb_core.h
@@ -27,6 +27,7 @@
struct musb;
struct musb_hw_ep;
struct musb_ep;
+struct musb_qh;
/* Helper defines for struct musb->hwvers */
#define MUSB_HWVERS_MAJOR(x) ((x >> 10) & 0x1f)
@@ -119,10 +120,14 @@ struct musb_io;
* @fifo_offset: returns the fifo offset
* @readb: read 8 bits
* @writeb: write 8 bits
+ * @clearb: read and clear 8 bits (clear-on-read or W1C, per platform)
* @readw: read 16 bits
* @writew: write 16 bits
+ * @clearw: read and clear 16 bits (clear-on-read or W1C, per platform)
* @read_fifo: reads the fifo
* @write_fifo: writes to fifo
+ * @get_toggle: platform specific get toggle function
+ * @set_toggle: platform specific set toggle function
* @dma_init: platform specific dma init function
* @dma_exit: platform specific dma exit function
* @init: turns on clocks, sets up platform-specific registers, etc
@@ -161,12 +166,16 @@ struct musb_platform_ops {
u16 fifo_mode;
u32 (*fifo_offset)(u8 epnum);
u32 (*busctl_offset)(u8 epnum, u16 offset);
- u8 (*readb)(const void __iomem *addr, unsigned offset);
- void (*writeb)(void __iomem *addr, unsigned offset, u8 data);
- u16 (*readw)(const void __iomem *addr, unsigned offset);
- void (*writew)(void __iomem *addr, unsigned offset, u16 data);
+ u8 (*readb)(void __iomem *addr, u32 offset);
+ void (*writeb)(void __iomem *addr, u32 offset, u8 data);
+ u8 (*clearb)(void __iomem *addr, u32 offset);
+ u16 (*readw)(void __iomem *addr, u32 offset);
+ void (*writew)(void __iomem *addr, u32 offset, u16 data);
+ u16 (*clearw)(void __iomem *addr, u32 offset);
void (*read_fifo)(struct musb_hw_ep *hw_ep, u16 len, u8 *buf);
void (*write_fifo)(struct musb_hw_ep *hw_ep, u16 len, const u8 *buf);
+ u16 (*get_toggle)(struct musb_qh *qh, int is_out);
+ u16 (*set_toggle)(struct musb_qh *qh, int is_out, struct urb *urb);
struct dma_controller *
(*dma_init) (struct musb *musb, void __iomem *base);
void (*dma_exit)(struct dma_controller *c);
@@ -487,6 +496,9 @@ extern void musb_start(struct musb *musb);
extern void musb_write_fifo(struct musb_hw_ep *ep, u16 len, const u8 *src);
extern void musb_read_fifo(struct musb_hw_ep *ep, u16 len, u8 *dst);
+extern int musb_set_host(struct musb *musb);
+extern int musb_set_peripheral(struct musb *musb);
+
extern void musb_load_testpacket(struct musb *);
extern irqreturn_t musb_interrupt(struct musb *);
diff --git a/drivers/usb/musb/musb_dma.h b/drivers/usb/musb/musb_dma.h
index 8f60271c0a9d..4b4d8dc5d3f2 100644
--- a/drivers/usb/musb/musb_dma.h
+++ b/drivers/usb/musb/musb_dma.h
@@ -35,6 +35,12 @@ struct musb_hw_ep;
* whether shared with the Inventra core or separate.
*/
+#define MUSB_HSDMA_BASE 0x200
+#define MUSB_HSDMA_INTR (MUSB_HSDMA_BASE + 0)
+#define MUSB_HSDMA_CONTROL 0x4
+#define MUSB_HSDMA_ADDRESS 0x8
+#define MUSB_HSDMA_COUNT 0xc
+
#define DMA_ADDR_INVALID (~(dma_addr_t)0)
#ifdef CONFIG_MUSB_PIO_ONLY
@@ -191,6 +197,9 @@ extern void (*musb_dma_controller_destroy)(struct dma_controller *);
extern struct dma_controller *
musbhs_dma_controller_create(struct musb *musb, void __iomem *base);
extern void musbhs_dma_controller_destroy(struct dma_controller *c);
+extern struct dma_controller *
+musbhs_dma_controller_create_noirq(struct musb *musb, void __iomem *base);
+extern irqreturn_t dma_controller_irq(int irq, void *private_data);
extern struct dma_controller *
tusb_dma_controller_create(struct musb *musb, void __iomem *base);
diff --git a/drivers/usb/musb/musb_host.c b/drivers/usb/musb/musb_host.c
index 5a44b70372d9..886c9b602f8c 100644
--- a/drivers/usb/musb/musb_host.c
+++ b/drivers/usb/musb/musb_host.c
@@ -286,26 +286,6 @@ __acquires(musb->lock)
spin_lock(&musb->lock);
}
-/* For bulk/interrupt endpoints only */
-static inline void musb_save_toggle(struct musb_qh *qh, int is_in,
- struct urb *urb)
-{
- void __iomem *epio = qh->hw_ep->regs;
- u16 csr;
-
- /*
- * FIXME: the current Mentor DMA code seems to have
- * problems getting toggle correct.
- */
-
- if (is_in)
- csr = musb_readw(epio, MUSB_RXCSR) & MUSB_RXCSR_H_DATATOGGLE;
- else
- csr = musb_readw(epio, MUSB_TXCSR) & MUSB_TXCSR_H_DATATOGGLE;
-
- usb_settoggle(urb->dev, qh->epnum, !is_in, csr ? 1 : 0);
-}
-
/*
* Advance this hardware endpoint's queue, completing the specified URB and
* advancing to either the next URB queued to that qh, or else invalidating
@@ -320,6 +300,7 @@ static void musb_advance_schedule(struct musb *musb, struct urb *urb,
struct musb_hw_ep *ep = qh->hw_ep;
int ready = qh->is_ready;
int status;
+ u16 toggle;
status = (urb->status == -EINPROGRESS) ? 0 : urb->status;
@@ -327,7 +308,8 @@ static void musb_advance_schedule(struct musb *musb, struct urb *urb,
switch (qh->type) {
case USB_ENDPOINT_XFER_BULK:
case USB_ENDPOINT_XFER_INT:
- musb_save_toggle(qh, is_in, urb);
+ toggle = musb->io.get_toggle(qh, !is_in);
+ usb_settoggle(urb->dev, qh->epnum, !is_in, toggle ? 1 : 0);
break;
case USB_ENDPOINT_XFER_ISOC:
if (status == 0 && urb->error_count)
@@ -772,13 +754,8 @@ static void musb_ep_program(struct musb *musb, u8 epnum,
);
csr |= MUSB_TXCSR_MODE;
- if (!hw_ep->tx_double_buffered) {
- if (usb_gettoggle(urb->dev, qh->epnum, 1))
- csr |= MUSB_TXCSR_H_WR_DATATOGGLE
- | MUSB_TXCSR_H_DATATOGGLE;
- else
- csr |= MUSB_TXCSR_CLRDATATOG;
- }
+ if (!hw_ep->tx_double_buffered)
+ csr |= musb->io.set_toggle(qh, is_out, urb);
musb_writew(epio, MUSB_TXCSR, csr);
/* REVISIT may need to clear FLUSHFIFO ... */
@@ -860,17 +837,12 @@ finish:
/* IN/receive */
} else {
- u16 csr;
+ u16 csr = 0;
if (hw_ep->rx_reinit) {
musb_rx_reinit(musb, qh, epnum);
+ csr |= musb->io.set_toggle(qh, is_out, urb);
- /* init new state: toggle and NYET, maybe DMA later */
- if (usb_gettoggle(urb->dev, qh->epnum, 0))
- csr = MUSB_RXCSR_H_WR_DATATOGGLE
- | MUSB_RXCSR_H_DATATOGGLE;
- else
- csr = 0;
if (qh->type == USB_ENDPOINT_XFER_INT)
csr |= MUSB_RXCSR_DISNYET;
@@ -933,6 +905,7 @@ static void musb_bulk_nak_timeout(struct musb *musb, struct musb_hw_ep *ep,
void __iomem *epio = ep->regs;
struct musb_qh *cur_qh, *next_qh;
u16 rx_csr, tx_csr;
+ u16 toggle;
musb_ep_select(mbase, ep->epnum);
if (is_in) {
@@ -970,7 +943,8 @@ static void musb_bulk_nak_timeout(struct musb *musb, struct musb_hw_ep *ep,
urb->actual_length += dma->actual_len;
dma->actual_len = 0L;
}
- musb_save_toggle(cur_qh, is_in, urb);
+ toggle = musb->io.get_toggle(cur_qh, !is_in);
+ usb_settoggle(urb->dev, cur_qh->epnum, !is_in, toggle ? 1 : 0);
if (is_in) {
/* move cur_qh to end of queue */
diff --git a/drivers/usb/musb/musb_io.h b/drivers/usb/musb/musb_io.h
index 8058a58092cf..f17aabd95a50 100644
--- a/drivers/usb/musb/musb_io.h
+++ b/drivers/usb/musb/musb_io.h
@@ -22,6 +22,8 @@
* @read_fifo: platform specific function to read fifo
* @write_fifo: platform specific function to write fifo
* @busctl_offset: platform specific function to get busctl offset
+ * @get_toggle: platform specific function to get toggle
+ * @set_toggle: platform specific function to set toggle
*/
struct musb_io {
u32 (*ep_offset)(u8 epnum, u16 offset);
@@ -30,14 +32,18 @@ struct musb_io {
void (*read_fifo)(struct musb_hw_ep *hw_ep, u16 len, u8 *buf);
void (*write_fifo)(struct musb_hw_ep *hw_ep, u16 len, const u8 *buf);
u32 (*busctl_offset)(u8 epnum, u16 offset);
+ u16 (*get_toggle)(struct musb_qh *qh, int is_out);
+ u16 (*set_toggle)(struct musb_qh *qh, int is_out, struct urb *urb);
};
/* Do not add new entries here, add them the struct musb_io instead */
-extern u8 (*musb_readb)(const void __iomem *addr, unsigned offset);
-extern void (*musb_writeb)(void __iomem *addr, unsigned offset, u8 data);
-extern u16 (*musb_readw)(const void __iomem *addr, unsigned offset);
-extern void (*musb_writew)(void __iomem *addr, unsigned offset, u16 data);
-extern u32 musb_readl(const void __iomem *addr, unsigned offset);
-extern void musb_writel(void __iomem *addr, unsigned offset, u32 data);
+extern u8 (*musb_readb)(void __iomem *addr, u32 offset);
+extern void (*musb_writeb)(void __iomem *addr, u32 offset, u8 data);
+extern u8 (*musb_clearb)(void __iomem *addr, u32 offset);
+extern u16 (*musb_readw)(void __iomem *addr, u32 offset);
+extern void (*musb_writew)(void __iomem *addr, u32 offset, u16 data);
+extern u16 (*musb_clearw)(void __iomem *addr, u32 offset);
+extern u32 musb_readl(void __iomem *addr, u32 offset);
+extern void musb_writel(void __iomem *addr, u32 offset, u32 data);
#endif
diff --git a/drivers/usb/musb/musb_trace.h b/drivers/usb/musb/musb_trace.h
index a97d618fe8ff..b193daf69685 100644
--- a/drivers/usb/musb/musb_trace.h
+++ b/drivers/usb/musb/musb_trace.h
@@ -38,11 +38,12 @@ TRACE_EVENT(musb_log,
);
DECLARE_EVENT_CLASS(musb_regb,
- TP_PROTO(void *caller, const void *addr, unsigned int offset, u8 data),
+ TP_PROTO(void *caller, const void __iomem *addr,
+ unsigned int offset, u8 data),
TP_ARGS(caller, addr, offset, data),
TP_STRUCT__entry(
__field(void *, caller)
- __field(const void *, addr)
+ __field(const void __iomem *, addr)
__field(unsigned int, offset)
__field(u8, data)
),
@@ -57,21 +58,24 @@ DECLARE_EVENT_CLASS(musb_regb,
);
DEFINE_EVENT(musb_regb, musb_readb,
- TP_PROTO(void *caller, const void *addr, unsigned int offset, u8 data),
+ TP_PROTO(void *caller, const void __iomem *addr,
+ unsigned int offset, u8 data),
TP_ARGS(caller, addr, offset, data)
);
DEFINE_EVENT(musb_regb, musb_writeb,
- TP_PROTO(void *caller, const void *addr, unsigned int offset, u8 data),
+ TP_PROTO(void *caller, const void __iomem *addr,
+ unsigned int offset, u8 data),
TP_ARGS(caller, addr, offset, data)
);
DECLARE_EVENT_CLASS(musb_regw,
- TP_PROTO(void *caller, const void *addr, unsigned int offset, u16 data),
+ TP_PROTO(void *caller, const void __iomem *addr,
+ unsigned int offset, u16 data),
TP_ARGS(caller, addr, offset, data),
TP_STRUCT__entry(
__field(void *, caller)
- __field(const void *, addr)
+ __field(const void __iomem *, addr)
__field(unsigned int, offset)
__field(u16, data)
),
@@ -86,21 +90,24 @@ DECLARE_EVENT_CLASS(musb_regw,
);
DEFINE_EVENT(musb_regw, musb_readw,
- TP_PROTO(void *caller, const void *addr, unsigned int offset, u16 data),
+ TP_PROTO(void *caller, const void __iomem *addr,
+ unsigned int offset, u16 data),
TP_ARGS(caller, addr, offset, data)
);
DEFINE_EVENT(musb_regw, musb_writew,
- TP_PROTO(void *caller, const void *addr, unsigned int offset, u16 data),
+ TP_PROTO(void *caller, const void __iomem *addr,
+ unsigned int offset, u16 data),
TP_ARGS(caller, addr, offset, data)
);
DECLARE_EVENT_CLASS(musb_regl,
- TP_PROTO(void *caller, const void *addr, unsigned int offset, u32 data),
+ TP_PROTO(void *caller, const void __iomem *addr,
+ unsigned int offset, u32 data),
TP_ARGS(caller, addr, offset, data),
TP_STRUCT__entry(
__field(void *, caller)
- __field(const void *, addr)
+ __field(const void __iomem *, addr)
__field(unsigned int, offset)
__field(u32, data)
),
@@ -115,12 +122,14 @@ DECLARE_EVENT_CLASS(musb_regl,
);
DEFINE_EVENT(musb_regl, musb_readl,
- TP_PROTO(void *caller, const void *addr, unsigned int offset, u32 data),
+ TP_PROTO(void *caller, const void __iomem *addr,
+ unsigned int offset, u32 data),
TP_ARGS(caller, addr, offset, data)
);
DEFINE_EVENT(musb_regl, musb_writel,
- TP_PROTO(void *caller, const void *addr, unsigned int offset, u32 data),
+ TP_PROTO(void *caller, const void __iomem *addr,
+ unsigned int offset, u32 data),
TP_ARGS(caller, addr, offset, data)
);
diff --git a/drivers/usb/musb/musbhsdma.c b/drivers/usb/musb/musbhsdma.c
index 2d3751d885b4..0aacfc8be5a1 100644
--- a/drivers/usb/musb/musbhsdma.c
+++ b/drivers/usb/musb/musbhsdma.c
@@ -10,12 +10,7 @@
#include <linux/platform_device.h>
#include <linux/slab.h>
#include "musb_core.h"
-
-#define MUSB_HSDMA_BASE 0x200
-#define MUSB_HSDMA_INTR (MUSB_HSDMA_BASE + 0)
-#define MUSB_HSDMA_CONTROL 0x4
-#define MUSB_HSDMA_ADDRESS 0x8
-#define MUSB_HSDMA_COUNT 0xc
+#include "musb_dma.h"
#define MUSB_HSDMA_CHANNEL_OFFSET(_bchannel, _offset) \
(MUSB_HSDMA_BASE + (_bchannel << 4) + _offset)
@@ -268,7 +263,7 @@ static int dma_channel_abort(struct dma_channel *channel)
return 0;
}
-static irqreturn_t dma_controller_irq(int irq, void *private_data)
+irqreturn_t dma_controller_irq(int irq, void *private_data)
{
struct musb_dma_controller *controller = private_data;
struct musb *musb = controller->private_data;
@@ -289,7 +284,7 @@ static irqreturn_t dma_controller_irq(int irq, void *private_data)
spin_lock_irqsave(&musb->lock, flags);
- int_hsdma = musb_readb(mbase, MUSB_HSDMA_INTR);
+ int_hsdma = musb_clearb(mbase, MUSB_HSDMA_INTR);
if (!int_hsdma) {
musb_dbg(musb, "spurious DMA irq");
@@ -383,6 +378,7 @@ done:
spin_unlock_irqrestore(&musb->lock, flags);
return retval;
}
+EXPORT_SYMBOL_GPL(dma_controller_irq);
void musbhs_dma_controller_destroy(struct dma_controller *c)
{
@@ -398,18 +394,10 @@ void musbhs_dma_controller_destroy(struct dma_controller *c)
}
EXPORT_SYMBOL_GPL(musbhs_dma_controller_destroy);
-struct dma_controller *musbhs_dma_controller_create(struct musb *musb,
- void __iomem *base)
+static struct musb_dma_controller *
+dma_controller_alloc(struct musb *musb, void __iomem *base)
{
struct musb_dma_controller *controller;
- struct device *dev = musb->controller;
- struct platform_device *pdev = to_platform_device(dev);
- int irq = platform_get_irq_byname(pdev, "dma");
-
- if (irq <= 0) {
- dev_err(dev, "No DMA interrupt line!\n");
- return NULL;
- }
controller = kzalloc(sizeof(*controller), GFP_KERNEL);
if (!controller)
@@ -423,6 +411,25 @@ struct dma_controller *musbhs_dma_controller_create(struct musb *musb,
controller->controller.channel_release = dma_channel_release;
controller->controller.channel_program = dma_channel_program;
controller->controller.channel_abort = dma_channel_abort;
+ return controller;
+}
+
+struct dma_controller *
+musbhs_dma_controller_create(struct musb *musb, void __iomem *base)
+{
+ struct musb_dma_controller *controller;
+ struct device *dev = musb->controller;
+ struct platform_device *pdev = to_platform_device(dev);
+ int irq = platform_get_irq_byname(pdev, "dma");
+
+ if (irq <= 0) {
+ dev_err(dev, "No DMA interrupt line!\n");
+ return NULL;
+ }
+
+ controller = dma_controller_alloc(musb, base);
+ if (!controller)
+ return NULL;
if (request_irq(irq, dma_controller_irq, 0,
dev_name(musb->controller), controller)) {
@@ -437,3 +444,16 @@ struct dma_controller *musbhs_dma_controller_create(struct musb *musb,
return &controller->controller;
}
EXPORT_SYMBOL_GPL(musbhs_dma_controller_create);
+
+struct dma_controller *
+musbhs_dma_controller_create_noirq(struct musb *musb, void __iomem *base)
+{
+ struct musb_dma_controller *controller;
+
+ controller = dma_controller_alloc(musb, base);
+ if (!controller)
+ return NULL;
+
+ return &controller->controller;
+}
+EXPORT_SYMBOL_GPL(musbhs_dma_controller_create_noirq);
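The _noirq variant skips the "dma" interrupt lookup entirely; a glue layer that demultiplexes the DMA interrupt itself (as the MediaTek driver above does from its L1 handler) wires both pieces up through the platform ops. A minimal sketch of such an ops table (demo_ name assumed):

static const struct musb_platform_ops demo_ops = {
	.quirks   = MUSB_DMA_INVENTRA,
	/* glue owns the IRQ and calls dma_controller_irq() itself */
	.dma_init = musbhs_dma_controller_create_noirq,
	.dma_exit = musbhs_dma_controller_destroy,
};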
diff --git a/drivers/usb/musb/omap2430.c b/drivers/usb/musb/omap2430.c
index a3d2fef67746..d62c78b97cad 100644
--- a/drivers/usb/musb/omap2430.c
+++ b/drivers/usb/musb/omap2430.c
@@ -38,69 +38,6 @@ struct omap2430_glue {
static struct omap2430_glue *_glue;
-static void omap2430_musb_set_vbus(struct musb *musb, int is_on)
-{
- struct usb_otg *otg = musb->xceiv->otg;
- u8 devctl;
- unsigned long timeout = jiffies + msecs_to_jiffies(1000);
- /* HDRC controls CPEN, but beware current surges during device
- * connect. They can trigger transient overcurrent conditions
- * that must be ignored.
- */
-
- devctl = musb_readb(musb->mregs, MUSB_DEVCTL);
-
- if (is_on) {
- if (musb->xceiv->otg->state == OTG_STATE_A_IDLE) {
- int loops = 100;
- /* start the session */
- devctl |= MUSB_DEVCTL_SESSION;
- musb_writeb(musb->mregs, MUSB_DEVCTL, devctl);
- /*
- * Wait for the musb to set as A device to enable the
- * VBUS
- */
- while (musb_readb(musb->mregs, MUSB_DEVCTL) &
- MUSB_DEVCTL_BDEVICE) {
-
- mdelay(5);
- cpu_relax();
-
- if (time_after(jiffies, timeout)
- || loops-- <= 0) {
- dev_err(musb->controller,
- "configured as A device timeout");
- break;
- }
- }
-
- otg_set_vbus(otg, 1);
- } else {
- musb->is_active = 1;
- musb->xceiv->otg->state = OTG_STATE_A_WAIT_VRISE;
- devctl |= MUSB_DEVCTL_SESSION;
- MUSB_HST_MODE(musb);
- }
- } else {
- musb->is_active = 0;
-
- /* NOTE: we're skipping A_WAIT_VFALL -> A_IDLE and
- * jumping right to B_IDLE...
- */
-
- musb->xceiv->otg->state = OTG_STATE_B_IDLE;
- devctl &= ~MUSB_DEVCTL_SESSION;
-
- MUSB_DEV_MODE(musb);
- }
- musb_writeb(musb->mregs, MUSB_DEVCTL, devctl);
-
- dev_dbg(musb->controller, "VBUS %s, devctl %02x "
- /* otg %3x conf %08x prcm %08x */ "\n",
- usb_otg_state_string(musb->xceiv->otg->state),
- musb_readb(musb->mregs, MUSB_DEVCTL));
-}
-
static inline void omap2430_low_level_exit(struct musb *musb)
{
u32 l;
@@ -140,24 +77,52 @@ static int omap2430_musb_mailbox(enum musb_vbus_id_status status)
return 0;
}
+/*
+ * HDRC controls CPEN, but beware current surges during device connect.
+ * They can trigger transient overcurrent conditions that must be ignored.
+ *
+ * Note that we're skipping A_WAIT_VFALL -> A_IDLE and jumping right to B_IDLE
+ * as set by musb_set_peripheral().
+ */
static void omap_musb_set_mailbox(struct omap2430_glue *glue)
{
struct musb *musb = glue_to_musb(glue);
- struct musb_hdrc_platform_data *pdata =
- dev_get_platdata(musb->controller);
- struct omap_musb_board_data *data = pdata->board_data;
+ int error;
pm_runtime_get_sync(musb->controller);
+
+ dev_dbg(musb->controller, "VBUS %s, devctl %02x\n",
+ usb_otg_state_string(musb->xceiv->otg->state),
+ musb_readb(musb->mregs, MUSB_DEVCTL));
+
switch (glue->status) {
case MUSB_ID_GROUND:
dev_dbg(musb->controller, "ID GND\n");
-
- musb->xceiv->otg->state = OTG_STATE_A_IDLE;
- musb->xceiv->last_event = USB_EVENT_ID;
- if (musb->gadget_driver) {
- omap_control_usb_set_mode(glue->control_otghs,
- USB_MODE_HOST);
- omap2430_musb_set_vbus(musb, 1);
+ switch (musb->xceiv->otg->state) {
+ case OTG_STATE_A_IDLE:
+ error = musb_set_host(musb);
+ if (error)
+ break;
+ musb->xceiv->otg->state = OTG_STATE_A_WAIT_VRISE;
+ /* Fall through */
+ case OTG_STATE_A_WAIT_VRISE:
+ case OTG_STATE_A_WAIT_BCON:
+ case OTG_STATE_A_HOST:
+ /*
+ * On multiple ID ground interrupts just keep enabling
+ * VBUS. At least cpcap VBUS shuts down otherwise.
+ */
+ otg_set_vbus(musb->xceiv->otg, 1);
+ break;
+ default:
+ musb->xceiv->otg->state = OTG_STATE_A_IDLE;
+ musb->xceiv->last_event = USB_EVENT_ID;
+ if (musb->gadget_driver) {
+ omap_control_usb_set_mode(glue->control_otghs,
+ USB_MODE_HOST);
+ otg_set_vbus(musb->xceiv->otg, 1);
+ }
+ break;
}
break;
@@ -174,12 +139,8 @@ static void omap_musb_set_mailbox(struct omap2430_glue *glue)
dev_dbg(musb->controller, "VBUS Disconnect\n");
musb->xceiv->last_event = USB_EVENT_NONE;
- if (musb->gadget_driver)
- omap2430_musb_set_vbus(musb, 0);
-
- if (data->interface_type == MUSB_INTERFACE_UTMI)
- otg_set_vbus(musb->xceiv->otg, 0);
-
+ musb_set_peripheral(musb);
+ otg_set_vbus(musb->xceiv->otg, 0);
omap_control_usb_set_mode(glue->control_otghs,
USB_MODE_DISCONNECT);
break;
@@ -226,7 +187,6 @@ static int omap2430_musb_init(struct musb *musb)
u32 l;
int status = 0;
struct device *dev = musb->controller;
- struct omap2430_glue *glue = dev_get_drvdata(dev->parent);
struct musb_hdrc_platform_data *plat = dev_get_platdata(dev);
struct omap_musb_board_data *data = plat->board_data;
@@ -282,50 +242,17 @@ static int omap2430_musb_init(struct musb *musb)
musb_readl(musb->mregs, OTG_INTERFSEL),
musb_readl(musb->mregs, OTG_SIMENABLE));
- if (glue->status != MUSB_UNKNOWN)
- omap_musb_set_mailbox(glue);
-
return 0;
}
static void omap2430_musb_enable(struct musb *musb)
{
- u8 devctl;
- unsigned long timeout = jiffies + msecs_to_jiffies(1000);
struct device *dev = musb->controller;
struct omap2430_glue *glue = dev_get_drvdata(dev->parent);
- struct musb_hdrc_platform_data *pdata = dev_get_platdata(dev);
- struct omap_musb_board_data *data = pdata->board_data;
-
-
- switch (glue->status) {
-
- case MUSB_ID_GROUND:
- omap_control_usb_set_mode(glue->control_otghs, USB_MODE_HOST);
- if (data->interface_type != MUSB_INTERFACE_UTMI)
- break;
- devctl = musb_readb(musb->mregs, MUSB_DEVCTL);
- /* start the session */
- devctl |= MUSB_DEVCTL_SESSION;
- musb_writeb(musb->mregs, MUSB_DEVCTL, devctl);
- while (musb_readb(musb->mregs, MUSB_DEVCTL) &
- MUSB_DEVCTL_BDEVICE) {
- cpu_relax();
-
- if (time_after(jiffies, timeout)) {
- dev_err(dev, "configured as A device timeout");
- break;
- }
- }
- break;
- case MUSB_VBUS_VALID:
- omap_control_usb_set_mode(glue->control_otghs, USB_MODE_DEVICE);
- break;
-
- default:
- break;
- }
+ if (glue->status == MUSB_UNKNOWN)
+ glue->status = MUSB_VBUS_OFF;
+ omap_musb_set_mailbox(glue);
}
static void omap2430_musb_disable(struct musb *musb)
@@ -361,8 +288,6 @@ static const struct musb_platform_ops omap2430_ops = {
.init = omap2430_musb_init,
.exit = omap2430_musb_exit,
- .set_vbus = omap2430_musb_set_vbus,
-
.enable = omap2430_musb_enable,
.disable = omap2430_musb_disable,
@@ -552,6 +477,9 @@ static int omap2430_runtime_resume(struct device *dev)
musb_writel(musb->mregs, OTG_INTERFSEL,
musb->context.otg_interfsel);
+ /* Wait for musb to get oriented. Otherwise we can get babble */
+ usleep_range(200000, 250000);
+
return 0;
}
diff --git a/drivers/usb/musb/sunxi.c b/drivers/usb/musb/sunxi.c
index 832a41f9ee7d..f3f76f2ac63f 100644
--- a/drivers/usb/musb/sunxi.c
+++ b/drivers/usb/musb/sunxi.c
@@ -407,7 +407,7 @@ static u32 sunxi_musb_busctl_offset(u8 epnum, u16 offset)
return SUNXI_MUSB_TXFUNCADDR + offset;
}
-static u8 sunxi_musb_readb(const void __iomem *addr, unsigned offset)
+static u8 sunxi_musb_readb(void __iomem *addr, u32 offset)
{
struct sunxi_glue *glue;
@@ -520,7 +520,7 @@ static void sunxi_musb_writeb(void __iomem *addr, unsigned offset, u8 data)
(int)(addr - sunxi_musb->mregs));
}
-static u16 sunxi_musb_readw(const void __iomem *addr, unsigned offset)
+static u16 sunxi_musb_readw(void __iomem *addr, u32 offset)
{
if (addr == sunxi_musb->mregs) {
/* generic control or fifo control reg access */
@@ -781,6 +781,8 @@ static int sunxi_musb_probe(struct platform_device *pdev)
pinfo.name = "musb-hdrc";
pinfo.id = PLATFORM_DEVID_AUTO;
pinfo.parent = &pdev->dev;
+ pinfo.fwnode = of_fwnode_handle(pdev->dev.of_node);
+ pinfo.of_node_reused = true;
pinfo.res = pdev->resource;
pinfo.num_res = pdev->num_resources;
pinfo.data = &pdata;
diff --git a/drivers/usb/musb/tusb6010.c b/drivers/usb/musb/tusb6010.c
index 39453287b5c3..5d449089e3ad 100644
--- a/drivers/usb/musb/tusb6010.c
+++ b/drivers/usb/musb/tusb6010.c
@@ -142,7 +142,7 @@ static void tusb_ep_select(void __iomem *mbase, u8 epnum)
/*
* TUSB6010 doesn't allow 8-bit access; 16-bit access is the minimum.
*/
-static u8 tusb_readb(const void __iomem *addr, unsigned offset)
+static u8 tusb_readb(void __iomem *addr, u32 offset)
{
u16 tmp;
u8 val;
diff --git a/drivers/usb/musb/ux500_dma.c b/drivers/usb/musb/ux500_dma.c
index d19bb3e89da6..d5cf5e8bb1ca 100644
--- a/drivers/usb/musb/ux500_dma.c
+++ b/drivers/usb/musb/ux500_dma.c
@@ -310,9 +310,9 @@ static int ux500_dma_controller_start(struct ux500_dma_controller *controller)
dma_channel->max_len = SZ_16M;
ux500_channel->dma_chan =
- dma_request_slave_channel(dev, chan_names[ch_num]);
+ dma_request_chan(dev, chan_names[ch_num]);
- if (!ux500_channel->dma_chan)
+ if (IS_ERR(ux500_channel->dma_chan))
ux500_channel->dma_chan =
dma_request_channel(mask,
data ?
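dma_request_slave_channel() returned NULL on any failure, swallowing -EPROBE_DEFER; dma_request_chan() returns ERR_PTR() codes instead, hence the IS_ERR() test. A sketch of a caller that preserves probe deferral while still allowing a fallback (demo_ name assumed, usual linux/dmaengine.h context):

static struct dma_chan *demo_request_chan(struct device *dev,
					  const char *name)
{
	struct dma_chan *chan = dma_request_chan(dev, name);

	if (IS_ERR(chan)) {
		if (PTR_ERR(chan) == -EPROBE_DEFER)
			return chan;	/* let probe retry later */
		return NULL;		/* caller may try a fallback */
	}

	return chan;
}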
diff --git a/drivers/usb/phy/Kconfig b/drivers/usb/phy/Kconfig
index 24b4f091acb8..ff24fca0a2d9 100644
--- a/drivers/usb/phy/Kconfig
+++ b/drivers/usb/phy/Kconfig
@@ -162,7 +162,7 @@ config USB_MXS_PHY
config USB_TEGRA_PHY
tristate "NVIDIA Tegra USB PHY Driver"
- depends on ARCH_TEGRA
+ depends on ARCH_TEGRA || COMPILE_TEST
select USB_COMMON
select USB_PHY
select USB_ULPI
@@ -172,7 +172,7 @@ config USB_TEGRA_PHY
config USB_ULPI
bool "Generic ULPI Transceiver Driver"
- depends on ARM || ARM64
+ depends on ARM || ARM64 || COMPILE_TEST
select USB_ULPI_VIEWPORT
help
Enable this to support ULPI connected USB OTG transceivers which
diff --git a/drivers/usb/phy/phy-ab8500-usb.c b/drivers/usb/phy/phy-ab8500-usb.c
index 4bb4b1d42f32..20c0f082bf9c 100644
--- a/drivers/usb/phy/phy-ab8500-usb.c
+++ b/drivers/usb/phy/phy-ab8500-usb.c
@@ -108,7 +108,8 @@ enum ab8500_usb_mode {
USB_IDLE = 0,
USB_PERIPHERAL,
USB_HOST,
- USB_DEDICATED_CHG
+ USB_DEDICATED_CHG,
+ USB_UART
};
/* Register USB_LINK_STATUS interrupt */
@@ -393,6 +394,24 @@ static int ab8505_usb_link_status_update(struct ab8500_usb *ab,
usb_phy_set_event(&ab->phy, USB_EVENT_CHARGER);
break;
+ /*
+ * FIXME: For now we rely on the boot firmware to set up the necessary
+ * PHY/pin configuration for UART mode.
+ *
+ * AB8505 does not seem to report any status change for UART cables,
+ * possibly because it cannot detect them autonomously.
+ * We may need to measure the ID resistance manually to reliably
+ * detect UART cables after bootup.
+ */
+ case USB_LINK_SAMSUNG_UART_CBL_PHY_EN_8505:
+ case USB_LINK_SAMSUNG_UART_CBL_PHY_DISB_8505:
+ if (ab->mode == USB_IDLE) {
+ ab->mode = USB_UART;
+ ab8500_usb_peri_phy_en(ab);
+ }
+
+ break;
+
default:
break;
}
@@ -566,6 +585,11 @@ static irqreturn_t ab8500_usb_disconnect_irq(int irq, void *data)
ab->vbus_draw = 0;
}
+ if (ab->mode == USB_UART) {
+ ab8500_usb_peri_phy_dis(ab);
+ ab->mode = USB_IDLE;
+ }
+
if (is_ab8500_2p0(ab->ab8500)) {
if (ab->mode == USB_DEDICATED_CHG) {
ab8500_usb_wd_linkstatus(ab,
diff --git a/drivers/usb/phy/phy-am335x.c b/drivers/usb/phy/phy-am335x.c
index f5f0568d8533..8524475d942d 100644
--- a/drivers/usb/phy/phy-am335x.c
+++ b/drivers/usb/phy/phy-am335x.c
@@ -57,7 +57,7 @@ static int am335x_phy_probe(struct platform_device *pdev)
am_phy->dr_mode = of_usb_get_dr_mode_by_phy(pdev->dev.of_node, -1);
- ret = usb_phy_gen_create_phy(dev, &am_phy->usb_phy_gen, NULL);
+ ret = usb_phy_gen_create_phy(dev, &am_phy->usb_phy_gen);
if (ret)
return ret;
diff --git a/drivers/usb/phy/phy-generic.c b/drivers/usb/phy/phy-generic.c
index a53b89be5324..661a229c105d 100644
--- a/drivers/usb/phy/phy-generic.c
+++ b/drivers/usb/phy/phy-generic.c
@@ -21,8 +21,7 @@
#include <linux/clk.h>
#include <linux/regulator/consumer.h>
#include <linux/of.h>
-#include <linux/of_gpio.h>
-#include <linux/gpio.h>
+#include <linux/gpio/consumer.h>
#include <linux/delay.h>
#include "phy-generic.h"
@@ -204,8 +203,7 @@ static int nop_set_host(struct usb_otg *otg, struct usb_bus *host)
return 0;
}
-int usb_phy_gen_create_phy(struct device *dev, struct usb_phy_generic *nop,
- struct usb_phy_generic_platform_data *pdata)
+int usb_phy_gen_create_phy(struct device *dev, struct usb_phy_generic *nop)
{
enum usb_phy_type type = USB_PHY_TYPE_USB2;
int err = 0;
@@ -221,28 +219,15 @@ int usb_phy_gen_create_phy(struct device *dev, struct usb_phy_generic *nop,
needs_vcc = of_property_read_bool(node, "vcc-supply");
needs_clk = of_property_read_bool(node, "clocks");
- nop->gpiod_reset = devm_gpiod_get_optional(dev, "reset",
- GPIOD_ASIS);
- err = PTR_ERR_OR_ZERO(nop->gpiod_reset);
- if (!err) {
- nop->gpiod_vbus = devm_gpiod_get_optional(dev,
- "vbus-detect",
- GPIOD_ASIS);
- err = PTR_ERR_OR_ZERO(nop->gpiod_vbus);
- }
- } else if (pdata) {
- type = pdata->type;
- clk_rate = pdata->clk_rate;
- needs_vcc = pdata->needs_vcc;
- if (gpio_is_valid(pdata->gpio_reset)) {
- err = devm_gpio_request_one(dev, pdata->gpio_reset,
- GPIOF_ACTIVE_LOW,
- dev_name(dev));
- if (!err)
- nop->gpiod_reset =
- gpio_to_desc(pdata->gpio_reset);
- }
- nop->gpiod_vbus = pdata->gpiod_vbus;
+ }
+ nop->gpiod_reset = devm_gpiod_get_optional(dev, "reset",
+ GPIOD_ASIS);
+ err = PTR_ERR_OR_ZERO(nop->gpiod_reset);
+ if (!err) {
+ nop->gpiod_vbus = devm_gpiod_get_optional(dev,
+ "vbus-detect",
+ GPIOD_ASIS);
+ err = PTR_ERR_OR_ZERO(nop->gpiod_vbus);
}
if (err == -EPROBE_DEFER)
@@ -308,7 +293,7 @@ static int usb_phy_generic_probe(struct platform_device *pdev)
if (!nop)
return -ENOMEM;
- err = usb_phy_gen_create_phy(dev, nop, dev_get_platdata(&pdev->dev));
+ err = usb_phy_gen_create_phy(dev, nop);
if (err)
return err;
if (nop->gpiod_vbus) {
diff --git a/drivers/usb/phy/phy-generic.h b/drivers/usb/phy/phy-generic.h
index 97289627561d..7ee80211a688 100644
--- a/drivers/usb/phy/phy-generic.h
+++ b/drivers/usb/phy/phy-generic.h
@@ -22,7 +22,6 @@ struct usb_phy_generic {
int usb_gen_phy_init(struct usb_phy *phy);
void usb_gen_phy_shutdown(struct usb_phy *phy);
-int usb_phy_gen_create_phy(struct device *dev, struct usb_phy_generic *nop,
- struct usb_phy_generic_platform_data *pdata);
+int usb_phy_gen_create_phy(struct device *dev, struct usb_phy_generic *nop);
#endif
diff --git a/drivers/usb/phy/phy-gpio-vbus-usb.c b/drivers/usb/phy/phy-gpio-vbus-usb.c
index 553e2573c74f..f13f5530746c 100644
--- a/drivers/usb/phy/phy-gpio-vbus-usb.c
+++ b/drivers/usb/phy/phy-gpio-vbus-usb.c
@@ -7,7 +7,7 @@
#include <linux/kernel.h>
#include <linux/platform_device.h>
-#include <linux/gpio.h>
+#include <linux/gpio/consumer.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
@@ -17,7 +17,6 @@
#include <linux/regulator/consumer.h>
#include <linux/usb/gadget.h>
-#include <linux/usb/gpio_vbus.h>
#include <linux/usb/otg.h>
@@ -29,6 +28,8 @@
* Needs to be loaded before the UDC driver that will use it.
*/
struct gpio_vbus_data {
+ struct gpio_desc *vbus_gpiod;
+ struct gpio_desc *pullup_gpiod;
struct usb_phy phy;
struct device *dev;
struct regulator *vbus_draw;
@@ -83,38 +84,30 @@ static void set_vbus_draw(struct gpio_vbus_data *gpio_vbus, unsigned mA)
gpio_vbus->mA = mA;
}
-static int is_vbus_powered(struct gpio_vbus_mach_info *pdata)
+static int is_vbus_powered(struct gpio_vbus_data *gpio_vbus)
{
- int vbus;
-
- vbus = gpio_get_value(pdata->gpio_vbus);
- if (pdata->gpio_vbus_inverted)
- vbus = !vbus;
-
- return vbus;
+ return gpiod_get_value(gpio_vbus->vbus_gpiod);
}
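The gpio_vbus_inverted flag disappears because gpiod_get_value() reports the logical level: any ACTIVE_LOW polarity is folded in by gpiolib from the GPIO description (device tree or lookup table). A minimal sketch of logical-level VBUS sensing (demo_ name assumed):

static int demo_vbus_present(struct device *dev)
{
	struct gpio_desc *vbus = devm_gpiod_get(dev, "vbus", GPIOD_IN);

	if (IS_ERR(vbus))
		return PTR_ERR(vbus);

	/* polarity is already handled by gpiolib */
	return gpiod_get_value(vbus);
}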
static void gpio_vbus_work(struct work_struct *work)
{
struct gpio_vbus_data *gpio_vbus =
container_of(work, struct gpio_vbus_data, work.work);
- struct gpio_vbus_mach_info *pdata = dev_get_platdata(gpio_vbus->dev);
- int gpio, status, vbus;
+ int status, vbus;
if (!gpio_vbus->phy.otg->gadget)
return;
- vbus = is_vbus_powered(pdata);
+ vbus = is_vbus_powered(gpio_vbus);
if ((vbus ^ gpio_vbus->vbus) == 0)
return;
gpio_vbus->vbus = vbus;
/* Peripheral controllers which manage the pullup themselves won't have
- * gpio_pullup configured here. If it's configured here, we'll do what
- * isp1301_omap::b_peripheral() does and enable the pullup here... although
- * that may complicate usb_gadget_{,dis}connect() support.
+ * a pullup GPIO configured here. If it's configured here, we'll do
+ * what isp1301_omap::b_peripheral() does and enable the pullup here...
+ * although that may complicate usb_gadget_{,dis}connect() support.
*/
- gpio = pdata->gpio_pullup;
if (vbus) {
status = USB_EVENT_VBUS;
@@ -126,16 +119,16 @@ static void gpio_vbus_work(struct work_struct *work)
set_vbus_draw(gpio_vbus, 100);
/* optionally enable D+ pullup */
- if (gpio_is_valid(gpio))
- gpio_set_value(gpio, !pdata->gpio_pullup_inverted);
+ if (gpio_vbus->pullup_gpiod)
+ gpiod_set_value(gpio_vbus->pullup_gpiod, 1);
atomic_notifier_call_chain(&gpio_vbus->phy.notifier,
status, gpio_vbus->phy.otg->gadget);
usb_phy_set_event(&gpio_vbus->phy, USB_EVENT_ENUMERATED);
} else {
/* optionally disable D+ pullup */
- if (gpio_is_valid(gpio))
- gpio_set_value(gpio, pdata->gpio_pullup_inverted);
+ if (gpio_vbus->pullup_gpiod)
+ gpiod_set_value(gpio_vbus->pullup_gpiod, 0);
set_vbus_draw(gpio_vbus, 0);
@@ -154,12 +147,11 @@ static void gpio_vbus_work(struct work_struct *work)
static irqreturn_t gpio_vbus_irq(int irq, void *data)
{
struct platform_device *pdev = data;
- struct gpio_vbus_mach_info *pdata = dev_get_platdata(&pdev->dev);
struct gpio_vbus_data *gpio_vbus = platform_get_drvdata(pdev);
struct usb_otg *otg = gpio_vbus->phy.otg;
dev_dbg(&pdev->dev, "VBUS %s (gadget: %s)\n",
- is_vbus_powered(pdata) ? "supplied" : "inactive",
+ is_vbus_powered(gpio_vbus) ? "supplied" : "inactive",
otg->gadget ? otg->gadget->name : "none");
if (otg->gadget)
@@ -175,22 +167,18 @@ static int gpio_vbus_set_peripheral(struct usb_otg *otg,
struct usb_gadget *gadget)
{
struct gpio_vbus_data *gpio_vbus;
- struct gpio_vbus_mach_info *pdata;
struct platform_device *pdev;
- int gpio;
gpio_vbus = container_of(otg->usb_phy, struct gpio_vbus_data, phy);
pdev = to_platform_device(gpio_vbus->dev);
- pdata = dev_get_platdata(gpio_vbus->dev);
- gpio = pdata->gpio_pullup;
if (!gadget) {
dev_dbg(&pdev->dev, "unregistering gadget '%s'\n",
otg->gadget->name);
/* optionally disable D+ pullup */
- if (gpio_is_valid(gpio))
- gpio_set_value(gpio, pdata->gpio_pullup_inverted);
+ if (gpio_vbus->pullup_gpiod)
+ gpiod_set_value(gpio_vbus->pullup_gpiod, 0);
set_vbus_draw(gpio_vbus, 0);
@@ -242,16 +230,12 @@ static int gpio_vbus_set_suspend(struct usb_phy *phy, int suspend)
static int gpio_vbus_probe(struct platform_device *pdev)
{
- struct gpio_vbus_mach_info *pdata = dev_get_platdata(&pdev->dev);
struct gpio_vbus_data *gpio_vbus;
struct resource *res;
- int err, gpio, irq;
+ struct device *dev = &pdev->dev;
+ int err, irq;
unsigned long irqflags;
- if (!pdata || !gpio_is_valid(pdata->gpio_vbus))
- return -EINVAL;
- gpio = pdata->gpio_vbus;
-
gpio_vbus = devm_kzalloc(&pdev->dev, sizeof(struct gpio_vbus_data),
GFP_KERNEL);
if (!gpio_vbus)
@@ -273,37 +257,43 @@ static int gpio_vbus_probe(struct platform_device *pdev)
gpio_vbus->phy.otg->usb_phy = &gpio_vbus->phy;
gpio_vbus->phy.otg->set_peripheral = gpio_vbus_set_peripheral;
- err = devm_gpio_request(&pdev->dev, gpio, "vbus_detect");
- if (err) {
- dev_err(&pdev->dev, "can't request vbus gpio %d, err: %d\n",
- gpio, err);
+ /* Look up the VBUS sensing GPIO */
+ gpio_vbus->vbus_gpiod = devm_gpiod_get(dev, "vbus", GPIOD_IN);
+ if (IS_ERR(gpio_vbus->vbus_gpiod)) {
+ err = PTR_ERR(gpio_vbus->vbus_gpiod);
+ dev_err(&pdev->dev, "can't request vbus gpio, err: %d\n", err);
return err;
}
- gpio_direction_input(gpio);
+ gpiod_set_consumer_name(gpio_vbus->vbus_gpiod, "vbus_detect");
res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
if (res) {
irq = res->start;
irqflags = (res->flags & IRQF_TRIGGER_MASK) | IRQF_SHARED;
} else {
- irq = gpio_to_irq(gpio);
+ irq = gpiod_to_irq(gpio_vbus->vbus_gpiod);
irqflags = VBUS_IRQ_FLAGS;
}
gpio_vbus->irq = irq;
- /* if data line pullup is in use, initialize it to "not pulling up" */
- gpio = pdata->gpio_pullup;
- if (gpio_is_valid(gpio)) {
- err = devm_gpio_request(&pdev->dev, gpio, "udc_pullup");
- if (err) {
- dev_err(&pdev->dev,
- "can't request pullup gpio %d, err: %d\n",
- gpio, err);
- return err;
- }
- gpio_direction_output(gpio, pdata->gpio_pullup_inverted);
+ /*
+ * The VBUS sensing GPIO should have a pulldown, which will normally be
+ * part of a resistor ladder turning a 4.0V-5.25V level on VBUS into a
+ * value the GPIO detects as active. Some systems will use comparators.
+ * Get the optional D+ or D- pullup GPIO. If the data line pullup is
+ * in use, initialize it to "not pulling up".
+ */
+ gpio_vbus->pullup_gpiod = devm_gpiod_get_optional(dev, "pullup",
+ GPIOD_OUT_LOW);
+ if (IS_ERR(gpio_vbus->pullup_gpiod)) {
+ err = PTR_ERR(gpio_vbus->pullup_gpiod);
+ dev_err(&pdev->dev, "can't request pullup gpio, err: %d\n",
+ err);
+ return err;
}
+ if (gpio_vbus->pullup_gpiod)
+ gpiod_set_consumer_name(gpio_vbus->pullup_gpiod, "udc_pullup");
err = devm_request_irq(&pdev->dev, irq, gpio_vbus_irq, irqflags,
"vbus_detect", pdev);
@@ -330,7 +320,7 @@ static int gpio_vbus_probe(struct platform_device *pdev)
return err;
}
- device_init_wakeup(&pdev->dev, pdata->wakeup);
+ /* TODO: wakeup could be enabled here with device_init_wakeup(dev, 1) */
return 0;
}
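
The conversion above drops both *_inverted platform-data flags because polarity is now a property of the GPIO descriptor itself (e.g. GPIO_ACTIVE_LOW in the board's firmware description), so the driver only ever deals in logical levels. The contrast, as a sketch; the legacy half mirrors the code this patch removes, and the pdata type is the one it deletes:

	/* before: the driver open-codes electrical polarity */
	static int vbus_powered_legacy(struct gpio_vbus_mach_info *pdata)
	{
		int vbus = gpio_get_value(pdata->gpio_vbus);

		if (pdata->gpio_vbus_inverted)
			vbus = !vbus;

		return vbus;
	}

	/* after: gpiod_get_value() already returns the logical level */
	static int vbus_powered_gpiod(struct gpio_desc *vbus_gpiod)
	{
		return gpiod_get_value(vbus_gpiod);
	}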
diff --git a/drivers/usb/phy/phy-keystone.c b/drivers/usb/phy/phy-keystone.c
index 110e6e9ad621..9c226b57153b 100644
--- a/drivers/usb/phy/phy-keystone.c
+++ b/drivers/usb/phy/phy-keystone.c
@@ -76,7 +76,7 @@ static int keystone_usbphy_probe(struct platform_device *pdev)
if (IS_ERR(k_phy->phy_ctrl))
return PTR_ERR(k_phy->phy_ctrl);
- ret = usb_phy_gen_create_phy(dev, &k_phy->usb_phy_gen, NULL);
+ ret = usb_phy_gen_create_phy(dev, &k_phy->usb_phy_gen);
if (ret)
return ret;
diff --git a/drivers/usb/phy/phy-tegra-usb.c b/drivers/usb/phy/phy-tegra-usb.c
index ea7ef1dc0b42..037e8eee737d 100644
--- a/drivers/usb/phy/phy-tegra-usb.c
+++ b/drivers/usb/phy/phy-tegra-usb.c
@@ -9,54 +9,56 @@
* Venu Byravarasu <vbyravarasu@nvidia.com>
*/
-#include <linux/resource.h>
#include <linux/delay.h>
-#include <linux/slab.h>
#include <linux/err.h>
#include <linux/export.h>
-#include <linux/module.h>
-#include <linux/platform_device.h>
-#include <linux/iopoll.h>
#include <linux/gpio.h>
+#include <linux/iopoll.h>
+#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/of_gpio.h>
-#include <linux/usb/otg.h>
-#include <linux/usb/ulpi.h>
-#include <linux/usb/of.h>
+#include <linux/platform_device.h>
+#include <linux/resource.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+
+#include <linux/regulator/consumer.h>
+
#include <linux/usb/ehci_def.h>
+#include <linux/usb/of.h>
#include <linux/usb/tegra_usb_phy.h>
-#include <linux/regulator/consumer.h>
+#include <linux/usb/ulpi.h>
-#define ULPI_VIEWPORT 0x170
+#define ULPI_VIEWPORT 0x170
/* PORTSC PTS/PHCD bits, Tegra20 only */
-#define TEGRA_USB_PORTSC1 0x184
-#define TEGRA_USB_PORTSC1_PTS(x) (((x) & 0x3) << 30)
-#define TEGRA_USB_PORTSC1_PHCD (1 << 23)
+#define TEGRA_USB_PORTSC1 0x184
+#define TEGRA_USB_PORTSC1_PTS(x) (((x) & 0x3) << 30)
+#define TEGRA_USB_PORTSC1_PHCD BIT(23)
/* HOSTPC1 PTS/PHCD bits, Tegra30 and above */
-#define TEGRA_USB_HOSTPC1_DEVLC 0x1b4
-#define TEGRA_USB_HOSTPC1_DEVLC_PTS(x) (((x) & 0x7) << 29)
-#define TEGRA_USB_HOSTPC1_DEVLC_PHCD (1 << 22)
+#define TEGRA_USB_HOSTPC1_DEVLC 0x1b4
+#define TEGRA_USB_HOSTPC1_DEVLC_PTS(x) (((x) & 0x7) << 29)
+#define TEGRA_USB_HOSTPC1_DEVLC_PHCD BIT(22)
/* Bits of PORTSC1, which will get cleared by writing 1 into them */
#define TEGRA_PORTSC1_RWC_BITS (PORT_CSC | PORT_PEC | PORT_OCC)
-#define USB_SUSP_CTRL 0x400
-#define USB_WAKE_ON_CNNT_EN_DEV (1 << 3)
-#define USB_WAKE_ON_DISCON_EN_DEV (1 << 4)
-#define USB_SUSP_CLR (1 << 5)
-#define USB_PHY_CLK_VALID (1 << 7)
-#define UTMIP_RESET (1 << 11)
-#define UHSIC_RESET (1 << 11)
-#define UTMIP_PHY_ENABLE (1 << 12)
-#define ULPI_PHY_ENABLE (1 << 13)
-#define USB_SUSP_SET (1 << 14)
-#define USB_WAKEUP_DEBOUNCE_COUNT(x) (((x) & 0x7) << 16)
-
-#define USB1_LEGACY_CTRL 0x410
-#define USB1_NO_LEGACY_MODE (1 << 0)
+#define USB_SUSP_CTRL 0x400
+#define USB_WAKE_ON_CNNT_EN_DEV BIT(3)
+#define USB_WAKE_ON_DISCON_EN_DEV BIT(4)
+#define USB_SUSP_CLR BIT(5)
+#define USB_PHY_CLK_VALID BIT(7)
+#define UTMIP_RESET BIT(11)
+#define UHSIC_RESET BIT(11)
+#define UTMIP_PHY_ENABLE BIT(12)
+#define ULPI_PHY_ENABLE BIT(13)
+#define USB_SUSP_SET BIT(14)
+#define USB_WAKEUP_DEBOUNCE_COUNT(x) (((x) & 0x7) << 16)
+
+#define USB1_LEGACY_CTRL 0x410
+#define USB1_NO_LEGACY_MODE BIT(0)
#define USB1_VBUS_SENSE_CTL_MASK (3 << 1)
#define USB1_VBUS_SENSE_CTL_VBUS_WAKEUP (0 << 1)
#define USB1_VBUS_SENSE_CTL_AB_SESS_VLD_OR_VBUS_WAKEUP \
@@ -64,94 +66,94 @@
#define USB1_VBUS_SENSE_CTL_AB_SESS_VLD (2 << 1)
#define USB1_VBUS_SENSE_CTL_A_SESS_VLD (3 << 1)
-#define ULPI_TIMING_CTRL_0 0x424
-#define ULPI_OUTPUT_PINMUX_BYP (1 << 10)
-#define ULPI_CLKOUT_PINMUX_BYP (1 << 11)
+#define ULPI_TIMING_CTRL_0 0x424
+#define ULPI_OUTPUT_PINMUX_BYP BIT(10)
+#define ULPI_CLKOUT_PINMUX_BYP BIT(11)
-#define ULPI_TIMING_CTRL_1 0x428
-#define ULPI_DATA_TRIMMER_LOAD (1 << 0)
-#define ULPI_DATA_TRIMMER_SEL(x) (((x) & 0x7) << 1)
-#define ULPI_STPDIRNXT_TRIMMER_LOAD (1 << 16)
-#define ULPI_STPDIRNXT_TRIMMER_SEL(x) (((x) & 0x7) << 17)
-#define ULPI_DIR_TRIMMER_LOAD (1 << 24)
-#define ULPI_DIR_TRIMMER_SEL(x) (((x) & 0x7) << 25)
+#define ULPI_TIMING_CTRL_1 0x428
+#define ULPI_DATA_TRIMMER_LOAD BIT(0)
+#define ULPI_DATA_TRIMMER_SEL(x) (((x) & 0x7) << 1)
+#define ULPI_STPDIRNXT_TRIMMER_LOAD BIT(16)
+#define ULPI_STPDIRNXT_TRIMMER_SEL(x) (((x) & 0x7) << 17)
+#define ULPI_DIR_TRIMMER_LOAD BIT(24)
+#define ULPI_DIR_TRIMMER_SEL(x) (((x) & 0x7) << 25)
-#define UTMIP_PLL_CFG1 0x804
+#define UTMIP_PLL_CFG1 0x804
#define UTMIP_XTAL_FREQ_COUNT(x) (((x) & 0xfff) << 0)
#define UTMIP_PLLU_ENABLE_DLY_COUNT(x) (((x) & 0x1f) << 27)
-#define UTMIP_XCVR_CFG0 0x808
+#define UTMIP_XCVR_CFG0 0x808
#define UTMIP_XCVR_SETUP(x) (((x) & 0xf) << 0)
#define UTMIP_XCVR_SETUP_MSB(x) ((((x) & 0x70) >> 4) << 22)
#define UTMIP_XCVR_LSRSLEW(x) (((x) & 0x3) << 8)
#define UTMIP_XCVR_LSFSLEW(x) (((x) & 0x3) << 10)
-#define UTMIP_FORCE_PD_POWERDOWN (1 << 14)
-#define UTMIP_FORCE_PD2_POWERDOWN (1 << 16)
-#define UTMIP_FORCE_PDZI_POWERDOWN (1 << 18)
-#define UTMIP_XCVR_LSBIAS_SEL (1 << 21)
+#define UTMIP_FORCE_PD_POWERDOWN BIT(14)
+#define UTMIP_FORCE_PD2_POWERDOWN BIT(16)
+#define UTMIP_FORCE_PDZI_POWERDOWN BIT(18)
+#define UTMIP_XCVR_LSBIAS_SEL BIT(21)
#define UTMIP_XCVR_HSSLEW(x) (((x) & 0x3) << 4)
#define UTMIP_XCVR_HSSLEW_MSB(x) ((((x) & 0x1fc) >> 2) << 25)
-#define UTMIP_BIAS_CFG0 0x80c
-#define UTMIP_OTGPD (1 << 11)
-#define UTMIP_BIASPD (1 << 10)
-#define UTMIP_HSSQUELCH_LEVEL(x) (((x) & 0x3) << 0)
-#define UTMIP_HSDISCON_LEVEL(x) (((x) & 0x3) << 2)
-#define UTMIP_HSDISCON_LEVEL_MSB(x) ((((x) & 0x4) >> 2) << 24)
+#define UTMIP_BIAS_CFG0 0x80c
+#define UTMIP_OTGPD BIT(11)
+#define UTMIP_BIASPD BIT(10)
+#define UTMIP_HSSQUELCH_LEVEL(x) (((x) & 0x3) << 0)
+#define UTMIP_HSDISCON_LEVEL(x) (((x) & 0x3) << 2)
+#define UTMIP_HSDISCON_LEVEL_MSB(x) ((((x) & 0x4) >> 2) << 24)
-#define UTMIP_HSRX_CFG0 0x810
-#define UTMIP_ELASTIC_LIMIT(x) (((x) & 0x1f) << 10)
-#define UTMIP_IDLE_WAIT(x) (((x) & 0x1f) << 15)
+#define UTMIP_HSRX_CFG0 0x810
+#define UTMIP_ELASTIC_LIMIT(x) (((x) & 0x1f) << 10)
+#define UTMIP_IDLE_WAIT(x) (((x) & 0x1f) << 15)
-#define UTMIP_HSRX_CFG1 0x814
-#define UTMIP_HS_SYNC_START_DLY(x) (((x) & 0x1f) << 1)
+#define UTMIP_HSRX_CFG1 0x814
+#define UTMIP_HS_SYNC_START_DLY(x) (((x) & 0x1f) << 1)
-#define UTMIP_TX_CFG0 0x820
-#define UTMIP_FS_PREABMLE_J (1 << 19)
-#define UTMIP_HS_DISCON_DISABLE (1 << 8)
+#define UTMIP_TX_CFG0 0x820
+#define UTMIP_FS_PREABMLE_J BIT(19)
+#define UTMIP_HS_DISCON_DISABLE BIT(8)
-#define UTMIP_MISC_CFG0 0x824
-#define UTMIP_DPDM_OBSERVE (1 << 26)
-#define UTMIP_DPDM_OBSERVE_SEL(x) (((x) & 0xf) << 27)
-#define UTMIP_DPDM_OBSERVE_SEL_FS_J UTMIP_DPDM_OBSERVE_SEL(0xf)
-#define UTMIP_DPDM_OBSERVE_SEL_FS_K UTMIP_DPDM_OBSERVE_SEL(0xe)
-#define UTMIP_DPDM_OBSERVE_SEL_FS_SE1 UTMIP_DPDM_OBSERVE_SEL(0xd)
-#define UTMIP_DPDM_OBSERVE_SEL_FS_SE0 UTMIP_DPDM_OBSERVE_SEL(0xc)
-#define UTMIP_SUSPEND_EXIT_ON_EDGE (1 << 22)
+#define UTMIP_MISC_CFG0 0x824
+#define UTMIP_DPDM_OBSERVE BIT(26)
+#define UTMIP_DPDM_OBSERVE_SEL(x) (((x) & 0xf) << 27)
+#define UTMIP_DPDM_OBSERVE_SEL_FS_J UTMIP_DPDM_OBSERVE_SEL(0xf)
+#define UTMIP_DPDM_OBSERVE_SEL_FS_K UTMIP_DPDM_OBSERVE_SEL(0xe)
+#define UTMIP_DPDM_OBSERVE_SEL_FS_SE1 UTMIP_DPDM_OBSERVE_SEL(0xd)
+#define UTMIP_DPDM_OBSERVE_SEL_FS_SE0 UTMIP_DPDM_OBSERVE_SEL(0xc)
+#define UTMIP_SUSPEND_EXIT_ON_EDGE BIT(22)
-#define UTMIP_MISC_CFG1 0x828
-#define UTMIP_PLL_ACTIVE_DLY_COUNT(x) (((x) & 0x1f) << 18)
-#define UTMIP_PLLU_STABLE_COUNT(x) (((x) & 0xfff) << 6)
+#define UTMIP_MISC_CFG1 0x828
+#define UTMIP_PLL_ACTIVE_DLY_COUNT(x) (((x) & 0x1f) << 18)
+#define UTMIP_PLLU_STABLE_COUNT(x) (((x) & 0xfff) << 6)
-#define UTMIP_DEBOUNCE_CFG0 0x82c
-#define UTMIP_BIAS_DEBOUNCE_A(x) (((x) & 0xffff) << 0)
+#define UTMIP_DEBOUNCE_CFG0 0x82c
+#define UTMIP_BIAS_DEBOUNCE_A(x) (((x) & 0xffff) << 0)
-#define UTMIP_BAT_CHRG_CFG0 0x830
-#define UTMIP_PD_CHRG (1 << 0)
+#define UTMIP_BAT_CHRG_CFG0 0x830
+#define UTMIP_PD_CHRG BIT(0)
-#define UTMIP_SPARE_CFG0 0x834
-#define FUSE_SETUP_SEL (1 << 3)
+#define UTMIP_SPARE_CFG0 0x834
+#define FUSE_SETUP_SEL BIT(3)
-#define UTMIP_XCVR_CFG1 0x838
-#define UTMIP_FORCE_PDDISC_POWERDOWN (1 << 0)
-#define UTMIP_FORCE_PDCHRP_POWERDOWN (1 << 2)
-#define UTMIP_FORCE_PDDR_POWERDOWN (1 << 4)
-#define UTMIP_XCVR_TERM_RANGE_ADJ(x) (((x) & 0xf) << 18)
+#define UTMIP_XCVR_CFG1 0x838
+#define UTMIP_FORCE_PDDISC_POWERDOWN BIT(0)
+#define UTMIP_FORCE_PDCHRP_POWERDOWN BIT(2)
+#define UTMIP_FORCE_PDDR_POWERDOWN BIT(4)
+#define UTMIP_XCVR_TERM_RANGE_ADJ(x) (((x) & 0xf) << 18)
-#define UTMIP_BIAS_CFG1 0x83c
-#define UTMIP_BIAS_PDTRK_COUNT(x) (((x) & 0x1f) << 3)
+#define UTMIP_BIAS_CFG1 0x83c
+#define UTMIP_BIAS_PDTRK_COUNT(x) (((x) & 0x1f) << 3)
/* For Tegra30 and above only, the address is different in Tegra20 */
-#define USB_USBMODE 0x1f8
-#define USB_USBMODE_MASK (3 << 0)
-#define USB_USBMODE_HOST (3 << 0)
-#define USB_USBMODE_DEVICE (2 << 0)
+#define USB_USBMODE 0x1f8
+#define USB_USBMODE_MASK (3 << 0)
+#define USB_USBMODE_HOST (3 << 0)
+#define USB_USBMODE_DEVICE (2 << 0)
static DEFINE_SPINLOCK(utmip_pad_lock);
-static int utmip_pad_count;
+static unsigned int utmip_pad_count;
struct tegra_xtal_freq {
- int freq;
+ unsigned int freq;
u8 enable_delay;
u8 stable_count;
u8 active_delay;
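
The register-macro churn above is two independent cleanups folded together: column alignment, and converting single-bit masks from open-coded shifts to BIT() from <linux/bits.h>. BIT(n) expands to an unsigned long shift, which documents intent and sidesteps the undefined behaviour of (1 << 31) on a 32-bit int; multi-bit fields keep their explicit shifts. A short illustration with hypothetical register names:

	#include <linux/bits.h>

	#define MY_CTRL			0x400
	#define MY_CTRL_ENABLE		BIT(12)			/* was: (1 << 12) */
	#define MY_CTRL_DEBOUNCE(x)	(((x) & 0x7) << 16)	/* field: keep shift */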
@@ -194,43 +196,49 @@ static const struct tegra_xtal_freq tegra_freq_table[] = {
},
};
+static inline struct tegra_usb_phy *to_tegra_usb_phy(struct usb_phy *u_phy)
+{
+ return container_of(u_phy, struct tegra_usb_phy, u_phy);
+}
+
static void set_pts(struct tegra_usb_phy *phy, u8 pts_val)
{
void __iomem *base = phy->regs;
- unsigned long val;
+ u32 val;
if (phy->soc_config->has_hostpc) {
- val = readl(base + TEGRA_USB_HOSTPC1_DEVLC);
+ val = readl_relaxed(base + TEGRA_USB_HOSTPC1_DEVLC);
val &= ~TEGRA_USB_HOSTPC1_DEVLC_PTS(~0);
val |= TEGRA_USB_HOSTPC1_DEVLC_PTS(pts_val);
- writel(val, base + TEGRA_USB_HOSTPC1_DEVLC);
+ writel_relaxed(val, base + TEGRA_USB_HOSTPC1_DEVLC);
} else {
- val = readl(base + TEGRA_USB_PORTSC1) & ~TEGRA_PORTSC1_RWC_BITS;
+ val = readl_relaxed(base + TEGRA_USB_PORTSC1);
+ val &= ~TEGRA_PORTSC1_RWC_BITS;
val &= ~TEGRA_USB_PORTSC1_PTS(~0);
val |= TEGRA_USB_PORTSC1_PTS(pts_val);
- writel(val, base + TEGRA_USB_PORTSC1);
+ writel_relaxed(val, base + TEGRA_USB_PORTSC1);
}
}
static void set_phcd(struct tegra_usb_phy *phy, bool enable)
{
void __iomem *base = phy->regs;
- unsigned long val;
+ u32 val;
if (phy->soc_config->has_hostpc) {
- val = readl(base + TEGRA_USB_HOSTPC1_DEVLC);
+ val = readl_relaxed(base + TEGRA_USB_HOSTPC1_DEVLC);
if (enable)
val |= TEGRA_USB_HOSTPC1_DEVLC_PHCD;
else
val &= ~TEGRA_USB_HOSTPC1_DEVLC_PHCD;
- writel(val, base + TEGRA_USB_HOSTPC1_DEVLC);
+ writel_relaxed(val, base + TEGRA_USB_HOSTPC1_DEVLC);
} else {
- val = readl(base + TEGRA_USB_PORTSC1) & ~PORT_RWC_BITS;
+ val = readl_relaxed(base + TEGRA_USB_PORTSC1) & ~PORT_RWC_BITS;
if (enable)
val |= TEGRA_USB_PORTSC1_PHCD;
else
val &= ~TEGRA_USB_PORTSC1_PHCD;
- writel(val, base + TEGRA_USB_PORTSC1);
+ writel_relaxed(val, base + TEGRA_USB_PORTSC1);
}
}
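
Two mechanical conversions run through the rest of this file: val narrows from unsigned long to u32 to match the 32-bit registers, and readl()/writel() become their _relaxed variants. The relaxed accessors omit the memory barriers the ordered ones imply, which is safe for pure register read-modify-write sequences with no DMA ordering requirement and saves a barrier per access on arm. The shape of every such sequence, as a sketch with a hypothetical helper name:

	static void reg_set_bits(void __iomem *base, unsigned long offset, u32 mask)
	{
		u32 val;	/* u32, matching the 32-bit register width */

		val = readl_relaxed(base + offset);	/* no implicit barrier */
		val |= mask;
		writel_relaxed(val, base + offset);	/* no implicit barrier */
	}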
@@ -238,23 +246,6 @@ static int utmip_pad_open(struct tegra_usb_phy *phy)
{
int ret;
- phy->pad_clk = devm_clk_get(phy->u_phy.dev, "utmi-pads");
- if (IS_ERR(phy->pad_clk)) {
- ret = PTR_ERR(phy->pad_clk);
- dev_err(phy->u_phy.dev,
- "Failed to get UTMIP pad clock: %d\n", ret);
- return ret;
- }
-
- phy->pad_rst = devm_reset_control_get_optional_shared(
- phy->u_phy.dev, "utmi-pads");
- if (IS_ERR(phy->pad_rst)) {
- ret = PTR_ERR(phy->pad_rst);
- dev_err(phy->u_phy.dev,
- "Failed to get UTMI-pads reset: %d\n", ret);
- return ret;
- }
-
ret = clk_prepare_enable(phy->pad_clk);
if (ret) {
dev_err(phy->u_phy.dev,
@@ -315,18 +306,21 @@ static int utmip_pad_close(struct tegra_usb_phy *phy)
return ret;
}
-static void utmip_pad_power_on(struct tegra_usb_phy *phy)
+static int utmip_pad_power_on(struct tegra_usb_phy *phy)
{
- unsigned long val, flags;
- void __iomem *base = phy->pad_regs;
struct tegra_utmip_config *config = phy->config;
+ void __iomem *base = phy->pad_regs;
+ u32 val;
+ int err;
- clk_prepare_enable(phy->pad_clk);
+ err = clk_prepare_enable(phy->pad_clk);
+ if (err)
+ return err;
- spin_lock_irqsave(&utmip_pad_lock, flags);
+ spin_lock(&utmip_pad_lock);
if (utmip_pad_count++ == 0) {
- val = readl(base + UTMIP_BIAS_CFG0);
+ val = readl_relaxed(base + UTMIP_BIAS_CFG0);
val &= ~(UTMIP_OTGPD | UTMIP_BIASPD);
if (phy->soc_config->requires_extra_tuning_parameters) {
@@ -338,53 +332,59 @@ static void utmip_pad_power_on(struct tegra_usb_phy *phy)
val |= UTMIP_HSDISCON_LEVEL(config->hsdiscon_level);
val |= UTMIP_HSDISCON_LEVEL_MSB(config->hsdiscon_level);
}
- writel(val, base + UTMIP_BIAS_CFG0);
+ writel_relaxed(val, base + UTMIP_BIAS_CFG0);
}
- spin_unlock_irqrestore(&utmip_pad_lock, flags);
+ spin_unlock(&utmip_pad_lock);
clk_disable_unprepare(phy->pad_clk);
+
+ return 0;
}
static int utmip_pad_power_off(struct tegra_usb_phy *phy)
{
- unsigned long val, flags;
void __iomem *base = phy->pad_regs;
+ u32 val;
+ int ret;
+
+ ret = clk_prepare_enable(phy->pad_clk);
+ if (ret)
+ return ret;
+
+ spin_lock(&utmip_pad_lock);
if (!utmip_pad_count) {
dev_err(phy->u_phy.dev, "UTMIP pad already powered off\n");
- return -EINVAL;
+ ret = -EINVAL;
+ goto ulock;
}
- clk_prepare_enable(phy->pad_clk);
-
- spin_lock_irqsave(&utmip_pad_lock, flags);
-
if (--utmip_pad_count == 0) {
- val = readl(base + UTMIP_BIAS_CFG0);
+ val = readl_relaxed(base + UTMIP_BIAS_CFG0);
val |= UTMIP_OTGPD | UTMIP_BIASPD;
- writel(val, base + UTMIP_BIAS_CFG0);
+ writel_relaxed(val, base + UTMIP_BIAS_CFG0);
}
-
- spin_unlock_irqrestore(&utmip_pad_lock, flags);
+ulock:
+ spin_unlock(&utmip_pad_lock);
clk_disable_unprepare(phy->pad_clk);
- return 0;
+ return ret;
}
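
The pad-power rework above does three things: utmip_pad_power_on() now propagates the clk_prepare_enable() error instead of ignoring it, both paths enable the clock before touching the refcount, and plain spin_lock() replaces the irqsave variant on the stated premise that these paths only run in sleepable context and the lock is never taken from IRQ context. The underlying shared-resource refcount idiom, reduced to a sketch (pad_power() is an assumed helper, not from this driver):

	static DEFINE_SPINLOCK(pad_lock);
	static unsigned int pad_count;

	static void pad_power(void __iomem *base, bool on);	/* assumed helper */

	static void pad_get(void __iomem *base)
	{
		spin_lock(&pad_lock);
		if (pad_count++ == 0)
			pad_power(base, true);	/* first user powers the pad up */
		spin_unlock(&pad_lock);
	}

	static int pad_put(void __iomem *base)
	{
		int ret = 0;

		spin_lock(&pad_lock);
		if (!pad_count)
			ret = -EINVAL;		/* unbalanced put */
		else if (--pad_count == 0)
			pad_power(base, false);	/* last user powers it down */
		spin_unlock(&pad_lock);

		return ret;
	}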
static int utmi_wait_register(void __iomem *reg, u32 mask, u32 result)
{
u32 tmp;
- return readl_poll_timeout(reg, tmp, (tmp & mask) == result,
- 2000, 6000);
+ return readl_relaxed_poll_timeout(reg, tmp, (tmp & mask) == result,
+ 2000, 6000);
}
static void utmi_phy_clk_disable(struct tegra_usb_phy *phy)
{
- unsigned long val;
void __iomem *base = phy->regs;
+ u32 val;
/*
* The USB driver may have already initiated the phy clock
@@ -395,27 +395,28 @@ static void utmi_phy_clk_disable(struct tegra_usb_phy *phy)
return;
if (phy->is_legacy_phy) {
- val = readl(base + USB_SUSP_CTRL);
+ val = readl_relaxed(base + USB_SUSP_CTRL);
val |= USB_SUSP_SET;
- writel(val, base + USB_SUSP_CTRL);
+ writel_relaxed(val, base + USB_SUSP_CTRL);
- udelay(10);
+ usleep_range(10, 100);
- val = readl(base + USB_SUSP_CTRL);
+ val = readl_relaxed(base + USB_SUSP_CTRL);
val &= ~USB_SUSP_SET;
- writel(val, base + USB_SUSP_CTRL);
- } else
+ writel_relaxed(val, base + USB_SUSP_CTRL);
+ } else {
set_phcd(phy, true);
+ }
- if (utmi_wait_register(base + USB_SUSP_CTRL, USB_PHY_CLK_VALID, 0) < 0)
+ if (utmi_wait_register(base + USB_SUSP_CTRL, USB_PHY_CLK_VALID, 0))
dev_err(phy->u_phy.dev,
"Timeout waiting for PHY to stabilize on disable\n");
}
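
The udelay() to usleep_range() swaps in this and the following hunks follow the guidance in Documentation/timers/timers-howto.rst: in non-atomic context, delays in roughly the 10us-20ms range should sleep rather than busy-wait, and the min/max range gives the hrtimer subsystem room to coalesce wakeups. For contrast:

	static void settle_busywait(void)
	{
		udelay(10);		/* spins the CPU; required in atomic context */
	}

	static void settle_sleeping(void)
	{
		usleep_range(10, 100);	/* sleeps; the slack lets timer wakeups batch */
	}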
static void utmi_phy_clk_enable(struct tegra_usb_phy *phy)
{
- unsigned long val;
void __iomem *base = phy->regs;
+ u32 val;
/*
* The USB driver may have already initiated the phy clock
@@ -427,97 +428,101 @@ static void utmi_phy_clk_enable(struct tegra_usb_phy *phy)
return;
if (phy->is_legacy_phy) {
- val = readl(base + USB_SUSP_CTRL);
+ val = readl_relaxed(base + USB_SUSP_CTRL);
val |= USB_SUSP_CLR;
- writel(val, base + USB_SUSP_CTRL);
+ writel_relaxed(val, base + USB_SUSP_CTRL);
- udelay(10);
+ usleep_range(10, 100);
- val = readl(base + USB_SUSP_CTRL);
+ val = readl_relaxed(base + USB_SUSP_CTRL);
val &= ~USB_SUSP_CLR;
- writel(val, base + USB_SUSP_CTRL);
- } else
+ writel_relaxed(val, base + USB_SUSP_CTRL);
+ } else {
set_phcd(phy, false);
+ }
if (utmi_wait_register(base + USB_SUSP_CTRL, USB_PHY_CLK_VALID,
- USB_PHY_CLK_VALID))
+ USB_PHY_CLK_VALID))
dev_err(phy->u_phy.dev,
"Timeout waiting for PHY to stabilize on enable\n");
}
static int utmi_phy_power_on(struct tegra_usb_phy *phy)
{
- unsigned long val;
- void __iomem *base = phy->regs;
struct tegra_utmip_config *config = phy->config;
+ void __iomem *base = phy->regs;
+ u32 val;
+ int err;
- val = readl(base + USB_SUSP_CTRL);
+ val = readl_relaxed(base + USB_SUSP_CTRL);
val |= UTMIP_RESET;
- writel(val, base + USB_SUSP_CTRL);
+ writel_relaxed(val, base + USB_SUSP_CTRL);
if (phy->is_legacy_phy) {
- val = readl(base + USB1_LEGACY_CTRL);
+ val = readl_relaxed(base + USB1_LEGACY_CTRL);
val |= USB1_NO_LEGACY_MODE;
- writel(val, base + USB1_LEGACY_CTRL);
+ writel_relaxed(val, base + USB1_LEGACY_CTRL);
}
- val = readl(base + UTMIP_TX_CFG0);
+ val = readl_relaxed(base + UTMIP_TX_CFG0);
val |= UTMIP_FS_PREABMLE_J;
- writel(val, base + UTMIP_TX_CFG0);
+ writel_relaxed(val, base + UTMIP_TX_CFG0);
- val = readl(base + UTMIP_HSRX_CFG0);
+ val = readl_relaxed(base + UTMIP_HSRX_CFG0);
val &= ~(UTMIP_IDLE_WAIT(~0) | UTMIP_ELASTIC_LIMIT(~0));
val |= UTMIP_IDLE_WAIT(config->idle_wait_delay);
val |= UTMIP_ELASTIC_LIMIT(config->elastic_limit);
- writel(val, base + UTMIP_HSRX_CFG0);
+ writel_relaxed(val, base + UTMIP_HSRX_CFG0);
- val = readl(base + UTMIP_HSRX_CFG1);
+ val = readl_relaxed(base + UTMIP_HSRX_CFG1);
val &= ~UTMIP_HS_SYNC_START_DLY(~0);
val |= UTMIP_HS_SYNC_START_DLY(config->hssync_start_delay);
- writel(val, base + UTMIP_HSRX_CFG1);
+ writel_relaxed(val, base + UTMIP_HSRX_CFG1);
- val = readl(base + UTMIP_DEBOUNCE_CFG0);
+ val = readl_relaxed(base + UTMIP_DEBOUNCE_CFG0);
val &= ~UTMIP_BIAS_DEBOUNCE_A(~0);
val |= UTMIP_BIAS_DEBOUNCE_A(phy->freq->debounce);
- writel(val, base + UTMIP_DEBOUNCE_CFG0);
+ writel_relaxed(val, base + UTMIP_DEBOUNCE_CFG0);
- val = readl(base + UTMIP_MISC_CFG0);
+ val = readl_relaxed(base + UTMIP_MISC_CFG0);
val &= ~UTMIP_SUSPEND_EXIT_ON_EDGE;
- writel(val, base + UTMIP_MISC_CFG0);
+ writel_relaxed(val, base + UTMIP_MISC_CFG0);
if (!phy->soc_config->utmi_pll_config_in_car_module) {
- val = readl(base + UTMIP_MISC_CFG1);
+ val = readl_relaxed(base + UTMIP_MISC_CFG1);
val &= ~(UTMIP_PLL_ACTIVE_DLY_COUNT(~0) |
UTMIP_PLLU_STABLE_COUNT(~0));
val |= UTMIP_PLL_ACTIVE_DLY_COUNT(phy->freq->active_delay) |
UTMIP_PLLU_STABLE_COUNT(phy->freq->stable_count);
- writel(val, base + UTMIP_MISC_CFG1);
+ writel_relaxed(val, base + UTMIP_MISC_CFG1);
- val = readl(base + UTMIP_PLL_CFG1);
+ val = readl_relaxed(base + UTMIP_PLL_CFG1);
val &= ~(UTMIP_XTAL_FREQ_COUNT(~0) |
UTMIP_PLLU_ENABLE_DLY_COUNT(~0));
val |= UTMIP_XTAL_FREQ_COUNT(phy->freq->xtal_freq_count) |
UTMIP_PLLU_ENABLE_DLY_COUNT(phy->freq->enable_delay);
- writel(val, base + UTMIP_PLL_CFG1);
+ writel_relaxed(val, base + UTMIP_PLL_CFG1);
}
if (phy->mode == USB_DR_MODE_PERIPHERAL) {
- val = readl(base + USB_SUSP_CTRL);
+ val = readl_relaxed(base + USB_SUSP_CTRL);
val &= ~(USB_WAKE_ON_CNNT_EN_DEV | USB_WAKE_ON_DISCON_EN_DEV);
- writel(val, base + USB_SUSP_CTRL);
+ writel_relaxed(val, base + USB_SUSP_CTRL);
- val = readl(base + UTMIP_BAT_CHRG_CFG0);
+ val = readl_relaxed(base + UTMIP_BAT_CHRG_CFG0);
val &= ~UTMIP_PD_CHRG;
- writel(val, base + UTMIP_BAT_CHRG_CFG0);
+ writel_relaxed(val, base + UTMIP_BAT_CHRG_CFG0);
} else {
- val = readl(base + UTMIP_BAT_CHRG_CFG0);
+ val = readl_relaxed(base + UTMIP_BAT_CHRG_CFG0);
val |= UTMIP_PD_CHRG;
- writel(val, base + UTMIP_BAT_CHRG_CFG0);
+ writel_relaxed(val, base + UTMIP_BAT_CHRG_CFG0);
}
- utmip_pad_power_on(phy);
+ err = utmip_pad_power_on(phy);
+ if (err)
+ return err;
- val = readl(base + UTMIP_XCVR_CFG0);
+ val = readl_relaxed(base + UTMIP_XCVR_CFG0);
val &= ~(UTMIP_FORCE_PD_POWERDOWN | UTMIP_FORCE_PD2_POWERDOWN |
UTMIP_FORCE_PDZI_POWERDOWN | UTMIP_XCVR_LSBIAS_SEL |
UTMIP_XCVR_SETUP(~0) | UTMIP_XCVR_SETUP_MSB(~0) |
@@ -535,57 +540,57 @@ static int utmi_phy_power_on(struct tegra_usb_phy *phy)
val |= UTMIP_XCVR_HSSLEW(config->xcvr_hsslew);
val |= UTMIP_XCVR_HSSLEW_MSB(config->xcvr_hsslew);
}
- writel(val, base + UTMIP_XCVR_CFG0);
+ writel_relaxed(val, base + UTMIP_XCVR_CFG0);
- val = readl(base + UTMIP_XCVR_CFG1);
+ val = readl_relaxed(base + UTMIP_XCVR_CFG1);
val &= ~(UTMIP_FORCE_PDDISC_POWERDOWN | UTMIP_FORCE_PDCHRP_POWERDOWN |
UTMIP_FORCE_PDDR_POWERDOWN | UTMIP_XCVR_TERM_RANGE_ADJ(~0));
val |= UTMIP_XCVR_TERM_RANGE_ADJ(config->term_range_adj);
- writel(val, base + UTMIP_XCVR_CFG1);
+ writel_relaxed(val, base + UTMIP_XCVR_CFG1);
- val = readl(base + UTMIP_BIAS_CFG1);
+ val = readl_relaxed(base + UTMIP_BIAS_CFG1);
val &= ~UTMIP_BIAS_PDTRK_COUNT(~0);
val |= UTMIP_BIAS_PDTRK_COUNT(0x5);
- writel(val, base + UTMIP_BIAS_CFG1);
+ writel_relaxed(val, base + UTMIP_BIAS_CFG1);
- val = readl(base + UTMIP_SPARE_CFG0);
+ val = readl_relaxed(base + UTMIP_SPARE_CFG0);
if (config->xcvr_setup_use_fuses)
val |= FUSE_SETUP_SEL;
else
val &= ~FUSE_SETUP_SEL;
- writel(val, base + UTMIP_SPARE_CFG0);
+ writel_relaxed(val, base + UTMIP_SPARE_CFG0);
if (!phy->is_legacy_phy) {
- val = readl(base + USB_SUSP_CTRL);
+ val = readl_relaxed(base + USB_SUSP_CTRL);
val |= UTMIP_PHY_ENABLE;
- writel(val, base + USB_SUSP_CTRL);
+ writel_relaxed(val, base + USB_SUSP_CTRL);
}
- val = readl(base + USB_SUSP_CTRL);
+ val = readl_relaxed(base + USB_SUSP_CTRL);
val &= ~UTMIP_RESET;
- writel(val, base + USB_SUSP_CTRL);
+ writel_relaxed(val, base + USB_SUSP_CTRL);
if (phy->is_legacy_phy) {
- val = readl(base + USB1_LEGACY_CTRL);
+ val = readl_relaxed(base + USB1_LEGACY_CTRL);
val &= ~USB1_VBUS_SENSE_CTL_MASK;
val |= USB1_VBUS_SENSE_CTL_A_SESS_VLD;
- writel(val, base + USB1_LEGACY_CTRL);
+ writel_relaxed(val, base + USB1_LEGACY_CTRL);
- val = readl(base + USB_SUSP_CTRL);
+ val = readl_relaxed(base + USB_SUSP_CTRL);
val &= ~USB_SUSP_SET;
- writel(val, base + USB_SUSP_CTRL);
+ writel_relaxed(val, base + USB_SUSP_CTRL);
}
utmi_phy_clk_enable(phy);
if (phy->soc_config->requires_usbmode_setup) {
- val = readl(base + USB_USBMODE);
+ val = readl_relaxed(base + USB_USBMODE);
val &= ~USB_USBMODE_MASK;
if (phy->mode == USB_DR_MODE_HOST)
val |= USB_USBMODE_HOST;
else
val |= USB_USBMODE_DEVICE;
- writel(val, base + USB_USBMODE);
+ writel_relaxed(val, base + USB_USBMODE);
}
if (!phy->is_legacy_phy)
@@ -596,258 +601,252 @@ static int utmi_phy_power_on(struct tegra_usb_phy *phy)
static int utmi_phy_power_off(struct tegra_usb_phy *phy)
{
- unsigned long val;
void __iomem *base = phy->regs;
+ u32 val;
utmi_phy_clk_disable(phy);
if (phy->mode == USB_DR_MODE_PERIPHERAL) {
- val = readl(base + USB_SUSP_CTRL);
+ val = readl_relaxed(base + USB_SUSP_CTRL);
val &= ~USB_WAKEUP_DEBOUNCE_COUNT(~0);
val |= USB_WAKE_ON_CNNT_EN_DEV | USB_WAKEUP_DEBOUNCE_COUNT(5);
- writel(val, base + USB_SUSP_CTRL);
+ writel_relaxed(val, base + USB_SUSP_CTRL);
}
- val = readl(base + USB_SUSP_CTRL);
+ val = readl_relaxed(base + USB_SUSP_CTRL);
val |= UTMIP_RESET;
- writel(val, base + USB_SUSP_CTRL);
+ writel_relaxed(val, base + USB_SUSP_CTRL);
- val = readl(base + UTMIP_BAT_CHRG_CFG0);
+ val = readl_relaxed(base + UTMIP_BAT_CHRG_CFG0);
val |= UTMIP_PD_CHRG;
- writel(val, base + UTMIP_BAT_CHRG_CFG0);
+ writel_relaxed(val, base + UTMIP_BAT_CHRG_CFG0);
- val = readl(base + UTMIP_XCVR_CFG0);
+ val = readl_relaxed(base + UTMIP_XCVR_CFG0);
val |= UTMIP_FORCE_PD_POWERDOWN | UTMIP_FORCE_PD2_POWERDOWN |
UTMIP_FORCE_PDZI_POWERDOWN;
- writel(val, base + UTMIP_XCVR_CFG0);
+ writel_relaxed(val, base + UTMIP_XCVR_CFG0);
- val = readl(base + UTMIP_XCVR_CFG1);
+ val = readl_relaxed(base + UTMIP_XCVR_CFG1);
val |= UTMIP_FORCE_PDDISC_POWERDOWN | UTMIP_FORCE_PDCHRP_POWERDOWN |
UTMIP_FORCE_PDDR_POWERDOWN;
- writel(val, base + UTMIP_XCVR_CFG1);
+ writel_relaxed(val, base + UTMIP_XCVR_CFG1);
return utmip_pad_power_off(phy);
}
static void utmi_phy_preresume(struct tegra_usb_phy *phy)
{
- unsigned long val;
void __iomem *base = phy->regs;
+ u32 val;
- val = readl(base + UTMIP_TX_CFG0);
+ val = readl_relaxed(base + UTMIP_TX_CFG0);
val |= UTMIP_HS_DISCON_DISABLE;
- writel(val, base + UTMIP_TX_CFG0);
+ writel_relaxed(val, base + UTMIP_TX_CFG0);
}
static void utmi_phy_postresume(struct tegra_usb_phy *phy)
{
- unsigned long val;
void __iomem *base = phy->regs;
+ u32 val;
- val = readl(base + UTMIP_TX_CFG0);
+ val = readl_relaxed(base + UTMIP_TX_CFG0);
val &= ~UTMIP_HS_DISCON_DISABLE;
- writel(val, base + UTMIP_TX_CFG0);
+ writel_relaxed(val, base + UTMIP_TX_CFG0);
}
static void utmi_phy_restore_start(struct tegra_usb_phy *phy,
enum tegra_usb_phy_port_speed port_speed)
{
- unsigned long val;
void __iomem *base = phy->regs;
+ u32 val;
- val = readl(base + UTMIP_MISC_CFG0);
+ val = readl_relaxed(base + UTMIP_MISC_CFG0);
val &= ~UTMIP_DPDM_OBSERVE_SEL(~0);
if (port_speed == TEGRA_USB_PHY_PORT_SPEED_LOW)
val |= UTMIP_DPDM_OBSERVE_SEL_FS_K;
else
val |= UTMIP_DPDM_OBSERVE_SEL_FS_J;
- writel(val, base + UTMIP_MISC_CFG0);
- udelay(1);
+ writel_relaxed(val, base + UTMIP_MISC_CFG0);
+ usleep_range(1, 10);
- val = readl(base + UTMIP_MISC_CFG0);
+ val = readl_relaxed(base + UTMIP_MISC_CFG0);
val |= UTMIP_DPDM_OBSERVE;
- writel(val, base + UTMIP_MISC_CFG0);
- udelay(10);
+ writel_relaxed(val, base + UTMIP_MISC_CFG0);
+ usleep_range(10, 100);
}
static void utmi_phy_restore_end(struct tegra_usb_phy *phy)
{
- unsigned long val;
void __iomem *base = phy->regs;
+ u32 val;
- val = readl(base + UTMIP_MISC_CFG0);
+ val = readl_relaxed(base + UTMIP_MISC_CFG0);
val &= ~UTMIP_DPDM_OBSERVE;
- writel(val, base + UTMIP_MISC_CFG0);
- udelay(10);
+ writel_relaxed(val, base + UTMIP_MISC_CFG0);
+ usleep_range(10, 100);
}
static int ulpi_phy_power_on(struct tegra_usb_phy *phy)
{
- int ret;
- unsigned long val;
void __iomem *base = phy->regs;
+ u32 val;
+ int err;
- ret = gpio_direction_output(phy->reset_gpio, 0);
- if (ret < 0) {
- dev_err(phy->u_phy.dev, "GPIO %d not set to 0: %d\n",
- phy->reset_gpio, ret);
- return ret;
- }
- msleep(5);
- ret = gpio_direction_output(phy->reset_gpio, 1);
- if (ret < 0) {
- dev_err(phy->u_phy.dev, "GPIO %d not set to 1: %d\n",
- phy->reset_gpio, ret);
- return ret;
- }
+ gpiod_set_value_cansleep(phy->reset_gpio, 1);
+
+ err = clk_prepare_enable(phy->clk);
+ if (err)
+ return err;
+
+ usleep_range(5000, 6000);
+
+ gpiod_set_value_cansleep(phy->reset_gpio, 0);
- clk_prepare_enable(phy->clk);
- msleep(1);
+ usleep_range(1000, 2000);
- val = readl(base + USB_SUSP_CTRL);
+ val = readl_relaxed(base + USB_SUSP_CTRL);
val |= UHSIC_RESET;
- writel(val, base + USB_SUSP_CTRL);
+ writel_relaxed(val, base + USB_SUSP_CTRL);
- val = readl(base + ULPI_TIMING_CTRL_0);
+ val = readl_relaxed(base + ULPI_TIMING_CTRL_0);
val |= ULPI_OUTPUT_PINMUX_BYP | ULPI_CLKOUT_PINMUX_BYP;
- writel(val, base + ULPI_TIMING_CTRL_0);
+ writel_relaxed(val, base + ULPI_TIMING_CTRL_0);
- val = readl(base + USB_SUSP_CTRL);
+ val = readl_relaxed(base + USB_SUSP_CTRL);
val |= ULPI_PHY_ENABLE;
- writel(val, base + USB_SUSP_CTRL);
+ writel_relaxed(val, base + USB_SUSP_CTRL);
val = 0;
- writel(val, base + ULPI_TIMING_CTRL_1);
+ writel_relaxed(val, base + ULPI_TIMING_CTRL_1);
val |= ULPI_DATA_TRIMMER_SEL(4);
val |= ULPI_STPDIRNXT_TRIMMER_SEL(4);
val |= ULPI_DIR_TRIMMER_SEL(4);
- writel(val, base + ULPI_TIMING_CTRL_1);
- udelay(10);
+ writel_relaxed(val, base + ULPI_TIMING_CTRL_1);
+ usleep_range(10, 100);
val |= ULPI_DATA_TRIMMER_LOAD;
val |= ULPI_STPDIRNXT_TRIMMER_LOAD;
val |= ULPI_DIR_TRIMMER_LOAD;
- writel(val, base + ULPI_TIMING_CTRL_1);
+ writel_relaxed(val, base + ULPI_TIMING_CTRL_1);
/* Fix VbusInvalid due to floating VBUS */
- ret = usb_phy_io_write(phy->ulpi, 0x40, 0x08);
- if (ret) {
- dev_err(phy->u_phy.dev, "ULPI write failed: %d\n", ret);
- return ret;
+ err = usb_phy_io_write(phy->ulpi, 0x40, 0x08);
+ if (err) {
+ dev_err(phy->u_phy.dev, "ULPI write failed: %d\n", err);
+ goto disable_clk;
}
- ret = usb_phy_io_write(phy->ulpi, 0x80, 0x0B);
- if (ret) {
- dev_err(phy->u_phy.dev, "ULPI write failed: %d\n", ret);
- return ret;
+ err = usb_phy_io_write(phy->ulpi, 0x80, 0x0B);
+ if (err) {
+ dev_err(phy->u_phy.dev, "ULPI write failed: %d\n", err);
+ goto disable_clk;
}
- val = readl(base + USB_SUSP_CTRL);
+ val = readl_relaxed(base + USB_SUSP_CTRL);
val |= USB_SUSP_CLR;
- writel(val, base + USB_SUSP_CTRL);
- udelay(100);
+ writel_relaxed(val, base + USB_SUSP_CTRL);
+ usleep_range(100, 1000);
- val = readl(base + USB_SUSP_CTRL);
+ val = readl_relaxed(base + USB_SUSP_CTRL);
val &= ~USB_SUSP_CLR;
- writel(val, base + USB_SUSP_CTRL);
+ writel_relaxed(val, base + USB_SUSP_CTRL);
return 0;
-}
-static int ulpi_phy_power_off(struct tegra_usb_phy *phy)
-{
- clk_disable(phy->clk);
- return gpio_direction_output(phy->reset_gpio, 0);
+disable_clk:
+ clk_disable_unprepare(phy->clk);
+
+ return err;
}
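
The ULPI power-on rewrite above relies on the reset line being requested as GPIOD_OUT_HIGH at probe time (see the probe hunk further down), i.e. logically asserted, so the sequence reads in logical terms: write 1 to assert reset, enable the link clock, hold reset across clock start, then release. The _cansleep variants allow descriptors backed by sleeping buses such as I2C expanders. The handshake in isolation, with a hypothetical function name:

	static int ulpi_reset_and_clock(struct gpio_desc *reset, struct clk *clk)
	{
		int err;

		gpiod_set_value_cansleep(reset, 1);	/* assert reset (logical 1) */

		err = clk_prepare_enable(clk);
		if (err)
			return err;

		usleep_range(5000, 6000);		/* hold reset over clock start */

		gpiod_set_value_cansleep(reset, 0);	/* release: PHY comes up */

		return 0;
	}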
-static void tegra_usb_phy_close(struct tegra_usb_phy *phy)
+static int ulpi_phy_power_off(struct tegra_usb_phy *phy)
{
- if (!IS_ERR(phy->vbus))
- regulator_disable(phy->vbus);
+ gpiod_set_value_cansleep(phy->reset_gpio, 1);
+ usleep_range(5000, 6000);
+ clk_disable_unprepare(phy->clk);
- if (!phy->is_ulpi_phy)
- utmip_pad_close(phy);
-
- clk_disable_unprepare(phy->pll_u);
+ return 0;
}
static int tegra_usb_phy_power_on(struct tegra_usb_phy *phy)
{
+ int err;
+
+ if (phy->powered_on)
+ return 0;
+
if (phy->is_ulpi_phy)
- return ulpi_phy_power_on(phy);
+ err = ulpi_phy_power_on(phy);
else
- return utmi_phy_power_on(phy);
+ err = utmi_phy_power_on(phy);
+ if (err)
+ return err;
+
+ phy->powered_on = true;
+
+ return 0;
}
static int tegra_usb_phy_power_off(struct tegra_usb_phy *phy)
{
+ int err;
+
+ if (!phy->powered_on)
+ return 0;
+
if (phy->is_ulpi_phy)
- return ulpi_phy_power_off(phy);
+ err = ulpi_phy_power_off(phy);
else
- return utmi_phy_power_off(phy);
-}
+ err = utmi_phy_power_off(phy);
+ if (err)
+ return err;
-static int tegra_usb_phy_suspend(struct usb_phy *x, int suspend)
-{
- struct tegra_usb_phy *phy = container_of(x, struct tegra_usb_phy, u_phy);
- if (suspend)
- return tegra_usb_phy_power_off(phy);
- else
- return tegra_usb_phy_power_on(phy);
+ phy->powered_on = false;
+
+ return 0;
}
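
The powered_on flag above makes power transitions idempotent, so ->init, ->shutdown and ->set_suspend can all call these helpers without tracking who powered what. The guard in isolation (struct and hardware hook are hypothetical):

	struct my_phy {
		bool powered_on;
	};

	static int hw_power(struct my_phy *p, bool on);	/* assumed hardware hook */

	static int my_phy_power(struct my_phy *p, bool on)
	{
		int err;

		if (p->powered_on == on)
			return 0;		/* no-op: already in that state */

		err = hw_power(p, on);
		if (err)
			return err;

		p->powered_on = on;

		return 0;
	}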
-static int ulpi_open(struct tegra_usb_phy *phy)
+static void tegra_usb_phy_shutdown(struct usb_phy *u_phy)
{
- int err;
+ struct tegra_usb_phy *phy = to_tegra_usb_phy(u_phy);
- phy->clk = devm_clk_get(phy->u_phy.dev, "ulpi-link");
- if (IS_ERR(phy->clk)) {
- err = PTR_ERR(phy->clk);
- dev_err(phy->u_phy.dev, "Failed to get ULPI clock: %d\n", err);
- return err;
- }
+ if (WARN_ON(!phy->freq))
+ return;
- err = devm_gpio_request(phy->u_phy.dev, phy->reset_gpio,
- "ulpi_phy_reset_b");
- if (err < 0) {
- dev_err(phy->u_phy.dev, "Request failed for GPIO %d: %d\n",
- phy->reset_gpio, err);
- return err;
- }
+ tegra_usb_phy_power_off(phy);
- err = gpio_direction_output(phy->reset_gpio, 0);
- if (err < 0) {
- dev_err(phy->u_phy.dev,
- "GPIO %d direction not set to output: %d\n",
- phy->reset_gpio, err);
- return err;
- }
+ if (!phy->is_ulpi_phy)
+ utmip_pad_close(phy);
- phy->ulpi = otg_ulpi_create(&ulpi_viewport_access_ops, 0);
- if (!phy->ulpi) {
- dev_err(phy->u_phy.dev, "Failed to create ULPI OTG\n");
- err = -ENOMEM;
- return err;
- }
+ regulator_disable(phy->vbus);
+ clk_disable_unprepare(phy->pll_u);
- phy->ulpi->io_priv = phy->regs + ULPI_VIEWPORT;
- return 0;
+ phy->freq = NULL;
}
-static int tegra_usb_phy_init(struct tegra_usb_phy *phy)
+static int tegra_usb_phy_set_suspend(struct usb_phy *u_phy, int suspend)
{
+ struct tegra_usb_phy *phy = to_tegra_usb_phy(u_phy);
+
+ if (WARN_ON(!phy->freq))
+ return -EINVAL;
+
+ if (suspend)
+ return tegra_usb_phy_power_off(phy);
+ else
+ return tegra_usb_phy_power_on(phy);
+}
+
+static int tegra_usb_phy_init(struct usb_phy *u_phy)
+{
+ struct tegra_usb_phy *phy = to_tegra_usb_phy(u_phy);
unsigned long parent_rate;
- int i;
+ unsigned int i;
int err;
- phy->pll_u = devm_clk_get(phy->u_phy.dev, "pll_u");
- if (IS_ERR(phy->pll_u)) {
- err = PTR_ERR(phy->pll_u);
- dev_err(phy->u_phy.dev,
- "Failed to get pll_u clock: %d\n", err);
- return err;
- }
+ if (WARN_ON(phy->freq))
+ return 0;
err = clk_prepare_enable(phy->pll_u);
if (err)
@@ -864,64 +863,74 @@ static int tegra_usb_phy_init(struct tegra_usb_phy *phy)
dev_err(phy->u_phy.dev, "Invalid pll_u parent rate %ld\n",
parent_rate);
err = -EINVAL;
- goto fail;
+ goto disable_clk;
}
- if (!IS_ERR(phy->vbus)) {
- err = regulator_enable(phy->vbus);
- if (err) {
- dev_err(phy->u_phy.dev,
- "Failed to enable USB VBUS regulator: %d\n",
- err);
- goto fail;
- }
+ err = regulator_enable(phy->vbus);
+ if (err) {
+ dev_err(phy->u_phy.dev,
+ "Failed to enable USB VBUS regulator: %d\n", err);
+ goto disable_clk;
}
- if (phy->is_ulpi_phy)
- err = ulpi_open(phy);
- else
+ if (!phy->is_ulpi_phy) {
err = utmip_pad_open(phy);
- if (err < 0)
- goto fail;
+ if (err)
+ goto disable_vbus;
+ }
+
+ err = tegra_usb_phy_power_on(phy);
+ if (err)
+ goto close_phy;
return 0;
-fail:
+close_phy:
+ if (!phy->is_ulpi_phy)
+ utmip_pad_close(phy);
+
+disable_vbus:
+ regulator_disable(phy->vbus);
+
+disable_clk:
clk_disable_unprepare(phy->pll_u);
+
+ phy->freq = NULL;
+
return err;
}
-void tegra_usb_phy_preresume(struct usb_phy *x)
+void tegra_usb_phy_preresume(struct usb_phy *u_phy)
{
- struct tegra_usb_phy *phy = container_of(x, struct tegra_usb_phy, u_phy);
+ struct tegra_usb_phy *phy = to_tegra_usb_phy(u_phy);
if (!phy->is_ulpi_phy)
utmi_phy_preresume(phy);
}
EXPORT_SYMBOL_GPL(tegra_usb_phy_preresume);
-void tegra_usb_phy_postresume(struct usb_phy *x)
+void tegra_usb_phy_postresume(struct usb_phy *u_phy)
{
- struct tegra_usb_phy *phy = container_of(x, struct tegra_usb_phy, u_phy);
+ struct tegra_usb_phy *phy = to_tegra_usb_phy(u_phy);
if (!phy->is_ulpi_phy)
utmi_phy_postresume(phy);
}
EXPORT_SYMBOL_GPL(tegra_usb_phy_postresume);
-void tegra_ehci_phy_restore_start(struct usb_phy *x,
- enum tegra_usb_phy_port_speed port_speed)
+void tegra_ehci_phy_restore_start(struct usb_phy *u_phy,
+ enum tegra_usb_phy_port_speed port_speed)
{
- struct tegra_usb_phy *phy = container_of(x, struct tegra_usb_phy, u_phy);
+ struct tegra_usb_phy *phy = to_tegra_usb_phy(u_phy);
if (!phy->is_ulpi_phy)
utmi_phy_restore_start(phy, port_speed);
}
EXPORT_SYMBOL_GPL(tegra_ehci_phy_restore_start);
-void tegra_ehci_phy_restore_end(struct usb_phy *x)
+void tegra_ehci_phy_restore_end(struct usb_phy *u_phy)
{
- struct tegra_usb_phy *phy = container_of(x, struct tegra_usb_phy, u_phy);
+ struct tegra_usb_phy *phy = to_tegra_usb_phy(u_phy);
if (!phy->is_ulpi_phy)
utmi_phy_restore_end(phy);
@@ -932,21 +941,25 @@ static int read_utmi_param(struct platform_device *pdev, const char *param,
u8 *dest)
{
u32 value;
- int err = of_property_read_u32(pdev->dev.of_node, param, &value);
- *dest = (u8)value;
- if (err < 0)
+ int err;
+
+ err = of_property_read_u32(pdev->dev.of_node, param, &value);
+ if (err)
dev_err(&pdev->dev,
"Failed to read USB UTMI parameter %s: %d\n",
param, err);
+ else
+ *dest = value;
+
return err;
}
static int utmi_phy_probe(struct tegra_usb_phy *tegra_phy,
struct platform_device *pdev)
{
+ struct tegra_utmip_config *config;
struct resource *res;
int err;
- struct tegra_utmip_config *config;
tegra_phy->is_ulpi_phy = false;
@@ -957,7 +970,7 @@ static int utmi_phy_probe(struct tegra_usb_phy *tegra_phy,
}
tegra_phy->pad_regs = devm_ioremap(&pdev->dev, res->start,
- resource_size(res));
+ resource_size(res));
if (!tegra_phy->pad_regs) {
dev_err(&pdev->dev, "Failed to remap UTMI pad regs\n");
return -ENOMEM;
@@ -971,49 +984,49 @@ static int utmi_phy_probe(struct tegra_usb_phy *tegra_phy,
config = tegra_phy->config;
err = read_utmi_param(pdev, "nvidia,hssync-start-delay",
- &config->hssync_start_delay);
- if (err < 0)
+ &config->hssync_start_delay);
+ if (err)
return err;
err = read_utmi_param(pdev, "nvidia,elastic-limit",
- &config->elastic_limit);
- if (err < 0)
+ &config->elastic_limit);
+ if (err)
return err;
err = read_utmi_param(pdev, "nvidia,idle-wait-delay",
- &config->idle_wait_delay);
- if (err < 0)
+ &config->idle_wait_delay);
+ if (err)
return err;
err = read_utmi_param(pdev, "nvidia,term-range-adj",
- &config->term_range_adj);
- if (err < 0)
+ &config->term_range_adj);
+ if (err)
return err;
err = read_utmi_param(pdev, "nvidia,xcvr-lsfslew",
- &config->xcvr_lsfslew);
- if (err < 0)
+ &config->xcvr_lsfslew);
+ if (err)
return err;
err = read_utmi_param(pdev, "nvidia,xcvr-lsrslew",
- &config->xcvr_lsrslew);
- if (err < 0)
+ &config->xcvr_lsrslew);
+ if (err)
return err;
if (tegra_phy->soc_config->requires_extra_tuning_parameters) {
err = read_utmi_param(pdev, "nvidia,xcvr-hsslew",
- &config->xcvr_hsslew);
- if (err < 0)
+ &config->xcvr_hsslew);
+ if (err)
return err;
err = read_utmi_param(pdev, "nvidia,hssquelch-level",
- &config->hssquelch_level);
- if (err < 0)
+ &config->hssquelch_level);
+ if (err)
return err;
err = read_utmi_param(pdev, "nvidia,hsdiscon-level",
- &config->hsdiscon_level);
- if (err < 0)
+ &config->hsdiscon_level);
+ if (err)
return err;
}
@@ -1022,8 +1035,8 @@ static int utmi_phy_probe(struct tegra_usb_phy *tegra_phy,
if (!config->xcvr_setup_use_fuses) {
err = read_utmi_param(pdev, "nvidia,xcvr-setup",
- &config->xcvr_setup);
- if (err < 0)
+ &config->xcvr_setup);
+ if (err)
return err;
}
@@ -1053,23 +1066,20 @@ MODULE_DEVICE_TABLE(of, tegra_usb_phy_id_table);
static int tegra_usb_phy_probe(struct platform_device *pdev)
{
- const struct of_device_id *match;
- struct resource *res;
- struct tegra_usb_phy *tegra_phy = NULL;
struct device_node *np = pdev->dev.of_node;
+ struct tegra_usb_phy *tegra_phy;
enum usb_phy_interface phy_type;
+ struct reset_control *reset;
+ struct gpio_desc *gpiod;
+ struct resource *res;
+ struct usb_phy *phy;
int err;
tegra_phy = devm_kzalloc(&pdev->dev, sizeof(*tegra_phy), GFP_KERNEL);
if (!tegra_phy)
return -ENOMEM;
- match = of_match_device(tegra_usb_phy_id_table, &pdev->dev);
- if (!match) {
- dev_err(&pdev->dev, "Error: No device match found\n");
- return -ENODEV;
- }
- tegra_phy->soc_config = match->data;
+ tegra_phy->soc_config = of_device_get_match_data(&pdev->dev);
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (!res) {
@@ -1078,7 +1088,7 @@ static int tegra_usb_phy_probe(struct platform_device *pdev)
}
tegra_phy->regs = devm_ioremap(&pdev->dev, res->start,
- resource_size(res));
+ resource_size(res));
if (!tegra_phy->regs) {
dev_err(&pdev->dev, "Failed to remap I/O memory\n");
return -ENOMEM;
@@ -1087,25 +1097,86 @@ static int tegra_usb_phy_probe(struct platform_device *pdev)
tegra_phy->is_legacy_phy =
of_property_read_bool(np, "nvidia,has-legacy-mode");
+ if (of_find_property(np, "dr_mode", NULL))
+ tegra_phy->mode = usb_get_dr_mode(&pdev->dev);
+ else
+ tegra_phy->mode = USB_DR_MODE_HOST;
+
+ if (tegra_phy->mode == USB_DR_MODE_UNKNOWN) {
+ dev_err(&pdev->dev, "dr_mode is invalid\n");
+ return -EINVAL;
+ }
+
+ /* On some boards, the VBUS regulator doesn't need to be controlled */
+ tegra_phy->vbus = devm_regulator_get(&pdev->dev, "vbus");
+ if (IS_ERR(tegra_phy->vbus))
+ return PTR_ERR(tegra_phy->vbus);
+
+ tegra_phy->pll_u = devm_clk_get(&pdev->dev, "pll_u");
+ err = PTR_ERR_OR_ZERO(tegra_phy->pll_u);
+ if (err) {
+ dev_err(&pdev->dev, "Failed to get pll_u clock: %d\n", err);
+ return err;
+ }
+
phy_type = of_usb_get_phy_mode(np);
switch (phy_type) {
case USBPHY_INTERFACE_MODE_UTMI:
err = utmi_phy_probe(tegra_phy, pdev);
- if (err < 0)
+ if (err)
return err;
+
+ tegra_phy->pad_clk = devm_clk_get(&pdev->dev, "utmi-pads");
+ err = PTR_ERR_OR_ZERO(tegra_phy->pad_clk);
+ if (err) {
+ dev_err(&pdev->dev,
+ "Failed to get UTMIP pad clock: %d\n", err);
+ return err;
+ }
+
+ reset = devm_reset_control_get_optional_shared(&pdev->dev,
+ "utmi-pads");
+ err = PTR_ERR_OR_ZERO(reset);
+ if (err) {
+ dev_err(&pdev->dev,
+ "Failed to get UTMI-pads reset: %d\n", err);
+ return err;
+ }
+ tegra_phy->pad_rst = reset;
break;
case USBPHY_INTERFACE_MODE_ULPI:
tegra_phy->is_ulpi_phy = true;
- tegra_phy->reset_gpio =
- of_get_named_gpio(np, "nvidia,phy-reset-gpio", 0);
- if (!gpio_is_valid(tegra_phy->reset_gpio)) {
+ tegra_phy->clk = devm_clk_get(&pdev->dev, "ulpi-link");
+ err = PTR_ERR_OR_ZERO(tegra_phy->clk);
+ if (err) {
+ dev_err(&pdev->dev,
+ "Failed to get ULPI clock: %d\n", err);
+ return err;
+ }
+
+ gpiod = devm_gpiod_get_from_of_node(&pdev->dev, np,
+ "nvidia,phy-reset-gpio",
+ 0, GPIOD_OUT_HIGH,
+ "ulpi_phy_reset_b");
+ err = PTR_ERR_OR_ZERO(gpiod);
+ if (err) {
dev_err(&pdev->dev,
- "Invalid GPIO: %d\n", tegra_phy->reset_gpio);
- return tegra_phy->reset_gpio;
+ "Request failed for reset GPIO: %d\n", err);
+ return err;
+ }
+ tegra_phy->reset_gpio = gpiod;
+
+ phy = devm_otg_ulpi_create(&pdev->dev,
+ &ulpi_viewport_access_ops, 0);
+ if (!phy) {
+ dev_err(&pdev->dev, "Failed to create ULPI OTG\n");
+ return -ENOMEM;
}
- tegra_phy->config = NULL;
+
+ tegra_phy->ulpi = phy;
+ tegra_phy->ulpi->io_priv = tegra_phy->regs + ULPI_VIEWPORT;
break;
default:
@@ -1114,40 +1185,16 @@ static int tegra_usb_phy_probe(struct platform_device *pdev)
return -EINVAL;
}
- if (of_find_property(np, "dr_mode", NULL))
- tegra_phy->mode = usb_get_dr_mode(&pdev->dev);
- else
- tegra_phy->mode = USB_DR_MODE_HOST;
-
- if (tegra_phy->mode == USB_DR_MODE_UNKNOWN) {
- dev_err(&pdev->dev, "dr_mode is invalid\n");
- return -EINVAL;
- }
-
- /* On some boards, the VBUS regulator doesn't need to be controlled */
- if (of_find_property(np, "vbus-supply", NULL)) {
- tegra_phy->vbus = devm_regulator_get(&pdev->dev, "vbus");
- if (IS_ERR(tegra_phy->vbus))
- return PTR_ERR(tegra_phy->vbus);
- } else {
- dev_notice(&pdev->dev, "no vbus regulator");
- tegra_phy->vbus = ERR_PTR(-ENODEV);
- }
-
tegra_phy->u_phy.dev = &pdev->dev;
- err = tegra_usb_phy_init(tegra_phy);
- if (err < 0)
- return err;
-
- tegra_phy->u_phy.set_suspend = tegra_usb_phy_suspend;
+ tegra_phy->u_phy.init = tegra_usb_phy_init;
+ tegra_phy->u_phy.shutdown = tegra_usb_phy_shutdown;
+ tegra_phy->u_phy.set_suspend = tegra_usb_phy_set_suspend;
platform_set_drvdata(pdev, tegra_phy);
err = usb_add_phy_dev(&tegra_phy->u_phy);
- if (err < 0) {
- tegra_usb_phy_close(tegra_phy);
+ if (err)
return err;
- }
return 0;
}
@@ -1157,7 +1204,6 @@ static int tegra_usb_phy_remove(struct platform_device *pdev)
struct tegra_usb_phy *tegra_phy = platform_get_drvdata(pdev);
usb_remove_phy(&tegra_phy->u_phy);
- tegra_usb_phy_close(tegra_phy);
return 0;
}
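
Taken together, the probe rework above moves every fallible lookup (clocks, reset, regulator, GPIO, ULPI viewport) to probe time, where -EPROBE_DEFER is cheap, and defers actually powering the PHY to the ->init callback the host controller invokes. Two smaller idioms ride along: of_device_get_match_data() replaces the open-coded of_match_device() dance, and devm_regulator_get() is now called unconditionally, since it hands back a dummy regulator when no "vbus-supply" is described, removing the IS_ERR() guards around enable/disable. The resulting probe shape, heavily condensed (all my_* names hypothetical):

	struct my_phy {
		struct usb_phy u_phy;
		struct regulator *vbus;
		const void *soc_config;
	};

	static int my_phy_init(struct usb_phy *u_phy);		/* assumed */
	static void my_phy_shutdown(struct usb_phy *u_phy);	/* assumed */

	static int my_phy_probe(struct platform_device *pdev)
	{
		struct my_phy *p;

		p = devm_kzalloc(&pdev->dev, sizeof(*p), GFP_KERNEL);
		if (!p)
			return -ENOMEM;

		p->soc_config = of_device_get_match_data(&pdev->dev);

		/* a dummy regulator is returned if the supply is not described */
		p->vbus = devm_regulator_get(&pdev->dev, "vbus");
		if (IS_ERR(p->vbus))
			return PTR_ERR(p->vbus);

		p->u_phy.dev = &pdev->dev;
		p->u_phy.init = my_phy_init;		/* powers up on first use */
		p->u_phy.shutdown = my_phy_shutdown;	/* mirrors ->init */

		platform_set_drvdata(pdev, p);

		return usb_add_phy_dev(&p->u_phy);
	}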
diff --git a/drivers/usb/phy/phy-ulpi.c b/drivers/usb/phy/phy-ulpi.c
index a43c49369a60..e683a37e3a7a 100644
--- a/drivers/usb/phy/phy-ulpi.c
+++ b/drivers/usb/phy/phy-ulpi.c
@@ -240,6 +240,21 @@ static int ulpi_set_vbus(struct usb_otg *otg, bool on)
return usb_phy_io_write(phy, flags, ULPI_OTG_CTRL);
}
+static void otg_ulpi_init(struct usb_phy *phy, struct usb_otg *otg,
+ struct usb_phy_io_ops *ops,
+ unsigned int flags)
+{
+ phy->label = "ULPI";
+ phy->flags = flags;
+ phy->io_ops = ops;
+ phy->otg = otg;
+ phy->init = ulpi_init;
+
+ otg->usb_phy = phy;
+ otg->set_host = ulpi_set_host;
+ otg->set_vbus = ulpi_set_vbus;
+}
+
struct usb_phy *
otg_ulpi_create(struct usb_phy_io_ops *ops,
unsigned int flags)
@@ -257,17 +272,32 @@ otg_ulpi_create(struct usb_phy_io_ops *ops,
return NULL;
}
- phy->label = "ULPI";
- phy->flags = flags;
- phy->io_ops = ops;
- phy->otg = otg;
- phy->init = ulpi_init;
-
- otg->usb_phy = phy;
- otg->set_host = ulpi_set_host;
- otg->set_vbus = ulpi_set_vbus;
+ otg_ulpi_init(phy, otg, ops, flags);
return phy;
}
EXPORT_SYMBOL_GPL(otg_ulpi_create);
+struct usb_phy *
+devm_otg_ulpi_create(struct device *dev,
+ struct usb_phy_io_ops *ops,
+ unsigned int flags)
+{
+ struct usb_phy *phy;
+ struct usb_otg *otg;
+
+ phy = devm_kzalloc(dev, sizeof(*phy), GFP_KERNEL);
+ if (!phy)
+ return NULL;
+
+ otg = devm_kzalloc(dev, sizeof(*otg), GFP_KERNEL);
+ if (!otg) {
+ devm_kfree(dev, phy);
+ return NULL;
+ }
+
+ otg_ulpi_init(phy, otg, ops, flags);
+
+ return phy;
+}
+EXPORT_SYMBOL_GPL(devm_otg_ulpi_create);
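
Factoring the common field setup into otg_ulpi_init() lets the new managed constructor differ from otg_ulpi_create() only in its allocator; with devm_kzalloc() both objects are freed automatically on probe failure or unbind, which is what allows phy-tegra-usb above to delete its tegra_usb_phy_close() unwind path. Caller's view, as a sketch of a hypothetical probe fragment (ULPI_VIEWPORT is the driver-local offset used in phy-tegra-usb):

	static int my_probe_ulpi(struct platform_device *pdev, void __iomem *regs)
	{
		struct usb_phy *phy;

		phy = devm_otg_ulpi_create(&pdev->dev,
					   &ulpi_viewport_access_ops, 0);
		if (!phy)
			return -ENOMEM;		/* allocation is the only failure */

		phy->io_priv = regs + ULPI_VIEWPORT;	/* viewport register access */

		return 0;
	}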
diff --git a/drivers/usb/phy/phy.c b/drivers/usb/phy/phy.c
index 0277f62739a2..ad2554630889 100644
--- a/drivers/usb/phy/phy.c
+++ b/drivers/usb/phy/phy.c
@@ -34,6 +34,14 @@ struct phy_devm {
struct notifier_block *nb;
};
+static const char *const usb_chger_type[] = {
+ [UNKNOWN_TYPE] = "USB_CHARGER_UNKNOWN_TYPE",
+ [SDP_TYPE] = "USB_CHARGER_SDP_TYPE",
+ [CDP_TYPE] = "USB_CHARGER_CDP_TYPE",
+ [DCP_TYPE] = "USB_CHARGER_DCP_TYPE",
+ [ACA_TYPE] = "USB_CHARGER_ACA_TYPE",
+};
+
static struct usb_phy *__usb_find_phy(struct list_head *list,
enum usb_phy_type type)
{
@@ -98,7 +106,8 @@ static void usb_phy_notify_charger_work(struct work_struct *work)
{
struct usb_phy *usb_phy = container_of(work, struct usb_phy, chg_work);
char uchger_state[50] = { 0 };
- char *envp[] = { uchger_state, NULL };
+ char uchger_type[50] = { 0 };
+ char *envp[] = { uchger_state, uchger_type, NULL };
unsigned int min, max;
switch (usb_phy->chg_state) {
@@ -122,6 +131,8 @@ static void usb_phy_notify_charger_work(struct work_struct *work)
return;
}
+ snprintf(uchger_type, ARRAY_SIZE(uchger_type),
+ "USB_CHARGER_TYPE=%s", usb_chger_type[usb_phy->chg_type]);
kobject_uevent_env(&usb_phy->dev->kobj, KOBJ_CHANGE, envp);
}
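
The uevent now reports the detected charger type alongside the charger state. envp is a NULL-terminated array of "KEY=value" strings that kobject_uevent_env() appends to the event's environment, and the new usb_chger_type[] table maps enum usb_charger_type values to stable names for userspace. A condensed sketch of the plumbing (the state value shown is illustrative):

	static void notify_charger(struct usb_phy *usb_phy)
	{
		char state[50], type[50];
		char *envp[] = { state, type, NULL };	/* NULL-terminated env list */

		snprintf(state, sizeof(state), "USB_CHARGER_STATE=%s",
			 "USB_CHARGER_PRESENT");
		snprintf(type, sizeof(type), "USB_CHARGER_TYPE=%s",
			 usb_chger_type[usb_phy->chg_type]);

		kobject_uevent_env(&usb_phy->dev->kobj, KOBJ_CHANGE, envp);
	}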
diff --git a/drivers/usb/renesas_usbhs/common.c b/drivers/usb/renesas_usbhs/common.c
index d438b7871446..3af91b2b8f76 100644
--- a/drivers/usb/renesas_usbhs/common.c
+++ b/drivers/usb/renesas_usbhs/common.c
@@ -8,11 +8,10 @@
*/
#include <linux/clk.h>
#include <linux/err.h>
-#include <linux/gpio.h>
+#include <linux/gpio/consumer.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/of_device.h>
-#include <linux/of_gpio.h>
#include <linux/pm_runtime.h>
#include <linux/reset.h>
#include <linux/slab.h>
@@ -592,7 +591,8 @@ static int usbhs_probe(struct platform_device *pdev)
struct usbhs_priv *priv;
struct resource *irq_res;
struct device *dev = &pdev->dev;
- int ret, gpio;
+ struct gpio_desc *gpiod;
+ int ret;
u32 tmp;
/* check device node */
@@ -657,10 +657,9 @@ static int usbhs_probe(struct platform_device *pdev)
priv->dparam.pio_dma_border = 64; /* 64byte */
if (!of_property_read_u32(dev_of_node(dev), "renesas,buswait", &tmp))
priv->dparam.buswait_bwait = tmp;
- gpio = of_get_named_gpio_flags(dev_of_node(dev), "renesas,enable-gpio",
- 0, NULL);
- if (gpio > 0)
- priv->dparam.enable_gpio = gpio;
+ gpiod = devm_gpiod_get_optional(dev, "renesas,enable", GPIOD_IN);
+ if (IS_ERR(gpiod))
+ return PTR_ERR(gpiod);
/* FIXME */
/* runtime power control ? */
@@ -708,13 +707,10 @@ static int usbhs_probe(struct platform_device *pdev)
usbhs_sys_clock_ctrl(priv, 0);
/* check GPIO determining if USB function should be enabled */
- if (priv->dparam.enable_gpio) {
- gpio_request_one(priv->dparam.enable_gpio, GPIOF_IN, NULL);
- ret = !gpio_get_value(priv->dparam.enable_gpio);
- gpio_free(priv->dparam.enable_gpio);
+ if (gpiod) {
+ ret = !gpiod_get_value(gpiod);
if (ret) {
- dev_warn(dev, "USB function not selected (GPIO %d)\n",
- priv->dparam.enable_gpio);
+ dev_warn(dev, "USB function not selected (GPIO)\n");
ret = -ENOTSUPP;
goto probe_end_mod_exit;
}
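
The conversion above leans on two properties of the descriptor API: devm_gpiod_get_optional() returns NULL rather than an error when the line simply is not described, and requesting with GPIOD_IN replaces the legacy request/direction/read/free dance in one step. Sketch of the resulting probe-time check, with a hypothetical function name:

	static int usb_function_selected(struct device *dev)
	{
		struct gpio_desc *gpiod;

		/* NULL when "renesas,enable-gpios" is absent; not an error */
		gpiod = devm_gpiod_get_optional(dev, "renesas,enable", GPIOD_IN);
		if (IS_ERR(gpiod))
			return PTR_ERR(gpiod);

		if (gpiod && !gpiod_get_value(gpiod))
			return -ENOTSUPP;	/* strap says USB function disabled */

		return 0;
	}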
diff --git a/drivers/usb/renesas_usbhs/rcar2.c b/drivers/usb/renesas_usbhs/rcar2.c
index 440d213e1749..52756fc2ac9c 100644
--- a/drivers/usb/renesas_usbhs/rcar2.c
+++ b/drivers/usb/renesas_usbhs/rcar2.c
@@ -6,8 +6,6 @@
* Copyright (C) 2019 Renesas Electronics Corporation
*/
-#include <linux/gpio.h>
-#include <linux/of_gpio.h>
#include <linux/phy/phy.h>
#include "common.h"
#include "rcar2.h"
@@ -34,7 +32,7 @@ static int usbhs_rcar2_hardware_exit(struct platform_device *pdev)
struct usbhs_priv *priv = usbhs_pdev_to_priv(pdev);
if (priv->phy) {
- phy_put(priv->phy);
+ phy_put(&pdev->dev, priv->phy);
priv->phy = NULL;
}
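
phy_put() grew a struct device argument in this cycle's generic-PHY API change, and the callers here are updated mechanically. Passing the consumer device lets the PHY core undo per-consumer bookkeeping such as the device link established when the PHY was acquired; that rationale is an inference from the API change, not spelled out in these hunks. The new call shape:

	static int my_hardware_exit(struct platform_device *pdev, struct phy *phy)
	{
		/* was: phy_put(phy); the consumer device is now required */
		phy_put(&pdev->dev, phy);

		return 0;
	}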
diff --git a/drivers/usb/renesas_usbhs/rza2.c b/drivers/usb/renesas_usbhs/rza2.c
index 021749594389..3eed3334a17f 100644
--- a/drivers/usb/renesas_usbhs/rza2.c
+++ b/drivers/usb/renesas_usbhs/rza2.c
@@ -29,7 +29,7 @@ static int usbhs_rza2_hardware_exit(struct platform_device *pdev)
{
struct usbhs_priv *priv = usbhs_pdev_to_priv(pdev);
- phy_put(priv->phy);
+ phy_put(&pdev->dev, priv->phy);
priv->phy = NULL;
return 0;
diff --git a/drivers/usb/roles/intel-xhci-usb-role-switch.c b/drivers/usb/roles/intel-xhci-usb-role-switch.c
index 409851306e99..80d6559bbcb2 100644
--- a/drivers/usb/roles/intel-xhci-usb-role-switch.c
+++ b/drivers/usb/roles/intel-xhci-usb-role-switch.c
@@ -161,7 +161,7 @@ static int intel_xhci_usb_probe(struct platform_device *pdev)
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (!res)
return -EINVAL;
- data->base = devm_ioremap_nocache(dev, res->start, resource_size(res));
+ data->base = devm_ioremap(dev, res->start, resource_size(res));
if (!data->base)
return -ENOMEM;
diff --git a/drivers/usb/serial/Kconfig b/drivers/usb/serial/Kconfig
index ed4a18b435a0..25d7e0c36d38 100644
--- a/drivers/usb/serial/Kconfig
+++ b/drivers/usb/serial/Kconfig
@@ -129,9 +129,6 @@ config USB_SERIAL_DIGI_ACCELEPORT
parallel port on the USB 2 appears as a third serial port on Linux.
The Digi Acceleport USB 8 is not yet supported by this driver.
- This driver works under SMP with the usb-uhci driver. It does not
- work under SMP with the uhci driver.
-
To compile this driver as a module, choose M here: the
module will be called digi_acceleport.
diff --git a/drivers/usb/serial/ch341.c b/drivers/usb/serial/ch341.c
index df582fe855f0..d3f420f3a083 100644
--- a/drivers/usb/serial/ch341.c
+++ b/drivers/usb/serial/ch341.c
@@ -642,9 +642,13 @@ static int ch341_tiocmget(struct tty_struct *tty)
static int ch341_reset_resume(struct usb_serial *serial)
{
struct usb_serial_port *port = serial->port[0];
- struct ch341_private *priv = usb_get_serial_port_data(port);
+ struct ch341_private *priv;
int ret;
+ priv = usb_get_serial_port_data(port);
+ if (!priv)
+ return 0;
+
/* reconfigure ch341 serial port after bus-reset */
ch341_configure(serial->dev, priv);
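
The added check covers reset-resume running against a port that never bound (for instance when port probe failed), in which case there is no private data to restore and returning success is the safe no-op. The defensive shape, generically, with all my_* names hypothetical:

	struct my_port_priv;					/* assumed */
	static int my_reconfigure(struct usb_device *udev,
				  struct my_port_priv *priv);	/* assumed */

	static int my_reset_resume(struct usb_serial *serial)
	{
		struct my_port_priv *priv;

		priv = usb_get_serial_port_data(serial->port[0]);
		if (!priv)
			return 0;	/* port not bound: nothing to reconfigure */

		return my_reconfigure(serial->dev, priv);
	}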
diff --git a/drivers/usb/serial/cyberjack.c b/drivers/usb/serial/cyberjack.c
index ebd76ab07b72..821970609695 100644
--- a/drivers/usb/serial/cyberjack.c
+++ b/drivers/usb/serial/cyberjack.c
@@ -276,7 +276,7 @@ static void cyberjack_read_int_callback(struct urb *urb)
old_rdtodo = priv->rdtodo;
if (old_rdtodo > SHRT_MAX - size) {
- dev_dbg(dev, "To many bulk_in urbs to do.\n");
+ dev_dbg(dev, "Too many bulk_in urbs to do.\n");
spin_unlock_irqrestore(&priv->lock, flags);
goto resubmit;
}
diff --git a/drivers/usb/serial/garmin_gps.c b/drivers/usb/serial/garmin_gps.c
index 633550ec3025..ffd984142171 100644
--- a/drivers/usb/serial/garmin_gps.c
+++ b/drivers/usb/serial/garmin_gps.c
@@ -104,7 +104,7 @@ struct garmin_packet {
int seq;
/* the real size of the data array, always > 0 */
int size;
- __u8 data[1];
+ __u8 data[];
};
/* structure used to keep the current state of the driver */
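
The data[1] member was the pre-C99 idiom for a variable-length trailer; the tree-wide replacement is a flexible array member, which makes sizeof the struct exclude the trailer and pairs with struct_size() for overflow-checked allocations. Sketch (struct name hypothetical; the allocation pattern is the usual pairing, not taken from this driver):

	#include <linux/overflow.h>
	#include <linux/slab.h>

	struct my_packet {
		int seq;
		int size;	/* bytes actually stored in data[] */
		u8 data[];	/* flexible array member; must be last */
	};

	static struct my_packet *alloc_packet(int seq, int len)
	{
		/* struct_size() checks the size arithmetic for overflow */
		struct my_packet *p = kzalloc(struct_size(p, data, len),
					      GFP_KERNEL);

		if (p) {
			p->seq = seq;
			p->size = len;
		}

		return p;
	}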
diff --git a/drivers/usb/serial/io_edgeport.c b/drivers/usb/serial/io_edgeport.c
index 9690a5f4b9d6..5737add6a2a4 100644
--- a/drivers/usb/serial/io_edgeport.c
+++ b/drivers/usb/serial/io_edgeport.c
@@ -716,7 +716,7 @@ static void edge_interrupt_callback(struct urb *urb)
if (txCredits) {
port = edge_serial->serial->port[portNumber];
edge_port = usb_get_serial_port_data(port);
- if (edge_port->open) {
+ if (edge_port && edge_port->open) {
spin_lock_irqsave(&edge_port->ep_lock,
flags);
edge_port->txCredits += txCredits;
@@ -1725,7 +1725,8 @@ static void edge_break(struct tty_struct *tty, int break_state)
static void process_rcvd_data(struct edgeport_serial *edge_serial,
unsigned char *buffer, __u16 bufferLength)
{
- struct device *dev = &edge_serial->serial->dev->dev;
+ struct usb_serial *serial = edge_serial->serial;
+ struct device *dev = &serial->dev->dev;
struct usb_serial_port *port;
struct edgeport_port *edge_port;
__u16 lastBufferLength;
@@ -1821,11 +1822,10 @@ static void process_rcvd_data(struct edgeport_serial *edge_serial,
/* spit this data back into the tty driver if this
port is open */
- if (rxLen) {
- port = edge_serial->serial->port[
- edge_serial->rxPort];
+ if (rxLen && edge_serial->rxPort < serial->num_ports) {
+ port = serial->port[edge_serial->rxPort];
edge_port = usb_get_serial_port_data(port);
- if (edge_port->open) {
+ if (edge_port && edge_port->open) {
dev_dbg(dev, "%s - Sending %d bytes to TTY for port %d\n",
__func__, rxLen,
edge_serial->rxPort);
@@ -1833,8 +1833,8 @@ static void process_rcvd_data(struct edgeport_serial *edge_serial,
rxLen);
edge_port->port->icount.rx += rxLen;
}
- buffer += rxLen;
}
+ buffer += rxLen;
break;
case EXPECT_HDR3: /* Expect 3rd byte of status header */
@@ -1869,6 +1869,8 @@ static void process_rcvd_status(struct edgeport_serial *edge_serial,
__u8 code = edge_serial->rxStatusCode;
/* switch the port pointer to the one being currently talked about */
+ if (edge_serial->rxPort >= edge_serial->serial->num_ports)
+ return;
port = edge_serial->serial->port[edge_serial->rxPort];
edge_port = usb_get_serial_port_data(port);
if (edge_port == NULL) {
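
Both rxPort consumers in this file now validate the port number before indexing serial->port[]: the value comes off the wire, so a malfunctioning or malicious device could otherwise drive an out-of-bounds access, and the port's private data is re-checked since it can be NULL when that port never bound. Note also that buffer += rxLen moves out of the conditional, so the parser stays in sync even when the payload is dropped. The guard, factored out as a sketch with a hypothetical helper:

	static struct usb_serial_port *get_rx_port(struct usb_serial *serial,
						   unsigned int rx_port_nr)
	{
		/* rx_port_nr is device-controlled: bounds-check before indexing */
		if (rx_port_nr >= serial->num_ports)
			return NULL;

		return serial->port[rx_port_nr];
	}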
diff --git a/drivers/usb/serial/ir-usb.c b/drivers/usb/serial/ir-usb.c
index 302eb9530859..79d0586e2b33 100644
--- a/drivers/usb/serial/ir-usb.c
+++ b/drivers/usb/serial/ir-usb.c
@@ -45,9 +45,10 @@ static int buffer_size;
static int xbof = -1;
static int ir_startup (struct usb_serial *serial);
-static int ir_open(struct tty_struct *tty, struct usb_serial_port *port);
-static int ir_prepare_write_buffer(struct usb_serial_port *port,
- void *dest, size_t size);
+static int ir_write(struct tty_struct *tty, struct usb_serial_port *port,
+ const unsigned char *buf, int count);
+static int ir_write_room(struct tty_struct *tty);
+static void ir_write_bulk_callback(struct urb *urb);
static void ir_process_read_urb(struct urb *urb);
static void ir_set_termios(struct tty_struct *tty,
struct usb_serial_port *port, struct ktermios *old_termios);
@@ -75,10 +76,13 @@ static struct usb_serial_driver ir_device = {
.description = "IR Dongle",
.id_table = ir_id_table,
.num_ports = 1,
+ .num_bulk_in = 1,
+ .num_bulk_out = 1,
.set_termios = ir_set_termios,
.attach = ir_startup,
- .open = ir_open,
- .prepare_write_buffer = ir_prepare_write_buffer,
+ .write = ir_write,
+ .write_room = ir_write_room,
+ .write_bulk_callback = ir_write_bulk_callback,
.process_read_urb = ir_process_read_urb,
};
@@ -251,35 +255,102 @@ static int ir_startup(struct usb_serial *serial)
return 0;
}
-static int ir_open(struct tty_struct *tty, struct usb_serial_port *port)
+static int ir_write(struct tty_struct *tty, struct usb_serial_port *port,
+ const unsigned char *buf, int count)
{
- int i;
+ struct urb *urb = NULL;
+ unsigned long flags;
+ int ret;
- for (i = 0; i < ARRAY_SIZE(port->write_urbs); ++i)
- port->write_urbs[i]->transfer_flags = URB_ZERO_PACKET;
+ if (port->bulk_out_size == 0)
+ return -EINVAL;
- /* Start reading from the device */
- return usb_serial_generic_open(tty, port);
-}
+ if (count == 0)
+ return 0;
-static int ir_prepare_write_buffer(struct usb_serial_port *port,
- void *dest, size_t size)
-{
- unsigned char *buf = dest;
- int count;
+ count = min(count, port->bulk_out_size - 1);
+
+ spin_lock_irqsave(&port->lock, flags);
+ if (__test_and_clear_bit(0, &port->write_urbs_free)) {
+ urb = port->write_urbs[0];
+ port->tx_bytes += count;
+ }
+ spin_unlock_irqrestore(&port->lock, flags);
+
+ if (!urb)
+ return 0;
/*
* The first byte of the packet we send to the device contains an
- * inbound header which indicates an additional number of BOFs and
+ * outbound header which indicates an additional number of BOFs and
* a baud rate change.
*
* See section 5.4.2.2 of the USB IrDA spec.
*/
- *buf = ir_xbof | ir_baud;
+ *(u8 *)urb->transfer_buffer = ir_xbof | ir_baud;
+
+ memcpy(urb->transfer_buffer + 1, buf, count);
+
+ urb->transfer_buffer_length = count + 1;
+ urb->transfer_flags = URB_ZERO_PACKET;
+
+ ret = usb_submit_urb(urb, GFP_ATOMIC);
+ if (ret) {
+ dev_err(&port->dev, "failed to submit write urb: %d\n", ret);
+
+ spin_lock_irqsave(&port->lock, flags);
+ __set_bit(0, &port->write_urbs_free);
+ port->tx_bytes -= count;
+ spin_unlock_irqrestore(&port->lock, flags);
+
+ return ret;
+ }
+
+ return count;
+}
+
+static void ir_write_bulk_callback(struct urb *urb)
+{
+ struct usb_serial_port *port = urb->context;
+ int status = urb->status;
+ unsigned long flags;
+
+ spin_lock_irqsave(&port->lock, flags);
+ __set_bit(0, &port->write_urbs_free);
+ port->tx_bytes -= urb->transfer_buffer_length - 1;
+ spin_unlock_irqrestore(&port->lock, flags);
+
+ switch (status) {
+ case 0:
+ break;
+ case -ENOENT:
+ case -ECONNRESET:
+ case -ESHUTDOWN:
+ dev_dbg(&port->dev, "write urb stopped: %d\n", status);
+ return;
+ case -EPIPE:
+ dev_err(&port->dev, "write urb stopped: %d\n", status);
+ return;
+ default:
+ dev_err(&port->dev, "nonzero write-urb status: %d\n", status);
+ break;
+ }
+
+ usb_serial_port_softint(port);
+}
- count = kfifo_out_locked(&port->write_fifo, buf + 1, size - 1,
- &port->lock);
- return count + 1;
+static int ir_write_room(struct tty_struct *tty)
+{
+ struct usb_serial_port *port = tty->driver_data;
+ int count = 0;
+
+ if (port->bulk_out_size == 0)
+ return 0;
+
+ if (test_bit(0, &port->write_urbs_free))
+ count = port->bulk_out_size - 1;
+
+ return count;
}
static void ir_process_read_urb(struct urb *urb)
@@ -304,23 +375,15 @@ static void ir_process_read_urb(struct urb *urb)
tty_flip_buffer_push(&port->port);
}
-static void ir_set_termios_callback(struct urb *urb)
-{
- kfree(urb->transfer_buffer);
-
- if (urb->status)
- dev_dbg(&urb->dev->dev, "%s - non-zero urb status: %d\n",
- __func__, urb->status);
-}
-
static void ir_set_termios(struct tty_struct *tty,
struct usb_serial_port *port, struct ktermios *old_termios)
{
- struct urb *urb;
+ struct usb_device *udev = port->serial->dev;
unsigned char *transfer_buffer;
- int result;
+ int actual_length;
speed_t baud;
int ir_baud;
+ int ret;
baud = tty_get_baud_rate(tty);
@@ -332,34 +395,34 @@ static void ir_set_termios(struct tty_struct *tty,
switch (baud) {
case 2400:
- ir_baud = USB_IRDA_BR_2400;
+ ir_baud = USB_IRDA_LS_2400;
break;
case 9600:
- ir_baud = USB_IRDA_BR_9600;
+ ir_baud = USB_IRDA_LS_9600;
break;
case 19200:
- ir_baud = USB_IRDA_BR_19200;
+ ir_baud = USB_IRDA_LS_19200;
break;
case 38400:
- ir_baud = USB_IRDA_BR_38400;
+ ir_baud = USB_IRDA_LS_38400;
break;
case 57600:
- ir_baud = USB_IRDA_BR_57600;
+ ir_baud = USB_IRDA_LS_57600;
break;
case 115200:
- ir_baud = USB_IRDA_BR_115200;
+ ir_baud = USB_IRDA_LS_115200;
break;
case 576000:
- ir_baud = USB_IRDA_BR_576000;
+ ir_baud = USB_IRDA_LS_576000;
break;
case 1152000:
- ir_baud = USB_IRDA_BR_1152000;
+ ir_baud = USB_IRDA_LS_1152000;
break;
case 4000000:
- ir_baud = USB_IRDA_BR_4000000;
+ ir_baud = USB_IRDA_LS_4000000;
break;
default:
- ir_baud = USB_IRDA_BR_9600;
+ ir_baud = USB_IRDA_LS_9600;
baud = 9600;
}
@@ -375,42 +438,22 @@ static void ir_set_termios(struct tty_struct *tty,
/*
* send the baud change out on an "empty" data packet
*/
- urb = usb_alloc_urb(0, GFP_KERNEL);
- if (!urb)
- return;
-
transfer_buffer = kmalloc(1, GFP_KERNEL);
if (!transfer_buffer)
- goto err_buf;
+ return;
*transfer_buffer = ir_xbof | ir_baud;
- usb_fill_bulk_urb(
- urb,
- port->serial->dev,
- usb_sndbulkpipe(port->serial->dev,
- port->bulk_out_endpointAddress),
- transfer_buffer,
- 1,
- ir_set_termios_callback,
- port);
-
- urb->transfer_flags = URB_ZERO_PACKET;
-
- result = usb_submit_urb(urb, GFP_KERNEL);
- if (result) {
- dev_err(&port->dev, "%s - failed to submit urb: %d\n",
- __func__, result);
- goto err_subm;
+ ret = usb_bulk_msg(udev,
+ usb_sndbulkpipe(udev, port->bulk_out_endpointAddress),
+ transfer_buffer, 1, &actual_length, 5000);
+ if (ret || actual_length != 1) {
+ if (actual_length != 1)
+ ret = -EIO;
+ dev_err(&port->dev, "failed to change line speed: %d\n", ret);
}
- usb_free_urb(urb);
-
- return;
-err_subm:
kfree(transfer_buffer);
-err_buf:
- usb_free_urb(urb);
}
static int __init ir_init(void)
diff --git a/drivers/usb/serial/keyspan.c b/drivers/usb/serial/keyspan.c
index e66a59ef43a1..aa3dbce22cfb 100644
--- a/drivers/usb/serial/keyspan.c
+++ b/drivers/usb/serial/keyspan.c
@@ -1058,6 +1058,8 @@ static void usa49_glocont_callback(struct urb *urb)
for (i = 0; i < serial->num_ports; ++i) {
port = serial->port[i];
p_priv = usb_get_serial_port_data(port);
+ if (!p_priv)
+ continue;
if (p_priv->resend_cont) {
dev_dbg(&port->dev, "%s - sending setup\n", __func__);
@@ -1459,6 +1461,8 @@ static void usa67_glocont_callback(struct urb *urb)
for (i = 0; i < serial->num_ports; ++i) {
port = serial->port[i];
p_priv = usb_get_serial_port_data(port);
+ if (!p_priv)
+ continue;
if (p_priv->resend_cont) {
dev_dbg(&port->dev, "%s - sending setup\n", __func__);
diff --git a/drivers/usb/serial/opticon.c b/drivers/usb/serial/opticon.c
index cb7aac9cd9e7..0af76800bd78 100644
--- a/drivers/usb/serial/opticon.c
+++ b/drivers/usb/serial/opticon.c
@@ -41,6 +41,9 @@ struct opticon_private {
bool rts;
bool cts;
int outstanding_urbs;
+ int outstanding_bytes;
+
+ struct usb_anchor anchor;
};
@@ -113,7 +116,7 @@ static int send_control_msg(struct usb_serial_port *port, u8 requesttype,
retval = usb_control_msg(serial->dev, usb_sndctrlpipe(serial->dev, 0),
requesttype,
USB_DIR_OUT|USB_TYPE_VENDOR|USB_RECIP_INTERFACE,
- 0, 0, buffer, 1, 0);
+ 0, 0, buffer, 1, USB_CTRL_SET_TIMEOUT);
kfree(buffer);
if (retval < 0)
@@ -149,6 +152,15 @@ static int opticon_open(struct tty_struct *tty, struct usb_serial_port *port)
return res;
}
+static void opticon_close(struct usb_serial_port *port)
+{
+ struct opticon_private *priv = usb_get_serial_port_data(port);
+
+ usb_kill_anchored_urbs(&priv->anchor);
+
+ usb_serial_generic_close(port);
+}
+
static void opticon_write_control_callback(struct urb *urb)
{
struct usb_serial_port *port = urb->context;
@@ -169,6 +181,7 @@ static void opticon_write_control_callback(struct urb *urb)
spin_lock_irqsave(&priv->lock, flags);
--priv->outstanding_urbs;
+ priv->outstanding_bytes -= urb->transfer_buffer_length;
spin_unlock_irqrestore(&priv->lock, flags);
usb_serial_port_softint(port);
@@ -182,8 +195,8 @@ static int opticon_write(struct tty_struct *tty, struct usb_serial_port *port,
struct urb *urb;
unsigned char *buffer;
unsigned long flags;
- int status;
struct usb_ctrlrequest *dr;
+ int ret = -ENOMEM;
spin_lock_irqsave(&priv->lock, flags);
if (priv->outstanding_urbs > URB_UPPER_LIMIT) {
@@ -192,19 +205,16 @@ static int opticon_write(struct tty_struct *tty, struct usb_serial_port *port,
return 0;
}
priv->outstanding_urbs++;
+ priv->outstanding_bytes += count;
spin_unlock_irqrestore(&priv->lock, flags);
buffer = kmalloc(count, GFP_ATOMIC);
- if (!buffer) {
- count = -ENOMEM;
+ if (!buffer)
goto error_no_buffer;
- }
urb = usb_alloc_urb(0, GFP_ATOMIC);
- if (!urb) {
- count = -ENOMEM;
+ if (!urb)
goto error_no_urb;
- }
memcpy(buffer, buf, count);
@@ -213,10 +223,8 @@ static int opticon_write(struct tty_struct *tty, struct usb_serial_port *port,
/* The connected devices do not have a bulk write endpoint,
* to transmit data to the barcode device the control endpoint is used */
dr = kmalloc(sizeof(struct usb_ctrlrequest), GFP_ATOMIC);
- if (!dr) {
- count = -ENOMEM;
+ if (!dr)
goto error_no_dr;
- }
dr->bRequestType = USB_TYPE_VENDOR | USB_RECIP_INTERFACE | USB_DIR_OUT;
dr->bRequest = 0x01;
@@ -229,13 +237,13 @@ static int opticon_write(struct tty_struct *tty, struct usb_serial_port *port,
(unsigned char *)dr, buffer, count,
opticon_write_control_callback, port);
+ usb_anchor_urb(urb, &priv->anchor);
+
/* send it down the pipe */
- status = usb_submit_urb(urb, GFP_ATOMIC);
- if (status) {
- dev_err(&port->dev,
- "%s - usb_submit_urb(write endpoint) failed status = %d\n",
- __func__, status);
- count = status;
+ ret = usb_submit_urb(urb, GFP_ATOMIC);
+ if (ret) {
+ dev_err(&port->dev, "failed to submit write urb: %d\n", ret);
+ usb_unanchor_urb(urb);
goto error;
}
@@ -253,8 +261,10 @@ error_no_urb:
error_no_buffer:
spin_lock_irqsave(&priv->lock, flags);
--priv->outstanding_urbs;
+ priv->outstanding_bytes -= count;
spin_unlock_irqrestore(&priv->lock, flags);
- return count;
+
+ return ret;
}
static int opticon_write_room(struct tty_struct *tty)
@@ -279,6 +289,20 @@ static int opticon_write_room(struct tty_struct *tty)
return 2048;
}
+static int opticon_chars_in_buffer(struct tty_struct *tty)
+{
+ struct usb_serial_port *port = tty->driver_data;
+ struct opticon_private *priv = usb_get_serial_port_data(port);
+ unsigned long flags;
+ int count;
+
+ spin_lock_irqsave(&priv->lock, flags);
+ count = priv->outstanding_bytes;
+ spin_unlock_irqrestore(&priv->lock, flags);
+
+ return count;
+}
+
static int opticon_tiocmget(struct tty_struct *tty)
{
struct usb_serial_port *port = tty->driver_data;
@@ -354,6 +378,7 @@ static int opticon_port_probe(struct usb_serial_port *port)
return -ENOMEM;
spin_lock_init(&priv->lock);
+ init_usb_anchor(&priv->anchor);
usb_set_serial_port_data(port, priv);
@@ -381,8 +406,10 @@ static struct usb_serial_driver opticon_device = {
.port_probe = opticon_port_probe,
.port_remove = opticon_port_remove,
.open = opticon_open,
+ .close = opticon_close,
.write = opticon_write,
.write_room = opticon_write_room,
+ .chars_in_buffer = opticon_chars_in_buffer,
.throttle = usb_serial_generic_throttle,
.unthrottle = usb_serial_generic_unthrottle,
.get_serial = get_serial_info,
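The opticon changes above thread a usb_anchor through the driver: every write URB is anchored before submission so that close() can cancel them all with one call, and the new outstanding_bytes counter lets chars_in_buffer() report data still in flight. Because the three anchor operations land in different functions, here is the lifecycle condensed into one place (a sketch of the calls, not literal driver code):

	/* port probe: one-time setup */
	init_usb_anchor(&priv->anchor);

	/* write path: attach before submission */
	usb_anchor_urb(urb, &priv->anchor);
	ret = usb_submit_urb(urb, GFP_ATOMIC);
	if (ret)
		usb_unanchor_urb(urb);	/* failed submit: detach again */

	/* close: cancel everything still anchored and wait for the
	 * completion handlers, so none can run after this returns */
	usb_kill_anchored_urbs(&priv->anchor);
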
diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
index 2d919d0e6e45..084cc2fff3ae 100644
--- a/drivers/usb/serial/option.c
+++ b/drivers/usb/serial/option.c
@@ -248,6 +248,7 @@ static void option_instat_callback(struct urb *urb);
#define QUECTEL_PRODUCT_BG96 0x0296
#define QUECTEL_PRODUCT_EP06 0x0306
#define QUECTEL_PRODUCT_EM12 0x0512
+#define QUECTEL_PRODUCT_RM500Q 0x0800
#define CMOTECH_VENDOR_ID 0x16d8
#define CMOTECH_PRODUCT_6001 0x6001
@@ -1104,6 +1105,11 @@ static const struct usb_device_id option_ids[] = {
{ USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EM12, 0xff, 0xff, 0xff),
.driver_info = RSVD(1) | RSVD(2) | RSVD(3) | RSVD(4) | NUMEP2 },
{ USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EM12, 0xff, 0, 0) },
+ { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_RM500Q, 0xff, 0xff, 0x30) },
+ { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_RM500Q, 0xff, 0, 0) },
+ { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_RM500Q, 0xff, 0xff, 0x10),
+ .driver_info = ZLP },
+
{ USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_6001) },
{ USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_CMU_300) },
{ USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_6003),
diff --git a/drivers/usb/serial/quatech2.c b/drivers/usb/serial/quatech2.c
index a62981ca7a73..f93b81a297d6 100644
--- a/drivers/usb/serial/quatech2.c
+++ b/drivers/usb/serial/quatech2.c
@@ -841,7 +841,10 @@ static void qt2_update_msr(struct usb_serial_port *port, unsigned char *ch)
u8 newMSR = (u8) *ch;
unsigned long flags;
+ /* May be called from qt2_process_read_urb() for an unbound port. */
port_priv = usb_get_serial_port_data(port);
+ if (!port_priv)
+ return;
spin_lock_irqsave(&port_priv->lock, flags);
port_priv->shadowMSR = newMSR;
@@ -869,7 +872,10 @@ static void qt2_update_lsr(struct usb_serial_port *port, unsigned char *ch)
unsigned long flags;
u8 newLSR = (u8) *ch;
+ /* May be called from qt2_process_read_urb() for an unbound port. */
port_priv = usb_get_serial_port_data(port);
+ if (!port_priv)
+ return;
if (newLSR & UART_LSR_BI)
newLSR &= (u8) (UART_LSR_OE | UART_LSR_BI);
diff --git a/drivers/usb/serial/usb-serial-simple.c b/drivers/usb/serial/usb-serial-simple.c
index edbbb13d6de6..bd23a7cb1be2 100644
--- a/drivers/usb/serial/usb-serial-simple.c
+++ b/drivers/usb/serial/usb-serial-simple.c
@@ -86,6 +86,8 @@ DEVICE(moto_modem, MOTO_IDS);
#define MOTOROLA_TETRA_IDS() \
{ USB_DEVICE(0x0cad, 0x9011) }, /* Motorola Solutions TETRA PEI */ \
{ USB_DEVICE(0x0cad, 0x9012) }, /* MTP6550 */ \
+ { USB_DEVICE(0x0cad, 0x9013) }, /* MTP3xxx */ \
+ { USB_DEVICE(0x0cad, 0x9015) }, /* MTP85xx */ \
{ USB_DEVICE(0x0cad, 0x9016) } /* TPG2200 */
DEVICE(motorola_tetra, MOTOROLA_TETRA_IDS);
diff --git a/drivers/usb/serial/usb-serial.c b/drivers/usb/serial/usb-serial.c
index 8f066bb55d7d..dc7a65b9ec98 100644
--- a/drivers/usb/serial/usb-serial.c
+++ b/drivers/usb/serial/usb-serial.c
@@ -1317,6 +1317,9 @@ static int usb_serial_register(struct usb_serial_driver *driver)
return -EINVAL;
}
+ /* Prevent individual ports from being unbound. */
+ driver->driver.suppress_bind_attrs = true;
+
usb_serial_operations_init(driver);
/* Add this device to our list of devices */
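Setting suppress_bind_attrs in usb_serial_register() removes the sysfs bind/unbind attributes for every usb-serial driver in one place, closing the window in which userspace could unbind a single port while its sibling ports and the parent device stay bound (several of the NULL-dereference fixes earlier in this series guard against exactly that state). Per driver it amounts to the following (sketch, with an illustrative driver name):

	static struct usb_serial_driver example_device = {
		.driver = {
			.owner = THIS_MODULE,
			.name  = "example",
			/*
			 * No bind/unbind files under
			 * /sys/bus/usb-serial/drivers/example/, so ports only
			 * come and go with the parent USB device.
			 */
			.suppress_bind_attrs = true,
		},
		/* ... */
	};
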
diff --git a/drivers/usb/typec/altmodes/displayport.c b/drivers/usb/typec/altmodes/displayport.c
index 4092248a5936..0edfb89e04a8 100644
--- a/drivers/usb/typec/altmodes/displayport.c
+++ b/drivers/usb/typec/altmodes/displayport.c
@@ -188,7 +188,7 @@ static void dp_altmode_work(struct work_struct *work)
switch (dp->state) {
case DP_STATE_ENTER:
- ret = typec_altmode_enter(dp->alt);
+ ret = typec_altmode_enter(dp->alt, NULL);
if (ret)
dev_err(&dp->alt->dev, "failed to enter mode\n");
break;
@@ -306,7 +306,8 @@ err_unlock:
static int dp_altmode_activate(struct typec_altmode *alt, int activate)
{
- return activate ? typec_altmode_enter(alt) : typec_altmode_exit(alt);
+ return activate ? typec_altmode_enter(alt, NULL) :
+ typec_altmode_exit(alt);
}
static const struct typec_altmode_ops dp_altmode_ops = {
diff --git a/drivers/usb/typec/bus.c b/drivers/usb/typec/bus.c
index 74cb3c2ecb34..2e45eb479386 100644
--- a/drivers/usb/typec/bus.c
+++ b/drivers/usb/typec/bus.c
@@ -1,5 +1,5 @@
// SPDX-License-Identifier: GPL-2.0
-/**
+/*
* Bus for USB Type-C Alternate Modes
*
* Copyright (C) 2018 Intel Corporation
@@ -10,12 +10,23 @@
#include "bus.h"
-static inline int typec_altmode_set_mux(struct altmode *alt, u8 state)
+static inline int
+typec_altmode_set_mux(struct altmode *alt, unsigned long conf, void *data)
{
- return alt->mux ? alt->mux->set(alt->mux, state) : 0;
+ struct typec_mux_state state;
+
+ if (!alt->mux)
+ return 0;
+
+ state.alt = &alt->adev;
+ state.mode = conf;
+ state.data = data;
+
+ return alt->mux->set(alt->mux, &state);
}
-static int typec_altmode_set_state(struct typec_altmode *adev, int state)
+static int typec_altmode_set_state(struct typec_altmode *adev,
+ unsigned long conf, void *data)
{
bool is_port = is_typec_port(adev->dev.parent);
struct altmode *port_altmode;
@@ -23,11 +34,11 @@ static int typec_altmode_set_state(struct typec_altmode *adev, int state)
port_altmode = is_port ? to_altmode(adev) : to_altmode(adev)->partner;
- ret = typec_altmode_set_mux(port_altmode, state);
+ ret = typec_altmode_set_mux(port_altmode, conf, data);
if (ret)
return ret;
- blocking_notifier_call_chain(&port_altmode->nh, state, NULL);
+ blocking_notifier_call_chain(&port_altmode->nh, conf, NULL);
return 0;
}
@@ -67,7 +78,7 @@ int typec_altmode_notify(struct typec_altmode *adev,
is_port = is_typec_port(adev->dev.parent);
partner = altmode->partner;
- ret = typec_altmode_set_mux(is_port ? altmode : partner, (u8)conf);
+ ret = typec_altmode_set_mux(is_port ? altmode : partner, conf, data);
if (ret)
return ret;
@@ -84,12 +95,14 @@ EXPORT_SYMBOL_GPL(typec_altmode_notify);
/**
* typec_altmode_enter - Enter Mode
* @adev: The alternate mode
+ * @vdo: VDO for the Enter Mode command
*
* The alternate mode drivers use this function to enter mode. The port drivers
* use this to inform the alternate mode drivers that the partner has initiated
- * Enter Mode command.
+ * Enter Mode command. If the alternate mode does not require a VDO, @vdo must
+ * be NULL.

*/
-int typec_altmode_enter(struct typec_altmode *adev)
+int typec_altmode_enter(struct typec_altmode *adev, u32 *vdo)
{
struct altmode *partner = to_altmode(adev)->partner;
struct typec_altmode *pdev = &partner->adev;
@@ -101,13 +114,16 @@ int typec_altmode_enter(struct typec_altmode *adev)
if (!pdev->ops || !pdev->ops->enter)
return -EOPNOTSUPP;
+ if (is_typec_port(pdev->dev.parent) && !pdev->active)
+ return -EPERM;
+
/* Moving to USB Safe State */
- ret = typec_altmode_set_state(adev, TYPEC_STATE_SAFE);
+ ret = typec_altmode_set_state(adev, TYPEC_STATE_SAFE, NULL);
if (ret)
return ret;
/* Enter Mode */
- return pdev->ops->enter(pdev);
+ return pdev->ops->enter(pdev, vdo);
}
EXPORT_SYMBOL_GPL(typec_altmode_enter);
@@ -130,7 +146,7 @@ int typec_altmode_exit(struct typec_altmode *adev)
return -EOPNOTSUPP;
/* Moving to USB Safe State */
- ret = typec_altmode_set_state(adev, TYPEC_STATE_SAFE);
+ ret = typec_altmode_set_state(adev, TYPEC_STATE_SAFE, NULL);
if (ret)
return ret;
@@ -383,7 +399,7 @@ static int typec_remove(struct device *dev)
drv->remove(to_typec_altmode(dev));
if (adev->active) {
- WARN_ON(typec_altmode_set_state(adev, TYPEC_STATE_SAFE));
+ WARN_ON(typec_altmode_set_state(adev, TYPEC_STATE_SAFE, NULL));
typec_altmode_update_active(adev, false);
}
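The mux .set() callback now takes a struct typec_mux_state instead of a bare integer, so a mux driver can tell which altmode is driving the request (state->alt) and receive mode-specific data (state->data; for DisplayPort this carries the Status/Configure VDOs). A hypothetical consumer, with the my_mux_* helpers purely illustrative:

	#include <linux/usb/typec_mux.h>
	#include <linux/usb/typec_dp.h>

	static int my_mux_set(struct typec_mux *mux, struct typec_mux_state *state)
	{
		struct my_mux *mm = typec_mux_get_drvdata(mux);

		switch (state->mode) {
		case TYPEC_STATE_SAFE:		/* USB Safe State */
			return my_mux_open_lines(mm);
		case TYPEC_STATE_USB:		/* plain USB operation */
			return my_mux_route_usb(mm);
		default:
			/* Altmode-specific mode value */
			if (state->alt && state->alt->svid == USB_TYPEC_DP_SID)
				return my_mux_route_dp(mm, state->data);
			return -EOPNOTSUPP;
		}
	}

The pi3usb30532 hunk further down is this same conversion applied to a real mux driver.
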
diff --git a/drivers/usb/typec/class.c b/drivers/usb/typec/class.c
index 91d62276b56f..7c44e930602f 100644
--- a/drivers/usb/typec/class.c
+++ b/drivers/usb/typec/class.c
@@ -834,6 +834,52 @@ static const struct device_type typec_cable_dev_type = {
.release = typec_cable_release,
};
+static int cable_match(struct device *dev, void *data)
+{
+ return is_typec_cable(dev);
+}
+
+/**
+ * typec_cable_get - Get a reference to the USB Type-C cable
+ * @port: The USB Type-C Port the cable is connected to
+ *
+ * The caller must decrement the reference count with typec_cable_put() after
+ * use.
+ */
+struct typec_cable *typec_cable_get(struct typec_port *port)
+{
+ struct device *dev;
+
+ dev = device_find_child(&port->dev, NULL, cable_match);
+ if (!dev)
+ return NULL;
+
+ return to_typec_cable(dev);
+}
+EXPORT_SYMBOL_GPL(typec_cable_get);
+
+/**
+ * typec_cable_put - Decrement the reference count on USB Type-C cable
+ * @cable: The USB Type-C cable
+ */
+void typec_cable_put(struct typec_cable *cable)
+{
+ put_device(&cable->dev);
+}
+EXPORT_SYMBOL_GPL(typec_cable_put);
+
+/**
+ * typec_cable_is_active - Check whether the USB Type-C cable is active or passive
+ * @cable: The USB Type-C cable
+ *
+ * Return 1 if the cable is active or 0 if it's passive.
+ */
+int typec_cable_is_active(struct typec_cable *cable)
+{
+ return cable->active;
+}
+EXPORT_SYMBOL_GPL(typec_cable_is_active);
+
/**
* typec_cable_set_identity - Report result from Discover Identity command
* @cable: The cable updated identity values
@@ -1483,7 +1529,11 @@ EXPORT_SYMBOL_GPL(typec_get_orientation);
*/
int typec_set_mode(struct typec_port *port, int mode)
{
- return port->mux ? port->mux->set(port->mux, mode) : 0;
+ struct typec_mux_state state = { };
+
+ state.mode = mode;
+
+ return port->mux ? port->mux->set(port->mux, &state) : 0;
}
EXPORT_SYMBOL_GPL(typec_set_mode);
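typec_cable_get() relies on device_find_child(), which takes a reference on the device it returns; typec_cable_put() is the mandatory counterpart. Typical use from a port or policy driver might look like this (sketch, error handling trimmed, helper name illustrative):

	struct typec_cable *cable;

	cable = typec_cable_get(port);	/* NULL if no cable is registered */
	if (cable) {
		if (typec_cable_is_active(cable))
			limit_current_for_active_cable();	/* illustrative */
		typec_cable_put(cable);	/* drop the reference from _get() */
	}
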
diff --git a/drivers/usb/typec/mux.c b/drivers/usb/typec/mux.c
index 57907f26f681..5baf0f416c73 100644
--- a/drivers/usb/typec/mux.c
+++ b/drivers/usb/typec/mux.c
@@ -1,5 +1,5 @@
// SPDX-License-Identifier: GPL-2.0
-/**
+/*
* USB Type-C Multiplexer/DeMultiplexer Switch support
*
* Copyright (C) 2018 Intel Corporation
diff --git a/drivers/usb/typec/mux/pi3usb30532.c b/drivers/usb/typec/mux/pi3usb30532.c
index 5585b109095b..46457c133d2b 100644
--- a/drivers/usb/typec/mux/pi3usb30532.c
+++ b/drivers/usb/typec/mux/pi3usb30532.c
@@ -73,7 +73,8 @@ static int pi3usb30532_sw_set(struct typec_switch *sw,
return ret;
}
-static int pi3usb30532_mux_set(struct typec_mux *mux, int state)
+static int
+pi3usb30532_mux_set(struct typec_mux *mux, struct typec_mux_state *state)
{
struct pi3usb30532 *pi = typec_mux_get_drvdata(mux);
u8 new_conf;
@@ -82,7 +83,7 @@ static int pi3usb30532_mux_set(struct typec_mux *mux, int state)
mutex_lock(&pi->lock);
new_conf = pi->conf;
- switch (state) {
+ switch (state->mode) {
case TYPEC_STATE_SAFE:
new_conf = (new_conf & PI3USB30532_CONF_SWAP) |
PI3USB30532_CONF_OPEN;
diff --git a/drivers/usb/typec/tcpm/fusb302.c b/drivers/usb/typec/tcpm/fusb302.c
index ed8655c6af8c..b498960ff72b 100644
--- a/drivers/usb/typec/tcpm/fusb302.c
+++ b/drivers/usb/typec/tcpm/fusb302.c
@@ -1666,7 +1666,7 @@ static const struct property_entry port_props[] = {
PROPERTY_ENTRY_STRING("try-power-role", "sink"),
PROPERTY_ENTRY_U32_ARRAY("source-pdos", src_pdo),
PROPERTY_ENTRY_U32_ARRAY("sink-pdos", snk_pdo),
- PROPERTY_ENTRY_U32("op-sink-microwatt", 2500),
+ PROPERTY_ENTRY_U32("op-sink-microwatt", 2500000),
{ }
};
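The op-sink-microwatt property is, as the name says, expressed in microwatts, so the old value of 2500 declared a 2.5 mW operational sink power instead of the intended 2.5 W; the wcove hunk further down fixes the same scaling error for a 15 W sink:

	 2.5 W x 1,000,000 uW/W =  2,500,000 uW   (fusb302)
	15   W x 1,000,000 uW/W = 15,000,000 uW   (wcove)
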
diff --git a/drivers/usb/typec/tcpm/tcpci.c b/drivers/usb/typec/tcpm/tcpci.c
index 8b4ff9fff340..753645bb2527 100644
--- a/drivers/usb/typec/tcpm/tcpci.c
+++ b/drivers/usb/typec/tcpm/tcpci.c
@@ -591,6 +591,12 @@ static int tcpci_probe(struct i2c_client *client,
static int tcpci_remove(struct i2c_client *client)
{
struct tcpci_chip *chip = i2c_get_clientdata(client);
+ int err;
+
+ /* Disable chip interrupts before unregistering port */
+ err = tcpci_write16(chip->tcpci, TCPC_ALERT_MASK, 0);
+ if (err < 0)
+ return err;
tcpci_unregister_port(chip->tcpci);
diff --git a/drivers/usb/typec/tcpm/tcpm.c b/drivers/usb/typec/tcpm/tcpm.c
index 56fc356bc55c..f3087ef8265c 100644
--- a/drivers/usb/typec/tcpm/tcpm.c
+++ b/drivers/usb/typec/tcpm/tcpm.c
@@ -1475,16 +1475,16 @@ static int tcpm_validate_caps(struct tcpm_port *port, const u32 *pdo,
return 0;
}
-static int tcpm_altmode_enter(struct typec_altmode *altmode)
+static int tcpm_altmode_enter(struct typec_altmode *altmode, u32 *vdo)
{
struct tcpm_port *port = typec_altmode_get_drvdata(altmode);
u32 header;
mutex_lock(&port->lock);
- header = VDO(altmode->svid, 1, CMD_ENTER_MODE);
+ header = VDO(altmode->svid, vdo ? 2 : 1, CMD_ENTER_MODE);
header |= VDO_OPOS(altmode->mode);
- tcpm_queue_vdm(port, header, NULL, 0);
+ tcpm_queue_vdm(port, header, vdo, vdo ? 1 : 0);
mod_delayed_work(port->wq, &port->vdm_state_machine, 0);
mutex_unlock(&port->lock);
diff --git a/drivers/usb/typec/tcpm/wcove.c b/drivers/usb/typec/tcpm/wcove.c
index edc271da14f4..9b745f432c91 100644
--- a/drivers/usb/typec/tcpm/wcove.c
+++ b/drivers/usb/typec/tcpm/wcove.c
@@ -597,7 +597,7 @@ static const struct property_entry wcove_props[] = {
PROPERTY_ENTRY_STRING("try-power-role", "sink"),
PROPERTY_ENTRY_U32_ARRAY("source-pdos", src_pdo),
PROPERTY_ENTRY_U32_ARRAY("sink-pdos", snk_pdo),
- PROPERTY_ENTRY_U32("op-sink-microwatt", 15000),
+ PROPERTY_ENTRY_U32("op-sink-microwatt", 15000000),
{ }
};
diff --git a/drivers/usb/typec/ucsi/displayport.c b/drivers/usb/typec/ucsi/displayport.c
index d4d5189edfb8..0f1273ae086c 100644
--- a/drivers/usb/typec/ucsi/displayport.c
+++ b/drivers/usb/typec/ucsi/displayport.c
@@ -45,7 +45,7 @@ struct ucsi_dp {
* -EOPNOTSUPP.
*/
-static int ucsi_displayport_enter(struct typec_altmode *alt)
+static int ucsi_displayport_enter(struct typec_altmode *alt, u32 *vdo)
{
struct ucsi_dp *dp = typec_altmode_get_drvdata(alt);
struct ucsi *ucsi = dp->con->ucsi;
diff --git a/drivers/usb/typec/ucsi/ucsi.c b/drivers/usb/typec/ucsi/ucsi.c
index 4459bc68aa33..d5a6aac86327 100644
--- a/drivers/usb/typec/ucsi/ucsi.c
+++ b/drivers/usb/typec/ucsi/ucsi.c
@@ -189,7 +189,7 @@ int ucsi_resume(struct ucsi *ucsi)
u64 command;
/* Restore UCSI notification enable mask after system resume */
- command = UCSI_SET_NOTIFICATION_ENABLE | UCSI_ENABLE_NTFY_ALL;
+ command = UCSI_SET_NOTIFICATION_ENABLE | ucsi->ntfy;
return ucsi_send_command(ucsi, command, NULL, 0);
}
@@ -318,6 +318,82 @@ err:
return ret;
}
+static int
+ucsi_register_altmodes_nvidia(struct ucsi_connector *con, u8 recipient)
+{
+ int max_altmodes = UCSI_MAX_ALTMODES;
+ struct typec_altmode_desc desc;
+ struct ucsi_altmode alt;
+ struct ucsi_altmode orig[UCSI_MAX_ALTMODES];
+ struct ucsi_altmode updated[UCSI_MAX_ALTMODES];
+ struct ucsi *ucsi = con->ucsi;
+ bool multi_dp = false;
+ u64 command;
+ int ret;
+ int len;
+ int i;
+ int k = 0;
+
+ if (recipient == UCSI_RECIPIENT_CON)
+ max_altmodes = con->ucsi->cap.num_alt_modes;
+
+ memset(orig, 0, sizeof(orig));
+ memset(updated, 0, sizeof(updated));
+
+ /* First get all the alternate modes */
+ for (i = 0; i < max_altmodes; i++) {
+ memset(&alt, 0, sizeof(alt));
+ command = UCSI_GET_ALTERNATE_MODES;
+ command |= UCSI_GET_ALTMODE_RECIPIENT(recipient);
+ command |= UCSI_GET_ALTMODE_CONNECTOR_NUMBER(con->num);
+ command |= UCSI_GET_ALTMODE_OFFSET(i);
+ len = ucsi_run_command(con->ucsi, command, &alt, sizeof(alt));
+ /*
+ * We collect all the alternate modes first and register them
+ * afterwards. Some Type-C devices return zero-length data beyond
+ * the last alternate mode, so a zero length must not be treated
+ * as an error.
+ */
+ if (len < 0)
+ return len;
+
+ /* We got all altmodes, now break out and register them */
+ if (!len || !alt.svid)
+ break;
+
+ orig[k].mid = alt.mid;
+ orig[k].svid = alt.svid;
+ k++;
+ }
+ /*
+ * Update the original altmode table as some PPMs may report
+ * multiple DP altmodes.
+ */
+ if (recipient == UCSI_RECIPIENT_CON)
+ multi_dp = ucsi->ops->update_altmodes(ucsi, orig, updated);
+
+ /* now register altmodes */
+ for (i = 0; i < max_altmodes; i++) {
+ memset(&desc, 0, sizeof(desc));
+ if (multi_dp && recipient == UCSI_RECIPIENT_CON) {
+ desc.svid = updated[i].svid;
+ desc.vdo = updated[i].mid;
+ } else {
+ desc.svid = orig[i].svid;
+ desc.vdo = orig[i].mid;
+ }
+ desc.roles = TYPEC_PORT_DRD;
+
+ if (!desc.svid)
+ return 0;
+
+ ret = ucsi_register_altmode(con, &desc, recipient);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
static int ucsi_register_altmodes(struct ucsi_connector *con, u8 recipient)
{
int max_altmodes = UCSI_MAX_ALTMODES;
@@ -336,6 +412,9 @@ static int ucsi_register_altmodes(struct ucsi_connector *con, u8 recipient)
if (recipient == UCSI_RECIPIENT_SOP && con->partner_altmode[0])
return 0;
+ if (con->ucsi->ops->update_altmodes)
+ return ucsi_register_altmodes_nvidia(con, recipient);
+
if (recipient == UCSI_RECIPIENT_CON)
max_altmodes = con->ucsi->cap.num_alt_modes;
@@ -589,6 +668,11 @@ void ucsi_connector_change(struct ucsi *ucsi, u8 num)
{
struct ucsi_connector *con = &ucsi->connector[num - 1];
+ if (!(ucsi->ntfy & UCSI_ENABLE_NTFY_CONNECTOR_CHANGE)) {
+ dev_dbg(ucsi->dev, "Bogus connector change event\n");
+ return;
+ }
+
if (!test_and_set_bit(EVENT_PENDING, &ucsi->flags))
schedule_work(&con->work);
}
@@ -656,7 +740,7 @@ static int ucsi_role_cmd(struct ucsi_connector *con, u64 command)
ucsi_reset_ppm(con->ucsi);
mutex_unlock(&con->ucsi->ppm_lock);
- c = UCSI_SET_NOTIFICATION_ENABLE | UCSI_ENABLE_NTFY_ALL;
+ c = UCSI_SET_NOTIFICATION_ENABLE | con->ucsi->ntfy;
ucsi_send_command(con->ucsi, c, NULL, 0);
ucsi_reset_connector(con, true);
@@ -890,8 +974,8 @@ int ucsi_init(struct ucsi *ucsi)
}
/* Enable basic notifications */
- command = UCSI_SET_NOTIFICATION_ENABLE;
- command |= UCSI_ENABLE_NTFY_CMD_COMPLETE | UCSI_ENABLE_NTFY_ERROR;
+ ucsi->ntfy = UCSI_ENABLE_NTFY_CMD_COMPLETE | UCSI_ENABLE_NTFY_ERROR;
+ command = UCSI_SET_NOTIFICATION_ENABLE | ucsi->ntfy;
ret = ucsi_run_command(ucsi, command, NULL, 0);
if (ret < 0)
goto err_reset;
@@ -923,7 +1007,8 @@ int ucsi_init(struct ucsi *ucsi)
}
/* Enable all notifications */
- command = UCSI_SET_NOTIFICATION_ENABLE | UCSI_ENABLE_NTFY_ALL;
+ ucsi->ntfy = UCSI_ENABLE_NTFY_ALL;
+ command = UCSI_SET_NOTIFICATION_ENABLE | ucsi->ntfy;
ret = ucsi_run_command(ucsi, command, NULL, 0);
if (ret < 0)
goto err_unregister;
diff --git a/drivers/usb/typec/ucsi/ucsi.h b/drivers/usb/typec/ucsi/ucsi.h
index 831c9470bdc1..e434b9c9a9eb 100644
--- a/drivers/usb/typec/ucsi/ucsi.h
+++ b/drivers/usb/typec/ucsi/ucsi.h
@@ -11,6 +11,7 @@
/* -------------------------------------------------------------------------- */
struct ucsi;
+struct ucsi_altmode;
/* UCSI offsets (Bytes) */
#define UCSI_VERSION 0
@@ -35,6 +36,7 @@ struct ucsi;
* @read: Read operation
* @sync_write: Blocking write operation
* @async_write: Non-blocking write operation
+ * @update_altmodes: Squashes duplicate DP altmodes
*
* Read and write routines for UCSI interface. @sync_write must wait for the
* Command Completion Event from the PPM before returning, and @async_write must
@@ -47,6 +49,8 @@ struct ucsi_operations {
const void *val, size_t val_len);
int (*async_write)(struct ucsi *ucsi, unsigned int offset,
const void *val, size_t val_len);
+ bool (*update_altmodes)(struct ucsi *ucsi, struct ucsi_altmode *orig,
+ struct ucsi_altmode *updated);
};
struct ucsi *ucsi_create(struct device *dev, const struct ucsi_operations *ops);
@@ -82,6 +86,7 @@ void ucsi_connector_change(struct ucsi *ucsi, u8 num);
#define UCSI_GET_ERROR_STATUS 0x13
#define UCSI_CONNECTOR_NUMBER(_num_) ((u64)(_num_) << 16)
+#define UCSI_COMMAND(_cmd_) ((_cmd_) & 0xff)
/* CONNECTOR_RESET command bits */
#define UCSI_CONNECTOR_RESET_HARD BIT(23) /* Deprecated in v1.1 */
@@ -140,6 +145,12 @@ void ucsi_connector_change(struct ucsi *ucsi, u8 num);
#define UCSI_ERROR_PPM_POLICY_CONFLICT BIT(11)
#define UCSI_ERROR_SWAP_REJECTED BIT(12)
+#define UCSI_SET_NEW_CAM_ENTER(x) (((x) >> 23) & 0x1)
+#define UCSI_SET_NEW_CAM_GET_AM(x) (((x) >> 24) & 0xff)
+#define UCSI_SET_NEW_CAM_AM_MASK (0xff << 24)
+#define UCSI_SET_NEW_CAM_SET_AM(x) (((x) & 0xff) << 24)
+#define UCSI_CMD_CONNECTOR_MASK (0x7)
+
/* Data structure filled by PPM in response to GET_CAPABILITY command. */
struct ucsi_capability {
u32 attributes;
@@ -269,6 +280,9 @@ struct ucsi {
/* PPM Communication lock */
struct mutex ppm_lock;
+ /* The latest "Notification Enable" bits (SET_NOTIFICATION_ENABLE) */
+ u64 ntfy;
+
/* PPM communication flags */
unsigned long flags;
#define EVENT_PENDING 0
diff --git a/drivers/usb/typec/ucsi/ucsi_acpi.c b/drivers/usb/typec/ucsi/ucsi_acpi.c
index 3f1786170098..9fc4f338e870 100644
--- a/drivers/usb/typec/ucsi/ucsi_acpi.c
+++ b/drivers/usb/typec/ucsi/ucsi_acpi.c
@@ -127,7 +127,7 @@ static int ucsi_acpi_probe(struct platform_device *pdev)
return -ENODEV;
}
- /* This will make sure we can use ioremap_nocache() */
+ /* This will make sure we can use ioremap() */
status = acpi_release_memory(ACPI_HANDLE(&pdev->dev), res, 1);
if (ACPI_FAILURE(status))
return -ENOMEM;
diff --git a/drivers/usb/typec/ucsi/ucsi_ccg.c b/drivers/usb/typec/ucsi/ucsi_ccg.c
index 3370b3fc37b1..a5b8530490db 100644
--- a/drivers/usb/typec/ucsi/ucsi_ccg.c
+++ b/drivers/usb/typec/ucsi/ucsi_ccg.c
@@ -16,6 +16,7 @@
#include <linux/platform_device.h>
#include <linux/pm.h>
#include <linux/pm_runtime.h>
+#include <linux/usb/typec_dp.h>
#include <asm/unaligned.h>
#include "ucsi.h"
@@ -173,6 +174,15 @@ struct ccg_resp {
u8 length;
};
+struct ucsi_ccg_altmode {
+ u16 svid;
+ u32 mid;
+ u8 linked_idx;
+ u8 active_idx;
+#define UCSI_MULTI_DP_INDEX (0xff)
+ bool checked;
+} __packed;
+
struct ucsi_ccg {
struct device *dev;
struct ucsi *ucsi;
@@ -198,6 +208,11 @@ struct ucsi_ccg {
struct work_struct pm_work;
struct completion complete;
+
+ u64 last_cmd_sent;
+ bool has_multiple_dp;
+ struct ucsi_ccg_altmode orig[UCSI_MAX_ALTMODES];
+ struct ucsi_ccg_altmode updated[UCSI_MAX_ALTMODES];
};
static int ccg_read(struct ucsi_ccg *uc, u16 rab, u8 *data, u32 len)
@@ -319,12 +334,169 @@ static int ucsi_ccg_init(struct ucsi_ccg *uc)
return -ETIMEDOUT;
}
+static void ucsi_ccg_update_get_current_cam_cmd(struct ucsi_ccg *uc, u8 *data)
+{
+ u8 cam, new_cam;
+
+ cam = data[0];
+ new_cam = uc->orig[cam].linked_idx;
+ uc->updated[new_cam].active_idx = cam;
+ data[0] = new_cam;
+}
+
+static bool ucsi_ccg_update_altmodes(struct ucsi *ucsi,
+ struct ucsi_altmode *orig,
+ struct ucsi_altmode *updated)
+{
+ struct ucsi_ccg *uc = ucsi_get_drvdata(ucsi);
+ struct ucsi_ccg_altmode *alt, *new_alt;
+ int i, j, k = 0;
+ bool found = false;
+
+ alt = uc->orig;
+ new_alt = uc->updated;
+ memset(uc->updated, 0, sizeof(uc->updated));
+
+ /*
+ * Copy the original connector altmodes into the new structure.
+ * This must happen before the second loop, since that loop
+ * checks for duplicate altmodes.
+ */
+ for (i = 0; i < UCSI_MAX_ALTMODES; i++) {
+ alt[i].svid = orig[i].svid;
+ alt[i].mid = orig[i].mid;
+ if (!alt[i].svid)
+ break;
+ }
+
+ for (i = 0; i < UCSI_MAX_ALTMODES; i++) {
+ if (!alt[i].svid)
+ break;
+
+ /* already checked and considered */
+ if (alt[i].checked)
+ continue;
+
+ if (!DP_CONF_GET_PIN_ASSIGN(alt[i].mid)) {
+ /* Found a non-DP altmode */
+ new_alt[k].svid = alt[i].svid;
+ new_alt[k].mid |= alt[i].mid;
+ new_alt[k].linked_idx = i;
+ alt[i].linked_idx = k;
+ updated[k].svid = new_alt[k].svid;
+ updated[k].mid = new_alt[k].mid;
+ k++;
+ continue;
+ }
+
+ for (j = i + 1; j < UCSI_MAX_ALTMODES; j++) {
+ if (alt[i].svid != alt[j].svid ||
+ !DP_CONF_GET_PIN_ASSIGN(alt[j].mid)) {
+ continue;
+ } else {
+ /* Found duplicate DP mode */
+ new_alt[k].svid = alt[i].svid;
+ new_alt[k].mid |= alt[i].mid | alt[j].mid;
+ new_alt[k].linked_idx = UCSI_MULTI_DP_INDEX;
+ alt[i].linked_idx = k;
+ alt[j].linked_idx = k;
+ alt[j].checked = true;
+ found = true;
+ }
+ }
+ if (found) {
+ uc->has_multiple_dp = true;
+ } else {
+ /* Didn't find any duplicate DP altmode */
+ new_alt[k].svid = alt[i].svid;
+ new_alt[k].mid |= alt[i].mid;
+ new_alt[k].linked_idx = i;
+ alt[i].linked_idx = k;
+ }
+ updated[k].svid = new_alt[k].svid;
+ updated[k].mid = new_alt[k].mid;
+ k++;
+ }
+ return found;
+}
+
+static void ucsi_ccg_update_set_new_cam_cmd(struct ucsi_ccg *uc,
+ struct ucsi_connector *con,
+ u64 *cmd)
+{
+ struct ucsi_ccg_altmode *new_port, *port;
+ struct typec_altmode *alt = NULL;
+ u8 new_cam, cam, pin;
+ bool enter_new_mode;
+ int i, j, k = 0xff;
+
+ port = uc->orig;
+ new_cam = UCSI_SET_NEW_CAM_GET_AM(*cmd);
+ new_port = &uc->updated[new_cam];
+ cam = new_port->linked_idx;
+ enter_new_mode = UCSI_SET_NEW_CAM_ENTER(*cmd);
+
+ /*
+ * If CAM is UCSI_MULTI_DP_INDEX, this is a DP altmode with
+ * multiple DP modes. Find the CAM with the best pin assignment
+ * among all the DP modes, prioritizing pin E over D over C after
+ * making sure the partner supports that pin.
+ */
+ if (cam == UCSI_MULTI_DP_INDEX) {
+ if (enter_new_mode) {
+ for (i = 0; con->partner_altmode[i]; i++) {
+ alt = con->partner_altmode[i];
+ if (alt->svid == new_port->svid)
+ break;
+ }
+ /*
+ * alt will always be non-NULL here, since this is a
+ * UCSI_SET_NEW_CAM command and so there is at least
+ * one con->partner_altmode[i] whose svid matches
+ * new_port->svid.
+ */
+ for (j = 0; port[j].svid; j++) {
+ pin = DP_CONF_GET_PIN_ASSIGN(port[j].mid);
+ if (alt && port[j].svid == alt->svid &&
+ (pin & DP_CONF_GET_PIN_ASSIGN(alt->vdo))) {
+ /* prioritize pin E->D->C */
+ if (k == 0xff || (k != 0xff && pin >
+ DP_CONF_GET_PIN_ASSIGN(port[k].mid))
+ ) {
+ k = j;
+ }
+ }
+ }
+ cam = k;
+ new_port->active_idx = cam;
+ } else {
+ cam = new_port->active_idx;
+ }
+ }
+ *cmd &= ~UCSI_SET_NEW_CAM_AM_MASK;
+ *cmd |= UCSI_SET_NEW_CAM_SET_AM(cam);
+}
+
static int ucsi_ccg_read(struct ucsi *ucsi, unsigned int offset,
void *val, size_t val_len)
{
+ struct ucsi_ccg *uc = ucsi_get_drvdata(ucsi);
+ int ret;
u16 reg = CCGX_RAB_UCSI_DATA_BLOCK(offset);
- return ccg_read(ucsi_get_drvdata(ucsi), reg, val, val_len);
+ ret = ccg_read(uc, reg, val, val_len);
+ if (ret)
+ return ret;
+
+ if (offset == UCSI_MESSAGE_IN) {
+ if (UCSI_COMMAND(uc->last_cmd_sent) == UCSI_GET_CURRENT_CAM &&
+ uc->has_multiple_dp) {
+ ucsi_ccg_update_get_current_cam_cmd(uc, (u8 *)val);
+ }
+ uc->last_cmd_sent = 0;
+ }
+
+ return ret;
}
static int ucsi_ccg_async_write(struct ucsi *ucsi, unsigned int offset,
@@ -339,12 +511,26 @@ static int ucsi_ccg_sync_write(struct ucsi *ucsi, unsigned int offset,
const void *val, size_t val_len)
{
struct ucsi_ccg *uc = ucsi_get_drvdata(ucsi);
+ struct ucsi_connector *con;
+ int con_index;
int ret;
mutex_lock(&uc->lock);
pm_runtime_get_sync(uc->dev);
set_bit(DEV_CMD_PENDING, &uc->flags);
+ if (offset == UCSI_CONTROL && val_len == sizeof(uc->last_cmd_sent)) {
+ uc->last_cmd_sent = *(u64 *)val;
+
+ if (UCSI_COMMAND(uc->last_cmd_sent) == UCSI_SET_NEW_CAM &&
+ uc->has_multiple_dp) {
+ con_index = (uc->last_cmd_sent >> 16) &
+ UCSI_CMD_CONNECTOR_MASK;
+ con = &uc->ucsi->connector[con_index - 1];
+ ucsi_ccg_update_set_new_cam_cmd(uc, con, (u64 *)val);
+ }
+ }
+
ret = ucsi_ccg_async_write(ucsi, offset, val, val_len);
if (ret)
goto err_clear_bit;
@@ -363,7 +549,8 @@ err_clear_bit:
static const struct ucsi_operations ucsi_ccg_ops = {
.read = ucsi_ccg_read,
.sync_write = ucsi_ccg_sync_write,
- .async_write = ucsi_ccg_async_write
+ .async_write = ucsi_ccg_async_write,
+ .update_altmodes = ucsi_ccg_update_altmodes
};
static irqreturn_t ccg_irq_handler(int irq, void *data)
diff --git a/drivers/vfio/pci/vfio_pci_rdwr.c b/drivers/vfio/pci/vfio_pci_rdwr.c
index 0120d8324a40..a87992892a9f 100644
--- a/drivers/vfio/pci/vfio_pci_rdwr.c
+++ b/drivers/vfio/pci/vfio_pci_rdwr.c
@@ -230,7 +230,7 @@ ssize_t vfio_pci_vga_rw(struct vfio_pci_device *vdev, char __user *buf,
switch ((u32)pos) {
case 0xa0000 ... 0xbffff:
count = min(count, (size_t)(0xc0000 - pos));
- iomem = ioremap_nocache(0xa0000, 0xbffff - 0xa0000 + 1);
+ iomem = ioremap(0xa0000, 0xbffff - 0xa0000 + 1);
off = pos - 0xa0000;
rsrc = VGA_RSRC_LEGACY_MEM;
is_ioport = false;
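This and the remaining ioremap_nocache() hunks are mechanical: ioremap() already returns an uncached mapping on every architecture, so the _nocache alias is being retired tree-wide. The calling pattern is unchanged (sketch; the resource and register offsets are illustrative):

	#include <linux/io.h>

	void __iomem *base;

	base = ioremap(res->start, resource_size(res));	/* uncached MMIO */
	if (!base)
		return -ENOMEM;

	/* ... readl(base + REG_STATUS), writel(v, base + REG_CTRL) ... */

	iounmap(base);
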
diff --git a/drivers/vfio/platform/reset/vfio_platform_amdxgbe.c b/drivers/vfio/platform/reset/vfio_platform_amdxgbe.c
index 2d2babe21b2f..40d4fb9276ba 100644
--- a/drivers/vfio/platform/reset/vfio_platform_amdxgbe.c
+++ b/drivers/vfio/platform/reset/vfio_platform_amdxgbe.c
@@ -54,13 +54,13 @@ static int vfio_platform_amdxgbe_reset(struct vfio_platform_device *vdev)
if (!xgmac_regs->ioaddr) {
xgmac_regs->ioaddr =
- ioremap_nocache(xgmac_regs->addr, xgmac_regs->size);
+ ioremap(xgmac_regs->addr, xgmac_regs->size);
if (!xgmac_regs->ioaddr)
return -ENOMEM;
}
if (!xpcs_regs->ioaddr) {
xpcs_regs->ioaddr =
- ioremap_nocache(xpcs_regs->addr, xpcs_regs->size);
+ ioremap(xpcs_regs->addr, xpcs_regs->size);
if (!xpcs_regs->ioaddr)
return -ENOMEM;
}
diff --git a/drivers/vfio/platform/reset/vfio_platform_bcmflexrm.c b/drivers/vfio/platform/reset/vfio_platform_bcmflexrm.c
index 16165a62b86d..96064ef8f629 100644
--- a/drivers/vfio/platform/reset/vfio_platform_bcmflexrm.c
+++ b/drivers/vfio/platform/reset/vfio_platform_bcmflexrm.c
@@ -82,7 +82,7 @@ static int vfio_platform_bcmflexrm_reset(struct vfio_platform_device *vdev)
/* Map FlexRM ring registers if not mapped */
if (!reg->ioaddr) {
- reg->ioaddr = ioremap_nocache(reg->addr, reg->size);
+ reg->ioaddr = ioremap(reg->addr, reg->size);
if (!reg->ioaddr)
return -ENOMEM;
}
diff --git a/drivers/vfio/platform/reset/vfio_platform_calxedaxgmac.c b/drivers/vfio/platform/reset/vfio_platform_calxedaxgmac.c
index f67bab547501..09a9453b75c5 100644
--- a/drivers/vfio/platform/reset/vfio_platform_calxedaxgmac.c
+++ b/drivers/vfio/platform/reset/vfio_platform_calxedaxgmac.c
@@ -52,7 +52,7 @@ static int vfio_platform_calxedaxgmac_reset(struct vfio_platform_device *vdev)
if (!reg->ioaddr) {
reg->ioaddr =
- ioremap_nocache(reg->addr, reg->size);
+ ioremap(reg->addr, reg->size);
if (!reg->ioaddr)
return -ENOMEM;
}
diff --git a/drivers/vfio/platform/vfio_platform_common.c b/drivers/vfio/platform/vfio_platform_common.c
index e8f2bdbe0542..c0771a9567fb 100644
--- a/drivers/vfio/platform/vfio_platform_common.c
+++ b/drivers/vfio/platform/vfio_platform_common.c
@@ -409,7 +409,7 @@ static ssize_t vfio_platform_read_mmio(struct vfio_platform_region *reg,
if (!reg->ioaddr) {
reg->ioaddr =
- ioremap_nocache(reg->addr, reg->size);
+ ioremap(reg->addr, reg->size);
if (!reg->ioaddr)
return -ENOMEM;
@@ -486,7 +486,7 @@ static ssize_t vfio_platform_write_mmio(struct vfio_platform_region *reg,
if (!reg->ioaddr) {
reg->ioaddr =
- ioremap_nocache(reg->addr, reg->size);
+ ioremap(reg->addr, reg->size);
if (!reg->ioaddr)
return -ENOMEM;
diff --git a/drivers/vfio/vfio_iommu_type1.c b/drivers/vfio/vfio_iommu_type1.c
index 2ada8e6cdb88..a177bf2c6683 100644
--- a/drivers/vfio/vfio_iommu_type1.c
+++ b/drivers/vfio/vfio_iommu_type1.c
@@ -309,9 +309,8 @@ static int put_pfn(unsigned long pfn, int prot)
{
if (!is_invalid_reserved_pfn(pfn)) {
struct page *page = pfn_to_page(pfn);
- if (prot & IOMMU_WRITE)
- SetPageDirty(page);
- put_page(page);
+
+ unpin_user_pages_dirty_lock(&page, 1, prot & IOMMU_WRITE);
return 1;
}
return 0;
@@ -322,7 +321,6 @@ static int vaddr_get_pfn(struct mm_struct *mm, unsigned long vaddr,
{
struct page *page[1];
struct vm_area_struct *vma;
- struct vm_area_struct *vmas[1];
unsigned int flags = 0;
int ret;
@@ -330,33 +328,14 @@ static int vaddr_get_pfn(struct mm_struct *mm, unsigned long vaddr,
flags |= FOLL_WRITE;
down_read(&mm->mmap_sem);
- if (mm == current->mm) {
- ret = get_user_pages(vaddr, 1, flags | FOLL_LONGTERM, page,
- vmas);
- } else {
- ret = get_user_pages_remote(NULL, mm, vaddr, 1, flags, page,
- vmas, NULL);
- /*
- * The lifetime of a vaddr_get_pfn() page pin is
- * userspace-controlled. In the fs-dax case this could
- * lead to indefinite stalls in filesystem operations.
- * Disallow attempts to pin fs-dax pages via this
- * interface.
- */
- if (ret > 0 && vma_is_fsdax(vmas[0])) {
- ret = -EOPNOTSUPP;
- put_page(page[0]);
- }
- }
- up_read(&mm->mmap_sem);
-
+ ret = pin_user_pages_remote(NULL, mm, vaddr, 1, flags | FOLL_LONGTERM,
+ page, NULL, NULL);
if (ret == 1) {
*pfn = page_to_pfn(page[0]);
- return 0;
+ ret = 0;
+ goto done;
}
- down_read(&mm->mmap_sem);
-
vaddr = untagged_addr(vaddr);
vma = find_vma_intersection(mm, vaddr, vaddr + 1);
@@ -366,7 +345,7 @@ static int vaddr_get_pfn(struct mm_struct *mm, unsigned long vaddr,
if (is_invalid_reserved_pfn(*pfn))
ret = 0;
}
-
+done:
up_read(&mm->mmap_sem);
return ret;
}
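The vfio_iommu_type1 conversion moves from get_user_pages()/put_page() to the FOLL_PIN based API: pin_user_pages_remote() marks the pages as pinned for DMA, and unpin_user_pages_dirty_lock() pairs the unpin with the set-dirty-on-write step that put_pfn() previously open-coded. FOLL_LONGTERM also rejects fs-dax mappings inside core gup, which is why the hand-rolled vma_is_fsdax() check can go. The paired calls in isolation (sketch; the real code holds mmap_sem across more logic than this):

	struct page *pages[1];
	int ret;

	down_read(&mm->mmap_sem);
	/* Pin for long-term DMA; FOLL_LONGTERM fails on fs-dax pages. */
	ret = pin_user_pages_remote(NULL, mm, vaddr, 1,
				    FOLL_WRITE | FOLL_LONGTERM,
				    pages, NULL, NULL);
	up_read(&mm->mmap_sem);

	if (ret == 1) {
		/* ... map page_to_pfn(pages[0]) into the IOMMU ... */

		/* Unpin; the last argument marks the page dirty first. */
		unpin_user_pages_dirty_lock(pages, 1, true);
	}
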
diff --git a/drivers/video/console/Kconfig b/drivers/video/console/Kconfig
index c10e17fb9a9a..70c10ea1c38b 100644
--- a/drivers/video/console/Kconfig
+++ b/drivers/video/console/Kconfig
@@ -93,7 +93,6 @@ config SGI_NEWPORT_CONSOLE
config DUMMY_CONSOLE
bool
- depends on VGA_CONSOLE!=y || SGI_NEWPORT_CONSOLE!=y
default y
config DUMMY_CONSOLE_COLUMNS
diff --git a/drivers/video/fbdev/68328fb.c b/drivers/video/fbdev/68328fb.c
index d48e96088f76..9811f1bad8d4 100644
--- a/drivers/video/fbdev/68328fb.c
+++ b/drivers/video/fbdev/68328fb.c
@@ -96,7 +96,7 @@ static int mc68x328fb_pan_display(struct fb_var_screeninfo *var,
struct fb_info *info);
static int mc68x328fb_mmap(struct fb_info *info, struct vm_area_struct *vma);
-static struct fb_ops mc68x328fb_ops = {
+static const struct fb_ops mc68x328fb_ops = {
.fb_check_var = mc68x328fb_check_var,
.fb_set_par = mc68x328fb_set_par,
.fb_setcolreg = mc68x328fb_setcolreg,
@@ -405,20 +405,8 @@ static int mc68x328fb_mmap(struct fb_info *info, struct vm_area_struct *vma)
int __init mc68x328fb_setup(char *options)
{
-#if 0
- char *this_opt;
-#endif
-
if (!options || !*options)
return 1;
-#if 0
- while ((this_opt = strsep(&options, ",")) != NULL) {
- if (!*this_opt)
- continue;
- if (!strncmp(this_opt, "disable", 7))
- mc68x328fb_enable = 0;
- }
-#endif
return 1;
}
diff --git a/drivers/video/fbdev/acornfb.c b/drivers/video/fbdev/acornfb.c
index 7cacae5a8797..a3af49529173 100644
--- a/drivers/video/fbdev/acornfb.c
+++ b/drivers/video/fbdev/acornfb.c
@@ -604,7 +604,7 @@ acornfb_pan_display(struct fb_var_screeninfo *var, struct fb_info *info)
return 0;
}
-static struct fb_ops acornfb_ops = {
+static const struct fb_ops acornfb_ops = {
.owner = THIS_MODULE,
.fb_check_var = acornfb_check_var,
.fb_set_par = acornfb_set_par,
diff --git a/drivers/video/fbdev/amba-clcd.c b/drivers/video/fbdev/amba-clcd.c
index 7de43be6ef2c..c3d55fc6c4e0 100644
--- a/drivers/video/fbdev/amba-clcd.c
+++ b/drivers/video/fbdev/amba-clcd.c
@@ -423,7 +423,7 @@ static int clcdfb_mmap(struct fb_info *info,
return ret;
}
-static struct fb_ops clcdfb_ops = {
+static const struct fb_ops clcdfb_ops = {
.owner = THIS_MODULE,
.fb_check_var = clcdfb_check_var,
.fb_set_par = clcdfb_set_par,
diff --git a/drivers/video/fbdev/amifb.c b/drivers/video/fbdev/amifb.c
index 91ddc9602014..20e03e00b66d 100644
--- a/drivers/video/fbdev/amifb.c
+++ b/drivers/video/fbdev/amifb.c
@@ -3493,7 +3493,7 @@ static irqreturn_t amifb_interrupt(int irq, void *dev_id)
}
-static struct fb_ops amifb_ops = {
+static const struct fb_ops amifb_ops = {
.owner = THIS_MODULE,
.fb_check_var = amifb_check_var,
.fb_set_par = amifb_set_par,
diff --git a/drivers/video/fbdev/arcfb.c b/drivers/video/fbdev/arcfb.c
index a48741aab240..314ab82e01c0 100644
--- a/drivers/video/fbdev/arcfb.c
+++ b/drivers/video/fbdev/arcfb.c
@@ -491,7 +491,7 @@ static ssize_t arcfb_write(struct fb_info *info, const char __user *buf,
return err;
}
-static struct fb_ops arcfb_ops = {
+static const struct fb_ops arcfb_ops = {
.owner = THIS_MODULE,
.fb_open = arcfb_open,
.fb_read = fb_sys_read,
diff --git a/drivers/video/fbdev/arkfb.c b/drivers/video/fbdev/arkfb.c
index f940e8b66b85..11ab9a153860 100644
--- a/drivers/video/fbdev/arkfb.c
+++ b/drivers/video/fbdev/arkfb.c
@@ -917,7 +917,7 @@ static int arkfb_pan_display(struct fb_var_screeninfo *var, struct fb_info *info
/* Frame buffer operations */
-static struct fb_ops arkfb_ops = {
+static const struct fb_ops arkfb_ops = {
.owner = THIS_MODULE,
.fb_open = arkfb_open,
.fb_release = arkfb_release,
diff --git a/drivers/video/fbdev/asiliantfb.c b/drivers/video/fbdev/asiliantfb.c
index ea31054a28ca..3e006da47752 100644
--- a/drivers/video/fbdev/asiliantfb.c
+++ b/drivers/video/fbdev/asiliantfb.c
@@ -95,7 +95,7 @@ static int asiliantfb_set_par(struct fb_info *info);
static int asiliantfb_setcolreg(u_int regno, u_int red, u_int green, u_int blue,
u_int transp, struct fb_info *info);
-static struct fb_ops asiliantfb_ops = {
+static const struct fb_ops asiliantfb_ops = {
.owner = THIS_MODULE,
.fb_check_var = asiliantfb_check_var,
.fb_set_par = asiliantfb_set_par,
diff --git a/drivers/video/fbdev/atmel_lcdfb.c b/drivers/video/fbdev/atmel_lcdfb.c
index 5ff8e0320d95..d567f5d56c13 100644
--- a/drivers/video/fbdev/atmel_lcdfb.c
+++ b/drivers/video/fbdev/atmel_lcdfb.c
@@ -824,7 +824,7 @@ static int atmel_lcdfb_blank(int blank_mode, struct fb_info *info)
return ((blank_mode == FB_BLANK_NORMAL) ? 1 : 0);
}
-static struct fb_ops atmel_lcdfb_ops = {
+static const struct fb_ops atmel_lcdfb_ops = {
.owner = THIS_MODULE,
.fb_check_var = atmel_lcdfb_check_var,
.fb_set_par = atmel_lcdfb_set_par,
diff --git a/drivers/video/fbdev/aty/aty128fb.c b/drivers/video/fbdev/aty/aty128fb.c
index fc1e45d44719..d7e41c8dd533 100644
--- a/drivers/video/fbdev/aty/aty128fb.c
+++ b/drivers/video/fbdev/aty/aty128fb.c
@@ -509,7 +509,7 @@ static void aty128_bl_set_power(struct fb_info *info, int power);
(readb(bios + (v) + 3) << 24))
-static struct fb_ops aty128fb_ops = {
+static const struct fb_ops aty128fb_ops = {
.owner = THIS_MODULE,
.fb_check_var = aty128fb_check_var,
.fb_set_par = aty128fb_set_par,
diff --git a/drivers/video/fbdev/aty/atyfb.h b/drivers/video/fbdev/aty/atyfb.h
index e5a347c58180..a7833bc98225 100644
--- a/drivers/video/fbdev/aty/atyfb.h
+++ b/drivers/video/fbdev/aty/atyfb.h
@@ -341,7 +341,7 @@ extern const u8 aty_postdividers[8];
* Hardware cursor support
*/
-extern int aty_init_cursor(struct fb_info *info);
+extern int aty_init_cursor(struct fb_info *info, struct fb_ops *atyfb_ops);
/*
* Hardware acceleration
diff --git a/drivers/video/fbdev/aty/atyfb_base.c b/drivers/video/fbdev/aty/atyfb_base.c
index 79d548746efd..175d2598f28e 100644
--- a/drivers/video/fbdev/aty/atyfb_base.c
+++ b/drivers/video/fbdev/aty/atyfb_base.c
@@ -1326,10 +1326,10 @@ static int atyfb_set_par(struct fb_info *info)
par->accel_flags = var->accel_flags; /* hack */
if (var->accel_flags) {
- info->fbops->fb_sync = atyfb_sync;
+ atyfb_ops.fb_sync = atyfb_sync;
info->flags &= ~FBINFO_HWACCEL_DISABLED;
} else {
- info->fbops->fb_sync = NULL;
+ atyfb_ops.fb_sync = NULL;
info->flags |= FBINFO_HWACCEL_DISABLED;
}
@@ -2712,7 +2712,7 @@ static int aty_init(struct fb_info *info)
#ifdef CONFIG_FB_ATY_CT
if (!noaccel && M64_HAS(INTEGRATED))
- aty_init_cursor(info);
+ aty_init_cursor(info, &atyfb_ops);
#endif /* CONFIG_FB_ATY_CT */
info->var = var;
diff --git a/drivers/video/fbdev/aty/mach64_cursor.c b/drivers/video/fbdev/aty/mach64_cursor.c
index 4cde25eab8e8..b06fa6e42e6e 100644
--- a/drivers/video/fbdev/aty/mach64_cursor.c
+++ b/drivers/video/fbdev/aty/mach64_cursor.c
@@ -194,7 +194,7 @@ static int atyfb_cursor(struct fb_info *info, struct fb_cursor *cursor)
return 0;
}
-int aty_init_cursor(struct fb_info *info)
+int aty_init_cursor(struct fb_info *info, struct fb_ops *atyfb_ops)
{
unsigned long addr;
@@ -219,7 +219,7 @@ int aty_init_cursor(struct fb_info *info)
info->sprite.buf_align = 16; /* and 64 lines tall. */
info->sprite.flags = FB_PIXMAP_IO;
- info->fbops->fb_cursor = atyfb_cursor;
+ atyfb_ops->fb_cursor = atyfb_cursor;
return 0;
}
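Most fbdev drivers never touch their fb_ops after initialization, so the long run of hunks above and below can mark the tables const and let them live in read-only memory. atyfb is the exception handled here: it swaps fb_sync and fb_cursor at run time, so the mutable ops structure is now passed in explicitly instead of being reached through info->fbops. The common, constifiable case is simply (sketch with illustrative callbacks):

	static const struct fb_ops example_fb_ops = {
		.owner		= THIS_MODULE,
		.fb_check_var	= example_check_var,
		.fb_set_par	= example_set_par,
		.fb_fillrect	= cfb_fillrect,	/* generic cfb helpers */
		.fb_copyarea	= cfb_copyarea,
		.fb_imageblit	= cfb_imageblit,
	};
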
diff --git a/drivers/video/fbdev/aty/radeon_base.c b/drivers/video/fbdev/aty/radeon_base.c
index 4ca07866f2f6..3af00e3b965e 100644
--- a/drivers/video/fbdev/aty/radeon_base.c
+++ b/drivers/video/fbdev/aty/radeon_base.c
@@ -1965,7 +1965,7 @@ static int radeonfb_set_par(struct fb_info *info)
}
-static struct fb_ops radeonfb_ops = {
+static const struct fb_ops radeonfb_ops = {
.owner = THIS_MODULE,
.fb_check_var = radeonfb_check_var,
.fb_set_par = radeonfb_set_par,
diff --git a/drivers/video/fbdev/au1100fb.c b/drivers/video/fbdev/au1100fb.c
index 99941ae1f3a1..37a6512feda0 100644
--- a/drivers/video/fbdev/au1100fb.c
+++ b/drivers/video/fbdev/au1100fb.c
@@ -348,7 +348,7 @@ int au1100fb_fb_mmap(struct fb_info *fbi, struct vm_area_struct *vma)
fbdev->fb_len);
}
-static struct fb_ops au1100fb_ops =
+static const struct fb_ops au1100fb_ops =
{
.owner = THIS_MODULE,
.fb_setcolreg = au1100fb_fb_setcolreg,
diff --git a/drivers/video/fbdev/au1200fb.c b/drivers/video/fbdev/au1200fb.c
index 265d3b45efd0..c00e01a17368 100644
--- a/drivers/video/fbdev/au1200fb.c
+++ b/drivers/video/fbdev/au1200fb.c
@@ -1483,7 +1483,7 @@ static int au1200fb_ioctl(struct fb_info *info, unsigned int cmd,
}
-static struct fb_ops au1200fb_fb_ops = {
+static const struct fb_ops au1200fb_fb_ops = {
.owner = THIS_MODULE,
.fb_check_var = au1200fb_fb_check_var,
.fb_set_par = au1200fb_fb_set_par,
diff --git a/drivers/video/fbdev/broadsheetfb.c b/drivers/video/fbdev/broadsheetfb.c
index d6ba348deb9f..fd66f4d4a621 100644
--- a/drivers/video/fbdev/broadsheetfb.c
+++ b/drivers/video/fbdev/broadsheetfb.c
@@ -1048,7 +1048,7 @@ static ssize_t broadsheetfb_write(struct fb_info *info, const char __user *buf,
return (err) ? err : count;
}
-static struct fb_ops broadsheetfb_ops = {
+static const struct fb_ops broadsheetfb_ops = {
.owner = THIS_MODULE,
.fb_read = fb_sys_read,
.fb_write = broadsheetfb_write,
diff --git a/drivers/video/fbdev/bw2.c b/drivers/video/fbdev/bw2.c
index 436f10f3d375..0d9a6bb57a09 100644
--- a/drivers/video/fbdev/bw2.c
+++ b/drivers/video/fbdev/bw2.c
@@ -37,7 +37,7 @@ static int bw2_ioctl(struct fb_info *, unsigned int, unsigned long);
* Frame buffer operations
*/
-static struct fb_ops bw2_ops = {
+static const struct fb_ops bw2_ops = {
.owner = THIS_MODULE,
.fb_blank = bw2_blank,
.fb_fillrect = cfb_fillrect,
diff --git a/drivers/video/fbdev/carminefb.c b/drivers/video/fbdev/carminefb.c
index 9f3be0258623..3a1c2e0739a1 100644
--- a/drivers/video/fbdev/carminefb.c
+++ b/drivers/video/fbdev/carminefb.c
@@ -527,7 +527,7 @@ static int init_hardware(struct carmine_hw *hw)
return 0;
}
-static struct fb_ops carminefb_ops = {
+static const struct fb_ops carminefb_ops = {
.owner = THIS_MODULE,
.fb_fillrect = cfb_fillrect,
.fb_copyarea = cfb_copyarea,
@@ -633,7 +633,7 @@ static int carminefb_probe(struct pci_dev *dev, const struct pci_device_id *ent)
ret = -EBUSY;
goto err_free_hw;
}
- hw->v_regs = ioremap_nocache(carminefb_fix.mmio_start,
+ hw->v_regs = ioremap(carminefb_fix.mmio_start,
carminefb_fix.mmio_len);
if (!hw->v_regs) {
printk(KERN_ERR "carminefb: Can't remap %s register.\n",
@@ -664,7 +664,7 @@ static int carminefb_probe(struct pci_dev *dev, const struct pci_device_id *ent)
goto err_unmap_vregs;
}
- hw->screen_mem = ioremap_nocache(carminefb_fix.smem_start,
+ hw->screen_mem = ioremap(carminefb_fix.smem_start,
carminefb_fix.smem_len);
if (!hw->screen_mem) {
printk(KERN_ERR "carmine: Can't ioremap smem area.\n");
diff --git a/drivers/video/fbdev/cg14.c b/drivers/video/fbdev/cg14.c
index d80d99db3a46..a620b51cf7d0 100644
--- a/drivers/video/fbdev/cg14.c
+++ b/drivers/video/fbdev/cg14.c
@@ -39,7 +39,7 @@ static int cg14_pan_display(struct fb_var_screeninfo *, struct fb_info *);
* Frame buffer operations
*/
-static struct fb_ops cg14_ops = {
+static const struct fb_ops cg14_ops = {
.owner = THIS_MODULE,
.fb_setcolreg = cg14_setcolreg,
.fb_pan_display = cg14_pan_display,
diff --git a/drivers/video/fbdev/cg3.c b/drivers/video/fbdev/cg3.c
index 09f616dddfd7..77f6470ce665 100644
--- a/drivers/video/fbdev/cg3.c
+++ b/drivers/video/fbdev/cg3.c
@@ -39,7 +39,7 @@ static int cg3_ioctl(struct fb_info *, unsigned int, unsigned long);
* Frame buffer operations
*/
-static struct fb_ops cg3_ops = {
+static const struct fb_ops cg3_ops = {
.owner = THIS_MODULE,
.fb_setcolreg = cg3_setcolreg,
.fb_blank = cg3_blank,
diff --git a/drivers/video/fbdev/cg6.c b/drivers/video/fbdev/cg6.c
index d5888aecc2fb..a1c68cd48d7e 100644
--- a/drivers/video/fbdev/cg6.c
+++ b/drivers/video/fbdev/cg6.c
@@ -44,7 +44,7 @@ static int cg6_pan_display(struct fb_var_screeninfo *, struct fb_info *);
* Frame buffer operations
*/
-static struct fb_ops cg6_ops = {
+static const struct fb_ops cg6_ops = {
.owner = THIS_MODULE,
.fb_setcolreg = cg6_setcolreg,
.fb_blank = cg6_blank,
diff --git a/drivers/video/fbdev/chipsfb.c b/drivers/video/fbdev/chipsfb.c
index f4dc320dcafe..998067b701fa 100644
--- a/drivers/video/fbdev/chipsfb.c
+++ b/drivers/video/fbdev/chipsfb.c
@@ -79,7 +79,7 @@ static int chipsfb_setcolreg(u_int regno, u_int red, u_int green, u_int blue,
u_int transp, struct fb_info *info);
static int chipsfb_blank(int blank, struct fb_info *info);
-static struct fb_ops chipsfb_ops = {
+static const struct fb_ops chipsfb_ops = {
.owner = THIS_MODULE,
.fb_check_var = chipsfb_check_var,
.fb_set_par = chipsfb_set_par,
diff --git a/drivers/video/fbdev/cirrusfb.c b/drivers/video/fbdev/cirrusfb.c
index e4ce5667b125..c3a3e344cee3 100644
--- a/drivers/video/fbdev/cirrusfb.c
+++ b/drivers/video/fbdev/cirrusfb.c
@@ -1956,7 +1956,7 @@ static void cirrusfb_zorro_unmap(struct fb_info *info)
#endif /* CONFIG_ZORRO */
/* function table of the above functions */
-static struct fb_ops cirrusfb_ops = {
+static const struct fb_ops cirrusfb_ops = {
.owner = THIS_MODULE,
.fb_open = cirrusfb_open,
.fb_release = cirrusfb_release,
diff --git a/drivers/video/fbdev/clps711x-fb.c b/drivers/video/fbdev/clps711x-fb.c
index cabbc721f894..c5d15c6db287 100644
--- a/drivers/video/fbdev/clps711x-fb.c
+++ b/drivers/video/fbdev/clps711x-fb.c
@@ -153,7 +153,7 @@ static int clps711x_fb_blank(int blank, struct fb_info *info)
return 0;
}
-static struct fb_ops clps711x_fb_ops = {
+static const struct fb_ops clps711x_fb_ops = {
.owner = THIS_MODULE,
.fb_setcolreg = clps711x_fb_setcolreg,
.fb_check_var = clps711x_fb_check_var,
diff --git a/drivers/video/fbdev/cobalt_lcdfb.c b/drivers/video/fbdev/cobalt_lcdfb.c
index 544946901e8b..5f8b6324d2e8 100644
--- a/drivers/video/fbdev/cobalt_lcdfb.c
+++ b/drivers/video/fbdev/cobalt_lcdfb.c
@@ -269,7 +269,7 @@ static int cobalt_lcdfb_cursor(struct fb_info *info, struct fb_cursor *cursor)
return 0;
}
-static struct fb_ops cobalt_lcd_fbops = {
+static const struct fb_ops cobalt_lcd_fbops = {
.owner = THIS_MODULE,
.fb_read = cobalt_lcdfb_read,
.fb_write = cobalt_lcdfb_write,
diff --git a/drivers/video/fbdev/controlfb.c b/drivers/video/fbdev/controlfb.c
index 9a680ef3ffc3..38b61cdb5ca4 100644
--- a/drivers/video/fbdev/controlfb.c
+++ b/drivers/video/fbdev/controlfb.c
@@ -157,7 +157,7 @@ static int default_vmode __initdata = VMODE_NVRAM;
static int default_cmode __initdata = CMODE_NVRAM;
-static struct fb_ops controlfb_ops = {
+static const struct fb_ops controlfb_ops = {
.owner = THIS_MODULE,
.fb_check_var = controlfb_check_var,
.fb_set_par = controlfb_set_par,
diff --git a/drivers/video/fbdev/core/fb_defio.c b/drivers/video/fbdev/core/fb_defio.c
index 82c20c6047b0..a591d291b231 100644
--- a/drivers/video/fbdev/core/fb_defio.c
+++ b/drivers/video/fbdev/core/fb_defio.c
@@ -171,7 +171,6 @@ int fb_deferred_io_mmap(struct fb_info *info, struct vm_area_struct *vma)
vma->vm_private_data = info;
return 0;
}
-EXPORT_SYMBOL(fb_deferred_io_mmap);
/* workqueue callback */
static void fb_deferred_io_work(struct work_struct *work)
@@ -206,7 +205,6 @@ void fb_deferred_io_init(struct fb_info *info)
BUG_ON(!fbdefio);
mutex_init(&fbdefio->lock);
- info->fbops->fb_mmap = fb_deferred_io_mmap;
INIT_DELAYED_WORK(&info->deferred_work, fb_deferred_io_work);
INIT_LIST_HEAD(&fbdefio->pagelist);
if (fbdefio->delay == 0) /* set a default of 1 s */
@@ -237,7 +235,6 @@ void fb_deferred_io_cleanup(struct fb_info *info)
page->mapping = NULL;
}
- info->fbops->fb_mmap = NULL;
mutex_destroy(&fbdefio->lock);
}
EXPORT_SYMBOL_GPL(fb_deferred_io_cleanup);
diff --git a/drivers/video/fbdev/core/fbcon.c b/drivers/video/fbdev/core/fbcon.c
index c9235a2f42f8..bb6ae995c2e5 100644
--- a/drivers/video/fbdev/core/fbcon.c
+++ b/drivers/video/fbdev/core/fbcon.c
@@ -536,6 +536,13 @@ static int __init fb_console_setup(char *this_opt)
fb_center_logo = true;
continue;
}
+
+ if (!strncmp(options, "logo-count:", 11)) {
+ options += 11;
+ if (*options)
+ fb_logo_count = simple_strtol(options, &options, 0);
+ continue;
+ }
}
return 1;
}
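
/*
 * Illustrative aside, not part of the patch: the new fbcon option is
 * given on the kernel command line, e.g. fbcon=logo-count:4 for four
 * logos, fbcon=logo-count:0 to suppress the logo; a negative count
 * keeps the old one-logo-per-online-CPU behaviour. Hypothetical
 * stand-alone form of the parse step:
 */
static int demo_parse_logo_count(char *opt)
{
	if (!strncmp(opt, "logo-count:", 11)) {
		opt += 11;
		if (*opt)
			return simple_strtol(opt, &opt, 0);
	}
	return -1;	/* default: one logo per online CPU */
}
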
diff --git a/drivers/video/fbdev/core/fbmem.c b/drivers/video/fbdev/core/fbmem.c
index 6f6fc785b545..d04554959ea7 100644
--- a/drivers/video/fbdev/core/fbmem.c
+++ b/drivers/video/fbdev/core/fbmem.c
@@ -54,7 +54,8 @@ int num_registered_fb __read_mostly;
EXPORT_SYMBOL(num_registered_fb);
bool fb_center_logo __read_mostly;
-EXPORT_SYMBOL(fb_center_logo);
+
+int fb_logo_count __read_mostly = -1;
static struct fb_info *get_fb_info(unsigned int idx)
{
@@ -620,7 +621,7 @@ int fb_prepare_logo(struct fb_info *info, int rotate)
memset(&fb_logo, 0, sizeof(struct logo_data));
if (info->flags & FBINFO_MISC_TILEBLITTING ||
- info->fbops->owner)
+ info->fbops->owner || !fb_logo_count)
return 0;
if (info->fix.visual == FB_VISUAL_DIRECTCOLOR) {
@@ -686,10 +687,14 @@ int fb_prepare_logo(struct fb_info *info, int rotate)
int fb_show_logo(struct fb_info *info, int rotate)
{
+ unsigned int count;
int y;
- y = fb_show_logo_line(info, rotate, fb_logo.logo, 0,
- num_online_cpus());
+ if (!fb_logo_count)
+ return 0;
+
+ count = fb_logo_count < 0 ? num_online_cpus() : fb_logo_count;
+ y = fb_show_logo_line(info, rotate, fb_logo.logo, 0, count);
y = fb_show_extra_logos(info, y, rotate);
return y;
@@ -1079,7 +1084,7 @@ EXPORT_SYMBOL(fb_blank);
static long do_fb_ioctl(struct fb_info *info, unsigned int cmd,
unsigned long arg)
{
- struct fb_ops *fb;
+ const struct fb_ops *fb;
struct fb_var_screeninfo var;
struct fb_fix_screeninfo fix;
struct fb_cmap cmap_from;
@@ -1292,7 +1297,7 @@ static long fb_compat_ioctl(struct file *file, unsigned int cmd,
unsigned long arg)
{
struct fb_info *info = file_fb_info(file);
- struct fb_ops *fb;
+ const struct fb_ops *fb;
long ret = -ENOIOCTLCMD;
if (!info)
@@ -1332,16 +1337,23 @@ static int
fb_mmap(struct file *file, struct vm_area_struct * vma)
{
struct fb_info *info = file_fb_info(file);
- struct fb_ops *fb;
+ int (*fb_mmap_fn)(struct fb_info *info, struct vm_area_struct *vma);
unsigned long mmio_pgoff;
unsigned long start;
u32 len;
if (!info)
return -ENODEV;
- fb = info->fbops;
mutex_lock(&info->mm_lock);
- if (fb->fb_mmap) {
+
+ fb_mmap_fn = info->fbops->fb_mmap;
+
+#if IS_ENABLED(CONFIG_FB_DEFERRED_IO)
+ if (info->fbdefio)
+ fb_mmap_fn = fb_deferred_io_mmap;
+#endif
+
+ if (fb_mmap_fn) {
int res;
/*
@@ -1349,7 +1361,7 @@ fb_mmap(struct file *file, struct vm_area_struct * vma)
* SME protection is removed ahead of the call
*/
vma->vm_page_prot = pgprot_decrypted(vma->vm_page_prot);
- res = fb->fb_mmap(info, vma);
+ res = fb_mmap_fn(info, vma);
mutex_unlock(&info->mm_lock);
return res;
}
@@ -1673,7 +1685,7 @@ static void unbind_console(struct fb_info *fb_info)
console_unlock();
}
-void unlink_framebuffer(struct fb_info *fb_info)
+static void unlink_framebuffer(struct fb_info *fb_info)
{
int i;
@@ -1692,7 +1704,6 @@ void unlink_framebuffer(struct fb_info *fb_info)
fb_info->dev = NULL;
}
-EXPORT_SYMBOL(unlink_framebuffer);
static void do_unregister_framebuffer(struct fb_info *fb_info)
{
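
/*
 * Illustrative aside, not part of the patch: once fb_ops is const, the
 * deferred-I/O core can no longer patch fbops->fb_mmap at runtime, so
 * the choice moves into fb_mmap() itself, as above. Hypothetical
 * reduced form of that dispatch (the real code falls back to the
 * generic framebuffer mapping instead of failing):
 */
static int demo_do_mmap(struct fb_info *info, struct vm_area_struct *vma)
{
	int (*mmap_fn)(struct fb_info *, struct vm_area_struct *);

	mmap_fn = info->fbops->fb_mmap;
#if IS_ENABLED(CONFIG_FB_DEFERRED_IO)
	if (info->fbdefio)		/* driver uses deferred I/O */
		mmap_fn = fb_deferred_io_mmap;
#endif
	return mmap_fn ? mmap_fn(info, vma) : -ENODEV;
}
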
diff --git a/drivers/video/fbdev/cyber2000fb.c b/drivers/video/fbdev/cyber2000fb.c
index 3a2d9ff0aa42..460826a7ad55 100644
--- a/drivers/video/fbdev/cyber2000fb.c
+++ b/drivers/video/fbdev/cyber2000fb.c
@@ -1060,7 +1060,7 @@ static int cyber2000fb_blank(int blank, struct fb_info *info)
return 0;
}
-static struct fb_ops cyber2000fb_ops = {
+static const struct fb_ops cyber2000fb_ops = {
.owner = THIS_MODULE,
.fb_check_var = cyber2000fb_check_var,
.fb_set_par = cyber2000fb_set_par,
diff --git a/drivers/video/fbdev/da8xx-fb.c b/drivers/video/fbdev/da8xx-fb.c
index 2d3dcc52fcf3..73c3c4c8cc12 100644
--- a/drivers/video/fbdev/da8xx-fb.c
+++ b/drivers/video/fbdev/da8xx-fb.c
@@ -1294,7 +1294,7 @@ static int da8xxfb_set_par(struct fb_info *info)
return 0;
}
-static struct fb_ops da8xx_fb_ops = {
+static const struct fb_ops da8xx_fb_ops = {
.owner = THIS_MODULE,
.fb_check_var = fb_check_var,
.fb_set_par = da8xxfb_set_par,
diff --git a/drivers/video/fbdev/dnfb.c b/drivers/video/fbdev/dnfb.c
index 8da517eaa4a3..3688f9165848 100644
--- a/drivers/video/fbdev/dnfb.c
+++ b/drivers/video/fbdev/dnfb.c
@@ -108,7 +108,7 @@
static int dnfb_blank(int blank, struct fb_info *info);
static void dnfb_copyarea(struct fb_info *info, const struct fb_copyarea *area);
-static struct fb_ops dn_fb_ops = {
+static const struct fb_ops dn_fb_ops = {
.owner = THIS_MODULE,
.fb_blank = dnfb_blank,
.fb_fillrect = cfb_fillrect,
diff --git a/drivers/video/fbdev/efifb.c b/drivers/video/fbdev/efifb.c
index 1caa3726cb45..65491ae74808 100644
--- a/drivers/video/fbdev/efifb.c
+++ b/drivers/video/fbdev/efifb.c
@@ -255,7 +255,7 @@ static void efifb_destroy(struct fb_info *info)
fb_dealloc_cmap(&info->cmap);
}
-static struct fb_ops efifb_ops = {
+static const struct fb_ops efifb_ops = {
.owner = THIS_MODULE,
.fb_destroy = efifb_destroy,
.fb_setcolreg = efifb_setcolreg,
diff --git a/drivers/video/fbdev/ep93xx-fb.c b/drivers/video/fbdev/ep93xx-fb.c
index d04a047094fc..cda2ef337423 100644
--- a/drivers/video/fbdev/ep93xx-fb.c
+++ b/drivers/video/fbdev/ep93xx-fb.c
@@ -402,7 +402,7 @@ static int ep93xxfb_setcolreg(unsigned int regno, unsigned int red,
return 0;
}
-static struct fb_ops ep93xxfb_ops = {
+static const struct fb_ops ep93xxfb_ops = {
.owner = THIS_MODULE,
.fb_check_var = ep93xxfb_check_var,
.fb_set_par = ep93xxfb_set_par,
diff --git a/drivers/video/fbdev/fb-puv3.c b/drivers/video/fbdev/fb-puv3.c
index fa62c4dff7d1..75df6aabac21 100644
--- a/drivers/video/fbdev/fb-puv3.c
+++ b/drivers/video/fbdev/fb-puv3.c
@@ -644,7 +644,7 @@ int unifb_mmap(struct fb_info *info,
return vm_iomap_memory(vma, info->fix.smem_start, info->fix.smem_len);
}
-static struct fb_ops unifb_ops = {
+static const struct fb_ops unifb_ops = {
.fb_read = fb_sys_read,
.fb_write = fb_sys_write,
.fb_check_var = unifb_check_var,
diff --git a/drivers/video/fbdev/ffb.c b/drivers/video/fbdev/ffb.c
index cd2d1db239a2..948b73184433 100644
--- a/drivers/video/fbdev/ffb.c
+++ b/drivers/video/fbdev/ffb.c
@@ -44,7 +44,7 @@ static int ffb_pan_display(struct fb_var_screeninfo *, struct fb_info *);
* Frame buffer operations
*/
-static struct fb_ops ffb_ops = {
+static const struct fb_ops ffb_ops = {
.owner = THIS_MODULE,
.fb_setcolreg = ffb_setcolreg,
.fb_blank = ffb_blank,
diff --git a/drivers/video/fbdev/fm2fb.c b/drivers/video/fbdev/fm2fb.c
index ac7a4ebfd390..3b727d528fde 100644
--- a/drivers/video/fbdev/fm2fb.c
+++ b/drivers/video/fbdev/fm2fb.c
@@ -165,7 +165,7 @@ static int fm2fb_setcolreg(u_int regno, u_int red, u_int green, u_int blue,
u_int transp, struct fb_info *info);
static int fm2fb_blank(int blank, struct fb_info *info);
-static struct fb_ops fm2fb_ops = {
+static const struct fb_ops fm2fb_ops = {
.owner = THIS_MODULE,
.fb_setcolreg = fm2fb_setcolreg,
.fb_blank = fm2fb_blank,
diff --git a/drivers/video/fbdev/fsl-diu-fb.c b/drivers/video/fbdev/fsl-diu-fb.c
index d19f58263b4e..67ebfe5c9f1d 100644
--- a/drivers/video/fbdev/fsl-diu-fb.c
+++ b/drivers/video/fbdev/fsl-diu-fb.c
@@ -1287,6 +1287,7 @@ static int fsl_diu_ioctl(struct fb_info *info, unsigned int cmd,
dev_warn(info->dev,
"MFB_SET_PIXFMT value of 0x%08x is deprecated.\n",
MFB_SET_PIXFMT_OLD);
+ /* fall through */
case MFB_SET_PIXFMT:
if (copy_from_user(&pix_fmt, buf, sizeof(pix_fmt)))
return -EFAULT;
@@ -1296,6 +1297,7 @@ static int fsl_diu_ioctl(struct fb_info *info, unsigned int cmd,
dev_warn(info->dev,
"MFB_GET_PIXFMT value of 0x%08x is deprecated.\n",
MFB_GET_PIXFMT_OLD);
+ /* fall through */
case MFB_GET_PIXFMT:
pix_fmt = ad->pix_fmt;
if (copy_to_user(buf, &pix_fmt, sizeof(pix_fmt)))
@@ -1448,7 +1450,7 @@ static int fsl_diu_release(struct fb_info *info, int user)
return res;
}
-static struct fb_ops fsl_diu_ops = {
+static const struct fb_ops fsl_diu_ops = {
.owner = THIS_MODULE,
.fb_check_var = fsl_diu_check_var,
.fb_set_par = fsl_diu_set_par,
diff --git a/drivers/video/fbdev/g364fb.c b/drivers/video/fbdev/g364fb.c
index 223896cc5f7d..845b79da2a7c 100644
--- a/drivers/video/fbdev/g364fb.c
+++ b/drivers/video/fbdev/g364fb.c
@@ -111,7 +111,7 @@ static int g364fb_setcolreg(u_int regno, u_int red, u_int green,
static int g364fb_cursor(struct fb_info *info, struct fb_cursor *cursor);
static int g364fb_blank(int blank, struct fb_info *info);
-static struct fb_ops g364fb_ops = {
+static const struct fb_ops g364fb_ops = {
.owner = THIS_MODULE,
.fb_setcolreg = g364fb_setcolreg,
.fb_pan_display = g364fb_pan_display,
diff --git a/drivers/video/fbdev/gbefb.c b/drivers/video/fbdev/gbefb.c
index b9f6a82a0495..31270a8986e8 100644
--- a/drivers/video/fbdev/gbefb.c
+++ b/drivers/video/fbdev/gbefb.c
@@ -1044,7 +1044,7 @@ static int gbefb_mmap(struct fb_info *info,
return 0;
}
-static struct fb_ops gbefb_ops = {
+static const struct fb_ops gbefb_ops = {
.owner = THIS_MODULE,
.fb_check_var = gbefb_check_var,
.fb_set_par = gbefb_set_par,
diff --git a/drivers/video/fbdev/geode/gx1fb_core.c b/drivers/video/fbdev/geode/gx1fb_core.c
index 737e472fac14..5d34d89fb665 100644
--- a/drivers/video/fbdev/geode/gx1fb_core.c
+++ b/drivers/video/fbdev/geode/gx1fb_core.c
@@ -252,7 +252,7 @@ static int parse_panel_option(struct fb_info *info)
return 0;
}
-static struct fb_ops gx1fb_ops = {
+static const struct fb_ops gx1fb_ops = {
.owner = THIS_MODULE,
.fb_check_var = gx1fb_check_var,
.fb_set_par = gx1fb_set_par,
diff --git a/drivers/video/fbdev/geode/gxfb_core.c b/drivers/video/fbdev/geode/gxfb_core.c
index 435ce2aa4240..d38a148d4746 100644
--- a/drivers/video/fbdev/geode/gxfb_core.c
+++ b/drivers/video/fbdev/geode/gxfb_core.c
@@ -265,7 +265,7 @@ static int gxfb_map_video_memory(struct fb_info *info, struct pci_dev *dev)
return 0;
}
-static struct fb_ops gxfb_ops = {
+static const struct fb_ops gxfb_ops = {
.owner = THIS_MODULE,
.fb_check_var = gxfb_check_var,
.fb_set_par = gxfb_set_par,
diff --git a/drivers/video/fbdev/geode/lxfb_core.c b/drivers/video/fbdev/geode/lxfb_core.c
index b0f07d676eb3..adc2d9c2395e 100644
--- a/drivers/video/fbdev/geode/lxfb_core.c
+++ b/drivers/video/fbdev/geode/lxfb_core.c
@@ -386,7 +386,7 @@ static int lxfb_map_video_memory(struct fb_info *info, struct pci_dev *dev)
return 0;
}
-static struct fb_ops lxfb_ops = {
+static const struct fb_ops lxfb_ops = {
.owner = THIS_MODULE,
.fb_check_var = lxfb_check_var,
.fb_set_par = lxfb_set_par,
diff --git a/drivers/video/fbdev/goldfishfb.c b/drivers/video/fbdev/goldfishfb.c
index f60ac276703d..9c83ec3f8e1f 100644
--- a/drivers/video/fbdev/goldfishfb.c
+++ b/drivers/video/fbdev/goldfishfb.c
@@ -160,7 +160,7 @@ static int goldfish_fb_blank(int blank, struct fb_info *info)
return 0;
}
-static struct fb_ops goldfish_fb_ops = {
+static const struct fb_ops goldfish_fb_ops = {
.owner = THIS_MODULE,
.fb_check_var = goldfish_fb_check_var,
.fb_set_par = goldfish_fb_set_par,
diff --git a/drivers/video/fbdev/grvga.c b/drivers/video/fbdev/grvga.c
index d22e8b0c906d..07dda03e0957 100644
--- a/drivers/video/fbdev/grvga.c
+++ b/drivers/video/fbdev/grvga.c
@@ -251,7 +251,7 @@ static int grvga_pan_display(struct fb_var_screeninfo *var,
return 0;
}
-static struct fb_ops grvga_ops = {
+static const struct fb_ops grvga_ops = {
.owner = THIS_MODULE,
.fb_check_var = grvga_check_var,
.fb_set_par = grvga_set_par,
diff --git a/drivers/video/fbdev/gxt4500.c b/drivers/video/fbdev/gxt4500.c
index c7502fd8f447..13ded3a10708 100644
--- a/drivers/video/fbdev/gxt4500.c
+++ b/drivers/video/fbdev/gxt4500.c
@@ -599,7 +599,7 @@ static const struct fb_fix_screeninfo gxt4500_fix = {
.mmio_len = 0x20000,
};
-static struct fb_ops gxt4500_ops = {
+static const struct fb_ops gxt4500_ops = {
.owner = THIS_MODULE,
.fb_check_var = gxt4500_check_var,
.fb_set_par = gxt4500_set_par,
diff --git a/drivers/video/fbdev/hecubafb.c b/drivers/video/fbdev/hecubafb.c
index 8577195cb533..00d77105161a 100644
--- a/drivers/video/fbdev/hecubafb.c
+++ b/drivers/video/fbdev/hecubafb.c
@@ -197,7 +197,7 @@ static ssize_t hecubafb_write(struct fb_info *info, const char __user *buf,
return (err) ? err : count;
}
-static struct fb_ops hecubafb_ops = {
+static const struct fb_ops hecubafb_ops = {
.owner = THIS_MODULE,
.fb_read = fb_sys_read,
.fb_write = hecubafb_write,
diff --git a/drivers/video/fbdev/hgafb.c b/drivers/video/fbdev/hgafb.c
index 59e1cae57948..a45fcff1461f 100644
--- a/drivers/video/fbdev/hgafb.c
+++ b/drivers/video/fbdev/hgafb.c
@@ -523,7 +523,7 @@ static void hgafb_imageblit(struct fb_info *info, const struct fb_image *image)
}
}
-static struct fb_ops hgafb_ops = {
+static const struct fb_ops hgafb_ops = {
.owner = THIS_MODULE,
.fb_open = hgafb_open,
.fb_release = hgafb_release,
diff --git a/drivers/video/fbdev/hitfb.c b/drivers/video/fbdev/hitfb.c
index abe3e54d4506..009e5d2aa100 100644
--- a/drivers/video/fbdev/hitfb.c
+++ b/drivers/video/fbdev/hitfb.c
@@ -311,7 +311,7 @@ static int hitfb_set_par(struct fb_info *info)
return 0;
}
-static struct fb_ops hitfb_ops = {
+static const struct fb_ops hitfb_ops = {
.owner = THIS_MODULE,
.fb_check_var = hitfb_check_var,
.fb_set_par = hitfb_set_par,
diff --git a/drivers/video/fbdev/hpfb.c b/drivers/video/fbdev/hpfb.c
index a79af8f069d1..f02be0db335e 100644
--- a/drivers/video/fbdev/hpfb.c
+++ b/drivers/video/fbdev/hpfb.c
@@ -184,7 +184,7 @@ static int hpfb_sync(struct fb_info *info)
return 0;
}
-static struct fb_ops hpfb_ops = {
+static const struct fb_ops hpfb_ops = {
.owner = THIS_MODULE,
.fb_setcolreg = hpfb_setcolreg,
.fb_blank = hpfb_blank,
diff --git a/drivers/video/fbdev/hyperv_fb.c b/drivers/video/fbdev/hyperv_fb.c
index 4cd27e5172a1..afe9fd751cd5 100644
--- a/drivers/video/fbdev/hyperv_fb.c
+++ b/drivers/video/fbdev/hyperv_fb.c
@@ -895,7 +895,7 @@ static void hvfb_cfb_imageblit(struct fb_info *p,
image->width, image->height);
}
-static struct fb_ops hvfb_ops = {
+static const struct fb_ops hvfb_ops = {
.owner = THIS_MODULE,
.fb_check_var = hvfb_check_var,
.fb_set_par = hvfb_set_par,
diff --git a/drivers/video/fbdev/i740fb.c b/drivers/video/fbdev/i740fb.c
index 347cf8babc3e..c65ec7386e87 100644
--- a/drivers/video/fbdev/i740fb.c
+++ b/drivers/video/fbdev/i740fb.c
@@ -981,7 +981,7 @@ static int i740fb_blank(int blank_mode, struct fb_info *info)
return (blank_mode == FB_BLANK_NORMAL) ? 1 : 0;
}
-static struct fb_ops i740fb_ops = {
+static const struct fb_ops i740fb_ops = {
.owner = THIS_MODULE,
.fb_open = i740fb_open,
.fb_release = i740fb_release,
diff --git a/drivers/video/fbdev/i810/i810_main.c b/drivers/video/fbdev/i810/i810_main.c
index d18f7b31932c..aa7583d963ac 100644
--- a/drivers/video/fbdev/i810/i810_main.c
+++ b/drivers/video/fbdev/i810/i810_main.c
@@ -1883,7 +1883,7 @@ static int i810_allocate_pci_resource(struct i810fb_par *par,
}
par->res_flags |= MMIO_REQ;
- par->mmio_start_virtual = ioremap_nocache(par->mmio_start_phys,
+ par->mmio_start_virtual = ioremap(par->mmio_start_phys,
MMIO_SIZE);
if (!par->mmio_start_virtual) {
printk("i810fb_init: cannot remap mmio region\n");
diff --git a/drivers/video/fbdev/imsttfb.c b/drivers/video/fbdev/imsttfb.c
index 58b01c7d9056..3ac053b88495 100644
--- a/drivers/video/fbdev/imsttfb.c
+++ b/drivers/video/fbdev/imsttfb.c
@@ -1333,7 +1333,7 @@ static struct pci_driver imsttfb_pci_driver = {
.remove = imsttfb_remove,
};
-static struct fb_ops imsttfb_ops = {
+static const struct fb_ops imsttfb_ops = {
.owner = THIS_MODULE,
.fb_check_var = imsttfb_check_var,
.fb_set_par = imsttfb_set_par,
diff --git a/drivers/video/fbdev/imxfb.c b/drivers/video/fbdev/imxfb.c
index b3286d1fa543..08a17eb2a5c7 100644
--- a/drivers/video/fbdev/imxfb.c
+++ b/drivers/video/fbdev/imxfb.c
@@ -566,7 +566,7 @@ static int imxfb_blank(int blank, struct fb_info *info)
return 0;
}
-static struct fb_ops imxfb_ops = {
+static const struct fb_ops imxfb_ops = {
.owner = THIS_MODULE,
.fb_check_var = imxfb_check_var,
.fb_set_par = imxfb_set_par,
diff --git a/drivers/video/fbdev/intelfb/intelfb.h b/drivers/video/fbdev/intelfb/intelfb.h
index b54db05f028d..5de703902a21 100644
--- a/drivers/video/fbdev/intelfb/intelfb.h
+++ b/drivers/video/fbdev/intelfb/intelfb.h
@@ -273,7 +273,7 @@ struct intelfb_vsync {
struct intelfb_info {
struct fb_info *info;
- struct fb_ops *fbops;
+ const struct fb_ops *fbops;
struct pci_dev *pdev;
struct intelfb_hwstate save_state;
diff --git a/drivers/video/fbdev/intelfb/intelfbdrv.c b/drivers/video/fbdev/intelfb/intelfbdrv.c
index a76c61512c60..a9579964eaba 100644
--- a/drivers/video/fbdev/intelfb/intelfbdrv.c
+++ b/drivers/video/fbdev/intelfb/intelfbdrv.c
@@ -193,7 +193,7 @@ static const struct pci_device_id intelfb_pci_table[] = {
static int num_registered = 0;
/* fb ops */
-static struct fb_ops intel_fb_ops = {
+static const struct fb_ops intel_fb_ops = {
.owner = THIS_MODULE,
.fb_open = intelfb_open,
.fb_release = intelfb_release,
@@ -654,7 +654,7 @@ static int intelfb_pci_register(struct pci_dev *pdev,
}
dinfo->mmio_base =
- (u8 __iomem *)ioremap_nocache(dinfo->mmio_base_phys,
+ (u8 __iomem *)ioremap(dinfo->mmio_base_phys,
INTEL_REG_SIZE);
if (!dinfo->mmio_base) {
ERR_MSG("Cannot remap MMIO region.\n");
diff --git a/drivers/video/fbdev/kyro/fbdev.c b/drivers/video/fbdev/kyro/fbdev.c
index a7bd9f25911b..8fbde92ae8b9 100644
--- a/drivers/video/fbdev/kyro/fbdev.c
+++ b/drivers/video/fbdev/kyro/fbdev.c
@@ -648,7 +648,7 @@ static struct pci_driver kyrofb_pci_driver = {
.remove = kyrofb_remove,
};
-static struct fb_ops kyrofb_ops = {
+static const struct fb_ops kyrofb_ops = {
.owner = THIS_MODULE,
.fb_check_var = kyrofb_check_var,
.fb_set_par = kyrofb_set_par,
@@ -683,7 +683,7 @@ static int kyrofb_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
kyro_fix.mmio_len = pci_resource_len(pdev, 1);
currentpar->regbase = deviceInfo.pSTGReg =
- ioremap_nocache(kyro_fix.mmio_start, kyro_fix.mmio_len);
+ ioremap(kyro_fix.mmio_start, kyro_fix.mmio_len);
if (!currentpar->regbase)
goto out_free_fb;
diff --git a/drivers/video/fbdev/leo.c b/drivers/video/fbdev/leo.c
index 5b1141ac182b..40b11cce0ad6 100644
--- a/drivers/video/fbdev/leo.c
+++ b/drivers/video/fbdev/leo.c
@@ -39,7 +39,7 @@ static int leo_pan_display(struct fb_var_screeninfo *, struct fb_info *);
* Frame buffer operations
*/
-static struct fb_ops leo_ops = {
+static const struct fb_ops leo_ops = {
.owner = THIS_MODULE,
.fb_setcolreg = leo_setcolreg,
.fb_blank = leo_blank,
diff --git a/drivers/video/fbdev/macfb.c b/drivers/video/fbdev/macfb.c
index 9a6feee96133..e05a97662ca8 100644
--- a/drivers/video/fbdev/macfb.c
+++ b/drivers/video/fbdev/macfb.c
@@ -496,7 +496,7 @@ static int macfb_setcolreg(unsigned regno, unsigned red, unsigned green,
return 0;
}
-static struct fb_ops macfb_ops = {
+static const struct fb_ops macfb_ops = {
.owner = THIS_MODULE,
.fb_setcolreg = macfb_setcolreg,
.fb_fillrect = cfb_fillrect,
diff --git a/drivers/video/fbdev/matrox/matroxfb_base.c b/drivers/video/fbdev/matrox/matroxfb_base.c
index 1a555f70923a..36cc718b96ae 100644
--- a/drivers/video/fbdev/matrox/matroxfb_base.c
+++ b/drivers/video/fbdev/matrox/matroxfb_base.c
@@ -1710,7 +1710,7 @@ static int initMatrox2(struct matrox_fb_info *minfo, struct board *b)
memsize = mem;
err = -ENOMEM;
- minfo->mmio.vbase.vaddr = ioremap_nocache(ctrlptr_phys, 16384);
+ minfo->mmio.vbase.vaddr = ioremap(ctrlptr_phys, 16384);
if (!minfo->mmio.vbase.vaddr) {
printk(KERN_ERR "matroxfb: cannot ioremap(%lX, 16384), matroxfb disabled\n", ctrlptr_phys);
goto failVideoMR;
diff --git a/drivers/video/fbdev/matrox/matroxfb_crtc2.c b/drivers/video/fbdev/matrox/matroxfb_crtc2.c
index d2a81a2c3ac0..7655afa3fd50 100644
--- a/drivers/video/fbdev/matrox/matroxfb_crtc2.c
+++ b/drivers/video/fbdev/matrox/matroxfb_crtc2.c
@@ -563,7 +563,7 @@ static int matroxfb_dh_blank(int blank, struct fb_info* info) {
#undef m2info
}
-static struct fb_ops matroxfb_dh_ops = {
+static const struct fb_ops matroxfb_dh_ops = {
.owner = THIS_MODULE,
.fb_open = matroxfb_dh_open,
.fb_release = matroxfb_dh_release,
diff --git a/drivers/video/fbdev/matrox/matroxfb_misc.c b/drivers/video/fbdev/matrox/matroxfb_misc.c
index c7aaca12805e..8f159a2ad8d0 100644
--- a/drivers/video/fbdev/matrox/matroxfb_misc.c
+++ b/drivers/video/fbdev/matrox/matroxfb_misc.c
@@ -673,7 +673,10 @@ static int parse_pins5(struct matrox_fb_info *minfo,
if (bd->pins[115] & 4) {
minfo->values.reg.mctlwtst_core = minfo->values.reg.mctlwtst;
} else {
- u_int32_t wtst_xlat[] = { 0, 1, 5, 6, 7, 5, 2, 3 };
+ static const u8 wtst_xlat[] = {
+ 0, 1, 5, 6, 7, 5, 2, 3
+ };
+
minfo->values.reg.mctlwtst_core = (minfo->values.reg.mctlwtst & ~7) |
wtst_xlat[minfo->values.reg.mctlwtst & 7];
}
diff --git a/drivers/video/fbdev/maxinefb.c b/drivers/video/fbdev/maxinefb.c
index 5bb1b5c308a7..ae1a42bcb0ea 100644
--- a/drivers/video/fbdev/maxinefb.c
+++ b/drivers/video/fbdev/maxinefb.c
@@ -105,7 +105,7 @@ static int maxinefb_setcolreg(unsigned regno, unsigned red, unsigned green,
return 0;
}
-static struct fb_ops maxinefb_ops = {
+static const struct fb_ops maxinefb_ops = {
.owner = THIS_MODULE,
.fb_setcolreg = maxinefb_setcolreg,
.fb_fillrect = cfb_fillrect,
diff --git a/drivers/video/fbdev/mb862xx/mb862xxfb.h b/drivers/video/fbdev/mb862xx/mb862xxfb.h
index 50bc9b584ca1..52a77ea4e849 100644
--- a/drivers/video/fbdev/mb862xx/mb862xxfb.h
+++ b/drivers/video/fbdev/mb862xx/mb862xxfb.h
@@ -89,7 +89,7 @@ struct mb862xxfb_par {
u32 pseudo_palette[16];
};
-extern void mb862xxfb_init_accel(struct fb_info *info, int xres);
+extern void mb862xxfb_init_accel(struct fb_info *info, struct fb_ops *fbops, int xres);
#ifdef CONFIG_FB_MB862XX_I2C
extern int mb862xx_i2c_init(struct mb862xxfb_par *par);
extern void mb862xx_i2c_exit(struct mb862xxfb_par *par);
diff --git a/drivers/video/fbdev/mb862xx/mb862xxfb_accel.c b/drivers/video/fbdev/mb862xx/mb862xxfb_accel.c
index f58ff900e82a..42569264801f 100644
--- a/drivers/video/fbdev/mb862xx/mb862xxfb_accel.c
+++ b/drivers/video/fbdev/mb862xx/mb862xxfb_accel.c
@@ -303,19 +303,19 @@ static void mb86290fb_fillrect(struct fb_info *info,
mb862xxfb_write_fifo(7, cmd, info);
}
-void mb862xxfb_init_accel(struct fb_info *info, int xres)
+void mb862xxfb_init_accel(struct fb_info *info, struct fb_ops *fbops, int xres)
{
struct mb862xxfb_par *par = info->par;
if (info->var.bits_per_pixel == 32) {
- info->fbops->fb_fillrect = cfb_fillrect;
- info->fbops->fb_copyarea = cfb_copyarea;
- info->fbops->fb_imageblit = cfb_imageblit;
+ fbops->fb_fillrect = cfb_fillrect;
+ fbops->fb_copyarea = cfb_copyarea;
+ fbops->fb_imageblit = cfb_imageblit;
} else {
outreg(disp, GC_L0EM, 3);
- info->fbops->fb_fillrect = mb86290fb_fillrect;
- info->fbops->fb_copyarea = mb86290fb_copyarea;
- info->fbops->fb_imageblit = mb86290fb_imageblit;
+ fbops->fb_fillrect = mb86290fb_fillrect;
+ fbops->fb_copyarea = mb86290fb_copyarea;
+ fbops->fb_imageblit = mb86290fb_imageblit;
}
outreg(draw, GDC_REG_DRAW_BASE, 0);
outreg(draw, GDC_REG_MODE_MISC, 0x8000);
@@ -326,6 +326,5 @@ void mb862xxfb_init_accel(struct fb_info *info, int xres)
FBINFO_HWACCEL_IMAGEBLIT;
info->fix.accel = 0xff; /*FIXME: add right define */
}
-EXPORT_SYMBOL(mb862xxfb_init_accel);
MODULE_LICENSE("GPL v2");
diff --git a/drivers/video/fbdev/mb862xx/mb862xxfbdrv.c b/drivers/video/fbdev/mb862xx/mb862xxfbdrv.c
index 962c0171d271..52755b591c14 100644
--- a/drivers/video/fbdev/mb862xx/mb862xxfbdrv.c
+++ b/drivers/video/fbdev/mb862xx/mb862xxfbdrv.c
@@ -194,6 +194,8 @@ static int mb862xxfb_check_var(struct fb_var_screeninfo *var,
return 0;
}
+static struct fb_ops mb862xxfb_ops;
+
/*
* set display parameters
*/
@@ -204,7 +206,7 @@ static int mb862xxfb_set_par(struct fb_info *fbi)
dev_dbg(par->dev, "%s\n", __func__);
if (par->type == BT_CORALP)
- mb862xxfb_init_accel(fbi, fbi->var.xres);
+ mb862xxfb_init_accel(fbi, &mb862xxfb_ops, fbi->var.xres);
if (par->pre_init)
return 0;
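
/*
 * Illustrative aside, not part of the patch: a driver that still picks
 * acceleration hooks at runtime keeps one file-local, deliberately
 * non-const struct fb_ops and mutates that, instead of writing through
 * the now-const info->fbops pointer. Hypothetical reduced form:
 */
static struct fb_ops demo_ops;	/* deliberately not const */

static void demo_hw_fillrect(struct fb_info *info,
			     const struct fb_fillrect *rect)
{
	cfb_fillrect(info, rect);	/* stand-in for a real accel path */
}

static void demo_select_accel(struct fb_info *info, bool accel)
{
	demo_ops.fb_fillrect = accel ? demo_hw_fillrect : cfb_fillrect;
	info->fbops = &demo_ops;	/* const pointer, mutable object */
}
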
diff --git a/drivers/video/fbdev/mbx/mbxfb.c b/drivers/video/fbdev/mbx/mbxfb.c
index 50935252b50b..6dc287c819cb 100644
--- a/drivers/video/fbdev/mbx/mbxfb.c
+++ b/drivers/video/fbdev/mbx/mbxfb.c
@@ -671,7 +671,7 @@ static int mbxfb_ioctl(struct fb_info *info, unsigned int cmd,
return -EINVAL;
}
-static struct fb_ops mbxfb_ops = {
+static const struct fb_ops mbxfb_ops = {
.owner = THIS_MODULE,
.fb_check_var = mbxfb_check_var,
.fb_set_par = mbxfb_set_par,
@@ -938,7 +938,7 @@ static int mbxfb_probe(struct platform_device *dev)
}
mfbi->reg_phys_addr = mfbi->reg_res->start;
- mfbi->reg_virt_addr = devm_ioremap_nocache(&dev->dev,
+ mfbi->reg_virt_addr = devm_ioremap(&dev->dev,
mfbi->reg_phys_addr,
res_size(mfbi->reg_req));
if (!mfbi->reg_virt_addr) {
@@ -948,7 +948,7 @@ static int mbxfb_probe(struct platform_device *dev)
}
virt_base_2700 = mfbi->reg_virt_addr;
- mfbi->fb_virt_addr = devm_ioremap_nocache(&dev->dev, mfbi->fb_phys_addr,
+ mfbi->fb_virt_addr = devm_ioremap(&dev->dev, mfbi->fb_phys_addr,
res_size(mfbi->fb_req));
if (!mfbi->fb_virt_addr) {
dev_err(&dev->dev, "failed to ioremap frame buffer\n");
diff --git a/drivers/video/fbdev/metronomefb.c b/drivers/video/fbdev/metronomefb.c
index bb4fee52e501..a42e2eceee48 100644
--- a/drivers/video/fbdev/metronomefb.c
+++ b/drivers/video/fbdev/metronomefb.c
@@ -558,7 +558,7 @@ static ssize_t metronomefb_write(struct fb_info *info, const char __user *buf,
return (err) ? err : count;
}
-static struct fb_ops metronomefb_ops = {
+static const struct fb_ops metronomefb_ops = {
.owner = THIS_MODULE,
.fb_write = metronomefb_write,
.fb_fillrect = metronomefb_fillrect,
diff --git a/drivers/video/fbdev/mmp/Kconfig b/drivers/video/fbdev/mmp/Kconfig
index 9041ffd2cfcf..5c6cc97c96f0 100644
--- a/drivers/video/fbdev/mmp/Kconfig
+++ b/drivers/video/fbdev/mmp/Kconfig
@@ -1,7 +1,7 @@
# SPDX-License-Identifier: GPL-2.0-only
menuconfig MMP_DISP
tristate "Marvell MMP Display Subsystem support"
- depends on CPU_PXA910 || CPU_MMP2
+ depends on CPU_PXA910 || CPU_MMP2 || COMPILE_TEST
help
Marvell Display Subsystem support.
diff --git a/drivers/video/fbdev/mmp/fb/Kconfig b/drivers/video/fbdev/mmp/fb/Kconfig
index 39944eb23ef8..0ec2e3fb9e17 100644
--- a/drivers/video/fbdev/mmp/fb/Kconfig
+++ b/drivers/video/fbdev/mmp/fb/Kconfig
@@ -1,6 +1,4 @@
# SPDX-License-Identifier: GPL-2.0-only
-if MMP_DISP
-
config MMP_FB
tristate "fb driver for Marvell MMP Display Subsystem"
depends on FB
@@ -10,5 +8,3 @@ config MMP_FB
default y
help
fb driver for Marvell MMP Display Subsystem
-
-endif
diff --git a/drivers/video/fbdev/mmp/fb/mmpfb.c b/drivers/video/fbdev/mmp/fb/mmpfb.c
index 47bc7c59bbd8..01c75c031cb6 100644
--- a/drivers/video/fbdev/mmp/fb/mmpfb.c
+++ b/drivers/video/fbdev/mmp/fb/mmpfb.c
@@ -454,7 +454,7 @@ static int mmpfb_blank(int blank, struct fb_info *info)
return 0;
}
-static struct fb_ops mmpfb_ops = {
+static const struct fb_ops mmpfb_ops = {
.owner = THIS_MODULE,
.fb_blank = mmpfb_blank,
.fb_check_var = mmpfb_check_var,
@@ -522,7 +522,7 @@ static int fb_info_setup(struct fb_info *info,
info->var.bits_per_pixel / 8;
info->fbops = &mmpfb_ops;
info->pseudo_palette = fbi->pseudo_palette;
- info->screen_base = fbi->fb_start;
+ info->screen_buffer = fbi->fb_start;
info->screen_size = fbi->fb_size;
/* For FB framework: Allocate color map and Register framebuffer*/
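
/*
 * Illustrative aside, not part of the patch: struct fb_info keeps
 * screen_base (char __iomem *) and screen_buffer (char *) in a union,
 * so a framebuffer that lives in system RAM is better assigned through
 * screen_buffer; that documents the address space and keeps sparse
 * quiet. Hypothetical helper:
 */
static void demo_attach_sysmem_fb(struct fb_info *info, void *vaddr,
				  size_t size)
{
	info->screen_buffer = vaddr;	/* system RAM, not __iomem */
	info->screen_size = size;
}
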
diff --git a/drivers/video/fbdev/mmp/hw/Kconfig b/drivers/video/fbdev/mmp/hw/Kconfig
index 4d018cf661ec..7ebe125093d5 100644
--- a/drivers/video/fbdev/mmp/hw/Kconfig
+++ b/drivers/video/fbdev/mmp/hw/Kconfig
@@ -1,9 +1,8 @@
# SPDX-License-Identifier: GPL-2.0-only
-if MMP_DISP
-
config MMP_DISP_CONTROLLER
bool "mmp display controller hw support"
- depends on CPU_PXA910 || CPU_MMP2
+ depends on HAVE_CLK && HAS_IOMEM
+ depends on CPU_PXA910 || CPU_MMP2 || COMPILE_TEST
help
Marvell MMP display hw controller support
this controller is used on Marvell PXA910 and
@@ -16,5 +15,3 @@ config MMP_DISP_SPI
help
Marvell MMP display hw controller spi port support
will register as a spi master for panel usage
-
-endif
diff --git a/drivers/video/fbdev/mmp/hw/mmp_ctrl.c b/drivers/video/fbdev/mmp/hw/mmp_ctrl.c
index 17174cd7a5bb..061a105afb86 100644
--- a/drivers/video/fbdev/mmp/hw/mmp_ctrl.c
+++ b/drivers/video/fbdev/mmp/hw/mmp_ctrl.c
@@ -136,19 +136,26 @@ static void overlay_set_win(struct mmp_overlay *overlay, struct mmp_win *win)
mutex_lock(&overlay->access_ok);
if (overlay_is_vid(overlay)) {
- writel_relaxed(win->pitch[0], &regs->v_pitch_yc);
- writel_relaxed(win->pitch[2] << 16 |
- win->pitch[1], &regs->v_pitch_uv);
-
- writel_relaxed((win->ysrc << 16) | win->xsrc, &regs->v_size);
- writel_relaxed((win->ydst << 16) | win->xdst, &regs->v_size_z);
- writel_relaxed(win->ypos << 16 | win->xpos, &regs->v_start);
+ writel_relaxed(win->pitch[0],
+ (void __iomem *)&regs->v_pitch_yc);
+ writel_relaxed(win->pitch[2] << 16 | win->pitch[1],
+ (void __iomem *)&regs->v_pitch_uv);
+
+ writel_relaxed((win->ysrc << 16) | win->xsrc,
+ (void __iomem *)&regs->v_size);
+ writel_relaxed((win->ydst << 16) | win->xdst,
+ (void __iomem *)&regs->v_size_z);
+ writel_relaxed(win->ypos << 16 | win->xpos,
+ (void __iomem *)&regs->v_start);
} else {
- writel_relaxed(win->pitch[0], &regs->g_pitch);
-
- writel_relaxed((win->ysrc << 16) | win->xsrc, &regs->g_size);
- writel_relaxed((win->ydst << 16) | win->xdst, &regs->g_size_z);
- writel_relaxed(win->ypos << 16 | win->xpos, &regs->g_start);
+ writel_relaxed(win->pitch[0], (void __iomem *)&regs->g_pitch);
+
+ writel_relaxed((win->ysrc << 16) | win->xsrc,
+ (void __iomem *)&regs->g_size);
+ writel_relaxed((win->ydst << 16) | win->xdst,
+ (void __iomem *)&regs->g_size_z);
+ writel_relaxed(win->ypos << 16 | win->xpos,
+ (void __iomem *)&regs->g_start);
}
dmafetch_set_fmt(overlay);
@@ -233,11 +240,11 @@ static int overlay_set_addr(struct mmp_overlay *overlay, struct mmp_addr *addr)
memcpy(&overlay->addr, addr, sizeof(struct mmp_addr));
if (overlay_is_vid(overlay)) {
- writel_relaxed(addr->phys[0], &regs->v_y0);
- writel_relaxed(addr->phys[1], &regs->v_u0);
- writel_relaxed(addr->phys[2], &regs->v_v0);
+ writel_relaxed(addr->phys[0], (void __iomem *)&regs->v_y0);
+ writel_relaxed(addr->phys[1], (void __iomem *)&regs->v_u0);
+ writel_relaxed(addr->phys[2], (void __iomem *)&regs->v_v0);
} else
- writel_relaxed(addr->phys[0], &regs->g_0);
+ writel_relaxed(addr->phys[0], (void __iomem *)&regs->g_0);
return overlay->addr.phys[0];
}
@@ -268,16 +275,18 @@ static void path_set_mode(struct mmp_path *path, struct mmp_mode *mode)
tmp |= dsi_rbswap & CFG_INTFRBSWAP_MASK;
writel_relaxed(tmp, ctrl_regs(path) + intf_rbswap_ctrl(path->id));
- writel_relaxed((mode->yres << 16) | mode->xres, &regs->screen_active);
+ writel_relaxed((mode->yres << 16) | mode->xres,
+ (void __iomem *)&regs->screen_active);
writel_relaxed((mode->left_margin << 16) | mode->right_margin,
- &regs->screen_h_porch);
+ (void __iomem *)&regs->screen_h_porch);
writel_relaxed((mode->upper_margin << 16) | mode->lower_margin,
- &regs->screen_v_porch);
+ (void __iomem *)&regs->screen_v_porch);
total_x = mode->xres + mode->left_margin + mode->right_margin +
mode->hsync_len;
total_y = mode->yres + mode->upper_margin + mode->lower_margin +
mode->vsync_len;
- writel_relaxed((total_y << 16) | total_x, &regs->screen_size);
+ writel_relaxed((total_y << 16) | total_x,
+ (void __iomem *)&regs->screen_size);
/* vsync ctrl */
if (path->output_type == PATH_OUT_DSI)
@@ -285,7 +294,7 @@ static void path_set_mode(struct mmp_path *path, struct mmp_mode *mode)
else
vsync_ctrl = ((mode->xres + mode->right_margin) << 16)
| (mode->xres + mode->right_margin);
- writel_relaxed(vsync_ctrl, &regs->vsync_ctrl);
+ writel_relaxed(vsync_ctrl, (void __iomem *)&regs->vsync_ctrl);
/* set pixclock div */
sclk_src = clk_get_rate(path_to_ctrl(path)->clk);
@@ -366,9 +375,9 @@ static void path_set_default(struct mmp_path *path)
writel_relaxed(dma_ctrl1, ctrl_regs(path) + dma_ctrl(1, path->id));
/* Configure default register values */
- writel_relaxed(0x00000000, &regs->blank_color);
- writel_relaxed(0x00000000, &regs->g_1);
- writel_relaxed(0x00000000, &regs->g_start);
+ writel_relaxed(0x00000000, (void __iomem *)&regs->blank_color);
+ writel_relaxed(0x00000000, (void __iomem *)&regs->g_1);
+ writel_relaxed(0x00000000, (void __iomem *)&regs->g_start);
/*
* 1.enable multiple burst request in DMA AXI
@@ -447,7 +456,6 @@ static int mmphw_probe(struct platform_device *pdev)
irq = platform_get_irq(pdev, 0);
if (irq < 0) {
- dev_err(&pdev->dev, "%s: no IRQ defined\n", __func__);
ret = -ENOENT;
goto failed;
}
@@ -485,7 +493,7 @@ static int mmphw_probe(struct platform_device *pdev)
goto failed;
}
- ctrl->reg_base = devm_ioremap_nocache(ctrl->dev,
+ ctrl->reg_base = devm_ioremap(ctrl->dev,
res->start, resource_size(res));
if (ctrl->reg_base == NULL) {
dev_err(ctrl->dev, "%s: res %pR map failed\n", __func__, res);
diff --git a/drivers/video/fbdev/mmp/hw/mmp_ctrl.h b/drivers/video/fbdev/mmp/hw/mmp_ctrl.h
index e9ec45c118fb..335d4983dc52 100644
--- a/drivers/video/fbdev/mmp/hw/mmp_ctrl.h
+++ b/drivers/video/fbdev/mmp/hw/mmp_ctrl.h
@@ -1393,7 +1393,7 @@ struct mmphw_ctrl {
/* platform related, get from config */
const char *name;
int irq;
- void *reg_base;
+ void __iomem *reg_base;
struct clk *clk;
/* sys info */
@@ -1429,7 +1429,7 @@ static inline struct mmphw_ctrl *overlay_to_ctrl(struct mmp_overlay *overlay)
return path_to_ctrl(overlay->path);
}
-static inline void *ctrl_regs(struct mmp_path *path)
+static inline void __iomem *ctrl_regs(struct mmp_path *path)
{
return path_to_ctrl(path)->reg_base;
}
@@ -1438,11 +1438,11 @@ static inline void *ctrl_regs(struct mmp_path *path)
static inline struct lcd_regs *path_regs(struct mmp_path *path)
{
if (path->id == PATH_PN)
- return (struct lcd_regs *)(ctrl_regs(path) + 0xc0);
+ return (struct lcd_regs __force *)(ctrl_regs(path) + 0xc0);
else if (path->id == PATH_TV)
- return (struct lcd_regs *)ctrl_regs(path);
+ return (struct lcd_regs __force *)ctrl_regs(path);
else if (path->id == PATH_P2)
- return (struct lcd_regs *)(ctrl_regs(path) + 0x200);
+ return (struct lcd_regs __force *)(ctrl_regs(path) + 0x200);
else {
dev_err(path->dev, "path id %d invalid\n", path->id);
BUG_ON(1);
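
/*
 * Illustrative aside, not part of the patch: once the register base is
 * typed void __iomem *, sparse flags any plain dereference, and a C
 * struct overlaid on the register window wants the __iomem marker too
 * so that &regs->field feeds writel_relaxed() without further casts.
 * Hypothetical layout and accessor:
 */
struct demo_regs {
	u32 ctrl;
	u32 status;
};	/* hypothetical register layout */

static void demo_write_ctrl(void __iomem *base, u32 val)
{
	struct demo_regs __iomem *regs = base;	/* same address space */

	writel_relaxed(val, &regs->ctrl);
}
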
diff --git a/drivers/video/fbdev/mmp/hw/mmp_spi.c b/drivers/video/fbdev/mmp/hw/mmp_spi.c
index bbb75de5e441..1911a47769b6 100644
--- a/drivers/video/fbdev/mmp/hw/mmp_spi.c
+++ b/drivers/video/fbdev/mmp/hw/mmp_spi.c
@@ -31,7 +31,7 @@ static inline int lcd_spi_write(struct spi_device *spi, u32 data)
{
int timeout = 100000, isr, ret = 0;
u32 tmp;
- void *reg_base =
+ void __iomem *reg_base = (void __iomem *)
*(void **)spi_master_get_devdata(spi->master);
/* clear ISR */
@@ -80,7 +80,7 @@ static inline int lcd_spi_write(struct spi_device *spi, u32 data)
static int lcd_spi_setup(struct spi_device *spi)
{
- void *reg_base =
+ void __iomem *reg_base = (void __iomem *)
*(void **)spi_master_get_devdata(spi->master);
u32 tmp;
@@ -146,7 +146,7 @@ int lcd_spi_register(struct mmphw_ctrl *ctrl)
return -ENOMEM;
}
p_regbase = spi_master_get_devdata(master);
- *p_regbase = ctrl->reg_base;
+ *p_regbase = (void __force *)ctrl->reg_base;
/* set bus num to 5 to avoid conflict with other spi hosts */
master->bus_num = 5;
diff --git a/drivers/video/fbdev/mx3fb.c b/drivers/video/fbdev/mx3fb.c
index bafd5f5fac5a..4af28e4421e5 100644
--- a/drivers/video/fbdev/mx3fb.c
+++ b/drivers/video/fbdev/mx3fb.c
@@ -1249,7 +1249,7 @@ static int mx3fb_pan_display(struct fb_var_screeninfo *var,
* invoked by the core framebuffer driver to perform operations like
* blitting, rectangle filling, copy regions and cursor definition.
*/
-static struct fb_ops mx3fb_ops = {
+static const struct fb_ops mx3fb_ops = {
.owner = THIS_MODULE,
.fb_set_par = mx3fb_set_par,
.fb_check_var = mx3fb_check_var,
@@ -1389,7 +1389,8 @@ static int mx3fb_unmap_video_memory(struct fb_info *fbi)
* mx3fb_init_fbinfo() - initialize framebuffer information object.
* @return: initialized framebuffer structure.
*/
-static struct fb_info *mx3fb_init_fbinfo(struct device *dev, struct fb_ops *ops)
+static struct fb_info *mx3fb_init_fbinfo(struct device *dev,
+ const struct fb_ops *ops)
{
struct fb_info *fbi;
struct mx3fb_info *mx3fbi;
diff --git a/drivers/video/fbdev/neofb.c b/drivers/video/fbdev/neofb.c
index b770946a0920..e6ea853c1723 100644
--- a/drivers/video/fbdev/neofb.c
+++ b/drivers/video/fbdev/neofb.c
@@ -1610,7 +1610,7 @@ neofb_cursor(struct fb_info *info, struct fb_cursor *cursor)
}
*/
-static struct fb_ops neofb_ops = {
+static const struct fb_ops neofb_ops = {
.owner = THIS_MODULE,
.fb_open = neofb_open,
.fb_release = neofb_release,
diff --git a/drivers/video/fbdev/nvidia/nvidia.c b/drivers/video/fbdev/nvidia/nvidia.c
index fbeeed5afe35..c583c018304d 100644
--- a/drivers/video/fbdev/nvidia/nvidia.c
+++ b/drivers/video/fbdev/nvidia/nvidia.c
@@ -607,6 +607,8 @@ static int nvidiafb_cursor(struct fb_info *info, struct fb_cursor *cursor)
return 0;
}
+static struct fb_ops nvidia_fb_ops;
+
static int nvidiafb_set_par(struct fb_info *info)
{
struct nvidia_par *par = info->par;
@@ -660,19 +662,19 @@ static int nvidiafb_set_par(struct fb_info *info)
info->fix.line_length = (info->var.xres_virtual *
info->var.bits_per_pixel) >> 3;
if (info->var.accel_flags) {
- info->fbops->fb_imageblit = nvidiafb_imageblit;
- info->fbops->fb_fillrect = nvidiafb_fillrect;
- info->fbops->fb_copyarea = nvidiafb_copyarea;
- info->fbops->fb_sync = nvidiafb_sync;
+ nvidia_fb_ops.fb_imageblit = nvidiafb_imageblit;
+ nvidia_fb_ops.fb_fillrect = nvidiafb_fillrect;
+ nvidia_fb_ops.fb_copyarea = nvidiafb_copyarea;
+ nvidia_fb_ops.fb_sync = nvidiafb_sync;
info->pixmap.scan_align = 4;
info->flags &= ~FBINFO_HWACCEL_DISABLED;
info->flags |= FBINFO_READS_FAST;
NVResetGraphics(info);
} else {
- info->fbops->fb_imageblit = cfb_imageblit;
- info->fbops->fb_fillrect = cfb_fillrect;
- info->fbops->fb_copyarea = cfb_copyarea;
- info->fbops->fb_sync = NULL;
+ nvidia_fb_ops.fb_imageblit = cfb_imageblit;
+ nvidia_fb_ops.fb_fillrect = cfb_fillrect;
+ nvidia_fb_ops.fb_copyarea = cfb_copyarea;
+ nvidia_fb_ops.fb_sync = NULL;
info->pixmap.scan_align = 1;
info->flags |= FBINFO_HWACCEL_DISABLED;
info->flags &= ~FBINFO_READS_FAST;
@@ -1165,7 +1167,7 @@ static int nvidia_set_fbinfo(struct fb_info *info)
info->pixmap.flags = FB_PIXMAP_SYSTEM;
if (!hwcur)
- info->fbops->fb_cursor = NULL;
+ nvidia_fb_ops.fb_cursor = NULL;
info->var.accel_flags = (!noaccel);
diff --git a/drivers/video/fbdev/ocfb.c b/drivers/video/fbdev/ocfb.c
index a970edc2a6f8..bfa4ed421148 100644
--- a/drivers/video/fbdev/ocfb.c
+++ b/drivers/video/fbdev/ocfb.c
@@ -285,7 +285,7 @@ static int ocfb_init_var(struct ocfb_dev *fbdev)
return 0;
}
-static struct fb_ops ocfb_ops = {
+static const struct fb_ops ocfb_ops = {
.owner = THIS_MODULE,
.fb_setcolreg = ocfb_setcolreg,
.fb_fillrect = cfb_fillrect,
@@ -297,7 +297,6 @@ static int ocfb_probe(struct platform_device *pdev)
{
int ret = 0;
struct ocfb_dev *fbdev;
- struct resource *res;
int fbsize;
fbdev = devm_kzalloc(&pdev->dev, sizeof(*fbdev), GFP_KERNEL);
@@ -319,13 +318,7 @@ static int ocfb_probe(struct platform_device *pdev)
ocfb_init_var(fbdev);
ocfb_init_fix(fbdev);
- /* Request I/O resource */
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!res) {
- dev_err(&pdev->dev, "I/O resource request failed\n");
- return -ENXIO;
- }
- fbdev->regs = devm_ioremap_resource(&pdev->dev, res);
+ fbdev->regs = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(fbdev->regs))
return PTR_ERR(fbdev->regs);
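
/*
 * Illustrative aside, not part of the patch:
 * devm_platform_ioremap_resource() folds the usual
 * platform_get_resource() + devm_ioremap_resource() pair into a single
 * call, including the error message when the resource is absent.
 * Hypothetical probe fragment:
 */
static int demo_probe(struct platform_device *pdev)
{
	void __iomem *regs = devm_platform_ioremap_resource(pdev, 0);

	if (IS_ERR(regs))
		return PTR_ERR(regs);
	return 0;
}
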
diff --git a/drivers/video/fbdev/offb.c b/drivers/video/fbdev/offb.c
index fbc6eafb63c7..5cd0f5f6a4ae 100644
--- a/drivers/video/fbdev/offb.c
+++ b/drivers/video/fbdev/offb.c
@@ -286,7 +286,7 @@ static void offb_destroy(struct fb_info *info)
framebuffer_release(info);
}
-static struct fb_ops offb_ops = {
+static const struct fb_ops offb_ops = {
.owner = THIS_MODULE,
.fb_destroy = offb_destroy,
.fb_setcolreg = offb_setcolreg,
diff --git a/drivers/video/fbdev/omap/omapfb_main.c b/drivers/video/fbdev/omap/omapfb_main.c
index 702cca59bda1..e8a304f84ea8 100644
--- a/drivers/video/fbdev/omap/omapfb_main.c
+++ b/drivers/video/fbdev/omap/omapfb_main.c
@@ -1052,7 +1052,7 @@ static int omapfb_ioctl(struct fb_info *fbi, unsigned int cmd,
{
struct omapfb_plane_struct *plane = fbi->par;
struct omapfb_device *fbdev = plane->fbdev;
- struct fb_ops *ops = fbi->fbops;
+ const struct fb_ops *ops = fbi->fbops;
union {
struct omapfb_update_window update_window;
struct omapfb_plane_info plane_info;
diff --git a/drivers/video/fbdev/omap2/omapfb/dss/dispc.c b/drivers/video/fbdev/omap2/omapfb/dss/dispc.c
index 376ee5bc3ddc..ce37da85cc45 100644
--- a/drivers/video/fbdev/omap2/omapfb/dss/dispc.c
+++ b/drivers/video/fbdev/omap2/omapfb/dss/dispc.c
@@ -1635,7 +1635,7 @@ static void dispc_ovl_set_scaling_uv(enum omap_plane plane,
{
int scale_x = out_width != orig_width;
int scale_y = out_height != orig_height;
- bool chroma_upscale = plane != OMAP_DSS_WB ? true : false;
+ bool chroma_upscale = plane != OMAP_DSS_WB;
if (!dss_has_feature(FEAT_HANDLE_UV_SEPARATE))
return;
@@ -3100,9 +3100,9 @@ static bool _dispc_mgr_pclk_ok(enum omap_channel channel,
unsigned long pclk)
{
if (dss_mgr_is_lcd(channel))
- return pclk <= dispc.feat->max_lcd_pclk ? true : false;
+ return pclk <= dispc.feat->max_lcd_pclk;
else
- return pclk <= dispc.feat->max_tv_pclk ? true : false;
+ return pclk <= dispc.feat->max_tv_pclk;
}
bool dispc_mgr_timings_ok(enum omap_channel channel,
diff --git a/drivers/video/fbdev/omap2/omapfb/omapfb-main.c b/drivers/video/fbdev/omap2/omapfb/omapfb-main.c
index 858c2c011d19..8dfa9158ba78 100644
--- a/drivers/video/fbdev/omap2/omapfb/omapfb-main.c
+++ b/drivers/video/fbdev/omap2/omapfb/omapfb-main.c
@@ -1280,7 +1280,7 @@ ssize_t omapfb_write(struct fb_info *info, const char __user *buf,
}
#endif
-static struct fb_ops omapfb_ops = {
+static const struct fb_ops omapfb_ops = {
.owner = THIS_MODULE,
.fb_open = omapfb_open,
.fb_release = omapfb_release,
diff --git a/drivers/video/fbdev/omap2/omapfb/vrfb.c b/drivers/video/fbdev/omap2/omapfb/vrfb.c
index 819e0bc35b2d..ee0dd4c6a646 100644
--- a/drivers/video/fbdev/omap2/omapfb/vrfb.c
+++ b/drivers/video/fbdev/omap2/omapfb/vrfb.c
@@ -339,9 +339,7 @@ static int __init vrfb_probe(struct platform_device *pdev)
int i;
/* first resource is the register res, the rest are vrfb contexts */
-
- mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- vrfb_base = devm_ioremap_resource(&pdev->dev, mem);
+ vrfb_base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(vrfb_base))
return PTR_ERR(vrfb_base);
diff --git a/drivers/video/fbdev/p9100.c b/drivers/video/fbdev/p9100.c
index 8c18cc51aae2..6da672e92643 100644
--- a/drivers/video/fbdev/p9100.c
+++ b/drivers/video/fbdev/p9100.c
@@ -37,7 +37,7 @@ static int p9100_ioctl(struct fb_info *, unsigned int, unsigned long);
* Frame buffer operations
*/
-static struct fb_ops p9100_ops = {
+static const struct fb_ops p9100_ops = {
.owner = THIS_MODULE,
.fb_setcolreg = p9100_setcolreg,
.fb_blank = p9100_blank,
diff --git a/drivers/video/fbdev/platinumfb.c b/drivers/video/fbdev/platinumfb.c
index 632b246ca35f..ce413a9df06e 100644
--- a/drivers/video/fbdev/platinumfb.c
+++ b/drivers/video/fbdev/platinumfb.c
@@ -96,7 +96,7 @@ static int platinum_var_to_par(struct fb_var_screeninfo *var,
* Interface used by the world
*/
-static struct fb_ops platinumfb_ops = {
+static const struct fb_ops platinumfb_ops = {
.owner = THIS_MODULE,
.fb_check_var = platinumfb_check_var,
.fb_set_par = platinumfb_set_par,
diff --git a/drivers/video/fbdev/pm2fb.c b/drivers/video/fbdev/pm2fb.c
index 1dcf02e12af4..fe2cadeb1b66 100644
--- a/drivers/video/fbdev/pm2fb.c
+++ b/drivers/video/fbdev/pm2fb.c
@@ -1483,7 +1483,7 @@ static int pm2fb_cursor(struct fb_info *info, struct fb_cursor *cursor)
* Frame buffer operations
*/
-static struct fb_ops pm2fb_ops = {
+static const struct fb_ops pm2fb_ops = {
.owner = THIS_MODULE,
.fb_check_var = pm2fb_check_var,
.fb_set_par = pm2fb_set_par,
@@ -1563,7 +1563,7 @@ static int pm2fb_probe(struct pci_dev *pdev, const struct pci_device_id *id)
goto err_exit_neither;
}
default_par->v_regs =
- ioremap_nocache(pm2fb_fix.mmio_start, pm2fb_fix.mmio_len);
+ ioremap(pm2fb_fix.mmio_start, pm2fb_fix.mmio_len);
if (!default_par->v_regs) {
printk(KERN_WARNING "pm2fb: Can't remap %s register area.\n",
pm2fb_fix.id);
diff --git a/drivers/video/fbdev/pm3fb.c b/drivers/video/fbdev/pm3fb.c
index 6130aa56a1e9..2f5e23c8f8ec 100644
--- a/drivers/video/fbdev/pm3fb.c
+++ b/drivers/video/fbdev/pm3fb.c
@@ -1200,7 +1200,7 @@ static int pm3fb_blank(int blank_mode, struct fb_info *info)
* Frame buffer operations
*/
-static struct fb_ops pm3fb_ops = {
+static const struct fb_ops pm3fb_ops = {
.owner = THIS_MODULE,
.fb_check_var = pm3fb_check_var,
.fb_set_par = pm3fb_set_par,
@@ -1236,7 +1236,7 @@ static unsigned long pm3fb_size_memory(struct pm3_par *par)
return 0;
}
screen_mem =
- ioremap_nocache(pm3fb_fix.smem_start, pm3fb_fix.smem_len);
+ ioremap(pm3fb_fix.smem_start, pm3fb_fix.smem_len);
if (!screen_mem) {
printk(KERN_WARNING "pm3fb: Can't ioremap smem area.\n");
release_mem_region(pm3fb_fix.smem_start, pm3fb_fix.smem_len);
@@ -1347,7 +1347,7 @@ static int pm3fb_probe(struct pci_dev *dev, const struct pci_device_id *ent)
goto err_exit_neither;
}
par->v_regs =
- ioremap_nocache(pm3fb_fix.mmio_start, pm3fb_fix.mmio_len);
+ ioremap(pm3fb_fix.mmio_start, pm3fb_fix.mmio_len);
if (!par->v_regs) {
printk(KERN_WARNING "pm3fb: Can't remap %s register area.\n",
pm3fb_fix.id);
diff --git a/drivers/video/fbdev/pmag-aa-fb.c b/drivers/video/fbdev/pmag-aa-fb.c
index d1e78ce3a9c2..62c8de99af0b 100644
--- a/drivers/video/fbdev/pmag-aa-fb.c
+++ b/drivers/video/fbdev/pmag-aa-fb.c
@@ -147,7 +147,7 @@ static int aafb_blank(int blank, struct fb_info *info)
return 0;
}
-static struct fb_ops aafb_ops = {
+static const struct fb_ops aafb_ops = {
.owner = THIS_MODULE,
.fb_blank = aafb_blank,
.fb_fillrect = cfb_fillrect,
@@ -188,7 +188,7 @@ static int pmagaafb_probe(struct device *dev)
/* MMIO mapping setup. */
info->fix.mmio_start = start + PMAG_AA_BT455_OFFSET;
- par->mmio = ioremap_nocache(info->fix.mmio_start, info->fix.mmio_len);
+ par->mmio = ioremap(info->fix.mmio_start, info->fix.mmio_len);
if (!par->mmio) {
printk(KERN_ERR "%s: Cannot map MMIO\n", dev_name(dev));
err = -ENOMEM;
@@ -199,7 +199,7 @@ static int pmagaafb_probe(struct device *dev)
/* Frame buffer mapping setup. */
info->fix.smem_start = start + PMAG_AA_ONBOARD_FBMEM_OFFSET;
- info->screen_base = ioremap_nocache(info->fix.smem_start,
+ info->screen_base = ioremap(info->fix.smem_start,
info->fix.smem_len);
if (!info->screen_base) {
printk(KERN_ERR "%s: Cannot map FB\n", dev_name(dev));
diff --git a/drivers/video/fbdev/pmag-ba-fb.c b/drivers/video/fbdev/pmag-ba-fb.c
index 56b912bb28de..1296f9b370c2 100644
--- a/drivers/video/fbdev/pmag-ba-fb.c
+++ b/drivers/video/fbdev/pmag-ba-fb.c
@@ -117,7 +117,7 @@ static int pmagbafb_setcolreg(unsigned int regno, unsigned int red,
return 0;
}
-static struct fb_ops pmagbafb_ops = {
+static const struct fb_ops pmagbafb_ops = {
.owner = THIS_MODULE,
.fb_setcolreg = pmagbafb_setcolreg,
.fb_fillrect = cfb_fillrect,
@@ -180,7 +180,7 @@ static int pmagbafb_probe(struct device *dev)
/* MMIO mapping setup. */
info->fix.mmio_start = start;
- par->mmio = ioremap_nocache(info->fix.mmio_start, info->fix.mmio_len);
+ par->mmio = ioremap(info->fix.mmio_start, info->fix.mmio_len);
if (!par->mmio) {
printk(KERN_ERR "%s: Cannot map MMIO\n", dev_name(dev));
err = -ENOMEM;
@@ -190,7 +190,7 @@ static int pmagbafb_probe(struct device *dev)
/* Frame buffer mapping setup. */
info->fix.smem_start = start + PMAG_BA_FBMEM;
- info->screen_base = ioremap_nocache(info->fix.smem_start,
+ info->screen_base = ioremap(info->fix.smem_start,
info->fix.smem_len);
if (!info->screen_base) {
printk(KERN_ERR "%s: Cannot map FB\n", dev_name(dev));
diff --git a/drivers/video/fbdev/pmagb-b-fb.c b/drivers/video/fbdev/pmagb-b-fb.c
index 2822b2225924..9dccd51ee65a 100644
--- a/drivers/video/fbdev/pmagb-b-fb.c
+++ b/drivers/video/fbdev/pmagb-b-fb.c
@@ -121,7 +121,7 @@ static int pmagbbfb_setcolreg(unsigned int regno, unsigned int red,
return 0;
}
-static struct fb_ops pmagbbfb_ops = {
+static const struct fb_ops pmagbbfb_ops = {
.owner = THIS_MODULE,
.fb_setcolreg = pmagbbfb_setcolreg,
.fb_fillrect = cfb_fillrect,
@@ -287,7 +287,7 @@ static int pmagbbfb_probe(struct device *dev)
/* MMIO mapping setup. */
info->fix.mmio_start = start;
- par->mmio = ioremap_nocache(info->fix.mmio_start, info->fix.mmio_len);
+ par->mmio = ioremap(info->fix.mmio_start, info->fix.mmio_len);
if (!par->mmio) {
printk(KERN_ERR "%s: Cannot map MMIO\n", dev_name(dev));
err = -ENOMEM;
@@ -298,7 +298,7 @@ static int pmagbbfb_probe(struct device *dev)
/* Frame buffer mapping setup. */
info->fix.smem_start = start + PMAGB_B_FBMEM;
- par->smem = ioremap_nocache(info->fix.smem_start, info->fix.smem_len);
+ par->smem = ioremap(info->fix.smem_start, info->fix.smem_len);
if (!par->smem) {
printk(KERN_ERR "%s: Cannot map FB\n", dev_name(dev));
err = -ENOMEM;
diff --git a/drivers/video/fbdev/ps3fb.c b/drivers/video/fbdev/ps3fb.c
index 5ed2db39d823..834f63edf700 100644
--- a/drivers/video/fbdev/ps3fb.c
+++ b/drivers/video/fbdev/ps3fb.c
@@ -934,7 +934,7 @@ static irqreturn_t ps3fb_vsync_interrupt(int irq, void *ptr)
}
-static struct fb_ops ps3fb_ops = {
+static const struct fb_ops ps3fb_ops = {
.fb_open = ps3fb_open,
.fb_release = ps3fb_release,
.fb_read = fb_sys_read,
diff --git a/drivers/video/fbdev/pvr2fb.c b/drivers/video/fbdev/pvr2fb.c
index 0a3b2b7c7891..f18d457175d9 100644
--- a/drivers/video/fbdev/pvr2fb.c
+++ b/drivers/video/fbdev/pvr2fb.c
@@ -707,7 +707,7 @@ out_unmap:
}
#endif /* CONFIG_PVR2_DMA */
-static struct fb_ops pvr2fb_ops = {
+static const struct fb_ops pvr2fb_ops = {
.owner = THIS_MODULE,
.fb_setcolreg = pvr2fb_setcolreg,
.fb_blank = pvr2fb_blank,
@@ -770,7 +770,7 @@ static int __maybe_unused pvr2fb_common_init(void)
struct pvr2fb_par *par = currentpar;
unsigned long modememused, rev;
- fb_info->screen_base = ioremap_nocache(pvr2_fix.smem_start,
+ fb_info->screen_base = ioremap(pvr2_fix.smem_start,
pvr2_fix.smem_len);
if (!fb_info->screen_base) {
@@ -778,7 +778,7 @@ static int __maybe_unused pvr2fb_common_init(void)
goto out_err;
}
- par->mmio_base = ioremap_nocache(pvr2_fix.mmio_start,
+ par->mmio_base = ioremap(pvr2_fix.mmio_start,
pvr2_fix.mmio_len);
if (!par->mmio_base) {
printk(KERN_ERR "pvr2fb: Failed to remap mmio space\n");
diff --git a/drivers/video/fbdev/pxa168fb.c b/drivers/video/fbdev/pxa168fb.c
index 1410f476e135..9b9ec1468347 100644
--- a/drivers/video/fbdev/pxa168fb.c
+++ b/drivers/video/fbdev/pxa168fb.c
@@ -545,7 +545,7 @@ static irqreturn_t pxa168fb_handle_irq(int irq, void *dev_id)
return IRQ_NONE;
}
-static struct fb_ops pxa168fb_ops = {
+static const struct fb_ops pxa168fb_ops = {
.owner = THIS_MODULE,
.fb_check_var = pxa168fb_check_var,
.fb_set_par = pxa168fb_set_par,
@@ -665,7 +665,7 @@ static int pxa168fb_probe(struct platform_device *pdev)
/*
* Map LCD controller registers.
*/
- fbi->reg_base = devm_ioremap_nocache(&pdev->dev, res->start,
+ fbi->reg_base = devm_ioremap(&pdev->dev, res->start,
resource_size(res));
if (fbi->reg_base == NULL) {
ret = -ENOMEM;
@@ -766,8 +766,8 @@ failed_free_cmap:
failed_free_clk:
clk_disable_unprepare(fbi->clk);
failed_free_fbmem:
- dma_free_coherent(fbi->dev, info->fix.smem_len,
- info->screen_base, fbi->fb_start_dma);
+ dma_free_wc(fbi->dev, info->fix.smem_len,
+ info->screen_base, fbi->fb_start_dma);
failed_free_info:
kfree(info);
@@ -801,7 +801,7 @@ static int pxa168fb_remove(struct platform_device *pdev)
irq = platform_get_irq(pdev, 0);
- dma_free_wc(fbi->dev, PAGE_ALIGN(info->fix.smem_len),
+ dma_free_wc(fbi->dev, info->fix.smem_len,
info->screen_base, info->fix.smem_start);
clk_disable_unprepare(fbi->clk);
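
/*
 * Illustrative aside, not part of the patch: memory from dma_alloc_wc()
 * must be returned with dma_free_wc(), and both calls must agree on the
 * size, which is what the two hunks above restore. Hypothetical pairing:
 */
static void *demo_alloc_fb(struct device *dev, size_t size, dma_addr_t *dma)
{
	return dma_alloc_wc(dev, size, dma, GFP_KERNEL);
}

static void demo_free_fb(struct device *dev, size_t size, void *cpu,
			 dma_addr_t dma)
{
	dma_free_wc(dev, size, cpu, dma);	/* same size as the alloc */
}
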
diff --git a/drivers/video/fbdev/pxafb.c b/drivers/video/fbdev/pxafb.c
index f70c9f79622e..00b96a78676e 100644
--- a/drivers/video/fbdev/pxafb.c
+++ b/drivers/video/fbdev/pxafb.c
@@ -597,7 +597,7 @@ static int pxafb_blank(int blank, struct fb_info *info)
return 0;
}
-static struct fb_ops pxafb_ops = {
+static const struct fb_ops pxafb_ops = {
.owner = THIS_MODULE,
.fb_check_var = pxafb_check_var,
.fb_set_par = pxafb_set_par,
@@ -865,7 +865,7 @@ static int overlayfb_set_par(struct fb_info *info)
return 0;
}
-static struct fb_ops overlay_fb_ops = {
+static const struct fb_ops overlay_fb_ops = {
.owner = THIS_MODULE,
.fb_open = overlayfb_open,
.fb_release = overlayfb_release,
@@ -2237,7 +2237,6 @@ static int pxafb_probe(struct platform_device *dev)
{
struct pxafb_info *fbi;
struct pxafb_mach_info *inf, *pdata;
- struct resource *r;
int i, irq, ret;
dev_dbg(&dev->dev, "pxafb_probe\n");
@@ -2303,14 +2302,7 @@ static int pxafb_probe(struct platform_device *dev)
fbi->lcd_supply = NULL;
}
- r = platform_get_resource(dev, IORESOURCE_MEM, 0);
- if (r == NULL) {
- dev_err(&dev->dev, "no I/O memory resource defined\n");
- ret = -ENODEV;
- goto failed;
- }
-
- fbi->mmio_base = devm_ioremap_resource(&dev->dev, r);
+ fbi->mmio_base = devm_platform_ioremap_resource(dev, 0);
if (IS_ERR(fbi->mmio_base)) {
dev_err(&dev->dev, "failed to get I/O memory\n");
ret = -EBUSY;
diff --git a/drivers/video/fbdev/q40fb.c b/drivers/video/fbdev/q40fb.c
index 0b93aa964d43..79ff14a35c85 100644
--- a/drivers/video/fbdev/q40fb.c
+++ b/drivers/video/fbdev/q40fb.c
@@ -75,7 +75,7 @@ static int q40fb_setcolreg(unsigned regno, unsigned red, unsigned green,
return 0;
}
-static struct fb_ops q40fb_ops = {
+static const struct fb_ops q40fb_ops = {
.owner = THIS_MODULE,
.fb_setcolreg = q40fb_setcolreg,
.fb_fillrect = cfb_fillrect,
diff --git a/drivers/video/fbdev/riva/fbdev.c b/drivers/video/fbdev/riva/fbdev.c
index ca593a3e41d7..764ec3285e62 100644
--- a/drivers/video/fbdev/riva/fbdev.c
+++ b/drivers/video/fbdev/riva/fbdev.c
@@ -1673,7 +1673,7 @@ static int rivafb_sync(struct fb_info *info)
* ------------------------------------------------------------------------- */
/* kernel interface */
-static struct fb_ops riva_fb_ops = {
+static const struct fb_ops riva_fb_ops = {
.owner = THIS_MODULE,
.fb_open = rivafb_open,
.fb_release = rivafb_release,
diff --git a/drivers/video/fbdev/s1d13xxxfb.c b/drivers/video/fbdev/s1d13xxxfb.c
index e04efb567b5c..8048499e398d 100644
--- a/drivers/video/fbdev/s1d13xxxfb.c
+++ b/drivers/video/fbdev/s1d13xxxfb.c
@@ -809,7 +809,7 @@ static int s1d13xxxfb_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, info);
default_par = info->par;
- default_par->regs = ioremap_nocache(pdev->resource[1].start,
+ default_par->regs = ioremap(pdev->resource[1].start,
pdev->resource[1].end - pdev->resource[1].start +1);
if (!default_par->regs) {
printk(KERN_ERR PFX "unable to map registers\n");
@@ -818,7 +818,7 @@ static int s1d13xxxfb_probe(struct platform_device *pdev)
}
info->pseudo_palette = default_par->pseudo_palette;
- info->screen_base = ioremap_nocache(pdev->resource[0].start,
+ info->screen_base = ioremap(pdev->resource[0].start,
pdev->resource[0].end - pdev->resource[0].start +1);
if (!info->screen_base) {
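The ioremap_nocache() to ioremap() rename seen here, and repeated through the rest of this series (including devm_ioremap_nocache() to devm_ioremap()), is mechanical: ioremap() returns an uncached mapping on every architecture, so the _nocache spelling was a redundant alias and has been removed tree-wide. Callers change in name only:

	regs = ioremap(res->start, resource_size(res));	/* was ioremap_nocache() */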
diff --git a/drivers/video/fbdev/s3c-fb.c b/drivers/video/fbdev/s3c-fb.c
index ba04d7a67829..9dc925054930 100644
--- a/drivers/video/fbdev/s3c-fb.c
+++ b/drivers/video/fbdev/s3c-fb.c
@@ -1035,7 +1035,7 @@ static int s3c_fb_ioctl(struct fb_info *info, unsigned int cmd,
return ret;
}
-static struct fb_ops s3c_fb_ops = {
+static const struct fb_ops s3c_fb_ops = {
.owner = THIS_MODULE,
.fb_check_var = s3c_fb_check_var,
.fb_set_par = s3c_fb_set_par,
@@ -1411,8 +1411,7 @@ static int s3c_fb_probe(struct platform_device *pdev)
pm_runtime_enable(sfb->dev);
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- sfb->regs = devm_ioremap_resource(dev, res);
+ sfb->regs = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(sfb->regs)) {
ret = PTR_ERR(sfb->regs);
goto err_lcd_clk;
diff --git a/drivers/video/fbdev/s3c2410fb.c b/drivers/video/fbdev/s3c2410fb.c
index a702da89910b..2fb15a540167 100644
--- a/drivers/video/fbdev/s3c2410fb.c
+++ b/drivers/video/fbdev/s3c2410fb.c
@@ -618,7 +618,7 @@ static int s3c2410fb_debug_store(struct device *dev,
static DEVICE_ATTR(debug, 0664, s3c2410fb_debug_show, s3c2410fb_debug_store);
-static struct fb_ops s3c2410fb_ops = {
+static const struct fb_ops s3c2410fb_ops = {
.owner = THIS_MODULE,
.fb_check_var = s3c2410fb_check_var,
.fb_set_par = s3c2410fb_set_par,
diff --git a/drivers/video/fbdev/s3fb.c b/drivers/video/fbdev/s3fb.c
index be16c349c10f..60c424fae988 100644
--- a/drivers/video/fbdev/s3fb.c
+++ b/drivers/video/fbdev/s3fb.c
@@ -1037,7 +1037,7 @@ static int s3fb_pan_display(struct fb_var_screeninfo *var, struct fb_info *info)
/* Frame buffer operations */
-static struct fb_ops s3fb_ops = {
+static const struct fb_ops s3fb_ops = {
.owner = THIS_MODULE,
.fb_open = s3fb_open,
.fb_release = s3fb_release,
diff --git a/drivers/video/fbdev/sa1100fb.c b/drivers/video/fbdev/sa1100fb.c
index 81ad3aa1ca06..5bb653db0cec 100644
--- a/drivers/video/fbdev/sa1100fb.c
+++ b/drivers/video/fbdev/sa1100fb.c
@@ -574,7 +574,7 @@ static int sa1100fb_mmap(struct fb_info *info,
return vm_iomap_memory(vma, info->fix.mmio_start, info->fix.mmio_len);
}
-static struct fb_ops sa1100fb_ops = {
+static const struct fb_ops sa1100fb_ops = {
.owner = THIS_MODULE,
.fb_check_var = sa1100fb_check_var,
.fb_set_par = sa1100fb_set_par,
@@ -1143,7 +1143,6 @@ static struct sa1100fb_info *sa1100fb_init_fbinfo(struct device *dev)
static int sa1100fb_probe(struct platform_device *pdev)
{
struct sa1100fb_info *fbi;
- struct resource *res;
int ret, irq;
if (!dev_get_platdata(&pdev->dev)) {
@@ -1159,8 +1158,7 @@ static int sa1100fb_probe(struct platform_device *pdev)
if (!fbi)
return -ENOMEM;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- fbi->base = devm_ioremap_resource(&pdev->dev, res);
+ fbi->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(fbi->base))
return PTR_ERR(fbi->base);
diff --git a/drivers/video/fbdev/savage/savagefb_driver.c b/drivers/video/fbdev/savage/savagefb_driver.c
index 512789f5f884..aab312a7d9da 100644
--- a/drivers/video/fbdev/savage/savagefb_driver.c
+++ b/drivers/video/fbdev/savage/savagefb_driver.c
@@ -1637,7 +1637,7 @@ static int savagefb_release(struct fb_info *info, int user)
return 0;
}
-static struct fb_ops savagefb_ops = {
+static const struct fb_ops savagefb_ops = {
.owner = THIS_MODULE,
.fb_open = savagefb_open,
.fb_release = savagefb_release,
diff --git a/drivers/video/fbdev/sh7760fb.c b/drivers/video/fbdev/sh7760fb.c
index ab8fe838c776..5978a8921232 100644
--- a/drivers/video/fbdev/sh7760fb.c
+++ b/drivers/video/fbdev/sh7760fb.c
@@ -341,7 +341,7 @@ static int sh7760fb_set_par(struct fb_info *info)
return 0;
}
-static struct fb_ops sh7760fb_ops = {
+static const struct fb_ops sh7760fb_ops = {
.owner = THIS_MODULE,
.fb_blank = sh7760fb_blank,
.fb_check_var = sh7760fb_check_var,
@@ -463,7 +463,7 @@ static int sh7760fb_probe(struct platform_device *pdev)
goto out_fb;
}
- par->base = ioremap_nocache(res->start, resource_size(res));
+ par->base = ioremap(res->start, resource_size(res));
if (!par->base) {
dev_err(&pdev->dev, "cannot remap\n");
ret = -ENODEV;
diff --git a/drivers/video/fbdev/sh_mobile_lcdcfb.c b/drivers/video/fbdev/sh_mobile_lcdcfb.c
index c249763dbf0b..4ea6f932b334 100644
--- a/drivers/video/fbdev/sh_mobile_lcdcfb.c
+++ b/drivers/video/fbdev/sh_mobile_lcdcfb.c
@@ -1490,7 +1490,7 @@ sh_mobile_lcdc_overlay_mmap(struct fb_info *info, struct vm_area_struct *vma)
ovl->dma_handle, ovl->fb_size);
}
-static struct fb_ops sh_mobile_lcdc_overlay_ops = {
+static const struct fb_ops sh_mobile_lcdc_overlay_ops = {
.owner = THIS_MODULE,
.fb_read = fb_sys_read,
.fb_write = fb_sys_write,
@@ -1964,7 +1964,7 @@ sh_mobile_lcdc_mmap(struct fb_info *info, struct vm_area_struct *vma)
ch->dma_handle, ch->fb_size);
}
-static struct fb_ops sh_mobile_lcdc_ops = {
+static const struct fb_ops sh_mobile_lcdc_ops = {
.owner = THIS_MODULE,
.fb_setcolreg = sh_mobile_lcdc_setcolreg,
.fb_read = fb_sys_read,
@@ -2588,7 +2588,7 @@ static int sh_mobile_lcdc_probe(struct platform_device *pdev)
if (num_channels == 2)
priv->forced_fourcc = pdata->ch[0].fourcc;
- priv->base = ioremap_nocache(res->start, resource_size(res));
+ priv->base = ioremap(res->start, resource_size(res));
if (!priv->base) {
error = -ENOMEM;
goto err1;
diff --git a/drivers/video/fbdev/simplefb.c b/drivers/video/fbdev/simplefb.c
index 7dc0105f700d..533a047d07a2 100644
--- a/drivers/video/fbdev/simplefb.c
+++ b/drivers/video/fbdev/simplefb.c
@@ -78,7 +78,7 @@ static void simplefb_destroy(struct fb_info *info)
iounmap(info->screen_base);
}
-static struct fb_ops simplefb_ops = {
+static const struct fb_ops simplefb_ops = {
.owner = THIS_MODULE,
.fb_destroy = simplefb_destroy,
.fb_setcolreg = simplefb_setcolreg,
diff --git a/drivers/video/fbdev/sis/sis_main.c b/drivers/video/fbdev/sis/sis_main.c
index b443a8ed4600..ac140962b1bf 100644
--- a/drivers/video/fbdev/sis/sis_main.c
+++ b/drivers/video/fbdev/sis/sis_main.c
@@ -1906,7 +1906,7 @@ sisfb_get_fix(struct fb_fix_screeninfo *fix, int con, struct fb_info *info)
/* ---------------- fb_ops structures ----------------- */
-static struct fb_ops sisfb_ops = {
+static const struct fb_ops sisfb_ops = {
.owner = THIS_MODULE,
.fb_open = sisfb_open,
.fb_release = sisfb_release,
diff --git a/drivers/video/fbdev/skeletonfb.c b/drivers/video/fbdev/skeletonfb.c
index 812a36cb60c3..bcacfb6934fa 100644
--- a/drivers/video/fbdev/skeletonfb.c
+++ b/drivers/video/fbdev/skeletonfb.c
@@ -634,7 +634,7 @@ int xxxfb_sync(struct fb_info *info)
* Frame buffer operations
*/
-static struct fb_ops xxxfb_ops = {
+static const struct fb_ops xxxfb_ops = {
.owner = THIS_MODULE,
.fb_open = xxxfb_open,
.fb_read = xxxfb_read,
diff --git a/drivers/video/fbdev/sm712fb.c b/drivers/video/fbdev/sm712fb.c
index 207d0add684b..6a1b4a853d9e 100644
--- a/drivers/video/fbdev/sm712fb.c
+++ b/drivers/video/fbdev/sm712fb.c
@@ -1369,7 +1369,7 @@ static int smtc_set_par(struct fb_info *info)
return 0;
}
-static struct fb_ops smtcfb_ops = {
+static const struct fb_ops smtcfb_ops = {
.owner = THIS_MODULE,
.fb_check_var = smtc_check_var,
.fb_set_par = smtc_set_par,
diff --git a/drivers/video/fbdev/smscufx.c b/drivers/video/fbdev/smscufx.c
index 0e0f5bbfc5ef..bfac3ee4a642 100644
--- a/drivers/video/fbdev/smscufx.c
+++ b/drivers/video/fbdev/smscufx.c
@@ -1170,7 +1170,6 @@ static int ufx_ops_release(struct fb_info *info, int user)
fb_deferred_io_cleanup(info);
kfree(info->fbdefio);
info->fbdefio = NULL;
- info->fbops->fb_mmap = ufx_ops_mmap;
}
pr_debug("released /dev/fb%d user=%d count=%d",
@@ -1269,7 +1268,7 @@ static int ufx_ops_blank(int blank_mode, struct fb_info *info)
return 0;
}
-static struct fb_ops ufx_ops = {
+static const struct fb_ops ufx_ops = {
.owner = THIS_MODULE,
.fb_read = fb_sys_read,
.fb_write = ufx_ops_write,
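The deleted line in ufx_ops_release() (udlfb gets the same treatment further down) used to re-install the driver's fb_mmap after fb_deferred_io_cleanup(), because the deferred-I/O code once patched the mmap handler in the ops table. With fbops now const that patching is gone, the core keys off info->fbdefio instead, so there is nothing left to restore. Roughly, under that assumption:

	fb_deferred_io_cleanup(info);
	/* info->fbops->fb_mmap = ufx_ops_mmap;  (dead code: fbops is const
	   and fb_deferred_io_cleanup() no longer touches the ops table) */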
diff --git a/drivers/video/fbdev/ssd1307fb.c b/drivers/video/fbdev/ssd1307fb.c
index 78ca7ffc40c2..142535267fec 100644
--- a/drivers/video/fbdev/ssd1307fb.c
+++ b/drivers/video/fbdev/ssd1307fb.c
@@ -280,7 +280,7 @@ static void ssd1307fb_imageblit(struct fb_info *info, const struct fb_image *ima
ssd1307fb_update_display(par);
}
-static struct fb_ops ssd1307fb_ops = {
+static const struct fb_ops ssd1307fb_ops = {
.owner = THIS_MODULE,
.fb_read = fb_sys_read,
.fb_write = ssd1307fb_write,
diff --git a/drivers/video/fbdev/sstfb.c b/drivers/video/fbdev/sstfb.c
index 4e22ae383c87..afe6d1b7c3a0 100644
--- a/drivers/video/fbdev/sstfb.c
+++ b/drivers/video/fbdev/sstfb.c
@@ -1307,7 +1307,7 @@ static int sstfb_setup(char *options)
}
-static struct fb_ops sstfb_ops = {
+static const struct fb_ops sstfb_ops = {
.owner = THIS_MODULE,
.fb_check_var = sstfb_check_var,
.fb_set_par = sstfb_set_par,
@@ -1363,14 +1363,14 @@ static int sstfb_probe(struct pci_dev *pdev, const struct pci_device_id *id)
goto fail_fb_mem;
}
- par->mmio_vbase = ioremap_nocache(fix->mmio_start,
+ par->mmio_vbase = ioremap(fix->mmio_start,
fix->mmio_len);
if (!par->mmio_vbase) {
printk(KERN_ERR "sstfb: cannot remap register area %#lx\n",
fix->mmio_start);
goto fail_mmio_remap;
}
- info->screen_base = ioremap_nocache(fix->smem_start, 0x400000);
+ info->screen_base = ioremap(fix->smem_start, 0x400000);
if (!info->screen_base) {
printk(KERN_ERR "sstfb: cannot remap framebuffer %#lx\n",
fix->smem_start);
diff --git a/drivers/video/fbdev/stifb.c b/drivers/video/fbdev/stifb.c
index 9e88e3f594c2..de953ddb6312 100644
--- a/drivers/video/fbdev/stifb.c
+++ b/drivers/video/fbdev/stifb.c
@@ -1101,7 +1101,7 @@ stifb_init_display(struct stifb_info *fb)
/* ------------ Interfaces to hardware functions ------------ */
-static struct fb_ops stifb_ops = {
+static const struct fb_ops stifb_ops = {
.owner = THIS_MODULE,
.fb_setcolreg = stifb_setcolreg,
.fb_blank = stifb_blank,
@@ -1198,7 +1198,7 @@ static int __init stifb_init_fb(struct sti_struct *sti, int bpp_pref)
case S9000_ID_TOMCAT: /* Dual CRX, behaves else like a CRX */
/* FIXME: TomCat supports two heads:
* fb.iobase = REGION_BASE(fb_info,3);
- * fb.screen_base = ioremap_nocache(REGION_BASE(fb_info,2),xxx);
+ * fb.screen_base = ioremap(REGION_BASE(fb_info,2),xxx);
* for now we only support the left one ! */
xres = fb->ngle_rom.x_size_visible;
yres = fb->ngle_rom.y_size_visible;
@@ -1291,7 +1291,7 @@ static int __init stifb_init_fb(struct sti_struct *sti, int bpp_pref)
strcpy(fix->id, "stifb");
info->fbops = &stifb_ops;
- info->screen_base = ioremap_nocache(REGION_BASE(fb,1), fix->smem_len);
+ info->screen_base = ioremap(REGION_BASE(fb,1), fix->smem_len);
if (!info->screen_base) {
printk(KERN_ERR "stifb: failed to map memory\n");
goto out_err0;
diff --git a/drivers/video/fbdev/sunxvr1000.c b/drivers/video/fbdev/sunxvr1000.c
index 784c9bd5d502..15b079505a00 100644
--- a/drivers/video/fbdev/sunxvr1000.c
+++ b/drivers/video/fbdev/sunxvr1000.c
@@ -59,7 +59,7 @@ static int gfb_setcolreg(unsigned regno,
return 0;
}
-static struct fb_ops gfb_ops = {
+static const struct fb_ops gfb_ops = {
.owner = THIS_MODULE,
.fb_setcolreg = gfb_setcolreg,
.fb_fillrect = cfb_fillrect,
diff --git a/drivers/video/fbdev/sunxvr2500.c b/drivers/video/fbdev/sunxvr2500.c
index 31683e5a8b79..1d3bacd9d5ac 100644
--- a/drivers/video/fbdev/sunxvr2500.c
+++ b/drivers/video/fbdev/sunxvr2500.c
@@ -63,7 +63,7 @@ static int s3d_setcolreg(unsigned regno,
return 0;
}
-static struct fb_ops s3d_ops = {
+static const struct fb_ops s3d_ops = {
.owner = THIS_MODULE,
.fb_setcolreg = s3d_setcolreg,
.fb_fillrect = cfb_fillrect,
diff --git a/drivers/video/fbdev/sunxvr500.c b/drivers/video/fbdev/sunxvr500.c
index d392976126a6..9daf17b11106 100644
--- a/drivers/video/fbdev/sunxvr500.c
+++ b/drivers/video/fbdev/sunxvr500.c
@@ -186,7 +186,7 @@ static void e3d_copyarea(struct fb_info *info, const struct fb_copyarea *area)
spin_unlock_irqrestore(&ep->lock, flags);
}
-static struct fb_ops e3d_ops = {
+static const struct fb_ops e3d_ops = {
.owner = THIS_MODULE,
.fb_setcolreg = e3d_setcolreg,
.fb_fillrect = e3d_fillrect,
diff --git a/drivers/video/fbdev/tcx.c b/drivers/video/fbdev/tcx.c
index 7897f86fb23e..34b2e5b6e84a 100644
--- a/drivers/video/fbdev/tcx.c
+++ b/drivers/video/fbdev/tcx.c
@@ -40,7 +40,7 @@ static int tcx_pan_display(struct fb_var_screeninfo *, struct fb_info *);
* Frame buffer operations
*/
-static struct fb_ops tcx_ops = {
+static const struct fb_ops tcx_ops = {
.owner = THIS_MODULE,
.fb_setcolreg = tcx_setcolreg,
.fb_blank = tcx_blank,
diff --git a/drivers/video/fbdev/tdfxfb.c b/drivers/video/fbdev/tdfxfb.c
index fdbb1ea66e6c..f73e26c18c09 100644
--- a/drivers/video/fbdev/tdfxfb.c
+++ b/drivers/video/fbdev/tdfxfb.c
@@ -1141,7 +1141,7 @@ static int tdfxfb_cursor(struct fb_info *info, struct fb_cursor *cursor)
return 0;
}
-static struct fb_ops tdfxfb_ops = {
+static const struct fb_ops tdfxfb_ops = {
.owner = THIS_MODULE,
.fb_check_var = tdfxfb_check_var,
.fb_set_par = tdfxfb_set_par,
@@ -1417,7 +1417,7 @@ static int tdfxfb_probe(struct pci_dev *pdev, const struct pci_device_id *id)
}
default_par->regbase_virt =
- ioremap_nocache(info->fix.mmio_start, info->fix.mmio_len);
+ ioremap(info->fix.mmio_start, info->fix.mmio_len);
if (!default_par->regbase_virt) {
printk(KERN_ERR "fb: Can't remap %s register area.\n",
info->fix.id);
diff --git a/drivers/video/fbdev/tgafb.c b/drivers/video/fbdev/tgafb.c
index 286b2371c7dd..e9869135d833 100644
--- a/drivers/video/fbdev/tgafb.c
+++ b/drivers/video/fbdev/tgafb.c
@@ -70,7 +70,7 @@ static struct tc_driver tgafb_tc_driver;
* Frame buffer operations
*/
-static struct fb_ops tgafb_ops = {
+static const struct fb_ops tgafb_ops = {
.owner = THIS_MODULE,
.fb_check_var = tgafb_check_var,
.fb_set_par = tgafb_set_par,
@@ -1438,7 +1438,7 @@ static int tgafb_register(struct device *dev)
}
/* Map the framebuffer. */
- mem_base = ioremap_nocache(bar0_start, bar0_len);
+ mem_base = ioremap(bar0_start, bar0_len);
if (!mem_base) {
printk(KERN_ERR "tgafb: Cannot map MMIO\n");
goto err1;
diff --git a/drivers/video/fbdev/tmiofb.c b/drivers/video/fbdev/tmiofb.c
index 4f2fcea10d2b..50111966c981 100644
--- a/drivers/video/fbdev/tmiofb.c
+++ b/drivers/video/fbdev/tmiofb.c
@@ -646,7 +646,7 @@ static int tmiofb_blank(int blank, struct fb_info *info)
return 0;
}
-static struct fb_ops tmiofb_ops = {
+static const struct fb_ops tmiofb_ops = {
.owner = THIS_MODULE,
.fb_ioctl = tmiofb_ioctl,
diff --git a/drivers/video/fbdev/tridentfb.c b/drivers/video/fbdev/tridentfb.c
index da74bf6c5996..4d20cb557ff0 100644
--- a/drivers/video/fbdev/tridentfb.c
+++ b/drivers/video/fbdev/tridentfb.c
@@ -1443,7 +1443,7 @@ static int tridentfb_blank(int blank_mode, struct fb_info *info)
return (blank_mode == FB_BLANK_NORMAL) ? 1 : 0;
}
-static struct fb_ops tridentfb_ops = {
+static const struct fb_ops tridentfb_ops = {
.owner = THIS_MODULE,
.fb_setcolreg = tridentfb_setcolreg,
.fb_pan_display = tridentfb_pan_display,
@@ -1556,7 +1556,7 @@ static int trident_pci_probe(struct pci_dev *dev,
return -1;
}
- default_par->io_virt = ioremap_nocache(tridentfb_fix.mmio_start,
+ default_par->io_virt = ioremap(tridentfb_fix.mmio_start,
tridentfb_fix.mmio_len);
if (!default_par->io_virt) {
@@ -1579,7 +1579,7 @@ static int trident_pci_probe(struct pci_dev *dev,
goto out_unmap1;
}
- info->screen_base = ioremap_nocache(tridentfb_fix.smem_start,
+ info->screen_base = ioremap(tridentfb_fix.smem_start,
tridentfb_fix.smem_len);
if (!info->screen_base) {
diff --git a/drivers/video/fbdev/udlfb.c b/drivers/video/fbdev/udlfb.c
index fe373b63ddd6..07905d385949 100644
--- a/drivers/video/fbdev/udlfb.c
+++ b/drivers/video/fbdev/udlfb.c
@@ -1037,7 +1037,6 @@ static int dlfb_ops_release(struct fb_info *info, int user)
fb_deferred_io_cleanup(info);
kfree(info->fbdefio);
info->fbdefio = NULL;
- info->fbops->fb_mmap = dlfb_ops_mmap;
}
dev_dbg(info->dev, "release, user=%d count=%d\n", user, dlfb->fb_count);
diff --git a/drivers/video/fbdev/uvesafb.c b/drivers/video/fbdev/uvesafb.c
index 439565cae7ab..53d08d1b56f5 100644
--- a/drivers/video/fbdev/uvesafb.c
+++ b/drivers/video/fbdev/uvesafb.c
@@ -1440,7 +1440,7 @@ static void uvesafb_init_info(struct fb_info *info, struct vbe_mode_ib *mode)
/* Disable blanking if the user requested so. */
if (!blank)
- info->fbops->fb_blank = NULL;
+ uvesafb_ops.fb_blank = NULL;
/*
* Find out how much IO memory is required for the mode with
@@ -1510,7 +1510,7 @@ static void uvesafb_init_info(struct fb_info *info, struct vbe_mode_ib *mode)
(par->ypan ? FBINFO_HWACCEL_YPAN : 0);
if (!par->ypan)
- info->fbops->fb_pan_display = NULL;
+ uvesafb_ops.fb_pan_display = NULL;
}
static void uvesafb_init_mtrr(struct fb_info *info)
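uvesafb shows the flip side of the const conversion: when optional callbacks must be disabled at probe time, the writable object is the driver's own static ops table, patched before it is handed to the core, rather than info->fbops, which is now a pointer to const (vesafb below makes the same move, hoisting the !ypan tweak above the info->fbops assignment). This is fine for single-instance firmware drivers, but note it is a global side effect. A sketch of the pattern, with hypothetical names:

	static struct fb_ops fw_fb_ops = {	/* deliberately non-const */
		.owner          = THIS_MODULE,
		.fb_blank       = fw_fb_blank,
		.fb_pan_display = fw_fb_pan_display,
	};

	if (!ypan)
		fw_fb_ops.fb_pan_display = NULL;	/* patch the static table */
	info->fbops = &fw_fb_ops;			/* then register it */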
diff --git a/drivers/video/fbdev/valkyriefb.c b/drivers/video/fbdev/valkyriefb.c
index e04fde9c1fcd..4d20c4603e5a 100644
--- a/drivers/video/fbdev/valkyriefb.c
+++ b/drivers/video/fbdev/valkyriefb.c
@@ -113,7 +113,7 @@ static int valkyrie_init_info(struct fb_info *info, struct fb_info_valkyrie *p);
static void valkyrie_par_to_fix(struct fb_par_valkyrie *par, struct fb_fix_screeninfo *fix);
static void valkyrie_init_fix(struct fb_fix_screeninfo *fix, struct fb_info_valkyrie *p);
-static struct fb_ops valkyriefb_ops = {
+static const struct fb_ops valkyriefb_ops = {
.owner = THIS_MODULE,
.fb_check_var = valkyriefb_check_var,
.fb_set_par = valkyriefb_set_par,
@@ -356,7 +356,7 @@ int __init valkyriefb_init(void)
p->total_vram = 0x100000;
p->frame_buffer_phys = frame_buffer_phys;
#ifdef CONFIG_MAC
- p->frame_buffer = ioremap_nocache(frame_buffer_phys, p->total_vram);
+ p->frame_buffer = ioremap(frame_buffer_phys, p->total_vram);
#else
p->frame_buffer = ioremap_wt(frame_buffer_phys, p->total_vram);
#endif
diff --git a/drivers/video/fbdev/vermilion/cr_pll.c b/drivers/video/fbdev/vermilion/cr_pll.c
index c1e3738e6789..79d42b23d850 100644
--- a/drivers/video/fbdev/vermilion/cr_pll.c
+++ b/drivers/video/fbdev/vermilion/cr_pll.c
@@ -159,7 +159,7 @@ static int __init cr_pll_init(void)
pci_read_config_dword(mch_dev, CRVML_REG_MCHBAR,
&mch_bar);
mch_regs_base =
- ioremap_nocache(mch_bar, CRVML_MCHMAP_SIZE);
+ ioremap(mch_bar, CRVML_MCHMAP_SIZE);
if (!mch_regs_base) {
printk(KERN_ERR
"Carillo Ranch MCH device was not enabled.\n");
diff --git a/drivers/video/fbdev/vermilion/vermilion.c b/drivers/video/fbdev/vermilion/vermilion.c
index 498038a964ee..ff61605b8764 100644
--- a/drivers/video/fbdev/vermilion/vermilion.c
+++ b/drivers/video/fbdev/vermilion/vermilion.c
@@ -317,7 +317,7 @@ static int vmlfb_enable_mmio(struct vml_par *par)
": Could not claim display controller MMIO.\n");
return -EBUSY;
}
- par->vdc_mem = ioremap_nocache(par->vdc_mem_base, par->vdc_mem_size);
+ par->vdc_mem = ioremap(par->vdc_mem_base, par->vdc_mem_size);
if (par->vdc_mem == NULL) {
printk(KERN_ERR MODULE_NAME
": Could not map display controller MMIO.\n");
@@ -332,7 +332,7 @@ static int vmlfb_enable_mmio(struct vml_par *par)
err = -EBUSY;
goto out_err_1;
}
- par->gpu_mem = ioremap_nocache(par->gpu_mem_base, par->gpu_mem_size);
+ par->gpu_mem = ioremap(par->gpu_mem_base, par->gpu_mem_size);
if (par->gpu_mem == NULL) {
printk(KERN_ERR MODULE_NAME ": Could not map GPU MMIO.\n");
err = -ENOMEM;
diff --git a/drivers/video/fbdev/vesafb.c b/drivers/video/fbdev/vesafb.c
index d9c08f6c2155..a1fe24ea869b 100644
--- a/drivers/video/fbdev/vesafb.c
+++ b/drivers/video/fbdev/vesafb.c
@@ -447,15 +447,15 @@ static int vesafb_probe(struct platform_device *dev)
vesafb_fix.smem_start, info->screen_base,
size_remap/1024, size_total/1024);
+ if (!ypan)
+ vesafb_ops.fb_pan_display = NULL;
+
info->fbops = &vesafb_ops;
info->var = vesafb_defined;
info->fix = vesafb_fix;
info->flags = FBINFO_FLAG_DEFAULT | FBINFO_MISC_FIRMWARE |
(ypan ? FBINFO_HWACCEL_YPAN : 0);
- if (!ypan)
- info->fbops->fb_pan_display = NULL;
-
if (fb_alloc_cmap(&info->cmap, 256, 0) < 0) {
err = -ENOMEM;
goto err;
diff --git a/drivers/video/fbdev/vfb.c b/drivers/video/fbdev/vfb.c
index 54127905bfe7..95d3c59867d0 100644
--- a/drivers/video/fbdev/vfb.c
+++ b/drivers/video/fbdev/vfb.c
@@ -78,7 +78,7 @@ static int vfb_pan_display(struct fb_var_screeninfo *var,
static int vfb_mmap(struct fb_info *info,
struct vm_area_struct *vma);
-static struct fb_ops vfb_ops = {
+static const struct fb_ops vfb_ops = {
.fb_read = fb_sys_read,
.fb_write = fb_sys_write,
.fb_check_var = vfb_check_var,
diff --git a/drivers/video/fbdev/vga16fb.c b/drivers/video/fbdev/vga16fb.c
index 2c6a576ed84c..a20eeb8308ff 100644
--- a/drivers/video/fbdev/vga16fb.c
+++ b/drivers/video/fbdev/vga16fb.c
@@ -1270,7 +1270,7 @@ static void vga16fb_destroy(struct fb_info *info)
framebuffer_release(info);
}
-static struct fb_ops vga16fb_ops = {
+static const struct fb_ops vga16fb_ops = {
.owner = THIS_MODULE,
.fb_open = vga16fb_open,
.fb_release = vga16fb_release,
diff --git a/drivers/video/fbdev/via/via-core.c b/drivers/video/fbdev/via/via-core.c
index ffa2ca2d3f5e..703ddee9a244 100644
--- a/drivers/video/fbdev/via/via-core.c
+++ b/drivers/video/fbdev/via/via-core.c
@@ -442,7 +442,7 @@ static int via_pci_setup_mmio(struct viafb_dev *vdev)
*/
vdev->engine_start = pci_resource_start(vdev->pdev, 1);
vdev->engine_len = pci_resource_len(vdev->pdev, 1);
- vdev->engine_mmio = ioremap_nocache(vdev->engine_start,
+ vdev->engine_mmio = ioremap(vdev->engine_start,
vdev->engine_len);
if (vdev->engine_mmio == NULL)
dev_err(&vdev->pdev->dev,
diff --git a/drivers/video/fbdev/vt8500lcdfb.c b/drivers/video/fbdev/vt8500lcdfb.c
index be8d9702cbb2..f744479dc7df 100644
--- a/drivers/video/fbdev/vt8500lcdfb.c
+++ b/drivers/video/fbdev/vt8500lcdfb.c
@@ -238,7 +238,7 @@ static int vt8500lcd_blank(int blank, struct fb_info *info)
return 0;
}
-static struct fb_ops vt8500lcd_ops = {
+static const struct fb_ops vt8500lcd_ops = {
.owner = THIS_MODULE,
.fb_set_par = vt8500lcd_set_par,
.fb_setcolreg = vt8500lcd_setcolreg,
diff --git a/drivers/video/fbdev/vt8623fb.c b/drivers/video/fbdev/vt8623fb.c
index c339a8fbad81..7b3eef1b893f 100644
--- a/drivers/video/fbdev/vt8623fb.c
+++ b/drivers/video/fbdev/vt8623fb.c
@@ -634,7 +634,7 @@ static int vt8623fb_pan_display(struct fb_var_screeninfo *var, struct fb_info *i
/* Frame buffer operations */
-static struct fb_ops vt8623fb_ops = {
+static const struct fb_ops vt8623fb_ops = {
.owner = THIS_MODULE,
.fb_open = vt8623fb_open,
.fb_release = vt8623fb_release,
diff --git a/drivers/video/fbdev/w100fb.c b/drivers/video/fbdev/w100fb.c
index 3be07807edcd..ad26cbffbc6f 100644
--- a/drivers/video/fbdev/w100fb.c
+++ b/drivers/video/fbdev/w100fb.c
@@ -549,7 +549,7 @@ static int w100fb_set_par(struct fb_info *info)
/*
* Frame buffer operations
*/
-static struct fb_ops w100fb_ops = {
+static const struct fb_ops w100fb_ops = {
.owner = THIS_MODULE,
.fb_check_var = w100fb_check_var,
.fb_set_par = w100fb_set_par,
@@ -648,12 +648,12 @@ int w100fb_probe(struct platform_device *pdev)
return -EINVAL;
/* Remap the chip base address */
- remapped_base = ioremap_nocache(mem->start+W100_CFG_BASE, W100_CFG_LEN);
+ remapped_base = ioremap(mem->start+W100_CFG_BASE, W100_CFG_LEN);
if (remapped_base == NULL)
goto out;
/* Map the register space */
- remapped_regs = ioremap_nocache(mem->start+W100_REG_BASE, W100_REG_LEN);
+ remapped_regs = ioremap(mem->start+W100_REG_BASE, W100_REG_LEN);
if (remapped_regs == NULL)
goto out;
@@ -672,7 +672,7 @@ int w100fb_probe(struct platform_device *pdev)
printk(" at 0x%08lx.\n", (unsigned long) mem->start+W100_CFG_BASE);
/* Remap the framebuffer */
- remapped_fbuf = ioremap_nocache(mem->start+MEM_WINDOW_BASE, MEM_WINDOW_SIZE);
+ remapped_fbuf = ioremap(mem->start+MEM_WINDOW_BASE, MEM_WINDOW_SIZE);
if (remapped_fbuf == NULL)
goto out;
diff --git a/drivers/video/fbdev/wm8505fb.c b/drivers/video/fbdev/wm8505fb.c
index 17c780315ca5..b656eff58c23 100644
--- a/drivers/video/fbdev/wm8505fb.c
+++ b/drivers/video/fbdev/wm8505fb.c
@@ -246,7 +246,7 @@ static int wm8505fb_blank(int blank, struct fb_info *info)
return 0;
}
-static struct fb_ops wm8505fb_ops = {
+static const struct fb_ops wm8505fb_ops = {
.owner = THIS_MODULE,
.fb_set_par = wm8505fb_set_par,
.fb_setcolreg = wm8505fb_setcolreg,
diff --git a/drivers/video/fbdev/xen-fbfront.c b/drivers/video/fbdev/xen-fbfront.c
index a3d6b6db221b..00307b8693bf 100644
--- a/drivers/video/fbdev/xen-fbfront.c
+++ b/drivers/video/fbdev/xen-fbfront.c
@@ -328,7 +328,7 @@ static int xenfb_set_par(struct fb_info *info)
return 0;
}
-static struct fb_ops xenfb_fb_ops = {
+static const struct fb_ops xenfb_fb_ops = {
.owner = THIS_MODULE,
.fb_read = fb_sys_read,
.fb_write = xenfb_write,
diff --git a/drivers/video/fbdev/xilinxfb.c b/drivers/video/fbdev/xilinxfb.c
index 8628829b470d..ca4ff658cad0 100644
--- a/drivers/video/fbdev/xilinxfb.c
+++ b/drivers/video/fbdev/xilinxfb.c
@@ -247,7 +247,7 @@ xilinx_fb_blank(int blank_mode, struct fb_info *fbi)
return 0; /* success */
}
-static struct fb_ops xilinxfb_ops = {
+static const struct fb_ops xilinxfb_ops = {
.owner = THIS_MODULE,
.fb_setcolreg = xilinx_fb_setcolreg,
.fb_blank = xilinx_fb_blank,
diff --git a/drivers/virt/vboxguest/vboxguest_core.c b/drivers/virt/vboxguest/vboxguest_core.c
index 2307b0329aec..d823d558c0c4 100644
--- a/drivers/virt/vboxguest/vboxguest_core.c
+++ b/drivers/virt/vboxguest/vboxguest_core.c
@@ -6,6 +6,7 @@
*/
#include <linux/device.h>
+#include <linux/io.h>
#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/sizes.h>
diff --git a/drivers/virt/vboxguest/vboxguest_utils.c b/drivers/virt/vboxguest/vboxguest_utils.c
index 43c391626a00..50920b6fc319 100644
--- a/drivers/virt/vboxguest/vboxguest_utils.c
+++ b/drivers/virt/vboxguest/vboxguest_utils.c
@@ -7,6 +7,7 @@
*/
#include <linux/errno.h>
+#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/module.h>
diff --git a/drivers/visorbus/visorchipset.c b/drivers/visorbus/visorchipset.c
index ca752b8f495f..cb1eb7e05f87 100644
--- a/drivers/visorbus/visorchipset.c
+++ b/drivers/visorbus/visorchipset.c
@@ -1210,14 +1210,17 @@ static void setup_crash_devices_work_queue(struct work_struct *work)
{
struct controlvm_message local_crash_bus_msg;
struct controlvm_message local_crash_dev_msg;
- struct controlvm_message msg;
+ struct controlvm_message msg = {
+ .hdr.id = CONTROLVM_CHIPSET_INIT,
+ .cmd.init_chipset = {
+ .bus_count = 23,
+ .switch_count = 0,
+ },
+ };
u32 local_crash_msg_offset;
u16 local_crash_msg_count;
/* send init chipset msg */
- msg.hdr.id = CONTROLVM_CHIPSET_INIT;
- msg.cmd.init_chipset.bus_count = 23;
- msg.cmd.init_chipset.switch_count = 0;
chipset_init(&msg);
/* get saved message count */
if (visorchannel_read(chipset_dev->controlvm_channel,
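The visorchipset change relies on a C99 guarantee worth spelling out: in a designated initializer, every member not explicitly named is implicitly zero-initialized, whereas the three plain assignments it replaces left the rest of the on-stack message undefined before it was handed to chipset_init(). A sketch with hypothetical fields:

	struct msg_like {
		u32 id;
		u32 bus_count;
		u32 switch_count;
		u8  reserved[48];
	};

	struct msg_like msg = {
		.id        = 1,
		.bus_count = 23,
		/* switch_count and reserved[] are implicitly zero */
	};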
diff --git a/drivers/vme/boards/vme_vmivme7805.c b/drivers/vme/boards/vme_vmivme7805.c
index 1b6e42e5e8fd..51e056bae943 100644
--- a/drivers/vme/boards/vme_vmivme7805.c
+++ b/drivers/vme/boards/vme_vmivme7805.c
@@ -55,7 +55,7 @@ static int vmic_probe(struct pci_dev *pdev, const struct pci_device_id *id)
}
/* Map registers in BAR 0 */
- vmic_base = ioremap_nocache(pci_resource_start(pdev, 0), 16);
+ vmic_base = ioremap(pci_resource_start(pdev, 0), 16);
if (!vmic_base) {
dev_err(&pdev->dev, "Unable to remap CRG region\n");
retval = -EIO;
diff --git a/drivers/vme/bridges/vme_ca91cx42.c b/drivers/vme/bridges/vme_ca91cx42.c
index 1edb8a5de873..ea938dc29c5e 100644
--- a/drivers/vme/bridges/vme_ca91cx42.c
+++ b/drivers/vme/bridges/vme_ca91cx42.c
@@ -554,7 +554,7 @@ static int ca91cx42_alloc_resource(struct vme_master_resource *image,
goto err_resource;
}
- image->kern_base = ioremap_nocache(
+ image->kern_base = ioremap(
image->bus_resource.start, size);
if (!image->kern_base) {
dev_err(ca91cx42_bridge->parent, "Failed to remap resource\n");
@@ -1638,7 +1638,7 @@ static int ca91cx42_probe(struct pci_dev *pdev, const struct pci_device_id *id)
}
/* map registers in BAR 0 */
- ca91cx42_device->base = ioremap_nocache(pci_resource_start(pdev, 0),
+ ca91cx42_device->base = ioremap(pci_resource_start(pdev, 0),
4096);
if (!ca91cx42_device->base) {
dev_err(&pdev->dev, "Unable to remap CRG region\n");
diff --git a/drivers/vme/bridges/vme_fake.c b/drivers/vme/bridges/vme_fake.c
index 3208a4409e44..6a1bc284f297 100644
--- a/drivers/vme/bridges/vme_fake.c
+++ b/drivers/vme/bridges/vme_fake.c
@@ -414,8 +414,9 @@ static void fake_lm_check(struct fake_driver *bridge, unsigned long long addr,
}
}
-static u8 fake_vmeread8(struct fake_driver *bridge, unsigned long long addr,
- u32 aspace, u32 cycle)
+static noinline_for_stack u8 fake_vmeread8(struct fake_driver *bridge,
+ unsigned long long addr,
+ u32 aspace, u32 cycle)
{
u8 retval = 0xff;
int i;
@@ -446,8 +447,9 @@ static u8 fake_vmeread8(struct fake_driver *bridge, unsigned long long addr,
return retval;
}
-static u16 fake_vmeread16(struct fake_driver *bridge, unsigned long long addr,
- u32 aspace, u32 cycle)
+static noinline_for_stack u16 fake_vmeread16(struct fake_driver *bridge,
+ unsigned long long addr,
+ u32 aspace, u32 cycle)
{
u16 retval = 0xffff;
int i;
@@ -478,8 +480,9 @@ static u16 fake_vmeread16(struct fake_driver *bridge, unsigned long long addr,
return retval;
}
-static u32 fake_vmeread32(struct fake_driver *bridge, unsigned long long addr,
- u32 aspace, u32 cycle)
+static noinline_for_stack u32 fake_vmeread32(struct fake_driver *bridge,
+ unsigned long long addr,
+ u32 aspace, u32 cycle)
{
u32 retval = 0xffffffff;
int i;
@@ -609,8 +612,9 @@ out:
return retval;
}
-static void fake_vmewrite8(struct fake_driver *bridge, u8 *buf,
- unsigned long long addr, u32 aspace, u32 cycle)
+static noinline_for_stack void fake_vmewrite8(struct fake_driver *bridge,
+ u8 *buf, unsigned long long addr,
+ u32 aspace, u32 cycle)
{
int i;
unsigned long long start, end, offset;
@@ -639,8 +643,9 @@ static void fake_vmewrite8(struct fake_driver *bridge, u8 *buf,
}
-static void fake_vmewrite16(struct fake_driver *bridge, u16 *buf,
- unsigned long long addr, u32 aspace, u32 cycle)
+static noinline_for_stack void fake_vmewrite16(struct fake_driver *bridge,
+ u16 *buf, unsigned long long addr,
+ u32 aspace, u32 cycle)
{
int i;
unsigned long long start, end, offset;
@@ -669,8 +674,9 @@ static void fake_vmewrite16(struct fake_driver *bridge, u16 *buf,
}
-static void fake_vmewrite32(struct fake_driver *bridge, u32 *buf,
- unsigned long long addr, u32 aspace, u32 cycle)
+static noinline_for_stack void fake_vmewrite32(struct fake_driver *bridge,
+ u32 *buf, unsigned long long addr,
+ u32 aspace, u32 cycle)
{
int i;
unsigned long long start, end, offset;
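The fake VME bridge annotations address stack growth rather than behavior: noinline_for_stack is simply noinline (see linux/compiler.h), spelled differently to document intent. It stops the compiler from inlining all six accessors, each with sizable locals, into the master read/write loops, where a single combined frame could otherwise exceed the -Wframe-larger-than limit (typically under KASAN, which inflates stack usage). A sketch of the annotation, with hypothetical names:

	static noinline_for_stack u32 read_one(void __iomem *base, unsigned long off)
	{
		/* locals stay in this frame, not in the caller's */
		return readl(base + off);
	}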
diff --git a/drivers/vme/bridges/vme_tsi148.c b/drivers/vme/bridges/vme_tsi148.c
index 7e079d39bd76..50ae26977a02 100644
--- a/drivers/vme/bridges/vme_tsi148.c
+++ b/drivers/vme/bridges/vme_tsi148.c
@@ -770,7 +770,7 @@ static int tsi148_alloc_resource(struct vme_master_resource *image,
goto err_resource;
}
- image->kern_base = ioremap_nocache(
+ image->kern_base = ioremap(
image->bus_resource.start, size);
if (!image->kern_base) {
dev_err(tsi148_bridge->parent, "Failed to remap resource\n");
@@ -2317,7 +2317,7 @@ static int tsi148_probe(struct pci_dev *pdev, const struct pci_device_id *id)
}
/* map registers in BAR 0 */
- tsi148_device->base = ioremap_nocache(pci_resource_start(pdev, 0),
+ tsi148_device->base = ioremap(pci_resource_start(pdev, 0),
4096);
if (!tsi148_device->base) {
dev_err(&pdev->dev, "Unable to remap CRG region\n");
diff --git a/drivers/w1/masters/matrox_w1.c b/drivers/w1/masters/matrox_w1.c
index 3110791a2f1c..ee716c715710 100644
--- a/drivers/w1/masters/matrox_w1.c
+++ b/drivers/w1/masters/matrox_w1.c
@@ -139,7 +139,7 @@ static int matrox_w1_probe(struct pci_dev *pdev, const struct pci_device_id *ent
dev->phys_addr = pci_resource_start(pdev, 1);
- dev->virt_addr = ioremap_nocache(dev->phys_addr, 16384);
+ dev->virt_addr = ioremap(dev->phys_addr, 16384);
if (!dev->virt_addr) {
dev_err(&pdev->dev, "%s: failed to ioremap(0x%lx, %d).\n",
__func__, dev->phys_addr, 16384);
diff --git a/drivers/w1/masters/omap_hdq.c b/drivers/w1/masters/omap_hdq.c
index 4164045866b3..aa09f8527776 100644
--- a/drivers/w1/masters/omap_hdq.c
+++ b/drivers/w1/masters/omap_hdq.c
@@ -38,12 +38,6 @@
#define OMAP_HDQ_INT_STATUS_TXCOMPLETE BIT(2)
#define OMAP_HDQ_INT_STATUS_RXCOMPLETE BIT(1)
#define OMAP_HDQ_INT_STATUS_TIMEOUT BIT(0)
-#define OMAP_HDQ_SYSCONFIG 0x14
-#define OMAP_HDQ_SYSCONFIG_SOFTRESET BIT(1)
-#define OMAP_HDQ_SYSCONFIG_AUTOIDLE BIT(0)
-#define OMAP_HDQ_SYSCONFIG_NOIDLE 0x0
-#define OMAP_HDQ_SYSSTATUS 0x18
-#define OMAP_HDQ_SYSSTATUS_RESETDONE BIT(0)
#define OMAP_HDQ_FLAG_CLEAR 0
#define OMAP_HDQ_FLAG_SET 1
@@ -62,17 +56,9 @@ struct hdq_data {
void __iomem *hdq_base;
/* lock status update */
struct mutex hdq_mutex;
- int hdq_usecount;
u8 hdq_irqstatus;
/* device lock */
spinlock_t hdq_spinlock;
- /*
- * Used to control the call to omap_hdq_get and omap_hdq_put.
- * HDQ Protocol: Write the CMD|REG_address first, followed by
- * the data wrire or read.
- */
- int init_trans;
- int rrw;
/* mode: 0-HDQ 1-W1 */
int mode;
@@ -99,15 +85,6 @@ static inline u8 hdq_reg_merge(struct hdq_data *hdq_data, u32 offset,
return new_val;
}
-static void hdq_disable_interrupt(struct hdq_data *hdq_data, u32 offset,
- u32 mask)
-{
- u32 ie;
-
- ie = readl(hdq_data->hdq_base + offset);
- writel(ie & mask, hdq_data->hdq_base + offset);
-}
-
/*
* Wait for one or more bits in flag change.
* HDQ_FLAG_SET: wait until any bit in the flag is set.
@@ -142,22 +119,24 @@ static int hdq_wait_for_flag(struct hdq_data *hdq_data, u32 offset,
return ret;
}
+/* Clear saved irqstatus after using an interrupt */
+static void hdq_reset_irqstatus(struct hdq_data *hdq_data)
+{
+ unsigned long irqflags;
+
+ spin_lock_irqsave(&hdq_data->hdq_spinlock, irqflags);
+ hdq_data->hdq_irqstatus = 0;
+ spin_unlock_irqrestore(&hdq_data->hdq_spinlock, irqflags);
+}
+
/* write out a byte and fill *status with HDQ_INT_STATUS */
static int hdq_write_byte(struct hdq_data *hdq_data, u8 val, u8 *status)
{
int ret;
u8 tmp_status;
- unsigned long irqflags;
*status = 0;
- spin_lock_irqsave(&hdq_data->hdq_spinlock, irqflags);
- /* clear interrupt flags via a dummy read */
- hdq_reg_in(hdq_data, OMAP_HDQ_INT_STATUS);
- /* ISR loads it with new INT_STATUS */
- hdq_data->hdq_irqstatus = 0;
- spin_unlock_irqrestore(&hdq_data->hdq_spinlock, irqflags);
-
hdq_reg_out(hdq_data, OMAP_HDQ_TX_DATA, val);
/* set the GO bit */
@@ -191,6 +170,7 @@ static int hdq_write_byte(struct hdq_data *hdq_data, u8 val, u8 *status)
}
out:
+ hdq_reset_irqstatus(hdq_data);
return ret;
}
@@ -237,47 +217,11 @@ static void omap_w1_search_bus(void *_hdq, struct w1_master *master_dev,
slave_found(master_dev, id);
}
-static int _omap_hdq_reset(struct hdq_data *hdq_data)
-{
- int ret;
- u8 tmp_status;
-
- hdq_reg_out(hdq_data, OMAP_HDQ_SYSCONFIG,
- OMAP_HDQ_SYSCONFIG_SOFTRESET);
- /*
- * Select HDQ/1W mode & enable clocks.
- * It is observed that INT flags can't be cleared via a read and GO/INIT
- * won't return to zero if interrupt is disabled. So we always enable
- * interrupt.
- */
- hdq_reg_out(hdq_data, OMAP_HDQ_CTRL_STATUS,
- OMAP_HDQ_CTRL_STATUS_CLOCKENABLE |
- OMAP_HDQ_CTRL_STATUS_INTERRUPTMASK);
-
- /* wait for reset to complete */
- ret = hdq_wait_for_flag(hdq_data, OMAP_HDQ_SYSSTATUS,
- OMAP_HDQ_SYSSTATUS_RESETDONE, OMAP_HDQ_FLAG_SET, &tmp_status);
- if (ret)
- dev_dbg(hdq_data->dev, "timeout waiting HDQ reset, %x",
- tmp_status);
- else {
- hdq_reg_out(hdq_data, OMAP_HDQ_CTRL_STATUS,
- OMAP_HDQ_CTRL_STATUS_CLOCKENABLE |
- OMAP_HDQ_CTRL_STATUS_INTERRUPTMASK |
- hdq_data->mode);
- hdq_reg_out(hdq_data, OMAP_HDQ_SYSCONFIG,
- OMAP_HDQ_SYSCONFIG_AUTOIDLE);
- }
-
- return ret;
-}
-
/* Issue break pulse to the device */
static int omap_hdq_break(struct hdq_data *hdq_data)
{
int ret = 0;
u8 tmp_status;
- unsigned long irqflags;
ret = mutex_lock_interruptible(&hdq_data->hdq_mutex);
if (ret < 0) {
@@ -286,13 +230,6 @@ static int omap_hdq_break(struct hdq_data *hdq_data)
goto rtn;
}
- spin_lock_irqsave(&hdq_data->hdq_spinlock, irqflags);
- /* clear interrupt flags via a dummy read */
- hdq_reg_in(hdq_data, OMAP_HDQ_INT_STATUS);
- /* ISR loads it with new INT_STATUS */
- hdq_data->hdq_irqstatus = 0;
- spin_unlock_irqrestore(&hdq_data->hdq_spinlock, irqflags);
-
/* set the INIT and GO bit */
hdq_reg_merge(hdq_data, OMAP_HDQ_CTRL_STATUS,
OMAP_HDQ_CTRL_STATUS_INITIALIZATION | OMAP_HDQ_CTRL_STATUS_GO,
@@ -341,6 +278,7 @@ static int omap_hdq_break(struct hdq_data *hdq_data)
" return to zero, %x", tmp_status);
out:
+ hdq_reset_irqstatus(hdq_data);
mutex_unlock(&hdq_data->hdq_mutex);
rtn:
return ret;
@@ -357,7 +295,7 @@ static int hdq_read_byte(struct hdq_data *hdq_data, u8 *val)
goto rtn;
}
- if (!hdq_data->hdq_usecount) {
+ if (pm_runtime_suspended(hdq_data->dev)) {
ret = -EINVAL;
goto out;
}
@@ -388,86 +326,13 @@ static int hdq_read_byte(struct hdq_data *hdq_data, u8 *val)
/* the data is ready. Read it in! */
*val = hdq_reg_in(hdq_data, OMAP_HDQ_RX_DATA);
out:
+ hdq_reset_irqstatus(hdq_data);
mutex_unlock(&hdq_data->hdq_mutex);
rtn:
return ret;
}
-/* Enable clocks and set the controller to HDQ/1W mode */
-static int omap_hdq_get(struct hdq_data *hdq_data)
-{
- int ret = 0;
-
- ret = mutex_lock_interruptible(&hdq_data->hdq_mutex);
- if (ret < 0) {
- ret = -EINTR;
- goto rtn;
- }
-
- if (OMAP_HDQ_MAX_USER == hdq_data->hdq_usecount) {
- dev_dbg(hdq_data->dev, "attempt to exceed the max use count");
- ret = -EINVAL;
- goto out;
- } else {
- hdq_data->hdq_usecount++;
- try_module_get(THIS_MODULE);
- if (1 == hdq_data->hdq_usecount) {
-
- pm_runtime_get_sync(hdq_data->dev);
-
- /* make sure HDQ/1W is out of reset */
- if (!(hdq_reg_in(hdq_data, OMAP_HDQ_SYSSTATUS) &
- OMAP_HDQ_SYSSTATUS_RESETDONE)) {
- ret = _omap_hdq_reset(hdq_data);
- if (ret)
- /* back up the count */
- hdq_data->hdq_usecount--;
- } else {
- /* select HDQ/1W mode & enable clocks */
- hdq_reg_out(hdq_data, OMAP_HDQ_CTRL_STATUS,
- OMAP_HDQ_CTRL_STATUS_CLOCKENABLE |
- OMAP_HDQ_CTRL_STATUS_INTERRUPTMASK |
- hdq_data->mode);
- hdq_reg_out(hdq_data, OMAP_HDQ_SYSCONFIG,
- OMAP_HDQ_SYSCONFIG_NOIDLE);
- hdq_reg_in(hdq_data, OMAP_HDQ_INT_STATUS);
- }
- }
- }
-
-out:
- mutex_unlock(&hdq_data->hdq_mutex);
-rtn:
- return ret;
-}
-
-/* Disable clocks to the module */
-static int omap_hdq_put(struct hdq_data *hdq_data)
-{
- int ret = 0;
-
- ret = mutex_lock_interruptible(&hdq_data->hdq_mutex);
- if (ret < 0)
- return -EINTR;
-
- hdq_reg_out(hdq_data, OMAP_HDQ_SYSCONFIG,
- OMAP_HDQ_SYSCONFIG_AUTOIDLE);
- if (0 == hdq_data->hdq_usecount) {
- dev_dbg(hdq_data->dev, "attempt to decrement use count"
- " when it is zero");
- ret = -EINVAL;
- } else {
- hdq_data->hdq_usecount--;
- module_put(THIS_MODULE);
- if (0 == hdq_data->hdq_usecount)
- pm_runtime_put_sync(hdq_data->dev);
- }
- mutex_unlock(&hdq_data->hdq_mutex);
-
- return ret;
-}
-
/*
* W1 triplet callback function - used for searching ROM addresses.
* Registered only when controller is in 1-wire mode.
@@ -482,7 +347,12 @@ static u8 omap_w1_triplet(void *_hdq, u8 bdir)
OMAP_HDQ_CTRL_STATUS_INTERRUPTMASK;
u8 mask = ctrl | OMAP_HDQ_CTRL_STATUS_DIR;
- omap_hdq_get(_hdq);
+ err = pm_runtime_get_sync(hdq_data->dev);
+ if (err < 0) {
+ pm_runtime_put_noidle(hdq_data->dev);
+
+ return err;
+ }
err = mutex_lock_interruptible(&hdq_data->hdq_mutex);
if (err < 0) {
@@ -490,7 +360,6 @@ static u8 omap_w1_triplet(void *_hdq, u8 bdir)
goto rtn;
}
- hdq_data->hdq_irqstatus = 0;
/* read id_bit */
hdq_reg_merge(_hdq, OMAP_HDQ_CTRL_STATUS,
ctrl | OMAP_HDQ_CTRL_STATUS_DIR, mask);
@@ -504,7 +373,9 @@ static u8 omap_w1_triplet(void *_hdq, u8 bdir)
}
id_bit = (hdq_reg_in(_hdq, OMAP_HDQ_RX_DATA) & 0x01);
- hdq_data->hdq_irqstatus = 0;
+ /* Must clear irqstatus for another RXCOMPLETE interrupt */
+ hdq_reset_irqstatus(hdq_data);
+
/* read comp_bit */
hdq_reg_merge(_hdq, OMAP_HDQ_CTRL_STATUS,
ctrl | OMAP_HDQ_CTRL_STATUS_DIR, mask);
@@ -547,18 +418,33 @@ static u8 omap_w1_triplet(void *_hdq, u8 bdir)
OMAP_HDQ_CTRL_STATUS_SINGLE);
out:
+ hdq_reset_irqstatus(hdq_data);
mutex_unlock(&hdq_data->hdq_mutex);
rtn:
- omap_hdq_put(_hdq);
+ pm_runtime_mark_last_busy(hdq_data->dev);
+ pm_runtime_put_autosuspend(hdq_data->dev);
+
return ret;
}
/* reset callback */
static u8 omap_w1_reset_bus(void *_hdq)
{
- omap_hdq_get(_hdq);
- omap_hdq_break(_hdq);
- omap_hdq_put(_hdq);
+ struct hdq_data *hdq_data = _hdq;
+ int err;
+
+ err = pm_runtime_get_sync(hdq_data->dev);
+ if (err < 0) {
+ pm_runtime_put_noidle(hdq_data->dev);
+
+ return err;
+ }
+
+ omap_hdq_break(hdq_data);
+
+ pm_runtime_mark_last_busy(hdq_data->dev);
+ pm_runtime_put_autosuspend(hdq_data->dev);
+
return 0;
}
@@ -569,37 +455,19 @@ static u8 omap_w1_read_byte(void *_hdq)
u8 val = 0;
int ret;
- /* First write to initialize the transfer */
- if (hdq_data->init_trans == 0)
- omap_hdq_get(hdq_data);
+ ret = pm_runtime_get_sync(hdq_data->dev);
+ if (ret < 0) {
+ pm_runtime_put_noidle(hdq_data->dev);
- ret = hdq_read_byte(hdq_data, &val);
- if (ret) {
- ret = mutex_lock_interruptible(&hdq_data->hdq_mutex);
- if (ret < 0) {
- dev_dbg(hdq_data->dev, "Could not acquire mutex\n");
- return -EINTR;
- }
- hdq_data->init_trans = 0;
- mutex_unlock(&hdq_data->hdq_mutex);
- omap_hdq_put(hdq_data);
return -1;
}
- hdq_disable_interrupt(hdq_data, OMAP_HDQ_CTRL_STATUS,
- ~OMAP_HDQ_CTRL_STATUS_INTERRUPTMASK);
+ ret = hdq_read_byte(hdq_data, &val);
+ if (ret)
+ ret = -1;
- /* Write followed by a read, release the module */
- if (hdq_data->init_trans) {
- ret = mutex_lock_interruptible(&hdq_data->hdq_mutex);
- if (ret < 0) {
- dev_dbg(hdq_data->dev, "Could not acquire mutex\n");
- return -EINTR;
- }
- hdq_data->init_trans = 0;
- mutex_unlock(&hdq_data->hdq_mutex);
- omap_hdq_put(hdq_data);
- }
+ pm_runtime_mark_last_busy(hdq_data->dev);
+ pm_runtime_put_autosuspend(hdq_data->dev);
return val;
}
@@ -611,9 +479,12 @@ static void omap_w1_write_byte(void *_hdq, u8 byte)
int ret;
u8 status;
- /* First write to initialize the transfer */
- if (hdq_data->init_trans == 0)
- omap_hdq_get(hdq_data);
+ ret = pm_runtime_get_sync(hdq_data->dev);
+ if (ret < 0) {
+ pm_runtime_put_noidle(hdq_data->dev);
+
+ return;
+ }
/*
* We need to reset the slave before
@@ -623,31 +494,15 @@ static void omap_w1_write_byte(void *_hdq, u8 byte)
if (byte == W1_SKIP_ROM)
omap_hdq_break(hdq_data);
- ret = mutex_lock_interruptible(&hdq_data->hdq_mutex);
- if (ret < 0) {
- dev_dbg(hdq_data->dev, "Could not acquire mutex\n");
- return;
- }
- hdq_data->init_trans++;
- mutex_unlock(&hdq_data->hdq_mutex);
-
ret = hdq_write_byte(hdq_data, byte, &status);
if (ret < 0) {
dev_dbg(hdq_data->dev, "TX failure:Ctrl status %x\n", status);
- return;
+ goto out_err;
}
- /* Second write, data transferred. Release the module */
- if (hdq_data->init_trans > 1) {
- omap_hdq_put(hdq_data);
- ret = mutex_lock_interruptible(&hdq_data->hdq_mutex);
- if (ret < 0) {
- dev_dbg(hdq_data->dev, "Could not acquire mutex\n");
- return;
- }
- hdq_data->init_trans = 0;
- mutex_unlock(&hdq_data->hdq_mutex);
- }
+out_err:
+ pm_runtime_mark_last_busy(hdq_data->dev);
+ pm_runtime_put_autosuspend(hdq_data->dev);
}
static struct w1_bus_master omap_w1_master = {
@@ -656,6 +511,35 @@ static struct w1_bus_master omap_w1_master = {
.reset_bus = omap_w1_reset_bus,
};
+static int __maybe_unused omap_hdq_runtime_suspend(struct device *dev)
+{
+ struct hdq_data *hdq_data = dev_get_drvdata(dev);
+
+ hdq_reg_out(hdq_data, 0, hdq_data->mode);
+ hdq_reg_in(hdq_data, OMAP_HDQ_INT_STATUS);
+
+ return 0;
+}
+
+static int __maybe_unused omap_hdq_runtime_resume(struct device *dev)
+{
+ struct hdq_data *hdq_data = dev_get_drvdata(dev);
+
+ /* select HDQ/1W mode & enable clocks */
+ hdq_reg_out(hdq_data, OMAP_HDQ_CTRL_STATUS,
+ OMAP_HDQ_CTRL_STATUS_CLOCKENABLE |
+ OMAP_HDQ_CTRL_STATUS_INTERRUPTMASK |
+ hdq_data->mode);
+ hdq_reg_in(hdq_data, OMAP_HDQ_INT_STATUS);
+
+ return 0;
+}
+
+static const struct dev_pm_ops omap_hdq_pm_ops = {
+ SET_RUNTIME_PM_OPS(omap_hdq_runtime_suspend,
+ omap_hdq_runtime_resume, NULL)
+};
+
static int omap_hdq_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
@@ -677,23 +561,27 @@ static int omap_hdq_probe(struct platform_device *pdev)
if (IS_ERR(hdq_data->hdq_base))
return PTR_ERR(hdq_data->hdq_base);
- hdq_data->hdq_usecount = 0;
- hdq_data->rrw = 0;
mutex_init(&hdq_data->hdq_mutex);
+ ret = of_property_read_string(pdev->dev.of_node, "ti,mode", &mode);
+ if (ret < 0 || !strcmp(mode, "hdq")) {
+ hdq_data->mode = 0;
+ omap_w1_master.search = omap_w1_search_bus;
+ } else {
+ hdq_data->mode = 1;
+ omap_w1_master.triplet = omap_w1_triplet;
+ }
+
pm_runtime_enable(&pdev->dev);
+ pm_runtime_use_autosuspend(&pdev->dev);
+ pm_runtime_set_autosuspend_delay(&pdev->dev, 300);
ret = pm_runtime_get_sync(&pdev->dev);
if (ret < 0) {
+ pm_runtime_put_noidle(&pdev->dev);
dev_dbg(&pdev->dev, "pm_runtime_get_sync failed\n");
goto err_w1;
}
- ret = _omap_hdq_reset(hdq_data);
- if (ret) {
- dev_dbg(&pdev->dev, "reset failed\n");
- goto err_irq;
- }
-
rev = hdq_reg_in(hdq_data, OMAP_HDQ_REVISION);
dev_info(&pdev->dev, "OMAP HDQ Hardware Rev %c.%c. Driver in %s mode\n",
(rev >> 4) + '0', (rev & 0x0f) + '0', "Interrupt");
@@ -715,16 +603,8 @@ static int omap_hdq_probe(struct platform_device *pdev)
omap_hdq_break(hdq_data);
- pm_runtime_put_sync(&pdev->dev);
-
- ret = of_property_read_string(pdev->dev.of_node, "ti,mode", &mode);
- if (ret < 0 || !strcmp(mode, "hdq")) {
- hdq_data->mode = 0;
- omap_w1_master.search = omap_w1_search_bus;
- } else {
- hdq_data->mode = 1;
- omap_w1_master.triplet = omap_w1_triplet;
- }
+ pm_runtime_mark_last_busy(&pdev->dev);
+ pm_runtime_put_autosuspend(&pdev->dev);
omap_w1_master.data = hdq_data;
@@ -739,6 +619,7 @@ static int omap_hdq_probe(struct platform_device *pdev)
err_irq:
pm_runtime_put_sync(&pdev->dev);
err_w1:
+ pm_runtime_dont_use_autosuspend(&pdev->dev);
pm_runtime_disable(&pdev->dev);
return ret;
@@ -746,23 +627,19 @@ err_w1:
static int omap_hdq_remove(struct platform_device *pdev)
{
- struct hdq_data *hdq_data = platform_get_drvdata(pdev);
+ int active;
- mutex_lock(&hdq_data->hdq_mutex);
-
- if (hdq_data->hdq_usecount) {
- dev_dbg(&pdev->dev, "removed when use count is not zero\n");
- mutex_unlock(&hdq_data->hdq_mutex);
- return -EBUSY;
- }
+ active = pm_runtime_get_sync(&pdev->dev);
+ if (active < 0)
+ pm_runtime_put_noidle(&pdev->dev);
- mutex_unlock(&hdq_data->hdq_mutex);
+ w1_remove_master_device(&omap_w1_master);
- /* remove module dependency */
+ pm_runtime_dont_use_autosuspend(&pdev->dev);
+ if (active >= 0)
+ pm_runtime_put_sync(&pdev->dev);
pm_runtime_disable(&pdev->dev);
- w1_remove_master_device(&omap_w1_master);
-
return 0;
}
@@ -779,6 +656,7 @@ static struct platform_driver omap_hdq_driver = {
.driver = {
.name = "omap_hdq",
.of_match_table = omap_hdq_dt_ids,
+ .pm = &omap_hdq_pm_ops,
},
};
module_platform_driver(omap_hdq_driver);
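The omap_hdq rework replaces the hand-rolled clock refcounting (omap_hdq_get()/omap_hdq_put() and hdq_usecount) with runtime PM autosuspend: the mode setup from the old reset path moves into the SET_RUNTIME_PM_OPS callbacks, and every bus operation brackets its hardware access with the same idiom. Condensed, with dev standing in for hdq_data->dev:

	ret = pm_runtime_get_sync(dev);
	if (ret < 0) {
		pm_runtime_put_noidle(dev);	/* rebalance the usage count */
		return ret;
	}

	/* ... touch the hardware ... */

	pm_runtime_mark_last_busy(dev);		/* restart the autosuspend timer */
	pm_runtime_put_autosuspend(dev);	/* idles after the 300 ms delay */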
diff --git a/drivers/watchdog/bcm63xx_wdt.c b/drivers/watchdog/bcm63xx_wdt.c
index 8a043b52aa2f..7cdb25363ea0 100644
--- a/drivers/watchdog/bcm63xx_wdt.c
+++ b/drivers/watchdog/bcm63xx_wdt.c
@@ -246,7 +246,7 @@ static int bcm63xx_wdt_probe(struct platform_device *pdev)
return -ENODEV;
}
- bcm63xx_wdt_device.regs = devm_ioremap_nocache(&pdev->dev, r->start,
+ bcm63xx_wdt_device.regs = devm_ioremap(&pdev->dev, r->start,
resource_size(r));
if (!bcm63xx_wdt_device.regs) {
dev_err(&pdev->dev, "failed to remap I/O resources\n");
diff --git a/drivers/watchdog/intel_scu_watchdog.c b/drivers/watchdog/intel_scu_watchdog.c
index 6ad5bf3451ec..804e35940983 100644
--- a/drivers/watchdog/intel_scu_watchdog.c
+++ b/drivers/watchdog/intel_scu_watchdog.c
@@ -463,7 +463,7 @@ static int __init intel_scu_watchdog_init(void)
return -ENODEV;
}
- tmp_addr = ioremap_nocache(watchdog_device.timer_tbl_ptr->phys_addr,
+ tmp_addr = ioremap(watchdog_device.timer_tbl_ptr->phys_addr,
20);
if (tmp_addr == NULL) {
diff --git a/drivers/watchdog/rc32434_wdt.c b/drivers/watchdog/rc32434_wdt.c
index 1dfede0abf18..aee3c2efd565 100644
--- a/drivers/watchdog/rc32434_wdt.c
+++ b/drivers/watchdog/rc32434_wdt.c
@@ -26,7 +26,7 @@
#include <linux/platform_device.h> /* For platform_driver framework */
#include <linux/spinlock.h> /* For spin_lock/spin_unlock/... */
#include <linux/uaccess.h> /* For copy_to_user/put_user/... */
-#include <linux/io.h> /* For devm_ioremap_nocache */
+#include <linux/io.h> /* For devm_ioremap */
#include <asm/mach-rc32434/integ.h> /* For the Watchdog registers */
@@ -267,7 +267,7 @@ static int rc32434_wdt_probe(struct platform_device *pdev)
return -ENODEV;
}
- wdt_reg = devm_ioremap_nocache(&pdev->dev, r->start, resource_size(r));
+ wdt_reg = devm_ioremap(&pdev->dev, r->start, resource_size(r));
if (!wdt_reg) {
pr_err("failed to remap I/O resources\n");
return -ENXIO;
diff --git a/drivers/xen/gntdev-dmabuf.c b/drivers/xen/gntdev-dmabuf.c
index 63f0857bf62d..75d3bb948bf3 100644
--- a/drivers/xen/gntdev-dmabuf.c
+++ b/drivers/xen/gntdev-dmabuf.c
@@ -342,35 +342,12 @@ static void dmabuf_exp_ops_release(struct dma_buf *dma_buf)
mutex_unlock(&priv->lock);
}
-static void *dmabuf_exp_ops_kmap(struct dma_buf *dma_buf,
- unsigned long page_num)
-{
- /* Not implemented. */
- return NULL;
-}
-
-static void dmabuf_exp_ops_kunmap(struct dma_buf *dma_buf,
- unsigned long page_num, void *addr)
-{
- /* Not implemented. */
-}
-
-static int dmabuf_exp_ops_mmap(struct dma_buf *dma_buf,
- struct vm_area_struct *vma)
-{
- /* Not implemented. */
- return 0;
-}
-
static const struct dma_buf_ops dmabuf_exp_ops = {
.attach = dmabuf_exp_ops_attach,
.detach = dmabuf_exp_ops_detach,
.map_dma_buf = dmabuf_exp_ops_map_dma_buf,
.unmap_dma_buf = dmabuf_exp_ops_unmap_dma_buf,
.release = dmabuf_exp_ops_release,
- .map = dmabuf_exp_ops_kmap,
- .unmap = dmabuf_exp_ops_kunmap,
- .mmap = dmabuf_exp_ops_mmap,
};
struct gntdev_dmabuf_export_args {
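The gntdev stubs could be dropped because the dma-buf core treats these callbacks as optional: a NULL .mmap (and, in this era, NULL kmap-style .map/.unmap hooks) is reported to callers as unsupported, which is more honest than a stub that pretends to succeed. Roughly, under that assumption:

	if (!dmabuf->ops->mmap)
		return -EINVAL;	/* unsupported, instead of a silent no-op */
	return dmabuf->ops->mmap(dmabuf, vma);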
diff --git a/drivers/xen/preempt.c b/drivers/xen/preempt.c
index 8b9919c26095..70650b248de5 100644
--- a/drivers/xen/preempt.c
+++ b/drivers/xen/preempt.c
@@ -8,7 +8,7 @@
#include <linux/sched.h>
#include <xen/xen-ops.h>
-#ifndef CONFIG_PREEMPT
+#ifndef CONFIG_PREEMPTION
/*
* Some hypercalls issued by the toolstack can take many 10s of
@@ -37,4 +37,4 @@ asmlinkage __visible void xen_maybe_preempt_hcall(void)
__this_cpu_write(xen_in_preemptible_hcall, true);
}
}
-#endif /* CONFIG_PREEMPT */
+#endif /* CONFIG_PREEMPTION */
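The last hunk follows a tree-wide rule: CONFIG_PREEMPTION is selected by both CONFIG_PREEMPT and CONFIG_PREEMPT_RT, so any guard that means "this kernel can be preempted" must test CONFIG_PREEMPTION; testing plain CONFIG_PREEMPT would wrongly build the voluntary-preemption fixup into PREEMPT_RT kernels, which are already fully preemptible. The general shape, sketched:

	static void long_loop_step(void)
	{
	#ifndef CONFIG_PREEMPTION
		cond_resched();	/* non-preemptible build needs explicit points */
	#endif
	}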